diff -Nru nodejs-0.11.13/AUTHORS nodejs-0.11.15/AUTHORS --- nodejs-0.11.13/AUTHORS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/AUTHORS 2015-01-20 21:22:17.000000000 +0000 @@ -553,3 +553,30 @@ Denys Zariaiev Sean McArthur Rasmus Christian Pedersen +Greg Sabia Tucker +Calvin Metcalf +cjihrig +Chris Barber +Nick Apperson +Oguz Bastemur +Maurice Butler +Chris Dickinson +Julien Gilli +Jakob Gillich +James Halliday +Kevin Simper +Jackson Tian +Tristan Berger +Mathias Schreck +Steven R. Loomis +Matthew Fitzsimmons +Swaagie +Emmanuel Odeke +Eric Mill +Brendan Ashworth +Alejandro Oviedo +pkcs +Saúl Ibarra Corretgé +silverwind +Steven R. Loomis +James M Snell diff -Nru nodejs-0.11.13/benchmark/buffers/buffer-base64-encode.js nodejs-0.11.15/benchmark/buffers/buffer-base64-encode.js --- nodejs-0.11.13/benchmark/buffers/buffer-base64-encode.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/benchmark/buffers/buffer-base64-encode.js 2015-01-20 21:22:17.000000000 +0000 @@ -28,9 +28,8 @@ var b = Buffer(N); var s = ''; for (var i = 0; i < 256; ++i) s += String.fromCharCode(i); - - bench.start(); for (var i = 0; i < N; i += 256) b.write(s, i, 256, 'ascii'); + bench.start(); for (var i = 0; i < 32; ++i) b.toString('base64'); bench.end(64); } diff -Nru nodejs-0.11.13/benchmark/buffers/buffer-slice.js nodejs-0.11.15/benchmark/buffers/buffer-slice.js --- nodejs-0.11.13/benchmark/buffers/buffer-slice.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/benchmark/buffers/buffer-slice.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +var common = require('../common.js'); +var SlowBuffer = require('buffer').SlowBuffer; + +var bench = common.createBenchmark(main, { + type: ['fast', 'slow'], + n: [1024] +}); + +var buf = new Buffer(1024); +var slowBuf = new SlowBuffer(1024); + +function main(conf) { + var n = +conf.n; + var b = conf.type === 'fast' ? 
buf : slowBuf; + bench.start(); + for (var i = 0; i < n * 1024; i++) { + b.slice(10, 256); + } + bench.end(n); +} diff -Nru nodejs-0.11.13/benchmark/misc/module-loader.js nodejs-0.11.15/benchmark/misc/module-loader.js --- nodejs-0.11.13/benchmark/misc/module-loader.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/benchmark/misc/module-loader.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,72 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +var fs = require('fs'); +var path = require('path'); +var common = require('../common.js'); +var packageJson = '{"main": "index.js"}'; + +var tmpDirectory = path.join(__dirname, '..', 'tmp'); +var benchmarkDirectory = path.join(tmpDirectory, 'nodejs-benchmark-module'); + +var bench = common.createBenchmark(main, { + thousands: [50] +}); + +function main(conf) { + rmrf(tmpDirectory); + try { fs.mkdirSync(tmpDirectory); } catch (e) {} + try { fs.mkdirSync(benchmarkDirectory); } catch (e) {} + + var n = +conf.thousands * 1e3; + for (var i = 0; i <= n; i++) { + fs.mkdirSync(benchmarkDirectory + i); + fs.writeFileSync(benchmarkDirectory + i + '/package.json', '{"main": "index.js"}'); + fs.writeFileSync(benchmarkDirectory + i + '/index.js', 'module.exports = "";'); + } + + measure(n); +} + +function measure(n) { + bench.start(); + for (var i = 0; i <= n; i++) { + require(benchmarkDirectory + i); + } + bench.end(n / 1e3); +} + +function rmrf(location) { + if (fs.existsSync(location)) { + var things = fs.readdirSync(location); + things.forEach(function(thing) { + var cur = path.join(location, thing), + isDirectory = fs.statSync(cur).isDirectory(); + if (isDirectory) { + rmrf(cur); + return; + } + fs.unlinkSync(cur); + }); + fs.rmdirSync(location); + } +} diff -Nru nodejs-0.11.13/benchmark/README.md nodejs-0.11.15/benchmark/README.md --- nodejs-0.11.13/benchmark/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/benchmark/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,115 @@ +# Node.js core benchmark tests + +This folder contains benchmark tests to measure the performance for certain +Node.js APIs. + +## How to run tests + +There are two ways to run benchmark tests: + +1. Run all tests of a given type, for example, buffers + +```sh +node benchmark/common.js buffers +``` + +The above command will find all scripts under `buffers` directory and require +each of them as a module. 
When a test script is required, it creates an instance +of `Benchmark` (a class defined in common.js). In the next tick, the `Benchmark` +constructor iterates through the configuration object property values and run +the test function with each of the combined arguments in spawned processes. For +example, buffers/buffer-read.js has the following configuration: + +```js +var bench = common.createBenchmark(main, { + noAssert: [false, true], + buffer: ['fast', 'slow'], + type: ['UInt8', 'UInt16LE', 'UInt16BE', + 'UInt32LE', 'UInt32BE', + 'Int8', 'Int16LE', 'Int16BE', + 'Int32LE', 'Int32BE', + 'FloatLE', 'FloatBE', + 'DoubleLE', 'DoubleBE'], + millions: [1] +}); +``` +The runner takes one item from each of the property array value to build a list +of arguments to run the main function. The main function will receive the conf +object as follows: + +- first run: +```js + { noAssert: false, + buffer: 'fast', + type: 'UInt8', + millions: 1 + } +``` +- second run: +```js + { + noAssert: false, + buffer: 'fast', + type: 'UInt16LE', + millions: 1 + } +``` +... + +In this case, the main function will run 2*2*14*1 = 56 times. The console output +looks like the following: + +``` +buffers//buffer-read.js +buffers/buffer-read.js noAssert=false buffer=fast type=UInt8 millions=1: 271.83 +buffers/buffer-read.js noAssert=false buffer=fast type=UInt16LE millions=1: 239.43 +buffers/buffer-read.js noAssert=false buffer=fast type=UInt16BE millions=1: 244.57 +... +``` + +2. Run an individual test, for example, buffer-slice.js + +```sh +node benchmark/buffers/buffer-read.js +``` +The output: +``` +buffers/buffer-read.js noAssert=false buffer=fast type=UInt8 millions=1: 246.79 +buffers/buffer-read.js noAssert=false buffer=fast type=UInt16LE millions=1: 240.11 +buffers/buffer-read.js noAssert=false buffer=fast type=UInt16BE millions=1: 245.91 +... +``` + +## How to write a benchmark test + +The benchmark tests are grouped by types. 
Each type corresponds to a subdirectory, +such as `arrays`, `buffers`, or `fs`. + +Let's add a benchmark test for Buffer.slice function. We first create a file +buffers/buffer-slice.js. + +### The code snippet + +```js +var common = require('../common.js'); // Load the test runner + +var SlowBuffer = require('buffer').SlowBuffer; + +// Create a benchmark test for function `main` and the configuration variants +var bench = common.createBenchmark(main, { + type: ['fast', 'slow'], // Two types of buffer + n: [512] // Number of times (each unit is 1024) to call the slice API +}); + +function main(conf) { + // Read the parameters from the configuration + var n = +conf.n; + var b = conf.type === 'fast' ? buf : slowBuf; + bench.start(); // Start benchmarking + for (var i = 0; i < n * 1024; i++) { + // Add your test here + b.slice(10, 256); + } + bench.end(n); // End benchmarking +} +``` diff -Nru nodejs-0.11.13/ChangeLog nodejs-0.11.15/ChangeLog --- nodejs-0.11.13/ChangeLog 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/ChangeLog 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,312 @@ -2014.05.01, Version 0.11.13 (Unstable) +2015.01.20, Version 0.11.15 (Unstable) + +* v8: Upgrade to 3.28.73 + +* uv: Upgrade to 1.0.2 + +* npm: Upgrade to v2.1.6 + +* uv: float patch to revert tty breakage (Trevor Norris) + +* v8: re-implement debugger-agent (Fedor Indutny) + +* v8: apply floating irhydra patch (Fedor Indutny) + +* v8: fix postmortem-metadata generator (Refael Ackermann) + +* debugger: fix unhandled error in setBreakpoint (Miroslav Bajtoš) + +* async-wrap: add event hooks (Trevor Norris) + +* async-wrap: expose async-wrap as binding (Trevor Norris) + +* buffer, doc: misc. fix and cleanup (Trevor Norris) + +* buffer: add generic functions for (u)int ops (Yazhong Liu) + +* buffer: fix and cleanup fill() (Trevor Norris) + +* buffer: mv floating point read/write checks to JS (Trevor Norris) + +* build, i18n: improve Intl build, add "--with-intl" (Steven R. 
Loomis) + +* build: add small-icu support for binary packages (Julien Gilli) + +* build: do not generate support for libuv's probes (Julien Gilli) + +* build: i18n: add icu config options (Steven R. Loomis) + +* build: i18n: support little-endian machines (Steven Loomis) + +* build: vcbuild fix "The input line is too long." (Alexis Campailla) + +* child_process: improve spawn() argument handling (cjihrig) + +* cluster: avoid race enabling debugger in worker (Timothy J Fontaine) + +* cluster: cluster.disconnect() should check status (Sam Roberts) + +* cluster: do not signal children in debug mode (Fedor Indutny) + +* cluster: don't assert if worker has no handles (Sam Roberts) + +* core: fix usage of uv_cwd (Saúl Ibarra Corretgé) + +* core: replace uv_fs_readdir with uv_fs_scandir (Saúl Ibarra Corretgé) + +* crypto: createDiffieHellman throw for bad args (Trevor Norris) + +* crypto: lower RSS usage for TLSCallbacks (Fedor Indutny) + +* crypto: store thread id as pointer-sized (Alexis Campailla) + +* dns: propagate domain for c-ares methods (Chris Dickinson) + +* fs: fix symlink error message (Vladimir Kurchatkin) + +* http: Improve _addHeaderLines method (Jackson Tian) + +* http: cleanup setHeader() (Trevor Norris) + +* http: rename flush to flushHeaders (Timothy J Fontaine) + +* lib,src: fix spawnSync ignoring its 'env' option (Juanjo) + +* modules: adding load linked modules feature (Thorsten Lorenz) + +* net: Make server.connections un-enumerable (Patrick Mooney) + +* net: add pauseOnConnect option to createServer() (cjihrig) + +* net: make connect() input validation synchronous (cjihrig) + +* node: avoid automatic microtask runs (Vladimir Kurchatkin) + +* node: fix throws before timer module is loaded (Trevor Norris) + +* openssl: fix keypress requirement in apps on win32 (Fedor Indutny) + +* path: added parse() and format() functions (Rory Bradford) + +* path: allow calling platform specific methods (Timothy J Fontaine) + +* path: don't lower-cases drive 
letters (Bert Belder) + +* path: refactor normalizeArray() (Nathan Woltman) + +* process: pid can be a string in process.kill() (Sam Roberts) + +* readline: fix performance issue when large line (Jicheng Li) + +* readline: should not require an output stream. (Julien Gilli) + +* smalloc: check if obj has external data (Vladimir Kurchatkin) + +* smalloc: don't allow to dispose typed arrays (Vladimir Kurchatkin) + +* smalloc: fix bad assert for zero length data (Trevor Norris) + +* smalloc: fix copyOnto optimization (Vladimir Kurchatkin) + +* src: all wrap's now use actual FunctionTemplate (Trevor Norris) + +* src: fix VC++ warning C4244 (Rasmus Christian Pedersen) + +* src: remove Async Listener (Trevor Norris) + +* stream: switch _writableState.buffer to queue (Chris Dickinson) + +* streams: make setDefaultEncoding() throw (Brian White) + +* streams: set default encoding for writable streams (Johnny Ray) + +* tls: remove tls.createSecurePair code deprecation (Jackson Tian) + +* tls_wrap: ignore ZERO_RETURN after close_notify (Fedor Indutny) + +* url: change hostname regex to negate invalid chars (Jonathan Johnson) + +* url: fixed encoding for slash switching emulation. 
(Evan Rutledge Borden) + +* url: improve parsing speed (CGavrila) + +* url: make query() consistent (Gabriel Wicke) + +* url: support `path` for url.format (Yazhong Liu) + +* util: add es6 Symbol support for `util.inspect` (gyson) + + +2014.09.24, Version 0.11.14 (Unstable), 902090af5375e497dded310575f19de5328a9bbc + +* uv: Upgrade to v1.0.0-rc1 + +* http_parser: Upgrade to v2.3.0 + +* npm: Upgrade to v2.0.0 + +* openssl: Upgrade to v1.0.1i + +* v8: Upgrade to 3.26.33 + +* Add fast path for simple URL parsing (Gabriel Wicke) + +* Added support for options parameter in console.dir() (Xavi Magrinyà) + +* Cluster: fix shared handles on Windows (Alexis Campailla) + +* buffer: Fix incorrect Buffer.compare behavior (Feross Aboukhadijeh) + +* buffer: construct new buffer from buffer toJSON() output (cjihrig) + +* buffer: improve Buffer constructor (Kang-Hao Kenny) + +* build: linking CoreFoundation framework for OSX (Thorsten Lorenz) + +* child_process: accept uid/gid everywhere (Fedor Indutny) + +* child_process: add path to spawn ENOENT Error (Ryan Cole) + +* child_process: copy spawnSync() cwd option to proper buffer (cjihrig) + +* child_process: do not access stderr when stdio set to 'ignore' (cjihrig) + +* child_process: don't throw on EAGAIN (Charles) + +* child_process: don't throw on EMFILE/ENFILE (Ben Noordhuis) + +* child_process: use full path for cmd.exe on Win32 (Ed Morley) + +* cluster: allow multiple calls to setupMaster() (Ryan Graham) + +* cluster: centralize removal from workers list. 
(Julien Gilli) + +* cluster: enable error/message events using .worker (cjihrig) + +* cluster: include settings object in 'setup' event (Ryan Graham) + +* cluster: restore v0.10.x setupMaster() behaviour (Ryan Graham) + +* cluster: support options in Worker constructor (cjihrig) + +* cluster: test events emit on cluster.worker (Sam Roberts) + +* console: console.dir() accepts options object (Xavi Magrinyà) + +* crypto: add `honorCipherOrder` argument (Fedor Indutny) + +* crypto: allow padding in RSA methods (Fedor Indutny) + +* crypto: clarify RandomBytes() error msg (Mickael van der Beek) + +* crypto: never store pointer to conn in SSL_CTX (Fedor Indutny) + +* crypto: unsigned value can't be negative (Brian White) + +* dgram: remove new keyword from errnoException (Jackson Tian) + +* dns: always set variable family in lookup() (cjihrig) + +* dns: include host name in error message if available (Maciej Małecki) + +* dns: introduce lookupService function (Saúl Ibarra Corretgé) + +* dns: send lookup c-ares errors to callback (Chris Dickinson) + +* dns: throw if hostname is not string or falsey (cjihrig) + +* events: Output the event that is leaking (Arnout Kazemier) + +* fs: close file if fstat() fails in readFile() (cjihrig) + +* fs: fs.readFile should not throw uncaughtException (Jackson Tian) + +* http: add 308 status_code, see RFC7238 (Yazhong Liu) + +* http: don't default OPTIONS to chunked encoding (Nick Muerdter) + +* http: fix bailout for writeHead (Alex Kocharin) + +* http: remove unused code block (Fedor Indutny) + +* http: write() after end() emits an error. 
(Julien Gilli) + +* lib, src: add vm.runInDebugContext() (Ben Noordhuis) + +* lib: noisy deprecation of child_process customFds (Ryan Graham) + +* module: don't require fs several times (Robert Kowalski) + +* net,dgram: workers can listen on exclusive ports (cjihrig) + +* net,stream: add isPaused, don't read() when paused (Chris Dickinson) + +* net: Ensure consistent binding to IPV6 if address is absent (Raymond Feng) + +* net: add remoteFamily for socket (Jackson Tian) + +* net: don't emit listening if handle is closed (Eli Skeggs) + +* net: don't prefer IPv4 addresses during resolution (cjihrig) + +* net: don't throw on net.Server.close() (cjihrig) + +* net: reset `errorEmitted` on reconnect (Ed Umansky) + +* node: set names for prototype methods (Trevor Norris) + +* node: support v8 microtask queue (Vladimir Kurchatkin) + +* path: fix slice OOB in trim (Lucio M. Tato) + +* path: isAbsolute() should always return boolean (Herman Lee) + +* process: throw TypeError if kill pid not a number (Sam Roberts) + +* querystring: custom encode and decode (fengmk2) + +* querystring: do not add sep for empty array (cjihrig) + +* querystring: remove prepended ? 
from query field (Ezequiel Rabinovich) + +* readline: fix close event of readline.Interface() (Yazhong Liu) + +* readline: fixes scoping bug (Dan Kaplun) + +* readline: implements keypress buffering (Dan Kaplun) + +* repl: fix multi-line input (Fedor Indutny) + +* repl: fix overwrite for this._prompt (Yazhong Liu) + +* repl: proper `setPrompt()` and `multiline` support (Fedor Indutny) + +* stream: don't try to finish if buffer is not empty (Vladimir Kurchatkin) + +* stream: only end reading on null, not undefined (Jonathan Reem) + +* streams: set default hwm properly for Duplex (Andrew Oppenlander) + +* string_bytes: ucs2 support big endian (Andrew Low) + +* tls, crypto: add DHE support (Shigeki Ohtsu) + +* tls: `checkServerIdentity` option (Trevor Livingston) + +* tls: add DHE-RSA-AES128-SHA256 to the def ciphers (Shigeki Ohtsu) + +* tls: better error reporting at cert validation (Fedor Indutny) + +* tls: support multiple keys/certs (Fedor Indutny) + +* tls: throw an error, not string (Jackson Tian) + +* udp: make it possible to receive empty udp packets (Andrius Bentkus) + +* url: treat \ the same as / (isaacs) + + +2014.05.01, Version 0.11.13 (Unstable), 99c9930ad626e2796af23def7cac19b65c608d18 * v8: upgrade to 3.24.35.22 @@ -618,6 +926,161 @@ * console: `console.dir()` bypasses inspect() methods (Nathan Rajlich) +2014.12.22, Version 0.10.35 (Stable) + +* tls: re-add 1024-bit SSL certs removed by f9456a2 (Chris Dickinson) + +* timers: don't close interval timers when unrefd (Julien Gilli) + +* timers: don't mutate unref list while iterating it (Julien Gilli) + + +2014.12.17, Version 0.10.34 (Stable), 52795f8fcc2de77cf997e671ea58614e5e425dfe + +* uv: update to v0.10.30 + +* zlib: upgrade to v1.2.8 + +* child_process: check execFile args is an array (Sam Roberts) + +* child_process: check fork args is an array (Sam Roberts) + +* crypto: update root certificates (Ben Noordhuis) + +* domains: fix issues with abort on uncaught (Julien Gilli) + +* timers: Avoid linear 
scan in _unrefActive. (Julien Gilli) + +* timers: fix unref() memory leak (Trevor Norris) + +* v8: add api for aborting on uncaught exception (Julien Gilli) + +* debugger: fix when using "use strict" (Julien Gilli) + + +2014.10.20, Version 0.10.33 (Stable), 8d045a30e95602b443eb259a5021d33feb4df079 + +* openssl: Update to 1.0.1j (Addressing multiple CVEs) + +* uv: Update to v0.10.29 + +* child_process: properly support optional args (cjihrig) + +* crypto: Disable autonegotiation for SSLv2/3 by default (Fedor Indutny, + Timothy J Fontaine, Alexis Campailla) + + This is a behavior change, by default we will not allow the negotiation to + SSLv2 or SSLv3. If you want this behavior, run Node.js with either + `--enable-ssl2` or `--enable-ssl3` respectively. + + This does not change the behavior for users specifically requesting + `SSLv2_method` or `SSLv3_method`. While this behavior is not advised, it is + assumed you know what you're doing since you're specifically asking to use + these methods. + + +2014.09.16, Version 0.10.32 (Stable), 0fe0d121551593c23a565db8397f85f17bb0f00e + +* npm: Update to 1.4.28 + +* v8: fix a crash introduced by previous release (Fedor Indutny) + +* configure: add --openssl-no-asm flag (Fedor Indutny) + +* crypto: use domains for any callback-taking method (Chris Dickinson) + +* http: do not send `0\r\n\r\n` in TE HEAD responses (Fedor Indutny) + +* querystring: fix unescape override (Tristan Berger) + +* url: Add support for RFC 3490 separators (Mathias Bynens) + + +2014.08.19, Version 0.10.31 (Stable), 7fabdc23d843cb705d2d0739e7bbdaaf50aa3292 + +* v8: backport CVE-2013-6668 + +* openssl: Update to v1.0.1i + +* npm: Update to v1.4.23 + +* cluster: disconnect should not be synchronous (Sam Roberts) + +* fs: fix fs.readFileSync fd leak when get RangeError (Jackson Tian) + +* stream: fix Readable.wrap objectMode falsy values (James Halliday) + +* timers: fix timers with non-integer delay hanging. 
(Julien Gilli) + + +2014.07.31, Version 0.10.30 (Stable), bc0ff830aff1e016163d855e86ded5c98b0899e8 + +* uv: Upgrade to v0.10.28 + +* npm: Upgrade to v1.4.21 + +* v8: Interrupts must not mask stack overflow. + +* Revert "stream: start old-mode read in a next tick" (Fedor Indutny) + +* buffer: fix sign overflow in `readUIn32BE` (Fedor Indutny) + +* buffer: improve {read,write}{U}Int* methods (Nick Apperson) + +* child_process: handle writeUtf8String error (Fedor Indutny) + +* deps: backport 4ed5fde4f from v8 upstream (Fedor Indutny) + +* + +* lib: remove and restructure calls to isNaN() (cjihrig) + +* module: eliminate double `getenv()` (Maciej Małecki) + +* stream2: flush extant data on read of ended stream (Chris Dickinson) + +* streams: remove unused require('assert') (Rod Vagg) + +* timers: backport f8193ab (Julien Gilli) + +* util.h: interface compatibility (Oguz Bastemur) + +* zlib: do not crash on write after close (Fedor Indutny) + + +2014.06.05, Version 0.10.29 (Stable), ce82d6b8474bde7ac7df6d425fb88fb1bcba35bc + +* openssl: to 1.0.1h (CVE-2014-0224) + +* npm: upgrade to 1.4.14 + +* utf8: Prevent Node from sending invalid UTF-8 (Felix Geisendörfer) + - *NOTE* this introduces a breaking change, previously you could construct + invalid UTF-8 and invoke an error in a client that was expecting valid + UTF-8, now unmatched surrogate pairs are replaced with the unknown UTF-8 + character. To restore the old functionality simply have NODE_INVALID_UTF8 + environment variable set. 
+ +* child_process: do not set args before throwing (Greg Sabia Tucker) + +* child_process: spawn() does not throw TypeError (Greg Sabia Tucker) + +* constants: export O_NONBLOCK (Fedor Indutny) + +* crypto: improve memory usage (Alexis Campailla) + +* fs: close file if fstat() fails in readFile() (cjihrig) + +* lib: name EventEmitter prototype methods (Ben Noordhuis) + +* tls: fix performance issue (Alexis Campailla) + + +2014.05.01, Version 0.10.28 (Stable), b148cbe09d4657766fdb61575ba985734c2ff0a8 + +* npm: upgrade to v1.4.9 + + 2014.05.01, Version 0.10.27 (Stable), cb7911f78ae96ef7a540df992cc1359ba9636e86 * npm: upgrade to v1.4.8 diff -Nru nodejs-0.11.13/common.gypi nodejs-0.11.15/common.gypi --- nodejs-0.11.13/common.gypi 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/common.gypi 2015-01-20 21:22:17.000000000 +0000 @@ -1,17 +1,20 @@ { 'variables': { - 'werror': '', # Turn off -Werror in V8 build. - 'visibility%': 'hidden', # V8's visibility setting - 'target_arch%': 'ia32', # set v8's target architecture - 'host_arch%': 'ia32', # set v8's host architecture - 'want_separate_host_toolset': 0, # V8 should not build target and host - 'library%': 'static_library', # allow override to 'shared_library' for DLL/.so builds - 'component%': 'static_library', # NB. these names match with what V8 expects - 'msvs_multi_core_compile': '0', # we do enable multicore compiles, but not using the V8 way + 'werror': '', # Turn off -Werror in V8 build. + 'visibility%': 'hidden', # V8's visibility setting + 'target_arch%': 'ia32', # set v8's target architecture + 'host_arch%': 'ia32', # set v8's host architecture + 'want_separate_host_toolset%': 0, # V8 should not build target and host + 'library%': 'static_library', # allow override to 'shared_library' for DLL/.so builds + 'component%': 'static_library', # NB. 
these names match with what V8 expects + 'msvs_multi_core_compile': '0', # we do enable multicore compiles, but not using the V8 way 'gcc_version%': 'unknown', 'clang%': 0, 'python%': 'python', + # Enable disassembler for `--print-code` v8 options + 'v8_enable_disassembler': 1, + # Enable V8's post-mortem debugging only on unix flavors. 'conditions': [ ['OS == "win"', { @@ -21,12 +24,12 @@ 'os_posix': 1, 'v8_postmortem_support': 'true' }], - ['GENERATOR == "ninja"', { + ['GENERATOR == "ninja" or OS== "mac"', { 'OBJ_DIR': '<(PRODUCT_DIR)/obj', - 'V8_BASE': '<(PRODUCT_DIR)/libv8_base.<(target_arch).a', + 'V8_BASE': '<(PRODUCT_DIR)/libv8_base.a', }, { 'OBJ_DIR': '<(PRODUCT_DIR)/obj.target', - 'V8_BASE': '<(PRODUCT_DIR)/obj.target/deps/v8/tools/gyp/libv8_base.<(target_arch).a', + 'V8_BASE': '<(PRODUCT_DIR)/obj.target/deps/v8/tools/gyp/libv8_base.a', }], ], }, @@ -35,6 +38,9 @@ 'default_configuration': 'Release', 'configurations': { 'Debug': { + 'variables': { + 'v8_enable_handle_zapping%': 1, + }, 'defines': [ 'DEBUG', '_DEBUG' ], 'cflags': [ '-g', '-O0' ], 'conditions': [ @@ -59,6 +65,9 @@ }, }, 'Release': { + 'variables': { + 'v8_enable_handle_zapping%': 0, + }, 'cflags': [ '-O3', '-ffunction-sections', '-fdata-sections' ], 'conditions': [ ['target_arch=="x64"', { @@ -232,6 +241,11 @@ }], ['OS=="freebsd" and node_use_dtrace=="true"', { 'libraries': [ '-lelf' ], + }], + ['OS=="freebsd"', { + 'ldflags': [ + '-Wl,--export-dynamic', + ], }] ], } diff -Nru nodejs-0.11.13/configure nodejs-0.11.15/configure --- nodejs-0.11.13/configure 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/configure 2015-01-20 21:22:17.000000000 +0000 @@ -6,6 +6,8 @@ import shlex import subprocess import sys +import shutil +import string CC = os.environ.get('CC', 'cc') @@ -13,6 +15,10 @@ sys.path.insert(0, os.path.join(root_dir, 'tools', 'gyp', 'pylib')) from gyp.common import GetFlavor +# imports in tools/configure.d +sys.path.insert(0, os.path.join(root_dir, 'tools', 'configure.d')) 
+import nodedownload + # parse our options parser = optparse.OptionParser() @@ -54,6 +60,17 @@ dest='no_ifaddrs', help='use on deprecated SunOS systems that do not support ifaddrs.h') +parser.add_option("--fully-static", + action="store_true", + dest="fully_static", + help="Generate an executable without external dynamic libraries. This " + "will not work on OSX when using default compilation environment") + +parser.add_option("--openssl-no-asm", + action="store_true", + dest="openssl_no_asm", + help="Do not build optimized assembly for OpenSSL") + # deprecated parser.add_option('--openssl-includes', action='store', @@ -225,21 +242,36 @@ dest='with_etw', help='build with ETW (default is true on Windows)') +parser.add_option('--download', + action='store', + dest='download_list', + help=nodedownload.help()) + parser.add_option('--with-icu-path', action='store', dest='with_icu_path', help='Path to icu.gyp (ICU i18n, Chromium version only.)') +parser.add_option('--with-icu-locales', + action='store', + dest='with_icu_locales', + help='Comma-separated list of locales for "small-icu". Default: "root,en". 
"root" is assumed.') + +parser.add_option('--with-intl', + action='store', + dest='with_intl', + help='Intl mode: none, full-icu, small-icu (default is none)') + +parser.add_option('--with-icu-source', + action='store', + dest='with_icu_source', + help='Intl mode: optional local path to icu/ dir, or path/URL of icu source archive.') + parser.add_option('--with-perfctr', action='store_true', dest='with_perfctr', help='build with performance counters (default is true on Windows)') -parser.add_option('--with-sslv2', - action='store_true', - dest='with_sslv2', - help='enable SSL v2') - parser.add_option('--without-dtrace', action='store_true', dest='without_dtrace', @@ -271,6 +303,16 @@ dest='without_ssl', help='build without SSL') +parser.add_option('--without-ssl2', + action='store_true', + dest='ssl2', + help='Disable SSL v2') + +parser.add_option('--without-ssl3', + action='store_true', + dest='ssl3', + help='Disable SSL v3') + parser.add_option('--xcode', action='store_true', dest='use_xcode', @@ -278,6 +320,8 @@ (options, args) = parser.parse_args() +# set up auto-download list +auto_downloads = nodedownload.parse(options.download_list) def b(value): """Returns the string 'true' if value is truthy, 'false' otherwise.""" @@ -342,6 +386,13 @@ '__ARM_ARCH_7M__' in cc_macros_cache) +def is_arch_armv6(): + """Check for ARMv6 instructions""" + cc_macros_cache = cc_macros() + return ('__ARM_ARCH_6__' in cc_macros_cache or + '__ARM_ARCH_6M__' in cc_macros_cache) + + def is_arm_neon(): """Check for ARM NEON support""" return '__ARM_NEON__' in cc_macros() @@ -402,7 +453,8 @@ def host_arch_win(): """Host architecture check using environ vars (better way to do this?)""" - arch = os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') + observed_arch = os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') + arch = os.environ.get('PROCESSOR_ARCHITEW6432', observed_arch) matchup = { 'AMD64' : 'x64', @@ -437,7 +489,14 @@ arm_float_abi = 'hard' else: arm_float_abi = 'default' - 
o['variables']['armv7'] = int(is_arch_armv7()) + + if is_arch_armv7(): + o['variables']['arm_version'] = '7' + elif is_arch_armv6(): + o['variables']['arm_version'] = '6' + else: + o['variables']['arm_version'] = 'default' + o['variables']['arm_fpu'] = 'vfpv3' # V8 3.18 no longer supports VFP2. o['variables']['arm_neon'] = int(is_arm_neon()) o['variables']['arm_thumb'] = 0 # -marm @@ -456,6 +515,11 @@ o['variables']['host_arch'] = host_arch o['variables']['target_arch'] = target_arch + if target_arch != host_arch and not options.without_snapshot: + o['variables']['want_separate_host_toolset'] = 1 + else: + o['variables']['want_separate_host_toolset'] = 0 + if target_arch == 'arm': configure_arm(o) @@ -603,15 +667,18 @@ def configure_openssl(o): o['variables']['node_use_openssl'] = b(not options.without_ssl) o['variables']['node_shared_openssl'] = b(options.shared_openssl) + o['variables']['openssl_no_asm'] = ( + 1 if options.openssl_no_asm else 0) if options.without_ssl: return - # OpenSSL uses `#ifndef OPENSSL_NO_SSL2` checks so only define the - # macro when we want to _disable_ SSL2. - if not options.with_sslv2: + if options.ssl2: o['defines'] += ['OPENSSL_NO_SSL2=1'] + if options.ssl3: + o['defines'] += ['OPENSSL_NO_SSL3=1'] + if options.shared_openssl: (libs, cflags) = pkg_config('openssl') or ('-lssl -lcrypto', '') @@ -630,6 +697,14 @@ o['cflags'] += cflags.split() +def configure_fullystatic(o): + if options.fully_static: + o['libraries'] += ['-static'] + if flavor == 'mac': + print("Generation of static executable will not work on OSX " + "when using default compilation environment") + + def configure_winsdk(o): if flavor != 'win': return @@ -645,13 +720,259 @@ print('ctrpp not found in WinSDK path--using pre-gen files ' 'from tools/msvs/genfiles.') +def write(filename, data): + filename = os.path.join(root_dir, filename) + print 'creating ', filename + f = open(filename, 'w+') + f.write(data) + +do_not_edit = '# Do not edit. 
Generated by the configure script.\n' -def configure_icu(o): +def glob_to_var(dir_base, dir_sub): + list = [] + dir_all = os.path.join(dir_base, dir_sub) + files = os.walk(dir_all) + for ent in files: + (path, dirs, files) = ent + for file in files: + if file.endswith('.cpp') or file.endswith('.c') or file.endswith('.h'): + list.append('%s/%s' % (dir_sub, file)) + break + return list + +def configure_intl(o): + icus = [ + { + 'url': 'http://download.icu-project.org/files/icu4c/54.1/icu4c-54_1-src.zip', + # from https://ssl.icu-project.org/files/icu4c/54.1/icu4c-src-54_1.md5: + 'md5': '6b89d60e2f0e140898ae4d7f72323bca', + }, + ] + def icu_download(path): + # download ICU, if needed + for icu in icus: + url = icu['url'] + md5 = icu['md5'] + local = url.split('/')[-1] + targetfile = os.path.join(root_dir, 'deps', local) + if not os.path.isfile(targetfile): + if nodedownload.candownload(auto_downloads, "icu"): + nodedownload.retrievefile(url, targetfile) + else: + print ' Re-using existing %s' % targetfile + if os.path.isfile(targetfile): + sys.stdout.write(' Checking file integrity with MD5:\r') + gotmd5 = nodedownload.md5sum(targetfile) + print ' MD5: %s %s' % (gotmd5, targetfile) + if (md5 == gotmd5): + return targetfile + else: + print ' Expected: %s *MISMATCH*' % md5 + print '\n ** Corrupted ZIP? Delete %s to retry download.\n' % targetfile + return None + icu_config = { + 'variables': {} + } + icu_config_name = 'icu_config.gypi' + def write_config(data, name): + return + + # write an empty file to start with + write(icu_config_name, do_not_edit + + pprint.pformat(icu_config, indent=2) + '\n') + + # always set icu_small, node.gyp depends on it being defined. 
+ o['variables']['icu_small'] = b(False) + + with_intl = options.with_intl + with_icu_source = options.with_icu_source have_icu_path = bool(options.with_icu_path) - o['variables']['v8_enable_i18n_support'] = int(have_icu_path) - if have_icu_path: + if have_icu_path and with_intl: + print 'Error: Cannot specify both --with-icu-path and --with-intl' + sys.exit(1) + elif have_icu_path: + # Chromium .gyp mode: --with-icu-path + o['variables']['v8_enable_i18n_support'] = 1 + # use the .gyp given o['variables']['icu_gyp_path'] = options.with_icu_path - + return + # --with-intl= + # set the default + if with_intl is None: + with_intl = 'none' # The default mode of Intl + # sanity check localelist + if options.with_icu_locales and (with_intl != 'small-icu'): + print 'Error: --with-icu-locales only makes sense with --with-intl=small-icu' + sys.exit(1) + if with_intl == 'none' or with_intl is None: + o['variables']['v8_enable_i18n_support'] = 0 + return # no Intl + elif with_intl == 'small-icu': + # small ICU (English only) + o['variables']['v8_enable_i18n_support'] = 1 + o['variables']['icu_small'] = b(True) + with_icu_locales = options.with_icu_locales + if not with_icu_locales: + with_icu_locales = 'root,en' + locs = set(with_icu_locales.split(',')) + locs.add('root') # must have root + o['variables']['icu_locales'] = string.join(locs,',') + elif with_intl == 'full-icu': + # full ICU + o['variables']['v8_enable_i18n_support'] = 1 + elif with_intl == 'system-icu': + # ICU from pkg-config. + o['variables']['v8_enable_i18n_support'] = 1 + pkgicu = pkg_config('icu-i18n') + if not pkgicu: + print 'Error: could not load pkg-config data for "icu-i18n".' + print 'See above errors or the README.md.' 
+ sys.exit(1) + (libs, cflags) = pkgicu + o['libraries'] += libs.split() + o['cflags'] += cflags.split() + # use the "system" .gyp + o['variables']['icu_gyp_path'] = 'tools/icu/icu-system.gyp' + return + else: + print 'Error: unknown value --with-intl=%s' % with_intl + sys.exit(1) + # Note: non-ICU implementations could use other 'with_intl' + # values. + + # this is just the 'deps' dir. Used for unpacking. + icu_parent_path = os.path.join(root_dir, 'deps') + + # The full path to the ICU source directory. + icu_full_path = os.path.join(icu_parent_path, 'icu') + + # icu-tmp is used to download and unpack the ICU tarball. + icu_tmp_path = os.path.join(icu_parent_path, 'icu-tmp') + + # --with-icu-source processing + # first, check that they didn't pass --with-icu-source=deps/icu + if with_icu_source and os.path.abspath(icu_full_path) == os.path.abspath(with_icu_source): + print 'Ignoring redundant --with-icu-source=%s' % (with_icu_source) + with_icu_source = None + # if with_icu_source is still set, try to use it. + if with_icu_source: + if os.path.isdir(icu_full_path): + print 'Deleting old ICU source: %s' % (icu_full_path) + shutil.rmtree(icu_full_path) + # now, what path was given? + if os.path.isdir(with_icu_source): + # it's a path. Copy it. + print '%s -> %s' % (with_icu_source, icu_full_path) + shutil.copytree(with_icu_source, icu_full_path) + else: + # could be file or URL. + # Set up temporary area + if os.path.isdir(icu_tmp_path): + shutil.rmtree(icu_tmp_path) + os.mkdir(icu_tmp_path) + icu_tarball = None + if os.path.isfile(with_icu_source): + # it's a file. Try to unpack it. + icu_tarball = with_icu_source + else: + # Can we download it? + local = os.path.join(icu_tmp_path, with_icu_source.split('/')[-1]) # local part + icu_tarball = nodedownload.retrievefile(with_icu_source, local) + # continue with "icu_tarball" + nodedownload.unpack(icu_tarball, icu_tmp_path) + # Did it unpack correctly? 
Should contain 'icu' + tmp_icu = os.path.join(icu_tmp_path, 'icu') + if os.path.isdir(tmp_icu): + os.rename(tmp_icu, icu_full_path) + shutil.rmtree(icu_tmp_path) + else: + print ' Error: --with-icu-source=%s did not result in an "icu" dir.' % with_icu_source + shutil.rmtree(icu_tmp_path) + sys.exit(1) + + # ICU mode. (icu-generic.gyp) + byteorder = sys.byteorder + o['variables']['icu_gyp_path'] = 'tools/icu/icu-generic.gyp' + # ICU source dir relative to root + o['variables']['icu_path'] = icu_full_path + if not os.path.isdir(icu_full_path): + print '* ECMA-402 (Intl) support didn\'t find ICU in %s..' % (icu_full_path) + # can we download (or find) a zipfile? + localzip = icu_download(icu_full_path) + if localzip: + nodedownload.unpack(localzip, icu_parent_path) + if not os.path.isdir(icu_full_path): + print ' Cannot build Intl without ICU in %s.' % (icu_full_path) + print ' (Fix, or disable with "--with-intl=none" )' + sys.exit(1) + else: + print '* Using ICU in %s' % (icu_full_path) + # Now, what version of ICU is it? We just need the "major", such as 54. + # uvernum.h contains it as a #define. + uvernum_h = os.path.join(icu_full_path, 'source/common/unicode/uvernum.h') + if not os.path.isfile(uvernum_h): + print ' Error: could not load %s - is ICU installed?' 
% uvernum_h + sys.exit(1) + icu_ver_major = None + matchVerExp = r'^\s*#define\s+U_ICU_VERSION_SHORT\s+"([^"]*)".*' + match_version = re.compile(matchVerExp) + for line in open(uvernum_h).readlines(): + m = match_version.match(line) + if m: + icu_ver_major = m.group(1) + if not icu_ver_major: + print ' Could not read U_ICU_VERSION_SHORT version from %s' % uvernum_h + sys.exit(1) + icu_endianness = sys.byteorder[0]; # TODO(srl295): EBCDIC should be 'e' + o['variables']['icu_ver_major'] = icu_ver_major + o['variables']['icu_endianness'] = icu_endianness + icu_data_file_l = 'icudt%s%s.dat' % (icu_ver_major, 'l') + icu_data_file = 'icudt%s%s.dat' % (icu_ver_major, icu_endianness) + # relative to configure + icu_data_path = os.path.join(icu_full_path, + 'source/data/in', + icu_data_file_l) + # relative to dep.. + icu_data_in = os.path.join('../../deps/icu/source/data/in', icu_data_file_l) + if not os.path.isfile(icu_data_path) and icu_endianness != 'l': + # use host endianness + icu_data_path = os.path.join(icu_full_path, + 'source/data/in', + icu_data_file) + # relative to dep.. + icu_data_in = os.path.join('icu/source/data/in', + icu_data_file) + # this is the input '.dat' file to use .. icudt*.dat + # may be little-endian if from a icu-project.org tarball + o['variables']['icu_data_in'] = icu_data_in + # this is the icudt*.dat file which node will be using (platform endianness) + o['variables']['icu_data_file'] = icu_data_file + if not os.path.isfile(icu_data_path): + print ' Error: ICU prebuilt data file %s does not exist.' % icu_data_path + print ' See the README.md.' + # .. and we're not about to build it from .gyp! 
+ sys.exit(1) + # map from variable name to subdirs + icu_src = { + 'stubdata': 'stubdata', + 'common': 'common', + 'i18n': 'i18n', + 'io': 'io', + 'tools': 'tools/toolutil', + 'genccode': 'tools/genccode', + 'genrb': 'tools/genrb', + 'icupkg': 'tools/icupkg', + } + # this creates a variable icu_src_XXX for each of the subdirs + # with a list of the src files to use + for i in icu_src: + var = 'icu_src_%s' % i + path = '../../deps/icu/source/%s' % icu_src[i] + icu_config['variables'][var] = glob_to_var('tools/icu', path) + # write updated icu_config.gypi with a bunch of paths + write(icu_config_name, do_not_edit + + pprint.pformat(icu_config, indent=2) + '\n') + return # end of configure_intl # determine the "flavor" (operating system) we're building for, # leveraging gyp's GetFlavor function @@ -676,7 +997,8 @@ configure_v8(output) configure_openssl(output) configure_winsdk(output) -configure_icu(output) +configure_intl(output) +configure_fullystatic(output) # variables should be a root level element, # move everything else to target_defaults @@ -688,13 +1010,7 @@ } pprint.pprint(output, indent=2) -def write(filename, data): - filename = os.path.join(root_dir, filename) - print 'creating ', filename - f = open(filename, 'w+') - f.write(data) - -write('config.gypi', '# Do not edit. Generated by the configure script.\n' + +write('config.gypi', do_not_edit + pprint.pformat(output, indent=2) + '\n') config = { @@ -725,4 +1041,4 @@ gyp_args += args -subprocess.call(gyp_args) +sys.exit(subprocess.call(gyp_args)) diff -Nru nodejs-0.11.13/CONTRIBUTING.md nodejs-0.11.15/CONTRIBUTING.md --- nodejs-0.11.13/CONTRIBUTING.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/CONTRIBUTING.md 2015-01-20 21:22:17.000000000 +0000 @@ -9,7 +9,7 @@ Fork the project [on GitHub](https://github.com/joyent/node) and check out your copy. 
-``` +```sh $ git clone git@github.com:username/node.git $ cd node $ git remote add upstream git://github.com/joyent/node.git @@ -48,7 +48,7 @@ Okay, so you have decided on the proper branch. Create a feature branch and start hacking: -``` +```sh $ git checkout -b my-feature-branch -t origin/v0.10 ``` @@ -59,7 +59,7 @@ Make sure git knows your name and email address: -``` +```sh $ git config --global user.name "J. Random User" $ git config --global user.email "j.random.user@example.com" ``` @@ -99,7 +99,7 @@ Use `git rebase` (not `git merge`) to sync your work from time to time. -``` +```sh $ git fetch upstream $ git rebase upstream/v0.10 # or upstream/master ``` @@ -111,17 +111,30 @@ test/simple/ directory. Look at other tests to see how they should be structured (license boilerplate, common includes, etc.). -``` +```sh $ make jslint test ``` Make sure the linter is happy and that all tests pass. Please, do not submit patches that fail either check. +If you are updating tests and just want to run a single test to check it, you +can use this syntax to run it exactly as the test harness would: -### PUSH +``` +python tools/test.py -v --mode=release simple/test-stream2-transform +``` + +You can run tests directly with node: ``` +node ./test/simple/test-streams2-transform.js +``` + + +### PUSH + +```sh $ git push origin my-feature-branch ``` @@ -134,12 +147,6 @@ not send out notifications when you add commits. -### CONTRIBUTOR LICENSE AGREEMENT - -Please visit http://nodejs.org/cla.html and sign the Contributor License -Agreement. You only need to do that once. 
- - [stability index page]: https://github.com/joyent/node/blob/master/doc/api/documentation.markdown [issue tracker]: https://github.com/joyent/node/issues [node.js mailing list]: http://groups.google.com/group/nodejs diff -Nru nodejs-0.11.13/debian/changelog nodejs-0.11.15/debian/changelog --- nodejs-0.11.13/debian/changelog 2014-05-03 02:27:07.000000000 +0000 +++ nodejs-0.11.15/debian/changelog 2015-01-21 18:54:27.000000000 +0000 @@ -1,545 +1,557 @@ -nodejs (0.11.13-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.15-1chl1~precise1) precise; urgency=low + + * 0.11.15 (devel) release. + + -- Chris Lea Wed, 21 Jan 2015 10:52:28 -0800 + +nodejs (0.11.14-1chl1~precise1) precise; urgency=low + + * 0.11.14 (devel) release. + + -- Chris Lea Wed, 22 Oct 2014 11:01:49 -0700 + +nodejs (0.11.13-1chl1~precise1) precise; urgency=low * 0.11.13 (devel) release. -- Chris Lea Fri, 02 May 2014 19:25:57 -0700 -nodejs (0.11.11-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.11-1chl1~precise1) precise; urgency=low * 0.11.11 (devel) release. -- Chris Lea Tue, 28 Jan 2014 23:11:32 -0800 -nodejs (0.11.10-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.10-1chl1~precise1) precise; urgency=low * 0.11.10 (devel) release. -- Chris Lea Wed, 08 Jan 2014 15:27:29 -0800 -nodejs (0.11.9-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.9-1chl1~precise1) precise; urgency=low * 0.11.9 (devel) release. -- Chris Lea Thu, 21 Nov 2013 14:38:10 -0800 -nodejs (0.11.8-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.8-1chl1~precise1) precise; urgency=low * 0.11.8 (devel) release. -- Chris Lea Mon, 04 Nov 2013 08:37:37 -0800 -nodejs (0.11.7-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.7-1chl1~precise1) precise; urgency=low * 0.11.7 (devel) release. -- Chris Lea Thu, 05 Sep 2013 10:06:31 -0700 -nodejs (0.11.6-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.6-1chl1~precise1) precise; urgency=low * 0.11.6 (devel) release. 
-- Chris Lea Fri, 23 Aug 2013 13:17:49 -0700 -nodejs (0.11.5-2chl1~saucy1) saucy; urgency=low +nodejs (0.11.5-2chl1~precise1) precise; urgency=low * Bump version to force armhf builds. -- Chris Lea Sun, 18 Aug 2013 20:44:46 -0700 -nodejs (0.11.5-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.5-1chl1~precise1) precise; urgency=low * 0.11.5 (devel) release. -- Chris Lea Tue, 06 Aug 2013 17:34:26 -0700 -nodejs (0.11.4-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.4-1chl1~precise1) precise; urgency=low * 0.11.4 (devel) release. -- Chris Lea Tue, 16 Jul 2013 22:52:44 -0700 -nodejs (0.11.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.3-1chl1~precise1) precise; urgency=low * 0.11.3 (devel) release. -- Chris Lea Thu, 27 Jun 2013 10:12:55 -0700 -nodejs (0.11.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.2-1chl1~precise1) precise; urgency=low * 0.11.2 (devel) release. -- Chris Lea Tue, 14 May 2013 09:41:30 -0700 -nodejs (0.11.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.1-1chl1~precise1) precise; urgency=low * 0.11.1 (devel) release. -- Chris Lea Mon, 29 Apr 2013 17:58:16 -0700 -nodejs (0.11.0-1chl1~saucy1) saucy; urgency=low +nodejs (0.11.0-1chl1~precise1) precise; urgency=low * 0.11.0 (devel) release. -- Chris Lea Thu, 04 Apr 2013 08:38:10 -0700 -nodejs (0.10.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.10.3-1chl1~precise1) precise; urgency=low * 0.10.3 (stable) release. -- Chris Lea Wed, 03 Apr 2013 11:58:20 -0700 -nodejs (0.10.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.10.2-1chl1~precise1) precise; urgency=low * 0.10.2 (stable) release. -- Chris Lea Fri, 29 Mar 2013 09:10:39 -0700 -nodejs (0.10.1-2chl1~saucy1) saucy; urgency=low +nodejs (0.10.1-2chl1~precise1) precise; urgency=low * Headers aren't here. -- Chris Lea Tue, 26 Mar 2013 13:23:02 -0700 -nodejs (0.10.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.10.1-1chl1~precise1) precise; urgency=low * 0.10.1 (stable release). 
-- Chris Lea Mon, 25 Mar 2013 17:52:17 -0700 -nodejs (0.10.0-2chl1~saucy1) saucy; urgency=low +nodejs (0.10.0-2chl1~precise1) precise; urgency=low * Fix how nodejs package replaces old dev and npm packages. -- Chris Lea Thu, 14 Mar 2013 17:17:07 -0700 -nodejs (0.10.0-1chl1~saucy1) saucy; urgency=low +nodejs (0.10.0-1chl1~precise1) precise; urgency=low * 0.10.0 (stable) release. * This is now the stable branch, npm is included in the main package. -- Chris Lea Thu, 14 Mar 2013 11:00:04 -0700 -nodejs (0.9.12-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.12-1chl1~precise1) precise; urgency=low * 0.9.12 (devel) release -- Chris Lea Wed, 06 Mar 2013 14:29:29 -0800 -nodejs (0.9.9-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.9-1chl1~precise1) precise; urgency=low * 0.9.9 (devel) release -- Chris Lea Thu, 07 Feb 2013 12:11:44 -0800 -nodejs (0.9.8-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.8-1chl1~precise1) precise; urgency=low * 0.9.8 (devel) release -- Chris Lea Thu, 24 Jan 2013 11:25:11 -0800 -nodejs (0.9.7-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.7-1chl1~precise1) precise; urgency=low * 0.9.7 (devel) release -- Chris Lea Fri, 18 Jan 2013 10:54:04 -0800 -nodejs (0.9.6-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.6-1chl1~precise1) precise; urgency=low * 0.9.6 (devel) release -- Chris Lea Fri, 11 Jan 2013 12:20:48 -0800 -nodejs (0.9.5-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.5-1chl1~precise1) precise; urgency=low * 0.9.5 (devel) release -- Chris Lea Sat, 29 Dec 2012 19:38:46 -0800 -nodejs (0.9.4-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.4-1chl1~precise1) precise; urgency=low * 0.9.4 (devel) release -- Chris Lea Thu, 27 Dec 2012 14:55:58 -0800 -nodejs (0.9.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.3-1chl1~precise1) precise; urgency=low * 0.9.3 (devel) release -- Chris Lea Sat, 27 Oct 2012 21:41:25 +0000 -nodejs (0.9.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.2-1chl1~precise1) precise; urgency=low * 0.9.2 (devel) release -- Chris Lea Wed, 19 Sep 
2012 22:40:51 +0000 -nodejs (0.9.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.1-1chl1~precise1) precise; urgency=low * 0.9.1 (devel) release -- Chris Lea Thu, 30 Aug 2012 19:30:56 +0000 -nodejs (0.9.0-1chl1~saucy1) saucy; urgency=low +nodejs (0.9.0-1chl1~precise1) precise; urgency=low * 0.9.0 (devel) release -- Chris Lea Fri, 20 Jul 2012 19:23:28 +0000 -nodejs (0.8.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.8.3-1chl1~precise1) precise; urgency=low * 0.8.3 (stable) release -- Chris Lea Thu, 19 Jul 2012 23:32:13 +0000 -nodejs (0.8.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.8.2-1chl1~precise1) precise; urgency=low * 0.8.2 (stable) release -- Chris Lea Mon, 09 Jul 2012 18:37:19 +0000 -nodejs (0.8.1-2chl1~saucy1) saucy; urgency=low +nodejs (0.8.1-2chl1~precise1) precise; urgency=low * Don't use --debug in configure -- Chris Lea Fri, 06 Jul 2012 17:59:39 +0000 -nodejs (0.8.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.8.1-1chl1~precise1) precise; urgency=low * 0.8.1 (stable) release -- Chris Lea Fri, 29 Jun 2012 19:16:47 +0000 -nodejs (0.8.0-1chl1~saucy1) saucy; urgency=low +nodejs (0.8.0-1chl1~precise1) precise; urgency=low * 0.8.0 (stable) release -- Chris Lea Wed, 27 Jun 2012 20:27:18 +0000 -nodejs (0.6.19-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.19-1chl1~precise1) precise; urgency=low * 0.6.19 (stable) release -- Chris Lea Mon, 11 Jun 2012 06:31:35 +0000 -nodejs (0.6.18-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.18-1chl1~precise1) precise; urgency=low * 0.6.18 (stable) release -- Chris Lea Mon, 21 May 2012 18:44:07 +0000 -nodejs (0.6.17-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.17-1chl1~precise1) precise; urgency=low * 0.6.17 (stable) release -- Chris Lea Fri, 04 May 2012 20:26:22 +0000 -nodejs (0.6.16-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.16-1chl1~precise1) precise; urgency=low * 0.6.16 (stable) release -- Chris Lea Mon, 30 Apr 2012 18:29:38 +0000 -nodejs (0.6.15-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.15-1chl1~precise1) precise; 
urgency=low * 0.6.15 (stable) release -- Chris Lea Mon, 09 Apr 2012 20:20:32 +0000 -nodejs (0.6.14-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.14-1chl1~precise1) precise; urgency=low * 0.6.14 (stable) release -- Chris Lea Sat, 24 Mar 2012 17:58:10 +0000 -nodejs (0.6.13-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.13-1chl1~precise1) precise; urgency=low * 0.6.13 (stable) release -- Chris Lea Fri, 16 Mar 2012 19:05:08 +0000 -nodejs (0.6.12-1chl~saucy1) saucy; urgency=low +nodejs (0.6.12-1chl~precise1) precise; urgency=low * 0.6.12 (stable) release -- Chris Lea Sun, 04 Mar 2012 02:16:58 +0000 -nodejs (0.6.11-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.11-1chl1~precise1) precise; urgency=low * 0.6.11 (stable) release -- Chris Lea Wed, 22 Feb 2012 05:07:37 +0000 -nodejs (0.6.10-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.10-1chl1~precise1) precise; urgency=low * 0.6.10 (stable) release -- Chris Lea Fri, 03 Feb 2012 20:44:50 +0000 -nodejs (0.6.9-2chl1~saucy1) saucy; urgency=low +nodejs (0.6.9-2chl1~precise1) precise; urgency=low * Include the stuff in lib/ -- Chris Lea Sat, 28 Jan 2012 22:20:07 +0000 -nodejs (0.6.9-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.9-1chl1~precise1) precise; urgency=low * 0.6.9 (stable) release -- Chris Lea Sat, 28 Jan 2012 20:39:26 +0000 -nodejs (0.6.8-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.8-1chl1~precise1) precise; urgency=low * 0.6.8 (stable) release -- Chris Lea Fri, 20 Jan 2012 23:46:35 +0000 -nodejs (0.6.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.2-1chl1~precise1) precise; urgency=low * 0.6.2 (stable) release -- Chris Lea Mon, 21 Nov 2011 00:36:21 +0000 -nodejs (0.6.1-5chl1~saucy1) saucy; urgency=low +nodejs (0.6.1-5chl1~precise1) precise; urgency=low * Add /usr/share/javascript to NODE_PATH -- Chris Lea Thu, 17 Nov 2011 08:58:13 +0000 -nodejs (0.6.1-4chl1~saucy1) saucy; urgency=low +nodejs (0.6.1-4chl1~precise1) precise; urgency=low * Add headers that were commented out back in. 
-- Chris Lea Mon, 14 Nov 2011 20:14:04 +0000 -nodejs (0.6.1-3chl1~saucy1) saucy; urgency=low +nodejs (0.6.1-3chl1~precise1) precise; urgency=low * Actually add the script correctly... -- Chris Lea Mon, 14 Nov 2011 02:51:30 +0000 -nodejs (0.6.1-2chl1~saucy1) saucy; urgency=low +nodejs (0.6.1-2chl1~precise1) precise; urgency=low * Add a script that sets NODE_PATH in /etc/profile.d -- Chris Lea Mon, 14 Nov 2011 02:35:18 +0000 -nodejs (0.6.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.1-1chl1~precise1) precise; urgency=low * 0.6.1 (stable) release -- Chris Lea Fri, 11 Nov 2011 23:54:20 +0000 -nodejs (0.6.0-1chl1~saucy1) saucy; urgency=low +nodejs (0.6.0-1chl1~precise1) precise; urgency=low * 0.6.0 (stable) release -- Chris Lea Sat, 05 Nov 2011 22:03:31 +0000 -nodejs (0.5.10-2chl1~saucy1) saucy; urgency=low +nodejs (0.5.10-2chl1~precise1) precise; urgency=low * Remove TODO from docs -- Chris Lea Mon, 24 Oct 2011 04:02:39 +0000 -nodejs (0.5.10-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.10-1chl1~precise1) precise; urgency=low * 0.5.10 (dev) release -- Chris Lea Mon, 24 Oct 2011 02:09:02 +0000 -nodejs (0.5.9-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.9-1chl1~precise1) precise; urgency=low * 0.5.9 (dev) release -- Chris Lea Tue, 11 Oct 2011 06:20:35 +0000 -nodejs (0.5.8-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.8-1chl1~precise1) precise; urgency=low * 0.5.8 (dev) release -- Chris Lea Sat, 01 Oct 2011 05:09:16 +0000 -nodejs (0.5.7-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.7-1chl1~precise1) precise; urgency=low * 0.5.7 (dev) release -- Chris Lea Sat, 17 Sep 2011 04:54:49 +0000 -nodejs (0.5.6-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.6-1chl1~precise1) precise; urgency=low * 0.5.6 (dev) release -- Chris Lea Sun, 11 Sep 2011 17:50:17 +0000 -nodejs (0.5.5-2chl1~saucy1) saucy; urgency=low +nodejs (0.5.5-2chl1~precise1) precise; urgency=low * Fix dbg package so this will build -- Chris Lea Mon, 29 Aug 2011 21:51:42 +0000 -nodejs (0.5.5-1chl1~saucy1) saucy; 
urgency=low +nodejs (0.5.5-1chl1~precise1) precise; urgency=low * 0.5.5 (dev) release -- Chris Lea Mon, 29 Aug 2011 09:26:21 +0000 -nodejs (0.5.4-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.4-1chl1~precise1) precise; urgency=low * 0.5.4 (dev) release -- Chris Lea Sat, 13 Aug 2011 21:48:22 +0000 -nodejs (0.5.3-3chl1~saucy1) saucy; urgency=low +nodejs (0.5.3-3chl1~precise1) precise; urgency=low * Push original source so LP will build. -- Chris Lea Thu, 04 Aug 2011 00:06:12 +0000 -nodejs (0.5.3-2chl1~saucy1) saucy; urgency=low +nodejs (0.5.3-2chl1~precise1) precise; urgency=low * Fix wafadmin install path in patch. -- Chris Lea Wed, 03 Aug 2011 23:53:03 +0000 -nodejs (0.5.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.3-1chl1~precise1) precise; urgency=low * 0.5.3 (dev) release -- Chris Lea Wed, 03 Aug 2011 23:32:23 +0000 -nodejs (0.5.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.2-1chl1~precise1) precise; urgency=low * 0.5.2 (dev) release -- Chris Lea Mon, 25 Jul 2011 06:50:37 +0000 -nodejs (0.5.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.1-1chl1~precise1) precise; urgency=low * 0.5.1 (dev) release -- Chris Lea Sat, 16 Jul 2011 18:45:00 +0000 -nodejs (0.5.0-1chl1~saucy1) saucy; urgency=low +nodejs (0.5.0-1chl1~precise1) precise; urgency=low * 0.5.0 (dev) release -- Chris Lea Wed, 06 Jul 2011 19:16:53 +0000 -nodejs (0.4.9-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.9-1chl1~precise1) precise; urgency=low * 0.4.9 release -- Chris Lea Wed, 29 Jun 2011 18:16:39 +0000 -nodejs (0.4.8-3chl1~saucy1) saucy; urgency=low +nodejs (0.4.8-3chl1~precise1) precise; urgency=low * Get node-waf correct. 
-- Chris Lea Mon, 06 Jun 2011 19:37:23 +0000 -nodejs (0.4.8-2chl1~saucy1) saucy; urgency=low +nodejs (0.4.8-2chl1~precise1) precise; urgency=low * Add alternatives link for node binary -- Chris Lea Mon, 06 Jun 2011 19:17:03 +0000 -nodejs (0.4.8-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.8-1chl1~precise1) precise; urgency=low * 0.4.8 release -- Chris Lea Mon, 23 May 2011 05:35:03 +0000 -nodejs (0.4.7-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.7-1chl1~precise1) precise; urgency=low * 0.4.7 release -- Chris Lea Sat, 23 Apr 2011 18:52:04 +0000 -nodejs (0.4.6-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.6-1chl1~precise1) precise; urgency=low * 0.4.6 release -- Chris Lea Fri, 15 Apr 2011 01:17:55 +0000 -nodejs (0.4.5-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.5-1chl1~precise1) precise; urgency=low * 0.4.5 release -- Chris Lea Tue, 05 Apr 2011 04:04:23 +0000 -nodejs (0.4.4-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.4-1chl1~precise1) precise; urgency=low * 0.4.4 release -- Chris Lea Mon, 28 Mar 2011 20:42:01 +0000 -nodejs (0.4.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.3-1chl1~precise1) precise; urgency=low * 0.4.3 release -- Chris Lea Sat, 19 Mar 2011 18:00:57 +0000 -nodejs (0.4.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.2-1chl1~precise1) precise; urgency=low * 0.4.2 release -- Chris Lea Mon, 07 Mar 2011 20:09:20 +0000 -nodejs (0.4.1-2chl1~saucy1) saucy; urgency=low +nodejs (0.4.1-2chl1~precise1) precise; urgency=low * Fix waf-admin path issue -- Chris Lea Sun, 20 Feb 2011 11:10:54 +0000 -nodejs (0.4.1-1chl1~saucy1) saucy; urgency=low +nodejs (0.4.1-1chl1~precise1) precise; urgency=low * 0.4.1 release -- Chris Lea Sat, 19 Feb 2011 23:45:32 -0800 -nodejs (0.2.6-1chl1~saucy1) saucy; urgency=low +nodejs (0.2.6-1chl1~precise1) precise; urgency=low * 0.2.6 release -- Chris Lea Sun, 09 Jan 2011 18:37:57 +0000 -nodejs (0.2.5-1chl1~saucy1) saucy; urgency=low +nodejs (0.2.5-1chl1~precise1) precise; urgency=low * 0.2.5 release -- Chris Lea Wed, 17 Dec 2010 
15:04:00 -0400 -nodejs (0.2.4-1chl1~saucy1) saucy; urgency=low +nodejs (0.2.4-1chl1~precise1) precise; urgency=low * 0.2.4 release -- Chris Lea Wed, 27 Oct 2010 15:36:00 -0700 -nodejs (0.2.3-2chl1~saucy1) saucy; urgency=low +nodejs (0.2.3-2chl1~precise1) precise; urgency=low - * Fixed to compile on saucy + * Fixed to compile on precise -- Chris Lea Fri, 08 Oct 2010 23:13:24 -0700 -nodejs (0.2.3-1chl1~saucy1) saucy; urgency=low +nodejs (0.2.3-1chl1~precise1) precise; urgency=low * 0.2.3 release -- Chris Lea Mon, 04 Oct 2010 12:01:24 +0100 -nodejs (0.2.2-3chl1~saucy1) saucy; urgency=low +nodejs (0.2.2-3chl1~precise1) precise; urgency=low * Remove unneeded quilt include from debian/rules -- Chris Lea Thu, 23 Sep 2010 12:45:24 -0700 -nodejs (0.2.2-2chl1~saucy1) saucy; urgency=low +nodejs (0.2.2-2chl1~precise1) precise; urgency=low * Add build dependency on quilt -- Chris Lea Thu, 23 Sep 2010 12:45:24 -0700 -nodejs (0.2.2-1chl1~saucy1) saucy; urgency=low +nodejs (0.2.2-1chl1~precise1) precise; urgency=low * New upstream release diff -Nru nodejs-0.11.13/deps/debugger-agent/debugger-agent.gyp nodejs-0.11.15/deps/debugger-agent/debugger-agent.gyp --- nodejs-0.11.13/deps/debugger-agent/debugger-agent.gyp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/debugger-agent/debugger-agent.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "targets": [{ + "target_name": "debugger-agent", + "type": "<(library)", + "include_dirs": [ + "src", + "include", + "../v8/include", + "../uv/include", + + # Private node.js folder and stuff needed to include from it + "../../src", + "../cares/include", + ], + "direct_dependent_settings": { + "include_dirs": [ + "include", + ], + }, + "sources": [ + "src/agent.cc", + ], + }], +} diff -Nru nodejs-0.11.13/deps/debugger-agent/include/debugger-agent.h nodejs-0.11.15/deps/debugger-agent/include/debugger-agent.h --- nodejs-0.11.13/deps/debugger-agent/include/debugger-agent.h 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/debugger-agent/include/debugger-agent.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,109 @@ +// Copyright Fedor Indutny and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +#ifndef DEPS_DEBUGGER_AGENT_INCLUDE_DEBUGGER_AGENT_H_ +#define DEPS_DEBUGGER_AGENT_INCLUDE_DEBUGGER_AGENT_H_ + +#include "uv.h" +#include "v8.h" +#include "v8-debug.h" + +namespace node { + +// Forward declaration +class Environment; + +namespace debugger { + +// Forward declaration +class AgentMessage; + +class Agent { + public: + explicit Agent(node::Environment* env); + ~Agent(); + + typedef void (*DispatchHandler)(node::Environment* env); + + // Start the debugger agent thread + bool Start(int port, bool wait); + // Listen for debug events + void Enable(); + // Stop the debugger agent + void Stop(); + + inline void set_dispatch_handler(DispatchHandler handler) { + dispatch_handler_ = handler; + } + + inline node::Environment* parent_env() const { return parent_env_; } + inline node::Environment* child_env() const { return child_env_; } + + protected: + void InitAdaptor(Environment* env); + + // Worker body + void WorkerRun(); + + static void ThreadCb(Agent* agent); + static void ParentSignalCb(uv_async_t* signal); + static void ChildSignalCb(uv_async_t* signal); + static void MessageHandler(const v8::Debug::Message& message); + + // V8 API + static Agent* Unwrap(const v8::FunctionCallbackInfo& args); + static void NotifyListen(const v8::FunctionCallbackInfo& args); + static void NotifyWait(const v8::FunctionCallbackInfo& args); + static void SendCommand(const v8::FunctionCallbackInfo& args); + + void EnqueueMessage(AgentMessage* message); + + enum State { + kNone, + kRunning + }; + + // TODO(indutny): Verify that there are no races + State state_; + + int port_; + bool wait_; + + uv_sem_t start_sem_; + uv_mutex_t message_mutex_; + uv_async_t child_signal_; + + uv_thread_t thread_; + node::Environment* parent_env_; + node::Environment* child_env_; + uv_loop_t child_loop_; + v8::Persistent api_; + + // QUEUE + void* messages_[2]; + + DispatchHandler dispatch_handler_; +}; + +} // namespace debugger +} // namespace node + +#endif // 
DEPS_DEBUGGER_AGENT_INCLUDE_DEBUGGER_AGENT_H_ diff -Nru nodejs-0.11.13/deps/debugger-agent/lib/_debugger_agent.js nodejs-0.11.15/deps/debugger-agent/lib/_debugger_agent.js --- nodejs-0.11.13/deps/debugger-agent/lib/_debugger_agent.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/debugger-agent/lib/_debugger_agent.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,191 @@ +var assert = require('assert'); +var net = require('net'); +var util = require('util'); +var Buffer = require('buffer').Buffer; + +var Transform = require('stream').Transform; + +exports.start = function start() { + var agent = new Agent(); + + // Do not let `agent.listen()` request listening from cluster master + var cluster = require('cluster'); + cluster.isWorker = false; + cluster.isMaster = true; + + agent.on('error', function(err) { + process._rawDebug(err.stack || err); + }); + + agent.listen(process._debugAPI.port, function() { + var addr = this.address(); + process._rawDebug('Debugger listening on port %d', addr.port); + process._debugAPI.notifyListen(); + }); + + // Just to spin-off events + // TODO(indutny): Figure out why node.cc isn't doing this + setImmediate(function() { + }); + + process._debugAPI.onclose = function() { + // We don't care about it, but it prevents loop from cleaning up gently + // NOTE: removeAllListeners won't work, as it doesn't call `removeListener` + process.listeners('SIGWINCH').forEach(function(fn) { + process.removeListener('SIGWINCH', fn); + }); + + agent.close(); + }; + + // Not used now, but anyway + return agent; +}; + +function Agent() { + net.Server.call(this, this.onConnection); + + this.first = true; + this.binding = process._debugAPI; + + var self = this; + this.binding.onmessage = function(msg) { + self.clients.forEach(function(client) { + client.send({}, msg); + }); + }; + + this.clients = []; + assert(this.binding, 'Debugger agent running without bindings!'); +} +util.inherits(Agent, net.Server); + +Agent.prototype.onConnection = 
function onConnection(socket) { + var c = new Client(this, socket); + + c.start(); + this.clients.push(c); + + var self = this; + c.once('close', function() { + var index = self.clients.indexOf(c); + assert(index !== -1); + self.clients.splice(index, 1); + }); +}; + +Agent.prototype.notifyWait = function notifyWait() { + if (this.first) + this.binding.notifyWait(); + this.first = false; +}; + +function Client(agent, socket) { + Transform.call(this); + this._readableState.objectMode = true; + + this.agent = agent; + this.binding = this.agent.binding; + this.socket = socket; + + // Parse incoming data + this.state = 'headers'; + this.headers = {}; + this.buffer = ''; + socket.pipe(this); + + this.on('data', this.onCommand); + + var self = this; + this.socket.on('close', function() { + self.destroy(); + }); +} +util.inherits(Client, Transform); + +Client.prototype.destroy = function destroy(msg) { + this.socket.destroy(); + + this.emit('close'); +}; + +Client.prototype._transform = function _transform(data, enc, cb) { + cb(); + + this.buffer += data; + + while (true) { + if (this.state === 'headers') { + // Not enough data + if (!/\r\n/.test(this.buffer)) + break; + + if (/^\r\n/.test(this.buffer)) { + this.buffer = this.buffer.slice(2); + this.state = 'body'; + continue; + } + + // Match: + // Header-name: header-value\r\n + var match = this.buffer.match(/^([^:\s\r\n]+)\s*:\s*([^\s\r\n]+)\r\n/); + if (!match) + return this.destroy('Expected header, but failed to parse it'); + + this.headers[match[1].toLowerCase()] = match[2]; + + this.buffer = this.buffer.slice(match[0].length); + } else { + var len = this.headers['content-length']; + if (len === undefined) + return this.destroy('Expected content-length'); + + len = len | 0; + if (Buffer.byteLength(this.buffer) < len) + break; + + this.push(new Command(this.headers, this.buffer.slice(0, len))); + this.state = 'headers'; + this.buffer = this.buffer.slice(len); + this.headers = {}; + } + } +}; + +Client.prototype.send 
= function send(headers, data) { + if (!data) + data = ''; + + var out = []; + Object.keys(headers).forEach(function(key) { + out.push(key + ': ' + headers[key]); + }); + out.push('Content-Length: ' + Buffer.byteLength(data), ''); + + this.socket.cork(); + this.socket.write(out.join('\r\n') + '\r\n'); + + if (data.length > 0) + this.socket.write(data); + this.socket.uncork(); +}; + +Client.prototype.start = function start() { + this.send({ + Type: 'connect', + 'V8-Version': process.versions.v8, + 'Protocol-Version': 1, + 'Embedding-Host': 'node ' + process.version + }); +}; + +Client.prototype.onCommand = function onCommand(cmd) { + this.binding.sendCommand(cmd.body); + + this.agent.notifyWait(); +}; + +function Command(headers, body) { + this.headers = headers; + this.body = body; +} diff -Nru nodejs-0.11.13/deps/debugger-agent/src/agent.cc nodejs-0.11.15/deps/debugger-agent/src/agent.cc --- nodejs-0.11.13/deps/debugger-agent/src/agent.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/debugger-agent/src/agent.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,347 @@ +// Copyright Fedor Indutny and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +#include "agent.h" +#include "debugger-agent.h" + +#include "node.h" +#include "node_internals.h" // ARRAY_SIZE +#include "env.h" +#include "env-inl.h" +#include "v8.h" +#include "v8-debug.h" +#include "util.h" +#include "util-inl.h" +#include "queue.h" + +#include + +namespace node { +namespace debugger { + +using v8::Context; +using v8::Function; +using v8::FunctionCallbackInfo; +using v8::FunctionTemplate; +using v8::Handle; +using v8::HandleScope; +using v8::Integer; +using v8::Isolate; +using v8::Local; +using v8::Locker; +using v8::Object; +using v8::String; +using v8::Value; + + +Agent::Agent(Environment* env) : state_(kNone), + port_(5858), + wait_(false), + parent_env_(env), + child_env_(NULL), + dispatch_handler_(NULL) { + int err; + + err = uv_sem_init(&start_sem_, 0); + CHECK_EQ(err, 0); + + err = uv_mutex_init(&message_mutex_); + CHECK_EQ(err, 0); + + QUEUE_INIT(&messages_); +} + + +Agent::~Agent() { + Stop(); + + uv_sem_destroy(&start_sem_); + uv_mutex_destroy(&message_mutex_); + + // Clean-up messages + while (!QUEUE_EMPTY(&messages_)) { + QUEUE* q = QUEUE_HEAD(&messages_); + QUEUE_REMOVE(q); + AgentMessage* msg = ContainerOf(&AgentMessage::member, q); + delete msg; + } +} + + +bool Agent::Start(int port, bool wait) { + int err; + + if (state_ == kRunning) + return false; + + err = uv_loop_init(&child_loop_); + if (err != 0) + goto loop_init_failed; + + // Interruption signal handler + err = uv_async_init(&child_loop_, &child_signal_, ChildSignalCb); + if (err != 0) + goto async_init_failed; + uv_unref(reinterpret_cast(&child_signal_)); + + port_ = port; + wait_ = wait; + + err = uv_thread_create(&thread_, + reinterpret_cast(ThreadCb), + this); + if (err != 0) + goto 
thread_create_failed; + + uv_sem_wait(&start_sem_); + + state_ = kRunning; + + return true; + + thread_create_failed: + uv_close(reinterpret_cast(&child_signal_), NULL); + + async_init_failed: + err = uv_loop_close(&child_loop_); + CHECK_EQ(err, 0); + + loop_init_failed: + return false; +} + + +void Agent::Enable() { + v8::Debug::SetMessageHandler(MessageHandler); + + // Assign environment to the debugger's context + // NOTE: The debugger context is created after `SetMessageHandler()` call + parent_env()->AssignToContext(v8::Debug::GetDebugContext()); +} + + +void Agent::Stop() { + int err; + + if (state_ != kRunning) { + return; + } + + v8::Debug::SetMessageHandler(NULL); + + // Send empty message to terminate things + EnqueueMessage(new AgentMessage(NULL, 0)); + + // Signal worker thread to make it stop + err = uv_async_send(&child_signal_); + CHECK_EQ(err, 0); + + err = uv_thread_join(&thread_); + CHECK_EQ(err, 0); + + uv_close(reinterpret_cast(&child_signal_), NULL); + uv_run(&child_loop_, UV_RUN_NOWAIT); + + err = uv_loop_close(&child_loop_); + CHECK_EQ(err, 0); + + state_ = kNone; +} + + +void Agent::WorkerRun() { + static const char* argv[] = { "node", "--debug-agent" }; + Isolate* isolate = Isolate::New(); + { + Locker locker(isolate); + Isolate::Scope isolate_scope(isolate); + + HandleScope handle_scope(isolate); + Local context = Context::New(isolate); + + Context::Scope context_scope(context); + Environment* env = CreateEnvironment( + isolate, + &child_loop_, + context, + ARRAY_SIZE(argv), + argv, + ARRAY_SIZE(argv), + argv); + + child_env_ = env; + + // Expose API + InitAdaptor(env); + LoadEnvironment(env); + + CHECK_EQ(&child_loop_, env->event_loop()); + uv_run(&child_loop_, UV_RUN_DEFAULT); + + // Clean-up peristent + api_.Reset(); + + // Clean-up all running handles + env->CleanupHandles(); + + env->Dispose(); + env = NULL; + } + isolate->Dispose(); +} + + +void Agent::InitAdaptor(Environment* env) { + Isolate* isolate = env->isolate(); + HandleScope 
scope(isolate); + + // Create API adaptor + Local t = FunctionTemplate::New(isolate); + t->InstanceTemplate()->SetInternalFieldCount(1); + t->SetClassName(String::NewFromUtf8(isolate, "DebugAPI")); + + NODE_SET_PROTOTYPE_METHOD(t, "notifyListen", NotifyListen); + NODE_SET_PROTOTYPE_METHOD(t, "notifyWait", NotifyWait); + NODE_SET_PROTOTYPE_METHOD(t, "sendCommand", SendCommand); + + Local api = t->GetFunction()->NewInstance(); + api->SetAlignedPointerInInternalField(0, this); + + api->Set(String::NewFromUtf8(isolate, "port"), Integer::New(isolate, port_)); + + env->process_object()->Set(String::NewFromUtf8(isolate, "_debugAPI"), api); + api_.Reset(env->isolate(), api); +} + + +Agent* Agent::Unwrap(const v8::FunctionCallbackInfo& args) { + void* ptr = args.Holder()->GetAlignedPointerFromInternalField(0); + return reinterpret_cast(ptr); +} + + +void Agent::NotifyListen(const FunctionCallbackInfo& args) { + Agent* a = Unwrap(args); + + // Notify other thread that we are ready to process events + uv_sem_post(&a->start_sem_); +} + + +void Agent::NotifyWait(const FunctionCallbackInfo& args) { + Agent* a = Unwrap(args); + + a->wait_ = false; + + int err = uv_async_send(&a->child_signal_); + CHECK_EQ(err, 0); +} + + +void Agent::SendCommand(const FunctionCallbackInfo& args) { + Agent* a = Unwrap(args); + Environment* env = a->child_env(); + HandleScope scope(env->isolate()); + + String::Value v(args[0]); + + v8::Debug::SendCommand(a->parent_env()->isolate(), *v, v.length()); + if (a->dispatch_handler_ != NULL) + a->dispatch_handler_(a->parent_env()); +} + + +void Agent::ThreadCb(Agent* agent) { + agent->WorkerRun(); +} + + +void Agent::ChildSignalCb(uv_async_t* signal) { + Agent* a = ContainerOf(&Agent::child_signal_, signal); + Isolate* isolate = a->child_env()->isolate(); + + HandleScope scope(isolate); + Local api = PersistentToLocal(isolate, a->api_); + + uv_mutex_lock(&a->message_mutex_); + while (!QUEUE_EMPTY(&a->messages_)) { + QUEUE* q = QUEUE_HEAD(&a->messages_); + 
AgentMessage* msg = ContainerOf(&AgentMessage::member, q); + + // Time to close everything + if (msg->data() == NULL) { + QUEUE_REMOVE(q); + delete msg; + + MakeCallback(isolate, api, "onclose", 0, NULL); + break; + } + + // Waiting for client, do not send anything just yet + // TODO(indutny): move this to js-land + if (a->wait_) + break; + + QUEUE_REMOVE(q); + Local argv[] = { + String::NewFromTwoByte(isolate, + msg->data(), + String::kNormalString, + msg->length()) + }; + + // Emit message + MakeCallback(isolate, + api, + "onmessage", + ARRAY_SIZE(argv), + argv); + delete msg; + } + uv_mutex_unlock(&a->message_mutex_); +} + + +void Agent::EnqueueMessage(AgentMessage* message) { + uv_mutex_lock(&message_mutex_); + QUEUE_INSERT_TAIL(&messages_, &message->member); + uv_mutex_unlock(&message_mutex_); + uv_async_send(&child_signal_); +} + + +void Agent::MessageHandler(const v8::Debug::Message& message) { + Isolate* isolate = message.GetIsolate(); + Environment* env = Environment::GetCurrent(isolate); + Agent* a = env->debugger_agent(); + CHECK_NE(a, NULL); + CHECK_EQ(isolate, a->parent_env()->isolate()); + + HandleScope scope(isolate); + Local json = message.GetJSON(); + String::Value v(json); + + AgentMessage* msg = new AgentMessage(*v, v.length()); + a->EnqueueMessage(msg); +} + +} // namespace debugger +} // namespace node diff -Nru nodejs-0.11.13/deps/debugger-agent/src/agent.h nodejs-0.11.15/deps/debugger-agent/src/agent.h --- nodejs-0.11.13/deps/debugger-agent/src/agent.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/debugger-agent/src/agent.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,64 @@ +// Copyright Fedor Indutny and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +#ifndef DEPS_DEBUGGER_AGENT_SRC_AGENT_H_ +#define DEPS_DEBUGGER_AGENT_SRC_AGENT_H_ + +#include "v8.h" +#include "v8-debug.h" +#include "queue.h" + +#include +#include + +namespace node { +namespace debugger { + +class AgentMessage { + public: + AgentMessage(uint16_t* val, int length) : length_(length) { + if (val == NULL) { + data_ = val; + } else { + data_ = new uint16_t[length]; + memcpy(data_, val, length * sizeof(*data_)); + } + } + + ~AgentMessage() { + delete[] data_; + data_ = NULL; + } + + inline const uint16_t* data() const { return data_; } + inline int length() const { return length_; } + + QUEUE member; + + private: + uint16_t* data_; + int length_; +}; + +} // namespace debugger +} // namespace node + +#endif // DEPS_DEBUGGER_AGENT_SRC_AGENT_H_ diff -Nru nodejs-0.11.13/deps/http_parser/AUTHORS nodejs-0.11.15/deps/http_parser/AUTHORS --- nodejs-0.11.13/deps/http_parser/AUTHORS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/http_parser/AUTHORS 2015-01-20 21:22:17.000000000 +0000 @@ -47,3 +47,4 @@ Fedor Indutny runner Alexis Campailla +David Wragg diff -Nru nodejs-0.11.13/deps/http_parser/http_parser.c nodejs-0.11.15/deps/http_parser/http_parser.c --- nodejs-0.11.13/deps/http_parser/http_parser.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/http_parser/http_parser.c 2015-01-20 21:22:17.000000000 +0000 @@ -280,6 +280,9 @@ , s_header_field_start , s_header_field + , s_header_value_discard_ws + , s_header_value_discard_ws_almost_done + , s_header_value_discard_lws , s_header_value_start , s_header_value , s_header_value_lws @@ -1380,7 +1383,7 @@ } if (ch == ':') { - parser->state = s_header_value_start; + parser->state = s_header_value_discard_ws; CALLBACK_DATA(header_field); break; } @@ -1401,28 +1404,28 @@ goto error; } - case s_header_value_start: - { + case s_header_value_discard_ws: if (ch == ' ' || ch == '\t') break; - MARK(header_value); - - parser->state = s_header_value; - parser->index = 0; - if (ch == CR) { - 
parser->header_state = h_general; - parser->state = s_header_almost_done; - CALLBACK_DATA(header_value); + parser->state = s_header_value_discard_ws_almost_done; break; } if (ch == LF) { - parser->state = s_header_field_start; - CALLBACK_DATA(header_value); + parser->state = s_header_value_discard_lws; break; } + /* FALLTHROUGH */ + + case s_header_value_start: + { + MARK(header_value); + + parser->state = s_header_value; + parser->index = 0; + c = LOWER(ch); switch (parser->header_state) { @@ -1570,7 +1573,17 @@ STRICT_CHECK(ch != LF); parser->state = s_header_value_lws; + break; + } + + case s_header_value_lws: + { + if (ch == ' ' || ch == '\t') { + parser->state = s_header_value_start; + goto reexecute_byte; + } + /* finished the header */ switch (parser->header_state) { case h_connection_keep_alive: parser->flags |= F_CONNECTION_KEEP_ALIVE; @@ -1585,19 +1598,29 @@ break; } + parser->state = s_header_field_start; + goto reexecute_byte; + } + + case s_header_value_discard_ws_almost_done: + { + STRICT_CHECK(ch != LF); + parser->state = s_header_value_discard_lws; break; } - case s_header_value_lws: + case s_header_value_discard_lws: { - if (ch == ' ' || ch == '\t') - parser->state = s_header_value_start; - else - { + if (ch == ' ' || ch == '\t') { + parser->state = s_header_value_discard_ws; + break; + } else { + /* header value was empty */ + MARK(header_value); parser->state = s_header_field_start; + CALLBACK_DATA_NOADVANCE(header_value); goto reexecute_byte; } - break; } case s_headers_almost_done: diff -Nru nodejs-0.11.13/deps/http_parser/http_parser.h nodejs-0.11.15/deps/http_parser/http_parser.h --- nodejs-0.11.13/deps/http_parser/http_parser.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/http_parser/http_parser.h 2015-01-20 21:22:17.000000000 +0000 @@ -26,8 +26,8 @@ /* Also update SONAME in the Makefile whenever you change these. 
*/ #define HTTP_PARSER_VERSION_MAJOR 2 -#define HTTP_PARSER_VERSION_MINOR 2 -#define HTTP_PARSER_VERSION_PATCH 1 +#define HTTP_PARSER_VERSION_MINOR 3 +#define HTTP_PARSER_VERSION_PATCH 0 #include #if defined(_WIN32) && !defined(__MINGW32__) && (!defined(_MSC_VER) || _MSC_VER<1600) diff -Nru nodejs-0.11.13/deps/http_parser/Makefile nodejs-0.11.15/deps/http_parser/Makefile --- nodejs-0.11.13/deps/http_parser/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/http_parser/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -19,7 +19,7 @@ # IN THE SOFTWARE. PLATFORM ?= $(shell sh -c 'uname -s | tr "[A-Z]" "[a-z]"') -SONAME ?= libhttp_parser.so.2.2.1 +SONAME ?= libhttp_parser.so.2.3 CC?=gcc AR?=ar diff -Nru nodejs-0.11.13/deps/http_parser/test.c nodejs-0.11.15/deps/http_parser/test.c --- nodejs-0.11.13/deps/http_parser/test.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/http_parser/test.c 2015-01-20 21:22:17.000000000 +0000 @@ -608,8 +608,14 @@ " mno \r\n" "\t \tqrs\r\n" "Line2: \t line2\t\r\n" + "Line3:\r\n" + " line3\r\n" + "Line4: \r\n" + " \r\n" + "Connection:\r\n" + " close\r\n" "\r\n" - ,.should_keep_alive= TRUE + ,.should_keep_alive= FALSE ,.message_complete_on_eof= FALSE ,.http_major= 1 ,.http_minor= 1 @@ -618,9 +624,12 @@ ,.fragment= "" ,.request_path= "/" ,.request_url= "/" - ,.num_headers= 2 - ,.headers= { { "Line1", "abcdefghijklmno qrs" } + ,.num_headers= 5 + ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" } , { "Line2", "line2\t" } + , { "Line3", "line3" } + , { "Line4", "" } + , { "Connection", "close" }, } ,.body= "" } @@ -904,6 +913,43 @@ ,.body= "" } +#define LINE_FOLDING_IN_HEADER_WITH_LF 34 +, {.name= "line folding in header value" + ,.type= HTTP_REQUEST + ,.raw= "GET / HTTP/1.1\n" + "Line1: abc\n" + "\tdef\n" + " ghi\n" + "\t\tjkl\n" + " mno \n" + "\t \tqrs\n" + "Line2: \t line2\t\n" + "Line3:\n" + " line3\n" + "Line4: \n" + " \n" + "Connection:\n" + " close\n" + "\n" + ,.should_keep_alive= FALSE + 
,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 5 + ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" } + , { "Line2", "line2\t" } + , { "Line3", "line3" } + , { "Line4", "" } + , { "Connection", "close" }, + } + ,.body= "" + } + , {.name= NULL } /* sentinel */ }; diff -Nru nodejs-0.11.13/deps/npm/bin/npm nodejs-0.11.15/deps/npm/bin/npm --- nodejs-0.11.13/deps/npm/bin/npm 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/bin/npm 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,5 @@ #!/bin/sh +(set -o igncr) 2>/dev/null && set -o igncr; # cygwin encoding fix basedir=`dirname "$0"` diff -Nru nodejs-0.11.13/deps/npm/bin/npm-cli.js nodejs-0.11.15/deps/npm/bin/npm-cli.js --- nodejs-0.11.13/deps/npm/bin/npm-cli.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/bin/npm-cli.js 2015-01-20 21:22:17.000000000 +0000 @@ -19,10 +19,9 @@ log.pause() // will be unpaused when config is loaded. log.info("it worked if it ends with", "ok") -var fs = require("graceful-fs") - , path = require("path") +var path = require("path") , npm = require("../lib/npm.js") - , npmconf = require("npmconf") + , npmconf = require("../lib/config/core.js") , errorHandler = require("../lib/utils/error-handler.js") , configDefs = npmconf.defs @@ -58,16 +57,6 @@ log.info("using", "npm@%s", npm.version) log.info("using", "node@%s", process.version) -// make sure that this version of node works with this version of npm. 
-var semver = require("semver") - , nodeVer = process.version - , reqVer = npm.nodeVersionRequired -if (reqVer && !semver.satisfies(nodeVer, reqVer)) { - return errorHandler(new Error( - "npm doesn't work with node " + nodeVer - + "\nRequired: node@" + reqVer), true) -} - process.on("uncaughtException", errorHandler) if (conf.usage && npm.command !== "help") { diff -Nru nodejs-0.11.13/deps/npm/CHANGELOG.md nodejs-0.11.15/deps/npm/CHANGELOG.md --- nodejs-0.11.13/deps/npm/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/CHANGELOG.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1296 @@ +### v2.1.6 (2014-10-23): + +* [`681b398`](https://github.com/npm/npm/commit/681b3987a18e7aba0aaf78c91a23c7cc0ab82ce8) + [#6523](https://github.com/npm/npm/issues/6523) fix default `logelevel` doc + ([@KenanY](https://github.com/KenanY)) +* [`80b368f`](https://github.com/npm/npm/commit/80b368ffd786d4d008734b56c4a6fe12d2cb2926) + [#6528](https://github.com/npm/npm/issues/6528) `npm version` should work in + a git directory without git ([@terinjokes](https://github.com/terinjokes)) +* [`5f5f9e4`](https://github.com/npm/npm/commit/5f5f9e4ddf544c2da6adf3f8c885238b0e745076) + [#6483](https://github.com/npm/npm/issues/6483) `init-package-json@1.1.1`: + Properly pick up default values from environment variables. + ([@othiym23](https://github.com/othiym23)) +* [`a114870`](https://github.com/npm/npm/commit/a1148702f53f82d49606b2e4dac7581261fff442) + perl 5.18.x doesn't like -pi without filenames + ([@othiym23](https://github.com/othiym23)) +* [`de5ba00`](https://github.com/npm/npm/commit/de5ba007a48db876eb5bfb6156435f3512d58977) + `request@2.46.0`: Tests and cleanup. + ([@othiym23](https://github.com/othiym23)) +* [`76933f1`](https://github.com/npm/npm/commit/76933f169f17b5273b32e924a7b392d5729931a7) + `fstream-npm@1.0.1`: Always include `LICENSE[.*]`, `LICENCE[.*]`, + `CHANGES[.*]`, `CHANGELOG[.*]`, and `HISTORY[.*]`. 
+ ([@jonathanong](https://github.com/jonathanong)) + +### v2.1.5 (2014-10-16): + +* [`6a14b23`](https://github.com/npm/npm/commit/6a14b232a0e34158bd95bb25c607167be995c204) + [#6397](https://github.com/npm/npm/issues/6397) Defactor npmconf back into + npm. ([@othiym23](https://github.com/othiym23)) +* [`4000e33`](https://github.com/npm/npm/commit/4000e3333a76ca4844681efa8737cfac24b7c2c8) + [#6323](https://github.com/npm/npm/issues/6323) Install `peerDependencies` + from top. ([@othiym23](https://github.com/othiym23)) +* [`5d119ae`](https://github.com/npm/npm/commit/5d119ae246f27353b14ff063559d1ba8c616bb89) + [#6498](https://github.com/npm/npm/issues/6498) Better error messages on + malformed `.npmrc` properties. ([@nicks](https://github.com/nicks)) +* [`ae18efb`](https://github.com/npm/npm/commit/ae18efb65fed427b1ef18e4862885bf60b87b92e) + [#6093](https://github.com/npm/npm/issues/6093) Replace instances of 'hash' + with 'object' in documentation. ([@zeke](https://github.com/zeke)) +* [`53108b2`](https://github.com/npm/npm/commit/53108b276fec5f97a38250933a2768d58b6928da) + [#1558](https://github.com/npm/npm/issues/1558) Clarify how local paths + should be used. ([@KenanY](https://github.com/KenanY)) +* [`344fa1a`](https://github.com/npm/npm/commit/344fa1a219ac8867022df3dc58a47636dde8a242) + [#6488](https://github.com/npm/npm/issues/6488) Work around bug in marked. + ([@othiym23](https://github.com/othiym23)) + +OUTDATED DEPENDENCY CLEANUP JAMBOREE + +* [`60c2942`](https://github.com/npm/npm/commit/60c2942e13655d9ecdf6e0f1f97f10cb71a75255) + `realize-package-specifier@1.2.0`: Handle names and rawSpecs more + consistently. ([@iarna](https://github.com/iarna)) +* [`1b5c95f`](https://github.com/npm/npm/commit/1b5c95fbda77b87342bd48c5ecac5b1fd571ccfe) + `sha@1.3.0`: Change line endings? 
+ ([@ForbesLindesay](https://github.com/ForbesLindesay)) +* [`d7dee3f`](https://github.com/npm/npm/commit/d7dee3f3f7d9e7c2061a4ecb4dd93e3e4bfe4f2e) + `request@2.45.0`: Dependency updates, better proxy support, better compressed + response handling, lots of 'use strict'. + ([@mikeal](https://github.com/mikeal)) +* [`3d75180`](https://github.com/npm/npm/commit/3d75180c2cc79fa3adfa0e4cb783a27192189a65) + `opener@1.4.0`: Added gratuitous return. + ([@Domenic](https://github.com/Domenic)) +* [`8e2703f`](https://github.com/npm/npm/commit/8e2703f78d280d1edeb749e257dda1f288bad6e3) + `retry@0.6.1` / `npm-registry-client@3.2.4`: Change of ownership. + ([@tim-kos](https://github.com/tim-kos)) +* [`c87b00f`](https://github.com/npm/npm/commit/c87b00f82f92434ee77831915012c77a6c244c39) + `once@1.3.1`: Wrap once with wrappy. ([@isaacs](https://github.com/isaacs)) +* [`01ec790`](https://github.com/npm/npm/commit/01ec790fd47def56eda6abb3b8d809093e8f493f) + `npm-user-validate@0.1.1`: Correct repository URL. + ([@robertkowalski](https://github.com/robertkowalski)) +* [`389e52c`](https://github.com/npm/npm/commit/389e52c2d94c818ca8935ccdcf392994fec564a2) + `glob@4.0.6`: Now absolutely requires `graceful-fs`. + ([@isaacs](https://github.com/isaacs)) +* [`e15ab15`](https://github.com/npm/npm/commit/e15ab15a27a8f14cf0d9dc6f11dee452080378a0) + `ini@1.3.0`: Tighten up whitespace handling. + ([@isaacs](https://github.com/isaacs)) +* [`7610f3e`](https://github.com/npm/npm/commit/7610f3e62e699292ece081bfd33084d436e3246d) + `archy@1.0.0` ([@substack](https://github.com/substack)) +* [`9c13149`](https://github.com/npm/npm/commit/9c1314985e513e20ffa3ea0ca333ba2ab78299c9) + `semver@4.1.0`: Add support for prerelease identifiers. + ([@bromanko](https://github.com/bromanko)) +* [`f096c25`](https://github.com/npm/npm/commit/f096c250441b031d758f03afbe8d2321f94c7703) + `graceful-fs@3.0.4`: Add a bunch of additional tests, skip the unfortunate + complications of `graceful-fs@3.0.3`. 
([@isaacs](https://github.com/isaacs)) + +### v2.1.4 (2014-10-09): + +* [`3aeb440`](https://github.com/npm/npm/commit/3aeb4401444fad83cc7a8d11bf2507658afa5248) + [#6442](https://github.com/npm/npm/issues/6442) proxying git needs `GIT_SSL_CAINFO` + ([@wmertens](https://github.com/wmertens)) +* [`a8da8d6`](https://github.com/npm/npm/commit/a8da8d6e0cd56d97728c0b76b51604ee06ef6264) + [#6413](https://github.com/npm/npm/issues/6413) write builtin config on any + global npm install ([@isaacs](https://github.com/isaacs)) +* [`9e4d632`](https://github.com/npm/npm/commit/9e4d632c0142ba55df07d624667738b8727336fc) + [#6343](https://github.com/npm/npm/issues/6343) don't pass run arguments to + pre & post scripts ([@TheLudd](https://github.com/TheLudd)) +* [`d831b1f`](https://github.com/npm/npm/commit/d831b1f7ca1a9921ea5b394e39b7130ecbc6d7b4) + [#6399](https://github.com/npm/npm/issues/6399) race condition: inflight + installs, prevent `peerDependency` problems + ([@othiym23](https://github.com/othiym23)) +* [`82b775d`](https://github.com/npm/npm/commit/82b775d6ff34c4beb6c70b2344d491a9f2026577) + [#6384](https://github.com/npm/npm/issues/6384) race condition: inflight + caching by URL rather than semver range + ([@othiym23](https://github.com/othiym23)) +* [`7bee042`](https://github.com/npm/npm/commit/7bee0429066fedcc9e6e962c043eb740b3792809) + `inflight@1.0.4`: callback can take arbitrary number of parameters + ([@othiym23](https://github.com/othiym23)) +* [`3bff494`](https://github.com/npm/npm/commit/3bff494f4abf17d6d7e0e4a3a76cf7421ecec35a) + [#5195](https://github.com/npm/npm/issues/5195) fixed regex color regression + for `npm search` ([@chrismeyersfsu](https://github.com/chrismeyersfsu)) +* [`33ba2d5`](https://github.com/npm/npm/commit/33ba2d585160a0a2a322cb76c4cd989acadcc984) + [#6387](https://github.com/npm/npm/issues/6387) allow `npm view global` if + package is specified ([@evanlucas](https://github.com/evanlucas)) +* 
[`99c4cfc`](https://github.com/npm/npm/commit/99c4cfceed413396d952cf05f4e3c710f9682c23) + [#6388](https://github.com/npm/npm/issues/6388) npm-publish → + npm-developers(7) ([@kennydude](https://github.com/kennydude)) + +TEST CLEANUP EXTRAVAGANZA: + +* [`8d6bfcb`](https://github.com/npm/npm/commit/8d6bfcb88408f5885a2a67409854c43e5c3a23f6) + tap tests run with no system-wide side effects + ([@chrismeyersfsu](https://github.com/chrismeyersfsu)) +* [`7a1472f`](https://github.com/npm/npm/commit/7a1472fbdbe99956ad19f629e7eb1cc07ba026ef) + added npm cache cleanup script + ([@chrismeyersfsu](https://github.com/chrismeyersfsu)) +* [`0ce6a37`](https://github.com/npm/npm/commit/0ce6a3752fa9119298df15671254db6bc1d8e64c) + stripped out dead test code (othiym23) +* replace spawn with common.npm (@chrismeyersfsu): + * [`0dcd614`](https://github.com/npm/npm/commit/0dcd61446335eaf541bf5f2d5186ec1419f86a42) + test/tap/cache-shasum-fork.js + * [`97f861c`](https://github.com/npm/npm/commit/97f861c967606a7e51e3d5047cf805d9d1adea5a) + test/tap/false_name.js + * [`d01b3de`](https://github.com/npm/npm/commit/d01b3de6ce03f25bbf3db97bfcd3cc85830d6801) + test/tap/git-cache-locking.js + * [`7b63016`](https://github.com/npm/npm/commit/7b63016778124c6728d6bd89a045c841ae3900b6) + test/tap/pack-scoped.js + * [`c877553`](https://github.com/npm/npm/commit/c877553265c39673e03f0a97972f692af81a595d) + test/tap/scripts-whitespace-windows.js + * [`df98525`](https://github.com/npm/npm/commit/df98525331e964131299d457173c697cfb3d95b9) + test/tap/prepublish.js + * [`99c4cfc`](https://github.com/npm/npm/commit/99c4cfceed413396d952cf05f4e3c710f9682c23) + test/tap/prune.js + +### v2.1.3 (2014-10-02): + +BREAKING CHANGE FOR THE SQRT(i) PEOPLE ACTUALLY USING `npm submodule`: + +* [`1e64473`](https://github.com/npm/npm/commit/1e6447360207f45ad6188e5780fdf4517de6e23d) + `rm -rf npm submodule` command, which has been broken since the Carter + Administration ([@isaacs](https://github.com/isaacs)) + +BREAKING CHANGE 
IF YOU ARE FOR SOME REASON STILL USING NODE 0.6 AND YOU SHOULD +NOT BE DOING THAT CAN YOU NOT: + +* [`3e431f9`](https://github.com/npm/npm/commit/3e431f9d6884acb4cde8bcb8a0b122a76b33ee1d) + [joyent/node#8492](https://github.com/joyent/node/issues/8492) bye bye + customFds, hello stdio ([@othiym23](https://github.com/othiym23)) + +Other changes: + +* [`ea607a8`](https://github.com/npm/npm/commit/ea607a8a20e891ad38eed11b5ce2c3c0a65484b9) + [#6372](https://github.com/npm/npm/issues/6372) noisily error (without + aborting) on multi-{install,build} ([@othiym23](https://github.com/othiym23)) +* [`3ee2799`](https://github.com/npm/npm/commit/3ee2799b629fd079d2db21d7e8f25fa7fa1660d0) + [#6372](https://github.com/npm/npm/issues/6372) only make cache creation + requests in flight ([@othiym23](https://github.com/othiym23)) +* [`1a90ec2`](https://github.com/npm/npm/commit/1a90ec2f2cfbefc8becc6ef0c480e5edacc8a4cb) + [#6372](https://github.com/npm/npm/issues/6372) wait to put Git URLs in + flight until normalized ([@othiym23](https://github.com/othiym23)) +* [`664795b`](https://github.com/npm/npm/commit/664795bb7d8da7142417b3f4ef5986db3a394071) + [#6372](https://github.com/npm/npm/issues/6372) log what is and isn't in + flight ([@othiym23](https://github.com/othiym23)) +* [`00ef580`](https://github.com/npm/npm/commit/00ef58025a1f52dfabf2c4dc3898621d16a6e062) + `inflight@1.0.3`: fix largely theoretical race condition, because we really + really hate race conditions ([@isaacs](https://github.com/isaacs)) +* [`1cde465`](https://github.com/npm/npm/commit/1cde4658d897ae0f93ff1d65b258e1571b391182) + [#6363](https://github.com/npm/npm/issues/6363) + `realize-package-specifier@1.1.0`: handle local dependencies better + ([@iarna](https://github.com/iarna)) +* [`86f084c`](https://github.com/npm/npm/commit/86f084c6c6d7935cd85d72d9d94b8784c914d51e) + `realize-package-specifier@1.0.2`: dependency realization! in its own module! 
+ ([@iarna](https://github.com/iarna)) +* [`553d830`](https://github.com/npm/npm/commit/553d830334552b83606b6bebefd821c9ea71e964) + `npm-package-arg@2.1.3`: simplified semver, better tests + ([@iarna](https://github.com/iarna)) +* [`bec9b61`](https://github.com/npm/npm/commit/bec9b61a316c19f5240657594f0905a92a474352) + `readable-stream@1.0.32`: for some reason + ([@rvagg](https://github.com/rvagg)) +* [`ff08ec5`](https://github.com/npm/npm/commit/ff08ec5f6d717bdbd559de0b2ede769306a9a763) + `dezalgo@1.0.1`: use wrappy for instrumentability + ([@isaacs](https://github.com/isaacs)) + +### v2.1.2 (2014-09-29): + +* [`a1aa20e`](https://github.com/npm/npm/commit/a1aa20e44bb8285c6be1e7fa63b9da920e3a70ed) + [#6282](https://github.com/npm/npm/issues/6282) + `normalize-package-data@1.0.3`: don't prune bundledDependencies + ([@isaacs](https://github.com/isaacs)) +* [`a1f5fe1`](https://github.com/npm/npm/commit/a1f5fe1005043ce20a06e8b17a3e201aa3215357) + move locks back into cache, now path-aware + ([@othiym23](https://github.com/othiym23)) +* [`a432c4b`](https://github.com/npm/npm/commit/a432c4b48c881294d6d79b5f41c2e1c16ad15a8a) + convert lib/utils/tar.js to use atomic streams + ([@othiym23](https://github.com/othiym23)) +* [`b8c3c74`](https://github.com/npm/npm/commit/b8c3c74a3c963564233204161cc263e0912c930b) + `fs-write-stream-atomic@1.0.2`: Now works with streams1 fs.WriteStreams. 
+ ([@isaacs](https://github.com/isaacs)) +* [`c7ab76f`](https://github.com/npm/npm/commit/c7ab76f44cce5f42add5e3ba879bd10e7e00c3e6) + logging cleanup ([@othiym23](https://github.com/othiym23)) +* [`4b2d95d`](https://github.com/npm/npm/commit/4b2d95d0641435b09d047ae5cb2226f292bf38f0) + [#6329](https://github.com/npm/npm/issues/6329) efficiently validate tmp + tarballs safely ([@othiym23](https://github.com/othiym23)) + +### v2.1.1 (2014-09-26): + +* [`563225d`](https://github.com/npm/npm/commit/563225d813ea4c12f46d4f7821ac7f76ba8ee2d6) + [#6318](https://github.com/npm/npm/issues/6318) clean up locking; prefix + lockfile with "." ([@othiym23](https://github.com/othiym23)) +* [`c7f30e4`](https://github.com/npm/npm/commit/c7f30e4550fea882d31fcd4a55b681cd30713c44) + [#6318](https://github.com/npm/npm/issues/6318) remove locking code around + tarball packing and unpacking ([@othiym23](https://github.com/othiym23)) + +### v2.1.0 (2014-09-25): + +NEW FEATURE: + +* [`3635601`](https://github.com/npm/npm/commit/36356011b6f2e6a5a81490e85a0a44eb27199dd7) + [#5520](https://github.com/npm/npm/issues/5520) Add `'npm view .'`. + ([@evanlucas](https://github.com/evanlucas)) + +Other changes: + +* [`f24b552`](https://github.com/npm/npm/commit/f24b552b596d0627549cdd7c2d68fcf9006ea50a) + [#6294](https://github.com/npm/npm/issues/6294) Lock cache → lock cache + target. ([@othiym23](https://github.com/othiym23)) +* [`ad54450`](https://github.com/npm/npm/commit/ad54450104f94c82c501138b4eee488ce3a4555e) + [#6296](https://github.com/npm/npm/issues/6296) Ensure that npm-debug.log + file is created when rollbacks are done. + ([@isaacs](https://github.com/isaacs)) +* [`6810071`](https://github.com/npm/npm/commit/681007155a40ac9d165293bd6ec5d8a1423ccfca) + docs: Default loglevel "http" → "warn". + ([@othiym23](https://github.com/othiym23)) +* [`35ac89a`](https://github.com/npm/npm/commit/35ac89a940f23db875e882ce2888208395130336) + Skip installation of installed scoped packages. 
+ ([@timoxley](https://github.com/timoxley)) +* [`e468527`](https://github.com/npm/npm/commit/e468527256ec599892b9b88d61205e061d1ab735) + Ensure cleanup executes for scripts-whitespace-windows test. + ([@timoxley](https://github.com/timoxley)) +* [`ef9101b`](https://github.com/npm/npm/commit/ef9101b7f346797749415086956a0394528a12c4) + Ensure cleanup executes for packed-scope test. + ([@timoxley](https://github.com/timoxley)) +* [`69b4d18`](https://github.com/npm/npm/commit/69b4d18cdbc2ae04c9afaffbd273b436a394f398) + `fs-write-stream-atomic@1.0.1`: Fix a race condition in our race-condition + fixer. ([@isaacs](https://github.com/isaacs)) +* [`26b17ff`](https://github.com/npm/npm/commit/26b17ff2e3b21ee26c6fdbecc8273520cff45718) + [#6272](https://github.com/npm/npm/issues/6272) `npmconf` decides what the + default prefix is. ([@othiym23](https://github.com/othiym23)) +* [`846faca`](https://github.com/npm/npm/commit/846facacc6427dafcf5756dcd36d9036539938de) + Fix development dependency is preferred over dependency. + ([@andersjanmyr](https://github.com/andersjanmyr)) +* [`9d1a9db`](https://github.com/npm/npm/commit/9d1a9db3af5adc48a7158a5a053eeb89ee41a0e7) + [#3265](https://github.com/npm/npm/issues/3265) Re-apply a71615a. Fixes + [#3265](https://github.com/npm/npm/issues/3265) again, with a test! + ([@glasser](https://github.com/glasser)) +* [`1d41db0`](https://github.com/npm/npm/commit/1d41db0b2744a7bd50971c35cc060ea0600fb4bf) + `marked-man@0.1.4`: Fixes formatting of synopsis blocks in man docs. + ([@kapouer](https://github.com/kapouer)) +* [`a623da0`](https://github.com/npm/npm/commit/a623da01bea1b2d3f3a18b9117cfd2d8e3cbdd77) + [#5867](https://github.com/npm/npm/issues/5867) Specify dummy git template + dir when cloning to prevent copying hooks. 
+ ([@boneskull](https://github.com/boneskull)) + +### v2.0.2 (2014-09-19): + +* [`42c872b`](https://github.com/npm/npm/commit/42c872b32cadc0e555638fc78eab3a38a04401d8) + [#5920](https://github.com/npm/npm/issues/5920) + `fs-write-stream-atomic@1.0.0` ([@isaacs](https://github.com/isaacs)) +* [`6784767`](https://github.com/npm/npm/commit/6784767fe15e28b44c81a1d4bb1738c642a65d78) + [#5920](https://github.com/npm/npm/issues/5920) make all write streams atomic + ([@isaacs](https://github.com/isaacs)) +* [`f6fac00`](https://github.com/npm/npm/commit/f6fac000dd98ebdd5ea1d5921175735d463d328b) + [#5920](https://github.com/npm/npm/issues/5920) barf on 0-length cached + tarballs ([@isaacs](https://github.com/isaacs)) +* [`3b37592`](https://github.com/npm/npm/commit/3b37592a92ea98336505189ae8ca29248b0589f4) + `write-file-atomic@1.1.0`: use graceful-fs + ([@iarna](https://github.com/iarna)) + +### v2.0.1 (2014-09-18): + +* [`74c5ab0`](https://github.com/npm/npm/commit/74c5ab0a676793c6dc19a3fd5fe149f85fecb261) + [#6201](https://github.com/npm/npm/issues/6201) `npmconf@2.1.0`: scope + always-auth to registry URI ([@othiym23](https://github.com/othiym23)) +* [`774b127`](https://github.com/npm/npm/commit/774b127da1dd6fefe2f1299e73505d9146f00294) + [#6201](https://github.com/npm/npm/issues/6201) `npm-registry-client@3.2.2`: + use scoped always-auth settings ([@othiym23](https://github.com/othiym23)) +* [`f2d2190`](https://github.com/npm/npm/commit/f2d2190aa365d22378d03afab0da13f95614a583) + [#6201](https://github.com/npm/npm/issues/6201) support saving + `--always-auth` when logging in ([@othiym23](https://github.com/othiym23)) +* [`17c941a`](https://github.com/npm/npm/commit/17c941a2d583210fe97ed47e2968d94ce9f774ba) + [#6163](https://github.com/npm/npm/issues/6163) use `write-file-atomic` + instead of `fs.writeFile()` ([@fiws](https://github.com/fiws)) +* [`fb5724f`](https://github.com/npm/npm/commit/fb5724fd98e1509c939693568df83d11417ea337) + 
[#5925](https://github.com/npm/npm/issues/5925) `npm init -f`: allow `npm + init` to run without prompting + ([@michaelnisi](https://github.com/michaelnisi)) +* [`b706d63`](https://github.com/npm/npm/commit/b706d637d5965dbf8f7ce07dc5c4bc80887f30d8) + [#3059](https://github.com/npm/npm/issues/3059) disable prepublish when + running `npm install --production` + ([@jussi-kalliokoski](https://github.com/jussi-kalliokoski)) +* [`119f068`](https://github.com/npm/npm/commit/119f068eae2a36fa8b9c9ca557c70377792243a4) + attach the node version used when publishing a package to its registry + metadata ([@othiym23](https://github.com/othiym23)) +* [`8fe0081`](https://github.com/npm/npm/commit/8fe008181665519c2ac201ee432a3ece9798c31f) + seriously, don't use `npm -g update npm` + ([@thomblake](https://github.com/thomblake)) +* [`ea5b3d4`](https://github.com/npm/npm/commit/ea5b3d446b86dcabb0dbc6dba374d3039342ecb3) + `request@2.44.0` ([@othiym23](https://github.com/othiym23)) + +### v2.0.0 (2014-09-12): + +BREAKING CHANGES: + +* [`4378a17`](https://github.com/npm/npm/commit/4378a17db340404a725ffe2eb75c9936f1612670) + `semver@4.0.0`: prerelease versions no longer show up in ranges; `^0.x.y` + behaves the way it did in `semver@2` rather than `semver@3`; docs have been + reorganized for comprehensibility ([@isaacs](https://github.com/isaacs)) +* [`c6ddb64`](https://github.com/npm/npm/commit/c6ddb6462fe32bf3a27b2c4a62a032a92e982429) + npm now assumes that node is newer than 0.6 + ([@isaacs](https://github.com/isaacs)) + +Other changes: + +* [`ea515c3`](https://github.com/npm/npm/commit/ea515c3b858bf493a7b87fa4cdc2110a0d9cef7f) + [#6043](https://github.com/npm/npm/issues/6043) `slide@1.1.6`: wait until all + callbacks have finished before proceeding + ([@othiym23](https://github.com/othiym23)) +* [`0b0a59d`](https://github.com/npm/npm/commit/0b0a59d504f20f424294b1590ace73a7464f0378) + [#6043](https://github.com/npm/npm/issues/6043) defer rollbacks until just + before the CLI exits 
([@isaacs](https://github.com/isaacs)) +* [`a11c88b`](https://github.com/npm/npm/commit/a11c88bdb1488b87d8dcac69df9a55a7a91184b6) + [#6175](https://github.com/npm/npm/issues/6175) pack scoped packages + correctly ([@othiym23](https://github.com/othiym23)) +* [`e4e48e0`](https://github.com/npm/npm/commit/e4e48e037d4e95fdb6acec80b04c5c6eaee59970) + [#6121](https://github.com/npm/npm/issues/6121) `read-installed@3.1.2`: don't + mark linked dev dependencies as extraneous + ([@isaacs](https://github.com/isaacs)) +* [`d673e41`](https://github.com/npm/npm/commit/d673e4185d43362c2b2a91acbca8c057e7303c7b) + `cmd-shim@2.0.1`: depend on `graceful-fs` directly + ([@ForbesLindesay](https://github.com/ForbesLindesay)) +* [`9d54d45`](https://github.com/npm/npm/commit/9d54d45e602d595bdab7eae09b9fa1dc46370147) + `npm-registry-couchapp@2.5.3`: make tests more reliable on Travis + ([@iarna](https://github.com/iarna)) +* [`673d738`](https://github.com/npm/npm/commit/673d738c6142c3d043dcee0b7aa02c9831a2e0ca) + ensure permissions are set correctly in cache when running as root + ([@isaacs](https://github.com/isaacs)) +* [`6e6a5fb`](https://github.com/npm/npm/commit/6e6a5fb74af10fd345411df4e121e554e2e3f33e) + prepare for upgrade to `node-semver@4.0.0` + ([@isaacs](https://github.com/isaacs)) +* [`ab8dd87`](https://github.com/npm/npm/commit/ab8dd87b943262f5996744e8d4cc30cc9358b7d7) + swap out `ronn` for `marked-man@0.1.3` ([@isaacs](https://github.com/isaacs)) +* [`803da54`](https://github.com/npm/npm/commit/803da5404d5a0b7c9defa3fe7fa0f2d16a2b19d3) + `npm-registry-client@3.2.0`: prepare for `node-semver@4.0.0` and include more + error information ([@isaacs](https://github.com/isaacs)) +* [`4af0e71`](https://github.com/npm/npm/commit/4af0e7134f5757c3d456d83e8349224a4ba12660) + make default error display less scary ([@isaacs](https://github.com/isaacs)) +* [`4fd9e79`](https://github.com/npm/npm/commit/4fd9e7901a15abff7a3dd478d99ce239b9580bca) + `npm-registry-client@3.2.1`: handle errors 
returned by the registry much, + much better ([@othiym23](https://github.com/othiym23)) +* [`ca791e2`](https://github.com/npm/npm/commit/ca791e27e97e51c1dd491bff6622ac90b54c3e23) + restore a long (always?) missing pass for deduping + ([@othiym23](https://github.com/othiym23)) +* [`ca0ef0e`](https://github.com/npm/npm/commit/ca0ef0e99bbdeccf28d550d0296baa4cb5e7ece2) + correctly interpret relative paths for local dependencies + ([@othiym23](https://github.com/othiym23)) +* [`5eb8db2`](https://github.com/npm/npm/commit/5eb8db2c370eeb4cd34f6e8dc6a935e4ea325621) + `npm-package-arg@2.1.2`: support git+file:// URLs for local bare repos + ([@othiym23](https://github.com/othiym23)) +* [`860a185`](https://github.com/npm/npm/commit/860a185c43646aca84cb93d1c05e2266045c316b) + tweak docs to no longer advocate checking in `node_modules` + ([@hunterloftis](https://github.com/hunterloftis)) +* [`80e9033`](https://github.com/npm/npm/commit/80e9033c40e373775e35c674faa6c1948661782b) + add links to nodejs.org downloads to docs + ([@meetar](https://github.com/meetar)) + +### v1.4.28 (2014-09-12): + +* [`f4540b6`](https://github.com/npm/npm/commit/f4540b6537a87e653d7495a9ddcf72949fdd4d14) + [#6043](https://github.com/npm/npm/issues/6043) defer rollbacks until just + before the CLI exits ([@isaacs](https://github.com/isaacs)) +* [`1eabfd5`](https://github.com/npm/npm/commit/1eabfd5c03f33c2bd28823714ff02059eeee3899) + [#6043](https://github.com/npm/npm/issues/6043) `slide@1.1.6`: wait until all + callbacks have finished before proceeding + ([@othiym23](https://github.com/othiym23)) + +### v2.0.0-beta.3 (2014-09-04): + +* [`fa79413`](https://github.com/npm/npm/commit/fa794138bec8edb7b88639db25ee9c010d2f4c2b) + [#6119](https://github.com/npm/npm/issues/6119) fall back to registry installs + if package.json is missing in a local directory ([@iarna](https://github.com/iarna)) +* [`16073e2`](https://github.com/npm/npm/commit/16073e2d8ae035961c4c189b602d4aacc6d6b387) + `npm-package-arg@2.1.0`: 
support file URIs as local specs + ([@othiym23](https://github.com/othiym23)) +* [`9164acb`](https://github.com/npm/npm/commit/9164acbdee28956fa816ce5e473c559395ae4ec2) + `github-url-from-username-repo@1.0.2`: don't match strings that are already + URIs ([@othiym23](https://github.com/othiym23)) +* [`4067d6b`](https://github.com/npm/npm/commit/4067d6bf303a69be13f3af4b19cf4fee1b0d3e12) + [#5629](https://github.com/npm/npm/issues/5629) support saving of local packages + in `package.json` ([@dylang](https://github.com/dylang)) +* [`1b2ffdf`](https://github.com/npm/npm/commit/1b2ffdf359a8c897a78f91fc5a5d535c97aaec97) + [#6097](https://github.com/npm/npm/issues/6097) document scoped packages + ([@seldo](https://github.com/seldo)) +* [`0a67d53`](https://github.com/npm/npm/commit/0a67d536067c4808a594d81288d34c0f7e97e105) + [#6007](https://github.com/npm/npm/issues/6007) `request@2.42.0`: properly + set headers on proxy requests ([@isaacs](https://github.com/isaacs)) +* [`9bac6b8`](https://github.com/npm/npm/commit/9bac6b860b674d24251bb7b8ba412fdb26cbc836) + `npmconf@2.0.8`: disallow semver ranges in tag configuration + ([@isaacs](https://github.com/isaacs)) +* [`d2d4d7c`](https://github.com/npm/npm/commit/d2d4d7cd3c32f91a87ffa11fe464d524029011c3) + [#6082](https://github.com/npm/npm/issues/6082) don't allow tagging with a + semver range as the tag name ([@isaacs](https://github.com/isaacs)) + +### v1.4.27 (2014-09-04): + +* [`4cf3c8f`](https://github.com/npm/npm/commit/4cf3c8fd78c9e2693a5f899f50c28f4823c88e2e) + [#6007](https://github.com/npm/npm/issues/6007) request@2.42.0: properly set + headers on proxy requests ([@isaacs](https://github.com/isaacs)) +* [`403cb52`](https://github.com/npm/npm/commit/403cb526be1472bb7545fa8e62d4976382cdbbe5) + [#6055](https://github.com/npm/npm/issues/6055) npmconf@1.1.8: restore + case-insensitivity of environmental config + ([@iarna](https://github.com/iarna)) + +### v2.0.0-beta.2 (2014-08-29): + +SPECIAL LABOR DAY WEEKEND RELEASE 
PARTY WOOO + +* [`ed207e8`](https://github.com/npm/npm/commit/ed207e88019de3150037048df6267024566e1093) + `npm-registry-client@3.1.7`: Clean up auth logic and improve logging around + auth decisions. Also error on trying to change a user document without + writing to it. ([@othiym23](https://github.com/othiym23)) +* [`66c7423`](https://github.com/npm/npm/commit/66c7423b7fb07a326b83c83727879410d43c439f) + `npmconf@2.0.7`: support -C as an alias for --prefix + ([@isaacs](https://github.com/isaacs)) +* [`0dc6a07`](https://github.com/npm/npm/commit/0dc6a07c778071c94c2251429c7d107e88a45095) + [#6059](https://github.com/npm/npm/issues/6059) run commands in prefix, not + cwd ([@isaacs](https://github.com/isaacs)) +* [`65d2179`](https://github.com/npm/npm/commit/65d2179af96737eb9038eaa24a293a62184aaa13) + `github-url-from-username-repo@1.0.1`: part 3 handle slashes in branch names + ([@robertkowalski](https://github.com/robertkowalski)) +* [`e8d75d0`](https://github.com/npm/npm/commit/e8d75d0d9f148ce2b3e8f7671fa281945bac363d) + [#6057](https://github.com/npm/npm/issues/6057) `read-installed@3.1.1`: + properly handle extraneous dev dependencies of required dependencies + ([@othiym23](https://github.com/othiym23)) +* [`0602f70`](https://github.com/npm/npm/commit/0602f708f070d524ad41573afd4c57171cab21ad) + [#6064](https://github.com/npm/npm/issues/6064) ls: do not show deps of + extraneous deps ([@isaacs](https://github.com/isaacs)) + +### v2.0.0-beta.1 (2014-08-28): + +* [`78a1fc1`](https://github.com/npm/npm/commit/78a1fc12307a0cbdbc944775ed831b876ee65855) + `github-url-from-git@1.4.0`: add support for git+https and git+ssh + ([@stefanbuck](https://github.com/stefanbuck)) +* [`bf247ed`](https://github.com/npm/npm/commit/bf247edf5429c6b3ec4d4cb798fa0eb0a9c19fc1) + `columnify@1.2.1` ([@othiym23](https://github.com/othiym23)) +* [`4bbe682`](https://github.com/npm/npm/commit/4bbe682a6d4eabcd23f892932308c9f228bf4de3) + `cmd-shim@2.0.0`: upgrade to graceful-fs 3 + 
([@ForbesLindesay](https://github.com/ForbesLindesay)) +* [`ae1d590`](https://github.com/npm/npm/commit/ae1d590bdfc2476a4ed446e760fea88686e3ae05) + `npm-package-arg@2.0.4`: accept slashes in branch names + ([@thealphanerd](https://github.com/thealphanerd)) +* [`b2f51ae`](https://github.com/npm/npm/commit/b2f51aecadf585711e145b6516f99e7c05f53614) + `semver@3.0.1`: semver.clean() is cleaner + ([@isaacs](https://github.com/isaacs)) +* [`1d041a8`](https://github.com/npm/npm/commit/1d041a8a5ebd5bf6cecafab2072d4ec07823adab) + `github-url-from-username-repo@1.0.0`: accept slashes in branch names + ([@robertkowalski](https://github.com/robertkowalski)) +* [`02c85d5`](https://github.com/npm/npm/commit/02c85d592c4058e5d9eafb0be36b6743ae631998) + `async-some@1.0.1` ([@othiym23](https://github.com/othiym23)) +* [`5af493e`](https://github.com/npm/npm/commit/5af493efa8a463cd1acc4a9a394699e2c0793b9c) + ensure lifecycle spawn errors caught properly + ([@isaacs](https://github.com/isaacs)) +* [`60fe012`](https://github.com/npm/npm/commit/60fe012fac9570d6c72554cdf34a6fa95bf0f0a6) + `npmconf@2.0.6`: init.version defaults to 1.0.0 + ([@isaacs](https://github.com/isaacs)) +* [`b4c717b`](https://github.com/npm/npm/commit/b4c717bbf58fb6a0d64ad229036c79a184297ee2) + `npm-registry-client@3.1.4`: properly encode % in passwords + ([@isaacs](https://github.com/isaacs)) +* [`7b55f44`](https://github.com/npm/npm/commit/7b55f44420252baeb3f30da437d22956315c31c9) + doc: Fix 'npm help index' ([@isaacs](https://github.com/isaacs)) + +### v1.4.26 (2014-08-28): + +* [`eceea95`](https://github.com/npm/npm/commit/eceea95c804fa15b18e91c52c0beb08d42a3e77d) + `github-url-from-git@1.4.0`: add support for git+https and git+ssh + ([@stefanbuck](https://github.com/stefanbuck)) +* [`e561758`](https://github.com/npm/npm/commit/e5617587e7d7ab686192391ce55357dbc7fed0a3) + `columnify@1.2.1` ([@othiym23](https://github.com/othiym23)) +* 
[`0c4fab3`](https://github.com/npm/npm/commit/0c4fab372ee76eab01dda83b6749429a8564902e) + `cmd-shim@2.0.0`: upgrade to graceful-fs 3 + ([@ForbesLindesay](https://github.com/ForbesLindesay)) +* [`2d69e4d`](https://github.com/npm/npm/commit/2d69e4d95777671958b5e08d3b2f5844109d73e4) + `github-url-from-username-repo@1.0.0`: accept slashes in branch names + ([@robertkowalski](https://github.com/robertkowalski)) +* [`81f9b2b`](https://github.com/npm/npm/commit/81f9b2bac9d34c223ea093281ba3c495f23f10d1) + ensure lifecycle spawn errors caught properly + ([@isaacs](https://github.com/isaacs)) +* [`bfaab8c`](https://github.com/npm/npm/commit/bfaab8c6e0942382a96b250634ded22454c36b5a) + `npm-registry-client@2.0.7`: properly encode % in passwords + ([@isaacs](https://github.com/isaacs)) +* [`91cfb58`](https://github.com/npm/npm/commit/91cfb58dda851377ec604782263519f01fd96ad8) + doc: Fix 'npm help index' ([@isaacs](https://github.com/isaacs)) + +### v2.0.0-beta.0 (2014-08-21): + +* [`685f8be`](https://github.com/npm/npm/commit/685f8be1f2770cc75fd0e519a8d7aac72735a270) + `npm-registry-client@3.1.3`: Print the notification header returned by the + registry, and make sure status codes are printed without gratuitous quotes + around them. ([@isaacs](https://github.com/isaacs) / + [@othiym23](https://github.com/othiym23)) +* [`a8cb676`](https://github.com/npm/npm/commit/a8cb676aef0561eaf04487d2719672b097392c85) + [#5900](https://github.com/npm/npm/issues/5900) remove `npm` from its own + `engines` field in `package.json`. None of us remember why it was there. 
+ ([@timoxley](https://github.com/timoxley)) +* [`6c47201`](https://github.com/npm/npm/commit/6c47201a7d071e8bf091b36933daf4199cc98e80) + [#5752](https://github.com/npm/npm/issues/5752), + [#6013](https://github.com/npm/npm/issues/6013) save git URLs correctly in + `_resolved` fields ([@isaacs](https://github.com/isaacs)) +* [`e4e1223`](https://github.com/npm/npm/commit/e4e1223a91c37688ba3378e1fc9d5ae045654d00) + [#5936](https://github.com/npm/npm/issues/5936) document the use of tags in + `package.json` ([@KenanY](https://github.com/KenanY)) +* [`c92b8d4`](https://github.com/npm/npm/commit/c92b8d4db7bde2a501da5b7d612684de1d629a42) + [#6004](https://github.com/npm/npm/issues/6004) manually installed scoped + packages are tracked correctly ([@dead-horse](https://github.com/dead-horse)) +* [`21ca0aa`](https://github.com/npm/npm/commit/21ca0aaacbcfe2b89b0a439d914da0cae62de550) + [#5945](https://github.com/npm/npm/issues/5945) link scoped packages + correctly ([@dead-horse](https://github.com/dead-horse)) +* [`16bead7`](https://github.com/npm/npm/commit/16bead7f2c82aec35b83ff0ec04df051ba456764) + [#5958](https://github.com/npm/npm/issues/5958) ensure that file streams work + in all versions of node ([@dead-horse](https://github.com/dead-horse)) +* [`dbf0cab`](https://github.com/npm/npm/commit/dbf0cab29d0db43ac95e4b5a1fbdea1e0af75f10) + you can now pass quoted args to `npm run-script` + ([@bcoe](https://github.com/bcoe)) +* [`0583874`](https://github.com/npm/npm/commit/05838743f01ccb8d2432b3858d66847002fb62df) + `tar@1.0.1`: Add test for removing an extract target immediately after + unpacking. + ([@isaacs](https://github.com/isaacs)) +* [`cdf3b04`](https://github.com/npm/npm/commit/cdf3b0428bc0b0183fb41dcde9e34e8f42c5e3a7) + `lockfile@1.0.0`: Fix incorrect interaction between `wait`, `stale`, and + `retries` options. Part 2 of race condition leading to `ENOENT` + errors. + ([@isaacs](https://github.com/isaacs)) 
+* [`22d72a8`](https://github.com/npm/npm/commit/22d72a87a9e1a9ab56d9585397f63551887d9125) + `fstream@1.0.2`: Fix a double-finish call which can result in excess FS + operations after the `close` event. Part 1 of race condition leading to + `ENOENT` errors. + ([@isaacs](https://github.com/isaacs)) + +### v1.4.25 (2014-08-21): + +* [`64c0ec2`](https://github.com/npm/npm/commit/64c0ec241ef5d83761ca8de54acb3c41b079956e) + `npm-registry-client@2.0.6`: Print the notification header returned by the + registry, and make sure status codes are printed without gratuitous quotes + around them. + ([@othiym23](https://github.com/othiym23)) +* [`a8ed12b`](https://github.com/npm/npm/commit/a8ed12b) `tar@1.0.1`: + Add test for removing an extract target immediately after unpacking. + ([@isaacs](https://github.com/isaacs)) +* [`70fd11d`](https://github.com/npm/npm/commit/70fd11d) + `lockfile@1.0.0`: Fix incorrect interaction between `wait`, `stale`, + and `retries` options. Part 2 of race condition leading to `ENOENT` + errors. + ([@isaacs](https://github.com/isaacs)) +* [`0072c4d`](https://github.com/npm/npm/commit/0072c4d) + `fstream@1.0.2`: Fix a double-finish call which can result in excess + FS operations after the `close` event. Part 2 of race condition + leading to `ENOENT` errors. 
+ ([@isaacs](https://github.com/isaacs)) + +### v2.0.0-alpha.7 (2014-08-14): + +* [`f23f1d8`](https://github.com/npm/npm/commit/f23f1d8e8f86ec1b7ab8dad68250bccaa67d61b1) + doc: update version doc to include `pre-*` increment args + ([@isaacs](https://github.com/isaacs)) +* [`b6bb746`](https://github.com/npm/npm/commit/b6bb7461824d4dc1c0936f46bd7929b5cd597986) + build: add 'make tag' to tag current release as latest + ([@isaacs](https://github.com/isaacs)) +* [`27c4bb6`](https://github.com/npm/npm/commit/27c4bb606e46e5eaf604b19fe8477bc6567f8b2e) + build: publish with `--tag=v1.4-next` ([@isaacs](https://github.com/isaacs)) +* [`cff66c3`](https://github.com/npm/npm/commit/cff66c3bf2850880058ebe2a26655dafd002495e) + build: add script to output `v1.4-next` publish tag + ([@isaacs](https://github.com/isaacs)) +* [`22abec8`](https://github.com/npm/npm/commit/22abec8833474879ac49b9604c103bc845dad779) + build: remove outdated `docpublish` make target + ([@isaacs](https://github.com/isaacs)) +* [`1be4de5`](https://github.com/npm/npm/commit/1be4de51c3976db8564f72b00d50384c921f0917) + build: remove `unpublish` step from `make publish` + ([@isaacs](https://github.com/isaacs)) +* [`e429e20`](https://github.com/npm/npm/commit/e429e2011f4d78e398f2461bca3e5a9a146fbd0c) + doc: add new changelog ([@othiym23](https://github.com/othiym23)) +* [`9243d20`](https://github.com/npm/npm/commit/9243d207896ea307082256604c10817f7c318d68) + lifecycle: test lifecycle path modification + ([@isaacs](https://github.com/isaacs)) +* [`021770b`](https://github.com/npm/npm/commit/021770b9cb07451509f0a44afff6c106311d8cf6) + lifecycle: BREAKING CHANGE do not add the directory containing node executable + ([@chulkilee](https://github.com/chulkilee)) +* [`1d5c41d`](https://github.com/npm/npm/commit/1d5c41dd0d757bce8b87f10c4135f04ece55aeb9) + install: rename .gitignore when unpacking foreign tarballs + ([@isaacs](https://github.com/isaacs)) +* 
[`9aac267`](https://github.com/npm/npm/commit/9aac2670a73423544d92b27cc301990a16a9563b) + cache: detect non-gzipped tar files more reliably + ([@isaacs](https://github.com/isaacs)) +* [`3f24755`](https://github.com/npm/npm/commit/3f24755c8fce3c7ab11ed1dc632cc40d7ef42f62) + `readdir-scoped-modules@1.0.0` ([@isaacs](https://github.com/isaacs)) +* [`151cd2f`](https://github.com/npm/npm/commit/151cd2ff87b8ac2fc9ea366bc9b7f766dc5b9684) + `read-installed@3.1.0` ([@isaacs](https://github.com/isaacs)) +* [`f5a9434`](https://github.com/npm/npm/commit/f5a94343a8ebe4a8cd987320b55137aef53fb3fd) + test: fix Travis timeouts ([@dylang](https://github.com/dylang)) +* [`126cafc`](https://github.com/npm/npm/commit/126cafcc6706814c88af3042f2ffff408747bff4) + `npm-registry-couchapp@2.5.0` ([@othiym23](https://github.com/othiym23)) + +### v1.4.24 (2014-08-14): + +* [`9344bd9`](https://github.com/npm/npm/commit/9344bd9b2929b5c399a0e0e0b34d45bce7bc24bb) + doc: add new changelog ([@othiym23](https://github.com/othiym23)) +* [`4be76fd`](https://github.com/npm/npm/commit/4be76fd65e895883c337a99f275ccc8c801adda3) + doc: update version doc to include `pre-*` increment args + ([@isaacs](https://github.com/isaacs)) +* [`e4f2620`](https://github.com/npm/npm/commit/e4f262036080a282ad60e236a9aeebd39fde9fe4) + build: add `make tag` to tag current release as `latest` + ([@isaacs](https://github.com/isaacs)) +* [`ec2596a`](https://github.com/npm/npm/commit/ec2596a7cb626772780b25b0a94a7e547a812bd5) + build: publish with `--tag=v1.4-next` ([@isaacs](https://github.com/isaacs)) +* [`9ee55f8`](https://github.com/npm/npm/commit/9ee55f892b8b473032a43c59912c5684fd1b39e6) + build: add script to output `v1.4-next` publish tag + ([@isaacs](https://github.com/isaacs)) +* [`aecb56f`](https://github.com/npm/npm/commit/aecb56f95a84687ea46920a0b98aaa587fee1568) + build: remove outdated `docpublish` make target + ([@isaacs](https://github.com/isaacs)) +* 
[`b57a9b7`](https://github.com/npm/npm/commit/b57a9b7ccd13e6b38831ed63595c8ea5763da247) + build: remove unpublish step from `make publish` + ([@isaacs](https://github.com/isaacs)) +* [`2c6acb9`](https://github.com/npm/npm/commit/2c6acb96c71c16106965d5cd829b67195dd673c7) + install: rename `.gitignore` when unpacking foreign tarballs + ([@isaacs](https://github.com/isaacs)) +* [`22f3681`](https://github.com/npm/npm/commit/22f3681923e993a47fc1769ba735bfa3dd138082) + cache: detect non-gzipped tar files more reliably + ([@isaacs](https://github.com/isaacs)) + +### v2.0.0-alpha.6 (2014-08-07): + +BREAKING CHANGE: + +* [`ea547e2`](https://github.com/npm/npm/commit/ea547e2) Bump semver to + version 3: `^0.x.y` is now functionally the same as `=0.x.y`. + ([@isaacs](https://github.com/isaacs)) + +Other changes: + +* [`d987707`](https://github.com/npm/npm/commit/d987707) move fetch into + npm-registry-client ([@othiym23](https://github.com/othiym23)) +* [`9b318e2`](https://github.com/npm/npm/commit/9b318e2) `read-installed@3.0.0` + ([@isaacs](https://github.com/isaacs)) +* [`9d73de7`](https://github.com/npm/npm/commit/9d73de7) remove unnecessary + mkdirps ([@isaacs](https://github.com/isaacs)) +* [`33ccd13`](https://github.com/npm/npm/commit/33ccd13) Don't squash execute + perms in `_git-remotes/` dir ([@adammeadows](https://github.com/adammeadows)) +* [`48fd233`](https://github.com/npm/npm/commit/48fd233) `npm-package-arg@2.0.1` + ([@isaacs](https://github.com/isaacs)) + +### v1.4.23 (2014-07-31): + +* [`8dd11d1`](https://github.com/npm/npm/commit/8dd11d1) update several + dependencies to avoid using `semver`s starting with 0. 
+ +### v1.4.22 (2014-07-31): + +* [`d9a9e84`](https://github.com/npm/npm/commit/d9a9e84) `read-package-json@1.2.4` + ([@isaacs](https://github.com/isaacs)) +* [`86f0340`](https://github.com/npm/npm/commit/86f0340) + `github-url-from-git@1.2.0` ([@isaacs](https://github.com/isaacs)) +* [`a94136a`](https://github.com/npm/npm/commit/a94136a) `fstream@0.1.29` + ([@isaacs](https://github.com/isaacs)) +* [`bb82d18`](https://github.com/npm/npm/commit/bb82d18) `glob@4.0.5` + ([@isaacs](https://github.com/isaacs)) +* [`5b6bcf4`](https://github.com/npm/npm/commit/5b6bcf4) `cmd-shim@1.1.2` + ([@isaacs](https://github.com/isaacs)) +* [`c2aa8b3`](https://github.com/npm/npm/commit/c2aa8b3) license: Cleaned up + legalese with actual lawyer ([@isaacs](https://github.com/isaacs)) +* [`63fe0ee`](https://github.com/npm/npm/commit/63fe0ee) `init-package-json@1.0.0` + ([@isaacs](https://github.com/isaacs)) + +### v2.0.0-alpha-5 (2014-07-22): + +This release bumps up to 2.0 because of this breaking change, which could +potentially affect how your package's scripts are run: + +* [`df4b0e7`](https://github.com/npm/npm/commit/df4b0e7fc1abd9a54f98db75ec9e4d03d37d125b) + [#5518](https://github.com/npm/npm/issues/5518) BREAKING CHANGE: support + passing arguments to `run` scripts ([@bcoe](https://github.com/bcoe)) + +Other changes: + +* [`cd422c9`](https://github.com/npm/npm/commit/cd422c9de510766797c65720d70f085000f50543) + [#5748](https://github.com/npm/npm/issues/5748) link binaries for scoped + packages ([@othiym23](https://github.com/othiym23)) +* [`4c3c778`](https://github.com/npm/npm/commit/4c3c77839920e830991e0c229c3c6a855c914d67) + [#5758](https://github.com/npm/npm/issues/5758) `npm link` includes scope + when linking scoped package ([@fengmk2](https://github.com/fengmk2)) +* [`f9f58dd`](https://github.com/npm/npm/commit/f9f58dd0f5b715d4efa6619f13901916d8f99c47) + [#5707](https://github.com/npm/npm/issues/5707) document generic pre- / + post-commands 
([@sudodoki](https://github.com/sudodoki)) +* [`ac7a480`](https://github.com/npm/npm/commit/ac7a4801d80361b41dce4a18f22bcdf75e396000) + [#5406](https://github.com/npm/npm/issues/5406) `npm cache` displays usage + when called without arguments + ([@michaelnisi](https://github.com/michaelnisi)) +* [`f4554e9`](https://github.com/npm/npm/commit/f4554e99d34f77a8a02884493748f7d49a9a9d8b) + Test fixes for Windows ([@isaacs](https://github.com/isaacs)) +* update dependencies ([@othiym23](https://github.com/othiym23)) + + +### v1.5.0-alpha-4 (2014-07-18): + +* fall back to `_auth` config as default auth when using default registry + ([@isaacs](https://github.com/isaacs)) +* support for 'init.version' for those who don't want to deal with semver 0.0.x + oddities ([@rvagg](https://github.com/rvagg)) +* [`be06213`](https://github.com/npm/npm/commit/be06213415f2d51a50d2c792b4cd0d3412a9a7b1) + remove residual support for `win` log level + ([@aterris](https://github.com/aterris)) + +### v1.5.0-alpha-3 (2014-07-17): + +* [`a3a85dd`](https://github.com/npm/npm/commit/a3a85dd004c9245a71ad2f0213bd1a9a90d64cd6) + `--save` scoped packages correctly ([@othiym23](https://github.com/othiym23)) +* [`18a3385`](https://github.com/npm/npm/commit/18a3385bcf8bfb8312239216afbffb7eec759150) + `npm-registry-client@3.0.2` ([@othiym23](https://github.com/othiym23)) +* [`375988b`](https://github.com/npm/npm/commit/375988b9bf5aa5170f06a790d624d31b1eb32c6d) + invalid package names are an early error for optional deps + ([@othiym23](https://github.com/othiym23)) +* consistently use `node-package-arg` instead of arbitrary package spec + splitting ([@othiym23](https://github.com/othiym23)) + +### v1.4.21 (2014-07-14): + +* [`88f51aa`](https://github.com/npm/npm/commit/88f51aa27eb9a958d1fa7ec50fee5cfdedd05110) + fix handling for 301s in `npm-registry-client@2.0.3` + ([@Raynos](https://github.com/Raynos)) + +### v1.5.0-alpha-2 (2014-07-01): + +* 
[`54cf625`](https://github.com/npm/npm/commit/54cf62534e3331e3f454e609e44f0b944e819283) + fix handling for 301s in `npm-registry-client@3.0.1` + ([@Raynos](https://github.com/Raynos)) +* [`e410861`](https://github.com/npm/npm/commit/e410861c69a3799c1874614cb5b87af8124ff98d) + don't crash if no username set on `whoami` + ([@isaacs](https://github.com/isaacs)) +* [`0353dde`](https://github.com/npm/npm/commit/0353ddeaca8171aa7dbdd8102b7e2eb581a86406) + respect `--json` for output ([@isaacs](https://github.com/isaacs)) +* [`b3d112a`](https://github.com/npm/npm/commit/b3d112ae190b984cc1779b9e6de92218f22380c6) + outdated: Don't show headings if there's nothing to output + ([@isaacs](https://github.com/isaacs)) +* [`bb4b90c`](https://github.com/npm/npm/commit/bb4b90c80dbf906a1cb26d85bc0625dc2758acc3) + outdated: Default to `latest` rather than `*` for unspecified deps + ([@isaacs](https://github.com/isaacs)) + +### v1.4.20 (2014-07-02): + +* [`0353dde`](https://github.com/npm/npm/commit/0353ddeaca8171aa7dbdd8102b7e2eb581a86406) + respect `--json` for output ([@isaacs](https://github.com/isaacs)) +* [`b3d112a`](https://github.com/npm/npm/commit/b3d112ae190b984cc1779b9e6de92218f22380c6) + outdated: Don't show headings if there's nothing to output + ([@isaacs](https://github.com/isaacs)) +* [`bb4b90c`](https://github.com/npm/npm/commit/bb4b90c80dbf906a1cb26d85bc0625dc2758acc3) + outdated: Default to `latest` rather than `*` for unspecified deps + ([@isaacs](https://github.com/isaacs)) + +### v1.5.0-alpha-1 (2014-07-01): + +* [`eef4884`](https://github.com/npm/npm/commit/eef4884d6487ee029813e60a5f9c54e67925d9fa) + use the correct piece of the spec for GitHub shortcuts + ([@othiym23](https://github.com/othiym23)) + +### v1.5.0-alpha-0 (2014-07-01): + +* [`7f55057`](https://github.com/npm/npm/commit/7f55057807cfdd9ceaf6331968e666424f48116c) + install scoped packages ([#5239](https://github.com/npm/npm/issues/5239)) + ([@othiym23](https://github.com/othiym23)) +* 
[`0df7e16`](https://github.com/npm/npm/commit/0df7e16c0232d8f4d036ebf4ec3563215517caac) + publish scoped packages ([#5239](https://github.com/npm/npm/issues/5239)) + ([@othiym23](https://github.com/othiym23)) +* [`0689ba2`](https://github.com/npm/npm/commit/0689ba249b92b4c6279a26804c96af6f92b3a501) + support (and save) --scope=@s config + ([@othiym23](https://github.com/othiym23)) +* [`f34878f`](https://github.com/npm/npm/commit/f34878fc4cee29901e4daf7bace94be01e25cad7) + scope credentials to registry ([@othiym23](https://github.com/othiym23)) +* [`0ac7ca2`](https://github.com/npm/npm/commit/0ac7ca233f7a69751fe4386af6c4daa3ee9fc0da) + capture and store bearer tokens when sent by registry + ([@othiym23](https://github.com/othiym23)) +* [`63c3277`](https://github.com/npm/npm/commit/63c3277f089b2c4417e922826bdc313ac854cad6) + only delete files that are created by npm + ([@othiym23](https://github.com/othiym23)) +* [`4f54043`](https://github.com/npm/npm/commit/4f540437091d1cbca3915cd20c2da83c2a88bb8e) + `npm-package-arg@2.0.0` ([@othiym23](https://github.com/othiym23)) +* [`9e1460e`](https://github.com/npm/npm/commit/9e1460e6ac9433019758481ec031358f4af4cd44) + `read-package-json@1.2.3` ([@othiym23](https://github.com/othiym23)) +* [`719d8ad`](https://github.com/npm/npm/commit/719d8adb9082401f905ff4207ede494661f8a554) + `fs-vacuum@1.2.1` ([@othiym23](https://github.com/othiym23)) +* [`9ef8fe4`](https://github.com/npm/npm/commit/9ef8fe4d6ead3acb3e88c712000e2d3a9480ebec) + `async-some@1.0.0` ([@othiym23](https://github.com/othiym23)) +* [`a964f65`](https://github.com/npm/npm/commit/a964f65ab662107b62a4ca58535ce817e8cca331) + `npmconf@2.0.1` ([@othiym23](https://github.com/othiym23)) +* [`113765b`](https://github.com/npm/npm/commit/113765bfb7d3801917c1d9f124b8b3d942bec89a) + `npm-registry-client@3.0.0` ([@othiym23](https://github.com/othiym23)) + +### v1.4.19 (2014-07-01): + +* [`f687433`](https://github.com/npm/npm/commit/f687433) relative URLS for + working non-root 
registry URLS ([@othiym23](https://github.com/othiym23)) +* [`bea190c`](https://github.com/npm/npm/commit/bea190c) + [#5591](https://github.com/npm/npm/issues/5591) bump nopt and npmconf + ([@isaacs](https://github.com/isaacs)) + +### v1.4.18 (2014-06-29): + +* Bump glob dependency from 4.0.2 to 4.0.3. It now uses graceful-fs when + available, increasing resilience to [various filesystem + errors](https://github.com/isaacs/node-graceful-fs#improvements-over-fs-module). + ([@isaacs](https://github.com/isaacs)) + +### v1.4.17 (2014-06-27): + +* replace escape codes with ansicolors + ([@othiym23](https://github.com/othiym23)) +* Allow to build all the docs OOTB. ([@GeJ](https://github.com/GeJ)) +* Use core.longpaths on win32 git - fixes + [#5525](https://github.com/npm/npm/issues/5525) ([@bmeck](https://github.com/bmeck)) +* `npmconf@1.1.2` ([@isaacs](https://github.com/isaacs)) +* Consolidate color sniffing in config/log loading process + ([@isaacs](https://github.com/isaacs)) +* add verbose log when project config file is ignored + ([@isaacs](https://github.com/isaacs)) +* npmconf: Float patch to remove 'scope' from config defs + ([@isaacs](https://github.com/isaacs)) +* doc: npm-explore can't handle a version + ([@robertkowalski](https://github.com/robertkowalski)) +* Add user-friendly errors for ENOSPC and EROFS. + ([@voodootikigod](https://github.com/voodootikigod)) +* bump tar and fstream deps ([@isaacs](https://github.com/isaacs)) +* Run the npm-registry-couchapp tests along with npm tests + ([@isaacs](https://github.com/isaacs)) + +### v1.2.8000 (2014-06-17): + +* Same as v1.4.16, but with the spinner disabled, and a version number that + starts with v1.2. 
+ +### v1.4.16 (2014-06-17): + +* `npm-registry-client@2.0.2` ([@isaacs](https://github.com/isaacs)) +* `fstream@0.1.27` ([@isaacs](https://github.com/isaacs)) +* `sha@1.2.4` ([@isaacs](https://github.com/isaacs)) +* `rimraf@2.2.8` ([@isaacs](https://github.com/isaacs)) +* `npmlog@1.0.1` ([@isaacs](https://github.com/isaacs)) +* `npm-registry-client@2.0.1` ([@isaacs](https://github.com/isaacs)) +* removed redundant dependency ([@othiym23](https://github.com/othiym23)) +* `npmconf@1.0.5` ([@isaacs](https://github.com/isaacs)) +* Properly handle errors that can occur in the config-loading process + ([@isaacs](https://github.com/isaacs)) + +### v1.4.15 (2014-06-10): + +* cache: atomic de-race-ified package.json writing + ([@isaacs](https://github.com/isaacs)) +* `fstream@0.1.26` ([@isaacs](https://github.com/isaacs)) +* `graceful-fs@3.0.2` ([@isaacs](https://github.com/isaacs)) +* `osenv@0.1.0` ([@isaacs](https://github.com/isaacs)) +* Only spin the spinner when we're fetching stuff + ([@isaacs](https://github.com/isaacs)) +* Update `osenv@0.1.0` which removes ~/tmp as possible tmp-folder + ([@robertkowalski](https://github.com/robertkowalski)) +* `ini@1.2.1` ([@isaacs](https://github.com/isaacs)) +* `graceful-fs@3` ([@isaacs](https://github.com/isaacs)) +* Update glob and things depending on glob + ([@isaacs](https://github.com/isaacs)) +* github-url-from-username-repo and read-package-json updates + ([@isaacs](https://github.com/isaacs)) +* `editor@0.1.0` ([@isaacs](https://github.com/isaacs)) +* `columnify@1.1.0` ([@isaacs](https://github.com/isaacs)) +* bump ansi and associated deps ([@isaacs](https://github.com/isaacs)) + +### v1.4.14 (2014-06-05): + +* char-spinner: update to not bork windows + ([@isaacs](https://github.com/isaacs)) + +### v1.4.13 (2014-05-23): + +* Fix `npm install` on a tarball. 
+ ([`ed3abf1`](https://github.com/npm/npm/commit/ed3abf1aa10000f0f687330e976d78d1955557f6), + [#5330](https://github.com/npm/npm/issues/5330), + [@othiym23](https://github.com/othiym23)) +* Fix an issue with the spinner on Node 0.8. + ([`9f00306`](https://github.com/npm/npm/commit/9f003067909440390198c0b8f92560d84da37762), + [@isaacs](https://github.com/isaacs)) +* Re-add `npm.commands.cache.clean` and `npm.commands.cache.read` APIs, and + document `npm.commands.cache.*` as npm-cache(3). + ([`e06799e`](https://github.com/npm/npm/commit/e06799e77e60c1fc51869619083a25e074d368b3), + [@isaacs](https://github.com/isaacs)) + +### v1.4.12 (2014-05-23): + +* remove normalize-package-data from top level, de-^-ify inflight dep + ([@isaacs](https://github.com/isaacs)) +* Always sort saved bundleDependencies ([@isaacs](https://github.com/isaacs)) +* add inflight to bundledDependencies + ([@othiym23](https://github.com/othiym23)) + +### v1.4.11 (2014-05-22): + +* fix `npm ls` labeling issue +* `node-gyp@0.13.1` +* default repository to https:// instead of git:// +* addLocalTarball: Remove extraneous unpack + ([@isaacs](https://github.com/isaacs)) +* Massive cache folder refactor ([@othiym23](https://github.com/othiym23) and + [@isaacs](https://github.com/isaacs)) +* Busy Spinner, no http noise ([@isaacs](https://github.com/isaacs)) +* Per-project .npmrc file support ([@isaacs](https://github.com/isaacs)) +* `npmconf@1.0.0`, Refactor config/uid/prefix loading process + ([@isaacs](https://github.com/isaacs)) +* Allow once-disallowed characters in passwords + ([@isaacs](https://github.com/isaacs)) +* Send npm version as 'version' header ([@isaacs](https://github.com/isaacs)) +* fix cygwin encoding issue (Karsten Tinnefeld) +* Allow non-github repositories with `npm repo` + ([@evanlucas](https://github.com/evanlucas)) +* Allow peer deps to be satisfied by grandparent +* Stop optional deps moving into deps on `update --save` + ([@timoxley](https://github.com/timoxley)) +* Ensure 
only matching deps update with `update --save*` + ([@timoxley](https://github.com/timoxley)) +* Add support for `prerelease`, `preminor`, `prepatch` to `npm version` + +### v1.4.10 (2014-05-05): + +* Don't set referer if already set +* fetch: Send referer and npm-session headers +* `run-script`: Support `--parseable` and `--json` +* list runnable scripts ([@evanlucas](https://github.com/evanlucas)) +* Use marked instead of ronn for html docs + +### v1.4.9 (2014-05-01): + +* Send referer header (with any potentially private stuff redacted) +* Fix critical typo bug in previous npm release + +### v1.4.8 (2014-05-01): + +* Check SHA before using files from cache +* adduser: allow change of the saved password +* Make `npm install` respect `config.unicode` +* Fix lifecycle to pass `Infinity` for config env value +* Don't return 0 exit code on invalid command +* cache: Handle 404s and other HTTP errors as errors +* Resolve ~ in path configs to env.HOME +* Include npm version in default user-agent conf +* npm init: Use ISC as default license, use save-prefix for deps +* Many test and doc fixes + +### v1.4.7 (2014-04-15): + +* Add `--save-prefix` option that can be used to override the default of `^` + when using `npm install --save` and its counterparts. + ([`64eefdf`](https://github.com/npm/npm/commit/64eefdfe26bb27db8dc90e3ab5d27a5ef18a4470), + [@thlorenz](https://github.com/thlorenz)) +* Allow `--silent` to silence the echoing of commands that occurs with `npm + run`. + ([`c95cf08`](https://github.com/npm/npm/commit/c95cf086e5b97dbb48ff95a72517b203a8f29eab), + [@Raynos](https://github.com/Raynos)) +* Some speed improvements to the cache, which should improve install times. 
+ ([`cb94310`](https://github.com/npm/npm/commit/cb94310a6adb18cb7b881eacb8d67171eda8b744), + [`3b0870f`](https://github.com/npm/npm/commit/3b0870fb2f40358b3051abdab6be4319d196b99d), + [`120f5a9`](https://github.com/npm/npm/commit/120f5a93437bbbea9249801574a2f33e44e81c33), + [@isaacs](https://github.com/isaacs)) +* Improve ability to retry registry requests when a subset of the registry + servers are down. + ([`4a5257d`](https://github.com/npm/npm/commit/4a5257de3870ac3dafa39667379f19f6dcd6093e), + https://github.com/npm/npm-registry-client/commit/7686d02cb0b844626d6a401e58c0755ef3bc8432, + [@isaacs](https://github.com/isaacs)) +* Fix marking of peer dependencies as extraneous. + ([`779b164`](https://github.com/npm/npm/commit/779b1649764607b062c031c7e5c972151b4a1754), + https://github.com/npm/read-installed/commit/6680ba6ef235b1ca3273a00b70869798ad662ddc, + [@isaacs](https://github.com/isaacs)) +* Fix npm crashing when doing `npm shrinkwrap` in the presence of a + `package.json` with no dependencies. + ([`a9d9fa5`](https://github.com/npm/npm/commit/a9d9fa5ad3b8c925a589422b7be28d2735f320b0), + [@kislyuk](https://github.com/kislyuk)) +* Fix error when using `npm view` on packages that have no versions or have + been unpublished. + ([`94df2f5`](https://github.com/npm/npm/commit/94df2f56d684b35d1df043660180fc321b743dc8), + [@juliangruber](https://github.com/juliangruber); + [`2241a09`](https://github.com/npm/npm/commit/2241a09c843669c70633c399ce698cec3add40b3), + [@isaacs](https://github.com/isaacs)) + +### v1.4.6 (2014-03-19): + +* Fix extraneous package detection to work in more cases. + ([`f671286`](https://github.com/npm/npm/commit/f671286), npm/read-installed#20, + [@LaurentVB](https://github.com/LaurentVB)) + +### v1.4.5 (2014-03-18): + +* Sort dependencies in `package.json` when doing `npm install --save` and all + its variants. 
+ ([`6fd6ff7`](https://github.com/npm/npm/commit/6fd6ff7e536ea6acd33037b1878d4eca1f931985), + [@domenic](https://github.com/domenic)) +* Add `--save-exact` option, usable alongside `--save` and its variants, which + will write the exact version number into `package.json` instead of the + appropriate semver-compatibility range. + ([`17f07df`](https://github.com/npm/npm/commit/17f07df8ad8e594304c2445bf7489cb53346f2c5), + [@timoxley](https://github.com/timoxley)) +* Accept gzipped content from the registry to speed up downloads and save + bandwidth. + ([`a3762de`](https://github.com/npm/npm/commit/a3762de843b842be8fa0ab57cdcd6b164f145942), + npm/npm-registry-client#40, [@fengmk2](https://github.com/fengmk2)) +* Fix `npm ls`'s `--depth` and `--log` options. + ([`1d29b17`](https://github.com/npm/npm/commit/1d29b17f5193d52a5c4faa412a95313dcf41ed91), + npm/read-installed#13, [@zertosh](https://github.com/zertosh)) +* Fix "Adding a cache directory to the cache will make the world implode" in + certain cases. + ([`9a4b2c4`](https://github.com/npm/npm/commit/9a4b2c4667c2b1e0054e3d5611ab86acb1760834), + domenic/path-is-inside#1, [@pmarques](https://github.com/pmarques)) +* Fix readmes not being uploaded in certain rare cases. + ([`527b72c`](https://github.com/npm/npm/commit/527b72cca6c55762b51e592c48a9f28cc7e2ff8b), + [@isaacs](https://github.com/isaacs)) + +### v1.4.4 (2014-02-20): + +* Add `npm t` as an alias for `npm test` (which is itself an alias for `npm run + test`, or even `npm run-script test`). We like making running your tests + easy. ([`14e650b`](https://github.com/npm/npm/commit/14e650bce0bfebba10094c961ac104a61417a5de), [@isaacs](https://github.com/isaacs)) + +### v1.4.3 (2014-02-16): + +* Add back `npm prune --production`, which was removed in 1.3.24. 
+ ([`acc4d02`](https://github.com/npm/npm/commit/acc4d023c57d07704b20a0955e4bf10ee91bdc83), + [@davglass](https://github.com/davglass)) +* Default `npm install --save` and its counterparts to use the `^` version + specifier, instead of `~`. + ([`0a3151c`](https://github.com/npm/npm/commit/0a3151c9cbeb50c1c65895685c2eabdc7e2608dc), + [@mikolalysenko](https://github.com/mikolalysenko)) +* Make `npm shrinkwrap` output dependencies in a sorted order, so that diffs + between shrinkwrap files should be saner now. + ([`059b2bf`](https://github.com/npm/npm/commit/059b2bfd06ae775205a37257dca80142596a0113), + [@Raynos](https://github.com/Raynos)) +* Fix `npm dedupe` not correctly respecting dependency constraints. + ([`86028e9`](https://github.com/npm/npm/commit/86028e9fd8524d5e520ce01ba2ebab5a030103fc), + [@rafeca](https://github.com/rafeca)) +* Fix `npm ls` giving spurious warnings when you used `"latest"` as a version + specifier. + (https://github.com/npm/read-installed/commit/d2956400e0386931c926e0f30c334840e0938f14, + [@bajtos](https://github.com/bajtos)) +* Fixed a bug where using `npm link` on packages without a `name` value could + cause npm to delete itself. + ([`401a642`](https://github.com/npm/npm/commit/401a64286aa6665a94d1d2f13604f7014c5fce87), + [@isaacs](https://github.com/isaacs)) +* Fixed `npm install ./pkg@1.2.3` to actually install the directory at + `pkg@1.2.3`; before it would try to find version `1.2.3` of the package + `./pkg` in the npm registry. + ([`46d8768`](https://github.com/npm/npm/commit/46d876821d1dd94c050d5ebc86444bed12c56739), + [@rlidwka](https://github.com/rlidwka); see also + [`f851b79`](https://github.com/npm/npm/commit/f851b79a71d9a5f5125aa85877c94faaf91bea5f)) +* Fix `npm outdated` to respect the `color` configuration option. + ([`d4f6f3f`](https://github.com/npm/npm/commit/d4f6f3ff83bd14fb60d3ac6392cb8eb6b1c55ce1), + [@timoxley](https://github.com/timoxley)) +* Fix `npm outdated --parseable`. 
+ ([`9575a23`](https://github.com/npm/npm/commit/9575a23f955ce3e75b509c89504ef0bd707c8cf6), + [@yhpark](https://github.com/yhpark)) +* Fix a lockfile-related errors when using certain Git URLs. + ([`164b97e`](https://github.com/npm/npm/commit/164b97e6089f64e686db7a9a24016f245effc37f), + [@nigelzor](https://github.com/nigelzor)) + +### v1.4.2 (2014-02-13): + +* Fixed an issue related to mid-publish GET requests made against the registry. + (https://github.com/npm/npm-registry-client/commit/acbec48372bc1816c67c9e7cbf814cf50437ff93, + [@isaacs](https://github.com/isaacs)) + +### v1.4.1 (2014-02-13): + +* Fix `npm shrinkwrap` forgetting to shrinkwrap dependencies that were also + development dependencies. + ([`9c575c5`](https://github.com/npm/npm/commit/9c575c56efa9b0c8b0d4a17cb9c1de3833004bcd), + [@diwu1989](https://github.com/diwu1989)) +* Fixed publishing of pre-existing packages with uppercase characters in their + name. + (https://github.com/npm/npm-registry-client/commit/9345d3b6c3d8510dd5c4418f27ee1fce59acebad, + [@isaacs](https://github.com/isaacs)) + +### v1.4.0 (2014-02-12): + +* Remove `npm publish --force`. See + https://github.com/npm/npmjs.org/issues/148. + ([@isaacs](https://github.com/isaacs), + npm/npm-registry-client@2c8dba990de6a59af6545b75cc00a6dc12777c2a) +* Other changes to the registry client related to saved configs and couch + logins. ([@isaacs](https://github.com/isaacs); + npm/npm-registry-client@25e2b019a1588155e5f87d035c27e79963b75951, + npm/npm-registry-client@9e41e9101b68036e0f078398785f618575f3cdde, + npm/npm-registry-client@2c8dba990de6a59af6545b75cc00a6dc12777c2a) +* Show an error to the user when doing `npm update` and the `package.json` + specifies a version that does not exist. + ([@evanlucas](https://github.com/evanlucas), + [`027a33a`](https://github.com/npm/npm/commit/027a33a5c594124cc1d82ddec5aee2c18bc8dc32)) +* Fix some issues with cache ownership in certain installation configurations. 
+ ([@outcoldman](https://github.com/outcoldman), + [`a132690`](https://github.com/npm/npm/commit/a132690a2876cda5dcd1e4ca751f21dfcb11cb9e)) +* Fix issues where GitHub shorthand dependencies `user/repo` were not always + treated the same as full Git URLs. + ([@robertkowalski](https://github.com/robertkowalski), + https://github.com/meryn/normalize-package-data/commit/005d0b637aec1895117fcb4e3b49185eebf9e240) + +### v1.3.26 (2014-02-02): + +* Fixes and updates to publishing code + ([`735427a`](https://github.com/npm/npm/commit/735427a69ba4fe92aafa2d88f202aaa42920a9e2) + and + [`c0ac832`](https://github.com/npm/npm/commit/c0ac83224d49aa62e55577f8f27d53bbfd640dc5), + [@isaacs](https://github.com/isaacs)) +* Fix `npm bugs` with no arguments. + ([`b99d465`](https://github.com/npm/npm/commit/b99d465221ac03bca30976cbf4d62ca80ab34091), + [@Hoops](https://github.com/Hoops)) + +### v1.3.25 (2014-01-25): + +* Remove gubblebum blocky font from documentation headers. + ([`6940c9a`](https://github.com/npm/npm/commit/6940c9a100160056dc6be8f54a7ad7fa8ceda7e2), + [@isaacs](https://github.com/isaacs)) + +### v1.3.24 (2014-01-19): + +* Make the search output prettier, with nice truncated columns, and a `--long` + option to create wrapping columns. + ([`20439b2`](https://github.com/npm/npm/commit/20439b2) and + [`3a6942d`](https://github.com/npm/npm/commit/3a6942d), + [@timoxley](https://github.com/timoxley)) +* Support multiple packagenames in `npm docs`. + ([`823010b`](https://github.com/npm/npm/commit/823010b), + [@timoxley](https://github.com/timoxley)) +* Fix the `npm adduser` bug regarding "Error: default value must be string or + number" again. ([`b9b4248`](https://github.com/npm/npm/commit/b9b4248), + [@isaacs](https://github.com/isaacs)) +* Fix `scripts` entries containing whitespaces on Windows. 
+ ([`80282ed`](https://github.com/npm/npm/commit/80282ed), + [@robertkowalski](https://github.com/robertkowalski)) +* Fix `npm update` for Git URLs that have credentials in them + ([`93fc364`](https://github.com/npm/npm/commit/93fc364), + [@danielsantiago](https://github.com/danielsantiago)) +* Fix `npm install` overwriting `npm link`-ed dependencies when they are tagged + Git dependencies. ([`af9bbd9`](https://github.com/npm/npm/commit/af9bbd9), + [@evanlucas](https://github.com/evanlucas)) +* Remove `npm prune --production` since it buggily removed some dependencies + that were necessary for production; see + [#4509](https://github.com/npm/npm/issues/4509). Hopefully it can make its + triumphant return, one day. + ([`1101b6a`](https://github.com/npm/npm/commit/1101b6a), + [@isaacs](https://github.com/isaacs)) + +Dependency updates: +* [`909cccf`](https://github.com/npm/npm/commit/909cccf) `read-package-json@1.1.6` +* [`a3891b6`](https://github.com/npm/npm/commit/a3891b6) `rimraf@2.2.6` +* [`ac6efbc`](https://github.com/npm/npm/commit/ac6efbc) `sha@1.2.3` +* [`dd30038`](https://github.com/npm/npm/commit/dd30038) `node-gyp@0.12.2` +* [`c8c3ebe`](https://github.com/npm/npm/commit/c8c3ebe) `npm-registry-client@0.3.3` +* [`4315286`](https://github.com/npm/npm/commit/4315286) `npmconf@0.1.12` + +### v1.3.23 (2014-01-03): + +* Properly handle installations that contained a certain class of circular + dependencies. + ([`5dc93e8`](https://github.com/npm/npm/commit/5dc93e8c82604c45b6067b1acf1c768e0bfce754), + [@substack](https://github.com/substack)) + +### v1.3.22 (2013-12-25): + +* Fix a critical bug in `npm adduser` that would manifest in the error message + "Error: default value must be string or number." + ([`fba4bd2`](https://github.com/npm/npm/commit/fba4bd24bc2ab00ccfeda2043aa53af7d75ef7ce), + [@isaacs](https://github.com/isaacs)) +* Allow `npm bugs` in the current directory to open the current package's bugs + URL. 
+ ([`d04cf64`](https://github.com/npm/npm/commit/d04cf6483932c693452f3f778c2fa90f6153a4af), + [@evanlucas](https://github.com/evanlucas)) +* Several fixes to various error messages to include more useful or updated + information. + ([`1e6f2a7`](https://github.com/npm/npm/commit/1e6f2a72ca058335f9f5e7ca22d01e1a8bb0f9f7), + [`ff46366`](https://github.com/npm/npm/commit/ff46366bd40ff0ef33c7bac8400bc912c56201d1), + [`8b4bb48`](https://github.com/npm/npm/commit/8b4bb4815d80a3612186dc5549d698e7b988eb03); + [@rlidwka](https://github.com/rlidwka), + [@evanlucas](https://github.com/evanlucas)) + +### v1.3.21 (2013-12-17): + +* Fix a critical bug that prevented publishing due to incorrect hash + calculation. + ([`4ca4a2c`](https://github.com/npm/npm-registry-client/commit/4ca4a2c6333144299428be6b572e2691aa59852e), + [@dominictarr](https://github.com/dominictarr)) + +### v1.3.20 (2013-12-17): + +* Fixes a critical bug in v1.3.19. Thankfully, due to that bug, no one could + install npm v1.3.19 :) + +### v1.3.19 (2013-12-16): + +* Adds atomic PUTs for publishing packages, which should result in far fewer + requests and less room for replication errors on the server-side. + +### v1.3.18 (2013-12-16): + +* Added an `--ignore-scripts` option, which will prevent `package.json` scripts + from being run. Most notably, this will work on `npm install`, so e.g. `npm + install --ignore-scripts` will not run preinstall and prepublish scripts. + ([`d7e67bf`](https://github.com/npm/npm/commit/d7e67bf0d94b085652ec1c87d595afa6f650a8f6), + [@sqs](https://github.com/sqs)) +* Fixed a bug introduced in 1.3.16 that would manifest with certain cache + configurations, by causing spurious errors saying "Adding a cache directory + to the cache will make the world implode." + ([`966373f`](https://github.com/npm/npm/commit/966373fad8d741637f9744882bde9f6e94000865), + [@domenic](https://github.com/domenic)) +* Re-fixed the multiple download of URL dependencies, whose fix was reverted in + 1.3.17. 
+ ([`a362c3f`](https://github.com/npm/npm/commit/a362c3f1919987419ed8a37c8defa19d2e6697b0), + [@spmason](https://github.com/spmason)) + +### v1.3.17 (2013-12-11): + +* This release reverts + [`644c2ff`](https://github.com/npm/npm/commit/644c2ff3e3d9c93764f7045762477f48864d64a7), + which avoided re-downloading URL and shrinkwrap dependencies when doing `npm + install`. You can see the in-depth reasoning in + [`d8c907e`](https://github.com/npm/npm/commit/d8c907edc2019b75cff0f53467e34e0ffd7e5fba); + the problem was, that the patch changed the behavior of `npm install -f` to + reinstall all dependencies. +* A new version of the no-re-downloading fix has been submitted as + [#4303](https://github.com/npm/npm/issues/4303) and will hopefully be + included in the next release. + +### v1.3.16 (2013-12-11): + +* Git URL dependencies are now updated on `npm install`, fixing a two-year old + bug + ([`5829ecf`](https://github.com/npm/npm/commit/5829ecf032b392d2133bd351f53d3c644961396b), + [@robertkowalski](https://github.com/robertkowalski)). Additional progress on + reducing the resulting Git-related I/O is tracked as + [#4191](https://github.com/npm/npm/issues/4191), but for now, this will be a + big improvement. +* Added a `--json` mode to `npm outdated` to give a parseable output. + ([`0b6c9b7`](https://github.com/npm/npm/commit/0b6c9b7c8c5579f4d7d37a0c24d9b7a12ccbe5fe), + [@yyx990803](https://github.com/yyx990803)) +* Made `npm outdated` much prettier and more useful. It now outputs a + color-coded and easy-to-read table. + ([`fd3017f`](https://github.com/npm/npm/commit/fd3017fc3e9d42acf6394a5285122edb4dc16106), + [@quimcalpe](https://github.com/quimcalpe)) +* Added the `--depth` option to `npm outdated`, so that e.g. you can do `npm + outdated --depth=0` to show only top-level outdated dependencies. 
+ ([`1d184ef`](https://github.com/npm/npm/commit/1d184ef3f4b4bc309d38e9128732e3e6fb46d49c), + [@yyx990803](https://github.com/yyx990803)) +* Added a `--no-git-tag-version` option to `npm version`, for doing the usual + job of `npm version` minus the Git tagging. This could be useful if you need + to increase the version in other related files before actually adding the + tag. + ([`59ca984`](https://github.com/npm/npm/commit/59ca9841ba4f4b2f11b8e72533f385c77ae9f8bd), + [@evanlucas](https://github.com/evanlucas)) +* Made `npm repo` and `npm docs` work without any arguments, adding them to the + list of npm commands that work on the package in the current directory when + invoked without arguments. + ([`bf9048e`](https://github.com/npm/npm/commit/bf9048e2fa16d43fbc4b328d162b0a194ca484e8), + [@robertkowalski](https://github.com/robertkowalski); + [`07600d0`](https://github.com/npm/npm/commit/07600d006c652507cb04ac0dae9780e35073dd67), + [@wilmoore](https://github.com/wilmoore)). There are a few other commands we + still want to implement this for; see + [#4204](https://github.com/npm/npm/issues/4204). +* Pass through the `GIT_SSL_NO_VERIFY` environment variable to Git, if it is + set; we currently do this with a few other environment variables, but we + missed that one. + ([`c625de9`](https://github.com/npm/npm/commit/c625de91770df24c189c77d2e4bc821f2265efa8), + [@arikon](https://github.com/arikon)) +* Fixed `npm dedupe` on Windows due to incorrect path separators being used + ([`7677de4`](https://github.com/npm/npm/commit/7677de4583100bc39407093ecc6bc13715bf8161), + [@mcolyer](https://github.com/mcolyer)). +* Fixed the `npm help` command when multiple words were searched for; it + previously gave a `ReferenceError`. 
+ ([`6a28dd1`](https://github.com/npm/npm/commit/6a28dd147c6957a93db12b1081c6e0da44fe5e3c), + [@dereckson](https://github.com/dereckson)) +* Stopped re-downloading URL and shrinkwrap dependencies, as demonstrated in + [#3463](https://github.com/npm/npm/issues/3463) + ([`644c2ff`](https://github.com/isaacs/npm/commit/644c2ff3e3d9c93764f7045762477f48864d64a7), + [@spmason](https://github.com/spmason)). You can use the `--force` option to + force re-download and installation of all dependencies. diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm-bin.md nodejs-0.11.15/deps/npm/doc/api/npm-bin.md --- nodejs-0.11.13/deps/npm/doc/api/npm-bin.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm-bin.md 2015-01-20 21:22:17.000000000 +0000 @@ -10,4 +10,4 @@ Print the folder where npm will install executables. This function should not be used programmatically. Instead, just refer -to the `npm.bin` member. +to the `npm.bin` property. diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm-cache.md nodejs-0.11.15/deps/npm/doc/api/npm-cache.md --- nodejs-0.11.13/deps/npm/doc/api/npm-cache.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm-cache.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,30 @@ +npm-cache(3) -- manage the npm cache programmatically +===================================================== + +## SYNOPSIS + + npm.commands.cache([args], callback) + + // helpers + npm.commands.cache.clean([args], callback) + npm.commands.cache.add([args], callback) + npm.commands.cache.read(name, version, forceBypass, callback) + +## DESCRIPTION + +This acts much the same ways as the npm-cache(1) command line +functionality. + +The callback is called with the package.json data of the thing that is +eventually added to or read from the cache. + +The top level `npm.commands.cache(...)` functionality is a public +interface, and like all commands on the `npm.commands` object, it will +match the command line behavior exactly. 
+ +However, the cache folder structure and the cache helper functions are +considered **internal** API surface, and as such, may change in future +releases of npm, potentially without warning or significant version +incrementation. + +Use at your own risk. diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm-help-search.md nodejs-0.11.15/deps/npm/doc/api/npm-help-search.md --- nodejs-0.11.13/deps/npm/doc/api/npm-help-search.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm-help-search.md 2015-01-20 21:22:17.000000000 +0000 @@ -27,4 +27,4 @@ * file: Name of the file that matched -The silent parameter is not neccessary not used, but it may in the future. +The silent parameter is not necessarily used, but it may be in the future. diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm-link.md nodejs-0.11.15/deps/npm/doc/api/npm-link.md --- nodejs-0.11.13/deps/npm/doc/api/npm-link.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm-link.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,8 +3,8 @@ ## SYNOPSIS - npm.command.link(callback) - npm.command.link(packages, callback) + npm.commands.link(callback) + npm.commands.link(packages, callback) ## DESCRIPTION diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm-load.md nodejs-0.11.15/deps/npm/doc/api/npm-load.md --- nodejs-0.11.13/deps/npm/doc/api/npm-load.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm-load.md 2015-01-20 21:22:17.000000000 +0000 @@ -10,9 +10,9 @@ npm.load() must be called before any other function call. Both parameters are optional, but the second is recommended. -The first parameter is an object hash of command-line config params, and the -second parameter is a callback that will be called when npm is loaded and -ready to serve. +The first parameter is an object containing command-line config params, and the +second parameter is a callback that will be called when npm is loaded and ready +to serve. 
The first parameter should follow a similar structure as the package.json config object. diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm.md nodejs-0.11.15/deps/npm/doc/api/npm.md --- nodejs-0.11.13/deps/npm/doc/api/npm.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm.md 2015-01-20 21:22:17.000000000 +0000 @@ -25,13 +25,12 @@ To find documentation of the command line client, see `npm(1)`. -Prior to using npm's commands, `npm.load()` must be called. -If you provide `configObject` as an object hash of top-level -configs, they override the values stored in the various config -locations. In the npm command line client, this set of configs -is parsed from the command line options. Additional configuration -params are loaded from two configuration files. See `npm-config(1)`, -`npm-config(7)`, and `npmrc(5)` for more information. +Prior to using npm's commands, `npm.load()` must be called. If you provide +`configObject` as an object map of top-level configs, they override the values +stored in the various config locations. In the npm command line client, this +set of configs is parsed from the command line options. Additional +configuration params are loaded from two configuration files. See +`npm-config(1)`, `npm-config(7)`, and `npmrc(5)` for more information. After that, each of the functions are accessible in the commands object: `npm.commands.`. See `npm-index(7)` for a list of @@ -88,9 +87,9 @@ ## MAGIC -For each of the methods in the `npm.commands` hash, a method is added to -the npm object, which takes a set of positional string arguments rather -than an array and a callback. +For each of the methods in the `npm.commands` object, a method is added to the +npm object, which takes a set of positional string arguments rather than an +array and a callback. If the last argument is a callback, then it will use the supplied callback. 
However, if no callback is provided, then it will print out diff -Nru nodejs-0.11.13/deps/npm/doc/api/npm-submodule.md nodejs-0.11.15/deps/npm/doc/api/npm-submodule.md --- nodejs-0.11.13/deps/npm/doc/api/npm-submodule.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/api/npm-submodule.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -npm-submodule(3) -- Add a package as a git submodule -==================================================== - -## SYNOPSIS - - npm.commands.submodule(packages, callback) - -## DESCRIPTION - -For each package specified, npm will check if it has a git repository url -in its package.json description then add it as a git submodule at -`node_modules/`. - -This is a convenience only. From then on, it's up to you to manage -updates by using the appropriate git commands. npm will stubbornly -refuse to update, modify, or remove anything with a `.git` subfolder -in it. - -This command also does not install missing dependencies, if the package -does not include them in its git repository. If `npm ls` reports that -things are missing, you can either install, link, or submodule them yourself, -or you can do `npm explore -- npm install` to install the -dependencies into the submodule folder. - -## SEE ALSO - -* npm help json -* git help submodule diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-adduser.md nodejs-0.11.15/deps/npm/doc/cli/npm-adduser.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-adduser.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-adduser.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,30 +3,62 @@ ## SYNOPSIS - npm adduser + npm adduser [--registry=url] [--scope=@orgname] [--always-auth] ## DESCRIPTION -Create or verify a user named `` in the npm registry, and -save the credentials to the `.npmrc` file. +Create or verify a user named `` in the specified registry, and +save the credentials to the `.npmrc` file. 
If no registry is specified, +the default registry will be used (see `npm-config(7)`). The username, password, and email are read in from prompts. You may use this command to change your email address, but not username or password. -To reset your password, go to +To reset your password, go to You may use this command multiple times with the same user account to authorize on a new machine. +`npm login` is an alias to `adduser` and behaves exactly the same way. + ## CONFIGURATION ### registry Default: http://registry.npmjs.org/ -The base URL of the npm package registry. +The base URL of the npm package registry. If `scope` is also specified, +this registry will only be used for packages with that scope. See `npm-scope(7)`. + +### scope + +Default: none + +If specified, the user and login credentials given will be associated +with the specified scope. See `npm-scope(7)`. You can use both at the same time, +e.g. + + npm adduser --registry=http://myregistry.example.com --scope=@myco + +This will set a registry for the given scope and login or create a user for +that registry at the same time. + +### always-auth + +Default: false + +If specified, save configuration indicating that all requests to the given +registry should include authorization information. Useful for private +registries. Can be used with `--registry` and / or `--scope`, e.g. + + npm adduser --registry=http://private-registry.example.com --always-auth + +This will ensure that all requests to that registry (including for tarballs) +include an authorization header. See `always-auth` in `npm-config(7)` for more +details on always-auth. Registry-specific configuaration of `always-auth` takes +precedence over any global configuration. 
## SEE ALSO diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-cache.md nodejs-0.11.15/deps/npm/doc/cli/npm-cache.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-cache.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-cache.md 2015-01-20 21:22:17.000000000 +0000 @@ -37,16 +37,14 @@ For each package that is added to the cache, three pieces of information are stored in `{cache}/{name}/{version}`: -* .../package/: - A folder containing the package contents as they appear in the tarball. -* .../package.json: - The package.json file, as npm sees it, with overlays applied and a _id attribute. +* .../package/package.json: + The package.json file, as npm sees it. * .../package.tgz: The tarball for that version. Additionally, whenever a registry request is made, a `.cache.json` file is placed at the corresponding URI, to store the ETag and the requested -data. +data. This is stored in `{cache}/{hostname}/{path}/.cache.json`. Commands that make non-essential registry requests (such as `search` and `view`, or the completion scripts) generally specify a minimum timeout. 
diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-explore.md nodejs-0.11.15/deps/npm/doc/cli/npm-explore.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-explore.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-explore.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## SYNOPSIS - npm explore [@] [ -- ] + npm explore [ -- ] ## DESCRIPTION @@ -32,7 +32,6 @@ ## SEE ALSO -* npm-submodule(1) * npm-folders(5) * npm-edit(1) * npm-rebuild(1) diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-init.md nodejs-0.11.15/deps/npm/doc/cli/npm-init.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-init.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-init.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## SYNOPSIS - npm init + npm init [-f|--force|-y|--yes] ## DESCRIPTION @@ -18,6 +18,9 @@ It is strictly additive, so it does not delete options from your package.json without a really good reason to do so. +If you invoke it with `-f`, `--force`, `-y`, or `--yes`, it will use only +defaults and not prompt you for any options. + ## SEE ALSO * diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-install.md nodejs-0.11.15/deps/npm/doc/cli/npm-install.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-install.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-install.md 2015-01-20 21:22:17.000000000 +0000 @@ -7,10 +7,10 @@ npm install npm install npm install - npm install [--save|--save-dev|--save-optional] [--save-exact] - npm install @ - npm install @ - npm install @ + npm install [@/] [--save|--save-dev|--save-optional] [--save-exact] + npm install [@/]@ + npm install [@/]@ + npm install [@/]@ npm i (with any of the previous argument usage) ## DESCRIPTION @@ -70,7 +70,7 @@ npm install https://github.com/indexzero/forever/tarball/v0.5.6 -* `npm install [--save|--save-dev|--save-optional]`: +* `npm install [@/] [--save|--save-dev|--save-optional]`: Do a `@` install, where `` is the "tag" config. (See `npm-config(7)`.) 
@@ -98,9 +98,19 @@ exact version rather than using npm's default semver range operator. + `` is optional. The package will be downloaded from the registry + associated with the specified scope. If no registry is associated with + the given scope the default registry is assumed. See `npm-scope(7)`. + + Note: if you do not include the @-symbol on your scope name, npm will + interpret this as a GitHub repository instead, see below. Scopes names + must also be followed by a slash. + Examples: npm install sax --save + npm install githubname/reponame + npm install @myorg/privatepackage npm install node-tap --save-dev npm install dtrace-provider --save-optional npm install readable-stream --save --save-exact @@ -110,7 +120,7 @@ working directory, then it will try to install that, and only try to fetch the package by name if it is not valid. -* `npm install @`: +* `npm install [@/]@`: Install the version of the package that is referenced by the specified tag. If the tag does not exist in the registry data for that package, then this @@ -119,17 +129,19 @@ Example: npm install sax@latest + npm install @myorg/mypackage@latest -* `npm install @`: +* `npm install [@/]@`: - Install the specified version of the package. This will fail if the version - has not been published to the registry. + Install the specified version of the package. This will fail if the + version has not been published to the registry. Example: npm install sax@0.1.1 + npm install @myorg/privatepackage@1.5.0 -* `npm install @`: +* `npm install [@/]@`: Install a version of the package matching the specified version range. This will follow the same rules for resolving dependencies described in `package.json(5)`. @@ -140,6 +152,19 @@ Example: npm install sax@">=0.1.0 <0.2.0" + npm install @myorg/privatepackage@">=0.1.0 <0.2.0" + +* `npm install /`: + + Install the package at `https://github.com/githubname/githubrepo" by + attempting to clone it using `git`. 
+ + Example: + + npm install mygithubuser/myproject + + To reference a package in a git repo that is not on GitHub, see git + remote urls below. * `npm install `: diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-link.md nodejs-0.11.15/deps/npm/doc/cli/npm-link.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-link.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-link.md 2015-01-20 21:22:17.000000000 +0000 @@ -4,7 +4,7 @@ ## SYNOPSIS npm link (in package folder) - npm link + npm link [@/] npm ln (with any of the previous argument usage) ## DESCRIPTION @@ -12,7 +12,8 @@ Package linking is a two-step process. First, `npm link` in a package folder will create a globally-installed -symbolic link from `prefix/package-name` to the current folder. +symbolic link from `prefix/package-name` to the current folder (see +`npm-config(7)` for the value of `prefix`). Next, in some other location, `npm link package-name` will create a symlink from the local `node_modules` folder to the global symlink. @@ -20,12 +21,14 @@ Note that `package-name` is taken from `package.json`, not from directory name. +The package name can be optionally prefixed with a scope. See `npm-scope(7)`. +The scope must by preceded by an @-symbol and followed by a slash. + When creating tarballs for `npm publish`, the linked packages are "snapshotted" to their current state by resolving the symbolic links. -This is -handy for installing your own stuff, so that you can work on it and test it -iteratively without having to continually rebuild. +This is handy for installing your own stuff, so that you can work on it and +test it iteratively without having to continually rebuild. For example: @@ -51,6 +54,11 @@ That is, it first creates a global link, and then links the global installation target into your project's `node_modules` folder. +If your linked package is scoped (see `npm-scope(7)`) your link command must +include that scope, e.g. 
+ + npm link @myorg/privatepackage + ## SEE ALSO * npm-developers(7) diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-ls.md nodejs-0.11.15/deps/npm/doc/cli/npm-ls.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-ls.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-ls.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,10 +3,10 @@ ## SYNOPSIS - npm list [ ...] - npm ls [ ...] - npm la [ ...] - npm ll [ ...] + npm list [[@/] ...] + npm ls [[@/] ...] + npm la [[@/] ...] + npm ll [[@/] ...] ## DESCRIPTION diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm.md nodejs-0.11.15/deps/npm/doc/cli/npm.md --- nodejs-0.11.13/deps/npm/doc/cli/npm.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm.md 2015-01-20 21:22:17.000000000 +0000 @@ -31,6 +31,22 @@ Use the `npm search` command to show everything that's available. Use `npm ls` to show everything you've installed. +## DEPENDENCIES + +If a package references to another package with a git URL, npm depends +on a preinstalled git. + +If one of the packages npm tries to install is a native node module and +requires compiling of C++ Code, npm will use +[node-gyp](https://github.com/TooTallNate/node-gyp) for that task. +For a Unix system, [node-gyp](https://github.com/TooTallNate/node-gyp) +needs Python, make and a buildchain like GCC. On Windows, +Python and Microsoft Visual Studio C++ is needed. Python 3 is +not supported by [node-gyp](https://github.com/TooTallNate/node-gyp). +For more information visit +[the node-gyp repository](https://github.com/TooTallNate/node-gyp) and +the [node-gyp Wiki](https://github.com/TooTallNate/node-gyp/wiki). + ## DIRECTORIES See `npm-folders(5)` to learn about where npm puts stuff. 
diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-prefix.md nodejs-0.11.15/deps/npm/doc/cli/npm-prefix.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-prefix.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-prefix.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,11 +3,15 @@ ## SYNOPSIS - npm prefix + npm prefix [-g] ## DESCRIPTION -Print the prefix to standard out. +Print the local prefix to standard out. This is the closest parent directory +to contain a package.json file unless `-g` is also specified. + +If `-g` is specified, this will be the value of the global prefix. See +`npm-config(7)` for more detail. ## SEE ALSO diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-publish.md nodejs-0.11.15/deps/npm/doc/cli/npm-publish.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-publish.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-publish.md 2015-01-20 21:22:17.000000000 +0000 @@ -9,7 +9,13 @@ ## DESCRIPTION -Publishes a package to the registry so that it can be installed by name. +Publishes a package to the registry so that it can be installed by name. See +`npm-developers(7)` for details on what's included in the published package, as +well as details on how the package is built. + +By default npm will publish to the public registry. This can be overridden by +specifying a different default registry or using a `npm-scope(7)` in the name +(see `package.json(5)`). * ``: A folder containing a package.json file @@ -24,7 +30,7 @@ and `npm install` installs the `latest` tag. Fails if the package name and version combination already exists in -the registry. +the specified registry. 
Once a package is published with a given name and version, that specific name and version combination can never be used again, even if diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-restart.md nodejs-0.11.15/deps/npm/doc/cli/npm-restart.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-restart.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-restart.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,15 +3,12 @@ ## SYNOPSIS - npm restart + npm restart [-- ] ## DESCRIPTION -This runs a package's "restart" script, if one was provided. -Otherwise it runs package's "stop" script, if one was provided, and then -the "start" script. - -If no version is specified, then it restarts the "active" version. +This runs a package's "restart" script, if one was provided. Otherwise it runs +package's "stop" script, if one was provided, and then the "start" script. ## SEE ALSO diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-run-script.md nodejs-0.11.15/deps/npm/doc/cli/npm-run-script.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-run-script.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-run-script.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,17 +3,29 @@ ## SYNOPSIS - npm run-script [] + npm run-script [command] [-- ] + npm run [command] [-- ] ## DESCRIPTION This runs an arbitrary command from a package's `"scripts"` object. If no package name is provided, it will search for a `package.json` -in the current folder and use its `"scripts"` object. +in the current folder and use its `"scripts"` object. If no `"command"` +is provided, it will list the available top level scripts. It is used by the test, start, restart, and stop commands, but can be called directly, as well. +As of [`npm@2.0.0`](http://blog.npmjs.org/post/98131109725/npm-2-0-0), you can +use custom arguments when executing scripts. The special option `--` is used by +[getopt](http://goo.gl/KxMmtG) to delimit the end of the options. 
npm will pass +all the arguments after the `--` directly to your script: + + npm run test -- --grep="pattern" + +The arguments will only be passed to the script specified after ```npm run``` +and not to any pre or post script. + ## SEE ALSO * npm-scripts(7) diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-shrinkwrap.md nodejs-0.11.15/deps/npm/doc/cli/npm-shrinkwrap.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-shrinkwrap.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-shrinkwrap.md 2015-01-20 21:22:17.000000000 +0000 @@ -163,15 +163,6 @@ ### Caveats -Shrinkwrap files only lock down package versions, not actual package -contents. While discouraged, a package author can republish an -existing version of a package, causing shrinkwrapped packages using -that version to pick up different code than they were before. If you -want to avoid any risk that a byzantine author replaces a package -you're using with code that breaks your application, you could modify -the shrinkwrap file to use git URL references rather than version -numbers so that npm always fetches all packages from git. 
- If you wish to lock down the specific bytes included in a package, for example to have 100% confidence in being able to reproduce a deployment or build, then you ought to check your dependencies into diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-start.md nodejs-0.11.15/deps/npm/doc/cli/npm-start.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-start.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-start.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## SYNOPSIS - npm start + npm start [-- ] ## DESCRIPTION diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-stop.md nodejs-0.11.15/deps/npm/doc/cli/npm-stop.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-stop.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-stop.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## SYNOPSIS - npm stop + npm stop [-- ] ## DESCRIPTION diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-submodule.md nodejs-0.11.15/deps/npm/doc/cli/npm-submodule.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-submodule.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-submodule.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -npm-submodule(1) -- Add a package as a git submodule -==================================================== - -## SYNOPSIS - - npm submodule - -## DESCRIPTION - -If the specified package has a git repository url in its package.json -description, then this command will add it as a git submodule at -`node_modules/`. - -This is a convenience only. From then on, it's up to you to manage -updates by using the appropriate git commands. npm will stubbornly -refuse to update, modify, or remove anything with a `.git` subfolder -in it. - -This command also does not install missing dependencies, if the package -does not include them in its git repository. 
If `npm ls` reports that -things are missing, you can either install, link, or submodule them yourself, -or you can do `npm explore -- npm install` to install the -dependencies into the submodule folder. - -## SEE ALSO - -* package.json(5) -* git help submodule diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-test.md nodejs-0.11.15/deps/npm/doc/cli/npm-test.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-test.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-test.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,8 +3,8 @@ ## SYNOPSIS - npm test - npm tst + npm test [-- ] + npm tst [-- ] ## DESCRIPTION diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-uninstall.md nodejs-0.11.15/deps/npm/doc/cli/npm-uninstall.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-uninstall.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-uninstall.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## SYNOPSIS - npm uninstall [--save|--save-dev|--save-optional] + npm uninstall [@/] [--save|--save-dev|--save-optional] npm rm (with any of the previous argument usage) ## DESCRIPTION @@ -27,9 +27,12 @@ * `--save-optional`: Package will be removed from your `optionalDependencies`. +Scope is optional and follows the usual rules for `npm-scope(7)`. + Examples: npm uninstall sax --save + npm uninstall @myorg/privatepackage --save npm uninstall node-tap --save-dev npm uninstall dtrace-provider --save-optional diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-unpublish.md nodejs-0.11.15/deps/npm/doc/cli/npm-unpublish.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-unpublish.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-unpublish.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## SYNOPSIS - npm unpublish [@] + npm unpublish [@/][@] ## WARNING @@ -27,6 +27,8 @@ version combination can never be reused. In order to publish the package again, a new version number must be used. 
+The scope is optional and follows the usual rules for `npm-scope(7)`. + ## SEE ALSO * npm-deprecate(1) diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-update.md nodejs-0.11.15/deps/npm/doc/cli/npm-update.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-update.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-update.md 2015-01-20 21:22:17.000000000 +0000 @@ -12,8 +12,11 @@ It will also install missing packages. -If the `-g` flag is specified, this command will update globally installed packages. -If no package name is specified, all packages in the specified location (global or local) will be updated. +If the `-g` flag is specified, this command will update globally installed +packages. + +If no package name is specified, all packages in the specified location (global +or local) will be updated. ## SEE ALSO diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-version.md nodejs-0.11.15/deps/npm/doc/cli/npm-version.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-version.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-version.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,17 +3,17 @@ ## SYNOPSIS - npm version [ | major | minor | patch] + npm version [ | major | minor | patch | premajor | preminor | prepatch | prerelease] ## DESCRIPTION Run this in a package directory to bump the version and write the new data back to the package.json file. -The `newversion` argument should be a valid semver string, *or* a valid -second argument to semver.inc (one of "patch", "minor", or -"major"). In the second case, the existing version will be incremented -by 1 in the specified field. +The `newversion` argument should be a valid semver string, *or* a +valid second argument to semver.inc (one of "patch", "minor", "major", +"prepatch", "preminor", "premajor", "prerelease"). In the second case, +the existing version will be incremented by 1 in the specified field. 
If run in a git repo, it will also create a version commit and tag, and fail if the repo is not clean. diff -Nru nodejs-0.11.13/deps/npm/doc/cli/npm-view.md nodejs-0.11.15/deps/npm/doc/cli/npm-view.md --- nodejs-0.11.13/deps/npm/doc/cli/npm-view.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/cli/npm-view.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,8 +3,8 @@ ## SYNOPSIS - npm view [@] [[.]...] - npm v [@] [[.]...] + npm view [@/][@] [[.]...] + npm v [@/][@] [[.]...] ## DESCRIPTION diff -Nru nodejs-0.11.13/deps/npm/doc/files/npm-folders.md nodejs-0.11.15/deps/npm/doc/files/npm-folders.md --- nodejs-0.11.13/deps/npm/doc/files/npm-folders.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/files/npm-folders.md 2015-01-20 21:22:17.000000000 +0000 @@ -42,6 +42,12 @@ Global installs on Windows go to `{prefix}/node_modules` (that is, no `lib` folder.) +Scoped packages are installed the same way, except they are grouped together +in a sub-folder of the relevant `node_modules` folder with the name of that +scope prefix by the @ symbol, e.g. `npm install @myorg/package` would place +the package in `{prefix}/node_modules/@myorg/package`. See `scopes(7)` for +more details. + If you wish to `require()` a package, then install it locally. ### Executables diff -Nru nodejs-0.11.13/deps/npm/doc/files/npmrc.md nodejs-0.11.15/deps/npm/doc/files/npmrc.md --- nodejs-0.11.13/deps/npm/doc/files/npmrc.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/files/npmrc.md 2015-01-20 21:22:17.000000000 +0000 @@ -13,8 +13,9 @@ ## FILES -The three relevant files are: +The four relevant files are: +* per-project config file (/path/to/my/project/.npmrc) * per-user config file (~/.npmrc) * global config file ($PREFIX/npmrc) * npm builtin config file (/path/to/npm/npmrc) @@ -29,6 +30,17 @@ priority order. For example, a setting in the userconfig file would override the setting in the globalconfig file. 
+### Per-project config file + +When working locally in a project, a `.npmrc` file in the root of the +project (ie, a sibling of `node_modules` and `package.json`) will set +config values specific to this project. + +Note that this only applies to the root of the project that you're +running npm in. It has no effect when your module is published. For +example, you can't publish a module that forces itself to install +globally, or in a different location. + ### Per-user config file `$HOME/.npmrc` (or the `userconfig` param, if set in the environment diff -Nru nodejs-0.11.13/deps/npm/doc/files/package.json.md nodejs-0.11.15/deps/npm/doc/files/package.json.md --- nodejs-0.11.13/deps/npm/doc/files/package.json.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/files/package.json.md 2015-01-20 21:22:17.000000000 +0000 @@ -30,6 +30,9 @@ * You may want to check the npm registry to see if there's something by that name already, before you get too attached to it. http://registry.npmjs.org/ +A name can be optionally prefixed by a scope, e.g. `@myorg/mypackage`. See +`npm-scope(7)` for more detail. + ## version The *most* important things in your package.json are the name and version fields. @@ -216,7 +219,7 @@ The CommonJS [Packages](http://wiki.commonjs.org/wiki/Packages/1.0) spec details a few ways that you can indicate the structure of your package using a `directories` -hash. If you look at [npm's package.json](http://registry.npmjs.org/npm/latest), +object. If you look at [npm's package.json](http://registry.npmjs.org/npm/latest), you'll see that it has directories for doc, lib, and man. In the future, this information may be used in other creative ways. @@ -228,10 +231,10 @@ ### directories.bin -If you specify a "bin" directory, then all the files in that folder will -be used as the "bin" hash. +If you specify a `bin` directory, then all the files in that folder will +be added as children of the `bin` path. 
-If you have a "bin" hash already, then this has no effect. +If you have a `bin` path already, then this has no effect. ### directories.man @@ -271,7 +274,7 @@ ## scripts -The "scripts" member is an object hash of script commands that are run +The "scripts" property is a dictionary containing script commands that are run at various times in the lifecycle of your package. The key is the lifecycle event, and the value is the command to run at that point. @@ -279,9 +282,9 @@ ## config -A "config" hash can be used to set configuration -parameters used in package scripts that persist across upgrades. For -instance, if a package had the following: +A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades. For instance, if a package had the +following: { "name" : "foo" , "config" : { "port" : "8080" } } @@ -295,13 +298,13 @@ ## dependencies -Dependencies are specified with a simple hash of package name to +Dependencies are specified in a simple object that maps a package name to a version range. The version range is a string which has one or more -space-separated descriptors. Dependencies can also be identified with -a tarball or git URL. +space-separated descriptors. Dependencies can also be identified with a +tarball or git URL. **Please do not put test harnesses or transpilers in your -`dependencies` hash.** See `devDependencies`, below. +`dependencies` object.** See `devDependencies`, below. See semver(7) for more details about specifying version ranges. @@ -320,6 +323,8 @@ * `range1 || range2` Passes if either range1 or range2 are satisfied. 
* `git...` See 'Git URLs as Dependencies' below * `user/repo` See 'GitHub URLs' below +* `tag` A specific version tagged and published as `tag` See `npm-tag(1)` +* `path/path/path` See Local Paths below For example, these are all valid: @@ -334,6 +339,8 @@ , "elf" : "~1.2.3" , "two" : "2.x" , "thr" : "3.3.x" + , "lat" : "latest" + , "dyl" : "file:../dyl" } } @@ -369,14 +376,40 @@ } } +## Local Paths + +As of version 2.0.0 you can provide a path to a local directory that contains a +package. Local paths can be saved using `npm install --save`, using any of +these forms: + + ../foo/bar + ~/foo/bar + ./foo/bar + /foo/bar + +in which case they will be normalized to a relative path and added to your +`package.json`. For example: + + { + "name": "baz", + "dependencies": { + "bar": "file:../foo/bar" + } + } + +This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry. + ## devDependencies If someone is planning on downloading and using your module in their program, then they probably don't want or need to download and build the external test or documentation framework that you use. -In this case, it's best to list these additional items in a -`devDependencies` hash. +In this case, it's best to map these additional items in a `devDependencies` +object. These things will be installed when doing `npm link` or `npm install` from the root of a package, and can be managed like any other npm @@ -447,11 +480,11 @@ ## optionalDependencies -If a dependency can be used, but you would like npm to proceed if it -cannot be found or fails to install, then you may put it in the -`optionalDependencies` hash. This is a map of package name to version -or url, just like the `dependencies` hash. The difference is that -failure is tolerated. 
+If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the `optionalDependencies` +object. This is a map of package name to version or url, just like the +`dependencies` object. The difference is that build failures do not cause +installation to fail. It is still your program's responsibility to handle the lack of the dependency. For example, something like this: @@ -499,12 +532,12 @@ ## engineStrict If you are sure that your module will *definitely not* run properly on -versions of Node/npm other than those specified in the `engines` hash, +versions of Node/npm other than those specified in the `engines` object, then you can set `"engineStrict": true` in your package.json file. This will override the user's `engine-strict` config setting. Please do not do this unless you are really very very sure. If your -engines hash is something overly restrictive, you can quite easily and +engines object is something overly restrictive, you can quite easily and inadvertently lock yourself into obscurity and prevent your users from updating to new versions of Node. Consider this choice carefully. If people abuse it, it will be removed in a future version of npm. @@ -553,11 +586,11 @@ If you set `"private": true` in your package.json, then npm will refuse to publish it. -This is a way to prevent accidental publication of private repositories. -If you would like to ensure that a given package is only ever published -to a specific registry (for example, an internal registry), -then use the `publishConfig` hash described below -to override the `registry` config param at publish-time. +This is a way to prevent accidental publication of private repositories. If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +`publishConfig` dictionary described below to override the `registry` config +param at publish-time. 
## publishConfig diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-coding-style.md nodejs-0.11.15/deps/npm/doc/misc/npm-coding-style.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-coding-style.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-coding-style.md 2015-01-20 21:22:17.000000000 +0000 @@ -147,7 +147,7 @@ ## Case, naming, etc. Use `lowerCamelCase` for multiword identifiers when they refer to objects, -functions, methods, members, or anything not specified in this section. +functions, methods, properties, or anything not specified in this section. Use `UpperCamelCase` for class names (things that you'd pass to "new"). diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-config.md nodejs-0.11.15/deps/npm/doc/misc/npm-config.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-config.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-config.md 2015-01-20 21:22:17.000000000 +0000 @@ -24,8 +24,9 @@ ### npmrc Files -The three relevant files are: +The four relevant files are: +* per-project config file (/path/to/my/project/.npmrc) * per-user config file (~/.npmrc) * global config file ($PREFIX/npmrc) * npm builtin config file (/path/to/npm/npmrc) @@ -49,6 +50,7 @@ * `-dd`, `--verbose`: `--loglevel verbose` * `-ddd`: `--loglevel silly` * `-g`: `--global` +* `-C`: `--prefix` * `-l`: `--long` * `-m`: `--message` * `-p`, `--porcelain`: `--parseable` @@ -145,6 +147,15 @@ See also the `strict-ssl` config. +### cafile + +* Default: `null` +* Type: path + +A path to a file containing one or multiple Certificate Authority signing +certificates. Similar to the `ca` setting, but allows for multiple CA's, as +well as for the CA information to be stored in a file on disk. + ### cache * Default: Windows: `%AppData%\npm-cache`, Posix: `~/.npm` @@ -243,12 +254,6 @@ The command to run for `npm edit` or `npm config edit`. -### email - -The email of the logged-in user. - -Set by the `npm adduser` command. Should not be set explicitly. 
- ### engine-strict * Default: false @@ -379,34 +384,42 @@ [init-package-json](https://github.com/isaacs/init-package-json) module for more information, or npm-init(1). -### init.author.name +### init-author-name * Default: "" * Type: String The value `npm init` should use by default for the package author's name. -### init.author.email +### init-author-email * Default: "" * Type: String The value `npm init` should use by default for the package author's email. -### init.author.url +### init-author-url * Default: "" * Type: String The value `npm init` should use by default for the package author's homepage. -### init.license +### init-license * Default: "ISC" * Type: String The value `npm init` should use by default for the package license. +### init-version + +* Default: "0.0.0" +* Type: semver + +The value that `npm init` should use by default for the package +version number, if not already set in package.json. + ### json * Default: false @@ -451,15 +464,15 @@ ### loglevel -* Default: "http" +* Default: "warn" * Type: String -* Values: "silent", "win", "error", "warn", "http", "info", "verbose", "silly" +* Values: "silent", "error", "warn", "http", "info", "verbose", "silly" What level of logs to report. On failure, *all* logs are written to `npm-debug.log` in the current working directory. Any logs of a higher level than the setting are shown. -The default is "http", which shows http, warn, and error output. +The default is "warn", which shows warn and error output. ### logstream @@ -497,7 +510,7 @@ * Default: process.version * Type: semver or false -The node version to use when checking package's "engines" hash. +The node version to use when checking a package's `engines` map. ### npat @@ -519,7 +532,7 @@ * Default: true * Type: Boolean -Attempt to install packages in the `optionalDependencies` hash. Note +Attempt to install packages in the `optionalDependencies` object. 
Note that if these packages fail to install, the overall installation process is not aborted. @@ -597,8 +610,8 @@ Save installed packages to a package.json file as dependencies. -When used with the `npm rm` command, it removes it from the dependencies -hash. +When used with the `npm rm` command, it removes it from the `dependencies` +object. Only works if there is already a package.json file present. @@ -619,10 +632,10 @@ * Default: false * Type: Boolean -Save installed packages to a package.json file as devDependencies. +Save installed packages to a package.json file as `devDependencies`. When used with the `npm rm` command, it removes it from the -devDependencies hash. +`devDependencies` object. Only works if there is already a package.json file present. @@ -644,7 +657,7 @@ optionalDependencies. When used with the `npm rm` command, it removes it from the -devDependencies hash. +`devDependencies` object. Only works if there is already a package.json file present. @@ -653,14 +666,25 @@ * Default: '^' * Type: String -Configure how versions of packages installed to a package.json file via +Configure how versions of packages installed to a package.json file via `--save` or `--save-dev` get prefixed. For example if a package has version `1.2.3`, by default it's version is -set to `^1.2.3` which allows minor upgrades for that package, but after +set to `^1.2.3` which allows minor upgrades for that package, but after `npm config set save-prefix='~'` it would be set to `~1.2.3` which only allows patch upgrades. +### scope + +* Default: "" +* Type: String + +Associate an operation with a scope for a scoped registry. Useful when logging +in to a private registry for the first time: +`npm login --scope=@organization --registry=registry.organization.com`, which +will cause `@organization` to be mapped to the registry for future installation +of packages specified according to the pattern `@organization/package`. 
+ ### searchopts * Default: "" @@ -712,6 +736,17 @@ Note that git requires you to have set up GPG keys in your git configs for this to work properly. +### spin + +* Default: true +* Type: Boolean or `"always"` + +When set to `true`, npm will display an ascii spinner while it is doing +things, if `process.stderr` is a TTY. + +Set to `false` to suppress the spinner, or set to `always` to output +the spinner even for non-TTY outputs. + ### strict-ssl * Default: true @@ -773,13 +808,6 @@ The UID to set to when running package scripts as root. -### username - -* Default: null -* Type: String - -The username on the npm registry. Set with `npm adduser` - ### userconfig * Default: ~/.npmrc @@ -820,8 +848,8 @@ * Default: false * Type: boolean -If true, output the npm version as well as node's `process.versions` -hash, and exit successfully. +If true, output the npm version as well as node's `process.versions` map, and +exit successfully. Only relevant when specified explicitly on the command line. diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-developers.md nodejs-0.11.15/deps/npm/doc/misc/npm-developers.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-developers.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-developers.md 2015-01-20 21:22:17.000000000 +0000 @@ -76,7 +76,7 @@ * scripts: If you have a special compilation or installation script, then you - should put it in the `scripts` hash. You should definitely have at + should put it in the `scripts` object. You should definitely have at least a basic smoke-test command as the "scripts.test" field. See npm-scripts(7). @@ -86,8 +86,8 @@ then you need to specify that in the "main" field. * directories: - This is a hash of folders. The best ones to include are "lib" and - "doc", but if you specify a folder full of man pages in "man", then + This is an object mapping names to folders. 
The best ones to include are + "lib" and "doc", but if you use "man" to specify a folder full of man pages, they'll get installed just like these ones. You can use `npm init` in the root of your package in order to get you diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-faq.md nodejs-0.11.15/deps/npm/doc/misc/npm-faq.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-faq.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-faq.md 2015-01-20 21:22:17.000000000 +0000 @@ -75,18 +75,20 @@ ## Should I check my `node_modules` folder into git? -Mikeal Rogers answered this question very well: +Usually, no. Allow npm to resolve dependencies for your packages. - +For packages you **deploy**, such as websites and apps, +you should use npm shrinkwrap to lock down your full dependency tree: -tl;dr +https://www.npmjs.org/doc/cli/npm-shrinkwrap.html -* Check `node_modules` into git for things you **deploy**, such as - websites and apps. -* Do not check `node_modules` into git for libraries and modules - intended to be reused. -* Use npm to manage dependencies in your dev environment, but not in - your deployment scripts. +If you are paranoid about depending on the npm ecosystem, +you should run a private npm mirror or a private cache. + +If you want 100% confidence in being able to reproduce the specific bytes +included in a deployment, you should use an additional mechanism that can +verify contents rather than versions. For example, +Amazon machine images, DigitalOcean snapshots, Heroku slugs, or simple tarballs. ## Is it 'npm' or 'NPM' or 'Npm'? @@ -133,7 +135,7 @@ ## How do I update npm? - npm update npm -g + npm install npm -g You can also update all outdated local packages by doing `npm update` without any arguments, or global packages by doing `npm update -g`. @@ -258,7 +260,7 @@ configuration settings, so this would be rather difficult to do properly. 
It would have to track every previous value for this config, and always accept any of them, or else yesterday's install may -be broken tomorrow. Complexity hurdle #5. +be broken tomorrow. Complexity hurdle #4. Never going to happen. The folder is named `node_modules`. It is written indelibly in the Node Way, handed down from the ancient times @@ -340,7 +342,7 @@ The npm open source project, The npm Registry, and [the community website](https://www.npmjs.org) are maintained and operated by the -good folks at [npm, Inc.](https://www.npmjs.com) +good folks at [npm, Inc.](http://www.npmjs.com) ## I have a question or request not addressed here. Where should I put it? diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-index.md nodejs-0.11.15/deps/npm/doc/misc/npm-index.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-index.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-index.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,411 +1,419 @@ npm-index(7) -- Index of all npm documentation ============================================== -## README(1) +### README(1) node package manager -# Command Line Documentation +## Command Line Documentation -## npm(1) +Using npm on the command line + +### npm(1) node package manager -## npm-adduser(1) +### npm-adduser(1) Add a registry user account -## npm-bin(1) +### npm-bin(1) Display npm bin folder -## npm-bugs(1) +### npm-bugs(1) Bugs for a package in a web browser maybe -## npm-build(1) +### npm-build(1) Build a package -## npm-bundle(1) +### npm-bundle(1) REMOVED -## npm-cache(1) +### npm-cache(1) Manipulates packages cache -## npm-completion(1) +### npm-completion(1) Tab Completion for npm -## npm-config(1) +### npm-config(1) Manage the npm configuration files -## npm-dedupe(1) +### npm-dedupe(1) Reduce duplication -## npm-deprecate(1) +### npm-deprecate(1) Deprecate a version of a package -## npm-docs(1) +### npm-docs(1) Docs for a package in a web browser maybe -## npm-edit(1) +### npm-edit(1) Edit an installed 
package -## npm-explore(1) +### npm-explore(1) Browse an installed package -## npm-help-search(1) +### npm-help-search(1) Search npm help documentation -## npm-help(1) +### npm-help(1) Get help on npm -## npm-init(1) +### npm-init(1) Interactively create a package.json file -## npm-install(1) +### npm-install(1) Install a package -## npm-link(1) +### npm-link(1) Symlink a package folder -## npm-ls(1) +### npm-ls(1) List installed packages -## npm-outdated(1) +### npm-outdated(1) Check for outdated packages -## npm-owner(1) +### npm-owner(1) Manage package owners -## npm-pack(1) +### npm-pack(1) Create a tarball from a package -## npm-prefix(1) +### npm-prefix(1) Display prefix -## npm-prune(1) +### npm-prune(1) Remove extraneous packages -## npm-publish(1) +### npm-publish(1) Publish a package -## npm-rebuild(1) +### npm-rebuild(1) Rebuild a package -## npm-repo(1) +### npm-repo(1) Open package repository page in the browser -## npm-restart(1) +### npm-restart(1) Start a package -## npm-rm(1) +### npm-rm(1) Remove a package -## npm-root(1) +### npm-root(1) Display npm root -## npm-run-script(1) +### npm-run-script(1) Run arbitrary package scripts -## npm-search(1) +### npm-search(1) Search for packages -## npm-shrinkwrap(1) +### npm-shrinkwrap(1) Lock down dependency versions -## npm-star(1) +### npm-star(1) Mark your favorite packages -## npm-stars(1) +### npm-stars(1) View packages marked as favorites -## npm-start(1) +### npm-start(1) Start a package -## npm-stop(1) +### npm-stop(1) Stop a package -## npm-submodule(1) - -Add a package as a git submodule - -## npm-tag(1) +### npm-tag(1) Tag a published version -## npm-test(1) +### npm-test(1) Test a package -## npm-uninstall(1) +### npm-uninstall(1) Remove a package -## npm-unpublish(1) +### npm-unpublish(1) Remove a package from the registry -## npm-update(1) +### npm-update(1) Update a package -## npm-version(1) +### npm-version(1) Bump a package version -## npm-view(1) +### npm-view(1) View registry info -## 
npm-whoami(1) +### npm-whoami(1) Display npm username -# API Documentation +## API Documentation -## npm(3) +Using npm in your Node programs + +### npm(3) node package manager -## npm-bin(3) +### npm-bin(3) Display npm bin folder -## npm-bugs(3) +### npm-bugs(3) Bugs for a package in a web browser maybe -## npm-commands(3) +### npm-cache(3) + +manage the npm cache programmatically + +### npm-commands(3) npm commands -## npm-config(3) +### npm-config(3) Manage the npm configuration files -## npm-deprecate(3) +### npm-deprecate(3) Deprecate a version of a package -## npm-docs(3) +### npm-docs(3) Docs for a package in a web browser maybe -## npm-edit(3) +### npm-edit(3) Edit an installed package -## npm-explore(3) +### npm-explore(3) Browse an installed package -## npm-help-search(3) +### npm-help-search(3) Search the help pages -## npm-init(3) +### npm-init(3) Interactively create a package.json file -## npm-install(3) +### npm-install(3) install a package programmatically -## npm-link(3) +### npm-link(3) Symlink a package folder -## npm-load(3) +### npm-load(3) Load config settings -## npm-ls(3) +### npm-ls(3) List installed packages -## npm-outdated(3) +### npm-outdated(3) Check for outdated packages -## npm-owner(3) +### npm-owner(3) Manage package owners -## npm-pack(3) +### npm-pack(3) Create a tarball from a package -## npm-prefix(3) +### npm-prefix(3) Display prefix -## npm-prune(3) +### npm-prune(3) Remove extraneous packages -## npm-publish(3) +### npm-publish(3) Publish a package -## npm-rebuild(3) +### npm-rebuild(3) Rebuild a package -## npm-repo(3) +### npm-repo(3) Open package repository page in the browser -## npm-restart(3) +### npm-restart(3) Start a package -## npm-root(3) +### npm-root(3) Display npm root -## npm-run-script(3) +### npm-run-script(3) Run arbitrary package scripts -## npm-search(3) +### npm-search(3) Search for packages -## npm-shrinkwrap(3) +### npm-shrinkwrap(3) programmatically generate package shrinkwrap file -## npm-start(3) 
+### npm-start(3) Start a package -## npm-stop(3) +### npm-stop(3) Stop a package -## npm-submodule(3) - -Add a package as a git submodule - -## npm-tag(3) +### npm-tag(3) Tag a published version -## npm-test(3) +### npm-test(3) Test a package -## npm-uninstall(3) +### npm-uninstall(3) uninstall a package programmatically -## npm-unpublish(3) +### npm-unpublish(3) Remove a package from the registry -## npm-update(3) +### npm-update(3) Update a package -## npm-version(3) +### npm-version(3) Bump a package version -## npm-view(3) +### npm-view(3) View registry info -## npm-whoami(3) +### npm-whoami(3) Display npm username -# Files +## Files + +File system structures npm uses -## npm-folders(5) +### npm-folders(5) Folder Structures Used by npm -## npmrc(5) +### npmrc(5) The npm config files -## package.json(5) +### package.json(5) Specifics of npm's package.json handling -# Misc +## Misc -## npm-coding-style(7) +Various other bits and bobs + +### npm-coding-style(7) npm's "funny" coding style -## npm-config(7) +### npm-config(7) More than you probably want to know about npm configuration -## npm-developers(7) +### npm-developers(7) Developer Guide -## npm-disputes(7) +### npm-disputes(7) Handling Module Name Disputes -## npm-faq(7) +### npm-faq(7) Frequently Asked Questions -## npm-index(7) +### npm-index(7) Index of all npm documentation -## npm-registry(7) +### npm-registry(7) The JavaScript Package Registry -## npm-scripts(7) +### npm-scope(7) + +Scoped packages + +### npm-scripts(7) How npm handles the "scripts" field -## removing-npm(7) +### removing-npm(7) Cleaning the Slate -## semver(7) +### semver(7) The semantic versioner for npm diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-registry.md nodejs-0.11.15/deps/npm/doc/misc/npm-registry.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-registry.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-registry.md 2015-01-20 21:22:17.000000000 +0000 @@ -12,15 +12,14 @@ account information. 
The official public npm registry is at . It -is powered by a CouchDB database at -. The code for the couchapp is -available at . npm user accounts -are CouchDB users, stored in the -database. +is powered by a CouchDB database, of which there is a public mirror at +. The code for the couchapp is +available at . -The registry URL is supplied by the `registry` config parameter. See -`npm-config(1)`, `npmrc(5)`, and `npm-config(7)` for more on managing -npm's configuration. +The registry URL used is determined by the scope of the package (see +`npm-scope(7)`). If no scope is specified, the default registry is used, which is +supplied by the `registry` config parameter. See `npm-config(1)`, +`npmrc(5)`, and `npm-config(7)` for more on managing npm's configuration. ## Can I run my own private registry? diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-scope.md nodejs-0.11.15/deps/npm/doc/misc/npm-scope.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-scope.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-scope.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,84 @@ +npm-scope(7) -- Scoped packages +=============================== + +## DESCRIPTION + +All npm packages have a name. Some package names also have a scope. A scope +follows the usual rules for package names (url-safe characters, no leading dots +or underscores). When used in package names, preceded by an @-symbol and +followed by a slash, e.g. + + @somescope/somepackagename + +Scopes are a way of grouping related packages together, and also affect a few +things about the way npm treats the package. + +**As of 2014-09-03, scoped packages are not supported by the public npm registry**. +However, the npm client is backwards-compatible with un-scoped registries, so +it can be used to work with scoped and un-scoped registries at the same time. + +## Installing scoped packages + +Scoped packages are installed to a sub-folder of the regular installation +folder, e.g. 
if your other packages are installed in `node_modules/packagename`, +scoped modules will be in `node_modules/@myorg/packagename`. The scope folder +(`@myorg`) is simply the name of the scope preceded by an @-symbol, and can +contain any number of scoped packages. + +A scoped package is installed by referencing it by name, preceded by an +@-symbol, in `npm install`: + + npm install @myorg/mypackage + +Or in `package.json`: + + "dependencies": { + "@myorg/mypackage": "^1.3.0" + } + +Note that if the @-symbol is omitted in either case npm will instead attempt to +install from GitHub; see `npm-install(1)`. + +## Requiring scoped packages + +Because scoped packages are installed into a scope folder, you have to +include the name of the scope when requiring them in your code, e.g. + + require('@myorg/mypackage') + +There is nothing special about the way Node treats scope folders, this is +just specifying to require the module `mypackage` in the folder called `@myorg`. + +## Publishing scoped packages + +Scoped packages can be published to any registry that supports them. +*As of 2014-09-03, the public npm registry does not support scoped packages*, +so attempting to publish a scoped package to the registry will fail unless +you have associated that scope with a different registry, see below. + +## Associating a scope with a registry + +Scopes can be associated with a separate registry. This allows you to +seamlessly use a mix of packages from the public npm registry and one or more +private registries, such as npm Enterprise. + +You can associate a scope with a registry at login, e.g. + + npm login --registry=http://reg.example.com --scope=@myco + +Scopes have a many-to-one relationship with registries: one registry can +host multiple scopes, but a scope only ever points to one registry. 
+ +You can also associate a scope with a registry using `npm config`: + + npm config set @myco:registry http://reg.example.com + +Once a scope is associated with a registry, any `npm install` for a package +with that scope will request packages from that registry instead. Any +`npm publish` for a package name that contains the scope will be published to +that registry instead. + +## SEE ALSO + +* npm-install(1) +* npm-publish(1) \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/doc/misc/npm-scripts.md nodejs-0.11.15/deps/npm/doc/misc/npm-scripts.md --- nodejs-0.11.13/deps/npm/doc/misc/npm-scripts.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/npm-scripts.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,7 @@ ## DESCRIPTION -npm supports the "scripts" member of the package.json script, for the +npm supports the "scripts" property of the package.json script, for the following scripts: * prepublish: @@ -33,8 +33,10 @@ Run by the `npm restart` command. Note: `npm restart` will run the stop and start scripts if no `restart` script is provided. -Additionally, arbitrary scripts can be run by doing -`npm run-script `. +Additionally, arbitrary scripts can be executed by running `npm +run-script `. *Pre* and *post* commands with matching +names will be run for those as well (e.g. `premyscript`, `myscript`, +`postmyscript`). ## NOTE: INSTALL SCRIPTS ARE AN ANTIPATTERN @@ -135,7 +137,7 @@ `npm_config_` prefix. For instance, you can view the effective `root` config by checking the `npm_config_root` environment variable. -### Special: package.json "config" hash +### Special: package.json "config" object The package.json "config" keys are overwritten in the environment if there is a config param of `[@]:`. 
For example, diff -Nru nodejs-0.11.13/deps/npm/doc/misc/semver.md nodejs-0.11.15/deps/npm/doc/misc/semver.md --- nodejs-0.11.13/deps/npm/doc/misc/semver.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/doc/misc/semver.md 2015-01-20 21:22:17.000000000 +0000 @@ -34,52 +34,177 @@ ## Versions -A "version" is described by the v2.0.0 specification found at +A "version" is described by the `v2.0.0` specification found at . A leading `"="` or `"v"` character is stripped off and ignored. ## Ranges -The following range styles are supported: +A `version range` is a set of `comparators` which specify versions +that satisfy the range. + +A `comparator` is composed of an `operator` and a `version`. The set +of primitive `operators` is: + +* `<` Less than +* `<=` Less than or equal to +* `>` Greater than +* `>=` Greater than or equal to +* `=` Equal. If no operator is specified, then equality is assumed, + so this operator is optional, but MAY be included. + +For example, the comparator `>=1.2.7` would match the versions +`1.2.7`, `1.2.8`, `2.5.3`, and `1.3.9`, but not the versions `1.2.6` +or `1.1.0`. + +Comparators can be joined by whitespace to form a `comparator set`, +which is satisfied by the **intersection** of all of the comparators +it includes. + +A range is composed of one or more comparator sets, joined by `||`. A +version matches a range if and only if every comparator in at least +one of the `||`-separated comparator sets is satisfied by the version. + +For example, the range `>=1.2.7 <1.3.0` would match the versions +`1.2.7`, `1.2.8`, and `1.2.99`, but not the versions `1.2.6`, `1.3.0`, +or `1.1.0`. + +The range `1.2.7 || >=1.2.9 <2.0.0` would match the versions `1.2.7`, +`1.2.9`, and `1.4.6`, but not the versions `1.2.8` or `2.0.0`. 
+ +### Prerelease Tags + +If a version has a prerelease tag (for example, `1.2.3-alpha.3`) then +it will only be allowed to satisfy comparator sets if at least one +comparator with the same `[major, minor, patch]` tuple also has a +prerelease tag. + +For example, the range `>1.2.3-alpha.3` would be allowed to match the +version `1.2.3-alpha.7`, but it would *not* be satisfied by +`3.4.5-alpha.9`, even though `3.4.5-alpha.9` is technically "greater +than" `1.2.3-alpha.3` according to the SemVer sort rules. The version +range only accepts prerelease tags on the `1.2.3` version. The +version `3.4.5` *would* satisfy the range, because it does not have a +prerelease flag, and `3.4.5` is greater than `1.2.3-alpha.7`. + +The purpose for this behavior is twofold. First, prerelease versions +frequently are updated very quickly, and contain many breaking changes +that are (by the author's design) not yet fit for public consumption. +Therefore, by default, they are excluded from range matching +semantics. + +Second, a user who has opted into using a prerelease version has +clearly indicated the intent to use *that specific* set of +alpha/beta/rc versions. By including a prerelease tag in the range, +the user is indicating that they are aware of the risk. However, it +is still not appropriate to assume that they have opted into taking a +similar risk on the *next* set of prerelease versions. + +### Advanced Range Syntax + +Advanced range syntax desugars to primitive comparators in +deterministic ways. + +Advanced ranges may be combined in the same way as primitive +comparators using white space or `||`. + +#### Hyphen Ranges `X.Y.Z - A.B.C` + +Specifies an inclusive set. -* `1.2.3` A specific version. When nothing else will do. Note that - build metadata is still ignored, so `1.2.3+build2012` will satisfy - this range. -* `>1.2.3` Greater than a specific version. -* `<1.2.3` Less than a specific version. 
If there is no prerelease - tag on the version range, then no prerelease version will be allowed - either, even though these are technically "less than". -* `>=1.2.3` Greater than or equal to. Note that prerelease versions - are NOT equal to their "normal" equivalents, so `1.2.3-beta` will - not satisfy this range, but `2.3.0-beta` will. -* `<=1.2.3` Less than or equal to. In this case, prerelease versions - ARE allowed, so `1.2.3-beta` would satisfy. * `1.2.3 - 2.3.4` := `>=1.2.3 <=2.3.4` -* `~1.2.3` := `>=1.2.3-0 <1.3.0-0` "Reasonably close to 1.2.3". When - using tilde operators, prerelease versions are supported as well, - but a prerelease of the next significant digit will NOT be - satisfactory, so `1.3.0-beta` will not satisfy `~1.2.3`. -* `^1.2.3` := `>=1.2.3-0 <2.0.0-0` "Compatible with 1.2.3". When - using caret operators, anything from the specified version (including - prerelease) will be supported up to, but not including, the next - major version (or its prereleases). `1.5.1` will satisfy `^1.2.3`, - while `1.2.2` and `2.0.0-beta` will not. -* `^0.1.3` := `>=0.1.3-0 <0.2.0-0` "Compatible with 0.1.3". 0.x.x versions are - special: the first non-zero component indicates potentially breaking changes, - meaning the caret operator matches any version with the same first non-zero - component starting at the specified version. -* `^0.0.2` := `=0.0.2` "Only the version 0.0.2 is considered compatible" -* `~1.2` := `>=1.2.0-0 <1.3.0-0` "Any version starting with 1.2" -* `^1.2` := `>=1.2.0-0 <2.0.0-0` "Any version compatible with 1.2" -* `1.2.x` := `>=1.2.0-0 <1.3.0-0` "Any version starting with 1.2" -* `~1` := `>=1.0.0-0 <2.0.0-0` "Any version starting with 1" -* `^1` := `>=1.0.0-0 <2.0.0-0` "Any version compatible with 1" -* `1.x` := `>=1.0.0-0 <2.0.0-0` "Any version starting with 1" +If a partial version is provided as the first version in the inclusive +range, then the missing pieces are replaced with zeroes. 
+ +* `1.2 - 2.3.4` := `>=1.2.0 <=2.3.4` + +If a partial version is provided as the second version in the +inclusive range, then all versions that start with the supplied parts +of the tuple are accepted, but nothing that would be greater than the +provided tuple parts. + +* `1.2.3 - 2.3` := `>=1.2.3 <2.4.0` +* `1.2.3 - 2` := `>=1.2.3 <3.0.0` + +#### X-Ranges `1.2.x` `1.X` `1.2.*` `*` + +Any of `X`, `x`, or `*` may be used to "stand in" for one of the +numeric values in the `[major, minor, patch]` tuple. + +* `*` := `>=0.0.0` (Any version satisfies) +* `1.x` := `>=1.0.0 <2.0.0` (Matching major version) +* `1.2.x` := `>=1.2.0 <1.3.0` (Matching major and minor versions) + +A partial version range is treated as an X-Range, so the special +character is in fact optional. + +* `""` (empty string) := `*` := `>=0.0.0` +* `1` := `1.x.x` := `>=1.0.0 <2.0.0` +* `1.2` := `1.2.x` := `>=1.2.0 <1.3.0` + +#### Tilde Ranges `~1.2.3` `~1.2` `~1` + +Allows patch-level changes if a minor version is specified on the +comparator. Allows minor-level changes if not. + +* `~1.2.3` := `>=1.2.3 <1.(2+1).0` := `>=1.2.3 <1.3.0` +* `~1.2` := `>=1.2.0 <1.(2+1).0` := `>=1.2.0 <1.3.0` (Same as `1.2.x`) +* `~1` := `>=1.0.0 <(1+1).0.0` := `>=1.0.0 <2.0.0` (Same as `1.x`) +* `~0.2.3` := `>=0.2.3 <0.(2+1).0` := `>=0.2.3 <0.3.0` +* `~0.2` := `>=0.2.0 <0.(2+1).0` := `>=0.2.0 <0.3.0` (Same as `0.2.x`) +* `~0` := `>=0.0.0 <(0+1).0.0` := `>=0.0.0 <1.0.0` (Same as `0.x`) +* `~1.2.3-beta.2` := `>=1.2.3-beta.2 <1.3.0` Note that prereleases in + the `1.2.3` version will be allowed, if they are greater than or + equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but + `1.2.4-beta.2` would not, because it is a prerelease of a + different `[major, minor, patch]` tuple. + +Note: this is the same as the `~>` operator in rubygems. + +#### Caret Ranges `^1.2.3` `^0.2.5` `^0.0.4` + +Allows changes that do not modify the left-most non-zero digit in the +`[major, minor, patch]` tuple. 
In other words, this allows patch and +minor updates for versions `1.0.0` and above, patch updates for +versions `0.X >=0.1.0`, and *no* updates for versions `0.0.X`. + +Many authors treat a `0.x` version as if the `x` were the major +"breaking-change" indicator. + +Caret ranges are ideal when an author may make breaking changes +between `0.2.4` and `0.3.0` releases, which is a common practice. +However, it presumes that there will *not* be breaking changes between +`0.2.4` and `0.2.5`. It allows for changes that are presumed to be +additive (but non-breaking), according to commonly observed practices. + +* `^1.2.3` := `>=1.2.3 <2.0.0` +* `^0.2.3` := `>=0.2.3 <0.3.0` +* `^0.0.3` := `>=0.0.3 <0.0.4` +* `^1.2.3-beta.2` := `>=1.2.3-beta.2 <2.0.0` Note that prereleases in + the `1.2.3` version will be allowed, if they are greater than or + equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but + `1.2.4-beta.2` would not, because it is a prerelease of a + different `[major, minor, patch]` tuple. +* `^0.0.3-beta` := `>=0.0.3-beta <0.0.4` Note that prereleases in the + `0.0.3` version *only* will be allowed, if they are greater than or + equal to `beta`. So, `0.0.3-pr.2` would be allowed. + +When parsing caret ranges, a missing `patch` value desugars to the +number `0`, but will allow flexibility within that value, even if the +major and minor versions are both `0`. + +* `^1.2.x` := `>=1.2.0 <2.0.0` +* `^0.0.x` := `>=0.0.0 <0.1.0` +* `^0.0` := `>=0.0.0 <0.1.0` + +A missing `minor` and `patch` values will desugar to zero, but also +allow flexibility within those values, even if the major version is +zero. -Ranges can be joined with either a space (which implies "and") or a -`||` (which implies "or"). +* `^1.x` := `>=1.0.0 <2.0.0` +* `^0.x` := `>=0.0.0 <1.0.0` ## Functions @@ -90,42 +215,50 @@ Strict-mode Comparators and Ranges will be strict about the SemVer strings that they parse. -* valid(v): Return the parsed version, or null if it's not valid. 
-* inc(v, release): Return the version incremented by the release type - (major, minor, patch, or prerelease), or null if it's not valid. +* `valid(v)`: Return the parsed version, or null if it's not valid. +* `inc(v, release)`: Return the version incremented by the release + type (`major`, `premajor`, `minor`, `preminor`, `patch`, + `prepatch`, or `prerelease`), or null if it's not valid + * `premajor` in one call will bump the version up to the next major + version and down to a prerelease of that major version. + `preminor`, and `prepatch` work the same way. + * If called from a non-prerelease version, the `prerelease` will work the + same as `prepatch`. It increments the patch version, then makes a + prerelease. If the input version is already a prerelease it simply + increments it. ### Comparison -* gt(v1, v2): `v1 > v2` -* gte(v1, v2): `v1 >= v2` -* lt(v1, v2): `v1 < v2` -* lte(v1, v2): `v1 <= v2` -* eq(v1, v2): `v1 == v2` This is true if they're logically equivalent, +* `gt(v1, v2)`: `v1 > v2` +* `gte(v1, v2)`: `v1 >= v2` +* `lt(v1, v2)`: `v1 < v2` +* `lte(v1, v2)`: `v1 <= v2` +* `eq(v1, v2)`: `v1 == v2` This is true if they're logically equivalent, even if they're not the exact same string. You already know how to compare strings. -* neq(v1, v2): `v1 != v2` The opposite of eq. -* cmp(v1, comparator, v2): Pass in a comparison string, and it'll call +* `neq(v1, v2)`: `v1 != v2` The opposite of `eq`. +* `cmp(v1, comparator, v2)`: Pass in a comparison string, and it'll call the corresponding function above. `"==="` and `"!=="` do simple string comparison, but are included for completeness. Throws if an invalid comparison string is provided. -* compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or -1 if - v2 is greater. Sorts in ascending order if passed to Array.sort(). -* rcompare(v1, v2): The reverse of compare. Sorts an array of versions - in descending order when passed to Array.sort(). 
+* `compare(v1, v2)`: Return `0` if `v1 == v2`, or `1` if `v1` is greater, or `-1` if + `v2` is greater. Sorts in ascending order if passed to `Array.sort()`. +* `rcompare(v1, v2)`: The reverse of compare. Sorts an array of versions + in descending order when passed to `Array.sort()`. ### Ranges -* validRange(range): Return the valid range or null if it's not valid -* satisfies(version, range): Return true if the version satisfies the +* `validRange(range)`: Return the valid range or null if it's not valid +* `satisfies(version, range)`: Return true if the version satisfies the range. -* maxSatisfying(versions, range): Return the highest version in the list - that satisfies the range, or null if none of them do. -* gtr(version, range): Return true if version is greater than all the +* `maxSatisfying(versions, range)`: Return the highest version in the list + that satisfies the range, or `null` if none of them do. +* `gtr(version, range)`: Return `true` if version is greater than all the versions possible in the range. -* ltr(version, range): Return true if version is less than all the +* `ltr(version, range)`: Return `true` if version is less than all the versions possible in the range. -* outside(version, range, hilo): Return true if the version is outside +* `outside(version, range, hilo)`: Return true if the version is outside the bounds of the range in either the high or low direction. The `hilo` argument must be either the string `'>'` or `'<'`. (This is the function called by `gtr` and `ltr`.) @@ -134,8 +267,8 @@ greater than a range, less than a range, *or* satisfy a range! 
For example, the range `1.2 <1.2.9 || >2.0.0` would have a hole from `1.2.9` until `2.0.0`, so the version `1.2.10` would not be greater than the -range (because 2.0.1 satisfies, which is higher), nor less than the -range (since 1.2.8 satisfies, which is lower), and it also does not +range (because `2.0.1` satisfies, which is higher), nor less than the +range (since `1.2.8` satisfies, which is lower), and it also does not satisfy the range. If you want to know if a version satisfies or does not satisfy a diff -Nru nodejs-0.11.13/deps/npm/.eslintrc nodejs-0.11.15/deps/npm/.eslintrc --- nodejs-0.11.13/deps/npm/.eslintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/.eslintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "env" : { + "node" : true + }, + "rules" : { + "semi": [2, "never"], + "strict": 0, + "quotes": [1, "double", "avoid-escape"], + "no-use-before-define": 0, + "curly": 0, + "no-underscore-dangle": 0, + "no-lonely-if": 1, + "no-unused-vars": [2, {"vars" : "all", "args" : "after-used"}], + "no-mixed-requires": 0, + "space-infix-ops": 0 + } +} diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-bin.html nodejs-0.11.15/deps/npm/html/doc/api/npm-bin.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-bin.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-bin.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,17 +10,13 @@

npm-bin

Display npm bin folder

- -

SYNOPSIS

- -
npm.commands.bin(args, cb)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.bin(args, cb)
+

DESCRIPTION

Print the folder where npm will install executables.

-

This function should not be used programmatically. Instead, just refer -to the npm.bin member.

+to the npm.bin property.

+
@@ -32,5 +28,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-bugs.html nodejs-0.11.15/deps/npm/html/doc/api/npm-bugs.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-bugs.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-bugs.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,18 @@

npm-bugs

Bugs for a package in a web browser maybe

- -

SYNOPSIS

- -
npm.commands.bugs(package, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.bugs(package, callback)
+

DESCRIPTION

This command tries to guess at the likely location of a package's bug tracker URL, and then tries to open it using the --browser config param.

-

Like other commands, the first parameter is an array. This command only uses the first element, which is expected to be a package name with an optional version number.

-

This command will launch a browser, so this command may not be the most friendly for programmatic use.

+
@@ -38,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-cache.html nodejs-0.11.15/deps/npm/html/doc/api/npm-cache.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-cache.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-cache.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ + + + npm-cache + + + + + + +
+ +

npm-cache

manage the npm cache programmatically

+

SYNOPSIS

+
npm.commands.cache([args], callback)
+
+// helpers
+npm.commands.cache.clean([args], callback)
+npm.commands.cache.add([args], callback)
+npm.commands.cache.read(name, version, forceBypass, callback)
+

DESCRIPTION

+

This acts much the same ways as the npm-cache(1) command line +functionality.

+

The callback is called with the package.json data of the thing that is +eventually added to or read from the cache.

+

The top level npm.commands.cache(...) functionality is a public +interface, and like all commands on the npm.commands object, it will +match the command line behavior exactly.

+

However, the cache folder structure and the cache helper functions are +considered internal API surface, and as such, may change in future +releases of npm, potentially without warning or significant version +incrementation.

+

Use at your own risk.

+ +
+ + + + + + + + + + + + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-commands.html nodejs-0.11.15/deps/npm/html/doc/api/npm-commands.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-commands.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-commands.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,26 +10,21 @@

npm-commands

npm commands

- -

SYNOPSIS

- -
npm.commands[<command>](args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands[<command>](args, callback)
+

DESCRIPTION

npm comes with a full set of commands, and each of the commands takes a similar set of arguments.

-

In general, all commands on the command object take an array of positional argument strings. The last argument to any function is a callback. Some commands are special and take other optional arguments.

-

All commands have their own man page. See man npm-<command> for command-line usage, or man 3 npm-<command> for programmatic usage.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -41,5 +36,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-config.html nodejs-0.11.15/deps/npm/html/doc/api/npm-config.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-config.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-config.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,31 +10,42 @@

npm-config

Manage the npm configuration files

- -

SYNOPSIS

- +

SYNOPSIS

npm.commands.config(args, callback)
 var val = npm.config.get(key)
-npm.config.set(key, val)
- -

DESCRIPTION

- +npm.config.set(key, val) +

DESCRIPTION

This function acts much the same way as the command-line version. The first element in the array tells config what to do. Possible values are:

- -
  • set

    Sets a config parameter. The second element in args is interpreted as the -key, and the third element is interpreted as the value.

  • get

    Gets the value of a config parameter. The second element in args is the -key to get the value of.

  • delete (rm or del)

    Deletes a parameter from the config. The second element in args is the -key to delete.

  • list (ls)

    Show all configs that aren't secret. No parameters necessary.

  • edit:

    Opens the config file in the default editor. This command isn't very useful -programmatically, but it is made available.

- +
    +
  • set

    +

    Sets a config parameter. The second element in args is interpreted as the + key, and the third element is interpreted as the value.

    +
  • +
  • get

    +

    Gets the value of a config parameter. The second element in args is the + key to get the value of.

    +
  • +
  • delete (rm or del)

    +

    Deletes a parameter from the config. The second element in args is the + key to delete.

    +
  • +
  • list (ls)

    +

    Show all configs that aren't secret. No parameters necessary.

    +
  • +
  • edit:

    +

    Opens the config file in the default editor. This command isn't very useful + programmatically, but it is made available.

    +
  • +

To programmatically access npm configuration settings, or set them for the duration of a program, use the npm.config.set and npm.config.get functions instead.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -46,5 +57,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-deprecate.html nodejs-0.11.15/deps/npm/html/doc/api/npm-deprecate.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-deprecate.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-deprecate.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,30 +10,32 @@

npm-deprecate

Deprecate a version of a package

- -

SYNOPSIS

- -
npm.commands.deprecate(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.deprecate(args, callback)
+

DESCRIPTION

This command will update the npm registry entry for a package, providing a deprecation warning to all who attempt to install it.

-

The 'args' parameter must have exactly two elements:

- -
  • package[@version]

    The version portion is optional, and may be either a range, or a -specific version, or a tag.

  • message

    The warning message that will be printed whenever a user attempts to -install the package.

- +
    +
  • package[@version]

    +

    The version portion is optional, and may be either a range, or a + specific version, or a tag.

    +
  • +
  • message

    +

    The warning message that will be printed whenever a user attempts to + install the package.

    +
  • +

Note that you must be the package owner to deprecate something. See the owner and adduser help topics.

-

To un-deprecate a package, specify an empty string ("") for the message argument.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -45,5 +47,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-docs.html nodejs-0.11.15/deps/npm/html/doc/api/npm-docs.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-docs.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-docs.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,18 @@

npm-docs

Docs for a package in a web browser maybe

- -

SYNOPSIS

- -
npm.commands.docs(package, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.docs(package, callback)
+

DESCRIPTION

This command tries to guess at the likely location of a package's documentation URL, and then tries to open it using the --browser config param.

-

Like other commands, the first parameter is an array. This command only uses the first element, which is expected to be a package name with an optional version number.

-

This command will launch a browser, so this command may not be the most friendly for programmatic use.

+
@@ -38,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-edit.html nodejs-0.11.15/deps/npm/html/doc/api/npm-edit.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-edit.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-edit.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,28 +10,21 @@

npm-edit

Edit an installed package

- -

SYNOPSIS

- -
npm.commands.edit(package, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.edit(package, callback)
+

DESCRIPTION

Opens the package folder in the default editor (or whatever you've configured as the npm editor config -- see npm help config.)

-

After it has been edited, the package is rebuilt so as to pick up any changes in compiled packages.

-

For instance, you can do npm install connect to install connect into your package, and then npm.commands.edit(["connect"], callback) to make a few changes to your locally installed copy.

-

The first parameter is a string array with a single element, the package to open. The package can optionally have a version number attached.

-

Since this command opens an editor in a new process, be careful about where and how this is used.

+
@@ -43,5 +36,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-explore.html nodejs-0.11.15/deps/npm/html/doc/api/npm-explore.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-explore.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-explore.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,22 +10,16 @@

npm-explore

Browse an installed package

- -

SYNOPSIS

- -
npm.commands.explore(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.explore(args, callback)
+

DESCRIPTION

Spawn a subshell in the directory of the installed package specified.

-

If a command is specified, then it is run in the subshell, which then immediately terminates.

-

Note that the package is not automatically rebuilt afterwards, so be sure to use npm rebuild <pkg> if you make any changes.

-

The first element in the 'args' parameter must be a package name. After that is the optional command, which can be any number of strings. All of the strings will be combined into one, space-delimited command.

+
@@ -37,5 +31,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-help-search.html nodejs-0.11.15/deps/npm/html/doc/api/npm-help-search.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-help-search.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-help-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,30 +10,29 @@

npm-help-search

Search the help pages

- -

SYNOPSIS

- -
npm.commands.helpSearch(args, [silent,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.helpSearch(args, [silent,] callback)
+

DESCRIPTION

This command is rarely useful, but it exists in the rare case that it is.

-

This command takes an array of search terms and returns the help pages that match in order of best match.

-

If there is only one match, then npm displays that help section. If there are multiple results, the results are printed to the screen formatted and the array of results is returned. Each result is an object with these properties:

+
    +
  • hits: +A map of args to number of hits on that arg. For example, {"npm": 3}
  • +
  • found: +Total number of unique args that matched.
  • +
  • totalHits: +Total number of hits.
  • +
  • lines: +An array of all matching lines (and some adjacent lines).
  • +
  • file: +Name of the file that matched
  • +
+

The silent parameter is not necessary not used, but it may in the future.

-
  • hits: -A map of args to number of hits on that arg. For example, {"npm": 3}
  • found: -Total number of unique args that matched.
  • totalHits: -Total number of hits.
  • lines: -An array of all matching lines (and some adjacent lines).
  • file: -Name of the file that matched
- -

The silent parameter is not neccessary not used, but it may in the future.

@@ -45,5 +44,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm.html nodejs-0.11.15/deps/npm/html/doc/api/npm.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,9 +10,7 @@

npm

node package manager

- -

SYNOPSIS

- +

SYNOPSIS

var npm = require("npm")
 npm.load([configObject, ]function (er, npm) {
   // use the npm object, now that it's loaded.
@@ -23,77 +21,83 @@
   console.log("prefix = %s", npm.prefix)
 
   npm.commands.install(["package"], cb)
-})
- -

VERSION

- -

1.4.9

- -

DESCRIPTION

- +}) +

VERSION

+

2.1.6

+

DESCRIPTION

This is the API documentation for npm. To find documentation of the command line -client, see npm(1).

- -

Prior to using npm's commands, npm.load() must be called. -If you provide configObject as an object hash of top-level -configs, they override the values stored in the various config -locations. In the npm command line client, this set of configs -is parsed from the command line options. Additional configuration -params are loaded from two configuration files. See npm-config(1), -npm-config(7), and npmrc(5) for more information.

- +client, see npm(1).

+

Prior to using npm's commands, npm.load() must be called. If you provide +configObject as an object map of top-level configs, they override the values +stored in the various config locations. In the npm command line client, this +set of configs is parsed from the command line options. Additional +configuration params are loaded from two configuration files. See +npm-config(1), npm-config(7), and npmrc(5) for more information.

After that, each of the functions are accessible in the -commands object: npm.commands.<cmd>. See npm-index(7) for a list of +commands object: npm.commands.<cmd>. See npm-index(7) for a list of all possible commands.

-

All commands on the command object take an array of positional argument strings. The last argument to any function is a callback. Some commands take other optional arguments.

-

Configs cannot currently be set on a per function basis, as each call to npm.config.set will change the value for all npm commands in that process.

-

To find API documentation for a specific command, run the npm apihelp command.

- -

METHODS AND PROPERTIES

- -
  • npm.load(configs, cb)

    Load the configuration params, and call the cb function once the -globalconfig and userconfig files have been loaded as well, or on -nextTick if they've already been loaded.

  • npm.config

    An object for accessing npm configuration parameters.

    • npm.config.get(key)

    • npm.config.set(key, val)
    • npm.config.del(key)

  • npm.dir or npm.root

    The node_modules directory where npm will operate.

  • npm.prefix

    The prefix where npm is operating. (Most often the current working -directory.)

  • npm.cache

    The place where npm keeps JSON and tarballs it fetches from the -registry (or uploads to the registry).

  • npm.tmp

    npm's temporary working directory.

  • npm.deref

    Get the "real" name for a command that has either an alias or -abbreviation.

- -

MAGIC

- -

For each of the methods in the npm.commands hash, a method is added to -the npm object, which takes a set of positional string arguments rather -than an array and a callback.

- +

METHODS AND PROPERTIES

+
    +
  • npm.load(configs, cb)

    +

    Load the configuration params, and call the cb function once the + globalconfig and userconfig files have been loaded as well, or on + nextTick if they've already been loaded.

    +
  • +
  • npm.config

    +

    An object for accessing npm configuration parameters.

    +
      +
    • npm.config.get(key)
    • +
    • npm.config.set(key, val)
    • +
    • npm.config.del(key)
    • +
    +
  • +
  • npm.dir or npm.root

    +

    The node_modules directory where npm will operate.

    +
  • +
  • npm.prefix

    +

    The prefix where npm is operating. (Most often the current working + directory.)

    +
  • +
  • npm.cache

    +

    The place where npm keeps JSON and tarballs it fetches from the + registry (or uploads to the registry).

    +
  • +
  • npm.tmp

    +

    npm's temporary working directory.

    +
  • +
  • npm.deref

    +

    Get the "real" name for a command that has either an alias or + abbreviation.

    +
  • +
+

MAGIC

+

For each of the methods in the npm.commands object, a method is added to the +npm object, which takes a set of positional string arguments rather than an +array and a callback.

If the last argument is a callback, then it will use the supplied callback. However, if no callback is provided, then it will print out the error or results.

-

For example, this would work in a node repl:

-
> npm = require("npm")
 > npm.load()  // wait a sec...
-> npm.install("dnode", "express")
- -

Note that that won't work in a node program, since the install +> npm.install("dnode", "express") +

Note that that won't work in a node program, since the install method will get called before the configuration load is completed.

- -

ABBREVS

- +

ABBREVS

In order to support npm ins foo instead of npm install foo, the npm.commands object has a set of abbreviations as well as the full method names. Use the npm.deref method to find the real name.

-

For example:

- -
var cmd = npm.deref("unp") // cmd === "unpublish"
+
var cmd = npm.deref("unp") // cmd === "unpublish"
+
@@ -105,5 +109,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-init.html nodejs-0.11.15/deps/npm/html/doc/api/npm-init.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-init.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-init.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,33 +10,24 @@

npm init

Interactively create a package.json file

- -

SYNOPSIS

- -
npm.commands.init(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.init(args, callback)
+

DESCRIPTION

This will ask you a bunch of questions, and then write a package.json for you.

-

It attempts to make reasonable guesses about what you want things to be set to, and then writes a package.json file with the options you've selected.

-

If you already have a package.json file, it'll read that first, and default to the options in there.

-

It is strictly additive, so it does not delete options from your package.json without a really good reason to do so.

-

Since this function expects to be run on the command-line, it doesn't work very well as a programmatically. The best option is to roll your own, and since JavaScript makes it stupid simple to output formatted JSON, that is the preferred method. If you're sure you want to handle command-line prompting, then go ahead and use this programmatically.

+

SEE ALSO

+

package.json(5)

-

SEE ALSO

- -

package.json(5)

@@ -48,5 +39,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-install.html nodejs-0.11.15/deps/npm/html/doc/api/npm-install.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-install.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-install.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,17 @@

npm-install

install a package programmatically

- -

SYNOPSIS

- -
npm.commands.install([where,] packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.install([where,] packages, callback)
+

DESCRIPTION

This acts much the same ways as installing on the command-line.

-

The 'where' parameter is optional and only used internally, and it specifies where the packages should be installed to.

-

The 'packages' parameter is an array of strings. Each element in the array is the name of a package to be installed.

-

Finally, 'callback' is a function that will be called when all packages have been installed or when an error has been encountered.

+
@@ -38,5 +32,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-link.html nodejs-0.11.15/deps/npm/html/doc/api/npm-link.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-link.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-link.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,37 +10,27 @@

npm-link

Symlink a package folder

- -

SYNOPSIS

- -
npm.command.link(callback)
-npm.command.link(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.link(callback)
+npm.commands.link(packages, callback)
+

DESCRIPTION

Package linking is a two-step process.

-

Without parameters, link will create a globally-installed symbolic link from prefix/package-name to the current folder.

-

With a parameters, link will create a symlink from the local node_modules folder to the global symlink.

-

When creating tarballs for npm publish, the linked packages are "snapshotted" to their current state by resolving the symbolic links.

-

This is handy for installing your own stuff, so that you can work on it and test it iteratively without having to continually rebuild.

-

For example:

-
npm.commands.link(cb)           # creates global link from the cwd
                                 # (say redis package)
-npm.commands.link('redis', cb)  # link-install the package
- -

Now, any changes to the redis package will be reflected in +npm.commands.link('redis', cb) # link-install the package +

Now, any changes to the redis package will be reflected in the package in the current working directory

+
@@ -52,5 +42,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-load.html nodejs-0.11.15/deps/npm/html/doc/api/npm-load.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-load.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-load.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,30 +10,22 @@

npm-load

Load config settings

- -

SYNOPSIS

- -
npm.load(conf, cb)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.load(conf, cb)
+

DESCRIPTION

npm.load() must be called before any other function call. Both parameters are optional, but the second is recommended.

- -

The first parameter is an object hash of command-line config params, and the -second parameter is a callback that will be called when npm is loaded and -ready to serve.

- +

The first parameter is an object containing command-line config params, and the +second parameter is a callback that will be called when npm is loaded and ready +to serve.

The first parameter should follow a similar structure as the package.json config object.

-

For example, to emulate the --dev flag, pass an object that looks like this:

-
{
   "dev": true
-}
+} +

For a list of all the available command-line configs, see npm help config

-

For a list of all the available command-line configs, see npm help config

@@ -45,5 +37,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-ls.html nodejs-0.11.15/deps/npm/html/doc/api/npm-ls.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-ls.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-ls.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,57 +10,48 @@

npm-ls

List installed packages

- -

SYNOPSIS

- -
npm.commands.ls(args, [silent,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.ls(args, [silent,] callback)
+

DESCRIPTION

This command will print to stdout all the versions of packages that are installed, as well as their dependencies, in a tree-structure. It will also return that data using the callback.

-

This command does not take any arguments, but args must be defined. Beyond that, if any arguments are passed in, npm will politely warn that it does not take positional arguments, though you may set config flags like with any other command, such as global to list global packages.

-

It will print out extraneous, missing, and invalid packages.

-

If the silent parameter is set to true, nothing will be output to the screen, but the data will still be returned.

-

Callback is provided an error if one occurred, the full data about which packages are installed and which dependencies they will receive, and a "lite" data object which just shows which versions are installed where. Note that the full data object is a circular structure, so care must be taken if it is serialized to JSON.

- -

CONFIGURATION

- +

CONFIGURATION

long

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show extended information.

-

parseable

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show parseable output instead of tree view.

-

global

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

List packages in the global install prefix instead of in the current project.

-

Note, if parseable is set or long isn't set, then duplicates will be trimmed. This means that if a submodule a same dependency as a parent module, then the dependency will only be output once.

+
@@ -72,5 +63,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-outdated.html nodejs-0.11.15/deps/npm/html/doc/api/npm-outdated.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-outdated.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-outdated.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,17 +10,13 @@

npm-outdated

Check for outdated packages

- -

SYNOPSIS

- -
npm.commands.outdated([packages,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.outdated([packages,] callback)
+

DESCRIPTION

This command will check the registry to see if the specified packages are currently outdated.

-

If the 'packages' parameter is left out, npm will check all packages.

+
@@ -32,5 +28,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-owner.html nodejs-0.11.15/deps/npm/html/doc/api/npm-owner.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-owner.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-owner.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,32 +10,32 @@

npm-owner

Manage package owners

- -

SYNOPSIS

- -
npm.commands.owner(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.owner(args, callback)
+

DESCRIPTION

The first element of the 'args' parameter defines what to do, and the subsequent elements depend on the action. Possible values for the action are (order of parameters are given in parenthesis):

- -
  • ls (package): +
      +
    • ls (package): List all the users who have access to modify a package and push new versions. -Handy when you need to know who to bug for help.
    • add (user, package): +Handy when you need to know who to bug for help.
    • +
    • add (user, package): Add a new user as a maintainer of a package. This user is enabled to modify -metadata, publish new versions, and add other owners.
    • rm (user, package): +metadata, publish new versions, and add other owners.
    • +
    • rm (user, package): Remove a user from the package owner list. This immediately revokes their -privileges.
    - +privileges.
  • +

Note that there is only one level of access. Either you can modify a package, or you can't. Future versions may contain more fine-grained access levels, but that is not implemented at this time.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -47,5 +47,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-pack.html nodejs-0.11.15/deps/npm/html/doc/api/npm-pack.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-pack.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-pack.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,18 @@

npm-pack

Create a tarball from a package

- -

SYNOPSIS

- -
npm.commands.pack([packages,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.pack([packages,] callback)
+

DESCRIPTION

For anything that's installable (that is, a package folder, tarball, tarball url, name@tag, name@version, or name), this command will fetch it to the cache, and then copy the tarball to the current working directory as <name>-<version>.tgz, and then write the filenames out to stdout.

-

If the same package is specified multiple times, then the file will be overwritten the second time.

-

If no arguments are supplied, then npm packs the current package folder.

+
@@ -38,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-prefix.html nodejs-0.11.15/deps/npm/html/doc/api/npm-prefix.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-prefix.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-prefix.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,19 +10,14 @@

npm-prefix

Display prefix

- -

SYNOPSIS

- -
npm.commands.prefix(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.prefix(args, callback)
+

DESCRIPTION

Print the prefix to standard out.

-

'args' is never used and callback is never called with data. 'args' must be present or things will break.

-

This function is not useful programmatically

+
@@ -34,5 +29,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-prune.html nodejs-0.11.15/deps/npm/html/doc/api/npm-prune.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-prune.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-prune.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,21 +10,15 @@

npm-prune

Remove extraneous packages

- -

SYNOPSIS

- -
npm.commands.prune([packages,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.prune([packages,] callback)
+

DESCRIPTION

This command removes "extraneous" packages.

-

The first parameter is optional, and it specifies packages to be removed.

-

No packages are specified, then all packages will be checked.

-

Extraneous packages are packages that are not listed on the parent package's dependencies list.

+
@@ -36,5 +30,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-publish.html nodejs-0.11.15/deps/npm/html/doc/api/npm-publish.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-publish.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-publish.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,30 +10,31 @@

npm-publish

Publish a package

- -

SYNOPSIS

- -
npm.commands.publish([packages,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.publish([packages,] callback)
+

DESCRIPTION

Publishes a package to the registry so that it can be installed by name. Possible values in the 'packages' array are:

- -
  • <folder>: -A folder containing a package.json file

  • <tarball>: +

      +
    • <folder>: +A folder containing a package.json file

      +
    • +
    • <tarball>: A url or file path to a gzipped tar archive containing a single folder -with a package.json file inside.

    - +with a package.json file inside.

    +
  • +

If the package array is empty, npm will try to publish something in the current working directory.

-

This command could fails if one of the packages specified already exists in the registry. Overwrites when the "force" environment variable is set.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -45,5 +46,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-rebuild.html nodejs-0.11.15/deps/npm/html/doc/api/npm-rebuild.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-rebuild.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-rebuild.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,20 +10,15 @@

npm-rebuild

Rebuild a package

- -

SYNOPSIS

- -
npm.commands.rebuild([packages,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.rebuild([packages,] callback)
+

DESCRIPTION

This command runs the npm build command on each of the matched packages. This is useful when you install a new version of node, and must recompile all your C++ addons with the new binary. If no 'packages' parameter is specify, every package will be rebuilt.

- -

CONFIGURATION

- +

CONFIGURATION

See npm help build

+
@@ -35,5 +30,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-repo.html nodejs-0.11.15/deps/npm/html/doc/api/npm-repo.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-repo.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-repo.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,18 @@

npm-repo

Open package repository page in the browser

- -

SYNOPSIS

- -
npm.commands.repo(package, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.repo(package, callback)
+

DESCRIPTION

This command tries to guess at the likely location of a package's repository URL, and then tries to open it using the --browser config param.

-

Like other commands, the first parameter is an array. This command only uses the first element, which is expected to be a package name with an optional version number.

-

This command will launch a browser, so this command may not be the most friendly for programmatic use.

+
@@ -38,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-restart.html nodejs-0.11.15/deps/npm/html/doc/api/npm-restart.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-restart.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-restart.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,25 +10,21 @@

npm-restart

Start a package

- -

SYNOPSIS

- -
npm.commands.restart(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.restart(packages, callback)
+

DESCRIPTION

This runs a package's "restart" script, if one was provided. Otherwise it runs package's "stop" script, if one was provided, and then the "start" script.

-

If no version is specified, then it restarts the "active" version.

-

npm can run tests on multiple packages. Just specify multiple packages in the packages parameter.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -40,5 +36,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-root.html nodejs-0.11.15/deps/npm/html/doc/api/npm-root.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-root.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-root.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,19 +10,14 @@

npm-root

Display npm root

- -

SYNOPSIS

- -
npm.commands.root(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.root(args, callback)
+

DESCRIPTION

Print the effective node_modules folder to standard out.

-

'args' is never used and callback is never called with data. 'args' must be present or things will break.

-

This function is not useful programmatically.

+
@@ -34,5 +29,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-run-script.html nodejs-0.11.15/deps/npm/html/doc/api/npm-run-script.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-run-script.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-run-script.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,27 +10,26 @@

npm-run-script

Run arbitrary package scripts

- -

SYNOPSIS

- -
npm.commands.run-script(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.run-script(args, callback)
+

DESCRIPTION

This runs an arbitrary command from a package's "scripts" object.

-

It is used by the test, start, restart, and stop commands, but can be called directly, as well.

-

The 'args' parameter is an array of strings. Behavior depends on the number of elements. If there is only one element, npm assumes that the element represents a command to be run on the local repository. If there is more than one element, then the first is assumed to be the package and the second is assumed to be the command to run. All other elements are ignored.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -42,5 +41,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-search.html nodejs-0.11.15/deps/npm/html/doc/api/npm-search.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-search.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,30 +10,38 @@

npm-search

Search for packages

- -

SYNOPSIS

- -
npm.commands.search(searchTerms, [silent,] [staleness,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.search(searchTerms, [silent,] [staleness,] callback)
+

DESCRIPTION

Search the registry for packages matching the search terms. The available parameters are:

- -
  • searchTerms: -Array of search terms. These terms are case-insensitive.
  • silent: -If true, npm will not log anything to the console.
  • staleness: +
      +
    • searchTerms: +Array of search terms. These terms are case-insensitive.
    • +
    • silent: +If true, npm will not log anything to the console.
    • +
    • staleness: This is the threshold for stale packages. "Fresh" packages are not refreshed -from the registry. This value is measured in seconds.
    • callback: +from the registry. This value is measured in seconds.

    • +
    • callback: Returns an object where each key is the name of a package, and the value is information about that package along with a 'words' property, which is a space-delimited string of all of the interesting words in that package. -The only properties included are those that are searched, which generally include:

      • name
      • description
      • maintainers
      • url
      • keywords
    - +The only properties included are those that are searched, which generally include:

    +
      +
    • name
    • +
    • description
    • +
    • maintainers
    • +
    • url
    • +
    • keywords
    • +
    +
  • +

A search on the registry excludes any result that does not match all of the search terms. It also removes any items from the results that contain an excluded term (the "searchexclude" config). The search is case insensitive and doesn't try to read your mind (it doesn't do any verb tense matching or the like).

+
@@ -45,5 +53,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-shrinkwrap.html nodejs-0.11.15/deps/npm/html/doc/api/npm-shrinkwrap.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-shrinkwrap.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-shrinkwrap.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,24 +10,18 @@

npm-shrinkwrap

programmatically generate package shrinkwrap file

- -

SYNOPSIS

- -
npm.commands.shrinkwrap(args, [silent,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.shrinkwrap(args, [silent,] callback)
+

DESCRIPTION

This acts much the same ways as shrinkwrapping on the command-line.

-

This command does not take any arguments, but 'args' must be defined. Beyond that, if any arguments are passed in, npm will politely warn that it does not take positional arguments.

-

If the 'silent' parameter is set to true, nothing will be output to the screen, but the shrinkwrap file will still be written.

-

Finally, 'callback' is a function that will be called when the shrinkwrap has been saved.

+
@@ -39,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-start.html nodejs-0.11.15/deps/npm/html/doc/api/npm-start.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-start.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-start.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,17 +10,13 @@

npm-start

Start a package

- -

SYNOPSIS

- -
npm.commands.start(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.start(packages, callback)
+

DESCRIPTION

This runs a package's "start" script, if one was provided.

-

npm can run tests on multiple packages. Just specify multiple packages in the packages parameter.

+
@@ -32,5 +28,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-stop.html nodejs-0.11.15/deps/npm/html/doc/api/npm-stop.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-stop.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-stop.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,17 +10,13 @@

npm-stop

Stop a package

- -

SYNOPSIS

- -
npm.commands.stop(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.stop(packages, callback)
+

DESCRIPTION

This runs a package's "stop" script, if one was provided.

-

npm can run stop on multiple packages. Just specify multiple packages in the packages parameter.

+
@@ -32,5 +28,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-submodule.html nodejs-0.11.15/deps/npm/html/doc/api/npm-submodule.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-submodule.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-submodule.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,31 +10,27 @@

npm-submodule

Add a package as a git submodule

- -

SYNOPSIS

- -
npm.commands.submodule(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.submodule(packages, callback)
+

DESCRIPTION

For each package specified, npm will check if it has a git repository url in its package.json description then add it as a git submodule at node_modules/<pkg name>.

-

This is a convenience only. From then on, it's up to you to manage updates by using the appropriate git commands. npm will stubbornly refuse to update, modify, or remove anything with a .git subfolder in it.

-

This command also does not install missing dependencies, if the package does not include them in its git repository. If npm ls reports that things are missing, you can either install, link, or submodule them yourself, or you can do npm explore <pkgname> -- npm install to install the dependencies into the submodule folder.

+

SEE ALSO

+
    +
  • npm help json
  • +
  • git help submodule
  • +
-

SEE ALSO

- -
  • npm help json
  • git help submodule
@@ -46,5 +42,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-tag.html nodejs-0.11.15/deps/npm/html/doc/api/npm-tag.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-tag.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-tag.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,27 +10,21 @@

npm-tag

Tag a published version

- -

SYNOPSIS

- -
npm.commands.tag(package@version, tag, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.tag(package@version, tag, callback)
+

DESCRIPTION

Tags the specified version of the package with the specified tag, or the --tag config if not specified.

-

The 'package@version' is an array of strings, but only the first two elements are currently used.

-

The first element must be in the form package@version, where package is the package name and version is the version number (much like installing a specific version).

-

The second element is the name of the tag to tag this version with. If this parameter is missing or falsey (empty), the default froom the config will be used. For more information about how to set this config, check man 3 npm-config for programmatic usage or man npm-config for cli usage.

+
@@ -42,5 +36,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-test.html nodejs-0.11.15/deps/npm/html/doc/api/npm-test.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-test.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-test.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,20 +10,15 @@

npm-test

Test a package

- -

SYNOPSIS

- -
  npm.commands.test(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
  npm.commands.test(packages, callback)
+

DESCRIPTION

This runs a package's "test" script, if one was provided.

-

To run tests as a condition of installation, set the npat config to true.

-

npm can run tests on multiple packages. Just specify multiple packages in the packages parameter.

+
@@ -35,5 +30,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-uninstall.html nodejs-0.11.15/deps/npm/html/doc/api/npm-uninstall.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-uninstall.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-uninstall.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,20 +10,15 @@

npm-uninstall

uninstall a package programmatically

- -

SYNOPSIS

- -
npm.commands.uninstall(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.uninstall(packages, callback)
+

DESCRIPTION

This acts much the same ways as uninstalling on the command-line.

-

The 'packages' parameter is an array of strings. Each element in the array is the name of a package to be uninstalled.

-

Finally, 'callback' is a function that will be called when all packages have been uninstalled or when an error has been encountered.

+
@@ -35,5 +30,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-unpublish.html nodejs-0.11.15/deps/npm/html/doc/api/npm-unpublish.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-unpublish.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-unpublish.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,24 +10,18 @@

npm-unpublish

Remove a package from the registry

- -

SYNOPSIS

- -
npm.commands.unpublish(package, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.unpublish(package, callback)
+

DESCRIPTION

This removes a package version from the registry, deleting its entry and removing the tarball.

-

The package parameter must be defined.

-

Only the first element in the package parameter is used. If there is no first element, then npm assumes that the package at the current working directory is what is meant.

-

If no version is specified, or if all versions are removed then the root package entry is removed from the registry entirely.

+
@@ -39,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-update.html nodejs-0.11.15/deps/npm/html/doc/api/npm-update.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-update.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-update.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,16 +10,12 @@

npm-update

Update a package

- -

SYNOPSIS

- -
npm.commands.update(packages, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.update(packages, callback)
+

DESCRIPTION

Updates a package, upgrading it to the latest version. It also installs any missing packages.

-

The 'packages' argument is an array of packages to update. The 'callback' parameter will be called when done or when an error occurs.

+
@@ -31,5 +27,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-version.html nodejs-0.11.15/deps/npm/html/doc/api/npm-version.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-version.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-version.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,22 +10,17 @@

npm-version

Bump a package version

- -

SYNOPSIS

- -
npm.commands.version(newversion, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.version(newversion, callback)
+

DESCRIPTION

Run this in a package directory to bump the version and write the new data back to the package.json file.

-

If run in a git repo, it will also create a version commit and tag, and fail if the repo is not clean.

-

Like all other commands, this function takes a string array as its first parameter. The difference, however, is this function will fail if it does not have exactly one element. The only element should be a version number.

+
@@ -37,5 +32,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-view.html nodejs-0.11.15/deps/npm/html/doc/api/npm-view.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-view.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-view.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,97 +10,66 @@

npm-view

View registry info

- -

SYNOPSIS

- -
npm.commands.view(args, [silent,] callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.view(args, [silent,] callback)
+

DESCRIPTION

This command shows data about a package and prints it to the stream referenced by the outfd config, which defaults to stdout.

-

The "args" parameter is an ordered list that closely resembles the command-line usage. The elements should be ordered such that the first element is the package and version (package@version). The version is optional. After that, the rest of the parameters are fields with optional subfields ("field.subfield") which can be used to get only the information desired from the registry.

-

The callback will be passed all of the data returned by the query.

-

For example, to get the package registry entry for the connect package, you can do this:

- -
npm.commands.view(["connect"], callback)
- -

If no version is specified, "latest" is assumed.

- +
npm.commands.view(["connect"], callback)
+

If no version is specified, "latest" is assumed.

Field names can be specified after the package descriptor. For example, to show the dependencies of the ronn package at version 0.3.5, you could do the following:

- -
npm.commands.view(["ronn@0.3.5", "dependencies"], callback)
- -

You can view child field by separating them with a period. +

npm.commands.view(["ronn@0.3.5", "dependencies"], callback)
+

You can view child field by separating them with a period. To view the git repository URL for the latest version of npm, you could do this:

- -
npm.commands.view(["npm", "repository.url"], callback)
- -

For fields that are arrays, requesting a non-numeric field will return +

npm.commands.view(["npm", "repository.url"], callback)
+

For fields that are arrays, requesting a non-numeric field will return all of the values from the objects in the list. For example, to get all the contributor names for the "express" project, you can do this:

- -
npm.commands.view(["express", "contributors.email"], callback)
- -

You may also use numeric indices in square braces to specifically select +

npm.commands.view(["express", "contributors.email"], callback)
+

You may also use numeric indices in square braces to specifically select an item in an array field. To just get the email address of the first contributor in the list, you can do this:

- -
npm.commands.view(["express", "contributors[0].email"], callback)
- -

Multiple fields may be specified, and will be printed one after another. +

npm.commands.view(["express", "contributors[0].email"], callback)
+

Multiple fields may be specified, and will be printed one after another. For exampls, to get all the contributor names and email addresses, you can do this:

- -
npm.commands.view(["express", "contributors.name", "contributors.email"], callback)
- -

"Person" fields are shown as a string if they would be shown as an +

npm.commands.view(["express", "contributors.name", "contributors.email"], callback)
+

"Person" fields are shown as a string if they would be shown as an object. So, for example, this will show the list of npm contributors in the shortened string format. (See npm help json for more on this.)

- -
npm.commands.view(["npm", "contributors"], callback)
- -

If a version range is provided, then data will be printed for every +

npm.commands.view(["npm", "contributors"], callback)
+

If a version range is provided, then data will be printed for every matching version of the package. This will show which version of jsdom was required by each matching version of yui3:

- -
npm.commands.view(["yui3@'>0.5.4'", "dependencies.jsdom"], callback)
- -

OUTPUT

- +
npm.commands.view(["yui3@'>0.5.4'", "dependencies.jsdom"], callback)
+

OUTPUT

If only a single string field for a single version is output, then it will not be colorized or quoted, so as to enable piping the output to another command.

-

If the version range matches multiple versions, than each printed value will be prefixed with the version it applies to.

-

If multiple fields are requested, than each of them are prefixed with the field name.

-

Console output can be disabled by setting the 'silent' parameter to true.

- -

RETURN VALUE

- +

RETURN VALUE

The data returned will be an object in this formation:

-
{ <version>:
   { <field>: <value>
   , ... }
-, ... }
+, ... } +

corresponding to the list of fields selected.

-

corresponding to the list of fields selected.

@@ -112,5 +81,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/api/npm-whoami.html nodejs-0.11.15/deps/npm/html/doc/api/npm-whoami.html --- nodejs-0.11.13/deps/npm/html/doc/api/npm-whoami.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/api/npm-whoami.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,19 +10,14 @@

npm-whoami

Display npm username

- -

SYNOPSIS

- -
npm.commands.whoami(args, callback)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm.commands.whoami(args, callback)
+

DESCRIPTION

Print the username config to standard output.

-

'args' is never used and callback is never called with data. 'args' must be present or things will break.

-

This function is not useful programmatically

+
@@ -34,5 +29,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-adduser.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-adduser.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-adduser.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-adduser.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,37 +10,52 @@

npm-adduser

Add a registry user account

- -

SYNOPSIS

- -
npm adduser
- -

DESCRIPTION

- -

Create or verify a user named <username> in the npm registry, and -save the credentials to the .npmrc file.

- +

SYNOPSIS

+
npm adduser [--registry=url] [--scope=@orgname] [--always-auth]
+

DESCRIPTION

+

Create or verify a user named <username> in the specified registry, and +save the credentials to the .npmrc file. If no registry is specified, +the default registry will be used (see npm-config(7)).

The username, password, and email are read in from prompts.

-

You may use this command to change your email address, but not username or password.

- -

To reset your password, go to https://npmjs.org/forgot

- +

To reset your password, go to https://www.npmjs.org/forgot

You may use this command multiple times with the same user account to authorize on a new machine.

- -

CONFIGURATION

- +

npm login is an alias to adduser and behaves exactly the same way.

+

CONFIGURATION

registry

+

Default: http://registry.npmjs.org/

+

The base URL of the npm package registry. If scope is also specified, +this registry will only be used for packages with that scope. See npm-scope(7).

+

scope

+

Default: none

+

If specified, the user and login credentials given will be associated +with the specified scope. See npm-scope(7). You can use both at the same time, +e.g.

+
npm adduser --registry=http://myregistry.example.com --scope=@myco
+

This will set a registry for the given scope and login or create a user for +that registry at the same time.

+

always-auth

+

Default: false

+

If specified, save configuration indicating that all requests to the given +registry should include authorization information. Useful for private +registries. Can be used with --registry and / or --scope, e.g.

+
npm adduser --registry=http://private-registry.example.com --always-auth
+

This will ensure that all requests to that registry (including for tarballs) +include an authorization header. See always-auth in npm-config(7) for more +details on always-auth. Registry-specific configuaration of always-auth takes +precedence over any global configuration.

+

SEE ALSO

+ -

Default: http://registry.npmjs.org/

- -

The base URL of the npm package registry.

- -

SEE ALSO

- -
@@ -52,5 +67,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-bin.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-bin.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-bin.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-bin.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,20 @@

npm-bin

Display npm bin folder

- -

SYNOPSIS

- -
npm bin
- -

DESCRIPTION

- +

SYNOPSIS

+
npm bin
+

DESCRIPTION

Print the folder where npm will install executables.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -33,5 +35,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-bugs.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-bugs.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-bugs.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-bugs.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,36 +10,39 @@

npm-bugs

Bugs for a package in a web browser maybe

- -

SYNOPSIS

- +

SYNOPSIS

npm bugs <pkgname>
-npm bugs (with no args in a package dir)
- -

DESCRIPTION

- +npm bugs (with no args in a package dir) +

DESCRIPTION

This command tries to guess at the likely location of a package's bug tracker URL, and then tries to open it using the --browser config param. If no package name is provided, it will search for a package.json in the current folder and use the name property.

- -

CONFIGURATION

- +

CONFIGURATION

browser

- -
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • Type: String
- +
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +

The browser that is called by the npm bugs command to open websites.

-

registry

- -
  • Default: https://registry.npmjs.org/
  • Type: url
- +

The base URL of the npm package registry.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -51,5 +54,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-build.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-build.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-build.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-build.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,23 @@

npm-build

Build a package

- -

SYNOPSIS

- -
npm build <package-folder>
- -
  • <package-folder>: -A folder containing a package.json file in its root.
- -

DESCRIPTION

- +

SYNOPSIS

+
npm build <package-folder>
+
    +
  • <package-folder>: +A folder containing a package.json file in its root.
  • +
+

DESCRIPTION

This is the plumbing command called by npm link and npm install.

-

It should generally not be called directly.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -38,5 +38,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-bundle.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-bundle.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-bundle.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-bundle.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,16 @@

npm-bundle

REMOVED

- -

DESCRIPTION

- +

DESCRIPTION

The npm bundle command has been removed in 1.0, for the simple reason that it is no longer necessary, as the default behavior is now to install packages into the local space.

-

Just use npm install now to do what npm bundle used to do.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -33,5 +31,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-cache.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-cache.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-cache.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-cache.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,9 +10,7 @@

npm-cache

Manipulates packages cache

- -

SYNOPSIS

- +

SYNOPSIS

npm cache add <tarball file>
 npm cache add <folder>
 npm cache add <tarball url>
@@ -20,54 +18,58 @@
 
 npm cache ls [<path>]
 
-npm cache clean [<path>]
- -

DESCRIPTION

- +npm cache clean [<path>] +

DESCRIPTION

Used to add, list, or clear the npm cache folder.

- -
  • add: +

      +
    • add: Add the specified package to the local cache. This command is primarily intended to be used internally by npm, but it can provide a way to -add data to the local installation cache explicitly.

    • ls: +add data to the local installation cache explicitly.

      +
    • +
    • ls: Show the data in the cache. Argument is a path to show in the cache folder. Works a bit like the find program, but limited by the -depth config.

    • clean: +depth config.

      +
    • +
    • clean: Delete data out of the cache folder. If an argument is provided, then it specifies a subpath to delete. If no argument is provided, then -the entire cache is cleared.

    - -

    DETAILS

    - +the entire cache is cleared.

    +
  • +
+

DETAILS

npm stores cache data in the directory specified in npm config get cache. For each package that is added to the cache, three pieces of information are stored in {cache}/{name}/{version}:

- -
  • .../package/: -A folder containing the package contents as they appear in the tarball.
  • .../package.json: -The package.json file, as npm sees it, with overlays applied and a _id attribute.
  • .../package.tgz: -The tarball for that version.
- +
    +
  • .../package/package.json: +The package.json file, as npm sees it.
  • +
  • .../package.tgz: +The tarball for that version.
  • +

Additionally, whenever a registry request is made, a .cache.json file is placed at the corresponding URI, to store the ETag and the requested -data.

- +data. This is stored in {cache}/{hostname}/{path}/.cache.json.

Commands that make non-essential registry requests (such as search and view, or the completion scripts) generally specify a minimum timeout. If the .cache.json file is younger than the specified timeout, then they do not make an HTTP request to the registry.

- -

CONFIGURATION

- +

CONFIGURATION

cache

-

Default: ~/.npm on Posix, or %AppData%/npm-cache on Windows.

-

The root cache folder.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -79,5 +81,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-completion.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-completion.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-completion.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-completion.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,31 +10,27 @@

npm-completion

Tab Completion for npm

- -

SYNOPSIS

- -
. <(npm completion)
- -

DESCRIPTION

- +

SYNOPSIS

+
. <(npm completion)
+

DESCRIPTION

Enables tab-completion in all npm commands.

-

The synopsis above loads the completions into your current shell. Adding it to your ~/.bashrc or ~/.zshrc will make the completions available everywhere.

-

You may of course also pipe the output of npm completion to a file such as /usr/local/etc/bash_completion.d/npm if you have a system that will read that file for you.

-

When COMP_CWORD, COMP_LINE, and COMP_POINT are defined in the environment, npm completion acts in "plumbing mode", and outputs completions based on the arguments.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -46,5 +42,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-config.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-config.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-config.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-config.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,9 +10,7 @@

npm-config

Manage the npm configuration files

- -

SYNOPSIS

- +

SYNOPSIS

npm config set <key> <value> [--global]
 npm config get <key>
 npm config delete <key>
@@ -20,61 +18,43 @@
 npm config edit
 npm c [set|get|delete|list]
 npm get <key>
-npm set <key> <value> [--global]
- -

DESCRIPTION

- +npm set <key> <value> [--global] +

DESCRIPTION

npm gets its config settings from the command line, environment variables, npmrc files, and in some cases, the package.json file.

- -

See npmrc(5) for more information about the npmrc files.

- -

See npm-config(7) for a more thorough discussion of the mechanisms +

See npmrc(5) for more information about the npmrc files.

+

See npm-config(7) for a more thorough discussion of the mechanisms involved.

-

The npm config command can be used to update and edit the contents of the user and global npmrc files.

- -

Sub-commands

- +

Sub-commands

Config supports the following sub-commands:

-

set

- -
npm config set key value
- -

Sets the config key to the value.

- +
npm config set key value
+

Sets the config key to the value.

If value is omitted, then it sets it to "true".

-

get

- -
npm config get key
- -

Echo the config value to stdout.

- +
npm config get key
+

Echo the config value to stdout.

list

- -
npm config list
- -

Show all the config settings.

- +
npm config list
+

Show all the config settings.

delete

- -
npm config delete key
- -

Deletes the key from all configuration files.

- +
npm config delete key
+

Deletes the key from all configuration files.

edit

- -
npm config edit
- -

Opens the config file in an editor. Use the --global flag to edit the +

npm config edit
+

Opens the config file in an editor. Use the --global flag to edit the global config.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -86,5 +66,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-dedupe.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-dedupe.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-dedupe.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-dedupe.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,60 +10,48 @@

npm-dedupe

Reduce duplication

- -

SYNOPSIS

- +

SYNOPSIS

npm dedupe [package names...]
-npm ddp [package names...]
- -

DESCRIPTION

- +npm ddp [package names...] +

DESCRIPTION

Searches the local package tree and attempts to simplify the overall structure by moving dependencies further up the tree, where they can be more effectively shared by multiple dependent packages.

-

For example, consider this dependency graph:

-
a
 +-- b <-- depends on c@1.0.x
 |   `-- c@1.0.3
 `-- d <-- depends on c@~1.0.9
-    `-- c@1.0.10
- -

In this case, npm-dedupe(1) will transform the tree to:

- + `-- c@1.0.10 +

In this case, npm-dedupe(1) will transform the tree to:

a
 +-- b
 +-- d
-`-- c@1.0.10
- -

Because of the hierarchical nature of node's module lookup, b and d +`-- c@1.0.10 +

Because of the hierarchical nature of node's module lookup, b and d will both get their dependency met by the single c package at the root level of the tree.

-

If a suitable version exists at the target location in the tree already, then it will be left untouched, but the other duplicates will be deleted.

-

If no suitable version can be found, then a warning is printed, and nothing is done.

-

If any arguments are supplied, then they are filters, and only the named packages will be touched.

-

Note that this operation transforms the dependency tree, and may result in packages getting updated versions, perhaps from the npm registry.

-

This feature is experimental, and may change in future versions.

-

The --tag argument will apply to all of the affected dependencies. If a tag with the given name exists, the tagged version is preferred over newer versions.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -75,5 +63,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-deprecate.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-deprecate.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-deprecate.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-deprecate.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,29 +10,23 @@

npm-deprecate

Deprecate a version of a package

- -

SYNOPSIS

- -
npm deprecate <name>[@<version>] <message>
- -

DESCRIPTION

- +

SYNOPSIS

+
npm deprecate <name>[@<version>] <message>
+

DESCRIPTION

This command will update the npm registry entry for a package, providing a deprecation warning to all who attempt to install it.

-

It works on version ranges as well as specific versions, so you can do something like this:

- -
npm deprecate my-thing@"< 0.2.3" "critical bug fixed in v0.2.3"
- -

Note that you must be the package owner to deprecate something. See the +

npm deprecate my-thing@"< 0.2.3" "critical bug fixed in v0.2.3"
+

Note that you must be the package owner to deprecate something. See the owner and adduser help topics.

-

To un-deprecate a package, specify an empty string ("") for the message argument.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -44,5 +38,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-docs.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-docs.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-docs.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-docs.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,39 +10,41 @@

npm-docs

Docs for a package in a web browser maybe

- -

SYNOPSIS

- +

SYNOPSIS

npm docs [<pkgname> [<pkgname> ...]]
 npm docs (with no args in a package dir)
 npm home [<pkgname> [<pkgname> ...]]
-npm home (with no args in a package dir)
- -

DESCRIPTION

- +npm home (with no args in a package dir) +

DESCRIPTION

This command tries to guess at the likely location of a package's documentation URL, and then tries to open it using the --browser config param. You can pass multiple package names at once. If no package name is provided, it will search for a package.json in the current folder and use the name property.

- -

CONFIGURATION

- +

CONFIGURATION

browser

- -
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • Type: String
- +
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +

The browser that is called by the npm docs command to open websites.

-

registry

- -
  • Default: https://registry.npmjs.org/
  • Type: url
- +

The base URL of the npm package registry.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -54,5 +56,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-edit.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-edit.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-edit.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-edit.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,35 +10,34 @@

npm-edit

Edit an installed package

- -

SYNOPSIS

- -
npm edit <name>[@<version>]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm edit <name>[@<version>]
+

DESCRIPTION

Opens the package folder in the default editor (or whatever you've -configured as the npm editor config -- see npm-config(7).)

- +configured as the npm editor config -- see npm-config(7).)

After it has been edited, the package is rebuilt so as to pick up any changes in compiled packages.

-

For instance, you can do npm install connect to install connect into your package, and then npm edit connect to make a few changes to your locally installed copy.

- -

CONFIGURATION

- +

CONFIGURATION

editor

- -
  • Default: EDITOR environment variable if set, or "vi" on Posix, -or "notepad" on Windows.
  • Type: path
- +
    +
  • Default: EDITOR environment variable if set, or "vi" on Posix, +or "notepad" on Windows.
  • +
  • Type: path
  • +

The command to run for npm edit or npm config edit.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -50,5 +49,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-explore.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-explore.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-explore.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-explore.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,38 +10,34 @@

npm-explore

Browse an installed package

- -

SYNOPSIS

- -
npm explore <name>[@<version>] [ -- <cmd>]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm explore <name> [ -- <cmd>]
+

DESCRIPTION

Spawn a subshell in the directory of the installed package specified.

-

If a command is specified, then it is run in the subshell, which then immediately terminates.

-

This is particularly handy in the case of git submodules in the node_modules folder:

- -
npm explore some-dependency -- git pull origin master
- -

Note that the package is not automatically rebuilt afterwards, so be +

npm explore some-dependency -- git pull origin master
+

Note that the package is not automatically rebuilt afterwards, so be sure to use npm rebuild <pkg> if you make any changes.

- -

CONFIGURATION

- +

CONFIGURATION

shell

- -
  • Default: SHELL environment variable, or "bash" on Posix, or "cmd" on -Windows
  • Type: path
- +
    +
  • Default: SHELL environment variable, or "bash" on Posix, or "cmd" on +Windows
  • +
  • Type: path
  • +

The shell to run for the npm explore command.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -53,5 +49,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-help.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-help.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-help.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-help.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,34 +10,37 @@

npm-help

Get help on npm

- -

SYNOPSIS

- +

SYNOPSIS

npm help <topic>
-npm help some search terms
- -

DESCRIPTION

- +npm help some search terms +

DESCRIPTION

If supplied a topic, then show the appropriate documentation page.

-

If the topic does not exist, or if multiple terms are provided, then run the help-search command to find a match. Note that, if help-search finds a single subject, then it will run help on that topic, so unique matches are equivalent to specifying a topic name.

- -

CONFIGURATION

- +

CONFIGURATION

viewer

- -
  • Default: "man" on Posix, "browser" on Windows
  • Type: path
- +
    +
  • Default: "man" on Posix, "browser" on Windows
  • +
  • Type: path
  • +

The program to use to view help content.

-

Set to "browser" to view html help content in the default web browser.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -49,5 +52,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-help-search.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-help-search.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-help-search.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-help-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,36 +10,31 @@

npm-help-search

Search npm help documentation

- -

SYNOPSIS

- -
npm help-search some search terms
- -

DESCRIPTION

- +

SYNOPSIS

+
npm help-search some search terms
+

DESCRIPTION

This command will search the npm markdown documentation files for the terms provided, and then list the results, sorted by relevance.

-

If only one result is found, then it will show that help topic.

-

If the argument to npm help is not a known help topic, then it will call help-search. It is rarely if ever necessary to call this command directly.

- -

CONFIGURATION

- +

CONFIGURATION

long

- -
  • Type: Boolean
  • Default false
- +
    +
  • Type: Boolean
  • +
  • Default false
  • +

If true, the "long" flag will cause help-search to output context around where the terms were found in the documentation.

-

If false, then help-search will just list out the help topics found.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -51,5 +46,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm.html nodejs-0.11.15/deps/npm/html/doc/cli/npm.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,129 +10,139 @@

npm

node package manager

- -

SYNOPSIS

- -
npm <command> [args]
- -

VERSION

- -

1.4.9

- -

DESCRIPTION

- +

SYNOPSIS

+
npm <command> [args]
+

VERSION

+

2.1.6

+

DESCRIPTION

npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency conflicts intelligently.

-

It is extremely configurable to support a wide variety of use cases. Most commonly, it is used to publish, discover, install, and develop node programs.

-

Run npm help to get a list of available commands.

- -

INTRODUCTION

- +

INTRODUCTION

You probably got npm because you want to install stuff.

-

Use npm install blerg to install the latest version of "blerg". Check out -npm-install(1) for more info. It can do a lot of stuff.

- +npm-install(1) for more info. It can do a lot of stuff.

Use the npm search command to show everything that's available. Use npm ls to show everything you've installed.

- -

DIRECTORIES

- -

See npm-folders(5) to learn about where npm puts stuff.

- +

DEPENDENCIES

+

If a package references to another package with a git URL, npm depends +on a preinstalled git.

+

If one of the packages npm tries to install is a native node module and +requires compiling of C++ Code, npm will use +node-gyp for that task. +For a Unix system, node-gyp +needs Python, make and a buildchain like GCC. On Windows, +Python and Microsoft Visual Studio C++ is needed. Python 3 is +not supported by node-gyp. +For more information visit +the node-gyp repository and +the node-gyp Wiki.

+

DIRECTORIES

+

See npm-folders(5) to learn about where npm puts stuff.

In particular, npm has two modes of operation:

- -
  • global mode:
    npm installs packages into the install prefix at -prefix/lib/node_modules and bins are installed in prefix/bin.
  • local mode:
    npm installs packages into the current project directory, which +
      +
    • global mode:
      npm installs packages into the install prefix at +prefix/lib/node_modules and bins are installed in prefix/bin.
    • +
    • local mode:
      npm installs packages into the current project directory, which defaults to the current working directory. Packages are installed to -./node_modules, and bins are installed to ./node_modules/.bin.
    - +./node_modules, and bins are installed to ./node_modules/.bin.
  • +

Local mode is the default. Use --global or -g on any command to operate in global mode instead.

- -

DEVELOPER USAGE

- +

DEVELOPER USAGE

If you're using npm to develop and publish your code, check out the following help topics:

- -
  • json: -Make a package.json file. See package.json(5).
  • link: +
      +
    • json: +Make a package.json file. See package.json(5).
    • +
    • link: For linking your current working code into Node's path, so that you don't have to reinstall every time you make a change. Use -npm link to do this.
    • install: +npm link to do this.
    • +
    • install: It's a good idea to install things if you don't need the symbolic link. Especially, installing other peoples code from the registry is done via -npm install
    • adduser: +npm install
    • +
    • adduser: Create an account or log in. Credentials are stored in the -user config file.
    • publish: -Use the npm publish command to upload your code to the registry.
    - -

    CONFIGURATION

    - +user config file.
  • +
  • publish: +Use the npm publish command to upload your code to the registry.
  • +
+

CONFIGURATION

npm is extremely configurable. It reads its configuration options from 5 places.

- -
  • Command line switches:
    Set a config with --key val. All keys take a value, even if they +
      +
    • Command line switches:
      Set a config with --key val. All keys take a value, even if they are booleans (the config parser doesn't know what the options are at the time of parsing.) If no value is provided, then the option is set -to boolean true.
    • Environment Variables:
      Set any config by prefixing the name in an environment variable with -npm_config_. For example, export npm_config_key=val.
    • User Configs:
      The file at $HOME/.npmrc is an ini-formatted list of configs. If +to boolean true.
    • +
    • Environment Variables:
      Set any config by prefixing the name in an environment variable with +npm_config_. For example, export npm_config_key=val.
    • +
    • User Configs:
      The file at $HOME/.npmrc is an ini-formatted list of configs. If present, it is parsed. If the userconfig option is set in the cli -or env, then that will be used instead.
    • Global Configs:
      The file found at ../etc/npmrc (from the node executable, by default +or env, then that will be used instead.
    • +
    • Global Configs:
      The file found at ../etc/npmrc (from the node executable, by default this resolves to /usr/local/etc/npmrc) will be parsed if it is found. If the globalconfig option is set in the cli, env, or user config, -then that file is parsed instead.
    • Defaults:
      npm's default configuration options are defined in -lib/utils/config-defs.js. These must not be changed.
    - -

    See npm-config(7) for much much more information.

    - -

    CONTRIBUTIONS

    - +then that file is parsed instead.
  • +
  • Defaults:
    npm's default configuration options are defined in +lib/utils/config-defs.js. These must not be changed.
  • +
+

See npm-config(7) for much much more information.

+

CONTRIBUTIONS

Patches welcome!

- -
  • code: -Read through npm-coding-style(7) if you plan to submit code. -You don't have to agree with it, but you do have to follow it.
  • docs: +
      +
    • code: +Read through npm-coding-style(7) if you plan to submit code. +You don't have to agree with it, but you do have to follow it.
    • +
    • docs: If you find an error in the documentation, edit the appropriate markdown -file in the "doc" folder. (Don't worry about generating the man page.)
    - +file in the "doc" folder. (Don't worry about generating the man page.)
  • +

Contributors are listed in npm's package.json file. You can view them easily by doing npm view npm contributors.

-

If you would like to contribute, but don't know what to work on, check the issues list or ask on the mailing list.

- - - -

BUGS

- + +

BUGS

When you find issues, please report them:

- - - +

Be sure to include all of the output from the npm command that didn't work as expected. The npm-debug.log file is also helpful to provide.

-

You can also look for isaacs in #node.js on irc://irc.freenode.net. He will no doubt tell you to put the output in a gist or email.

- -

AUTHOR

- +

AUTHOR

Isaac Z. Schlueter :: isaacs :: @izs :: -i@izs.me

- -

SEE ALSO

+i@izs.me

+

SEE ALSO

+ -
@@ -144,5 +154,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-init.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-init.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-init.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-init.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,27 +10,25 @@

npm-init

Interactively create a package.json file

- -

SYNOPSIS

- -
npm init
- -

DESCRIPTION

- +

SYNOPSIS

+
npm init [-f|--force|-y|--yes]
+

DESCRIPTION

This will ask you a bunch of questions, and then write a package.json for you.

-

It attempts to make reasonable guesses about what you want things to be set to, and then writes a package.json file with the options you've selected.

-

If you already have a package.json file, it'll read that first, and default to the options in there.

-

It is strictly additive, so it does not delete options from your package.json without a really good reason to do so.

+

If you invoke it with -f, --force, -y, or --yes, it will use only +defaults and not prompt you for any options.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -42,5 +40,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-install.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-install.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-install.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-install.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,103 +10,172 @@

npm-install

Install a package

- -

SYNOPSIS

- +

SYNOPSIS

npm install (with no args in a package dir)
 npm install <tarball file>
 npm install <tarball url>
 npm install <folder>
-npm install <name> [--save|--save-dev|--save-optional] [--save-exact]
-npm install <name>@<tag>
-npm install <name>@<version>
-npm install <name>@<version range>
-npm i (with any of the previous argument usage)
- -

DESCRIPTION

- +npm install [@<scope>/]<name> [--save|--save-dev|--save-optional] [--save-exact] +npm install [@<scope>/]<name>@<tag> +npm install [@<scope>/]<name>@<version> +npm install [@<scope>/]<name>@<version range> +npm i (with any of the previous argument usage) +

DESCRIPTION

This command installs a package, and any packages that it depends on. If the package has a shrinkwrap file, the installation of dependencies will be driven -by that. See npm-shrinkwrap(1).

- +by that. See npm-shrinkwrap(1).

A package is:

- -
  • a) a folder containing a program described by a package.json file
  • b) a gzipped tarball containing (a)
  • c) a url that resolves to (b)
  • d) a <name>@<version> that is published on the registry (see npm-registry(7)) with (c)
  • e) a <name>@<tag> that points to (d)
  • f) a <name> that has a "latest" tag satisfying (e)
  • g) a <git remote url> that resolves to (b)
- +
    +
  • a) a folder containing a program described by a package.json file
  • +
  • b) a gzipped tarball containing (a)
  • +
  • c) a url that resolves to (b)
  • +
  • d) a <name>@<version> that is published on the registry (see npm-registry(7)) with (c)
  • +
  • e) a <name>@<tag> that points to (d)
  • +
  • f) a <name> that has a "latest" tag satisfying (e)
  • +
  • g) a <git remote url> that resolves to (b)
  • +

Even if you never publish your package, you can still get a lot of benefits of using npm if you just want to write a node program (a), and perhaps if you also want to be able to easily install it elsewhere after packing it up into a tarball (b).

- -
  • npm install (in package directory, no arguments):

    Install the dependencies in the local node_modules folder.

    In global mode (ie, with -g or --global appended to the command), -it installs the current package context (ie, the current working -directory) as a global package.

    By default, npm install will install all modules listed as -dependencies. With the --production flag, -npm will not install modules listed in devDependencies.

  • npm install <folder>:

    Install a package that is sitting in a folder on the filesystem.

  • npm install <tarball file>:

    Install a package that is sitting on the filesystem. Note: if you just want -to link a dev directory into your npm root, you can do this more easily by -using npm link.

    Example:

      npm install ./package.tgz
  • npm install <tarball url>:

    Fetch the tarball url, and then install it. In order to distinguish between -this and other options, the argument must start with "http://" or "https://"

    Example:

      npm install https://github.com/indexzero/forever/tarball/v0.5.6
  • npm install <name> [--save|--save-dev|--save-optional]:

    Do a <name>@<tag> install, where <tag> is the "tag" config. (See -npm-config(7).)

    In most cases, this will install the latest version -of the module published on npm.

    Example:

    npm install sax

    npm install takes 3 exclusive, optional flags which save or update -the package version in your main package.json:

    • --save: Package will appear in your dependencies.

    • --save-dev: Package will appear in your devDependencies.

    • --save-optional: Package will appear in your optionalDependencies.

      When using any of the above options to save dependencies to your -package.json, there is an additional, optional flag:

    • --save-exact: Saved dependencies will be configured with an +

        +
      • npm install (in package directory, no arguments):

        +

        Install the dependencies in the local node_modules folder.

        +

        In global mode (ie, with -g or --global appended to the command), + it installs the current package context (ie, the current working + directory) as a global package.

        +

        By default, npm install will install all modules listed as + dependencies. With the --production flag, + npm will not install modules listed in devDependencies.

        +
      • +
      • npm install <folder>:

        +

        Install a package that is sitting in a folder on the filesystem.

        +
      • +
      • npm install <tarball file>:

        +

        Install a package that is sitting on the filesystem. Note: if you just want + to link a dev directory into your npm root, you can do this more easily by + using npm link.

        +

        Example:

        +
            npm install ./package.tgz
        +
      • +
      • npm install <tarball url>:

        +

        Fetch the tarball url, and then install it. In order to distinguish between + this and other options, the argument must start with "http://" or "https://"

        +

        Example:

        +
            npm install https://github.com/indexzero/forever/tarball/v0.5.6
        +
      • +
      • npm install [@<scope>/]<name> [--save|--save-dev|--save-optional]:

        +

        Do a <name>@<tag> install, where <tag> is the "tag" config. (See + npm-config(7).)

        +

        In most cases, this will install the latest version + of the module published on npm.

        +

        Example:

        +
            npm install sax
        +

        npm install takes 3 exclusive, optional flags which save or update + the package version in your main package.json:

        +
          +
        • --save: Package will appear in your dependencies.

          +
        • +
        • --save-dev: Package will appear in your devDependencies.

          +
        • +
        • --save-optional: Package will appear in your optionalDependencies.

          +

          When using any of the above options to save dependencies to your +package.json, there is an additional, optional flag:

          +
        • +
        • --save-exact: Saved dependencies will be configured with an exact version rather than using npm's default semver range -operator.

          Examples:

          npm install sax --save - npm install node-tap --save-dev - npm install dtrace-provider --save-optional - npm install readable-stream --save --save-exact

          Note: If there is a file or folder named <name> in the current +operator.

          +

          <scope> is optional. The package will be downloaded from the registry +associated with the specified scope. If no registry is associated with +the given scope the default registry is assumed. See npm-scope(7).

          +

          Note: if you do not include the @-symbol on your scope name, npm will +interpret this as a GitHub repository instead, see below. Scopes names +must also be followed by a slash.

          +

          Examples:

          +
          npm install sax --save
          +npm install githubname/reponame
          +npm install @myorg/privatepackage
          +npm install node-tap --save-dev
          +npm install dtrace-provider --save-optional
          +npm install readable-stream --save --save-exact
          +
        • +
        +
      • +
      +
      **Note**: If there is a file or folder named `<name>` in the current
       working directory, then it will try to install that, and only try to
      -fetch the package by name if it is not valid.

  • npm install <name>@<tag>:

    Install the version of the package that is referenced by the specified tag. -If the tag does not exist in the registry data for that package, then this -will fail.

    Example:

      npm install sax@latest
  • npm install <name>@<version>:

    Install the specified version of the package. This will fail if the version -has not been published to the registry.

    Example:

      npm install sax@0.1.1
  • npm install <name>@<version range>:

    Install a version of the package matching the specified version range. This -will follow the same rules for resolving dependencies described in package.json(5).

    Note that most version ranges must be put in quotes so that your shell will -treat it as a single argument.

    Example:

    npm install sax@">=0.1.0 <0.2.0"

  • npm install <git remote url>:

    Install a package by cloning a git remote url. The format of the git -url is:

    <protocol>://[<user>@]<hostname><separator><path>[#<commit-ish>]

    <protocol> is one of git, git+ssh, git+http, or -git+https. If no <commit-ish> is specified, then master is -used.

    Examples:

      git+ssh://git@github.com:npm/npm.git#v1.0.27
    -  git+https://isaacs@github.com/npm/npm.git
    -  git://github.com/npm/npm.git#v1.0.27
- +fetch the package by name if it is not valid. +
    +
  • npm install [@<scope>/]<name>@<tag>:

    +

    Install the version of the package that is referenced by the specified tag. + If the tag does not exist in the registry data for that package, then this + will fail.

    +

    Example:

    +
        npm install sax@latest
    +    npm install @myorg/mypackage@latest
    +
  • +
  • npm install [@<scope>/]<name>@<version>:

    +

    Install the specified version of the package. This will fail if the + version has not been published to the registry.

    +

    Example:

    +
        npm install sax@0.1.1
    +    npm install @myorg/privatepackage@1.5.0
    +
  • +
  • npm install [@<scope>/]<name>@<version range>:

    +

    Install a version of the package matching the specified version range. This + will follow the same rules for resolving dependencies described in package.json(5).

    +

    Note that most version ranges must be put in quotes so that your shell will + treat it as a single argument.

    +

    Example:

    +
        npm install sax@">=0.1.0 <0.2.0"
    +    npm install @myorg/privatepackage@">=0.1.0 <0.2.0"
    +
  • +
  • npm install <githubname>/<githubrepo>:

    +

    Install the package at https://github.com/githubname/githubrepo" by + attempting to clone it usinggit`.

    +

    Example:

    +
        npm install mygithubuser/myproject
    +

    To reference a package in a git repo that is not on GitHub, see git + remote urls below.

    +
  • +
  • npm install <git remote url>:

    +

    Install a package by cloning a git remote url. The format of the git + url is:

    +
        <protocol>://[<user>@]<hostname><separator><path>[#<commit-ish>]
    +

    <protocol> is one of git, git+ssh, git+http, or + git+https. If no <commit-ish> is specified, then master is + used.

    +

    Examples:

    +
        git+ssh://git@github.com:npm/npm.git#v1.0.27
    +    git+https://isaacs@github.com/npm/npm.git
    +    git://github.com/npm/npm.git#v1.0.27
    +
  • +

You may combine multiple arguments, and even multiple types of arguments. For example:

- -
npm install sax@">=0.1.0 <0.2.0" bench supervisor
- -

The --tag argument will apply to all of the specified install targets. If a +

npm install sax@">=0.1.0 <0.2.0" bench supervisor
+

The --tag argument will apply to all of the specified install targets. If a tag with the given name exists, the tagged version is preferred over newer versions.

-

The --force argument will force npm to fetch remote resources even if a local copy exists on disk.

- -
npm install sax --force
- -

The --global argument will cause npm to install the package globally -rather than locally. See npm-folders(5).

- +
npm install sax --force
+

The --global argument will cause npm to install the package globally +rather than locally. See npm-folders(5).

The --link argument will cause npm to link global installs into the local space in some cases.

-

The --no-bin-links argument will prevent npm from creating symlinks for any binaries the package might contain.

-

The --no-optional argument will prevent optional dependencies from being installed.

-

The --no-shrinkwrap argument, which will ignore an available shrinkwrap file and use the package.json instead.

-

The --nodedir=/path/to/node/source argument will allow npm to find the node source code so that npm can compile native modules.

- -

See npm-config(7). Many of the configuration params have some +

See npm-config(7). Many of the configuration params have some effect on installation, since that's most of what npm does.

- -

ALGORITHM

- +

ALGORITHM

To install a package, npm uses the following algorithm:

-
install(where, what, family, ancestors)
 fetch what, unpack to <where>/node_modules/<what>
 for each dep in what.dependencies
@@ -115,46 +184,50 @@
     not in <where>/node_modules/<what>/node_modules/*
     and not in <family>
   add precise version deps to <family>
-  install(<where>/node_modules/<what>, dep, family)
- -

For this package{dep} structure: A{B,C}, B{C}, C{D}, + install(<where>/node_modules/<what>, dep, family) +

For this package{dep} structure: A{B,C}, B{C}, C{D}, this algorithm produces:

-
A
 +-- B
 `-- C
-    `-- D
- -

That is, the dependency from B to C is satisfied by the fact that A + `-- D +

That is, the dependency from B to C is satisfied by the fact that A already caused C to be installed at a higher level.

- -

See npm-folders(5) for a more detailed description of the specific +

See npm-folders(5) for a more detailed description of the specific folder structures that npm creates.

- -

Limitations of npm's Install Algorithm

- +

Limitations of npm's Install Algorithm

There are some very rare and pathological edge-cases where a cycle can cause npm to try to install a never-ending tree of packages. Here is the simplest case:

- -
A -> B -> A' -> B' -> A -> B -> A' -> B' -> A -> ...
- -

where A is some version of a package, and A' is a different version +

A -> B -> A' -> B' -> A -> B -> A' -> B' -> A -> ...
+

where A is some version of a package, and A' is a different version of the same package. Because B depends on a different version of A than the one that is already in the tree, it must install a separate copy. The same is true of A', which must install B'. Because B' depends on the original version of A, which has been overridden, the cycle falls into infinite regress.

-

To avoid this situation, npm flat-out refuses to install any name@version that is already present anywhere in the tree of package folder ancestors. A more correct, but more complex, solution would be to symlink the existing version into the new location. If this ever affects a real use-case, it will be investigated.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -166,5 +239,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-link.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-link.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-link.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-link.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,60 +10,56 @@

npm-link

Symlink a package folder

- -

SYNOPSIS

- +

SYNOPSIS

npm link (in package folder)
-npm link <pkgname>
-npm ln (with any of the previous argument usage)
- -

DESCRIPTION

- +npm link [@<scope>/]<pkgname> +npm ln (with any of the previous argument usage) +

DESCRIPTION

Package linking is a two-step process.

-

First, npm link in a package folder will create a globally-installed -symbolic link from prefix/package-name to the current folder.

- +symbolic link from prefix/package-name to the current folder (see +npm-config(7) for the value of prefix).

Next, in some other location, npm link package-name will create a symlink from the local node_modules folder to the global symlink.

-

Note that package-name is taken from package.json, not from directory name.

- +

The package name can be optionally prefixed with a scope. See npm-scope(7). +The scope must by preceded by an @-symbol and followed by a slash.

When creating tarballs for npm publish, the linked packages are "snapshotted" to their current state by resolving the symbolic links.

- -

This is -handy for installing your own stuff, so that you can work on it and test it -iteratively without having to continually rebuild.

- +

This is handy for installing your own stuff, so that you can work on it and +test it iteratively without having to continually rebuild.

For example:

-
cd ~/projects/node-redis    # go into the package directory
 npm link                    # creates global link
 cd ~/projects/node-bloggy   # go into some other package directory.
-npm link redis              # link-install the package
- -

Now, any changes to ~/projects/node-redis will be reflected in +npm link redis # link-install the package +

Now, any changes to ~/projects/node-redis will be reflected in ~/projects/node-bloggy/node_modules/redis/

-

You may also shortcut the two steps in one. For example, to do the above use-case in a shorter way:

-
cd ~/projects/node-bloggy  # go into the dir of your main project
-npm link ../node-redis     # link the dir of your dependency
- -

The second line is the equivalent of doing:

- +npm link ../node-redis # link the dir of your dependency +

The second line is the equivalent of doing:

(cd ../node-redis; npm link)
-npm link redis
- -

That is, it first creates a global link, and then links the global +npm link redis +

That is, it first creates a global link, and then links the global installation target into your project's node_modules folder.

+

If your linked package is scoped (see npm-scope(7)) your link command must +include that scope, e.g.

+
npm link @myorg/privatepackage
+

SEE ALSO

+ -

SEE ALSO

- -
@@ -75,5 +71,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-ls.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-ls.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-ls.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-ls.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,72 +10,70 @@

npm-ls

List installed packages

- -

SYNOPSIS

- -
npm list [<pkg> ...]
-npm ls [<pkg> ...]
-npm la [<pkg> ...]
-npm ll [<pkg> ...]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm list [[@<scope>/]<pkg> ...]
+npm ls [[@<scope>/]<pkg> ...]
+npm la [[@<scope>/]<pkg> ...]
+npm ll [[@<scope>/]<pkg> ...]
+

DESCRIPTION

This command will print to stdout all the versions of packages that are installed, as well as their dependencies, in a tree-structure.

-

Positional arguments are name@version-range identifiers, which will limit the results to only the paths to the packages named. Note that nested packages will also show the paths to the specified packages. For example, running npm ls promzard in npm's source tree will show:

- -
npm@1.4.9 /path/to/npm
+
npm@2.1.6 /path/to/npm
 └─┬ init-package-json@0.0.4
-  └── promzard@0.1.5
- -

It will print out extraneous, missing, and invalid packages.

- + └── promzard@0.1.5 +

It will print out extraneous, missing, and invalid packages.

If a project specifies git urls for dependencies these are shown in parentheses after the name@version to make it easier for users to recognize potential forks of a project.

-

When run as ll or la, it shows extended information by default.

- -

CONFIGURATION

- +

CONFIGURATION

json

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show information in JSON format.

-

long

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show extended information.

-

parseable

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show parseable output instead of tree view.

-

global

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

List packages in the global install prefix instead of in the current project.

-

depth

- -
  • Type: Int
- +
    +
  • Type: Int
  • +

Max display depth of the dependency tree.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -87,5 +85,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-outdated.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-outdated.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-outdated.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-outdated.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,56 +10,52 @@

npm-outdated

Check for outdated packages

- -

SYNOPSIS

- -
npm outdated [<name> [<name> ...]]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm outdated [<name> [<name> ...]]
+

DESCRIPTION

This command will check the registry to see if any (or, specific) installed packages are currently outdated.

-

The resulting field 'wanted' shows the latest version according to the version specified in the package.json, the field 'latest' the very latest version of the package.

- -

CONFIGURATION

- +

CONFIGURATION

json

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show information in JSON format.

-

long

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show extended information.

-

parseable

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show parseable output instead of tree view.

-

global

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Check packages in the global install prefix instead of in the current project.

-

depth

- -
  • Type: Int
- +
    +
  • Type: Int
  • +

Max depth for checking dependency tree.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -71,5 +67,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-owner.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-owner.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-owner.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-owner.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,32 +10,34 @@

npm-owner

Manage package owners

- -

SYNOPSIS

- +

SYNOPSIS

npm owner ls <package name>
 npm owner add <user> <package name>
-npm owner rm <user> <package name>
- -

DESCRIPTION

- +npm owner rm <user> <package name> +

DESCRIPTION

Manage ownership of published packages.

- -
  • ls: +
      +
    • ls: List all the users who have access to modify a package and push new versions. -Handy when you need to know who to bug for help.
    • add: +Handy when you need to know who to bug for help.
    • +
    • add: Add a new user as a maintainer of a package. This user is enabled to modify -metadata, publish new versions, and add other owners.
    • rm: +metadata, publish new versions, and add other owners.
    • +
    • rm: Remove a user from the package owner list. This immediately revokes their -privileges.
    - +privileges.
  • +

Note that there is only one level of access. Either you can modify a package, or you can't. Future versions may contain more fine-grained access levels, but that is not implemented at this time.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -47,5 +49,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-pack.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-pack.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-pack.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-pack.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,27 +10,26 @@

npm-pack

Create a tarball from a package

- -

SYNOPSIS

- -
npm pack [<pkg> [<pkg> ...]]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm pack [<pkg> [<pkg> ...]]
+

DESCRIPTION

For anything that's installable (that is, a package folder, tarball, tarball url, name@tag, name@version, or name), this command will fetch it to the cache, and then copy the tarball to the current working directory as <name>-<version>.tgz, and then write the filenames out to stdout.

-

If the same package is specified multiple times, then the file will be overwritten the second time.

-

If no arguments are supplied, then npm packs the current package folder.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -42,5 +41,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-prefix.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-prefix.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-prefix.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-prefix.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,23 @@

npm-prefix

Display prefix

+

SYNOPSIS

+
npm prefix [-g]
+

DESCRIPTION

+

Print the local prefix to standard out. This is the closest parent directory +to contain a package.json file unless -g is also specified.

+

If -g is specified, this will be the value of the global prefix. See +npm-config(7) for more detail.

+

SEE ALSO

+ -

SYNOPSIS

- -
npm prefix
- -

DESCRIPTION

- -

Print the prefix to standard out.

- -

SEE ALSO

- -
@@ -33,5 +38,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-prune.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-prune.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-prune.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-prune.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,27 +10,24 @@

npm-prune

Remove extraneous packages

- -

SYNOPSIS

- +

SYNOPSIS

npm prune [<name> [<name ...]]
-npm prune [<name> [<name ...]] [--production]
- -

DESCRIPTION

- +npm prune [<name> [<name ...]] [--production] +

DESCRIPTION

This command removes "extraneous" packages. If a package name is provided, then only packages matching one of the supplied names are removed.

-

Extraneous packages are packages that are not listed on the parent package's dependencies list.

-

If the --production flag is specified, this command will remove the packages specified in your devDependencies.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -42,5 +39,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-publish.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-publish.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-publish.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-publish.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,34 +10,44 @@

npm-publish

Publish a package

- -

SYNOPSIS

- +

SYNOPSIS

npm publish <tarball> [--tag <tag>]
-npm publish <folder> [--tag <tag>]
- -

DESCRIPTION

- -

Publishes a package to the registry so that it can be installed by name.

- -
  • <folder>: -A folder containing a package.json file

  • <tarball>: +npm publish <folder> [--tag <tag>] +

    DESCRIPTION

    +

    Publishes a package to the registry so that it can be installed by name. See +npm-developers(7) for details on what's included in the published package, as +well as details on how the package is built.

    +

    By default npm will publish to the public registry. This can be overridden by +specifying a different default registry or using a npm-scope(7) in the name +(see package.json(5)).

    +
      +
    • <folder>: +A folder containing a package.json file

      +
    • +
    • <tarball>: A url or file path to a gzipped tar archive containing a single folder -with a package.json file inside.

    • [--tag <tag>] +with a package.json file inside.

      +
    • +
    • [--tag <tag>] Registers the published package with the given tag, such that npm install <name>@<tag> will install this version. By default, npm publish updates -and npm install installs the latest tag.

    - +and npm install installs the latest tag.

    +
  • +

Fails if the package name and version combination already exists in -the registry.

- +the specified registry.

Once a package is published with a given name and version, that specific name and version combination can never be used again, even if -it is removed with npm-unpublish(1).

- -

SEE ALSO

+it is removed with npm-unpublish(1).

+

SEE ALSO

+ -
@@ -49,5 +59,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-rebuild.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-rebuild.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-rebuild.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-rebuild.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,24 +10,23 @@

npm-rebuild

Rebuild a package

- -

SYNOPSIS

- +

SYNOPSIS

npm rebuild [<name> [<name> ...]]
-npm rb [<name> [<name> ...]]
- -
  • <name>: -The package to rebuild
- -

DESCRIPTION

- +npm rb [<name> [<name> ...]] +
    +
  • <name>: +The package to rebuild
  • +
+

DESCRIPTION

This command runs the npm build command on the matched folders. This is useful when you install a new version of node, and must recompile all your C++ addons with the new binary.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -39,5 +38,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-repo.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-repo.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-repo.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-repo.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,30 +10,27 @@

npm-repo

Open package repository page in the browser

- -

SYNOPSIS

- +

SYNOPSIS

npm repo <pkgname>
-npm repo (with no args in a package dir)
- -

DESCRIPTION

- +npm repo (with no args in a package dir) +

DESCRIPTION

This command tries to guess at the likely location of a package's repository URL, and then tries to open it using the --browser config param. If no package name is provided, it will search for a package.json in the current folder and use the name property.

- -

CONFIGURATION

- +

CONFIGURATION

browser

- -
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • Type: String
- +
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +

The browser that is called by the npm repo command to open websites.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -45,5 +42,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-restart.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-restart.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-restart.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-restart.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,22 +10,20 @@

npm-restart

Start a package

+

SYNOPSIS

+
npm restart [-- <args>]
+

DESCRIPTION

+

This runs a package's "restart" script, if one was provided. Otherwise it runs +package's "stop" script, if one was provided, and then the "start" script.

+

SEE ALSO

+ -

SYNOPSIS

- -
npm restart <name>
- -

DESCRIPTION

- -

This runs a package's "restart" script, if one was provided. -Otherwise it runs package's "stop" script, if one was provided, and then -the "start" script.

- -

If no version is specified, then it restarts the "active" version.

- -

SEE ALSO

- -
@@ -37,5 +35,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-rm.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-rm.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-rm.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-rm.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,22 +10,24 @@

npm-rm

Remove a package

- -

SYNOPSIS

- +

SYNOPSIS

npm rm <name>
 npm r <name>
 npm uninstall <name>
-npm un <name>
- -

DESCRIPTION

- +npm un <name> +

DESCRIPTION

This uninstalls a package, completely removing everything npm installed on its behalf.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -37,5 +39,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-root.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-root.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-root.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-root.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,20 @@

npm-root

Display npm root

- -

SYNOPSIS

- -
npm root
- -

DESCRIPTION

- +

SYNOPSIS

+
npm root
+

DESCRIPTION

Print the effective node_modules folder to standard out.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -33,5 +35,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-run-script.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-run-script.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-run-script.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-run-script.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,32 @@

npm-run-script

Run arbitrary package scripts

- -

SYNOPSIS

- -
npm run-script [<pkg>] <command>
- -

DESCRIPTION

- +

SYNOPSIS

+
npm run-script [command] [-- <args>]
+npm run [command] [-- <args>]
+

DESCRIPTION

This runs an arbitrary command from a package's "scripts" object. If no package name is provided, it will search for a package.json -in the current folder and use its "scripts" object.

- +in the current folder and use its "scripts" object. If no "command" +is provided, it will list the available top level scripts.

It is used by the test, start, restart, and stop commands, but can be called directly, as well.

+

As of npm@2.0.0, you can +use custom arguments when executing scripts. The special option -- is used by +getopt to delimit the end of the options. npm will pass +all the arguments after the -- directly to your script:

+
npm run test -- --grep="pattern"
+

The arguments will only be passed to the script specified after npm run +and not to any pre or post script.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -38,5 +47,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-search.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-search.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-search.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,35 +10,34 @@

npm-search

Search for packages

- -

SYNOPSIS

- +

SYNOPSIS

npm search [--long] [search terms ...]
 npm s [search terms ...]
-npm se [search terms ...]
- -

DESCRIPTION

- +npm se [search terms ...] +

DESCRIPTION

Search the registry for packages matching the search terms.

-

If a term starts with /, then it's interpreted as a regular expression. A trailing / will be ignored in this case. (Note that many regular expression characters must be escaped or quoted in most shells.)

- -

CONFIGURATION

- +

CONFIGURATION

long

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Display full package descriptions and other long text across multiple lines. When disabled (default) search results are truncated to fit neatly on a single line. Modules with extremely long names will fall on multiple lines.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -50,5 +49,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-shrinkwrap.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-shrinkwrap.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-shrinkwrap.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-shrinkwrap.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,13 @@

npm-shrinkwrap

Lock down dependency versions

- -

SYNOPSIS

- -
npm shrinkwrap
- -

DESCRIPTION

- +

SYNOPSIS

+
npm shrinkwrap
+

DESCRIPTION

This command locks down the versions of a package's dependencies so that you can control exactly which versions of each dependency will be used when your package is installed. The "package.json" file is still required if you want to use "npm install".

-

By default, "npm install" recursively installs the target's dependencies (as specified in package.json), choosing the latest available version that satisfies the dependency's semver pattern. In @@ -34,62 +29,47 @@ possible or desirable, as when another author owns the npm package. It's also possible to check dependencies directly into source control, but that may be undesirable for other reasons.

-

As an example, consider package A:

-
{
   "name": "A",
   "version": "0.1.0",
   "dependencies": {
     "B": "<0.1.0"
   }
-}
- -

package B:

- +} +

package B:

{
   "name": "B",
   "version": "0.0.1",
   "dependencies": {
     "C": "<0.1.0"
   }
-}
- -

and package C:

- +} +

and package C:

{
   "name": "C,
   "version": "0.0.1"
-}
- -

If these are the only versions of A, B, and C available in the +} +

If these are the only versions of A, B, and C available in the registry, then a normal "npm install A" will install:

-
A@0.1.0
 `-- B@0.0.1
-    `-- C@0.0.1
- -

However, if B@0.0.2 is published, then a fresh "npm install A" will + `-- C@0.0.1 +

However, if B@0.0.2 is published, then a fresh "npm install A" will install:

-
A@0.1.0
 `-- B@0.0.2
-    `-- C@0.0.1
- -

assuming the new version did not modify B's dependencies. Of course, + `-- C@0.0.1 +

assuming the new version did not modify B's dependencies. Of course, the new version of B could include a new version of C and any number of new dependencies. If such changes are undesirable, the author of A could specify a dependency on B@0.0.1. However, if A's author and B's author are not the same person, there's no way for A's author to say that he or she does not want to pull in newly published versions of C when B hasn't changed at all.

-

In this case, A's author can run

- -
npm shrinkwrap
- -

This generates npm-shrinkwrap.json, which will look something like this:

- +
npm shrinkwrap
+

This generates npm-shrinkwrap.json, which will look something like this:

{
   "name": "A",
   "version": "0.1.0",
@@ -103,9 +83,8 @@
       }
     }
   }
-}
- -

The shrinkwrap command has locked down the dependencies based on +} +

The shrinkwrap command has locked down the dependencies based on what's currently installed in node_modules. When "npm install" installs a package with a npm-shrinkwrap.json file in the package root, the shrinkwrap file (rather than package.json files) completely @@ -114,77 +93,66 @@ installs of this package will use B@0.0.1 and C@0.1.0, regardless the dependencies and versions listed in A's, B's, and C's package.json files.

- -

Using shrinkwrapped packages

- +

Using shrinkwrapped packages

Using a shrinkwrapped package is no different than using any other package: you can "npm install" it by hand, or add a dependency to your package.json file and "npm install" it.

- -

Building shrinkwrapped packages

- +

Building shrinkwrapped packages

To shrinkwrap an existing package:

- -
  1. Run "npm install" in the package root to install the current -versions of all dependencies.
  2. Validate that the package works as expected with these versions.
  3. Run "npm shrinkwrap", add npm-shrinkwrap.json to git, and publish -your package.
- +
    +
  1. Run "npm install" in the package root to install the current +versions of all dependencies.
  2. +
  3. Validate that the package works as expected with these versions.
  4. +
  5. Run "npm shrinkwrap", add npm-shrinkwrap.json to git, and publish +your package.
  6. +

To add or update a dependency in a shrinkwrapped package:

- -
  1. Run "npm install" in the package root to install the current -versions of all dependencies.
  2. Add or update dependencies. "npm install" each new or updated +
      +
    1. Run "npm install" in the package root to install the current +versions of all dependencies.
    2. +
    3. Add or update dependencies. "npm install" each new or updated package individually and then update package.json. Note that they must be explicitly named in order to be installed: running npm install with no arguments will merely reproduce the existing -shrinkwrap.
    4. Validate that the package works as expected with the new -dependencies.
    5. Run "npm shrinkwrap", commit the new npm-shrinkwrap.json, and -publish your package.
    - -

    You can use npm-outdated(1) to view dependencies with newer versions +shrinkwrap.

  3. +
  4. Validate that the package works as expected with the new +dependencies.
  5. +
  6. Run "npm shrinkwrap", commit the new npm-shrinkwrap.json, and +publish your package.
  7. +
+

You can use npm-outdated(1) to view dependencies with newer versions available.

- -

Other Notes

- +

Other Notes

A shrinkwrap file must be consistent with the package's package.json file. "npm shrinkwrap" will fail if required dependencies are not already installed, since that would result in a shrinkwrap that wouldn't actually work. Similarly, the command will fail if there are extraneous packages (not referenced by package.json), since that would indicate that package.json is not correct.

-

Since "npm shrinkwrap" is intended to lock down your dependencies for production use, devDependencies will not be included unless you explicitly set the --dev flag when you run npm shrinkwrap. If installed devDependencies are excluded, then npm will print a warning. If you want them to be installed with your module by default, please consider adding them to dependencies instead.

-

If shrinkwrapped package A depends on shrinkwrapped package B, B's shrinkwrap will not be used as part of the installation of A. However, because A's shrinkwrap is constructed from a valid installation of B and recursively specifies all dependencies, the contents of B's shrinkwrap will implicitly be included in A's shrinkwrap.

- -

Caveats

- -

Shrinkwrap files only lock down package versions, not actual package -contents. While discouraged, a package author can republish an -existing version of a package, causing shrinkwrapped packages using -that version to pick up different code than they were before. If you -want to avoid any risk that a byzantine author replaces a package -you're using with code that breaks your application, you could modify -the shrinkwrap file to use git URL references rather than version -numbers so that npm always fetches all packages from git.

- +

Caveats

If you wish to lock down the specific bytes included in a package, for example to have 100% confidence in being able to reproduce a deployment or build, then you ought to check your dependencies into source control, or pursue some other mechanism that can verify contents rather than versions.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -196,5 +164,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-star.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-star.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-star.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-star.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,24 +10,21 @@

npm-star

Mark your favorite packages

- -

SYNOPSIS

- +

SYNOPSIS

npm star <pkgname> [<pkg>, ...]
-npm unstar <pkgname> [<pkg>, ...]
- -

DESCRIPTION

- +npm unstar <pkgname> [<pkg>, ...] +

DESCRIPTION

"Starring" a package means that you have some interest in it. It's a vaguely positive way to show that you care.

-

"Unstarring" is the same thing, but in reverse.

-

It's a boolean thing. Starring repeatedly has no additional effect.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -39,5 +36,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-stars.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-stars.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-stars.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-stars.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,23 +10,22 @@

npm-stars

View packages marked as favorites

- -

SYNOPSIS

- +

SYNOPSIS

npm stars
-npm stars [username]
- -

DESCRIPTION

- +npm stars [username] +

DESCRIPTION

If you have starred a lot of neat things and want to find them again quickly this command lets you do just that.

-

You may also want to see your friend's favorite packages, in this case you will most certainly enjoy this command.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -38,5 +37,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-start.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-start.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-start.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-start.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,19 @@

npm-start

Start a package

- -

SYNOPSIS

- -
npm start <name>
- -

DESCRIPTION

- +

SYNOPSIS

+
npm start [-- <args>]
+

DESCRIPTION

This runs a package's "start" script, if one was provided.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -33,5 +34,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-stop.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-stop.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-stop.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-stop.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,19 @@

npm-stop

Stop a package

- -

SYNOPSIS

- -
npm stop <name>
- -

DESCRIPTION

- +

SYNOPSIS

+
npm stop [-- <args>]
+

DESCRIPTION

This runs a package's "stop" script, if one was provided.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -33,5 +34,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-submodule.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-submodule.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-submodule.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-submodule.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,31 +10,27 @@

npm-submodule

Add a package as a git submodule

- -

SYNOPSIS

- -
npm submodule <pkg>
- -

DESCRIPTION

- +

SYNOPSIS

+
npm submodule <pkg>
+

DESCRIPTION

If the specified package has a git repository url in its package.json description, then this command will add it as a git submodule at node_modules/<pkg name>.

-

This is a convenience only. From then on, it's up to you to manage updates by using the appropriate git commands. npm will stubbornly refuse to update, modify, or remove anything with a .git subfolder in it.

-

This command also does not install missing dependencies, if the package does not include them in its git repository. If npm ls reports that things are missing, you can either install, link, or submodule them yourself, or you can do npm explore <pkgname> -- npm install to install the dependencies into the submodule folder.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -46,5 +42,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-tag.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-tag.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-tag.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-tag.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,32 +10,29 @@

npm-tag

Tag a published version

- -

SYNOPSIS

- -
npm tag <name>@<version> [<tag>]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm tag <name>@<version> [<tag>]
+

DESCRIPTION

Tags the specified version of the package with the specified tag, or the --tag config if not specified.

-

A tag can be used when installing packages as a reference to a version instead of using a specific version number:

- -
npm install <name>@<tag>
- -

When installing dependencies, a preferred tagged version may be specified:

- -
npm install --tag <tag>
- -

This also applies to npm dedupe.

- +
npm install <name>@<tag>
+

When installing dependencies, a preferred tagged version may be specified:

+
npm install --tag <tag>
+

This also applies to npm dedupe.

Publishing a package always sets the "latest" tag to the published version.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -47,5 +44,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-test.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-test.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-test.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-test.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,22 +10,22 @@

npm-test

Test a package

- -

SYNOPSIS

- -
  npm test <name>
-  npm tst <name>
- -

DESCRIPTION

- +

SYNOPSIS

+
  npm test [-- <args>]
+  npm tst [-- <args>]
+

DESCRIPTION

This runs a package's "test" script, if one was provided.

-

To run tests as a condition of installation, set the npat config to true.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -37,5 +37,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-uninstall.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-uninstall.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-uninstall.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-uninstall.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,38 +10,42 @@

npm-rm

Remove a package

- -

SYNOPSIS

- -
npm uninstall <name> [--save|--save-dev|--save-optional]
-npm rm (with any of the previous argument usage)
- -

DESCRIPTION

- +

SYNOPSIS

+
npm uninstall [@<scope>/]<package> [--save|--save-dev|--save-optional]
+npm rm (with any of the previous argument usage)
+

DESCRIPTION

This uninstalls a package, completely removing everything npm installed on its behalf.

-

Example:

- -
npm uninstall sax
- -

In global mode (ie, with -g or --global appended to the command), +

npm uninstall sax
+

In global mode (ie, with -g or --global appended to the command), it uninstalls the current package context as a global package.

-

npm uninstall takes 3 exclusive, optional flags which save or update the package version in your main package.json:

- -
  • --save: Package will be removed from your dependencies.

  • --save-dev: Package will be removed from your devDependencies.

  • --save-optional: Package will be removed from your optionalDependencies.

- +
    +
  • --save: Package will be removed from your dependencies.

    +
  • +
  • --save-dev: Package will be removed from your devDependencies.

    +
  • +
  • --save-optional: Package will be removed from your optionalDependencies.

    +
  • +
+

Scope is optional and follows the usual rules for npm-scope(7).

Examples:

-
npm uninstall sax --save
+npm uninstall @myorg/privatepackage --save
 npm uninstall node-tap --save-dev
-npm uninstall dtrace-provider --save-optional
- -

SEE ALSO

+npm uninstall dtrace-provider --save-optional +

SEE ALSO

+ -
@@ -53,5 +57,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-unpublish.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-unpublish.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-unpublish.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-unpublish.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,36 +10,32 @@

npm-unpublish

Remove a package from the registry

- -

SYNOPSIS

- -
npm unpublish <name>[@<version>]
- -

WARNING

- +

SYNOPSIS

+
npm unpublish [@<scope>/]<name>[@<version>]
+

WARNING

It is generally considered bad behavior to remove versions of a library that others are depending on!

-

Consider using the deprecate command instead, if your intent is to encourage users to upgrade.

-

There is plenty of room on the registry.

- -

DESCRIPTION

- +

DESCRIPTION

This removes a package version from the registry, deleting its entry and removing the tarball.

-

If no version is specified, or if all versions are removed then the root package entry is removed from the registry entirely.

-

Even if a package version is unpublished, that specific name and version combination can never be reused. In order to publish the package again, a new version number must be used.

+

The scope is optional and follows the usual rules for npm-scope(7).

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -51,5 +47,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-update.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-update.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-update.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-update.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,24 +10,25 @@

npm-update

Update a package

- -

SYNOPSIS

- -
npm update [-g] [<name> [<name> ...]]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm update [-g] [<name> [<name> ...]]
+

DESCRIPTION

This command will update all the packages listed to the latest version (specified by the tag config).

-

It will also install missing packages.

+

If the -g flag is specified, this command will update globally installed +packages.

+

If no package name is specified, all packages in the specified location (global +or local) will be updated.

+

SEE ALSO

+ -

If the -g flag is specified, this command will update globally installed packages. -If no package name is specified, all packages in the specified location (global or local) will be updated.

- -

SEE ALSO

- -
@@ -39,5 +40,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-version.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-version.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-version.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-version.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,35 +10,25 @@

npm-version

Bump a package version

- -

SYNOPSIS

- -
npm version [<newversion> | major | minor | patch]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm version [<newversion> | major | minor | patch | premajor | preminor | prepatch | prerelease]
+

DESCRIPTION

Run this in a package directory to bump the version and write the new data back to the package.json file.

- -

The newversion argument should be a valid semver string, or a valid -second argument to semver.inc (one of "patch", "minor", or -"major"). In the second case, the existing version will be incremented -by 1 in the specified field.

- +

The newversion argument should be a valid semver string, or a +valid second argument to semver.inc (one of "patch", "minor", "major", +"prepatch", "preminor", "premajor", "prerelease"). In the second case, +the existing version will be incremented by 1 in the specified field.

If run in a git repo, it will also create a version commit and tag, and fail if the repo is not clean.

-

If supplied with --message (shorthand: -m) config option, npm will use it as a commit message when creating a version commit. If the message config contains %s then that will be replaced with the resulting version number. For example:

- -
npm version patch -m "Upgrade to %s for reasons"
- -

If the sign-git-tag config is set, then the tag will be signed using +

npm version patch -m "Upgrade to %s for reasons"
+

If the sign-git-tag config is set, then the tag will be signed using the -s flag to git. Note that you must have a default GPG key set up in your git config for this to work properly. For example:

-
$ npm config set sign-git-tag true
 $ npm version patch
 
@@ -46,11 +36,14 @@
 user: "isaacs (http://blog.izs.me/) <i@izs.me>"
 2048-bit RSA key, ID 6C481CF6, created 2010-08-31
 
-Enter passphrase:
- -

SEE ALSO

+Enter passphrase: +

SEE ALSO

+ -
@@ -62,5 +55,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-view.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-view.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-view.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-view.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,89 +10,67 @@

npm-view

View registry info

- -

SYNOPSIS

- -
npm view <name>[@<version>] [<field>[.<subfield>]...]
-npm v <name>[@<version>] [<field>[.<subfield>]...]
- -

DESCRIPTION

- +

SYNOPSIS

+
npm view [@<scope>/]<name>[@<version>] [<field>[.<subfield>]...]
+npm v [@<scope>/]<name>[@<version>] [<field>[.<subfield>]...]
+

DESCRIPTION

This command shows data about a package and prints it to the stream referenced by the outfd config, which defaults to stdout.

-

To show the package registry entry for the connect package, you can do this:

- -
npm view connect
- -

The default version is "latest" if unspecified.

- +
npm view connect
+

The default version is "latest" if unspecified.

Field names can be specified after the package descriptor. For example, to show the dependencies of the ronn package at version 0.3.5, you could do the following:

- -
npm view ronn@0.3.5 dependencies
- -

You can view child field by separating them with a period. +

npm view ronn@0.3.5 dependencies
+

You can view child field by separating them with a period. To view the git repository URL for the latest version of npm, you could do this:

- -
npm view npm repository.url
- -

This makes it easy to view information about a dependency with a bit of +

npm view npm repository.url
+

This makes it easy to view information about a dependency with a bit of shell scripting. For example, to view all the data about the version of opts that ronn depends on, you can do this:

- -
npm view opts@$(npm view ronn dependencies.opts)
- -

For fields that are arrays, requesting a non-numeric field will return +

npm view opts@$(npm view ronn dependencies.opts)
+

For fields that are arrays, requesting a non-numeric field will return all of the values from the objects in the list. For example, to get all the contributor names for the "express" project, you can do this:

- -
npm view express contributors.email
- -

You may also use numeric indices in square braces to specifically select +

npm view express contributors.email
+

You may also use numeric indices in square braces to specifically select an item in an array field. To just get the email address of the first contributor in the list, you can do this:

- -
npm view express contributors[0].email
- -

Multiple fields may be specified, and will be printed one after another. +

npm view express contributors[0].email
+

Multiple fields may be specified, and will be printed one after another. For exampls, to get all the contributor names and email addresses, you can do this:

- -
npm view express contributors.name contributors.email
- -

"Person" fields are shown as a string if they would be shown as an +

npm view express contributors.name contributors.email
+

"Person" fields are shown as a string if they would be shown as an object. So, for example, this will show the list of npm contributors in -the shortened string format. (See package.json(5) for more on this.)

- -
npm view npm contributors
- -

If a version range is provided, then data will be printed for every +the shortened string format. (See package.json(5) for more on this.)

+
npm view npm contributors
+

If a version range is provided, then data will be printed for every matching version of the package. This will show which version of jsdom was required by each matching version of yui3:

- -
npm view yui3@'>0.5.4' dependencies.jsdom
- -

OUTPUT

- +
npm view yui3@'>0.5.4' dependencies.jsdom
+

OUTPUT

If only a single string field for a single version is output, then it will not be colorized or quoted, so as to enable piping the output to another command. If the field is an object, it will be output as a JavaScript object literal.

-

If the --json flag is given, the outputted fields will be JSON.

-

If the version range matches multiple versions, than each printed value will be prefixed with the version it applies to.

-

If multiple fields are requested, than each of them are prefixed with the field name.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -104,5 +82,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/cli/npm-whoami.html nodejs-0.11.15/deps/npm/html/doc/cli/npm-whoami.html --- nodejs-0.11.13/deps/npm/html/doc/cli/npm-whoami.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/cli/npm-whoami.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,18 +10,18 @@

npm-whoami

Display npm username

- -

SYNOPSIS

- -
npm whoami
- -

DESCRIPTION

- +

SYNOPSIS

+
npm whoami
+

DESCRIPTION

Print the username config to standard output.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -33,5 +33,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/files/npm-folders.html nodejs-0.11.15/deps/npm/html/doc/files/npm-folders.html --- nodejs-0.11.13/deps/npm/html/doc/files/npm-folders.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/files/npm-folders.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,119 +10,92 @@

npm-folders

Folder Structures Used by npm

- -

DESCRIPTION

- +

DESCRIPTION

npm puts various things on your computer. That's its job.

-

This document will tell you what it puts where.

-

tl;dr

- -
  • Local install (default): puts stuff in ./node_modules of the current -package root.
  • Global install (with -g): puts stuff in /usr/local or wherever node -is installed.
  • Install it locally if you're going to require() it.
  • Install it globally if you're going to run it on the command line.
  • If you need both, then install it in both places, or use npm link.
- -

prefix Configuration

- +
    +
  • Local install (default): puts stuff in ./node_modules of the current +package root.
  • +
  • Global install (with -g): puts stuff in /usr/local or wherever node +is installed.
  • +
  • Install it locally if you're going to require() it.
  • +
  • Install it globally if you're going to run it on the command line.
  • +
  • If you need both, then install it in both places, or use npm link.
  • +
+

prefix Configuration

The prefix config defaults to the location where node is installed. On most systems, this is /usr/local, and most of the time is the same as node's process.installPrefix.

-

On windows, this is the exact location of the node.exe binary. On Unix systems, it's one level up, since node is typically installed at {prefix}/bin/node rather than {prefix}/node.exe.

-

When the global flag is set, npm installs things into this prefix. When it is not set, it uses the root of the current package, or the current working directory if not in a package already.

- -

Node Modules

- +

Node Modules

Packages are dropped into the node_modules folder under the prefix. When installing locally, this means that you can require("packagename") to load its main module, or require("packagename/lib/path/to/sub/module") to load other modules.

-

Global installs on Unix systems go to {prefix}/lib/node_modules. Global installs on Windows go to {prefix}/node_modules (that is, no lib folder.)

- +

Scoped packages are installed the same way, except they are grouped together +in a sub-folder of the relevant node_modules folder with the name of that +scope prefix by the @ symbol, e.g. npm install @myorg/package would place +the package in {prefix}/node_modules/@myorg/package. See scopes(7) for +more details.

If you wish to require() a package, then install it locally.

- -

Executables

- +

Executables

When in global mode, executables are linked into {prefix}/bin on Unix, or directly into {prefix} on Windows.

-

When in local mode, executables are linked into ./node_modules/.bin so that they can be made available to scripts run through npm. (For example, so that a test runner will be in the path when you run npm test.)

- -

Man Pages

- +

Man Pages

When in global mode, man pages are linked into {prefix}/share/man.

-

When in local mode, man pages are not installed.

-

Man pages are not installed on Windows systems.

- -

Cache

- -

See npm-cache(1). Cache files are stored in ~/.npm on Posix, or +

Cache

+

See npm-cache(1). Cache files are stored in ~/.npm on Posix, or ~/npm-cache on Windows.

-

This is controlled by the cache configuration param.

- -

Temp Files

- +

Temp Files

Temporary files are stored by default in the folder specified by the tmp config, which defaults to the TMPDIR, TMP, or TEMP environment variables, or /tmp on Unix and c:\windows\temp on Windows.

-

Temp files are given a unique folder under this root for each run of the program, and are deleted upon successful exit.

- -

More Information

- +

More Information

When installing locally, npm first tries to find an appropriate prefix folder. This is so that npm install foo@1.2.3 will install to the sensible root of your package, even if you happen to have cded into some other folder.

-

Starting at the $PWD, npm will walk up the folder tree checking for a folder that contains either a package.json file, or a node_modules folder. If such a thing is found, then that is treated as the effective "current directory" for the purpose of running npm commands. (This behavior is inspired by and similar to git's .git-folder seeking logic when running git commands in a working dir.)

-

If no package root is found, then the current folder is used.

-

When you run npm install foo@1.2.3, then the package is loaded into the cache, and then unpacked into ./node_modules/foo. Then, any of foo's dependencies are similarly unpacked into ./node_modules/foo/node_modules/....

-

Any bin files are symlinked to ./node_modules/.bin/, so that they may be found by npm scripts when necessary.

- -

Global Installation

- +

Global Installation

If the global configuration is set to true, then npm will install packages "globally".

-

For global installation, packages are installed roughly the same way, but using the folders described above.

- -

Cycles, Conflicts, and Folder Parsimony

- +

Cycles, Conflicts, and Folder Parsimony

Cycles are handled using the property of node's module system that it walks up the directories looking for node_modules folders. So, at every stage, if a package is already installed in an ancestor node_modules folder, then it is not installed at the current location.

-

Consider the case above, where foo -> bar -> baz. Imagine if, in addition to that, baz depended on bar, so you'd have: foo -> bar -> baz -> bar -> baz .... However, since the folder @@ -130,21 +103,16 @@ put another copy of bar into .../baz/node_modules, since when it calls require("bar"), it will get the copy that is installed in foo/node_modules/bar.

-

This shortcut is only used if the exact same version would be installed in multiple nested node_modules folders. It is still possible to have a/node_modules/b/node_modules/a if the two "a" packages are different versions. However, without repeating the exact same package multiple times, an infinite regress will always be prevented.

-

Another optimization can be made by installing dependencies at the highest level possible, below the localized "target" folder.

- -

Example

- +

Example

Consider this dependency graph:

-
foo
 +-- blerg@1.2.5
 +-- bar@1.2.3
@@ -155,10 +123,8 @@
 |   `-- asdf@*
 `-- baz@1.2.3
     `-- quux@3.x
-        `-- bar
- -

In this case, we might expect a folder structure like this:

- + `-- bar +

In this case, we might expect a folder structure like this:

foo
 +-- node_modules
     +-- blerg (1.2.5) <---[A]
@@ -170,43 +136,43 @@
     |       `-- asdf (2.3.4)
     `-- baz (1.2.3) <---[D]
         `-- node_modules
-            `-- quux (3.2.0) <---[E]
- -

Since foo depends directly on bar@1.2.3 and baz@1.2.3, those are + `-- quux (3.2.0) <---[E] +

Since foo depends directly on bar@1.2.3 and baz@1.2.3, those are installed in foo's node_modules folder.

-

Even though the latest copy of blerg is 1.3.7, foo has a specific dependency on version 1.2.5. So, that gets installed at [A]. Since the parent installation of blerg satisfies bar's dependency on blerg@1.x, it does not install another copy under [B].

-

Bar [B] also has dependencies on baz and asdf, so those are installed in bar's node_modules folder. Because it depends on baz@2.x, it cannot re-use the baz@1.2.3 installed in the parent node_modules folder [D], and must install its own copy [C].

-

Underneath bar, the baz -> quux -> bar dependency creates a cycle. However, because bar is already in quux's ancestry [B], it does not unpack another copy of bar into that folder.

-

Underneath foo -> baz [D], quux's [E] folder tree is empty, because its dependency on bar is satisfied by the parent folder copy installed at [B].

-

For a graphical breakdown of what is installed where, use npm ls.

- -

Publishing

- +

Publishing

Upon publishing, npm will look in the node_modules folder. If any of the items there are not in the bundledDependencies array, then they will not be included in the package tarball.

-

This allows a package maintainer to install all of their dependencies (and dev dependencies) locally, but only re-publish those items that -cannot be found elsewhere. See package.json(5) for more information.

- -

SEE ALSO

+cannot be found elsewhere. See package.json(5) for more information.

+

SEE ALSO

+ -
@@ -218,5 +184,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/files/npm-global.html nodejs-0.11.15/deps/npm/html/doc/files/npm-global.html --- nodejs-0.11.13/deps/npm/html/doc/files/npm-global.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/files/npm-global.html 2015-01-20 21:22:17.000000000 +0000 @@ -1,128 +1,101 @@ - npm-folders + npm-global - +

npm-folders

Folder Structures Used by npm

- -

DESCRIPTION

- +

DESCRIPTION

npm puts various things on your computer. That's its job.

-

This document will tell you what it puts where.

-

tl;dr

- -
  • Local install (default): puts stuff in ./node_modules of the current -package root.
  • Global install (with -g): puts stuff in /usr/local or wherever node -is installed.
  • Install it locally if you're going to require() it.
  • Install it globally if you're going to run it on the command line.
  • If you need both, then install it in both places, or use npm link.
- -

prefix Configuration

- +
    +
  • Local install (default): puts stuff in ./node_modules of the current +package root.
  • +
  • Global install (with -g): puts stuff in /usr/local or wherever node +is installed.
  • +
  • Install it locally if you're going to require() it.
  • +
  • Install it globally if you're going to run it on the command line.
  • +
  • If you need both, then install it in both places, or use npm link.
  • +
+

prefix Configuration

The prefix config defaults to the location where node is installed. On most systems, this is /usr/local, and most of the time is the same as node's process.installPrefix.

-

On windows, this is the exact location of the node.exe binary. On Unix systems, it's one level up, since node is typically installed at {prefix}/bin/node rather than {prefix}/node.exe.

-

When the global flag is set, npm installs things into this prefix. When it is not set, it uses the root of the current package, or the current working directory if not in a package already.

- -

Node Modules

- +

Node Modules

Packages are dropped into the node_modules folder under the prefix. When installing locally, this means that you can require("packagename") to load its main module, or require("packagename/lib/path/to/sub/module") to load other modules.

-

Global installs on Unix systems go to {prefix}/lib/node_modules. Global installs on Windows go to {prefix}/node_modules (that is, no lib folder.)

- +

Scoped packages are installed the same way, except they are grouped together +in a sub-folder of the relevant node_modules folder with the name of that +scope prefix by the @ symbol, e.g. npm install @myorg/package would place +the package in {prefix}/node_modules/@myorg/package. See scopes(7) for +more details.

If you wish to require() a package, then install it locally.

- -

Executables

- +

Executables

When in global mode, executables are linked into {prefix}/bin on Unix, or directly into {prefix} on Windows.

-

When in local mode, executables are linked into ./node_modules/.bin so that they can be made available to scripts run through npm. (For example, so that a test runner will be in the path when you run npm test.)

- -

Man Pages

- +

Man Pages

When in global mode, man pages are linked into {prefix}/share/man.

-

When in local mode, man pages are not installed.

-

Man pages are not installed on Windows systems.

- -

Cache

- -

See npm-cache(1). Cache files are stored in ~/.npm on Posix, or +

Cache

+

See npm-cache(1). Cache files are stored in ~/.npm on Posix, or ~/npm-cache on Windows.

-

This is controlled by the cache configuration param.

- -

Temp Files

- +

Temp Files

Temporary files are stored by default in the folder specified by the tmp config, which defaults to the TMPDIR, TMP, or TEMP environment variables, or /tmp on Unix and c:\windows\temp on Windows.

-

Temp files are given a unique folder under this root for each run of the program, and are deleted upon successful exit.

- -

More Information

- +

More Information

When installing locally, npm first tries to find an appropriate prefix folder. This is so that npm install foo@1.2.3 will install to the sensible root of your package, even if you happen to have cded into some other folder.

-

Starting at the $PWD, npm will walk up the folder tree checking for a folder that contains either a package.json file, or a node_modules folder. If such a thing is found, then that is treated as the effective "current directory" for the purpose of running npm commands. (This behavior is inspired by and similar to git's .git-folder seeking logic when running git commands in a working dir.)

-

If no package root is found, then the current folder is used.

-

When you run npm install foo@1.2.3, then the package is loaded into the cache, and then unpacked into ./node_modules/foo. Then, any of foo's dependencies are similarly unpacked into ./node_modules/foo/node_modules/....

-

Any bin files are symlinked to ./node_modules/.bin/, so that they may be found by npm scripts when necessary.

- -

Global Installation

- +

Global Installation

If the global configuration is set to true, then npm will install packages "globally".

-

For global installation, packages are installed roughly the same way, but using the folders described above.

- -

Cycles, Conflicts, and Folder Parsimony

- +

Cycles, Conflicts, and Folder Parsimony

Cycles are handled using the property of node's module system that it walks up the directories looking for node_modules folders. So, at every stage, if a package is already installed in an ancestor node_modules folder, then it is not installed at the current location.

-

Consider the case above, where foo -> bar -> baz. Imagine if, in addition to that, baz depended on bar, so you'd have: foo -> bar -> baz -> bar -> baz .... However, since the folder @@ -130,21 +103,16 @@ put another copy of bar into .../baz/node_modules, since when it calls require("bar"), it will get the copy that is installed in foo/node_modules/bar.

-

This shortcut is only used if the exact same version would be installed in multiple nested node_modules folders. It is still possible to have a/node_modules/b/node_modules/a if the two "a" packages are different versions. However, without repeating the exact same package multiple times, an infinite regress will always be prevented.

-

Another optimization can be made by installing dependencies at the highest level possible, below the localized "target" folder.

- -

Example

- +

Example

Consider this dependency graph:

-
foo
 +-- blerg@1.2.5
 +-- bar@1.2.3
@@ -155,10 +123,8 @@
 |   `-- asdf@*
 `-- baz@1.2.3
     `-- quux@3.x
-        `-- bar
- -

In this case, we might expect a folder structure like this:

- + `-- bar +

In this case, we might expect a folder structure like this:

foo
 +-- node_modules
     +-- blerg (1.2.5) <---[A]
@@ -170,43 +136,43 @@
     |       `-- asdf (2.3.4)
     `-- baz (1.2.3) <---[D]
         `-- node_modules
-            `-- quux (3.2.0) <---[E]
- -

Since foo depends directly on bar@1.2.3 and baz@1.2.3, those are + `-- quux (3.2.0) <---[E] +

Since foo depends directly on bar@1.2.3 and baz@1.2.3, those are installed in foo's node_modules folder.

-

Even though the latest copy of blerg is 1.3.7, foo has a specific dependency on version 1.2.5. So, that gets installed at [A]. Since the parent installation of blerg satisfies bar's dependency on blerg@1.x, it does not install another copy under [B].

-

Bar [B] also has dependencies on baz and asdf, so those are installed in bar's node_modules folder. Because it depends on baz@2.x, it cannot re-use the baz@1.2.3 installed in the parent node_modules folder [D], and must install its own copy [C].

-

Underneath bar, the baz -> quux -> bar dependency creates a cycle. However, because bar is already in quux's ancestry [B], it does not unpack another copy of bar into that folder.

-

Underneath foo -> baz [D], quux's [E] folder tree is empty, because its dependency on bar is satisfied by the parent folder copy installed at [B].

-

For a graphical breakdown of what is installed where, use npm ls.

- -

Publishing

- +

Publishing

Upon publishing, npm will look in the node_modules folder. If any of the items there are not in the bundledDependencies array, then they will not be included in the package tarball.

-

This allows a package maintainer to install all of their dependencies (and dev dependencies) locally, but only re-publish those items that -cannot be found elsewhere. See package.json(5) for more information.

- -

SEE ALSO

+cannot be found elsewhere. See package.json(5) for more information.

+

SEE ALSO

+ -
@@ -218,5 +184,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/files/npm-json.html nodejs-0.11.15/deps/npm/html/doc/files/npm-json.html --- nodejs-0.11.13/deps/npm/html/doc/files/npm-json.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/files/npm-json.html 2015-01-20 21:22:17.000000000 +0000 @@ -1,266 +1,189 @@ - package.json + npm-json - +

package.json

Specifics of npm's package.json handling

- -

DESCRIPTION

- +

DESCRIPTION

This document is all you need to know about what's required in your package.json file. It must be actual JSON, not just a JavaScript object literal.

-

A lot of the behavior described in this document is affected by the config -settings described in npm-config(7).

- +settings described in npm-config(7).

name

-

The most important things in your package.json are the name and version fields. Those are actually required, and your package won't install without them. The name and version together form an identifier that is assumed to be completely unique. Changes to the package should come along with changes to the version.

-

The name is what your thing is called. Some tips:

- -
  • Don't put "js" or "node" in the name. It's assumed that it's js, since you're +
      +
    • Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using the "engines" -field. (See below.)
    • The name ends up being part of a URL, an argument on the command line, and a +field. (See below.)
    • +
    • The name ends up being part of a URL, an argument on the command line, and a folder name. Any name with non-url-safe characters will be rejected. -Also, it can't start with a dot or an underscore.
    • The name will probably be passed as an argument to require(), so it should -be something short, but also reasonably descriptive.
    • You may want to check the npm registry to see if there's something by that name -already, before you get too attached to it. http://registry.npmjs.org/
    - +Also, it can't start with a dot or an underscore.
  • +
  • The name will probably be passed as an argument to require(), so it should +be something short, but also reasonably descriptive.
  • +
  • You may want to check the npm registry to see if there's something by that name +already, before you get too attached to it. http://registry.npmjs.org/
  • +
+

A name can be optionally prefixed by a scope, e.g. @myorg/mypackage. See +npm-scope(7) for more detail.

version

-

The most important things in your package.json are the name and version fields. Those are actually required, and your package won't install without them. The name and version together form an identifier that is assumed to be completely unique. Changes to the package should come along with changes to the version.

-

Version must be parseable by node-semver, which is bundled with npm as a dependency. (npm install semver to use it yourself.)

- -

More on version numbers and ranges at semver(7).

- +

More on version numbers and ranges at semver(7).

description

-

Put a description in it. It's a string. This helps people discover your package, as it's listed in npm search.

-

keywords

-

Put keywords in it. It's an array of strings. This helps people discover your package as it's listed in npm search.

-

homepage

-

The url to the project homepage.

-

NOTE: This is not the same as "url". If you put a "url" field, then the registry will think it's a redirection to your package that has been published somewhere else, and spit at you.

-

Literally. Spit. I'm so not kidding.

-

bugs

-

The url to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package.

-

It should look like this:

-
{ "url" : "http://github.com/owner/project/issues"
 , "email" : "project@hostname.com"
-}
- -

You can specify either one or both values. If you want to provide only a url, +} +

You can specify either one or both values. If you want to provide only a url, you can specify the value for "bugs" as a simple string instead of an object.

-

If a url is provided, it will be used by the npm bugs command.

-

license

-

You should specify a license for your package so that people know how they are permitted to use it, and any restrictions you're placing on it.

-

The simplest way, assuming you're using a common license such as BSD-3-Clause or MIT, is to just specify the standard SPDX ID of the license you're using, like this:

- -
{ "license" : "BSD-3-Clause" }
- -

You can check the full list of SPDX license IDs. +

{ "license" : "BSD-3-Clause" }
+

You can check the full list of SPDX license IDs. Ideally you should pick one that is OSI approved.

-

It's also a good idea to include a LICENSE file at the top level in your package.

-

people fields: author, contributors

-

The "author" is one person. "contributors" is an array of people. A "person" is an object with a "name" field and optionally "url" and "email", like this:

-
{ "name" : "Barney Rubble"
 , "email" : "b@rubble.com"
 , "url" : "http://barnyrubble.tumblr.com/"
-}
- -

Or you can shorten that all into a single string, and npm will parse it for you:

- -
"Barney Rubble <b@rubble.com> (http://barnyrubble.tumblr.com/)
- -

Both email and url are optional either way.

- +} +

Or you can shorten that all into a single string, and npm will parse it for you:

+
"Barney Rubble <b@rubble.com> (http://barnyrubble.tumblr.com/)
+

Both email and url are optional either way.

npm also sets a top-level "maintainers" field with your npm user info.

-

files

-

The "files" field is an array of files to include in your project. If you name a folder in the array, then it will also include the files inside that folder. (Unless they would be ignored by another rule.)

-

You can also provide a ".npmignore" file in the root of your package, which will keep files from being included, even if they would be picked up by the files array. The ".npmignore" file works just like a ".gitignore".

-

main

-

The main field is a module ID that is the primary entry point to your program. That is, if your package is named foo, and a user installs it, and then does require("foo"), then your main module's exports object will be returned.

-

This should be a module ID relative to the root of your package folder.

-

For most modules, it makes the most sense to have a main script and often not much else.

-

bin

-

A lot of packages have one or more executable files that they'd like to install into the PATH. npm makes this pretty easy (in fact, it uses this feature to install the "npm" executable.)

-

To use this, supply a bin field in your package.json which is a map of command name to local file name. On install, npm will symlink that file into prefix/bin for global installs, or ./node_modules/.bin/ for local installs.

-

For example, npm has this:

- -
{ "bin" : { "npm" : "./cli.js" } }
- -

So, when you install npm, it'll create a symlink from the cli.js script to +

{ "bin" : { "npm" : "./cli.js" } }
+

So, when you install npm, it'll create a symlink from the cli.js script to /usr/local/bin/npm.

-

If you have a single executable, and its name should be the name of the package, then you can just supply it as a string. For example:

-
{ "name": "my-program"
 , "version": "1.2.5"
-, "bin": "./path/to/program" }
- -

would be the same as this:

- +, "bin": "./path/to/program" } +

would be the same as this:

{ "name": "my-program"
 , "version": "1.2.5"
-, "bin" : { "my-program" : "./path/to/program" } }
- -

man

- +, "bin" : { "my-program" : "./path/to/program" } } +

man

Specify either a single file or an array of filenames to put in place for the man program to find.

-

If only a single file is provided, then it's installed such that it is the result from man <pkgname>, regardless of its actual filename. For example:

-
{ "name" : "foo"
 , "version" : "1.2.3"
 , "description" : "A packaged foo fooer for fooing foos"
 , "main" : "foo.js"
 , "man" : "./man/doc.1"
-}
- -

would link the ./man/doc.1 file in such that it is the target for man foo

- +} +

would link the ./man/doc.1 file in such that it is the target for man foo

If the filename doesn't start with the package name, then it's prefixed. So, this:

-
{ "name" : "foo"
 , "version" : "1.2.3"
 , "description" : "A packaged foo fooer for fooing foos"
 , "main" : "foo.js"
 , "man" : [ "./man/foo.1", "./man/bar.1" ]
-}
- -

will create files to do man foo and man foo-bar.

- +} +

will create files to do man foo and man foo-bar.

Man files must end with a number, and optionally a .gz suffix if they are compressed. The number dictates which man section the file is installed into.

-
{ "name" : "foo"
 , "version" : "1.2.3"
 , "description" : "A packaged foo fooer for fooing foos"
 , "main" : "foo.js"
 , "man" : [ "./man/foo.1", "./man/foo.2" ]
-}
- -

will create entries for man foo and man 2 foo

- +} +

will create entries for man foo and man 2 foo

directories

-

The CommonJS Packages spec details a few ways that you can indicate the structure of your package using a directories -hash. If you look at npm's package.json, +object. If you look at npm's package.json, you'll see that it has directories for doc, lib, and man.

-

In the future, this information may be used in other creative ways.

-

directories.lib

-

Tell people where the bulk of your library is. Nothing special is done with the lib folder in any way, but it's useful meta info.

-

directories.bin

- -

If you specify a "bin" directory, then all the files in that folder will -be used as the "bin" hash.

- -

If you have a "bin" hash already, then this has no effect.

- +

If you specify a bin directory, then all the files in that folder will +be added as children of the bin path.

+

If you have a bin path already, then this has no effect.

directories.man

-

A folder that is full of man pages. Sugar to generate a "man" array by walking the folder.

-

directories.doc

-

Put markdown files in here. Eventually, these will be displayed nicely, maybe, someday.

-

directories.example

-

Put example scripts in here. Someday, it might be exposed in some clever way.

-

repository

-

Specify the place where your code lives. This is helpful for people who want to contribute. If the git repo is on github, then the npm docs command will be able to find you.

-

Do it like this:

-
"repository" :
   { "type" : "git"
   , "url" : "http://github.com/npm/npm.git"
@@ -269,52 +192,54 @@
 "repository" :
   { "type" : "svn"
   , "url" : "http://v8.googlecode.com/svn/trunk/"
-  }
- -

The URL should be a publicly available (perhaps read-only) url that can be handed + } +

The URL should be a publicly available (perhaps read-only) url that can be handed directly to a VCS program without any modification. It should not be a url to an html project page that you put in your browser. It's for computers.

-

scripts

- -

The "scripts" member is an object hash of script commands that are run +

The "scripts" property is a dictionary containing script commands that are run at various times in the lifecycle of your package. The key is the lifecycle event, and the value is the command to run at that point.

- -

See npm-scripts(7) to find out more about writing package scripts.

- +

See npm-scripts(7) to find out more about writing package scripts.

config

- -

A "config" hash can be used to set configuration -parameters used in package scripts that persist across upgrades. For -instance, if a package had the following:

- +

A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades. For instance, if a package had the +following:

{ "name" : "foo"
-, "config" : { "port" : "8080" } }
- -

and then had a "start" command that then referenced the +, "config" : { "port" : "8080" } } +

and then had a "start" command that then referenced the npm_package_config_port environment variable, then the user could override that by doing npm config set foo:port 8001.

- -

See npm-config(7) and npm-scripts(7) for more on package +

See npm-config(7) and npm-scripts(7) for more on package configs.

-

dependencies

- -

Dependencies are specified with a simple hash of package name to +

Dependencies are specified in a simple object that maps a package name to a version range. The version range is a string which has one or more -space-separated descriptors. Dependencies can also be identified with -a tarball or git URL.

- +space-separated descriptors. Dependencies can also be identified with a +tarball or git URL.

Please do not put test harnesses or transpilers in your -dependencies hash. See devDependencies, below.

- -

See semver(7) for more details about specifying version ranges.

- -
  • version Must match version exactly
  • >version Must be greater than version
  • >=version etc
  • <version
  • <=version
  • ~version "Approximately equivalent to version" See semver(7)
  • ^version "Compatible with version" See semver(7)
  • 1.2.x 1.2.0, 1.2.1, etc., but not 1.3.0
  • http://... See 'URLs as Dependencies' below
  • * Matches any version
  • "" (just an empty string) Same as *
  • version1 - version2 Same as >=version1 <=version2.
  • range1 || range2 Passes if either range1 or range2 are satisfied.
  • git... See 'Git URLs as Dependencies' below
  • user/repo See 'GitHub URLs' below
- +dependencies object. See devDependencies, below.

+

See semver(7) for more details about specifying version ranges.

+
    +
  • version Must match version exactly
  • +
  • >version Must be greater than version
  • +
  • >=version etc
  • +
  • <version
  • +
  • <=version
  • +
  • ~version "Approximately equivalent to version" See semver(7)
  • +
  • ^version "Compatible with version" See semver(7)
  • +
  • 1.2.x 1.2.0, 1.2.1, etc., but not 1.3.0
  • +
  • http://... See 'URLs as Dependencies' below
  • +
  • * Matches any version
  • +
  • "" (just an empty string) Same as *
  • +
  • version1 - version2 Same as >=version1 <=version2.
  • +
  • range1 || range2 Passes if either range1 or range2 are satisfied.
  • +
  • git... See 'Git URLs as Dependencies' below
  • +
  • user/repo See 'GitHub URLs' below
  • +
  • tag A specific version tagged and published as tag See npm-tag(1)
  • +
  • path/path/path See Local Paths below
  • +

For example, these are all valid:

-
{ "dependencies" :
   { "foo" : "1.0.0 - 2.9999.9999"
   , "bar" : ">=1.0.2 <2.1.2"
@@ -326,60 +251,65 @@
   , "elf" : "~1.2.3"
   , "two" : "2.x"
   , "thr" : "3.3.x"
+  , "lat" : "latest"
+  , "dyl" : "file:../dyl"
   }
-}
- -

URLs as Dependencies

- +} +

URLs as Dependencies

You may specify a tarball URL in place of a version range.

-

This tarball will be downloaded and installed locally to your package at install time.

- -

Git URLs as Dependencies

- +

Git URLs as Dependencies

Git urls can be of the form:

-
git://github.com/user/project.git#commit-ish
 git+ssh://user@hostname:project.git#commit-ish
 git+ssh://user@hostname/project.git#commit-ish
 git+http://user@hostname/project/blah.git#commit-ish
-git+https://user@hostname/project/blah.git#commit-ish
- -

The commit-ish can be any tag, sha, or branch which can be supplied as +git+https://user@hostname/project/blah.git#commit-ish +

The commit-ish can be any tag, sha, or branch which can be supplied as an argument to git checkout. The default is master.

- -

GitHub URLs

- +

GitHub URLs

As of version 1.1.65, you can refer to GitHub urls as just "foo": "user/foo-project". For example:

-
{
   "name": "foo",
   "version": "0.0.0",
   "dependencies": {
     "express": "visionmedia/express"
   }
-}
- -

devDependencies

- +} +

Local Paths

+

As of version 2.0.0 you can provide a path to a local directory that contains a +package. Local paths can be saved using npm install --save, using any of +these forms:

+
../foo/bar
+~/foo/bar
+./foo/bar
+/foo/bar
+

in which case they will be normalized to a relative path and added to your +package.json. For example:

+
{
+  "name": "baz",
+  "dependencies": {
+    "bar": "file:../foo/bar"
+  }
+}
+

This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry.

+

devDependencies

If someone is planning on downloading and using your module in their program, then they probably don't want or need to download and build the external test or documentation framework that you use.

- -

In this case, it's best to list these additional items in a -devDependencies hash.

- +

In this case, it's best to map these additional items in a devDependencies +object.

These things will be installed when doing npm link or npm install from the root of a package, and can be managed like any other npm -configuration param. See npm-config(7) for more on the topic.

- +configuration param. See npm-config(7) for more on the topic.

For build steps that are not platform-specific, such as compiling CoffeeScript or other languages to JavaScript, use the prepublish script to do this, and make the required package a devDependency.

-

For example:

-
{ "name": "ethopia-waza",
   "description": "a delightfully fruity coffee varietal",
   "version": "1.2.3",
@@ -390,64 +320,48 @@
     "prepublish": "coffee -o lib/ -c src/waza.coffee"
   },
   "main": "lib/waza.js"
-}
- -

The prepublish script will be run before publishing, so that users +} +

The prepublish script will be run before publishing, so that users can consume the functionality without requiring them to compile it themselves. In dev mode (ie, locally running npm install), it'll run this script as well, so that you can test it easily.

- -

peerDependencies

- +

peerDependencies

In some cases, you want to express the compatibility of your package with an host tool or library, while not necessarily doing a require of this host. This is usually refered to as a plugin. Notably, your module may be exposing a specific interface, expected and specified by the host documentation.

-

For example:

-
{
   "name": "tea-latte",
   "version": "1.3.5"
   "peerDependencies": {
     "tea": "2.x"
   }
-}
- -

This ensures your package tea-latte can be installed along with the second +} +

This ensures your package tea-latte can be installed along with the second major version of the host package tea only. The host package is automatically installed if needed. npm install tea-latte could possibly yield the following dependency graph:

-
├── tea-latte@1.3.5
-└── tea@2.2.0
- -

Trying to install another plugin with a conflicting requirement will cause an +└── tea@2.2.0 +

Trying to install another plugin with a conflicting requirement will cause an error. For this reason, make sure your plugin requirement is as broad as possible, and not to lock it down to specific patch versions.

-

Assuming the host complies with semver, only changes in the host package's major version will break your plugin. Thus, if you've worked with every 1.x version of the host package, use "^1.0" or "1.x" to express this. If you depend on features introduced in 1.5.2, use ">= 1.5.2 < 2".

- -

bundledDependencies

- +

bundledDependencies

Array of package names that will be bundled when publishing the package.

-

If this is spelled "bundleDependencies", then that is also honorable.

- -

optionalDependencies

- -

If a dependency can be used, but you would like npm to proceed if it -cannot be found or fails to install, then you may put it in the -optionalDependencies hash. This is a map of package name to version -or url, just like the dependencies hash. The difference is that -failure is tolerated.

- +

optionalDependencies

+

If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the optionalDependencies +object. This is a map of package name to version or url, just like the +dependencies object. The difference is that build failures do not cause +installation to fail.

It is still your program's responsibility to handle the lack of the dependency. For example, something like this:

-
try {
   var foo = require('foo')
   var fooVersion = require('foo/package.json').version
@@ -462,122 +376,104 @@
 
 if (foo) {
   foo.doFooThings()
-}
- -

Entries in optionalDependencies will override entries of the same name in +} +

Entries in optionalDependencies will override entries of the same name in dependencies, so it's usually best to only put in one place.

-

engines

-

You can specify the version of node that your stuff works on:

- -
{ "engines" : { "node" : ">=0.10.3 <0.12" } }
- -

And, like with dependencies, if you don't specify the version (or if you +

{ "engines" : { "node" : ">=0.10.3 <0.12" } }
+

And, like with dependencies, if you don't specify the version (or if you specify "*" as the version), then any version of node will do.

-

If you specify an "engines" field, then npm will require that "node" be somewhere on that list. If "engines" is omitted, then npm will just assume that it works on node.

-

You can also use the "engines" field to specify which versions of npm are capable of properly installing your program. For example:

- -
{ "engines" : { "npm" : "~1.0.20" } }
- -

Note that, unless the user has set the engine-strict config flag, this +

{ "engines" : { "npm" : "~1.0.20" } }
+

Note that, unless the user has set the engine-strict config flag, this field is advisory only.

- -

engineStrict

- +

engineStrict

If you are sure that your module will definitely not run properly on -versions of Node/npm other than those specified in the engines hash, +versions of Node/npm other than those specified in the engines object, then you can set "engineStrict": true in your package.json file. This will override the user's engine-strict config setting.

-

Please do not do this unless you are really very very sure. If your -engines hash is something overly restrictive, you can quite easily and +engines object is something overly restrictive, you can quite easily and inadvertently lock yourself into obscurity and prevent your users from updating to new versions of Node. Consider this choice carefully. If people abuse it, it will be removed in a future version of npm.

-

os

-

You can specify which operating systems your module will run on:

- -
"os" : [ "darwin", "linux" ]
- -

You can also blacklist instead of whitelist operating systems, +

"os" : [ "darwin", "linux" ]
+

You can also blacklist instead of whitelist operating systems, just prepend the blacklisted os with a '!':

- -
"os" : [ "!win32" ]
- -

The host operating system is determined by process.platform

- +
"os" : [ "!win32" ]
+

The host operating system is determined by process.platform

It is allowed to both blacklist, and whitelist, although there isn't any good reason to do this.

-

cpu

-

If your code only runs on certain cpu architectures, you can specify which ones.

- -
"cpu" : [ "x64", "ia32" ]
- -

Like the os option, you can also blacklist architectures:

- -
"cpu" : [ "!arm", "!mips" ]
- -

The host architecture is determined by process.arch

- -

preferGlobal

- +
"cpu" : [ "x64", "ia32" ]
+

Like the os option, you can also blacklist architectures:

+
"cpu" : [ "!arm", "!mips" ]
+

The host architecture is determined by process.arch

+

preferGlobal

If your package is primarily a command-line application that should be installed globally, then set this value to true to provide a warning if it is installed locally.

-

It doesn't actually prevent users from installing it locally, but it does help prevent some confusion if it doesn't work as expected.

-

private

-

If you set "private": true in your package.json, then npm will refuse to publish it.

- -

This is a way to prevent accidental publication of private repositories. -If you would like to ensure that a given package is only ever published -to a specific registry (for example, an internal registry), -then use the publishConfig hash described below -to override the registry config param at publish-time.

- -

publishConfig

- +

This is a way to prevent accidental publication of private repositories. If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +publishConfig dictionary described below to override the registry config +param at publish-time.

+

publishConfig

This is a set of config values that will be used at publish-time. It's especially handy if you want to set the tag or registry, so that you can ensure that a given package is not tagged with "latest" or published to the global public registry by default.

-

Any config values can be overridden, but of course only "tag" and "registry" probably matter for the purposes of publishing.

- -

See npm-config(7) to see the list of config options that can be +

See npm-config(7) to see the list of config options that can be overridden.

- -

DEFAULT VALUES

- +

DEFAULT VALUES

npm will default some values based on package contents.

- -
  • "scripts": {"start": "node server.js"}

    If there is a server.js file in the root of your package, then npm -will default the start command to node server.js.

  • "scripts":{"preinstall": "node-gyp rebuild"}

    If there is a binding.gyp file in the root of your package, npm will -default the preinstall command to compile using node-gyp.

  • "contributors": [...]

    If there is an AUTHORS file in the root of your package, npm will +

      +
    • "scripts": {"start": "node server.js"}

      +

      If there is a server.js file in the root of your package, then npm +will default the start command to node server.js.

      +
    • +
    • "scripts":{"preinstall": "node-gyp rebuild"}

      +

      If there is a binding.gyp file in the root of your package, npm will +default the preinstall command to compile using node-gyp.

      +
    • +
    • "contributors": [...]

      +

      If there is an AUTHORS file in the root of your package, npm will treat each line as a Name <email> (url) format, where email and url are optional. Lines which start with a # or are blank, will be -ignored.

    - -

    SEE ALSO

    +ignored.

    +
  • +
+

SEE ALSO

+ -
@@ -589,5 +485,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/files/npmrc.html nodejs-0.11.15/deps/npm/html/doc/files/npmrc.html --- nodejs-0.11.13/deps/npm/html/doc/files/npmrc.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/files/npmrc.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,57 +10,58 @@

npmrc

The npm config files

- -

DESCRIPTION

- +

DESCRIPTION

npm gets its config settings from the command line, environment variables, and npmrc files.

-

The npm config command can be used to update and edit the contents of the user and global npmrc files.

- -

For a list of available configuration options, see npm-config(7).

- -

FILES

- -

The three relevant files are:

- -
  • per-user config file (~/.npmrc)
  • global config file ($PREFIX/npmrc)
  • npm builtin config file (/path/to/npm/npmrc)
- +

For a list of available configuration options, see npm-config(7).

+

FILES

+

The four relevant files are:

+
    +
  • per-project config file (/path/to/my/project/.npmrc)
  • +
  • per-user config file (~/.npmrc)
  • +
  • global config file ($PREFIX/npmrc)
  • +
  • npm builtin config file (/path/to/npm/npmrc)
  • +

All npm config files are an ini-formatted list of key = value parameters. Environment variables can be replaced using ${VARIABLE_NAME}. For example:

- -
prefix = ${HOME}/.npm-packages
- -

Each of these files is loaded, and config options are resolved in +

prefix = ${HOME}/.npm-packages
+

Each of these files is loaded, and config options are resolved in priority order. For example, a setting in the userconfig file would override the setting in the globalconfig file.

- -

Per-user config file

- +

Per-project config file

+

When working locally in a project, a .npmrc file in the root of the +project (ie, a sibling of node_modules and package.json) will set +config values specific to this project.

+

Note that this only applies to the root of the project that you're +running npm in. It has no effect when your module is published. For +example, you can't publish a module that forces itself to install +globally, or in a different location.

+

Per-user config file

$HOME/.npmrc (or the userconfig param, if set in the environment or on the command line)

- -

Global config file

- +

Global config file

$PREFIX/etc/npmrc (or the globalconfig param, if set above): This file is an ini-file formatted list of key = value parameters. Environment variables can be replaced as above.

- -

Built-in config file

- +

Built-in config file

path/to/npm/itself/npmrc

-

This is an unchangeable "builtin" configuration file that npm keeps consistent across updates. Set fields in here using the ./configure script that comes with npm. This is primarily for distribution maintainers to override default configs in a standard and consistent manner.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -72,5 +73,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/files/package.json.html nodejs-0.11.15/deps/npm/html/doc/files/package.json.html --- nodejs-0.11.13/deps/npm/html/doc/files/package.json.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/files/package.json.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,257 +10,180 @@

package.json

Specifics of npm's package.json handling

- -

DESCRIPTION

- +

DESCRIPTION

This document is all you need to know about what's required in your package.json file. It must be actual JSON, not just a JavaScript object literal.

-

A lot of the behavior described in this document is affected by the config -settings described in npm-config(7).

- +settings described in npm-config(7).

name

-

The most important things in your package.json are the name and version fields. Those are actually required, and your package won't install without them. The name and version together form an identifier that is assumed to be completely unique. Changes to the package should come along with changes to the version.

-

The name is what your thing is called. Some tips:

- -
  • Don't put "js" or "node" in the name. It's assumed that it's js, since you're +
      +
    • Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using the "engines" -field. (See below.)
    • The name ends up being part of a URL, an argument on the command line, and a +field. (See below.)
    • +
    • The name ends up being part of a URL, an argument on the command line, and a folder name. Any name with non-url-safe characters will be rejected. -Also, it can't start with a dot or an underscore.
    • The name will probably be passed as an argument to require(), so it should -be something short, but also reasonably descriptive.
    • You may want to check the npm registry to see if there's something by that name -already, before you get too attached to it. http://registry.npmjs.org/
    - +Also, it can't start with a dot or an underscore.
  • +
  • The name will probably be passed as an argument to require(), so it should +be something short, but also reasonably descriptive.
  • +
  • You may want to check the npm registry to see if there's something by that name +already, before you get too attached to it. http://registry.npmjs.org/
  • +
+

A name can be optionally prefixed by a scope, e.g. @myorg/mypackage. See +npm-scope(7) for more detail.

version

-

The most important things in your package.json are the name and version fields. Those are actually required, and your package won't install without them. The name and version together form an identifier that is assumed to be completely unique. Changes to the package should come along with changes to the version.

-

Version must be parseable by node-semver, which is bundled with npm as a dependency. (npm install semver to use it yourself.)

- -

More on version numbers and ranges at semver(7).

- +

More on version numbers and ranges at semver(7).

description

-

Put a description in it. It's a string. This helps people discover your package, as it's listed in npm search.

-

keywords

-

Put keywords in it. It's an array of strings. This helps people discover your package as it's listed in npm search.

-

homepage

-

The url to the project homepage.

-

NOTE: This is not the same as "url". If you put a "url" field, then the registry will think it's a redirection to your package that has been published somewhere else, and spit at you.

-

Literally. Spit. I'm so not kidding.

-

bugs

-

The url to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package.

-

It should look like this:

-
{ "url" : "http://github.com/owner/project/issues"
 , "email" : "project@hostname.com"
-}
- -

You can specify either one or both values. If you want to provide only a url, +} +

You can specify either one or both values. If you want to provide only a url, you can specify the value for "bugs" as a simple string instead of an object.

-

If a url is provided, it will be used by the npm bugs command.

-

license

-

You should specify a license for your package so that people know how they are permitted to use it, and any restrictions you're placing on it.

-

The simplest way, assuming you're using a common license such as BSD-3-Clause or MIT, is to just specify the standard SPDX ID of the license you're using, like this:

- -
{ "license" : "BSD-3-Clause" }
- -

You can check the full list of SPDX license IDs. +

{ "license" : "BSD-3-Clause" }
+

You can check the full list of SPDX license IDs. Ideally you should pick one that is OSI approved.

-

It's also a good idea to include a LICENSE file at the top level in your package.

-

people fields: author, contributors

-

The "author" is one person. "contributors" is an array of people. A "person" is an object with a "name" field and optionally "url" and "email", like this:

-
{ "name" : "Barney Rubble"
 , "email" : "b@rubble.com"
 , "url" : "http://barnyrubble.tumblr.com/"
-}
- -

Or you can shorten that all into a single string, and npm will parse it for you:

- -
"Barney Rubble <b@rubble.com> (http://barnyrubble.tumblr.com/)
- -

Both email and url are optional either way.

- +} +

Or you can shorten that all into a single string, and npm will parse it for you:

+
"Barney Rubble <b@rubble.com> (http://barnyrubble.tumblr.com/)
+

Both email and url are optional either way.

npm also sets a top-level "maintainers" field with your npm user info.

-

files

-

The "files" field is an array of files to include in your project. If you name a folder in the array, then it will also include the files inside that folder. (Unless they would be ignored by another rule.)

-

You can also provide a ".npmignore" file in the root of your package, which will keep files from being included, even if they would be picked up by the files array. The ".npmignore" file works just like a ".gitignore".

-

main

-

The main field is a module ID that is the primary entry point to your program. That is, if your package is named foo, and a user installs it, and then does require("foo"), then your main module's exports object will be returned.

-

This should be a module ID relative to the root of your package folder.

-

For most modules, it makes the most sense to have a main script and often not much else.

-

bin

-

A lot of packages have one or more executable files that they'd like to install into the PATH. npm makes this pretty easy (in fact, it uses this feature to install the "npm" executable.)

-

To use this, supply a bin field in your package.json which is a map of command name to local file name. On install, npm will symlink that file into prefix/bin for global installs, or ./node_modules/.bin/ for local installs.

-

For example, npm has this:

- -
{ "bin" : { "npm" : "./cli.js" } }
- -

So, when you install npm, it'll create a symlink from the cli.js script to +

{ "bin" : { "npm" : "./cli.js" } }
+

So, when you install npm, it'll create a symlink from the cli.js script to /usr/local/bin/npm.

-

If you have a single executable, and its name should be the name of the package, then you can just supply it as a string. For example:

-
{ "name": "my-program"
 , "version": "1.2.5"
-, "bin": "./path/to/program" }
- -

would be the same as this:

- +, "bin": "./path/to/program" } +

would be the same as this:

{ "name": "my-program"
 , "version": "1.2.5"
-, "bin" : { "my-program" : "./path/to/program" } }
- -

man

- +, "bin" : { "my-program" : "./path/to/program" } } +

man

Specify either a single file or an array of filenames to put in place for the man program to find.

-

If only a single file is provided, then it's installed such that it is the result from man <pkgname>, regardless of its actual filename. For example:

-
{ "name" : "foo"
 , "version" : "1.2.3"
 , "description" : "A packaged foo fooer for fooing foos"
 , "main" : "foo.js"
 , "man" : "./man/doc.1"
-}
- -

would link the ./man/doc.1 file in such that it is the target for man foo

- +} +

would link the ./man/doc.1 file in such that it is the target for man foo

If the filename doesn't start with the package name, then it's prefixed. So, this:

-
{ "name" : "foo"
 , "version" : "1.2.3"
 , "description" : "A packaged foo fooer for fooing foos"
 , "main" : "foo.js"
 , "man" : [ "./man/foo.1", "./man/bar.1" ]
-}
- -

will create files to do man foo and man foo-bar.

- +} +

will create files to do man foo and man foo-bar.

Man files must end with a number, and optionally a .gz suffix if they are compressed. The number dictates which man section the file is installed into.

-
{ "name" : "foo"
 , "version" : "1.2.3"
 , "description" : "A packaged foo fooer for fooing foos"
 , "main" : "foo.js"
 , "man" : [ "./man/foo.1", "./man/foo.2" ]
-}
- -

will create entries for man foo and man 2 foo

- +} +

will create entries for man foo and man 2 foo

directories

-

The CommonJS Packages spec details a few ways that you can indicate the structure of your package using a directories -hash. If you look at npm's package.json, +object. If you look at npm's package.json, you'll see that it has directories for doc, lib, and man.

-

In the future, this information may be used in other creative ways.

-

directories.lib

-

Tell people where the bulk of your library is. Nothing special is done with the lib folder in any way, but it's useful meta info.

-

directories.bin

- -

If you specify a "bin" directory, then all the files in that folder will -be used as the "bin" hash.

- -

If you have a "bin" hash already, then this has no effect.

- +

If you specify a bin directory, then all the files in that folder will +be added as children of the bin path.

+

If you have a bin path already, then this has no effect.

directories.man

-

A folder that is full of man pages. Sugar to generate a "man" array by walking the folder.

-

directories.doc

-

Put markdown files in here. Eventually, these will be displayed nicely, maybe, someday.

-

directories.example

-

Put example scripts in here. Someday, it might be exposed in some clever way.

-

repository

-

Specify the place where your code lives. This is helpful for people who want to contribute. If the git repo is on github, then the npm docs command will be able to find you.

-

Do it like this:

-
"repository" :
   { "type" : "git"
   , "url" : "http://github.com/npm/npm.git"
@@ -269,52 +192,54 @@
 "repository" :
   { "type" : "svn"
   , "url" : "http://v8.googlecode.com/svn/trunk/"
-  }
- -

The URL should be a publicly available (perhaps read-only) url that can be handed + } +

The URL should be a publicly available (perhaps read-only) url that can be handed directly to a VCS program without any modification. It should not be a url to an html project page that you put in your browser. It's for computers.

-

scripts

- -

The "scripts" member is an object hash of script commands that are run +

The "scripts" property is a dictionary containing script commands that are run at various times in the lifecycle of your package. The key is the lifecycle event, and the value is the command to run at that point.

- -

See npm-scripts(7) to find out more about writing package scripts.

- +

See npm-scripts(7) to find out more about writing package scripts.

config

- -

A "config" hash can be used to set configuration -parameters used in package scripts that persist across upgrades. For -instance, if a package had the following:

- +

A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades. For instance, if a package had the +following:

{ "name" : "foo"
-, "config" : { "port" : "8080" } }
- -

and then had a "start" command that then referenced the +, "config" : { "port" : "8080" } } +

and then had a "start" command that then referenced the npm_package_config_port environment variable, then the user could override that by doing npm config set foo:port 8001.

- -

See npm-config(7) and npm-scripts(7) for more on package +

See npm-config(7) and npm-scripts(7) for more on package configs.

-

dependencies

- -

Dependencies are specified with a simple hash of package name to +

Dependencies are specified in a simple object that maps a package name to a version range. The version range is a string which has one or more -space-separated descriptors. Dependencies can also be identified with -a tarball or git URL.

- +space-separated descriptors. Dependencies can also be identified with a +tarball or git URL.

Please do not put test harnesses or transpilers in your -dependencies hash. See devDependencies, below.

- -

See semver(7) for more details about specifying version ranges.

- -
  • version Must match version exactly
  • >version Must be greater than version
  • >=version etc
  • <version
  • <=version
  • ~version "Approximately equivalent to version" See semver(7)
  • ^version "Compatible with version" See semver(7)
  • 1.2.x 1.2.0, 1.2.1, etc., but not 1.3.0
  • http://... See 'URLs as Dependencies' below
  • * Matches any version
  • "" (just an empty string) Same as *
  • version1 - version2 Same as >=version1 <=version2.
  • range1 || range2 Passes if either range1 or range2 are satisfied.
  • git... See 'Git URLs as Dependencies' below
  • user/repo See 'GitHub URLs' below
- +dependencies object. See devDependencies, below.

+

See semver(7) for more details about specifying version ranges.

+
    +
  • version Must match version exactly
  • +
  • >version Must be greater than version
  • +
  • >=version etc
  • +
  • <version
  • +
  • <=version
  • +
  • ~version "Approximately equivalent to version" See semver(7)
  • +
  • ^version "Compatible with version" See semver(7)
  • +
  • 1.2.x 1.2.0, 1.2.1, etc., but not 1.3.0
  • +
  • http://... See 'URLs as Dependencies' below
  • +
  • * Matches any version
  • +
  • "" (just an empty string) Same as *
  • +
  • version1 - version2 Same as >=version1 <=version2.
  • +
  • range1 || range2 Passes if either range1 or range2 are satisfied.
  • +
  • git... See 'Git URLs as Dependencies' below
  • +
  • user/repo See 'GitHub URLs' below
  • +
  • tag A specific version tagged and published as tag See npm-tag(1)
  • +
  • path/path/path See Local Paths below
  • +

For example, these are all valid:

-
{ "dependencies" :
   { "foo" : "1.0.0 - 2.9999.9999"
   , "bar" : ">=1.0.2 <2.1.2"
@@ -326,60 +251,65 @@
   , "elf" : "~1.2.3"
   , "two" : "2.x"
   , "thr" : "3.3.x"
+  , "lat" : "latest"
+  , "dyl" : "file:../dyl"
   }
-}
- -

URLs as Dependencies

- +} +

URLs as Dependencies

You may specify a tarball URL in place of a version range.

-

This tarball will be downloaded and installed locally to your package at install time.

- -

Git URLs as Dependencies

- +

Git URLs as Dependencies

Git urls can be of the form:

-
git://github.com/user/project.git#commit-ish
 git+ssh://user@hostname:project.git#commit-ish
 git+ssh://user@hostname/project.git#commit-ish
 git+http://user@hostname/project/blah.git#commit-ish
-git+https://user@hostname/project/blah.git#commit-ish
- -

The commit-ish can be any tag, sha, or branch which can be supplied as +git+https://user@hostname/project/blah.git#commit-ish +

The commit-ish can be any tag, sha, or branch which can be supplied as an argument to git checkout. The default is master.

- -

GitHub URLs

- +

GitHub URLs

As of version 1.1.65, you can refer to GitHub urls as just "foo": "user/foo-project". For example:

-
{
   "name": "foo",
   "version": "0.0.0",
   "dependencies": {
     "express": "visionmedia/express"
   }
-}
- -

devDependencies

- +} +

Local Paths

+

As of version 2.0.0 you can provide a path to a local directory that contains a +package. Local paths can be saved using npm install --save, using any of +these forms:

+
../foo/bar
+~/foo/bar
+./foo/bar
+/foo/bar
+

in which case they will be normalized to a relative path and added to your +package.json. For example:

+
{
+  "name": "baz",
+  "dependencies": {
+    "bar": "file:../foo/bar"
+  }
+}
+

This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry.

+

devDependencies

If someone is planning on downloading and using your module in their program, then they probably don't want or need to download and build the external test or documentation framework that you use.

- -

In this case, it's best to list these additional items in a -devDependencies hash.

- +

In this case, it's best to map these additional items in a devDependencies +object.

These things will be installed when doing npm link or npm install from the root of a package, and can be managed like any other npm -configuration param. See npm-config(7) for more on the topic.

- +configuration param. See npm-config(7) for more on the topic.

For build steps that are not platform-specific, such as compiling CoffeeScript or other languages to JavaScript, use the prepublish script to do this, and make the required package a devDependency.

-

For example:

-
{ "name": "ethopia-waza",
   "description": "a delightfully fruity coffee varietal",
   "version": "1.2.3",
@@ -390,64 +320,48 @@
     "prepublish": "coffee -o lib/ -c src/waza.coffee"
   },
   "main": "lib/waza.js"
-}
- -

The prepublish script will be run before publishing, so that users +} +

The prepublish script will be run before publishing, so that users can consume the functionality without requiring them to compile it themselves. In dev mode (ie, locally running npm install), it'll run this script as well, so that you can test it easily.

- -

peerDependencies

- +

peerDependencies

In some cases, you want to express the compatibility of your package with an host tool or library, while not necessarily doing a require of this host. This is usually refered to as a plugin. Notably, your module may be exposing a specific interface, expected and specified by the host documentation.

-

For example:

-
{
   "name": "tea-latte",
   "version": "1.3.5"
   "peerDependencies": {
     "tea": "2.x"
   }
-}
- -

This ensures your package tea-latte can be installed along with the second +} +

This ensures your package tea-latte can be installed along with the second major version of the host package tea only. The host package is automatically installed if needed. npm install tea-latte could possibly yield the following dependency graph:

-
├── tea-latte@1.3.5
-└── tea@2.2.0
- -

Trying to install another plugin with a conflicting requirement will cause an +└── tea@2.2.0 +

Trying to install another plugin with a conflicting requirement will cause an error. For this reason, make sure your plugin requirement is as broad as possible, and not to lock it down to specific patch versions.

-

Assuming the host complies with semver, only changes in the host package's major version will break your plugin. Thus, if you've worked with every 1.x version of the host package, use "^1.0" or "1.x" to express this. If you depend on features introduced in 1.5.2, use ">= 1.5.2 < 2".

- -

bundledDependencies

- +

bundledDependencies

Array of package names that will be bundled when publishing the package.

-

If this is spelled "bundleDependencies", then that is also honorable.

- -

optionalDependencies

- -

If a dependency can be used, but you would like npm to proceed if it -cannot be found or fails to install, then you may put it in the -optionalDependencies hash. This is a map of package name to version -or url, just like the dependencies hash. The difference is that -failure is tolerated.

- +

optionalDependencies

+

If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the optionalDependencies +object. This is a map of package name to version or url, just like the +dependencies object. The difference is that build failures do not cause +installation to fail.

It is still your program's responsibility to handle the lack of the dependency. For example, something like this:

-
try {
   var foo = require('foo')
   var fooVersion = require('foo/package.json').version
@@ -462,122 +376,104 @@
 
 if (foo) {
   foo.doFooThings()
-}
- -

Entries in optionalDependencies will override entries of the same name in +} +

Entries in optionalDependencies will override entries of the same name in dependencies, so it's usually best to only put in one place.

-

engines

-

You can specify the version of node that your stuff works on:

- -
{ "engines" : { "node" : ">=0.10.3 <0.12" } }
- -

And, like with dependencies, if you don't specify the version (or if you +

{ "engines" : { "node" : ">=0.10.3 <0.12" } }
+

And, like with dependencies, if you don't specify the version (or if you specify "*" as the version), then any version of node will do.

-

If you specify an "engines" field, then npm will require that "node" be somewhere on that list. If "engines" is omitted, then npm will just assume that it works on node.

-

You can also use the "engines" field to specify which versions of npm are capable of properly installing your program. For example:

- -
{ "engines" : { "npm" : "~1.0.20" } }
- -

Note that, unless the user has set the engine-strict config flag, this +

{ "engines" : { "npm" : "~1.0.20" } }
+

Note that, unless the user has set the engine-strict config flag, this field is advisory only.

- -

engineStrict

- +

engineStrict

If you are sure that your module will definitely not run properly on -versions of Node/npm other than those specified in the engines hash, +versions of Node/npm other than those specified in the engines object, then you can set "engineStrict": true in your package.json file. This will override the user's engine-strict config setting.

-

Please do not do this unless you are really very very sure. If your -engines hash is something overly restrictive, you can quite easily and +engines object is something overly restrictive, you can quite easily and inadvertently lock yourself into obscurity and prevent your users from updating to new versions of Node. Consider this choice carefully. If people abuse it, it will be removed in a future version of npm.

-

os

-

You can specify which operating systems your module will run on:

- -
"os" : [ "darwin", "linux" ]
- -

You can also blacklist instead of whitelist operating systems, +

"os" : [ "darwin", "linux" ]
+

You can also blacklist instead of whitelist operating systems, just prepend the blacklisted os with a '!':

- -
"os" : [ "!win32" ]
- -

The host operating system is determined by process.platform

- +
"os" : [ "!win32" ]
+

The host operating system is determined by process.platform

It is allowed to both blacklist, and whitelist, although there isn't any good reason to do this.

-

cpu

-

If your code only runs on certain cpu architectures, you can specify which ones.

- -
"cpu" : [ "x64", "ia32" ]
- -

Like the os option, you can also blacklist architectures:

- -
"cpu" : [ "!arm", "!mips" ]
- -

The host architecture is determined by process.arch

- -

preferGlobal

- +
"cpu" : [ "x64", "ia32" ]
+

Like the os option, you can also blacklist architectures:

+
"cpu" : [ "!arm", "!mips" ]
+

The host architecture is determined by process.arch

+

preferGlobal

If your package is primarily a command-line application that should be installed globally, then set this value to true to provide a warning if it is installed locally.

-

It doesn't actually prevent users from installing it locally, but it does help prevent some confusion if it doesn't work as expected.

-

private

-

If you set "private": true in your package.json, then npm will refuse to publish it.

- -

This is a way to prevent accidental publication of private repositories. -If you would like to ensure that a given package is only ever published -to a specific registry (for example, an internal registry), -then use the publishConfig hash described below -to override the registry config param at publish-time.

- -

publishConfig

- +

This is a way to prevent accidental publication of private repositories. If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +publishConfig dictionary described below to override the registry config +param at publish-time.

+

publishConfig

This is a set of config values that will be used at publish-time. It's especially handy if you want to set the tag or registry, so that you can ensure that a given package is not tagged with "latest" or published to the global public registry by default.

-

Any config values can be overridden, but of course only "tag" and "registry" probably matter for the purposes of publishing.

- -

See npm-config(7) to see the list of config options that can be +

See npm-config(7) to see the list of config options that can be overridden.

- -

DEFAULT VALUES

- +

DEFAULT VALUES

npm will default some values based on package contents.

- -
  • "scripts": {"start": "node server.js"}

    If there is a server.js file in the root of your package, then npm -will default the start command to node server.js.

  • "scripts":{"preinstall": "node-gyp rebuild"}

    If there is a binding.gyp file in the root of your package, npm will -default the preinstall command to compile using node-gyp.

  • "contributors": [...]

    If there is an AUTHORS file in the root of your package, npm will +

      +
    • "scripts": {"start": "node server.js"}

      +

      If there is a server.js file in the root of your package, then npm +will default the start command to node server.js.

      +
    • +
    • "scripts":{"preinstall": "node-gyp rebuild"}

      +

      If there is a binding.gyp file in the root of your package, npm will +default the preinstall command to compile using node-gyp.

      +
    • +
    • "contributors": [...]

      +

      If there is an AUTHORS file in the root of your package, npm will treat each line as a Name <email> (url) format, where email and url are optional. Lines which start with a # or are blank, will be -ignored.

    - -

    SEE ALSO

    +ignored.

    +
  • +
+

SEE ALSO

+ -
@@ -589,5 +485,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/index.html nodejs-0.11.15/deps/npm/html/doc/index.html --- nodejs-0.11.13/deps/npm/html/doc/index.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/index.html 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ - npm-index + index @@ -10,414 +10,215 @@

npm-index

Index of all npm documentation

- -

README

- +

README

node package manager

- -

Command Line Documentation

- -

npm(1)

- +

Command Line Documentation

+

Using npm on the command line

+

npm(1)

node package manager

- -

npm-adduser(1)

- +

npm-adduser(1)

Add a registry user account

- -

npm-bin(1)

- +

npm-bin(1)

Display npm bin folder

- -

npm-bugs(1)

- +

npm-bugs(1)

Bugs for a package in a web browser maybe

- -

npm-build(1)

- +

npm-build(1)

Build a package

- -

npm-bundle(1)

- +

npm-bundle(1)

REMOVED

- -

npm-cache(1)

- +

npm-cache(1)

Manipulates packages cache

- -

npm-completion(1)

- +

npm-completion(1)

Tab Completion for npm

- -

npm-config(1)

- +

npm-config(1)

Manage the npm configuration files

- -

npm-dedupe(1)

- +

npm-dedupe(1)

Reduce duplication

- -

npm-deprecate(1)

- +

npm-deprecate(1)

Deprecate a version of a package

- -

npm-docs(1)

- +

npm-docs(1)

Docs for a package in a web browser maybe

- -

npm-edit(1)

- +

npm-edit(1)

Edit an installed package

- -

npm-explore(1)

- +

npm-explore(1)

Browse an installed package

- -

npm-help-search(1)

- +

npm-help-search(1)

Search npm help documentation

- -

npm-help(1)

- +

npm-help(1)

Get help on npm

- -

npm-init(1)

- +

npm-init(1)

Interactively create a package.json file

- -

npm-install(1)

- +

npm-install(1)

Install a package

- - - +

Symlink a package folder

- -

npm-ls(1)

- +

npm-ls(1)

List installed packages

- -

npm-outdated(1)

- +

npm-outdated(1)

Check for outdated packages

- -

npm-owner(1)

- +

npm-owner(1)

Manage package owners

- -

npm-pack(1)

- +

npm-pack(1)

Create a tarball from a package

- -

npm-prefix(1)

- +

npm-prefix(1)

Display prefix

- -

npm-prune(1)

- +

npm-prune(1)

Remove extraneous packages

- -

npm-publish(1)

- +

npm-publish(1)

Publish a package

- -

npm-rebuild(1)

- +

npm-rebuild(1)

Rebuild a package

- -

npm-repo(1)

- +

npm-repo(1)

Open package repository page in the browser

- -

npm-restart(1)

- +

npm-restart(1)

Start a package

- -

npm-rm(1)

- +

npm-rm(1)

Remove a package

- -

npm-root(1)

- +

npm-root(1)

Display npm root

- -

npm-run-script(1)

- +

npm-run-script(1)

Run arbitrary package scripts

- -

npm-search(1)

- +

npm-search(1)

Search for packages

- -

npm-shrinkwrap(1)

- +

npm-shrinkwrap(1)

Lock down dependency versions

- -

npm-star(1)

- +

npm-star(1)

Mark your favorite packages

- -

npm-stars(1)

- +

npm-stars(1)

View packages marked as favorites

- -

npm-start(1)

- +

npm-start(1)

Start a package

- -

npm-stop(1)

- +

npm-stop(1)

Stop a package

- -

npm-submodule(1)

- -

Add a package as a git submodule

- -

npm-tag(1)

- +

npm-tag(1)

Tag a published version

- -

npm-test(1)

- +

npm-test(1)

Test a package

- -

npm-uninstall(1)

- +

npm-uninstall(1)

Remove a package

- -

npm-unpublish(1)

- +

npm-unpublish(1)

Remove a package from the registry

- -

npm-update(1)

- +

npm-update(1)

Update a package

- -

npm-version(1)

- +

npm-version(1)

Bump a package version

- -

npm-view(1)

- +

npm-view(1)

View registry info

- -

npm-whoami(1)

- +

npm-whoami(1)

Display npm username

- -

API Documentation

- -

npm(3)

- +

API Documentation

+

Using npm in your Node programs

+

npm(3)

node package manager

- -

npm-bin(3)

- +

npm-bin(3)

Display npm bin folder

- -

npm-bugs(3)

- +

npm-bugs(3)

Bugs for a package in a web browser maybe

- -

npm-commands(3)

- +

npm-cache(3)

+

manage the npm cache programmatically

+

npm-commands(3)

npm commands

- -

npm-config(3)

- +

npm-config(3)

Manage the npm configuration files

- -

npm-deprecate(3)

- +

npm-deprecate(3)

Deprecate a version of a package

- -

npm-docs(3)

- +

npm-docs(3)

Docs for a package in a web browser maybe

- -

npm-edit(3)

- +

npm-edit(3)

Edit an installed package

- -

npm-explore(3)

- +

npm-explore(3)

Browse an installed package

- -

npm-help-search(3)

- +

npm-help-search(3)

Search the help pages

- -

npm-init(3)

- +

npm-init(3)

Interactively create a package.json file

- -

npm-install(3)

- +

npm-install(3)

install a package programmatically

- - - +

Symlink a package folder

- -

npm-load(3)

- +

npm-load(3)

Load config settings

- -

npm-ls(3)

- +

npm-ls(3)

List installed packages

- -

npm-outdated(3)

- +

npm-outdated(3)

Check for outdated packages

- -

npm-owner(3)

- +

npm-owner(3)

Manage package owners

- -

npm-pack(3)

- +

npm-pack(3)

Create a tarball from a package

- -

npm-prefix(3)

- +

npm-prefix(3)

Display prefix

- -

npm-prune(3)

- +

npm-prune(3)

Remove extraneous packages

- -

npm-publish(3)

- +

npm-publish(3)

Publish a package

- -

npm-rebuild(3)

- +

npm-rebuild(3)

Rebuild a package

- -

npm-repo(3)

- +

npm-repo(3)

Open package repository page in the browser

- -

npm-restart(3)

- +

npm-restart(3)

Start a package

- -

npm-root(3)

- +

npm-root(3)

Display npm root

- -

npm-run-script(3)

- +

npm-run-script(3)

Run arbitrary package scripts

- -

npm-search(3)

- +

npm-search(3)

Search for packages

- -

npm-shrinkwrap(3)

- +

npm-shrinkwrap(3)

programmatically generate package shrinkwrap file

- -

npm-start(3)

- +

npm-start(3)

Start a package

- -

npm-stop(3)

- +

npm-stop(3)

Stop a package

- -

npm-submodule(3)

- -

Add a package as a git submodule

- -

npm-tag(3)

- +

npm-tag(3)

Tag a published version

- -

npm-test(3)

- +

npm-test(3)

Test a package

- -

npm-uninstall(3)

- +

npm-uninstall(3)

uninstall a package programmatically

- -

npm-unpublish(3)

- +

npm-unpublish(3)

Remove a package from the registry

- -

npm-update(3)

- +

npm-update(3)

Update a package

- -

npm-version(3)

- +

npm-version(3)

Bump a package version

- -

npm-view(3)

- +

npm-view(3)

View registry info

- -

npm-whoami(3)

- +

npm-whoami(3)

Display npm username

- -

Files

- -

npm-folders(5)

- +

Files

+

File system structures npm uses

+

npm-folders(5)

Folder Structures Used by npm

- -

npmrc(5)

- +

npmrc(5)

The npm config files

- -

package.json(5)

- +

package.json(5)

Specifics of npm's package.json handling

- -

Misc

- -

npm-coding-style(7)

- +

Misc

+

Various other bits and bobs

+

npm-coding-style(7)

npm's "funny" coding style

- -

npm-config(7)

- +

npm-config(7)

More than you probably want to know about npm configuration

- -

npm-developers(7)

- +

npm-developers(7)

Developer Guide

- -

npm-disputes(7)

- +

npm-disputes(7)

Handling Module Name Disputes

- -

npm-faq(7)

- +

npm-faq(7)

Frequently Asked Questions

- -

npm-index(7)

- +

npm-index(7)

Index of all npm documentation

- -

npm-registry(7)

- +

npm-registry(7)

The JavaScript Package Registry

- -

npm-scripts(7)

- +

npm-scope(7)

+

Scoped packages

+

npm-scripts(7)

How npm handles the "scripts" field

- -

removing-npm(7)

- +

removing-npm(7)

Cleaning the Slate

- -

semver(7)

- +

semver(7)

The semantic versioner for npm

+
@@ -429,5 +230,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/index.html nodejs-0.11.15/deps/npm/html/doc/misc/index.html --- nodejs-0.11.13/deps/npm/html/doc/misc/index.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/index.html 1970-01-01 00:00:00.000000000 +0000 @@ -1,438 +0,0 @@ - - - index - - - - -
-

npm-index

Index of all npm documentation

- -

README

- -

node package manager

- -

Command Line Documentation

- -

npm(1)

- -

node package manager

- -

npm-adduser(1)

- -

Add a registry user account

- -

npm-bin(1)

- -

Display npm bin folder

- -

npm-bugs(1)

- -

Bugs for a package in a web browser maybe

- -

npm-build(1)

- -

Build a package

- -

npm-bundle(1)

- -

REMOVED

- -

npm-cache(1)

- -

Manipulates packages cache

- -

npm-completion(1)

- -

Tab Completion for npm

- -

npm-config(1)

- -

Manage the npm configuration files

- -

npm-dedupe(1)

- -

Reduce duplication

- -

npm-deprecate(1)

- -

Deprecate a version of a package

- -

npm-docs(1)

- -

Docs for a package in a web browser maybe

- -

npm-edit(1)

- -

Edit an installed package

- -

npm-explore(1)

- -

Browse an installed package

- -

npm-help-search(1)

- -

Search npm help documentation

- -

npm-help(1)

- -

Get help on npm

- -

npm-init(1)

- -

Interactively create a package.json file

- -

npm-install(1)

- -

Install a package

- - - -

Symlink a package folder

- -

npm-ls(1)

- -

List installed packages

- -

npm-outdated(1)

- -

Check for outdated packages

- -

npm-owner(1)

- -

Manage package owners

- -

npm-pack(1)

- -

Create a tarball from a package

- -

npm-prefix(1)

- -

Display prefix

- -

npm-prune(1)

- -

Remove extraneous packages

- -

npm-publish(1)

- -

Publish a package

- -

npm-rebuild(1)

- -

Rebuild a package

- -

npm-restart(1)

- -

Start a package

- -

npm-rm(1)

- -

Remove a package

- -

npm-root(1)

- -

Display npm root

- -

npm-run-script(1)

- -

Run arbitrary package scripts

- -

npm-search(1)

- -

Search for packages

- -

npm-shrinkwrap(1)

- -

Lock down dependency versions

- -

npm-star(1)

- -

Mark your favorite packages

- -

npm-stars(1)

- -

View packages marked as favorites

- -

npm-start(1)

- -

Start a package

- -

npm-stop(1)

- -

Stop a package

- -

npm-submodule(1)

- -

Add a package as a git submodule

- -

npm-tag(1)

- -

Tag a published version

- -

npm-test(1)

- -

Test a package

- -

npm-uninstall(1)

- -

Remove a package

- -

npm-unpublish(1)

- -

Remove a package from the registry

- -

npm-update(1)

- -

Update a package

- -

npm-version(1)

- -

Bump a package version

- -

npm-view(1)

- -

View registry info

- -

npm-whoami(1)

- -

Display npm username

- -

API Documentation

- -

npm(3)

- -

node package manager

- -

npm-bin(3)

- -

Display npm bin folder

- -

npm-bugs(3)

- -

Bugs for a package in a web browser maybe

- -

npm-commands(3)

- -

npm commands

- -

npm-config(3)

- -

Manage the npm configuration files

- -

npm-deprecate(3)

- -

Deprecate a version of a package

- -

npm-docs(3)

- -

Docs for a package in a web browser maybe

- -

npm-edit(3)

- -

Edit an installed package

- -

npm-explore(3)

- -

Browse an installed package

- -

npm-help-search(3)

- -

Search the help pages

- -

npm-init(3)

- -

Interactively create a package.json file

- -

npm-install(3)

- -

install a package programmatically

- - - -

Symlink a package folder

- -

npm-load(3)

- -

Load config settings

- -

npm-ls(3)

- -

List installed packages

- -

npm-outdated(3)

- -

Check for outdated packages

- -

npm-owner(3)

- -

Manage package owners

- -

npm-pack(3)

- -

Create a tarball from a package

- -

npm-prefix(3)

- -

Display prefix

- -

npm-prune(3)

- -

Remove extraneous packages

- -

npm-publish(3)

- -

Publish a package

- -

npm-rebuild(3)

- -

Rebuild a package

- -

npm-restart(3)

- -

Start a package

- -

npm-root(3)

- -

Display npm root

- -

npm-run-script(3)

- -

Run arbitrary package scripts

- -

npm-search(3)

- -

Search for packages

- -

npm-shrinkwrap(3)

- -

programmatically generate package shrinkwrap file

- -

npm-start(3)

- -

Start a package

- -

npm-stop(3)

- -

Stop a package

- -

npm-submodule(3)

- -

Add a package as a git submodule

- -

npm-tag(3)

- -

Tag a published version

- -

npm-test(3)

- -

Test a package

- -

npm-uninstall(3)

- -

uninstall a package programmatically

- -

npm-unpublish(3)

- -

Remove a package from the registry

- -

npm-update(3)

- -

Update a package

- -

npm-version(3)

- -

Bump a package version

- -

npm-view(3)

- -

View registry info

- -

npm-whoami(3)

- -

Display npm username

- -

Files

- -

npm-folders(5)

- -

Folder Structures Used by npm

- -

npmrc(5)

- -

The npm config files

- -

package.json(5)

- -

Specifics of npm's package.json handling

- -

Misc

- -

npm-coding-style(7)

- -

npm's "funny" coding style

- -

npm-config(7)

- -

More than you probably want to know about npm configuration

- -

npm-developers(7)

- -

Developer Guide

- -

npm-disputes(7)

- -

Handling Module Name Disputes

- -

npm-faq(7)

- -

Frequently Asked Questions

- -

npm-registry(7)

- -

The JavaScript Package Registry

- -

npm-scripts(7)

- -

How npm handles the "scripts" field

- -

removing-npm(7)

- -

Cleaning the Slate

- -

semver(7)

- -

The semantic versioner for npm

-
- - diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-coding-style.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-coding-style.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-coding-style.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-coding-style.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,71 +10,51 @@

npm-coding-style

npm's "funny" coding style

- -

DESCRIPTION

- +

DESCRIPTION

npm's coding style is a bit unconventional. It is not different for difference's sake, but rather a carefully crafted style that is designed to reduce visual clutter and make bugs more apparent.

-

If you want to contribute to npm (which is very encouraged), you should make your code conform to npm's style.

-

Note: this concerns npm's code not the specific packages at npmjs.org

- -

Line Length

- +

Line Length

Keep lines shorter than 80 characters. It's better for lines to be too short than to be too long. Break up long lists, objects, and other statements onto multiple lines.

- -

Indentation

- +

Indentation

Two-spaces. Tabs are better, but they look like hell in web browsers (and on github), and node uses 2 spaces, so that's that.

-

Configure your editor appropriately.

- -

Curly braces

- +

Curly braces

Curly braces belong on the same line as the thing that necessitates them.

-

Bad:

-
function ()
-{
- -

Good:

- -
function () {
- -

If a block needs to wrap to the next line, use a curly brace. Don't +{ +

Good:

+
function () {
+

If a block needs to wrap to the next line, use a curly brace. Don't use it if it doesn't.

-

Bad:

-
if (foo) { bar() }
 while (foo)
-  bar()
- -

Good:

- + bar() +

Good:

if (foo) bar()
 while (foo) {
   bar()
-}
- -

Semicolons

- +} +

Semicolons

Don't use them except in four situations:

- -
  • for (;;) loops. They're actually required.
  • null loops like: while (something) ; (But you'd better have a good -reason for doing that.)
  • case "foo": doSomething(); break
  • In front of a leading ( or [ at the start of the line. +
      +
    • for (;;) loops. They're actually required.
    • +
    • null loops like: while (something) ; (But you'd better have a good +reason for doing that.)
    • +
    • case "foo": doSomething(); break
    • +
    • In front of a leading ( or [ at the start of the line. This prevents the expression from being interpreted -as a function call or property access, respectively.
    - +as a function call or property access, respectively.
  • +

Some examples of good semicolon usage:

-
;(x || y).doSomething()
 ;[a, b, c].forEach(doSomething)
 for (var i = 0; i < 10; i ++) {
@@ -84,18 +64,14 @@
     default: throw new Error("unknown state")
   }
   end()
-}
- -

Note that starting lines with - and + also should be prefixed +} +

Note that starting lines with - and + also should be prefixed with a semicolon, but this is much less common.

- -

Comma First

- +

Comma First

If there is a list of things separated by commas, and it wraps across multiple lines, put the comma at the start of the next line, directly below the token that starts the list. Put the final token in the list on a line by itself. For example:

-
var magicWords = [ "abracadabra"
                  , "gesundheit"
                  , "ventrilo"
@@ -106,84 +82,60 @@
   , a = 1
   , b = "abc"
   , etc
-  , somethingElse
- -

Whitespace

- + , somethingElse +

Whitespace

Put a single space in front of ( for anything other than a function call. Also use a single space wherever it makes things more readable.

-

Don't leave trailing whitespace at the end of lines. Don't indent empty lines. Don't use more spaces than are helpful.

- -

Functions

- +

Functions

Use named functions. They make stack traces a lot easier to read.

- -

Callbacks, Sync/async Style

- +

Callbacks, Sync/async Style

Use the asynchronous/non-blocking versions of things as much as possible. It might make more sense for npm to use the synchronous fs APIs, but this way, the fs and http and child process stuff all uses the same callback-passing methodology.

-

The callback should always be the last argument in the list. Its first argument is the Error or null.

-

Be very careful never to ever ever throw anything. It's worse than useless. Just send the error message back as the first argument to the callback.

- -

Errors

- +

Errors

Always create a new Error object with your message. Don't just return a string message to the callback. Stack traces are handy.

- -

Logging

- +

Logging

Logging is done using the npmlog utility.

-

Please clean up logs when they are no longer helpful. In particular, logging the same object over and over again is not helpful. Logs should report what's happening so that it's easier to track down where a fault occurs.

- -

Use appropriate log levels. See npm-config(7) and search for +

Use appropriate log levels. See npm-config(7) and search for "loglevel".

- -

Case, naming, etc.

- +

Case, naming, etc.

Use lowerCamelCase for multiword identifiers when they refer to objects, -functions, methods, members, or anything not specified in this section.

- +functions, methods, properties, or anything not specified in this section.

Use UpperCamelCase for class names (things that you'd pass to "new").

-

Use all-lower-hyphen-css-case for multiword filenames and config keys.

-

Use named functions. They make stack traces easier to follow.

-

Use CAPS_SNAKE_CASE for constants, things that should never change and are rarely used.

-

Use a single uppercase letter for function names where the function would normally be anonymous, but needs to call itself recursively. It makes it clear that it's a "throwaway" function.

-

null, undefined, false, 0

-

Boolean variables and functions should always be either true or false. Don't set it to 0 unless it's supposed to be a number.

-

When something is intentionally missing or removed, set it to null.

-

Don't set things to undefined. Reserve that value to mean "not yet set to anything."

-

Boolean objects are verboten.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -195,5 +147,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-config.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-config.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-config.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-config.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,20 +10,14 @@

npm-config

More than you probably want to know about npm configuration

- -

DESCRIPTION

- +

DESCRIPTION

npm gets its configuration values from 6 sources, in this priority:

- -

Command Line Flags

- +

Command Line Flags

Putting --foo bar on the command line sets the foo configuration parameter to "bar". A -- argument tells the cli parser to stop reading flags. A --flag parameter that is at the end of the command will be given the value of true.

- -

Environment Variables

- +

Environment Variables

Any environment variables that start with npm_config_ will be interpreted as a configuration parameter. For example, putting npm_config_foo=bar in your environment will set the foo @@ -31,707 +25,729 @@ are not given a value will be given the value of true. Config values are case-insensitive, so NPM_CONFIG_FOO=bar will work the same.

- -

npmrc Files

- -

The three relevant files are:

- -
  • per-user config file (~/.npmrc)
  • global config file ($PREFIX/npmrc)
  • npm builtin config file (/path/to/npm/npmrc)
- -

See npmrc(5) for more details.

- -

Default Configs

- +

npmrc Files

+

The four relevant files are:

+
    +
  • per-project config file (/path/to/my/project/.npmrc)
  • +
  • per-user config file (~/.npmrc)
  • +
  • global config file ($PREFIX/npmrc)
  • +
  • npm builtin config file (/path/to/npm/npmrc)
  • +
+

See npmrc(5) for more details.

+

Default Configs

A set of configuration parameters that are internal to npm, and are defaults if nothing else is specified.

- -

Shorthands and Other CLI Niceties

- +

Shorthands and Other CLI Niceties

The following shorthands are parsed on the command-line:

- -
  • -v: --version
  • -h, -?, --help, -H: --usage
  • -s, --silent: --loglevel silent
  • -q, --quiet: --loglevel warn
  • -d: --loglevel info
  • -dd, --verbose: --loglevel verbose
  • -ddd: --loglevel silly
  • -g: --global
  • -l: --long
  • -m: --message
  • -p, --porcelain: --parseable
  • -reg: --registry
  • -v: --version
  • -f: --force
  • -desc: --description
  • -S: --save
  • -D: --save-dev
  • -O: --save-optional
  • -B: --save-bundle
  • -E: --save-exact
  • -y: --yes
  • -n: --yes false
  • ll and la commands: ls --long
- +
    +
  • -v: --version
  • +
  • -h, -?, --help, -H: --usage
  • +
  • -s, --silent: --loglevel silent
  • +
  • -q, --quiet: --loglevel warn
  • +
  • -d: --loglevel info
  • +
  • -dd, --verbose: --loglevel verbose
  • +
  • -ddd: --loglevel silly
  • +
  • -g: --global
  • +
  • -C: --prefix
  • +
  • -l: --long
  • +
  • -m: --message
  • +
  • -p, --porcelain: --parseable
  • +
  • -reg: --registry
  • +
  • -v: --version
  • +
  • -f: --force
  • +
  • -desc: --description
  • +
  • -S: --save
  • +
  • -D: --save-dev
  • +
  • -O: --save-optional
  • +
  • -B: --save-bundle
  • +
  • -E: --save-exact
  • +
  • -y: --yes
  • +
  • -n: --yes false
  • +
  • ll and la commands: ls --long
  • +

If the specified configuration param resolves unambiguously to a known configuration parameter, then it is expanded to that configuration parameter. For example:

-
npm ls --par
 # same as:
-npm ls --parseable
- -

If multiple single-character shorthands are strung together, and the +npm ls --parseable +

If multiple single-character shorthands are strung together, and the resulting combination is unambiguously not some other configuration param, then it is expanded to its various component pieces. For example:

-
npm ls -gpld
 # same as:
-npm ls --global --parseable --long --loglevel info
- -

Per-Package Config Settings

- -

When running scripts (see npm-scripts(7)) the package.json "config" +npm ls --global --parseable --long --loglevel info +

Per-Package Config Settings

+

When running scripts (see npm-scripts(7)) the package.json "config" keys are overwritten in the environment if there is a config param of <name>[@<version>]:<key>. For example, if the package.json has this:

-
{ "name" : "foo"
 , "config" : { "port" : "8080" }
-, "scripts" : { "start" : "node server.js" } }
- -

and the server.js is this:

- -
http.createServer(...).listen(process.env.npm_package_config_port)
- -

then the user could change the behavior by doing:

- -
npm config set foo:port 80
- -

See package.json(5) for more information.

- -

Config Settings

- +, "scripts" : { "start" : "node server.js" } } +

and the server.js is this:

+
http.createServer(...).listen(process.env.npm_package_config_port)
+

then the user could change the behavior by doing:

+
npm config set foo:port 80
+

See package.json(5) for more information.

+

Config Settings

always-auth

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Force npm to always require authentication when accessing the registry, even for GET requests.

- - -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Tells npm to create symlinks (or .cmd shims on Windows) for package executables.

-

Set to false to have it not do this. This can be used to work around the fact that some file systems don't support symlinks, even on ostensibly Unix systems.

-

browser

- -
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • Type: String
- +
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +

The browser that is called by the npm docs command to open websites.

-

ca

- -
  • Default: The npm CA certificate
  • Type: String or null
- +
    +
  • Default: The npm CA certificate
  • +
  • Type: String or null
  • +

The Certificate Authority signing certificate that is trusted for SSL connections to the registry.

-

Set to null to only allow "known" registrars, or to a specific CA cert to trust only that specific signing authority.

-

See also the strict-ssl config.

- +

cafile

+
    +
  • Default: null
  • +
  • Type: path
  • +
+

A path to a file containing one or multiple Certificate Authority signing +certificates. Similar to the ca setting, but allows for multiple CA's, as +well as for the CA information to be stored in a file on disk.

cache

- -
  • Default: Windows: %AppData%\npm-cache, Posix: ~/.npm
  • Type: path
- -

The location of npm's cache directory. See npm-cache(1)

- +
    +
  • Default: Windows: %AppData%\npm-cache, Posix: ~/.npm
  • +
  • Type: path
  • +
+

The location of npm's cache directory. See npm-cache(1)

cache-lock-stale

- -
  • Default: 60000 (1 minute)
  • Type: Number
- +
    +
  • Default: 60000 (1 minute)
  • +
  • Type: Number
  • +

The number of ms before cache folder lockfiles are considered stale.

-

cache-lock-retries

- -
  • Default: 10
  • Type: Number
- +
    +
  • Default: 10
  • +
  • Type: Number
  • +

Number of times to retry to acquire a lock on cache folder lockfiles.

-

cache-lock-wait

- -
  • Default: 10000 (10 seconds)
  • Type: Number
- +
    +
  • Default: 10000 (10 seconds)
  • +
  • Type: Number
  • +

Number of ms to wait for cache lock files to expire.

-

cache-max

- -
  • Default: Infinity
  • Type: Number
- +
    +
  • Default: Infinity
  • +
  • Type: Number
  • +

The maximum time (in seconds) to keep items in the registry cache before re-checking against the registry.

-

Note that no purging is done unless the npm cache clean command is explicitly used, and that only GET requests use the cache.

-

cache-min

- -
  • Default: 10
  • Type: Number
- +
    +
  • Default: 10
  • +
  • Type: Number
  • +

The minimum time (in seconds) to keep items in the registry cache before re-checking against the registry.

-

Note that no purging is done unless the npm cache clean command is explicitly used, and that only GET requests use the cache.

-

cert

- -
  • Default: null
  • Type: String
- +
    +
  • Default: null
  • +
  • Type: String
  • +

A client certificate to pass when accessing the registry.

-

color

- -
  • Default: true on Posix, false on Windows
  • Type: Boolean or "always"
- +
    +
  • Default: true on Posix, false on Windows
  • +
  • Type: Boolean or "always"
  • +

If false, never shows colors. If "always" then always shows colors. If true, then only prints color codes for tty file descriptors.

-

depth

- -
  • Default: Infinity
  • Type: Number
- +
    +
  • Default: Infinity
  • +
  • Type: Number
  • +

The depth to go when recursing directories for npm ls and npm cache ls.

-

description

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Show the description in npm search

-

dev

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Install dev-dependencies along with packages.

-

Note that dev-dependencies are also installed if the npat flag is set.

-

editor

- -
  • Default: EDITOR environment variable if set, or "vi" on Posix, -or "notepad" on Windows.
  • Type: path
- +
    +
  • Default: EDITOR environment variable if set, or "vi" on Posix, +or "notepad" on Windows.
  • +
  • Type: path
  • +

The command to run for npm edit or npm config edit.

- -

email

- -

The email of the logged-in user.

- -

Set by the npm adduser command. Should not be set explicitly.

-

engine-strict

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

If set to true, then npm will stubbornly refuse to install (or even consider installing) any package that claims to not be compatible with the current Node.js version.

-

force

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Makes various commands more forceful.

- -
  • lifecycle script failure does not block progress.
  • publishing clobbers previously published versions.
  • skips cache when requesting from the registry.
  • prevents checks against clobbering non-npm files.
- +
    +
  • lifecycle script failure does not block progress.
  • +
  • publishing clobbers previously published versions.
  • +
  • skips cache when requesting from the registry.
  • +
  • prevents checks against clobbering non-npm files.
  • +

fetch-retries

- -
  • Default: 2
  • Type: Number
- +
    +
  • Default: 2
  • +
  • Type: Number
  • +

The "retries" config for the retry module to use when fetching packages from the registry.

-

fetch-retry-factor

- -
  • Default: 10
  • Type: Number
- +
    +
  • Default: 10
  • +
  • Type: Number
  • +

The "factor" config for the retry module to use when fetching packages.

-

fetch-retry-mintimeout

- -
  • Default: 10000 (10 seconds)
  • Type: Number
- +
    +
  • Default: 10000 (10 seconds)
  • +
  • Type: Number
  • +

The "minTimeout" config for the retry module to use when fetching packages.

-

fetch-retry-maxtimeout

- -
  • Default: 60000 (1 minute)
  • Type: Number
- +
    +
  • Default: 60000 (1 minute)
  • +
  • Type: Number
  • +

The "maxTimeout" config for the retry module to use when fetching packages.

-

git

- -
  • Default: "git"
  • Type: String
- +
    +
  • Default: "git"
  • +
  • Type: String
  • +

The command to use for git commands. If git is installed on the computer, but is not in the PATH, then set this to the full path to the git binary.

-

git-tag-version

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Tag the commit when using the npm version command.

-

global

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Operates in "global" mode, so that packages are installed into the prefix folder instead of the current working directory. See -npm-folders(5) for more on the differences in behavior.

- -
  • packages are installed into the {prefix}/lib/node_modules folder, instead of the -current working directory.
  • bin files are linked to {prefix}/bin
  • man pages are linked to {prefix}/share/man
- +npm-folders(5) for more on the differences in behavior.

+
    +
  • packages are installed into the {prefix}/lib/node_modules folder, instead of the +current working directory.
  • +
  • bin files are linked to {prefix}/bin
  • +
  • man pages are linked to {prefix}/share/man
  • +

globalconfig

- -
  • Default: {prefix}/etc/npmrc
  • Type: path
- +
    +
  • Default: {prefix}/etc/npmrc
  • +
  • Type: path
  • +

The config file to read for global config options.

-

group

- -
  • Default: GID of the current process
  • Type: String or Number
- +
    +
  • Default: GID of the current process
  • +
  • Type: String or Number
  • +

The group to use when running package scripts in global mode as the root user.

-

heading

- -
  • Default: "npm"
  • Type: String
- +
    +
  • Default: "npm"
  • +
  • Type: String
  • +

The string that starts all the debugging log output.

-

https-proxy

- -
  • Default: the HTTPS_PROXY or https_proxy or HTTP_PROXY or -http_proxy environment variables.
  • Type: url
- +
    +
  • Default: the HTTPS_PROXY or https_proxy or HTTP_PROXY or +http_proxy environment variables.
  • +
  • Type: url
  • +

A proxy to use for outgoing https requests.

-

ignore-scripts

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

If true, npm does not run scripts specified in package.json files.

-

init-module

- -
  • Default: ~/.npm-init.js
  • Type: path
- +
    +
  • Default: ~/.npm-init.js
  • +
  • Type: path
  • +

A module that will be loaded by the npm init command. See the documentation for the init-package-json module -for more information, or npm-init(1).

- -

init.author.name

- -
  • Default: ""
  • Type: String
- +for more information, or npm-init(1).

+

init-author-name

+
    +
  • Default: ""
  • +
  • Type: String
  • +

The value npm init should use by default for the package author's name.

- -

init.author.email

- -
  • Default: ""
  • Type: String
- +

init-author-email

+
    +
  • Default: ""
  • +
  • Type: String
  • +

The value npm init should use by default for the package author's email.

- -

init.author.url

- -
  • Default: ""
  • Type: String
- +

init-author-url

+
    +
  • Default: ""
  • +
  • Type: String
  • +

The value npm init should use by default for the package author's homepage.

- -

init.license

- -
  • Default: "ISC"
  • Type: String
- +

init-license

+
    +
  • Default: "ISC"
  • +
  • Type: String
  • +

The value npm init should use by default for the package license.

- +

init-version

+
    +
  • Default: "0.0.0"
  • +
  • Type: semver
  • +
+

The value that npm init should use by default for the package +version number, if not already set in package.json.

json

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Whether or not to output JSON data, rather than the normal output.

-

This feature is currently experimental, and the output data structures for many commands is either not implemented in JSON yet, or subject to change. Only the output from npm ls --json is currently valid.

-

key

- -
  • Default: null
  • Type: String
- +
    +
  • Default: null
  • +
  • Type: String
  • +

A client key to pass when accessing the registry.

- - -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

If true, then local installs will link if there is a suitable globally installed package.

-

Note that this means that local installs can cause things to be installed into the global space at the same time. The link is only done if one of the two conditions are met:

- -
  • The package is not already installed globally, or
  • the globally installed version is identical to the version that is -being installed locally.
- +
    +
  • The package is not already installed globally, or
  • +
  • the globally installed version is identical to the version that is +being installed locally.
  • +

local-address

- -
  • Default: undefined
  • Type: IP Address
- +
    +
  • Default: undefined
  • +
  • Type: IP Address
  • +

The IP address of the local interface to use when making connections to the npm registry. Must be IPv4 in versions of Node prior to 0.12.

-

loglevel

- -
  • Default: "http"
  • Type: String
  • Values: "silent", "win", "error", "warn", "http", "info", "verbose", "silly"
- +
    +
  • Default: "warn"
  • +
  • Type: String
  • +
  • Values: "silent", "error", "warn", "http", "info", "verbose", "silly"
  • +

What level of logs to report. On failure, all logs are written to npm-debug.log in the current working directory.

-

Any logs of a higher level than the setting are shown. -The default is "http", which shows http, warn, and error output.

- +The default is "warn", which shows warn and error output.

logstream

- -
  • Default: process.stderr
  • Type: Stream
- +
    +
  • Default: process.stderr
  • +
  • Type: Stream
  • +

This is the stream that is passed to the npmlog module at run time.

-

It cannot be set from the command line, but if you are using npm programmatically, you may wish to send logs to somewhere other than stderr.

-

If the color config is set to true, then this stream will receive colored output if it is a TTY.

-

long

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Show extended information in npm ls and npm search.

-

message

- -
  • Default: "%s"
  • Type: String
- +
    +
  • Default: "%s"
  • +
  • Type: String
  • +

Commit message which is used by npm version when creating version commit.

-

Any "%s" in the message will be replaced with the version number.

-

node-version

- -
  • Default: process.version
  • Type: semver or false
- -

The node version to use when checking package's "engines" hash.

- +
    +
  • Default: process.version
  • +
  • Type: semver or false
  • +
+

The node version to use when checking a package's engines map.

npat

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Run tests on installation.

-

onload-script

- -
  • Default: false
  • Type: path
- +
    +
  • Default: false
  • +
  • Type: path
  • +

A node module to require() when npm loads. Useful for programmatic usage.

-

optional

- -
  • Default: true
  • Type: Boolean
- -

Attempt to install packages in the optionalDependencies hash. Note +

    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Attempt to install packages in the optionalDependencies object. Note that if these packages fail to install, the overall installation process is not aborted.

-

parseable

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Output parseable results from commands that write to standard output.

-

prefix

- - - +

The location to install global items. If set on the command line, then it forces non-global commands to run in the specified folder.

-

production

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Set to true to run in "production" mode.

- -
  1. devDependencies are not installed at the topmost level when running -local npm install without any arguments.
  2. Set the NODE_ENV="production" for lifecycle scripts.
- +
    +
  1. devDependencies are not installed at the topmost level when running +local npm install without any arguments.
  2. +
  3. Set the NODE_ENV="production" for lifecycle scripts.
  4. +

proprietary-attribs

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Whether or not to include proprietary extended attributes in the tarballs created by npm.

-

Unless you are expecting to unpack package tarballs with something other than npm -- particularly a very outdated tar implementation -- leave this as true.

-

proxy

- -
  • Default: HTTP_PROXY or http_proxy environment variable, or null
  • Type: url
- +
    +
  • Default: HTTP_PROXY or http_proxy environment variable, or null
  • +
  • Type: url
  • +

A proxy to use for outgoing http requests.

-

rebuild-bundle

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Rebuild bundled dependencies after installation.

-

registry

- -
  • Default: https://registry.npmjs.org/
  • Type: url
- +

The base URL of the npm package registry.

-

rollback

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Remove failed installs.

-

save

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Save installed packages to a package.json file as dependencies.

- -

When used with the npm rm command, it removes it from the dependencies -hash.

- +

When used with the npm rm command, it removes it from the dependencies +object.

Only works if there is already a package.json file present.

-

save-bundle

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

If a package would be saved at install time by the use of --save, --save-dev, or --save-optional, then also put it in the bundleDependencies list.

-

When used with the npm rm command, it removes it from the bundledDependencies list.

-

save-dev

- -
  • Default: false
  • Type: Boolean
- -

Save installed packages to a package.json file as devDependencies.

- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Save installed packages to a package.json file as devDependencies.

When used with the npm rm command, it removes it from the -devDependencies hash.

- +devDependencies object.

Only works if there is already a package.json file present.

-

save-exact

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Dependencies saved to package.json using --save, --save-dev or --save-optional will be configured with an exact version rather than using npm's default semver range operator.

-

save-optional

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Save installed packages to a package.json file as optionalDependencies.

-

When used with the npm rm command, it removes it from the -devDependencies hash.

- +devDependencies object.

Only works if there is already a package.json file present.

-

save-prefix

- -
  • Default: '^'
  • Type: String
- -

Configure how versions of packages installed to a package.json file via +

    +
  • Default: '^'
  • +
  • Type: String
  • +
+

Configure how versions of packages installed to a package.json file via --save or --save-dev get prefixed.

-

For example if a package has version 1.2.3, by default it's version is -set to ^1.2.3 which allows minor upgrades for that package, but after
npm config set save-prefix='~' it would be set to ~1.2.3 which only allows +set to ^1.2.3 which allows minor upgrades for that package, but after +npm config set save-prefix='~' it would be set to ~1.2.3 which only allows patch upgrades.

- +

scope

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

Associate an operation with a scope for a scoped registry. Useful when logging +in to a private registry for the first time: +npm login --scope=@organization --registry=registry.organization.com, which +will cause @organization to be mapped to the registry for future installation +of packages specified according to the pattern @organization/package.

searchopts

- -
  • Default: ""
  • Type: String
- +
    +
  • Default: ""
  • +
  • Type: String
  • +

Space-separated options that are always passed to search.

-

searchexclude

- -
  • Default: ""
  • Type: String
- +
    +
  • Default: ""
  • +
  • Type: String
  • +

Space-separated options that limit the results from search.

-

searchsort

- -
  • Default: "name"
  • Type: String
  • Values: "name", "-name", "date", "-date", "description", -"-description", "keywords", "-keywords"
- +
    +
  • Default: "name"
  • +
  • Type: String
  • +
  • Values: "name", "-name", "date", "-date", "description", +"-description", "keywords", "-keywords"
  • +

Indication of which field to sort search results by. Prefix with a - character to indicate reverse sort.

-

shell

- -
  • Default: SHELL environment variable, or "bash" on Posix, or "cmd" on -Windows
  • Type: path
- +
    +
  • Default: SHELL environment variable, or "bash" on Posix, or "cmd" on +Windows
  • +
  • Type: path
  • +

The shell to run for the npm explore command.

-

shrinkwrap

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

If set to false, then ignore npm-shrinkwrap.json files when installing.

-

sign-git-tag

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

If set to true, then the npm version command will tag the version using -s to add a signature.

-

Note that git requires you to have set up GPG keys in your git configs for this to work properly.

- +

spin

+
    +
  • Default: true
  • +
  • Type: Boolean or "always"
  • +
+

When set to true, npm will display an ascii spinner while it is doing +things, if process.stderr is a TTY.

+

Set to false to suppress the spinner, or set to always to output +the spinner even for non-TTY outputs.

strict-ssl

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

Whether or not to do SSL key validation when making requests to the registry via https.

-

See also the ca config.

-

tag

- -
  • Default: latest
  • Type: String
- +
    +
  • Default: latest
  • +
  • Type: String
  • +

If you ask npm to install a package and don't tell it a specific version, then it will install the specified tag.

-

Also the tag that is added to the package@version specified by the npm tag command, if no explicit tag is given.

-

tmp

- -
  • Default: TMPDIR environment variable, or "/tmp"
  • Type: path
- +
    +
  • Default: TMPDIR environment variable, or "/tmp"
  • +
  • Type: path
  • +

Where to store temporary files and folders. All temp files are deleted on success, but left behind on failure for forensic purposes.

-

unicode

- -
  • Default: true
  • Type: Boolean
- +
    +
  • Default: true
  • +
  • Type: Boolean
  • +

When set to true, npm uses unicode characters in the tree output. When false, it uses ascii characters to draw trees.

-

unsafe-perm

- -
  • Default: false if running as root, true otherwise
  • Type: Boolean
- +
    +
  • Default: false if running as root, true otherwise
  • +
  • Type: Boolean
  • +

Set to true to suppress the UID/GID switching when running package scripts. If set explicitly to false, then installing as a non-root user will fail.

-

usage

- -
  • Default: false
  • Type: Boolean
- +
    +
  • Default: false
  • +
  • Type: Boolean
  • +

Set to show short usage output (like the -H output) -instead of complete help when doing npm-help(1).

- +instead of complete help when doing npm-help(1).

user

- -
  • Default: "nobody"
  • Type: String or Number
- +
    +
  • Default: "nobody"
  • +
  • Type: String or Number
  • +

The UID to set to when running package scripts as root.

- -

username

- -
  • Default: null
  • Type: String
- -

The username on the npm registry. Set with npm adduser

-

userconfig

- -
  • Default: ~/.npmrc
  • Type: path
- +
    +
  • Default: ~/.npmrc
  • +
  • Type: path
  • +

The location of user-level configuration settings.

-

umask

- -
  • Default: 022
  • Type: Octal numeric string
- +
    +
  • Default: 022
  • +
  • Type: Octal numeric string
  • +

The "umask" value to use when setting the file creation mode on files and folders.

-

Folders and executables are given a mode which is 0777 masked against this value. Other files are given a mode which is 0666 masked against this value. Thus, the defaults are 0755 and 0644 respectively.

-

user-agent

- -
  • Default: node/{process.version} {process.platform} {process.arch}
  • Type: String
- +
    +
  • Default: node/{process.version} {process.platform} {process.arch}
  • +
  • Type: String
  • +

Sets a User-Agent to the request header

-

version

- -
  • Default: false
  • Type: boolean
- +
    +
  • Default: false
  • +
  • Type: boolean
  • +

If true, output the npm version and exit successfully.

-

Only relevant when specified explicitly on the command line.

-

versions

- -
  • Default: false
  • Type: boolean
- -

If true, output the npm version as well as node's process.versions -hash, and exit successfully.

- +
    +
  • Default: false
  • +
  • Type: boolean
  • +
+

If true, output the npm version as well as node's process.versions map, and +exit successfully.

Only relevant when specified explicitly on the command line.

-

viewer

- -
  • Default: "man" on Posix, "browser" on Windows
  • Type: path
- +
    +
  • Default: "man" on Posix, "browser" on Windows
  • +
  • Type: path
  • +

The program to use to view help content.

-

Set to "browser" to view html help content in the default web browser.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -743,5 +759,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-developers.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-developers.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-developers.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-developers.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,172 +10,166 @@

npm-developers

Developer Guide

- -

DESCRIPTION

- +

DESCRIPTION

So, you've decided to use npm to develop (and maybe publish/deploy) your project.

-

Fantastic!

-

There are a few things that you need to do above the simple steps that your users will do to install your program.

- -

About These Documents

- +

About These Documents

These are man pages. If you install npm, you should be able to then do man npm-thing to get the documentation on a particular topic, or npm help thing to see the same information.

- -

What is a package

- +

What is a package

A package is:

- -
  • a) a folder containing a program described by a package.json file
  • b) a gzipped tarball containing (a)
  • c) a url that resolves to (b)
  • d) a <name>@<version> that is published on the registry with (c)
  • e) a <name>@<tag> that points to (d)
  • f) a <name> that has a "latest" tag satisfying (e)
  • g) a git url that, when cloned, results in (a).
- +
    +
  • a) a folder containing a program described by a package.json file
  • +
  • b) a gzipped tarball containing (a)
  • +
  • c) a url that resolves to (b)
  • +
  • d) a <name>@<version> that is published on the registry with (c)
  • +
  • e) a <name>@<tag> that points to (d)
  • +
  • f) a <name> that has a "latest" tag satisfying (e)
  • +
  • g) a git url that, when cloned, results in (a).
  • +

Even if you never publish your package, you can still get a lot of benefits of using npm if you just want to write a node program (a), and perhaps if you also want to be able to easily install it elsewhere after packing it up into a tarball (b).

-

Git urls can be of the form:

-
git://github.com/user/project.git#commit-ish
 git+ssh://user@hostname:project.git#commit-ish
 git+http://user@hostname/project/blah.git#commit-ish
-git+https://user@hostname/project/blah.git#commit-ish
- -

The commit-ish can be any tag, sha, or branch which can be supplied as +git+https://user@hostname/project/blah.git#commit-ish +

The commit-ish can be any tag, sha, or branch which can be supplied as an argument to git checkout. The default is master.

- -

The package.json File

- +

The package.json File

You need to have a package.json file in the root of your project to do much of anything with npm. That is basically the whole interface.

- -

See package.json(5) for details about what goes in that file. At the very +

See package.json(5) for details about what goes in that file. At the very least, you need:

- -
  • name: +

      +
    • name: This should be a string that identifies your project. Please do not use the name to specify that it runs on node, or is in JavaScript. You can use the "engines" field to explicitly state the versions of node (or whatever else) that your program requires, and it's pretty -well assumed that it's javascript.

      It does not necessarily need to match your github repository name.

      So, node-foo and bar-js are bad names. foo or bar are better.

    • version: -A semver-compatible version.

    • engines: +well assumed that it's javascript.

      +

      It does not necessarily need to match your github repository name.

      +

      So, node-foo and bar-js are bad names. foo or bar are better.

      +
    • +
    • version: +A semver-compatible version.

      +
    • +
    • engines: Specify the versions of node (or whatever else) that your program runs on. The node API changes a lot, and there may be bugs or new -functionality that you depend on. Be explicit.

    • author: -Take some credit.

    • scripts: +functionality that you depend on. Be explicit.

      +
    • +
    • author: +Take some credit.

      +
    • +
    • scripts: If you have a special compilation or installation script, then you -should put it in the scripts hash. You should definitely have at +should put it in the scripts object. You should definitely have at least a basic smoke-test command as the "scripts.test" field. -See npm-scripts(7).

    • main: +See npm-scripts(7).

      +
    • +
    • main: If you have a single module that serves as the entry point to your program (like what the "foo" package gives you at require("foo")), -then you need to specify that in the "main" field.

    • directories: -This is a hash of folders. The best ones to include are "lib" and -"doc", but if you specify a folder full of man pages in "man", then -they'll get installed just like these ones.

    - +then you need to specify that in the "main" field.

    +
  • +
  • directories: +This is an object mapping names to folders. The best ones to include are +"lib" and "doc", but if you use "man" to specify a folder full of man pages, +they'll get installed just like these ones.

    +
  • +

You can use npm init in the root of your package in order to get you -started with a pretty basic package.json file. See npm-init(1) for +started with a pretty basic package.json file. See npm-init(1) for more info.

- -

Keeping files out of your package

- +

Keeping files out of your package

Use a .npmignore file to keep stuff out of your package. If there's no .npmignore file, but there is a .gitignore file, then npm will ignore the stuff matched by the .gitignore file. If you want to include something that is excluded by your .gitignore file, you can create an empty .npmignore file to override it.

-

By default, the following paths and files are ignored, so there's no need to add them to .npmignore explicitly:

- -
  • .*.swp
  • ._*
  • .DS_Store
  • .git
  • .hg
  • .lock-wscript
  • .svn
  • .wafpickle-*
  • CVS
  • npm-debug.log
- +
    +
  • .*.swp
  • +
  • ._*
  • +
  • .DS_Store
  • +
  • .git
  • +
  • .hg
  • +
  • .lock-wscript
  • +
  • .svn
  • +
  • .wafpickle-*
  • +
  • CVS
  • +
  • npm-debug.log
  • +

Additionally, everything in node_modules is ignored, except for bundled dependencies. npm automatically handles this for you, so don't bother adding node_modules to .npmignore.

-

The following paths and files are never ignored, so adding them to .npmignore is pointless:

- - - - - + +

npm link is designed to install a development package and see the changes in real time without having to keep re-installing it. (You do need to either re-link or npm rebuild -g to update compiled packages, of course.)

- -

More info at npm-link(1).

- -

Before Publishing: Make Sure Your Package Installs and Works

- +

More info at npm-link(1).

+

Before Publishing: Make Sure Your Package Installs and Works

This is important.

-

If you can not install it locally, you'll have problems trying to publish it. Or, worse yet, you'll be able to publish it, but you'll be publishing a broken or pointless package. So don't do that.

-

In the root of your package, do this:

- -
npm install . -g
- -

That'll show you that it's working. If you'd rather just create a symlink +

npm install . -g
+

That'll show you that it's working. If you'd rather just create a symlink package that points to your working directory, then do this:

- -
npm link
- -

Use npm ls -g to see if it's there.

- +
npm link
+

Use npm ls -g to see if it's there.

To test a local install, go into some other folder, and then do:

-
cd ../some-other-folder
-npm install ../my-package
- -

to install it locally into the node_modules folder in that other place.

- +npm install ../my-package +

to install it locally into the node_modules folder in that other place.

Then go into the node-repl, and try using require("my-thing") to bring in your module's main module.

- -

Create a User Account

- +

Create a User Account

Create a user with the adduser command. It works like this:

- -
npm adduser
- -

and then follow the prompts.

- -

This is documented better in npm-adduser(1).

- -

Publish your package

- +
npm adduser
+

and then follow the prompts.

+

This is documented better in npm-adduser(1).

+

Publish your package

This part's easy. IN the root of your folder, do this:

- -
npm publish
- -

You can give publish a url to a tarball, or a filename of a tarball, +

npm publish
+

You can give publish a url to a tarball, or a filename of a tarball, or a path to a folder.

-

Note that pretty much everything in that folder will be exposed by default. So, if you have secret stuff in there, use a .npmignore file to list out the globs to ignore, or publish from a fresh checkout.

- -

Brag about it

- +

Brag about it

Send emails, write blogs, blab in IRC.

-

Tell the world how easy it is to install your program!

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -187,5 +181,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-disputes.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-disputes.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-disputes.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-disputes.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,90 +10,97 @@

npm-disputes

Handling Module Name Disputes

- -

SYNOPSIS

- -
  1. Get the author email with npm owner ls <pkgname>
  2. Email the author, CC support@npmjs.com
  3. After a few weeks, if there's no resolution, we'll sort it out.
- +

SYNOPSIS

+
    +
  1. Get the author email with npm owner ls <pkgname>
  2. +
  3. Email the author, CC support@npmjs.com
  4. +
  5. After a few weeks, if there's no resolution, we'll sort it out.
  6. +

Don't squat on package names. Publish code or move out of the way.

- -

DESCRIPTION

- +

DESCRIPTION

There sometimes arise cases where a user publishes a module, and then later, some other user wants to use that name. Here are some common ways that happens (each of these is based on actual events.)

- -
  1. Joe writes a JavaScript module foo, which is not node-specific. +
      +
    1. Joe writes a JavaScript module foo, which is not node-specific. Joe doesn't use node at all. Bob wants to use foo in node, so he wraps it in an npm module. Some time later, Joe starts using node, -and wants to take over management of his program.
    2. Bob writes an npm module foo, and publishes it. Perhaps much +and wants to take over management of his program.
    3. +
    4. Bob writes an npm module foo, and publishes it. Perhaps much later, Joe finds a bug in foo, and fixes it. He sends a pull request to Bob, but Bob doesn't have the time to deal with it, because he has a new job and a new baby and is focused on his new erlang project, and kind of not involved with node any more. Joe would like to publish a new foo, but can't, because the name is -taken.
    5. Bob writes a 10-line flow-control library, and calls it foo, and +taken.
    6. +
    7. Bob writes a 10-line flow-control library, and calls it foo, and publishes it to the npm registry. Being a simple little thing, it never really has to be updated. Joe works for Foo Inc, the makers of the critically acclaimed and widely-marketed foo JavaScript toolkit framework. They publish it to npm as foojs, but people are -routinely confused when npm install foo is some different thing.
    8. Bob writes a parser for the widely-known foo file format, because +routinely confused when npm install foo is some different thing.
    9. +
    10. Bob writes a parser for the widely-known foo file format, because he needs it for work. Then, he gets a new job, and never updates the prototype. Later on, Joe writes a much more complete foo parser, -but can't publish, because Bob's foo is in the way.
    - +but can't publish, because Bob's foo is in the way.
  2. +

The validity of Joe's claim in each situation can be debated. However, Joe's appropriate course of action in each case is the same.

- -
  1. npm owner ls foo. This will tell Joe the email address of the -owner (Bob).
  2. Joe emails Bob, explaining the situation as respectfully as +
      +
    1. npm owner ls foo. This will tell Joe the email address of the +owner (Bob).
    2. +
    3. Joe emails Bob, explaining the situation as respectfully as possible, and what he would like to do with the module name. He -adds the npm support staff support@npmjs.com to the CC list of +adds the npm support staff support@npmjs.com to the CC list of the email. Mention in the email that Bob can run npm owner add -joe foo to add Joe as an owner of the foo package.
    4. After a reasonable amount of time, if Bob has not responded, or if +joe foo to add Joe as an owner of the foo package.
    5. +
    6. After a reasonable amount of time, if Bob has not responded, or if Bob and Joe can't come to any sort of resolution, email support -support@npmjs.com and we'll sort it out. ("Reasonable" is +support@npmjs.com and we'll sort it out. ("Reasonable" is usually at least 4 weeks, but extra time is allowed around common -holidays.)
    - -

    REASONING

    - +holidays.)
  3. +
+

REASONING

In almost every case so far, the parties involved have been able to reach an amicable resolution without any major intervention. Most people really do want to be reasonable, and are probably not even aware that they're in your way.

-

Module ecosystems are most vibrant and powerful when they are as self-directed as possible. If an admin one day deletes something you had worked on, then that is going to make most people quite upset, regardless of the justification. When humans solve their problems by talking to other humans with respect, everyone has the chance to end up feeling good about the interaction.

- -

EXCEPTIONS

- +

EXCEPTIONS

Some things are not allowed, and will be removed without discussion if they are brought to the attention of the npm registry admins, including but not limited to:

- -
  1. Malware (that is, a package designed to exploit or harm the machine on -which it is installed).
  2. Violations of copyright or licenses (for example, cloning an +
      +
    1. Malware (that is, a package designed to exploit or harm the machine on +which it is installed).
    2. +
    3. Violations of copyright or licenses (for example, cloning an MIT-licensed program, and then removing or changing the copyright and -license statement).
    4. Illegal content.
    5. "Squatting" on a package name that you plan to use, but aren't +license statement).
    6. +
    7. Illegal content.
    8. +
    9. "Squatting" on a package name that you plan to use, but aren't actually using. Sorry, I don't care how great the name is, or how perfect a fit it is for the thing that someday might happen. If someone wants to use it today, and you're just taking up space with -an empty tarball, you're going to be evicted.
    10. Putting empty packages in the registry. Packages must have SOME +an empty tarball, you're going to be evicted.
    11. +
    12. Putting empty packages in the registry. Packages must have SOME functionality. It can be silly, but it can't be nothing. (See -also: squatting.)
    13. Doing weird things with the registry, like using it as your own +also: squatting.)
    14. +
    15. Doing weird things with the registry, like using it as your own personal application database or otherwise putting non-packagey -things into it.
    - +things into it.
  3. +

If you see bad behavior like this, please report it right away.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -105,5 +112,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-faq.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-faq.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-faq.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-faq.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,238 +10,180 @@

npm-faq

Frequently Asked Questions

- -

Where can I find these docs in HTML?

- +

Where can I find these docs in HTML?

https://www.npmjs.org/doc/, or run:

- -
npm config set viewer browser
- -

to open these documents in your default web browser rather than man.

- -

It didn't work.

- +
npm config set viewer browser
+

to open these documents in your default web browser rather than man.

+

It didn't work.

That's not really a question.

- -

Why didn't it work?

- +

Why didn't it work?

I don't know yet.

-

Read the error output, and if you can't figure out what it means, do what it says and post a bug with all the information it asks for.

- -

Where does npm put stuff?

- -

See npm-folders(5)

- +

Where does npm put stuff?

+

See npm-folders(5)

tl;dr:

- -
  • Use the npm root command to see where modules go, and the npm bin -command to see where executables go
  • Global installs are different from local installs. If you install +
      +
    • Use the npm root command to see where modules go, and the npm bin +command to see where executables go
    • +
    • Global installs are different from local installs. If you install something with the -g flag, then its executables go in npm bin -g -and its modules go in npm root -g.
    - -

    How do I install something on my computer in a central location?

    - +and its modules go in npm root -g.
  • +
+

How do I install something on my computer in a central location?

Install it globally by tacking -g or --global to the command. (This is especially important for command line utilities that need to add their bins to the global system PATH.)

- -

I installed something globally, but I can't require() it

- +

I installed something globally, but I can't require() it

Install it locally.

-

The global install location is a place for command-line utilities to put their bins in the system PATH. It's not for use with require().

-

If you require() a module in your code, then that means it's a dependency, and a part of your program. You need to install it locally in your program.

- -

Why can't npm just put everything in one place, like other package managers?

- +

Why can't npm just put everything in one place, like other package managers?

Not every change is an improvement, but every improvement is a change. This would be like asking git to do network IO for every commit. It's not going to happen, because it's a terrible idea that causes more problems than it solves.

-

It is much harder to avoid dependency conflicts without nesting dependencies. This is fundamental to the way that npm works, and has -proven to be an extremely successful approach. See npm-folders(5) for +proven to be an extremely successful approach. See npm-folders(5) for more details.

-

If you want a package to be installed in one place, and have all your programs reference the same copy of it, then use the npm link command. That's what it's for. Install it globally, then link it into each program that uses it.

- -

Whatever, I really want the old style 'everything global' style.

- +

Whatever, I really want the old style 'everything global' style.

Write your own package manager. You could probably even wrap up npm in a shell script if you really wanted to.

-

npm will not help you do something that is known to be a bad idea.

- -

Should I check my node_modules folder into git?

- -

Mikeal Rogers answered this question very well:

- -

http://www.futurealoof.com/posts/nodemodules-in-git.html

- -

tl;dr

- -
  • Check node_modules into git for things you deploy, such as -websites and apps.
  • Do not check node_modules into git for libraries and modules -intended to be reused.
  • Use npm to manage dependencies in your dev environment, but not in -your deployment scripts.
- -

Is it 'npm' or 'NPM' or 'Npm'?

- +

Should I check my node_modules folder into git?

+

Usually, no. Allow npm to resolve dependencies for your packages.

+

For packages you deploy, such as websites and apps, +you should use npm shrinkwrap to lock down your full dependency tree:

+

https://www.npmjs.org/doc/cli/npm-shrinkwrap.html

+

If you are paranoid about depending on the npm ecosystem, +you should run a private npm mirror or a private cache.

+

If you want 100% confidence in being able to reproduce the specific bytes +included in a deployment, you should use an additional mechanism that can +verify contents rather than versions. For example, +Amazon machine images, DigitalOcean snapshots, Heroku slugs, or simple tarballs.

+

Is it 'npm' or 'NPM' or 'Npm'?

npm should never be capitalized unless it is being displayed in a location that is customarily all-caps (such as the title of man pages.)

- -

If 'npm' is an acronym, why is it never capitalized?

- +

If 'npm' is an acronym, why is it never capitalized?

Contrary to the belief of many, "npm" is not in fact an abbreviation for "Node Package Manager". It is a recursive bacronymic abbreviation for "npm is not an acronym". (If it was "ninaa", then it would be an acronym, and thus incorrectly named.)

-

"NPM", however, is an acronym (more precisely, a capitonym) for the National Association of Pastoral Musicians. You can learn more about them at http://npm.org/.

-

In software, "NPM" is a Non-Parametric Mapping utility written by Chris Rorden. You can analyze pictures of brains with it. Learn more about the (capitalized) NPM program at http://www.cabiatl.com/mricro/npm/.

-

The first seed that eventually grew into this flower was a bash utility named "pm", which was a shortened descendent of "pkgmakeinst", a bash function that was used to install various different things on different platforms, most often using Yahoo's yinst. If npm was ever an acronym for anything, it was node pm or maybe new pm.

-

So, in all seriousness, the "npm" project is named after its command-line utility, which was organically selected to be easily typed by a right-handed programmer using a US QWERTY keyboard layout, ending with the right-ring-finger in a postition to type the - key for flags and other command-line arguments. That command-line utility is always lower-case, though it starts most sentences it is a part of.

- -

How do I list installed packages?

- +

How do I list installed packages?

npm ls

- -

How do I search for packages?

- +

How do I search for packages?

npm search

-

Arguments are greps. npm search jsdom shows jsdom packages.

- -

How do I update npm?

- -
npm update npm -g
- -

You can also update all outdated local packages by doing npm update without +

How do I update npm?

+
npm install npm -g
+

You can also update all outdated local packages by doing npm update without any arguments, or global packages by doing npm update -g.

-

Occasionally, the version of npm will progress such that the current version cannot be properly installed with the version that you have installed already. (Consider, if there is ever a bug in the update command.)

-

In those cases, you can do this:

- -
curl https://www.npmjs.org/install.sh | sh
- -

What is a package?

- +
curl https://www.npmjs.org/install.sh | sh
+

What is a package?

A package is:

- -
  • a) a folder containing a program described by a package.json file
  • b) a gzipped tarball containing (a)
  • c) a url that resolves to (b)
  • d) a <name>@<version> that is published on the registry with (c)
  • e) a <name>@<tag> that points to (d)
  • f) a <name> that has a "latest" tag satisfying (e)
  • g) a git url that, when cloned, results in (a).
- +
    +
  • a) a folder containing a program described by a package.json file
  • +
  • b) a gzipped tarball containing (a)
  • +
  • c) a url that resolves to (b)
  • +
  • d) a <name>@<version> that is published on the registry with (c)
  • +
  • e) a <name>@<tag> that points to (d)
  • +
  • f) a <name> that has a "latest" tag satisfying (e)
  • +
  • g) a git url that, when cloned, results in (a).
  • +

Even if you never publish your package, you can still get a lot of benefits of using npm if you just want to write a node program (a), and perhaps if you also want to be able to easily install it elsewhere after packing it up into a tarball (b).

-

Git urls can be of the form:

-
git://github.com/user/project.git#commit-ish
 git+ssh://user@hostname:project.git#commit-ish
 git+http://user@hostname/project/blah.git#commit-ish
-git+https://user@hostname/project/blah.git#commit-ish
- -

The commit-ish can be any tag, sha, or branch which can be supplied as +git+https://user@hostname/project/blah.git#commit-ish +

The commit-ish can be any tag, sha, or branch which can be supplied as an argument to git checkout. The default is master.

- -

What is a module?

- +

What is a module?

A module is anything that can be loaded with require() in a Node.js program. The following things are all examples of things that can be loaded as modules:

- -
  • A folder with a package.json file containing a main field.
  • A folder with an index.js file in it.
  • A JavaScript file.
- +
    +
  • A folder with a package.json file containing a main field.
  • +
  • A folder with an index.js file in it.
  • +
  • A JavaScript file.
  • +

Most npm packages are modules, because they are libraries that you load with require. However, there's no requirement that an npm package be a module! Some only contain an executable command-line interface, and don't provide a main field for use in Node programs.

-

Almost all npm packages (at least, those that are Node programs) contain many modules within them (because every file they load with require() is a module).

-

In the context of a Node program, the module is also the thing that was loaded from a file. For example, in the following program:

- -
var req = require('request')
- -

we might say that "The variable req refers to the request module".

- -

So, why is it the "node_modules" folder, but "package.json" file? Why not node_packages or module.json?

- +
var req = require('request')
+

we might say that "The variable req refers to the request module".

+

So, why is it the "node_modules" folder, but "package.json" file? Why not node_packages or module.json?

The package.json file defines the package. (See "What is a package?" above.)

-

The node_modules folder is the place Node.js looks for modules. (See "What is a module?" above.)

-

For example, if you create a file at node_modules/foo.js and then had a program that did var f = require('foo.js') then it would load the module. However, foo.js is not a "package" in this case, because it does not have a package.json.

-

Alternatively, if you create a package which does not have an index.js or a "main" field in the package.json file, then it is not a module. Even if it's installed in node_modules, it can't be an argument to require().

- -

"node_modules" is the name of my deity's arch-rival, and a Forbidden Word in my religion. Can I configure npm to use a different folder?

- +

"node_modules" is the name of my deity's arch-rival, and a Forbidden Word in my religion. Can I configure npm to use a different folder?

No. This will never happen. This question comes up sometimes, because it seems silly from the outside that npm couldn't just be configured to put stuff somewhere else, and then npm could load them from there. It's an arbitrary spelling choice, right? What's the big deal?

-

At the time of this writing, the string 'node_modules' appears 151 times in 53 separate files in npm and node core (excluding tests and documentation).

-

Some of these references are in node's built-in module loader. Since npm is not involved at all at run-time, node itself would have to be configured to know where you've decided to stick stuff. Complexity hurdle #1. Since the Node module system is locked, this cannot be changed, and is enough to kill this request. But I'll continue, in deference to your deity's delicate feelings regarding spelling.

-

Many of the others are in dependencies that npm uses, which are not necessarily tightly coupled to npm (in the sense that they do not read npm's configuration files, etc.) Each of these would have to be configured to take the name of the node_modules folder as a parameter. Complexity hurdle #2.

-

Furthermore, npm has the ability to "bundle" dependencies by adding the dep names to the "bundledDependencies" list in package.json, which causes the folder to be included in the package tarball. What @@ -249,7 +191,6 @@ different spelling for node_modules? npm would have to rename the folder at publish time, and then be smart enough to unpack it using your locally configured name. Complexity hurdle #3.

-

Furthermore, what happens when you change this name? Fine, it's easy enough the first time, just rename the node_modules folders to ./blergyblerp/ or whatever name you choose. But what about when you @@ -257,99 +198,81 @@ configuration settings, so this would be rather difficult to do properly. It would have to track every previous value for this config, and always accept any of them, or else yesterday's install may -be broken tomorrow. Complexity hurdle #5.

- +be broken tomorrow. Complexity hurdle #4.

Never going to happen. The folder is named node_modules. It is written indelibly in the Node Way, handed down from the ancient times of Node 0.3.

- -

How do I install node with npm?

- +

How do I install node with npm?

You don't. Try one of these node version managers:

-

Unix:

- - - +

Windows:

- - - -

How can I use npm for development?

- -

See npm-developers(7) and package.json(5).

- + +

How can I use npm for development?

+

See npm-developers(7) and package.json(5).

You'll most likely want to npm link your development folder. That's awesomely handy.

- -

To set up your own private registry, check out npm-registry(7).

- -

Can I list a url as a dependency?

- +

To set up your own private registry, check out npm-registry(7).

+

Can I list a url as a dependency?

Yes. It should be a url to a gzipped tarball containing a single folder that has a package.json in its root, or a git url. (See "what is a package?" above.)

- - - -

See npm-link(1)

- -

The package registry website. What is that exactly?

- -

See npm-registry(7).

- -

I forgot my password, and can't publish. How do I reset it?

- + +

See npm-link(1)

+

The package registry website. What is that exactly?

+

See npm-registry(7).

+

I forgot my password, and can't publish. How do I reset it?

Go to https://npmjs.org/forgot.

- -

I get ECONNREFUSED a lot. What's up?

- +

I get ECONNREFUSED a lot. What's up?

Either the registry is down, or node's DNS isn't able to reach out.

-

To check if the registry is down, open up https://registry.npmjs.org/ in a web browser. This will also tell you if you are just unable to access the internet for some reason.

- -

If the registry IS down, let us know by emailing support@npmjs.com +

If the registry IS down, let us know by emailing support@npmjs.com or posting an issue at https://github.com/npm/npm/issues. If it's down for the world (and not just on your local network) then we're probably already being pinged about it.

-

You can also often get a faster response by visiting the #npm channel on Freenode IRC.

- -

Why no namespaces?

- +

Why no namespaces?

Please see this discussion: https://github.com/npm/npm/issues/798

-

tl;dr - It doesn't actually make things better, and can make them worse.

-

If you want to namespace your own packages, you may: simply use the - character to separate the names. npm is a mostly anarchic system. There is not sufficient need to impose namespace rules on everyone.

- -

Who does npm?

- +

Who does npm?

npm was originally written by Isaac Z. Schlueter, and many others have contributed to it, some of them quite substantially.

-

The npm open source project, The npm Registry, and the community website are maintained and operated by the -good folks at npm, Inc.

- -

I have a question or request not addressed here. Where should I put it?

- +good folks at npm, Inc.

+

I have a question or request not addressed here. Where should I put it?

Post an issue on the github project:

- - - -

Why does npm hate me?

- + +

Why does npm hate me?

npm is not capable of hatred. It loves everyone, especially you.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -361,5 +284,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-index.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-index.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-index.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-index.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,414 +10,215 @@

npm-index

Index of all npm documentation

- -

README

- +

README

node package manager

- -

Command Line Documentation

- -

npm(1)

- +

Command Line Documentation

+

Using npm on the command line

+

npm(1)

node package manager

- -

npm-adduser(1)

- +

npm-adduser(1)

Add a registry user account

- -

npm-bin(1)

- +

npm-bin(1)

Display npm bin folder

- -

npm-bugs(1)

- +

npm-bugs(1)

Bugs for a package in a web browser maybe

- -

npm-build(1)

- +

npm-build(1)

Build a package

- -

npm-bundle(1)

- +

npm-bundle(1)

REMOVED

- -

npm-cache(1)

- +

npm-cache(1)

Manipulates packages cache

- -

npm-completion(1)

- +

npm-completion(1)

Tab Completion for npm

- -

npm-config(1)

- +

npm-config(1)

Manage the npm configuration files

- -

npm-dedupe(1)

- +

npm-dedupe(1)

Reduce duplication

- -

npm-deprecate(1)

- +

npm-deprecate(1)

Deprecate a version of a package

- -

npm-docs(1)

- +

npm-docs(1)

Docs for a package in a web browser maybe

- -

npm-edit(1)

- +

npm-edit(1)

Edit an installed package

- -

npm-explore(1)

- +

npm-explore(1)

Browse an installed package

- -

npm-help-search(1)

- +

npm-help-search(1)

Search npm help documentation

- -

npm-help(1)

- +

npm-help(1)

Get help on npm

- -

npm-init(1)

- +

npm-init(1)

Interactively create a package.json file

- -

npm-install(1)

- +

npm-install(1)

Install a package

- - - +

Symlink a package folder

- -

npm-ls(1)

- +

npm-ls(1)

List installed packages

- -

npm-outdated(1)

- +

npm-outdated(1)

Check for outdated packages

- -

npm-owner(1)

- +

npm-owner(1)

Manage package owners

- -

npm-pack(1)

- +

npm-pack(1)

Create a tarball from a package

- -

npm-prefix(1)

- +

npm-prefix(1)

Display prefix

- -

npm-prune(1)

- +

npm-prune(1)

Remove extraneous packages

- -

npm-publish(1)

- +

npm-publish(1)

Publish a package

- -

npm-rebuild(1)

- +

npm-rebuild(1)

Rebuild a package

- -

npm-repo(1)

- +

npm-repo(1)

Open package repository page in the browser

- -

npm-restart(1)

- +

npm-restart(1)

Start a package

- -

npm-rm(1)

- +

npm-rm(1)

Remove a package

- -

npm-root(1)

- +

npm-root(1)

Display npm root

- -

npm-run-script(1)

- +

npm-run-script(1)

Run arbitrary package scripts

- -

npm-search(1)

- +

npm-search(1)

Search for packages

- -

npm-shrinkwrap(1)

- +

npm-shrinkwrap(1)

Lock down dependency versions

- -

npm-star(1)

- +

npm-star(1)

Mark your favorite packages

- -

npm-stars(1)

- +

npm-stars(1)

View packages marked as favorites

- -

npm-start(1)

- +

npm-start(1)

Start a package

- -

npm-stop(1)

- +

npm-stop(1)

Stop a package

- -

npm-submodule(1)

- -

Add a package as a git submodule

- -

npm-tag(1)

- +

npm-tag(1)

Tag a published version

- -

npm-test(1)

- +

npm-test(1)

Test a package

- -

npm-uninstall(1)

- +

npm-uninstall(1)

Remove a package

- -

npm-unpublish(1)

- +

npm-unpublish(1)

Remove a package from the registry

- -

npm-update(1)

- +

npm-update(1)

Update a package

- -

npm-version(1)

- +

npm-version(1)

Bump a package version

- -

npm-view(1)

- +

npm-view(1)

View registry info

- -

npm-whoami(1)

- +

npm-whoami(1)

Display npm username

- -

API Documentation

- -

npm(3)

- +

API Documentation

+

Using npm in your Node programs

+

npm(3)

node package manager

- -

npm-bin(3)

- +

npm-bin(3)

Display npm bin folder

- -

npm-bugs(3)

- +

npm-bugs(3)

Bugs for a package in a web browser maybe

- -

npm-commands(3)

- +

npm-cache(3)

+

manage the npm cache programmatically

+

npm-commands(3)

npm commands

- -

npm-config(3)

- +

npm-config(3)

Manage the npm configuration files

- -

npm-deprecate(3)

- +

npm-deprecate(3)

Deprecate a version of a package

- -

npm-docs(3)

- +

npm-docs(3)

Docs for a package in a web browser maybe

- -

npm-edit(3)

- +

npm-edit(3)

Edit an installed package

- -

npm-explore(3)

- +

npm-explore(3)

Browse an installed package

- -

npm-help-search(3)

- +

npm-help-search(3)

Search the help pages

- -

npm-init(3)

- +

npm-init(3)

Interactively create a package.json file

- -

npm-install(3)

- +

npm-install(3)

install a package programmatically

- - - +

Symlink a package folder

- -

npm-load(3)

- +

npm-load(3)

Load config settings

- -

npm-ls(3)

- +

npm-ls(3)

List installed packages

- -

npm-outdated(3)

- +

npm-outdated(3)

Check for outdated packages

- -

npm-owner(3)

- +

npm-owner(3)

Manage package owners

- -

npm-pack(3)

- +

npm-pack(3)

Create a tarball from a package

- -

npm-prefix(3)

- +

npm-prefix(3)

Display prefix

- -

npm-prune(3)

- +

npm-prune(3)

Remove extraneous packages

- -

npm-publish(3)

- +

npm-publish(3)

Publish a package

- -

npm-rebuild(3)

- +

npm-rebuild(3)

Rebuild a package

- -

npm-repo(3)

- +

npm-repo(3)

Open package repository page in the browser

- -

npm-restart(3)

- +

npm-restart(3)

Start a package

- -

npm-root(3)

- +

npm-root(3)

Display npm root

- -

npm-run-script(3)

- +

npm-run-script(3)

Run arbitrary package scripts

- -

npm-search(3)

- +

npm-search(3)

Search for packages

- -

npm-shrinkwrap(3)

- +

npm-shrinkwrap(3)

programmatically generate package shrinkwrap file

- -

npm-start(3)

- +

npm-start(3)

Start a package

- -

npm-stop(3)

- +

npm-stop(3)

Stop a package

- -

npm-submodule(3)

- -

Add a package as a git submodule

- -

npm-tag(3)

- +

npm-tag(3)

Tag a published version

- -

npm-test(3)

- +

npm-test(3)

Test a package

- -

npm-uninstall(3)

- +

npm-uninstall(3)

uninstall a package programmatically

- -

npm-unpublish(3)

- +

npm-unpublish(3)

Remove a package from the registry

- -

npm-update(3)

- +

npm-update(3)

Update a package

- -

npm-version(3)

- +

npm-version(3)

Bump a package version

- -

npm-view(3)

- +

npm-view(3)

View registry info

- -

npm-whoami(3)

- +

npm-whoami(3)

Display npm username

- -

Files

- -

npm-folders(5)

- +

Files

+

File system structures npm uses

+

npm-folders(5)

Folder Structures Used by npm

- -

npmrc(5)

- +

npmrc(5)

The npm config files

- -

package.json(5)

- +

package.json(5)

Specifics of npm's package.json handling

- -

Misc

- -

npm-coding-style(7)

- +

Misc

+

Various other bits and bobs

+

npm-coding-style(7)

npm's "funny" coding style

- -

npm-config(7)

- +

npm-config(7)

More than you probably want to know about npm configuration

- -

npm-developers(7)

- +

npm-developers(7)

Developer Guide

- -

npm-disputes(7)

- +

npm-disputes(7)

Handling Module Name Disputes

- -

npm-faq(7)

- +

npm-faq(7)

Frequently Asked Questions

- -

npm-index(7)

- +

npm-index(7)

Index of all npm documentation

- -

npm-registry(7)

- +

npm-registry(7)

The JavaScript Package Registry

- -

npm-scripts(7)

- +

npm-scope(7)

+

Scoped packages

+

npm-scripts(7)

How npm handles the "scripts" field

- -

removing-npm(7)

- +

removing-npm(7)

Cleaning the Slate

- -

semver(7)

- +

semver(7)

The semantic versioner for npm

+
@@ -429,5 +230,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-registry.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-registry.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-registry.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-registry.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,69 +10,55 @@

npm-registry

The JavaScript Package Registry

- -

DESCRIPTION

- +

DESCRIPTION

To resolve packages by name and version, npm talks to a registry website that implements the CommonJS Package Registry specification for reading package info.

-

Additionally, npm's package registry implementation supports several write APIs as well, to allow for publishing packages and managing user account information.

-

The official public npm registry is at http://registry.npmjs.org/. It -is powered by a CouchDB database at -http://isaacs.iriscouch.com/registry. The code for the couchapp is -available at http://github.com/npm/npmjs.org. npm user accounts -are CouchDB users, stored in the http://isaacs.iriscouch.com/_users -database.

- -

The registry URL is supplied by the registry config parameter. See -npm-config(1), npmrc(5), and npm-config(7) for more on managing -npm's configuration.

- -

Can I run my own private registry?

- +is powered by a CouchDB database, of which there is a public mirror at +http://skimdb.npmjs.com/registry. The code for the couchapp is +available at http://github.com/npm/npm-registry-couchapp.

+

The registry URL used is determined by the scope of the package (see +npm-scope(7)). If no scope is specified, the default registry is used, which is +supplied by the registry config parameter. See npm-config(1), +npmrc(5), and npm-config(7) for more on managing npm's configuration.

+

Can I run my own private registry?

Yes!

-

The easiest way is to replicate the couch database, and use the same (or similar) design doc to implement the APIs.

-

If you set up continuous replication from the official CouchDB, and then set your internal CouchDB as the registry config, then you'll be able to read any published packages, in addition to your private ones, and by default will only publish internally. If you then want to publish a package for the whole world to see, you can simply override the --registry config for that command.

- -

I don't want my package published in the official registry. It's private.

- +

I don't want my package published in the official registry. It's private.

Set "private": true in your package.json to prevent it from being published at all, or "publishConfig":{"registry":"http://my-internal-registry.local"} to force it to be published only to your internal registry.

- -

See package.json(5) for more info on what goes in the package.json file.

- -

Will you replicate from my registry into the public one?

- +

See package.json(5) for more info on what goes in the package.json file.

+

Will you replicate from my registry into the public one?

No. If you want things to be public, then publish them into the public registry using npm. What little security there is would be for nought otherwise.

- -

Do I have to use couchdb to build a registry that npm can talk to?

- +

Do I have to use couchdb to build a registry that npm can talk to?

No, but it's way easier. Basically, yes, you do, or you have to effectively implement the entire CouchDB API anyway.

- -

Is there a website or something to see package docs and such?

- +

Is there a website or something to see package docs and such?

Yes, head over to https://npmjs.org/

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -84,5 +70,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-scope.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-scope.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-scope.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-scope.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,82 @@ + + + npm-scope + + + + + + +
+ +

npm-scope

Scoped packages

+

DESCRIPTION

+

All npm packages have a name. Some package names also have a scope. A scope +follows the usual rules for package names (url-safe characters, no leading dots +or underscores). When used in package names, preceded by an @-symbol and +followed by a slash, e.g.

+
@somescope/somepackagename
+

Scopes are a way of grouping related packages together, and also affect a few +things about the way npm treats the package.

+

As of 2014-09-03, scoped packages are not supported by the public npm registry. +However, the npm client is backwards-compatible with un-scoped registries, so +it can be used to work with scoped and un-scoped registries at the same time.

+

Installing scoped packages

+

Scoped packages are installed to a sub-folder of the regular installation +folder, e.g. if your other packages are installed in node_modules/packagename, +scoped modules will be in node_modules/@myorg/packagename. The scope folder +(@myorg) is simply the name of the scope preceded by an @-symbol, and can +contain any number of scoped packages.

+

A scoped package is installed by referencing it by name, preceded by an +@-symbol, in npm install:

+
npm install @myorg/mypackage
+

Or in package.json:

+
"dependencies": {
+  "@myorg/mypackage": "^1.3.0"
+}
+

Note that if the @-symbol is omitted in either case npm will instead attempt to +install from GitHub; see npm-install(1).

+

Requiring scoped packages

+

Because scoped packages are installed into a scope folder, you have to +include the name of the scope when requiring them in your code, e.g.

+
require('@myorg/mypackage')
+

There is nothing special about the way Node treats scope folders, this is +just specifying to require the module mypackage in the folder called @myorg.

+

Publishing scoped packages

+

Scoped packages can be published to any registry that supports them. +As of 2014-09-03, the public npm registry does not support scoped packages, +so attempting to publish a scoped package to the registry will fail unless +you have associated that scope with a different registry, see below.

+

Associating a scope with a registry

+

Scopes can be associated with a separate registry. This allows you to +seamlessly use a mix of packages from the public npm registry and one or more +private registries, such as npm Enterprise.

+

You can associate a scope with a registry at login, e.g.

+
npm login --registry=http://reg.example.com --scope=@myco
+

Scopes have a many-to-one relationship with registries: one registry can +host multiple scopes, but a scope only ever points to one registry.

+

You can also associate a scope with a registry using npm config:

+
npm config set @myco:registry http://reg.example.com
+

Once a scope is associated with a registry, any npm install for a package +with that scope will request packages from that registry instead. Any +npm publish for a package name that contains the scope will be published to +that registry instead.

+

SEE ALSO

+ + +
+ + + + + + + + + + + + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/npm-scripts.html nodejs-0.11.15/deps/npm/html/doc/misc/npm-scripts.html --- nodejs-0.11.13/deps/npm/html/doc/misc/npm-scripts.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/npm-scripts.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,221 +10,205 @@

npm-scripts

How npm handles the "scripts" field

- -

DESCRIPTION

- -

npm supports the "scripts" member of the package.json script, for the +

DESCRIPTION

+

npm supports the "scripts" property of the package.json script, for the following scripts:

- -
  • prepublish: +
      +
    • prepublish: Run BEFORE the package is published. (Also run on local npm -install without any arguments.)
    • publish, postpublish: -Run AFTER the package is published.
    • preinstall: -Run BEFORE the package is installed
    • install, postinstall: -Run AFTER the package is installed.
    • preuninstall, uninstall: -Run BEFORE the package is uninstalled.
    • postuninstall: -Run AFTER the package is uninstalled.
    • preupdate: -Run BEFORE the package is updated with the update command.
    • update, postupdate: -Run AFTER the package is updated with the update command.
    • pretest, test, posttest: -Run by the npm test command.
    • prestop, stop, poststop: -Run by the npm stop command.
    • prestart, start, poststart: -Run by the npm start command.
    • prerestart, restart, postrestart: +install without any arguments.)
    • +
    • publish, postpublish: +Run AFTER the package is published.
    • +
    • preinstall: +Run BEFORE the package is installed
    • +
    • install, postinstall: +Run AFTER the package is installed.
    • +
    • preuninstall, uninstall: +Run BEFORE the package is uninstalled.
    • +
    • postuninstall: +Run AFTER the package is uninstalled.
    • +
    • preupdate: +Run BEFORE the package is updated with the update command.
    • +
    • update, postupdate: +Run AFTER the package is updated with the update command.
    • +
    • pretest, test, posttest: +Run by the npm test command.
    • +
    • prestop, stop, poststop: +Run by the npm stop command.
    • +
    • prestart, start, poststart: +Run by the npm start command.
    • +
    • prerestart, restart, postrestart: Run by the npm restart command. Note: npm restart will run the -stop and start scripts if no restart script is provided.
    - -

    Additionally, arbitrary scripts can be run by doing -npm run-script <stage> <pkg>.

    - -

    NOTE: INSTALL SCRIPTS ARE AN ANTIPATTERN

    - +stop and start scripts if no restart script is provided.
  • +
+

Additionally, arbitrary scripts can be executed by running npm +run-script <pkg> <stage>. Pre and post commands with matching +names will be run for those as well (e.g. premyscript, myscript, +postmyscript).

+

NOTE: INSTALL SCRIPTS ARE AN ANTIPATTERN

tl;dr Don't use install. Use a .gyp file for compilation, and prepublish for anything else.

-

You should almost never have to explicitly set a preinstall or install script. If you are doing this, please consider if there is another option.

-

The only valid use of install or preinstall scripts is for compilation which must be done on the target architecture. In early versions of node, this was often done using the node-waf scripts, or a standalone Makefile, and early versions of npm required that it be explicitly set in package.json. This was not portable, and harder to do properly.

-

In the current version of node, the standard way to do this is using a .gyp file. If you have a file with a .gyp extension in the root of your package, then npm will run the appropriate node-gyp commands automatically at install time. This is the only officially supported method for compiling binary addons, and does not require that you add anything to your package.json file.

-

If you have to do other things before your package is used, in a way that is not dependent on the operating system or architecture of the target system, then use a prepublish script instead. This includes tasks such as:

- -
  • Compile CoffeeScript source code into JavaScript.
  • Create minified versions of JavaScript source code.
  • Fetching remote resources that your package will use.
- +
    +
  • Compile CoffeeScript source code into JavaScript.
  • +
  • Create minified versions of JavaScript source code.
  • +
  • Fetching remote resources that your package will use.
  • +

The advantage of doing these things at prepublish time instead of preinstall or install time is that they can be done once, in a single place, and thus greatly reduce complexity and variability. Additionally, this means that:

- -
  • You can depend on coffee-script as a devDependency, and thus -your users don't need to have it installed.
  • You don't need to include the minifiers in your package, reducing -the size for your users.
  • You don't need to rely on your users having curl or wget or -other system tools on the target machines.
- -

DEFAULT VALUES

- +
    +
  • You can depend on coffee-script as a devDependency, and thus +your users don't need to have it installed.
  • +
  • You don't need to include the minifiers in your package, reducing +the size for your users.
  • +
  • You don't need to rely on your users having curl or wget or +other system tools on the target machines.
  • +
+

DEFAULT VALUES

npm will default some script values based on package contents.

- -
  • "start": "node server.js":

    If there is a server.js file in the root of your package, then npm -will default the start command to node server.js.

  • "preinstall": "node-waf clean || true; node-waf configure build":

    If there is a wscript file in the root of your package, npm will -default the preinstall command to compile using node-waf.

- -

USER

- +
    +
  • "start": "node server.js":

    +

    If there is a server.js file in the root of your package, then npm +will default the start command to node server.js.

    +
  • +
  • "preinstall": "node-waf clean || true; node-waf configure build":

    +

    If there is a wscript file in the root of your package, npm will +default the preinstall command to compile using node-waf.

    +
  • +
+

USER

If npm was invoked with root privileges, then it will change the uid to the user account or uid specified by the user config, which defaults to nobody. Set the unsafe-perm flag to run scripts with root privileges.

- -

ENVIRONMENT

- +

ENVIRONMENT

Package scripts run in an environment where many pieces of information are made available regarding the setup of npm and the current state of the process.

-

path

-

If you depend on modules that define executable scripts, like test suites, then those executables will be added to the PATH for executing the scripts. So, if your package.json has this:

-
{ "name" : "foo"
 , "dependencies" : { "bar" : "0.1.x" }
-, "scripts": { "start" : "bar ./test" } }
- -

then you could run npm start to execute the bar script, which is +, "scripts": { "start" : "bar ./test" } } +

then you could run npm start to execute the bar script, which is exported into the node_modules/.bin directory on npm install.

-

package.json vars

-

The package.json fields are tacked onto the npm_package_ prefix. So, for instance, if you had {"name":"foo", "version":"1.2.5"} in your package.json file, then your package scripts would have the npm_package_name environment variable set to "foo", and the npm_package_version set to "1.2.5"

-

configuration

-

Configuration parameters are put in the environment with the npm_config_ prefix. For instance, you can view the effective root config by checking the npm_config_root environment variable.

- -

Special: package.json "config" hash

- +

Special: package.json "config" object

The package.json "config" keys are overwritten in the environment if there is a config param of <name>[@<version>]:<key>. For example, if the package.json has this:

-
{ "name" : "foo"
 , "config" : { "port" : "8080" }
-, "scripts" : { "start" : "node server.js" } }
- -

and the server.js is this:

- -
http.createServer(...).listen(process.env.npm_package_config_port)
- -

then the user could change the behavior by doing:

- -
npm config set foo:port 80
- -

current lifecycle event

- +, "scripts" : { "start" : "node server.js" } } +

and the server.js is this:

+
http.createServer(...).listen(process.env.npm_package_config_port)
+

then the user could change the behavior by doing:

+
npm config set foo:port 80
+

current lifecycle event

Lastly, the npm_lifecycle_event environment variable is set to whichever stage of the cycle is being executed. So, you could have a single script used for different parts of the process which switches based on what's currently happening.

-

Objects are flattened following this format, so if you had {"scripts":{"install":"foo.js"}} in your package.json, then you'd see this in the script:

- -
process.env.npm_package_scripts_install === "foo.js"
- -

EXAMPLES

- +
process.env.npm_package_scripts_install === "foo.js"
+

EXAMPLES

For example, if your package.json contains this:

-
{ "scripts" :
   { "install" : "scripts/install.js"
   , "postinstall" : "scripts/install.js"
   , "uninstall" : "scripts/uninstall.js"
   }
-}
- -

then the scripts/install.js will be called for the install, +} +

then the scripts/install.js will be called for the install, post-install, stages of the lifecycle, and the scripts/uninstall.js would be called when the package is uninstalled. Since scripts/install.js is running for three different phases, it would be wise in this case to look at the npm_lifecycle_event environment variable.

-

If you want to run a make command, you can do so. This works just fine:

-
{ "scripts" :
   { "preinstall" : "./configure"
   , "install" : "make && make install"
   , "test" : "make test"
   }
-}
- -

EXITING

- +} +

EXITING

Scripts are run by passing the line as a script argument to sh.

-

If the script exits with a code other than 0, then this will abort the process.

-

Note that these script files don't have to be nodejs or even javascript programs. They just have to be some kind of executable file.

- -

HOOK SCRIPTS

- +

HOOK SCRIPTS

If you want to run a specific script at a specific lifecycle event for ALL packages, then you can use a hook script.

-

Place an executable file at node_modules/.hooks/{eventname}, and it'll get run for all packages when they are going through that point in the package lifecycle for any packages installed in that root.

-

Hook scripts are run exactly the same way as package.json scripts. That is, they are in a separate child process, with the env described above.

- -

BEST PRACTICES

- -
  • Don't exit with a non-zero error code unless you really mean it. +

    BEST PRACTICES

    +
      +
    • Don't exit with a non-zero error code unless you really mean it. Except for uninstall scripts, this will cause the npm action to fail, and potentially be rolled back. If the failure is minor or only will prevent some optional features, then it's better to just -print a warning and exit successfully.
    • Try not to use scripts to do what npm can do for you. Read through -package.json(5) to see all the things that you can specify and enable +print a warning and exit successfully.
    • +
    • Try not to use scripts to do what npm can do for you. Read through +package.json(5) to see all the things that you can specify and enable by simply describing your package appropriately. In general, this -will lead to a more robust and consistent state.
    • Inspect the env to determine where to put things. For instance, if +will lead to a more robust and consistent state.
    • +
    • Inspect the env to determine where to put things. For instance, if the npm_config_binroot environ is set to /home/user/bin, then don't try to install executables into /usr/local/bin. The user -probably set it up that way for a reason.
    • Don't prefix your script commands with "sudo". If root permissions +probably set it up that way for a reason.
    • +
    • Don't prefix your script commands with "sudo". If root permissions are required for some reason, then it'll fail with that error, and -the user will sudo the npm command in question.
    - -

    SEE ALSO

    +the user will sudo the npm command in question.
  • +
+

SEE ALSO

+ -
@@ -236,5 +220,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/removing-npm.html nodejs-0.11.15/deps/npm/html/doc/misc/removing-npm.html --- nodejs-0.11.13/deps/npm/html/doc/misc/removing-npm.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/removing-npm.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,56 +10,42 @@

npm-removal

Cleaning the Slate

- -

SYNOPSIS

- +

SYNOPSIS

So sad to see you go.

- -
sudo npm uninstall npm -g
- -

Or, if that fails, get the npm source code, and do:

- -
sudo make uninstall
- -

More Severe Uninstalling

- +
sudo npm uninstall npm -g
+

Or, if that fails, get the npm source code, and do:

+
sudo make uninstall
+

More Severe Uninstalling

Usually, the above instructions are sufficient. That will remove npm, but leave behind anything you've installed.

-

If that doesn't work, or if you require more drastic measures, continue reading.

-

Note that this is only necessary for globally-installed packages. Local installs are completely contained within a project's node_modules folder. Delete that folder, and everything is gone (unless a package's install script is particularly ill-behaved).

-

This assumes that you installed node and npm in the default place. If you configured node with a different --prefix, or installed npm with a different prefix setting, then adjust the paths accordingly, replacing /usr/local with your install prefix.

-

To remove everything npm-related manually:

- -
rm -rf /usr/local/{lib/node{,/.npm,_modules},bin,share/man}/npm*
- -

If you installed things with npm, then your best bet is to uninstall +

rm -rf /usr/local/{lib/node{,/.npm,_modules},bin,share/man}/npm*
+

If you installed things with npm, then your best bet is to uninstall them with npm first, and then install them again once you have a proper install. This can help find any symlinks that are lying around:

- -
ls -laF /usr/local/{lib/node{,/.npm},bin,share/man} | grep npm
- -

Prior to version 0.3, npm used shim files for executables and node +

ls -laF /usr/local/{lib/node{,/.npm},bin,share/man} | grep npm
+

Prior to version 0.3, npm used shim files for executables and node modules. To track those down, you can do the following:

+
find /usr/local/{lib/node,bin} -exec grep -l npm \{\} \; ;
+

(This is also in the README file.)

+

SEE ALSO

+ -
find /usr/local/{lib/node,bin} -exec grep -l npm \{\} \; ;
- -

(This is also in the README file.)

- -

SEE ALSO

- -
@@ -71,5 +57,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/misc/semver.html nodejs-0.11.15/deps/npm/html/doc/misc/semver.html --- nodejs-0.11.13/deps/npm/html/doc/misc/semver.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/misc/semver.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,9 +10,7 @@

semver

The semantic versioner for npm

- -

Usage

- +

Usage

$ npm install semver
 
 semver.valid('1.2.3') // '1.2.3'
@@ -20,10 +18,8 @@
 semver.clean('  =v1.2.3   ') // '1.2.3'
 semver.satisfies('1.2.3', '1.x || >=2.5.0 || 5.0.0 - 7.2.3') // true
 semver.gt('1.2.3', '9.8.7') // false
-semver.lt('1.2.3', '9.8.7') // true
- -

As a command-line utility:

- +semver.lt('1.2.3', '9.8.7') // true +

As a command-line utility:

$ semver -h
 
 Usage: semver <version> [<version> [...]] [-r <range> | -i <inc> | -d <dec>]
@@ -40,85 +36,222 @@
 then exits failure.
 
 Versions are printed in ascending order, so supplying
-multiple versions to the utility will just sort them.
- -

Versions

- -

A "version" is described by the v2.0.0 specification found at +multiple versions to the utility will just sort them. +

Versions

+

A "version" is described by the v2.0.0 specification found at http://semver.org/.

-

A leading "=" or "v" character is stripped off and ignored.

- -

Ranges

- -

The following range styles are supported:

- -
  • 1.2.3 A specific version. When nothing else will do. Note that -build metadata is still ignored, so 1.2.3+build2012 will satisfy -this range.
  • >1.2.3 Greater than a specific version.
  • <1.2.3 Less than a specific version. If there is no prerelease -tag on the version range, then no prerelease version will be allowed -either, even though these are technically "less than".
  • >=1.2.3 Greater than or equal to. Note that prerelease versions -are NOT equal to their "normal" equivalents, so 1.2.3-beta will -not satisfy this range, but 2.3.0-beta will.
  • <=1.2.3 Less than or equal to. In this case, prerelease versions -ARE allowed, so 1.2.3-beta would satisfy.
  • 1.2.3 - 2.3.4 := >=1.2.3 <=2.3.4
  • ~1.2.3 := >=1.2.3-0 <1.3.0-0 "Reasonably close to 1.2.3". When -using tilde operators, prerelease versions are supported as well, -but a prerelease of the next significant digit will NOT be -satisfactory, so 1.3.0-beta will not satisfy ~1.2.3.
  • ^1.2.3 := >=1.2.3-0 <2.0.0-0 "Compatible with 1.2.3". When -using caret operators, anything from the specified version (including -prerelease) will be supported up to, but not including, the next -major version (or its prereleases). 1.5.1 will satisfy ^1.2.3, -while 1.2.2 and 2.0.0-beta will not.
  • ^0.1.3 := >=0.1.3-0 <0.2.0-0 "Compatible with 0.1.3". 0.x.x versions are -special: the first non-zero component indicates potentially breaking changes, -meaning the caret operator matches any version with the same first non-zero -component starting at the specified version.
  • ^0.0.2 := =0.0.2 "Only the version 0.0.2 is considered compatible"
  • ~1.2 := >=1.2.0-0 <1.3.0-0 "Any version starting with 1.2"
  • ^1.2 := >=1.2.0-0 <2.0.0-0 "Any version compatible with 1.2"
  • 1.2.x := >=1.2.0-0 <1.3.0-0 "Any version starting with 1.2"
  • ~1 := >=1.0.0-0 <2.0.0-0 "Any version starting with 1"
  • ^1 := >=1.0.0-0 <2.0.0-0 "Any version compatible with 1"
  • 1.x := >=1.0.0-0 <2.0.0-0 "Any version starting with 1"
- -

Ranges can be joined with either a space (which implies "and") or a -|| (which implies "or").

- -

Functions

- +

Ranges

+

A version range is a set of comparators which specify versions +that satisfy the range.

+

A comparator is composed of an operator and a version. The set +of primitive operators is:

+
    +
  • < Less than
  • +
  • <= Less than or equal to
  • +
  • > Greater than
  • +
  • >= Greater than or equal to
  • +
  • = Equal. If no operator is specified, then equality is assumed, +so this operator is optional, but MAY be included.
  • +
+

For example, the comparator >=1.2.7 would match the versions +1.2.7, 1.2.8, 2.5.3, and 1.3.9, but not the versions 1.2.6 +or 1.1.0.

+

Comparators can be joined by whitespace to form a comparator set, +which is satisfied by the intersection of all of the comparators +it includes.

+

A range is composed of one or more comparator sets, joined by ||. A +version matches a range if and only if every comparator in at least +one of the ||-separated comparator sets is satisfied by the version.

+

For example, the range >=1.2.7 <1.3.0 would match the versions +1.2.7, 1.2.8, and 1.2.99, but not the versions 1.2.6, 1.3.0, +or 1.1.0.

+

The range 1.2.7 || >=1.2.9 <2.0.0 would match the versions 1.2.7, +1.2.9, and 1.4.6, but not the versions 1.2.8 or 2.0.0.

+

Prerelease Tags

+

If a version has a prerelease tag (for example, 1.2.3-alpha.3) then +it will only be allowed to satisfy comparator sets if at least one +comparator with the same [major, minor, patch] tuple also has a +prerelease tag.

+

For example, the range >1.2.3-alpha.3 would be allowed to match the +version 1.2.3-alpha.7, but it would not be satisfied by +3.4.5-alpha.9, even though 3.4.5-alpha.9 is technically "greater +than" 1.2.3-alpha.3 according to the SemVer sort rules. The version +range only accepts prerelease tags on the 1.2.3 version. The +version 3.4.5 would satisfy the range, because it does not have a +prerelease flag, and 3.4.5 is greater than 1.2.3-alpha.7.

+

The purpose for this behavior is twofold. First, prerelease versions +frequently are updated very quickly, and contain many breaking changes +that are (by the author's design) not yet fit for public consumption. +Therefore, by default, they are excluded from range matching +semantics.

+

Second, a user who has opted into using a prerelease version has +clearly indicated the intent to use that specific set of +alpha/beta/rc versions. By including a prerelease tag in the range, +the user is indicating that they are aware of the risk. However, it +is still not appropriate to assume that they have opted into taking a +similar risk on the next set of prerelease versions.

+

Advanced Range Syntax

+

Advanced range syntax desugars to primitive comparators in +deterministic ways.

+

Advanced ranges may be combined in the same way as primitive +comparators using white space or ||.

+

Hyphen Ranges X.Y.Z - A.B.C

+

Specifies an inclusive set.

+
    +
  • 1.2.3 - 2.3.4 := >=1.2.3 <=2.3.4
  • +
+

If a partial version is provided as the first version in the inclusive +range, then the missing pieces are replaced with zeroes.

+
    +
  • 1.2 - 2.3.4 := >=1.2.0 <=2.3.4
  • +
+

If a partial version is provided as the second version in the +inclusive range, then all versions that start with the supplied parts +of the tuple are accepted, but nothing that would be greater than the +provided tuple parts.

+
    +
  • 1.2.3 - 2.3 := >=1.2.3 <2.4.0
  • +
  • 1.2.3 - 2 := >=1.2.3 <3.0.0
  • +
+

X-Ranges 1.2.x 1.X 1.2.* *

+

Any of X, x, or * may be used to "stand in" for one of the +numeric values in the [major, minor, patch] tuple.

+
    +
  • * := >=0.0.0 (Any version satisfies)
  • +
  • 1.x := >=1.0.0 <2.0.0 (Matching major version)
  • +
  • 1.2.x := >=1.2.0 <1.3.0 (Matching major and minor versions)
  • +
+

A partial version range is treated as an X-Range, so the special +character is in fact optional.

+
    +
  • "" (empty string) := * := >=0.0.0
  • +
  • 1 := 1.x.x := >=1.0.0 <2.0.0
  • +
  • 1.2 := 1.2.x := >=1.2.0 <1.3.0
  • +
+

Tilde Ranges ~1.2.3 ~1.2 ~1

+

Allows patch-level changes if a minor version is specified on the +comparator. Allows minor-level changes if not.

+
    +
  • ~1.2.3 := >=1.2.3 <1.(2+1).0 := >=1.2.3 <1.3.0
  • +
  • ~1.2 := >=1.2.0 <1.(2+1).0 := >=1.2.0 <1.3.0 (Same as 1.2.x)
  • +
  • ~1 := >=1.0.0 <(1+1).0.0 := >=1.0.0 <2.0.0 (Same as 1.x)
  • +
  • ~0.2.3 := >=0.2.3 <0.(2+1).0 := >=0.2.3 <0.3.0
  • +
  • ~0.2 := >=0.2.0 <0.(2+1).0 := >=0.2.0 <0.3.0 (Same as 0.2.x)
  • +
  • ~0 := >=0.0.0 <(0+1).0.0 := >=0.0.0 <1.0.0 (Same as 0.x)
  • +
  • ~1.2.3-beta.2 := >=1.2.3-beta.2 <1.3.0 Note that prereleases in +the 1.2.3 version will be allowed, if they are greater than or +equal to beta.2. So, 1.2.3-beta.4 would be allowed, but +1.2.4-beta.2 would not, because it is a prerelease of a +different [major, minor, patch] tuple.
  • +
+

Note: this is the same as the ~> operator in rubygems.

+

Caret Ranges ^1.2.3 ^0.2.5 ^0.0.4

+

Allows changes that do not modify the left-most non-zero digit in the +[major, minor, patch] tuple. In other words, this allows patch and +minor updates for versions 1.0.0 and above, patch updates for +versions 0.X >=0.1.0, and no updates for versions 0.0.X.

+

Many authors treat a 0.x version as if the x were the major +"breaking-change" indicator.

+

Caret ranges are ideal when an author may make breaking changes +between 0.2.4 and 0.3.0 releases, which is a common practice. +However, it presumes that there will not be breaking changes between +0.2.4 and 0.2.5. It allows for changes that are presumed to be +additive (but non-breaking), according to commonly observed practices.

+
    +
  • ^1.2.3 := >=1.2.3 <2.0.0
  • +
  • ^0.2.3 := >=0.2.3 <0.3.0
  • +
  • ^0.0.3 := >=0.0.3 <0.0.4
  • +
  • ^1.2.3-beta.2 := >=1.2.3-beta.2 <2.0.0 Note that prereleases in +the 1.2.3 version will be allowed, if they are greater than or +equal to beta.2. So, 1.2.3-beta.4 would be allowed, but +1.2.4-beta.2 would not, because it is a prerelease of a +different [major, minor, patch] tuple.
  • +
  • ^0.0.3-beta := >=0.0.3-beta <0.0.4 Note that prereleases in the +0.0.3 version only will be allowed, if they are greater than or +equal to beta. So, 0.0.3-pr.2 would be allowed.
  • +
+

When parsing caret ranges, a missing patch value desugars to the +number 0, but will allow flexibility within that value, even if the +major and minor versions are both 0.

+
    +
  • ^1.2.x := >=1.2.0 <2.0.0
  • +
  • ^0.0.x := >=0.0.0 <0.1.0
  • +
  • ^0.0 := >=0.0.0 <0.1.0
  • +
+

A missing minor and patch values will desugar to zero, but also +allow flexibility within those values, even if the major version is +zero.

+
    +
  • ^1.x := >=1.0.0 <2.0.0
  • +
  • ^0.x := >=0.0.0 <1.0.0
  • +
+

Functions

All methods and classes take a final loose boolean argument that, if true, will be more forgiving about not-quite-valid semver strings. The resulting output will always be 100% strict, of course.

-

Strict-mode Comparators and Ranges will be strict about the SemVer strings that they parse.

- -
  • valid(v): Return the parsed version, or null if it's not valid.
  • inc(v, release): Return the version incremented by the release type -(major, minor, patch, or prerelease), or null if it's not valid.
- -

Comparison

- -
  • gt(v1, v2): v1 > v2
  • gte(v1, v2): v1 >= v2
  • lt(v1, v2): v1 < v2
  • lte(v1, v2): v1 <= v2
  • eq(v1, v2): v1 == v2 This is true if they're logically equivalent, +
      +
    • valid(v): Return the parsed version, or null if it's not valid.
    • +
    • inc(v, release): Return the version incremented by the release +type (major, premajor, minor, preminor, patch, +prepatch, or prerelease), or null if it's not valid
        +
      • premajor in one call will bump the version up to the next major +version and down to a prerelease of that major version. +preminor, and prepatch work the same way.
      • +
      • If called from a non-prerelease version, the prerelease will work the +same as prepatch. It increments the patch version, then makes a +prerelease. If the input version is already a prerelease it simply +increments it.
      • +
      +
    • +
    +

    Comparison

    +
      +
    • gt(v1, v2): v1 > v2
    • +
    • gte(v1, v2): v1 >= v2
    • +
    • lt(v1, v2): v1 < v2
    • +
    • lte(v1, v2): v1 <= v2
    • +
    • eq(v1, v2): v1 == v2 This is true if they're logically equivalent, even if they're not the exact same string. You already know how to -compare strings.
    • neq(v1, v2): v1 != v2 The opposite of eq.
    • cmp(v1, comparator, v2): Pass in a comparison string, and it'll call +compare strings.
    • +
    • neq(v1, v2): v1 != v2 The opposite of eq.
    • +
    • cmp(v1, comparator, v2): Pass in a comparison string, and it'll call the corresponding function above. "===" and "!==" do simple string comparison, but are included for completeness. Throws if an -invalid comparison string is provided.
    • compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or -1 if -v2 is greater. Sorts in ascending order if passed to Array.sort().
    • rcompare(v1, v2): The reverse of compare. Sorts an array of versions -in descending order when passed to Array.sort().
    - -

    Ranges

    - -
    • validRange(range): Return the valid range or null if it's not valid
    • satisfies(version, range): Return true if the version satisfies the -range.
    • maxSatisfying(versions, range): Return the highest version in the list -that satisfies the range, or null if none of them do.
    • gtr(version, range): Return true if version is greater than all the -versions possible in the range.
    • ltr(version, range): Return true if version is less than all the -versions possible in the range.
    • outside(version, range, hilo): Return true if the version is outside +invalid comparison string is provided.
    • +
    • compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or -1 if +v2 is greater. Sorts in ascending order if passed to Array.sort().
    • +
    • rcompare(v1, v2): The reverse of compare. Sorts an array of versions +in descending order when passed to Array.sort().
    • +
    +

    Ranges

    +
      +
    • validRange(range): Return the valid range or null if it's not valid
    • +
    • satisfies(version, range): Return true if the version satisfies the +range.
    • +
    • maxSatisfying(versions, range): Return the highest version in the list +that satisfies the range, or null if none of them do.
    • +
    • gtr(version, range): Return true if version is greater than all the +versions possible in the range.
    • +
    • ltr(version, range): Return true if version is less than all the +versions possible in the range.
    • +
    • outside(version, range, hilo): Return true if the version is outside the bounds of the range in either the high or low direction. The hilo argument must be either the string '>' or '<'. (This is -the function called by gtr and ltr.)
    - +the function called by gtr and ltr.)
  • +

Note that, since ranges may be non-contiguous, a version might not be greater than a range, less than a range, or satisfy a range! For example, the range 1.2 <1.2.9 || >2.0.0 would have a hole from 1.2.9 until 2.0.0, so the version 1.2.10 would not be greater than the -range (because 2.0.1 satisfies, which is higher), nor less than the -range (since 1.2.8 satisfies, which is lower), and it also does not +range (because 2.0.1 satisfies, which is higher), nor less than the +range (since 1.2.8 satisfies, which is lower), and it also does not satisfy the range.

-

If you want to know if a version satisfies or does not satisfy a range, use the satisfies(version, range) function.

+
@@ -130,5 +263,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/doc/README.html nodejs-0.11.15/deps/npm/html/doc/README.html --- nodejs-0.11.13/deps/npm/html/doc/README.html 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/doc/README.html 2015-01-20 21:22:17.000000000 +0000 @@ -10,153 +10,97 @@

npm

node package manager

- -

![Build Status -## SYNOPSIS

- +

Build Status

+

SYNOPSIS

This is just enough info to get you up and running.

-

Much more info available via npm help once it's installed.

- -

IMPORTANT

- +

IMPORTANT

You need node v0.8 or higher to run this program.

-

To install an old and unsupported version of npm that works on node 0.3 and prior, clone the git repo and dig through the old tags and branches.

- -

Super Easy Install

- -

npm comes with node now.

- -

Windows Computers

- -

Get the MSI. npm is in it.

- -

Apple Macintosh Computers

- -

Get the pkg. npm is in it.

- -

Other Sorts of Unices

- +

Super Easy Install

+

npm comes with node now.

+

Windows Computers

+

Get the MSI. npm is in it.

+

Apple Macintosh Computers

+

Get the pkg. npm is in it.

+

Other Sorts of Unices

Run make install. npm will be installed with node.

-

If you want a more fancy pants install (a different version, customized paths, etc.) then read on.

- -

Fancy Install (Unix)

- +

Fancy Install (Unix)

There's a pretty robust install script at https://www.npmjs.org/install.sh. You can download that and run it.

-

Here's an example using curl:

- -
curl -L https://npmjs.org/install.sh | sh
- -

Slightly Fancier

- +
curl -L https://npmjs.org/install.sh | sh
+

Slightly Fancier

You can set any npm configuration params with that script:

- -
npm_config_prefix=/some/path sh install.sh
- -

Or, you can run it in uber-debuggery mode:

- -
npm_debug=1 sh install.sh
- -

Even Fancier

- +
npm_config_prefix=/some/path sh install.sh
+

Or, you can run it in uber-debuggery mode:

+
npm_debug=1 sh install.sh
+

Even Fancier

Get the code with git. Use make to build the docs and do other stuff. If you plan on hacking on npm, make link is your friend.

-

If you've got the npm source code, you can also semi-permanently set arbitrary config keys using the ./configure --key=val ..., and then run npm commands by doing node cli.js <cmd> <args>. (This is helpful for testing, or running stuff without actually installing npm itself.)

- -

Fancy Windows Install

- +

Fancy Windows Install

You can download a zip file from https://npmjs.org/dist/, and unpack it in the same folder where node.exe lives.

-

If that's not fancy enough for you, then you can fetch the code with git, and mess with it directly.

- -

Installing on Cygwin

- +

Installing on Cygwin

No.

- -

Permissions when Using npm to Install Other Stuff

- +

Permissions when Using npm to Install Other Stuff

tl;dr

- -
  • Use sudo for greater safety. Or don't, if you prefer not to.
  • npm will downgrade permissions if it's root before running any build -scripts that package authors specified.
- -

More details...

- +
    +
  • Use sudo for greater safety. Or don't, if you prefer not to.
  • +
  • npm will downgrade permissions if it's root before running any build +scripts that package authors specified.
  • +
+

More details...

As of version 0.3, it is recommended to run npm as root. This allows npm to change the user identifier to the nobody user prior to running any package build or test commands.

-

If you are not the root user, or if you are on a platform that does not support uid switching, then npm will not attempt to change the userid.

-

If you would like to ensure that npm always runs scripts as the "nobody" user, and have it fail if it cannot downgrade permissions, then set the following configuration param:

- -
npm config set unsafe-perm false
- -

This will prevent running in unsafe mode, even as non-root users.

- -

Uninstalling

- +
npm config set unsafe-perm false
+

This will prevent running in unsafe mode, even as non-root users.

+

Uninstalling

So sad to see you go.

- -
sudo npm uninstall npm -g
- -

Or, if that fails,

- -
sudo make uninstall
- -

More Severe Uninstalling

- +
sudo npm uninstall npm -g
+

Or, if that fails,

+
sudo make uninstall
+

More Severe Uninstalling

Usually, the above instructions are sufficient. That will remove npm, but leave behind anything you've installed.

-

If you would like to remove all the packages that you have installed, then you can use the npm ls command to find them, and then npm rm to remove them.

-

To remove cruft left behind by npm 0.x, you can use the included clean-old.sh script file. You can run it conveniently like this:

- -
npm explore npm -g -- sh scripts/clean-old.sh
- -

npm uses two configuration files, one for per-user configs, and another +

npm explore npm -g -- sh scripts/clean-old.sh
+

npm uses two configuration files, one for per-user configs, and another for global (every-user) configs. You can view them by doing:

-
npm config get userconfig   # defaults to ~/.npmrc
-npm config get globalconfig # defaults to /usr/local/etc/npmrc
- -

Uninstalling npm does not remove configuration files by default. You +npm config get globalconfig # defaults to /usr/local/etc/npmrc +

Uninstalling npm does not remove configuration files by default. You must remove them yourself manually if you want them gone. Note that this means that future npm installs will not remember the settings that you have chosen.

- -

Using npm Programmatically

- +

Using npm Programmatically

If you would like to use npm programmatically, you can do that. It's not very well documented, but it is rather simple.

-

Most of the time, unless you actually want to do all the things that npm does, you should try using one of npm's dependencies rather than using npm itself, if possible.

-

Eventually, npm will be just a thin cli wrapper around the modules that it depends on, but for now, there are some things that you must use npm itself to do.

-
var npm = require("npm")
 npm.load(myConfigObject, function (er) {
   if (er) return handlError(er)
@@ -164,91 +108,73 @@
     if (er) return commandFailed(er)
     // command succeeded, and data might have some info
   })
-  npm.on("log", function (message) { .... })
-})
- -

The load function takes an object hash of the command-line configs. + npm.registry.log.on("log", function (message) { .... }) +}) +

The load function takes an object hash of the command-line configs. The various npm.commands.<cmd> functions take an array of positional argument strings. The last argument to any npm.commands.<cmd> function is a callback. Some commands take other optional arguments. Read the source.

-

You cannot set configs individually for any single npm function at this time. Since npm is a singleton, any call to npm.config.set will change the value for all npm commands in that process.

-

See ./bin/npm-cli.js for an example of pulling config values off of the command line arguments using nopt. You may also want to check out npm help config to learn about all the options you can set there.

- -

More Docs

- +

More Docs

Check out the docs, especially the faq.

-

You can use the npm help command to read any of them.

-

If you're a developer, and you want to use npm to publish your program, you should read this

- - - +

"npm" and "The npm Registry" are owned by npm, Inc. All rights reserved. See the included LICENSE file for more details.

-

"Node.js" and "node" are trademarks owned by Joyent, Inc.

-

Modules published on the npm registry are not officially endorsed by npm, Inc. or the Node.js project.

-

Data published to the npm registry is not part of npm itself, and is the sole property of the publisher. While every effort is made to ensure accountability, there is absolutely no guarantee, warrantee, or assertion expressed or implied as to the quality, fitness for a specific purpose, or lack of malice in any given npm package.

-

If you have a complaint about a package in the public npm registry, and cannot resolve it with the package owner, please email -support@npmjs.com and explain the situation.

- +support@npmjs.com and explain the situation.

Any data published to The npm Registry (including user account information) may be removed or modified at the sole discretion of the npm server administrators.

- -

In plainer english

- +

In plainer english

npm is the property of npm, Inc.

-

If you publish something, it's yours, and you are solely accountable for it.

-

If other people publish something, it's theirs.

-

Users can publish Bad Stuff. It will be removed promptly if reported. But there is no vetting process for published modules, and you use them at your own risk. Please inspect the source.

-

If you publish Bad Stuff, we may delete it from the registry, or even ban your account in extreme cases. So don't do that.

- -

BUGS

- +

BUGS

When you find issues, please report them:

- - - +

Be sure to include all of the output from the npm command that didn't work as expected. The npm-debug.log file is also helpful to provide.

-

You can also look for isaacs in #node.js on irc://irc.freenode.net. He will no doubt tell you to put the output in a gist or email.

+

SEE ALSO

+ -

SEE ALSO

- -
@@ -260,5 +186,5 @@ - + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-bin.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-bin.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-bin.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-bin.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +

npm-bin

Display npm bin folder

+

SYNOPSIS

+
npm.commands.bin(args, cb)
+

DESCRIPTION

+

Print the folder where npm will install executables.

+

This function should not be used programmatically. Instead, just refer +to the npm.bin property.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-bugs.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-bugs.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-bugs.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-bugs.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-bugs

Bugs for a package in a web browser maybe

+

SYNOPSIS

+
npm.commands.bugs(package, callback)
+

DESCRIPTION

+

This command tries to guess at the likely location of a package's +bug tracker URL, and then tries to open it using the --browser +config param.

+

Like other commands, the first parameter is an array. This command only +uses the first element, which is expected to be a package name with an +optional version number.

+

This command will launch a browser, so this command may not be the most +friendly for programmatic use.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-cache.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-cache.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-cache.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-cache.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +

npm-cache

manage the npm cache programmatically

+

SYNOPSIS

+
npm.commands.cache([args], callback)
+
+// helpers
+npm.commands.cache.clean([args], callback)
+npm.commands.cache.add([args], callback)
+npm.commands.cache.read(name, version, forceBypass, callback)
+

DESCRIPTION

+

This acts much the same ways as the npm-cache(1) command line +functionality.

+

The callback is called with the package.json data of the thing that is +eventually added to or read from the cache.

+

The top level npm.commands.cache(...) functionality is a public +interface, and like all commands on the npm.commands object, it will +match the command line behavior exactly.

+

However, the cache folder structure and the cache helper functions are +considered internal API surface, and as such, may change in future +releases of npm, potentially without warning or significant version +incrementation.

+

Use at your own risk.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-commands.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-commands.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-commands.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-commands.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +

npm-commands

npm commands

+

SYNOPSIS

+
npm.commands[<command>](args, callback)
+

DESCRIPTION

+

npm comes with a full set of commands, and each of the commands takes a +similar set of arguments.

+

In general, all commands on the command object take an array of positional +argument strings. The last argument to any function is a callback. Some +commands are special and take other optional arguments.

+

All commands have their own man page. See man npm-<command> for command-line +usage, or man 3 npm-<command> for programmatic usage.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-config.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-config.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-config.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-config.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +

npm-config

Manage the npm configuration files

+

SYNOPSIS

+
npm.commands.config(args, callback)
+var val = npm.config.get(key)
+npm.config.set(key, val)
+

DESCRIPTION

+

This function acts much the same way as the command-line version. The first +element in the array tells config what to do. Possible values are:

+
    +
  • set

    +

    Sets a config parameter. The second element in args is interpreted as the + key, and the third element is interpreted as the value.

    +
  • +
  • get

    +

    Gets the value of a config parameter. The second element in args is the + key to get the value of.

    +
  • +
  • delete (rm or del)

    +

    Deletes a parameter from the config. The second element in args is the + key to delete.

    +
  • +
  • list (ls)

    +

    Show all configs that aren't secret. No parameters necessary.

    +
  • +
  • edit:

    +

    Opens the config file in the default editor. This command isn't very useful + programmatically, but it is made available.

    +
  • +
+

To programmatically access npm configuration settings, or set them for +the duration of a program, use the npm.config.set and npm.config.get +functions instead.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-deprecate.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-deprecate.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-deprecate.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-deprecate.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +

npm-deprecate

Deprecate a version of a package

+

SYNOPSIS

+
npm.commands.deprecate(args, callback)
+

DESCRIPTION

+

This command will update the npm registry entry for a package, providing +a deprecation warning to all who attempt to install it.

+

The 'args' parameter must have exactly two elements:

+
    +
  • package[@version]

    +

    The version portion is optional, and may be either a range, or a + specific version, or a tag.

    +
  • +
  • message

    +

    The warning message that will be printed whenever a user attempts to + install the package.

    +
  • +
+

Note that you must be the package owner to deprecate something. See the +owner and adduser help topics.

+

To un-deprecate a package, specify an empty string ("") for the message argument.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-docs.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-docs.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-docs.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-docs.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-docs

Docs for a package in a web browser maybe

+

SYNOPSIS

+
npm.commands.docs(package, callback)
+

DESCRIPTION

+

This command tries to guess at the likely location of a package's +documentation URL, and then tries to open it using the --browser +config param.

+

Like other commands, the first parameter is an array. This command only +uses the first element, which is expected to be a package name with an +optional version number.

+

This command will launch a browser, so this command may not be the most +friendly for programmatic use.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-edit.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-edit.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-edit.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-edit.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +

npm-edit

Edit an installed package

+

SYNOPSIS

+
npm.commands.edit(package, callback)
+

DESCRIPTION

+

Opens the package folder in the default editor (or whatever you've +configured as the npm editor config -- see npm help config.)

+

After it has been edited, the package is rebuilt so as to pick up any +changes in compiled packages.

+

For instance, you can do npm install connect to install connect +into your package, and then npm.commands.edit(["connect"], callback) +to make a few changes to your locally installed copy.

+

The first parameter is a string array with a single element, the package +to open. The package can optionally have a version number attached.

+

Since this command opens an editor in a new process, be careful about where +and how this is used.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-explore.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-explore.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-explore.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-explore.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +

npm-explore

Browse an installed package

+

SYNOPSIS

+
npm.commands.explore(args, callback)
+

DESCRIPTION

+

Spawn a subshell in the directory of the installed package specified.

+

If a command is specified, then it is run in the subshell, which then +immediately terminates.

+

Note that the package is not automatically rebuilt afterwards, so be +sure to use npm rebuild <pkg> if you make any changes.

+

The first element in the 'args' parameter must be a package name. After that is the optional command, which can be any number of strings. All of the strings will be combined into one, space-delimited command.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-help-search.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-help-search.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-help-search.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-help-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +

npm-help-search

Search the help pages

+

SYNOPSIS

+
npm.commands.helpSearch(args, [silent,] callback)
+

DESCRIPTION

+

This command is rarely useful, but it exists in the rare case that it is.

+

This command takes an array of search terms and returns the help pages that +match in order of best match.

+

If there is only one match, then npm displays that help section. If there +are multiple results, the results are printed to the screen formatted and the +array of results is returned. Each result is an object with these properties:

+
    +
  • hits: +A map of args to number of hits on that arg. For example, {"npm": 3}
  • +
  • found: +Total number of unique args that matched.
  • +
  • totalHits: +Total number of hits.
  • +
  • lines: +An array of all matching lines (and some adjacent lines).
  • +
  • file: +Name of the file that matched
  • +
+

The silent parameter is currently not used, but it may be in the future.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,89 @@ +

npm

node package manager

+

SYNOPSIS

+
var npm = require("npm")
+npm.load([configObject, ]function (er, npm) {
+  // use the npm object, now that it's loaded.
+
+  npm.config.set(key, val)
+  val = npm.config.get(key)
+
+  console.log("prefix = %s", npm.prefix)
+
+  npm.commands.install(["package"], cb)
+})
+

VERSION

+

2.1.6

+

DESCRIPTION

+

This is the API documentation for npm. +To find documentation of the command line +client, see npm(1).

+

Prior to using npm's commands, npm.load() must be called. If you provide +configObject as an object map of top-level configs, they override the values +stored in the various config locations. In the npm command line client, this +set of configs is parsed from the command line options. Additional +configuration params are loaded from two configuration files. See +npm-config(1), npm-config(7), and npmrc(5) for more information.

+

After that, each of the functions are accessible in the +commands object: npm.commands.<cmd>. See npm-index(7) for a list of +all possible commands.

+

All commands on the command object take an array of positional argument +strings. The last argument to any function is a callback. Some +commands take other optional arguments.

+

Configs cannot currently be set on a per function basis, as each call to +npm.config.set will change the value for all npm commands in that process.

+

To find API documentation for a specific command, run the npm apihelp +command.

+

METHODS AND PROPERTIES

+
    +
  • npm.load(configs, cb)

    +

    Load the configuration params, and call the cb function once the + globalconfig and userconfig files have been loaded as well, or on + nextTick if they've already been loaded.

    +
  • +
  • npm.config

    +

    An object for accessing npm configuration parameters.

    +
      +
    • npm.config.get(key)
    • +
    • npm.config.set(key, val)
    • +
    • npm.config.del(key)
    • +
    +
  • +
  • npm.dir or npm.root

    +

    The node_modules directory where npm will operate.

    +
  • +
  • npm.prefix

    +

    The prefix where npm is operating. (Most often the current working + directory.)

    +
  • +
  • npm.cache

    +

    The place where npm keeps JSON and tarballs it fetches from the + registry (or uploads to the registry).

    +
  • +
  • npm.tmp

    +

    npm's temporary working directory.

    +
  • +
  • npm.deref

    +

    Get the "real" name for a command that has either an alias or + abbreviation.

    +
  • +
+

MAGIC

+

For each of the methods in the npm.commands object, a method is added to the +npm object, which takes a set of positional string arguments rather than an +array and a callback.

+

If the last argument is a callback, then it will use the supplied +callback. However, if no callback is provided, then it will print out +the error or results.

+

For example, this would work in a node repl:

+
> npm = require("npm")
+> npm.load()  // wait a sec...
+> npm.install("dnode", "express")
+

Note that that won't work in a node program, since the install +method will get called before the configuration load is completed.

+

ABBREVS

+

In order to support npm ins foo instead of npm install foo, the +npm.commands object has a set of abbreviations as well as the full +method names. Use the npm.deref method to find the real name.

+

For example:

+
var cmd = npm.deref("unp") // cmd === "unpublish"
+
diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-init.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-init.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-init.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-init.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +

npm init

Interactively create a package.json file

+

SYNOPSIS

+
npm.commands.init(args, callback)
+

DESCRIPTION

+

This will ask you a bunch of questions, and then write a package.json for you.

+

It attempts to make reasonable guesses about what you want things to be set to, +and then writes a package.json file with the options you've selected.

+

If you already have a package.json file, it'll read that first, and default to +the options in there.

+

It is strictly additive, so it does not delete options from your package.json +without a really good reason to do so.

+

Since this function expects to be run on the command-line, it doesn't work very +well programmatically. The best option is to roll your own, and since +JavaScript makes it stupid simple to output formatted JSON, that is the +preferred method. If you're sure you want to handle command-line prompting, +then go ahead and use this programmatically.

+

SEE ALSO

+

package.json(5)

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-install.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-install.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-install.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-install.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,12 @@ +

npm-install

install a package programmatically

+

SYNOPSIS

+
npm.commands.install([where,] packages, callback)
+

DESCRIPTION

+

This acts much the same way as installing on the command-line.

+

The 'where' parameter is optional and only used internally, and it specifies +where the packages should be installed to.

+

The 'packages' parameter is an array of strings. Each element in the array is +the name of a package to be installed.

+

Finally, 'callback' is a function that will be called when all packages have been +installed or when an error has been encountered.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-link.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-link.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-link.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-link.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +

npm-link

Symlink a package folder

+

SYNOPSIS

+
npm.commands.link(callback)
+npm.commands.link(packages, callback)
+

DESCRIPTION

+

Package linking is a two-step process.

+

Without parameters, link will create a globally-installed +symbolic link from prefix/package-name to the current folder.

+

With parameters, link will create a symlink from the local node_modules +folder to the global symlink.

+

When creating tarballs for npm publish, the linked packages are +"snapshotted" to their current state by resolving the symbolic links.

+

This is +handy for installing your own stuff, so that you can work on it and test it +iteratively without having to continually rebuild.

+

For example:

+
npm.commands.link(cb)           # creates global link from the cwd
+                                # (say redis package)
+npm.commands.link('redis', cb)  # link-install the package
+

Now, any changes to the redis package will be reflected in +the package in the current working directory

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-load.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-load.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-load.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-load.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +

npm-load

Load config settings

+

SYNOPSIS

+
npm.load(conf, cb)
+

DESCRIPTION

+

npm.load() must be called before any other function call. Both parameters are +optional, but the second is recommended.

+

The first parameter is an object containing command-line config params, and the +second parameter is a callback that will be called when npm is loaded and ready +to serve.

+

The first parameter should follow a similar structure as the package.json +config object.

+

For example, to emulate the --dev flag, pass an object that looks like this:

+
{
+  "dev": true
+}
+

For a list of all the available command-line configs, see npm help config

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-ls.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-ls.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-ls.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-ls.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +

npm-ls

List installed packages

+

SYNOPSIS

+
npm.commands.ls(args, [silent,] callback)
+

DESCRIPTION

+

This command will print to stdout all the versions of packages that are +installed, as well as their dependencies, in a tree-structure. It will also +return that data using the callback.

+

This command does not take any arguments, but args must be defined. +Beyond that, if any arguments are passed in, npm will politely warn that it +does not take positional arguments, though you may set config flags +like with any other command, such as global to list global packages.

+

It will print out extraneous, missing, and invalid packages.

+

If the silent parameter is set to true, nothing will be output to the screen, +but the data will still be returned.

+

Callback is provided an error if one occurred, the full data about which +packages are installed and which dependencies they will receive, and a +"lite" data object which just shows which versions are installed where. +Note that the full data object is a circular structure, so care must be +taken if it is serialized to JSON.

+

CONFIGURATION

+

long

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show extended information.

+

parseable

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show parseable output instead of tree view.

+

global

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

List packages in the global install prefix instead of in the current +project.

+

Note, if parseable is set or long isn't set, then duplicates will be trimmed. +This means that if a submodule has the same dependency as a parent module, then the +dependency will only be output once.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-outdated.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-outdated.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-outdated.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-outdated.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +

npm-outdated

Check for outdated packages

+

SYNOPSIS

+
npm.commands.outdated([packages,] callback)
+

DESCRIPTION

+

This command will check the registry to see if the specified packages are +currently outdated.

+

If the 'packages' parameter is left out, npm will check all packages.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-owner.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-owner.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-owner.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-owner.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +

npm-owner

Manage package owners

+

SYNOPSIS

+
npm.commands.owner(args, callback)
+

DESCRIPTION

+

The first element of the 'args' parameter defines what to do, and the subsequent +elements depend on the action. Possible values for the action are (order of +parameters are given in parenthesis):

+
    +
  • ls (package): +List all the users who have access to modify a package and push new versions. +Handy when you need to know who to bug for help.
  • +
  • add (user, package): +Add a new user as a maintainer of a package. This user is enabled to modify +metadata, publish new versions, and add other owners.
  • +
  • rm (user, package): +Remove a user from the package owner list. This immediately revokes their +privileges.
  • +
+

Note that there is only one level of access. Either you can modify a package, +or you can't. Future versions may contain more fine-grained access levels, but +that is not implemented at this time.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-pack.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-pack.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-pack.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-pack.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-pack

Create a tarball from a package

+

SYNOPSIS

+
npm.commands.pack([packages,] callback)
+

DESCRIPTION

+

For anything that's installable (that is, a package folder, tarball, +tarball url, name@tag, name@version, or name), this command will fetch +it to the cache, and then copy the tarball to the current working +directory as <name>-<version>.tgz, and then write the filenames out to +stdout.

+

If the same package is specified multiple times, then the file will be +overwritten the second time.

+

If no arguments are supplied, then npm packs the current package folder.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-prefix.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-prefix.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-prefix.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-prefix.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +

npm-prefix

Display prefix

+

SYNOPSIS

+
npm.commands.prefix(args, callback)
+

DESCRIPTION

+

Print the prefix to standard out.

+

'args' is never used and callback is never called with data. +'args' must be present or things will break.

+

This function is not useful programmatically

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-prune.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-prune.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-prune.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-prune.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +

npm-prune

Remove extraneous packages

+

SYNOPSIS

+
npm.commands.prune([packages,] callback)
+

DESCRIPTION

+

This command removes "extraneous" packages.

+

The first parameter is optional, and it specifies packages to be removed.

+

If no packages are specified, then all packages will be checked.

+

Extraneous packages are packages that are not listed on the parent +package's dependencies list.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-publish.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-publish.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-publish.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-publish.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,26 @@ +

npm-publish

Publish a package

+

SYNOPSIS

+
npm.commands.publish([packages,] callback)
+

DESCRIPTION

+

Publishes a package to the registry so that it can be installed by name. +Possible values in the 'packages' array are:

+
    +
  • <folder>: +A folder containing a package.json file

    +
  • +
  • <tarball>: +A url or file path to a gzipped tar archive containing a single folder +with a package.json file inside.

    +
  • +
+

If the package array is empty, npm will try to publish something in the +current working directory.

+

This command could fail if one of the packages specified already exists in +the registry. Overwrites when the "force" environment variable is set.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-rebuild.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-rebuild.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-rebuild.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-rebuild.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +

npm-rebuild

Rebuild a package

+

SYNOPSIS

+
npm.commands.rebuild([packages,] callback)
+

DESCRIPTION

+

This command runs the npm build command on each of the matched packages. This is useful +when you install a new version of node, and must recompile all your C++ addons with +the new binary. If no 'packages' parameter is specified, every package will be rebuilt.

+

CONFIGURATION

+

See npm help build

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-repo.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-repo.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-repo.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-repo.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-repo

Open package repository page in the browser

+

SYNOPSIS

+
npm.commands.repo(package, callback)
+

DESCRIPTION

+

This command tries to guess at the likely location of a package's +repository URL, and then tries to open it using the --browser +config param.

+

Like other commands, the first parameter is an array. This command only +uses the first element, which is expected to be a package name with an +optional version number.

+

This command will launch a browser, so this command may not be the most +friendly for programmatic use.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-restart.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-restart.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-restart.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-restart.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +

npm-restart

Restart a package

+

SYNOPSIS

+
npm.commands.restart(packages, callback)
+

DESCRIPTION

+

This runs a package's "restart" script, if one was provided. +Otherwise it runs package's "stop" script, if one was provided, and then +the "start" script.

+

If no version is specified, then it restarts the "active" version.

+

npm can restart multiple packages. Just specify multiple packages +in the packages parameter.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-root.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-root.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-root.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-root.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +

npm-root

Display npm root

+

SYNOPSIS

+
npm.commands.root(args, callback)
+

DESCRIPTION

+

Print the effective node_modules folder to standard out.

+

'args' is never used and callback is never called with data. +'args' must be present or things will break.

+

This function is not useful programmatically.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-run-script.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-run-script.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-run-script.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-run-script.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +

npm-run-script

Run arbitrary package scripts

+

SYNOPSIS

+
npm.commands.run-script(args, callback)
+

DESCRIPTION

+

This runs an arbitrary command from a package's "scripts" object.

+

It is used by the test, start, restart, and stop commands, but can be +called directly, as well.

+

The 'args' parameter is an array of strings. Behavior depends on the number +of elements. If there is only one element, npm assumes that the element +represents a command to be run on the local repository. If there is more than +one element, then the first is assumed to be the package and the second is +assumed to be the command to run. All other elements are ignored.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-search.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-search.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-search.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +

npm-search

Search for packages

+

SYNOPSIS

+
npm.commands.search(searchTerms, [silent,] [staleness,] callback)
+

DESCRIPTION

+

Search the registry for packages matching the search terms. The available parameters are:

+
    +
  • searchTerms: +Array of search terms. These terms are case-insensitive.
  • +
  • silent: +If true, npm will not log anything to the console.
  • +
  • staleness: +This is the threshold for stale packages. "Fresh" packages are not refreshed +from the registry. This value is measured in seconds.
  • +
  • callback: +Returns an object where each key is the name of a package, and the value +is information about that package along with a 'words' property, which is +a space-delimited string of all of the interesting words in that package. +The only properties included are those that are searched, which generally include:

    +
      +
    • name
    • +
    • description
    • +
    • maintainers
    • +
    • url
    • +
    • keywords
    • +
    +
  • +
+

A search on the registry excludes any result that does not match all of the +search terms. It also removes any items from the results that contain an +excluded term (the "searchexclude" config). The search is case insensitive +and doesn't try to read your mind (it doesn't do any verb tense matching or the +like).

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-shrinkwrap.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-shrinkwrap.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-shrinkwrap.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-shrinkwrap.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-shrinkwrap

programmatically generate package shrinkwrap file

+

SYNOPSIS

+
npm.commands.shrinkwrap(args, [silent,] callback)
+

DESCRIPTION

+

This acts much the same way as shrinkwrapping on the command-line.

+

This command does not take any arguments, but 'args' must be defined. +Beyond that, if any arguments are passed in, npm will politely warn that it +does not take positional arguments.

+

If the 'silent' parameter is set to true, nothing will be output to the screen, +but the shrinkwrap file will still be written.

+

Finally, 'callback' is a function that will be called when the shrinkwrap has +been saved.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-start.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-start.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-start.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-start.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +

npm-start

Start a package

+

SYNOPSIS

+
npm.commands.start(packages, callback)
+

DESCRIPTION

+

This runs a package's "start" script, if one was provided.

+

npm can start multiple packages. Just specify multiple packages +in the packages parameter.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-stop.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-stop.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-stop.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-stop.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +

npm-stop

Stop a package

+

SYNOPSIS

+
npm.commands.stop(packages, callback)
+

DESCRIPTION

+

This runs a package's "stop" script, if one was provided.

+

npm can run stop on multiple packages. Just specify multiple packages +in the packages parameter.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-submodule.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-submodule.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-submodule.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-submodule.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +

npm-submodule

Add a package as a git submodule

+

SYNOPSIS

+
npm.commands.submodule(packages, callback)
+

DESCRIPTION

+

For each package specified, npm will check if it has a git repository url +in its package.json description then add it as a git submodule at +node_modules/<pkg name>.

+

This is a convenience only. From then on, it's up to you to manage +updates by using the appropriate git commands. npm will stubbornly +refuse to update, modify, or remove anything with a .git subfolder +in it.

+

This command also does not install missing dependencies, if the package +does not include them in its git repository. If npm ls reports that +things are missing, you can either install, link, or submodule them yourself, +or you can do npm explore <pkgname> -- npm install to install the +dependencies into the submodule folder.

+

SEE ALSO

+
    +
  • npm help json
  • +
  • git help submodule
  • +
+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-tag.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-tag.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-tag.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-tag.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +

npm-tag

Tag a published version

+

SYNOPSIS

+
npm.commands.tag(package@version, tag, callback)
+

DESCRIPTION

+

Tags the specified version of the package with the specified tag, or the +--tag config if not specified.

+

The 'package@version' is an array of strings, but only the first two elements are +currently used.

+

The first element must be in the form package@version, where package +is the package name and version is the version number (much like installing a +specific version).

+

The second element is the name of the tag to tag this version with. If this +parameter is missing or falsey (empty), the default from the config will be +used. For more information about how to set this config, check +man 3 npm-config for programmatic usage or man npm-config for cli usage.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-test.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-test.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-test.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-test.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +

npm-test

Test a package

+

SYNOPSIS

+
  npm.commands.test(packages, callback)
+

DESCRIPTION

+

This runs a package's "test" script, if one was provided.

+

To run tests as a condition of installation, set the npat config to +true.

+

npm can run tests on multiple packages. Just specify multiple packages +in the packages parameter.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-uninstall.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-uninstall.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-uninstall.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-uninstall.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +

npm-uninstall

uninstall a package programmatically

+

SYNOPSIS

+
npm.commands.uninstall(packages, callback)
+

DESCRIPTION

+

This acts much the same way as uninstalling on the command-line.

+

The 'packages' parameter is an array of strings. Each element in the array is +the name of a package to be uninstalled.

+

Finally, 'callback' is a function that will be called when all packages have been +uninstalled or when an error has been encountered.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-unpublish.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-unpublish.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-unpublish.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-unpublish.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-unpublish

Remove a package from the registry

+

SYNOPSIS

+
npm.commands.unpublish(package, callback)
+

DESCRIPTION

+

This removes a package version from the registry, deleting its +entry and removing the tarball.

+

The package parameter must be defined.

+

Only the first element in the package parameter is used. If there is no first +element, then npm assumes that the package at the current working directory +is what is meant.

+

If no version is specified, or if all versions are removed then +the root package entry is removed from the registry entirely.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-update.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-update.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-update.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-update.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,7 @@ +

npm-update

Update a package

+

SYNOPSIS

+
npm.commands.update(packages, callback)
+

DESCRIPTION

+

Updates a package, upgrading it to the latest version. It also installs any missing packages.

+

The 'packages' argument is an array of packages to update. The 'callback' parameter will be called when done or when an error occurs.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-version.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-version.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-version.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-version.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,12 @@ +

npm-version

Bump a package version

+

SYNOPSIS

+
npm.commands.version(newversion, callback)
+

DESCRIPTION

+

Run this in a package directory to bump the version and write the new +data back to the package.json file.

+

If run in a git repo, it will also create a version commit and tag, and +fail if the repo is not clean.

+

Like all other commands, this function takes a string array as its first +parameter. The difference, however, is this function will fail if it does +not have exactly one element. The only element should be a version number.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-view.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-view.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-view.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-view.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,61 @@ +

npm-view

View registry info

+

SYNOPSIS

+
npm.commands.view(args, [silent,] callback)
+

DESCRIPTION

+

This command shows data about a package and prints it to the stream +referenced by the outfd config, which defaults to stdout.

+

The "args" parameter is an ordered list that closely resembles the command-line +usage. The elements should be ordered such that the first element is +the package and version (package@version). The version is optional. After that, +the rest of the parameters are fields with optional subfields ("field.subfield") +which can be used to get only the information desired from the registry.

+

The callback will be passed all of the data returned by the query.

+

For example, to get the package registry entry for the connect package, +you can do this:

+
npm.commands.view(["connect"], callback)
+

If no version is specified, "latest" is assumed.

+

Field names can be specified after the package descriptor. +For example, to show the dependencies of the ronn package at version +0.3.5, you could do the following:

+
npm.commands.view(["ronn@0.3.5", "dependencies"], callback)
+

You can view child fields by separating them with a period. +To view the git repository URL for the latest version of npm, you could +do this:

+
npm.commands.view(["npm", "repository.url"], callback)
+

For fields that are arrays, requesting a non-numeric field will return +all of the values from the objects in the list. For example, to get all +the contributor email addresses for the "express" project, you can do this:

+
npm.commands.view(["express", "contributors.email"], callback)
+

You may also use numeric indices in square braces to specifically select +an item in an array field. To just get the email address of the first +contributor in the list, you can do this:

+
npm.commands.view(["express", "contributors[0].email"], callback)
+

Multiple fields may be specified, and will be printed one after another. +For example, to get all the contributor names and email addresses, you +can do this:

+
npm.commands.view(["express", "contributors.name", "contributors.email"], callback)
+

"Person" fields are shown as a string if they would be shown as an +object. So, for example, this will show the list of npm contributors in +the shortened string format. (See npm help json for more on this.)

+
npm.commands.view(["npm", "contributors"], callback)
+

If a version range is provided, then data will be printed for every +matching version of the package. This will show which version of jsdom +was required by each matching version of yui3:

+
npm.commands.view(["yui3@'>0.5.4'", "dependencies.jsdom"], callback)
+

OUTPUT

+

If only a single string field for a single version is output, then it +will not be colorized or quoted, so as to enable piping the output to +another command.

+

If the version range matches multiple versions, then each printed value +will be prefixed with the version it applies to.

+

If multiple fields are requested, then each of them is prefixed with +the field name.

+

Console output can be disabled by setting the 'silent' parameter to true.

+

RETURN VALUE

+

The data returned will be an object in this format:

+
{ <version>:
+  { <field>: <value>
+  , ... }
+, ... }
+

corresponding to the list of fields selected.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-whoami.html nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-whoami.html --- nodejs-0.11.13/deps/npm/html/partial/doc/api/npm-whoami.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/api/npm-whoami.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +

npm-whoami

Display npm username

+

SYNOPSIS

+
npm.commands.whoami(args, callback)
+

DESCRIPTION

+

Print the username config to standard output.

+

'args' is never used and callback is never called with data. +'args' must be present or things will break.

+

This function is not useful programmatically

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-adduser.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-adduser.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-adduser.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-adduser.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,47 @@ +

npm-adduser

Add a registry user account

+

SYNOPSIS

+
npm adduser [--registry=url] [--scope=@orgname] [--always-auth]
+

DESCRIPTION

+

Create or verify a user named <username> in the specified registry, and +save the credentials to the .npmrc file. If no registry is specified, +the default registry will be used (see npm-config(7)).

+

The username, password, and email are read in from prompts.

+

You may use this command to change your email address, but not username +or password.

+

To reset your password, go to https://www.npmjs.org/forgot

+

You may use this command multiple times with the same user account to +authorize on a new machine.

+

npm login is an alias to adduser and behaves exactly the same way.

+

CONFIGURATION

+

registry

+

Default: http://registry.npmjs.org/

+

The base URL of the npm package registry. If scope is also specified, +this registry will only be used for packages with that scope. See npm-scope(7).

+

scope

+

Default: none

+

If specified, the user and login credentials given will be associated +with the specified scope. See npm-scope(7). You can use both at the same time, +e.g.

+
npm adduser --registry=http://myregistry.example.com --scope=@myco
+

This will set a registry for the given scope and login or create a user for +that registry at the same time.

+

always-auth

+

Default: false

+

If specified, save configuration indicating that all requests to the given +registry should include authorization information. Useful for private +registries. Can be used with --registry and / or --scope, e.g.

+
npm adduser --registry=http://private-registry.example.com --always-auth
+

This will ensure that all requests to that registry (including for tarballs) +include an authorization header. See always-auth in npm-config(7) for more +details on always-auth. Registry-specific configuration of always-auth takes +precedence over any global configuration.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-bin.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-bin.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-bin.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-bin.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +

npm-bin

Display npm bin folder

+

SYNOPSIS

+
npm bin
+

DESCRIPTION

+

Print the folder where npm will install executables.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-bugs.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-bugs.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-bugs.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-bugs.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,34 @@ +

npm-bugs

Bugs for a package in a web browser maybe

+

SYNOPSIS

+
npm bugs <pkgname>
+npm bugs (with no args in a package dir)
+

DESCRIPTION

+

This command tries to guess at the likely location of a package's +bug tracker URL, and then tries to open it using the --browser +config param. If no package name is provided, it will search for +a package.json in the current folder and use the name property.

+

CONFIGURATION

+

browser

+
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +
+

The browser that is called by the npm bugs command to open websites.

+

registry

+ +

The base URL of the npm package registry.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-build.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-build.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-build.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-build.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +

npm-build

Build a package

+

SYNOPSIS

+
npm build <package-folder>
+
    +
  • <package-folder>: +A folder containing a package.json file in its root.
  • +
+

DESCRIPTION

+

This is the plumbing command called by npm link and npm install.

+

It should generally not be called directly.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-bundle.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-bundle.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-bundle.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-bundle.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +

npm-bundle

REMOVED

+

DESCRIPTION

+

The npm bundle command has been removed in 1.0, for the simple reason +that it is no longer necessary, as the default behavior is now to +install packages into the local space.

+

Just use npm install now to do what npm bundle used to do.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-cache.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-cache.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-cache.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-cache.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,61 @@ +

npm-cache

Manipulates packages cache

+

SYNOPSIS

+
npm cache add <tarball file>
+npm cache add <folder>
+npm cache add <tarball url>
+npm cache add <name>@<version>
+
+npm cache ls [<path>]
+
+npm cache clean [<path>]
+

DESCRIPTION

+

Used to add, list, or clear the npm cache folder.

+
    +
  • add: +Add the specified package to the local cache. This command is primarily +intended to be used internally by npm, but it can provide a way to +add data to the local installation cache explicitly.

    +
  • +
  • ls: +Show the data in the cache. Argument is a path to show in the cache +folder. Works a bit like the find program, but limited by the +depth config.

    +
  • +
  • clean: +Delete data out of the cache folder. If an argument is provided, then +it specifies a subpath to delete. If no argument is provided, then +the entire cache is cleared.

    +
  • +
+

DETAILS

+

npm stores cache data in the directory specified in npm config get cache. +For each package that is added to the cache, three pieces of information are +stored in {cache}/{name}/{version}:

+
    +
  • .../package/package.json: +The package.json file, as npm sees it.
  • +
  • .../package.tgz: +The tarball for that version.
  • +
+

Additionally, whenever a registry request is made, a .cache.json file +is placed at the corresponding URI, to store the ETag and the requested +data. This is stored in {cache}/{hostname}/{path}/.cache.json.

+

Commands that make non-essential registry requests (such as search and +view, or the completion scripts) generally specify a minimum timeout. +If the .cache.json file is younger than the specified timeout, then +they do not make an HTTP request to the registry.

+

CONFIGURATION

+

cache

+

Default: ~/.npm on Posix, or %AppData%/npm-cache on Windows.

+

The root cache folder.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-completion.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-completion.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-completion.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-completion.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +

npm-completion

Tab Completion for npm

+

SYNOPSIS

+
. <(npm completion)
+

DESCRIPTION

+

Enables tab-completion in all npm commands.

+

The synopsis above +loads the completions into your current shell. Adding it to +your ~/.bashrc or ~/.zshrc will make the completions available +everywhere.

+

You may of course also pipe the output of npm completion to a file +such as /usr/local/etc/bash_completion.d/npm if you have a system +that will read that file for you.

+

When COMP_CWORD, COMP_LINE, and COMP_POINT are defined in the +environment, npm completion acts in "plumbing mode", and outputs +completions based on the arguments.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-config.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-config.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-config.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-config.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ +

npm-config

Manage the npm configuration files

+

SYNOPSIS

+
npm config set <key> <value> [--global]
+npm config get <key>
+npm config delete <key>
+npm config list
+npm config edit
+npm c [set|get|delete|list]
+npm get <key>
+npm set <key> <value> [--global]
+

DESCRIPTION

+

npm gets its config settings from the command line, environment +variables, npmrc files, and in some cases, the package.json file.

+

See npmrc(5) for more information about the npmrc files.

+

See npm-config(7) for a more thorough discussion of the mechanisms +involved.

+

The npm config command can be used to update and edit the contents +of the user and global npmrc files.

+

Sub-commands

+

Config supports the following sub-commands:

+

set

+
npm config set key value
+

Sets the config key to the value.

+

If value is omitted, then it sets it to "true".

+

get

+
npm config get key
+

Echo the config value to stdout.

+

list

+
npm config list
+

Show all the config settings.

+

delete

+
npm config delete key
+

Deletes the key from all configuration files.

+

edit

+
npm config edit
+

Opens the config file in an editor. Use the --global flag to edit the +global config.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-dedupe.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-dedupe.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-dedupe.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-dedupe.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +

npm-dedupe

Reduce duplication

+

SYNOPSIS

+
npm dedupe [package names...]
+npm ddp [package names...]
+

DESCRIPTION

+

Searches the local package tree and attempts to simplify the overall +structure by moving dependencies further up the tree, where they can +be more effectively shared by multiple dependent packages.

+

For example, consider this dependency graph:

+
a
++-- b <-- depends on c@1.0.x
+|   `-- c@1.0.3
+`-- d <-- depends on c@~1.0.9
+    `-- c@1.0.10
+

In this case, npm-dedupe(1) will transform the tree to:

+
a
++-- b
++-- d
+`-- c@1.0.10
+

Because of the hierarchical nature of node's module lookup, b and d +will both get their dependency met by the single c package at the root +level of the tree.

+

If a suitable version exists at the target location in the tree +already, then it will be left untouched, but the other duplicates will +be deleted.

+

If no suitable version can be found, then a warning is printed, and +nothing is done.

+

If any arguments are supplied, then they are filters, and only the +named packages will be touched.

+

Note that this operation transforms the dependency tree, and may +result in packages getting updated versions, perhaps from the npm +registry.

+

This feature is experimental, and may change in future versions.

+

The --tag argument will apply to all of the affected dependencies. If a +tag with the given name exists, the tagged version is preferred over newer +versions.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-deprecate.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-deprecate.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-deprecate.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-deprecate.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +

npm-deprecate

Deprecate a version of a package

+

SYNOPSIS

+
npm deprecate <name>[@<version>] <message>
+

DESCRIPTION

+

This command will update the npm registry entry for a package, providing +a deprecation warning to all who attempt to install it.

+

It works on version ranges as well as specific versions, so you can do +something like this:

+
npm deprecate my-thing@"< 0.2.3" "critical bug fixed in v0.2.3"
+

Note that you must be the package owner to deprecate something. See the +owner and adduser help topics.

+

To un-deprecate a package, specify an empty string ("") for the message argument.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-docs.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-docs.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-docs.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-docs.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +

npm-docs

Docs for a package in a web browser maybe

+

SYNOPSIS

+
npm docs [<pkgname> [<pkgname> ...]]
+npm docs (with no args in a package dir)
+npm home [<pkgname> [<pkgname> ...]]
+npm home (with no args in a package dir)
+

DESCRIPTION

+

This command tries to guess at the likely location of a package's +documentation URL, and then tries to open it using the --browser +config param. You can pass multiple package names at once. If no +package name is provided, it will search for a package.json in +the current folder and use the name property.

+

CONFIGURATION

+

browser

+
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +
+

The browser that is called by the npm docs command to open websites.

+

registry

+ +

The base URL of the npm package registry.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-edit.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-edit.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-edit.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-edit.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +

npm-edit

Edit an installed package

+

SYNOPSIS

+
npm edit <name>[@<version>]
+

DESCRIPTION

+

Opens the package folder in the default editor (or whatever you've +configured as the npm editor config -- see npm-config(7).)

+

After it has been edited, the package is rebuilt so as to pick up any +changes in compiled packages.

+

For instance, you can do npm install connect to install connect +into your package, and then npm edit connect to make a few +changes to your locally installed copy.

+

CONFIGURATION

+

editor

+
    +
  • Default: EDITOR environment variable if set, or "vi" on Posix, +or "notepad" on Windows.
  • +
  • Type: path
  • +
+

The command to run for npm edit or npm config edit.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-explore.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-explore.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-explore.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-explore.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +

npm-explore

Browse an installed package

+

SYNOPSIS

+
npm explore <name> [ -- <cmd>]
+

DESCRIPTION

+

Spawn a subshell in the directory of the installed package specified.

+

If a command is specified, then it is run in the subshell, which then +immediately terminates.

+

This is particularly handy in the case of git submodules in the +node_modules folder:

+
npm explore some-dependency -- git pull origin master
+

Note that the package is not automatically rebuilt afterwards, so be +sure to use npm rebuild <pkg> if you make any changes.

+

CONFIGURATION

+

shell

+
    +
  • Default: SHELL environment variable, or "bash" on Posix, or "cmd" on +Windows
  • +
  • Type: path
  • +
+

The shell to run for the npm explore command.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-help.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-help.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-help.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-help.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +

npm-help

Get help on npm

+

SYNOPSIS

+
npm help <topic>
+npm help some search terms
+

DESCRIPTION

+

If supplied a topic, then show the appropriate documentation page.

+

If the topic does not exist, or if multiple terms are provided, then run +the help-search command to find a match. Note that, if help-search +finds a single subject, then it will run help on that topic, so unique +matches are equivalent to specifying a topic name.

+

CONFIGURATION

+

viewer

+
    +
  • Default: "man" on Posix, "browser" on Windows
  • +
  • Type: path
  • +
+

The program to use to view help content.

+

Set to "browser" to view html help content in the default web browser.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-help-search.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-help-search.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-help-search.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-help-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,26 @@ +

npm-help-search

Search npm help documentation

+

SYNOPSIS

+
npm help-search some search terms
+

DESCRIPTION

+

This command will search the npm markdown documentation files for the +terms provided, and then list the results, sorted by relevance.

+

If only one result is found, then it will show that help topic.

+

If the argument to npm help is not a known help topic, then it will +call help-search. It is rarely if ever necessary to call this +command directly.

+

CONFIGURATION

+

long

+
    +
  • Type: Boolean
  • +
  • Default false
  • +
+

If true, the "long" flag will cause help-search to output context around +where the terms were found in the documentation.

+

If false, then help-search will just list out the help topics found.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,134 @@ +

npm

node package manager

+

SYNOPSIS

+
npm <command> [args]
+

VERSION

+

2.1.6

+

DESCRIPTION

+

npm is the package manager for the Node JavaScript platform. It puts +modules in place so that node can find them, and manages dependency +conflicts intelligently.

+

It is extremely configurable to support a wide variety of use cases. +Most commonly, it is used to publish, discover, install, and develop node +programs.

+

Run npm help to get a list of available commands.

+

INTRODUCTION

+

You probably got npm because you want to install stuff.

+

Use npm install blerg to install the latest version of "blerg". Check out +npm-install(1) for more info. It can do a lot of stuff.

+

Use the npm search command to show everything that's available. +Use npm ls to show everything you've installed.

+

DEPENDENCIES

+

If a package references another package with a git URL, npm depends +on a preinstalled git.

+

If one of the packages npm tries to install is a native node module and +requires compiling of C++ Code, npm will use +node-gyp for that task. +For a Unix system, node-gyp +needs Python, make and a buildchain like GCC. On Windows, +Python and Microsoft Visual Studio C++ is needed. Python 3 is +not supported by node-gyp. +For more information visit +the node-gyp repository and +the node-gyp Wiki.

+

DIRECTORIES

+

See npm-folders(5) to learn about where npm puts stuff.

+

In particular, npm has two modes of operation:

+
    +
  • global mode:
    npm installs packages into the install prefix at +prefix/lib/node_modules and bins are installed in prefix/bin.
  • +
  • local mode:
    npm installs packages into the current project directory, which +defaults to the current working directory. Packages are installed to +./node_modules, and bins are installed to ./node_modules/.bin.
  • +
+

Local mode is the default. Use --global or -g on any command to +operate in global mode instead.

+

DEVELOPER USAGE

+

If you're using npm to develop and publish your code, check out the +following help topics:

+
    +
  • json: +Make a package.json file. See package.json(5).
  • +
  • link: +For linking your current working code into Node's path, so that you +don't have to reinstall every time you make a change. Use +npm link to do this.
  • +
  • install: +It's a good idea to install things if you don't need the symbolic link. +Especially, installing other peoples code from the registry is done via +npm install
  • +
  • adduser: +Create an account or log in. Credentials are stored in the +user config file.
  • +
  • publish: +Use the npm publish command to upload your code to the registry.
  • +
+

CONFIGURATION

+

npm is extremely configurable. It reads its configuration options from +5 places.

+
    +
  • Command line switches:
    Set a config with --key val. All keys take a value, even if they +are booleans (the config parser doesn't know what the options are at +the time of parsing.) If no value is provided, then the option is set +to boolean true.
  • +
  • Environment Variables:
    Set any config by prefixing the name in an environment variable with +npm_config_. For example, export npm_config_key=val.
  • +
  • User Configs:
    The file at $HOME/.npmrc is an ini-formatted list of configs. If +present, it is parsed. If the userconfig option is set in the cli +or env, then that will be used instead.
  • +
  • Global Configs:
    The file found at ../etc/npmrc (from the node executable, by default +this resolves to /usr/local/etc/npmrc) will be parsed if it is found. +If the globalconfig option is set in the cli, env, or user config, +then that file is parsed instead.
  • +
  • Defaults:
    npm's default configuration options are defined in +lib/utils/config-defs.js. These must not be changed.
  • +
+

See npm-config(7) for much much more information.

+

CONTRIBUTIONS

+

Patches welcome!

+
    +
  • code: +Read through npm-coding-style(7) if you plan to submit code. +You don't have to agree with it, but you do have to follow it.
  • +
  • docs: +If you find an error in the documentation, edit the appropriate markdown +file in the "doc" folder. (Don't worry about generating the man page.)
  • +
+

Contributors are listed in npm's package.json file. You can view them +easily by doing npm view npm contributors.

+

If you would like to contribute, but don't know what to work on, check +the issues list or ask on the mailing list.

+ +

BUGS

+

When you find issues, please report them:

+ +

Be sure to include all of the output from the npm command that didn't work +as expected. The npm-debug.log file is also helpful to provide.

+

You can also look for isaacs in #node.js on irc://irc.freenode.net. He +will no doubt tell you to put the output in a gist or email.

+

AUTHOR

+

Isaac Z. Schlueter :: +isaacs :: +@izs :: +i@izs.me

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-init.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-init.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-init.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-init.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +

npm-init

Interactively create a package.json file

+

SYNOPSIS

+
npm init [-f|--force|-y|--yes]
+

DESCRIPTION

+

This will ask you a bunch of questions, and then write a package.json for you.

+

It attempts to make reasonable guesses about what you want things to be set to, +and then writes a package.json file with the options you've selected.

+

If you already have a package.json file, it'll read that first, and default to +the options in there.

+

It is strictly additive, so it does not delete options from your package.json +without a really good reason to do so.

+

If you invoke it with -f, --force, -y, or --yes, it will use only +defaults and not prompt you for any options.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-install.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-install.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-install.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-install.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,219 @@ +

npm-install

Install a package

+

SYNOPSIS

+
npm install (with no args in a package dir)
+npm install <tarball file>
+npm install <tarball url>
+npm install <folder>
+npm install [@<scope>/]<name> [--save|--save-dev|--save-optional] [--save-exact]
+npm install [@<scope>/]<name>@<tag>
+npm install [@<scope>/]<name>@<version>
+npm install [@<scope>/]<name>@<version range>
+npm i (with any of the previous argument usage)
+

DESCRIPTION

+

This command installs a package, and any packages that it depends on. If the +package has a shrinkwrap file, the installation of dependencies will be driven +by that. See npm-shrinkwrap(1).

+

A package is:

+
    +
  • a) a folder containing a program described by a package.json file
  • +
  • b) a gzipped tarball containing (a)
  • +
  • c) a url that resolves to (b)
  • +
  • d) a <name>@<version> that is published on the registry (see npm-registry(7)) with (c)
  • +
  • e) a <name>@<tag> that points to (d)
  • +
  • f) a <name> that has a "latest" tag satisfying (e)
  • +
  • g) a <git remote url> that resolves to (b)
  • +
+

Even if you never publish your package, you can still get a lot of +benefits of using npm if you just want to write a node program (a), and +perhaps if you also want to be able to easily install it elsewhere +after packing it up into a tarball (b).

+
    +
  • npm install (in package directory, no arguments):

    +

    Install the dependencies in the local node_modules folder.

    +

    In global mode (ie, with -g or --global appended to the command), + it installs the current package context (ie, the current working + directory) as a global package.

    +

    By default, npm install will install all modules listed as + dependencies. With the --production flag, + npm will not install modules listed in devDependencies.

    +
  • +
  • npm install <folder>:

    +

    Install a package that is sitting in a folder on the filesystem.

    +
  • +
  • npm install <tarball file>:

    +

    Install a package that is sitting on the filesystem. Note: if you just want + to link a dev directory into your npm root, you can do this more easily by + using npm link.

    +

    Example:

    +
        npm install ./package.tgz
    +
  • +
  • npm install <tarball url>:

    +

    Fetch the tarball url, and then install it. In order to distinguish between + this and other options, the argument must start with "http://" or "https://"

    +

    Example:

    +
        npm install https://github.com/indexzero/forever/tarball/v0.5.6
    +
  • +
  • npm install [@<scope>/]<name> [--save|--save-dev|--save-optional]:

    +

    Do a <name>@<tag> install, where <tag> is the "tag" config. (See + npm-config(7).)

    +

    In most cases, this will install the latest version + of the module published on npm.

    +

    Example:

    +
        npm install sax
    +

    npm install takes 3 exclusive, optional flags which save or update + the package version in your main package.json:

    +
      +
    • --save: Package will appear in your dependencies.

      +
    • +
    • --save-dev: Package will appear in your devDependencies.

      +
    • +
    • --save-optional: Package will appear in your optionalDependencies.

      +

      When using any of the above options to save dependencies to your +package.json, there is an additional, optional flag:

      +
    • +
    • --save-exact: Saved dependencies will be configured with an +exact version rather than using npm's default semver range +operator.

      +

      <scope> is optional. The package will be downloaded from the registry +associated with the specified scope. If no registry is associated with +the given scope the default registry is assumed. See npm-scope(7).

      +

      Note: if you do not include the @-symbol on your scope name, npm will +interpret this as a GitHub repository instead, see below. Scope names +must also be followed by a slash.

      +

      Examples:

      +
      npm install sax --save
      +npm install githubname/reponame
      +npm install @myorg/privatepackage
      +npm install node-tap --save-dev
      +npm install dtrace-provider --save-optional
      +npm install readable-stream --save --save-exact
      +
    • +
    +
  • +
+
**Note**: If there is a file or folder named `<name>` in the current
+working directory, then it will try to install that, and only try to
+fetch the package by name if it is not valid.
+
    +
  • npm install [@<scope>/]<name>@<tag>:

    +

    Install the version of the package that is referenced by the specified tag. + If the tag does not exist in the registry data for that package, then this + will fail.

    +

    Example:

    +
        npm install sax@latest
    +    npm install @myorg/mypackage@latest
    +
  • +
  • npm install [@<scope>/]<name>@<version>:

    +

    Install the specified version of the package. This will fail if the + version has not been published to the registry.

    +

    Example:

    +
        npm install sax@0.1.1
    +    npm install @myorg/privatepackage@1.5.0
    +
  • +
  • npm install [@<scope>/]<name>@<version range>:

    +

    Install a version of the package matching the specified version range. This + will follow the same rules for resolving dependencies described in package.json(5).

    +

    Note that most version ranges must be put in quotes so that your shell will + treat it as a single argument.

    +

    Example:

    +
        npm install sax@">=0.1.0 <0.2.0"
    +    npm install @myorg/privatepackage@">=0.1.0 <0.2.0"
    +
  • +
  • npm install <githubname>/<githubrepo>:

    +

    Install the package at https://github.com/githubname/githubrepo by + attempting to clone it using git.

    +

    Example:

    +
        npm install mygithubuser/myproject
    +

    To reference a package in a git repo that is not on GitHub, see git + remote urls below.

    +
  • +
  • npm install <git remote url>:

    +

    Install a package by cloning a git remote url. The format of the git + url is:

    +
        <protocol>://[<user>@]<hostname><separator><path>[#<commit-ish>]
    +

    <protocol> is one of git, git+ssh, git+http, or + git+https. If no <commit-ish> is specified, then master is + used.

    +

    Examples:

    +
        git+ssh://git@github.com:npm/npm.git#v1.0.27
    +    git+https://isaacs@github.com/npm/npm.git
    +    git://github.com/npm/npm.git#v1.0.27
    +
  • +
+

You may combine multiple arguments, and even multiple types of arguments. +For example:

+
npm install sax@">=0.1.0 <0.2.0" bench supervisor
+

The --tag argument will apply to all of the specified install targets. If a +tag with the given name exists, the tagged version is preferred over newer +versions.

+

The --force argument will force npm to fetch remote resources even if a +local copy exists on disk.

+
npm install sax --force
+

The --global argument will cause npm to install the package globally +rather than locally. See npm-folders(5).

+

The --link argument will cause npm to link global installs into the +local space in some cases.

+

The --no-bin-links argument will prevent npm from creating symlinks for +any binaries the package might contain.

+

The --no-optional argument will prevent optional dependencies from +being installed.

+

The --no-shrinkwrap argument, which will ignore an available +shrinkwrap file and use the package.json instead.

+

The --nodedir=/path/to/node/source argument will allow npm to find the +node source code so that npm can compile native modules.

+

See npm-config(7). Many of the configuration params have some +effect on installation, since that's most of what npm does.

+

ALGORITHM

+

To install a package, npm uses the following algorithm:

+
install(where, what, family, ancestors)
+fetch what, unpack to <where>/node_modules/<what>
+for each dep in what.dependencies
+  resolve dep to precise version
+for each dep@version in what.dependencies
+    not in <where>/node_modules/<what>/node_modules/*
+    and not in <family>
+  add precise version deps to <family>
+  install(<where>/node_modules/<what>, dep, family)
+

For this package{dep} structure: A{B,C}, B{C}, C{D}, +this algorithm produces:

+
A
++-- B
+`-- C
+    `-- D
+

That is, the dependency from B to C is satisfied by the fact that A +already caused C to be installed at a higher level.

+

See npm-folders(5) for a more detailed description of the specific +folder structures that npm creates.

+

Limitations of npm's Install Algorithm

+

There are some very rare and pathological edge-cases where a cycle can +cause npm to try to install a never-ending tree of packages. Here is +the simplest case:

+
A -> B -> A' -> B' -> A -> B -> A' -> B' -> A -> ...
+

where A is some version of a package, and A' is a different version +of the same package. Because B depends on a different version of A +than the one that is already in the tree, it must install a separate +copy. The same is true of A', which must install B'. Because B' +depends on the original version of A, which has been overridden, the +cycle falls into infinite regress.

+

To avoid this situation, npm flat-out refuses to install any +name@version that is already present anywhere in the tree of package +folder ancestors. A more correct, but more complex, solution would be +to symlink the existing version into the new location. If this ever +affects a real use-case, it will be investigated.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-link.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-link.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-link.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-link.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +

npm-link

Symlink a package folder

+

SYNOPSIS

+
npm link (in package folder)
+npm link [@<scope>/]<pkgname>
+npm ln (with any of the previous argument usage)
+

DESCRIPTION

+

Package linking is a two-step process.

+

First, npm link in a package folder will create a globally-installed +symbolic link from prefix/package-name to the current folder (see +npm-config(7) for the value of prefix).

+

Next, in some other location, npm link package-name will create a +symlink from the local node_modules folder to the global symlink.

+

Note that package-name is taken from package.json, +not from directory name.

+

The package name can be optionally prefixed with a scope. See npm-scope(7). +The scope must be preceded by an @-symbol and followed by a slash.

+

When creating tarballs for npm publish, the linked packages are +"snapshotted" to their current state by resolving the symbolic links.

+

This is handy for installing your own stuff, so that you can work on it and +test it iteratively without having to continually rebuild.

+

For example:

+
cd ~/projects/node-redis    # go into the package directory
+npm link                    # creates global link
+cd ~/projects/node-bloggy   # go into some other package directory.
+npm link redis              # link-install the package
+

Now, any changes to ~/projects/node-redis will be reflected in +~/projects/node-bloggy/node_modules/redis/

+

You may also shortcut the two steps in one. For example, to do the +above use-case in a shorter way:

+
cd ~/projects/node-bloggy  # go into the dir of your main project
+npm link ../node-redis     # link the dir of your dependency
+

The second line is the equivalent of doing:

+
(cd ../node-redis; npm link)
+npm link redis
+

That is, it first creates a global link, and then links the global +installation target into your project's node_modules folder.

+

If your linked package is scoped (see npm-scope(7)) your link command must +include that scope, e.g.

+
npm link @myorg/privatepackage
+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-ls.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-ls.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-ls.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-ls.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,65 @@ +

npm-ls

List installed packages

+

SYNOPSIS

+
npm list [[@<scope>/]<pkg> ...]
+npm ls [[@<scope>/]<pkg> ...]
+npm la [[@<scope>/]<pkg> ...]
+npm ll [[@<scope>/]<pkg> ...]
+

DESCRIPTION

+

This command will print to stdout all the versions of packages that are +installed, as well as their dependencies, in a tree-structure.

+

Positional arguments are name@version-range identifiers, which will +limit the results to only the paths to the packages named. Note that +nested packages will also show the paths to the specified packages. +For example, running npm ls promzard in npm's source tree will show:

+
npm@2.1.6 /path/to/npm
+└─┬ init-package-json@0.0.4
+  └── promzard@0.1.5
+

It will print out extraneous, missing, and invalid packages.

+

If a project specifies git urls for dependencies these are shown +in parentheses after the name@version to make it easier for users to +recognize potential forks of a project.

+

When run as ll or la, it shows extended information by default.

+

CONFIGURATION

+

json

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show information in JSON format.

+

long

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show extended information.

+

parseable

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show parseable output instead of tree view.

+

global

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

List packages in the global install prefix instead of in the current +project.

+

depth

+
    +
  • Type: Int
  • +
+

Max display depth of the dependency tree.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-outdated.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-outdated.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-outdated.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-outdated.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,47 @@ +

npm-outdated

Check for outdated packages

+

SYNOPSIS

+
npm outdated [<name> [<name> ...]]
+

DESCRIPTION

+

This command will check the registry to see if any (or, specific) installed +packages are currently outdated.

+

The resulting field 'wanted' shows the latest version according to the +version specified in the package.json, the field 'latest' the very latest +version of the package.

+

CONFIGURATION

+

json

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show information in JSON format.

+

long

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show extended information.

+

parseable

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show parseable output instead of tree view.

+

global

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Check packages in the global install prefix instead of in the current +project.

+

depth

+
    +
  • Type: Int
  • +
+

Max depth for checking dependency tree.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-owner.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-owner.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-owner.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-owner.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +

npm-owner

Manage package owners

+

SYNOPSIS

+
npm owner ls <package name>
+npm owner add <user> <package name>
+npm owner rm <user> <package name>
+

DESCRIPTION

+

Manage ownership of published packages.

+
    +
  • ls: +List all the users who have access to modify a package and push new versions. +Handy when you need to know who to bug for help.
  • +
  • add: +Add a new user as a maintainer of a package. This user is enabled to modify +metadata, publish new versions, and add other owners.
  • +
  • rm: +Remove a user from the package owner list. This immediately revokes their +privileges.
  • +
+

Note that there is only one level of access. Either you can modify a package, +or you can't. Future versions may contain more fine-grained access levels, but +that is not implemented at this time.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-pack.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-pack.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-pack.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-pack.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +

npm-pack

Create a tarball from a package

+

SYNOPSIS

+
npm pack [<pkg> [<pkg> ...]]
+

DESCRIPTION

+

For anything that's installable (that is, a package folder, tarball, +tarball url, name@tag, name@version, or name), this command will fetch +it to the cache, and then copy the tarball to the current working +directory as <name>-<version>.tgz, and then write the filenames out to +stdout.

+

If the same package is specified multiple times, then the file will be +overwritten the second time.

+

If no arguments are supplied, then npm packs the current package folder.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-prefix.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-prefix.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-prefix.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-prefix.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +

npm-prefix

Display prefix

+

SYNOPSIS

+
npm prefix [-g]
+

DESCRIPTION

+

Print the local prefix to standard out. This is the closest parent directory +to contain a package.json file unless -g is also specified.

+

If -g is specified, this will be the value of the global prefix. See +npm-config(7) for more detail.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-prune.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-prune.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-prune.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-prune.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +

npm-prune

Remove extraneous packages

+

SYNOPSIS

+
npm prune [<name> [<name> ...]]
+npm prune [<name> [<name> ...]] [--production]
+

DESCRIPTION

+

This command removes "extraneous" packages. If a package name is +provided, then only packages matching one of the supplied names are +removed.

+

Extraneous packages are packages that are not listed on the parent +package's dependencies list.

+

If the --production flag is specified, this command will remove the +packages specified in your devDependencies.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-publish.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-publish.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-publish.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-publish.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,39 @@ +

npm-publish

Publish a package

+

SYNOPSIS

+
npm publish <tarball> [--tag <tag>]
+npm publish <folder> [--tag <tag>]
+

DESCRIPTION

+

Publishes a package to the registry so that it can be installed by name. See +npm-developers(7) for details on what's included in the published package, as +well as details on how the package is built.

+

By default npm will publish to the public registry. This can be overridden by +specifying a different default registry or using a npm-scope(7) in the name +(see package.json(5)).

+
    +
  • <folder>: +A folder containing a package.json file

    +
  • +
  • <tarball>: +A url or file path to a gzipped tar archive containing a single folder +with a package.json file inside.

    +
  • +
  • [--tag <tag>] +Registers the published package with the given tag, such that npm install +<name>@<tag> will install this version. By default, npm publish updates +and npm install installs the latest tag.

    +
  • +
+

Fails if the package name and version combination already exists in +the specified registry.

+

Once a package is published with a given name and version, that +specific name and version combination can never be used again, even if +it is removed with npm-unpublish(1).

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-rebuild.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-rebuild.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-rebuild.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-rebuild.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +

npm-rebuild

Rebuild a package

+

SYNOPSIS

+
npm rebuild [<name> [<name> ...]]
+npm rb [<name> [<name> ...]]
+
    +
  • <name>: +The package to rebuild
  • +
+

DESCRIPTION

+

This command runs the npm build command on the matched folders. This is useful +when you install a new version of node, and must recompile all your C++ addons with +the new binary.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-repo.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-repo.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-repo.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-repo.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +

npm-repo

Open package repository page in the browser

+

SYNOPSIS

+
npm repo <pkgname>
+npm repo (with no args in a package dir)
+

DESCRIPTION

+

This command tries to guess at the likely location of a package's +repository URL, and then tries to open it using the --browser +config param. If no package name is provided, it will search for +a package.json in the current folder and use the name property.

+

CONFIGURATION

+

browser

+
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +
+

The browser that is called by the npm repo command to open websites.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-restart.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-restart.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-restart.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-restart.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +

npm-restart

Restart a package

+

SYNOPSIS

+
npm restart [-- <args>]
+

DESCRIPTION

+

This runs a package's "restart" script, if one was provided. Otherwise it runs +the package's "stop" script, if one was provided, and then the "start" script.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-rm.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-rm.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-rm.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-rm.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +

npm-rm

Remove a package

+

SYNOPSIS

+
npm rm <name>
+npm r <name>
+npm uninstall <name>
+npm un <name>
+

DESCRIPTION

+

This uninstalls a package, completely removing everything npm installed +on its behalf.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-root.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-root.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-root.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-root.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +

npm-root

Display npm root

+

SYNOPSIS

+
npm root
+

DESCRIPTION

+

Print the effective node_modules folder to standard out.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-run-script.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-run-script.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-run-script.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-run-script.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +

npm-run-script

Run arbitrary package scripts

+

SYNOPSIS

+
npm run-script [command] [-- <args>]
+npm run [command] [-- <args>]
+

DESCRIPTION

+

This runs an arbitrary command from a package's "scripts" object. +If no package name is provided, it will search for a package.json +in the current folder and use its "scripts" object. If no "command" +is provided, it will list the available top level scripts.

+

It is used by the test, start, restart, and stop commands, but can be +called directly, as well.

+

As of npm@2.0.0, you can +use custom arguments when executing scripts. The special option -- is used by +getopt to delimit the end of the options. npm will pass +all the arguments after the -- directly to your script:

+
npm run test -- --grep="pattern"
+

The arguments will only be passed to the script specified after npm run +and not to any pre or post script.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-search.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-search.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-search.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-search.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +

npm-search

Search for packages

+

SYNOPSIS

+
npm search [--long] [search terms ...]
+npm s [search terms ...]
+npm se [search terms ...]
+

DESCRIPTION

+

Search the registry for packages matching the search terms.

+

If a term starts with /, then it's interpreted as a regular expression. +A trailing / will be ignored in this case. (Note that many regular +expression characters must be escaped or quoted in most shells.)

+

CONFIGURATION

+

long

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Display full package descriptions and other long text across multiple +lines. When disabled (default) search results are truncated to fit +neatly on a single line. Modules with extremely long names will +fall on multiple lines.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-shrinkwrap.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-shrinkwrap.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-shrinkwrap.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-shrinkwrap.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,144 @@ +

npm-shrinkwrap

Lock down dependency versions

+

SYNOPSIS

+
npm shrinkwrap
+

DESCRIPTION

+

This command locks down the versions of a package's dependencies so +that you can control exactly which versions of each dependency will be +used when your package is installed. The "package.json" file is still +required if you want to use "npm install".

+

By default, "npm install" recursively installs the target's +dependencies (as specified in package.json), choosing the latest +available version that satisfies the dependency's semver pattern. In +some situations, particularly when shipping software where each change +is tightly managed, it's desirable to fully specify each version of +each dependency recursively so that subsequent builds and deploys do +not inadvertently pick up newer versions of a dependency that satisfy +the semver pattern. Specifying specific semver patterns in each +dependency's package.json would facilitate this, but that's not always +possible or desirable, as when another author owns the npm package. +It's also possible to check dependencies directly into source control, +but that may be undesirable for other reasons.

+

As an example, consider package A:

+
{
+  "name": "A",
+  "version": "0.1.0",
+  "dependencies": {
+    "B": "<0.1.0"
+  }
+}
+

package B:

+
{
+  "name": "B",
+  "version": "0.0.1",
+  "dependencies": {
+    "C": "<0.1.0"
+  }
+}
+

and package C:

+
{
+  "name": "C",
+  "version": "0.0.1"
+}
+

If these are the only versions of A, B, and C available in the +registry, then a normal "npm install A" will install:

+
A@0.1.0
+`-- B@0.0.1
+    `-- C@0.0.1
+

However, if B@0.0.2 is published, then a fresh "npm install A" will +install:

+
A@0.1.0
+`-- B@0.0.2
+    `-- C@0.0.1
+

assuming the new version did not modify B's dependencies. Of course, +the new version of B could include a new version of C and any number +of new dependencies. If such changes are undesirable, the author of A +could specify a dependency on B@0.0.1. However, if A's author and B's +author are not the same person, there's no way for A's author to say +that he or she does not want to pull in newly published versions of C +when B hasn't changed at all.

+

In this case, A's author can run

+
npm shrinkwrap
+

This generates npm-shrinkwrap.json, which will look something like this:

+
{
+  "name": "A",
+  "version": "0.1.0",
+  "dependencies": {
+    "B": {
+      "version": "0.0.1",
+      "dependencies": {
+        "C": {
+          "version": "0.1.0"
+        }
+      }
+    }
+  }
+}
+

The shrinkwrap command has locked down the dependencies based on +what's currently installed in node_modules. When "npm install" +installs a package with a npm-shrinkwrap.json file in the package +root, the shrinkwrap file (rather than package.json files) completely +drives the installation of that package and all of its dependencies +(recursively). So now the author publishes A@0.1.0, and subsequent +installs of this package will use B@0.0.1 and C@0.1.0, regardless of the +dependencies and versions listed in A's, B's, and C's package.json +files.

+

Using shrinkwrapped packages

+

Using a shrinkwrapped package is no different than using any other +package: you can "npm install" it by hand, or add a dependency to your +package.json file and "npm install" it.

+

Building shrinkwrapped packages

+

To shrinkwrap an existing package:

+
    +
  1. Run "npm install" in the package root to install the current +versions of all dependencies.
  2. +
  3. Validate that the package works as expected with these versions.
  4. +
  5. Run "npm shrinkwrap", add npm-shrinkwrap.json to git, and publish +your package.
  6. +
+

To add or update a dependency in a shrinkwrapped package:

+
    +
  1. Run "npm install" in the package root to install the current +versions of all dependencies.
  2. +
  3. Add or update dependencies. "npm install" each new or updated +package individually and then update package.json. Note that they +must be explicitly named in order to be installed: running npm +install with no arguments will merely reproduce the existing +shrinkwrap.
  4. +
  5. Validate that the package works as expected with the new +dependencies.
  6. +
  7. Run "npm shrinkwrap", commit the new npm-shrinkwrap.json, and +publish your package.
  8. +
+

You can use npm-outdated(1) to view dependencies with newer versions +available.

+

Other Notes

+

A shrinkwrap file must be consistent with the package's package.json +file. "npm shrinkwrap" will fail if required dependencies are not +already installed, since that would result in a shrinkwrap that +wouldn't actually work. Similarly, the command will fail if there are +extraneous packages (not referenced by package.json), since that would +indicate that package.json is not correct.

+

Since "npm shrinkwrap" is intended to lock down your dependencies for +production use, devDependencies will not be included unless you +explicitly set the --dev flag when you run npm shrinkwrap. If +installed devDependencies are excluded, then npm will print a +warning. If you want them to be installed with your module by +default, please consider adding them to dependencies instead.

+

If shrinkwrapped package A depends on shrinkwrapped package B, B's +shrinkwrap will not be used as part of the installation of A. However, +because A's shrinkwrap is constructed from a valid installation of B +and recursively specifies all dependencies, the contents of B's +shrinkwrap will implicitly be included in A's shrinkwrap.

+

Caveats

+

If you wish to lock down the specific bytes included in a package, for +example to have 100% confidence in being able to reproduce a +deployment or build, then you ought to check your dependencies into +source control, or pursue some other mechanism that can verify +contents rather than versions.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-star.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-star.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-star.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-star.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +

npm-star

Mark your favorite packages

+

SYNOPSIS

+
npm star <pkgname> [<pkg>, ...]
+npm unstar <pkgname> [<pkg>, ...]
+

DESCRIPTION

+

"Starring" a package means that you have some interest in it. It's +a vaguely positive way to show that you care.

+

"Unstarring" is the same thing, but in reverse.

+

It's a boolean thing. Starring repeatedly has no additional effect.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-stars.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-stars.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-stars.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-stars.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +

npm-stars

View packages marked as favorites

+

SYNOPSIS

+
npm stars
+npm stars [username]
+

DESCRIPTION

+

If you have starred a lot of neat things and want to find them again +quickly this command lets you do just that.

+

You may also want to see your friend's favorite packages, in this case +you will most certainly enjoy this command.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-start.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-start.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-start.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-start.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +

npm-start

Start a package

+

SYNOPSIS

+
npm start [-- <args>]
+

DESCRIPTION

+

This runs a package's "start" script, if one was provided.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-stop.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-stop.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-stop.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-stop.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +

npm-stop

Stop a package

+

SYNOPSIS

+
npm stop [-- <args>]
+

DESCRIPTION

+

This runs a package's "stop" script, if one was provided.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-submodule.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-submodule.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-submodule.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-submodule.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +

npm-submodule

Add a package as a git submodule

+

SYNOPSIS

+
npm submodule <pkg>
+

DESCRIPTION

+

If the specified package has a git repository url in its package.json +description, then this command will add it as a git submodule at +node_modules/<pkg name>.

+

This is a convenience only. From then on, it's up to you to manage +updates by using the appropriate git commands. npm will stubbornly +refuse to update, modify, or remove anything with a .git subfolder +in it.

+

This command also does not install missing dependencies, if the package +does not include them in its git repository. If npm ls reports that +things are missing, you can either install, link, or submodule them yourself, +or you can do npm explore <pkgname> -- npm install to install the +dependencies into the submodule folder.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-tag.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-tag.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-tag.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-tag.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +

npm-tag

Tag a published version

+

SYNOPSIS

+
npm tag <name>@<version> [<tag>]
+

DESCRIPTION

+

Tags the specified version of the package with the specified tag, or the +--tag config if not specified.

+

A tag can be used when installing packages as a reference to a version instead +of using a specific version number:

+
npm install <name>@<tag>
+

When installing dependencies, a preferred tagged version may be specified:

+
npm install --tag <tag>
+

This also applies to npm dedupe.

+

Publishing a package always sets the "latest" tag to the published version.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-test.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-test.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-test.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-test.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +

npm-test

Test a package

+

SYNOPSIS

+
  npm test [-- <args>]
+  npm tst [-- <args>]
+

DESCRIPTION

+

This runs a package's "test" script, if one was provided.

+

To run tests as a condition of installation, set the npat config to +true.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-uninstall.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-uninstall.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-uninstall.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-uninstall.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +

npm-rm

Remove a package

+

SYNOPSIS

+
npm uninstall [@<scope>/]<package> [--save|--save-dev|--save-optional]
+npm rm (with any of the previous argument usage)
+

DESCRIPTION

+

This uninstalls a package, completely removing everything npm installed +on its behalf.

+

Example:

+
npm uninstall sax
+

In global mode (ie, with -g or --global appended to the command), +it uninstalls the current package context as a global package.

+

npm uninstall takes 3 exclusive, optional flags which save or update +the package version in your main package.json:

+
    +
  • --save: Package will be removed from your dependencies.

    +
  • +
  • --save-dev: Package will be removed from your devDependencies.

    +
  • +
  • --save-optional: Package will be removed from your optionalDependencies.

    +
  • +
+

Scope is optional and follows the usual rules for npm-scope(7).

+

Examples:

+
npm uninstall sax --save
+npm uninstall @myorg/privatepackage --save
+npm uninstall node-tap --save-dev
+npm uninstall dtrace-provider --save-optional
+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-unpublish.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-unpublish.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-unpublish.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-unpublish.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +

npm-unpublish

Remove a package from the registry

+

SYNOPSIS

+
npm unpublish [@<scope>/]<name>[@<version>]
+

WARNING

+

It is generally considered bad behavior to remove versions of a library +that others are depending on!

+

Consider using the deprecate command +instead, if your intent is to encourage users to upgrade.

+

There is plenty of room on the registry.

+

DESCRIPTION

+

This removes a package version from the registry, deleting its +entry and removing the tarball.

+

If no version is specified, or if all versions are removed then +the root package entry is removed from the registry entirely.

+

Even if a package version is unpublished, that specific name and +version combination can never be reused. In order to publish the +package again, a new version number must be used.

+

The scope is optional and follows the usual rules for npm-scope(7).

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-update.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-update.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-update.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-update.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +

npm-update

Update a package

+

SYNOPSIS

+
npm update [-g] [<name> [<name> ...]]
+

DESCRIPTION

+

This command will update all the packages listed to the latest version +(specified by the tag config).

+

It will also install missing packages.

+

If the -g flag is specified, this command will update globally installed +packages.

+

If no package name is specified, all packages in the specified location (global +or local) will be updated.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-version.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-version.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-version.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-version.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +

npm-version

Bump a package version

+

SYNOPSIS

+
npm version [<newversion> | major | minor | patch | premajor | preminor | prepatch | prerelease]
+

DESCRIPTION

+

Run this in a package directory to bump the version and write the new +data back to the package.json file.

+

The newversion argument should be a valid semver string, or a +valid second argument to semver.inc (one of "patch", "minor", "major", +"prepatch", "preminor", "premajor", "prerelease"). In the second case, +the existing version will be incremented by 1 in the specified field.

+

If run in a git repo, it will also create a version commit and tag, and +fail if the repo is not clean.

+

If supplied with --message (shorthand: -m) config option, npm will +use it as a commit message when creating a version commit. If the +message config contains %s then that will be replaced with the +resulting version number. For example:

+
npm version patch -m "Upgrade to %s for reasons"
+

If the sign-git-tag config is set, then the tag will be signed using +the -s flag to git. Note that you must have a default GPG key set up +in your git config for this to work properly. For example:

+
$ npm config set sign-git-tag true
+$ npm version patch
+
+You need a passphrase to unlock the secret key for
+user: "isaacs (http://blog.izs.me/) <i@izs.me>"
+2048-bit RSA key, ID 6C481CF6, created 2010-08-31
+
+Enter passphrase:
+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-view.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-view.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-view.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-view.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,62 @@ +

npm-view

View registry info

+

SYNOPSIS

+
npm view [@<scope>/]<name>[@<version>] [<field>[.<subfield>]...]
+npm v [@<scope>/]<name>[@<version>] [<field>[.<subfield>]...]
+

DESCRIPTION

+

This command shows data about a package and prints it to the stream +referenced by the outfd config, which defaults to stdout.

+

To show the package registry entry for the connect package, you can do +this:

+
npm view connect
+

The default version is "latest" if unspecified.

+

Field names can be specified after the package descriptor. +For example, to show the dependencies of the ronn package at version +0.3.5, you could do the following:

+
npm view ronn@0.3.5 dependencies
+

You can view child fields by separating them with a period. +To view the git repository URL for the latest version of npm, you could +do this:

+
npm view npm repository.url
+

This makes it easy to view information about a dependency with a bit of +shell scripting. For example, to view all the data about the version of +opts that ronn depends on, you can do this:

+
npm view opts@$(npm view ronn dependencies.opts)
+

For fields that are arrays, requesting a non-numeric field will return +all of the values from the objects in the list. For example, to get all +the contributor email addresses for the "express" project, you can do this:

+
npm view express contributors.email
+

You may also use numeric indices in square braces to specifically select +an item in an array field. To just get the email address of the first +contributor in the list, you can do this:

+
npm view express contributors[0].email
+

Multiple fields may be specified, and will be printed one after another. +For example, to get all the contributor names and email addresses, you +can do this:

+
npm view express contributors.name contributors.email
+

"Person" fields are shown as a string if they would be shown as an +object. So, for example, this will show the list of npm contributors in +the shortened string format. (See package.json(5) for more on this.)

+
npm view npm contributors
+

If a version range is provided, then data will be printed for every +matching version of the package. This will show which version of jsdom +was required by each matching version of yui3:

+
npm view yui3@'>0.5.4' dependencies.jsdom
+

OUTPUT

+

If only a single string field for a single version is output, then it +will not be colorized or quoted, so as to enable piping the output to +another command. If the field is an object, it will be output as a JavaScript object literal.

+

If the --json flag is given, the outputted fields will be JSON.

+

If the version range matches multiple versions, then each printed value +will be prefixed with the version it applies to.

+

If multiple fields are requested, then each of them is prefixed with +the field name.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-whoami.html nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-whoami.html --- nodejs-0.11.13/deps/npm/html/partial/doc/cli/npm-whoami.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/cli/npm-whoami.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +

npm-whoami

Display npm username

+

SYNOPSIS

+
npm whoami
+

DESCRIPTION

+

Print the username config to standard output.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/files/npm-folders.html nodejs-0.11.15/deps/npm/html/partial/doc/files/npm-folders.html --- nodejs-0.11.13/deps/npm/html/partial/doc/files/npm-folders.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/files/npm-folders.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,164 @@ +

npm-folders

Folder Structures Used by npm

+

DESCRIPTION

+

npm puts various things on your computer. That's its job.

+

This document will tell you what it puts where.

+

tl;dr

+
    +
  • Local install (default): puts stuff in ./node_modules of the current +package root.
  • +
  • Global install (with -g): puts stuff in /usr/local or wherever node +is installed.
  • +
  • Install it locally if you're going to require() it.
  • +
  • Install it globally if you're going to run it on the command line.
  • +
  • If you need both, then install it in both places, or use npm link.
  • +
+

prefix Configuration

+

The prefix config defaults to the location where node is installed. +On most systems, this is /usr/local, and most of the time is the same +as node's process.installPrefix.

+

On windows, this is the exact location of the node.exe binary. On Unix +systems, it's one level up, since node is typically installed at +{prefix}/bin/node rather than {prefix}/node.exe.

+

When the global flag is set, npm installs things into this prefix. +When it is not set, it uses the root of the current package, or the +current working directory if not in a package already.

+

Node Modules

+

Packages are dropped into the node_modules folder under the prefix. +When installing locally, this means that you can +require("packagename") to load its main module, or +require("packagename/lib/path/to/sub/module") to load other modules.

+

Global installs on Unix systems go to {prefix}/lib/node_modules. +Global installs on Windows go to {prefix}/node_modules (that is, no +lib folder.)

+

Scoped packages are installed the same way, except they are grouped together +in a sub-folder of the relevant node_modules folder with the name of that +scope prefixed by the @ symbol, e.g. npm install @myorg/package would place +the package in {prefix}/node_modules/@myorg/package. See scopes(7) for +more details.

+

If you wish to require() a package, then install it locally.

+

Executables

+

When in global mode, executables are linked into {prefix}/bin on Unix, +or directly into {prefix} on Windows.

+

When in local mode, executables are linked into +./node_modules/.bin so that they can be made available to scripts run +through npm. (For example, so that a test runner will be in the path +when you run npm test.)

+

Man Pages

+

When in global mode, man pages are linked into {prefix}/share/man.

+

When in local mode, man pages are not installed.

+

Man pages are not installed on Windows systems.

+

Cache

+

See npm-cache(1). Cache files are stored in ~/.npm on Posix, or +~/npm-cache on Windows.

+

This is controlled by the cache configuration param.

+

Temp Files

+

Temporary files are stored by default in the folder specified by the +tmp config, which defaults to the TMPDIR, TMP, or TEMP environment +variables, or /tmp on Unix and c:\windows\temp on Windows.

+

Temp files are given a unique folder under this root for each run of the +program, and are deleted upon successful exit.

+

More Information

+

When installing locally, npm first tries to find an appropriate +prefix folder. This is so that npm install foo@1.2.3 will install +to the sensible root of your package, even if you happen to have cded +into some other folder.

+

Starting at the $PWD, npm will walk up the folder tree checking for a +folder that contains either a package.json file, or a node_modules +folder. If such a thing is found, then that is treated as the effective +"current directory" for the purpose of running npm commands. (This +behavior is inspired by and similar to git's .git-folder seeking +logic when running git commands in a working dir.)

+

If no package root is found, then the current folder is used.

+

When you run npm install foo@1.2.3, then the package is loaded into +the cache, and then unpacked into ./node_modules/foo. Then, any of +foo's dependencies are similarly unpacked into +./node_modules/foo/node_modules/....

+

Any bin files are symlinked to ./node_modules/.bin/, so that they may +be found by npm scripts when necessary.

+

Global Installation

+

If the global configuration is set to true, then npm will +install packages "globally".

+

For global installation, packages are installed roughly the same way, +but using the folders described above.

+

Cycles, Conflicts, and Folder Parsimony

+

Cycles are handled using the property of node's module system that it +walks up the directories looking for node_modules folders. So, at every +stage, if a package is already installed in an ancestor node_modules +folder, then it is not installed at the current location.

+

Consider the case above, where foo -> bar -> baz. Imagine if, in +addition to that, baz depended on bar, so you'd have: +foo -> bar -> baz -> bar -> baz .... However, since the folder +structure is: foo/node_modules/bar/node_modules/baz, there's no need to +put another copy of bar into .../baz/node_modules, since when it calls +require("bar"), it will get the copy that is installed in +foo/node_modules/bar.

+

This shortcut is only used if the exact same +version would be installed in multiple nested node_modules folders. It +is still possible to have a/node_modules/b/node_modules/a if the two +"a" packages are different versions. However, without repeating the +exact same package multiple times, an infinite regress will always be +prevented.

+

Another optimization can be made by installing dependencies at the +highest level possible, below the localized "target" folder.

+

Example

+

Consider this dependency graph:

+
foo
++-- blerg@1.2.5
++-- bar@1.2.3
+|   +-- blerg@1.x (latest=1.3.7)
+|   +-- baz@2.x
+|   |   `-- quux@3.x
+|   |       `-- bar@1.2.3 (cycle)
+|   `-- asdf@*
+`-- baz@1.2.3
+    `-- quux@3.x
+        `-- bar
+

In this case, we might expect a folder structure like this:

+
foo
++-- node_modules
+    +-- blerg (1.2.5) <---[A]
+    +-- bar (1.2.3) <---[B]
+    |   `-- node_modules
+    |       +-- baz (2.0.2) <---[C]
+    |       |   `-- node_modules
+    |       |       `-- quux (3.2.0)
+    |       `-- asdf (2.3.4)
+    `-- baz (1.2.3) <---[D]
+        `-- node_modules
+            `-- quux (3.2.0) <---[E]
+

Since foo depends directly on bar@1.2.3 and baz@1.2.3, those are +installed in foo's node_modules folder.

+

Even though the latest copy of blerg is 1.3.7, foo has a specific +dependency on version 1.2.5. So, that gets installed at [A]. Since the +parent installation of blerg satisfies bar's dependency on blerg@1.x, +it does not install another copy under [B].

+

Bar [B] also has dependencies on baz and asdf, so those are installed in +bar's node_modules folder. Because it depends on baz@2.x, it cannot +re-use the baz@1.2.3 installed in the parent node_modules folder [D], +and must install its own copy [C].

+

Underneath bar, the baz -> quux -> bar dependency creates a cycle. +However, because bar is already in quux's ancestry [B], it does not +unpack another copy of bar into that folder.

+

Underneath foo -> baz [D], quux's [E] folder tree is empty, because its +dependency on bar is satisfied by the parent folder copy installed at [B].

+

For a graphical breakdown of what is installed where, use npm ls.

+

Publishing

+

Upon publishing, npm will look in the node_modules folder. If any of +the items there are not in the bundledDependencies array, then they will +not be included in the package tarball.

+

This allows a package maintainer to install all of their dependencies +(and dev dependencies) locally, but only re-publish those items that +cannot be found elsewhere. See package.json(5) for more information.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/files/npm-global.html nodejs-0.11.15/deps/npm/html/partial/doc/files/npm-global.html --- nodejs-0.11.13/deps/npm/html/partial/doc/files/npm-global.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/files/npm-global.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,164 @@ +

npm-folders

Folder Structures Used by npm

+

DESCRIPTION

+

npm puts various things on your computer. That's its job.

+

This document will tell you what it puts where.

+

tl;dr

+
    +
  • Local install (default): puts stuff in ./node_modules of the current +package root.
  • +
  • Global install (with -g): puts stuff in /usr/local or wherever node +is installed.
  • +
  • Install it locally if you're going to require() it.
  • +
  • Install it globally if you're going to run it on the command line.
  • +
  • If you need both, then install it in both places, or use npm link.
  • +
+

prefix Configuration

+

The prefix config defaults to the location where node is installed. +On most systems, this is /usr/local, and most of the time is the same +as node's process.installPrefix.

+

On windows, this is the exact location of the node.exe binary. On Unix +systems, it's one level up, since node is typically installed at +{prefix}/bin/node rather than {prefix}/node.exe.

+

When the global flag is set, npm installs things into this prefix. +When it is not set, it uses the root of the current package, or the +current working directory if not in a package already.

+

Node Modules

+

Packages are dropped into the node_modules folder under the prefix. +When installing locally, this means that you can +require("packagename") to load its main module, or +require("packagename/lib/path/to/sub/module") to load other modules.

+

Global installs on Unix systems go to {prefix}/lib/node_modules. +Global installs on Windows go to {prefix}/node_modules (that is, no +lib folder.)

+

Scoped packages are installed the same way, except they are grouped together +in a sub-folder of the relevant node_modules folder with the name of that +scope prefixed by the @ symbol, e.g. npm install @myorg/package would place +the package in {prefix}/node_modules/@myorg/package. See scopes(7) for +more details.

+

If you wish to require() a package, then install it locally.

+

Executables

+

When in global mode, executables are linked into {prefix}/bin on Unix, +or directly into {prefix} on Windows.

+

When in local mode, executables are linked into +./node_modules/.bin so that they can be made available to scripts run +through npm. (For example, so that a test runner will be in the path +when you run npm test.)

+

Man Pages

+

When in global mode, man pages are linked into {prefix}/share/man.

+

When in local mode, man pages are not installed.

+

Man pages are not installed on Windows systems.

+

Cache

+

See npm-cache(1). Cache files are stored in ~/.npm on Posix, or +~/npm-cache on Windows.

+

This is controlled by the cache configuration param.

+

Temp Files

+

Temporary files are stored by default in the folder specified by the +tmp config, which defaults to the TMPDIR, TMP, or TEMP environment +variables, or /tmp on Unix and c:\windows\temp on Windows.

+

Temp files are given a unique folder under this root for each run of the +program, and are deleted upon successful exit.

+

More Information

+

When installing locally, npm first tries to find an appropriate +prefix folder. This is so that npm install foo@1.2.3 will install +to the sensible root of your package, even if you happen to have cded +into some other folder.

+

Starting at the $PWD, npm will walk up the folder tree checking for a +folder that contains either a package.json file, or a node_modules +folder. If such a thing is found, then that is treated as the effective +"current directory" for the purpose of running npm commands. (This +behavior is inspired by and similar to git's .git-folder seeking +logic when running git commands in a working dir.)

+

If no package root is found, then the current folder is used.

+

When you run npm install foo@1.2.3, then the package is loaded into +the cache, and then unpacked into ./node_modules/foo. Then, any of +foo's dependencies are similarly unpacked into +./node_modules/foo/node_modules/....

+

Any bin files are symlinked to ./node_modules/.bin/, so that they may +be found by npm scripts when necessary.

+

Global Installation

+

If the global configuration is set to true, then npm will +install packages "globally".

+

For global installation, packages are installed roughly the same way, +but using the folders described above.

+

Cycles, Conflicts, and Folder Parsimony

+

Cycles are handled using the property of node's module system that it +walks up the directories looking for node_modules folders. So, at every +stage, if a package is already installed in an ancestor node_modules +folder, then it is not installed at the current location.

+

Consider the case above, where foo -> bar -> baz. Imagine if, in +addition to that, baz depended on bar, so you'd have: +foo -> bar -> baz -> bar -> baz .... However, since the folder +structure is: foo/node_modules/bar/node_modules/baz, there's no need to +put another copy of bar into .../baz/node_modules, since when it calls +require("bar"), it will get the copy that is installed in +foo/node_modules/bar.

+

This shortcut is only used if the exact same +version would be installed in multiple nested node_modules folders. It +is still possible to have a/node_modules/b/node_modules/a if the two +"a" packages are different versions. However, without repeating the +exact same package multiple times, an infinite regress will always be +prevented.

+

Another optimization can be made by installing dependencies at the +highest level possible, below the localized "target" folder.

+

Example

+

Consider this dependency graph:

+
foo
++-- blerg@1.2.5
++-- bar@1.2.3
+|   +-- blerg@1.x (latest=1.3.7)
+|   +-- baz@2.x
+|   |   `-- quux@3.x
+|   |       `-- bar@1.2.3 (cycle)
+|   `-- asdf@*
+`-- baz@1.2.3
+    `-- quux@3.x
+        `-- bar
+

In this case, we might expect a folder structure like this:

+
foo
++-- node_modules
+    +-- blerg (1.2.5) <---[A]
+    +-- bar (1.2.3) <---[B]
+    |   `-- node_modules
+    |       +-- baz (2.0.2) <---[C]
+    |       |   `-- node_modules
+    |       |       `-- quux (3.2.0)
+    |       `-- asdf (2.3.4)
+    `-- baz (1.2.3) <---[D]
+        `-- node_modules
+            `-- quux (3.2.0) <---[E]
+

Since foo depends directly on bar@1.2.3 and baz@1.2.3, those are +installed in foo's node_modules folder.

+

Even though the latest copy of blerg is 1.3.7, foo has a specific +dependency on version 1.2.5. So, that gets installed at [A]. Since the +parent installation of blerg satisfies bar's dependency on blerg@1.x, +it does not install another copy under [B].

+

Bar [B] also has dependencies on baz and asdf, so those are installed in +bar's node_modules folder. Because it depends on baz@2.x, it cannot +re-use the baz@1.2.3 installed in the parent node_modules folder [D], +and must install its own copy [C].

+

Underneath bar, the baz -> quux -> bar dependency creates a cycle. +However, because bar is already in quux's ancestry [B], it does not +unpack another copy of bar into that folder.

+

Underneath foo -> baz [D], quux's [E] folder tree is empty, because its +dependency on bar is satisfied by the parent folder copy installed at [B].

+

For a graphical breakdown of what is installed where, use npm ls.

+

Publishing

+

Upon publishing, npm will look in the node_modules folder. If any of +the items there are not in the bundledDependencies array, then they will +not be included in the package tarball.

+

This allows a package maintainer to install all of their dependencies +(and dev dependencies) locally, but only re-publish those items that +cannot be found elsewhere. See package.json(5) for more information.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/files/npm-json.html nodejs-0.11.15/deps/npm/html/partial/doc/files/npm-json.html --- nodejs-0.11.13/deps/npm/html/partial/doc/files/npm-json.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/files/npm-json.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,465 @@ +

package.json

Specifics of npm's package.json handling

+

DESCRIPTION

+

This document is all you need to know about what's required in your package.json +file. It must be actual JSON, not just a JavaScript object literal.

+

A lot of the behavior described in this document is affected by the config +settings described in npm-config(7).

+

name

+

The most important things in your package.json are the name and version fields. +Those are actually required, and your package won't install without +them. The name and version together form an identifier that is assumed +to be completely unique. Changes to the package should come along with +changes to the version.

+

The name is what your thing is called. Some tips:

+
    +
  • Don't put "js" or "node" in the name. It's assumed that it's js, since you're +writing a package.json file, and you can specify the engine using the "engines" +field. (See below.)
  • +
  • The name ends up being part of a URL, an argument on the command line, and a +folder name. Any name with non-url-safe characters will be rejected. +Also, it can't start with a dot or an underscore.
  • +
  • The name will probably be passed as an argument to require(), so it should +be something short, but also reasonably descriptive.
  • +
  • You may want to check the npm registry to see if there's something by that name +already, before you get too attached to it. http://registry.npmjs.org/
  • +
+

A name can be optionally prefixed by a scope, e.g. @myorg/mypackage. See +npm-scope(7) for more detail.

+

version

+

The most important things in your package.json are the name and version fields. +Those are actually required, and your package won't install without +them. The name and version together form an identifier that is assumed +to be completely unique. Changes to the package should come along with +changes to the version.

+

Version must be parseable by +node-semver, which is bundled +with npm as a dependency. (npm install semver to use it yourself.)

+

More on version numbers and ranges at semver(7).

+

description

+

Put a description in it. It's a string. This helps people discover your +package, as it's listed in npm search.

+

keywords

+

Put keywords in it. It's an array of strings. This helps people +discover your package as it's listed in npm search.

+

homepage

+

The url to the project homepage.

+

NOTE: This is not the same as "url". If you put a "url" field, +then the registry will think it's a redirection to your package that has +been published somewhere else, and spit at you.

+

Literally. Spit. I'm so not kidding.

+

bugs

+

The url to your project's issue tracker and / or the email address to which +issues should be reported. These are helpful for people who encounter issues +with your package.

+

It should look like this:

+
{ "url" : "http://github.com/owner/project/issues"
+, "email" : "project@hostname.com"
+}
+

You can specify either one or both values. If you want to provide only a url, +you can specify the value for "bugs" as a simple string instead of an object.

+

If a url is provided, it will be used by the npm bugs command.

+

license

+

You should specify a license for your package so that people know how they are +permitted to use it, and any restrictions you're placing on it.

+

The simplest way, assuming you're using a common license such as BSD-3-Clause +or MIT, is to just specify the standard SPDX ID of the license you're using, +like this:

+
{ "license" : "BSD-3-Clause" }
+

You can check the full list of SPDX license IDs. +Ideally you should pick one that is +OSI approved.

+

It's also a good idea to include a LICENSE file at the top level in +your package.

+

people fields: author, contributors

+

The "author" is one person. "contributors" is an array of people. A "person" +is an object with a "name" field and optionally "url" and "email", like this:

+
{ "name" : "Barney Rubble"
+, "email" : "b@rubble.com"
+, "url" : "http://barnyrubble.tumblr.com/"
+}
+

Or you can shorten that all into a single string, and npm will parse it for you:

+
"Barney Rubble <b@rubble.com> (http://barnyrubble.tumblr.com/)
+

Both email and url are optional either way.

+

npm also sets a top-level "maintainers" field with your npm user info.

+

files

+

The "files" field is an array of files to include in your project. If +you name a folder in the array, then it will also include the files +inside that folder. (Unless they would be ignored by another rule.)

+

You can also provide a ".npmignore" file in the root of your package, +which will keep files from being included, even if they would be picked +up by the files array. The ".npmignore" file works just like a +".gitignore".

+

main

+

The main field is a module ID that is the primary entry point to your program. +That is, if your package is named foo, and a user installs it, and then does +require("foo"), then your main module's exports object will be returned.

+

This should be a module ID relative to the root of your package folder.

+

For most modules, it makes the most sense to have a main script and often not +much else.

+

bin

+

A lot of packages have one or more executable files that they'd like to +install into the PATH. npm makes this pretty easy (in fact, it uses this +feature to install the "npm" executable.)

+

To use this, supply a bin field in your package.json which is a map of +command name to local file name. On install, npm will symlink that file into +prefix/bin for global installs, or ./node_modules/.bin/ for local +installs.

+

For example, npm has this:

+
{ "bin" : { "npm" : "./cli.js" } }
+

So, when you install npm, it'll create a symlink from the cli.js script to +/usr/local/bin/npm.

+

If you have a single executable, and its name should be the name +of the package, then you can just supply it as a string. For example:

+
{ "name": "my-program"
+, "version": "1.2.5"
+, "bin": "./path/to/program" }
+

would be the same as this:

+
{ "name": "my-program"
+, "version": "1.2.5"
+, "bin" : { "my-program" : "./path/to/program" } }
+

man

+

Specify either a single file or an array of filenames to put in place for the +man program to find.

+

If only a single file is provided, then it's installed such that it is the +result from man <pkgname>, regardless of its actual filename. For example:

+
{ "name" : "foo"
+, "version" : "1.2.3"
+, "description" : "A packaged foo fooer for fooing foos"
+, "main" : "foo.js"
+, "man" : "./man/doc.1"
+}
+

would link the ./man/doc.1 file in such that it is the target for man foo

+

If the filename doesn't start with the package name, then it's prefixed. +So, this:

+
{ "name" : "foo"
+, "version" : "1.2.3"
+, "description" : "A packaged foo fooer for fooing foos"
+, "main" : "foo.js"
+, "man" : [ "./man/foo.1", "./man/bar.1" ]
+}
+

will create files to do man foo and man foo-bar.

+

Man files must end with a number, and optionally a .gz suffix if they are +compressed. The number dictates which man section the file is installed into.

+
{ "name" : "foo"
+, "version" : "1.2.3"
+, "description" : "A packaged foo fooer for fooing foos"
+, "main" : "foo.js"
+, "man" : [ "./man/foo.1", "./man/foo.2" ]
+}
+

will create entries for man foo and man 2 foo

+

directories

+

The CommonJS Packages spec details a +few ways that you can indicate the structure of your package using a directories +object. If you look at npm's package.json, +you'll see that it has directories for doc, lib, and man.

+

In the future, this information may be used in other creative ways.

+

directories.lib

+

Tell people where the bulk of your library is. Nothing special is done +with the lib folder in any way, but it's useful meta info.

+

directories.bin

+

If you specify a bin directory, then all the files in that folder will +be added as children of the bin path.

+

If you have a bin path already, then this has no effect.

+

directories.man

+

A folder that is full of man pages. Sugar to generate a "man" array by +walking the folder.

+

directories.doc

+

Put markdown files in here. Eventually, these will be displayed nicely, +maybe, someday.

+

directories.example

+

Put example scripts in here. Someday, it might be exposed in some clever way.

+

repository

+

Specify the place where your code lives. This is helpful for people who +want to contribute. If the git repo is on github, then the npm docs +command will be able to find you.

+

Do it like this:

+
"repository" :
+  { "type" : "git"
+  , "url" : "http://github.com/npm/npm.git"
+  }
+
+"repository" :
+  { "type" : "svn"
+  , "url" : "http://v8.googlecode.com/svn/trunk/"
+  }
+

The URL should be a publicly available (perhaps read-only) url that can be handed +directly to a VCS program without any modification. It should not be a url to an +html project page that you put in your browser. It's for computers.

+

scripts

+

The "scripts" property is a dictionary containing script commands that are run +at various times in the lifecycle of your package. The key is the lifecycle +event, and the value is the command to run at that point.

+

See npm-scripts(7) to find out more about writing package scripts.

+

config

+

A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades. For instance, if a package had the +following:

+
{ "name" : "foo"
+, "config" : { "port" : "8080" } }
+

and then had a "start" command that then referenced the +npm_package_config_port environment variable, then the user could +override that by doing npm config set foo:port 8001.

+

See npm-config(7) and npm-scripts(7) for more on package +configs.

+

dependencies

+

Dependencies are specified in a simple object that maps a package name to a +version range. The version range is a string which has one or more +space-separated descriptors. Dependencies can also be identified with a +tarball or git URL.

+

Please do not put test harnesses or transpilers in your +dependencies object. See devDependencies, below.

+

See semver(7) for more details about specifying version ranges.

+
    +
  • version Must match version exactly
  • +
  • >version Must be greater than version
  • +
  • >=version etc
  • +
  • <version
  • +
  • <=version
  • +
  • ~version "Approximately equivalent to version" See semver(7)
  • +
  • ^version "Compatible with version" See semver(7)
  • +
  • 1.2.x 1.2.0, 1.2.1, etc., but not 1.3.0
  • +
  • http://... See 'URLs as Dependencies' below
  • +
  • * Matches any version
  • +
  • "" (just an empty string) Same as *
  • +
  • version1 - version2 Same as >=version1 <=version2.
  • +
  • range1 || range2 Passes if either range1 or range2 are satisfied.
  • +
  • git... See 'Git URLs as Dependencies' below
  • +
  • user/repo See 'GitHub URLs' below
  • +
  • tag A specific version tagged and published as tag See npm-tag(1)
  • +
  • path/path/path See Local Paths below
  • +
+

For example, these are all valid:

+
{ "dependencies" :
+  { "foo" : "1.0.0 - 2.9999.9999"
+  , "bar" : ">=1.0.2 <2.1.2"
+  , "baz" : ">1.0.2 <=2.3.4"
+  , "boo" : "2.0.1"
+  , "qux" : "<1.0.0 || >=2.3.1 <2.4.5 || >=2.5.2 <3.0.0"
+  , "asd" : "http://asdf.com/asdf.tar.gz"
+  , "til" : "~1.2"
+  , "elf" : "~1.2.3"
+  , "two" : "2.x"
+  , "thr" : "3.3.x"
+  , "lat" : "latest"
+  , "dyl" : "file:../dyl"
+  }
+}
+

URLs as Dependencies

+

You may specify a tarball URL in place of a version range.

+

This tarball will be downloaded and installed locally to your package at +install time.

+

Git URLs as Dependencies

+

Git urls can be of the form:

+
git://github.com/user/project.git#commit-ish
+git+ssh://user@hostname:project.git#commit-ish
+git+ssh://user@hostname/project.git#commit-ish
+git+http://user@hostname/project/blah.git#commit-ish
+git+https://user@hostname/project/blah.git#commit-ish
+

The commit-ish can be any tag, sha, or branch which can be supplied as +an argument to git checkout. The default is master.

+

GitHub URLs

+

As of version 1.1.65, you can refer to GitHub urls as just "foo": "user/foo-project". For example:

+
{
+  "name": "foo",
+  "version": "0.0.0",
+  "dependencies": {
+    "express": "visionmedia/express"
+  }
+}
+

Local Paths

+

As of version 2.0.0 you can provide a path to a local directory that contains a +package. Local paths can be saved using npm install --save, using any of +these forms:

+
../foo/bar
+~/foo/bar
+./foo/bar
+/foo/bar
+

in which case they will be normalized to a relative path and added to your +package.json. For example:

+
{
+  "name": "baz",
+  "dependencies": {
+    "bar": "file:../foo/bar"
+  }
+}
+

This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry.

+

devDependencies

+

If someone is planning on downloading and using your module in their +program, then they probably don't want or need to download and build +the external test or documentation framework that you use.

+

In this case, it's best to map these additional items in a devDependencies +object.

+

These things will be installed when doing npm link or npm install +from the root of a package, and can be managed like any other npm +configuration param. See npm-config(7) for more on the topic.

+

For build steps that are not platform-specific, such as compiling +CoffeeScript or other languages to JavaScript, use the prepublish +script to do this, and make the required package a devDependency.

+

For example:

+
{ "name": "ethopia-waza",
+  "description": "a delightfully fruity coffee varietal",
+  "version": "1.2.3",
+  "devDependencies": {
+    "coffee-script": "~1.6.3"
+  },
+  "scripts": {
+    "prepublish": "coffee -o lib/ -c src/waza.coffee"
+  },
+  "main": "lib/waza.js"
+}
+

The prepublish script will be run before publishing, so that users +can consume the functionality without requiring them to compile it +themselves. In dev mode (ie, locally running npm install), it'll +run this script as well, so that you can test it easily.

+

peerDependencies

+

In some cases, you want to express the compatibility of your package with an +host tool or library, while not necessarily doing a require of this host. +This is usually refered to as a plugin. Notably, your module may be exposing +a specific interface, expected and specified by the host documentation.

+

For example:

+
{
+  "name": "tea-latte",
+  "version": "1.3.5"
+  "peerDependencies": {
+    "tea": "2.x"
+  }
+}
+

This ensures your package tea-latte can be installed along with the second +major version of the host package tea only. The host package is automatically +installed if needed. npm install tea-latte could possibly yield the following +dependency graph:

+
├── tea-latte@1.3.5
+└── tea@2.2.0
+

Trying to install another plugin with a conflicting requirement will cause an +error. For this reason, make sure your plugin requirement is as broad as +possible, and not to lock it down to specific patch versions.

+

Assuming the host complies with semver, only changes in +the host package's major version will break your plugin. Thus, if you've worked +with every 1.x version of the host package, use "^1.0" or "1.x" to express +this. If you depend on features introduced in 1.5.2, use ">= 1.5.2 < 2".

+

bundledDependencies

+

Array of package names that will be bundled when publishing the package.

+

If this is spelled "bundleDependencies", then that is also honorable.

+

optionalDependencies

+

If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the optionalDependencies +object. This is a map of package name to version or url, just like the +dependencies object. The difference is that build failures do not cause +installation to fail.

+

It is still your program's responsibility to handle the lack of the +dependency. For example, something like this:

+
try {
+  var foo = require('foo')
+  var fooVersion = require('foo/package.json').version
+} catch (er) {
+  foo = null
+}
+if ( notGoodFooVersion(fooVersion) ) {
+  foo = null
+}
+
+// .. then later in your program ..
+
+if (foo) {
+  foo.doFooThings()
+}
+

Entries in optionalDependencies will override entries of the same name in +dependencies, so it's usually best to only put in one place.

+

engines

+

You can specify the version of node that your stuff works on:

+
{ "engines" : { "node" : ">=0.10.3 <0.12" } }
+

And, like with dependencies, if you don't specify the version (or if you +specify "*" as the version), then any version of node will do.

+

If you specify an "engines" field, then npm will require that "node" be +somewhere on that list. If "engines" is omitted, then npm will just assume +that it works on node.

+

You can also use the "engines" field to specify which versions of npm +are capable of properly installing your program. For example:

+
{ "engines" : { "npm" : "~1.0.20" } }
+

Note that, unless the user has set the engine-strict config flag, this +field is advisory only.

+

engineStrict

+

If you are sure that your module will definitely not run properly on +versions of Node/npm other than those specified in the engines object, +then you can set "engineStrict": true in your package.json file. +This will override the user's engine-strict config setting.

+

Please do not do this unless you are really very very sure. If your +engines object is something overly restrictive, you can quite easily and +inadvertently lock yourself into obscurity and prevent your users from +updating to new versions of Node. Consider this choice carefully. If +people abuse it, it will be removed in a future version of npm.

+

os

+

You can specify which operating systems your +module will run on:

+
"os" : [ "darwin", "linux" ]
+

You can also blacklist instead of whitelist operating systems, +just prepend the blacklisted os with a '!':

+
"os" : [ "!win32" ]
+

The host operating system is determined by process.platform

+

It is allowed to both blacklist, and whitelist, although there isn't any +good reason to do this.

+

cpu

+

If your code only runs on certain cpu architectures, +you can specify which ones.

+
"cpu" : [ "x64", "ia32" ]
+

Like the os option, you can also blacklist architectures:

+
"cpu" : [ "!arm", "!mips" ]
+

The host architecture is determined by process.arch

+

preferGlobal

+

If your package is primarily a command-line application that should be +installed globally, then set this value to true to provide a warning +if it is installed locally.

+

It doesn't actually prevent users from installing it locally, but it +does help prevent some confusion if it doesn't work as expected.

+

private

+

If you set "private": true in your package.json, then npm will refuse +to publish it.

+

This is a way to prevent accidental publication of private repositories. If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +publishConfig dictionary described below to override the registry config +param at publish-time.

+

publishConfig

+

This is a set of config values that will be used at publish-time. It's +especially handy if you want to set the tag or registry, so that you can +ensure that a given package is not tagged with "latest" or published to +the global public registry by default.

+

Any config values can be overridden, but of course only "tag" and +"registry" probably matter for the purposes of publishing.

+

See npm-config(7) to see the list of config options that can be +overridden.

+

DEFAULT VALUES

+

npm will default some values based on package contents.

+
    +
  • "scripts": {"start": "node server.js"}

    +

    If there is a server.js file in the root of your package, then npm +will default the start command to node server.js.

    +
  • +
  • "scripts":{"preinstall": "node-gyp rebuild"}

    +

    If there is a binding.gyp file in the root of your package, npm will +default the preinstall command to compile using node-gyp.

    +
  • +
  • "contributors": [...]

    +

    If there is an AUTHORS file in the root of your package, npm will +treat each line as a Name <email> (url) format, where email and url +are optional. Lines which start with a # or are blank, will be +ignored.

    +
  • +
+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/files/npmrc.html nodejs-0.11.15/deps/npm/html/partial/doc/files/npmrc.html --- nodejs-0.11.13/deps/npm/html/partial/doc/files/npmrc.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/files/npmrc.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +

npmrc

The npm config files

+

DESCRIPTION

+

npm gets its config settings from the command line, environment +variables, and npmrc files.

+

The npm config command can be used to update and edit the contents +of the user and global npmrc files.

+

For a list of available configuration options, see npm-config(7).

+

FILES

+

The four relevant files are:

+
    +
  • per-project config file (/path/to/my/project/.npmrc)
  • +
  • per-user config file (~/.npmrc)
  • +
  • global config file ($PREFIX/npmrc)
  • +
  • npm builtin config file (/path/to/npm/npmrc)
  • +
+

All npm config files are an ini-formatted list of key = value +parameters. Environment variables can be replaced using +${VARIABLE_NAME}. For example:

+
prefix = ${HOME}/.npm-packages
+

Each of these files is loaded, and config options are resolved in +priority order. For example, a setting in the userconfig file would +override the setting in the globalconfig file.

+

Per-project config file

+

When working locally in a project, a .npmrc file in the root of the +project (ie, a sibling of node_modules and package.json) will set +config values specific to this project.

+

Note that this only applies to the root of the project that you're +running npm in. It has no effect when your module is published. For +example, you can't publish a module that forces itself to install +globally, or in a different location.

+

Per-user config file

+

$HOME/.npmrc (or the userconfig param, if set in the environment +or on the command line)

+

Global config file

+

$PREFIX/etc/npmrc (or the globalconfig param, if set above): +This file is an ini-file formatted list of key = value parameters. +Environment variables can be replaced as above.

+

Built-in config file

+

path/to/npm/itself/npmrc

+

This is an unchangeable "builtin" configuration file that npm keeps +consistent across updates. Set fields in here using the ./configure +script that comes with npm. This is primarily for distribution +maintainers to override default configs in a standard and consistent +manner.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/files/package.json.html nodejs-0.11.15/deps/npm/html/partial/doc/files/package.json.html --- nodejs-0.11.13/deps/npm/html/partial/doc/files/package.json.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/files/package.json.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,465 @@ +

package.json

Specifics of npm's package.json handling

+

DESCRIPTION

+

This document is all you need to know about what's required in your package.json +file. It must be actual JSON, not just a JavaScript object literal.

+

A lot of the behavior described in this document is affected by the config +settings described in npm-config(7).

+

name

+

The most important things in your package.json are the name and version fields. +Those are actually required, and your package won't install without +them. The name and version together form an identifier that is assumed +to be completely unique. Changes to the package should come along with +changes to the version.

+

The name is what your thing is called. Some tips:

+
    +
  • Don't put "js" or "node" in the name. It's assumed that it's js, since you're +writing a package.json file, and you can specify the engine using the "engines" +field. (See below.)
  • +
  • The name ends up being part of a URL, an argument on the command line, and a +folder name. Any name with non-url-safe characters will be rejected. +Also, it can't start with a dot or an underscore.
  • +
  • The name will probably be passed as an argument to require(), so it should +be something short, but also reasonably descriptive.
  • +
  • You may want to check the npm registry to see if there's something by that name +already, before you get too attached to it. http://registry.npmjs.org/
  • +
+

A name can be optionally prefixed by a scope, e.g. @myorg/mypackage. See +npm-scope(7) for more detail.

+

version

+

The most important things in your package.json are the name and version fields. +Those are actually required, and your package won't install without +them. The name and version together form an identifier that is assumed +to be completely unique. Changes to the package should come along with +changes to the version.

+

Version must be parseable by +node-semver, which is bundled +with npm as a dependency. (npm install semver to use it yourself.)

+

More on version numbers and ranges at semver(7).

+

description

+

Put a description in it. It's a string. This helps people discover your +package, as it's listed in npm search.

+

keywords

+

Put keywords in it. It's an array of strings. This helps people +discover your package as it's listed in npm search.

+

homepage

+

The url to the project homepage.

+

NOTE: This is not the same as "url". If you put a "url" field, +then the registry will think it's a redirection to your package that has +been published somewhere else, and spit at you.

+

Literally. Spit. I'm so not kidding.

+

bugs

+

The url to your project's issue tracker and / or the email address to which +issues should be reported. These are helpful for people who encounter issues +with your package.

+

It should look like this:

+
{ "url" : "http://github.com/owner/project/issues"
+, "email" : "project@hostname.com"
+}
+

You can specify either one or both values. If you want to provide only a url, +you can specify the value for "bugs" as a simple string instead of an object.

+

If a url is provided, it will be used by the npm bugs command.

+

license

+

You should specify a license for your package so that people know how they are +permitted to use it, and any restrictions you're placing on it.

+

The simplest way, assuming you're using a common license such as BSD-3-Clause +or MIT, is to just specify the standard SPDX ID of the license you're using, +like this:

+
{ "license" : "BSD-3-Clause" }
+

You can check the full list of SPDX license IDs. +Ideally you should pick one that is +OSI approved.

+

It's also a good idea to include a LICENSE file at the top level in +your package.

+

people fields: author, contributors

+

The "author" is one person. "contributors" is an array of people. A "person" +is an object with a "name" field and optionally "url" and "email", like this:

+
{ "name" : "Barney Rubble"
+, "email" : "b@rubble.com"
+, "url" : "http://barnyrubble.tumblr.com/"
+}
+

Or you can shorten that all into a single string, and npm will parse it for you:

+
"Barney Rubble <b@rubble.com> (http://barnyrubble.tumblr.com/)
+

Both email and url are optional either way.

+

npm also sets a top-level "maintainers" field with your npm user info.

+

files

+

The "files" field is an array of files to include in your project. If +you name a folder in the array, then it will also include the files +inside that folder. (Unless they would be ignored by another rule.)

+

You can also provide a ".npmignore" file in the root of your package, +which will keep files from being included, even if they would be picked +up by the files array. The ".npmignore" file works just like a +".gitignore".

+

main

+

The main field is a module ID that is the primary entry point to your program. +That is, if your package is named foo, and a user installs it, and then does +require("foo"), then your main module's exports object will be returned.

+

This should be a module ID relative to the root of your package folder.

+

For most modules, it makes the most sense to have a main script and often not +much else.

+

bin

+

A lot of packages have one or more executable files that they'd like to +install into the PATH. npm makes this pretty easy (in fact, it uses this +feature to install the "npm" executable.)

+

To use this, supply a bin field in your package.json which is a map of +command name to local file name. On install, npm will symlink that file into +prefix/bin for global installs, or ./node_modules/.bin/ for local +installs.

+

For example, npm has this:

+
{ "bin" : { "npm" : "./cli.js" } }
+

So, when you install npm, it'll create a symlink from the cli.js script to +/usr/local/bin/npm.

+

If you have a single executable, and its name should be the name +of the package, then you can just supply it as a string. For example:

+
{ "name": "my-program"
+, "version": "1.2.5"
+, "bin": "./path/to/program" }
+

would be the same as this:

+
{ "name": "my-program"
+, "version": "1.2.5"
+, "bin" : { "my-program" : "./path/to/program" } }
+

man

+

Specify either a single file or an array of filenames to put in place for the +man program to find.

+

If only a single file is provided, then it's installed such that it is the +result from man <pkgname>, regardless of its actual filename. For example:

+
{ "name" : "foo"
+, "version" : "1.2.3"
+, "description" : "A packaged foo fooer for fooing foos"
+, "main" : "foo.js"
+, "man" : "./man/doc.1"
+}
+

would link the ./man/doc.1 file in such that it is the target for man foo

+

If the filename doesn't start with the package name, then it's prefixed. +So, this:

+
{ "name" : "foo"
+, "version" : "1.2.3"
+, "description" : "A packaged foo fooer for fooing foos"
+, "main" : "foo.js"
+, "man" : [ "./man/foo.1", "./man/bar.1" ]
+}
+

will create files to do man foo and man foo-bar.

+

Man files must end with a number, and optionally a .gz suffix if they are +compressed. The number dictates which man section the file is installed into.

+
{ "name" : "foo"
+, "version" : "1.2.3"
+, "description" : "A packaged foo fooer for fooing foos"
+, "main" : "foo.js"
+, "man" : [ "./man/foo.1", "./man/foo.2" ]
+}
+

will create entries for man foo and man 2 foo

+

directories

+

The CommonJS Packages spec details a +few ways that you can indicate the structure of your package using a directories +object. If you look at npm's package.json, +you'll see that it has directories for doc, lib, and man.

+

In the future, this information may be used in other creative ways.

+

directories.lib

+

Tell people where the bulk of your library is. Nothing special is done +with the lib folder in any way, but it's useful meta info.

+

directories.bin

+

If you specify a bin directory, then all the files in that folder will +be added as children of the bin path.

+

If you have a bin path already, then this has no effect.

+

directories.man

+

A folder that is full of man pages. Sugar to generate a "man" array by +walking the folder.

+

directories.doc

+

Put markdown files in here. Eventually, these will be displayed nicely, +maybe, someday.

+

directories.example

+

Put example scripts in here. Someday, it might be exposed in some clever way.

+

repository

+

Specify the place where your code lives. This is helpful for people who +want to contribute. If the git repo is on github, then the npm docs +command will be able to find you.

+

Do it like this:

+
"repository" :
+  { "type" : "git"
+  , "url" : "http://github.com/npm/npm.git"
+  }
+
+"repository" :
+  { "type" : "svn"
+  , "url" : "http://v8.googlecode.com/svn/trunk/"
+  }
+

The URL should be a publicly available (perhaps read-only) url that can be handed +directly to a VCS program without any modification. It should not be a url to an +html project page that you put in your browser. It's for computers.

+

scripts

+

The "scripts" property is a dictionary containing script commands that are run +at various times in the lifecycle of your package. The key is the lifecycle +event, and the value is the command to run at that point.

+

See npm-scripts(7) to find out more about writing package scripts.

+

config

+

A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades. For instance, if a package had the +following:

+
{ "name" : "foo"
+, "config" : { "port" : "8080" } }
+

and then had a "start" command that then referenced the +npm_package_config_port environment variable, then the user could +override that by doing npm config set foo:port 8001.

+

See npm-config(7) and npm-scripts(7) for more on package +configs.

+

dependencies

+

Dependencies are specified in a simple object that maps a package name to a +version range. The version range is a string which has one or more +space-separated descriptors. Dependencies can also be identified with a +tarball or git URL.

+

Please do not put test harnesses or transpilers in your +dependencies object. See devDependencies, below.

+

See semver(7) for more details about specifying version ranges.

+
    +
  • version Must match version exactly
  • +
  • >version Must be greater than version
  • +
  • >=version etc
  • +
  • <version
  • +
  • <=version
  • +
  • ~version "Approximately equivalent to version" See semver(7)
  • +
  • ^version "Compatible with version" See semver(7)
  • +
  • 1.2.x 1.2.0, 1.2.1, etc., but not 1.3.0
  • +
  • http://... See 'URLs as Dependencies' below
  • +
  • * Matches any version
  • +
  • "" (just an empty string) Same as *
  • +
  • version1 - version2 Same as >=version1 <=version2.
  • +
  • range1 || range2 Passes if either range1 or range2 are satisfied.
  • +
  • git... See 'Git URLs as Dependencies' below
  • +
  • user/repo See 'GitHub URLs' below
  • +
  • tag A specific version tagged and published as tag See npm-tag(1)
  • +
  • path/path/path See Local Paths below
  • +
+

For example, these are all valid:

+
{ "dependencies" :
+  { "foo" : "1.0.0 - 2.9999.9999"
+  , "bar" : ">=1.0.2 <2.1.2"
+  , "baz" : ">1.0.2 <=2.3.4"
+  , "boo" : "2.0.1"
+  , "qux" : "<1.0.0 || >=2.3.1 <2.4.5 || >=2.5.2 <3.0.0"
+  , "asd" : "http://asdf.com/asdf.tar.gz"
+  , "til" : "~1.2"
+  , "elf" : "~1.2.3"
+  , "two" : "2.x"
+  , "thr" : "3.3.x"
+  , "lat" : "latest"
+  , "dyl" : "file:../dyl"
+  }
+}
+

URLs as Dependencies

+

You may specify a tarball URL in place of a version range.

+

This tarball will be downloaded and installed locally to your package at +install time.

+

Git URLs as Dependencies

+

Git urls can be of the form:

+
git://github.com/user/project.git#commit-ish
+git+ssh://user@hostname:project.git#commit-ish
+git+ssh://user@hostname/project.git#commit-ish
+git+http://user@hostname/project/blah.git#commit-ish
+git+https://user@hostname/project/blah.git#commit-ish
+

The commit-ish can be any tag, sha, or branch which can be supplied as +an argument to git checkout. The default is master.

+

GitHub URLs

+

As of version 1.1.65, you can refer to GitHub urls as just "foo": "user/foo-project". For example:

+
{
+  "name": "foo",
+  "version": "0.0.0",
+  "dependencies": {
+    "express": "visionmedia/express"
+  }
+}
+

Local Paths

+

As of version 2.0.0 you can provide a path to a local directory that contains a +package. Local paths can be saved using npm install --save, using any of +these forms:

+
../foo/bar
+~/foo/bar
+./foo/bar
+/foo/bar
+

in which case they will be normalized to a relative path and added to your +package.json. For example:

+
{
+  "name": "baz",
+  "dependencies": {
+    "bar": "file:../foo/bar"
+  }
+}
+

This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry.

+

devDependencies

+

If someone is planning on downloading and using your module in their +program, then they probably don't want or need to download and build +the external test or documentation framework that you use.

+

In this case, it's best to map these additional items in a devDependencies +object.

+

These things will be installed when doing npm link or npm install +from the root of a package, and can be managed like any other npm +configuration param. See npm-config(7) for more on the topic.

+

For build steps that are not platform-specific, such as compiling +CoffeeScript or other languages to JavaScript, use the prepublish +script to do this, and make the required package a devDependency.

+

For example:

+
{ "name": "ethopia-waza",
+  "description": "a delightfully fruity coffee varietal",
+  "version": "1.2.3",
+  "devDependencies": {
+    "coffee-script": "~1.6.3"
+  },
+  "scripts": {
+    "prepublish": "coffee -o lib/ -c src/waza.coffee"
+  },
+  "main": "lib/waza.js"
+}
+

The prepublish script will be run before publishing, so that users +can consume the functionality without requiring them to compile it +themselves. In dev mode (ie, locally running npm install), it'll +run this script as well, so that you can test it easily.

+

peerDependencies

+

In some cases, you want to express the compatibility of your package with a +host tool or library, while not necessarily doing a require of this host. +This is usually referred to as a plugin. Notably, your module may be exposing +a specific interface, expected and specified by the host documentation.

+

For example:

+
{
+  "name": "tea-latte",
+  "version": "1.3.5"
+  "peerDependencies": {
+    "tea": "2.x"
+  }
+}
+

This ensures your package tea-latte can be installed along with the second +major version of the host package tea only. The host package is automatically +installed if needed. npm install tea-latte could possibly yield the following +dependency graph:

+
├── tea-latte@1.3.5
+└── tea@2.2.0
+

Trying to install another plugin with a conflicting requirement will cause an +error. For this reason, make sure your plugin requirement is as broad as +possible, and not to lock it down to specific patch versions.

+

Assuming the host complies with semver, only changes in +the host package's major version will break your plugin. Thus, if you've worked +with every 1.x version of the host package, use "^1.0" or "1.x" to express +this. If you depend on features introduced in 1.5.2, use ">= 1.5.2 < 2".

+

bundledDependencies

+

Array of package names that will be bundled when publishing the package.

+

If this is spelled "bundleDependencies", then that is also honorable.

+

optionalDependencies

+

If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the optionalDependencies +object. This is a map of package name to version or url, just like the +dependencies object. The difference is that build failures do not cause +installation to fail.

+

It is still your program's responsibility to handle the lack of the +dependency. For example, something like this:

+
try {
+  var foo = require('foo')
+  var fooVersion = require('foo/package.json').version
+} catch (er) {
+  foo = null
+}
+if ( notGoodFooVersion(fooVersion) ) {
+  foo = null
+}
+
+// .. then later in your program ..
+
+if (foo) {
+  foo.doFooThings()
+}
+

Entries in optionalDependencies will override entries of the same name in +dependencies, so it's usually best to only put it in one place.

+

engines

+

You can specify the version of node that your stuff works on:

+
{ "engines" : { "node" : ">=0.10.3 <0.12" } }
+

And, like with dependencies, if you don't specify the version (or if you +specify "*" as the version), then any version of node will do.

+

If you specify an "engines" field, then npm will require that "node" be +somewhere on that list. If "engines" is omitted, then npm will just assume +that it works on node.

+

You can also use the "engines" field to specify which versions of npm +are capable of properly installing your program. For example:

+
{ "engines" : { "npm" : "~1.0.20" } }
+

Note that, unless the user has set the engine-strict config flag, this +field is advisory only.

+

engineStrict

+

If you are sure that your module will definitely not run properly on +versions of Node/npm other than those specified in the engines object, +then you can set "engineStrict": true in your package.json file. +This will override the user's engine-strict config setting.

+

Please do not do this unless you are really very very sure. If your +engines object is something overly restrictive, you can quite easily and +inadvertently lock yourself into obscurity and prevent your users from +updating to new versions of Node. Consider this choice carefully. If +people abuse it, it will be removed in a future version of npm.

+

os

+

You can specify which operating systems your +module will run on:

+
"os" : [ "darwin", "linux" ]
+

You can also blacklist instead of whitelist operating systems, +just prepend the blacklisted os with a '!':

+
"os" : [ "!win32" ]
+

The host operating system is determined by process.platform

+

It is allowed to both blacklist, and whitelist, although there isn't any +good reason to do this.

+

cpu

+

If your code only runs on certain cpu architectures, +you can specify which ones.

+
"cpu" : [ "x64", "ia32" ]
+

Like the os option, you can also blacklist architectures:

+
"cpu" : [ "!arm", "!mips" ]
+

The host architecture is determined by process.arch

+

preferGlobal

+

If your package is primarily a command-line application that should be +installed globally, then set this value to true to provide a warning +if it is installed locally.

+

It doesn't actually prevent users from installing it locally, but it +does help prevent some confusion if it doesn't work as expected.

+

private

+

If you set "private": true in your package.json, then npm will refuse +to publish it.

+

This is a way to prevent accidental publication of private repositories. If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +publishConfig dictionary described below to override the registry config +param at publish-time.

+

publishConfig

+

This is a set of config values that will be used at publish-time. It's +especially handy if you want to set the tag or registry, so that you can +ensure that a given package is not tagged with "latest" or published to +the global public registry by default.

+

Any config values can be overridden, but of course only "tag" and +"registry" probably matter for the purposes of publishing.

+

See npm-config(7) to see the list of config options that can be +overridden.

+

DEFAULT VALUES

+

npm will default some values based on package contents.

+
    +
  • "scripts": {"start": "node server.js"}

    +

    If there is a server.js file in the root of your package, then npm +will default the start command to node server.js.

    +
  • +
  • "scripts":{"preinstall": "node-gyp rebuild"}

    +

    If there is a binding.gyp file in the root of your package, npm will +default the preinstall command to compile using node-gyp.

    +
  • +
  • "contributors": [...]

    +

    If there is an AUTHORS file in the root of your package, npm will +treat each line as a Name <email> (url) format, where email and url +are optional. Lines which start with a # or are blank, will be +ignored.

    +
  • +
+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/index.html nodejs-0.11.15/deps/npm/html/partial/doc/index.html --- nodejs-0.11.13/deps/npm/html/partial/doc/index.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/index.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,210 @@ +

npm-index

Index of all npm documentation

+

README

+

node package manager

+

Command Line Documentation

+

Using npm on the command line

+

npm(1)

+

node package manager

+

npm-adduser(1)

+

Add a registry user account

+

npm-bin(1)

+

Display npm bin folder

+

npm-bugs(1)

+

Bugs for a package in a web browser maybe

+

npm-build(1)

+

Build a package

+

npm-bundle(1)

+

REMOVED

+

npm-cache(1)

+

Manipulates packages cache

+

npm-completion(1)

+

Tab Completion for npm

+

npm-config(1)

+

Manage the npm configuration files

+

npm-dedupe(1)

+

Reduce duplication

+

npm-deprecate(1)

+

Deprecate a version of a package

+

npm-docs(1)

+

Docs for a package in a web browser maybe

+

npm-edit(1)

+

Edit an installed package

+

npm-explore(1)

+

Browse an installed package

+

npm-help-search(1)

+

Search npm help documentation

+

npm-help(1)

+

Get help on npm

+

npm-init(1)

+

Interactively create a package.json file

+

npm-install(1)

+

Install a package

+ +

Symlink a package folder

+

npm-ls(1)

+

List installed packages

+

npm-outdated(1)

+

Check for outdated packages

+

npm-owner(1)

+

Manage package owners

+

npm-pack(1)

+

Create a tarball from a package

+

npm-prefix(1)

+

Display prefix

+

npm-prune(1)

+

Remove extraneous packages

+

npm-publish(1)

+

Publish a package

+

npm-rebuild(1)

+

Rebuild a package

+

npm-repo(1)

+

Open package repository page in the browser

+

npm-restart(1)

+

Start a package

+

npm-rm(1)

+

Remove a package

+

npm-root(1)

+

Display npm root

+

npm-run-script(1)

+

Run arbitrary package scripts

+

npm-search(1)

+

Search for packages

+

npm-shrinkwrap(1)

+

Lock down dependency versions

+

npm-star(1)

+

Mark your favorite packages

+

npm-stars(1)

+

View packages marked as favorites

+

npm-start(1)

+

Start a package

+

npm-stop(1)

+

Stop a package

+

npm-tag(1)

+

Tag a published version

+

npm-test(1)

+

Test a package

+

npm-uninstall(1)

+

Remove a package

+

npm-unpublish(1)

+

Remove a package from the registry

+

npm-update(1)

+

Update a package

+

npm-version(1)

+

Bump a package version

+

npm-view(1)

+

View registry info

+

npm-whoami(1)

+

Display npm username

+

API Documentation

+

Using npm in your Node programs

+

npm(3)

+

node package manager

+

npm-bin(3)

+

Display npm bin folder

+

npm-bugs(3)

+

Bugs for a package in a web browser maybe

+

npm-cache(3)

+

manage the npm cache programmatically

+

npm-commands(3)

+

npm commands

+

npm-config(3)

+

Manage the npm configuration files

+

npm-deprecate(3)

+

Deprecate a version of a package

+

npm-docs(3)

+

Docs for a package in a web browser maybe

+

npm-edit(3)

+

Edit an installed package

+

npm-explore(3)

+

Browse an installed package

+

npm-help-search(3)

+

Search the help pages

+

npm-init(3)

+

Interactively create a package.json file

+

npm-install(3)

+

install a package programmatically

+ +

Symlink a package folder

+

npm-load(3)

+

Load config settings

+

npm-ls(3)

+

List installed packages

+

npm-outdated(3)

+

Check for outdated packages

+

npm-owner(3)

+

Manage package owners

+

npm-pack(3)

+

Create a tarball from a package

+

npm-prefix(3)

+

Display prefix

+

npm-prune(3)

+

Remove extraneous packages

+

npm-publish(3)

+

Publish a package

+

npm-rebuild(3)

+

Rebuild a package

+

npm-repo(3)

+

Open package repository page in the browser

+

npm-restart(3)

+

Start a package

+

npm-root(3)

+

Display npm root

+

npm-run-script(3)

+

Run arbitrary package scripts

+

npm-search(3)

+

Search for packages

+

npm-shrinkwrap(3)

+

programmatically generate package shrinkwrap file

+

npm-start(3)

+

Start a package

+

npm-stop(3)

+

Stop a package

+

npm-tag(3)

+

Tag a published version

+

npm-test(3)

+

Test a package

+

npm-uninstall(3)

+

uninstall a package programmatically

+

npm-unpublish(3)

+

Remove a package from the registry

+

npm-update(3)

+

Update a package

+

npm-version(3)

+

Bump a package version

+

npm-view(3)

+

View registry info

+

npm-whoami(3)

+

Display npm username

+

Files

+

File system structures npm uses

+

npm-folders(5)

+

Folder Structures Used by npm

+

npmrc(5)

+

The npm config files

+

package.json(5)

+

Specifics of npm's package.json handling

+

Misc

+

Various other bits and bobs

+

npm-coding-style(7)

+

npm's "funny" coding style

+

npm-config(7)

+

More than you probably want to know about npm configuration

+

npm-developers(7)

+

Developer Guide

+

npm-disputes(7)

+

Handling Module Name Disputes

+

npm-faq(7)

+

Frequently Asked Questions

+

npm-index(7)

+

Index of all npm documentation

+

npm-registry(7)

+

The JavaScript Package Registry

+

npm-scope(7)

+

Scoped packages

+

npm-scripts(7)

+

How npm handles the "scripts" field

+

removing-npm(7)

+

Cleaning the Slate

+

semver(7)

+

The semantic versioner for npm

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-coding-style.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-coding-style.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-coding-style.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-coding-style.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,127 @@ +

npm-coding-style

npm's "funny" coding style

+

DESCRIPTION

+

npm's coding style is a bit unconventional. It is not different for +difference's sake, but rather a carefully crafted style that is +designed to reduce visual clutter and make bugs more apparent.

+

If you want to contribute to npm (which is very encouraged), you should +make your code conform to npm's style.

+

Note: this concerns npm's code not the specific packages at npmjs.org

+

Line Length

+

Keep lines shorter than 80 characters. It's better for lines to be +too short than to be too long. Break up long lists, objects, and other +statements onto multiple lines.

+

Indentation

+

Two-spaces. Tabs are better, but they look like hell in web browsers +(and on github), and node uses 2 spaces, so that's that.

+

Configure your editor appropriately.

+

Curly braces

+

Curly braces belong on the same line as the thing that necessitates them.

+

Bad:

+
function ()
+{
+

Good:

+
function () {
+

If a block needs to wrap to the next line, use a curly brace. Don't +use it if it doesn't.

+

Bad:

+
if (foo) { bar() }
+while (foo)
+  bar()
+

Good:

+
if (foo) bar()
+while (foo) {
+  bar()
+}
+

Semicolons

+

Don't use them except in four situations:

+
    +
  • for (;;) loops. They're actually required.
  • +
  • null loops like: while (something) ; (But you'd better have a good +reason for doing that.)
  • +
  • case "foo": doSomething(); break
  • +
  • In front of a leading ( or [ at the start of the line. +This prevents the expression from being interpreted +as a function call or property access, respectively.
  • +
+

Some examples of good semicolon usage:

+
;(x || y).doSomething()
+;[a, b, c].forEach(doSomething)
+for (var i = 0; i < 10; i ++) {
+  switch (state) {
+    case "begin": start(); continue
+    case "end": finish(); break
+    default: throw new Error("unknown state")
+  }
+  end()
+}
+

Note that starting lines with - and + also should be prefixed +with a semicolon, but this is much less common.

+

Comma First

+

If there is a list of things separated by commas, and it wraps +across multiple lines, put the comma at the start of the next +line, directly below the token that starts the list. Put the +final token in the list on a line by itself. For example:

+
var magicWords = [ "abracadabra"
+                 , "gesundheit"
+                 , "ventrilo"
+                 ]
+  , spells = { "fireball" : function () { setOnFire() }
+             , "water" : function () { putOut() }
+             }
+  , a = 1
+  , b = "abc"
+  , etc
+  , somethingElse
+

Whitespace

+

Put a single space in front of ( for anything other than a function call. +Also use a single space wherever it makes things more readable.

+

Don't leave trailing whitespace at the end of lines. Don't indent empty +lines. Don't use more spaces than are helpful.

+

Functions

+

Use named functions. They make stack traces a lot easier to read.

+

Callbacks, Sync/async Style

+

Use the asynchronous/non-blocking versions of things as much as possible. +It might make more sense for npm to use the synchronous fs APIs, but this +way, the fs and http and child process stuff all uses the same callback-passing +methodology.

+

The callback should always be the last argument in the list. Its first +argument is the Error or null.

+

Be very careful never to ever ever throw anything. It's worse than useless. +Just send the error message back as the first argument to the callback.

+

Errors

+

Always create a new Error object with your message. Don't just return a +string message to the callback. Stack traces are handy.

+

Logging

+

Logging is done using the npmlog +utility.

+

Please clean up logs when they are no longer helpful. In particular, +logging the same object over and over again is not helpful. Logs should +report what's happening so that it's easier to track down where a fault +occurs.

+

Use appropriate log levels. See npm-config(7) and search for +"loglevel".

+

Case, naming, etc.

+

Use lowerCamelCase for multiword identifiers when they refer to objects, +functions, methods, properties, or anything not specified in this section.

+

Use UpperCamelCase for class names (things that you'd pass to "new").

+

Use all-lower-hyphen-css-case for multiword filenames and config keys.

+

Use named functions. They make stack traces easier to follow.

+

Use CAPS_SNAKE_CASE for constants, things that should never change +and are rarely used.

+

Use a single uppercase letter for function names where the function +would normally be anonymous, but needs to call itself recursively. It +makes it clear that it's a "throwaway" function.

+

null, undefined, false, 0

+

Boolean variables and functions should always be either true or +false. Don't set it to 0 unless it's supposed to be a number.

+

When something is intentionally missing or removed, set it to null.

+

Don't set things to undefined. Reserve that value to mean "not yet +set to anything."

+

Boolean objects are verboten.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-config.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-config.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-config.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-config.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,739 @@ +

npm-config

More than you probably want to know about npm configuration

+

DESCRIPTION

+

npm gets its configuration values from 6 sources, in this priority:

+

Command Line Flags

+

Putting --foo bar on the command line sets the foo configuration +parameter to "bar". A -- argument tells the cli parser to stop +reading flags. A --flag parameter that is at the end of the +command will be given the value of true.

+

Environment Variables

+

Any environment variables that start with npm_config_ will be +interpreted as a configuration parameter. For example, putting +npm_config_foo=bar in your environment will set the foo +configuration parameter to bar. Any environment configurations that +are not given a value will be given the value of true. Config +values are case-insensitive, so NPM_CONFIG_FOO=bar will work the +same.

+

npmrc Files

+

The four relevant files are:

+
    +
  • per-project config file (/path/to/my/project/.npmrc)
  • +
  • per-user config file (~/.npmrc)
  • +
  • global config file ($PREFIX/npmrc)
  • +
  • npm builtin config file (/path/to/npm/npmrc)
  • +
+

See npmrc(5) for more details.

+

Default Configs

+

A set of configuration parameters that are internal to npm, and are +defaults if nothing else is specified.

+

Shorthands and Other CLI Niceties

+

The following shorthands are parsed on the command-line:

+
    +
  • -v: --version
  • +
  • -h, -?, --help, -H: --usage
  • +
  • -s, --silent: --loglevel silent
  • +
  • -q, --quiet: --loglevel warn
  • +
  • -d: --loglevel info
  • +
  • -dd, --verbose: --loglevel verbose
  • +
  • -ddd: --loglevel silly
  • +
  • -g: --global
  • +
  • -C: --prefix
  • +
  • -l: --long
  • +
  • -m: --message
  • +
  • -p, --porcelain: --parseable
  • +
  • -reg: --registry
  • +
  • -v: --version
  • +
  • -f: --force
  • +
  • -desc: --description
  • +
  • -S: --save
  • +
  • -D: --save-dev
  • +
  • -O: --save-optional
  • +
  • -B: --save-bundle
  • +
  • -E: --save-exact
  • +
  • -y: --yes
  • +
  • -n: --yes false
  • +
  • ll and la commands: ls --long
  • +
+

If the specified configuration param resolves unambiguously to a known +configuration parameter, then it is expanded to that configuration +parameter. For example:

+
npm ls --par
+# same as:
+npm ls --parseable
+

If multiple single-character shorthands are strung together, and the +resulting combination is unambiguously not some other configuration +param, then it is expanded to its various component pieces. For +example:

+
npm ls -gpld
+# same as:
+npm ls --global --parseable --long --loglevel info
+

Per-Package Config Settings

+

When running scripts (see npm-scripts(7)) the package.json "config" +keys are overwritten in the environment if there is a config param of +<name>[@<version>]:<key>. For example, if the package.json has +this:

+
{ "name" : "foo"
+, "config" : { "port" : "8080" }
+, "scripts" : { "start" : "node server.js" } }
+

and the server.js is this:

+
http.createServer(...).listen(process.env.npm_package_config_port)
+

then the user could change the behavior by doing:

+
npm config set foo:port 80
+

See package.json(5) for more information.

+

Config Settings

+

always-auth

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Force npm to always require authentication when accessing the registry, +even for GET requests.

+ +
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Tells npm to create symlinks (or .cmd shims on Windows) for package +executables.

+

Set to false to have it not do this. This can be used to work around +the fact that some file systems don't support symlinks, even on +ostensibly Unix systems.

+

browser

+
    +
  • Default: OS X: "open", Windows: "start", Others: "xdg-open"
  • +
  • Type: String
  • +
+

The browser that is called by the npm docs command to open websites.

+

ca

+
    +
  • Default: The npm CA certificate
  • +
  • Type: String or null
  • +
+

The Certificate Authority signing certificate that is trusted for SSL +connections to the registry.

+

Set to null to only allow "known" registrars, or to a specific CA cert +to trust only that specific signing authority.

+

See also the strict-ssl config.

+

cafile

+
    +
  • Default: null
  • +
  • Type: path
  • +
+

A path to a file containing one or multiple Certificate Authority signing +certificates. Similar to the ca setting, but allows for multiple CA's, as +well as for the CA information to be stored in a file on disk.

+

cache

+
    +
  • Default: Windows: %AppData%\npm-cache, Posix: ~/.npm
  • +
  • Type: path
  • +
+

The location of npm's cache directory. See npm-cache(1)

+

cache-lock-stale

+
    +
  • Default: 60000 (1 minute)
  • +
  • Type: Number
  • +
+

The number of ms before cache folder lockfiles are considered stale.

+

cache-lock-retries

+
    +
  • Default: 10
  • +
  • Type: Number
  • +
+

Number of times to retry to acquire a lock on cache folder lockfiles.

+

cache-lock-wait

+
    +
  • Default: 10000 (10 seconds)
  • +
  • Type: Number
  • +
+

Number of ms to wait for cache lock files to expire.

+

cache-max

+
    +
  • Default: Infinity
  • +
  • Type: Number
  • +
+

The maximum time (in seconds) to keep items in the registry cache before +re-checking against the registry.

+

Note that no purging is done unless the npm cache clean command is +explicitly used, and that only GET requests use the cache.

+

cache-min

+
    +
  • Default: 10
  • +
  • Type: Number
  • +
+

The minimum time (in seconds) to keep items in the registry cache before +re-checking against the registry.

+

Note that no purging is done unless the npm cache clean command is +explicitly used, and that only GET requests use the cache.

+

cert

+
    +
  • Default: null
  • +
  • Type: String
  • +
+

A client certificate to pass when accessing the registry.

+

color

+
    +
  • Default: true on Posix, false on Windows
  • +
  • Type: Boolean or "always"
  • +
+

If false, never shows colors. If "always" then always shows colors. +If true, then only prints color codes for tty file descriptors.

+

depth

+
    +
  • Default: Infinity
  • +
  • Type: Number
  • +
+

The depth to go when recursing directories for npm ls and +npm cache ls.

+

description

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Show the description in npm search

+

dev

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Install dev-dependencies along with packages.

+

Note that dev-dependencies are also installed if the npat flag is +set.

+

editor

+
    +
  • Default: EDITOR environment variable if set, or "vi" on Posix, +or "notepad" on Windows.
  • +
  • Type: path
  • +
+

The command to run for npm edit or npm config edit.

+

engine-strict

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

If set to true, then npm will stubbornly refuse to install (or even +consider installing) any package that claims to not be compatible with +the current Node.js version.

+

force

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Makes various commands more forceful.

+
    +
  • lifecycle script failure does not block progress.
  • +
  • publishing clobbers previously published versions.
  • +
  • skips cache when requesting from the registry.
  • +
  • prevents checks against clobbering non-npm files.
  • +
+

fetch-retries

+
    +
  • Default: 2
  • +
  • Type: Number
  • +
+

The "retries" config for the retry module to use when fetching +packages from the registry.

+

fetch-retry-factor

+
    +
  • Default: 10
  • +
  • Type: Number
  • +
+

The "factor" config for the retry module to use when fetching +packages.

+

fetch-retry-mintimeout

+
    +
  • Default: 10000 (10 seconds)
  • +
  • Type: Number
  • +
+

The "minTimeout" config for the retry module to use when fetching +packages.

+

fetch-retry-maxtimeout

+
    +
  • Default: 60000 (1 minute)
  • +
  • Type: Number
  • +
+

The "maxTimeout" config for the retry module to use when fetching +packages.

+

git

+
    +
  • Default: "git"
  • +
  • Type: String
  • +
+

The command to use for git commands. If git is installed on the +computer, but is not in the PATH, then set this to the full path to +the git binary.

+

git-tag-version

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Tag the commit when using the npm version command.

+

global

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Operates in "global" mode, so that packages are installed into the +prefix folder instead of the current working directory. See +npm-folders(5) for more on the differences in behavior.

+
    +
  • packages are installed into the {prefix}/lib/node_modules folder, instead of the +current working directory.
  • +
  • bin files are linked to {prefix}/bin
  • +
  • man pages are linked to {prefix}/share/man
  • +
+

globalconfig

+
    +
  • Default: {prefix}/etc/npmrc
  • +
  • Type: path
  • +
+

The config file to read for global config options.

+

group

+
    +
  • Default: GID of the current process
  • +
  • Type: String or Number
  • +
+

The group to use when running package scripts in global mode as the root +user.

+

heading

+
    +
  • Default: "npm"
  • +
  • Type: String
  • +
+

The string that starts all the debugging log output.

+

https-proxy

+
    +
  • Default: the HTTPS_PROXY or https_proxy or HTTP_PROXY or +http_proxy environment variables.
  • +
  • Type: url
  • +
+

A proxy to use for outgoing https requests.

+

ignore-scripts

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

If true, npm does not run scripts specified in package.json files.

+

init-module

+
    +
  • Default: ~/.npm-init.js
  • +
  • Type: path
  • +
+

A module that will be loaded by the npm init command. See the +documentation for the +init-package-json module +for more information, or npm-init(1).

+

init-author-name

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

The value npm init should use by default for the package author's name.

+

init-author-email

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

The value npm init should use by default for the package author's email.

+

init-author-url

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

The value npm init should use by default for the package author's homepage.

+

init-license

+
    +
  • Default: "ISC"
  • +
  • Type: String
  • +
+

The value npm init should use by default for the package license.

+

init-version

+
    +
  • Default: "0.0.0"
  • +
  • Type: semver
  • +
+

The value that npm init should use by default for the package +version number, if not already set in package.json.

+

json

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Whether or not to output JSON data, rather than the normal output.

+

This feature is currently experimental, and the output data structures +for many commands is either not implemented in JSON yet, or subject to +change. Only the output from npm ls --json is currently valid.

+

key

+
    +
  • Default: null
  • +
  • Type: String
  • +
+

A client key to pass when accessing the registry.

+ +
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

If true, then local installs will link if there is a suitable globally +installed package.

+

Note that this means that local installs can cause things to be +installed into the global space at the same time. The link is only done +if one of the two conditions are met:

+
    +
  • The package is not already installed globally, or
  • +
  • the globally installed version is identical to the version that is +being installed locally.
  • +
+

local-address

+
    +
  • Default: undefined
  • +
  • Type: IP Address
  • +
+

The IP address of the local interface to use when making connections +to the npm registry. Must be IPv4 in versions of Node prior to 0.12.

+

loglevel

+
    +
  • Default: "warn"
  • +
  • Type: String
  • +
  • Values: "silent", "error", "warn", "http", "info", "verbose", "silly"
  • +
+

What level of logs to report. On failure, all logs are written to +npm-debug.log in the current working directory.

+

Any logs of a higher level than the setting are shown. +The default is "warn", which shows warn and error output.

+

logstream

+
    +
  • Default: process.stderr
  • +
  • Type: Stream
  • +
+

This is the stream that is passed to the +npmlog module at run time.

+

It cannot be set from the command line, but if you are using npm +programmatically, you may wish to send logs to somewhere other than +stderr.

+

If the color config is set to true, then this stream will receive +colored output if it is a TTY.

+

long

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Show extended information in npm ls and npm search.

+

message

+
    +
  • Default: "%s"
  • +
  • Type: String
  • +
+

Commit message which is used by npm version when creating version commit.

+

Any "%s" in the message will be replaced with the version number.

+

node-version

+
    +
  • Default: process.version
  • +
  • Type: semver or false
  • +
+

The node version to use when checking a package's engines map.

+

npat

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Run tests on installation.

+

onload-script

+
    +
  • Default: false
  • +
  • Type: path
  • +
+

A node module to require() when npm loads. Useful for programmatic +usage.

+

optional

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Attempt to install packages in the optionalDependencies object. Note +that if these packages fail to install, the overall installation +process is not aborted.

+

parseable

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Output parseable results from commands that write to +standard output.

+

prefix

+ +

The location to install global items. If set on the command line, then +it forces non-global commands to run in the specified folder.

+

production

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Set to true to run in "production" mode.

+
    +
  1. devDependencies are not installed at the topmost level when running +local npm install without any arguments.
  2. +
  3. Set the NODE_ENV="production" for lifecycle scripts.
  4. +
+

proprietary-attribs

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Whether or not to include proprietary extended attributes in the +tarballs created by npm.

+

Unless you are expecting to unpack package tarballs with something other +than npm -- particularly a very outdated tar implementation -- leave +this as true.

+

proxy

+
    +
  • Default: HTTP_PROXY or http_proxy environment variable, or null
  • +
  • Type: url
  • +
+

A proxy to use for outgoing http requests.

+

rebuild-bundle

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Rebuild bundled dependencies after installation.

+

registry

+ +

The base URL of the npm package registry.

+

rollback

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Remove failed installs.

+

save

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Save installed packages to a package.json file as dependencies.

+

When used with the npm rm command, it removes it from the dependencies +object.

+

Only works if there is already a package.json file present.

+

save-bundle

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

If a package would be saved at install time by the use of --save, +--save-dev, or --save-optional, then also put it in the +bundleDependencies list.

+

When used with the npm rm command, it removes it from the +bundledDependencies list.

+

save-dev

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Save installed packages to a package.json file as devDependencies.

+

When used with the npm rm command, it removes it from the +devDependencies object.

+

Only works if there is already a package.json file present.

+

save-exact

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Dependencies saved to package.json using --save, --save-dev or +--save-optional will be configured with an exact version rather than +using npm's default semver range operator.

+

save-optional

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Save installed packages to a package.json file as +optionalDependencies.

+

When used with the npm rm command, it removes it from the optionalDependencies object.

+

Only works if there is already a package.json file present.

+

save-prefix

+
    +
  • Default: '^'
  • +
  • Type: String
  • +
+

Configure how versions of packages installed to a package.json file via +--save or --save-dev get prefixed.

+

For example, if a package has version 1.2.3, by default its version is set to ^1.2.3 which allows minor upgrades for that package, but after npm config set save-prefix='~' it would be set to ~1.2.3 which only allows patch upgrades.

+

scope

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

Associate an operation with a scope for a scoped registry. Useful when logging +in to a private registry for the first time: +npm login --scope=@organization --registry=registry.organization.com, which +will cause @organization to be mapped to the registry for future installation +of packages specified according to the pattern @organization/package.

+

searchopts

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

Space-separated options that are always passed to search.

+

searchexclude

+
    +
  • Default: ""
  • +
  • Type: String
  • +
+

Space-separated options that limit the results from search.

+

searchsort

+
    +
  • Default: "name"
  • +
  • Type: String
  • +
  • Values: "name", "-name", "date", "-date", "description", +"-description", "keywords", "-keywords"
  • +
+

Indication of which field to sort search results by. Prefix with a - +character to indicate reverse sort.

+

shell

+
    +
  • Default: SHELL environment variable, or "bash" on Posix, or "cmd" on +Windows
  • +
  • Type: path
  • +
+

The shell to run for the npm explore command.

+

shrinkwrap

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

If set to false, then ignore npm-shrinkwrap.json files when +installing.

+

sign-git-tag

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

If set to true, then the npm version command will tag the version +using -s to add a signature.

+

Note that git requires you to have set up GPG keys in your git configs +for this to work properly.

+

spin

+
    +
  • Default: true
  • +
  • Type: Boolean or "always"
  • +
+

When set to true, npm will display an ascii spinner while it is doing +things, if process.stderr is a TTY.

+

Set to false to suppress the spinner, or set to always to output +the spinner even for non-TTY outputs.

+

strict-ssl

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

Whether or not to do SSL key validation when making requests to the +registry via https.

+

See also the ca config.

+

tag

+
    +
  • Default: latest
  • +
  • Type: String
  • +
+

If you ask npm to install a package and don't tell it a specific version, then +it will install the specified tag.

+

Also the tag that is added to the package@version specified by the npm +tag command, if no explicit tag is given.

+

tmp

+
    +
  • Default: TMPDIR environment variable, or "/tmp"
  • +
  • Type: path
  • +
+

Where to store temporary files and folders. All temp files are deleted +on success, but left behind on failure for forensic purposes.

+

unicode

+
    +
  • Default: true
  • +
  • Type: Boolean
  • +
+

When set to true, npm uses unicode characters in the tree output. When +false, it uses ascii characters to draw trees.

+

unsafe-perm

+
    +
  • Default: false if running as root, true otherwise
  • +
  • Type: Boolean
  • +
+

Set to true to suppress the UID/GID switching when running package +scripts. If set explicitly to false, then installing as a non-root user +will fail.

+

usage

+
    +
  • Default: false
  • +
  • Type: Boolean
  • +
+

Set to show short usage output (like the -H output) +instead of complete help when doing npm-help(1).

+

user

+
    +
  • Default: "nobody"
  • +
  • Type: String or Number
  • +
+

The UID to set to when running package scripts as root.

+

userconfig

+
    +
  • Default: ~/.npmrc
  • +
  • Type: path
  • +
+

The location of user-level configuration settings.

+

umask

+
    +
  • Default: 022
  • +
  • Type: Octal numeric string
  • +
+

The "umask" value to use when setting the file creation mode on files +and folders.

+

Folders and executables are given a mode which is 0777 masked against +this value. Other files are given a mode which is 0666 masked against +this value. Thus, the defaults are 0755 and 0644 respectively.

+

user-agent

+
    +
  • Default: node/{process.version} {process.platform} {process.arch}
  • +
  • Type: String
  • +
+

Sets a User-Agent to the request header

+

version

+
    +
  • Default: false
  • +
  • Type: boolean
  • +
+

If true, output the npm version and exit successfully.

+

Only relevant when specified explicitly on the command line.

+

versions

+
    +
  • Default: false
  • +
  • Type: boolean
  • +
+

If true, output the npm version as well as node's process.versions map, and +exit successfully.

+

Only relevant when specified explicitly on the command line.

+

viewer

+
    +
  • Default: "man" on Posix, "browser" on Windows
  • +
  • Type: path
  • +
+

The program to use to view help content.

+

Set to "browser" to view html help content in the default web browser.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-developers.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-developers.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-developers.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-developers.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,161 @@ +

npm-developers

Developer Guide

+

DESCRIPTION

+

So, you've decided to use npm to develop (and maybe publish/deploy) +your project.

+

Fantastic!

+

There are a few things that you need to do above the simple steps +that your users will do to install your program.

+

About These Documents

+

These are man pages. If you install npm, you should be able to +then do man npm-thing to get the documentation on a particular +topic, or npm help thing to see the same information.

+

What is a package

+

A package is:

+
    +
  • a) a folder containing a program described by a package.json file
  • +
  • b) a gzipped tarball containing (a)
  • +
  • c) a url that resolves to (b)
  • +
  • d) a <name>@<version> that is published on the registry with (c)
  • +
  • e) a <name>@<tag> that points to (d)
  • +
  • f) a <name> that has a "latest" tag satisfying (e)
  • +
  • g) a git url that, when cloned, results in (a).
  • +
+

Even if you never publish your package, you can still get a lot of +benefits of using npm if you just want to write a node program (a), and +perhaps if you also want to be able to easily install it elsewhere +after packing it up into a tarball (b).

+

Git urls can be of the form:

+
git://github.com/user/project.git#commit-ish
+git+ssh://user@hostname:project.git#commit-ish
+git+http://user@hostname/project/blah.git#commit-ish
+git+https://user@hostname/project/blah.git#commit-ish
+

The commit-ish can be any tag, sha, or branch which can be supplied as +an argument to git checkout. The default is master.

+

The package.json File

+

You need to have a package.json file in the root of your project to do +much of anything with npm. That is basically the whole interface.

+

See package.json(5) for details about what goes in that file. At the very +least, you need:

+
    +
  • name: +This should be a string that identifies your project. Please do not +use the name to specify that it runs on node, or is in JavaScript. +You can use the "engines" field to explicitly state the versions of +node (or whatever else) that your program requires, and it's pretty +well assumed that it's javascript.

    +

    It does not necessarily need to match your github repository name.

    +

    So, node-foo and bar-js are bad names. foo or bar are better.

    +
  • +
  • version: +A semver-compatible version.

    +
  • +
  • engines: +Specify the versions of node (or whatever else) that your program +runs on. The node API changes a lot, and there may be bugs or new +functionality that you depend on. Be explicit.

    +
  • +
  • author: +Take some credit.

    +
  • +
  • scripts: +If you have a special compilation or installation script, then you +should put it in the scripts object. You should definitely have at +least a basic smoke-test command as the "scripts.test" field. +See npm-scripts(7).

    +
  • +
  • main: +If you have a single module that serves as the entry point to your +program (like what the "foo" package gives you at require("foo")), +then you need to specify that in the "main" field.

    +
  • +
  • directories: +This is an object mapping names to folders. The best ones to include are +"lib" and "doc", but if you use "man" to specify a folder full of man pages, +they'll get installed just like these ones.

    +
  • +
+

You can use npm init in the root of your package in order to get you +started with a pretty basic package.json file. See npm-init(1) for +more info.

+

Keeping files out of your package

+

Use a .npmignore file to keep stuff out of your package. If there's +no .npmignore file, but there is a .gitignore file, then npm will +ignore the stuff matched by the .gitignore file. If you want to +include something that is excluded by your .gitignore file, you can +create an empty .npmignore file to override it.

+

By default, the following paths and files are ignored, so there's no +need to add them to .npmignore explicitly:

+
    +
  • .*.swp
  • +
  • ._*
  • +
  • .DS_Store
  • +
  • .git
  • +
  • .hg
  • +
  • .lock-wscript
  • +
  • .svn
  • +
  • .wafpickle-*
  • +
  • CVS
  • +
  • npm-debug.log
  • +
+

Additionally, everything in node_modules is ignored, except for +bundled dependencies. npm automatically handles this for you, so don't +bother adding node_modules to .npmignore.

+

The following paths and files are never ignored, so adding them to +.npmignore is pointless:

+ + +

npm link is designed to install a development package and see the +changes in real time without having to keep re-installing it. (You do +need to either re-link or npm rebuild -g to update compiled packages, +of course.)

+

More info at npm-link(1).

+

Before Publishing: Make Sure Your Package Installs and Works

+

This is important.

+

If you can not install it locally, you'll have +problems trying to publish it. Or, worse yet, you'll be able to +publish it, but you'll be publishing a broken or pointless package. +So don't do that.

+

In the root of your package, do this:

+
npm install . -g
+

That'll show you that it's working. If you'd rather just create a symlink +package that points to your working directory, then do this:

+
npm link
+

Use npm ls -g to see if it's there.

+

To test a local install, go into some other folder, and then do:

+
cd ../some-other-folder
+npm install ../my-package
+

to install it locally into the node_modules folder in that other place.

+

Then go into the node-repl, and try using require("my-thing") to +bring in your module's main module.

+

Create a User Account

+

Create a user with the adduser command. It works like this:

+
npm adduser
+

and then follow the prompts.

+

This is documented better in npm-adduser(1).

+

Publish your package

+

This part's easy. In the root of your folder, do this:

+
npm publish
+

You can give publish a url to a tarball, or a filename of a tarball, +or a path to a folder.

+

Note that pretty much everything in that folder will be exposed +by default. So, if you have secret stuff in there, use a +.npmignore file to list out the globs to ignore, or publish +from a fresh checkout.

+

Brag about it

+

Send emails, write blogs, blab in IRC.

+

Tell the world how easy it is to install your program!

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-disputes.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-disputes.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-disputes.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-disputes.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,92 @@ +

npm-disputes

Handling Module Name Disputes

+

SYNOPSIS

+
    +
  1. Get the author email with npm owner ls <pkgname>
  2. +
  3. Email the author, CC support@npmjs.com
  4. +
  5. After a few weeks, if there's no resolution, we'll sort it out.
  6. +
+

Don't squat on package names. Publish code or move out of the way.

+

DESCRIPTION

+

There sometimes arise cases where a user publishes a module, and then +later, some other user wants to use that name. Here are some common +ways that happens (each of these is based on actual events.)

+
    +
  1. Joe writes a JavaScript module foo, which is not node-specific. +Joe doesn't use node at all. Bob wants to use foo in node, so he +wraps it in an npm module. Some time later, Joe starts using node, +and wants to take over management of his program.
  2. +
  3. Bob writes an npm module foo, and publishes it. Perhaps much +later, Joe finds a bug in foo, and fixes it. He sends a pull +request to Bob, but Bob doesn't have the time to deal with it, +because he has a new job and a new baby and is focused on his new +erlang project, and kind of not involved with node any more. Joe +would like to publish a new foo, but can't, because the name is +taken.
  4. +
  5. Bob writes a 10-line flow-control library, and calls it foo, and +publishes it to the npm registry. Being a simple little thing, it +never really has to be updated. Joe works for Foo Inc, the makers +of the critically acclaimed and widely-marketed foo JavaScript +toolkit framework. They publish it to npm as foojs, but people are +routinely confused when npm install foo is some different thing.
  6. +
  7. Bob writes a parser for the widely-known foo file format, because +he needs it for work. Then, he gets a new job, and never updates the +prototype. Later on, Joe writes a much more complete foo parser, +but can't publish, because Bob's foo is in the way.
  8. +
+

The validity of Joe's claim in each situation can be debated. However, +Joe's appropriate course of action in each case is the same.

+
    +
  1. npm owner ls foo. This will tell Joe the email address of the +owner (Bob).
  2. +
  3. Joe emails Bob, explaining the situation as respectfully as +possible, and what he would like to do with the module name. He +adds the npm support staff support@npmjs.com to the CC list of +the email. Mention in the email that Bob can run npm owner add +joe foo to add Joe as an owner of the foo package.
  4. +
  5. After a reasonable amount of time, if Bob has not responded, or if +Bob and Joe can't come to any sort of resolution, email support +support@npmjs.com and we'll sort it out. ("Reasonable" is +usually at least 4 weeks, but extra time is allowed around common +holidays.)
  6. +
+

REASONING

+

In almost every case so far, the parties involved have been able to reach +an amicable resolution without any major intervention. Most people +really do want to be reasonable, and are probably not even aware that +they're in your way.

+

Module ecosystems are most vibrant and powerful when they are as +self-directed as possible. If an admin one day deletes something you +had worked on, then that is going to make most people quite upset, +regardless of the justification. When humans solve their problems by +talking to other humans with respect, everyone has the chance to end up +feeling good about the interaction.

+

EXCEPTIONS

+

Some things are not allowed, and will be removed without discussion if +they are brought to the attention of the npm registry admins, including +but not limited to:

+
    +
  1. Malware (that is, a package designed to exploit or harm the machine on +which it is installed).
  2. +
  3. Violations of copyright or licenses (for example, cloning an +MIT-licensed program, and then removing or changing the copyright and +license statement).
  4. +
  5. Illegal content.
  6. +
  7. "Squatting" on a package name that you plan to use, but aren't +actually using. Sorry, I don't care how great the name is, or how +perfect a fit it is for the thing that someday might happen. If +someone wants to use it today, and you're just taking up space with +an empty tarball, you're going to be evicted.
  8. +
  9. Putting empty packages in the registry. Packages must have SOME +functionality. It can be silly, but it can't be nothing. (See +also: squatting.)
  10. +
  11. Doing weird things with the registry, like using it as your own +personal application database or otherwise putting non-packagey +things into it.
  12. +
+

If you see bad behavior like this, please report it right away.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-faq.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-faq.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-faq.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-faq.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,264 @@ +

npm-faq

Frequently Asked Questions

+

Where can I find these docs in HTML?

+

https://www.npmjs.org/doc/, or run:

+
npm config set viewer browser
+

to open these documents in your default web browser rather than man.

+

It didn't work.

+

That's not really a question.

+

Why didn't it work?

+

I don't know yet.

+

Read the error output, and if you can't figure out what it means, +do what it says and post a bug with all the information it asks for.

+

Where does npm put stuff?

+

See npm-folders(5)

+

tl;dr:

+
    +
  • Use the npm root command to see where modules go, and the npm bin +command to see where executables go
  • +
  • Global installs are different from local installs. If you install +something with the -g flag, then its executables go in npm bin -g +and its modules go in npm root -g.
  • +
+

How do I install something on my computer in a central location?

+

Install it globally by tacking -g or --global to the command. (This +is especially important for command line utilities that need to add +their bins to the global system PATH.)

+

I installed something globally, but I can't require() it

+

Install it locally.

+

The global install location is a place for command-line utilities +to put their bins in the system PATH. It's not for use with require().

+

If you require() a module in your code, then that means it's a +dependency, and a part of your program. You need to install it locally +in your program.

+

Why can't npm just put everything in one place, like other package managers?

+

Not every change is an improvement, but every improvement is a change. +This would be like asking git to do network IO for every commit. It's +not going to happen, because it's a terrible idea that causes more +problems than it solves.

+

It is much harder to avoid dependency conflicts without nesting +dependencies. This is fundamental to the way that npm works, and has +proven to be an extremely successful approach. See npm-folders(5) for +more details.

+

If you want a package to be installed in one place, and have all your +programs reference the same copy of it, then use the npm link command. +That's what it's for. Install it globally, then link it into each +program that uses it.

+

Whatever, I really want the old style 'everything global' style.

+

Write your own package manager. You could probably even wrap up npm +in a shell script if you really wanted to.

+

npm will not help you do something that is known to be a bad idea.

+

Should I check my node_modules folder into git?

+

Usually, no. Allow npm to resolve dependencies for your packages.

+

For packages you deploy, such as websites and apps, +you should use npm shrinkwrap to lock down your full dependency tree:

+

https://www.npmjs.org/doc/cli/npm-shrinkwrap.html

+

If you are paranoid about depending on the npm ecosystem, +you should run a private npm mirror or a private cache.

+

If you want 100% confidence in being able to reproduce the specific bytes +included in a deployment, you should use an additional mechanism that can +verify contents rather than versions. For example, +Amazon machine images, DigitalOcean snapshots, Heroku slugs, or simple tarballs.

+

Is it 'npm' or 'NPM' or 'Npm'?

+

npm should never be capitalized unless it is being displayed in a +location that is customarily all-caps (such as the title of man pages.)

+

If 'npm' is an acronym, why is it never capitalized?

+

Contrary to the belief of many, "npm" is not in fact an abbreviation for +"Node Package Manager". It is a recursive bacronymic abbreviation for +"npm is not an acronym". (If it was "ninaa", then it would be an +acronym, and thus incorrectly named.)

+

"NPM", however, is an acronym (more precisely, a capitonym) for the +National Association of Pastoral Musicians. You can learn more +about them at http://npm.org/.

+

In software, "NPM" is a Non-Parametric Mapping utility written by +Chris Rorden. You can analyze pictures of brains with it. Learn more +about the (capitalized) NPM program at http://www.cabiatl.com/mricro/npm/.

+

The first seed that eventually grew into this flower was a bash utility +named "pm", which was a shortened descendent of "pkgmakeinst", a +bash function that was used to install various different things on different +platforms, most often using Yahoo's yinst. If npm was ever an +acronym for anything, it was node pm or maybe new pm.

+

So, in all seriousness, the "npm" project is named after its command-line utility, which was organically selected to be easily typed by a right-handed programmer using a US QWERTY keyboard layout, ending with the right-ring-finger in a position to type the - key for flags and other command-line arguments. That command-line utility is always lower-case, though it starts most sentences it is a part of.

+

How do I list installed packages?

+

npm ls

+

How do I search for packages?

+

npm search

+

Arguments are greps. npm search jsdom shows jsdom packages.

+

How do I update npm?

+
npm install npm -g
+

You can also update all outdated local packages by doing npm update without +any arguments, or global packages by doing npm update -g.

+

Occasionally, the version of npm will progress such that the current +version cannot be properly installed with the version that you have +installed already. (Consider, if there is ever a bug in the update +command.)

+

In those cases, you can do this:

+
curl https://www.npmjs.org/install.sh | sh
+

What is a package?

+

A package is:

+
    +
  • a) a folder containing a program described by a package.json file
  • +
  • b) a gzipped tarball containing (a)
  • +
  • c) a url that resolves to (b)
  • +
  • d) a <name>@<version> that is published on the registry with (c)
  • +
  • e) a <name>@<tag> that points to (d)
  • +
  • f) a <name> that has a "latest" tag satisfying (e)
  • +
  • g) a git url that, when cloned, results in (a).
  • +
+

Even if you never publish your package, you can still get a lot of +benefits of using npm if you just want to write a node program (a), and +perhaps if you also want to be able to easily install it elsewhere +after packing it up into a tarball (b).

+

Git urls can be of the form:

+
git://github.com/user/project.git#commit-ish
+git+ssh://user@hostname:project.git#commit-ish
+git+http://user@hostname/project/blah.git#commit-ish
+git+https://user@hostname/project/blah.git#commit-ish
+

The commit-ish can be any tag, sha, or branch which can be supplied as +an argument to git checkout. The default is master.

+

What is a module?

+

A module is anything that can be loaded with require() in a Node.js +program. The following things are all examples of things that can be +loaded as modules:

+
    +
  • A folder with a package.json file containing a main field.
  • +
  • A folder with an index.js file in it.
  • +
  • A JavaScript file.
  • +
+

Most npm packages are modules, because they are libraries that you +load with require. However, there's no requirement that an npm +package be a module! Some only contain an executable command-line +interface, and don't provide a main field for use in Node programs.

+

Almost all npm packages (at least, those that are Node programs) +contain many modules within them (because every file they load with +require() is a module).

+

In the context of a Node program, the module is also the thing that +was loaded from a file. For example, in the following program:

+
var req = require('request')
+

we might say that "The variable req refers to the request module".

+

So, why is it the "node_modules" folder, but "package.json" file? Why not node_packages or module.json?

+

The package.json file defines the package. (See "What is a +package?" above.)

+

The node_modules folder is the place Node.js looks for modules. +(See "What is a module?" above.)

+

For example, if you create a file at node_modules/foo.js and then +had a program that did var f = require('foo.js') then it would load +the module. However, foo.js is not a "package" in this case, +because it does not have a package.json.

+

Alternatively, if you create a package which does not have an +index.js or a "main" field in the package.json file, then it is +not a module. Even if it's installed in node_modules, it can't be +an argument to require().

+

"node_modules" is the name of my deity's arch-rival, and a Forbidden Word in my religion. Can I configure npm to use a different folder?

+

No. This will never happen. This question comes up sometimes, +because it seems silly from the outside that npm couldn't just be +configured to put stuff somewhere else, and then npm could load them +from there. It's an arbitrary spelling choice, right? What's the big +deal?

+

At the time of this writing, the string 'node_modules' appears 151 +times in 53 separate files in npm and node core (excluding tests and +documentation).

+

Some of these references are in node's built-in module loader. Since +npm is not involved at all at run-time, node itself would have to +be configured to know where you've decided to stick stuff. Complexity +hurdle #1. Since the Node module system is locked, this cannot be +changed, and is enough to kill this request. But I'll continue, in +deference to your deity's delicate feelings regarding spelling.

+

Many of the others are in dependencies that npm uses, which are not +necessarily tightly coupled to npm (in the sense that they do not read +npm's configuration files, etc.) Each of these would have to be +configured to take the name of the node_modules folder as a +parameter. Complexity hurdle #2.

+

Furthermore, npm has the ability to "bundle" dependencies by adding +the dep names to the "bundledDependencies" list in package.json, +which causes the folder to be included in the package tarball. What +if the author of a module bundles its dependencies, and they use a +different spelling for node_modules? npm would have to rename the +folder at publish time, and then be smart enough to unpack it using +your locally configured name. Complexity hurdle #3.

+

Furthermore, what happens when you change this name? Fine, it's +easy enough the first time, just rename the node_modules folders to +./blergyblerp/ or whatever name you choose. But what about when you +change it again? npm doesn't currently track any state about past +configuration settings, so this would be rather difficult to do +properly. It would have to track every previous value for this +config, and always accept any of them, or else yesterday's install may +be broken tomorrow. Complexity hurdle #4.

+

Never going to happen. The folder is named node_modules. It is +written indelibly in the Node Way, handed down from the ancient times +of Node 0.3.

+

How do I install node with npm?

+

You don't. Try one of these node version managers:

+

Unix:

+ +

Windows:

+ +

How can I use npm for development?

+

See npm-developers(7) and package.json(5).

+

You'll most likely want to npm link your development folder. That's +awesomely handy.

+

To set up your own private registry, check out npm-registry(7).

+

Can I list a url as a dependency?

+

Yes. It should be a url to a gzipped tarball containing a single folder +that has a package.json in its root, or a git url. +(See "what is a package?" above.)

+ +

See npm-link(1)

+

The package registry website. What is that exactly?

+

See npm-registry(7).

+

I forgot my password, and can't publish. How do I reset it?

+

Go to https://npmjs.org/forgot.

+

I get ECONNREFUSED a lot. What's up?

+

Either the registry is down, or node's DNS isn't able to reach out.

+

To check if the registry is down, open up +https://registry.npmjs.org/ in a web browser. This will also tell +you if you are just unable to access the internet for some reason.

+

If the registry IS down, let us know by emailing support@npmjs.com +or posting an issue at https://github.com/npm/npm/issues. If it's +down for the world (and not just on your local network) then we're +probably already being pinged about it.

+

You can also often get a faster response by visiting the #npm channel +on Freenode IRC.

+

Why no namespaces?

+

Please see this discussion: https://github.com/npm/npm/issues/798

+

tl;dr - It doesn't actually make things better, and can make them worse.

+

If you want to namespace your own packages, you may: simply use the +- character to separate the names. npm is a mostly anarchic system. +There is not sufficient need to impose namespace rules on everyone.

+

Who does npm?

+

npm was originally written by Isaac Z. Schlueter, and many others have +contributed to it, some of them quite substantially.

+

The npm open source project, The npm Registry, and the community +website are maintained and operated by the +good folks at npm, Inc.

+

I have a question or request not addressed here. Where should I put it?

+

Post an issue on the github project:

+ +

Why does npm hate me?

+

npm is not capable of hatred. It loves everyone, especially you.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-index.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-index.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-index.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-index.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,210 @@ +

npm-index

Index of all npm documentation

+

README

+

node package manager

+

Command Line Documentation

+

Using npm on the command line

+

npm(1)

+

node package manager

+

npm-adduser(1)

+

Add a registry user account

+

npm-bin(1)

+

Display npm bin folder

+

npm-bugs(1)

+

Bugs for a package in a web browser maybe

+

npm-build(1)

+

Build a package

+

npm-bundle(1)

+

REMOVED

+

npm-cache(1)

+

Manipulates packages cache

+

npm-completion(1)

+

Tab Completion for npm

+

npm-config(1)

+

Manage the npm configuration files

+

npm-dedupe(1)

+

Reduce duplication

+

npm-deprecate(1)

+

Deprecate a version of a package

+

npm-docs(1)

+

Docs for a package in a web browser maybe

+

npm-edit(1)

+

Edit an installed package

+

npm-explore(1)

+

Browse an installed package

+

npm-help-search(1)

+

Search npm help documentation

+

npm-help(1)

+

Get help on npm

+

npm-init(1)

+

Interactively create a package.json file

+

npm-install(1)

+

Install a package

+ +

Symlink a package folder

+

npm-ls(1)

+

List installed packages

+

npm-outdated(1)

+

Check for outdated packages

+

npm-owner(1)

+

Manage package owners

+

npm-pack(1)

+

Create a tarball from a package

+

npm-prefix(1)

+

Display prefix

+

npm-prune(1)

+

Remove extraneous packages

+

npm-publish(1)

+

Publish a package

+

npm-rebuild(1)

+

Rebuild a package

+

npm-repo(1)

+

Open package repository page in the browser

+

npm-restart(1)

+

Restart a package

+

npm-rm(1)

+

Remove a package

+

npm-root(1)

+

Display npm root

+

npm-run-script(1)

+

Run arbitrary package scripts

+

npm-search(1)

+

Search for packages

+

npm-shrinkwrap(1)

+

Lock down dependency versions

+

npm-star(1)

+

Mark your favorite packages

+

npm-stars(1)

+

View packages marked as favorites

+

npm-start(1)

+

Start a package

+

npm-stop(1)

+

Stop a package

+

npm-tag(1)

+

Tag a published version

+

npm-test(1)

+

Test a package

+

npm-uninstall(1)

+

Remove a package

+

npm-unpublish(1)

+

Remove a package from the registry

+

npm-update(1)

+

Update a package

+

npm-version(1)

+

Bump a package version

+

npm-view(1)

+

View registry info

+

npm-whoami(1)

+

Display npm username

+

API Documentation

+

Using npm in your Node programs

+

npm(3)

+

node package manager

+

npm-bin(3)

+

Display npm bin folder

+

npm-bugs(3)

+

Bugs for a package in a web browser maybe

+

npm-cache(3)

+

manage the npm cache programmatically

+

npm-commands(3)

+

npm commands

+

npm-config(3)

+

Manage the npm configuration files

+

npm-deprecate(3)

+

Deprecate a version of a package

+

npm-docs(3)

+

Docs for a package in a web browser maybe

+

npm-edit(3)

+

Edit an installed package

+

npm-explore(3)

+

Browse an installed package

+

npm-help-search(3)

+

Search the help pages

+

npm-init(3)

+

Interactively create a package.json file

+

npm-install(3)

+

install a package programmatically

+ +

Symlink a package folder

+

npm-load(3)

+

Load config settings

+

npm-ls(3)

+

List installed packages

+

npm-outdated(3)

+

Check for outdated packages

+

npm-owner(3)

+

Manage package owners

+

npm-pack(3)

+

Create a tarball from a package

+

npm-prefix(3)

+

Display prefix

+

npm-prune(3)

+

Remove extraneous packages

+

npm-publish(3)

+

Publish a package

+

npm-rebuild(3)

+

Rebuild a package

+

npm-repo(3)

+

Open package repository page in the browser

+

npm-restart(3)

+

Restart a package

+

npm-root(3)

+

Display npm root

+

npm-run-script(3)

+

Run arbitrary package scripts

+

npm-search(3)

+

Search for packages

+

npm-shrinkwrap(3)

+

programmatically generate package shrinkwrap file

+

npm-start(3)

+

Start a package

+

npm-stop(3)

+

Stop a package

+

npm-tag(3)

+

Tag a published version

+

npm-test(3)

+

Test a package

+

npm-uninstall(3)

+

uninstall a package programmatically

+

npm-unpublish(3)

+

Remove a package from the registry

+

npm-update(3)

+

Update a package

+

npm-version(3)

+

Bump a package version

+

npm-view(3)

+

View registry info

+

npm-whoami(3)

+

Display npm username

+

Files

+

File system structures npm uses

+

npm-folders(5)

+

Folder Structures Used by npm

+

npmrc(5)

+

The npm config files

+

package.json(5)

+

Specifics of npm's package.json handling

+

Misc

+

Various other bits and bobs

+

npm-coding-style(7)

+

npm's "funny" coding style

+

npm-config(7)

+

More than you probably want to know about npm configuration

+

npm-developers(7)

+

Developer Guide

+

npm-disputes(7)

+

Handling Module Name Disputes

+

npm-faq(7)

+

Frequently Asked Questions

+

npm-index(7)

+

Index of all npm documentation

+

npm-registry(7)

+

The JavaScript Package Registry

+

npm-scope(7)

+

Scoped packages

+

npm-scripts(7)

+

How npm handles the "scripts" field

+

removing-npm(7)

+

Cleaning the Slate

+

semver(7)

+

The semantic versioner for npm

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-registry.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-registry.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-registry.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-registry.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,50 @@ +

npm-registry

The JavaScript Package Registry

+

DESCRIPTION

+

To resolve packages by name and version, npm talks to a registry website +that implements the CommonJS Package Registry specification for reading +package info.

+

Additionally, npm's package registry implementation supports several +write APIs as well, to allow for publishing packages and managing user +account information.

+

The official public npm registry is at http://registry.npmjs.org/. It +is powered by a CouchDB database, of which there is a public mirror at +http://skimdb.npmjs.com/registry. The code for the couchapp is +available at http://github.com/npm/npm-registry-couchapp.

+

The registry URL used is determined by the scope of the package (see +npm-scope(7)). If no scope is specified, the default registry is used, which is +supplied by the registry config parameter. See npm-config(1), +npmrc(5), and npm-config(7) for more on managing npm's configuration.

+

Can I run my own private registry?

+

Yes!

+

The easiest way is to replicate the couch database, and use the same (or +similar) design doc to implement the APIs.

+

If you set up continuous replication from the official CouchDB, and then +set your internal CouchDB as the registry config, then you'll be able +to read any published packages, in addition to your private ones, and by +default will only publish internally. If you then want to publish a +package for the whole world to see, you can simply override the +--registry config for that command.

+

I don't want my package published in the official registry. It's private.

+

Set "private": true in your package.json to prevent it from being +published at all, or +"publishConfig":{"registry":"http://my-internal-registry.local"} +to force it to be published only to your internal registry.

+

See package.json(5) for more info on what goes in the package.json file.

+

Will you replicate from my registry into the public one?

+

No. If you want things to be public, then publish them into the public +registry using npm. What little security there is would be for nought +otherwise.

+

Do I have to use couchdb to build a registry that npm can talk to?

+

No, but it's way easier. Basically, yes, you do, or you have to +effectively implement the entire CouchDB API anyway.

+

Is there a website or something to see package docs and such?

+

Yes, head over to https://npmjs.org/

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-scope.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-scope.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-scope.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-scope.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,58 @@ +

npm-scope

Scoped packages

+

DESCRIPTION

+

All npm packages have a name. Some package names also have a scope. A scope +follows the usual rules for package names (url-safe characters, no leading dots +or underscores). When used in package names, preceded by an @-symbol and +followed by a slash, e.g.

+
@somescope/somepackagename
+

Scopes are a way of grouping related packages together, and also affect a few +things about the way npm treats the package.

+

As of 2014-09-03, scoped packages are not supported by the public npm registry. +However, the npm client is backwards-compatible with un-scoped registries, so +it can be used to work with scoped and un-scoped registries at the same time.

+

Installing scoped packages

+

Scoped packages are installed to a sub-folder of the regular installation +folder, e.g. if your other packages are installed in node_modules/packagename, +scoped modules will be in node_modules/@myorg/packagename. The scope folder +(@myorg) is simply the name of the scope preceded by an @-symbol, and can +contain any number of scoped packages.

+

A scoped package is installed by referencing it by name, preceded by an +@-symbol, in npm install:

+
npm install @myorg/mypackage
+

Or in package.json:

+
"dependencies": {
+  "@myorg/mypackage": "^1.3.0"
+}
+

Note that if the @-symbol is omitted in either case npm will instead attempt to +install from GitHub; see npm-install(1).

+

Requiring scoped packages

+

Because scoped packages are installed into a scope folder, you have to +include the name of the scope when requiring them in your code, e.g.

+
require('@myorg/mypackage')
+

There is nothing special about the way Node treats scope folders, this is +just specifying to require the module mypackage in the folder called @myorg.

+

Publishing scoped packages

+

Scoped packages can be published to any registry that supports them. +As of 2014-09-03, the public npm registry does not support scoped packages, +so attempting to publish a scoped package to the registry will fail unless +you have associated that scope with a different registry, see below.

+

Associating a scope with a registry

+

Scopes can be associated with a separate registry. This allows you to +seamlessly use a mix of packages from the public npm registry and one or more +private registries, such as npm Enterprise.

+

You can associate a scope with a registry at login, e.g.

+
npm login --registry=http://reg.example.com --scope=@myco
+

Scopes have a many-to-one relationship with registries: one registry can +host multiple scopes, but a scope only ever points to one registry.

+

You can also associate a scope with a registry using npm config:

+
npm config set @myco:registry http://reg.example.com
+

Once a scope is associated with a registry, any npm install for a package +with that scope will request packages from that registry instead. Any +npm publish for a package name that contains the scope will be published to +that registry instead.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-scripts.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-scripts.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/npm-scripts.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/npm-scripts.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,200 @@ +

npm-scripts

How npm handles the "scripts" field

+

DESCRIPTION

+

npm supports the "scripts" property of the package.json file, for the +following scripts:

+
    +
  • prepublish: +Run BEFORE the package is published. (Also run on local npm +install without any arguments.)
  • +
  • publish, postpublish: +Run AFTER the package is published.
  • +
  • preinstall: +Run BEFORE the package is installed
  • +
  • install, postinstall: +Run AFTER the package is installed.
  • +
  • preuninstall, uninstall: +Run BEFORE the package is uninstalled.
  • +
  • postuninstall: +Run AFTER the package is uninstalled.
  • +
  • preupdate: +Run BEFORE the package is updated with the update command.
  • +
  • update, postupdate: +Run AFTER the package is updated with the update command.
  • +
  • pretest, test, posttest: +Run by the npm test command.
  • +
  • prestop, stop, poststop: +Run by the npm stop command.
  • +
  • prestart, start, poststart: +Run by the npm start command.
  • +
  • prerestart, restart, postrestart: +Run by the npm restart command. Note: npm restart will run the +stop and start scripts if no restart script is provided.
  • +
+

Additionally, arbitrary scripts can be executed by running npm +run-script <pkg> <stage>. Pre and post commands with matching +names will be run for those as well (e.g. premyscript, myscript, +postmyscript).

+

NOTE: INSTALL SCRIPTS ARE AN ANTIPATTERN

+

tl;dr Don't use install. Use a .gyp file for compilation, and +prepublish for anything else.

+

You should almost never have to explicitly set a preinstall or +install script. If you are doing this, please consider if there is +another option.

+

The only valid use of install or preinstall scripts is for +compilation which must be done on the target architecture. In early +versions of node, this was often done using the node-waf scripts, or +a standalone Makefile, and early versions of npm required that it be +explicitly set in package.json. This was not portable, and harder to +do properly.

+

In the current version of node, the standard way to do this is using a +.gyp file. If you have a file with a .gyp extension in the root +of your package, then npm will run the appropriate node-gyp commands +automatically at install time. This is the only officially supported +method for compiling binary addons, and does not require that you add +anything to your package.json file.

+

If you have to do other things before your package is used, in a way +that is not dependent on the operating system or architecture of the +target system, then use a prepublish script instead. This includes +tasks such as:

+
    +
  • Compile CoffeeScript source code into JavaScript.
  • +
  • Create minified versions of JavaScript source code.
  • +
  • Fetching remote resources that your package will use.
  • +
+

The advantage of doing these things at prepublish time instead of +preinstall or install time is that they can be done once, in a +single place, and thus greatly reduce complexity and variability. +Additionally, this means that:

+
    +
  • You can depend on coffee-script as a devDependency, and thus +your users don't need to have it installed.
  • +
  • You don't need to include the minifiers in your package, reducing +the size for your users.
  • +
  • You don't need to rely on your users having curl or wget or +other system tools on the target machines.
  • +
+

DEFAULT VALUES

+

npm will default some script values based on package contents.

+
    +
  • "start": "node server.js":

    +

    If there is a server.js file in the root of your package, then npm +will default the start command to node server.js.

    +
  • +
  • "preinstall": "node-waf clean || true; node-waf configure build":

    +

    If there is a wscript file in the root of your package, npm will +default the preinstall command to compile using node-waf.

    +
  • +
+

USER

+

If npm was invoked with root privileges, then it will change the uid +to the user account or uid specified by the user config, which +defaults to nobody. Set the unsafe-perm flag to run scripts with +root privileges.

+

ENVIRONMENT

+

Package scripts run in an environment where many pieces of information +are made available regarding the setup of npm and the current state of +the process.

+

path

+

If you depend on modules that define executable scripts, like test +suites, then those executables will be added to the PATH for +executing the scripts. So, if your package.json has this:

+
{ "name" : "foo"
+, "dependencies" : { "bar" : "0.1.x" }
+, "scripts": { "start" : "bar ./test" } }
+

then you could run npm start to execute the bar script, which is +exported into the node_modules/.bin directory on npm install.

+

package.json vars

+

The package.json fields are tacked onto the npm_package_ prefix. So, +for instance, if you had {"name":"foo", "version":"1.2.5"} in your +package.json file, then your package scripts would have the +npm_package_name environment variable set to "foo", and the +npm_package_version set to "1.2.5"

+

configuration

+

Configuration parameters are put in the environment with the +npm_config_ prefix. For instance, you can view the effective root +config by checking the npm_config_root environment variable.

+

Special: package.json "config" object

+

The package.json "config" keys are overwritten in the environment if +there is a config param of <name>[@<version>]:<key>. For example, +if the package.json has this:

+
{ "name" : "foo"
+, "config" : { "port" : "8080" }
+, "scripts" : { "start" : "node server.js" } }
+

and the server.js is this:

+
http.createServer(...).listen(process.env.npm_package_config_port)
+

then the user could change the behavior by doing:

+
npm config set foo:port 80
+

current lifecycle event

+

Lastly, the npm_lifecycle_event environment variable is set to +whichever stage of the cycle is being executed. So, you could have a +single script used for different parts of the process which switches +based on what's currently happening.

+

Objects are flattened following this format, so if you had +{"scripts":{"install":"foo.js"}} in your package.json, then you'd +see this in the script:

+
process.env.npm_package_scripts_install === "foo.js"
+

EXAMPLES

+

For example, if your package.json contains this:

+
{ "scripts" :
+  { "install" : "scripts/install.js"
+  , "postinstall" : "scripts/install.js"
+  , "uninstall" : "scripts/uninstall.js"
+  }
+}
+

then the scripts/install.js will be called for the install and +post-install stages of the lifecycle, and the scripts/uninstall.js +would be called when the package is uninstalled. Since +scripts/install.js is running for three different phases, it would +be wise in this case to look at the npm_lifecycle_event environment +variable.

+

If you want to run a make command, you can do so. This works just +fine:

+
{ "scripts" :
+  { "preinstall" : "./configure"
+  , "install" : "make && make install"
+  , "test" : "make test"
+  }
+}
+

EXITING

+

Scripts are run by passing the line as a script argument to sh.

+

If the script exits with a code other than 0, then this will abort the +process.

+

Note that these script files don't have to be nodejs or even +javascript programs. They just have to be some kind of executable +file.

+

HOOK SCRIPTS

+

If you want to run a specific script at a specific lifecycle event for +ALL packages, then you can use a hook script.

+

Place an executable file at node_modules/.hooks/{eventname}, and +it'll get run for all packages when they are going through that point +in the package lifecycle for any packages installed in that root.

+

Hook scripts are run exactly the same way as package.json scripts. +That is, they are in a separate child process, with the env described +above.

+

BEST PRACTICES

+
    +
  • Don't exit with a non-zero error code unless you really mean it. +Except for uninstall scripts, this will cause the npm action to +fail, and potentially be rolled back. If the failure is minor or +only will prevent some optional features, then it's better to just +print a warning and exit successfully.
  • +
  • Try not to use scripts to do what npm can do for you. Read through +package.json(5) to see all the things that you can specify and enable +by simply describing your package appropriately. In general, this +will lead to a more robust and consistent state.
  • +
  • Inspect the env to determine where to put things. For instance, if +the npm_config_binroot environ is set to /home/user/bin, then +don't try to install executables into /usr/local/bin. The user +probably set it up that way for a reason.
  • +
  • Don't prefix your script commands with "sudo". If root permissions +are required for some reason, then it'll fail with that error, and +the user will sudo the npm command in question.
  • +
+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/removing-npm.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/removing-npm.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/removing-npm.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/removing-npm.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +

npm-removal

Cleaning the Slate

+

SYNOPSIS

+

So sad to see you go.

+
sudo npm uninstall npm -g
+

Or, if that fails, get the npm source code, and do:

+
sudo make uninstall
+

More Severe Uninstalling

+

Usually, the above instructions are sufficient. That will remove +npm, but leave behind anything you've installed.

+

If that doesn't work, or if you require more drastic measures, +continue reading.

+

Note that this is only necessary for globally-installed packages. Local +installs are completely contained within a project's node_modules +folder. Delete that folder, and everything is gone (unless a package's +install script is particularly ill-behaved).

+

This assumes that you installed node and npm in the default place. If +you configured node with a different --prefix, or installed npm with a +different prefix setting, then adjust the paths accordingly, replacing +/usr/local with your install prefix.

+

To remove everything npm-related manually:

+
rm -rf /usr/local/{lib/node{,/.npm,_modules},bin,share/man}/npm*
+

If you installed things with npm, then your best bet is to uninstall +them with npm first, and then install them again once you have a +proper install. This can help find any symlinks that are lying +around:

+
ls -laF /usr/local/{lib/node{,/.npm},bin,share/man} | grep npm
+

Prior to version 0.3, npm used shim files for executables and node +modules. To track those down, you can do the following:

+
find /usr/local/{lib/node,bin} -exec grep -l npm \{\} \; ;
+

(This is also in the README file.)

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/misc/semver.html nodejs-0.11.15/deps/npm/html/partial/doc/misc/semver.html --- nodejs-0.11.13/deps/npm/html/partial/doc/misc/semver.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/misc/semver.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,243 @@ +

semver

The semantic versioner for npm

+

Usage

+
$ npm install semver
+
+semver.valid('1.2.3') // '1.2.3'
+semver.valid('a.b.c') // null
+semver.clean('  =v1.2.3   ') // '1.2.3'
+semver.satisfies('1.2.3', '1.x || >=2.5.0 || 5.0.0 - 7.2.3') // true
+semver.gt('1.2.3', '9.8.7') // false
+semver.lt('1.2.3', '9.8.7') // true
+

As a command-line utility:

+
$ semver -h
+
+Usage: semver <version> [<version> [...]] [-r <range> | -i <inc> | -d <dec>]
+Test if version(s) satisfy the supplied range(s), and sort them.
+
+Multiple versions or ranges may be supplied, unless increment
+or decrement options are specified.  In that case, only a single
+version may be used, and it is incremented by the specified level
+
+Program exits successfully if any valid version satisfies
+all supplied ranges, and prints all satisfying versions.
+
+If no versions are valid, or ranges are not satisfied,
+then exits failure.
+
+Versions are printed in ascending order, so supplying
+multiple versions to the utility will just sort them.
+

Versions

+

A "version" is described by the v2.0.0 specification found at +http://semver.org/.

+

A leading "=" or "v" character is stripped off and ignored.

+

Ranges

+

A version range is a set of comparators which specify versions +that satisfy the range.

+

A comparator is composed of an operator and a version. The set +of primitive operators is:

+
    +
  • < Less than
  • +
  • <= Less than or equal to
  • +
  • > Greater than
  • +
  • >= Greater than or equal to
  • +
  • = Equal. If no operator is specified, then equality is assumed, +so this operator is optional, but MAY be included.
  • +
+

For example, the comparator >=1.2.7 would match the versions +1.2.7, 1.2.8, 2.5.3, and 1.3.9, but not the versions 1.2.6 +or 1.1.0.

+

Comparators can be joined by whitespace to form a comparator set, +which is satisfied by the intersection of all of the comparators +it includes.

+

A range is composed of one or more comparator sets, joined by ||. A +version matches a range if and only if every comparator in at least +one of the ||-separated comparator sets is satisfied by the version.

+

For example, the range >=1.2.7 <1.3.0 would match the versions +1.2.7, 1.2.8, and 1.2.99, but not the versions 1.2.6, 1.3.0, +or 1.1.0.

+

The range 1.2.7 || >=1.2.9 <2.0.0 would match the versions 1.2.7, +1.2.9, and 1.4.6, but not the versions 1.2.8 or 2.0.0.

+

Prerelease Tags

+

If a version has a prerelease tag (for example, 1.2.3-alpha.3) then +it will only be allowed to satisfy comparator sets if at least one +comparator with the same [major, minor, patch] tuple also has a +prerelease tag.

+

For example, the range >1.2.3-alpha.3 would be allowed to match the +version 1.2.3-alpha.7, but it would not be satisfied by +3.4.5-alpha.9, even though 3.4.5-alpha.9 is technically "greater +than" 1.2.3-alpha.3 according to the SemVer sort rules. The version +range only accepts prerelease tags on the 1.2.3 version. The +version 3.4.5 would satisfy the range, because it does not have a +prerelease flag, and 3.4.5 is greater than 1.2.3-alpha.7.

+

The purpose for this behavior is twofold. First, prerelease versions +frequently are updated very quickly, and contain many breaking changes +that are (by the author's design) not yet fit for public consumption. +Therefore, by default, they are excluded from range matching +semantics.

+

Second, a user who has opted into using a prerelease version has +clearly indicated the intent to use that specific set of +alpha/beta/rc versions. By including a prerelease tag in the range, +the user is indicating that they are aware of the risk. However, it +is still not appropriate to assume that they have opted into taking a +similar risk on the next set of prerelease versions.

+

Advanced Range Syntax

+

Advanced range syntax desugars to primitive comparators in +deterministic ways.

+

Advanced ranges may be combined in the same way as primitive +comparators using white space or ||.

+

Hyphen Ranges X.Y.Z - A.B.C

+

Specifies an inclusive set.

+
    +
  • 1.2.3 - 2.3.4 := >=1.2.3 <=2.3.4
  • +
+

If a partial version is provided as the first version in the inclusive +range, then the missing pieces are replaced with zeroes.

+
    +
  • 1.2 - 2.3.4 := >=1.2.0 <=2.3.4
  • +
+

If a partial version is provided as the second version in the +inclusive range, then all versions that start with the supplied parts +of the tuple are accepted, but nothing that would be greater than the +provided tuple parts.

+
    +
  • 1.2.3 - 2.3 := >=1.2.3 <2.4.0
  • +
  • 1.2.3 - 2 := >=1.2.3 <3.0.0
  • +
+

X-Ranges 1.2.x 1.X 1.2.* *

+

Any of X, x, or * may be used to "stand in" for one of the +numeric values in the [major, minor, patch] tuple.

+
    +
  • * := >=0.0.0 (Any version satisfies)
  • +
  • 1.x := >=1.0.0 <2.0.0 (Matching major version)
  • +
  • 1.2.x := >=1.2.0 <1.3.0 (Matching major and minor versions)
  • +
+

A partial version range is treated as an X-Range, so the special +character is in fact optional.

+
    +
  • "" (empty string) := * := >=0.0.0
  • +
  • 1 := 1.x.x := >=1.0.0 <2.0.0
  • +
  • 1.2 := 1.2.x := >=1.2.0 <1.3.0
  • +
+

Tilde Ranges ~1.2.3 ~1.2 ~1

+

Allows patch-level changes if a minor version is specified on the +comparator. Allows minor-level changes if not.

+
    +
  • ~1.2.3 := >=1.2.3 <1.(2+1).0 := >=1.2.3 <1.3.0
  • +
  • ~1.2 := >=1.2.0 <1.(2+1).0 := >=1.2.0 <1.3.0 (Same as 1.2.x)
  • +
  • ~1 := >=1.0.0 <(1+1).0.0 := >=1.0.0 <2.0.0 (Same as 1.x)
  • +
  • ~0.2.3 := >=0.2.3 <0.(2+1).0 := >=0.2.3 <0.3.0
  • +
  • ~0.2 := >=0.2.0 <0.(2+1).0 := >=0.2.0 <0.3.0 (Same as 0.2.x)
  • +
  • ~0 := >=0.0.0 <(0+1).0.0 := >=0.0.0 <1.0.0 (Same as 0.x)
  • +
  • ~1.2.3-beta.2 := >=1.2.3-beta.2 <1.3.0 Note that prereleases in +the 1.2.3 version will be allowed, if they are greater than or +equal to beta.2. So, 1.2.3-beta.4 would be allowed, but +1.2.4-beta.2 would not, because it is a prerelease of a +different [major, minor, patch] tuple.
  • +
+

Note: this is the same as the ~> operator in rubygems.

+

Caret Ranges ^1.2.3 ^0.2.5 ^0.0.4

+

Allows changes that do not modify the left-most non-zero digit in the +[major, minor, patch] tuple. In other words, this allows patch and +minor updates for versions 1.0.0 and above, patch updates for +versions 0.X >=0.1.0, and no updates for versions 0.0.X.

+

Many authors treat a 0.x version as if the x were the major +"breaking-change" indicator.

+

Caret ranges are ideal when an author may make breaking changes +between 0.2.4 and 0.3.0 releases, which is a common practice. +However, it presumes that there will not be breaking changes between +0.2.4 and 0.2.5. It allows for changes that are presumed to be +additive (but non-breaking), according to commonly observed practices.

+
    +
  • ^1.2.3 := >=1.2.3 <2.0.0
  • +
  • ^0.2.3 := >=0.2.3 <0.3.0
  • +
  • ^0.0.3 := >=0.0.3 <0.0.4
  • +
  • ^1.2.3-beta.2 := >=1.2.3-beta.2 <2.0.0 Note that prereleases in +the 1.2.3 version will be allowed, if they are greater than or +equal to beta.2. So, 1.2.3-beta.4 would be allowed, but +1.2.4-beta.2 would not, because it is a prerelease of a +different [major, minor, patch] tuple.
  • +
  • ^0.0.3-beta := >=0.0.3-beta <0.0.4 Note that prereleases in the +0.0.3 version only will be allowed, if they are greater than or +equal to beta. So, 0.0.3-pr.2 would be allowed.
  • +
+

When parsing caret ranges, a missing patch value desugars to the +number 0, but will allow flexibility within that value, even if the +major and minor versions are both 0.

+
    +
  • ^1.2.x := >=1.2.0 <2.0.0
  • +
  • ^0.0.x := >=0.0.0 <0.1.0
  • +
  • ^0.0 := >=0.0.0 <0.1.0
  • +
+

Missing minor and patch values will desugar to zero, but also +allow flexibility within those values, even if the major version is +zero.

+
    +
  • ^1.x := >=1.0.0 <2.0.0
  • +
  • ^0.x := >=0.0.0 <1.0.0
  • +
+

Functions

+

All methods and classes take a final loose boolean argument that, if +true, will be more forgiving about not-quite-valid semver strings. +The resulting output will always be 100% strict, of course.

+

Strict-mode Comparators and Ranges will be strict about the SemVer +strings that they parse.

+
    +
  • valid(v): Return the parsed version, or null if it's not valid.
  • +
  • inc(v, release): Return the version incremented by the release +type (major, premajor, minor, preminor, patch, +prepatch, or prerelease), or null if it's not valid
      +
    • premajor in one call will bump the version up to the next major +version and down to a prerelease of that major version. +preminor, and prepatch work the same way.
    • +
    • If called from a non-prerelease version, the prerelease will work the +same as prepatch. It increments the patch version, then makes a +prerelease. If the input version is already a prerelease it simply +increments it.
    • +
    +
  • +
+

Comparison

+
    +
  • gt(v1, v2): v1 > v2
  • +
  • gte(v1, v2): v1 >= v2
  • +
  • lt(v1, v2): v1 < v2
  • +
  • lte(v1, v2): v1 <= v2
  • +
  • eq(v1, v2): v1 == v2 This is true if they're logically equivalent, +even if they're not the exact same string. You already know how to +compare strings.
  • +
  • neq(v1, v2): v1 != v2 The opposite of eq.
  • +
  • cmp(v1, comparator, v2): Pass in a comparison string, and it'll call +the corresponding function above. "===" and "!==" do simple +string comparison, but are included for completeness. Throws if an +invalid comparison string is provided.
  • +
  • compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or -1 if +v2 is greater. Sorts in ascending order if passed to Array.sort().
  • +
  • rcompare(v1, v2): The reverse of compare. Sorts an array of versions +in descending order when passed to Array.sort().
  • +
+

Ranges

+
    +
  • validRange(range): Return the valid range or null if it's not valid
  • +
  • satisfies(version, range): Return true if the version satisfies the +range.
  • +
  • maxSatisfying(versions, range): Return the highest version in the list +that satisfies the range, or null if none of them do.
  • +
  • gtr(version, range): Return true if version is greater than all the +versions possible in the range.
  • +
  • ltr(version, range): Return true if version is less than all the +versions possible in the range.
  • +
  • outside(version, range, hilo): Return true if the version is outside +the bounds of the range in either the high or low direction. The +hilo argument must be either the string '>' or '<'. (This is +the function called by gtr and ltr.)
  • +
+

Note that, since ranges may be non-contiguous, a version might not be +greater than a range, less than a range, or satisfy a range! For +example, the range 1.2 <1.2.9 || >2.0.0 would have a hole from 1.2.9 +until 2.0.0, so the version 1.2.10 would not be greater than the +range (because 2.0.1 satisfies, which is higher), nor less than the +range (since 1.2.8 satisfies, which is lower), and it also does not +satisfy the range.

+

If you want to know if a version satisfies or does not satisfy a +range, use the satisfies(version, range) function.

+ diff -Nru nodejs-0.11.13/deps/npm/html/partial/doc/README.html nodejs-0.11.15/deps/npm/html/partial/doc/README.html --- nodejs-0.11.13/deps/npm/html/partial/doc/README.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/html/partial/doc/README.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,166 @@ +

npm

node package manager

+

Build Status

+

SYNOPSIS

+

This is just enough info to get you up and running.

+

Much more info available via npm help once it's installed.

+

IMPORTANT

+

You need node v0.8 or higher to run this program.

+

To install an old and unsupported version of npm that works on node 0.3 +and prior, clone the git repo and dig through the old tags and branches.

+

Super Easy Install

+

npm comes with node now.

+

Windows Computers

+

Get the MSI. npm is in it.

+

Apple Macintosh Computers

+

Get the pkg. npm is in it.

+

Other Sorts of Unices

+

Run make install. npm will be installed with node.

+

If you want a more fancy pants install (a different version, customized +paths, etc.) then read on.

+

Fancy Install (Unix)

+

There's a pretty robust install script at +https://www.npmjs.org/install.sh. You can download that and run it.

+

Here's an example using curl:

+
curl -L https://npmjs.org/install.sh | sh
+

Slightly Fancier

+

You can set any npm configuration params with that script:

+
npm_config_prefix=/some/path sh install.sh
+

Or, you can run it in uber-debuggery mode:

+
npm_debug=1 sh install.sh
+

Even Fancier

+

Get the code with git. Use make to build the docs and do other stuff. +If you plan on hacking on npm, make link is your friend.

+

If you've got the npm source code, you can also semi-permanently set +arbitrary config keys using the ./configure --key=val ..., and then +run npm commands by doing node cli.js <cmd> <args>. (This is helpful +for testing, or running stuff without actually installing npm itself.)

+

Fancy Windows Install

+

You can download a zip file from https://npmjs.org/dist/, and unpack it +in the same folder where node.exe lives.

+

If that's not fancy enough for you, then you can fetch the code with +git, and mess with it directly.

+

Installing on Cygwin

+

No.

+

Permissions when Using npm to Install Other Stuff

+

tl;dr

+
    +
  • Use sudo for greater safety. Or don't, if you prefer not to.
  • +
  • npm will downgrade permissions if it's root before running any build +scripts that package authors specified.
  • +
+

More details...

+

As of version 0.3, it is recommended to run npm as root. +This allows npm to change the user identifier to the nobody user prior +to running any package build or test commands.

+

If you are not the root user, or if you are on a platform that does not +support uid switching, then npm will not attempt to change the userid.

+

If you would like to ensure that npm always runs scripts as the +"nobody" user, and have it fail if it cannot downgrade permissions, then +set the following configuration param:

+
npm config set unsafe-perm false
+

This will prevent running in unsafe mode, even as non-root users.

+

Uninstalling

+

So sad to see you go.

+
sudo npm uninstall npm -g
+

Or, if that fails,

+
sudo make uninstall
+

More Severe Uninstalling

+

Usually, the above instructions are sufficient. That will remove +npm, but leave behind anything you've installed.

+

If you would like to remove all the packages that you have installed, +then you can use the npm ls command to find them, and then npm rm to +remove them.

+

To remove cruft left behind by npm 0.x, you can use the included +clean-old.sh script file. You can run it conveniently like this:

+
npm explore npm -g -- sh scripts/clean-old.sh
+

npm uses two configuration files, one for per-user configs, and another +for global (every-user) configs. You can view them by doing:

+
npm config get userconfig   # defaults to ~/.npmrc
+npm config get globalconfig # defaults to /usr/local/etc/npmrc
+

Uninstalling npm does not remove configuration files by default. You +must remove them yourself manually if you want them gone. Note that +this means that future npm installs will not remember the settings that +you have chosen.

+

Using npm Programmatically

+

If you would like to use npm programmatically, you can do that. +It's not very well documented, but it is rather simple.

+

Most of the time, unless you actually want to do all the things that +npm does, you should try using one of npm's dependencies rather than +using npm itself, if possible.

+

Eventually, npm will be just a thin cli wrapper around the modules +that it depends on, but for now, there are some things that you must +use npm itself to do.

+
var npm = require("npm")
+npm.load(myConfigObject, function (er) {
+  if (er) return handlError(er)
+  npm.commands.install(["some", "args"], function (er, data) {
+    if (er) return commandFailed(er)
+    // command succeeded, and data might have some info
+  })
+  npm.registry.log.on("log", function (message) { .... })
+})
+

The load function takes an object hash of the command-line configs. +The various npm.commands.<cmd> functions take an array of +positional argument strings. The last argument to any +npm.commands.<cmd> function is a callback. Some commands take other +optional arguments. Read the source.

+

You cannot set configs individually for any single npm function at this +time. Since npm is a singleton, any call to npm.config.set will +change the value for all npm commands in that process.

+

See ./bin/npm-cli.js for an example of pulling config values off of the +command line arguments using nopt. You may also want to check out npm +help config to learn about all the options you can set there.

+

More Docs

+

Check out the docs, +especially the faq.

+

You can use the npm help command to read any of them.

+

If you're a developer, and you want to use npm to publish your program, +you should read this

+ +

"npm" and "The npm Registry" are owned by npm, Inc. +All rights reserved. See the included LICENSE file for more details.

+

"Node.js" and "node" are trademarks owned by Joyent, Inc.

+

Modules published on the npm registry are not officially endorsed by +npm, Inc. or the Node.js project.

+

Data published to the npm registry is not part of npm itself, and is +the sole property of the publisher. While every effort is made to +ensure accountability, there is absolutely no guarantee, warrantee, or +assertion expressed or implied as to the quality, fitness for a +specific purpose, or lack of malice in any given npm package.

+

If you have a complaint about a package in the public npm registry, +and cannot resolve it with the package +owner, please email +support@npmjs.com and explain the situation.

+

Any data published to The npm Registry (including user account +information) may be removed or modified at the sole discretion of the +npm server administrators.

+

In plainer english

+

npm is the property of npm, Inc.

+

If you publish something, it's yours, and you are solely accountable +for it.

+

If other people publish something, it's theirs.

+

Users can publish Bad Stuff. It will be removed promptly if reported. +But there is no vetting process for published modules, and you use +them at your own risk. Please inspect the source.

+

If you publish Bad Stuff, we may delete it from the registry, or even +ban your account in extreme cases. So don't do that.

+

BUGS

+

When you find issues, please report them:

+ +

Be sure to include all of the output from the npm command that didn't work +as expected. The npm-debug.log file is also helpful to provide.

+

You can also look for isaacs in #node.js on irc://irc.freenode.net. He +will no doubt tell you to put the output in a gist or email.

+

SEE ALSO

+ + diff -Nru nodejs-0.11.13/deps/npm/lib/adduser.js nodejs-0.11.15/deps/npm/lib/adduser.js --- nodejs-0.11.13/deps/npm/lib/adduser.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/adduser.js 2015-01-20 21:22:17.000000000 +0000 @@ -15,14 +15,15 @@ adduser.usage = "npm adduser\nThen enter stuff at the prompts" function adduser (args, cb) { + npm.spinner.stop() if (!crypto) return cb(new Error( "You must compile node with ssl support to use the adduser feature")) - var c = { u : npm.config.get("username") || "" - , p : npm.config.get("_password") || "" - , e : npm.config.get("email") || "" + var creds = npm.config.getCredentialsByURI(npm.config.get("registry")) + var c = { u : creds.username || "" + , p : creds.password || "" + , e : creds.email || "" } - , changed = false , u = {} , fns = [readUsername, readPassword, readEmail, save] @@ -94,7 +95,7 @@ return readPassword(c, u, cb) } - c.changed = c.changed || c.p != pw + c.changed = c.changed || c.p !== pw u.p = pw cb(er) }) @@ -131,17 +132,47 @@ registry.username = u.u registry.password = u.p } + npm.spinner.start() // save existing configs, but yank off for this PUT - registry.adduser(u.u, u.p, u.e, function (er) { + var uri = npm.config.get("registry") + var scope = npm.config.get("scope") + + // there may be a saved scope and no --registry (for login) + if (scope) { + if (scope.charAt(0) !== "@") scope = "@" + scope + + var scopedRegistry = npm.config.get(scope + ":registry") + if (scopedRegistry) uri = scopedRegistry + } + + registry.adduser(uri, u.u, u.p, u.e, function (er, doc) { + npm.spinner.stop() if (er) return cb(er) + registry.username = u.u registry.password = u.p registry.email = u.e - npm.config.set("username", u.u, "user") - npm.config.set("_password", u.p, "user") - npm.config.set("email", u.e, "user") + + // don't want this polluting the configuration npm.config.del("_token", "user") + + if (scope) npm.config.set(scope + ":registry", uri, "user") + + if (doc && doc.token) { + 
npm.config.setCredentialsByURI(uri, { + token : doc.token + }) + } + else { + npm.config.setCredentialsByURI(uri, { + username : u.u, + password : u.p, + email : u.e, + alwaysAuth : npm.config.get("always-auth") + }) + } + log.info("adduser", "Authorized user %s", u.u) npm.config.save("user", cb) }) diff -Nru nodejs-0.11.13/deps/npm/lib/bugs.js nodejs-0.11.15/deps/npm/lib/bugs.js --- nodejs-0.11.13/deps/npm/lib/bugs.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/bugs.js 2015-01-20 21:22:17.000000000 +0000 @@ -9,17 +9,23 @@ , opener = require("opener") , path = require("path") , readJson = require("read-package-json") + , npa = require("npm-package-arg") , fs = require("fs") + , mapToRegistry = require("./utils/map-to-registry.js") bugs.completion = function (opts, cb) { if (opts.conf.argv.remain.length > 2) return cb() - registry.get("/-/short", 60000, function (er, list) { - return cb(null, list || []) + mapToRegistry("-/short", npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, { timeout : 60000 }, function (er, list) { + return cb(null, list || []) + }) }) } function bugs (args, cb) { - var n = args.length && args[0].split("@").shift() || '.' + var n = args.length && npa(args[0]).name || '.' 
fs.stat(n, function (er, s) { if (er && er.code === "ENOENT") return callRegistry(n, cb) else if (er) return cb (er) @@ -54,8 +60,13 @@ } function callRegistry (n, cb) { - registry.get(n + "/latest", 3600, function (er, d) { + mapToRegistry(n, npm.config, function (er, uri) { if (er) return cb(er) - getUrlAndOpen (d, cb) + + registry.get(uri + "/latest", { timeout : 3600 }, function (er, d) { + if (er) return cb(er) + + getUrlAndOpen (d, cb) + }) }) } diff -Nru nodejs-0.11.13/deps/npm/lib/build.js nodejs-0.11.15/deps/npm/lib/build.js --- nodejs-0.11.13/deps/npm/lib/build.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/build.js 2015-01-20 21:22:17.000000000 +0000 @@ -19,6 +19,8 @@ , cmdShim = require("cmd-shim") , cmdShimIfExists = cmdShim.ifExists , asyncMap = require("slide").asyncMap + , ini = require("ini") + , writeFile = require("write-file-atomic") module.exports = build build.usage = "npm build \n(this is plumbing)" @@ -41,6 +43,7 @@ function build_ (global, didPre, didRB) { return function (folder, cb) { folder = path.resolve(folder) + if (build._didBuild[folder]) log.error("build", "already built", folder) build._didBuild[folder] = true log.info("build", folder) readJson(path.resolve(folder, "package.json"), function (er, pkg) { @@ -48,7 +51,7 @@ chain ( [ !didPre && [lifecycle, pkg, "preinstall", folder] , [linkStuff, pkg, folder, global, didRB] - , pkg.name === "npm" && [writeBuiltinConf, folder] + , [writeBuiltinConf, pkg, folder] , didPre !== build._noLC && [lifecycle, pkg, "install", folder] , didPre !== build._noLC && [lifecycle, pkg, "postinstall", folder] , didPre !== build._noLC @@ -58,14 +61,21 @@ }) }} -function writeBuiltinConf (folder, cb) { - // the builtin config is "sticky". Any time npm installs itself, - // it puts its builtin config file there, as well. - if (!npm.config.usingBuiltin - || folder !== path.dirname(__dirname)) { +function writeBuiltinConf (pkg, folder, cb) { + // the builtin config is "sticky". 
Any time npm installs + // itself globally, it puts its builtin config file there + var parent = path.dirname(folder) + var dir = npm.globalDir + + if (pkg.name !== "npm" || + !npm.config.get("global") || + !npm.config.usingBuiltin || + dir !== parent) { return cb() } - npm.config.save("builtin", cb) + + var data = ini.stringify(npm.config.sources.builtin.data) + writeFile(path.resolve(folder, "npmrc"), data, cb) } function linkStuff (pkg, folder, global, didRB, cb) { @@ -75,9 +85,8 @@ // if it's global, and folder is in {prefix}/node_modules, // then bins are in {prefix}/bin // otherwise, then bins are in folder/../.bin - var parent = path.dirname(folder) + var parent = pkg.name[0] === "@" ? path.dirname(path.dirname(folder)) : path.dirname(folder) , gnm = global && npm.globalDir - , top = parent === npm.dir , gtop = parent === gnm log.verbose("linkStuff", [global, gnm, gtop, parent]) @@ -96,7 +105,7 @@ function shouldWarn(pkg, folder, global, cb) { var parent = path.dirname(folder) , top = parent === npm.dir - , cwd = process.cwd() + , cwd = npm.localPrefix readJson(path.resolve(cwd, "package.json"), function(er, topPkg) { if (er) return cb(er) @@ -150,7 +159,7 @@ if (build._didBuild[file]) return cb() log.verbose("rebuild bundle", file) // if file is not a package dir, then don't do it. 
- fs.lstat(path.resolve(file, "package.json"), function (er, st) { + fs.lstat(path.resolve(file, "package.json"), function (er) { if (er) return cb() build_(false)(file, cb) }) @@ -219,7 +228,6 @@ var parseMan = man.match(/(.*\.([0-9]+)(\.gz)?)$/) , stem = parseMan[1] , sxn = parseMan[2] - , gz = parseMan[3] || "" , bn = path.basename(stem) , manDest = path.join(manRoot, "man" + sxn, bn) diff -Nru nodejs-0.11.13/deps/npm/lib/cache/add-local.js nodejs-0.11.15/deps/npm/lib/cache/add-local.js --- nodejs-0.11.13/deps/npm/lib/cache/add-local.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/add-local.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,116 @@ +var assert = require("assert") + , path = require("path") + , mkdir = require("mkdirp") + , chownr = require("chownr") + , pathIsInside = require("path-is-inside") + , readJson = require("read-package-json") + , log = require("npmlog") + , npm = require("../npm.js") + , tar = require("../utils/tar.js") + , deprCheck = require("../utils/depr-check.js") + , getCacheStat = require("./get-stat.js") + , cachedPackageRoot = require("./cached-package-root.js") + , addLocalTarball = require("./add-local-tarball.js") + , sha = require("sha") + +module.exports = addLocal + +function addLocal (p, pkgData, cb_) { + assert(typeof p === "object", "must have spec info") + assert(typeof cb === "function", "must have callback") + + pkgData = pkgData || {} + + function cb (er, data) { + if (er) { + log.error("addLocal", "Could not install %s", p.spec) + return cb_(er) + } + if (data && !data._fromGithub) { + data._from = path.relative(npm.prefix, p.spec) || "." + } + return cb_(er, data) + } + + if (p.type === "directory") { + addLocalDirectory(p.spec, pkgData, null, cb) + } + else { + addLocalTarball(p.spec, pkgData, null, cb) + } +} + +// At this point, if shasum is set, it's something that we've already +// read and checked. Just stashing it in the data at this point. 
+function addLocalDirectory (p, pkgData, shasum, cb) { + assert(pkgData, "must pass package data") + assert(typeof cb === "function", "must have callback") + + // if it's a folder, then read the package.json, + // tar it to the proper place, and add the cache tar + if (pathIsInside(p, npm.cache)) return cb(new Error( + "Adding a cache directory to the cache will make the world implode.")) + + readJson(path.join(p, "package.json"), false, function (er, data) { + if (er) return cb(er) + + if (!data.name) { + return cb(new Error("No name provided in package.json")) + } + else if (pkgData.name && pkgData.name !== data.name) { + return cb(new Error( + "Invalid package: expected " + pkgData.name + " but found " + data.name + )) + } + + if (!data.version) { + return cb(new Error("No version provided in package.json")) + } + else if (pkgData.version && pkgData.version !== data.version) { + return cb(new Error( + "Invalid package: expected " + pkgData.name + "@" + pkgData.version + + " but found " + data.name + "@" + data.version + )) + } + + deprCheck(data) + + // pack to {cache}/name/ver/package.tgz + var root = cachedPackageRoot(data) + var tgz = path.resolve(root, "package.tgz") + var pj = path.resolve(root, "package/package.json") + getCacheStat(function (er, cs) { + mkdir(path.dirname(pj), function (er, made) { + if (er) return cb(er) + var fancy = !pathIsInside(p, npm.tmp) + tar.pack(tgz, p, data, fancy, function (er) { + if (er) { + log.error( "addLocalDirectory", "Could not pack %j to %j" + , p, tgz ) + return cb(er) + } + + if (!cs || isNaN(cs.uid) || isNaN(cs.gid)) next() + + chownr(made || tgz, cs.uid, cs.gid, next) + }) + }) + }) + + function next (er) { + if (er) return cb(er) + // if we have the shasum already, just add it + if (shasum) { + return addLocalTarball(tgz, data, shasum, cb) + } else { + sha.get(tgz, function (er, shasum) { + if (er) { + return cb(er) + } + data._shasum = shasum + return addLocalTarball(tgz, data, shasum, cb) + }) + } + } + }) +} 
diff -Nru nodejs-0.11.13/deps/npm/lib/cache/add-local-tarball.js nodejs-0.11.15/deps/npm/lib/cache/add-local-tarball.js --- nodejs-0.11.13/deps/npm/lib/cache/add-local-tarball.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/add-local-tarball.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,180 @@ +var mkdir = require("mkdirp") + , assert = require("assert") + , fs = require("graceful-fs") + , writeFileAtomic = require("write-file-atomic") + , path = require("path") + , sha = require("sha") + , npm = require("../npm.js") + , log = require("npmlog") + , tar = require("../utils/tar.js") + , pathIsInside = require("path-is-inside") + , getCacheStat = require("./get-stat.js") + , cachedPackageRoot = require("./cached-package-root.js") + , chownr = require("chownr") + , inflight = require("inflight") + , once = require("once") + , writeStream = require("fs-write-stream-atomic") + , randomBytes = require("crypto").pseudoRandomBytes // only need uniqueness + +module.exports = addLocalTarball + +function addLocalTarball (p, pkgData, shasum, cb) { + assert(typeof p === "string", "must have path") + assert(typeof cb === "function", "must have callback") + + if (!pkgData) pkgData = {} + + // If we don't have a shasum yet, compute it. 
+ if (!shasum) { + return sha.get(p, function (er, shasum) { + if (er) return cb(er) + log.silly("addLocalTarball", "shasum (computed)", shasum) + addLocalTarball(p, pkgData, shasum, cb) + }) + } + + if (pathIsInside(p, npm.cache)) { + if (path.basename(p) !== "package.tgz") { + return cb(new Error("Not a valid cache tarball name: "+p)) + } + log.verbose("addLocalTarball", "adding from inside cache", p) + return addPlacedTarball(p, pkgData, shasum, cb) + } + + addTmpTarball(p, pkgData, shasum, function (er, data) { + if (data) { + data._resolved = p + data._shasum = data._shasum || shasum + } + return cb(er, data) + }) +} + +function addPlacedTarball (p, pkgData, shasum, cb) { + assert(pkgData, "should have package data by now") + assert(typeof cb === "function", "cb function required") + + getCacheStat(function (er, cs) { + if (er) return cb(er) + return addPlacedTarball_(p, pkgData, cs.uid, cs.gid, shasum, cb) + }) +} + +function addPlacedTarball_ (p, pkgData, uid, gid, resolvedSum, cb) { + var folder = path.join(cachedPackageRoot(pkgData), "package") + + // First, make sure we have the shasum, if we don't already. 
+ if (!resolvedSum) { + sha.get(p, function (er, shasum) { + if (er) return cb(er) + addPlacedTarball_(p, pkgData, uid, gid, shasum, cb) + }) + return + } + + mkdir(folder, function (er) { + if (er) return cb(er) + var pj = path.join(folder, "package.json") + var json = JSON.stringify(pkgData, null, 2) + writeFileAtomic(pj, json, function (er) { + cb(er, pkgData) + }) + }) +} + +function addTmpTarball (tgz, pkgData, shasum, cb) { + assert(typeof cb === "function", "must have callback function") + assert(shasum, "must have shasum by now") + + cb = inflight("addTmpTarball:" + tgz, cb) + if (!cb) return log.verbose("addTmpTarball", tgz, "already in flight; not adding") + log.verbose("addTmpTarball", tgz, "not in flight; adding") + + // we already have the package info, so just move into place + if (pkgData && pkgData.name && pkgData.version) { + log.verbose( + "addTmpTarball", + "already have metadata; skipping unpack for", + pkgData.name + "@" + pkgData.version + ) + return addTmpTarball_(tgz, pkgData, shasum, cb) + } + + // This is a tarball we probably downloaded from the internet. The shasum's + // already been checked, but we haven't ever had a peek inside, so we unpack + // it here just to make sure it is what it says it is. + // + // NOTE: we might not have any clue what we think it is, for example if the + // user just did `npm install ./foo.tgz` + + // generate a unique filename + randomBytes(6, function (er, random) { + if (er) return cb(er) + + var target = path.join(npm.tmp, "unpack-" + random.toString("hex")) + getCacheStat(function (er, cs) { + if (er) return cb(er) + + log.verbose("addTmpTarball", "validating metadata from", tgz) + tar.unpack(tgz, target, null, null, cs.uid, cs.gid, function (er, data) { + if (er) return cb(er) + + // check that this is what we expected. 
+ if (!data.name) { + return cb(new Error("No name provided")) + } + else if (pkgData.name && data.name !== pkgData.name) { + return cb(new Error("Invalid Package: expected " + pkgData.name + + " but found " + data.name)) + } + + if (!data.version) { + return cb(new Error("No version provided")) + } + else if (pkgData.version && data.version !== pkgData.version) { + return cb(new Error("Invalid Package: expected " + + pkgData.name + "@" + pkgData.version + + " but found " + data.name + "@" + data.version)) + } + + addTmpTarball_(tgz, data, shasum, cb) + }) + }) + }) +} + +function addTmpTarball_ (tgz, data, shasum, cb) { + assert(typeof cb === "function", "must have callback function") + cb = once(cb) + + assert(data.name, "should have package name by now") + assert(data.version, "should have package version by now") + + var root = cachedPackageRoot(data) + var pkg = path.resolve(root, "package") + var target = path.resolve(root, "package.tgz") + getCacheStat(function (er, cs) { + if (er) return cb(er) + mkdir(pkg, function (er, created) { + + // chown starting from the first dir created by mkdirp, + // or the root dir, if none had to be created, so that + // we know that we get all the children. + function chown () { + chownr(created || root, cs.uid, cs.gid, done) + } + + if (er) return cb(er) + var read = fs.createReadStream(tgz) + var write = writeStream(target, { mode: npm.modes.file }) + var fin = cs.uid && cs.gid ? 
chown : done + read.on("error", cb).pipe(write).on("error", cb).on("close", fin) + }) + + }) + + function done() { + data._shasum = data._shasum || shasum + cb(null, data) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/cache/add-named.js nodejs-0.11.15/deps/npm/lib/cache/add-named.js --- nodejs-0.11.13/deps/npm/lib/cache/add-named.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/add-named.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,275 @@ +var path = require("path") + , assert = require("assert") + , fs = require("graceful-fs") + , http = require("http") + , log = require("npmlog") + , semver = require("semver") + , readJson = require("read-package-json") + , url = require("url") + , npm = require("../npm.js") + , registry = npm.registry + , deprCheck = require("../utils/depr-check.js") + , inflight = require("inflight") + , addRemoteTarball = require("./add-remote-tarball.js") + , cachedPackageRoot = require("./cached-package-root.js") + , mapToRegistry = require("../utils/map-to-registry.js") + + +module.exports = addNamed + +function getOnceFromRegistry (name, from, next, done) { + mapToRegistry(name, npm.config, function (er, uri) { + if (er) return done(er) + + var key = "registry:" + uri + next = inflight(key, next) + if (!next) return log.verbose(from, key, "already in flight; waiting") + else log.verbose(from, key, "not in flight; fetching") + + registry.get(uri, null, next) + }) +} + +function addNamed (name, version, data, cb_) { + assert(typeof name === "string", "must have module name") + assert(typeof cb_ === "function", "must have callback") + + var key = name + "@" + version + log.verbose("addNamed", key) + + function cb (er, data) { + if (data && !data._fromGithub) data._from = key + cb_(er, data) + } + + log.silly("addNamed", "semver.valid", semver.valid(version)) + log.silly("addNamed", "semver.validRange", semver.validRange(version)) + var fn = ( semver.valid(version, true) ? 
addNameVersion + : semver.validRange(version, true) ? addNameRange + : addNameTag + ) + fn(name, version, data, cb) +} + +function addNameTag (name, tag, data, cb) { + log.info("addNameTag", [name, tag]) + var explicit = true + if (!tag) { + explicit = false + tag = npm.config.get("tag") + } + + getOnceFromRegistry(name, "addNameTag", next, cb) + + function next (er, data, json, resp) { + if (!er) er = errorResponse(name, resp) + if (er) return cb(er) + + log.silly("addNameTag", "next cb for", name, "with tag", tag) + + engineFilter(data) + if (data["dist-tags"] && data["dist-tags"][tag] + && data.versions[data["dist-tags"][tag]]) { + var ver = data["dist-tags"][tag] + return addNamed(name, ver, data.versions[ver], cb) + } + if (!explicit && Object.keys(data.versions).length) { + return addNamed(name, "*", data, cb) + } + + er = installTargetsError(tag, data) + return cb(er) + } +} + +function engineFilter (data) { + var npmv = npm.version + , nodev = npm.config.get("node-version") + , strict = npm.config.get("engine-strict") + + if (!nodev || npm.config.get("force")) return data + + Object.keys(data.versions || {}).forEach(function (v) { + var eng = data.versions[v].engines + if (!eng) return + if (!strict && !data.versions[v].engineStrict) return + if (eng.node && !semver.satisfies(nodev, eng.node, true) + || eng.npm && !semver.satisfies(npmv, eng.npm, true)) { + delete data.versions[v] + } + }) +} + +function addNameVersion (name, v, data, cb) { + var ver = semver.valid(v, true) + if (!ver) return cb(new Error("Invalid version: "+v)) + + var response + + if (data) { + response = null + return next() + } + + getOnceFromRegistry(name, "addNameVersion", setData, cb) + + function setData (er, d, json, resp) { + if (!er) { + er = errorResponse(name, resp) + } + if (er) return cb(er) + data = d && d.versions[ver] + if (!data) { + er = new Error("version not found: "+name+"@"+ver) + er.package = name + er.statusCode = 404 + return cb(er) + } + response = resp + next() 
+ } + + function next () { + deprCheck(data) + var dist = data.dist + + if (!dist) return cb(new Error("No dist in "+data._id+" package")) + + if (!dist.tarball) return cb(new Error( + "No dist.tarball in " + data._id + " package")) + + if ((response && response.statusCode !== 304) || npm.config.get("force")) { + return fetchit() + } + + // we got cached data, so let's see if we have a tarball. + var pkgroot = cachedPackageRoot({name : name, version : ver}) + var pkgtgz = path.join(pkgroot, "package.tgz") + var pkgjson = path.join(pkgroot, "package", "package.json") + fs.stat(pkgtgz, function (er) { + if (!er) { + readJson(pkgjson, function (er, data) { + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) + + if (data) { + if (!data.name) return cb(new Error("No name provided")) + if (!data.version) return cb(new Error("No version provided")) + + // check the SHA of the package we have, to ensure it wasn't installed + // from somewhere other than the registry (eg, a fork) + if (data._shasum && dist.shasum && data._shasum !== dist.shasum) { + return fetchit() + } + } + + if (er) return fetchit() + else return cb(null, data) + }) + } else return fetchit() + }) + + function fetchit () { + if (!npm.config.get("registry")) { + return cb(new Error("Cannot fetch: "+dist.tarball)) + } + + // Use the same protocol as the registry. https registry --> https + // tarballs, but only if they're the same hostname, or else detached + // tarballs may not work. + var tb = url.parse(dist.tarball) + var rp = url.parse(npm.config.get("registry")) + if (tb.hostname === rp.hostname + && tb.protocol !== rp.protocol) { + tb.protocol = url.parse(npm.config.get("registry")).protocol + delete tb.href + } + tb = url.format(tb) + + // Only add non-shasum'ed packages if --forced. Only ancient things + // would lack this for good reasons nowadays. 
+ if (!dist.shasum && !npm.config.get("force")) { + return cb(new Error("package lacks shasum: " + data._id)) + } + return addRemoteTarball(tb, data, dist.shasum, cb) + } + } +} + +function addNameRange (name, range, data, cb) { + range = semver.validRange(range, true) + if (range === null) return cb(new Error( + "Invalid version range: " + range + )) + + log.silly("addNameRange", {name:name, range:range, hasData:!!data}) + + if (data) return next() + + getOnceFromRegistry(name, "addNameRange", setData, cb) + + function setData (er, d, json, resp) { + if (!er) { + er = errorResponse(name, resp) + } + if (er) return cb(er) + data = d + next() + } + + function next () { + log.silly( "addNameRange", "number 2" + , {name:name, range:range, hasData:!!data}) + engineFilter(data) + + log.silly("addNameRange", "versions" + , [data.name, Object.keys(data.versions || {})]) + + // if the tagged version satisfies, then use that. + var tagged = data["dist-tags"][npm.config.get("tag")] + if (tagged + && data.versions[tagged] + && semver.satisfies(tagged, range, true)) { + return addNamed(name, tagged, data.versions[tagged], cb) + } + + // find the max satisfying version. + var versions = Object.keys(data.versions || {}) + var ms = semver.maxSatisfying(versions, range, true) + if (!ms) { + return cb(installTargetsError(range, data)) + } + + // if we don't have a registry connection, try to see if + // there's a cached copy that will be ok. + addNamed(name, ms, data.versions[ms], cb) + } +} + +function installTargetsError (requested, data) { + var targets = Object.keys(data["dist-tags"]).filter(function (f) { + return (data.versions || {}).hasOwnProperty(f) + }).concat(Object.keys(data.versions || {})) + + requested = data.name + (requested ? "@'" + requested + "'" : "") + + targets = targets.length + ? "Valid install targets:\n" + JSON.stringify(targets) + "\n" + : "No valid targets found.\n" + + "Perhaps not compatible with your version of node?" 
+ + var er = new Error( "No compatible version found: " + + requested + "\n" + targets) + er.code = "ETARGET" + return er +} + +function errorResponse (name, response) { + var er + if (response.statusCode >= 400) { + er = new Error(http.STATUS_CODES[response.statusCode]) + er.statusCode = response.statusCode + er.code = "E" + er.statusCode + er.pkgid = name + } + return er +} diff -Nru nodejs-0.11.13/deps/npm/lib/cache/add-remote-git.js nodejs-0.11.15/deps/npm/lib/cache/add-remote-git.js --- nodejs-0.11.13/deps/npm/lib/cache/add-remote-git.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/add-remote-git.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,270 @@ +var mkdir = require("mkdirp") + , assert = require("assert") + , git = require("../utils/git.js") + , once = require("once") + , fs = require("graceful-fs") + , log = require("npmlog") + , path = require("path") + , url = require("url") + , chownr = require("chownr") + , zlib = require("zlib") + , crypto = require("crypto") + , npm = require("../npm.js") + , rm = require("../utils/gently-rm.js") + , inflight = require("inflight") + , getCacheStat = require("./get-stat.js") + , addLocalTarball = require("./add-local-tarball.js") + , writeStream = require("fs-write-stream-atomic") + + +// 1. cacheDir = path.join(cache,'_git-remotes',sha1(u)) +// 2. checkGitDir(cacheDir) ? 4. : 3. (rm cacheDir if necessary) +// 3. git clone --mirror u cacheDir +// 4. cd cacheDir && git fetch -a origin +// 5. git archive /tmp/random.tgz +// 6. addLocalTarball(/tmp/random.tgz) --format=tar --prefix=package/ +// silent flag is used if this should error quietly +module.exports = function addRemoteGit (u, silent, cb) { + assert(typeof u === "string", "must have git URL") + assert(typeof cb === "function", "must have callback") + + log.verbose("addRemoteGit", "u=%j silent=%j", u, silent) + var parsed = url.parse(u, true) + log.silly("addRemoteGit", "parsed", parsed) + + // git is so tricky! 
+ // if the path is like ssh://foo:22/some/path then it works, but + // it needs the ssh:// + // If the path is like ssh://foo:some/path then it works, but + // only if you remove the ssh:// + var origUrl = u + u = u.replace(/^git\+/, "") + .replace(/#.*$/, "") + + // ssh paths that are scp-style urls don't need the ssh:// + if (parsed.pathname.match(/^\/?:/)) { + u = u.replace(/^ssh:\/\//, "") + } + + cb = inflight(u, cb) + if (!cb) return log.verbose("addRemoteGit", u, "already in flight; waiting") + log.verbose("addRemoteGit", u, "not in flight; cloning") + + // figure out what we should check out. + var co = parsed.hash && parsed.hash.substr(1) || "master" + + var v = crypto.createHash("sha1").update(u).digest("hex").slice(0, 8) + v = u.replace(/[^a-zA-Z0-9]+/g, "-")+"-"+v + + log.verbose("addRemoteGit", [u, co]) + + var p = path.join(npm.config.get("cache"), "_git-remotes", v) + + // we don't need global templates when cloning. use this empty dir to specify as template dir + mkdir(path.join(npm.config.get("cache"), "_git-remotes", "_templates"), function (er) { + if (er) return cb(er) + checkGitDir(p, u, co, origUrl, silent, function (er, data) { + if (er) return cb(er, data) + + addModeRecursive(p, npm.modes.file, function (er) { + return cb(er, data) + }) + }) + }) +} + +function checkGitDir (p, u, co, origUrl, silent, cb) { + fs.stat(p, function (er, s) { + if (er) return cloneGitRemote(p, u, co, origUrl, silent, cb) + if (!s.isDirectory()) return rm(p, function (er){ + if (er) return cb(er) + cloneGitRemote(p, u, co, origUrl, silent, cb) + }) + + var args = [ "config", "--get", "remote.origin.url" ] + var env = gitEnv() + + // check for git + git.whichAndExec(args, {cwd: p, env: env}, function (er, stdout, stderr) { + var stdoutTrimmed = (stdout + "\n" + stderr).trim() + if (er || u !== stdout.trim()) { + log.warn( "`git config --get remote.origin.url` returned " + + "wrong result ("+u+")", stdoutTrimmed ) + return rm(p, function (er){ + if (er) return 
cb(er) + cloneGitRemote(p, u, co, origUrl, silent, cb) + }) + } + log.verbose("git remote.origin.url", stdoutTrimmed) + archiveGitRemote(p, u, co, origUrl, cb) + }) + }) +} + +function cloneGitRemote (p, u, co, origUrl, silent, cb) { + mkdir(p, function (er) { + if (er) return cb(er) + + var args = [ "clone", "--template=" + path.join(npm.config.get("cache"), + "_git_remotes", "_templates"), "--mirror", u, p ] + var env = gitEnv() + + // check for git + git.whichAndExec(args, {cwd: p, env: env}, function (er, stdout, stderr) { + stdout = (stdout + "\n" + stderr).trim() + if (er) { + if (silent) { + log.verbose("git clone " + u, stdout) + } else { + log.error("git clone " + u, stdout) + } + return cb(er) + } + log.verbose("git clone " + u, stdout) + archiveGitRemote(p, u, co, origUrl, cb) + }) + }) +} + +function archiveGitRemote (p, u, co, origUrl, cb) { + var archive = [ "fetch", "-a", "origin" ] + var resolve = [ "rev-list", "-n1", co ] + var env = gitEnv() + + var resolved = null + var tmp + + git.whichAndExec(archive, {cwd: p, env: env}, function (er, stdout, stderr) { + stdout = (stdout + "\n" + stderr).trim() + if (er) { + log.error("git fetch -a origin ("+u+")", stdout) + return cb(er) + } + log.verbose("git fetch -a origin ("+u+")", stdout) + tmp = path.join(npm.tmp, Date.now()+"-"+Math.random(), "tmp.tgz") + + if (process.platform === "win32") { + log.silly("verifyOwnership", "skipping for windows") + resolveHead() + } else { + getCacheStat(function(er, cs) { + if (er) { + log.error("Could not get cache stat") + return cb(er) + } + chownr(p, cs.uid, cs.gid, function(er) { + if (er) { + log.error("Failed to change folder ownership under npm cache for %s", p) + return cb(er) + } + resolveHead() + }) + }) + } + }) + + function resolveHead () { + git.whichAndExec(resolve, {cwd: p, env: env}, function (er, stdout, stderr) { + stdout = (stdout + "\n" + stderr).trim() + if (er) { + log.error("Failed resolving git HEAD (" + u + ")", stderr) + return cb(er) + } + 
log.verbose("git rev-list -n1 " + co, stdout) + var parsed = url.parse(origUrl) + parsed.hash = stdout + resolved = url.format(parsed) + + if (parsed.protocol !== "git:") { + resolved = "git+" + resolved + } + + // https://github.com/npm/npm/issues/3224 + // node incorrectly sticks a / at the start of the path + // We know that the host won't change, so split and detect this + var spo = origUrl.split(parsed.host) + var spr = resolved.split(parsed.host) + if (spo[1].charAt(0) === ":" && spr[1].charAt(0) === "/") + spr[1] = spr[1].slice(1) + resolved = spr.join(parsed.host) + + log.verbose("resolved git url", resolved) + next() + }) + } + + function next () { + mkdir(path.dirname(tmp), function (er) { + if (er) return cb(er) + var gzip = zlib.createGzip({ level: 9 }) + var args = ["archive", co, "--format=tar", "--prefix=package/"] + var out = writeStream(tmp) + var env = gitEnv() + cb = once(cb) + var cp = git.spawn(args, { env: env, cwd: p }) + cp.on("error", cb) + cp.stderr.on("data", function(chunk) { + log.silly(chunk.toString(), "git archive") + }) + + cp.stdout.pipe(gzip).pipe(out).on("close", function() { + addLocalTarball(tmp, null, null, function(er, data) { + if (data) data._resolved = resolved + cb(er, data) + }) + }) + }) + } +} + +var gitEnv_ +function gitEnv () { + // git responds to env vars in some weird ways in post-receive hooks + // so don't carry those along. + if (gitEnv_) return gitEnv_ + gitEnv_ = {} + for (var k in process.env) { + if (!~["GIT_PROXY_COMMAND","GIT_SSH","GIT_SSL_NO_VERIFY","GIT_SSL_CAINFO"].indexOf(k) && k.match(/^GIT/)) continue + gitEnv_[k] = process.env[k] + } + return gitEnv_ +} + +// similar to chmodr except it add permissions rather than overwriting them +// adapted from https://github.com/isaacs/chmodr/blob/master/chmodr.js +function addModeRecursive(p, mode, cb) { + fs.readdir(p, function (er, children) { + // Any error other than ENOTDIR means it's not readable, or doesn't exist. + // Give up. 
+ if (er && er.code !== "ENOTDIR") return cb(er) + if (er || !children.length) return addMode(p, mode, cb) + + var len = children.length + var errState = null + children.forEach(function (child) { + addModeRecursive(path.resolve(p, child), mode, then) + }) + + function then (er) { + if (errState) return undefined + if (er) return cb(errState = er) + if (--len === 0) return addMode(p, dirMode(mode), cb) + } + }) +} + +function addMode(p, mode, cb) { + fs.stat(p, function (er, stats) { + if (er) return cb(er) + mode = stats.mode | mode + fs.chmod(p, mode, cb) + }) +} + +// taken from https://github.com/isaacs/chmodr/blob/master/chmodr.js +function dirMode(mode) { + if (mode & parseInt("0400", 8)) mode |= parseInt("0100", 8) + if (mode & parseInt( "040", 8)) mode |= parseInt( "010", 8) + if (mode & parseInt( "04", 8)) mode |= parseInt( "01", 8) + return mode +} diff -Nru nodejs-0.11.13/deps/npm/lib/cache/add-remote-tarball.js nodejs-0.11.15/deps/npm/lib/cache/add-remote-tarball.js --- nodejs-0.11.13/deps/npm/lib/cache/add-remote-tarball.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/add-remote-tarball.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,111 @@ +var mkdir = require("mkdirp") + , assert = require("assert") + , log = require("npmlog") + , path = require("path") + , sha = require("sha") + , retry = require("retry") + , createWriteStream = require("fs-write-stream-atomic") + , npm = require("../npm.js") + , registry = npm.registry + , inflight = require("inflight") + , addLocalTarball = require("./add-local-tarball.js") + , cacheFile = require("npm-cache-filename") + +module.exports = addRemoteTarball + +function addRemoteTarball (u, pkgData, shasum, cb_) { + assert(typeof u === "string", "must have module URL") + assert(typeof cb_ === "function", "must have callback") + + function cb (er, data) { + if (data) { + data._from = u + data._shasum = data._shasum || shasum + data._resolved = u + } + cb_(er, data) + } + + cb_ = 
inflight(u, cb_) + if (!cb_) return log.verbose("addRemoteTarball", u, "already in flight; waiting") + log.verbose("addRemoteTarball", u, "not in flight; adding") + + // XXX Fetch direct to cache location, store tarballs under + // ${cache}/registry.npmjs.org/pkg/-/pkg-1.2.3.tgz + var tmp = cacheFile(npm.tmp, u) + + function next (er, resp, shasum) { + if (er) return cb(er) + addLocalTarball(tmp, pkgData, shasum, cb) + } + + log.verbose("addRemoteTarball", [u, shasum]) + mkdir(path.dirname(tmp), function (er) { + if (er) return cb(er) + addRemoteTarball_(u, tmp, shasum, next) + }) +} + +function addRemoteTarball_(u, tmp, shasum, cb) { + // Tuned to spread 3 attempts over about a minute. + // See formula at . + var operation = retry.operation({ + retries: npm.config.get("fetch-retries") + , factor: npm.config.get("fetch-retry-factor") + , minTimeout: npm.config.get("fetch-retry-mintimeout") + , maxTimeout: npm.config.get("fetch-retry-maxtimeout") + }) + + operation.attempt(function (currentAttempt) { + log.info("retry", "fetch attempt " + currentAttempt + + " at " + (new Date()).toLocaleTimeString()) + fetchAndShaCheck(u, tmp, shasum, function (er, response, shasum) { + // Only retry on 408, 5xx or no `response`. 
+ var sc = response && response.statusCode + var statusRetry = !sc || (sc === 408 || sc >= 500) + if (er && statusRetry && operation.retry(er)) { + log.info("retry", "will retry, error on last attempt: " + er) + return + } + cb(er, response, shasum) + }) + }) +} + +function fetchAndShaCheck (u, tmp, shasum, cb) { + registry.fetch(u, null, function (er, response) { + if (er) { + log.error("fetch failed", u) + return cb(er, response) + } + + var tarball = createWriteStream(tmp, { mode : npm.modes.file }) + tarball.on("error", function (er) { + cb(er) + tarball.destroy() + }) + + tarball.on("finish", function () { + if (!shasum) { + // Well, we weren't given a shasum, so at least sha what we have + // in case we want to compare it to something else later + return sha.get(tmp, function (er, shasum) { + log.silly("fetchAndShaCheck", "shasum", shasum) + cb(er, response, shasum) + }) + } + + // validate that the url we just downloaded matches the expected shasum. + log.silly("fetchAndShaCheck", "shasum", shasum) + sha.check(tmp, shasum, function (er) { + if (er && er.message) { + // add original filename for better debuggability + er.message = er.message + "\n" + "From: " + u + } + return cb(er, response, shasum) + }) + }) + + response.pipe(tarball) + }) +} diff -Nru nodejs-0.11.13/deps/npm/lib/cache/cached-package-root.js nodejs-0.11.15/deps/npm/lib/cache/cached-package-root.js --- nodejs-0.11.13/deps/npm/lib/cache/cached-package-root.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/cached-package-root.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +var assert = require("assert") +var resolve = require("path").resolve + +var npm = require("../npm.js") + +module.exports = getCacheRoot + +function getCacheRoot (data) { + assert(data, "must pass package metadata") + assert(data.name, "package metadata must include name") + assert(data.version, "package metadata must include version") + + return resolve(npm.cache, data.name, data.version) 
+} diff -Nru nodejs-0.11.13/deps/npm/lib/cache/get-stat.js nodejs-0.11.15/deps/npm/lib/cache/get-stat.js --- nodejs-0.11.13/deps/npm/lib/cache/get-stat.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/get-stat.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +var mkdir = require("mkdirp") + , fs = require("graceful-fs") + , log = require("npmlog") + , chownr = require("chownr") + , npm = require("../npm.js") + , inflight = require("inflight") + +// to maintain the cache dir's permissions consistently. +var cacheStat = null +module.exports = function getCacheStat (cb) { + if (cacheStat) return cb(null, cacheStat) + + fs.stat(npm.cache, function (er, st) { + if (er) return makeCacheDir(cb) + if (!st.isDirectory()) { + log.error("getCacheStat", "invalid cache dir %j", npm.cache) + return cb(er) + } + return cb(null, cacheStat = st) + }) +} + +function makeCacheDir (cb) { + cb = inflight("makeCacheDir", cb) + if (!cb) return log.verbose("getCacheStat", "cache creation already in flight; waiting") + log.verbose("getCacheStat", "cache creation not in flight; initializing") + + if (!process.getuid) return mkdir(npm.cache, function (er) { + return cb(er, {}) + }) + + var uid = +process.getuid() + , gid = +process.getgid() + + if (uid === 0) { + if (process.env.SUDO_UID) uid = +process.env.SUDO_UID + if (process.env.SUDO_GID) gid = +process.env.SUDO_GID + } + if (uid !== 0 || !process.env.HOME) { + cacheStat = {uid: uid, gid: gid} + return mkdir(npm.cache, afterMkdir) + } + + fs.stat(process.env.HOME, function (er, st) { + if (er) { + log.error("makeCacheDir", "homeless?") + return cb(er) + } + cacheStat = st + log.silly("makeCacheDir", "cache dir uid, gid", [st.uid, st.gid]) + return mkdir(npm.cache, afterMkdir) + }) + + function afterMkdir (er, made) { + if (er || !cacheStat || isNaN(cacheStat.uid) || isNaN(cacheStat.gid)) { + return cb(er, cacheStat) + } + + if (!made) return cb(er, cacheStat) + + // ensure that the ownership is 
correct. + chownr(made, cacheStat.uid, cacheStat.gid, function (er) { + return cb(er, cacheStat) + }) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/cache/maybe-github.js nodejs-0.11.15/deps/npm/lib/cache/maybe-github.js --- nodejs-0.11.13/deps/npm/lib/cache/maybe-github.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache/maybe-github.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +var assert = require("assert") + , log = require("npmlog") + , addRemoteGit = require("./add-remote-git.js") + +module.exports = function maybeGithub (p, cb) { + assert(typeof p === "string", "must pass package name") + assert(typeof cb === "function", "must pass callback") + + var u = "git://github.com/" + p + log.info("maybeGithub", "Attempting %s from %s", p, u) + + return addRemoteGit(u, true, function (er, data) { + if (er) { + var upriv = "git+ssh://git@github.com:" + p + log.info("maybeGithub", "Attempting %s from %s", p, upriv) + + return addRemoteGit(upriv, false, function (er, data) { + if (er) return cb(er) + + success(upriv, data) + }) + } + + success(u, data) + }) + + function success (u, data) { + data._from = u + data._fromGithub = true + return cb(null, data) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/cache.js nodejs-0.11.15/deps/npm/lib/cache.js --- nodejs-0.11.13/deps/npm/lib/cache.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/cache.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,15 +7,14 @@ // /* -fetching a url: -1. Check for url in inFlightUrls. If present, add cb, and return. -2. create inFlightURL list -3. Acquire lock at {cache}/{sha(url)}.lock +fetching a URL: +1. Check for URL in inflight URLs. If present, add cb, and return. +2. Acquire lock at {cache}/{sha(url)}.lock retries = {cache-lock-retries, def=3} stale = {cache-lock-stale, def=30000} wait = {cache-lock-wait, def=100} -4. if lock can't be acquired, then fail -5. fetch url, clear lock, call cbs +3. if lock can't be acquired, then fail +4. 
fetch url, clear lock, call cbs cache folders: 1. urls: http!/server.com/path/to/thing @@ -48,43 +47,41 @@ adding a local tarball: 1. untar to tmp/random/{blah} 2. goto folder(2) + +adding a namespaced package: +1. lookup registry for @namespace +2. namespace_registry.get('name') +3. add url(namespace/latest.tarball) */ exports = module.exports = cache -cache.read = read -cache.clean = clean + cache.unpack = unpack -cache.lock = lock -cache.unlock = unlock +cache.clean = clean +cache.read = read -var mkdir = require("mkdirp") - , spawn = require("child_process").spawn - , exec = require("child_process").execFile - , once = require("once") - , fetch = require("./utils/fetch.js") - , npm = require("./npm.js") +var npm = require("./npm.js") , fs = require("graceful-fs") + , writeFileAtomic = require("write-file-atomic") + , assert = require("assert") , rm = require("./utils/gently-rm.js") , readJson = require("read-package-json") - , registry = npm.registry , log = require("npmlog") , path = require("path") - , sha = require("sha") , asyncMap = require("slide").asyncMap - , semver = require("semver") , tar = require("./utils/tar.js") , fileCompletion = require("./utils/completion/file-completion.js") - , url = require("url") - , chownr = require("chownr") - , lockFile = require("lockfile") - , crypto = require("crypto") - , retry = require("retry") - , zlib = require("zlib") - , chmodr = require("chmodr") - , which = require("which") - , isGitUrl = require("./utils/is-git-url.js") - , pathIsInside = require("path-is-inside") - , http = require("http") + , deprCheck = require("./utils/depr-check.js") + , addNamed = require("./cache/add-named.js") + , addLocal = require("./cache/add-local.js") + , addRemoteTarball = require("./cache/add-remote-tarball.js") + , addRemoteGit = require("./cache/add-remote-git.js") + , maybeGithub = require("./cache/maybe-github.js") + , inflight = require("inflight") + , realizePackageSpecifier = require("realize-package-specifier") + , 
npa = require("npm-package-arg") + , getStat = require("./cache/get-stat.js") + , cachedPackageRoot = require("./cache/cached-package-root.js") cache.usage = "npm cache add " + "\nnpm cache add " @@ -121,9 +118,8 @@ switch (cmd) { case "rm": case "clear": case "clean": return clean(args, cb) case "list": case "sl": case "ls": return ls(args, cb) - case "add": return add(args, cb) - default: return cb(new Error( - "Invalid cache action: "+cmd)) + case "add": return add(args, npm.prefix, cb) + default: return cb("Usage: "+cache.usage) } } @@ -131,37 +127,63 @@ // just do a readJson and return. // if they're not, then fetch them from the registry. function read (name, ver, forceBypass, cb) { - if (typeof cb !== "function") cb = forceBypass, forceBypass = true - var jsonFile = path.join(npm.cache, name, ver, "package", "package.json") + assert(typeof name === "string", "must include name of module to install") + assert(typeof cb === "function", "must include callback") + + if (forceBypass === undefined || forceBypass === null) forceBypass = true + + var root = cachedPackageRoot({name : name, version : ver}) function c (er, data) { + log.silly("cache", "addNamed cb", name+"@"+ver) + if (er) log.verbose("cache", "addNamed error for", name+"@"+ver, er) + if (data) deprCheck(data) + return cb(er, data) } if (forceBypass && npm.config.get("force")) { log.verbose("using force", "skipping cache") - return addNamed(name, ver, c) + return addNamed(name, ver, null, c) } - readJson(jsonFile, function (er, data) { - er = needName(er, data) - er = needVersion(er, data) + readJson(path.join(root, "package", "package.json"), function (er, data) { if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) - if (er) return addNamed(name, ver, c) - deprCheck(data) - c(er, data) + + if (data) { + if (!data.name) return cb(new Error("No name provided")) + if (!data.version) return cb(new Error("No version provided")) + } + + if (er) return addNamed(name, ver, null, c) + else 
c(er, data) }) } +function normalize (args) { + var normalized = "" + if (args.length > 0) { + var a = npa(args[0]) + if (a.name) normalized = a.name + if (a.rawSpec) normalized = [normalized, a.rawSpec].join("/") + if (args.length > 1) normalized = [normalized].concat(args.slice(1)).join("/") + } + + if (normalized.substr(-1) === "/") { + normalized = normalized.substr(0, normalized.length - 1) + } + log.silly("ls", "normalized", normalized) + + return normalized +} + // npm cache ls [] function ls (args, cb) { - args = args.join("/").split("@").join("/") - if (args.substr(-1) === "/") args = args.substr(0, args.length - 1) var prefix = npm.config.get("cache") - if (0 === prefix.indexOf(process.env.HOME)) { + if (prefix.indexOf(process.env.HOME) === 0) { prefix = "~" + prefix.substr(process.env.HOME.length) } - ls_(args, npm.config.get("depth"), function (er, files) { + ls_(normalize(args), npm.config.get("depth"), function (er, files) { console.log(files.map(function (f) { return path.join(prefix, f) }).join("\n").trim()) @@ -176,11 +198,11 @@ // npm cache clean [] function clean (args, cb) { - if (!cb) cb = args, args = [] + assert(typeof cb === "function", "must include callback") + if (!args) args = [] - args = args.join("/").split("@").join("/") - if (args.substr(-1) === "/") args = args.substr(0, args.length - 1) - var f = path.join(npm.cache, path.normalize(args)) + + var f = path.join(npm.cache, path.normalize(normalize(args))) if (f === npm.cache) { fs.readdir(npm.cache, function (er, files) { if (er) return cb() @@ -191,27 +213,29 @@ }) , rm, cb ) }) - } else rm(path.join(npm.cache, path.normalize(args)), cb) + } else rm(path.join(npm.cache, path.normalize(normalize(args))), cb) } // npm cache add // npm cache add // npm cache add // npm cache add -cache.add = function (pkg, ver, scrub, cb) { - if (typeof cb !== "function") cb = scrub, scrub = false - if (typeof cb !== "function") cb = ver, ver = null +cache.add = function (pkg, ver, where, scrub, cb) { 
+ assert(typeof pkg === "string", "must include name of package to install") + assert(typeof cb === "function", "must include callback") + if (scrub) { return clean([], function (er) { if (er) return cb(er) - add([pkg, ver], cb) + add([pkg, ver], where, cb) }) } - log.verbose("cache add", [pkg, ver]) - return add([pkg, ver], cb) + return add([pkg, ver], where, cb) } -function add (args, cb) { + +var adding = 0 +function add (args, where, cb) { // this is hot code. almost everything passes through here. // the args can be any of: // ["url"] @@ -227,1155 +251,70 @@ + " npm cache add @\n" + " npm cache add \n" + " npm cache add \n" - , name , spec + log.silly("cache add", "args", args) + if (args[1] === undefined) args[1] = null // at this point the args length must ==2 if (args[1] !== null) { - name = args[0] - spec = args[1] + spec = args[0]+"@"+args[1] } else if (args.length === 2) { spec = args[0] } - log.verbose("cache add", "name=%j spec=%j args=%j", name, spec, args) - - if (!name && !spec) return cb(usage) - - cb = afterAdd([name, spec], cb) - - // see if the spec is a url - // otherwise, treat as name@version - var p = url.parse(spec) || {} - log.verbose("parsed url", p) - - // If there's a /, and it's a path, then install the path. - // If not, and there's a @, it could be that we got name@http://blah - // in that case, we will not have a protocol now, but if we - // split and check, we will. - if (!name && !p.protocol) { - if (spec.indexOf("/") !== -1 || - process.platform === "win32" && spec.indexOf("\\") !== -1) { - return maybeFile(spec, p, cb) - } else if (spec.indexOf("@") !== -1) { - return maybeAt(spec, cb) - } - } - - add_(name, spec, p, cb) -} - -function afterAdd (arg, cb) { return function (er, data) { - if (er || !data || !data.name || !data.version) { - return cb(er, data) - } - - // Save the resolved, shasum, etc. into the data so that the next - // time we load from this cached data, we have all the same info. 
- var name = data.name - var ver = data.version - var pj = path.join(npm.cache, name, ver, "package", "package.json") - fs.writeFile(pj, JSON.stringify(data), "utf8", function (er) { - cb(er, data) - }) -}} - - - -function maybeFile (spec, p, cb) { - fs.stat(spec, function (er, stat) { - if (!er) { - // definitely a local thing - addLocal(spec, cb) - } else if (er && spec.indexOf("@") !== -1) { - // bar@baz/loofa - maybeAt(spec, cb) - } else { - // Already know it's not a url, so must be local - addLocal(spec, cb) - } - }) -} - -function maybeAt (spec, cb) { - var tmp = spec.split("@") - - // split name@2.3.4 only if name is a valid package name, - // don't split in case of "./test@example.com/" (local path) - var name = tmp.shift() - spec = tmp.join("@") - return add([name, spec], cb) -} - -function add_ (name, spec, p, cb) { - switch (p.protocol) { - case "http:": - case "https:": - return addRemoteTarball(spec, null, name, cb) - - default: - if (isGitUrl(p)) - return addRemoteGit(spec, p, name, false, cb) - - // if we have a name and a spec, then try name@spec - // if not, then try just spec (which may try name@"" if not found) - if (name) { - addNamed(name, spec, cb) - } else { - addLocal(spec, cb) - } - } -} - -function fetchAndShaCheck (u, tmp, shasum, cb) { - fetch(u, tmp, function (er, response) { - if (er) { - log.error("fetch failed", u) - return cb(er, response) - } - - if (!shasum) { - // Well, we weren't given a shasum, so at least sha what we have - // in case we want to compare it to something else later - return sha.get(tmp, function (er, shasum) { - cb(er, response, shasum) - }) - } - - // validate that the url we just downloaded matches the expected shasum. 
- sha.check(tmp, shasum, function (er) { - if (er != null && er.message) { - // add original filename for better debuggability - er.message = er.message + '\n' + 'From: ' + u - } - return cb(er, response, shasum) - }) - }) -} - -// Only have a single download action at once for a given url -// additional calls stack the callbacks. -var inFlightURLs = {} -function addRemoteTarball (u, shasum, name, version, cb_) { - if (typeof cb_ !== "function") cb_ = version, version = "" - if (typeof cb_ !== "function") cb_ = name, name = "" - if (typeof cb_ !== "function") cb_ = shasum, shasum = null - - if (!inFlightURLs[u]) inFlightURLs[u] = [] - var iF = inFlightURLs[u] - iF.push(cb_) - if (iF.length > 1) return - - function cb (er, data) { - if (data) { - data._from = u - data._shasum = data._shasum || shasum - data._resolved = u - } - unlock(u, function () { - var c - while (c = iF.shift()) c(er, data) - delete inFlightURLs[u] - }) - } - - var tmp = path.join(npm.tmp, Date.now()+"-"+Math.random(), "tmp.tgz") - - lock(u, function (er) { - if (er) return cb(er) - - log.verbose("addRemoteTarball", [u, shasum]) - mkdir(path.dirname(tmp), function (er) { - if (er) return cb(er) - addRemoteTarball_(u, tmp, shasum, done) - }) - }) - - function done (er, resp, shasum) { - if (er) return cb(er) - addLocalTarball(tmp, name, version, shasum, cb) - } -} - -function addRemoteTarball_(u, tmp, shasum, cb) { - // Tuned to spread 3 attempts over about a minute. - // See formula at . - var operation = retry.operation - ( { retries: npm.config.get("fetch-retries") - , factor: npm.config.get("fetch-retry-factor") - , minTimeout: npm.config.get("fetch-retry-mintimeout") - , maxTimeout: npm.config.get("fetch-retry-maxtimeout") }) - - operation.attempt(function (currentAttempt) { - log.info("retry", "fetch attempt " + currentAttempt - + " at " + (new Date()).toLocaleTimeString()) - fetchAndShaCheck(u, tmp, shasum, function (er, response, shasum) { - // Only retry on 408, 5xx or no `response`. 
- var sc = response && response.statusCode - var statusRetry = !sc || (sc === 408 || sc >= 500) - if (er && statusRetry && operation.retry(er)) { - log.info("retry", "will retry, error on last attempt: " + er) - return - } - cb(er, response, shasum) - }) - }) -} - -// 1. cacheDir = path.join(cache,'_git-remotes',sha1(u)) -// 2. checkGitDir(cacheDir) ? 4. : 3. (rm cacheDir if necessary) -// 3. git clone --mirror u cacheDir -// 4. cd cacheDir && git fetch -a origin -// 5. git archive /tmp/random.tgz -// 6. addLocalTarball(/tmp/random.tgz) --format=tar --prefix=package/ -// silent flag is used if this should error quietly -function addRemoteGit (u, parsed, name, silent, cb_) { - if (typeof cb_ !== "function") cb_ = name, name = null - - if (!inFlightURLs[u]) inFlightURLs[u] = [] - var iF = inFlightURLs[u] - iF.push(cb_) - if (iF.length > 1) return - - // git is so tricky! - // if the path is like ssh://foo:22/some/path then it works, but - // it needs the ssh:// - // If the path is like ssh://foo:some/path then it works, but - // only if you remove the ssh:// - var origUrl = u - u = u.replace(/^git\+/, "") - .replace(/#.*$/, "") - - // ssh paths that are scp-style urls don't need the ssh:// - if (parsed.pathname.match(/^\/?:/)) { - u = u.replace(/^ssh:\/\//, "") - } - - function cb (er, data) { - unlock(u, function () { - var c - while (c = iF.shift()) c(er, data) - delete inFlightURLs[origUrl] - }) - } - - lock(u, function (er) { - if (er) return cb(er) - - // figure out what we should check out. 
- var co = parsed.hash && parsed.hash.substr(1) || "master" - - var v = crypto.createHash("sha1").update(u).digest("hex").slice(0, 8) - v = u.replace(/[^a-zA-Z0-9]+/g, '-') + '-' + v - - log.verbose("addRemoteGit", [u, co]) - - var p = path.join(npm.config.get("cache"), "_git-remotes", v) - - checkGitDir(p, u, co, origUrl, silent, function(er, data) { - chmodr(p, npm.modes.file, function(erChmod) { - if (er) return cb(er, data) - return cb(erChmod, data) - }) - }) - }) -} - -function checkGitDir (p, u, co, origUrl, silent, cb) { - fs.stat(p, function (er, s) { - if (er) return cloneGitRemote(p, u, co, origUrl, silent, cb) - if (!s.isDirectory()) return rm(p, function (er){ - if (er) return cb(er) - cloneGitRemote(p, u, co, origUrl, silent, cb) - }) - - var git = npm.config.get("git") - var args = [ "config", "--get", "remote.origin.url" ] - var env = gitEnv() - - // check for git - which(git, function (err) { - if (err) { - err.code = "ENOGIT" - return cb(err) - } - exec(git, args, {cwd: p, env: env}, function (er, stdout, stderr) { - stdoutTrimmed = (stdout + "\n" + stderr).trim() - if (er || u !== stdout.trim()) { - log.warn( "`git config --get remote.origin.url` returned " - + "wrong result ("+u+")", stdoutTrimmed ) - return rm(p, function (er){ - if (er) return cb(er) - cloneGitRemote(p, u, co, origUrl, silent, cb) - }) - } - log.verbose("git remote.origin.url", stdoutTrimmed) - archiveGitRemote(p, u, co, origUrl, cb) - }) - }) - }) -} - -function cloneGitRemote (p, u, co, origUrl, silent, cb) { - mkdir(p, function (er) { - if (er) return cb(er) - - var git = npm.config.get("git") - var args = [ "clone", "--mirror", u, p ] - var env = gitEnv() - - // check for git - which(git, function (err) { - if (err) { - err.code = "ENOGIT" - return cb(err) - } - exec(git, args, {cwd: p, env: env}, function (er, stdout, stderr) { - stdout = (stdout + "\n" + stderr).trim() - if (er) { - if (silent) { - log.verbose("git clone " + u, stdout) - } else { - log.error("git clone " 
+ u, stdout) - } - return cb(er) - } - log.verbose("git clone " + u, stdout) - archiveGitRemote(p, u, co, origUrl, cb) - }) - }) - }) -} - -function archiveGitRemote (p, u, co, origUrl, cb) { - var git = npm.config.get("git") - var archive = [ "fetch", "-a", "origin" ] - var resolve = [ "rev-list", "-n1", co ] - var env = gitEnv() - - var errState = null - var n = 0 - var resolved = null - var tmp - - exec(git, archive, {cwd: p, env: env}, function (er, stdout, stderr) { - stdout = (stdout + "\n" + stderr).trim() - if (er) { - log.error("git fetch -a origin ("+u+")", stdout) - return cb(er) - } - log.verbose("git fetch -a origin ("+u+")", stdout) - tmp = path.join(npm.tmp, Date.now()+"-"+Math.random(), "tmp.tgz") - verifyOwnership() - }) - - function verifyOwnership() { - if (process.platform === "win32") { - log.silly("verifyOwnership", "skipping for windows") - resolveHead() - } else { - getCacheStat(function(er, cs) { - if (er) { - log.error("Could not get cache stat") - return cb(er) - } - chownr(p, cs.uid, cs.gid, function(er) { - if (er) { - log.error("Failed to change folder ownership under npm cache for %s", p) - return cb(er) - } - resolveHead() - }) - }) - } - } - - function resolveHead () { - exec(git, resolve, {cwd: p, env: env}, function (er, stdout, stderr) { - stdout = (stdout + "\n" + stderr).trim() - if (er) { - log.error("Failed resolving git HEAD (" + u + ")", stderr) - return cb(er) - } - log.verbose("git rev-list -n1 " + co, stdout) - var parsed = url.parse(origUrl) - parsed.hash = stdout - resolved = url.format(parsed) - - // https://github.com/npm/npm/issues/3224 - // node incorrectly sticks a / at the start of the path - // We know that the host won't change, so split and detect this - var spo = origUrl.split(parsed.host) - var spr = resolved.split(parsed.host) - if (spo[1].charAt(0) === ':' && spr[1].charAt(0) === '/') - spr[1] = spr[1].slice(1) - resolved = spr.join(parsed.host) - - log.verbose('resolved git url', resolved) - next() - }) - 
} - - function next () { - mkdir(path.dirname(tmp), function (er) { - if (er) return cb(er) - var gzip = zlib.createGzip({ level: 9 }) - var git = npm.config.get("git") - var args = ["archive", co, "--format=tar", "--prefix=package/"] - var out = fs.createWriteStream(tmp) - var env = gitEnv() - cb = once(cb) - var cp = spawn(git, args, { env: env, cwd: p }) - cp.on("error", cb) - cp.stderr.on("data", function(chunk) { - log.silly(chunk.toString(), "git archive") - }) - - cp.stdout.pipe(gzip).pipe(out).on("close", function() { - addLocalTarball(tmp, function(er, data) { - if (data) data._resolved = resolved - cb(er, data) - }) - }) - }) - } -} - -var gitEnv_ -function gitEnv () { - // git responds to env vars in some weird ways in post-receive hooks - // so don't carry those along. - if (gitEnv_) return gitEnv_ - gitEnv_ = {} - for (var k in process.env) { - if (!~['GIT_PROXY_COMMAND','GIT_SSH','GIT_SSL_NO_VERIFY'].indexOf(k) && k.match(/^GIT/)) continue - gitEnv_[k] = process.env[k] - } - return gitEnv_ -} - - -// only have one request in flight for a given -// name@blah thing. -var inFlightNames = {} -function addNamed (name, x, data, cb_) { - if (typeof cb_ !== "function") cb_ = data, data = null - log.verbose("addNamed", [name, x]) - - var k = name + "@" + x - if (!inFlightNames[k]) inFlightNames[k] = [] - var iF = inFlightNames[k] - iF.push(cb_) - if (iF.length > 1) return - - function cb (er, data) { - if (data && !data._fromGithub) data._from = k - unlock(k, function () { - var c - while (c = iF.shift()) c(er, data) - delete inFlightNames[k] - }) - } - - log.verbose("addNamed", [semver.valid(x), semver.validRange(x)]) - lock(k, function (er, fd) { - if (er) return cb(er) - - var fn = ( semver.valid(x, true) ? addNameVersion - : semver.validRange(x, true) ? 
addNameRange - : addNameTag - ) - fn(name, x, data, cb) - }) -} - -function addNameTag (name, tag, data, cb_) { - if (typeof cb_ !== "function") cb_ = data, data = null - log.info("addNameTag", [name, tag]) - var explicit = true - if (!tag) { - explicit = false - tag = npm.config.get("tag") - } - - function cb(er, data) { - // might be username/project - // in that case, try it as a github url. - if (er && tag.split("/").length === 2) { - return maybeGithub(tag, name, er, cb_) - } - return cb_(er, data) - } - - registry.get(name, function (er, data, json, response) { - if (!er) { - er = errorResponse(name, response) - } - if (er) return cb(er) - engineFilter(data) - if (data["dist-tags"] && data["dist-tags"][tag] - && data.versions[data["dist-tags"][tag]]) { - var ver = data["dist-tags"][tag] - return addNamed(name, ver, data.versions[ver], cb) - } - if (!explicit && Object.keys(data.versions).length) { - return addNamed(name, "*", data, cb) - } - - er = installTargetsError(tag, data) - return cb(er) - }) -} - - -function engineFilter (data) { - var npmv = npm.version - , nodev = npm.config.get("node-version") - , strict = npm.config.get("engine-strict") - - if (!nodev || npm.config.get("force")) return data - - Object.keys(data.versions || {}).forEach(function (v) { - var eng = data.versions[v].engines - if (!eng) return - if (!strict && !data.versions[v].engineStrict) return - if (eng.node && !semver.satisfies(nodev, eng.node, true) - || eng.npm && !semver.satisfies(npmv, eng.npm, true)) { - delete data.versions[v] - } - }) -} - -function errorResponse (name, response) { - if (response.statusCode >= 400) { - var er = new Error(http.STATUS_CODES[response.statusCode]) - er.statusCode = response.statusCode - er.code = "E" + er.statusCode - er.pkgid = name - } - return er -} - -function addNameRange (name, range, data, cb) { - if (typeof cb !== "function") cb = data, data = null - - range = semver.validRange(range, true) - if (range === null) return cb(new Error( - 
"Invalid version range: "+range)) - - log.silly("addNameRange", {name:name, range:range, hasData:!!data}) - - if (data) return next() - registry.get(name, function (er, d, json, response) { - if (!er) { - er = errorResponse(name, response) - } - if (er) return cb(er) - data = d - next() - }) - - function next () { - log.silly( "addNameRange", "number 2" - , {name:name, range:range, hasData:!!data}) - engineFilter(data) - - log.silly("addNameRange", "versions" - , [data.name, Object.keys(data.versions || {})]) - - // if the tagged version satisfies, then use that. - var tagged = data["dist-tags"][npm.config.get("tag")] - if (tagged - && data.versions[tagged] - && semver.satisfies(tagged, range, true)) { - return addNamed(name, tagged, data.versions[tagged], cb) - } - - // find the max satisfying version. - var versions = Object.keys(data.versions || {}) - var ms = semver.maxSatisfying(versions, range, true) - if (!ms) { - return cb(installTargetsError(range, data)) - } - - // if we don't have a registry connection, try to see if - // there's a cached copy that will be ok. - addNamed(name, ms, data.versions[ms], cb) - } -} - -function installTargetsError (requested, data) { - var targets = Object.keys(data["dist-tags"]).filter(function (f) { - return (data.versions || {}).hasOwnProperty(f) - }).concat(Object.keys(data.versions || {})) - - requested = data.name + (requested ? "@'" + requested + "'" : "") - - targets = targets.length - ? "Valid install targets:\n" + JSON.stringify(targets) + "\n" - : "No valid targets found.\n" - + "Perhaps not compatible with your version of node?" 
- - var er = new Error( "No compatible version found: " - + requested + "\n" + targets) - er.code = "ETARGET" - return er -} - -function addNameVersion (name, v, data, cb) { - if (typeof cb !== "function") cb = data, data = null - - var ver = semver.valid(v, true) - if (!ver) return cb(new Error("Invalid version: "+v)) - - var response - - if (data) { - response = null - return next() - } - registry.get(name, function (er, d, json, resp) { - if (!er) { - er = errorResponse(name, resp) - } - if (er) return cb(er) - data = d && d.versions[ver] - if (!data) { - er = new Error('version not found: ' + name + '@' + ver) - er.package = name - er.statusCode = 404 - return cb(er) - } - response = resp - next() - }) - - function next () { - deprCheck(data) - var dist = data.dist - - if (!dist) return cb(new Error("No dist in "+data._id+" package")) - - if (!dist.tarball) return cb(new Error( - "No dist.tarball in " + data._id + " package")) - - if ((response && response.statusCode !== 304) || npm.config.get("force")) { - return fetchit() - } - - // we got cached data, so let's see if we have a tarball. - var pkgroot = path.join(npm.cache, name, ver) - var pkgtgz = path.join(pkgroot, "package.tgz") - var pkgjson = path.join(pkgroot, "package", "package.json") - fs.stat(pkgtgz, function (er, s) { - if (!er) { - readJson(pkgjson, function (er, data) { - er = needName(er, data) - er = needVersion(er, data) - if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") - return cb(er) - if (er) return fetchit() - // check the SHA of the package we have, to ensure it wasn't installed - // from somewhere other than the registry (eg, a fork) - if (data._shasum && dist.shasum && data._shasum !== dist.shasum) - return fetchit() - return cb(null, data) - }) - } else return fetchit() - }) - - function fetchit () { - if (!npm.config.get("registry")) { - return cb(new Error("Cannot fetch: "+dist.tarball)) - } - - // use the same protocol as the registry. 
- // https registry --> https tarballs, but - // only if they're the same hostname, or else - // detached tarballs may not work. - var tb = url.parse(dist.tarball) - var rp = url.parse(npm.config.get("registry")) - if (tb.hostname === rp.hostname - && tb.protocol !== rp.protocol) { - tb.protocol = url.parse(npm.config.get("registry")).protocol - delete tb.href - } - tb = url.format(tb) - - // only add non-shasum'ed packages if --forced. - // only ancient things would lack this for good reasons nowadays. - if (!dist.shasum && !npm.config.get("force")) { - return cb(new Error("package lacks shasum: " + data._id)) - } - return addRemoteTarball( tb - , dist.shasum - , name - , ver - , cb ) - } - } -} - -function addLocal (p, name, cb_) { - if (typeof cb_ !== "function") cb_ = name, name = "" - - function cb (er, data) { - unlock(p, function () { - if (er) { - // if it doesn't have a / in it, it might be a - // remote thing. - if (p.indexOf("/") === -1 && p.charAt(0) !== "." - && (process.platform !== "win32" || p.indexOf("\\") === -1)) { - return addNamed(p, "", cb_) - } - log.error("addLocal", "Could not install %s", p) - return cb_(er) - } - if (data && !data._fromGithub) data._from = p - return cb_(er, data) - }) - } - - lock(p, function (er) { - if (er) return cb(er) - // figure out if this is a folder or file. - fs.stat(p, function (er, s) { - if (er) { - // might be username/project - // in that case, try it as a github url. 
- if (p.split("/").length === 2) { - return maybeGithub(p, name, er, cb) - } - return cb(er) - } - if (s.isDirectory()) addLocalDirectory(p, name, cb) - else addLocalTarball(p, name, cb) - }) - }) -} - -function maybeGithub (p, name, er, cb) { - var u = "git://github.com/" + p - , up = url.parse(u) - log.info("maybeGithub", "Attempting %s from %s", p, u) - - return addRemoteGit(u, up, name, true, function (er2, data) { - if (er2) { - var upriv = "git+ssh://git@github.com:" + p - , uppriv = url.parse(upriv) - - log.info("maybeGithub", "Attempting %s from %s", p, upriv) - - return addRemoteGit(upriv, uppriv, false, name, function (er3, data) { - if (er3) return cb(er) - success(upriv, data) - }) - } - success(u, data) - }) - - function success (u, data) { - data._from = u - data._fromGithub = true - return cb(null, data) - } -} - -function addLocalTarball (p, name, version, shasum, cb_) { - if (typeof cb_ !== "function") cb_ = shasum, shasum = null - if (typeof cb_ !== "function") cb_ = version, version = "" - if (typeof cb_ !== "function") cb_ = name, name = "" - - // If we don't have a shasum yet, then get the shasum now. - if (!shasum) { - return sha.get(p, function (er, shasum) { - if (er) return cb_(er) - addLocalTarball(p, name, version, shasum, cb_) - }) - } - - // if it's a tar, and not in place, - // then unzip to .tmp, add the tmp folder, and clean up tmp - if (pathIsInside(p, npm.tmp)) - return addTmpTarball(p, name, version, shasum, cb_) - - if (pathIsInside(p, npm.cache)) { - if (path.basename(p) !== "package.tgz") return cb_(new Error( - "Not a valid cache tarball name: "+p)) - return addPlacedTarball(p, name, shasum, cb_) - } - - function cb (er, data) { - if (data) { - data._resolved = p - data._shasum = data._shasum || shasum - } - return cb_(er, data) - } - - // just copy it over and then add the temp tarball file. 
- var tmp = path.join(npm.tmp, name + Date.now() - + "-" + Math.random(), "tmp.tgz") - mkdir(path.dirname(tmp), function (er) { - if (er) return cb(er) - var from = fs.createReadStream(p) - , to = fs.createWriteStream(tmp) - , errState = null - function errHandler (er) { - if (errState) return - return cb(errState = er) - } - from.on("error", errHandler) - to.on("error", errHandler) - to.on("close", function () { - if (errState) return - log.verbose("chmod", tmp, npm.modes.file.toString(8)) - fs.chmod(tmp, npm.modes.file, function (er) { - if (er) return cb(er) - addTmpTarball(tmp, name, null, shasum, cb) - }) - }) - from.pipe(to) - }) -} - -// to maintain the cache dir's permissions consistently. -var cacheStat = null -function getCacheStat (cb) { - if (cacheStat) return cb(null, cacheStat) - fs.stat(npm.cache, function (er, st) { - if (er) return makeCacheDir(cb) - if (!st.isDirectory()) { - log.error("getCacheStat", "invalid cache dir %j", npm.cache) - return cb(er) - } - return cb(null, cacheStat = st) - }) -} - -function makeCacheDir (cb) { - if (!process.getuid) return mkdir(npm.cache, cb) + log.verbose("cache add", "spec", spec) - var uid = +process.getuid() - , gid = +process.getgid() + if (!spec) return cb(usage) - if (uid === 0) { - if (process.env.SUDO_UID) uid = +process.env.SUDO_UID - if (process.env.SUDO_GID) gid = +process.env.SUDO_GID + if (adding <= 0) { + npm.spinner.start() } - if (uid !== 0 || !process.env.HOME) { - cacheStat = {uid: uid, gid: gid} - return mkdir(npm.cache, afterMkdir) - } - - fs.stat(process.env.HOME, function (er, st) { - if (er) { - log.error("makeCacheDir", "homeless?") - return cb(er) - } - cacheStat = st - log.silly("makeCacheDir", "cache dir uid, gid", [st.uid, st.gid]) - return mkdir(npm.cache, afterMkdir) - }) + adding++ + cb = afterAdd(cb) + + realizePackageSpecifier(spec, where, function (err, p) { + if (err) return cb(err) + + log.silly("cache add", "parsed spec", p) + + switch (p.type) { + case "local": + case 
"directory": + addLocal(p, null, cb) + break + case "remote": + addRemoteTarball(p.spec, {name : p.name}, null, cb) + break + case "git": + addRemoteGit(p.spec, false, cb) + break + case "github": + maybeGithub(p.spec, cb) + break + default: + if (p.name) return addNamed(p.name, p.spec, null, cb) - function afterMkdir (er, made) { - if (er || !cacheStat || isNaN(cacheStat.uid) || isNaN(cacheStat.gid)) { - return cb(er, cacheStat) + cb(new Error("couldn't figure out how to install " + spec)) } - - if (!made) return cb(er, cacheStat) - - // ensure that the ownership is correct. - chownr(made, cacheStat.uid, cacheStat.gid, function (er) { - return cb(er, cacheStat) - }) - } -} - - - - -function addPlacedTarball (p, name, shasum, cb) { - if (!cb) cb = name, name = "" - getCacheStat(function (er, cs) { - if (er) return cb(er) - return addPlacedTarball_(p, name, cs.uid, cs.gid, shasum, cb) }) } -// Resolved sum is the shasum from the registry dist object, but -// *not* necessarily the shasum of this tarball, because for stupid -// historical reasons, npm re-packs each package an extra time through -// a temp directory, so all installed packages are actually built with -// *this* version of npm, on this machine. -// -// Once upon a time, this meant that we could change package formats -// around and fix junk that might be added by incompatible tar -// implementations. Then, for a while, it was a way to correct bs -// added by bugs in our own tar implementation. Now, it's just -// garbage, but cleaning it up is a pain, and likely to cause issues -// if anything is overlooked, so it's not high priority. -// -// If you're bored, and looking to make npm go faster, and you've -// already made it this far in this file, here's a better methodology: -// -// cache.add should really be cache.place. That is, it should take -// a set of arguments like it does now, but then also a destination -// folder. -// -// cache.add('foo@bar', '/path/node_modules/foo', cb) -// -// 1. 
Resolve 'foo@bar' to some specific: -// - git url -// - local folder -// - local tarball -// - tarball url -// 2. If resolved through the registry, then pick up the dist.shasum -// along the way. -// 3. Acquire request() stream fetching bytes: FETCH -// 4. FETCH.pipe(tar unpack stream to dest) -// 5. FETCH.pipe(shasum generator) -// When the tar and shasum streams both finish, make sure that the -// shasum matches dist.shasum, and if not, clean up and bail. -// -// publish(cb) -// -// 1. read package.json -// 2. get root package object (for rev, and versions) -// 3. update root package doc with version info -// 4. remove _attachments object -// 5. remove versions object -// 5. jsonify, remove last } -// 6. get stream: registry.put(/package) -// 7. write trailing-}-less JSON -// 8. write "_attachments": -// 9. JSON.stringify(attachments), remove trailing } -// 10. Write start of attachments (stubs) -// 11. JSON(filename)+':{"type":"application/octet-stream","data":"' -// 12. acquire tar packing stream, PACK -// 13. PACK.pipe(PUT) -// 14. PACK.pipe(shasum generator) -// 15. when PACK finishes, get shasum -// 16. PUT.write('"}},') (finish _attachments -// 17. update "versions" object with current package version -// (including dist.shasum and dist.tarball) -// 18. write '"versions":' + JSON(versions) -// 19. write '}}' (versions, close main doc) - -function addPlacedTarball_ (p, name, uid, gid, resolvedSum, cb) { - // now we know it's in place already as .cache/name/ver/package.tgz - // unpack to .cache/name/ver/package/, read the package.json, - // and fire cb with the json data. 
- var target = path.dirname(p) - , folder = path.join(target, "package") - - lock(folder, function (er) { - if (er) return cb(er) - rmUnpack() - }) - - function rmUnpack () { - rm(folder, function (er) { - unlock(folder, function () { - if (er) { - log.error("addPlacedTarball", "Could not remove %j", folder) - return cb(er) - } - thenUnpack() - }) - }) - } - - function thenUnpack () { - tar.unpack(p, folder, null, null, uid, gid, function (er) { - if (er) { - log.error("addPlacedTarball", "Could not unpack %j to %j", p, target) - return cb(er) - } - // calculate the sha of the file that we just unpacked. - // this is so that the data is available when publishing. - sha.get(p, function (er, shasum) { - if (er) { - log.error("addPlacedTarball", "shasum fail", p) - return cb(er) - } - readJson(path.join(folder, "package.json"), function (er, data) { - er = needName(er, data) - er = needVersion(er, data) - if (er) { - log.error("addPlacedTarball", "Couldn't read json in %j" - , folder) - return cb(er) - } - - data.dist = data.dist || {} - data.dist.shasum = shasum - deprCheck(data) - asyncMap([p], function (f, cb) { - log.verbose("chmod", f, npm.modes.file.toString(8)) - fs.chmod(f, npm.modes.file, cb) - }, function (f, cb) { - if (process.platform === "win32") { - log.silly("chown", "skipping for windows", f) - cb() - } else if (typeof uid === "number" - && typeof gid === "number" - && parseInt(uid, 10) === uid - && parseInt(gid, 10) === gid) { - log.verbose("chown", f, [uid, gid]) - fs.chown(f, uid, gid, cb) - } else { - log.verbose("chown", "skip for invalid uid/gid", [f, uid, gid]) - cb() - } - }, function (er) { - cb(er, data) - }) - }) - }) - }) - } -} - -// At this point, if shasum is set, it's something that we've already -// read and checked. Just stashing it in the data at this point. 
-function addLocalDirectory (p, name, shasum, cb) { - if (typeof cb !== "function") cb = shasum, shasum = "" - if (typeof cb !== "function") cb = name, name = "" - // if it's a folder, then read the package.json, - // tar it to the proper place, and add the cache tar - if (pathIsInside(p, npm.cache)) return cb(new Error( - "Adding a cache directory to the cache will make the world implode.")) - readJson(path.join(p, "package.json"), false, function (er, data) { - er = needName(er, data) - er = needVersion(er, data) - if (er) return cb(er) - deprCheck(data) - var random = Date.now() + "-" + Math.random() - , tmp = path.join(npm.tmp, random) - , tmptgz = path.resolve(tmp, "tmp.tgz") - , placed = path.resolve( npm.cache, data.name - , data.version, "package.tgz" ) - , placeDirect = path.basename(p) === "package" - , tgz = placeDirect ? placed : tmptgz - , version = data.version - - name = data.name - - getCacheStat(function (er, cs) { - mkdir(path.dirname(tgz), function (er, made) { - if (er) return cb(er) - - var fancy = !pathIsInside(p, npm.tmp) - && !pathIsInside(p, npm.cache) - tar.pack(tgz, p, data, fancy, function (er) { - if (er) { - log.error( "addLocalDirectory", "Could not pack %j to %j" - , p, tgz ) - return cb(er) - } - - // if we don't get a cache stat, or if the gid/uid is not - // a number, then just move on. chown would fail anyway. - if (!cs || isNaN(cs.uid) || isNaN(cs.gid)) return cb() - - chownr(made || tgz, cs.uid, cs.gid, function (er) { - if (er) return cb(er) - addLocalTarball(tgz, name, version, shasum, cb) - }) - }) - }) - }) - }) -} - -// XXX This is where it should be fixed -// Right now it's unpacking to a "package" folder, and then -// adding that local folder, for historical reasons. -// Instead, unpack to the *cache* folder, and then copy the -// tgz into place in the cache, so the shasum doesn't change. -function addTmpTarball (tgz, name, version, shasum, cb) { - // Just have a placeholder here so we can move it into place after. 
- var tmp = false - if (!version) { - tmp = true - version = 'tmp_' + crypto.randomBytes(6).toString('hex') - } - if (!name) { - tmp = true - name = 'tmp_' + crypto.randomBytes(6).toString('hex') - } - if (!tmp) { - var pdir = path.resolve(npm.cache, name, version, "package") - } else { - var pdir = path.resolve(npm.cache, name + version + "package") - } - - getCacheStat(function (er, cs) { - if (er) return cb(er) - tar.unpack(tgz, pdir, null, null, cs.uid, cs.gid, next) - }) - - function next (er) { - if (er) return cb(er) - // it MUST be able to get a version now! - var pj = path.resolve(pdir, "package.json") - readJson(pj, function (er, data) { - if (er) return cb(er) - if (version === data.version && name === data.name && !tmp) { - addTmpTarball_(tgz, data, name, version, shasum, cb) - } else { - var old = pdir - name = data.name - version = data.version - pdir = path.resolve(npm.cache, name, version, "package") - mkdir(path.dirname(pdir), function(er) { - if (er) return cb(er) - rm(pdir, function(er) { - if (er) return cb(er) - fs.rename(old, pdir, function(er) { - if (er) return cb(er) - rm(old, function(er) { - if (er) return cb(er) - addTmpTarball_(tgz, data, name, version, shasum, cb) - }) - }) - }) - }) - } - }) - } -} - -function addTmpTarball_ (tgz, data, name, version, shasum, cb) { - cb = once(cb) - var target = path.resolve(npm.cache, name, version, "package.tgz") - var read = fs.createReadStream(tgz) - var write = fs.createWriteStream(target) - read.on("error", cb).pipe(write).on("error", cb).on("close", done) - - function done() { - data._shasum = data._shasum || shasum - cb(null, data) - } -} - function unpack (pkg, ver, unpackTarget, dMode, fMode, uid, gid, cb) { if (typeof cb !== "function") cb = gid, gid = null if (typeof cb !== "function") cb = uid, uid = null if (typeof cb !== "function") cb = fMode, fMode = null if (typeof cb !== "function") cb = dMode, dMode = null - read(pkg, ver, false, function (er, data) { + read(pkg, ver, false, 
function (er) { if (er) { log.error("unpack", "Could not read data for %s", pkg + "@" + ver) return cb(er) } npm.commands.unbuild([unpackTarget], true, function (er) { if (er) return cb(er) - tar.unpack( path.join(npm.cache, pkg, ver, "package.tgz") + tar.unpack( path.join(cachedPackageRoot({name : pkg, version : ver}), "package.tgz") , unpackTarget , dMode, fMode , uid, gid @@ -1384,65 +323,26 @@ }) } -var deprecated = {} - , deprWarned = {} -function deprCheck (data) { - if (deprecated[data._id]) data.deprecated = deprecated[data._id] - if (data.deprecated) deprecated[data._id] = data.deprecated - else return - if (!deprWarned[data._id]) { - deprWarned[data._id] = true - log.warn("deprecated", "%s: %s", data._id, data.deprecated) - } -} +function afterAdd (cb) { return function (er, data) { + adding-- + if (adding <= 0) npm.spinner.stop() -function lockFileName (u) { - var c = u.replace(/[^a-zA-Z0-9]+/g, "-").replace(/^-+|-+$/g, "") - , h = crypto.createHash("sha1").update(u).digest("hex") - h = h.substr(0, 8) - c = c.substr(-32) - log.silly("lockFile", h + "-" + c, u) - return path.resolve(npm.config.get("cache"), h + "-" + c + ".lock") -} + if (er || !data || !data.name || !data.version) return cb(er, data) + log.silly("cache", "afterAdd", data.name+"@"+data.version) -var myLocks = {} -function lock (u, cb) { - // the cache dir needs to exist already for this. - getCacheStat(function (er, cs) { - if (er) return cb(er) - var opts = { stale: npm.config.get("cache-lock-stale") - , retries: npm.config.get("cache-lock-retries") - , wait: npm.config.get("cache-lock-wait") } - var lf = lockFileName(u) - log.verbose("lock", u, lf) - lockFile.lock(lf, opts, function(er) { - if (!er) myLocks[lf] = true - cb(er) + // Save the resolved, shasum, etc. into the data so that the next + // time we load from this cached data, we have all the same info. 
+ var pj = path.join(cachedPackageRoot(data), "package", "package.json") + + var done = inflight(pj, cb) + if (!done) return log.verbose("afterAdd", pj, "already in flight; not writing") + log.verbose("afterAdd", pj, "not in flight; writing") + + getStat(function (er, cs) { + if (er) return done(er) + writeFileAtomic(pj, JSON.stringify(data), {chown : cs}, function (er) { + if (!er) log.verbose("afterAdd", pj, "written") + return done(er, data) }) }) -} - -function unlock (u, cb) { - var lf = lockFileName(u) - , locked = myLocks[lf] - if (locked === false) { - return process.nextTick(cb) - } else if (locked === true) { - myLocks[lf] = false - lockFile.unlock(lockFileName(u), cb) - } else { - throw new Error("Attempt to unlock " + u + ", which hasn't been locked") - } -} - -function needName(er, data) { - return er ? er - : (data && !data.name) ? new Error("No name provided") - : null -} - -function needVersion(er, data) { - return er ? er - : (data && !data.version) ? new Error("No version provided") - : null -} +}} diff -Nru nodejs-0.11.13/deps/npm/lib/completion.js nodejs-0.11.15/deps/npm/lib/completion.js --- nodejs-0.11.13/deps/npm/lib/completion.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/completion.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ + "source <(npm completion)" var npm = require("./npm.js") - , npmconf = require("npmconf") + , npmconf = require("./config/core.js") , configDefs = npmconf.defs , configTypes = configDefs.types , shorthands = configDefs.shorthands @@ -26,12 +26,11 @@ , path = require("path") , bashExists = null , zshExists = null - , bashProfExists = null - fs.stat(path.resolve(process.env.HOME, ".bashrc"), function (er, b) { + fs.stat(path.resolve(process.env.HOME, ".bashrc"), function (er) { bashExists = !er next() }) - fs.stat(path.resolve(process.env.HOME, ".zshrc"), function (er, b) { + fs.stat(path.resolve(process.env.HOME, ".zshrc"), function (er) { zshExists = !er next() }) @@ -79,7 +78,6 @@ 
, word = words[w] , line = process.env.COMP_LINE , point = +process.env.COMP_POINT - , lineLength = line.length , partialLine = line.substr(0, point) , partialWords = words.slice(0, w) @@ -159,7 +157,7 @@ if (er) return cb(er) d = d.replace(/^\#\!.*?\n/, "") - process.stdout.write(d, function (n) { cb() }) + process.stdout.write(d, function () { cb() }) process.stdout.on("error", function (er) { // Darwin is a real dick sometimes. // @@ -218,8 +216,6 @@ , split = word.match(/^(-+)((?:no-)*)(.*)$/) , dashes = split[1] , no = split[2] - , conf = split[3] - , confs = allConfs , flags = configNames.filter(isFlag) console.error(flags) @@ -233,7 +229,7 @@ // expand with the valid values of various config values. // not yet implemented. function configValueCompl (opts, cb) { - console.error('configValue', opts) + console.error("configValue", opts) return cb(null, []) } @@ -241,7 +237,6 @@ function isFlag (word) { // shorthands never take args. var split = word.match(/^(-*)((?:no-)+)?(.*)$/) - , dashes = split[1] , no = split[2] , conf = split[3] return no || configTypes[conf] === Boolean || shorthands[conf] diff -Nru nodejs-0.11.13/deps/npm/lib/config/core.js nodejs-0.11.15/deps/npm/lib/config/core.js --- nodejs-0.11.13/deps/npm/lib/config/core.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/core.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,427 @@ + +var CC = require("config-chain").ConfigChain +var inherits = require("inherits") +var configDefs = require("./defaults.js") +var types = configDefs.types +var once = require("once") +var fs = require("fs") +var path = require("path") +var nopt = require("nopt") +var ini = require("ini") +var Octal = configDefs.Octal +var mkdirp = require("mkdirp") + +exports.load = load +exports.Conf = Conf +exports.loaded = false +exports.rootConf = null +exports.usingBuiltin = false +exports.defs = configDefs + +Object.defineProperty(exports, "defaults", { get: function () { + return configDefs.defaults 
+}, enumerable: true }) + +Object.defineProperty(exports, "types", { get: function () { + return configDefs.types +}, enumerable: true }) + +exports.validate = validate + +var myUid = process.env.SUDO_UID !== undefined + ? process.env.SUDO_UID : (process.getuid && process.getuid()) +var myGid = process.env.SUDO_GID !== undefined + ? process.env.SUDO_GID : (process.getgid && process.getgid()) + + +var loading = false +var loadCbs = [] +function load () { + var cli, builtin, cb + for (var i = 0; i < arguments.length; i++) + switch (typeof arguments[i]) { + case "string": builtin = arguments[i]; break + case "object": cli = arguments[i]; break + case "function": cb = arguments[i]; break + } + + if (!cb) + cb = function () {} + + if (exports.loaded) { + var ret = exports.loaded + if (cli) { + ret = new Conf(ret) + ret.unshift(cli) + } + return process.nextTick(cb.bind(null, null, ret)) + } + + // either a fresh object, or a clone of the passed in obj + if (!cli) + cli = {} + else + cli = Object.keys(cli).reduce(function (c, k) { + c[k] = cli[k] + return c + }, {}) + + loadCbs.push(cb) + if (loading) + return + + loading = true + + cb = once(function (er, conf) { + if (!er) + exports.loaded = conf + loadCbs.forEach(function (fn) { + fn(er, conf) + }) + loadCbs.length = 0 + }) + + // check for a builtin if provided. + exports.usingBuiltin = !!builtin + var rc = exports.rootConf = new Conf() + if (builtin) + rc.addFile(builtin, "builtin") + else + rc.add({}, "builtin") + + rc.on("load", function () { + load_(builtin, rc, cli, cb) + }) + rc.on("error", cb) +} + +function load_(builtin, rc, cli, cb) { + var defaults = configDefs.defaults + var conf = new Conf(rc) + + conf.usingBuiltin = !!builtin + conf.add(cli, "cli") + conf.addEnv() + + conf.loadPrefix(function(er) { + if (er) + return cb(er) + + // If you're doing `npm --userconfig=~/foo.npmrc` then you'd expect + // that ~/.npmrc won't override the stuff in ~/foo.npmrc (or, indeed + // be used at all). 
+ // + // However, if the cwd is ~, then ~/.npmrc is the home for the project + // config, and will override the userconfig. + // + // If you're not setting the userconfig explicitly, then it will be loaded + // twice, which is harmless but excessive. If you *are* setting the + // userconfig explicitly then it will override your explicit intent, and + // that IS harmful and unexpected. + // + // Solution: Do not load project config file that is the same as either + // the default or resolved userconfig value. npm will log a "verbose" + // message about this when it happens, but it is a rare enough edge case + // that we don't have to be super concerned about it. + var projectConf = path.resolve(conf.localPrefix, ".npmrc") + var defaultUserConfig = rc.get("userconfig") + var resolvedUserConfig = conf.get("userconfig") + if (!conf.get("global") && + projectConf !== defaultUserConfig && + projectConf !== resolvedUserConfig) { + conf.addFile(projectConf, "project") + conf.once("load", afterPrefix) + } else { + conf.add({}, "project") + afterPrefix() + } + }) + + function afterPrefix() { + conf.addFile(conf.get("userconfig"), "user") + conf.once("error", cb) + conf.once("load", afterUser) + } + + function afterUser () { + // globalconfig and globalignorefile defaults + // need to respond to the 'prefix' setting up to this point. + // Eg, `npm config get globalconfig --prefix ~/local` should + // return `~/local/etc/npmrc` + // annoying humans and their expectations! + if (conf.get("prefix")) { + var etc = path.resolve(conf.get("prefix"), "etc") + defaults.globalconfig = path.resolve(etc, "npmrc") + defaults.globalignorefile = path.resolve(etc, "npmignore") + } + + conf.addFile(conf.get("globalconfig"), "global") + + // move the builtin into the conf stack now. 
+ conf.root = defaults + conf.add(rc.shift(), "builtin") + conf.once("load", function () { + conf.loadExtras(afterExtras) + }) + } + + function afterExtras(er) { + if (er) + return cb(er) + + // warn about invalid bits. + validate(conf) + + var cafile = conf.get("cafile") + + if (cafile) { + return conf.loadCAFile(cafile, finalize) + } + + finalize() + } + + function finalize(er) { + if (er) { + return cb(er) + } + + exports.loaded = conf + cb(er, conf) + } +} + +// Basically the same as CC, but: +// 1. Always ini +// 2. Parses environment variable names in field values +// 3. Field values that start with ~/ are replaced with process.env.HOME +// 4. Can inherit from another Conf object, using it as the base. +inherits(Conf, CC) +function Conf (base) { + if (!(this instanceof Conf)) + return new Conf(base) + + CC.apply(this) + + if (base) + if (base instanceof Conf) + this.root = base.list[0] || base.root + else + this.root = base + else + this.root = configDefs.defaults +} + +Conf.prototype.loadPrefix = require("./load-prefix.js") +Conf.prototype.loadCAFile = require("./load-cafile.js") +Conf.prototype.loadUid = require("./load-uid.js") +Conf.prototype.setUser = require("./set-user.js") +Conf.prototype.findPrefix = require("./find-prefix.js") +Conf.prototype.getCredentialsByURI = require("./get-credentials-by-uri.js") +Conf.prototype.setCredentialsByURI = require("./set-credentials-by-uri.js") + +Conf.prototype.loadExtras = function(cb) { + this.setUser(function(er) { + if (er) + return cb(er) + this.loadUid(function(er) { + if (er) + return cb(er) + // Without prefix, nothing will ever work + mkdirp(this.prefix, cb) + }.bind(this)) + }.bind(this)) +} + +Conf.prototype.save = function (where, cb) { + var target = this.sources[where] + if (!target || !(target.path || target.source) || !target.data) { + if (where !== "builtin") + var er = new Error("bad save target: " + where) + if (cb) { + process.nextTick(cb.bind(null, er)) + return this + } + return 
this.emit("error", er) + } + + if (target.source) { + var pref = target.prefix || "" + Object.keys(target.data).forEach(function (k) { + target.source[pref + k] = target.data[k] + }) + if (cb) process.nextTick(cb) + return this + } + + var data = ini.stringify(target.data) + + then = then.bind(this) + done = done.bind(this) + this._saving ++ + + var mode = where === "user" ? "0600" : "0666" + if (!data.trim()) { + fs.unlink(target.path, function () { + // ignore the possible error (e.g. the file doesn't exist) + done(null) + }) + } else { + mkdirp(path.dirname(target.path), function (er) { + if (er) + return then(er) + fs.writeFile(target.path, data, "utf8", function (er) { + if (er) + return then(er) + if (where === "user" && myUid && myGid) + fs.chown(target.path, +myUid, +myGid, then) + else + then() + }) + }) + } + + function then (er) { + if (er) + return done(er) + fs.chmod(target.path, mode, done) + } + + function done (er) { + if (er) { + if (cb) return cb(er) + else return this.emit("error", er) + } + this._saving -- + if (this._saving === 0) { + if (cb) cb() + this.emit("save") + } + } + + return this +} + +Conf.prototype.addFile = function (file, name) { + name = name || file + var marker = {__source__:name} + this.sources[name] = { path: file, type: "ini" } + this.push(marker) + this._await() + fs.readFile(file, "utf8", function (er, data) { + if (er) // just ignore missing files. + return this.add({}, marker) + this.addString(data, file, "ini", marker) + }.bind(this)) + return this +} + +// always ini files. 
+Conf.prototype.parse = function (content, file) { + return CC.prototype.parse.call(this, content, file, "ini") +} + +Conf.prototype.add = function (data, marker) { + try { + Object.keys(data).forEach(function (k) { + data[k] = parseField(data[k], k) + }) + } + catch (e) { + this.emit("error", e) + return this + } + return CC.prototype.add.call(this, data, marker) +} + +Conf.prototype.addEnv = function (env) { + env = env || process.env + var conf = {} + Object.keys(env) + .filter(function (k) { return k.match(/^npm_config_/i) }) + .forEach(function (k) { + if (!env[k]) + return + + // leave first char untouched, even if + // it is a "_" - convert all other to "-" + var p = k.toLowerCase() + .replace(/^npm_config_/, "") + .replace(/(?!^)_/g, "-") + conf[p] = env[k] + }) + return CC.prototype.addEnv.call(this, "", conf, "env") +} + +function parseField (f, k) { + if (typeof f !== "string" && !(f instanceof String)) + return f + + // type can be an array or single thing. + var typeList = [].concat(types[k]) + var isPath = -1 !== typeList.indexOf(path) + var isBool = -1 !== typeList.indexOf(Boolean) + var isString = -1 !== typeList.indexOf(String) + var isOctal = -1 !== typeList.indexOf(Octal) + var isNumber = isOctal || (-1 !== typeList.indexOf(Number)) + + f = (""+f).trim() + + if (f.match(/^".*"$/)) { + try { + f = JSON.parse(f) + } + catch (e) { + throw new Error("Failed parsing JSON config key " + k + ": " + f) + } + } + + if (isBool && !isString && f === "") + return true + + switch (f) { + case "true": return true + case "false": return false + case "null": return null + case "undefined": return undefined + } + + f = envReplace(f) + + if (isPath) { + var homePattern = process.platform === "win32" ? /^~(\/|\\)/ : /^~\// + if (f.match(homePattern) && process.env.HOME) { + f = path.resolve(process.env.HOME, f.substr(2)) + } + f = path.resolve(f) + } + + if (isNumber && !isNaN(f)) + f = isOctal ? 
parseInt(f, 8) : +f + + return f +} + +function envReplace (f) { + if (typeof f !== "string" || !f) return f + + // replace any ${ENV} values with the appropriate environ. + var envExpr = /(\\*)\$\{([^}]+)\}/g + return f.replace(envExpr, function (orig, esc, name) { + esc = esc.length && esc.length % 2 + if (esc) + return orig + if (undefined === process.env[name]) + throw new Error("Failed to replace env in config: "+orig) + return process.env[name] + }) +} + +function validate (cl) { + // warn about invalid configs at every level. + cl.list.forEach(function (conf) { + nopt.clean(conf, configDefs.types) + }) + + nopt.clean(cl.root, configDefs.types) +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/defaults.js nodejs-0.11.15/deps/npm/lib/config/defaults.js --- nodejs-0.11.13/deps/npm/lib/config/defaults.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/defaults.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,383 @@ +// defaults, types, and shorthands. + + +var path = require("path") + , url = require("url") + , Stream = require("stream").Stream + , semver = require("semver") + , stableFamily = semver.parse(process.version) + , nopt = require("nopt") + , os = require("os") + , osenv = require("osenv") + +var log +try { + log = require("npmlog") +} catch (er) { + var util = require("util") + log = { warn: function (m) { + console.warn(m + " " + util.format.apply(util, [].slice.call(arguments, 1))) + } } +} + +exports.Octal = Octal +function Octal () {} +function validateOctal (data, k, val) { + // must be either an integer or an octal string. 
+ if (typeof val === "number") { + data[k] = val + return true + } + + if (typeof val === "string") { + if (val.charAt(0) !== "0" || isNaN(val)) return false + data[k] = parseInt(val, 8).toString(8) + } +} + +function validateSemver (data, k, val) { + if (!semver.valid(val)) return false + data[k] = semver.valid(val) +} + +function validateTag (data, k, val) { + val = ("" + val).trim() + if (!val || semver.validRange(val)) return false + data[k] = val +} + +function validateStream (data, k, val) { + if (!(val instanceof Stream)) return false + data[k] = val +} + +nopt.typeDefs.semver = { type: semver, validate: validateSemver } +nopt.typeDefs.Octal = { type: Octal, validate: validateOctal } +nopt.typeDefs.Stream = { type: Stream, validate: validateStream } + +// Don't let --tag=1.2.3 ever be a thing +var tag = {} +nopt.typeDefs.tag = { type: tag, validate: validateTag } + +nopt.invalidHandler = function (k, val, type) { + log.warn("invalid config", k + "=" + JSON.stringify(val)) + + if (Array.isArray(type)) { + if (type.indexOf(url) !== -1) type = url + else if (type.indexOf(path) !== -1) type = path + } + + switch (type) { + case tag: + log.warn("invalid config", "Tag must not be a SemVer range") + break + case Octal: + log.warn("invalid config", "Must be octal number, starting with 0") + break + case url: + log.warn("invalid config", "Must be a full url with 'http://'") + break + case path: + log.warn("invalid config", "Must be a valid filesystem path") + break + case Number: + log.warn("invalid config", "Must be a numeric value") + break + case Stream: + log.warn("invalid config", "Must be an instance of the Stream class") + break + } +} + +if (!stableFamily || (+stableFamily.minor % 2)) stableFamily = null +else stableFamily = stableFamily.major + "." + stableFamily.minor + +var defaults + +var temp = osenv.tmpdir() +var home = osenv.home() + +var uidOrPid = process.getuid ? 
process.getuid() : process.pid + +if (home) process.env.HOME = home +else home = path.resolve(temp, "npm-" + uidOrPid) + +var cacheExtra = process.platform === "win32" ? "npm-cache" : ".npm" +var cacheRoot = process.platform === "win32" && process.env.APPDATA || home +var cache = path.resolve(cacheRoot, cacheExtra) + + +var globalPrefix +Object.defineProperty(exports, "defaults", {get: function () { + if (defaults) return defaults + + if (process.env.PREFIX) { + globalPrefix = process.env.PREFIX + } else if (process.platform === "win32") { + // c:\node\node.exe --> prefix=c:\node\ + globalPrefix = path.dirname(process.execPath) + } else { + // /usr/local/bin/node --> prefix=/usr/local + globalPrefix = path.dirname(path.dirname(process.execPath)) + + // destdir only is respected on Unix + if (process.env.DESTDIR) { + globalPrefix = path.join(process.env.DESTDIR, globalPrefix) + } + } + + defaults = { + "always-auth" : false + , "bin-links" : true + , browser : null + + , ca: null + , cafile: null + + , cache : cache + + , "cache-lock-stale": 60000 + , "cache-lock-retries": 10 + , "cache-lock-wait": 10000 + + , "cache-max": Infinity + , "cache-min": 10 + + , cert: null + + , color : true + , depth: Infinity + , description : true + , dev : false + , editor : osenv.editor() + , "engine-strict": false + , force : false + + , "fetch-retries": 2 + , "fetch-retry-factor": 10 + , "fetch-retry-mintimeout": 10000 + , "fetch-retry-maxtimeout": 60000 + + , git: "git" + , "git-tag-version": true + + , global : false + , globalconfig : path.resolve(globalPrefix, "etc", "npmrc") + , group : process.platform === "win32" ? 
0 + : process.env.SUDO_GID || (process.getgid && process.getgid()) + , heading: "npm" + , "ignore-scripts": false + , "init-module": path.resolve(home, ".npm-init.js") + , "init-author-name" : "" + , "init-author-email" : "" + , "init-author-url" : "" + , "init-version": "1.0.0" + , "init-license": "ISC" + , json: false + , key: null + , link: false + , "local-address" : undefined + , loglevel : "warn" + , logstream : process.stderr + , long : false + , message : "%s" + , "node-version" : process.version + , npat : false + , "onload-script" : false + , optional: true + , parseable : false + , prefix : globalPrefix + , production: process.env.NODE_ENV === "production" + , "proprietary-attribs": true + , proxy : process.env.HTTP_PROXY || process.env.http_proxy || null + , "https-proxy" : process.env.HTTPS_PROXY || process.env.https_proxy || + process.env.HTTP_PROXY || process.env.http_proxy || null + , "user-agent" : "npm/{npm-version} " + + "node/{node-version} " + + "{platform} " + + "{arch}" + , "rebuild-bundle" : true + , registry : "https://registry.npmjs.org/" + , rollback : true + , save : false + , "save-bundle": false + , "save-dev" : false + , "save-exact" : false + , "save-optional" : false + , "save-prefix": "^" + , scope : "" + , searchopts: "" + , searchexclude: null + , searchsort: "name" + , shell : osenv.shell() + , shrinkwrap: true + , "sign-git-tag": false + , spin: true + , "strict-ssl": true + , tag : "latest" + , tmp : temp + , unicode : true + , "unsafe-perm" : process.platform === "win32" + || process.platform === "cygwin" + || !( process.getuid && process.setuid + && process.getgid && process.setgid ) + || process.getuid() !== 0 + , usage : false + , user : process.platform === "win32" ? 0 : "nobody" + , userconfig : path.resolve(home, ".npmrc") + , umask: process.umask ? process.umask() : parseInt("022", 8) + , version : false + , versions : false + , viewer: process.platform === "win32" ? 
"browser" : "man" + + , _exit : true + } + + return defaults +}}) + +exports.types = + { "always-auth" : Boolean + , "bin-links": Boolean + , browser : [null, String] + , ca: [null, String, Array] + , cafile : path + , cache : path + , "cache-lock-stale": Number + , "cache-lock-retries": Number + , "cache-lock-wait": Number + , "cache-max": Number + , "cache-min": Number + , cert: [null, String] + , color : ["always", Boolean] + , depth : Number + , description : Boolean + , dev : Boolean + , editor : String + , "engine-strict": Boolean + , force : Boolean + , "fetch-retries": Number + , "fetch-retry-factor": Number + , "fetch-retry-mintimeout": Number + , "fetch-retry-maxtimeout": Number + , git: String + , "git-tag-version": Boolean + , global : Boolean + , globalconfig : path + , group : [Number, String] + , "https-proxy" : [null, url] + , "user-agent" : String + , "heading": String + , "ignore-scripts": Boolean + , "init-module": path + , "init-author-name" : String + , "init-author-email" : String + , "init-author-url" : ["", url] + , "init-license": String + , "init-version": semver + , json: Boolean + , key: [null, String] + , link: Boolean + // local-address must be listed as an IP for a local network interface + // must be IPv4 due to node bug + , "local-address" : getLocalAddresses() + , loglevel : ["silent","error","warn","http","info","verbose","silly"] + , logstream : Stream + , long : Boolean + , message: String + , "node-version" : [null, semver] + , npat : Boolean + , "onload-script" : [null, String] + , optional: Boolean + , parseable : Boolean + , prefix: path + , production: Boolean + , "proprietary-attribs": Boolean + , proxy : [null, url] + , "rebuild-bundle" : Boolean + , registry : [null, url] + , rollback : Boolean + , save : Boolean + , "save-bundle": Boolean + , "save-dev" : Boolean + , "save-exact" : Boolean + , "save-optional" : Boolean + , "save-prefix": String + , scope : String + , searchopts : String + , searchexclude: [null, String] 
+ , searchsort: [ "name", "-name" + , "description", "-description" + , "author", "-author" + , "date", "-date" + , "keywords", "-keywords" ] + , shell : String + , shrinkwrap: Boolean + , "sign-git-tag": Boolean + , spin: ["always", Boolean] + , "strict-ssl": Boolean + , tag : tag + , tmp : path + , unicode : Boolean + , "unsafe-perm" : Boolean + , usage : Boolean + , user : [Number, String] + , userconfig : path + , umask: Octal + , version : Boolean + , versions : Boolean + , viewer: String + , _exit : Boolean + } + +function getLocalAddresses() { + Object.keys(os.networkInterfaces()).map(function (nic) { + return os.networkInterfaces()[nic].filter(function (addr) { + return addr.family === "IPv4" + }) + .map(function (addr) { + return addr.address + }) + }).reduce(function (curr, next) { + return curr.concat(next) + }, []).concat(undefined) +} + +exports.shorthands = + { s : ["--loglevel", "silent"] + , d : ["--loglevel", "info"] + , dd : ["--loglevel", "verbose"] + , ddd : ["--loglevel", "silly"] + , noreg : ["--no-registry"] + , N : ["--no-registry"] + , reg : ["--registry"] + , "no-reg" : ["--no-registry"] + , silent : ["--loglevel", "silent"] + , verbose : ["--loglevel", "verbose"] + , quiet: ["--loglevel", "warn"] + , q: ["--loglevel", "warn"] + , h : ["--usage"] + , H : ["--usage"] + , "?" 
: ["--usage"] + , help : ["--usage"] + , v : ["--version"] + , f : ["--force"] + , gangster : ["--force"] + , gangsta : ["--force"] + , desc : ["--description"] + , "no-desc" : ["--no-description"] + , "local" : ["--no-global"] + , l : ["--long"] + , m : ["--message"] + , p : ["--parseable"] + , porcelain : ["--parseable"] + , g : ["--global"] + , S : ["--save"] + , D : ["--save-dev"] + , E : ["--save-exact"] + , O : ["--save-optional"] + , y : ["--yes"] + , n : ["--no-yes"] + , B : ["--save-bundle"] + , C : ["--prefix"] + } diff -Nru nodejs-0.11.13/deps/npm/lib/config/find-prefix.js nodejs-0.11.15/deps/npm/lib/config/find-prefix.js --- nodejs-0.11.13/deps/npm/lib/config/find-prefix.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/find-prefix.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,56 @@ +// try to find the most reasonable prefix to use + +module.exports = findPrefix + +var fs = require("fs") +var path = require("path") + +function findPrefix (p, cb_) { + function cb (er, p) { + process.nextTick(function () { + cb_(er, p) + }) + } + + p = path.resolve(p) + // if there's no node_modules folder, then + // walk up until we hopefully find one. + // if none anywhere, then use cwd. + var walkedUp = false + while (path.basename(p) === "node_modules") { + p = path.dirname(p) + walkedUp = true + } + if (walkedUp) return cb(null, p) + + findPrefix_(p, p, cb) +} + +function findPrefix_ (p, original, cb) { + if (p === "/" + || (process.platform === "win32" && p.match(/^[a-zA-Z]:(\\|\/)?$/))) { + return cb(null, original) + } + fs.readdir(p, function (er, files) { + // an error right away is a bad sign. + // unless the prefix was simply a non + // existent directory. + if (er && p === original) { + if (er.code === "ENOENT") return cb(null, original); + return cb(er) + } + + // walked up too high or something. 
+ if (er) return cb(null, original) + + if (files.indexOf("node_modules") !== -1 + || files.indexOf("package.json") !== -1) { + return cb(null, p) + } + + var d = path.dirname(p) + if (d === p) return cb(null, original) + + return findPrefix_(d, original, cb) + }) +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/get-credentials-by-uri.js nodejs-0.11.15/deps/npm/lib/config/get-credentials-by-uri.js --- nodejs-0.11.13/deps/npm/lib/config/get-credentials-by-uri.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/get-credentials-by-uri.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,73 @@ +var assert = require("assert") + +var toNerfDart = require("./nerf-dart.js") + +module.exports = getCredentialsByURI + +function getCredentialsByURI (uri) { + assert(uri && typeof uri === "string", "registry URL is required") + var nerfed = toNerfDart(uri) + var defnerf = toNerfDart(this.get("registry")) + + // hidden class micro-optimization + var c = { + scope : nerfed, + token : undefined, + password : undefined, + username : undefined, + email : undefined, + auth : undefined, + alwaysAuth : undefined + } + + if (this.get(nerfed + ":_authToken")) { + c.token = this.get(nerfed + ":_authToken") + // the bearer token is enough, don't confuse things + return c + } + + // Handle the old-style _auth= style for the default + // registry, if set. 
+ // + // XXX(isaacs): Remove when npm 1.4 is no longer relevant + var authDef = this.get("_auth") + var userDef = this.get("username") + var passDef = this.get("_password") + if (authDef && !(userDef && passDef)) { + authDef = new Buffer(authDef, "base64").toString() + authDef = authDef.split(":") + userDef = authDef.shift() + passDef = authDef.join(":") + } + + if (this.get(nerfed + ":_password")) { + c.password = new Buffer(this.get(nerfed + ":_password"), "base64").toString("utf8") + } else if (nerfed === defnerf && passDef) { + c.password = passDef + } + + if (this.get(nerfed + ":username")) { + c.username = this.get(nerfed + ":username") + } else if (nerfed === defnerf && userDef) { + c.username = userDef + } + + if (this.get(nerfed + ":email")) { + c.email = this.get(nerfed + ":email") + } else if (this.get("email")) { + c.email = this.get("email") + } + + if (this.get(nerfed + ":always-auth") !== undefined) { + var val = this.get(nerfed + ":always-auth") + c.alwaysAuth = val === "false" ? 
false : !!val + } else if (this.get("always-auth") !== undefined) { + c.alwaysAuth = this.get("always-auth") + } + + if (c.username && c.password) { + c.auth = new Buffer(c.username + ":" + c.password).toString("base64") + } + + return c +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/load-cafile.js nodejs-0.11.15/deps/npm/lib/config/load-cafile.js --- nodejs-0.11.13/deps/npm/lib/config/load-cafile.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/load-cafile.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,30 @@ +module.exports = loadCAFile + +var fs = require("fs") + +function loadCAFile(cafilePath, cb) { + if (!cafilePath) + return process.nextTick(cb) + + fs.readFile(cafilePath, "utf8", afterCARead.bind(this)) + + function afterCARead(er, cadata) { + if (er) + return cb(er) + + var delim = "-----END CERTIFICATE-----" + var output + + output = cadata + .split(delim) + .filter(function(xs) { + return !!xs.trim() + }) + .map(function(xs) { + return xs.trimLeft() + delim + }) + + this.set("ca", output) + cb(null) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/load-prefix.js nodejs-0.11.15/deps/npm/lib/config/load-prefix.js --- nodejs-0.11.13/deps/npm/lib/config/load-prefix.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/load-prefix.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,49 @@ +module.exports = loadPrefix + +var findPrefix = require("./find-prefix.js") +var path = require("path") + +function loadPrefix (cb) { + var cli = this.list[0] + + Object.defineProperty(this, "prefix", + { set : function (prefix) { + var g = this.get("global") + this[g ? "globalPrefix" : "localPrefix"] = prefix + }.bind(this) + , get : function () { + var g = this.get("global") + return g ? 
this.globalPrefix : this.localPrefix + }.bind(this) + , enumerable : true + }) + + Object.defineProperty(this, "globalPrefix", + { set : function (prefix) { + this.set("prefix", prefix) + }.bind(this) + , get : function () { + return path.resolve(this.get("prefix")) + }.bind(this) + , enumerable : true + }) + + var p + Object.defineProperty(this, "localPrefix", + { set : function (prefix) { p = prefix }, + get : function () { return p } + , enumerable: true }) + + // try to guess at a good node_modules location. + // If we are *explicitly* given a prefix on the cli, then + // always use that. otherwise, infer local prefix from cwd. + if (Object.prototype.hasOwnProperty.call(cli, "prefix")) { + p = path.resolve(cli.prefix) + process.nextTick(cb) + } else { + findPrefix(process.cwd(), function (er, found) { + p = found + cb(er) + }) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/load-uid.js nodejs-0.11.15/deps/npm/lib/config/load-uid.js --- nodejs-0.11.13/deps/npm/lib/config/load-uid.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/load-uid.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +module.exports = loadUid + +var getUid = require("uid-number") + +// Call in the context of a npmconf object + +function loadUid (cb) { + // if we're not in unsafe-perm mode, then figure out who + // to run stuff as. Do this first, to support `npm update npm -g` + if (!this.get("unsafe-perm")) { + getUid(this.get("user"), this.get("group"), cb) + } else { + process.nextTick(cb) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/nerf-dart.js nodejs-0.11.15/deps/npm/lib/config/nerf-dart.js --- nodejs-0.11.13/deps/npm/lib/config/nerf-dart.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/nerf-dart.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +var url = require("url") + +module.exports = toNerfDart + +/** + * Maps a URL to an identifier. 
+ * + * Name courtesy schiffertronix media LLC, a New Jersey corporation + * + * @param {String} uri The URL to be nerfed. + * + * @returns {String} A nerfed URL. + */ +function toNerfDart(uri) { + var parsed = url.parse(uri) + parsed.pathname = "/" + delete parsed.protocol + delete parsed.auth + + return url.format(parsed) +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/set-credentials-by-uri.js nodejs-0.11.15/deps/npm/lib/config/set-credentials-by-uri.js --- nodejs-0.11.13/deps/npm/lib/config/set-credentials-by-uri.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/set-credentials-by-uri.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +var assert = require("assert") + +var toNerfDart = require("./nerf-dart.js") + +module.exports = setCredentialsByURI + +function setCredentialsByURI (uri, c) { + assert(uri && typeof uri === "string", "registry URL is required") + assert(c && typeof c === "object", "credentials are required") + + var nerfed = toNerfDart(uri) + + if (c.token) { + this.set(nerfed + ":_authToken", c.token, "user") + this.del(nerfed + ":_password", "user") + this.del(nerfed + ":username", "user") + this.del(nerfed + ":email", "user") + this.del(nerfed + ":always-auth", "user") + } + else if (c.username || c.password || c.email) { + assert(c.username, "must include username") + assert(c.password, "must include password") + assert(c.email, "must include email address") + + this.del(nerfed + ":_authToken", "user") + + var encoded = new Buffer(c.password, "utf8").toString("base64") + this.set(nerfed + ":_password", encoded, "user") + this.set(nerfed + ":username", c.username, "user") + this.set(nerfed + ":email", c.email, "user") + + if (c.alwaysAuth !== undefined) { + this.set(nerfed + ":always-auth", c.alwaysAuth, "user") + } + else { + this.del(nerfed + ":always-auth", "user") + } + } + else { + throw new Error("No credentials to set.") + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/config/set-user.js 
nodejs-0.11.15/deps/npm/lib/config/set-user.js --- nodejs-0.11.13/deps/npm/lib/config/set-user.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config/set-user.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +module.exports = setUser + +var assert = require("assert") +var path = require("path") +var fs = require("fs") +var mkdirp = require("mkdirp") + +function setUser (cb) { + var defaultConf = this.root + assert(defaultConf !== Object.prototype) + + // If global, leave it as-is. + // If not global, then set the user to the owner of the prefix folder. + // Just set the default, so it can be overridden. + if (this.get("global")) return cb() + if (process.env.SUDO_UID) { + defaultConf.user = +(process.env.SUDO_UID) + return cb() + } + + var prefix = path.resolve(this.get("prefix")) + mkdirp(prefix, function (er) { + if (er) return cb(er) + fs.stat(prefix, function (er, st) { + defaultConf.user = st && st.uid + return cb(er) + }) + }) +} diff -Nru nodejs-0.11.13/deps/npm/lib/config.js nodejs-0.11.15/deps/npm/lib/config.js --- nodejs-0.11.13/deps/npm/lib/config.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/config.js 2015-01-20 21:22:17.000000000 +0000 @@ -11,9 +11,9 @@ var log = require("npmlog") , npm = require("./npm.js") - , spawn = require("child_process").spawn + , npmconf = require("./config/core.js") , fs = require("graceful-fs") - , npmconf = require("npmconf") + , writeFileAtomic = require("write-file-atomic") , types = npmconf.defs.types , ini = require("ini") , editor = require("editor") @@ -89,17 +89,16 @@ if (key === "logstream") return arr return arr.concat( ini.stringify(obj) - .replace(/\n$/m, '') - .replace(/^/g, '; ') - .replace(/\n/g, '\n; ') - .split('\n')) + .replace(/\n$/m, "") + .replace(/^/g, "; ") + .replace(/\n/g, "\n; ") + .split("\n")) }, [])) .concat([""]) .join(os.EOL) - fs.writeFile + writeFileAtomic ( f , data - , "utf8" , function (er) { if (er) return cb(er) editor(f, { editor: e 
}, cb) @@ -150,10 +149,6 @@ return a > b ? 1 : -1 } -function reverse (a, b) { - return a > b ? -1 : 1 -} - function public (k) { return !(k.charAt(0) === "_" || types[k] !== types[k]) } @@ -236,7 +231,6 @@ , bpath = builtin.path , bconfKeys = getKeys(bconf) if (bconfKeys.length) { - var path = require("path") msg += "; builtin config " + bpath + "\n" bconfKeys.forEach(function (k) { var val = (k.charAt(0) === "_") diff -Nru nodejs-0.11.13/deps/npm/lib/dedupe.js nodejs-0.11.15/deps/npm/lib/dedupe.js --- nodejs-0.11.13/deps/npm/lib/dedupe.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/dedupe.js 2015-01-20 21:22:17.000000000 +0000 @@ -11,14 +11,11 @@ var asyncMap = require("slide").asyncMap var path = require("path") var readJson = require("read-package-json") -var archy = require("archy") -var util = require("util") -var RegClient = require("npm-registry-client") -var npmconf = require("npmconf") var semver = require("semver") var rm = require("./utils/gently-rm.js") var log = require("npmlog") var npm = require("./npm.js") +var mapToRegistry = require("./utils/map-to-registry.js") module.exports = dedupe @@ -64,7 +61,7 @@ Object.keys(obj.children).forEach(function (k) { U(obj.children[k]) }) - }) + })(data) // then collect them up and figure out who needs them ;(function C (obj) { @@ -93,7 +90,6 @@ })] })] }).map(function (item) { - var name = item[0] var set = item[1] var ranges = set.map(function (i) { @@ -137,7 +133,6 @@ b.pop() // find the longest chain that both A and B share. // then push the name back on it, and join by /node_modules/ - var res = [] for (var i = 0, al = a.length, bl = b.length; i < al && i < bl && a[i] === b[i]; i++); return a.slice(0, i).concat(name).join(path.sep + "node_modules" + path.sep) }) : undefined @@ -204,9 +199,9 @@ // hrm? 
return cb(new Error("danger zone\n" + name + " " + - + regMatch + " " + locMatch)) + regMatch + " " + locMatch)) - }, function (er, installed) { + }, function (er) { if (er) return cb(er) asyncMap(remove, rm, function (er) { if (er) return cb(er) @@ -245,12 +240,19 @@ var versions = data.versions var ranges = data.ranges - npm.registry.get(name, function (er, data) { + mapToRegistry(name, npm.config, function (er, uri) { + if (er) return cb(er) + + npm.registry.get(uri, null, next) + }) + + function next (er, data) { var regVersions = er ? [] : Object.keys(data.versions) var locMatch = bestMatch(versions, ranges) - var regMatch; var tag = npm.config.get("tag") var distTag = data["dist-tags"] && data["dist-tags"][tag] + + var regMatch if (distTag && data.versions[distTag] && matches(distTag, ranges)) { regMatch = distTag } else { @@ -258,7 +260,7 @@ } cb(null, [[name, has, loc, locMatch, regMatch, locs]]) - }) + } }, cb) } diff -Nru nodejs-0.11.13/deps/npm/lib/deprecate.js nodejs-0.11.15/deps/npm/lib/deprecate.js --- nodejs-0.11.13/deps/npm/lib/deprecate.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/deprecate.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,6 @@ +var npm = require("./npm.js") + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = require("npm-package-arg") module.exports = deprecate @@ -6,42 +9,36 @@ deprecate.completion = function (opts, cb) { // first, get a list of remote packages this user owns. // once we have a user account, then don't complete anything. 
- var un = npm.config.get("username") - if (!npm.config.get("username")) return cb() if (opts.conf.argv.remain.length > 2) return cb() // get the list of packages by user - var uri = "/-/by-user/"+encodeURIComponent(un) - registry.get(uri, null, 60000, function (er, list) { - if (er) return cb() - console.error(list) - return cb(null, list[un]) + var path = "/-/by-user/" + mapToRegistry(path, npm.config, function (er, uri) { + if (er) return cb(er) + + var c = npm.config.getCredentialsByURI(uri) + if (!(c && c.username)) return cb() + + npm.registry.get(uri + c.username, { timeout : 60000 }, function (er, list) { + if (er) return cb() + console.error(list) + return cb(null, list[c.username]) + }) }) } -var semver = require("semver") - , npm = require("./npm.js") - , registry = npm.registry - function deprecate (args, cb) { var pkg = args[0] , msg = args[1] if (msg === undefined) return cb("Usage: " + deprecate.usage) + // fetch the data and make sure it exists. - pkg = pkg.split(/@/) - var name = pkg.shift() - , ver = pkg.join("@") - if (semver.validRange(ver) === null) { - return cb(new Error("invalid version range: "+ver)) - } - registry.get(name, function (er, data) { + var p = npa(pkg) + + mapToRegistry(p.name, npm.config, next) + + function next (er, uri) { if (er) return cb(er) - // filter all the versions that match - Object.keys(data.versions).filter(function (v) { - return semver.satisfies(v, ver, true) - }).forEach(function (v) { - data.versions[v].deprecated = msg - }) - // now update the doc on the registry - registry.request('PUT', data._id, data, cb) - }) + + npm.registry.deprecate(uri, p.spec, msg, cb) + } } diff -Nru nodejs-0.11.13/deps/npm/lib/docs.js nodejs-0.11.15/deps/npm/lib/docs.js --- nodejs-0.11.13/deps/npm/lib/docs.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/docs.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,16 +5,21 @@ docs.usage += "npm docs ." 
docs.completion = function (opts, cb) { - registry.get("/-/short", 60000, function (er, list) { - return cb(null, list || []) + mapToRegistry("/-/short", npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, { timeout : 60000 }, function (er, list) { + return cb(null, list || []) + }) }) } var npm = require("./npm.js") , registry = npm.registry , opener = require("opener") - , path = require('path') - , log = require('npmlog') + , path = require("path") + , log = require("npmlog") + , mapToRegistry = require("./utils/map-to-registry.js") function url (json) { return json.homepage ? json.homepage : "https://npmjs.org/package/" + json.name @@ -26,7 +31,9 @@ if (!pending) return getDoc('.', cb) args.forEach(function(proj) { getDoc(proj, function(err) { - if (err) return cb(err) + if (err) { + return cb(err) + } --pending || cb() }) }) @@ -34,11 +41,12 @@ function getDoc (project, cb) { project = project || '.' - var package = path.resolve(process.cwd(), "package.json") + var package = path.resolve(npm.localPrefix, "package.json") if (project === '.' 
|| project === './') { + var json try { - var json = require(package) + json = require(package) if (!json.name) throw new Error('package.json does not have a valid "name" property') project = json.name } catch (e) { @@ -49,7 +57,13 @@ return opener(url(json), { command: npm.config.get("browser") }, cb) } - registry.get(project + "/latest", 3600, function (er, json) { + mapToRegistry(project, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri + "/latest", { timeout : 3600 }, next) + }) + + function next (er, json) { var github = "https://github.com/" + project + "#readme" if (er) { @@ -58,5 +72,5 @@ } return opener(url(json), { command: npm.config.get("browser") }, cb) - }) + } } diff -Nru nodejs-0.11.13/deps/npm/lib/edit.js nodejs-0.11.15/deps/npm/lib/edit.js --- nodejs-0.11.13/deps/npm/lib/edit.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/edit.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,7 +7,6 @@ edit.completion = require("./utils/completion/installed-shallow.js") var npm = require("./npm.js") - , spawn = require("child_process").spawn , path = require("path") , fs = require("graceful-fs") , editor = require("editor") diff -Nru nodejs-0.11.13/deps/npm/lib/explore.js nodejs-0.11.15/deps/npm/lib/explore.js --- nodejs-0.11.13/deps/npm/lib/explore.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/explore.js 2015-01-20 21:22:17.000000000 +0000 @@ -26,7 +26,8 @@ "\nExploring "+cwd+"\n"+ "Type 'exit' or ^D when finished\n") - var shell = spawn(sh, args, { cwd: cwd, customFds: [0, 1, 2] }) + npm.spinner.stop() + var shell = spawn(sh, args, { cwd: cwd, stdio: "inherit" }) shell.on("close", function (er) { // only fail if non-interactive. 
if (!args.length) return cb() diff -Nru nodejs-0.11.13/deps/npm/lib/help.js nodejs-0.11.15/deps/npm/lib/help.js --- nodejs-0.11.13/deps/npm/lib/help.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/help.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,8 +6,7 @@ getSections(cb) } -var fs = require("graceful-fs") - , path = require("path") +var path = require("path") , spawn = require("child_process").spawn , npm = require("./npm.js") , log = require("npmlog") @@ -15,6 +14,7 @@ , glob = require("glob") function help (args, cb) { + npm.spinner.stop() var argv = npm.config.get("argv").cooked var argnum = 0 @@ -31,7 +31,7 @@ // npm help : show basic usage if (!section) { - var valid = argv[0] === 'help' ? 0 : 1 + var valid = argv[0] === "help" ? 0 : 1 return npmUsage(valid, cb) } @@ -57,7 +57,6 @@ // npm help
: Try to find the path var manroot = path.resolve(__dirname, "..", "man") - var htmlroot = path.resolve(__dirname, "..", "html", "doc") // legacy if (section === "global") @@ -108,10 +107,11 @@ env.MANPATH = manpath var viewer = npm.config.get("viewer") + var conf switch (viewer) { case "woman": var a = ["-e", "(woman-find-file \"" + man + "\")"] - var conf = { env: env, customFds: [ 0, 1, 2] } + conf = { env: env, stdio: "inherit" } var woman = spawn("emacsclient", a, conf) woman.on("close", cb) break @@ -121,9 +121,9 @@ break default: - var conf = { env: env, customFds: [ 0, 1, 2] } - var man = spawn("man", [num, section], conf) - man.on("close", cb) + conf = { env: env, stdio: "inherit" } + var manProcess = spawn("man", [num, section], conf) + manProcess.on("close", cb) break } } @@ -153,8 +153,8 @@ function npmUsage (valid, cb) { npm.config.set("loglevel", "silent") log.level = "silent" - console.log - ( ["\nUsage: npm " + console.log( + [ "\nUsage: npm " , "" , "where is one of:" , npm.config.get("long") ? 
usages() @@ -192,12 +192,11 @@ + (usage.split("\n") .join("\n" + (new Array(maxLen + 6).join(" ")))) }).join("\n") - return out } function wrap (arr) { - var out = [''] + var out = [""] , l = 0 , line @@ -210,9 +209,9 @@ arr.sort(function (a,b) { return a 3) return "" for (var out = line, a = 0, l = args.length; a < l; a ++) { var finder = out.toLowerCase().split(args[a].toLowerCase()) - , newOut = [] + , newOut = "" , p = 0 + finder.forEach(function (f) { - newOut.push( out.substr(p, f.length) - , "\1" - , out.substr(p + f.length, args[a].length) - , "\2" ) + newOut += out.substr(p, f.length) + + var hilit = out.substr(p + f.length, args[a].length) + if (npm.color) hilit = color.bgBlack(color.red(hilit)) + newOut += hilit + p += f.length + args[a].length }) - out = newOut.join("") } - if (npm.color) { - var color = "\033[31;40m" - , reset = "\033[0m" - } else { - var color = "" - , reset = "" - } - out = out.split("\1").join(color) - .split("\2").join(reset) - return out + + return newOut }).join("\n").trim() return out }).join("\n") diff -Nru nodejs-0.11.13/deps/npm/lib/init.js nodejs-0.11.15/deps/npm/lib/init.js --- nodejs-0.11.13/deps/npm/lib/init.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/init.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,30 +7,35 @@ , npm = require("./npm.js") , initJson = require("init-package-json") -init.usage = "npm init" +init.usage = "npm init [--force/-f]" function init (args, cb) { var dir = process.cwd() log.pause() - var initFile = npm.config.get('init-module') - - console.log( - ["This utility will walk you through creating a package.json file." - ,"It only covers the most common items, and tries to guess sane defaults." - ,"" - ,"See `npm help json` for definitive documentation on these fields" - ,"and exactly what they do." - ,"" - ,"Use `npm install --save` afterwards to install a package and" - ,"save it as a dependency in the package.json file." - ,"" - ,"Press ^C at any time to quit." 
- ].join("\n")) - + npm.spinner.stop() + var initFile = npm.config.get("init-module") + if (!initJson.yes(npm.config)) { + console.log( + ["This utility will walk you through creating a package.json file." + ,"It only covers the most common items, and tries to guess sane defaults." + ,"" + ,"See `npm help json` for definitive documentation on these fields" + ,"and exactly what they do." + ,"" + ,"Use `npm install --save` afterwards to install a package and" + ,"save it as a dependency in the package.json file." + ,"" + ,"Press ^C at any time to quit." + ].join("\n")) + } initJson(dir, initFile, npm.config, function (er, data) { log.resume() - log.silly('package data', data) - log.info('init', 'written successfully') + log.silly("package data", data) + log.info("init", "written successfully") + if (er && er.message === "canceled") { + log.warn("init", "canceled") + return cb(null, data) + } cb(er, data) }) } diff -Nru nodejs-0.11.13/deps/npm/lib/install.js nodejs-0.11.15/deps/npm/lib/install.js --- nodejs-0.11.13/deps/npm/lib/install.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/install.js 2015-01-20 21:22:17.000000000 +0000 @@ -34,26 +34,34 @@ // if it starts with https?://, then just give up, because it's a url // for now, not yet implemented. 
var registry = npm.registry - registry.get("/-/short", function (er, pkgs) { - if (er) return cb() - if (!opts.partialWord) return cb(null, pkgs) + mapToRegistry("-/short", npm.config, function (er, uri) { + if (er) return cb(er) - var name = opts.partialWord.split("@").shift() - pkgs = pkgs.filter(function (p) { - return p.indexOf(name) === 0 - }) + registry.get(uri, null, function (er, pkgs) { + if (er) return cb() + if (!opts.partialWord) return cb(null, pkgs) - if (pkgs.length !== 1 && opts.partialWord === name) { - return cb(null, pkgs) - } + var name = npa(opts.partialWord).name + pkgs = pkgs.filter(function (p) { + return p.indexOf(name) === 0 + }) - registry.get(pkgs[0], function (er, d) { - if (er) return cb() - return cb(null, Object.keys(d["dist-tags"] || {}) - .concat(Object.keys(d.versions || {})) - .map(function (t) { - return pkgs[0] + "@" + t - })) + if (pkgs.length !== 1 && opts.partialWord === name) { + return cb(null, pkgs) + } + + mapToRegistry(pkgs[0], npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, null, function (er, d) { + if (er) return cb() + return cb(null, Object.keys(d["dist-tags"] || {}) + .concat(Object.keys(d.versions || {})) + .map(function (t) { + return pkgs[0] + "@" + t + })) + }) + }) }) }) } @@ -65,6 +73,7 @@ , log = require("npmlog") , path = require("path") , fs = require("graceful-fs") + , writeFileAtomic = require("write-file-atomic") , cache = require("./cache.js") , asyncMap = require("slide").asyncMap , chain = require("slide").chain @@ -72,9 +81,14 @@ , mkdir = require("mkdirp") , lifecycle = require("./utils/lifecycle.js") , archy = require("archy") - , isGitUrl = require("./utils/is-git-url.js") , npmInstallChecks = require("npm-install-checks") , sortedObject = require("sorted-object") + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = require("npm-package-arg") + , inflight = require("inflight") + , locker = require("./utils/locker.js") + , lock = locker.lock + , 
unlock = locker.unlock function install (args, cb_) { var hasArguments = !!args.length @@ -110,7 +124,7 @@ where = args args = [].concat(cb_) // pass in [] to do default dep-install cb_ = arguments[2] - log.verbose("install", "where,what", [where, args]) + log.verbose("install", "where, what", [where, args]) } if (!npm.config.get("global")) { @@ -119,7 +133,7 @@ }) } - mkdir(where, function (er, made) { + mkdir(where, function (er) { if (er) return cb(er) // install dependencies locally by default, // or install current folder globally @@ -134,6 +148,22 @@ } var deps = Object.keys(data.dependencies || {}) log.verbose("install", "where, deps", [where, deps]) + + // FIXME: Install peerDependencies as direct dependencies, but only at + // the top level. Should only last until peerDependencies are nerfed to + // no longer implicitly install themselves. + var peers = [] + Object.keys(data.peerDependencies || {}).forEach(function (dep) { + if (!data.dependencies[dep]) { + log.verbose( + "install", + "peerDependency", dep, "wasn't going to be installed; adding" + ) + peers.push(dep) + } + }) + log.verbose("install", "where, peers", [where, peers]) + var context = { family: {} , ancestors: {} , explicit: false @@ -151,11 +181,12 @@ installManyTop(deps.map(function (dep) { var target = data.dependencies[dep] - , parsed = url.parse(target.replace(/^git\+/, "git")) - target = dep + "@" + target - return target - }), where, context, function(er, results) { - if (er) return cb(er, results) + return dep + "@" + target + }).concat(peers.map(function (dep) { + var target = data.peerDependencies[dep] + return dep + "@" + target + })), where, context, function(er, results) { + if (er || npm.config.get("production")) return cb(er, results) lifecycle(data, "prepublish", where, function(er) { return cb(er, results) }) @@ -197,7 +228,7 @@ function findPeerInvalid_ (packageMap, fpiList) { if (fpiList.indexOf(packageMap) !== -1) - return + return undefined fpiList.push(packageMap) @@ 
-205,8 +236,8 @@ var pkg = packageMap[packageName] if (pkg.peerInvalid) { - var peersDepending = {}; - for (peerName in packageMap) { + var peersDepending = {} + for (var peerName in packageMap) { var peer = packageMap[peerName] if (peer.peerDependencies && peer.peerDependencies[packageName]) { peersDepending[peer.name + "@" + peer.version] = @@ -252,7 +283,13 @@ if (opts && opts.dev) { if (!data.dependencies) data.dependencies = {} Object.keys(data.devDependencies || {}).forEach(function (k) { - data.dependencies[k] = data.devDependencies[k] + if (data.dependencies[k]) { + log.warn("package.json", "Dependency '%s' exists in both dependencies " + + "and devDependencies, using '%s@%s' from dependencies", + k, k, data.dependencies[k]) + } else { + data.dependencies[k] = data.devDependencies[k] + } }) } @@ -284,13 +321,12 @@ var wrapfile = path.resolve(where, "npm-shrinkwrap.json") fs.readFile(wrapfile, "utf8", function (er, wrapjson) { - if (er) { - log.verbose("readDependencies", "using package.json deps") - return cb(null, data, null) - } + if (er) return cb(null, data, null) + log.verbose("readDependencies", "npm-shrinkwrap.json is overriding dependencies") + var newwrap try { - var newwrap = JSON.parse(wrapjson) + newwrap = JSON.parse(wrapjson) } catch (ex) { return cb(ex) } @@ -336,21 +372,33 @@ return cb(null, installed, tree, pretty) } - var saveBundle = npm.config.get('save-bundle') - var savePrefix = npm.config.get('save-prefix') || "^"; + var saveBundle = npm.config.get("save-bundle") + var savePrefix = npm.config.get("save-prefix") // each item in the tree is a top-level thing that should be saved // to the package.json file. // The relevant tree shape is { : {what:} } var saveTarget = path.resolve(where, "package.json") - , things = Object.keys(tree).map(function (k) { - // if "what" was a url, then save that instead. 
- var t = tree[k] - , u = url.parse(t.from) - , w = t.what.split("@") - if (u && u.protocol) w[1] = t.from - return w - }).reduce(function (set, k) { + + asyncMap(Object.keys(tree), function (k, cb) { + // if "what" was a url, then save that instead. + var t = tree[k] + , u = url.parse(t.from) + , a = npa(t.what) + , w = [a.name, a.spec] + + + fs.stat(t.from, function (er){ + if (!er) { + w[1] = "file:" + t.from + } else if (u && u.protocol) { + w[1] = t.from + } + cb(null, [w]) + }) + } + , function (er, arr) { + var things = arr.reduce(function (set, k) { var rangeDescriptor = semver.valid(k[1], true) && semver.gte(k[1], "0.1.0", true) && !npm.config.get("save-exact") @@ -359,46 +407,49 @@ return set }, {}) - // don't use readJson, because we don't want to do all the other - // tricky npm-specific stuff that's in there. - fs.readFile(saveTarget, function (er, data) { - // ignore errors here, just don't save it. - try { - data = JSON.parse(data.toString("utf8")) - } catch (ex) { - er = ex - } - - if (er) { - return cb(null, installed, tree, pretty) - } - - var deps = npm.config.get("save-optional") ? "optionalDependencies" - : npm.config.get("save-dev") ? "devDependencies" - : "dependencies" - - if (saveBundle) { - var bundle = data.bundleDependencies || data.bundledDependencies - delete data.bundledDependencies - if (!Array.isArray(bundle)) bundle = [] - data.bundleDependencies = bundle.sort() - } - - log.verbose('saving', things) - data[deps] = data[deps] || {} - Object.keys(things).forEach(function (t) { - data[deps][t] = things[t] + + // don't use readJson, because we don't want to do all the other + // tricky npm-specific stuff that's in there. + fs.readFile(saveTarget, function (er, data) { + // ignore errors here, just don't save it. + try { + data = JSON.parse(data.toString("utf8")) + } catch (ex) { + er = ex + } + + if (er) { + return cb(null, installed, tree, pretty) + } + + var deps = npm.config.get("save-optional") ? 
"optionalDependencies" + : npm.config.get("save-dev") ? "devDependencies" + : "dependencies" + if (saveBundle) { - var i = bundle.indexOf(t) - if (i === -1) bundle.push(t) + var bundle = data.bundleDependencies || data.bundledDependencies + delete data.bundledDependencies + if (!Array.isArray(bundle)) bundle = [] + data.bundleDependencies = bundle.sort() } - }) - data[deps] = sortedObject(data[deps]) + log.verbose("saving", things) + data[deps] = data[deps] || {} + Object.keys(things).forEach(function (t) { + data[deps][t] = things[t] + if (saveBundle) { + var i = bundle.indexOf(t) + if (i === -1) bundle.push(t) + data.bundleDependencies = bundle.sort() + } + }) + + data[deps] = sortedObject(data[deps]) - data = JSON.stringify(data, null, 2) + "\n" - fs.writeFile(saveTarget, data, function (er) { - cb(er, installed, tree, pretty) + data = JSON.stringify(data, null, 2) + "\n" + writeFileAtomic(saveTarget, data, function (er) { + cb(er, installed, tree, pretty) + }) }) }) } @@ -409,22 +460,22 @@ // that the submodules are not immediately require()able. 
// TODO: Show the complete tree, ls-style, but only if --long is provided function prettify (tree, installed) { - if (npm.config.get("json")) { - function red (set, kv) { - set[kv[0]] = kv[1] - return set - } + function red (set, kv) { + set[kv[0]] = kv[1] + return set + } + if (npm.config.get("json")) { tree = Object.keys(tree).map(function (p) { if (!tree[p]) return null - var what = tree[p].what.split("@") - , name = what.shift() - , version = what.join("@") + var what = npa(tree[p].what) + , name = what.name + , version = what.spec , o = { name: name, version: version, from: tree[p].from } o.dependencies = tree[p].children.map(function P (dep) { - var what = dep.what.split("@") - , name = what.shift() - , version = what.join("@") + var what = npa(dep.what) + , name = what.name + , version = what.spec , o = { version: version, from: dep.from } o.dependencies = dep.children.map(P).reduce(red, {}) return [name, o] @@ -524,36 +575,61 @@ function installManyTop_ (what, where, context, cb) { var nm = path.resolve(where, "node_modules") - , names = context.explicit - ? what.map(function (w) { return w.split(/@/).shift() }) - : [] fs.readdir(nm, function (er, pkgs) { if (er) return installMany(what, where, context, cb) - pkgs = pkgs.filter(function (p) { + + var scopes = [], unscoped = [] + pkgs.filter(function (p) { return !p.match(/^[\._-]/) + }).forEach(function (p) { + // @names deserve deeper investigation + if (p[0] === "@") { + scopes.push(p) + } + else { + unscoped.push(p) + } }) - asyncMap(pkgs.map(function (p) { - return path.resolve(nm, p, "package.json") - }), function (jsonfile, cb) { - readJson(jsonfile, log.warn, function (er, data) { - if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) - if (er) return cb(null, []) - return cb(null, [[data.name, data.version]]) - }) - }, function (er, packages) { - // if there's nothing in node_modules, then don't freak out. 
- if (er) packages = [] - // add all the existing packages to the family list. - // however, do not add to the ancestors list. - packages.forEach(function (p) { - context.family[p[0]] = p[1] + + maybeScoped(scopes, nm, function (er, scoped) { + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) + // recombine unscoped with @scope/package packages + asyncMap(unscoped.concat(scoped).map(function (p) { + return path.resolve(nm, p, "package.json") + }), function (jsonfile, cb) { + readJson(jsonfile, log.warn, function (er, data) { + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) + if (er) return cb(null, []) + cb(null, [[data.name, data.version]]) + }) + }, function (er, packages) { + // if there's nothing in node_modules, then don't freak out. + if (er) packages = [] + // add all the existing packages to the family list. + // however, do not add to the ancestors list. + packages.forEach(function (p) { + context.family[p[0]] = p[1] + }) + installMany(what, where, context, cb) }) - return installMany(what, where, context, cb) }) }) } +function maybeScoped (scopes, where, cb) { + // find packages in scopes + asyncMap(scopes, function (scope, cb) { + fs.readdir(path.resolve(where, scope), function (er, scoped) { + if (er) return cb(er) + var paths = scoped.map(function (p) { + return path.join(scope, p) + }) + cb(null, paths) + }) + }, cb) +} + function installMany (what, where, context, cb) { // readDependencies takes care of figuring out whether the list of // dependencies we'll iterate below comes from an existing shrinkwrap from a @@ -593,7 +669,7 @@ targets.forEach(function (t) { newPrev[t.name] = t.version }) - log.silly("resolved", targets) + log.silly("install resolved", targets) targets.filter(function (t) { return t }).forEach(function (t) { log.info("install", "%s into %s", t._id, where) }) @@ -615,60 +691,69 @@ } function targetResolver (where, context, deps) { - var alreadyInstalledManually = context.explicit ? 
[] : null + var alreadyInstalledManually = [] + , resolveLeft = 0 , nm = path.resolve(where, "node_modules") , parent = context.parent , wrap = context.wrap - if (!context.explicit) fs.readdir(nm, function (er, inst) { - if (er) return alreadyInstalledManually = [] - - // don't even mess with non-package looking things - inst = inst.filter(function (p) { - return !p.match(/^[\._-]/) - }) + if (!context.explicit) readdir(nm) - asyncMap(inst, function (pkg, cb) { - readJson(path.resolve(nm, pkg, "package.json"), log.warn, function (er, d) { - if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) - // error means it's not a package, most likely. - if (er) return cb(null, []) - - // if it's a bundled dep, then assume that anything there is valid. - // otherwise, make sure that it's a semver match with what we want. - var bd = parent.bundleDependencies - if (bd && bd.indexOf(d.name) !== -1 || - semver.satisfies(d.version, deps[d.name] || "*", true) || - deps[d.name] === d._resolved) { - return cb(null, d.name) - } + function readdir(name) { + resolveLeft++ + fs.readdir(name, function (er, inst) { + if (er) return resolveLeft-- + + // don't even mess with non-package looking things + inst = inst.filter(function (p) { + if (!p.match(/^[@\._-]/)) return true + // scoped packages + readdir(path.join(name, p)) + }) - // see if the package had been previously linked - fs.lstat(path.resolve(nm, pkg), function(err, s) { - if (err) return cb(null, []) - if (s.isSymbolicLink()) { + asyncMap(inst, function (pkg, cb) { + readJson(path.resolve(name, pkg, "package.json"), log.warn, function (er, d) { + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) + // error means it's not a package, most likely. + if (er) return cb(null, []) + + // if it's a bundled dep, then assume that anything there is valid. + // otherwise, make sure that it's a semver match with what we want. 
+ var bd = parent.bundleDependencies + if (bd && bd.indexOf(d.name) !== -1 || + semver.satisfies(d.version, deps[d.name] || "*", true) || + deps[d.name] === d._resolved) { return cb(null, d.name) } - // something is there, but it's not satisfactory. Clobber it. - return cb(null, []) + // see if the package had been previously linked + fs.lstat(path.resolve(nm, pkg), function(err, s) { + if (err) return cb(null, []) + if (s.isSymbolicLink()) { + return cb(null, d.name) + } + + // something is there, but it's not satisfactory. Clobber it. + return cb(null, []) + }) }) + }, function (er, inst) { + // this is the list of things that are valid and should be ignored. + alreadyInstalledManually = alreadyInstalledManually.concat(inst) + resolveLeft-- }) - }, function (er, inst) { - // this is the list of things that are valid and should be ignored. - alreadyInstalledManually = inst }) - }) + } var to = 0 return function resolver (what, cb) { - if (!alreadyInstalledManually) return setTimeout(function () { + if (resolveLeft) return setTimeout(function () { resolver(what, cb) }, to++) // now we know what's been installed here manually, // or tampered with in some way that npm doesn't want to overwrite. - if (alreadyInstalledManually.indexOf(what.split("@").shift()) !== -1) { + if (alreadyInstalledManually.indexOf(npa(what).name) !== -1) { log.verbose("already installed", "skipping %s %s", what, where) return cb(null, []) } @@ -692,7 +777,7 @@ } if (wrap) { - var name = what.split(/@/).shift() + var name = npa(what).name if (wrap[name]) { var wrapTarget = readWrap(wrap[name]) what = name + "@" + wrapTarget @@ -709,19 +794,16 @@ // already has a matching copy. // If it's not a git repo, and the parent already has that pkg, then // we can skip installing it again. 
- cache.add(what, function (er, data) { + var pkgroot = path.resolve(npm.prefix, (parent && parent._from) || "") + cache.add(what, null, pkgroot, false, function (er, data) { if (er && parent && parent.optionalDependencies && - parent.optionalDependencies.hasOwnProperty(what.split("@")[0])) { + parent.optionalDependencies.hasOwnProperty(npa(what).name)) { log.warn("optional dep failed, continuing", what) log.verbose("optional dep failed, continuing", [what, er]) return cb(null, []) } - var isGit = false - , maybeGit = what.split("@").slice(1).join() - - if (maybeGit) - isGit = isGitUrl(url.parse(maybeGit)) + var isGit = npa(what).type === "git" if (!er && data && @@ -733,6 +815,7 @@ return cb(null, []) } + if (data && !data._from) data._from = what if (er && parent && parent.name) er.parent = parent.name return cb(er, data || []) @@ -771,6 +854,13 @@ , parent = context.parent readJson(jsonFile, log.warn, function (er, data) { + function thenLink () { + npm.commands.link([target.name], function (er, d) { + log.silly("localLink", "back from link", [er, d]) + cb(er, [resultList(target, where, parent && parent._id)]) + }) + } + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) if (er || data._id === target._id) { if (er) { @@ -781,14 +871,6 @@ thenLink() }) } else thenLink() - - function thenLink () { - npm.commands.link([target.name], function (er, d) { - log.silly("localLink", "back from link", [er, d]) - cb(er, [resultList(target, where, parent && parent._id)]) - }) - } - } else { log.verbose("localLink", "install locally (no link)", target._id) installOne_(target, where, context, cb) @@ -819,15 +901,9 @@ , target._from ] } -// name => install locations -var installOnesInProgress = Object.create(null) +var installed = Object.create(null) -function isIncompatibleInstallOneInProgress(target, where) { - return target.name in installOnesInProgress && - installOnesInProgress[target.name].indexOf(where) !== -1 -} - -function installOne_ (target, 
where, context, cb) { +function installOne_ (target, where, context, cb_) { var nm = path.resolve(where, "node_modules") , targetFolder = path.resolve(nm, target.name) , prettyWhere = path.relative(process.cwd(), where) @@ -835,39 +911,55 @@ if (prettyWhere === ".") prettyWhere = null - if (isIncompatibleInstallOneInProgress(target, where)) { - var prettyTarget = path.relative(process.cwd(), targetFolder) + cb_ = inflight(target.name + ":" + where, cb_) + if (!cb_) return log.verbose( + "installOne", + "of", target.name, + "to", where, + "already in flight; waiting" + ) + else log.verbose( + "installOne", + "of", target.name, + "to", where, + "not in flight; installing" + ) + + function cb(er, data) { + unlock(nm, target.name, function () { cb_(er, data) }) + } - // just call back, with no error. the error will be detected in the - // final check for peer-invalid dependencies - return cb() - } - - if (!(target.name in installOnesInProgress)) { - installOnesInProgress[target.name] = [] - } - installOnesInProgress[target.name].push(where) - var indexOfIOIP = installOnesInProgress[target.name].length - 1 - , force = npm.config.get("force") - , nodeVersion = npm.config.get("node-version") - , strict = npm.config.get("engine-strict") - , c = npmInstallChecks - - chain - ( [ [c.checkEngine, target, npm.version, nodeVersion, force, strict] - , [c.checkPlatform, target, force] - , [c.checkCycle, target, context.ancestors] - , [c.checkGit, targetFolder] - , [write, target, targetFolder, context] ] - , function (er, d) { - installOnesInProgress[target.name].splice(indexOfIOIP, 1) + lock(nm, target.name, function (er) { + if (er) return cb(er) - if (er) return cb(er) + if (targetFolder in installed) { + log.error("install", "trying to install", target.version, "to", targetFolder) + log.error("install", "but already installed versions", installed[targetFolder]) + installed[targetFolder].push(target.version) + } + else { + installed[targetFolder] = [target.version] + } - 
d.push(resultList(target, where, parent && parent._id)) - cb(er, d) - } - ) + var force = npm.config.get("force") + , nodeVersion = npm.config.get("node-version") + , strict = npm.config.get("engine-strict") + , c = npmInstallChecks + + chain( + [ [c.checkEngine, target, npm.version, nodeVersion, force, strict] + , [c.checkPlatform, target, force] + , [c.checkCycle, target, context.ancestors] + , [c.checkGit, targetFolder] + , [write, target, targetFolder, context] ] + , function (er, d) { + if (er) return cb(er) + + d.push(resultList(target, where, parent && parent._id)) + cb(er, d) + } + ) + }) } function write (target, targetFolder, context, cb_) { @@ -881,17 +973,16 @@ // is the list of installed packages from that last thing. if (!er) return cb_(er, data) - if (false === npm.config.get("rollback")) return cb_(er) - npm.commands.unbuild([targetFolder], true, function (er2) { - if (er2) log.error("error rolling back", target._id, er2) - return cb_(er, data) - }) + if (npm.config.get("rollback") === false) return cb_(er) + npm.rollbacks.push(targetFolder) + cb_(er, data) } var bundled = [] - chain - ( [ [ cache.unpack, target.name, target.version, targetFolder + log.silly("install write", "writing", target.name, target.version, "to", targetFolder) + chain( + [ [ cache.unpack, target.name, target.version, targetFolder , null, null, user, group ] , [ fs, "writeFile" , path.resolve(targetFolder, "package.json") @@ -926,14 +1017,27 @@ , explicit: false , wrap: wrap } + var actions = + [ [ installManyAndBuild, deps, depsTargetFolder, depsContext ] ] + + // FIXME: This is an accident waiting to happen! + // + // 1. If multiple children at the same level of the tree share a + // peerDependency that's not in the parent's dependencies, because + // the peerDeps don't get added to the family, they will keep + // getting reinstalled (worked around by inflighting installOne). + // 2. 
The installer can't safely build at the parent level because + // that's already being done by the parent's installAndBuild. This + // runs the risk of the peerDependency never getting built. + // + // The fix: Don't install peerDependencies; require them to be + // included as explicit dependencies / devDependencies, and warn + // or error when they're missing. See #5080 for more arguments in + // favor of killing implicit peerDependency installs with fire. var peerDeps = prepareForInstallMany(data, "peerDependencies", bundled, wrap, family) var pdTargetFolder = path.resolve(targetFolder, "..", "..") var pdContext = context - - var actions = - [ [ installManyAndBuild, deps, depsTargetFolder, depsContext ] ] - if (peerDeps.length > 0) { actions.push( [ installMany, peerDeps, pdTargetFolder, pdContext ] @@ -976,9 +1080,9 @@ return !semver.satisfies(family[d], packageData[depsKey][d], true) return true }).map(function (d) { - var t = packageData[depsKey][d] - , parsed = url.parse(t.replace(/^git\+/, "git")) - t = d + "@" + t + var v = packageData[depsKey][d] + var t = d + "@" + v + log.silly("prepareForInstallMany", "adding", t, "from", packageData.name, depsKey) return t }) } diff -Nru nodejs-0.11.13/deps/npm/lib/link.js nodejs-0.11.15/deps/npm/lib/link.js --- nodejs-0.11.13/deps/npm/lib/link.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/link.js 2015-01-20 21:22:17.000000000 +0000 @@ -10,6 +10,7 @@ , path = require("path") , rm = require("./utils/gently-rm.js") , build = require("./build.js") + , npa = require("npm-package-arg") module.exports = link @@ -49,25 +50,26 @@ function linkInstall (pkgs, cb) { asyncMap(pkgs, function (pkg, cb) { + var t = path.resolve(npm.globalDir, "..") + , pp = path.resolve(npm.globalDir, pkg) + , rp = null + , target = path.resolve(npm.dir, pkg) + function n (er, data) { if (er) return cb(er, data) // install returns [ [folder, pkgId], ... ] // but we definitely installed just one thing. 
var d = data.filter(function (d) { return !d[3] }) + var what = npa(d[0][0]) pp = d[0][1] - pkg = path.basename(pp) + pkg = what.name target = path.resolve(npm.dir, pkg) next() } - var t = path.resolve(npm.globalDir, "..") - , pp = path.resolve(npm.globalDir, pkg) - , rp = null - , target = path.resolve(npm.dir, pkg) - - // if it's a folder or a random not-installed thing, then - // link or install it first - if (pkg.indexOf("/") !== -1 || pkg.indexOf("\\") !== -1) { + // if it's a folder, a random not-installed thing, or not a scoped package, + // then link or install it first + if (pkg[0] !== "@" && (pkg.indexOf("/") !== -1 || pkg.indexOf("\\") !== -1)) { return fs.lstat(path.resolve(pkg), function (er, st) { if (er || !st.isDirectory()) { npm.commands.install(t, pkg, n) @@ -132,7 +134,7 @@ if (er) return cb(er) log.verbose("link", "build target", target) // also install missing dependencies. - npm.commands.install(me, [], function (er, installed) { + npm.commands.install(me, [], function (er) { if (er) return cb(er) // build the global stuff. Don't run *any* scripts, because // install command already will have done that. 
diff -Nru nodejs-0.11.13/deps/npm/lib/ls.js nodejs-0.11.15/deps/npm/lib/ls.js --- nodejs-0.11.13/deps/npm/lib/ls.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/ls.js 2015-01-20 21:22:17.000000000 +0000 @@ -14,7 +14,8 @@ , archy = require("archy") , semver = require("semver") , url = require("url") - , isGitUrl = require("./utils/is-git-url.js") + , color = require("ansicolors") + , npa = require("npm-package-arg") ls.usage = "npm ls" @@ -28,9 +29,9 @@ // npm ls 'foo@~1.3' bar 'baz@<2' if (!args) args = [] else args = args.map(function (a) { - var nv = a.split("@") - , name = nv.shift() - , ver = semver.validRange(nv.join("@")) || "" + var p = npa(a) + , name = p.name + , ver = semver.validRange(p.rawSpec) || "" return [ name, ver ] }) @@ -38,6 +39,7 @@ var depth = npm.config.get("depth") var opt = { depth: depth, log: log.warn, dev: true } readInstalled(dir, opt, function (er, data) { + pruneNestedExtraneous(data) var bfs = bfsify(data, args) , lite = getLite(bfs) @@ -68,15 +70,22 @@ // if any errors were found, then complain and exit status 1 if (lite.problems && lite.problems.length) { - er = lite.problems.join('\n') + er = lite.problems.join("\n") } cb(er, data, lite) }) } -// only include -function filter (data, args) { - +function pruneNestedExtraneous (data, visited) { + visited = visited || [] + visited.push(data) + for (var i in data.dependencies) { + if (data.dependencies[i].extraneous) { + data.dependencies[i].dependencies = {} + } else if (visited.indexOf(data.dependencies[i]) === -1) { + pruneNestedExtraneous(data.dependencies[i], visited) + } + } } function alphasort (a, b) { @@ -129,7 +138,7 @@ if (typeof dep === "string") { lite.problems = lite.problems || [] var p - if (data.depth >= maxDepth) { + if (data.depth > maxDepth) { p = "max depth reached: " } else { p = "missing: " @@ -221,16 +230,14 @@ } function makeArchy_ (data, long, dir, depth, parent, d) { - var color = npm.color if (typeof data === "string") { - if (depth < 
npm.config.get("depth")) { + if (depth -1 <= npm.config.get("depth")) { // just missing - var p = parent.link || parent.path var unmet = "UNMET DEPENDENCY" - if (color) { - unmet = "\033[31;40m" + unmet + "\033[0m" + if (npm.color) { + unmet = color.bgBlack(color.red(unmet)) } - data = unmet + " " + d + " " + data + data = unmet + " " + d + "@" + data } else { data = d+"@"+ data } @@ -241,35 +248,37 @@ // the top level is a bit special. out.label = data._id || "" if (data._found === true && data._id) { - var pre = color ? "\033[33;40m" : "" - , post = color ? "\033[m" : "" - out.label = pre + out.label.trim() + post + " " + if (npm.color) { + out.label = color.bgBlack(color.yellow(out.label.trim())) + " " + } + else { + out.label = out.label.trim() + " " + } } if (data.link) out.label += " -> " + data.link if (data.invalid) { if (data.realName !== data.name) out.label += " ("+data.realName+")" - out.label += " " + (color ? "\033[31;40m" : "") - + "invalid" - + (color ? "\033[0m" : "") + var invalid = "invalid" + if (npm.color) invalid = color.bgBlack(color.red(invalid)) + out.label += " " + invalid } if (data.peerInvalid) { - out.label += " " + (color ? "\033[31;40m" : "") - + "peer invalid" - + (color ? "\033[0m" : "") + var peerInvalid = "peer invalid" + if (npm.color) peerInvalid = color.bgBlack(color.red(peerInvalid)) + out.label += " " + peerInvalid } if (data.extraneous && data.path !== dir) { - out.label += " " + (color ? "\033[32;40m" : "") - + "extraneous" - + (color ? 
"\033[0m" : "") + var extraneous = "extraneous" + if (npm.color) extraneous = color.bgBlack(color.green(extraneous)) + out.label += " " + extraneous } // add giturl to name@version if (data._resolved) { - var p = url.parse(data._resolved) - if (isGitUrl(p)) + if (npa(data._resolved).type === "git") out.label += " (" + data._resolved + ")" } @@ -294,7 +303,7 @@ return out } -function getExtras (data, dir) { +function getExtras (data) { var extras = [] if (data.description) extras.push(data.description) @@ -329,7 +338,6 @@ if (typeof data === "string") { if (data.depth < npm.config.get("depth")) { - var p = parent.link || parent.path data = npm.config.get("long") ? path.resolve(parent.path, "node_modules", d) + ":"+d+"@"+JSON.stringify(data)+":INVALID:MISSING" diff -Nru nodejs-0.11.13/deps/npm/lib/npm.js nodejs-0.11.15/deps/npm/lib/npm.js --- nodejs-0.11.13/deps/npm/lib/npm.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/npm.js 2015-01-20 21:22:17.000000000 +0000 @@ -15,21 +15,16 @@ require('child-process-close') var EventEmitter = require("events").EventEmitter - , npm = module.exports = new EventEmitter - , config = require("./config.js") - , npmconf = require("npmconf") + , npm = module.exports = new EventEmitter() + , npmconf = require("./config/core.js") , log = require("npmlog") , fs = require("graceful-fs") , path = require("path") , abbrev = require("abbrev") , which = require("which") , semver = require("semver") - , findPrefix = require("./utils/find-prefix.js") - , getUid = require("uid-number") - , mkdirp = require("mkdirp") - , slide = require("slide") - , chain = slide.chain , RegClient = require("npm-registry-client") + , charSpin = require("char-spinner") npm.config = { loaded: false, @@ -41,41 +36,16 @@ } } -// /usr/local is often a read-only fs, which is not -// well handled by node or mkdirp. Just double-check -// in the case of errors when making the prefix dirs. 
-function mkdir (p, cb) { - mkdirp(p, function (er, made) { - // it could be that we couldn't create it, because it - // already exists, and is on a read-only fs. - if (er) { - return fs.stat(p, function (er2, st) { - if (er2 || !st.isDirectory()) return cb(er) - return cb(null, made) - }) - } - return cb(er, made) - }) -} - npm.commands = {} +npm.rollbacks = [] + try { var pv = process.version.replace(/^v/, '') // startup, ok to do this synchronously var j = JSON.parse(fs.readFileSync( path.join(__dirname, "../package.json"))+"") npm.version = j.version - npm.nodeVersionRequired = j.engines.node - if (!semver.satisfies(pv, j.engines.node)) { - log.warn("unsupported version", ["" - ,"npm requires node version: "+j.engines.node - ,"And you have: "+pv - ,"which is not satisfactory." - ,"" - ,"Bad things will likely happen. You have been warned." - ,""].join("\n")) - } } catch (ex) { try { log.info("error reading version", ex) @@ -129,7 +99,6 @@ , "update" , "outdated" , "prune" - , "submodule" , "pack" , "dedupe" @@ -173,6 +142,7 @@ ] , plumbing = [ "build" , "unbuild" + , "isntall" , "xmas" , "substack" , "visnup" @@ -182,6 +152,25 @@ }) , abbrevs = abbrev(fullList) +npm.spinner = + { int: null + , started: false + , start: function () { + if (npm.spinner.int) return + var c = npm.config.get("spin") + if (!c) return + var stream = npm.config.get("logstream") + var opt = { tty: c !== "always", stream: stream } + opt.cleanup = !npm.spinner.started + npm.spinner.int = charSpin(opt) + npm.spinner.started = true + } + , stop: function () { + clearInterval(npm.spinner.int) + npm.spinner.int = null + } + } + Object.keys(abbrevs).concat(plumbing).forEach(function addCommand (c) { Object.defineProperty(npm.commands, c, { get : function () { if (!loaded) throw new Error( @@ -204,17 +193,20 @@ } if (args.length === 1) args.unshift([]) - npm.registry.refer = [a].concat(args[0]).map(function (arg) { - // exclude anything that might be a URL, path, or private module - // Those 
things will always have a slash in them somewhere - if (arg && arg.match && arg.match(/\/|\\/)) { - return "[REDACTED]" - } else { - return arg - } - }).filter(function (arg) { - return arg && arg.match - }).join(" ") + npm.registry.version = npm.version + if (!npm.registry.refer) { + npm.registry.refer = [a].concat(args[0]).map(function (arg) { + // exclude anything that might be a URL, path, or private module + // Those things will always have a slash in them somewhere + if (arg && arg.match && arg.match(/\/|\\/)) { + return "[REDACTED]" + } else { + return arg + } + }).filter(function (arg) { + return arg && arg.match + }).join(" ") + } cmd.apply(npm, args) } @@ -274,6 +266,8 @@ function cb (er) { if (loadErr) return + loadErr = er + if (er) return cb_(er) if (npm.config.get("force")) { log.warn("using --force", "I sure hope you know what you are doing.") } @@ -306,6 +300,21 @@ npmconf.load(cli, builtin, function (er, config) { if (er === config) er = null + npm.config = config + if (er) return cb(er) + + // if the "project" config is not a filename, and we're + // not in global mode, then that means that it collided + // with either the default or effective userland config + if (!config.get("global") + && config.sources.project + && config.sources.project.type !== "ini") { + log.verbose("config" + , "Skipping project config: %s. 
" + + "(matches userconfig)" + , config.localPrefix + "/.npmrc") + } + // Include npm-version and node-version in user-agent var ua = config.get("user-agent") || "" ua = ua.replace(/\{node-version\}/gi, process.version) @@ -314,27 +323,19 @@ ua = ua.replace(/\{arch\}/gi, process.arch) config.set("user-agent", ua) - npm.config = config - var color = config.get("color") log.level = config.get("loglevel") log.heading = config.get("heading") || "npm" log.stream = config.get("logstream") - switch (color) { - case "always": log.enableColor(); break - case false: log.disableColor(); break - } - log.resume() - - if (er) return cb(er) - // see if we need to color normal output switch (color) { case "always": + log.enableColor() npm.color = true break case false: + log.disableColor() npm.color = false break default: @@ -346,120 +347,28 @@ break } + log.resume() + // at this point the configs are all set. // go ahead and spin up the registry client. - var token = config.get("_token") - if (typeof token === "string") { - try { - token = JSON.parse(token) - config.set("_token", token, "user") - config.save("user") - } catch (e) { token = null } - } - npm.registry = new RegClient(npm.config) - // save the token cookie in the config file - if (npm.registry.couchLogin) { - npm.registry.couchLogin.tokenSet = function (tok) { - npm.config.set("_token", tok, "user") - // ignore save error. best effort. - npm.config.save("user") - } - } - var umask = npm.config.get("umask") npm.modes = { exec: 0777 & (~umask) , file: 0666 & (~umask) , umask: umask } - chain([ [ loadPrefix, npm, cli ] - , [ setUser, config, config.root ] - , [ loadUid, npm ] - ], cb) - }) - }) -} - -function loadPrefix (npm, config, cb) { - // try to guess at a good node_modules location. 
- var p - , gp - if (!Object.prototype.hasOwnProperty.call(config, "prefix")) { - p = process.cwd() - } else { - p = npm.config.get("prefix") - } - gp = npm.config.get("prefix") - - findPrefix(p, function (er, p) { - Object.defineProperty(npm, "localPrefix", - { get : function () { return p } - , set : function (r) { return p = r } - , enumerable : true - }) - // the prefix MUST exist, or else nothing works. - if (!npm.config.get("global")) { - mkdir(p, next) - } else { - next(er) - } - }) - - gp = path.resolve(gp) - Object.defineProperty(npm, "globalPrefix", - { get : function () { return gp } - , set : function (r) { return gp = r } - , enumerable : true - }) - // the prefix MUST exist, or else nothing works. - mkdir(gp, next) - - - var i = 2 - , errState = null - function next (er) { - if (errState) return - if (er) return cb(errState = er) - if (--i === 0) return cb() - } -} - - -function loadUid (npm, cb) { - // if we're not in unsafe-perm mode, then figure out who - // to run stuff as. Do this first, to support `npm update npm -g` - if (!npm.config.get("unsafe-perm")) { - getUid(npm.config.get("user"), npm.config.get("group"), cb) - } else { - process.nextTick(cb) - } -} + var gp = Object.getOwnPropertyDescriptor(config, "globalPrefix") + Object.defineProperty(npm, "globalPrefix", gp) -function setUser (cl, dc, cb) { - // If global, leave it as-is. - // If not global, then set the user to the owner of the prefix folder. - // Just set the default, so it can be overridden. 
- if (cl.get("global")) return cb() - if (process.env.SUDO_UID) { - dc.user = +(process.env.SUDO_UID) - return cb() - } + var lp = Object.getOwnPropertyDescriptor(config, "localPrefix") + Object.defineProperty(npm, "localPrefix", lp) - var prefix = path.resolve(cl.get("prefix")) - mkdir(prefix, function (er) { - if (er) { - log.error("could not create prefix dir", prefix) - return cb(er) - } - fs.stat(prefix, function (er, st) { - dc.user = st && st.uid - return cb(er) + return cb(null, npm) }) }) } - Object.defineProperty(npm, "prefix", { get : function () { return npm.config.get("global") ? npm.globalPrefix : npm.localPrefix @@ -514,11 +423,7 @@ }) var tmpFolder -var crypto = require("crypto") -var rand = crypto.randomBytes(6) - .toString("base64") - .replace(/\//g, '_') - .replace(/\+/, '-') +var rand = require("crypto").randomBytes(4).toString("hex") Object.defineProperty(npm, "tmp", { get : function () { if (!tmpFolder) tmpFolder = "npm-" + process.pid + "-" + rand diff -Nru nodejs-0.11.13/deps/npm/lib/outdated.js nodejs-0.11.15/deps/npm/lib/outdated.js --- nodejs-0.11.13/deps/npm/lib/outdated.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/outdated.js 2015-01-20 21:22:17.000000000 +0000 @@ -28,22 +28,23 @@ , asyncMap = require("slide").asyncMap , npm = require("./npm.js") , url = require("url") - , isGitUrl = require("./utils/is-git-url.js") , color = require("ansicolors") , styles = require("ansistyles") , table = require("text-table") , semver = require("semver") , os = require("os") + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = require("npm-package-arg") function outdated (args, silent, cb) { if (typeof cb !== "function") cb = silent, silent = false var dir = path.resolve(npm.dir, "..") outdated_(args, dir, {}, 0, function (er, list) { - if (er || silent) return cb(er, list) + if (er || silent || list.length === 0) return cb(er, list) if (npm.config.get("json")) { console.log(makeJSON(list)) } else if 
(npm.config.get("parseable")) { - console.log(makeParseable(list)); + console.log(makeParseable(list)) } else { var outList = list.map(makePretty) var outTable = [[ "Package" @@ -70,8 +71,7 @@ // [[ dir, dep, has, want, latest ]] function makePretty (p) { - var parseable = npm.config.get("parseable") - , dep = p[1] + var dep = p[1] , dir = path.resolve(p[0], "node_modules", dep) , has = p[2] , want = p[3] @@ -100,7 +100,7 @@ function ansiTrim (str) { var r = new RegExp("\x1b(?:\\[(?:\\d+[ABCDEFGJKSTm]|\\d+;\\d+[Hfm]|" + - "\\d+;\\d+;\\d+m|6n|s|u|\\?25[lh])|\\w)", "g"); + "\\d+;\\d+;\\d+m|6n|s|u|\\?25[lh])|\\w)", "g") return str.replace(r, "") } @@ -115,7 +115,7 @@ , dir = path.resolve(p[0], "node_modules", dep) , has = p[2] , want = p[3] - , latest = p[4]; + , latest = p[4] return [ dir , dep + "@" + want @@ -155,12 +155,33 @@ } var deps = null readJson(path.resolve(dir, "package.json"), function (er, d) { + d = d || {} if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) deps = (er) ? true : (d.dependencies || {}) + + if (npm.config.get("save-dev")) { + deps = d.devDependencies || {} + return next() + } + + if (npm.config.get("save")) { + // remove optional dependencies from dependencies during --save. 
+ Object.keys(d.optionalDependencies || {}).forEach(function (k) { + delete deps[k] + }) + return next() + } + + if (npm.config.get("save-optional")) { + deps = d.optionalDependencies || {} + return next() + } + var doUpdate = npm.config.get("dev") || (!npm.config.get("production") && !Object.keys(parentHas).length && !npm.config.get("global")) + if (!er && d && doUpdate) { Object.keys(d.devDependencies || {}).forEach(function (k) { if (!(k in parentHas)) { @@ -204,7 +225,7 @@ if (!has || !deps) return if (deps === true) { deps = Object.keys(has).reduce(function (l, r) { - l[r] = "*" + l[r] = "latest" return l }, {}) } @@ -244,23 +265,25 @@ return skip() } - if (isGitUrl(url.parse(req))) + if (npa(req).type === "git") return doIt("git", "git") - var registry = npm.registry // search for the latest package - registry.get(dep, function (er, d) { + mapToRegistry(dep, npm.config, function (er, uri) { + if (er) return cb(er) + + npm.registry.get(uri, null, updateDeps) + }) + + function updateDeps (er, d) { if (er) return cb() - if (!d || !d['dist-tags'] || !d.versions) return cb() - var l = d.versions[d['dist-tags'].latest] + if (!d || !d["dist-tags"] || !d.versions) return cb() + var l = d.versions[d["dist-tags"].latest] if (!l) return cb() - // set to true if found in doc - var found = false - var r = req - if (d['dist-tags'][req]) - r = d['dist-tags'][req] + if (d["dist-tags"][req]) + r = d["dist-tags"][req] if (semver.validRange(r, true)) { // some kind of semver range. @@ -273,13 +296,13 @@ } // We didn't find the version in the doc. See if cache can find it. - cache.add(dep, req, onCacheAdd) + cache.add(dep, req, null, false, onCacheAdd) function onCacheAdd(er, d) { // if this fails, then it means we can't update this thing. // it's probably a thing that isn't published. 
if (er) { - if (er.code && er.code === 'ETARGET') { + if (er.code && er.code === "ETARGET") { // no viable version found return skip(er) } @@ -298,6 +321,5 @@ else skip() } - - }) + } } diff -Nru nodejs-0.11.13/deps/npm/lib/owner.js nodejs-0.11.15/deps/npm/lib/owner.js --- nodejs-0.11.13/deps/npm/lib/owner.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/owner.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,6 +5,12 @@ + "\nnpm owner rm " + "\nnpm owner ls " +var npm = require("./npm.js") + , registry = npm.registry + , log = require("npmlog") + , readJson = require("read-package-json") + , mapToRegistry = require("./utils/map-to-registry.js") + owner.completion = function (opts, cb) { var argv = opts.conf.argv.remain if (argv.length > 4) return cb() @@ -14,61 +20,78 @@ else subs.push("ls", "list") return cb(null, subs) } - var un = encodeURIComponent(npm.config.get("username")) - switch (argv[2]) { - case "ls": - if (argv.length > 3) return cb() - else return registry.get("/-/short", cb) - - case "rm": - if (argv.length > 3) { - var theUser = encodeURIComponent(argv[3]) - , uri = "/-/by-user/"+theUser+"|"+un - console.error(uri) - return registry.get(uri, function (er, d) { + + npm.commands.whoami([], true, function (er, username) { + if (er) return cb() + + var un = encodeURIComponent(username) + var byUser, theUser + switch (argv[2]) { + case "ls": + if (argv.length > 3) return cb() + return mapToRegistry("-/short", npm.config, function (er, uri) { if (er) return cb(er) - // return the intersection - return cb(null, d[theUser].filter(function (p) { - // kludge for server adminery. 
- return un === "isaacs" || d[un].indexOf(p) === -1 - })) + + registry.get(uri, null, cb) }) - } - // else fallthrough - case "add": - if (argv.length > 3) { - var theUser = encodeURIComponent(argv[3]) - , uri = "/-/by-user/"+theUser+"|"+un - console.error(uri) - return registry.get(uri, function (er, d) { - console.error(uri, er || d) - // return mine that they're not already on. + + case "rm": + if (argv.length > 3) { + theUser = encodeURIComponent(argv[3]) + byUser = "-/by-user/" + theUser + "|" + un + return mapToRegistry(byUser, npm.config, function (er, uri) { + if (er) return cb(er) + + console.error(uri) + registry.get(uri, null, function (er, d) { + if (er) return cb(er) + // return the intersection + return cb(null, d[theUser].filter(function (p) { + // kludge for server adminery. + return un === "isaacs" || d[un].indexOf(p) === -1 + })) + }) + }) + } + // else fallthrough + case "add": + if (argv.length > 3) { + theUser = encodeURIComponent(argv[3]) + byUser = "-/by-user/" + theUser + "|" + un + return mapToRegistry(byUser, npm.config, function (er, uri) { + if (er) return cb(er) + + console.error(uri) + registry.get(uri, null, function (er, d) { + console.error(uri, er || d) + // return mine that they're not already on. + if (er) return cb(er) + var mine = d[un] || [] + , theirs = d[theUser] || [] + return cb(null, mine.filter(function (p) { + return theirs.indexOf(p) === -1 + })) + }) + }) + } + // just list all users who aren't me. + return mapToRegistry("-/users", npm.config, function (er, uri) { if (er) return cb(er) - var mine = d[un] || [] - , theirs = d[theUser] || [] - return cb(null, mine.filter(function (p) { - return theirs.indexOf(p) === -1 - })) + + registry.get(uri, null, function (er, list) { + if (er) return cb() + return cb(null, Object.keys(list).filter(function (n) { + return n !== un + })) + }) }) - } - // just list all users who aren't me. 
- return registry.get("/-/users", function (er, list) { - if (er) return cb() - return cb(null, Object.keys(list).filter(function (n) { - return n !== un - })) - }) - default: - return cb() - } + default: + return cb() + } + }) } -var npm = require("./npm.js") - , registry = npm.registry - , log = require("npmlog") - , readJson = require("read-package-json") - function owner (args, cb) { var action = args.shift() switch (action) { @@ -86,17 +109,23 @@ ls(pkg, cb) }) - registry.get(pkg, function (er, data) { - var msg = "" - if (er) { - log.error("owner ls", "Couldn't get owner data", pkg) - return cb(er) - } - var owners = data.maintainers - if (!owners || !owners.length) msg = "admin party!" - else msg = owners.map(function (o) { return o.name +" <"+o.email+">" }).join("\n") - console.log(msg) - cb(er, owners) + mapToRegistry(pkg, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, null, function (er, data) { + var msg = "" + if (er) { + log.error("owner ls", "Couldn't get owner data", pkg) + return cb(er) + } + var owners = data.maintainers + if (!owners || !owners.length) msg = "admin party!" 
+ else msg = owners.map(function (o) { + return o.name + " <" + o.email + ">" + }).join("\n") + console.log(msg) + cb(er, owners) + }) }) } @@ -115,7 +144,7 @@ var o = owners[i] if (o.name === u.name) { log.info( "owner add" - , "Already a package owner: "+o.name+" <"+o.email+">") + , "Already a package owner: " + o.name + " <" + o.email + ">") return false } } @@ -140,7 +169,7 @@ return !match }) if (!found) { - log.info("owner rm", "Not a package owner: "+user) + log.info("owner rm", "Not a package owner: " + user) return false } if (!m.length) return new Error( @@ -151,14 +180,19 @@ function mutate (pkg, user, mutation, cb) { if (user) { - registry.get("/-/user/org.couchdb.user:"+user, mutate_) + var byUser = "-/user/org.couchdb.user:" + user + mapToRegistry(byUser, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, null, mutate_) + }) } else { mutate_(null, null) } function mutate_ (er, u) { if (!er && user && (!u || u.error)) er = new Error( - "Couldn't get user data for "+user+": "+JSON.stringify(u)) + "Couldn't get user data for " + user + ": " + JSON.stringify(u)) if (er) { log.error("owner mutate", "Error getting user data for %s", user) @@ -166,27 +200,34 @@ } if (u) u = { "name" : u.name, "email" : u.email } - registry.get(pkg, function (er, data) { - if (er) { - log.error("owner mutate", "Error getting package data for %s", pkg) - return cb(er) - } - var m = mutation(u, data.maintainers) - if (!m) return cb() // handled - if (m instanceof Error) return cb(m) // error - data = { _id : data._id - , _rev : data._rev - , maintainers : m - } - registry.request("PUT" - , pkg+"/-rev/"+data._rev, data - , function (er, data) { - if (!er && data.error) er = new Error( - "Failed to update package metadata: "+JSON.stringify(data)) + mapToRegistry(pkg, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, null, function (er, data) { if (er) { - log.error("owner mutate", "Failed to update package metadata") + 
log.error("owner mutate", "Error getting package data for %s", pkg) + return cb(er) } - cb(er, data) + var m = mutation(u, data.maintainers) + if (!m) return cb() // handled + if (m instanceof Error) return cb(m) // error + data = { _id : data._id + , _rev : data._rev + , maintainers : m + } + var dataPath = pkg + "/-rev/" + data._rev + mapToRegistry(dataPath, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.request("PUT", uri, { body : data }, function (er, data) { + if (!er && data.error) er = new Error( + "Failed to update package metadata: " + JSON.stringify(data)) + if (er) { + log.error("owner mutate", "Failed to update package metadata") + } + cb(er, data) + }) + }) }) }) } @@ -201,5 +242,5 @@ } function unknown (action, cb) { - cb("Usage: \n"+owner.usage) + cb("Usage: \n" + owner.usage) } diff -Nru nodejs-0.11.13/deps/npm/lib/pack.js nodejs-0.11.15/deps/npm/lib/pack.js --- nodejs-0.11.13/deps/npm/lib/pack.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/pack.js 2015-01-20 21:22:17.000000000 +0000 @@ -11,6 +11,8 @@ , chain = require("slide").chain , path = require("path") , cwd = process.cwd() + , writeStream = require('fs-write-stream-atomic') + , cachedPackageRoot = require("./cache/cached-package-root.js") pack.usage = "npm pack " @@ -40,15 +42,17 @@ // add to cache, then cp to the cwd function pack_ (pkg, cb) { - cache.add(pkg, function (er, data) { + cache.add(pkg, null, null, false, function (er, data) { if (er) return cb(er) - var fname = path.resolve(data._id.replace(/@/g, "-") + ".tgz") - , cached = path.resolve( npm.cache - , data.name - , data.version - , "package.tgz" ) + + // scoped packages get special treatment + var name = data.name + if (name[0] === "@") name = name.substr(1).replace(/\//g, "-") + var fname = name + "-" + data.version + ".tgz" + + var cached = path.join(cachedPackageRoot(data), "package.tgz") , from = fs.createReadStream(cached) - , to = fs.createWriteStream(fname) + , to = 
writeStream(fname) , errState = null from.on("error", cb_) diff -Nru nodejs-0.11.13/deps/npm/lib/prune.js nodejs-0.11.15/deps/npm/lib/prune.js --- nodejs-0.11.13/deps/npm/lib/prune.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/prune.js 2015-01-20 21:22:17.000000000 +0000 @@ -15,7 +15,7 @@ function prune (args, cb) { //check if is a valid package.json file var jsonFile = path.resolve(npm.dir, "..", "package.json" ) - readJson(jsonFile, log.warn, function (er, data) { + readJson(jsonFile, log.warn, function (er) { if (er) return cb(er) next() }) diff -Nru nodejs-0.11.13/deps/npm/lib/publish.js nodejs-0.11.15/deps/npm/lib/publish.js --- nodejs-0.11.13/deps/npm/lib/publish.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/publish.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,16 +1,17 @@ module.exports = publish -var npm = require("./npm.js") +var url = require("url") + , npm = require("./npm.js") , log = require("npmlog") - , tar = require("./utils/tar.js") , path = require("path") , readJson = require("read-package-json") - , fs = require("graceful-fs") , lifecycle = require("./utils/lifecycle.js") , chain = require("slide").chain - , Conf = require("npmconf").Conf + , Conf = require("./config/core.js").Conf , RegClient = require("npm-registry-client") + , mapToRegistry = require("./utils/map-to-registry.js") + , cachedPackageRoot = require("./cache/cached-package-root.js") publish.usage = "npm publish " + "\nnpm publish " @@ -24,7 +25,10 @@ } function publish (args, isRetry, cb) { - if (typeof cb !== "function") cb = isRetry, isRetry = false + if (typeof cb !== "function") { + cb = isRetry + isRetry = false + } if (args.length === 0) args = ["."] if (args.length !== 1) return cb(publish.usage) @@ -32,14 +36,18 @@ var arg = args[0] // if it's a local folder, then run the prepublish there, first. 
readJson(path.resolve(arg, "package.json"), function (er, data) { - er = needVersion(er, data) if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) - // error is ok. could be publishing a url or tarball - // however, that means that we will not have automatically run - // the prepublish script, since that gets run when adding a folder - // to the cache. + + if (data) { + if (!data.name) return cb(new Error("No name provided")) + if (!data.version) return cb(new Error("No version provided")) + } + + // Error is OK. Could be publishing a URL or tarball, however, that means + // that we will not have automatically run the prepublish script, since + // that gets run when adding a folder to the cache. if (er) return cacheAddPublish(arg, false, isRetry, cb) - cacheAddPublish(arg, true, isRetry, cb) + else cacheAddPublish(arg, true, isRetry, cb) }) } @@ -49,15 +57,12 @@ // That means that we can run publish/postpublish in the dir, rather than // in the cache dir. function cacheAddPublish (dir, didPre, isRetry, cb) { - npm.commands.cache.add(dir, function (er, data) { + npm.commands.cache.add(dir, null, null, false, function (er, data) { if (er) return cb(er) log.silly("publish", data) - var cachedir = path.resolve( npm.cache - , data.name - , data.version - , "package" ) - chain - ( [ !didPre && [lifecycle, data, "prepublish", cachedir] + var cachedir = path.resolve(cachedPackageRoot(data), "package") + chain([ !didPre && + [lifecycle, data, "prepublish", cachedir] , [publish_, dir, data, isRetry, cachedir] , [lifecycle, data, "publish", didPre ? dir : cachedir] , [lifecycle, data, "postpublish", didPre ? 
dir : cachedir] ] @@ -68,51 +73,61 @@ function publish_ (arg, data, isRetry, cachedir, cb) { if (!data) return cb(new Error("no package.json file found")) - // check for publishConfig hash var registry = npm.registry + var config = npm.config + + // check for publishConfig hash if (data.publishConfig) { - var pubConf = new Conf(npm.config) - pubConf.save = npm.config.save.bind(npm.config) + config = new Conf(npm.config) + config.save = npm.config.save.bind(npm.config) // don't modify the actual publishConfig object, in case we have // to set a login token or some other data. - pubConf.unshift(Object.keys(data.publishConfig).reduce(function (s, k) { + config.unshift(Object.keys(data.publishConfig).reduce(function (s, k) { s[k] = data.publishConfig[k] return s }, {})) - registry = new RegClient(pubConf) + registry = new RegClient(config) } - data._npmVersion = npm.version - data._npmUser = { name: npm.config.get("username") - , email: npm.config.get("email") } + data._npmVersion = npm.version + data._nodeVersion = process.versions.node delete data.modules - if (data.private) return cb(new Error - ("This package has been marked as private\n" - +"Remove the 'private' field from the package.json to publish it.")) - - var tarball = cachedir + ".tgz" - registry.publish(data, tarball, function (er) { - if (er && er.code === "EPUBLISHCONFLICT" - && npm.config.get("force") && !isRetry) { - log.warn("publish", "Forced publish over "+data._id) - return npm.commands.unpublish([data._id], function (er) { - // ignore errors. Use the force. Reach out with your feelings. - // but if it fails again, then report the first error. - publish([arg], er || true, cb) - }) - } - // report the unpublish error if this was a retry and unpublish failed - if (er && isRetry && isRetry !== true) return cb(isRetry) + if (data.private) return cb( + new Error( + "This package has been marked as private\n" + + "Remove the 'private' field from the package.json to publish it." 
+ ) + ) + + mapToRegistry(data.name, config, function (er, registryURI) { if (er) return cb(er) - console.log("+ " + data._id) - cb() - }) -} -function needVersion(er, data) { - return er ? er - : (data && !data.version) ? new Error("No version provided") - : null + var tarball = cachedir + ".tgz" + + // we just want the base registry URL in this case + var registryBase = url.resolve(registryURI, ".") + log.verbose("publish", "registryBase", registryBase) + + var c = config.getCredentialsByURI(registryBase) + data._npmUser = {name: c.username, email: c.email} + + registry.publish(registryBase, data, tarball, function (er) { + if (er && er.code === "EPUBLISHCONFLICT" + && npm.config.get("force") && !isRetry) { + log.warn("publish", "Forced publish over " + data._id) + return npm.commands.unpublish([data._id], function (er) { + // ignore errors. Use the force. Reach out with your feelings. + // but if it fails again, then report the first error. + publish([arg], er || true, cb) + }) + } + // report the unpublish error if this was a retry and unpublish failed + if (er && isRetry && isRetry !== true) return cb(isRetry) + if (er) return cb(er) + console.log("+ " + data._id) + cb() + }) + }) } diff -Nru nodejs-0.11.13/deps/npm/lib/rebuild.js nodejs-0.11.15/deps/npm/lib/rebuild.js --- nodejs-0.11.13/deps/npm/lib/rebuild.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/rebuild.js 2015-01-20 21:22:17.000000000 +0000 @@ -4,10 +4,8 @@ var readInstalled = require("read-installed") , semver = require("semver") , log = require("npmlog") - , path = require("path") , npm = require("./npm.js") - , asyncMap = require("slide").asyncMap - , fs = require("graceful-fs") + , npa = require("npm-package-arg") rebuild.usage = "npm rebuild [[@] [name[@] ...]]" @@ -49,9 +47,9 @@ else if (data.name && data._id) { for (var i = 0, l = args.length; i < l; i ++) { var arg = args[i] - , nv = arg.split("@") - , n = nv.shift() - , v = nv.join("@") + , nv = npa(arg) + , n = 
nv.name + , v = nv.rawSpec if (n !== data.name) continue if (!semver.satisfies(data.version, v, true)) continue pass = true diff -Nru nodejs-0.11.13/deps/npm/lib/repo.js nodejs-0.11.15/deps/npm/lib/repo.js --- nodejs-0.11.13/deps/npm/lib/repo.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/repo.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,23 +5,29 @@ repo.completion = function (opts, cb) { if (opts.conf.argv.remain.length > 2) return cb() - registry.get("/-/short", 60000, function (er, list) { - return cb(null, list || []) + mapToRegistry("/-/short", npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, { timeout : 60000 }, function (er, list) { + return cb(null, list || []) + }) }) } var npm = require("./npm.js") , registry = npm.registry - , log = require("npmlog") , opener = require("opener") , github = require('github-url-from-git') , githubUserRepo = require("github-url-from-username-repo") , path = require("path") , readJson = require("read-package-json") , fs = require("fs") + , url_ = require("url") + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = require("npm-package-arg") function repo (args, cb) { - var n = args.length && args[0].split("@").shift() || '.' + var n = args.length && npa(args[0]).name || "." fs.stat(n, function (er, s) { if (er && er.code === "ENOENT") return callRegistry(n, cb) else if (er) return cb(er) @@ -34,21 +40,45 @@ } function getUrlAndOpen (d, cb) { - var r = d.repository; - if (!r) return cb(new Error('no repository')); + var r = d.repository + if (!r) return cb(new Error('no repository')) // XXX remove this when npm@v1.3.10 from node 0.10 is deprecated // from https://github.com/npm/npm-www/issues/418 if (githubUserRepo(r.url)) r.url = githubUserRepo(r.url) - var url = github(r.url) + + var url = (r.url && ~r.url.indexOf('github')) + ? 
github(r.url) + : nonGithubUrl(r.url) + if (!url) return cb(new Error('no repository: could not get url')) opener(url, { command: npm.config.get("browser") }, cb) } function callRegistry (n, cb) { - registry.get(n + "/latest", 3600, function (er, d) { + mapToRegistry(n, npm.config, function (er, uri) { if (er) return cb(er) - getUrlAndOpen(d, cb) + + registry.get(uri + "/latest", { timeout : 3600 }, function (er, d) { + if (er) return cb(er) + getUrlAndOpen(d, cb) + }) }) } + +function nonGithubUrl (url) { + try { + var idx = url.indexOf('@') + if (idx !== -1) { + url = url.slice(idx+1).replace(/:([^\d]+)/, '/$1') + } + url = url_.parse(url) + var protocol = url.protocol === 'https:' + ? 'https:' + : 'http:' + return protocol + '//' + (url.host || '') + + url.path.replace(/\.git$/, '') + } + catch(e) {} +} diff -Nru nodejs-0.11.13/deps/npm/lib/run-script.js nodejs-0.11.15/deps/npm/lib/run-script.js --- nodejs-0.11.13/deps/npm/lib/run-script.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/run-script.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,10 +7,8 @@ , readJson = require("read-package-json") , log = require("npmlog") , chain = require("slide").chain - , fs = require("graceful-fs") - , asyncMap = require("slide").asyncMap -runScript.usage = "npm run-script [] " +runScript.usage = "npm run-script [-- ]" runScript.completion = function (opts, cb) { @@ -23,7 +21,7 @@ if (argv.length === 3) { // either specified a script locally, in which case, done, // or a package, in which case, complete against its scripts - var json = path.join(npm.prefix, "package.json") + var json = path.join(npm.localPrefix, "package.json") return readJson(json, function (er, d) { if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) if (er) d = {} @@ -32,7 +30,7 @@ if (scripts.indexOf(argv[2]) !== -1) return cb() // ok, try to find out which package it was, then var pref = npm.config.get("global") ? 
npm.config.get("prefix") - : npm.prefix + : npm.localPrefix var pkgDir = path.resolve( pref, "node_modules" , argv[2], "package.json" ) readJson(pkgDir, function (er, d) { @@ -55,8 +53,11 @@ next() }) - if (npm.config.get("global")) scripts = [], next() - else readJson(path.join(npm.prefix, "package.json"), function (er, d) { + if (npm.config.get("global")) { + scripts = [] + next() + } + else readJson(path.join(npm.localPrefix, "package.json"), function (er, d) { if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) d = d || {} scripts = Object.keys(d.scripts || {}) @@ -65,38 +66,88 @@ function next () { if (!installed || !scripts) return - return cb(null, scripts.concat(installed)) + + cb(null, scripts.concat(installed)) } } function runScript (args, cb) { - if (!args.length) return cb(runScript.usage) - var pkgdir = args.length === 1 ? process.cwd() - : path.resolve(npm.dir, args[0]) - , cmd = args.pop() + if (!args.length) return list(cb) + + var pkgdir = npm.localPrefix + , cmd = args.shift() readJson(path.resolve(pkgdir, "package.json"), function (er, d) { if (er) return cb(er) - run(d, pkgdir, cmd, cb) + run(d, pkgdir, cmd, args, cb) }) } -function run (pkg, wd, cmd, cb) { - var cmds = [] +function list(cb) { + var json = path.join(npm.localPrefix, "package.json") + return readJson(json, function(er, d) { + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) + if (er) d = {} + var scripts = Object.keys(d.scripts || {}) + + if (log.level === "silent") { + return cb(null, scripts) + } + + if (npm.config.get("json")) { + console.log(JSON.stringify(d.scripts || {}, null, 2)) + return cb(null, scripts) + } + + var s = ":" + var prefix = "" + if (!npm.config.get("parseable")) { + s = "\n " + prefix = " " + console.log("Available scripts in the %s package:", d.name) + } + scripts.forEach(function(script) { + console.log(prefix + script + s + d.scripts[script]) + }) + return cb(null, scripts) + }) +} + +function run (pkg, wd, cmd, 
args, cb) { if (!pkg.scripts) pkg.scripts = {} + + var cmds if (cmd === "restart") { - cmds = ["prestop","stop","poststop" - ,"restart" - ,"prestart","start","poststart"] + cmds = [ + "prestop", "stop", "poststop", + "restart", + "prestart", "start", "poststart" + ] } else { cmds = [cmd] } + if (!cmd.match(/^(pre|post)/)) { cmds = ["pre"+cmd].concat(cmds).concat("post"+cmd) } + log.verbose("run-script", cmds) chain(cmds.map(function (c) { + // pass cli arguments after -- to script. + if (pkg.scripts[c] && c === cmd) pkg.scripts[c] = pkg.scripts[c] + joinArgs(args) + // when running scripts explicitly, assume that they're trusted. return [lifecycle, pkg, c, wd, true] }), cb) } + +// join arguments after '--' and pass them to script, +// handle special characters such as ', ", ' '. +function joinArgs (args) { + var joinedArgs = "" + args.forEach(function(arg) { + if (arg.match(/[ '"]/)) arg = '"' + arg.replace(/"/g, '\\"') + '"' + joinedArgs += " " + arg + }) + return joinedArgs +} diff -Nru nodejs-0.11.13/deps/npm/lib/search.js nodejs-0.11.15/deps/npm/lib/search.js --- nodejs-0.11.13/deps/npm/lib/search.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/search.js 2015-01-20 21:22:17.000000000 +0000 @@ -4,6 +4,7 @@ var npm = require("./npm.js") , registry = npm.registry , columnify = require('columnify') + , mapToRegistry = require("./utils/map-to-registry.js") search.usage = "npm search [some search terms ...]" @@ -57,10 +58,18 @@ } function getFilteredData (staleness, args, notArgs, cb) { - registry.get( "/-/all", staleness, false - , true, function (er, data) { + var opts = { + timeout : staleness, + follow : true, + staleOk : true + } + mapToRegistry("-/all", npm.config, function (er, uri) { if (er) return cb(er) - return cb(null, filter(data, args, notArgs)) + + registry.get(uri, opts, function (er, data) { + if (er) return cb(er) + return cb(null, filter(data, args, notArgs)) + }) }) } @@ -115,7 +124,7 @@ for (var i = 0, l = args.length; i < 
l; i ++) { if (!match(words, args[i])) return false } - for (var i = 0, l = notArgs.length; i < l; i ++) { + for (i = 0, l = notArgs.length; i < l; i ++) { if (match(words, notArgs[i])) return false } return true @@ -209,7 +218,7 @@ if (arg.charAt(0) === "/") { //arg = arg.replace(/\/$/, "") - return str.replace( new RegExp(arg.substr(1, arg.length - 1), "gi") + return str.replace( new RegExp(arg.substr(1, arg.length - 2), "gi") , function (bit) { return markStart + bit + markEnd } ) } @@ -218,7 +227,7 @@ var pieces = str.toLowerCase().split(arg.toLowerCase()) , p = 0 - return pieces.map(function (piece, i) { + return pieces.map(function (piece) { piece = str.substr(p, piece.length) var mark = markStart + str.substr(p+piece.length, arg.length) @@ -239,12 +248,12 @@ } function getMaxWidth() { + var cols try { var tty = require("tty") , stdout = process.stdout - , cols = !tty.isatty(stdout.fd) ? Infinity - : process.stdout.getWindowSize()[0] - cols = (cols == 0) ? Infinity : cols + cols = !tty.isatty(stdout.fd) ? Infinity : process.stdout.getWindowSize()[0] + cols = (cols === 0) ? 
Infinity : cols } catch (ex) { cols = Infinity } return cols } diff -Nru nodejs-0.11.13/deps/npm/lib/shrinkwrap.js nodejs-0.11.15/deps/npm/lib/shrinkwrap.js --- nodejs-0.11.13/deps/npm/lib/shrinkwrap.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/shrinkwrap.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,6 +6,7 @@ var npm = require("./npm.js") , log = require("npmlog") , fs = require("fs") + , writeFileAtomic = require("write-file-atomic") , path = require("path") , readJson = require("read-package-json") , sortedObject = require("sorted-object") @@ -60,8 +61,9 @@ // copy the keys over in a well defined order // because javascript objects serialize arbitrarily pkginfo.dependencies = sortedObject(pkginfo.dependencies || {}) + var swdata try { - var swdata = JSON.stringify(pkginfo, null, 2) + "\n" + swdata = JSON.stringify(pkginfo, null, 2) + "\n" } catch (er) { log.error("shrinkwrap", "Error converting package info to json") return cb(er) @@ -69,7 +71,7 @@ var file = path.resolve(npm.prefix, "npm-shrinkwrap.json") - fs.writeFile(file, swdata, function (er) { + writeFileAtomic(file, swdata, function (er) { if (er) return cb(er) if (silent) return cb(null, pkginfo) console.log("wrote npm-shrinkwrap.json") diff -Nru nodejs-0.11.13/deps/npm/lib/star.js nodejs-0.11.15/deps/npm/lib/star.js --- nodejs-0.11.13/deps/npm/lib/star.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/star.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,13 +5,18 @@ , registry = npm.registry , log = require("npmlog") , asyncMap = require("slide").asyncMap + , mapToRegistry = require("./utils/map-to-registry.js") star.usage = "npm star [pkg, pkg, ...]\n" + "npm unstar [pkg, pkg, ...]" star.completion = function (opts, cb) { - registry.get("/-/short", 60000, function (er, list) { - return cb(null, list || []) + mapToRegistry("-/short", npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, { timeout : 60000 }, function (er, list) { + 
return cb(null, list || []) + }) }) } @@ -22,12 +27,16 @@ , using = !(npm.command.match(/^un/)) if (!using) s = u asyncMap(args, function (pkg, cb) { - registry.star(pkg, using, function (er, data, raw, req) { - if (!er) { - console.log(s + " "+pkg) - log.verbose("star", data) - } - cb(er, data, raw, req) + mapToRegistry(pkg, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.star(uri, using, function (er, data, raw, req) { + if (!er) { + console.log(s + " "+pkg) + log.verbose("star", data) + } + cb(er, data, raw, req) + }) }) }, cb) } diff -Nru nodejs-0.11.13/deps/npm/lib/stars.js nodejs-0.11.15/deps/npm/lib/stars.js --- nodejs-0.11.13/deps/npm/lib/stars.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/stars.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,18 +5,23 @@ var npm = require("./npm.js") , registry = npm.registry , log = require("npmlog") + , mapToRegistry = require("./utils/map-to-registry.js") function stars (args, cb) { - var name = args.length === 1 ? args[0] : npm.config.get("username") - registry.stars(name, showstars) + npm.commands.whoami([], true, function (er, username) { + var name = args.length === 1 ? args[0] : username + mapToRegistry("", npm.config, function (er, uri) { + if (er) return cb(er) + + registry.stars(uri, name, showstars) + }) + }) function showstars (er, data) { - if (er) { - return cb(er) - } + if (er) return cb(er) if (data.rows.length === 0) { - log.warn('stars', 'user has not starred any packages.') + log.warn("stars", "user has not starred any packages.") } else { data.rows.forEach(function(a) { console.log(a.value) diff -Nru nodejs-0.11.13/deps/npm/lib/submodule.js nodejs-0.11.15/deps/npm/lib/submodule.js --- nodejs-0.11.13/deps/npm/lib/submodule.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/submodule.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -// npm submodule -// Check the package contents for a git repository url. 
-// If there is one, then create a git submodule in the node_modules folder. - -module.exports = submodule - -var npm = require("./npm.js") - , exec = require("child_process").execFile - , cache = require("./cache.js") - , asyncMap = require("slide").asyncMap - , chain = require("slide").chain - , which = require("which") - -submodule.usage = "npm submodule " - -submodule.completion = require("./docs.js").completion - -function submodule (args, cb) { - if (npm.config.get("global")) { - return cb(new Error("Cannot use submodule command in global mode.")) - } - - if (args.length === 0) return cb(submodule.usage) - - asyncMap(args, function (arg, cb) { - cache.add(arg, cb) - }, function (er, pkgs) { - if (er) return cb(er) - chain(pkgs.map(function (pkg) { return function (cb) { - submodule_(pkg, cb) - }}), cb) - }) - -} - -function submodule_ (pkg, cb) { - if (!pkg.repository - || pkg.repository.type !== "git" - || !pkg.repository.url) { - return cb(new Error(pkg._id + ": No git repository listed")) - } - - // prefer https:// github urls - pkg.repository.url = pkg.repository.url - .replace(/^(git:\/\/)?(git@)?github.com[:\/]/, "https://github.com/") - - // first get the list of submodules, and update if it's already there. - getSubmodules(function (er, modules) { - if (er) return cb(er) - // if there's already a submodule, then just update it. 
- if (modules.indexOf(pkg.name) !== -1) { - return updateSubmodule(pkg.name, cb) - } - addSubmodule(pkg.name, pkg.repository.url, cb) - }) -} - -function updateSubmodule (name, cb) { - var git = npm.config.get("git") - var args = [ "submodule", "update", "--init", "node_modules/", name ] - - // check for git - which(git, function (err) { - if (err) { - err.code = "ENOGIT" - return cb(err) - } - - exec(git, args, cb) - }) -} - -function addSubmodule (name, url, cb) { - var git = npm.config.get("git") - var args = [ "submodule", "add", url, "node_modules/", name ] - - // check for git - which(git, function (err) { - if (err) { - err.code = "ENOGIT" - return cb(err) - } - - exec(git, args, function (er) { - if (er) return cb(er) - updateSubmodule(name, cb) - }) - }) -} - - -var getSubmodules = function getSubmodules (cb) { - var git = npm.config.get("git") - var args = [ "submodule", "status" ] - - // check for git - which(git, function (err) { - if (err) { - err.code = "ENOGIT" - return cb(err) - } - exec(git, args, function (er, stdout, stderr) { - if (er) return cb(er) - res = stdout.trim().split(/\n/).map(function (line) { - return line.trim().split(/\s+/)[1] - }).filter(function (line) { - // only care about submodules in the node_modules folder. - return line && line.match(/^node_modules\//) - }).map(function (line) { - return line.replace(/^node_modules\//g, "") - }) - - // memoize. 
- getSubmodules = function (cb) { return cb(null, res) } - - cb(null, res) - }) - }) -} diff -Nru nodejs-0.11.13/deps/npm/lib/tag.js nodejs-0.11.15/deps/npm/lib/tag.js --- nodejs-0.11.13/deps/npm/lib/tag.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/tag.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,12 +7,28 @@ var npm = require("./npm.js") , registry = npm.registry + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = require("npm-package-arg") + , semver = require("semver") function tag (args, cb) { - var thing = (args.shift() || "").split("@") - , project = thing.shift() - , version = thing.join("@") + var thing = npa(args.shift() || "") + , project = thing.name + , version = thing.rawSpec , t = args.shift() || npm.config.get("tag") + + t = t.trim() + if (!project || !version || !t) return cb("Usage:\n"+tag.usage) - registry.tag(project, version, t, cb) + + if (semver.validRange(t)) { + var er = new Error("Tag name must not be a valid SemVer range: " + t) + return cb(er) + } + + mapToRegistry(project, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.tag(uri, version, t, cb) + }) } diff -Nru nodejs-0.11.13/deps/npm/lib/test.js nodejs-0.11.15/deps/npm/lib/test.js --- nodejs-0.11.13/deps/npm/lib/test.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,6 @@ module.exports = test var testCmd = require("./utils/lifecycle.js").cmd("test") - , log = require("npmlog") function test (args, cb) { testCmd(args, function (er) { diff -Nru nodejs-0.11.13/deps/npm/lib/unbuild.js nodejs-0.11.15/deps/npm/lib/unbuild.js --- nodejs-0.11.13/deps/npm/lib/unbuild.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/unbuild.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,11 +2,9 @@ unbuild.usage = "npm unbuild \n(this is plumbing)" var readJson = require("read-package-json") - , rm = require("./utils/gently-rm.js") , gentlyRm = 
require("./utils/gently-rm.js") , npm = require("./npm.js") , path = require("path") - , fs = require("graceful-fs") , lifecycle = require("./utils/lifecycle.js") , asyncMap = require("slide").asyncMap , chain = require("slide").chain @@ -16,7 +14,7 @@ // args is a list of folders. // remove any bins/etc, and then delete the folder. function unbuild (args, silent, cb) { - if (typeof silent === 'function') cb = silent, silent = false + if (typeof silent === "function") cb = silent, silent = false asyncMap(args, unbuild_(silent), cb) } @@ -26,10 +24,10 @@ } folder = path.resolve(folder) delete build._didBuild[folder] - log.info(folder, "unbuild") + log.verbose("unbuild", folder.substr(npm.prefix.length + 1)) readJson(path.resolve(folder, "package.json"), function (er, pkg) { // if no json, then just trash it, but no scripts or whatever. - if (er) return rm(folder, cb) + if (er) return gentlyRm(folder, false, cb) readJson.cache.del(folder) chain ( [ [lifecycle, pkg, "preuninstall", folder, false, true] @@ -40,7 +38,7 @@ } , [rmStuff, pkg, folder] , [lifecycle, pkg, "postuninstall", folder, false, true] - , [rm, folder] ] + , [gentlyRm, folder, undefined] ] , cb ) }) }} @@ -55,7 +53,8 @@ readJson.cache.del(path.resolve(folder, "package.json")) - log.verbose([top, gnm, parent], "unbuild " + pkg._id) + log.verbose("unbuild rmStuff", pkg._id, "from", gnm) + if (!top) log.verbose("unbuild rmStuff", "in", parent) asyncMap([rmBins, rmMans], function (fn, cb) { fn(pkg, folder, parent, top, cb) }, cb) @@ -67,8 +66,8 @@ log.verbose([binRoot, pkg.bin], "binRoot") asyncMap(Object.keys(pkg.bin), function (b, cb) { if (process.platform === "win32") { - chain([ [rm, path.resolve(binRoot, b) + ".cmd"] - , [rm, path.resolve(binRoot, b) ] ], cb) + chain([ [gentlyRm, path.resolve(binRoot, b) + ".cmd", undefined] + , [gentlyRm, path.resolve(binRoot, b), undefined] ], cb) } else { gentlyRm( path.resolve(binRoot, b) , !npm.config.get("force") && folder @@ -87,12 +86,12 @@ var manRoot = 
path.resolve(npm.config.get("prefix"), "share", "man") asyncMap(pkg.man, function (man, cb) { if (Array.isArray(man)) { - man.forEach(rm) + man.forEach(rmMan) } else { - rm(man) + rmMan(man) } - function rm(man) { + function rmMan(man) { var parseMan = man.match(/(.*)\.([0-9]+)(\.gz)?$/) , stem = parseMan[1] , sxn = parseMan[2] diff -Nru nodejs-0.11.13/deps/npm/lib/uninstall.js nodejs-0.11.15/deps/npm/lib/uninstall.js --- nodejs-0.11.13/deps/npm/lib/uninstall.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/uninstall.js 2015-01-20 21:22:17.000000000 +0000 @@ -9,6 +9,7 @@ uninstall.completion = require("./utils/completion/installed-shallow.js") var fs = require("graceful-fs") + , writeFileAtomic = require("write-file-atomic") , log = require("npmlog") , readJson = require("read-package-json") , path = require("path") @@ -79,14 +80,15 @@ // don't use readJson here, because we don't want all the defaults // filled in, for mans and other bs. fs.readFile(pj, 'utf8', function (er, json) { + var pkg try { - var pkg = JSON.parse(json) + pkg = JSON.parse(json) } catch (_) {} if (!pkg) return cb_(null, data) var bundle if (npm.config.get('save-bundle')) { - var bundle = pkg.bundleDependencies || pkg.bundledDependencies + bundle = pkg.bundleDependencies || pkg.bundledDependencies if (!Array.isArray(bundle)) bundle = undefined } @@ -119,7 +121,7 @@ } } - fs.writeFile(pj, JSON.stringify(pkg, null, 2) + "\n", function (er) { + writeFileAtomic(pj, JSON.stringify(pkg, null, 2) + "\n", function (er) { return cb_(er, data) }) }) diff -Nru nodejs-0.11.13/deps/npm/lib/unpublish.js nodejs-0.11.15/deps/npm/lib/unpublish.js --- nodejs-0.11.13/deps/npm/lib/unpublish.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/unpublish.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,58 +6,72 @@ , registry = npm.registry , readJson = require("read-package-json") , path = require("path") + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = 
require("npm-package-arg") unpublish.usage = "npm unpublish [@]" unpublish.completion = function (opts, cb) { if (opts.conf.argv.remain.length >= 3) return cb() - var un = encodeURIComponent(npm.config.get("username")) - if (!un) return cb() - registry.get("/-/by-user/"+un, function (er, pkgs) { - // do a bit of filtering at this point, so that we don't need - // to fetch versions for more than one thing, but also don't - // accidentally a whole project. - pkgs = pkgs[un] - if (!pkgs || !pkgs.length) return cb() - var partial = opts.partialWord.split("@") - , pp = partial.shift() - , pv = partial.join("@") - pkgs = pkgs.filter(function (p) { - return p.indexOf(pp) === 0 - }) - if (pkgs.length > 1) return cb(null, pkgs) - registry.get(pkgs[0], function (er, d) { + npm.commands.whoami([], true, function (er, username) { + if (er) return cb() + + var un = encodeURIComponent(username) + if (!un) return cb() + var byUser = "-/by-user/" + un + mapToRegistry(byUser, npm.config, function (er, uri) { if (er) return cb(er) - var vers = Object.keys(d.versions) - if (!vers.length) return cb(null, pkgs) - return cb(null, vers.map(function (v) { - return pkgs[0]+"@"+v - })) + + registry.get(uri, null, function (er, pkgs) { + // do a bit of filtering at this point, so that we don't need + // to fetch versions for more than one thing, but also don't + // accidentally a whole project. 
+ pkgs = pkgs[un] + if (!pkgs || !pkgs.length) return cb() + var pp = npa(opts.partialWord).name + pkgs = pkgs.filter(function (p) { + return p.indexOf(pp) === 0 + }) + if (pkgs.length > 1) return cb(null, pkgs) + mapToRegistry(pkgs[0], npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, null, function (er, d) { + if (er) return cb(er) + var vers = Object.keys(d.versions) + if (!vers.length) return cb(null, pkgs) + return cb(null, vers.map(function (v) { + return pkgs[0] + "@" + v + })) + }) + }) + }) }) }) } function unpublish (args, cb) { - if (args.length > 1) return cb(unpublish.usage) - var thing = args.length ? args.shift().split("@") : [] - , project = thing.shift() - , version = thing.join("@") + var thing = args.length ? npa(args[0]) : {} + , project = thing.name + , version = thing.rawSpec + log.silly("unpublish", "args[0]", args[0]) + log.silly("unpublish", "thing", thing) if (!version && !npm.config.get("force")) { return cb("Refusing to delete entire project.\n" - +"Run with --force to do this.\n" - +unpublish.usage) + + "Run with --force to do this.\n" + + unpublish.usage) } - if (!project || path.resolve(project) === npm.prefix) { + if (!project || path.resolve(project) === npm.localPrefix) { // if there's a package.json in the current folder, then // read the package name and version out of that. 
- var cwdJson = path.join(process.cwd(), "package.json") + var cwdJson = path.join(npm.localPrefix, "package.json") return readJson(cwdJson, function (er, data) { if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) - if (er) return cb("Usage:\n"+unpublish.usage) + if (er) return cb("Usage:\n" + unpublish.usage) gotProject(data.name, data.version, cb) }) } @@ -78,6 +92,10 @@ return cb(er) } - registry.unpublish(project, version, cb) + mapToRegistry(project, npm.config, function (er, uri) { + if (er) return cb(er) + + registry.unpublish(uri, version, cb) + }) }) } diff -Nru nodejs-0.11.13/deps/npm/lib/update.js nodejs-0.11.15/deps/npm/lib/update.js --- nodejs-0.11.13/deps/npm/lib/update.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/update.js 2015-01-20 21:22:17.000000000 +0000 @@ -10,7 +10,6 @@ update.usage = "npm update [pkg]" var npm = require("./npm.js") - , lifecycle = require("./utils/lifecycle.js") , asyncMap = require("slide").asyncMap , log = require("npmlog") diff -Nru nodejs-0.11.13/deps/npm/lib/utils/completion/file-completion.js nodejs-0.11.15/deps/npm/lib/utils/completion/file-completion.js --- nodejs-0.11.13/deps/npm/lib/utils/completion/file-completion.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/completion/file-completion.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,7 +2,6 @@ var mkdir = require("mkdirp") , path = require("path") - , fs = require("graceful-fs") , glob = require("glob") function fileCompletion (root, req, depth, cb) { diff -Nru nodejs-0.11.13/deps/npm/lib/utils/depr-check.js nodejs-0.11.15/deps/npm/lib/utils/depr-check.js --- nodejs-0.11.13/deps/npm/lib/utils/depr-check.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/depr-check.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +var log = require("npmlog") + +var deprecated = {} + , deprWarned = {} +module.exports = function deprCheck (data) { + if (deprecated[data._id]) 
data.deprecated = deprecated[data._id] + if (data.deprecated) deprecated[data._id] = data.deprecated + else return + if (!deprWarned[data._id]) { + deprWarned[data._id] = true + log.warn("deprecated", "%s: %s", data._id, data.deprecated) + } +} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/error-handler.js nodejs-0.11.15/deps/npm/lib/utils/error-handler.js --- nodejs-0.11.13/deps/npm/lib/utils/error-handler.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/error-handler.js 2015-01-20 21:22:17.000000000 +0000 @@ -9,11 +9,14 @@ , path = require("path") , wroteLogFile = false , exitCode = 0 + , rollbacks = npm.rollbacks + , chain = require("slide").chain + , writeStream = require("fs-write-stream-atomic") process.on("exit", function (code) { // console.error("exit", code) - if (!npm.config.loaded) return + if (!npm.config || !npm.config.loaded) return if (code) itWorked = false if (itWorked) log.info("ok") else { @@ -22,13 +25,18 @@ } if (wroteLogFile) { - log.error("", ["" - ,"Additional logging details can be found in:" + // just a line break + if (log.levels[log.level] <= log.levels.error) console.error("") + + log.error("", + ["Please include the following file with any support request:" ," " + path.resolve("npm-debug.log") ].join("\n")) wroteLogFile = false } - log.error("not ok", "code", code) + if (code) { + log.error("code", code) + } } var doExit = npm.config.get("_exit") @@ -46,14 +54,34 @@ function exit (code, noLog) { exitCode = exitCode || process.exitCode || code - var doExit = npm.config.get("_exit") + var doExit = npm.config ? 
npm.config.get("_exit") : true log.verbose("exit", [code, doExit]) if (log.level === "silent") noLog = true - if (code && !noLog) writeLogFile(reallyExit) - else rm("npm-debug.log", function () { rm(npm.tmp, reallyExit) }) + if (rollbacks.length) { + chain(rollbacks.map(function (f) { + return function (cb) { + npm.commands.unbuild([f], true, cb) + } + }), function (er) { + if (er) { + log.error("error rolling back", er) + if (!code) errorHandler(er) + else if (noLog) rm("npm-debug.log", reallyExit.bind(null, er)) + else writeLogFile(reallyExit.bind(this, er)) + } else { + if (!noLog && code) writeLogFile(reallyExit) + else rm("npm-debug.log", reallyExit) + } + }) + rollbacks.length = 0 + } + else if (code && !noLog) writeLogFile(reallyExit) + else rm("npm-debug.log", reallyExit) + + function reallyExit (er) { + if (er && !code) code = typeof er.errno === "number" ? er.errno : 1 - function reallyExit() { // truncate once it's been written. log.record.length = 0 @@ -63,14 +91,14 @@ // if we're really exiting, then let it exit on its own, so that // in-process stuff can finish or clean up first. 
if (!doExit) process.emit("exit", code) + npm.spinner.stop() } } function errorHandler (er) { - var printStack = false // console.error("errorHandler", er) - if (!npm.config.loaded) { + if (!npm.config || !npm.config.loaded) { // logging won't work unless we pretend that it's ready er = er || new Error("Exit prior to config file resolving.") console.error(er.stack || er.message) @@ -93,13 +121,55 @@ var m = er.code || er.message.match(/^(?:Error: )?(E[A-Z]+)/) if (m && !er.code) er.code = m + ; [ "type" + , "fstream_path" + , "fstream_unc_path" + , "fstream_type" + , "fstream_class" + , "fstream_finish_call" + , "fstream_linkpath" + , "stack" + , "fstream_stack" + , "statusCode" + , "pkgid" + ].forEach(function (k) { + var v = er[k] + if (!v) return + if (k === "fstream_stack") v = v.join("\n") + log.verbose(k, v) + }) + + log.verbose("cwd", process.cwd()) + + var os = require("os") + // log.error("System", os.type() + " " + os.release()) + // log.error("command", process.argv.map(JSON.stringify).join(" ")) + // log.error("node -v", process.version) + // log.error("npm -v", npm.version) + log.error("", os.type() + " " + os.release()) + log.error("argv", process.argv.map(JSON.stringify).join(" ")) + log.error("node", process.version) + log.error("npm ", "v" + npm.version) + + ; [ "file" + , "path" + , "code" + , "errno" + , "syscall" + ].forEach(function (k) { + var v = er[k] + if (v) log.error(k, v) + }) + + // just a line break + if (log.levels[log.level] <= log.levels.error) console.error("") + switch (er.code) { case "ECONNREFUSED": log.error("", er) log.error("", ["\nIf you are behind a proxy, please make sure that the" ,"'proxy' config is set properly. See: 'npm help config'" ].join("\n")) - printStack = true break case "EACCES": @@ -107,11 +177,9 @@ log.error("", er) log.error("", ["\nPlease try running this command again as root/Administrator." 
].join("\n")) - printStack = true break case "ELIFECYCLE": - er.code = "ELIFECYCLE" log.error("", er.message) log.error("", ["","Failed at the "+er.pkgid+" "+er.stage+" script." ,"This is most likely a problem with the "+er.pkgname+" package," @@ -125,7 +193,6 @@ break case "ENOGIT": - er.code = "ENOGIT" log.error("", er.message) log.error("", ["","Failed using git." ,"This is most likely not a problem with npm itself." @@ -134,7 +201,6 @@ break case "EJSONPARSE": - er.code = "EJSONPARSE" log.error("", er.message) log.error("", "File: "+er.file) log.error("", ["Failed to parse package.json data." @@ -144,43 +210,35 @@ ].join("\n"), "JSON.parse") break + // TODO(isaacs) + // Add a special case here for E401 and E403 explaining auth issues? + case "E404": - er.code = "E404" var msg = [er.message] if (er.pkgid && er.pkgid !== "-") { msg.push("", "'"+er.pkgid+"' is not in the npm registry." - ,"You should bug the author to publish it") + ,"You should bug the author to publish it (or use the name yourself!)") if (er.parent) { msg.push("It was specified as a dependency of '"+er.parent+"'") } - if (er.pkgid.match(/^node[\.\-]|[\.\-]js$/)) { - var s = er.pkgid.replace(/^node[\.\-]|[\.\-]js$/g, "") - if (s !== er.pkgid) { - s = s.replace(/[^a-z0-9]/g, ' ') - msg.push("\nMaybe try 'npm search " + s + "'") - } - } msg.push("\nNote that you can also install from a" - ,"tarball, folder, or http url, or git url.") + ,"tarball, folder, http url, or git url.") } + // There's no need to have 404 in the message as well. + msg[0] = msg[0].replace(/^404\s+/, "") log.error("404", msg.join("\n")) break case "EPUBLISHCONFLICT": - er.code = "EPUBLISHCONFLICT" log.error("publish fail", ["Cannot publish over existing version." ,"Update the 'version' field in package.json and try again." 
,"" - ,"If the previous version was published in error, see:" - ," npm help unpublish" - ,"" ,"To automatically increment version numbers, see:" ," npm help version" ].join("\n")) break case "EISGIT": - er.code = "EISGIT" log.error("git", [er.message ," "+er.path ,"Refusing to remove it. Update manually," @@ -189,7 +247,6 @@ break case "ECYCLE": - er.code = "ECYCLE" log.error("cycle", [er.message ,"While installing: "+er.pkgid ,"Found a pathological dependency case that npm cannot solve." @@ -198,7 +255,6 @@ break case "EBADPLATFORM": - er.code = "EBADPLATFORM" log.error("notsup", [er.message ,"Not compatible with your operating system or architecture: "+er.pkgid ,"Valid OS: "+er.os.join(",") @@ -267,51 +323,30 @@ break } // else passthrough + case "ENOSPC": + log.error("nospc", [er.message + ,"This is most likely not a problem with npm itself" + ,"and is related to insufficient space on your system." + ].join("\n")) + break + + case "EROFS": + log.error("rofs", [er.message + ,"This is most likely not a problem with npm itself" + ,"and is related to the file system being read-only." + ,"\nOften virtualized file systems, or other file systems" + ,"that don't support symlinks, give this error." 
+ ].join("\n")) + break + default: - log.error("", er.stack || er.message || er) - log.error("", ["If you need help, you may report this *entire* log," - ,"including the npm and node versions, at:" + log.error("", er.message || er) + log.error("", ["", "If you need help, you may report this error at:" ," " ].join("\n")) - printStack = false break } - var os = require("os") - // just a line break - if (log.levels[log.level] <= log.levels.error) console.error("") - log.error("System", os.type() + " " + os.release()) - log.error("command", process.argv - .map(JSON.stringify).join(" ")) - log.error("cwd", process.cwd()) - log.error("node -v", process.version) - log.error("npm -v", npm.version) - - ; [ "file" - , "path" - , "type" - , "syscall" - , "fstream_path" - , "fstream_unc_path" - , "fstream_type" - , "fstream_class" - , "fstream_finish_call" - , "fstream_linkpath" - , "code" - , "errno" - , "stack" - , "fstream_stack" - ].forEach(function (k) { - var v = er[k] - if (k === "stack") { - if (!printStack) return - if (!v) v = er.message - } - if (!v) return - if (k === "fstream_stack") v = v.join("\n") - log.error(k, v) - }) - exit(typeof er.errno === "number" ? 
er.errno : 1) } @@ -321,19 +356,17 @@ writingLogFile = true wroteLogFile = true - var fs = require("graceful-fs") - , fstr = fs.createWriteStream("npm-debug.log") - , util = require("util") + var fstr = writeStream("npm-debug.log") , os = require("os") , out = "" log.record.forEach(function (m) { var pref = [m.id, m.level] if (m.prefix) pref.push(m.prefix) - pref = pref.join(' ') + pref = pref.join(" ") m.message.trim().split(/\r?\n/).map(function (line) { - return (pref + ' ' + line).trim() + return (pref + " " + line).trim() }).forEach(function (line) { out += line + os.EOL }) diff -Nru nodejs-0.11.13/deps/npm/lib/utils/fetch.js nodejs-0.11.15/deps/npm/lib/utils/fetch.js --- nodejs-0.11.13/deps/npm/lib/utils/fetch.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/fetch.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -/** - * Fetch an HTTP url to a local file. - **/ - -var request = require("request") - , fs = require("graceful-fs") - , npm = require("../npm.js") - , url = require("url") - , log = require("npmlog") - , path = require("path") - , mkdir = require("mkdirp") - , chownr = require("chownr") - , regHost - , once = require("once") - -module.exports = fetch - -function fetch (remote, local, headers, cb) { - if (typeof cb !== "function") cb = headers, headers = {} - cb = once(cb) - log.verbose("fetch", "to=", local) - mkdir(path.dirname(local), function (er, made) { - if (er) return cb(er) - fetch_(remote, local, headers, cb) - }) -} - -function fetch_ (remote, local, headers, cb) { - var fstr = fs.createWriteStream(local, { mode : npm.modes.file }) - var response = null - - fstr.on("error", function (er) { - cb(er) - fstr.destroy() - }) - - var req = makeRequest(remote, fstr, headers) - req.on("response", function (res) { - log.http(res.statusCode, remote) - response = res - response.resume() - // Work around bug in node v0.10.0 where the CryptoStream - // gets stuck and never starts reading again. 
- if (process.version === "v0.10.0") { - response.resume = function (orig) { return function() { - var ret = orig.apply(response, arguments) - if (response.socket.encrypted) - response.socket.encrypted.read(0) - return ret - }}(response.resume) - } - }) - - fstr.on("close", function () { - var er - if (response && response.statusCode && response.statusCode >= 400) { - er = new Error(response.statusCode + " " - + require("http").STATUS_CODES[response.statusCode]) - } - cb(er, response) - }) -} - -function makeRequest (remote, fstr, headers) { - remote = url.parse(remote) - log.http("GET", remote.href) - regHost = regHost || url.parse(npm.config.get("registry")).host - - if (remote.host === regHost && npm.config.get("always-auth")) { - remote.auth = new Buffer( npm.config.get("_auth") - , "base64" ).toString("utf8") - if (!remote.auth) return fstr.emit("error", new Error( - "Auth required and none provided. Please run 'npm adduser'")) - } - - var proxy - if (remote.protocol !== "https:" || !(proxy = npm.config.get("https-proxy"))) { - proxy = npm.config.get("proxy") - } - - var opts = { url: remote - , proxy: proxy - , strictSSL: npm.config.get("strict-ssl") - , rejectUnauthorized: npm.config.get("strict-ssl") - , ca: remote.host === regHost ? 
npm.config.get("ca") : undefined - , headers: { "user-agent": npm.config.get("user-agent") }} - var req = request(opts) - req.on("error", function (er) { - fstr.emit("error", er) - }) - req.pipe(fstr) - return req -} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/find-prefix.js nodejs-0.11.15/deps/npm/lib/utils/find-prefix.js --- nodejs-0.11.13/deps/npm/lib/utils/find-prefix.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/find-prefix.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -// try to find the most reasonable prefix to use - -module.exports = findPrefix - -var fs = require("graceful-fs") - , path = require("path") - , npm = require("../npm.js") - -function findPrefix (p, cb_) { - function cb (er, p) { - process.nextTick(function () { - cb_(er, p) - }) - } - - p = path.resolve(p) - // if there's no node_modules folder, then - // walk up until we hopefully find one. - // if none anywhere, then use cwd. - var walkedUp = false - while (path.basename(p) === "node_modules") { - p = path.dirname(p) - walkedUp = true - } - if (walkedUp) return cb(null, p) - - findPrefix_(p, p, cb) -} - -function findPrefix_ (p, original, cb) { - if (p === "/" - || (process.platform === "win32" && p.match(/^[a-zA-Z]:(\\|\/)?$/))) { - return cb(null, original) - } - fs.readdir(p, function (er, files) { - // an error right away is a bad sign. - // unless the prefix was simply a non - // existent directory. - if (er && p === original) { - if (er.code === "ENOENT") return cb(null, original); - return cb(er) - } - - // walked up too high or something. 
- if (er) return cb(null, original) - - if (files.indexOf("node_modules") !== -1 - || files.indexOf("package.json") !== -1) { - return cb(null, p) - } - - var d = path.dirname(p) - if (d === p) return cb(null, original) - - return findPrefix_(d, original, cb) - }) -} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/gently-rm.js nodejs-0.11.15/deps/npm/lib/utils/gently-rm.js --- nodejs-0.11.13/deps/npm/lib/utils/gently-rm.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/gently-rm.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,54 +3,159 @@ module.exports = gentlyRm -var rimraf = require("rimraf") - , fs = require("graceful-fs") - , npm = require("../npm.js") - , path = require("path") - -function gentlyRm (p, gently, cb) { - if (!cb) cb = gently, gently = null +var npm = require("../npm.js") + , log = require("npmlog") + , resolve = require("path").resolve + , dirname = require("path").dirname + , lstat = require("graceful-fs").lstat + , readlink = require("graceful-fs").readlink + , isInside = require("path-is-inside") + , vacuum = require("fs-vacuum") + , rimraf = require("rimraf") + , some = require("async-some") + +function gentlyRm (path, gently, cb) { + if (!cb) { + cb = gently + gently = null + } // never rm the root, prefix, or bin dirs. // just a safety precaution. 
- p = path.resolve(p) - if (p === npm.dir || - p === npm.root || - p === npm.bin || - p === npm.prefix || - p === npm.globalDir || - p === npm.globalRoot || - p === npm.globalBin || - p === npm.globalPrefix) { - return cb(new Error("May not delete: " + p)) + var prefixes = [ + npm.dir, npm.root, npm.bin, npm.prefix, + npm.globalDir, npm.globalRoot, npm.globalBin, npm.globalPrefix + ] + + var resolved = resolve(path) + if (prefixes.indexOf(resolved) !== -1) { + log.verbose("gentlyRm", resolved, "is part of npm and can't be removed") + return cb(new Error("May not delete: "+resolved)) } - if (npm.config.get("force") || !gently) { - return rimraf(p, cb) + var options = {log : log.silly.bind(log, "gentlyRm")} + if (npm.config.get("force") || !gently) options.purge = true + + if (!gently) { + log.verbose("gentlyRm", "vacuuming", resolved) + return vacuum(resolved, options, cb) } - gently = path.resolve(gently) + var parent = resolve(gently) + log.verbose("gentlyRm", "verifying that", parent, "is managed by npm") + some(prefixes, isManaged(parent), function (er, matched) { + if (er) return cb(er) + + if (!matched) { + log.verbose("gentlyRm", parent, "is not managed by npm") + return clobberFail(resolved, parent, cb) + } + + log.silly("gentlyRm", parent, "is managed by npm") - // lstat it, see if it's a symlink. 
- fs.lstat(p, function (er, s) { - if (er) return rimraf(p, cb) - if (!s.isSymbolicLink()) next(null, path.resolve(p)) - realish(p, next) + if (isInside(resolved, parent)) { + log.silly("gentlyRm", resolved, "is under", parent) + log.verbose("gentlyRm", "vacuuming", resolved, "up to", parent) + options.base = parent + return vacuum(resolved, options, cb) + } + + log.silly("gentlyRm", resolved, "is not under", parent) + log.silly("gentlyRm", "checking to see if", resolved, "is a link") + lstat(resolved, function (er, stat) { + if (er) { + if (er.code === "ENOENT") return cb(null) + return cb(er) + } + + if (!stat.isSymbolicLink()) { + log.verbose("gentlyRm", resolved, "is outside", parent, "and not a link") + return clobberFail(resolved, parent, cb) + } + + log.silly("gentlyRm", resolved, "is a link") + readlink(resolved, function (er, link) { + if (er) { + if (er.code === "ENOENT") return cb(null) + return cb(er) + } + + var source = resolve(dirname(resolved), link) + if (isInside(source, parent)) { + log.silly("gentlyRm", source, "inside", parent) + log.verbose("gentlyRm", "vacuuming", resolved) + return vacuum(resolved, options, cb) + } + + log.silly("gentlyRm", "checking to see if", source, "is managed by npm") + some(prefixes, isManaged(source), function (er, matched) { + if (er) return cb(er) + + if (matched) { + log.silly("gentlyRm", source, "is under", matched) + log.verbose("gentlyRm", "removing", resolved) + rimraf(resolved, cb) + } + + log.verbose("gentlyRm", source, "is not managed by npm") + return clobberFail(path, parent, cb) + }) + }) + }) }) +} - function next (er, rp) { - if (rp && rp.indexOf(gently) !== 0) { - return clobberFail(p, gently, cb) +var resolvedPaths = {} +function isManaged (target) { + return predicate + + function predicate (path, cb) { + if (!path) { + log.verbose("isManaged", "no path") + return cb(null, false) } - rimraf(p, cb) + + path = resolve(path) + + // if the path has already been memoized, return immediately + var 
resolved = resolvedPaths[path] + if (resolved) { + var inside = isInside(target, resolved) + log.silly("isManaged", target, inside ? "is" : "is not", "inside", resolved) + + return cb(null, inside && path) + } + + // otherwise, check the path + lstat(path, function (er, stat) { + if (er) { + if (er.code === "ENOENT") return cb(null, false) + + return cb(er) + } + + // if it's not a link, cache & test the path itself + if (!stat.isSymbolicLink()) return cacheAndTest(path, path, target, cb) + + // otherwise, cache & test the link's source + readlink(path, function (er, source) { + if (er) { + if (er.code === "ENOENT") return cb(null, false) + + return cb(er) + } + + cacheAndTest(resolve(path, source), path, target, cb) + }) + }) } -} -function realish (p, cb) { - fs.readlink(p, function (er, r) { - if (er) return cb(er) - return cb(null, path.resolve(path.dirname(p), r)) - }) + function cacheAndTest (resolved, source, target, cb) { + resolvedPaths[source] = resolved + var inside = isInside(target, resolved) + log.silly("cacheAndTest", target, inside ? "is" : "is not", "inside", resolved) + cb(null, inside && source) + } } function clobberFail (p, g, cb) { diff -Nru nodejs-0.11.13/deps/npm/lib/utils/git.js nodejs-0.11.15/deps/npm/lib/utils/git.js --- nodejs-0.11.13/deps/npm/lib/utils/git.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/git.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,50 @@ + +// handle some git configuration for windows + +exports.spawn = spawnGit +exports.chainableExec = chainableExec +exports.whichAndExec = whichAndExec + +var exec = require("child_process").execFile + , spawn = require("child_process").spawn + , npm = require("../npm.js") + , which = require("which") + , git = npm.config.get("git") + , assert = require("assert") + , log = require("npmlog") + +function prefixGitArgs() { + return process.platform === "win32" ? 
["-c", "core.longpaths=true"] : [] +} + +function execGit(args, options, cb) { + log.info("git", args) + return exec(git, prefixGitArgs().concat(args || []), options, cb) +} + +function spawnGit(args, options, cb) { + log.info("git", args) + return spawn(git, prefixGitArgs().concat(args || []), options) +} + +function chainableExec() { + var args = Array.prototype.slice.call(arguments) + return [execGit].concat(args) +} + +function whichGit(cb) { + return which(git, cb) +} + +function whichAndExec(args, options, cb) { + assert.equal(typeof cb, "function", "no callback provided") + // check for git + whichGit(function (err) { + if (err) { + err.code = "ENOGIT" + return cb(err) + } + + execGit(args, options, cb) + }) +} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/is-git-url.js nodejs-0.11.15/deps/npm/lib/utils/is-git-url.js --- nodejs-0.11.13/deps/npm/lib/utils/is-git-url.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/is-git-url.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -module.exports = isGitUrl - -function isGitUrl (url) { - switch (url.protocol) { - case "git:": - case "git+http:": - case "git+https:": - case "git+rsync:": - case "git+ftp:": - case "git+ssh:": - return true - } -} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/lifecycle.js nodejs-0.11.15/deps/npm/lib/utils/lifecycle.js --- nodejs-0.11.13/deps/npm/lib/utils/lifecycle.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/lifecycle.js 2015-01-20 21:22:17.000000000 +0000 @@ -71,11 +71,6 @@ , p = wd.split("node_modules") , acc = path.resolve(p.shift()) - // first add the directory containing the `node` executable currently - // running, so that any lifecycle script that invoke "node" will execute - // this same one. 
- pathArr.unshift(path.dirname(process.execPath)) - p.forEach(function (pp) { pathArr.unshift(path.join(acc, "node_modules", ".bin")) acc = path.join(acc, "node_modules", pp) @@ -96,24 +91,23 @@ env.npm_lifecycle_script = pkg.scripts[stage] } - if (failOk) { - cb = (function (cb_) { return function (er) { - if (er) log.warn("continuing anyway", er.message) - cb_() - }})(cb) - } - - if (npm.config.get("force")) { - cb = (function (cb_) { return function (er) { - if (er) log.info("forced, continuing", er) - cb_() - }})(cb) + function done (er) { + if (er) { + if (npm.config.get("force")) { + log.info("forced, continuing", er) + er = null + } else if (failOk) { + log.warn("continuing anyway", er.message) + er = null + } + } + cb(er) } chain ( [ packageLifecycle && [runPackageLifecycle, pkg, env, wd, unsafe] , [runHookLifecycle, pkg, env, wd, unsafe] ] - , cb ) + , done ) } function validWd (d, cb) { @@ -162,6 +156,9 @@ , group = unsafe ? null : npm.config.get("group") if (log.level !== 'silent') { + if (npm.spinner.int) { + npm.config.get("logstream").write("\r \r") + } console.log(note) } log.verbose("unsafe-perm in lifecycle", unsafe) @@ -207,12 +204,17 @@ } var proc = spawn(sh, [shFlag, cmd], conf) + proc.on("error", procError) proc.on("close", function (code, signal) { if (signal) { process.kill(process.pid, signal); } else if (code) { var er = new Error("Exit status " + code) } + procError(er) + }) + + function procError (er) { if (er && !npm.ROLLBACK) { log.info(pkg._id, "Failed to exec "+stage+" script") er.message = pkg._id + " " @@ -232,7 +234,7 @@ return cb() } cb(er) - }) + } } @@ -346,13 +348,9 @@ function cmd (stage) { function CMD (args, cb) { - if (args.length) { - chain(args.map(function (p) { - return [npm.commands, "run-script", [p, stage]] - }), cb) - } else npm.commands["run-script"]([stage], cb) + npm.commands["run-script"]([stage].concat(args), cb) } - CMD.usage = "npm "+stage+" " + CMD.usage = "npm "+stage+" [-- ]" var installedShallow = 
require("./completion/installed-shallow.js") CMD.completion = function (opts, cb) { installedShallow(opts, function (d) { diff -Nru nodejs-0.11.13/deps/npm/lib/utils/locker.js nodejs-0.11.15/deps/npm/lib/utils/locker.js --- nodejs-0.11.13/deps/npm/lib/utils/locker.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/locker.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,75 @@ +var crypto = require("crypto") +var resolve = require("path").resolve + +var lockfile = require("lockfile") +var log = require("npmlog") +var mkdirp = require("mkdirp") + +var npm = require("../npm.js") +var getStat = require("../cache/get-stat.js") + +var installLocks = {} + +function lockFileName (base, name) { + var c = name.replace(/[^a-zA-Z0-9]+/g, "-").replace(/^-+|-+$/g, "") + , p = resolve(base, name) + , h = crypto.createHash("sha1").update(p).digest("hex") + , l = resolve(npm.cache, "_locks") + + return resolve(l, c.substr(0, 24)+"-"+h.substr(0, 16)+".lock") +} + +function lock (base, name, cb) { + getStat(function (er) { + var lockDir = resolve(npm.cache, "_locks") + mkdirp(lockDir, function () { + if (er) return cb(er) + + var opts = { stale: npm.config.get("cache-lock-stale") + , retries: npm.config.get("cache-lock-retries") + , wait: npm.config.get("cache-lock-wait") } + var lf = lockFileName(base, name) + lockfile.lock(lf, opts, function (er) { + if (er) log.warn("locking", lf, "failed", er) + + if (!er) { + log.verbose("lock", "using", lf, "for", resolve(base, name)) + installLocks[lf] = true + } + + cb(er) + }) + }) + }) +} + +function unlock (base, name, cb) { + var lf = lockFileName(base, name) + , locked = installLocks[lf] + if (locked === false) { + return process.nextTick(cb) + } + else if (locked === true) { + lockfile.unlock(lf, function (er) { + if (er) { + log.warn("unlocking", lf, "failed", er) + } + else { + installLocks[lf] = false + log.verbose("unlock", "done using", lf, "for", resolve(base, name)) + } + + cb(er) + }) + } + else { + 
throw new Error( + "Attempt to unlock " + resolve(base, name) + ", which hasn't been locked" + ) + } +} + +module.exports = { + lock : lock, + unlock : unlock +} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/map-to-registry.js nodejs-0.11.15/deps/npm/lib/utils/map-to-registry.js --- nodejs-0.11.13/deps/npm/lib/utils/map-to-registry.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/map-to-registry.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +var url = require("url") + +var log = require("npmlog") + , npa = require("npm-package-arg") + +module.exports = mapToRegistry + +function mapToRegistry(name, config, cb) { + var uri + var scopedRegistry + + // the name itself takes precedence + var data = npa(name) + if (data.scope) { + // the name is definitely scoped, so escape now + name = name.replace("/", "%2f") + + log.silly("mapToRegistry", "scope", data.scope) + + scopedRegistry = config.get(data.scope + ":registry") + if (scopedRegistry) { + log.silly("mapToRegistry", "scopedRegistry (scoped package)", scopedRegistry) + uri = url.resolve(scopedRegistry, name) + } + else { + log.verbose("mapToRegistry", "no registry URL found for scope", data.scope) + } + } + + // ...then --scope=@scope or --scope=scope + var scope = config.get("scope") + if (!uri && scope) { + // I'm an enabler, sorry + if (scope.charAt(0) !== "@") scope = "@" + scope + + scopedRegistry = config.get(scope + ":registry") + if (scopedRegistry) { + log.silly("mapToRegistry", "scopedRegistry (scope in config)", scopedRegistry) + uri = url.resolve(scopedRegistry, name) + } + else { + log.verbose("mapToRegistry", "no registry URL found for scope", scope) + } + } + + // ...and finally use the default registry + if (!uri) { + uri = url.resolve(config.get("registry"), name) + } + + log.verbose("mapToRegistry", "name", name) + log.verbose("mapToRegistry", "uri", uri) + cb(null, uri) +} diff -Nru nodejs-0.11.13/deps/npm/lib/utils/tar.js nodejs-0.11.15/deps/npm/lib/utils/tar.js 
--- nodejs-0.11.13/deps/npm/lib/utils/tar.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/utils/tar.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,12 +3,12 @@ var npm = require("../npm.js") , fs = require("graceful-fs") + , writeFileAtomic = require("write-file-atomic") , path = require("path") , log = require("npmlog") , uidNumber = require("uid-number") , rm = require("./gently-rm.js") , readJson = require("read-package-json") - , cache = require("../cache.js") , myUid = process.getuid && process.getuid() , myGid = process.getgid && process.getgid() , tar = require("tar") @@ -17,14 +17,6 @@ , Packer = require("fstream-npm") , lifecycle = require("./lifecycle.js") -function lock(path, cb) { - return cache.lock('tar://' + path, cb) -} - -function unlock(path, cb) { - return cache.unlock('tar://' + path, cb) -} - if (process.env.SUDO_UID && myUid === 0) { if (!isNaN(process.env.SUDO_UID)) myUid = +process.env.SUDO_UID if (!isNaN(process.env.SUDO_GID)) myGid = +process.env.SUDO_GID @@ -51,73 +43,40 @@ } } -function pack_ (tarball, folder, pkg, cb_) { - var tarballLock = false - , folderLock = false - - function cb (er) { - if (folderLock) - unlock(folder, function() { - folderLock = false - cb(er) - }) - else if (tarballLock) - unlock(tarball, function() { - tarballLock = false - cb(er) - }) - else - cb_(er) - } - - lock(folder, function(er) { - if (er) return cb(er) - folderLock = true - next() - }) - - lock(tarball, function (er) { - if (er) return cb(er) - tarballLock = true - next() - }) - - function next () { - if (!tarballLock || !folderLock) return +function pack_ (tarball, folder, pkg, cb) { + new Packer({ path: folder, type: "Directory", isDirectory: true }) + .on("error", function (er) { + if (er) log.error("tar pack", "Error reading " + folder) + return cb(er) + }) - new Packer({ path: folder, type: "Directory", isDirectory: true }) - .on("error", function (er) { - if (er) log.error("tar pack", "Error reading " + folder) - return 
cb(er) - }) - - // By default, npm includes some proprietary attributes in the - // package tarball. This is sane, and allowed by the spec. - // However, npm *itself* excludes these from its own package, - // so that it can be more easily bootstrapped using old and - // non-compliant tar implementations. - .pipe(tar.Pack({ noProprietary: !npm.config.get("proprietary-attribs") })) - .on("error", function (er) { - if (er) log.error("tar.pack", "tar creation error", tarball) - cb(er) - }) - .pipe(zlib.Gzip()) - .on("error", function (er) { - if (er) log.error("tar.pack", "gzip error "+tarball) - cb(er) - }) - .pipe(fstream.Writer({ type: "File", path: tarball })) - .on("error", function (er) { - if (er) log.error("tar.pack", "Could not write "+tarball) - cb(er) - }) - .on("close", cb) - } + // By default, npm includes some proprietary attributes in the + // package tarball. This is sane, and allowed by the spec. + // However, npm *itself* excludes these from its own package, + // so that it can be more easily bootstrapped using old and + // non-compliant tar implementations. 
+ .pipe(tar.Pack({ noProprietary: !npm.config.get("proprietary-attribs") })) + .on("error", function (er) { + if (er) log.error("tar.pack", "tar creation error", tarball) + cb(er) + }) + .pipe(zlib.Gzip()) + .on("error", function (er) { + if (er) log.error("tar.pack", "gzip error "+tarball) + cb(er) + }) + .pipe(fstream.Writer({ type: "File", path: tarball })) + .on("error", function (er) { + if (er) log.error("tar.pack", "Could not write "+tarball) + cb(er) + }) + .on("close", cb) } function unpack (tarball, unpackTarget, dMode, fMode, uid, gid, cb) { - log.verbose("tar unpack", tarball) + log.verbose("tar", "unpack", tarball) + log.verbose("tar", "unpacking to", unpackTarget) if (typeof cb !== "function") cb = gid, gid = null if (typeof cb !== "function") cb = uid, uid = null if (typeof cb !== "function") cb = fMode, fMode = npm.modes.file @@ -129,52 +88,9 @@ }) } -function unpack_ ( tarball, unpackTarget, dMode, fMode, uid, gid, cb_ ) { - var parent = path.dirname(unpackTarget) - , base = path.basename(unpackTarget) - , folderLock - , tarballLock - - function cb (er) { - if (folderLock) - unlock(unpackTarget, function() { - folderLock = false - cb(er) - }) - else if (tarballLock) - unlock(tarball, function() { - tarballLock = false - cb(er) - }) - else - cb_(er) - } - - lock(unpackTarget, function (er) { +function unpack_ ( tarball, unpackTarget, dMode, fMode, uid, gid, cb ) { + rm(unpackTarget, function (er) { if (er) return cb(er) - folderLock = true - next() - }) - - lock(tarball, function (er) { - if (er) return cb(er) - tarballLock = true - next() - }) - - function next() { - if (!tarballLock || !folderLock) return - rmGunz() - } - - function rmGunz () { - rm(unpackTarget, function (er) { - if (er) return cb(er) - gtp() - }) - } - - function gtp () { // gzip {tarball} --decompress --stdout \ // | tar -mvxpf - --strip-components=1 -C {unpackTarget} gunzTarPerm( tarball, unpackTarget @@ -184,7 +100,7 @@ if (er) return cb(er) readJson(path.resolve(folder, 
"package.json"), cb) }) - } + }) } @@ -202,6 +118,17 @@ var fst = fs.createReadStream(tarball) + fst.on("open", function (fd) { + fs.fstat(fd, function (er, st) { + if (er) return fst.emit("error", er) + if (st.size === 0) { + er = new Error("0-byte tarball\n" + + "Please run `npm cache clean`") + fst.emit("error", er) + } + }) + }) + // figure out who we're supposed to be, if we're not pretending // to be a specific user. if (npm.config.get("unsafe-perm") && process.platform !== "win32") { @@ -240,6 +167,7 @@ extractOpts.gid = gid } + var sawIgnores = {} extractOpts.filter = function () { // symbolic links are not allowed in packages. if (this.type.match(/^.*Link$/)) { @@ -248,76 +176,100 @@ + " -> " + this.linkpath ) return false } + + // Note: This mirrors logic in the fs read operations that are + // employed during tarball creation, in the fstream-npm module. + // It is duplicated here to handle tarballs that are created + // using other means, such as system tar or git archive. + if (this.type === "File") { + var base = path.basename(this.path) + if (base === ".npmignore") { + sawIgnores[ this.path ] = true + } else if (base === ".gitignore") { + var npmignore = this.path.replace(/\.gitignore$/, ".npmignore") + if (sawIgnores[npmignore]) { + // Skip this one, already seen. + return false + } else { + // Rename, may be clobbered later. + this.path = npmignore + this._path = npmignore + } + } + } + return true } - fst.on("error", function (er) { - if (er) log.error("tar.unpack", "error reading "+tarball) - cb(er) - }) - fst.on("data", function OD (c) { - // detect what it is. - // Then, depending on that, we'll figure out whether it's - // a single-file module, gzipped tarball, or naked tarball. 
- // gzipped files all start with 1f8b08 - if (c[0] === 0x1F && - c[1] === 0x8B && - c[2] === 0x08) { - fst - .pipe(zlib.Unzip()) - .on("error", function (er) { - if (er) log.error("tar.unpack", "unzip error "+tarball) - cb(er) - }) - .pipe(tar.Extract(extractOpts)) - .on("entry", extractEntry) - .on("error", function (er) { - if (er) log.error("tar.unpack", "untar error "+tarball) - cb(er) - }) - .on("close", cb) - } else if (c.toString().match(/^package\//)) { - // naked tar - fst - .pipe(tar.Extract(extractOpts)) - .on("entry", extractEntry) - .on("error", function (er) { - if (er) log.error("tar.unpack", "untar error "+tarball) - cb(er) - }) - .on("close", cb) - } else { - // naked js file - var jsOpts = { path: path.resolve(target, "index.js") } - - if (process.platform !== "win32" && - typeof uid === "number" && - typeof gid === "number") { - jsOpts.uid = uid - jsOpts.gid = gid - } - - fst - .pipe(fstream.Writer(jsOpts)) - .on("error", function (er) { - if (er) log.error("tar.unpack", "copy error "+tarball) - cb(er) - }) - .on("close", function () { - var j = path.resolve(target, "package.json") - readJson(j, function (er, d) { - if (er) { - log.error("not a package", tarball) - return cb(er) - } - fs.writeFile(j, JSON.stringify(d) + "\n", cb) + fst + .on("error", function (er) { + if (er) log.error("tar.unpack", "error reading "+tarball) + cb(er) + }) + .on("data", function OD (c) { + // detect what it is. + // Then, depending on that, we'll figure out whether it's + // a single-file module, gzipped tarball, or naked tarball. 
+ // gzipped files all start with 1f8b08 + if (c[0] === 0x1F && + c[1] === 0x8B && + c[2] === 0x08) { + fst + .pipe(zlib.Unzip()) + .on("error", function (er) { + if (er) log.error("tar.unpack", "unzip error "+tarball) + cb(er) }) - }) - } + .pipe(tar.Extract(extractOpts)) + .on("entry", extractEntry) + .on("error", function (er) { + if (er) log.error("tar.unpack", "untar error "+tarball) + cb(er) + }) + .on("close", cb) + } else if (c.toString().match(/^package\//) || + c.toString().match(/^pax_global_header/)) { + // naked tar + fst + .pipe(tar.Extract(extractOpts)) + .on("entry", extractEntry) + .on("error", function (er) { + if (er) log.error("tar.unpack", "untar error "+tarball) + cb(er) + }) + .on("close", cb) + } else { + // naked js file + var jsOpts = { path: path.resolve(target, "index.js") } + + if (process.platform !== "win32" && + typeof uid === "number" && + typeof gid === "number") { + jsOpts.uid = uid + jsOpts.gid = gid + } + + fst + .pipe(fstream.Writer(jsOpts)) + .on("error", function (er) { + if (er) log.error("tar.unpack", "copy error "+tarball) + cb(er) + }) + .on("close", function () { + var j = path.resolve(target, "package.json") + readJson(j, function (er, d) { + if (er) { + log.error("not a package", tarball) + return cb(er) + } + writeFileAtomic(j, JSON.stringify(d) + "\n", cb) + }) + }) + } - // now un-hook, and re-emit the chunk - fst.removeListener("data", OD) - fst.emit("data", c) - }) + // now un-hook, and re-emit the chunk + fst.removeListener("data", OD) + fst.emit("data", c) + }) } diff -Nru nodejs-0.11.13/deps/npm/lib/version.js nodejs-0.11.15/deps/npm/lib/version.js --- nodejs-0.11.13/deps/npm/lib/version.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/version.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,12 +6,14 @@ , semver = require("semver") , path = require("path") , fs = require("graceful-fs") + , writeFileAtomic = require("write-file-atomic") , chain = require("slide").chain , log = require("npmlog") 
, which = require("which") , npm = require("./npm.js") + , git = require("./utils/git.js") -version.usage = "npm version [ | major | minor | patch]\n" +version.usage = "npm version [ | major | minor | patch | prerelease | preminor | premajor ]\n" + "\n(run in package dir)\n" + "'npm -v' or 'npm --version' to print npm version " + "("+npm.version+")\n" @@ -22,7 +24,7 @@ function version (args, silent, cb_) { if (typeof cb_ !== "function") cb_ = silent, silent = false if (args.length > 1) return cb_(version.usage) - fs.readFile(path.join(process.cwd(), "package.json"), function (er, data) { + fs.readFile(path.join(npm.localPrefix, "package.json"), function (er, data) { if (!args.length) { var v = {} Object.keys(process.versions).forEach(function (k) { @@ -37,6 +39,9 @@ if (data && data.name && data.version) { v[data.name] = data.version } + if (npm.config.get("json")) { + v = JSON.stringify(v, null, 2) + } console.log(v) return cb_() } @@ -53,13 +58,13 @@ return cb_(er) } - var newVer = semver.valid(args[0]) - if (!newVer) newVer = semver.inc(data.version, args[0]) - if (!newVer) return cb_(version.usage) + var newVer = semver.valid(args[0]) + if (!newVer) newVer = semver.inc(data.version, args[0]) + if (!newVer) return cb_(version.usage) if (data.version === newVer) return cb_(new Error("Version not changed")) data.version = newVer - fs.stat(path.join(process.cwd(), ".git"), function (er, s) { + fs.stat(path.join(npm.localPrefix, ".git"), function (er, s) { function cb (er) { if (!er && !silent) console.log("v" + newVer) cb_(er) @@ -74,47 +79,49 @@ } function checkGit (data, cb) { - var git = npm.config.get("git") var args = [ "status", "--porcelain" ] - var env = process.env + var options = {env: process.env} // check for git - which(git, function (err) { - if (err) { - err.code = "ENOGIT" - return cb(err) + git.whichAndExec(args, options, function (er, stdout) { + if (er && er.code === "ENOGIT") { + log.warn( + "version", + "This is a Git checkout, but the git 
command was not found.", + "npm could not create a Git tag for this release!" + ) + return write(data, cb) } - gitFound() - }) - - function gitFound () { - exec(git, args, {env: env}, function (er, stdout, stderr) { - var lines = stdout.trim().split("\n").filter(function (line) { - return line.trim() && !line.match(/^\?\? /) - }).map(function (line) { - return line.trim() - }) - if (lines.length) return cb(new Error( - "Git working directory not clean.\n"+lines.join("\n"))) - write(data, function (er) { - if (er) return cb(er) - var message = npm.config.get("message").replace(/%s/g, data.version) - , sign = npm.config.get("sign-git-tag") - , flag = sign ? "-sm" : "-am" - chain - ( [ [ exec, git, [ "add", "package.json" ], {env: process.env} ] - , [ exec, git, [ "commit", "-m", message ], {env: process.env} ] - , [ exec, git, [ "tag", "v" + data.version, flag, message ] - , {env: process.env} ] ] - , cb ) - }) + var lines = stdout.trim().split("\n").filter(function (line) { + return line.trim() && !line.match(/^\?\? /) + }).map(function (line) { + return line.trim() + }) + if (lines.length) return cb(new Error( + "Git working directory not clean.\n"+lines.join("\n"))) + write(data, function (er) { + if (er) return cb(er) + var message = npm.config.get("message").replace(/%s/g, data.version) + , sign = npm.config.get("sign-git-tag") + , flag = sign ? 
"-sm" : "-am" + chain + ( [ git.chainableExec([ "add", "package.json" ], {env: process.env}) + , git.chainableExec([ "commit", "-m", message ], {env: process.env}) + , sign && function (cb) { + npm.spinner.stop() + cb() + } + + , git.chainableExec([ "tag", "v" + data.version, flag, message ] + , {env: process.env}) ] + , cb ) }) - } + }) } function write (data, cb) { - fs.writeFile( path.join(process.cwd(), "package.json") + writeFileAtomic( path.join(npm.localPrefix, "package.json") , new Buffer(JSON.stringify(data, null, 2) + "\n") , cb ) } diff -Nru nodejs-0.11.13/deps/npm/lib/view.js nodejs-0.11.15/deps/npm/lib/view.js --- nodejs-0.11.13/deps/npm/lib/view.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/view.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,17 +5,25 @@ view.completion = function (opts, cb) { if (opts.conf.argv.remain.length <= 2) { - return registry.get("/-/short", cb) + return mapToRegistry("-/short", npm.config, function (er, uri) { + if (er) return cb(er) + + registry.get(uri, null, cb) + }) } // have the package, get the fields. 
var tag = npm.config.get("tag") - registry.get(opts.conf.argv.remain[2], function (er, d) { + mapToRegistry(opts.conf.argv.remain[2], npm.config, function (er, uri) { if (er) return cb(er) - var dv = d.versions[d["dist-tags"][tag]] - , fields = [] - d.versions = Object.keys(d.versions).sort(semver.compareLoose) - fields = getFields(d).concat(getFields(dv)) - cb(null, fields) + + registry.get(uri, null, function (er, d) { + if (er) return cb(er) + var dv = d.versions[d["dist-tags"][tag]] + , fields = [] + d.versions = Object.keys(d.versions).sort(semver.compareLoose) + fields = getFields(d).concat(getFields(dv)) + cb(null, fields) + }) }) function getFields (d, f, pref) { @@ -27,11 +35,12 @@ var p = pref.concat(k).join(".") f.push(p) if (Array.isArray(d[k])) { - return d[k].forEach(function (val, i) { + d[k].forEach(function (val, i) { var pi = p + "[" + i + "]" if (val && typeof val === "object") getFields(val, f, [p]) else f.push(pi) }) + return } if (typeof d[k] === "object") getFields(d[k], f, [p]) }) @@ -40,68 +49,104 @@ } var npm = require("./npm.js") + , readJson = require("read-package-json") , registry = npm.registry , log = require("npmlog") , util = require("util") , semver = require("semver") + , mapToRegistry = require("./utils/map-to-registry.js") + , npa = require("npm-package-arg") + , path = require("path") function view (args, silent, cb) { if (typeof cb !== "function") cb = silent, silent = false - if (!args.length) return cb("Usage: "+view.usage) + + if (!args.length) args = ["."] + var pkg = args.shift() - , nv = pkg.split("@") - , name = nv.shift() - , version = nv.join("@") || npm.config.get("tag") + , nv = npa(pkg) + , name = nv.name + , local = (name === "." 
|| !name) + + if (npm.config.get("global") && local) { + return cb(new Error("Cannot use view command in global mode.")) + } - if (name === ".") return cb(view.usage) + if (local) { + var dir = npm.prefix + readJson(path.resolve(dir, "package.json"), function (er, d) { + d = d || {} + if (er && er.code !== "ENOENT" && er.code !== "ENOTDIR") return cb(er) + if (!d.name) return cb(new Error("Invalid package.json")) + + var p = d.name + nv = npa(p) + if (pkg && ~pkg.indexOf("@")) { + nv.rawSpec = pkg.split("@")[pkg.indexOf("@")] + } + + fetchAndRead(nv, args, silent, cb) + }) + } else { + fetchAndRead(nv, args, silent, cb) + } +} +function fetchAndRead (nv, args, silent, cb) { // get the data about this package - registry.get(name, function (er, data) { + var name = nv.name + , version = nv.rawSpec || npm.config.get("tag") + + mapToRegistry(name, npm.config, function (er, uri) { if (er) return cb(er) - if (data["dist-tags"] && data["dist-tags"].hasOwnProperty(version)) { - version = data["dist-tags"][version] - } - if (data.time && data.time.unpublished) { - var u = data.time.unpublished - var er = new Error("Unpublished by " + u.name + " on " + u.time) - er.statusCode = 404 - er.code = "E404" - er.pkgid = data._id - return cb(er, data) - } + registry.get(uri, null, function (er, data) { + if (er) return cb(er) + if (data["dist-tags"] && data["dist-tags"].hasOwnProperty(version)) { + version = data["dist-tags"][version] + } + if (data.time && data.time.unpublished) { + var u = data.time.unpublished + er = new Error("Unpublished by " + u.name + " on " + u.time) + er.statusCode = 404 + er.code = "E404" + er.pkgid = data._id + return cb(er, data) + } - var results = [] - , error = null - , versions = data.versions || {} - data.versions = Object.keys(versions).sort(semver.compareLoose) - if (!args.length) args = [""] - - // remove readme unless we asked for it - if (-1 === args.indexOf("readme")) { - delete data.readme - } - Object.keys(versions).forEach(function (v) { - 
if (semver.satisfies(v, version, true)) args.forEach(function (args) { - // remove readme unless we asked for it - if (-1 === args.indexOf("readme")) { - delete versions[v].readme - } - results.push(showFields(data, versions[v], args)) + var results = [] + , error = null + , versions = data.versions || {} + data.versions = Object.keys(versions).sort(semver.compareLoose) + if (!args.length) args = [""] + + // remove readme unless we asked for it + if (-1 === args.indexOf("readme")) { + delete data.readme + } + + Object.keys(versions).forEach(function (v) { + if (semver.satisfies(v, version, true)) args.forEach(function (args) { + // remove readme unless we asked for it + if (-1 === args.indexOf("readme")) { + delete versions[v].readme + } + results.push(showFields(data, versions[v], args)) + }) }) - }) - results = results.reduce(reducer, {}) - var retval = results + results = results.reduce(reducer, {}) + var retval = results - if (args.length === 1 && args[0] === "") { - retval = cleanBlanks(retval) - log.silly("cleanup", retval) - } + if (args.length === 1 && args[0] === "") { + retval = cleanBlanks(retval) + log.silly("cleanup", retval) + } - if (error || silent) cb(error, retval) - else printData(results, data._id, cb.bind(null, error, retval)) + if (error || silent) cb(error, retval) + else printData(results, data._id, cb.bind(null, error, retval)) + }) }) } @@ -139,8 +184,9 @@ , tail = fields while (!field && fields.length) field = tail.shift() fields = [field].concat(tail) + var o if (!field && !tail.length) { - var o = {} + o = {} o[version] = {} o[version][title] = data return o @@ -160,7 +206,6 @@ return search(data[0], fields, version, title) } var results = [] - , res = null data.forEach(function (data, i) { var tl = title.length , newt = title.substr(0, tl-(fields.join(".").length) - 1) @@ -170,9 +215,7 @@ results = results.reduce(reducer, {}) return results } - if (!data.hasOwnProperty(field)) { - return - } + if (!data.hasOwnProperty(field)) return 
undefined data = data[field] if (tail.length) { if (typeof data === "object") { @@ -182,7 +225,7 @@ return new Error("Not an object: "+data) } } - var o = {} + o = {} o[version] = {} o[version][title] = data return o @@ -191,15 +234,15 @@ function printData (data, name, cb) { var versions = Object.keys(data) , msg = "" - , showVersions = versions.length > 1 - , showFields + , includeVersions = versions.length > 1 + , includeFields - versions.forEach(function (v, i) { + versions.forEach(function (v) { var fields = Object.keys(data[v]) - showFields = showFields || (fields.length > 1) + includeFields = includeFields || (fields.length > 1) fields.forEach(function (f) { var d = cleanup(data[v][f]) - if (showVersions || showFields || typeof d !== "string") { + if (includeVersions || includeFields || typeof d !== "string") { d = cleanup(data[v][f]) d = npm.config.get("json") ? JSON.stringify(d, null, 2) @@ -207,10 +250,10 @@ } else if (typeof d === "string" && npm.config.get("json")) { d = JSON.stringify(d) } - if (f && showFields) f += " = " - if (d.indexOf("\n") !== -1) d = "\n" + d - msg += (showVersions ? name + "@" + v + " " : "") - + (showFields ? f : "") + d + "\n" + if (f && includeFields) f += " = " + if (d.indexOf("\n") !== -1) d = " \n" + d + msg += (includeVersions ? name + "@" + v + " " : "") + + (includeFields ? f : "") + d + "\n" }) }) @@ -254,4 +297,3 @@ + (d.email ? " <"+d.email+">" : "") + (d.url ? 
" ("+d.url+")" : "") } - diff -Nru nodejs-0.11.13/deps/npm/lib/whoami.js nodejs-0.11.15/deps/npm/lib/whoami.js --- nodejs-0.11.13/deps/npm/lib/whoami.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/whoami.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,13 +1,39 @@ -module.exports = whoami - var npm = require("./npm.js") -whoami.usage = "npm whoami\n(just prints the 'username' config)" +module.exports = whoami + +whoami.usage = "npm whoami\n(just prints username according to given registry)" function whoami (args, silent, cb) { - if (typeof cb !== "function") cb = silent, silent = false - var me = npm.config.get("username") - msg = me ? me : "Not authed. Run 'npm adduser'" + // FIXME: need tighter checking on this, but is a breaking change + if (typeof cb !== "function") { + cb = silent + silent = false + } + + var registry = npm.config.get("registry") + if (!registry) return cb(new Error("no default registry set")) + + var credentials = npm.config.getCredentialsByURI(registry) + if (credentials) { + if (credentials.username) { + if (!silent) console.log(credentials.username) + return process.nextTick(cb.bind(this, null, credentials.username)) + } + else if (credentials.token) { + return npm.registry.whoami(registry, function (er, username) { + if (er) return cb(er) + + if (!silent) console.log(username) + cb(null, username) + }) + } + } + + // At this point, if they have a credentials object, it doesn't + // have a token or auth in it. Probably just the default + // registry. + var msg = "Not authed. 
Run 'npm adduser'" if (!silent) console.log(msg) - process.nextTick(cb.bind(this, null, me)) + process.nextTick(cb.bind(this, null, msg)) } diff -Nru nodejs-0.11.13/deps/npm/lib/xmas.js nodejs-0.11.15/deps/npm/lib/xmas.js --- nodejs-0.11.13/deps/npm/lib/xmas.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/lib/xmas.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,5 @@ // happy xmas -var npm = require("./npm.js") - , log = require("npmlog") +var log = require("npmlog") module.exports = function (args, cb) { var s = process.platform === "win32" ? " *" : " \u2605" @@ -20,26 +19,26 @@ for (var i = 0; i < H; i ++) w(" ") w(x+"\033[33m"+s+"\n") var M = H * 2 - 1 - for (L = 1; L <= H; L ++) { + for (var L = 1; L <= H; L ++) { var O = L * 2 - 2 var S = (M - O) / 2 - for (var i = 0; i < S; i ++) w(" ") + for (i = 0; i < S; i ++) w(" ") w(x+"\033[32m"+f) - for (var i = 0; i < O; i ++) w( + for (i = 0; i < O; i ++) w( "\033["+oc[Math.floor(Math.random()*oc.length)]+"m"+ o[Math.floor(Math.random() * o.length)] ) w(x+"\033[32m"+b+"\n") } w(" ") - for (var i = 1; i < H; i ++) w("\033[32m"+l) + for (i = 1; i < H; i ++) w("\033[32m"+l) w("| "+x+" |") - for (var i = 1; i < H; i ++) w("\033[32m"+l) + for (i = 1; i < H; i ++) w("\033[32m"+l) if (H > 10) { w("\n ") - for (var i = 1; i < H; i ++) w(" ") + for (i = 1; i < H; i ++) w(" ") w("| "+x+" |") - for (var i = 1; i < H; i ++) w(" ") + for (i = 1; i < H; i ++) w(" ") } })(20) w("\n\n") diff -Nru nodejs-0.11.13/deps/npm/LICENSE nodejs-0.11.15/deps/npm/LICENSE --- nodejs-0.11.13/deps/npm/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,8 +1,11 @@ Copyright (c) npm, Inc. and Contributors All rights reserved. -npm is released under the Artistic License 2.0. -The text of the License follows: +npm is released under the Artistic License 2.0, subject to additional terms +that are listed below. 
+ +The text of the npm License follows and the text of the additional terms +follows the Artistic License 2.0 terms: -------- @@ -214,14 +217,39 @@ -------- -"Node.js" and "node" trademark Joyent, Inc. npm is not officially -part of the Node.js project, and is neither owned by nor -officially affiliated with Joyent, Inc. - -Packages published in the npm registry (other than the Software and -its included dependencies) are not part of npm itself, are the sole -property of their respective maintainers, and are not covered by -this license. +The following additional terms shall apply to use of the npm software, the npm +website, the npm repository and any other services or products offered by npm, +Inc.: + +"Node.js" trademark Joyent, Inc. npm is not officially part of the Node.js +project, and is neither owned by nor affiliated with Joyent, Inc. + +"npm" and "The npm Registry" are owned by npm, Inc. All rights reserved. + +Modules published on the npm registry are not officially endorsed by npm, Inc. +or the Node.js project. + +Data published to the npm registry is not part of npm itself, and is the sole +property of the publisher. While every effort is made to ensure accountability, +there is absolutely no guarantee, warrantee, or assertion expressed or implied +as to the quality, fitness for a specific purpose, or lack of malice in any +given npm package. Packages downloaded through the npm registry are +independently licensed and are not covered by this license. + +Additional policies relating to, and restrictions on use of, npm products and +services are available on the npm website. All such policies and restrictions, +as updated from time to time, are hereby incorporated into this license +agreement. By using npm, you acknowledge your agreement to all such policies +and restrictions. + +If you have a complaint about a package in the public npm registry, and cannot +resolve it with the package owner, please email support@npmjs.com and explain +the situation. 
See the [npm Dispute Resolution +policy](https://github.com/npm/policies/blob/master/disputes.md) for more +details. + +Any data published to The npm Registry (including user account information) may +be removed or modified at the sole discretion of the npm server administrators. "npm Logo" created by Mathias Pettersson and Brian Hammond, used with permission. diff -Nru nodejs-0.11.13/deps/npm/Makefile nodejs-0.11.15/deps/npm/Makefile --- nodejs-0.11.13/deps/npm/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,9 @@ # vim: set softtabstop=2 shiftwidth=2: SHELL = bash +PUBLISHTAG = $(shell node scripts/publish-tag.js) +BRANCH = $(shell git rev-parse --abbrev-ref HEAD) + markdowns = $(shell find doc -name '*.md' | grep -v 'index') README.md html_docdeps = html/dochead.html \ @@ -28,6 +31,28 @@ |sed 's|doc/misc/|man/man7/|g' ) \ man/man7/npm-index.7 + +cli_partdocs = $(shell find doc/cli -name '*.md' \ + |sed 's|.md|.html|g' \ + |sed 's|doc/cli/|html/partial/doc/cli/|g' ) \ + html/partial/doc/README.html + +api_partdocs = $(shell find doc/api -name '*.md' \ + |sed 's|.md|.html|g' \ + |sed 's|doc/api/|html/partial/doc/api/|g' ) + +files_partdocs = $(shell find doc/files -name '*.md' \ + |sed 's|.md|.html|g' \ + |sed 's|doc/files/|html/partial/doc/files/|g' ) \ + html/partial/doc/files/npm-json.html \ + html/partial/doc/files/npm-global.html + +misc_partdocs = $(shell find doc/misc -name '*.md' \ + |sed 's|.md|.html|g' \ + |sed 's|doc/misc/|html/partial/doc/misc/|g' ) \ + html/partial/doc/index.html + + cli_htmldocs = $(shell find doc/cli -name '*.md' \ |sed 's|.md|.html|g' \ |sed 's|doc/cli/|html/doc/cli/|g' ) \ @@ -50,6 +75,8 @@ mandocs = $(api_mandocs) $(cli_mandocs) $(files_mandocs) $(misc_mandocs) +partdocs = $(api_partdocs) $(cli_partdocs) $(files_partdocs) $(misc_partdocs) + htmldocs = $(api_htmldocs) $(cli_htmldocs) $(files_htmldocs) $(misc_htmldocs) all: doc @@ -60,7 +87,7 @@ @echo 
"in this folder that you're looking at right now." node cli.js install -g -f npm -install: docclean all +install: all node cli.js install -g -f # backwards compat @@ -69,27 +96,31 @@ link: uninstall node cli.js link -f -clean: ronnclean doc-clean uninstall +clean: markedclean marked-manclean doc-clean uninstall rm -rf npmrc node cli.js cache clean uninstall: node cli.js rm npm -g -f -doc: $(mandocs) $(htmldocs) +doc: $(mandocs) $(htmldocs) $(partdocs) -ronnclean: - rm -rf node_modules/ronn node_modules/.bin/ronn .building_ronn +markedclean: + rm -rf node_modules/marked node_modules/.bin/marked .building_marked + +marked-manclean: + rm -rf node_modules/marked-man node_modules/.bin/marked-man .building_marked-man docclean: doc-clean doc-clean: rm -rf \ - .building_ronn \ + .building_marked \ + .building_marked-man \ html/doc \ html/api \ man -# use `npm install ronn` for this to work. +# use `npm install marked-man` for this to work. man/man1/npm-README.1: README.md scripts/doc-build.sh package.json @[ -d man/man1 ] || mkdir -p man/man1 scripts/doc-build.sh $< $@ @@ -112,48 +143,82 @@ @[ -d man/man5 ] || mkdir -p man/man5 scripts/doc-build.sh $< $@ +man/man7/%.7: doc/misc/%.md scripts/doc-build.sh package.json + @[ -d man/man7 ] || mkdir -p man/man7 + scripts/doc-build.sh $< $@ + + doc/misc/npm-index.md: scripts/index-build.js package.json node scripts/index-build.js > $@ -html/doc/index.html: doc/misc/npm-index.md $(html_docdeps) - @[ -d html/doc ] || mkdir -p html/doc - scripts/doc-build.sh $< $@ -man/man7/%.7: doc/misc/%.md scripts/doc-build.sh package.json - @[ -d man/man7 ] || mkdir -p man/man7 +# html/doc depends on html/partial/doc +html/doc/%.html: html/partial/doc/%.html + @[ -d html/doc ] || mkdir -p html/doc scripts/doc-build.sh $< $@ -html/doc/README.html: README.md $(html_docdeps) +html/doc/README.html: html/partial/doc/README.html @[ -d html/doc ] || mkdir -p html/doc scripts/doc-build.sh $< $@ -html/doc/cli/%.html: doc/cli/%.md $(html_docdeps) 
+html/doc/cli/%.html: html/partial/doc/cli/%.html @[ -d html/doc/cli ] || mkdir -p html/doc/cli scripts/doc-build.sh $< $@ -html/doc/api/%.html: doc/api/%.md $(html_docdeps) +html/doc/misc/%.html: html/partial/doc/misc/%.html + @[ -d html/doc/misc ] || mkdir -p html/doc/misc + scripts/doc-build.sh $< $@ + +html/doc/files/%.html: html/partial/doc/files/%.html + @[ -d html/doc/files ] || mkdir -p html/doc/files + scripts/doc-build.sh $< $@ + +html/doc/api/%.html: html/partial/doc/api/%.html @[ -d html/doc/api ] || mkdir -p html/doc/api scripts/doc-build.sh $< $@ -html/doc/files/npm-json.html: html/doc/files/package.json.html + +html/partial/doc/index.html: doc/misc/npm-index.md $(html_docdeps) + @[ -d html/partial/doc ] || mkdir -p html/partial/doc + scripts/doc-build.sh $< $@ + +html/partial/doc/README.html: README.md $(html_docdeps) + @[ -d html/partial/doc ] || mkdir -p html/partial/doc + scripts/doc-build.sh $< $@ + +html/partial/doc/cli/%.html: doc/cli/%.md $(html_docdeps) + @[ -d html/partial/doc/cli ] || mkdir -p html/partial/doc/cli + scripts/doc-build.sh $< $@ + +html/partial/doc/api/%.html: doc/api/%.md $(html_docdeps) + @[ -d html/partial/doc/api ] || mkdir -p html/partial/doc/api + scripts/doc-build.sh $< $@ + +html/partial/doc/files/npm-json.html: html/partial/doc/files/package.json.html cp $< $@ -html/doc/files/npm-global.html: html/doc/files/npm-folders.html +html/partial/doc/files/npm-global.html: html/partial/doc/files/npm-folders.html cp $< $@ -html/doc/files/%.html: doc/files/%.md $(html_docdeps) - @[ -d html/doc/files ] || mkdir -p html/doc/files +html/partial/doc/files/%.html: doc/files/%.md $(html_docdeps) + @[ -d html/partial/doc/files ] || mkdir -p html/partial/doc/files scripts/doc-build.sh $< $@ -html/doc/misc/%.html: doc/misc/%.md $(html_docdeps) - @[ -d html/doc/misc ] || mkdir -p html/doc/misc +html/partial/doc/misc/%.html: doc/misc/%.md $(html_docdeps) + @[ -d html/partial/doc/misc ] || mkdir -p html/partial/doc/misc scripts/doc-build.sh 
$< $@ -ronn: node_modules/.bin/ronn -node_modules/.bin/ronn: - node cli.js install ronn --no-global +marked: node_modules/.bin/marked + +node_modules/.bin/marked: + node cli.js install marked --no-global + +marked-man: node_modules/.bin/marked-man + +node_modules/.bin/marked-man: + node cli.js install marked-man --no-global doc: man @@ -162,48 +227,15 @@ test: doc node cli.js test +tag: + npm tag npm@$(PUBLISHTAG) latest + publish: link doc @git push origin :v$(shell npm -v) 2>&1 || true - @npm unpublish npm@$(shell npm -v) 2>&1 || true git clean -fd &&\ - git push origin &&\ + git push origin $(BRANCH) &&\ git push origin --tags &&\ - npm publish &&\ - make doc-publish &&\ - make zip-publish - -docpublish: doc-publish -doc-publish: doc - # legacy urls - for f in $$(find html/doc/{cli,files,misc}/ -name '*.html'); do \ - j=$$(basename $$f | sed 's|^npm-||g'); \ - if ! [ -f html/doc/$$j ] && [ $$j != README.html ] && [ $$j != index.html ]; then \ - perl -pi -e 's/ href="\.\.\// href="/g' <$$f >html/doc/$$j; \ - fi; \ - done - mkdir -p html/api - for f in $$(find html/doc/api/ -name '*.html'); do \ - j=$$(basename $$f | sed 's|^npm-||g'); \ - perl -pi -e 's/ href="\.\.\// href="/g' <$$f >html/api/$$j; \ - done - rsync -vazu --stats --no-implied-dirs --delete \ - html/doc/* \ - ../npm-www/doc - rsync -vazu --stats --no-implied-dirs --delete \ - html/static/style.css \ - ../npm-www/static/ - #cleanup - rm -rf html/api - for f in html/doc/*.html; do \ - case $$f in \ - html/doc/README.html) continue ;; \ - html/doc/index.html) continue ;; \ - *) rm $$f ;; \ - esac; \ - done - -zip-publish: release - scp release/* node@nodejs.org:dist/npm/ + npm publish --tag=$(PUBLISHTAG) release: @bash scripts/release.sh @@ -211,4 +243,4 @@ sandwich: @[ $$(whoami) = "root" ] && (echo "ok"; echo "ham" > sandwich) || (echo "make it yourself" && exit 13) -.PHONY: all latest install dev link doc clean uninstall test man doc-publish doc-clean docclean docpublish release zip-publish +.PHONY: 
all latest install dev link doc clean uninstall test man doc-clean docclean release diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm.1 nodejs-0.11.15/deps/npm/man/man1/npm.1 --- nodejs-0.11.13/deps/npm/man/man1/npm.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,230 +1,212 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM" "1" "May 2014" "" "" -. +.TH "NPM" "1" "October 2014" "" "" .SH "NAME" -\fBnpm\fR \-\- node package manager -. -.SH "SYNOPSIS" -. +\fBnpm\fR \- node package manager +.SH SYNOPSIS +.P +.RS 2 .nf npm [args] -. .fi -. -.SH "VERSION" -1.4.9 -. -.SH "DESCRIPTION" +.RE +.SH VERSION +.P +2.1.6 +.SH DESCRIPTION +.P npm is the package manager for the Node JavaScript platform\. It puts modules in place so that node can find them, and manages dependency conflicts intelligently\. -. .P It is extremely configurable to support a wide variety of use cases\. Most commonly, it is used to publish, discover, install, and develop node programs\. -. .P Run \fBnpm help\fR to get a list of available commands\. -. -.SH "INTRODUCTION" +.SH INTRODUCTION +.P You probably got npm because you want to install stuff\. -. .P -npm help Use \fBnpm install blerg\fR to install the latest version of "blerg"\. Check out \fBnpm\-install\fR for more info\. It can do a lot of stuff\. -. +Use \fBnpm install blerg\fR to install the latest version of "blerg"\. Check out +npm help \fBnpm\-install\fR for more info\. It can do a lot of stuff\. +.P +Use the \fBnpm search\fR command to show everything that's available\. +Use \fBnpm ls\fR to show everything you've installed\. +.SH DEPENDENCIES +.P +If a package references to another package with a git URL, npm depends +on a preinstalled git\. +.P +If one of the packages npm tries to install is a native node module and +requires compiling of C++ Code, npm will use +node\-gyp \fIhttps://github\.com/TooTallNate/node\-gyp\fR for that task\. 
+For a Unix system, node\-gyp \fIhttps://github\.com/TooTallNate/node\-gyp\fR +needs Python, make and a buildchain like GCC\. On Windows, +Python and Microsoft Visual Studio C++ is needed\. Python 3 is +not supported by node\-gyp \fIhttps://github\.com/TooTallNate/node\-gyp\fR\|\. +For more information visit +the node\-gyp repository \fIhttps://github\.com/TooTallNate/node\-gyp\fR and +the node\-gyp Wiki \fIhttps://github\.com/TooTallNate/node\-gyp/wiki\fR\|\. +.SH DIRECTORIES .P -Use the \fBnpm search\fR command to show everything that\'s available\. -Use \fBnpm ls\fR to show everything you\'ve installed\. -. -.SH "DIRECTORIES" -npm help See \fBnpm\-folders\fR to learn about where npm puts stuff\. -. +See npm help 5 \fBnpm\-folders\fR to learn about where npm puts stuff\. .P In particular, npm has two modes of operation: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 global mode: -. .br -npm installs packages into the install prefix at \fBprefix/lib/node_modules\fR and bins are installed in \fBprefix/bin\fR\|\. -. -.IP "\(bu" 4 +npm installs packages into the install prefix at +\fBprefix/lib/node_modules\fR and bins are installed in \fBprefix/bin\fR\|\. +.IP \(bu 2 local mode: -. .br npm installs packages into the current project directory, which -defaults to the current working directory\. Packages are installed to \fB\|\./node_modules\fR, and bins are installed to \fB\|\./node_modules/\.bin\fR\|\. -. -.IP "" 0 -. +defaults to the current working directory\. Packages are installed to +\fB\|\./node_modules\fR, and bins are installed to \fB\|\./node_modules/\.bin\fR\|\. + +.RE .P Local mode is the default\. Use \fB\-\-global\fR or \fB\-g\fR on any command to operate in global mode instead\. -. -.SH "DEVELOPER USAGE" -If you\'re using npm to develop and publish your code, check out the +.SH DEVELOPER USAGE +.P +If you're using npm to develop and publish your code, check out the following help topics: -. 
-.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 json: -npm help Make a package\.json file\. See \fBpackage\.json\fR\|\. -. -.IP "\(bu" 4 +Make a package\.json file\. See npm help 5 \fBpackage\.json\fR\|\. +.IP \(bu 2 link: -For linking your current working code into Node\'s path, so that you -don\'t have to reinstall every time you make a change\. Use \fBnpm link\fR to do this\. -. -.IP "\(bu" 4 +For linking your current working code into Node's path, so that you +don't have to reinstall every time you make a change\. Use +\fBnpm link\fR to do this\. +.IP \(bu 2 install: -It\'s a good idea to install things if you don\'t need the symbolic link\. -Especially, installing other peoples code from the registry is done via \fBnpm install\fR -. -.IP "\(bu" 4 +It's a good idea to install things if you don't need the symbolic link\. +Especially, installing other peoples code from the registry is done via +\fBnpm install\fR +.IP \(bu 2 adduser: Create an account or log in\. Credentials are stored in the user config file\. -. -.IP "\(bu" 4 +.IP \(bu 2 publish: Use the \fBnpm publish\fR command to upload your code to the registry\. -. -.IP "" 0 -. -.SH "CONFIGURATION" + +.RE +.SH CONFIGURATION +.P npm is extremely configurable\. It reads its configuration options from 5 places\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 Command line switches: -. .br Set a config with \fB\-\-key val\fR\|\. All keys take a value, even if they -are booleans (the config parser doesn\'t know what the options are at +are booleans (the config parser doesn't know what the options are at the time of parsing\.) If no value is provided, then the option is set to boolean \fBtrue\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 Environment Variables: -. .br -Set any config by prefixing the name in an environment variable with \fBnpm_config_\fR\|\. For example, \fBexport npm_config_key=val\fR\|\. -. -.IP "\(bu" 4 +Set any config by prefixing the name in an environment variable with +\fBnpm_config_\fR\|\. 
For example, \fBexport npm_config_key=val\fR\|\. +.IP \(bu 2 User Configs: -. .br The file at $HOME/\.npmrc is an ini\-formatted list of configs\. If present, it is parsed\. If the \fBuserconfig\fR option is set in the cli or env, then that will be used instead\. -. -.IP "\(bu" 4 +.IP \(bu 2 Global Configs: -. .br The file found at \.\./etc/npmrc (from the node executable, by default this resolves to /usr/local/etc/npmrc) will be parsed if it is found\. If the \fBglobalconfig\fR option is set in the cli, env, or user config, then that file is parsed instead\. -. -.IP "\(bu" 4 +.IP \(bu 2 Defaults: -. .br -npm\'s default configuration options are defined in +npm's default configuration options are defined in lib/utils/config\-defs\.js\. These must not be changed\. -. -.IP "" 0 -. -.P -npm help See \fBnpm\-config\fR for much much more information\. -. -.SH "CONTRIBUTIONS" + +.RE +.P +See npm help 7 \fBnpm\-config\fR for much much more information\. +.SH CONTRIBUTIONS +.P Patches welcome! -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 code: -npm help Read through \fBnpm\-coding\-style\fR if you plan to submit code\. -You don\'t have to agree with it, but you do have to follow it\. -. -.IP "\(bu" 4 +Read through npm help 7 \fBnpm\-coding\-style\fR if you plan to submit code\. +You don't have to agree with it, but you do have to follow it\. +.IP \(bu 2 docs: If you find an error in the documentation, edit the appropriate markdown -file in the "doc" folder\. (Don\'t worry about generating the man page\.) -. -.IP "" 0 -. +file in the "doc" folder\. (Don't worry about generating the man page\.) + +.RE .P -Contributors are listed in npm\'s \fBpackage\.json\fR file\. You can view them +Contributors are listed in npm's \fBpackage\.json\fR file\. You can view them easily by doing \fBnpm view npm contributors\fR\|\. -. 
.P -If you would like to contribute, but don\'t know what to work on, check +If you would like to contribute, but don't know what to work on, check the issues list or ask on the mailing list\. -. -.IP "\(bu" 4 -\fIhttp://github\.com/npm/npm/issues\fR -. -.IP "\(bu" 4 -\fInpm\-@googlegroups\.com\fR -. -.IP "" 0 -. -.SH "BUGS" +.RS 0 +.IP \(bu 2 +http://github\.com/npm/npm/issues +.IP \(bu 2 +npm\-@googlegroups\.com + +.RE +.SH BUGS +.P When you find issues, please report them: -. -.IP "\(bu" 4 -web: \fIhttp://github\.com/npm/npm/issues\fR -. -.IP "\(bu" 4 -email: \fInpm\-@googlegroups\.com\fR -. -.IP "" 0 -. +.RS 0 +.IP \(bu 2 +web: +http://github\.com/npm/npm/issues +.IP \(bu 2 +email: +npm\-@googlegroups\.com + +.RE .P -Be sure to include \fIall\fR of the output from the npm command that didn\'t work +Be sure to include \fIall\fR of the output from the npm command that didn't work as expected\. The \fBnpm\-debug\.log\fR file is also helpful to provide\. -. .P You can also look for isaacs in #node\.js on irc://irc\.freenode\.net\. He will no doubt tell you to put the output in a gist or email\. -. -.SH "AUTHOR" -Isaac Z\. Schlueter \fIhttp://blog\.izs\.me/\fR :: isaacs \fIhttps://github\.com/isaacs/\fR :: @izs \fIhttp://twitter\.com/izs\fR :: \fIi@izs\.me\fR -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH AUTHOR +.P +Isaac Z\. Schlueter \fIhttp://blog\.izs\.me/\fR :: +isaacs \fIhttps://github\.com/isaacs/\fR :: +@izs \fIhttp://twitter\.com/izs\fR :: +i@izs\.me +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help help -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 README -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help index -. 
-.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 index +.IP \(bu 2 npm apihelp npm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-adduser.1 nodejs-0.11.15/deps/npm/man/man1/npm-adduser.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-adduser.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-adduser.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,63 +1,85 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-ADDUSER" "1" "May 2014" "" "" -. +.TH "NPM\-ADDUSER" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-adduser\fR \-\- Add a registry user account -. -.SH "SYNOPSIS" -. +\fBnpm-adduser\fR \- Add a registry user account +.SH SYNOPSIS +.P +.RS 2 .nf -npm adduser -. +npm adduser [\-\-registry=url] [\-\-scope=@orgname] [\-\-always\-auth] .fi -. -.SH "DESCRIPTION" -Create or verify a user named \fB\fR in the npm registry, and -save the credentials to the \fB\|\.npmrc\fR file\. -. +.RE +.SH DESCRIPTION +.P +Create or verify a user named \fB\fR in the specified registry, and +save the credentials to the \fB\|\.npmrc\fR file\. If no registry is specified, +the default registry will be used (see npm help 7 \fBnpm\-config\fR)\. .P The username, password, and email are read in from prompts\. -. .P You may use this command to change your email address, but not username or password\. -. .P -To reset your password, go to \fIhttps://npmjs\.org/forgot\fR -. +To reset your password, go to https://www\.npmjs\.org/forgot .P You may use this command multiple times with the same user account to authorize on a new machine\. -. -.SH "CONFIGURATION" -. -.SS "registry" +.P +\fBnpm login\fR is an alias to \fBadduser\fR and behaves exactly the same way\. +.SH CONFIGURATION +.SS registry +.P Default: http://registry\.npmjs\.org/ -. .P -The base URL of the npm package registry\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help registry -. 
-.IP "\(bu" 4 +The base URL of the npm package registry\. If \fBscope\fR is also specified, +this registry will only be used for packages with that scope\. See npm help 7 \fBnpm\-scope\fR\|\. +.SS scope +.P +Default: none +.P +If specified, the user and login credentials given will be associated +with the specified scope\. See npm help 7 \fBnpm\-scope\fR\|\. You can use both at the same time, +e\.g\. +.P +.RS 2 +.nf +npm adduser \-\-registry=http://myregistry\.example\.com \-\-scope=@myco +.fi +.RE +.P +This will set a registry for the given scope and login or create a user for +that registry at the same time\. +.SS always\-auth +.P +Default: false +.P +If specified, save configuration indicating that all requests to the given +registry should include authorization information\. Useful for private +registries\. Can be used with \fB\-\-registry\fR and / or \fB\-\-scope\fR, e\.g\. +.P +.RS 2 +.nf +npm adduser \-\-registry=http://private\-registry\.example\.com \-\-always\-auth +.fi +.RE +.P +This will ensure that all requests to that registry (including for tarballs) +include an authorization header\. See \fBalways\-auth\fR in npm help 7 \fBnpm\-config\fR for more +details on always\-auth\. Registry\-specific configuaration of \fBalways\-auth\fR takes +precedence over any global configuration\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 npm help owner -. -.IP "\(bu" 4 +.IP \(bu 2 npm help whoami -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-bin.1 nodejs-0.11.15/deps/npm/man/man1/npm-bin.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-bin.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-bin.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,30 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. 
-.TH "NPM\-BIN" "1" "May 2014" "" "" -. +.TH "NPM\-BIN" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-bin\fR \-\- Display npm bin folder -. -.SH "SYNOPSIS" -. +\fBnpm-bin\fR \- Display npm bin folder +.SH SYNOPSIS +.P +.RS 2 .nf npm bin -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the folder where npm will install executables\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help prefix -. -.IP "\(bu" 4 +.IP \(bu 2 npm help root -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-bugs.1 nodejs-0.11.15/deps/npm/man/man1/npm-bugs.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-bugs.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-bugs.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,78 +1,59 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-BUGS" "1" "May 2014" "" "" -. +.TH "NPM\-BUGS" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-bugs\fR \-\- Bugs for a package in a web browser maybe -. -.SH "SYNOPSIS" -. +\fBnpm-bugs\fR \- Bugs for a package in a web browser maybe +.SH SYNOPSIS +.P +.RS 2 .nf npm bugs npm bugs (with no args in a package dir) -. .fi -. -.SH "DESCRIPTION" -This command tries to guess at the likely location of a package\'s +.RE +.SH DESCRIPTION +.P +This command tries to guess at the likely location of a package's bug tracker URL, and then tries to open it using the \fB\-\-browser\fR config param\. If no package name is provided, it will search for a \fBpackage\.json\fR in the current folder and use the \fBname\fR property\. -. -.SH "CONFIGURATION" -. -.SS "browser" -. 
-.IP "\(bu" 4 +.SH CONFIGURATION +.SS browser +.RS 0 +.IP \(bu 2 Default: OS X: \fB"open"\fR, Windows: \fB"start"\fR, Others: \fB"xdg\-open"\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The browser that is called by the \fBnpm bugs\fR command to open websites\. -. -.SS "registry" -. -.IP "\(bu" 4 +.SS registry +.RS 0 +.IP \(bu 2 Default: https://registry\.npmjs\.org/ -. -.IP "\(bu" 4 +.IP \(bu 2 Type: url -. -.IP "" 0 -. + +.RE .P The base URL of the npm package registry\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help docs -. -.IP "\(bu" 4 +.IP \(bu 2 npm help view -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 5 package\.json + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-build.1 nodejs-0.11.15/deps/npm/man/man1/npm-build.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-build.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-build.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,34 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-BUILD" "1" "May 2014" "" "" -. +.TH "NPM\-BUILD" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-build\fR \-\- Build a package -. -.SH "SYNOPSIS" -. +\fBnpm-build\fR \- Build a package +.SH SYNOPSIS +.P +.RS 2 .nf npm build -. .fi -. -.IP "\(bu" 4 +.RE +.RS 0 +.IP \(bu 2 \fB\fR: A folder containing a \fBpackage\.json\fR file in its root\. -. -.IP "" 0 -. -.SH "DESCRIPTION" + +.RE +.SH DESCRIPTION +.P This is the plumbing command called by \fBnpm link\fR and \fBnpm install\fR\|\. -. .P It should generally not be called directly\. -. -.SH "SEE ALSO" -. 
-.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help link -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "" 0 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 +npm help 5 package\.json + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-bundle.1 nodejs-0.11.15/deps/npm/man/man1/npm-bundle.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-bundle.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-bundle.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,23 +1,17 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-BUNDLE" "1" "May 2014" "" "" -. +.TH "NPM\-BUNDLE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-bundle\fR \-\- REMOVED -. -.SH "DESCRIPTION" +\fBnpm-bundle\fR \- REMOVED +.SH DESCRIPTION +.P The \fBnpm bundle\fR command has been removed in 1\.0, for the simple reason that it is no longer necessary, as the default behavior is now to install packages into the local space\. -. .P Just use \fBnpm install\fR now to do what \fBnpm bundle\fR used to do\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help install -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-cache.1 nodejs-0.11.15/deps/npm/man/man1/npm-cache.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-cache.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-cache.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,104 +1,86 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-CACHE" "1" "May 2014" "" "" -. +.TH "NPM\-CACHE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-cache\fR \-\- Manipulates packages cache -. -.SH "SYNOPSIS" -. +\fBnpm-cache\fR \- Manipulates packages cache +.SH SYNOPSIS +.P +.RS 2 .nf npm cache add npm cache add npm cache add npm cache add @ + npm cache ls [] + npm cache clean [] -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Used to add, list, or clear the npm cache folder\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 add: Add the specified package to the local cache\. This command is primarily intended to be used internally by npm, but it can provide a way to add data to the local installation cache explicitly\. -. -.IP "\(bu" 4 +.IP \(bu 2 ls: Show the data in the cache\. Argument is a path to show in the cache -folder\. Works a bit like the \fBfind\fR program, but limited by the \fBdepth\fR config\. -. -.IP "\(bu" 4 +folder\. Works a bit like the \fBfind\fR program, but limited by the +\fBdepth\fR config\. +.IP \(bu 2 clean: Delete data out of the cache folder\. If an argument is provided, then it specifies a subpath to delete\. If no argument is provided, then the entire cache is cleared\. -. -.IP "" 0 -. -.SH "DETAILS" + +.RE +.SH DETAILS +.P npm stores cache data in the directory specified in \fBnpm config get cache\fR\|\. For each package that is added to the cache, three pieces of information are stored in \fB{cache}/{name}/{version}\fR: -. -.IP "\(bu" 4 -\|\.\.\./package/: -A folder containing the package contents as they appear in the tarball\. -. -.IP "\(bu" 4 -\|\.\.\./package\.json: -The package\.json file, as npm sees it, with overlays applied and a _id attribute\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 +\|\.\.\./package/package\.json: +The package\.json file, as npm sees it\. +.IP \(bu 2 \|\.\.\./package\.tgz: The tarball for that version\. -. -.IP "" 0 -. + +.RE .P Additionally, whenever a registry request is made, a \fB\|\.cache\.json\fR file is placed at the corresponding URI, to store the ETag and the requested -data\. -. +data\. This is stored in \fB{cache}/{hostname}/{path}/\.cache\.json\fR\|\. .P -Commands that make non\-essential registry requests (such as \fBsearch\fR and \fBview\fR, or the completion scripts) generally specify a minimum timeout\. 
+Commands that make non\-essential registry requests (such as \fBsearch\fR and +\fBview\fR, or the completion scripts) generally specify a minimum timeout\. If the \fB\|\.cache\.json\fR file is younger than the specified timeout, then they do not make an HTTP request to the registry\. -. -.SH "CONFIGURATION" -. -.SS "cache" +.SH CONFIGURATION +.SS cache +.P Default: \fB~/\.npm\fR on Posix, or \fB%AppData%/npm\-cache\fR on Windows\. -. .P The root cache folder\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm help pack -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-completion.1 nodejs-0.11.15/deps/npm/man/man1/npm-completion.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-completion.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-completion.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,37 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-COMPLETION" "1" "May 2014" "" "" -. +.TH "NPM\-COMPLETION" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-completion\fR \-\- Tab Completion for npm -. -.SH "SYNOPSIS" -. +\fBnpm-completion\fR \- Tab Completion for npm +.SH SYNOPSIS +.P +.RS 2 .nf \|\. <(npm completion) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Enables tab\-completion in all npm commands\. -. .P The synopsis above loads the completions into your current shell\. Adding it to your ~/\.bashrc or ~/\.zshrc will make the completions available everywhere\. -. 
.P You may of course also pipe the output of npm completion to a file such as \fB/usr/local/etc/bash_completion\.d/npm\fR if you have a system that will read that file for you\. -. .P When \fBCOMP_CWORD\fR, \fBCOMP_LINE\fR, and \fBCOMP_POINT\fR are defined in the environment, \fBnpm completion\fR acts in "plumbing mode", and outputs completions based on the arguments\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help developers -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 developers +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help npm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-config.1 nodejs-0.11.15/deps/npm/man/man1/npm-config.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-config.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-config.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,13 +1,9 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-CONFIG" "1" "May 2014" "" "" -. +.TH "NPM\-CONFIG" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-config\fR \-\- Manage the npm configuration files -. -.SH "SYNOPSIS" -. +\fBnpm-config\fR \- Manage the npm configuration files +.SH SYNOPSIS +.P +.RS 2 .nf npm config set [\-\-global] npm config get @@ -17,97 +13,83 @@ npm c [set|get|delete|list] npm get npm set [\-\-global] -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P npm gets its config settings from the command line, environment variables, \fBnpmrc\fR files, and in some cases, the \fBpackage\.json\fR file\. -. .P -npm help See npmrc for more information about the npmrc files\. -. +See npm help 5 npmrc for more information about the npmrc files\. .P -npm help See \fBnpm\-config\fR for a more thorough discussion of the mechanisms +See npm help 7 \fBnpm\-config\fR for a more thorough discussion of the mechanisms involved\. -. .P The \fBnpm config\fR command can be used to update and edit the contents of the user and global npmrc files\. -. 
-.SH "Sub\-commands" +.SH Sub\-commands +.P Config supports the following sub\-commands: -. -.SS "set" -. +.SS set +.P +.RS 2 .nf npm config set key value -. .fi -. +.RE .P Sets the config key to the value\. -. .P If value is omitted, then it sets it to "true"\. -. -.SS "get" -. +.SS get +.P +.RS 2 .nf npm config get key -. .fi -. +.RE .P Echo the config value to stdout\. -. -.SS "list" -. +.SS list +.P +.RS 2 .nf npm config list -. .fi -. +.RE .P Show all the config settings\. -. -.SS "delete" -. +.SS delete +.P +.RS 2 .nf npm config delete key -. .fi -. +.RE .P Deletes the key from all configuration files\. -. -.SS "edit" -. +.SS edit +.P +.RS 2 .nf npm config edit -. .fi -. +.RE .P Opens the config file in an editor\. Use the \fB\-\-global\fR flag to edit the global config\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 npm help npm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-dedupe.1 nodejs-0.11.15/deps/npm/man/man1/npm-dedupe.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-dedupe.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-dedupe.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,96 +1,74 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-DEDUPE" "1" "May 2014" "" "" -. +.TH "NPM\-DEDUPE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-dedupe\fR \-\- Reduce duplication -. -.SH "SYNOPSIS" -. +\fBnpm-dedupe\fR \- Reduce duplication +.SH SYNOPSIS +.P +.RS 2 .nf npm dedupe [package names\.\.\.] npm ddp [package names\.\.\.] -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Searches the local package tree and attempts to simplify the overall structure by moving dependencies further up the tree, where they can be more effectively shared by multiple dependent packages\. -. .P For example, consider this dependency graph: -. -.IP "" 4 -. +.P +.RS 2 .nf a +\-\- b <\-\- depends on c@1\.0\.x | `\-\- c@1\.0\.3 `\-\- d <\-\- depends on c@~1\.0\.9 `\-\- c@1\.0\.10 -. .fi -. -.IP "" 0 -. -.P -npm help In this case, \fBnpm\-dedupe\fR will transform the tree to: -. -.IP "" 4 -. +.RE +.P +In this case, npm help \fBnpm\-dedupe\fR will transform the tree to: +.P +.RS 2 .nf a +\-\- b +\-\- d `\-\- c@1\.0\.10 -. .fi -. -.IP "" 0 -. +.RE .P -Because of the hierarchical nature of node\'s module lookup, b and d +Because of the hierarchical nature of node's module lookup, b and d will both get their dependency met by the single c package at the root level of the tree\. -. .P If a suitable version exists at the target location in the tree already, then it will be left untouched, but the other duplicates will be deleted\. -. .P If no suitable version can be found, then a warning is printed, and nothing is done\. -. .P If any arguments are supplied, then they are filters, and only the named packages will be touched\. -. .P Note that this operation transforms the dependency tree, and may result in packages getting updated versions, perhaps from the npm registry\. -. .P This feature is experimental, and may change in future versions\. -. .P The \fB\-\-tag\fR argument will apply to all of the affected dependencies\. If a tag with the given name exists, the tagged version is preferred over newer versions\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help ls -. -.IP "\(bu" 4 +.IP \(bu 2 npm help update -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-deprecate.1 nodejs-0.11.15/deps/npm/man/man1/npm-deprecate.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-deprecate.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-deprecate.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,48 +1,37 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-DEPRECATE" "1" "May 2014" "" "" -. +.TH "NPM\-DEPRECATE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-deprecate\fR \-\- Deprecate a version of a package -. -.SH "SYNOPSIS" -. +\fBnpm-deprecate\fR \- Deprecate a version of a package +.SH SYNOPSIS +.P +.RS 2 .nf npm deprecate [@] -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will update the npm registry entry for a package, providing a deprecation warning to all who attempt to install it\. -. .P It works on version ranges as well as specific versions, so you can do something like this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm deprecate my\-thing@"< 0\.2\.3" "critical bug fixed in v0\.2\.3" -. .fi -. -.IP "" 0 -. +.RE .P -Note that you must be the package owner to deprecate something\. See the \fBowner\fR and \fBadduser\fR help topics\. -. +Note that you must be the package owner to deprecate something\. See the +\fBowner\fR and \fBadduser\fR help topics\. .P To un\-deprecate a package, specify an empty string (\fB""\fR) for the \fBmessage\fR argument\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 -npm help registry -. -.IP "" 0 +.IP \(bu 2 +npm help 7 registry + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-docs.1 nodejs-0.11.15/deps/npm/man/man1/npm-docs.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-docs.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-docs.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,78 +1,60 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. 
-.TH "NPM\-DOCS" "1" "May 2014" "" "" -. +.TH "NPM\-DOCS" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-docs\fR \-\- Docs for a package in a web browser maybe -. -.SH "SYNOPSIS" -. +\fBnpm-docs\fR \- Docs for a package in a web browser maybe +.SH SYNOPSIS +.P +.RS 2 .nf npm docs [ [ \.\.\.]] npm docs (with no args in a package dir) npm home [ [ \.\.\.]] npm home (with no args in a package dir) -. .fi -. -.SH "DESCRIPTION" -This command tries to guess at the likely location of a package\'s +.RE +.SH DESCRIPTION +.P +This command tries to guess at the likely location of a package's documentation URL, and then tries to open it using the \fB\-\-browser\fR config param\. You can pass multiple package names at once\. If no package name is provided, it will search for a \fBpackage\.json\fR in the current folder and use the \fBname\fR property\. -. -.SH "CONFIGURATION" -. -.SS "browser" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS browser +.RS 0 +.IP \(bu 2 Default: OS X: \fB"open"\fR, Windows: \fB"start"\fR, Others: \fB"xdg\-open"\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The browser that is called by the \fBnpm docs\fR command to open websites\. -. -.SS "registry" -. -.IP "\(bu" 4 +.SS registry +.RS 0 +.IP \(bu 2 Default: https://registry\.npmjs\.org/ -. -.IP "\(bu" 4 +.IP \(bu 2 Type: url -. -.IP "" 0 -. + +.RE .P The base URL of the npm package registry\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help view -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help package\.json -. 
-.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 5 package\.json + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-edit.1 nodejs-0.11.15/deps/npm/man/man1/npm-edit.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-edit.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-edit.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,66 +1,50 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-EDIT" "1" "May 2014" "" "" -. +.TH "NPM\-EDIT" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-edit\fR \-\- Edit an installed package -. -.SH "SYNOPSIS" -. +\fBnpm-edit\fR \- Edit an installed package +.SH SYNOPSIS +.P +.RS 2 .nf npm edit [@] -. .fi -. -.SH "DESCRIPTION" -Opens the package folder in the default editor (or whatever you\'ve -npm help configured as the npm \fBeditor\fR config \-\- see \fBnpm\-config\fR\|\.) -. +.RE +.SH DESCRIPTION +.P +Opens the package folder in the default editor (or whatever you've +configured as the npm \fBeditor\fR config \-\- see npm help 7 \fBnpm\-config\fR\|\.) .P After it has been edited, the package is rebuilt so as to pick up any changes in compiled packages\. -. .P For instance, you can do \fBnpm install connect\fR to install connect into your package, and then \fBnpm edit connect\fR to make a few changes to your locally installed copy\. -. -.SH "CONFIGURATION" -. -.SS "editor" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS editor +.RS 0 +.IP \(bu 2 Default: \fBEDITOR\fR environment variable if set, or \fB"vi"\fR on Posix, or \fB"notepad"\fR on Windows\. -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The command to run for \fBnpm edit\fR or \fBnpm config edit\fR\|\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help explore -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. 
-.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-explore.1 nodejs-0.11.15/deps/npm/man/man1/npm-explore.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-explore.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-explore.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,76 +1,55 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-EXPLORE" "1" "May 2014" "" "" -. +.TH "NPM\-EXPLORE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-explore\fR \-\- Browse an installed package -. -.SH "SYNOPSIS" -. +\fBnpm-explore\fR \- Browse an installed package +.SH SYNOPSIS +.P +.RS 2 .nf -npm explore [@] [ \-\- ] -. +npm explore [ \-\- ] .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Spawn a subshell in the directory of the installed package specified\. -. .P If a command is specified, then it is run in the subshell, which then immediately terminates\. -. .P -This is particularly handy in the case of git submodules in the \fBnode_modules\fR folder: -. -.IP "" 4 -. +This is particularly handy in the case of git submodules in the +\fBnode_modules\fR folder: +.P +.RS 2 .nf npm explore some\-dependency \-\- git pull origin master -. .fi -. -.IP "" 0 -. +.RE .P Note that the package is \fInot\fR automatically rebuilt afterwards, so be sure to use \fBnpm rebuild \fR if you make any changes\. -. -.SH "CONFIGURATION" -. -.SS "shell" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS shell +.RS 0 +.IP \(bu 2 Default: SHELL environment variable, or "bash" on Posix, or "cmd" on Windows -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The shell to run for the \fBnpm explore\fR command\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help submodule -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help edit -. 
-.IP "\(bu" 4 +.IP \(bu 2 npm help rebuild -. -.IP "\(bu" 4 +.IP \(bu 2 npm help build -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-help.1 nodejs-0.11.15/deps/npm/man/man1/npm-help.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-help.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-help.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,77 +1,57 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-HELP" "1" "May 2014" "" "" -. +.TH "NPM\-HELP" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-help\fR \-\- Get help on npm -. -.SH "SYNOPSIS" -. +\fBnpm-help\fR \- Get help on npm +.SH SYNOPSIS +.P +.RS 2 .nf npm help npm help some search terms -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P If supplied a topic, then show the appropriate documentation page\. -. .P If the topic does not exist, or if multiple terms are provided, then run the \fBhelp\-search\fR command to find a match\. Note that, if \fBhelp\-search\fR finds a single subject, then it will run \fBhelp\fR on that topic, so unique matches are equivalent to specifying a topic name\. -. -.SH "CONFIGURATION" -. -.SS "viewer" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS viewer +.RS 0 +.IP \(bu 2 Default: "man" on Posix, "browser" on Windows -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The program to use to view help content\. -. .P Set to \fB"browser"\fR to view html help content in the default web browser\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help npm -. -.IP "\(bu" 4 +.IP \(bu 2 README -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help package\.json -. 
-.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help help\-search -. -.IP "\(bu" 4 -npm help index -. -.IP "" 0 +.IP \(bu 2 +npm help 7 index + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-help-search.1 nodejs-0.11.15/deps/npm/man/man1/npm-help-search.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-help-search.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-help-search.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,59 +1,45 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-HELP\-SEARCH" "1" "May 2014" "" "" -. +.TH "NPM\-HELP\-SEARCH" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-help-search\fR \-\- Search npm help documentation -. -.SH "SYNOPSIS" -. +\fBnpm-help-search\fR \- Search npm help documentation +.SH SYNOPSIS +.P +.RS 2 .nf npm help\-search some search terms -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will search the npm markdown documentation files for the terms provided, and then list the results, sorted by relevance\. -. .P If only one result is found, then it will show that help topic\. -. .P If the argument to \fBnpm help\fR is not a known help topic, then it will call \fBhelp\-search\fR\|\. It is rarely if ever necessary to call this command directly\. -. -.SH "CONFIGURATION" -. -.SS "long" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS long +.RS 0 +.IP \(bu 2 Type: Boolean -. -.IP "\(bu" 4 +.IP \(bu 2 Default false -. -.IP "" 0 -. + +.RE .P If true, the "long" flag will cause help\-search to output context around where the terms were found in the documentation\. -. .P If false, then help\-search will just list out the help topics found\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help npm -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help help -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-init.1 nodejs-0.11.15/deps/npm/man/man1/npm-init.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-init.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-init.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,36 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-INIT" "1" "May 2014" "" "" -. +.TH "NPM\-INIT" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-init\fR \-\- Interactively create a package\.json file -. -.SH "SYNOPSIS" -. +\fBnpm-init\fR \- Interactively create a package\.json file +.SH SYNOPSIS +.P +.RS 2 .nf -npm init -. +npm init [\-f|\-\-force|\-y|\-\-yes] .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This will ask you a bunch of questions, and then write a package\.json for you\. -. .P It attempts to make reasonable guesses about what you want things to be set to, -and then writes a package\.json file with the options you\'ve selected\. -. +and then writes a package\.json file with the options you've selected\. .P -If you already have a package\.json file, it\'ll read that first, and default to +If you already have a package\.json file, it'll read that first, and default to the options in there\. -. .P It is strictly additive, so it does not delete options from your package\.json without a really good reason to do so\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -\fIhttps://github\.com/isaacs/init\-package\-json\fR -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +.P +If you invoke it with \fB\-f\fR, \fB\-\-force\fR, \fB\-y\fR, or \fB\-\-yes\fR, it will use only +defaults and not prompt you for any options\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +https://github\.com/isaacs/init\-package\-json +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help version -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-install.1 nodejs-0.11.15/deps/npm/man/man1/npm-install.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-install.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-install.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,334 +1,269 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-INSTALL" "1" "May 2014" "" "" -. +.TH "NPM\-INSTALL" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-install\fR \-\- Install a package -. -.SH "SYNOPSIS" -. +\fBnpm-install\fR \- Install a package +.SH SYNOPSIS +.P +.RS 2 .nf npm install (with no args in a package dir) npm install npm install npm install -npm install [\-\-save|\-\-save\-dev|\-\-save\-optional] [\-\-save\-exact] -npm install @ -npm install @ -npm install @ +npm install [@/] [\-\-save|\-\-save\-dev|\-\-save\-optional] [\-\-save\-exact] +npm install [@/]@ +npm install [@/]@ +npm install [@/]@ npm i (with any of the previous argument usage) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command installs a package, and any packages that it depends on\. If the package has a shrinkwrap file, the installation of dependencies will be driven -npm help by that\. See npm\-shrinkwrap\. -. +by that\. See npm help shrinkwrap\. .P A \fBpackage\fR is: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 a) a folder containing a program described by a package\.json file -. -.IP "\(bu" 4 +.IP \(bu 2 b) a gzipped tarball containing (a) -. -.IP "\(bu" 4 +.IP \(bu 2 c) a url that resolves to (b) -. -.IP "\(bu" 4 -d) a \fB@\fR that is published on the registry (npm help see \fBnpm\-registry\fR) with (c) -. -.IP "\(bu" 4 +.IP \(bu 2 +d) a \fB@\fR that is published on the registry (see npm help 7 \fBnpm\-registry\fR) with (c) +.IP \(bu 2 e) a \fB@\fR that points to (d) -. -.IP "\(bu" 4 +.IP \(bu 2 f) a \fB\fR that has a "latest" tag satisfying (e) -. -.IP "\(bu" 4 +.IP \(bu 2 g) a \fB\fR that resolves to (b) -. -.IP "" 0 -. 
+ +.RE .P Even if you never publish your package, you can still get a lot of benefits of using npm if you just want to write a node program (a), and perhaps if you also want to be able to easily install it elsewhere after packing it up into a tarball (b)\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fBnpm install\fR (in package directory, no arguments): -. -.IP -Install the dependencies in the local node_modules folder\. -. -.IP -In global mode (ie, with \fB\-g\fR or \fB\-\-global\fR appended to the command), -it installs the current package context (ie, the current working -directory) as a global package\. -. -.IP -By default, \fBnpm install\fR will install all modules listed as -dependencies\. With the \fB\-\-production\fR flag, -npm will not install modules listed in \fBdevDependencies\fR\|\. -. -.IP "\(bu" 4 + Install the dependencies in the local node_modules folder\. + In global mode (ie, with \fB\-g\fR or \fB\-\-global\fR appended to the command), + it installs the current package context (ie, the current working + directory) as a global package\. + By default, \fBnpm install\fR will install all modules listed as + dependencies\. With the \fB\-\-production\fR flag, + npm will not install modules listed in \fBdevDependencies\fR\|\. +.IP \(bu 2 \fBnpm install \fR: -. -.IP -Install a package that is sitting in a folder on the filesystem\. -. -.IP "\(bu" 4 + Install a package that is sitting in a folder on the filesystem\. +.IP \(bu 2 \fBnpm install \fR: -. -.IP -Install a package that is sitting on the filesystem\. Note: if you just want -to link a dev directory into your npm root, you can do this more easily by -using \fBnpm link\fR\|\. -. -.IP -Example: -. -.IP "" 4 -. + Install a package that is sitting on the filesystem\. Note: if you just want + to link a dev directory into your npm root, you can do this more easily by + using \fBnpm link\fR\|\. + Example: +.P +.RS 2 .nf - npm install \./package\.tgz -. + npm install \./package\.tgz .fi -. -.IP "" 0 - -. 
-.IP "\(bu" 4 +.RE +.IP \(bu 2 \fBnpm install \fR: -. -.IP -Fetch the tarball url, and then install it\. In order to distinguish between -this and other options, the argument must start with "http://" or "https://" -. -.IP -Example: -. -.IP "" 4 -. -.nf - npm install https://github\.com/indexzero/forever/tarball/v0\.5\.6 -. -.fi -. -.IP "" 0 - -. -.IP "\(bu" 4 -\fBnpm install [\-\-save|\-\-save\-dev|\-\-save\-optional]\fR: -. -.IP -Do a \fB@\fR install, where \fB\fR is the "tag" config\. (npm help See \fBnpm\-config\fR\|\.) -. -.IP -In most cases, this will install the latest version -of the module published on npm\. -. -.IP -Example: -. -.IP - npm install sax -. -.IP -\fBnpm install\fR takes 3 exclusive, optional flags which save or update -the package version in your main package\.json: -. -.IP "\(bu" 4 + Fetch the tarball url, and then install it\. In order to distinguish between + this and other options, the argument must start with "http://" or "https://" + Example: +.P +.RS 2 +.nf + npm install https://github\.com/indexzero/forever/tarball/v0\.5\.6 +.fi +.RE +.IP \(bu 2 +\fBnpm install [@/] [\-\-save|\-\-save\-dev|\-\-save\-optional]\fR: + Do a \fB@\fR install, where \fB\fR is the "tag" config\. (See + npm help 7 \fBnpm\-config\fR\|\.) + In most cases, this will install the latest version + of the module published on npm\. + Example: +.P +.RS 2 +.nf + npm install sax +.fi +.RE + \fBnpm install\fR takes 3 exclusive, optional flags which save or update + the package version in your main package\.json: +.RS 0 +.IP \(bu 2 \fB\-\-save\fR: Package will appear in your \fBdependencies\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-\-save\-dev\fR: Package will appear in your \fBdevDependencies\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-\-save\-optional\fR: Package will appear in your \fBoptionalDependencies\fR\|\. -. -.IP When using any of the above options to save dependencies to your package\.json, there is an additional, optional flag: -. 
-.IP "\(bu" 4 +.IP \(bu 2 \fB\-\-save\-exact\fR: Saved dependencies will be configured with an -exact version rather than using npm\'s default semver range +exact version rather than using npm's default semver range operator\. -. -.IP +\fB\fR is optional\. The package will be downloaded from the registry +associated with the specified scope\. If no registry is associated with +the given scope the default registry is assumed\. See npm help 7 \fBnpm\-scope\fR\|\. +Note: if you do not include the @\-symbol on your scope name, npm will +interpret this as a GitHub repository instead, see below\. Scopes names +must also be followed by a slash\. Examples: -. -.IP - npm install sax \-\-save - npm install node\-tap \-\-save\-dev - npm install dtrace\-provider \-\-save\-optional - npm install readable\-stream \-\-save \-\-save\-exact -. -.IP -\fBNote\fR: If there is a file or folder named \fB\fR in the current -working directory, then it will try to install that, and only try to -fetch the package by name if it is not valid\. -. -.IP "" 0 - -. -.IP "\(bu" 4 -\fBnpm install @\fR: -. -.IP -Install the version of the package that is referenced by the specified tag\. -If the tag does not exist in the registry data for that package, then this -will fail\. -. -.IP -Example: -. -.IP "" 4 -. +.P +.RS 2 .nf - npm install sax@latest -. +npm install sax \-\-save +npm install githubname/reponame +npm install @myorg/privatepackage +npm install node\-tap \-\-save\-dev +npm install dtrace\-provider \-\-save\-optional +npm install readable\-stream \-\-save \-\-save\-exact .fi -. -.IP "" 0 +.RE + +.RE -. -.IP "\(bu" 4 -\fBnpm install @\fR: -. -.IP -Install the specified version of the package\. This will fail if the version -has not been published to the registry\. -. -.IP -Example: -. -.IP "" 4 -. +.RE +.P +.RS 2 .nf - npm install sax@0\.1\.1 -. 
+**Note**: If there is a file or folder named `` in the current +working directory, then it will try to install that, and only try to +fetch the package by name if it is not valid\. .fi -. -.IP "" 0 - -. -.IP "\(bu" 4 -\fBnpm install @\fR: -. -.IP -Install a version of the package matching the specified version range\. This -npm help will follow the same rules for resolving dependencies described in \fBpackage\.json\fR\|\. -. -.IP -Note that most version ranges must be put in quotes so that your shell will -treat it as a single argument\. -. -.IP -Example: -. -.IP - npm install sax@">=0\.1\.0 <0\.2\.0" -. -.IP "\(bu" 4 +.RE +.RS 0 +.IP \(bu 2 +\fBnpm install [@/]@\fR: + Install the version of the package that is referenced by the specified tag\. + If the tag does not exist in the registry data for that package, then this + will fail\. + Example: +.P +.RS 2 +.nf + npm install sax@latest + npm install @myorg/mypackage@latest +.fi +.RE +.IP \(bu 2 +\fBnpm install [@/]@\fR: + Install the specified version of the package\. This will fail if the + version has not been published to the registry\. + Example: +.P +.RS 2 +.nf + npm install sax@0\.1\.1 + npm install @myorg/privatepackage@1\.5\.0 +.fi +.RE +.IP \(bu 2 +\fBnpm install [@/]@\fR: + Install a version of the package matching the specified version range\. This + will follow the same rules for resolving dependencies described in npm help 5 \fBpackage\.json\fR\|\. + Note that most version ranges must be put in quotes so that your shell will + treat it as a single argument\. + Example: +.P +.RS 2 +.nf + npm install sax@">=0\.1\.0 <0\.2\.0" + npm install @myorg/privatepackage@">=0\.1\.0 <0\.2\.0" +.fi +.RE +.IP \(bu 2 +\fBnpm install /\fR: + Install the package at \fBhttps://github\.com/githubname/githubrepo" by + attempting to clone it using\fRgit`\. + Example: +.P +.RS 2 +.nf + npm install mygithubuser/myproject +.fi +.RE + To reference a package in a git repo that is not on GitHub, see git + remote urls below\. 
+.IP \(bu 2 \fBnpm install \fR: -. -.IP -Install a package by cloning a git remote url\. The format of the git -url is: -. -.IP - ://[@][#] -. -.IP -\fB\fR is one of \fBgit\fR, \fBgit+ssh\fR, \fBgit+http\fR, or \fBgit+https\fR\|\. If no \fB\fR is specified, then \fBmaster\fR is -used\. -. -.IP -Examples: -. -.IP "" 4 -. -.nf - git+ssh://git@github\.com:npm/npm\.git#v1\.0\.27 - git+https://isaacs@github\.com/npm/npm\.git - git://github\.com/npm/npm\.git#v1\.0\.27 -. + Install a package by cloning a git remote url\. The format of the git + url is: +.P +.RS 2 +.nf + ://[@][#] +.fi +.RE + \fB\fR is one of \fBgit\fR, \fBgit+ssh\fR, \fBgit+http\fR, or + \fBgit+https\fR\|\. If no \fB\fR is specified, then \fBmaster\fR is + used\. + Examples: +.P +.RS 2 +.nf + git+ssh://git@github\.com:npm/npm\.git#v1\.0\.27 + git+https://isaacs@github\.com/npm/npm\.git + git://github\.com/npm/npm\.git#v1\.0\.27 .fi -. -.IP "" 0 +.RE -. -.IP "" 0 -. +.RE .P You may combine multiple arguments, and even multiple types of arguments\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf npm install sax@">=0\.1\.0 <0\.2\.0" bench supervisor -. .fi -. -.IP "" 0 -. +.RE .P The \fB\-\-tag\fR argument will apply to all of the specified install targets\. If a tag with the given name exists, the tagged version is preferred over newer versions\. -. .P The \fB\-\-force\fR argument will force npm to fetch remote resources even if a local copy exists on disk\. -. -.IP "" 4 -. +.P +.RS 2 .nf npm install sax \-\-force -. .fi -. -.IP "" 0 -. +.RE .P The \fB\-\-global\fR argument will cause npm to install the package globally -npm help rather than locally\. See \fBnpm\-folders\fR\|\. -. +rather than locally\. See npm help 5 \fBnpm\-folders\fR\|\. .P The \fB\-\-link\fR argument will cause npm to link global installs into the local space in some cases\. -. .P The \fB\-\-no\-bin\-links\fR argument will prevent npm from creating symlinks for any binaries the package might contain\. -. 
.P The \fB\-\-no\-optional\fR argument will prevent optional dependencies from being installed\. -. .P The \fB\-\-no\-shrinkwrap\fR argument, which will ignore an available shrinkwrap file and use the package\.json instead\. -. .P The \fB\-\-nodedir=/path/to/node/source\fR argument will allow npm to find the node source code so that npm can compile native modules\. -. .P -npm help See \fBnpm\-config\fR\|\. Many of the configuration params have some -effect on installation, since that\'s most of what npm does\. -. -.SH "ALGORITHM" +See npm help 7 \fBnpm\-config\fR\|\. Many of the configuration params have some +effect on installation, since that's most of what npm does\. +.SH ALGORITHM +.P To install a package, npm uses the following algorithm: -. -.IP "" 4 -. +.P +.RS 2 .nf install(where, what, family, ancestors) fetch what, unpack to /node_modules/ @@ -339,103 +274,78 @@ and not in add precise version deps to install(/node_modules/, dep, family) -. .fi -. -.IP "" 0 -. +.RE .P For this \fBpackage{dep}\fR structure: \fBA{B,C}, B{C}, C{D}\fR, this algorithm produces: -. -.IP "" 4 -. +.P +.RS 2 .nf A +\-\- B `\-\- C `\-\- D -. .fi -. -.IP "" 0 -. +.RE .P That is, the dependency from B to C is satisfied by the fact that A already caused C to be installed at a higher level\. -. .P -npm help See npm\-folders for a more detailed description of the specific +See npm help 5 folders for a more detailed description of the specific folder structures that npm creates\. -. -.SS "Limitations of npm's Install Algorithm" +.SS Limitations of npm's Install Algorithm +.P There are some very rare and pathological edge\-cases where a cycle can cause npm to try to install a never\-ending tree of packages\. Here is the simplest case: -. -.IP "" 4 -. -.nf -A \-> B \-> A\' \-> B\' \-> A \-> B \-> A\' \-> B\' \-> A \-> \.\.\. -. -.fi -. -.IP "" 0 -. 
.P -where \fBA\fR is some version of a package, and \fBA\'\fR is a different version +.RS 2 +.nf +A \-> B \-> A' \-> B' \-> A \-> B \-> A' \-> B' \-> A \-> \.\.\. +.fi +.RE +.P +where \fBA\fR is some version of a package, and \fBA'\fR is a different version of the same package\. Because \fBB\fR depends on a different version of \fBA\fR than the one that is already in the tree, it must install a separate -copy\. The same is true of \fBA\'\fR, which must install \fBB\'\fR\|\. Because \fBB\'\fR +copy\. The same is true of \fBA'\fR, which must install \fBB'\fR\|\. Because \fBB'\fR depends on the original version of \fBA\fR, which has been overridden, the cycle falls into infinite regress\. -. .P -To avoid this situation, npm flat\-out refuses to install any \fBname@version\fR that is already present anywhere in the tree of package +To avoid this situation, npm flat\-out refuses to install any +\fBname@version\fR that is already present anywhere in the tree of package folder ancestors\. A more correct, but more complex, solution would be to symlink the existing version into the new location\. If this ever affects a real use\-case, it will be investigated\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help update -. -.IP "\(bu" 4 +.IP \(bu 2 npm help link -. -.IP "\(bu" 4 +.IP \(bu 2 npm help rebuild -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help build -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help tag -. -.IP "\(bu" 4 +.IP \(bu 2 npm help rm -. -.IP "\(bu" 4 +.IP \(bu 2 npm help shrinkwrap -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-link.1 nodejs-0.11.15/deps/npm/man/man1/npm-link.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-link.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-link.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,119 +1,100 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-LINK" "1" "May 2014" "" "" -. +.TH "NPM\-LINK" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-link\fR \-\- Symlink a package folder -. -.SH "SYNOPSIS" -. +\fBnpm-link\fR \- Symlink a package folder +.SH SYNOPSIS +.P +.RS 2 .nf npm link (in package folder) -npm link +npm link [@/] npm ln (with any of the previous argument usage) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Package linking is a two\-step process\. -. .P First, \fBnpm link\fR in a package folder will create a globally\-installed -symbolic link from \fBprefix/package\-name\fR to the current folder\. -. +symbolic link from \fBprefix/package\-name\fR to the current folder (see +npm help 7 \fBnpm\-config\fR for the value of \fBprefix\fR)\. .P Next, in some other location, \fBnpm link package\-name\fR will create a symlink from the local \fBnode_modules\fR folder to the global symlink\. -. .P Note that \fBpackage\-name\fR is taken from \fBpackage\.json\fR, not from directory name\. -. +.P +The package name can be optionally prefixed with a scope\. See npm help 7 \fBnpm\-scope\fR\|\. +The scope must by preceded by an @\-symbol and followed by a slash\. .P When creating tarballs for \fBnpm publish\fR, the linked packages are "snapshotted" to their current state by resolving the symbolic links\. -. .P -This is -handy for installing your own stuff, so that you can work on it and test it -iteratively without having to continually rebuild\. -. +This is handy for installing your own stuff, so that you can work on it and +test it iteratively without having to continually rebuild\. .P For example: -. -.IP "" 4 -. 
+.P +.RS 2 .nf cd ~/projects/node\-redis # go into the package directory npm link # creates global link cd ~/projects/node\-bloggy # go into some other package directory\. npm link redis # link\-install the package -. .fi -. -.IP "" 0 -. +.RE .P Now, any changes to ~/projects/node\-redis will be reflected in ~/projects/node\-bloggy/node_modules/redis/ -. .P You may also shortcut the two steps in one\. For example, to do the above use\-case in a shorter way: -. -.IP "" 4 -. +.P +.RS 2 .nf cd ~/projects/node\-bloggy # go into the dir of your main project npm link \.\./node\-redis # link the dir of your dependency -. .fi -. -.IP "" 0 -. +.RE .P The second line is the equivalent of doing: -. -.IP "" 4 -. +.P +.RS 2 .nf (cd \.\./node\-redis; npm link) npm link redis -. .fi -. -.IP "" 0 -. +.RE .P That is, it first creates a global link, and then links the global -installation target into your project\'s \fBnode_modules\fR folder\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help developers -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +installation target into your project's \fBnode_modules\fR folder\. +.P +If your linked package is scoped (see npm help 7 \fBnpm\-scope\fR) your link command must +include that scope, e\.g\. +.P +.RS 2 +.nf +npm link @myorg/privatepackage +.fi +.RE +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 developers +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. 
-.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-ls.1 nodejs-0.11.15/deps/npm/man/man1/npm-ls.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-ls.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-ls.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,146 +1,111 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-LS" "1" "May 2014" "" "" -. +.TH "NPM\-LS" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-ls\fR \-\- List installed packages -. -.SH "SYNOPSIS" -. +\fBnpm-ls\fR \- List installed packages +.SH SYNOPSIS +.P +.RS 2 .nf -npm list [ \.\.\.] -npm ls [ \.\.\.] -npm la [ \.\.\.] -npm ll [ \.\.\.] -. +npm list [[@/] \.\.\.] +npm ls [[@/] \.\.\.] +npm la [[@/] \.\.\.] +npm ll [[@/] \.\.\.] .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will print to stdout all the versions of packages that are installed, as well as their dependencies, in a tree\-structure\. -. .P Positional arguments are \fBname@version\-range\fR identifiers, which will limit the results to only the paths to the packages named\. Note that nested packages will \fIalso\fR show the paths to the specified packages\. -For example, running \fBnpm ls promzard\fR in npm\'s source tree will show: -. -.IP "" 4 -. +For example, running \fBnpm ls promzard\fR in npm's source tree will show: +.P +.RS 2 .nf -npm@1.4.9 /path/to/npm +npm@2.1.6 /path/to/npm └─┬ init\-package\-json@0\.0\.4 └── promzard@0\.1\.5 -. .fi -. -.IP "" 0 -. +.RE .P It will print out extraneous, missing, and invalid packages\. -. .P If a project specifies git urls for dependencies these are shown in parentheses after the name@version to make it easier for users to recognize potential forks of a project\. -. .P When run as \fBll\fR or \fBla\fR, it shows extended information by default\. -. -.SH "CONFIGURATION" -. -.SS "json" -. 
-.IP "\(bu" 4 +.SH CONFIGURATION +.SS json +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show information in JSON format\. -. -.SS "long" -. -.IP "\(bu" 4 +.SS long +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show extended information\. -. -.SS "parseable" -. -.IP "\(bu" 4 +.SS parseable +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show parseable output instead of tree view\. -. -.SS "global" -. -.IP "\(bu" 4 +.SS global +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P List packages in the global install prefix instead of in the current project\. -. -.SS "depth" -. -.IP "\(bu" 4 +.SS depth +.RS 0 +.IP \(bu 2 Type: Int -. -.IP "" 0 -. + +.RE .P Max display depth of the dependency tree\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help link -. -.IP "\(bu" 4 +.IP \(bu 2 npm help prune -. -.IP "\(bu" 4 +.IP \(bu 2 npm help outdated -. -.IP "\(bu" 4 +.IP \(bu 2 npm help update -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-outdated.1 nodejs-0.11.15/deps/npm/man/man1/npm-outdated.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-outdated.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-outdated.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,102 +1,79 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-OUTDATED" "1" "May 2014" "" "" -. +.TH "NPM\-OUTDATED" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-outdated\fR \-\- Check for outdated packages -. 
-.SH "SYNOPSIS" -. +\fBnpm-outdated\fR \- Check for outdated packages +.SH SYNOPSIS +.P +.RS 2 .nf npm outdated [ [ \.\.\.]] -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will check the registry to see if any (or, specific) installed packages are currently outdated\. -. .P -The resulting field \'wanted\' shows the latest version according to the -version specified in the package\.json, the field \'latest\' the very latest +The resulting field 'wanted' shows the latest version according to the +version specified in the package\.json, the field 'latest' the very latest version of the package\. -. -.SH "CONFIGURATION" -. -.SS "json" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS json +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show information in JSON format\. -. -.SS "long" -. -.IP "\(bu" 4 +.SS long +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show extended information\. -. -.SS "parseable" -. -.IP "\(bu" 4 +.SS parseable +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show parseable output instead of tree view\. -. -.SS "global" -. -.IP "\(bu" 4 +.SS global +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Check packages in the global install prefix instead of in the current project\. -. -.SS "depth" -. -.IP "\(bu" 4 +.SS depth +.RS 0 +.IP \(bu 2 Type: Int -. -.IP "" 0 -. + +.RE .P Max depth for checking dependency tree\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help update -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 -npm help folders -. 
-.IP "" 0 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 +npm help 5 folders + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-owner.1 nodejs-0.11.15/deps/npm/man/man1/npm-owner.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-owner.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-owner.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,58 +1,47 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-OWNER" "1" "May 2014" "" "" -. +.TH "NPM\-OWNER" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-owner\fR \-\- Manage package owners -. -.SH "SYNOPSIS" -. +\fBnpm-owner\fR \- Manage package owners +.SH SYNOPSIS +.P +.RS 2 .nf npm owner ls npm owner add npm owner rm -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Manage ownership of published packages\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 ls: List all the users who have access to modify a package and push new versions\. Handy when you need to know who to bug for help\. -. -.IP "\(bu" 4 +.IP \(bu 2 add: Add a new user as a maintainer of a package\. This user is enabled to modify metadata, publish new versions, and add other owners\. -. -.IP "\(bu" 4 +.IP \(bu 2 rm: Remove a user from the package owner list\. This immediately revokes their privileges\. -. -.IP "" 0 -. + +.RE .P Note that there is only one level of access\. Either you can modify a package, -or you can\'t\. Future versions may contain more fine\-grained access levels, but +or you can't\. Future versions may contain more fine\-grained access levels, but that is not implemented at this time\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help adduser -. -.IP "\(bu" 4 -npm help disputes -. 
-.IP "" 0 +.IP \(bu 2 +npm help 7 disputes + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-pack.1 nodejs-0.11.15/deps/npm/man/man1/npm-pack.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-pack.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-pack.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,48 +1,37 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-PACK" "1" "May 2014" "" "" -. +.TH "NPM\-PACK" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-pack\fR \-\- Create a tarball from a package -. -.SH "SYNOPSIS" -. +\fBnpm-pack\fR \- Create a tarball from a package +.SH SYNOPSIS +.P +.RS 2 .nf npm pack [ [ \.\.\.]] -. .fi -. -.SH "DESCRIPTION" -For anything that\'s installable (that is, a package folder, tarball, +.RE +.SH DESCRIPTION +.P +For anything that's installable (that is, a package folder, tarball, tarball url, name@tag, name@version, or name), this command will fetch it to the cache, and then copy the tarball to the current working directory as \fB\-\.tgz\fR, and then write the filenames out to stdout\. -. .P If the same package is specified multiple times, then the file will be overwritten the second time\. -. .P If no arguments are supplied, then npm packs the current package folder\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help cache -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-prefix.1 nodejs-0.11.15/deps/npm/man/man1/npm-prefix.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-prefix.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-prefix.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,34 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. 
-.TH "NPM\-PREFIX" "1" "May 2014" "" "" -. +.TH "NPM\-PREFIX" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-prefix\fR \-\- Display prefix -. -.SH "SYNOPSIS" -. +\fBnpm-prefix\fR \- Display prefix +.SH SYNOPSIS +.P +.RS 2 .nf -npm prefix -. +npm prefix [\-g] .fi -. -.SH "DESCRIPTION" -Print the prefix to standard out\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.RE +.SH DESCRIPTION +.P +Print the local prefix to standard out\. This is the closest parent directory +to contain a package\.json file unless \fB\-g\fR is also specified\. +.P +If \fB\-g\fR is specified, this will be the value of the global prefix\. See +npm help 7 \fBnpm\-config\fR for more detail\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help root -. -.IP "\(bu" 4 +.IP \(bu 2 npm help bin -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-prune.1 nodejs-0.11.15/deps/npm/man/man1/npm-prune.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-prune.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-prune.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,33 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-PRUNE" "1" "May 2014" "" "" -. +.TH "NPM\-PRUNE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-prune\fR \-\- Remove extraneous packages -. -.SH "SYNOPSIS" -. +\fBnpm-prune\fR \- Remove extraneous packages +.SH SYNOPSIS +.P +.RS 2 .nf npm prune [ [ [ [\-\-tag ] npm publish [\-\-tag ] -. .fi -. -.SH "DESCRIPTION" -Publishes a package to the registry so that it can be installed by name\. -. -.IP "\(bu" 4 +.RE +.SH DESCRIPTION +.P +Publishes a package to the registry so that it can be installed by name\. 
See +npm help 7 \fBnpm\-developers\fR for details on what's included in the published package, as +well as details on how the package is built\. +.P +By default npm will publish to the public registry\. This can be overridden by +specifying a different default registry or using a npm help 7 \fBnpm\-scope\fR in the name +(see npm help 5 \fBpackage\.json\fR)\. +.RS 0 +.IP \(bu 2 \fB\fR: A folder containing a package\.json file -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\fR: A url or file path to a gzipped tar archive containing a single folder with a package\.json file inside\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB[\-\-tag ]\fR Registers the published package with the given tag, such that \fBnpm install @\fR will install this version\. By default, \fBnpm publish\fR updates and \fBnpm install\fR installs the \fBlatest\fR tag\. -. -.IP "" 0 -. + +.RE .P Fails if the package name and version combination already exists in -the registry\. -. +the specified registry\. .P Once a package is published with a given name and version, that specific name and version combination can never be used again, even if -npm help it is removed with npm\-unpublish\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +it is removed with npm help unpublish\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help adduser -. -.IP "\(bu" 4 +.IP \(bu 2 npm help owner -. -.IP "\(bu" 4 +.IP \(bu 2 npm help deprecate -. -.IP "\(bu" 4 +.IP \(bu 2 npm help tag -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-README.1 nodejs-0.11.15/deps/npm/man/man1/npm-README.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-README.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-README.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,220 +1,176 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM" "1" "May 2014" "" "" -. 
+.TH "NPM" "1" "October 2014" "" "" .SH "NAME" -\fBnpm\fR \-\- node package manager![Build Status \fIhttps://img\.shields\.io/travis/npm/npm/master\.svg)](https://travis\-ci\.org/npm/npm\fR -## SYNOPSIS -. +\fBnpm\fR \- node package manager +.P +Build Status \fIhttps://img\.shields\.io/travis/npm/npm/master\.svg\fR \fIhttps://travis\-ci\.org/npm/npm\fR +.SH SYNOPSIS .P This is just enough info to get you up and running\. -. .P -Much more info available via \fBnpm help\fR once it\'s installed\. -. -.SH "IMPORTANT" +Much more info available via \fBnpm help\fR once it's installed\. +.SH IMPORTANT +.P \fBYou need node v0\.8 or higher to run this program\.\fR -. .P To install an old \fBand unsupported\fR version of npm that works on node 0\.3 and prior, clone the git repo and dig through the old tags and branches\. -. -.SH "Super Easy Install" -npm comes with node now\. -. -.SS "Windows Computers" -Get the MSI\. npm is in it\. -. -.SS "Apple Macintosh Computers" -Get the pkg\. npm is in it\. -. -.SS "Other Sorts of Unices" +.SH Super Easy Install +.P +npm comes with node \fIhttp://nodejs\.org/download/\fR now\. +.SS Windows Computers +.P +Get the MSI \fIhttp://nodejs\.org/download/\fR\|\. npm is in it\. +.SS Apple Macintosh Computers +.P +Get the pkg \fIhttp://nodejs\.org/download/\fR\|\. npm is in it\. +.SS Other Sorts of Unices +.P Run \fBmake install\fR\|\. npm will be installed with node\. -. .P If you want a more fancy pants install (a different version, customized paths, etc\.) then read on\. -. -.SH "Fancy Install (Unix)" -There\'s a pretty robust install script at \fIhttps://www\.npmjs\.org/install\.sh\fR\|\. You can download that and run it\. -. -.P -Here\'s an example using curl: -. -.IP "" 4 -. +.SH Fancy Install (Unix) +.P +There's a pretty robust install script at +https://www\.npmjs\.org/install\.sh\|\. You can download that and run it\. +.P +Here's an example using curl: +.P +.RS 2 .nf curl \-L https://npmjs\.org/install\.sh | sh -. .fi -. -.IP "" 0 -. 
-.SS "Slightly Fancier" +.RE +.SS Slightly Fancier +.P You can set any npm configuration params with that script: -. -.IP "" 4 -. +.P +.RS 2 .nf npm_config_prefix=/some/path sh install\.sh -. .fi -. -.IP "" 0 -. +.RE .P Or, you can run it in uber\-debuggery mode: -. -.IP "" 4 -. +.P +.RS 2 .nf npm_debug=1 sh install\.sh -. .fi -. -.IP "" 0 -. -.SS "Even Fancier" +.RE +.SS Even Fancier +.P Get the code with git\. Use \fBmake\fR to build the docs and do other stuff\. If you plan on hacking on npm, \fBmake link\fR is your friend\. -. .P -If you\'ve got the npm source code, you can also semi\-permanently set +If you've got the npm source code, you can also semi\-permanently set arbitrary config keys using the \fB\|\./configure \-\-key=val \.\.\.\fR, and then run npm commands by doing \fBnode cli\.js \fR\|\. (This is helpful for testing, or running stuff without actually installing npm itself\.) -. -.SH "Fancy Windows Install" -You can download a zip file from \fIhttps://npmjs\.org/dist/\fR, and unpack it +.SH Fancy Windows Install +.P +You can download a zip file from https://npmjs\.org/dist/, and unpack it in the same folder where node\.exe lives\. -. .P -If that\'s not fancy enough for you, then you can fetch the code with +If that's not fancy enough for you, then you can fetch the code with git, and mess with it directly\. -. -.SH "Installing on Cygwin" +.SH Installing on Cygwin +.P No\. -. -.SH "Permissions when Using npm to Install Other Stuff" +.SH Permissions when Using npm to Install Other Stuff +.P \fBtl;dr\fR -. -.IP "\(bu" 4 -Use \fBsudo\fR for greater safety\. Or don\'t, if you prefer not to\. -. -.IP "\(bu" 4 -npm will downgrade permissions if it\'s root before running any build +.RS 0 +.IP \(bu 2 +Use \fBsudo\fR for greater safety\. Or don't, if you prefer not to\. +.IP \(bu 2 +npm will downgrade permissions if it's root before running any build scripts that package authors specified\. -. -.IP "" 0 -. -.SS "More details\.\.\." 
+ +.RE +.SS More details\.\.\. +.P As of version 0\.3, it is recommended to run npm as root\. This allows npm to change the user identifier to the \fBnobody\fR user prior to running any package build or test commands\. -. .P If you are not the root user, or if you are on a platform that does not support uid switching, then npm will not attempt to change the userid\. -. .P If you would like to ensure that npm \fBalways\fR runs scripts as the "nobody" user, and have it fail if it cannot downgrade permissions, then set the following configuration param: -. -.IP "" 4 -. +.P +.RS 2 .nf npm config set unsafe\-perm false -. .fi -. -.IP "" 0 -. +.RE .P This will prevent running in unsafe mode, even as non\-root users\. -. -.SH "Uninstalling" +.SH Uninstalling +.P So sad to see you go\. -. -.IP "" 4 -. +.P +.RS 2 .nf sudo npm uninstall npm \-g -. .fi -. -.IP "" 0 -. +.RE .P Or, if that fails, -. -.IP "" 4 -. +.P +.RS 2 .nf sudo make uninstall -. .fi -. -.IP "" 0 -. -.SH "More Severe Uninstalling" +.RE +.SH More Severe Uninstalling +.P Usually, the above instructions are sufficient\. That will remove -npm, but leave behind anything you\'ve installed\. -. +npm, but leave behind anything you've installed\. .P If you would like to remove all the packages that you have installed, then you can use the \fBnpm ls\fR command to find them, and then \fBnpm rm\fR to remove them\. -. .P -To remove cruft left behind by npm 0\.x, you can use the included \fBclean\-old\.sh\fR script file\. You can run it conveniently like this: -. -.IP "" 4 -. +To remove cruft left behind by npm 0\.x, you can use the included +\fBclean\-old\.sh\fR script file\. You can run it conveniently like this: +.P +.RS 2 .nf npm explore npm \-g \-\- sh scripts/clean\-old\.sh -. .fi -. -.IP "" 0 -. +.RE .P npm uses two configuration files, one for per\-user configs, and another for global (every\-user) configs\. You can view them by doing: -. -.IP "" 4 -. 
+.P +.RS 2 .nf npm config get userconfig # defaults to ~/\.npmrc npm config get globalconfig # defaults to /usr/local/etc/npmrc -. .fi -. -.IP "" 0 -. +.RE .P Uninstalling npm does not remove configuration files by default\. You must remove them yourself manually if you want them gone\. Note that this means that future npm installs will not remember the settings that you have chosen\. -. -.SH "Using npm Programmatically" +.SH Using npm Programmatically +.P If you would like to use npm programmatically, you can do that\. -It\'s not very well documented, but it \fIis\fR rather simple\. -. +It's not very well documented, but it \fIis\fR rather simple\. .P Most of the time, unless you actually want to do all the things that -npm does, you should try using one of npm\'s dependencies rather than +npm does, you should try using one of npm's dependencies rather than using npm itself, if possible\. -. .P Eventually, npm will be just a thin cli wrapper around the modules that it depends on, but for now, there are some things that you must use npm itself to do\. -. -.IP "" 4 -. +.P +.RS 2 .nf var npm = require("npm") npm\.load(myConfigObject, function (er) { @@ -223,119 +179,100 @@ if (er) return commandFailed(er) // command succeeded, and data might have some info }) - npm\.on("log", function (message) { \.\.\.\. }) + npm\.registry\.log\.on("log", function (message) { \.\.\.\. }) }) -. .fi -. -.IP "" 0 -. +.RE .P The \fBload\fR function takes an object hash of the command\-line configs\. The various \fBnpm\.commands\.\fR functions take an \fBarray\fR of -positional argument \fBstrings\fR\|\. The last argument to any \fBnpm\.commands\.\fR function is a callback\. Some commands take other +positional argument \fBstrings\fR\|\. The last argument to any +\fBnpm\.commands\.\fR function is a callback\. Some commands take other optional arguments\. Read the source\. -. .P You cannot set configs individually for any single npm function at this time\. 
Since \fBnpm\fR is a singleton, any call to \fBnpm\.config\.set\fR will change the value for \fIall\fR npm commands in that process\. -. .P See \fB\|\./bin/npm\-cli\.js\fR for an example of pulling config values off of the command line arguments using nopt\. You may also want to check out \fBnpm help config\fR to learn about all the options you can set there\. -. -.SH "More Docs" +.SH More Docs +.P Check out the docs \fIhttps://www\.npmjs\.org/doc/\fR, especially the faq \fIhttps://www\.npmjs\.org/doc/faq\.html\fR\|\. -. .P You can use the \fBnpm help\fR command to read any of them\. -. .P -If you\'re a developer, and you want to use npm to publish your program, +If you're a developer, and you want to use npm to publish your program, you should read this \fIhttps://www\.npmjs\.org/doc/developers\.html\fR -. -.SH "Legal Stuff" +.SH Legal Stuff +.P "npm" and "The npm Registry" are owned by npm, Inc\. All rights reserved\. See the included LICENSE file for more details\. -. .P "Node\.js" and "node" are trademarks owned by Joyent, Inc\. -. .P Modules published on the npm registry are not officially endorsed by npm, Inc\. or the Node\.js project\. -. .P Data published to the npm registry is not part of npm itself, and is the sole property of the publisher\. While every effort is made to ensure accountability, there is absolutely no guarantee, warrantee, or assertion expressed or implied as to the quality, fitness for a specific purpose, or lack of malice in any given npm package\. -. .P If you have a complaint about a package in the public npm registry, and cannot resolve it with the package -owner \fIhttps://www\.npmjs\.org/doc/misc/npm\-disputes\.html\fR, please email \fIsupport@npmjs\.com\fR and explain the situation\. -. +owner \fIhttps://www\.npmjs\.org/doc/misc/npm\-disputes\.html\fR, please email +support@npmjs\.com and explain the situation\. 
.P Any data published to The npm Registry (including user account information) may be removed or modified at the sole discretion of the npm server administrators\. -. -.SS "In plainer english" +.SS In plainer english +.P npm is the property of npm, Inc\. -. .P -If you publish something, it\'s yours, and you are solely accountable +If you publish something, it's yours, and you are solely accountable for it\. -. .P -If other people publish something, it\'s theirs\. -. +If other people publish something, it's theirs\. .P Users can publish Bad Stuff\. It will be removed promptly if reported\. But there is no vetting process for published modules, and you use them at your own risk\. Please inspect the source\. -. .P If you publish Bad Stuff, we may delete it from the registry, or even -ban your account in extreme cases\. So don\'t do that\. -. -.SH "BUGS" +ban your account in extreme cases\. So don't do that\. +.SH BUGS +.P When you find issues, please report them: -. -.IP "\(bu" 4 -web: \fIhttps://github\.com/npm/npm/issues\fR -. -.IP "\(bu" 4 -email: \fInpm\-@googlegroups\.com\fR -. -.IP "" 0 -. +.RS 0 +.IP \(bu 2 +web: +https://github\.com/npm/npm/issues +.IP \(bu 2 +email: +npm\-@googlegroups\.com + +.RE .P -Be sure to include \fIall\fR of the output from the npm command that didn\'t work +Be sure to include \fIall\fR of the output from the npm command that didn't work as expected\. The \fBnpm\-debug\.log\fR file is also helpful to provide\. -. .P You can also look for isaacs in #node\.js on irc://irc\.freenode\.net\. He will no doubt tell you to put the output in a gist or email\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help npm -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help help -. -.IP "\(bu" 4 -npm help index -. 
-.IP "" 0 +.IP \(bu 2 +npm help 7 index + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-rebuild.1 nodejs-0.11.15/deps/npm/man/man1/npm-rebuild.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-rebuild.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-rebuild.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,31 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-REBUILD" "1" "May 2014" "" "" -. +.TH "NPM\-REBUILD" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-rebuild\fR \-\- Rebuild a package -. -.SH "SYNOPSIS" -. +\fBnpm-rebuild\fR \- Rebuild a package +.SH SYNOPSIS +.P +.RS 2 .nf npm rebuild [ [ \.\.\.]] npm rb [ [ \.\.\.]] -. .fi -. -.IP "\(bu" 4 +.RE +.RS 0 +.IP \(bu 2 \fB\fR: The package to rebuild -. -.IP "" 0 -. -.SH "DESCRIPTION" + +.RE +.SH DESCRIPTION +.P This command runs the \fBnpm build\fR command on the matched folders\. This is useful when you install a new version of node, and must recompile all your C++ addons with the new binary\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help build -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-repo.1 nodejs-0.11.15/deps/npm/man/man1/npm-repo.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-repo.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-repo.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,37 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-REPO" "1" "May 2014" "" "" -. +.TH "NPM\-REPO" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-repo\fR \-\- Open package repository page in the browser -. -.SH "SYNOPSIS" -. +\fBnpm-repo\fR \- Open package repository page in the browser +.SH SYNOPSIS +.P +.RS 2 .nf npm repo npm repo (with no args in a package dir) -. .fi -. 
-.SH "DESCRIPTION" -This command tries to guess at the likely location of a package\'s +.RE +.SH DESCRIPTION +.P +This command tries to guess at the likely location of a package's repository URL, and then tries to open it using the \fB\-\-browser\fR config param\. If no package name is provided, it will search for a \fBpackage\.json\fR in the current folder and use the \fBname\fR property\. -. -.SH "CONFIGURATION" -. -.SS "browser" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS browser +.RS 0 +.IP \(bu 2 Default: OS X: \fB"open"\fR, Windows: \fB"start"\fR, Others: \fB"xdg\-open"\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The browser that is called by the \fBnpm repo\fR command to open websites\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help docs -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-restart.1 nodejs-0.11.15/deps/npm/man/man1/npm-restart.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-restart.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-restart.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,29 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-RESTART" "1" "May 2014" "" "" -. +.TH "NPM\-RESTART" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-restart\fR \-\- Start a package -. -.SH "SYNOPSIS" -. +\fBnpm-restart\fR \- Start a package +.SH SYNOPSIS +.P +.RS 2 .nf -npm restart -. +npm restart [\-\- ] .fi -. -.SH "DESCRIPTION" -This runs a package\'s "restart" script, if one was provided\. -Otherwise it runs package\'s "stop" script, if one was provided, and then -the "start" script\. -. +.RE +.SH DESCRIPTION .P -If no version is specified, then it restarts the "active" version\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +This runs a package's "restart" script, if one was provided\. Otherwise it runs +package's "stop" script, if one was provided, and then the "start" script\. 
+.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help run\-script -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help test -. -.IP "\(bu" 4 +.IP \(bu 2 npm help start -. -.IP "\(bu" 4 +.IP \(bu 2 npm help stop -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-rm.1 nodejs-0.11.15/deps/npm/man/man1/npm-rm.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-rm.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-rm.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,34 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-RM" "1" "May 2014" "" "" -. +.TH "NPM\-RM" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-rm\fR \-\- Remove a package -. -.SH "SYNOPSIS" -. +\fBnpm-rm\fR \- Remove a package +.SH SYNOPSIS +.P +.RS 2 .nf npm rm npm r npm uninstall npm un -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This uninstalls a package, completely removing everything npm installed on its behalf\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help prune -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-root.1 nodejs-0.11.15/deps/npm/man/man1/npm-root.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-root.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-root.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,30 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-ROOT" "1" "May 2014" "" "" -. +.TH "NPM\-ROOT" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-root\fR \-\- Display npm root -. -.SH "SYNOPSIS" -. +\fBnpm-root\fR \- Display npm root +.SH SYNOPSIS +.P +.RS 2 .nf npm root -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the effective \fBnode_modules\fR folder to standard out\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help prefix -. -.IP "\(bu" 4 +.IP \(bu 2 npm help bin -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-run-script.1 nodejs-0.11.15/deps/npm/man/man1/npm-run-script.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-run-script.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-run-script.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,49 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-RUN\-SCRIPT" "1" "May 2014" "" "" -. +.TH "NPM\-RUN\-SCRIPT" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-run-script\fR \-\- Run arbitrary package scripts -. -.SH "SYNOPSIS" -. +\fBnpm-run-script\fR \- Run arbitrary package scripts +.SH SYNOPSIS +.P +.RS 2 .nf -npm run\-script [] -. +npm run\-script [command] [\-\- ] +npm run [command] [\-\- ] .fi -. -.SH "DESCRIPTION" -This runs an arbitrary command from a package\'s \fB"scripts"\fR object\. +.RE +.SH DESCRIPTION +.P +This runs an arbitrary command from a package's \fB"scripts"\fR object\. If no package name is provided, it will search for a \fBpackage\.json\fR -in the current folder and use its \fB"scripts"\fR object\. -. +in the current folder and use its \fB"scripts"\fR object\. If no \fB"command"\fR +is provided, it will list the available top level scripts\. .P It is used by the test, start, restart, and stop commands, but can be called directly, as well\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help scripts -. 
-.IP "\(bu" 4 +.P +As of \fBnpm@2\.0\.0\fR \fIhttp://blog\.npmjs\.org/post/98131109725/npm\-2\-0\-0\fR, you can +use custom arguments when executing scripts\. The special option \fB\-\-\fR is used by +getopt \fIhttp://goo\.gl/KxMmtG\fR to delimit the end of the options\. npm will pass +all the arguments after the \fB\-\-\fR directly to your script: +.P +.RS 2 +.nf +npm run test \-\- \-\-grep="pattern" +.fi +.RE +.P +The arguments will only be passed to the script specified after \fBnpm run\fR +and not to any pre or post script\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help test -. -.IP "\(bu" 4 +.IP \(bu 2 npm help start -. -.IP "\(bu" 4 +.IP \(bu 2 npm help restart -. -.IP "\(bu" 4 +.IP \(bu 2 npm help stop -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-search.1 nodejs-0.11.15/deps/npm/man/man1/npm-search.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-search.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-search.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,62 +1,48 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SEARCH" "1" "May 2014" "" "" -. +.TH "NPM\-SEARCH" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-search\fR \-\- Search for packages -. -.SH "SYNOPSIS" -. +\fBnpm-search\fR \- Search for packages +.SH SYNOPSIS +.P +.RS 2 .nf npm search [\-\-long] [search terms \.\.\.] npm s [search terms \.\.\.] npm se [search terms \.\.\.] -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Search the registry for packages matching the search terms\. -. .P -If a term starts with \fB/\fR, then it\'s interpreted as a regular expression\. +If a term starts with \fB/\fR, then it's interpreted as a regular expression\. A trailing \fB/\fR will be ignored in this case\. (Note that many regular expression characters must be escaped or quoted in most shells\.) -. -.SH "CONFIGURATION" -. -.SS "long" -. 
-.IP "\(bu" 4 +.SH CONFIGURATION +.SS long +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Display full package descriptions and other long text across multiple lines\. When disabled (default) search results are truncated to fit neatly on a single line\. Modules with extremely long names will fall on multiple lines\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 npm help view -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-shrinkwrap.1 nodejs-0.11.15/deps/npm/man/man1/npm-shrinkwrap.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-shrinkwrap.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-shrinkwrap.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,36 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SHRINKWRAP" "1" "May 2014" "" "" -. +.TH "NPM\-SHRINKWRAP" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-shrinkwrap\fR \-\- Lock down dependency versions -. -.SH "SYNOPSIS" -. +\fBnpm-shrinkwrap\fR \- Lock down dependency versions +.SH SYNOPSIS +.P +.RS 2 .nf npm shrinkwrap -. .fi -. -.SH "DESCRIPTION" -This command locks down the versions of a package\'s dependencies so +.RE +.SH DESCRIPTION +.P +This command locks down the versions of a package's dependencies so that you can control exactly which versions of each dependency will be used when your package is installed\. The "package\.json" file is still required if you want to use "npm install"\. -. 
.P -By default, "npm install" recursively installs the target\'s +By default, "npm install" recursively installs the target's dependencies (as specified in package\.json), choosing the latest -available version that satisfies the dependency\'s semver pattern\. In +available version that satisfies the dependency's semver pattern\. In some situations, particularly when shipping software where each change -is tightly managed, it\'s desirable to fully specify each version of +is tightly managed, it's desirable to fully specify each version of each dependency recursively so that subsequent builds and deploys do not inadvertently pick up newer versions of a dependency that satisfy the semver pattern\. Specifying specific semver patterns in each -dependency\'s package\.json would facilitate this, but that\'s not always +dependency's package\.json would facilitate this, but that's not always possible or desirable, as when another author owns the npm package\. -It\'s also possible to check dependencies directly into source control, +It's also possible to check dependencies directly into source control, but that may be undesirable for other reasons\. -. .P As an example, consider package A: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "A", @@ -46,16 +39,12 @@ "B": "<0\.1\.0" } } -. .fi -. -.IP "" 0 -. +.RE .P package B: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "B", @@ -64,82 +53,61 @@ "C": "<0\.1\.0" } } -. .fi -. -.IP "" 0 -. +.RE .P and package C: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "C, "version": "0\.0\.1" } -. .fi -. -.IP "" 0 -. +.RE .P If these are the only versions of A, B, and C available in the registry, then a normal "npm install A" will install: -. -.IP "" 4 -. +.P +.RS 2 .nf A@0\.1\.0 `\-\- B@0\.0\.1 `\-\- C@0\.0\.1 -. .fi -. -.IP "" 0 -. +.RE .P However, if B@0\.0\.2 is published, then a fresh "npm install A" will install: -. -.IP "" 4 -. +.P +.RS 2 .nf A@0\.1\.0 `\-\- B@0\.0\.2 `\-\- C@0\.0\.1 -. .fi -. -.IP "" 0 -. 
+.RE .P -assuming the new version did not modify B\'s dependencies\. Of course, +assuming the new version did not modify B's dependencies\. Of course, the new version of B could include a new version of C and any number of new dependencies\. If such changes are undesirable, the author of A -could specify a dependency on B@0\.0\.1\. However, if A\'s author and B\'s -author are not the same person, there\'s no way for A\'s author to say +could specify a dependency on B@0\.0\.1\. However, if A's author and B's +author are not the same person, there's no way for A's author to say that he or she does not want to pull in newly published versions of C -when B hasn\'t changed at all\. -. +when B hasn't changed at all\. +.P +In this case, A's author can run .P -In this case, A\'s author can run -. -.IP "" 4 -. +.RS 2 .nf npm shrinkwrap -. .fi -. -.IP "" 0 -. +.RE .P This generates npm\-shrinkwrap\.json, which will look something like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "A", @@ -155,79 +123,68 @@ } } } -. .fi -. -.IP "" 0 -. +.RE .P The shrinkwrap command has locked down the dependencies based on -what\'s currently installed in node_modules\. When "npm install" +what's currently installed in node_modules\. When "npm install" installs a package with a npm\-shrinkwrap\.json file in the package root, the shrinkwrap file (rather than package\.json files) completely drives the installation of that package and all of its dependencies (recursively)\. So now the author publishes A@0\.1\.0, and subsequent installs of this package will use B@0\.0\.1 and C@0\.1\.0, regardless the -dependencies and versions listed in A\'s, B\'s, and C\'s package\.json +dependencies and versions listed in A's, B's, and C's package\.json files\. -. 
-.SS "Using shrinkwrapped packages" +.SS Using shrinkwrapped packages +.P Using a shrinkwrapped package is no different than using any other package: you can "npm install" it by hand, or add a dependency to your package\.json file and "npm install" it\. -. -.SS "Building shrinkwrapped packages" +.SS Building shrinkwrapped packages +.P To shrinkwrap an existing package: -. -.IP "1" 4 +.RS 0 +.IP 1. 3 Run "npm install" in the package root to install the current versions of all dependencies\. -. -.IP "2" 4 +.IP 2. 3 Validate that the package works as expected with these versions\. -. -.IP "3" 4 +.IP 3. 3 Run "npm shrinkwrap", add npm\-shrinkwrap\.json to git, and publish your package\. -. -.IP "" 0 -. + +.RE .P To add or update a dependency in a shrinkwrapped package: -. -.IP "1" 4 +.RS 0 +.IP 1. 3 Run "npm install" in the package root to install the current versions of all dependencies\. -. -.IP "2" 4 +.IP 2. 3 Add or update dependencies\. "npm install" each new or updated package individually and then update package\.json\. Note that they must be explicitly named in order to be installed: running \fBnpm install\fR with no arguments will merely reproduce the existing shrinkwrap\. -. -.IP "3" 4 +.IP 3. 3 Validate that the package works as expected with the new dependencies\. -. -.IP "4" 4 +.IP 4. 3 Run "npm shrinkwrap", commit the new npm\-shrinkwrap\.json, and publish your package\. -. -.IP "" 0 -. + +.RE .P -npm help You can use npm\-outdated to view dependencies with newer versions +You can use npm help outdated to view dependencies with newer versions available\. -. -.SS "Other Notes" -A shrinkwrap file must be consistent with the package\'s package\.json +.SS Other Notes +.P +A shrinkwrap file must be consistent with the package's package\.json file\. "npm shrinkwrap" will fail if required dependencies are not already installed, since that would result in a shrinkwrap that -wouldn\'t actually work\. 
Similarly, the command will fail if there are +wouldn't actually work\. Similarly, the command will fail if there are extraneous packages (not referenced by package\.json), since that would indicate that package\.json is not correct\. -. .P Since "npm shrinkwrap" is intended to lock down your dependencies for production use, \fBdevDependencies\fR will not be included unless you @@ -235,41 +192,27 @@ installed \fBdevDependencies\fR are excluded, then npm will print a warning\. If you want them to be installed with your module by default, please consider adding them to \fBdependencies\fR instead\. -. .P -If shrinkwrapped package A depends on shrinkwrapped package B, B\'s +If shrinkwrapped package A depends on shrinkwrapped package B, B's shrinkwrap will not be used as part of the installation of A\. However, -because A\'s shrinkwrap is constructed from a valid installation of B -and recursively specifies all dependencies, the contents of B\'s -shrinkwrap will implicitly be included in A\'s shrinkwrap\. -. -.SS "Caveats" -Shrinkwrap files only lock down package versions, not actual package -contents\. While discouraged, a package author can republish an -existing version of a package, causing shrinkwrapped packages using -that version to pick up different code than they were before\. If you -want to avoid any risk that a byzantine author replaces a package -you\'re using with code that breaks your application, you could modify -the shrinkwrap file to use git URL references rather than version -numbers so that npm always fetches all packages from git\. -. +because A's shrinkwrap is constructed from a valid installation of B +and recursively specifies all dependencies, the contents of B's +shrinkwrap will implicitly be included in A's shrinkwrap\. 
+.SS Caveats .P If you wish to lock down the specific bytes included in a package, for example to have 100% confidence in being able to reproduce a deployment or build, then you ought to check your dependencies into source control, or pursue some other mechanism that can verify contents rather than versions\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help ls -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-star.1 nodejs-0.11.15/deps/npm/man/man1/npm-star.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-star.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-star.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,30 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-STAR" "1" "May 2014" "" "" -. +.TH "NPM\-STAR" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-star\fR \-\- Mark your favorite packages -. -.SH "SYNOPSIS" -. +\fBnpm-star\fR \- Mark your favorite packages +.SH SYNOPSIS +.P +.RS 2 .nf npm star [, \.\.\.] npm unstar [, \.\.\.] -. .fi -. -.SH "DESCRIPTION" -"Starring" a package means that you have some interest in it\. It\'s +.RE +.SH DESCRIPTION +.P +"Starring" a package means that you have some interest in it\. It's a vaguely positive way to show that you care\. -. .P "Unstarring" is the same thing, but in reverse\. -. .P -It\'s a boolean thing\. Starring repeatedly has no additional effect\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +It's a boolean thing\. Starring repeatedly has no additional effect\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help view -. -.IP "\(bu" 4 +.IP \(bu 2 npm help whoami -. -.IP "\(bu" 4 +.IP \(bu 2 npm help adduser -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-stars.1 nodejs-0.11.15/deps/npm/man/man1/npm-stars.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-stars.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-stars.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,31 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-STARS" "1" "May 2014" "" "" -. +.TH "NPM\-STARS" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-stars\fR \-\- View packages marked as favorites -. -.SH "SYNOPSIS" -. +\fBnpm-stars\fR \- View packages marked as favorites +.SH SYNOPSIS +.P +.RS 2 .nf npm stars npm stars [username] -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P If you have starred a lot of neat things and want to find them again quickly this command lets you do just that\. -. .P -You may also want to see your friend\'s favorite packages, in this case +You may also want to see your friend's favorite packages, in this case you will most certainly enjoy this command\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help star -. -.IP "\(bu" 4 +.IP \(bu 2 npm help view -. -.IP "\(bu" 4 +.IP \(bu 2 npm help whoami -. -.IP "\(bu" 4 +.IP \(bu 2 npm help adduser -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-start.1 nodejs-0.11.15/deps/npm/man/man1/npm-start.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-start.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-start.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,28 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-START" "1" "May 2014" "" "" -. +.TH "NPM\-START" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-start\fR \-\- Start a package -. -.SH "SYNOPSIS" -. +\fBnpm-start\fR \- Start a package +.SH SYNOPSIS +.P +.RS 2 .nf -npm start -. +npm start [\-\- ] .fi -. -.SH "DESCRIPTION" -This runs a package\'s "start" script, if one was provided\. -. -.SH "SEE ALSO" -. 
-.IP "\(bu" 4 +.RE +.SH DESCRIPTION +.P +This runs a package's "start" script, if one was provided\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help run\-script -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help test -. -.IP "\(bu" 4 +.IP \(bu 2 npm help restart -. -.IP "\(bu" 4 +.IP \(bu 2 npm help stop -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-stop.1 nodejs-0.11.15/deps/npm/man/man1/npm-stop.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-stop.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-stop.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,28 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-STOP" "1" "May 2014" "" "" -. +.TH "NPM\-STOP" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-stop\fR \-\- Stop a package -. -.SH "SYNOPSIS" -. +\fBnpm-stop\fR \- Stop a package +.SH SYNOPSIS +.P +.RS 2 .nf -npm stop -. +npm stop [\-\- ] .fi -. -.SH "DESCRIPTION" -This runs a package\'s "stop" script, if one was provided\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.RE +.SH DESCRIPTION +.P +This runs a package's "stop" script, if one was provided\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help run\-script -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help test -. -.IP "\(bu" 4 +.IP \(bu 2 npm help start -. -.IP "\(bu" 4 +.IP \(bu 2 npm help restart -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-submodule.1 nodejs-0.11.15/deps/npm/man/man1/npm-submodule.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-submodule.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-submodule.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,35 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SUBMODULE" "1" "May 2014" "" "" -. 
+.TH "NPM\-SUBMODULE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-submodule\fR \-\- Add a package as a git submodule -. -.SH "SYNOPSIS" -. +\fBnpm-submodule\fR \- Add a package as a git submodule +.SH SYNOPSIS +.P +.RS 2 .nf npm submodule -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P If the specified package has a git repository url in its package\.json -description, then this command will add it as a git submodule at \fBnode_modules/\fR\|\. -. +description, then this command will add it as a git submodule at +\fBnode_modules/\fR\|\. .P -This is a convenience only\. From then on, it\'s up to you to manage +This is a convenience only\. From then on, it's up to you to manage updates by using the appropriate git commands\. npm will stubbornly refuse to update, modify, or remove anything with a \fB\|\.git\fR subfolder in it\. -. .P This command also does not install missing dependencies, if the package does not include them in its git repository\. If \fBnpm ls\fR reports that things are missing, you can either install, link, or submodule them yourself, or you can do \fBnpm explore \-\- npm install\fR to install the dependencies into the submodule folder\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 git help submodule -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-tag.1 nodejs-0.11.15/deps/npm/man/man1/npm-tag.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-tag.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-tag.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,74 +1,54 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-TAG" "1" "May 2014" "" "" -. +.TH "NPM\-TAG" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-tag\fR \-\- Tag a published version -. -.SH "SYNOPSIS" -. +\fBnpm-tag\fR \- Tag a published version +.SH SYNOPSIS +.P +.RS 2 .nf npm tag @ [] -. .fi -. 
-.SH "DESCRIPTION" -Tags the specified version of the package with the specified tag, or the \fB\-\-tag\fR config if not specified\. -. +.RE +.SH DESCRIPTION +.P +Tags the specified version of the package with the specified tag, or the +\fB\-\-tag\fR config if not specified\. .P A tag can be used when installing packages as a reference to a version instead of using a specific version number: -. -.IP "" 4 -. +.P +.RS 2 .nf npm install @ -. .fi -. -.IP "" 0 -. +.RE .P When installing dependencies, a preferred tagged version may be specified: -. -.IP "" 4 -. +.P +.RS 2 .nf npm install \-\-tag -. .fi -. -.IP "" 0 -. +.RE .P This also applies to \fBnpm dedupe\fR\|\. -. .P Publishing a package always sets the "latest" tag to the published version\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help dedupe -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-test.1 nodejs-0.11.15/deps/npm/man/man1/npm-test.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-test.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-test.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,32 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-TEST" "1" "May 2014" "" "" -. +.TH "NPM\-TEST" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-test\fR \-\- Test a package -. -.SH "SYNOPSIS" -. +\fBnpm-test\fR \- Test a package +.SH SYNOPSIS +.P +.RS 2 .nf - npm test - npm tst -. + npm test [\-\- ] + npm tst [\-\- ] .fi -. -.SH "DESCRIPTION" -This runs a package\'s "test" script, if one was provided\. -. 
+.RE +.SH DESCRIPTION +.P +This runs a package's "test" script, if one was provided\. .P To run tests as a condition of installation, set the \fBnpat\fR config to true\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help run\-script -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help start -. -.IP "\(bu" 4 +.IP \(bu 2 npm help restart -. -.IP "\(bu" 4 +.IP \(bu 2 npm help stop -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-uninstall.1 nodejs-0.11.15/deps/npm/man/man1/npm-uninstall.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-uninstall.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-uninstall.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,87 +1,68 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-RM" "1" "May 2014" "" "" -. +.TH "NPM\-RM" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-rm\fR \-\- Remove a package -. -.SH "SYNOPSIS" -. +\fBnpm-rm\fR \- Remove a package +.SH SYNOPSIS +.P +.RS 2 .nf -npm uninstall [\-\-save|\-\-save\-dev|\-\-save\-optional] +npm uninstall [@/] [\-\-save|\-\-save\-dev|\-\-save\-optional] npm rm (with any of the previous argument usage) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This uninstalls a package, completely removing everything npm installed on its behalf\. -. .P Example: -. -.IP "" 4 -. +.P +.RS 2 .nf npm uninstall sax -. .fi -. -.IP "" 0 -. +.RE .P In global mode (ie, with \fB\-g\fR or \fB\-\-global\fR appended to the command), it uninstalls the current package context as a global package\. -. .P \fBnpm uninstall\fR takes 3 exclusive, optional flags which save or update the package version in your main package\.json: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fB\-\-save\fR: Package will be removed from your \fBdependencies\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-\-save\-dev\fR: Package will be removed from your \fBdevDependencies\fR\|\. -. 
-.IP "\(bu" 4 +.IP \(bu 2 \fB\-\-save\-optional\fR: Package will be removed from your \fBoptionalDependencies\fR\|\. -. -.IP "" 0 -. + +.RE +.P +Scope is optional and follows the usual rules for npm help 7 \fBnpm\-scope\fR\|\. .P Examples: -. -.IP "" 4 -. +.P +.RS 2 .nf npm uninstall sax \-\-save +npm uninstall @myorg/privatepackage \-\-save npm uninstall node\-tap \-\-save\-dev npm uninstall dtrace\-provider \-\-save\-optional -. .fi -. -.IP "" 0 -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.RE +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help prune -. -.IP "\(bu" 4 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-unpublish.1 nodejs-0.11.15/deps/npm/man/man1/npm-unpublish.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-unpublish.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-unpublish.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,58 +1,47 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-UNPUBLISH" "1" "May 2014" "" "" -. +.TH "NPM\-UNPUBLISH" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-unpublish\fR \-\- Remove a package from the registry -. -.SH "SYNOPSIS" -. +\fBnpm-unpublish\fR \- Remove a package from the registry +.SH SYNOPSIS +.P +.RS 2 .nf -npm unpublish [@] -. +npm unpublish [@/][@] .fi -. -.SH "WARNING" +.RE +.SH WARNING +.P \fBIt is generally considered bad behavior to remove versions of a library that others are depending on!\fR -. .P Consider using the \fBdeprecate\fR command instead, if your intent is to encourage users to upgrade\. -. .P There is plenty of room on the registry\. -. -.SH "DESCRIPTION" +.SH DESCRIPTION +.P This removes a package version from the registry, deleting its entry and removing the tarball\. -. 
.P If no version is specified, or if all versions are removed then the root package entry is removed from the registry entirely\. -. .P Even if a package version is unpublished, that specific name and version combination can never be reused\. In order to publish the package again, a new version number must be used\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.P +The scope is optional and follows the usual rules for npm help 7 \fBnpm\-scope\fR\|\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help deprecate -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help adduser -. -.IP "\(bu" 4 +.IP \(bu 2 npm help owner -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-update.1 nodejs-0.11.15/deps/npm/man/man1/npm-update.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-update.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-update.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,37 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-UPDATE" "1" "May 2014" "" "" -. +.TH "NPM\-UPDATE" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-update\fR \-\- Update a package -. -.SH "SYNOPSIS" -. +\fBnpm-update\fR \- Update a package +.SH SYNOPSIS +.P +.RS 2 .nf npm update [\-g] [ [ \.\.\.]] -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will update all the packages listed to the latest version (specified by the \fBtag\fR config)\. -. .P It will also install missing packages\. -. .P -If the \fB\-g\fR flag is specified, this command will update globally installed packages\. -If no package name is specified, all packages in the specified location (global or local) will be updated\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +If the \fB\-g\fR flag is specified, this command will update globally installed +packages\. 
+.P +If no package name is specified, all packages in the specified location (global +or local) will be updated\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help outdated -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help ls -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-version.1 nodejs-0.11.15/deps/npm/man/man1/npm-version.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-version.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-version.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,75 +1,61 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-VERSION" "1" "May 2014" "" "" -. +.TH "NPM\-VERSION" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-version\fR \-\- Bump a package version -. -.SH "SYNOPSIS" -. +\fBnpm-version\fR \- Bump a package version +.SH SYNOPSIS +.P +.RS 2 .nf -npm version [ | major | minor | patch] -. +npm version [ | major | minor | patch | premajor | preminor | prepatch | prerelease] .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Run this in a package directory to bump the version and write the new data back to the package\.json file\. -. .P -The \fBnewversion\fR argument should be a valid semver string, \fIor\fR a valid -second argument to semver\.inc (one of "patch", "minor", or -"major")\. In the second case, the existing version will be incremented -by 1 in the specified field\. -. +The \fBnewversion\fR argument should be a valid semver string, \fIor\fR a +valid second argument to semver\.inc (one of "patch", "minor", "major", +"prepatch", "preminor", "premajor", "prerelease")\. In the second case, +the existing version will be incremented by 1 in the specified field\. .P If run in a git repo, it will also create a version commit and tag, and fail if the repo is not clean\. -. 
.P If supplied with \fB\-\-message\fR (shorthand: \fB\-m\fR) config option, npm will -use it as a commit message when creating a version commit\. If the \fBmessage\fR config contains \fB%s\fR then that will be replaced with the +use it as a commit message when creating a version commit\. If the +\fBmessage\fR config contains \fB%s\fR then that will be replaced with the resulting version number\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf npm version patch \-m "Upgrade to %s for reasons" -. .fi -. -.IP "" 0 -. +.RE .P If the \fBsign\-git\-tag\fR config is set, then the tag will be signed using the \fB\-s\fR flag to git\. Note that you must have a default GPG key set up in your git config for this to work properly\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf $ npm config set sign\-git\-tag true $ npm version patch + You need a passphrase to unlock the secret key for user: "isaacs (http://blog\.izs\.me/) " 2048\-bit RSA key, ID 6C481CF6, created 2010\-08\-31 + Enter passphrase: -. .fi -. -.IP "" 0 -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.RE +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help init -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 -npm help semver -. -.IP "" 0 +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 +npm help 7 semver + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-view.1 nodejs-0.11.15/deps/npm/man/man1/npm-view.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-view.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-view.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,186 +1,136 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-VIEW" "1" "May 2014" "" "" -. +.TH "NPM\-VIEW" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-view\fR \-\- View registry info -. -.SH "SYNOPSIS" -. -.nf -npm view [@] [[\.]\.\.\.] -npm v [@] [[\.]\.\.\.] -. +\fBnpm-view\fR \- View registry info +.SH SYNOPSIS +.P +.RS 2 +.nf +npm view [@/][@] [[\.]\.\.\.] +npm v [@/][@] [[\.]\.\.\.] .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command shows data about a package and prints it to the stream referenced by the \fBoutfd\fR config, which defaults to stdout\. -. .P To show the package registry entry for the \fBconnect\fR package, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm view connect -. .fi -. -.IP "" 0 -. +.RE .P The default version is "latest" if unspecified\. -. .P Field names can be specified after the package descriptor\. For example, to show the dependencies of the \fBronn\fR package at version 0\.3\.5, you could do the following: -. -.IP "" 4 -. +.P +.RS 2 .nf npm view ronn@0\.3\.5 dependencies -. .fi -. -.IP "" 0 -. +.RE .P You can view child field by separating them with a period\. To view the git repository URL for the latest version of npm, you could do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm view npm repository\.url -. .fi -. -.IP "" 0 -. +.RE .P This makes it easy to view information about a dependency with a bit of shell scripting\. For example, to view all the data about the version of opts that ronn depends on, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm view opts@$(npm view ronn dependencies\.opts) -. .fi -. -.IP "" 0 -. +.RE .P For fields that are arrays, requesting a non\-numeric field will return all of the values from the objects in the list\. For example, to get all the contributor names for the "express" project, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm view express contributors\.email -. .fi -. -.IP "" 0 -. +.RE .P You may also use numeric indices in square braces to specifically select an item in an array field\. To just get the email address of the first contributor in the list, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm view express contributors[0]\.email -. .fi -. -.IP "" 0 -. +.RE .P Multiple fields may be specified, and will be printed one after another\. For exampls, to get all the contributor names and email addresses, you can do this: -. -.IP "" 4 -. 
+.P +.RS 2 .nf npm view express contributors\.name contributors\.email -. .fi -. -.IP "" 0 -. +.RE .P "Person" fields are shown as a string if they would be shown as an object\. So, for example, this will show the list of npm contributors in -the shortened string format\. (npm help See \fBpackage\.json\fR for more on this\.) -. -.IP "" 4 -. +the shortened string format\. (See npm help 5 \fBpackage\.json\fR for more on this\.) +.P +.RS 2 .nf npm view npm contributors -. .fi -. -.IP "" 0 -. +.RE .P If a version range is provided, then data will be printed for every matching version of the package\. This will show which version of jsdom was required by each matching version of yui3: -. -.IP "" 4 -. -.nf -npm view yui3@\'>0\.5\.4\' dependencies\.jsdom -. -.fi -. -.IP "" 0 -. -.SH "OUTPUT" +.P +.RS 2 +.nf +npm view yui3@'>0\.5\.4' dependencies\.jsdom +.fi +.RE +.SH OUTPUT +.P If only a single string field for a single version is output, then it will not be colorized or quoted, so as to enable piping the output to another command\. If the field is an object, it will be output as a JavaScript object literal\. -. .P If the \-\-json flag is given, the outputted fields will be JSON\. -. .P If the version range matches multiple versions, than each printed value will be prefixed with the version it applies to\. -. .P If multiple fields are requested, than each of them are prefixed with the field name\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help search -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 npm help docs -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man1/npm-whoami.1 nodejs-0.11.15/deps/npm/man/man1/npm-whoami.1 --- nodejs-0.11.13/deps/npm/man/man1/npm-whoami.1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man1/npm-whoami.1 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,26 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-WHOAMI" "1" "May 2014" "" "" -. +.TH "NPM\-WHOAMI" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-whoami\fR \-\- Display npm username -. -.SH "SYNOPSIS" -. +\fBnpm-whoami\fR \- Display npm username +.SH SYNOPSIS +.P +.RS 2 .nf npm whoami -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the \fBusername\fR config to standard output\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 npm help adduser -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm.3 nodejs-0.11.15/deps/npm/man/man3/npm.3 --- nodejs-0.11.13/deps/npm/man/man3/npm.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,162 +1,124 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM" "3" "May 2014" "" "" -. +.TH "NPM" "3" "October 2014" "" "" .SH "NAME" -\fBnpm\fR \-\- node package manager -. -.SH "SYNOPSIS" -. +\fBnpm\fR \- node package manager +.SH SYNOPSIS +.P +.RS 2 .nf var npm = require("npm") npm\.load([configObject, ]function (er, npm) { - // use the npm object, now that it\'s loaded\. + // use the npm object, now that it's loaded\. + npm\.config\.set(key, val) val = npm\.config\.get(key) + console\.log("prefix = %s", npm\.prefix) + npm\.commands\.install(["package"], cb) }) -. .fi -. -.SH "VERSION" -1.4.9 -. 
-.SH "DESCRIPTION" +.RE +.SH VERSION +.P +2.1.6 +.SH DESCRIPTION +.P This is the API documentation for npm\. To find documentation of the command line -npm help client, see \fBnpm\fR\|\. -. +client, see npm help \fBnpm\fR\|\. .P -Prior to using npm\'s commands, \fBnpm\.load()\fR must be called\. -If you provide \fBconfigObject\fR as an object hash of top\-level -configs, they override the values stored in the various config -locations\. In the npm command line client, this set of configs -is parsed from the command line options\. Additional configuration -npm help npm help params are loaded from two configuration files\. See \fBnpm\-config\fR, \fBnpm\-confignpm help \fR, and \fBnpmrc\fR for more information\. -. +Prior to using npm's commands, \fBnpm\.load()\fR must be called\. If you provide +\fBconfigObject\fR as an object map of top\-level configs, they override the values +stored in the various config locations\. In the npm command line client, this +set of configs is parsed from the command line options\. Additional +configuration params are loaded from two configuration files\. See +npm help \fBnpm\-config\fR, npm help 7 \fBnpm\-config\fR, and npm help 5 \fBnpmrc\fR for more information\. .P After that, each of the functions are accessible in the -npm help commands object: \fBnpm\.commands\.\fR\|\. See \fBnpm\-index\fR for a list of +commands object: \fBnpm\.commands\.\fR\|\. See npm help 7 \fBnpm\-index\fR for a list of all possible commands\. -. .P -All commands on the command object take an \fBarray\fR of positional argument \fBstrings\fR\|\. The last argument to any function is a callback\. Some +All commands on the command object take an \fBarray\fR of positional argument +\fBstrings\fR\|\. The last argument to any function is a callback\. Some commands take other optional arguments\. -. .P Configs cannot currently be set on a per function basis, as each call to npm\.config\.set will change the value for \fIall\fR npm commands in that process\. -. 
.P To find API documentation for a specific command, run the \fBnpm apihelp\fR command\. -. -.SH "METHODS AND PROPERTIES" -. -.IP "\(bu" 4 +.SH METHODS AND PROPERTIES +.RS 0 +.IP \(bu 2 \fBnpm\.load(configs, cb)\fR -. -.IP -Load the configuration params, and call the \fBcb\fR function once the -globalconfig and userconfig files have been loaded as well, or on -nextTick if they\'ve already been loaded\. -. -.IP "\(bu" 4 + Load the configuration params, and call the \fBcb\fR function once the + globalconfig and userconfig files have been loaded as well, or on + nextTick if they've already been loaded\. +.IP \(bu 2 \fBnpm\.config\fR -. -.IP -An object for accessing npm configuration parameters\. -. -.IP "\(bu" 4 + An object for accessing npm configuration parameters\. +.RS 0 +.IP \(bu 2 \fBnpm\.config\.get(key)\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fBnpm\.config\.set(key, val)\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fBnpm\.config\.del(key)\fR -. -.IP "" 0 -. -.IP "\(bu" 4 +.RE +.IP \(bu 2 \fBnpm\.dir\fR or \fBnpm\.root\fR -. -.IP -The \fBnode_modules\fR directory where npm will operate\. -. -.IP "\(bu" 4 + The \fBnode_modules\fR directory where npm will operate\. +.IP \(bu 2 \fBnpm\.prefix\fR -. -.IP -The prefix where npm is operating\. (Most often the current working -directory\.) -. -.IP "\(bu" 4 + The prefix where npm is operating\. (Most often the current working + directory\.) +.IP \(bu 2 \fBnpm\.cache\fR -. -.IP -The place where npm keeps JSON and tarballs it fetches from the -registry (or uploads to the registry)\. -. -.IP "\(bu" 4 + The place where npm keeps JSON and tarballs it fetches from the + registry (or uploads to the registry)\. +.IP \(bu 2 \fBnpm\.tmp\fR -. -.IP -npm\'s temporary working directory\. -. -.IP "\(bu" 4 + npm's temporary working directory\. +.IP \(bu 2 \fBnpm\.deref\fR -. -.IP -Get the "real" name for a command that has either an alias or -abbreviation\. -. -.IP "" 0 -. 
-.SH "MAGIC" -For each of the methods in the \fBnpm\.commands\fR hash, a method is added to -the npm object, which takes a set of positional string arguments rather -than an array and a callback\. -. + Get the "real" name for a command that has either an alias or + abbreviation\. + +.RE +.SH MAGIC +.P +For each of the methods in the \fBnpm\.commands\fR object, a method is added to the +npm object, which takes a set of positional string arguments rather than an +array and a callback\. .P If the last argument is a callback, then it will use the supplied callback\. However, if no callback is provided, then it will print out the error or results\. -. .P For example, this would work in a node repl: -. -.IP "" 4 -. +.P +.RS 2 .nf > npm = require("npm") > npm\.load() // wait a sec\.\.\. > npm\.install("dnode", "express") -. .fi -. -.IP "" 0 -. +.RE .P -Note that that \fIwon\'t\fR work in a node program, since the \fBinstall\fR +Note that that \fIwon't\fR work in a node program, since the \fBinstall\fR method will get called before the configuration load is completed\. -. -.SH "ABBREVS" -In order to support \fBnpm ins foo\fR instead of \fBnpm install foo\fR, the \fBnpm\.commands\fR object has a set of abbreviations as well as the full +.SH ABBREVS +.P +In order to support \fBnpm ins foo\fR instead of \fBnpm install foo\fR, the +\fBnpm\.commands\fR object has a set of abbreviations as well as the full method names\. Use the \fBnpm\.deref\fR method to find the real name\. -. .P For example: -. -.IP "" 4 -. +.P +.RS 2 .nf var cmd = npm\.deref("unp") // cmd === "unpublish" -. .fi -. -.IP "" 0 +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-bin.3 nodejs-0.11.15/deps/npm/man/man3/npm-bin.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-bin.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-bin.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,21 +1,17 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. 
-.TH "NPM\-BIN" "3" "May 2014" "" "" -. +.TH "NPM\-BIN" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-bin\fR \-\- Display npm bin folder -. -.SH "SYNOPSIS" -. +\fBnpm-bin\fR \- Display npm bin folder +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.bin(args, cb) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the folder where npm will install executables\. -. .P This function should not be used programmatically\. Instead, just refer -to the \fBnpm\.bin\fR member\. +to the \fBnpm\.bin\fR property\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-bugs.3 nodejs-0.11.15/deps/npm/man/man3/npm-bugs.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-bugs.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-bugs.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,28 +1,23 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-BUGS" "3" "May 2014" "" "" -. +.TH "NPM\-BUGS" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-bugs\fR \-\- Bugs for a package in a web browser maybe -. -.SH "SYNOPSIS" -. +\fBnpm-bugs\fR \- Bugs for a package in a web browser maybe +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.bugs(package, callback) -. .fi -. -.SH "DESCRIPTION" -This command tries to guess at the likely location of a package\'s +.RE +.SH DESCRIPTION +.P +This command tries to guess at the likely location of a package's bug tracker URL, and then tries to open it using the \fB\-\-browser\fR config param\. -. .P Like other commands, the first parameter is an array\. This command only uses the first element, which is expected to be a package name with an optional version number\. -. .P This command will launch a browser, so this command may not be the most friendly for programmatic use\. 
+ diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-cache.3 nodejs-0.11.15/deps/npm/man/man3/npm-cache.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-cache.3 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-cache.3 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,34 @@ +.TH "NPM\-CACHE" "3" "October 2014" "" "" +.SH "NAME" +\fBnpm-cache\fR \- manage the npm cache programmatically +.SH SYNOPSIS +.P +.RS 2 +.nf +npm\.commands\.cache([args], callback) + +// helpers +npm\.commands\.cache\.clean([args], callback) +npm\.commands\.cache\.add([args], callback) +npm\.commands\.cache\.read(name, version, forceBypass, callback) +.fi +.RE +.SH DESCRIPTION +.P +This acts much the same ways as the npm help cache command line +functionality\. +.P +The callback is called with the package\.json data of the thing that is +eventually added to or read from the cache\. +.P +The top level \fBnpm\.commands\.cache(\.\.\.)\fR functionality is a public +interface, and like all commands on the \fBnpm\.commands\fR object, it will +match the command line behavior exactly\. +.P +However, the cache folder structure and the cache helper functions are +considered \fBinternal\fR API surface, and as such, may change in future +releases of npm, potentially without warning or significant version +incrementation\. +.P +Use at your own risk\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-commands.3 nodejs-0.11.15/deps/npm/man/man3/npm-commands.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-commands.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-commands.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,28 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-COMMANDS" "3" "May 2014" "" "" -. +.TH "NPM\-COMMANDS" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-commands\fR \-\- npm commands -. -.SH "SYNOPSIS" -. +\fBnpm-commands\fR \- npm commands +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands[](args, callback) -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P npm comes with a full set of commands, and each of the commands takes a similar set of arguments\. -. .P In general, all commands on the command object take an \fBarray\fR of positional argument \fBstrings\fR\|\. The last argument to any function is a callback\. Some commands are special and take other optional arguments\. -. .P All commands have their own man page\. See \fBman npm\-\fR for command\-line usage, or \fBman 3 npm\-\fR for programmatic usage\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help index -. -.IP "" 0 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 index + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-config.3 nodejs-0.11.15/deps/npm/man/man3/npm-config.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-config.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-config.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,69 +1,49 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-CONFIG" "3" "May 2014" "" "" -. +.TH "NPM\-CONFIG" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-config\fR \-\- Manage the npm configuration files -. -.SH "SYNOPSIS" -. +\fBnpm-config\fR \- Manage the npm configuration files +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.config(args, callback) var val = npm\.config\.get(key) npm\.config\.set(key, val) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This function acts much the same way as the command\-line version\. The first element in the array tells config what to do\. Possible values are: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fBset\fR -. -.IP -Sets a config parameter\. The second element in \fBargs\fR is interpreted as the -key, and the third element is interpreted as the value\. -. -.IP "\(bu" 4 + Sets a config parameter\. The second element in \fBargs\fR is interpreted as the + key, and the third element is interpreted as the value\. +.IP \(bu 2 \fBget\fR -. -.IP -Gets the value of a config parameter\. 
The second element in \fBargs\fR is the -key to get the value of\. -. -.IP "\(bu" 4 + Gets the value of a config parameter\. The second element in \fBargs\fR is the + key to get the value of\. +.IP \(bu 2 \fBdelete\fR (\fBrm\fR or \fBdel\fR) -. -.IP -Deletes a parameter from the config\. The second element in \fBargs\fR is the -key to delete\. -. -.IP "\(bu" 4 + Deletes a parameter from the config\. The second element in \fBargs\fR is the + key to delete\. +.IP \(bu 2 \fBlist\fR (\fBls\fR) -. -.IP -Show all configs that aren\'t secret\. No parameters necessary\. -. -.IP "\(bu" 4 + Show all configs that aren't secret\. No parameters necessary\. +.IP \(bu 2 \fBedit\fR: -. -.IP -Opens the config file in the default editor\. This command isn\'t very useful -programmatically, but it is made available\. -. -.IP "" 0 -. + Opens the config file in the default editor\. This command isn't very useful + programmatically, but it is made available\. + +.RE .P To programmatically access npm configuration settings, or set them for the duration of a program, use the \fBnpm\.config\.set\fR and \fBnpm\.config\.get\fR functions instead\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm apihelp npm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-deprecate.3 nodejs-0.11.15/deps/npm/man/man3/npm-deprecate.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-deprecate.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-deprecate.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,57 +1,43 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-DEPRECATE" "3" "May 2014" "" "" -. +.TH "NPM\-DEPRECATE" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-deprecate\fR \-\- Deprecate a version of a package -. -.SH "SYNOPSIS" -. +\fBnpm-deprecate\fR \- Deprecate a version of a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.deprecate(args, callback) -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will update the npm registry entry for a package, providing a deprecation warning to all who attempt to install it\. -. .P -The \'args\' parameter must have exactly two elements: -. -.IP "\(bu" 4 +The 'args' parameter must have exactly two elements: +.RS 0 +.IP \(bu 2 \fBpackage[@version]\fR -. -.IP -The \fBversion\fR portion is optional, and may be either a range, or a -specific version, or a tag\. -. -.IP "\(bu" 4 + The \fBversion\fR portion is optional, and may be either a range, or a + specific version, or a tag\. +.IP \(bu 2 \fBmessage\fR -. -.IP -The warning message that will be printed whenever a user attempts to -install the package\. -. -.IP "" 0 -. + The warning message that will be printed whenever a user attempts to + install the package\. + +.RE .P -Note that you must be the package owner to deprecate something\. See the \fBowner\fR and \fBadduser\fR help topics\. -. +Note that you must be the package owner to deprecate something\. See the +\fBowner\fR and \fBadduser\fR help topics\. .P To un\-deprecate a package, specify an empty string (\fB""\fR) for the \fBmessage\fR argument\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm apihelp publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm apihelp unpublish -. -.IP "\(bu" 4 -npm help registry -. -.IP "" 0 +.IP \(bu 2 +npm help 7 registry + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-docs.3 nodejs-0.11.15/deps/npm/man/man3/npm-docs.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-docs.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-docs.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,28 +1,23 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-DOCS" "3" "May 2014" "" "" -. +.TH "NPM\-DOCS" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-docs\fR \-\- Docs for a package in a web browser maybe -. -.SH "SYNOPSIS" -. 
+\fBnpm-docs\fR \- Docs for a package in a web browser maybe +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.docs(package, callback) -. .fi -. -.SH "DESCRIPTION" -This command tries to guess at the likely location of a package\'s +.RE +.SH DESCRIPTION +.P +This command tries to guess at the likely location of a package's documentation URL, and then tries to open it using the \fB\-\-browser\fR config param\. -. .P Like other commands, the first parameter is an array\. This command only uses the first element, which is expected to be a package name with an optional version number\. -. .P This command will launch a browser, so this command may not be the most friendly for programmatic use\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-edit.3 nodejs-0.11.15/deps/npm/man/man3/npm-edit.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-edit.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-edit.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,28 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-EDIT" "3" "May 2014" "" "" -. +.TH "NPM\-EDIT" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-edit\fR \-\- Edit an installed package -. -.SH "SYNOPSIS" -. +\fBnpm-edit\fR \- Edit an installed package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.edit(package, callback) -. .fi -. -.SH "DESCRIPTION" -Opens the package folder in the default editor (or whatever you\'ve +.RE +.SH DESCRIPTION +.P +Opens the package folder in the default editor (or whatever you've configured as the npm \fBeditor\fR config \-\- see \fBnpm help config\fR\|\.) -. .P After it has been edited, the package is rebuilt so as to pick up any changes in compiled packages\. -. .P For instance, you can do \fBnpm install connect\fR to install connect into your package, and then \fBnpm\.commands\.edit(["connect"], callback)\fR to make a few changes to your locally installed copy\. -. 
.P The first parameter is a string array with a single element, the package to open\. The package can optionally have a version number attached\. -. .P Since this command opens an editor in a new process, be careful about where and how this is used\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-explore.3 nodejs-0.11.15/deps/npm/man/man3/npm-explore.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-explore.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-explore.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,28 +1,22 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-EXPLORE" "3" "May 2014" "" "" -. +.TH "NPM\-EXPLORE" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-explore\fR \-\- Browse an installed package -. -.SH "SYNOPSIS" -. +\fBnpm-explore\fR \- Browse an installed package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.explore(args, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Spawn a subshell in the directory of the installed package specified\. -. .P If a command is specified, then it is run in the subshell, which then immediately terminates\. -. .P Note that the package is \fInot\fR automatically rebuilt afterwards, so be sure to use \fBnpm rebuild \fR if you make any changes\. -. .P -The first element in the \'args\' parameter must be a package name\. After that is the optional command, which can be any number of strings\. All of the strings will be combined into one, space\-delimited command\. +The first element in the 'args' parameter must be a package name\. After that is the optional command, which can be any number of strings\. All of the strings will be combined into one, space\-delimited command\. 
+ diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-help-search.3 nodejs-0.11.15/deps/npm/man/man3/npm-help-search.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-help-search.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-help-search.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,51 +1,41 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-HELP\-SEARCH" "3" "May 2014" "" "" -. +.TH "NPM\-HELP\-SEARCH" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-help-search\fR \-\- Search the help pages -. -.SH "SYNOPSIS" -. +\fBnpm-help-search\fR \- Search the help pages +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.helpSearch(args, [silent,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command is rarely useful, but it exists in the rare case that it is\. -. .P This command takes an array of search terms and returns the help pages that match in order of best match\. -. .P If there is only one match, then npm displays that help section\. If there are multiple results, the results are printed to the screen formatted and the array of results is returned\. Each result is an object with these properties: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 hits: A map of args to number of hits on that arg\. For example, {"npm": 3} -. -.IP "\(bu" 4 +.IP \(bu 2 found: Total number of unique args that matched\. -. -.IP "\(bu" 4 +.IP \(bu 2 totalHits: Total number of hits\. -. -.IP "\(bu" 4 +.IP \(bu 2 lines: An array of all matching lines (and some adjacent lines)\. -. -.IP "\(bu" 4 +.IP \(bu 2 file: Name of the file that matched -. -.IP "" 0 -. + +.RE .P -The silent parameter is not neccessary not used, but it may in the future\. +The silent parameter is not necessary not used, but it may in the future\. 
+ diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-init.3 nodejs-0.11.15/deps/npm/man/man3/npm-init.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-init.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-init.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,32 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "INIT" "3" "May 2014" "" "" -. +.TH "NPM" "" "October 2014" "" "" .SH "NAME" -\fBinit\fR \-\- Interactively create a package\.json file -. -.SH "SYNOPSIS" -. +\fBnpm\fR +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.init(args, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This will ask you a bunch of questions, and then write a package\.json for you\. -. .P It attempts to make reasonable guesses about what you want things to be set to, -and then writes a package\.json file with the options you\'ve selected\. -. +and then writes a package\.json file with the options you've selected\. .P -If you already have a package\.json file, it\'ll read that first, and default to +If you already have a package\.json file, it'll read that first, and default to the options in there\. -. .P It is strictly additive, so it does not delete options from your package\.json without a really good reason to do so\. -. .P -Since this function expects to be run on the command\-line, it doesn\'t work very +Since this function expects to be run on the command\-line, it doesn't work very well as a programmatically\. The best option is to roll your own, and since JavaScript makes it stupid simple to output formatted JSON, that is the -preferred method\. If you\'re sure you want to handle command\-line prompting, +preferred method\. If you're sure you want to handle command\-line prompting, then go ahead and use this programmatically\. -. 
-.SH "SEE ALSO" -npm help package\.json +.SH SEE ALSO +.P +npm help 5 package\.json + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-install.3 nodejs-0.11.15/deps/npm/man/man3/npm-install.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-install.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-install.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,23 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-INSTALL" "3" "May 2014" "" "" -. +.TH "NPM\-INSTALL" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-install\fR \-\- install a package programmatically -. -.SH "SYNOPSIS" -. +\fBnpm-install\fR \- install a package programmatically +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.install([where,] packages, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This acts much the same ways as installing on the command\-line\. -. .P -The \'where\' parameter is optional and only used internally, and it specifies +The 'where' parameter is optional and only used internally, and it specifies where the packages should be installed to\. -. .P -The \'packages\' parameter is an array of strings\. Each element in the array is +The 'packages' parameter is an array of strings\. Each element in the array is the name of a package to be installed\. -. .P -Finally, \'callback\' is a function that will be called when all packages have been +Finally, 'callback' is a function that will be called when all packages have been installed or when an error has been encountered\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-link.3 nodejs-0.11.15/deps/npm/man/man3/npm-link.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-link.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-link.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,53 +1,41 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-LINK" "3" "May 2014" "" "" -. 
+.TH "NPM\-LINK" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-link\fR \-\- Symlink a package folder -. -.SH "SYNOPSIS" -. +\fBnpm-link\fR \- Symlink a package folder +.SH SYNOPSIS +.P +.RS 2 .nf -npm\.command\.link(callback) -npm\.command\.link(packages, callback) -. +npm\.commands\.link(callback) +npm\.commands\.link(packages, callback) .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Package linking is a two\-step process\. -. .P Without parameters, link will create a globally\-installed symbolic link from \fBprefix/package\-name\fR to the current folder\. -. .P With a parameters, link will create a symlink from the local \fBnode_modules\fR folder to the global symlink\. -. .P When creating tarballs for \fBnpm publish\fR, the linked packages are "snapshotted" to their current state by resolving the symbolic links\. -. .P This is handy for installing your own stuff, so that you can work on it and test it iteratively without having to continually rebuild\. -. .P For example: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.link(cb) # creates global link from the cwd # (say redis package) -npm\.commands\.link(\'redis\', cb) # link\-install the package -. +npm\.commands\.link('redis', cb) # link\-install the package .fi -. -.IP "" 0 -. +.RE .P Now, any changes to the redis package will be reflected in the package in the current working directory + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-load.3 nodejs-0.11.15/deps/npm/man/man3/npm-load.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-load.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-load.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,34 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-LOAD" "3" "May 2014" "" "" -. +.TH "NPM\-LOAD" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-load\fR \-\- Load config settings -. -.SH "SYNOPSIS" -. +\fBnpm-load\fR \- Load config settings +.SH SYNOPSIS +.P +.RS 2 .nf npm\.load(conf, cb) -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P npm\.load() must be called before any other function call\. Both parameters are optional, but the second is recommended\. -. .P -The first parameter is an object hash of command\-line config params, and the -second parameter is a callback that will be called when npm is loaded and -ready to serve\. -. +The first parameter is an object containing command\-line config params, and the +second parameter is a callback that will be called when npm is loaded and ready +to serve\. .P The first parameter should follow a similar structure as the package\.json config object\. -. .P For example, to emulate the \-\-dev flag, pass an object that looks like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "dev": true } -. .fi -. -.IP "" 0 -. +.RE .P For a list of all the available command\-line configs, see \fBnpm help config\fR + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-ls.3 nodejs-0.11.15/deps/npm/man/man3/npm-ls.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-ls.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-ls.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,86 +1,68 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-LS" "3" "May 2014" "" "" -. +.TH "NPM\-LS" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-ls\fR \-\- List installed packages -. -.SH "SYNOPSIS" -. +\fBnpm-ls\fR \- List installed packages +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.ls(args, [silent,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will print to stdout all the versions of packages that are installed, as well as their dependencies, in a tree\-structure\. It will also return that data using the callback\. -. .P This command does not take any arguments, but args must be defined\. 
Beyond that, if any arguments are passed in, npm will politely warn that it does not take positional arguments, though you may set config flags like with any other command, such as \fBglobal\fR to list global packages\. -. .P It will print out extraneous, missing, and invalid packages\. -. .P If the silent parameter is set to true, nothing will be output to the screen, but the data will still be returned\. -. .P Callback is provided an error if one occurred, the full data about which packages are installed and which dependencies they will receive, and a "lite" data object which just shows which versions are installed where\. Note that the full data object is a circular structure, so care must be taken if it is serialized to JSON\. -. -.SH "CONFIGURATION" -. -.SS "long" -. -.IP "\(bu" 4 +.SH CONFIGURATION +.SS long +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show extended information\. -. -.SS "parseable" -. -.IP "\(bu" 4 +.SS parseable +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show parseable output instead of tree view\. -. -.SS "global" -. -.IP "\(bu" 4 +.SS global +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P List packages in the global install prefix instead of in the current project\. -. .P -Note, if parseable is set or long isn\'t set, then duplicates will be trimmed\. +Note, if parseable is set or long isn't set, then duplicates will be trimmed\. This means that if a submodule a same dependency as a parent module, then the dependency will only be output once\. 
+ diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-outdated.3 nodejs-0.11.15/deps/npm/man/man3/npm-outdated.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-outdated.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-outdated.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,21 +1,17 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-OUTDATED" "3" "May 2014" "" "" -. +.TH "NPM\-OUTDATED" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-outdated\fR \-\- Check for outdated packages -. -.SH "SYNOPSIS" -. +\fBnpm-outdated\fR \- Check for outdated packages +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.outdated([packages,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command will check the registry to see if the specified packages are currently outdated\. -. .P -If the \'packages\' parameter is left out, npm will check all packages\. +If the 'packages' parameter is left out, npm will check all packages\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-owner.3 nodejs-0.11.15/deps/npm/man/man3/npm-owner.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-owner.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-owner.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,52 +1,43 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-OWNER" "3" "May 2014" "" "" -. +.TH "NPM\-OWNER" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-owner\fR \-\- Manage package owners -. -.SH "SYNOPSIS" -. +\fBnpm-owner\fR \- Manage package owners +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.owner(args, callback) -. .fi -. -.SH "DESCRIPTION" -The first element of the \'args\' parameter defines what to do, and the subsequent +.RE +.SH DESCRIPTION +.P +The first element of the 'args' parameter defines what to do, and the subsequent elements depend on the action\. Possible values for the action are (order of parameters are given in parenthesis): -. 
-.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 ls (package): List all the users who have access to modify a package and push new versions\. Handy when you need to know who to bug for help\. -. -.IP "\(bu" 4 +.IP \(bu 2 add (user, package): Add a new user as a maintainer of a package\. This user is enabled to modify metadata, publish new versions, and add other owners\. -. -.IP "\(bu" 4 +.IP \(bu 2 rm (user, package): Remove a user from the package owner list\. This immediately revokes their privileges\. -. -.IP "" 0 -. + +.RE .P Note that there is only one level of access\. Either you can modify a package, -or you can\'t\. Future versions may contain more fine\-grained access levels, but +or you can't\. Future versions may contain more fine\-grained access levels, but that is not implemented at this time\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm apihelp publish -. -.IP "\(bu" 4 -npm help registry -. -.IP "" 0 +.IP \(bu 2 +npm help 7 registry + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-pack.3 nodejs-0.11.15/deps/npm/man/man3/npm-pack.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-pack.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-pack.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,28 +1,23 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-PACK" "3" "May 2014" "" "" -. +.TH "NPM\-PACK" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-pack\fR \-\- Create a tarball from a package -. -.SH "SYNOPSIS" -. +\fBnpm-pack\fR \- Create a tarball from a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.pack([packages,] callback) -. .fi -. 
-.SH "DESCRIPTION" -For anything that\'s installable (that is, a package folder, tarball, +.RE +.SH DESCRIPTION +.P +For anything that's installable (that is, a package folder, tarball, tarball url, name@tag, name@version, or name), this command will fetch it to the cache, and then copy the tarball to the current working directory as \fB\-\.tgz\fR, and then write the filenames out to stdout\. -. .P If the same package is specified multiple times, then the file will be overwritten the second time\. -. .P If no arguments are supplied, then npm packs the current package folder\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-prefix.3 nodejs-0.11.15/deps/npm/man/man3/npm-prefix.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-prefix.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-prefix.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,24 +1,19 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-PREFIX" "3" "May 2014" "" "" -. +.TH "NPM\-PREFIX" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-prefix\fR \-\- Display prefix -. -.SH "SYNOPSIS" -. +\fBnpm-prefix\fR \- Display prefix +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.prefix(args, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the prefix to standard out\. -. .P -\'args\' is never used and callback is never called with data\. -\'args\' must be present or things will break\. -. +\|'args' is never used and callback is never called with data\. +\|'args' must be present or things will break\. .P This function is not useful programmatically + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-prune.3 nodejs-0.11.15/deps/npm/man/man3/npm-prune.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-prune.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-prune.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,21 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-PRUNE" "3" "May 2014" "" "" -. 
+.TH "NPM\-PRUNE" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-prune\fR \-\- Remove extraneous packages -. -.SH "SYNOPSIS" -. +\fBnpm-prune\fR \- Remove extraneous packages +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.prune([packages,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command removes "extraneous" packages\. -. .P The first parameter is optional, and it specifies packages to be removed\. -. .P No packages are specified, then all packages will be checked\. -. .P Extraneous packages are packages that are not listed on the parent -package\'s dependencies list\. +package's dependencies list\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-publish.3 nodejs-0.11.15/deps/npm/man/man3/npm-publish.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-publish.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-publish.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,51 +1,41 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-PUBLISH" "3" "May 2014" "" "" -. +.TH "NPM\-PUBLISH" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-publish\fR \-\- Publish a package -. -.SH "SYNOPSIS" -. +\fBnpm-publish\fR \- Publish a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.publish([packages,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Publishes a package to the registry so that it can be installed by name\. -Possible values in the \'packages\' array are: -. -.IP "\(bu" 4 +Possible values in the 'packages' array are: +.RS 0 +.IP \(bu 2 \fB\fR: A folder containing a package\.json file -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\fR: A url or file path to a gzipped tar archive containing a single folder with a package\.json file inside\. -. -.IP "" 0 -. + +.RE .P If the package array is empty, npm will try to publish something in the current working directory\. -. .P This command could fails if one of the packages specified already exists in the registry\. 
Overwrites when the "force" environment variable is set\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help adduser -. -.IP "\(bu" 4 +.IP \(bu 2 npm apihelp owner -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-rebuild.3 nodejs-0.11.15/deps/npm/man/man3/npm-rebuild.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-rebuild.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-rebuild.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,22 +1,19 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-REBUILD" "3" "May 2014" "" "" -. +.TH "NPM\-REBUILD" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-rebuild\fR \-\- Rebuild a package -. -.SH "SYNOPSIS" -. +\fBnpm-rebuild\fR \- Rebuild a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.rebuild([packages,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command runs the \fBnpm build\fR command on each of the matched packages\. This is useful when you install a new version of node, and must recompile all your C++ addons with -the new binary\. If no \'packages\' parameter is specify, every package will be rebuilt\. -. -.SH "CONFIGURATION" +the new binary\. If no 'packages' parameter is specify, every package will be rebuilt\. +.SH CONFIGURATION +.P See \fBnpm help build\fR + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-repo.3 nodejs-0.11.15/deps/npm/man/man3/npm-repo.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-repo.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-repo.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,28 +1,23 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-REPO" "3" "May 2014" "" "" -. +.TH "NPM\-REPO" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-repo\fR \-\- Open package repository page in the browser -. -.SH "SYNOPSIS" -. 
+\fBnpm-repo\fR \- Open package repository page in the browser +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.repo(package, callback) -. .fi -. -.SH "DESCRIPTION" -This command tries to guess at the likely location of a package\'s +.RE +.SH DESCRIPTION +.P +This command tries to guess at the likely location of a package's repository URL, and then tries to open it using the \fB\-\-browser\fR config param\. -. .P Like other commands, the first parameter is an array\. This command only uses the first element, which is expected to be a package name with an optional version number\. -. .P This command will launch a browser, so this command may not be the most friendly for programmatic use\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-restart.3 nodejs-0.11.15/deps/npm/man/man3/npm-restart.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-restart.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-restart.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,29 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-RESTART" "3" "May 2014" "" "" -. +.TH "NPM\-RESTART" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-restart\fR \-\- Start a package -. -.SH "SYNOPSIS" -. +\fBnpm-restart\fR \- Start a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.restart(packages, callback) -. .fi -. -.SH "DESCRIPTION" -This runs a package\'s "restart" script, if one was provided\. -Otherwise it runs package\'s "stop" script, if one was provided, and then +.RE +.SH DESCRIPTION +.P +This runs a package's "restart" script, if one was provided\. +Otherwise it runs package's "stop" script, if one was provided, and then the "start" script\. -. .P If no version is specified, then it restarts the "active" version\. -. .P npm can run tests on multiple packages\. Just specify multiple packages in the \fBpackages\fR parameter\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm apihelp start -. 
-.IP "\(bu" 4 +.IP \(bu 2 npm apihelp stop -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-root.3 nodejs-0.11.15/deps/npm/man/man3/npm-root.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-root.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-root.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,24 +1,19 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-ROOT" "3" "May 2014" "" "" -. +.TH "NPM\-ROOT" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-root\fR \-\- Display npm root -. -.SH "SYNOPSIS" -. +\fBnpm-root\fR \- Display npm root +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.root(args, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the effective \fBnode_modules\fR folder to standard out\. -. .P -\'args\' is never used and callback is never called with data\. -\'args\' must be present or things will break\. -. +\|'args' is never used and callback is never called with data\. +\|'args' must be present or things will break\. .P This function is not useful programmatically\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-run-script.3 nodejs-0.11.15/deps/npm/man/man3/npm-run-script.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-run-script.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-run-script.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,48 +1,37 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-RUN\-SCRIPT" "3" "May 2014" "" "" -. +.TH "NPM\-RUN\-SCRIPT" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-run-script\fR \-\- Run arbitrary package scripts -. -.SH "SYNOPSIS" -. +\fBnpm-run-script\fR \- Run arbitrary package scripts +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.run\-script(args, callback) -. .fi -. -.SH "DESCRIPTION" -This runs an arbitrary command from a package\'s "scripts" object\. -. +.RE +.SH DESCRIPTION +.P +This runs an arbitrary command from a package's "scripts" object\. 
.P It is used by the test, start, restart, and stop commands, but can be called directly, as well\. -. .P -The \'args\' parameter is an array of strings\. Behavior depends on the number +The 'args' parameter is an array of strings\. Behavior depends on the number of elements\. If there is only one element, npm assumes that the element represents a command to be run on the local repository\. If there is more than one element, then the first is assumed to be the package and the second is assumed to be the command to run\. All other elements are ignored\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm apihelp test -. -.IP "\(bu" 4 +.IP \(bu 2 npm apihelp start -. -.IP "\(bu" 4 +.IP \(bu 2 npm apihelp restart -. -.IP "\(bu" 4 +.IP \(bu 2 npm apihelp stop -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-search.3 nodejs-0.11.15/deps/npm/man/man3/npm-search.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-search.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-search.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,64 +1,52 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SEARCH" "3" "May 2014" "" "" -. +.TH "NPM\-SEARCH" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-search\fR \-\- Search for packages -. -.SH "SYNOPSIS" -. +\fBnpm-search\fR \- Search for packages +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.search(searchTerms, [silent,] [staleness,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Search the registry for packages matching the search terms\. The available parameters are: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 searchTerms: Array of search terms\. These terms are case\-insensitive\. -. -.IP "\(bu" 4 +.IP \(bu 2 silent: If true, npm will not log anything to the console\. -. -.IP "\(bu" 4 +.IP \(bu 2 staleness: This is the threshold for stale packages\. 
"Fresh" packages are not refreshed from the registry\. This value is measured in seconds\. -. -.IP "\(bu" 4 +.IP \(bu 2 callback: Returns an object where each key is the name of a package, and the value -is information about that package along with a \'words\' property, which is +is information about that package along with a 'words' property, which is a space\-delimited string of all of the interesting words in that package\. The only properties included are those that are searched, which generally include: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 name -. -.IP "\(bu" 4 +.IP \(bu 2 description -. -.IP "\(bu" 4 +.IP \(bu 2 maintainers -. -.IP "\(bu" 4 +.IP \(bu 2 url -. -.IP "\(bu" 4 +.IP \(bu 2 keywords -. -.IP "" 0 -. -.IP "" 0 -. +.RE + +.RE .P A search on the registry excludes any result that does not match all of the search terms\. It also removes any items from the results that contain an excluded term (the "searchexclude" config)\. The search is case insensitive -and doesn\'t try to read your mind (it doesn\'t do any verb tense matching or the +and doesn't try to read your mind (it doesn't do any verb tense matching or the like)\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-shrinkwrap.3 nodejs-0.11.15/deps/npm/man/man3/npm-shrinkwrap.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-shrinkwrap.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-shrinkwrap.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,30 +1,24 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SHRINKWRAP" "3" "May 2014" "" "" -. +.TH "NPM\-SHRINKWRAP" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-shrinkwrap\fR \-\- programmatically generate package shrinkwrap file -. -.SH "SYNOPSIS" -. +\fBnpm-shrinkwrap\fR \- programmatically generate package shrinkwrap file +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.shrinkwrap(args, [silent,] callback) -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This acts much the same ways as shrinkwrapping on the command\-line\. -. .P -This command does not take any arguments, but \'args\' must be defined\. +This command does not take any arguments, but 'args' must be defined\. Beyond that, if any arguments are passed in, npm will politely warn that it does not take positional arguments\. -. .P -If the \'silent\' parameter is set to true, nothing will be output to the screen, +If the 'silent' parameter is set to true, nothing will be output to the screen, but the shrinkwrap file will still be written\. -. .P -Finally, \'callback\' is a function that will be called when the shrinkwrap has +Finally, 'callback' is a function that will be called when the shrinkwrap has been saved\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-start.3 nodejs-0.11.15/deps/npm/man/man3/npm-start.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-start.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-start.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,21 +1,17 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-START" "3" "May 2014" "" "" -. +.TH "NPM\-START" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-start\fR \-\- Start a package -. -.SH "SYNOPSIS" -. +\fBnpm-start\fR \- Start a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.start(packages, callback) -. .fi -. -.SH "DESCRIPTION" -This runs a package\'s "start" script, if one was provided\. -. +.RE +.SH DESCRIPTION +.P +This runs a package's "start" script, if one was provided\. .P npm can run tests on multiple packages\. Just specify multiple packages in the \fBpackages\fR parameter\. 
+ diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-stop.3 nodejs-0.11.15/deps/npm/man/man3/npm-stop.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-stop.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-stop.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,21 +1,17 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-STOP" "3" "May 2014" "" "" -. +.TH "NPM\-STOP" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-stop\fR \-\- Stop a package -. -.SH "SYNOPSIS" -. +\fBnpm-stop\fR \- Stop a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.stop(packages, callback) -. .fi -. -.SH "DESCRIPTION" -This runs a package\'s "stop" script, if one was provided\. -. +.RE +.SH DESCRIPTION +.P +This runs a package's "stop" script, if one was provided\. .P npm can run stop on multiple packages\. Just specify multiple packages in the \fBpackages\fR parameter\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-submodule.3 nodejs-0.11.15/deps/npm/man/man3/npm-submodule.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-submodule.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-submodule.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,35 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SUBMODULE" "3" "May 2014" "" "" -. +.TH "NPM\-SUBMODULE" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-submodule\fR \-\- Add a package as a git submodule -. -.SH "SYNOPSIS" -. +\fBnpm-submodule\fR \- Add a package as a git submodule +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.submodule(packages, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P For each package specified, npm will check if it has a git repository url -in its package\.json description then add it as a git submodule at \fBnode_modules/\fR\|\. -. +in its package\.json description then add it as a git submodule at +\fBnode_modules/\fR\|\. .P -This is a convenience only\. 
From then on, it\'s up to you to manage +This is a convenience only\. From then on, it's up to you to manage updates by using the appropriate git commands\. npm will stubbornly refuse to update, modify, or remove anything with a \fB\|\.git\fR subfolder in it\. -. .P This command also does not install missing dependencies, if the package does not include them in its git repository\. If \fBnpm ls\fR reports that things are missing, you can either install, link, or submodule them yourself, or you can do \fBnpm explore \-\- npm install\fR to install the dependencies into the submodule folder\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help json -. -.IP "\(bu" 4 +.IP \(bu 2 git help submodule -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-tag.3 nodejs-0.11.15/deps/npm/man/man3/npm-tag.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-tag.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-tag.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,27 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-TAG" "3" "May 2014" "" "" -. +.TH "NPM\-TAG" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-tag\fR \-\- Tag a published version -. -.SH "SYNOPSIS" -. +\fBnpm-tag\fR \- Tag a published version +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.tag(package@version, tag, callback) -. .fi -. -.SH "DESCRIPTION" -Tags the specified version of the package with the specified tag, or the \fB\-\-tag\fR config if not specified\. -. +.RE +.SH DESCRIPTION +.P +Tags the specified version of the package with the specified tag, or the +\fB\-\-tag\fR config if not specified\. .P -The \'package@version\' is an array of strings, but only the first two elements are +The 'package@version' is an array of strings, but only the first two elements are currently used\. -. 
.P The first element must be in the form package@version, where package is the package name and version is the version number (much like installing a specific version)\. -. .P The second element is the name of the tag to tag this version with\. If this parameter is missing or falsey (empty), the default froom the config will be -used\. For more information about how to set this config, check \fBman 3 npm\-config\fR for programmatic usage or \fBman npm\-config\fR for cli usage\. +used\. For more information about how to set this config, check +\fBman 3 npm\-config\fR for programmatic usage or \fBman npm\-config\fR for cli usage\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-test.3 nodejs-0.11.15/deps/npm/man/man3/npm-test.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-test.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-test.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,25 +1,20 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-TEST" "3" "May 2014" "" "" -. +.TH "NPM\-TEST" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-test\fR \-\- Test a package -. -.SH "SYNOPSIS" -. +\fBnpm-test\fR \- Test a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.test(packages, callback) -. .fi -. -.SH "DESCRIPTION" -This runs a package\'s "test" script, if one was provided\. -. +.RE +.SH DESCRIPTION +.P +This runs a package's "test" script, if one was provided\. .P To run tests as a condition of installation, set the \fBnpat\fR config to true\. -. .P npm can run tests on multiple packages\. Just specify multiple packages in the \fBpackages\fR parameter\. 
+ diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-uninstall.3 nodejs-0.11.15/deps/npm/man/man3/npm-uninstall.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-uninstall.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-uninstall.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,25 +1,20 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-UNINSTALL" "3" "May 2014" "" "" -. +.TH "NPM\-UNINSTALL" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-uninstall\fR \-\- uninstall a package programmatically -. -.SH "SYNOPSIS" -. +\fBnpm-uninstall\fR \- uninstall a package programmatically +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.uninstall(packages, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This acts much the same ways as uninstalling on the command\-line\. -. .P -The \'packages\' parameter is an array of strings\. Each element in the array is +The 'packages' parameter is an array of strings\. Each element in the array is the name of a package to be uninstalled\. -. .P -Finally, \'callback\' is a function that will be called when all packages have been +Finally, 'callback' is a function that will be called when all packages have been uninstalled or when an error has been encountered\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-unpublish.3 nodejs-0.11.15/deps/npm/man/man3/npm-unpublish.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-unpublish.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-unpublish.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,30 +1,24 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-UNPUBLISH" "3" "May 2014" "" "" -. +.TH "NPM\-UNPUBLISH" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-unpublish\fR \-\- Remove a package from the registry -. -.SH "SYNOPSIS" -. +\fBnpm-unpublish\fR \- Remove a package from the registry +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.unpublish(package, callback) -. .fi -. 
-.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This removes a package version from the registry, deleting its entry and removing the tarball\. -. .P The package parameter must be defined\. -. .P Only the first element in the package parameter is used\. If there is no first element, then npm assumes that the package at the current working directory is what is meant\. -. .P If no version is specified, or if all versions are removed then the root package entry is removed from the registry entirely\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-update.3 nodejs-0.11.15/deps/npm/man/man3/npm-update.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-update.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-update.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,18 +1,18 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-UPDATE" "3" "May 2014" "" "" -. +.TH "NPM\-UPDATE" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-update\fR \-\- Update a package -. -.SH "SYNOPSIS" -. +\fBnpm-update\fR \- Update a package +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.update(packages, callback) -. .fi +.RE +.TH "DESCRIPTION" "" "October 2014" "" "" +.SH "NAME" +\fBDESCRIPTION\fR +.P Updates a package, upgrading it to the latest version\. It also installs any missing packages\. -. .P -The \'packages\' argument is an array of packages to update\. The \'callback\' parameter will be called when done or when an error occurs\. +The 'packages' argument is an array of packages to update\. The 'callback' parameter will be called when done or when an error occurs\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-version.3 nodejs-0.11.15/deps/npm/man/man3/npm-version.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-version.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-version.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,22 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. 
-.TH "NPM\-VERSION" "3" "May 2014" "" "" -. +.TH "NPM\-VERSION" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-version\fR \-\- Bump a package version -. -.SH "SYNOPSIS" -. +\fBnpm-version\fR \- Bump a package version +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.version(newversion, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Run this in a package directory to bump the version and write the new data back to the package\.json file\. -. .P If run in a git repo, it will also create a version commit and tag, and fail if the repo is not clean\. -. .P Like all other commands, this function takes a string array as its first parameter\. The difference, however, is this function will fail if it does not have exactly one element\. The only element should be a version number\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-view.3 nodejs-0.11.15/deps/npm/man/man3/npm-view.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-view.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-view.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,176 +1,131 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-VIEW" "3" "May 2014" "" "" -. +.TH "NPM\-VIEW" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-view\fR \-\- View registry info -. -.SH "SYNOPSIS" -. +\fBnpm-view\fR \- View registry info +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.view(args, [silent,] callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P This command shows data about a package and prints it to the stream referenced by the \fBoutfd\fR config, which defaults to stdout\. -. .P The "args" parameter is an ordered list that closely resembles the command\-line usage\. The elements should be ordered such that the first element is the package and version (package@version)\. The version is optional\. 
After that, the rest of the parameters are fields with optional subfields ("field\.subfield") which can be used to get only the information desired from the registry\. -. .P The callback will be passed all of the data returned by the query\. -. .P For example, to get the package registry entry for the \fBconnect\fR package, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["connect"], callback) -. .fi -. -.IP "" 0 -. +.RE .P If no version is specified, "latest" is assumed\. -. .P Field names can be specified after the package descriptor\. For example, to show the dependencies of the \fBronn\fR package at version 0\.3\.5, you could do the following: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["ronn@0\.3\.5", "dependencies"], callback) -. .fi -. -.IP "" 0 -. +.RE .P You can view child field by separating them with a period\. To view the git repository URL for the latest version of npm, you could do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["npm", "repository\.url"], callback) -. .fi -. -.IP "" 0 -. +.RE .P For fields that are arrays, requesting a non\-numeric field will return all of the values from the objects in the list\. For example, to get all the contributor names for the "express" project, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["express", "contributors\.email"], callback) -. .fi -. -.IP "" 0 -. +.RE .P You may also use numeric indices in square braces to specifically select an item in an array field\. To just get the email address of the first contributor in the list, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["express", "contributors[0]\.email"], callback) -. .fi -. -.IP "" 0 -. +.RE .P Multiple fields may be specified, and will be printed one after another\. For exampls, to get all the contributor names and email addresses, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["express", "contributors\.name", "contributors\.email"], callback) -. 
.fi -. -.IP "" 0 -. +.RE .P "Person" fields are shown as a string if they would be shown as an object\. So, for example, this will show the list of npm contributors in the shortened string format\. (See \fBnpm help json\fR for more on this\.) -. -.IP "" 4 -. +.P +.RS 2 .nf npm\.commands\.view(["npm", "contributors"], callback) -. .fi -. -.IP "" 0 -. +.RE .P If a version range is provided, then data will be printed for every matching version of the package\. This will show which version of jsdom was required by each matching version of yui3: -. -.IP "" 4 -. -.nf -npm\.commands\.view(["yui3@\'>0\.5\.4\'", "dependencies\.jsdom"], callback) -. -.fi -. -.IP "" 0 -. -.SH "OUTPUT" +.P +.RS 2 +.nf +npm\.commands\.view(["yui3@'>0\.5\.4'", "dependencies\.jsdom"], callback) +.fi +.RE +.SH OUTPUT +.P If only a single string field for a single version is output, then it will not be colorized or quoted, so as to enable piping the output to another command\. -. .P If the version range matches multiple versions, than each printed value will be prefixed with the version it applies to\. -. .P If multiple fields are requested, than each of them are prefixed with the field name\. -. .P -Console output can be disabled by setting the \'silent\' parameter to true\. -. -.SH "RETURN VALUE" +Console output can be disabled by setting the 'silent' parameter to true\. +.SH RETURN VALUE +.P The data returned will be an object in this formation: -. -.IP "" 4 -. +.P +.RS 2 .nf { : { : , \.\.\. } , \.\.\. } -. .fi -. -.IP "" 0 -. +.RE .P corresponding to the list of fields selected\. + diff -Nru nodejs-0.11.13/deps/npm/man/man3/npm-whoami.3 nodejs-0.11.15/deps/npm/man/man3/npm-whoami.3 --- nodejs-0.11.13/deps/npm/man/man3/npm-whoami.3 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man3/npm-whoami.3 2015-01-20 21:22:17.000000000 +0000 @@ -1,24 +1,19 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-WHOAMI" "3" "May 2014" "" "" -. 
+.TH "NPM\-WHOAMI" "3" "October 2014" "" "" .SH "NAME" -\fBnpm-whoami\fR \-\- Display npm username -. -.SH "SYNOPSIS" -. +\fBnpm-whoami\fR \- Display npm username +.SH SYNOPSIS +.P +.RS 2 .nf npm\.commands\.whoami(args, callback) -. .fi -. -.SH "DESCRIPTION" +.RE +.SH DESCRIPTION +.P Print the \fBusername\fR config to standard output\. -. .P -\'args\' is never used and callback is never called with data\. -\'args\' must be present or things will break\. -. +\|'args' is never used and callback is never called with data\. +\|'args' must be present or things will break\. .P This function is not useful programmatically + diff -Nru nodejs-0.11.13/deps/npm/man/man5/npm-folders.5 nodejs-0.11.15/deps/npm/man/man5/npm-folders.5 --- nodejs-0.11.13/deps/npm/man/man5/npm-folders.5 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man5/npm-folders.5 2015-01-20 21:22:17.000000000 +0000 @@ -1,141 +1,132 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-FOLDERS" "5" "May 2014" "" "" -. +.TH "NPM\-FOLDERS" "5" "October 2014" "" "" .SH "NAME" -\fBnpm-folders\fR \-\- Folder Structures Used by npm -. -.SH "DESCRIPTION" -npm puts various things on your computer\. That\'s its job\. -. +\fBnpm-folders\fR \- Folder Structures Used by npm +.SH DESCRIPTION +.P +npm puts various things on your computer\. That's its job\. .P This document will tell you what it puts where\. -. -.SS "tl;dr" -. -.IP "\(bu" 4 +.SS tl;dr +.RS 0 +.IP \(bu 2 Local install (default): puts stuff in \fB\|\./node_modules\fR of the current package root\. -. -.IP "\(bu" 4 +.IP \(bu 2 Global install (with \fB\-g\fR): puts stuff in /usr/local or wherever node is installed\. -. -.IP "\(bu" 4 -Install it \fBlocally\fR if you\'re going to \fBrequire()\fR it\. -. -.IP "\(bu" 4 -Install it \fBglobally\fR if you\'re going to run it on the command line\. -. -.IP "\(bu" 4 +.IP \(bu 2 +Install it \fBlocally\fR if you're going to \fBrequire()\fR it\. 
+.IP \(bu 2 +Install it \fBglobally\fR if you're going to run it on the command line\. +.IP \(bu 2 If you need both, then install it in both places, or use \fBnpm link\fR\|\. -. -.IP "" 0 -. -.SS "prefix Configuration" + +.RE +.SS prefix Configuration +.P The \fBprefix\fR config defaults to the location where node is installed\. On most systems, this is \fB/usr/local\fR, and most of the time is the same -as node\'s \fBprocess\.installPrefix\fR\|\. -. +as node's \fBprocess\.installPrefix\fR\|\. .P On windows, this is the exact location of the node\.exe binary\. On Unix -systems, it\'s one level up, since node is typically installed at \fB{prefix}/bin/node\fR rather than \fB{prefix}/node\.exe\fR\|\. -. +systems, it's one level up, since node is typically installed at +\fB{prefix}/bin/node\fR rather than \fB{prefix}/node\.exe\fR\|\. .P When the \fBglobal\fR flag is set, npm installs things into this prefix\. When it is not set, it uses the root of the current package, or the current working directory if not in a package already\. -. -.SS "Node Modules" +.SS Node Modules +.P Packages are dropped into the \fBnode_modules\fR folder under the \fBprefix\fR\|\. -When installing locally, this means that you can \fBrequire("packagename")\fR to load its main module, or \fBrequire("packagename/lib/path/to/sub/module")\fR to load other modules\. -. +When installing locally, this means that you can +\fBrequire("packagename")\fR to load its main module, or +\fBrequire("packagename/lib/path/to/sub/module")\fR to load other modules\. .P Global installs on Unix systems go to \fB{prefix}/lib/node_modules\fR\|\. -Global installs on Windows go to \fB{prefix}/node_modules\fR (that is, no \fBlib\fR folder\.) -. +Global installs on Windows go to \fB{prefix}/node_modules\fR (that is, no +\fBlib\fR folder\.) 
+.P +Scoped packages are installed the same way, except they are grouped together +in a sub\-folder of the relevant \fBnode_modules\fR folder with the name of that +scope prefix by the @ symbol, e\.g\. \fBnpm install @myorg/package\fR would place +the package in \fB{prefix}/node_modules/@myorg/package\fR\|\. See npm help 7 \fBscopes\fR for +more details\. .P If you wish to \fBrequire()\fR a package, then install it locally\. -. -.SS "Executables" +.SS Executables +.P When in global mode, executables are linked into \fB{prefix}/bin\fR on Unix, or directly into \fB{prefix}\fR on Windows\. -. .P -When in local mode, executables are linked into \fB\|\./node_modules/\.bin\fR so that they can be made available to scripts run +When in local mode, executables are linked into +\fB\|\./node_modules/\.bin\fR so that they can be made available to scripts run through npm\. (For example, so that a test runner will be in the path when you run \fBnpm test\fR\|\.) -. -.SS "Man Pages" +.SS Man Pages +.P When in global mode, man pages are linked into \fB{prefix}/share/man\fR\|\. -. .P When in local mode, man pages are not installed\. -. .P Man pages are not installed on Windows systems\. -. -.SS "Cache" -npm help See \fBnpm\-cache\fR\|\. Cache files are stored in \fB~/\.npm\fR on Posix, or \fB~/npm\-cache\fR on Windows\. -. +.SS Cache +.P +See npm help \fBnpm\-cache\fR\|\. Cache files are stored in \fB~/\.npm\fR on Posix, or +\fB~/npm\-cache\fR on Windows\. .P This is controlled by the \fBcache\fR configuration param\. -. -.SS "Temp Files" -Temporary files are stored by default in the folder specified by the \fBtmp\fR config, which defaults to the TMPDIR, TMP, or TEMP environment +.SS Temp Files +.P +Temporary files are stored by default in the folder specified by the +\fBtmp\fR config, which defaults to the TMPDIR, TMP, or TEMP environment variables, or \fB/tmp\fR on Unix and \fBc:\\windows\\temp\fR on Windows\. -. 
.P Temp files are given a unique folder under this root for each run of the program, and are deleted upon successful exit\. -. -.SH "More Information" -When installing locally, npm first tries to find an appropriate \fBprefix\fR folder\. This is so that \fBnpm install foo@1\.2\.3\fR will install +.SH More Information +.P +When installing locally, npm first tries to find an appropriate +\fBprefix\fR folder\. This is so that \fBnpm install foo@1\.2\.3\fR will install to the sensible root of your package, even if you happen to have \fBcd\fRed into some other folder\. -. .P Starting at the $PWD, npm will walk up the folder tree checking for a folder that contains either a \fBpackage\.json\fR file, or a \fBnode_modules\fR folder\. If such a thing is found, then that is treated as the effective "current directory" for the purpose of running npm commands\. (This -behavior is inspired by and similar to git\'s \.git\-folder seeking +behavior is inspired by and similar to git's \.git\-folder seeking logic when running git commands in a working dir\.) -. .P If no package root is found, then the current folder is used\. -. .P When you run \fBnpm install foo@1\.2\.3\fR, then the package is loaded into the cache, and then unpacked into \fB\|\./node_modules/foo\fR\|\. Then, any of -foo\'s dependencies are similarly unpacked into \fB\|\./node_modules/foo/node_modules/\.\.\.\fR\|\. -. +foo's dependencies are similarly unpacked into +\fB\|\./node_modules/foo/node_modules/\.\.\.\fR\|\. .P Any bin files are symlinked to \fB\|\./node_modules/\.bin/\fR, so that they may be found by npm scripts when necessary\. -. -.SS "Global Installation" +.SS Global Installation +.P If the \fBglobal\fR configuration is set to true, then npm will install packages "globally"\. -. .P For global installation, packages are installed roughly the same way, but using the folders described above\. -. 
-.SS "Cycles, Conflicts, and Folder Parsimony" -Cycles are handled using the property of node\'s module system that it +.SS Cycles, Conflicts, and Folder Parsimony +.P +Cycles are handled using the property of node's module system that it walks up the directories looking for \fBnode_modules\fR folders\. So, at every stage, if a package is already installed in an ancestor \fBnode_modules\fR folder, then it is not installed at the current location\. -. .P Consider the case above, where \fBfoo \-> bar \-> baz\fR\|\. Imagine if, in -addition to that, baz depended on bar, so you\'d have: \fBfoo \-> bar \-> baz \-> bar \-> baz \.\.\.\fR\|\. However, since the folder -structure is: \fBfoo/node_modules/bar/node_modules/baz\fR, there\'s no need to +addition to that, baz depended on bar, so you'd have: +\fBfoo \-> bar \-> baz \-> bar \-> baz \.\.\.\fR\|\. However, since the folder +structure is: \fBfoo/node_modules/bar/node_modules/baz\fR, there's no need to put another copy of bar into \fB\|\.\.\./baz/node_modules\fR, since when it calls -require("bar"), it will get the copy that is installed in \fBfoo/node_modules/bar\fR\|\. -. +require("bar"), it will get the copy that is installed in +\fBfoo/node_modules/bar\fR\|\. .P This shortcut is only used if the exact same version would be installed in multiple nested \fBnode_modules\fR folders\. It @@ -143,16 +134,14 @@ "a" packages are different versions\. However, without repeating the exact same package multiple times, an infinite regress will always be prevented\. -. .P Another optimization can be made by installing dependencies at the highest level possible, below the localized "target" folder\. -. -.SS "\fIExample\fR" +.SS Example +.P Consider this dependency graph: -. -.IP "" 4 -. +.P +.RS 2 .nf foo +\-\- blerg@1\.2\.5 @@ -165,16 +154,12 @@ `\-\- baz@1\.2\.3 `\-\- quux@3\.x `\-\- bar -. .fi -. -.IP "" 0 -. +.RE .P In this case, we might expect a folder structure like this: -. -.IP "" 4 -. 
+.P +.RS 2 .nf foo +\-\- node_modules @@ -188,77 +173,59 @@ `\-\- baz (1\.2\.3) <\-\-\-[D] `\-\- node_modules `\-\- quux (3\.2\.0) <\-\-\-[E] -. .fi -. -.IP "" 0 -. +.RE .P Since foo depends directly on \fBbar@1\.2\.3\fR and \fBbaz@1\.2\.3\fR, those are -installed in foo\'s \fBnode_modules\fR folder\. -. +installed in foo's \fBnode_modules\fR folder\. .P Even though the latest copy of blerg is 1\.3\.7, foo has a specific dependency on version 1\.2\.5\. So, that gets installed at [A]\. Since the -parent installation of blerg satisfies bar\'s dependency on \fBblerg@1\.x\fR, +parent installation of blerg satisfies bar's dependency on \fBblerg@1\.x\fR, it does not install another copy under [B]\. -. .P Bar [B] also has dependencies on baz and asdf, so those are installed in -bar\'s \fBnode_modules\fR folder\. Because it depends on \fBbaz@2\.x\fR, it cannot +bar's \fBnode_modules\fR folder\. Because it depends on \fBbaz@2\.x\fR, it cannot re\-use the \fBbaz@1\.2\.3\fR installed in the parent \fBnode_modules\fR folder [D], and must install its own copy [C]\. -. .P Underneath bar, the \fBbaz \-> quux \-> bar\fR dependency creates a cycle\. -However, because bar is already in quux\'s ancestry [B], it does not +However, because bar is already in quux's ancestry [B], it does not unpack another copy of bar into that folder\. -. .P -Underneath \fBfoo \-> baz\fR [D], quux\'s [E] folder tree is empty, because its +Underneath \fBfoo \-> baz\fR [D], quux's [E] folder tree is empty, because its dependency on bar is satisfied by the parent folder copy installed at [B]\. -. .P For a graphical breakdown of what is installed where, use \fBnpm ls\fR\|\. -. -.SS "Publishing" +.SS Publishing +.P Upon publishing, npm will look in the \fBnode_modules\fR folder\. If any of the items there are not in the \fBbundledDependencies\fR array, then they will not be included in the package tarball\. -. 
.P This allows a package maintainer to install all of their dependencies (and dev dependencies) locally, but only re\-publish those items that -npm help cannot be found elsewhere\. See \fBpackage\.json\fR for more information\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +cannot be found elsewhere\. See npm help 5 \fBpackage\.json\fR for more information\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help pack -. -.IP "\(bu" 4 +.IP \(bu 2 npm help cache -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 npm help publish -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man5/npm-global.5 nodejs-0.11.15/deps/npm/man/man5/npm-global.5 --- nodejs-0.11.13/deps/npm/man/man5/npm-global.5 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man5/npm-global.5 2015-01-20 21:22:17.000000000 +0000 @@ -1,141 +1,132 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-FOLDERS" "5" "May 2014" "" "" -. +.TH "NPM\-FOLDERS" "5" "October 2014" "" "" .SH "NAME" -\fBnpm-folders\fR \-\- Folder Structures Used by npm -. -.SH "DESCRIPTION" -npm puts various things on your computer\. That\'s its job\. -. +\fBnpm-folders\fR \- Folder Structures Used by npm +.SH DESCRIPTION +.P +npm puts various things on your computer\. That's its job\. .P This document will tell you what it puts where\. -. -.SS "tl;dr" -. -.IP "\(bu" 4 +.SS tl;dr +.RS 0 +.IP \(bu 2 Local install (default): puts stuff in \fB\|\./node_modules\fR of the current package root\. -. -.IP "\(bu" 4 +.IP \(bu 2 Global install (with \fB\-g\fR): puts stuff in /usr/local or wherever node is installed\. -. 
-.IP "\(bu" 4 -Install it \fBlocally\fR if you\'re going to \fBrequire()\fR it\. -. -.IP "\(bu" 4 -Install it \fBglobally\fR if you\'re going to run it on the command line\. -. -.IP "\(bu" 4 +.IP \(bu 2 +Install it \fBlocally\fR if you're going to \fBrequire()\fR it\. +.IP \(bu 2 +Install it \fBglobally\fR if you're going to run it on the command line\. +.IP \(bu 2 If you need both, then install it in both places, or use \fBnpm link\fR\|\. -. -.IP "" 0 -. -.SS "prefix Configuration" + +.RE +.SS prefix Configuration +.P The \fBprefix\fR config defaults to the location where node is installed\. On most systems, this is \fB/usr/local\fR, and most of the time is the same -as node\'s \fBprocess\.installPrefix\fR\|\. -. +as node's \fBprocess\.installPrefix\fR\|\. .P On windows, this is the exact location of the node\.exe binary\. On Unix -systems, it\'s one level up, since node is typically installed at \fB{prefix}/bin/node\fR rather than \fB{prefix}/node\.exe\fR\|\. -. +systems, it's one level up, since node is typically installed at +\fB{prefix}/bin/node\fR rather than \fB{prefix}/node\.exe\fR\|\. .P When the \fBglobal\fR flag is set, npm installs things into this prefix\. When it is not set, it uses the root of the current package, or the current working directory if not in a package already\. -. -.SS "Node Modules" +.SS Node Modules +.P Packages are dropped into the \fBnode_modules\fR folder under the \fBprefix\fR\|\. -When installing locally, this means that you can \fBrequire("packagename")\fR to load its main module, or \fBrequire("packagename/lib/path/to/sub/module")\fR to load other modules\. -. +When installing locally, this means that you can +\fBrequire("packagename")\fR to load its main module, or +\fBrequire("packagename/lib/path/to/sub/module")\fR to load other modules\. .P Global installs on Unix systems go to \fB{prefix}/lib/node_modules\fR\|\. -Global installs on Windows go to \fB{prefix}/node_modules\fR (that is, no \fBlib\fR folder\.) -. 
+Global installs on Windows go to \fB{prefix}/node_modules\fR (that is, no +\fBlib\fR folder\.) +.P +Scoped packages are installed the same way, except they are grouped together +in a sub\-folder of the relevant \fBnode_modules\fR folder with the name of that +scope prefix by the @ symbol, e\.g\. \fBnpm install @myorg/package\fR would place +the package in \fB{prefix}/node_modules/@myorg/package\fR\|\. See npm help 7 \fBscopes\fR for +more details\. .P If you wish to \fBrequire()\fR a package, then install it locally\. -. -.SS "Executables" +.SS Executables +.P When in global mode, executables are linked into \fB{prefix}/bin\fR on Unix, or directly into \fB{prefix}\fR on Windows\. -. .P -When in local mode, executables are linked into \fB\|\./node_modules/\.bin\fR so that they can be made available to scripts run +When in local mode, executables are linked into +\fB\|\./node_modules/\.bin\fR so that they can be made available to scripts run through npm\. (For example, so that a test runner will be in the path when you run \fBnpm test\fR\|\.) -. -.SS "Man Pages" +.SS Man Pages +.P When in global mode, man pages are linked into \fB{prefix}/share/man\fR\|\. -. .P When in local mode, man pages are not installed\. -. .P Man pages are not installed on Windows systems\. -. -.SS "Cache" -npm help See \fBnpm\-cache\fR\|\. Cache files are stored in \fB~/\.npm\fR on Posix, or \fB~/npm\-cache\fR on Windows\. -. +.SS Cache +.P +See npm help \fBnpm\-cache\fR\|\. Cache files are stored in \fB~/\.npm\fR on Posix, or +\fB~/npm\-cache\fR on Windows\. .P This is controlled by the \fBcache\fR configuration param\. -. 
-.SS "Temp Files" -Temporary files are stored by default in the folder specified by the \fBtmp\fR config, which defaults to the TMPDIR, TMP, or TEMP environment +.SS Temp Files +.P +Temporary files are stored by default in the folder specified by the +\fBtmp\fR config, which defaults to the TMPDIR, TMP, or TEMP environment variables, or \fB/tmp\fR on Unix and \fBc:\\windows\\temp\fR on Windows\. -. .P Temp files are given a unique folder under this root for each run of the program, and are deleted upon successful exit\. -. -.SH "More Information" -When installing locally, npm first tries to find an appropriate \fBprefix\fR folder\. This is so that \fBnpm install foo@1\.2\.3\fR will install +.SH More Information +.P +When installing locally, npm first tries to find an appropriate +\fBprefix\fR folder\. This is so that \fBnpm install foo@1\.2\.3\fR will install to the sensible root of your package, even if you happen to have \fBcd\fRed into some other folder\. -. .P Starting at the $PWD, npm will walk up the folder tree checking for a folder that contains either a \fBpackage\.json\fR file, or a \fBnode_modules\fR folder\. If such a thing is found, then that is treated as the effective "current directory" for the purpose of running npm commands\. (This -behavior is inspired by and similar to git\'s \.git\-folder seeking +behavior is inspired by and similar to git's \.git\-folder seeking logic when running git commands in a working dir\.) -. .P If no package root is found, then the current folder is used\. -. .P When you run \fBnpm install foo@1\.2\.3\fR, then the package is loaded into the cache, and then unpacked into \fB\|\./node_modules/foo\fR\|\. Then, any of -foo\'s dependencies are similarly unpacked into \fB\|\./node_modules/foo/node_modules/\.\.\.\fR\|\. -. +foo's dependencies are similarly unpacked into +\fB\|\./node_modules/foo/node_modules/\.\.\.\fR\|\. 
.P Any bin files are symlinked to \fB\|\./node_modules/\.bin/\fR, so that they may be found by npm scripts when necessary\. -. -.SS "Global Installation" +.SS Global Installation +.P If the \fBglobal\fR configuration is set to true, then npm will install packages "globally"\. -. .P For global installation, packages are installed roughly the same way, but using the folders described above\. -. -.SS "Cycles, Conflicts, and Folder Parsimony" -Cycles are handled using the property of node\'s module system that it +.SS Cycles, Conflicts, and Folder Parsimony +.P +Cycles are handled using the property of node's module system that it walks up the directories looking for \fBnode_modules\fR folders\. So, at every stage, if a package is already installed in an ancestor \fBnode_modules\fR folder, then it is not installed at the current location\. -. .P Consider the case above, where \fBfoo \-> bar \-> baz\fR\|\. Imagine if, in -addition to that, baz depended on bar, so you\'d have: \fBfoo \-> bar \-> baz \-> bar \-> baz \.\.\.\fR\|\. However, since the folder -structure is: \fBfoo/node_modules/bar/node_modules/baz\fR, there\'s no need to +addition to that, baz depended on bar, so you'd have: +\fBfoo \-> bar \-> baz \-> bar \-> baz \.\.\.\fR\|\. However, since the folder +structure is: \fBfoo/node_modules/bar/node_modules/baz\fR, there's no need to put another copy of bar into \fB\|\.\.\./baz/node_modules\fR, since when it calls -require("bar"), it will get the copy that is installed in \fBfoo/node_modules/bar\fR\|\. -. +require("bar"), it will get the copy that is installed in +\fBfoo/node_modules/bar\fR\|\. .P This shortcut is only used if the exact same version would be installed in multiple nested \fBnode_modules\fR folders\. It @@ -143,16 +134,14 @@ "a" packages are different versions\. However, without repeating the exact same package multiple times, an infinite regress will always be prevented\. -. 
.P Another optimization can be made by installing dependencies at the highest level possible, below the localized "target" folder\. -. -.SS "\fIExample\fR" +.SS Example +.P Consider this dependency graph: -. -.IP "" 4 -. +.P +.RS 2 .nf foo +\-\- blerg@1\.2\.5 @@ -165,16 +154,12 @@ `\-\- baz@1\.2\.3 `\-\- quux@3\.x `\-\- bar -. .fi -. -.IP "" 0 -. +.RE .P In this case, we might expect a folder structure like this: -. -.IP "" 4 -. +.P +.RS 2 .nf foo +\-\- node_modules @@ -188,77 +173,59 @@ `\-\- baz (1\.2\.3) <\-\-\-[D] `\-\- node_modules `\-\- quux (3\.2\.0) <\-\-\-[E] -. .fi -. -.IP "" 0 -. +.RE .P Since foo depends directly on \fBbar@1\.2\.3\fR and \fBbaz@1\.2\.3\fR, those are -installed in foo\'s \fBnode_modules\fR folder\. -. +installed in foo's \fBnode_modules\fR folder\. .P Even though the latest copy of blerg is 1\.3\.7, foo has a specific dependency on version 1\.2\.5\. So, that gets installed at [A]\. Since the -parent installation of blerg satisfies bar\'s dependency on \fBblerg@1\.x\fR, +parent installation of blerg satisfies bar's dependency on \fBblerg@1\.x\fR, it does not install another copy under [B]\. -. .P Bar [B] also has dependencies on baz and asdf, so those are installed in -bar\'s \fBnode_modules\fR folder\. Because it depends on \fBbaz@2\.x\fR, it cannot +bar's \fBnode_modules\fR folder\. Because it depends on \fBbaz@2\.x\fR, it cannot re\-use the \fBbaz@1\.2\.3\fR installed in the parent \fBnode_modules\fR folder [D], and must install its own copy [C]\. -. .P Underneath bar, the \fBbaz \-> quux \-> bar\fR dependency creates a cycle\. -However, because bar is already in quux\'s ancestry [B], it does not +However, because bar is already in quux's ancestry [B], it does not unpack another copy of bar into that folder\. -. 
.P -Underneath \fBfoo \-> baz\fR [D], quux\'s [E] folder tree is empty, because its +Underneath \fBfoo \-> baz\fR [D], quux's [E] folder tree is empty, because its dependency on bar is satisfied by the parent folder copy installed at [B]\. -. .P For a graphical breakdown of what is installed where, use \fBnpm ls\fR\|\. -. -.SS "Publishing" +.SS Publishing +.P Upon publishing, npm will look in the \fBnode_modules\fR folder\. If any of the items there are not in the \fBbundledDependencies\fR array, then they will not be included in the package tarball\. -. .P This allows a package maintainer to install all of their dependencies (and dev dependencies) locally, but only re\-publish those items that -npm help cannot be found elsewhere\. See \fBpackage\.json\fR for more information\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +cannot be found elsewhere\. See npm help 5 \fBpackage\.json\fR for more information\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help pack -. -.IP "\(bu" 4 +.IP \(bu 2 npm help cache -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 npm help publish -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man5/npm-json.5 nodejs-0.11.15/deps/npm/man/man5/npm-json.5 --- nodejs-0.11.13/deps/npm/man/man5/npm-json.5 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man5/npm-json.5 2015-01-20 21:22:17.000000000 +0000 @@ -1,253 +1,209 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "PACKAGE\.JSON" "5" "May 2014" "" "" -. +.TH "PACKAGE\.JSON" "5" "October 2014" "" "" .SH "NAME" -\fBpackage.json\fR \-\- Specifics of npm\'s package\.json handling -. 
-.SH "DESCRIPTION" -This document is all you need to know about what\'s required in your package\.json +\fBpackage.json\fR \- Specifics of npm's package\.json handling +.SH DESCRIPTION +.P +This document is all you need to know about what's required in your package\.json file\. It must be actual JSON, not just a JavaScript object literal\. -. .P A lot of the behavior described in this document is affected by the config -npm help settings described in \fBnpm\-config\fR\|\. -. -.SH "name" +settings described in npm help 7 \fBnpm\-config\fR\|\. +.SH name +.P The \fImost\fR important things in your package\.json are the name and version fields\. -Those are actually required, and your package won\'t install without +Those are actually required, and your package won't install without them\. The name and version together form an identifier that is assumed to be completely unique\. Changes to the package should come along with changes to the version\. -. .P The name is what your thing is called\. Some tips: -. -.IP "\(bu" 4 -Don\'t put "js" or "node" in the name\. It\'s assumed that it\'s js, since you\'re +.RS 0 +.IP \(bu 2 +Don't put "js" or "node" in the name\. It's assumed that it's js, since you're writing a package\.json file, and you can specify the engine using the "engines" field\. (See below\.) -. -.IP "\(bu" 4 +.IP \(bu 2 The name ends up being part of a URL, an argument on the command line, and a folder name\. Any name with non\-url\-safe characters will be rejected\. -Also, it can\'t start with a dot or an underscore\. -. -.IP "\(bu" 4 +Also, it can't start with a dot or an underscore\. +.IP \(bu 2 The name will probably be passed as an argument to require(), so it should be something short, but also reasonably descriptive\. -. -.IP "\(bu" 4 -You may want to check the npm registry to see if there\'s something by that name +.IP \(bu 2 +You may want to check the npm registry to see if there's something by that name already, before you get too attached to it\. 
http://registry\.npmjs\.org/ -. -.IP "" 0 -. -.SH "version" + +.RE +.P +A name can be optionally prefixed by a scope, e\.g\. \fB@myorg/mypackage\fR\|\. See +npm help 7 \fBnpm\-scope\fR for more detail\. +.SH version +.P The \fImost\fR important things in your package\.json are the name and version fields\. -Those are actually required, and your package won\'t install without +Those are actually required, and your package won't install without them\. The name and version together form an identifier that is assumed to be completely unique\. Changes to the package should come along with changes to the version\. -. .P -Version must be parseable by node\-semver \fIhttps://github\.com/isaacs/node\-semver\fR, which is bundled +Version must be parseable by +node\-semver \fIhttps://github\.com/isaacs/node\-semver\fR, which is bundled with npm as a dependency\. (\fBnpm install semver\fR to use it yourself\.) -. .P -npm help More on version numbers and ranges at semver\. -. -.SH "description" -Put a description in it\. It\'s a string\. This helps people discover your -package, as it\'s listed in \fBnpm search\fR\|\. -. -.SH "keywords" -Put keywords in it\. It\'s an array of strings\. This helps people -discover your package as it\'s listed in \fBnpm search\fR\|\. -. -.SH "homepage" +More on version numbers and ranges at npm help 7 semver\. +.SH description +.P +Put a description in it\. It's a string\. This helps people discover your +package, as it's listed in \fBnpm search\fR\|\. +.SH keywords +.P +Put keywords in it\. It's an array of strings\. This helps people +discover your package as it's listed in \fBnpm search\fR\|\. +.SH homepage +.P The url to the project homepage\. -. .P \fBNOTE\fR: This is \fInot\fR the same as "url"\. If you put a "url" field, -then the registry will think it\'s a redirection to your package that has +then the registry will think it's a redirection to your package that has been published somewhere else, and spit at you\. -. .P -Literally\. 
Spit\. I\'m so not kidding\. -. -.SH "bugs" -The url to your project\'s issue tracker and / or the email address to which +Literally\. Spit\. I'm so not kidding\. +.SH bugs +.P +The url to your project's issue tracker and / or the email address to which issues should be reported\. These are helpful for people who encounter issues with your package\. -. .P It should look like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "url" : "http://github\.com/owner/project/issues" , "email" : "project@hostname\.com" } -. .fi -. -.IP "" 0 -. +.RE .P You can specify either one or both values\. If you want to provide only a url, you can specify the value for "bugs" as a simple string instead of an object\. -. .P If a url is provided, it will be used by the \fBnpm bugs\fR command\. -. -.SH "license" +.SH license +.P You should specify a license for your package so that people know how they are -permitted to use it, and any restrictions you\'re placing on it\. -. +permitted to use it, and any restrictions you're placing on it\. .P -The simplest way, assuming you\'re using a common license such as BSD\-3\-Clause -or MIT, is to just specify the standard SPDX ID of the license you\'re using, +The simplest way, assuming you're using a common license such as BSD\-3\-Clause +or MIT, is to just specify the standard SPDX ID of the license you're using, like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "license" : "BSD\-3\-Clause" } -. .fi -. -.IP "" 0 -. +.RE .P You can check the full list of SPDX license IDs \fIhttps://spdx\.org/licenses/\fR\|\. -Ideally you should pick one that is OSI \fIhttp://opensource\.org/licenses/alphabetical\fR approved\. -. +Ideally you should pick one that is +OSI \fIhttp://opensource\.org/licenses/alphabetical\fR approved\. .P -It\'s also a good idea to include a LICENSE file at the top level in +It's also a good idea to include a LICENSE file at the top level in your package\. -. 
-.SH "people fields: author, contributors" +.SH people fields: author, contributors +.P The "author" is one person\. "contributors" is an array of people\. A "person" is an object with a "name" field and optionally "url" and "email", like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "Barney Rubble" , "email" : "b@rubble\.com" , "url" : "http://barnyrubble\.tumblr\.com/" } -. .fi -. -.IP "" 0 -. +.RE .P Or you can shorten that all into a single string, and npm will parse it for you: -. -.IP "" 4 -. +.P +.RS 2 .nf "Barney Rubble (http://barnyrubble\.tumblr\.com/) -. .fi -. -.IP "" 0 -. +.RE .P Both email and url are optional either way\. -. .P npm also sets a top\-level "maintainers" field with your npm user info\. -. -.SH "files" +.SH files +.P The "files" field is an array of files to include in your project\. If you name a folder in the array, then it will also include the files inside that folder\. (Unless they would be ignored by another rule\.) -. .P You can also provide a "\.npmignore" file in the root of your package, which will keep files from being included, even if they would be picked up by the files array\. The "\.npmignore" file works just like a "\.gitignore"\. -. -.SH "main" +.SH main +.P The main field is a module ID that is the primary entry point to your program\. -That is, if your package is named \fBfoo\fR, and a user installs it, and then does \fBrequire("foo")\fR, then your main module\'s exports object will be returned\. -. +That is, if your package is named \fBfoo\fR, and a user installs it, and then does +\fBrequire("foo")\fR, then your main module's exports object will be returned\. .P This should be a module ID relative to the root of your package folder\. -. .P For most modules, it makes the most sense to have a main script and often not much else\. -. 
-.SH "bin" -A lot of packages have one or more executable files that they\'d like to +.SH bin +.P +A lot of packages have one or more executable files that they'd like to install into the PATH\. npm makes this pretty easy (in fact, it uses this feature to install the "npm" executable\.) -. .P To use this, supply a \fBbin\fR field in your package\.json which is a map of -command name to local file name\. On install, npm will symlink that file into \fBprefix/bin\fR for global installs, or \fB\|\./node_modules/\.bin/\fR for local +command name to local file name\. On install, npm will symlink that file into +\fBprefix/bin\fR for global installs, or \fB\|\./node_modules/\.bin/\fR for local installs\. -. .P For example, npm has this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "bin" : { "npm" : "\./cli\.js" } } -. .fi -. -.IP "" 0 -. +.RE .P -So, when you install npm, it\'ll create a symlink from the \fBcli\.js\fR script to \fB/usr/local/bin/npm\fR\|\. -. +So, when you install npm, it'll create a symlink from the \fBcli\.js\fR script to +\fB/usr/local/bin/npm\fR\|\. .P If you have a single executable, and its name should be the name of the package, then you can just supply it as a string\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "my\-program" , "version": "1\.2\.5" , "bin": "\./path/to/program" } -. .fi -. -.IP "" 0 -. +.RE .P would be the same as this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "my\-program" , "version": "1\.2\.5" , "bin" : { "my\-program" : "\./path/to/program" } } -. .fi -. -.IP "" 0 -. -.SH "man" -Specify either a single file or an array of filenames to put in place for the \fBman\fR program to find\. -. +.RE +.SH man .P -If only a single file is provided, then it\'s installed such that it is the +Specify either a single file or an array of filenames to put in place for the +\fBman\fR program to find\. +.P +If only a single file is provided, then it's installed such that it is the result from \fBman \fR, regardless of its actual filename\. 
For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "version" : "1\.2\.3" @@ -255,20 +211,15 @@ , "main" : "foo\.js" , "man" : "\./man/doc\.1" } -. .fi -. -.IP "" 0 -. +.RE .P would link the \fB\|\./man/doc\.1\fR file in such that it is the target for \fBman foo\fR -. .P -If the filename doesn\'t start with the package name, then it\'s prefixed\. +If the filename doesn't start with the package name, then it's prefixed\. So, this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "version" : "1\.2\.3" @@ -276,20 +227,15 @@ , "main" : "foo\.js" , "man" : [ "\./man/foo\.1", "\./man/bar\.1" ] } -. .fi -. -.IP "" 0 -. +.RE .P will create files to do \fBman foo\fR and \fBman foo\-bar\fR\|\. -. .P Man files must end with a number, and optionally a \fB\|\.gz\fR suffix if they are compressed\. The number dictates which man section the file is installed into\. -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "version" : "1\.2\.3" @@ -297,169 +243,142 @@ , "main" : "foo\.js" , "man" : [ "\./man/foo\.1", "\./man/foo\.2" ] } -. .fi -. -.IP "" 0 -. +.RE .P will create entries for \fBman foo\fR and \fBman 2 foo\fR -. -.SH "directories" +.SH directories +.P The CommonJS Packages \fIhttp://wiki\.commonjs\.org/wiki/Packages/1\.0\fR spec details a few ways that you can indicate the structure of your package using a \fBdirectories\fR -hash\. If you look at npm\'s package\.json \fIhttp://registry\.npmjs\.org/npm/latest\fR, -you\'ll see that it has directories for doc, lib, and man\. -. +object\. If you look at npm's package\.json \fIhttp://registry\.npmjs\.org/npm/latest\fR, +you'll see that it has directories for doc, lib, and man\. .P In the future, this information may be used in other creative ways\. -. -.SS "directories\.lib" +.SS directories\.lib +.P Tell people where the bulk of your library is\. Nothing special is done -with the lib folder in any way, but it\'s useful meta info\. -. 
-.SS "directories\.bin" -If you specify a "bin" directory, then all the files in that folder will -be used as the "bin" hash\. -. -.P -If you have a "bin" hash already, then this has no effect\. -. -.SS "directories\.man" +with the lib folder in any way, but it's useful meta info\. +.SS directories\.bin +.P +If you specify a \fBbin\fR directory, then all the files in that folder will +be added as children of the \fBbin\fR path\. +.P +If you have a \fBbin\fR path already, then this has no effect\. +.SS directories\.man +.P A folder that is full of man pages\. Sugar to generate a "man" array by walking the folder\. -. -.SS "directories\.doc" +.SS directories\.doc +.P Put markdown files in here\. Eventually, these will be displayed nicely, maybe, someday\. -. -.SS "directories\.example" +.SS directories\.example +.P Put example scripts in here\. Someday, it might be exposed in some clever way\. -. -.SH "repository" +.SH repository +.P Specify the place where your code lives\. This is helpful for people who want to contribute\. If the git repo is on github, then the \fBnpm docs\fR command will be able to find you\. -. .P Do it like this: -. -.IP "" 4 -. +.P +.RS 2 .nf "repository" : { "type" : "git" , "url" : "http://github\.com/npm/npm\.git" } + "repository" : { "type" : "svn" , "url" : "http://v8\.googlecode\.com/svn/trunk/" } -. .fi -. -.IP "" 0 -. +.RE .P The URL should be a publicly available (perhaps read\-only) url that can be handed directly to a VCS program without any modification\. It should not be a url to an -html project page that you put in your browser\. It\'s for computers\. -. -.SH "scripts" -The "scripts" member is an object hash of script commands that are run +html project page that you put in your browser\. It's for computers\. +.SH scripts +.P +The "scripts" property is a dictionary containing script commands that are run at various times in the lifecycle of your package\. 
The key is the lifecycle event, and the value is the command to run at that point\. -. .P -npm help See \fBnpm\-scripts\fR to find out more about writing package scripts\. -. -.SH "config" -A "config" hash can be used to set configuration -parameters used in package scripts that persist across upgrades\. For -instance, if a package had the following: -. -.IP "" 4 -. +See npm help 7 \fBnpm\-scripts\fR to find out more about writing package scripts\. +.SH config +.P +A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades\. For instance, if a package had the +following: +.P +.RS 2 .nf { "name" : "foo" , "config" : { "port" : "8080" } } -. .fi -. -.IP "" 0 -. +.RE .P -and then had a "start" command that then referenced the \fBnpm_package_config_port\fR environment variable, then the user could +and then had a "start" command that then referenced the +\fBnpm_package_config_port\fR environment variable, then the user could override that by doing \fBnpm config set foo:port 8001\fR\|\. -. .P -npm help See \fBnpm\-confignpm help \fR and \fBnpm\-scripts\fR for more on package +See npm help 7 \fBnpm\-config\fR and npm help 7 \fBnpm\-scripts\fR for more on package configs\. -. -.SH "dependencies" -Dependencies are specified with a simple hash of package name to +.SH dependencies +.P +Dependencies are specified in a simple object that maps a package name to a version range\. The version range is a string which has one or more -space\-separated descriptors\. Dependencies can also be identified with -a tarball or git URL\. -. -.P -\fBPlease do not put test harnesses or transpilers in your \fBdependencies\fR hash\.\fR See \fBdevDependencies\fR, below\. -. -.P -npm help See semver for more details about specifying version ranges\. -. -.IP "\(bu" 4 +space\-separated descriptors\. Dependencies can also be identified with a +tarball or git URL\. 
+.P +\fBPlease do not put test harnesses or transpilers in your +\fBdependencies\fR object\.\fR See \fBdevDependencies\fR, below\. +.P +See npm help 7 semver for more details about specifying version ranges\. +.RS 0 +.IP \(bu 2 \fBversion\fR Must match \fBversion\fR exactly -. -.IP "\(bu" 4 +.IP \(bu 2 \fB>version\fR Must be greater than \fBversion\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB>=version\fR etc -. -.IP "\(bu" 4 +.IP \(bu 2 \fB=version1 <=version2\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fBrange1 || range2\fR Passes if either range1 or range2 are satisfied\. -. -.IP "\(bu" 4 -\fBgit\.\.\.\fR See \'Git URLs as Dependencies\' below -. -.IP "\(bu" 4 -\fBuser/repo\fR See \'GitHub URLs\' below -. -.IP "" 0 -. +.IP \(bu 2 +\fBgit\.\.\.\fR See 'Git URLs as Dependencies' below +.IP \(bu 2 +\fBuser/repo\fR See 'GitHub URLs' below +.IP \(bu 2 +\fBtag\fR A specific version tagged and published as \fBtag\fR See npm help \fBnpm\-tag\fR +.IP \(bu 2 +\fBpath/path/path\fR See Local Paths below + +.RE .P For example, these are all valid: -. -.IP "" 4 -. +.P +.RS 2 .nf { "dependencies" : { "foo" : "1\.0\.0 \- 2\.9999\.9999" @@ -472,45 +391,39 @@ , "elf" : "~1\.2\.3" , "two" : "2\.x" , "thr" : "3\.3\.x" + , "lat" : "latest" + , "dyl" : "file:\.\./dyl" } } -. .fi -. -.IP "" 0 -. -.SS "URLs as Dependencies" +.RE +.SS URLs as Dependencies +.P You may specify a tarball URL in place of a version range\. -. .P This tarball will be downloaded and installed locally to your package at install time\. -. -.SS "Git URLs as Dependencies" +.SS Git URLs as Dependencies +.P Git urls can be of the form: -. -.IP "" 4 -. +.P +.RS 2 .nf git://github\.com/user/project\.git#commit\-ish git+ssh://user@hostname:project\.git#commit\-ish git+ssh://user@hostname/project\.git#commit\-ish git+http://user@hostname/project/blah\.git#commit\-ish git+https://user@hostname/project/blah\.git#commit\-ish -. .fi -. -.IP "" 0 -. 
+.RE .P The \fBcommit\-ish\fR can be any tag, sha, or branch which can be supplied as an argument to \fBgit checkout\fR\|\. The default is \fBmaster\fR\|\. -. -.SH "GitHub URLs" +.SH GitHub URLs +.P As of version 1\.1\.65, you can refer to GitHub urls as just "foo": "user/foo\-project"\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "foo", @@ -519,34 +432,61 @@ "express": "visionmedia/express" } } -. .fi -. -.IP "" 0 -. -.SH "devDependencies" +.RE +.SH Local Paths +.P +As of version 2\.0\.0 you can provide a path to a local directory that contains a +package\. Local paths can be saved using \fBnpm install \-\-save\fR, using any of +these forms: +.P +.RS 2 +.nf +\|\.\./foo/bar +~/foo/bar +\|\./foo/bar +/foo/bar +.fi +.RE +.P +in which case they will be normalized to a relative path and added to your +\fBpackage\.json\fR\|\. For example: +.P +.RS 2 +.nf +{ + "name": "baz", + "dependencies": { + "bar": "file:\.\./foo/bar" + } +} +.fi +.RE +.P +This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry\. +.SH devDependencies +.P If someone is planning on downloading and using your module in their -program, then they probably don\'t want or need to download and build +program, then they probably don't want or need to download and build the external test or documentation framework that you use\. -. .P -In this case, it\'s best to list these additional items in a \fBdevDependencies\fR hash\. -. +In this case, it's best to map these additional items in a \fBdevDependencies\fR +object\. .P These things will be installed when doing \fBnpm link\fR or \fBnpm install\fR from the root of a package, and can be managed like any other npm -npm help configuration param\. See \fBnpm\-config\fR for more on the topic\. -. +configuration param\. See npm help 7 \fBnpm\-config\fR for more on the topic\. 
.P For build steps that are not platform\-specific, such as compiling CoffeeScript or other languages to JavaScript, use the \fBprepublish\fR script to do this, and make the required package a devDependency\. -. .P For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "ethopia\-waza", "description": "a delightfully fruity coffee varietal", @@ -559,28 +499,23 @@ }, "main": "lib/waza\.js" } -. .fi -. -.IP "" 0 -. +.RE .P The \fBprepublish\fR script will be run before publishing, so that users can consume the functionality without requiring them to compile it -themselves\. In dev mode (ie, locally running \fBnpm install\fR), it\'ll +themselves\. In dev mode (ie, locally running \fBnpm install\fR), it'll run this script as well, so that you can test it easily\. -. -.SH "peerDependencies" +.SH peerDependencies +.P In some cases, you want to express the compatibility of your package with an host tool or library, while not necessarily doing a \fBrequire\fR of this host\. This is usually refered to as a \fIplugin\fR\|\. Notably, your module may be exposing a specific interface, expected and specified by the host documentation\. -. .P For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "tea\-latte", @@ -589,283 +524,223 @@ "tea": "2\.x" } } -. .fi -. -.IP "" 0 -. +.RE .P This ensures your package \fBtea\-latte\fR can be installed \fIalong\fR with the second major version of the host package \fBtea\fR only\. The host package is automatically installed if needed\. \fBnpm install tea\-latte\fR could possibly yield the following dependency graph: -. -.IP "" 4 -. +.P +.RS 2 .nf ├── tea\-latte@1\.3\.5 └── tea@2\.2\.0 -. .fi -. -.IP "" 0 -. +.RE .P Trying to install another plugin with a conflicting requirement will cause an error\. For this reason, make sure your plugin requirement is as broad as possible, and not to lock it down to specific patch versions\. -. 
.P Assuming the host complies with semver \fIhttp://semver\.org/\fR, only changes in -the host package\'s major version will break your plugin\. Thus, if you\'ve worked +the host package's major version will break your plugin\. Thus, if you've worked with every 1\.x version of the host package, use \fB"^1\.0"\fR or \fB"1\.x"\fR to express this\. If you depend on features introduced in 1\.5\.2, use \fB">= 1\.5\.2 < 2"\fR\|\. -. -.SH "bundledDependencies" +.SH bundledDependencies +.P Array of package names that will be bundled when publishing the package\. -. .P If this is spelled \fB"bundleDependencies"\fR, then that is also honorable\. -. -.SH "optionalDependencies" -If a dependency can be used, but you would like npm to proceed if it -cannot be found or fails to install, then you may put it in the \fBoptionalDependencies\fR hash\. This is a map of package name to version -or url, just like the \fBdependencies\fR hash\. The difference is that -failure is tolerated\. -. +.SH optionalDependencies .P -It is still your program\'s responsibility to handle the lack of the +If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the \fBoptionalDependencies\fR +object\. This is a map of package name to version or url, just like the +\fBdependencies\fR object\. The difference is that build failures do not cause +installation to fail\. +.P +It is still your program's responsibility to handle the lack of the dependency\. For example, something like this: -. -.IP "" 4 -. +.P +.RS 2 .nf try { - var foo = require(\'foo\') - var fooVersion = require(\'foo/package\.json\')\.version + var foo = require('foo') + var fooVersion = require('foo/package\.json')\.version } catch (er) { foo = null } if ( notGoodFooVersion(fooVersion) ) { foo = null } + // \.\. then later in your program \.\. + if (foo) { foo\.doFooThings() } -. .fi -. -.IP "" 0 -. 
-.P -Entries in \fBoptionalDependencies\fR will override entries of the same name in \fBdependencies\fR, so it\'s usually best to only put in one place\. -. -.SH "engines" +.RE +.P +Entries in \fBoptionalDependencies\fR will override entries of the same name in +\fBdependencies\fR, so it's usually best to only put in one place\. +.SH engines +.P You can specify the version of node that your stuff works on: -. -.IP "" 4 -. +.P +.RS 2 .nf { "engines" : { "node" : ">=0\.10\.3 <0\.12" } } -. .fi -. -.IP "" 0 -. +.RE .P -And, like with dependencies, if you don\'t specify the version (or if you +And, like with dependencies, if you don't specify the version (or if you specify "*" as the version), then any version of node will do\. -. .P If you specify an "engines" field, then npm will require that "node" be somewhere on that list\. If "engines" is omitted, then npm will just assume that it works on node\. -. .P You can also use the "engines" field to specify which versions of npm are capable of properly installing your program\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "engines" : { "npm" : "~1\.0\.20" } } -. .fi -. -.IP "" 0 -. +.RE .P Note that, unless the user has set the \fBengine\-strict\fR config flag, this field is advisory only\. -. -.SH "engineStrict" +.SH engineStrict +.P If you are sure that your module will \fIdefinitely not\fR run properly on -versions of Node/npm other than those specified in the \fBengines\fR hash, +versions of Node/npm other than those specified in the \fBengines\fR object, then you can set \fB"engineStrict": true\fR in your package\.json file\. -This will override the user\'s \fBengine\-strict\fR config setting\. -. +This will override the user's \fBengine\-strict\fR config setting\. .P Please do not do this unless you are really very very sure\. 
If your -engines hash is something overly restrictive, you can quite easily and +engines object is something overly restrictive, you can quite easily and inadvertently lock yourself into obscurity and prevent your users from updating to new versions of Node\. Consider this choice carefully\. If people abuse it, it will be removed in a future version of npm\. -. -.SH "os" +.SH os +.P You can specify which operating systems your module will run on: -. -.IP "" 4 -. +.P +.RS 2 .nf "os" : [ "darwin", "linux" ] -. .fi -. -.IP "" 0 -. +.RE .P You can also blacklist instead of whitelist operating systems, -just prepend the blacklisted os with a \'!\': -. -.IP "" 4 -. +just prepend the blacklisted os with a '!': +.P +.RS 2 .nf "os" : [ "!win32" ] -. .fi -. -.IP "" 0 -. +.RE .P The host operating system is determined by \fBprocess\.platform\fR -. .P -It is allowed to both blacklist, and whitelist, although there isn\'t any +It is allowed to both blacklist, and whitelist, although there isn't any good reason to do this\. -. -.SH "cpu" +.SH cpu +.P If your code only runs on certain cpu architectures, you can specify which ones\. -. -.IP "" 4 -. +.P +.RS 2 .nf "cpu" : [ "x64", "ia32" ] -. .fi -. -.IP "" 0 -. +.RE .P Like the \fBos\fR option, you can also blacklist architectures: -. -.IP "" 4 -. +.P +.RS 2 .nf "cpu" : [ "!arm", "!mips" ] -. .fi -. -.IP "" 0 -. +.RE .P The host architecture is determined by \fBprocess\.arch\fR -. -.SH "preferGlobal" +.SH preferGlobal +.P If your package is primarily a command\-line application that should be installed globally, then set this value to \fBtrue\fR to provide a warning if it is installed locally\. -. .P -It doesn\'t actually prevent users from installing it locally, but it -does help prevent some confusion if it doesn\'t work as expected\. -. -.SH "private" +It doesn't actually prevent users from installing it locally, but it +does help prevent some confusion if it doesn't work as expected\. 
+.SH private +.P If you set \fB"private": true\fR in your package\.json, then npm will refuse to publish it\. -. .P -This is a way to prevent accidental publication of private repositories\. -If you would like to ensure that a given package is only ever published -to a specific registry (for example, an internal registry), -then use the \fBpublishConfig\fR hash described below -to override the \fBregistry\fR config param at publish\-time\. -. -.SH "publishConfig" -This is a set of config values that will be used at publish\-time\. It\'s +This is a way to prevent accidental publication of private repositories\. If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +\fBpublishConfig\fR dictionary described below to override the \fBregistry\fR config +param at publish\-time\. +.SH publishConfig +.P +This is a set of config values that will be used at publish\-time\. It's especially handy if you want to set the tag or registry, so that you can ensure that a given package is not tagged with "latest" or published to the global public registry by default\. -. .P Any config values can be overridden, but of course only "tag" and "registry" probably matter for the purposes of publishing\. -. .P -npm help See \fBnpm\-config\fR to see the list of config options that can be +See npm help 7 \fBnpm\-config\fR to see the list of config options that can be overridden\. -. -.SH "DEFAULT VALUES" +.SH DEFAULT VALUES +.P npm will default some values based on package contents\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fB"scripts": {"start": "node server\.js"}\fR -. -.IP If there is a \fBserver\.js\fR file in the root of your package, then npm will default the \fBstart\fR command to \fBnode server\.js\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB"scripts":{"preinstall": "node\-gyp rebuild"}\fR -. 
-.IP If there is a \fBbinding\.gyp\fR file in the root of your package, npm will default the \fBpreinstall\fR command to compile using node\-gyp\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB"contributors": [\.\.\.]\fR -. -.IP If there is an \fBAUTHORS\fR file in the root of your package, npm will treat each line as a \fBName (url)\fR format, where email and url are optional\. Lines which start with a \fB#\fR or are blank, will be ignored\. -. -.IP "" 0 -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help semver -. -.IP "\(bu" 4 + +.RE +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 semver +.IP \(bu 2 npm help init -. -.IP "\(bu" 4 +.IP \(bu 2 npm help version -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 npm help help -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm help rm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man5/npmrc.5 nodejs-0.11.15/deps/npm/man/man5/npmrc.5 --- nodejs-0.11.13/deps/npm/man/man5/npmrc.5 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man5/npmrc.5 2015-01-20 21:22:17.000000000 +0000 @@ -1,89 +1,83 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPMRC" "5" "May 2014" "" "" -. +.TH "NPMRC" "5" "October 2014" "" "" .SH "NAME" -\fBnpmrc\fR \-\- The npm config files -. -.SH "DESCRIPTION" +\fBnpmrc\fR \- The npm config files +.SH DESCRIPTION +.P npm gets its config settings from the command line, environment variables, and \fBnpmrc\fR files\. -. .P The \fBnpm config\fR command can be used to update and edit the contents of the user and global npmrc files\. -. .P -npm help For a list of available configuration options, see npm\-config\. -. -.SH "FILES" -The three relevant files are: -. -.IP "\(bu" 4 +For a list of available configuration options, see npm help 7 config\. 
+.SH FILES +.P +The four relevant files are: +.RS 0 +.IP \(bu 2 +per\-project config file (/path/to/my/project/\.npmrc) +.IP \(bu 2 per\-user config file (~/\.npmrc) -. -.IP "\(bu" 4 +.IP \(bu 2 global config file ($PREFIX/npmrc) -. -.IP "\(bu" 4 +.IP \(bu 2 npm builtin config file (/path/to/npm/npmrc) -. -.IP "" 0 -. + +.RE .P All npm config files are an ini\-formatted list of \fBkey = value\fR -parameters\. Environment variables can be replaced using \fB${VARIABLE_NAME}\fR\|\. For example: -. -.IP "" 4 -. +parameters\. Environment variables can be replaced using +\fB${VARIABLE_NAME}\fR\|\. For example: +.P +.RS 2 .nf prefix = ${HOME}/\.npm\-packages -. .fi -. -.IP "" 0 -. +.RE .P Each of these files is loaded, and config options are resolved in priority order\. For example, a setting in the userconfig file would override the setting in the globalconfig file\. -. -.SS "Per\-user config file" +.SS Per\-project config file +.P +When working locally in a project, a \fB\|\.npmrc\fR file in the root of the +project (ie, a sibling of \fBnode_modules\fR and \fBpackage\.json\fR) will set +config values specific to this project\. +.P +Note that this only applies to the root of the project that you're +running npm in\. It has no effect when your module is published\. For +example, you can't publish a module that forces itself to install +globally, or in a different location\. +.SS Per\-user config file +.P \fB$HOME/\.npmrc\fR (or the \fBuserconfig\fR param, if set in the environment or on the command line) -. -.SS "Global config file" +.SS Global config file +.P \fB$PREFIX/etc/npmrc\fR (or the \fBglobalconfig\fR param, if set above): This file is an ini\-file formatted list of \fBkey = value\fR parameters\. Environment variables can be replaced as above\. -. -.SS "Built\-in config file" +.SS Built\-in config file +.P \fBpath/to/npm/itself/npmrc\fR -. .P This is an unchangeable "builtin" configuration file that npm keeps consistent across updates\. 
Set fields in here using the \fB\|\./configure\fR script that comes with npm\. This is primarily for distribution maintainers to override default configs in a standard and consistent manner\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help npm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man5/package.json.5 nodejs-0.11.15/deps/npm/man/man5/package.json.5 --- nodejs-0.11.13/deps/npm/man/man5/package.json.5 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man5/package.json.5 2015-01-20 21:22:17.000000000 +0000 @@ -1,253 +1,209 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "PACKAGE\.JSON" "5" "May 2014" "" "" -. +.TH "PACKAGE\.JSON" "5" "October 2014" "" "" .SH "NAME" -\fBpackage.json\fR \-\- Specifics of npm\'s package\.json handling -. -.SH "DESCRIPTION" -This document is all you need to know about what\'s required in your package\.json +\fBpackage.json\fR \- Specifics of npm's package\.json handling +.SH DESCRIPTION +.P +This document is all you need to know about what's required in your package\.json file\. It must be actual JSON, not just a JavaScript object literal\. -. .P A lot of the behavior described in this document is affected by the config -npm help settings described in \fBnpm\-config\fR\|\. -. -.SH "name" +settings described in npm help 7 \fBnpm\-config\fR\|\. +.SH name +.P The \fImost\fR important things in your package\.json are the name and version fields\. -Those are actually required, and your package won\'t install without +Those are actually required, and your package won't install without them\. The name and version together form an identifier that is assumed to be completely unique\. 
Changes to the package should come along with changes to the version\. -. .P The name is what your thing is called\. Some tips: -. -.IP "\(bu" 4 -Don\'t put "js" or "node" in the name\. It\'s assumed that it\'s js, since you\'re +.RS 0 +.IP \(bu 2 +Don't put "js" or "node" in the name\. It's assumed that it's js, since you're writing a package\.json file, and you can specify the engine using the "engines" field\. (See below\.) -. -.IP "\(bu" 4 +.IP \(bu 2 The name ends up being part of a URL, an argument on the command line, and a folder name\. Any name with non\-url\-safe characters will be rejected\. -Also, it can\'t start with a dot or an underscore\. -. -.IP "\(bu" 4 +Also, it can't start with a dot or an underscore\. +.IP \(bu 2 The name will probably be passed as an argument to require(), so it should be something short, but also reasonably descriptive\. -. -.IP "\(bu" 4 -You may want to check the npm registry to see if there\'s something by that name +.IP \(bu 2 +You may want to check the npm registry to see if there's something by that name already, before you get too attached to it\. http://registry\.npmjs\.org/ -. -.IP "" 0 -. -.SH "version" + +.RE +.P +A name can be optionally prefixed by a scope, e\.g\. \fB@myorg/mypackage\fR\|\. See +npm help 7 \fBnpm\-scope\fR for more detail\. +.SH version +.P The \fImost\fR important things in your package\.json are the name and version fields\. -Those are actually required, and your package won\'t install without +Those are actually required, and your package won't install without them\. The name and version together form an identifier that is assumed to be completely unique\. Changes to the package should come along with changes to the version\. -. .P -Version must be parseable by node\-semver \fIhttps://github\.com/isaacs/node\-semver\fR, which is bundled +Version must be parseable by +node\-semver \fIhttps://github\.com/isaacs/node\-semver\fR, which is bundled with npm as a dependency\. 
(\fBnpm install semver\fR to use it yourself\.) -. .P -npm help More on version numbers and ranges at semver\. -. -.SH "description" -Put a description in it\. It\'s a string\. This helps people discover your -package, as it\'s listed in \fBnpm search\fR\|\. -. -.SH "keywords" -Put keywords in it\. It\'s an array of strings\. This helps people -discover your package as it\'s listed in \fBnpm search\fR\|\. -. -.SH "homepage" +More on version numbers and ranges at npm help 7 semver\. +.SH description +.P +Put a description in it\. It's a string\. This helps people discover your +package, as it's listed in \fBnpm search\fR\|\. +.SH keywords +.P +Put keywords in it\. It's an array of strings\. This helps people +discover your package as it's listed in \fBnpm search\fR\|\. +.SH homepage +.P The url to the project homepage\. -. .P \fBNOTE\fR: This is \fInot\fR the same as "url"\. If you put a "url" field, -then the registry will think it\'s a redirection to your package that has +then the registry will think it's a redirection to your package that has been published somewhere else, and spit at you\. -. .P -Literally\. Spit\. I\'m so not kidding\. -. -.SH "bugs" -The url to your project\'s issue tracker and / or the email address to which +Literally\. Spit\. I'm so not kidding\. +.SH bugs +.P +The url to your project's issue tracker and / or the email address to which issues should be reported\. These are helpful for people who encounter issues with your package\. -. .P It should look like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "url" : "http://github\.com/owner/project/issues" , "email" : "project@hostname\.com" } -. .fi -. -.IP "" 0 -. +.RE .P You can specify either one or both values\. If you want to provide only a url, you can specify the value for "bugs" as a simple string instead of an object\. -. .P If a url is provided, it will be used by the \fBnpm bugs\fR command\. -. 
-.SH "license" +.SH license +.P You should specify a license for your package so that people know how they are -permitted to use it, and any restrictions you\'re placing on it\. -. +permitted to use it, and any restrictions you're placing on it\. .P -The simplest way, assuming you\'re using a common license such as BSD\-3\-Clause -or MIT, is to just specify the standard SPDX ID of the license you\'re using, +The simplest way, assuming you're using a common license such as BSD\-3\-Clause +or MIT, is to just specify the standard SPDX ID of the license you're using, like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "license" : "BSD\-3\-Clause" } -. .fi -. -.IP "" 0 -. +.RE .P You can check the full list of SPDX license IDs \fIhttps://spdx\.org/licenses/\fR\|\. -Ideally you should pick one that is OSI \fIhttp://opensource\.org/licenses/alphabetical\fR approved\. -. +Ideally you should pick one that is +OSI \fIhttp://opensource\.org/licenses/alphabetical\fR approved\. .P -It\'s also a good idea to include a LICENSE file at the top level in +It's also a good idea to include a LICENSE file at the top level in your package\. -. -.SH "people fields: author, contributors" +.SH people fields: author, contributors +.P The "author" is one person\. "contributors" is an array of people\. A "person" is an object with a "name" field and optionally "url" and "email", like this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "Barney Rubble" , "email" : "b@rubble\.com" , "url" : "http://barnyrubble\.tumblr\.com/" } -. .fi -. -.IP "" 0 -. +.RE .P Or you can shorten that all into a single string, and npm will parse it for you: -. -.IP "" 4 -. +.P +.RS 2 .nf "Barney Rubble (http://barnyrubble\.tumblr\.com/) -. .fi -. -.IP "" 0 -. +.RE .P Both email and url are optional either way\. -. .P npm also sets a top\-level "maintainers" field with your npm user info\. -. -.SH "files" +.SH files +.P The "files" field is an array of files to include in your project\. 
If you name a folder in the array, then it will also include the files inside that folder\. (Unless they would be ignored by another rule\.) -. .P You can also provide a "\.npmignore" file in the root of your package, which will keep files from being included, even if they would be picked up by the files array\. The "\.npmignore" file works just like a "\.gitignore"\. -. -.SH "main" +.SH main +.P The main field is a module ID that is the primary entry point to your program\. -That is, if your package is named \fBfoo\fR, and a user installs it, and then does \fBrequire("foo")\fR, then your main module\'s exports object will be returned\. -. +That is, if your package is named \fBfoo\fR, and a user installs it, and then does +\fBrequire("foo")\fR, then your main module's exports object will be returned\. .P This should be a module ID relative to the root of your package folder\. -. .P For most modules, it makes the most sense to have a main script and often not much else\. -. -.SH "bin" -A lot of packages have one or more executable files that they\'d like to +.SH bin +.P +A lot of packages have one or more executable files that they'd like to install into the PATH\. npm makes this pretty easy (in fact, it uses this feature to install the "npm" executable\.) -. .P To use this, supply a \fBbin\fR field in your package\.json which is a map of -command name to local file name\. On install, npm will symlink that file into \fBprefix/bin\fR for global installs, or \fB\|\./node_modules/\.bin/\fR for local +command name to local file name\. On install, npm will symlink that file into +\fBprefix/bin\fR for global installs, or \fB\|\./node_modules/\.bin/\fR for local installs\. -. .P For example, npm has this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "bin" : { "npm" : "\./cli\.js" } } -. .fi -. -.IP "" 0 -. +.RE .P -So, when you install npm, it\'ll create a symlink from the \fBcli\.js\fR script to \fB/usr/local/bin/npm\fR\|\. -. 
+So, when you install npm, it'll create a symlink from the \fBcli\.js\fR script to +\fB/usr/local/bin/npm\fR\|\. .P If you have a single executable, and its name should be the name of the package, then you can just supply it as a string\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "my\-program" , "version": "1\.2\.5" , "bin": "\./path/to/program" } -. .fi -. -.IP "" 0 -. +.RE .P would be the same as this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "my\-program" , "version": "1\.2\.5" , "bin" : { "my\-program" : "\./path/to/program" } } -. .fi -. -.IP "" 0 -. -.SH "man" -Specify either a single file or an array of filenames to put in place for the \fBman\fR program to find\. -. +.RE +.SH man .P -If only a single file is provided, then it\'s installed such that it is the +Specify either a single file or an array of filenames to put in place for the +\fBman\fR program to find\. +.P +If only a single file is provided, then it's installed such that it is the result from \fBman \fR, regardless of its actual filename\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "version" : "1\.2\.3" @@ -255,20 +211,15 @@ , "main" : "foo\.js" , "man" : "\./man/doc\.1" } -. .fi -. -.IP "" 0 -. +.RE .P would link the \fB\|\./man/doc\.1\fR file in such that it is the target for \fBman foo\fR -. .P -If the filename doesn\'t start with the package name, then it\'s prefixed\. +If the filename doesn't start with the package name, then it's prefixed\. So, this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "version" : "1\.2\.3" @@ -276,20 +227,15 @@ , "main" : "foo\.js" , "man" : [ "\./man/foo\.1", "\./man/bar\.1" ] } -. .fi -. -.IP "" 0 -. +.RE .P will create files to do \fBman foo\fR and \fBman foo\-bar\fR\|\. -. .P Man files must end with a number, and optionally a \fB\|\.gz\fR suffix if they are compressed\. The number dictates which man section the file is installed into\. -. -.IP "" 4 -. 
+.P +.RS 2 .nf { "name" : "foo" , "version" : "1\.2\.3" @@ -297,169 +243,142 @@ , "main" : "foo\.js" , "man" : [ "\./man/foo\.1", "\./man/foo\.2" ] } -. .fi -. -.IP "" 0 -. +.RE .P will create entries for \fBman foo\fR and \fBman 2 foo\fR -. -.SH "directories" +.SH directories +.P The CommonJS Packages \fIhttp://wiki\.commonjs\.org/wiki/Packages/1\.0\fR spec details a few ways that you can indicate the structure of your package using a \fBdirectories\fR -hash\. If you look at npm\'s package\.json \fIhttp://registry\.npmjs\.org/npm/latest\fR, -you\'ll see that it has directories for doc, lib, and man\. -. +object\. If you look at npm's package\.json \fIhttp://registry\.npmjs\.org/npm/latest\fR, +you'll see that it has directories for doc, lib, and man\. .P In the future, this information may be used in other creative ways\. -. -.SS "directories\.lib" +.SS directories\.lib +.P Tell people where the bulk of your library is\. Nothing special is done -with the lib folder in any way, but it\'s useful meta info\. -. -.SS "directories\.bin" -If you specify a "bin" directory, then all the files in that folder will -be used as the "bin" hash\. -. -.P -If you have a "bin" hash already, then this has no effect\. -. -.SS "directories\.man" +with the lib folder in any way, but it's useful meta info\. +.SS directories\.bin +.P +If you specify a \fBbin\fR directory, then all the files in that folder will +be added as children of the \fBbin\fR path\. +.P +If you have a \fBbin\fR path already, then this has no effect\. +.SS directories\.man +.P A folder that is full of man pages\. Sugar to generate a "man" array by walking the folder\. -. -.SS "directories\.doc" +.SS directories\.doc +.P Put markdown files in here\. Eventually, these will be displayed nicely, maybe, someday\. -. -.SS "directories\.example" +.SS directories\.example +.P Put example scripts in here\. Someday, it might be exposed in some clever way\. -. 
-.SH "repository" +.SH repository +.P Specify the place where your code lives\. This is helpful for people who want to contribute\. If the git repo is on github, then the \fBnpm docs\fR command will be able to find you\. -. .P Do it like this: -. -.IP "" 4 -. +.P +.RS 2 .nf "repository" : { "type" : "git" , "url" : "http://github\.com/npm/npm\.git" } + "repository" : { "type" : "svn" , "url" : "http://v8\.googlecode\.com/svn/trunk/" } -. .fi -. -.IP "" 0 -. +.RE .P The URL should be a publicly available (perhaps read\-only) url that can be handed directly to a VCS program without any modification\. It should not be a url to an -html project page that you put in your browser\. It\'s for computers\. -. -.SH "scripts" -The "scripts" member is an object hash of script commands that are run +html project page that you put in your browser\. It's for computers\. +.SH scripts +.P +The "scripts" property is a dictionary containing script commands that are run at various times in the lifecycle of your package\. The key is the lifecycle event, and the value is the command to run at that point\. -. .P -npm help See \fBnpm\-scripts\fR to find out more about writing package scripts\. -. -.SH "config" -A "config" hash can be used to set configuration -parameters used in package scripts that persist across upgrades\. For -instance, if a package had the following: -. -.IP "" 4 -. +See npm help 7 \fBnpm\-scripts\fR to find out more about writing package scripts\. +.SH config +.P +A "config" object can be used to set configuration parameters used in package +scripts that persist across upgrades\. For instance, if a package had the +following: +.P +.RS 2 .nf { "name" : "foo" , "config" : { "port" : "8080" } } -. .fi -. -.IP "" 0 -. 
+.RE .P -and then had a "start" command that then referenced the \fBnpm_package_config_port\fR environment variable, then the user could +and then had a "start" command that then referenced the +\fBnpm_package_config_port\fR environment variable, then the user could override that by doing \fBnpm config set foo:port 8001\fR\|\. -. .P -npm help See \fBnpm\-confignpm help \fR and \fBnpm\-scripts\fR for more on package +See npm help 7 \fBnpm\-config\fR and npm help 7 \fBnpm\-scripts\fR for more on package configs\. -. -.SH "dependencies" -Dependencies are specified with a simple hash of package name to +.SH dependencies +.P +Dependencies are specified in a simple object that maps a package name to a version range\. The version range is a string which has one or more -space\-separated descriptors\. Dependencies can also be identified with -a tarball or git URL\. -. -.P -\fBPlease do not put test harnesses or transpilers in your \fBdependencies\fR hash\.\fR See \fBdevDependencies\fR, below\. -. -.P -npm help See semver for more details about specifying version ranges\. -. -.IP "\(bu" 4 +space\-separated descriptors\. Dependencies can also be identified with a +tarball or git URL\. +.P +\fBPlease do not put test harnesses or transpilers in your +\fBdependencies\fR object\.\fR See \fBdevDependencies\fR, below\. +.P +See npm help 7 semver for more details about specifying version ranges\. +.RS 0 +.IP \(bu 2 \fBversion\fR Must match \fBversion\fR exactly -. -.IP "\(bu" 4 +.IP \(bu 2 \fB>version\fR Must be greater than \fBversion\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB>=version\fR etc -. -.IP "\(bu" 4 +.IP \(bu 2 \fB=version1 <=version2\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fBrange1 || range2\fR Passes if either range1 or range2 are satisfied\. -. -.IP "\(bu" 4 -\fBgit\.\.\.\fR See \'Git URLs as Dependencies\' below -. -.IP "\(bu" 4 -\fBuser/repo\fR See \'GitHub URLs\' below -. -.IP "" 0 -. 
+.IP \(bu 2 +\fBgit\.\.\.\fR See 'Git URLs as Dependencies' below +.IP \(bu 2 +\fBuser/repo\fR See 'GitHub URLs' below +.IP \(bu 2 +\fBtag\fR A specific version tagged and published as \fBtag\fR See npm help \fBnpm\-tag\fR +.IP \(bu 2 +\fBpath/path/path\fR See Local Paths below + +.RE .P For example, these are all valid: -. -.IP "" 4 -. +.P +.RS 2 .nf { "dependencies" : { "foo" : "1\.0\.0 \- 2\.9999\.9999" @@ -472,45 +391,39 @@ , "elf" : "~1\.2\.3" , "two" : "2\.x" , "thr" : "3\.3\.x" + , "lat" : "latest" + , "dyl" : "file:\.\./dyl" } } -. .fi -. -.IP "" 0 -. -.SS "URLs as Dependencies" +.RE +.SS URLs as Dependencies +.P You may specify a tarball URL in place of a version range\. -. .P This tarball will be downloaded and installed locally to your package at install time\. -. -.SS "Git URLs as Dependencies" +.SS Git URLs as Dependencies +.P Git urls can be of the form: -. -.IP "" 4 -. +.P +.RS 2 .nf git://github\.com/user/project\.git#commit\-ish git+ssh://user@hostname:project\.git#commit\-ish git+ssh://user@hostname/project\.git#commit\-ish git+http://user@hostname/project/blah\.git#commit\-ish git+https://user@hostname/project/blah\.git#commit\-ish -. .fi -. -.IP "" 0 -. +.RE .P The \fBcommit\-ish\fR can be any tag, sha, or branch which can be supplied as an argument to \fBgit checkout\fR\|\. The default is \fBmaster\fR\|\. -. -.SH "GitHub URLs" +.SH GitHub URLs +.P As of version 1\.1\.65, you can refer to GitHub urls as just "foo": "user/foo\-project"\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "foo", @@ -519,34 +432,61 @@ "express": "visionmedia/express" } } -. .fi -. -.IP "" 0 -. -.SH "devDependencies" +.RE +.SH Local Paths +.P +As of version 2\.0\.0 you can provide a path to a local directory that contains a +package\. 
Local paths can be saved using \fBnpm install \-\-save\fR, using any of +these forms: +.P +.RS 2 +.nf +\|\.\./foo/bar +~/foo/bar +\|\./foo/bar +/foo/bar +.fi +.RE +.P +in which case they will be normalized to a relative path and added to your +\fBpackage\.json\fR\|\. For example: +.P +.RS 2 +.nf +{ + "name": "baz", + "dependencies": { + "bar": "file:\.\./foo/bar" + } +} +.fi +.RE +.P +This feature is helpful for local offline development and creating +tests that require npm installing where you don't want to hit an +external server, but should not be used when publishing packages +to the public registry\. +.SH devDependencies +.P If someone is planning on downloading and using your module in their -program, then they probably don\'t want or need to download and build +program, then they probably don't want or need to download and build the external test or documentation framework that you use\. -. .P -In this case, it\'s best to list these additional items in a \fBdevDependencies\fR hash\. -. +In this case, it's best to map these additional items in a \fBdevDependencies\fR +object\. .P These things will be installed when doing \fBnpm link\fR or \fBnpm install\fR from the root of a package, and can be managed like any other npm -npm help configuration param\. See \fBnpm\-config\fR for more on the topic\. -. +configuration param\. See npm help 7 \fBnpm\-config\fR for more on the topic\. .P For build steps that are not platform\-specific, such as compiling CoffeeScript or other languages to JavaScript, use the \fBprepublish\fR script to do this, and make the required package a devDependency\. -. .P For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "ethopia\-waza", "description": "a delightfully fruity coffee varietal", @@ -559,28 +499,23 @@ }, "main": "lib/waza\.js" } -. .fi -. -.IP "" 0 -. +.RE .P The \fBprepublish\fR script will be run before publishing, so that users can consume the functionality without requiring them to compile it -themselves\. 
In dev mode (ie, locally running \fBnpm install\fR), it\'ll +themselves\. In dev mode (ie, locally running \fBnpm install\fR), it'll run this script as well, so that you can test it easily\. -. -.SH "peerDependencies" +.SH peerDependencies +.P In some cases, you want to express the compatibility of your package with an host tool or library, while not necessarily doing a \fBrequire\fR of this host\. This is usually refered to as a \fIplugin\fR\|\. Notably, your module may be exposing a specific interface, expected and specified by the host documentation\. -. .P For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name": "tea\-latte", @@ -589,283 +524,223 @@ "tea": "2\.x" } } -. .fi -. -.IP "" 0 -. +.RE .P This ensures your package \fBtea\-latte\fR can be installed \fIalong\fR with the second major version of the host package \fBtea\fR only\. The host package is automatically installed if needed\. \fBnpm install tea\-latte\fR could possibly yield the following dependency graph: -. -.IP "" 4 -. +.P +.RS 2 .nf ├── tea\-latte@1\.3\.5 └── tea@2\.2\.0 -. .fi -. -.IP "" 0 -. +.RE .P Trying to install another plugin with a conflicting requirement will cause an error\. For this reason, make sure your plugin requirement is as broad as possible, and not to lock it down to specific patch versions\. -. .P Assuming the host complies with semver \fIhttp://semver\.org/\fR, only changes in -the host package\'s major version will break your plugin\. Thus, if you\'ve worked +the host package's major version will break your plugin\. Thus, if you've worked with every 1\.x version of the host package, use \fB"^1\.0"\fR or \fB"1\.x"\fR to express this\. If you depend on features introduced in 1\.5\.2, use \fB">= 1\.5\.2 < 2"\fR\|\. -. -.SH "bundledDependencies" +.SH bundledDependencies +.P Array of package names that will be bundled when publishing the package\. -. .P If this is spelled \fB"bundleDependencies"\fR, then that is also honorable\. -. 
-.SH "optionalDependencies" -If a dependency can be used, but you would like npm to proceed if it -cannot be found or fails to install, then you may put it in the \fBoptionalDependencies\fR hash\. This is a map of package name to version -or url, just like the \fBdependencies\fR hash\. The difference is that -failure is tolerated\. -. +.SH optionalDependencies .P -It is still your program\'s responsibility to handle the lack of the +If a dependency can be used, but you would like npm to proceed if it cannot be +found or fails to install, then you may put it in the \fBoptionalDependencies\fR +object\. This is a map of package name to version or url, just like the +\fBdependencies\fR object\. The difference is that build failures do not cause +installation to fail\. +.P +It is still your program's responsibility to handle the lack of the dependency\. For example, something like this: -. -.IP "" 4 -. +.P +.RS 2 .nf try { - var foo = require(\'foo\') - var fooVersion = require(\'foo/package\.json\')\.version + var foo = require('foo') + var fooVersion = require('foo/package\.json')\.version } catch (er) { foo = null } if ( notGoodFooVersion(fooVersion) ) { foo = null } + // \.\. then later in your program \.\. + if (foo) { foo\.doFooThings() } -. .fi -. -.IP "" 0 -. -.P -Entries in \fBoptionalDependencies\fR will override entries of the same name in \fBdependencies\fR, so it\'s usually best to only put in one place\. -. -.SH "engines" +.RE +.P +Entries in \fBoptionalDependencies\fR will override entries of the same name in +\fBdependencies\fR, so it's usually best to only put in one place\. +.SH engines +.P You can specify the version of node that your stuff works on: -. -.IP "" 4 -. +.P +.RS 2 .nf { "engines" : { "node" : ">=0\.10\.3 <0\.12" } } -. .fi -. -.IP "" 0 -. 
+.RE .P -And, like with dependencies, if you don\'t specify the version (or if you +And, like with dependencies, if you don't specify the version (or if you specify "*" as the version), then any version of node will do\. -. .P If you specify an "engines" field, then npm will require that "node" be somewhere on that list\. If "engines" is omitted, then npm will just assume that it works on node\. -. .P You can also use the "engines" field to specify which versions of npm are capable of properly installing your program\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf { "engines" : { "npm" : "~1\.0\.20" } } -. .fi -. -.IP "" 0 -. +.RE .P Note that, unless the user has set the \fBengine\-strict\fR config flag, this field is advisory only\. -. -.SH "engineStrict" +.SH engineStrict +.P If you are sure that your module will \fIdefinitely not\fR run properly on -versions of Node/npm other than those specified in the \fBengines\fR hash, +versions of Node/npm other than those specified in the \fBengines\fR object, then you can set \fB"engineStrict": true\fR in your package\.json file\. -This will override the user\'s \fBengine\-strict\fR config setting\. -. +This will override the user's \fBengine\-strict\fR config setting\. .P Please do not do this unless you are really very very sure\. If your -engines hash is something overly restrictive, you can quite easily and +engines object is something overly restrictive, you can quite easily and inadvertently lock yourself into obscurity and prevent your users from updating to new versions of Node\. Consider this choice carefully\. If people abuse it, it will be removed in a future version of npm\. -. -.SH "os" +.SH os +.P You can specify which operating systems your module will run on: -. -.IP "" 4 -. +.P +.RS 2 .nf "os" : [ "darwin", "linux" ] -. .fi -. -.IP "" 0 -. +.RE .P You can also blacklist instead of whitelist operating systems, -just prepend the blacklisted os with a \'!\': -. -.IP "" 4 -. 
+just prepend the blacklisted os with a '!': +.P +.RS 2 .nf "os" : [ "!win32" ] -. .fi -. -.IP "" 0 -. +.RE .P The host operating system is determined by \fBprocess\.platform\fR -. .P -It is allowed to both blacklist, and whitelist, although there isn\'t any +It is allowed to both blacklist, and whitelist, although there isn't any good reason to do this\. -. -.SH "cpu" +.SH cpu +.P If your code only runs on certain cpu architectures, you can specify which ones\. -. -.IP "" 4 -. +.P +.RS 2 .nf "cpu" : [ "x64", "ia32" ] -. .fi -. -.IP "" 0 -. +.RE .P Like the \fBos\fR option, you can also blacklist architectures: -. -.IP "" 4 -. +.P +.RS 2 .nf "cpu" : [ "!arm", "!mips" ] -. .fi -. -.IP "" 0 -. +.RE .P The host architecture is determined by \fBprocess\.arch\fR -. -.SH "preferGlobal" +.SH preferGlobal +.P If your package is primarily a command\-line application that should be installed globally, then set this value to \fBtrue\fR to provide a warning if it is installed locally\. -. .P -It doesn\'t actually prevent users from installing it locally, but it -does help prevent some confusion if it doesn\'t work as expected\. -. -.SH "private" +It doesn't actually prevent users from installing it locally, but it +does help prevent some confusion if it doesn't work as expected\. +.SH private +.P If you set \fB"private": true\fR in your package\.json, then npm will refuse to publish it\. -. .P -This is a way to prevent accidental publication of private repositories\. -If you would like to ensure that a given package is only ever published -to a specific registry (for example, an internal registry), -then use the \fBpublishConfig\fR hash described below -to override the \fBregistry\fR config param at publish\-time\. -. -.SH "publishConfig" -This is a set of config values that will be used at publish\-time\. It\'s +This is a way to prevent accidental publication of private repositories\. 
If +you would like to ensure that a given package is only ever published to a +specific registry (for example, an internal registry), then use the +\fBpublishConfig\fR dictionary described below to override the \fBregistry\fR config +param at publish\-time\. +.SH publishConfig +.P +This is a set of config values that will be used at publish\-time\. It's especially handy if you want to set the tag or registry, so that you can ensure that a given package is not tagged with "latest" or published to the global public registry by default\. -. .P Any config values can be overridden, but of course only "tag" and "registry" probably matter for the purposes of publishing\. -. .P -npm help See \fBnpm\-config\fR to see the list of config options that can be +See npm help 7 \fBnpm\-config\fR to see the list of config options that can be overridden\. -. -.SH "DEFAULT VALUES" +.SH DEFAULT VALUES +.P npm will default some values based on package contents\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fB"scripts": {"start": "node server\.js"}\fR -. -.IP If there is a \fBserver\.js\fR file in the root of your package, then npm will default the \fBstart\fR command to \fBnode server\.js\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB"scripts":{"preinstall": "node\-gyp rebuild"}\fR -. -.IP If there is a \fBbinding\.gyp\fR file in the root of your package, npm will default the \fBpreinstall\fR command to compile using node\-gyp\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB"contributors": [\.\.\.]\fR -. -.IP If there is an \fBAUTHORS\fR file in the root of your package, npm will treat each line as a \fBName (url)\fR format, where email and url are optional\. Lines which start with a \fB#\fR or are blank, will be ignored\. -. -.IP "" 0 -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help semver -. -.IP "\(bu" 4 + +.RE +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 semver +.IP \(bu 2 npm help init -. -.IP "\(bu" 4 +.IP \(bu 2 npm help version -. -.IP "\(bu" 4 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. 
-.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 npm help help -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help install -. -.IP "\(bu" 4 +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm help rm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/index.7 nodejs-0.11.15/deps/npm/man/man7/index.7 --- nodejs-0.11.13/deps/npm/man/man7/index.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/index.7 1970-01-01 00:00:00.000000000 +0000 @@ -1,298 +0,0 @@ -.\" Generated with Ronnjs 0.4.0 -.\" http://github.com/kapouer/ronnjs -. -.TH "NPM\-INDEX" "7" "July 2013" "" "" -. -.SH "NAME" -\fBnpm-index\fR \-\- Index of all npm documentation -. -npm help .SH "README" -node package manager -. -npm help .SH "npm" -node package manager -. -npm help .SH "npm\-adduser" -Add a registry user account -. -npm help .SH "npm\-bin" -Display npm bin folder -. -npm help .SH "npm\-bugs" -Bugs for a package in a web browser maybe -. -npm help .SH "npm\-build" -Build a package -. -npm help .SH "npm\-bundle" -REMOVED -. -npm help .SH "npm\-cache" -Manipulates packages cache -. -npm help .SH "npm\-completion" -Tab Completion for npm -. -npm help .SH "npm\-config" -Manage the npm configuration files -. -npm help .SH "npm\-dedupe" -Reduce duplication -. -npm help .SH "npm\-deprecate" -Deprecate a version of a package -. -npm help .SH "npm\-docs" -Docs for a package in a web browser maybe -. -npm help .SH "npm\-edit" -Edit an installed package -. -npm help .SH "npm\-explore" -Browse an installed package -. -npm help .SH "npm\-help\-search" -Search npm help documentation -. -npm help .SH "npm\-help" -Get help on npm -. -npm help .SH "npm\-init" -Interactively create a package\.json file -. -npm help .SH "npm\-install" -Install a package -. -npm help .SH "npm\-link" -Symlink a package folder -. -npm help .SH "npm\-ls" -List installed packages -. 
-npm help .SH "npm\-outdated" -Check for outdated packages -. -npm help .SH "npm\-owner" -Manage package owners -. -npm help .SH "npm\-pack" -Create a tarball from a package -. -npm help .SH "npm\-prefix" -Display prefix -. -npm help .SH "npm\-prune" -Remove extraneous packages -. -npm help .SH "npm\-publish" -Publish a package -. -npm help .SH "npm\-rebuild" -Rebuild a package -. -npm help .SH "npm\-restart" -Start a package -. -npm help .SH "npm\-rm" -Remove a package -. -npm help .SH "npm\-root" -Display npm root -. -npm help .SH "npm\-run\-script" -Run arbitrary package scripts -. -npm help .SH "npm\-search" -Search for packages -. -npm help .SH "npm\-shrinkwrap" -Lock down dependency versions -. -npm help .SH "npm\-star" -Mark your favorite packages -. -npm help .SH "npm\-stars" -View packages marked as favorites -. -npm help .SH "npm\-start" -Start a package -. -npm help .SH "npm\-stop" -Stop a package -. -npm help .SH "npm\-submodule" -Add a package as a git submodule -. -npm help .SH "npm\-tag" -Tag a published version -. -npm help .SH "npm\-test" -Test a package -. -npm help .SH "npm\-uninstall" -Remove a package -. -npm help .SH "npm\-unpublish" -Remove a package from the registry -. -npm help .SH "npm\-update" -Update a package -. -npm help .SH "npm\-version" -Bump a package version -. -npm help .SH "npm\-view" -View registry info -. -npm help .SH "npm\-whoami" -Display npm username -. -npm apihelp .SH "npm" -node package manager -. -npm apihelp .SH "npm\-bin" -Display npm bin folder -. -npm apihelp .SH "npm\-bugs" -Bugs for a package in a web browser maybe -. -npm apihelp .SH "npm\-commands" -npm commands -. -npm apihelp .SH "npm\-config" -Manage the npm configuration files -. -npm apihelp .SH "npm\-deprecate" -Deprecate a version of a package -. -npm apihelp .SH "npm\-docs" -Docs for a package in a web browser maybe -. -npm apihelp .SH "npm\-edit" -Edit an installed package -. -npm apihelp .SH "npm\-explore" -Browse an installed package -. 
-npm apihelp .SH "npm\-help\-search" -Search the help pages -. -npm apihelp .SH "npm\-init" -Interactively create a package\.json file -. -npm apihelp .SH "npm\-install" -install a package programmatically -. -npm apihelp .SH "npm\-link" -Symlink a package folder -. -npm apihelp .SH "npm\-load" -Load config settings -. -npm apihelp .SH "npm\-ls" -List installed packages -. -npm apihelp .SH "npm\-outdated" -Check for outdated packages -. -npm apihelp .SH "npm\-owner" -Manage package owners -. -npm apihelp .SH "npm\-pack" -Create a tarball from a package -. -npm apihelp .SH "npm\-prefix" -Display prefix -. -npm apihelp .SH "npm\-prune" -Remove extraneous packages -. -npm apihelp .SH "npm\-publish" -Publish a package -. -npm apihelp .SH "npm\-rebuild" -Rebuild a package -. -npm apihelp .SH "npm\-restart" -Start a package -. -npm apihelp .SH "npm\-root" -Display npm root -. -npm apihelp .SH "npm\-run\-script" -Run arbitrary package scripts -. -npm apihelp .SH "npm\-search" -Search for packages -. -npm apihelp .SH "npm\-shrinkwrap" -programmatically generate package shrinkwrap file -. -npm apihelp .SH "npm\-start" -Start a package -. -npm apihelp .SH "npm\-stop" -Stop a package -. -npm apihelp .SH "npm\-submodule" -Add a package as a git submodule -. -npm apihelp .SH "npm\-tag" -Tag a published version -. -npm apihelp .SH "npm\-test" -Test a package -. -npm apihelp .SH "npm\-uninstall" -uninstall a package programmatically -. -npm apihelp .SH "npm\-unpublish" -Remove a package from the registry -. -npm apihelp .SH "npm\-update" -Update a package -. -npm apihelp .SH "npm\-version" -Bump a package version -. -npm apihelp .SH "npm\-view" -View registry info -. -npm apihelp .SH "npm\-whoami" -Display npm username -. -npm help .SH "npm\-folders" -Folder Structures Used by npm -. -npm help .SH "npmrc" -The npm config files -. -npm help .SH "package\.json" -Specifics of npm\'s package\.json handling -. -npm help .SH "npm\-coding\-style" -npm\'s "funny" coding style -. 
-npm help .SH "npm\-config" -More than you probably want to know about npm configuration -. -npm help .SH "npm\-developers" -Developer Guide -. -npm help .SH "npm\-disputes" -Handling Module Name Disputes -. -npm help .SH "npm\-faq" -Frequently Asked Questions -. -npm help .SH "npm\-registry" -The JavaScript Package Registry -. -npm help .SH "npm\-scripts" -How npm handles the "scripts" field -. -npm help .SH "removing\-npm" -Cleaning the Slate -. -npm help .SH "semver" -The semantic versioner for npm diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-coding-style.7 nodejs-0.11.15/deps/npm/man/man7/npm-coding-style.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-coding-style.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-coding-style.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,121 +1,92 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-CODING\-STYLE" "7" "May 2014" "" "" -. +.TH "NPM\-CODING\-STYLE" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-coding-style\fR \-\- npm\'s "funny" coding style -. -.SH "DESCRIPTION" -npm\'s coding style is a bit unconventional\. It is not different for -difference\'s sake, but rather a carefully crafted style that is +\fBnpm-coding-style\fR \- npm's "funny" coding style +.SH DESCRIPTION +.P +npm's coding style is a bit unconventional\. It is not different for +difference's sake, but rather a carefully crafted style that is designed to reduce visual clutter and make bugs more apparent\. -. .P If you want to contribute to npm (which is very encouraged), you should -make your code conform to npm\'s style\. -. +make your code conform to npm's style\. +.P +Note: this concerns npm's code not the specific packages at npmjs\.org +.SH Line Length .P -Note: this concerns npm\'s code not the specific packages at npmjs\.org -. -.SH "Line Length" -Keep lines shorter than 80 characters\. It\'s better for lines to be +Keep lines shorter than 80 characters\. 
It's better for lines to be too short than to be too long\. Break up long lists, objects, and other statements onto multiple lines\. -. -.SH "Indentation" +.SH Indentation +.P Two\-spaces\. Tabs are better, but they look like hell in web browsers -(and on github), and node uses 2 spaces, so that\'s that\. -. +(and on github), and node uses 2 spaces, so that's that\. .P Configure your editor appropriately\. -. -.SH "Curly braces" +.SH Curly braces +.P Curly braces belong on the same line as the thing that necessitates them\. -. .P Bad: -. -.IP "" 4 -. +.P +.RS 2 .nf function () { -. .fi -. -.IP "" 0 -. +.RE .P Good: -. -.IP "" 4 -. +.P +.RS 2 .nf function () { -. .fi -. -.IP "" 0 -. -.P -If a block needs to wrap to the next line, use a curly brace\. Don\'t -use it if it doesn\'t\. -. +.RE +.P +If a block needs to wrap to the next line, use a curly brace\. Don't +use it if it doesn't\. .P Bad: -. -.IP "" 4 -. +.P +.RS 2 .nf if (foo) { bar() } while (foo) bar() -. .fi -. -.IP "" 0 -. +.RE .P Good: -. -.IP "" 4 -. +.P +.RS 2 .nf if (foo) bar() while (foo) { bar() } -. .fi -. -.IP "" 0 -. -.SH "Semicolons" -Don\'t use them except in four situations: -. -.IP "\(bu" 4 -\fBfor (;;)\fR loops\. They\'re actually required\. -. -.IP "\(bu" 4 -null loops like: \fBwhile (something) ;\fR (But you\'d better have a good +.RE +.SH Semicolons +.P +Don't use them except in four situations: +.RS 0 +.IP \(bu 2 +\fBfor (;;)\fR loops\. They're actually required\. +.IP \(bu 2 +null loops like: \fBwhile (something) ;\fR (But you'd better have a good reason for doing that\.) -. -.IP "\(bu" 4 +.IP \(bu 2 \fBcase "foo": doSomething(); break\fR -. -.IP "\(bu" 4 +.IP \(bu 2 In front of a leading \fB(\fR or \fB[\fR at the start of the line\. This prevents the expression from being interpreted as a function call or property access, respectively\. -. -.IP "" 0 -. + +.RE .P Some examples of good semicolon usage: -. -.IP "" 4 -. 
+.P +.RS 2 .nf ;(x || y)\.doSomething() ;[a, b, c]\.forEach(doSomething) @@ -127,23 +98,19 @@ } end() } -. .fi -. -.IP "" 0 -. +.RE .P Note that starting lines with \fB\-\fR and \fB+\fR also should be prefixed with a semicolon, but this is much less common\. -. -.SH "Comma First" +.SH Comma First +.P If there is a list of things separated by commas, and it wraps across multiple lines, put the comma at the start of the next line, directly below the token that starts the list\. Put the final token in the list on a line by itself\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf var magicWords = [ "abracadabra" , "gesundheit" @@ -156,99 +123,82 @@ , b = "abc" , etc , somethingElse -. .fi -. -.IP "" 0 -. -.SH "Whitespace" +.RE +.SH Whitespace +.P Put a single space in front of ( for anything other than a function call\. Also use a single space wherever it makes things more readable\. -. .P -Don\'t leave trailing whitespace at the end of lines\. Don\'t indent empty -lines\. Don\'t use more spaces than are helpful\. -. -.SH "Functions" +Don't leave trailing whitespace at the end of lines\. Don't indent empty +lines\. Don't use more spaces than are helpful\. +.SH Functions +.P Use named functions\. They make stack traces a lot easier to read\. -. -.SH "Callbacks, Sync/async Style" +.SH Callbacks, Sync/async Style +.P Use the asynchronous/non\-blocking versions of things as much as possible\. It might make more sense for npm to use the synchronous fs APIs, but this way, the fs and http and child process stuff all uses the same callback\-passing methodology\. -. .P The callback should always be the last argument in the list\. Its first argument is the Error or null\. -. .P -Be very careful never to ever ever throw anything\. It\'s worse than useless\. +Be very careful never to ever ever throw anything\. It's worse than useless\. Just send the error message back as the first argument to the callback\. -. -.SH "Errors" -Always create a new Error object with your message\. 
Don\'t just return a +.SH Errors +.P +Always create a new Error object with your message\. Don't just return a string message to the callback\. Stack traces are handy\. -. -.SH "Logging" +.SH Logging +.P Logging is done using the npmlog \fIhttps://github\.com/npm/npmlog\fR utility\. -. .P Please clean up logs when they are no longer helpful\. In particular, logging the same object over and over again is not helpful\. Logs should -report what\'s happening so that it\'s easier to track down where a fault +report what's happening so that it's easier to track down where a fault occurs\. -. .P -npm help Use appropriate log levels\. See \fBnpm\-config\fR and search for +Use appropriate log levels\. See npm help 7 \fBnpm\-config\fR and search for "loglevel"\. -. -.SH "Case, naming, etc\." +.SH Case, naming, etc\. +.P Use \fBlowerCamelCase\fR for multiword identifiers when they refer to objects, -functions, methods, members, or anything not specified in this section\. -. +functions, methods, properties, or anything not specified in this section\. .P -Use \fBUpperCamelCase\fR for class names (things that you\'d pass to "new")\. -. +Use \fBUpperCamelCase\fR for class names (things that you'd pass to "new")\. .P Use \fBall\-lower\-hyphen\-css\-case\fR for multiword filenames and config keys\. -. .P Use named functions\. They make stack traces easier to follow\. -. .P Use \fBCAPS_SNAKE_CASE\fR for constants, things that should never change and are rarely used\. -. .P Use a single uppercase letter for function names where the function would normally be anonymous, but needs to call itself recursively\. It -makes it clear that it\'s a "throwaway" function\. -. -.SH "null, undefined, false, 0" -Boolean variables and functions should always be either \fBtrue\fR or \fBfalse\fR\|\. Don\'t set it to 0 unless it\'s supposed to be a number\. -. +makes it clear that it's a "throwaway" function\. 
+.SH null, undefined, false, 0 +.P +Boolean variables and functions should always be either \fBtrue\fR or +\fBfalse\fR\|\. Don't set it to 0 unless it's supposed to be a number\. .P When something is intentionally missing or removed, set it to \fBnull\fR\|\. -. .P -Don\'t set things to \fBundefined\fR\|\. Reserve that value to mean "not yet +Don't set things to \fBundefined\fR\|\. Reserve that value to mean "not yet set to anything\." -. .P Boolean objects are verboten\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help developers -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 developers +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help npm -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-config.7 nodejs-0.11.15/deps/npm/man/man7/npm-config.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-config.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-config.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,1484 +1,1198 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-CONFIG" "7" "May 2014" "" "" -. +.TH "NPM\-CONFIG" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-config\fR \-\- More than you probably want to know about npm configuration -. -.SH "DESCRIPTION" +\fBnpm-config\fR \- More than you probably want to know about npm configuration +.SH DESCRIPTION +.P npm gets its configuration values from 6 sources, in this priority: -. -.SS "Command Line Flags" +.SS Command Line Flags +.P Putting \fB\-\-foo bar\fR on the command line sets the \fBfoo\fR configuration parameter to \fB"bar"\fR\|\. A \fB\-\-\fR argument tells the cli parser to stop reading flags\. A \fB\-\-flag\fR parameter that is at the \fIend\fR of the command will be given the value of \fBtrue\fR\|\. -. -.SS "Environment Variables" +.SS Environment Variables +.P Any environment variables that start with \fBnpm_config_\fR will be -interpreted as a configuration parameter\. 
For example, putting \fBnpm_config_foo=bar\fR in your environment will set the \fBfoo\fR +interpreted as a configuration parameter\. For example, putting +\fBnpm_config_foo=bar\fR in your environment will set the \fBfoo\fR configuration parameter to \fBbar\fR\|\. Any environment configurations that are not given a value will be given the value of \fBtrue\fR\|\. Config values are case\-insensitive, so \fBNPM_CONFIG_FOO=bar\fR will work the same\. -. -.SS "npmrc Files" -The three relevant files are: -. -.IP "\(bu" 4 +.SS npmrc Files +.P +The four relevant files are: +.RS 0 +.IP \(bu 2 +per\-project config file (/path/to/my/project/\.npmrc) +.IP \(bu 2 per\-user config file (~/\.npmrc) -. -.IP "\(bu" 4 +.IP \(bu 2 global config file ($PREFIX/npmrc) -. -.IP "\(bu" 4 +.IP \(bu 2 npm builtin config file (/path/to/npm/npmrc) -. -.IP "" 0 -. -.P -npm help See npmrc for more details\. -. -.SS "Default Configs" + +.RE +.P +See npm help 5 npmrc for more details\. +.SS Default Configs +.P A set of configuration parameters that are internal to npm, and are defaults if nothing else is specified\. -. -.SH "Shorthands and Other CLI Niceties" +.SH Shorthands and Other CLI Niceties +.P The following shorthands are parsed on the command\-line: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fB\-v\fR: \fB\-\-version\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-h\fR, \fB\-?\fR, \fB\-\-help\fR, \fB\-H\fR: \fB\-\-usage\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-s\fR, \fB\-\-silent\fR: \fB\-\-loglevel silent\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-q\fR, \fB\-\-quiet\fR: \fB\-\-loglevel warn\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-d\fR: \fB\-\-loglevel info\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-dd\fR, \fB\-\-verbose\fR: \fB\-\-loglevel verbose\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-ddd\fR: \fB\-\-loglevel silly\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-g\fR: \fB\-\-global\fR -. -.IP "\(bu" 4 +.IP \(bu 2 +\fB\-C\fR: \fB\-\-prefix\fR +.IP \(bu 2 \fB\-l\fR: \fB\-\-long\fR -. 
-.IP "\(bu" 4 +.IP \(bu 2 \fB\-m\fR: \fB\-\-message\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-p\fR, \fB\-\-porcelain\fR: \fB\-\-parseable\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-reg\fR: \fB\-\-registry\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-v\fR: \fB\-\-version\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-f\fR: \fB\-\-force\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-desc\fR: \fB\-\-description\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-S\fR: \fB\-\-save\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-D\fR: \fB\-\-save\-dev\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-O\fR: \fB\-\-save\-optional\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-B\fR: \fB\-\-save\-bundle\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-E\fR: \fB\-\-save\-exact\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-y\fR: \fB\-\-yes\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\-n\fR: \fB\-\-yes false\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fBll\fR and \fBla\fR commands: \fBls \-\-long\fR -. -.IP "" 0 -. + +.RE .P If the specified configuration param resolves unambiguously to a known configuration parameter, then it is expanded to that configuration parameter\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf npm ls \-\-par # same as: npm ls \-\-parseable -. .fi -. -.IP "" 0 -. +.RE .P If multiple single\-character shorthands are strung together, and the resulting combination is unambiguously not some other configuration param, then it is expanded to its various component pieces\. For example: -. -.IP "" 4 -. +.P +.RS 2 .nf npm ls \-gpld # same as: npm ls \-\-global \-\-parseable \-\-long \-\-loglevel info -. .fi -. -.IP "" 0 -. -.SH "Per\-Package Config Settings" -When running scripts (npm help see \fBnpm\-scripts\fR) the package\.json "config" -keys are overwritten in the environment if there is a config param of \fB[@]:\fR\|\. For example, if the package\.json has +.RE +.SH Per\-Package Config Settings +.P +When running scripts (see npm help 7 \fBnpm\-scripts\fR) the package\.json "config" +keys are overwritten in the environment if there is a config param of +\fB[@]:\fR\|\. 
For example, if the package\.json has this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "config" : { "port" : "8080" } , "scripts" : { "start" : "node server\.js" } } -. .fi -. -.IP "" 0 -. +.RE .P and the server\.js is this: -. -.IP "" 4 -. +.P +.RS 2 .nf http\.createServer(\.\.\.)\.listen(process\.env\.npm_package_config_port) -. .fi -. -.IP "" 0 -. +.RE .P then the user could change the behavior by doing: -. -.IP "" 4 -. +.P +.RS 2 .nf npm config set foo:port 80 -. .fi -. -.IP "" 0 -. -.P -npm help See package\.json for more information\. -. -.SH "Config Settings" -. -.SS "always\-auth" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.RE +.P +See npm help 5 package\.json for more information\. +.SH Config Settings +.SS always\-auth +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Force npm to always require authentication when accessing the registry, even for \fBGET\fR requests\. -. -.SS "bin\-links" -. -.IP "\(bu" 4 +.SS bin\-links +.RS 0 +.IP \(bu 2 Default: \fBtrue\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Tells npm to create symlinks (or \fB\|\.cmd\fR shims on Windows) for package executables\. -. .P Set to false to have it not do this\. This can be used to work around -the fact that some file systems don\'t support symlinks, even on +the fact that some file systems don't support symlinks, even on ostensibly Unix systems\. -. -.SS "browser" -. -.IP "\(bu" 4 +.SS browser +.RS 0 +.IP \(bu 2 Default: OS X: \fB"open"\fR, Windows: \fB"start"\fR, Others: \fB"xdg\-open"\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The browser that is called by the \fBnpm docs\fR command to open websites\. -. -.SS "ca" -. -.IP "\(bu" 4 +.SS ca +.RS 0 +.IP \(bu 2 Default: The npm CA certificate -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String or null -. -.IP "" 0 -. + +.RE .P The Certificate Authority signing certificate that is trusted for SSL connections to the registry\. 
-. .P Set to \fBnull\fR to only allow "known" registrars, or to a specific CA cert to trust only that specific signing authority\. -. .P See also the \fBstrict\-ssl\fR config\. -. -.SS "cache" -. -.IP "\(bu" 4 +.SS cafile +.RS 0 +.IP \(bu 2 +Default: \fBnull\fR +.IP \(bu 2 +Type: path + +.RE +.P +A path to a file containing one or multiple Certificate Authority signing +certificates\. Similar to the \fBca\fR setting, but allows for multiple CA's, as +well as for the CA information to be stored in a file on disk\. +.SS cache +.RS 0 +.IP \(bu 2 Default: Windows: \fB%AppData%\\npm\-cache\fR, Posix: \fB~/\.npm\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. -.P -npm help The location of npm\'s cache directory\. See \fBnpm\-cache\fR -. -.SS "cache\-lock\-stale" -. -.IP "\(bu" 4 + +.RE +.P +The location of npm's cache directory\. See npm help \fBnpm\-cache\fR +.SS cache\-lock\-stale +.RS 0 +.IP \(bu 2 Default: 60000 (1 minute) -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The number of ms before cache folder lockfiles are considered stale\. -. -.SS "cache\-lock\-retries" -. -.IP "\(bu" 4 +.SS cache\-lock\-retries +.RS 0 +.IP \(bu 2 Default: 10 -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P Number of times to retry to acquire a lock on cache folder lockfiles\. -. -.SS "cache\-lock\-wait" -. -.IP "\(bu" 4 +.SS cache\-lock\-wait +.RS 0 +.IP \(bu 2 Default: 10000 (10 seconds) -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P Number of ms to wait for cache lock files to expire\. -. -.SS "cache\-max" -. -.IP "\(bu" 4 +.SS cache\-max +.RS 0 +.IP \(bu 2 Default: Infinity -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The maximum time (in seconds) to keep items in the registry cache before re\-checking against the registry\. -. .P Note that no purging is done unless the \fBnpm cache clean\fR command is explicitly used, and that only GET requests use the cache\. -. -.SS "cache\-min" -. 
-.IP "\(bu" 4 +.SS cache\-min +.RS 0 +.IP \(bu 2 Default: 10 -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The minimum time (in seconds) to keep items in the registry cache before re\-checking against the registry\. -. .P Note that no purging is done unless the \fBnpm cache clean\fR command is explicitly used, and that only GET requests use the cache\. -. -.SS "cert" -. -.IP "\(bu" 4 +.SS cert +.RS 0 +.IP \(bu 2 Default: \fBnull\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P A client certificate to pass when accessing the registry\. -. -.SS "color" -. -.IP "\(bu" 4 +.SS color +.RS 0 +.IP \(bu 2 Default: true on Posix, false on Windows -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean or \fB"always"\fR -. -.IP "" 0 -. + +.RE .P If false, never shows colors\. If \fB"always"\fR then always shows colors\. If true, then only prints color codes for tty file descriptors\. -. -.SS "depth" -. -.IP "\(bu" 4 +.SS depth +.RS 0 +.IP \(bu 2 Default: Infinity -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. -.P -The depth to go when recursing directories for \fBnpm ls\fR and \fBnpm cache ls\fR\|\. -. -.SS "description" -. -.IP "\(bu" 4 + +.RE +.P +The depth to go when recursing directories for \fBnpm ls\fR and +\fBnpm cache ls\fR\|\. +.SS description +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Show the description in \fBnpm search\fR -. -.SS "dev" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS dev +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Install \fBdev\-dependencies\fR along with packages\. -. .P Note that \fBdev\-dependencies\fR are also installed if the \fBnpat\fR flag is set\. -. -.SS "editor" -. -.IP "\(bu" 4 +.SS editor +.RS 0 +.IP \(bu 2 Default: \fBEDITOR\fR environment variable if set, or \fB"vi"\fR on Posix, or \fB"notepad"\fR on Windows\. -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. 
+ +.RE .P The command to run for \fBnpm edit\fR or \fBnpm config edit\fR\|\. -. -.SS "email" -The email of the logged\-in user\. -. -.P -Set by the \fBnpm adduser\fR command\. Should not be set explicitly\. -. -.SS "engine\-strict" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS engine\-strict +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P If set to true, then npm will stubbornly refuse to install (or even consider installing) any package that claims to not be compatible with the current Node\.js version\. -. -.SS "force" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS force +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Makes various commands more forceful\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 lifecycle script failure does not block progress\. -. -.IP "\(bu" 4 +.IP \(bu 2 publishing clobbers previously published versions\. -. -.IP "\(bu" 4 +.IP \(bu 2 skips cache when requesting from the registry\. -. -.IP "\(bu" 4 +.IP \(bu 2 prevents checks against clobbering non\-npm files\. -. -.IP "" 0 -. -.SS "fetch\-retries" -. -.IP "\(bu" 4 + +.RE +.SS fetch\-retries +.RS 0 +.IP \(bu 2 Default: 2 -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The "retries" config for the \fBretry\fR module to use when fetching packages from the registry\. -. -.SS "fetch\-retry\-factor" -. -.IP "\(bu" 4 +.SS fetch\-retry\-factor +.RS 0 +.IP \(bu 2 Default: 10 -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The "factor" config for the \fBretry\fR module to use when fetching packages\. -. -.SS "fetch\-retry\-mintimeout" -. -.IP "\(bu" 4 +.SS fetch\-retry\-mintimeout +.RS 0 +.IP \(bu 2 Default: 10000 (10 seconds) -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The "minTimeout" config for the \fBretry\fR module to use when fetching packages\. -. -.SS "fetch\-retry\-maxtimeout" -. 
-.IP "\(bu" 4 +.SS fetch\-retry\-maxtimeout +.RS 0 +.IP \(bu 2 Default: 60000 (1 minute) -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Number -. -.IP "" 0 -. + +.RE .P The "maxTimeout" config for the \fBretry\fR module to use when fetching packages\. -. -.SS "git" -. -.IP "\(bu" 4 +.SS git +.RS 0 +.IP \(bu 2 Default: \fB"git"\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The command to use for git commands\. If git is installed on the computer, but is not in the \fBPATH\fR, then set this to the full path to the git binary\. -. -.SS "git\-tag\-version" -. -.IP "\(bu" 4 +.SS git\-tag\-version +.RS 0 +.IP \(bu 2 Default: \fBtrue\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Tag the commit when using the \fBnpm version\fR command\. -. -.SS "global" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. -.P -npm help Operates in "global" mode, so that packages are installed into the \fBprefix\fR folder instead of the current working directory\. See \fBnpm\-folders\fR for more on the differences in behavior\. -. -.IP "\(bu" 4 +.SS global +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE +.P +Operates in "global" mode, so that packages are installed into the +\fBprefix\fR folder instead of the current working directory\. See +npm help 5 \fBnpm\-folders\fR for more on the differences in behavior\. +.RS 0 +.IP \(bu 2 packages are installed into the \fB{prefix}/lib/node_modules\fR folder, instead of the current working directory\. -. -.IP "\(bu" 4 +.IP \(bu 2 bin files are linked to \fB{prefix}/bin\fR -. -.IP "\(bu" 4 +.IP \(bu 2 man pages are linked to \fB{prefix}/share/man\fR -. -.IP "" 0 -. -.SS "globalconfig" -. -.IP "\(bu" 4 + +.RE +.SS globalconfig +.RS 0 +.IP \(bu 2 Default: {prefix}/etc/npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The config file to read for global config options\. -. -.SS "group" -. 
-.IP "\(bu" 4 +.SS group +.RS 0 +.IP \(bu 2 Default: GID of the current process -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String or Number -. -.IP "" 0 -. + +.RE .P The group to use when running package scripts in global mode as the root user\. -. -.SS "heading" -. -.IP "\(bu" 4 +.SS heading +.RS 0 +.IP \(bu 2 Default: \fB"npm"\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The string that starts all the debugging log output\. -. -.SS "https\-proxy" -. -.IP "\(bu" 4 -Default: the \fBHTTPS_PROXY\fR or \fBhttps_proxy\fR or \fBHTTP_PROXY\fR or \fBhttp_proxy\fR environment variables\. -. -.IP "\(bu" 4 +.SS https\-proxy +.RS 0 +.IP \(bu 2 +Default: the \fBHTTPS_PROXY\fR or \fBhttps_proxy\fR or \fBHTTP_PROXY\fR or +\fBhttp_proxy\fR environment variables\. +.IP \(bu 2 Type: url -. -.IP "" 0 -. + +.RE .P A proxy to use for outgoing https requests\. -. -.SS "ignore\-scripts" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS ignore\-scripts +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P If true, npm does not run scripts specified in package\.json files\. -. -.SS "init\-module" -. -.IP "\(bu" 4 +.SS init\-module +.RS 0 +.IP \(bu 2 Default: ~/\.npm\-init\.js -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P A module that will be loaded by the \fBnpm init\fR command\. See the -documentation for the init\-package\-json \fIhttps://github\.com/isaacs/init\-package\-json\fR module -npm help for more information, or npm\-init\. -. -.SS "init\.author\.name" -. -.IP "\(bu" 4 +documentation for the +init\-package\-json \fIhttps://github\.com/isaacs/init\-package\-json\fR module +for more information, or npm help init\. +.SS init\-author\-name +.RS 0 +.IP \(bu 2 Default: "" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. -.P -The value \fBnpm init\fR should use by default for the package author\'s name\. -. -.SS "init\.author\.email" -. 
-.IP "\(bu" 4 + +.RE +.P +The value \fBnpm init\fR should use by default for the package author's name\. +.SS init\-author\-email +.RS 0 +.IP \(bu 2 Default: "" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. -.P -The value \fBnpm init\fR should use by default for the package author\'s email\. -. -.SS "init\.author\.url" -. -.IP "\(bu" 4 + +.RE +.P +The value \fBnpm init\fR should use by default for the package author's email\. +.SS init\-author\-url +.RS 0 +.IP \(bu 2 Default: "" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. -.P -The value \fBnpm init\fR should use by default for the package author\'s homepage\. -. -.SS "init\.license" -. -.IP "\(bu" 4 + +.RE +.P +The value \fBnpm init\fR should use by default for the package author's homepage\. +.SS init\-license +.RS 0 +.IP \(bu 2 Default: "ISC" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P The value \fBnpm init\fR should use by default for the package license\. -. -.SS "json" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS init\-version +.RS 0 +.IP \(bu 2 +Default: "0\.0\.0" +.IP \(bu 2 +Type: semver + +.RE +.P +The value that \fBnpm init\fR should use by default for the package +version number, if not already set in package\.json\. +.SS json +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Whether or not to output JSON data, rather than the normal output\. -. .P This feature is currently experimental, and the output data structures for many commands is either not implemented in JSON yet, or subject to change\. Only the output from \fBnpm ls \-\-json\fR is currently valid\. -. -.SS "key" -. -.IP "\(bu" 4 +.SS key +.RS 0 +.IP \(bu 2 Default: \fBnull\fR -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P A client key to pass when accessing the registry\. -. -.SS "link" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. 
+.SS link +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P If true, then local installs will link if there is a suitable globally installed package\. -. .P Note that this means that local installs can cause things to be installed into the global space at the same time\. The link is only done if one of the two conditions are met: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 The package is not already installed globally, or -. -.IP "\(bu" 4 +.IP \(bu 2 the globally installed version is identical to the version that is being installed locally\. -. -.IP "" 0 -. -.SS "local\-address" -. -.IP "\(bu" 4 + +.RE +.SS local\-address +.RS 0 +.IP \(bu 2 Default: undefined -. -.IP "\(bu" 4 +.IP \(bu 2 Type: IP Address -. -.IP "" 0 -. + +.RE .P The IP address of the local interface to use when making connections to the npm registry\. Must be IPv4 in versions of Node prior to 0\.12\. -. -.SS "loglevel" -. -.IP "\(bu" 4 -Default: "http" -. -.IP "\(bu" 4 +.SS loglevel +.RS 0 +.IP \(bu 2 +Default: "warn" +.IP \(bu 2 Type: String -. -.IP "\(bu" 4 -Values: "silent", "win", "error", "warn", "http", "info", "verbose", "silly" -. -.IP "" 0 -. +.IP \(bu 2 +Values: "silent", "error", "warn", "http", "info", "verbose", "silly" + +.RE .P -What level of logs to report\. On failure, \fIall\fR logs are written to \fBnpm\-debug\.log\fR in the current working directory\. -. +What level of logs to report\. On failure, \fIall\fR logs are written to +\fBnpm\-debug\.log\fR in the current working directory\. .P Any logs of a higher level than the setting are shown\. -The default is "http", which shows http, warn, and error output\. -. -.SS "logstream" -. -.IP "\(bu" 4 +The default is "warn", which shows warn and error output\. +.SS logstream +.RS 0 +.IP \(bu 2 Default: process\.stderr -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Stream -. -.IP "" 0 -. + +.RE .P -This is the stream that is passed to the npmlog \fIhttps://github\.com/npm/npmlog\fR module at run time\. -. 
+This is the stream that is passed to the +npmlog \fIhttps://github\.com/npm/npmlog\fR module at run time\. .P It cannot be set from the command line, but if you are using npm programmatically, you may wish to send logs to somewhere other than stderr\. -. .P If the \fBcolor\fR config is set to true, then this stream will receive colored output if it is a TTY\. -. -.SS "long" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS long +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Show extended information in \fBnpm ls\fR and \fBnpm search\fR\|\. -. -.SS "message" -. -.IP "\(bu" 4 +.SS message +.RS 0 +.IP \(bu 2 Default: "%s" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P Commit message which is used by \fBnpm version\fR when creating version commit\. -. .P Any "%s" in the message will be replaced with the version number\. -. -.SS "node\-version" -. -.IP "\(bu" 4 +.SS node\-version +.RS 0 +.IP \(bu 2 Default: process\.version -. -.IP "\(bu" 4 +.IP \(bu 2 Type: semver or false -. -.IP "" 0 -. -.P -The node version to use when checking package\'s "engines" hash\. -. -.SS "npat" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. + +.RE +.P +The node version to use when checking a package's \fBengines\fR map\. +.SS npat +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Run tests on installation\. -. -.SS "onload\-script" -. -.IP "\(bu" 4 +.SS onload\-script +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P A node module to \fBrequire()\fR when npm loads\. Useful for programmatic usage\. -. -.SS "optional" -. -.IP "\(bu" 4 +.SS optional +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P -Attempt to install packages in the \fBoptionalDependencies\fR hash\. Note +Attempt to install packages in the \fBoptionalDependencies\fR object\. 
Note that if these packages fail to install, the overall installation process is not aborted\. -. -.SS "parseable" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS parseable +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Output parseable results from commands that write to standard output\. -. -.SS "prefix" -. -.IP "\(bu" 4 -npm help Default: see npm\-folders -. -.IP "\(bu" 4 +.SS prefix +.RS 0 +.IP \(bu 2 +Default: see npm help 5 folders +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The location to install global items\. If set on the command line, then it forces non\-global commands to run in the specified folder\. -. -.SS "production" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS production +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Set to true to run in "production" mode\. -. -.IP "1" 4 +.RS 0 +.IP 1. 3 devDependencies are not installed at the topmost level when running local \fBnpm install\fR without any arguments\. -. -.IP "2" 4 +.IP 2. 3 Set the NODE_ENV="production" for lifecycle scripts\. -. -.IP "" 0 -. -.SS "proprietary\-attribs" -. -.IP "\(bu" 4 + +.RE +.SS proprietary\-attribs +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Whether or not to include proprietary extended attributes in the tarballs created by npm\. -. .P Unless you are expecting to unpack package tarballs with something other than npm \-\- particularly a very outdated tar implementation \-\- leave this as true\. -. -.SS "proxy" -. -.IP "\(bu" 4 +.SS proxy +.RS 0 +.IP \(bu 2 Default: \fBHTTP_PROXY\fR or \fBhttp_proxy\fR environment variable, or null -. -.IP "\(bu" 4 +.IP \(bu 2 Type: url -. -.IP "" 0 -. + +.RE .P A proxy to use for outgoing http requests\. -. -.SS "rebuild\-bundle" -. -.IP "\(bu" 4 +.SS rebuild\-bundle +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. 
-.IP "" 0 -. + +.RE .P Rebuild bundled dependencies after installation\. -. -.SS "registry" -. -.IP "\(bu" 4 +.SS registry +.RS 0 +.IP \(bu 2 Default: https://registry\.npmjs\.org/ -. -.IP "\(bu" 4 +.IP \(bu 2 Type: url -. -.IP "" 0 -. + +.RE .P The base URL of the npm package registry\. -. -.SS "rollback" -. -.IP "\(bu" 4 +.SS rollback +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Remove failed installs\. -. -.SS "save" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS save +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Save installed packages to a package\.json file as dependencies\. -. .P -When used with the \fBnpm rm\fR command, it removes it from the dependencies -hash\. -. +When used with the \fBnpm rm\fR command, it removes it from the \fBdependencies\fR +object\. .P Only works if there is already a package\.json file present\. -. -.SS "save\-bundle" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS save\-bundle +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P -If a package would be saved at install time by the use of \fB\-\-save\fR, \fB\-\-save\-dev\fR, or \fB\-\-save\-optional\fR, then also put it in the \fBbundleDependencies\fR list\. -. +If a package would be saved at install time by the use of \fB\-\-save\fR, +\fB\-\-save\-dev\fR, or \fB\-\-save\-optional\fR, then also put it in the +\fBbundleDependencies\fR list\. .P When used with the \fBnpm rm\fR command, it removes it from the bundledDependencies list\. -. -.SS "save\-dev" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS save\-dev +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P -Save installed packages to a package\.json file as devDependencies\. -. +Save installed packages to a package\.json file as \fBdevDependencies\fR\|\. 
.P When used with the \fBnpm rm\fR command, it removes it from the -devDependencies hash\. -. +\fBdevDependencies\fR object\. .P Only works if there is already a package\.json file present\. -. -.SS "save\-exact" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. -.P -Dependencies saved to package\.json using \fB\-\-save\fR, \fB\-\-save\-dev\fR or \fB\-\-save\-optional\fR will be configured with an exact version rather than -using npm\'s default semver range operator\. -. -.SS "save\-optional" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS save\-exact +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE +.P +Dependencies saved to package\.json using \fB\-\-save\fR, \fB\-\-save\-dev\fR or +\fB\-\-save\-optional\fR will be configured with an exact version rather than +using npm's default semver range operator\. +.SS save\-optional +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Save installed packages to a package\.json file as optionalDependencies\. -. .P When used with the \fBnpm rm\fR command, it removes it from the -devDependencies hash\. -. +\fBdevDependencies\fR object\. .P Only works if there is already a package\.json file present\. -. -.SS "save\-prefix" -. -.IP "\(bu" 4 -Default: \'^\' -. -.IP "\(bu" 4 +.SS save\-prefix +.RS 0 +.IP \(bu 2 +Default: '^' +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P -Configure how versions of packages installed to a package\.json file via \fB\-\-save\fR or \fB\-\-save\-dev\fR get prefixed\. -. +Configure how versions of packages installed to a package\.json file via +\fB\-\-save\fR or \fB\-\-save\-dev\fR get prefixed\. .P -For example if a package has version \fB1\.2\.3\fR, by default it\'s version is +For example if a package has version \fB1\.2\.3\fR, by default it's version is set to \fB^1\.2\.3\fR which allows minor upgrades for that package, but after -. 
-.br -\fBnpm config set save\-prefix=\'~\'\fR it would be set to \fB~1\.2\.3\fR which only allows +\fBnpm config set save\-prefix='~'\fR it would be set to \fB~1\.2\.3\fR which only allows patch upgrades\. -. -.SS "searchopts" -. -.IP "\(bu" 4 +.SS scope +.RS 0 +.IP \(bu 2 Default: "" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE +.P +Associate an operation with a scope for a scoped registry\. Useful when logging +in to a private registry for the first time: +\fBnpm login \-\-scope=@organization \-\-registry=registry\.organization\.com\fR, which +will cause \fB@organization\fR to be mapped to the registry for future installation +of packages specified according to the pattern \fB@organization/package\fR\|\. +.SS searchopts +.RS 0 +.IP \(bu 2 +Default: "" +.IP \(bu 2 +Type: String + +.RE .P Space\-separated options that are always passed to search\. -. -.SS "searchexclude" -. -.IP "\(bu" 4 +.SS searchexclude +.RS 0 +.IP \(bu 2 Default: "" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P Space\-separated options that limit the results from search\. -. -.SS "searchsort" -. -.IP "\(bu" 4 +.SS searchsort +.RS 0 +.IP \(bu 2 Default: "name" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "\(bu" 4 +.IP \(bu 2 Values: "name", "\-name", "date", "\-date", "description", "\-description", "keywords", "\-keywords" -. -.IP "" 0 -. + +.RE .P Indication of which field to sort search results by\. Prefix with a \fB\-\fR character to indicate reverse sort\. -. -.SS "shell" -. -.IP "\(bu" 4 +.SS shell +.RS 0 +.IP \(bu 2 Default: SHELL environment variable, or "bash" on Posix, or "cmd" on Windows -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The shell to run for the \fBnpm explore\fR command\. -. -.SS "shrinkwrap" -. -.IP "\(bu" 4 +.SS shrinkwrap +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P If set to false, then ignore \fBnpm\-shrinkwrap\.json\fR files when installing\. 
-. -.SS "sign\-git\-tag" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS sign\-git\-tag +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P If set to true, then the \fBnpm version\fR command will tag the version using \fB\-s\fR to add a signature\. -. .P Note that git requires you to have set up GPG keys in your git configs for this to work properly\. -. -.SS "strict\-ssl" -. -.IP "\(bu" 4 +.SS spin +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 +Type: Boolean or \fB"always"\fR + +.RE +.P +When set to \fBtrue\fR, npm will display an ascii spinner while it is doing +things, if \fBprocess\.stderr\fR is a TTY\. +.P +Set to \fBfalse\fR to suppress the spinner, or set to \fBalways\fR to output +the spinner even for non\-TTY outputs\. +.SS strict\-ssl +.RS 0 +.IP \(bu 2 +Default: true +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Whether or not to do SSL key validation when making requests to the registry via https\. -. .P See also the \fBca\fR config\. -. -.SS "tag" -. -.IP "\(bu" 4 +.SS tag +.RS 0 +.IP \(bu 2 Default: latest -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P -If you ask npm to install a package and don\'t tell it a specific version, then +If you ask npm to install a package and don't tell it a specific version, then it will install the specified tag\. -. .P Also the tag that is added to the package@version specified by the \fBnpm tag\fR command, if no explicit tag is given\. -. -.SS "tmp" -. -.IP "\(bu" 4 +.SS tmp +.RS 0 +.IP \(bu 2 Default: TMPDIR environment variable, or "/tmp" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P Where to store temporary files and folders\. All temp files are deleted on success, but left behind on failure for forensic purposes\. -. -.SS "unicode" -. -.IP "\(bu" 4 +.SS unicode +.RS 0 +.IP \(bu 2 Default: true -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. 
+ +.RE .P When set to true, npm uses unicode characters in the tree output\. When false, it uses ascii characters to draw trees\. -. -.SS "unsafe\-perm" -. -.IP "\(bu" 4 +.SS unsafe\-perm +.RS 0 +.IP \(bu 2 Default: false if running as root, true otherwise -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Boolean -. -.IP "" 0 -. + +.RE .P Set to true to suppress the UID/GID switching when running package scripts\. If set explicitly to false, then installing as a non\-root user will fail\. -. -.SS "usage" -. -.IP "\(bu" 4 -Default: false -. -.IP "\(bu" 4 -Type: Boolean -. -.IP "" 0 -. +.SS usage +.RS 0 +.IP \(bu 2 +Default: false +.IP \(bu 2 +Type: Boolean + +.RE .P Set to show short usage output (like the \-H output) -npm help instead of complete help when doing \fBnpm\-help\fR\|\. -. -.SS "user" -. -.IP "\(bu" 4 +instead of complete help when doing npm help \fBnpm\-help\fR\|\. +.SS user +.RS 0 +.IP \(bu 2 Default: "nobody" -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String or Number -. -.IP "" 0 -. + +.RE .P The UID to set to when running package scripts as root\. -. -.SS "username" -. -.IP "\(bu" 4 -Default: null -. -.IP "\(bu" 4 -Type: String -. -.IP "" 0 -. -.P -The username on the npm registry\. Set with \fBnpm adduser\fR -. -.SS "userconfig" -. -.IP "\(bu" 4 +.SS userconfig +.RS 0 +.IP \(bu 2 Default: ~/\.npmrc -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The location of user\-level configuration settings\. -. -.SS "umask" -. -.IP "\(bu" 4 +.SS umask +.RS 0 +.IP \(bu 2 Default: 022 -. -.IP "\(bu" 4 +.IP \(bu 2 Type: Octal numeric string -. -.IP "" 0 -. + +.RE .P The "umask" value to use when setting the file creation mode on files and folders\. -. .P Folders and executables are given a mode which is \fB0777\fR masked against this value\. Other files are given a mode which is \fB0666\fR masked against this value\. Thus, the defaults are \fB0755\fR and \fB0644\fR respectively\. -. -.SS "user\-agent" -. 
-.IP "\(bu" 4 +.SS user\-agent +.RS 0 +.IP \(bu 2 Default: node/{process\.version} {process\.platform} {process\.arch} -. -.IP "\(bu" 4 +.IP \(bu 2 Type: String -. -.IP "" 0 -. + +.RE .P Sets a User\-Agent to the request header -. -.SS "version" -. -.IP "\(bu" 4 +.SS version +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: boolean -. -.IP "" 0 -. + +.RE .P If true, output the npm version and exit successfully\. -. .P Only relevant when specified explicitly on the command line\. -. -.SS "versions" -. -.IP "\(bu" 4 +.SS versions +.RS 0 +.IP \(bu 2 Default: false -. -.IP "\(bu" 4 +.IP \(bu 2 Type: boolean -. -.IP "" 0 -. -.P -If true, output the npm version as well as node\'s \fBprocess\.versions\fR -hash, and exit successfully\. -. + +.RE +.P +If true, output the npm version as well as node's \fBprocess\.versions\fR map, and +exit successfully\. .P Only relevant when specified explicitly on the command line\. -. -.SS "viewer" -. -.IP "\(bu" 4 +.SS viewer +.RS 0 +.IP \(bu 2 Default: "man" on Posix, "browser" on Windows -. -.IP "\(bu" 4 +.IP \(bu 2 Type: path -. -.IP "" 0 -. + +.RE .P The program to use to view help content\. -. .P Set to \fB"browser"\fR to view html help content in the default web browser\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 -npm help folders -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 +npm help 5 folders +.IP \(bu 2 npm help npm -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-developers.7 nodejs-0.11.15/deps/npm/man/man7/npm-developers.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-developers.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-developers.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,335 +1,258 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-DEVELOPERS" "7" "May 2014" "" "" -. +.TH "NPM\-DEVELOPERS" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-developers\fR \-\- Developer Guide -. -.SH "DESCRIPTION" -So, you\'ve decided to use npm to develop (and maybe publish/deploy) +\fBnpm-developers\fR \- Developer Guide +.SH DESCRIPTION +.P +So, you've decided to use npm to develop (and maybe publish/deploy) your project\. -. .P Fantastic! -. .P There are a few things that you need to do above the simple steps that your users will do to install your program\. -. -.SH "About These Documents" +.SH About These Documents +.P These are man pages\. If you install npm, you should be able to then do \fBman npm\-thing\fR to get the documentation on a particular topic, or \fBnpm help thing\fR to see the same information\. -. -.SH "What is a " +.SH What is a \fBpackage\fR +.P A package is: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 a) a folder containing a program described by a package\.json file -. -.IP "\(bu" 4 +.IP \(bu 2 b) a gzipped tarball containing (a) -. -.IP "\(bu" 4 +.IP \(bu 2 c) a url that resolves to (b) -. -.IP "\(bu" 4 +.IP \(bu 2 d) a \fB@\fR that is published on the registry with (c) -. -.IP "\(bu" 4 +.IP \(bu 2 e) a \fB@\fR that points to (d) -. -.IP "\(bu" 4 +.IP \(bu 2 f) a \fB\fR that has a "latest" tag satisfying (e) -. -.IP "\(bu" 4 +.IP \(bu 2 g) a \fBgit\fR url that, when cloned, results in (a)\. -. -.IP "" 0 -. 
+ +.RE .P Even if you never publish your package, you can still get a lot of benefits of using npm if you just want to write a node program (a), and perhaps if you also want to be able to easily install it elsewhere after packing it up into a tarball (b)\. -. .P Git urls can be of the form: -. -.IP "" 4 -. +.P +.RS 2 .nf git://github\.com/user/project\.git#commit\-ish git+ssh://user@hostname:project\.git#commit\-ish git+http://user@hostname/project/blah\.git#commit\-ish git+https://user@hostname/project/blah\.git#commit\-ish -. .fi -. -.IP "" 0 -. +.RE .P The \fBcommit\-ish\fR can be any tag, sha, or branch which can be supplied as an argument to \fBgit checkout\fR\|\. The default is \fBmaster\fR\|\. -. -.SH "The package\.json File" +.SH The package\.json File +.P You need to have a \fBpackage\.json\fR file in the root of your project to do much of anything with npm\. That is basically the whole interface\. -. .P -npm help See \fBpackage\.json\fR for details about what goes in that file\. At the very +See npm help 5 \fBpackage\.json\fR for details about what goes in that file\. At the very least, you need: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 name: This should be a string that identifies your project\. Please do not use the name to specify that it runs on node, or is in JavaScript\. You can use the "engines" field to explicitly state the versions of -node (or whatever else) that your program requires, and it\'s pretty -well assumed that it\'s javascript\. -. -.IP +node (or whatever else) that your program requires, and it's pretty +well assumed that it's javascript\. It does not necessarily need to match your github repository name\. -. -.IP So, \fBnode\-foo\fR and \fBbar\-js\fR are bad names\. \fBfoo\fR or \fBbar\fR are better\. -. -.IP "\(bu" 4 +.IP \(bu 2 version: A semver\-compatible version\. -. -.IP "\(bu" 4 +.IP \(bu 2 engines: Specify the versions of node (or whatever else) that your program runs on\. 
The node API changes a lot, and there may be bugs or new functionality that you depend on\. Be explicit\. -. -.IP "\(bu" 4 +.IP \(bu 2 author: Take some credit\. -. -.IP "\(bu" 4 +.IP \(bu 2 scripts: If you have a special compilation or installation script, then you -should put it in the \fBscripts\fR hash\. You should definitely have at +should put it in the \fBscripts\fR object\. You should definitely have at least a basic smoke\-test command as the "scripts\.test" field\. -npm help See npm\-scripts\. -. -.IP "\(bu" 4 +See npm help 7 scripts\. +.IP \(bu 2 main: If you have a single module that serves as the entry point to your program (like what the "foo" package gives you at require("foo")), then you need to specify that in the "main" field\. -. -.IP "\(bu" 4 +.IP \(bu 2 directories: -This is a hash of folders\. The best ones to include are "lib" and -"doc", but if you specify a folder full of man pages in "man", then -they\'ll get installed just like these ones\. -. -.IP "" 0 -. +This is an object mapping names to folders\. The best ones to include are +"lib" and "doc", but if you use "man" to specify a folder full of man pages, +they'll get installed just like these ones\. + +.RE .P You can use \fBnpm init\fR in the root of your package in order to get you -npm help started with a pretty basic package\.json file\. See \fBnpm\-init\fR for +started with a pretty basic package\.json file\. See npm help \fBnpm\-init\fR for more info\. -. -.SH "Keeping files " -Use a \fB\|\.npmignore\fR file to keep stuff out of your package\. If there\'s +.SH Keeping files \fIout\fR of your package +.P +Use a \fB\|\.npmignore\fR file to keep stuff out of your package\. If there's no \fB\|\.npmignore\fR file, but there \fIis\fR a \fB\|\.gitignore\fR file, then npm will ignore the stuff matched by the \fB\|\.gitignore\fR file\. If you \fIwant\fR to include something that is excluded by your \fB\|\.gitignore\fR file, you can create an empty \fB\|\.npmignore\fR file to override it\. 
-. .P -By default, the following paths and files are ignored, so there\'s no +By default, the following paths and files are ignored, so there's no need to add them to \fB\|\.npmignore\fR explicitly: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fB\|\.*\.swp\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\._*\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\.DS_Store\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\.git\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\.hg\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\.lock\-wscript\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\.svn\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fB\|\.wafpickle\-*\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fBCVS\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fBnpm\-debug\.log\fR -. -.IP "" 0 -. + +.RE .P Additionally, everything in \fBnode_modules\fR is ignored, except for -bundled dependencies\. npm automatically handles this for you, so don\'t +bundled dependencies\. npm automatically handles this for you, so don't bother adding \fBnode_modules\fR to \fB\|\.npmignore\fR\|\. -. .P -The following paths and files are never ignored, so adding them to \fB\|\.npmignore\fR is pointless: -. -.IP "\(bu" 4 +The following paths and files are never ignored, so adding them to +\fB\|\.npmignore\fR is pointless: +.RS 0 +.IP \(bu 2 \fBpackage\.json\fR -. -.IP "\(bu" 4 +.IP \(bu 2 \fBREADME\.*\fR -. -.IP "" 0 -. -.SH "Link Packages" + +.RE +.SH Link Packages +.P \fBnpm link\fR is designed to install a development package and see the changes in real time without having to keep re\-installing it\. (You do need to either re\-link or \fBnpm rebuild \-g\fR to update compiled packages, of course\.) -. .P -npm help More info at \fBnpm\-link\fR\|\. -. -.SH "Before Publishing: Make Sure Your Package Installs and Works" +More info at npm help \fBnpm\-link\fR\|\. +.SH Before Publishing: Make Sure Your Package Installs and Works +.P \fBThis is important\.\fR -. .P -If you can not install it locally, you\'ll have -problems trying to publish it\. 
Or, worse yet, you\'ll be able to -publish it, but you\'ll be publishing a broken or pointless package\. -So don\'t do that\. -. +If you can not install it locally, you'll have +problems trying to publish it\. Or, worse yet, you'll be able to +publish it, but you'll be publishing a broken or pointless package\. +So don't do that\. .P In the root of your package, do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm install \. \-g -. .fi -. -.IP "" 0 -. +.RE .P -That\'ll show you that it\'s working\. If you\'d rather just create a symlink +That'll show you that it's working\. If you'd rather just create a symlink package that points to your working directory, then do this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm link -. .fi -. -.IP "" 0 -. +.RE .P -Use \fBnpm ls \-g\fR to see if it\'s there\. -. +Use \fBnpm ls \-g\fR to see if it's there\. .P To test a local install, go into some other folder, and then do: -. -.IP "" 4 -. +.P +.RS 2 .nf cd \.\./some\-other\-folder npm install \.\./my\-package -. .fi -. -.IP "" 0 -. +.RE .P to install it locally into the node_modules folder in that other place\. -. .P Then go into the node\-repl, and try using require("my\-thing") to -bring in your module\'s main module\. -. -.SH "Create a User Account" +bring in your module's main module\. +.SH Create a User Account +.P Create a user with the adduser command\. It works like this: -. -.IP "" 4 -. +.P +.RS 2 .nf npm adduser -. .fi -. -.IP "" 0 -. +.RE .P and then follow the prompts\. -. .P -npm help This is documented better in npm\-adduser\. -. -.SH "Publish your package" -This part\'s easy\. IN the root of your folder, do this: -. -.IP "" 4 -. +This is documented better in npm help adduser\. +.SH Publish your package +.P +This part's easy\. IN the root of your folder, do this: +.P +.RS 2 .nf npm publish -. .fi -. -.IP "" 0 -. +.RE .P You can give publish a url to a tarball, or a filename of a tarball, or a path to a folder\. -. 
.P Note that pretty much \fBeverything in that folder will be exposed\fR -by default\. So, if you have secret stuff in there, use a \fB\|\.npmignore\fR file to list out the globs to ignore, or publish +by default\. So, if you have secret stuff in there, use a +\fB\|\.npmignore\fR file to list out the globs to ignore, or publish from a fresh checkout\. -. -.SH "Brag about it" +.SH Brag about it +.P Send emails, write blogs, blab in IRC\. -. .P Tell the world how easy it is to install your program! -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help faq -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 faq +.IP \(bu 2 npm help npm -. -.IP "\(bu" 4 +.IP \(bu 2 npm help init -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 -npm help scripts -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 +npm help 7 scripts +.IP \(bu 2 npm help publish -. -.IP "\(bu" 4 +.IP \(bu 2 npm help adduser -. -.IP "\(bu" 4 -npm help registry -. -.IP "" 0 +.IP \(bu 2 +npm help 7 registry + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-disputes.7 nodejs-0.11.15/deps/npm/man/man7/npm-disputes.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-disputes.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-disputes.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,92 +1,78 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-DISPUTES" "7" "May 2014" "" "" -. +.TH "NPM\-DISPUTES" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-disputes\fR \-\- Handling Module Name Disputes -. -.SH "SYNOPSIS" -. -.IP "1" 4 +\fBnpm-disputes\fR \- Handling Module Name Disputes +.SH SYNOPSIS +.RS 0 +.IP 1. 3 Get the author email with \fBnpm owner ls \fR -. -.IP "2" 4 -Email the author, CC \fIsupport@npmjs\.com\fR -. -.IP "3" 4 -After a few weeks, if there\'s no resolution, we\'ll sort it out\. -. -.IP "" 0 -. -.P -Don\'t squat on package names\. Publish code or move out of the way\. -. -.SH "DESCRIPTION" +.IP 2. 
3 +Email the author, CC support@npmjs\.com +.IP 3. 3 +After a few weeks, if there's no resolution, we'll sort it out\. + +.RE +.P +Don't squat on package names\. Publish code or move out of the way\. +.SH DESCRIPTION +.P There sometimes arise cases where a user publishes a module, and then later, some other user wants to use that name\. Here are some common ways that happens (each of these is based on actual events\.) -. -.IP "1" 4 +.RS 0 +.IP 1. 3 Joe writes a JavaScript module \fBfoo\fR, which is not node\-specific\. -Joe doesn\'t use node at all\. Bob wants to use \fBfoo\fR in node, so he +Joe doesn't use node at all\. Bob wants to use \fBfoo\fR in node, so he wraps it in an npm module\. Some time later, Joe starts using node, and wants to take over management of his program\. -. -.IP "2" 4 +.IP 2. 3 Bob writes an npm module \fBfoo\fR, and publishes it\. Perhaps much later, Joe finds a bug in \fBfoo\fR, and fixes it\. He sends a pull -request to Bob, but Bob doesn\'t have the time to deal with it, +request to Bob, but Bob doesn't have the time to deal with it, because he has a new job and a new baby and is focused on his new erlang project, and kind of not involved with node any more\. Joe -would like to publish a new \fBfoo\fR, but can\'t, because the name is +would like to publish a new \fBfoo\fR, but can't, because the name is taken\. -. -.IP "3" 4 +.IP 3. 3 Bob writes a 10\-line flow\-control library, and calls it \fBfoo\fR, and publishes it to the npm registry\. Being a simple little thing, it never really has to be updated\. Joe works for Foo Inc, the makers of the critically acclaimed and widely\-marketed \fBfoo\fR JavaScript toolkit framework\. They publish it to npm as \fBfoojs\fR, but people are routinely confused when \fBnpm install foo\fR is some different thing\. -. -.IP "4" 4 +.IP 4. 3 Bob writes a parser for the widely\-known \fBfoo\fR file format, because he needs it for work\. Then, he gets a new job, and never updates the prototype\. 
Later on, Joe writes a much more complete \fBfoo\fR parser, -but can\'t publish, because Bob\'s \fBfoo\fR is in the way\. -. -.IP "" 0 -. -.P -The validity of Joe\'s claim in each situation can be debated\. However, -Joe\'s appropriate course of action in each case is the same\. -. -.IP "1" 4 +but can't publish, because Bob's \fBfoo\fR is in the way\. + +.RE +.P +The validity of Joe's claim in each situation can be debated\. However, +Joe's appropriate course of action in each case is the same\. +.RS 0 +.IP 1. 3 \fBnpm owner ls foo\fR\|\. This will tell Joe the email address of the owner (Bob)\. -. -.IP "2" 4 +.IP 2. 3 Joe emails Bob, explaining the situation \fBas respectfully as possible\fR, and what he would like to do with the module name\. He -adds the npm support staff \fIsupport@npmjs\.com\fR to the CC list of +adds the npm support staff support@npmjs\.com to the CC list of the email\. Mention in the email that Bob can run \fBnpm owner add joe foo\fR to add Joe as an owner of the \fBfoo\fR package\. -. -.IP "3" 4 +.IP 3. 3 After a reasonable amount of time, if Bob has not responded, or if -Bob and Joe can\'t come to any sort of resolution, email support \fIsupport@npmjs\.com\fR and we\'ll sort it out\. ("Reasonable" is +Bob and Joe can't come to any sort of resolution, email support +support@npmjs\.com and we'll sort it out\. ("Reasonable" is usually at least 4 weeks, but extra time is allowed around common holidays\.) -. -.IP "" 0 -. -.SH "REASONING" + +.RE +.SH REASONING +.P In almost every case so far, the parties involved have been able to reach an amicable resolution without any major intervention\. Most people really do want to be reasonable, and are probably not even aware that -they\'re in your way\. -. +they're in your way\. .P Module ecosystems are most vibrant and powerful when they are as self\-directed as possible\. If an admin one day deletes something you @@ -94,53 +80,45 @@ regardless of the justification\. 
When humans solve their problems by talking to other humans with respect, everyone has the chance to end up feeling good about the interaction\. -. -.SH "EXCEPTIONS" +.SH EXCEPTIONS +.P Some things are not allowed, and will be removed without discussion if they are brought to the attention of the npm registry admins, including but not limited to: -. -.IP "1" 4 +.RS 0 +.IP 1. 3 Malware (that is, a package designed to exploit or harm the machine on which it is installed)\. -. -.IP "2" 4 +.IP 2. 3 Violations of copyright or licenses (for example, cloning an MIT\-licensed program, and then removing or changing the copyright and license statement)\. -. -.IP "3" 4 +.IP 3. 3 Illegal content\. -. -.IP "4" 4 -"Squatting" on a package name that you \fIplan\fR to use, but aren\'t -actually using\. Sorry, I don\'t care how great the name is, or how +.IP 4. 3 +"Squatting" on a package name that you \fIplan\fR to use, but aren't +actually using\. Sorry, I don't care how great the name is, or how perfect a fit it is for the thing that someday might happen\. If -someone wants to use it today, and you\'re just taking up space with -an empty tarball, you\'re going to be evicted\. -. -.IP "5" 4 +someone wants to use it today, and you're just taking up space with +an empty tarball, you're going to be evicted\. +.IP 5. 3 Putting empty packages in the registry\. Packages must have SOME -functionality\. It can be silly, but it can\'t be \fInothing\fR\|\. (See +functionality\. It can be silly, but it can't be \fInothing\fR\|\. (See also: squatting\.) -. -.IP "6" 4 +.IP 6. 3 Doing weird things with the registry, like using it as your own personal application database or otherwise putting non\-packagey things into it\. -. -.IP "" 0 -. + +.RE .P If you see bad behavior like this, please report it right away\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 -npm help registry -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help 7 registry +.IP \(bu 2 npm help owner -. 
-.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-faq.7 nodejs-0.11.15/deps/npm/man/man7/npm-faq.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-faq.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-faq.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,145 +1,118 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-FAQ" "7" "May 2014" "" "" -. +.TH "NPM\-FAQ" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-faq\fR \-\- Frequently Asked Questions -. -.SH "Where can I find these docs in HTML?" -\fIhttps://www\.npmjs\.org/doc/\fR, or run: -. -.IP "" 4 -. +\fBnpm-faq\fR \- Frequently Asked Questions +.SH Where can I find these docs in HTML? +.P +https://www\.npmjs\.org/doc/, or run: +.P +.RS 2 .nf npm config set viewer browser -. .fi -. -.IP "" 0 -. +.RE .P to open these documents in your default web browser rather than \fBman\fR\|\. -. -.SH "It didn't work\." -That\'s not really a question\. -. -.SH "Why didn't it work?" -I don\'t know yet\. -. +.SH It didn't work\. +.P +That's not really a question\. +.SH Why didn't it work? .P -Read the error output, and if you can\'t figure out what it means, +I don't know yet\. +.P +Read the error output, and if you can't figure out what it means, do what it says and post a bug with all the information it asks for\. -. -.SH "Where does npm put stuff?" -npm help See \fBnpm\-folders\fR -. +.SH Where does npm put stuff? +.P +See npm help 5 \fBnpm\-folders\fR .P tl;dr: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 Use the \fBnpm root\fR command to see where modules go, and the \fBnpm bin\fR command to see where executables go -. -.IP "\(bu" 4 +.IP \(bu 2 Global installs are different from local installs\. If you install something with the \fB\-g\fR flag, then its executables go in \fBnpm bin \-g\fR and its modules go in \fBnpm root \-g\fR\|\. -. -.IP "" 0 -. -.SH "How do I install something on my computer in a central location?" 
+ +.RE +.SH How do I install something on my computer in a central location? +.P Install it globally by tacking \fB\-g\fR or \fB\-\-global\fR to the command\. (This is especially important for command line utilities that need to add their bins to the global system \fBPATH\fR\|\.) -. -.SH "I installed something globally, but I can't " +.SH I installed something globally, but I can't \fBrequire()\fR it +.P Install it locally\. -. .P The global install location is a place for command\-line utilities -to put their bins in the system \fBPATH\fR\|\. It\'s not for use with \fBrequire()\fR\|\. -. +to put their bins in the system \fBPATH\fR\|\. It's not for use with \fBrequire()\fR\|\. .P -If you \fBrequire()\fR a module in your code, then that means it\'s a +If you \fBrequire()\fR a module in your code, then that means it's a dependency, and a part of your program\. You need to install it locally in your program\. -. -.SH "Why can't npm just put everything in one place, like other package managers?" +.SH Why can't npm just put everything in one place, like other package managers? +.P Not every change is an improvement, but every improvement is a change\. -This would be like asking git to do network IO for every commit\. It\'s -not going to happen, because it\'s a terrible idea that causes more +This would be like asking git to do network IO for every commit\. It's +not going to happen, because it's a terrible idea that causes more problems than it solves\. -. .P It is much harder to avoid dependency conflicts without nesting dependencies\. This is fundamental to the way that npm works, and has -npm help proven to be an extremely successful approach\. See \fBnpm\-folders\fR for +proven to be an extremely successful approach\. See npm help 5 \fBnpm\-folders\fR for more details\. -. .P If you want a package to be installed in one place, and have all your programs reference the same copy of it, then use the \fBnpm link\fR command\. -That\'s what it\'s for\. 
Install it globally, then link it into each +That's what it's for\. Install it globally, then link it into each program that uses it\. -. -.SH "Whatever, I really want the old style 'everything global' style\." +.SH Whatever, I really want the old style 'everything global' style\. +.P Write your own package manager\. You could probably even wrap up \fBnpm\fR in a shell script if you really wanted to\. -. .P npm will not help you do something that is known to be a bad idea\. -. -.SH "Should I check my " -Mikeal Rogers answered this question very well: -. -.P -\fIhttp://www\.futurealoof\.com/posts/nodemodules\-in\-git\.html\fR -. -.P -tl;dr -. -.IP "\(bu" 4 -Check \fBnode_modules\fR into git for things you \fBdeploy\fR, such as -websites and apps\. -. -.IP "\(bu" 4 -Do not check \fBnode_modules\fR into git for libraries and modules -intended to be reused\. -. -.IP "\(bu" 4 -Use npm to manage dependencies in your dev environment, but not in -your deployment scripts\. -. -.IP "" 0 -. -.SH "Is it 'npm' or 'NPM' or 'Npm'?" +.SH Should I check my \fBnode_modules\fR folder into git? +.P +Usually, no\. Allow npm to resolve dependencies for your packages\. +.P +For packages you \fBdeploy\fR, such as websites and apps, +you should use npm shrinkwrap to lock down your full dependency tree: +.P +https://www\.npmjs\.org/doc/cli/npm\-shrinkwrap\.html +.P +If you are paranoid about depending on the npm ecosystem, +you should run a private npm mirror or a private cache\. +.P +If you want 100% confidence in being able to reproduce the specific bytes +included in a deployment, you should use an additional mechanism that can +verify contents rather than versions\. For example, +Amazon machine images, DigitalOcean snapshots, Heroku slugs, or simple tarballs\. +.SH Is it 'npm' or 'NPM' or 'Npm'? +.P npm should never be capitalized unless it is being displayed in a location that is customarily all\-caps (such as the title of man pages\.) -. 
-.SH "If 'npm' is an acronym, why is it never capitalized?" +.SH If 'npm' is an acronym, why is it never capitalized? +.P Contrary to the belief of many, "npm" is not in fact an abbreviation for "Node Package Manager"\. It is a recursive bacronymic abbreviation for "npm is not an acronym"\. (If it was "ninaa", then it would be an acronym, and thus incorrectly named\.) -. .P "NPM", however, \fIis\fR an acronym (more precisely, a capitonym) for the National Association of Pastoral Musicians\. You can learn more -about them at \fIhttp://npm\.org/\fR\|\. -. +about them at http://npm\.org/\|\. .P In software, "NPM" is a Non\-Parametric Mapping utility written by Chris Rorden\. You can analyze pictures of brains with it\. Learn more -about the (capitalized) NPM program at \fIhttp://www\.cabiatl\.com/mricro/npm/\fR\|\. -. +about the (capitalized) NPM program at http://www\.cabiatl\.com/mricro/npm/\|\. .P The first seed that eventually grew into this flower was a bash utility named "pm", which was a shortened descendent of "pkgmakeinst", a bash function that was used to install various different things on different -platforms, most often using Yahoo\'s \fByinst\fR\|\. If \fBnpm\fR was ever an +platforms, most often using Yahoo's \fByinst\fR\|\. If \fBnpm\fR was ever an acronym for anything, it was \fBnode pm\fR or maybe \fBnew pm\fR\|\. -. .P So, in all seriousness, the "npm" project is named after its command\-line utility, which was organically selected to be easily typed by a right\-handed @@ -147,183 +120,151 @@ right\-ring\-finger in a postition to type the \fB\-\fR key for flags and other command\-line arguments\. That command\-line utility is always lower\-case, though it starts most sentences it is a part of\. -. -.SH "How do I list installed packages?" +.SH How do I list installed packages? +.P \fBnpm ls\fR -. -.SH "How do I search for packages?" +.SH How do I search for packages? +.P \fBnpm search\fR -. .P Arguments are greps\. 
\fBnpm search jsdom\fR shows jsdom packages\. -. -.SH "How do I update npm?" -. +.SH How do I update npm? +.P +.RS 2 .nf -npm update npm \-g -. +npm install npm \-g .fi -. +.RE .P You can also update all outdated local packages by doing \fBnpm update\fR without any arguments, or global packages by doing \fBnpm update \-g\fR\|\. -. .P Occasionally, the version of npm will progress such that the current version cannot be properly installed with the version that you have installed already\. (Consider, if there is ever a bug in the \fBupdate\fR command\.) -. .P In those cases, you can do this: -. -.IP "" 4 -. +.P +.RS 2 .nf curl https://www\.npmjs\.org/install\.sh | sh -. .fi -. -.IP "" 0 -. -.SH "What is a " +.RE +.SH What is a \fBpackage\fR? +.P A package is: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 a) a folder containing a program described by a package\.json file -. -.IP "\(bu" 4 +.IP \(bu 2 b) a gzipped tarball containing (a) -. -.IP "\(bu" 4 +.IP \(bu 2 c) a url that resolves to (b) -. -.IP "\(bu" 4 +.IP \(bu 2 d) a \fB@\fR that is published on the registry with (c) -. -.IP "\(bu" 4 +.IP \(bu 2 e) a \fB@\fR that points to (d) -. -.IP "\(bu" 4 +.IP \(bu 2 f) a \fB\fR that has a "latest" tag satisfying (e) -. -.IP "\(bu" 4 +.IP \(bu 2 g) a \fBgit\fR url that, when cloned, results in (a)\. -. -.IP "" 0 -. + +.RE .P Even if you never publish your package, you can still get a lot of benefits of using npm if you just want to write a node program (a), and perhaps if you also want to be able to easily install it elsewhere after packing it up into a tarball (b)\. -. .P Git urls can be of the form: -. -.IP "" 4 -. +.P +.RS 2 .nf git://github\.com/user/project\.git#commit\-ish git+ssh://user@hostname:project\.git#commit\-ish git+http://user@hostname/project/blah\.git#commit\-ish git+https://user@hostname/project/blah\.git#commit\-ish -. .fi -. -.IP "" 0 -. +.RE .P The \fBcommit\-ish\fR can be any tag, sha, or branch which can be supplied as an argument to \fBgit checkout\fR\|\. 
The default is \fBmaster\fR\|\. -. -.SH "What is a " +.SH What is a \fBmodule\fR? +.P A module is anything that can be loaded with \fBrequire()\fR in a Node\.js program\. The following things are all examples of things that can be loaded as modules: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 A folder with a \fBpackage\.json\fR file containing a \fBmain\fR field\. -. -.IP "\(bu" 4 +.IP \(bu 2 A folder with an \fBindex\.js\fR file in it\. -. -.IP "\(bu" 4 +.IP \(bu 2 A JavaScript file\. -. -.IP "" 0 -. + +.RE .P Most npm packages are modules, because they are libraries that you -load with \fBrequire\fR\|\. However, there\'s no requirement that an npm +load with \fBrequire\fR\|\. However, there's no requirement that an npm package be a module! Some only contain an executable command\-line -interface, and don\'t provide a \fBmain\fR field for use in Node programs\. -. +interface, and don't provide a \fBmain\fR field for use in Node programs\. .P -Almost all npm packages (at least, those that are Node programs) \fIcontain\fR many modules within them (because every file they load with \fBrequire()\fR is a module)\. -. +Almost all npm packages (at least, those that are Node programs) +\fIcontain\fR many modules within them (because every file they load with +\fBrequire()\fR is a module)\. .P In the context of a Node program, the \fBmodule\fR is also the thing that was loaded \fIfrom\fR a file\. For example, in the following program: -. -.IP "" 4 -. +.P +.RS 2 .nf -var req = require(\'request\') -. +var req = require('request') .fi -. -.IP "" 0 -. +.RE .P we might say that "The variable \fBreq\fR refers to the \fBrequest\fR module"\. -. -.SH "So, why is it the "" +.SH So, why is it the "\fBnode_modules\fR" folder, but "\fBpackage\.json\fR" file? Why not \fBnode_packages\fR or \fBmodule\.json\fR? +.P The \fBpackage\.json\fR file defines the package\. (See "What is a package?" above\.) -. .P The \fBnode_modules\fR folder is the place Node\.js looks for modules\. 
(See "What is a module?" above\.) -. .P For example, if you create a file at \fBnode_modules/foo\.js\fR and then -had a program that did \fBvar f = require(\'foo\.js\')\fR then it would load +had a program that did \fBvar f = require('foo\.js')\fR then it would load the module\. However, \fBfoo\.js\fR is not a "package" in this case, because it does not have a package\.json\. -. .P -Alternatively, if you create a package which does not have an \fBindex\.js\fR or a \fB"main"\fR field in the \fBpackage\.json\fR file, then it is -not a module\. Even if it\'s installed in \fBnode_modules\fR, it can\'t be +Alternatively, if you create a package which does not have an +\fBindex\.js\fR or a \fB"main"\fR field in the \fBpackage\.json\fR file, then it is +not a module\. Even if it's installed in \fBnode_modules\fR, it can't be an argument to \fBrequire()\fR\|\. -. -.SH ""node_modules"" +.SH \fB"node_modules"\fR is the name of my deity's arch\-rival, and a Forbidden Word in my religion\. Can I configure npm to use a different folder? +.P No\. This will never happen\. This question comes up sometimes, -because it seems silly from the outside that npm couldn\'t just be +because it seems silly from the outside that npm couldn't just be configured to put stuff somewhere else, and then npm could load them -from there\. It\'s an arbitrary spelling choice, right? What\'s the big +from there\. It's an arbitrary spelling choice, right? What's the big deal? -. .P -At the time of this writing, the string \fB\'node_modules\'\fR appears 151 +At the time of this writing, the string \fB\|'node_modules'\fR appears 151 times in 53 separate files in npm and node core (excluding tests and documentation)\. -. .P -Some of these references are in node\'s built\-in module loader\. Since +Some of these references are in node's built\-in module loader\. Since npm is not involved \fBat all\fR at run\-time, node itself would have to -be configured to know where you\'ve decided to stick stuff\. 
Complexity +be configured to know where you've decided to stick stuff\. Complexity hurdle #1\. Since the Node module system is locked, this cannot be -changed, and is enough to kill this request\. But I\'ll continue, in -deference to your deity\'s delicate feelings regarding spelling\. -. +changed, and is enough to kill this request\. But I'll continue, in +deference to your deity's delicate feelings regarding spelling\. .P Many of the others are in dependencies that npm uses, which are not necessarily tightly coupled to npm (in the sense that they do not read -npm\'s configuration files, etc\.) Each of these would have to be +npm's configuration files, etc\.) Each of these would have to be configured to take the name of the \fBnode_modules\fR folder as a parameter\. Complexity hurdle #2\. -. .P Furthermore, npm has the ability to "bundle" dependencies by adding the dep names to the \fB"bundledDependencies"\fR list in package\.json, @@ -332,148 +273,127 @@ different spelling for \fBnode_modules\fR? npm would have to rename the folder at publish time, and then be smart enough to unpack it using your locally configured name\. Complexity hurdle #3\. -. .P -Furthermore, what happens when you \fIchange\fR this name? Fine, it\'s -easy enough the first time, just rename the \fBnode_modules\fR folders to \fB\|\./blergyblerp/\fR or whatever name you choose\. But what about when you -change it again? npm doesn\'t currently track any state about past +Furthermore, what happens when you \fIchange\fR this name? Fine, it's +easy enough the first time, just rename the \fBnode_modules\fR folders to +\fB\|\./blergyblerp/\fR or whatever name you choose\. But what about when you +change it again? npm doesn't currently track any state about past configuration settings, so this would be rather difficult to do properly\. It would have to track every previous value for this -config, and always accept any of them, or else yesterday\'s install may -be broken tomorrow\. 
Complexity hurdle #5\. -. +config, and always accept any of them, or else yesterday's install may +be broken tomorrow\. Complexity hurdle #4\. .P Never going to happen\. The folder is named \fBnode_modules\fR\|\. It is written indelibly in the Node Way, handed down from the ancient times of Node 0\.3\. -. -.SH "How do I install node with npm?" -You don\'t\. Try one of these node version managers: -. +.SH How do I install node with npm? +.P +You don't\. Try one of these node version managers: .P Unix: -. -.IP "\(bu" 4 -\fIhttp://github\.com/isaacs/nave\fR -. -.IP "\(bu" 4 -\fIhttp://github\.com/visionmedia/n\fR -. -.IP "\(bu" 4 -\fIhttp://github\.com/creationix/nvm\fR -. -.IP "" 0 -. +.RS 0 +.IP \(bu 2 +http://github\.com/isaacs/nave +.IP \(bu 2 +http://github\.com/visionmedia/n +.IP \(bu 2 +http://github\.com/creationix/nvm + +.RE .P Windows: -. -.IP "\(bu" 4 -\fIhttp://github\.com/marcelklehr/nodist\fR -. -.IP "\(bu" 4 -\fIhttps://github\.com/hakobera/nvmw\fR -. -.IP "\(bu" 4 -\fIhttps://github\.com/nanjingboy/nvmw\fR -. -.IP "" 0 -. -.SH "How can I use npm for development?" -npm help See \fBnpm\-developersnpm help \fR and \fBpackage\.json\fR\|\. -. +.RS 0 +.IP \(bu 2 +http://github\.com/marcelklehr/nodist +.IP \(bu 2 +https://github\.com/hakobera/nvmw +.IP \(bu 2 +https://github\.com/nanjingboy/nvmw + +.RE +.SH How can I use npm for development? +.P +See npm help 7 \fBnpm\-developers\fR and npm help 5 \fBpackage\.json\fR\|\. .P -You\'ll most likely want to \fBnpm link\fR your development folder\. That\'s +You'll most likely want to \fBnpm link\fR your development folder\. That's awesomely handy\. -. .P -npm help To set up your own private registry, check out \fBnpm\-registry\fR\|\. -. -.SH "Can I list a url as a dependency?" +To set up your own private registry, check out npm help 7 \fBnpm\-registry\fR\|\. +.SH Can I list a url as a dependency? +.P Yes\. 
It should be a url to a gzipped tarball containing a single folder that has a package\.json in its root, or a git url\. (See "what is a package?" above\.) -. -.SH "How do I symlink to a dev folder so I don't have to keep re\-installing?" -npm help See \fBnpm\-link\fR -. -.SH "The package registry website\. What is that exactly?" -npm help See \fBnpm\-registry\fR\|\. -. -.SH "I forgot my password, and can't publish\. How do I reset it?" -Go to \fIhttps://npmjs\.org/forgot\fR\|\. -. -.SH "I get ECONNREFUSED a lot\. What's up?" -Either the registry is down, or node\'s DNS isn\'t able to reach out\. -. +.SH How do I symlink to a dev folder so I don't have to keep re\-installing? +.P +See npm help \fBnpm\-link\fR +.SH The package registry website\. What is that exactly? +.P +See npm help 7 \fBnpm\-registry\fR\|\. +.SH I forgot my password, and can't publish\. How do I reset it? .P -To check if the registry is down, open up \fIhttps://registry\.npmjs\.org/\fR in a web browser\. This will also tell +Go to https://npmjs\.org/forgot\|\. +.SH I get ECONNREFUSED a lot\. What's up? +.P +Either the registry is down, or node's DNS isn't able to reach out\. +.P +To check if the registry is down, open up +https://registry\.npmjs\.org/ in a web browser\. This will also tell you if you are just unable to access the internet for some reason\. -. .P -If the registry IS down, let us know by emailing \fIsupport@npmjs\.com\fR -or posting an issue at \fIhttps://github\.com/npm/npm/issues\fR\|\. If it\'s -down for the world (and not just on your local network) then we\'re +If the registry IS down, let us know by emailing support@npmjs\.com +or posting an issue at https://github\.com/npm/npm/issues\|\. If it's +down for the world (and not just on your local network) then we're probably already being pinged about it\. -. .P You can also often get a faster response by visiting the #npm channel on Freenode IRC\. -. -.SH "Why no namespaces?" 
-Please see this discussion: \fIhttps://github\.com/npm/npm/issues/798\fR -. +.SH Why no namespaces? +.P +Please see this discussion: https://github\.com/npm/npm/issues/798 .P -tl;dr \- It doesn\'t actually make things better, and can make them worse\. -. +tl;dr \- It doesn't actually make things better, and can make them worse\. .P -If you want to namespace your own packages, you may: simply use the \fB\-\fR character to separate the names\. npm is a mostly anarchic system\. +If you want to namespace your own packages, you may: simply use the +\fB\-\fR character to separate the names\. npm is a mostly anarchic system\. There is not sufficient need to impose namespace rules on everyone\. -. -.SH "Who does npm?" +.SH Who does npm? +.P npm was originally written by Isaac Z\. Schlueter, and many others have contributed to it, some of them quite substantially\. -. .P The npm open source project, The npm Registry, and the community website \fIhttps://www\.npmjs\.org\fR are maintained and operated by the -good folks at npm, Inc\. \fIhttps://www\.npmjs\.com\fR -. -.SH "I have a question or request not addressed here\. Where should I put it?" +good folks at npm, Inc\. \fIhttp://www\.npmjs\.com\fR +.SH I have a question or request not addressed here\. Where should I put it? +.P Post an issue on the github project: -. -.IP "\(bu" 4 -\fIhttps://github\.com/npm/npm/issues\fR -. -.IP "" 0 -. -.SH "Why does npm hate me?" +.RS 0 +.IP \(bu 2 +https://github\.com/npm/npm/issues + +.RE +.SH Why does npm hate me? +.P npm is not capable of hatred\. It loves everyone, especially you\. -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help npm -. -.IP "\(bu" 4 -npm help developers -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 7 developers +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help config -. 
-.IP "\(bu" 4 -npm help folders -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 folders + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-index.7 nodejs-0.11.15/deps/npm/man/man7/npm-index.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-index.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-index.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,307 +1,316 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-INDEX" "7" "May 2014" "" "" -. +.TH "NPM\-INDEX" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-index\fR \-\- Index of all npm documentation -. -npm help .SH "README" +\fBnpm-index\fR \- Index of all npm documentation +.SS npm help README +.P node package manager -. -npm help .SH "npm" +.SH Command Line Documentation +.P +Using npm on the command line +.SS npm help npm +.P node package manager -. -npm help .SH "npm\-adduser" +.SS npm help adduser +.P Add a registry user account -. -npm help .SH "npm\-bin" +.SS npm help bin +.P Display npm bin folder -. -npm help .SH "npm\-bugs" +.SS npm help bugs +.P Bugs for a package in a web browser maybe -. -npm help .SH "npm\-build" +.SS npm help build +.P Build a package -. -npm help .SH "npm\-bundle" +.SS npm help bundle +.P REMOVED -. -npm help .SH "npm\-cache" +.SS npm help cache +.P Manipulates packages cache -. -npm help .SH "npm\-completion" +.SS npm help completion +.P Tab Completion for npm -. -npm help .SH "npm\-config" +.SS npm help config +.P Manage the npm configuration files -. -npm help .SH "npm\-dedupe" +.SS npm help dedupe +.P Reduce duplication -. -npm help .SH "npm\-deprecate" +.SS npm help deprecate +.P Deprecate a version of a package -. -npm help .SH "npm\-docs" +.SS npm help docs +.P Docs for a package in a web browser maybe -. -npm help .SH "npm\-edit" +.SS npm help edit +.P Edit an installed package -. 
-npm help .SH "npm\-explore" +.SS npm help explore +.P Browse an installed package -. -npm help .SH "npm\-help\-search" +.SS npm help help\-search +.P Search npm help documentation -. -npm help .SH "npm\-help" +.SS npm help help +.P Get help on npm -. -npm help .SH "npm\-init" +.SS npm help init +.P Interactively create a package\.json file -. -npm help .SH "npm\-install" +.SS npm help install +.P Install a package -. -npm help .SH "npm\-link" +.SS npm help link +.P Symlink a package folder -. -npm help .SH "npm\-ls" +.SS npm help ls +.P List installed packages -. -npm help .SH "npm\-outdated" +.SS npm help outdated +.P Check for outdated packages -. -npm help .SH "npm\-owner" +.SS npm help owner +.P Manage package owners -. -npm help .SH "npm\-pack" +.SS npm help pack +.P Create a tarball from a package -. -npm help .SH "npm\-prefix" +.SS npm help prefix +.P Display prefix -. -npm help .SH "npm\-prune" +.SS npm help prune +.P Remove extraneous packages -. -npm help .SH "npm\-publish" +.SS npm help publish +.P Publish a package -. -npm help .SH "npm\-rebuild" +.SS npm help rebuild +.P Rebuild a package -. -npm help .SH "npm\-repo" +.SS npm help repo +.P Open package repository page in the browser -. -npm help .SH "npm\-restart" +.SS npm help restart +.P Start a package -. -npm help .SH "npm\-rm" +.SS npm help rm +.P Remove a package -. -npm help .SH "npm\-root" +.SS npm help root +.P Display npm root -. -npm help .SH "npm\-run\-script" +.SS npm help run\-script +.P Run arbitrary package scripts -. -npm help .SH "npm\-search" +.SS npm help search +.P Search for packages -. -npm help .SH "npm\-shrinkwrap" +.SS npm help shrinkwrap +.P Lock down dependency versions -. -npm help .SH "npm\-star" +.SS npm help star +.P Mark your favorite packages -. -npm help .SH "npm\-stars" +.SS npm help stars +.P View packages marked as favorites -. -npm help .SH "npm\-start" +.SS npm help start +.P Start a package -. -npm help .SH "npm\-stop" +.SS npm help stop +.P Stop a package -. 
-npm help .SH "npm\-submodule" -Add a package as a git submodule -. -npm help .SH "npm\-tag" +.SS npm help tag +.P Tag a published version -. -npm help .SH "npm\-test" +.SS npm help test +.P Test a package -. -npm help .SH "npm\-uninstall" +.SS npm help uninstall +.P Remove a package -. -npm help .SH "npm\-unpublish" +.SS npm help unpublish +.P Remove a package from the registry -. -npm help .SH "npm\-update" +.SS npm help update +.P Update a package -. -npm help .SH "npm\-version" +.SS npm help version +.P Bump a package version -. -npm help .SH "npm\-view" +.SS npm help view +.P View registry info -. -npm help .SH "npm\-whoami" +.SS npm help whoami +.P Display npm username -. -npm apihelp .SH "npm" +.SH API Documentation +.P +Using npm in your Node programs +.SS npm apihelp npm +.P node package manager -. -npm apihelp .SH "npm\-bin" +.SS npm apihelp bin +.P Display npm bin folder -. -npm apihelp .SH "npm\-bugs" +.SS npm apihelp bugs +.P Bugs for a package in a web browser maybe -. -npm apihelp .SH "npm\-commands" +.SS npm apihelp cache +.P +manage the npm cache programmatically +.SS npm apihelp commands +.P npm commands -. -npm apihelp .SH "npm\-config" +.SS npm apihelp config +.P Manage the npm configuration files -. -npm apihelp .SH "npm\-deprecate" +.SS npm apihelp deprecate +.P Deprecate a version of a package -. -npm apihelp .SH "npm\-docs" +.SS npm apihelp docs +.P Docs for a package in a web browser maybe -. -npm apihelp .SH "npm\-edit" +.SS npm apihelp edit +.P Edit an installed package -. -npm apihelp .SH "npm\-explore" +.SS npm apihelp explore +.P Browse an installed package -. -npm apihelp .SH "npm\-help\-search" +.SS npm apihelp help\-search +.P Search the help pages -. -npm apihelp .SH "npm\-init" +.SS npm apihelp init +.P Interactively create a package\.json file -. -npm apihelp .SH "npm\-install" +.SS npm apihelp install +.P install a package programmatically -. -npm apihelp .SH "npm\-link" +.SS npm apihelp link +.P Symlink a package folder -. 
-npm apihelp .SH "npm\-load" +.SS npm apihelp load +.P Load config settings -. -npm apihelp .SH "npm\-ls" +.SS npm apihelp ls +.P List installed packages -. -npm apihelp .SH "npm\-outdated" +.SS npm apihelp outdated +.P Check for outdated packages -. -npm apihelp .SH "npm\-owner" +.SS npm apihelp owner +.P Manage package owners -. -npm apihelp .SH "npm\-pack" +.SS npm apihelp pack +.P Create a tarball from a package -. -npm apihelp .SH "npm\-prefix" +.SS npm apihelp prefix +.P Display prefix -. -npm apihelp .SH "npm\-prune" +.SS npm apihelp prune +.P Remove extraneous packages -. -npm apihelp .SH "npm\-publish" +.SS npm apihelp publish +.P Publish a package -. -npm apihelp .SH "npm\-rebuild" +.SS npm apihelp rebuild +.P Rebuild a package -. -npm apihelp .SH "npm\-repo" +.SS npm apihelp repo +.P Open package repository page in the browser -. -npm apihelp .SH "npm\-restart" +.SS npm apihelp restart +.P Start a package -. -npm apihelp .SH "npm\-root" +.SS npm apihelp root +.P Display npm root -. -npm apihelp .SH "npm\-run\-script" +.SS npm apihelp run\-script +.P Run arbitrary package scripts -. -npm apihelp .SH "npm\-search" +.SS npm apihelp search +.P Search for packages -. -npm apihelp .SH "npm\-shrinkwrap" +.SS npm apihelp shrinkwrap +.P programmatically generate package shrinkwrap file -. -npm apihelp .SH "npm\-start" +.SS npm apihelp start +.P Start a package -. -npm apihelp .SH "npm\-stop" +.SS npm apihelp stop +.P Stop a package -. -npm apihelp .SH "npm\-submodule" -Add a package as a git submodule -. -npm apihelp .SH "npm\-tag" +.SS npm apihelp tag +.P Tag a published version -. -npm apihelp .SH "npm\-test" +.SS npm apihelp test +.P Test a package -. -npm apihelp .SH "npm\-uninstall" +.SS npm apihelp uninstall +.P uninstall a package programmatically -. -npm apihelp .SH "npm\-unpublish" +.SS npm apihelp unpublish +.P Remove a package from the registry -. -npm apihelp .SH "npm\-update" +.SS npm apihelp update +.P Update a package -. 
-npm apihelp .SH "npm\-version" +.SS npm apihelp version +.P Bump a package version -. -npm apihelp .SH "npm\-view" +.SS npm apihelp view +.P View registry info -. -npm apihelp .SH "npm\-whoami" +.SS npm apihelp whoami +.P Display npm username -. -npm help .SH "npm\-folders" +.SH Files +.P +File system structures npm uses +.SS npm help 5 folders +.P Folder Structures Used by npm -. -npm help .SH "npmrc" +.SS npm help 5 npmrc +.P The npm config files -. -npm help .SH "package\.json" -Specifics of npm\'s package\.json handling -. -npm help .SH "npm\-coding\-style" -npm\'s "funny" coding style -. -npm help .SH "npm\-config" +.SS npm help 5 package\.json +.P +Specifics of npm's package\.json handling +.SH Misc +.P +Various other bits and bobs +.SS npm help 7 coding\-style +.P +npm's "funny" coding style +.SS npm help 7 config +.P More than you probably want to know about npm configuration -. -npm help .SH "npm\-developers" +.SS npm help 7 developers +.P Developer Guide -. -npm help .SH "npm\-disputes" +.SS npm help 7 disputes +.P Handling Module Name Disputes -. -npm help .SH "npm\-faq" +.SS npm help 7 faq +.P Frequently Asked Questions -. -npm help .SH "npm\-index" +.SS npm help 7 index +.P Index of all npm documentation -. -npm help .SH "npm\-registry" +.SS npm help 7 registry +.P The JavaScript Package Registry -. -npm help .SH "npm\-scripts" +.SS npm help 7 scope +.P +Scoped packages +.SS npm help 7 scripts +.P How npm handles the "scripts" field -. -npm help .SH "removing\-npm" +.SS npm help 7 removing\-npm +.P Cleaning the Slate -. 
-npm help .SH "semver" +.SS npm help 7 semver +.P The semantic versioner for npm + diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-registry.7 nodejs-0.11.15/deps/npm/man/man7/npm-registry.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-registry.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-registry.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,82 +1,70 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-REGISTRY" "7" "May 2014" "" "" -. +.TH "NPM\-REGISTRY" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-registry\fR \-\- The JavaScript Package Registry -. -.SH "DESCRIPTION" +\fBnpm-registry\fR \- The JavaScript Package Registry +.SH DESCRIPTION +.P To resolve packages by name and version, npm talks to a registry website that implements the CommonJS Package Registry specification for reading package info\. -. .P -Additionally, npm\'s package registry implementation supports several +Additionally, npm's package registry implementation supports several write APIs as well, to allow for publishing packages and managing user account information\. -. .P -The official public npm registry is at \fIhttp://registry\.npmjs\.org/\fR\|\. It -is powered by a CouchDB database at \fIhttp://isaacs\.iriscouch\.com/registry\fR\|\. The code for the couchapp is -available at \fIhttp://github\.com/npm/npmjs\.org\fR\|\. npm user accounts -are CouchDB users, stored in the \fIhttp://isaacs\.iriscouch\.com/_users\fR -database\. -. -.P -npm help npm help The registry URL is supplied by the \fBregistry\fR config parameter\. See \fBnpm\-config\fR, \fBnpmrcnpm help \fR, and \fBnpm\-config\fR for more on managing -npm\'s configuration\. -. -.SH "Can I run my own private registry?" +The official public npm registry is at http://registry\.npmjs\.org/\|\. It +is powered by a CouchDB database, of which there is a public mirror at +http://skimdb\.npmjs\.com/registry\|\. 
The code for the couchapp is +available at http://github\.com/npm/npm\-registry\-couchapp\|\. +.P +The registry URL used is determined by the scope of the package (see +npm help 7 \fBnpm\-scope\fR)\. If no scope is specified, the default registry is used, which is +supplied by the \fBregistry\fR config parameter\. See npm help \fBnpm\-config\fR, +npm help 5 \fBnpmrc\fR, and npm help 7 \fBnpm\-config\fR for more on managing npm's configuration\. +.SH Can I run my own private registry? +.P Yes! -. .P The easiest way is to replicate the couch database, and use the same (or similar) design doc to implement the APIs\. -. .P If you set up continuous replication from the official CouchDB, and then -set your internal CouchDB as the registry config, then you\'ll be able +set your internal CouchDB as the registry config, then you'll be able to read any published packages, in addition to your private ones, and by default will only publish internally\. If you then want to publish a -package for the whole world to see, you can simply override the \fB\-\-registry\fR config for that command\. -. -.SH "I don't want my package published in the official registry\. It's private\." +package for the whole world to see, you can simply override the +\fB\-\-registry\fR config for that command\. +.SH I don't want my package published in the official registry\. It's private\. +.P Set \fB"private": true\fR in your package\.json to prevent it from being -published at all, or \fB"publishConfig":{"registry":"http://my\-internal\-registry\.local"}\fR +published at all, or +\fB"publishConfig":{"registry":"http://my\-internal\-registry\.local"}\fR to force it to be published only to your internal registry\. -. .P -npm help See \fBpackage\.json\fR for more info on what goes in the package\.json file\. -. -.SH "Will you replicate from my registry into the public one?" +See npm help 5 \fBpackage\.json\fR for more info on what goes in the package\.json file\. 
+.SH Will you replicate from my registry into the public one? +.P No\. If you want things to be public, then publish them into the public registry using npm\. What little security there is would be for nought otherwise\. -. -.SH "Do I have to use couchdb to build a registry that npm can talk to?" -No, but it\'s way easier\. Basically, yes, you do, or you have to +.SH Do I have to use couchdb to build a registry that npm can talk to? +.P +No, but it's way easier\. Basically, yes, you do, or you have to effectively implement the entire CouchDB API anyway\. -. -.SH "Is there a website or something to see package docs and such?" -Yes, head over to \fIhttps://npmjs\.org/\fR -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH Is there a website or something to see package docs and such? +.P +Yes, head over to https://npmjs\.org/ +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help config -. -.IP "\(bu" 4 -npm help config -. -.IP "\(bu" 4 -npm help npmrc -. -.IP "\(bu" 4 -npm help developers -. -.IP "\(bu" 4 -npm help disputes -. -.IP "" 0 +.IP \(bu 2 +npm help 7 config +.IP \(bu 2 +npm help 5 npmrc +.IP \(bu 2 +npm help 7 developers +.IP \(bu 2 +npm help 7 disputes + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-scope.7 nodejs-0.11.15/deps/npm/man/man7/npm-scope.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-scope.7 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-scope.7 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,108 @@ +.TH "NPM\-SCOPE" "7" "October 2014" "" "" +.SH "NAME" +\fBnpm-scope\fR \- Scoped packages +.SH DESCRIPTION +.P +All npm packages have a name\. Some package names also have a scope\. A scope +follows the usual rules for package names (url\-safe characters, no leading dots +or underscores)\. When used in package names, preceded by an @\-symbol and +followed by a slash, e\.g\. 
+.P +.RS 2 +.nf +@somescope/somepackagename +.fi +.RE +.P +Scopes are a way of grouping related packages together, and also affect a few +things about the way npm treats the package\. +.P +\fBAs of 2014\-09\-03, scoped packages are not supported by the public npm registry\fR\|\. +However, the npm client is backwards\-compatible with un\-scoped registries, so +it can be used to work with scoped and un\-scoped registries at the same time\. +.SH Installing scoped packages +.P +Scoped packages are installed to a sub\-folder of the regular installation +folder, e\.g\. if your other packages are installed in \fBnode_modules/packagename\fR, +scoped modules will be in \fBnode_modules/@myorg/packagename\fR\|\. The scope folder +(\fB@myorg\fR) is simply the name of the scope preceded by an @\-symbol, and can +contain any number of scoped packages\. +.P +A scoped package is installed by referencing it by name, preceded by an +@\-symbol, in \fBnpm install\fR: +.P +.RS 2 +.nf +npm install @myorg/mypackage +.fi +.RE +.P +Or in \fBpackage\.json\fR: +.P +.RS 2 +.nf +"dependencies": { + "@myorg/mypackage": "^1\.3\.0" +} +.fi +.RE +.P +Note that if the @\-symbol is omitted in either case npm will instead attempt to +install from GitHub; see npm help \fBnpm\-install\fR\|\. +.SH Requiring scoped packages +.P +Because scoped packages are installed into a scope folder, you have to +include the name of the scope when requiring them in your code, e\.g\. +.P +.RS 2 +.nf +require('@myorg/mypackage') +.fi +.RE +.P +There is nothing special about the way Node treats scope folders, this is +just specifying to require the module \fBmypackage\fR in the folder called \fB@myorg\fR\|\. +.SH Publishing scoped packages +.P +Scoped packages can be published to any registry that supports them\. 
+\fIAs of 2014\-09\-03, the public npm registry does not support scoped packages\fR, +so attempting to publish a scoped package to the registry will fail unless +you have associated that scope with a different registry, see below\. +.SH Associating a scope with a registry +.P +Scopes can be associated with a separate registry\. This allows you to +seamlessly use a mix of packages from the public npm registry and one or more +private registries, such as npm Enterprise\. +.P +You can associate a scope with a registry at login, e\.g\. +.P +.RS 2 +.nf +npm login \-\-registry=http://reg\.example\.com \-\-scope=@myco +.fi +.RE +.P +Scopes have a many\-to\-one relationship with registries: one registry can +host multiple scopes, but a scope only ever points to one registry\. +.P +You can also associate a scope with a registry using \fBnpm config\fR: +.P +.RS 2 +.nf +npm config set @myco:registry http://reg\.example\.com +.fi +.RE +.P +Once a scope is associated with a registry, any \fBnpm install\fR for a package +with that scope will request packages from that registry instead\. Any +\fBnpm publish\fR for a package name that contains the scope will be published to +that registry instead\. +.SH SEE ALSO +.RS 0 +.IP \(bu 2 +npm help install +.IP \(bu 2 +npm help publish + +.RE + diff -Nru nodejs-0.11.13/deps/npm/man/man7/npm-scripts.7 nodejs-0.11.15/deps/npm/man/man7/npm-scripts.7 --- nodejs-0.11.13/deps/npm/man/man7/npm-scripts.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/npm-scripts.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,77 +1,64 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-SCRIPTS" "7" "May 2014" "" "" -. +.TH "NPM\-SCRIPTS" "7" "October 2014" "" "" .SH "NAME" -\fBnpm-scripts\fR \-\- How npm handles the "scripts" field -. 
-.SH "DESCRIPTION" -npm supports the "scripts" member of the package\.json script, for the +\fBnpm-scripts\fR \- How npm handles the "scripts" field +.SH DESCRIPTION +.P +npm supports the "scripts" property of the package\.json script, for the following scripts: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 prepublish: Run BEFORE the package is published\. (Also run on local \fBnpm install\fR without any arguments\.) -. -.IP "\(bu" 4 +.IP \(bu 2 publish, postpublish: Run AFTER the package is published\. -. -.IP "\(bu" 4 +.IP \(bu 2 preinstall: Run BEFORE the package is installed -. -.IP "\(bu" 4 +.IP \(bu 2 install, postinstall: Run AFTER the package is installed\. -. -.IP "\(bu" 4 +.IP \(bu 2 preuninstall, uninstall: Run BEFORE the package is uninstalled\. -. -.IP "\(bu" 4 +.IP \(bu 2 postuninstall: Run AFTER the package is uninstalled\. -. -.IP "\(bu" 4 +.IP \(bu 2 preupdate: Run BEFORE the package is updated with the update command\. -. -.IP "\(bu" 4 +.IP \(bu 2 update, postupdate: Run AFTER the package is updated with the update command\. -. -.IP "\(bu" 4 +.IP \(bu 2 pretest, test, posttest: Run by the \fBnpm test\fR command\. -. -.IP "\(bu" 4 +.IP \(bu 2 prestop, stop, poststop: Run by the \fBnpm stop\fR command\. -. -.IP "\(bu" 4 +.IP \(bu 2 prestart, start, poststart: Run by the \fBnpm start\fR command\. -. -.IP "\(bu" 4 +.IP \(bu 2 prerestart, restart, postrestart: Run by the \fBnpm restart\fR command\. Note: \fBnpm restart\fR will run the stop and start scripts if no \fBrestart\fR script is provided\. -. -.IP "" 0 -. -.P -Additionally, arbitrary scripts can be run by doing \fBnpm run\-script \fR\|\. -. -.SH "NOTE: INSTALL SCRIPTS ARE AN ANTIPATTERN" -\fBtl;dr\fR Don\'t use \fBinstall\fR\|\. Use a \fB\|\.gyp\fR file for compilation, and \fBprepublish\fR for anything else\. -. + +.RE +.P +Additionally, arbitrary scripts can be executed by running \fBnpm +run\-script \fR\|\. \fIPre\fR and \fIpost\fR commands with matching +names will be run for those as well (e\.g\. 
\fBpremyscript\fR, \fBmyscript\fR, +\fBpostmyscript\fR)\. +.SH NOTE: INSTALL SCRIPTS ARE AN ANTIPATTERN +.P +\fBtl;dr\fR Don't use \fBinstall\fR\|\. Use a \fB\|\.gyp\fR file for compilation, and +\fBprepublish\fR for anything else\. .P -You should almost never have to explicitly set a \fBpreinstall\fR or \fBinstall\fR script\. If you are doing this, please consider if there is +You should almost never have to explicitly set a \fBpreinstall\fR or +\fBinstall\fR script\. If you are doing this, please consider if there is another option\. -. .P The only valid use of \fBinstall\fR or \fBpreinstall\fR scripts is for compilation which must be done on the target architecture\. In early @@ -79,173 +66,147 @@ a standalone \fBMakefile\fR, and early versions of npm required that it be explicitly set in package\.json\. This was not portable, and harder to do properly\. -. .P -In the current version of node, the standard way to do this is using a \fB\|\.gyp\fR file\. If you have a file with a \fB\|\.gyp\fR extension in the root +In the current version of node, the standard way to do this is using a +\fB\|\.gyp\fR file\. If you have a file with a \fB\|\.gyp\fR extension in the root of your package, then npm will run the appropriate \fBnode\-gyp\fR commands automatically at install time\. This is the only officially supported method for compiling binary addons, and does not require that you add anything to your package\.json file\. -. .P If you have to do other things before your package is used, in a way that is not dependent on the operating system or architecture of the target system, then use a \fBprepublish\fR script instead\. This includes tasks such as: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 Compile CoffeeScript source code into JavaScript\. -. -.IP "\(bu" 4 +.IP \(bu 2 Create minified versions of JavaScript source code\. -. -.IP "\(bu" 4 +.IP \(bu 2 Fetching remote resources that your package will use\. -. -.IP "" 0 -. 
+ +.RE .P -The advantage of doing these things at \fBprepublish\fR time instead of \fBpreinstall\fR or \fBinstall\fR time is that they can be done once, in a +The advantage of doing these things at \fBprepublish\fR time instead of +\fBpreinstall\fR or \fBinstall\fR time is that they can be done once, in a single place, and thus greatly reduce complexity and variability\. Additionally, this means that: -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 You can depend on \fBcoffee\-script\fR as a \fBdevDependency\fR, and thus -your users don\'t need to have it installed\. -. -.IP "\(bu" 4 -You don\'t need to include the minifiers in your package, reducing +your users don't need to have it installed\. +.IP \(bu 2 +You don't need to include the minifiers in your package, reducing the size for your users\. -. -.IP "\(bu" 4 -You don\'t need to rely on your users having \fBcurl\fR or \fBwget\fR or +.IP \(bu 2 +You don't need to rely on your users having \fBcurl\fR or \fBwget\fR or other system tools on the target machines\. -. -.IP "" 0 -. -.SH "DEFAULT VALUES" + +.RE +.SH DEFAULT VALUES +.P npm will default some script values based on package contents\. -. -.IP "\(bu" 4 +.RS 0 +.IP \(bu 2 \fB"start": "node server\.js"\fR: -. -.IP If there is a \fBserver\.js\fR file in the root of your package, then npm will default the \fBstart\fR command to \fBnode server\.js\fR\|\. -. -.IP "\(bu" 4 +.IP \(bu 2 \fB"preinstall": "node\-waf clean || true; node\-waf configure build"\fR: -. -.IP If there is a \fBwscript\fR file in the root of your package, npm will default the \fBpreinstall\fR command to compile using node\-waf\. -. -.IP "" 0 -. -.SH "USER" + +.RE +.SH USER +.P If npm was invoked with root privileges, then it will change the uid to the user account or uid specified by the \fBuser\fR config, which defaults to \fBnobody\fR\|\. Set the \fBunsafe\-perm\fR flag to run scripts with root privileges\. -. 
-.SH "ENVIRONMENT" +.SH ENVIRONMENT +.P Package scripts run in an environment where many pieces of information are made available regarding the setup of npm and the current state of the process\. -. -.SS "path" +.SS path +.P If you depend on modules that define executable scripts, like test suites, then those executables will be added to the \fBPATH\fR for executing the scripts\. So, if your package\.json has this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "name" : "foo" , "dependencies" : { "bar" : "0\.1\.x" } , "scripts": { "start" : "bar \./test" } } -. .fi -. -.IP "" 0 -. +.RE .P then you could run \fBnpm start\fR to execute the \fBbar\fR script, which is exported into the \fBnode_modules/\.bin\fR directory on \fBnpm install\fR\|\. -. -.SS "package\.json vars" +.SS package\.json vars +.P The package\.json fields are tacked onto the \fBnpm_package_\fR prefix\. So, for instance, if you had \fB{"name":"foo", "version":"1\.2\.5"}\fR in your -package\.json file, then your package scripts would have the \fBnpm_package_name\fR environment variable set to "foo", and the \fBnpm_package_version\fR set to "1\.2\.5" -. -.SS "configuration" -Configuration parameters are put in the environment with the \fBnpm_config_\fR prefix\. For instance, you can view the effective \fBroot\fR +package\.json file, then your package scripts would have the +\fBnpm_package_name\fR environment variable set to "foo", and the +\fBnpm_package_version\fR set to "1\.2\.5" +.SS configuration +.P +Configuration parameters are put in the environment with the +\fBnpm_config_\fR prefix\. For instance, you can view the effective \fBroot\fR config by checking the \fBnpm_config_root\fR environment variable\. -. -.SS "Special: package\.json "config" hash" +.SS Special: package\.json "config" object +.P The package\.json "config" keys are overwritten in the environment if there is a config param of \fB[@]:\fR\|\. For example, if the package\.json has this: -. -.IP "" 4 -. 
+.P +.RS 2 .nf { "name" : "foo" , "config" : { "port" : "8080" } , "scripts" : { "start" : "node server\.js" } } -. .fi -. -.IP "" 0 -. +.RE .P and the server\.js is this: -. -.IP "" 4 -. +.P +.RS 2 .nf http\.createServer(\.\.\.)\.listen(process\.env\.npm_package_config_port) -. .fi -. -.IP "" 0 -. +.RE .P then the user could change the behavior by doing: -. -.IP "" 4 -. +.P +.RS 2 .nf npm config set foo:port 80 -. .fi -. -.IP "" 0 -. -.SS "current lifecycle event" +.RE +.SS current lifecycle event +.P Lastly, the \fBnpm_lifecycle_event\fR environment variable is set to whichever stage of the cycle is being executed\. So, you could have a single script used for different parts of the process which switches -based on what\'s currently happening\. -. +based on what's currently happening\. .P -Objects are flattened following this format, so if you had \fB{"scripts":{"install":"foo\.js"}}\fR in your package\.json, then you\'d +Objects are flattened following this format, so if you had +\fB{"scripts":{"install":"foo\.js"}}\fR in your package\.json, then you'd see this in the script: -. -.IP "" 4 -. +.P +.RS 2 .nf process\.env\.npm_package_scripts_install === "foo\.js" -. .fi -. -.IP "" 0 -. -.SH "EXAMPLES" +.RE +.SH EXAMPLES +.P For example, if your package\.json contains this: -. -.IP "" 4 -. +.P +.RS 2 .nf { "scripts" : { "install" : "scripts/install\.js" @@ -253,24 +214,20 @@ , "uninstall" : "scripts/uninstall\.js" } } -. .fi -. -.IP "" 0 -. +.RE .P then the \fBscripts/install\.js\fR will be called for the install, post\-install, stages of the lifecycle, and the \fBscripts/uninstall\.js\fR -would be called when the package is uninstalled\. Since \fBscripts/install\.js\fR is running for three different phases, it would +would be called when the package is uninstalled\. Since +\fBscripts/install\.js\fR is running for three different phases, it would be wise in this case to look at the \fBnpm_lifecycle_event\fR environment variable\. -. 
.P If you want to run a make command, you can do so\. This works just fine: -. -.IP "" 4 -. +.P +.RS 2 .nf { "scripts" : { "preinstall" : "\./configure" @@ -278,77 +235,64 @@ , "test" : "make test" } } -. .fi -. -.IP "" 0 -. -.SH "EXITING" +.RE +.SH EXITING +.P Scripts are run by passing the line as a script argument to \fBsh\fR\|\. -. .P If the script exits with a code other than 0, then this will abort the process\. -. .P -Note that these script files don\'t have to be nodejs or even +Note that these script files don't have to be nodejs or even javascript programs\. They just have to be some kind of executable file\. -. -.SH "HOOK SCRIPTS" +.SH HOOK SCRIPTS +.P If you want to run a specific script at a specific lifecycle event for ALL packages, then you can use a hook script\. -. .P Place an executable file at \fBnode_modules/\.hooks/{eventname}\fR, and -it\'ll get run for all packages when they are going through that point +it'll get run for all packages when they are going through that point in the package lifecycle for any packages installed in that root\. -. .P Hook scripts are run exactly the same way as package\.json scripts\. That is, they are in a separate child process, with the env described above\. -. -.SH "BEST PRACTICES" -. -.IP "\(bu" 4 -Don\'t exit with a non\-zero error code unless you \fIreally\fR mean it\. +.SH BEST PRACTICES +.RS 0 +.IP \(bu 2 +Don't exit with a non\-zero error code unless you \fIreally\fR mean it\. Except for uninstall scripts, this will cause the npm action to fail, and potentially be rolled back\. If the failure is minor or -only will prevent some optional features, then it\'s better to just +only will prevent some optional features, then it's better to just print a warning and exit successfully\. -. -.IP "\(bu" 4 -npm help Try not to use scripts to do what npm can do for you\. 
Read through \fBpackage\.json\fR to see all the things that you can specify and enable +.IP \(bu 2 +Try not to use scripts to do what npm can do for you\. Read through +npm help 5 \fBpackage\.json\fR to see all the things that you can specify and enable by simply describing your package appropriately\. In general, this will lead to a more robust and consistent state\. -. -.IP "\(bu" 4 +.IP \(bu 2 Inspect the env to determine where to put things\. For instance, if the \fBnpm_config_binroot\fR environ is set to \fB/home/user/bin\fR, then -don\'t try to install executables into \fB/usr/local/bin\fR\|\. The user +don't try to install executables into \fB/usr/local/bin\fR\|\. The user probably set it up that way for a reason\. -. -.IP "\(bu" 4 -Don\'t prefix your script commands with "sudo"\. If root permissions -are required for some reason, then it\'ll fail with that error, and +.IP \(bu 2 +Don't prefix your script commands with "sudo"\. If root permissions +are required for some reason, then it'll fail with that error, and the user will sudo the npm command in question\. -. -.IP "" 0 -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 + +.RE +.SH SEE ALSO +.RS 0 +.IP \(bu 2 npm help run\-script -. -.IP "\(bu" 4 -npm help package\.json -. -.IP "\(bu" 4 -npm help developers -. -.IP "\(bu" 4 +.IP \(bu 2 +npm help 5 package\.json +.IP \(bu 2 +npm help 7 developers +.IP \(bu 2 npm help install -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/removing-npm.7 nodejs-0.11.15/deps/npm/man/man7/removing-npm.7 --- nodejs-0.11.13/deps/npm/man/man7/removing-npm.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/removing-npm.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,107 +1,78 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "NPM\-REMOVAL" "1" "May 2014" "" "" -. +.TH "NPM\-REMOVAL" "1" "October 2014" "" "" .SH "NAME" -\fBnpm-removal\fR \-\- Cleaning the Slate -. 
-.SH "SYNOPSIS" +\fBnpm-removal\fR \- Cleaning the Slate +.SH SYNOPSIS +.P So sad to see you go\. -. -.IP "" 4 -. +.P +.RS 2 .nf sudo npm uninstall npm \-g -. .fi -. -.IP "" 0 -. +.RE .P Or, if that fails, get the npm source code, and do: -. -.IP "" 4 -. +.P +.RS 2 .nf sudo make uninstall -. .fi -. -.IP "" 0 -. -.SH "More Severe Uninstalling" +.RE +.SH More Severe Uninstalling +.P Usually, the above instructions are sufficient\. That will remove -npm, but leave behind anything you\'ve installed\. -. +npm, but leave behind anything you've installed\. .P -If that doesn\'t work, or if you require more drastic measures, +If that doesn't work, or if you require more drastic measures, continue reading\. -. .P Note that this is only necessary for globally\-installed packages\. Local -installs are completely contained within a project\'s \fBnode_modules\fR -folder\. Delete that folder, and everything is gone (unless a package\'s +installs are completely contained within a project's \fBnode_modules\fR +folder\. Delete that folder, and everything is gone (unless a package's install script is particularly ill\-behaved)\. -. .P This assumes that you installed node and npm in the default place\. If you configured node with a different \fB\-\-prefix\fR, or installed npm with a -different prefix setting, then adjust the paths accordingly, replacing \fB/usr/local\fR with your install prefix\. -. +different prefix setting, then adjust the paths accordingly, replacing +\fB/usr/local\fR with your install prefix\. .P To remove everything npm\-related manually: -. -.IP "" 4 -. +.P +.RS 2 .nf rm \-rf /usr/local/{lib/node{,/\.npm,_modules},bin,share/man}/npm* -. .fi -. -.IP "" 0 -. +.RE .P If you installed things \fIwith\fR npm, then your best bet is to uninstall them with npm first, and then install them again once you have a proper install\. This can help find any symlinks that are lying around: -. -.IP "" 4 -. 
+.P +.RS 2 .nf ls \-laF /usr/local/{lib/node{,/\.npm},bin,share/man} | grep npm -. .fi -. -.IP "" 0 -. +.RE .P Prior to version 0\.3, npm used shim files for executables and node modules\. To track those down, you can do the following: -. -.IP "" 4 -. +.P +.RS 2 .nf find /usr/local/{lib/node,bin} \-exec grep \-l npm \\{\\} \\; ; -. .fi -. -.IP "" 0 -. +.RE .P (This is also in the README file\.) -. -.SH "SEE ALSO" -. -.IP "\(bu" 4 +.SH SEE ALSO +.RS 0 +.IP \(bu 2 README -. -.IP "\(bu" 4 +.IP \(bu 2 npm help rm -. -.IP "\(bu" 4 +.IP \(bu 2 npm help prune -. -.IP "" 0 + +.RE diff -Nru nodejs-0.11.13/deps/npm/man/man7/semver.7 nodejs-0.11.15/deps/npm/man/man7/semver.7 --- nodejs-0.11.13/deps/npm/man/man7/semver.7 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/man/man7/semver.7 2015-01-20 21:22:17.000000000 +0000 @@ -1,219 +1,352 @@ -.\" Generated with Ronnjs 0.3.8 -.\" http://github.com/kapouer/ronnjs/ -. -.TH "SEMVER" "7" "May 2014" "" "" -. +.TH "SEMVER" "7" "October 2014" "" "" .SH "NAME" -\fBsemver\fR \-\- The semantic versioner for npm -. -.SH "Usage" -. +\fBsemver\fR \- The semantic versioner for npm +.SH Usage +.P +.RS 2 .nf $ npm install semver -semver\.valid(\'1\.2\.3\') // \'1\.2\.3\' -semver\.valid(\'a\.b\.c\') // null -semver\.clean(\' =v1\.2\.3 \') // \'1\.2\.3\' -semver\.satisfies(\'1\.2\.3\', \'1\.x || >=2\.5\.0 || 5\.0\.0 \- 7\.2\.3\') // true -semver\.gt(\'1\.2\.3\', \'9\.8\.7\') // false -semver\.lt(\'1\.2\.3\', \'9\.8\.7\') // true -. + +semver\.valid('1\.2\.3') // '1\.2\.3' +semver\.valid('a\.b\.c') // null +semver\.clean(' =v1\.2\.3 ') // '1\.2\.3' +semver\.satisfies('1\.2\.3', '1\.x || >=2\.5\.0 || 5\.0\.0 \- 7\.2\.3') // true +semver\.gt('1\.2\.3', '9\.8\.7') // false +semver\.lt('1\.2\.3', '9\.8\.7') // true .fi -. +.RE .P As a command\-line utility: -. -.IP "" 4 -. +.P +.RS 2 .nf $ semver \-h + Usage: semver [ [\.\.\.]] [\-r | \-i | \-d ] Test if version(s) satisfy the supplied range(s), and sort them\. 
+ Multiple versions or ranges may be supplied, unless increment or decrement options are specified\. In that case, only a single version may be used, and it is incremented by the specified level + Program exits successfully if any valid version satisfies all supplied ranges, and prints all satisfying versions\. + If no versions are valid, or ranges are not satisfied, then exits failure\. + Versions are printed in ascending order, so supplying multiple versions to the utility will just sort them\. -. .fi -. -.IP "" 0 -. -.SH "Versions" -A "version" is described by the v2\.0\.0 specification found at \fIhttp://semver\.org/\fR\|\. -. +.RE +.SH Versions +.P +A "version" is described by the \fBv2\.0\.0\fR specification found at +http://semver\.org/\|\. .P A leading \fB"="\fR or \fB"v"\fR character is stripped off and ignored\. -. -.SH "Ranges" -The following range styles are supported: -. -.IP "\(bu" 4 -\fB1\.2\.3\fR A specific version\. When nothing else will do\. Note that -build metadata is still ignored, so \fB1\.2\.3+build2012\fR will satisfy -this range\. -. -.IP "\(bu" 4 -\fB>1\.2\.3\fR Greater than a specific version\. -. -.IP "\(bu" 4 -\fB<1\.2\.3\fR Less than a specific version\. If there is no prerelease -tag on the version range, then no prerelease version will be allowed -either, even though these are technically "less than"\. -. -.IP "\(bu" 4 -\fB>=1\.2\.3\fR Greater than or equal to\. Note that prerelease versions -are NOT equal to their "normal" equivalents, so \fB1\.2\.3\-beta\fR will -not satisfy this range, but \fB2\.3\.0\-beta\fR will\. -. -.IP "\(bu" 4 -\fB<=1\.2\.3\fR Less than or equal to\. In this case, prerelease versions -ARE allowed, so \fB1\.2\.3\-beta\fR would satisfy\. -. -.IP "\(bu" 4 +.SH Ranges +.P +A \fBversion range\fR is a set of \fBcomparators\fR which specify versions +that satisfy the range\. +.P +A \fBcomparator\fR is composed of an \fBoperator\fR and a \fBversion\fR\|\. 
The set +of primitive \fBoperators\fR is: +.RS 0 +.IP \(bu 2 +\fB<\fR Less than +.IP \(bu 2 +\fB<=\fR Less than or equal to +.IP \(bu 2 +\fB>\fR Greater than +.IP \(bu 2 +\fB>=\fR Greater than or equal to +.IP \(bu 2 +\fB=\fR Equal\. If no operator is specified, then equality is assumed, +so this operator is optional, but MAY be included\. + +.RE +.P +For example, the comparator \fB>=1\.2\.7\fR would match the versions +\fB1\.2\.7\fR, \fB1\.2\.8\fR, \fB2\.5\.3\fR, and \fB1\.3\.9\fR, but not the versions \fB1\.2\.6\fR +or \fB1\.1\.0\fR\|\. +.P +Comparators can be joined by whitespace to form a \fBcomparator set\fR, +which is satisfied by the \fBintersection\fR of all of the comparators +it includes\. +.P +A range is composed of one or more comparator sets, joined by \fB||\fR\|\. A +version matches a range if and only if every comparator in at least +one of the \fB||\fR\-separated comparator sets is satisfied by the version\. +.P +For example, the range \fB>=1\.2\.7 <1\.3\.0\fR would match the versions +\fB1\.2\.7\fR, \fB1\.2\.8\fR, and \fB1\.2\.99\fR, but not the versions \fB1\.2\.6\fR, \fB1\.3\.0\fR, +or \fB1\.1\.0\fR\|\. +.P +The range \fB1\.2\.7 || >=1\.2\.9 <2\.0\.0\fR would match the versions \fB1\.2\.7\fR, +\fB1\.2\.9\fR, and \fB1\.4\.6\fR, but not the versions \fB1\.2\.8\fR or \fB2\.0\.0\fR\|\. +.SS Prerelease Tags +.P +If a version has a prerelease tag (for example, \fB1\.2\.3\-alpha\.3\fR) then +it will only be allowed to satisfy comparator sets if at least one +comparator with the same \fB[major, minor, patch]\fR tuple also has a +prerelease tag\. +.P +For example, the range \fB>1\.2\.3\-alpha\.3\fR would be allowed to match the +version \fB1\.2\.3\-alpha\.7\fR, but it would \fInot\fR be satisfied by +\fB3\.4\.5\-alpha\.9\fR, even though \fB3\.4\.5\-alpha\.9\fR is technically "greater +than" \fB1\.2\.3\-alpha\.3\fR according to the SemVer sort rules\. The version +range only accepts prerelease tags on the \fB1\.2\.3\fR version\. 
The +version \fB3\.4\.5\fR \fIwould\fR satisfy the range, because it does not have a +prerelease flag, and \fB3\.4\.5\fR is greater than \fB1\.2\.3\-alpha\.7\fR\|\. +.P +The purpose for this behavior is twofold\. First, prerelease versions +frequently are updated very quickly, and contain many breaking changes +that are (by the author's design) not yet fit for public consumption\. +Therefore, by default, they are excluded from range matching +semantics\. +.P +Second, a user who has opted into using a prerelease version has +clearly indicated the intent to use \fIthat specific\fR set of +alpha/beta/rc versions\. By including a prerelease tag in the range, +the user is indicating that they are aware of the risk\. However, it +is still not appropriate to assume that they have opted into taking a +similar risk on the \fInext\fR set of prerelease versions\. +.SS Advanced Range Syntax +.P +Advanced range syntax desugars to primitive comparators in +deterministic ways\. +.P +Advanced ranges may be combined in the same way as primitive +comparators using white space or \fB||\fR\|\. +.SS Hyphen Ranges \fBX\.Y\.Z \- A\.B\.C\fR +.P +Specifies an inclusive set\. +.RS 0 +.IP \(bu 2 \fB1\.2\.3 \- 2\.3\.4\fR := \fB>=1\.2\.3 <=2\.3\.4\fR -. -.IP "\(bu" 4 -\fB~1\.2\.3\fR := \fB>=1\.2\.3\-0 <1\.3\.0\-0\fR "Reasonably close to 1\.2\.3"\. When -using tilde operators, prerelease versions are supported as well, -but a prerelease of the next significant digit will NOT be -satisfactory, so \fB1\.3\.0\-beta\fR will not satisfy \fB~1\.2\.3\fR\|\. -. -.IP "\(bu" 4 -\fB^1\.2\.3\fR := \fB>=1\.2\.3\-0 <2\.0\.0\-0\fR "Compatible with 1\.2\.3"\. When -using caret operators, anything from the specified version (including -prerelease) will be supported up to, but not including, the next -major version (or its prereleases)\. \fB1\.5\.1\fR will satisfy \fB^1\.2\.3\fR, -while \fB1\.2\.2\fR and \fB2\.0\.0\-beta\fR will not\. -. 
-.IP "\(bu" 4 -\fB^0\.1\.3\fR := \fB>=0\.1\.3\-0 <0\.2\.0\-0\fR "Compatible with 0\.1\.3"\. 0\.x\.x versions are -special: the first non\-zero component indicates potentially breaking changes, -meaning the caret operator matches any version with the same first non\-zero -component starting at the specified version\. -. -.IP "\(bu" 4 -\fB^0\.0\.2\fR := \fB=0\.0\.2\fR "Only the version 0\.0\.2 is considered compatible" -. -.IP "\(bu" 4 -\fB~1\.2\fR := \fB>=1\.2\.0\-0 <1\.3\.0\-0\fR "Any version starting with 1\.2" -. -.IP "\(bu" 4 -\fB^1\.2\fR := \fB>=1\.2\.0\-0 <2\.0\.0\-0\fR "Any version compatible with 1\.2" -. -.IP "\(bu" 4 -\fB1\.2\.x\fR := \fB>=1\.2\.0\-0 <1\.3\.0\-0\fR "Any version starting with 1\.2" -. -.IP "\(bu" 4 -\fB~1\fR := \fB>=1\.0\.0\-0 <2\.0\.0\-0\fR "Any version starting with 1" -. -.IP "\(bu" 4 -\fB^1\fR := \fB>=1\.0\.0\-0 <2\.0\.0\-0\fR "Any version compatible with 1" -. -.IP "\(bu" 4 -\fB1\.x\fR := \fB>=1\.0\.0\-0 <2\.0\.0\-0\fR "Any version starting with 1" -. -.IP "" 0 -. -.P -Ranges can be joined with either a space (which implies "and") or a \fB||\fR (which implies "or")\. -. -.SH "Functions" + +.RE +.P +If a partial version is provided as the first version in the inclusive +range, then the missing pieces are replaced with zeroes\. +.RS 0 +.IP \(bu 2 +\fB1\.2 \- 2\.3\.4\fR := \fB>=1\.2\.0 <=2\.3\.4\fR + +.RE +.P +If a partial version is provided as the second version in the +inclusive range, then all versions that start with the supplied parts +of the tuple are accepted, but nothing that would be greater than the +provided tuple parts\. +.RS 0 +.IP \(bu 2 +\fB1\.2\.3 \- 2\.3\fR := \fB>=1\.2\.3 <2\.4\.0\fR +.IP \(bu 2 +\fB1\.2\.3 \- 2\fR := \fB>=1\.2\.3 <3\.0\.0\fR + +.RE +.SS X\-Ranges \fB1\.2\.x\fR \fB1\.X\fR \fB1\.2\.*\fR \fB*\fR +.P +Any of \fBX\fR, \fBx\fR, or \fB*\fR may be used to "stand in" for one of the +numeric values in the \fB[major, minor, patch]\fR tuple\. 
+.RS 0 +.IP \(bu 2 +\fB*\fR := \fB>=0\.0\.0\fR (Any version satisfies) +.IP \(bu 2 +\fB1\.x\fR := \fB>=1\.0\.0 <2\.0\.0\fR (Matching major version) +.IP \(bu 2 +\fB1\.2\.x\fR := \fB>=1\.2\.0 <1\.3\.0\fR (Matching major and minor versions) + +.RE +.P +A partial version range is treated as an X\-Range, so the special +character is in fact optional\. +.RS 0 +.IP \(bu 2 +\fB""\fR (empty string) := \fB*\fR := \fB>=0\.0\.0\fR +.IP \(bu 2 +\fB1\fR := \fB1\.x\.x\fR := \fB>=1\.0\.0 <2\.0\.0\fR +.IP \(bu 2 +\fB1\.2\fR := \fB1\.2\.x\fR := \fB>=1\.2\.0 <1\.3\.0\fR + +.RE +.SS Tilde Ranges \fB~1\.2\.3\fR \fB~1\.2\fR \fB~1\fR +.P +Allows patch\-level changes if a minor version is specified on the +comparator\. Allows minor\-level changes if not\. +.RS 0 +.IP \(bu 2 +\fB~1\.2\.3\fR := \fB>=1\.2\.3 <1\.(2+1)\.0\fR := \fB>=1\.2\.3 <1\.3\.0\fR +.IP \(bu 2 +\fB~1\.2\fR := \fB>=1\.2\.0 <1\.(2+1)\.0\fR := \fB>=1\.2\.0 <1\.3\.0\fR (Same as \fB1\.2\.x\fR) +.IP \(bu 2 +\fB~1\fR := \fB>=1\.0\.0 <(1+1)\.0\.0\fR := \fB>=1\.0\.0 <2\.0\.0\fR (Same as \fB1\.x\fR) +.IP \(bu 2 +\fB~0\.2\.3\fR := \fB>=0\.2\.3 <0\.(2+1)\.0\fR := \fB>=0\.2\.3 <0\.3\.0\fR +.IP \(bu 2 +\fB~0\.2\fR := \fB>=0\.2\.0 <0\.(2+1)\.0\fR := \fB>=0\.2\.0 <0\.3\.0\fR (Same as \fB0\.2\.x\fR) +.IP \(bu 2 +\fB~0\fR := \fB>=0\.0\.0 <(0+1)\.0\.0\fR := \fB>=0\.0\.0 <1\.0\.0\fR (Same as \fB0\.x\fR) +.IP \(bu 2 +\fB~1\.2\.3\-beta\.2\fR := \fB>=1\.2\.3\-beta\.2 <1\.3\.0\fR Note that prereleases in +the \fB1\.2\.3\fR version will be allowed, if they are greater than or +equal to \fBbeta\.2\fR\|\. So, \fB1\.2\.3\-beta\.4\fR would be allowed, but +\fB1\.2\.4\-beta\.2\fR would not, because it is a prerelease of a +different \fB[major, minor, patch]\fR tuple\. + +.RE +.P +Note: this is the same as the \fB~>\fR operator in rubygems\. +.SS Caret Ranges \fB^1\.2\.3\fR \fB^0\.2\.5\fR \fB^0\.0\.4\fR +.P +Allows changes that do not modify the left\-most non\-zero digit in the +\fB[major, minor, patch]\fR tuple\. 
In other words, this allows patch and +minor updates for versions \fB1\.0\.0\fR and above, patch updates for +versions \fB0\.X >=0\.1\.0\fR, and \fIno\fR updates for versions \fB0\.0\.X\fR\|\. +.P +Many authors treat a \fB0\.x\fR version as if the \fBx\fR were the major +"breaking\-change" indicator\. +.P +Caret ranges are ideal when an author may make breaking changes +between \fB0\.2\.4\fR and \fB0\.3\.0\fR releases, which is a common practice\. +However, it presumes that there will \fInot\fR be breaking changes between +\fB0\.2\.4\fR and \fB0\.2\.5\fR\|\. It allows for changes that are presumed to be +additive (but non\-breaking), according to commonly observed practices\. +.RS 0 +.IP \(bu 2 +\fB^1\.2\.3\fR := \fB>=1\.2\.3 <2\.0\.0\fR +.IP \(bu 2 +\fB^0\.2\.3\fR := \fB>=0\.2\.3 <0\.3\.0\fR +.IP \(bu 2 +\fB^0\.0\.3\fR := \fB>=0\.0\.3 <0\.0\.4\fR +.IP \(bu 2 +\fB^1\.2\.3\-beta\.2\fR := \fB>=1\.2\.3\-beta\.2 <2\.0\.0\fR Note that prereleases in +the \fB1\.2\.3\fR version will be allowed, if they are greater than or +equal to \fBbeta\.2\fR\|\. So, \fB1\.2\.3\-beta\.4\fR would be allowed, but +\fB1\.2\.4\-beta\.2\fR would not, because it is a prerelease of a +different \fB[major, minor, patch]\fR tuple\. +.IP \(bu 2 +\fB^0\.0\.3\-beta\fR := \fB>=0\.0\.3\-beta <0\.0\.4\fR Note that prereleases in the +\fB0\.0\.3\fR version \fIonly\fR will be allowed, if they are greater than or +equal to \fBbeta\fR\|\. So, \fB0\.0\.3\-pr\.2\fR would be allowed\. + +.RE +.P +When parsing caret ranges, a missing \fBpatch\fR value desugars to the +number \fB0\fR, but will allow flexibility within that value, even if the +major and minor versions are both \fB0\fR\|\. 
+.RS 0 +.IP \(bu 2 +\fB^1\.2\.x\fR := \fB>=1\.2\.0 <2\.0\.0\fR +.IP \(bu 2 +\fB^0\.0\.x\fR := \fB>=0\.0\.0 <0\.1\.0\fR +.IP \(bu 2 +\fB^0\.0\fR := \fB>=0\.0\.0 <0\.1\.0\fR + +.RE +.P +A missing \fBminor\fR and \fBpatch\fR values will desugar to zero, but also +allow flexibility within those values, even if the major version is +zero\. +.RS 0 +.IP \(bu 2 +\fB^1\.x\fR := \fB>=1\.0\.0 <2\.0\.0\fR +.IP \(bu 2 +\fB^0\.x\fR := \fB>=0\.0\.0 <1\.0\.0\fR + +.RE +.SH Functions +.P All methods and classes take a final \fBloose\fR boolean argument that, if true, will be more forgiving about not\-quite\-valid semver strings\. The resulting output will always be 100% strict, of course\. -. .P Strict\-mode Comparators and Ranges will be strict about the SemVer strings that they parse\. -. -.IP "\(bu" 4 -valid(v): Return the parsed version, or null if it\'s not valid\. -. -.IP "\(bu" 4 -inc(v, release): Return the version incremented by the release type -(major, minor, patch, or prerelease), or null if it\'s not valid\. -. -.IP "" 0 -. -.SS "Comparison" -. -.IP "\(bu" 4 -gt(v1, v2): \fBv1 > v2\fR -. -.IP "\(bu" 4 -gte(v1, v2): \fBv1 >= v2\fR -. -.IP "\(bu" 4 -lt(v1, v2): \fBv1 < v2\fR -. -.IP "\(bu" 4 -lte(v1, v2): \fBv1 <= v2\fR -. -.IP "\(bu" 4 -eq(v1, v2): \fBv1 == v2\fR This is true if they\'re logically equivalent, -even if they\'re not the exact same string\. You already know how to +.RS 0 +.IP \(bu 2 +\fBvalid(v)\fR: Return the parsed version, or null if it's not valid\. +.IP \(bu 2 +\fBinc(v, release)\fR: Return the version incremented by the release +type (\fBmajor\fR, \fBpremajor\fR, \fBminor\fR, \fBpreminor\fR, \fBpatch\fR, +\fBprepatch\fR, or \fBprerelease\fR), or null if it's not valid +.RS 0 +.IP \(bu 2 +\fBpremajor\fR in one call will bump the version up to the next major +version and down to a prerelease of that major version\. +\fBpreminor\fR, and \fBprepatch\fR work the same way\. 
+.IP \(bu 2 +If called from a non\-prerelease version, the \fBprerelease\fR will work the +same as \fBprepatch\fR\|\. It increments the patch version, then makes a +prerelease\. If the input version is already a prerelease it simply +increments it\. + +.RE + +.RE +.SS Comparison +.RS 0 +.IP \(bu 2 +\fBgt(v1, v2)\fR: \fBv1 > v2\fR +.IP \(bu 2 +\fBgte(v1, v2)\fR: \fBv1 >= v2\fR +.IP \(bu 2 +\fBlt(v1, v2)\fR: \fBv1 < v2\fR +.IP \(bu 2 +\fBlte(v1, v2)\fR: \fBv1 <= v2\fR +.IP \(bu 2 +\fBeq(v1, v2)\fR: \fBv1 == v2\fR This is true if they're logically equivalent, +even if they're not the exact same string\. You already know how to compare strings\. -. -.IP "\(bu" 4 -neq(v1, v2): \fBv1 != v2\fR The opposite of eq\. -. -.IP "\(bu" 4 -cmp(v1, comparator, v2): Pass in a comparison string, and it\'ll call +.IP \(bu 2 +\fBneq(v1, v2)\fR: \fBv1 != v2\fR The opposite of \fBeq\fR\|\. +.IP \(bu 2 +\fBcmp(v1, comparator, v2)\fR: Pass in a comparison string, and it'll call the corresponding function above\. \fB"==="\fR and \fB"!=="\fR do simple string comparison, but are included for completeness\. Throws if an invalid comparison string is provided\. -. -.IP "\(bu" 4 -compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or \-1 if -v2 is greater\. Sorts in ascending order if passed to Array\.sort()\. -. -.IP "\(bu" 4 -rcompare(v1, v2): The reverse of compare\. Sorts an array of versions -in descending order when passed to Array\.sort()\. -. -.IP "" 0 -. -.SS "Ranges" -. -.IP "\(bu" 4 -validRange(range): Return the valid range or null if it\'s not valid -. -.IP "\(bu" 4 -satisfies(version, range): Return true if the version satisfies the +.IP \(bu 2 +\fBcompare(v1, v2)\fR: Return \fB0\fR if \fBv1 == v2\fR, or \fB1\fR if \fBv1\fR is greater, or \fB\-1\fR if +\fBv2\fR is greater\. Sorts in ascending order if passed to \fBArray\.sort()\fR\|\. +.IP \(bu 2 +\fBrcompare(v1, v2)\fR: The reverse of compare\. 
Sorts an array of versions +in descending order when passed to \fBArray\.sort()\fR\|\. + +.RE +.SS Ranges +.RS 0 +.IP \(bu 2 +\fBvalidRange(range)\fR: Return the valid range or null if it's not valid +.IP \(bu 2 +\fBsatisfies(version, range)\fR: Return true if the version satisfies the range\. -. -.IP "\(bu" 4 -maxSatisfying(versions, range): Return the highest version in the list -that satisfies the range, or null if none of them do\. -. -.IP "\(bu" 4 -gtr(version, range): Return true if version is greater than all the +.IP \(bu 2 +\fBmaxSatisfying(versions, range)\fR: Return the highest version in the list +that satisfies the range, or \fBnull\fR if none of them do\. +.IP \(bu 2 +\fBgtr(version, range)\fR: Return \fBtrue\fR if version is greater than all the versions possible in the range\. -. -.IP "\(bu" 4 -ltr(version, range): Return true if version is less than all the +.IP \(bu 2 +\fBltr(version, range)\fR: Return \fBtrue\fR if version is less than all the versions possible in the range\. -. -.IP "\(bu" 4 -outside(version, range, hilo): Return true if the version is outside -the bounds of the range in either the high or low direction\. The \fBhilo\fR argument must be either the string \fB\'>\'\fR or \fB\'<\'\fR\|\. (This is +.IP \(bu 2 +\fBoutside(version, range, hilo)\fR: Return true if the version is outside +the bounds of the range in either the high or low direction\. The +\fBhilo\fR argument must be either the string \fB\|'>'\fR or \fB\|'<'\fR\|\. (This is the function called by \fBgtr\fR and \fBltr\fR\|\.) -. -.IP "" 0 -. + +.RE .P Note that, since ranges may be non\-contiguous, a version might not be greater than a range, less than a range, \fIor\fR satisfy a range! 
For example, the range \fB1\.2 <1\.2\.9 || >2\.0\.0\fR would have a hole from \fB1\.2\.9\fR until \fB2\.0\.0\fR, so the version \fB1\.2\.10\fR would not be greater than the -range (because 2\.0\.1 satisfies, which is higher), nor less than the -range (since 1\.2\.8 satisfies, which is lower), and it also does not +range (because \fB2\.0\.1\fR satisfies, which is higher), nor less than the +range (since \fB1\.2\.8\fR satisfies, which is lower), and it also does not satisfy the range\. -. .P If you want to know if a version satisfies or does not satisfy a range, use the \fBsatisfies(version, range)\fR function\. + diff -Nru nodejs-0.11.13/deps/npm/node_modules/abbrev/abbrev.js nodejs-0.11.15/deps/npm/node_modules/abbrev/abbrev.js --- nodejs-0.11.13/deps/npm/node_modules/abbrev/abbrev.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/abbrev/abbrev.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,62 @@ + +module.exports = exports = abbrev.abbrev = abbrev + +abbrev.monkeyPatch = monkeyPatch + +function monkeyPatch () { + Object.defineProperty(Array.prototype, 'abbrev', { + value: function () { return abbrev(this) }, + enumerable: false, configurable: true, writable: true + }) + + Object.defineProperty(Object.prototype, 'abbrev', { + value: function () { return abbrev(Object.keys(this)) }, + enumerable: false, configurable: true, writable: true + }) +} + +function abbrev (list) { + if (arguments.length !== 1 || !Array.isArray(list)) { + list = Array.prototype.slice.call(arguments, 0) + } + for (var i = 0, l = list.length, args = [] ; i < l ; i ++) { + args[i] = typeof list[i] === "string" ? 
list[i] : String(list[i]) + } + + // sort them lexicographically, so that they're next to their nearest kin + args = args.sort(lexSort) + + // walk through each, seeing how much it has in common with the next and previous + var abbrevs = {} + , prev = "" + for (var i = 0, l = args.length ; i < l ; i ++) { + var current = args[i] + , next = args[i + 1] || "" + , nextMatches = true + , prevMatches = true + if (current === next) continue + for (var j = 0, cl = current.length ; j < cl ; j ++) { + var curChar = current.charAt(j) + nextMatches = nextMatches && curChar === next.charAt(j) + prevMatches = prevMatches && curChar === prev.charAt(j) + if (!nextMatches && !prevMatches) { + j ++ + break + } + } + prev = current + if (j === cl) { + abbrevs[current] = current + continue + } + for (var a = current.substr(0, j) ; j <= cl ; j ++) { + abbrevs[a] = current + a += current.charAt(j) + } + } + return abbrevs +} + +function lexSort (a, b) { + return a === b ? 0 : a > b ? 1 : -1 +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/abbrev/CONTRIBUTING.md nodejs-0.11.15/deps/npm/node_modules/abbrev/CONTRIBUTING.md --- nodejs-0.11.13/deps/npm/node_modules/abbrev/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/abbrev/CONTRIBUTING.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ + To get started, sign the + Contributor License Agreement. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/abbrev/lib/abbrev.js nodejs-0.11.15/deps/npm/node_modules/abbrev/lib/abbrev.js --- nodejs-0.11.13/deps/npm/node_modules/abbrev/lib/abbrev.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/abbrev/lib/abbrev.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ - -module.exports = exports = abbrev.abbrev = abbrev - -abbrev.monkeyPatch = monkeyPatch - -function monkeyPatch () { - Object.defineProperty(Array.prototype, 'abbrev', { - value: function () { return abbrev(this) }, - enumerable: false, configurable: true, writable: true - }) - - Object.defineProperty(Object.prototype, 'abbrev', { - value: function () { return abbrev(Object.keys(this)) }, - enumerable: false, configurable: true, writable: true - }) -} - -function abbrev (list) { - if (arguments.length !== 1 || !Array.isArray(list)) { - list = Array.prototype.slice.call(arguments, 0) - } - for (var i = 0, l = list.length, args = [] ; i < l ; i ++) { - args[i] = typeof list[i] === "string" ? 
list[i] : String(list[i]) - } - - // sort them lexicographically, so that they're next to their nearest kin - args = args.sort(lexSort) - - // walk through each, seeing how much it has in common with the next and previous - var abbrevs = {} - , prev = "" - for (var i = 0, l = args.length ; i < l ; i ++) { - var current = args[i] - , next = args[i + 1] || "" - , nextMatches = true - , prevMatches = true - if (current === next) continue - for (var j = 0, cl = current.length ; j < cl ; j ++) { - var curChar = current.charAt(j) - nextMatches = nextMatches && curChar === next.charAt(j) - prevMatches = prevMatches && curChar === prev.charAt(j) - if (!nextMatches && !prevMatches) { - j ++ - break - } - } - prev = current - if (j === cl) { - abbrevs[current] = current - continue - } - for (var a = current.substr(0, j) ; j <= cl ; j ++) { - abbrevs[a] = current - a += current.charAt(j) - } - } - return abbrevs -} - -function lexSort (a, b) { - return a === b ? 0 : a > b ? 1 : -1 -} - - -// tests -if (module === require.main) { - -var assert = require("assert") -var util = require("util") - -console.log("running tests") -function test (list, expect) { - var actual = abbrev(list) - assert.deepEqual(actual, expect, - "abbrev("+util.inspect(list)+") === " + util.inspect(expect) + "\n"+ - "actual: "+util.inspect(actual)) - actual = abbrev.apply(exports, list) - assert.deepEqual(abbrev.apply(exports, list), expect, - "abbrev("+list.map(JSON.stringify).join(",")+") === " + util.inspect(expect) + "\n"+ - "actual: "+util.inspect(actual)) -} - -test([ "ruby", "ruby", "rules", "rules", "rules" ], -{ rub: 'ruby' -, ruby: 'ruby' -, rul: 'rules' -, rule: 'rules' -, rules: 'rules' -}) -test(["fool", "foom", "pool", "pope"], -{ fool: 'fool' -, foom: 'foom' -, poo: 'pool' -, pool: 'pool' -, pop: 'pope' -, pope: 'pope' -}) -test(["a", "ab", "abc", "abcd", "abcde", "acde"], -{ a: 'a' -, ab: 'ab' -, abc: 'abc' -, abcd: 'abcd' -, abcde: 'abcde' -, ac: 'acde' -, acd: 'acde' -, acde: 'acde' -}) - 
-console.log("pass") - -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/abbrev/package.json nodejs-0.11.15/deps/npm/node_modules/abbrev/package.json --- nodejs-0.11.13/deps/npm/node_modules/abbrev/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/abbrev/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,14 +1,14 @@ { "name": "abbrev", - "version": "1.0.4", + "version": "1.0.5", "description": "Like ruby's abbrev module, but in js", "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me" }, - "main": "./lib/abbrev.js", + "main": "abbrev.js", "scripts": { - "test": "node lib/abbrev.js" + "test": "node test.js" }, "repository": { "type": "git", @@ -18,8 +18,28 @@ "type": "MIT", "url": "https://github.com/isaacs/abbrev-js/raw/master/LICENSE" }, - "readme": "# abbrev-js\n\nJust like [ruby's Abbrev](http://apidock.com/ruby/Abbrev).\n\nUsage:\n\n var abbrev = require(\"abbrev\");\n abbrev(\"foo\", \"fool\", \"folding\", \"flop\");\n \n // returns:\n { fl: 'flop'\n , flo: 'flop'\n , flop: 'flop'\n , fol: 'folding'\n , fold: 'folding'\n , foldi: 'folding'\n , foldin: 'folding'\n , folding: 'folding'\n , foo: 'foo'\n , fool: 'fool'\n }\n\nThis is handy for command-line scripts, or other cases where you want to be able to accept shorthands.\n", - "readmeFilename": "README.md", - "_id": "abbrev@1.0.4", - "_from": "abbrev@latest" + "bugs": { + "url": "https://github.com/isaacs/abbrev-js/issues" + }, + "homepage": "https://github.com/isaacs/abbrev-js", + "_id": "abbrev@1.0.5", + "_shasum": "5d8257bd9ebe435e698b2fa431afde4fe7b10b03", + "_from": "abbrev@latest", + "_npmVersion": "1.4.7", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "5d8257bd9ebe435e698b2fa431afde4fe7b10b03", + "tarball": "http://registry.npmjs.org/abbrev/-/abbrev-1.0.5.tgz" + }, + "directories": {}, + "_resolved": 
"https://registry.npmjs.org/abbrev/-/abbrev-1.0.5.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/abbrev/test.js nodejs-0.11.15/deps/npm/node_modules/abbrev/test.js --- nodejs-0.11.13/deps/npm/node_modules/abbrev/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/abbrev/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,47 @@ +var abbrev = require('./abbrev.js') +var assert = require("assert") +var util = require("util") + +console.log("TAP Version 13") +var count = 0 + +function test (list, expect) { + count++ + var actual = abbrev(list) + assert.deepEqual(actual, expect, + "abbrev("+util.inspect(list)+") === " + util.inspect(expect) + "\n"+ + "actual: "+util.inspect(actual)) + actual = abbrev.apply(exports, list) + assert.deepEqual(abbrev.apply(exports, list), expect, + "abbrev("+list.map(JSON.stringify).join(",")+") === " + util.inspect(expect) + "\n"+ + "actual: "+util.inspect(actual)) + console.log('ok - ' + list.join(' ')) +} + +test([ "ruby", "ruby", "rules", "rules", "rules" ], +{ rub: 'ruby' +, ruby: 'ruby' +, rul: 'rules' +, rule: 'rules' +, rules: 'rules' +}) +test(["fool", "foom", "pool", "pope"], +{ fool: 'fool' +, foom: 'foom' +, poo: 'pool' +, pool: 'pool' +, pop: 'pope' +, pope: 'pope' +}) +test(["a", "ab", "abc", "abcd", "abcde", "acde"], +{ a: 'a' +, ab: 'ab' +, abc: 'abc' +, abcd: 'abcd' +, abcde: 'abcde' +, ac: 'acde' +, acd: 'acde' +, acde: 'acde' +}) + +console.log("0..%d", count) diff -Nru nodejs-0.11.13/deps/npm/node_modules/ansi/color-spaces.pl nodejs-0.11.15/deps/npm/node_modules/ansi/color-spaces.pl --- nodejs-0.11.13/deps/npm/node_modules/ansi/color-spaces.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ansi/color-spaces.pl 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -#!/usr/bin/perl -# Author: Todd Larason -# $XFree86: xc/programs/xterm/vttests/256colors2.pl,v 1.1 1999/07/11 08:49:54 dawes Exp $ - -print "256 color mode\n\n"; - -# display back ground 
colors - -for ($fgbg = 38; $fgbg <= 48; $fgbg +=10) { - -# first the system ones: -print "System colors:\n"; -for ($color = 0; $color < 8; $color++) { - print "\x1b[${fgbg};5;${color}m::"; -} -print "\x1b[0m\n"; -for ($color = 8; $color < 16; $color++) { - print "\x1b[${fgbg};5;${color}m::"; -} -print "\x1b[0m\n\n"; - -# now the color cube -print "Color cube, 6x6x6:\n"; -for ($green = 0; $green < 6; $green++) { - for ($red = 0; $red < 6; $red++) { - for ($blue = 0; $blue < 6; $blue++) { - $color = 16 + ($red * 36) + ($green * 6) + $blue; - print "\x1b[${fgbg};5;${color}m::"; - } - print "\x1b[0m "; - } - print "\n"; -} - -# now the grayscale ramp -print "Grayscale ramp:\n"; -for ($color = 232; $color < 256; $color++) { - print "\x1b[${fgbg};5;${color}m::"; -} -print "\x1b[0m\n\n"; - -} - -print "Examples for the 3-byte color mode\n\n"; - -for ($fgbg = 38; $fgbg <= 48; $fgbg +=10) { - -# now the color cube -print "Color cube\n"; -for ($green = 0; $green < 256; $green+=51) { - for ($red = 0; $red < 256; $red+=51) { - for ($blue = 0; $blue < 256; $blue+=51) { - print "\x1b[${fgbg};2;${red};${green};${blue}m::"; - } - print "\x1b[0m "; - } - print "\n"; -} - -# now the grayscale ramp -print "Grayscale ramp:\n"; -for ($gray = 8; $gray < 256; $gray+=10) { - print "\x1b[${fgbg};2;${gray};${gray};${gray}m::"; -} -print "\x1b[0m\n\n"; - -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/ansi/examples/starwars.js nodejs-0.11.15/deps/npm/node_modules/ansi/examples/starwars.js --- nodejs-0.11.13/deps/npm/node_modules/ansi/examples/starwars.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ansi/examples/starwars.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -#!/usr/bin/env node - -/** - * A little script to play the ASCII Star Wars, but with a hidden cursor, - * since over `telnet(1)` the cursor remains visible which is annoying. 
- */ - -process.title = 'starwars' - -var net = require('net') - , cursor = require('../')(process.stdout) - , color = process.argv[2] - -// enable "raw mode" so that keystrokes aren't visible -process.stdin.resume() -if (process.stdin.setRawMode) { - process.stdin.setRawMode(true) -} else { - require('tty').setRawMode(true) -} - -// connect to the ASCII Star Wars server -var socket = net.connect(23, 'towel.blinkenlights.nl') - -socket.on('connect', function () { - if (color in cursor.fg) { - cursor.fg[color]() - } - cursor.hide() - socket.pipe(process.stdout) -}) - -process.stdin.on('data', function (data) { - if (data.toString() === '\u0003') { - // Ctrl+C; a.k.a SIGINT - socket.destroy() - process.stdin.pause() - } -}) - -process.on('exit', function () { - cursor - .show() - .fg.reset() - .write('\n') -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/ansi/History.md nodejs-0.11.15/deps/npm/node_modules/ansi/History.md --- nodejs-0.11.13/deps/npm/node_modules/ansi/History.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ansi/History.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ + +0.3.0 / 2014-05-09 +================== + + * package: remove "test" script and "devDependencies" + * package: remove "engines" section + * pacakge: remove "bin" section + * package: beautify + * examples: remove `starwars` example (#15) + * Documented goto, horizontalAbsolute, and eraseLine methods in README.md (#12, @Jammerwoch) + * add `.jshintrc` file + +< 0.3.0 +======= + + * Prehistoric diff -Nru nodejs-0.11.13/deps/npm/node_modules/ansi/.jshintrc nodejs-0.11.15/deps/npm/node_modules/ansi/.jshintrc --- nodejs-0.11.13/deps/npm/node_modules/ansi/.jshintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ansi/.jshintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +{ + "laxcomma": true, + "asi": true +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/ansi/package.json 
nodejs-0.11.15/deps/npm/node_modules/ansi/package.json --- nodejs-0.11.13/deps/npm/node_modules/ansi/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ansi/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -11,7 +11,7 @@ "256", "stream" ], - "version": "0.2.1", + "version": "0.3.0", "author": { "name": "Nathan Rajlich", "email": "nathan@tootallnate.net", @@ -22,29 +22,33 @@ "url": "git://github.com/TooTallNate/ansi.js.git" }, "main": "./lib/ansi.js", - "bin": { - "beep": "./examples/beep/index.js", - "clear": "./examples/clear/index.js", - "starwars": "./examples/starwars.js" - }, - "scripts": { - "test": "mocha --reporter spec" - }, - "devDependencies": { - "mocha": "*" - }, - "engines": { - "node": "*" - }, - "readme": "ansi.js\n=========\n### Advanced ANSI formatting tool for Node.js\n\n`ansi.js` is a module for Node.js that provides an easy-to-use API for\nwriting ANSI escape codes to `Stream` instances. ANSI escape codes are used to do\nfancy things in a terminal window, like render text in colors, delete characters,\nlines, the entire window, or hide and show the cursor, and lots more!\n\nThe code for the example in the screenshot above can be found in the\n`examples/imgcat` directory.\n\n#### Features:\n\n * 256 color support for the terminal!\n * Make a beep sound from your terminal!\n * Works with *any* writable `Stream` instance.\n * Allows you to move the cursor anywhere on the terminal window.\n * Allows you to delete existing contents from the terminal window.\n * Allows you to hide and show the cursor.\n * Converts CSS color codes and RGB values into ANSI escape codes.\n * Low-level; you are in control of when escape codes are used, it's not abstracted.\n\n\nInstallation\n------------\n\nInstall with `npm`:\n\n``` bash\n$ npm install ansi\n```\n\n\nExample\n-------\n\n``` js\nvar ansi = require('ansi')\n , cursor = ansi(process.stdout)\n\n// You can chain your calls forever:\ncursor\n .red() // Set font color 
to red\n .bg.grey() // Set background color to grey\n .write('Hello World!') // Write 'Hello World!' to stdout\n .bg.reset() // Reset the bgcolor before writing the trailing \\n,\n // to avoid Terminal glitches\n .write('\\n') // And a final \\n to wrap things up\n\n// Rendering modes are persistent:\ncursor.hex('#660000').bold().underline()\n\n// You can use the regular logging functions, text will be green\nconsole.log('This is blood red, bold text')\n\n// To reset just the foreground color:\ncursor.fg.reset()\n\nconsole.log('This will still be bold')\n\n// Clean up after yourself!\ncursor.reset()\n```\n\n\nLicense\n-------\n\n(The MIT License)\n\nCopyright (c) 2012 Nathan Rajlich <nathan@tootallnate.net>\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n'Software'), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/TooTallNate/ansi.js/issues" }, - "_id": "ansi@0.2.1", + "homepage": "https://github.com/TooTallNate/ansi.js", + "_id": "ansi@0.3.0", + "_shasum": 
"74b2f1f187c8553c7f95015bcb76009fb43d38e0", + "_from": "ansi@latest", + "_npmVersion": "1.4.9", + "_npmUser": { + "name": "tootallnate", + "email": "nathan@tootallnate.net" + }, + "maintainers": [ + { + "name": "TooTallNate", + "email": "nathan@tootallnate.net" + }, + { + "name": "tootallnate", + "email": "nathan@tootallnate.net" + } + ], "dist": { - "shasum": "76961682ac06d5ea0729af53295ea8f953a0cb21" + "shasum": "74b2f1f187c8553c7f95015bcb76009fb43d38e0", + "tarball": "http://registry.npmjs.org/ansi/-/ansi-0.3.0.tgz" }, - "_from": "ansi@latest", - "_resolved": "https://registry.npmjs.org/ansi/-/ansi-0.2.1.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/ansi/-/ansi-0.3.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/ansi/README.md nodejs-0.11.15/deps/npm/node_modules/ansi/README.md --- nodejs-0.11.13/deps/npm/node_modules/ansi/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ansi/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -7,9 +7,6 @@ fancy things in a terminal window, like render text in colors, delete characters, lines, the entire window, or hide and show the cursor, and lots more! -The code for the example in the screenshot above can be found in the -`examples/imgcat` directory. - #### Features: * 256 color support for the terminal! 
@@ -51,7 +48,7 @@ // Rendering modes are persistent: cursor.hex('#660000').bold().underline() -// You can use the regular logging functions, text will be green +// You can use the regular logging functions, text will be green: console.log('This is blood red, bold text') // To reset just the foreground color: @@ -59,6 +56,16 @@ console.log('This will still be bold') +// to go to a location (x,y) on the console +// note: 1-indexed, not 0-indexed: +cursor.goto(10, 5).write('Five down, ten over') + +// to clear the current line: +cursor.horizontalAbsolute(0).eraseLine().write('Starting again') + +// to go to a different column on the current line: +cursor.horizontalAbsolute(5).write('column five') + // Clean up after yourself! cursor.reset() ``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/examples/beep.js nodejs-0.11.15/deps/npm/node_modules/archy/examples/beep.js --- nodejs-0.11.13/deps/npm/node_modules/archy/examples/beep.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/examples/beep.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +var archy = require('../'); +var s = archy({ + label : 'beep', + nodes : [ + 'ity', + { + label : 'boop', + nodes : [ + { + label : 'o_O', + nodes : [ + { + label : 'oh', + nodes : [ 'hello', 'puny' ] + }, + 'human' + ] + }, + 'party\ntime!' 
+ ] + } + ] +}); +console.log(s); diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/examples/multi_line.js nodejs-0.11.15/deps/npm/node_modules/archy/examples/multi_line.js --- nodejs-0.11.13/deps/npm/node_modules/archy/examples/multi_line.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/examples/multi_line.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,25 @@ +var archy = require('../'); + +var s = archy({ + label : 'beep\none\ntwo', + nodes : [ + 'ity', + { + label : 'boop', + nodes : [ + { + label : 'o_O\nwheee', + nodes : [ + { + label : 'oh', + nodes : [ 'hello', 'puny\nmeat' ] + }, + 'creature' + ] + }, + 'party\ntime!' + ] + } + ] +}); +console.log(s); diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/LICENSE nodejs-0.11.15/deps/npm/node_modules/archy/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/archy/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +This software is released under the MIT license: + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/package.json nodejs-0.11.15/deps/npm/node_modules/archy/package.json --- nodejs-0.11.13/deps/npm/node_modules/archy/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,22 +1,42 @@ { "name": "archy", - "version": "0.0.2", + "version": "1.0.0", "description": "render nested hierarchies `npm ls` style with unicode pipes", "main": "index.js", - "directories": { - "lib": ".", - "example": "example", - "test": "test" - }, "devDependencies": { - "tap": "~0.2.3" + "tap": "~0.3.3", + "tape": "~0.1.1" }, "scripts": { "test": "tap test" }, + "testling": { + "files": "test/*.js", + "browsers": { + "iexplore": [ + "6.0", + "7.0", + "8.0", + "9.0" + ], + "chrome": [ + "20.0" + ], + "firefox": [ + "10.0", + "15.0" + ], + "safari": [ + "5.1" + ], + "opera": [ + "12.0" + ] + } + }, "repository": { "type": "git", - "url": "git://github.com/substack/node-archy.git" + "url": "http://github.com/substack/node-archy.git" }, "keywords": [ "hierarchy", @@ -30,23 +50,30 @@ "email": "mail@substack.net", "url": "http://substack.net" }, - "license": "MIT/X11", - "engine": { - "node": ">=0.4" - }, + "license": "MIT", + "gitHead": "30223c16191e877bf027b15b12daf077b9b55b84", + "bugs": { + "url": "https://github.com/substack/node-archy/issues" + }, + "homepage": "https://github.com/substack/node-archy", + "_id": "archy@1.0.0", + "_shasum": "f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40", + "_from": "archy@>=1.0.0 <2.0.0", + "_npmVersion": "1.4.25", "_npmUser": { - "name": "isaacs", - "email": "i@izs.me" + "name": "substack", + "email": "mail@substack.net" + }, + "maintainers": [ + { + "name": 
"substack", + "email": "mail@substack.net" + } + ], + "dist": { + "shasum": "f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40", + "tarball": "http://registry.npmjs.org/archy/-/archy-1.0.0.tgz" }, - "_id": "archy@0.0.2", - "dependencies": {}, - "optionalDependencies": {}, - "engines": { - "node": "*" - }, - "_engineSupported": true, - "_npmVersion": "1.1.13", - "_nodeVersion": "v0.7.7-pre", - "_defaultsLoaded": true, - "_from": "archy@0.0.2" + "directories": {}, + "_resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/README.markdown nodejs-0.11.15/deps/npm/node_modules/archy/README.markdown --- nodejs-0.11.13/deps/npm/node_modules/archy/README.markdown 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/README.markdown 2015-01-20 21:22:17.000000000 +0000 @@ -1,12 +1,12 @@ -archy -===== +# archy Render nested hierarchies `npm ls` style with unicode pipes. +[![browser support](http://ci.testling.com/substack/node-archy.png)](http://ci.testling.com/substack/node-archy) + [![build status](https://secure.travis-ci.org/substack/node-archy.png)](http://travis-ci.org/substack/node-archy) -example -======= +# example ``` js var archy = require('archy'); @@ -50,13 +50,11 @@ time! ``` -methods -======= +# methods var archy = require('archy') -archy(obj, prefix='', opts={}) ------------------------------- +## archy(obj, prefix='', opts={}) Return a string representation of `obj` with unicode pipe characters like how `npm ls` looks. @@ -77,8 +75,7 @@ To disable unicode results in favor of all-ansi output set `opts.unicode` to `false`. 
-install -======= +# install With [npm](http://npmjs.org) do: @@ -86,7 +83,6 @@ npm install archy ``` -license -======= +# license -MIT/X11 +MIT diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/test/beep.js nodejs-0.11.15/deps/npm/node_modules/archy/test/beep.js --- nodejs-0.11.13/deps/npm/node_modules/archy/test/beep.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/test/beep.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,40 @@ +var test = require('tape'); +var archy = require('../'); + +test('beep', function (t) { + var s = archy({ + label : 'beep', + nodes : [ + 'ity', + { + label : 'boop', + nodes : [ + { + label : 'o_O', + nodes : [ + { + label : 'oh', + nodes : [ 'hello', 'puny' ] + }, + 'human' + ] + }, + 'party!' + ] + } + ] + }); + t.equal(s, [ + 'beep', + '├── ity', + '└─┬ boop', + ' ├─┬ o_O', + ' │ ├─┬ oh', + ' │ │ ├── hello', + ' │ │ └── puny', + ' │ └── human', + ' └── party!', + '' + ].join('\n')); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/test/multi_line.js nodejs-0.11.15/deps/npm/node_modules/archy/test/multi_line.js --- nodejs-0.11.13/deps/npm/node_modules/archy/test/multi_line.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/test/multi_line.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +var test = require('tape'); +var archy = require('../'); + +test('multi-line', function (t) { + var s = archy({ + label : 'beep\none\ntwo', + nodes : [ + 'ity', + { + label : 'boop', + nodes : [ + { + label : 'o_O\nwheee', + nodes : [ + { + label : 'oh', + nodes : [ 'hello', 'puny\nmeat' ] + }, + 'creature' + ] + }, + 'party\ntime!' 
+ ] + } + ] + }); + t.equal(s, [ + 'beep', + '│ one', + '│ two', + '├── ity', + '└─┬ boop', + ' ├─┬ o_O', + ' │ │ wheee', + ' │ ├─┬ oh', + ' │ │ ├── hello', + ' │ │ └── puny', + ' │ │ meat', + ' │ └── creature', + ' └── party', + ' time!', + '' + ].join('\n')); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/archy/test/non_unicode.js nodejs-0.11.15/deps/npm/node_modules/archy/test/non_unicode.js --- nodejs-0.11.13/deps/npm/node_modules/archy/test/non_unicode.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/archy/test/non_unicode.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,40 @@ +var test = require('tape'); +var archy = require('../'); + +test('beep', function (t) { + var s = archy({ + label : 'beep', + nodes : [ + 'ity', + { + label : 'boop', + nodes : [ + { + label : 'o_O', + nodes : [ + { + label : 'oh', + nodes : [ 'hello', 'puny' ] + }, + 'human' + ] + }, + 'party!' + ] + } + ] + }, '', { unicode : false }); + t.equal(s, [ + 'beep', + '+-- ity', + '`-- boop', + ' +-- o_O', + ' | +-- oh', + ' | | +-- hello', + ' | | `-- puny', + ' | `-- human', + ' `-- party!', + '' + ].join('\n')); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/.eslintrc nodejs-0.11.15/deps/npm/node_modules/async-some/.eslintrc --- nodejs-0.11.13/deps/npm/node_modules/async-some/.eslintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/.eslintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "env" : { + "node" : true + }, + "rules" : { + "curly" : 0, + "no-lonely-if" : 1, + "no-mixed-requires" : 0, + "no-underscore-dangle" : 0, + "no-unused-vars" : [2, {"vars" : "all", "args" : "after-used"}], + "no-use-before-define" : [2, "nofunc"], + "quotes" : [1, "double", "avoid-escape"], + "semi" : [2, "never"], + "space-after-keywords" : 1, + "space-infix-ops" : 0, + "strict" : 0 + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/.npmignore 
nodejs-0.11.15/deps/npm/node_modules/async-some/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/async-some/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/package.json nodejs-0.11.15/deps/npm/node_modules/async-some/package.json --- nodejs-0.11.13/deps/npm/node_modules/async-some/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,57 @@ +{ + "name": "async-some", + "version": "1.0.1", + "description": "short-circuited, asynchronous version of Array.protototype.some", + "main": "some.js", + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/othiym23/async-some.git" + }, + "keywords": [ + "async", + "some", + "array", + "collections", + "fp" + ], + "author": { + "name": "Forrest L Norvell", + "email": "ogd@aoaioxxysz.net" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/othiym23/async-some/issues" + }, + "homepage": "https://github.com/othiym23/async-some", + "dependencies": { + "dezalgo": "^1.0.0" + }, + "devDependencies": { + "tap": "^0.4.11" + }, + "gitHead": "e73d6d1fbc03cca5a0d54f456f39bab294a4c7b7", + "_id": "async-some@1.0.1", + "_shasum": "8b54f08d46f0f9babc72ea9d646c245d23a4d9e5", + "_from": "async-some@>=1.0.1-0 <2.0.0-0", + "_npmVersion": "1.5.0-pre", + "_npmUser": { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + }, + "maintainers": [ + { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + } + ], + "dist": { + "shasum": "8b54f08d46f0f9babc72ea9d646c245d23a4d9e5", + "tarball": "http://registry.npmjs.org/async-some/-/async-some-1.0.1.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/async-some/-/async-some-1.0.1.tgz", + "readme": "ERROR: No README data 
found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/README.md nodejs-0.11.15/deps/npm/node_modules/async-some/README.md --- nodejs-0.11.13/deps/npm/node_modules/async-some/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,62 @@ +# some + +Short-circuited async Array.prototype.some implementation. + +Serially evaluates a list of values from a JS array or arraylike +against an asynchronous predicate, terminating on the first truthy +value. If the predicate encounters an error, pass it to the completion +callback. Otherwise, pass the truthy value passed by the predicate, or +`false` if no truthy value was passed. + +Is +[Zalgo](http://blog.izs.me/post/59142742143/designing-apis-for-asynchrony)-proof, +browser-safe, and pretty efficient. + +## Usage + +```javascript +var some = require("async-some"); +var resolve = require("path").resolve; +var stat = require("fs").stat; +var readFileSync = require("fs").readFileSync; + +some(["apple", "seaweed", "ham", "quince"], porkDetector, function (error, match) { + if (error) return console.error(error); + + if (match) return console.dir(JSON.parse(readFileSync(match))); + + console.error("time to buy more Sporkle™!"); +}); + +var PREFIX = resolve(__dirname, "../pork_store"); +function porkDetector(value, cb) { + var path = resolve(PREFIX, value + ".json"); + stat(path, function (er, stat) { + if (er) { + if (er.code === "ENOENT") return cb(null, false); + + return cb(er); + } + + cb(er, path); + }); +} +``` + +### some(list, test, callback) + +* `list` {Object} An arraylike (either an Array or the arguments arraylike) to + be checked. +* `test` {Function} The predicate against which the elements of `list` will be + tested. Takes two parameters: + * `element` {any} The element of the list to be tested. + * `callback` {Function} The continuation to be called once the test is + complete. 
Takes (again) two values: + * `error` {Error} Any errors that the predicate encountered. + * `value` {any} A truthy value. A non-falsy result terminates checking the + entire list. +* `callback` {Function} The callback to invoke when either a value has been + found or the entire input list has been processed with no result. Is invoked + with the traditional two parameters: + * `error` {Error} Errors that were encountered during the evaluation of some(). + * `match` {any} Value successfully matched by `test`, if any. diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/some.js nodejs-0.11.15/deps/npm/node_modules/async-some/some.js --- nodejs-0.11.13/deps/npm/node_modules/async-some/some.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/some.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,47 @@ +var assert = require("assert") +var dezalgoify = require("dezalgo") + +module.exports = some + +/** + * short-circuited async Array.prototype.some implementation + * + * Serially evaluates a list of values from a JS array or arraylike + * against an asynchronous predicate, terminating on the first truthy + * value. If the predicate encounters an error, pass it to the completion + * callback. Otherwise, pass the truthy value passed by the predicate, or + * `false` if no truthy value was passed. 
+ */ +function some (list, test, cb) { + assert("length" in list, "array must be arraylike") + assert.equal(typeof test, "function", "predicate must be callable") + assert.equal(typeof cb, "function", "callback must be callable") + + var array = slice(list) + , index = 0 + , length = array.length + , hecomes = dezalgoify(cb) + + map() + + function map () { + if (index >= length) return hecomes(null, false) + + test(array[index], reduce) + } + + function reduce (er, result) { + if (er) return hecomes(er, false) + if (result) return hecomes(null, result) + + index++ + map() + } +} + +// Array.prototype.slice on arguments arraylike is expensive +function slice(args) { + var l = args.length, a = [], i + for (i = 0; i < l; i++) a[i] = args[i] + return a +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/test/base-case.js nodejs-0.11.15/deps/npm/node_modules/async-some/test/base-case.js --- nodejs-0.11.13/deps/npm/node_modules/async-some/test/base-case.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/test/base-case.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +var test = require("tap").test + +var some = require("../some.js") + +test("some() array base case", function (t) { + some([], failer, function (error, match) { + t.ifError(error, "ran successfully") + + t.notOk(match, "nothing to find, so nothing found") + + t.end() + }) + + function failer(value, cb) { + cb(new Error("test should never have been called")) + } +}) + +test("some() arguments arraylike base case", function (t) { + go() + + function go() { + some(arguments, failer, function (error, match) { + t.ifError(error, "ran successfully") + + t.notOk(match, "nothing to find, so nothing found") + + t.end() + }) + + function failer(value, cb) { + cb(new Error("test should never have been called")) + } + } +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/test/parameters.js nodejs-0.11.15/deps/npm/node_modules/async-some/test/parameters.js 
--- nodejs-0.11.13/deps/npm/node_modules/async-some/test/parameters.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/test/parameters.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +var test = require("tap").test + +var some = require("../some.js") + +var NOP = function () {} + +test("some() called with bogus parameters", function (t) { + t.throws(function () { + some() + }, "throws when called with no parameters") + + t.throws(function () { + some(null, NOP, NOP) + }, "throws when called with no list") + + t.throws(function () { + some([], null, NOP) + }, "throws when called with no predicate") + + t.throws(function () { + some([], NOP, null) + }, "throws when called with no callback") + + t.throws(function () { + some({}, NOP, NOP) + }, "throws when called with wrong list type") + + t.throws(function () { + some([], "ham", NOP) + }, "throws when called with wrong test type") + + t.throws(function () { + some([], NOP, "ham") + }, "throws when called with wrong test type") + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/async-some/test/simple.js nodejs-0.11.15/deps/npm/node_modules/async-some/test/simple.js --- nodejs-0.11.13/deps/npm/node_modules/async-some/test/simple.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/async-some/test/simple.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,60 @@ +var test = require("tap").test + +var some = require("../some.js") + +test("some() doesn't find anything asynchronously", function (t) { + some(["a", "b", "c", "d", "e", "f", "g"], predicate, function (error, match) { + t.ifError(error, "ran successfully") + + t.notOk(match, "nothing to find, so nothing found") + + t.end() + }) + + function predicate(value, cb) { + // dezalgo ensures it's safe to not do this, but just in case + setTimeout(function () { cb(null, value > "j" && value) }) + } +}) + +test("some() doesn't find anything synchronously", function (t) { + some(["a", "b", 
"c", "d", "e", "f", "g"], predicate, function (error, match) { + t.ifError(error, "ran successfully") + + t.notOk(match, "nothing to find, so nothing found") + + t.end() + }) + + function predicate(value, cb) { + cb(null, value > "j" && value) + } +}) + +test("some() doesn't find anything asynchronously", function (t) { + some(["a", "b", "c", "d", "e", "f", "g"], predicate, function (error, match) { + t.ifError(error, "ran successfully") + + t.equals(match, "d", "found expected element") + + t.end() + }) + + function predicate(value, cb) { + setTimeout(function () { cb(null, value > "c" && value) }) + } +}) + +test("some() doesn't find anything synchronously", function (t) { + some(["a", "b", "c", "d", "e", "f", "g"], predicate, function (error, match) { + t.ifError(error, "ran successfully") + + t.equals(match, "d", "found expected") + + t.end() + }) + + function predicate(value, cb) { + cb(null, value > "c" && value) + } +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/char-spinner/LICENSE nodejs-0.11.15/deps/npm/node_modules/char-spinner/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/char-spinner/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/char-spinner/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/char-spinner/package.json nodejs-0.11.15/deps/npm/node_modules/char-spinner/package.json --- nodejs-0.11.13/deps/npm/node_modules/char-spinner/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/char-spinner/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "char-spinner", + "version": "1.0.1", + "description": "Put a little spinner on process.stderr, as unobtrusively as possible.", + "main": "spin.js", + "directories": { + "test": "test" + }, + "dependencies": {}, + "devDependencies": { + "tap": "^0.4.10" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/isaacs/char-spinner" + }, + "keywords": [ + "char", + "spinner" + ], + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/isaacs/char-spinner/issues" + }, + "homepage": "https://github.com/isaacs/char-spinner", + "gitHead": "091b2ff5960aa083f68a5619fa93999d072aa152", + "_id": "char-spinner@1.0.1", + "_shasum": "e6ea67bd247e107112983b7ab0479ed362800081", + "_from": "char-spinner@latest", + "_npmVersion": "1.4.13", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "e6ea67bd247e107112983b7ab0479ed362800081", + "tarball": "http://registry.npmjs.org/char-spinner/-/char-spinner-1.0.1.tgz" + }, + "_resolved": "https://registry.npmjs.org/char-spinner/-/char-spinner-1.0.1.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/char-spinner/README.md nodejs-0.11.15/deps/npm/node_modules/char-spinner/README.md --- nodejs-0.11.13/deps/npm/node_modules/char-spinner/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/char-spinner/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,31 @@ +# char-spinner + +Put a little spinner on process.stderr, as unobtrusively as possible. + +## USAGE + +```javascript +var spinner = require("char-spinner") + +// All options are optional +// even the options argument itself is optional +spinner(options) +``` + +## OPTIONS + +Usually the defaults are what you want. Mostly they're just +configurable for testing purposes. + +* `stream` Output stream. Default=`process.stderr` +* `tty` Only show spinner if output stream has a truish `.isTTY`. Default=`true` +* `string` String of chars to spin. Default=`'/-\\|'` +* `interval` Number of ms between frames, bigger = slower. Default=`50` +* `cleanup` Print `'\r \r'` to stream on process exit. Default=`true` +* `unref` Unreference the spinner interval so that the process can + exit normally. 
Default=`true` +* `delay` Number of frames to "skip over" before printing the spinner. + Useful if you want to avoid showing the spinner for very fast + actions. Default=`2` + +Returns the generated interval, if one was created. diff -Nru nodejs-0.11.13/deps/npm/node_modules/char-spinner/spin.js nodejs-0.11.15/deps/npm/node_modules/char-spinner/spin.js --- nodejs-0.11.13/deps/npm/node_modules/char-spinner/spin.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/char-spinner/spin.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +module.exports = spinner + +function spinner(opt) { + opt = opt || {} + var str = opt.stream || process.stderr + var tty = typeof opt.tty === 'boolean' ? opt.tty : true + var string = opt.string || '/-\\|' + var ms = typeof opt.interval === 'number' ? opt.interval : 50 + if (ms < 0) ms = 0 + if (tty && !str.isTTY) return false + var CR = str.isTTY ? '\u001b[0G' : '\u000d'; + var CLEAR = str.isTTY ? '\u001b[2K' : '\u000d \u000d'; + + var s = 0 + var sprite = string.split('') + var wrote = false + + var delay = typeof opt.delay === 'number' ? opt.delay : 2 + + var interval = setInterval(function() { + if (--delay >= 0) return + s = ++s % sprite.length + var c = sprite[s] + str.write(c + CR) + wrote = true + }, ms) + + var unref = typeof opt.unref === 'boolean' ? opt.unref : true + if (unref && typeof interval.unref === 'function') { + interval.unref() + } + + var cleanup = typeof opt.cleanup === 'boolean' ? 
opt.cleanup : true + if (cleanup) { + process.on('exit', function() { + if (wrote) { + str.write(CLEAR); + } + }) + } + + module.exports.clear = function () { + str.write(CLEAR); + }; + + return interval +} + +module.exports.clear = function () {}; + + diff -Nru nodejs-0.11.13/deps/npm/node_modules/char-spinner/test/basic.js nodejs-0.11.15/deps/npm/node_modules/char-spinner/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/char-spinner/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/char-spinner/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +var test = require('tap').test +var spinner = require('../spin.js') + +test('does nothing when not a tty', function(t) { + var int = spinner({ + stream: { write: function(c) { + throw new Error('wrote something: ' + JSON.stringify(c)) + }, isTTY: false }, + }) + t.notOk(int) + t.end() +}) + +test('write spinny stuff', function(t) { + var output = '' + var written = 0 + var expect = "b\u001b[0Gc\u001b[0Gd\u001b[0Ge\u001b[0Gf\u001b[0Gg\u001b[0Gh\u001b[0Gi\u001b[0Gj\u001b[0Gk\u001b[0Gl\u001b[0Gm\u001b[0Gn\u001b[0Go\u001b[0Gp\u001b[0Ga\u001b[0Gb\u001b[0Gc\u001b[0Gd\u001b[0Ge\u001b[0Gf\u001b[0Gg\u001b[0Gh\u001b[0Gi\u001b[0Gj\u001b[0Gk\u001b[0Gl\u001b[0Gm\u001b[0Gn\u001b[0Go\u001b[0Gp\u001b[0Ga\u001b[0Gb\u001b[0Gc\u001b[0Gd\u001b[0Ge\u001b[0Gf\u001b[0Gg\u001b[0Gh\u001b[0Gi\u001b[0Gj\u001b[0Gk\u001b[0Gl\u001b[0Gm\u001b[0Gn\u001b[0Go\u001b[0Gp\u001b[0Ga\u001b[0Gb\u001b[0Gc\u001b[0G" + + var int = spinner({ + interval: 0, + string: 'abcdefghijklmnop', + stream: { + write: function(c) { + output += c + if (++written == 50) { + t.equal(output, expect) + clearInterval(int) + t.end() + } + }, + isTTY: true + }, + cleanup: false + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/index.js nodejs-0.11.15/deps/npm/node_modules/cmd-shim/index.js --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/index.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/cmd-shim/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,180 +1,180 @@ -// On windows, create a .cmd file. -// Read the #! in the file to see what it uses. The vast majority -// of the time, this will be either: -// "#!/usr/bin/env " -// or: -// "#! " -// -// Write a binroot/pkg.bin + ".cmd" file that has this line in it: -// @ %~dp0 %* - -module.exports = cmdShim -cmdShim.ifExists = cmdShimIfExists - -try { - var fs = require("graceful-fs") -} catch (e) { - var fs = require("fs") -} - -var mkdir = require("mkdirp") - , path = require("path") - , shebangExpr = /^#\!\s*(?:\/usr\/bin\/env)?\s*([^ \t]+)(.*)$/ - -function cmdShimIfExists (from, to, cb) { - fs.stat(from, function (er) { - if (er) return cb() - cmdShim(from, to, cb) - }) -} - -// Try to unlink, but ignore errors. -// Any problems will surface later. -function rm (path, cb) { - fs.unlink(path, function(er) { - cb() - }) -} - -function cmdShim (from, to, cb) { - fs.stat(from, function (er, stat) { - if (er) - return cb(er) - - cmdShim_(from, to, cb) - }) -} - -function cmdShim_ (from, to, cb) { - var then = times(2, next, cb) - rm(to, then) - rm(to + ".cmd", then) - - function next(er) { - writeShim(from, to, cb) - } -} - -function writeShim (from, to, cb) { - // make a cmd file and a sh script - // First, check if the bin is a #! of some sort. - // If not, then assume it's something that'll be compiled, or some other - // sort of script, and just call it directly. 
- mkdir(path.dirname(to), function (er) { - if (er) - return cb(er) - fs.readFile(from, "utf8", function (er, data) { - if (er) return writeShim_(from, to, null, null, cb) - var firstLine = data.trim().split(/\r*\n/)[0] - , shebang = firstLine.match(shebangExpr) - if (!shebang) return writeShim_(from, to, null, null, cb) - var prog = shebang[1] - , args = shebang[2] || "" - return writeShim_(from, to, prog, args, cb) - }) - }) -} - -function writeShim_ (from, to, prog, args, cb) { - var shTarget = path.relative(path.dirname(to), from) - , target = shTarget.split("/").join("\\") - , longProg - , shProg = prog && prog.split("\\").join("/") - , shLongProg - shTarget = shTarget.split("\\").join("/") - args = args || "" - if (!prog) { - prog = "\"%~dp0\\" + target + "\"" - shProg = "\"$basedir/" + shTarget + "\"" - args = "" - target = "" - shTarget = "" - } else { - longProg = "\"%~dp0\\" + prog + ".exe\"" - shLongProg = "\"$basedir/" + prog + "\"" - target = "\"%~dp0\\" + target + "\"" - shTarget = "\"$basedir/" + shTarget + "\"" - } - - // @IF EXIST "%~dp0\node.exe" ( - // "%~dp0\node.exe" "%~dp0\.\node_modules\npm\bin\npm-cli.js" %* - // ) ELSE ( - // node "%~dp0\.\node_modules\npm\bin\npm-cli.js" %* - // ) - var cmd - if (longProg) { - cmd = "@IF EXIST " + longProg + " (\r\n" - + " " + longProg + " " + args + " " + target + " %*\r\n" - + ") ELSE (\r\n" - + " " + prog + " " + args + " " + target + " %*\r\n" - + ")" - } else { - cmd = prog + " " + args + " " + target + " %*\r\n" - } - - // #!/bin/sh - // basedir=`dirname "$0"` - // - // case `uname` in - // *CYGWIN*) basedir=`cygpath -w "$basedir"`;; - // esac - // - // if [ -x "$basedir/node.exe" ]; then - // "$basedir/node.exe" "$basedir/node_modules/npm/bin/npm-cli.js" "$@" - // ret=$? - // else - // node "$basedir/node_modules/npm/bin/npm-cli.js" "$@" - // ret=$? 
- // fi - // exit $ret - - var sh = "#!/bin/sh\n" - - if (shLongProg) { - sh = sh - + "basedir=`dirname \"$0\"`\n" - + "\n" - + "case `uname` in\n" - + " *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;\n" - + "esac\n" - + "\n" - - sh = sh - + "if [ -x "+shLongProg+" ]; then\n" - + " " + shLongProg + " " + args + " " + shTarget + " \"$@\"\n" - + " ret=$?\n" - + "else \n" - + " " + shProg + " " + args + " " + shTarget + " \"$@\"\n" - + " ret=$?\n" - + "fi\n" - + "exit $ret\n" - } else { - sh = shProg + " " + args + " " + shTarget + " \"$@\"\n" - + "exit $?\n" - } - - var then = times(2, next, cb) - fs.writeFile(to + ".cmd", cmd, "utf8", then) - fs.writeFile(to, sh, "utf8", then) - function next () { - chmodShim(to, cb) - } -} - -function chmodShim (to, cb) { - var then = times(2, cb, cb) - fs.chmod(to, 0755, then) - fs.chmod(to + ".cmd", 0755, then) -} - -function times(n, ok, cb) { - var errState = null - return function(er) { - if (!errState) { - if (er) - cb(errState = er) - else if (--n === 0) - ok() - } - } -} +// On windows, create a .cmd file. +// Read the #! in the file to see what it uses. The vast majority +// of the time, this will be either: +// "#!/usr/bin/env " +// or: +// "#! " +// +// Write a binroot/pkg.bin + ".cmd" file that has this line in it: +// @ %~dp0 %* + +module.exports = cmdShim +cmdShim.ifExists = cmdShimIfExists + +var fs = require("graceful-fs") + +var mkdir = require("mkdirp") + , path = require("path") + , shebangExpr = /^#\!\s*(?:\/usr\/bin\/env)?\s*([^ \t]+)(.*)$/ + +function cmdShimIfExists (from, to, cb) { + fs.stat(from, function (er) { + if (er) return cb() + cmdShim(from, to, cb) + }) +} + +// Try to unlink, but ignore errors. +// Any problems will surface later. 
+function rm (path, cb) { + fs.unlink(path, function(er) { + cb() + }) +} + +function cmdShim (from, to, cb) { + fs.stat(from, function (er, stat) { + if (er) + return cb(er) + + cmdShim_(from, to, cb) + }) +} + +function cmdShim_ (from, to, cb) { + var then = times(2, next, cb) + rm(to, then) + rm(to + ".cmd", then) + + function next(er) { + writeShim(from, to, cb) + } +} + +function writeShim (from, to, cb) { + // make a cmd file and a sh script + // First, check if the bin is a #! of some sort. + // If not, then assume it's something that'll be compiled, or some other + // sort of script, and just call it directly. + mkdir(path.dirname(to), function (er) { + if (er) + return cb(er) + fs.readFile(from, "utf8", function (er, data) { + if (er) return writeShim_(from, to, null, null, cb) + var firstLine = data.trim().split(/\r*\n/)[0] + , shebang = firstLine.match(shebangExpr) + if (!shebang) return writeShim_(from, to, null, null, cb) + var prog = shebang[1] + , args = shebang[2] || "" + return writeShim_(from, to, prog, args, cb) + }) + }) +} + +function writeShim_ (from, to, prog, args, cb) { + var shTarget = path.relative(path.dirname(to), from) + , target = shTarget.split("/").join("\\") + , longProg + , shProg = prog && prog.split("\\").join("/") + , shLongProg + shTarget = shTarget.split("\\").join("/") + args = args || "" + if (!prog) { + prog = "\"%~dp0\\" + target + "\"" + shProg = "\"$basedir/" + shTarget + "\"" + args = "" + target = "" + shTarget = "" + } else { + longProg = "\"%~dp0\\" + prog + ".exe\"" + shLongProg = "\"$basedir/" + prog + "\"" + target = "\"%~dp0\\" + target + "\"" + shTarget = "\"$basedir/" + shTarget + "\"" + } + + // @IF EXIST "%~dp0\node.exe" ( + // "%~dp0\node.exe" "%~dp0\.\node_modules\npm\bin\npm-cli.js" %* + // ) ELSE ( + // SETLOCAL + // SET PATHEXT=%PATHEXT:;.JS;=;% + // node "%~dp0\.\node_modules\npm\bin\npm-cli.js" %* + // ) + var cmd + if (longProg) { + cmd = "@IF EXIST " + longProg + " (\r\n" + + " " + longProg + " " + 
args + " " + target + " %*\r\n" + + ") ELSE (\r\n" + + " @SETLOCAL\r\n" + + " @SET PATHEXT=%PATHEXT:;.JS;=;%\r\n" + + " " + prog + " " + args + " " + target + " %*\r\n" + + ")" + } else { + cmd = prog + " " + args + " " + target + " %*\r\n" + } + + // #!/bin/sh + // basedir=`dirname "$0"` + // + // case `uname` in + // *CYGWIN*) basedir=`cygpath -w "$basedir"`;; + // esac + // + // if [ -x "$basedir/node.exe" ]; then + // "$basedir/node.exe" "$basedir/node_modules/npm/bin/npm-cli.js" "$@" + // ret=$? + // else + // node "$basedir/node_modules/npm/bin/npm-cli.js" "$@" + // ret=$? + // fi + // exit $ret + + var sh = "#!/bin/sh\n" + + if (shLongProg) { + sh = sh + + "basedir=`dirname \"$0\"`\n" + + "\n" + + "case `uname` in\n" + + " *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;\n" + + "esac\n" + + "\n" + + sh = sh + + "if [ -x "+shLongProg+" ]; then\n" + + " " + shLongProg + " " + args + " " + shTarget + " \"$@\"\n" + + " ret=$?\n" + + "else \n" + + " " + shProg + " " + args + " " + shTarget + " \"$@\"\n" + + " ret=$?\n" + + "fi\n" + + "exit $ret\n" + } else { + sh = shProg + " " + args + " " + shTarget + " \"$@\"\n" + + "exit $?\n" + } + + var then = times(2, next, cb) + fs.writeFile(to + ".cmd", cmd, "utf8", then) + fs.writeFile(to, sh, "utf8", then) + function next () { + chmodShim(to, cb) + } +} + +function chmodShim (to, cb) { + var then = times(2, cb, cb) + fs.chmod(to, 0755, then) + fs.chmod(to + ".cmd", 0755, then) +} + +function times(n, ok, cb) { + var errState = null + return function(er) { + if (!errState) { + if (er) + cb(errState = er) + else if (--n === 0) + ok() + } + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/LICENSE nodejs-0.11.15/deps/npm/node_modules/cmd-shim/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,27 @@ -Copyright (c) Isaac Z. 
Schlueter ("Author") -All rights reserved. - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Copyright (c) Isaac Z. Schlueter ("Author") +All rights reserved. + +The BSD License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/.npmignore nodejs-0.11.15/deps/npm/node_modules/cmd-shim/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -1,16 +1,16 @@ -lib-cov -*.seed -*.log -*.csv -*.dat -*.out -*.pid -*.gz - -pids -logs -results - -npm-debug.log - -node_modules +lib-cov +*.seed +*.log +*.csv +*.dat +*.out +*.pid +*.gz + +pids +logs +results + +npm-debug.log + +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/package.json nodejs-0.11.15/deps/npm/node_modules/cmd-shim/package.json --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "cmd-shim", - "version": "1.1.1", + "version": "2.0.1", "description": "Used in npm for command line application support", "scripts": { "test": "tap test/*.js" @@ -10,26 +10,37 @@ "url": "https://github.com/ForbesLindesay/cmd-shim.git" }, "license": "BSD", - "optionalDependencies": { - "graceful-fs": "2" - }, "dependencies": { - 
"mkdirp": "~0.3.3", - "graceful-fs": "2" + "graceful-fs": ">3.0.1 <4.0.0-0", + "mkdirp": "~0.5.0" }, "devDependencies": { - "tap": "~0.4.1", - "rimraf": "~2.1.4" + "tap": "~0.4.11", + "rimraf": "~2.2.8" }, - "readme": "# cmd-shim\r\n\r\nThe cmd-shim used in npm to create executable scripts on Windows,\r\nsince symlinks are not suitable for this purpose there.\r\n\r\nOn Unix systems, you should use a symbolic link instead.\r\n\r\n[![Build Status](https://travis-ci.org/ForbesLindesay/cmd-shim.png?branch=master)](https://travis-ci.org/ForbesLindesay/cmd-shim) [![Dependency Status](https://gemnasium.com/ForbesLindesay/cmd-shim.png)](https://gemnasium.com/ForbesLindesay/cmd-shim)\r\n\r\n## Installation\r\n\r\n```\r\nnpm install cmd-shim\r\n```\r\n\r\n## API\r\n\r\n### cmdShim(from, to, cb)\r\n\r\nCreate a cmd shim at `to` for the command line program at `from`.\r\ne.g.\r\n\r\n```javascript\r\nvar cmdShim = require('cmd-shim');\r\ncmdShim(__dirname + '/cli.js', '/usr/bin/command-name', function (err) {\r\n if (err) throw err;\r\n});\r\n```\r\n\r\n### cmdShim.ifExists(from, to, cb)\r\n\r\nThe same as above, but will just continue if the file does not exist.\r\nSource:\r\n\r\n```javascript\r\nfunction cmdShimIfExists (from, to, cb) {\r\n fs.stat(from, function (er) {\r\n if (er) return cb()\r\n cmdShim(from, to, cb)\r\n })\r\n}\r\n```\r\n", - "readmeFilename": "README.md", + "gitHead": "6f53d506be590fe9ac20c9801512cd1a3aad5974", "bugs": { "url": "https://github.com/ForbesLindesay/cmd-shim/issues" }, - "_id": "cmd-shim@1.1.1", + "homepage": "https://github.com/ForbesLindesay/cmd-shim", + "_id": "cmd-shim@2.0.1", + "_shasum": "4512a373d2391679aec51ad1d4733559e9b85d4a", + "_from": "cmd-shim@>=2.0.1-0 <3.0.0-0", + "_npmVersion": "1.5.0-alpha-4", + "_npmUser": { + "name": "forbeslindesay", + "email": "forbes@lindesay.co.uk" + }, + "maintainers": [ + { + "name": "forbeslindesay", + "email": "forbes@lindesay.co.uk" + } + ], "dist": { - "shasum": 
"87741e2a8b6307ea1ea8bf1f65287cb4a9ca977a" + "shasum": "4512a373d2391679aec51ad1d4733559e9b85d4a", + "tarball": "http://registry.npmjs.org/cmd-shim/-/cmd-shim-2.0.1.tgz" }, - "_from": "cmd-shim@latest", - "_resolved": "https://registry.npmjs.org/cmd-shim/-/cmd-shim-1.1.1.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/cmd-shim/-/cmd-shim-2.0.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/README.md nodejs-0.11.15/deps/npm/node_modules/cmd-shim/README.md --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,44 @@ -# cmd-shim - -The cmd-shim used in npm to create executable scripts on Windows, -since symlinks are not suitable for this purpose there. - -On Unix systems, you should use a symbolic link instead. - -[![Build Status](https://travis-ci.org/ForbesLindesay/cmd-shim.png?branch=master)](https://travis-ci.org/ForbesLindesay/cmd-shim) [![Dependency Status](https://gemnasium.com/ForbesLindesay/cmd-shim.png)](https://gemnasium.com/ForbesLindesay/cmd-shim) - -## Installation - -``` -npm install cmd-shim -``` - -## API - -### cmdShim(from, to, cb) - -Create a cmd shim at `to` for the command line program at `from`. -e.g. - -```javascript -var cmdShim = require('cmd-shim'); -cmdShim(__dirname + '/cli.js', '/usr/bin/command-name', function (err) { - if (err) throw err; -}); -``` - -### cmdShim.ifExists(from, to, cb) - -The same as above, but will just continue if the file does not exist. -Source: - -```javascript -function cmdShimIfExists (from, to, cb) { - fs.stat(from, function (er) { - if (er) return cb() - cmdShim(from, to, cb) - }) -} -``` +# cmd-shim + +The cmd-shim used in npm to create executable scripts on Windows, +since symlinks are not suitable for this purpose there. + +On Unix systems, you should use a symbolic link instead. 
+ +[![Build Status](https://img.shields.io/travis/ForbesLindesay/cmd-shim/master.svg)](https://travis-ci.org/ForbesLindesay/cmd-shim) +[![Dependency Status](https://img.shields.io/gemnasium/ForbesLindesay/cmd-shim.svg)](https://gemnasium.com/ForbesLindesay/cmd-shim) +[![NPM version](https://img.shields.io/npm/v/cmd-shim.svg)](http://badge.fury.io/js/cmd-shim) + +## Installation + +``` +npm install cmd-shim +``` + +## API + +### cmdShim(from, to, cb) + +Create a cmd shim at `to` for the command line program at `from`. +e.g. + +```javascript +var cmdShim = require('cmd-shim'); +cmdShim(__dirname + '/cli.js', '/usr/bin/command-name', function (err) { + if (err) throw err; +}); +``` + +### cmdShim.ifExists(from, to, cb) + +The same as above, but will just continue if the file does not exist. +Source: + +```javascript +function cmdShimIfExists (from, to, cb) { + fs.stat(from, function (er) { + if (er) return cb() + cmdShim(from, to, cb) + }) +} +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/test/00-setup.js nodejs-0.11.15/deps/npm/node_modules/cmd-shim/test/00-setup.js --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/test/00-setup.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/test/00-setup.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,34 @@ -var test = require('tap').test -var mkdirp = require('mkdirp') -var fs = require('fs') -var path = require('path') -var fixtures = path.resolve(__dirname, 'fixtures') - -var froms = { - 'from.exe': 'exe', - 'from.env': '#!/usr/bin/env node\nconsole.log(/hi/)\n', - 'from.env.args': '#!/usr/bin/env node --expose_gc\ngc()\n', - 'from.sh': '#!/usr/bin/sh\necho hi\n', - 'from.sh.args': '#!/usr/bin/sh -x\necho hi\n' -} - -var cmdShim = require('../') - -test('create fixture', function (t) { - mkdirp(fixtures, function (er) { - if (er) - throw er - t.pass('made dir') - Object.keys(froms).forEach(function (f) { - t.test('write ' + f, function (t) { - 
fs.writeFile(path.resolve(fixtures, f), froms[f], function (er) { - if (er) - throw er - t.pass('wrote ' + f) - t.end() - }) - }) - }) - t.end() - }) -}) +var test = require('tap').test +var mkdirp = require('mkdirp') +var fs = require('fs') +var path = require('path') +var fixtures = path.resolve(__dirname, 'fixtures') + +var froms = { + 'from.exe': 'exe', + 'from.env': '#!/usr/bin/env node\nconsole.log(/hi/)\n', + 'from.env.args': '#!/usr/bin/env node --expose_gc\ngc()\n', + 'from.sh': '#!/usr/bin/sh\necho hi\n', + 'from.sh.args': '#!/usr/bin/sh -x\necho hi\n' +} + +var cmdShim = require('../') + +test('create fixture', function (t) { + mkdirp(fixtures, function (er) { + if (er) + throw er + t.pass('made dir') + Object.keys(froms).forEach(function (f) { + t.test('write ' + f, function (t) { + fs.writeFile(path.resolve(fixtures, f), froms[f], function (er) { + if (er) + throw er + t.pass('wrote ' + f) + t.end() + }) + }) + }) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/test/basic.js nodejs-0.11.15/deps/npm/node_modules/cmd-shim/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,167 +1,175 @@ -var test = require('tap').test -var mkdirp = require('mkdirp') -var fs = require('fs') -var path = require('path') -var fixtures = path.resolve(__dirname, 'fixtures') - -var cmdShim = require('../') - -test('no shebang', function (t) { - var from = path.resolve(fixtures, 'from.exe') - var to = path.resolve(fixtures, 'exe.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - t.equal(fs.readFileSync(to, 'utf8'), - "\"$basedir/from.exe\" \"$@\"\nexit $?\n") - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "\"%~dp0\\from.exe\" %*\r\n") - t.end() - }) -}) - -test('env shebang', function (t) { - var from = path.resolve(fixtures, 'from.env') - var to = path.resolve(fixtures, 
'env.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh"+ - "\nbasedir=`dirname \"$0\"`"+ - "\n"+ - "\ncase `uname` in"+ - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;"+ - "\nesac"+ - "\n"+ - "\nif [ -x \"$basedir/node\" ]; then"+ - "\n \"$basedir/node\" \"$basedir/from.env\" \"$@\""+ - "\n ret=$?"+ - "\nelse "+ - "\n node \"$basedir/from.env\" \"$@\""+ - "\n ret=$?"+ - "\nfi"+ - "\nexit $ret"+ - "\n") - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\node.exe\" (\r"+ - "\n \"%~dp0\\node.exe\" \"%~dp0\\from.env\" %*\r"+ - "\n) ELSE (\r"+ - "\n node \"%~dp0\\from.env\" %*\r"+ - "\n)") - t.end() - }) -}) - -test('env shebang with args', function (t) { - var from = path.resolve(fixtures, 'from.env.args') - var to = path.resolve(fixtures, 'env.args.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh"+ - "\nbasedir=`dirname \"$0\"`"+ - "\n"+ - "\ncase `uname` in"+ - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;"+ - "\nesac"+ - "\n"+ - "\nif [ -x \"$basedir/node\" ]; then"+ - "\n \"$basedir/node\" --expose_gc \"$basedir/from.env.args\" \"$@\""+ - "\n ret=$?"+ - "\nelse "+ - "\n node --expose_gc \"$basedir/from.env.args\" \"$@\""+ - "\n ret=$?"+ - "\nfi"+ - "\nexit $ret"+ - "\n") - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\node.exe\" (\r"+ - "\n \"%~dp0\\node.exe\" --expose_gc \"%~dp0\\from.env.args\" %*\r"+ - "\n) ELSE (\r"+ - "\n node --expose_gc \"%~dp0\\from.env.args\" %*\r"+ - "\n)") - t.end() - }) -}) - -test('explicit shebang', function (t) { - var from = path.resolve(fixtures, 'from.sh') - var to = path.resolve(fixtures, 'sh.shim') - cmdShim(from, to, function(er) 
{ - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh" + - "\nbasedir=`dirname \"$0\"`" + - "\n" + - "\ncase `uname` in" + - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;" + - "\nesac" + - "\n" + - "\nif [ -x \"$basedir//usr/bin/sh\" ]; then" + - "\n \"$basedir//usr/bin/sh\" \"$basedir/from.sh\" \"$@\"" + - "\n ret=$?" + - "\nelse " + - "\n /usr/bin/sh \"$basedir/from.sh\" \"$@\"" + - "\n ret=$?" + - "\nfi" + - "\nexit $ret" + - "\n") - - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\/usr/bin/sh.exe\" (\r" + - "\n \"%~dp0\\/usr/bin/sh.exe\" \"%~dp0\\from.sh\" %*\r" + - "\n) ELSE (\r" + - "\n /usr/bin/sh \"%~dp0\\from.sh\" %*\r" + - "\n)") - t.end() - }) -}) - -test('explicit shebang with args', function (t) { - var from = path.resolve(fixtures, 'from.sh.args') - var to = path.resolve(fixtures, 'sh.args.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh" + - "\nbasedir=`dirname \"$0\"`" + - "\n" + - "\ncase `uname` in" + - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;" + - "\nesac" + - "\n" + - "\nif [ -x \"$basedir//usr/bin/sh\" ]; then" + - "\n \"$basedir//usr/bin/sh\" -x \"$basedir/from.sh.args\" \"$@\"" + - "\n ret=$?" + - "\nelse " + - "\n /usr/bin/sh -x \"$basedir/from.sh.args\" \"$@\"" + - "\n ret=$?" 
+ - "\nfi" + - "\nexit $ret" + - "\n") - - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\/usr/bin/sh.exe\" (\r" + - "\n \"%~dp0\\/usr/bin/sh.exe\" -x \"%~dp0\\from.sh.args\" %*\r" + - "\n) ELSE (\r" + - "\n /usr/bin/sh -x \"%~dp0\\from.sh.args\" %*\r" + - "\n)") - t.end() - }) -}) +var test = require('tap').test +var mkdirp = require('mkdirp') +var fs = require('fs') +var path = require('path') +var fixtures = path.resolve(__dirname, 'fixtures') + +var cmdShim = require('../') + +test('no shebang', function (t) { + var from = path.resolve(fixtures, 'from.exe') + var to = path.resolve(fixtures, 'exe.shim') + cmdShim(from, to, function(er) { + if (er) + throw er + t.equal(fs.readFileSync(to, 'utf8'), + "\"$basedir/from.exe\" \"$@\"\nexit $?\n") + t.equal(fs.readFileSync(to + '.cmd', 'utf8'), + "\"%~dp0\\from.exe\" %*\r\n") + t.end() + }) +}) + +test('env shebang', function (t) { + var from = path.resolve(fixtures, 'from.env') + var to = path.resolve(fixtures, 'env.shim') + cmdShim(from, to, function(er) { + if (er) + throw er + console.error('%j', fs.readFileSync(to, 'utf8')) + console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) + + t.equal(fs.readFileSync(to, 'utf8'), + "#!/bin/sh"+ + "\nbasedir=`dirname \"$0\"`"+ + "\n"+ + "\ncase `uname` in"+ + "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;"+ + "\nesac"+ + "\n"+ + "\nif [ -x \"$basedir/node\" ]; then"+ + "\n \"$basedir/node\" \"$basedir/from.env\" \"$@\""+ + "\n ret=$?"+ + "\nelse "+ + "\n node \"$basedir/from.env\" \"$@\""+ + "\n ret=$?"+ + "\nfi"+ + "\nexit $ret"+ + "\n") + t.equal(fs.readFileSync(to + '.cmd', 'utf8'), + "@IF EXIST \"%~dp0\\node.exe\" (\r"+ + "\n \"%~dp0\\node.exe\" \"%~dp0\\from.env\" %*\r"+ + "\n) ELSE (\r"+ + "\n @SETLOCAL\r"+ + "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ + "\n node \"%~dp0\\from.env\" %*\r"+ + "\n)") + t.end() + }) +}) + +test('env shebang with args', function (t) { + var from = path.resolve(fixtures, 'from.env.args') + var to = 
path.resolve(fixtures, 'env.args.shim') + cmdShim(from, to, function(er) { + if (er) + throw er + console.error('%j', fs.readFileSync(to, 'utf8')) + console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) + + t.equal(fs.readFileSync(to, 'utf8'), + "#!/bin/sh"+ + "\nbasedir=`dirname \"$0\"`"+ + "\n"+ + "\ncase `uname` in"+ + "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;"+ + "\nesac"+ + "\n"+ + "\nif [ -x \"$basedir/node\" ]; then"+ + "\n \"$basedir/node\" --expose_gc \"$basedir/from.env.args\" \"$@\""+ + "\n ret=$?"+ + "\nelse "+ + "\n node --expose_gc \"$basedir/from.env.args\" \"$@\""+ + "\n ret=$?"+ + "\nfi"+ + "\nexit $ret"+ + "\n") + t.equal(fs.readFileSync(to + '.cmd', 'utf8'), + "@IF EXIST \"%~dp0\\node.exe\" (\r"+ + "\n \"%~dp0\\node.exe\" --expose_gc \"%~dp0\\from.env.args\" %*\r"+ + "\n) ELSE (\r"+ + "\n @SETLOCAL\r"+ + "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ + "\n node --expose_gc \"%~dp0\\from.env.args\" %*\r"+ + "\n)") + t.end() + }) +}) + +test('explicit shebang', function (t) { + var from = path.resolve(fixtures, 'from.sh') + var to = path.resolve(fixtures, 'sh.shim') + cmdShim(from, to, function(er) { + if (er) + throw er + console.error('%j', fs.readFileSync(to, 'utf8')) + console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) + + t.equal(fs.readFileSync(to, 'utf8'), + "#!/bin/sh" + + "\nbasedir=`dirname \"$0\"`" + + "\n" + + "\ncase `uname` in" + + "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;" + + "\nesac" + + "\n" + + "\nif [ -x \"$basedir//usr/bin/sh\" ]; then" + + "\n \"$basedir//usr/bin/sh\" \"$basedir/from.sh\" \"$@\"" + + "\n ret=$?" + + "\nelse " + + "\n /usr/bin/sh \"$basedir/from.sh\" \"$@\"" + + "\n ret=$?" 
+ + "\nfi" + + "\nexit $ret" + + "\n") + + t.equal(fs.readFileSync(to + '.cmd', 'utf8'), + "@IF EXIST \"%~dp0\\/usr/bin/sh.exe\" (\r" + + "\n \"%~dp0\\/usr/bin/sh.exe\" \"%~dp0\\from.sh\" %*\r" + + "\n) ELSE (\r" + + "\n @SETLOCAL\r"+ + "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ + "\n /usr/bin/sh \"%~dp0\\from.sh\" %*\r" + + "\n)") + t.end() + }) +}) + +test('explicit shebang with args', function (t) { + var from = path.resolve(fixtures, 'from.sh.args') + var to = path.resolve(fixtures, 'sh.args.shim') + cmdShim(from, to, function(er) { + if (er) + throw er + console.error('%j', fs.readFileSync(to, 'utf8')) + console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) + + t.equal(fs.readFileSync(to, 'utf8'), + "#!/bin/sh" + + "\nbasedir=`dirname \"$0\"`" + + "\n" + + "\ncase `uname` in" + + "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;" + + "\nesac" + + "\n" + + "\nif [ -x \"$basedir//usr/bin/sh\" ]; then" + + "\n \"$basedir//usr/bin/sh\" -x \"$basedir/from.sh.args\" \"$@\"" + + "\n ret=$?" + + "\nelse " + + "\n /usr/bin/sh -x \"$basedir/from.sh.args\" \"$@\"" + + "\n ret=$?" 
+ + "\nfi" + + "\nexit $ret" + + "\n") + + t.equal(fs.readFileSync(to + '.cmd', 'utf8'), + "@IF EXIST \"%~dp0\\/usr/bin/sh.exe\" (\r" + + "\n \"%~dp0\\/usr/bin/sh.exe\" -x \"%~dp0\\from.sh.args\" %*\r" + + "\n) ELSE (\r" + + "\n @SETLOCAL\r"+ + "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ + "\n /usr/bin/sh -x \"%~dp0\\from.sh.args\" %*\r" + + "\n)") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js nodejs-0.11.15/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,13 +1,13 @@ -var test = require('tap').test -var path = require('path') -var fixtures = path.resolve(__dirname, 'fixtures') -var rimraf = require('rimraf') - -test('cleanup', function(t) { - rimraf(fixtures, function(er) { - if (er) - throw er - t.pass('cleaned up') - t.end() - }) -}) +var test = require('tap').test +var path = require('path') +var fixtures = path.resolve(__dirname, 'fixtures') +var rimraf = require('rimraf') + +test('cleanup', function(t) { + rimraf(fixtures, function(er) { + if (er) + throw er + t.pass('cleaned up') + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/cmd-shim/.travis.yml nodejs-0.11.15/deps/npm/node_modules/cmd-shim/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/cmd-shim/.travis.yml 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/cmd-shim/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,4 @@ -language: node_js -node_js: - - "0.10" +language: node_js +node_js: + - "0.10" - "0.8" \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/index.js nodejs-0.11.15/deps/npm/node_modules/columnify/index.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/index.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/columnify/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,13 @@ "use strict" +var wcwidth = require('./width') var utils = require('./utils') var padRight = utils.padRight +var padCenter = utils.padCenter +var padLeft = utils.padLeft var splitIntoLines = utils.splitIntoLines var splitLongWords = utils.splitLongWords +var truncateString = utils.truncateString var DEFAULTS = { maxWidth: Infinity, @@ -11,6 +15,9 @@ columnSplitter: ' ', truncate: false, truncateMarker: '…', + preserveNewLines: false, + paddingChr: ' ', + showHeaders: true, headingTransform: function(key) { return key.toUpperCase() }, @@ -26,14 +33,21 @@ var columnConfigs = options.config || {} delete options.config // remove config so doesn't appear on every column. + var maxLineWidth = options.maxLineWidth || Infinity + delete options.maxLineWidth // this is a line control option, don't pass it to column + // Option defaults inheritance: // options.config[columnName] => options => DEFAULTS options = mixin(options, DEFAULTS) options.config = options.config || Object.create(null) options.spacing = options.spacing || '\n' // probably useless + options.preserveNewLines = !!options.preserveNewLines + options.showHeaders = !!options.showHeaders; + options.columns = options.columns || options.include // alias include/columns, prefer columns if supplied + var columnNames = options.columns || [] // optional user-supplied columns to include - var columnNames = options.include || [] // optional user-supplied columns to include + items = toArray(items, columnNames) // if not suppled column names, automatically determine columns from data keys if (!columnNames.length) { @@ -67,8 +81,13 @@ result[columnName] = item[columnName] != null ? 
item[columnName] : '' // toString everything result[columnName] = '' + result[columnName] - // remove funky chars - result[columnName] = result[columnName].replace(/\s+/g, " ") + if (columns[columnName].preserveNewLines) { + // merge non-newline whitespace chars + result[columnName] = result[columnName].replace(/[^\S\n]/gmi, ' ') + } else { + // merge all whitespace chars + result[columnName] = result[columnName].replace(/\s/gmi, ' ') + } }) return result }) @@ -84,12 +103,13 @@ // add headers var headers = {} - columnNames.forEach(function(columnName) { - var column = columns[columnName] - headers[columnName] = column.headingTransform(columnName) - }) - items.unshift(headers) - + if(options.showHeaders) { + columnNames.forEach(function(columnName) { + var column = columns[columnName] + headers[columnName] = column.headingTransform(columnName) + }) + items.unshift(headers) + } // get actual max-width between min & max // based on length of data in columns columnNames.forEach(function(columnName) { @@ -97,7 +117,7 @@ column.width = items.map(function(item) { return item[columnName] }).reduce(function(min, cur) { - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, cur.length))) + return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, wcwidth(cur)))) }, 0) }) @@ -119,7 +139,7 @@ // if truncating required, only include first line + add truncation char if (column.truncate && item[columnName].length > 1) { - item[columnName] = splitIntoLines(cell, column.width - column.truncateMarker.length) + item[columnName] = splitIntoLines(cell, column.width - wcwidth(column.truncateMarker)) var firstLine = item[columnName][0] if (!endsWith(firstLine, column.truncateMarker)) item[columnName][0] += column.truncateMarker item[columnName] = item[columnName].slice(0, 1) @@ -133,21 +153,23 @@ var column = columns[columnName] column.width = items.map(function(item) { return item[columnName].reduce(function(min, cur) { - return Math.max(min, 
Math.min(column.maxWidth, Math.max(column.minWidth, cur.length))) + return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, wcwidth(cur)))) }, 0) }).reduce(function(min, cur) { return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, cur))) }, 0) }) - var rows = createRows(items, columns, columnNames) // merge lines into rows + var rows = createRows(items, columns, columnNames, options.paddingChr) // merge lines into rows // conceive output return rows.reduce(function(output, row) { return output.concat(row.reduce(function(rowOut, line) { return rowOut.concat(line.join(options.columnSplitter)) }, [])) - }, []).join(options.spacing) + }, []).map(function(line) { + return truncateString(line, maxLineWidth) + }).join(options.spacing) } /** @@ -159,7 +181,7 @@ * @return Array items wrapped in arrays, corresponding to lines */ -function createRows(items, columns, columnNames) { +function createRows(items, columns, columnNames, paddingChr) { return items.map(function(item) { var row = [] var numLines = 0 @@ -172,7 +194,9 @@ columnNames.forEach(function(columnName) { var column = columns[columnName] var val = item[columnName][i] || '' // || '' ensures empty columns get padded - row[i].push(padRight(val, column.width)) + if (column.align == 'right') row[i].push(padLeft(val, column.width, paddingChr)) + else if (column.align == 'center') row[i].push(padCenter(val, column.width, paddingChr)) + else row[i].push(padRight(val, column.width, paddingChr)) }) } return row @@ -208,3 +232,16 @@ var lastIndex = target.lastIndexOf(searchString); return lastIndex !== -1 && lastIndex === position; } + + +function toArray(items, columnNames) { + if (Array.isArray(items)) return items + var rows = [] + for (var key in items) { + var item = {} + item[columnNames[0] || 'key'] = key + item[columnNames[1] || 'value'] = items[key] + rows.push(item) + } + return rows +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/LICENSE 
nodejs-0.11.15/deps/npm/node_modules/columnify/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/columnify/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Tim Oxley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/cli.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/cli.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/cli.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/cli.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,39 @@ +#!/usr/bin/env node +'use strict'; +var fs = require('fs'); +var pkg = require('./package.json'); +var strip = require('./'); +var input = process.argv[2]; + +function help() { + console.log([ + pkg.description, + '', + 'Usage', + ' $ strip-ansi > ', + ' $ cat | strip-ansi > ', + '', + 'Example', + ' $ strip-ansi unicorn.txt > unicorn-stripped.txt' + ].join('\n')); +} + +if (process.argv.indexOf('--help') !== -1) { + help(); + return; +} + +if (process.argv.indexOf('--version') !== -1) { + console.log(pkg.version); + return; +} + +if (input) { + process.stdout.write(strip(fs.readFileSync(input, 'utf8'))); + return; +} + +process.stdin.setEncoding('utf8'); +process.stdin.on('data', function (data) { + process.stdout.write(strip(data)); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/index.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/index.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ +'use strict'; +var ansiRegex = require('ansi-regex')(); + +module.exports = function (str) { + return typeof str === 'string' ? 
str.replace(ansiRegex, '') : str; +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/index.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/index.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +'use strict'; +module.exports = function () { + return /\u001b\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]/g; +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/package.json nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/package.json --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,78 @@ +{ + "name": "ansi-regex", + "version": "0.2.1", + "description": "Regular expression for matching ANSI escape codes", + "license": "MIT", + "repository": { + "type": "git", + "url": "git://github.com/sindresorhus/ansi-regex" + }, + "author": { + "name": "Sindre Sorhus", + "email": "sindresorhus@gmail.com", + "url": "http://sindresorhus.com" + }, + "engines": { + "node": ">=0.10.0" + }, + "scripts": { + "test": "mocha" + }, + "files": [ + "index.js" + ], + "keywords": [ + "ansi", + "styles", + "color", + "colour", + "colors", + "terminal", + "console", + "cli", + "string", + "tty", + "escape", + "formatting", + "rgb", + "256", + "shell", + "xterm", + "command-line", + "text", + "regex", + "regexp", + "re", + "match", + "test", + "find", + "pattern" + ], + "devDependencies": { + 
"mocha": "*" + }, + "bugs": { + "url": "https://github.com/sindresorhus/ansi-regex/issues" + }, + "homepage": "https://github.com/sindresorhus/ansi-regex", + "_id": "ansi-regex@0.2.1", + "_shasum": "0d8e946967a3d8143f93e24e298525fc1b2235f9", + "_from": "ansi-regex@0.2.1", + "_npmVersion": "1.4.9", + "_npmUser": { + "name": "sindresorhus", + "email": "sindresorhus@gmail.com" + }, + "maintainers": [ + { + "name": "sindresorhus", + "email": "sindresorhus@gmail.com" + } + ], + "dist": { + "shasum": "0d8e946967a3d8143f93e24e298525fc1b2235f9", + "tarball": "http://registry.npmjs.org/ansi-regex/-/ansi-regex-0.2.1.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-0.2.1.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/readme.md nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/readme.md --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/readme.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/node_modules/ansi-regex/readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +# ansi-regex [![Build Status](https://travis-ci.org/sindresorhus/ansi-regex.svg?branch=master)](https://travis-ci.org/sindresorhus/ansi-regex) + +> Regular expression for matching [ANSI escape codes](http://en.wikipedia.org/wiki/ANSI_escape_code) + + +## Install + +```sh +$ npm install --save ansi-regex +``` + + +## Usage + +```js +var ansiRegex = require('ansi-regex'); + +ansiRegex().test('\u001b[4mcake\u001b[0m'); +//=> true + +ansiRegex().test('cake'); +//=> false + +'\u001b[4mcake\u001b[0m'.match(ansiRegex()); +//=> ['\u001b[4m', '\u001b[0m'] +``` + +*It's a function so you can create multiple instances. Regexes with the global flag will have the `.lastIndex` property changed for each call to methods on the instance. 
Therefore reusing the instance with multiple calls will not work as expected for `.test()`.* + + +## License + +MIT © [Sindre Sorhus](http://sindresorhus.com) diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/package.json nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/package.json --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,88 @@ +{ + "name": "strip-ansi", + "version": "1.0.0", + "description": "Strip ANSI escape codes", + "license": "MIT", + "bin": { + "strip-ansi": "cli.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/sindresorhus/strip-ansi" + }, + "author": { + "name": "Sindre Sorhus", + "email": "sindresorhus@gmail.com", + "url": "http://sindresorhus.com" + }, + "engines": { + "node": ">=0.10.0" + }, + "scripts": { + "test": "mocha" + }, + "files": [ + "index.js", + "cli.js" + ], + "keywords": [ + "strip", + "trim", + "remove", + "ansi", + "styles", + "color", + "colour", + "colors", + "terminal", + "console", + "cli", + "string", + "tty", + "escape", + "formatting", + "rgb", + "256", + "shell", + "xterm", + "log", + "logging", + "command-line", + "text" + ], + "dependencies": { + "ansi-regex": "^0.2.1" + }, + "devDependencies": { + "mocha": "*" + }, + "gitHead": "6fea2ef935f1ba10d43e4c4d9814af328803935c", + "bugs": { + "url": "https://github.com/sindresorhus/strip-ansi/issues" + }, + "homepage": "https://github.com/sindresorhus/strip-ansi", + "_id": "strip-ansi@1.0.0", + "_shasum": "6c021321d6ece161a3c608fbab268c7328901c73", + "_from": "strip-ansi@>=1.0.0-0 <2.0.0-0", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "sindresorhus", + "email": "sindresorhus@gmail.com" + }, + "maintainers": [ + { + "name": "sindresorhus", + "email": "sindresorhus@gmail.com" + }, + { + 
"name": "jbnicolai", + "email": "jappelman@xebia.com" + } + ], + "dist": { + "shasum": "6c021321d6ece161a3c608fbab268c7328901c73", + "tarball": "http://registry.npmjs.org/strip-ansi/-/strip-ansi-1.0.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-1.0.0.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/readme.md nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/readme.md --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/strip-ansi/readme.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/strip-ansi/readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +# strip-ansi [![Build Status](https://travis-ci.org/sindresorhus/strip-ansi.svg?branch=master)](https://travis-ci.org/sindresorhus/strip-ansi) + +> Strip [ANSI escape codes](http://en.wikipedia.org/wiki/ANSI_escape_code) + + +## Install + +```sh +$ npm install --save strip-ansi +``` + + +## Usage + +```js +var stripAnsi = require('strip-ansi'); + +stripAnsi('\x1b[4mcake\x1b[0m'); +//=> 'cake' +``` + + +## CLI + +```sh +$ npm install --global strip-ansi +``` + +```sh +$ strip-ansi --help + +Usage + $ strip-ansi > + $ cat | strip-ansi > + +Example + $ strip-ansi unicorn.txt > unicorn-stripped.txt +``` + + +## License + +MIT © [Sindre Sorhus](http://sindresorhus.com) diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/combining.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/combining.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/combining.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/combining.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,50 @@ +module.exports = [ + [ 0x0300, 0x036F ], [ 0x0483, 0x0486 ], [ 0x0488, 0x0489 ], + [ 0x0591, 0x05BD ], [ 0x05BF, 0x05BF ], [ 0x05C1, 0x05C2 ], + [ 
0x05C4, 0x05C5 ], [ 0x05C7, 0x05C7 ], [ 0x0600, 0x0603 ], + [ 0x0610, 0x0615 ], [ 0x064B, 0x065E ], [ 0x0670, 0x0670 ], + [ 0x06D6, 0x06E4 ], [ 0x06E7, 0x06E8 ], [ 0x06EA, 0x06ED ], + [ 0x070F, 0x070F ], [ 0x0711, 0x0711 ], [ 0x0730, 0x074A ], + [ 0x07A6, 0x07B0 ], [ 0x07EB, 0x07F3 ], [ 0x0901, 0x0902 ], + [ 0x093C, 0x093C ], [ 0x0941, 0x0948 ], [ 0x094D, 0x094D ], + [ 0x0951, 0x0954 ], [ 0x0962, 0x0963 ], [ 0x0981, 0x0981 ], + [ 0x09BC, 0x09BC ], [ 0x09C1, 0x09C4 ], [ 0x09CD, 0x09CD ], + [ 0x09E2, 0x09E3 ], [ 0x0A01, 0x0A02 ], [ 0x0A3C, 0x0A3C ], + [ 0x0A41, 0x0A42 ], [ 0x0A47, 0x0A48 ], [ 0x0A4B, 0x0A4D ], + [ 0x0A70, 0x0A71 ], [ 0x0A81, 0x0A82 ], [ 0x0ABC, 0x0ABC ], + [ 0x0AC1, 0x0AC5 ], [ 0x0AC7, 0x0AC8 ], [ 0x0ACD, 0x0ACD ], + [ 0x0AE2, 0x0AE3 ], [ 0x0B01, 0x0B01 ], [ 0x0B3C, 0x0B3C ], + [ 0x0B3F, 0x0B3F ], [ 0x0B41, 0x0B43 ], [ 0x0B4D, 0x0B4D ], + [ 0x0B56, 0x0B56 ], [ 0x0B82, 0x0B82 ], [ 0x0BC0, 0x0BC0 ], + [ 0x0BCD, 0x0BCD ], [ 0x0C3E, 0x0C40 ], [ 0x0C46, 0x0C48 ], + [ 0x0C4A, 0x0C4D ], [ 0x0C55, 0x0C56 ], [ 0x0CBC, 0x0CBC ], + [ 0x0CBF, 0x0CBF ], [ 0x0CC6, 0x0CC6 ], [ 0x0CCC, 0x0CCD ], + [ 0x0CE2, 0x0CE3 ], [ 0x0D41, 0x0D43 ], [ 0x0D4D, 0x0D4D ], + [ 0x0DCA, 0x0DCA ], [ 0x0DD2, 0x0DD4 ], [ 0x0DD6, 0x0DD6 ], + [ 0x0E31, 0x0E31 ], [ 0x0E34, 0x0E3A ], [ 0x0E47, 0x0E4E ], + [ 0x0EB1, 0x0EB1 ], [ 0x0EB4, 0x0EB9 ], [ 0x0EBB, 0x0EBC ], + [ 0x0EC8, 0x0ECD ], [ 0x0F18, 0x0F19 ], [ 0x0F35, 0x0F35 ], + [ 0x0F37, 0x0F37 ], [ 0x0F39, 0x0F39 ], [ 0x0F71, 0x0F7E ], + [ 0x0F80, 0x0F84 ], [ 0x0F86, 0x0F87 ], [ 0x0F90, 0x0F97 ], + [ 0x0F99, 0x0FBC ], [ 0x0FC6, 0x0FC6 ], [ 0x102D, 0x1030 ], + [ 0x1032, 0x1032 ], [ 0x1036, 0x1037 ], [ 0x1039, 0x1039 ], + [ 0x1058, 0x1059 ], [ 0x1160, 0x11FF ], [ 0x135F, 0x135F ], + [ 0x1712, 0x1714 ], [ 0x1732, 0x1734 ], [ 0x1752, 0x1753 ], + [ 0x1772, 0x1773 ], [ 0x17B4, 0x17B5 ], [ 0x17B7, 0x17BD ], + [ 0x17C6, 0x17C6 ], [ 0x17C9, 0x17D3 ], [ 0x17DD, 0x17DD ], + [ 0x180B, 0x180D ], [ 0x18A9, 0x18A9 ], [ 0x1920, 0x1922 ], + [ 0x1927, 0x1928 
], [ 0x1932, 0x1932 ], [ 0x1939, 0x193B ], + [ 0x1A17, 0x1A18 ], [ 0x1B00, 0x1B03 ], [ 0x1B34, 0x1B34 ], + [ 0x1B36, 0x1B3A ], [ 0x1B3C, 0x1B3C ], [ 0x1B42, 0x1B42 ], + [ 0x1B6B, 0x1B73 ], [ 0x1DC0, 0x1DCA ], [ 0x1DFE, 0x1DFF ], + [ 0x200B, 0x200F ], [ 0x202A, 0x202E ], [ 0x2060, 0x2063 ], + [ 0x206A, 0x206F ], [ 0x20D0, 0x20EF ], [ 0x302A, 0x302F ], + [ 0x3099, 0x309A ], [ 0xA806, 0xA806 ], [ 0xA80B, 0xA80B ], + [ 0xA825, 0xA826 ], [ 0xFB1E, 0xFB1E ], [ 0xFE00, 0xFE0F ], + [ 0xFE20, 0xFE23 ], [ 0xFEFF, 0xFEFF ], [ 0xFFF9, 0xFFFB ], + [ 0x10A01, 0x10A03 ], [ 0x10A05, 0x10A06 ], [ 0x10A0C, 0x10A0F ], + [ 0x10A38, 0x10A3A ], [ 0x10A3F, 0x10A3F ], [ 0x1D167, 0x1D169 ], + [ 0x1D173, 0x1D182 ], [ 0x1D185, 0x1D18B ], [ 0x1D1AA, 0x1D1AD ], + [ 0x1D242, 0x1D244 ], [ 0xE0001, 0xE0001 ], [ 0xE0020, 0xE007F ], + [ 0xE0100, 0xE01EF ] +] diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/docs/index.md nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/docs/index.md --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/docs/index.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/docs/index.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,65 @@ +### Javascript porting of Markus Kuhn's wcwidth() implementation + +The following explanation comes from the original C implementation: + +This is an implementation of wcwidth() and wcswidth() (defined in +IEEE Std 1002.1-2001) for Unicode. + +http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html +http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html + +In fixed-width output devices, Latin characters all occupy a single +"cell" position of equal width, whereas ideographic CJK characters +occupy two such cells. 
Interoperability between terminal-line +applications and (teletype-style) character terminals using the +UTF-8 encoding requires agreement on which character should advance +the cursor by how many cell positions. No established formal +standards exist at present on which Unicode character shall occupy +how many cell positions on character terminals. These routines are +a first attempt of defining such behavior based on simple rules +applied to data provided by the Unicode Consortium. + +For some graphical characters, the Unicode standard explicitly +defines a character-cell width via the definition of the East Asian +FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes. +In all these cases, there is no ambiguity about which width a +terminal shall use. For characters in the East Asian Ambiguous (A) +class, the width choice depends purely on a preference of backward +compatibility with either historic CJK or Western practice. +Choosing single-width for these characters is easy to justify as +the appropriate long-term solution, as the CJK practice of +displaying these characters as double-width comes from historic +implementation simplicity (8-bit encoded characters were displayed +single-width and 16-bit ones double-width, even for Greek, +Cyrillic, etc.) and not any typographic considerations. + +Much less clear is the choice of width for the Not East Asian +(Neutral) class. Existing practice does not dictate a width for any +of these characters. It would nevertheless make sense +typographically to allocate two character cells to characters such +as for instance EM SPACE or VOLUME INTEGRAL, which cannot be +represented adequately with a single-width glyph. The following +routines at present merely assign a single-cell width to all +neutral characters, in the interest of simplicity. This is not +entirely satisfactory and should be reconsidered before +establishing a formal standard in this area. 
At the moment, the +decision which Not East Asian (Neutral) characters should be +represented by double-width glyphs cannot yet be answered by +applying a simple rule from the Unicode database content. Setting +up a proper standard for the behavior of UTF-8 character terminals +will require a careful analysis not only of each Unicode character, +but also of each presentation form, something the author of these +routines has avoided to do so far. + +http://www.unicode.org/unicode/reports/tr11/ + +Markus Kuhn -- 2007-05-26 (Unicode 5.0) + +Permission to use, copy, modify, and distribute this software +for any purpose and without fee is hereby granted. The author +disclaims all warranties with regard to this software. + +Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c + + + diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/index.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/index.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,99 @@ +"use strict" + +var defaults = require('defaults') +var combining = require('./combining') + +var DEFAULTS = { + nul: 0, + control: 0 +} + +module.exports = function wcwidth(str) { + return wcswidth(str, DEFAULTS) +} + +module.exports.config = function(opts) { + opts = defaults(opts || {}, DEFAULTS) + return function wcwidth(str) { + return wcswidth(str, opts) + } +} + +/* + * The following functions define the column width of an ISO 10646 + * character as follows: + * - The null character (U+0000) has a column width of 0. + * - Other C0/C1 control characters and DEL will lead to a return value + * of -1. + * - Non-spacing and enclosing combining characters (general category + * code Mn or Me in the + * Unicode database) have a column width of 0. 
+ * - SOFT HYPHEN (U+00AD) has a column width of 1. + * - Other format characters (general category code Cf in the Unicode + * database) and ZERO WIDTH + * SPACE (U+200B) have a column width of 0. + * - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF) + * have a column width of 0. + * - Spacing characters in the East Asian Wide (W) or East Asian + * Full-width (F) category as + * defined in Unicode Technical Report #11 have a column width of 2. + * - All remaining characters (including all printable ISO 8859-1 and + * WGL4 characters, Unicode control characters, etc.) have a column + * width of 1. + * This implementation assumes that characters are encoded in ISO 10646. +*/ + +function wcswidth(str, opts) { + if (typeof str !== 'string') return wcwidth(str, opts) + + var s = 0 + for (var i = 0; i < str.length; i++) { + var n = wcwidth(str.charCodeAt(i), opts) + if (n < 0) return -1 + s += n + } + + return s +} + +function wcwidth(ucs, opts) { + // test for 8-bit control characters + if (ucs === 0) return opts.nul + if (ucs < 32 || (ucs >= 0x7f && ucs < 0xa0)) return opts.control + + // binary search in table of non-spacing characters + if (bisearch(ucs)) return 0 + + // if we arrive here, ucs is not a combining or C0/C1 control character + return 1 + + (ucs >= 0x1100 && + (ucs <= 0x115f || // Hangul Jamo init. consonants + ucs == 0x2329 || ucs == 0x232a || + (ucs >= 0x2e80 && ucs <= 0xa4cf && + ucs != 0x303f) || // CJK ... 
Yi + (ucs >= 0xac00 && ucs <= 0xd7a3) || // Hangul Syllables + (ucs >= 0xf900 && ucs <= 0xfaff) || // CJK Compatibility Ideographs + (ucs >= 0xfe10 && ucs <= 0xfe19) || // Vertical forms + (ucs >= 0xfe30 && ucs <= 0xfe6f) || // CJK Compatibility Forms + (ucs >= 0xff00 && ucs <= 0xff60) || // Fullwidth Forms + (ucs >= 0xffe0 && ucs <= 0xffe6) || + (ucs >= 0x20000 && ucs <= 0x2fffd) || + (ucs >= 0x30000 && ucs <= 0x3fffd))); +} + +function bisearch(ucs) { + var min = 0 + var max = combining.length - 1 + var mid + + if (ucs < combining[0][0] || ucs > combining[max][1]) return false + + while (max >= min) { + mid = Math.floor((min + max) / 2) + if (ucs > combining[mid][1]) min = mid + 1 + else if (ucs < combining[mid][0]) max = mid - 1 + else return true + } + + return false +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/LICENSE nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,30 @@ +wcwidth.js: JavaScript Portng of Markus Kuhn's wcwidth() Implementation +======================================================================= + +Copyright (C) 2012 by Jun Woong. + +This package is a JavaScript porting of `wcwidth()` implementation +[by Markus Kuhn](http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c). 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/index.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/index.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +var clone = require('clone'); + +module.exports = function(options, defaults) { + options = options || {}; + + Object.keys(defaults).forEach(function(key) { + if (typeof options[key] === 'undefined') { + options[key] = clone(defaults[key]); + } + }); + + return options; +}; \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/clone.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/clone.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/clone.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/clone.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,129 @@ +'use strict'; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} + +// shim for Node's 'util' package +// DO NOT REMOVE THIS! It is required for compatibility with EnderJS (http://enderjs.com/). 
+var util = { + isArray: function (ar) { + return Array.isArray(ar) || (typeof ar === 'object' && objectToString(ar) === '[object Array]'); + }, + isDate: function (d) { + return typeof d === 'object' && objectToString(d) === '[object Date]'; + }, + isRegExp: function (re) { + return typeof re === 'object' && objectToString(re) === '[object RegExp]'; + }, + getRegExpFlags: function (re) { + var flags = ''; + re.global && (flags += 'g'); + re.ignoreCase && (flags += 'i'); + re.multiline && (flags += 'm'); + return flags; + } +}; + + +if (typeof module === 'object') + module.exports = clone; + +/** + * Clones (copies) an Object using deep copying. + * + * This function supports circular references by default, but if you are certain + * there are no circular references in your object, you can save some CPU time + * by calling clone(obj, false). + * + * Caution: if `circular` is false and `parent` contains circular references, + * your program may enter an infinite loop and crash. + * + * @param `parent` - the object to be cloned + * @param `circular` - set to true if the object to be cloned may contain + * circular references. (optional - true by default) + * @param `depth` - set to a number if the object is only to be cloned to + * a particular depth. (optional - defaults to Infinity) + * @param `prototype` - sets the prototype to be used when cloning an object. + * (optional - defaults to parent prototype). 
+*/ + +function clone(parent, circular, depth, prototype) { + // maintain two arrays for circular references, where corresponding parents + // and children have the same index + var allParents = []; + var allChildren = []; + + var useBuffer = typeof Buffer != 'undefined'; + + if (typeof circular == 'undefined') + circular = true; + + if (typeof depth == 'undefined') + depth = Infinity; + + // recurse this function so we don't reset allParents and allChildren + function _clone(parent, depth) { + // cloning null always returns null + if (parent === null) + return null; + + if (depth == 0) + return parent; + + var child; + if (typeof parent != 'object') { + return parent; + } + + if (util.isArray(parent)) { + child = []; + } else if (util.isRegExp(parent)) { + child = new RegExp(parent.source, util.getRegExpFlags(parent)); + if (parent.lastIndex) child.lastIndex = parent.lastIndex; + } else if (util.isDate(parent)) { + child = new Date(parent.getTime()); + } else if (useBuffer && Buffer.isBuffer(parent)) { + child = new Buffer(parent.length); + parent.copy(child); + return child; + } else { + if (typeof prototype == 'undefined') child = Object.create(Object.getPrototypeOf(parent)); + else child = Object.create(prototype); + } + + if (circular) { + var index = allParents.indexOf(parent); + + if (index != -1) { + return allChildren[index]; + } + allParents.push(parent); + allChildren.push(child); + } + + for (var i in parent) { + child[i] = _clone(parent[i], depth - 1); + } + + return child; + } + + return _clone(parent, depth); +} + +/** + * Simple flat clone using prototype, accepts only objects, usefull for property + * override on FLAT configuration object (no nested props). + * + * USE WITH CAUTION! This may not behave as you wish if you do not know how this + * works. 
+ */ +clone.clonePrototype = function(parent) { + if (parent === null) + return null; + + var c = function () {}; + c.prototype = parent; + return new c(); +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/LICENSE nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +Copyright © 2011-2014 Paul Vorbach + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.npmignore nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules/ diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/package.json nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/package.json --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,121 @@ +{ + "name": "clone", + "description": "deep cloning of objects and arrays", + "tags": [ + "clone", + "object", + "array", + "function", + "date" + ], + "version": "0.1.18", + "repository": { + "type": "git", + "url": "git://github.com/pvorb/node-clone.git" + }, + "bugs": { + "url": "https://github.com/pvorb/node-clone/issues" + }, + "main": "clone.js", + "author": { + "name": "Paul Vorbach", + "email": "paul@vorba.ch", + "url": "http://paul.vorba.ch/" + }, + "contributors": [ + { + "name": "Blake Miner", + "email": "miner.blake@gmail.com", + "url": "http://www.blakeminer.com/" + }, + { + "name": "Tian You", + "email": "axqd001@gmail.com", + "url": "http://blog.axqd.net/" + }, + { + "name": "George Stagas", + "email": "gstagas@gmail.com", + "url": "http://stagas.com/" + }, + { + "name": 
"Tobiasz Cudnik", + "email": "tobiasz.cudnik@gmail.com", + "url": "https://github.com/TobiaszCudnik" + }, + { + "name": "Pavel Lang", + "email": "langpavel@phpskelet.org", + "url": "https://github.com/langpavel" + }, + { + "name": "Dan MacTough", + "url": "http://yabfog.com/" + }, + { + "name": "w1nk", + "url": "https://github.com/w1nk" + }, + { + "name": "Hugh Kennedy", + "url": "http://twitter.com/hughskennedy" + }, + { + "name": "Dustin Diaz", + "url": "http://dustindiaz.com" + }, + { + "name": "Ilya Shaisultanov", + "url": "https://github.com/diversario" + }, + { + "name": "Nathan MacInnes", + "email": "nathan@macinn.es", + "url": "http://macinn.es/" + }, + { + "name": "Benjamin E. Coe", + "email": "ben@npmjs.com", + "url": "https://twitter.com/benjamincoe" + }, + { + "name": "Nathan Zadoks", + "url": "https://github.com/nathan7" + } + ], + "license": "MIT", + "engines": { + "node": "*" + }, + "dependencies": {}, + "devDependencies": { + "underscore": "*", + "nodeunit": "*" + }, + "optionalDependencies": {}, + "scripts": { + "test": "nodeunit test.js" + }, + "gitHead": "17eea36140d61d97a9954c53417d0e04a00525d9", + "homepage": "https://github.com/pvorb/node-clone", + "_id": "clone@0.1.18", + "_shasum": "64a0d5d57eaa85a1a8af380cd1db8c7b3a895f66", + "_from": "clone@>=0.1.5-0 <0.2.0-0", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "pvorb", + "email": "paul@vorba.ch" + }, + "maintainers": [ + { + "name": "pvorb", + "email": "paul@vorb.de" + } + ], + "dist": { + "shasum": "64a0d5d57eaa85a1a8af380cd1db8c7b3a895f66", + "tarball": "http://registry.npmjs.org/clone/-/clone-0.1.18.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/clone/-/clone-0.1.18.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/README.md nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/README.md --- 
nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,126 @@ +# clone + +[![build status](https://secure.travis-ci.org/pvorb/node-clone.png)](http://travis-ci.org/pvorb/node-clone) + +offers foolproof _deep cloning_ of variables in JavaScript. + + +## Installation + + npm install clone + +or + + ender build clone + + +## Example + +~~~ javascript +var clone = require('clone'); + +var a, b; + +a = { foo: { bar: 'baz' } }; // inital value of a + +b = clone(a); // clone a -> b +a.foo.bar = 'foo'; // change a + +console.log(a); // show a +console.log(b); // show b +~~~ + +This will print: + +~~~ javascript +{ foo: { bar: 'foo' } } +{ foo: { bar: 'baz' } } +~~~ + +**clone** masters cloning simple objects (even with custom prototype), arrays, +Date objects, and RegExp objects. Everything is cloned recursively, so that you +can clone dates in arrays in objects, for example. + + +## API + +`clone(val, circular, depth)` + + * `val` -- the value that you want to clone, any type allowed + * `circular` -- boolean + + Call `clone` with `circular` set to `false` if you are certain that `obj` + contains no circular references. This will give better performance if needed. + There is no error if `undefined` or `null` is passed as `obj`. + * `depth` -- depth to wich the object is to be cloned (optional, + defaults to infinity) + +`clone.clonePrototype(obj)` + + * `obj` -- the object that you want to clone + +Does a prototype clone as +[described by Oran Looney](http://oranlooney.com/functional-javascript/). 
+ + +## Circular References + +~~~ javascript +var a, b; + +a = { hello: 'world' }; + +a.myself = a; +b = clone(a); + +console.log(b); +~~~ + +This will print: + +~~~ javascript +{ hello: "world", myself: [Circular] } +~~~ + +So, `b.myself` points to `b`, not `a`. Neat! + + +## Test + + npm test + + +## Caveat + +Some special objects like a socket or `process.stdout`/`stderr` are known to not +be cloneable. If you find other objects that cannot be cloned, please [open an +issue](https://github.com/pvorb/node-clone/issues/new). + + +## Bugs and Issues + +If you encounter any bugs or issues, feel free to [open an issue at +github](https://github.com/pvorb/node-clone/issues) or send me an email to +. I also always like to hear from you, if you’re using my code. + +## License + +Copyright © 2011-2014 [Paul Vorbach](http://paul.vorba.ch/) and +[contributors](https://github.com/pvorb/node-clone/graphs/contributors). + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/test.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/test.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,271 @@ +if(module.parent === null) { + console.log('Run this test file with nodeunit:'); + console.log('$ nodeunit test.js'); +} + + +var clone = require('./'); +var util = require('util'); +var _ = require('underscore'); + + + +exports["clone string"] = function(test) { + test.expect(2); // how many tests? + + var a = "foo"; + test.strictEqual(clone(a), a); + a = ""; + test.strictEqual(clone(a), a); + + test.done(); +}; + + + +exports["clone number"] = function(test) { + test.expect(5); // how many tests? + + var a = 0; + test.strictEqual(clone(a), a); + a = 1; + test.strictEqual(clone(a), a); + a = -1000; + test.strictEqual(clone(a), a); + a = 3.1415927; + test.strictEqual(clone(a), a); + a = -3.1415927; + test.strictEqual(clone(a), a); + + test.done(); +}; + + + +exports["clone date"] = function(test) { + test.expect(3); // how many tests? + + var a = new Date; + var c = clone(a); + test.ok(a instanceof Date); + test.ok(c instanceof Date); + test.equal(c.getTime(), a.getTime()); + + test.done(); +}; + + + +exports["clone object"] = function(test) { + test.expect(2); // how many tests? + + var a = { foo: { bar: "baz" } }; + var b = clone(a); + + test.ok(_(a).isEqual(b), "underscore equal"); + test.deepEqual(b, a); + + test.done(); +}; + + + +exports["clone array"] = function(test) { + test.expect(2); // how many tests? 
+ + var a = [ + { foo: "bar" }, + "baz" + ]; + var b = clone(a); + + test.ok(_(a).isEqual(b), "underscore equal"); + test.deepEqual(b, a); + + test.done(); +}; + +exports["clone buffer"] = function(test) { + test.expect(1); + + var a = new Buffer("this is a test buffer"); + var b = clone(a); + + // no underscore equal since it has no concept of Buffers + test.deepEqual(b, a); + test.done(); +}; + + + +exports["clone regexp"] = function(test) { + test.expect(5); + + var a = /abc123/gi; + var b = clone(a); + + test.deepEqual(b, a); + + var c = /a/g; + test.ok(c.lastIndex === 0); + + c.exec('123a456a'); + test.ok(c.lastIndex === 4); + + var d = clone(c); + test.ok(d.global); + test.ok(d.lastIndex === 4); + + test.done(); +}; + + +exports["clone object containing array"] = function(test) { + test.expect(2); // how many tests? + + var a = { + arr1: [ { a: '1234', b: '2345' } ], + arr2: [ { c: '345', d: '456' } ] + }; + var b = clone(a); + + test.ok(_(a).isEqual(b), "underscore equal"); + test.deepEqual(b, a); + + test.done(); +}; + + + +exports["clone object with circular reference"] = function(test) { + test.expect(8); // how many tests? 
+ + var _ = test.ok; + var c = [1, "foo", {'hello': 'bar'}, function() {}, false, [2]]; + var b = [c, 2, 3, 4]; + var a = {'b': b, 'c': c}; + a.loop = a; + a.loop2 = a; + c.loop = c; + c.aloop = a; + var aCopy = clone(a); + _(a != aCopy); + _(a.c != aCopy.c); + _(aCopy.c == aCopy.b[0]); + _(aCopy.c.loop.loop.aloop == aCopy); + _(aCopy.c[0] == a.c[0]); + + //console.log(util.inspect(aCopy, true, null) ); + //console.log("------------------------------------------------------------"); + //console.log(util.inspect(a, true, null) ); + _(eq(a, aCopy)); + aCopy.c[0] = 2; + _(!eq(a, aCopy)); + aCopy.c = "2"; + _(!eq(a, aCopy)); + //console.log("------------------------------------------------------------"); + //console.log(util.inspect(aCopy, true, null) ); + + function eq(x, y) { + return util.inspect(x, true, null) === util.inspect(y, true, null); + } + + test.done(); +}; + + + +exports['clonePrototype'] = function(test) { + test.expect(3); // how many tests? + + var a = { + a: "aaa", + x: 123, + y: 45.65 + }; + var b = clone.clonePrototype(a); + + test.strictEqual(b.a, a.a); + test.strictEqual(b.x, a.x); + test.strictEqual(b.y, a.y); + + test.done(); +} + +exports['cloneWithinNewVMContext'] = function(test) { + test.expect(3); + var vm = require('vm'); + var ctx = vm.createContext({ clone: clone }); + var script = "clone( {array: [1, 2, 3], date: new Date(), regex: /^foo$/ig} );"; + var results = vm.runInContext(script, ctx); + test.ok(results.array instanceof Array); + test.ok(results.date instanceof Date); + test.ok(results.regex instanceof RegExp); + test.done(); +} + +exports['cloneObjectWithNoConstructor'] = function(test) { + test.expect(3); + var n = null; + var a = { foo: 'bar' }; + a.__proto__ = n; + test.ok(typeof a === 'object'); + test.ok(typeof a !== null); + var b = clone(a); + test.ok(a.foo, b.foo); + test.done(); +} + +exports['clone object with depth argument'] = function (test) { + test.expect(6); + var a = { + foo: { + bar : { + baz : 'qux' + } + } + 
}; + var b = clone(a, false, 1); + test.deepEqual(b, a); + test.notEqual(b, a); + test.strictEqual(b.foo, a.foo); + + b = clone(a, true, 2); + test.deepEqual(b, a); + test.notEqual(b.foo, a.foo); + test.strictEqual(b.foo.bar, a.foo.bar); + test.done(); +} + +exports['maintain prototype chain in clones'] = function (test) { + test.expect(1); + function Constructor() {} + var a = new Constructor(); + var b = clone(a); + test.strictEqual(Object.getPrototypeOf(a), Object.getPrototypeOf(b)); + test.done(); +} + +exports['parent prototype is overriden with prototype provided'] = function (test) { + test.expect(1); + function Constructor() {} + var a = new Constructor(); + var b = clone(a, true, Infinity, null); + test.strictEqual(b.__defineSetter__, undefined); + test.done(); +} + +exports['clone object with null children'] = function(test) { + test.expect(1); + var a = { + foo: { + bar: null, + baz: { + qux: false + } + } + }; + var b = clone(a); + test.deepEqual(b, a); + test.done(); +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.travis.yml nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/node_modules/clone/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +language: node_js +node_js: + - 0.6 + - 0.8 + - 0.10 diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/.npmignore nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/package.json nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/package.json --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,50 @@ +{ + "name": "defaults", + "version": "1.0.0", + "description": "merge single level defaults over a config object", + "main": "index.js", + "scripts": { + "test": "node test.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/tmpvar/defaults.git" + }, + "keywords": [ + "config", + "defaults" + ], + "author": { + "name": "Elijah Insua", + "email": "tmpvar@gmail.com" + }, + "license": "MIT", + "readmeFilename": "README.md", + "dependencies": { + "clone": "~0.1.5" + }, + "devDependencies": { + "tap": "~0.4.0" + }, + "readme": "# defaults\n\nA simple one level options merge utility\n\n## install\n\n`npm install defaults`\n\n## use\n\n```javascript\n\nvar defaults = require('defaults');\n\nvar handle = function(options, fn) {\n options = defaults(options, {\n timeout: 100\n });\n\n setTimeout(function() {\n fn(options);\n }, options.timeout);\n}\n\nhandle({ timeout: 1000 }, function() {\n // we're here 1000 ms later\n});\n\nhandle({ timeout: 10000 }, function() {\n // we're here 10s later\n});\n\n```\n\n## summary\n\nthis module exports a function that takes 2 arguments: `options` and `defaults`. 
When called, it overrides all of `undefined` properties in `options` with the clones of properties defined in `defaults`\n\nSidecases: if called with a falsy `options` value, options will be initialized to a new object before being merged onto.\n\n## license\n\nMIT", + "_id": "defaults@1.0.0", + "dist": { + "shasum": "3ae25f44416c6c01f9809a25fcdd285912d2a6b1", + "tarball": "http://registry.npmjs.org/defaults/-/defaults-1.0.0.tgz" + }, + "_npmVersion": "1.1.65", + "_npmUser": { + "name": "tmpvar", + "email": "tmpvar@gmail.com" + }, + "maintainers": [ + { + "name": "tmpvar", + "email": "tmpvar@gmail.com" + } + ], + "directories": {}, + "_shasum": "3ae25f44416c6c01f9809a25fcdd285912d2a6b1", + "_from": "defaults@>=1.0.0-0 <2.0.0-0", + "_resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.0.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/README.md nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/README.md --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +# defaults + +A simple one level options merge utility + +## install + +`npm install defaults` + +## use + +```javascript + +var defaults = require('defaults'); + +var handle = function(options, fn) { + options = defaults(options, { + timeout: 100 + }); + + setTimeout(function() { + fn(options); + }, options.timeout); +} + +handle({ timeout: 1000 }, function() { + // we're here 1000 ms later +}); + +handle({ timeout: 10000 }, function() { + // we're here 10s later +}); + +``` + +## summary + +this module exports a function that takes 2 arguments: `options` and `defaults`. 
When called, it overrides all of `undefined` properties in `options` with the clones of properties defined in `defaults` + +Sidecases: if called with a falsy `options` value, options will be initialized to a new object before being merged onto. + +## license + +MIT \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/test.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/test.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/node_modules/defaults/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,34 @@ +var defaults = require('./'), + test = require('tap').test; + +test("ensure options is an object", function(t) { + var options = defaults(false, { a : true }); + t.ok(options.a); + t.end() +}); + +test("ensure defaults override keys", function(t) { + var result = defaults({}, { a: false, b: true }); + t.ok(result.b, 'b merges over undefined'); + t.equal(result.a, false, 'a merges over undefined'); + t.end(); +}); + +test("ensure defined keys are not overwritten", function(t) { + var result = defaults({ b: false }, { a: false, b: true }); + t.equal(result.b, false, 'b not merged'); + t.equal(result.a, false, 'a merges over undefined'); + t.end(); +}); + +test("ensure defaults clone nested objects", function(t) { + var d = { a: [1,2,3], b: { hello : 'world' } }; + var result = defaults({}, d); + t.equal(result.a.length, 3, 'objects should be clones'); + t.ok(result.a !== d.a, 'objects should be clones'); + + t.equal(Object.keys(result.b).length, 1, 'objects should be clones'); + t.ok(result.b !== d.b, 'objects should be clones'); + t.end(); +}); + diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/.npmignore 
nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/package.json nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/package.json --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,60 @@ +{ + "name": "wcwidth", + "version": "1.0.0", + "description": "Port of C's wcwidth() and wcswidth()", + "author": { + "name": "Tim Oxley" + }, + "contributors": [ + { + "name": "Woong Jun", + "email": "woong.jun@gmail.com", + "url": "http://code.woong.org/" + } + ], + "main": "index.js", + "dependencies": { + "defaults": "^1.0.0" + }, + "devDependencies": { + "tape": "^2.13.4" + }, + "license": "MIT", + "keywords": [ + "wide character", + "wc", + "wide character string", + "wcs", + "terminal", + "width", + "wcwidth", + "wcswidth" + ], + "directories": { + "doc": "docs", + "test": "test" + }, + "scripts": { + "test": "tape test/*.js" + }, + "gitHead": "5bc3aafd45c89f233c27b9479c18a23ca91ba660", + "_id": "wcwidth@1.0.0", + "_shasum": "02d059ff7a8fc741e0f6b5da1e69b2b40daeca6f", + "_from": "wcwidth@>=1.0.0-0 <2.0.0-0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "timoxley", + "email": "secoif@gmail.com" + }, + "maintainers": [ + { + "name": "timoxley", + "email": "secoif@gmail.com" + } + ], + "dist": { + "shasum": "02d059ff7a8fc741e0f6b5da1e69b2b40daeca6f", + "tarball": "http://registry.npmjs.org/wcwidth/-/wcwidth-1.0.0.tgz" + }, + "_resolved": 
"https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.0.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/Readme.md nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/Readme.md --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/Readme.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/Readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +# wcwidth + +Determine columns needed for a fixed-size wide-character string + +---- + +wcwidth is a simple JavaScript port of [wcwidth](http://man7.org/linux/man-pages/man3/wcswidth.3.html) implemented in C by Markus Kuhn. + +JavaScript port [originally](https://github.com/mycoboco/wcwidth.js) written by Woong Jun (http://code.woong.org/) + +## Example + +```js +'한'.length // => 1 +wcwidth('한'); // => 2 + +'한글'.length // => 2 +wcwidth('한글'); // => 4 +``` + +`wcwidth()` and its string version, `wcswidth()` are defined by IEEE Std +1002.1-2001, a.k.a. POSIX.1-2001, and return the number of columns used +to represent the given wide character and string. + +Markus's implementation assumes the wide character given to those +functions to be encoded in ISO 10646, which is almost true for +JavaScript's characters. 
+ +[Further explaination here](docs) + +## License + +MIT diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/test/index.js nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/test/index.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/node_modules/wcwidth/test/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/node_modules/wcwidth/test/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,64 @@ +"use strict" + +var wcwidth = require('../') +var test = require('tape') + +test('handles regular strings', function(t) { + t.strictEqual(wcwidth('abc'), 3) + t.end() +}) + +test('handles multibyte strings', function(t) { + t.strictEqual(wcwidth('字的模块'), 8) + t.end() +}) + +test('handles multibyte characters mixed with regular characters', function(t) { + t.strictEqual(wcwidth('abc 字的模块'), 12) + t.end() +}) + +test('ignores control characters e.g. \\n', function(t) { + t.strictEqual(wcwidth('abc\n字的模块\ndef'), 14) + t.end() +}) + +test('ignores bad input', function(t) { + t.strictEqual(wcwidth(''), 0) + t.strictEqual(wcwidth(3), 0) + t.strictEqual(wcwidth({}), 0) + t.strictEqual(wcwidth([]), 0) + t.strictEqual(wcwidth(), 0) + t.end() +}) + +test('ignores nul (charcode 0)', function(t) { + t.strictEqual(wcwidth(String.fromCharCode(0)), 0) + t.end() +}) + +test('ignores nul mixed with chars', function(t) { + t.strictEqual(wcwidth('a' + String.fromCharCode(0) + '\n字的'), 5) + t.end() +}) + +test('can have custom value for nul', function(t) { + t.strictEqual(wcwidth.config({ + nul: 10 + })(String.fromCharCode(0) + 'a字的'), 15) + t.end() +}) + +test('can have custom control char value', function(t) { + t.strictEqual(wcwidth.config({ + control: 1 + })('abc\n字的模块\ndef'), 16) + t.end() +}) + +test('negative custom control chars == -1', function(t) { + t.strictEqual(wcwidth.config({ + control: -1 + })('abc\n字的模块\ndef'), -1) + t.end() +}) diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/columnify/package.json nodejs-0.11.15/deps/npm/node_modules/columnify/package.json --- nodejs-0.11.13/deps/npm/node_modules/columnify/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,18 +1,20 @@ { "name": "columnify", - "version": "0.1.2", + "version": "1.2.1", "description": "Render data in text columns, supports in-column text-wrap.", "main": "index.js", "scripts": { - "test": "tap test" + "pretest": "npm prune", + "test": "faucet" }, "author": { "name": "Tim Oxley" }, "license": "MIT", "devDependencies": { - "tape": "~2.3.0", - "tap": "~0.4.6" + "chalk": "^0.4.0", + "faucet": "0.0.1", + "tape": "~2.12.3" }, "repository": { "type": "git", @@ -31,12 +33,31 @@ "url": "https://github.com/timoxley/columnify/issues" }, "homepage": "https://github.com/timoxley/columnify", - "readme": "# columnify\n\n[![Build Status](https://travis-ci.org/timoxley/columnify.png?branch=master)](https://travis-ci.org/timoxley/columnify)\n\nCreate text-based columns suitable for console output. \nSupports minimum and maximum column widths via truncation and text wrapping.\n\nDesigned to [handle sensible wrapping in npm search results](https://github.com/isaacs/npm/pull/2328).\n\n`npm search` before & after integrating columnify:\n\n![npm-tidy-search](https://f.cloud.github.com/assets/43438/1848959/ae02ad04-76a1-11e3-8255-4781debffc26.gif)\n\n## Installation & Update\n\n```\n$ npm install --save columnify@latest\n```\n\n## Usage\n\n```js\nvar columnify = require('columnify')\nvar columns = columnify(data, options)\nconsole.log(columns)\n```\n\n## Examples\n\n### Simple Columns\n\nText is aligned under column headings. Columns are automatically resized\nto fit the content of the largest cell. 
Each cell will be padded with\nspaces to fill the available space and ensure column contents are\nleft-aligned.\n\n```js\nvar columnify = require('columnify')\n\nvar columns = columnify([{\n name: 'mod1',\n version: '0.0.1'\n}, {\n name: 'module2',\n version: '0.2.0'\n}])\n\nconsole.log(columns)\n```\n```\nNAME VERSION\nmod1 0.0.1 \nmodule2 0.2.0 \n```\n\n### Wrapping Column Cells\n\nYou can define the maximum width before wrapping for individual cells in\ncolumns. Minimum width is also supported. Wrapping will happen at word\nboundaries. Empty cells or those which do not fill the max/min width\nwill be padded with spaces.\n\n```js\nvar columnify = require('columnify')\n\nvar columns = columnify([{\n name: 'mod1',\n description: 'some description which happens to be far larger than the max',\n version: '0.0.1',\n}, {\n name: 'module-two',\n description: 'another description larger than the max',\n version: '0.2.0',\n})\n\nconsole.log(columns)\n```\n```\nNAME DESCRIPTION VERSION\nmod1 some description which happens 0.0.1\n to be far larger than the max\nmodule-two another description larger 0.2.0\n than the max\n```\n\n### Truncated Columns\n\nYou can disable wrapping and instead truncate content at the maximum\ncolumn width. Truncation respects word boundaries. 
A truncation marker,\n`…` will appear next to the last word in any truncated line.\n\n```js\nvar columns = columnify(data, {\n truncate: true,\n config: {\n description: {\n maxWidth: 20\n }\n }\n})\n\nconsole.log(columns)\n```\n\n```\nNAME DESCRIPTION VERSION\nmod1 some description… 0.0.1 \nmodule-two another description… 0.2.0 \n```\n\n\n### Custom Truncation Marker\n\nYou can change the truncation marker to something other than the default\n`…`.\n\n```js\nvar columns = columnify(data, {\n truncate: true,\n truncateMarker: '>',\n widths: {\n description: {\n maxWidth: 20\n }\n }\n})\n\nconsole.log(columns)\n```\n\n```\nNAME DESCRIPTION VERSION\nmod1 some description> 0.0.1 \nmodule-two another description> 0.2.0 \n```\n\n### Custom Column Splitter\n\nIf your columns need some bling, you can split columns with custom\ncharacters.\n\n```js\n\nvar columns = columnify(data, {\n columnSplitter: ' | '\n})\n\nconsole.log(columns)\n```\n```\nNAME | DESCRIPTION | VERSION\nmod1 | some description which happens to be far larger than the max | 0.0.1\nmodule-two | another description larger than the max | 0.2.0\n```\n\n### Filtering & Ordering Columns\n\nBy default, all properties are converted into columns, whether or not\nthey exist on every object or not.\n\nTo explicitly specify which columns to include, and in which order,\nsupply an \"include\" array:\n\n```js\nvar data = [{\n name: 'module1',\n description: 'some description',\n version: '0.0.1',\n}, {\n name: 'module2',\n description: 'another description',\n version: '0.2.0',\n}]\n\nvar columns = columnify(data, {\n include: ['name', 'version'] // note description not included\n})\n\nconsole.log(columns)\n```\n\n```\nNAME VERSION\nmodule1 0.0.1\nmodule2 0.2.0\n```\n## License\n\nMIT\n", - "readmeFilename": "Readme.md", - "_id": "columnify@0.1.2", + "dependencies": { + "strip-ansi": "^1.0.0", + "wcwidth": "^1.0.0" + }, + "directories": { + "test": "test" + }, + "gitHead": "14e77bef3f57acaa3f390145915a9f2d2a4f882c", + 
"_id": "columnify@1.2.1", + "_shasum": "921ec51c178f4126d3c07e9acecd67a55c7953e4", + "_from": "columnify@>=1.2.1-0 <2.0.0-0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "timoxley", + "email": "secoif@gmail.com" + }, + "maintainers": [ + { + "name": "timoxley", + "email": "secoif@gmail.com" + } + ], "dist": { - "shasum": "ab1a1f1e37b26ba4b87c6920fb717fe51c827042" + "shasum": "921ec51c178f4126d3c07e9acecd67a55c7953e4", + "tarball": "http://registry.npmjs.org/columnify/-/columnify-1.2.1.tgz" }, - "_from": "columnify@0.1.2", - "_resolved": "https://registry.npmjs.org/columnify/-/columnify-0.1.2.tgz" + "_resolved": "https://registry.npmjs.org/columnify/-/columnify-1.2.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/Readme.md nodejs-0.11.15/deps/npm/node_modules/columnify/Readme.md --- nodejs-0.11.13/deps/npm/node_modules/columnify/Readme.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/Readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -2,8 +2,12 @@ [![Build Status](https://travis-ci.org/timoxley/columnify.png?branch=master)](https://travis-ci.org/timoxley/columnify) -Create text-based columns suitable for console output. -Supports minimum and maximum column widths via truncation and text wrapping. +Create text-based columns suitable for console output from objects or +arrays of objects. + +Columns are automatically resized to fit the content of the largest +cell. Each cell will be padded with spaces to fill the available space +and ensure column contents are left-aligned. Designed to [handle sensible wrapping in npm search results](https://github.com/isaacs/npm/pull/2328). @@ -19,7 +23,7 @@ ## Usage -```js +```javascript var columnify = require('columnify') var columns = columnify(data, options) console.log(columns) @@ -27,14 +31,56 @@ ## Examples -### Simple Columns +### Columnify Objects -Text is aligned under column headings. Columns are automatically resized -to fit the content of the largest cell. 
Each cell will be padded with -spaces to fill the available space and ensure column contents are -left-aligned. +Objects are converted to a list of key/value pairs: -```js +```javascript + +var data = { + "commander@0.6.1": 1, + "minimatch@0.2.14": 3, + "mkdirp@0.3.5": 2, + "sigmund@1.0.0": 3 +} + +console.log(columnify(data)) +``` +#### Output: +``` +KEY VALUE +commander@0.6.1 1 +minimatch@0.2.14 3 +mkdirp@0.3.5 2 +sigmund@1.0.0 3 +``` + +### Custom Column Names + +```javascript +var data = { + "commander@0.6.1": 1, + "minimatch@0.2.14": 3, + "mkdirp@0.3.5": 2, + "sigmund@1.0.0": 3 +} + +console.log(columnify(data, {columns: ['MODULE', 'COUNT']})) +``` +#### Output: +``` +MODULE COUNT +commander@0.6.1 1 +minimatch@0.2.14 3 +mkdirp@0.3.5 2 +sigmund@1.0.0 3 +``` + +### Columnify Arrays of Objects + +Column headings are extracted from the keys in supplied objects. + +```javascript var columnify = require('columnify') var columns = columnify([{ @@ -47,6 +93,7 @@ console.log(columns) ``` +#### Output: ``` NAME VERSION mod1 0.0.1 @@ -60,8 +107,7 @@ boundaries. Empty cells or those which do not fill the max/min width will be padded with spaces. -```js -var columnify = require('columnify') +```javascript var columns = columnify([{ name: 'mod1', @@ -75,6 +121,7 @@ console.log(columns) ``` +#### Output: ``` NAME DESCRIPTION VERSION mod1 some description which happens 0.0.1 @@ -83,13 +130,13 @@ than the max ``` -### Truncated Columns +### Truncating Column Cells You can disable wrapping and instead truncate content at the maximum column width. Truncation respects word boundaries. A truncation marker, `…` will appear next to the last word in any truncated line. 
-```js +```javascript var columns = columnify(data, { truncate: true, config: { @@ -101,20 +148,145 @@ console.log(columns) ``` - +#### Output: ``` NAME DESCRIPTION VERSION mod1 some description… 0.0.1 module-two another description… 0.2.0 ``` +### Filtering & Ordering Columns + +By default, all properties are converted into columns, whether or not +they exist on every object or not. + +To explicitly specify which columns to include, and in which order, +supply a "columns" or "include" array ("include" is just an alias). + +```javascript +var data = [{ + name: 'module1', + description: 'some description', + version: '0.0.1', +}, { + name: 'module2', + description: 'another description', + version: '0.2.0', +}] + +var columns = columnify(data, { + columns: ['name', 'version'] // note description not included +}) + +console.log(columns) +``` + +#### Output: +``` +NAME VERSION +module1 0.0.1 +module2 0.2.0 +``` + + +## Other Configuration Options + +### Align Right/Center + +```js +var data = { + "mocha@1.18.2": 1, + "commander@2.0.0": 1, + "debug@0.8.1": 1 +} + +columnify(data, {config: {value: {align: 'right'}}}) +``` + +#### Output: +``` +KEY VALUE +mocha@1.18.2 1 +commander@2.0.0 1 +debug@0.8.1 1 +``` + +Align Center works in a similar way. + + +### Padding + +```js +var data = { + "shortKey": "veryVeryVeryVeryVeryLongVal", + "veryVeryVeryVeryVeryLongKey": "shortVal" +} + +columnify(data, { paddingChr: '.'}) +``` + +#### Output: +``` +KEY........................ VALUE...................... +shortKey................... veryVeryVeryVeryVeryLongVal +veryVeryVeryVeryVeryLongKey shortVal................... +``` + +### Preserve existing newlines + +By default, `columnify` sanitises text by replacing any occurance of 1 or more whitespace characters with a single space. + +`columnify` can be configured to respect existing new line characters using the `preserveNewLines` option. Note this will still collapse all other whitespace. 
+ +```javascript +var data = [{ + name: "glob@3.2.9", + paths: [ + "node_modules/tap/node_modules/glob", + "node_modules/tape/node_modules/glob" + ].join('\n') +}, { + name: "nopt@2.2.1", + paths: [ + "node_modules/tap/node_modules/nopt" + ] +}, { + name: "runforcover@0.0.2", + paths: "node_modules/tap/node_modules/runforcover" +}] + +console.log(columnify(data, {preserveNewLines: true})) +``` +#### Output: +``` +NAME PATHS +glob@3.2.9 node_modules/tap/node_modules/glob + node_modules/tape/node_modules/glob +nopt@2.2.1 node_modules/tap/node_modules/nopt +runforcover@0.0.2 node_modules/tap/node_modules/runforcover +``` + +Compare this with output without `preserveNewLines`: + +```javascript +console.log(columnify(data, {preserveNewLines: false})) +// or just +console.log(columnify(data)) +``` + +``` +NAME PATHS +glob@3.2.9 node_modules/tap/node_modules/glob node_modules/tape/node_modules/glob +nopt@2.2.1 node_modules/tap/node_modules/nopt +runforcover@0.0.2 node_modules/tap/node_modules/runforcover +``` ### Custom Truncation Marker You can change the truncation marker to something other than the default `…`. -```js +```javascript var columns = columnify(data, { truncate: true, truncateMarker: '>', @@ -127,7 +299,7 @@ console.log(columns) ``` - +#### Output: ``` NAME DESCRIPTION VERSION mod1 some description> 0.0.1 @@ -139,7 +311,7 @@ If your columns need some bling, you can split columns with custom characters. -```js +```javascript var columns = columnify(data, { columnSplitter: ' | ' @@ -147,43 +319,49 @@ console.log(columns) ``` +#### Output: ``` NAME | DESCRIPTION | VERSION mod1 | some description which happens to be far larger than the max | 0.0.1 module-two | another description larger than the max | 0.2.0 ``` -### Filtering & Ordering Columns - -By default, all properties are converted into columns, whether or not -they exist on every object or not. 
+## Multibyte Character Support -To explicitly specify which columns to include, and in which order, -supply an "include" array: +`columnify` uses [mycoboco/wcwidth.js](https://github.com/mycoboco/wcwidth.js) to calculate length of multibyte characters: -```js +```javascript var data = [{ - name: 'module1', + name: 'module-one', description: 'some description', version: '0.0.1', }, { - name: 'module2', - description: 'another description', - version: '0.2.0', + name: '这是一个很长的名字的模块', + description: '这真的是一个描述的内容这个描述很长', + version: "0.3.3" }] -var columns = columnify(data, { - include: ['name', 'version'] // note description not included -}) +console.log(columnify(data)) +``` -console.log(columns) +#### Without multibyte handling: + +i.e. before columnify added this feature + +``` +NAME DESCRIPTION VERSION +module-one some description 0.0.1 +这是一个很长的名字的模块 这真的是一个描述的内容这个描述很长 0.3.3 ``` +#### With multibyte handling: + ``` -NAME VERSION -module1 0.0.1 -module2 0.2.0 +NAME DESCRIPTION VERSION +module-one some description 0.0.1 +这是一个很长的名字的模块 这真的是一个描述的内容这个描述很长 0.3.3 ``` + ## License MIT diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/utils.js nodejs-0.11.15/deps/npm/node_modules/columnify/utils.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/utils.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/utils.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,18 @@ +"use strict" + +var wcwidth = require('./width') + +/** + * repeat string `str` up to total length of `len` + * + * @param String str string to repeat + * @param Number len total length of output string + */ + +function repeatString(str, len) { + return Array.apply(null, {length: len + 1}).join(str).slice(0, len) +} + /** * Pad `str` up to total length `max` with `chr`. * If `str` is longer than `max`, padRight will return `str` unaltered. @@ -11,15 +26,52 @@ function padRight(str, max, chr) { str = str != null ? 
str : '' str = String(str) - var length = 1 + max - str.length + var length = max - wcwidth(str) + if (length <= 0) return str + return str + repeatString(chr || ' ', length) +} + +/** + * Pad `str` up to total length `max` with `chr`. + * If `str` is longer than `max`, padCenter will return `str` unaltered. + * + * @param String str string to pad + * @param Number max total length of output string + * @param String chr optional. Character to pad with. default: ' ' + * @return String padded str + */ + +function padCenter(str, max, chr) { + str = str != null ? str : '' + str = String(str) + var length = max - wcwidth(str) + if (length <= 0) return str + var lengthLeft = Math.floor(length/2) + var lengthRight = length - lengthLeft + return repeatString(chr || ' ', lengthLeft) + str + repeatString(chr || ' ', lengthRight) +} + +/** + * Pad `str` up to total length `max` with `chr`, on the left. + * If `str` is longer than `max`, padRight will return `str` unaltered. + * + * @param String str string to pad + * @param Number max total length of output string + * @param String chr optional. Character to pad with. default: ' ' + * @return String padded str + */ + +function padLeft(str, max, chr) { + str = str != null ? str : '' + str = String(str) + var length = max - wcwidth(str) if (length <= 0) return str - return str + Array.apply(null, {length: length}) - .join(chr || ' ') + return repeatString(chr || ' ', length) + str } /** * Split a String `str` into lines of maxiumum length `max`. - * Splits on word boundaries. + * Splits on word boundaries. Preserves existing new lines. 
* * @param String str string to split * @param Number max length of each line @@ -27,16 +79,23 @@ */ function splitIntoLines(str, max) { - return str.trim().split(' ').reduce(function(lines, word) { - var line = lines[lines.length - 1] - if (line && line.join(' ').length + word.length < max) { - lines[lines.length - 1].push(word) // add to line - } - else lines.push([word]) // new line - return lines - }, []).map(function(l) { - return l.join(' ') - }) + function _splitIntoLines(str, max) { + return str.trim().split(' ').reduce(function(lines, word) { + var line = lines[lines.length - 1] + if (line && wcwidth(line.join(' ')) + wcwidth(word) < max) { + lines[lines.length - 1].push(word) // add to line + } + else lines.push([word]) // new line + return lines + }, []).map(function(l) { + return l.join(' ') + }) + } + return str.split('\n').map(function(str) { + return _splitIntoLines(str, max) + }).reduce(function(lines, line) { + return lines.concat(line) + }, []) } /** @@ -55,22 +114,67 @@ if (!str) return result.join(' ') || '' var words = str.split(' ') var word = words.shift() || str + if (wcwidth(word) > max) { + // slice is based on length no wcwidth + var i = 0 + var wwidth = 0 + var limit = max - wcwidth(truncationChar) + while (i < word.length) { + var w = wcwidth(word.charAt(i)) + if(w + wwidth > limit) + break + wwidth += w + ++i + } - if (word.length > max) { - var remainder = word.slice(max - truncationChar.length) // get remainder + var remainder = word.slice(i) // get remainder words.unshift(remainder) // save remainder for next loop - word = word.slice(0, max - truncationChar.length) // grab truncated word + word = word.slice(0, i) // grab truncated word word += truncationChar // add trailing … or whatever } result.push(word) return splitLongWords(words.join(' '), max, truncationChar, result) } + +/** + * Truncate `str` into total width `max` + * If `str` is shorter than `max`, will return `str` unaltered. 
+ * + * @param String str string to truncated + * @param Number max total wcwidth of output string + * @return String truncated str + */ + +function truncateString(str, max) { + + str = str != null ? str : '' + str = String(str) + + if(max == Infinity) return str + + var i = 0 + var wwidth = 0 + while (i < str.length) { + var w = wcwidth(str.charAt(i)) + if(w + wwidth > max) + break + wwidth += w + ++i + } + return str.slice(0, i) +} + + + /** * Exports */ module.exports.padRight = padRight +module.exports.padCenter = padCenter +module.exports.padLeft = padLeft module.exports.splitIntoLines = splitIntoLines module.exports.splitLongWords = splitLongWords +module.exports.truncateString = truncateString diff -Nru nodejs-0.11.13/deps/npm/node_modules/columnify/width.js nodejs-0.11.15/deps/npm/node_modules/columnify/width.js --- nodejs-0.11.13/deps/npm/node_modules/columnify/width.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/columnify/width.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ +var stripAnsi = require('strip-ansi') +var wcwidth = require('wcwidth') + +module.exports = function(str) { + return wcwidth(stripAnsi(str)) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/index.js nodejs-0.11.15/deps/npm/node_modules/config-chain/index.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,282 @@ +var ProtoList = require('proto-list') + , path = require('path') + , fs = require('fs') + , ini = require('ini') + , EE = require('events').EventEmitter + , url = require('url') + , http = require('http') + +var exports = module.exports = function () { + var args = [].slice.call(arguments) + , conf = new ConfigChain() + + while(args.length) { + var a = args.shift() + if(a) conf.push + ( 'string' === typeof a + ? 
json(a) + : a ) + } + + return conf +} + +//recursively find a file... + +var find = exports.find = function () { + var rel = path.join.apply(null, [].slice.call(arguments)) + + function find(start, rel) { + var file = path.join(start, rel) + try { + fs.statSync(file) + return file + } catch (err) { + if(path.dirname(start) !== start) // root + return find(path.dirname(start), rel) + } + } + return find(__dirname, rel) +} + +var parse = exports.parse = function (content, file, type) { + content = '' + content + // if we don't know what it is, try json and fall back to ini + // if we know what it is, then it must be that. + if (!type) { + try { return JSON.parse(content) } + catch (er) { return ini.parse(content) } + } else if (type === 'json') { + if (this.emit) { + try { return JSON.parse(content) } + catch (er) { this.emit('error', er) } + } else { + return JSON.parse(content) + } + } else { + return ini.parse(content) + } +} + +var json = exports.json = function () { + var args = [].slice.call(arguments).filter(function (arg) { return arg != null }) + var file = path.join.apply(null, args) + var content + try { + content = fs.readFileSync(file,'utf-8') + } catch (err) { + return + } + return parse(content, file, 'json') +} + +var env = exports.env = function (prefix, env) { + env = env || process.env + var obj = {} + var l = prefix.length + for(var k in env) { + if(k.indexOf(prefix) === 0) + obj[k.substring(l)] = env[k] + } + + return obj +} + +exports.ConfigChain = ConfigChain +function ConfigChain () { + EE.apply(this) + ProtoList.apply(this, arguments) + this._awaiting = 0 + this._saving = 0 + this.sources = {} +} + +// multi-inheritance-ish +var extras = { + constructor: { value: ConfigChain } +} +Object.keys(EE.prototype).forEach(function (k) { + extras[k] = Object.getOwnPropertyDescriptor(EE.prototype, k) +}) +ConfigChain.prototype = Object.create(ProtoList.prototype, extras) + +ConfigChain.prototype.del = function (key, where) { + // if not specified 
where, then delete from the whole chain, scorched + // earth style + if (where) { + var target = this.sources[where] + target = target && target.data + if (!target) { + return this.emit('error', new Error('not found '+where)) + } + delete target[key] + } else { + for (var i = 0, l = this.list.length; i < l; i ++) { + delete this.list[i][key] + } + } + return this +} + +ConfigChain.prototype.set = function (key, value, where) { + var target + + if (where) { + target = this.sources[where] + target = target && target.data + if (!target) { + return this.emit('error', new Error('not found '+where)) + } + } else { + target = this.list[0] + if (!target) { + return this.emit('error', new Error('cannot set, no confs!')) + } + } + target[key] = value + return this +} + +ConfigChain.prototype.get = function (key, where) { + if (where) { + where = this.sources[where] + if (where) where = where.data + if (where && Object.hasOwnProperty.call(where, key)) return where[key] + return undefined + } + return this.list[0][key] +} + +ConfigChain.prototype.save = function (where, type, cb) { + if (typeof type === 'function') cb = type, type = null + var target = this.sources[where] + if (!target || !(target.path || target.source) || !target.data) { + // TODO: maybe save() to a url target could be a PUT or something? 
+ // would be easy to swap out with a reddis type thing, too + return this.emit('error', new Error('bad save target: '+where)) + } + + if (target.source) { + var pref = target.prefix || '' + Object.keys(target.data).forEach(function (k) { + target.source[pref + k] = target.data[k] + }) + return this + } + + var type = type || target.type + var data = target.data + if (target.type === 'json') { + data = JSON.stringify(data) + } else { + data = ini.stringify(data) + } + + this._saving ++ + fs.writeFile(target.path, data, 'utf8', function (er) { + this._saving -- + if (er) { + if (cb) return cb(er) + else return this.emit('error', er) + } + if (this._saving === 0) { + if (cb) cb() + this.emit('save') + } + }.bind(this)) + return this +} + +ConfigChain.prototype.addFile = function (file, type, name) { + name = name || file + var marker = {__source__:name} + this.sources[name] = { path: file, type: type } + this.push(marker) + this._await() + fs.readFile(file, 'utf8', function (er, data) { + if (er) this.emit('error', er) + this.addString(data, file, type, marker) + }.bind(this)) + return this +} + +ConfigChain.prototype.addEnv = function (prefix, env, name) { + name = name || 'env' + var data = exports.env(prefix, env) + this.sources[name] = { data: data, source: env, prefix: prefix } + return this.add(data, name) +} + +ConfigChain.prototype.addUrl = function (req, type, name) { + this._await() + var href = url.format(req) + name = name || href + var marker = {__source__:name} + this.sources[name] = { href: href, type: type } + this.push(marker) + http.request(req, function (res) { + var c = [] + var ct = res.headers['content-type'] + if (!type) { + type = ct.indexOf('json') !== -1 ? 'json' + : ct.indexOf('ini') !== -1 ? 'ini' + : href.match(/\.json$/) ? 'json' + : href.match(/\.ini$/) ? 
'ini' + : null + marker.type = type + } + + res.on('data', c.push.bind(c)) + .on('end', function () { + this.addString(Buffer.concat(c), href, type, marker) + }.bind(this)) + .on('error', this.emit.bind(this, 'error')) + + }.bind(this)) + .on('error', this.emit.bind(this, 'error')) + .end() + + return this +} + +ConfigChain.prototype.addString = function (data, file, type, marker) { + data = this.parse(data, file, type) + this.add(data, marker) + return this +} + +ConfigChain.prototype.add = function (data, marker) { + if (marker && typeof marker === 'object') { + var i = this.list.indexOf(marker) + if (i === -1) { + return this.emit('error', new Error('bad marker')) + } + this.splice(i, 1, data) + marker = marker.__source__ + this.sources[marker] = this.sources[marker] || {} + this.sources[marker].data = data + // we were waiting for this. maybe emit 'load' + this._resolve() + } else { + if (typeof marker === 'string') { + this.sources[marker] = this.sources[marker] || {} + this.sources[marker].data = data + } + // trigger the load event if nothing was already going to do so. 
+ this._await() + this.push(data) + process.nextTick(this._resolve.bind(this)) + } + return this +} + +ConfigChain.prototype.parse = exports.parse + +ConfigChain.prototype._await = function () { + this._awaiting++ +} + +ConfigChain.prototype._resolve = function () { + this._awaiting-- + if (this._awaiting === 0) this.emit('load', this) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/LICENCE nodejs-0.11.15/deps/npm/node_modules/config-chain/LICENCE --- nodejs-0.11.13/deps/npm/node_modules/config-chain/LICENCE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/LICENCE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +Copyright (c) 2011 Dominic Tarr + +Permission is hereby granted, free of charge, +to any person obtaining a copy of this software and +associated documentation files (the "Software"), to +deal in the Software without restriction, including +without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom +the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/LICENSE nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,23 @@ +Copyright 2009, 2010, 2011 Isaac Z. Schlueter. +All rights reserved. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/package.json nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/package.json --- nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +{ + "name": "proto-list", + "version": "1.2.3", + "description": "A utility for managing a prototype chain", + "main": "./proto-list.js", + "author": { + "name": "Isaac Z. Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/isaacs/proto-list" + }, + "license": { + "type": "MIT", + "url": "https://github.com/isaacs/proto-list/blob/master/LICENSE" + }, + "devDependencies": { + "tap": "0" + }, + "gitHead": "44d76897176861d176a53ed3f3fc5e05cdee7643", + "bugs": { + "url": "https://github.com/isaacs/proto-list/issues" + }, + "homepage": "https://github.com/isaacs/proto-list", + "_id": "proto-list@1.2.3", + "_shasum": "6235554a1bca1f0d15e3ca12ca7329d5def42bd9", + "_from": "proto-list@~1.2.1", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "6235554a1bca1f0d15e3ca12ca7329d5def42bd9", + "tarball": "http://registry.npmjs.org/proto-list/-/proto-list-1.2.3.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.3.tgz", + "readme": "ERROR: No README data found!" 
+} diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/proto-list.js nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/proto-list.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/proto-list.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/proto-list.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,88 @@ + +module.exports = ProtoList + +function setProto(obj, proto) { + if (typeof Object.setPrototypeOf === "function") + return Object.setPrototypeOf(obj, proto) + else + obj.__proto__ = proto +} + +function ProtoList () { + this.list = [] + var root = null + Object.defineProperty(this, 'root', { + get: function () { return root }, + set: function (r) { + root = r + if (this.list.length) { + setProto(this.list[this.list.length - 1], r) + } + }, + enumerable: true, + configurable: true + }) +} + +ProtoList.prototype = + { get length () { return this.list.length } + , get keys () { + var k = [] + for (var i in this.list[0]) k.push(i) + return k + } + , get snapshot () { + var o = {} + this.keys.forEach(function (k) { o[k] = this.get(k) }, this) + return o + } + , get store () { + return this.list[0] + } + , push : function (obj) { + if (typeof obj !== "object") obj = {valueOf:obj} + if (this.list.length >= 1) { + setProto(this.list[this.list.length - 1], obj) + } + setProto(obj, this.root) + return this.list.push(obj) + } + , pop : function () { + if (this.list.length >= 2) { + setProto(this.list[this.list.length - 2], this.root) + } + return this.list.pop() + } + , unshift : function (obj) { + setProto(obj, this.list[0] || this.root) + return this.list.unshift(obj) + } + , shift : function () { + if (this.list.length === 1) { + setProto(this.list[0], this.root) + } + return this.list.shift() + } + , get : function (key) { + return this.list[0][key] + } + , set : function (key, val, save) { + if (!this.length) 
this.push({}) + if (save && this.list[0].hasOwnProperty(key)) this.push({}) + return this.list[0][key] = val + } + , forEach : function (fn, thisp) { + for (var key in this.list[0]) fn.call(thisp, key, this.list[0][key]) + } + , slice : function () { + return this.list.slice.apply(this.list, arguments) + } + , splice : function () { + // handle injections + var ret = this.list.splice.apply(this.list, arguments) + for (var i = 0, l = this.list.length; i < l; i++) { + setProto(this.list[i], this.list[i + 1] || this.root) + } + return ret + } + } diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/README.md nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/README.md --- nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +A list of objects, bound by their prototype chain. + +Used in npm's config stuff. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/test/basic.js nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/node_modules/proto-list/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/node_modules/proto-list/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,61 @@ +var tap = require("tap") + , test = tap.test + , ProtoList = require("../proto-list.js") + +tap.plan(1) + +tap.test("protoList tests", function (t) { + var p = new ProtoList + p.push({foo:"bar"}) + p.push({}) + p.set("foo", "baz") + t.equal(p.get("foo"), "baz") + + var p = new ProtoList + p.push({foo:"bar"}) + p.set("foo", "baz") + t.equal(p.get("foo"), "baz") + t.equal(p.length, 1) + p.pop() + t.equal(p.length, 0) + p.set("foo", "asdf") + t.equal(p.length, 1) + t.equal(p.get("foo"), "asdf") + p.push({bar:"baz"}) + t.equal(p.length, 2) + t.equal(p.get("foo"), "asdf") + p.shift() + t.equal(p.length, 1) + t.equal(p.get("foo"), undefined) + + + p.unshift({foo:"blo", bar:"rab"}) + p.unshift({foo:"boo"}) + t.equal(p.length, 3) + t.equal(p.get("foo"), "boo") + t.equal(p.get("bar"), "rab") + + var ret = p.splice(1, 1, {bar:"bar"}) + t.same(ret, [{foo:"blo", bar:"rab"}]) + t.equal(p.get("bar"), "bar") + + // should not inherit default object properties + t.equal(p.get('hasOwnProperty'), undefined) + + // unless we give it those. 
+ p.root = {} + t.equal(p.get('hasOwnProperty'), {}.hasOwnProperty) + + p.root = {default:'monkey'} + t.equal(p.get('default'), 'monkey') + + p.push({red:'blue'}) + p.push({red:'blue'}) + p.push({red:'blue'}) + while (p.length) { + t.equal(p.get('default'), 'monkey') + p.shift() + } + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/.npmignore nodejs-0.11.15/deps/npm/node_modules/config-chain/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/config-chain/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +node_modules +node_modules/* +npm_debug.log diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/package.json nodejs-0.11.15/deps/npm/node_modules/config-chain/package.json --- nodejs-0.11.13/deps/npm/node_modules/config-chain/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "config-chain", + "version": "1.1.8", + "description": "HANDLE CONFIGURATION ONCE AND FOR ALL", + "homepage": "http://github.com/dominictarr/config-chain", + "repository": { + "type": "git", + "url": "https://github.com/dominictarr/config-chain.git" + }, + "dependencies": { + "proto-list": "~1.2.1", + "ini": "1" + }, + "devDependencies": { + "tap": "0.3.0" + }, + "author": { + "name": "Dominic Tarr", + "email": "dominic.tarr@gmail.com", + "url": "http://dominictarr.com" + }, + "scripts": { + "test": "tap test/" + }, + "readme": "#config-chain\n\nUSE THIS MODULE TO LOAD ALL YOUR CONFIGURATIONS\n\n``` js\n\n //npm install config-chain\n\n var cc = require('config-chain')\n , opts = require('optimist').argv //ALWAYS USE OPTIMIST FOR COMMAND LINE OPTIONS.\n , env = opts.env || process.env.YOUR_APP_ENV || 'dev' //SET YOUR ENV LIKE THIS.\n\n // EACH ARG TO CONFIGURATOR IS LOADED INTO CONFIGURATION CHAIN\n // EARLIER ITEMS 
OVERIDE LATER ITEMS\n // PUTS COMMAND LINE OPTS FIRST, AND DEFAULTS LAST!\n\n //strings are interpereted as filenames.\n //will be loaded synchronously\n\n var conf =\n cc(\n //OVERRIDE SETTINGS WITH COMMAND LINE OPTS\n opts,\n\n //ENV VARS IF PREFIXED WITH 'myApp_'\n\n cc.env('myApp_'), //myApp_foo = 'like this'\n\n //FILE NAMED BY ENV\n path.join(__dirname, 'config.' + env + '.json'),\n\n //IF `env` is PRODUCTION\n env === 'prod'\n ? path.join(__dirname, 'special.json') //load a special file\n : null //NULL IS IGNORED!\n\n //SUBDIR FOR ENV CONFIG\n path.join(__dirname, 'config', env, 'config.json'),\n\n //SEARCH PARENT DIRECTORIES FROM CURRENT DIR FOR FILE\n cc.find('config.json'),\n\n //PUT DEFAULTS LAST\n {\n host: 'localhost'\n port: 8000\n })\n\n var host = conf.get('host')\n\n // or\n\n var host = conf.store.host\n\n```\n\nFINALLY, EASY FLEXIBLE CONFIGURATIONS!\n\n##see also: [proto-list](https://github.com/isaacs/proto-list/)\n\nWHATS THAT YOU SAY?\n\nYOU WANT A \"CLASS\" SO THAT YOU CAN DO CRAYCRAY JQUERY CRAPS?\n\nEXTEND WITH YOUR OWN FUNCTIONALTY!?\n\n## CONFIGCHAIN LIVES TO SERVE ONLY YOU!\n\n```javascript\nvar cc = require('config-chain')\n\n// all the stuff you did before\nvar config = cc({\n some: 'object'\n },\n cc.find('config.json'),\n cc.env('myApp_')\n )\n // CONFIGS AS A SERVICE, aka \"CaaS\", aka EVERY DEVOPS DREAM OMG!\n .addUrl('http://configurator:1234/my-configs')\n // ASYNC FTW!\n .addFile('/path/to/file.json')\n\n // OBJECTS ARE OK TOO, they're SYNC but they still ORDER RIGHT\n // BECAUSE PROMISES ARE USED BUT NO, NOT *THOSE* PROMISES, JUST\n // ACTUAL PROMISES LIKE YOU MAKE TO YOUR MOM, KEPT OUT OF LOVE\n .add({ another: 'object' })\n\n // DIE A THOUSAND DEATHS IF THIS EVER HAPPENS!!\n .on('error', function (er) {\n // IF ONLY THERE WAS SOMETHIGN HARDER THAN THROW\n // MY SORROW COULD BE ADEQUATELY EXPRESSED. 
/o\\\n throw er\n })\n\n // THROW A PARTY IN YOUR FACE WHEN ITS ALL LOADED!!\n .on('load', function (config) {\n console.awesome('HOLY SHIT!')\n })\n```\n\n# BORING API DOCS\n\n## cc(...args)\n\nMAKE A CHAIN AND ADD ALL THE ARGS.\n\nIf the arg is a STRING, then it shall be a JSON FILENAME.\n\nSYNC I/O!\n\nRETURN THE CHAIN!\n\n## cc.json(...args)\n\nJoin the args INTO A JSON FILENAME!\n\nSYNC I/O!\n\n## cc.find(relativePath)\n\nSEEK the RELATIVE PATH by climbing the TREE OF DIRECTORIES.\n\nRETURN THE FOUND PATH!\n\nSYNC I/O!\n\n## cc.parse(content, file, type)\n\nParse the content string, and guess the type from either the\nspecified type or the filename.\n\nRETURN THE RESULTING OBJECT!\n\nNO I/O!\n\n## cc.env(prefix, env=process.env)\n\nGet all the keys on the provided env object (or process.env) which are\nprefixed by the specified prefix, and put the values on a new object.\n\nRETURN THE RESULTING OBJECT!\n\nNO I/O!\n\n## cc.ConfigChain()\n\nThe ConfigChain class for CRAY CRAY JQUERY STYLE METHOD CHAINING!\n\nOne of these is returned by the main exported function, as well.\n\nIt inherits (prototypically) from\n[ProtoList](https://github.com/isaacs/proto-list/), and also inherits\n(parasitically) from\n[EventEmitter](http://nodejs.org/api/events.html#events_class_events_eventemitter)\n\nIt has all the methods from both, and except where noted, they are\nunchanged.\n\n### LET IT BE KNOWN THAT chain IS AN INSTANCE OF ConfigChain.\n\n## chain.sources\n\nA list of all the places where it got stuff. The keys are the names\npassed to addFile or addUrl etc, and the value is an object with some\ninfo about the data source.\n\n## chain.addFile(filename, type, [name=filename])\n\nFilename is the name of the file. Name is an arbitrary string to be\nused later if you desire. 
Type is either 'ini' or 'json', and will\ntry to guess intelligently if omitted.\n\nLoaded files can be saved later.\n\n## chain.addUrl(url, type, [name=url])\n\nSame as the filename thing, but with a url.\n\nCan't be saved later.\n\n## chain.addEnv(prefix, env, [name='env'])\n\nAdd all the keys from the env object that start with the prefix.\n\n## chain.addString(data, file, type, [name])\n\nParse the string and add it to the set. (Mainly used internally.)\n\n## chain.add(object, [name])\n\nAdd the object to the set.\n\n## chain.root {Object}\n\nThe root from which all the other config objects in the set descend\nprototypically.\n\nPut your defaults here.\n\n## chain.set(key, value, name)\n\nSet the key to the value on the named config object. If name is\nunset, then set it on the first config object in the set. (That is,\nthe one with the highest priority, which was added first.)\n\n## chain.get(key, [name])\n\nGet the key from the named config object explicitly, or from the\nresolved configs if not specified.\n\n## chain.save(name, type)\n\nWrite the named config object back to its origin.\n\nCurrently only supported for env and file config types.\n\nFor files, encode the data according to the type.\n\n## chain.on('save', function () {})\n\nWhen one or more files are saved, emits `save` event when they're all\nsaved.\n\n## chain.on('load', function (chain) {})\n\nWhen the config chain has loaded all the specified files and urls and\nsuch, the 'load' event fires.\n", + "readmeFilename": "readme.markdown", + "bugs": { + "url": "https://github.com/dominictarr/config-chain/issues" + }, + "_id": "config-chain@1.1.8", + "dist": { + "shasum": "0943d0b7227213a20d4eaff4434f4a1c0a052cad", + "tarball": "http://registry.npmjs.org/config-chain/-/config-chain-1.1.8.tgz" + }, + "_from": "config-chain@^1.1.8", + "_npmVersion": "1.3.6", + "_npmUser": { + "name": "dominictarr", + "email": "dominic.tarr@gmail.com" + }, + "maintainers": [ + { + "name": "dominictarr", + "email": 
"dominic.tarr@gmail.com" + }, + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "directories": {}, + "_shasum": "0943d0b7227213a20d4eaff4434f4a1c0a052cad", + "_resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.8.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/readme.markdown nodejs-0.11.15/deps/npm/node_modules/config-chain/readme.markdown --- nodejs-0.11.13/deps/npm/node_modules/config-chain/readme.markdown 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/readme.markdown 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,228 @@ +#config-chain + +USE THIS MODULE TO LOAD ALL YOUR CONFIGURATIONS + +``` js + + //npm install config-chain + + var cc = require('config-chain') + , opts = require('optimist').argv //ALWAYS USE OPTIMIST FOR COMMAND LINE OPTIONS. + , env = opts.env || process.env.YOUR_APP_ENV || 'dev' //SET YOUR ENV LIKE THIS. + + // EACH ARG TO CONFIGURATOR IS LOADED INTO CONFIGURATION CHAIN + // EARLIER ITEMS OVERIDE LATER ITEMS + // PUTS COMMAND LINE OPTS FIRST, AND DEFAULTS LAST! + + //strings are interpereted as filenames. + //will be loaded synchronously + + var conf = + cc( + //OVERRIDE SETTINGS WITH COMMAND LINE OPTS + opts, + + //ENV VARS IF PREFIXED WITH 'myApp_' + + cc.env('myApp_'), //myApp_foo = 'like this' + + //FILE NAMED BY ENV + path.join(__dirname, 'config.' + env + '.json'), + + //IF `env` is PRODUCTION + env === 'prod' + ? path.join(__dirname, 'special.json') //load a special file + : null //NULL IS IGNORED! + + //SUBDIR FOR ENV CONFIG + path.join(__dirname, 'config', env, 'config.json'), + + //SEARCH PARENT DIRECTORIES FROM CURRENT DIR FOR FILE + cc.find('config.json'), + + //PUT DEFAULTS LAST + { + host: 'localhost' + port: 8000 + }) + + var host = conf.get('host') + + // or + + var host = conf.store.host + +``` + +FINALLY, EASY FLEXIBLE CONFIGURATIONS! + +##see also: [proto-list](https://github.com/isaacs/proto-list/) + +WHATS THAT YOU SAY? 
+ +YOU WANT A "CLASS" SO THAT YOU CAN DO CRAYCRAY JQUERY CRAPS? + +EXTEND WITH YOUR OWN FUNCTIONALTY!? + +## CONFIGCHAIN LIVES TO SERVE ONLY YOU! + +```javascript +var cc = require('config-chain') + +// all the stuff you did before +var config = cc({ + some: 'object' + }, + cc.find('config.json'), + cc.env('myApp_') + ) + // CONFIGS AS A SERVICE, aka "CaaS", aka EVERY DEVOPS DREAM OMG! + .addUrl('http://configurator:1234/my-configs') + // ASYNC FTW! + .addFile('/path/to/file.json') + + // OBJECTS ARE OK TOO, they're SYNC but they still ORDER RIGHT + // BECAUSE PROMISES ARE USED BUT NO, NOT *THOSE* PROMISES, JUST + // ACTUAL PROMISES LIKE YOU MAKE TO YOUR MOM, KEPT OUT OF LOVE + .add({ another: 'object' }) + + // DIE A THOUSAND DEATHS IF THIS EVER HAPPENS!! + .on('error', function (er) { + // IF ONLY THERE WAS SOMETHIGN HARDER THAN THROW + // MY SORROW COULD BE ADEQUATELY EXPRESSED. /o\ + throw er + }) + + // THROW A PARTY IN YOUR FACE WHEN ITS ALL LOADED!! + .on('load', function (config) { + console.awesome('HOLY SHIT!') + }) +``` + +# BORING API DOCS + +## cc(...args) + +MAKE A CHAIN AND ADD ALL THE ARGS. + +If the arg is a STRING, then it shall be a JSON FILENAME. + +SYNC I/O! + +RETURN THE CHAIN! + +## cc.json(...args) + +Join the args INTO A JSON FILENAME! + +SYNC I/O! + +## cc.find(relativePath) + +SEEK the RELATIVE PATH by climbing the TREE OF DIRECTORIES. + +RETURN THE FOUND PATH! + +SYNC I/O! + +## cc.parse(content, file, type) + +Parse the content string, and guess the type from either the +specified type or the filename. + +RETURN THE RESULTING OBJECT! + +NO I/O! + +## cc.env(prefix, env=process.env) + +Get all the keys on the provided env object (or process.env) which are +prefixed by the specified prefix, and put the values on a new object. + +RETURN THE RESULTING OBJECT! + +NO I/O! + +## cc.ConfigChain() + +The ConfigChain class for CRAY CRAY JQUERY STYLE METHOD CHAINING! + +One of these is returned by the main exported function, as well. 
+ +It inherits (prototypically) from +[ProtoList](https://github.com/isaacs/proto-list/), and also inherits +(parasitically) from +[EventEmitter](http://nodejs.org/api/events.html#events_class_events_eventemitter) + +It has all the methods from both, and except where noted, they are +unchanged. + +### LET IT BE KNOWN THAT chain IS AN INSTANCE OF ConfigChain. + +## chain.sources + +A list of all the places where it got stuff. The keys are the names +passed to addFile or addUrl etc, and the value is an object with some +info about the data source. + +## chain.addFile(filename, type, [name=filename]) + +Filename is the name of the file. Name is an arbitrary string to be +used later if you desire. Type is either 'ini' or 'json', and will +try to guess intelligently if omitted. + +Loaded files can be saved later. + +## chain.addUrl(url, type, [name=url]) + +Same as the filename thing, but with a url. + +Can't be saved later. + +## chain.addEnv(prefix, env, [name='env']) + +Add all the keys from the env object that start with the prefix. + +## chain.addString(data, file, type, [name]) + +Parse the string and add it to the set. (Mainly used internally.) + +## chain.add(object, [name]) + +Add the object to the set. + +## chain.root {Object} + +The root from which all the other config objects in the set descend +prototypically. + +Put your defaults here. + +## chain.set(key, value, name) + +Set the key to the value on the named config object. If name is +unset, then set it on the first config object in the set. (That is, +the one with the highest priority, which was added first.) + +## chain.get(key, [name]) + +Get the key from the named config object explicitly, or from the +resolved configs if not specified. + +## chain.save(name, type) + +Write the named config object back to its origin. + +Currently only supported for env and file config types. + +For files, encode the data according to the type. 
+ +## chain.on('save', function () {}) + +When one or more files are saved, emits `save` event when they're all +saved. + +## chain.on('load', function (chain) {}) + +When the config chain has loaded all the specified files and urls and +such, the 'load' event fires. diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/broken.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/broken.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/broken.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/broken.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ + + +var cc = require('..') +var assert = require('assert') + + +//throw on invalid json +assert.throws(function () { + cc(__dirname + '/broken.json') +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/broken.json nodejs-0.11.15/deps/npm/node_modules/config-chain/test/broken.json --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/broken.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/broken.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +{ + "name": "config-chain", + "version": "0.3.0", + "description": "HANDLE CONFIGURATION ONCE AND FOR ALL", + "homepage": "http://github.com/dominictarr/config-chain", + "repository": { + "type": "git", + "url": "https://github.com/dominictarr/config-chain.git" + } + //missing , and then this comment. 
this json is intensionally invalid + "dependencies": { + "proto-list": "1", + "ini": "~1.0.2" + }, + "bundleDependencies": ["ini"], + "REM": "REMEMBER TO REMOVE BUNDLING WHEN/IF ISAACS MERGES ini#7", + "author": "Dominic Tarr (http://dominictarr.com)", + "scripts": { + "test": "node test/find-file.js && node test/ini.js && node test/env.js" + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/chain-class.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/chain-class.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/chain-class.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/chain-class.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,100 @@ +var test = require('tap').test +var CC = require('../index.js').ConfigChain + +var env = { foo_blaz : 'blzaa', foo_env : 'myenv' } +var jsonObj = { blaz: 'json', json: true } +var iniObj = { 'x.y.z': 'xyz', blaz: 'ini' } + +var fs = require('fs') +var ini = require('ini') + +fs.writeFileSync('/tmp/config-chain-class.json', JSON.stringify(jsonObj)) +fs.writeFileSync('/tmp/config-chain-class.ini', ini.stringify(iniObj)) + +var http = require('http') +var reqs = 0 +http.createServer(function (q, s) { + if (++reqs === 2) this.close() + if (q.url === '/json') { + // make sure that the requests come back from the server + // out of order. they should still be ordered properly + // in the resulting config object set. 
+ setTimeout(function () { + s.setHeader('content-type', 'application/json') + s.end(JSON.stringify({ + blaz: 'http', + http: true, + json: true + })) + }, 200) + } else { + s.setHeader('content-type', 'application/ini') + s.end(ini.stringify({ + blaz: 'http', + http: true, + ini: true, + json: false + })) + } +}).listen(1337) + +test('basic class test', function (t) { + var cc = new CC() + var expectlist = + [ { blaz: 'json', json: true }, + { 'x.y.z': 'xyz', blaz: 'ini' }, + { blaz: 'blzaa', env: 'myenv' }, + { blaz: 'http', http: true, json: true }, + { blaz: 'http', http: true, ini: true, json: false } ] + + cc.addFile('/tmp/config-chain-class.json') + .addFile('/tmp/config-chain-class.ini') + .addEnv('foo_', env) + .addUrl('http://localhost:1337/json') + .addUrl('http://localhost:1337/ini') + .on('load', function () { + t.same(cc.list, expectlist) + t.same(cc.snapshot, { blaz: 'json', + json: true, + 'x.y.z': 'xyz', + env: 'myenv', + http: true, + ini: true }) + + cc.del('blaz', '/tmp/config-chain-class.json') + t.same(cc.snapshot, { blaz: 'ini', + json: true, + 'x.y.z': 'xyz', + env: 'myenv', + http: true, + ini: true }) + cc.del('blaz') + t.same(cc.snapshot, { json: true, + 'x.y.z': 'xyz', + env: 'myenv', + http: true, + ini: true }) + cc.shift() + t.same(cc.snapshot, { 'x.y.z': 'xyz', + env: 'myenv', + http: true, + json: true, + ini: true }) + cc.shift() + t.same(cc.snapshot, { env: 'myenv', + http: true, + json: true, + ini: true }) + cc.shift() + t.same(cc.snapshot, { http: true, + json: true, + ini: true }) + cc.shift() + t.same(cc.snapshot, { http: true, + ini: true, + json: false }) + cc.shift() + t.same(cc.snapshot, {}) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/env.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/env.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/env.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/env.js 2015-01-20 
21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +var cc = require('..') +var assert = require('assert') + +assert.deepEqual({ + hello: true +}, cc.env('test_', { + 'test_hello': true, + 'ignore_this': 4, + 'ignore_test_this_too': [] +})) diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/find-file.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/find-file.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/find-file.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/find-file.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ + +var fs = require('fs') + , assert = require('assert') + , objx = { + rand: Math.random() + } + +fs.writeFileSync('/tmp/random-test-config.json', JSON.stringify(objx)) + +var cc = require('../') +var path = cc.find('tmp/random-test-config.json') + +assert.equal(path, '/tmp/random-test-config.json') \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/get.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/get.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/get.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/get.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +var cc = require("../"); + +var chain = cc() + , name = "forFun"; + +chain + .add({ + __sample:"for fun only" + }, name) + .on("load", function() { + //It throw exception here + console.log(chain.get("__sample", name)); + //But if I drop the name param, it run normally and return as expected: "for fun only" + //console.log(chain.get("__sample")); + }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/ignore-unfound-file.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/ignore-unfound-file.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/ignore-unfound-file.js 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/config-chain/test/ignore-unfound-file.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ + +var cc = require('..') + +//should not throw +cc(__dirname, 'non_existing_file') diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/ini.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/ini.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/ini.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/ini.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ + + +var cc =require('..') +var INI = require('ini') +var assert = require('assert') + +function test(obj) { + + var _json, _ini + var json = cc.parse (_json = JSON.stringify(obj)) + var ini = cc.parse (_ini = INI.stringify(obj)) +console.log(_ini, _json) + assert.deepEqual(json, ini) +} + + +test({hello: true}) + diff -Nru nodejs-0.11.13/deps/npm/node_modules/config-chain/test/save.js nodejs-0.11.15/deps/npm/node_modules/config-chain/test/save.js --- nodejs-0.11.13/deps/npm/node_modules/config-chain/test/save.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/config-chain/test/save.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +var CC = require('../index.js').ConfigChain +var test = require('tap').test + +var f1 = '/tmp/f1.ini' +var f2 = '/tmp/f2.json' + +var ini = require('ini') + +var f1data = {foo: {bar: 'baz'}, bloo: 'jaus'} +var f2data = {oof: {rab: 'zab'}, oolb: 'suaj'} + +var fs = require('fs') + +fs.writeFileSync(f1, ini.stringify(f1data), 'utf8') +fs.writeFileSync(f2, JSON.stringify(f2data), 'utf8') + +test('test saving and loading ini files', function (t) { + new CC() + .add({grelb:'blerg'}, 'opt') + .addFile(f1, 'ini', 'inifile') + .addFile(f2, 'json', 'jsonfile') + .on('load', function (cc) { + + t.same(cc.snapshot, { grelb: 'blerg', + bloo: 'jaus', + foo: { bar: 'baz' }, + oof: { rab: 'zab' }, + oolb: 'suaj' }) + + t.same(cc.list, [ { grelb: 'blerg' }, + 
{ bloo: 'jaus', foo: { bar: 'baz' } }, + { oof: { rab: 'zab' }, oolb: 'suaj' } ]) + + cc.set('grelb', 'brelg', 'opt') + .set('foo', 'zoo', 'inifile') + .set('oof', 'ooz', 'jsonfile') + .save('inifile') + .save('jsonfile') + .on('save', function () { + t.equal(fs.readFileSync(f1, 'utf8'), + "bloo = jaus\nfoo = zoo\n") + t.equal(fs.readFileSync(f2, 'utf8'), + "{\"oof\":\"ooz\",\"oolb\":\"suaj\"}") + + t.same(cc.snapshot, { grelb: 'brelg', + bloo: 'jaus', + foo: 'zoo', + oof: 'ooz', + oolb: 'suaj' }) + + t.same(cc.list, [ { grelb: 'brelg' }, + { bloo: 'jaus', foo: 'zoo' }, + { oof: 'ooz', oolb: 'suaj' } ]) + + t.pass('ok') + t.end() + }) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/dezalgo.js nodejs-0.11.15/deps/npm/node_modules/dezalgo/dezalgo.js --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/dezalgo.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/dezalgo.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +var wrappy = require('wrappy') +module.exports = wrappy(dezalgo) + +var asap = require('asap') + +function dezalgo (cb) { + var sync = true + asap(function () { + sync = false + }) + + return function zalgoSafe() { + var args = arguments + var me = this + if (sync) + asap(function() { + cb.apply(me, args) + }) + else + cb.apply(me, args) + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/asap.js nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/asap.js --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/asap.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/asap.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,113 @@ + +// Use the fastest possible means to execute a task in a future turn +// of the event loop. 
+ +// linked list of tasks (single, with head node) +var head = {task: void 0, next: null}; +var tail = head; +var flushing = false; +var requestFlush = void 0; +var isNodeJS = false; + +function flush() { + /* jshint loopfunc: true */ + + while (head.next) { + head = head.next; + var task = head.task; + head.task = void 0; + var domain = head.domain; + + if (domain) { + head.domain = void 0; + domain.enter(); + } + + try { + task(); + + } catch (e) { + if (isNodeJS) { + // In node, uncaught exceptions are considered fatal errors. + // Re-throw them synchronously to interrupt flushing! + + // Ensure continuation if the uncaught exception is suppressed + // listening "uncaughtException" events (as domains does). + // Continue in next event to avoid tick recursion. + if (domain) { + domain.exit(); + } + setTimeout(flush, 0); + if (domain) { + domain.enter(); + } + + throw e; + + } else { + // In browsers, uncaught exceptions are not fatal. + // Re-throw them asynchronously to avoid slow-downs. + setTimeout(function() { + throw e; + }, 0); + } + } + + if (domain) { + domain.exit(); + } + } + + flushing = false; +} + +if (typeof process !== "undefined" && process.nextTick) { + // Node.js before 0.9. Note that some fake-Node environments, like the + // Mocha test runner, introduce a `process` global without a `nextTick`. 
+ isNodeJS = true; + + requestFlush = function () { + process.nextTick(flush); + }; + +} else if (typeof setImmediate === "function") { + // In IE10, Node.js 0.9+, or https://github.com/NobleJS/setImmediate + if (typeof window !== "undefined") { + requestFlush = setImmediate.bind(window, flush); + } else { + requestFlush = function () { + setImmediate(flush); + }; + } + +} else if (typeof MessageChannel !== "undefined") { + // modern browsers + // http://www.nonblocking.io/2011/06/windownexttick.html + var channel = new MessageChannel(); + channel.port1.onmessage = flush; + requestFlush = function () { + channel.port2.postMessage(0); + }; + +} else { + // old browsers + requestFlush = function () { + setTimeout(flush, 0); + }; +} + +function asap(task) { + tail = tail.next = { + task: task, + domain: isNodeJS && process.domain, + next: null + }; + + if (!flushing) { + flushing = true; + requestFlush(); + } +}; + +module.exports = asap; + diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/LICENSE.md nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/LICENSE.md --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/LICENSE.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/LICENSE.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ + +Copyright 2009–2013 Contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/package.json nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/package.json --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,39 @@ +{ + "name": "asap", + "version": "1.0.0", + "description": "High-priority task queue for Node.js and browsers", + "keywords": [ + "event", + "task", + "queue" + ], + "licenses": [ + { + "type": "MIT", + "url": "https://github.com/kriskowal/asap/raw/master/LICENSE.md" + } + ], + "main": "asap", + "readme": "\n# ASAP\n\nThis `asap` CommonJS package contains a single `asap` module that\nexports a single `asap` function that executes a function **as soon as\npossible**.\n\n```javascript\nasap(function () {\n // ...\n});\n```\n\nMore formally, ASAP provides a fast event queue that will execute tasks\nuntil it is empty before yielding to the JavaScript engine's underlying\nevent-loop. 
When the event queue becomes non-empty, ASAP schedules a\nflush event, preferring for that event to occur before the JavaScript\nengine has an opportunity to perform IO tasks or rendering, thus making\nthe first task and subsequent tasks semantically indistinguishable.\nASAP uses a variety of techniques to preserve this invariant on\ndifferent versions of browsers and NodeJS.\n\nBy design, ASAP can starve the event loop on the theory that, if there\nis enough work to be done synchronously, albeit in separate events, long\nenough to starve input or output, it is a strong indicator that the\nprogram needs to push back on scheduling more work.\n\nTake care. ASAP can sustain infinite recursive calls indefinitely\nwithout warning. This is behaviorally equivalent to an infinite loop.\nIt will not halt from a stack overflow, but it *will* chew through\nmemory (which is an oddity I cannot explain at this time). Just as with\ninfinite loops, you can monitor a Node process for this behavior with a\nheart-beat signal. As with infinite loops, a very small amount of\ncaution goes a long way to avoiding problems.\n\n```javascript\nfunction loop() {\n asap(loop);\n}\nloop();\n```\n\nASAP is distinct from `setImmediate` in that it does not suffer the\noverhead of returning a handle and being possible to cancel. For a\n`setImmediate` shim, consider [setImmediate][].\n\n[setImmediate]: https://github.com/noblejs/setimmediate\n\nIf a task throws an exception, it will not interrupt the flushing of\nhigh-priority tasks. 
The exception will be postponed to a later,\nlow-priority event to avoid slow-downs, when the underlying JavaScript\nengine will treat it as it does any unhandled exception.\n\n## Heritage\n\nASAP has been factored out of the [Q][] asynchronous promise library.\nIt originally had a naïve implementation in terms of `setTimeout`, but\n[Malte Ubl][NonBlocking] provided an insight that `postMessage` might be\nuseful for creating a high-priority, no-delay event dispatch hack.\nSince then, Internet Explorer proposed and implemented `setImmediate`.\nRobert Kratić began contributing to Q by measuring the performance of\nthe internal implementation of `asap`, paying particular attention to\nerror recovery. Domenic, Robert, and I collectively settled on the\ncurrent strategy of unrolling the high-priority event queue internally\nregardless of what strategy we used to dispatch the potentially\nlower-priority flush event. Domenic went on to make ASAP cooperate with\nNodeJS domains.\n\n[Q]: https://github.com/kriskowal/q\n[NonBlocking]: http://www.nonblocking.io/2011/06/windownexttick.html\n\nFor further reading, Nicholas Zakas provided a thorough article on [The\nCase for setImmediate][NCZ].\n\n[NCZ]: http://www.nczonline.net/blog/2013/07/09/the-case-for-setimmediate/\n\n## License\n\nCopyright 2009-2013 by Contributors\nMIT License (enclosed)\n\n", + "readmeFilename": "README.md", + "_id": "asap@1.0.0", + "dist": { + "shasum": "b2a45da5fdfa20b0496fc3768cc27c12fa916a7d", + "tarball": "http://registry.npmjs.org/asap/-/asap-1.0.0.tgz" + }, + "_from": "asap@>=1.0.0 <2.0.0", + "_npmVersion": "1.2.15", + "_npmUser": { + "name": "kriskowal", + "email": "kris.kowal@cixar.com" + }, + "maintainers": [ + { + "name": "kriskowal", + "email": "kris.kowal@cixar.com" + } + ], + "directories": {}, + "_shasum": "b2a45da5fdfa20b0496fc3768cc27c12fa916a7d", + "_resolved": "https://registry.npmjs.org/asap/-/asap-1.0.0.tgz" +} diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/README.md nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/README.md --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/node_modules/asap/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/node_modules/asap/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ + +# ASAP + +This `asap` CommonJS package contains a single `asap` module that +exports a single `asap` function that executes a function **as soon as +possible**. + +```javascript +asap(function () { + // ... +}); +``` + +More formally, ASAP provides a fast event queue that will execute tasks +until it is empty before yielding to the JavaScript engine's underlying +event-loop. When the event queue becomes non-empty, ASAP schedules a +flush event, preferring for that event to occur before the JavaScript +engine has an opportunity to perform IO tasks or rendering, thus making +the first task and subsequent tasks semantically indistinguishable. +ASAP uses a variety of techniques to preserve this invariant on +different versions of browsers and NodeJS. + +By design, ASAP can starve the event loop on the theory that, if there +is enough work to be done synchronously, albeit in separate events, long +enough to starve input or output, it is a strong indicator that the +program needs to push back on scheduling more work. + +Take care. ASAP can sustain infinite recursive calls indefinitely +without warning. This is behaviorally equivalent to an infinite loop. +It will not halt from a stack overflow, but it *will* chew through +memory (which is an oddity I cannot explain at this time). Just as with +infinite loops, you can monitor a Node process for this behavior with a +heart-beat signal. As with infinite loops, a very small amount of +caution goes a long way to avoiding problems. 
+ +```javascript +function loop() { + asap(loop); +} +loop(); +``` + +ASAP is distinct from `setImmediate` in that it does not suffer the +overhead of returning a handle and being possible to cancel. For a +`setImmediate` shim, consider [setImmediate][]. + +[setImmediate]: https://github.com/noblejs/setimmediate + +If a task throws an exception, it will not interrupt the flushing of +high-priority tasks. The exception will be postponed to a later, +low-priority event to avoid slow-downs, when the underlying JavaScript +engine will treat it as it does any unhandled exception. + +## Heritage + +ASAP has been factored out of the [Q][] asynchronous promise library. +It originally had a naïve implementation in terms of `setTimeout`, but +[Malte Ubl][NonBlocking] provided an insight that `postMessage` might be +useful for creating a high-priority, no-delay event dispatch hack. +Since then, Internet Explorer proposed and implemented `setImmediate`. +Robert Kratić began contributing to Q by measuring the performance of +the internal implementation of `asap`, paying particular attention to +error recovery. Domenic, Robert, and I collectively settled on the +current strategy of unrolling the high-priority event queue internally +regardless of what strategy we used to dispatch the potentially +lower-priority flush event. Domenic went on to make ASAP cooperate with +NodeJS domains. + +[Q]: https://github.com/kriskowal/q +[NonBlocking]: http://www.nonblocking.io/2011/06/windownexttick.html + +For further reading, Nicholas Zakas provided a thorough article on [The +Case for setImmediate][NCZ]. 
+ +[NCZ]: http://www.nczonline.net/blog/2013/07/09/the-case-for-setimmediate/ + +## License + +Copyright 2009-2013 by Contributors +MIT License (enclosed) + diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/package.json nodejs-0.11.15/deps/npm/node_modules/dezalgo/package.json --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +{ + "name": "dezalgo", + "version": "1.0.1", + "description": "Contain async insanity so that the dark pony lord doesn't eat souls", + "main": "dezalgo.js", + "directories": { + "test": "test" + }, + "dependencies": { + "asap": "^1.0.0", + "wrappy": "1" + }, + "devDependencies": { + "tap": "^0.4.11" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/dezalgo" + }, + "keywords": [ + "async", + "zalgo", + "the dark pony", + "he comes", + "asynchrony of all holy and good", + "T̯̪ͅo̯͖̹ ̻̮̖̲͢i̥̖n̢͈͇̝͍v͏͉ok̭̬̝ͅe̞͍̩̫͍̩͝ ̩̮̖̟͇͉́t͔͔͎̗h͏̗̟e̘͉̰̦̠̞͓ ͕h͉̟͎̪̠̱͠ḭ̮̩v̺͉͇̩e̵͖-̺̪m͍i̜n̪̲̲̲̮d̷ ̢r̠̼̯̹̦̦͘ͅe͓̳͓̙p̺̗̫͙͘ͅr͔̰͜e̴͓̞s͉̩̩͟ͅe͏̣n͚͇̗̭̺͍tì͙̣n͏̖̥̗͎̰̪g̞͓̭̱̯̫̕ ̣̱͜ͅc̦̰̰̠̮͎͙̀hao̺̜̻͍͙ͅs͉͓̘.͎̼̺̼͕̹͘", + "̠̞̱̰I͖͇̝̻n̦̰͍̰̟v̤̺̫̳̭̼̗͘ò̹̟̩̩͚k̢̥̠͍͉̦̬i̖͓͔̮̱̻͘n̶̳͙̫͎g̖̯̣̲̪͉ ̞͎̗͕͚ͅt̲͕̘̺̯̗̦h̘̦̲̜̻e̳͎͉̬͙ ̴̞̪̲̥f̜̯͓͓̭̭͢e̱̘͔̮e̜̤l̺̱͖̯͓͙͈͢i̵̦̬͉͔̫͚͕n͉g̨͖̙̙̹̹̟̤ ͉̪o̞̠͍̪̰͙ͅf̬̲̺ ͔͕̲͕͕̲̕c̙͉h̝͔̩̙̕ͅa̲͖̻̗̹o̥̼̫s̝̖̜̝͚̫̟.̺͚ ̸̱̲W̶̥̣͖̦i͏̤̬̱̳̣ͅt͉h̗̪̪ ̷̱͚̹̪ǫ͕̗̣̳̦͎u̼̦͔̥̮̕ţ͖͎̻͔͉ ̴͎̩òr̹̰̖͉͈͝d̷̲̦̖͓e̲͓̠r", + "̧͚̜͓̰̭̭Ṯ̫̹̜̮̟̮͝h͚̘̩̘̖̰́e ̥̘͓͉͔͙̼N̟̜̣̘͔̪e̞̞̤͢z̰̖̘͇p̠͟e̺̱̣͍͙̝ṛ̘̬͔̙͇̠d͝ḭ̯̱̥̗̩a̛ͅn͏̦ ̷̥hi̥v̖̳̹͉̮̱͝e̹̪̘̖̰̟-̴͙͓͚̜̻mi̗̺̻͙̺ͅn̪̯͈d ͏̘͓̫̳ͅơ̹͔̳̖̣͓f͈̹̘ ͕ͅc̗̤̠̜̮̥̥h̡͍̩̭̫͚̱a̤͉̤͔͜os͕̤̼͍̲̀ͅ.̡̱ ̦Za̯̱̗̭͍̣͚l̗͉̰̤g͏̣̭̬̗̲͖ͅo̶̭̩̳̟͈.̪̦̰̳", + "H̴̱̦̗̬̣͓̺e̮ ͉̠̰̞͎̖͟ẁh̛̺̯ͅo̖̫͡ ̢Ẁa̡̗i̸t͖̣͉̀ş͔̯̩ ̤̦̮͇̞̦̲B͎̭͇̦̼e̢hin͏͙̟̪d̴̰͓̻̣̮͕ͅ T͖̮̕h͖e̘̺̰̙͘ ̥Ẁ̦͔̻͚a̞͖̪͉l̪̠̻̰̣̠l̲͎͞", + "Z̘͍̼͎̣͔͝Ą̲̜̱̱̹̤͇L̶̝̰̭͔G͍̖͍O̫͜ͅ!̼̤ͅ", + "H̝̪̜͓̀̌̂̒E̢̙̠̣ ̴̳͇̥̟̠͍̐C̹̓̑̐̆͝Ó̶̭͓̚M̬̼Ĕ̖̤͔͔̟̹̽̿̊ͥ̍ͫS̻̰̦̻̖̘̱̒ͪ͌̅͟" + ], + "author": { + "name": 
"Isaac Z. Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/dezalgo/issues" + }, + "homepage": "https://github.com/npm/dezalgo", + "gitHead": "0a5eee75c179611f8b67f663015d68bb517e57d2", + "_id": "dezalgo@1.0.1", + "_shasum": "12bde135060807900d5a7aebb607c2abb7c76937", + "_from": "dezalgo@latest", + "_npmVersion": "2.0.0", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "12bde135060807900d5a7aebb607c2abb7c76937", + "tarball": "http://registry.npmjs.org/dezalgo/-/dezalgo-1.0.1.tgz" + }, + "_resolved": "https://registry.npmjs.org/dezalgo/-/dezalgo-1.0.1.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/README.md nodejs-0.11.15/deps/npm/node_modules/dezalgo/README.md --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +# dezalgo + +Contain async insanity so that the dark pony lord doesn't eat souls + +See [this blog +post](http://blog.izs.me/post/59142742143/designing-apis-for-asynchrony). + +## USAGE + +Pass a callback to `dezalgo` and it will ensure that it is *always* +called in a future tick, and never in this tick. 
+ +```javascript +var dz = require('dezalgo') + +var cache = {} +function maybeSync(arg, cb) { + cb = dz(cb) + + // this will actually defer to nextTick + if (cache[arg]) cb(null, cache[arg]) + + fs.readFile(arg, function (er, data) { + // since this is *already* defered, it will call immediately + if (er) cb(er) + cb(null, cache[arg] = data) + }) +} +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/dezalgo/test/basic.js nodejs-0.11.15/deps/npm/node_modules/dezalgo/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/dezalgo/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/dezalgo/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +var test = require('tap').test +var dz = require('../dezalgo.js') + +test('the dark pony', function(t) { + + var n = 0 + function foo(i, cb) { + cb = dz(cb) + if (++n % 2) cb(true, i) + else process.nextTick(cb.bind(null, false, i)) + } + + var called = 0 + var order = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9] + var o = 0 + for (var i = 0; i < 10; i++) { + foo(i, function(cached, i) { + t.equal(i, order[o++]) + t.equal(i % 2, cached ? 0 : 1) + called++ + }) + t.equal(called, 0) + } + + setTimeout(function() { + t.equal(called, 10) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/editor/index.js nodejs-0.11.15/deps/npm/node_modules/editor/index.js --- nodejs-0.11.13/deps/npm/node_modules/editor/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/editor/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -9,18 +9,12 @@ var ed = /^win/.test(process.platform) ? 
'notepad' : 'vim'; var editor = opts.editor || process.env.VISUAL || process.env.EDITOR || ed; + var args = editor.split(/\s+/); + var bin = args.shift(); - setRaw(true); - var ps = spawn(editor, [ file ], { customFds : [ 0, 1, 2 ] }); + var ps = spawn(bin, args.concat([ file ]), { stdio: 'inherit' }); ps.on('exit', function (code, sig) { - setRaw(false); - process.stdin.pause(); if (typeof cb === 'function') cb(code, sig) }); }; - -var tty = require('tty'); -function setRaw (mode) { - process.stdin.setRawMode ? process.stdin.setRawMode(mode) : tty.setRawMode(mode); -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/editor/LICENSE nodejs-0.11.15/deps/npm/node_modules/editor/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/editor/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/editor/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,18 +1,21 @@ -This software is released under the MIT license: +Copyright 2013 James Halliday (mail@substack.net) -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +This project is free software released under the MIT license: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/editor/package.json nodejs-0.11.15/deps/npm/node_modules/editor/package.json --- nodejs-0.11.13/deps/npm/node_modules/editor/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/editor/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "editor", - "version": "0.0.5", + "version": "0.1.0", "description": "launch $EDITOR in your program", "main": "index.js", "directories": { @@ -33,11 +33,26 @@ "engine": { "node": ">=0.6" }, - "readme": "editor\n======\n\nLaunch $EDITOR in your program.\n\nexample\n=======\n\n``` js\nvar editor = require('editor');\neditor('beep.json', function (code, sig) {\n console.log('finished editing with code ' + code);\n});\n```\n\n***\n\n```\n$ node edit.js\n```\n\n![editor](http://substack.net/images/screenshots/editor.png)\n\n```\nfinished editing with code 0\n```\n\nmethods\n=======\n\n``` js\nvar editor = require('editor')\n```\n\neditor(file, opts={}, cb)\n-------------------------\n\nLaunch the `$EDITOR` (or `opts.editor`) for `file`.\n\nWhen the editor exits, `cb(code, sig)` fires.\n\ninstall\n=======\n\nWith [npm](http://npmjs.org) do:\n\n```\nnpm install editor\n```\n\nlicense\n=======\n\nMIT\n", - "readmeFilename": "README.markdown", "bugs": { "url": "https://github.com/substack/node-editor/issues" }, - "_id": "editor@0.0.5", - "_from": "editor@latest" + "_id": "editor@0.1.0", + "dist": { + "shasum": "542f4662c6a8c88e862fc11945e204e51981b9a1", + "tarball": "http://registry.npmjs.org/editor/-/editor-0.1.0.tgz" + }, + "_from": "editor@latest", + "_npmVersion": "1.3.21", + "_npmUser": { + "name": "substack", + "email": "mail@substack.net" + }, + "maintainers": [ + { + "name": "substack", + "email": "mail@substack.net" + } + ], + "_shasum": "542f4662c6a8c88e862fc11945e204e51981b9a1", + "_resolved": "https://registry.npmjs.org/editor/-/editor-0.1.0.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream/lib/file-writer.js 
nodejs-0.11.15/deps/npm/node_modules/fstream/lib/file-writer.js --- nodejs-0.11.13/deps/npm/node_modules/fstream/lib/file-writer.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream/lib/file-writer.js 2015-01-20 21:22:17.000000000 +0000 @@ -72,7 +72,11 @@ // allow 2 buffered writes, because otherwise there's just too // much stop and go bs. - return ret || (me._stream._queue && me._stream._queue.length <= 2) + if (ret === false && me._stream._queue) { + return me._stream._queue.length <= 2; + } else { + return ret; + } } FileWriter.prototype.end = function (c) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream/lib/reader.js nodejs-0.11.15/deps/npm/node_modules/fstream/lib/reader.js --- nodejs-0.11.13/deps/npm/node_modules/fstream/lib/reader.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream/lib/reader.js 2015-01-20 21:22:17.000000000 +0000 @@ -210,13 +210,15 @@ return } - if (me._paused) { + if (me._paused && me.type !== "Directory") { me.once("resume", go) return } var ev = events[e ++] - if (!ev) return me._read() + if (!ev) { + return me._read() + } me.emit(ev, props) go() })() diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream/lib/writer.js nodejs-0.11.15/deps/npm/node_modules/fstream/lib/writer.js --- nodejs-0.11.13/deps/npm/node_modules/fstream/lib/writer.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream/lib/writer.js 2015-01-20 21:22:17.000000000 +0000 @@ -246,6 +246,9 @@ Writer.prototype._finish = function () { var me = this + if (me._finishing) return + me._finishing = true + // console.error(" W Finish", me._path, me.size) // set up all the things. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream/package.json nodejs-0.11.15/deps/npm/node_modules/fstream/package.json --- nodejs-0.11.13/deps/npm/node_modules/fstream/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "fstream", "description": "Advanced file system stream things", - "version": "0.1.25", + "version": "1.0.2", "repository": { "type": "git", "url": "git://github.com/isaacs/fstream.git" @@ -16,10 +16,10 @@ "node": ">=0.6" }, "dependencies": { - "rimraf": "2", - "mkdirp": "0.3", - "graceful-fs": "~2.0.0", - "inherits": "~2.0.0" + "graceful-fs": "3", + "inherits": "~2.0.0", + "mkdirp": ">=0.5 0", + "rimraf": "2" }, "devDependencies": { "tap": "" @@ -28,16 +28,30 @@ "test": "tap examples/*.js" }, "license": "BSD", - "readme": "Like FS streams, but with stat on them, and supporting directories and\nsymbolic links, as well as normal files. Also, you can use this to set\nthe stats on a file, even if you don't change its contents, or to create\na symlink, etc.\n\nSo, for example, you can \"write\" a directory, and it'll call `mkdir`. You\ncan specify a uid and gid, and it'll call `chown`. You can specify a\n`mtime` and `atime`, and it'll call `utimes`. You can call it a symlink\nand provide a `linkpath` and it'll call `symlink`.\n\nNote that it won't automatically resolve symbolic links. So, if you\ncall `fstream.Reader('/some/symlink')` then you'll get an object\nthat stats and then ends immediately (since it has no data). 
To follow\nsymbolic links, do this: `fstream.Reader({path:'/some/symlink', follow:\ntrue })`.\n\nThere are various checks to make sure that the bytes emitted are the\nsame as the intended size, if the size is set.\n\n## Examples\n\n```javascript\nfstream\n .Writer({ path: \"path/to/file\"\n , mode: 0755\n , size: 6\n })\n .write(\"hello\\n\")\n .end()\n```\n\nThis will create the directories if they're missing, and then write\n`hello\\n` into the file, chmod it to 0755, and assert that 6 bytes have\nbeen written when it's done.\n\n```javascript\nfstream\n .Writer({ path: \"path/to/file\"\n , mode: 0755\n , size: 6\n , flags: \"a\"\n })\n .write(\"hello\\n\")\n .end()\n```\n\nYou can pass flags in, if you want to append to a file.\n\n```javascript\nfstream\n .Writer({ path: \"path/to/symlink\"\n , linkpath: \"./file\"\n , SymbolicLink: true\n , mode: \"0755\" // octal strings supported\n })\n .end()\n```\n\nIf isSymbolicLink is a function, it'll be called, and if it returns\ntrue, then it'll treat it as a symlink. If it's not a function, then\nany truish value will make a symlink, or you can set `type:\n'SymbolicLink'`, which does the same thing.\n\nNote that the linkpath is relative to the symbolic link location, not\nthe parent dir or cwd.\n\n```javascript\nfstream\n .Reader(\"path/to/dir\")\n .pipe(fstream.Writer(\"path/to/other/dir\"))\n```\n\nThis will do like `cp -Rp path/to/dir path/to/other/dir`. If the other\ndir exists and isn't a directory, then it'll emit an error. It'll also\nset the uid, gid, mode, etc. to be identical. 
In this way, it's more\nlike `rsync -a` than simply a copy.\n", - "readmeFilename": "README.md", + "gitHead": "b3b74e92ef4a91ae206fab90b7998c7cd2e4290d", "bugs": { "url": "https://github.com/isaacs/fstream/issues" }, "homepage": "https://github.com/isaacs/fstream", - "_id": "fstream@0.1.25", + "_id": "fstream@1.0.2", + "_shasum": "56930ff1b4d4d7b1a689c8656b3a11e744ab92c6", + "_from": "fstream@1.0.2", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "deef2db7c7898357c2b37202212a9e5b36abc732" + "shasum": "56930ff1b4d4d7b1a689c8656b3a11e744ab92c6", + "tarball": "http://registry.npmjs.org/fstream/-/fstream-1.0.2.tgz" }, - "_from": "fstream@0.1.25", - "_resolved": "https://registry.npmjs.org/fstream/-/fstream-0.1.25.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.2.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/fstream-npm.js nodejs-0.11.15/deps/npm/node_modules/fstream-npm/fstream-npm.js --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/fstream-npm.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/fstream-npm.js 2015-01-20 21:22:17.000000000 +0000 @@ -93,6 +93,12 @@ // readme files should never be ignored. if (entry.match(/^readme(\.[^\.]*)$/i)) return true + // license files should never be ignored. + if (entry.match(/^(license|licence)(\.[^\.]*)?$/i)) return true + + // changelogs should never be ignored. + if (entry.match(/^(changes|changelog|history)(\.[^\.]*)?$/i)) return true + // special rules. see below. 
if (entry === "node_modules" && this.packageRoot) return true diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/LICENCE nodejs-0.11.15/deps/npm/node_modules/fstream-npm/LICENCE --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/LICENCE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/LICENCE 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) Isaac Z. Schlueter -All rights reserved. - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS -``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/LICENSE nodejs-0.11.15/deps/npm/node_modules/fstream-npm/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/ignore.js nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/ignore.js --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/ignore.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/ignore.js 2015-01-20 21:22:17.000000000 +0000 @@ -85,18 +85,18 @@ this.pause() - var then = function then (er) { + var then = function (er) { if (errState) return if (er) return this.emit("error", errState = er) if (-- count === 0) { this.filterEntries() this.resume() + } else { + this.addIgnoreFile(newIg[newIg.length - count], then) } }.bind(this) - newIg.forEach(function (ig) { - this.addIgnoreFile(ig, then) - }, this) + this.addIgnoreFile(newIg[0], then) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/LICENSE nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,15 @@ -Copyright (c) Isaac Z. Schlueter ("Author") -All rights reserved. +The ISC License -The BSD License +Copyright (c) Isaac Z. Schlueter and Contributors -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/package.json nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/package.json --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "fstream-ignore", "description": "A thing for ignoring files based on globs", - "version": "0.0.7", + "version": "1.0.1", "repository": { "type": "git", "url": "git://github.com/isaacs/fstream-ignore.git" @@ -16,25 +16,40 @@ "test": "tap test/*.js" }, "dependencies": { - "minimatch": "~0.2.0", - "fstream": "~0.1.17", - "inherits": "2" + "fstream": "^1.0.0", + "inherits": "2", + "minimatch": "^1.0.0" }, "devDependencies": { "tap": "", "rimraf": "", "mkdirp": "" }, - "license": "BSD", - "readme": "# fstream-ignore\n\nA fstream DirReader that filters out files that match globs in `.ignore`\nfiles throughout the tree, like how git ignores files based on a\n`.gitignore` file.\n\nHere's an example:\n\n```javascript\nvar Ignore = require(\"fstream-ignore\")\nIgnore({ path: __dirname\n , ignoreFiles: [\".ignore\", \".gitignore\"]\n })\n .on(\"child\", function (c) {\n console.error(c.path.substr(c.root.path.length + 1))\n })\n .pipe(tar.Pack())\n .pipe(fs.createWriteStream(\"foo.tar\"))\n```\n\nThis will tar up the files in __dirname into `foo.tar`, ignoring\nanything matched by the globs in any .iginore or .gitignore file.\n", - "readmeFilename": "README.md", + "license": "ISC", + "gitHead": "290f2b621fa4f8fe3eec97307d22527fa2065375", "bugs": { "url": "https://github.com/isaacs/fstream-ignore/issues" }, - "_id": "fstream-ignore@0.0.7", + "homepage": "https://github.com/isaacs/fstream-ignore", + "_id": "fstream-ignore@1.0.1", + "_shasum": "153df36c4fa2cb006fb915dc71ac9d75f6a17c82", + "_from": 
"fstream-ignore@>=1.0.0 <2.0.0", + "_npmVersion": "1.4.22", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "eea3033f0c3728139de7b57ab1b0d6d89c353c63" + "shasum": "153df36c4fa2cb006fb915dc71ac9d75f6a17c82", + "tarball": "http://registry.npmjs.org/fstream-ignore/-/fstream-ignore-1.0.1.tgz" }, - "_from": "fstream-ignore@~0.0.5", - "_resolved": "https://registry.npmjs.org/fstream-ignore/-/fstream-ignore-0.0.7.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/fstream-ignore/-/fstream-ignore-1.0.1.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/test/read-file-order.js nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/test/read-file-order.js --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/test/read-file-order.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/node_modules/fstream-ignore/test/read-file-order.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ +var IgnoreFile = require("../") +, fs = require('fs') + +// set the ignores just for this test +var c = require("./common.js") +c.ignores({ ".gitignore": ["a/b/c/abc"] }) +c.ignores({ ".ignore": ["*", "!a/b/c/abc"] }) + +// the only files we expect to see +var expected = + [ "/a" + , "/a/b" + , "/a/b/c" + , "/a/b/c/abc" ] + +var originalReadFile = fs.readFile +, parallelCount = 0 +, firstCall + +// Overwrite fs.readFile so that when .gitignore and .ignore are read in +// parallel, .ignore will always be read first. 
+fs.readFile = function (filename, options, callback) { + if (typeof options === 'function') { + callback = options + options = false + } + + parallelCount++ + + process.nextTick(function () { + if (parallelCount > 1) { + if (!firstCall) { + return firstCall = function (cb) { + originalReadFile(filename, options, function (err, data) { + callback(err, data) + if (cb) cb() + }) + } + } + + if (filename.indexOf('.gitignore') !== -1) { + firstCall(function () { + originalReadFile(filename, options, callback) + }) + } else { + originalReadFile(filename, options, function (err, data) { + callback(err, data) + firstCall() + }) + } + } else { + originalReadFile(filename, options, callback) + parallelCount = 0 + } + }) +} + +require("tap").test("read file order", function (t) { + t.pass("start") + + IgnoreFile({ path: __dirname + "/fixtures" + , ignoreFiles: [".gitignore", ".ignore"] }) + .on("ignoreFile", function (e) { + console.error("ignore file!", e) + }) + .on("child", function (e) { + var p = e.path.substr(e.root.path.length) + var i = expected.indexOf(p) + if (i === -1) { + t.fail("unexpected file found", {f: p}) + } else { + t.pass(p) + expected.splice(i, 1) + } + }) + .on("close", function () { + fs.readFile = originalReadFile + t.notOk(expected.length, "all expected files should be seen") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fstream-npm/package.json nodejs-0.11.15/deps/npm/node_modules/fstream-npm/package.json --- nodejs-0.11.13/deps/npm/node_modules/fstream-npm/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fstream-npm/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,22 +6,42 @@ }, "name": "fstream-npm", "description": "fstream class for creating npm packages", - "version": "0.1.6", + "version": "1.0.1", "repository": { "type": "git", "url": "git://github.com/isaacs/fstream-npm.git" }, "main": "./fstream-npm.js", "dependencies": { - "fstream-ignore": "~0.0.5", + "fstream-ignore": 
"^1.0.0", "inherits": "2" }, - "license": "BSD", - "readme": "# fstream-npm\n\nThis is an fstream DirReader class that will read a directory and filter\nthings according to the semantics of what goes in an npm package.\n\nFor example:\n\n```javascript\n// This will print out all the files that would be included\n// by 'npm publish' or 'npm install' of this directory.\n\nvar FN = require(\"fstream-npm\")\nFN({ path: \"./\" })\n .on(\"child\", function (e) {\n console.error(e.path.substr(e.root.path.length + 1))\n })\n```\n\n", - "readmeFilename": "README.md", + "license": "ISC", + "gitHead": "4a95e1903f93dc122320349bb55e367ddd08ad6b", "bugs": { "url": "https://github.com/isaacs/fstream-npm/issues" }, - "_id": "fstream-npm@0.1.6", - "_from": "fstream-npm@latest" + "homepage": "https://github.com/isaacs/fstream-npm", + "_id": "fstream-npm@1.0.1", + "scripts": {}, + "_shasum": "1e35c77f0fa24f5d6367e6d447ae7d6ddb482db2", + "_from": "fstream-npm@>=1.0.1 <1.1.0", + "_npmVersion": "2.1.3", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "1e35c77f0fa24f5d6367e6d447ae7d6ddb482db2", + "tarball": "http://registry.npmjs.org/fstream-npm/-/fstream-npm-1.0.1.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/fstream-npm/-/fstream-npm-1.0.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/.eslintrc nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/.eslintrc --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/.eslintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/.eslintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "env" : { + "node" : true + }, + "rules" : { + "curly" : 0, + "no-lonely-if" : 1, + "no-mixed-requires" : 0, + "no-underscore-dangle" : 0, + "no-unused-vars" : [2, {"vars" : "all", "args" : "after-used"}], + "no-use-before-define" : [2, 
"nofunc"], + "quotes" : [1, "double", "avoid-escape"], + "semi" : [2, "never"], + "space-after-keywords" : 1, + "space-infix-ops" : 0, + "strict" : 0 + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/.npmignore nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/package.json nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/package.json --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +{ + "name": "fs-vacuum", + "version": "1.2.1", + "description": "recursively remove empty directories -- to a point", + "main": "vacuum.js", + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/fs-vacuum.git" + }, + "keywords": [ + "rm", + "rimraf", + "clean" + ], + "author": { + "name": "Forrest L Norvell", + "email": "ogd@aoaioxxysz.net" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/fs-vacuum/issues" + }, + "homepage": "https://github.com/npm/fs-vacuum", + "devDependencies": { + "mkdirp": "^0.5.0", + "tap": "^0.4.11", + "tmp": "0.0.23" + }, + "dependencies": { + "graceful-fs": "^3.0.2", + "rimraf": "^2.2.8" + }, + "readme": "# fs-vacuum\n\nRemove the empty branches of a directory tree, optionally up to (but not\nincluding) a specified base directory. 
Optionally nukes the leaf directory.\n\n## Usage\n\n```javascript\nvar logger = require(\"npmlog\");\nvar vacuum = require(\"fs-vacuum\");\n\nvar options = {\n base : \"/path/to/my/tree/root\",\n purge : true,\n log : logger.silly.bind(logger, \"myCleanup\")\n};\n\n/* Assuming there are no other files or directories in \"out\", \"to\", or \"my\",\n * the final path will just be \"/path/to/my/tree/root\".\n */\nvacuum(\"/path/to/my/tree/root/out/to/my/files\", function (error) {\n if (error) console.error(\"Unable to cleanly vacuum:\", error.message);\n});\n```\n# vacuum(directory, options, callback)\n\n* `directory` {String} Leaf node to remove. **Must be a directory, symlink, or file.**\n* `options` {Object}\n * `base` {String} No directories at or above this level of the filesystem will be removed.\n * `purge` {Boolean} If set, nuke the whole leaf directory, including its contents.\n * `log` {Function} A logging function that takes `npmlog`-compatible argument lists.\n* `callback` {Function} Function to call once vacuuming is complete.\n * `error` {Error} What went wrong along the way, if anything.\n", + "readmeFilename": "README.md", + "gitHead": "bad24b21c45d86b3da991f2c3d058ef03546d83e", + "_id": "fs-vacuum@1.2.1", + "_shasum": "1bc3c62da30d6272569b8b9089c9811abb0a600b", + "_from": "fs-vacuum@>=1.2.1-0 <1.3.0-0" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/README.md nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/README.md --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +# fs-vacuum + +Remove the empty branches of a directory tree, optionally up to (but not +including) a specified base directory. Optionally nukes the leaf directory. 
+ +## Usage + +```javascript +var logger = require("npmlog"); +var vacuum = require("fs-vacuum"); + +var options = { + base : "/path/to/my/tree/root", + purge : true, + log : logger.silly.bind(logger, "myCleanup") +}; + +/* Assuming there are no other files or directories in "out", "to", or "my", + * the final path will just be "/path/to/my/tree/root". + */ +vacuum("/path/to/my/tree/root/out/to/my/files", function (error) { + if (error) console.error("Unable to cleanly vacuum:", error.message); +}); +``` +# vacuum(directory, options, callback) + +* `directory` {String} Leaf node to remove. **Must be a directory, symlink, or file.** +* `options` {Object} + * `base` {String} No directories at or above this level of the filesystem will be removed. + * `purge` {Boolean} If set, nuke the whole leaf directory, including its contents. + * `log` {Function} A logging function that takes `npmlog`-compatible argument lists. +* `callback` {Function} Function to call once vacuuming is complete. + * `error` {Error} What went wrong along the way, if anything. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/arguments.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/arguments.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/arguments.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/arguments.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +var test = require("tap").test + +var vacuum = require("../vacuum.js") + +test("vacuum throws on missing parameters", function (t) { + t.throws(vacuum, "called with no parameters") + t.throws(function () { vacuum("directory", {}) }, "called with no callback") + + t.end() +}) + +test('vacuum throws on incorrect types ("Forrest is pedantic" section)', function (t) { + t.throws(function () { + vacuum({}, {}, function () {}) + }, "called with path parameter of incorrect type") + t.throws(function () { + vacuum("directory", "directory", function () {}) + }, "called with options of wrong type") + t.throws(function () { + vacuum("directory", {}, "whoops") + }, "called with callback that isn't callable") + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/base-leaf-mismatch.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/base-leaf-mismatch.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/base-leaf-mismatch.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/base-leaf-mismatch.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +var test = require("tap").test + +var vacuum = require("../vacuum.js") + +test("vacuum errors when base is set and path is not under it", function (t) { + vacuum("/a/made/up/path", {base : "/root/elsewhere"}, function (er) { + t.ok(er, "got an error") + t.equal( + er.message, + "/a/made/up/path is not a child of /root/elsewhere", + "got the expected error message" + ) + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-file-no-purge.js 
nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-file-no-purge.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-file-no-purge.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-file-no-purge.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,78 @@ +var path = require("path") + +var test = require("tap").test +var statSync = require("graceful-fs").statSync +var writeFile = require("graceful-fs").writeFile +var readdirSync = require("graceful-fs").readdirSync +var mkdtemp = require("tmp").dir +var mkdirp = require("mkdirp") + +var vacuum = require("../vacuum.js") + +// CONSTANTS +var TEMP_OPTIONS = { + unsafeCleanup : true, + mode : "0700" +} +var SHORT_PATH = path.join("i", "am", "a", "path") +var PARTIAL_PATH = path.join(SHORT_PATH, "that", "ends", "at", "a") +var FULL_PATH = path.join(PARTIAL_PATH, "file") + +var messages = [] +function log() { messages.push(Array.prototype.slice.call(arguments).join(" ")) } + +var testBase, partialPath, fullPath +test("xXx setup xXx", function (t) { + mkdtemp(TEMP_OPTIONS, function (er, tmpdir) { + t.ifError(er, "temp directory exists") + + testBase = path.resolve(tmpdir, SHORT_PATH) + partialPath = path.resolve(tmpdir, PARTIAL_PATH) + fullPath = path.resolve(tmpdir, FULL_PATH) + + mkdirp(partialPath, function (er) { + t.ifError(er, "made test path") + + writeFile(fullPath, new Buffer("hi"), function (er) { + t.ifError(er, "made file") + + t.end() + }) + }) + }) +}) + +test("remove up to a point", function (t) { + vacuum(fullPath, {purge : false, base : testBase, log : log}, function (er) { + t.ifError(er, "cleaned up to base") + + t.equal(messages.length, 6, "got 5 removal & 1 finish message") + t.equal(messages[5], "finished vacuuming up to " + testBase) + + var stat + var verifyPath = fullPath + + function verify() { stat = statSync(verifyPath) } + + // handle the file separately + t.throws(verify, verifyPath + " cannot be statted") + 
t.notOk(stat && stat.isFile(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + + for (var i = 0; i < 4; i++) { + t.throws(verify, verifyPath + " cannot be statted") + t.notOk(stat && stat.isDirectory(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + } + + t.doesNotThrow(function () { + stat = statSync(testBase) + }, testBase + " can be statted") + t.ok(stat && stat.isDirectory(), testBase + " is still a directory") + + var files = readdirSync(testBase) + t.equal(files.length, 0, "nothing left in base directory") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-link-no-purge.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-link-no-purge.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-link-no-purge.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-link-no-purge.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,78 @@ +var path = require("path") + +var test = require("tap").test +var statSync = require("graceful-fs").statSync +var symlinkSync = require("graceful-fs").symlinkSync +var readdirSync = require("graceful-fs").readdirSync +var mkdtemp = require("tmp").dir +var mkdirp = require("mkdirp") + +var vacuum = require("../vacuum.js") + +// CONSTANTS +var TEMP_OPTIONS = { + unsafeCleanup : true, + mode : "0700" +} +var SHORT_PATH = path.join("i", "am", "a", "path") +var TARGET_PATH = path.join("target-link", "in", "the", "middle") +var PARTIAL_PATH = path.join(SHORT_PATH, "with", "a") +var FULL_PATH = path.join(PARTIAL_PATH, "link") +var EXPANDO_PATH = path.join(SHORT_PATH, "with", "a", "link", "in", "the", "middle") + +var messages = [] +function log() { messages.push(Array.prototype.slice.call(arguments).join(" ")) } + +var testBase, targetPath, partialPath, fullPath, expandoPath +test("xXx setup xXx", function (t) { + mkdtemp(TEMP_OPTIONS, function (er, tmpdir) { + 
t.ifError(er, "temp directory exists") + + testBase = path.resolve(tmpdir, SHORT_PATH) + targetPath = path.resolve(tmpdir, TARGET_PATH) + partialPath = path.resolve(tmpdir, PARTIAL_PATH) + fullPath = path.resolve(tmpdir, FULL_PATH) + expandoPath = path.resolve(tmpdir, EXPANDO_PATH) + + mkdirp(partialPath, function (er) { + t.ifError(er, "made test path") + + mkdirp(targetPath, function (er) { + t.ifError(er, "made target path") + + symlinkSync(path.join(tmpdir, "target-link"), fullPath) + + t.end() + }) + }) + }) +}) + +test("remove up to a point", function (t) { + vacuum(expandoPath, {purge : false, base : testBase, log : log}, function (er) { + t.ifError(er, "cleaned up to base") + + t.equal(messages.length, 7, "got 6 removal & 1 finish message") + t.equal(messages[6], "finished vacuuming up to " + testBase) + + var stat + var verifyPath = expandoPath + function verify() { stat = statSync(verifyPath) } + + for (var i = 0; i < 6; i++) { + t.throws(verify, verifyPath + " cannot be statted") + t.notOk(stat && stat.isDirectory(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + } + + t.doesNotThrow(function () { + stat = statSync(testBase) + }, testBase + " can be statted") + t.ok(stat && stat.isDirectory(), testBase + " is still a directory") + + var files = readdirSync(testBase) + t.equal(files.length, 0, "nothing left in base directory") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-no-purge.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-no-purge.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-no-purge.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-no-purge.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,61 @@ +var path = require("path") + +var test = require("tap").test +var statSync = require("graceful-fs").statSync +var mkdtemp = require("tmp").dir +var mkdirp = require("mkdirp") + +var 
vacuum = require("../vacuum.js") + +// CONSTANTS +var TEMP_OPTIONS = { + unsafeCleanup : true, + mode : "0700" +} +var SHORT_PATH = path.join("i", "am", "a", "path") +var LONG_PATH = path.join(SHORT_PATH, "of", "a", "certain", "length") + +var messages = [] +function log() { messages.push(Array.prototype.slice.call(arguments).join(" ")) } + +var testPath, testBase +test("xXx setup xXx", function (t) { + mkdtemp(TEMP_OPTIONS, function (er, tmpdir) { + t.ifError(er, "temp directory exists") + + testBase = path.resolve(tmpdir, SHORT_PATH) + testPath = path.resolve(tmpdir, LONG_PATH) + + mkdirp(testPath, function (er) { + t.ifError(er, "made test path") + + t.end() + }) + }) +}) + +test("remove up to a point", function (t) { + vacuum(testPath, {purge : false, base : testBase, log : log}, function (er) { + t.ifError(er, "cleaned up to base") + + t.equal(messages.length, 5, "got 4 removal & 1 finish message") + t.equal(messages[4], "finished vacuuming up to " + testBase) + + var stat + var verifyPath = testPath + function verify() { stat = statSync(verifyPath) } + + for (var i = 0; i < 4; i++) { + t.throws(verify, verifyPath + " cannot be statted") + t.notOk(stat && stat.isDirectory(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + } + + t.doesNotThrow(function () { + stat = statSync(testBase) + }, testBase + " can be statted") + t.ok(stat && stat.isDirectory(), testBase + " is still a directory") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-with-link-purge.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-with-link-purge.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-with-link-purge.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-with-link-purge.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,78 @@ +var path = require("path") + +var test = require("tap").test +var statSync = 
require("graceful-fs").statSync +var writeFileSync = require("graceful-fs").writeFileSync +var symlinkSync = require("graceful-fs").symlinkSync +var mkdtemp = require("tmp").dir +var mkdirp = require("mkdirp") + +var vacuum = require("../vacuum.js") + +// CONSTANTS +var TEMP_OPTIONS = { + unsafeCleanup : true, + mode : "0700" +} +var SHORT_PATH = path.join("i", "am", "a", "path") +var TARGET_PATH = "link-target" +var FIRST_FILE = path.join(TARGET_PATH, "monsieurs") +var SECOND_FILE = path.join(TARGET_PATH, "mesdames") +var PARTIAL_PATH = path.join(SHORT_PATH, "with", "a", "definite") +var FULL_PATH = path.join(PARTIAL_PATH, "target") + +var messages = [] +function log() { messages.push(Array.prototype.slice.call(arguments).join(" ")) } + +var testBase, partialPath, fullPath, targetPath +test("xXx setup xXx", function (t) { + mkdtemp(TEMP_OPTIONS, function (er, tmpdir) { + t.ifError(er, "temp directory exists") + + testBase = path.resolve(tmpdir, SHORT_PATH) + targetPath = path.resolve(tmpdir, TARGET_PATH) + partialPath = path.resolve(tmpdir, PARTIAL_PATH) + fullPath = path.resolve(tmpdir, FULL_PATH) + + mkdirp(partialPath, function (er) { + t.ifError(er, "made test path") + + mkdirp(targetPath, function (er) { + t.ifError(er, "made target path") + + writeFileSync(path.resolve(tmpdir, FIRST_FILE), new Buffer("c'est vraiment joli")) + writeFileSync(path.resolve(tmpdir, SECOND_FILE), new Buffer("oui oui")) + symlinkSync(targetPath, fullPath) + + t.end() + }) + }) + }) +}) + +test("remove up to a point", function (t) { + vacuum(fullPath, {purge : true, base : testBase, log : log}, function (er) { + t.ifError(er, "cleaned up to base") + + t.equal(messages.length, 5, "got 4 removal & 1 finish message") + t.equal(messages[0], "purging " + fullPath) + t.equal(messages[4], "finished vacuuming up to " + testBase) + + var stat + var verifyPath = fullPath + function verify() { stat = statSync(verifyPath) } + + for (var i = 0; i < 4; i++) { + t.throws(verify, verifyPath + " 
cannot be statted") + t.notOk(stat && stat.isDirectory(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + } + + t.doesNotThrow(function () { + stat = statSync(testBase) + }, testBase + " can be statted") + t.ok(stat && stat.isDirectory(), testBase + " is still a directory") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-with-purge.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-with-purge.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/no-entries-with-purge.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/no-entries-with-purge.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +var path = require("path") + +var test = require("tap").test +var statSync = require("graceful-fs").statSync +var writeFileSync = require("graceful-fs").writeFileSync +var mkdtemp = require("tmp").dir +var mkdirp = require("mkdirp") + +var vacuum = require("../vacuum.js") + +// CONSTANTS +var TEMP_OPTIONS = { + unsafeCleanup : true, + mode : "0700" +} +var SHORT_PATH = path.join("i", "am", "a", "path") +var LONG_PATH = path.join(SHORT_PATH, "of", "a", "certain", "kind") +var FIRST_FILE = path.join(LONG_PATH, "monsieurs") +var SECOND_FILE = path.join(LONG_PATH, "mesdames") + +var messages = [] +function log() { messages.push(Array.prototype.slice.call(arguments).join(" ")) } + +var testPath, testBase +test("xXx setup xXx", function (t) { + mkdtemp(TEMP_OPTIONS, function (er, tmpdir) { + t.ifError(er, "temp directory exists") + + testBase = path.resolve(tmpdir, SHORT_PATH) + testPath = path.resolve(tmpdir, LONG_PATH) + + mkdirp(testPath, function (er) { + t.ifError(er, "made test path") + + writeFileSync(path.resolve(tmpdir, FIRST_FILE), new Buffer("c'est vraiment joli")) + writeFileSync(path.resolve(tmpdir, SECOND_FILE), new Buffer("oui oui")) + t.end() + }) + }) +}) + +test("remove up to a point", function (t) { + vacuum(testPath, {purge : 
true, base : testBase, log : log}, function (er) { + t.ifError(er, "cleaned up to base") + + t.equal(messages.length, 5, "got 4 removal & 1 finish message") + t.equal(messages[0], "purging " + testPath) + t.equal(messages[4], "finished vacuuming up to " + testBase) + + var stat + var verifyPath = testPath + function verify() { stat = statSync(verifyPath) } + + for (var i = 0; i < 4; i++) { + t.throws(verify, verifyPath + " cannot be statted") + t.notOk(stat && stat.isDirectory(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + } + + t.doesNotThrow(function () { + stat = statSync(testBase) + }, testBase + " can be statted") + t.ok(stat && stat.isDirectory(), testBase + " is still a directory") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/other-directories-no-purge.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/other-directories-no-purge.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/test/other-directories-no-purge.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/test/other-directories-no-purge.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,76 @@ +var path = require("path") + +var test = require("tap").test +var statSync = require("graceful-fs").statSync +var mkdtemp = require("tmp").dir +var mkdirp = require("mkdirp") + +var vacuum = require("../vacuum.js") + +// CONSTANTS +var TEMP_OPTIONS = { + unsafeCleanup : true, + mode : "0700" +} +var SHORT_PATH = path.join("i", "am", "a", "path") +var REMOVE_PATH = path.join(SHORT_PATH, "of", "a", "certain", "length") +var OTHER_PATH = path.join(SHORT_PATH, "of", "no", "qualities") + +var messages = [] +function log() { messages.push(Array.prototype.slice.call(arguments).join(" ")) } + +var testBase, testPath, otherPath +test("xXx setup xXx", function (t) { + mkdtemp(TEMP_OPTIONS, function (er, tmpdir) { + t.ifError(er, "temp directory exists") + + testBase = path.resolve(tmpdir, SHORT_PATH) + testPath 
= path.resolve(tmpdir, REMOVE_PATH) + otherPath = path.resolve(tmpdir, OTHER_PATH) + + mkdirp(testPath, function (er) { + t.ifError(er, "made test path") + + mkdirp(otherPath, function (er) { + t.ifError(er, "made other path") + + t.end() + }) + }) + }) +}) + +test("remove up to a point", function (t) { + vacuum(testPath, {purge : false, base : testBase, log : log}, function (er) { + t.ifError(er, "cleaned up to base") + + t.equal(messages.length, 4, "got 3 removal & 1 finish message") + t.equal( + messages[3], "quitting because other entries in " + testBase + "/of", + "got expected final message" + ) + + var stat + var verifyPath = testPath + function verify() { stat = statSync(verifyPath) } + + for (var i = 0; i < 3; i++) { + t.throws(verify, verifyPath + " cannot be statted") + t.notOk(stat && stat.isDirectory(), verifyPath + " is totally gone") + verifyPath = path.dirname(verifyPath) + } + + t.doesNotThrow(function () { + stat = statSync(otherPath) + }, otherPath + " can be statted") + t.ok(stat && stat.isDirectory(), otherPath + " is still a directory") + + var intersection = path.join(testBase, "of") + t.doesNotThrow(function () { + stat = statSync(intersection) + }, intersection + " can be statted") + t.ok(stat && stat.isDirectory(), intersection + " is still a directory") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/vacuum.js nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/vacuum.js --- nodejs-0.11.13/deps/npm/node_modules/fs-vacuum/vacuum.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-vacuum/vacuum.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,104 @@ +var assert = require("assert") +var dirname = require("path").dirname +var resolve = require("path").resolve + +var rimraf = require("rimraf") +var lstat = require("graceful-fs").lstat +var readdir = require("graceful-fs").readdir +var rmdir = require("graceful-fs").rmdir +var unlink = require("graceful-fs").unlink + +module.exports = 
vacuum + +function vacuum(leaf, options, cb) { + assert(typeof leaf === "string", "must pass in path to remove") + assert(typeof cb === "function", "must pass in callback") + + if (!options) options = {} + assert(typeof options === "object", "options must be an object") + + var log = options.log ? options.log : function () {} + + var base = options.base + if (base && resolve(leaf).indexOf(resolve(base)) !== 0) { + return cb(new Error(resolve(leaf) + " is not a child of " + resolve(base))) + } + + lstat(leaf, function (error, stat) { + if (error) { + if (error.code === "ENOENT") return cb(null) + + log(error.stack) + return cb(error) + } + + if (!(stat && (stat.isDirectory() || stat.isSymbolicLink() || stat.isFile()))) { + log(leaf, "is not a directory, file, or link") + return cb(new Error(leaf + " is not a directory, file, or link")) + } + + if (options.purge) { + log("purging", leaf) + rimraf(leaf, function (error) { + if (error) return cb(error) + + next(dirname(leaf)) + }) + } + else if (!stat.isDirectory()) { + log("removing", leaf) + unlink(leaf, function (error) { + if (error) return cb(error) + + next(dirname(leaf)) + }) + } + else { + next(leaf) + } + }) + + function next(branch) { + // either we've reached the base or we've reached the root + if ((base && resolve(branch) === resolve(base)) || branch === dirname(branch)) { + log("finished vacuuming up to", branch) + return cb(null) + } + + readdir(branch, function (error, files) { + if (error) { + if (error.code === "ENOENT") return cb(null) + + log("unable to check directory", branch, "due to", error.message) + return cb(error) + } + + if (files.length > 0) { + log("quitting because other entries in", branch) + return cb(null) + } + + log("removing", branch) + lstat(branch, function (error, stat) { + if (error) { + if (error.code === "ENOENT") return cb(null) + + log("unable to lstat", branch, "due to", error.message) + return cb(error) + } + + var remove = stat.isDirectory() ? 
rmdir : unlink + remove(branch, function (error) { + if (error) { + if (error.code === "ENOENT") return cb(null) + + log("unable to remove", branch, "due to", error.message) + return cb(error) + } + + next(dirname(branch)) + }) + }) + }) + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-write-stream-atomic/index.js nodejs-0.11.15/deps/npm/node_modules/fs-write-stream-atomic/index.js --- nodejs-0.11.13/deps/npm/node_modules/fs-write-stream-atomic/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-write-stream-atomic/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,96 @@ +var fs = require('graceful-fs') +var util = require('util') +var crypto = require('crypto') + +function md5hex () { + var hash = crypto.createHash('md5') + for (var ii=0; ii=1.0.2 <1.1.0", + "_npmVersion": "2.1.0", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "fe0c6cec75256072b2fef8180d97e309fe3f5efb", + "tarball": "http://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.2.tgz" + }, + "_resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.2.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-write-stream-atomic/README.md nodejs-0.11.15/deps/npm/node_modules/fs-write-stream-atomic/README.md --- nodejs-0.11.13/deps/npm/node_modules/fs-write-stream-atomic/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-write-stream-atomic/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +# fs-write-stream-atomic + +Like `fs.createWriteStream(...)`, but atomic. + +Writes to a tmp file and does an atomic `fs.rename` to move it into +place when it's done. 
+ +First rule of debugging: **It's always a race condition.** + +## USAGE + +```javascript +var fsWriteStreamAtomic = require('fs-write-stream-atomic') +// options are optional. +var write = fsWriteStreamAtomic('output.txt', options) +var read = fs.createReadStream('input.txt') +read.pipe(write) + +// When the write stream emits a 'finish' or 'close' event, +// you can be sure that it is moved into place, and contains +// all the bytes that were written to it, even if something else +// was writing to `output.txt` at the same time. +``` + +### `fsWriteStreamAtomic(filename, [options])` + +* `filename` {String} The file we want to write to +* `options` {Object} + * `chown` {Object} User and group to set ownership after write + * `uid` {Number} + * `gid` {Number} + * `encoding` {String} default = 'utf8' + * `mode` {Number} default = `0666` + * `flags` {String} default = `'w'` + diff -Nru nodejs-0.11.13/deps/npm/node_modules/fs-write-stream-atomic/test/basic.js nodejs-0.11.15/deps/npm/node_modules/fs-write-stream-atomic/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/fs-write-stream-atomic/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/fs-write-stream-atomic/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,89 @@ +var test = require('tap').test +var writeStream = require('../index.js') +var fs = require('fs') +var path = require('path') + +test('basic', function (t) { + // open 10 write streams to the same file. 
+ // then write to each of them, and to the target + // and verify at the end that each of them does their thing + var target = path.resolve(__dirname, 'test.txt') + var n = 10 + + var streams = [] + for (var i = 0; i < n; i++) { + var s = writeStream(target) + s.on('finish', verifier('finish')) + s.on('close', verifier('close')) + streams.push(s) + } + + var verifierCalled = 0 + function verifier (ev) { return function () { + if (ev === 'close') + t.equal(this.__emittedFinish, true) + else { + this.__emittedFinish = true + t.equal(ev, 'finish') + } + + // make sure that one of the atomic streams won. + var res = fs.readFileSync(target, 'utf8') + var lines = res.trim().split(/\n/) + lines.forEach(function (line) { + var first = lines[0].match(/\d+$/)[0] + var cur = line.match(/\d+$/)[0] + t.equal(cur, first) + }) + + var resExpr = /^first write \d+\nsecond write \d+\nthird write \d+\nfinal write \d+\n$/ + t.similar(res, resExpr) + + // should be called once for each close, and each finish + if (++verifierCalled === n * 2) { + t.end() + } + }} + + // now write something to each stream. + streams.forEach(function (stream, i) { + stream.write('first write ' + i + '\n') + }) + + // wait a sec for those writes to go out. + setTimeout(function () { + // write something else to the target. + fs.writeFileSync(target, 'brutality!\n') + + // write some more stuff. + streams.forEach(function (stream, i) { + stream.write('second write ' + i + '\n') + }) + + setTimeout(function () { + // Oops! Deleted the file! + fs.unlinkSync(target) + + // write some more stuff. 
+ streams.forEach(function (stream, i) { + stream.write('third write ' + i + '\n') + }) + + setTimeout(function () { + fs.writeFileSync(target, 'brutality TWO!\n') + streams.forEach(function (stream, i) { + stream.end('final write ' + i + '\n') + }) + }, 50) + }, 50) + }, 50) +}) + +test('cleanup', function (t) { + fs.readdirSync(__dirname).filter(function (f) { + return f.match(/^test.txt/) + }).forEach(function (file) { + fs.unlinkSync(path.resolve(__dirname, file)) + }) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/History.md nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/History.md --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/History.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/History.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ - -1.1.1 / 2013-04-23 -================== - - * package.json: Move test stuff to devDeps - -1.1.0 / 2013-04-19 -================== - - * Add support for gist urls diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/index.js nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/index.js --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,8 +1,9 @@ -var re = /^(?:https?:\/\/|git:\/\/)?(?:[^@]+@)?(gist.github.com|github.com)[:\/]([^\/]+\/[^\/]+?|[0-9]+)$/ - -module.exports = function(url){ +// convert git:// form url to github URL, e.g., +// git://github.com/bcoe/foo.git +// https://github.com/bcoe/foo. 
+function githubUrlFromGit(url, opts){ try { - var m = re.exec(url.replace(/\.git$/, '')); + var m = re(opts).exec(url.replace(/\.git(#.*)?$/, '')); var host = m[1]; var path = m[2]; return 'https://' + host + '/' + path; @@ -10,3 +11,22 @@ // ignore } }; + +// generate the git:// parsing regex +// with options, e.g., the ability +// to specify multiple GHE domains. +function re(opts) { + opts = opts || {}; + // whitelist of URLs that should be treated as GitHub repos. + var baseUrls = ['gist.github.com', 'github.com'].concat(opts.extraBaseUrls || []); + // build regex from whitelist. + return new RegExp( + /^(?:https?:\/\/|git:\/\/|git\+ssh:\/\/|git\+https:\/\/)?(?:[^@]+@)?/.source + + '(' + baseUrls.join('|') + ')' + + /[:\/]([^\/]+\/[^\/]+?|[0-9]+)$/.source + ); +} + +githubUrlFromGit.re = re(); + +module.exports = githubUrlFromGit; diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/LICENSE nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2013 TJ Holowaychuk + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/package.json nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/package.json --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,57 @@ { "name": "github-url-from-git", - "version": "1.1.1", + "version": "1.4.0", "description": "Parse a github git url and return the github repo url", "main": "index.js", "scripts": { "test": "mocha test.js --reporter spec --require should" }, - "repository": "", + "repository": { + "type": "git", + "url": "https://github.com/visionmedia/node-github-url-from-git.git" + }, "keywords": [ "github", "git", "url", "parser" ], - "author": "", + "author": { + "name": "TJ Holowaychuk" + }, "license": "MIT", "devDependencies": { "better-assert": "~1.0.0", "mocha": "~1.9.0", "should": "~1.2.2" }, - "readme": "\n# github-url-from-git\n\n```js\ndescribe('parse(url)', function(){\n it('should support git://*', function(){\n var url = 'git://github.com/jamesor/mongoose-versioner';\n parse(url).should.equal('https://github.com/jamesor/mongoose-versioner');\n })\n\n it('should support git://*.git', function(){\n var url = 'git://github.com/treygriffith/cellar.git';\n parse(url).should.equal('https://github.com/treygriffith/cellar');\n })\n\n it('should support https://*', function(){\n var url = 'https://github.com/Empeeric/i18n-node';\n 
parse(url).should.equal('https://github.com/Empeeric/i18n-node');\n })\n\n it('should support https://*.git', function(){\n var url = 'https://jpillora@github.com/banchee/tranquil.git';\n parse(url).should.equal('https://github.com/banchee/tranquil');\n })\n\n it('should return undefined on failure', function(){\n var url = 'git://github.com/justgord/.git';\n assert(null == parse(url));\n })\n\n it('should parse git@gist urls', function() {\n var url = 'git@gist.github.com:3135914.git';\n parse(url).should.equal('https://gist.github.com/3135914')\n })\n\n it('should parse https://gist urls', function() {\n var url = 'https://gist.github.com/3135914.git';\n parse(url).should.equal('https://gist.github.com/3135914')\n })\n})\n```\n", - "readmeFilename": "Readme.md", - "_id": "github-url-from-git@1.1.1", + "gitHead": "154df00b0b590c29be5d2a5822e7b2e160b75345", + "bugs": { + "url": "https://github.com/visionmedia/node-github-url-from-git/issues" + }, + "homepage": "https://github.com/visionmedia/node-github-url-from-git", + "_id": "github-url-from-git@1.4.0", + "_shasum": "285e6b520819001bde128674704379e4ff03e0de", + "_from": "github-url-from-git@>=1.4.0-0 <2.0.0-0", + "_npmVersion": "2.0.0-alpha.7", + "_npmUser": { + "name": "bcoe", + "email": "bencoe@gmail.com" + }, + "maintainers": [ + { + "name": "tjholowaychuk", + "email": "tj@vision-media.ca" + }, + { + "name": "bcoe", + "email": "bencoe@gmail.com" + } + ], "dist": { - "shasum": "a14903bccbd30c91ea41765ae68ba1b27a53c4d1" + "shasum": "285e6b520819001bde128674704379e4ff03e0de", + "tarball": "http://registry.npmjs.org/github-url-from-git/-/github-url-from-git-1.4.0.tgz" }, - "_from": "github-url-from-git@1.1.1", - "_resolved": "https://registry.npmjs.org/github-url-from-git/-/github-url-from-git-1.1.1.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/github-url-from-git/-/github-url-from-git-1.4.0.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/Readme.md 
nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/Readme.md --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/Readme.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/Readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -28,6 +28,33 @@ assert(null == parse(url)); }) + it('should parse git@github.com:bcoe/thumbd.git', function() { + var url = 'git@github.com:bcoe/thumbd.git'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse git@github.com:bcoe/thumbd.git#2.7.0', function() { + var url = 'git@github.com:bcoe/thumbd.git#2.7.0'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse git+https://github.com/bcoe/thumbd.git', function() { + var url = 'git+https://github.com/bcoe/thumbd.git'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse git+ssh://github.com/bcoe/thumbd.git', function() { + var url = 'git+ssh://github.com/bcoe/thumbd.git'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse https://EastCloud@github.com/EastCloud/node-websockets.git', function() { + var url = 'https://EastCloud@github.com/EastCloud/node-websockets.git'; + parse(url).should.eql('https://github.com/EastCloud/node-websockets'); + }) + + // gist urls. + it('should parse git@gist urls', function() { var url = 'git@gist.github.com:3135914.git'; parse(url).should.equal('https://gist.github.com/3135914') @@ -37,5 +64,29 @@ var url = 'https://gist.github.com/3135914.git'; parse(url).should.equal('https://gist.github.com/3135914') }) + + // Handle arbitrary GitHub Enterprise domains. 
+ + it('should parse parse extra GHE urls provided', function() { + var url = 'git://github.example.com/treygriffith/cellar.git'; + parse( + url, {extraBaseUrls: ['github.example.com']} + ).should.equal('https://github.example.com/treygriffith/cellar'); + }); + + it('should parse GHE urls with multiple subdomains', function() { + var url = 'git://github.internal.example.com/treygriffith/cellar.git'; + parse( + url, {extraBaseUrls: ['github.internal.example.com']} + ).should.equal('https://github.internal.example.com/treygriffith/cellar'); + }); +}) + +describe('re', function() { + it('should expose GitHub url parsing regex', function() { + parse.re.source.should.equal( + /^(?:https?:\/\/|git:\/\/)?(?:[^@]+@)?(gist.github.com|github.com)[:\/]([^\/]+\/[^\/]+?|[0-9]+)$/.source + ) + }); }) ``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/test.js nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/test.js --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-git/test.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-git/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,3 @@ - var parse = require('./'); var assert = require('better-assert'); @@ -28,6 +27,33 @@ assert(null == parse(url)); }) + it('should parse git@github.com:bcoe/thumbd.git', function() { + var url = 'git@github.com:bcoe/thumbd.git'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse git@github.com:bcoe/thumbd.git#2.7.0', function() { + var url = 'git@github.com:bcoe/thumbd.git#2.7.0'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse git+https://github.com/bcoe/thumbd.git', function() { + var url = 'git+https://github.com/bcoe/thumbd.git'; + parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse git+ssh://github.com/bcoe/thumbd.git', function() { + var url = 'git+ssh://github.com/bcoe/thumbd.git'; + 
parse(url).should.eql('https://github.com/bcoe/thumbd'); + }) + + it('should parse https://EastCloud@github.com/EastCloud/node-websockets.git', function() { + var url = 'https://EastCloud@github.com/EastCloud/node-websockets.git'; + parse(url).should.eql('https://github.com/EastCloud/node-websockets'); + }) + + // gist urls. + it('should parse git@gist urls', function() { var url = 'git@gist.github.com:3135914.git'; parse(url).should.equal('https://gist.github.com/3135914') @@ -37,4 +63,28 @@ var url = 'https://gist.github.com/3135914.git'; parse(url).should.equal('https://gist.github.com/3135914') }) + + // Handle arbitrary GitHub Enterprise domains. + + it('should parse parse extra GHE urls provided', function() { + var url = 'git://github.example.com/treygriffith/cellar.git'; + parse( + url, {extraBaseUrls: ['github.example.com']} + ).should.equal('https://github.example.com/treygriffith/cellar'); + }); + + it('should parse GHE urls with multiple subdomains', function() { + var url = 'git://github.internal.example.com/treygriffith/cellar.git'; + parse( + url, {extraBaseUrls: ['github.internal.example.com']} + ).should.equal('https://github.internal.example.com/treygriffith/cellar'); + }); +}) + +describe('re', function() { + it('should expose GitHub url parsing regex', function() { + parse.re.source.should.equal( + /^(?:https?:\/\/|git:\/\/|git\+ssh:\/\/|git\+https:\/\/)?(?:[^@]+@)?(gist.github.com|github.com)[:\/]([^\/]+\/[^\/]+?|[0-9]+)$/.source + ) + }); }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/index.js nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/index.js --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,21 @@ module.exports = getUrl -function getUrl (r) { +function getUrl (r, forBrowser) { if (!r) return 
null - if (/^[\w-]+\/[\w\.-]+$/.test(r)) - return "git://github.com/" + r - else - return null + // Regex taken from https://github.com/npm/npm-package-arg/commit/01dce583c64afae07b66a2a8a6033aeba871c3cd + // Note: This does not fully test the git ref format. + // See https://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html + // + // The only way to do this properly would be to shell out to + // git-check-ref-format, and as this is a fast sync function, + // we don't want to do that. Just let git fail if it turns + // out that the commit-ish is invalid. + // GH usernames cannot start with . or - + if (/^[^@%\/\s\.-][^:@%\/\s]*\/[^@\s\/%]+(?:#.*)?$/.test(r)) { + if (forBrowser) + r = r.replace("#", "/tree/") + return "https://github.com/" + r + } + + return null } diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/package.json nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/package.json --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "github-url-from-username-repo", - "version": "0.1.0", + "version": "1.0.2", "description": "Create urls from username/repo", "main": "index.js", "scripts": { @@ -26,13 +26,11 @@ "github", "repo" ], - "readme": "[![Build Status](https://travis-ci.org/robertkowalski/github-url-from-username-repo.png?branch=master)](https://travis-ci.org/robertkowalski/github-url-from-username-repo)\n[![Dependency Status](https://gemnasium.com/robertkowalski/github-url-from-username-repo.png)](https://gemnasium.com/robertkowalski/github-url-from-username-repo)\n\n\n# github-url-from-username-repo\n\n## Usage\n\n```javascript\n\nvar getUrl = require(\"github-url-from-username-repo\")\ngetUrl(\"visionmedia/express\") // git://github.com/visionmedia/express\n\n```", + "readme": 
"[![Build Status](https://travis-ci.org/robertkowalski/github-url-from-username-repo.png?branch=master)](https://travis-ci.org/robertkowalski/github-url-from-username-repo)\n[![Dependency Status](https://gemnasium.com/robertkowalski/github-url-from-username-repo.png)](https://gemnasium.com/robertkowalski/github-url-from-username-repo)\n\n\n# github-url-from-username-repo\n\n## API\n\n### getUrl(url, [forBrowser])\n\nGet's the url normalized for npm.\nIf `forBrowser` is true, return a GitHub url that is usable in a webbrowser.\n\n## Usage\n\n```javascript\n\nvar getUrl = require(\"github-url-from-username-repo\")\ngetUrl(\"visionmedia/express\") // https://github.com/visionmedia/express\n\n```\n", "readmeFilename": "README.md", + "gitHead": "d404a13f7f04edaed0e2f068a43b81230b8c7aee", "homepage": "https://github.com/robertkowalski/github-url-from-username-repo", - "_id": "github-url-from-username-repo@0.1.0", - "dist": { - "shasum": "fe398af670692e91af7bcfc5ae1d99ff97b1df89" - }, - "_from": "github-url-from-username-repo@0.1.0", - "_resolved": "https://registry.npmjs.org/github-url-from-username-repo/-/github-url-from-username-repo-0.1.0.tgz" + "_id": "github-url-from-username-repo@1.0.2", + "_shasum": "7dd79330d2abe69c10c2cef79714c97215791dfa", + "_from": "github-url-from-username-repo@>=1.0.2-0 <2.0.0-0" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/README.md nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/README.md --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -4,11 +4,18 @@ # github-url-from-username-repo +## API + +### getUrl(url, [forBrowser]) + +Get's the url normalized for npm. +If `forBrowser` is true, return a GitHub url that is usable in a webbrowser. 
+ ## Usage ```javascript var getUrl = require("github-url-from-username-repo") -getUrl("visionmedia/express") // git://github.com/visionmedia/express +getUrl("visionmedia/express") // https://github.com/visionmedia/express -``` \ No newline at end of file +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/test/index.js nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/test/index.js --- nodejs-0.11.13/deps/npm/node_modules/github-url-from-username-repo/test/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/github-url-from-username-repo/test/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -4,7 +4,7 @@ describe("github url from username/repo", function () { it("returns a github url for the username/repo", function () { var url = getUrl("visionmedia/express") - assert.equal("git://github.com/visionmedia/express", url) + assert.equal("https://github.com/visionmedia/express", url) }) it("returns null if it does not match", function () { @@ -17,18 +17,54 @@ assert.deepEqual(null, url) }) + it("returns null for something that's already a URI", function () { + var url = getUrl("file:../relative") + assert.deepEqual(null, url) + }) + it("works with .", function () { - var url = getUrl("component/downloader.js") - assert.equal("git://github.com/component/downloader.js", url) + var url = getUrl("component/.download.er.js.") + assert.equal("https://github.com/component/.download.er.js.", url) }) it("works with . 
in the beginning", function () { var url = getUrl("component/.downloader.js") - assert.equal("git://github.com/component/.downloader.js", url) + assert.equal("https://github.com/component/.downloader.js", url) }) it("works with -", function () { var url = getUrl("component/-dow-nloader.j-s") - assert.equal("git://github.com/component/-dow-nloader.j-s", url) + assert.equal("https://github.com/component/-dow-nloader.j-s", url) + }) + + it("can handle branches with #", function () { + var url = getUrl( + "component/entejs#1c3e1fe71640b4b477f04d947bd53c473799b277") + + assert.equal("https://github.com/component/entejs#1c3e1fe71640b" + + "4b477f04d947bd53c473799b277", url) + }) + + it("can handle branches with slashes", function () { + var url = getUrl( + "component/entejs#some/branch/name") + + assert.equal("https://github.com/component/entejs#some/branch/name", url) + }) + + describe("browser mode", function () { + it("is able to return urls for branches", function () { + var url = getUrl( + "component/entejs#1c3e1fe71640b4b477f04d947bd53c473799b277", true) + + assert.equal("https://github.com/component/entejs/tree/1c3e1fe71640b" + + "4b477f04d947bd53c473799b277", url) + }) + it("is able to return urls without a branch for the browser", function () { + var url = getUrl( + "component/entejs", true) + + assert.equal("https://github.com/component/entejs", url) + }) }) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/glob.js nodejs-0.11.15/deps/npm/node_modules/glob/glob.js --- nodejs-0.11.13/deps/npm/node_modules/glob/glob.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/glob.js 2015-01-20 21:22:17.000000000 +0000 @@ -36,7 +36,7 @@ module.exports = glob -var fs = require("fs") +var fs = require("graceful-fs") , minimatch = require("minimatch") , Minimatch = minimatch.Minimatch , inherits = require("inherits") @@ -44,6 +44,7 @@ , path = require("path") , isDir = {} , assert = require("assert").ok +, once = require("once") function 
glob (pattern, options, cb) { if (typeof options === "function") cb = options, options = {} @@ -91,6 +92,7 @@ } if (typeof cb === "function") { + cb = once(cb) this.on("error", cb) this.on("end", function (matches) { cb(null, matches) @@ -149,6 +151,10 @@ this.stat = !!options.stat this.debug = !!options.debug || !!options.globDebug + + if (/\bglob\b/.test(process.env.NODE_DEBUG || '')) + this.debug = true + if (this.debug) this.log = console.error @@ -176,6 +182,10 @@ // Keep them as a list so we can fill in when nonull is set. this.matches = new Array(n) + if (this.minimatch.set.length === 0) { + return process.nextTick(this._finish.bind(this)) + } + this.minimatch.set.forEach(iterator.bind(this)) function iterator (pattern, i, set) { this._process(pattern, 0, i, function (er) { @@ -319,34 +329,34 @@ this.log("peq: _stat, then next") this._stat(m, next) } + } + done = true - function next(exists, isDir) { - this.log("next", m, exists, isDir) - var ev = m === this.EOF ? "end" : "match" - - // "end" can only happen once. - assert(!this._endEmitted) - if (ev === "end") - this._endEmitted = true - - if (exists) { - // Doesn't mean it necessarily doesn't exist, it's possible - // we just didn't check because we don't care that much, or - // this is EOF anyway. - if (isDir && !m.match(/\/$/)) { - m = m + "/" - } else if (!isDir && m.match(/\/$/)) { - m = m.replace(/\/+$/, "") - } + function next(exists, isDir) { + this.log("next", m, exists, isDir) + var ev = m === this.EOF ? "end" : "match" + + // "end" can only happen once. + assert(!this._endEmitted) + if (ev === "end") + this._endEmitted = true + + if (exists) { + // Doesn't mean it necessarily doesn't exist, it's possible + // we just didn't check because we don't care that much, or + // this is EOF anyway. 
+ if (isDir && !m.match(/\/$/)) { + m = m + "/" + } else if (!isDir && m.match(/\/$/)) { + m = m.replace(/\/+$/, "") } - this.log("emit", ev, m) - this.emit(ev, m) - this._processingEmitQueue = false - if (done && m !== this.EOF && !this.paused) - this._processEmitQueue() } + this.log("emit", ev, m) + this.emit(ev, m) + this._processingEmitQueue = false + if (done && m !== this.EOF && !this.paused) + this._processEmitQueue() } - done = true } Glob.prototype._process = function (pattern, depth, index, cb_) { @@ -429,9 +439,9 @@ if (prefix === null) read = "." else if (isAbsolute(prefix) || isAbsolute(pattern.join("/"))) { if (!prefix || !isAbsolute(prefix)) { - prefix = path.join("/", prefix) + prefix = "/" + prefix } - read = prefix = path.resolve(prefix) + read = prefix // if (process.platform === "win32") // read = prefix = prefix.replace(/^[a-zA-Z]:|\\/g, "/") @@ -491,12 +501,20 @@ // It will only match dot entries if it starts with a dot, or if // dot is set. Stuff like @(.foo|.bar) isn't allowed. var pn = pattern[n] + var negate = !!this.minimatch.negate; var rawGlob = pattern[n]._glob , dotOk = this.dot || rawGlob.charAt(0) === "." entries = entries.filter(function (e) { - return (e.charAt(0) !== "." || dotOk) && - e.match(pattern[n]) + if (e.charAt(0) !== "." || dotOk) { + if (negate && n === 0) { + return !e.match(pattern[n]); + } else { + return e.match(pattern[n]); + } + } + + return null; }) // If n === pattern.length - 1, then there's no need for the extra stat diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/LICENSE nodejs-0.11.15/deps/npm/node_modules/glob/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/glob/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,15 @@ -Copyright (c) Isaac Z. Schlueter ("Author") -All rights reserved. +The ISC License -The BSD License +Copyright (c) Isaac Z. 
Schlueter and Contributors -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/.npmignore nodejs-0.11.15/deps/npm/node_modules/glob/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/glob/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -1,2 +1,3 @@ .*.swp test/a/ +node_modules/* Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/node_modules/glob/oh-my-glob.gif and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/node_modules/glob/oh-my-glob.gif differ diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/package.json nodejs-0.11.15/deps/npm/node_modules/glob/package.json --- nodejs-0.11.13/deps/npm/node_modules/glob/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "glob", "description": "a little globber", - "version": "3.2.9", + "version": "4.0.6", "repository": { "type": "git", "url": "git://github.com/isaacs/node-glob.git" @@ -16,8 +16,10 @@ "node": "*" }, "dependencies": { - "minimatch": "~0.2.11", - "inherits": "2" + "graceful-fs": "^3.0.2", + "inherits": "2", + "minimatch": "^1.0.0", + "once": "^1.3.0" }, "devDependencies": { "tap": "~0.4.0", @@ -28,13 +30,32 @@ "test": "tap test/*.js", "test-regen": "TEST_REGEN=1 node test/00-setup.js" }, - "license": "BSD", - "readme": "# Glob\n\nMatch files using the patterns the shell uses, like stars and stuff.\n\nThis is a glob implementation in JavaScript. It uses the `minimatch`\nlibrary to do its matching.\n\n## Attention: node-glob users!\n\nThe API has changed dramatically between 2.x and 3.x. 
This library is\nnow 100% JavaScript, and the integer flags have been replaced with an\noptions object.\n\nAlso, there's an event emitter class, proper tests, and all the other\nthings you've come to expect from node modules.\n\nAnd best of all, no compilation!\n\n## Usage\n\n```javascript\nvar glob = require(\"glob\")\n\n// options is optional\nglob(\"**/*.js\", options, function (er, files) {\n // files is an array of filenames.\n // If the `nonull` option is set, and nothing\n // was found, then files is [\"**/*.js\"]\n // er is an error object or null.\n})\n```\n\n## Features\n\nPlease see the [minimatch\ndocumentation](https://github.com/isaacs/minimatch) for more details.\n\nSupports these glob features:\n\n* Brace Expansion\n* Extended glob matching\n* \"Globstar\" `**` matching\n\nSee:\n\n* `man sh`\n* `man bash`\n* `man 3 fnmatch`\n* `man 5 gitignore`\n* [minimatch documentation](https://github.com/isaacs/minimatch)\n\n## glob(pattern, [options], cb)\n\n* `pattern` {String} Pattern to be matched\n* `options` {Object}\n* `cb` {Function}\n * `err` {Error | null}\n * `matches` {Array} filenames found matching the pattern\n\nPerform an asynchronous glob search.\n\n## glob.sync(pattern, [options])\n\n* `pattern` {String} Pattern to be matched\n* `options` {Object}\n* return: {Array} filenames found matching the pattern\n\nPerform a synchronous glob search.\n\n## Class: glob.Glob\n\nCreate a Glob object by instanting the `glob.Glob` class.\n\n```javascript\nvar Glob = require(\"glob\").Glob\nvar mg = new Glob(pattern, options, cb)\n```\n\nIt's an EventEmitter, and starts walking the filesystem to find matches\nimmediately.\n\n### new glob.Glob(pattern, [options], [cb])\n\n* `pattern` {String} pattern to search for\n* `options` {Object}\n* `cb` {Function} Called when an error occurs, or matches are found\n * `err` {Error | null}\n * `matches` {Array} filenames found matching the pattern\n\nNote that if the `sync` flag is set in the options, then matches will\nbe 
immediately available on the `g.found` member.\n\n### Properties\n\n* `minimatch` The minimatch object that the glob uses.\n* `options` The options object passed in.\n* `error` The error encountered. When an error is encountered, the\n glob object is in an undefined state, and should be discarded.\n* `aborted` Boolean which is set to true when calling `abort()`. There\n is no way at this time to continue a glob search after aborting, but\n you can re-use the statCache to avoid having to duplicate syscalls.\n* `statCache` Collection of all the stat results the glob search\n performed.\n* `cache` Convenience object. Each field has the following possible\n values:\n * `false` - Path does not exist\n * `true` - Path exists\n * `1` - Path exists, and is not a directory\n * `2` - Path exists, and is a directory\n * `[file, entries, ...]` - Path exists, is a directory, and the\n array value is the results of `fs.readdir`\n\n### Events\n\n* `end` When the matching is finished, this is emitted with all the\n matches found. If the `nonull` option is set, and no match was found,\n then the `matches` list contains the original pattern. The matches\n are sorted, unless the `nosort` flag is set.\n* `match` Every time a match is found, this is emitted with the matched.\n* `error` Emitted when an unexpected error is encountered, or whenever\n any fs error occurs if `options.strict` is set.\n* `abort` When `abort()` is called, this event is raised.\n\n### Methods\n\n* `abort` Stop the search.\n\n### Options\n\nAll the options that can be passed to Minimatch can also be passed to\nGlob to change pattern matching behavior. Also, some have been added,\nor have glob-specific ramifications.\n\nAll options are false by default, unless otherwise noted.\n\nAll options are added to the glob object, as well.\n\n* `cwd` The current working directory in which to search. Defaults\n to `process.cwd()`.\n* `root` The place where patterns starting with `/` will be mounted\n onto. 
Defaults to `path.resolve(options.cwd, \"/\")` (`/` on Unix\n systems, and `C:\\` or some such on Windows.)\n* `dot` Include `.dot` files in normal matches and `globstar` matches.\n Note that an explicit dot in a portion of the pattern will always\n match dot files.\n* `nomount` By default, a pattern starting with a forward-slash will be\n \"mounted\" onto the root setting, so that a valid filesystem path is\n returned. Set this flag to disable that behavior.\n* `mark` Add a `/` character to directory matches. Note that this\n requires additional stat calls.\n* `nosort` Don't sort the results.\n* `stat` Set to true to stat *all* results. This reduces performance\n somewhat, and is completely unnecessary, unless `readdir` is presumed\n to be an untrustworthy indicator of file existence. It will cause\n ELOOP to be triggered one level sooner in the case of cyclical\n symbolic links.\n* `silent` When an unusual error is encountered\n when attempting to read a directory, a warning will be printed to\n stderr. Set the `silent` option to true to suppress these warnings.\n* `strict` When an unusual error is encountered\n when attempting to read a directory, the process will just continue on\n in search of other matches. Set the `strict` option to raise an error\n in these cases.\n* `cache` See `cache` property above. Pass in a previously generated\n cache object to save some fs calls.\n* `statCache` A cache of results of filesystem information, to prevent\n unnecessary stat calls. While it should not normally be necessary to\n set this, you may pass the statCache from one glob() call to the\n options object of another, if you know that the filesystem will not\n change between calls. (See \"Race Conditions\" below.)\n* `sync` Perform a synchronous glob search.\n* `nounique` In some cases, brace-expanded patterns can result in the\n same file showing up multiple times in the result set. 
By default,\n this implementation prevents duplicates in the result set.\n Set this flag to disable that behavior.\n* `nonull` Set to never return an empty set, instead returning a set\n containing the pattern itself. This is the default in glob(3).\n* `nocase` Perform a case-insensitive match. Note that case-insensitive\n filesystems will sometimes result in glob returning results that are\n case-insensitively matched anyway, since readdir and stat will not\n raise an error.\n* `debug` Set to enable debug logging in minimatch and glob.\n* `globDebug` Set to enable debug logging in glob, but not minimatch.\n\n## Comparisons to other fnmatch/glob implementations\n\nWhile strict compliance with the existing standards is a worthwhile\ngoal, some discrepancies exist between node-glob and other\nimplementations, and are intentional.\n\nIf the pattern starts with a `!` character, then it is negated. Set the\n`nonegate` flag to suppress this behavior, and treat leading `!`\ncharacters normally. This is perhaps relevant if you wish to start the\npattern with a negative extglob pattern like `!(a|B)`. Multiple `!`\ncharacters at the start of a pattern will negate the pattern multiple\ntimes.\n\nIf a pattern starts with `#`, then it is treated as a comment, and\nwill not match anything. Use `\\#` to match a literal `#` at the\nstart of a line, or set the `nocomment` flag to suppress this behavior.\n\nThe double-star character `**` is supported by default, unless the\n`noglobstar` flag is set. This is supported in the manner of bsdglob\nand bash 4.1, where `**` only has special significance if it is the only\nthing in a path part. That is, `a/**/b` will match `a/x/y/b`, but\n`a/**b` will not.\n\nIf an escaped pattern has no matches, and the `nonull` flag is set,\nthen glob returns the pattern as-provided, rather than\ninterpreting the character escapes. For example,\n`glob.match([], \"\\\\*a\\\\?\")` will return `\"\\\\*a\\\\?\"` rather than\n`\"*a?\"`. 
This is akin to setting the `nullglob` option in bash, except\nthat it does not resolve escaped pattern characters.\n\nIf brace expansion is not disabled, then it is performed before any\nother interpretation of the glob pattern. Thus, a pattern like\n`+(a|{b),c)}`, which would not be valid in bash or zsh, is expanded\n**first** into the set of `+(a|b)` and `+(a|c)`, and those patterns are\nchecked for validity. Since those two are valid, matching proceeds.\n\n## Windows\n\n**Please only use forward-slashes in glob expressions.**\n\nThough windows uses either `/` or `\\` as its path separator, only `/`\ncharacters are used by this glob implementation. You must use\nforward-slashes **only** in glob expressions. Back-slashes will always\nbe interpreted as escape characters, not path separators.\n\nResults from absolute patterns such as `/foo/*` are mounted onto the\nroot setting using `path.join`. On windows, this will by default result\nin `/foo/*` matching `C:\\foo\\bar.txt`.\n\n## Race Conditions\n\nGlob searching, by its very nature, is susceptible to race conditions,\nsince it relies on directory walking and such.\n\nAs a result, it is possible that a file that exists when glob looks for\nit may have been deleted or modified by the time it returns the result.\n\nAs part of its internal implementation, this program caches all stat\nand readdir calls that it makes, in order to cut down on system\noverhead. However, this also makes it even more susceptible to races,\nespecially if the cache or statCache objects are reused between glob\ncalls.\n\nUsers are thus advised not to use a glob result as a guarantee of\nfilesystem state in the face of rapid changes. 
For the vast majority\nof operations, this is never a problem.\n", - "readmeFilename": "README.md", + "license": "ISC", + "gitHead": "6825c425e738eaffa315d8cdb1a4c3255ededcb3", "bugs": { "url": "https://github.com/isaacs/node-glob/issues" }, "homepage": "https://github.com/isaacs/node-glob", - "_id": "glob@3.2.9", - "_from": "glob@latest" + "_id": "glob@4.0.6", + "_shasum": "695c50bdd4e2fb5c5d370b091f388d3707e291a7", + "_from": "glob@>=4.0.6 <5.0.0", + "_npmVersion": "2.0.0", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "695c50bdd4e2fb5c5d370b091f388d3707e291a7", + "tarball": "http://registry.npmjs.org/glob/-/glob-4.0.6.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/glob/-/glob-4.0.6.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/README.md nodejs-0.11.15/deps/npm/node_modules/glob/README.md --- nodejs-0.11.13/deps/npm/node_modules/glob/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -5,16 +5,7 @@ This is a glob implementation in JavaScript. It uses the `minimatch` library to do its matching. -## Attention: node-glob users! - -The API has changed dramatically between 2.x and 3.x. This library is -now 100% JavaScript, and the integer flags have been replaced with an -options object. - -Also, there's an event emitter class, proper tests, and all the other -things you've come to expect from node modules. - -And best of all, no compilation! 
+![](oh-my-glob.gif) ## Usage diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/test/bash-results.json nodejs-0.11.15/deps/npm/node_modules/glob/test/bash-results.json --- nodejs-0.11.13/deps/npm/node_modules/glob/test/bash-results.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/test/bash-results.json 2015-01-20 21:22:17.000000000 +0000 @@ -283,9 +283,11 @@ "{./*/*,/tmp/glob-test/*}": [ "./examples/g.js", "./examples/usr-local.js", + "./node_modules/graceful-fs", "./node_modules/inherits", "./node_modules/minimatch", "./node_modules/mkdirp", + "./node_modules/once", "./node_modules/rimraf", "./node_modules/tap", "./test/00-setup.js", @@ -293,8 +295,11 @@ "./test/bash-comparison.js", "./test/bash-results.json", "./test/cwd-test.js", + "./test/empty-set.js", + "./test/error-callback.js", "./test/globstar-match.js", "./test/mark.js", + "./test/negation-test.js", "./test/new-glob-optional-options.js", "./test/nocase-nomagic.js", "./test/pause-resume.js", diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/test/empty-set.js nodejs-0.11.15/deps/npm/node_modules/glob/test/empty-set.js --- nodejs-0.11.13/deps/npm/node_modules/glob/test/empty-set.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/test/empty-set.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +var test = require('tap').test +var glob = require("../glob.js") + +// Patterns that cannot match anything +var patterns = [ + '# comment', + ' ', + '\n', + 'just doesnt happen to match anything so this is a control' +] + +patterns.forEach(function (p) { + test(JSON.stringify(p), function (t) { + glob(p, function (e, f) { + t.equal(e, null, 'no error') + t.same(f, [], 'no returned values') + t.end() + }) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/test/error-callback.js nodejs-0.11.15/deps/npm/node_modules/glob/test/error-callback.js --- nodejs-0.11.13/deps/npm/node_modules/glob/test/error-callback.js 1970-01-01 
00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/test/error-callback.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +var fs +try { fs = require('graceful-fs') } catch (e) { fs = require('fs') } +var test = require('tap').test +var glob = require('../') + +test('mock fs', function(t) { + fs.readdir = function(path, cb) { + process.nextTick(function() { + cb(new Error('mock fs.readdir error')) + }) + } + t.pass('mocked') + t.end() +}) + +test('error callback', function(t) { + glob('*', function(err, res) { + t.ok(err, 'expecting mock error') + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/test/negation-test.js nodejs-0.11.15/deps/npm/node_modules/glob/test/negation-test.js --- nodejs-0.11.13/deps/npm/node_modules/glob/test/negation-test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/test/negation-test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +// Negation test +// Show that glob respect's minimatch's negate flag + +var glob = require('../glob.js') +var test = require('tap').test + +test('glob respects minimatch negate flag when activated with leading !', function(t) { + var expect = ["abcdef/g", "abcfed/g", "c/d", "cb/e", "symlink/a"] + var results = glob("!b**/*", {cwd: 'a'}, function (er, results) { + if (er) + throw er + + t.same(results, expect) + t.end() + }); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/test/nocase-nomagic.js nodejs-0.11.15/deps/npm/node_modules/glob/test/nocase-nomagic.js --- nodejs-0.11.13/deps/npm/node_modules/glob/test/nocase-nomagic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/test/nocase-nomagic.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,5 @@ -var fs = require('fs'); +var fs +try { fs = require('graceful-fs') } catch (e) { fs = require('fs') } var test = require('tap').test; var glob = require('../'); @@ -11,10 +12,10 @@ function fakeStat(path) { var ret switch 
(path.toLowerCase()) { - case '/tmp': case '/tmp/': + case '/tmp': case '/tmp/': case 'c:\\tmp': case 'c:\\tmp\\': ret = { isDirectory: function() { return true } } break - case '/tmp/a': + case '/tmp/a': case 'c:\\tmp\\a': ret = { isDirectory: function() { return false } } break } @@ -39,10 +40,10 @@ function fakeReaddir(path) { var ret switch (path.toLowerCase()) { - case '/tmp': case '/tmp/': + case '/tmp': case '/tmp/': case 'c:\\tmp': case 'c:\\tmp\\': ret = [ 'a', 'A' ] break - case '/': + case '/': case 'c:\\': ret = ['tmp', 'tMp', 'tMP', 'TMP'] } return ret @@ -76,6 +77,11 @@ '/tMp/a', '/tmp/A', '/tmp/a' ] + if(process.platform.match(/^win/)) { + want = want.map(function(p) { + return 'C:' + p + }) + } glob('/tmp/a', { nocase: true }, function(er, res) { if (er) throw er @@ -100,6 +106,12 @@ '/tMp/a', '/tmp/A', '/tmp/a' ] + if(process.platform.match(/^win/)) { + want = want.map(function(p) { + return 'C:' + p + }) + } + glob('/tmp/*', { nocase: true }, function(er, res) { if (er) throw er diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/test/stat.js nodejs-0.11.15/deps/npm/node_modules/glob/test/stat.js --- nodejs-0.11.13/deps/npm/node_modules/glob/test/stat.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/test/stat.js 2015-01-20 21:22:17.000000000 +0000 @@ -20,7 +20,7 @@ t.same(eof, matches) var cache = Object.keys(this.statCache) t.same(cache.map(function (f) { - return path.relative(__dirname, f) + return path.relative(__dirname, f).replace(/\\/g, '/') }).sort(), matches) cache.forEach(function(c) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/glob/.travis.yml nodejs-0.11.15/deps/npm/node_modules/glob/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/glob/.travis.yml 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/glob/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,4 @@ language: node_js node_js: - - 0.8 + - 0.10 + - 0.11 diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/graceful-fs/fs.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/fs.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/fs.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/fs.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +// eeeeeevvvvviiiiiiillllll +// more evil than monkey-patching the native builtin? +// Not sure. + +var mod = require("module") +var pre = '(function (exports, require, module, __filename, __dirname) { ' +var post = '});' +var src = pre + process.binding('natives').fs + post +var vm = require('vm') +var fn = vm.runInThisContext(src) +return fn(exports, require, module, __filename, __dirname) diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/graceful-fs.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/graceful-fs.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/graceful-fs.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/graceful-fs.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,20 +1,17 @@ // Monkey-patching the fs module. // It's ugly, but there is simply no other way to do this. 
-var fs = module.exports = require('fs') +var fs = module.exports = require('./fs.js') var assert = require('assert') // fix up some busted stuff, mostly on windows and old nodes require('./polyfills.js') -// The EMFILE enqueuing stuff - var util = require('util') function noop () {} var debug = noop -var util = require('util') if (util.debuglog) debug = util.debuglog('gfs') else if (/\bgfs\b/i.test(process.env.NODE_DEBUG || '')) diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/package.json nodejs-0.11.15/deps/npm/node_modules/graceful-fs/package.json --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "graceful-fs", "description": "A drop-in replacement for fs, making various improvements.", - "version": "2.0.2", + "version": "3.0.4", "repository": { "type": "git", "url": "git://github.com/isaacs/node-graceful-fs.git" @@ -38,11 +38,34 @@ "EACCESS" ], "license": "BSD", + "devDependencies": { + "mkdirp": "^0.5.0", + "rimraf": "^2.2.8", + "tap": "^0.4.13" + }, + "gitHead": "d3fd03247ccc4fa8a3eee399307fd266c70efb06", "bugs": { "url": "https://github.com/isaacs/node-graceful-fs/issues" }, - "readme": "ERROR: No README data found!", "homepage": "https://github.com/isaacs/node-graceful-fs", - "_id": "graceful-fs@2.0.2", - "_from": "graceful-fs@latest" + "_id": "graceful-fs@3.0.4", + "_shasum": "a0306d9b0940e0fc512d33b5df1014e88e0637a3", + "_from": "graceful-fs@>=3.0.4 <4.0.0", + "_npmVersion": "1.4.28", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "a0306d9b0940e0fc512d33b5df1014e88e0637a3", + "tarball": "http://registry.npmjs.org/graceful-fs/-/graceful-fs-3.0.4.tgz" + }, + "_resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-3.0.4.tgz", + "readme": "ERROR: 
No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/polyfills.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/polyfills.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/polyfills.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/polyfills.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,4 @@ -var fs = require('fs') +var fs = require('./fs.js') var constants = require('constants') var origCwd = process.cwd @@ -116,15 +116,25 @@ // https://github.com/isaacs/node-graceful-fs/issues/4 // Chown should not fail on einval or eperm if non-root. +// It should not fail on enosys ever, as this just indicates +// that a fs doesn't support the intended operation. fs.chown = chownFix(fs.chown) fs.fchown = chownFix(fs.fchown) fs.lchown = chownFix(fs.lchown) +fs.chmod = chownFix(fs.chmod) +fs.fchmod = chownFix(fs.fchmod) +fs.lchmod = chownFix(fs.lchmod) + fs.chownSync = chownFixSync(fs.chownSync) fs.fchownSync = chownFixSync(fs.fchownSync) fs.lchownSync = chownFixSync(fs.lchownSync) +fs.chmodSync = chownFix(fs.chmodSync) +fs.fchmodSync = chownFix(fs.fchmodSync) +fs.lchmodSync = chownFix(fs.lchmodSync) + function chownFix (orig) { if (!orig) return orig return function (target, uid, gid, cb) { @@ -146,15 +156,32 @@ } } +// ENOSYS means that the fs doesn't support the op. Just ignore +// that, because it doesn't matter. +// +// if there's no getuid, or if getuid() is something other +// than 0, and the error is EINVAL or EPERM, then just ignore +// it. +// +// This specific case is a silent failure in cp, install, tar, +// and most other unix tools that manage permissions. +// +// When running as root, or if other types of errors are +// encountered, then it's strict. function chownErOk (er) { - // if there's no getuid, or if getuid() is something other than 0, - // and the error is EINVAL or EPERM, then just ignore it. 
- // This specific case is a silent failure in cp, install, tar, - // and most other unix tools that manage permissions. - // When running as root, or if other types of errors are encountered, - // then it's strict. - if (!er || (!process.getuid || process.getuid() !== 0) - && (er.code === "EINVAL" || er.code === "EPERM")) return true + if (!er) + return true + + if (er.code === "ENOSYS") + return true + + var nonroot = !process.getuid || process.getuid() !== 0 + if (nonroot) { + if (er.code === "EINVAL" || er.code === "EPERM") + return true + } + + return false } diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/README.md nodejs-0.11.15/deps/npm/node_modules/graceful-fs/README.md --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -7,7 +7,7 @@ platforms and environments, and to make filesystem access more resilient to errors. -## Improvements over fs module +## Improvements over [fs module](http://api.nodejs.org/fs.html) graceful-fs: @@ -24,3 +24,13 @@ On Windows, it retries renaming a file for up to one second if `EACCESS` or `EPERM` error occurs, likely because antivirus software has locked the directory. + +## USAGE + +```javascript +// use just like fs +var fs = require('graceful-fs') + +// now go and do stuff with it... 
+fs.readFileSync('some-file-or-whatever') +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/max-open.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/max-open.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/max-open.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/max-open.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,69 @@ +var test = require('tap').test +var fs = require('../') + +test('open lots of stuff', function (t) { + // Get around EBADF from libuv by making sure that stderr is opened + // Otherwise Darwin will refuse to give us a FD for stderr! + process.stderr.write('') + + // How many parallel open()'s to do + var n = 1024 + var opens = 0 + var fds = [] + var going = true + var closing = false + var doneCalled = 0 + + for (var i = 0; i < n; i++) { + go() + } + + function go() { + opens++ + fs.open(__filename, 'r', function (er, fd) { + if (er) throw er + fds.push(fd) + if (going) go() + }) + } + + // should hit ulimit pretty fast + setTimeout(function () { + going = false + t.equal(opens - fds.length, n) + done() + }, 100) + + + function done () { + if (closing) return + doneCalled++ + + if (fds.length === 0) { + //console.error('done called %d times', doneCalled) + // First because of the timeout + // Then to close the fd's opened afterwards + // Then this time, to complete. + // Might take multiple passes, depending on CPU speed + // and ulimit, but at least 3 in every case. 
+ t.ok(doneCalled >= 3) + return t.end() + } + + closing = true + setTimeout(function () { + // console.error('do closing again') + closing = false + done() + }, 100) + + // console.error('closing time') + var closes = fds.slice(0) + fds.length = 0 + closes.forEach(function (fd) { + fs.close(fd, function (er) { + if (er) throw er + }) + }) + } +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/open.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/open.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/open.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/open.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,7 +2,7 @@ var fs = require('../graceful-fs.js') test('graceful fs is monkeypatched fs', function (t) { - t.equal(fs, require('fs')) + t.equal(fs, require('../fs.js')) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/readdir-sort.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/readdir-sort.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/readdir-sort.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/readdir-sort.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ var test = require("tap").test -var fs = require("fs") +var fs = require("../fs.js") var readdir = fs.readdir fs.readdir = function(path, cb) { @@ -14,7 +14,6 @@ g.readdir("whatevers", function (er, files) { if (er) throw er - console.error(files) t.same(files, [ "a", "b", "z" ]) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/write-then-read.js nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/write-then-read.js --- nodejs-0.11.13/deps/npm/node_modules/graceful-fs/test/write-then-read.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/graceful-fs/test/write-then-read.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +var fs = require('../'); +var rimraf = 
require('rimraf'); +var mkdirp = require('mkdirp'); +var test = require('tap').test; +var p = require('path').resolve(__dirname, 'files'); + +// Make sure to reserve the stderr fd +process.stderr.write(''); + +var num = 4097; +var paths = new Array(num); + +test('make files', function (t) { + rimraf.sync(p); + mkdirp.sync(p); + + for (var i = 0; i < num; ++i) { + paths[i] = 'files/file-' + i; + fs.writeFileSync(paths[i], 'content'); + } + + t.end(); +}) + +test('read files', function (t) { + // now read them + var done = 0; + for (var i = 0; i < num; ++i) { + fs.readFile(paths[i], function(err, data) { + if (err) + throw err; + + ++done; + if (done === num) { + t.pass('success'); + t.end() + } + }); + } +}); + +test('cleanup', function (t) { + rimraf.sync(p); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/inflight/.eslintrc nodejs-0.11.15/deps/npm/node_modules/inflight/.eslintrc --- nodejs-0.11.13/deps/npm/node_modules/inflight/.eslintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inflight/.eslintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "env" : { + "node" : true + }, + "rules" : { + "semi": [2, "never"], + "strict": 0, + "quotes": [1, "single", "avoid-escape"], + "no-use-before-define": 0, + "curly": 0, + "no-underscore-dangle": 0, + "no-lonely-if": 1, + "no-unused-vars": [2, {"vars" : "all", "args" : "after-used"}], + "no-mixed-requires": 0, + "space-infix-ops": 0 + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/inflight/inflight.js nodejs-0.11.15/deps/npm/node_modules/inflight/inflight.js --- nodejs-0.11.13/deps/npm/node_modules/inflight/inflight.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inflight/inflight.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +var wrappy = require('wrappy') +var reqs = Object.create(null) +var once = require('once') + +module.exports = wrappy(inflight) + +function inflight (key, cb) { + if (reqs[key]) { + 
reqs[key].push(cb) + return null + } else { + reqs[key] = [cb] + return makeres(key) + } +} + +function makeres (key) { + return once(function RES () { + var cbs = reqs[key] + var len = cbs.length + var args = slice(arguments) + for (var i = 0; i < len; i++) { + cbs[i].apply(null, args) + } + if (cbs.length > len) { + // added more in the interim. + // de-zalgo, just in case, but don't call again. + cbs.splice(0, len) + process.nextTick(function () { + RES.apply(null, args) + }) + } else { + delete reqs[key] + } + }) +} + +function slice (args) { + var length = args.length + var array = [] + + for (var i = 0; i < length; i++) array[i] = args[i] + return array +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/inflight/LICENSE nodejs-0.11.15/deps/npm/node_modules/inflight/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/inflight/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inflight/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/inflight/package.json nodejs-0.11.15/deps/npm/node_modules/inflight/package.json --- nodejs-0.11.13/deps/npm/node_modules/inflight/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inflight/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +{ + "name": "inflight", + "version": "1.0.4", + "description": "Add callbacks to requests in flight to avoid async duplication", + "main": "inflight.js", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + }, + "devDependencies": { + "tap": "^0.4.10" + }, + "scripts": { + "test": "tap test.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/isaacs/inflight" + }, + "author": { + "name": "Isaac Z. Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "bugs": { + "url": "https://github.com/isaacs/inflight/issues" + }, + "homepage": "https://github.com/isaacs/inflight", + "license": "ISC", + "readme": "# inflight\n\nAdd callbacks to requests in flight to avoid async duplication\n\n## USAGE\n\n```javascript\nvar inflight = require('inflight')\n\n// some request that does some stuff\nfunction req(key, callback) {\n // key is any random string. like a url or filename or whatever.\n //\n // will return either a falsey value, indicating that the\n // request for this key is already in flight, or a new callback\n // which when called will call all callbacks passed to inflightk\n // with the same key\n callback = inflight(key, callback)\n\n // If we got a falsey value back, then there's already a req going\n if (!callback) return\n\n // this is where you'd fetch the url or whatever\n // callback is also once()-ified, so it can safely be assigned\n // to multiple events etc. 
First call wins.\n setTimeout(function() {\n callback(null, key)\n }, 100)\n}\n\n// only assigns a single setTimeout\n// when it dings, all cbs get called\nreq('foo', cb1)\nreq('foo', cb2)\nreq('foo', cb3)\nreq('foo', cb4)\n```\n", + "readmeFilename": "README.md", + "gitHead": "c7b5531d572a867064d4a1da9e013e8910b7d1ba", + "_id": "inflight@1.0.4", + "_shasum": "6cbb4521ebd51ce0ec0a936bfd7657ef7e9b172a", + "_from": "inflight@>=1.0.4 <1.1.0" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/inflight/README.md nodejs-0.11.15/deps/npm/node_modules/inflight/README.md --- nodejs-0.11.13/deps/npm/node_modules/inflight/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inflight/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +# inflight + +Add callbacks to requests in flight to avoid async duplication + +## USAGE + +```javascript +var inflight = require('inflight') + +// some request that does some stuff +function req(key, callback) { + // key is any random string. like a url or filename or whatever. + // + // will return either a falsey value, indicating that the + // request for this key is already in flight, or a new callback + // which when called will call all callbacks passed to inflightk + // with the same key + callback = inflight(key, callback) + + // If we got a falsey value back, then there's already a req going + if (!callback) return + + // this is where you'd fetch the url or whatever + // callback is also once()-ified, so it can safely be assigned + // to multiple events etc. First call wins. 
+ setTimeout(function() { + callback(null, key) + }, 100) +} + +// only assigns a single setTimeout +// when it dings, all cbs get called +req('foo', cb1) +req('foo', cb2) +req('foo', cb3) +req('foo', cb4) +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/inflight/test.js nodejs-0.11.15/deps/npm/node_modules/inflight/test.js --- nodejs-0.11.13/deps/npm/node_modules/inflight/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inflight/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,97 @@ +var test = require('tap').test +var inf = require('./inflight.js') + + +function req (key, cb) { + cb = inf(key, cb) + if (cb) setTimeout(function () { + cb(key) + cb(key) + }) + return cb +} + +test('basic', function (t) { + var calleda = false + var a = req('key', function (k) { + t.notOk(calleda) + calleda = true + t.equal(k, 'key') + if (calledb) t.end() + }) + t.ok(a, 'first returned cb function') + + var calledb = false + var b = req('key', function (k) { + t.notOk(calledb) + calledb = true + t.equal(k, 'key') + if (calleda) t.end() + }) + + t.notOk(b, 'second should get falsey inflight response') +}) + +test('timing', function (t) { + var expect = [ + 'method one', + 'start one', + 'end one', + 'two', + 'tick', + 'three' + ] + var i = 0 + + function log (m) { + t.equal(m, expect[i], m + ' === ' + expect[i]) + ++i + if (i === expect.length) + t.end() + } + + function method (name, cb) { + log('method ' + name) + process.nextTick(cb) + } + + var one = inf('foo', function () { + log('start one') + var three = inf('foo', function () { + log('three') + }) + if (three) method('three', three) + log('end one') + }) + + method('one', one) + + var two = inf('foo', function () { + log('two') + }) + if (two) method('one', two) + + process.nextTick(log.bind(null, 'tick')) +}) + +test('parameters', function (t) { + t.plan(8) + + var a = inf('key', function (first, second, third) { + t.equal(first, 1) + t.equal(second, 2) + t.equal(third, 3) + }) + 
t.ok(a, 'first returned cb function') + + var b = inf('key', function (first, second, third) { + t.equal(first, 1) + t.equal(second, 2) + t.equal(third, 3) + }) + t.notOk(b, 'second should get falsey inflight response') + + setTimeout(function () { + a(1, 2, 3) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/inherits/package.json nodejs-0.11.15/deps/npm/node_modules/inherits/package.json --- nodejs-0.11.13/deps/npm/node_modules/inherits/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/inherits/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -28,5 +28,24 @@ "url": "https://github.com/isaacs/inherits/issues" }, "_id": "inherits@2.0.1", - "_from": "inherits@" + "dist": { + "shasum": "b17d08d326b4423e568eff719f91b0b1cbdf69f1", + "tarball": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz" + }, + "_from": "inherits@latest", + "_npmVersion": "1.3.8", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "directories": {}, + "_shasum": "b17d08d326b4423e568eff719f91b0b1cbdf69f1", + "_resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "homepage": "https://github.com/isaacs/inherits" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/ini/ini.js nodejs-0.11.15/deps/npm/node_modules/ini/ini.js --- nodejs-0.11.13/deps/npm/node_modules/ini/ini.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ini/ini.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,31 +7,47 @@ var eol = process.platform === "win32" ? "\r\n" : "\n" -function encode (obj, section) { +function encode (obj, opt) { var children = [] , out = "" + if (typeof opt === "string") { + opt = { + section: opt, + whitespace: false + } + } else { + opt = opt || {} + opt.whitespace = opt.whitespace === true + } + + var separator = opt.whitespace ? 
" = " : "=" + Object.keys(obj).forEach(function (k, _, __) { var val = obj[k] if (val && Array.isArray(val)) { val.forEach(function(item) { - out += safe(k + "[]") + " = " + safe(item) + "\n" + out += safe(k + "[]") + separator + safe(item) + "\n" }) } else if (val && typeof val === "object") { children.push(k) } else { - out += safe(k) + " = " + safe(val) + eol + out += safe(k) + separator + safe(val) + eol } }) - if (section && out.length) { - out = "[" + safe(section) + "]" + eol + out + if (opt.section && out.length) { + out = "[" + safe(opt.section) + "]" + eol + out } children.forEach(function (k, _, __) { var nk = dotSplit(k).join('\\.') - var child = encode(obj[k], (section ? section + "." : "") + nk) + var section = (opt.section ? opt.section + "." : "") + nk + var child = encode(obj[k], { + section: section, + whitespace: opt.whitespace + }) if (out.length && child.length) { out += eol } @@ -42,12 +58,12 @@ } function dotSplit (str) { - return str.replace(/\1/g, '\2LITERAL\\1LITERAL\2') - .replace(/\\\./g, '\1') + return str.replace(/\1/g, '\u0002LITERAL\\1LITERAL\u0002') + .replace(/\\\./g, '\u0001') .split(/\./).map(function (part) { return part.replace(/\1/g, '\\.') - .replace(/\2LITERAL\\1LITERAL\2/g, '\1') - }) + .replace(/\2LITERAL\\1LITERAL\2/g, '\u0001') + }) } function decode (str) { @@ -61,7 +77,7 @@ , section = null lines.forEach(function (line, _, __) { - if (!line || line.match(/^\s*;/)) return + if (!line || line.match(/^\s*[;#]/)) return var match = line.match(re) if (!match) return if (match[1] !== undefined) { @@ -122,21 +138,29 @@ return out } +function isQuoted (val) { + return (val.charAt(0) === "\"" && val.slice(-1) === "\"") + || (val.charAt(0) === "'" && val.slice(-1) === "'") +} + function safe (val) { return ( typeof val !== "string" || val.match(/[\r\n]/) || val.match(/^\[/) || (val.length > 1 - && val.charAt(0) === "\"" - && val.slice(-1) === "\"") + && isQuoted(val)) || val !== val.trim() ) ? 
JSON.stringify(val) - : val.replace(/;/g, '\\;') + : val.replace(/;/g, '\\;').replace(/#/g, "\\#") } function unsafe (val, doUnesc) { val = (val || "").trim() - if (val.charAt(0) === "\"" && val.slice(-1) === "\"") { + if (isQuoted(val)) { + // remove the single quotes before calling JSON.parse + if (val.charAt(0) === "'") { + val = val.substr(1, val.length - 2); + } try { val = JSON.parse(val) } catch (_) {} } else { // walk the val to find the first not-escaped ; character @@ -145,12 +169,12 @@ for (var i = 0, l = val.length; i < l; i++) { var c = val.charAt(i) if (esc) { - if (c === "\\" || c === ";") + if ("\\;#".indexOf(c) !== -1) unesc += c else unesc += "\\" + c esc = false - } else if (c === ";") { + } else if (";#".indexOf(c) !== -1) { break } else if (c === "\\") { esc = true diff -Nru nodejs-0.11.13/deps/npm/node_modules/ini/.npmignore nodejs-0.11.15/deps/npm/node_modules/ini/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/ini/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ini/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/ini/package.json nodejs-0.11.15/deps/npm/node_modules/ini/package.json --- nodejs-0.11.13/deps/npm/node_modules/ini/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ini/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "ini", "description": "An ini encoder/decoder for node", - "version": "1.1.0", + "version": "1.3.0", "repository": { "type": "git", "url": "git://github.com/isaacs/ini.git" @@ -20,10 +20,32 @@ }, "dependencies": {}, "devDependencies": { - "tap": "~0.0.9" + "tap": "~0.4.0" }, - "readme": "An ini format parser and serializer for node.\n\nSections are treated as nested objects. 
Items before the first heading\nare saved on the object directly.\n\n## Usage\n\nConsider an ini-file `config.ini` that looks like this:\n\n ; this comment is being ignored\n scope = global\n\n [database]\n user = dbuser\n password = dbpassword\n database = use_this_database\n\n [paths.default]\n datadir = /var/lib/data\n array[] = first value\n array[] = second value\n array[] = third value\n\nYou can read, manipulate and write the ini-file like so:\n\n var fs = require('fs')\n , ini = require('ini')\n\n var config = ini.parse(fs.readFileSync('./config.ini', 'utf-8'))\n\n config.scope = 'local'\n config.database.database = 'use_another_database'\n config.paths.default.tmpdir = '/tmp'\n delete config.paths.default.datadir\n config.paths.default.array.push('fourth value')\n\n fs.writeFileSync('./config_modified.ini', ini.stringify(config, 'section'))\n\nThis will result in a file called `config_modified.ini` being written to the filesystem with the following content:\n\n [section]\n scope = local\n [section.database]\n user = dbuser\n password = dbpassword\n database = use_another_database\n [section.paths.default]\n tmpdir = /tmp\n array[] = first value\n array[] = second value\n array[] = third value\n array[] = fourth value\n\n\n## API\n\n### decode(inistring)\nDecode the ini-style formatted `inistring` into a nested object.\n\n### parse(inistring)\nAlias for `decode(inistring)`\n\n### encode(object, [section])\nEncode the object `object` into an ini-style formatted string. If the optional parameter `section` is given, then all top-level properties of the object are put into this section and the `section`-string is prepended to all sub-sections, see the usage example above.\n\n### stringify(object, [section])\nAlias for `encode(object, [section])`\n\n### safe(val)\nEscapes the string `val` such that it is safe to be used as a key or value in an ini-file. Basically escapes quotes. 
For example\n\n ini.safe('\"unsafe string\"')\n\nwould result in\n\n \"\\\"unsafe string\\\"\"\n\n### unsafe(val)\nUnescapes the string `val`\n", - "readmeFilename": "README.md", - "_id": "ini@1.1.0", - "_from": "ini@latest" + "gitHead": "6c314944d0201f3199e1189aeb5687d0aaf1c575", + "bugs": { + "url": "https://github.com/isaacs/ini/issues" + }, + "homepage": "https://github.com/isaacs/ini", + "_id": "ini@1.3.0", + "_shasum": "625483e56c643a7721014c76604d3353f44bd429", + "_from": "ini@>=1.3.0 <2.0.0", + "_npmVersion": "2.0.0", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "625483e56c643a7721014c76604d3353f44bd429", + "tarball": "http://registry.npmjs.org/ini/-/ini-1.3.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/ini/-/ini-1.3.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/ini/README.md nodejs-0.11.15/deps/npm/node_modules/ini/README.md --- nodejs-0.11.13/deps/npm/node_modules/ini/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ini/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ An ini format parser and serializer for node. -Sections are treated as nested objects. Items before the first heading -are saved on the object directly. +Sections are treated as nested objects. Items before the first +heading are saved on the object directly. 
## Usage @@ -34,40 +34,62 @@ delete config.paths.default.datadir config.paths.default.array.push('fourth value') - fs.writeFileSync('./config_modified.ini', ini.stringify(config, 'section')) + fs.writeFileSync('./config_modified.ini', ini.stringify(config, { section: 'section' })) -This will result in a file called `config_modified.ini` being written to the filesystem with the following content: +This will result in a file called `config_modified.ini` being written +to the filesystem with the following content: [section] - scope = local + scope=local [section.database] - user = dbuser - password = dbpassword - database = use_another_database + user=dbuser + password=dbpassword + database=use_another_database [section.paths.default] - tmpdir = /tmp - array[] = first value - array[] = second value - array[] = third value - array[] = fourth value + tmpdir=/tmp + array[]=first value + array[]=second value + array[]=third value + array[]=fourth value ## API ### decode(inistring) + Decode the ini-style formatted `inistring` into a nested object. ### parse(inistring) + Alias for `decode(inistring)` -### encode(object, [section]) -Encode the object `object` into an ini-style formatted string. If the optional parameter `section` is given, then all top-level properties of the object are put into this section and the `section`-string is prepended to all sub-sections, see the usage example above. +### encode(object, [options]) -### stringify(object, [section]) -Alias for `encode(object, [section])` +Encode the object `object` into an ini-style formatted string. If the +optional parameter `section` is given, then all top-level properties +of the object are put into this section and the `section`-string is +prepended to all sub-sections, see the usage example above. + +The `options` object may contain the following: + +* `section` A string which will be the first `section` in the encoded + ini data. Defaults to none. 
+* `whitespace` Boolean to specify whether to put whitespace around the + `=` character. By default, whitespace is omitted, to be friendly to + some persnickety old parsers that don't tolerate it well. But some + find that it's more human-readable and pretty with the whitespace. + +For backwards compatibility reasons, if a `string` options is passed +in, then it is assumed to be the `section` value. + +### stringify(object, [options]) + +Alias for `encode(object, [options])` ### safe(val) -Escapes the string `val` such that it is safe to be used as a key or value in an ini-file. Basically escapes quotes. For example + +Escapes the string `val` such that it is safe to be used as a key or +value in an ini-file. Basically escapes quotes. For example ini.safe('"unsafe string"') @@ -76,4 +98,5 @@ "\"unsafe string\"" ### unsafe(val) + Unescapes the string `val` diff -Nru nodejs-0.11.13/deps/npm/node_modules/ini/test/fixtures/foo.ini nodejs-0.11.15/deps/npm/node_modules/ini/test/fixtures/foo.ini --- nodejs-0.11.13/deps/npm/node_modules/ini/test/fixtures/foo.ini 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ini/test/fixtures/foo.ini 2015-01-20 21:22:17.000000000 +0000 @@ -8,6 +8,16 @@ ; wrap in quotes to get a key with a bracket, not a section. "[disturbing]" = hey you never know +; Test single quotes +s = 'something' + +; Test mixing quotes + +s1 = "something' + +; Test double quotes +s2 = "something else" + ; Test arrays zr[] = deedee ar[] = one @@ -45,3 +55,9 @@ ; this next one is not a comment! it's escaped! nocomment = this\; this is not a comment + +# Support the use of the number sign (#) as an alternative to the semicolon for indicating comments. +# http://en.wikipedia.org/wiki/INI_file#Comments + +# this next one is not a comment! it's escaped! 
+noHashComment = this\# this is not a comment diff -Nru nodejs-0.11.13/deps/npm/node_modules/ini/test/foo.js nodejs-0.11.15/deps/npm/node_modules/ini/test/foo.js --- nodejs-0.11.13/deps/npm/node_modules/ini/test/foo.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/ini/test/foo.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,33 +6,40 @@ , fixture = path.resolve(__dirname, "./fixtures/foo.ini") , data = fs.readFileSync(fixture, "utf8") , d - , expectE = 'o = p\n' - + 'a with spaces = b c\n' - + '" xa n p " = "\\"\\r\\nyoyoyo\\r\\r\\n"\n' - + '"[disturbing]" = hey you never know\n' - + 'zr[] = deedee\n' - + 'ar[] = one\n' - + 'ar[] = three\n' - + 'ar[] = this is included\n' - + 'br = warm\n' + , expectE = 'o=p\n' + + 'a with spaces=b c\n' + + '" xa n p "="\\"\\r\\nyoyoyo\\r\\r\\n"\n' + + '"[disturbing]"=hey you never know\n' + + 's=something\n' + + 's1=\"something\'\n' + + 's2=something else\n' + + 'zr[]=deedee\n' + + 'ar[]=one\n' + + 'ar[]=three\n' + + 'ar[]=this is included\n' + + 'br=warm\n' + '\n' + '[a]\n' - + 'av = a val\n' - + 'e = { o: p, a: ' + + 'av=a val\n' + + 'e={ o: p, a: ' + '{ av: a val, b: { c: { e: "this [value]" ' - + '} } } }\nj = "\\"{ o: \\"p\\", a: { av:' + + '} } } }\nj="\\"{ o: \\"p\\", a: { av:' + ' \\"a val\\", b: { c: { e: \\"this [value]' - + '\\" } } } }\\""\n"[]" = a square?\n' - + 'cr[] = four\ncr[] = eight\n\n' - +'[a.b.c]\ne = 1\n' - + 'j = 2\n\n[x\\.y\\.z]\nx.y.z = xyz\n\n' - + '[x\\.y\\.z.a\\.b\\.c]\na.b.c = abc\n' - + 'nocomment = this\\; this is not a comment\n' + + '\\" } } } }\\""\n"[]"=a square?\n' + + 'cr[]=four\ncr[]=eight\n\n' + +'[a.b.c]\ne=1\n' + + 'j=2\n\n[x\\.y\\.z]\nx.y.z=xyz\n\n' + + '[x\\.y\\.z.a\\.b\\.c]\na.b.c=abc\n' + + 'nocomment=this\\; this is not a comment\n' + + 'noHashComment=this\\# this is not a comment\n' , expectD = { o: 'p', 'a with spaces': 'b c', " xa n p ":'"\r\nyoyoyo\r\r\n', '[disturbing]': 'hey you never know', + 's': 'something', + 's1' : '\"something\'', + 's2': 
'something else', 'zr': ['deedee'], 'ar': ['one', 'three', 'this is included'], 'br': 'warm', @@ -47,10 +54,21 @@ 'x.y.z': 'xyz', 'a.b.c': { 'a.b.c': 'abc', - 'nocomment': 'this\; this is not a comment' + 'nocomment': 'this\; this is not a comment', + noHashComment: 'this\# this is not a comment' } } } + , expectF = '[prefix.log]\n' + + 'type=file\n\n' + + '[prefix.log.level]\n' + + 'label=debug\n' + + 'value=10\n' + , expectG = '[log]\n' + + 'type = file\n\n' + + '[log.level]\n' + + 'label = debug\n' + + 'value = 10\n' test("decode from file", function (t) { var d = i.decode(data) @@ -69,3 +87,19 @@ t.end() }) + +test("encode with option", function (t) { + var obj = {log: { type:'file', level: {label:'debug', value:10} } } + e = i.encode(obj, {section: 'prefix'}) + + t.equal(e, expectF) + t.end() +}) + +test("encode with whitespace", function (t) { + var obj = {log: { type:'file', level: {label:'debug', value:10} } } + e = i.encode(obj, {whitespace: true}) + + t.equal(e, expectG) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/default-input.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/default-input.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/default-input.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/default-input.js 2015-01-20 21:22:17.000000000 +0000 @@ -38,11 +38,14 @@ }) }} +var name = package.name || basename +exports.name = yes ? name : prompt('name', name) + +var version = package.version || config.get('init-version') || '1.0.0' +exports.version = yes ? version : prompt('version', version) -exports.name = prompt('name', package.name || basename) -exports.version = prompt('version', package.version || '0.0.0') if (!package.description) { - exports.description = prompt('description') + exports.description = yes ? 
'' : prompt('description') } if (!package.main) { @@ -63,7 +66,8 @@ else f = f[0] - return cb(null, prompt('entry point', f || 'index.js')) + var index = f || 'index.js' + return cb(null, yes ? index : prompt('entry point', index)) }) } } @@ -121,26 +125,32 @@ function tx (test) { return test || notest } - if (!s.test || s.test === notest) { - if (d.indexOf('tap') !== -1) - s.test = prompt('test command', 'tap test/*.js', tx) - else if (d.indexOf('expresso') !== -1) - s.test = prompt('test command', 'expresso test', tx) - else if (d.indexOf('mocha') !== -1) - s.test = prompt('test command', 'mocha', tx) - else - s.test = prompt('test command', tx) + var commands = { + 'tap':'tap test/*.js' + , 'expresso':'expresso test' + , 'mocha':'mocha' + } + var command + Object.keys(commands).forEach(function (k) { + if (d.indexOf(k) !== -1) command = commands[k] + }) + var ps = 'test command' + if (yes) { + s.test = command || notest + } else { + s.test = command ? prompt(ps, command, tx) : prompt(ps, tx) + } } - return cb(null, s) } if (!package.repository) { exports.repository = function (cb) { fs.readFile('.git/config', 'utf8', function (er, gconf) { - if (er || !gconf) return cb(null, prompt('git repository')) - + if (er || !gconf) { + return cb(null, yes ? '' : prompt('git repository')) + } gconf = gconf.split(/\r?\n/) var i = gconf.indexOf('[remote "origin"]') if (i !== -1) { @@ -150,15 +160,15 @@ else u = u.replace(/^\s*url = /, '') } if (u && u.match(/^git@github.com:/)) - u = u.replace(/^git@github.com:/, 'git://github.com/') + u = u.replace(/^git@github.com:/, 'https://github.com/') - return cb(null, prompt('git repository', u)) + return cb(null, yes ? u : prompt('git repository', u)) }) } } if (!package.keywords) { - exports.keywords = prompt('keywords', function (s) { + exports.keywords = yes ? 
'' : prompt('keywords', function (s) { if (!s) return undefined if (Array.isArray(s)) s = s.join(' ') if (typeof s !== 'string') return s @@ -167,15 +177,14 @@ } if (!package.author) { - exports.author = config.get('init.author.name') + exports.author = config.get('init-author-name') ? { - "name" : config.get('init.author.name'), - "email" : config.get('init.author.email'), - "url" : config.get('init.author.url') + "name" : config.get('init-author-name'), + "email" : config.get('init-author-email'), + "url" : config.get('init-author-url') } : prompt('author') } -exports.license = prompt('license', package.license || - config.get('init.license') || - 'ISC') +var license = package.license || config.get('init-license') || 'ISC' +exports.license = yes ? license : prompt('license', license) diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/init-package-json.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/init-package-json.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/init-package-json.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/init-package-json.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,6 @@ module.exports = init +module.exports.yes = yes var PZ = require('promzard').PromZard var path = require('path') @@ -14,6 +15,13 @@ // readJson.extras(file, data, cb) var readJson = require('read-package-json') +function yes (conf) { + return !!( + conf.get('yes') || conf.get('y') || + conf.get('force') || conf.get('f') + ) +} + function init (dir, input, config, cb) { if (typeof config === 'function') cb = config, config = {} @@ -35,7 +43,7 @@ var package = path.resolve(dir, 'package.json') input = path.resolve(input) var pkg - var ctx = {} + var ctx = { yes: yes(config) } var es = readJson.extraSet readJson.extraSet = es.filter(function (fn) { @@ -91,14 +99,21 @@ delete pkg.repository var d = JSON.stringify(pkg, null, 2) + '\n' + function write (yes) { + fs.writeFile(package, d, 
'utf8', function (er) { + if (!er && yes) console.log('Wrote to %s:\n\n%s\n', package, d) + return cb(er, pkg) + }) + } + if (ctx.yes) { + return write(true) + } console.log('About to write to %s:\n\n%s\n', package, d) read({prompt:'Is this ok? ', default: 'yes'}, function (er, ok) { if (!ok || ok.toLowerCase().charAt(0) !== 'y') { console.log('Aborted.') } else { - fs.writeFile(package, d, 'utf8', function (er) { - return cb(er, pkg) - }) + return write() } }) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/package.json nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/package.json --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "promzard", "description": "prompting wizardly", - "version": "0.2.1", + "version": "0.2.2", "repository": { "url": "git://github.com/isaacs/promzard" }, @@ -21,14 +21,29 @@ "test": "tap test/*.js" }, "license": "ISC", - "readme": "# promzard\n\nA prompting wizard for building files from specialized PromZard modules.\nUsed by `npm init`.\n\nA reimplementation of @SubStack's\n[prompter](https://github.com/substack/node-prompter), which does not\nuse AST traversal.\n\nFrom another point of view, it's a reimplementation of\n[@Marak](https://github.com/marak)'s\n[wizard](https://github.com/Marak/wizard) which doesn't use schemas.\n\nThe goal is a nice drop-in enhancement for `npm init`.\n\n## Usage\n\n```javascript\nvar promzard = require('promzard')\npromzard(inputFile, optionalContextAdditions, function (er, data) {\n // .. 
you know what you doing ..\n})\n```\n\nIn the `inputFile` you can have something like this:\n\n```javascript\nvar fs = require('fs')\nmodule.exports = {\n \"greeting\": prompt(\"Who shall you greet?\", \"world\", function (who) {\n return \"Hello, \" + who\n }),\n \"filename\": __filename,\n \"directory\": function (cb) {\n fs.readdir(__dirname, cb)\n }\n}\n```\n\nWhen run, promzard will display the prompts and resolve the async\nfunctions in order, and then either give you an error, or the resolved\ndata, ready to be dropped into a JSON file or some other place.\n\n\n### promzard(inputFile, ctx, callback)\n\nThe inputFile is just a node module. You can require() things, set\nmodule.exports, etc. Whatever that module exports is the result, and it\nis walked over to call any functions as described below.\n\nThe only caveat is that you must give PromZard the full absolute path\nto the module (you can get this via Node's `require.resolve`.) Also,\nthe `prompt` function is injected into the context object, so watch out.\n\nWhatever you put in that `ctx` will of course also be available in the\nmodule. You can get quite fancy with this, passing in existing configs\nand so on.\n\n### Class: promzard.PromZard(file, ctx)\n\nJust like the `promzard` function, but the EventEmitter that makes it\nall happen. Emits either a `data` event with the data, or a `error`\nevent if it blows up.\n\nIf `error` is emitted, then `data` never will be.\n\n### prompt(...)\n\nIn the promzard input module, you can call the `prompt` function.\nThis prompts the user to input some data. The arguments are interpreted\nbased on type:\n\n1. `string` The first string encountered is the prompt. The second is\n the default value.\n2. `function` A transformer function which receives the data and returns\n something else. More than meets the eye.\n3. 
`object` The `prompt` member is the prompt, the `default` member is\n the default value, and the `transform` is the transformer.\n\nWhatever the final value is, that's what will be put on the resulting\nobject.\n\n### Functions\n\nIf there are any functions on the promzard input module's exports, then\npromzard will call each of them with a callback. This way, your module\ncan do asynchronous actions if necessary to validate or ascertain\nwhatever needs verification.\n\nThe functions are called in the context of the ctx object, and are given\na single argument, which is a callback that should be called with either\nan error, or the result to assign to that spot.\n\nIn the async function, you can also call prompt() and return the result\nof the prompt in the callback.\n\nFor example, this works fine in a promzard module:\n\n```\nexports.asyncPrompt = function (cb) {\n fs.stat(someFile, function (er, st) {\n // if there's an error, no prompt, just error\n // otherwise prompt and use the actual file size as the default\n cb(er, prompt('file size', st.size))\n })\n}\n```\n\nYou can also return other async functions in the async function\ncallback. Though that's a bit silly, it could be a handy way to reuse\nfunctionality in some cases.\n\n### Sync vs Async\n\nThe `prompt()` function is not synchronous, though it appears that way.\nIt just returns a token that is swapped out when the data object is\nwalked over asynchronously later, and returns a token.\n\nFor that reason, prompt() calls whose results don't end up on the data\nobject are never shown to the user. For example, this will only prompt\nonce:\n\n```\nexports.promptThreeTimes = prompt('prompt me once', 'shame on you')\nexports.promptThreeTimes = prompt('prompt me twice', 'um....')\nexports.promptThreeTimes = prompt('you cant prompt me again')\n```\n\n### Isn't this exactly the sort of 'looks sync' that you said was bad about other libraries?\n\nYeah, sorta. 
I wouldn't use promzard for anything more complicated than\na wizard that spits out prompts to set up a config file or something.\nMaybe there are other use cases I haven't considered.\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/isaacs/promzard/issues" }, "homepage": "https://github.com/isaacs/promzard", - "_id": "promzard@0.2.1", - "_shasum": "c4c7cbe5182465c13b43540be9daf47098b4e75b", - "_from": "promzard@~0.2.0", - "_resolved": "https://registry.npmjs.org/promzard/-/promzard-0.2.1.tgz" + "_id": "promzard@0.2.2", + "_shasum": "918b9f2b29458cb001781a8856502e4a79b016e0", + "_from": "promzard@>=0.2.0 <0.3.0", + "_npmVersion": "1.4.10", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "918b9f2b29458cb001781a8856502e4a79b016e0", + "tarball": "http://registry.npmjs.org/promzard/-/promzard-0.2.2.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/promzard/-/promzard-0.2.2.tgz", + "readme": "ERROR: No README data found!" 
} diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/promzard.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/promzard.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/promzard.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/promzard.js 2015-01-20 21:22:17.000000000 +0000 @@ -121,7 +121,7 @@ else if (a && typeof a === 'object') { p = a.prompt || p d = a.default || d - t = a.tranform || t + t = a.transform || t } } diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/test/basic.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -70,10 +70,18 @@ } if (output.match(/keywords: $/)) { c.stdin.write('fugazi function waiting room\n') + // "read" module is weird on node >= 0.10 when not a TTY + // requires explicit ending for reasons. + // could dig in, but really just wanna make tests pass, whatever. 
+ c.stdin.end() return } } + c.on('exit', function () { + console.error('exit event') + }) + c.on('close', function () { console.error('actual', actual) actual = JSON.parse(actual) diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/test/exports.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/test/exports.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/test/exports.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/test/exports.js 2015-01-20 21:22:17.000000000 +0000 @@ -20,7 +20,7 @@ child.stdin.write('\n'); }, 100) setTimeout(function () { - child.stdin.write('55\n'); + child.stdin.end('55\n'); }, 200) child.on('close', function () { diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/test/fn.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/test/fn.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/node_modules/promzard/test/fn.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/node_modules/promzard/test/fn.js 2015-01-20 21:22:17.000000000 +0000 @@ -43,7 +43,7 @@ child.stdin.write('55\n') }, 150) setTimeout(function () { - child.stdin.write('async prompt\n') + child.stdin.end('async prompt\n') }, 200) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/.npmignore nodejs-0.11.15/deps/npm/node_modules/init-package-json/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +node_modules/ +.eslintrc diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/package.json nodejs-0.11.15/deps/npm/node_modules/init-package-json/package.json --- 
nodejs-0.11.13/deps/npm/node_modules/init-package-json/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "init-package-json", - "version": "0.0.16", + "version": "1.1.1", "main": "init-package-json.js", "scripts": { "test": "tap test/*.js" @@ -17,15 +17,16 @@ "license": "ISC", "description": "A node module to get your node module started", "dependencies": { + "glob": "^4.0.2", "promzard": "~0.2.0", "read": "~1.0.1", "read-package-json": "1", - "semver": "2.x", - "glob": "~3.2.7" + "semver": "2.x || 3.x || 4" }, "devDependencies": { - "tap": "~0.2.5", - "rimraf": "~2.0.2" + "npm": "^2.1.4", + "rimraf": "^2.1.4", + "tap": "^0.4.13" }, "keywords": [ "init", @@ -39,11 +40,12 @@ ], "readme": "# init-package-json\n\nA node module to get your node module started.\n\n## Usage\n\n```javascript\nvar init = require('init-package-json')\nvar path = require('path')\n\n// a path to a promzard module. In the event that this file is\n// not found, one will be provided for you.\nvar initFile = path.resolve(process.env.HOME, '.npm-init')\n\n// the dir where we're doin stuff.\nvar dir = process.cwd()\n\n// extra stuff that gets put into the PromZard module's context.\n// In npm, this is the resolved config object. Exposed as 'config'\n// Optional.\nvar configData = { some: 'extra stuff' }\n\n// Any existing stuff from the package.json file is also exposed in the\n// PromZard module as the `package` object. 
There will also be free\n// vars for:\n// * `filename` path to the package.json file\n// * `basename` the tip of the package dir\n// * `dirname` the parent of the package dir\n\ninit(dir, initFile, configData, function (er, data) {\n // the data's already been written to {dir}/package.json\n // now you can do stuff with it\n})\n```\n\nOr from the command line:\n\n```\n$ npm-init\n```\n\nSee [PromZard](https://github.com/isaacs/promzard) for details about\nwhat can go in the config file.\n", "readmeFilename": "README.md", + "gitHead": "a4df4e57f9b6a2bf906ad50612dbed7dcb2f2c2b", "bugs": { "url": "https://github.com/isaacs/init-package-json/issues" }, "homepage": "https://github.com/isaacs/init-package-json", - "_id": "init-package-json@0.0.16", - "_shasum": "f7bb96fcb0a2c8061d15a2c3180323b17a65aa16", - "_from": "init-package-json@latest" + "_id": "init-package-json@1.1.1", + "_shasum": "e09e9f1fb541e0fddc9175c5ce1736fd45ff4bf8", + "_from": "init-package-json@>=1.1.1 <2.0.0" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/init-package-json/test/npm-defaults.js nodejs-0.11.15/deps/npm/node_modules/init-package-json/test/npm-defaults.js --- nodejs-0.11.13/deps/npm/node_modules/init-package-json/test/npm-defaults.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/init-package-json/test/npm-defaults.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,49 @@ +var test = require("tap").test +var rimraf = require("rimraf") +var resolve = require("path").resolve + +var npm = require("npm") +var init = require("../") + +var EXPECTED = { + name : "test", + version : "3.1.4", + description : "", + main : "basic.js", + scripts : { + test : 'echo "Error: no test specified" && exit 1' + }, + keywords : [], + author : "npmbot (http://npm.im)", + license : "WTFPL" +} + +test("npm configuration values pulled from environment", function (t) { + /*eslint camelcase:0 */ + process.env.npm_config_yes = "yes" + + process.env.npm_config_init_author_name = "npmbot" 
+ process.env.npm_config_init_author_email = "n@p.m" + process.env.npm_config_init_author_url = "http://npm.im" + + process.env.npm_config_init_license = EXPECTED.license + process.env.npm_config_init_version = EXPECTED.version + + npm.load({}, function (err) { + t.ifError(err, "npm loaded successfully") + + process.chdir(resolve(__dirname)) + init(__dirname, __dirname, npm.config, function (er, data) { + t.ifError(err, "init ran successfully") + + t.same(data, EXPECTED, "got the package data from the environment") + t.end() + }) + }) +}) + +test("cleanup", function (t) { + rimraf.sync(resolve(__dirname, "package.json")) + t.pass("cleaned up") + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/lockfile/lockfile.js nodejs-0.11.15/deps/npm/node_modules/lockfile/lockfile.js --- nodejs-0.11.13/deps/npm/node_modules/lockfile/lockfile.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/lockfile/lockfile.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,9 +7,9 @@ } var os = require('os') -var filetime = 'ctime' +exports.filetime = 'ctime' if (os.platform() == "win32") { - filetime = 'mtime' + exports.filetime = 'mtime' } var debug @@ -92,7 +92,7 @@ }) fs.close(fd, function (er) { - var age = Date.now() - st[filetime].getTime() + var age = Date.now() - st[exports.filetime].getTime() return cb(er, age <= opts.stale) }) }) @@ -125,29 +125,39 @@ } finally { fs.closeSync(fd) } - var age = Date.now() - st[filetime].getTime() + var age = Date.now() - st[exports.filetime].getTime() return (age <= opts.stale) } } -var req = 0 +var req = 1 exports.lock = function (path, opts, cb) { if (typeof opts === 'function') cb = opts, opts = {} opts.req = opts.req || req++ debug('lock', path, opts) + opts.start = opts.start || Date.now() if (typeof opts.retries === 'number' && opts.retries > 0) { - cb = (function (orig) { return function (er, fd) { - if (!er) return orig(er, fd) - var newRT = opts.retries - 1 - opts_ = Object.create(opts, { retries: { value: 
newRT }}) - debug('lock retry', path, newRT) - if (opts.retryWait) setTimeout(function() { - exports.lock(path, opts_, orig) - }, opts.retryWait) - else exports.lock(path, opts_, orig) + debug('has retries', opts.retries) + var retries = opts.retries + opts.retries = 0 + cb = (function (orig) { return function cb (er, fd) { + debug('retry-mutated callback') + retries -= 1 + if (!er || retries < 0) return orig(er, fd) + + debug('lock retry', path, opts) + + if (opts.retryWait) setTimeout(retry, opts.retryWait) + else retry() + + function retry () { + opts.start = Date.now() + debug('retrying', opts.start) + exports.lock(path, opts, cb) + } }})(cb) } @@ -167,30 +177,57 @@ if (er.code !== 'EEXIST') return cb(er) // someone's got this one. see if it's valid. - if (opts.stale) fs.stat(path, function (statEr, st) { - if (statEr) { - if (statEr.code === 'ENOENT') { - // expired already! - var opts_ = Object.create(opts, { stale: { value: false }}) - debug('lock stale enoent retry', path, opts_) - exports.lock(path, opts_, cb) - return - } - return cb(statEr) + if (!opts.stale) return notStale(er, path, opts, cb) + + return maybeStale(er, path, opts, false, cb) + }) +} + + +// Staleness checking algorithm +// 1. acquire $lock, fail +// 2. stat $lock, find that it is stale +// 3. acquire $lock.STALE +// 4. stat $lock, assert that it is still stale +// 5. unlink $lock +// 6. link $lock.STALE $lock +// 7. unlink $lock.STALE +// On any failure, clean up whatever we've done, and raise the error. +function maybeStale (originalEr, path, opts, hasStaleLock, cb) { + fs.stat(path, function (statEr, st) { + if (statEr) { + if (statEr.code === 'ENOENT') { + // expired already! 
+ opts.stale = false + debug('lock stale enoent retry', path, opts) + exports.lock(path, opts, cb) + return } + return cb(statEr) + } - var age = Date.now() - st[filetime].getTime() - if (age > opts.stale) { - debug('lock stale', path, opts_) - exports.unlock(path, function (er) { - if (er) return cb(er) - var opts_ = Object.create(opts, { stale: { value: false }}) - debug('lock stale retry', path, opts_) - exports.lock(path, opts_, cb) + var age = Date.now() - st[exports.filetime].getTime() + if (age <= opts.stale) return notStale(originalEr, path, opts, cb) + + debug('lock stale', path, opts) + if (hasStaleLock) { + exports.unlock(path, function (er) { + if (er) return cb(er) + debug('lock stale retry', path, opts) + fs.link(path + '.STALE', path, function (er) { + fs.unlink(path + '.STALE', function () { + // best effort. if the unlink fails, oh well. + cb(er) + }) }) - } else notStale(er, path, opts, cb) - }) - else notStale(er, path, opts, cb) + }) + } else { + debug('acquire .STALE file lock', opts) + exports.lock(path + '.STALE', opts, function (er) { + if (er) return cb(er) + maybeStale(originalEr, path, opts, true, cb) + }) + } }) } @@ -201,20 +238,22 @@ if (typeof opts.wait !== 'number' || opts.wait <= 0) return cb(er) - // console.error('wait', path, opts.wait) - // wait for some ms for the lock to clear - var start = Date.now() + // poll for some ms for the lock to clear + var now = Date.now() + var start = opts.start || now var end = start + opts.wait - function retry () { - debug('notStale retry', path, opts) - var now = Date.now() - var newWait = end - now - var newOpts = Object.create(opts, { wait: { value: newWait }}) - exports.lock(path, newOpts, cb) - } + if (end <= now) + return cb(er) - var timer = setTimeout(retry, 100) + debug('now=%d, wait until %d (delta=%d)', start, end, end-start) + var wait = Math.min(end - start, opts.pollPeriod || 100) + var timer = setTimeout(poll, wait) + + function poll () { + debug('notStale, polling', path, opts) 
+ exports.lock(path, opts, cb) + } } exports.lockSync = function (path, opts) { @@ -236,7 +275,7 @@ if (opts.stale) { var st = fs.statSync(path) - var ct = st[filetime].getTime() + var ct = st[exports.filetime].getTime() if (!(ct % 1000) && (opts.stale % 1000)) { // probably don't have subsecond resolution. // round up the staleness indicator. @@ -264,8 +303,8 @@ if (typeof opts.retries === 'number' && opts.retries > 0) { var newRT = opts.retries - 1 debug('retryThrow', path, opts, newRT) - var opts_ = Object.create(opts, { retries: { value: newRT }}) - return exports.lockSync(path, opts_) + opts.retries = newRT + return exports.lockSync(path, opts) } throw er } diff -Nru nodejs-0.11.13/deps/npm/node_modules/lockfile/package.json nodejs-0.11.15/deps/npm/node_modules/lockfile/package.json --- nodejs-0.11.13/deps/npm/node_modules/lockfile/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/lockfile/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "lockfile", - "version": "0.4.2", + "version": "1.0.0", "main": "lockfile.js", "directories": { "test": "test" @@ -31,15 +31,33 @@ }, "license": "BSD", "description": "A very polite lock file utility, which endeavors to not litter, and to wait patiently for others.", - "readme": "# lockfile\n\nA very polite lock file utility, which endeavors to not litter, and to\nwait patiently for others.\n\n## Usage\n\n```javascript\nvar lockFile = require('lockfile')\n\n// opts is optional, and defaults to {}\nlockFile.lock('some-file.lock', opts, function (er) {\n // if the er happens, then it failed to acquire a lock.\n // if there was not an error, then the file was created,\n // and won't be deleted until we unlock it.\n\n // do my stuff, free of interruptions\n // then, some time later, do:\n lockFile.unlock('some-file.lock', function (er) {\n // er means that an error happened, and is probably bad.\n })\n})\n```\n\n## Methods\n\nSync methods return the value/throw the 
error, others don't. Standard\nnode fs stuff.\n\nAll known locks are removed when the process exits. Of course, it's\npossible for certain types of failures to cause this to fail, but a best\neffort is made to not be a litterbug.\n\n### lockFile.lock(path, [opts], cb)\n\nAcquire a file lock on the specified path\n\n### lockFile.lockSync(path, [opts])\n\nAcquire a file lock on the specified path\n\n### lockFile.unlock(path, cb)\n\nClose and unlink the lockfile.\n\n### lockFile.unlockSync(path)\n\nClose and unlink the lockfile.\n\n### lockFile.check(path, [opts], cb)\n\nCheck if the lockfile is locked and not stale.\n\nReturns boolean.\n\n### lockFile.checkSync(path, [opts], cb)\n\nCheck if the lockfile is locked and not stale.\n\nCallback is called with `cb(error, isLocked)`.\n\n## Options\n\n### opts.wait\n\nA number of milliseconds to wait for locks to expire before giving up.\nOnly used by lockFile.lock. Relies on fs.watch. If the lock is not\ncleared by the time the wait expires, then it returns with the original\nerror.\n\n### opts.stale\n\nA number of milliseconds before locks are considered to have expired.\n\n### opts.retries\n\nUsed by lock and lockSync. Retry `n` number of times before giving up.\n\n### opts.retryWait\n\nUsed by lock. 
Wait `n` milliseconds before retrying.\n", - "readmeFilename": "README.md", + "gitHead": "9590c6f02521eb1bb154ddc3ca9a7e84ce770c45", "bugs": { "url": "https://github.com/isaacs/lockfile/issues" }, - "_id": "lockfile@0.4.2", + "homepage": "https://github.com/isaacs/lockfile", + "_id": "lockfile@1.0.0", + "_shasum": "b3a7609dda6012060083bacb0ab0ecbca58e9203", + "_from": "lockfile@1.0.0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "trevorburnham", + "email": "trevorburnham@gmail.com" + }, + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "ab91f5d3745bc005ae4fa34d078910d1f2b9612d" + "shasum": "b3a7609dda6012060083bacb0ab0ecbca58e9203", + "tarball": "http://registry.npmjs.org/lockfile/-/lockfile-1.0.0.tgz" }, - "_from": "lockfile@0.4.2", - "_resolved": "https://registry.npmjs.org/lockfile/-/lockfile-0.4.2.tgz" + "_resolved": "https://registry.npmjs.org/lockfile/-/lockfile-1.0.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/lockfile/README.md nodejs-0.11.15/deps/npm/node_modules/lockfile/README.md --- nodejs-0.11.13/deps/npm/node_modules/lockfile/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/lockfile/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -64,9 +64,14 @@ ### opts.wait A number of milliseconds to wait for locks to expire before giving up. -Only used by lockFile.lock. Relies on fs.watch. If the lock is not -cleared by the time the wait expires, then it returns with the original -error. +Only used by lockFile.lock. Poll for `opts.wait` ms. If the lock is +not cleared by the time the wait expires, then it returns with the +original error. + +### opts.pollPeriod + +When using `opts.wait`, this is the period in ms in which it polls to +check if the lock has expired. Defaults to `100`. 
### opts.stale diff -Nru nodejs-0.11.13/deps/npm/node_modules/lockfile/test/basic.js nodejs-0.11.15/deps/npm/node_modules/lockfile/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/lockfile/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/lockfile/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -4,6 +4,11 @@ var fs = require('fs') var touch = require('touch') +// On Unix systems, it uses ctime by default for staleness checks, since it's +// the most reliable. However, because this test artificially sets some locks +// to an earlier time to simulate staleness, we use mtime here. +lockFile.filetime = 'mtime' + test('setup', function (t) { try { lockFile.unlockSync('basic-lock') } catch (er) {} try { lockFile.unlockSync('sync-lock') } catch (er) {} @@ -127,36 +132,35 @@ lockFile.lock('stale-lock', function (er) { if (er) throw er + // simulate 2s old + touch.sync('stale-lock', { time: new Date(Date.now() - 2000) }) + var opts = { stale: 1 } - setTimeout(next, 1000) - function next () { - lockFile.check('stale-lock', opts, function (er, locked) { + lockFile.check('stale-lock', opts, function (er, locked) { + if (er) throw er + t.notOk(locked) + lockFile.lock('stale-lock', opts, function (er) { if (er) throw er - t.notOk(locked) - lockFile.lock('stale-lock', opts, function (er) { + lockFile.unlock('stale-lock', function (er) { if (er) throw er - lockFile.unlock('stale-lock', function (er) { - if (er) throw er - t.end() - }) + t.end() }) }) - } + }) }) }) test('staleness sync test', function (t) { var opts = { stale: 1 } lockFile.lockSync('stale-lock') - setTimeout(next, 1000) - function next () { - var locked - locked = lockFile.checkSync('stale-lock', opts) - t.notOk(locked) - lockFile.lockSync('stale-lock', opts) - lockFile.unlockSync('stale-lock') - t.end() - } + // simulate 2s old + touch.sync('stale-lock', { time: new Date(Date.now() - 2000) }) + var locked + locked = lockFile.checkSync('stale-lock', opts) + 
t.notOk(locked) + lockFile.lockSync('stale-lock', opts) + lockFile.unlockSync('stale-lock') + t.end() }) test('retries', function (t) { @@ -238,7 +242,7 @@ }, 10) // try to get another lock. this must fail! - var opt = { stale: 1000, wait: 2000 } + var opt = { stale: 1000, wait: 2000, pollInterval: 1000 } lockFile.lock('stale-wait-lock', opt, function (er) { if (!er) t.fail('got second lock? that unpossible!') @@ -256,21 +260,20 @@ // nt file system tunneling feature will make file creation time not updated var opts = { stale: 1000 } lockFile.lockSync('stale-windows-lock') - setTimeout(next, 2000) - function next () { - var locked - lockFile.unlockSync('stale-windows-lock') - lockFile.lockSync('stale-windows-lock', opts) - locked = lockFile.checkSync('stale-windows-lock', opts) - t.ok(locked, "should be locked and not stale") - lockFile.lock('stale-windows-lock', opts, function (er) { - if (!er) - t.fail('got second lock? impossible, windows file tunneling problem!') - else - t.pass('second lock failed, windows file tunneling problem fixed') - t.end() - }) - } + touch.sync('stale-windows-lock', { time: new Date(Date.now() - 3000) }) + + var locked + lockFile.unlockSync('stale-windows-lock') + lockFile.lockSync('stale-windows-lock', opts) + locked = lockFile.checkSync('stale-windows-lock', opts) + t.ok(locked, "should be locked and not stale") + lockFile.lock('stale-windows-lock', opts, function (er) { + if (!er) + t.fail('got second lock? 
impossible, windows file tunneling problem!') + else + t.pass('second lock failed, windows file tunneling problem fixed') + t.end() + }) }) @@ -283,7 +286,7 @@ try { lockFile.unlockSync('retry-lock') } catch (er) {} try { lockFile.unlockSync('contentious-lock') } catch (er) {} try { lockFile.unlockSync('stale-wait-lock') } catch (er) {} - try { lockFile.unlockSync('stale-windows-lock') } catch (er) {} + try { lockFile.unlockSync('stale-windows-lock') } catch (er) {} t.end() }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/lockfile/test/retry-time.js nodejs-0.11.15/deps/npm/node_modules/lockfile/test/retry-time.js --- nodejs-0.11.13/deps/npm/node_modules/lockfile/test/retry-time.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/lockfile/test/retry-time.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,62 @@ +// In these tests, we do the following: +// try for 200ms (rt=2) +// wait for 300ms +// try for 200ms (rt=1) +// wait for 300ms +// try for 200ms (rt=0) +// fail after 1200 +// Actual time will be more like 1220-ish for setTimeout irregularity +// But it should NOT be as slow as 2000. 
+ +var lockFile = require('../') +var touch = require('touch') +var test = require('tap').test +var fs = require('fs') + +var RETRYWAIT = 100 +var WAIT = 100 +var RETRIES = 2 +var EXPECTTIME = (RETRYWAIT * RETRIES) + (WAIT * (RETRIES + 1)) +var TOOLONG = EXPECTTIME * 1.1 + +test('setup', function (t) { + touch.sync('file.lock') + t.end() +}) + +var pollPeriods = [10, 100, 10000] +pollPeriods.forEach(function (pp) { + test('retry+wait, poll=' + pp, function (t) { + var ended = false + var timer = setTimeout(function() { + t.fail('taking too long!') + ended = true + t.end() + }, 2000) + timer.unref() + + var start = Date.now() + lockFile.lock('file.lock', { + wait: WAIT, + retries: RETRIES, + retryWait: RETRYWAIT, + pollPeriod: pp + }, function (er) { + if (ended) return + var time = Date.now() - start + console.error('t=%d', time) + t.ok(time >= EXPECTTIME, 'should take at least ' + EXPECTTIME) + t.ok(time < TOOLONG, 'should take less than ' + TOOLONG) + clearTimeout(timer) + t.end() + }) + }) +}) + +test('cleanup', function (t) { + fs.unlinkSync('file.lock') + t.end() + setTimeout(function() { + process.exit(1) + }, 500).unref() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/lockfile/test/stale-contention.js nodejs-0.11.15/deps/npm/node_modules/lockfile/test/stale-contention.js --- nodejs-0.11.13/deps/npm/node_modules/lockfile/test/stale-contention.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/lockfile/test/stale-contention.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,85 @@ +var fs = require('fs') +var lockFile = require('../') +var test = require('tap').test +var path = require('path') +var lock = path.resolve(__dirname, 'stale.lock') +var touch = require('touch') +var spawn = require('child_process').spawn +var node = process.execPath + +// We're using a lockfile with an artificially old date, +// so make it use that instead of ctime. +// Probably you should never do this in production! 
+lockFile.filetime = 'mtime' + +if (process.argv[2] === 'child') { + return child() +} + +function child () { + // Make fs.stat take 100ms to return its data + // This is important because, in a test scenario where + // we're statting the same exact file rapid-fire like this, + // it'll end up being cached by the FS, and never trigger + // the race condition we're trying to expose. + fs.stat = function (stat) { return function () { + var args = [].slice.call(arguments) + var cb = args.pop() + stat.apply(fs, args.concat(function(er, st) { + setTimeout(function () { + cb(er, st) + }, 100) + })) + }}(fs.stat) + + lockFile.lock(lock, { stale: 100000 }, function (er) { + if (er && er.code !== 'EEXIST') + throw er + else if (er) + process.exit(17) + else + setTimeout(function(){}, 500) + }) +} + +test('create stale file', function (t) { + try { fs.unlinkSync(lock) } catch (er) {} + touch.sync(lock, { time: '1979-07-01T19:10:00.000Z' }) + t.end() +}) + +test('contenders', function (t) { + var n = 10 + var fails = 0 + var wins = 0 + var args = [ __filename, 'child' ] + var opt = { stdio: [0, "pipe", 2] } + for (var i = 0; i < n; i++) { + spawn(node, args, opt).on('close', then) + } + + function then (code) { + if (code === 17) { + fails ++ + } else if (code) { + t.fail("unexpected failure", code) + fails ++ + } else { + wins ++ + } + if (fails + wins === n) { + done() + } + } + + function done () { + t.equal(wins, 1, "should have 1 lock winner") + t.equal(fails, n - 1, "all others should lose") + t.end() + } +}) + +test('remove stale file', function (t) { + try { fs.unlinkSync(lock) } catch (er) {} + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/minimatch.js nodejs-0.11.15/deps/npm/node_modules/minimatch/minimatch.js --- nodejs-0.11.13/deps/npm/node_modules/minimatch/minimatch.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/minimatch.js 2015-01-20 21:22:17.000000000 +0000 @@ -260,6 +260,13 @@ } 
Minimatch.prototype.braceExpand = braceExpand + +function pad(n, width, z) { + z = z || '0'; + n = n + ''; + return n.length >= width ? n : new Array(width - n.length + 1).join(z) + n; +} + function braceExpand (pattern, options) { options = options || this.options pattern = typeof pattern === "undefined" @@ -332,13 +339,18 @@ this.debug("numset", numset[1], numset[2]) var suf = braceExpand.call(this, pattern.substr(numset[0].length), options) , start = +numset[1] + , needPadding = numset[1][0] === '0' + , startWidth = numset[1].length + , padded , end = +numset[2] , inc = start > end ? -1 : 1 , set = [] + for (var i = start; i != (end + inc); i += inc) { + padded = needPadding ? pad(i, startWidth) : i + '' // append all the suffixes for (var ii = 0, ll = suf.length; ii < ll; ii ++) { - set.push(i + suf[ii]) + set.push(padded + suf[ii]) } } return set @@ -813,11 +825,12 @@ } minimatch.match = function (list, pattern, options) { + options = options || {} var mm = new Minimatch(pattern, options) list = list.filter(function (f) { return mm.match(f) }) - if (options.nonull && !list.length) { + if (mm.options.nonull && !list.length) { list.push(pattern) } return list @@ -853,12 +866,17 @@ var set = this.set this.debug(this.pattern, "set", set) - var splitFile = path.basename(f.join("/")).split("/") + // Find the basename of the path by looking for the last non-empty segment + var filename; + for (var i = f.length - 1; i >= 0; i--) { + filename = f[i] + if (filename) break + } for (var i = 0, l = set.length; i < l; i ++) { var pattern = set[i], file = f if (options.matchBase && pattern.length === 1) { - file = splitFile + file = [filename] } var hit = this.matchOne(file, pattern, partial) if (hit) { @@ -975,7 +993,7 @@ } // no match was found. // However, in partial mode, we can't say this is necessarily over. 
- // If there's more *pattern* left, then + // If there's more *pattern* left, then if (partial) { // ran out of file this.debug("\n>>> no match, partial?", file, fr, pattern, pr) diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/node_modules/sigmund/package.json nodejs-0.11.15/deps/npm/node_modules/minimatch/node_modules/sigmund/package.json --- nodejs-0.11.13/deps/npm/node_modules/minimatch/node_modules/sigmund/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/node_modules/sigmund/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -32,11 +32,27 @@ }, "license": "BSD", "readme": "# sigmund\n\nQuick and dirty signatures for Objects.\n\nThis is like a much faster `deepEquals` comparison, which returns a\nstring key suitable for caches and the like.\n\n## Usage\n\n```javascript\nfunction doSomething (someObj) {\n var key = sigmund(someObj, maxDepth) // max depth defaults to 10\n var cached = cache.get(key)\n if (cached) return cached)\n\n var result = expensiveCalculation(someObj)\n cache.set(key, result)\n return result\n}\n```\n\nThe resulting key will be as unique and reproducible as calling\n`JSON.stringify` or `util.inspect` on the object, but is much faster.\nIn order to achieve this speed, some differences are glossed over.\nFor example, the object `{0:'foo'}` will be treated identically to the\narray `['foo']`.\n\nAlso, just as there is no way to summon the soul from the scribblings\nof a cocain-addled psychoanalyst, there is no way to revive the object\nfrom the signature string that sigmund gives you. In fact, it's\nbarely even readable.\n\nAs with `sys.inspect` and `JSON.stringify`, larger objects will\nproduce larger signature strings.\n\nBecause sigmund is a bit less strict than the more thorough\nalternatives, the strings will be shorter, and also there is a\nslightly higher chance for collisions. 
For example, these objects\nhave the same signature:\n\n var obj1 = {a:'b',c:/def/,g:['h','i',{j:'',k:'l'}]}\n var obj2 = {a:'b',c:'/def/',g:['h','i','{jkl']}\n\nLike a good Freudian, sigmund is most effective when you already have\nsome understanding of what you're looking for. It can help you help\nyourself, but you must be willing to do some work as well.\n\nCycles are handled, and cyclical objects are silently omitted (though\nthe key is included in the signature output.)\n\nThe second argument is the maximum depth, which defaults to 10,\nbecause that is the maximum object traversal depth covered by most\ninsurance carriers.\n", - "readmeFilename": "README.md", + "_id": "sigmund@1.0.0", + "dist": { + "shasum": "66a2b3a749ae8b5fb89efd4fcc01dc94fbe02296", + "tarball": "http://registry.npmjs.org/sigmund/-/sigmund-1.0.0.tgz" + }, + "_npmVersion": "1.1.48", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "_shasum": "66a2b3a749ae8b5fb89efd4fcc01dc94fbe02296", + "_from": "sigmund@~1.0.0", + "_resolved": "https://registry.npmjs.org/sigmund/-/sigmund-1.0.0.tgz", "bugs": { "url": "https://github.com/isaacs/sigmund/issues" }, - "homepage": "https://github.com/isaacs/sigmund", - "_id": "sigmund@1.0.0", - "_from": "sigmund@~1.0.0" + "homepage": "https://github.com/isaacs/sigmund" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/package.json nodejs-0.11.15/deps/npm/node_modules/minimatch/package.json --- nodejs-0.11.13/deps/npm/node_modules/minimatch/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "minimatch", "description": "a glob matcher in javascript", - "version": "0.2.14", + "version": "1.0.0", "repository": { "type": "git", "url": "git://github.com/isaacs/minimatch.git" @@ -29,12 +29,30 @@ "type": "MIT", "url": 
"http://github.com/isaacs/minimatch/raw/master/LICENSE" }, - "readme": "# minimatch\n\nA minimal matching utility.\n\n[![Build Status](https://secure.travis-ci.org/isaacs/minimatch.png)](http://travis-ci.org/isaacs/minimatch)\n\n\nThis is the matching library used internally by npm.\n\nEventually, it will replace the C binding in node-glob.\n\nIt works by converting glob expressions into JavaScript `RegExp`\nobjects.\n\n## Usage\n\n```javascript\nvar minimatch = require(\"minimatch\")\n\nminimatch(\"bar.foo\", \"*.foo\") // true!\nminimatch(\"bar.foo\", \"*.bar\") // false!\nminimatch(\"bar.foo\", \"*.+(bar|foo)\", { debug: true }) // true, and noisy!\n```\n\n## Features\n\nSupports these glob features:\n\n* Brace Expansion\n* Extended glob matching\n* \"Globstar\" `**` matching\n\nSee:\n\n* `man sh`\n* `man bash`\n* `man 3 fnmatch`\n* `man 5 gitignore`\n\n## Minimatch Class\n\nCreate a minimatch object by instanting the `minimatch.Minimatch` class.\n\n```javascript\nvar Minimatch = require(\"minimatch\").Minimatch\nvar mm = new Minimatch(pattern, options)\n```\n\n### Properties\n\n* `pattern` The original pattern the minimatch object represents.\n* `options` The options supplied to the constructor.\n* `set` A 2-dimensional array of regexp or string expressions.\n Each row in the\n array corresponds to a brace-expanded pattern. Each item in the row\n corresponds to a single path-part. For example, the pattern\n `{a,b/c}/d` would expand to a set of patterns like:\n\n [ [ a, d ]\n , [ b, c, d ] ]\n\n If a portion of the pattern doesn't have any \"magic\" in it\n (that is, it's something like `\"foo\"` rather than `fo*o?`), then it\n will be left as a string rather than converted to a regular\n expression.\n\n* `regexp` Created by the `makeRe` method. A single regular expression\n expressing the entire pattern. 
This is useful in cases where you wish\n to use the pattern somewhat like `fnmatch(3)` with `FNM_PATH` enabled.\n* `negate` True if the pattern is negated.\n* `comment` True if the pattern is a comment.\n* `empty` True if the pattern is `\"\"`.\n\n### Methods\n\n* `makeRe` Generate the `regexp` member if necessary, and return it.\n Will return `false` if the pattern is invalid.\n* `match(fname)` Return true if the filename matches the pattern, or\n false otherwise.\n* `matchOne(fileArray, patternArray, partial)` Take a `/`-split\n filename, and match it against a single row in the `regExpSet`. This\n method is mainly for internal use, but is exposed so that it can be\n used by a glob-walker that needs to avoid excessive filesystem calls.\n\nAll other methods are internal, and will be called as necessary.\n\n## Functions\n\nThe top-level exported function has a `cache` property, which is an LRU\ncache set to store 100 items. So, calling these methods repeatedly\nwith the same pattern and options will use the same Minimatch object,\nsaving the cost of parsing it multiple times.\n\n### minimatch(path, pattern, options)\n\nMain export. Tests a path against the pattern using the options.\n\n```javascript\nvar isJS = minimatch(file, \"*.js\", { matchBase: true })\n```\n\n### minimatch.filter(pattern, options)\n\nReturns a function that tests its\nsupplied argument, suitable for use with `Array.filter`. Example:\n\n```javascript\nvar javascripts = fileList.filter(minimatch.filter(\"*.js\", {matchBase: true}))\n```\n\n### minimatch.match(list, pattern, options)\n\nMatch against the list of\nfiles, in the style of fnmatch or glob. 
If nothing is matched, and\noptions.nonull is set, then return a list containing the pattern itself.\n\n```javascript\nvar javascripts = minimatch.match(fileList, \"*.js\", {matchBase: true}))\n```\n\n### minimatch.makeRe(pattern, options)\n\nMake a regular expression object from the pattern.\n\n## Options\n\nAll options are `false` by default.\n\n### debug\n\nDump a ton of stuff to stderr.\n\n### nobrace\n\nDo not expand `{a,b}` and `{1..3}` brace sets.\n\n### noglobstar\n\nDisable `**` matching against multiple folder names.\n\n### dot\n\nAllow patterns to match filenames starting with a period, even if\nthe pattern does not explicitly have a period in that spot.\n\nNote that by default, `a/**/b` will **not** match `a/.d/b`, unless `dot`\nis set.\n\n### noext\n\nDisable \"extglob\" style patterns like `+(a|b)`.\n\n### nocase\n\nPerform a case-insensitive match.\n\n### nonull\n\nWhen a match is not found by `minimatch.match`, return a list containing\nthe pattern itself. When set, an empty list is returned if there are\nno matches.\n\n### matchBase\n\nIf set, then patterns without slashes will be matched\nagainst the basename of the path if it contains slashes. For example,\n`a?b` would match the path `/xyz/123/acb`, but not `/xyz/acb/123`.\n\n### nocomment\n\nSuppress the behavior of treating `#` at the start of a pattern as a\ncomment.\n\n### nonegate\n\nSuppress the behavior of treating a leading `!` character as negation.\n\n### flipNegate\n\nReturns from negate expressions the same as if they were not negated.\n(Ie, true on a hit, false on a miss.)\n\n\n## Comparisons to other fnmatch/glob implementations\n\nWhile strict compliance with the existing standards is a worthwhile\ngoal, some discrepancies exist between minimatch and other\nimplementations, and are intentional.\n\nIf the pattern starts with a `!` character, then it is negated. Set the\n`nonegate` flag to suppress this behavior, and treat leading `!`\ncharacters normally. 
This is perhaps relevant if you wish to start the\npattern with a negative extglob pattern like `!(a|B)`. Multiple `!`\ncharacters at the start of a pattern will negate the pattern multiple\ntimes.\n\nIf a pattern starts with `#`, then it is treated as a comment, and\nwill not match anything. Use `\\#` to match a literal `#` at the\nstart of a line, or set the `nocomment` flag to suppress this behavior.\n\nThe double-star character `**` is supported by default, unless the\n`noglobstar` flag is set. This is supported in the manner of bsdglob\nand bash 4.1, where `**` only has special significance if it is the only\nthing in a path part. That is, `a/**/b` will match `a/x/y/b`, but\n`a/**b` will not.\n\nIf an escaped pattern has no matches, and the `nonull` flag is set,\nthen minimatch.match returns the pattern as-provided, rather than\ninterpreting the character escapes. For example,\n`minimatch.match([], \"\\\\*a\\\\?\")` will return `\"\\\\*a\\\\?\"` rather than\n`\"*a?\"`. This is akin to setting the `nullglob` option in bash, except\nthat it does not resolve escaped pattern characters.\n\nIf brace expansion is not disabled, then it is performed before any\nother interpretation of the glob pattern. Thus, a pattern like\n`+(a|{b),c)}`, which would not be valid in bash or zsh, is expanded\n**first** into the set of `+(a|b)` and `+(a|c)`, and those patterns are\nchecked for validity. 
Since those two are valid, matching proceeds.\n", - "readmeFilename": "README.md", + "gitHead": "b374a643976eb55cdc19c60b6dd51ebe9bcc607a", "bugs": { "url": "https://github.com/isaacs/minimatch/issues" }, "homepage": "https://github.com/isaacs/minimatch", - "_id": "minimatch@0.2.14", - "_from": "minimatch@latest" + "_id": "minimatch@1.0.0", + "_shasum": "e0dd2120b49e1b724ce8d714c520822a9438576d", + "_from": "minimatch@latest", + "_npmVersion": "1.4.21", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "e0dd2120b49e1b724ce8d714c520822a9438576d", + "tarball": "http://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/README.md nodejs-0.11.15/deps/npm/node_modules/minimatch/README.md --- nodejs-0.11.13/deps/npm/node_modules/minimatch/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -157,8 +157,8 @@ ### nonull When a match is not found by `minimatch.match`, return a list containing -the pattern itself. When set, an empty list is returned if there are -no matches. +the pattern itself if this option is set. When not set, an empty list +is returned if there are no matches. 
### matchBase diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/test/brace-expand.js nodejs-0.11.15/deps/npm/node_modules/minimatch/test/brace-expand.js --- nodejs-0.11.13/deps/npm/node_modules/minimatch/test/brace-expand.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/test/brace-expand.js 2015-01-20 21:22:17.000000000 +0000 @@ -21,6 +21,13 @@ , "a4b" , "a5b" ] ] , [ "a{b}c", ["a{b}c"] ] + , [ "a{00..05}b" + , ["a00b" + ,"a01b" + ,"a02b" + ,"a03b" + ,"a04b" + ,"a05b" ] ] ].forEach(function (tc) { var p = tc[0] , expect = tc[1] diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/test/defaults.js nodejs-0.11.15/deps/npm/node_modules/minimatch/test/defaults.js --- nodejs-0.11.13/deps/npm/node_modules/minimatch/test/defaults.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/test/defaults.js 2015-01-20 21:22:17.000000000 +0000 @@ -237,7 +237,7 @@ var pattern = c[0] , expect = c[1].sort(alpha) - , options = c[2] || {} + , options = c[2] , f = c[3] || files , tapOpts = c[4] || {} diff -Nru nodejs-0.11.13/deps/npm/node_modules/minimatch/.travis.yml nodejs-0.11.15/deps/npm/node_modules/minimatch/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/minimatch/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/minimatch/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +language: node_js +node_js: + - 0.10 + - 0.11 diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/bin/cmd.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/bin/cmd.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/bin/cmd.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/bin/cmd.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +#!/usr/bin/env node + +var mkdirp = require('../'); +var minimist = require('minimist'); +var fs = require('fs'); + +var argv = minimist(process.argv.slice(2), { + alias: { m: 'mode', h: 'help' }, + 
string: [ 'mode' ] +}); +if (argv.help) { + fs.createReadStream(__dirname + '/usage.txt').pipe(process.stdout); + return; +} + +var paths = argv._.slice(); +var mode = argv.mode ? parseInt(argv.mode, 8) : undefined; + +(function next () { + if (paths.length === 0) return; + var p = paths.shift(); + + if (mode === undefined) mkdirp(p, cb) + else mkdirp(p, mode, cb) + + function cb (err) { + if (err) { + console.error(err.message); + process.exit(1); + } + else next(); + } +})(); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/bin/usage.txt nodejs-0.11.15/deps/npm/node_modules/mkdirp/bin/usage.txt --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/bin/usage.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/bin/usage.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,12 @@ +usage: mkdirp [DIR1,DIR2..] {OPTIONS} + + Create each supplied directory including any necessary parent directories that + don't yet exist. + + If the directory already exists, do nothing. + +OPTIONS are: + + -m, --mode If a directory needs to be created, set the mode as an octal + permission string. 
+ diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/index.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/index.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,27 +3,36 @@ module.exports = mkdirP.mkdirp = mkdirP.mkdirP = mkdirP; -function mkdirP (p, mode, f, made) { - if (typeof mode === 'function' || mode === undefined) { - f = mode; +function mkdirP (p, opts, f, made) { + if (typeof opts === 'function') { + f = opts; + opts = {}; + } + else if (!opts || typeof opts !== 'object') { + opts = { mode: opts }; + } + + var mode = opts.mode; + var xfs = opts.fs || fs; + + if (mode === undefined) { mode = 0777 & (~process.umask()); } if (!made) made = null; - + var cb = f || function () {}; - if (typeof mode === 'string') mode = parseInt(mode, 8); p = path.resolve(p); - - fs.mkdir(p, mode, function (er) { + + xfs.mkdir(p, mode, function (er) { if (!er) { made = made || p; return cb(null, made); } switch (er.code) { case 'ENOENT': - mkdirP(path.dirname(p), mode, function (er, made) { + mkdirP(path.dirname(p), opts, function (er, made) { if (er) cb(er, made); - else mkdirP(p, mode, cb, made); + else mkdirP(p, opts, cb, made); }); break; @@ -31,7 +40,7 @@ // there already. If so, then hooray! If not, then something // is borked. default: - fs.stat(p, function (er2, stat) { + xfs.stat(p, function (er2, stat) { // if the stat fails, then that's super weird. // let the original error be the failure reason. 
if (er2 || !stat.isDirectory()) cb(er, made) @@ -42,24 +51,30 @@ }); } -mkdirP.sync = function sync (p, mode, made) { +mkdirP.sync = function sync (p, opts, made) { + if (!opts || typeof opts !== 'object') { + opts = { mode: opts }; + } + + var mode = opts.mode; + var xfs = opts.fs || fs; + if (mode === undefined) { mode = 0777 & (~process.umask()); } if (!made) made = null; - if (typeof mode === 'string') mode = parseInt(mode, 8); p = path.resolve(p); try { - fs.mkdirSync(p, mode); + xfs.mkdirSync(p, mode); made = made || p; } catch (err0) { switch (err0.code) { case 'ENOENT' : - made = sync(path.dirname(p), mode, made); - sync(p, mode, made); + made = sync(path.dirname(p), opts, made); + sync(p, opts, made); break; // In the case of any other error, just see if there's a dir @@ -68,7 +83,7 @@ default: var stat; try { - stat = fs.statSync(p); + stat = xfs.statSync(p); } catch (err1) { throw err0; diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/example/parse.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/example/parse.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/example/parse.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/example/parse.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +var argv = require('../')(process.argv.slice(2)); +console.dir(argv); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/index.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/index.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,187 @@ +module.exports = function (args, opts) { + if (!opts) opts = {}; + + var flags = { bools : {}, strings : {} }; + + 
[].concat(opts['boolean']).filter(Boolean).forEach(function (key) { + flags.bools[key] = true; + }); + + [].concat(opts.string).filter(Boolean).forEach(function (key) { + flags.strings[key] = true; + }); + + var aliases = {}; + Object.keys(opts.alias || {}).forEach(function (key) { + aliases[key] = [].concat(opts.alias[key]); + aliases[key].forEach(function (x) { + aliases[x] = [key].concat(aliases[key].filter(function (y) { + return x !== y; + })); + }); + }); + + var defaults = opts['default'] || {}; + + var argv = { _ : [] }; + Object.keys(flags.bools).forEach(function (key) { + setArg(key, defaults[key] === undefined ? false : defaults[key]); + }); + + var notFlags = []; + + if (args.indexOf('--') !== -1) { + notFlags = args.slice(args.indexOf('--')+1); + args = args.slice(0, args.indexOf('--')); + } + + function setArg (key, val) { + var value = !flags.strings[key] && isNumber(val) + ? Number(val) : val + ; + setKey(argv, key.split('.'), value); + + (aliases[key] || []).forEach(function (x) { + setKey(argv, x.split('.'), value); + }); + } + + for (var i = 0; i < args.length; i++) { + var arg = args[i]; + + if (/^--.+=/.test(arg)) { + // Using [\s\S] instead of . because js doesn't support the + // 'dotall' regex modifier. See: + // http://stackoverflow.com/a/1068308/13216 + var m = arg.match(/^--([^=]+)=([\s\S]*)$/); + setArg(m[1], m[2]); + } + else if (/^--no-.+/.test(arg)) { + var key = arg.match(/^--no-(.+)/)[1]; + setArg(key, false); + } + else if (/^--.+/.test(arg)) { + var key = arg.match(/^--(.+)/)[1]; + var next = args[i + 1]; + if (next !== undefined && !/^-/.test(next) + && !flags.bools[key] + && (aliases[key] ? !flags.bools[aliases[key]] : true)) { + setArg(key, next); + i++; + } + else if (/^(true|false)$/.test(next)) { + setArg(key, next === 'true'); + i++; + } + else { + setArg(key, flags.strings[key] ? 
'' : true); + } + } + else if (/^-[^-]+/.test(arg)) { + var letters = arg.slice(1,-1).split(''); + + var broken = false; + for (var j = 0; j < letters.length; j++) { + var next = arg.slice(j+2); + + if (next === '-') { + setArg(letters[j], next) + continue; + } + + if (/[A-Za-z]/.test(letters[j]) + && /-?\d+(\.\d*)?(e-?\d+)?$/.test(next)) { + setArg(letters[j], next); + broken = true; + break; + } + + if (letters[j+1] && letters[j+1].match(/\W/)) { + setArg(letters[j], arg.slice(j+2)); + broken = true; + break; + } + else { + setArg(letters[j], flags.strings[letters[j]] ? '' : true); + } + } + + var key = arg.slice(-1)[0]; + if (!broken && key !== '-') { + if (args[i+1] && !/^(-|--)[^-]/.test(args[i+1]) + && !flags.bools[key] + && (aliases[key] ? !flags.bools[aliases[key]] : true)) { + setArg(key, args[i+1]); + i++; + } + else if (args[i+1] && /true|false/.test(args[i+1])) { + setArg(key, args[i+1] === 'true'); + i++; + } + else { + setArg(key, flags.strings[key] ? '' : true); + } + } + } + else { + argv._.push( + flags.strings['_'] || !isNumber(arg) ? 
arg : Number(arg) + ); + } + } + + Object.keys(defaults).forEach(function (key) { + if (!hasKey(argv, key.split('.'))) { + setKey(argv, key.split('.'), defaults[key]); + + (aliases[key] || []).forEach(function (x) { + setKey(argv, x.split('.'), defaults[key]); + }); + } + }); + + notFlags.forEach(function(key) { + argv._.push(key); + }); + + return argv; +}; + +function hasKey (obj, keys) { + var o = obj; + keys.slice(0,-1).forEach(function (key) { + o = (o[key] || {}); + }); + + var key = keys[keys.length - 1]; + return key in o; +} + +function setKey (obj, keys, value) { + var o = obj; + keys.slice(0,-1).forEach(function (key) { + if (o[key] === undefined) o[key] = {}; + o = o[key]; + }); + + var key = keys[keys.length - 1]; + if (o[key] === undefined || typeof o[key] === 'boolean') { + o[key] = value; + } + else if (Array.isArray(o[key])) { + o[key].push(value); + } + else { + o[key] = [ o[key], value ]; + } +} + +function isNumber (x) { + if (typeof x === 'number') return true; + if (/^0x[0-9a-f]+$/i.test(x)) return true; + return /^[-+]?(?:\d+(?:\.\d*)?|\.\d+)(e[-+]?\d+)?$/.test(x); +} + +function longest (xs) { + return Math.max.apply(null, xs.map(function (x) { return x.length })); +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/LICENSE nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +This software is released under the MIT license: + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the 
Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/package.json nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/package.json --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +{ + "name": "minimist", + "version": "0.0.8", + "description": "parse argument options", + "main": "index.js", + "devDependencies": { + "tape": "~1.0.4", + "tap": "~0.4.0" + }, + "scripts": { + "test": "tap test/*.js" + }, + "testling": { + "files": "test/*.js", + "browsers": [ + "ie/6..latest", + "ff/5", + "firefox/latest", + "chrome/10", + "chrome/latest", + "safari/5.1", + "safari/latest", + "opera/12" + ] + }, + "repository": { + "type": "git", + "url": "git://github.com/substack/minimist.git" + }, + "homepage": "https://github.com/substack/minimist", + "keywords": [ + "argv", + "getopt", + "parser", + "optimist" + ], + "author": { + "name": "James Halliday", + "email": "mail@substack.net", + "url": "http://substack.net" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/substack/minimist/issues" + }, + "_id": "minimist@0.0.8", + 
"dist": { + "shasum": "857fcabfc3397d2625b8228262e86aa7a011b05d", + "tarball": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz" + }, + "_from": "minimist@0.0.8", + "_npmVersion": "1.4.3", + "_npmUser": { + "name": "substack", + "email": "mail@substack.net" + }, + "maintainers": [ + { + "name": "substack", + "email": "mail@substack.net" + } + ], + "directories": {}, + "_shasum": "857fcabfc3397d2625b8228262e86aa7a011b05d", + "_resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "readme": "ERROR: No README data found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/readme.markdown nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/readme.markdown --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/readme.markdown 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/readme.markdown 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,73 @@ +# minimist + +parse argument options + +This module is the guts of optimist's argument parser without all the +fanciful decoration. + +[![browser support](https://ci.testling.com/substack/minimist.png)](http://ci.testling.com/substack/minimist) + +[![build status](https://secure.travis-ci.org/substack/minimist.png)](http://travis-ci.org/substack/minimist) + +# example + +``` js +var argv = require('minimist')(process.argv.slice(2)); +console.dir(argv); +``` + +``` +$ node example/parse.js -a beep -b boop +{ _: [], a: 'beep', b: 'boop' } +``` + +``` +$ node example/parse.js -x 3 -y 4 -n5 -abc --beep=boop foo bar baz +{ _: [ 'foo', 'bar', 'baz' ], + x: 3, + y: 4, + n: 5, + a: true, + b: true, + c: true, + beep: 'boop' } +``` + +# methods + +``` js +var parseArgs = require('minimist') +``` + +## var argv = parseArgs(args, opts={}) + +Return an argument object `argv` populated with the array arguments from `args`. 
+ +`argv._` contains all the arguments that didn't have an option associated with +them. + +Numeric-looking arguments will be returned as numbers unless `opts.string` or +`opts.boolean` is set for that argument name. + +Any arguments after `'--'` will not be parsed and will end up in `argv._`. + +options can be: + +* `opts.string` - a string or array of strings argument names to always treat as +strings +* `opts.boolean` - a string or array of strings to always treat as booleans +* `opts.alias` - an object mapping string names to strings or arrays of string +argument names to use as aliases +* `opts.default` - an object mapping string argument names to default values + +# install + +With [npm](https://npmjs.org) do: + +``` +npm install minimist +``` + +# license + +MIT diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dash.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dash.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dash.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dash.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +var parse = require('../'); +var test = require('tape'); + +test('-', function (t) { + t.plan(5); + t.deepEqual(parse([ '-n', '-' ]), { n: '-', _: [] }); + t.deepEqual(parse([ '-' ]), { _: [ '-' ] }); + t.deepEqual(parse([ '-f-' ]), { f: '-', _: [] }); + t.deepEqual( + parse([ '-b', '-' ], { boolean: 'b' }), + { b: true, _: [ '-' ] } + ); + t.deepEqual( + parse([ '-s', '-' ], { string: 's' }), + { s: '-', _: [] } + ); +}); + +test('-a -- b', function (t) { + t.plan(3); + t.deepEqual(parse([ '-a', '--', 'b' ]), { a: true, _: [ 'b' ] }); + t.deepEqual(parse([ '--a', '--', 'b' ]), { a: true, _: [ 'b' ] }); + t.deepEqual(parse([ '--a', '--', 'b' ]), { a: true, _: [ 'b' ] }); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/default_bool.js 
nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/default_bool.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/default_bool.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/default_bool.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +var test = require('tape'); +var parse = require('../'); + +test('boolean default true', function (t) { + var argv = parse([], { + boolean: 'sometrue', + default: { sometrue: true } + }); + t.equal(argv.sometrue, true); + t.end(); +}); + +test('boolean default false', function (t) { + var argv = parse([], { + boolean: 'somefalse', + default: { somefalse: false } + }); + t.equal(argv.somefalse, false); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dotted.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dotted.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dotted.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/dotted.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +var parse = require('../'); +var test = require('tape'); + +test('dotted alias', function (t) { + var argv = parse(['--a.b', '22'], {default: {'a.b': 11}, alias: {'a.b': 'aa.bb'}}); + t.equal(argv.a.b, 22); + t.equal(argv.aa.bb, 22); + t.end(); +}); + +test('dotted default', function (t) { + var argv = parse('', {default: {'a.b': 11}, alias: {'a.b': 'aa.bb'}}); + t.equal(argv.a.b, 11); + t.equal(argv.aa.bb, 11); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/long.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/long.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/long.js 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/long.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,31 @@ +var test = require('tape'); +var parse = require('../'); + +test('long opts', function (t) { + t.deepEqual( + parse([ '--bool' ]), + { bool : true, _ : [] }, + 'long boolean' + ); + t.deepEqual( + parse([ '--pow', 'xixxle' ]), + { pow : 'xixxle', _ : [] }, + 'long capture sp' + ); + t.deepEqual( + parse([ '--pow=xixxle' ]), + { pow : 'xixxle', _ : [] }, + 'long capture eq' + ); + t.deepEqual( + parse([ '--host', 'localhost', '--port', '555' ]), + { host : 'localhost', port : 555, _ : [] }, + 'long captures sp' + ); + t.deepEqual( + parse([ '--host=localhost', '--port=555' ]), + { host : 'localhost', port : 555, _ : [] }, + 'long captures eq' + ); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,318 @@ +var parse = require('../'); +var test = require('tape'); + +test('parse args', function (t) { + t.deepEqual( + parse([ '--no-moo' ]), + { moo : false, _ : [] }, + 'no' + ); + t.deepEqual( + parse([ '-v', 'a', '-v', 'b', '-v', 'c' ]), + { v : ['a','b','c'], _ : [] }, + 'multi' + ); + t.end(); +}); + +test('comprehensive', function (t) { + t.deepEqual( + parse([ + '--name=meowmers', 'bare', '-cats', 'woo', + '-h', 'awesome', '--multi=quux', + '--key', 'value', + '-b', '--bool', '--no-meep', '--multi=baz', + '--', '--not-a-flag', 'eek' + ]), + { + c : true, + a : true, + t : true, + s : 'woo', + h : 'awesome', + b : true, + bool : true, + key : 'value', + multi : [ 'quux', 'baz' ], + meep : false, + name : 'meowmers', + _ : [ 'bare', '--not-a-flag', 'eek' ] + 
} + ); + t.end(); +}); + +test('nums', function (t) { + var argv = parse([ + '-x', '1234', + '-y', '5.67', + '-z', '1e7', + '-w', '10f', + '--hex', '0xdeadbeef', + '789' + ]); + t.deepEqual(argv, { + x : 1234, + y : 5.67, + z : 1e7, + w : '10f', + hex : 0xdeadbeef, + _ : [ 789 ] + }); + t.deepEqual(typeof argv.x, 'number'); + t.deepEqual(typeof argv.y, 'number'); + t.deepEqual(typeof argv.z, 'number'); + t.deepEqual(typeof argv.w, 'string'); + t.deepEqual(typeof argv.hex, 'number'); + t.deepEqual(typeof argv._[0], 'number'); + t.end(); +}); + +test('flag boolean', function (t) { + var argv = parse([ '-t', 'moo' ], { boolean: 't' }); + t.deepEqual(argv, { t : true, _ : [ 'moo' ] }); + t.deepEqual(typeof argv.t, 'boolean'); + t.end(); +}); + +test('flag boolean value', function (t) { + var argv = parse(['--verbose', 'false', 'moo', '-t', 'true'], { + boolean: [ 't', 'verbose' ], + default: { verbose: true } + }); + + t.deepEqual(argv, { + verbose: false, + t: true, + _: ['moo'] + }); + + t.deepEqual(typeof argv.verbose, 'boolean'); + t.deepEqual(typeof argv.t, 'boolean'); + t.end(); +}); + +test('flag boolean default false', function (t) { + var argv = parse(['moo'], { + boolean: ['t', 'verbose'], + default: { verbose: false, t: false } + }); + + t.deepEqual(argv, { + verbose: false, + t: false, + _: ['moo'] + }); + + t.deepEqual(typeof argv.verbose, 'boolean'); + t.deepEqual(typeof argv.t, 'boolean'); + t.end(); + +}); + +test('boolean groups', function (t) { + var argv = parse([ '-x', '-z', 'one', 'two', 'three' ], { + boolean: ['x','y','z'] + }); + + t.deepEqual(argv, { + x : true, + y : false, + z : true, + _ : [ 'one', 'two', 'three' ] + }); + + t.deepEqual(typeof argv.x, 'boolean'); + t.deepEqual(typeof argv.y, 'boolean'); + t.deepEqual(typeof argv.z, 'boolean'); + t.end(); +}); + +test('newlines in params' , function (t) { + var args = parse([ '-s', "X\nX" ]) + t.deepEqual(args, { _ : [], s : "X\nX" }); + + // reproduce in bash: + // VALUE="new + // line" + // 
node program.js --s="$VALUE" + args = parse([ "--s=X\nX" ]) + t.deepEqual(args, { _ : [], s : "X\nX" }); + t.end(); +}); + +test('strings' , function (t) { + var s = parse([ '-s', '0001234' ], { string: 's' }).s; + t.equal(s, '0001234'); + t.equal(typeof s, 'string'); + + var x = parse([ '-x', '56' ], { string: 'x' }).x; + t.equal(x, '56'); + t.equal(typeof x, 'string'); + t.end(); +}); + +test('stringArgs', function (t) { + var s = parse([ ' ', ' ' ], { string: '_' })._; + t.same(s.length, 2); + t.same(typeof s[0], 'string'); + t.same(s[0], ' '); + t.same(typeof s[1], 'string'); + t.same(s[1], ' '); + t.end(); +}); + +test('empty strings', function(t) { + var s = parse([ '-s' ], { string: 's' }).s; + t.equal(s, ''); + t.equal(typeof s, 'string'); + + var str = parse([ '--str' ], { string: 'str' }).str; + t.equal(str, ''); + t.equal(typeof str, 'string'); + + var letters = parse([ '-art' ], { + string: [ 'a', 't' ] + }); + + t.equal(letters.a, ''); + t.equal(letters.r, true); + t.equal(letters.t, ''); + + t.end(); +}); + + +test('slashBreak', function (t) { + t.same( + parse([ '-I/foo/bar/baz' ]), + { I : '/foo/bar/baz', _ : [] } + ); + t.same( + parse([ '-xyz/foo/bar/baz' ]), + { x : true, y : true, z : '/foo/bar/baz', _ : [] } + ); + t.end(); +}); + +test('alias', function (t) { + var argv = parse([ '-f', '11', '--zoom', '55' ], { + alias: { z: 'zoom' } + }); + t.equal(argv.zoom, 55); + t.equal(argv.z, argv.zoom); + t.equal(argv.f, 11); + t.end(); +}); + +test('multiAlias', function (t) { + var argv = parse([ '-f', '11', '--zoom', '55' ], { + alias: { z: [ 'zm', 'zoom' ] } + }); + t.equal(argv.zoom, 55); + t.equal(argv.z, argv.zoom); + t.equal(argv.z, argv.zm); + t.equal(argv.f, 11); + t.end(); +}); + +test('nested dotted objects', function (t) { + var argv = parse([ + '--foo.bar', '3', '--foo.baz', '4', + '--foo.quux.quibble', '5', '--foo.quux.o_O', + '--beep.boop' + ]); + + t.same(argv.foo, { + bar : 3, + baz : 4, + quux : { + quibble : 5, + o_O : true + } + 
}); + t.same(argv.beep, { boop : true }); + t.end(); +}); + +test('boolean and alias with chainable api', function (t) { + var aliased = [ '-h', 'derp' ]; + var regular = [ '--herp', 'derp' ]; + var opts = { + herp: { alias: 'h', boolean: true } + }; + var aliasedArgv = parse(aliased, { + boolean: 'herp', + alias: { h: 'herp' } + }); + var propertyArgv = parse(regular, { + boolean: 'herp', + alias: { h: 'herp' } + }); + var expected = { + herp: true, + h: true, + '_': [ 'derp' ] + }; + + t.same(aliasedArgv, expected); + t.same(propertyArgv, expected); + t.end(); +}); + +test('boolean and alias with options hash', function (t) { + var aliased = [ '-h', 'derp' ]; + var regular = [ '--herp', 'derp' ]; + var opts = { + alias: { 'h': 'herp' }, + boolean: 'herp' + }; + var aliasedArgv = parse(aliased, opts); + var propertyArgv = parse(regular, opts); + var expected = { + herp: true, + h: true, + '_': [ 'derp' ] + }; + t.same(aliasedArgv, expected); + t.same(propertyArgv, expected); + t.end(); +}); + +test('boolean and alias using explicit true', function (t) { + var aliased = [ '-h', 'true' ]; + var regular = [ '--herp', 'true' ]; + var opts = { + alias: { h: 'herp' }, + boolean: 'h' + }; + var aliasedArgv = parse(aliased, opts); + var propertyArgv = parse(regular, opts); + var expected = { + herp: true, + h: true, + '_': [ ] + }; + + t.same(aliasedArgv, expected); + t.same(propertyArgv, expected); + t.end(); +}); + +// regression, see https://github.com/substack/node-optimist/issues/71 +test('boolean and --x=true', function(t) { + var parsed = parse(['--boool', '--other=true'], { + boolean: 'boool' + }); + + t.same(parsed.boool, true); + t.same(parsed.other, 'true'); + + parsed = parse(['--boool', '--other=false'], { + boolean: 'boool' + }); + + t.same(parsed.boool, true); + t.same(parsed.other, 'false'); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse_modified.js 
nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse_modified.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse_modified.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/parse_modified.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +var parse = require('../'); +var test = require('tape'); + +test('parse with modifier functions' , function (t) { + t.plan(1); + + var argv = parse([ '-b', '123' ], { boolean: 'b' }); + t.deepEqual(argv, { b: true, _: ['123'] }); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/short.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/short.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/short.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/short.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +var parse = require('../'); +var test = require('tape'); + +test('numeric short args', function (t) { + t.plan(2); + t.deepEqual(parse([ '-n123' ]), { n: 123, _: [] }); + t.deepEqual( + parse([ '-123', '456' ]), + { 1: true, 2: true, 3: 456, _: [] } + ); +}); + +test('short', function (t) { + t.deepEqual( + parse([ '-b' ]), + { b : true, _ : [] }, + 'short boolean' + ); + t.deepEqual( + parse([ 'foo', 'bar', 'baz' ]), + { _ : [ 'foo', 'bar', 'baz' ] }, + 'bare' + ); + t.deepEqual( + parse([ '-cats' ]), + { c : true, a : true, t : true, s : true, _ : [] }, + 'group' + ); + t.deepEqual( + parse([ '-cats', 'meow' ]), + { c : true, a : true, t : true, s : 'meow', _ : [] }, + 'short group next' + ); + t.deepEqual( + parse([ '-h', 'localhost' ]), + { h : 'localhost', _ : [] }, + 'short capture' + ); + t.deepEqual( + parse([ '-h', 'localhost', '-p', '555' ]), + { h : 'localhost', p : 555, _ : [] }, + 'short captures' + ); + t.end(); +}); + +test('mixed short bool 
and capture', function (t) { + t.same( + parse([ '-h', 'localhost', '-fp', '555', 'script.js' ]), + { + f : true, p : 555, h : 'localhost', + _ : [ 'script.js' ] + } + ); + t.end(); +}); + +test('short and long', function (t) { + t.deepEqual( + parse([ '-h', 'localhost', '-fp', '555', 'script.js' ]), + { + f : true, p : 555, h : 'localhost', + _ : [ 'script.js' ] + } + ); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/whitespace.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/whitespace.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/test/whitespace.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/test/whitespace.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +var parse = require('../'); +var test = require('tape'); + +test('whitespace should be whitespace' , function (t) { + t.plan(1); + var x = parse([ '-x', '\t' ]).x; + t.equal(x, '\t'); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/.travis.yml nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/node_modules/minimist/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/node_modules/minimist/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +language: node_js +node_js: + - "0.8" + - "0.10" diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/package.json nodejs-0.11.15/deps/npm/node_modules/mkdirp/package.json --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ { "name": "mkdirp", "description": "Recursively mkdir, like `mkdir -p`", - "version": "0.3.5", + "version": "0.5.0", "author": { "name": "James Halliday", "email": 
"mail@substack.net", @@ -14,20 +14,45 @@ ], "repository": { "type": "git", - "url": "http://github.com/substack/node-mkdirp.git" + "url": "https://github.com/substack/node-mkdirp.git" }, "scripts": { "test": "tap test/*.js" }, + "dependencies": { + "minimist": "0.0.8" + }, "devDependencies": { - "tap": "~0.4.0" + "tap": "~0.4.0", + "mock-fs": "~2.2.0" + }, + "bin": { + "mkdirp": "bin/cmd.js" }, "license": "MIT", - "readme": "# mkdirp\n\nLike `mkdir -p`, but in node.js!\n\n[![build status](https://secure.travis-ci.org/substack/node-mkdirp.png)](http://travis-ci.org/substack/node-mkdirp)\n\n# example\n\n## pow.js\n\n```js\nvar mkdirp = require('mkdirp');\n \nmkdirp('/tmp/foo/bar/baz', function (err) {\n if (err) console.error(err)\n else console.log('pow!')\n});\n```\n\nOutput\n\n```\npow!\n```\n\nAnd now /tmp/foo/bar/baz exists, huzzah!\n\n# methods\n\n```js\nvar mkdirp = require('mkdirp');\n```\n\n## mkdirp(dir, mode, cb)\n\nCreate a new directory and any necessary subdirectories at `dir` with octal\npermission string `mode`.\n\nIf `mode` isn't specified, it defaults to `0777 & (~process.umask())`.\n\n`cb(err, made)` fires with the error or the first directory `made`\nthat had to be created, if any.\n\n## mkdirp.sync(dir, mode)\n\nSynchronously create a new directory and any necessary subdirectories at `dir`\nwith octal permission string `mode`.\n\nIf `mode` isn't specified, it defaults to `0777 & (~process.umask())`.\n\nReturns the first directory that had to be created, if any.\n\n# install\n\nWith [npm](http://npmjs.org) do:\n\n```\nnpm install mkdirp\n```\n\n# license\n\nMIT\n", - "readmeFilename": "readme.markdown", "bugs": { "url": "https://github.com/substack/node-mkdirp/issues" }, - "_id": "mkdirp@0.3.5", - "_from": "mkdirp@latest" + "homepage": "https://github.com/substack/node-mkdirp", + "_id": "mkdirp@0.5.0", + "dist": { + "shasum": "1d73076a6df986cd9344e15e71fcc05a4c9abf12", + "tarball": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz" + }, + 
"_from": "mkdirp@latest", + "_npmVersion": "1.4.3", + "_npmUser": { + "name": "substack", + "email": "mail@substack.net" + }, + "maintainers": [ + { + "name": "substack", + "email": "mail@substack.net" + } + ], + "directories": {}, + "_shasum": "1d73076a6df986cd9344e15e71fcc05a4c9abf12", + "_resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/README.markdown nodejs-0.11.15/deps/npm/node_modules/mkdirp/README.markdown --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/README.markdown 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/README.markdown 2015-01-20 21:22:17.000000000 +0000 @@ -31,25 +31,54 @@ var mkdirp = require('mkdirp'); ``` -## mkdirp(dir, mode, cb) +## mkdirp(dir, opts, cb) Create a new directory and any necessary subdirectories at `dir` with octal -permission string `mode`. +permission string `opts.mode`. If `opts` is a non-object, it will be treated as +the `opts.mode`. -If `mode` isn't specified, it defaults to `0777 & (~process.umask())`. +If `opts.mode` isn't specified, it defaults to `0777 & (~process.umask())`. `cb(err, made)` fires with the error or the first directory `made` that had to be created, if any. -## mkdirp.sync(dir, mode) +You can optionally pass in an alternate `fs` implementation by passing in +`opts.fs`. Your implementation should have `opts.fs.mkdir(path, mode, cb)` and +`opts.fs.stat(path, cb)`. + +## mkdirp.sync(dir, opts) Synchronously create a new directory and any necessary subdirectories at `dir` -with octal permission string `mode`. +with octal permission string `opts.mode`. If `opts` is a non-object, it will be +treated as the `opts.mode`. -If `mode` isn't specified, it defaults to `0777 & (~process.umask())`. +If `opts.mode` isn't specified, it defaults to `0777 & (~process.umask())`. Returns the first directory that had to be created, if any. 
+You can optionally pass in an alternate `fs` implementation by passing in +`opts.fs`. Your implementation should have `opts.fs.mkdirSync(path, mode)` and +`opts.fs.statSync(path)`. + +# usage + +This package also ships with a `mkdirp` command. + +``` +usage: mkdirp [DIR1,DIR2..] {OPTIONS} + + Create each supplied directory including any necessary parent directories that + don't yet exist. + + If the directory already exists, do nothing. + +OPTIONS are: + + -m, --mode If a directory needs to be created, set the mode as an octal + permission string. + +``` + # install With [npm](http://npmjs.org) do: @@ -58,6 +87,14 @@ npm install mkdirp ``` +to get the library, or + +``` +npm install -g mkdirp +``` + +to get the command. + # license MIT diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/mkdirp.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/mkdirp.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/mkdirp.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/mkdirp.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('woo', function (t) { - t.plan(2); + t.plan(5); var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); @@ -12,16 +13,13 @@ var file = '/tmp/' + [x,y,z].join('/'); mkdirp(file, 0755, function (err) { - if (err) t.fail(err); - else path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - t.equal(stat.mode & 0777, 0755); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } + t.ifError(err); + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + 
t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); }) }) }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/opts_fs.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/opts_fs.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/opts_fs.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/opts_fs.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +var mkdirp = require('../'); +var path = require('path'); +var test = require('tap').test; +var mockfs = require('mock-fs'); + +test('opts.fs', function (t) { + t.plan(5); + + var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); + var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); + var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); + + var file = '/beep/boop/' + [x,y,z].join('/'); + var xfs = mockfs.fs(); + + mkdirp(file, { fs: xfs, mode: 0755 }, function (err) { + t.ifError(err); + xfs.exists(file, function (ex) { + t.ok(ex, 'created file'); + xfs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); + }); + }); + }); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/opts_fs_sync.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/opts_fs_sync.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/opts_fs_sync.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/opts_fs_sync.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,25 @@ +var mkdirp = require('../'); +var path = require('path'); +var test = require('tap').test; +var mockfs = require('mock-fs'); + +test('opts.fs sync', function (t) { + t.plan(4); + + var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); + var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); + var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); + + var file = '/beep/boop/' 
+ [x,y,z].join('/'); + var xfs = mockfs.fs(); + + mkdirp.sync(file, { fs: xfs, mode: 0755 }); + xfs.exists(file, function (ex) { + t.ok(ex, 'created file'); + xfs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); + }); + }); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/perm.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/perm.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/perm.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/perm.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,23 +1,21 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('async perm', function (t) { - t.plan(2); + t.plan(5); var file = '/tmp/' + (Math.random() * (1<<30)).toString(16); mkdirp(file, 0755, function (err) { - if (err) t.fail(err); - else path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - t.equal(stat.mode & 0777, 0755); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } + t.ifError(err); + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); }) }) }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/perm_sync.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/perm_sync.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/perm_sync.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/perm_sync.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,34 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; 
test('sync perm', function (t) { - t.plan(2); + t.plan(4); var file = '/tmp/' + (Math.random() * (1<<30)).toString(16) + '.json'; mkdirp.sync(file, 0755); - path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - t.equal(stat.mode & 0777, 0755); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } - }) + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); + }); }); }); test('sync root perm', function (t) { - t.plan(1); + t.plan(3); var file = '/tmp'; mkdirp.sync(file, 0755); - path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.ok(stat.isDirectory(), 'target not a directory'); }) }); }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/race.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/race.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/race.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/race.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ var mkdirp = require('../').mkdirp; var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('race', function (t) { - t.plan(4); + t.plan(6); var ps = [ '', 'tmp' ]; for (var i = 0; i < 25; i++) { @@ -24,17 +25,15 @@ function mk (file, cb) { mkdirp(file, 0755, function (err) { - if (err) t.fail(err); - else path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - 
else { - t.equal(stat.mode & 0777, 0755); - t.ok(stat.isDirectory(), 'target not a directory'); - if (cb) cb(); - } - }) + t.ifError(err); + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); + if (cb) cb(); + }); }) }); } diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/rel.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/rel.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/rel.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/rel.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('rel', function (t) { - t.plan(2); + t.plan(5); var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); @@ -15,17 +16,14 @@ var file = [x,y,z].join('/'); mkdirp(file, 0755, function (err) { - if (err) t.fail(err); - else path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - process.chdir(cwd); - t.equal(stat.mode & 0777, 0755); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } + t.ifError(err); + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + process.chdir(cwd); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); }) }) }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/sync.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/sync.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/sync.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/sync.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('sync', function (t) { - t.plan(2); + t.plan(4); var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); @@ -18,15 +19,12 @@ return t.end(); } - path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - t.equal(stat.mode & 0777, 0755); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0755); + t.ok(stat.isDirectory(), 'target not a directory'); }); }); }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/umask.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/umask.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/umask.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/umask.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('implicit mode from umask', function (t) { - t.plan(2); + t.plan(5); var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); @@ -12,17 +13,14 @@ var file = '/tmp/' + [x,y,z].join('/'); mkdirp(file, function (err) { - if (err) t.fail(err); - else path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, 
function (err, stat) { - if (err) t.fail(err) - else { - t.equal(stat.mode & 0777, 0777 & (~process.umask())); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } - }) + t.ifError(err); + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, 0777 & (~process.umask())); + t.ok(stat.isDirectory(), 'target not a directory'); + }); }) }); }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/umask_sync.js nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/umask_sync.js --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/test/umask_sync.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/mkdirp/test/umask_sync.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ var mkdirp = require('../'); var path = require('path'); var fs = require('fs'); +var exists = fs.exists || path.exists; var test = require('tap').test; test('umask sync modes', function (t) { - t.plan(2); + t.plan(4); var x = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var y = Math.floor(Math.random() * Math.pow(16,4)).toString(16); var z = Math.floor(Math.random() * Math.pow(16,4)).toString(16); @@ -18,15 +19,12 @@ return t.end(); } - path.exists(file, function (ex) { - if (!ex) t.fail('file not created') - else fs.stat(file, function (err, stat) { - if (err) t.fail(err) - else { - t.equal(stat.mode & 0777, (0777 & (~process.umask()))); - t.ok(stat.isDirectory(), 'target not a directory'); - t.end(); - } + exists(file, function (ex) { + t.ok(ex, 'file created'); + fs.stat(file, function (err, stat) { + t.ifError(err); + t.equal(stat.mode & 0777, (0777 & (~process.umask()))); + t.ok(stat.isDirectory(), 'target not a directory'); }); }); }); diff -Nru nodejs-0.11.13/deps/npm/node_modules/mkdirp/.travis.yml nodejs-0.11.15/deps/npm/node_modules/mkdirp/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/mkdirp/.travis.yml 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/mkdirp/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -2,4 +2,4 @@ node_js: - 0.6 - 0.8 - - 0.9 + - "0.10" diff -Nru nodejs-0.11.13/deps/npm/node_modules/node-gyp/lib/configure.js nodejs-0.11.15/deps/npm/node_modules/node-gyp/lib/configure.js --- nodejs-0.11.13/deps/npm/node_modules/node-gyp/lib/configure.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/node-gyp/lib/configure.js 2015-01-20 21:22:17.000000000 +0000 @@ -327,6 +327,9 @@ // execute `gyp` from the current target nodedir argv.unshift(gyp_script) + // make sure python uses files that came with this particular node package + process.env.PYTHONPATH = path.resolve(__dirname, '..', 'gyp', 'pylib') + var cp = gyp.spawn(python, argv) cp.on('exit', onCpExit) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/node-gyp/lib/install.js nodejs-0.11.15/deps/npm/node_modules/node-gyp/lib/install.js --- nodejs-0.11.13/deps/npm/node_modules/node-gyp/lib/install.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/node-gyp/lib/install.js 2015-01-20 21:22:17.000000000 +0000 @@ -72,6 +72,9 @@ version = version.version log.verbose('install', 'installing version: %s', version) + // distributions starting with 0.10.0 contain sha256 checksums + var checksumAlgo = semver.gte(version, '0.10.0') ? 
'sha256' : 'sha1' + // the directory where the dev files will be installed var devDir = path.resolve(gyp.devDir, version) @@ -153,7 +156,7 @@ } function getContentSha(res, callback) { - var shasum = crypto.createHash('sha1') + var shasum = crypto.createHash(checksumAlgo) res.on('data', function (chunk) { shasum.update(chunk) }).on('end', function () { @@ -243,11 +246,11 @@ cb(new Error(res.statusCode + ' status code downloading tarball')) return } - // content sha1 - getContentSha(res, function (_, sha1) { + // content checksum + getContentSha(res, function (_, checksum) { var filename = path.basename(tarballUrl).trim() - contentShasums[filename] = sha1 - log.verbose('content sha1', filename, sha1) + contentShasums[filename] = checksum + log.verbose('content checksum', filename, checksum) }) // start unzipping and untaring @@ -288,12 +291,12 @@ async-- if (!async) { - log.verbose('download contents shasums', JSON.stringify(contentShasums)) + log.verbose('download contents checksum', JSON.stringify(contentShasums)) // check content shasums for (var k in contentShasums) { - log.verbose('validating download shasum for ' + k, '(%s == %s)', contentShasums[k], expectShasums[k]) + log.verbose('validating download checksum for ' + k, '(%s == %s)', contentShasums[k], expectShasums[k]) if (contentShasums[k] !== expectShasums[k]) { - cb(new Error(k + ' local sha1 ' + contentShasums[k] + ' not match remote ' + expectShasums[k])) + cb(new Error(k + ' local checksum ' + contentShasums[k] + ' not match remote ' + expectShasums[k])) return } } @@ -303,17 +306,18 @@ } function downloadShasums(done) { - log.verbose('check download content sha1, need to download `SHASUMS.txt`...') - var shasumsPath = path.resolve(devDir, 'SHASUMS.txt') - , shasumsUrl = distUrl + '/v' + version + '/SHASUMS.txt' + var shasumsFile = (checksumAlgo === 'sha256') ? 
'SHASUMS256.txt' : 'SHASUMS.txt' + log.verbose('check download content checksum, need to download `' + shasumsFile + '`...') + var shasumsPath = path.resolve(devDir, shasumsFile) + , shasumsUrl = distUrl + '/v' + version + '/' + shasumsFile - log.verbose('`SHASUMS.txt` url', shasumsUrl) + log.verbose('checksum url', shasumsUrl) var req = download(shasumsUrl) if (!req) return req.on('error', done) req.on('response', function (res) { if (res.statusCode !== 200) { - done(new Error(res.statusCode + ' status code downloading SHASUMS.txt')) + done(new Error(res.statusCode + ' status code downloading checksum')) return } @@ -332,7 +336,7 @@ expectShasums[name] = items[0] }) - log.verbose('`SHASUMS.txt` data', JSON.stringify(expectShasums)) + log.verbose('checksum data', JSON.stringify(expectShasums)) done() }) }) @@ -366,9 +370,9 @@ return } - getContentSha(res, function (_, sha1) { - contentShasums['node.lib'] = sha1 - log.verbose('content sha1', 'node.lib', sha1) + getContentSha(res, function (_, checksum) { + contentShasums['node.lib'] = checksum + log.verbose('content checksum', 'node.lib', checksum) }) var ws = fs.createWriteStream(nodeLibPath32) @@ -392,9 +396,9 @@ return } - getContentSha(res, function (_, sha1) { - contentShasums['x64/node.lib'] = sha1 - log.verbose('content sha1', 'x64/node.lib', sha1) + getContentSha(res, function (_, checksum) { + contentShasums['x64/node.lib'] = checksum + log.verbose('content checksum', 'x64/node.lib', checksum) }) var ws = fs.createWriteStream(nodeLibPath64) diff -Nru nodejs-0.11.13/deps/npm/node_modules/node-gyp/lib/node-gyp.js nodejs-0.11.15/deps/npm/node_modules/node-gyp/lib/node-gyp.js --- nodejs-0.11.13/deps/npm/node_modules/node-gyp/lib/node-gyp.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/node-gyp/lib/node-gyp.js 2015-01-20 21:22:17.000000000 +0000 @@ -203,9 +203,6 @@ return ' - ' + c + ' - ' + require('./' + c).usage }).join('\n') , '' - , ' for specific command usage and options 
try:' - , ' $ node-gyp --help' - , '' , 'node-gyp@' + this.version + ' ' + path.resolve(__dirname, '..') , 'node@' + process.versions.node ].join('\n') diff -Nru nodejs-0.11.13/deps/npm/node_modules/node-gyp/package.json nodejs-0.11.15/deps/npm/node_modules/node-gyp/package.json --- nodejs-0.11.13/deps/npm/node_modules/node-gyp/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/node-gyp/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -10,7 +10,7 @@ "bindings", "gyp" ], - "version": "0.13.0", + "version": "1.0.2", "installVersion": 9, "author": { "name": "Nathan Rajlich", @@ -27,33 +27,55 @@ }, "main": "./lib/node-gyp.js", "dependencies": { - "glob": "3", - "graceful-fs": "2", - "fstream": "0", - "minimatch": "0", - "mkdirp": "0", - "nopt": "2", + "fstream": "^1.0.0", + "glob": "3 || 4", + "graceful-fs": "3", + "minimatch": "1", + "mkdirp": "^0.5.0", + "nopt": "2 || 3", "npmlog": "0", "osenv": "0", "request": "2", "rimraf": "2", - "semver": "~2.2.1", - "tar": "0", + "semver": "2.x || 3.x || 4", + "tar": "^1.0.0", "which": "1" }, "engines": { "node": ">= 0.8.0" }, - "readme": "node-gyp\n=========\n### Node.js native addon build tool\n\n`node-gyp` is a cross-platform command-line tool written in Node.js for compiling\nnative addon modules for Node.js, which takes away the pain of dealing with the\nvarious differences in build platforms. It is the replacement to the `node-waf`\nprogram which is removed for node `v0.8`. If you have a native addon for node that\nstill has a `wscript` file, then you should definitely add a `binding.gyp` file\nto support the latest versions of node.\n\nMultiple target versions of node are supported (i.e. 
`0.8`, `0.9`, `0.10`, ..., `1.0`,\netc.), regardless of what version of node is actually installed on your system\n(`node-gyp` downloads the necessary development files for the target version).\n\n#### Features:\n\n * Easy to use, consistent interface\n * Same commands to build your module on every platform\n * Supports multiple target versions of Node\n\n\nInstallation\n------------\n\nYou can install with `npm`:\n\n``` bash\n$ npm install -g node-gyp\n```\n\nYou will also need to install:\n\n * On Unix:\n * `python` (`v2.7` recommended, `v3.x.x` is __*not*__ supported)\n * `make`\n * A proper C/C++ compiler toolchain, like GCC\n * On Windows:\n * [Python][windows-python] ([`v2.7.3`][windows-python-v2.7.3] recommended, `v3.x.x` is __*not*__ supported)\n * Windows XP/Vista/7:\n * Microsoft Visual Studio C++ 2010 ([Express][msvc2010] version works well)\n * For 64-bit builds of node and native modules you will _**also**_ need the [Windows 7 64-bit SDK][win7sdk]\n * If the install fails, try uninstalling any C++ 2010 x64&x86 Redistributable that you have installed first.\n * If you get errors that the 64-bit compilers are not installed you may also need the [compiler update for the Windows SDK 7.1]\n * Windows 7/8:\n * Microsoft Visual Studio C++ 2012 for Windows Desktop ([Express][msvc2012] version works well)\n\nNote that OS X is just a flavour of Unix and so needs `python`, `make`, and C/C++.\nAn easy way to obtain these is to install XCode from Apple,\nand then use it to install the command line tools (under Preferences -> Downloads).\n\nHow to Use\n----------\n\nTo compile your native addon, first go to its root directory:\n\n``` bash\n$ cd my_node_addon\n```\n\nThe next step is to generate the appropriate project build files for the current\nplatform. Use `configure` for that:\n\n``` bash\n$ node-gyp configure\n```\n\n__Note__: The `configure` step looks for the `binding.gyp` file in the current\ndirectory to processs. 
See below for instructions on creating the `binding.gyp` file.\n\nNow you will have either a `Makefile` (on Unix platforms) or a `vcxproj` file\n(on Windows) in the `build/` directory. Next invoke the `build` command:\n\n``` bash\n$ node-gyp build\n```\n\nNow you have your compiled `.node` bindings file! The compiled bindings end up\nin `build/Debug/` or `build/Release/`, depending on the build mode. At this point\nyou can require the `.node` file with Node and run your tests!\n\n__Note:__ To create a _Debug_ build of the bindings file, pass the `--debug` (or\n`-d`) switch when running the either `configure` or `build` command.\n\n\nThe \"binding.gyp\" file\n----------------------\n\nPreviously when node had `node-waf` you had to write a `wscript` file. The\nreplacement for that is the `binding.gyp` file, which describes the configuration\nto build your module in a JSON-like format. This file gets placed in the root of\nyour package, alongside the `package.json` file.\n\nA barebones `gyp` file appropriate for building a node addon looks like:\n\n``` python\n{\n \"targets\": [\n {\n \"target_name\": \"binding\",\n \"sources\": [ \"src/binding.cc\" ]\n }\n ]\n}\n```\n\nSome additional resources for writing `gyp` files:\n\n * [\"Hello World\" node addon example](https://github.com/joyent/node/tree/master/test/addons/hello-world)\n * [gyp user documentation](http://code.google.com/p/gyp/wiki/GypUserDocumentation)\n * [gyp input format reference](http://code.google.com/p/gyp/wiki/InputFormatReference)\n * [*\"binding.gyp\" files out in the wild* wiki page](https://github.com/TooTallNate/node-gyp/wiki/%22binding.gyp%22-files-out-in-the-wild)\n\n\nCommands\n--------\n\n`node-gyp` responds to the following commands:\n\n| **Command** | **Description**\n|:--------------|:---------------------------------------------------------------\n| `build` | Invokes `make`/`msbuild.exe` and builds the native addon\n| `clean` | Removes any the `build` dir if it exists\n| `configure` | 
Generates project build files for the current platform\n| `rebuild` | Runs \"clean\", \"configure\" and \"build\" all in a row\n| `install` | Installs node development header files for the given version\n| `list` | Lists the currently installed node development file versions\n| `remove` | Removes the node development header files for the given version\n\n\nLicense\n-------\n\n(The MIT License)\n\nCopyright (c) 2012 Nathan Rajlich <nathan@tootallnate.net>\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n'Software'), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n[windows-python]: http://www.python.org/getit/windows\n[windows-python-v2.7.3]: http://www.python.org/download/releases/2.7.3#download\n[msvc2010]: http://go.microsoft.com/?linkid=9709949\n[msvc2012]: http://go.microsoft.com/?linkid=9816758\n[win7sdk]: http://www.microsoft.com/en-us/download/details.aspx?id=8279\n[compiler update for the Windows SDK 7.1]: http://www.microsoft.com/en-us/download/details.aspx?id=4422\n", - "readmeFilename": "README.md", + "gitHead": 
"1e399b471945b35f3bfbca4a10fba31a6739b5db", "bugs": { "url": "https://github.com/TooTallNate/node-gyp/issues" }, "homepage": "https://github.com/TooTallNate/node-gyp", - "_id": "node-gyp@0.13.0", + "_id": "node-gyp@1.0.2", + "scripts": {}, + "_shasum": "b0bb6d2d762271408dd904853e7aa3000ed2eb57", + "_from": "node-gyp@>=1.0.1-0 <1.1.0-0", + "_npmVersion": "2.0.0-beta.3", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "TooTallNate", + "email": "nathan@tootallnate.net" + }, + { + "name": "tootallnate", + "email": "nathan@tootallnate.net" + }, + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "97765303203579f1445358d4d25d03645811d87d" + "shasum": "b0bb6d2d762271408dd904853e7aa3000ed2eb57", + "tarball": "http://registry.npmjs.org/node-gyp/-/node-gyp-1.0.2.tgz" }, - "_from": "node-gyp@latest", - "_resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-0.13.0.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-1.0.2.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/node-gyp/README.md nodejs-0.11.15/deps/npm/node_modules/node-gyp/README.md --- nodejs-0.11.13/deps/npm/node_modules/node-gyp/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/node-gyp/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,8 @@ ### Node.js native addon build tool `node-gyp` is a cross-platform command-line tool written in Node.js for compiling -native addon modules for Node.js, which takes away the pain of dealing with the +native addon modules for Node.js. It bundles the [gyp](https://code.google.com/p/gyp/) +project used by the Chromium team and takes away the pain of dealing with the various differences in build platforms. It is the replacement to the `node-waf` program which is removed for node `v0.8`. 
If you have a native addon for node that still has a `wscript` file, then you should definitely add a `binding.gyp` file @@ -45,6 +46,21 @@ * Windows 7/8: * Microsoft Visual Studio C++ 2012 for Windows Desktop ([Express][msvc2012] version works well) +If you have multiple Python versions installed, you can identify which Python +version `node-gyp` uses by setting the '--python' variable: + +``` bash +$ node-gyp --python /path/to/python2.7 +``` + +If `node-gyp` is called by way of `npm` *and* you have multiple versions of +Python installed, then you can set `npm`'s 'python' config key to the appropriate +value: + +``` bash +$ npm config set python /path/to/executable/python2.7 +``` + Note that OS X is just a flavour of Unix and so needs `python`, `make`, and C/C++. An easy way to obtain these is to install XCode from Apple, and then use it to install the command line tools (under Preferences -> Downloads). diff -Nru nodejs-0.11.13/deps/npm/node_modules/nopt/bin/nopt.js nodejs-0.11.15/deps/npm/node_modules/nopt/bin/nopt.js --- nodejs-0.11.13/deps/npm/node_modules/nopt/bin/nopt.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/nopt/bin/nopt.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,6 @@ #!/usr/bin/env node var nopt = require("../lib/nopt") + , path = require("path") , types = { num: Number , bool: Boolean , help: Boolean @@ -11,6 +12,7 @@ , clear: Boolean , config: Boolean , length: Number + , file: path } , shorthands = { s: [ "--str", "astring" ] , b: [ "--bool" ] @@ -22,6 +24,7 @@ , n: [ "--num", "125" ] , c: ["--config"] , l: ["--length"] + , f: ["--file"] } , parsed = nopt( types , shorthands diff -Nru nodejs-0.11.13/deps/npm/node_modules/nopt/lib/nopt.js nodejs-0.11.15/deps/npm/node_modules/nopt/lib/nopt.js --- nodejs-0.11.13/deps/npm/node_modules/nopt/lib/nopt.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/nopt/lib/nopt.js 2015-01-20 21:22:17.000000000 +0000 @@ -50,7 +50,7 @@ function clean 
(data, types, typeDefs) { typeDefs = typeDefs || exports.typeDefs var remove = {} - , typeDefault = [false, true, null, String, Number, Array] + , typeDefault = [false, true, null, String, Array] Object.keys(data).forEach(function (k) { if (k === "argv") return @@ -126,6 +126,8 @@ function validatePath (data, k, val) { if (val === true) return false + if (val === null) return true + val = String(val) var homePattern = process.platform === 'win32' ? /^~(\/|\\)/ : /^~\// if (val.match(homePattern) && process.env.HOME) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/nopt/package.json nodejs-0.11.15/deps/npm/node_modules/nopt/package.json --- nodejs-0.11.13/deps/npm/node_modules/nopt/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/nopt/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "nopt", - "version": "2.2.1", + "version": "3.0.1", "description": "Option parsing for Node, supporting types, shorthands, etc. Used by npm.", "author": { "name": "Isaac Z. Schlueter", @@ -28,13 +28,14 @@ "devDependencies": { "tap": "~0.4.8" }, - "readme": "If you want to write an option parser, and have it be good, there are\ntwo ways to do it. The Right Way, and the Wrong Way.\n\nThe Wrong Way is to sit down and write an option parser. We've all done\nthat.\n\nThe Right Way is to write some complex configurable program with so many\noptions that you go half-insane just trying to manage them all, and put\nit off with duct-tape solutions until you see exactly to the core of the\nproblem, and finally snap and write an awesome option parser.\n\nIf you want to write an option parser, don't write an option parser.\nWrite a package manager, or a source control system, or a service\nrestarter, or an operating system. 
You probably won't end up with a\ngood one of those, but if you don't give up, and you are relentless and\ndiligent enough in your procrastination, you may just end up with a very\nnice option parser.\n\n## USAGE\n\n // my-program.js\n var nopt = require(\"nopt\")\n , Stream = require(\"stream\").Stream\n , path = require(\"path\")\n , knownOpts = { \"foo\" : [String, null]\n , \"bar\" : [Stream, Number]\n , \"baz\" : path\n , \"bloo\" : [ \"big\", \"medium\", \"small\" ]\n , \"flag\" : Boolean\n , \"pick\" : Boolean\n , \"many\" : [String, Array]\n }\n , shortHands = { \"foofoo\" : [\"--foo\", \"Mr. Foo\"]\n , \"b7\" : [\"--bar\", \"7\"]\n , \"m\" : [\"--bloo\", \"medium\"]\n , \"p\" : [\"--pick\"]\n , \"f\" : [\"--flag\"]\n }\n // everything is optional.\n // knownOpts and shorthands default to {}\n // arg list defaults to process.argv\n // slice defaults to 2\n , parsed = nopt(knownOpts, shortHands, process.argv, 2)\n console.log(parsed)\n\nThis would give you support for any of the following:\n\n```bash\n$ node my-program.js --foo \"blerp\" --no-flag\n{ \"foo\" : \"blerp\", \"flag\" : false }\n\n$ node my-program.js ---bar 7 --foo \"Mr. Hand\" --flag\n{ bar: 7, foo: \"Mr. Hand\", flag: true }\n\n$ node my-program.js --foo \"blerp\" -f -----p\n{ foo: \"blerp\", flag: true, pick: true }\n\n$ node my-program.js -fp --foofoo\n{ foo: \"Mr. Foo\", flag: true, pick: true }\n\n$ node my-program.js --foofoo -- -fp # -- stops the flag parsing.\n{ foo: \"Mr. 
Foo\", argv: { remain: [\"-fp\"] } }\n\n$ node my-program.js --blatzk -fp # unknown opts are ok.\n{ blatzk: true, flag: true, pick: true }\n\n$ node my-program.js --blatzk=1000 -fp # but you need to use = if they have a value\n{ blatzk: 1000, flag: true, pick: true }\n\n$ node my-program.js --no-blatzk -fp # unless they start with \"no-\"\n{ blatzk: false, flag: true, pick: true }\n\n$ node my-program.js --baz b/a/z # known paths are resolved.\n{ baz: \"/Users/isaacs/b/a/z\" }\n\n# if Array is one of the types, then it can take many\n# values, and will always be an array. The other types provided\n# specify what types are allowed in the list.\n\n$ node my-program.js --many 1 --many null --many foo\n{ many: [\"1\", \"null\", \"foo\"] }\n\n$ node my-program.js --many foo\n{ many: [\"foo\"] }\n```\n\nRead the tests at the bottom of `lib/nopt.js` for more examples of\nwhat this puppy can do.\n\n## Types\n\nThe following types are supported, and defined on `nopt.typeDefs`\n\n* String: A normal string. No parsing is done.\n* path: A file system path. Gets resolved against cwd if not absolute.\n* url: A url. If it doesn't parse, it isn't accepted.\n* Number: Must be numeric.\n* Date: Must parse as a date. If it does, and `Date` is one of the options,\n then it will return a Date object, not a string.\n* Boolean: Must be either `true` or `false`. If an option is a boolean,\n then it does not need a value, and its presence will imply `true` as\n the value. To negate boolean flags, do `--no-whatever` or `--whatever\n false`\n* NaN: Means that the option is strictly not allowed. Any value will\n fail.\n* Stream: An object matching the \"Stream\" class in node. Valuable\n for use when validating programmatically. (npm uses this to let you\n supply any WriteStream on the `outfd` and `logfd` config options.)\n* Array: If `Array` is specified as one of the types, then the value\n will be parsed as a list of options. 
This means that multiple values\n can be specified, and that the value will always be an array.\n\nIf a type is an array of values not on this list, then those are\nconsidered valid values. For instance, in the example above, the\n`--bloo` option can only be one of `\"big\"`, `\"medium\"`, or `\"small\"`,\nand any other value will be rejected.\n\nWhen parsing unknown fields, `\"true\"`, `\"false\"`, and `\"null\"` will be\ninterpreted as their JavaScript equivalents, and numeric values will be\ninterpreted as a number.\n\nYou can also mix types and values, or multiple types, in a list. For\ninstance `{ blah: [Number, null] }` would allow a value to be set to\neither a Number or null. When types are ordered, this implies a\npreference, and the first type that can be used to properly interpret\nthe value will be used.\n\nTo define a new type, add it to `nopt.typeDefs`. Each item in that\nhash is an object with a `type` member and a `validate` method. The\n`type` member is an object that matches what goes in the type list. The\n`validate` method is a function that gets called with `validate(data,\nkey, val)`. Validate methods should assign `data[key]` to the valid\nvalue of `val` if it can be handled properly, or return boolean\n`false` if it cannot.\n\nYou can also call `nopt.clean(data, types, typeDefs)` to clean up a\nconfig object and remove its invalid properties.\n\n## Error Handling\n\nBy default, nopt outputs a warning to standard error when invalid\noptions are found. You can change this behavior by assigning a method\nto `nopt.invalidHandler`. This method will be called with\nthe offending `nopt.invalidHandler(key, val, types)`.\n\nIf no `nopt.invalidHandler` is assigned, then it will console.error\nits whining. If it is assigned to boolean `false` then the warning is\nsuppressed.\n\n## Abbreviations\n\nYes, they are supported. 
If you define options like this:\n\n```javascript\n{ \"foolhardyelephants\" : Boolean\n, \"pileofmonkeys\" : Boolean }\n```\n\nThen this will work:\n\n```bash\nnode program.js --foolhar --pil\nnode program.js --no-f --pileofmon\n# etc.\n```\n\n## Shorthands\n\nShorthands are a hash of shorter option names to a snippet of args that\nthey expand to.\n\nIf multiple one-character shorthands are all combined, and the\ncombination does not unambiguously match any other option or shorthand,\nthen they will be broken up into their constituent parts. For example:\n\n```json\n{ \"s\" : [\"--loglevel\", \"silent\"]\n, \"g\" : \"--global\"\n, \"f\" : \"--force\"\n, \"p\" : \"--parseable\"\n, \"l\" : \"--long\"\n}\n```\n\n```bash\nnpm ls -sgflp\n# just like doing this:\nnpm ls --loglevel silent --global --force --long --parseable\n```\n\n## The Rest of the args\n\nThe config object returned by nopt is given a special member called\n`argv`, which is an object with the following fields:\n\n* `remain`: The remaining args after all the parsing has occurred.\n* `original`: The args as they originally appeared.\n* `cooked`: The args after flags and shorthands are expanded.\n\n## Slicing\n\nNode programs are called with more or less the exact argv as it appears\nin C land, after the v8 and node-specific options have been plucked off.\nAs such, `argv[0]` is always `node` and `argv[1]` is always the\nJavaScript program being run.\n\nThat's usually not very useful to you. So they're sliced off by\ndefault. If you want them, then you can pass in `0` as the last\nargument, or any other number that you'd like to slice off the start of\nthe list.\n", + "readme": "If you want to write an option parser, and have it be good, there are\ntwo ways to do it. The Right Way, and the Wrong Way.\n\nThe Wrong Way is to sit down and write an option parser. 
We've all done\nthat.\n\nThe Right Way is to write some complex configurable program with so many\noptions that you go half-insane just trying to manage them all, and put\nit off with duct-tape solutions until you see exactly to the core of the\nproblem, and finally snap and write an awesome option parser.\n\nIf you want to write an option parser, don't write an option parser.\nWrite a package manager, or a source control system, or a service\nrestarter, or an operating system. You probably won't end up with a\ngood one of those, but if you don't give up, and you are relentless and\ndiligent enough in your procrastination, you may just end up with a very\nnice option parser.\n\n## USAGE\n\n // my-program.js\n var nopt = require(\"nopt\")\n , Stream = require(\"stream\").Stream\n , path = require(\"path\")\n , knownOpts = { \"foo\" : [String, null]\n , \"bar\" : [Stream, Number]\n , \"baz\" : path\n , \"bloo\" : [ \"big\", \"medium\", \"small\" ]\n , \"flag\" : Boolean\n , \"pick\" : Boolean\n , \"many\" : [String, Array]\n }\n , shortHands = { \"foofoo\" : [\"--foo\", \"Mr. Foo\"]\n , \"b7\" : [\"--bar\", \"7\"]\n , \"m\" : [\"--bloo\", \"medium\"]\n , \"p\" : [\"--pick\"]\n , \"f\" : [\"--flag\"]\n }\n // everything is optional.\n // knownOpts and shorthands default to {}\n // arg list defaults to process.argv\n // slice defaults to 2\n , parsed = nopt(knownOpts, shortHands, process.argv, 2)\n console.log(parsed)\n\nThis would give you support for any of the following:\n\n```bash\n$ node my-program.js --foo \"blerp\" --no-flag\n{ \"foo\" : \"blerp\", \"flag\" : false }\n\n$ node my-program.js ---bar 7 --foo \"Mr. Hand\" --flag\n{ bar: 7, foo: \"Mr. Hand\", flag: true }\n\n$ node my-program.js --foo \"blerp\" -f -----p\n{ foo: \"blerp\", flag: true, pick: true }\n\n$ node my-program.js -fp --foofoo\n{ foo: \"Mr. Foo\", flag: true, pick: true }\n\n$ node my-program.js --foofoo -- -fp # -- stops the flag parsing.\n{ foo: \"Mr. 
Foo\", argv: { remain: [\"-fp\"] } }\n\n$ node my-program.js --blatzk -fp # unknown opts are ok.\n{ blatzk: true, flag: true, pick: true }\n\n$ node my-program.js --blatzk=1000 -fp # but you need to use = if they have a value\n{ blatzk: 1000, flag: true, pick: true }\n\n$ node my-program.js --no-blatzk -fp # unless they start with \"no-\"\n{ blatzk: false, flag: true, pick: true }\n\n$ node my-program.js --baz b/a/z # known paths are resolved.\n{ baz: \"/Users/isaacs/b/a/z\" }\n\n# if Array is one of the types, then it can take many\n# values, and will always be an array. The other types provided\n# specify what types are allowed in the list.\n\n$ node my-program.js --many 1 --many null --many foo\n{ many: [\"1\", \"null\", \"foo\"] }\n\n$ node my-program.js --many foo\n{ many: [\"foo\"] }\n```\n\nRead the tests at the bottom of `lib/nopt.js` for more examples of\nwhat this puppy can do.\n\n## Types\n\nThe following types are supported, and defined on `nopt.typeDefs`\n\n* String: A normal string. No parsing is done.\n* path: A file system path. Gets resolved against cwd if not absolute.\n* url: A url. If it doesn't parse, it isn't accepted.\n* Number: Must be numeric.\n* Date: Must parse as a date. If it does, and `Date` is one of the options,\n then it will return a Date object, not a string.\n* Boolean: Must be either `true` or `false`. If an option is a boolean,\n then it does not need a value, and its presence will imply `true` as\n the value. To negate boolean flags, do `--no-whatever` or `--whatever\n false`\n* NaN: Means that the option is strictly not allowed. Any value will\n fail.\n* Stream: An object matching the \"Stream\" class in node. Valuable\n for use when validating programmatically. (npm uses this to let you\n supply any WriteStream on the `outfd` and `logfd` config options.)\n* Array: If `Array` is specified as one of the types, then the value\n will be parsed as a list of options. 
This means that multiple values\n can be specified, and that the value will always be an array.\n\nIf a type is an array of values not on this list, then those are\nconsidered valid values. For instance, in the example above, the\n`--bloo` option can only be one of `\"big\"`, `\"medium\"`, or `\"small\"`,\nand any other value will be rejected.\n\nWhen parsing unknown fields, `\"true\"`, `\"false\"`, and `\"null\"` will be\ninterpreted as their JavaScript equivalents.\n\nYou can also mix types and values, or multiple types, in a list. For\ninstance `{ blah: [Number, null] }` would allow a value to be set to\neither a Number or null. When types are ordered, this implies a\npreference, and the first type that can be used to properly interpret\nthe value will be used.\n\nTo define a new type, add it to `nopt.typeDefs`. Each item in that\nhash is an object with a `type` member and a `validate` method. The\n`type` member is an object that matches what goes in the type list. The\n`validate` method is a function that gets called with `validate(data,\nkey, val)`. Validate methods should assign `data[key]` to the valid\nvalue of `val` if it can be handled properly, or return boolean\n`false` if it cannot.\n\nYou can also call `nopt.clean(data, types, typeDefs)` to clean up a\nconfig object and remove its invalid properties.\n\n## Error Handling\n\nBy default, nopt outputs a warning to standard error when invalid\noptions are found. You can change this behavior by assigning a method\nto `nopt.invalidHandler`. This method will be called with\nthe offending `nopt.invalidHandler(key, val, types)`.\n\nIf no `nopt.invalidHandler` is assigned, then it will console.error\nits whining. If it is assigned to boolean `false` then the warning is\nsuppressed.\n\n## Abbreviations\n\nYes, they are supported. 
If you define options like this:\n\n```javascript\n{ \"foolhardyelephants\" : Boolean\n, \"pileofmonkeys\" : Boolean }\n```\n\nThen this will work:\n\n```bash\nnode program.js --foolhar --pil\nnode program.js --no-f --pileofmon\n# etc.\n```\n\n## Shorthands\n\nShorthands are a hash of shorter option names to a snippet of args that\nthey expand to.\n\nIf multiple one-character shorthands are all combined, and the\ncombination does not unambiguously match any other option or shorthand,\nthen they will be broken up into their constituent parts. For example:\n\n```json\n{ \"s\" : [\"--loglevel\", \"silent\"]\n, \"g\" : \"--global\"\n, \"f\" : \"--force\"\n, \"p\" : \"--parseable\"\n, \"l\" : \"--long\"\n}\n```\n\n```bash\nnpm ls -sgflp\n# just like doing this:\nnpm ls --loglevel silent --global --force --long --parseable\n```\n\n## The Rest of the args\n\nThe config object returned by nopt is given a special member called\n`argv`, which is an object with the following fields:\n\n* `remain`: The remaining args after all the parsing has occurred.\n* `original`: The args as they originally appeared.\n* `cooked`: The args after flags and shorthands are expanded.\n\n## Slicing\n\nNode programs are called with more or less the exact argv as it appears\nin C land, after the v8 and node-specific options have been plucked off.\nAs such, `argv[0]` is always `node` and `argv[1]` is always the\nJavaScript program being run.\n\nThat's usually not very useful to you. So they're sliced off by\ndefault. 
If you want them, then you can pass in `0` as the last\nargument, or any other number that you'd like to slice off the start of\nthe list.\n", "readmeFilename": "README.md", + "gitHead": "4296f7aba7847c198fea2da594f9e1bec02817ec", "bugs": { "url": "https://github.com/isaacs/nopt/issues" }, "homepage": "https://github.com/isaacs/nopt", - "_id": "nopt@2.2.1", - "_shasum": "2aa09b7d1768487b3b89a9c5aa52335bff0baea7", + "_id": "nopt@3.0.1", + "_shasum": "bce5c42446a3291f47622a370abbf158fbbacbfd", "_from": "nopt@latest" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/nopt/README.md nodejs-0.11.15/deps/npm/node_modules/nopt/README.md --- nodejs-0.11.13/deps/npm/node_modules/nopt/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/nopt/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -116,8 +116,7 @@ and any other value will be rejected. When parsing unknown fields, `"true"`, `"false"`, and `"null"` will be -interpreted as their JavaScript equivalents, and numeric values will be -interpreted as a number. +interpreted as their JavaScript equivalents. You can also mix types and values, or multiple types, in a list. 
For instance `{ blah: [Number, null] }` would allow a value to be set to diff -Nru nodejs-0.11.13/deps/npm/node_modules/nopt/test/basic.js nodejs-0.11.15/deps/npm/node_modules/nopt/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/nopt/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/nopt/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -23,6 +23,14 @@ t.end() }) +// https://github.com/npm/nopt/issues/24 +test("Unknown options are not parsed as numbers", function (t) { + var parsed = nopt({"parse-me": Number}, null, ['--leave-as-is=1.20', '--parse-me=1.20'], 0) + t.equal(parsed['leave-as-is'], '1.20') + t.equal(parsed['parse-me'], 1.2) + t.end() +}); + test("other tests", function (t) { var util = require("util") @@ -170,7 +178,7 @@ ,{t:["true"]} ,[]] ,["-aoa one -aoa null -aoa 100" - ,{aoa:["one", null, 100]} + ,{aoa:["one", null, '100']} ,[]] ,["-str 100" ,{str:"100"} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/AUTHORS nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/AUTHORS --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/AUTHORS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/AUTHORS 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +# Names sorted by how much code was originally theirs. +Isaac Z. 
Schlueter +Meryn Stol +Robert Kowalski \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/core_module_names.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/core_module_names.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/core_module_names.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/core_module_names.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +[ +"http", +"events", +"util", +"domain", +"cluster", +"buffer", +"stream", +"crypto", +"tls", +"fs", +"string_decoder", +"path", +"net", +"dgram", +"dns", +"https", +"url", +"punycode", +"readline", +"repl", +"vm", +"child_process", +"assert", +"zlib", +"tty", +"os", +"querystring" +] diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/extract_description.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/extract_description.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/extract_description.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/extract_description.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +module.exports = extractDescription + +// Extracts description from contents of a readme file in markdown format +function extractDescription (d) { + if (!d) return; + if (d === "ERROR: No README data found!") return; + // the first block of text before the first heading + // that isn't the first line heading + d = d.trim().split('\n') + for (var s = 0; d[s] && d[s].trim().match(/^(#|$)/); s ++); + var l = d.length + for (var e = s + 1; e < l && d[e].trim(); e ++); + return d.slice(s, e).join(' ').trim() +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/fixer.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/fixer.js --- 
nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/fixer.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/fixer.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,404 @@ +var semver = require("semver") +var parseGitHubURL = require("github-url-from-git") +var depTypes = ["dependencies","devDependencies","optionalDependencies"] +var extractDescription = require("./extract_description") +var url = require("url") +var typos = require("./typos") +var coreModuleNames = require("./core_module_names") +var githubUserRepo = require("github-url-from-username-repo") + +var fixer = module.exports = { + // default warning function + warn: function() {}, + + fixRepositoryField: function(data) { + if (data.repositories) { + this.warn("repositories"); + data.repository = data.repositories[0] + } + if (!data.repository) return this.warn("missingRepository") + if (typeof data.repository === "string") { + data.repository = { + type: "git", + url: data.repository + } + } + var r = data.repository.url || "" + if (r) { + var ghurl = parseGitHubURL(r) + if (ghurl) { + r = ghurl.replace(/^https?:\/\//, 'git://') + } else if (githubUserRepo(r)) { + // repo has 'user/reponame' filled in as repo + data.repository.url = githubUserRepo(r) + } + } + + if (r.match(/github.com\/[^\/]+\/[^\/]+\.git\.git$/)) { + this.warn("brokenGitUrl", r) + } + } + +, fixTypos: function(data) { + Object.keys(typos.topLevel).forEach(function (d) { + if (data.hasOwnProperty(d)) { + this.warn("typo", d, typos.topLevel[d]) + } + }, this) + } + +, fixScriptsField: function(data) { + if (!data.scripts) return + if (typeof data.scripts !== "object") { + this.warn("nonObjectScripts") + delete data.scripts + } + Object.keys(data.scripts).forEach(function (k) { + if (typeof data.scripts[k] !== "string") { + this.warn("nonStringScript") + delete data.scripts[k] + } else if (typos.script[k]) { + this.warn("typo", k, typos.script[k], "scripts") + } + }, 
this) + } + +, fixFilesField: function(data) { + var files = data.files + if (files && !Array.isArray(files)) { + this.warn("nonArrayFiles") + delete data.files + } else if (data.files) { + data.files = data.files.filter(function(file) { + if (!file || typeof file !== "string") { + this.warn("invalidFilename", file) + return false + } else { + return true + } + }, this) + } + } + +, fixBinField: function(data) { + if (!data.bin) return; + if (typeof data.bin === "string") { + var b = {} + b[data.name] = data.bin + data.bin = b + } + } + +, fixManField: function(data) { + if (!data.man) return; + if (typeof data.man === "string") { + data.man = [ data.man ] + } + } +, fixBundleDependenciesField: function(data) { + var bdd = "bundledDependencies" + var bd = "bundleDependencies" + if (data[bdd] && !data[bd]) { + data[bd] = data[bdd] + delete data[bdd] + } + if (data[bd] && !Array.isArray(data[bd])) { + this.warn("nonArrayBundleDependencies") + delete data[bd] + } else if (data[bd]) { + data[bd] = data[bd].filter(function(bd) { + if (!bd || typeof bd !== 'string') { + this.warn("nonStringBundleDependency", bd) + return false + } else { + if (!data.dependencies) { + data.dependencies = {} + } + if (!data.dependencies.hasOwnProperty(bd)) { + this.warn("nonDependencyBundleDependency", bd) + data.dependencies[bd] = "*" + } + return true + } + }, this) + } + } + +, fixDependencies: function(data, strict) { + var loose = !strict + objectifyDeps(data, this.warn) + addOptionalDepsToDeps(data, this.warn) + this.fixBundleDependenciesField(data) + + ;['dependencies','devDependencies'].forEach(function(deps) { + if (!(deps in data)) return + if (!data[deps] || typeof data[deps] !== "object") { + this.warn("nonObjectDependencies", deps) + delete data[deps] + return + } + Object.keys(data[deps]).forEach(function (d) { + var r = data[deps][d] + if (typeof r !== 'string') { + this.warn("nonStringDependency", d, JSON.stringify(r)) + delete data[deps][d] + } + // "/" is not allowed as 
packagename for publishing, but for git-urls + // normalize shorthand-urls + if (githubUserRepo(data[deps][d])) { + data[deps][d] = 'git+' + githubUserRepo(data[deps][d]) + } + }, this) + }, this) + } + +, fixModulesField: function (data) { + if (data.modules) { + this.warn("deprecatedModules") + delete data.modules + } + } + +, fixKeywordsField: function (data) { + if (typeof data.keywords === "string") { + data.keywords = data.keywords.split(/,\s+/) + } + if (data.keywords && !Array.isArray(data.keywords)) { + delete data.keywords + this.warn("nonArrayKeywords") + } else if (data.keywords) { + data.keywords = data.keywords.filter(function(kw) { + if (typeof kw !== "string" || !kw) { + this.warn("nonStringKeyword"); + return false + } else { + return true + } + }, this) + } + } + +, fixVersionField: function(data, strict) { + // allow "loose" semver 1.0 versions in non-strict mode + // enforce strict semver 2.0 compliance in strict mode + var loose = !strict + if (!data.version) { + data.version = "" + return true + } + if (!semver.valid(data.version, loose)) { + throw new Error('Invalid version: "'+ data.version + '"') + } + data.version = semver.clean(data.version, loose) + return true + } + +, fixPeople: function(data) { + modifyPeople(data, unParsePerson) + modifyPeople(data, parsePerson) + } + +, fixNameField: function(data, strict) { + if (!data.name && !strict) { + data.name = "" + return + } + if (typeof data.name !== "string") { + throw new Error("name field must be a string.") + } + if (!strict) + data.name = data.name.trim() + ensureValidName(data.name, strict) + if (coreModuleNames.indexOf(data.name) !== -1) + this.warn("conflictingName", data.name) + } + + +, fixDescriptionField: function (data) { + if (data.description && typeof data.description !== 'string') { + this.warn("nonStringDescription") + delete data.description + } + if (data.readme && !data.description) + data.description = extractDescription(data.readme) + if(data.description === 
undefined) delete data.description; + if (!data.description) this.warn("missingDescription") + } + +, fixReadmeField: function (data) { + if (!data.readme) { + this.warn("missingReadme") + data.readme = "ERROR: No README data found!" + } + } + +, fixBugsField: function(data) { + if (!data.bugs && data.repository && data.repository.url) { + var gh = parseGitHubURL(data.repository.url) + if(gh) { + if(gh.match(/^https:\/\/github.com\//)) + data.bugs = {url: gh + "/issues"} + else // gist url + data.bugs = {url: gh} + } + } + else if(data.bugs) { + var emailRe = /^.+@.*\..+$/ + if(typeof data.bugs == "string") { + if(emailRe.test(data.bugs)) + data.bugs = {email:data.bugs} + else if(url.parse(data.bugs).protocol) + data.bugs = {url: data.bugs} + else + this.warn("nonEmailUrlBugsString") + } + else { + bugsTypos(data.bugs, this.warn) + var oldBugs = data.bugs + data.bugs = {} + if(oldBugs.url) { + if(typeof(oldBugs.url) == "string" && url.parse(oldBugs.url).protocol) + data.bugs.url = oldBugs.url + else + this.warn("nonUrlBugsUrlField") + } + if(oldBugs.email) { + if(typeof(oldBugs.email) == "string" && emailRe.test(oldBugs.email)) + data.bugs.email = oldBugs.email + else + this.warn("nonEmailBugsEmailField") + } + } + if(!data.bugs.email && !data.bugs.url) { + delete data.bugs + this.warn("emptyNormalizedBugs") + } + } + } + +, fixHomepageField: function(data) { + if (!data.homepage && data.repository && data.repository.url) { + var gh = parseGitHubURL(data.repository.url) + if (gh) + data.homepage = gh + else + return true + } else if (!data.homepage) + return true + + if(typeof data.homepage !== "string") { + this.warn("nonUrlHomepage") + return delete data.homepage + } + if(!url.parse(data.homepage).protocol) { + this.warn("missingProtocolHomepage") + data.homepage = "http://" + data.homepage + } + } +} + +function isValidScopedPackageName(spec) { + if (spec.charAt(0) !== '@') return false + + var rest = spec.slice(1).split('/') + if (rest.length !== 2) return 
false + + return rest[0] && rest[1] && + rest[0] === encodeURIComponent(rest[0]) && + rest[1] === encodeURIComponent(rest[1]) +} + +function isCorrectlyEncodedName(spec) { + return !spec.match(/[\/@\s\+%:]/) && + spec === encodeURIComponent(spec) +} + +function ensureValidName (name, strict) { + if (name.charAt(0) === "." || + !(isValidScopedPackageName(name) || isCorrectlyEncodedName(name)) || + (strict && name !== name.toLowerCase()) || + name.toLowerCase() === "node_modules" || + name.toLowerCase() === "favicon.ico") { + throw new Error("Invalid name: " + JSON.stringify(name)) + } +} + +function modifyPeople (data, fn) { + if (data.author) data.author = fn(data.author) + ;["maintainers", "contributors"].forEach(function (set) { + if (!Array.isArray(data[set])) return; + data[set] = data[set].map(fn) + }) + return data +} + +function unParsePerson (person) { + if (typeof person === "string") return person + var name = person.name || "" + var u = person.url || person.web + var url = u ? (" ("+u+")") : "" + var e = person.email || person.mail + var email = e ? 
(" <"+e+">") : "" + return name+email+url +} + +function parsePerson (person) { + if (typeof person !== "string") return person + var name = person.match(/^([^\(<]+)/) + var url = person.match(/\(([^\)]+)\)/) + var email = person.match(/<([^>]+)>/) + var obj = {} + if (name && name[0].trim()) obj.name = name[0].trim() + if (email) obj.email = email[1]; + if (url) obj.url = url[1]; + return obj +} + +function addOptionalDepsToDeps (data, warn) { + var o = data.optionalDependencies + if (!o) return; + var d = data.dependencies || {} + Object.keys(o).forEach(function (k) { + d[k] = o[k] + }) + data.dependencies = d +} + +function depObjectify (deps, type, warn) { + if (!deps) return {} + if (typeof deps === "string") { + deps = deps.trim().split(/[\n\r\s\t ,]+/) + } + if (!Array.isArray(deps)) return deps + warn("deprecatedArrayDependencies", type) + var o = {} + deps.filter(function (d) { + return typeof d === "string" + }).forEach(function(d) { + d = d.trim().split(/(:?[@\s><=])/) + var dn = d.shift() + var dv = d.join("") + dv = dv.trim() + dv = dv.replace(/^@/, "") + o[dn] = dv + }) + return o +} + +function objectifyDeps (data, warn) { + depTypes.forEach(function (type) { + if (!data[type]) return; + data[type] = depObjectify(data[type], type, warn) + }) +} + +function bugsTypos(bugs, warn) { + if (!bugs) return + Object.keys(bugs).forEach(function (k) { + if (typos.bugs[k]) { + warn("typo", k, typos.bugs[k], "bugs") + bugs[typos.bugs[k]] = bugs[k] + delete bugs[k] + } + }) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/make_warning.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/make_warning.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/make_warning.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/make_warning.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,23 @@ +var util = require("util") +var messages = 
require("./warning_messages.json") + +module.exports = function() { + var args = Array.prototype.slice.call(arguments, 0) + var warningName = args.shift() + if (warningName == "typo") { + return makeTypoWarning.apply(null,args) + } + else { + var msgTemplate = messages[warningName] ? messages[warningName] : warningName + ": '%s'" + args.unshift(msgTemplate) + return util.format.apply(null, args) + } +} + +function makeTypoWarning (providedName, probableName, field) { + if (field) { + providedName = field + "['" + providedName + "']" + probableName = field + "['" + probableName + "']" + } + return util.format(messages.typo, providedName, probableName) +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/normalize.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/normalize.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/normalize.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/normalize.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +module.exports = normalize + +var fixer = require("./fixer") +var makeWarning = require("./make_warning") + +var fieldsToFix = ['name','version','description','repository','modules','scripts' + ,'files','bin','man','bugs','keywords','readme','homepage'] +var otherThingsToFix = ['dependencies','people', 'typos'] + +var thingsToFix = fieldsToFix.map(function(fieldName) { + return ucFirst(fieldName) + "Field" +}) +// two ways to do this in CoffeeScript on only one line, sub-70 chars: +// thingsToFix = fieldsToFix.map (name) -> ucFirst(name) + "Field" +// thingsToFix = (ucFirst(name) + "Field" for name in fieldsToFix) +thingsToFix = thingsToFix.concat(otherThingsToFix) + +function normalize (data, warn, strict) { + if(warn === true) warn = null, strict = true + if(!strict) strict = false + if(!warn || data.private) warn = function(msg) { /* noop */ } + + if (data.scripts && + 
data.scripts.install === "node-gyp rebuild" && + !data.scripts.preinstall) { + data.gypfile = true + } + fixer.warn = function() { warn(makeWarning.apply(null, arguments)) } + thingsToFix.forEach(function(thingName) { + fixer["fix" + ucFirst(thingName)](data, strict) + }) + data._id = data.name + "@" + data.version +} + +function ucFirst (string) { + return string.charAt(0).toUpperCase() + string.slice(1); +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/safe_format.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/safe_format.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/safe_format.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/safe_format.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +var util = require('util') + +module.exports = function() { + var args = Array.prototype.slice.call(arguments, 0) + args.forEach(function(arg) { + if (!arg) throw new TypeError('Bad arguments.') + }) + return util.format.apply(null, arguments) +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/typos.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/typos.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/typos.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/typos.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "topLevel": { + "dependancies": "dependencies" + ,"dependecies": "dependencies" + ,"depdenencies": "dependencies" + ,"devEependencies": "devDependencies" + ,"depends": "dependencies" + ,"dev-dependencies": "devDependencies" + ,"devDependences": "devDependencies" + ,"devDepenencies": "devDependencies" + ,"devdependencies": "devDependencies" + ,"repostitory": "repository" + ,"repo": "repository" + ,"prefereGlobal": "preferGlobal" + ,"hompage": "homepage" + ,"hampage": 
"homepage" + ,"autohr": "author" + ,"autor": "author" + ,"contributers": "contributors" + ,"publicationConfig": "publishConfig" + ,"script": "scripts" + }, + "bugs": { "web": "url", "name": "url" }, + "script": { "server": "start", "tests": "test" } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/warning_messages.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/warning_messages.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/lib/warning_messages.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/lib/warning_messages.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "repositories": "'repositories' (plural) Not supported. Please pick one as the 'repository' field" + ,"missingRepository": "No repository field." + ,"brokenGitUrl": "Probably broken git url: %s" + ,"nonObjectScripts": "scripts must be an object" + ,"nonStringScript": "script values must be string commands" + ,"nonArrayFiles": "Invalid 'files' member" + ,"invalidFilename": "Invalid filename in 'files' list: %s" + ,"nonArrayBundleDependencies": "Invalid 'bundleDependencies' list. Must be array of package names" + ,"nonStringBundleDependency": "Invalid bundleDependencies member: %s" + ,"nonDependencyBundleDependency": "Non-dependency in bundleDependencies: %s" + ,"nonObjectDependencies": "%s field must be an object" + ,"nonStringDependency": "Invalid dependency: %s %s" + ,"deprecatedArrayDependencies": "specifying %s as array is deprecated" + ,"deprecatedModules": "modules field is deprecated" + ,"nonArrayKeywords": "keywords should be an array of strings" + ,"nonStringKeyword": "keywords should be an array of strings" + ,"conflictingName": "%s is also the name of a node core module." 
+ ,"nonStringDescription": "'description' field should be a string" + ,"missingDescription": "No description" + ,"missingReadme": "No README data" + ,"nonEmailUrlBugsString": "Bug string field must be url, email, or {email,url}" + ,"nonUrlBugsUrlField": "bugs.url field must be a string url. Deleted." + ,"nonEmailBugsEmailField": "bugs.email field must be a string email. Deleted." + ,"emptyNormalizedBugs": "Normalized value of bugs field is an empty object. Deleted." + ,"nonUrlHomepage": "homepage field must be a string url. Deleted." + ,"missingProtocolHomepage": "homepage field must start with a protocol." + ,"typo": "%s should probably be %s." +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/LICENSE nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,30 @@ +This package contains code originally written by Isaac Z. Schlueter. +Used with permission. + +Copyright (c) Meryn Stol ("Author") +All rights reserved. + +The BSD License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/.npmignore nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +/node_modules/ \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/package.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/package.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,75 @@ +{ + "name": "normalize-package-data", + "version": "1.0.3", + "author": { + "name": "Meryn Stol", + "email": "merynstol@gmail.com" + }, + "description": "Normalizes data that can be found in package.json files.", + "repository": { + "type": "git", + "url": "git://github.com/meryn/normalize-package-data.git" + }, + "main": "lib/normalize.js", + "scripts": { + "test": "tap test/*.js" + }, + "dependencies": { + "github-url-from-git": "^1.3.0", + "github-url-from-username-repo": "^1.0.0", + "semver": "2 || 3 || 4" + }, + "devDependencies": { + "tap": "~0.2.5", + "underscore": "~1.4.4", + "async": "~0.9.0" + }, + "contributors": [ + { + "name": "Isaac 
Z. Schlueter", + "email": "i@izs.me" + }, + { + "name": "Meryn Stol", + "email": "merynstol@gmail.com" + }, + { + "name": "Robert Kowalski", + "email": "rok@kowalski.gd" + } + ], + "gitHead": "8c30091c83b1a41e113757148c4543ef61ff863d", + "bugs": { + "url": "https://github.com/meryn/normalize-package-data/issues" + }, + "homepage": "https://github.com/meryn/normalize-package-data", + "_id": "normalize-package-data@1.0.3", + "_shasum": "8be955b8907af975f1a4584ea8bb9b41492312f5", + "_from": "normalize-package-data@>=1.0.3 <1.1.0", + "_npmVersion": "2.1.0", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "meryn", + "email": "merynstol@gmail.com" + }, + { + "name": "isaacs", + "email": "i@izs.me" + }, + { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + } + ], + "dist": { + "shasum": "8be955b8907af975f1a4584ea8bb9b41492312f5", + "tarball": "http://registry.npmjs.org/normalize-package-data/-/normalize-package-data-1.0.3.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-1.0.3.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/README.md nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/README.md --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,101 @@ +# normalize-package-data [![Build Status](https://travis-ci.org/meryn/normalize-package-data.png?branch=master)](https://travis-ci.org/meryn/normalize-package-data) + +normalize-package data exports a function that normalizes package metadata. This data is typically found in a package.json file, but in principle could come from any source - for example the npm registry. 
+ +normalize-package-data is used by [read-package-json](https://npmjs.org/package/read-package-json) to normalize the data it reads from a package.json file. In turn, read-package-json is used by [npm](https://npmjs.org/package/npm) and various npm-related tools. + +## Installation + +``` +npm install normalize-package-data +``` + +## Usage + +Basic usage is really simple. You call the function that normalize-package-data exports. Let's call it `normalizeData`. + +```javascript +normalizeData = require('normalize-package-data') +packageData = fs.readFileSync("package.json") +normalizeData(packageData) +// packageData is now normalized +``` + +#### Strict mode + +You may activate strict validation by passing true as the second argument. + +```javascript +normalizeData = require('normalize-package-data') +packageData = fs.readFileSync("package.json") +warnFn = function(msg) { console.error(msg) } +normalizeData(packageData, true) +// packageData is now normalized +``` + +If strict mode is activated, only Semver 2.0 version strings are accepted. Otherwise, Semver 1.0 strings are accepted as well. Packages must have a name, and the name field must not contain leading or trailing whitespace. + +#### Warnings + +Optionally, you may pass a "warning" function. It gets called whenever the `normalizeData` function encounters something that doesn't look right. It indicates less than perfect input data. + +```javascript +normalizeData = require('normalize-package-data') +packageData = fs.readFileSync("package.json") +warnFn = function(msg) { console.error(msg) } +normalizeData(packageData, warnFn) +// packageData is now normalized. Any number of warnings may have been logged. +``` + +You may combine strict validation with warnings by passing `true` as the second argument, and `warnFn` as third. + +When `private` field is set to `true`, warnings will be suppressed. 
+ +### Potential exceptions + +If the supplied data has an invalid name or version field, `normalizeData` will throw an error. Depending on where you call `normalizeData`, you may want to catch these errors so you can pass them to a callback. + +## What normalization (currently) entails + +* The value of `name` field gets trimmed (unless in strict mode). +* The value of the `version` field gets cleaned by `semver.clean`. See [documentation for the semver module](https://github.com/isaacs/node-semver). +* If `name` and/or `version` fields are missing, they are set to empty strings. +* If `files` field is not an array, it will be removed. +* If `bin` field is a string, then `bin` field will become an object with `name` set to the value of the `name` field, and `bin` set to the original string value. +* If `man` field is a string, it will become an array with the original string as its sole member. +* If `keywords` field is string, it is considered to be a list of keywords separated by one or more white-space characters. It gets converted to an array by splitting on `\s+`. +* All people fields (`author`, `maintainers`, `contributors`) get converted into objects with name, email and url properties. +* If `bundledDependencies` field (a typo) exists and `bundleDependencies` field does not, `bundledDependencies` will get renamed to `bundleDependencies`. +* If the value of any of the dependencies fields (`dependencies`, `devDependencies`, `optionalDependencies`) is a string, it gets converted into an object with familiar `name=>value` pairs. +* The values in `optionalDependencies` get added to `dependencies`. The `optionalDependencies` array is left untouched. +* If `description` field does not exist, but `readme` field does, then (more or less) the first paragraph of text that's found in the readme is taken as value for `description`. +* If `repository` field is a string, it will become an object with `url` set to the original string value, and `type` set to `"git"`. 
+* If `repository.url` is not a valid url, but in the style of "[owner-name]/[repo-name]", `repository.url` will be set to git://github.com/[owner-name]/[repo-name] +* If `bugs` field is a string, the value of `bugs` field is changed into an object with `url` set to the original string value. +* If `bugs` field does not exist, but `repository` field points to a repository hosted on GitHub, the value of the `bugs` field gets set to an url in the form of https://github.com/[owner-name]/[repo-name]/issues . If the repository field points to a GitHub Gist repo url, the associated http url is chosen. +* If `bugs` field is an object, the resulting value only has email and url properties. If email and url properties are not strings, they are ignored. If no valid values for either email or url is found, bugs field will be removed. +* If `homepage` field is not a string, it will be removed. +* If the url in the `homepage` field does not specify a protocol, then http is assumed. For example, `myproject.org` will be changed to `http://myproject.org`. +* If `homepage` field does not exist, but `repository` field points to a repository hosted on GitHub, the value of the `homepage` field gets set to an url in the form of https://github.com/[owner-name]/[repo-name]/ . If the repository field points to a GitHub Gist repo url, the associated http url is chosen. + +### Rules for name field + +If `name` field is given, the value of the name field must be a string. The string may not: + +* start with a period. +* contain the following characters: `/@\s+%` +* contain any characters that would need to be encoded for use in urls. +* resemble the word `node_modules` or `favicon.ico` (case doesn't matter). + +### Rules for version field + +If `version` field is given, the value of the version field must be a valid *semver* string, as determined by the `semver.valid` method. See [documentation for the semver module](https://github.com/isaacs/node-semver). 
+ +## Credits + +This package contains code based on read-package-json written by Isaac Z. Schlueter. Used with permission. + +## License + +normalize-package-data is released under the [BSD 2-Clause License](http://opensource.org/licenses/MIT). +Copyright (c) 2013 Meryn Stol diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/basic.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,34 @@ +var tap = require("tap") +var normalize = require("../lib/normalize") +var path = require("path") +var fs = require("fs") + +tap.test("basic test", function (t) { + var p = path.resolve(__dirname, "./fixtures/read-package-json.json") + fs.readFile (p, function (err, contents) { + if (err) throw err; + var originalData = JSON.parse(contents.toString()) + var data = JSON.parse(contents.toString()) + normalize(data) + t.ok(data) + verifyFields(t, data, originalData) + t.end() + }) +}) + +function verifyFields (t, normalized, original) { + t.equal(normalized.version, original.version, "Version field stays same") + t.equal(normalized._id, normalized.name + "@" + normalized.version, "It gets good id.") + t.equal(normalized.name, original.name, "Name stays the same.") + t.type(normalized.author, "object", "author field becomes object") + t.deepEqual(normalized.scripts, original.scripts, "scripts field (object) stays same") + t.equal(normalized.main, original.main) + // optional deps are folded in. 
+ t.deepEqual(normalized.optionalDependencies, + original.optionalDependencies) + t.has(normalized.dependencies, original.optionalDependencies, "opt depedencies are copied into dependencies") + t.has(normalized.dependencies, original.dependencies, "regular depedencies stay in place") + t.deepEqual(normalized.devDependencies, original.devDependencies) + t.type(normalized.bugs, "object", "bugs should become object") + t.equal(normalized.bugs.url, "https://github.com/isaacs/read-package-json/issues") +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/consistency.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/consistency.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/consistency.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/consistency.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +var tap = require("tap") +var normalize = require("../lib/normalize") +var path = require("path") +var fs = require("fs") +var _ = require("underscore") +var async = require("async") + +var data, clonedData +var warn + +tap.test("consistent normalization", function(t) { + path.resolve(__dirname, "./fixtures/read-package-json.json") + fs.readdir (__dirname + "/fixtures", function (err, entries) { + // entries = ['coffee-script.json'] // uncomment to limit to a specific file + verifyConsistency = function(entryName, next) { + warn = function(msg) { + // t.equal("",msg) // uncomment to have some kind of logging of warnings + } + filename = __dirname + "/fixtures/" + entryName + fs.readFile(filename, function(err, contents) { + if (err) return next(err) + data = JSON.parse(contents.toString()) + normalize(data, warn) + clonedData = _.clone(data) + normalize(data, warn) + t.deepEqual(clonedData, data, + "Normalization of " + entryName + " is consistent.") + next(null) + }) // fs.readFile + } // verifyConsistency + async.forEach(entries, 
verifyConsistency, function(err) { + if (err) throw err + t.end() + }) + }) // fs.readdir +}) // tap.test \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/dependencies.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/dependencies.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/dependencies.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/dependencies.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +var tap = require("tap") +var normalize = require("../lib/normalize") + +var warningMessages = require("../lib/warning_messages.json") +var safeFormat = require("../lib/safe_format") + +tap.test("warn if dependency contains anything else but a string", function(t) { + var a + var warnings = [] + function warn(w) { + warnings.push(w) + } + normalize(a={ + dependencies: { "a": 123}, + devDependencies: { "b": 456}, + optionalDependencies: { "c": 789} + }, warn) + + var wanted1 = safeFormat(warningMessages.nonStringDependency, "a", 123) + var wanted2 = safeFormat(warningMessages.nonStringDependency, "b", 456) + var wanted3 = safeFormat(warningMessages.nonStringDependency, "c", 789) + t.ok(~warnings.indexOf(wanted1), wanted1) + t.ok(~warnings.indexOf(wanted2), wanted2) + t.ok(~warnings.indexOf(wanted3), wanted3) + t.end() +}) + +tap.test("warn if bundleDependencies array contains anything else but strings", function(t) { + var a + var warnings = [] + function warn(w) { + warnings.push(w) + } + normalize(a={ + bundleDependencies: ["abc", 123, {foo:"bar"}] + }, warn) + + var wanted1 = safeFormat(warningMessages.nonStringBundleDependency, 123) + var wanted2 = safeFormat(warningMessages.nonStringBundleDependency, {foo:"bar"}) + var wanted2 = safeFormat(warningMessages.nonDependencyBundleDependency, "abc") + t.ok(~warnings.indexOf(wanted1), wanted1) + t.ok(~warnings.indexOf(wanted2), wanted2) + t.end() +}) diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/async.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/async.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/async.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/async.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +{ + "name": "async", + "description": "Higher-order functions and common patterns for asynchronous code", + "main": "./lib/async", + "author": "Caolan McMahon", + "version": "0.2.6", + "repository" : { + "type" : "git", + "url" : "http://github.com/caolan/async.git" + }, + "bugs" : { + "url" : "http://github.com/caolan/async/issues" + }, + "licenses" : [ + { + "type" : "MIT", + "url" : "http://github.com/caolan/async/raw/master/LICENSE" + } + ], + "devDependencies": { + "nodeunit": ">0.0.0", + "uglify-js": "1.2.x", + "nodelint": ">0.0.0" + }, + "jam": { + "main": "lib/async.js", + "include": [ + "lib/async.js", + "README.md", + "LICENSE" + ] + }, + "scripts": { + "test": "nodeunit test/test-async.js" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/bcrypt.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/bcrypt.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/bcrypt.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/bcrypt.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,56 @@ +{ + "name": "bcrypt", + "description": "A bcrypt library for NodeJS.", + "keywords": [ + "bcrypt", + "password", + "auth", + "authentication", + "encryption", + "crypt", + "crypto" + ], + "main": "./bcrypt", + "version": "0.7.5", + "author": "Nick Campbell (http://github.com/ncb000gt)", + "engines": { + "node": ">= 0.6.0" + }, + "repository": { + "type": "git", 
+ "url": "http://github.com/ncb000gt/node.bcrypt.js.git" + }, + "licenses": [ + { + "type": "MIT" + } + ], + "bugs": { + "url": "http://github.com/ncb000gt/node.bcrypt.js/issues" + }, + "scripts": { + "test": "node-gyp configure build && nodeunit test" + }, + "dependencies": { + "bindings": "1.0.0" + }, + "devDependencies": { + "nodeunit": ">=0.6.4" + }, + "contributors": [ + "Antonio Salazar Cardozo (https://github.com/Shadowfiend)", + "Van Nguyen (https://github.com/thegoleffect)", + "David Trejo (https://github.com/dtrejo)", + "Ben Glow (https://github.com/pixelglow)", + "NewITFarmer.com <> (https://github.com/newitfarmer)", + "Alfred Westerveld (https://github.com/alfredwesterveld)", + "Vincent Côté-Roy (https://github.com/vincentcr)", + "Lloyd Hilaiel (https://github.com/lloyd)", + "Roman Shtylman (https://github.com/shtylman)", + "Vadim Graboys (https://github.com/vadimg)", + "Ben Noorduis <> (https://github.com/bnoordhuis)", + "Nate Rajlich (https://github.com/tootallnate)", + "Sean McArthur (https://github.com/seanmonstar)", + "Fanie Oosthuysen (https://github.com/weareu)" + ] +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/coffee-script.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/coffee-script.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/coffee-script.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/coffee-script.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +{ + "name": "coffee-script", + "description": "Unfancy JavaScript", + "keywords": ["javascript", "language", "coffeescript", "compiler"], + "author": "Jeremy Ashkenas", + "version": "1.6.2", + "licenses": [{ + "type": "MIT", + "url": "https://raw.github.com/jashkenas/coffee-script/master/LICENSE" + }], + "engines": { + "node": ">=0.8.0" + }, + "directories" : { + "lib" : 
"./lib/coffee-script" + }, + "main" : "./lib/coffee-script/coffee-script", + "bin": { + "coffee": "./bin/coffee", + "cake": "./bin/cake" + }, + "scripts": { + "test": "node ./bin/cake test" + }, + "homepage": "http://coffeescript.org", + "bugs": "https://github.com/jashkenas/coffee-script/issues", + "repository": { + "type": "git", + "url": "git://github.com/jashkenas/coffee-script.git" + }, + "devDependencies": { + "uglify-js": "~2.2", + "jison": ">=0.2.0" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/http-server.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/http-server.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/http-server.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/http-server.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +{ + "name": "http-server", + "preferGlobal": true, + "version": "0.3.0", + "author": "Nodejitsu ", + "description": "a simple zero-configuration command-line http server", + "contributors": [ + { + "name": "Marak Squires", + "email": "marak@nodejitsu.com" + } + ], + "bin": { + "http-server": "./bin/http-server" + }, + "scripts": { + "start": "node ./bin/http-server", + "test": "vows --spec --isolate", + "predeploy": "echo This will be run before deploying the app", + "postdeploy": "echo This will be run after deploying the app" + }, + "main": "./lib/http-server", + "repository": { + "type": "git", + "url": "https://github.com/nodejitsu/http-server.git" + }, + "keywords": [ + "cli", + "http", + "server" + ], + "dependencies" : { + "colors" : "*", + "flatiron" : "0.1.x", + "optimist" : "0.2.x", + "union" : "0.1.x", + "ecstatic" : "0.1.x", + "plates" : "https://github.com/flatiron/plates/tarball/master" + }, + "analyze": false, + "devDependencies": { + "vows" : "0.5.x", + "request" : "2.1.x" + }, + 
"bundledDependencies": [ + "union", + "ecstatic" + ], + "license": "MIT", + "engines": { + "node": ">=0.6" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/movefile.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/movefile.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/movefile.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/movefile.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +{ + "name": "movefile", + "description": "rename implementation working over devices", + "version": "0.2.0", + "author": "yazgazan ", + "main": "./build/Release/movefile", + "keywords": ["move", "file", "rename"], + "repository": "git://github.com/yazgazan/movefile.git", + "directories": { + "lib": "./build/Release/" + }, + "scripts": { + "install": "./node_modules/node-gyp/bin/node-gyp.js configure && ./node_modules/node-gyp/bin/node-gyp.js build" + }, + "engines": { + "node": "*" + }, + "dependencies": { + "node-gyp": "~0.9.1" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/node-module_exist.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/node-module_exist.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/node-module_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/node-module_exist.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,26 @@ +{ + "name": "node-module_exist", + "description": "Find if a NodeJS module is available to require or not", + "version": "0.0.1", + "main": "module_exist.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "repository": { + "type": "git", + "url": "git@gist.github.com:3135914.git" + }, + "homepage": 
"https://github.com/FGRibreau", + "author": { + "name": "Francois-Guillaume Ribreau", + "url": "http://fgribreau.com.com/" + }, + "devDependencies": { + "nodeunit": "~0.7.4" + }, + "keywords": [ + "core", + "modules" + ], + "license": "MIT" +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/no-description.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/no-description.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/no-description.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/no-description.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +{ + "name": "foo-bar-package", + "version": "0.0.1" +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/npm.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/npm.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/npm.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/npm.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,135 @@ +{ + "version": "1.2.17", + "name": "npm", + "publishConfig": { + "proprietary-attribs": false + }, + "description": "A package manager for node", + "keywords": [ + "package manager", + "modules", + "install", + "package.json" + ], + "preferGlobal": true, + "config": { + "publishtest": false + }, + "homepage": "https://npmjs.org/doc/", + "author": "Isaac Z. 
Schlueter (http://blog.izs.me)", + "repository": { + "type": "git", + "url": "https://github.com/isaacs/npm" + }, + "bugs": { + "email": "npm-@googlegroups.com", + "url": "http://github.com/isaacs/npm/issues" + }, + "directories": { + "doc": "./doc", + "man": "./man", + "lib": "./lib", + "bin": "./bin" + }, + "main": "./lib/npm.js", + "bin": "./bin/npm-cli.js", + "dependencies": { + "semver": "~1.1.2", + "ini": "~1.1.0", + "slide": "1", + "abbrev": "~1.0.4", + "graceful-fs": "~1.2.0", + "minimatch": "~0.2.11", + "nopt": "~2.1.1", + "rimraf": "2", + "request": "~2.9", + "which": "1", + "tar": "~0.1.17", + "fstream": "~0.1.22", + "block-stream": "*", + "inherits": "1", + "mkdirp": "~0.3.3", + "read": "~1.0.4", + "lru-cache": "~2.3.0", + "node-gyp": "~0.9.3", + "fstream-npm": "~0.1.3", + "uid-number": "0", + "archy": "0", + "chownr": "0", + "npmlog": "0", + "ansi": "~0.1.2", + "npm-registry-client": "~0.2.18", + "read-package-json": "~0.3.0", + "read-installed": "0", + "glob": "~3.1.21", + "init-package-json": "0.0.6", + "osenv": "0", + "lockfile": "~0.3.0", + "retry": "~0.6.0", + "once": "~1.1.1", + "npmconf": "0", + "opener": "~1.3.0", + "chmodr": "~0.1.0", + "cmd-shim": "~1.1.0" + }, + "bundleDependencies": [ + "semver", + "ini", + "slide", + "abbrev", + "graceful-fs", + "minimatch", + "nopt", + "rimraf", + "request", + "which", + "tar", + "fstream", + "block-stream", + "inherits", + "mkdirp", + "read", + "lru-cache", + "node-gyp", + "fstream-npm", + "uid-number", + "archy", + "chownr", + "npmlog", + "ansi", + "npm-registry-client", + "read-package-json", + "read-installed", + "glob", + "init-package-json", + "osenv", + "lockfile", + "retry", + "once", + "npmconf", + "opener", + "chmodr", + "cmd-shim" + ], + "devDependencies": { + "ronn": "~0.3.6", + "tap": "~0.4.0" + }, + "engines": { + "node": ">=0.6", + "npm": "1" + }, + "scripts": { + "test": "node ./test/run.js && tap test/tap/*.js", + "tap": "tap test/tap/*.js", + "prepublish": "node bin/npm-cli.js prune ; rm 
-rf test/*/*/node_modules ; make -j4 doc", + "dumpconf": "env | grep npm | sort | uniq", + "echo": "node bin/npm-cli.js" + }, + "licenses": [ + { + "type": "MIT +no-false-attribs", + "url": "https://github.com/isaacs/npm/raw/master/LICENSE" + } + ] +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/read-package-json.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/read-package-json.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/read-package-json.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/read-package-json.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +{ + "name": "read-package-json", + "version": "0.1.1", + "author": "Isaac Z. Schlueter (http://blog.izs.me/)", + "description": "The thing npm uses to read package.json files with semantics and defaults and validation", + "repository": { + "type": "git", + "url": "git://github.com/isaacs/read-package-json.git" + }, + "main": "read-json.js", + "scripts": { + "test": "tap test/*.js" + }, + "dependencies": { + "glob": "~3.1.9", + "lru-cache": "~1.1.0", + "semver": "~1.0.14", + "slide": "~1.1.3" + }, + "devDependencies": { + "tap": "~0.2.5" + }, + "optionalDependencies": { + "npmlog": "0", + "graceful-fs": "~1.1.8" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/request.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/request.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/request.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/request.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,39 @@ +{ + "name": "request", + "description": "Simplified HTTP request client.", + "tags": [ + "http", + "simple", + 
"util", + "utility" + ], + "version": "2.16.7", + "author": "Mikeal Rogers ", + "repository": { + "type": "git", + "url": "http://github.com/mikeal/request.git" + }, + "bugs": { + "url": "http://github.com/mikeal/request/issues" + }, + "engines": [ + "node >= 0.8.0" + ], + "main": "index.js", + "dependencies": { + "form-data": "~0.0.3", + "mime": "~1.2.7", + "hawk": "~0.10.2", + "node-uuid": "~1.4.0", + "cookie-jar": "~0.2.0", + "aws-sign": "~0.2.0", + "oauth-sign": "~0.2.0", + "forever-agent": "~0.2.0", + "tunnel-agent": "~0.2.0", + "json-stringify-safe": "~3.0.0", + "qs": "~0.5.4" + }, + "scripts": { + "test": "node tests/run.js" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/underscore.json nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/underscore.json --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/fixtures/underscore.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/fixtures/underscore.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "name" : "underscore", + "description" : "JavaScript's functional programming helper library.", + "homepage" : "http://underscorejs.org", + "keywords" : ["util", "functional", "server", "client", "browser"], + "author" : "Jeremy Ashkenas ", + "repository" : {"type": "git", "url": "git://github.com/documentcloud/underscore.git"}, + "main" : "underscore.js", + "version" : "1.4.4", + "devDependencies": { + "phantomjs": "1.9.0-1" + }, + "scripts": { + "test": "phantomjs test/vendor/runner.js test/index.html?noglobals=true" + }, + "license" : "MIT" +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/github-urls.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/github-urls.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/github-urls.js 1970-01-01 
00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/github-urls.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +var tap = require("tap") +var normalize = require("../lib/normalize") +var fs = require("fs") +var async = require("async") + +var data +var warn + +tap.test("consistent normalization", function(t) { + var entries = [ + 'read-package-json.json', + 'http-server.json', + "movefile.json", + "node-module_exist.json" + ] + var verifyConsistency = function(entryName, next) { + warn = function(msg) { + // t.equal("",msg) // uncomment to have some kind of logging of warnings + } + var filename = __dirname + "/fixtures/" + entryName + fs.readFile(filename, function(err, contents) { + if (err) return next(err) + data = JSON.parse(contents.toString()) + normalize(data, warn) + if(data.name == "node-module_exist") { + t.same(data.bugs.url, "https://gist.github.com/3135914") + } + if(data.name == "read-package-json") { + t.same(data.bugs.url, "https://github.com/isaacs/read-package-json/issues") + } + if(data.name == "http-server") { + t.same(data.bugs.url, "https://github.com/nodejitsu/http-server/issues") + } + if(data.name == "movefile") { + t.same(data.bugs.url, "https://github.com/yazgazan/movefile/issues") + } + next(null) + }) // fs.readFile + } // verifyConsistency + async.forEach(entries, verifyConsistency, function(err) { + if (err) throw err + t.end() + }) +}) // tap.test diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/normalize.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/normalize.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/normalize.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/normalize.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,226 @@ +var tap = require("tap") +var fs = require("fs") +var path = require("path") + +var globals = Object.keys(global) + +var 
normalize = require("../lib/normalize") +var warningMessages = require("../lib/warning_messages.json") +var safeFormat = require("../lib/safe_format") + +var rpjPath = path.resolve(__dirname,"./fixtures/read-package-json.json") +tap.test("normalize some package data", function(t) { + var packageData = require(rpjPath) + var warnings = [] + normalize(packageData, function(warning) { + warnings.push(warning) + }) + // there's no readme data in this particular object + t.equal( warnings.length, 1, "There's exactly one warning.") + fs.readFile(rpjPath, function(err, data) { + if(err) throw err + // Various changes have been made + t.notEqual(packageData, JSON.parse(data), "Output is different from input.") + t.end() + }) +}) + +tap.test("runs without passing warning function", function(t) { + var packageData = require(rpjPath) + fs.readFile(rpjPath, function(err, data) { + if(err) throw err + normalize(JSON.parse(data)) + t.ok(true, "If you read this, this means I'm still alive.") + t.end() + }) +}) + +tap.test("empty object", function(t) { + var packageData = {} + var expect = + { name: '', + version: '', + readme: 'ERROR: No README data found!', + _id: '@' } + + var warnings = [] + function warn(m) { + warnings.push(m) + } + normalize(packageData, warn) + t.same(packageData, expect) + t.same(warnings, [ + warningMessages.missingDescription, + warningMessages.missingRepository, + warningMessages.missingReadme + ]) + t.end() +}) + +tap.test("core module name", function(t) { + var warnings = [] + function warn(m) { + warnings.push(m) + } + var a + normalize(a={ + name: "http", + readme: "read yourself how about", + homepage: 123, + bugs: "what is this i don't even", + repository: "Hello." 
+ }, warn) + + var expect = [ + safeFormat(warningMessages.conflictingName, 'http'), + warningMessages.nonEmailUrlBugsString, + warningMessages.emptyNormalizedBugs, + warningMessages.nonUrlHomepage + ] + t.same(warnings, expect) + t.end() +}) + +tap.test("urls required", function(t) { + var warnings = [] + function warn(w) { + warnings.push(w) + } + normalize({ + bugs: { + url: "/1", + email: "not an email address" + } + }, warn) + var a + normalize(a={ + readme: "read yourself how about", + homepage: 123, + bugs: "what is this i don't even", + repository: "Hello." + }, warn) + + console.error(a) + + var expect = + [ warningMessages.missingDescription, + warningMessages.missingRepository, + warningMessages.nonUrlBugsUrlField, + warningMessages.nonEmailBugsEmailField, + warningMessages.emptyNormalizedBugs, + warningMessages.missingReadme, + warningMessages.nonEmailUrlBugsString, + warningMessages.emptyNormalizedBugs, + warningMessages.nonUrlHomepage ] + t.same(warnings, expect) + t.end() +}) + +tap.test("homepage field must start with a protocol.", function(t) { + var warnings = [] + function warn(w) { + warnings.push(w) + } + var a + normalize(a={ + homepage: 'example.org' + }, warn) + + console.error(a) + + var expect = + [ warningMessages.missingDescription, + warningMessages.missingRepository, + warningMessages.missingReadme, + warningMessages.missingProtocolHomepage ] + t.same(warnings, expect) + t.same(a.homepage, 'http://example.org') + t.end() +}) + +tap.test("gist bugs url", function(t) { + var d = { + repository: "git@gist.github.com:123456.git" + } + normalize(d) + t.same(d.repository, { type: 'git', url: 'git@gist.github.com:123456.git' }) + t.same(d.bugs, { url: 'https://gist.github.com/123456' }) + t.end(); +}); + +tap.test("singularize repositories", function(t) { + var d = {repositories:["git@gist.github.com:123456.git"]} + normalize(d) + t.same(d.repository, { type: 'git', url: 'git@gist.github.com:123456.git' }) + t.end() +}); + +tap.test("treat 
visionmedia/express as github repo", function(t) { + var d = {repository: {type: "git", url: "visionmedia/express"}} + normalize(d) + t.same(d.repository, { type: "git", url: "https://github.com/visionmedia/express" }) + t.end() +}); + +tap.test("treat isaacs/node-graceful-fs as github repo", function(t) { + var d = {repository: {type: "git", url: "isaacs/node-graceful-fs"}} + normalize(d) + t.same(d.repository, { type: "git", url: "https://github.com/isaacs/node-graceful-fs" }) + t.end() +}); + +tap.test("homepage field will set to github url if repository is a github repo", function(t) { + var a + normalize(a={ + repository: { type: "git", url: "https://github.com/isaacs/node-graceful-fs" } + }) + t.same(a.homepage, 'https://github.com/isaacs/node-graceful-fs') + t.end() +}) + +tap.test("homepage field will set to github gist url if repository is a gist", function(t) { + var a + normalize(a={ + repository: { type: "git", url: "git@gist.github.com:123456.git" } + }) + t.same(a.homepage, 'https://gist.github.com/123456') + t.end() +}) + +tap.test("homepage field will set to github gist url if repository is a shorthand reference", function(t) { + var a + normalize(a={ + repository: { type: "git", url: "sindresorhus/chalk" } + }) + t.same(a.homepage, 'https://github.com/sindresorhus/chalk') + t.end() +}) + +tap.test("treat isaacs/node-graceful-fs as github repo in dependencies", function(t) { + var d = {dependencies: {"node-graceful-fs": "isaacs/node-graceful-fs"}} + normalize(d) + t.same(d.dependencies, {"node-graceful-fs": "git+https://github.com/isaacs/node-graceful-fs" }) + t.end() +}); + +tap.test("deprecation warning for array in dependencies fields", function(t) { + var a + var warnings = [] + function warn(w) { + warnings.push(w) + } + normalize(a={ + dependencies: [], + devDependencies: [], + optionalDependencies: [] + }, warn) + t.ok(~warnings.indexOf(safeFormat(warningMessages.deprecatedArrayDependencies, 'dependencies')), "deprecation warning") + 
t.ok(~warnings.indexOf(safeFormat(warningMessages.deprecatedArrayDependencies, 'devDependencies')), "deprecation warning") + t.ok(~warnings.indexOf(safeFormat(warningMessages.deprecatedArrayDependencies, 'optionalDependencies')), "deprecation warning") + t.end() +}) + +tap.test('no new globals', function(t) { + t.same(Object.keys(global), globals) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/scoped.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/scoped.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/scoped.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/scoped.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +var test = require("tap").test + +var fixNameField = require("../lib/fixer.js").fixNameField + +test("a simple scoped module has a valid name", function (t) { + var data = {name : "@org/package"} + fixNameField(data, false) + t.equal(data.name, "@org/package", "name was unchanged") + + t.end() +}) + +test("'org@package' is not a valid name", function (t) { + t.throws(function () { + fixNameField({name : "org@package"}, false) + }, "blows up as expected") + + t.end() +}) + +test("'org=package' is not a valid name", function (t) { + t.throws(function () { + fixNameField({name : "org=package"}, false) + }, "blows up as expected") + + t.end() +}) + +test("'@org=sub/package' is not a valid name", function (t) { + t.throws(function () { + fixNameField({name : "@org=sub/package"}, false) + }, "blows up as expected") + + t.end() +}) + +test("'@org/' is not a valid name", function (t) { + t.throws(function () { + fixNameField({name : "@org/"}, false) + }, "blows up as expected") + + t.end() +}) + +test("'@/package' is not a valid name", function (t) { + t.throws(function () { + fixNameField({name : "@/package"}, false) + }, "blows up as expected") + + t.end() +}) diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/strict.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/strict.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/strict.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/strict.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +var test = require("tap").test + +var normalize = require("../") + +test("strict", function(t) { + var threw + + try { + threw = false + normalize({name: "X"}, true) + } catch (er) { + threw = true + t.equal(er.message, 'Invalid name: "X"') + } finally { + t.equal(threw, true) + } + + try { + threw = false + normalize({name:" x "}, true) + } catch (er) { + threw = true + t.equal(er.message, 'Invalid name: " x "') + } finally { + t.equal(threw, true) + } + + try { + threw = false + normalize({name:"x",version:"01.02.03"}, true) + } catch (er) { + threw = true + t.equal(er.message, 'Invalid version: "01.02.03"') + } finally { + t.equal(threw, true) + } + + // these should not throw + var slob = {name:" X ",version:"01.02.03",dependencies:{ + y:">01.02.03", + z:"! 99 $$ASFJ(Aawenf90awenf as;naw.3j3qnraw || an elephant" + }} + normalize(slob, false) + t.same(slob, + { name: 'X', + version: '1.2.3', + dependencies: + { y: '>01.02.03', + z: '! 
99 $$ASFJ(Aawenf90awenf as;naw.3j3qnraw || an elephant' }, + readme: 'ERROR: No README data found!', + _id: 'X@1.2.3' }) + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/typo.js nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/typo.js --- nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/test/typo.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/test/typo.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,115 @@ +var test = require('tap').test + +var normalize = require('../') +var typos = require('../lib/typos.json') +var warningMessages = require("../lib/warning_messages.json") +var safeFormat = require("../lib/safe_format") + +test('typos', function(t) { + var warnings = [] + function warn(m) { + warnings.push(m) + } + + var typoMessage = safeFormat.bind(undefined, warningMessages.typo) + + var expect = + [ warningMessages.missingRepository, + typoMessage('dependancies', 'dependencies'), + typoMessage('dependecies', 'dependencies'), + typoMessage('depdenencies', 'dependencies'), + typoMessage('devEependencies', 'devDependencies'), + typoMessage('depends', 'dependencies'), + typoMessage('dev-dependencies', 'devDependencies'), + typoMessage('devDependences', 'devDependencies'), + typoMessage('devDepenencies', 'devDependencies'), + typoMessage('devdependencies', 'devDependencies'), + typoMessage('repostitory', 'repository'), + typoMessage('repo', 'repository'), + typoMessage('prefereGlobal', 'preferGlobal'), + typoMessage('hompage', 'homepage'), + typoMessage('hampage', 'homepage'), + typoMessage('autohr', 'author'), + typoMessage('autor', 'author'), + typoMessage('contributers', 'contributors'), + typoMessage('publicationConfig', 'publishConfig') ] + + normalize({"dependancies": "dependencies" + ,"dependecies": "dependencies" + ,"depdenencies": "dependencies" + ,"devEependencies": "devDependencies" + ,"depends": "dependencies" + 
,"dev-dependencies": "devDependencies" + ,"devDependences": "devDependencies" + ,"devDepenencies": "devDependencies" + ,"devdependencies": "devDependencies" + ,"repostitory": "repository" + ,"repo": "repository" + ,"prefereGlobal": "preferGlobal" + ,"hompage": "homepage" + ,"hampage": "homepage" + ,"autohr": "author" + ,"autor": "author" + ,"contributers": "contributors" + ,"publicationConfig": "publishConfig" + ,readme:"asdf" + ,name:"name" + ,version:"1.2.5"}, warn) + + t.same(warnings, expect) + + warnings.length = 0 + var expect = + [ warningMessages.missingDescription, + warningMessages.missingRepository, + typoMessage("bugs['web']", "bugs['url']"), + typoMessage("bugs['name']", "bugs['url']"), + warningMessages.nonUrlBugsUrlField, + warningMessages.emptyNormalizedBugs, + warningMessages.missingReadme ] + + normalize({name:"name" + ,version:"1.2.5" + ,bugs:{web:"url",name:"url"}}, warn) + + t.same(warnings, expect) + + warnings.length = 0 + var expect = + [ warningMessages.missingDescription, + warningMessages.missingRepository, + warningMessages.missingReadme, + typoMessage('script', 'scripts') ] + + normalize({name:"name" + ,version:"1.2.5" + ,script:{server:"start",tests:"test"}}, warn) + + t.same(warnings, expect) + + warnings.length = 0 + expect = + [ warningMessages.missingDescription, + warningMessages.missingRepository, + typoMessage("scripts['server']", "scripts['start']"), + typoMessage("scripts['tests']", "scripts['test']"), + warningMessages.missingReadme ] + + normalize({name:"name" + ,version:"1.2.5" + ,scripts:{server:"start",tests:"test"}}, warn) + + t.same(warnings, expect) + + warnings.length = 0 + expect = [] + + normalize({private: true + ,name:"name" + ,version:"1.2.5" + ,scripts:{server:"start",tests:"test"}}, warn) + + t.same(warnings, expect) + + t.end(); +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/.travis.yml nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/.travis.yml --- 
nodejs-0.11.13/deps/npm/node_modules/normalize-package-data/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/normalize-package-data/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +language: node_js +node_js: + - "0.10" diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/index.js nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/index.js --- nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +var url = require('url');; +var path = require('path');; + +module.exports = cf;; + +function cf(root, u) { + if (!u) + return cf.bind(null, root);; + + u = url.parse(u);; + var h = u.host.replace(/:/g, '_');; + // Strip off any /-rev/... or ?rev=... bits + var revre = /(\?rev=|\?.*?&rev=|\/-rev\/).*$/ + var parts = u.path.replace(revre, '').split('/').slice(1) + var p = [root, h].concat(parts.map(function(part) { + return encodeURIComponent(part).replace(/%/g, '_');; + }));; + + return path.join.apply(path, p);; +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/LICENSE nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) npm, Inc. and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/package.json nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/package.json --- nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +{ + "name": "npm-cache-filename", + "version": "1.0.1", + "description": "Given a cache folder and url, return the appropriate cache folder.", + "main": "index.js", + "dependencies": {}, + "devDependencies": { + "tap": "^0.4.10" + }, + "scripts": { + "test": "tap test.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/npm/npm-cache-filename" + }, + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/npm-cache-filename/issues" + }, + "homepage": "https://github.com/npm/npm-cache-filename", + "readme": "# npm-cache-filename\n\nGiven a cache folder and url, return the appropriate cache folder.\n\n## USAGE\n\n```javascript\nvar cf = require('npm-cache-filename');\nconsole.log(cf('/tmp/cache', 'https://registry.npmjs.org:1234/foo/bar'));\n// outputs: /tmp/cache/registry.npmjs.org_1234/foo/bar\n```\n\nAs a bonus, you can also bind it to a specific root path:\n\n```javascript\nvar cf = require('npm-cache-filename');\nvar getFile = cf('/tmp/cache');\n\nconsole.log(getFile('https://registry.npmjs.org:1234/foo/bar'));\n// outputs: /tmp/cache/registry.npmjs.org_1234/foo/bar\n```\n", + "readmeFilename": "README.md", + "_id": "npm-cache-filename@1.0.1", + "_shasum": "9b640f0c1a5ba1145659685372a9ff71f70c4323", + "_from": "npm-cache-filename@latest", + "_resolved": "https://registry.npmjs.org/npm-cache-filename/-/npm-cache-filename-1.0.1.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/README.md nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/README.md --- nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +# npm-cache-filename + +Given a cache folder and url, return the appropriate cache folder. 
+ +## USAGE + +```javascript +var cf = require('npm-cache-filename'); +console.log(cf('/tmp/cache', 'https://registry.npmjs.org:1234/foo/bar')); +// outputs: /tmp/cache/registry.npmjs.org_1234/foo/bar +``` + +As a bonus, you can also bind it to a specific root path: + +```javascript +var cf = require('npm-cache-filename'); +var getFile = cf('/tmp/cache'); + +console.log(getFile('https://registry.npmjs.org:1234/foo/bar')); +// outputs: /tmp/cache/registry.npmjs.org_1234/foo/bar +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/test.js nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/test.js --- nodejs-0.11.13/deps/npm/node_modules/npm-cache-filename/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-cache-filename/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +var test = require('tap').test;; +test('it does the thing it says it does', function(t) { + var cf = require('./');; + + t.equal(cf('/tmp/cache', 'https://foo:134/xyz?adf=foo:bar/baz'), + '/tmp/cache/foo_134/xyz_3Fadf_3Dfoo_3Abar/baz');; + + var getFile = cf('/tmp/cache');; + t.equal(getFile('https://foo:134/xyz?adf=foo:bar/baz'), + '/tmp/cache/foo_134/xyz_3Fadf_3Dfoo_3Abar/baz');; + + t.equal(cf("/tmp", "https://foo:134/xyz/-rev/baz"), + '/tmp/foo_134/xyz') + t.equal(cf("/tmp", "https://foo:134/xyz/?rev=baz"), + '/tmp/foo_134/xyz') + t.equal(cf("/tmp", "https://foo:134/xyz/?foo&rev=baz"), + '/tmp/foo_134/xyz') + t.equal(cf("/tmp", "https://foo:134/xyz-rev/baz"), + '/tmp/foo_134/xyz-rev/baz') + t.end(); +});; diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/config-defs.js nodejs-0.11.15/deps/npm/node_modules/npmconf/config-defs.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/config-defs.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/config-defs.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,359 +0,0 @@ -// defaults, types, and shorthands. 
- - -var path = require("path") - , url = require("url") - , Stream = require("stream").Stream - , semver = require("semver") - , stableFamily = semver.parse(process.version) - , nopt = require("nopt") - , os = require('os') - , osenv = require("osenv") - -try { - var log = require("npmlog") -} catch (er) { - var util = require('util') - var log = { warn: function (m) { - console.warn(m + util.format.apply(util, [].slice.call(arguments, 1))) - } } -} - -exports.Octal = Octal -function Octal () {} -function validateOctal (data, k, val) { - // must be either an integer or an octal string. - if (typeof val === "number") { - data[k] = val - return true - } - - if (typeof val === "string") { - if (val.charAt(0) !== "0" || isNaN(val)) return false - data[k] = parseInt(val, 8).toString(8) - } -} - -function validateSemver (data, k, val) { - if (!semver.valid(val)) return false - data[k] = semver.valid(val) -} - -function validateStream (data, k, val) { - if (!(val instanceof Stream)) return false - data[k] = val -} - -nopt.typeDefs.semver = { type: semver, validate: validateSemver } -nopt.typeDefs.Octal = { type: Octal, validate: validateOctal } -nopt.typeDefs.Stream = { type: Stream, validate: validateStream } - -nopt.invalidHandler = function (k, val, type, data) { - log.warn("invalid config", k + "=" + JSON.stringify(val)) - - if (Array.isArray(type)) { - if (type.indexOf(url) !== -1) type = url - else if (type.indexOf(path) !== -1) type = path - } - - switch (type) { - case Octal: - log.warn("invalid config", "Must be octal number, starting with 0") - break - case url: - log.warn("invalid config", "Must be a full url with 'http://'") - break - case path: - log.warn("invalid config", "Must be a valid filesystem path") - break - case Number: - log.warn("invalid config", "Must be a numeric value") - break - case Stream: - log.warn("invalid config", "Must be an instance of the Stream class") - break - } -} - -if (!stableFamily || (+stableFamily.minor % 2)) stableFamily = 
null -else stableFamily = stableFamily.major + "." + stableFamily.minor - -var defaults - -var temp = osenv.tmpdir() -var home = osenv.home() - -var uidOrPid = process.getuid ? process.getuid() : process.pid - -if (home) process.env.HOME = home -else home = path.resolve(temp, "npm-" + uidOrPid) - -var cacheExtra = process.platform === "win32" ? "npm-cache" : ".npm" -var cacheRoot = process.platform === "win32" && process.env.APPDATA || home -var cache = path.resolve(cacheRoot, cacheExtra) - - -var globalPrefix -Object.defineProperty(exports, "defaults", {get: function () { - if (defaults) return defaults - - if (process.env.PREFIX) { - globalPrefix = process.env.PREFIX - } else if (process.platform === "win32") { - // c:\node\node.exe --> prefix=c:\node\ - globalPrefix = path.dirname(process.execPath) - } else { - // /usr/local/bin/node --> prefix=/usr/local - globalPrefix = path.dirname(path.dirname(process.execPath)) - - // destdir only is respected on Unix - if (process.env.DESTDIR) { - globalPrefix = path.join(process.env.DESTDIR, globalPrefix) - } - } - - return defaults = - { "always-auth" : false - , "bin-links" : true - , browser : null - - , ca: null - - , cache : cache - - , "cache-lock-stale": 60000 - , "cache-lock-retries": 10 - , "cache-lock-wait": 10000 - - , "cache-max": Infinity - , "cache-min": 10 - - , cert: null - - , color : true - , depth: Infinity - , description : true - , dev : false - , editor : osenv.editor() - , email: "" - , "engine-strict": false - , force : false - - , "fetch-retries": 2 - , "fetch-retry-factor": 10 - , "fetch-retry-mintimeout": 10000 - , "fetch-retry-maxtimeout": 60000 - - , git: "git" - , "git-tag-version": true - - , global : false - , globalconfig : path.resolve(globalPrefix, "etc", "npmrc") - , group : process.platform === "win32" ? 
0 - : process.env.SUDO_GID || (process.getgid && process.getgid()) - , heading: "npm" - , "ignore-scripts": false - , "init-module": path.resolve(home, '.npm-init.js') - , "init.author.name" : "" - , "init.author.email" : "" - , "init.author.url" : "" - , "init.license": "ISC" - , json: false - , key: null - , link: false - , "local-address" : undefined - , loglevel : "http" - , logstream : process.stderr - , long : false - , message : "%s" - , "node-version" : process.version - , npat : false - , "onload-script" : false - , optional: true - , parseable : false - , prefix : globalPrefix - , production: process.env.NODE_ENV === "production" - , "proprietary-attribs": true - , proxy : process.env.HTTP_PROXY || process.env.http_proxy || null - , "https-proxy" : process.env.HTTPS_PROXY || process.env.https_proxy || - process.env.HTTP_PROXY || process.env.http_proxy || null - , "user-agent" : "npm/{npm-version} " - + "node/{node-version} " - + "{platform} " - + "{arch}" - , "rebuild-bundle" : true - , registry : "https://registry.npmjs.org/" - , rollback : true - , save : false - , "save-bundle": false - , "save-dev" : false - , "save-exact" : false - , "save-optional" : false - , "save-prefix": "^" - , searchopts: "" - , searchexclude: null - , searchsort: "name" - , shell : osenv.shell() - , shrinkwrap: true - , "sign-git-tag": false - , "strict-ssl": true - , tag : "latest" - , tmp : temp - , unicode : true - , "unsafe-perm" : process.platform === "win32" - || process.platform === "cygwin" - || !( process.getuid && process.setuid - && process.getgid && process.setgid ) - || process.getuid() !== 0 - , usage : false - , user : process.platform === "win32" ? 0 : "nobody" - , username : "" - , userconfig : path.resolve(home, ".npmrc") - , umask: 022 - , version : false - , versions : false - , viewer: process.platform === "win32" ? 
"browser" : "man" - - , _exit : true - } -}}) - -exports.types = - { "always-auth" : Boolean - , "bin-links": Boolean - , browser : [null, String] - , ca: [null, String, Array] - , cache : path - , "cache-lock-stale": Number - , "cache-lock-retries": Number - , "cache-lock-wait": Number - , "cache-max": Number - , "cache-min": Number - , cert: [null, String] - , color : ["always", Boolean] - , depth : Number - , description : Boolean - , dev : Boolean - , editor : String - , email: [null, String] - , "engine-strict": Boolean - , force : Boolean - , "fetch-retries": Number - , "fetch-retry-factor": Number - , "fetch-retry-mintimeout": Number - , "fetch-retry-maxtimeout": Number - , git: String - , "git-tag-version": Boolean - , global : Boolean - , globalconfig : path - , group : [Number, String] - , "https-proxy" : [null, url] - , "user-agent" : String - , "heading": String - , "ignore-scripts": Boolean - , "init-module": path - , "init.author.name" : String - , "init.author.email" : String - , "init.author.url" : ["", url] - , "init.license": String - , json: Boolean - , key: [null, String] - , link: Boolean - // local-address must be listed as an IP for a local network interface - // must be IPv4 due to node bug - , "local-address" : Object.keys(os.networkInterfaces()).map(function (nic) { - return os.networkInterfaces()[nic].filter(function (addr) { - return addr.family === "IPv4" - }) - .map(function (addr) { - return addr.address - }) - }).reduce(function (curr, next) { - return curr.concat(next) - }, []) - , loglevel : ["silent","win","error","warn","http","info","verbose","silly"] - , logstream : Stream - , long : Boolean - , message: String - , "node-version" : [null, semver] - , npat : Boolean - , "onload-script" : [null, String] - , optional: Boolean - , parseable : Boolean - , prefix: path - , production: Boolean - , "proprietary-attribs": Boolean - , proxy : [null, url] - , "rebuild-bundle" : Boolean - , registry : [null, url] - , rollback : Boolean - , 
save : Boolean - , "save-bundle": Boolean - , "save-dev" : Boolean - , "save-exact" : Boolean - , "save-optional" : Boolean - , "save-prefix": String - , searchopts : String - , searchexclude: [null, String] - , searchsort: [ "name", "-name" - , "description", "-description" - , "author", "-author" - , "date", "-date" - , "keywords", "-keywords" ] - , shell : String - , shrinkwrap: Boolean - , "sign-git-tag": Boolean - , "strict-ssl": Boolean - , tag : String - , tmp : path - , unicode : Boolean - , "unsafe-perm" : Boolean - , usage : Boolean - , user : [Number, String] - , username : String - , userconfig : path - , umask: Octal - , version : Boolean - , versions : Boolean - , viewer: String - , _exit : Boolean - , _password: String - } - -exports.shorthands = - { s : ["--loglevel", "silent"] - , d : ["--loglevel", "info"] - , dd : ["--loglevel", "verbose"] - , ddd : ["--loglevel", "silly"] - , noreg : ["--no-registry"] - , N : ["--no-registry"] - , reg : ["--registry"] - , "no-reg" : ["--no-registry"] - , silent : ["--loglevel", "silent"] - , verbose : ["--loglevel", "verbose"] - , quiet: ["--loglevel", "warn"] - , q: ["--loglevel", "warn"] - , h : ["--usage"] - , H : ["--usage"] - , "?" 
: ["--usage"] - , help : ["--usage"] - , v : ["--version"] - , f : ["--force"] - , gangster : ["--force"] - , gangsta : ["--force"] - , desc : ["--description"] - , "no-desc" : ["--no-description"] - , "local" : ["--no-global"] - , l : ["--long"] - , m : ["--message"] - , p : ["--parseable"] - , porcelain : ["--parseable"] - , g : ["--global"] - , S : ["--save"] - , D : ["--save-dev"] - , E : ["--save-exact"] - , O : ["--save-optional"] - , y : ["--yes"] - , n : ["--no-yes"] - , B : ["--save-bundle"] - } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/LICENSE nodejs-0.11.15/deps/npm/node_modules/npmconf/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/npmconf/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Copyright (c) Isaac Z. Schlueter ("Author") -All rights reserved. - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/index.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/index.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/index.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,282 +0,0 @@ -var ProtoList = require('proto-list') - , path = require('path') - , fs = require('fs') - , ini = require('ini') - , EE = require('events').EventEmitter - , url = require('url') - , http = require('http') - -var exports = module.exports = function () { - var args = [].slice.call(arguments) - , conf = new ConfigChain() - - while(args.length) { - var a = args.shift() - if(a) conf.push - ( 'string' === typeof a - ? json(a) - : a ) - } - - return conf -} - -//recursively find a file... - -var find = exports.find = function () { - var rel = path.join.apply(null, [].slice.call(arguments)) - - function find(start, rel) { - var file = path.join(start, rel) - try { - fs.statSync(file) - return file - } catch (err) { - if(path.dirname(start) !== start) // root - return find(path.dirname(start), rel) - } - } - return find(__dirname, rel) -} - -var parse = exports.parse = function (content, file, type) { - content = '' + content - // if we don't know what it is, try json and fall back to ini - // if we know what it is, then it must be that. 
- if (!type) { - try { return JSON.parse(content) } - catch (er) { return ini.parse(content) } - } else if (type === 'json') { - if (this.emit) { - try { return JSON.parse(content) } - catch (er) { this.emit('error', er) } - } else { - return JSON.parse(content) - } - } else { - return ini.parse(content) - } -} - -var json = exports.json = function () { - var args = [].slice.call(arguments).filter(function (arg) { return arg != null }) - var file = path.join.apply(null, args) - var content - try { - content = fs.readFileSync(file,'utf-8') - } catch (err) { - return - } - return parse(content, file, 'json') -} - -var env = exports.env = function (prefix, env) { - env = env || process.env - var obj = {} - var l = prefix.length - for(var k in env) { - if(k.indexOf(prefix) === 0) - obj[k.substring(l)] = env[k] - } - - return obj -} - -exports.ConfigChain = ConfigChain -function ConfigChain () { - EE.apply(this) - ProtoList.apply(this, arguments) - this._awaiting = 0 - this._saving = 0 - this.sources = {} -} - -// multi-inheritance-ish -var extras = { - constructor: { value: ConfigChain } -} -Object.keys(EE.prototype).forEach(function (k) { - extras[k] = Object.getOwnPropertyDescriptor(EE.prototype, k) -}) -ConfigChain.prototype = Object.create(ProtoList.prototype, extras) - -ConfigChain.prototype.del = function (key, where) { - // if not specified where, then delete from the whole chain, scorched - // earth style - if (where) { - var target = this.sources[where] - target = target && target.data - if (!target) { - return this.emit('error', new Error('not found '+where)) - } - delete target[key] - } else { - for (var i = 0, l = this.list.length; i < l; i ++) { - delete this.list[i][key] - } - } - return this -} - -ConfigChain.prototype.set = function (key, value, where) { - var target - - if (where) { - target = this.sources[where] - target = target && target.data - if (!target) { - return this.emit('error', new Error('not found '+where)) - } - } else { - target = 
this.list[0] - if (!target) { - return this.emit('error', new Error('cannot set, no confs!')) - } - } - target[key] = value - return this -} - -ConfigChain.prototype.get = function (key, where) { - if (where) { - where = this.sources[where] - if (where) where = where.data - if (where && Object.hasOwnProperty.call(where, key)) return where[key] - return undefined - } - return this.list[0][key] -} - -ConfigChain.prototype.save = function (where, type, cb) { - if (typeof type === 'function') cb = type, type = null - var target = this.sources[where] - if (!target || !(target.path || target.source) || !target.data) { - // TODO: maybe save() to a url target could be a PUT or something? - // would be easy to swap out with a reddis type thing, too - return this.emit('error', new Error('bad save target: '+where)) - } - - if (target.source) { - var pref = target.prefix || '' - Object.keys(target.data).forEach(function (k) { - target.source[pref + k] = target.data[k] - }) - return this - } - - var type = type || target.type - var data = target.data - if (target.type === 'json') { - data = JSON.stringify(data) - } else { - data = ini.stringify(data) - } - - this._saving ++ - fs.writeFile(target.path, data, 'utf8', function (er) { - this._saving -- - if (er) { - if (cb) return cb(er) - else return this.emit('error', er) - } - if (this._saving === 0) { - if (cb) cb() - this.emit('save') - } - }.bind(this)) - return this -} - -ConfigChain.prototype.addFile = function (file, type, name) { - name = name || file - var marker = {__source__:name} - this.sources[name] = { path: file, type: type } - this.push(marker) - this._await() - fs.readFile(file, 'utf8', function (er, data) { - if (er) this.emit('error', er) - this.addString(data, file, type, marker) - }.bind(this)) - return this -} - -ConfigChain.prototype.addEnv = function (prefix, env, name) { - name = name || 'env' - var data = exports.env(prefix, env) - this.sources[name] = { data: data, source: env, prefix: prefix } - return 
this.add(data, name) -} - -ConfigChain.prototype.addUrl = function (req, type, name) { - this._await() - var href = url.format(req) - name = name || href - var marker = {__source__:name} - this.sources[name] = { href: href, type: type } - this.push(marker) - http.request(req, function (res) { - var c = [] - var ct = res.headers['content-type'] - if (!type) { - type = ct.indexOf('json') !== -1 ? 'json' - : ct.indexOf('ini') !== -1 ? 'ini' - : href.match(/\.json$/) ? 'json' - : href.match(/\.ini$/) ? 'ini' - : null - marker.type = type - } - - res.on('data', c.push.bind(c)) - .on('end', function () { - this.addString(Buffer.concat(c), href, type, marker) - }.bind(this)) - .on('error', this.emit.bind(this, 'error')) - - }.bind(this)) - .on('error', this.emit.bind(this, 'error')) - .end() - - return this -} - -ConfigChain.prototype.addString = function (data, file, type, marker) { - data = this.parse(data, file, type) - this.add(data, marker) - return this -} - -ConfigChain.prototype.add = function (data, marker) { - if (marker && typeof marker === 'object') { - var i = this.list.indexOf(marker) - if (i === -1) { - return this.emit('error', new Error('bad marker')) - } - this.splice(i, 1, data) - marker = marker.__source__ - this.sources[marker] = this.sources[marker] || {} - this.sources[marker].data = data - // we were waiting for this. maybe emit 'load' - this._resolve() - } else { - if (typeof marker === 'string') { - this.sources[marker] = this.sources[marker] || {} - this.sources[marker].data = data - } - // trigger the load event if nothing was already going to do so. 
- this._await() - this.push(data) - process.nextTick(this._resolve.bind(this)) - } - return this -} - -ConfigChain.prototype.parse = exports.parse - -ConfigChain.prototype._await = function () { - this._awaiting++ -} - -ConfigChain.prototype._resolve = function () { - this._awaiting-- - if (this._awaiting === 0) this.emit('load', this) -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/LICENCE nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/LICENCE --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/LICENCE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/LICENCE 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -Copyright (c) 2011 Dominic Tarr - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/LICENSE nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -Copyright 2009, 2010, 2011 Isaac Z. Schlueter. -All rights reserved. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/package.json nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/package.json --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/package.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -{ - "name": "proto-list", - "version": "1.2.2", - "description": "A utility for managing a prototype chain", - "main": "./proto-list.js", - "author": { - "name": "Isaac Z. Schlueter", - "email": "i@izs.me", - "url": "http://blog.izs.me/" - }, - "scripts": { - "test": "tap test/*.js" - }, - "repository": { - "type": "git", - "url": "https://github.com/isaacs/proto-list" - }, - "license": { - "type": "MIT", - "url": "https://github.com/isaacs/proto-list/blob/master/LICENSE" - }, - "devDependencies": { - "tap": "0" - }, - "readme": "A list of objects, bound by their prototype chain.\n\nUsed in npm's config stuff.\n", - "readmeFilename": "README.md", - "bugs": { - "url": "https://github.com/isaacs/proto-list/issues" - }, - "homepage": "https://github.com/isaacs/proto-list", - "_id": "proto-list@1.2.2", - "_shasum": "48b88798261ec2c4a785720cdfec6200d57d3326", - "_from": "proto-list@~1.2.1", - "_resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.2.tgz" -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/proto-list.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/proto-list.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/proto-list.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/proto-list.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ - -module.exports = ProtoList - -function ProtoList () { - this.list = [] - var root = null - Object.defineProperty(this, 'root', { - get: function () { return root }, - set: function (r) { - root = r - if (this.list.length) { - this.list[this.list.length - 1].__proto__ = r - } - }, - enumerable: true, - configurable: true - }) -} - -ProtoList.prototype = - { get length () { return this.list.length } - , get keys () { - var k = [] - for (var i in this.list[0]) k.push(i) - return k - } - , get snapshot () { - var o = {} - this.keys.forEach(function (k) { o[k] = this.get(k) }, this) - return o - } - , get store () { - return this.list[0] - } - , push : function (obj) { - if (typeof obj !== "object") obj = {valueOf:obj} - if (this.list.length >= 1) { - this.list[this.list.length - 1].__proto__ = obj - } - obj.__proto__ = this.root - return this.list.push(obj) - } - , pop : function () { - if (this.list.length >= 2) { - this.list[this.list.length - 2].__proto__ = this.root - } - return this.list.pop() - } - , unshift : function (obj) { - obj.__proto__ = this.list[0] || this.root - return this.list.unshift(obj) - } - , shift : function () { - if (this.list.length === 1) { - this.list[0].__proto__ = this.root - } - return this.list.shift() - } - , get : function (key) { - return this.list[0][key] - } - , set : function (key, val, save) { - if (!this.length) this.push({}) - if (save && this.list[0].hasOwnProperty(key)) this.push({}) - return this.list[0][key] = val - } - , forEach : function (fn, thisp) { - for (var key in this.list[0]) fn.call(thisp, key, this.list[0][key]) - } - , slice : function () { - return this.list.slice.apply(this.list, arguments) - } - , splice : function () { - // handle injections - var ret = this.list.splice.apply(this.list, arguments) - for (var i = 0, l = this.list.length; i < l; 
i++) { - this.list[i].__proto__ = this.list[i + 1] || this.root - } - return ret - } - } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/README.md nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/README.md --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -A list of objects, bound by their prototype chain. - -Used in npm's config stuff. diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/test/basic.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/node_modules/proto-list/test/basic.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -var tap = require("tap") - , test = tap.test - , ProtoList = require("../proto-list.js") - -tap.plan(1) - -tap.test("protoList tests", function (t) { - var p = new ProtoList - p.push({foo:"bar"}) - p.push({}) - p.set("foo", "baz") - t.equal(p.get("foo"), "baz") - - var p = new ProtoList - p.push({foo:"bar"}) - p.set("foo", "baz") - t.equal(p.get("foo"), "baz") - t.equal(p.length, 1) - p.pop() - t.equal(p.length, 0) - p.set("foo", "asdf") - t.equal(p.length, 1) - t.equal(p.get("foo"), "asdf") - p.push({bar:"baz"}) - t.equal(p.length, 2) - t.equal(p.get("foo"), "asdf") - p.shift() - t.equal(p.length, 1) - t.equal(p.get("foo"), undefined) - - - p.unshift({foo:"blo", bar:"rab"}) - p.unshift({foo:"boo"}) - t.equal(p.length, 3) - t.equal(p.get("foo"), 
"boo") - t.equal(p.get("bar"), "rab") - - var ret = p.splice(1, 1, {bar:"bar"}) - t.same(ret, [{foo:"blo", bar:"rab"}]) - t.equal(p.get("bar"), "bar") - - // should not inherit default object properties - t.equal(p.get('hasOwnProperty'), undefined) - - // unless we give it those. - p.root = {} - t.equal(p.get('hasOwnProperty'), {}.hasOwnProperty) - - p.root = {default:'monkey'} - t.equal(p.get('default'), 'monkey') - - p.push({red:'blue'}) - p.push({red:'blue'}) - p.push({red:'blue'}) - while (p.length) { - t.equal(p.get('default'), 'monkey') - p.shift() - } - - t.end() -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/.npmignore nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/.npmignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -node_modules -node_modules/* -npm_debug.log diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/package.json nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/package.json --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/package.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -{ - "name": "config-chain", - "version": "1.1.8", - "description": "HANDLE CONFIGURATION ONCE AND FOR ALL", - "homepage": "http://github.com/dominictarr/config-chain", - "repository": { - "type": "git", - "url": "https://github.com/dominictarr/config-chain.git" - }, - "dependencies": { - "proto-list": "~1.2.1", - "ini": "1" - }, - "devDependencies": { - "tap": "0.3.0" - }, - "author": { - "name": "Dominic Tarr", - "email": "dominic.tarr@gmail.com", - "url": "http://dominictarr.com" - }, - 
"scripts": { - "test": "tap test/" - }, - "readme": "#config-chain\n\nUSE THIS MODULE TO LOAD ALL YOUR CONFIGURATIONS\n\n``` js\n\n //npm install config-chain\n\n var cc = require('config-chain')\n , opts = require('optimist').argv //ALWAYS USE OPTIMIST FOR COMMAND LINE OPTIONS.\n , env = opts.env || process.env.YOUR_APP_ENV || 'dev' //SET YOUR ENV LIKE THIS.\n\n // EACH ARG TO CONFIGURATOR IS LOADED INTO CONFIGURATION CHAIN\n // EARLIER ITEMS OVERIDE LATER ITEMS\n // PUTS COMMAND LINE OPTS FIRST, AND DEFAULTS LAST!\n\n //strings are interpereted as filenames.\n //will be loaded synchronously\n\n var conf =\n cc(\n //OVERRIDE SETTINGS WITH COMMAND LINE OPTS\n opts,\n\n //ENV VARS IF PREFIXED WITH 'myApp_'\n\n cc.env('myApp_'), //myApp_foo = 'like this'\n\n //FILE NAMED BY ENV\n path.join(__dirname, 'config.' + env + '.json'),\n\n //IF `env` is PRODUCTION\n env === 'prod'\n ? path.join(__dirname, 'special.json') //load a special file\n : null //NULL IS IGNORED!\n\n //SUBDIR FOR ENV CONFIG\n path.join(__dirname, 'config', env, 'config.json'),\n\n //SEARCH PARENT DIRECTORIES FROM CURRENT DIR FOR FILE\n cc.find('config.json'),\n\n //PUT DEFAULTS LAST\n {\n host: 'localhost'\n port: 8000\n })\n\n var host = conf.get('host')\n\n // or\n\n var host = conf.store.host\n\n```\n\nFINALLY, EASY FLEXIBLE CONFIGURATIONS!\n\n##see also: [proto-list](https://github.com/isaacs/proto-list/)\n\nWHATS THAT YOU SAY?\n\nYOU WANT A \"CLASS\" SO THAT YOU CAN DO CRAYCRAY JQUERY CRAPS?\n\nEXTEND WITH YOUR OWN FUNCTIONALTY!?\n\n## CONFIGCHAIN LIVES TO SERVE ONLY YOU!\n\n```javascript\nvar cc = require('config-chain')\n\n// all the stuff you did before\nvar config = cc({\n some: 'object'\n },\n cc.find('config.json'),\n cc.env('myApp_')\n )\n // CONFIGS AS A SERVICE, aka \"CaaS\", aka EVERY DEVOPS DREAM OMG!\n .addUrl('http://configurator:1234/my-configs')\n // ASYNC FTW!\n .addFile('/path/to/file.json')\n\n // OBJECTS ARE OK TOO, they're SYNC but they still ORDER RIGHT\n // BECAUSE PROMISES 
ARE USED BUT NO, NOT *THOSE* PROMISES, JUST\n // ACTUAL PROMISES LIKE YOU MAKE TO YOUR MOM, KEPT OUT OF LOVE\n .add({ another: 'object' })\n\n // DIE A THOUSAND DEATHS IF THIS EVER HAPPENS!!\n .on('error', function (er) {\n // IF ONLY THERE WAS SOMETHIGN HARDER THAN THROW\n // MY SORROW COULD BE ADEQUATELY EXPRESSED. /o\\\n throw er\n })\n\n // THROW A PARTY IN YOUR FACE WHEN ITS ALL LOADED!!\n .on('load', function (config) {\n console.awesome('HOLY SHIT!')\n })\n```\n\n# BORING API DOCS\n\n## cc(...args)\n\nMAKE A CHAIN AND ADD ALL THE ARGS.\n\nIf the arg is a STRING, then it shall be a JSON FILENAME.\n\nSYNC I/O!\n\nRETURN THE CHAIN!\n\n## cc.json(...args)\n\nJoin the args INTO A JSON FILENAME!\n\nSYNC I/O!\n\n## cc.find(relativePath)\n\nSEEK the RELATIVE PATH by climbing the TREE OF DIRECTORIES.\n\nRETURN THE FOUND PATH!\n\nSYNC I/O!\n\n## cc.parse(content, file, type)\n\nParse the content string, and guess the type from either the\nspecified type or the filename.\n\nRETURN THE RESULTING OBJECT!\n\nNO I/O!\n\n## cc.env(prefix, env=process.env)\n\nGet all the keys on the provided env object (or process.env) which are\nprefixed by the specified prefix, and put the values on a new object.\n\nRETURN THE RESULTING OBJECT!\n\nNO I/O!\n\n## cc.ConfigChain()\n\nThe ConfigChain class for CRAY CRAY JQUERY STYLE METHOD CHAINING!\n\nOne of these is returned by the main exported function, as well.\n\nIt inherits (prototypically) from\n[ProtoList](https://github.com/isaacs/proto-list/), and also inherits\n(parasitically) from\n[EventEmitter](http://nodejs.org/api/events.html#events_class_events_eventemitter)\n\nIt has all the methods from both, and except where noted, they are\nunchanged.\n\n### LET IT BE KNOWN THAT chain IS AN INSTANCE OF ConfigChain.\n\n## chain.sources\n\nA list of all the places where it got stuff. 
The keys are the names\npassed to addFile or addUrl etc, and the value is an object with some\ninfo about the data source.\n\n## chain.addFile(filename, type, [name=filename])\n\nFilename is the name of the file. Name is an arbitrary string to be\nused later if you desire. Type is either 'ini' or 'json', and will\ntry to guess intelligently if omitted.\n\nLoaded files can be saved later.\n\n## chain.addUrl(url, type, [name=url])\n\nSame as the filename thing, but with a url.\n\nCan't be saved later.\n\n## chain.addEnv(prefix, env, [name='env'])\n\nAdd all the keys from the env object that start with the prefix.\n\n## chain.addString(data, file, type, [name])\n\nParse the string and add it to the set. (Mainly used internally.)\n\n## chain.add(object, [name])\n\nAdd the object to the set.\n\n## chain.root {Object}\n\nThe root from which all the other config objects in the set descend\nprototypically.\n\nPut your defaults here.\n\n## chain.set(key, value, name)\n\nSet the key to the value on the named config object. If name is\nunset, then set it on the first config object in the set. 
(That is,\nthe one with the highest priority, which was added first.)\n\n## chain.get(key, [name])\n\nGet the key from the named config object explicitly, or from the\nresolved configs if not specified.\n\n## chain.save(name, type)\n\nWrite the named config object back to its origin.\n\nCurrently only supported for env and file config types.\n\nFor files, encode the data according to the type.\n\n## chain.on('save', function () {})\n\nWhen one or more files are saved, emits `save` event when they're all\nsaved.\n\n## chain.on('load', function (chain) {})\n\nWhen the config chain has loaded all the specified files and urls and\nsuch, the 'load' event fires.\n", - "readmeFilename": "readme.markdown", - "bugs": { - "url": "https://github.com/dominictarr/config-chain/issues" - }, - "_id": "config-chain@1.1.8", - "_shasum": "0943d0b7227213a20d4eaff4434f4a1c0a052cad", - "_from": "config-chain@~1.1.8", - "_resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.8.tgz" -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/readme.markdown nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/readme.markdown --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/readme.markdown 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/readme.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -#config-chain - -USE THIS MODULE TO LOAD ALL YOUR CONFIGURATIONS - -``` js - - //npm install config-chain - - var cc = require('config-chain') - , opts = require('optimist').argv //ALWAYS USE OPTIMIST FOR COMMAND LINE OPTIONS. - , env = opts.env || process.env.YOUR_APP_ENV || 'dev' //SET YOUR ENV LIKE THIS. - - // EACH ARG TO CONFIGURATOR IS LOADED INTO CONFIGURATION CHAIN - // EARLIER ITEMS OVERIDE LATER ITEMS - // PUTS COMMAND LINE OPTS FIRST, AND DEFAULTS LAST! - - //strings are interpereted as filenames. 
- //will be loaded synchronously - - var conf = - cc( - //OVERRIDE SETTINGS WITH COMMAND LINE OPTS - opts, - - //ENV VARS IF PREFIXED WITH 'myApp_' - - cc.env('myApp_'), //myApp_foo = 'like this' - - //FILE NAMED BY ENV - path.join(__dirname, 'config.' + env + '.json'), - - //IF `env` is PRODUCTION - env === 'prod' - ? path.join(__dirname, 'special.json') //load a special file - : null //NULL IS IGNORED! - - //SUBDIR FOR ENV CONFIG - path.join(__dirname, 'config', env, 'config.json'), - - //SEARCH PARENT DIRECTORIES FROM CURRENT DIR FOR FILE - cc.find('config.json'), - - //PUT DEFAULTS LAST - { - host: 'localhost' - port: 8000 - }) - - var host = conf.get('host') - - // or - - var host = conf.store.host - -``` - -FINALLY, EASY FLEXIBLE CONFIGURATIONS! - -##see also: [proto-list](https://github.com/isaacs/proto-list/) - -WHATS THAT YOU SAY? - -YOU WANT A "CLASS" SO THAT YOU CAN DO CRAYCRAY JQUERY CRAPS? - -EXTEND WITH YOUR OWN FUNCTIONALTY!? - -## CONFIGCHAIN LIVES TO SERVE ONLY YOU! - -```javascript -var cc = require('config-chain') - -// all the stuff you did before -var config = cc({ - some: 'object' - }, - cc.find('config.json'), - cc.env('myApp_') - ) - // CONFIGS AS A SERVICE, aka "CaaS", aka EVERY DEVOPS DREAM OMG! - .addUrl('http://configurator:1234/my-configs') - // ASYNC FTW! - .addFile('/path/to/file.json') - - // OBJECTS ARE OK TOO, they're SYNC but they still ORDER RIGHT - // BECAUSE PROMISES ARE USED BUT NO, NOT *THOSE* PROMISES, JUST - // ACTUAL PROMISES LIKE YOU MAKE TO YOUR MOM, KEPT OUT OF LOVE - .add({ another: 'object' }) - - // DIE A THOUSAND DEATHS IF THIS EVER HAPPENS!! - .on('error', function (er) { - // IF ONLY THERE WAS SOMETHIGN HARDER THAN THROW - // MY SORROW COULD BE ADEQUATELY EXPRESSED. /o\ - throw er - }) - - // THROW A PARTY IN YOUR FACE WHEN ITS ALL LOADED!! - .on('load', function (config) { - console.awesome('HOLY SHIT!') - }) -``` - -# BORING API DOCS - -## cc(...args) - -MAKE A CHAIN AND ADD ALL THE ARGS. 
- -If the arg is a STRING, then it shall be a JSON FILENAME. - -SYNC I/O! - -RETURN THE CHAIN! - -## cc.json(...args) - -Join the args INTO A JSON FILENAME! - -SYNC I/O! - -## cc.find(relativePath) - -SEEK the RELATIVE PATH by climbing the TREE OF DIRECTORIES. - -RETURN THE FOUND PATH! - -SYNC I/O! - -## cc.parse(content, file, type) - -Parse the content string, and guess the type from either the -specified type or the filename. - -RETURN THE RESULTING OBJECT! - -NO I/O! - -## cc.env(prefix, env=process.env) - -Get all the keys on the provided env object (or process.env) which are -prefixed by the specified prefix, and put the values on a new object. - -RETURN THE RESULTING OBJECT! - -NO I/O! - -## cc.ConfigChain() - -The ConfigChain class for CRAY CRAY JQUERY STYLE METHOD CHAINING! - -One of these is returned by the main exported function, as well. - -It inherits (prototypically) from -[ProtoList](https://github.com/isaacs/proto-list/), and also inherits -(parasitically) from -[EventEmitter](http://nodejs.org/api/events.html#events_class_events_eventemitter) - -It has all the methods from both, and except where noted, they are -unchanged. - -### LET IT BE KNOWN THAT chain IS AN INSTANCE OF ConfigChain. - -## chain.sources - -A list of all the places where it got stuff. The keys are the names -passed to addFile or addUrl etc, and the value is an object with some -info about the data source. - -## chain.addFile(filename, type, [name=filename]) - -Filename is the name of the file. Name is an arbitrary string to be -used later if you desire. Type is either 'ini' or 'json', and will -try to guess intelligently if omitted. - -Loaded files can be saved later. - -## chain.addUrl(url, type, [name=url]) - -Same as the filename thing, but with a url. - -Can't be saved later. - -## chain.addEnv(prefix, env, [name='env']) - -Add all the keys from the env object that start with the prefix. 
- -## chain.addString(data, file, type, [name]) - -Parse the string and add it to the set. (Mainly used internally.) - -## chain.add(object, [name]) - -Add the object to the set. - -## chain.root {Object} - -The root from which all the other config objects in the set descend -prototypically. - -Put your defaults here. - -## chain.set(key, value, name) - -Set the key to the value on the named config object. If name is -unset, then set it on the first config object in the set. (That is, -the one with the highest priority, which was added first.) - -## chain.get(key, [name]) - -Get the key from the named config object explicitly, or from the -resolved configs if not specified. - -## chain.save(name, type) - -Write the named config object back to its origin. - -Currently only supported for env and file config types. - -For files, encode the data according to the type. - -## chain.on('save', function () {}) - -When one or more files are saved, emits `save` event when they're all -saved. - -## chain.on('load', function (chain) {}) - -When the config chain has loaded all the specified files and urls and -such, the 'load' event fires. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ - - -var cc = require('..') -var assert = require('assert') - - -//throw on invalid json -assert.throws(function () { - cc(__dirname + '/broken.json') -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.json nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.json --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/broken.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -{ - "name": "config-chain", - "version": "0.3.0", - "description": "HANDLE CONFIGURATION ONCE AND FOR ALL", - "homepage": "http://github.com/dominictarr/config-chain", - "repository": { - "type": "git", - "url": "https://github.com/dominictarr/config-chain.git" - } - //missing , and then this comment. 
this json is intensionally invalid - "dependencies": { - "proto-list": "1", - "ini": "~1.0.2" - }, - "bundleDependencies": ["ini"], - "REM": "REMEMBER TO REMOVE BUNDLING WHEN/IF ISAACS MERGES ini#7", - "author": "Dominic Tarr (http://dominictarr.com)", - "scripts": { - "test": "node test/find-file.js && node test/ini.js && node test/env.js" - } -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/chain-class.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/chain-class.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/chain-class.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/chain-class.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -var test = require('tap').test -var CC = require('../index.js').ConfigChain - -var env = { foo_blaz : 'blzaa', foo_env : 'myenv' } -var jsonObj = { blaz: 'json', json: true } -var iniObj = { 'x.y.z': 'xyz', blaz: 'ini' } - -var fs = require('fs') -var ini = require('ini') - -fs.writeFileSync('/tmp/config-chain-class.json', JSON.stringify(jsonObj)) -fs.writeFileSync('/tmp/config-chain-class.ini', ini.stringify(iniObj)) - -var http = require('http') -var reqs = 0 -http.createServer(function (q, s) { - if (++reqs === 2) this.close() - if (q.url === '/json') { - // make sure that the requests come back from the server - // out of order. they should still be ordered properly - // in the resulting config object set. 
- setTimeout(function () { - s.setHeader('content-type', 'application/json') - s.end(JSON.stringify({ - blaz: 'http', - http: true, - json: true - })) - }, 200) - } else { - s.setHeader('content-type', 'application/ini') - s.end(ini.stringify({ - blaz: 'http', - http: true, - ini: true, - json: false - })) - } -}).listen(1337) - -test('basic class test', function (t) { - var cc = new CC() - var expectlist = - [ { blaz: 'json', json: true }, - { 'x.y.z': 'xyz', blaz: 'ini' }, - { blaz: 'blzaa', env: 'myenv' }, - { blaz: 'http', http: true, json: true }, - { blaz: 'http', http: true, ini: true, json: false } ] - - cc.addFile('/tmp/config-chain-class.json') - .addFile('/tmp/config-chain-class.ini') - .addEnv('foo_', env) - .addUrl('http://localhost:1337/json') - .addUrl('http://localhost:1337/ini') - .on('load', function () { - t.same(cc.list, expectlist) - t.same(cc.snapshot, { blaz: 'json', - json: true, - 'x.y.z': 'xyz', - env: 'myenv', - http: true, - ini: true }) - - cc.del('blaz', '/tmp/config-chain-class.json') - t.same(cc.snapshot, { blaz: 'ini', - json: true, - 'x.y.z': 'xyz', - env: 'myenv', - http: true, - ini: true }) - cc.del('blaz') - t.same(cc.snapshot, { json: true, - 'x.y.z': 'xyz', - env: 'myenv', - http: true, - ini: true }) - cc.shift() - t.same(cc.snapshot, { 'x.y.z': 'xyz', - env: 'myenv', - http: true, - json: true, - ini: true }) - cc.shift() - t.same(cc.snapshot, { env: 'myenv', - http: true, - json: true, - ini: true }) - cc.shift() - t.same(cc.snapshot, { http: true, - json: true, - ini: true }) - cc.shift() - t.same(cc.snapshot, { http: true, - ini: true, - json: false }) - cc.shift() - t.same(cc.snapshot, {}) - t.end() - }) -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/env.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/env.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/env.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/env.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -var cc = require('..') -var assert = require('assert') - -assert.deepEqual({ - hello: true -}, cc.env('test_', { - 'test_hello': true, - 'ignore_this': 4, - 'ignore_test_this_too': [] -})) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/find-file.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/find-file.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/find-file.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/find-file.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ - -var fs = require('fs') - , assert = require('assert') - , objx = { - rand: Math.random() - } - -fs.writeFileSync('/tmp/random-test-config.json', JSON.stringify(objx)) - -var cc = require('../') -var path = cc.find('tmp/random-test-config.json') - -assert.equal(path, '/tmp/random-test-config.json') \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/get.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/get.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/get.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/get.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -var cc = require("../"); - -var chain = cc() - , name = "forFun"; - -chain - .add({ - __sample:"for fun only" - }, name) - .on("load", function() { - //It throw exception here - console.log(chain.get("__sample", name)); - //But if I drop the name param, it run normally and return as expected: "for fun only" - //console.log(chain.get("__sample")); - }); diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ignore-unfound-file.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ignore-unfound-file.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ignore-unfound-file.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ignore-unfound-file.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ - -var cc = require('..') - -//should not throw -cc(__dirname, 'non_existing_file') diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ini.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ini.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ini.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/ini.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ - - -var cc =require('..') -var INI = require('ini') -var assert = require('assert') - -function test(obj) { - - var _json, _ini - var json = cc.parse (_json = JSON.stringify(obj)) - var ini = cc.parse (_ini = INI.stringify(obj)) -console.log(_ini, _json) - assert.deepEqual(json, ini) -} - - -test({hello: true}) - diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/save.js nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/save.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/node_modules/config-chain/test/save.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/node_modules/config-chain/test/save.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -var CC = require('../index.js').ConfigChain -var test = require('tap').test - -var f1 = '/tmp/f1.ini' -var f2 = '/tmp/f2.json' - -var ini = require('ini') - -var f1data = {foo: {bar: 'baz'}, bloo: 'jaus'} -var f2data = {oof: 
{rab: 'zab'}, oolb: 'suaj'} - -var fs = require('fs') - -fs.writeFileSync(f1, ini.stringify(f1data), 'utf8') -fs.writeFileSync(f2, JSON.stringify(f2data), 'utf8') - -test('test saving and loading ini files', function (t) { - new CC() - .add({grelb:'blerg'}, 'opt') - .addFile(f1, 'ini', 'inifile') - .addFile(f2, 'json', 'jsonfile') - .on('load', function (cc) { - - t.same(cc.snapshot, { grelb: 'blerg', - bloo: 'jaus', - foo: { bar: 'baz' }, - oof: { rab: 'zab' }, - oolb: 'suaj' }) - - t.same(cc.list, [ { grelb: 'blerg' }, - { bloo: 'jaus', foo: { bar: 'baz' } }, - { oof: { rab: 'zab' }, oolb: 'suaj' } ]) - - cc.set('grelb', 'brelg', 'opt') - .set('foo', 'zoo', 'inifile') - .set('oof', 'ooz', 'jsonfile') - .save('inifile') - .save('jsonfile') - .on('save', function () { - t.equal(fs.readFileSync(f1, 'utf8'), - "bloo = jaus\nfoo = zoo\n") - t.equal(fs.readFileSync(f2, 'utf8'), - "{\"oof\":\"ooz\",\"oolb\":\"suaj\"}") - - t.same(cc.snapshot, { grelb: 'brelg', - bloo: 'jaus', - foo: 'zoo', - oof: 'ooz', - oolb: 'suaj' }) - - t.same(cc.list, [ { grelb: 'brelg' }, - { bloo: 'jaus', foo: 'zoo' }, - { oof: 'ooz', oolb: 'suaj' } ]) - - t.pass('ok') - t.end() - }) - }) -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/npmconf.js nodejs-0.11.15/deps/npm/node_modules/npmconf/npmconf.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/npmconf.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/npmconf.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,338 +0,0 @@ - -var CC = require('config-chain').ConfigChain -var inherits = require('inherits') -var configDefs = require('./config-defs.js') -var types = configDefs.types -var once = require('once') -var fs = require('fs') -var path = require('path') -var nopt = require('nopt') -var ini = require('ini') -var Octal = configDefs.Octal -var mkdirp = require('mkdirp') - -exports.load = load -exports.Conf = Conf -exports.loaded = false -exports.rootConf = null -exports.usingBuiltin = false 
-exports.defs = configDefs -Object.defineProperty(exports, 'defaults', { get: function () { - return configDefs.defaults -}, enumerable: true }) -Object.defineProperty(exports, 'types', { get: function () { - return configDefs.types -}, enumerable: true }) - -exports.validate = validate - -var myUid = process.env.SUDO_UID !== undefined - ? process.env.SUDO_UID : (process.getuid && process.getuid()) -var myGid = process.env.SUDO_GID !== undefined - ? process.env.SUDO_GID : (process.getgid && process.getgid()) - - -var loading = false -var loadCbs = [] -function load (cli_, builtin_, cb_) { - var cli, builtin, cb - for (var i = 0; i < arguments.length; i++) - switch (typeof arguments[i]) { - case 'string': builtin = arguments[i]; break - case 'object': cli = arguments[i]; break - case 'function': cb = arguments[i]; break - } - - if (!cb) - cb = function () {} - - if (exports.loaded) { - var ret = exports.loaded - if (cli) { - ret = new Conf(ret) - ret.unshift(cli) - } - return process.nextTick(cb.bind(null, null, ret)) - } - - // either a fresh object, or a clone of the passed in obj - if (!cli) - cli = {} - else - cli = Object.keys(cli).reduce(function (c, k) { - c[k] = cli[k] - return c - }, {}) - - loadCbs.push(cb) - if (loading) - return - loading = true - - cb = once(function (er, conf) { - if (!er) - exports.loaded = conf - loadCbs.forEach(function (fn) { - fn(er, conf) - }) - loadCbs.length = 0 - }) - - // check for a builtin if provided. 
- exports.usingBuiltin = !!builtin - var rc = exports.rootConf = new Conf() - var defaults = configDefs.defaults - if (builtin) - rc.addFile(builtin, 'builtin') - else - rc.add({}, 'builtin') - - rc.on('load', function () { - var conf = new Conf(rc) - conf.usingBuiltin = !!builtin - conf.add(cli, 'cli') - conf.addEnv() - conf.addFile(conf.get('userconfig'), 'user') - conf.once('error', cb) - conf.once('load', function () { - // globalconfig and globalignorefile defaults - // need to respond to the "prefix" setting up to this point. - // Eg, `npm config get globalconfig --prefix ~/local` should - // return `~/local/etc/npmrc` - // annoying humans and their expectations! - if (conf.get('prefix')) { - var etc = path.resolve(conf.get("prefix"), "etc") - defaults.globalconfig = path.resolve(etc, "npmrc") - defaults.globalignorefile = path.resolve(etc, "npmignore") - } - conf.addFile(conf.get('globalconfig'), 'global') - - // move the builtin into the conf stack now. - conf.root = defaults - conf.add(rc.shift(), 'builtin') - conf.once('load', function () { - // warn about invalid bits. - validate(conf) - exports.loaded = conf - cb(null, conf) - }) - }) - }) -} - - -// Basically the same as CC, but: -// 1. Always ini -// 2. Parses environment variable names in field values -// 3. Field values that start with ~/ are replaced with process.env.HOME -// 4. Can inherit from another Conf object, using it as the base. 
-inherits(Conf, CC) -function Conf (base) { - if (!(this instanceof Conf)) - return new Conf(base) - - CC.apply(this) - - if (base) - if (base instanceof Conf) - this.root = base.list[0] || base.root - else - this.root = base - else - this.root = configDefs.defaults -} - -Conf.prototype.save = function (where, cb) { - var target = this.sources[where] - if (!target || !(target.path || target.source) || !target.data) { - if (where !== 'builtin') - var er = new Error('bad save target: '+where) - if (cb) { - process.nextTick(cb.bind(null, er)) - return this - } - return this.emit('error', er) - } - - if (target.source) { - var pref = target.prefix || '' - Object.keys(target.data).forEach(function (k) { - target.source[pref + k] = target.data[k] - }) - if (cb) process.nextTick(cb) - return this - } - - var data = target.data - - if (typeof data._password === 'string' && - typeof data.username === 'string') { - var auth = data.username + ':' + data._password - data = Object.keys(data).reduce(function (c, k) { - if (k === 'username' || k === '_password') - return c - c[k] = data[k] - return c - }, { _auth: new Buffer(auth, 'utf8').toString('base64') }) - delete data.username - delete data._password - } - - data = ini.stringify(data) - - then = then.bind(this) - done = done.bind(this) - this._saving ++ - - var mode = where === 'user' ? 
0600 : 0666 - if (!data.trim()) - fs.unlink(target.path, done) - else { - mkdirp(path.dirname(target.path), function (er) { - if (er) - return then(er) - fs.writeFile(target.path, data, 'utf8', function (er) { - if (er) - return then(er) - if (where === 'user' && myUid && myGid) - fs.chown(target.path, +myUid, +myGid, then) - else - then() - }) - }) - } - - function then (er) { - if (er) - return done(er) - fs.chmod(target.path, mode, done) - } - - function done (er) { - if (er) { - if (cb) return cb(er) - else return this.emit('error', er) - } - this._saving -- - if (this._saving === 0) { - if (cb) cb() - this.emit('save') - } - } - - return this -} - -Conf.prototype.addFile = function (file, name) { - name = name || file - var marker = {__source__:name} - this.sources[name] = { path: file, type: 'ini' } - this.push(marker) - this._await() - fs.readFile(file, 'utf8', function (er, data) { - if (er) // just ignore missing files. - return this.add({}, marker) - this.addString(data, file, 'ini', marker) - }.bind(this)) - return this -} - -// always ini files. 
-Conf.prototype.parse = function (content, file) { - return CC.prototype.parse.call(this, content, file, 'ini') -} - -Conf.prototype.add = function (data, marker) { - Object.keys(data).forEach(function (k) { - data[k] = parseField(data[k], k) - }) - if (Object.prototype.hasOwnProperty.call(data, '_auth')) { - var auth = new Buffer(data._auth, 'base64').toString('utf8').split(':') - var username = auth.shift() - var password = auth.join(':') - data.username = username - data._password = password - } - return CC.prototype.add.call(this, data, marker) -} - -Conf.prototype.addEnv = function (env) { - env = env || process.env - var conf = {} - Object.keys(env) - .filter(function (k) { return k.match(/^npm_config_[^_]/i) }) - .forEach(function (k) { - if (!env[k]) - return - - conf[k.replace(/^npm_config_/i, '') - .toLowerCase() - .replace(/_/g, '-')] = env[k] - }) - return CC.prototype.addEnv.call(this, '', conf, 'env') -} - -function parseField (f, k, emptyIsFalse) { - if (typeof f !== 'string' && !(f instanceof String)) - return f - - // type can be an array or single thing. - var typeList = [].concat(types[k]) - var isPath = -1 !== typeList.indexOf(path) - var isBool = -1 !== typeList.indexOf(Boolean) - var isString = -1 !== typeList.indexOf(String) - var isOctal = -1 !== typeList.indexOf(Octal) - var isNumber = isOctal || (-1 !== typeList.indexOf(Number)) - - f = (''+f).trim() - - if (f.match(/^".*"$/)) - f = JSON.parse(f) - - if (isBool && !isString && f === '') - return true - - switch (f) { - case 'true': return true - case 'false': return false - case 'null': return null - case 'undefined': return undefined - } - - f = envReplace(f) - - if (isPath) { - var homePattern = process.platform === 'win32' ? /^~(\/|\\)/ : /^~\// - if (f.match(homePattern) && process.env.HOME) { - f = path.resolve(process.env.HOME, f.substr(2)) - } - f = path.resolve(f) - } - - if (isNumber && !isNaN(f)) - f = isOctal ? 
parseInt(f, 8) : +f - - return f -} - -function envReplace (f) { - if (typeof f !== "string" || !f) return f - - // replace any ${ENV} values with the appropriate environ. - var envExpr = /(\\*)\$\{([^}]+)\}/g - return f.replace(envExpr, function (orig, esc, name, i, s) { - esc = esc.length && esc.length % 2 - if (esc) - return orig - if (undefined === process.env[name]) - throw new Error("Failed to replace env in config: "+orig) - return process.env[name] - }) -} - -function validate (cl) { - // warn about invalid configs at every level. - cl.list.forEach(function (conf, level) { - nopt.clean(conf, configDefs.types) - }) -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/.npmignore nodejs-0.11.15/deps/npm/node_modules/npmconf/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/npmconf/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/.npmignore 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -/test/fixtures/userconfig-with-gc diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/package.json nodejs-0.11.15/deps/npm/node_modules/npmconf/package.json --- nodejs-0.11.13/deps/npm/node_modules/npmconf/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/package.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -{ - "name": "npmconf", - "version": "0.1.15", - "description": "The config thing npm uses", - "main": "npmconf.js", - "directories": { - "test": "test" - }, - "dependencies": { - "config-chain": "~1.1.8", - "inherits": "~2.0.0", - "once": "~1.3.0", - "mkdirp": "~0.3.3", - "osenv": "0.0.3", - "nopt": "2", - "semver": "2", - "ini": "~1.1.0" - }, - "devDependencies": { - "tap": "~0.4.0" - }, - "scripts": { - "test": "tap test/*.js" - }, - "repository": { - "type": "git", - "url": "git://github.com/isaacs/npmconf" - }, - "keywords": [ - "npm", - "config", - "config-chain", - "conf", - "ini" - ], - "author": { - "name": "Isaac Z. 
Schlueter", - "email": "i@izs.me", - "url": "http://blog.izs.me" - }, - "license": "BSD", - "readme": "# npmconf\n\nThe config thing npm uses\n\nIf you are interested in interacting with the config settings that npm\nuses, then use this module.\n\nHowever, if you are writing a new Node.js program, and want\nconfiguration functionality similar to what npm has, but for your\nown thing, then I'd recommend using [rc](https://github.com/dominictarr/rc),\nwhich is probably what you want.\n\nIf I were to do it all over again, that's what I'd do for npm. But,\nalas, there are many systems depending on many of the particulars of\nnpm's configuration setup, so it's not worth the cost of changing.\n\n## USAGE\n\n```javascript\nvar npmconf = require('npmconf')\n\n// pass in the cli options that you read from the cli\n// or whatever top-level configs you want npm to use for now.\nnpmconf.load({some:'configs'}, function (er, conf) {\n // do stuff with conf\n conf.get('some', 'cli') // 'configs'\n conf.get('username') // 'joebobwhatevers'\n conf.set('foo', 'bar', 'user')\n conf.save('user', function (er) {\n // foo = bar is now saved to ~/.npmrc or wherever\n })\n})\n```\n", - "readmeFilename": "README.md", - "bugs": { - "url": "https://github.com/isaacs/npmconf/issues" - }, - "homepage": "https://github.com/isaacs/npmconf", - "_id": "npmconf@0.1.15", - "_shasum": "9df0f0545d04d121330c32a5be9d351b4a8df029", - "_from": "npmconf@latest" -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/README.md nodejs-0.11.15/deps/npm/node_modules/npmconf/README.md --- nodejs-0.11.13/deps/npm/node_modules/npmconf/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# npmconf - -The config thing npm uses - -If you are interested in interacting with the config settings that npm -uses, then use this module. 
- -However, if you are writing a new Node.js program, and want -configuration functionality similar to what npm has, but for your -own thing, then I'd recommend using [rc](https://github.com/dominictarr/rc), -which is probably what you want. - -If I were to do it all over again, that's what I'd do for npm. But, -alas, there are many systems depending on many of the particulars of -npm's configuration setup, so it's not worth the cost of changing. - -## USAGE - -```javascript -var npmconf = require('npmconf') - -// pass in the cli options that you read from the cli -// or whatever top-level configs you want npm to use for now. -npmconf.load({some:'configs'}, function (er, conf) { - // do stuff with conf - conf.get('some', 'cli') // 'configs' - conf.get('username') // 'joebobwhatevers' - conf.set('foo', 'bar', 'user') - conf.save('user', function (er) { - // foo = bar is now saved to ~/.npmrc or wherever - }) -}) -``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/00-setup.js nodejs-0.11.15/deps/npm/node_modules/npmconf/test/00-setup.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/00-setup.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/00-setup.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -var path = require('path') -var userconfigSrc = path.resolve(__dirname, 'fixtures', 'userconfig') -exports.userconfig = userconfigSrc + '-with-gc' -exports.globalconfig = path.resolve(__dirname, 'fixtures', 'globalconfig') -exports.builtin = path.resolve(__dirname, 'fixtures', 'builtin') - -// set the userconfig in the env -// unset anything else that npm might be trying to foist on us -Object.keys(process.env).forEach(function (k) { - if (k.match(/^npm_config_/i)) { - delete process.env[k] - } -}) -process.env.npm_config_userconfig = exports.userconfig -process.env.npm_config_other_env_thing = 1000 -process.env.random_env_var = 'asdf' - -if (module === require.main) { - // set the globalconfig in the 
userconfig - var fs = require('fs') - var uc = fs.readFileSync(userconfigSrc) - var gcini = 'globalconfig = ' + exports.globalconfig + '\n' - fs.writeFileSync(exports.userconfig, gcini + uc) - - console.log('0..1') - console.log('ok 1 setup done') -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/basic.js nodejs-0.11.15/deps/npm/node_modules/npmconf/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/basic.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -var test = require('tap').test -var npmconf = require('../npmconf.js') -var common = require('./00-setup.js') - -var ucData = - { globalconfig: common.globalconfig, - email: 'i@izs.me', - 'env-thing': 'asdf', - 'init.author.name': 'Isaac Z. Schlueter', - 'init.author.email': 'i@izs.me', - 'init.author.url': 'http://blog.izs.me/', - 'proprietary-attribs': false, - 'npm:publishtest': true, - '_npmjs.org:couch': 'https://admin:password@localhost:5984/registry', - _auth: 'dXNlcm5hbWU6cGFzc3dvcmQ=', - 'npm-www:nocache': '1', - nodedir: '/Users/isaacs/dev/js/node-v0.8', - 'sign-git-tag': true, - message: 'v%s', - 'strict-ssl': false, - 'tmp': process.env.HOME + '/.tmp', - username : "username", - _password : "password", - _token: - { AuthSession: 'yabba-dabba-doodle', - version: '1', - expires: '1345001053415', - path: '/', - httponly: true } } - -var envData = { userconfig: common.userconfig, 'other-env-thing': '1000' } - -var gcData = { 'package-config:foo': 'boo' } - -var biData = {} - -var cli = { foo: 'bar', umask: 022 } - -var expectList = -[ cli, - envData, - ucData, - gcData, - biData ] - -var expectSources = -{ cli: { data: cli }, - env: - { data: envData, - source: envData, - prefix: '' }, - user: - { path: common.userconfig, - type: 'ini', - data: ucData }, - global: - { path: common.globalconfig, - type: 'ini', - data: gcData }, - builtin: { data: biData } } - -test('no 
builtin', function (t) { - npmconf.load(cli, function (er, conf) { - if (er) throw er - t.same(conf.list, expectList) - t.same(conf.sources, expectSources) - t.same(npmconf.rootConf.list, []) - t.equal(npmconf.rootConf.root, npmconf.defs.defaults) - t.equal(conf.root, npmconf.defs.defaults) - t.equal(conf.get('umask'), 022) - t.equal(conf.get('heading'), 'npm') - t.end() - }) -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/builtin.js nodejs-0.11.15/deps/npm/node_modules/npmconf/test/builtin.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/builtin.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/builtin.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -var test = require('tap').test -var npmconf = require('../npmconf.js') -var common = require('./00-setup.js') - -var ucData = - { globalconfig: common.globalconfig, - email: 'i@izs.me', - 'env-thing': 'asdf', - 'init.author.name': 'Isaac Z. Schlueter', - 'init.author.email': 'i@izs.me', - 'init.author.url': 'http://blog.izs.me/', - 'proprietary-attribs': false, - 'npm:publishtest': true, - '_npmjs.org:couch': 'https://admin:password@localhost:5984/registry', - _auth: 'dXNlcm5hbWU6cGFzc3dvcmQ=', - 'npm-www:nocache': '1', - nodedir: '/Users/isaacs/dev/js/node-v0.8', - 'sign-git-tag': true, - message: 'v%s', - 'strict-ssl': false, - 'tmp': process.env.HOME + '/.tmp', - username : "username", - _password : "password", - _token: - { AuthSession: 'yabba-dabba-doodle', - version: '1', - expires: '1345001053415', - path: '/', - httponly: true } } - -var envData = { userconfig: common.userconfig, 'other-env-thing': '1000' } - -var gcData = { 'package-config:foo': 'boo' } - -var biData = { 'builtin-config': true } - -var cli = { foo: 'bar', heading: 'foo', 'git-tag-version': false } - -var expectList = -[ cli, - envData, - ucData, - gcData, - biData ] - -var expectSources = -{ cli: { data: cli }, - env: - { data: envData, - source: envData, - prefix: '' 
}, - user: - { path: common.userconfig, - type: 'ini', - data: ucData }, - global: - { path: common.globalconfig, - type: 'ini', - data: gcData }, - builtin: { data: biData } } - -test('with builtin', function (t) { - npmconf.load(cli, common.builtin, function (er, conf) { - if (er) throw er - t.same(conf.list, expectList) - t.same(conf.sources, expectSources) - t.same(npmconf.rootConf.list, []) - t.equal(npmconf.rootConf.root, npmconf.defs.defaults) - t.equal(conf.root, npmconf.defs.defaults) - t.equal(conf.get('heading'), 'foo') - t.equal(conf.get('git-tag-version'), false) - t.end() - }) -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/fixtures/builtin nodejs-0.11.15/deps/npm/node_modules/npmconf/test/fixtures/builtin --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/fixtures/builtin 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/fixtures/builtin 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -builtin-config = true diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/fixtures/globalconfig nodejs-0.11.15/deps/npm/node_modules/npmconf/test/fixtures/globalconfig --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/fixtures/globalconfig 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/fixtures/globalconfig 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -package-config:foo = boo diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/fixtures/userconfig nodejs-0.11.15/deps/npm/node_modules/npmconf/test/fixtures/userconfig --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/fixtures/userconfig 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/fixtures/userconfig 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -email = i@izs.me -env-thing = ${random_env_var} -init.author.name = Isaac Z. 
Schlueter -init.author.email = i@izs.me -init.author.url = http://blog.izs.me/ -proprietary-attribs = false -npm:publishtest = true -_npmjs.org:couch = https://admin:password@localhost:5984/registry -_auth = dXNlcm5hbWU6cGFzc3dvcmQ= -npm-www:nocache = 1 -nodedir = /Users/isaacs/dev/js/node-v0.8 -sign-git-tag = true -message = v%s -strict-ssl = false -tmp = ~/.tmp - -[_token] -AuthSession = yabba-dabba-doodle -version = 1 -expires = 1345001053415 -path = / -httponly = true diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmconf/test/save.js nodejs-0.11.15/deps/npm/node_modules/npmconf/test/save.js --- nodejs-0.11.13/deps/npm/node_modules/npmconf/test/save.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmconf/test/save.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -var test = require('tap').test -var npmconf = require('../npmconf.js') -var common = require('./00-setup.js') -var fs = require('fs') -var ini = require('ini') -var expectConf = - [ 'globalconfig = ' + common.globalconfig, - 'email = i@izs.me', - 'env-thing = asdf', - 'init.author.name = Isaac Z. Schlueter', - 'init.author.email = i@izs.me', - 'init.author.url = http://blog.izs.me/', - 'proprietary-attribs = false', - 'npm:publishtest = true', - '_npmjs.org:couch = https://admin:password@localhost:5984/registry', - '_auth = dXNlcm5hbWU6cGFzc3dvcmQ=', - 'npm-www:nocache = 1', - 'sign-git-tag = false', - 'message = v%s', - 'strict-ssl = false', - 'username = username', - '_password = password', - '', - '[_token]', - 'AuthSession = yabba-dabba-doodle', - 'version = 1', - 'expires = 1345001053415', - 'path = /', - 'httponly = true', - '' ].join('\n') -var expectFile = - [ 'globalconfig = ' + common.globalconfig, - 'email = i@izs.me', - 'env-thing = asdf', - 'init.author.name = Isaac Z. 
Schlueter', - 'init.author.email = i@izs.me', - 'init.author.url = http://blog.izs.me/', - 'proprietary-attribs = false', - 'npm:publishtest = true', - '_npmjs.org:couch = https://admin:password@localhost:5984/registry', - '_auth = dXNlcm5hbWU6cGFzc3dvcmQ=', - 'npm-www:nocache = 1', - 'sign-git-tag = false', - 'message = v%s', - 'strict-ssl = false', - '', - '[_token]', - 'AuthSession = yabba-dabba-doodle', - 'version = 1', - 'expires = 1345001053415', - 'path = /', - 'httponly = true', - '' ].join('\n') - -test('saving configs', function (t) { - npmconf.load(function (er, conf) { - if (er) - throw er - conf.set('sign-git-tag', false, 'user') - conf.del('nodedir') - conf.del('tmp') - var foundConf = ini.stringify(conf.sources.user.data) - t.same(ini.parse(foundConf), ini.parse(expectConf)) - fs.unlinkSync(common.userconfig) - conf.save('user', function (er) { - if (er) - throw er - var uc = fs.readFileSync(conf.get('userconfig'), 'utf8') - t.same(ini.parse(uc), ini.parse(expectFile)) - t.end() - }) - }) -}) - diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-install-checks/LICENSE nodejs-0.11.15/deps/npm/node_modules/npm-install-checks/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/npm-install-checks/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-install-checks/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,6 @@ -Copyright (c) Robert Kowalski ("Author") +Copyright (c) Robert Kowalski and Isaac Z. Schlueter ("Authors") All rights reserved. - The BSD License Redistribution and use in source and binary forms, with or without @@ -15,10 +14,10 @@ notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR @@ -26,209 +25,3 @@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -This uses parts of npm, (c) Isaac Z. Schlueter, under the following license: - - -The Artistic License 2.0 - -Copyright (c) 2000-2006, The Perl Foundation. - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -This license establishes the terms under which a given free software -Package may be copied, modified, distributed, and/or redistributed. -The intent is that the Copyright Holder maintains some artistic -control over the development of that Package while still keeping the -Package available as open source and free software. - -You are always permitted to make arrangements wholly outside of this -license directly with the Copyright Holder of a given Package. If the -terms of this license do not permit the full use that you propose to -make of the Package, you should contact the Copyright Holder and seek -a different licensing arrangement. - -Definitions - - "Copyright Holder" means the individual(s) or organization(s) - named in the copyright notice for the entire Package. 
- - "Contributor" means any party that has contributed code or other - material to the Package, in accordance with the Copyright Holder's - procedures. - - "You" and "your" means any person who would like to copy, - distribute, or modify the Package. - - "Package" means the collection of files distributed by the - Copyright Holder, and derivatives of that collection and/or of - those files. A given Package may consist of either the Standard - Version, or a Modified Version. - - "Distribute" means providing a copy of the Package or making it - accessible to anyone else, or in the case of a company or - organization, to others outside of your company or organization. - - "Distributor Fee" means any fee that you charge for Distributing - this Package or providing support for this Package to another - party. It does not mean licensing fees. - - "Standard Version" refers to the Package if it has not been - modified, or has been modified only in ways explicitly requested - by the Copyright Holder. - - "Modified Version" means the Package, if it has been changed, and - such changes were not explicitly requested by the Copyright - Holder. - - "Original License" means this Artistic License as Distributed with - the Standard Version of the Package, in its current version or as - it may be modified by The Perl Foundation in the future. - - "Source" form means the source code, documentation source, and - configuration files for the Package. - - "Compiled" form means the compiled bytecode, object code, binary, - or any other form resulting from mechanical transformation or - translation of the Source form. - - -Permission for Use and Modification Without Distribution - -(1) You are permitted to use the Standard Version and create and use -Modified Versions for any purpose without restriction, provided that -you do not Distribute the Modified Version. 
- - -Permissions for Redistribution of the Standard Version - -(2) You may Distribute verbatim copies of the Source form of the -Standard Version of this Package in any medium without restriction, -either gratis or for a Distributor Fee, provided that you duplicate -all of the original copyright notices and associated disclaimers. At -your discretion, such verbatim copies may or may not include a -Compiled form of the Package. - -(3) You may apply any bug fixes, portability changes, and other -modifications made available from the Copyright Holder. The resulting -Package will still be considered the Standard Version, and as such -will be subject to the Original License. - - -Distribution of Modified Versions of the Package as Source - -(4) You may Distribute your Modified Version as Source (either gratis -or for a Distributor Fee, and with or without a Compiled form of the -Modified Version) provided that you clearly document how it differs -from the Standard Version, including, but not limited to, documenting -any non-standard features, executables, or modules, and provided that -you do at least ONE of the following: - - (a) make the Modified Version available to the Copyright Holder - of the Standard Version, under the Original License, so that the - Copyright Holder may include your modifications in the Standard - Version. - - (b) ensure that installation of your Modified Version does not - prevent the user installing or running the Standard Version. In - addition, the Modified Version must bear a name that is different - from the name of the Standard Version. 
- - (c) allow anyone who receives a copy of the Modified Version to - make the Source form of the Modified Version available to others - under - - (i) the Original License or - - (ii) a license that permits the licensee to freely copy, - modify and redistribute the Modified Version using the same - licensing terms that apply to the copy that the licensee - received, and requires that the Source form of the Modified - Version, and of any works derived from it, be made freely - available in that license fees are prohibited but Distributor - Fees are allowed. - - -Distribution of Compiled Forms of the Standard Version -or Modified Versions without the Source - -(5) You may Distribute Compiled forms of the Standard Version without -the Source, provided that you include complete instructions on how to -get the Source of the Standard Version. Such instructions must be -valid at the time of your distribution. If these instructions, at any -time while you are carrying out such distribution, become invalid, you -must provide new instructions on demand or cease further distribution. -If you provide valid instructions or cease distribution within thirty -days after you become aware that the instructions are invalid, then -you do not forfeit any of your rights under this license. - -(6) You may Distribute a Modified Version in Compiled form without -the Source, provided that you comply with Section 4 with respect to -the Source of the Modified Version. - - -Aggregating or Linking the Package - -(7) You may aggregate the Package (either the Standard Version or -Modified Version) with other packages and Distribute the resulting -aggregation provided that you do not charge a licensing fee for the -Package. Distributor Fees are permitted, and licensing fees for other -components in the aggregation are permitted. The terms of this license -apply to the use and Distribution of the Standard or Modified Versions -as included in the aggregation. 
- -(8) You are permitted to link Modified and Standard Versions with -other works, to embed the Package in a larger work of your own, or to -build stand-alone binary or bytecode versions of applications that -include the Package, and Distribute the result without restriction, -provided the result does not expose a direct interface to the Package. - - -Items That are Not Considered Part of a Modified Version - -(9) Works (including, but not limited to, modules and scripts) that -merely extend or make use of the Package, do not, by themselves, cause -the Package to be a Modified Version. In addition, such works are not -considered parts of the Package itself, and are not subject to the -terms of this license. - - -General Provisions - -(10) Any use, modification, and distribution of the Standard or -Modified Versions is governed by this Artistic License. By using, -modifying or distributing the Package, you accept this license. Do not -use, modify, or distribute the Package, if you do not accept this -license. - -(11) If your Modified Version has been derived from a Modified -Version made by someone other than you, you are nevertheless required -to ensure that your Modified Version complies with the requirements of -this license. - -(12) This license does not grant you the right to use any trademark, -service mark, tradename, or logo of the Copyright Holder. - -(13) This license includes the non-exclusive, worldwide, -free-of-charge patent license to make, have made, use, offer to sell, -sell, import and otherwise transfer the Package with respect to any -patent claims licensable by the Copyright Holder that are necessarily -infringed by the Package. If you institute patent litigation -(including a cross-claim or counterclaim) against any party alleging -that the Package constitutes direct or contributory patent -infringement, then this Artistic License to you shall terminate on the -date that such litigation is filed. 
- -(14) Disclaimer of Warranty: -THE PACKAGE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS -IS' AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES. THE IMPLIED -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR -NON-INFRINGEMENT ARE DISCLAIMED TO THE EXTENT PERMITTED BY YOUR LOCAL -LAW. UNLESS REQUIRED BY LAW, NO COPYRIGHT HOLDER OR CONTRIBUTOR WILL -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES ARISING IN ANY WAY OUT OF THE USE OF THE PACKAGE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-install-checks/package.json nodejs-0.11.15/deps/npm/node_modules/npm-install-checks/package.json --- nodejs-0.11.13/deps/npm/node_modules/npm-install-checks/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-install-checks/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,11 +1,11 @@ { "name": "npm-install-checks", - "version": "1.0.0", + "version": "1.0.4", "description": "checks that npm runs during the installation of a module", "main": "index.js", "dependencies": { - "npmlog": "0.0.6", - "semver": "~2.2.1" + "npmlog": "0.1", + "semver": "^2.3.0 || 3.x || 4" }, "devDependencies": { "tap": "~0.4.8", @@ -17,9 +17,9 @@ }, "repository": { "type": "git", - "url": "git://github.com/robertkowalski/npm-install-checks.git" + "url": "git://github.com/npm/npm-install-checks.git" }, - "homepage": "https://github.com/robertkowalski/npm-install-checks", + "homepage": "https://github.com/npm/npm-install-checks", "keywords": [ "npm,", "install" @@ -30,14 +30,31 @@ }, "license": "BSD-2-Clause", "bugs": { - "url": "https://github.com/robertkowalski/npm-install-checks/issues" + "url": "https://github.com/npm/npm-install-checks/issues" }, - "readme": "# npm-install-checks\n\nA package that contains checks that npm runs during the installation.\n\n## API\n\n### .checkEngine(target, npmVer, nodeVer, force, strict, cb)\nCheck if node/npm version 
is supported by the package.\n\nError type: `ENOTSUP`\n\n### .checkPlatform(target, force, cb)\nCheck if OS/Arch is supported by the package.\n\nError type: `EBADPLATFORM`\n\n### .checkCycle(target, ancestors, cb)\nCheck for cyclic dependencies.\n\nError type: `ECYCLE`\n\n### .checkGit(folder, cb)\nCheck if a folder is a .git folder.\n\nError type: `EISGIT`\n", - "readmeFilename": "README.md", - "_id": "npm-install-checks@1.0.0", + "gitHead": "05944f95860b0ac3769667551c4b7aa3d3fcdc32", + "_id": "npm-install-checks@1.0.4", + "_shasum": "9757c6f9d4d493c2489465da6d07a8ed416d44c8", + "_from": "npm-install-checks@>=1.0.2-0 <1.1.0-0", + "_npmVersion": "2.0.0-beta.3", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "robertkowalski", + "email": "rok@kowalski.gd" + }, + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "7e1469b5e0c693b2ae2a8830b5fc4e7bf76c88fd" + "shasum": "9757c6f9d4d493c2489465da6d07a8ed416d44c8", + "tarball": "http://registry.npmjs.org/npm-install-checks/-/npm-install-checks-1.0.4.tgz" }, - "_from": "npm-install-checks@1.0.0", - "_resolved": "https://registry.npmjs.org/npm-install-checks/-/npm-install-checks-1.0.0.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/npm-install-checks/-/npm-install-checks-1.0.4.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmlog/.npmrc nodejs-0.11.15/deps/npm/node_modules/npmlog/.npmrc --- nodejs-0.11.13/deps/npm/node_modules/npmlog/.npmrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmlog/.npmrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +save-prefix = ~ +proprietary-attribs = false diff -Nru nodejs-0.11.13/deps/npm/node_modules/npmlog/package.json nodejs-0.11.15/deps/npm/node_modules/npmlog/package.json --- nodejs-0.11.13/deps/npm/node_modules/npmlog/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npmlog/package.json 2015-01-20 
21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "npmlog", "description": "logger for npm", - "version": "0.0.6", + "version": "0.1.1", "repository": { "type": "git", "url": "git://github.com/isaacs/npmlog.git" @@ -16,7 +16,7 @@ "test": "tap test/*.js" }, "dependencies": { - "ansi": "~0.2.1" + "ansi": "~0.3.0" }, "devDependencies": { "tap": "" @@ -24,9 +24,12 @@ "license": "BSD", "readme": "# npmlog\n\nThe logger util that npm uses.\n\nThis logger is very basic. It does the logging for npm. It supports\ncustom levels and colored output.\n\nBy default, logs are written to stderr. If you want to send log messages\nto outputs other than streams, then you can change the `log.stream`\nmember, or you can just listen to the events that it emits, and do\nwhatever you want with them.\n\n# Basic Usage\n\n```\nvar log = require('npmlog')\n\n// additional stuff ---------------------------+\n// message ----------+ |\n// prefix ----+ | |\n// level -+ | | |\n// v v v v\n log.info('fyi', 'I have a kitty cat: %j', myKittyCat)\n```\n\n## log.level\n\n* {String}\n\nThe level to display logs at. Any logs at or above this level will be\ndisplayed. The special level `silent` will prevent anything from being\ndisplayed ever.\n\n## log.record\n\n* {Array}\n\nAn array of all the log messages that have been entered.\n\n## log.maxRecordSize\n\n* {Number}\n\nThe maximum number of records to keep. If log.record gets bigger than\n10% over this value, then it is sliced down to 90% of this value.\n\nThe reason for the 10% window is so that it doesn't have to resize a\nlarge array on every log entry.\n\n## log.prefixStyle\n\n* {Object}\n\nA style object that specifies how prefixes are styled. (See below)\n\n## log.headingStyle\n\n* {Object}\n\nA style object that specifies how the heading is styled. 
(See below)\n\n## log.heading\n\n* {String} Default: \"\"\n\nIf set, a heading that is printed at the start of every line.\n\n## log.stream\n\n* {Stream} Default: `process.stderr`\n\nThe stream where output is written.\n\n## log.enableColor()\n\nForce colors to be used on all messages, regardless of the output\nstream.\n\n## log.disableColor()\n\nDisable colors on all messages.\n\n## log.pause()\n\nStop emitting messages to the stream, but do not drop them.\n\n## log.resume()\n\nEmit all buffered messages that were written while paused.\n\n## log.log(level, prefix, message, ...)\n\n* `level` {String} The level to emit the message at\n* `prefix` {String} A string prefix. Set to \"\" to skip.\n* `message...` Arguments to `util.format`\n\nEmit a log message at the specified level.\n\n## log\\[level](prefix, message, ...)\n\nFor example,\n\n* log.silly(prefix, message, ...)\n* log.verbose(prefix, message, ...)\n* log.info(prefix, message, ...)\n* log.http(prefix, message, ...)\n* log.warn(prefix, message, ...)\n* log.error(prefix, message, ...)\n\nLike `log.log(level, prefix, message, ...)`. In this way, each level is\ngiven a shorthand, so you can do `log.info(prefix, message)`.\n\n## log.addLevel(level, n, style, disp)\n\n* `level` {String} Level indicator\n* `n` {Number} The numeric level\n* `style` {Object} Object with fg, bg, inverse, etc.\n* `disp` {String} Optional replacement for `level` in the output.\n\nSets up a new level with a shorthand function and so forth.\n\nNote that if the number is `Infinity`, then setting the level to that\nwill cause all log messages to be suppressed. 
If the number is\n`-Infinity`, then the only way to show it is to enable all log messages.\n\n# Events\n\nEvents are all emitted with the message object.\n\n* `log` Emitted for all messages\n* `log.` Emitted for all messages with the `` level.\n* `` Messages with prefixes also emit their prefix as an event.\n\n# Style Objects\n\nStyle objects can have the following fields:\n\n* `fg` {String} Color for the foreground text\n* `bg` {String} Color for the background\n* `bold`, `inverse`, `underline` {Boolean} Set the associated property\n* `bell` {Boolean} Make a noise (This is pretty annoying, probably.)\n\n# Message Objects\n\nEvery log event is emitted with a message object, and the `log.record`\nlist contains all of them that have been created. They have the\nfollowing fields:\n\n* `id` {Number}\n* `level` {String}\n* `prefix` {String}\n* `message` {String} Result of `util.format()`\n* `messageRaw` {Array} Arguments to `util.format()`\n", "readmeFilename": "README.md", + "gitHead": "b58e360cd99db707d1191ce6125ae53d79f075a1", "bugs": { "url": "https://github.com/isaacs/npmlog/issues" }, - "_id": "npmlog@0.0.6", + "homepage": "https://github.com/isaacs/npmlog", + "_id": "npmlog@0.1.1", + "_shasum": "8b9b9e4405d7ec48c31c2346965aadc7abaecaa5", "_from": "npmlog@latest" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/LICENSE nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/npa.js nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/npa.js --- nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/npa.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/npa.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,187 @@ +var url = require("url") +var assert = require("assert") +var util = require("util") +var semver = require("semver") +var path = require("path") + +module.exports = npa + +var isWindows = process.platform === "win32" || global.FAKE_WINDOWS +var slashRe = isWindows ? /\\|\// : /\// + +var parseName = /^(?:@([^\/]+?)\/)?([^\/]+?)$/ +var nameAt = /^(@([^\/]+?)\/)?([^\/]+?)@/ +var debug = util.debuglog ? util.debuglog("npa") + : /\bnpa\b/i.test(process.env.NODE_DEBUG || "") + ? function () { + console.error("NPA: " + util.format.apply(util, arguments).split("\n").join("\nNPA: ")) + } : function () {} + +function validName (name) { + if (!name) { + debug("not a name %j", name) + return false + } + var n = name.trim() + if (!n || n.charAt(0) === "." 
+ || !n.match(/^[a-zA-Z0-9]/) + || n.match(/[\/\(\)&\?#\|<>@:%\s\\\*'"!~`]/) + || n.toLowerCase() === "node_modules" + || n !== encodeURIComponent(n) + || n.toLowerCase() === "favicon.ico") { + debug("not a valid name %j", name) + return false + } + return n +} + +function npa (arg) { + assert.equal(typeof arg, "string") + arg = arg.trim() + + var res = new Result + res.raw = arg + res.scope = null + + // See if it's something like foo@... + var nameparse = arg.match(nameAt) + debug("nameparse", nameparse) + if (nameparse && validName(nameparse[3]) && + (!nameparse[2] || validName(nameparse[2]))) { + res.name = (nameparse[1] || "") + nameparse[3] + if (nameparse[2]) + res.scope = "@" + nameparse[2] + arg = arg.substr(nameparse[0].length) + } else { + res.name = null + } + + res.rawSpec = arg + res.spec = arg + + var urlparse = url.parse(arg) + debug("urlparse", urlparse) + + // windows paths look like urls + // don't be fooled! + if (isWindows && urlparse && urlparse.protocol && + urlparse.protocol.match(/^[a-zA-Z]:$/)) { + debug("windows url-ish local path", urlparse) + urlparse = {} + } + + if (urlparse.protocol) { + return parseUrl(res, arg, urlparse) + } + + // parse git stuff + // parse tag/range/local/remote + + if (maybeGitHubShorthand(arg)) { + res.type = "github" + res.spec = arg + return res + } + + // at this point, it's not a url, and not github + // If it's a valid name, and doesn't already have a name, then assume + // $name@"" range + // + // if it's got / chars in it, then assume that it's local. + + if (res.name) { + var version = semver.valid(arg, true) + var range = semver.validRange(arg, true) + // foo@... 
+ if (version) { + res.spec = version + res.type = "version" + } else if (range) { + res.spec = range + res.type = "range" + } else if (slashRe.test(arg)) { + parseLocal(res, arg) + } else { + res.type = "tag" + res.spec = arg + } + } else { + var p = arg.match(parseName) + if (p && validName(p[2]) && + (!p[1] || validName(p[1]))) { + res.type = "range" + res.spec = "*" + res.rawSpec = "" + res.name = arg + if (p[1]) + res.scope = "@" + p[1] + } else { + parseLocal(res, arg) + } + } + + return res +} + +function parseLocal (res, arg) { + // turns out nearly every character is allowed in fs paths + if (/\0/.test(arg)) { + throw new Error("Invalid Path: " + JSON.stringify(arg)) + } + res.type = "local" + res.spec = path.resolve(arg) +} + +function maybeGitHubShorthand (arg) { + // Note: This does not fully test the git ref format. + // See https://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html + // + // The only way to do this properly would be to shell out to + // git-check-ref-format, and as this is a fast sync function, + // we don't want to do that. Just let git fail if it turns + // out that the commit-ish is invalid. + // GH usernames cannot start with . 
or - + return /^[^@%\/\s\.-][^@%\/\s]*\/[^@\s\/%]+(?:#.*)?$/.test(arg) +} + +function parseUrl (res, arg, urlparse) { + // check the protocol, and then see if it's git or not + switch (urlparse.protocol) { + case "git:": + case "git+http:": + case "git+https:": + case "git+rsync:": + case "git+ftp:": + case "git+ssh:": + case "git+file:": + res.type = 'git' + res.spec = arg.replace(/^git\+/, '') + break + + case 'http:': + case 'https:': + res.type = 'remote' + res.spec = arg + break + + case 'file:': + res.type = 'local' + res.spec = urlparse.pathname + break; + + default: + throw new Error('Unsupported URL Type: ' + arg) + break + } + + return res +} + + +function Result () { + if (!(this instanceof Result)) return new Result +} +Result.prototype.name = null +Result.prototype.type = null +Result.prototype.spec = null +Result.prototype.raw = null diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/package.json nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/package.json --- nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,38 @@ +{ + "name": "npm-package-arg", + "version": "2.1.3", + "description": "Parse the things that can be arguments to `npm install`", + "main": "npa.js", + "directories": { + "test": "test" + }, + "dependencies": { + "semver": "4" + }, + "devDependencies": { + "tap": "^0.4.9" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/npm-package-arg" + }, + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/npm-package-arg/issues" + }, + "homepage": "https://github.com/npm/npm-package-arg", + "readme": "# npm-package-arg\n\nParse the things that can be arguments to `npm install`\n\nTakes an argument like `foo@1.2`, or `foo@user/foo`, or\n`http://x.com/foo.tgz`, or `git+https://github.com/user/foo`, and\nfigures out what type of thing it is.\n\n## USAGE\n\n```javascript\nvar assert = require(\"assert\")\nvar npa = require(\"npm-package-arg\")\n\n// Pass in the descriptor, and it'll return an object\nvar parsed = npa(\"foo@1.2\")\n\n// Returns an object like:\n// {\n// name: \"foo\", // The bit in front of the @\n// type: \"range\", // the type of descriptor this is\n// spec: \"1.2\" // the specifier for this descriptor\n// }\n\n// Completely unreasonable invalid garbage throws an error\n// Make sure you wrap this in a try/catch if you have not\n// already sanitized the inputs!\nassert.throws(function() {\n npa(\"this is not \\0 a valid package name or url\")\n})\n```\n\nFor more examples, see the test file.\n\n## Result Objects\n\nThe objects that are returned by npm-package-arg contain the following\nfields:\n\n* `name` - If known, the `name` field expected in the resulting pkg.\n* `type` - One of the following strings:\n * `git` - A git repo\n * `github` - A github shorthand, like `user/project`\n * `tag` - A tagged version, like `\"foo@latest\"`\n * `version` - A specific version number, like `\"foo@1.2.3\"`\n * `range` - A version range, like `\"foo@2.x\"`\n * `local` - A local file or folder path\n * `remote` - An http url (presumably to a tgz)\n* `spec` - The \"thing\". 
URL, the range, git repo, etc.\n* `raw` - The original un-modified string that was provided.\n* `rawSpec` - The part after the `name@...`, as it was originally\n provided.\n* `scope` - If a name is something like `@org/module` then the `scope`\n field will be set to `org`. If it doesn't have a scoped name, then\n scope is `null`.\n", + "readmeFilename": "README.md", + "gitHead": "9aaabc2aae746371a05f54cdb57a5f9ada003d8f", + "_id": "npm-package-arg@2.1.3", + "_shasum": "dfba34bd82dd327c10cb43a65c8db6ef0b812bf7", + "_from": "npm-package-arg@~2.1.3" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/README.md nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/README.md --- nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,55 @@ +# npm-package-arg + +Parse the things that can be arguments to `npm install` + +Takes an argument like `foo@1.2`, or `foo@user/foo`, or +`http://x.com/foo.tgz`, or `git+https://github.com/user/foo`, and +figures out what type of thing it is. + +## USAGE + +```javascript +var assert = require("assert") +var npa = require("npm-package-arg") + +// Pass in the descriptor, and it'll return an object +var parsed = npa("foo@1.2") + +// Returns an object like: +// { +// name: "foo", // The bit in front of the @ +// type: "range", // the type of descriptor this is +// spec: "1.2" // the specifier for this descriptor +// } + +// Completely unreasonable invalid garbage throws an error +// Make sure you wrap this in a try/catch if you have not +// already sanitized the inputs! +assert.throws(function() { + npa("this is not \0 a valid package name or url") +}) +``` + +For more examples, see the test file. + +## Result Objects + +The objects that are returned by npm-package-arg contain the following +fields: + +* `name` - If known, the `name` field expected in the resulting pkg. 
+* `type` - One of the following strings: + * `git` - A git repo + * `github` - A github shorthand, like `user/project` + * `tag` - A tagged version, like `"foo@latest"` + * `version` - A specific version number, like `"foo@1.2.3"` + * `range` - A version range, like `"foo@2.x"` + * `local` - A local file or folder path + * `remote` - An http url (presumably to a tgz) +* `spec` - The "thing". URL, the range, git repo, etc. +* `raw` - The original un-modified string that was provided. +* `rawSpec` - The part after the `name@...`, as it was originally + provided. +* `scope` - If a name is something like `@org/module` then the `scope` + field will be set to `org`. If it doesn't have a scoped name, then + scope is `null`. diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/test/basic.js nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,203 @@ +var npa = require("../npa.js") +var path = require("path") + +require("tap").test("basic", function (t) { + t.setMaxListeners(999) + + var tests = { + "foo@1.2": { + name: "foo", + type: "range", + spec: ">=1.2.0 <1.3.0", + raw: "foo@1.2", + rawSpec: "1.2" + }, + + "@foo/bar": { + raw: "@foo/bar", + name: "@foo/bar", + scope: "@foo", + rawSpec: "", + spec: "*", + type: "range" + }, + + "@foo/bar@": { + raw: "@foo/bar@", + name: "@foo/bar", + scope: "@foo", + rawSpec: "", + spec: "*", + type: "range" + }, + + "@foo/bar@baz": { + raw: "@foo/bar@baz", + name: "@foo/bar", + scope: "@foo", + rawSpec: "baz", + spec: "baz", + type: "tag" + }, + + "@f fo o al/ a d s ;f ": { + raw: "@f fo o al/ a d s ;f", + name: null, + rawSpec: "@f fo o al/ a d s ;f", + spec: path.resolve("@f fo o al/ a d s ;f"), + type: "local" + }, + + "foo@1.2.3": { + name: "foo", + type: "version", + spec: "1.2.3", + 
raw: "foo@1.2.3" + }, + + "foo@=v1.2.3": { + name: "foo", + type: "version", + spec: "1.2.3", + raw: "foo@=v1.2.3", + rawSpec: "=v1.2.3" + }, + + "git+ssh://git@github.com/user/foo#1.2.3": { + name: null, + type: "git", + spec: "ssh://git@github.com/user/foo#1.2.3", + raw: "git+ssh://git@github.com/user/foo#1.2.3" + }, + + "git+file://path/to/repo#1.2.3": { + name: null, + type: "git", + spec: "file://path/to/repo#1.2.3", + raw: "git+file://path/to/repo#1.2.3" + }, + + "git://github.com/user/foo": { + name: null, + type: "git", + spec: "git://github.com/user/foo", + raw: "git://github.com/user/foo" + }, + + "@foo/bar@git+ssh://github.com/user/foo": { + name: "@foo/bar", + scope: "@foo", + spec: "ssh://github.com/user/foo", + rawSpec: "git+ssh://github.com/user/foo", + raw: "@foo/bar@git+ssh://github.com/user/foo" + }, + + "/path/to/foo": { + name: null, + type: "local", + spec: "/path/to/foo", + raw: "/path/to/foo" + }, + + "file:path/to/foo": { + name: null, + type: "local", + spec: "path/to/foo", + raw: "file:path/to/foo" + }, + + "file:~/path/to/foo": { + name: null, + type: "local", + spec: "~/path/to/foo", + raw: "file:~/path/to/foo" + }, + + "file:../path/to/foo": { + name: null, + type: "local", + spec: "../path/to/foo", + raw: "file:../path/to/foo" + }, + + "file:///path/to/foo": { + name: null, + type: "local", + spec: "/path/to/foo", + raw: "file:///path/to/foo" + }, + + "https://server.com/foo.tgz": { + name: null, + type: "remote", + spec: "https://server.com/foo.tgz", + raw: "https://server.com/foo.tgz" + }, + + "user/foo-js": { + name: null, + type: "github", + spec: "user/foo-js", + raw: "user/foo-js" + }, + + "user/foo-js#bar/baz": { + name: null, + type: "github", + spec: "user/foo-js#bar/baz", + raw: "user/foo-js#bar/baz" + }, + + "user..blerg--/..foo-js# . . . . . some . tags / / /": { + name: null, + type: "github", + spec: "user..blerg--/..foo-js# . . . . . some . tags / / /", + raw: "user..blerg--/..foo-js# . . . . . some . 
tags / / /" + }, + + "user/foo-js#bar/baz/bin": { + name: null, + type: "github", + spec: "user/foo-js#bar/baz/bin", + raw: "user/foo-js#bar/baz/bin" + }, + + "foo@user/foo-js": { + name: "foo", + type: "github", + spec: "user/foo-js", + raw: "foo@user/foo-js" + }, + + "foo@latest": { + name: "foo", + type: "tag", + spec: "latest", + raw: "foo@latest" + }, + + "foo": { + name: "foo", + type: "range", + spec: "*", + raw: "foo" + } + } + + Object.keys(tests).forEach(function (arg) { + var res = npa(arg) + t.type(res, "Result") + t.has(res, tests[arg]) + }) + + // Completely unreasonable invalid garbage throws an error + t.throws(function() { + npa("this is not a \0 valid package name or url") + }) + + t.throws(function() { + npa("gopher://yea right") + }, "Unsupported URL Type: gopher://yea right") + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/test/windows.js nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/test/windows.js --- nodejs-0.11.13/deps/npm/node_modules/npm-package-arg/test/windows.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-package-arg/test/windows.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,41 @@ +global.FAKE_WINDOWS = true + +var npa = require("../npa.js") +var test = require("tap").test +var path = require("path") + +var cases = { + "C:\\x\\y\\z": { + raw: 'C:\\x\\y\\z', + scope: null, + name: null, + rawSpec: 'C:\\x\\y\\z', + spec: path.resolve('C:\\x\\y\\z'), + type: 'local' + }, + "foo@C:\\x\\y\\z": { + raw: 'foo@C:\\x\\y\\z', + scope: null, + name: 'foo', + rawSpec: 'C:\\x\\y\\z', + spec: path.resolve('C:\\x\\y\\z'), + type: 'local' + }, + "foo@/foo/bar/baz": { + raw: 'foo@/foo/bar/baz', + scope: null, + name: 'foo', + rawSpec: '/foo/bar/baz', + spec: path.resolve('/foo/bar/baz'), + type: 'local' + } +} + +test("parse a windows path", function (t) { + Object.keys(cases).forEach(function (c) { + var expect = cases[c] + var actual = npa(c) + t.same(actual, expect, c) + 
}) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/index.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/index.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,10 +2,9 @@ module.exports = RegClient -var fs = require('fs') -, url = require('url') -, path = require('path') +var url = require('url') , npmlog +, cacheFile = require('npm-cache-filename') try { npmlog = require("npmlog") @@ -45,7 +44,10 @@ registry = null } + this.registry = registry + if (!conf.get('cache')) throw new Error("Cache dir is required") + this.cacheFile = cacheFile(this.conf.get('cache')) this.log = conf.log || conf.get('log') || npmlog } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/adduser.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/adduser.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/adduser.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/adduser.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,12 +1,12 @@ module.exports = adduser -var crypto = require('crypto') +var url = require("url") -function sha (s) { - return crypto.createHash("sha1").update(s).digest("hex") -} +function adduser (base, username, password, email, cb) { + if (!base) return cb(new Error("Required base URI not supplied")) -function adduser (username, password, email, cb) { + username = ("" + (username || "")).trim() + if (!username) return cb(new Error("No username supplied.")) password = ("" + (password || "")).trim() if (!password) return cb(new Error("No password supplied.")) @@ -17,12 +17,7 @@ return cb(new Error("Please use a real email address.")) } - if (password.indexOf(":") !== -1) return cb(new Error( - "Sorry, ':' chars are not allowed in passwords.\n"+ - "See for why.")) - - 
var salt = crypto.randomBytes(30).toString('hex') - , userobj = + var userobj = { name : username , password : password , email : email @@ -34,15 +29,13 @@ // pluck off any other username/password/token. it needs to be the // same as the user we're becoming now. replace them on error. - var pre = { username: this.conf.get('username') - , password: this.conf.get('_password') - , auth: this.conf.get('_auth') + var c = this.conf.getCredentialsByURI(base) + var pre = { username: c.username + , password: c.password + , email: c.email , token: this.conf.get('_token') } this.conf.del('_token') - this.conf.del('username') - this.conf.del('_auth') - this.conf.del('_password') if (this.couchLogin) { this.couchLogin.token = null } @@ -59,19 +52,22 @@ this.log.verbose("adduser", "before first PUT", logObj) + var uri = url.resolve(base, '/-/user/org.couchdb.user:' + encodeURIComponent(username)) this.request('PUT' - , '/-/user/org.couchdb.user:'+encodeURIComponent(username) - , userobj + , uri + , { body : userobj } , function (error, data, json, response) { // if it worked, then we just created a new user, and all is well. // but if we're updating a current record, then it'll 409 first - if (error && !this.conf.get('_auth')) { + var c = this.conf.getCredentialsByURI(base) + if (error && !c.auth) { // must be trying to re-auth on a new machine. 
// use this info as auth - var b = new Buffer(username + ":" + password) - this.conf.set('_auth', b.toString("base64")) - this.conf.set('username', username) - this.conf.set('_password', password) + this.conf.setCredentialsByURI(base, { + username : username, + password : password, + email : email + }) } if (!error || !response || response.statusCode !== 409) { @@ -80,8 +76,8 @@ this.log.verbose("adduser", "update existing user") return this.request('GET' - , '/-/user/org.couchdb.user:'+encodeURIComponent(username) + - '?write=true' + , uri + '?write=true' + , null , function (er, data, json, response) { if (er || data.error) { return cb(er, data, json, response) @@ -93,45 +89,48 @@ }) this.log.verbose("adduser", "userobj", logObj) this.request('PUT' - , '/-/user/org.couchdb.user:'+encodeURIComponent(username) - + "/-rev/" + userobj._rev - , userobj - , cb ) + , uri + "/-rev/" + userobj._rev + , { body : userobj } + , cb) }.bind(this)) }.bind(this)) -} -function done (cb, pre) { - return function (error, data, json, response) { - if (!error && (!response || response.statusCode === 201)) { - return cb(error, data, json, response) - } - - // there was some kind of error, re-instate previous auth/token/etc. - this.conf.set('_token', pre.token) - if (this.couchLogin) { - this.couchLogin.token = pre.token - if (this.couchLogin.tokenSet) { - this.couchLogin.tokenSet(pre.token) + function done (cb, pre) { + return function (error, data, json, response) { + if (!error && (!response || response.statusCode === 201)) { + return cb(error, data, json, response) + } + + // there was some kind of error, re-instate previous auth/token/etc. 
+ this.conf.set('_token', pre.token) + if (this.couchLogin) { + this.couchLogin.token = pre.token + if (this.couchLogin.tokenSet) { + this.couchLogin.tokenSet(pre.token) + } + } + this.conf.setCredentialsByURI(base, { + username : pre.username, + password : pre.password, + email : pre.email + }) + + this.log.verbose("adduser", "back", [error, data, json]) + if (!error) { + error = new Error( + (response && response.statusCode || "") + " " + + "Could not create user\n" + JSON.stringify(data) + ) } - } - this.conf.set('username', pre.username) - this.conf.set('_password', pre.password) - this.conf.set('_auth', pre.auth) - - this.log.verbose("adduser", "back", [error, data, json]) - if (!error) { - error = new Error( (response && response.statusCode || "") + " "+ - "Could not create user\n"+JSON.stringify(data)) - } - if (response - && (response.statusCode === 401 || response.statusCode === 403)) { - this.log.warn("adduser", "Incorrect username or password\n" - +"You can reset your account by visiting:\n" - +"\n" - +" https://npmjs.org/forgot\n") - } - return cb(error) - }.bind(this) + if (response && (response.statusCode === 401 || response.statusCode === 403)) { + this.log.warn("adduser", "Incorrect username or password\n" + + "You can reset your account by visiting:\n" + + "\n" + + " https://npmjs.org/forgot\n") + } + + return cb(error) + }.bind(this) + } } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/attempt.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/attempt.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/attempt.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/attempt.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +var retry = require("retry") + +module.exports = attempt + +function attempt(cb) { + // Tuned to spread 3 attempts over about a minute. + // See formula at . 
+ var operation = retry.operation({ + retries : this.conf.get("fetch-retries") || 2, + factor : this.conf.get("fetch-retry-factor"), + minTimeout : this.conf.get("fetch-retry-mintimeout") || 10000, + maxTimeout : this.conf.get("fetch-retry-maxtimeout") || 60000 + }) + + var client = this + operation.attempt(function (currentAttempt) { + client.log.info("attempt", "registry request try #"+currentAttempt+ + " at "+(new Date()).toLocaleTimeString()) + + cb(operation) + }) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/authify.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/authify.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/authify.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/authify.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +var url = require("url") + +module.exports = authify + +function authify (authed, parsed, headers) { + var c = this.conf.getCredentialsByURI(url.format(parsed)) + + if (c && c.token) { + this.log.verbose("request", "using bearer token for auth") + headers.authorization = "Bearer " + c.token + + return null + } + + if (authed) { + if (c && c.username && c.password) { + var username = encodeURIComponent(c.username) + var password = encodeURIComponent(c.password) + parsed.auth = username + ":" + password + } + else { + return new Error( + "This request requires auth credentials. Run `npm login` and repeat the request." 
+ ) + } + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/bugs.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/bugs.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/bugs.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/bugs.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,8 +1,8 @@ module.exports = bugs -function bugs (name, cb) { - this.get(name + "/latest", 3600, function (er, d) { +function bugs (uri, cb) { + this.get(uri + "/latest", 3600, function (er, d) { if (er) return cb(er) cb(null, d.bugs) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/deprecate.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/deprecate.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/deprecate.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/deprecate.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ - module.exports = deprecate +var url = require("url") var semver = require("semver") -function deprecate (name, ver, message, cb) { - if (!this.conf.get('username')) { +function deprecate (uri, ver, message, cb) { + var c = this.conf.getCredentialsByURI(uri) + if (!(c.token || c.auth)) { return cb(new Error("Must be logged in to deprecate a package")) } @@ -12,9 +13,7 @@ return cb(new Error("invalid version range: "+ver)) } - var users = {} - - this.get(name + '?write=true', function (er, data) { + this.get(uri + '?write=true', null, function (er, data) { if (er) return cb(er) // filter all the versions that match Object.keys(data.versions).filter(function (v) { @@ -23,6 +22,6 @@ data.versions[v].deprecated = message }) // now update the doc on the registry - this.request('PUT', data._id, data, cb) + this.request('PUT', url.resolve(uri, data._id), { body : data }, cb) }.bind(this)) } diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/fetch.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/fetch.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/fetch.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/fetch.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,89 @@ +var assert = require("assert") + , url = require("url") + +var request = require("request") + , once = require("once") + +module.exports = fetch + +function fetch (uri, headers, cb) { + assert(uri, "must pass resource to fetch") + assert(cb, "must pass callback") + + if (!headers) headers = {} + + cb = once(cb) + + var client = this + this.attempt(function (operation) { + makeRequest.call(client, uri, headers, function (er, req) { + if (er) return cb(er) + + req.on("error", function (er) { + if (operation.retry(er)) { + client.log.info("retry", "will retry, error on last attempt: " + er) + } + }) + + req.on("response", function (res) { + client.log.http("fetch", "" + res.statusCode, uri) + + var er + var statusCode = res && res.statusCode + if (statusCode === 200) { + // Work around bug in node v0.10.0 where the CryptoStream + // gets stuck and never starts reading again. + res.resume() + if (process.version === "v0.10.0") unstick(res) + + return cb(null, res) + } + // Only retry on 408, 5xx or no `response`. 
+ else if (statusCode === 408) { + er = new Error("request timed out") + } + else if (statusCode >= 500) { + er = new Error("server error " + statusCode) + } + + if (er && operation.retry(er)) { + client.log.info("retry", "will retry, error on last attempt: " + er) + } + else { + cb(new Error("fetch failed with status code " + statusCode)) + } + }) + }) + }) +} + +function unstick(response) { + response.resume = function (orig) { return function() { + var ret = orig.apply(response, arguments) + if (response.socket.encrypted) response.socket.encrypted.read(0) + return ret + }}(response.resume) +} + +function makeRequest (remote, headers, cb) { + var parsed = url.parse(remote) + this.log.http("fetch", "GET", parsed.href) + + var er = this.authify( + this.conf.getCredentialsByURI(remote).alwaysAuth, + parsed, + headers + ) + if (er) return cb(er) + + var opts = this.initialize( + parsed, + "GET", + "application/x-tar", + headers + ) + // always want to follow redirects for fetch + opts.followRedirect = true + + cb(null, request(opts)) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/get.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/get.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/get.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/get.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,90 +2,94 @@ module.exports = get var fs = require("graceful-fs") + , assert = require("assert") , path = require("path") , mkdir = require("mkdirp") , chownr = require("chownr") + , url = require("url") -function get (uri, timeout, nofollow, staleOk, cb) { - if (typeof cb !== "function") cb = staleOk, staleOk = false - if (typeof cb !== "function") cb = nofollow, nofollow = false - if (typeof cb !== "function") cb = timeout, timeout = -1 - if (typeof cb !== "function") cb = version, version = null +/** + * options: + * + * timeout: request timeouts + * follow: follow 
redirects + * staleOk: stale results are OK + * stat: results of checking for cached metadata + * data: the cached metadata + */ +function get (uri, options, cb) { + assert(uri, "must have URL to fetch") + assert(cb, "must have callback") + if (!options) options = {} - timeout = Math.min(timeout, this.conf.get('cache-max') || 0) - timeout = Math.max(timeout, this.conf.get('cache-min') || -Infinity) + var parsed = url.parse(uri) + assert(parsed.protocol, "must have a URL that starts with npm:, http:, or https:") - if (!this.conf.get('registry')) timeout = Infinity - - if ( process.env.COMP_CWORD !== undefined - && process.env.COMP_LINE !== undefined - && process.env.COMP_POINT !== undefined - ) timeout = Math.max(timeout, 60000) + var cache = this.cacheFile(uri) + "/.cache.json" // /-/all is special. // It uses timestamp-based caching and partial updates, // because it is a monster. - if (uri === "/-/all") { - return requestAll.call(this, cb) + if (parsed.pathname === "/-/all") { + return requestAll.call(this, uri, cache, cb) } - var cacheUri = uri - // on windows ":" is not an allowed character in a foldername - cacheUri = cacheUri.replace(/:/g, '_').replace(/\?write=true$/, '') - var cache = path.join(this.conf.get('cache'), cacheUri, ".cache.json") - // If the GET is part of a write operation (PUT or DELETE), then // skip past the cache entirely, but still save the results. 
- if (uri.match(/\?write=true$/)) - return get_.call(this, uri, timeout, cache, null, null, nofollow, staleOk, cb) - + if (uri.match(/\?write=true$/)) { + return get_.call(this, uri, cache, options, cb) + } fs.stat(cache, function (er, stat) { if (!er) fs.readFile(cache, function (er, data) { try { data = JSON.parse(data) } catch (ex) { data = null } - get_.call(this, uri, timeout, cache, stat, data, nofollow, staleOk, cb) + options.stat = stat + options.data = data + get_.call(this, uri, cache, options, cb) }.bind(this)) - else get_.call(this, uri, timeout, cache, null, null, nofollow, staleOk, cb) + else { + get_.call(this, uri, cache, options, cb) + } }.bind(this)) } -function requestAll (cb) { - var cache = path.join(this.conf.get('cache'), "/-/all", ".cache.json") - - mkdir(path.join(this.conf.get('cache'), "-", "all"), function (er) { +function requestAll (uri, cache, cb) { + this.log.info("get", cache) + mkdir(path.dirname(cache), function (er) { + if (er) return cb(er) fs.readFile(cache, function (er, data) { - if (er) return requestAll_.call(this, 0, {}, cb) + if (er) return requestAll_.call(this, uri, 0, {}, cache, cb) try { data = JSON.parse(data) } catch (ex) { fs.writeFile(cache, "{}", function (er) { if (er) return cb(new Error("Broken cache.")) - return requestAll_.call(this, 0, {}, cb) + return requestAll_.call(this, uri, 0, {}, cache, cb) }.bind(this)) } var t = +data._updated || 0 - requestAll_.call(this, t, data, cb) + requestAll_.call(this, uri, t, data, cache, cb) }.bind(this)) }.bind(this)) } -function requestAll_ (c, data, cb) { +function requestAll_ (uri, c, data, cache, cb) { // use the cache and update in the background if it's not too old if (Date.now() - c < 60000) { cb(null, data) cb = function () {} } - var uri = "/-/all/since?stale=update_after&startkey=" + c - if (c === 0) { this.log.warn("", "Building the local index for the first time, please be patient") - uri = "/-/all" + uri = url.resolve(uri, "/-/all") + } + else { + uri = 
url.resolve(uri, "/-/all/since?stale=update_after&startkey=" + c) } - var cache = path.join(this.conf.get('cache'), "-/all", ".cache.json") - this.request('GET', uri, function (er, updates, _, res) { + this.request('GET', uri, null, function (er, updates, _, res) { if (er) return cb(er, data) var headers = res.headers , updated = updates._updated || Date.parse(headers.date) @@ -101,10 +105,25 @@ }) } -function get_ (uri, timeout, cache, stat, data, nofollow, staleOk, cb) { - var etag +function get_ (uri, cache, options, cb) { + var staleOk = options.staleOk === undefined ? false : options.staleOk + , follow = options.follow + , data = options.data + , stat = options.stat + , etag + + var timeout = options.timeout === undefined ? -1 : options.timeout + timeout = Math.min(timeout, this.conf.get('cache-max') || 0) + timeout = Math.max(timeout, this.conf.get('cache-min') || -Infinity) + if (process.env.COMP_CWORD !== undefined && + process.env.COMP_LINE !== undefined && + process.env.COMP_POINT !== undefined) { + timeout = Math.max(timeout, 60000) + } + if (data && data._etag) etag = data._etag - if (timeout && timeout > 0 && stat && data) { + + if (timeout && timeout > 0 && options.stat && options.data) { if ((Date.now() - stat.mtime.getTime())/1000 < timeout) { this.log.verbose("registry.get", uri, "not expired, no request") delete data._etag @@ -119,7 +138,7 @@ } } - this.request('GET', uri, null, etag, nofollow, function (er, remoteData, raw, response) { + this.request('GET', uri, { etag : etag, follow : follow }, function (er, remoteData, raw, response) { // if we get an error talking to the registry, but we have it // from the cache, then just pretend we got it. 
if (er && cache && data && !data.error) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/initialize.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/initialize.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/initialize.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/initialize.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,41 @@ +var crypto = require("crypto") + +var pkg = require("../package.json") + +module.exports = initialize + +function initialize (uri, method, accept, headers) { + if (!this.sessionToken) { + this.sessionToken = crypto.randomBytes(8).toString("hex") + this.log.verbose("request id", this.sessionToken) + } + + var strict = this.conf.get("strict-ssl") + if (strict === undefined) strict = true + + var p = this.conf.get("proxy") + var sp = this.conf.get("https-proxy") || p + + var opts = { + url : uri, + method : method, + headers : headers, + proxy : uri.protocol === "https:" ? 
sp : p, + localAddress : this.conf.get("local-address"), + strictSSL : strict, + cert : this.conf.get("cert"), + key : this.conf.get("key"), + ca : this.conf.get("ca") + } + + headers.version = this.version || pkg.version + headers.accept = accept + + if (this.refer) headers.referer = this.refer + + headers["npm-session"] = this.sessionToken + headers["user-agent"] = this.conf.get("user-agent") || + "node/" + process.version + + return opts +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/publish.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/publish.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/publish.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/publish.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,25 +1,30 @@ module.exports = publish -var path = require("path") - , url = require("url") +var url = require("url") , semver = require("semver") , crypto = require("crypto") , fs = require("fs") + , fixNameField = require("normalize-package-data/lib/fixer.js").fixNameField -function publish (data, tarball, cb) { - var email = this.conf.get('email') - var auth = this.conf.get('_auth') - var username = this.conf.get('username') +function escaped(name) { + return name.replace("/", "%2f") +} - if (!email || !auth || !username) { +function publish (uri, data, tarball, cb) { + var c = this.conf.getCredentialsByURI(uri) + if (!(c.token || (c.auth && c.username && c.email))) { var er = new Error("auth and email required for publishing") er.code = 'ENEEDAUTH' return cb(er) } - if (data.name !== encodeURIComponent(data.name)) - return cb(new Error('invalid name: must be url-safe')) + try { + fixNameField(data, true) + } + catch (er) { + return cb(er) + } var ver = semver.clean(data.version) if (!ver) @@ -29,14 +34,14 @@ var self = this fs.stat(tarball, function(er, s) { if (er) return cb(er) - fs.readFile(tarball, 'base64', function(er, tardata) { + 
fs.readFile(tarball, function(er, tarbuffer) { if (er) return cb(er) - putFirst.call(self, data, tardata, s, username, email, cb) + putFirst.call(self, uri, data, tarbuffer, s, c, cb) }) }) } -function putFirst (data, tardata, stat, username, email, cb) { +function putFirst (registry, data, tarbuffer, stat, creds, cb) { // optimistically try to PUT all in one single atomic thing. // If 409, then GET and merge, try again. // If other error, then fail. @@ -48,36 +53,35 @@ , "dist-tags" : {} , versions : {} , readme: data.readme || "" - , maintainers : - [ { name : username - , email : email - } - ] } + if (!creds.token) { + root.maintainers = [{name : creds.username, email : creds.email}] + data.maintainers = JSON.parse(JSON.stringify(root.maintainers)) + } + root.versions[ data.version ] = data - data.maintainers = JSON.parse(JSON.stringify(root.maintainers)) var tag = data.tag || this.conf.get('tag') || "latest" root["dist-tags"][tag] = data.version - var registry = this.conf.get('registry') var tbName = data.name + "-" + data.version + ".tgz" , tbURI = data.name + "/-/" + tbName data._id = data.name+"@"+data.version data.dist = data.dist || {} - data.dist.shasum = crypto.createHash("sha1").update(tardata, 'base64').digest("hex") + data.dist.shasum = crypto.createHash("sha1").update(tarbuffer).digest("hex") data.dist.tarball = url.resolve(registry, tbURI) .replace(/^https:\/\//, "http://") root._attachments = {} root._attachments[ tbName ] = { - content_type: 'application/octet-stream', - data: tardata, - length: stat.size - }; + "content_type": "application/octet-stream", + "data": tarbuffer.toString("base64"), + "length": stat.size + } - this.request("PUT", data.name, root, function (er, parsed, json, res) { + var fixed = url.resolve(registry, escaped(data.name)) + this.request("PUT", fixed, { body : root }, function (er, parsed, json, res) { var r409 = "must supply latest _rev to update existing package" var r409b = "Document update conflict." 
var conflict = res && res.statusCode === 409 @@ -95,16 +99,15 @@ return cb(er, parsed, json, res) // let's see what versions are already published. - var getUrl = data.name + "?write=true" - this.request("GET", getUrl, function (er, current) { - if (er) - return cb(er) - putNext.call(this, data.version, root, current, cb) + this.request("GET", fixed + "?write=true", null, function (er, current) { + if (er) return cb(er) + + putNext.call(this, registry, data.version, root, current, cb) }.bind(this)) }.bind(this)) } -function putNext(newVersion, root, current, cb) { +function putNext(registry, newVersion, root, current, cb) { // already have the tardata on the root object // just merge in existing stuff var curVers = Object.keys(current.versions || {}).map(function (v) { @@ -134,7 +137,7 @@ // ignore these case 'maintainers': - break; + break // copy default: @@ -144,7 +147,8 @@ var maint = JSON.parse(JSON.stringify(root.maintainers)) root.versions[newVersion].maintainers = maint - this.request("PUT", root.name, current, cb) + var uri = url.resolve(registry, escaped(root.name)) + this.request("PUT", uri, { body : current }, cb) } function conflictError (pkgid, version) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/request.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/request.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/request.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/request.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,53 +1,61 @@ -module.exports = regRequest - -var url = require("url") +var assert = require("assert") + , url = require("url") , zlib = require("zlib") - , fs = require("graceful-fs") - , rm = require("rimraf") - , asyncMap = require("slide").asyncMap , Stream = require("stream").Stream + +var rm = require("rimraf") , request = require("request") - , retry = require("retry") - , crypto = require("crypto") + , once = 
require("once") -function regRequest (method, where, what, etag, nofollow, cb_) { - if (typeof cb_ !== "function") cb_ = nofollow, nofollow = false - if (typeof cb_ !== "function") cb_ = etag, etag = null - if (typeof cb_ !== "function") cb_ = what, what = null +module.exports = regRequest - if (!this.sessionToken) { - this.sessionToken = crypto.randomBytes(8).toString("hex") +// npm: means +// 1. https +// 2. send authorization +// 3. content-type is 'application/json' -- metadata +function regRequest (method, uri, options, cb_) { + assert(uri, "must pass resource to load") + assert(cb_, "must pass callback") + + options = options || {} + + var parsed = url.parse(uri) + var where = parsed.pathname + var what = options.body + var follow = (typeof options.follow === "boolean" ? options.follow : true) + this.log.verbose("request", "on initialization, where is", where) + + if (parsed.search) { + where = where + parsed.search + parsed.search = "" } - - var registry = this.conf.get('registry') - if (!registry) return cb(new Error( - "No registry url provided: " + method + " " + where)) + parsed.pathname = "/" + this.log.verbose("request", "after pass 1, where is", where) // Since there are multiple places where an error could occur, // don't let the cb be called more than once. 
- var errState = null - function cb (er) { - if (errState) return - if (er) errState = er - cb_.apply(null, arguments) - } + var cb = once(cb_) if (where.match(/^\/?favicon.ico/)) { return cb(new Error("favicon.ico isn't a package, it's a picture.")) } var adduserChange = /^\/?-\/user\/org\.couchdb\.user:([^\/]+)\/-rev/ - , adduserNew = /^\/?-\/user\/org\.couchdb\.user:([^\/]+)/ - , nu = where.match(adduserNew) - , uc = where.match(adduserChange) - , alwaysAuth = this.conf.get('always-auth') - , isDel = method === "DELETE" - , isWrite = what || isDel - , authRequired = (alwaysAuth || isWrite) && !nu || uc || isDel + , isUserChange = where.match(adduserChange) + , adduserNew = /^\/?-\/user\/org\.couchdb\.user:([^\/]+)$/ + , isNewUser = where.match(adduserNew) + , registry = url.format(parsed) + , alwaysAuth = this.conf.getCredentialsByURI(registry).alwaysAuth + , isDelete = method === "DELETE" + , isWrite = what || isDelete + + if (isUserChange && !isWrite) { + return cb(new Error("trying to change user document without writing(?!)")) + } // resolve to a full url on the registry if (!where.match(/^https?:\/\//)) { - this.log.verbose("url raw", where) + this.log.verbose("request", "url raw", where) var q = where.split("?") where = q.shift() @@ -59,51 +67,42 @@ if (p.match(/^org.couchdb.user/)) { return p.replace(/\//g, encodeURIComponent("/")) } - return encodeURIComponent(p) + return p }).join("/") if (q) where += "?" 
+ q - this.log.verbose("url resolving", [registry, where]) - where = url.resolve(registry, where) - this.log.verbose("url resolved", where) - } - - var remote = url.parse(where) - , auth = this.conf.get('_auth') - if (authRequired && !auth) { - var un = this.conf.get('username') - var pw = this.conf.get('_password') - if (un && pw) - auth = new Buffer(un + ':' + pw).toString('base64') - } - - if (authRequired && !auth) { - return cb(new Error( - "Cannot insert data into the registry without auth")) + this.log.verbose("request", "resolving registry", [registry, where]) + where = url.resolve(registry, where) + this.log.verbose("request", "after pass 2, where is", where) } - if (auth && authRequired) { - remote.auth = new Buffer(auth, "base64").toString("utf8") + var authed + // new users can *not* use auth, because they don't *have* auth yet + if (isNewUser) { + this.log.verbose("request", "new user, so can't send auth") + authed = false + } + else if (alwaysAuth) { + this.log.verbose("request", "always-auth set; sending authorization") + authed = true + } + else if (isWrite) { + this.log.verbose("request", "sending authorization for write operation") + authed = true + } + else { + // most of the time we don't want to auth + this.log.verbose("request", "no auth needed") + authed = false } - // Tuned to spread 3 attempts over about a minute. - // See formula at . 
- var operation = retry.operation({ - retries: this.conf.get('fetch-retries') || 2, - factor: this.conf.get('fetch-retry-factor'), - minTimeout: this.conf.get('fetch-retry-mintimeout') || 10000, - maxTimeout: this.conf.get('fetch-retry-maxtimeout') || 60000 - }) - var self = this - operation.attempt(function (currentAttempt) { - self.log.info("trying", "registry request attempt " + currentAttempt - + " at " + (new Date()).toLocaleTimeString()) - makeRequest.call(self, method, remote, where, what, etag, nofollow + this.attempt(function (operation) { + makeRequest.call(self, method, where, what, options.etag, follow, authed , function (er, parsed, raw, response) { - if (!er || er.message.match(/^SSL Error/)) { + if (!er || (er.message && er.message.match(/^SSL Error/))) { if (er) - er.code = 'ESSL' + er.code = "ESSL" return cb(er, parsed, raw, response) } @@ -115,56 +114,47 @@ var statusRetry = !statusCode || timeout || serverError if (er && statusRetry && operation.retry(er)) { self.log.info("retry", "will retry, error on last attempt: " + er) - return + return undefined + } + if (response) { + self.log.verbose("headers", response.headers) + if (response.headers["npm-notice"]) { + self.log.warn("notice", response.headers["npm-notice"]) + } } - if (response) - this.log.verbose("headers", response.headers) cb.apply(null, arguments) - }.bind(this)) - }.bind(this)) + }) + }) } -function makeRequest (method, remote, where, what, etag, nofollow, cb_) { - var cbCalled = false - function cb () { - if (cbCalled) return - cbCalled = true - cb_.apply(null, arguments) - } - - var strict = this.conf.get('strict-ssl') - if (strict === undefined) strict = true - var opts = { url: remote - , method: method - , encoding: null // tell request let body be Buffer instance - , ca: this.conf.get('ca') - , localAddress: this.conf.get('local-address') - , cert: this.conf.get('cert') - , key: this.conf.get('key') - , strictSSL: strict } - , headers = opts.headers = {} - if (etag) { - 
this.log.verbose("etag", etag) - headers[method === "GET" ? "if-none-match" : "if-match"] = etag - } +function makeRequest (method, where, what, etag, follow, authed, cb_) { + var cb = once(cb_) - headers['npm-session'] = this.sessionToken + var parsed = url.parse(where) + var headers = {} - if (this.refer) { - headers.referer = this.refer - } + // metadata should be compressed + headers["accept-encoding"] = "gzip" - headers.accept = "application/json" - headers['accept-encoding'] = 'gzip' + var er = this.authify(authed, parsed, headers) + if (er) return cb_(er) + + var opts = this.initialize( + parsed, + method, + "application/json", + headers + ) - headers["user-agent"] = this.conf.get('user-agent') || - 'node/' + process.version + opts.followRedirect = follow + opts.encoding = null // tell request let body be Buffer instance - var p = this.conf.get('proxy') - var sp = this.conf.get('https-proxy') || p - opts.proxy = remote.protocol === "https:" ? sp : p + if (etag) { + this.log.verbose("etag", etag) + headers[method === "GET" ? 
"if-none-match" : "if-match"] = etag + } - // figure out wth 'what' is + // figure out wth "what" is if (what) { if (Buffer.isBuffer(what) || typeof what === "string") { opts.body = what @@ -179,11 +169,7 @@ } } - if (nofollow) { - opts.followRedirect = false - } - - this.log.http(method, remote.href || "/") + this.log.http("request", method, parsed.href || "/") var done = requestDone.call(this, method, where, cb) var req = request(opts, decodeResponseBody(done)) @@ -208,7 +194,7 @@ response.socket.destroy() } - if (response.headers['content-encoding'] !== 'gzip') return cb(er, response, data) + if (response.headers["content-encoding"] !== "gzip") return cb(er, response, data) zlib.gunzip(data, function (er, buf) { if (er) return cb(er, response, data) @@ -225,7 +211,7 @@ var urlObj = url.parse(where) if (urlObj.auth) - urlObj.auth = '***' + urlObj.auth = "***" this.log.http(response.statusCode, url.format(urlObj)) var parsed @@ -255,7 +241,7 @@ , null, data, response ) } - var er = null + er = null if (parsed && response.headers.etag) { parsed._etag = response.headers.etag } @@ -263,16 +249,21 @@ if (parsed && parsed.error && response.statusCode >= 400) { var w = url.parse(where).pathname.substr(1) var name - if (!w.match(/^-/) && parsed.error === "not_found") { + if (!w.match(/^-/)) { w = w.split("/") name = w[w.indexOf("_rewrite") + 1] - er = new Error("404 Not Found: "+name) - er.code = "E404" - er.pkgid = name + } + + if (name && parsed.error === "not_found") { + er = new Error("404 Not Found: " + name) } else { er = new Error( parsed.error + " " + (parsed.reason || "") + ": " + w) } + if (name) er.pkgid = name + er.statusCode = response.statusCode + er.code = "E" + er.statusCode + } else if (method !== "HEAD" && method !== "GET") { // invalidate cache // This is irrelevant for commands that do etag caching, but @@ -280,26 +271,7 @@ // from thinking that it didn't work when it did. 
// Note that failure is an acceptable option here, since the // only result will be a stale cache for some helper commands. - var path = require("path") - , p = url.parse(where).pathname.split("/") - , _ = "/" - , caches = p.map(function (part) { - part = part.replace(/:/g, "_") - return _ = path.join(_, part) - }).map(function (cache) { - return path.join(this.conf.get('cache'), cache, ".cache.json") - }, this) - - // if the method is DELETE, then also remove the thing itself. - // Note that the search index is probably invalid. Whatever. - // That's what you get for deleting stuff. Don't do that. - if (method === "DELETE") { - p = p.slice(0, p.indexOf("-rev")) - p = p.join("/").replace(/:/g, "_") - caches.push(path.join(this.conf.get('cache'), p)) - } - - asyncMap(caches, rm, function () {}) + rm(this.cacheFile(where), function() {}) } return cb(er, parsed, data, response) }.bind(this) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/star.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/star.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/star.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/star.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,13 +1,16 @@ module.exports = star -function star (package, starred, cb) { - if (!this.conf.get('username')) return cb(new Error( - "Must be logged in to star/unstar packages")) +function star (uri, starred, cb) { + var c = this.conf.getCredentialsByURI(uri) + if (c.token) { + return cb(new Error("This operation is unsupported for token-based auth")) + } + else if (!c.auth) { + return cb(new Error("Must be logged in to star/unstar packages")) + } - var users = {} - - this.request("GET", package + '?write=true', function (er, fullData) { + this.request("GET", uri + "?write=true", null, function (er, fullData) { if (er) return cb(er) fullData = { _id: fullData._id @@ -16,14 +19,14 @@ if (starred) { 
this.log.info("starring", fullData._id) - fullData.users[this.conf.get('username')] = true + fullData.users[c.username] = true this.log.verbose("starring", fullData) } else { - delete fullData.users[this.conf.get('username')] + delete fullData.users[c.username] this.log.info("unstarring", fullData._id) this.log.verbose("unstarring", fullData) } - return this.request("PUT", package, fullData, cb) + return this.request("PUT", uri, { body : fullData }, cb) }.bind(this)) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/stars.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/stars.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/stars.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/stars.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,9 @@ -var qs = require('querystring') +var url = require("url") module.exports = stars -function stars (name, cb) { +function stars (base, name, cb) { name = encodeURIComponent(name) var path = "/-/_view/starredByUser?key=\""+name+"\"" - this.request("GET", path, cb) + this.request("GET", url.resolve(base, path), null, cb) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/tag.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/tag.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/tag.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/tag.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,5 @@ - module.exports = tag -function tag (project, version, tag, cb) { - this.request("PUT", project+"/"+tag, JSON.stringify(version), cb) +function tag (uri, version, tagName, cb) { + this.request("PUT", uri+"/"+tagName, { body : JSON.stringify(version) }, cb) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/unpublish.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/unpublish.js --- 
nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/unpublish.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/unpublish.js 2015-01-20 21:22:17.000000000 +0000 @@ -11,28 +11,28 @@ , url = require("url") , chain = require("slide").chain -function unpublish (name, ver, cb) { +function unpublish (uri, ver, cb) { if (typeof cb !== "function") cb = ver, ver = null - var u = name + '?write=true' - this.get(u, null, -1, true, function (er, data) { + this.get(uri + "?write=true", { timeout : -1, follow : false }, function (er, data) { if (er) { - this.log.info("unpublish", name+" not published") + this.log.info("unpublish", uri+" not published") return cb() } // remove all if no version specified if (!ver) { this.log.info("unpublish", "No version specified, removing all") - return this.request("DELETE", name+'/-rev/'+data._rev, cb) + return this.request("DELETE", uri+"/-rev/"+data._rev, null, cb) } var versions = data.versions || {} , versionPublic = versions.hasOwnProperty(ver) + var dist if (!versionPublic) { - this.log.info("unpublish", name+"@"+ver+" not published") + this.log.info("unpublish", uri+"@"+ver+" not published") } else { - var dist = versions[ver].dist + dist = versions[ver].dist this.log.verbose("unpublish", "removing attachments", dist) } @@ -40,7 +40,7 @@ // if it was the only version, then delete the whole package. 
if (!Object.keys(versions).length) { this.log.info("unpublish", "No versions remain, removing entire package") - return this.request("DELETE", name+"/-rev/"+data._rev, cb) + return this.request("DELETE", uri + "/-rev/" + data._rev, null, cb) } if (!versionPublic) return cb() @@ -58,8 +58,9 @@ var rev = data._rev delete data._revisions delete data._attachments - var cb_ = detacher.call(this, data, dist, cb) - this.request("PUT", name+"/-rev/"+rev, data, function (er) { + var cb_ = detacher.call(this, uri, data, dist, cb) + + this.request("PUT", uri + "/-rev/" + rev, { body : data }, function (er) { if (er) { this.log.error("unpublish", "Failed to update data") } @@ -68,20 +69,20 @@ }.bind(this)) } -function detacher (data, dist, cb) { +function detacher (uri, data, dist, cb) { return function (er) { if (er) return cb(er) - this.get(data.name, function (er, data) { + this.get(escape(uri, data.name), null, function (er, data) { if (er) return cb(er) var tb = url.parse(dist.tarball) - detach.call(this, data, tb.pathname, data._rev, function (er) { + detach.call(this, uri, data, tb.pathname, data._rev, function (er) { if (er || !dist.bin) return cb(er) chain(Object.keys(dist.bin).map(function (bt) { return function (cb) { var d = dist.bin[bt] - detach.call(this, data, url.parse(d.tarball).pathname, null, cb) + detach.call(this, uri, data, url.parse(d.tarball).pathname, null, cb) }.bind(this) }, this), cb) }.bind(this)) @@ -89,16 +90,21 @@ }.bind(this) } -function detach (data, path, rev, cb) { +function detach (uri, data, path, rev, cb) { if (rev) { path += "/-rev/" + rev this.log.info("detach", path) - return this.request("DELETE", path, cb) + return this.request("DELETE", url.resolve(uri, path), null, cb) } - this.get(data.name, function (er, data) { + this.get(escape(uri, data.name), null, function (er, data) { rev = data._rev if (!rev) return cb(new Error( "No _rev found in "+data._id)) detach.call(this, data, path, rev, cb) }.bind(this)) } + +function escape (base, 
name) { + var escaped = name.replace(/\//, "%2f") + return url.resolve(base, escaped) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/upload.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/upload.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/upload.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/upload.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,12 +3,12 @@ var fs = require('fs') , Stream = require("stream").Stream -function upload (where, file, etag, nofollow, cb) { +function upload (uri, file, etag, nofollow, cb) { if (typeof nofollow === "function") cb = nofollow, nofollow = false if (typeof etag === "function") cb = etag, etag = null if (file instanceof Stream) { - return this.request("PUT", where, file, etag, nofollow, cb) + return this.request("PUT", uri, { body : file, etag : etag, follow : !nofollow }, cb) } fs.stat(file, function (er, stat) { @@ -17,6 +17,6 @@ s.size = stat.size s.on("error", cb) - this.request("PUT", where, s, etag, nofollow, cb) + this.request("PUT", uri, { body : s, etag : etag, follow : !nofollow }, cb) }.bind(this)) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/util/nerf-dart.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/util/nerf-dart.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/util/nerf-dart.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/util/nerf-dart.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +var url = require("url") + +module.exports = toNerfDart + +/** + * Maps a URL to an identifier. + * + * Name courtesy schiffertronix media LLC, a New Jersey corporation + * + * @param {String} uri The URL to be nerfed. + * + * @returns {String} A nerfed URL. 
+ */ +function toNerfDart(uri) { + var parsed = url.parse(uri) + parsed.pathname = "/" + delete parsed.protocol + delete parsed.auth + + return url.format(parsed) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/whoami.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/whoami.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/lib/whoami.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/lib/whoami.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +module.exports = whoami + +var url = require("url") + +function whoami (uri, cb) { + if (!this.conf.getCredentialsByURI(uri)) { + return cb(new Error("Must be logged in to see who you are")) + } + + this.request("GET", url.resolve(uri, "whoami"), null, function (er, userdata) { + if (er) return cb(er) + + cb(null, userdata.username) + }) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/LICENSE nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,15 @@ -Copyright (c) Isaac Z. Schlueter ("Author") -All rights reserved. +The ISC License -The BSD License +Copyright (c) Isaac Z. Schlueter and Contributors -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/.npmignore nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,5 @@ test/fixtures/cache node_modules npm-debug.log +.eslintrc +.jshintrc diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/package.json nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/package.json --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "npm-registry-client", "description": "Client for the npm registry", - "version": "0.4.8", + "version": "3.2.4", "repository": { "url": "git://github.com/isaacs/npm-registry-client" }, @@ -15,30 +15,36 @@ "test": "tap test/*.js" }, "dependencies": { - "request": "2 >=2.25.0", - "graceful-fs": "~2.0.0", - "semver": "^2.2.1", - "slide": "~1.1.3", "chownr": "0", - "mkdirp": "~0.3.3", - "rimraf": "~2", - "retry": "0.6.0", + "graceful-fs": "^3.0.0", + "mkdirp": "^0.5.0", + "normalize-package-data": "~1.0.1", + "npm-cache-filename": "^1.0.0", + "once": "^1.3.0", + "request": "2 >=2.25.0", + "retry": "^0.6.1", + "rimraf": "2", + "semver": "2 >=2.2.1 || 3.x || 4", + "slide": "^1.1.3", "npmlog": "" }, "devDependencies": { + "concat-stream": "^1.4.6", + "npmconf": "^2.1.0", "tap": "" }, "optionalDependencies": { "npmlog": "" }, - "license": "BSD", - "readme": "# npm-registry-client\n\nThe code that npm uses to talk to the registry.\n\nIt handles all the caching and HTTP calls.\n\n## Usage\n\n```javascript\nvar RegClient = require('npm-registry-client')\nvar client = new RegClient(config)\n\nclient.get(\"npm\", \"latest\", 1000, function (er, data, 
raw, res) {\n // error is an error if there was a problem.\n // data is the parsed data object\n // raw is the json string\n // res is the response from couch\n})\n```\n\n# Configuration\n\nThis program is designed to work with\n[npmconf](https://npmjs.org/package/npmconf), but you can also pass in\na plain-jane object with the appropriate configs, and it'll shim it\nfor you. Any configuration thingie that has get/set/del methods will\nalso be accepted.\n\n* `registry` **Required** {String} URL to the registry\n* `cache` **Required** {String} Path to the cache folder\n* `always-auth` {Boolean} Auth even for GET requests.\n* `auth` {String} A base64-encoded `username:password`\n* `email` {String} User's email address\n* `tag` {String} The default tag to use when publishing new packages.\n Default = `\"latest\"`\n* `ca` {String} Cerficate signing authority certificates to trust.\n* `cert` {String} Client certificate (PEM encoded). Enable access\n to servers that require client certificates\n* `key` {String} Private key (PEM encoded) for client certificate 'cert'\n* `strict-ssl` {Boolean} Whether or not to be strict with SSL\n certificates. Default = `true`\n* `user-agent` {String} User agent header to send. Default =\n `\"node/{process.version} {process.platform} {process.arch}\"`\n* `log` {Object} The logger to use. Defaults to `require(\"npmlog\")` if\n that works, otherwise logs are disabled.\n* `fetch-retries` {Number} Number of times to retry on GET failures.\n Default=2\n* `fetch-retry-factor` {Number} `factor` setting for `node-retry`. 
Default=10\n* `fetch-retry-mintimeout` {Number} `minTimeout` setting for `node-retry`.\n Default=10000 (10 seconds)\n* `fetch-retry-maxtimeout` {Number} `maxTimeout` setting for `node-retry`.\n Default=60000 (60 seconds)\n* `proxy` {URL} The url to proxy requests through.\n* `https-proxy` {URL} The url to proxy https requests through.\n Defaults to be the same as `proxy` if unset.\n* `_auth` {String} The base64-encoded authorization header.\n* `username` `_password` {String} Username/password to use to generate\n `_auth` if not supplied.\n* `_token` {Object} A token for use with\n [couch-login](https://npmjs.org/package/couch-login)\n\n# client.request(method, where, [what], [etag], [nofollow], cb)\n\n* `method` {String} HTTP method\n* `where` {String} Path to request on the server\n* `what` {Stream | Buffer | String | Object} The request body. Objects\n that are not Buffers or Streams are encoded as JSON.\n* `etag` {String} The cached ETag\n* `nofollow` {Boolean} Prevent following 302/301 responses\n* `cb` {Function}\n * `error` {Error | null}\n * `data` {Object} the parsed data object\n * `raw` {String} the json\n * `res` {Response Object} response from couch\n\nMake a request to the registry. All the other methods are wrappers\naround this. 
one.\n\n# client.adduser(username, password, email, cb)\n\n* `username` {String}\n* `password` {String}\n* `email` {String}\n* `cb` {Function}\n\nAdd a user account to the registry, or verify the credentials.\n\n# client.deprecate(name, version, message, cb)\n\n* `name` {String} The package name\n* `version` {String} Semver version range\n* `message` {String} The message to use as a deprecation warning\n* `cb` {Function}\n\nDeprecate a version of a package in the registry.\n\n# client.bugs(name, cb)\n\n* `name` {String} the name of the package\n* `cb` {Function}\n\nGet the url for bugs of a package\n\n# client.get(url, [timeout], [nofollow], [staleOk], cb)\n\n* `url` {String} The url path to fetch\n* `timeout` {Number} Number of seconds old that a cached copy must be\n before a new request will be made.\n* `nofollow` {Boolean} Do not follow 301/302 responses\n* `staleOk` {Boolean} If there's cached data available, then return that\n to the callback quickly, and update the cache the background.\n\nFetches data from the registry via a GET request, saving it in\nthe cache folder with the ETag.\n\n# client.publish(data, tarball, [readme], cb)\n\n* `data` {Object} Package data\n* `tarball` {String | Stream} Filename or stream of the package tarball\n* `readme` {String} Contents of the README markdown file\n* `cb` {Function}\n\nPublish a package to the registry.\n\nNote that this does not create the tarball from a folder. 
However, it\ncan accept a gzipped tar stream or a filename to a tarball.\n\n# client.star(package, starred, cb)\n\n* `package` {String} Name of the package to star\n* `starred` {Boolean} True to star the package, false to unstar it.\n* `cb` {Function}\n\nStar or unstar a package.\n\nNote that the user does not have to be the package owner to star or\nunstar a package, though other writes do require that the user be the\npackage owner.\n\n# client.stars(username, cb)\n\n* `username` {String} Name of user to fetch starred packages for.\n* `cb` {Function}\n\nView your own or another user's starred packages.\n\n# client.tag(project, version, tag, cb)\n\n* `project` {String} Project name\n* `version` {String} Version to tag\n* `tag` {String} Tag name to apply\n* `cb` {Function}\n\nMark a version in the `dist-tags` hash, so that `pkg@tag`\nwill fetch the specified version.\n\n# client.unpublish(name, [ver], cb)\n\n* `name` {String} package name\n* `ver` {String} version to unpublish. Leave blank to unpublish all\n versions.\n* `cb` {Function}\n\nRemove a version of a package (or all versions) from the registry. When\nthe last version us unpublished, the entire document is removed from the\ndatabase.\n\n# client.upload(where, file, [etag], [nofollow], cb)\n\n* `where` {String} URL path to upload to\n* `file` {String | Stream} Either the filename or a readable stream\n* `etag` {String} Cache ETag\n* `nofollow` {Boolean} Do not follow 301/302 responses\n* `cb` {Function}\n\nUpload an attachment. 
Mostly used by `client.publish()`.\n", + "license": "ISC", + "readme": "# npm-registry-client\n\nThe code that npm uses to talk to the registry.\n\nIt handles all the caching and HTTP calls.\n\n## Usage\n\n```javascript\nvar RegClient = require('npm-registry-client')\nvar client = new RegClient(config)\nvar uri = \"npm://registry.npmjs.org/npm\"\nvar options = {timeout: 1000}\n\nclient.get(uri, options, function (error, data, raw, res) {\n // error is an error if there was a problem.\n // data is the parsed data object\n // raw is the json string\n // res is the response from couch\n})\n```\n\n# Registry URLs\n\nThe registry calls take either a full URL pointing to a resource in the\nregistry, or a base URL for the registry as a whole (for the base URL, any path\nwill be ignored). In addition to `http` and `https`, `npm` URLs are allowed.\n`npm` URLs are `https` URLs with the additional restrictions that they will\nalways include authorization credentials, and the response is always registry\nmetadata (and not tarballs or other attachments).\n\n# Configuration\n\nThis program is designed to work with\n[npmconf](https://npmjs.org/package/npmconf), but you can also pass in\na plain-jane object with the appropriate configs, and it'll shim it\nfor you. Any configuration thingie that has get/set/del methods will\nalso be accepted.\n\n* `cache` **Required** {String} Path to the cache folder\n* `always-auth` {Boolean} Auth even for GET requests.\n* `auth` {String} A base64-encoded `username:password`\n* `email` {String} User's email address\n* `tag` {String} The default tag to use when publishing new packages.\n Default = `\"latest\"`\n* `ca` {String} Cerficate signing authority certificates to trust.\n* `cert` {String} Client certificate (PEM encoded). Enable access\n to servers that require client certificates\n* `key` {String} Private key (PEM encoded) for client certificate 'cert'\n* `strict-ssl` {Boolean} Whether or not to be strict with SSL\n certificates. 
Default = `true`\n* `user-agent` {String} User agent header to send. Default =\n `\"node/{process.version} {process.platform} {process.arch}\"`\n* `log` {Object} The logger to use. Defaults to `require(\"npmlog\")` if\n that works, otherwise logs are disabled.\n* `fetch-retries` {Number} Number of times to retry on GET failures.\n Default=2\n* `fetch-retry-factor` {Number} `factor` setting for `node-retry`. Default=10\n* `fetch-retry-mintimeout` {Number} `minTimeout` setting for `node-retry`.\n Default=10000 (10 seconds)\n* `fetch-retry-maxtimeout` {Number} `maxTimeout` setting for `node-retry`.\n Default=60000 (60 seconds)\n* `proxy` {URL} The url to proxy requests through.\n* `https-proxy` {URL} The url to proxy https requests through.\n Defaults to be the same as `proxy` if unset.\n* `_auth` {String} The base64-encoded authorization header.\n* `username` `_password` {String} Username/password to use to generate\n `_auth` if not supplied.\n* `_token` {Object} A token for use with\n [couch-login](https://npmjs.org/package/couch-login)\n\n# client.request(method, uri, options, cb)\n\n* `method` {String} HTTP method\n* `uri` {String} URI pointing to the resource to request\n* `options` {Object} Object containing optional per-request properties.\n * `what` {Stream | Buffer | String | Object} The request body. Objects\n that are not Buffers or Streams are encoded as JSON.\n * `etag` {String} The cached ETag\n * `follow` {Boolean} Follow 302/301 responses (defaults to true)\n* `cb` {Function}\n * `error` {Error | null}\n * `data` {Object} the parsed data object\n * `raw` {String} the json\n * `res` {Response Object} response from couch\n\nMake a request to the registry. 
All the other methods are wrappers around\n`request`.\n\n# client.adduser(base, username, password, email, cb)\n\n* `base` {String} Base registry URL\n* `username` {String}\n* `password` {String}\n* `email` {String}\n* `cb` {Function}\n\nAdd a user account to the registry, or verify the credentials.\n\n# client.deprecate(uri, version, message, cb)\n\n* `uri` {String} Full registry URI for the deprecated package\n* `version` {String} Semver version range\n* `message` {String} The message to use as a deprecation warning\n* `cb` {Function}\n\nDeprecate a version of a package in the registry.\n\n# client.bugs(uri, cb)\n\n* `uri` {String} Full registry URI for the package\n* `cb` {Function}\n\nGet the url for bugs of a package\n\n# client.get(uri, options, cb)\n\n* `uri` {String} The complete registry URI to fetch\n* `options` {Object} Object containing optional per-request properties.\n * `timeout` {Number} Duration before the request times out.\n * `follow` {Boolean} Follow 302/301 responses (defaults to true)\n * `staleOk` {Boolean} If there's cached data available, then return that\n to the callback quickly, and update the cache the background.\n\nFetches data from the registry via a GET request, saving it in the cache folder\nwith the ETag.\n\n# client.publish(uri, data, tarball, cb)\n\n* `uri` {String} The registry URI to publish to\n* `data` {Object} Package data\n* `tarball` {String | Stream} Filename or stream of the package tarball\n* `cb` {Function}\n\nPublish a package to the registry.\n\nNote that this does not create the tarball from a folder. 
However, it can\naccept a gzipped tar stream or a filename to a tarball.\n\n# client.star(uri, starred, cb)\n\n* `uri` {String} The complete registry URI to star\n* `starred` {Boolean} True to star the package, false to unstar it.\n* `cb` {Function}\n\nStar or unstar a package.\n\nNote that the user does not have to be the package owner to star or unstar a\npackage, though other writes do require that the user be the package owner.\n\n# client.stars(base, username, cb)\n\n* `base` {String} The base URL for the registry\n* `username` {String} Name of user to fetch starred packages for.\n* `cb` {Function}\n\nView your own or another user's starred packages.\n\n# client.tag(uri, version, tag, cb)\n\n* `uri` {String} The complete registry URI to tag\n* `version` {String} Version to tag\n* `tag` {String} Tag name to apply\n* `cb` {Function}\n\nMark a version in the `dist-tags` hash, so that `pkg@tag` will fetch the\nspecified version.\n\n# client.unpublish(uri, [ver], cb)\n\n* `uri` {String} The complete registry URI to unpublish\n* `ver` {String} version to unpublish. Leave blank to unpublish all\n versions.\n* `cb` {Function}\n\nRemove a version of a package (or all versions) from the registry. When the\nlast version us unpublished, the entire document is removed from the database.\n\n# client.upload(uri, file, [etag], [nofollow], cb)\n\n* `uri` {String} The complete registry URI to upload to\n* `file` {String | Stream} Either the filename or a readable stream\n* `etag` {String} Cache ETag\n* `nofollow` {Boolean} Do not follow 301/302 responses\n* `cb` {Function}\n\nUpload an attachment. 
Mostly used by `client.publish()`.\n", "readmeFilename": "README.md", + "gitHead": "ddafd4913bdca30a1f9111660767f71653604b57", "bugs": { "url": "https://github.com/isaacs/npm-registry-client/issues" }, "homepage": "https://github.com/isaacs/npm-registry-client", - "_id": "npm-registry-client@0.4.8", - "_shasum": "a6685a161033101be6064b7af887ab440e8695d0", - "_from": "npm-registry-client@~0.4.7" + "_id": "npm-registry-client@3.2.4", + "_shasum": "8659b3449e1c9a9f8181dad142cadb048bfe521f", + "_from": "npm-registry-client@>=3.2.4 <3.3.0" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/README.md nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/README.md --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -9,8 +9,10 @@ ```javascript var RegClient = require('npm-registry-client') var client = new RegClient(config) +var uri = "npm://registry.npmjs.org/npm" +var options = {timeout: 1000} -client.get("npm", "latest", 1000, function (er, data, raw, res) { +client.get(uri, options, function (error, data, raw, res) { // error is an error if there was a problem. // data is the parsed data object // raw is the json string @@ -18,6 +20,15 @@ }) ``` +# Registry URLs + +The registry calls take either a full URL pointing to a resource in the +registry, or a base URL for the registry as a whole (for the base URL, any path +will be ignored). In addition to `http` and `https`, `npm` URLs are allowed. +`npm` URLs are `https` URLs with the additional restrictions that they will +always include authorization credentials, and the response is always registry +metadata (and not tarballs or other attachments). + # Configuration This program is designed to work with @@ -26,7 +37,6 @@ for you. Any configuration thingie that has get/set/del methods will also be accepted. 
-* `registry` **Required** {String} URL to the registry * `cache` **Required** {String} Path to the cache folder * `always-auth` {Boolean} Auth even for GET requests. * `auth` {String} A base64-encoded `username:password` @@ -59,25 +69,27 @@ * `_token` {Object} A token for use with [couch-login](https://npmjs.org/package/couch-login) -# client.request(method, where, [what], [etag], [nofollow], cb) +# client.request(method, uri, options, cb) * `method` {String} HTTP method -* `where` {String} Path to request on the server -* `what` {Stream | Buffer | String | Object} The request body. Objects - that are not Buffers or Streams are encoded as JSON. -* `etag` {String} The cached ETag -* `nofollow` {Boolean} Prevent following 302/301 responses +* `uri` {String} URI pointing to the resource to request +* `options` {Object} Object containing optional per-request properties. + * `what` {Stream | Buffer | String | Object} The request body. Objects + that are not Buffers or Streams are encoded as JSON. + * `etag` {String} The cached ETag + * `follow` {Boolean} Follow 302/301 responses (defaults to true) * `cb` {Function} * `error` {Error | null} * `data` {Object} the parsed data object * `raw` {String} the json * `res` {Response Object} response from couch -Make a request to the registry. All the other methods are wrappers -around this. one. +Make a request to the registry. All the other methods are wrappers around +`request`. -# client.adduser(username, password, email, cb) +# client.adduser(base, username, password, email, cb) +* `base` {String} Base registry URL * `username` {String} * `password` {String} * `email` {String} @@ -85,89 +97,88 @@ Add a user account to the registry, or verify the credentials. 
-# client.deprecate(name, version, message, cb) +# client.deprecate(uri, version, message, cb) -* `name` {String} The package name +* `uri` {String} Full registry URI for the deprecated package * `version` {String} Semver version range * `message` {String} The message to use as a deprecation warning * `cb` {Function} Deprecate a version of a package in the registry. -# client.bugs(name, cb) +# client.bugs(uri, cb) -* `name` {String} the name of the package +* `uri` {String} Full registry URI for the package * `cb` {Function} Get the url for bugs of a package -# client.get(url, [timeout], [nofollow], [staleOk], cb) +# client.get(uri, options, cb) -* `url` {String} The url path to fetch -* `timeout` {Number} Number of seconds old that a cached copy must be - before a new request will be made. -* `nofollow` {Boolean} Do not follow 301/302 responses -* `staleOk` {Boolean} If there's cached data available, then return that - to the callback quickly, and update the cache the background. +* `uri` {String} The complete registry URI to fetch +* `options` {Object} Object containing optional per-request properties. + * `timeout` {Number} Duration before the request times out. + * `follow` {Boolean} Follow 302/301 responses (defaults to true) + * `staleOk` {Boolean} If there's cached data available, then return that + to the callback quickly, and update the cache the background. -Fetches data from the registry via a GET request, saving it in -the cache folder with the ETag. +Fetches data from the registry via a GET request, saving it in the cache folder +with the ETag. -# client.publish(data, tarball, [readme], cb) +# client.publish(uri, data, tarball, cb) +* `uri` {String} The registry URI to publish to * `data` {Object} Package data * `tarball` {String | Stream} Filename or stream of the package tarball -* `readme` {String} Contents of the README markdown file * `cb` {Function} Publish a package to the registry. -Note that this does not create the tarball from a folder. 
However, it -can accept a gzipped tar stream or a filename to a tarball. +Note that this does not create the tarball from a folder. However, it can +accept a gzipped tar stream or a filename to a tarball. -# client.star(package, starred, cb) +# client.star(uri, starred, cb) -* `package` {String} Name of the package to star +* `uri` {String} The complete registry URI to star * `starred` {Boolean} True to star the package, false to unstar it. * `cb` {Function} Star or unstar a package. -Note that the user does not have to be the package owner to star or -unstar a package, though other writes do require that the user be the -package owner. +Note that the user does not have to be the package owner to star or unstar a +package, though other writes do require that the user be the package owner. -# client.stars(username, cb) +# client.stars(base, username, cb) +* `base` {String} The base URL for the registry * `username` {String} Name of user to fetch starred packages for. * `cb` {Function} View your own or another user's starred packages. -# client.tag(project, version, tag, cb) +# client.tag(uri, version, tag, cb) -* `project` {String} Project name +* `uri` {String} The complete registry URI to tag * `version` {String} Version to tag * `tag` {String} Tag name to apply * `cb` {Function} -Mark a version in the `dist-tags` hash, so that `pkg@tag` -will fetch the specified version. +Mark a version in the `dist-tags` hash, so that `pkg@tag` will fetch the +specified version. -# client.unpublish(name, [ver], cb) +# client.unpublish(uri, [ver], cb) -* `name` {String} package name +* `uri` {String} The complete registry URI to unpublish * `ver` {String} version to unpublish. Leave blank to unpublish all versions. * `cb` {Function} -Remove a version of a package (or all versions) from the registry. When -the last version us unpublished, the entire document is removed from the -database. +Remove a version of a package (or all versions) from the registry. 
When the +last version us unpublished, the entire document is removed from the database. -# client.upload(where, file, [etag], [nofollow], cb) +# client.upload(uri, file, [etag], [nofollow], cb) -* `where` {String} URL path to upload to +* `uri` {String} The complete registry URI to upload to * `file` {String | Stream} Either the filename or a readable stream * `etag` {String} Cache ETag * `nofollow` {Boolean} Do not follow 301/302 responses diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/00-setup.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/00-setup.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/00-setup.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/00-setup.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,10 @@ -var tap = require('tap') -var rimraf = require('rimraf') +var tap = require("tap") +var rimraf = require("rimraf") -tap.test('setup', function (t) { - rimraf(__dirname + '/fixtures/cache', function (er) { +tap.test("setup", function (t) { + rimraf(__dirname + "/fixtures/cache", function (er) { if (er) throw er - t.pass('cache cleaned') + t.pass("cache cleaned") t.end() }) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/adduser-new.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/adduser-new.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/adduser-new.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/adduser-new.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,26 +1,21 @@ -var tap = require('tap') -var server = require('./fixtures/server.js') -var RC = require('../') -var client = new RC({ - cache: __dirname + '/fixtures/cache' - , registry: 'http://localhost:' + server.port }) - -var userdata = -{ name: 'username', - email: 'i@izs.me', - _id: 'org.couchdb.user:username', - type: 'user', - roles: [], - date: 
'2012-06-07T04:11:21.591Z' } -, password = "password" +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() + +var password = "%1234@asdf%" , username = "username" -, crypto = require("crypto") -, SD = require('string_decoder').StringDecoder -, decoder = new SD - -function sha (s) { - return crypto.createHash("sha1").update(s).digest("hex") -} +, email = "i@izs.me" +, userdata = { + name: username, + email: email, + _id: "org.couchdb.user:username", + type: "user", + roles: [], + date: "2012-06-07T04:11:21.591Z" } +, SD = require("string_decoder").StringDecoder +, decoder = new SD() tap.test("create new user account", function (t) { server.expect("/-/user/org.couchdb.user:username", function (req, res) { @@ -41,7 +36,7 @@ }) }) - client.adduser(username, password, "i@izs.me", function (er, data, raw, res) { + client.adduser("http://localhost:1337/", username, password, email, function (er, data) { if (er) throw er t.deepEqual(data, { created: true }) t.end() diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/adduser-update.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/adduser-update.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/adduser-update.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/adduser-update.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,22 @@ -var tap = require('tap') -var server = require('./fixtures/server.js') -var RC = require('../') -var client = new RC({ - cache: __dirname + '/fixtures/cache' - , registry: 'http://localhost:' + server.port }) - -var userdata = -{ name: 'username', - email: 'i@izs.me', - _id: 'org.couchdb.user:username', - type: 'user', - roles: [], - _rev: "1-15aac515ac515aac515aac515aac5125" -} +var tap = require("tap") -, password = "password" -, username = "username" -, crypto = require("crypto") -, SD = 
require('string_decoder').StringDecoder -, decoder = new SD +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() +var password = "%1234@asdf%" +, username = "username" +, email = "i@izs.me" +, userdata = { + name: username, + email: email, + _id: "org.couchdb.user:username", + type: "user", + roles: [], + date: "2012-06-07T04:11:21.591Z" } +, SD = require("string_decoder").StringDecoder +, decoder = new SD() -function sha (s) { - return crypto.createHash("sha1").update(s).digest("hex") -} tap.test("update a user acct", function (t) { server.expect("PUT", "/-/user/org.couchdb.user:username", function (req, res) { @@ -56,9 +49,7 @@ }) }) - - - client.adduser(username, password, "i@izs.me", function (er, data, raw, res) { + client.adduser("http://localhost:1337/", username, password, email, function (er, data) { if (er) throw er t.deepEqual(data, { created: true }) t.end() diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/basic.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/basic.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -var tap = require('tap') -var server = require('./fixtures/server.js') -var RC = require('../') -var rimraf = require("rimraf") -var client = new RC({ - cache: __dirname + '/fixtures/cache' - , registry: 'http://localhost:' + server.port }) -var us = require('./fixtures/underscore/1.3.3/cache.json') -var usroot = require("./fixtures/underscore/cache.json") - -tap.test("basic request", function (t) { - server.expect("/underscore/1.3.3", function (req, res) { - console.error('got a request') - res.json(us) - }) - - server.expect("/underscore", function (req, res) { - console.error('got a request') - res.json(usroot) - }) - - t.plan(2) - 
client.get("/underscore/1.3.3", function (er, data, raw, res) { - console.error("got response") - t.deepEqual(data, us) - }) - - client.get("/underscore", function (er, data, raw, res) { - console.error("got response") - t.deepEqual(data, usroot) - }) -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/bugs.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/bugs.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/bugs.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/bugs.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,28 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() + +tap.test("get the URL for the bugs page on a package", function (t) { + server.expect("GET", "/sample/latest", function (req, res) { + t.equal(req.method, "GET") + + res.json({ + bugs : { + url : "http://github.com/example/sample/issues", + email : "sample@example.com" + } + }) + }) + + client.bugs("http://localhost:1337/sample", function (error, info) { + t.ifError(error) + + t.ok(info.url, "got the URL") + t.ok(info.email, "got the email address") + + t.end() + }) +}) + diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/deprecate.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/deprecate.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/deprecate.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/deprecate.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "_authToken"] = "not-bad-meaning-bad-but-bad-meaning-wombat" + +var client = common.freshClient(configuration) + 
+var cache = require("./fixtures/underscore/cache.json") + +var VERSION = "1.3.2" +var MESSAGE = "uhhh" + +tap.test("deprecate a package", function (t) { + server.expect("GET", "/underscore?write=true", function (req, res) { + t.equal(req.method, "GET") + + res.json(cache) + }) + + server.expect("PUT", "/underscore", function (req, res) { + t.equal(req.method, "PUT") + + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var updated = JSON.parse(b) + + var undeprecated = [ + "1.0.3", "1.0.4", "1.1.0", "1.1.1", "1.1.2", "1.1.3", "1.1.4", "1.1.5", "1.1.6", + "1.1.7", "1.2.0", "1.2.1", "1.2.2", "1.2.3", "1.2.4", "1.3.0", "1.3.1", "1.3.3" + ] + for (var i = 0; i < undeprecated.length; i++) { + var current = undeprecated[i] + t.notEqual( + updated.versions[current].deprecated, + MESSAGE, + current + " not deprecated" + ) + } + + t.equal( + updated.versions[VERSION].deprecated, + MESSAGE, + VERSION + " deprecated" + ) + res.statusCode = 201 + res.json({deprecated:true}) + }) + }) + + client.deprecate(common.registry + "/underscore", VERSION, MESSAGE, function (er, data) { + t.ifError(er) + t.ok(data.deprecated, "was deprecated") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-404.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-404.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-404.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-404.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +var resolve = require("path").resolve +var createReadStream = require("graceful-fs").createReadStream +var readFileSync = require("graceful-fs").readFileSync + +var tap = require("tap") +var cat = require("concat-stream") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var tgz = resolve(__dirname, 
"./fixtures/underscore/1.3.3/package.tgz") + +tap.test("basic fetch", function (t) { + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + + res.writeHead(200, { + "content-type" : "application/x-tar", + "content-encoding" : "gzip" + }) + + createReadStream(tgz).pipe(res) + }) + + var client = common.freshClient() + client.fetch( + "http://localhost:1337/underscore/-/underscore-1.3.3.tgz", + null, + function (er, res) { + t.ifError(er, "loaded successfully") + + var sink = cat(function (data) { + t.deepEqual(data, readFileSync(tgz)) + t.end() + }) + + res.on("error", function (error) { + t.ifError(error, "no errors on stream") + }) + + res.pipe(sink) + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-408.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-408.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-408.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-408.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +var resolve = require("path").resolve +var createReadStream = require("graceful-fs").createReadStream +var readFileSync = require("graceful-fs").readFileSync + +var tap = require("tap") +var cat = require("concat-stream") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var tgz = resolve(__dirname, "./fixtures/underscore/1.3.3/package.tgz") + +tap.test("fetch with retry on timeout", function (t) { + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + + res.writeHead(408) + res.end() + }) + + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + + res.writeHead(200, { + "content-type" : "application/x-tar", + "content-encoding" : "gzip" + }) + + 
createReadStream(tgz).pipe(res) + }) + + var client = common.freshClient() + client.conf.set("fetch-retry-mintimeout", 100) + client.fetch( + "http://localhost:1337/underscore/-/underscore-1.3.3.tgz", + {}, + function (er, res) { + t.ifError(er, "loaded successfully") + + var sink = cat(function (data) { + t.deepEqual(data, readFileSync(tgz)) + t.end() + }) + + res.on("error", function (error) { + t.ifError(error, "no errors on stream") + }) + + res.pipe(sink) + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-503.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-503.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-503.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-503.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +var resolve = require("path").resolve +var createReadStream = require("graceful-fs").createReadStream +var readFileSync = require("graceful-fs").readFileSync + +var tap = require("tap") +var cat = require("concat-stream") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var tgz = resolve(__dirname, "./fixtures/underscore/1.3.3/package.tgz") + +tap.test("fetch with retry on server error", function (t) { + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + + res.writeHead(503) + res.end() + }) + + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + + res.writeHead(200, { + "content-type" : "application/x-tar", + "content-encoding" : "gzip" + }) + + createReadStream(tgz).pipe(res) + }) + + var client = common.freshClient() + client.conf.set("fetch-retry-mintimeout", 100) + client.fetch( + "http://localhost:1337/underscore/-/underscore-1.3.3.tgz", + {}, + function (er, res) { + t.ifError(er, "loaded successfully") 
+ + var sink = cat(function (data) { + t.deepEqual(data, readFileSync(tgz)) + t.end() + }) + + res.on("error", function (error) { + t.ifError(error, "no errors on stream") + }) + + res.pipe(sink) + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-authed.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-authed.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-authed.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-authed.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,56 @@ +var resolve = require("path").resolve +var createReadStream = require("graceful-fs").createReadStream +var readFileSync = require("graceful-fs").readFileSync + +var tap = require("tap") +var cat = require("concat-stream") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var tgz = resolve(__dirname, "./fixtures/underscore/1.3.3/package.tgz") + +tap.test("basic fetch with scoped always-auth enabled", function (t) { + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + t.equal( + req.headers.authorization, + "Basic dXNlcm5hbWU6JTEyMzRAYXNkZiU=", + "got expected auth header" + ) + + res.writeHead(200, { + "content-type" : "application/x-tar", + "content-encoding" : "gzip" + }) + + createReadStream(tgz).pipe(res) + }) + + var nerfed = "//localhost:" + server.port + "/:" + var configuration = {} + configuration[nerfed + "username"] = "username" + configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") + configuration[nerfed + "email"] = "i@izs.me" + configuration[nerfed + "always-auth"] = true + + var client = common.freshClient(configuration) + client.fetch( + "http://localhost:1337/underscore/-/underscore-1.3.3.tgz", + null, + function (er, res) { + t.ifError(er, "loaded successfully") + + var sink = cat(function 
(data) { + t.deepEqual(data, readFileSync(tgz)) + t.end() + }) + + res.on("error", function (error) { + t.ifError(error, "no errors on stream") + }) + + res.pipe(sink) + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-basic.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-basic.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +var resolve = require("path").resolve +var createReadStream = require("graceful-fs").createReadStream +var readFileSync = require("graceful-fs").readFileSync + +var tap = require("tap") +var cat = require("concat-stream") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var tgz = resolve(__dirname, "./fixtures/underscore/1.3.3/package.tgz") + +tap.test("basic fetch", function (t) { + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + + res.writeHead(200, { + "content-type" : "application/x-tar", + "content-encoding" : "gzip" + }) + + createReadStream(tgz).pipe(res) + }) + + var client = common.freshClient() + client.fetch( + "http://localhost:1337/underscore/-/underscore-1.3.3.tgz", + null, + function (er, res) { + t.ifError(er, "loaded successfully") + + var sink = cat(function (data) { + t.deepEqual(data, readFileSync(tgz)) + t.end() + }) + + res.on("error", function (error) { + t.ifError(error, "no errors on stream") + }) + + res.pipe(sink) + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-not-authed.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-not-authed.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fetch-not-authed.js 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fetch-not-authed.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +var resolve = require("path").resolve +var createReadStream = require("graceful-fs").createReadStream +var readFileSync = require("graceful-fs").readFileSync + +var tap = require("tap") +var cat = require("concat-stream") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var tgz = resolve(__dirname, "./fixtures/underscore/1.3.3/package.tgz") + +tap.test("basic fetch with scoped always-auth disabled", function (t) { + server.expect("/underscore/-/underscore-1.3.3.tgz", function (req, res) { + t.equal(req.method, "GET", "got expected method") + t.notOk(req.headers.authorization, "received no auth header") + + res.writeHead(200, { + "content-type" : "application/x-tar", + "content-encoding" : "gzip" + }) + + createReadStream(tgz).pipe(res) + }) + + var nerfed = "//localhost:" + server.port + "/:" + var configuration = {} + configuration[nerfed + "username"] = "username" + configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") + configuration[nerfed + "email"] = "i@izs.me" + configuration[nerfed + "always-auth"] = false + + var client = common.freshClient(configuration) + client.fetch( + "http://localhost:1337/underscore/-/underscore-1.3.3.tgz", + null, + function (er, res) { + t.ifError(er, "loaded successfully") + + var sink = cat(function (data) { + t.deepEqual(data, readFileSync(tgz)) + t.end() + }) + + res.on("error", function (error) { + t.ifError(error, "no errors on stream") + }) + + res.pipe(sink) + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fixtures/@npm/npm-registry-client/cache.json nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fixtures/@npm/npm-registry-client/cache.json --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fixtures/@npm/npm-registry-client/cache.json 1970-01-01 
00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fixtures/@npm/npm-registry-client/cache.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +{"_id":"@npm%2fnpm-registry-client","_rev":"213-0a1049cf56172b7d9a1184742c6477b9","name":"@npm/npm-registry-client","description":"Client for the npm registry","dist-tags":{"latest":"2.0.4","v2.0":"2.0.3"},"versions":{"0.0.1":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_npmUser":{"name":"isaacs","email":"i@izs.me"},"_id":"@npm%2fnpm-registry-client@0.0.1","_engineSupported":true,"_npmVersion":"1.1.24","_nodeVersion":"v0.7.10-pre","_defaultsLoaded":true,"dist":{"shasum":"693a08f6d2faea22bbd2bf412508a63d3e6229a7","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.1.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.2":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.2","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_npmUser":{"name":"isaacs","email":"i@izs.me"},"_id":"@npm%2fnpm-registry-client@0.0.2","_engineSupported":true,"_npmVersion":"1.1.24","_nodeVersion":"v0.7.10-pre","_defaultsLoaded":true,"dist":{"shasum":"b48c0ec5563c6a6fdc253454fc56d2c60c5a26f4","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.2.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.3":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.3","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_npmUser":{"name":"isaacs","email":"i@izs.me"},"_id":"@npm%2fnpm-registry-client@0.0.3","_engineSupported":true,"_npmVersion":"1.1.24","_nodeVersion":"v0.7.10-pre","_defaultsLoaded":true,"dist":{"shasum":"ccc0254c2d59e3ea9b9050e2b16edef78df1a1e8","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.3.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.4":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.4","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_npmUser":{"name":"isaacs","email":"i@izs.me"},"_id":"@npm%2fnpm-registry-client@0.0.4","_engineSupported":true,"_npmVersion":"1.1.25","_nodeVersion":"v0.7.10-pre","_defaultsLoaded":true,"dist":{"shasum":"faabd25ef477521c74ac21e0f4cf3a2f66d18fb3","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.4.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.5":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.5","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_id":"@npm%2fnpm-registry-client@0.0.5","dist":{"shasum":"85219810c9d89ae8d28ea766e7cf74efbd9f1e52","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.5.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.6":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"The code that npm uses to talk to the registry","version":"0.0.6","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_id":"@npm%2fnpm-registry-client@0.0.6","dist":{"shasum":"cc6533b3b41df65e6e9db2601fbbf1a509a7e94c","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.6.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.7":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"The code that npm uses to talk to the registry","version":"0.0.7","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"engines":{"node":"*"},"_id":"@npm%2fnpm-registry-client@0.0.7","dist":{"shasum":"0cee1d1c61f1c8e483774fe1f7bbb81c4f394a3a","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.7.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.8":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.8","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.0.8","dist":{"shasum":"1b7411c3f7310ec2a96b055b00e7ca606e47bd07","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.8.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.9":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.9","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.0.9","dist":{"shasum":"6d5bfde431559ac9e2e52a7db85f5839b874f022","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.9.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.10":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.10","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.0.10","dist":{"shasum":"0c8b6a4615bce82aa6cc04a0d1f7dc89921f7a38","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.10.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.0.11":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.0.11","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.0.11","dist":{"shasum":"afab40be5bed1faa946d8e1827844698f2ec1db7","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.0.11.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.1.0":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.1.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.1.0","dist":{"shasum":"1077d6bbb5e432450239dc6622a59474953ffbea","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.1.0.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.1.1":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.1.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.1.1","dist":{"shasum":"759765361d09b715270f59cf50f10908e4e9c5fc","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.1.1.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.1.2":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.1.2","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.1.2","dist":{"shasum":"541ce93abb3d35f5c325545c718dd3bbeaaa9ff0","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.1.2.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.1.3":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.1.3","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.1.3","dist":{"shasum":"e9a40d7031e8f809af5fd85aa9aac979e17efc97","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.1.3.tgz"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.1.4":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.1.4","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.1.4","dist":{"shasum":"b211485b046191a1085362376530316f0cab0420","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.1.4.tgz"},"_npmVersion":"1.1.48","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.0":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.0","dist":{"shasum":"6508a4b4d96f31057d5200ca5779531bafd2b840","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.0.tgz"},"_npmVersion":"1.1.49","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.1":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"node-uuid":"~1.3.3","request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.1","dist":{"shasum":"1bc8c4576c368cd88253d8a52daf40c55b89bb1a","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.1.tgz"},"_npmVersion":"1.1.49","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.5":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.5","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.5","dist":{"shasum":"2f55d675dfb977403b1ad0d96874c1d30e8058d7","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.5.tgz"},"_npmVersion":"1.1.51","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.6":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.6","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.6","dist":{"shasum":"f05df6695360360ad220e6e13a6a7bace7165fbe","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.6.tgz"},"_npmVersion":"1.1.56","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.7":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.7","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.0.14","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.7","dist":{"shasum":"867bad8854cae82ed89ee3b7f1d391af59491671","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.7.tgz"},"_npmVersion":"1.1.59","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.8":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.8","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.6","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.8","dist":{"shasum":"ef194cdb70f1ea03a576cff2c97392fa96e36563","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.8.tgz"},"_npmVersion":"1.1.62","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.9":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.9","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.9","dist":{"shasum":"3cec10431dfed1594adaf99c50f482ee56ecf9e4","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.9.tgz"},"_npmVersion":"1.1.59","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.10":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.10","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2.0.1","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.10","dist":{"shasum":"1e69726dae0944e78562fd77243f839c6a2ced1e","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.10.tgz"},"_npmVersion":"1.1.64","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.11":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.11","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.11","dist":{"shasum":"d92f33c297eb1bbd57fd597c3d8f5f7e9340a0b5","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.11.tgz"},"_npmVersion":"1.1.70","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.12":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.12","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.1.8","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.12","dist":{"shasum":"3bfb6fc0e4b131d665580cd1481c341fe521bfd3","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.12.tgz"},"_from":".","_npmVersion":"1.2.2","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.13":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.13","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.13","dist":{"shasum":"e03f2a4340065511b7184a3e2862cd5d459ef027","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.13.tgz"},"_from":".","_npmVersion":"1.2.4","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.14":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.14","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.14","dist":{"shasum":"186874a7790417a340d582b1cd4a7c338087ee12","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.14.tgz"},"_from":".","_npmVersion":"1.2.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.15":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.15","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.15","dist":{"shasum":"f71f32b7185855f1f8b7a5ef49e49d2357c2c552","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.15.tgz"},"_from":".","_npmVersion":"1.2.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.16":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.16","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.16","dist":{"shasum":"3331323b5050fc5afdf77c3a35913c16f3e43964","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.16.tgz"},"_from":".","_npmVersion":"1.2.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.17":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.17","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.17","dist":{"shasum":"1df2bbecac6751f5d9600fb43722aef96d956773","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.17.tgz"},"_from":".","_npmVersion":"1.2.11","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.18":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.18","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.9.202","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.18","dist":{"shasum":"198c8d15ed9b1ed546faf6e431eb63a6b18193ad","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.18.tgz"},"_from":".","_npmVersion":"1.2.13","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.19":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.19","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.16","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.19","dist":{"shasum":"106da826f0d2007f6e081f2b68fb6f26fa951b20","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.19.tgz"},"_from":".","_npmVersion":"1.2.14","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.20":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.20","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.16","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","_id":"@npm%2fnpm-registry-client@0.2.20","dist":{"shasum":"3fff194331e26660be2cf8ebf45ddf7d36add5f6","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.20.tgz"},"_from":".","_npmVersion":"1.2.15","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.21":{"author":{"name":"Isaac Z. Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.21","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.16","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.21","dist":{"shasum":"d85dd32525f193925c46ff9eb0e0f529dfd1b254","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.21.tgz"},"_from":".","_npmVersion":"1.2.18","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.22":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.22","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"~2.20.0","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.22","dist":{"shasum":"caa22ff40a1ccd632a660b8b80c333c8f92d5a17","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.22.tgz"},"_from":".","_npmVersion":"1.2.18","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.23":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.23","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.20.0","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.23","dist":{"shasum":"a320ab2b1d048b4f7b88e40bd86974ca322b4c24","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.23.tgz"},"_from":".","_npmVersion":"1.2.19","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.24":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.24","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.20.0","graceful-fs":"~1.2.0","semver":"~1.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.24","dist":{"shasum":"e12f644338619319ee7f233363a1714a87f3c72d","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.24.tgz"},"_from":".","_npmVersion":"1.2.22","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.25":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.25","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.20.0","graceful-fs":"~1.2.0","semver":"~2.0.5","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.25","dist":{"shasum":"c2caeb1dcf937d6fcc4a187765d401f5e2f54027","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.25.tgz"},"_from":".","_npmVersion":"1.2.32","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.26":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.26","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.20.0","graceful-fs":"~1.2.0","semver":"~2.0.5","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.26","dist":{"shasum":"4c5a2b3de946e383032f10fa497d0c15ee5f4c60","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.26.tgz"},"_from":".","_npmVersion":"1.3.1","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.27":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.27","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.20.0","graceful-fs":"~2.0.0","semver":"~2.0.5","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.15","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.27","dist":{"shasum":"8f338189d32769267886a07ad7b7fd2267446adf","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.27.tgz"},"_from":".","_npmVersion":"1.3.2","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.28":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.28","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"~2.1.0","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"_id":"@npm%2fnpm-registry-client@0.2.28","dist":{"shasum":"959141fc0180d7b1ad089e87015a8a2142a8bffc","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.28.tgz"},"_from":".","_npmVersion":"1.3.6","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.29":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.29","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.2.29","dist":{"shasum":"66ff2766f0c61d41e8a6139d3692d8833002c686","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.29.tgz"},"_from":".","_npmVersion":"1.3.12","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.30":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.30","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.2.30","dist":{"shasum":"f01cae5c51aa0a1c5dc2516cbad3ebde068d3eaa","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.30.tgz"},"_from":".","_npmVersion":"1.3.14","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.2.31":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.2.31","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.2.31","dist":{"shasum":"24a23e24e43246677cb485f8391829e9536563d4","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.2.31.tgz"},"_from":".","_npmVersion":"1.3.17","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.0":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.0","dist":{"shasum":"66eab02a69be67f232ac14023eddfb8308c2eccd","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.0.tgz"},"_from":".","_npmVersion":"1.3.18","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.1":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.1","dist":{"shasum":"16dba07cc304442edcece378218672d0a1258ef8","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.1.tgz"},"_from":".","_npmVersion":"1.3.18","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.2":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.2","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.2","dist":{"shasum":"ea3060bd0a87fb1d97b87433b50f38f7272b1686","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.2.tgz"},"_from":".","_npmVersion":"1.3.20","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.3":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.3","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.3","dist":{"shasum":"da08bb681fb24aa5c988ca71f8c10f27f09daf4a","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.3.tgz"},"_from":".","_npmVersion":"1.3.21","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.4":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.4","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.4","dist":{"shasum":"25d771771590b1ca39277aea4506af234c5f4342","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.4.tgz"},"_from":".","_npmVersion":"1.3.25","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.5":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.5","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","couch-login":"~0.1.18","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.5","dist":{"shasum":"98ba1ac851a3939a3fb9917c28fa8da522dc635f","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.5.tgz"},"_from":".","_npmVersion":"1.3.25","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.3.6":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.3.6","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.3.6","dist":{"shasum":"c48a2a03643769acc49672860f7920ec6bffac6e","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.3.6.tgz"},"_from":".","_npmVersion":"1.3.26","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.0":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.0","dist":{"shasum":"30d0c178b7f2e54183a6a3fc9fe4071eb10290bf","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.0.tgz"},"_from":".","_npmVersion":"1.3.26","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.1":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.1","dist":{"shasum":"9c49b3e44558e2072158fb085be8a083c5f83537","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.1.tgz"},"_from":".","_npmVersion":"1.4.0","_npmUser":{"name":"npm-www","email":"npm@npmjs.com"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.2":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.2","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.2","dist":{"shasum":"d9568a9413bee14951201ce73f3b3992ec6658c0","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.2.tgz"},"_from":".","_npmVersion":"1.4.1","_npmUser":{"name":"npm-www","email":"npm@npmjs.com"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.3":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.3","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.3","dist":{"shasum":"aa188fc5067158e991a57f4697c54994108f5389","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.3.tgz"},"_from":".","_npmVersion":"1.4.2","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.4":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.4","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.4","dist":{"shasum":"f9dbc383a49069d8c7f67755a3ff6e424aff584f","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.4.tgz"},"_from":".","_npmVersion":"1.4.2","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.5":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.5","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.5","dist":{"shasum":"7d6fdca46139470715f9477ddb5ad3e770d4de7b","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.5.tgz"},"_from":".","_npmVersion":"1.4.4","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.6":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.6","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.6","_from":".","_npmVersion":"1.4.6","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"657f69a79543fc4cc264c3b2de958bd15f7140fe","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.6.tgz"},"directories":{}},"0.4.7":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.7","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.7","dist":{"shasum":"f4369b59890da7882527eb7c427dd95d43707afb","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.7.tgz"},"_from":".","_npmVersion":"1.4.6","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"directories":{}},"0.4.8":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.8","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.8","_shasum":"a6685a161033101be6064b7af887ab440e8695d0","_from":".","_npmVersion":"1.4.8","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"a6685a161033101be6064b7af887ab440e8695d0","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.8.tgz"},"directories":{}},"0.4.9":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.9","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.9","_shasum":"304d3d4726a58e33d8cc965afdc9ed70b996580c","_from":".","_npmVersion":"1.4.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"304d3d4726a58e33d8cc965afdc9ed70b996580c","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.9.tgz"},"directories":{}},"0.4.10":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.10","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"^2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.10","_shasum":"ab7bf1be3ba07d769eaf74dee3c9347e02283116","_from":".","_npmVersion":"1.4.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"ab7bf1be3ba07d769eaf74dee3c9347e02283116","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.10.tgz"},"directories":{}},"0.4.11":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.11","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"2 >=2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.11","_shasum":"032e9b6b050ed052ee9441841a945a184ea6bc33","_from":".","_npmVersion":"1.4.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"032e9b6b050ed052ee9441841a945a184ea6bc33","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.11.tgz"},"directories":{}},"0.4.12":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"0.4.12","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"request":"2 >=2.25.0","graceful-fs":"~2.0.0","semver":"2 >=2.2.1","slide":"~1.1.3","chownr":"0","mkdirp":"~0.3.3","rimraf":"~2","retry":"0.6.0","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@0.4.12","_shasum":"34303422f6a3da93ca3a387a2650d707c8595b99","_from":".","_npmVersion":"1.4.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"34303422f6a3da93ca3a387a2650d707c8595b99","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-0.4.12.tgz"},"directories":{}},"1.0.0":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"1.0.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"~2.0.0","mkdirp":"~0.3.3","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@1.0.0","_shasum":"2a6f9dfdce5f8ebf4b9af4dbfd738384d25014e5","_from":".","_npmVersion":"1.4.10","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"2a6f9dfdce5f8ebf4b9af4dbfd738384d25014e5","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-1.0.0.tgz"},"directories":{}},"1.0.1":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"1.0.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"~2.0.0","mkdirp":"~0.3.3","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"98b1278c230cf6c159f189e2f8c69daffa727ab8","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@1.0.1","_shasum":"c5f6a87d285f2005a35d3f67d9c724bce551b0f1","_from":".","_npmVersion":"1.4.13","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"c5f6a87d285f2005a35d3f67d9c724bce551b0f1","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-1.0.1.tgz"},"directories":{}},"2.0.0":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"2.0.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"~2.0.0","mkdirp":"~0.3.3","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"47a98069b6a34e751cbd5b84ce92858cae5abe70","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@2.0.0","_shasum":"88810dac2d534c0df1d905c79e723392fcfc791a","_from":".","_npmVersion":"1.4.14","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"}],"dist":{"shasum":"88810dac2d534c0df1d905c79e723392fcfc791a","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-2.0.0.tgz"},"directories":{}},"2.0.1":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"2.0.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"123e40131f83f7265f66ecd2a558cce44a3aea86","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@2.0.1","_shasum":"611c7cb7c8f7ff22be2ebc6398423b5de10db0e2","_from":".","_npmVersion":"1.4.14","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"611c7cb7c8f7ff22be2ebc6398423b5de10db0e2","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-2.0.1.tgz"},"directories":{}},"2.0.2":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"2.0.2","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"6ecc311c9dd4890f2d9b6bae60447070a3321e12","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@2.0.2","_shasum":"a82b000354c7f830114fb18444764bc477d5740f","_from":".","_npmVersion":"1.4.15","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"a82b000354c7f830114fb18444764bc477d5740f","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-2.0.2.tgz"},"directories":{}},"3.0.0":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.0","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","normalize-package-data":"^0.4.0","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"6bb1aec1e85fa82ee075bd997d6fb9f2dbb7f643","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.0","_shasum":"4febc5cdb274e9fa06bc3008910e3fa1ec007994","_from":".","_npmVersion":"1.5.0-pre","_npmUser":{"name":"othiym23","email":"ogd@aoaioxxysz.net"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"4febc5cdb274e9fa06bc3008910e3fa1ec007994","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.0.tgz"},"directories":{}},"3.0.1":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.1","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","normalize-package-data":"^0.4.0","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"fe8382dde609ea1e3580fcdc5bc3d0bba119cfc6","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.1","_shasum":"5f3ee362ce5c237cfb798fce22c77875fc1a63c2","_from":".","_npmVersion":"1.5.0-alpha-1","_npmUser":{"name":"othiym23","email":"ogd@aoaioxxysz.net"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"5f3ee362ce5c237cfb798fce22c77875fc1a63c2","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.1.tgz"},"directories":{}},"2.0.3":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"2.0.3","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"2578fb9a807d77417554ba235ba8fac39405e832","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@2.0.3","_shasum":"93dad3d9a162c99404badb71739c622c0f3b9a72","_from":".","_npmVersion":"1.5.0-alpha-1","_npmUser":{"name":"othiym23","email":"ogd@aoaioxxysz.net"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"93dad3d9a162c99404badb71739c622c0f3b9a72","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-2.0.3.tgz"},"directories":{}},"3.0.2":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.2","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","normalize-package-data":"^0.4.0","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"15343019160ace0b9874cf0ec186b3425dbc7301","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.2","_shasum":"5dd0910157ce55f4286a1871d39f9a2128cd3c99","_from":".","_npmVersion":"1.5.0-alpha-2","_npmUser":{"name":"othiym23","email":"ogd@aoaioxxysz.net"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"5dd0910157ce55f4286a1871d39f9a2128cd3c99","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.2.tgz"},"directories":{}},"3.0.3":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.3","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.3.3","normalize-package-data":"^0.4.0","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1 || 3.x","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"b18a780d1185f27c06c27812147b83aba0d4a2f5","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.3","_shasum":"2377dc1cf69b4d374b3a95fb7feba8c804d8cb30","_from":".","_npmVersion":"2.0.0-alpha-5","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"2377dc1cf69b4d374b3a95fb7feba8c804d8cb30","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.3.tgz"},"directories":{}},"3.0.4":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.4","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"~0.5.0","normalize-package-data":"^0.4.0","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1 || 3.x","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"54900fe4b2eb5b99ee6dfe173f145732fdfae80e","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.4","_shasum":"d4a177d1f25615cfaef9b6844fa366ffbf5f578a","_from":".","_npmVersion":"2.0.0-alpha-5","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"d4a177d1f25615cfaef9b6844fa366ffbf5f578a","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.4.tgz"},"directories":{}},"3.0.5":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.5","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"0.5","normalize-package-data":"0.4","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"2","semver":"2 >=2.2.1 || 3.x","slide":"^1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"BSD","gitHead":"635db1654346bc86473df7b39626601425f46177","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.5","_shasum":"cdabaefa399b81ac8a86a48718aefd80e7b19ff3","_from":".","_npmVersion":"2.0.0-alpha-5","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"cdabaefa399b81ac8a86a48718aefd80e7b19ff3","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.5.tgz"},"directories":{}},"3.0.6":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"3.0.6","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"^0.5.0","normalize-package-data":"0.4","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"2","semver":"2 >=2.2.1 || 3.x","slide":"^1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"ISC","gitHead":"eba30fadd724ed5cad1aec95ac3ee907a59b7317","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@3.0.6","_shasum":"14a17d9a60ed2a80b04edcbc596dbce0d96540ee","_from":".","_npmVersion":"1.4.22","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"14a17d9a60ed2a80b04edcbc596dbce0d96540ee","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-3.0.6.tgz"},"directories":{}},"2.0.4":{"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"name":"@npm/npm-registry-client","description":"Client for the npm registry","version":"2.0.4","repository":{"url":"git://github.com/isaacs/npm-registry-client"},"main":"index.js","scripts":{"test":"tap test/*.js"},"dependencies":{"chownr":"0","graceful-fs":"^3.0.0","mkdirp":"^0.5.0","npm-cache-filename":"^1.0.0","request":"2 >=2.25.0","retry":"0.6.0","rimraf":"~2","semver":"2 >=2.2.1","slide":"~1.1.3","npmlog":""},"devDependencies":{"tap":""},"optionalDependencies":{"npmlog":""},"license":"ISC","gitHead":"a10f621d9cdc813b9d3092a14b661f65bfa6d40d","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"homepage":"https://github.com/isaacs/npm-registry-client","_id":"@npm%2fnpm-registry-client@2.0.4","_shasum":"528e08900d7655c12096d1637d1c3a7a5b451019","_from":".","_npmVersion":"1.4.22","_npmUser":{"name":"isaacs","email":"i@izs.me"},"maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"dist":{"shasum":"528e08900d7655c12096d1637d1c3a7a5b451019","tarball":"http://registry.npmjs.org/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-2.0.4.tgz"},"directories":{}}},"readme":"# npm-registry-client\u000a\u000aThe code that npm uses to talk to the registry.\u000a\u000aIt handles all the caching and HTTP calls.\u000a\u000a## Usage\u000a\u000a```javascript\u000avar RegClient = require('npm-registry-client')\u000avar client = new RegClient(config)\u000avar uri = \"npm://registry.npmjs.org/npm\"\u000avar options = {timeout: 1000}\u000a\u000aclient.get(uri, options, function (error, data, raw, res) {\u000a // error is an error if there was a problem.\u000a // data is the parsed data object\u000a // raw is the json string\u000a // res is the response from couch\u000a})\u000a```\u000a\u000a# Registry URLs\u000a\u000aThe registry calls take either a full URL pointing to a resource in the\u000aregistry, or a base URL for the registry as a whole (for the 
base URL, any path\u000awill be ignored). In addition to `http` and `https`, `npm` URLs are allowed.\u000a`npm` URLs are `https` URLs with the additional restrictions that they will\u000aalways include authorization credentials, and the response is always registry\u000ametadata (and not tarballs or other attachments).\u000a\u000a# Configuration\u000a\u000aThis program is designed to work with\u000a[npmconf](https://npmjs.org/package/npmconf), but you can also pass in\u000aa plain-jane object with the appropriate configs, and it'll shim it\u000afor you. Any configuration thingie that has get/set/del methods will\u000aalso be accepted.\u000a\u000a* `cache` **Required** {String} Path to the cache folder\u000a* `always-auth` {Boolean} Auth even for GET requests.\u000a* `auth` {String} A base64-encoded `username:password`\u000a* `email` {String} User's email address\u000a* `tag` {String} The default tag to use when publishing new packages.\u000a Default = `\"latest\"`\u000a* `ca` {String} Cerficate signing authority certificates to trust.\u000a* `cert` {String} Client certificate (PEM encoded). Enable access\u000a to servers that require client certificates\u000a* `key` {String} Private key (PEM encoded) for client certificate 'cert'\u000a* `strict-ssl` {Boolean} Whether or not to be strict with SSL\u000a certificates. Default = `true`\u000a* `user-agent` {String} User agent header to send. Default =\u000a `\"node/{process.version} {process.platform} {process.arch}\"`\u000a* `log` {Object} The logger to use. Defaults to `require(\"npmlog\")` if\u000a that works, otherwise logs are disabled.\u000a* `fetch-retries` {Number} Number of times to retry on GET failures.\u000a Default=2\u000a* `fetch-retry-factor` {Number} `factor` setting for `node-retry`. 
Default=10\u000a* `fetch-retry-mintimeout` {Number} `minTimeout` setting for `node-retry`.\u000a Default=10000 (10 seconds)\u000a* `fetch-retry-maxtimeout` {Number} `maxTimeout` setting for `node-retry`.\u000a Default=60000 (60 seconds)\u000a* `proxy` {URL} The url to proxy requests through.\u000a* `https-proxy` {URL} The url to proxy https requests through.\u000a Defaults to be the same as `proxy` if unset.\u000a* `_auth` {String} The base64-encoded authorization header.\u000a* `username` `_password` {String} Username/password to use to generate\u000a `_auth` if not supplied.\u000a* `_token` {Object} A token for use with\u000a [couch-login](https://npmjs.org/package/couch-login)\u000a\u000a# client.request(method, uri, options, cb)\u000a\u000a* `method` {String} HTTP method\u000a* `uri` {String} URI pointing to the resource to request\u000a* `options` {Object} Object containing optional per-request properties.\u000a * `what` {Stream | Buffer | String | Object} The request body. Objects\u000a that are not Buffers or Streams are encoded as JSON.\u000a * `etag` {String} The cached ETag\u000a * `follow` {Boolean} Follow 302/301 responses (defaults to true)\u000a* `cb` {Function}\u000a * `error` {Error | null}\u000a * `data` {Object} the parsed data object\u000a * `raw` {String} the json\u000a * `res` {Response Object} response from couch\u000a\u000aMake a request to the registry. 
All the other methods are wrappers around\u000a`request`.\u000a\u000a# client.adduser(base, username, password, email, cb)\u000a\u000a* `base` {String} Base registry URL\u000a* `username` {String}\u000a* `password` {String}\u000a* `email` {String}\u000a* `cb` {Function}\u000a\u000aAdd a user account to the registry, or verify the credentials.\u000a\u000a# client.deprecate(uri, version, message, cb)\u000a\u000a* `uri` {String} Full registry URI for the deprecated package\u000a* `version` {String} Semver version range\u000a* `message` {String} The message to use as a deprecation warning\u000a* `cb` {Function}\u000a\u000aDeprecate a version of a package in the registry.\u000a\u000a# client.bugs(uri, cb)\u000a\u000a* `uri` {String} Full registry URI for the package\u000a* `cb` {Function}\u000a\u000aGet the url for bugs of a package\u000a\u000a# client.get(uri, options, cb)\u000a\u000a* `uri` {String} The complete registry URI to fetch\u000a* `options` {Object} Object containing optional per-request properties.\u000a * `timeout` {Number} Duration before the request times out.\u000a * `follow` {Boolean} Follow 302/301 responses (defaults to true)\u000a * `staleOk` {Boolean} If there's cached data available, then return that\u000a to the callback quickly, and update the cache the background.\u000a\u000aFetches data from the registry via a GET request, saving it in the cache folder\u000awith the ETag.\u000a\u000a# client.publish(uri, data, tarball, cb)\u000a\u000a* `uri` {String} The registry URI to publish to\u000a* `data` {Object} Package data\u000a* `tarball` {String | Stream} Filename or stream of the package tarball\u000a* `cb` {Function}\u000a\u000aPublish a package to the registry.\u000a\u000aNote that this does not create the tarball from a folder. 
However, it can\u000aaccept a gzipped tar stream or a filename to a tarball.\u000a\u000a# client.star(uri, starred, cb)\u000a\u000a* `uri` {String} The complete registry URI to star\u000a* `starred` {Boolean} True to star the package, false to unstar it.\u000a* `cb` {Function}\u000a\u000aStar or unstar a package.\u000a\u000aNote that the user does not have to be the package owner to star or unstar a\u000apackage, though other writes do require that the user be the package owner.\u000a\u000a# client.stars(base, username, cb)\u000a\u000a* `base` {String} The base URL for the registry\u000a* `username` {String} Name of user to fetch starred packages for.\u000a* `cb` {Function}\u000a\u000aView your own or another user's starred packages.\u000a\u000a# client.tag(uri, version, tag, cb)\u000a\u000a* `uri` {String} The complete registry URI to tag\u000a* `version` {String} Version to tag\u000a* `tag` {String} Tag name to apply\u000a* `cb` {Function}\u000a\u000aMark a version in the `dist-tags` hash, so that `pkg@tag` will fetch the\u000aspecified version.\u000a\u000a# client.unpublish(uri, [ver], cb)\u000a\u000a* `uri` {String} The complete registry URI to unpublish\u000a* `ver` {String} version to unpublish. Leave blank to unpublish all\u000a versions.\u000a* `cb` {Function}\u000a\u000aRemove a version of a package (or all versions) from the registry. When the\u000alast version us unpublished, the entire document is removed from the database.\u000a\u000a# client.upload(uri, file, [etag], [nofollow], cb)\u000a\u000a* `uri` {String} The complete registry URI to upload to\u000a* `file` {String | Stream} Either the filename or a readable stream\u000a* `etag` {String} Cache ETag\u000a* `nofollow` {Boolean} Do not follow 301/302 responses\u000a* `cb` {Function}\u000a\u000aUpload an attachment. 
Mostly used by `client.publish()`.\u000a","maintainers":[{"name":"isaacs","email":"i@izs.me"},{"name":"othiym23","email":"ogd@aoaioxxysz.net"}],"time":{"modified":"2014-07-31T21:59:52.896Z","created":"2012-06-07T04:43:36.581Z","0.0.1":"2012-06-07T04:43:38.123Z","0.0.2":"2012-06-07T05:35:05.937Z","0.0.3":"2012-06-09T00:55:25.861Z","0.0.4":"2012-06-11T03:53:26.548Z","0.0.5":"2012-06-11T23:48:11.235Z","0.0.6":"2012-06-17T06:23:27.320Z","0.0.7":"2012-06-18T19:19:38.315Z","0.0.8":"2012-06-28T20:40:20.563Z","0.0.9":"2012-07-10T03:28:04.651Z","0.0.10":"2012-07-11T17:03:45.151Z","0.0.11":"2012-07-17T14:06:37.489Z","0.1.0":"2012-07-23T18:17:38.007Z","0.1.1":"2012-07-23T21:21:28.196Z","0.1.2":"2012-07-24T06:14:12.831Z","0.1.3":"2012-08-07T02:02:20.564Z","0.1.4":"2012-08-15T03:04:52.822Z","0.1.5":"2012-08-17T21:59:33.310Z","0.2.0":"2012-08-17T22:00:18.081Z","0.2.1":"2012-08-17T22:07:28.827Z","0.2.2":"2012-08-17T22:37:24.352Z","0.2.3":"2012-08-19T19:16:44.808Z","0.2.4":"2012-08-19T19:18:51.792Z","0.2.5":"2012-08-20T16:54:50.794Z","0.2.6":"2012-08-22T00:25:04.766Z","0.2.7":"2012-08-27T19:07:34.829Z","0.2.8":"2012-10-02T19:53:50.661Z","0.2.9":"2012-10-03T22:09:50.766Z","0.2.10":"2012-10-25T14:55:54.216Z","0.2.11":"2012-12-21T16:26:38.094Z","0.2.12":"2013-01-18T22:22:41.668Z","0.2.13":"2013-02-06T00:16:35.939Z","0.2.14":"2013-02-10T02:44:02.764Z","0.2.15":"2013-02-11T19:18:55.678Z","0.2.16":"2013-02-15T17:09:03.249Z","0.2.17":"2013-02-16T03:47:13.898Z","0.2.18":"2013-03-06T22:09:23.536Z","0.2.19":"2013-03-20T06:27:39.128Z","0.2.20":"2013-03-28T00:43:07.558Z","0.2.21":"2013-04-29T15:46:54.094Z","0.2.22":"2013-04-29T15:51:02.178Z","0.2.23":"2013-05-11T00:28:14.198Z","0.2.24":"2013-05-24T21:27:50.693Z","0.2.25":"2013-06-20T15:36:46.277Z","0.2.26":"2013-07-06T17:12:54.670Z","0.2.27":"2013-07-11T07:14:45.740Z","0.2.28":"2013-08-02T20:27:41.732Z","0.2.29":"2013-10-28T18:23:24.477Z","0.2.30":"2013-11-18T23:12:00.540Z","0.2.31":"2013-12-16T08:36:43.044Z","0.3.0":"2013-12-17T07:03:10.699Z"
,"0.3.1":"2013-12-17T16:53:27.867Z","0.3.2":"2013-12-17T22:25:14.882Z","0.3.3":"2013-12-21T16:07:06.773Z","0.3.4":"2014-01-29T15:24:05.163Z","0.3.5":"2014-01-31T01:53:19.656Z","0.3.6":"2014-02-07T00:17:21.362Z","0.4.0":"2014-02-13T01:17:18.973Z","0.4.1":"2014-02-13T23:47:37.892Z","0.4.2":"2014-02-14T00:29:13.086Z","0.4.3":"2014-02-16T03:40:54.640Z","0.4.4":"2014-02-16T03:41:48.856Z","0.4.5":"2014-03-12T05:09:17.474Z","0.4.6":"2014-03-29T19:44:15.041Z","0.4.7":"2014-04-02T19:41:07.149Z","0.4.8":"2014-05-01T22:24:54.980Z","0.4.9":"2014-05-12T21:52:55.127Z","0.4.10":"2014-05-13T16:44:29.801Z","0.4.11":"2014-05-13T20:33:04.738Z","0.4.12":"2014-05-14T06:14:22.842Z","1.0.0":"2014-05-14T23:04:37.188Z","1.0.1":"2014-06-03T00:55:54.448Z","2.0.0":"2014-06-06T04:23:46.579Z","2.0.1":"2014-06-06T06:25:14.419Z","2.0.2":"2014-06-14T00:33:10.205Z","3.0.0":"2014-07-02T00:30:29.154Z","3.0.1":"2014-07-14T23:29:05.057Z","2.0.3":"2014-07-15T00:09:36.043Z","3.0.2":"2014-07-17T06:30:02.659Z","3.0.3":"2014-07-23T21:20:42.406Z","3.0.4":"2014-07-25T00:27:26.007Z","3.0.5":"2014-07-25T00:28:48.007Z","3.0.6":"2014-07-31T21:57:49.043Z","2.0.4":"2014-07-31T21:59:52.896Z"},"author":{"name":"Isaac Z. 
Schlueter","email":"i@izs.me","url":"http://blog.izs.me/"},"repository":{"url":"git://github.com/isaacs/npm-registry-client"},"users":{"fgribreau":true,"fengmk2":true},"readmeFilename":"README.md","homepage":"https://github.com/isaacs/npm-registry-client","bugs":{"url":"https://github.com/isaacs/npm-registry-client/issues"},"license":"ISC","_attachments":{}} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fixtures/server.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fixtures/server.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fixtures/server.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fixtures/server.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -// a fake registry server. - -var http = require('http') -var server = http.createServer(handler) -var port = server.port = process.env.PORT || 1337 -server.listen(port) - -module.exports = server - -server._expect = {} - -var expect = {} -function handler (req, res) { - req.connection.setTimeout(1000) - - var u = '* ' + req.url - , mu = req.method + ' ' + req.url - - var k = server._expect[mu] ? mu : server._expect[u] ? 
u : null - if (!k) throw Error('unexpected request: ' + req.method + ' ' + req.url) - - var fn = server._expect[k].shift() - if (!fn) throw Error('unexpected request' + req.method + ' ' + req.url) - - - var remain = (Object.keys(server._expect).reduce(function (s, k) { - return s + server._expect[k].length - }, 0)) - if (remain === 0) server.close() - else console.error("TEST SERVER: %d reqs remain", remain) - console.error(Object.keys(server._expect).map(function(k) { - return [k, server._expect[k].length] - }).reduce(function (acc, kv) { - acc[kv[0]] = kv[1] - return acc - }, {})) - - res.json = json - fn(req, res) -} - -function json (o) { - this.setHeader('content-type', 'application/json') - this.end(JSON.stringify(o)) -} - -server.expect = function (method, u, fn) { - if (typeof u === 'function') { - fn = u - u = method - method = '*' - } - u = method + ' ' + u - server._expect[u] = server._expect[u] || [] - server._expect[u].push(fn) -} Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/fixtures/underscore/1.3.3/package.tgz and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/fixtures/underscore/1.3.3/package.tgz differ diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/get-all.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/get-all.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/get-all.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/get-all.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() + +tap.test("basic request", function (t) { + server.expect("/-/all", function (req, res) { + res.json([]) + }) + + client.get("http://localhost:1337/-/all", null, function (er) { + t.ifError(er, "no error") + t.end() + 
}) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/get-basic.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/get-basic.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/get-basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/get-basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() + +var us = require("./fixtures/underscore/1.3.3/cache.json") +var usroot = require("./fixtures/underscore/cache.json") + +tap.test("basic request", function (t) { + server.expect("/underscore/1.3.3", function (req, res) { + res.json(us) + }) + + server.expect("/underscore", function (req, res) { + res.json(usroot) + }) + + server.expect("/@bigco%2funderscore", function (req, res) { + res.json(usroot) + }) + + t.plan(3) + client.get("http://localhost:1337/underscore/1.3.3", null, function (er, data) { + t.deepEqual(data, us) + }) + + client.get("http://localhost:1337/underscore", null, function (er, data) { + t.deepEqual(data, usroot) + }) + + client.get("http://localhost:1337/@bigco%2funderscore", null, function (er, data) { + t.deepEqual(data, usroot) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/get-error-403.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/get-error-403.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/get-error-403.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/get-error-403.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +tap.test("get fails with 403", function (t) { + server.expect("/habanero", function (req, res) { + t.equal(req.method, "GET", "got 
expected method") + + res.writeHead(403) + res.end("{\"error\":\"get that cat out of the toilet that's gross omg\"}") + }) + + var client = common.freshClient() + client.conf.set("fetch-retry-mintimeout", 100) + client.get( + "http://localhost:1337/habanero", + {}, + function (er) { + t.ok(er, "failed as expected") + + t.equal(er.statusCode, 403, "status code was attached as expected") + t.equal(er.code, "E403", "error code was formatted as expected") + t.equal( + er.message, + "get that cat out of the toilet that's gross omg : habanero", + "got error message" + ) + + t.end() + } + ) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/lib/common.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/lib/common.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/lib/common.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/lib/common.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ +var resolve = require("path").resolve + +var server = require("./server.js") +var RC = require("../../") +var toNerfDart = require("../../lib/util/nerf-dart.js") + +var REGISTRY = "http://localhost:" + server.port + +module.exports = { + port : server.port, + registry : REGISTRY, + freshClient : function freshClient(config) { + config = config || {} + config.cache = resolve(__dirname, "../fixtures/cache") + config.registry = REGISTRY + var container = { + get: function (k) { return config[k] }, + set: function (k, v) { config[k] = v }, + del: function (k) { delete config[k] }, + getCredentialsByURI: function(uri) { + var nerfed = toNerfDart(uri) + var c = {scope : nerfed} + + if (this.get(nerfed + ":_authToken")) { + c.token = this.get(nerfed + ":_authToken") + // the bearer token is enough, don't confuse things + return c + } + + if (this.get(nerfed + ":_password")) { + c.password = new Buffer(this.get(nerfed + ":_password"), "base64").toString("utf8") + } + + if 
(this.get(nerfed + ":username")) { + c.username = this.get(nerfed + ":username") + } + + if (this.get(nerfed + ":email")) { + c.email = this.get(nerfed + ":email") + } + + if (this.get(nerfed + ":always-auth") !== undefined) { + c.alwaysAuth = this.get(nerfed + ":always-auth") + } + + if (c.username && c.password) { + c.auth = new Buffer(c.username + ":" + c.password).toString("base64") + } + + return c + }, + setCredentialsByURI: function (uri, c) { + var nerfed = toNerfDart(uri) + + if (c.token) { + this.set(nerfed + ":_authToken", c.token, "user") + this.del(nerfed + ":_password", "user") + this.del(nerfed + ":username", "user") + this.del(nerfed + ":email", "user") + } + else if (c.username || c.password || c.email) { + this.del(nerfed + ":_authToken", "user") + + var encoded = new Buffer(c.password, "utf8").toString("base64") + this.set(nerfed + ":_password", encoded, "user") + this.set(nerfed + ":username", c.username, "user") + this.set(nerfed + ":email", c.email, "user") + } + else { + throw new Error("No credentials to set.") + } + } + } + + var client = new RC(container) + server.log = client.log + client.log.level = "silent" + + return client + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/lib/server.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/lib/server.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/lib/server.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/lib/server.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +// a fake registry server. + +var http = require('http') +var server = http.createServer(handler) +var port = server.port = process.env.PORT || 1337 +var assert = require("assert") +server.listen(port) + +module.exports = server + +server._expect = {} + +function handler (req, res) { + req.connection.setTimeout(1000) + + // If we got authorization, make sure it's the right password. 
+ if (req.headers.authorization && req.headers.authorization.match(/^Basic/)) { + var auth = req.headers.authorization.replace(/^Basic /, "") + auth = new Buffer(auth, "base64").toString("utf8") + assert.equal(auth, "username:%1234@asdf%") + } + + var u = '* ' + req.url + , mu = req.method + ' ' + req.url + + var k = server._expect[mu] ? mu : server._expect[u] ? u : null + if (!k) throw Error('unexpected request: ' + req.method + ' ' + req.url) + + var fn = server._expect[k].shift() + if (!fn) throw Error('unexpected request' + req.method + ' ' + req.url) + + + var remain = (Object.keys(server._expect).reduce(function (s, k) { + return s + server._expect[k].length + }, 0)) + if (remain === 0) server.close() + else this.log.info("fake-registry", "TEST SERVER: %d reqs remain", remain) + this.log.info("fake-registry", Object.keys(server._expect).map(function(k) { + return [k, server._expect[k].length] + }).reduce(function (acc, kv) { + acc[kv[0]] = kv[1] + return acc + }, {})) + + res.json = json + fn(req, res) +} + +function json (o) { + this.setHeader('content-type', 'application/json') + this.end(JSON.stringify(o)) +} + +// this log is meanto to be overridden +server.log = require("npmlog") + +server.expect = function (method, u, fn) { + if (typeof u === 'function') { + fn = u + u = method + method = '*' + } + u = method + ' ' + u + server._expect[u] = server._expect[u] || [] + server._expect[u].push(fn) +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-again.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-again.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-again.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-again.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,24 +1,29 @@ -var tap = require('tap') -var server = require('./fixtures/server.js') -var RC = require('../') -var client = new RC( - { cache: __dirname + 
'/fixtures/cache' - , registry: 'http://localhost:' + server.port - , username: "username" - , password: "password" - , email: "i@izs.me" - , _auth: new Buffer("username:password").toString('base64') - , "always-auth": true - }) - +var tap = require("tap") var fs = require("fs") +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "username"] = "username" +configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") +configuration[nerfed + "email"] = "i@izs.me" + +var client = common.freshClient(configuration) + tap.test("publish again", function (t) { + // not really a tarball, but doesn't matter + var tarball = require.resolve("../package.json") + var pd = fs.readFileSync(tarball, "base64") + var pkg = require("../package.json") var lastTime = null + server.expect("/npm-registry-client", function (req, res) { t.equal(req.method, "PUT") var b = "" - req.setEncoding('utf8') + req.setEncoding("utf8") req.on("data", function (d) { b += d }) @@ -28,8 +33,8 @@ t.equal(o._id, "npm-registry-client") t.equal(o["dist-tags"].latest, pkg.version) t.has(o.versions[pkg.version], pkg) - t.same(o.maintainers, [ { name: 'username', email: 'i@izs.me' } ]) - var att = o._attachments[ pkg.name + '-' + pkg.version + '.tgz' ] + t.same(o.maintainers, [ { name: "username", email: "i@izs.me" } ]) + var att = o._attachments[ pkg.name + "-" + pkg.version + ".tgz" ] t.same(att.data, pd) res.statusCode = 409 res.json({reason: "must supply latest _rev to update existing package"}) @@ -54,7 +59,7 @@ t.ok(lastTime) var b = "" - req.setEncoding('utf8') + req.setEncoding("utf8") req.on("data", function (d) { b += d }) @@ -68,12 +73,7 @@ }) }) - - // not really a tarball, but doesn't matter - var tarball = require.resolve('../package.json') - var pd = fs.readFileSync(tarball, 'base64') - var pkg = require('../package.json') - client.publish(pkg, 
tarball, function (er, data, raw, res) { + client.publish("http://localhost:1337/", pkg, tarball, function (er, data) { if (er) throw er t.deepEqual(data, { created: true }) t.end() diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-again-scoped.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-again-scoped.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-again-scoped.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-again-scoped.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,82 @@ +var tap = require("tap") +var fs = require("fs") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "username"] = "username" +configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") +configuration[nerfed + "email"] = "i@izs.me" + +var client = common.freshClient(configuration) + +tap.test("publish again", function (t) { + // not really a tarball, but doesn't matter + var tarball = require.resolve("../package.json") + var pd = fs.readFileSync(tarball, "base64") + var pkg = require("../package.json") + var lastTime = null + + server.expect("/@npm%2fnpm-registry-client", function (req, res) { + t.equal(req.method, "PUT") + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var o = lastTime = JSON.parse(b) + t.equal(o._id, "@npm/npm-registry-client") + t.equal(o["dist-tags"].latest, pkg.version) + t.has(o.versions[pkg.version], pkg) + t.same(o.maintainers, [ { name: "username", email: "i@izs.me" } ]) + var att = o._attachments[ pkg.name + "-" + pkg.version + ".tgz" ] + t.same(att.data, pd) + res.statusCode = 409 + res.json({reason: "must supply latest _rev to update existing package"}) + }) + }) + + 
server.expect("/@npm%2fnpm-registry-client?write=true", function (req, res) { + t.equal(req.method, "GET") + t.ok(lastTime) + for (var i in lastTime.versions) { + var v = lastTime.versions[i] + delete lastTime.versions[i] + lastTime.versions["0.0.2"] = v + lastTime["dist-tags"] = { latest: "0.0.2" } + } + lastTime._rev = "asdf" + res.json(lastTime) + }) + + server.expect("/@npm%2fnpm-registry-client", function (req, res) { + t.equal(req.method, "PUT") + t.ok(lastTime) + + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function() { + var o = JSON.parse(b) + t.equal(o._rev, "asdf") + t.deepEqual(o.versions["0.0.2"], o.versions[pkg.version]) + res.statusCode = 201 + res.json({created: true}) + }) + }) + + pkg.name = "@npm/npm-registry-client" + client.publish("http://localhost:1337/", pkg, tarball, function (er, data) { + if (er) throw er + t.deepEqual(data, { created: true }) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,24 +1,29 @@ -var tap = require('tap') -var crypto = require('crypto') -var server = require('./fixtures/server.js') -var RC = require('../') -var client = new RC( - { cache: __dirname + '/fixtures/cache' - , registry: 'http://localhost:' + server.port - , username: "username" - , password: "password" - , email: "i@izs.me" - , _auth: new Buffer("username:password").toString('base64') - , "always-auth": true - }) - +var tap = require("tap") +var crypto = require("crypto") var fs = require("fs") +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var 
configuration = {} +configuration[nerfed + "username"] = "username" +configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") +configuration[nerfed + "email"] = "i@izs.me" + +var client = common.freshClient(configuration) + tap.test("publish", function (t) { + // not really a tarball, but doesn't matter + var tarball = require.resolve("../package.json") + var pd = fs.readFileSync(tarball, "base64") + var pkg = require("../package.json") + server.expect("/npm-registry-client", function (req, res) { t.equal(req.method, "PUT") var b = "" - req.setEncoding('utf8') + req.setEncoding("utf8") req.on("data", function (d) { b += d }) @@ -28,22 +33,18 @@ t.equal(o._id, "npm-registry-client") t.equal(o["dist-tags"].latest, pkg.version) t.has(o.versions[pkg.version], pkg) - t.same(o.maintainers, [ { name: 'username', email: 'i@izs.me' } ]) + t.same(o.maintainers, [ { name: "username", email: "i@izs.me" } ]) t.same(o.maintainers, o.versions[pkg.version].maintainers) - var att = o._attachments[ pkg.name + '-' + pkg.version + '.tgz' ] + var att = o._attachments[ pkg.name + "-" + pkg.version + ".tgz" ] t.same(att.data, pd) - var hash = crypto.createHash('sha1').update(pd, 'base64').digest('hex') + var hash = crypto.createHash("sha1").update(pd, "base64").digest("hex") t.equal(o.versions[pkg.version].dist.shasum, hash) res.statusCode = 201 res.json({created:true}) }) }) - // not really a tarball, but doesn't matter - var tarball = require.resolve('../package.json') - var pd = fs.readFileSync(tarball, 'base64') - var pkg = require('../package.json') - client.publish(pkg, tarball, function (er, data, raw, res) { + client.publish("http://localhost:1337/", pkg, tarball, function (er, data) { if (er) throw er t.deepEqual(data, { created: true }) t.end() diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-scoped-auth-token.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-scoped-auth-token.js --- 
nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-scoped-auth-token.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-scoped-auth-token.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +var tap = require("tap") +var crypto = require("crypto") +var fs = require("fs") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "_authToken"] = "of-glad-tidings" + +var client = common.freshClient(configuration) + +tap.test("publish", function (t) { + // not really a tarball, but doesn't matter + var tarball = require.resolve("../package.json") + var pd = fs.readFileSync(tarball, "base64") + var pkg = require("../package.json") + pkg.name = "@npm/npm-registry-client" + + server.expect("/@npm%2fnpm-registry-client", function (req, res) { + t.equal(req.method, "PUT") + t.equal(req.headers.authorization, "Bearer of-glad-tidings") + + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var o = JSON.parse(b) + t.equal(o._id, "@npm/npm-registry-client") + t.equal(o["dist-tags"].latest, pkg.version) + t.has(o.versions[pkg.version], pkg) + t.same(o.maintainers, o.versions[pkg.version].maintainers) + var att = o._attachments[ pkg.name + "-" + pkg.version + ".tgz" ] + t.same(att.data, pd) + var hash = crypto.createHash("sha1").update(pd, "base64").digest("hex") + t.equal(o.versions[pkg.version].dist.shasum, hash) + res.statusCode = 201 + res.json({created:true}) + }) + }) + + client.publish(common.registry, pkg, tarball, function (er, data) { + if (er) throw er + t.deepEqual(data, { created: true }) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-scoped.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-scoped.js --- 
nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/publish-scoped.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/publish-scoped.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,57 @@ +var tap = require("tap") +var crypto = require("crypto") +var fs = require("fs") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "username"] = "username" +configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") +configuration[nerfed + "email"] = "ogd@aoaioxxysz.net" + +var client = common.freshClient(configuration) + +var _auth = new Buffer("username:%1234@asdf%").toString("base64") + +tap.test("publish", function (t) { + // not really a tarball, but doesn't matter + var tarball = require.resolve("../package.json") + var pd = fs.readFileSync(tarball, "base64") + var pkg = require("../package.json") + pkg.name = "@npm/npm-registry-client" + + server.expect("/@npm%2fnpm-registry-client", function (req, res) { + t.equal(req.method, "PUT") + t.equal(req.headers.authorization, "Basic " + _auth) + + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var o = JSON.parse(b) + t.equal(o._id, "@npm/npm-registry-client") + t.equal(o["dist-tags"].latest, pkg.version) + t.has(o.versions[pkg.version], pkg) + t.same(o.maintainers, [ { name: "username", email: "ogd@aoaioxxysz.net" } ]) + t.same(o.maintainers, o.versions[pkg.version].maintainers) + var att = o._attachments[ pkg.name + "-" + pkg.version + ".tgz" ] + t.same(att.data, pd) + var hash = crypto.createHash("sha1").update(pd, "base64").digest("hex") + t.equal(o.versions[pkg.version].dist.shasum, hash) + res.statusCode = 201 + res.json({created:true}) + }) + }) + + client.publish(common.registry, pkg, tarball, function (er, data) { + if (er) throw 
er + t.deepEqual(data, { created: true }) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/redirects.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/redirects.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/redirects.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/redirects.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,48 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() + +var pkg = { + _id: "some-package@1.2.3", + name: "some-package", + version: "1.2.3" +} + +tap.test("basic request", function (t) { + // Expect one request for { follow : false } + server.expect("/-/some-package/1.2.3", function (req, res) { + res.writeHead(301, { + "Location": "/some-package/1.2.3" + }) + res.end("Redirecting") + }) + + // Expect 2 requests for { follow : true } + server.expect("/-/some-package/1.2.3", function (req, res) { + res.writeHead(301, { + "Location": "/some-package/1.2.3" + }) + res.end("Redirecting") + }) + + server.expect("/some-package/1.2.3", function (req, res) { + res.json(pkg) + }) + + t.plan(2); + + client.get("http://localhost:1337/-/some-package/1.2.3", { + follow: false + }, function(er, data) { + t.assert(er, "Error must be set"); + }) + + client.get("http://localhost:1337/-/some-package/1.2.3", { + follow: true + }, function(er, data) { + t.deepEqual(data, pkg) + }) +}) + diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/request-gzip-content.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/request-gzip-content.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/request-gzip-content.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/request-gzip-content.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,51 @@ -var 
zlib = require('zlib') -var tap = require('tap') -var server = require('./fixtures/server.js') -var RC = require('../') +var zlib = require("zlib") +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient({ + "fetch-retries" : 1, + "fetch-retry-mintimeout" : 10, + "fetch-retry-maxtimeout" : 100 +}) + +var TEST_URL = "http://localhost:1337/some-package-gzip/1.2.3" + var pkg = { - _id: 'some-package-gzip@1.2.3', - name: 'some-package-gzip', - version: '1.2.3' + _id: "some-package-gzip@1.2.3", + name: "some-package-gzip", + version: "1.2.3" } zlib.gzip(JSON.stringify(pkg), function (err, pkgGzip) { - var client = new RC({ - cache: __dirname + '/fixtures/cache' - , 'fetch-retries': 1 - , 'fetch-retry-mintimeout': 10 - , 'fetch-retry-maxtimeout': 100 - , registry: 'http://localhost:' + server.port }) + tap.test("request gzip package content", function (t) { + t.ifError(err, "example package compressed") - tap.test('request gzip package content', function (t) { - server.expect('GET', '/some-package-gzip/1.2.3', function (req, res) { + server.expect("GET", "/some-package-gzip/1.2.3", function (req, res) { res.statusCode = 200 - res.setHeader('Content-Encoding', 'gzip'); - res.setHeader('Content-Type', 'application/json'); + res.setHeader("Content-Encoding", "gzip") + res.setHeader("Content-Type", "application/json") res.end(pkgGzip) }) - client.get('/some-package-gzip/1.2.3', function (er, data, raw, res) { + client.get(TEST_URL, null, function (er, data) { if (er) throw er t.deepEqual(data, pkg) t.end() }) }) - tap.test('request wrong gzip package content', function (t) { - server.expect('GET', '/some-package-gzip-error/1.2.3', function (req, res) { + tap.test("request wrong gzip package content", function (t) { + server.expect("GET", "/some-package-gzip-error/1.2.3", function (req, res) { res.statusCode = 200 - res.setHeader('Content-Encoding', 'gzip') - res.setHeader('Content-Type', 
'application/json') - res.end(new Buffer('wrong gzip content')) + res.setHeader("Content-Encoding", "gzip") + res.setHeader("Content-Type", "application/json") + res.end(new Buffer("wrong gzip content")) }) - client.get('/some-package-gzip-error/1.2.3', function (er, data, raw, res) { + client.get(TEST_URL, null, function (er) { t.ok(er) t.end() }) }) -}); +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/retries.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/retries.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/retries.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/retries.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,47 @@ -var tap = require('tap') -var server = require('./fixtures/server.js') -var RC = require('../') -var pkg = { _id: 'some-package@1.2.3', - name: 'some-package', - version: '1.2.3' } -var client = new RC({ - 'fetch-retries': 6 - , 'fetch-retry-mintimeout': 10 - , 'fetch-retry-maxtimeout': 100 - , cache: __dirname + '/fixtures/cache' - , registry: 'http://localhost:' + server.port }) +var tap = require("tap") -tap.test('create new user account', function (t) { +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient({ + "fetch-retries": 6, + "fetch-retry-mintimeout": 10, + "fetch-retry-maxtimeout": 100 +}) + +var pkg = { + _id : "some-package@1.2.3", + name : "some-package", + version : "1.2.3" +} + +tap.test("create new user account", function (t) { // first time, return a 408 - server.expect('GET', '/some-package/1.2.3', function (req, res) { + server.expect("GET", "/some-package/1.2.3", function (req, res) { res.statusCode = 408 - res.end('Timeout') + res.end("Timeout") }) // then, slam the door in their face - server.expect('GET', '/some-package/1.2.3', function (req, res) { + server.expect("GET", "/some-package/1.2.3", function (req, res) { res.destroy() }) 
// then, blame someone else - server.expect('GET', '/some-package/1.2.3', function (req, res) { + server.expect("GET", "/some-package/1.2.3", function (req, res) { res.statusCode = 502 - res.end('Gateway Timeout') + res.end("Gateway Timeout") }) // 'No one's home right now, come back later' - server.expect('GET', '/some-package/1.2.3', function (req, res) { + server.expect("GET", "/some-package/1.2.3", function (req, res) { res.statusCode = 503 - res.setHeader('retry-after', '10') - res.end('Come back later') + res.setHeader("retry-after", "10") + res.end("Come back later") }) // finally, you may enter. - server.expect('GET', '/some-package/1.2.3', function (req, res) { + server.expect("GET", "/some-package/1.2.3", function (req, res) { res.statusCode = 200 res.json(pkg) }) - client.get('/some-package/1.2.3', function (er, data, raw, res) { + client.get("http://localhost:1337/some-package/1.2.3", null, function (er, data) { if (er) throw er t.deepEqual(data, pkg) t.end() diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/star.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/star.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/star.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/star.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,62 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var DEP_USER = "username" + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "username"] = DEP_USER +configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") +configuration[nerfed + "email"] = "i@izs.me" + +var client = common.freshClient(configuration) + +var cache = require("./fixtures/underscore/cache.json") + +tap.test("star a package", function (t) { + server.expect("GET", "/underscore?write=true", function (req, res) { + 
t.equal(req.method, "GET") + + res.json(cache) + }) + + server.expect("PUT", "/underscore", function (req, res) { + t.equal(req.method, "PUT") + + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var updated = JSON.parse(b) + + var already = [ + "vesln", "mvolkmann", "lancehunt", "mikl", "linus", "vasc", "bat", + "dmalam", "mbrevoort", "danielr", "rsimoes", "thlorenz" + ] + for (var i = 0; i < already.length; i++) { + var current = already[i] + t.ok( + updated.users[current], + current + " still likes this package" + ) + } + t.ok(updated.users[DEP_USER], "user is in the starred list") + + res.statusCode = 201 + res.json({starred:true}) + }) + }) + + client.star("http://localhost:1337/underscore", true, function (error, data) { + t.ifError(error, "no errors") + t.ok(data.starred, "was starred") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/stars.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/stars.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/stars.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/stars.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,26 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") +var client = common.freshClient() + +var users = [ + "benjamincoe", + "seldo", + "ceejbot" +] + +tap.test("get the URL for the bugs page on a package", function (t) { + server.expect("GET", "/-/_view/starredByUser?key=%22sample%22", function (req, res) { + t.equal(req.method, "GET") + + res.json(users) + }) + + client.stars("http://localhost:1337/", "sample", function (error, info) { + t.ifError(error, "no errors") + t.deepEqual(info, users, "got the list of users") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/tag.js 
nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/tag.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/tag.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/tag.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,41 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "username"] = "username" +configuration[nerfed + "_password"] = new Buffer("%1234@asdf%").toString("base64") +configuration[nerfed + "email"] = "i@izs.me" + +var client = common.freshClient(configuration) + +tap.test("tag a package", function (t) { + server.expect("PUT", "/underscore/not-lodash", function (req, res) { + t.equal(req.method, "PUT") + + var b = "" + req.setEncoding("utf8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var updated = JSON.parse(b) + + t.deepEqual(updated, {"1.3.2":{}}) + + res.statusCode = 201 + res.json({tagged:true}) + }) + }) + + client.tag("http://localhost:1337/underscore", {"1.3.2":{}}, "not-lodash", function (error, data) { + t.ifError(error, "no errors") + t.ok(data.tagged, "was tagged") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/unpublish.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/unpublish.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/unpublish.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/unpublish.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "_authToken"] = "of-glad-tidings" + +var client = 
common.freshClient(configuration) + +var cache = require("./fixtures/underscore/cache.json") + +var REV = "/-rev/72-47f2986bfd8e8b55068b204588bbf484" +var VERSION = "1.3.2" + +tap.test("unpublish a package", function (t) { + server.expect("GET", "/underscore?write=true", function (req, res) { + t.equal(req.method, "GET") + + res.json(cache) + }) + + server.expect("PUT", "/underscore" + REV, function (req, res) { + t.equal(req.method, "PUT") + + var b = "" + req.setEncoding("utf-8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var updated = JSON.parse(b) + t.notOk(updated.versions[VERSION]) + }) + + res.json(cache) + }) + + server.expect("GET", "/underscore", function (req, res) { + t.equal(req.method, "GET") + + res.json(cache) + }) + + server.expect("DELETE", "/underscore/-/underscore-1.3.2.tgz" + REV, function (req, res) { + t.equal(req.method, "DELETE") + + res.json({unpublished:true}) + }) + + client.unpublish("http://localhost:1337/underscore", VERSION, function (error) { + t.ifError(error, "no errors") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/unpublish-scoped.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/unpublish-scoped.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/unpublish-scoped.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/unpublish-scoped.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "_authToken"] = "of-glad-tidings" + +var client = common.freshClient(configuration) + +var cache = require("./fixtures/@npm/npm-registry-client/cache.json") + +var REV = "/-rev/213-0a1049cf56172b7d9a1184742c6477b9" +var VERSION = "3.0.6" + +tap.test("unpublish a package", 
function (t) { + server.expect("GET", "/@npm%2fnpm-registry-client?write=true", function (req, res) { + t.equal(req.method, "GET") + + res.json(cache) + }) + + server.expect("PUT", "/@npm%2fnpm-registry-client" + REV, function (req, res) { + t.equal(req.method, "PUT") + + var b = "" + req.setEncoding("utf-8") + req.on("data", function (d) { + b += d + }) + + req.on("end", function () { + var updated = JSON.parse(b) + t.notOk(updated.versions[VERSION]) + }) + + res.json(cache) + }) + + server.expect("GET", "/@npm%2fnpm-registry-client", function (req, res) { + t.equal(req.method, "GET") + + res.json(cache) + }) + + server.expect("DELETE", "/@npm%2fnpm-registry-client/-/@npm%2fnpm-registry-client-" + VERSION + ".tgz" + REV, function (req, res) { + t.equal(req.method, "DELETE") + + res.json({unpublished:true}) + }) + + client.unpublish("http://localhost:1337/@npm%2fnpm-registry-client", VERSION, function (error) { + t.ifError(error, "no errors") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/upload.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/upload.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/upload.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/upload.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +var tap = require("tap") +var Readable = require("stream").Readable +var inherits = require("util").inherits + +var common = require("./lib/common.js") +var server = require("./lib/server.js") + +var cache = require("./fixtures/underscore/cache.json") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "_authToken"] = "of-glad-tidings" + +var client = common.freshClient(configuration) + +function OneA() { + Readable.call(this) + this.push("A") + this.push(null) +} +inherits(OneA, Readable) + +tap.test("uploading a tarball", function (t) { + server.expect("PUT", 
"/underscore", function (req, res) { + t.equal(req.method, "PUT") + + res.json(cache) + }) + + client.upload("http://localhost:1337/underscore", new OneA(), "daedabeefa", true, function (error) { + t.ifError(error, "no errors") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/whoami.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/whoami.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/whoami.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/whoami.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,30 @@ +var tap = require("tap") + +var server = require("./lib/server.js") +var common = require("./lib/common.js") + +var nerfed = "//localhost:" + server.port + "/:" + +var configuration = {} +configuration[nerfed + "_authToken"] = "not-bad-meaning-bad-but-bad-meaning-wombat" + +var client = common.freshClient(configuration) + +var WHOIAM = "wombat" + +tap.test("whoami", function (t) { + server.expect("GET", "/whoami", function (req, res) { + t.equal(req.method, "GET") + // only available for token-based auth for now + t.equal(req.headers.authorization, "Bearer not-bad-meaning-bad-but-bad-meaning-wombat") + + res.json({username : WHOIAM}) + }) + + client.whoami(common.registry, function (error, wombat) { + t.ifError(error, "no errors") + t.equal(wombat, WHOIAM, "im a wombat") + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/zz-cleanup.js nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/zz-cleanup.js --- nodejs-0.11.13/deps/npm/node_modules/npm-registry-client/test/zz-cleanup.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-registry-client/test/zz-cleanup.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,10 @@ -var tap = require('tap') -var rimraf = require('rimraf') +var tap = require("tap") +var rimraf = require("rimraf") -tap.test('teardown', 
function (t) { - rimraf(__dirname + '/fixtures/cache', function (er) { +tap.test("teardown", function (t) { + rimraf(__dirname + "/fixtures/cache", function (er) { if (er) throw er - t.pass('cache cleaned') + t.pass("cache cleaned") t.end() }) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/npm-user-validate.js nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/npm-user-validate.js --- nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/npm-user-validate.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/npm-user-validate.js 2015-01-20 21:22:17.000000000 +0000 @@ -8,9 +8,7 @@ urlSafe: 'Username may not contain non-url-safe chars', dot: 'Username may not start with "."' }, - password: { - badchars: 'Password passwords cannot contain these characters: \'!:@"' - }, + password: {}, email: { valid: 'Email must be an email address' } @@ -41,9 +39,5 @@ } function pw (pw) { - if (pw.match(/['!:@"]/)) { - return new Error(requirements.password.badchars) - } - return null } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/package.json nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/package.json --- nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "npm-user-validate", - "version": "0.0.3", + "version": "0.1.1", "description": "User validations for npm", "main": "npm-user-validate.js", "devDependencies": { @@ -11,7 +11,7 @@ }, "repository": { "type": "git", - "url": "https://github.com/robertkowalski/npm-user-validate" + "url": "git://github.com/npm/npm-user-validate.git" }, "keywords": [ "npm", @@ -23,15 +23,34 @@ "email": "rok@kowalski.gd" }, "license": "BSD", - "readme": "[![Build 
Status](https://travis-ci.org/robertkowalski/npm-user-validate.png?branch=master)](https://travis-ci.org/robertkowalski/npm-user-validate)\n[![devDependency Status](https://david-dm.org/robertkowalski/npm-user-validate/dev-status.png)](https://david-dm.org/robertkowalski/npm-user-validate#info=devDependencies)\n\n# npm-user-validate\n\nValidation for the npm client and npm-www (and probably other npm projects)", - "readmeFilename": "README.md", + "gitHead": "64c9bd4ded742c41afdb3a8414fbbfdbfdcdf6b7", "bugs": { - "url": "https://github.com/robertkowalski/npm-user-validate/issues" + "url": "https://github.com/npm/npm-user-validate/issues" }, - "_id": "npm-user-validate@0.0.3", + "homepage": "https://github.com/npm/npm-user-validate", + "_id": "npm-user-validate@0.1.1", + "_shasum": "ea7774636c3c8fe6d01e174bd9f2ee0e22eeed57", + "_from": "npm-user-validate@>=0.1.1 <0.2.0", + "_npmVersion": "2.1.3", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "robertkowalski", + "email": "rok@kowalski.gd" + }, + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "7b147d11038083fb0ba2d60ff851dc20322aa9f6" + "shasum": "ea7774636c3c8fe6d01e174bd9f2ee0e22eeed57", + "tarball": "http://registry.npmjs.org/npm-user-validate/-/npm-user-validate-0.1.1.tgz" }, - "_from": "npm-user-validate@0.0.3", - "_resolved": "https://registry.npmjs.org/npm-user-validate/-/npm-user-validate-0.0.3.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/npm-user-validate/-/npm-user-validate-0.1.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/README.md nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/README.md --- nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ -[![Build 
Status](https://travis-ci.org/robertkowalski/npm-user-validate.png?branch=master)](https://travis-ci.org/robertkowalski/npm-user-validate) -[![devDependency Status](https://david-dm.org/robertkowalski/npm-user-validate/dev-status.png)](https://david-dm.org/robertkowalski/npm-user-validate#info=devDependencies) +[![Build Status](https://travis-ci.org/npm/npm-user-validate.png?branch=master)](https://travis-ci.org/npm/npm-user-validate) +[![devDependency Status](https://david-dm.org/npm/npm-user-validate/dev-status.png)](https://david-dm.org/npm/npm-user-validate#info=devDependencies) # npm-user-validate -Validation for the npm client and npm-www (and probably other npm projects) \ No newline at end of file +Validation for the npm client and npm-www (and probably other npm projects) diff -Nru nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/test/pw.test.js nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/test/pw.test.js --- nodejs-0.11.13/deps/npm/node_modules/npm-user-validate/test/pw.test.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/npm-user-validate/test/pw.test.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,25 +3,25 @@ test('pw contains a \'', function (t) { err = v('\'') - t.type(err, 'object') + t.type(err, 'null') t.end() }) test('pw contains a :', function (t) { err = v(':') - t.type(err, 'object') + t.type(err, 'null') t.end() }) test('pw contains a @', function (t) { err = v('@') - t.type(err, 'object') + t.notOk(err, 'null') t.end() }) test('pw contains a "', function (t) { err = v('"') - t.type(err, 'object') + t.type(err, 'null') t.end() }) @@ -29,4 +29,4 @@ err = v('duck') t.type(err, 'null') t.end() -}) \ No newline at end of file +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/once/once.js nodejs-0.11.15/deps/npm/node_modules/once/once.js --- nodejs-0.11.13/deps/npm/node_modules/once/once.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/once/once.js 2015-01-20 
21:22:17.000000000 +0000 @@ -1,4 +1,5 @@ -module.exports = once +var wrappy = require('wrappy') +module.exports = wrappy(once) once.proto = once(function () { Object.defineProperty(Function.prototype, 'once', { diff -Nru nodejs-0.11.13/deps/npm/node_modules/once/package.json nodejs-0.11.15/deps/npm/node_modules/once/package.json --- nodejs-0.11.13/deps/npm/node_modules/once/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/once/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,12 +1,14 @@ { "name": "once", - "version": "1.3.0", + "version": "1.3.1", "description": "Run a function exactly one time", "main": "once.js", "directories": { "test": "test" }, - "dependencies": {}, + "dependencies": { + "wrappy": "1" + }, "devDependencies": { "tap": "~0.3.0" }, @@ -29,11 +31,30 @@ "url": "http://blog.izs.me/" }, "license": "BSD", - "readme": "# once\n\nOnly call a function once.\n\n## usage\n\n```javascript\nvar once = require('once')\n\nfunction load (file, cb) {\n cb = once(cb)\n loader.load('file')\n loader.once('load', cb)\n loader.once('error', cb)\n}\n```\n\nOr add to the Function.prototype in a responsible way:\n\n```javascript\n// only has to be done once\nrequire('once').proto()\n\nfunction load (file, cb) {\n cb = cb.once()\n loader.load('file')\n loader.once('load', cb)\n loader.once('error', cb)\n}\n```\n\nIronically, the prototype feature makes this module twice as\ncomplicated as necessary.\n\nTo check whether you function has been called, use `fn.called`. 
Once the\nfunction is called for the first time the return value of the original\nfunction is saved in `fn.value` and subsequent calls will continue to\nreturn this value.\n\n```javascript\nvar once = require('once')\n\nfunction load (cb) {\n cb = once(cb)\n var stream = createStream()\n stream.once('data', cb)\n stream.once('end', function () {\n if (!cb.called) cb(new Error('not found'))\n })\n}\n```\n", - "readmeFilename": "README.md", + "gitHead": "c90ac02a74f433ce47f6938869e68dd6196ffc2c", "bugs": { "url": "https://github.com/isaacs/once/issues" }, - "_id": "once@1.3.0", - "_from": "once@latest" + "homepage": "https://github.com/isaacs/once", + "_id": "once@1.3.1", + "_shasum": "f3f3e4da5b7d27b5c732969ee3e67e729457b31f", + "_from": "once@>=1.3.1 <2.0.0", + "_npmVersion": "2.0.0", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "f3f3e4da5b7d27b5c732969ee3e67e729457b31f", + "tarball": "http://registry.npmjs.org/once/-/once-1.3.1.tgz" + }, + "_resolved": "https://registry.npmjs.org/once/-/once-1.3.1.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/once/test/once.js nodejs-0.11.15/deps/npm/node_modules/once/test/once.js --- nodejs-0.11.13/deps/npm/node_modules/once/test/once.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/once/test/once.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,11 +3,14 @@ test('once', function (t) { var f = 0 - var foo = once(function (g) { + function fn (g) { t.equal(f, 0) f ++ return f + g + this - }) + } + fn.ownProperty = {} + var foo = once(fn) + t.equal(fn.ownProperty, foo.ownProperty) t.notOk(foo.called) for (var i = 0; i < 1E3; i++) { t.same(f, i === 0 ? 
0 : 1) diff -Nru nodejs-0.11.13/deps/npm/node_modules/opener/LICENSE.txt nodejs-0.11.15/deps/npm/node_modules/opener/LICENSE.txt --- nodejs-0.11.13/deps/npm/node_modules/opener/LICENSE.txt 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/opener/LICENSE.txt 2015-01-20 21:22:17.000000000 +0000 @@ -1,14 +1,19 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2012 Domenic Denicola - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - +Copyright © 2012–2014 Domenic Denicola + +This work is free. You can redistribute it and/or modify it under the +terms of the Do What The Fuck You Want To Public License, Version 2, +as published by Sam Hocevar. See below for more details. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. diff -Nru nodejs-0.11.13/deps/npm/node_modules/opener/opener.js nodejs-0.11.15/deps/npm/node_modules/opener/opener.js --- nodejs-0.11.13/deps/npm/node_modules/opener/opener.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/opener/opener.js 2015-01-20 21:22:17.000000000 +0000 @@ -38,7 +38,7 @@ args = ["/c", "start", '""'].concat(args); } - childProcess.execFile(command, args, options, callback); + return childProcess.execFile(command, args, options, callback); } // Export `opener` for programmatic access. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/opener/package.json nodejs-0.11.15/deps/npm/node_modules/opener/package.json --- nodejs-0.11.13/deps/npm/node_modules/opener/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/opener/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,11 +1,11 @@ { "name": "opener", "description": "Opens stuff, like webpages and files and executables, cross-platform", - "version": "1.3.0", + "version": "1.4.0", "author": { "name": "Domenic Denicola", "email": "domenic@domenicdenicola.com", - "url": "http://domenicdenicola.com" + "url": "http://domenic.me/" }, "license": "WTFPL", "repository": { @@ -23,12 +23,28 @@ "lint": "jshint opener.js" }, "devDependencies": { - "jshint": ">= 0.9.0" + "jshint": "^2.5.4" }, - "readme": "# It Opens Stuff\r\n\r\nThat is, in your desktop environment. This will make *actual windows pop up*, with stuff in them:\r\n\r\n```bash\r\nnpm install opener -g\r\n\r\nopener http://google.com\r\nopener ./my-file.txt\r\nopener firefox\r\nopener npm run lint\r\n```\r\n\r\nAlso if you want to use it programmatically you can do that too:\r\n\r\n```js\r\nvar opener = require(\"opener\");\r\n\r\nopener(\"http://google.com\");\r\nopener(\"./my-file.txt\");\r\nopener(\"firefox\");\r\nopener(\"npm run lint\");\r\n```\r\n\r\n## Use It for Good\r\n\r\nLike opening the user's browser with a test harness in your package's test script:\r\n\r\n```json\r\n{\r\n \"scripts\": {\r\n \"test\": \"opener ./test/runner.html\"\r\n },\r\n \"devDependencies\": {\r\n \"opener\": \"*\"\r\n }\r\n}\r\n```\r\n\r\n## Why\r\n\r\nBecause Windows has `start`, Macs have `open`, and *nix has `xdg-open`. At least\r\n[according to some guy on StackOverflow](http://stackoverflow.com/q/1480971/3191). And I like things that work on all\r\nthree. Like Node.js. 
And Opener.\r\n", - "_id": "opener@1.3.0", + "gitHead": "b9d36d4f82c26560acdadbabbb10ddba46a30dc5", + "homepage": "https://github.com/domenic/opener", + "_id": "opener@1.4.0", + "_shasum": "d11f86eeeb076883735c9d509f538fe82d10b941", + "_from": "opener@>=1.4.0 <1.5.0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "domenic", + "email": "domenic@domenicdenicola.com" + }, + "maintainers": [ + { + "name": "domenic", + "email": "domenic@domenicdenicola.com" + } + ], "dist": { - "shasum": "d72b4b2e61b0a4ca7822a7554070620002fb90d9" + "shasum": "d11f86eeeb076883735c9d509f538fe82d10b941", + "tarball": "http://registry.npmjs.org/opener/-/opener-1.4.0.tgz" }, - "_from": "opener@latest" + "directories": {}, + "_resolved": "https://registry.npmjs.org/opener/-/opener-1.4.0.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/opener/README.md nodejs-0.11.15/deps/npm/node_modules/opener/README.md --- nodejs-0.11.13/deps/npm/node_modules/opener/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/opener/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,57 @@ -# It Opens Stuff - -That is, in your desktop environment. This will make *actual windows pop up*, with stuff in them: - -```bash -npm install opener -g - -opener http://google.com -opener ./my-file.txt -opener firefox -opener npm run lint -``` - -Also if you want to use it programmatically you can do that too: - -```js -var opener = require("opener"); - -opener("http://google.com"); -opener("./my-file.txt"); -opener("firefox"); -opener("npm run lint"); -``` - -## Use It for Good - -Like opening the user's browser with a test harness in your package's test script: - -```json -{ - "scripts": { - "test": "opener ./test/runner.html" - }, - "devDependencies": { - "opener": "*" - } -} -``` - -## Why - -Because Windows has `start`, Macs have `open`, and *nix has `xdg-open`. At least -[according to some guy on StackOverflow](http://stackoverflow.com/q/1480971/3191). 
And I like things that work on all -three. Like Node.js. And Opener. +# It Opens Stuff + +That is, in your desktop environment. This will make *actual windows pop up*, with stuff in them: + +```bash +npm install opener -g + +opener http://google.com +opener ./my-file.txt +opener firefox +opener npm run lint +``` + +Also if you want to use it programmatically you can do that too: + +```js +var opener = require("opener"); + +opener("http://google.com"); +opener("./my-file.txt"); +opener("firefox"); +opener("npm run lint"); +``` + +Plus, it returns the child process created, so you can do things like let your script exit while the window stays open: + +```js +var editor = opener("documentation.odt"); +editor.unref(); +// These other unrefs may be necessary if your OS's opener process +// exits before the process it started is complete. +editor.stdin.unref(); +editor.stdout.unref(); +editor.stderr.unref(); +``` + + +## Use It for Good + +Like opening the user's browser with a test harness in your package's test script: + +```json +{ + "scripts": { + "test": "opener ./test/runner.html" + }, + "devDependencies": { + "opener": "*" + } +} +``` + +## Why + +Because Windows has `start`, Macs have `open`, and *nix has `xdg-open`. At least +[according to some guy on StackOverflow](http://stackoverflow.com/q/1480971/3191). And I like things that work on all +three. Like Node.js. And Opener. diff -Nru nodejs-0.11.13/deps/npm/node_modules/osenv/LICENSE nodejs-0.11.15/deps/npm/node_modules/osenv/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/osenv/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/osenv/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,4 @@ -Copyright (c) Isaac Z. Schlueter +Copyright (c) Isaac Z. Schlueter ("Author") All rights reserved. The BSD License @@ -6,20 +6,22 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS -``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/osenv/osenv.js nodejs-0.11.15/deps/npm/node_modules/osenv/osenv.js --- nodejs-0.11.13/deps/npm/node_modules/osenv/osenv.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/osenv/osenv.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ var isWindows = process.platform === 'win32' -var windir = isWindows ? process.env.windir || 'C:\\Windows' : null var path = require('path') var exec = require('child_process').exec +var os = require('os') // looking up envs is a bit costly. // Also, sometimes we want to have a fallback @@ -46,14 +46,7 @@ }, 'hostname') memo('tmpdir', function () { - var t = isWindows ? 'temp' : 'tmp' - return process.env.TMPDIR || - process.env.TMP || - process.env.TEMP || - ( exports.home() ? path.resolve(exports.home(), t) - : isWindows ? path.resolve(windir, t) - : '/tmp' - ) + return os.tmpDir() }) memo('home', function () { diff -Nru nodejs-0.11.13/deps/npm/node_modules/osenv/package.json nodejs-0.11.15/deps/npm/node_modules/osenv/package.json --- nodejs-0.11.13/deps/npm/node_modules/osenv/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/osenv/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,13 +1,13 @@ { "name": "osenv", - "version": "0.0.3", + "version": "0.1.0", "main": "osenv.js", "directories": { "test": "test" }, "dependencies": {}, "devDependencies": { - "tap": "~0.2.5" + "tap": "~0.4.9" }, "scripts": { "test": "tap test/*.js" @@ -33,6 +33,12 @@ "license": "BSD", "description": "Look up environment settings specific to different operating systems", "readme": "# osenv\n\nLook up environment settings specific to different operating systems.\n\n## Usage\n\n```javascript\nvar osenv = require('osenv')\nvar path = osenv.path()\nvar user = osenv.user()\n// etc.\n\n// Some things are not reliably in the env, and have a fallback command:\nvar h = osenv.hostname(function (er, hostname) {\n h = hostname\n})\n// This will 
still cause it to be memoized, so calling osenv.hostname()\n// is now an immediate operation.\n\n// You can always send a cb, which will get called in the nextTick\n// if it's been memoized, or wait for the fallback data if it wasn't\n// found in the environment.\nosenv.hostname(function (er, hostname) {\n if (er) console.error('error looking up hostname')\n else console.log('this machine calls itself %s', hostname)\n})\n```\n\n## osenv.hostname()\n\nThe machine name. Calls `hostname` if not found.\n\n## osenv.user()\n\nThe currently logged-in user. Calls `whoami` if not found.\n\n## osenv.prompt()\n\nEither PS1 on unix, or PROMPT on Windows.\n\n## osenv.tmpdir()\n\nThe place where temporary files should be created.\n\n## osenv.home()\n\nNo place like it.\n\n## osenv.path()\n\nAn array of the places that the operating system will search for\nexecutables.\n\n## osenv.editor() \n\nReturn the executable name of the editor program. This uses the EDITOR\nand VISUAL environment variables, and falls back to `vi` on Unix, or\n`notepad.exe` on Windows.\n\n## osenv.shell()\n\nThe SHELL on Unix, which Windows calls the ComSpec. 
Defaults to 'bash'\nor 'cmd'.\n", - "_id": "osenv@0.0.3", - "_from": "osenv@latest" + "readmeFilename": "README.md", + "bugs": { + "url": "https://github.com/isaacs/osenv/issues" + }, + "homepage": "https://github.com/isaacs/osenv", + "_id": "osenv@0.1.0", + "_shasum": "61668121eec584955030b9f470b1d2309504bfcb", + "_from": "osenv@~0.1.0" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/osenv/test/unix.js nodejs-0.11.15/deps/npm/node_modules/osenv/test/unix.js --- nodejs-0.11.13/deps/npm/node_modules/osenv/test/unix.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/osenv/test/unix.js 2015-01-20 21:22:17.000000000 +0000 @@ -23,7 +23,6 @@ process.env.VISUAL = 'visualedit' process.env.SHELL = 'zsh' - tap.test('basic unix sanity test', function (t) { var osenv = require('../osenv.js') @@ -48,10 +47,6 @@ process.env.TEMP = '' delete require.cache[require.resolve('../osenv.js')] var osenv = require('../osenv.js') - t.equal(osenv.tmpdir(), '/home/sirUser/tmp') - - delete require.cache[require.resolve('../osenv.js')] - var osenv = require('../osenv.js') osenv.home = function () { return null } t.equal(osenv.tmpdir(), '/tmp') diff -Nru nodejs-0.11.13/deps/npm/node_modules/osenv/test/windows.js nodejs-0.11.15/deps/npm/node_modules/osenv/test/windows.js --- nodejs-0.11.13/deps/npm/node_modules/osenv/test/windows.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/osenv/test/windows.js 2015-01-20 21:22:17.000000000 +0000 @@ -12,7 +12,7 @@ // load this before clubbing the platform name. 
var tap = require('tap') -process.env.windir = 'C:\\windows' +process.env.windir = 'c:\\windows' process.env.USERDOMAIN = 'some-domain' process.env.USERNAME = 'sirUser' process.env.USERPROFILE = 'C:\\Users\\sirUser' @@ -29,8 +29,6 @@ tap.test('basic windows sanity test', function (t) { var osenv = require('../osenv.js') - var osenv = require('../osenv.js') - t.equal(osenv.user(), process.env.USERDOMAIN + '\\' + process.env.USERNAME) t.equal(osenv.home(), process.env.USERPROFILE) @@ -53,13 +51,8 @@ process.env.TEMP = '' delete require.cache[require.resolve('../osenv.js')] var osenv = require('../osenv.js') - t.equal(osenv.tmpdir(), 'C:\\Users\\sirUser\\temp') - - process.env.TEMP = '' - delete require.cache[require.resolve('../osenv.js')] - var osenv = require('../osenv.js') osenv.home = function () { return null } - t.equal(osenv.tmpdir(), 'C:\\windows\\temp') + t.equal(osenv.tmpdir(), 'c:\\windows\\temp') t.equal(osenv.editor(), 'edit') process.env.EDITOR = '' diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/duplex.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/duplex.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/duplex.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/duplex.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_duplex.js") diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_duplex.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_duplex.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_duplex.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_duplex.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,89 @@ +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a duplex stream is just a stream that is both readable and writable. +// Since JS doesn't have multiple prototypal inheritance, this class +// prototypally inherits from Readable, and then parasitically from +// Writable. 
+ +module.exports = Duplex; + +/**/ +var objectKeys = Object.keys || function (obj) { + var keys = []; + for (var key in obj) keys.push(key); + return keys; +} +/**/ + + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + +var Readable = require('./_stream_readable'); +var Writable = require('./_stream_writable'); + +util.inherits(Duplex, Readable); + +forEach(objectKeys(Writable.prototype), function(method) { + if (!Duplex.prototype[method]) + Duplex.prototype[method] = Writable.prototype[method]; +}); + +function Duplex(options) { + if (!(this instanceof Duplex)) + return new Duplex(options); + + Readable.call(this, options); + Writable.call(this, options); + + if (options && options.readable === false) + this.readable = false; + + if (options && options.writable === false) + this.writable = false; + + this.allowHalfOpen = true; + if (options && options.allowHalfOpen === false) + this.allowHalfOpen = false; + + this.once('end', onend); +} + +// the no-half-open enforcer +function onend() { + // if we allow half-open state, or if the writable side ended, + // then we're ok. + if (this.allowHalfOpen || this._writableState.ended) + return; + + // no more data can be written. + // But allow more writes to happen in this tick. + process.nextTick(this.end.bind(this)); +} + +function forEach (xs, f) { + for (var i = 0, l = xs.length; i < l; i++) { + f(xs[i], i); + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_passthrough.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_passthrough.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_passthrough.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_passthrough.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a passthrough stream. +// basically just the most minimal sort of Transform stream. +// Every written chunk gets output as-is. 
+ +module.exports = PassThrough; + +var Transform = require('./_stream_transform'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + +util.inherits(PassThrough, Transform); + +function PassThrough(options) { + if (!(this instanceof PassThrough)) + return new PassThrough(options); + + Transform.call(this, options); +} + +PassThrough.prototype._transform = function(chunk, encoding, cb) { + cb(null, chunk); +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_readable.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_readable.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_readable.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_readable.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,982 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +module.exports = Readable; + +/**/ +var isArray = require('isarray'); +/**/ + + +/**/ +var Buffer = require('buffer').Buffer; +/**/ + +Readable.ReadableState = ReadableState; + +var EE = require('events').EventEmitter; + +/**/ +if (!EE.listenerCount) EE.listenerCount = function(emitter, type) { + return emitter.listeners(type).length; +}; +/**/ + +var Stream = require('stream'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + +var StringDecoder; + +util.inherits(Readable, Stream); + +function ReadableState(options, stream) { + options = options || {}; + + // the point at which it stops calling _read() to fill the buffer + // Note: 0 is a valid value, means "don't call _read preemptively ever" + var hwm = options.highWaterMark; + this.highWaterMark = (hwm || hwm === 0) ? hwm : 16 * 1024; + + // cast to ints. + this.highWaterMark = ~~this.highWaterMark; + + this.buffer = []; + this.length = 0; + this.pipes = null; + this.pipesCount = 0; + this.flowing = false; + this.ended = false; + this.endEmitted = false; + this.reading = false; + + // In streams that never have any data, and do push(null) right away, + // the consumer can miss the 'end' event if they do some I/O before + // consuming the stream. So, we don't emit('end') until some reading + // happens. + this.calledRead = false; + + // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. We set this to true at first, becuase any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. 
+ this.sync = true; + + // whenever we return null, then we set a flag to say + // that we're awaiting a 'readable' event emission. + this.needReadable = false; + this.emittedReadable = false; + this.readableListening = false; + + + // object stream flag. Used to make read(n) ignore n and to + // make all the buffer merging and length checks go away + this.objectMode = !!options.objectMode; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // when piping, we only care about 'readable' events that happen + // after read()ing all the bytes and not getting any pushback. + this.ranOut = false; + + // the number of writers that are awaiting a drain event in .pipe()s + this.awaitDrain = 0; + + // if true, a maybeReadMore has been scheduled + this.readingMore = false; + + this.decoder = null; + this.encoding = null; + if (options.encoding) { + if (!StringDecoder) + StringDecoder = require('string_decoder/').StringDecoder; + this.decoder = new StringDecoder(options.encoding); + this.encoding = options.encoding; + } +} + +function Readable(options) { + if (!(this instanceof Readable)) + return new Readable(options); + + this._readableState = new ReadableState(options, this); + + // legacy + this.readable = true; + + Stream.call(this); +} + +// Manually shove something into the read() buffer. +// This returns true if the highWaterMark has not been hit yet, +// similar to how Writable.write() returns true if you should +// write() some more. 
+Readable.prototype.push = function(chunk, encoding) { + var state = this._readableState; + + if (typeof chunk === 'string' && !state.objectMode) { + encoding = encoding || state.defaultEncoding; + if (encoding !== state.encoding) { + chunk = new Buffer(chunk, encoding); + encoding = ''; + } + } + + return readableAddChunk(this, state, chunk, encoding, false); +}; + +// Unshift should *always* be something directly out of read() +Readable.prototype.unshift = function(chunk) { + var state = this._readableState; + return readableAddChunk(this, state, chunk, '', true); +}; + +function readableAddChunk(stream, state, chunk, encoding, addToFront) { + var er = chunkInvalid(state, chunk); + if (er) { + stream.emit('error', er); + } else if (chunk === null || chunk === undefined) { + state.reading = false; + if (!state.ended) + onEofChunk(stream, state); + } else if (state.objectMode || chunk && chunk.length > 0) { + if (state.ended && !addToFront) { + var e = new Error('stream.push() after EOF'); + stream.emit('error', e); + } else if (state.endEmitted && addToFront) { + var e = new Error('stream.unshift() after end event'); + stream.emit('error', e); + } else { + if (state.decoder && !addToFront && !encoding) + chunk = state.decoder.write(chunk); + + // update the buffer info. + state.length += state.objectMode ? 1 : chunk.length; + if (addToFront) { + state.buffer.unshift(chunk); + } else { + state.reading = false; + state.buffer.push(chunk); + } + + if (state.needReadable) + emitReadable(stream); + + maybeReadMore(stream, state); + } + } else if (!addToFront) { + state.reading = false; + } + + return needMoreData(state); +} + + + +// if it's past the high water mark, we can push in some more. +// Also, if we have no data yet, we can stand some +// more bytes. This is to work around cases where hwm=0, +// such as the repl. 
Also, if the push() triggered a +// readable event, and the user called read(largeNumber) such that +// needReadable was set, then we ought to push more, so that another +// 'readable' event will be triggered. +function needMoreData(state) { + return !state.ended && + (state.needReadable || + state.length < state.highWaterMark || + state.length === 0); +} + +// backwards compatibility. +Readable.prototype.setEncoding = function(enc) { + if (!StringDecoder) + StringDecoder = require('string_decoder/').StringDecoder; + this._readableState.decoder = new StringDecoder(enc); + this._readableState.encoding = enc; +}; + +// Don't raise the hwm > 128MB +var MAX_HWM = 0x800000; +function roundUpToNextPowerOf2(n) { + if (n >= MAX_HWM) { + n = MAX_HWM; + } else { + // Get the next highest power of 2 + n--; + for (var p = 1; p < 32; p <<= 1) n |= n >> p; + n++; + } + return n; +} + +function howMuchToRead(n, state) { + if (state.length === 0 && state.ended) + return 0; + + if (state.objectMode) + return n === 0 ? 0 : 1; + + if (n === null || isNaN(n)) { + // only flow one buffer at a time + if (state.flowing && state.buffer.length) + return state.buffer[0].length; + else + return state.length; + } + + if (n <= 0) + return 0; + + // If we're asking for more than the target buffer level, + // then raise the water mark. Bump up to the next highest + // power of 2, to prevent increasing it excessively in tiny + // amounts. + if (n > state.highWaterMark) + state.highWaterMark = roundUpToNextPowerOf2(n); + + // don't have that much. return null, unless we've ended. + if (n > state.length) { + if (!state.ended) { + state.needReadable = true; + return 0; + } else + return state.length; + } + + return n; +} + +// you can override either this method, or the async _read(n) below. 
+Readable.prototype.read = function(n) { + var state = this._readableState; + state.calledRead = true; + var nOrig = n; + var ret; + + if (typeof n !== 'number' || n > 0) + state.emittedReadable = false; + + // if we're doing read(0) to trigger a readable event, but we + // already have a bunch of data in the buffer, then just trigger + // the 'readable' event and move on. + if (n === 0 && + state.needReadable && + (state.length >= state.highWaterMark || state.ended)) { + emitReadable(this); + return null; + } + + n = howMuchToRead(n, state); + + // if we've ended, and we're now clear, then finish it up. + if (n === 0 && state.ended) { + ret = null; + + // In cases where the decoder did not receive enough data + // to produce a full chunk, then immediately received an + // EOF, state.buffer will contain [, ]. + // howMuchToRead will see this and coerce the amount to + // read to zero (because it's looking at the length of the + // first in state.buffer), and we'll end up here. + // + // This can only happen via state.decoder -- no other venue + // exists for pushing a zero-length chunk into state.buffer + // and triggering this behavior. In this case, we return our + // remaining data and end the stream, if appropriate. + if (state.length > 0 && state.decoder) { + ret = fromList(n, state); + state.length -= ret.length; + } + + if (state.length === 0) + endReadable(this); + + return ret; + } + + // All the actual chunk generation logic needs to be + // *below* the call to _read. The reason is that in certain + // synthetic stream cases, such as passthrough streams, _read + // may be a completely synchronous operation which may change + // the state of the read buffer, providing enough data when + // before there was *not* enough. + // + // So, the steps are: + // 1. Figure out what the state of things will be after we do + // a read from the buffer. + // + // 2. If that resulting state will trigger a _read, then call _read. 
+ // Note that this may be asynchronous, or synchronous. Yes, it is + // deeply ugly to write APIs this way, but that still doesn't mean + // that the Readable class should behave improperly, as streams are + // designed to be sync/async agnostic. + // Take note if the _read call is sync or async (ie, if the read call + // has returned yet), so that we know whether or not it's safe to emit + // 'readable' etc. + // + // 3. Actually pull the requested chunks out of the buffer and return. + + // if we need a readable event, then we need to do some reading. + var doRead = state.needReadable; + + // if we currently have less than the highWaterMark, then also read some + if (state.length - n <= state.highWaterMark) + doRead = true; + + // however, if we've ended, then there's no point, and if we're already + // reading, then it's unnecessary. + if (state.ended || state.reading) + doRead = false; + + if (doRead) { + state.reading = true; + state.sync = true; + // if the length is currently zero, then we *need* a readable event. + if (state.length === 0) + state.needReadable = true; + // call internal read method + this._read(state.highWaterMark); + state.sync = false; + } + + // If _read called its callback synchronously, then `reading` + // will be false, and we need to re-evaluate how much data we + // can return to the user. + if (doRead && !state.reading) + n = howMuchToRead(nOrig, state); + + if (n > 0) + ret = fromList(n, state); + else + ret = null; + + if (ret === null) { + state.needReadable = true; + n = 0; + } + + state.length -= n; + + // If we have nothing in the buffer, then we want to know + // as soon as we *do* get something into the buffer. + if (state.length === 0 && !state.ended) + state.needReadable = true; + + // If we happened to read() exactly the remaining amount in the + // buffer, and the EOF has been seen at this point, then make sure + // that we emit 'end' on the very next tick. 
+ if (state.ended && !state.endEmitted && state.length === 0) + endReadable(this); + + return ret; +}; + +function chunkInvalid(state, chunk) { + var er = null; + if (!Buffer.isBuffer(chunk) && + 'string' !== typeof chunk && + chunk !== null && + chunk !== undefined && + !state.objectMode) { + er = new TypeError('Invalid non-string/buffer chunk'); + } + return er; +} + + +function onEofChunk(stream, state) { + if (state.decoder && !state.ended) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) { + state.buffer.push(chunk); + state.length += state.objectMode ? 1 : chunk.length; + } + } + state.ended = true; + + // if we've ended and we have some data left, then emit + // 'readable' now to make sure it gets picked up. + if (state.length > 0) + emitReadable(stream); + else + endReadable(stream); +} + +// Don't emit readable right away in sync mode, because this can trigger +// another read() call => stack overflow. This way, it might trigger +// a nextTick recursion warning, but that's not so bad. +function emitReadable(stream) { + var state = stream._readableState; + state.needReadable = false; + if (state.emittedReadable) + return; + + state.emittedReadable = true; + if (state.sync) + process.nextTick(function() { + emitReadable_(stream); + }); + else + emitReadable_(stream); +} + +function emitReadable_(stream) { + stream.emit('readable'); +} + + +// at this point, the user has presumably seen the 'readable' event, +// and called read() to consume some data. that may have triggered +// in turn another _read(n) call, in which case reading = true if +// it's in progress. +// However, if we're not ended, or reading, and the length < hwm, +// then go ahead and try to read some more preemptively. 
+function maybeReadMore(stream, state) { + if (!state.readingMore) { + state.readingMore = true; + process.nextTick(function() { + maybeReadMore_(stream, state); + }); + } +} + +function maybeReadMore_(stream, state) { + var len = state.length; + while (!state.reading && !state.flowing && !state.ended && + state.length < state.highWaterMark) { + stream.read(0); + if (len === state.length) + // didn't get any data, stop spinning. + break; + else + len = state.length; + } + state.readingMore = false; +} + +// abstract method. to be overridden in specific implementation classes. +// call cb(er, data) where data is <= n in length. +// for virtual (non-string, non-buffer) streams, "length" is somewhat +// arbitrary, and perhaps not very meaningful. +Readable.prototype._read = function(n) { + this.emit('error', new Error('not implemented')); +}; + +Readable.prototype.pipe = function(dest, pipeOpts) { + var src = this; + var state = this._readableState; + + switch (state.pipesCount) { + case 0: + state.pipes = dest; + break; + case 1: + state.pipes = [state.pipes, dest]; + break; + default: + state.pipes.push(dest); + break; + } + state.pipesCount += 1; + + var doEnd = (!pipeOpts || pipeOpts.end !== false) && + dest !== process.stdout && + dest !== process.stderr; + + var endFn = doEnd ? onend : cleanup; + if (state.endEmitted) + process.nextTick(endFn); + else + src.once('end', endFn); + + dest.on('unpipe', onunpipe); + function onunpipe(readable) { + if (readable !== src) return; + cleanup(); + } + + function onend() { + dest.end(); + } + + // when the dest drains, it reduces the awaitDrain counter + // on the source. This would be more elegant with a .once() + // handler in flow(), but adding and removing repeatedly is + // too slow. 
+ var ondrain = pipeOnDrain(src); + dest.on('drain', ondrain); + + function cleanup() { + // cleanup event handlers once the pipe is broken + dest.removeListener('close', onclose); + dest.removeListener('finish', onfinish); + dest.removeListener('drain', ondrain); + dest.removeListener('error', onerror); + dest.removeListener('unpipe', onunpipe); + src.removeListener('end', onend); + src.removeListener('end', cleanup); + + // if the reader is waiting for a drain event from this + // specific writer, then it would cause it to never start + // flowing again. + // So, if this is awaiting a drain, then we just call it now. + // If we don't know, then assume that we are waiting for one. + if (!dest._writableState || dest._writableState.needDrain) + ondrain(); + } + + // if the dest has an error, then stop piping into it. + // however, don't suppress the throwing behavior for this. + function onerror(er) { + unpipe(); + dest.removeListener('error', onerror); + if (EE.listenerCount(dest, 'error') === 0) + dest.emit('error', er); + } + // This is a brutally ugly hack to make sure that our error handler + // is attached before any userland ones. NEVER DO THIS. + if (!dest._events || !dest._events.error) + dest.on('error', onerror); + else if (isArray(dest._events.error)) + dest._events.error.unshift(onerror); + else + dest._events.error = [onerror, dest._events.error]; + + + + // Both close and finish should trigger unpipe, but only once. + function onclose() { + dest.removeListener('finish', onfinish); + unpipe(); + } + dest.once('close', onclose); + function onfinish() { + dest.removeListener('close', onclose); + unpipe(); + } + dest.once('finish', onfinish); + + function unpipe() { + src.unpipe(dest); + } + + // tell the dest that it's being piped to + dest.emit('pipe', src); + + // start the flow if it hasn't been started already. + if (!state.flowing) { + // the handler that waits for readable events after all + // the data gets sucked out in flow. 
+ // This would be easier to follow with a .once() handler + // in flow(), but that is too slow. + this.on('readable', pipeOnReadable); + + state.flowing = true; + process.nextTick(function() { + flow(src); + }); + } + + return dest; +}; + +function pipeOnDrain(src) { + return function() { + var dest = this; + var state = src._readableState; + state.awaitDrain--; + if (state.awaitDrain === 0) + flow(src); + }; +} + +function flow(src) { + var state = src._readableState; + var chunk; + state.awaitDrain = 0; + + function write(dest, i, list) { + var written = dest.write(chunk); + if (false === written) { + state.awaitDrain++; + } + } + + while (state.pipesCount && null !== (chunk = src.read())) { + + if (state.pipesCount === 1) + write(state.pipes, 0, null); + else + forEach(state.pipes, write); + + src.emit('data', chunk); + + // if anyone needs a drain, then we have to wait for that. + if (state.awaitDrain > 0) + return; + } + + // if every destination was unpiped, either before entering this + // function, or in the while loop, then stop flowing. + // + // NB: This is a pretty rare edge case. + if (state.pipesCount === 0) { + state.flowing = false; + + // if there were data event listeners added, then switch to old mode. + if (EE.listenerCount(src, 'data') > 0) + emitDataEvents(src); + return; + } + + // at this point, no one needed a drain, so we just ran out of data + // on the next readable event, start it over again. + state.ranOut = true; +} + +function pipeOnReadable() { + if (this._readableState.ranOut) { + this._readableState.ranOut = false; + flow(this); + } +} + + +Readable.prototype.unpipe = function(dest) { + var state = this._readableState; + + // if we're not piping anywhere, then do nothing. + if (state.pipesCount === 0) + return this; + + // just one destination. most common case. + if (state.pipesCount === 1) { + // passed in one, but it's not the right one. 
+ if (dest && dest !== state.pipes) + return this; + + if (!dest) + dest = state.pipes; + + // got a match. + state.pipes = null; + state.pipesCount = 0; + this.removeListener('readable', pipeOnReadable); + state.flowing = false; + if (dest) + dest.emit('unpipe', this); + return this; + } + + // slow case. multiple pipe destinations. + + if (!dest) { + // remove all. + var dests = state.pipes; + var len = state.pipesCount; + state.pipes = null; + state.pipesCount = 0; + this.removeListener('readable', pipeOnReadable); + state.flowing = false; + + for (var i = 0; i < len; i++) + dests[i].emit('unpipe', this); + return this; + } + + // try to find the right one. + var i = indexOf(state.pipes, dest); + if (i === -1) + return this; + + state.pipes.splice(i, 1); + state.pipesCount -= 1; + if (state.pipesCount === 1) + state.pipes = state.pipes[0]; + + dest.emit('unpipe', this); + + return this; +}; + +// set up data events if they are asked for +// Ensure readable listeners eventually get something +Readable.prototype.on = function(ev, fn) { + var res = Stream.prototype.on.call(this, ev, fn); + + if (ev === 'data' && !this._readableState.flowing) + emitDataEvents(this); + + if (ev === 'readable' && this.readable) { + var state = this._readableState; + if (!state.readableListening) { + state.readableListening = true; + state.emittedReadable = false; + state.needReadable = true; + if (!state.reading) { + this.read(0); + } else if (state.length) { + emitReadable(this, state); + } + } + } + + return res; +}; +Readable.prototype.addListener = Readable.prototype.on; + +// pause() and resume() are remnants of the legacy readable stream API +// If the user uses them, then switch into old mode. 
+Readable.prototype.resume = function() { + emitDataEvents(this); + this.read(0); + this.emit('resume'); +}; + +Readable.prototype.pause = function() { + emitDataEvents(this, true); + this.emit('pause'); +}; + +function emitDataEvents(stream, startPaused) { + var state = stream._readableState; + + if (state.flowing) { + // https://github.com/isaacs/readable-stream/issues/16 + throw new Error('Cannot switch to old mode now.'); + } + + var paused = startPaused || false; + var readable = false; + + // convert to an old-style stream. + stream.readable = true; + stream.pipe = Stream.prototype.pipe; + stream.on = stream.addListener = Stream.prototype.on; + + stream.on('readable', function() { + readable = true; + + var c; + while (!paused && (null !== (c = stream.read()))) + stream.emit('data', c); + + if (c === null) { + readable = false; + stream._readableState.needReadable = true; + } + }); + + stream.pause = function() { + paused = true; + this.emit('pause'); + }; + + stream.resume = function() { + paused = false; + if (readable) + process.nextTick(function() { + stream.emit('readable'); + }); + else + this.read(0); + this.emit('resume'); + }; + + // now make it start, just in case it hadn't already. + stream.emit('readable'); +} + +// wrap an old-style stream as the async data source. +// This is *not* part of the readable stream interface. +// It is an ugly unfortunate mess of history. 
+Readable.prototype.wrap = function(stream) { + var state = this._readableState; + var paused = false; + + var self = this; + stream.on('end', function() { + if (state.decoder && !state.ended) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) + self.push(chunk); + } + + self.push(null); + }); + + stream.on('data', function(chunk) { + if (state.decoder) + chunk = state.decoder.write(chunk); + + // don't skip over falsy values in objectMode + //if (state.objectMode && util.isNullOrUndefined(chunk)) + if (state.objectMode && (chunk === null || chunk === undefined)) + return; + else if (!state.objectMode && (!chunk || !chunk.length)) + return; + + var ret = self.push(chunk); + if (!ret) { + paused = true; + stream.pause(); + } + }); + + // proxy all the other methods. + // important when wrapping filters and duplexes. + for (var i in stream) { + if (typeof stream[i] === 'function' && + typeof this[i] === 'undefined') { + this[i] = function(method) { return function() { + return stream[method].apply(stream, arguments); + }}(i); + } + } + + // proxy certain important events. + var events = ['error', 'close', 'destroy', 'pause', 'resume']; + forEach(events, function(ev) { + stream.on(ev, self.emit.bind(self, ev)); + }); + + // when we try to consume some more bytes, simply unpause the + // underlying stream. + self._read = function(n) { + if (paused) { + paused = false; + stream.resume(); + } + }; + + return self; +}; + + + +// exposed for testing purposes only. +Readable._fromList = fromList; + +// Pluck off n bytes from an array of buffers. +// Length is the combined lengths of all the buffers in the list. +function fromList(n, state) { + var list = state.buffer; + var length = state.length; + var stringMode = !!state.decoder; + var objectMode = !!state.objectMode; + var ret; + + // nothing in the list, definitely empty. 
+ if (list.length === 0) + return null; + + if (length === 0) + ret = null; + else if (objectMode) + ret = list.shift(); + else if (!n || n >= length) { + // read it all, truncate the array. + if (stringMode) + ret = list.join(''); + else + ret = Buffer.concat(list, length); + list.length = 0; + } else { + // read just some of it. + if (n < list[0].length) { + // just take a part of the first list item. + // slice is the same for buffers and strings. + var buf = list[0]; + ret = buf.slice(0, n); + list[0] = buf.slice(n); + } else if (n === list[0].length) { + // first list is a perfect match + ret = list.shift(); + } else { + // complex case. + // we have enough to cover it, but it spans past the first buffer. + if (stringMode) + ret = ''; + else + ret = new Buffer(n); + + var c = 0; + for (var i = 0, l = list.length; i < l && c < n; i++) { + var buf = list[0]; + var cpy = Math.min(n - c, buf.length); + + if (stringMode) + ret += buf.slice(0, cpy); + else + buf.copy(ret, c, 0, cpy); + + if (cpy < buf.length) + list[0] = buf.slice(cpy); + else + list.shift(); + + c += cpy; + } + } + } + + return ret; +} + +function endReadable(stream) { + var state = stream._readableState; + + // If we get here before consuming all the bytes, then that is a + // bug in node. Should never happen. + if (state.length > 0) + throw new Error('endReadable called on non-empty stream'); + + if (!state.endEmitted && state.calledRead) { + state.ended = true; + process.nextTick(function() { + // Check that we didn't get one last unshift. 
+ if (!state.endEmitted && state.length === 0) { + state.endEmitted = true; + stream.readable = false; + stream.emit('end'); + } + }); + } +} + +function forEach (xs, f) { + for (var i = 0, l = xs.length; i < l; i++) { + f(xs[i], i); + } +} + +function indexOf (xs, x) { + for (var i = 0, l = xs.length; i < l; i++) { + if (xs[i] === x) return i; + } + return -1; +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_transform.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_transform.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_transform.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_transform.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,210 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +// a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. 
In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + +module.exports = Transform; + +var Duplex = require('./_stream_duplex'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + +util.inherits(Transform, Duplex); + + +function TransformState(options, stream) { + this.afterTransform = function(er, data) { + return afterTransform(stream, er, data); + }; + + this.needTransform = false; + this.transforming = false; + this.writecb = null; + this.writechunk = null; +} + +function afterTransform(stream, er, data) { + var ts = stream._transformState; + ts.transforming = false; + + var cb = ts.writecb; + + if (!cb) + return stream.emit('error', new Error('no writecb in Transform class')); + + ts.writechunk = null; + ts.writecb = null; + + if (data !== null && data !== undefined) + stream.push(data); + + if (cb) + cb(er); + + var rs = stream._readableState; + rs.reading = false; + if (rs.needReadable || rs.length < rs.highWaterMark) { + stream._read(rs.highWaterMark); + } +} + + +function Transform(options) { + if (!(this instanceof Transform)) + return new Transform(options); + + Duplex.call(this, options); + + var ts = this._transformState = new TransformState(options, this); + + // when the writable side finishes, then flush out anything remaining. + var stream = this; + + // start out asking for a readable event once data is transformed. + this._readableState.needReadable = true; + + // we have implemented the _read method, and done the other things + // that Readable wants before the first _read call, so unset the + // sync guard flag. 
+ this._readableState.sync = false; + + this.once('finish', function() { + if ('function' === typeof this._flush) + this._flush(function(er) { + done(stream, er); + }); + else + done(stream); + }); +} + +Transform.prototype.push = function(chunk, encoding) { + this._transformState.needTransform = false; + return Duplex.prototype.push.call(this, chunk, encoding); +}; + +// This is the part where you do stuff! +// override this function in implementation classes. +// 'chunk' is an input chunk. +// +// Call `push(newChunk)` to pass along transformed output +// to the readable side. You may call 'push' zero or more times. +// +// Call `cb(err)` when you are done with this chunk. If you pass +// an error, then that'll put the hurt on the whole operation. If you +// never call cb(), then you'll never get another chunk. +Transform.prototype._transform = function(chunk, encoding, cb) { + throw new Error('not implemented'); +}; + +Transform.prototype._write = function(chunk, encoding, cb) { + var ts = this._transformState; + ts.writecb = cb; + ts.writechunk = chunk; + ts.writeencoding = encoding; + if (!ts.transforming) { + var rs = this._readableState; + if (ts.needTransform || + rs.needReadable || + rs.length < rs.highWaterMark) + this._read(rs.highWaterMark); + } +}; + +// Doesn't matter what the args are here. +// _transform does all the work. +// That we got here means that the readable side wants more data. +Transform.prototype._read = function(n) { + var ts = this._transformState; + + if (ts.writechunk !== null && ts.writecb && !ts.transforming) { + ts.transforming = true; + this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); + } else { + // mark that we need a transform, so that any data that comes in + // will get processed, now that we've asked for it. 
+ ts.needTransform = true; + } +}; + + +function done(stream, er) { + if (er) + return stream.emit('error', er); + + // if there's nothing in the write buffer, then that means + // that nothing more will ever be provided + var ws = stream._writableState; + var rs = stream._readableState; + var ts = stream._transformState; + + if (ws.length) + throw new Error('calling transform done when ws.length != 0'); + + if (ts.transforming) + throw new Error('calling transform done when still transforming'); + + return stream.push(null); +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_writable.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_writable.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/lib/_stream_writable.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/lib/_stream_writable.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,386 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// A bit simpler than readable streams. +// Implement an async ._write(chunk, cb), and it'll handle all +// the drain event emission and buffering. + +module.exports = Writable; + +/**/ +var Buffer = require('buffer').Buffer; +/**/ + +Writable.WritableState = WritableState; + + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + +var Stream = require('stream'); + +util.inherits(Writable, Stream); + +function WriteReq(chunk, encoding, cb) { + this.chunk = chunk; + this.encoding = encoding; + this.callback = cb; +} + +function WritableState(options, stream) { + options = options || {}; + + // the point at which write() starts returning false + // Note: 0 is a valid value, means that we always return false if + // the entire buffer is not flushed immediately on write() + var hwm = options.highWaterMark; + this.highWaterMark = (hwm || hwm === 0) ? hwm : 16 * 1024; + + // object stream flag to indicate whether or not this stream + // contains buffers or objects. + this.objectMode = !!options.objectMode; + + // cast to ints. + this.highWaterMark = ~~this.highWaterMark; + + this.needDrain = false; + // at the start of calling end() + this.ending = false; + // when end() has been called, and returned + this.ended = false; + // when 'finish' is emitted + this.finished = false; + + // should we decode strings into buffers before passing to _write? + // this is here so that some node-core streams can optimize string + // handling at a lower level. + var noDecode = options.decodeStrings === false; + this.decodeStrings = !noDecode; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. 
+ // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // not an actual buffer we keep track of, but a measurement + // of how much we're waiting to get pushed to some underlying + // socket or file. + this.length = 0; + + // a flag to see when we're in the middle of a write. + this.writing = false; + + // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. We set this to true at first, becuase any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. + this.sync = true; + + // a flag to know if we're processing previously buffered items, which + // may call the _write() callback in the same tick, so that we don't + // end up in an overlapped onwrite situation. + this.bufferProcessing = false; + + // the callback that's passed to _write(chunk,cb) + this.onwrite = function(er) { + onwrite(stream, er); + }; + + // the callback that the user supplies to write(chunk,encoding,cb) + this.writecb = null; + + // the amount that is being written when _write is called. + this.writelen = 0; + + this.buffer = []; + + // True if the error was already emitted and should not be thrown again + this.errorEmitted = false; +} + +function Writable(options) { + var Duplex = require('./_stream_duplex'); + + // Writable ctor is applied to Duplexes, though they're not + // instanceof Writable, they're instanceof Readable. + if (!(this instanceof Writable) && !(this instanceof Duplex)) + return new Writable(options); + + this._writableState = new WritableState(options, this); + + // legacy. + this.writable = true; + + Stream.call(this); +} + +// Otherwise people can pipe Writable streams, which is just wrong. +Writable.prototype.pipe = function() { + this.emit('error', new Error('Cannot pipe. 
Not readable.')); +}; + + +function writeAfterEnd(stream, state, cb) { + var er = new Error('write after end'); + // TODO: defer error events consistently everywhere, not just the cb + stream.emit('error', er); + process.nextTick(function() { + cb(er); + }); +} + +// If we get something that is not a buffer, string, null, or undefined, +// and we're not in objectMode, then that's an error. +// Otherwise stream chunks are all considered to be of length=1, and the +// watermarks determine how many objects to keep in the buffer, rather than +// how many bytes or characters. +function validChunk(stream, state, chunk, cb) { + var valid = true; + if (!Buffer.isBuffer(chunk) && + 'string' !== typeof chunk && + chunk !== null && + chunk !== undefined && + !state.objectMode) { + var er = new TypeError('Invalid non-string/buffer chunk'); + stream.emit('error', er); + process.nextTick(function() { + cb(er); + }); + valid = false; + } + return valid; +} + +Writable.prototype.write = function(chunk, encoding, cb) { + var state = this._writableState; + var ret = false; + + if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (Buffer.isBuffer(chunk)) + encoding = 'buffer'; + else if (!encoding) + encoding = state.defaultEncoding; + + if (typeof cb !== 'function') + cb = function() {}; + + if (state.ended) + writeAfterEnd(this, state, cb); + else if (validChunk(this, state, chunk, cb)) + ret = writeOrBuffer(this, state, chunk, encoding, cb); + + return ret; +}; + +function decodeChunk(state, chunk, encoding) { + if (!state.objectMode && + state.decodeStrings !== false && + typeof chunk === 'string') { + chunk = new Buffer(chunk, encoding); + } + return chunk; +} + +// if we're already writing something, then just put this +// in the queue, and wait our turn. Otherwise, call _write +// If we return false, then we need a drain event, so set that flag. 
+function writeOrBuffer(stream, state, chunk, encoding, cb) { + chunk = decodeChunk(state, chunk, encoding); + if (Buffer.isBuffer(chunk)) + encoding = 'buffer'; + var len = state.objectMode ? 1 : chunk.length; + + state.length += len; + + var ret = state.length < state.highWaterMark; + // we must ensure that previous needDrain will not be reset to false. + if (!ret) + state.needDrain = true; + + if (state.writing) + state.buffer.push(new WriteReq(chunk, encoding, cb)); + else + doWrite(stream, state, len, chunk, encoding, cb); + + return ret; +} + +function doWrite(stream, state, len, chunk, encoding, cb) { + state.writelen = len; + state.writecb = cb; + state.writing = true; + state.sync = true; + stream._write(chunk, encoding, state.onwrite); + state.sync = false; +} + +function onwriteError(stream, state, sync, er, cb) { + if (sync) + process.nextTick(function() { + cb(er); + }); + else + cb(er); + + stream._writableState.errorEmitted = true; + stream.emit('error', er); +} + +function onwriteStateUpdate(state) { + state.writing = false; + state.writecb = null; + state.length -= state.writelen; + state.writelen = 0; +} + +function onwrite(stream, er) { + var state = stream._writableState; + var sync = state.sync; + var cb = state.writecb; + + onwriteStateUpdate(state); + + if (er) + onwriteError(stream, state, sync, er, cb); + else { + // Check if we're actually ready to finish, but don't emit yet + var finished = needFinish(stream, state); + + if (!finished && !state.bufferProcessing && state.buffer.length) + clearBuffer(stream, state); + + if (sync) { + process.nextTick(function() { + afterWrite(stream, state, finished, cb); + }); + } else { + afterWrite(stream, state, finished, cb); + } + } +} + +function afterWrite(stream, state, finished, cb) { + if (!finished) + onwriteDrain(stream, state); + cb(); + if (finished) + finishMaybe(stream, state); +} + +// Must force callback to be called on nextTick, so that we don't +// emit 'drain' before the write() 
consumer gets the 'false' return +// value, and has a chance to attach a 'drain' listener. +function onwriteDrain(stream, state) { + if (state.length === 0 && state.needDrain) { + state.needDrain = false; + stream.emit('drain'); + } +} + + +// if there's something in the buffer waiting, then process it +function clearBuffer(stream, state) { + state.bufferProcessing = true; + + for (var c = 0; c < state.buffer.length; c++) { + var entry = state.buffer[c]; + var chunk = entry.chunk; + var encoding = entry.encoding; + var cb = entry.callback; + var len = state.objectMode ? 1 : chunk.length; + + doWrite(stream, state, len, chunk, encoding, cb); + + // if we didn't call the onwrite immediately, then + // it means that we need to wait until it does. + // also, that means that the chunk and cb are currently + // being processed, so move the buffer counter past them. + if (state.writing) { + c++; + break; + } + } + + state.bufferProcessing = false; + if (c < state.buffer.length) + state.buffer = state.buffer.slice(c); + else + state.buffer.length = 0; +} + +Writable.prototype._write = function(chunk, encoding, cb) { + cb(new Error('not implemented')); +}; + +Writable.prototype.end = function(chunk, encoding, cb) { + var state = this._writableState; + + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (typeof chunk !== 'undefined' && chunk !== null) + this.write(chunk, encoding); + + // ignore unnecessary end() calls. 
+ if (!state.ending && !state.finished) + endWritable(this, state, cb); +}; + + +function needFinish(stream, state) { + return (state.ending && + state.length === 0 && + !state.finished && + !state.writing); +} + +function finishMaybe(stream, state) { + var need = needFinish(stream, state); + if (need) { + state.finished = true; + stream.emit('finish'); + } + return need; +} + +function endWritable(stream, state, cb) { + state.ending = true; + finishMaybe(stream, state); + if (cb) { + if (state.finished) + process.nextTick(cb); + else + stream.once('finish', cb); + } + state.ended = true; +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/LICENSE nodejs-0.11.15/deps/npm/node_modules/readable-stream/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +Copyright (c) Isaac Z. Schlueter ("Author") +All rights reserved. + +The BSD License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/float.patch nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/float.patch --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/float.patch 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/float.patch 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,604 @@ +diff --git a/lib/util.js b/lib/util.js +index a03e874..9074e8e 100644 +--- a/lib/util.js ++++ b/lib/util.js +@@ -19,430 +19,6 @@ + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-var formatRegExp = /%[sdj%]/g; +-exports.format = function(f) { +- if (!isString(f)) { +- var objects = []; +- for (var i = 0; i < arguments.length; i++) { +- objects.push(inspect(arguments[i])); +- } +- return objects.join(' '); +- } +- +- var i = 1; +- var args = arguments; +- var len = args.length; +- var str = String(f).replace(formatRegExp, function(x) { +- if (x === '%%') return '%'; +- if (i >= len) return x; +- switch (x) { +- case '%s': return String(args[i++]); +- case '%d': return Number(args[i++]); +- case '%j': +- try { +- return JSON.stringify(args[i++]); +- } catch (_) { +- return '[Circular]'; +- } +- default: +- return x; +- } +- }); +- for (var x = args[i]; i < len; x = args[++i]) { +- if (isNull(x) || !isObject(x)) { +- str += ' ' + x; +- } else { +- str += ' ' + inspect(x); +- } +- } +- return str; +-}; +- +- +-// Mark that a method should not be used. +-// Returns a modified function which warns once by default. +-// If --no-deprecation is set, then it is a no-op. +-exports.deprecate = function(fn, msg) { +- // Allow for deprecating things in the process of starting up. 
+- if (isUndefined(global.process)) { +- return function() { +- return exports.deprecate(fn, msg).apply(this, arguments); +- }; +- } +- +- if (process.noDeprecation === true) { +- return fn; +- } +- +- var warned = false; +- function deprecated() { +- if (!warned) { +- if (process.throwDeprecation) { +- throw new Error(msg); +- } else if (process.traceDeprecation) { +- console.trace(msg); +- } else { +- console.error(msg); +- } +- warned = true; +- } +- return fn.apply(this, arguments); +- } +- +- return deprecated; +-}; +- +- +-var debugs = {}; +-var debugEnviron; +-exports.debuglog = function(set) { +- if (isUndefined(debugEnviron)) +- debugEnviron = process.env.NODE_DEBUG || ''; +- set = set.toUpperCase(); +- if (!debugs[set]) { +- if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) { +- var pid = process.pid; +- debugs[set] = function() { +- var msg = exports.format.apply(exports, arguments); +- console.error('%s %d: %s', set, pid, msg); +- }; +- } else { +- debugs[set] = function() {}; +- } +- } +- return debugs[set]; +-}; +- +- +-/** +- * Echos the value of a value. Trys to print the value out +- * in the best way possible given the different types. +- * +- * @param {Object} obj The object to print out. +- * @param {Object} opts Optional options object that alters the output. +- */ +-/* legacy: obj, showHidden, depth, colors*/ +-function inspect(obj, opts) { +- // default options +- var ctx = { +- seen: [], +- stylize: stylizeNoColor +- }; +- // legacy... +- if (arguments.length >= 3) ctx.depth = arguments[2]; +- if (arguments.length >= 4) ctx.colors = arguments[3]; +- if (isBoolean(opts)) { +- // legacy... 
+- ctx.showHidden = opts; +- } else if (opts) { +- // got an "options" object +- exports._extend(ctx, opts); +- } +- // set default options +- if (isUndefined(ctx.showHidden)) ctx.showHidden = false; +- if (isUndefined(ctx.depth)) ctx.depth = 2; +- if (isUndefined(ctx.colors)) ctx.colors = false; +- if (isUndefined(ctx.customInspect)) ctx.customInspect = true; +- if (ctx.colors) ctx.stylize = stylizeWithColor; +- return formatValue(ctx, obj, ctx.depth); +-} +-exports.inspect = inspect; +- +- +-// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics +-inspect.colors = { +- 'bold' : [1, 22], +- 'italic' : [3, 23], +- 'underline' : [4, 24], +- 'inverse' : [7, 27], +- 'white' : [37, 39], +- 'grey' : [90, 39], +- 'black' : [30, 39], +- 'blue' : [34, 39], +- 'cyan' : [36, 39], +- 'green' : [32, 39], +- 'magenta' : [35, 39], +- 'red' : [31, 39], +- 'yellow' : [33, 39] +-}; +- +-// Don't use 'blue' not visible on cmd.exe +-inspect.styles = { +- 'special': 'cyan', +- 'number': 'yellow', +- 'boolean': 'yellow', +- 'undefined': 'grey', +- 'null': 'bold', +- 'string': 'green', +- 'date': 'magenta', +- // "name": intentionally not styling +- 'regexp': 'red' +-}; +- +- +-function stylizeWithColor(str, styleType) { +- var style = inspect.styles[styleType]; +- +- if (style) { +- return '\u001b[' + inspect.colors[style][0] + 'm' + str + +- '\u001b[' + inspect.colors[style][1] + 'm'; +- } else { +- return str; +- } +-} +- +- +-function stylizeNoColor(str, styleType) { +- return str; +-} +- +- +-function arrayToHash(array) { +- var hash = {}; +- +- array.forEach(function(val, idx) { +- hash[val] = true; +- }); +- +- return hash; +-} +- +- +-function formatValue(ctx, value, recurseTimes) { +- // Provide a hook for user-specified inspect functions. 
+- // Check that value is an object with an inspect function on it +- if (ctx.customInspect && +- value && +- isFunction(value.inspect) && +- // Filter out the util module, it's inspect function is special +- value.inspect !== exports.inspect && +- // Also filter out any prototype objects using the circular check. +- !(value.constructor && value.constructor.prototype === value)) { +- var ret = value.inspect(recurseTimes, ctx); +- if (!isString(ret)) { +- ret = formatValue(ctx, ret, recurseTimes); +- } +- return ret; +- } +- +- // Primitive types cannot have properties +- var primitive = formatPrimitive(ctx, value); +- if (primitive) { +- return primitive; +- } +- +- // Look up the keys of the object. +- var keys = Object.keys(value); +- var visibleKeys = arrayToHash(keys); +- +- if (ctx.showHidden) { +- keys = Object.getOwnPropertyNames(value); +- } +- +- // Some type of object without properties can be shortcutted. +- if (keys.length === 0) { +- if (isFunction(value)) { +- var name = value.name ? ': ' + value.name : ''; +- return ctx.stylize('[Function' + name + ']', 'special'); +- } +- if (isRegExp(value)) { +- return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); +- } +- if (isDate(value)) { +- return ctx.stylize(Date.prototype.toString.call(value), 'date'); +- } +- if (isError(value)) { +- return formatError(value); +- } +- } +- +- var base = '', array = false, braces = ['{', '}']; +- +- // Make Array say that they are Array +- if (isArray(value)) { +- array = true; +- braces = ['[', ']']; +- } +- +- // Make functions say that they are functions +- if (isFunction(value)) { +- var n = value.name ? 
': ' + value.name : ''; +- base = ' [Function' + n + ']'; +- } +- +- // Make RegExps say that they are RegExps +- if (isRegExp(value)) { +- base = ' ' + RegExp.prototype.toString.call(value); +- } +- +- // Make dates with properties first say the date +- if (isDate(value)) { +- base = ' ' + Date.prototype.toUTCString.call(value); +- } +- +- // Make error with message first say the error +- if (isError(value)) { +- base = ' ' + formatError(value); +- } +- +- if (keys.length === 0 && (!array || value.length == 0)) { +- return braces[0] + base + braces[1]; +- } +- +- if (recurseTimes < 0) { +- if (isRegExp(value)) { +- return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); +- } else { +- return ctx.stylize('[Object]', 'special'); +- } +- } +- +- ctx.seen.push(value); +- +- var output; +- if (array) { +- output = formatArray(ctx, value, recurseTimes, visibleKeys, keys); +- } else { +- output = keys.map(function(key) { +- return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array); +- }); +- } +- +- ctx.seen.pop(); +- +- return reduceToSingleString(output, base, braces); +-} +- +- +-function formatPrimitive(ctx, value) { +- if (isUndefined(value)) +- return ctx.stylize('undefined', 'undefined'); +- if (isString(value)) { +- var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '') +- .replace(/'/g, "\\'") +- .replace(/\\"/g, '"') + '\''; +- return ctx.stylize(simple, 'string'); +- } +- if (isNumber(value)) { +- // Format -0 as '-0'. Strict equality won't distinguish 0 from -0, +- // so instead we use the fact that 1 / -0 < 0 whereas 1 / 0 > 0 . +- if (value === 0 && 1 / value < 0) +- return ctx.stylize('-0', 'number'); +- return ctx.stylize('' + value, 'number'); +- } +- if (isBoolean(value)) +- return ctx.stylize('' + value, 'boolean'); +- // For some reason typeof null is "object", so special case here. 
+- if (isNull(value)) +- return ctx.stylize('null', 'null'); +-} +- +- +-function formatError(value) { +- return '[' + Error.prototype.toString.call(value) + ']'; +-} +- +- +-function formatArray(ctx, value, recurseTimes, visibleKeys, keys) { +- var output = []; +- for (var i = 0, l = value.length; i < l; ++i) { +- if (hasOwnProperty(value, String(i))) { +- output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, +- String(i), true)); +- } else { +- output.push(''); +- } +- } +- keys.forEach(function(key) { +- if (!key.match(/^\d+$/)) { +- output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, +- key, true)); +- } +- }); +- return output; +-} +- +- +-function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) { +- var name, str, desc; +- desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] }; +- if (desc.get) { +- if (desc.set) { +- str = ctx.stylize('[Getter/Setter]', 'special'); +- } else { +- str = ctx.stylize('[Getter]', 'special'); +- } +- } else { +- if (desc.set) { +- str = ctx.stylize('[Setter]', 'special'); +- } +- } +- if (!hasOwnProperty(visibleKeys, key)) { +- name = '[' + key + ']'; +- } +- if (!str) { +- if (ctx.seen.indexOf(desc.value) < 0) { +- if (isNull(recurseTimes)) { +- str = formatValue(ctx, desc.value, null); +- } else { +- str = formatValue(ctx, desc.value, recurseTimes - 1); +- } +- if (str.indexOf('\n') > -1) { +- if (array) { +- str = str.split('\n').map(function(line) { +- return ' ' + line; +- }).join('\n').substr(2); +- } else { +- str = '\n' + str.split('\n').map(function(line) { +- return ' ' + line; +- }).join('\n'); +- } +- } +- } else { +- str = ctx.stylize('[Circular]', 'special'); +- } +- } +- if (isUndefined(name)) { +- if (array && key.match(/^\d+$/)) { +- return str; +- } +- name = JSON.stringify('' + key); +- if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) { +- name = name.substr(1, name.length - 2); +- name = ctx.stylize(name, 'name'); +- } else { +- name = 
name.replace(/'/g, "\\'") +- .replace(/\\"/g, '"') +- .replace(/(^"|"$)/g, "'"); +- name = ctx.stylize(name, 'string'); +- } +- } +- +- return name + ': ' + str; +-} +- +- +-function reduceToSingleString(output, base, braces) { +- var numLinesEst = 0; +- var length = output.reduce(function(prev, cur) { +- numLinesEst++; +- if (cur.indexOf('\n') >= 0) numLinesEst++; +- return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1; +- }, 0); +- +- if (length > 60) { +- return braces[0] + +- (base === '' ? '' : base + '\n ') + +- ' ' + +- output.join(',\n ') + +- ' ' + +- braces[1]; +- } +- +- return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1]; +-} +- +- + // NOTE: These type checking functions intentionally don't use `instanceof` + // because it is fragile and can be easily faked with `Object.create()`. + function isArray(ar) { +@@ -522,166 +98,10 @@ function isPrimitive(arg) { + exports.isPrimitive = isPrimitive; + + function isBuffer(arg) { +- return arg instanceof Buffer; ++ return Buffer.isBuffer(arg); + } + exports.isBuffer = isBuffer; + + function objectToString(o) { + return Object.prototype.toString.call(o); +-} +- +- +-function pad(n) { +- return n < 10 ? '0' + n.toString(10) : n.toString(10); +-} +- +- +-var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', +- 'Oct', 'Nov', 'Dec']; +- +-// 26 Feb 16:19:34 +-function timestamp() { +- var d = new Date(); +- var time = [pad(d.getHours()), +- pad(d.getMinutes()), +- pad(d.getSeconds())].join(':'); +- return [d.getDate(), months[d.getMonth()], time].join(' '); +-} +- +- +-// log is just a thin wrapper to console.log that prepends a timestamp +-exports.log = function() { +- console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments)); +-}; +- +- +-/** +- * Inherit the prototype methods from one constructor into another. +- * +- * The Function.prototype.inherits from lang.js rewritten as a standalone +- * function (not on Function.prototype). 
NOTE: If this file is to be loaded +- * during bootstrapping this function needs to be rewritten using some native +- * functions as prototype setup using normal JavaScript does not work as +- * expected during bootstrapping (see mirror.js in r114903). +- * +- * @param {function} ctor Constructor function which needs to inherit the +- * prototype. +- * @param {function} superCtor Constructor function to inherit prototype from. +- */ +-exports.inherits = function(ctor, superCtor) { +- ctor.super_ = superCtor; +- ctor.prototype = Object.create(superCtor.prototype, { +- constructor: { +- value: ctor, +- enumerable: false, +- writable: true, +- configurable: true +- } +- }); +-}; +- +-exports._extend = function(origin, add) { +- // Don't do anything if add isn't an object +- if (!add || !isObject(add)) return origin; +- +- var keys = Object.keys(add); +- var i = keys.length; +- while (i--) { +- origin[keys[i]] = add[keys[i]]; +- } +- return origin; +-}; +- +-function hasOwnProperty(obj, prop) { +- return Object.prototype.hasOwnProperty.call(obj, prop); +-} +- +- +-// Deprecated old stuff. 
+- +-exports.p = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- console.error(exports.inspect(arguments[i])); +- } +-}, 'util.p: Use console.error() instead'); +- +- +-exports.exec = exports.deprecate(function() { +- return require('child_process').exec.apply(this, arguments); +-}, 'util.exec is now called `child_process.exec`.'); +- +- +-exports.print = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stdout.write(String(arguments[i])); +- } +-}, 'util.print: Use console.log instead'); +- +- +-exports.puts = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stdout.write(arguments[i] + '\n'); +- } +-}, 'util.puts: Use console.log instead'); +- +- +-exports.debug = exports.deprecate(function(x) { +- process.stderr.write('DEBUG: ' + x + '\n'); +-}, 'util.debug: Use console.error instead'); +- +- +-exports.error = exports.deprecate(function(x) { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stderr.write(arguments[i] + '\n'); +- } +-}, 'util.error: Use console.error instead'); +- +- +-exports.pump = exports.deprecate(function(readStream, writeStream, callback) { +- var callbackCalled = false; +- +- function call(a, b, c) { +- if (callback && !callbackCalled) { +- callback(a, b, c); +- callbackCalled = true; +- } +- } +- +- readStream.addListener('data', function(chunk) { +- if (writeStream.write(chunk) === false) readStream.pause(); +- }); +- +- writeStream.addListener('drain', function() { +- readStream.resume(); +- }); +- +- readStream.addListener('end', function() { +- writeStream.end(); +- }); +- +- readStream.addListener('close', function() { +- call(); +- }); +- +- readStream.addListener('error', function(err) { +- writeStream.end(); +- call(err); +- }); +- +- writeStream.addListener('error', function(err) { +- readStream.destroy(); +- call(err); +- }); +-}, 'util.pump(): Use 
readableStream.pipe() instead'); +- +- +-var uv; +-exports._errnoException = function(err, syscall) { +- if (isUndefined(uv)) uv = process.binding('uv'); +- var errname = uv.errname(err); +- var e = new Error(syscall + ' ' + errname); +- e.code = errname; +- e.errno = errname; +- e.syscall = syscall; +- return e; +-}; ++} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/lib/util.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/lib/util.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/lib/util.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/lib/util.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,107 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. +function isArray(ar) { + return Array.isArray(ar); +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return isObject(re) && objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return isObject(d) && objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return isObject(e) && + (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +function isBuffer(arg) { + return Buffer.isBuffer(arg); +} +exports.isBuffer = isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} \ No newline at end of file diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/package.json nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/package.json --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "core-util-is", + "version": "1.0.1", + "description": "The `util.is*` functions introduced in Node v0.12.", + "main": "lib/util.js", + "repository": { + "type": "git", + "url": "git://github.com/isaacs/core-util-is" + }, + "keywords": [ + "util", + "isBuffer", + "isArray", + "isNumber", + "isString", + "isRegExp", + "isThis", + "isThat", + "polyfill" + ], + "author": { + "name": "Isaac Z. Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/isaacs/core-util-is/issues" + }, + "readme": "# core-util-is\n\nThe `util.is*` functions introduced in Node v0.12.\n", + "readmeFilename": "README.md", + "homepage": "https://github.com/isaacs/core-util-is", + "_id": "core-util-is@1.0.1", + "dist": { + "shasum": "6b07085aef9a3ccac6ee53bf9d3df0c1521a5538", + "tarball": "http://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz" + }, + "_from": "core-util-is@>=1.0.0 <1.1.0", + "_npmVersion": "1.3.23", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "directories": {}, + "_shasum": "6b07085aef9a3ccac6ee53bf9d3df0c1521a5538", + "_resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz", + "scripts": {} +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/README.md nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/README.md --- 
nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +# core-util-is + +The `util.is*` functions introduced in Node v0.12. diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/util.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/util.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/core-util-is/util.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/core-util-is/util.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,106 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. +function isArray(ar) { + return Array.isArray(ar); +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return isObject(re) && objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return isObject(d) && objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return isObject(e) && objectToString(e) === '[object Error]'; +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +function isBuffer(arg) { + return arg instanceof Buffer; +} +exports.isBuffer = isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/build/build.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/build/build.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/build/build.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/build/build.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,209 @@ + +/** + * Require the given path. + * + * @param {String} path + * @return {Object} exports + * @api public + */ + +function require(path, parent, orig) { + var resolved = require.resolve(path); + + // lookup failed + if (null == resolved) { + orig = orig || path; + parent = parent || 'root'; + var err = new Error('Failed to require "' + orig + '" from "' + parent + '"'); + err.path = orig; + err.parent = parent; + err.require = true; + throw err; + } + + var module = require.modules[resolved]; + + // perform real require() + // by invoking the module's + // registered function + if (!module.exports) { + module.exports = {}; + module.client = module.component = true; + module.call(this, module.exports, require.relative(resolved), module); + } + + return module.exports; +} + +/** + * Registered modules. + */ + +require.modules = {}; + +/** + * Registered aliases. + */ + +require.aliases = {}; + +/** + * Resolve `path`. 
+ * + * Lookup: + * + * - PATH/index.js + * - PATH.js + * - PATH + * + * @param {String} path + * @return {String} path or null + * @api private + */ + +require.resolve = function(path) { + if (path.charAt(0) === '/') path = path.slice(1); + var index = path + '/index.js'; + + var paths = [ + path, + path + '.js', + path + '.json', + path + '/index.js', + path + '/index.json' + ]; + + for (var i = 0; i < paths.length; i++) { + var path = paths[i]; + if (require.modules.hasOwnProperty(path)) return path; + } + + if (require.aliases.hasOwnProperty(index)) { + return require.aliases[index]; + } +}; + +/** + * Normalize `path` relative to the current path. + * + * @param {String} curr + * @param {String} path + * @return {String} + * @api private + */ + +require.normalize = function(curr, path) { + var segs = []; + + if ('.' != path.charAt(0)) return path; + + curr = curr.split('/'); + path = path.split('/'); + + for (var i = 0; i < path.length; ++i) { + if ('..' == path[i]) { + curr.pop(); + } else if ('.' != path[i] && '' != path[i]) { + segs.push(path[i]); + } + } + + return curr.concat(segs).join('/'); +}; + +/** + * Register module at `path` with callback `definition`. + * + * @param {String} path + * @param {Function} definition + * @api private + */ + +require.register = function(path, definition) { + require.modules[path] = definition; +}; + +/** + * Alias a module definition. + * + * @param {String} from + * @param {String} to + * @api private + */ + +require.alias = function(from, to) { + if (!require.modules.hasOwnProperty(from)) { + throw new Error('Failed to alias "' + from + '", it does not exist'); + } + require.aliases[to] = from; +}; + +/** + * Return a require function relative to the `parent` path. + * + * @param {String} parent + * @return {Function} + * @api private + */ + +require.relative = function(parent) { + var p = require.normalize(parent, '..'); + + /** + * lastIndexOf helper. 
+ */ + + function lastIndexOf(arr, obj) { + var i = arr.length; + while (i--) { + if (arr[i] === obj) return i; + } + return -1; + } + + /** + * The relative require() itself. + */ + + function localRequire(path) { + var resolved = localRequire.resolve(path); + return require(resolved, parent, path); + } + + /** + * Resolve relative to the parent. + */ + + localRequire.resolve = function(path) { + var c = path.charAt(0); + if ('/' == c) return path.slice(1); + if ('.' == c) return require.normalize(p, path); + + // resolve deps by returning + // the dep in the nearest "deps" + // directory + var segs = parent.split('/'); + var i = lastIndexOf(segs, 'deps') + 1; + if (!i) i = 0; + path = segs.slice(0, i + 1).join('/') + '/deps/' + path; + return path; + }; + + /** + * Check if module is defined at `path`. + */ + + localRequire.exists = function(path) { + return require.modules.hasOwnProperty(localRequire.resolve(path)); + }; + + return localRequire; +}; +require.register("isarray/index.js", function(exports, require, module){ +module.exports = Array.isArray || function (arr) { + return Object.prototype.toString.call(arr) == '[object Array]'; +}; + +}); +require.alias("isarray/index.js", "isarray/index.js"); + diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/component.json nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/component.json --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/component.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/component.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +{ + "name" : "isarray", + "description" : "Array#isArray for older browsers", + "version" : "0.0.1", + "repository" : "juliangruber/isarray", + "homepage": "https://github.com/juliangruber/isarray", + "main" : "index.js", + "scripts" : [ + "index.js" + ], + "dependencies" : {}, + "keywords": 
["browser","isarray","array"], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/index.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/index.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +module.exports = Array.isArray || function (arr) { + return Object.prototype.toString.call(arr) == '[object Array]'; +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/package.json nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/package.json --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "isarray", + "description": "Array#isArray for older browsers", + "version": "0.0.1", + "repository": { + "type": "git", + "url": "git://github.com/juliangruber/isarray.git" + }, + "homepage": "https://github.com/juliangruber/isarray", + "main": "index.js", + "scripts": { + "test": "tap test/*.js" + }, + "dependencies": {}, + "devDependencies": { + "tap": "*" + }, + "keywords": [ + "browser", + "isarray", + "array" + ], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT", + "readme": "\n# isarray\n\n`Array#isArray` for older browsers.\n\n## Usage\n\n```js\nvar isArray = require('isarray');\n\nconsole.log(isArray([])); // => true\nconsole.log(isArray({})); // => false\n```\n\n## Installation\n\nWith 
[npm](http://npmjs.org) do\n\n```bash\n$ npm install isarray\n```\n\nThen bundle for the browser with\n[browserify](https://github.com/substack/browserify).\n\nWith [component](http://component.io) do\n\n```bash\n$ component install juliangruber/isarray\n```\n\n## License\n\n(MIT)\n\nCopyright (c) 2013 Julian Gruber <julian@juliangruber.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n", + "readmeFilename": "README.md", + "_id": "isarray@0.0.1", + "dist": { + "shasum": "8a18acfca9a8f4177e09abfc6038939b05d1eedf", + "tarball": "http://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" + }, + "_from": "isarray@0.0.1", + "_npmVersion": "1.2.18", + "_npmUser": { + "name": "juliangruber", + "email": "julian@juliangruber.com" + }, + "maintainers": [ + { + "name": "juliangruber", + "email": "julian@juliangruber.com" + } + ], + "directories": {}, + "_shasum": "8a18acfca9a8f4177e09abfc6038939b05d1eedf", + "_resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "bugs": { + "url": "https://github.com/juliangruber/isarray/issues" + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/README.md nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/README.md --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/isarray/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/isarray/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ + +# isarray + +`Array#isArray` for older browsers. + +## Usage + +```js +var isArray = require('isarray'); + +console.log(isArray([])); // => true +console.log(isArray({})); // => false +``` + +## Installation + +With [npm](http://npmjs.org) do + +```bash +$ npm install isarray +``` + +Then bundle for the browser with +[browserify](https://github.com/substack/browserify). 
+ +With [component](http://component.io) do + +```bash +$ component install juliangruber/isarray +``` + +## License + +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/index.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/index.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,221 @@ +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +var Buffer = require('buffer').Buffer; + +var isBufferEncoding = Buffer.isEncoding + || function(encoding) { + switch (encoding && encoding.toLowerCase()) { + case 'hex': case 'utf8': case 'utf-8': case 'ascii': case 'binary': case 'base64': case 'ucs2': case 'ucs-2': case 'utf16le': case 'utf-16le': case 'raw': return true; + default: return false; + } + } + + +function assertEncoding(encoding) { + if (encoding && !isBufferEncoding(encoding)) { + throw new Error('Unknown encoding: ' + encoding); + } +} + +// StringDecoder provides an interface for efficiently splitting a series of +// buffers into a series of JS strings without breaking apart multi-byte +// characters. CESU-8 is handled as part of the UTF-8 encoding. +// +// @TODO Handling all encodings inside a single object makes it very difficult +// to reason about this code, so it should be split up in the future. 
+// @TODO There should be a utf8-strict encoding that rejects invalid UTF-8 code +// points as used by CESU-8. +var StringDecoder = exports.StringDecoder = function(encoding) { + this.encoding = (encoding || 'utf8').toLowerCase().replace(/[-_]/, ''); + assertEncoding(encoding); + switch (this.encoding) { + case 'utf8': + // CESU-8 represents each of Surrogate Pair by 3-bytes + this.surrogateSize = 3; + break; + case 'ucs2': + case 'utf16le': + // UTF-16 represents each of Surrogate Pair by 2-bytes + this.surrogateSize = 2; + this.detectIncompleteChar = utf16DetectIncompleteChar; + break; + case 'base64': + // Base-64 stores 3 bytes in 4 chars, and pads the remainder. + this.surrogateSize = 3; + this.detectIncompleteChar = base64DetectIncompleteChar; + break; + default: + this.write = passThroughWrite; + return; + } + + // Enough space to store all bytes of a single character. UTF-8 needs 4 + // bytes, but CESU-8 may require up to 6 (3 bytes per surrogate). + this.charBuffer = new Buffer(6); + // Number of bytes received for the current incomplete multi-byte character. + this.charReceived = 0; + // Number of bytes expected for the current incomplete multi-byte character. + this.charLength = 0; +}; + + +// write decodes the given buffer and returns it as JS string that is +// guaranteed to not contain any partial multi-byte characters. Any partial +// character found at the end of the buffer is buffered up, and will be +// returned when calling write again with the remaining bytes. +// +// Note: Converting a Buffer containing an orphan surrogate to a String +// currently works, but converting a String to a Buffer (via `new Buffer`, or +// Buffer#write) will replace incomplete surrogates with the unicode +// replacement character. See https://codereview.chromium.org/121173009/ . 
+StringDecoder.prototype.write = function(buffer) { + var charStr = ''; + // if our last write ended with an incomplete multibyte character + while (this.charLength) { + // determine how many remaining bytes this buffer has to offer for this char + var available = (buffer.length >= this.charLength - this.charReceived) ? + this.charLength - this.charReceived : + buffer.length; + + // add the new bytes to the char buffer + buffer.copy(this.charBuffer, this.charReceived, 0, available); + this.charReceived += available; + + if (this.charReceived < this.charLength) { + // still not enough chars in this buffer? wait for more ... + return ''; + } + + // remove bytes belonging to the current character from the buffer + buffer = buffer.slice(available, buffer.length); + + // get the character that was split + charStr = this.charBuffer.slice(0, this.charLength).toString(this.encoding); + + // CESU-8: lead surrogate (D800-DBFF) is also the incomplete character + var charCode = charStr.charCodeAt(charStr.length - 1); + if (charCode >= 0xD800 && charCode <= 0xDBFF) { + this.charLength += this.surrogateSize; + charStr = ''; + continue; + } + this.charReceived = this.charLength = 0; + + // if there are no more bytes in this buffer, just emit our char + if (buffer.length === 0) { + return charStr; + } + break; + } + + // determine and set charLength / charReceived + this.detectIncompleteChar(buffer); + + var end = buffer.length; + if (this.charLength) { + // buffer the incomplete character bytes we got + buffer.copy(this.charBuffer, 0, buffer.length - this.charReceived, end); + end -= this.charReceived; + } + + charStr += buffer.toString(this.encoding, 0, end); + + var end = charStr.length - 1; + var charCode = charStr.charCodeAt(end); + // CESU-8: lead surrogate (D800-DBFF) is also the incomplete character + if (charCode >= 0xD800 && charCode <= 0xDBFF) { + var size = this.surrogateSize; + this.charLength += size; + this.charReceived += size; + 
this.charBuffer.copy(this.charBuffer, size, 0, size); + buffer.copy(this.charBuffer, 0, 0, size); + return charStr.substring(0, end); + } + + // or just emit the charStr + return charStr; +}; + +// detectIncompleteChar determines if there is an incomplete UTF-8 character at +// the end of the given buffer. If so, it sets this.charLength to the byte +// length that character, and sets this.charReceived to the number of bytes +// that are available for this character. +StringDecoder.prototype.detectIncompleteChar = function(buffer) { + // determine how many bytes we have to check at the end of this buffer + var i = (buffer.length >= 3) ? 3 : buffer.length; + + // Figure out if one of the last i bytes of our buffer announces an + // incomplete char. + for (; i > 0; i--) { + var c = buffer[buffer.length - i]; + + // See http://en.wikipedia.org/wiki/UTF-8#Description + + // 110XXXXX + if (i == 1 && c >> 5 == 0x06) { + this.charLength = 2; + break; + } + + // 1110XXXX + if (i <= 2 && c >> 4 == 0x0E) { + this.charLength = 3; + break; + } + + // 11110XXX + if (i <= 3 && c >> 3 == 0x1E) { + this.charLength = 4; + break; + } + } + this.charReceived = i; +}; + +StringDecoder.prototype.end = function(buffer) { + var res = ''; + if (buffer && buffer.length) + res = this.write(buffer); + + if (this.charReceived) { + var cr = this.charReceived; + var buf = this.charBuffer; + var enc = this.encoding; + res += buf.slice(0, cr).toString(enc); + } + + return res; +}; + +function passThroughWrite(buffer) { + return buffer.toString(this.encoding); +} + +function utf16DetectIncompleteChar(buffer) { + this.charReceived = buffer.length % 2; + this.charLength = this.charReceived ? 2 : 0; +} + +function base64DetectIncompleteChar(buffer) { + this.charReceived = buffer.length % 3; + this.charLength = this.charReceived ? 
3 : 0; +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/LICENSE nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +Copyright Joyent, Inc. and other Node contributors. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/.npmignore nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +build +test diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/package.json nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/package.json --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "string_decoder", + "version": "0.10.31", + "description": "The string_decoder module from Node core", + "main": "index.js", + "dependencies": {}, + "devDependencies": { + "tap": "~0.4.8" + }, + "scripts": { + "test": "tap test/simple/*.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/rvagg/string_decoder.git" + }, + "homepage": "https://github.com/rvagg/string_decoder", + "keywords": [ + "string", + "decoder", + "browser", + "browserify" + ], + "license": "MIT", + "gitHead": "d46d4fd87cf1d06e031c23f1ba170ca7d4ade9a0", + "bugs": { + "url": "https://github.com/rvagg/string_decoder/issues" + }, + "_id": "string_decoder@0.10.31", + "_shasum": "62e203bc41766c6c28c9fc84301dab1c5310fa94", + "_from": "string_decoder@>=0.10.0 <0.11.0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "rvagg", + "email": "rod@vagg.org" + }, + "maintainers": [ + { + "name": "substack", + "email": "mail@substack.net" + }, + { + "name": "rvagg", + "email": "rod@vagg.org" + } + ], + "dist": { + "shasum": 
"62e203bc41766c6c28c9fc84301dab1c5310fa94", + "tarball": "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "readme": "ERROR: No README data found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/README.md nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/README.md --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/node_modules/string_decoder/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/node_modules/string_decoder/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,7 @@ +**string_decoder.js** (`require('string_decoder')`) from Node.js core + +Copyright Joyent, Inc. and other Node contributors. See LICENCE file for details. + +Version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10. **Prefer the stable version over the unstable.** + +The *build/* directory contains a build script that will scrape the source from the [joyent/node](https://github.com/joyent/node) repo given a specific Node version. 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/.npmignore nodejs-0.11.15/deps/npm/node_modules/readable-stream/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +build/ +test/ +examples/ +fs.js +zlib.js \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/package.json nodejs-0.11.15/deps/npm/node_modules/readable-stream/package.json --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,71 @@ +{ + "name": "readable-stream", + "version": "1.0.32", + "description": "Streams2, a user-land copy of the stream library from Node.js v0.10.x", + "main": "readable.js", + "dependencies": { + "core-util-is": "~1.0.0", + "isarray": "0.0.1", + "string_decoder": "~0.10.x", + "inherits": "~2.0.1" + }, + "devDependencies": { + "tap": "~0.2.6" + }, + "scripts": { + "test": "tap test/simple/*.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/isaacs/readable-stream" + }, + "keywords": [ + "readable", + "stream", + "pipe" + ], + "browser": { + "util": false + }, + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "MIT", + "gitHead": "2024ad52b1e475465488b4ad39eb41d067ffcbb9", + "bugs": { + "url": "https://github.com/isaacs/readable-stream/issues" + }, + "homepage": "https://github.com/isaacs/readable-stream", + "_id": "readable-stream@1.0.32", + "_shasum": "6b44a88ba984cd0ec0834ae7d59a47c39aef48ec", + "_from": "readable-stream@*", + "_npmVersion": "2.0.2", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "rvagg", + "email": "rod@vagg.org" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + }, + { + "name": "tootallnate", + "email": "nathan@tootallnate.net" + }, + { + "name": "rvagg", + "email": "rod@vagg.org" + } + ], + "dist": { + "shasum": "6b44a88ba984cd0ec0834ae7d59a47c39aef48ec", + "tarball": "http://registry.npmjs.org/readable-stream/-/readable-stream-1.0.32.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.32.tgz", + "readme": "ERROR: No README data found!" 
+} diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/passthrough.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/passthrough.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/passthrough.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/passthrough.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_passthrough.js") diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/readable.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/readable.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/readable.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/readable.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ +exports = module.exports = require('./lib/_stream_readable.js'); +exports.Readable = exports; +exports.Writable = require('./lib/_stream_writable.js'); +exports.Duplex = require('./lib/_stream_duplex.js'); +exports.Transform = require('./lib/_stream_transform.js'); +exports.PassThrough = require('./lib/_stream_passthrough.js'); diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/README.md nodejs-0.11.15/deps/npm/node_modules/readable-stream/README.md --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +# readable-stream + +***Node-core streams for userland*** + +[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true)](https://nodei.co/npm/readable-stream/) +[![NPM](https://nodei.co/npm-dl/readable-stream.png)](https://nodei.co/npm/readable-stream/) + +This package is a mirror of the Streams2 and Streams3 implementations in Node-core. 
+ +If you want to guarantee a stable streams base, regardless of what version of Node you, or the users of your libraries are using, use **readable-stream** *only* and avoid the *"stream"* module in Node-core. + +**readable-stream** comes in two major versions, v1.0.x and v1.1.x. The former tracks the Streams2 implementation in Node 0.10, including bug-fixes and minor improvements as they are added. The latter tracks Streams3 as it develops in Node 0.11; we will likely see a v1.2.x branch for Node 0.12. + +**readable-stream** uses proper patch-level versioning so if you pin to `"~1.0.0"` you’ll get the latest Node 0.10 Streams2 implementation, including any fixes and minor non-breaking improvements. The patch-level versions of 1.0.x and 1.1.x should mirror the patch-level versions of Node-core releases. You should prefer the **1.0.x** releases for now and when you’re ready to start using Streams3, pin to `"~1.1.0"` + diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/transform.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/transform.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/transform.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/transform.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_transform.js") diff -Nru nodejs-0.11.13/deps/npm/node_modules/readable-stream/writable.js nodejs-0.11.15/deps/npm/node_modules/readable-stream/writable.js --- nodejs-0.11.13/deps/npm/node_modules/readable-stream/writable.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/readable-stream/writable.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_writable.js") diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/debuglog.js nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/debuglog.js --- 
nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/debuglog.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/debuglog.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +var util = require('util'); + +module.exports = (util && util.debuglog) || debuglog; + +var debugs = {}; +var debugEnviron = process.env.NODE_DEBUG || ''; + +function debuglog(set) { + set = set.toUpperCase(); + if (!debugs[set]) { + if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) { + var pid = process.pid; + debugs[set] = function() { + var msg = util.format.apply(exports, arguments); + console.error('%s %d: %s', set, pid, msg); + }; + } else { + debugs[set] = function() {}; + } + } + return debugs[set]; +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/LICENSE nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright Joyent, Inc. and other Node contributors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +{ + "name": "debuglog", + "version": "1.0.1", + "description": "backport of util.debuglog from node v0.11", + "license": "MIT", + "main": "debuglog.js", + "repository": { + "type": "git", + "url": "https://github.com/sam-github/node-debuglog.git" + }, + "author": { + "name": "Sam Roberts", + "email": "sam@strongloop.com" + }, + "engines": { + "node": "*" + }, + "browser": { + "util": false + }, + "bugs": { + "url": "https://github.com/sam-github/node-debuglog/issues" + }, + "homepage": "https://github.com/sam-github/node-debuglog", + "_id": "debuglog@1.0.1", + "dist": { + "shasum": "aa24ffb9ac3df9a2351837cfb2d279360cd78492", + "tarball": "http://registry.npmjs.org/debuglog/-/debuglog-1.0.1.tgz" + }, + "_from": "debuglog@>=1.0.1-0 <2.0.0-0", + "_npmVersion": "1.4.3", + "_npmUser": { + "name": "octet", + "email": "sam@strongloop.com" + }, + "maintainers": [ + { + "name": "octet", + "email": "sam@strongloop.com" + } + ], + "directories": {}, + "_shasum": "aa24ffb9ac3df9a2351837cfb2d279360cd78492", + "_resolved": 
"https://registry.npmjs.org/debuglog/-/debuglog-1.0.1.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/README.md nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/README.md --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/debuglog/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/debuglog/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,40 @@ +# debuglog - backport of util.debuglog() from node v0.11 + +To facilitate using the `util.debuglog()` function that will be available when +node v0.12 is released now, this is a copy extracted from the source. + +## require('debuglog') + +Return `util.debuglog`, if it exists, otherwise it will return an internal copy +of the implementation from node v0.11. + +## debuglog(section) + +* `section` {String} The section of the program to be debugged +* Returns: {Function} The logging function + +This is used to create a function which conditionally writes to stderr +based on the existence of a `NODE_DEBUG` environment variable. If the +`section` name appears in that environment variable, then the returned +function will be similar to `console.error()`. If not, then the +returned function is a no-op. + +For example: + +```javascript +var debuglog = util.debuglog('foo'); + +var bar = 123; +debuglog('hello from foo [%d]', bar); +``` + +If this program is run with `NODE_DEBUG=foo` in the environment, then +it will output something like: + + FOO 3245: hello from foo [123] + +where `3245` is the process id. If it is not run with that +environment variable set, then it will not print anything. + +You may separate multiple `NODE_DEBUG` environment variables with a +comma. For example, `NODE_DEBUG=fs,net,tls`. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/LICENSE nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "readdir-scoped-modules", + "version": "1.0.0", + "description": "Like `fs.readdir` but handling `@org/module` dirs as if they were a single entry.", + "main": "readdir.js", + "directories": { + "test": "test" + }, + "dependencies": { + "debuglog": "^1.0.1", + "dezalgo": "^1.0.0", + "once": "^1.3.0" + }, + "devDependencies": { + "tap": "0.4" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/readdir-scoped-modules" + }, + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/readdir-scoped-modules/issues" + }, + "homepage": "https://github.com/npm/readdir-scoped-modules", + "gitHead": "35a4a7a2325d12ed25ed322cd61f976b740f7fb7", + "_id": "readdir-scoped-modules@1.0.0", + "_shasum": "e939de969b38b3e7dfaa14fbcfe7a2fd15a4ea37", + "_from": "readdir-scoped-modules@>=1.0.0-0 <2.0.0-0", + "_npmVersion": "2.0.0-alpha.6.0", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "e939de969b38b3e7dfaa14fbcfe7a2fd15a4ea37", + "tarball": "http://registry.npmjs.org/readdir-scoped-modules/-/readdir-scoped-modules-1.0.0.tgz" + }, + "_resolved": "https://registry.npmjs.org/readdir-scoped-modules/-/readdir-scoped-modules-1.0.0.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/readdir.js nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/readdir.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/readdir.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/readdir.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,71 @@ +var fs = require ('fs') +var dz = require ('dezalgo') +var once = require ('once') +var path = require ('path') +var debug = require ('debuglog') ('rds') + +module . exports = readdir + +function readdir (dir, cb) { + fs . readdir (dir, function (er, kids) { + if (er) + return cb (er) + + debug ('dir=%j, kids=%j', dir, kids) + readScopes (dir, kids, function (er, data) { + if (er) + return cb (er) + + // Sort for bonus consistency points + data = data . sort (function (a, b) { + return a > b ? 
1 : -1 + }) + + return cb (null, data) + }) + }) +} + +// Turn [ 'a', '@scope' ] into +// ['a', '@scope/foo', '@scope/bar'] +function readScopes (root, kids, cb) { + var scopes = kids . filter (function (kid) { + return kid . charAt (0) === '@' + }) + + kids = kids . filter (function (kid) { + return kid . charAt (0) !== '@' + }) + + debug ('scopes=%j', scopes) + + if (scopes . length === 0) + dz (cb) (null, kids) // prevent maybe-sync zalgo release + + cb = once (cb) + var l = scopes . length + scopes . forEach (function (scope) { + var scopedir = path . resolve (root, scope) + debug ('root=%j scope=%j scopedir=%j', root, scope, scopedir) + fs . readdir (scopedir, then . bind (null, scope)) + }) + + function then (scope, er, scopekids) { + if (er) + return cb (er) + + // XXX: Not sure how old this node bug is. Maybe superstition? + scopekids = scopekids . filter (function (scopekid) { + return !(scopekid === '.' || scopekid === '..' || !scopekid) + }) + + kids . push . apply (kids, scopekids . map (function (scopekid) { + return scope + '/' + scopekid + })) + + debug ('scope=%j scopekids=%j kids=%j', scope, scopekids, kids) + + if (--l === 0) + cb (null, kids) + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/README.md nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/README.md --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +# readdir-scoped-modules + +Like `fs.readdir` but handling `@org/module` dirs as if they were +a single entry. + +Used by npm. 
+ +## USAGE + +```javascript +var readdir = require('readdir-scoped-modules') + +readdir('node_modules', function (er, entries) { + // entries will be something like + // ['a', '@org/foo', '@org/bar'] +}) +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/test/basic.js nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/readdir-scoped-modules/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +var test = require ('tap') . test +var readdir = require ('../readdir.js') + +test ('basic', function (t) { + // should not get {a,b}/{x,y}, but SHOULD get @org/ and @scope children + var expect = [ '@org/x', '@org/y', '@scope/x', '@scope/y', 'a', 'b' ] + + readdir (__dirname + '/fixtures', function (er, kids) { + if (er) + throw er + t.same(kids, expect) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/util-extend/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/util-extend/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/node_modules/util-extend/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/node_modules/util-extend/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -17,7 +17,24 @@ "bugs": { "url": "https://github.com/isaacs/util-extend/issues" }, - "homepage": "https://github.com/isaacs/util-extend", "_id": "util-extend@1.0.1", - "_from": "util-extend@^1.0.1" + "dist": { + "shasum": "bb703b79480293ddcdcfb3c6a9fea20f483415bc", + "tarball": "http://registry.npmjs.org/util-extend/-/util-extend-1.0.1.tgz" + }, + "_from": "util-extend@>=1.0.1-0 <2.0.0-0", + "_npmVersion": "1.3.4", + "_npmUser": { + 
"name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "directories": {}, + "_shasum": "bb703b79480293ddcdcfb3c6a9fea20f483415bc", + "_resolved": "https://registry.npmjs.org/util-extend/-/util-extend-1.0.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ { "name": "read-installed", "description": "Read all the installed packages in a folder, and return a tree structure with all the data.", - "version": "2.0.2", + "version": "3.1.3", "repository": { "type": "git", "url": "git://github.com/isaacs/read-installed" @@ -11,14 +11,16 @@ "test": "tap ./test/*.js" }, "dependencies": { + "debuglog": "^1.0.1", "read-package-json": "1", - "semver": "2", + "readdir-scoped-modules": "^1.0.0", + "semver": "2 || 3 || 4", "slide": "~1.1.3", "util-extend": "^1.0.1", - "graceful-fs": "~2" + "graceful-fs": "2 || 3" }, "optionalDependencies": { - "graceful-fs": "~2" + "graceful-fs": "2 || 3" }, "author": { "name": "Isaac Z. 
Schlueter", @@ -27,14 +29,37 @@ }, "license": "ISC", "devDependencies": { + "mkdirp": "^0.5.0", + "rimraf": "^2.2.8", "tap": "~0.4.8" }, - "readme": "# read-installed\n\nRead all the installed packages in a folder, and return a tree\nstructure with all the data.\n\nnpm uses this.\n\n## 2.0.0\n\nBreaking changes in `2.0.0`:\n\nThe second argument is now an `Object` that contains the following keys:\n\n * `depth` optional, defaults to Infinity\n * `log` optional log Function\n * `dev` optional, default false, set to true to include devDependencies\n\n## Usage\n\n```javascript\nvar readInstalled = require(\"read-installed\")\n// optional options\nvar options = { dev: false, log: fn, depth: 2 }\nreadInstalled(folder, options, function (er, data) {\n ...\n})\n```\n", - "readmeFilename": "README.md", + "gitHead": "50e45af7581b1a879c62146fafbfa1b92842f7df", "bugs": { "url": "https://github.com/isaacs/read-installed/issues" }, "homepage": "https://github.com/isaacs/read-installed", - "_id": "read-installed@2.0.2", - "_from": "read-installed@latest" + "_id": "read-installed@3.1.3", + "_shasum": "c09092a13c2117f22842cad16804f3b059129d11", + "_from": "read-installed@>=3.1.2-0 <3.2.0-0", + "_npmVersion": "2.0.0-beta.3", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + }, + { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + } + ], + "dist": { + "shasum": "c09092a13c2117f22842cad16804f3b059129d11", + "tarball": "http://registry.npmjs.org/read-installed/-/read-installed-3.1.3.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/read-installed/-/read-installed-3.1.3.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/read-installed.js nodejs-0.11.15/deps/npm/node_modules/read-installed/read-installed.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/read-installed.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/read-installed/read-installed.js 2015-01-20 21:22:17.000000000 +0000 @@ -101,6 +101,10 @@ var util = require("util") var extend = require("util-extend") +var debug = require("debuglog")("read-installed") + +var readdir = require("readdir-scoped-modules") + module.exports = readInstalled function readInstalled (folder, opts, cb) { @@ -120,25 +124,29 @@ opts.log = function () {} opts.dev = !!opts.dev + opts.realpathSeen = {} + opts.findUnmetSeen = [] + readInstalled_(folder, null, null, null, 0, opts, function (er, obj) { if (er) return cb(er) // now obj has all the installed things, where they're installed // figure out the inheritance links, now that the object is built. resolveInheritance(obj, opts) - markExtraneous(obj) + obj.root = true + unmarkExtraneous(obj, opts) cb(null, obj) }) } -var rpSeen = {} function readInstalled_ (folder, parent, name, reqver, depth, opts, cb) { var installed , obj , real , link + , realpathSeen = opts.realpathSeen - fs.readdir(path.resolve(folder, "node_modules"), function (er, i) { + readdir(path.resolve(folder, "node_modules"), function (er, i) { // error indicates that nothing is installed here if (er) i = [] installed = i.filter(function (f) { return f.charAt(0) !== "." 
}) @@ -161,7 +169,7 @@ return next(er) } fs.realpath(folder, function (er, rp) { - //console.error("realpath(%j) = %j", folder, rp) + debug("realpath(%j) = %j", folder, rp) real = rp if (st.isSymbolicLink()) link = rp next(er) @@ -176,10 +184,10 @@ errState = er return cb(null, []) } - //console.error('next', installed, obj && typeof obj, name, real) + debug('next', installed, obj && typeof obj, name, real) if (!installed || !obj || !real || called) return called = true - if (rpSeen[real]) return cb(null, rpSeen[real]) + if (realpathSeen[real]) return cb(null, realpathSeen[real]) if (obj === true) { obj = {dependencies:{}, path:folder} installed.forEach(function (i) { obj.dependencies[i] = "*" }) @@ -188,6 +196,9 @@ obj.realName = name || obj.name obj.dependencies = obj.dependencies || {} + // At this point, figure out what dependencies we NEED to get met + obj._dependencies = copy(obj.dependencies) + // "foo":"http://blah" and "foo":"latest" are always presumed valid if (reqver && semver.validRange(reqver, true) @@ -195,21 +206,17 @@ obj.invalid = true } - if (parent) { - var deps = parent.dependencies || {} - var inDeps = name in deps - var devDeps = parent.devDependencies || {} - var inDev = opts.dev && (name in devDeps) - if (!inDeps && !inDev) { - obj.extraneous = true - } - } + // Mark as extraneous at this point. + // This will be un-marked in unmarkExtraneous, where we mark as + // not-extraneous everything that is required in some way from + // the root object. 
+ obj.extraneous = true obj.path = obj.path || folder obj.realPath = real obj.link = link if (parent && !obj.link) obj.parent = parent - rpSeen[real] = obj + realpathSeen[real] = obj obj.depth = depth //if (depth >= opts.depth) return cb(null, obj) asyncMap(installed, function (pkg, cb) { @@ -217,23 +224,9 @@ if (!rv && obj.devDependencies && opts.dev) rv = obj.devDependencies[pkg] - if (depth >= opts.depth) { - // just try to get the version number - var pkgfolder = path.resolve(folder, "node_modules", pkg) - , jsonFile = path.resolve(pkgfolder, "package.json") - return readJson(jsonFile, function (er, depData) { - // already out of our depth, ignore errors - if (er || !depData || !depData.version) return cb(null, obj) - if (depth === opts.depth) { - // edge case, ignore dependencies - depData.dependencies = {} - depData.peerDependencies = {} - obj.dependencies[pkg] = depData - } else { - obj.dependencies[pkg] = depData.version - } - cb(null, obj) - }) + if (depth > opts.depth) { + obj.dependencies = {} + return cb(null, obj) } readInstalled_( path.resolve(folder, "node_modules/"+pkg) @@ -273,50 +266,45 @@ findUnmet(obj.dependencies[dep], opts) }) Object.keys(obj.dependencies).forEach(function (dep) { - resolveInheritance(obj.dependencies[dep], opts) + if (typeof obj.dependencies[dep] === "object") { + resolveInheritance(obj.dependencies[dep], opts) + } else { + debug("unmet dep! %s %s@%s", obj.name, dep, obj.dependencies[dep]) + } }) findUnmet(obj, opts) } // find unmet deps by walking up the tree object. 
// No I/O -var fuSeen = [] function findUnmet (obj, opts) { - if (fuSeen.indexOf(obj) !== -1) return - fuSeen.push(obj) - //console.error("find unmet", obj.name, obj.parent && obj.parent.name) + var findUnmetSeen = opts.findUnmetSeen + if (findUnmetSeen.indexOf(obj) !== -1) return + findUnmetSeen.push(obj) + debug("find unmet parent=%s obj=", obj.parent && obj.parent.name, obj.name || obj) var deps = obj.dependencies = obj.dependencies || {} - //console.error(deps) + debug(deps) Object.keys(deps) .filter(function (d) { return typeof deps[d] === "string" }) .forEach(function (d) { - //console.error("find unmet", obj.name, d, deps[d]) - var r = obj.parent - , found = null - while (r && !found && typeof deps[d] === "string") { - // if r is a valid choice, then use that. - found = r.dependencies[d] - if (!found && r.realName === d) found = r - - if (!found) { - r = r.link ? null : r.parent - continue - } - // "foo":"http://blah" and "foo":"latest" are always presumed valid - if ( typeof deps[d] === "string" - && semver.validRange(deps[d], true) - && !semver.satisfies(found.version, deps[d], true)) { - // the bad thing will happen - opts.log("unmet dependency", obj.path + " requires "+d+"@'"+deps[d] - +"' but will load\n" - +found.path+",\nwhich is version "+found.version - ) - found.invalid = true - } + var found = findDep(obj, d) + debug("finding dep %j", d, found && found.name || found) + // "foo":"http://blah" and "foo":"latest" are always presumed valid + if (typeof deps[d] === "string" && + semver.validRange(deps[d], true) && + found && + !semver.satisfies(found.version, deps[d], true)) { + // the bad thing will happen + opts.log( "unmet dependency" + , obj.path + " requires "+d+"@'"+deps[d] + + "' but will load\n" + + found.path+",\nwhich is version "+found.version ) + found.invalid = true + } + if (found) { deps[d] = found } - }) var peerDeps = obj.peerDependencies = obj.peerDependencies || {} @@ -331,7 +319,11 @@ obj.dependencies[d] = peerDeps[d] } } else { - 
dependency = obj.parent.dependencies && obj.parent.dependencies[d] + var r = obj.parent + while (r && !dependency) { + dependency = r.dependencies && r.dependencies[d] + r = r.link ? null : r.parent + } } if (!dependency) { @@ -339,34 +331,58 @@ obj.dependencies[d] = peerDeps[d] } else if (!semver.satisfies(dependency.version, peerDeps[d], true)) { dependency.peerInvalid = true - } else { - dependency.extraneous = false } }) return obj } -function recursivelyMarkExtraneous (obj, extraneous) { - // stop recursion if we're not changing anything - if (obj.extraneous === extraneous) return +function unmarkExtraneous (obj, opts) { + // Mark all non-required deps as extraneous. + // start from the root object and mark as non-extraneous all modules + // that haven't been previously flagged as extraneous then propagate + // to all their dependencies + + obj.extraneous = false + + var deps = obj._dependencies + if (opts.dev && obj.devDependencies && (obj.root || obj.link)) { + Object.keys(obj.devDependencies).forEach(function (k) { + deps[k] = obj.devDependencies[k] + }) + } - obj.extraneous = extraneous - var deps = obj.dependencies = obj.dependencies || {} - Object.keys(deps).forEach(function(d){ - recursivelyMarkExtraneous(deps[d], extraneous) - }); + if (obj.peerDependencies) { + Object.keys(obj.peerDependencies).forEach(function (k) { + deps[k] = obj.peerDependencies[k] + }) + } + + debug("not extraneous", obj._id, deps) + Object.keys(deps).forEach(function (d) { + var dep = findDep(obj, d) + if (dep && dep.extraneous) { + unmarkExtraneous(dep, opts) + } + }) } -function markExtraneous (obj) { - // start from the root object and mark as non-extraneous all modules that haven't been previously flagged as - // extraneous then propagate to all their dependencies - var deps = obj.dependencies = obj.dependencies || {} - Object.keys(deps).forEach(function(d){ - if (!deps[d].extraneous){ - recursivelyMarkExtraneous(deps[d], false); +// Find the one that will actually be loaded 
by require() +// so we can make sure it's valid etc. +function findDep (obj, d) { + var r = obj + , found = null + while (r && !found) { + // if r is a valid choice, then use that. + // kinda weird if a pkg depends on itself, but after the first + // iteration of this loop, it indicates a dep cycle. + if (typeof r.dependencies[d] === "object") { + found = r.dependencies[d] } - }); + if (!found && r.realName === d) found = r + r = r.link ? null : r.parent + } + return found } function copy (obj) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/basic.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,8 +1,9 @@ var readInstalled = require("../read-installed.js") -var json = require("./fixtures/package.json") -var known = [].concat(Object.keys(json.dependencies) - , Object.keys(json.optionalDependencies) - , Object.keys(json.devDependencies)).sort() +var json = require("../package.json") +var d = Object.keys(json.dependencies) +var dd = Object.keys(json.devDependencies) +var od = Object.keys(json.optionalDependencies) +var known = d.concat(dd).concat(od).sort() var test = require("tap").test var path = require("path") @@ -36,9 +37,7 @@ default: delete map[i] } var dep = map.dependencies -// delete map.dependencies if (dep) { -// map.dependencies = dep for (var i in dep) if (typeof dep[i] === "object") { cleanup(dep[i]) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/cyclic-extraneous-peer-deps.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/cyclic-extraneous-peer-deps.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/cyclic-extraneous-peer-deps.js 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/read-installed/test/cyclic-extraneous-peer-deps.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ +var test = require("tap").test +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") +var fs = require("fs") +var path = require("path") +var readInstalled = require("../read-installed.js") + +var parent = { + name: "parent", + version: "1.2.3", + dependencies: {}, + devDependencies: { + "child1":"*" + }, + readme:"." +} + +var child1 = { + name: "child1", + version: "1.2.3", + peerDependencies: { + child2: "*" + }, + readme:"." +} + +var child2 = { + name: "child2", + version: "1.2.3", + peerDependencies: { + child1: "*" + }, + readme:"." +} + + +var root = path.resolve(__dirname, "cyclic-extraneous-peer-deps") +var parentjson = path.resolve(root, "package.json") +var child1root = path.resolve(root, "node_modules/child1") +var child1json = path.resolve(child1root, "package.json") +var child2root = path.resolve(root, "node_modules/child2") +var child2json = path.resolve(child2root, "package.json") + +test("setup", function (t) { + rimraf.sync(root) + mkdirp.sync(child1root) + mkdirp.sync(child2root) + fs.writeFileSync(parentjson, JSON.stringify(parent, null, 2) + "\n", "utf8") + fs.writeFileSync(child1json, JSON.stringify(child1, null, 2) + "\n", "utf8") + fs.writeFileSync(child2json, JSON.stringify(child2, null, 2) + "\n", "utf8") + t.pass("setup done") + t.end() +}) + +test("dev mode", function (t) { + // peer dev deps should both be not extraneous. 
+ readInstalled(root, { dev: true }, function (er, data) { + if (er) + throw er + t.notOk(data.dependencies.child1.extraneous, "c1 not extra") + t.notOk(data.dependencies.child2.extraneous, "c2 not extra") + t.end() + }) +}) + +test("prod mode", function (t) { + readInstalled(root, { dev: false }, function (er, data) { + if (er) + throw er + t.ok(data.dependencies.child1.extraneous, "c1 extra") + t.ok(data.dependencies.child2.extraneous, "c2 extra") + t.end() + }) +}) + + +test("cleanup", function (t) { + rimraf.sync(root) + t.pass("cleanup done") + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/depth-0.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/depth-0.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/depth-0.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/depth-0.js 2015-01-20 21:22:17.000000000 +0000 @@ -16,6 +16,8 @@ // Exclude self from dependencies when depth = 0 delete map.dependencies[json.name] var subdeps = Object.keys(map.dependencies).reduce(function(acc, dep) { + // Exclude self from dependencies when depth = current depth + delete map.dependencies[dep].dependencies[dep] acc += Object.keys(map.dependencies[dep].dependencies).length; return acc; }, 0); diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/dev.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/dev.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/dev.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/dev.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ var readInstalled = require("../read-installed.js") var test = require("tap").test -var json = require("./fixtures/package.json") +var json = require("../package.json") var path = require("path") var known = [].concat(Object.keys(json.dependencies) , Object.keys(json.optionalDependencies) @@ -17,7 +17,7 @@ var deps = 
Object.keys(map.dependencies).sort() t.equal(deps.length, known.length, "array lengths are equal") t.deepEqual(deps, known, "arrays should be equal") - t.ok(map.dependencies.tap.extraneous, 'extraneous is set on devDep') + t.ok(map.dependencies.tap.extraneous, "extraneous is set on devDep") t.end() }) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/extraneous-dev.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/extraneous-dev.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/extraneous-dev.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/extraneous-dev.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +var readInstalled = require("../read-installed.js") +var test = require("tap").test +var path = require("path") + +test("extraneous detected", function(t) { + // This test verifies read-installed#16 + readInstalled( + path.join(__dirname, "fixtures/extraneous-dev-dep"), + { + log: console.error, + dev: true + }, + function (err, map) { + t.ifError(err, "read-installed made it") + + t.notOk(map.dependencies.d.extraneous, "d is not extraneous, it's required by root") + t.ok(map.dependencies.x.extraneous, "x is extraneous, it's only a dev dep of d") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/extraneous.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/extraneous.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/extraneous.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/extraneous.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ var readInstalled = require('../read-installed.js') var test = require('tap').test -var path = require('path'); +var path = require('path') test('extraneous detected', function(t) { // This test verifies read-installed#16 @@ -12,6 +12,6 @@ t.ok(map.dependencies.bar.extraneous, 'bar is extraneous, it\'s not required by any 
module') t.notOk(map.dependencies.asdf.extraneous, 'asdf is not extraneous, it\'s required by ghjk') t.notOk(map.dependencies.ghjk.extraneous, 'ghjk is not extraneous, it\'s required by our root module') - t.end(); + t.end() }) }) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/fixtures/extraneous-dev-dep/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/test/fixtures/extraneous-dev-dep/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/fixtures/extraneous-dev-dep/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/fixtures/extraneous-dev-dep/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "name": "extraneous-dev-dep", + "version": "0.0.0", + "dependencies": { + "d": "1.0.0" + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "name": "example", + "version": "0.0.0", + "dependencies": { + "plugin-wrapper": "0.0.0", + "framework": "0.0.0" + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer-dev/package.json nodejs-0.11.15/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer-dev/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer-dev/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/fixtures/grandparent-peer-dev/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "name": "example", + "version": "0.0.0", + 
"devDependencies": { + "plugin-wrapper": "0.0.0", + "framework": "0.0.0" + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/grandparent-peer-dev.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/grandparent-peer-dev.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/grandparent-peer-dev.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/grandparent-peer-dev.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +var readInstalled = require('../read-installed.js') +var test = require('tap').test +var path = require('path'); + +function allValid(t, map) { + var deps = Object.keys(map.dependencies || {}) + deps.forEach(function (dep) { + t.ok(map.dependencies[dep].extraneous, 'dependency ' + dep + ' of ' + map.name + ' is extraneous') + }) +} + +test('grandparent dev peer dependencies should be extraneous', function(t) { + readInstalled( + path.join(__dirname, 'fixtures/grandparent-peer-dev'), + { log: console.error }, + function(err, map) { + allValid(t, map) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/grandparent-peer.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/grandparent-peer.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/grandparent-peer.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/grandparent-peer.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ +var readInstalled = require('../read-installed.js') +var test = require('tap').test +var path = require('path'); + +function allValid(t, map) { + var deps = Object.keys(map.dependencies || {}) + deps.forEach(function (dep) { + t.notOk(map.dependencies[dep].invalid, 'dependency ' + dep + ' of ' + map.name + ' is not invalid') + t.notOk(typeof map.dependencies[dep] === 'string', 'dependency ' + dep + ' of ' + map.name + ' is not missing') + }) + deps.forEach(function (dep) { + allValid(t, 
map.dependencies[dep]) + }) +} + +test('grandparent can satisfy peer dependencies', function(t) { + readInstalled( + path.join(__dirname, 'fixtures/grandparent-peer'), + { log: console.error }, + function(err, map) { + allValid(t, map) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/linked-dep-dev-deps-extraneous.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/linked-dep-dev-deps-extraneous.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/linked-dep-dev-deps-extraneous.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/linked-dep-dev-deps-extraneous.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +var test = require('tap').test +var path = require('path') +var fs = require('fs') +var mkdirp = require('mkdirp') +var rimraf = require('rimraf') +var readInstalled = require('../') + +var root = path.resolve(__dirname, 'root') +var pkg = path.resolve(root, 'pkg') +var pkgnm = path.resolve(pkg, 'node_modules') +var linkdepSrc = path.resolve(root, 'linkdep') +var linkdepLink = path.resolve(pkgnm, 'linkdep') +var devdep = path.resolve(linkdepSrc, 'node_modules', 'devdep') + +function pjson (dir, data) { + mkdirp.sync(dir) + var d = path.resolve(dir, 'package.json') + fs.writeFileSync(d, JSON.stringify(data)) +} + +test('setup', function (t) { + rimraf.sync(root) + pjson(pkg, { + name: 'root', + version: '1.2.3', + dependencies: { + linkdep: '' + } + }) + pjson(linkdepSrc, { + name: 'linkdep', + version: '1.2.3', + devDependencies: { + devdep: '' + } + }) + pjson(devdep, { + name: 'devdep', + version: '1.2.3' + }) + + mkdirp.sync(pkgnm) + fs.symlinkSync(linkdepSrc, linkdepLink, 'dir') + + t.end() +}) + +test('basic', function (t) { + readInstalled(pkg, { dev: true }, function (er, data) { + var dd = data.dependencies.linkdep.dependencies.devdep + t.notOk(dd.extraneous, 'linked dev dep should not be extraneous') + t.end() + }) +}) + +test('cleanup', 
function (t) { + rimraf.sync(root) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-installed/test/noargs.js nodejs-0.11.15/deps/npm/node_modules/read-installed/test/noargs.js --- nodejs-0.11.13/deps/npm/node_modules/read-installed/test/noargs.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-installed/test/noargs.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ var readInstalled = require("../read-installed.js") var test = require("tap").test -var json = require("./fixtures/package.json") +var json = require("../package.json") var path = require("path") var known = [].concat(Object.keys(json.dependencies) , Object.keys(json.optionalDependencies) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/AUTHORS nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/AUTHORS --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/AUTHORS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/AUTHORS 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -# Names sorted by how much code was originally theirs. -Isaac Z. 
Schlueter -Meryn Stol -Robert Kowalski \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/core_module_names.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/core_module_names.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/core_module_names.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/core_module_names.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -[ -"http", -"events", -"util", -"domain", -"cluster", -"buffer", -"stream", -"crypto", -"tls", -"fs", -"string_decoder", -"path", -"net", -"dgram", -"dns", -"https", -"url", -"punycode", -"readline", -"repl", -"vm", -"child_process", -"assert", -"zlib", -"tty", -"os", -"querystring" -] diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/extract_description.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/extract_description.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/extract_description.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/extract_description.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -module.exports = extractDescription - -// Extracts description from contents of a readme file in markdown format -function extractDescription (d) { - if (!d) return; - if (d === "ERROR: No README data found!") return; - // the first block of text before the first heading - // that isn't the first line heading - d = d.trim().split('\n') - for (var s = 0; d[s] && d[s].trim().match(/^(#|$)/); s ++); - var l = d.length - for (var e = s + 1; e < l && d[e].trim(); e ++); - return 
d.slice(s, e).join(' ').trim() -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/fixer.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/fixer.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/fixer.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/fixer.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,382 +0,0 @@ -var semver = require("semver") -var parseGitHubURL = require("github-url-from-git") -var depTypes = ["dependencies","devDependencies","optionalDependencies"] -var extractDescription = require("./extract_description") -var url = require("url") -var typos = require("./typos") -var coreModuleNames = require("./core_module_names") -var githubUserRepo = require("github-url-from-username-repo") - -var fixer = module.exports = { - // default warning function - warn: function() {}, - - fixRepositoryField: function(data) { - if (data.repositories) { - this.warn("repositories"); - data.repository = data.repositories[0] - } - if (!data.repository) return this.warn("missingRepository") - if (typeof data.repository === "string") { - data.repository = { - type: "git", - url: data.repository - } - } - var r = data.repository.url || "" - if (r) { - var ghurl = parseGitHubURL(r) - if (ghurl) { - r = ghurl.replace(/^https?:\/\//, 'git://') - } else if (githubUserRepo(r)) { - // repo has 'user/reponame' filled in as repo - data.repository.url = githubUserRepo(r) - } - } - - if (r.match(/github.com\/[^\/]+\/[^\/]+\.git\.git$/)) { - this.warn("brokenGitUrl", r) - } - } - -, fixTypos: function(data) { - Object.keys(typos.topLevel).forEach(function (d) { - if (data.hasOwnProperty(d)) { - this.warn("typo", d, typos.topLevel[d]) - } - }, this) - } - -, fixScriptsField: function(data) { - if (!data.scripts) return - if (typeof data.scripts 
!== "object") { - this.warn("nonObjectScripts") - delete data.scripts - } - Object.keys(data.scripts).forEach(function (k) { - if (typeof data.scripts[k] !== "string") { - this.warn("nonStringScript") - delete data.scripts[k] - } else if (typos.script[k]) { - this.warn("typo", k, typos.script[k], "scripts") - } - }, this) - } - -, fixFilesField: function(data) { - var files = data.files - if (files && !Array.isArray(files)) { - this.warn("nonArrayFiles") - delete data.files - } else if (data.files) { - data.files = data.files.filter(function(file) { - if (!file || typeof file !== "string") { - this.warn("invalidFilename", file) - return false - } else { - return true - } - }, this) - } - } - -, fixBinField: function(data) { - if (!data.bin) return; - if (typeof data.bin === "string") { - var b = {} - b[data.name] = data.bin - data.bin = b - } - } - -, fixManField: function(data) { - if (!data.man) return; - if (typeof data.man === "string") { - data.man = [ data.man ] - } - } -, fixBundleDependenciesField: function(data) { - var bdd = "bundledDependencies" - var bd = "bundleDependencies" - if (data[bdd] && !data[bd]) { - data[bd] = data[bdd] - delete data[bdd] - } - if (data[bd] && !Array.isArray(data[bd])) { - this.warn("nonArrayBundleDependencies") - delete data[bd] - } else if (data[bd]) { - data[bd] = data[bd].filter(function(bd) { - if (!bd || typeof bd !== 'string') { - this.warn("nonStringBundleDependency", bd) - return false - } else { - return true - } - }, this) - } - } - -, fixDependencies: function(data, strict) { - var loose = !strict - objectifyDeps(data, this.warn) - addOptionalDepsToDeps(data, this.warn) - this.fixBundleDependenciesField(data) - - ;['dependencies','devDependencies'].forEach(function(deps) { - if (!(deps in data)) return - if (!data[deps] || typeof data[deps] !== "object") { - this.warn("nonObjectDependencies", deps) - delete data[deps] - return - } - Object.keys(data[deps]).forEach(function (d) { - var r = data[deps][d] - if (typeof 
r !== 'string') { - this.warn("nonStringDependency", d, JSON.stringify(r)) - delete data[deps][d] - } - // "/" is not allowed as packagename for publishing, but for git-urls - // normalize shorthand-urls - if (githubUserRepo(data[deps][d])) { - data[deps][d] = githubUserRepo(data[deps][d]) - } - }, this) - }, this) - } - -, fixModulesField: function (data) { - if (data.modules) { - this.warn("deprecatedModules") - delete data.modules - } - } - -, fixKeywordsField: function (data) { - if (typeof data.keywords === "string") { - data.keywords = data.keywords.split(/,\s+/) - } - if (data.keywords && !Array.isArray(data.keywords)) { - delete data.keywords - this.warn("nonArrayKeywords") - } else if (data.keywords) { - data.keywords = data.keywords.filter(function(kw) { - if (typeof kw !== "string" || !kw) { - this.warn("nonStringKeyword"); - return false - } else { - return true - } - }, this) - } - } - -, fixVersionField: function(data, strict) { - // allow "loose" semver 1.0 versions in non-strict mode - // enforce strict semver 2.0 compliance in strict mode - var loose = !strict - if (!data.version) { - data.version = "" - return true - } - if (!semver.valid(data.version, loose)) { - throw new Error('Invalid version: "'+ data.version + '"') - } - data.version = semver.clean(data.version, loose) - return true - } - -, fixPeople: function(data) { - modifyPeople(data, unParsePerson) - modifyPeople(data, parsePerson) - } - -, fixNameField: function(data, strict) { - if (!data.name && !strict) { - data.name = "" - return - } - if (typeof data.name !== "string") { - throw new Error("name field must be a string.") - } - if (!strict) - data.name = data.name.trim() - ensureValidName(data.name, strict) - if (coreModuleNames.indexOf(data.name) !== -1) - this.warn("conflictingName", data.name) - } - - -, fixDescriptionField: function (data) { - if (data.description && typeof data.description !== 'string') { - this.warn("nonStringDescription") - delete data.description - } - if 
(data.readme && !data.description) - data.description = extractDescription(data.readme) - if(data.description === undefined) delete data.description; - if (!data.description) this.warn("missingDescription") - } - -, fixReadmeField: function (data) { - if (!data.readme) { - this.warn("missingReadme") - data.readme = "ERROR: No README data found!" - } - } - -, fixBugsField: function(data) { - if (!data.bugs && data.repository && data.repository.url) { - var gh = parseGitHubURL(data.repository.url) - if(gh) { - if(gh.match(/^https:\/\/github.com\//)) - data.bugs = {url: gh + "/issues"} - else // gist url - data.bugs = {url: gh} - } - } - else if(data.bugs) { - var emailRe = /^.+@.*\..+$/ - if(typeof data.bugs == "string") { - if(emailRe.test(data.bugs)) - data.bugs = {email:data.bugs} - else if(url.parse(data.bugs).protocol) - data.bugs = {url: data.bugs} - else - this.warn("nonEmailUrlBugsString") - } - else { - bugsTypos(data.bugs, this.warn) - var oldBugs = data.bugs - data.bugs = {} - if(oldBugs.url) { - if(typeof(oldBugs.url) == "string" && url.parse(oldBugs.url).protocol) - data.bugs.url = oldBugs.url - else - this.warn("nonUrlBugsUrlField") - } - if(oldBugs.email) { - if(typeof(oldBugs.email) == "string" && emailRe.test(oldBugs.email)) - data.bugs.email = oldBugs.email - else - this.warn("nonEmailBugsEmailField") - } - } - if(!data.bugs.email && !data.bugs.url) { - delete data.bugs - this.warn("emptyNormalizedBugs") - } - } - } - -, fixHomepageField: function(data) { - if (!data.homepage && data.repository && data.repository.url) { - var gh = parseGitHubURL(data.repository.url) - if (gh) - data.homepage = gh - else - return true - } else if (!data.homepage) - return true - - if(typeof data.homepage !== "string") { - this.warn("nonUrlHomepage") - return delete data.homepage - } - if(!url.parse(data.homepage).protocol) { - this.warn("missingProtocolHomepage") - data.homepage = "http://" + data.homepage - } - } -} - -function ensureValidName (name, strict) { - if 
(name.charAt(0) === "." || - name.match(/[\/@\s\+%:]/) || - name !== encodeURIComponent(name) || - (strict && name !== name.toLowerCase()) || - name.toLowerCase() === "node_modules" || - name.toLowerCase() === "favicon.ico") { - throw new Error("Invalid name: " + JSON.stringify(name)) - } -} - -function modifyPeople (data, fn) { - if (data.author) data.author = fn(data.author) - ;["maintainers", "contributors"].forEach(function (set) { - if (!Array.isArray(data[set])) return; - data[set] = data[set].map(fn) - }) - return data -} - -function unParsePerson (person) { - if (typeof person === "string") return person - var name = person.name || "" - var u = person.url || person.web - var url = u ? (" ("+u+")") : "" - var e = person.email || person.mail - var email = e ? (" <"+e+">") : "" - return name+email+url -} - -function parsePerson (person) { - if (typeof person !== "string") return person - var name = person.match(/^([^\(<]+)/) - var url = person.match(/\(([^\)]+)\)/) - var email = person.match(/<([^>]+)>/) - var obj = {} - if (name && name[0].trim()) obj.name = name[0].trim() - if (email) obj.email = email[1]; - if (url) obj.url = url[1]; - return obj -} - -function addOptionalDepsToDeps (data, warn) { - var o = data.optionalDependencies - if (!o) return; - var d = data.dependencies || {} - Object.keys(o).forEach(function (k) { - d[k] = o[k] - }) - data.dependencies = d -} - -function depObjectify (deps, type, warn) { - if (!deps) return {} - if (typeof deps === "string") { - deps = deps.trim().split(/[\n\r\s\t ,]+/) - } - if (!Array.isArray(deps)) return deps - warn("deprecatedArrayDependencies", type) - var o = {} - deps.filter(function (d) { - return typeof d === "string" - }).forEach(function(d) { - d = d.trim().split(/(:?[@\s><=])/) - var dn = d.shift() - var dv = d.join("") - dv = dv.trim() - dv = dv.replace(/^@/, "") - o[dn] = dv - }) - return o -} - -function objectifyDeps (data, warn) { - depTypes.forEach(function (type) { - if (!data[type]) return; - 
data[type] = depObjectify(data[type], type, warn) - }) -} - -function bugsTypos(bugs, warn) { - if (!bugs) return - Object.keys(bugs).forEach(function (k) { - if (typos.bugs[k]) { - warn("typo", k, typos.bugs[k], "bugs") - bugs[typos.bugs[k]] = bugs[k] - delete bugs[k] - } - }) -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/make_warning.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/make_warning.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/make_warning.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/make_warning.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -var util = require("util") -var messages = require("./warning_messages.json") - -module.exports = function() { - var args = Array.prototype.slice.call(arguments, 0) - var warningName = args.shift() - if (warningName == "typo") { - return makeTypoWarning.apply(null,args) - } - else { - var msgTemplate = messages[warningName] ? 
messages[warningName] : warningName + ": '%s'" - args.unshift(msgTemplate) - return util.format.apply(null, args) - } -} - -function makeTypoWarning (providedName, probableName, field) { - if (field) { - providedName = field + "['" + providedName + "']" - probableName = field + "['" + probableName + "']" - } - return util.format(messages.typo, providedName, probableName) -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/normalize.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/normalize.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/normalize.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/normalize.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -module.exports = normalize - -var fixer = require("./fixer") -var makeWarning = require("./make_warning") - -var fieldsToFix = ['name','version','description','repository','modules','scripts' - ,'files','bin','man','bugs','keywords','readme','homepage'] -var otherThingsToFix = ['dependencies','people', 'typos'] - -var thingsToFix = fieldsToFix.map(function(fieldName) { - return ucFirst(fieldName) + "Field" -}) -// two ways to do this in CoffeeScript on only one line, sub-70 chars: -// thingsToFix = fieldsToFix.map (name) -> ucFirst(name) + "Field" -// thingsToFix = (ucFirst(name) + "Field" for name in fieldsToFix) -thingsToFix = thingsToFix.concat(otherThingsToFix) - -function normalize (data, warn, strict) { - if(warn === true) warn = null, strict = true - if(!strict) strict = false - if(!warn || data.private) warn = function(msg) { /* noop */ } - - if (data.scripts && - data.scripts.install === "node-gyp rebuild" && - !data.scripts.preinstall) { - data.gypfile = true - } - fixer.warn = function() { warn(makeWarning.apply(null, 
arguments)) } - thingsToFix.forEach(function(thingName) { - fixer["fix" + ucFirst(thingName)](data, strict) - }) - data._id = data.name + "@" + data.version -} - -function ucFirst (string) { - return string.charAt(0).toUpperCase() + string.slice(1); -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/safe_format.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/safe_format.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/safe_format.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/safe_format.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -var util = require('util') - -module.exports = function() { - var args = Array.prototype.slice.call(arguments, 0) - args.forEach(function(arg) { - if (!arg) throw new TypeError('Bad arguments.') - }) - return util.format.apply(null, arguments) -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/typos.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/typos.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/typos.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/typos.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -{ - "topLevel": { - "dependancies": "dependencies" - ,"dependecies": "dependencies" - ,"depdenencies": "dependencies" - ,"devEependencies": "devDependencies" - ,"depends": "dependencies" - ,"dev-dependencies": "devDependencies" - ,"devDependences": "devDependencies" - ,"devDepenencies": "devDependencies" - ,"devdependencies": "devDependencies" - ,"repostitory": "repository" - ,"repo": 
"repository" - ,"prefereGlobal": "preferGlobal" - ,"hompage": "homepage" - ,"hampage": "homepage" - ,"autohr": "author" - ,"autor": "author" - ,"contributers": "contributors" - ,"publicationConfig": "publishConfig" - ,"script": "scripts" - }, - "bugs": { "web": "url", "name": "url" }, - "script": { "server": "start", "tests": "test" } -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/warning_messages.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/warning_messages.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/warning_messages.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/lib/warning_messages.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -{ - "repositories": "'repositories' (plural) Not supported. Please pick one as the 'repository' field" - ,"missingRepository": "No repository field." - ,"brokenGitUrl": "Probably broken git url: %s" - ,"nonObjectScripts": "scripts must be an object" - ,"nonStringScript": "script values must be string commands" - ,"nonArrayFiles": "Invalid 'files' member" - ,"invalidFilename": "Invalid filename in 'files' list: %s" - ,"nonArrayBundleDependencies": "Invalid 'bundleDependencies' list. Must be array of package names" - ,"nonStringBundleDependency": "Invalid bundleDependencies member: %s" - ,"nonObjectDependencies": "%s field must be an object" - ,"nonStringDependency": "Invalid dependency: %s %s" - ,"deprecatedArrayDependencies": "specifying %s as array is deprecated" - ,"deprecatedModules": "modules field is deprecated" - ,"nonArrayKeywords": "keywords should be an array of strings" - ,"nonStringKeyword": "keywords should be an array of strings" - ,"conflictingName": "%s is also the name of a node core module." 
- ,"nonStringDescription": "'description' field should be a string" - ,"missingDescription": "No description" - ,"missingReadme": "No README data" - ,"nonEmailUrlBugsString": "Bug string field must be url, email, or {email,url}" - ,"nonUrlBugsUrlField": "bugs.url field must be a string url. Deleted." - ,"nonEmailBugsEmailField": "bugs.email field must be a string email. Deleted." - ,"emptyNormalizedBugs": "Normalized value of bugs field is an empty object. Deleted." - ,"nonUrlHomepage": "homepage field must be a string url. Deleted." - ,"missingProtocolHomepage": "homepage field must start with a protocol." - ,"typo": "%s should probably be %s." -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/LICENSE nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -This package contains code originally written by Isaac Z. Schlueter. -Used with permission. - -Copyright (c) Meryn Stol ("Author") -All rights reserved. - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.npmignore nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.npmignore 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -/node_modules/ \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/package.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/package.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -{ - "name": "normalize-package-data", - "version": "0.2.13", - "author": { - "name": "Meryn Stol", - "email": "merynstol@gmail.com" - }, - "description": "Normalizes 
data that can be found in package.json files.", - "repository": { - "type": "git", - "url": "git://github.com/meryn/normalize-package-data.git" - }, - "main": "lib/normalize.js", - "scripts": { - "test": "tap test/*.js" - }, - "dependencies": { - "github-url-from-git": "~1.1.1", - "github-url-from-username-repo": "^0.1.0", - "semver": "2" - }, - "devDependencies": { - "tap": "~0.2.5", - "underscore": "~1.4.4", - "async": "~0.2.7" - }, - "contributors": [ - { - "name": "Isaac Z. Schlueter", - "email": "i@izs.me" - }, - { - "name": "Meryn Stol", - "email": "merynstol@gmail.com" - }, - { - "name": "Robert Kowalski", - "email": "rok@kowalski.gd" - } - ], - "readme": "# normalize-package-data [![Build Status](https://travis-ci.org/meryn/normalize-package-data.png?branch=master)](https://travis-ci.org/meryn/normalize-package-data)\n\nnormalize-package data exports a function that normalizes package metadata. This data is typically found in a package.json file, but in principle could come from any source - for example the npm registry.\n\nnormalize-package-data is used by [read-package-json](https://npmjs.org/package/read-package-json) to normalize the data it reads from a package.json file. In turn, read-package-json is used by [npm](https://npmjs.org/package/npm) and various npm-related tools.\n\n## Installation\n\n```\nnpm install normalize-package-data\n```\n\n## Usage\n\nBasic usage is really simple. You call the function that normalize-package-data exports. 
Let's call it `normalizeData`.\n\n```javascript\nnormalizeData = require('normalize-package-data')\npackageData = fs.readfileSync(\"package.json\")\nnormalizeData(packageData)\n// packageData is now normalized\n```\n\n#### Strict mode\n\nYou may activate strict validation by passing true as the second argument.\n\n```javascript\nnormalizeData = require('normalize-package-data')\npackageData = fs.readfileSync(\"package.json\")\nwarnFn = function(msg) { console.error(msg) }\nnormalizeData(packageData, true)\n// packageData is now normalized\n```\n\nIf strict mode is activated, only Semver 2.0 version strings are accepted. Otherwise, Semver 1.0 strings are accepted as well. Packages must have a name, and the name field must not have contain leading or trailing whitespace.\n\n#### Warnings\n\nOptionally, you may pass a \"warning\" function. It gets called whenever the `normalizeData` function encounters something that doesn't look right. It indicates less than perfect input data.\n\n```javascript\nnormalizeData = require('normalize-package-data')\npackageData = fs.readfileSync(\"package.json\")\nwarnFn = function(msg) { console.error(msg) }\nnormalizeData(packageData, warnFn)\n// packageData is now normalized. Any number of warnings may have been logged.\n```\n\nYou may combine strict validation with warnings by passing `true` as the second argument, and `warnFn` as third.\n\nWhen `private` field is set to `true`, warnings will be suppressed.\n\n### Potential exceptions\n\nIf the supplied data has an invalid name or version vield, `normalizeData` will throw an error. Depending on where you call `normalizeData`, you may want to catch these errors so can pass them to a callback.\n\n## What normalization (currently) entails\n\n* The value of `name` field gets trimmed (unless in strict mode).\n* The value of the `version` field gets cleaned by `semver.clean`. 
See [documentation for the semver module](https://github.com/isaacs/node-semver).\n* If `name` and/or `version` fields are missing, they are set to empty strings.\n* If `files` field is not an array, it will be removed.\n* If `bin` field is a string, then `bin` field will become an object with `name` set to the value of the `name` field, and `bin` set to the original string value.\n* If `man` field is a string, it will become an array with the original string as its sole member.\n* If `keywords` field is string, it is considered to be a list of keywords separated by one or more white-space characters. It gets converted to an array by splitting on `\\s+`.\n* All people fields (`author`, `maintainers`, `contributors`) get converted into objects with name, email and url properties.\n* If `bundledDependencies` field (a typo) exists and `bundleDependencies` field does not, `bundledDependencies` will get renamed to `bundleDependencies`.\n* If the value of any of the dependencies fields (`dependencies`, `devDependencies`, `optionalDependencies`) is a string, it gets converted into an object with familiar `name=>value` pairs.\n* The values in `optionalDependencies` get added to `dependencies`. 
The `optionalDependencies` array is left untouched.\n* If `description` field does not exists, but `readme` field does, then (more or less) the first paragraph of text that's found in the readme is taken as value for `description`.\n* If `repository` field is a string, it will become an object with `url` set to the original string value, and `type` set to `\"git\"`.\n* If `repository.url` is not a valid url, but in the style of \"[owner-name]/[repo-name]\", `repository.url` will be set to git://github.com/[owner-name]/[repo-name]\n* If `bugs` field is a string, the value of `bugs` field is changed into an object with `url` set to the original string value.\n* If `bugs` field does not exist, but `repository` field points to a repository hosted on GitHub, the value of the `bugs` field gets set to an url in the form of https://github.com/[owner-name]/[repo-name]/issues . If the repository field points to a GitHub Gist repo url, the associated http url is chosen.\n* If `bugs` field is an object, the resulting value only has email and url properties. If email and url properties are not strings, they are ignored. If no valid values for either email or url is found, bugs field will be removed.\n* If `homepage` field is not a string, it will be removed.\n* If the url in the `homepage` field does not specify a protocol, then http is assumed. For example, `myproject.org` will be changed to `http://myproject.org`.\n* If `homepage` field does not exist, but `repository` field points to a repository hosted on GitHub, the value of the `homepage` field gets set to an url in the form of https://github.com/[owner-name]/[repo-name]/ . If the repository field points to a GitHub Gist repo url, the associated http url is chosen.\n\n### Rules for name field\n\nIf `name` field is given, the value of the name field must be a string. 
The string may not:\n\n* start with a period.\n* contain the following characters: `/@\\s+%`\n* contain and characters that would need to be encoded for use in urls.\n* resemble the word `node_modules` or `favicon.ico` (case doesn't matter).\n\n### Rules for version field\n\nIf `version` field is given, the value of the version field must be a valid *semver* string, as determined by the `semver.valid` method. See [documentation for the semver module](https://github.com/isaacs/node-semver).\n\n## Credits\n\nThis package contains code based on read-package-json written by Isaac Z. Schlueter. Used with permisson.\n\n## License\n\nnormalize-package-data is released under the [BSD 2-Clause License](http://opensource.org/licenses/MIT). \nCopyright (c) 2013 Meryn Stol ", - "readmeFilename": "README.md", - "bugs": { - "url": "https://github.com/meryn/normalize-package-data/issues" - }, - "homepage": "https://github.com/meryn/normalize-package-data", - "_id": "normalize-package-data@0.2.13", - "_shasum": "50f9fd9e77b1c8411cd231db2962e73963de774d", - "_from": "normalize-package-data@^0.2.13" -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/README.md nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/README.md --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -# normalize-package-data [![Build Status](https://travis-ci.org/meryn/normalize-package-data.png?branch=master)](https://travis-ci.org/meryn/normalize-package-data) - -normalize-package data exports a function that normalizes package metadata. This data is typically found in a package.json file, but in principle could come from any source - for example the npm registry. 
- -normalize-package-data is used by [read-package-json](https://npmjs.org/package/read-package-json) to normalize the data it reads from a package.json file. In turn, read-package-json is used by [npm](https://npmjs.org/package/npm) and various npm-related tools. - -## Installation - -``` -npm install normalize-package-data -``` - -## Usage - -Basic usage is really simple. You call the function that normalize-package-data exports. Let's call it `normalizeData`. - -```javascript -normalizeData = require('normalize-package-data') -packageData = fs.readfileSync("package.json") -normalizeData(packageData) -// packageData is now normalized -``` - -#### Strict mode - -You may activate strict validation by passing true as the second argument. - -```javascript -normalizeData = require('normalize-package-data') -packageData = fs.readfileSync("package.json") -warnFn = function(msg) { console.error(msg) } -normalizeData(packageData, true) -// packageData is now normalized -``` - -If strict mode is activated, only Semver 2.0 version strings are accepted. Otherwise, Semver 1.0 strings are accepted as well. Packages must have a name, and the name field must not have contain leading or trailing whitespace. - -#### Warnings - -Optionally, you may pass a "warning" function. It gets called whenever the `normalizeData` function encounters something that doesn't look right. It indicates less than perfect input data. - -```javascript -normalizeData = require('normalize-package-data') -packageData = fs.readfileSync("package.json") -warnFn = function(msg) { console.error(msg) } -normalizeData(packageData, warnFn) -// packageData is now normalized. Any number of warnings may have been logged. -``` - -You may combine strict validation with warnings by passing `true` as the second argument, and `warnFn` as third. - -When `private` field is set to `true`, warnings will be suppressed. 
- -### Potential exceptions - -If the supplied data has an invalid name or version vield, `normalizeData` will throw an error. Depending on where you call `normalizeData`, you may want to catch these errors so can pass them to a callback. - -## What normalization (currently) entails - -* The value of `name` field gets trimmed (unless in strict mode). -* The value of the `version` field gets cleaned by `semver.clean`. See [documentation for the semver module](https://github.com/isaacs/node-semver). -* If `name` and/or `version` fields are missing, they are set to empty strings. -* If `files` field is not an array, it will be removed. -* If `bin` field is a string, then `bin` field will become an object with `name` set to the value of the `name` field, and `bin` set to the original string value. -* If `man` field is a string, it will become an array with the original string as its sole member. -* If `keywords` field is string, it is considered to be a list of keywords separated by one or more white-space characters. It gets converted to an array by splitting on `\s+`. -* All people fields (`author`, `maintainers`, `contributors`) get converted into objects with name, email and url properties. -* If `bundledDependencies` field (a typo) exists and `bundleDependencies` field does not, `bundledDependencies` will get renamed to `bundleDependencies`. -* If the value of any of the dependencies fields (`dependencies`, `devDependencies`, `optionalDependencies`) is a string, it gets converted into an object with familiar `name=>value` pairs. -* The values in `optionalDependencies` get added to `dependencies`. The `optionalDependencies` array is left untouched. -* If `description` field does not exists, but `readme` field does, then (more or less) the first paragraph of text that's found in the readme is taken as value for `description`. -* If `repository` field is a string, it will become an object with `url` set to the original string value, and `type` set to `"git"`. 
-* If `repository.url` is not a valid url, but in the style of "[owner-name]/[repo-name]", `repository.url` will be set to git://github.com/[owner-name]/[repo-name] -* If `bugs` field is a string, the value of `bugs` field is changed into an object with `url` set to the original string value. -* If `bugs` field does not exist, but `repository` field points to a repository hosted on GitHub, the value of the `bugs` field gets set to an url in the form of https://github.com/[owner-name]/[repo-name]/issues . If the repository field points to a GitHub Gist repo url, the associated http url is chosen. -* If `bugs` field is an object, the resulting value only has email and url properties. If email and url properties are not strings, they are ignored. If no valid values for either email or url is found, bugs field will be removed. -* If `homepage` field is not a string, it will be removed. -* If the url in the `homepage` field does not specify a protocol, then http is assumed. For example, `myproject.org` will be changed to `http://myproject.org`. -* If `homepage` field does not exist, but `repository` field points to a repository hosted on GitHub, the value of the `homepage` field gets set to an url in the form of https://github.com/[owner-name]/[repo-name]/ . If the repository field points to a GitHub Gist repo url, the associated http url is chosen. - -### Rules for name field - -If `name` field is given, the value of the name field must be a string. The string may not: - -* start with a period. -* contain the following characters: `/@\s+%` -* contain and characters that would need to be encoded for use in urls. -* resemble the word `node_modules` or `favicon.ico` (case doesn't matter). - -### Rules for version field - -If `version` field is given, the value of the version field must be a valid *semver* string, as determined by the `semver.valid` method. See [documentation for the semver module](https://github.com/isaacs/node-semver). 
- -## Credits - -This package contains code based on read-package-json written by Isaac Z. Schlueter. Used with permisson. - -## License - -normalize-package-data is released under the [BSD 2-Clause License](http://opensource.org/licenses/MIT). -Copyright (c) 2013 Meryn Stol \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/basic.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/basic.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/basic.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -var tap = require("tap") -var normalize = require("../lib/normalize") -var path = require("path") -var fs = require("fs") - -tap.test("basic test", function (t) { - var p = path.resolve(__dirname, "./fixtures/read-package-json.json") - fs.readFile (p, function (err, contents) { - if (err) throw err; - var originalData = JSON.parse(contents.toString()) - var data = JSON.parse(contents.toString()) - normalize(data) - t.ok(data) - verifyFields(t, data, originalData) - t.end() - }) -}) - -function verifyFields (t, normalized, original) { - t.equal(normalized.version, original.version, "Version field stays same") - t.equal(normalized._id, normalized.name + "@" + normalized.version, "It gets good id.") - t.equal(normalized.name, original.name, "Name stays the same.") - t.type(normalized.author, "object", "author field becomes object") - t.deepEqual(normalized.scripts, original.scripts, "scripts field (object) stays same") - t.equal(normalized.main, original.main) - // optional deps are folded in. 
- t.deepEqual(normalized.optionalDependencies, - original.optionalDependencies) - t.has(normalized.dependencies, original.optionalDependencies, "opt depedencies are copied into dependencies") - t.has(normalized.dependencies, original.dependencies, "regular depedencies stay in place") - t.deepEqual(normalized.devDependencies, original.devDependencies) - t.type(normalized.bugs, "object", "bugs should become object") - t.equal(normalized.bugs.url, "https://github.com/isaacs/read-package-json/issues") -} diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/consistency.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/consistency.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/consistency.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/consistency.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -var tap = require("tap") -var normalize = require("../lib/normalize") -var path = require("path") -var fs = require("fs") -var _ = require("underscore") -var async = require("async") - -var data, clonedData -var warn - -tap.test("consistent normalization", function(t) { - path.resolve(__dirname, "./fixtures/read-package-json.json") - fs.readdir (__dirname + "/fixtures", function (err, entries) { - // entries = ['coffee-script.json'] // uncomment to limit to a specific file - verifyConsistency = function(entryName, next) { - warn = function(msg) { - // t.equal("",msg) // uncomment to have some kind of logging of warnings - } - filename = __dirname + "/fixtures/" + entryName - fs.readFile(filename, function(err, contents) { - if (err) return next(err) - data = JSON.parse(contents.toString()) - normalize(data, warn) - clonedData = _.clone(data) - normalize(data, warn) - t.deepEqual(clonedData, data, - "Normalization of " + 
entryName + " is consistent.") - next(null) - }) // fs.readFile - } // verifyConsistency - async.forEach(entries, verifyConsistency, function(err) { - if (err) throw err - t.end() - }) - }) // fs.readdir -}) // tap.test \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/dependencies.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/dependencies.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/dependencies.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/dependencies.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -var tap = require("tap") -var normalize = require("../lib/normalize") - -var warningMessages = require("../lib/warning_messages.json") -var safeFormat = require("../lib/safe_format") - -tap.test("warn if dependency contains anything else but a string", function(t) { - var a - var warnings = [] - function warn(w) { - warnings.push(w) - } - normalize(a={ - dependencies: { "a": 123}, - devDependencies: { "b": 456}, - optionalDependencies: { "c": 789} - }, warn) - - var wanted1 = safeFormat(warningMessages.nonStringDependency, "a", 123) - var wanted2 = safeFormat(warningMessages.nonStringDependency, "b", 456) - var wanted3 = safeFormat(warningMessages.nonStringDependency, "c", 789) - t.ok(~warnings.indexOf(wanted1), wanted1) - t.ok(~warnings.indexOf(wanted2), wanted2) - t.ok(~warnings.indexOf(wanted3), wanted3) - t.end() -}) - -tap.test("warn if bundleDependencies array contains anything else but strings", function(t) { - var a - var warnings = [] - function warn(w) { - warnings.push(w) - } - normalize(a={ - bundleDependencies: ["abc", 123, {foo:"bar"}] - }, warn) - - var wanted1 = safeFormat(warningMessages.nonStringBundleDependency, 123) - var wanted2 = 
safeFormat(warningMessages.nonStringBundleDependency, {foo:"bar"}) - t.ok(~warnings.indexOf(wanted1), wanted1) - t.ok(~warnings.indexOf(wanted2), wanted2) - t.end() -}) \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/async.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/async.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/async.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/async.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -{ - "name": "async", - "description": "Higher-order functions and common patterns for asynchronous code", - "main": "./lib/async", - "author": "Caolan McMahon", - "version": "0.2.6", - "repository" : { - "type" : "git", - "url" : "http://github.com/caolan/async.git" - }, - "bugs" : { - "url" : "http://github.com/caolan/async/issues" - }, - "licenses" : [ - { - "type" : "MIT", - "url" : "http://github.com/caolan/async/raw/master/LICENSE" - } - ], - "devDependencies": { - "nodeunit": ">0.0.0", - "uglify-js": "1.2.x", - "nodelint": ">0.0.0" - }, - "jam": { - "main": "lib/async.js", - "include": [ - "lib/async.js", - "README.md", - "LICENSE" - ] - }, - "scripts": { - "test": "nodeunit test/test-async.js" - } -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/bcrypt.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/bcrypt.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/bcrypt.json 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/bcrypt.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -{ - "name": "bcrypt", - "description": "A bcrypt library for NodeJS.", - "keywords": [ - "bcrypt", - "password", - "auth", - "authentication", - "encryption", - "crypt", - "crypto" - ], - "main": "./bcrypt", - "version": "0.7.5", - "author": "Nick Campbell (http://github.com/ncb000gt)", - "engines": { - "node": ">= 0.6.0" - }, - "repository": { - "type": "git", - "url": "http://github.com/ncb000gt/node.bcrypt.js.git" - }, - "licenses": [ - { - "type": "MIT" - } - ], - "bugs": { - "url": "http://github.com/ncb000gt/node.bcrypt.js/issues" - }, - "scripts": { - "test": "node-gyp configure build && nodeunit test" - }, - "dependencies": { - "bindings": "1.0.0" - }, - "devDependencies": { - "nodeunit": ">=0.6.4" - }, - "contributors": [ - "Antonio Salazar Cardozo (https://github.com/Shadowfiend)", - "Van Nguyen (https://github.com/thegoleffect)", - "David Trejo (https://github.com/dtrejo)", - "Ben Glow (https://github.com/pixelglow)", - "NewITFarmer.com <> (https://github.com/newitfarmer)", - "Alfred Westerveld (https://github.com/alfredwesterveld)", - "Vincent Côté-Roy (https://github.com/vincentcr)", - "Lloyd Hilaiel (https://github.com/lloyd)", - "Roman Shtylman (https://github.com/shtylman)", - "Vadim Graboys (https://github.com/vadimg)", - "Ben Noorduis <> (https://github.com/bnoordhuis)", - "Nate Rajlich (https://github.com/tootallnate)", - "Sean McArthur (https://github.com/seanmonstar)", - "Fanie Oosthuysen (https://github.com/weareu)" - ] -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/coffee-script.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/coffee-script.json --- 
nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/coffee-script.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/coffee-script.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -{ - "name": "coffee-script", - "description": "Unfancy JavaScript", - "keywords": ["javascript", "language", "coffeescript", "compiler"], - "author": "Jeremy Ashkenas", - "version": "1.6.2", - "licenses": [{ - "type": "MIT", - "url": "https://raw.github.com/jashkenas/coffee-script/master/LICENSE" - }], - "engines": { - "node": ">=0.8.0" - }, - "directories" : { - "lib" : "./lib/coffee-script" - }, - "main" : "./lib/coffee-script/coffee-script", - "bin": { - "coffee": "./bin/coffee", - "cake": "./bin/cake" - }, - "scripts": { - "test": "node ./bin/cake test" - }, - "homepage": "http://coffeescript.org", - "bugs": "https://github.com/jashkenas/coffee-script/issues", - "repository": { - "type": "git", - "url": "git://github.com/jashkenas/coffee-script.git" - }, - "devDependencies": { - "uglify-js": "~2.2", - "jison": ">=0.2.0" - } -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/http-server.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/http-server.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/http-server.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/http-server.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -{ - "name": "http-server", - "preferGlobal": true, - "version": "0.3.0", - "author": "Nodejitsu ", - "description": "a simple zero-configuration command-line http server", - "contributors": 
[ - { - "name": "Marak Squires", - "email": "marak@nodejitsu.com" - } - ], - "bin": { - "http-server": "./bin/http-server" - }, - "scripts": { - "start": "node ./bin/http-server", - "test": "vows --spec --isolate", - "predeploy": "echo This will be run before deploying the app", - "postdeploy": "echo This will be run after deploying the app" - }, - "main": "./lib/http-server", - "repository": { - "type": "git", - "url": "https://github.com/nodejitsu/http-server.git" - }, - "keywords": [ - "cli", - "http", - "server" - ], - "dependencies" : { - "colors" : "*", - "flatiron" : "0.1.x", - "optimist" : "0.2.x", - "union" : "0.1.x", - "ecstatic" : "0.1.x", - "plates" : "https://github.com/flatiron/plates/tarball/master" - }, - "analyze": false, - "devDependencies": { - "vows" : "0.5.x", - "request" : "2.1.x" - }, - "bundledDependencies": [ - "union", - "ecstatic" - ], - "license": "MIT", - "engines": { - "node": ">=0.6" - } -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/movefile.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/movefile.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/movefile.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/movefile.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -{ - "name": "movefile", - "description": "rename implementation working over devices", - "version": "0.2.0", - "author": "yazgazan ", - "main": "./build/Release/movefile", - "keywords": ["move", "file", "rename"], - "repository": "git://github.com/yazgazan/movefile.git", - "directories": { - "lib": "./build/Release/" - }, - "scripts": { - "install": "./node_modules/node-gyp/bin/node-gyp.js configure && ./node_modules/node-gyp/bin/node-gyp.js build" 
- }, - "engines": { - "node": "*" - }, - "dependencies": { - "node-gyp": "~0.9.1" - } -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/node-module_exist.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/node-module_exist.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/node-module_exist.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/node-module_exist.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -{ - "name": "node-module_exist", - "description": "Find if a NodeJS module is available to require or not", - "version": "0.0.1", - "main": "module_exist.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "repository": { - "type": "git", - "url": "git@gist.github.com:3135914.git" - }, - "homepage": "https://github.com/FGRibreau", - "author": { - "name": "Francois-Guillaume Ribreau", - "url": "http://fgribreau.com.com/" - }, - "devDependencies": { - "nodeunit": "~0.7.4" - }, - "keywords": [ - "core", - "modules" - ], - "license": "MIT" -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/no-description.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/no-description.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/no-description.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/no-description.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -{ - "name": "foo-bar-package", - "version": "0.0.1" 
-} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/npm.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/npm.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/npm.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/npm.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -{ - "version": "1.2.17", - "name": "npm", - "publishConfig": { - "proprietary-attribs": false - }, - "description": "A package manager for node", - "keywords": [ - "package manager", - "modules", - "install", - "package.json" - ], - "preferGlobal": true, - "config": { - "publishtest": false - }, - "homepage": "https://npmjs.org/doc/", - "author": "Isaac Z. Schlueter (http://blog.izs.me)", - "repository": { - "type": "git", - "url": "https://github.com/isaacs/npm" - }, - "bugs": { - "email": "npm-@googlegroups.com", - "url": "http://github.com/isaacs/npm/issues" - }, - "directories": { - "doc": "./doc", - "man": "./man", - "lib": "./lib", - "bin": "./bin" - }, - "main": "./lib/npm.js", - "bin": "./bin/npm-cli.js", - "dependencies": { - "semver": "~1.1.2", - "ini": "~1.1.0", - "slide": "1", - "abbrev": "~1.0.4", - "graceful-fs": "~1.2.0", - "minimatch": "~0.2.11", - "nopt": "~2.1.1", - "rimraf": "2", - "request": "~2.9", - "which": "1", - "tar": "~0.1.17", - "fstream": "~0.1.22", - "block-stream": "*", - "inherits": "1", - "mkdirp": "~0.3.3", - "read": "~1.0.4", - "lru-cache": "~2.3.0", - "node-gyp": "~0.9.3", - "fstream-npm": "~0.1.3", - "uid-number": "0", - "archy": "0", - "chownr": "0", - "npmlog": "0", - "ansi": "~0.1.2", - "npm-registry-client": "~0.2.18", - "read-package-json": "~0.3.0", - "read-installed": "0", - "glob": "~3.1.21", - "init-package-json": "0.0.6", - "osenv": 
"0", - "lockfile": "~0.3.0", - "retry": "~0.6.0", - "once": "~1.1.1", - "npmconf": "0", - "opener": "~1.3.0", - "chmodr": "~0.1.0", - "cmd-shim": "~1.1.0" - }, - "bundleDependencies": [ - "semver", - "ini", - "slide", - "abbrev", - "graceful-fs", - "minimatch", - "nopt", - "rimraf", - "request", - "which", - "tar", - "fstream", - "block-stream", - "inherits", - "mkdirp", - "read", - "lru-cache", - "node-gyp", - "fstream-npm", - "uid-number", - "archy", - "chownr", - "npmlog", - "ansi", - "npm-registry-client", - "read-package-json", - "read-installed", - "glob", - "init-package-json", - "osenv", - "lockfile", - "retry", - "once", - "npmconf", - "opener", - "chmodr", - "cmd-shim" - ], - "devDependencies": { - "ronn": "~0.3.6", - "tap": "~0.4.0" - }, - "engines": { - "node": ">=0.6", - "npm": "1" - }, - "scripts": { - "test": "node ./test/run.js && tap test/tap/*.js", - "tap": "tap test/tap/*.js", - "prepublish": "node bin/npm-cli.js prune ; rm -rf test/*/*/node_modules ; make -j4 doc", - "dumpconf": "env | grep npm | sort | uniq", - "echo": "node bin/npm-cli.js" - }, - "licenses": [ - { - "type": "MIT +no-false-attribs", - "url": "https://github.com/isaacs/npm/raw/master/LICENSE" - } - ] -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/read-package-json.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/read-package-json.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/read-package-json.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/read-package-json.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -{ - "name": "read-package-json", - "version": "0.1.1", - "author": "Isaac Z. 
Schlueter (http://blog.izs.me/)", - "description": "The thing npm uses to read package.json files with semantics and defaults and validation", - "repository": { - "type": "git", - "url": "git://github.com/isaacs/read-package-json.git" - }, - "main": "read-json.js", - "scripts": { - "test": "tap test/*.js" - }, - "dependencies": { - "glob": "~3.1.9", - "lru-cache": "~1.1.0", - "semver": "~1.0.14", - "slide": "~1.1.3" - }, - "devDependencies": { - "tap": "~0.2.5" - }, - "optionalDependencies": { - "npmlog": "0", - "graceful-fs": "~1.1.8" - } -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/request.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/request.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/request.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/request.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -{ - "name": "request", - "description": "Simplified HTTP request client.", - "tags": [ - "http", - "simple", - "util", - "utility" - ], - "version": "2.16.7", - "author": "Mikeal Rogers ", - "repository": { - "type": "git", - "url": "http://github.com/mikeal/request.git" - }, - "bugs": { - "url": "http://github.com/mikeal/request/issues" - }, - "engines": [ - "node >= 0.8.0" - ], - "main": "index.js", - "dependencies": { - "form-data": "~0.0.3", - "mime": "~1.2.7", - "hawk": "~0.10.2", - "node-uuid": "~1.4.0", - "cookie-jar": "~0.2.0", - "aws-sign": "~0.2.0", - "oauth-sign": "~0.2.0", - "forever-agent": "~0.2.0", - "tunnel-agent": "~0.2.0", - "json-stringify-safe": "~3.0.0", - "qs": "~0.5.4" - }, - "scripts": { - "test": "node tests/run.js" - } -} \ No newline at end of file diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/underscore.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/underscore.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/underscore.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/fixtures/underscore.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -{ - "name" : "underscore", - "description" : "JavaScript's functional programming helper library.", - "homepage" : "http://underscorejs.org", - "keywords" : ["util", "functional", "server", "client", "browser"], - "author" : "Jeremy Ashkenas ", - "repository" : {"type": "git", "url": "git://github.com/documentcloud/underscore.git"}, - "main" : "underscore.js", - "version" : "1.4.4", - "devDependencies": { - "phantomjs": "1.9.0-1" - }, - "scripts": { - "test": "phantomjs test/vendor/runner.js test/index.html?noglobals=true" - }, - "license" : "MIT" -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/github-urls.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/github-urls.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/github-urls.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/github-urls.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -var tap = require("tap") -var normalize = require("../lib/normalize") -var path = require("path") -var fs = require("fs") -var _ = require("underscore") -var async = require("async") - -var data, clonedData -var warn - -tap.test("consistent normalization", function(t) { - entries 
= [ - 'read-package-json.json', - 'http-server.json', - "movefile.json", - "node-module_exist.json" - ] - verifyConsistency = function(entryName, next) { - warn = function(msg) { - // t.equal("",msg) // uncomment to have some kind of logging of warnings - } - filename = __dirname + "/fixtures/" + entryName - fs.readFile(filename, function(err, contents) { - if (err) return next(err) - data = JSON.parse(contents.toString()) - normalize(data, warn) - if(data.name == "node-module_exist") { - t.same(data.bugs.url, "https://gist.github.com/3135914") - } - if(data.name == "read-package-json") { - t.same(data.bugs.url, "https://github.com/isaacs/read-package-json/issues") - } - if(data.name == "http-server") { - t.same(data.bugs.url, "https://github.com/nodejitsu/http-server/issues") - } - if(data.name == "movefile") { - t.same(data.bugs.url, "https://github.com/yazgazan/movefile/issues") - } - next(null) - }) // fs.readFile - } // verifyConsistency - async.forEach(entries, verifyConsistency, function(err) { - if (err) throw err - t.end() - }) -}) // tap.test \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/normalize.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/normalize.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/normalize.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/normalize.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,226 +0,0 @@ -var tap = require("tap") -var fs = require("fs") -var path = require("path") - -var globals = Object.keys(global) - -var normalize = require("../lib/normalize") -var warningMessages = require("../lib/warning_messages.json") -var safeFormat = require("../lib/safe_format") - -var rpjPath = path.resolve(__dirname,"./fixtures/read-package-json.json") 
-tap.test("normalize some package data", function(t) { - var packageData = require(rpjPath) - var warnings = [] - normalize(packageData, function(warning) { - warnings.push(warning) - }) - // there's no readme data in this particular object - t.equal( warnings.length, 1, "There's exactly one warning.") - fs.readFile(rpjPath, function(err, data) { - if(err) throw err - // Various changes have been made - t.notEqual(packageData, JSON.parse(data), "Output is different from input.") - t.end() - }) -}) - -tap.test("runs without passing warning function", function(t) { - var packageData = require(rpjPath) - fs.readFile(rpjPath, function(err, data) { - if(err) throw err - normalize(JSON.parse(data)) - t.ok(true, "If you read this, this means I'm still alive.") - t.end() - }) -}) - -tap.test("empty object", function(t) { - var packageData = {} - var expect = - { name: '', - version: '', - readme: 'ERROR: No README data found!', - _id: '@' } - - var warnings = [] - function warn(m) { - warnings.push(m) - } - normalize(packageData, warn) - t.same(packageData, expect) - t.same(warnings, [ - warningMessages.missingDescription, - warningMessages.missingRepository, - warningMessages.missingReadme - ]) - t.end() -}) - -tap.test("core module name", function(t) { - var warnings = [] - function warn(m) { - warnings.push(m) - } - var a - normalize(a={ - name: "http", - readme: "read yourself how about", - homepage: 123, - bugs: "what is this i don't even", - repository: "Hello." 
- }, warn) - - var expect = [ - safeFormat(warningMessages.conflictingName, 'http'), - warningMessages.nonEmailUrlBugsString, - warningMessages.emptyNormalizedBugs, - warningMessages.nonUrlHomepage - ] - t.same(warnings, expect) - t.end() -}) - -tap.test("urls required", function(t) { - var warnings = [] - function warn(w) { - warnings.push(w) - } - normalize({ - bugs: { - url: "/1", - email: "not an email address" - } - }, warn) - var a - normalize(a={ - readme: "read yourself how about", - homepage: 123, - bugs: "what is this i don't even", - repository: "Hello." - }, warn) - - console.error(a) - - var expect = - [ warningMessages.missingDescription, - warningMessages.missingRepository, - warningMessages.nonUrlBugsUrlField, - warningMessages.nonEmailBugsEmailField, - warningMessages.emptyNormalizedBugs, - warningMessages.missingReadme, - warningMessages.nonEmailUrlBugsString, - warningMessages.emptyNormalizedBugs, - warningMessages.nonUrlHomepage ] - t.same(warnings, expect) - t.end() -}) - -tap.test("homepage field must start with a protocol.", function(t) { - var warnings = [] - function warn(w) { - warnings.push(w) - } - var a - normalize(a={ - homepage: 'example.org' - }, warn) - - console.error(a) - - var expect = - [ warningMessages.missingDescription, - warningMessages.missingRepository, - warningMessages.missingReadme, - warningMessages.missingProtocolHomepage ] - t.same(warnings, expect) - t.same(a.homepage, 'http://example.org') - t.end() -}) - -tap.test("gist bugs url", function(t) { - var d = { - repository: "git@gist.github.com:123456.git" - } - normalize(d) - t.same(d.repository, { type: 'git', url: 'git@gist.github.com:123456.git' }) - t.same(d.bugs, { url: 'https://gist.github.com/123456' }) - t.end(); -}); - -tap.test("singularize repositories", function(t) { - var d = {repositories:["git@gist.github.com:123456.git"]} - normalize(d) - t.same(d.repository, { type: 'git', url: 'git@gist.github.com:123456.git' }) - t.end() -}); - -tap.test("treat 
visionmedia/express as github repo", function(t) { - var d = {repository: {type: "git", url: "visionmedia/express"}} - normalize(d) - t.same(d.repository, { type: "git", url: "git://github.com/visionmedia/express" }) - t.end() -}); - -tap.test("treat isaacs/node-graceful-fs as github repo", function(t) { - var d = {repository: {type: "git", url: "isaacs/node-graceful-fs"}} - normalize(d) - t.same(d.repository, { type: "git", url: "git://github.com/isaacs/node-graceful-fs" }) - t.end() -}); - -tap.test("homepage field will set to github url if repository is a github repo", function(t) { - var a - normalize(a={ - repository: { type: "git", url: "git://github.com/isaacs/node-graceful-fs" } - }) - t.same(a.homepage, 'https://github.com/isaacs/node-graceful-fs') - t.end() -}) - -tap.test("homepage field will set to github gist url if repository is a gist", function(t) { - var a - normalize(a={ - repository: { type: "git", url: "git@gist.github.com:123456.git" } - }) - t.same(a.homepage, 'https://gist.github.com/123456') - t.end() -}) - -tap.test("homepage field will set to github gist url if repository is a shorthand reference", function(t) { - var a - normalize(a={ - repository: { type: "git", url: "sindresorhus/chalk" } - }) - t.same(a.homepage, 'https://github.com/sindresorhus/chalk') - t.end() -}) - -tap.test("treat isaacs/node-graceful-fs as github repo in dependencies", function(t) { - var d = {dependencies: {"node-graceful-fs": "isaacs/node-graceful-fs"}} - normalize(d) - t.same(d.dependencies, {"node-graceful-fs": "git://github.com/isaacs/node-graceful-fs" }) - t.end() -}); - -tap.test("deprecation warning for array in dependencies fields", function(t) { - var a - var warnings = [] - function warn(w) { - warnings.push(w) - } - normalize(a={ - dependencies: [], - devDependencies: [], - optionalDependencies: [] - }, warn) - t.ok(~warnings.indexOf(safeFormat(warningMessages.deprecatedArrayDependencies, 'dependencies')), "deprecation warning") - 
t.ok(~warnings.indexOf(safeFormat(warningMessages.deprecatedArrayDependencies, 'devDependencies')), "deprecation warning") - t.ok(~warnings.indexOf(safeFormat(warningMessages.deprecatedArrayDependencies, 'optionalDependencies')), "deprecation warning") - t.end() -}) - -tap.test('no new globals', function(t) { - t.same(Object.keys(global), globals) - t.end() -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/strict.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/strict.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/strict.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/strict.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -var test = require("tap").test - -var normalize = require("../") - -test("strict", function(t) { - var threw - - try { - threw = false - normalize({name: "X"}, true) - } catch (er) { - threw = true - t.equal(er.message, 'Invalid name: "X"') - } finally { - t.equal(threw, true) - } - - try { - threw = false - normalize({name:" x "}, true) - } catch (er) { - threw = true - t.equal(er.message, 'Invalid name: " x "') - } finally { - t.equal(threw, true) - } - - try { - threw = false - normalize({name:"x",version:"01.02.03"}, true) - } catch (er) { - threw = true - t.equal(er.message, 'Invalid version: "01.02.03"') - } finally { - t.equal(threw, true) - } - - // these should not throw - var slob = {name:" X ",version:"01.02.03",dependencies:{ - y:">01.02.03", - z:"! 99 $$ASFJ(Aawenf90awenf as;naw.3j3qnraw || an elephant" - }} - normalize(slob, false) - t.same(slob, - { name: 'X', - version: '1.2.3', - dependencies: - { y: '>01.02.03', - z: '! 
99 $$ASFJ(Aawenf90awenf as;naw.3j3qnraw || an elephant' }, - readme: 'ERROR: No README data found!', - _id: 'X@1.2.3' }) - - t.end() -}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/typo.js nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/typo.js --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/typo.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/test/typo.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -var test = require('tap').test - -var normalize = require('../') -var typos = require('../lib/typos.json') -var warningMessages = require("../lib/warning_messages.json") -var safeFormat = require("../lib/safe_format") - -test('typos', function(t) { - var warnings = [] - function warn(m) { - warnings.push(m) - } - - var typoMessage = safeFormat.bind(undefined, warningMessages.typo) - - var expect = - [ warningMessages.missingRepository, - typoMessage('dependancies', 'dependencies'), - typoMessage('dependecies', 'dependencies'), - typoMessage('depdenencies', 'dependencies'), - typoMessage('devEependencies', 'devDependencies'), - typoMessage('depends', 'dependencies'), - typoMessage('dev-dependencies', 'devDependencies'), - typoMessage('devDependences', 'devDependencies'), - typoMessage('devDepenencies', 'devDependencies'), - typoMessage('devdependencies', 'devDependencies'), - typoMessage('repostitory', 'repository'), - typoMessage('repo', 'repository'), - typoMessage('prefereGlobal', 'preferGlobal'), - typoMessage('hompage', 'homepage'), - typoMessage('hampage', 'homepage'), - typoMessage('autohr', 'author'), - typoMessage('autor', 'author'), - typoMessage('contributers', 'contributors'), - typoMessage('publicationConfig', 'publishConfig') ] - - normalize({"dependancies": "dependencies" - ,"dependecies": 
"dependencies" - ,"depdenencies": "dependencies" - ,"devEependencies": "devDependencies" - ,"depends": "dependencies" - ,"dev-dependencies": "devDependencies" - ,"devDependences": "devDependencies" - ,"devDepenencies": "devDependencies" - ,"devdependencies": "devDependencies" - ,"repostitory": "repository" - ,"repo": "repository" - ,"prefereGlobal": "preferGlobal" - ,"hompage": "homepage" - ,"hampage": "homepage" - ,"autohr": "author" - ,"autor": "author" - ,"contributers": "contributors" - ,"publicationConfig": "publishConfig" - ,readme:"asdf" - ,name:"name" - ,version:"1.2.5"}, warn) - - t.same(warnings, expect) - - warnings.length = 0 - var expect = - [ warningMessages.missingDescription, - warningMessages.missingRepository, - typoMessage("bugs['web']", "bugs['url']"), - typoMessage("bugs['name']", "bugs['url']"), - warningMessages.nonUrlBugsUrlField, - warningMessages.emptyNormalizedBugs, - warningMessages.missingReadme ] - - normalize({name:"name" - ,version:"1.2.5" - ,bugs:{web:"url",name:"url"}}, warn) - - t.same(warnings, expect) - - warnings.length = 0 - var expect = - [ warningMessages.missingDescription, - warningMessages.missingRepository, - warningMessages.missingReadme, - typoMessage('script', 'scripts') ] - - normalize({name:"name" - ,version:"1.2.5" - ,script:{server:"start",tests:"test"}}, warn) - - t.same(warnings, expect) - - warnings.length = 0 - expect = - [ warningMessages.missingDescription, - warningMessages.missingRepository, - typoMessage("scripts['server']", "scripts['start']"), - typoMessage("scripts['tests']", "scripts['test']"), - warningMessages.missingReadme ] - - normalize({name:"name" - ,version:"1.2.5" - ,scripts:{server:"start",tests:"test"}}, warn) - - t.same(warnings, expect) - - warnings.length = 0 - expect = [] - - normalize({private: true - ,name:"name" - ,version:"1.2.5" - ,scripts:{server:"start",tests:"test"}}, warn) - - t.same(warnings, expect) - - t.end(); -}) diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.travis.yml nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.travis.yml 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/node_modules/normalize-package-data/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -language: node_js -node_js: - - "0.10" diff -Nru nodejs-0.11.13/deps/npm/node_modules/read-package-json/package.json nodejs-0.11.15/deps/npm/node_modules/read-package-json/package.json --- nodejs-0.11.13/deps/npm/node_modules/read-package-json/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/read-package-json/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "read-package-json", - "version": "1.1.9", + "version": "1.2.7", "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me", @@ -16,25 +16,47 @@ "test": "tap test/*.js" }, "dependencies": { - "glob": "~3.2.1", + "github-url-from-git": "^1.3.0", + "github-url-from-username-repo": "~1.0.0", + "glob": "^4.0.2", "lru-cache": "2", - "normalize-package-data": "^0.2.13", - "graceful-fs": "2" + "normalize-package-data": "^1.0.0", + "graceful-fs": "2 || 3" }, "devDependencies": { "tap": "~0.2.5" }, "optionalDependencies": { - "graceful-fs": "2" + "graceful-fs": "2 || 3" }, "license": "ISC", - "readme": "# read-package-json\n\nThis is the thing that npm uses to read package.json files. 
It\nvalidates some stuff, and loads some default things.\n\nIt keeps a cache of the files you've read, so that you don't end\nup reading the same package.json file multiple times.\n\nNote that if you just want to see what's literally in the package.json\nfile, you can usually do `var data = require('some-module/package.json')`.\n\nThis module is basically only needed by npm, but it's handy to see what\nnpm will see when it looks at your package.\n\n## Usage\n\n```javascript\nvar readJson = require('read-package-json')\n\n// readJson(filename, [logFunction=noop], [strict=false], cb)\nreadJson('/path/to/package.json', console.error, false, function (er, data) {\n if (er) {\n console.error(\"There was an error reading the file\")\n return\n }\n\n console.error('the package data is', data)\n});\n```\n\n## readJson(file, [logFn = noop], [strict = false], cb)\n\n* `file` {String} The path to the package.json file\n* `logFn` {Function} Function to handle logging. Defaults to a noop.\n* `strict` {Boolean} True to enforce SemVer 2.0 version strings, and\n other strict requirements.\n* `cb` {Function} Gets called with `(er, data)`, as is The Node Way.\n\nReads the JSON file and does the things.\n\n## `package.json` Fields\n\nSee `man 5 package.json` or `npm help json`.\n\n## readJson.log\n\nBy default this is a reference to the `npmlog` module. But if that\nmodule can't be found, then it'll be set to just a dummy thing that does\nnothing.\n\nReplace with your own `{log,warn,error}` object for fun loggy time.\n\n## readJson.extras(file, data, cb)\n\nRun all the extra stuff relative to the file, with the parsed data.\n\nModifies the data as it does stuff. Calls the cb when it's done.\n\n## readJson.extraSet = [fn, fn, ...]\n\nArray of functions that are called by `extras`. 
Each one receives the\narguments `fn(file, data, cb)` and is expected to call `cb(er, data)`\nwhen done or when an error occurs.\n\nOrder is indeterminate, so each function should be completely\nindependent.\n\nMix and match!\n\n## readJson.cache\n\nThe `lru-cache` object that readJson uses to not read the same file over\nand over again. See\n[lru-cache](https://github.com/isaacs/node-lru-cache) for details.\n\n## Other Relevant Files Besides `package.json`\n\nSome other files have an effect on the resulting data object, in the\nfollowing ways:\n\n### `README?(.*)`\n\nIf there is a `README` or `README.*` file present, then npm will attach\na `readme` field to the data with the contents of this file.\n\nOwing to the fact that roughly 100% of existing node modules have\nMarkdown README files, it will generally be assumed to be Markdown,\nregardless of the extension. Please plan accordingly.\n\n### `server.js`\n\nIf there is a `server.js` file, and there is not already a\n`scripts.start` field, then `scripts.start` will be set to `node\nserver.js`.\n\n### `AUTHORS`\n\nIf there is not already a `contributors` field, then the `contributors`\nfield will be set to the contents of the `AUTHORS` file, split by lines,\nand parsed.\n\n### `bindings.gyp`\n\nIf a bindings.gyp file exists, and there is not already a\n`scripts.install` field, then the `scripts.install` field will be set to\n`node-gyp rebuild`.\n\n### `wscript`\n\nIf a wscript file exists, and there is not already a `scripts.install`\nfield, then the `scripts.install` field will be set to `node-waf clean ;\nnode-waf configure build`.\n\nNote that the `bindings.gyp` file supercedes this, since node-waf has\nbeen deprecated in favor of node-gyp.\n\n### `index.js`\n\nIf the json file does not exist, but there is a `index.js` file\npresent instead, and that file has a package comment, then it will try\nto parse the package comment, and use that as the data instead.\n\nA package comment looks like 
this:\n\n```javascript\n/**package\n * { \"name\": \"my-bare-module\"\n * , \"version\": \"1.2.3\"\n * , \"description\": \"etc....\" }\n **/\n\n// or...\n\n/**package\n{ \"name\": \"my-bare-module\"\n, \"version\": \"1.2.3\"\n, \"description\": \"etc....\" }\n**/\n```\n\nThe important thing is that it starts with `/**package`, and ends with\n`**/`. If the package.json file exists, then the index.js is not\nparsed.\n\n### `{directories.man}/*.[0-9]`\n\nIf there is not already a `man` field defined as an array of files or a\nsingle file, and\nthere is a `directories.man` field defined, then that directory will\nbe searched for manpages.\n\nAny valid manpages found in that directory will be assigned to the `man`\narray, and installed in the appropriate man directory at package install\ntime, when installed globally on a Unix system.\n\n### `{directories.bin}/*`\n\nIf there is not already a `bin` field defined as a string filename or a\nhash of ` : ` pairs, then the `directories.bin`\ndirectory will be searched and all the files within it will be linked as\nexecutables at install time.\n\nWhen installing locally, npm links bins into `node_modules/.bin`, which\nis in the `PATH` environ when npm runs scripts. 
When\ninstalling globally, they are linked into `{prefix}/bin`, which is\npresumably in the `PATH` environment variable.\n", - "readmeFilename": "README.md", + "gitHead": "41d6696c527e32a1cb38ebf0b6fc91b489b0499c", "bugs": { "url": "https://github.com/isaacs/read-package-json/issues" }, "homepage": "https://github.com/isaacs/read-package-json", - "_id": "read-package-json@1.1.9", - "_shasum": "9c319185e5f8461661c01f8d4e5e80b468aa18ee", - "_from": "read-package-json@latest" + "_id": "read-package-json@1.2.7", + "_shasum": "f0b440c461a218f4dbf48b094e80fc65c5248502", + "_from": "read-package-json@>=1.2.7-0 <1.3.0-0", + "_npmVersion": "2.0.0-beta.0", + "_npmUser": { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + }, + { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + } + ], + "dist": { + "shasum": "f0b440c461a218f4dbf48b094e80fc65c5248502", + "tarball": "http://registry.npmjs.org/read-package-json/-/read-package-json-1.2.7.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/read-package-json/-/read-package-json-1.2.7.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/index.js nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/index.js --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,38 @@ +"use strict" +var fs = require("fs") +var path = require("path") +var dz = require("dezalgo") +var npa = require("npm-package-arg") + +module.exports = function (spec, where, cb) { + if (where instanceof Function) { cb = where; where = null } + if (where == null) where = "." + cb = dz(cb) + try { + var dep = npa(spec) + } + catch (e) { + return cb(e) + } + var specpath = dep.type == "local" + ? path.resolve(where, dep.spec) + : path.resolve(dep.rawSpec? 
dep.rawSpec: dep.name) + fs.stat(specpath, function (er, s) { + if (er) return finalize() + if (!s.isDirectory()) return finalize("local") + fs.stat(path.join(specpath, "package.json"), function (er) { + finalize(er ? null : "directory") + }) + }) + function finalize(type) { + if (type != null && type != dep.type) { + dep.type = type + if (! dep.rawSpec) { + dep.rawSpec = dep.name + dep.name = null + } + } + if (dep.type == "local" || dep.type == "directory") dep.spec = specpath + cb(null, dep) + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/.npmignore nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +*~ +.#* +node_modules diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/package.json nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/package.json --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +{ + "name": "realize-package-specifier", + "version": "1.2.0", + "description": "Like npm-package-arg, but more so, producing full file paths and differentiating local tar and directory sources.", + "main": "index.js", + "scripts": { + "test": "tap test/*.js" + }, + "license": "ISC", + "repository": { + "type": "git", + "url": "https://github.com/npm/realize-package-specifier.git" + }, + "author": { + "name": "Rebecca Turner", + "email": "me@re-becca.org", + "url": "http://re-becca.org" + }, + "homepage": "https://github.com/npm/realize-package-specifier", + "dependencies": { + "dezalgo": "^1.0.1", + "npm-package-arg": "^2.1.3" + }, + 
"devDependencies": { + "require-inject": "^1.1.0", + "tap": "^0.4.12" + }, + "gitHead": "39016343d5bd5572ab39374323e9588e54985910", + "bugs": { + "url": "https://github.com/npm/realize-package-specifier/issues" + }, + "_id": "realize-package-specifier@1.2.0", + "_shasum": "93364e40dee38369f92e9b0c76124500342132f2", + "_from": "realize-package-specifier@>=1.2.0 <1.3.0", + "_npmVersion": "2.1.2", + "_nodeVersion": "0.10.32", + "_npmUser": { + "name": "iarna", + "email": "me@re-becca.org" + }, + "maintainers": [ + { + "name": "iarna", + "email": "me@re-becca.org" + } + ], + "dist": { + "shasum": "93364e40dee38369f92e9b0c76124500342132f2", + "tarball": "http://registry.npmjs.org/realize-package-specifier/-/realize-package-specifier-1.2.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/realize-package-specifier/-/realize-package-specifier-1.2.0.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/README.md nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/README.md --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,58 @@ +realize-package-specifier +------------------------- + +Parse a package specifier, peeking at the disk to differentiate between +local tarballs, directories and named modules. This implements the logic +used by `npm install` and `npm cache` to determine where to get packages +from. + +```javascript +var realizePackageSpecifier = require("realize-package-specifier") +realizePackageSpecifier("foo.tar.gz", ".", function (err, package) { + … +}) +``` + +* realizePackageSpecifier(*spec*, [*where*,] *callback*) + +Parses *spec* using `npm-package-arg` and then uses stat to check to see if +it refers to a local tarball or package directory. Stats are done relative +to *where*. 
If it does then the local module is loaded. If it doesn't then +target is left as a remote package specifier. Package directories are +recognized by the presence of a package.json in them. + +*spec* -- a package specifier, like: `foo@1.2`, or `foo@user/foo`, or +`http://x.com/foo.tgz`, or `git+https://github.com/user/foo` + +*where* (optional, default: .) -- The directory in which we should look for +local tarballs or package directories. + +*callback* function(*err*, *result*) -- Called once we've determined what +kind of specifier this is. The *result* object will be very like the one +returned by `npm-package-arg` except with three differences: 1) There's a +new type of `directory`. 2) The `local` type only refers to tarballs. 2) +For all `local` and `directory` type results spec will contain the full path of +the local package. + +## Result Objects + +The full definition of the result object is: + +* `name` - If known, the `name` field expected in the resulting pkg. +* `type` - One of the following strings: + * `git` - A git repo + * `github` - A github shorthand, like `user/project` + * `tag` - A tagged version, like `"foo@latest"` + * `version` - A specific version number, like `"foo@1.2.3"` + * `range` - A version range, like `"foo@2.x"` + * `local` - A local file path + * `directory` - A local package directory + * `remote` - An http url (presumably to a tgz) +* `spec` - The "thing". URL, the range, git repo, etc. +* `raw` - The original un-modified string that was provided. +* `rawSpec` - The part after the `name@...`, as it was originally + provided. +* `scope` - If a name is something like `@org/module` then the `scope` + field will be set to `org`. If it doesn't have a scoped name, then + scope is `null`. 
+ diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/test/basic.js nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,121 @@ +"use strict" +var test = require("tap").test +var requireInject = require("require-inject") +var path = require("path") + +var re = { + tarball: /[\/\\]a.tar.gz$/, + packagedir: /[\/\\]b$/, + packagejson: /[\/\\]b[\/\\]package.json$/, + nonpackagedir: /[\/\\]c$/, + nopackagejson: /[\/\\]c[\/\\]package.json$/, + remotename: /[\/\\]d$/, + packagedirlikegithub: /[\/\\]e[\/\\]1$/, + packagejsonlikegithub: /[\/\\]e[\/\\]1[\/\\]package.json$/, + github: /[\/\\]e[\/\\]2$/ +} + +var rps = requireInject("../index", { + "fs": { + "stat": function (path, callback) { + if (re.tarball.test(path)) { + callback(null,{isDirectory:function(){ return false }}) + } + else if (re.packagedir.test(path)) { + callback(null,{isDirectory:function(){ return true }}) + } + else if (re.packagejson.test(path)) { + callback(null,{}) + } + else if (re.nonpackagedir.test(path)) { + callback(null,{isDirectory:function(){ return true }}) + } + else if (re.nopackagejson.test(path)) { + callback(new Error("EFILENOTFOUND")) + } + else if (re.remotename.test(path)) { + callback(new Error("EFILENOTFOUND")) + } + else if (re.packagedirlikegithub.test(path)) { + callback(null,{isDirectory:function(){ return true }}) + } + else if (re.packagejsonlikegithub.test(path)) { + callback(null,{}) + } + else if (re.github.test(path)) { + callback(new Error("EFILENOTFOUND")) + } + else { + throw new Error("Unknown stat fixture path: "+path) + } + } + } +}) + +test("realize-package-specifier", function (t) { + t.plan(10) + rps("a.tar.gz", function (err, result) { + t.is(result.type, "local", "local 
tarball") + }) + rps("b", function (err, result) { + t.is(result.type, "directory", "local package directory") + }) + rps("c", function (err, result) { + t.is(result.type, "range", "remote package, non-package local directory") + }) + rps("d", function (err, result) { + t.is(result.type, "range", "remote package, no local directory") + }) + rps("file:./a.tar.gz", function (err, result) { + t.is(result.type, "local", "local tarball") + }) + rps("file:./b", function (err, result) { + t.is(result.type, "directory", "local package directory") + }) + rps("file:./c", function (err, result) { + t.is(result.type, "local", "non-package local directory, specified with a file URL") + }) + rps("file:./d", function (err, result) { + t.is(result.type, "local", "no local directory, specified with a file URL") + }) + rps("e/1", function (err, result) { + t.is(result.type, "directory", "local package directory") + }) + rps("e/2", function (err, result) { + t.is(result.type, "github", "github package dependency") + }) +}) +test("named realize-package-specifier", function (t) { + t.plan(10) + + rps("a@a.tar.gz", function (err, result) { + t.is(result.type, "local", "named local tarball") + }) + rps("b@b", function (err, result) { + t.is(result.type, "directory", "named local package directory") + }) + rps("c@c", function (err, result) { + t.is(result.type, "tag", "remote package, non-package local directory") + }) + rps("d@d", function (err, result) { + t.is(result.type, "tag", "remote package, no local directory") + }) + rps("a@file:./a.tar.gz", function (err, result) { + t.is(result.type, "local", "local tarball") + }) + rps("b@file:./b", function (err, result) { + t.is(result.type, "directory", "local package directory") + }) + rps("c@file:./c", function (err, result) { + t.is(result.type, "local", "non-package local directory, specified with a file URL") + }) + rps("d@file:./d", function (err, result) { + t.is(result.type, "local", "no local directory, specified with a file URL") 
+ }) + rps("e@e/1", function (err, result) { + t.is(result.type, "directory", "local package directory") + }) + rps("e@e/2", function (err, result) { + t.is(result.type, "github", "github package dependency") + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/test/npa-basic.js nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/test/npa-basic.js --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/test/npa-basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/test/npa-basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,207 @@ +var test = require("tap").test; +var rps = require("../index.js") +var path = require("path") + +test("npa-basic", function (t) { + t.setMaxListeners(999) + + var tests = { + "foo@1.2": { + name: "foo", + type: "range", + spec: ">=1.2.0 <1.3.0", + raw: "foo@1.2", + rawSpec: "1.2" + }, + + "@foo/bar": { + raw: "@foo/bar", + name: "@foo/bar", + scope: "@foo", + rawSpec: "", + spec: "*", + type: "range" + }, + + "@foo/bar@": { + raw: "@foo/bar@", + name: "@foo/bar", + scope: "@foo", + rawSpec: "", + spec: "*", + type: "range" + }, + + "@foo/bar@baz": { + raw: "@foo/bar@baz", + name: "@foo/bar", + scope: "@foo", + rawSpec: "baz", + spec: "baz", + type: "tag" + }, + + "@f fo o al/ a d s ;f ": { + raw: "@f fo o al/ a d s ;f", + name: null, + rawSpec: "@f fo o al/ a d s ;f", + spec: path.resolve("@f fo o al/ a d s ;f"), + type: "local" + }, + + "foo@1.2.3": { + name: "foo", + type: "version", + spec: "1.2.3", + raw: "foo@1.2.3" + }, + + "foo@=v1.2.3": { + name: "foo", + type: "version", + spec: "1.2.3", + raw: "foo@=v1.2.3", + rawSpec: "=v1.2.3" + }, + + "git+ssh://git@github.com/user/foo#1.2.3": { + name: null, + type: "git", + spec: "ssh://git@github.com/user/foo#1.2.3", + raw: "git+ssh://git@github.com/user/foo#1.2.3" + }, + + "git+file://path/to/repo#1.2.3": { + name: null, + type: "git", + spec: "file://path/to/repo#1.2.3", + 
raw: "git+file://path/to/repo#1.2.3" + }, + + "git://github.com/user/foo": { + name: null, + type: "git", + spec: "git://github.com/user/foo", + raw: "git://github.com/user/foo" + }, + + "@foo/bar@git+ssh://github.com/user/foo": { + name: "@foo/bar", + scope: "@foo", + spec: "ssh://github.com/user/foo", + rawSpec: "git+ssh://github.com/user/foo", + raw: "@foo/bar@git+ssh://github.com/user/foo" + }, + + "/path/to/foo": { + name: null, + type: "local", + spec: "/path/to/foo", + raw: "/path/to/foo" + }, + + "file:path/to/foo": { + name: null, + type: "local", + spec: path.resolve(__dirname,"..","path/to/foo"), + raw: "file:path/to/foo" + }, + + "file:~/path/to/foo": { + name: null, + type: "local", + spec: path.resolve(__dirname,"..","~/path/to/foo"), + raw: "file:~/path/to/foo" + }, + + "file:../path/to/foo": { + name: null, + type: "local", + spec: path.resolve(__dirname,"..","../path/to/foo"), + raw: "file:../path/to/foo" + }, + + "file:///path/to/foo": { + name: null, + type: "local", + spec: "/path/to/foo", + raw: "file:///path/to/foo" + }, + + "https://server.com/foo.tgz": { + name: null, + type: "remote", + spec: "https://server.com/foo.tgz", + raw: "https://server.com/foo.tgz" + }, + + "user/foo-js": { + name: null, + type: "github", + spec: "user/foo-js", + raw: "user/foo-js" + }, + + "user/foo-js#bar/baz": { + name: null, + type: "github", + spec: "user/foo-js#bar/baz", + raw: "user/foo-js#bar/baz" + }, + + "user..blerg--/..foo-js# . . . . . some . tags / / /": { + name: null, + type: "github", + spec: "user..blerg--/..foo-js# . . . . . some . tags / / /", + raw: "user..blerg--/..foo-js# . . . . . some . 
tags / / /" + }, + + "user/foo-js#bar/baz/bin": { + name: null, + type: "github", + spec: "user/foo-js#bar/baz/bin", + raw: "user/foo-js#bar/baz/bin" + }, + + "foo@user/foo-js": { + name: "foo", + type: "github", + spec: "user/foo-js", + raw: "foo@user/foo-js" + }, + + "foo@latest": { + name: "foo", + type: "tag", + spec: "latest", + raw: "foo@latest" + }, + + "foo": { + name: "foo", + type: "range", + spec: "*", + raw: "foo" + } + } + + t.plan( 2 + Object.keys(tests).length * 3 ) + + Object.keys(tests).forEach(function (arg) { + rps(arg, path.resolve(__dirname,'..'), function(err, res) { + t.notOk(err, "No error") + t.type(res, "Result") + t.has(res, tests[arg]) + }) + }) + + // Completely unreasonable invalid garbage throws an error + rps("this is not a \0 valid package name or url", path.resolve(__dirname,'..'), function (err) { + t.ok(err, "error") + }) + + rps("gopher://yea right", path.resolve(__dirname,'..'), function (err) { + t.ok(err, "Unsupported URL Type: gopher://yea right") + }) + +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/test/npa-windows.js nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/test/npa-windows.js --- nodejs-0.11.13/deps/npm/node_modules/realize-package-specifier/test/npa-windows.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/realize-package-specifier/test/npa-windows.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +global.FAKE_WINDOWS = true + +var rps = require('../index.js') +var test = require("tap").test +var path = require("path") + +var cases = { + "C:\\x\\y\\z": { + raw: 'C:\\x\\y\\z', + scope: null, + name: null, + rawSpec: 'C:\\x\\y\\z', + spec: path.resolve('C:\\x\\y\\z'), + type: 'local' + }, + "foo@C:\\x\\y\\z": { + raw: 'foo@C:\\x\\y\\z', + scope: null, + name: 'foo', + rawSpec: 'C:\\x\\y\\z', + spec: path.resolve('C:\\x\\y\\z'), + type: 'local' + }, + "foo@/foo/bar/baz": { + raw: 'foo@/foo/bar/baz', + scope: null, + name: 'foo', + 
rawSpec: '/foo/bar/baz', + spec: path.resolve('/foo/bar/baz'), + type: 'local' + } +} + +test("parse a windows path", function (t) { + t.plan( Object.keys(cases).length ) + Object.keys(cases).forEach(function (c) { + var expect = cases[c] + rps(c, function(err, actual) { + t.same(actual, expect, c) + }) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/CHANGELOG.md nodejs-0.11.15/deps/npm/node_modules/request/CHANGELOG.md --- nodejs-0.11.13/deps/npm/node_modules/request/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/CHANGELOG.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,954 @@ +## Change Log + +### upcoming (2014/07/09 12:10 +00:00) +- [#946](https://github.com/mikeal/request/pull/946) defaults: merge headers (@aj0strow) +- [#844](https://github.com/mikeal/request/pull/844) Add support for HTTP[S]_PROXY environment variables. Fixes #595. (@jvmccarthy) + +### v2.37.1 (2014/07/07 17:25 +00:00) +- [8711b2f](https://github.com/mikeal/request/commit/8711b2f3489553a7ddae69fa8c9f538182c9d5c8) 2.37.1 (@mikeal) + +### v2.37.0 (2014/07/07 17:25 +00:00) +- [79472b2](https://github.com/mikeal/request/commit/79472b263cde77504a354913a16bdc9fbdc9ed5d) append secureOptions to poolKey (@medovob) +- [#907](https://github.com/mikeal/request/pull/907) append secureOptions to poolKey (@medovob) +- [b223a8a](https://github.com/mikeal/request/commit/b223a8add0cbdd4e699a52da66aeb0f0cb17a0c3) expose tough-cookie's getCookiesSync (@charlespwd) +- [f4dcad0](https://github.com/mikeal/request/commit/f4dcad0fa6e2f2388abae508ad7256a1e1214ab2) test getCookies method (@charlespwd) +- [adcf62b](https://github.com/mikeal/request/commit/adcf62bf45ec19a28198ca8d3f37e7d7babc883a) update readme (@charlespwd) +- [4fdf13b](https://github.com/mikeal/request/commit/4fdf13b57dcd20b9fe03c0956f5df70c82d6e4a3) Merge branch 'charlespwd-master' (@lalitkapoor) +- 
[83e370d](https://github.com/mikeal/request/commit/83e370d54ca2a5fb162e40e7e705e1e9d702ba0a) Bump version of hawk dep. (@samccone) +- [#927](https://github.com/mikeal/request/pull/927) Bump version of hawk dep. (@samccone) +- [c42dcec](https://github.com/mikeal/request/commit/c42dcec10a307cb2299861f87720d491a89142b4) package.json: use OSI-style license name (@isaacs) +- [8892cb7](https://github.com/mikeal/request/commit/8892cb7bb8945807ff25038e888222d4e902acc8) Swap mime module. (@eiriksm) +- [d92395e](https://github.com/mikeal/request/commit/d92395e638cbfe5c31eb4ff54941b98b09057486) Make package.json so node .8 understands it. (@eiriksm) +- [6ebd748](https://github.com/mikeal/request/commit/6ebd748a02a49976d41ebbc4f8396acf8fda1c14) Add some additional hacks to work in the browser. (@eiriksm) +- [#943](https://github.com/mikeal/request/pull/943) New mime module (@eiriksm) +- [561454d](https://github.com/mikeal/request/commit/561454d18a68b7a03163308f6d29e127afe97426) Add some code comments about why we do the extra checks. 
(@eiriksm) +- [#944](https://github.com/mikeal/request/pull/944) Make request work with browserify (@eiriksm) +- [6a0add7](https://github.com/mikeal/request/commit/6a0add70b2687cf751b3446a15a513a1fd141738) defaults: merge headers (@aj0strow) +- [407c1ad](https://github.com/mikeal/request/commit/407c1ada61afca4d4ba50155c6d9430754541df1) prefer late return statement (@aj0strow) +- [4ab40ba](https://github.com/mikeal/request/commit/4ab40ba2f9aca8958cab149eb9cfbd9edb5534aa) Added support for manual querystring in form option (@charlespwd) +- [a55627c](https://github.com/mikeal/request/commit/a55627cd9f468cefb2971bb501ebc0c2fc27aa8b) Updated README (@charlespwd) +- [#949](https://github.com/mikeal/request/pull/949) Manually enter querystring in form option (@charlespwd) +- [10246c8](https://github.com/mikeal/request/commit/10246c84819db14b32fccca040029b06449242a3) [PATCH v2] Add support for gzip content decoding (@kevinoid) +- [6180c5f](https://github.com/mikeal/request/commit/6180c5f45c01fb2158b9a44f894a34263479fa84) check for content-length header before setting it in nextTick (@camilleanne) +- [#951](https://github.com/mikeal/request/pull/951) Add support for gzip content decoding (@kevinoid) +- [849c681](https://github.com/mikeal/request/commit/849c681846ce3b5492bd47261de391377a3ac19b) Silence EventEmitter memory leak warning #311 (@watson) +- [#955](https://github.com/mikeal/request/pull/955) check for content-length header before setting it in nextTick (@camilleanne) +- [#957](https://github.com/mikeal/request/pull/957) Silence EventEmitter memory leak warning #311 (@watson) +- [c1d951e](https://github.com/mikeal/request/commit/c1d951e536bd41c957f0cade41d051c9d41d1462) Fixing for 0.8 (@mikeal) +- [4851118](https://github.com/mikeal/request/commit/48511186495888a5f0cb15a107325001ac91990e) 2.37.0 (@mikeal) + +### v2.36.1 (2014/05/19 20:59 +00:00) +- [c3914fc](https://github.com/mikeal/request/commit/c3914fcd4a74faf6dbf0fb6a4a188e871e0c51b8) 2.36.1 (@mikeal) + +### 
v2.36.0 (2014/05/19 20:59 +00:00) +- [76a96de](https://github.com/mikeal/request/commit/76a96de75580042aa780e9587ff7a22522119c3f) Reventing lodash merge change. (@mikeal) +- [b8bb57e](https://github.com/mikeal/request/commit/b8bb57efb17e72e2ac6d957c05c3f2570c7ba6a0) 2.36.0 (@mikeal) + +### v2.35.1 (2014/05/17 20:57 +00:00) +- [4bbd153](https://github.com/mikeal/request/commit/4bbd1532a68cadf1a88dd69c277645e9b781f364) 2.35.1 (@mikeal) + +### v2.35.0 (2014/05/17 20:57 +00:00) +- [2833da3](https://github.com/mikeal/request/commit/2833da3c3c1c34f4130ad1ba470354fc32410691) initial changelog (@lalitkapoor) +- [49319e6](https://github.com/mikeal/request/commit/49319e6c09a8a169c95a8d282c900f9fecd50371) Merge branch 'master' of https://github.com/mikeal/request into create-changelog-based-on-pull-requests (@lalitkapoor) +- [#815](https://github.com/mikeal/request/pull/815) Create changelog based on pull requests (@lalitkapoor) +- [4b6ce1a](https://github.com/mikeal/request/commit/4b6ce1ac0f79cb8fa633e281d3eb4c0cb61794e1) It appears that secureOptions is an undocumented feature to fix issues with broken server. See joynet/node #5119 (@nw) +- [#821](https://github.com/mikeal/request/pull/821) added secureOptions back (@nw) +- [eddd488](https://github.com/mikeal/request/commit/eddd4889fb1bc95c741749e79d9749aab3e103fc) Fixing #825 (@mikeal) +- [4627a7a](https://github.com/mikeal/request/commit/4627a7a14078494ded8c66c19c43efd07324cbd8) improve error reporting for invalid protocols (@FND) +- [#840](https://github.com/mikeal/request/pull/840) improve error reporting for invalid protocols (@FND) +- [#810](https://github.com/mikeal/request/pull/810) add some exposition to mpu example in README.md (@mikermcneil) +- [8a0e2d6](https://github.com/mikeal/request/commit/8a0e2d65351560858275c73505df12b537f4d001) Added support for HTTP_PROXY and HTTPS_PROXY environment variables, if the proxy option isn't already set. 
(@jvmccarthy) +- [f60d348](https://github.com/mikeal/request/commit/f60d348dc1840ee6d7b709efcc2b3cd1a03aef63) Fix word consistency +- [#850](https://github.com/mikeal/request/pull/850) Fix word consistency in readme (@0xNobody) +- [#809](https://github.com/mikeal/request/pull/809) upgrade tunnel-proxy to 0.4.0 (@ksato9700) +- [e86377c](https://github.com/mikeal/request/commit/e86377c0c1e7695c3997f7802175ca37f5a5113b) Won't use HTTP(S)_PROXY env var if proxy explicitly set to null. (@jvmccarthy) +- [f1bb537](https://github.com/mikeal/request/commit/f1bb537ee2440bd664ea8c445ac3a2c6e31e9932) Add support for RFC 6750 Bearer Tokens +- [ba51a26](https://github.com/mikeal/request/commit/ba51a26079ec52c0a9145fbe8b6796d46e79bb8e) Add documentation about auth.bearer (@phedny) +- [#861](https://github.com/mikeal/request/pull/861) Add support for RFC 6750 Bearer Tokens (@phedny) +- [b8ee579](https://github.com/mikeal/request/commit/b8ee5790ace95440a56074f6afe866f4662e9e88) Fix typo (@dandv) +- [#866](https://github.com/mikeal/request/pull/866) Fix typo (@dandv) +- [b292b59](https://github.com/mikeal/request/commit/b292b59fadecb35dac3bee0959c4b4b782e772e3) Clean code syntax in test-pipes.js (@tgohn) +- [f7996d5](https://github.com/mikeal/request/commit/f7996d5fcfed85e03f293a7c9739e385b64ecaad) Add test for request.pipefilter (@tgohn) +- [#869](https://github.com/mikeal/request/pull/869) Pipefilter test (@tgohn) +- [86b99b6](https://github.com/mikeal/request/commit/86b99b671a3c86f4f963a6c67047343fd8edae8f) Fix typo in form example (@mscdex) +- [2ba4808](https://github.com/mikeal/request/commit/2ba48083ddf2607f85e2c479e0d254483c2610fe) failing test (@lalitkapoor) +- [39396b0](https://github.com/mikeal/request/commit/39396b0bb2e90eb7ec4dfcf5d2e731a2cb156f5c) extend passed in options (@lalitkapoor) +- [#891](https://github.com/mikeal/request/pull/891) fixes 857 - options object is mutated by calling request (@lalitkapoor) +- 
[54a51c6](https://github.com/mikeal/request/commit/54a51c665887e162ccb9f6b17b9c1f3b017ccc29) merge options (@vohof) +- [25b95db](https://github.com/mikeal/request/commit/25b95dbdddf874f014386a0a9fe35a7c903b7415) tilde? (@vohof) +- [#897](https://github.com/mikeal/request/pull/897) merge with default options (@vohof) +- [a1e4b1a](https://github.com/mikeal/request/commit/a1e4b1a9c2f39ce565fd023bb604da139f689d43) Fixes #555 (@pigulla) +- [#901](https://github.com/mikeal/request/pull/901) Fixes #555 (@pigulla) +- [6498a5f](https://github.com/mikeal/request/commit/6498a5f1ae68050cfeabf8f34f75bc72b08f1805) 2.35.0 (@mikeal) + +### v2.34.1 (2014/02/18 19:35 +00:00) +- [aefea20](https://github.com/mikeal/request/commit/aefea20b215ff1a48f0d8d27dcac0186604e3b2d) 2.34.1 (@mikeal) + +### v2.34.0 (2014/02/18 19:35 +00:00) +- [46edc90](https://github.com/mikeal/request/commit/46edc902e6ffdee39038a6702021728cb9d9b8fa) simpler (@joaojeronimo) +- [#781](https://github.com/mikeal/request/pull/781) simpler isReadStream function (@joaojeronimo) +- [fe2f59f](https://github.com/mikeal/request/commit/fe2f59fdc72de5c86404e51ab6bc4e0e8ece95f2) Provide ability to override content-type when `json` option used (@vvo) +- [#785](https://github.com/mikeal/request/pull/785) Provide ability to override content-type when `json` option used (@vvo) +- [d134f01](https://github.com/mikeal/request/commit/d134f012e64702e8f4070d61504b39524e1a07ba) Adds content-length calculation when submitting forms using form-data library. This is related to issue 345. (@Juul) +- [#793](https://github.com/mikeal/request/pull/793) Adds content-length calculation when submitting forms using form-data li... 
(@Juul) +- [3ebf25c](https://github.com/mikeal/request/commit/3ebf25c5af1194d8f7b3a3330fe89e729532809b) adding failing test (@lalitkapoor) +- [0f57a90](https://github.com/mikeal/request/commit/0f57a90384588727a5446bb1f5bf4e0be2d85780) accept options in arguments (@lalitkapoor) +- [7fb1647](https://github.com/mikeal/request/commit/7fb164731a5aad80c6539e33eda4ad4a51bb7871) silently ignore errors when adding cookie to jar (@lalitkapoor) +- [d6b2b1c](https://github.com/mikeal/request/commit/d6b2b1c279d12cdddc6593060672d49b12e63fea) add additional header test (@lalitkapoor) +- [f29e6df](https://github.com/mikeal/request/commit/f29e6dfadc6c3a45b6190998b6608059f87f3c32) Added the Apache license to the package.json. (@keskival) +- [#802](https://github.com/mikeal/request/pull/802) Added the Apache license to the package.json. (@keskival) +- [#801](https://github.com/mikeal/request/pull/801) 794 ignore cookie parsing and domain errors (@lalitkapoor) +- [54e6dfb](https://github.com/mikeal/request/commit/54e6dfb77d57757d4006982f813ebaab9e005cd5) Rewrite UNIX Domain Socket support into 2.33.1. Add test. 
(@lyuzashi) +- [3eaed2f](https://github.com/mikeal/request/commit/3eaed2f2e82d9d17a583bcc54270c16a7b674206) Use setImmediate when available, otherwise fallback to nextTick (@lyuzashi) +- [746ca75](https://github.com/mikeal/request/commit/746ca757da24d5011e92e04cb00c90098a7680fd) Indent wrapped buildRequest function (@lyuzashi) +- [#516](https://github.com/mikeal/request/pull/516) UNIX Socket URL Support (@native-digital) +- [9a5b0a8](https://github.com/mikeal/request/commit/9a5b0a81eca9836f05b0192c05c0d41e79034461) initial format (@lalitkapoor) +- [9380a49](https://github.com/mikeal/request/commit/9380a49779ddb081eba5d0ee51e4396d72d52066) upgrade tunnel-proxy to 0.4.0 (@ksato9700) +- [1efea37](https://github.com/mikeal/request/commit/1efea374286c728c3c988ee2264fb44cd8c41d88) add some exposition to mpu example in README.md (@mikermcneil) +- [ba0d63a](https://github.com/mikeal/request/commit/ba0d63ae23a3fc95dfe012df0bd6c8d7e87b1df7) made the language clearer (@mikermcneil) +- [b43aa81](https://github.com/mikeal/request/commit/b43aa81789c0b8c7ae90d2b983f79dde4a125470) 2.34.0 (@mikeal) + +### v2.33.1 (2014/01/16 19:48 +00:00) +- [afcf827](https://github.com/mikeal/request/commit/afcf827559b3223c96ac1bbd19bd1e4a6d7771e3) 2.33.1 (@mikeal) + +### v2.33.0 (2014/01/16 19:48 +00:00) +- [7f1cc8f](https://github.com/mikeal/request/commit/7f1cc8ff5a8d9443e7a793f4655487e722b75b0d) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [3e43d3d](https://github.com/mikeal/request/commit/3e43d3d5175f5f18d1e97b2f5d4ca6ac6c216e4a) 2.33.0 (@mikeal) + +### v2.32.1 (2014/01/16 19:33 +00:00) +- [dd44f39](https://github.com/mikeal/request/commit/dd44f39d37daacbbeb21f9e960f13adbb44eea0a) 2.32.1 (@mikeal) + +### v2.32.0 (2014/01/16 19:33 +00:00) +- [#757](https://github.com/mikeal/request/pull/757) require aws-sign2 (@mafintosh) +- [#744](https://github.com/mikeal/request/pull/744) Use Cookie.parse (@lalitkapoor) +- 
[5eaee1c](https://github.com/mikeal/request/commit/5eaee1ce4008ede1df15201622ac478c892d6a8a) Upgrade tough-cookie to 0.10.0 (@stash) +- [#763](https://github.com/mikeal/request/pull/763) Upgrade tough-cookie to 0.10.0 (@stash) +- [d2489d0](https://github.com/mikeal/request/commit/d2489d0e24d9a538224f5c8c090dcdeb1f8d4969) Fixed auth error for some servers like twisted. According to rfc 2617 auth scheme token should be case-insensitive. (@bobyrizov) +- [#764](https://github.com/mikeal/request/pull/764) Case-insensitive authentication scheme (@bobyrizov) +- [cbee3d0](https://github.com/mikeal/request/commit/cbee3d04ee9f704501a64edb7b9b6d201e98494b) Use tough-cookie CookieJar sync API (@stash) +- [3eeaf6a](https://github.com/mikeal/request/commit/3eeaf6a90df7b806d91ae1e8e2f56862ece2ea33) Emit error, not cookieError (@stash) +- [#767](https://github.com/mikeal/request/pull/767) Use tough-cookie CookieJar sync API (@stash) +- [9eac534](https://github.com/mikeal/request/commit/9eac534dd11e40bba65456491cb62ad68d8f41fa) 2.32.0 (@mikeal) + +### v2.31.1 (2014/01/08 02:57 +00:00) +- [b1b5e91](https://github.com/mikeal/request/commit/b1b5e9161e149574ba5528c401a70bfadef1a98a) 2.31.1 (@mikeal) + +### v2.31.0 (2014/01/08 02:57 +00:00) +- [dd2577f](https://github.com/mikeal/request/commit/dd2577f8264d4d4b07484dec7094b72c00c8416f) Removing s3 test. 
(@mikeal) +- [fef5bf3](https://github.com/mikeal/request/commit/fef5bf34258e3695b61c048c683f1d4a7f99b368) Fix callback arguments documentation (@mmalecki) +- [#736](https://github.com/mikeal/request/pull/736) Fix callback arguments documentation (@mmalecki) +- [5531c20](https://github.com/mikeal/request/commit/5531c208678145ef35b06e948190be2fd6a8a1c8) updating README example: cookie jar api changed cookie module changed to tough-cookie (@emkay) +- [#741](https://github.com/mikeal/request/pull/741) README example is using old cookie jar api (@emkay) +- [9d73e5a](https://github.com/mikeal/request/commit/9d73e5a277af141a6e4fa9dbcae5d0c3b755d277) add note about JSON output body type (@iansltx) +- [#742](https://github.com/mikeal/request/pull/742) Add note about JSON output body type (@iansltx) +- [41e20a4](https://github.com/mikeal/request/commit/41e20a4d288e30101e493b383a0e4852a3271a98) Use Cookie.parse (@lalitkapoor) +- [4d09556](https://github.com/mikeal/request/commit/4d095562a5c42ffb41b0ff194e9e6f32c0f44372) updating setCookie example to make it clear that the callback is required (@emkay) +- [#745](https://github.com/mikeal/request/pull/745) updating setCookie example to make it clear that the callback is required (@emkay) +- [b7ede1d](https://github.com/mikeal/request/commit/b7ede1d56f9a2764e4bf764687b81419df817e5a) README: Markdown code highlight (@weakish) +- [#746](https://github.com/mikeal/request/pull/746) README: Markdown code highlight (@weakish) +- [#645](https://github.com/mikeal/request/pull/645) update twitter api url to v1.1 (@mick) +- [20dcd18](https://github.com/mikeal/request/commit/20dcd18ce8e3397ba7e0213da9c760b048ca5b49) require aws-sign2 (@mafintosh) +- [df2c426](https://github.com/mikeal/request/commit/df2c4264321c3db1387ddf9a945d63b9ae7d57b8) 2.31.0 (@mikeal) + +### v2.30.1 (2013/12/13 19:17 +00:00) +- [eba2d40](https://github.com/mikeal/request/commit/eba2d402fcdcf1ac878de8672b1c9f5da856dcc1) 2.30.1 (@mikeal) + +### v2.30.0 (2013/12/13 
19:17 +00:00) +- [aee3819](https://github.com/mikeal/request/commit/aee38191557574ef570fd9c764af0af7072cc92a) Fix TypeError when calling request.cookie +- [#728](https://github.com/mikeal/request/pull/728) Fix TypeError when calling request.cookie (@scarletmeow) +- [628ef76](https://github.com/mikeal/request/commit/628ef768b1f52710b8eb4e14be4db69d174d1dcb) better DIGEST support (@dai-shi) +- [d919bc1](https://github.com/mikeal/request/commit/d919bc1ce97fa461c365437a0c739bbaa6b86de7) ignore null authValues (DIGEST) (@dai-shi) +- [75fc209](https://github.com/mikeal/request/commit/75fc209c5a9e6c647a04e42048c30f46c66fc103) DIGEST support: pass algoritm and opaque, add TODO items, test case for compatible mode (@dai-shi) +- [#730](https://github.com/mikeal/request/pull/730) better HTTP DIGEST support (@dai-shi) +- [937a24a](https://github.com/mikeal/request/commit/937a24a168a126f406ee8eb55eb78169ddc53497) JSHINT: Creating global 'for' variable. Should be 'for (var ...'. +- [#732](https://github.com/mikeal/request/pull/732) JSHINT: Creating global 'for' variable. Should be 'for (var ...'. 
(@Fritz-Lium) +- [f03be23](https://github.com/mikeal/request/commit/f03be2309bd85a89d2e3c208b2fb4be1a2b95c79) Make digest qop regex more robust (see #730) (@nylen) +- [c7d97ae](https://github.com/mikeal/request/commit/c7d97aefaebf773ce62c72e9ec656f0250b7a1e7) 2.30.0 (@mikeal) + +### v2.29.1 (2013/12/06 20:05 +00:00) +- [e0f2c41](https://github.com/mikeal/request/commit/e0f2c41bd4e15518e97dd2f4c134be51ed4cb68b) 2.29.1 (@mikeal) + +### v2.29.0 (2013/12/06 20:05 +00:00) +- [3c2cad1](https://github.com/mikeal/request/commit/3c2cad11301380f4056eb3ca4c0c124f7f7f72f5) make request.defaults(options, requester) run the requester for all methods (@jchris) +- [#727](https://github.com/mikeal/request/pull/727) fix requester bug (@jchris) +- [0c9f875](https://github.com/mikeal/request/commit/0c9f87542cd1f919751d3ed1f00208ce7705f8e7) 2.29.0 (@mikeal) + +### v2.28.1 (2013/12/04 19:42 +00:00) +- [3e6a300](https://github.com/mikeal/request/commit/3e6a300121586da81b871f759a9feec52810474a) 2.28.1 (@mikeal) + +### v2.28.0 (2013/12/04 19:42 +00:00) +- [ac26f43](https://github.com/mikeal/request/commit/ac26f43d9a8212289f92056d3029c207f755cef4) Update request.js (@wprl) +- [adc2cb6](https://github.com/mikeal/request/commit/adc2cb6721e5980e8ed667a3f558cce8c89ee6c2) Use random cnonce (@wprl) +- [ff16a9d](https://github.com/mikeal/request/commit/ff16a9daf93e01cecee7fabec64c3e1b423f7db5) Add test for random cnonce (@wprl) +- [df64c2b](https://github.com/mikeal/request/commit/df64c2bc8f691ecc6f6c214e2254bab439830b88) Restore whitespace (@wprl) +- [#630](https://github.com/mikeal/request/pull/630) Send random cnonce for HTTP Digest requests (@wprl) +- [aca5a16](https://github.com/mikeal/request/commit/aca5a169c44cc658e8310691a2ae1cfc4c2b0958) update twitter api url to v1.1 (@mick) +- [abcbadd](https://github.com/mikeal/request/commit/abcbadd1b2a113c34a37b62d36ddcfd74452850e) Test case for #304. 
(@diversario) +- [b8cf874](https://github.com/mikeal/request/commit/b8cf8743b66d8eee4048561a7d81659f053393c8) fix failure when running with NODE_DEBUG=request, and a test for that (@jrgm) +- [e6c7d1f](https://github.com/mikeal/request/commit/e6c7d1f6d23922480c09427d5f54f84eec60b7af) quiet, but check that stderr output has something reasonable for debug (@jrgm) +- [#659](https://github.com/mikeal/request/pull/659) fix failure when running with NODE_DEBUG=request, and a test for that (@jrgm) +- [23164e4](https://github.com/mikeal/request/commit/23164e4f33bd0837d796037c3d0121db23653c34) option.tunnel to explicitly disable tunneling (@seanmonstar) +- [#662](https://github.com/mikeal/request/pull/662) option.tunnel to explicitly disable tunneling (@seanmonstar) +- [#656](https://github.com/mikeal/request/pull/656) Test case for #304. (@diversario) +- [da16120](https://github.com/mikeal/request/commit/da16120a8f0751b305a341c012dbdcfd62e83585) Change `secureOptions' to `secureProtocol' for HTTPS request (@richarddong) +- [43d9d0a](https://github.com/mikeal/request/commit/43d9d0a76974d2c61681ddee04479d514ebfa320) add `ciphers' and `secureProtocol' to `options' in `getAgent' (@richarddong) +- [#666](https://github.com/mikeal/request/pull/666) make `ciphers` and `secureProtocol` to work in https request (@richarddong) +- [524e035](https://github.com/mikeal/request/commit/524e0356b73240409a11989d369511419526b5ed) change cookie module (@sxyizhiren) +- [#674](https://github.com/mikeal/request/pull/674) change cookie module,to tough-cookie.please check it . 
(@sxyizhiren) +- [e8dbcc8](https://github.com/mikeal/request/commit/e8dbcc83d4eff3c14e03bd754174e2c5d45f2872) tests: Fixed test-timeout.js events unit test (@Turbo87) +- [aed1c71](https://github.com/mikeal/request/commit/aed1c71fac0047b66a236a990a5569445cfe995d) Added Travis CI configuration file (@Turbo87) +- [#683](https://github.com/mikeal/request/pull/683) Travis CI support (@Turbo87) +- [8bfa640](https://github.com/mikeal/request/commit/8bfa6403ce03cbd3f3de6b82388bfcc314e56c61) dependencies: Set `tough-cookie` as optional dependency (@Turbo87) +- [bcc138d](https://github.com/mikeal/request/commit/bcc138da67b7e1cf29dc7d264a73d8b1d1f4b0e4) dependencies: Set `form-data` as optional dependency (@Turbo87) +- [751ac28](https://github.com/mikeal/request/commit/751ac28b7f13bfeff2a0e920ca2926a005dcb6f0) dependencies: Set `tunnel-agent` as optional dependency (@Turbo87) +- [6d7c1c9](https://github.com/mikeal/request/commit/6d7c1c9d8e3a300ff6f2a93e7f3361799acf716b) dependencies: Set `http-signature` as optional dependency (@Turbo87) +- [733f1e3](https://github.com/mikeal/request/commit/733f1e3ae042a513a18cde1c6e444b18ee07ad66) Added .npmignore file (@Turbo87) +- [e2fc346](https://github.com/mikeal/request/commit/e2fc346b7e5e470fcd36189bcadf63c53feebb22) dependencies: Set `hawk` as optional dependency (@Turbo87) +- [e87d45f](https://github.com/mikeal/request/commit/e87d45fe89ea220035bf07696a70292763f7135f) dependencies: Set `aws-sign` as optional dependency (@Turbo87) +- [1cd81ba](https://github.com/mikeal/request/commit/1cd81ba30908b77cff2fa618aeb232fefaa53ada) lib: Added optional() function (@Turbo87) +- [28c2c38](https://github.com/mikeal/request/commit/28c2c3820feab0cc719df213a60838db019f3e1a) dependencies: Set `oauth-sign` as optional dependency (@Turbo87) +- [2ceddf7](https://github.com/mikeal/request/commit/2ceddf7e793feb99c5b6a76998efe238965b22cd) TravisCI: Test with and without optional dependencies (@Turbo87) +- [#682](https://github.com/mikeal/request/pull/682) 
Optional dependencies (@Turbo87) +- [2afab5b](https://github.com/mikeal/request/commit/2afab5b665a2e03becbc4a42ad481bb737405655) Handle blank password in basic auth. (@diversario) +- [cabe5a6](https://github.com/mikeal/request/commit/cabe5a62dc71282ce8725672184efe9d97ba79a5) Handle `auth.password` and `auth.username`. (@diversario) +- [#690](https://github.com/mikeal/request/pull/690) Handle blank password in basic auth. (@diversario) +- [33100c3](https://github.com/mikeal/request/commit/33100c3c7fa678f592374f7b2526fe9a0499b6f6) Typo (@VRMink) +- [#694](https://github.com/mikeal/request/pull/694) Typo in README (@ExxKA) +- [9072ff1](https://github.com/mikeal/request/commit/9072ff1556bcb002772838a94e1541585ef68f02) Edited README.md for formatting and clarity of phrasing (@Zearin) +- [#696](https://github.com/mikeal/request/pull/696) Edited README.md for formatting and clarity of phrasing (@Zearin) +- [07ee58d](https://github.com/mikeal/request/commit/07ee58d3a8145740ba34cc724f123518e4b3d1c3) Fixing listing in callback part of docs. (@lukasz-zak) +- [#710](https://github.com/mikeal/request/pull/710) Fixing listing in callback part of docs. (@lukasz-zak) +- [8ee21d0](https://github.com/mikeal/request/commit/8ee21d0dcc637090f98251eba22b9f4fd1602f0e) Request.multipart no longer crashes when header 'Content-type' is present (@pastaclub) +- [#715](https://github.com/mikeal/request/pull/715) Request.multipart no longer crashes when header 'Content-type' present (@pastaclub) +- [8b04ca6](https://github.com/mikeal/request/commit/8b04ca6ad8d025c275e40b806a69112ac53bd416) doc: Removed use of gendered pronouns (@oztu) +- [#719](https://github.com/mikeal/request/pull/719) Made a comment gender neutral. (@oztu) +- [8795fc6](https://github.com/mikeal/request/commit/8795fc68cce26b9a45d10db9eaffd4bc943aca3a) README.md: add custom HTTP Headers example. (@tcort) +- [#724](https://github.com/mikeal/request/pull/724) README.md: add custom HTTP Headers example. 
(@tcort) +- [c5d5b1f](https://github.com/mikeal/request/commit/c5d5b1fcf348e768943fe632a9a313d704d35c65) Changing dep. (@mikeal) +- [bf04163](https://github.com/mikeal/request/commit/bf04163883fa9c62d4e1a9fdd64d6efd7723d5f8) 2.28.0 (@mikeal) + +### v2.27.1 (2013/08/15 21:30 +00:00) +- [a80a026](https://github.com/mikeal/request/commit/a80a026e362a9462d6948adc1b0d2831432147d2) 2.27.1 (@mikeal) + +### v2.27.0 (2013/08/15 21:30 +00:00) +- [3627b9c](https://github.com/mikeal/request/commit/3627b9cc7752cfe57ac609ed613509ff61017045) rename Request and remove .DS_Store (@joaojeronimo) +- [920f9b8](https://github.com/mikeal/request/commit/920f9b88f7dd8f8d153e72371b1bf2d16d5e4160) rename Request (@joaojeronimo) +- [c243cc6](https://github.com/mikeal/request/commit/c243cc66131216bb57bcc0fd79c250a7927ee424) for some reason it removed request.js (@joaojeronimo) +- [#619](https://github.com/mikeal/request/pull/619) decouple things a bit (@CrowdProcess) +- [ed4ecc5](https://github.com/mikeal/request/commit/ed4ecc5ae5cd1d9559a937e84638c9234244878b) Try normal stringify first, then fall back to safe stringify (@mikeal) +- [5642ff5](https://github.com/mikeal/request/commit/5642ff56e64c19e8183dcd5b6f9d07cca295a79e) 2.27.0 (@mikeal) + +### v2.26.1 (2013/08/07 16:31 +00:00) +- [b422510](https://github.com/mikeal/request/commit/b422510ba16315c3e0e1293a17f3a8fa7a653a77) 2.26.1 (@mikeal) + +### v2.26.0 (2013/08/07 16:31 +00:00) +- [3b5b62c](https://github.com/mikeal/request/commit/3b5b62cdd4f3b92e63a65d3a7265f5a85b11c4c9) Only include :password in Basic Auth if it's defined (fixes #602) (@bendrucker) +- [#605](https://github.com/mikeal/request/pull/605) Only include ":" + pass in Basic Auth if it's defined (fixes #602) (@bendrucker) +- [cce2c2c](https://github.com/mikeal/request/commit/cce2c2c8ea5b0136932b2432e4e25c0124d58d5a) Moved init of self.uri.pathname (@lexander) +- [08793ec](https://github.com/mikeal/request/commit/08793ec2f266ef88fbe6c947e6b334e04d4b9dc9) Fix all header casing 
issues forever. (@mikeal) +- [#613](https://github.com/mikeal/request/pull/613) Fixes #583, moved initialization of self.uri.pathname (@lexander) +- [f98ff99](https://github.com/mikeal/request/commit/f98ff990d294165498c9fbf79b2de12722e5c842) Update this old ass readme with some new HOTNESS! (@mikeal) +- [3312010](https://github.com/mikeal/request/commit/3312010f72d035f22b87a6d8d463f0d91b88fea1) markdown badge instead. (@mikeal) +- [9cf657c](https://github.com/mikeal/request/commit/9cf657c1f08bf460911b8bb0a8c5c0d3ae6135c7) Shorter title. (@mikeal) +- [2c61d66](https://github.com/mikeal/request/commit/2c61d66f1dc323bb612729c7320797b79b22034c) put Request out (@joaojeronimo) +- [28513a1](https://github.com/mikeal/request/commit/28513a1b371452699438c0eb73471f8969146264) 2.26.0 (@mikeal) + +### v2.25.1 (2013/07/23 21:51 +00:00) +- [6387b21](https://github.com/mikeal/request/commit/6387b21a9fb2e16ee4dd2ab73b757eca298587b5) 2.25.1 (@mikeal) + +### v2.25.0 (2013/07/23 21:51 +00:00) +- [828f12a](https://github.com/mikeal/request/commit/828f12a1ae0f187deee4d531b2eaf7531169aaf2) 2.25.0 (@mikeal) + +### v2.24.1 (2013/07/23 20:51 +00:00) +- [29ae1bc](https://github.com/mikeal/request/commit/29ae1bc454c03216beeea69d65b538ce4f61e8c1) 2.24.1 (@mikeal) + +### v2.24.0 (2013/07/23 20:51 +00:00) +- [f667318](https://github.com/mikeal/request/commit/f66731870d5f3e0e5655cd89612049b540c34714) Fixed a small typo (@michalstanko) +- [#601](https://github.com/mikeal/request/pull/601) Fixed a small typo (@michalstanko) +- [#594](https://github.com/mikeal/request/pull/594) Emit complete event when there is no callback (@RomainLK) +- [#596](https://github.com/mikeal/request/pull/596) Global agent is being used when pool is specified (@Cauldrath) +- [41ce492](https://github.com/mikeal/request/commit/41ce4926fb08242f19135fd3ae10b18991bc3ee0) New deps. 
(@mikeal) +- [8176c94](https://github.com/mikeal/request/commit/8176c94d5d17bd14ef4bfe459fbfe9cee5cbcc6f) 2.24.0 (@mikeal) + +### v2.23.1 (2013/07/23 02:45 +00:00) +- [63f31cb](https://github.com/mikeal/request/commit/63f31cb1d170a4af498fbdd7566f867423caf8e3) 2.23.1 (@mikeal) + +### v2.23.0 (2013/07/23 02:44 +00:00) +- [758f598](https://github.com/mikeal/request/commit/758f598de8d6024db3fa8ee7d0a1fc3e45c50f53) Initial commit. Request package. (@mikeal) +- [104cc94](https://github.com/mikeal/request/commit/104cc94839d4b71aaf3681142daefba7ace78c94) Removing unnecessary markup. (@mikeal) +- [12a4cb8](https://github.com/mikeal/request/commit/12a4cb88b949cb4a81d51189d432c25c08522a87) Matching node documentation style. (@mikeal) +- [ab96993](https://github.com/mikeal/request/commit/ab969931106b10b5f8658dc9e0f512c5dfc2a7da) Release tarball. (@mikeal) +- [e7e37ad](https://github.com/mikeal/request/commit/e7e37ad537081a040ea3e527aac23ae859b40b2c) Removing old tarball. (@mikeal) +- [e66e90d](https://github.com/mikeal/request/commit/e66e90dd814ae7bfbcd52003609d7bde9eafea57) Adding automatic redirect following. (@mikeal) +- [2fc5b84](https://github.com/mikeal/request/commit/2fc5b84832ae42f6ddb081b1909d0a6ca00c8d51) Adding SSL support. (@mikeal) +- [a3ac375](https://github.com/mikeal/request/commit/a3ac375d4b5800a038ae26233425fadc26866fbc) Fixing bug where callback fired for every redirect. (@mikeal) +- [1139efe](https://github.com/mikeal/request/commit/1139efedb5aad4a328c1d8ff45fe77839a69169f) Cleaning up tests. (@mikeal) +- [bb49fe6](https://github.com/mikeal/request/commit/bb49fe6709fa06257f4b7aadc2e450fd45a41328) Rolling version. (@mikeal) +- [4ff3493](https://github.com/mikeal/request/commit/4ff349371931ec837339aa9082c4ac7ddd4c7c35) Updates to README.md (@mikeal) +- [1c9cf71](https://github.com/mikeal/request/commit/1c9cf719c92b02ba85c4e47bd2b92a3303cbe1cf) Adding optional body buffer. 
(@mikeal) +- [49dfef4](https://github.com/mikeal/request/commit/49dfef42630c4fda6fb208534c00638dc0f06a6b) Rolling version. (@mikeal) +- [ab40cc8](https://github.com/mikeal/request/commit/ab40cc850652e325fcc3b0a44ee7303ae0a7b77f) Preserve the original full path. (@mikeal) +- [6d70f62](https://github.com/mikeal/request/commit/6d70f62c356f18098ca738b3dbedcf212ac3d8d8) Rolling version. (@mikeal) +- [e2ca15a](https://github.com/mikeal/request/commit/e2ca15a0f7e986e3063977ee9bd2eb69e86bdb1f) Fixing bugs and rolling version. (@mikeal) +- [8165254](https://github.com/mikeal/request/commit/81652543d3a09553cbf33095a7932dec53ccecc2) Cleanup. Fixing '' === '/' path bug. (@mikeal) +- [a0536a4](https://github.com/mikeal/request/commit/a0536a46d0b91e204fbde1e4341461bc827c9542) Rolling version. (@mikeal) +- [9ccaad7](https://github.com/mikeal/request/commit/9ccaad7dce05e5dcc3eacaf1500404622a0d8067) Adding stream support for request and response bodies. (@mikeal) +- [585166d](https://github.com/mikeal/request/commit/585166d979d4476e460e9835cc0516d04a9a3e11) Rolling version. (@mikeal) +- [41111c8](https://github.com/mikeal/request/commit/41111c88d711da80ea123df238d62038b89769bf) Bugfix release for response stream. (@mikeal) +- [86e375d](https://github.com/mikeal/request/commit/86e375d093700affe4d6d2b76a7acedbe8da140c) Remove host header when we add it. (@mikeal) +- [3a6277c](https://github.com/mikeal/request/commit/3a6277c81cfd3457c760f2aaea44852ef832a1e8) Rolling version. (@mikeal) +- [7a11f69](https://github.com/mikeal/request/commit/7a11f69d5353ecc1319e2e91ca4aefbaf0338136) writing requestBodyStream into request (@beanieboi) +- [186e9cf](https://github.com/mikeal/request/commit/186e9cf692511d768f8016d311609a0a0a315af6) Using sys.pump (@mikeal) +- [09e7ade](https://github.com/mikeal/request/commit/09e7ade541e1d40316a3f153128871a353e707b1) Fixing host port addition. Rolling version. 
(@mikeal) +- [cec3f3f](https://github.com/mikeal/request/commit/cec3f3f619322f27e2a82c7fd8971722f98d04d6) Using builtin base64. (@mikeal) +- [2a2e2a2](https://github.com/mikeal/request/commit/2a2e2a2f5c4760d4da3caa1a0f2d14c31a4222dc) new structure. new convenience methods (@mikeal) +- [f835b5f](https://github.com/mikeal/request/commit/f835b5fb605506b8ecd3c17bebe9ed54f0066cfc) removing old files. (@mikeal) +- [91616c4](https://github.com/mikeal/request/commit/91616c4e4f488f75a8b04b5b6f0ceef7e814cffd) Adding better redirect handling. (@mikeal) +- [3a95433](https://github.com/mikeal/request/commit/3a95433cbec9693a16ff365148489a058720ae7c) Fixing tests. (@mikeal) +- [38eb1d2](https://github.com/mikeal/request/commit/38eb1d2fa8dea582bb7c3fb37a7b05ff91857a46) By popular demand, proxy support! Not really tested yet but it seems to kinda work. (@mikeal) +- [45d41df](https://github.com/mikeal/request/commit/45d41dff63f36b25b3403e59c8b172b7aa9ed373) Added proxy auth. (@mikeal) +- [85e3d97](https://github.com/mikeal/request/commit/85e3d97e0dced39a3769c4e3f2707ba3aaab1eaa) Fixing for non-proxy case. (@mikeal) +- [f796da7](https://github.com/mikeal/request/commit/f796da74849d2b0732bd1bae1d2dcaf1243142c1) Fixing relative uri's for forwards. (@mikeal) +- [dead30e](https://github.com/mikeal/request/commit/dead30ebef9c3ff806b895e2bd32f52ba3988c69) Adding support for specifying an encoding for the response body. (@mikeal) +- [9433344](https://github.com/mikeal/request/commit/943334488dcc8e7f90727b86f9eb1bc502c33b4f) Removing debugging statement (@mikeal) +- [41efb7a](https://github.com/mikeal/request/commit/41efb7a7dcca3b47e97c23c6cdbd3e860d3bd82b) Error on maxRedirects exceeded. (@mikeal) +- [9549570](https://github.com/mikeal/request/commit/95495701fa4e99a3ab85acdab71ecdaabe0dbd45) Allow options.url, people do it all the time, might as well just support it. (@mikeal) +- [21a53c0](https://github.com/mikeal/request/commit/21a53c016edcc113e809219639807b46d29dba36) Pumping version. 
(@mikeal) +- [aca9782](https://github.com/mikeal/request/commit/aca9782285fe1d727570fe8d799561f45d49048e) Fixing byteLength !== string lenght issues. (@mikeal) +- [a77c296](https://github.com/mikeal/request/commit/a77c296431eda2a211f59bdb88654c4a64ed4ef3) Don't rely on automatic semicolon insertion (pretty please :) (@papandreou) +- [8b02f29](https://github.com/mikeal/request/commit/8b02f29c9019dd1d1dd291dd85889b26f592a137) Also set content-length when options.body is the empty string. (@papandreou) +- [023281c](https://github.com/mikeal/request/commit/023281ca9b4414a9bc0170c2b08aaf886a7a08f7) Simplified boolean logic. (@papandreou) +- [4f897fd](https://github.com/mikeal/request/commit/4f897fdd6c7c93bea73dbf34623f09af63bb1ed4) Simplified check for whether response.headers.location starts with "http:" or "https:". (@papandreou) +- [6d7db85](https://github.com/mikeal/request/commit/6d7db85cadf401dffdec07a4d66822207898c69e) Fixed double var declaration. (@papandreou) +- [97255cf](https://github.com/mikeal/request/commit/97255cfd2a4aa8f34d307e7cd96fe1c1f13cb26a) Process redirects as soon as the response arrives. Prevents the uninteresting redirect response from being pumped into responseBodyStream. (@papandreou) +- [b2af15f](https://github.com/mikeal/request/commit/b2af15f4fcbe1115cf8b53c5ae89fbf2365bfffc) New feature: If options.noBuffer is true, don't buffer up the response, just return it. Most of the time getting a readable stream is much more flexible than having the option to pipe the response into a writable stream. For one thing, the stream can be paused. (@papandreou) +- [fee5f89](https://github.com/mikeal/request/commit/fee5f89159a8f36b25df509c55093bf7ebd1c993) A few fixes/changes from papandreou's code, also added new semantics for onResponse. (@mikeal) +- [fa72fcb](https://github.com/mikeal/request/commit/fa72fcb950029b222f0621e2d49304e35d08c380) Updated documentation. 
(@mikeal) +- [4fc7209](https://github.com/mikeal/request/commit/4fc72098e7eeb9518951b9306115340ffdcce7ce) Fix for both onResponse and callback (@mikeal) +- [3153436](https://github.com/mikeal/request/commit/3153436404fca865a65649d46eb22d9797128c9d) Adding license information. (@mikeal) +- [59570de](https://github.com/mikeal/request/commit/59570dec37913c7e530303a83f03781d9aca958c) Fix for unescaping passwords for basic auth. (@notmatt) +- [0d771ab](https://github.com/mikeal/request/commit/0d771ab7882b97d776179972c51c59386f91b953) require querystring (@notmatt) +- [875f79b](https://github.com/mikeal/request/commit/875f79b6a40340457fafafdadac813cfa5343689) Allow request's body to be an object. (@Stanley) +- [86895b9](https://github.com/mikeal/request/commit/86895b9c37f7b412b7df963c2a75361ff402d8c5) Merge branch 'master' of github.com:Stanley/request (@Stanley) +- [4c9c984](https://github.com/mikeal/request/commit/4c9c984cb37bfd4e901ce24b0e9b283604c27bf4) Better tests. (@mikeal) +- [02f6b38](https://github.com/mikeal/request/commit/02f6b38c1697a55ed43940d1fd0bef6225d4faa2) Added specs for body option (@Stanley) +- [af66607](https://github.com/mikeal/request/commit/af666072a22b8df4d75fe71885139059f56ea5ee) Made specs pass (@Stanley) +- [641ec05](https://github.com/mikeal/request/commit/641ec052dd95797816e781b2c3ac2524841db7cb) Merge branch 'master' of https://github.com/Stanley/request into jsonbody (@mikeal) +- [ab4c96b](https://github.com/mikeal/request/commit/ab4c96be1c002c10806d967a4b266543f8b0267c) Moved spec tests to normal node script tests. Style changes to code and docs. (@mikeal) +- [fc2a7ef](https://github.com/mikeal/request/commit/fc2a7ef301c1266938a5aeb539e4f3fc3b5191dd) Clearer wording for json option. (@mikeal) +- [01371d7](https://github.com/mikeal/request/commit/01371d728082e22aabeb840da82a30aec62d7d8a) Removing specs loader. 
(@mikeal) +- [560dadd](https://github.com/mikeal/request/commit/560dadd6cbd293622c66cd82b5506704c9850b13) Adding newline to end of test files, makes for cleaner diffs in the future. (@mikeal) +- [a0348dd](https://github.com/mikeal/request/commit/a0348dd0fef462c3c678a639619c27101c757035) Add pass message when tests finish. (@mikeal) +- [da77a0e](https://github.com/mikeal/request/commit/da77a0e152c1dd43f5c1e698110d23e4d32280db) Adding better debug message on failures for GET tests. (@mikeal) +- [6aade82](https://github.com/mikeal/request/commit/6aade822a90724a47176771d137e30b0a702e7ef) throw on error. (@mikeal) +- [4f41b8d](https://github.com/mikeal/request/commit/4f41b8dbbf9a93c53d5ccdf483c9d7803e279916) Rolling version. (@mikeal) +- [7cf01f0](https://github.com/mikeal/request/commit/7cf01f0481afb367b5d0d4878645ac535cfe9a2e) master is moving to node v0.3.6+ (@mikeal) +- [cb403a4](https://github.com/mikeal/request/commit/cb403a4cfdbe3d98feb9151fdbdae1e1436e59ab) Initial support for 0.3.6+.\n\nExperimental support for Request objects as streams. It's untested and requires a pending patch to node.js (@mikeal) +- [a3c80f9](https://github.com/mikeal/request/commit/a3c80f98f42f25d4cb02d5d9e34ba0e67cc89293) Adding defaults call. (@mikeal) +- [55f22f9](https://github.com/mikeal/request/commit/55f22f96365c57aa8687de951e3f9ed982eba408) Request will keep it's own agent pool so that it can expose a maxSockets setting for easy pool sizing. (@mikeal) +- [004741c](https://github.com/mikeal/request/commit/004741c23dc0eaf61f111161bb913ba418e033e4) Fixing reference error. (@mikeal) +- [8548541](https://github.com/mikeal/request/commit/85485414150fbac58b08126b3684f81dcb930bf1) Simplified pool implementation. (@mikeal) +- [9121c47](https://github.com/mikeal/request/commit/9121c47e4cbe47bccc20a75e0e6c6c098dce04fb) Default to globalPool. (@mikeal) +- [9ec3490](https://github.com/mikeal/request/commit/9ec3490aefd52f05b57e6db13730ace54b4439d1) Support for https. 
Requires pending patch in node core for consistent Agent API. (@mikeal) +- [146b154](https://github.com/mikeal/request/commit/146b154a1a31ae7a30aa9f28e891e4824af548fa) Fixes for reference errors. (@mikeal) +- [8756120](https://github.com/mikeal/request/commit/8756120f83ceb94f8ba600acba274ba512696eef) Only create an agent when a relevant option is passed. (@mikeal) +- [cc3cf03](https://github.com/mikeal/request/commit/cc3cf0322847982875ff32a7cef25c39c29630ba) New HTTP client doesn't require such explicit error listener management. (@mikeal) +- [f7c0379](https://github.com/mikeal/request/commit/f7c0379b99ac7989df7f934be67cc3ae979591bb) Fixing bug in .pipe() handling. Thanks tanepiper. (@mikeal) +- [897a7ef](https://github.com/mikeal/request/commit/897a7ef020cefcb7a36c04a11e286238df8ecdaa) Fixes for streams, docs, and convenience methods. (@mikeal) +- [7c2899a](https://github.com/mikeal/request/commit/7c2899a046b750eda495b23b2d58604260deddbc) Doc fixes. (@mikeal) +- [f535fe1](https://github.com/mikeal/request/commit/f535fe1008c8f11bb37e16f95fe287ed93343704) Doc fixes. (@mikeal) +- [d1deb5b](https://github.com/mikeal/request/commit/d1deb5b4dda4474fe9d480ad42ace664d89e73ee) Pipe tests, all passing! (@mikeal) +- [d67a041](https://github.com/mikeal/request/commit/d67a041783df8d724662d82f9fb792db1be3f4f0) Moving basic example to the top. (@mikeal) +- [6a98b9e](https://github.com/mikeal/request/commit/6a98b9e4a561b516b14d325c48785a9d6f40c514) Do not mix encoding option with pipeing. (@mikeal) +- [06b67ef](https://github.com/mikeal/request/commit/06b67ef01f73572a6a9b586854d4c21be427bdb2) Disable pooling with {pool:false} (@mikeal) +- [1c24881](https://github.com/mikeal/request/commit/1c248815b5dfffda43541e367bd4d66955ca0325) Send all arguments passed to stream methods. (@mikeal) +- [7946393](https://github.com/mikeal/request/commit/7946393893e75df24b390b7ab19eb5b9d6c23891) Better errors and warnings for different pipe conditions. 
(@mikeal) +- [ee2108d](https://github.com/mikeal/request/commit/ee2108db592113a0fe3840c361277fdd89f0c89c) Removing commented out legacy code. (@mikeal) +- [5f838b3](https://github.com/mikeal/request/commit/5f838b3582eda465f366d7df89c6dd69920405f2) Fixing redirect issue, thanks @linus (@mikeal) +- [c08758e](https://github.com/mikeal/request/commit/c08758e25290ee12278b3eb95d502645e0d66e4e) Adding del alias, thanks tanepiper. (@mikeal) +- [0b7d675](https://github.com/mikeal/request/commit/0b7d6756c120ebf17ce6c70fc1ff4ecd6850e704) Keep require('https') from throwing if node is compiled with --without-ssl. This will still throw for Invalid Protocol if https is used. Which makes more sense and makes request work without SSl support. (@davglass) +- [02fc9f7](https://github.com/mikeal/request/commit/02fc9f7cc8912402a5a98ddefaffa5f6da870562) Rolling version. Pushed new version to npm. (@mikeal) +- [0b30532](https://github.com/mikeal/request/commit/0b30532ee1a3cabb177017acfa7885b157031df2) Sent a patch today to fix this in core but this hack will fix node that predates that fix to core. (@mikeal) +- [5d5d8f4](https://github.com/mikeal/request/commit/5d5d8f43156b04fd3ceb312cfdf47cc2b0c4104d) Rolling version. Pushed new version to npm. (@mikeal) +- [1c00080](https://github.com/mikeal/request/commit/1c000809f1795d2e21635a626cf730aba2049d3e) Fixing reference to tls. (@mikeal) +- [4c355d1](https://github.com/mikeal/request/commit/4c355d1f87fced167e4b21770bfe6f8208f32b53) Be a better stream. 
(@mikeal) +- [9bed22f](https://github.com/mikeal/request/commit/9bed22f22e007201d4faeebdb486603c3bb088c3) Rolled version and pushed to npm (@mikeal) +- [34df8e2](https://github.com/mikeal/request/commit/34df8e2301dcfd10705b9ff3b257741b0816c8a1) typo in `request.defaults` (@clement) +- [4d7a6d4](https://github.com/mikeal/request/commit/4d7a6d46fa481e43fe873b8c8fad2f7dd816dbb5) default value only if undefined in `request.defaults` + misplaced `return` statement (@clement) +- [243a565](https://github.com/mikeal/request/commit/243a56563f1014318a467e46113b2c61b485f377) Adding support for request(url) (@mikeal) +- [83a9cec](https://github.com/mikeal/request/commit/83a9cec3cb2f7a43a1e10c13da8d0dd72b937965) Fixing case where + is in user or password. (@mikeal) +- [8bb7f98](https://github.com/mikeal/request/commit/8bb7f98ba8b78c217552c979811c07f1299318fe) making Request a duplex stream rather than adding special handling for pipes out. (@mikeal) +- [55a1fde](https://github.com/mikeal/request/commit/55a1fdedcad1e291502ce10010dda7e478a1b503) pause and resume should act on response instead of request (@tobowers) +- [63125a3](https://github.com/mikeal/request/commit/63125a33523e72e449ceef76da57b63522998282) Making request really smart about pipeing to itself so that we can do simple proxy cats (@mikeal) +- [2f9e257](https://github.com/mikeal/request/commit/2f9e257bc39eb329eec660c6d675fb40172fc5a5) Rolling version since master right now has some pretty hot new code in it. (@mikeal) +- [#31](https://github.com/mikeal/request/pull/31) Error on piping a request to a destination (@tobowers) +- [b1f3d54](https://github.com/mikeal/request/commit/b1f3d5439d24b848b2bf3a6459eea74cb0e43df3) The "end" event that was supposed to be emitted to fix a core bug in NodeJS wasn't fired because it wasn't emitted on the response object. 
(@voxpelli) +- [#35](https://github.com/mikeal/request/pull/35) The "end" event isn't emitted for some responses (@voxpelli) +- [40b1c67](https://github.com/mikeal/request/commit/40b1c676e1d3a292719ad2dd9cf9354c101bad47) Rolling version. (@mikeal) +- [9a28022](https://github.com/mikeal/request/commit/9a28022d0e438d0028e61a53e897689470025e50) Fixing bug in forwarding with new pipes logic. (@mikeal) +- [44e4e56](https://github.com/mikeal/request/commit/44e4e5605b0a9e02036393bcbd3a8d91280f5611) Fixing big bug in forwarding logic. (@mikeal) +- [b0cff72](https://github.com/mikeal/request/commit/b0cff72d63689d96e0b1d49a8a5aef9ccc71cb8b) Added timeout option to abort the request before the response starts responding (@mbrevoort) +- [cc76b10](https://github.com/mikeal/request/commit/cc76b109590437bfae54116e3424b2c6e44a3b3e) corrected spelling error in README (@mbrevoort) +- [#45](https://github.com/mikeal/request/pull/45) Added timeout option (@mbrevoort) +- [1cca56b](https://github.com/mikeal/request/commit/1cca56b29bb670c53d5995e76c0b075a747b5ad7) Fixing for node http client refactor. (@mikeal) +- [2a78aa3](https://github.com/mikeal/request/commit/2a78aa3f827e76c548e001fa519448b24466b518) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [ce12273](https://github.com/mikeal/request/commit/ce12273d3990c1446d3166bbd9e35c0e2435f137) New fs.ReadStream handling hotness. (@mikeal) +- [535e30a](https://github.com/mikeal/request/commit/535e30a4bd4a8e41d97ffa6a4e99630ac09a4bcb) Adding pipe support to HTTP ServerResponse objects. (@mikeal) +- [2f0cf6b](https://github.com/mikeal/request/commit/2f0cf6bf44edbaec4c0a0cb15a679302de7f0aff) Setting proper statusCode. (@mikeal) +- [6e3ecb1](https://github.com/mikeal/request/commit/6e3ecb106c3a32101d80ac0f87968fddd3ac5e2c) Adding test for pipeing file to disc. (@mikeal) +- [bbbb52e](https://github.com/mikeal/request/commit/bbbb52e406b65100b557caa3687a1aa04fab6ff3) Pumping version. 
(@mikeal) +- [a10b6e4](https://github.com/mikeal/request/commit/a10b6e4c08478364b8079801fdb23f3530fcc85f) Adding reference to Request instance on response to make it easier on inline callbacks. fixes #43. (@mikeal) +- [b9aff1f](https://github.com/mikeal/request/commit/b9aff1fe007dab3f93e666f047fa03a4e8f5f8b7) Add body property to resp when we have it as a shorthand. fixes #28 (@mikeal) +- [411b30d](https://github.com/mikeal/request/commit/411b30dab1fe5b20880113aa801a2fdbb7c35c40) If the error is handled and not throw we would still process redirects. Fixes #34. (@mikeal) +- [8f3c2b4](https://github.com/mikeal/request/commit/8f3c2b4f6dee8838f30e2430a23d5071128148f0) w00t! request 2.0 (@mikeal) +- [9957542](https://github.com/mikeal/request/commit/9957542cc6928443f3a7769510673665b5a90040) valid semver. (@mikeal) +- [31f5ee2](https://github.com/mikeal/request/commit/31f5ee28726ac7e14355cad0c6d2785f9ca422c6) Drastically improved header handling. (@mikeal) +- [c99b8fc](https://github.com/mikeal/request/commit/c99b8fcd706ae035f6248669b017ac2995e45f31) Return destination stream from pipe(). (@mikeal) +- [cba588c](https://github.com/mikeal/request/commit/cba588cec1e204d70f40f8bd11df0e27dc78ef0c) Style fixes. Bye Bye semi-colons. Mostly lined up with npm style. (@mikeal) +- [8515a51](https://github.com/mikeal/request/commit/8515a510ccc0a661d7c28fce6e513a7d71be7f8f) Clearer spacing. Slightly more consistent. 
(@mikeal) +- [3acd82a](https://github.com/mikeal/request/commit/3acd82a10e7d973fc5dbaa574c2e8906e48e1ee9) add failing test for issue #51 (@benatkin) +- [68c17f6](https://github.com/mikeal/request/commit/68c17f6c9a3d7217368b3b8bc61203e6a14eb4f0) implement parsing json response when json is truthy (@benatkin) +- [1cb1ec1](https://github.com/mikeal/request/commit/1cb1ec114b03394a0a530f245a857d8424cad02d) allow empty string (@benatkin) +- [4f8d2df](https://github.com/mikeal/request/commit/4f8d2df9f845690667a56e7698dbaf23b5028177) support JSON APIs that don't set the write content type (@benatkin) +- [#53](https://github.com/mikeal/request/pull/53) Parse json: Issue #51 (@benatkin) +- [c63e6e9](https://github.com/mikeal/request/commit/c63e6e96378a2b050bddbe1b39337662f304dc95) Adding proxy to docs, don't know why this wasn't already in. (@mikeal) +- [ef767d1](https://github.com/mikeal/request/commit/ef767d12f13a9c78d3df89add7556f5421204843) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [1b12d3a](https://github.com/mikeal/request/commit/1b12d3a9f48a6142d75fa1790c80eb313388ca44) Emit a proper error. (@mikeal) +- [47314d7](https://github.com/mikeal/request/commit/47314d7cb41fe9c3a7717a502bed9cf1b6074ffc) Greatly expanded documentation. (@mikeal) +- [e477369](https://github.com/mikeal/request/commit/e477369b4bbc271248ee8b686c556567570a6cca) Doc refinements. (@mikeal) +- [fe4d221](https://github.com/mikeal/request/commit/fe4d22109bc1411c29b253756d609856327ff146) Fix for newer npm (@mikeal) +- [7b2f788](https://github.com/mikeal/request/commit/7b2f788293e205edc7b46a7fd5304296b5e800e3) More doc cleanup. (@mikeal) +- [f8eb2e2](https://github.com/mikeal/request/commit/f8eb2e229aca38547236d48066a0b3f9f8f67638) Copy headers so that they survive mutation. (@mikeal) +- [59eab0e](https://github.com/mikeal/request/commit/59eab0e5e49c6d32697822f712ed725843e70010) Rolling version. 
(@mikeal) +- [76bf5f6](https://github.com/mikeal/request/commit/76bf5f6c6e37f6cb972b3d4f1ac495a4ceaaa00d) Improvements to json handling and defaults. (@mikeal) +- [81e2c40](https://github.com/mikeal/request/commit/81e2c4040a9911a242148e1d4a482ac6c745d8eb) Rolling version. (@mikeal) +- [76d8924](https://github.com/mikeal/request/commit/76d8924cab295f80518a71d5903f1e815618414f) Proper checking and handling of json bodies (@mikeal) +- [a8422a8](https://github.com/mikeal/request/commit/a8422a80895ed70e3871c7826a51933a75c51b69) Rolling version. (@mikeal) +- [f236376](https://github.com/mikeal/request/commit/f2363760782c3d532900a86d383c34f3c94f6d5f) Adding pipefilter. (@mikeal) +- [dd85f8d](https://github.com/mikeal/request/commit/dd85f8da969c2cc1825a7dfec6eac430de36440c) Rolling version. (@mikeal) +- [#66](https://github.com/mikeal/request/pull/66) Do not overwrite established content-type headers for read stream deliver (@voodootikigod) +- [b09212f](https://github.com/mikeal/request/commit/b09212f38fe736c2c92a1ee076cae9d0f4c612c3) Do not overwrite established content-type headers for read stream deliveries. (@voodootikigod) +- [01bc25d](https://github.com/mikeal/request/commit/01bc25d25343d73e9f5731b3d0df1cf5923398d4) Only apply workaround on pre-0.5 node.js and move test to assert.equal (@mikeal) +- [d487131](https://github.com/mikeal/request/commit/d487131ebc2f7a4bf265061845f7f3ea2fd3ed34) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [1200df5](https://github.com/mikeal/request/commit/1200df52bd334f9a44a43846159146b8f938fd9e) Rolling version. 
(@mikeal) +- [8279362](https://github.com/mikeal/request/commit/82793626f6965884a3720d66f5a276d7d4d30873) fix global var leaks (@aheckmann) +- [#67](https://github.com/mikeal/request/pull/67) fixed global variable leaks (@aheckmann) +- [ab91204](https://github.com/mikeal/request/commit/ab9120495a89536c7152e3cdf17d684323b40474) Test that chunked responses are properly toString'ed (@isaacs) +- [9bff39f](https://github.com/mikeal/request/commit/9bff39fa485f28d7f1754e72f026418ca1186783) Properly flatten chunked responses (@isaacs) +- [8e4e956](https://github.com/mikeal/request/commit/8e4e95654391c71c22933ffd422fdc82d20ac059) Fix #52 Make the tests runnable with npm (@isaacs) +- [a9aa9d6](https://github.com/mikeal/request/commit/a9aa9d6d50ef0481553da3e50e40e723a58de10a) Fix #71 Respect the strictSSL flag (@isaacs) +- [#69](https://github.com/mikeal/request/pull/69) Flatten chunked requests properly (@isaacs) +- [#73](https://github.com/mikeal/request/pull/73) Fix #71 Respect the strictSSL flag (@isaacs) +- [#70](https://github.com/mikeal/request/pull/70) add test script to package.json (@isaacs) +- [08ca561](https://github.com/mikeal/request/commit/08ca5617e0d8bcadee98f10f94a49cbf2dd02862) Fixing case where encoding is set. Also cleaning up trailing whitespace because my editor likes to do that now. (@mikeal) +- [0be269f](https://github.com/mikeal/request/commit/0be269f7d9da6c3a14a59d5579546fee9d038960) Fixing case where no body exists. (@mikeal) +- [2f37bbc](https://github.com/mikeal/request/commit/2f37bbc51ff84c3c28ae419138a19bd33a9f0103) Fixing timeout tests. (@mikeal) +- [f551a2f](https://github.com/mikeal/request/commit/f551a2f02a87994249c2fd37dc8f20a29e8bf529) Fixing legacy naming of self as options. 
(@mikeal) +- [717789e](https://github.com/mikeal/request/commit/717789ec9f690e9d5216ce1c27688eef822940cc) Avoid duplicate emit when using a timeout (@Marsup) +- [#76](https://github.com/mikeal/request/pull/76) Bug when a request fails and a timeout is set (@Marsup) +- [c1d255e](https://github.com/mikeal/request/commit/c1d255e5bcc5791ab69809913fe6d917ab93c8b7) global leakage in request.defaults (@isaacs) +- [14070f2](https://github.com/mikeal/request/commit/14070f269c79cae6ef9e7f7a415867150599bb8e) Don't require SSL for non-SSL requests (@isaacs) +- [4b8f696](https://github.com/mikeal/request/commit/4b8f6965e14c6fb704cf16f5bc011e4787cf32b2) Set proxy auth instead of just setting auth a second time (@isaacs) +- [cd22fbd](https://github.com/mikeal/request/commit/cd22fbdb00b90c5c75187ecf41373cfbb4af5bcd) Merge branch 'proxy-auth-bug' (@isaacs) +- [#78](https://github.com/mikeal/request/pull/78) Don't try to do strictSSL for non-ssl connections (@isaacs) +- [d8c53fc](https://github.com/mikeal/request/commit/d8c53fceca3af385753880395c680f6ec3d4d560) Removing legacy call to sys.puts (@mikeal) +- [731b32b](https://github.com/mikeal/request/commit/731b32b654bb217de3466b8d149ce480988bb24b) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [9c897df](https://github.com/mikeal/request/commit/9c897dffc7e238f10eb7e14c61978d6821c70f56) Enhance redirect handling: (1) response._redirectsFollowed reports the total number of redirects followed instead of being reset to 0; (2) add response.redirects, an array of the response.statusCode and response.headers.location for each redirect. 
(@danmactough) +- [#81](https://github.com/mikeal/request/pull/81) Enhance redirect handling (@danmactough) +- [4c84001](https://github.com/mikeal/request/commit/4c8400103ec18a0729e29e9ffb17dda65ce02f6d) Document strictSSL option (@isaacs) +- [d517ac0](https://github.com/mikeal/request/commit/d517ac03278b3ebd9a46ca9f263bea68d655822b) allow passing in buffers as multipart bodies (@kkaefer) +- [6563865](https://github.com/mikeal/request/commit/6563865b80573ad3c68834a6633aff6d322b59d5) bugs[web] should be bugs[url] (@isaacs) +- [2625854](https://github.com/mikeal/request/commit/262585480c148c56772dfc8386cfc59d5d262ca0) add option followAllRedirects to follow post/put redirects +- [bc057af](https://github.com/mikeal/request/commit/bc057affb58272d9152766956e5cde4ea51ca043) fix typo, force redirects to always use GET +- [d68b434](https://github.com/mikeal/request/commit/d68b434693dbf848dff4c570c4249a35329cc24f) Support node 0.5.11-style url parsing (@isaacs) +- [#96](https://github.com/mikeal/request/pull/96) Authless parsed url host support (@isaacs) +- [9f66c6d](https://github.com/mikeal/request/commit/9f66c6d79bc6515d870b906df39bd9d6d9164994) Typo, causing 'TypeError: Cannot read property 'length' of undefined' (@isaacs) +- [#97](https://github.com/mikeal/request/pull/97) Typo in previous pull causes TypeError in non-0.5.11 versions (@isaacs) +- [b320e05](https://github.com/mikeal/request/commit/b320e05f2d84510f47a6b6857d091c8cd4d3ae2e) When no request body is being sent set 'content-length':0. fixes #89 (@mikeal) +- [059916c](https://github.com/mikeal/request/commit/059916c545a0faa953cb8ac66b8c3ae243b1c8ce) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [248e9d6](https://github.com/mikeal/request/commit/248e9d65e73ac868948a82d07feaf33387723a1d) Fix for pipe() after response. Added response event, fixed and updated tests, removed deprecated stream objects. 
(@mikeal) +- [a2e5d6e](https://github.com/mikeal/request/commit/a2e5d6e30d3e101f8c5a034ef0401fdde8608ccf) Fixing double callback firing. node 0.5 is much better about calling errors on the client object which, when aborting on timeout, predictable emits an error which then triggers a double callback. (@mikeal) +- [5f80577](https://github.com/mikeal/request/commit/5f805775e6aeaaf229cc781439b29108fb69f373) Release for 0.6 (@mikeal) +- [bf906de](https://github.com/mikeal/request/commit/bf906de601121b52c433b0af208550f1db892cde) Adding some oauth support, tested with Twitter. (@mikeal) +- [8869b2e](https://github.com/mikeal/request/commit/8869b2e88cc305e224556c5ca75b7b59311911d9) Removing irrelevant comments. (@mikeal) +- [8323eed](https://github.com/mikeal/request/commit/8323eed4915bb73b33544bc276f3840c13969134) Closed issue 82 : handling cookies - added tests too +- [739f841](https://github.com/mikeal/request/commit/739f84166d619778ab96fd0b0f4f1f43e8b0fdda) Closed issue 82 : handling cookies - added tests too +- [7daf841](https://github.com/mikeal/request/commit/7daf8415fb1a4e707ec54eb413169e49d8bbe521) Closed issue 82 : handling cookies - added tests too +- [6c22041](https://github.com/mikeal/request/commit/6c22041a4719bf081c827dda8f35e7b79b4c39d9) changed README +- [3db7f7d](https://github.com/mikeal/request/commit/3db7f7d38e95406b84f06fed52b69038b0250904) Updated README +- [6181b7a](https://github.com/mikeal/request/commit/6181b7a8a4be75bcf75cd3ff6dacb8e910737e92) Documented request.cookie() and request.jar() +- [fc44260](https://github.com/mikeal/request/commit/fc44260d13f0094bfe96d18878a11c6fe88b69e5) Tiny cookie example error on README +- [366831b](https://github.com/mikeal/request/commit/366831b705b5d5ebfbec5f63b4b140cbafcb4515) Remove instanceof check for CookieJar (mikeal suggestion) +- [88488cf](https://github.com/mikeal/request/commit/88488cf076efbd916b0326e0981e280c993963a7) Also add cookie to the user defined cookie jar (mikeal's suggestion) +- 
[f6fef5b](https://github.com/mikeal/request/commit/f6fef5bfa4ba8e1dfa3022df8991716e5cba7264) Updated cookie documentation in README file +- [b519044](https://github.com/mikeal/request/commit/b5190441a889164dfeb4148fac643fd7a87cfb51) request.defaults({jar: false}) disables cookies && also updated README +- [856a65c](https://github.com/mikeal/request/commit/856a65cd28402efbe3831a68d73937564a27ea9b) Update jar documentation in the options also +- [#102](https://github.com/mikeal/request/pull/102) Implemented cookies - closes issue 82: https://github.com/mikeal/request/issues/82 (@alessioalex) +- [62592e7](https://github.com/mikeal/request/commit/62592e7fe9ee5ecaee80b8f5bc2400e4a277e694) Cookie bugs (@janjongboom) +- [a06ad2f](https://github.com/mikeal/request/commit/a06ad2f955270974409e75c088e1f5d1f5298ff5) Follow redirects should work on PUT and POST requests as well. This is more consistent to other frameworks, e.g. .NET (@janjongboom) +- [bf3f5d3](https://github.com/mikeal/request/commit/bf3f5d30fdabf6946096623fc3398bb66ed19a1f) Cookies shouldn't be discarded when followRedirect = true (@janjongboom) +- [16db85c](https://github.com/mikeal/request/commit/16db85c07e6c2516269299640fdddca6db7bc051) Revert "Follow redirects should work on PUT and POST requests as well. This is more consistent to other frameworks, e.g. .NET" (@janjongboom) +- [841664e](https://github.com/mikeal/request/commit/841664e309f329be98c1a011c634f5291af1eebc) Add test for proxy option (@dominictarr) +- [#105](https://github.com/mikeal/request/pull/105) added test for proxy option. (@dominictarr) +- [50d2d39](https://github.com/mikeal/request/commit/50d2d3934cd86d7142a4aab66017bb1ef82329cf) Fixing test, emitter matches on req.url so it needs the full url. (@mikeal) +- [668a291](https://github.com/mikeal/request/commit/668a291013380af305eba12b1d5c7a5376a74c76) Adding some documentation for OAuth signing support. 
(@mikeal) +- [04faa3b](https://github.com/mikeal/request/commit/04faa3bf2b1f4ec710414c6ec7231b24767b2f89) Minor improvements in example (@mikeal) +- [0fddc17](https://github.com/mikeal/request/commit/0fddc1798dcd9b213e3f8aec504c61cecf4d7997) Another small fix to the url in the docs. (@mikeal) +- [337649a](https://github.com/mikeal/request/commit/337649a08b4263c0d108cd4621475c8ff9cf8dd0) Add oauth to options. (@mikeal) +- [#86](https://github.com/mikeal/request/pull/86) Can't post binary to multipart requests (@developmentseed) +- [4e4d428](https://github.com/mikeal/request/commit/4e4d4285490be20abf89ff1fb54fb5088c01c00e) Update to Iris Couch URL (@jhs) +- [#110](https://github.com/mikeal/request/pull/110) Update to Iris Couch URL (@iriscouch) +- [d7af099](https://github.com/mikeal/request/commit/d7af0994b382466367f2cafc5376150e661eeb9d) Remove the global `i` as it's causing my test suites to fail with leak detection turned on. (@3rd-Eden) +- [#117](https://github.com/mikeal/request/pull/117) Remove the global `i` (@3rd-Eden) +- [b2a4ad1](https://github.com/mikeal/request/commit/b2a4ad1e7d7553230e932ea093d7f77f38147ef9) Force all cookie keys into lower case as suggested by LinusU (@jhurliman) +- [055a726](https://github.com/mikeal/request/commit/055a7268b40425643d23bd6a4f09c7268dbab680) Applying a modified version of pull request #106 as suggested by janjongboom (@jhurliman) +- [#121](https://github.com/mikeal/request/pull/121) Another patch for cookie handling regression (@jhurliman) +- [a353f4e](https://github.com/mikeal/request/commit/a353f4eeb312ea378d34b624f5c4df33eefa152c) Merge remote-tracking branch 'upstream/master' (@janjongboom) +- [#104](https://github.com/mikeal/request/pull/104) Cookie handling contains bugs (@janjongboom) +- [a3be5ad](https://github.com/mikeal/request/commit/a3be5ad5ea112422ed00da632530b93bcf54727c) Fix encoding of characters like ( (@mikeal) +- 
[dd2067b](https://github.com/mikeal/request/commit/dd2067bbbf77d1132c9ed480848645136b8a5521) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [ddc4e45](https://github.com/mikeal/request/commit/ddc4e453c3b9a0e11da4df156c5e15206abfc1ef) Pushed new version to npm (@mikeal) +- [feee5eb](https://github.com/mikeal/request/commit/feee5ebd2ca8c09db25b5cb13cd951f7c4322a49) Real fix for encoding issues in javascript and oauth. (@mikeal) +- [23896cd](https://github.com/mikeal/request/commit/23896cdc66d75ec176876167ff21da72b7ff181b) Pushed new version to npm. (@mikeal) +- [a471ed2](https://github.com/mikeal/request/commit/a471ed2ca8acdca1010a0fc20434c5c9956b0d0c) HTTP redirect tests (@jhs) +- [a4a9aa1](https://github.com/mikeal/request/commit/a4a9aa199ff958630791e131092ec332ada00a49) A self-signed certificate for upcoming HTTPS testing (@jhs) +- [10ac6b9](https://github.com/mikeal/request/commit/10ac6b9db40263bec1bf63ee7e057000ffd2d7e9) HTTPS tests, for now a copy of the test-body tests (@jhs) +- [105aed1](https://github.com/mikeal/request/commit/105aed1ff99add1957f91df7efabf406e262f463) Support an "httpModules" object for custom http/https module behavior (@jhs) +- [#112](https://github.com/mikeal/request/pull/112) Support using a custom http-like module (@iriscouch) +- [d05a875](https://github.com/mikeal/request/commit/d05a8753af576fc1adccc7ffe9633690371c05ee) Test for #129 (@mikeal) +- [06cdfaa](https://github.com/mikeal/request/commit/06cdfaa3c29233dac3f47e156f2b5b3a0f0ae4b8) return body as buffer when encoding is null +- [#132](https://github.com/mikeal/request/pull/132) return the body as a Buffer when encoding is set to null (@jahewson) +- [4882e51](https://github.com/mikeal/request/commit/4882e519ed6b8d08795da5de37166148ce0ee440) fixed cookies parsing, updated tests (@afanasy) +- [2be228e](https://github.com/mikeal/request/commit/2be228ec8b48a60028bd1d80c8cbebf23964f913) Change `host` to `hostname` in request hash +- 
[#135](https://github.com/mikeal/request/pull/135) host vs hostname (@iangreenleaf) +- [e24abc5](https://github.com/mikeal/request/commit/e24abc5cc2c6fa154ae04fe58a16d135eeba4951) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [c99c809](https://github.com/mikeal/request/commit/c99c809bb48b9c0193aae3789c5c844f7f6cbe92) Reverting host -> hostname because it breaks in pre-0.6. (@mikeal) +- [a1134d8](https://github.com/mikeal/request/commit/a1134d855f928fde5c4fe9ee255c111da0195bfc) adding logging (@mikeal) +- [#133](https://github.com/mikeal/request/pull/133) Fixed cookies parsing (@afanasy) +- [9179471](https://github.com/mikeal/request/commit/9179471f9f63b6ba9c9078a35cb888337ce295e8) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [cbb180b](https://github.com/mikeal/request/commit/cbb180b0399074995c235a555e3e3e162d738f7c) Fixes to oauth test. (@mikeal) +- [e1c351f](https://github.com/mikeal/request/commit/e1c351f92958634ccf3fbe78aa2f5b06d9c9a5fa) Published new version. (@mikeal) +- [3ceee86](https://github.com/mikeal/request/commit/3ceee86f1f3aad3a6877d6d3813e087549f3b485) Formatting fixes. (@mikeal) +- [18e1af5](https://github.com/mikeal/request/commit/18e1af5e38168dcb95c8ae29bb234f1ad9bbbdf9) Fixing log error. (@mikeal) +- [edc19b5](https://github.com/mikeal/request/commit/edc19b5249f655714efa0f8fa110cf663b742921) Pushed new version. (@mikeal) +- [f51c32b](https://github.com/mikeal/request/commit/f51c32bd6f4da0419ed8404b610c43ee3f21cf92) added "form" option to readme. 
(@petejkim) +- [#144](https://github.com/mikeal/request/pull/144) added "form" option to readme (@petejkim) +- [b58022e](https://github.com/mikeal/request/commit/b58022ecda782af93e35e5f9601013b90b09ca73) add "forever" method (@thejh) +- [79d4651](https://github.com/mikeal/request/commit/79d46510ddff2e2c12c69f7ae4072ec489e27b0e) remove logging (@thejh) +- [f87cbf6](https://github.com/mikeal/request/commit/f87cbf6ec6fc0fc2869c340114514c887b304a80) retry on ECONNRESET on reused socket (@thejh) +- [1a91675](https://github.com/mikeal/request/commit/1a916757f4ec48b1282fddfa0aaa0fa6a1bf1267) Multipart requests should respect content-type if set; Issue #145 (@apeace) +- [#146](https://github.com/mikeal/request/pull/146) Multipart should respect content-type if previously set (@apeace) +- [#148](https://github.com/mikeal/request/pull/148) Retry Agent (@thejh) +- [70c5b63](https://github.com/mikeal/request/commit/70c5b63aca29a7d1629fa2909ff5b7199bbf0fd1) Publishing new version to npm. (@mikeal) +- [fc0f04b](https://github.com/mikeal/request/commit/fc0f04bab5d6be56a2c19d47d3e8386bd9a0b29e) Fix: timeout on socket, timeout after redirect +- [ef79e59](https://github.com/mikeal/request/commit/ef79e59bbb88ed3e7d4368fe3ca5eee411bda345) Fix: timeout after redirect 2 +- [c32a218](https://github.com/mikeal/request/commit/c32a218da2296e89a269f1832d95b12c4aa10852) merge master (@jroes) +- [d2d9b54](https://github.com/mikeal/request/commit/d2d9b545e5679b829d33deeba0b22f9050fd78b1) add line to docs describing followAllRedirects option (@jroes) +- [#90](https://github.com/mikeal/request/pull/90) add option followAllRedirects to follow post/put redirects (@jroes) +- [c08ab7e](https://github.com/mikeal/request/commit/c08ab7efaefd39c04deb6986716efe5a6069528e) Emit an event after we create the request object so that people can manipulate it before nextTick(). 
(@mikeal) +- [#162](https://github.com/mikeal/request/pull/162) Fix issue #159 (@dpetukhov) +- [e77a169](https://github.com/mikeal/request/commit/e77a1695c5c632c067857e99274f28a1d74301fe) fixing streaming example. fixes #164 (@mikeal) +- [ee53386](https://github.com/mikeal/request/commit/ee53386d85975c79b801edbb4f5bb7ff4c5dc90b) fixes #127 (@mikeal) +- [e2cd9de](https://github.com/mikeal/request/commit/e2cd9de9a9d10e1aa4cf4e26006bb30fa5086f0b) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [a0ab977](https://github.com/mikeal/request/commit/a0ab9770a8fb89f970bb3783ed4e6dde9e33511b) Added failing test for #125. (@papandreou) +- [c80800a](https://github.com/mikeal/request/commit/c80800a834b0f8bc0fb40d1fad4d4165a83369fd) Fix cookie jar/headers.cookie collision. Closes #125. (@papandreou) +- [1ac9e2d](https://github.com/mikeal/request/commit/1ac9e2d1bf776728a1fe676dd3693ef66f50f7f7) Redirect test: Also assert that the request cookie doesn't get doubled in the request for the landing page. (@papandreou) +- [07bbf33](https://github.com/mikeal/request/commit/07bbf331e2a0d40d261487f6222e8cafee0e50e3) Fixes #150 (@mikeal) +- [c640eed](https://github.com/mikeal/request/commit/c640eed292c06eac3ec89f60031ddf0fc0add732) Cookie jar handling: Don't double the cookies on each redirect (see discussion on #139). (@papandreou) +- [808de8b](https://github.com/mikeal/request/commit/808de8b0ba49d4bb81590ec37a873e6be4d9a416) Adding some missing mime types #138 (@serby) +- [#161](https://github.com/mikeal/request/pull/161) Fix cookie jar/headers.cookie collision (#125) (@papandreou) +- [#168](https://github.com/mikeal/request/pull/168) Picking off an EasyFix by adding some missing mimetypes. 
(@serby) +- [2a30487](https://github.com/mikeal/request/commit/2a304879f4218c1e46195d882bc81c0f874be329) bugfix - allow add cookie to wrapped request (defaults) (@fabianonunes) +- [a18b4f1](https://github.com/mikeal/request/commit/a18b4f14559f56cf52ca1b421daa6a934d28d51b) Making pipeDest a public prototype method rather than keeping it private. (@mikeal) +- [#170](https://github.com/mikeal/request/pull/170) can't create a cookie in a wrapped request (defaults) (@fabianonunes) +- [49a0f60](https://github.com/mikeal/request/commit/49a0f604779c91dd1759a02cbb195ccbd8d73f5d) Structural refactor, getting read for composable API. (@mikeal) +- [5daa0b2](https://github.com/mikeal/request/commit/5daa0b28b06cf109614f19e76b0e0b9b25ee3baf) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [e4df85c](https://github.com/mikeal/request/commit/e4df85c72221bf09ee7e1eb54f6c881851bd4164) Composable API for OAuth. (@mikeal) +- [945ec40](https://github.com/mikeal/request/commit/945ec40baef968ddd468c3b4dfce01621e4a0e31) Composable form API (@mikeal) +- [c30b47f](https://github.com/mikeal/request/commit/c30b47f229522a75af85da269157377b4a7dc37d) Use this, return this. (@mikeal) +- [e908644](https://github.com/mikeal/request/commit/e908644a69f9107b954f13635736f1e640216aec) Composable multipart API. (@mikeal) +- [e115677](https://github.com/mikeal/request/commit/e115677b1a03576eb96386986c350f211a4f38cd) Composable jar. Guard against overwrites on retry. (@mikeal) +- [a482e48](https://github.com/mikeal/request/commit/a482e4802e11fd122b12e18d1b18b49850fef823) Updating copyright for the new year. (@mikeal) +- [3c6581a](https://github.com/mikeal/request/commit/3c6581a9d4508fe5d75e111ae0fb94c5e0078404) Adding clobber argument for appending to headers. thanks @isaacs (@mikeal) +- [54e6aca](https://github.com/mikeal/request/commit/54e6aca0ab5982621fc9b35500f2154e50c0c95d) Fixes #144. 
(@mikeal) +- [12f4997](https://github.com/mikeal/request/commit/12f4997ed83bfbfefa3fc5b5635bc9a6829aa0d7) Fixing clobber. (@mikeal) +- [2f34fd1](https://github.com/mikeal/request/commit/2f34fd13b7ec86cb1c67e0a58664b9e060a34a50) Added support for a "query" option value that is a hash of querystring values that is merged (taking precedence over) with the querystring passed in the uri string. (@csainty) +- [a32d9e7](https://github.com/mikeal/request/commit/a32d9e7069533fb727a71730dbaa0f62ebefb731) Added a js based test runner so I can run tests on windows. (@csainty) +- [e0b6ce0](https://github.com/mikeal/request/commit/e0b6ce063de0c4223c97982128bb8203caf4a331) Tidied up an issue where ?> was being appended to URLs. (@csainty) +- [d47150d](https://github.com/mikeal/request/commit/d47150d6748a452df336d8de9743218028a876db) Refactored to match the composable style (@csainty) +- [b7e0929](https://github.com/mikeal/request/commit/b7e0929837873a8132476bb2b4d2e2a0fdc7cd0f) implemented issue #173 allow uri to be first argument (@twilson63) +- [b7264a6](https://github.com/mikeal/request/commit/b7264a6626481d5da50a28c91ea0be7b688c9daf) removed debug line and reset ports (@twilson63) +- [76598c9](https://github.com/mikeal/request/commit/76598c92bee64376e5d431285ac1bf6783140dbb) removed npm-debug (@twilson63) +- [#177](https://github.com/mikeal/request/pull/177) Issue #173 Support uri as first and optional config as second argument (@twilson63) +- [0f24051](https://github.com/mikeal/request/commit/0f240517dea65337636a49cb1cc2b5327504430e) Renamed query to qs. It was actually my first choice, but there appeared to be conflicts with the qs = require('querystring'). These are no longer present though and must have been unrelated. (@csainty) +- [becedaa](https://github.com/mikeal/request/commit/becedaaa7681b0c4ad5c0a9b9922fc950f091af2) Changed test structure to no longer require a server, modeled on the oauth tests. 
This also lets me revert some of the changes I had to make to the test server and proxy tests (@csainty) +- [9b2bbf0](https://github.com/mikeal/request/commit/9b2bbf0c12e87a59320efac67759041cd4af913f) Modified how the qs function works, it now no longer tweaks the existing request uri, instead it recreates a new one. This allows me to revert all the other changes I had to make previously and gives a nice clean commit that is self contained. (@csainty) +- [5ac7e26](https://github.com/mikeal/request/commit/5ac7e26ce4f7bf5a334df91df83699891171c0ae) failing test for .pipe(dst, opts) (@substack) +- [3b2422e](https://github.com/mikeal/request/commit/3b2422e62fbd6359b841e59a2c1888db71a22c2c) fix for failing pipe opts test (@substack) +- [8788c8b](https://github.com/mikeal/request/commit/8788c8b8cba96662e9d94a96eb04d96b904adea3) added uri param for post, put, head, del shortcuts (@twilson63) +- [#179](https://github.com/mikeal/request/pull/179) fix to add opts in .pipe(stream, opts) (@substack) +- [#180](https://github.com/mikeal/request/pull/180) Modified the post, put, head and del shortcuts to support uri optional param (@twilson63) +- [37d0699](https://github.com/mikeal/request/commit/37d0699eb681e85b7df4896b0a68b6865e596cb3) Fixing end bug i introduced being stupid. (@mikeal) +- [3a97292](https://github.com/mikeal/request/commit/3a97292f45273fa2cc937c0698ba19964780b4bb) fixed defaults functionality to support (uri, options, callback) (@twilson63) +- [#182](https://github.com/mikeal/request/pull/182) Fix request.defaults to support (uri, options, callback) api (@twilson63) +- [c94b200](https://github.com/mikeal/request/commit/c94b200258fa48697e386121a3e114ab7bed2ecf) Switched npm test from the bash script to a node script so that it is cross-platform. (@csainty) +- [#176](https://github.com/mikeal/request/pull/176) Querystring option (@csainty) +- [3b1e609](https://github.com/mikeal/request/commit/3b1e6094451e8d34c93353177de9d76e9a805e43) Adding defaults test back in. 
(@mikeal) +- [b4ae0c2](https://github.com/mikeal/request/commit/b4ae0c2d50f018a90a3ec8daa1d14c92a99873b9) Fixing idiotic bug I introduced. (@mikeal) +- [32f76c8](https://github.com/mikeal/request/commit/32f76c8baaf784dc2f4f1871153b1796bcebdcfe) Pushed new version to npm. (@mikeal) +- [00d0d9f](https://github.com/mikeal/request/commit/00d0d9f432182f13a5b8aa2e3a2a144b5c179015) Adding accept header to json support. (@mikeal) +- [0f580e6](https://github.com/mikeal/request/commit/0f580e6f6317c5301a52c0b6963d58e27112abca) Add abort support to the returned request (@itay) +- [4505e6d](https://github.com/mikeal/request/commit/4505e6d39a44229bfe5dc4d9a920233e05a7dfdb) Fixing some edge streaming cases with redirects by reusing the Request object. (@mikeal) +- [eed57af](https://github.com/mikeal/request/commit/eed57af8fe3e16632e9e0043d4d7f4d147dbfb8f) Published new version. (@mikeal) +- [97386b5](https://github.com/mikeal/request/commit/97386b5d7315b5c83702ffc7d0b09e34ecb67e04) Fixing pretty bad bug from the composable refactor. (@mikeal) +- [b693ce6](https://github.com/mikeal/request/commit/b693ce64e16aaa859d4edc86f82fbb11e00d33c0) Move abort to a prototype method, don't raise error (@itay) +- [1330eef](https://github.com/mikeal/request/commit/1330eef3ec84a651a435c95cf1ff1a4003086440) Merge branch 'master' of git://github.com/mikeal/request (@itay) +- [#188](https://github.com/mikeal/request/pull/188) Add abort support to the returned request (@itay) +- [5ff4645](https://github.com/mikeal/request/commit/5ff46453e713da1ae66a0d510eda4919e4080abe) Style changes. (@mikeal) +- [2dbd1e4](https://github.com/mikeal/request/commit/2dbd1e4350c2941b795b0e5ee7c0a00cd04cce09) Fixing new params style on master for head request. (@mikeal) +- [14989b2](https://github.com/mikeal/request/commit/14989b2dfc6830dbdad5364930fba1d2995aba06) Pushed new version to npm. (@mikeal) +- [0ea2351](https://github.com/mikeal/request/commit/0ea2351ef017ada9b8472f8d73086715ebe30c6a) Fixes #190. 
outdated check on options.json from before we had boolean support. (@mikeal) +- [21bf78c](https://github.com/mikeal/request/commit/21bf78c264316f75f4e6c571461521cda6ccf088) Adds a block on DELETE requests in status 300-400 (@goatslacker) +- [0c0c201](https://github.com/mikeal/request/commit/0c0c20139b28b21a860f72b8ce0124046fae421d) Adds tests for GH-119 Fix (@goatslacker) +- [#193](https://github.com/mikeal/request/pull/193) Fixes GH-119 (@goatslacker) +- [5815a69](https://github.com/mikeal/request/commit/5815a697347f20658dc2bdfd0d06e41d0aa0dac4) Fixes #194. setTimeout only works on node 0.6+ (@mikeal) +- [1ddcd60](https://github.com/mikeal/request/commit/1ddcd605bc8936c5b3534e1cf9aa1b29fa2b060b) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [7b35b4f](https://github.com/mikeal/request/commit/7b35b4ff63bbdf133f0f600a88a87b5723d29bdf) Removing old checks for self.req, it's ensured if start() is called. Implementing early pause/resume for when streams try to pause/resume before any data is emitted. Fixes #195. 
(@mikeal) +- [f01b79b](https://github.com/mikeal/request/commit/f01b79bb651f64065bac8877739223527f5b5592) Make ForeverAgent work with HTTPS (@isaacs) +- [#197](https://github.com/mikeal/request/pull/197) Make ForeverAgent work with HTTPS (@isaacs) +- [8d85b57](https://github.com/mikeal/request/commit/8d85b57ebb81c9d2d0a6b94aed41bf2ab0e3ad09) Forever inherits bugfix (@isaacs) +- [#198](https://github.com/mikeal/request/pull/198) Bugfix on forever usage of util.inherits (@isaacs) +- [37446f5](https://github.com/mikeal/request/commit/37446f54bb21cf9c83ffa81d354d799ae7ecf9ed) Add a test of HTTPS strict with CA checking (@isaacs) +- [8378d2e](https://github.com/mikeal/request/commit/8378d2ef9b8121a9851d21b3f6ec8304bde61c9d) Support tunneling HTTPS requests over proxies (@isaacs) +- [#199](https://github.com/mikeal/request/pull/199) Tunnel (@isaacs) +- [f0052ac](https://github.com/mikeal/request/commit/f0052ac5e6ca9f3f4aa49f6cda6ba15eb5d8b8e6) Published new version to npm. (@mikeal) +- [cea668f](https://github.com/mikeal/request/commit/cea668f6f7d444831313ccc0e0d301d25f2bd421) Adding more explicit error when undefined is passed as uri or options. (@mikeal) +- [047b7b5](https://github.com/mikeal/request/commit/047b7b52f3b11f4c44a02aeb1c3583940ddb59c7) Fix special method functions that get passed an options object. (@mikeal) +- [746de0e](https://github.com/mikeal/request/commit/746de0ef2f564534b29eeb8f296a59bd2c3086a7) pass through Basic authorization option for HTTPS tunneling +- [6fda9d7](https://github.com/mikeal/request/commit/6fda9d7d75e24cc1302995e41e26a91e03fdfc9a) Always clobber internal objects for qs but preserve old querystring args when clobber is present. 
(@mikeal) +- [75ca7a2](https://github.com/mikeal/request/commit/75ca7a25bc9c6102e87f3660a25835c7fcd70edb) Merge branch 'master' of https://github.com/mikeal/request +- [3b9f0fd](https://github.com/mikeal/request/commit/3b9f0fd3da4ae74de9ec76e7c66c57a7f8641df2) Fix cookies so that attributes are case insensitive +- [fddbd6e](https://github.com/mikeal/request/commit/fddbd6ee7d531bc4a82f629633b9d1637cb039e8) Properly set cookies during redirects +- [0d0bdb7](https://github.com/mikeal/request/commit/0d0bdb793f908492d4086fae8744f1e33e68d8c6) Remove request body when following non-GET redirects +- [#203](https://github.com/mikeal/request/pull/203) Fix cookie and redirect bugs and add auth support for HTTPS tunnel (@milewise) +- [b5fa773](https://github.com/mikeal/request/commit/b5fa773994de1799cf53491db7f5f3ba32825b20) Replace all occurrences of special chars in RFC3986 (@chriso) +- [bc6cd6c](https://github.com/mikeal/request/commit/bc6cd6ca6c6157bad76f0b2b23d4993f389ba977) documenting additional behavior of json option (@jphaas) +- [80e4e43](https://github.com/mikeal/request/commit/80e4e43186de1e9dcfaa1c9a921451560b91267c) Fixes #215. (@mikeal) +- [51f343b](https://github.com/mikeal/request/commit/51f343b9adfc11ec1b2ddcfb52a57e1e13feacb2) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [89c0f1d](https://github.com/mikeal/request/commit/89c0f1dd324bc65ad9c07436fb2c8220de388c42) titlecase authorization for oauth (@visnup) +- [#217](https://github.com/mikeal/request/pull/217) need to use Authorization (titlecase) header with Tumblr OAuth (@visnup) +- [8c163eb](https://github.com/mikeal/request/commit/8c163eb9349459839fc720658979d5c97a955825) Double quotes are optional, and the space after the ; could be required (@janjongboom) +- [#224](https://github.com/mikeal/request/pull/224) Multipart content-type change (@janjongboom) +- [96f4b9b](https://github.com/mikeal/request/commit/96f4b9b1f7b937a92f3f94f10d6d02f8878b6107) Style changes. 
(@mikeal) +- [b131c64](https://github.com/mikeal/request/commit/b131c64816f621cf15f8c51e76eb105778b4aad8) Adding safe .toJSON method. fixes #167 (@mikeal) +- [05d6e02](https://github.com/mikeal/request/commit/05d6e02c31ec4e6fcfadbfbe5414e701710f6e55) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [74ca9a4](https://github.com/mikeal/request/commit/74ca9a4852b666d30dd71421e8cc8b8a83177148) Unified error and complete handling. Fixes #171 (@mikeal) +- [a86c7dc](https://github.com/mikeal/request/commit/a86c7dc7d0a7c640c7def4c0215e46e76a11ff56) Fixing followAllRedirects and all the redirect tests. (@mikeal) +- [#211](https://github.com/mikeal/request/pull/211) Replace all occurrences of special chars in RFC3986 (@chriso) +- [7e24e8a](https://github.com/mikeal/request/commit/7e24e8a48d0dcfe10d0cc08b3c4e9627b9a95a97) New version on npm, first 3.0 release candidate. (@mikeal) +- [22e0f0d](https://github.com/mikeal/request/commit/22e0f0d73459c11b81b0f66a2cde85492dd8e38f) Added test for .toJSON() (@mikeal) +- [df32746](https://github.com/mikeal/request/commit/df32746f157948b6ae05e87a35cf1768e065ef0b) Adding toJSON to npm test. (@mikeal) +- [e65bfba](https://github.com/mikeal/request/commit/e65bfba98f0886a059a268dcdceabf41aec1e5cc) New version in npm. (@mikeal) +- [2b95921](https://github.com/mikeal/request/commit/2b959217151aaff7a6e7cc15e2acfccd1bbb9b85) Fixing defaults when url is passed instead of uri. (@mikeal) +- [e0534d8](https://github.com/mikeal/request/commit/e0534d860b4931a7a6e645b328fd4418a5433057) Pushed new version to npm. 
(@mikeal) +- [d2dc835](https://github.com/mikeal/request/commit/d2dc83538379e9e1fafb94f5698c56b4a5318d8d) don't error when null is passed for options (@polotek) +- [db80bf0](https://github.com/mikeal/request/commit/db80bf0444bd98c45f635f305154b9da20eed328) expose initParams (@polotek) +- [8cf019c](https://github.com/mikeal/request/commit/8cf019c9f9f719694408840823e92da08ab9dac3) allow request.defaults to override the main request method (@polotek) +- [#240](https://github.com/mikeal/request/pull/240) don't error when null is passed for options (@polotek) +- [69d017d](https://github.com/mikeal/request/commit/69d017de57622429f123235cc5855f36b3e18d1c) added dynamic boundary for multipart requests (@zephrax) +- [fc13e18](https://github.com/mikeal/request/commit/fc13e185f5e28a280d347e61622ba708e1cd7bbc) added dynamic boundary for multipart requests (@zephrax) +- [#243](https://github.com/mikeal/request/pull/243) Dynamic boundary (@zephrax) +- [1764176](https://github.com/mikeal/request/commit/176417698a84c53c0a69bdfd2a05a2942919816c) Fixing the set-cookie header (@jeromegn) +- [#246](https://github.com/mikeal/request/pull/246) Fixing the set-cookie header (@jeromegn) +- [6f9da89](https://github.com/mikeal/request/commit/6f9da89348b848479c23192c04b3c0ddd5a4c8bc) do not set content-length header to 0 when self.method is GET or self.method is undefined (@sethbridges) +- [efc0ea4](https://github.com/mikeal/request/commit/efc0ea44d63372a30011822ad9d37bd3d7b85952) Experimental AWS signing. Signing code from knox. (@mikeal) +- [4c08a1c](https://github.com/mikeal/request/commit/4c08a1c10bc0ebb679e212ad87419f6c4cc341eb) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [fdb10eb](https://github.com/mikeal/request/commit/fdb10eb493110b8e6e4f679524f38cef946e3f08) Adding support for aws in options. 
(@mikeal) +- [dac6a30](https://github.com/mikeal/request/commit/dac6a301ae03207af88fae6f5017e82157b79b41) Fixing upgraded stat size and supporting content-type and content-md5 properly. (@mikeal) +- [98cb503](https://github.com/mikeal/request/commit/98cb50325e1d7789fd9f44523d2315df5f890d10) Allow body === '' /* the empty string */. (@Filirom1) +- [0e9ac12](https://github.com/mikeal/request/commit/0e9ac12c69aaca370fbca94b41358e1c3a2f6170) fixed just another global leak of i (@sreuter) +- [#260](https://github.com/mikeal/request/pull/260) fixed just another leak of 'i' (@sreuter) +- [#255](https://github.com/mikeal/request/pull/255) multipart allow body === '' ( the empty string ) (@Filirom1) +- [#249](https://github.com/mikeal/request/pull/249) Fix for the fix of your (closed) issue #89 where self.headers[content-length] is set to 0 for all methods (@sethbridges) +- [adc9ab1](https://github.com/mikeal/request/commit/adc9ab1f563f3cb4681ac8241fcc75e6099efde2) style changes. making @rwaldron cry (@mikeal) +- [155e6ee](https://github.com/mikeal/request/commit/155e6ee270924d5698d3fea37cefc1926cbaf998) Fixed `pool: false` to not use the global agent (@timshadel) +- [1232a8e](https://github.com/mikeal/request/commit/1232a8e46752619d4d4b51d558e6725faf7bf3aa) JSON test should check for equality (@timshadel) +- [#261](https://github.com/mikeal/request/pull/261) Setting 'pool' to 'false' does NOT disable Agent pooling (@timshadel) +- [#262](https://github.com/mikeal/request/pull/262) JSON test should check for equality (@timshadel) +- [914a723](https://github.com/mikeal/request/commit/914a72300702a78a08263fe98a43d25e25713a70) consumer_key and token_secret need to be encoded for OAuth v1 (@nanodocumet) +- [500e790](https://github.com/mikeal/request/commit/500e790f8773f245ff43dd9c14ec3d5c92fe0b9e) Fix uncontrolled crash when "this.uri" is an invalid URI (@naholyr) +- [#265](https://github.com/mikeal/request/pull/265) uncaughtException when redirected to invalid URI (@naholyr) +- 
[#263](https://github.com/mikeal/request/pull/263) Bug in OAuth key generation for sha1 (@nanodocumet) +- [f4b87cf](https://github.com/mikeal/request/commit/f4b87cf439453b3ca1d63e85b3aeb3373ee1f17e) I'm not OCD seriously (@TehShrike) +- [#268](https://github.com/mikeal/request/pull/268) I'm not OCD seriously (@TehShrike) +- [fcab7f1](https://github.com/mikeal/request/commit/fcab7f1953cd6fb141a7d98f60580c50b59fb73f) Adding a line break to the preamble as the first part of a multipart was not recognized by a server I was communicating with. (@proksoup) +- [661b62e](https://github.com/mikeal/request/commit/661b62e5319bf0143312404f1fc81c895c46f6e6) Commenting out failing post test. Need to figure out a way to test this now that the default is to use a UUID for the frontier. (@mikeal) +- [7165c86](https://github.com/mikeal/request/commit/7165c867fa5dea4dcb0aab74d2bf8ab5541e3f1b) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [5a7ca9b](https://github.com/mikeal/request/commit/5a7ca9b398c1300c08a28fb7f266054c3ce8c57a) Added drain event and returning the boolean from write to proper handle back pressure when piping. (@mafintosh) +- [#273](https://github.com/mikeal/request/pull/273) Pipe back pressure issue (@mafintosh) +- [f8ae8d1](https://github.com/mikeal/request/commit/f8ae8d18627e4743996d8600f77f4e4c05a2a590) New version in npm. 
(@mikeal) +- [7ff5dae](https://github.com/mikeal/request/commit/7ff5daef152bcfac5b02e661e5476a57b9693489) Merge remote-tracking branch 'upstream/master' (@proksoup) +- [1f34700](https://github.com/mikeal/request/commit/1f34700e5614ea2a2d78b80dd467c002c3e91cb3) fix tests with boundary by injecting boundry from header (@benatkin) +- [ee2b2c2](https://github.com/mikeal/request/commit/ee2b2c2f7a8625fde4d71d79e19cdc5d98f09955) Like in [node.js](https://github.com/joyent/node/blob/master/lib/net.js#L52) print logs if NODE_DEBUG contains the word request (@Filirom1) +- [#279](https://github.com/mikeal/request/pull/279) fix tests with boundary by injecting boundry from header (@benatkin) +- [3daebaf](https://github.com/mikeal/request/commit/3daebaf2551c8d0df7dac1ebff0af4fe08608768) Merge branch 'master' of https://github.com/mikeal/request (@proksoup) +- [dba2ebf](https://github.com/mikeal/request/commit/dba2ebf09552258f37b60122c19b236064b0d216) Updating with corresponding tests. (@proksoup) +- [396531d](https://github.com/mikeal/request/commit/396531d083c94bc807a25f7c3a50a0c92a00c5f7) Removing console.log of multipart (@proksoup) +- [54226a3](https://github.com/mikeal/request/commit/54226a38816b4169e0a7a5d8b1a7feba78235fec) Okay, trying it as an optional parameter, with a new test in test-body.js to verify (@proksoup) +- [23ae7d5](https://github.com/mikeal/request/commit/23ae7d576cc63d645eecf057112b71d6cb73e7b1) Remove non-"oauth_" parameters from being added into the OAuth Authorization header (@jplock) +- [8b82ef4](https://github.com/mikeal/request/commit/8b82ef4ff0b50b0c8dcfb830f62466fa30662666) Removing guard, there are some cases where this is valid. (@mikeal) +- [82440f7](https://github.com/mikeal/request/commit/82440f76f22a5fca856735af66e2dc3fcf240c0d) Adding back in guard for _started, need to keep some measure of safety but we should defer this restriction for as long as possible. 
(@mikeal) +- [#282](https://github.com/mikeal/request/pull/282) OAuth Authorization header contains non-"oauth_" parameters (@jplock) +- [087be3e](https://github.com/mikeal/request/commit/087be3ebbada53699d14839374f1679f63f3138f) Remove stray `console.log()` call in multipart generator. (@bcherry) +- [0a8a5ab](https://github.com/mikeal/request/commit/0a8a5ab6a08eaeffd45ef4e028be2259d61bb0ee) Merge remote-tracking branch 'upstream/master' (@proksoup) +- [#241](https://github.com/mikeal/request/pull/241) Composability updates suggested by issue #239 (@polotek) +- [#284](https://github.com/mikeal/request/pull/284) Remove stray `console.log()` call in multipart generator. (@bcherry) +- [8344666](https://github.com/mikeal/request/commit/8344666f682a302c914cce7ae9cea8de054f9240) Fix #206 Change HTTP/HTTPS agent when redirecting between protocols (@isaacs) +- [#272](https://github.com/mikeal/request/pull/272) Boundary begins with CRLF? (@proksoup) +- [#214](https://github.com/mikeal/request/pull/214) documenting additional behavior of json option (@jphaas) +- [#207](https://github.com/mikeal/request/pull/207) Fix #206 Change HTTP/HTTPS agent when redirecting between protocols (@isaacs) +- [9cadd61](https://github.com/mikeal/request/commit/9cadd61d989e85715ea07da8770a3077db41cca3) Allow parser errors to bubble up to request (@mscdex) +- [6a00fea](https://github.com/mikeal/request/commit/6a00fea09eed99257c0aec2bb66fbf109b0f573a) Only add socket error handler callback once (@mscdex) +- [975ea90](https://github.com/mikeal/request/commit/975ea90bed9503c67055b20e36baf4bcba54a052) Fix style (@mscdex) +- [205dfd2](https://github.com/mikeal/request/commit/205dfd2e21c13407d89d3ed92dc2b44b987d962b) Use .once() when listening for parser error (@mscdex) +- [ff9b564](https://github.com/mikeal/request/commit/ff9b5643d6b5679a9e7d7997ec6275dac10b000e) Add a space after if (@Filirom1) +- [#280](https://github.com/mikeal/request/pull/280) Like in node.js print options if NODE_DEBUG contains 
the word request (@Filirom1) +- [d38e57b](https://github.com/mikeal/request/commit/d38e57bbb3d827aa87427f2130aa5a5a3a973161) Test for #289 (@isaacs) +- [820af58](https://github.com/mikeal/request/commit/820af5839f2a193d091d98f23fd588bd919e3e58) A test of POST redirect following with 303 status (@isaacs) +- [7adc5a2](https://github.com/mikeal/request/commit/7adc5a21869bc92cc3b5e84d32c585952c8e5e87) Use self.encoding when calling Buffer.toString() (@isaacs) +- [#290](https://github.com/mikeal/request/pull/290) A test for #289 (@isaacs) +- [#293](https://github.com/mikeal/request/pull/293) Allow parser errors to bubble up to request (@mscdex) +- [ed68b8d](https://github.com/mikeal/request/commit/ed68b8dd024561e9d47d80df255fb79d783c13a7) Updated the twitter oauth dance. The comments weren't clear. Also removed token_key. No longer needed with twitter oauth. (@joemccann) +- [6bc19cd](https://github.com/mikeal/request/commit/6bc19cda351b59f8e45405499a100abd0b456e42) Forgot to remove token_secret; no longer needed for twitter. (@joemccann) +- [1f21b17](https://github.com/mikeal/request/commit/1f21b17fc4ff3a7011b23e3c9261d66effa3aa40) Adding form-data support. (@mikeal) +- [827e950](https://github.com/mikeal/request/commit/827e950500746eb9d3a3fa6f174416b194c9dedf) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [b211200](https://github.com/mikeal/request/commit/b2112009a31fc7f9122970d392750f62b6e77111) Test fixes for relative import. Adding to run all (@mikeal) +- [1268195](https://github.com/mikeal/request/commit/1268195b75bd5bb3954b4c4f2d9feb80a97994d1) Bundling mime module rather than keep around our own mime-map. (@mikeal) +- [4f51cec](https://github.com/mikeal/request/commit/4f51cecdc363946b957585c3deccfd8c37e19aa0) Docs for the form API, pumping version. (@mikeal) +- [90245d7](https://github.com/mikeal/request/commit/90245d7199215d7b195cf7e36b203ca0bd0a6bd3) Doc fixes. 
(@mikeal) +- [d98ef41](https://github.com/mikeal/request/commit/d98ef411c560bd1168f242c524a378914ff8eac4) Pushed new version to npm. (@mikeal) +- [3e11937](https://github.com/mikeal/request/commit/3e119375acda2da225afdb1596f6346dbd551fba) Pass servername to tunneling secure socket creation (@isaacs) +- [7725b23](https://github.com/mikeal/request/commit/7725b235fdec8889c0c91d55c99992dc683e2e22) Declare dependencies more sanely (@isaacs) +- [#317](https://github.com/mikeal/request/pull/317) Workaround for #313 (@isaacs) +- [#318](https://github.com/mikeal/request/pull/318) Pass servername to tunneling secure socket creation (@isaacs) +- [0c470bc](https://github.com/mikeal/request/commit/0c470bccf1ec097ae600b6116e6244cb624dc00e) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [0d98e5b](https://github.com/mikeal/request/commit/0d98e5b7ea6bd9c4f21535d3682bbed2f2e05df4) Pushed new version to npm. (@mikeal) +- [64a4448](https://github.com/mikeal/request/commit/64a44488ac8c792a1f548f305fc5c61efe0d77fb) when setting defaults, the wrapper adds the jar method assuming it has the same signature as get, meaning undefined is passed into initParams, which subsequently fails. now passing jar function directly as it has no need of defaults anyway seeing as it only creates a new cookie jar (@StuartHarris) +- [48c9881](https://github.com/mikeal/request/commit/48c988118bda4691fffbfcf30d5a39b6c1438736) Added test to illustrate #321 (@alexindigo) +- [8ce0f2a](https://github.com/mikeal/request/commit/8ce0f2a3b6929cd0f7998e00d850eaf5401afdb7) Added *src* stream removal on redirect. 
#321 (@alexindigo) +- [c32f0bb](https://github.com/mikeal/request/commit/c32f0bb9feaa71917843856c23b4aae99f78ad4d) Do not try to remove listener from an undefined connection (@strk) +- [#326](https://github.com/mikeal/request/pull/326) Do not try to remove listener from an undefined connection (@CartoDB) +- [#322](https://github.com/mikeal/request/pull/322) Fix + test for piped into request bumped into redirect. #321 (@alexindigo) +- [85b6a63](https://github.com/mikeal/request/commit/85b6a632ac7d3456485fbf931043f10f5f6344a5) New version in npm. (@mikeal) +- [f462bd3](https://github.com/mikeal/request/commit/f462bd3fa421fa5e5ca6c91852333db90297b80e) Rolling trunk version. (@mikeal) +- [8a82c5b](https://github.com/mikeal/request/commit/8a82c5b0990cc58fa4cb7f81814d13ba7ae35453) Adding url to redirect error for better debugging. (@mikeal) +- [013c986](https://github.com/mikeal/request/commit/013c986d0a8b5b2811cd06dd3733f4a3d37df1cc) Better debugging of max redirect errors. (@mikeal) +- [#320](https://github.com/mikeal/request/pull/320) request.defaults() doesn't need to wrap jar() (@redbadger) +- [4797f88](https://github.com/mikeal/request/commit/4797f88b42c3cf8680cbde09bf473678a5707aed) Fix #296 - Only set Content-Type if body exists (@Marsup) +- [f6bcf3e](https://github.com/mikeal/request/commit/f6bcf3eb51982180e813c69cccb942734f815ffe) fixup aws function to work in more situations (@nlf) +- [ba6c88a](https://github.com/mikeal/request/commit/ba6c88af5e771c2a0e007e6166e037a149561e09) added short blurb on using aws (@nlf) +- [#343](https://github.com/mikeal/request/pull/343) Allow AWS to work in more situations, added a note in the README on its usage (@nathan-lafreniere) +- [288c52a](https://github.com/mikeal/request/commit/288c52a2a1579164500c26136552827112801ff1) switch to a case insensitive getter when fetching headers for aws auth signing (@nlf) +- [#332](https://github.com/mikeal/request/pull/332) Fix #296 - Only set Content-Type if body exists (@Marsup) +- 
[7a16286](https://github.com/mikeal/request/commit/7a162868de65b6de15e00c1f707b5e0f292c5f86) Emit errors for anything in init so that it is catchable in a redirect. (@mikeal) +- [d288d21](https://github.com/mikeal/request/commit/d288d21d709fa81067f5af53737dfde06f842262) fix bug (@azylman) +- [#355](https://github.com/mikeal/request/pull/355) stop sending erroneous headers on redirected requests (@azylman) +- [b0b97f5](https://github.com/mikeal/request/commit/b0b97f53a9e94f3aeaa05e2cda5b820668f6e3b2) delete _form along with everything else on a redirect (@jgautier) +- [#360](https://github.com/mikeal/request/pull/360) Delete self._form along with everything else on redirect (@jgautier) +- [61e3850](https://github.com/mikeal/request/commit/61e3850f0f91ca6732fbd06b46796fbcd2fea1ad) Made it so that if we pass in Content-Length or content-length in the headers, don't make a new version (@danjenkins) +- [#361](https://github.com/mikeal/request/pull/361) Don't create a Content-Length header if we already have it set (@danjenkins) +- [590452d](https://github.com/mikeal/request/commit/590452d6569e68e480d4f40b88022f1b81914ad6) inside oauth.hmacsign: running rfc3986 on base_uri instead of just encodeURIComponent. +- [#362](https://github.com/mikeal/request/pull/362) Running `rfc3986` on `base_uri` in `oauth.hmacsign` instead of just `encodeURIComponent` (@jeffmarshall) +- [f7dc90c](https://github.com/mikeal/request/commit/f7dc90c8dae743d5736dc6c807eecde613eb4fd4) Revert "Merge pull request #362 from jeffmarshall/master" (@mikeal) +- [d631a26](https://github.com/mikeal/request/commit/d631a26e263077eca3d4925de9b0a8d57365ba90) reintroducing the WTF escape + encoding, also fixing a typo. +- [#363](https://github.com/mikeal/request/pull/363) rfc3986 on base_uri, now passes tests (@jeffmarshall) +- [bfe2791](https://github.com/mikeal/request/commit/bfe2791f596b749eed6961159d41a404c3aba0d0) oauth fix. 
(@mikeal) +- [#344](https://github.com/mikeal/request/pull/344) Make AWS auth signing find headers correctly (@nathan-lafreniere) +- [e863f25](https://github.com/mikeal/request/commit/e863f25336abc7b9f9936c20e0c06da8db0c6593) style change. (@mikeal) +- [3e5a87c](https://github.com/mikeal/request/commit/3e5a87ce28b3bb45861b32f283cd20d0084d78a7) Don't remove x_auth_type for Twitter reverse auth (@drudge) +- [#369](https://github.com/mikeal/request/pull/369) Don't remove x_auth_mode for Twitter reverse auth (@drudge) +- [25d4667](https://github.com/mikeal/request/commit/25d466773c43949e2eea4236ffc62841757fd1f0) x_auth_mode not x_auth_type (@drudge) +- [#370](https://github.com/mikeal/request/pull/370) Twitter reverse auth uses x_auth_mode not x_auth_type (@drudge) +- [cadf4dc](https://github.com/mikeal/request/commit/cadf4dc54f4ee3fae821f6beb1ea6443e528bf6f) massive style commit. (@mikeal) +- [33453a5](https://github.com/mikeal/request/commit/33453a53bc37e4499853b9d929b3603cdf7a31cd) New version in npm. (@mikeal) +- [b638185](https://github.com/mikeal/request/commit/b6381854006470af1d0607f636992c7247b6720f) Setting master version. (@mikeal) +- [8014d2a](https://github.com/mikeal/request/commit/8014d2a5b797f07cf56d2f39a346031436e1b064) correct Host header for proxy tunnel CONNECT (@ypocat) +- [#374](https://github.com/mikeal/request/pull/374) Correct Host header for proxy tunnel CONNECT (@ypocat) +- [8c3e9cb](https://github.com/mikeal/request/commit/8c3e9cb529767cff5e7206e2e76531183085b42a) If one of the request parameters is called "timestamp", the "oauth_timestamp" OAuth parameter will get removed during the parameter cleanup loop. 
(@jplock) +- [#375](https://github.com/mikeal/request/pull/375) Fix for missing oauth_timestamp parameter (@jplock) +- [69e6dc5](https://github.com/mikeal/request/commit/69e6dc5c80e67bbd7d135c3ceb657a1b2df58763) Fixed headers piping on redirects (@kapetan) +- [#376](https://github.com/mikeal/request/pull/376) Headers lost on redirect (@kapetan) +- [62dbbf3](https://github.com/mikeal/request/commit/62dbbf3d77b0851ba424d4f09d1d0c0be91c1f2d) Resolving the Invalid signature when using "qs" (@landeiro) +- [d4cf4f9](https://github.com/mikeal/request/commit/d4cf4f98e11f9a85b6bdfd0481c85c8ac34061ce) fixes missing host header on retried request when using forever agent +- [#380](https://github.com/mikeal/request/pull/380) Fixes missing host header on retried request when using forever agent (@mac-) +- [#381](https://github.com/mikeal/request/pull/381) Resolving "Invalid signature. Expected signature base string: " (@landeiro) +- [ea2f975](https://github.com/mikeal/request/commit/ea2f975ae83efe956b77cbcd0fd9ad42c0d5192f) Ensure that uuid is treated as a property name, not an index. 
(@othiym23) +- [#388](https://github.com/mikeal/request/pull/388) Ensure "safe" toJSON doesn't break EventEmitters (@othiym23) +- [11a3bc0](https://github.com/mikeal/request/commit/11a3bc0ea3063f6f0071248e03c8595bfa9fd046) Add more reporting to tests (@mmalecki) +- [#398](https://github.com/mikeal/request/pull/398) Add more reporting to tests (@mmalecki) +- [b85bf63](https://github.com/mikeal/request/commit/b85bf633fe8197dc38855f10016a0a76a8ab600a) Optimize environment lookup to happen once only (@mmalecki) +- [#403](https://github.com/mikeal/request/pull/403) Optimize environment lookup to happen once only (@mmalecki) +- [dbb9a20](https://github.com/mikeal/request/commit/dbb9a205fafd7bf5a05d2dbe7eb2c6833b4387dc) renaming tests/googledoodle.png to match it's actual image type of jpeg (@nfriedly) +- [e2d7d4f](https://github.com/mikeal/request/commit/e2d7d4fd35869354ba14a333a4b4989b648e1971) Add more auth options, including digest support (@nylen) +- [d0d536c](https://github.com/mikeal/request/commit/d0d536c1e5a9a342694ffa5f14ef8fbe8dcfa8bd) Add tests for basic and digest auth (@nylen) +- [85fd359](https://github.com/mikeal/request/commit/85fd359890646ef9f55cc6e5c6a32e74f4fbb786) Document new auth options (@nylen) +- [#338](https://github.com/mikeal/request/pull/338) Add more auth options, including digest support (@nylen) +- [fd2e2fa](https://github.com/mikeal/request/commit/fd2e2fa1e6d580cbc34afd3ae1200682cecb3cf9) Fixed a typo. (@jerem) +- [#415](https://github.com/mikeal/request/pull/415) Fixed a typo. (@jerem) +- [53c1508](https://github.com/mikeal/request/commit/53c1508c9c6a58f7d846de82cad36402497a4a4f) Fix for #417 (@mikeal) +- [b23f985](https://github.com/mikeal/request/commit/b23f985e02da4a96f1369541a128c4204a355666) Fixing merge conflict. (@mikeal) +- [28e8be5](https://github.com/mikeal/request/commit/28e8be5175793ac99236df88e26c0139a143e32d) Lost a forever fix in the previous merge. Fixing. 
(@mikeal) +- [e4d1e25](https://github.com/mikeal/request/commit/e4d1e25c1648ef91f6baf1ef407c712509af4b66) Copy options before adding callback. (@nrn) +- [22bc67d](https://github.com/mikeal/request/commit/22bc67d7ac739e9c9f74c026f875a0a7c686e29d) Respect specified {Host,host} headers, not just {host} (@andrewschaaf) +- [#430](https://github.com/mikeal/request/pull/430) Respect specified {Host,host} headers, not just {host} (@andrewschaaf) +- [6b11acf](https://github.com/mikeal/request/commit/6b11acf3e29fb84daef4e940314cae5ac2e580c6) Updating form-data. (@mikeal) +- [d195845](https://github.com/mikeal/request/commit/d195845c3e1de42c9aee752eec8efa4dda87ec74) Updating mime (@mikeal) +- [20ba1d6](https://github.com/mikeal/request/commit/20ba1d6d38191aa7545b927a7262a18c5c63575b) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [0150d9f](https://github.com/mikeal/request/commit/0150d9fa13e51d99880013b9ec29343850b40c2f) Consider `options.rejectUnauthorized` when pooling https agents (@mmalecki) +- [3e07b6d](https://github.com/mikeal/request/commit/3e07b6d4b81037d0e6e595670db483708ffa8698) Use `rejectUnauthorized: false` in tests (@mmalecki) +- [3995878](https://github.com/mikeal/request/commit/3995878d9fff18a8707f27ffeb4ed6401086adce) Support `key` and `cert` options (@mmalecki) +- [#433](https://github.com/mikeal/request/pull/433) Added support for HTTPS cert & key (@indexzero) +- [8b0f4e8](https://github.com/mikeal/request/commit/8b0f4e8fba33d578a891218201d87e3316ea9844) Released 2.14.0 (@mikeal) +- [54172c6](https://github.com/mikeal/request/commit/54172c68cab8360372e1e64e3fa14902662950bd) Rolling master version. (@mikeal) +- [aa4a285](https://github.com/mikeal/request/commit/aa4a28586354901b0c9b298a0aa79abb5ed175af) Add patch convenience method. 
(@mloar) +- [66501b9](https://github.com/mikeal/request/commit/66501b9872abc9a2065430cd5ed4a34dd45c8bee) protect against double callback (@spollack) +- [#444](https://github.com/mikeal/request/pull/444) protect against double callbacks on error path (@spollack) +- [#448](https://github.com/mikeal/request/pull/448) Convenience method for PATCH (@mloar) +- [6f0f8c5](https://github.com/mikeal/request/commit/6f0f8c5ee2b2fdc7118804664c2215fe9cb5a2f2) No longer doing bundle dependencies (@mikeal) +- [3997f98](https://github.com/mikeal/request/commit/3997f980722241c18454a00aeeda07d701c27a8f) No longer using bundle dependencies (@mikeal) +- [cba36ce](https://github.com/mikeal/request/commit/cba36ce64e68bd26e230b65f81256776ac66e686) Adding hawk signing to request. (@mikeal) +- [c7a8be6](https://github.com/mikeal/request/commit/c7a8be6d174eff05a9cb2fda987979e475d8543f) Fixing bug in empty options. (@mikeal) +- [67d753f](https://github.com/mikeal/request/commit/67d753fec99fa1f5a3b35ec0bbbc98896418d86c) node-uuid is much better. (@mikeal) +- [337718b](https://github.com/mikeal/request/commit/337718baa08cafb3e706d275fd7344a3c92363bb) Smarter test runner. (@mikeal) +- [bcc33ac](https://github.com/mikeal/request/commit/bcc33aca57baf6fe2a81fbf5983048c9220c71b1) Moved the cookie jar in to it's own module. (@mikeal) +- [3261be4](https://github.com/mikeal/request/commit/3261be4b5d6f45f62b9f50bec18af770cbb70957) Put aws signing in its own package. (@mikeal) +- [fbed723](https://github.com/mikeal/request/commit/fbed7234d7b532813105efdc4c54777396a6773b) OAuth signing is now in its own library. (@mikeal) +- [ef5ab90](https://github.com/mikeal/request/commit/ef5ab90277fb00d0e8eb1c565b0f6ef8c52601d3) Forever agent is now it's own package. (@mikeal) +- [ca1ed81](https://github.com/mikeal/request/commit/ca1ed813c62c7493dc77108b3efc907cc36930cb) tunneling agent is now it's own library. 
(@mikeal) +- [5c75621](https://github.com/mikeal/request/commit/5c75621ba5cea18bcf114117112121d361e5f3c9) Moving from main.js to index. cause it's not 2010 anymore. (@mikeal) +- [#413](https://github.com/mikeal/request/pull/413) rename googledoodle.png to .jpg (@nfriedly) +- [b4c4c28](https://github.com/mikeal/request/commit/b4c4c28424d906cd96a2131010b21d7facf8b666) Merge branch 'master' of github.com:mikeal/request (@nrn) +- [#310](https://github.com/mikeal/request/pull/310) Twitter Oauth Stuff Out of Date; Now Updated (@joemccann) +- [8b0e7e8](https://github.com/mikeal/request/commit/8b0e7e8c9d196d7286d1563aa54affcc4c8b0e1d) Comment to explain init() and start(). (@mikeal) +- [43d578d](https://github.com/mikeal/request/commit/43d578dc0206388eeae9584f540d550a06308fc8) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [b7c5ed4](https://github.com/mikeal/request/commit/b7c5ed48b618f71f138f9f08f8d705336f907e01) destroy the response if present when destroying the request (@mafintosh) +- [b279277](https://github.com/mikeal/request/commit/b279277dc2fb4b649640322980315d74db0d13f3) response.abort should be response.destroy (@mafintosh) +- [#454](https://github.com/mikeal/request/pull/454) Destroy the response if present when destroying the request (clean merge) (@mafintosh) +- [#429](https://github.com/mikeal/request/pull/429) Copy options before adding callback. (@nrn) +- [e0e0fb4](https://github.com/mikeal/request/commit/e0e0fb451f17945a02203639e4836aa327b4e30b) hawk 0.9.0 (@hueniverse) +- [#456](https://github.com/mikeal/request/pull/456) hawk 0.9.0 (@hueniverse) +- [2f60bc2](https://github.com/mikeal/request/commit/2f60bc253ff6e28df58a33da24b710b6d506849f) Fixes #453 (@mikeal) +- [805b6e4](https://github.com/mikeal/request/commit/805b6e4fe3afeeb407b4fca2e34e9caabe30f747) Fixing hawk README to match new usage. (@mikeal) +- [8feb957](https://github.com/mikeal/request/commit/8feb957911083bce552d1898b7ffcaa87104cd21) Removing old logref code. 
(@mikeal) +- [fcf6d67](https://github.com/mikeal/request/commit/fcf6d6765247a2645a233d95468ade2960294074) Safe stringify. (@mikeal) +- [62455bc](https://github.com/mikeal/request/commit/62455bca81e8760f25a2bf1dec2b06c8e915de79) hawk 0.10 (@hueniverse) +- [c361b41](https://github.com/mikeal/request/commit/c361b4140e7e6e4fe2a8f039951b65d54af65f42) hawk 0.10 (@hueniverse) +- [fa1ef30](https://github.com/mikeal/request/commit/fa1ef30dcdac83b271ce38c71975df0ed96b08f7) Strip the UTF8 BOM from a UTF encoded response (@kppullin) +- [9d636c0](https://github.com/mikeal/request/commit/9d636c0b3e882742e15ba989d0c2413f95364680) if query params are empty, then request path shouldn't end with a '?' (@jaipandya) +- [#462](https://github.com/mikeal/request/pull/462) if query params are empty, then request path shouldn't end with a '?' (merges cleanly now) (@jaipandya) +- [#460](https://github.com/mikeal/request/pull/460) hawk 0.10.0 (@hueniverse) +- [#461](https://github.com/mikeal/request/pull/461) Strip the UTF8 BOM from a UTF encoded response (@kppullin) +- [6d29ed7](https://github.com/mikeal/request/commit/6d29ed72e34f3b2b6d8a5cfadd96dd26b3dd246d) Moving response handlers to onResponse. (@mikeal) +- [885d6eb](https://github.com/mikeal/request/commit/885d6ebeb6130c2ab7624304f4a01a898573390b) Using querystring library from visionmedia (@kbackowski) +- [#471](https://github.com/mikeal/request/pull/471) Using querystring library from visionmedia (@kbackowski) +- [346bb42](https://github.com/mikeal/request/commit/346bb42898c5804576d9e9b3adf40123260bf73b) On strictSSL set rejectUnauthorized. (@mikeal) +- [8a45365](https://github.com/mikeal/request/commit/8a453656a705d2fa98fbf9092b1600d2ddadbb5a) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [32cfd3c](https://github.com/mikeal/request/commit/32cfd3cf7b3f23c2b1d36c5ccb475cbb3a4693ff) Style changes. 
(@mikeal) +- [ec07ee2](https://github.com/mikeal/request/commit/ec07ee2d3eeb90b6d0ad9f6d7f3a36da72276841) Print debug logs NODE_DEBUG=request in environment (@isaacs) +- [681af64](https://github.com/mikeal/request/commit/681af644a2ebccad8bcccb75984f7f10f909b382) Flow data in v0.10-style streams (@isaacs) +- [#473](https://github.com/mikeal/request/pull/473) V0.10 compat (@isaacs) +- [f07a8ba](https://github.com/mikeal/request/commit/f07a8baebf7001addbc0f7d7c869adddc21768ce) Release. (@mikeal) +- [1f947a1](https://github.com/mikeal/request/commit/1f947a1d2728147fbf4f57aa361d0bedcebfc206) Rolling master version. (@mikeal) +- [7a217bb](https://github.com/mikeal/request/commit/7a217bbdced9a05a786fe6534ab52734df342d3e) Reinstate querystring for `unescape` (@shimaore) +- [b0b4ca9](https://github.com/mikeal/request/commit/b0b4ca913e119337e9313a157eee2f08f77ddc38) Test for `unescape` (@shimaore) +- [#475](https://github.com/mikeal/request/pull/475) Use `unescape` from `querystring` (@shimaore) +- [28fc741](https://github.com/mikeal/request/commit/28fc741fa958a9783031189964ef6f6d7e3f3264) Release. (@mikeal) +- [d3e28ef](https://github.com/mikeal/request/commit/d3e28ef7144da4d9f22f8fb475bd5aa6a80fb947) Rolling master version. (@mikeal) +- [8f8bb9e](https://github.com/mikeal/request/commit/8f8bb9ee8c4dcd9eb815249fbe2a7cf54f61b56f) Changing so if Accept header is explicitly set, sending json does not overwrite. (@RoryH) +- [#479](https://github.com/mikeal/request/pull/479) Changing so if Accept header is explicitly set, sending json does not ov... (@RoryH) +- [7694372](https://github.com/mikeal/request/commit/7694372f3dc9d57ac29ca7ee5c00146aa5e1e747) Proper version for latest. (@mikeal) +- [aa208cf](https://github.com/mikeal/request/commit/aa208cf5c682262529d749f592db147182cacfaf) 0.8+ only now (@mikeal) +- [16b5ab9](https://github.com/mikeal/request/commit/16b5ab9151823067b05b382241483ef10811c3e1) Upgrading qs. 
(@mikeal) +- [7d10c1e](https://github.com/mikeal/request/commit/7d10c1e83b4663f592c773e7fece83435585a06f) Merge branch 'master' of github.com:mikeal/request (@mikeal) +- [b8ca4b4](https://github.com/mikeal/request/commit/b8ca4b474b8215cab44ef8ef789303571b3d016f) pumping hawk version. (@mikeal) +- [9c0e484](https://github.com/mikeal/request/commit/9c0e48430e3a9de8715e77c07c98301399eaf6e3) release (@mikeal) +- [a9f1896](https://github.com/mikeal/request/commit/a9f189697e2a813bee9bff31de32a25e99e55cf2) rolling master version. (@mikeal) +- [560a1f8](https://github.com/mikeal/request/commit/560a1f8b927099e44b75274375a690df2a05de67) Set content-type on input. (@mikeal) +- [5fec436](https://github.com/mikeal/request/commit/5fec436b6602bc8c76133664bca23e98f511b096) Release. (@mikeal) +- [88d8d5b](https://github.com/mikeal/request/commit/88d8d5bc80679b78a39cab8e6d8295728a0a150d) Rolling version. (@mikeal) +- [d05b6ba](https://github.com/mikeal/request/commit/d05b6ba72702c2411b4627d4d89190a5f2aba562) Empty body must be passed as empty string, exclude JSON case (@Olegas) +- [#490](https://github.com/mikeal/request/pull/490) Empty response body (3-rd argument) must be passed to callback as an empty string (@Olegas) +- [8aa13cd](https://github.com/mikeal/request/commit/8aa13cd5b5e22b24466ef0e59fa8b5f1d0f0795a) Added redirect event (@Cauldrath) +- [4d63a04](https://github.com/mikeal/request/commit/4d63a042553c90718bf0b90652921b26c52dcb31) Moving response emit above setHeaders on destination streams (@kenperkins) +- [#498](https://github.com/mikeal/request/pull/498) Moving response emit above setHeaders on destination streams (@kenperkins) +- [c40993f](https://github.com/mikeal/request/commit/c40993fc987b1a8a3cb08cd5699b2f1b2bd4b28b) Fix a regression introduced by cba36ce6 (@nylen) +- [edc2e17](https://github.com/mikeal/request/commit/edc2e17e8154239efa6bd2914435798c18882635) Don't delete headers when retrying a request with proper authentication (@nylen) +- 
[a375ac1](https://github.com/mikeal/request/commit/a375ac15460f4f3b679f4418d7fc467a5cc94499) Refactor and expand basic auth tests (@nylen) +- [9bc28bf](https://github.com/mikeal/request/commit/9bc28bf912fb0afdd14b36b0ccbafb185a32546a) Cleanup whitespace. (@mikeal) +- [9a35cd2](https://github.com/mikeal/request/commit/9a35cd2248d9492b099c7ee46d68ca017b6a701c) Fix basic auth for passwords that contain colons (@tonistiigi) +- [f724810](https://github.com/mikeal/request/commit/f724810c7b9f82fa1423d0a4d19fcb5aaca98137) Honor the .strictSSL option when using proxies (tunnel-agent) (@jhs) +- [95a2558](https://github.com/mikeal/request/commit/95a25580375be1b9c39cc2e88a36a8387395bc13) Add HTTP Signature support. (@davidlehn) +- [921c973](https://github.com/mikeal/request/commit/921c973015721ee0f92ed670f5e88bca057104cc) * Make password optional to support the format: http://username@hostname/ +- [2759ebb](https://github.com/mikeal/request/commit/2759ebbe07e8563fd3ded698d2236309fb28176b) add 'localAddress' support (@yyfrankyy) +- [#513](https://github.com/mikeal/request/pull/513) add 'localAddress' support (@yyfrankyy) +- [#512](https://github.com/mikeal/request/pull/512) Make password optional to support the format: http://username@hostname/ (@pajato1) +- [#508](https://github.com/mikeal/request/pull/508) Honor the .strictSSL option when using proxies (tunnel-agent) (@iriscouch) +- [5f036e6](https://github.com/mikeal/request/commit/5f036e6f5d3102a89e5401a53090a0627a7850a8) Conflicts: index.js (@nylen) +- [89d2602](https://github.com/mikeal/request/commit/89d2602ef4e3a4e6e51284f6a29b5767c79ffaba) Conflicts: README.md (@davidlehn) +- [#502](https://github.com/mikeal/request/pull/502) Fix POST (and probably other) requests that are retried after 401 Unauthorized (@nylen) +- [eb3e033](https://github.com/mikeal/request/commit/eb3e033170403832fe7070955db32112ec46005f) Merge branch 'master' of git://github.com/mikeal/request (@davidlehn) +- 
[#510](https://github.com/mikeal/request/pull/510) Add HTTP Signature support. (@digitalbazaar) +- [227d998](https://github.com/mikeal/request/commit/227d9985426214b6ac68702933346000298d7790) Update the internal path variable when querystring is changed (@jblebrun) +- [#519](https://github.com/mikeal/request/pull/519) Update internal path state on post-creation QS changes (@incredible-labs) +- [428b9c1](https://github.com/mikeal/request/commit/428b9c1ad9831b7dfd6cec4ce68df358590c6d65) Fixing test-tunnel.js (@noway421) +- [2417599](https://github.com/mikeal/request/commit/24175993f6c362f7fca5965feb0a11756f00baf3) Improving test-localAddress.js (@noway421) +- [#520](https://github.com/mikeal/request/pull/520) Fixing test-tunnel.js (@noway421) +- [1e37f1b](https://github.com/mikeal/request/commit/1e37f1bea45174e09e6450bc71dfc081c8cd94de) Some explaining comments (@noway421) +- [909b024](https://github.com/mikeal/request/commit/909b024619c9e47f615749661d610cccd8421d80) Updating dependencies (@noway421) +- [#523](https://github.com/mikeal/request/pull/523) Updating dependencies (@noway421) +- [47191e1](https://github.com/mikeal/request/commit/47191e1a5e29714fb0c5f8b2162b2971570df644) 2.17.0 (@mikeal) +- [14def5a](https://github.com/mikeal/request/commit/14def5af5903d03f66bd6c9be534e6b76f47c063) 2.18.0 (@mikeal) +- [56fd6b7](https://github.com/mikeal/request/commit/56fd6b7ec6da162894df0809126d688f30900d25) 2.18.1 (@mikeal) +- [37dd689](https://github.com/mikeal/request/commit/37dd68989670f8937b537579a4299d9649b8aa16) Fixing dep. (@mikeal) +- [dd7209a](https://github.com/mikeal/request/commit/dd7209a84dd40afe87db31c6ab66885e2015cb8f) 2.19.0 (@mikeal) +- [62f3b92](https://github.com/mikeal/request/commit/62f3b9203690d4ad34486fc506fc78a1c9971e03) 2.19.1 (@mikeal) +- [74c6b2e](https://github.com/mikeal/request/commit/74c6b2e315872980ee9a9a000d25e724138f28b1) Adding test for onelineproxy. 
(@mikeal) +- [2a01cc0](https://github.com/mikeal/request/commit/2a01cc082f544647f7176a992e02668519a694be) Fixing onelineproxy. (@mikeal) +- [8b4c920](https://github.com/mikeal/request/commit/8b4c9203adb372f2ee99b1b012406b482b27c68d) 2.20.0 (@mikeal) +- [d8d4a33](https://github.com/mikeal/request/commit/d8d4a3311d8d31df88fa8a2ab3265872e5cb97ae) 2.20.1 (@mikeal) +- [5937012](https://github.com/mikeal/request/commit/59370123b22e8c971e4ee48c3d0caf920d890bda) dependencies versions bump (@jodaka) +- [#529](https://github.com/mikeal/request/pull/529) dependencies versions bump (@jodaka) +- [#521](https://github.com/mikeal/request/pull/521) Improving test-localAddress.js (@noway421) +- [#503](https://github.com/mikeal/request/pull/503) Fix basic auth for passwords that contain colons (@tonistiigi) +- [#497](https://github.com/mikeal/request/pull/497) Added redirect event (@Cauldrath) +- [297a9ea](https://github.com/mikeal/request/commit/297a9ea827655e5fb406a86907bb0d89b01deae8) fix typo (@fredericosilva) +- [#532](https://github.com/mikeal/request/pull/532) fix typo (@fredericosilva) +- [3691db5](https://github.com/mikeal/request/commit/3691db5a2d0981d4aeabfda5b988a5c69074e187) Allow explicitly empty user field for basic authentication. (@mikeando) +- [#536](https://github.com/mikeal/request/pull/536) Allow explicitly empty user field for basic authentication. 
(@mikeando) +- [5d36e32](https://github.com/mikeal/request/commit/5d36e324047f79cbbf3bb9b71fef633f02b36367) 2.21.0 (@mikeal) +- [9bd98d6](https://github.com/mikeal/request/commit/9bd98d6052f222aa348635c1acb2e2c99eed0f8c) 2.21.1 (@mikeal) +- [a918e04](https://github.com/mikeal/request/commit/a918e04a8d767a2948567ea29ed3fdd1650c16b1) The exported request function doesn't have an auth method (@tschaub) +- [1ebe1ac](https://github.com/mikeal/request/commit/1ebe1ac2f78e8a6149c03ce68fcb23d56df2316e) exposing Request class (@regality) +- [#542](https://github.com/mikeal/request/pull/542) Expose Request class (@ifit) +- [467573d](https://github.com/mikeal/request/commit/467573d17b4db5f93ed425ace0594370a7820c7c) Update http-signatures version. (@davidlehn) +- [#541](https://github.com/mikeal/request/pull/541) The exported request function doesn't have an auth method (@tschaub) +- [3040bbe](https://github.com/mikeal/request/commit/3040bbe5de846811151dab8dc09944acc93a338e) Fix redirections, (@criloz) +- [#564](https://github.com/mikeal/request/pull/564) Fix redirections (@NebTex) +- [397b435](https://github.com/mikeal/request/commit/397b4350fcf885460d7dced94cf1db1f5c167f80) handle ciphers and secureOptions in agentOptions (@SamPlacette) +- [65a2778](https://github.com/mikeal/request/commit/65a27782db7d2798b6490ea08efacb8f3b0a401c) tests and fix for null agentOptions case (@SamPlacette) +- [#568](https://github.com/mikeal/request/pull/568) use agentOptions to create agent when specified in request (@SamPlacette) +- [c116920](https://github.com/mikeal/request/commit/c116920a2cbef25afe2e1bbcf4df074e1e2f9dbb) Let's see how we do with only the main guard. (@mikeal) +- [f54a335](https://github.com/mikeal/request/commit/f54a3358119298634a7b0c29a21bf1471fc23d98) Fix spelling of "ignoring." 
(@bigeasy) +- [5cd215f](https://github.com/mikeal/request/commit/5cd215f327e113dc6c062634e405c577986cfd3c) Change isUrl regex to accept mixed case (@lexander) +- [02c8e74](https://github.com/mikeal/request/commit/02c8e749360a47d45e3e7b51b7f751fe498d2f25) #583 added tests for isUrl regex change. (@lexander) +- [#581](https://github.com/mikeal/request/pull/581) Fix spelling of "ignoring." (@bigeasy) +- [#544](https://github.com/mikeal/request/pull/544) Update http-signature version. (@digitalbazaar) +- [e77746b](https://github.com/mikeal/request/commit/e77746bf42e974dc91a84d03f44f750dd7ee0989) global cookie jar disabled by default, send jar: true to enable. (@threepointone) +- [46015ac](https://github.com/mikeal/request/commit/46015ac8d5b74f8107a6ec9fd07c133f46c5d833) 2.22.0 (@mikeal) +- [e5da4a5](https://github.com/mikeal/request/commit/e5da4a5e1a20bf4f23681f7b996f22c5fadae91d) 2.22.1 (@mikeal) +- [#587](https://github.com/mikeal/request/pull/587) Global cookie jar disabled by default (@threepointone) +- [fac9da1](https://github.com/mikeal/request/commit/fac9da1cc426bf0a4bcc5f0b7d0d0aea8b1cce38) Prevent setting headers after they are sent (@wpreul) +- [#589](https://github.com/mikeal/request/pull/589) Prevent setting headers after they are sent (@wpreul) +- [bc1537a](https://github.com/mikeal/request/commit/bc1537ab79064cea532b0d14110ce4e49a663bde) Emit complete event when there is no callback +- [de8508e](https://github.com/mikeal/request/commit/de8508e9feac10563596aeee26727567b3c2e33c) Added check to see if the global pool is being used before using the global agent (@Cauldrath) +- [03441ef](https://github.com/mikeal/request/commit/03441ef919e51a742aaf9e168d917e97e2d9eb6b) 2.23.0 (@mikeal) diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/CONTRIBUTING.md nodejs-0.11.15/deps/npm/node_modules/request/CONTRIBUTING.md --- nodejs-0.11.13/deps/npm/node_modules/request/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/request/CONTRIBUTING.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +# This is an OPEN Open Source Project + +----------------------------------------- + +## What? + +Individuals making significant and valuable contributions are given +commit-access to the project to contribute as they see fit. This project is +more like an open wiki than a standard guarded open source project. + +## Rules + +There are a few basic ground-rules for contributors: + +1. **No `--force` pushes** or modifying the Git history in any way. +1. **Non-master branches** ought to be used for ongoing work. +1. **External API changes and significant modifications** ought to be subject + to an **internal pull-request** to solicit feedback from other contributors. +1. Internal pull-requests to solicit feedback are *encouraged* for any other + non-trivial contribution but left to the discretion of the contributor. +1. For significant changes wait a full 24 hours before merging so that active + contributors who are distributed throughout the world have a chance to weigh + in. +1. Contributors should attempt to adhere to the prevailing code-style. +1. Run `npm test` locally before submitting your PR, to catch any easy to miss + style & testing issues. To diagnose test failures, there are two ways to + run a single test file: + - `node_modules/.bin/taper tests/test-file.js` - run using the default + [`taper`](/nylen/taper) test reporter. + - `node tests/test-file.js` - view the raw + [tap](https://testanything.org/) output. + + +## Releases + +Declaring formal releases remains the prerogative of the project maintainer. + +## Changes to this arrangement + +This is an experiment and feedback is welcome! This document may also be +subject to pull-requests or changes by contributors where you believe you have +something valuable to add or change. 
+ +----------------------------------------- diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/disabled.appveyor.yml nodejs-0.11.15/deps/npm/node_modules/request/disabled.appveyor.yml --- nodejs-0.11.13/deps/npm/node_modules/request/disabled.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/disabled.appveyor.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +# http://www.appveyor.com/docs/appveyor-yml + +# Fix line endings in Windows. (runs before repo cloning) +init: + - git config --global core.autocrlf input + +# Test against these versions of Node.js. +environment: + matrix: + - nodejs_version: "0.10" + - nodejs_version: "0.8" + - nodejs_version: "0.11" + +# Allow failing jobs for bleeding-edge Node.js versions. +matrix: + allow_failures: + - nodejs_version: "0.11" + +# Install scripts. (runs after repo cloning) +install: + # Get the latest stable version of Node 0.STABLE.latest + - ps: Update-NodeJsInstallation (Get-NodeJsLatestBuild $env:nodejs_version) + # Typical npm stuff. + - npm install + +# Post-install test scripts. +test_script: + # Output useful info for debugging. + - ps: "npm test # PowerShell" # Pass comment to PS for easier debugging + - cmd: npm test + +# Don't actually build. +build: off + +# Set build version format here instead of in the admin panel. 
+version: "{build}" diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/.eslintrc nodejs-0.11.15/deps/npm/node_modules/request/.eslintrc --- nodejs-0.11.13/deps/npm/node_modules/request/.eslintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/.eslintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +{ + "env": { + "node": true + }, + "rules": { + // Disallow semi-colons, unless needed to disambiguate statement + "semi": [2, "never"], + // Require strings to use single quotes + "quotes": [2, "single"], + // Require curly braces for all control statements + "curly": 2, + // Disallow using variables and functions before they've been defined + "no-use-before-define": 2, + // Allow any case for variable naming + "camelcase": 0, + // Disallow unused variables, except as function arguments + "no-unused-vars": [2, {"args":"none"}], + // Allow leading underscores for method names + // REASON: we use underscores to denote private methods + "no-underscore-dangle": 0 + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/index.js nodejs-0.11.15/deps/npm/node_modules/request/index.js --- nodejs-0.11.13/deps/npm/node_modules/request/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -12,146 +12,165 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-var optional = require('./lib/optional') - , cookie = optional('tough-cookie') - , Cookie = cookie && cookie.Cookie - , CookieJar = cookie && cookie.CookieJar - , cookieJar = CookieJar && new CookieJar - - , copy = require('./lib/copy') - , Request = require('./request') - ; +'use strict' +var extend = require('util')._extend + , cookies = require('./lib/cookies') + , helpers = require('./lib/helpers') + +var isFunction = helpers.isFunction + , constructObject = helpers.constructObject + , filterForCallback = helpers.filterForCallback + , constructOptionsFrom = helpers.constructOptionsFrom + , paramsHaveRequestBody = helpers.paramsHaveRequestBody // organize params for patch, post, put, head, del function initParams(uri, options, callback) { - if ((typeof options === 'function') && !callback) callback = options - if (options && typeof options === 'object') { - options.uri = uri - } else if (typeof uri === 'string') { - options = {uri:uri} - } else { - options = uri - uri = options.uri - } - return { uri: uri, options: options, callback: callback } + callback = filterForCallback([options, callback]) + options = constructOptionsFrom(uri, options) + + return constructObject() + .extend({callback: callback}) + .extend({options: options}) + .extend({uri: options.uri}) + .done() } function request (uri, options, callback) { - if (typeof uri === 'undefined') throw new Error('undefined is not a valid uri or options object.') - if ((typeof options === 'function') && !callback) callback = options - if (options && typeof options === 'object') { - options.uri = uri - } else if (typeof uri === 'string') { - options = {uri:uri} - } else { - options = uri + if (typeof uri === 'undefined') { + throw new Error('undefined is not a valid uri or options object.') } - options = copy(options) - - if (callback) options.callback = callback - var r = new Request(options) - return r -} - -module.exports = request - -request.Request = Request; - -request.debug = process.env.NODE_DEBUG && 
/request/.test(process.env.NODE_DEBUG) - -request.initParams = initParams + var params = initParams(uri, options, callback) + options = params.options + options.callback = params.callback + options.uri = params.uri -request.defaults = function (options, requester) { - var def = function (method) { - var d = function (uri, opts, callback) { - var params = initParams(uri, opts, callback) - for (var i in options) { - if (params.options[i] === undefined) params.options[i] = options[i] - } - if(typeof requester === 'function') { - if(method === request) { - method = requester - } else { - params.options._requester = requester - } - } - return method(params.options, params.callback) - } - return d - } - var de = def(request) - de.get = def(request.get) - de.patch = def(request.patch) - de.post = def(request.post) - de.put = def(request.put) - de.head = def(request.head) - de.del = def(request.del) - de.cookie = def(request.cookie) - de.jar = request.jar - return de + return new request.Request(options) } function requester(params) { if(typeof params.options._requester === 'function') { return params.options._requester - } else { - return request } + return request } -request.forever = function (agentOptions, optionsArg) { - var options = {} - if (optionsArg) { - for (var option in optionsArg) { - options[option] = optionsArg[option] - } +request.get = function (uri, options, callback) { + var params = initParams(uri, options, callback) + params.options.method = 'GET' + return requester(params)(params.uri || null, params.options, params.callback) +} + +request.head = function (uri, options, callback) { + var params = initParams(uri, options, callback) + params.options.method = 'HEAD' + + if (paramsHaveRequestBody(params)) { + throw new Error('HTTP HEAD requests MUST NOT include a request body.') } - if (agentOptions) options.agentOptions = agentOptions - options.forever = true - return request.defaults(options) + + return requester(params)(params.uri || null, 
params.options, params.callback) } -request.get = request request.post = function (uri, options, callback) { var params = initParams(uri, options, callback) params.options.method = 'POST' return requester(params)(params.uri || null, params.options, params.callback) } + request.put = function (uri, options, callback) { var params = initParams(uri, options, callback) params.options.method = 'PUT' return requester(params)(params.uri || null, params.options, params.callback) } + request.patch = function (uri, options, callback) { var params = initParams(uri, options, callback) params.options.method = 'PATCH' return requester(params)(params.uri || null, params.options, params.callback) } -request.head = function (uri, options, callback) { - var params = initParams(uri, options, callback) - params.options.method = 'HEAD' - if (params.options.body || - params.options.requestBodyStream || - (params.options.json && typeof params.options.json !== 'boolean') || - params.options.multipart) { - throw new Error("HTTP HEAD requests MUST NOT include a request body.") - } - return requester(params)(params.uri || null, params.options, params.callback) -} request.del = function (uri, options, callback) { var params = initParams(uri, options, callback) params.options.method = 'DELETE' return requester(params)(params.uri || null, params.options, params.callback) } + request.jar = function () { - return new CookieJar + return cookies.jar() } + request.cookie = function (str) { - if (str && str.uri) str = str.uri - if (typeof str !== 'string') throw new Error("The cookie function only accepts STRING as param") - return new Cookie(str) + return cookies.parse(str) +} + +request.defaults = function (options, requester) { + var self = this + var wrap = function (method) { + var headerlessOptions = function (options) { + options = extend({}, options) + delete options.headers + return options + } + + var getHeaders = function (params, options) { + return constructObject() + 
.extend(options.headers) + .extend(params.options.headers) + .done() + } + + return function (uri, opts, callback) { + var params = initParams(uri, opts, callback) + params.options = extend(headerlessOptions(options), params.options) + + if (options.headers) { + params.options.headers = getHeaders(params, options) + } + + if (isFunction(requester)) { + if (method === self) { + method = requester + } else { + params.options._requester = requester + } + } + + return method(params.options, params.callback) + } + } + + var defaults = wrap(self) + defaults.get = wrap(self.get) + defaults.patch = wrap(self.patch) + defaults.post = wrap(self.post) + defaults.put = wrap(self.put) + defaults.head = wrap(self.head) + defaults.del = wrap(self.del) + defaults.cookie = wrap(self.cookie) + defaults.jar = self.jar + defaults.defaults = self.defaults + return defaults } + +request.forever = function (agentOptions, optionsArg) { + var options = constructObject() + if (optionsArg) { + options.extend(optionsArg) + } + if (agentOptions) { + options.agentOptions = agentOptions + } + + options.extend({forever: true}) + return request.defaults(options.done()) +} + +// Exports + +module.exports = request +request.Request = require('./request') +request.debug = process.env.NODE_DEBUG && /\brequest\b/.test(process.env.NODE_DEBUG) +request.initParams = initParams diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/lib/cookies.js nodejs-0.11.15/deps/npm/node_modules/request/lib/cookies.js --- nodejs-0.11.13/deps/npm/node_modules/request/lib/cookies.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/lib/cookies.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,50 @@ +'use strict' + +var tough = require('tough-cookie') + +var Cookie = tough.Cookie + , CookieJar = tough.CookieJar + + +exports.parse = function(str) { + if (str && str.uri) { + str = str.uri + } + if (typeof str !== 'string') { + throw new Error('The cookie function only accepts STRING as 
param') + } + if (!Cookie) { + return null + } + return Cookie.parse(str) +} + +// Adapt the sometimes-Async api of tough.CookieJar to our requirements +function RequestJar() { + var self = this + self._jar = new CookieJar() +} +RequestJar.prototype.setCookie = function(cookieOrStr, uri, options) { + var self = this + return self._jar.setCookieSync(cookieOrStr, uri, options || {}) +} +RequestJar.prototype.getCookieString = function(uri) { + var self = this + return self._jar.getCookieStringSync(uri) +} +RequestJar.prototype.getCookies = function(uri) { + var self = this + return self._jar.getCookiesSync(uri) +} + +exports.jar = function() { + if (!CookieJar) { + // tough-cookie not loaded, return a stub object: + return { + setCookie: function(){}, + getCookieString: function(){}, + getCookies: function(){} + } + } + return new RequestJar() +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/lib/copy.js nodejs-0.11.15/deps/npm/node_modules/request/lib/copy.js --- nodejs-0.11.13/deps/npm/node_modules/request/lib/copy.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/lib/copy.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,5 @@ +'use strict' + module.exports = function copy (obj) { var o = {} @@ -5,4 +7,4 @@ o[i] = obj[i] }) return o -} \ No newline at end of file +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/lib/debug.js nodejs-0.11.15/deps/npm/node_modules/request/lib/debug.js --- nodejs-0.11.13/deps/npm/node_modules/request/lib/debug.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/lib/debug.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,11 @@ +'use strict' + var util = require('util') + , request = require('../index') + -module.exports = -function debug () { - if (/\brequest\b/.test(process.env.NODE_DEBUG)) +module.exports = function debug() { + if (request.debug) { console.error('REQUEST %s', util.format.apply(util, arguments)) + } } diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/request/lib/getSafe.js nodejs-0.11.15/deps/npm/node_modules/request/lib/getSafe.js --- nodejs-0.11.13/deps/npm/node_modules/request/lib/getSafe.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/lib/getSafe.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -// Safe toJSON -module.exports = -function getSafe (self, uuid) { - if (typeof self === 'object' || typeof self === 'function') var safe = {} - if (Array.isArray(self)) var safe = [] - - var recurse = [] - - Object.defineProperty(self, uuid, {}) - - var attrs = Object.keys(self).filter(function (i) { - if (i === uuid) return false - if ( (typeof self[i] !== 'object' && typeof self[i] !== 'function') || self[i] === null) return true - return !(Object.getOwnPropertyDescriptor(self[i], uuid)) - }) - - - for (var i=0;i=0.5.0 <0.6.0", + "_npmVersion": "1.3.2", + "_npmUser": { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + }, + "maintainers": [ + { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + } + ], + "directories": {}, + "_shasum": "c57103f7a17fc037f02d7c2e64b602ea223f7d63", + "_resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.5.0.tgz", + "homepage": "https://github.com/mikeal/aws-sign", + "scripts": {} } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/bl.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/bl.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/bl.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/bl.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,216 @@ +var DuplexStream = require('readable-stream').Duplex + , util = require('util') + +function BufferList (callback) { + if (!(this instanceof BufferList)) + return new BufferList(callback) + + this._bufs = [] + this.length = 0 + + if (typeof callback == 'function') { + this._callback = callback + + var piper = function (err) { + 
if (this._callback) { + this._callback(err) + this._callback = null + } + }.bind(this) + + this.on('pipe', function (src) { + src.on('error', piper) + }) + this.on('unpipe', function (src) { + src.removeListener('error', piper) + }) + } + else if (Buffer.isBuffer(callback)) + this.append(callback) + else if (Array.isArray(callback)) { + callback.forEach(function (b) { + Buffer.isBuffer(b) && this.append(b) + }.bind(this)) + } + + DuplexStream.call(this) +} + +util.inherits(BufferList, DuplexStream) + +BufferList.prototype._offset = function (offset) { + var tot = 0, i = 0, _t + for (; i < this._bufs.length; i++) { + _t = tot + this._bufs[i].length + if (offset < _t) + return [ i, offset - tot ] + tot = _t + } +} + +BufferList.prototype.append = function (buf) { + var isBuffer = Buffer.isBuffer(buf) || + buf instanceof BufferList + + this._bufs.push(isBuffer ? buf : new Buffer(buf)) + this.length += buf.length + return this +} + +BufferList.prototype._write = function (buf, encoding, callback) { + this.append(buf) + if (callback) + callback() +} + +BufferList.prototype._read = function (size) { + if (!this.length) + return this.push(null) + size = Math.min(size, this.length) + this.push(this.slice(0, size)) + this.consume(size) +} + +BufferList.prototype.end = function (chunk) { + DuplexStream.prototype.end.call(this, chunk) + + if (this._callback) { + this._callback(null, this.slice()) + this._callback = null + } +} + +BufferList.prototype.get = function (index) { + return this.slice(index, index + 1)[0] +} + +BufferList.prototype.slice = function (start, end) { + return this.copy(null, 0, start, end) +} + +BufferList.prototype.copy = function (dst, dstStart, srcStart, srcEnd) { + if (typeof srcStart != 'number' || srcStart < 0) + srcStart = 0 + if (typeof srcEnd != 'number' || srcEnd > this.length) + srcEnd = this.length + if (srcStart >= this.length) + return dst || new Buffer(0) + if (srcEnd <= 0) + return dst || new Buffer(0) + + var copy = !!dst + , off = 
this._offset(srcStart) + , len = srcEnd - srcStart + , bytes = len + , bufoff = (copy && dstStart) || 0 + , start = off[1] + , l + , i + + // copy/slice everything + if (srcStart === 0 && srcEnd == this.length) { + if (!copy) // slice, just return a full concat + return Buffer.concat(this._bufs) + + // copy, need to copy individual buffers + for (i = 0; i < this._bufs.length; i++) { + this._bufs[i].copy(dst, bufoff) + bufoff += this._bufs[i].length + } + + return dst + } + + // easy, cheap case where it's a subset of one of the buffers + if (bytes <= this._bufs[off[0]].length - start) { + return copy + ? this._bufs[off[0]].copy(dst, dstStart, start, start + bytes) + : this._bufs[off[0]].slice(start, start + bytes) + } + + if (!copy) // a slice, we need something to copy in to + dst = new Buffer(len) + + for (i = off[0]; i < this._bufs.length; i++) { + l = this._bufs[i].length - start + + if (bytes > l) { + this._bufs[i].copy(dst, bufoff, start) + } else { + this._bufs[i].copy(dst, bufoff, start, start + bytes) + break + } + + bufoff += l + bytes -= l + + if (start) + start = 0 + } + + return dst +} + +BufferList.prototype.toString = function (encoding, start, end) { + return this.slice(start, end).toString(encoding) +} + +BufferList.prototype.consume = function (bytes) { + while (this._bufs.length) { + if (bytes > this._bufs[0].length) { + bytes -= this._bufs[0].length + this.length -= this._bufs[0].length + this._bufs.shift() + } else { + this._bufs[0] = this._bufs[0].slice(bytes) + this.length -= bytes + break + } + } + return this +} + +BufferList.prototype.duplicate = function () { + var i = 0 + , copy = new BufferList() + + for (; i < this._bufs.length; i++) + copy.append(this._bufs[i]) + + return copy +} + +BufferList.prototype.destroy = function () { + this._bufs.length = 0; + this.length = 0; + this.push(null); +} + +;(function () { + var methods = { + 'readDoubleBE' : 8 + , 'readDoubleLE' : 8 + , 'readFloatBE' : 4 + , 'readFloatLE' : 4 + , 'readInt32BE' : 
4 + , 'readInt32LE' : 4 + , 'readUInt32BE' : 4 + , 'readUInt32LE' : 4 + , 'readInt16BE' : 2 + , 'readInt16LE' : 2 + , 'readUInt16BE' : 2 + , 'readUInt16LE' : 2 + , 'readInt8' : 1 + , 'readUInt8' : 1 + } + + for (var m in methods) { + (function (m) { + BufferList.prototype[m] = function (offset) { + return this.slice(offset, offset + methods[m])[m](0) + } + }(m)) + } +}()) + +module.exports = BufferList diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/.jshintrc nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/.jshintrc --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/.jshintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/.jshintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +{ + "predef": [ ] + , "bitwise": false + , "camelcase": false + , "curly": false + , "eqeqeq": false + , "forin": false + , "immed": false + , "latedef": false + , "noarg": true + , "noempty": true + , "nonew": true + , "plusplus": false + , "quotmark": true + , "regexp": false + , "undef": true + , "unused": true + , "strict": false + , "trailing": true + , "maxlen": 120 + , "asi": true + , "boss": true + , "debug": true + , "eqnull": true + , "esnext": true + , "evil": true + , "expr": true + , "funcscope": false + , "globalstrict": false + , "iterator": false + , "lastsemic": true + , "laxbreak": true + , "laxcomma": true + , "loopfunc": true + , "multistr": false + , "onecase": false + , "proto": false + , "regexdash": false + , "scripturl": true + , "smarttabs": false + , "shadow": false + , "sub": true + , "supernew": false + , "validthis": true + , "browser": true + , "couch": false + , "devel": false + , "dojo": false + , "mootools": false + , "node": true + , "nonstandard": true + , "prototypejs": false + , "rhino": false + , "worker": true + , "wsh": false + , "nomen": false + , "onevar": false + , "passfail": false +} \ No newline at end of file diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/LICENSE.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/LICENSE.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/LICENSE.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/LICENSE.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +The MIT License (MIT) +===================== + +Copyright (c) 2014 bl contributors +---------------------------------- + +*bl contributors listed at * + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/.npmignore nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +node_modules/ \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,61 @@ +{ + "name": "bl", + "version": "0.9.3", + "description": "Buffer List: collect buffers and access with a standard readable Buffer interface, streamable too!", + "main": "bl.js", + "scripts": { + "test": "node test/test.js | faucet", + "test-local": "brtapsauce-local test/basic-test.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/rvagg/bl.git" + }, + "homepage": "https://github.com/rvagg/bl", + "authors": [ + "Rod Vagg (https://github.com/rvagg)", + "Matteo Collina (https://github.com/mcollina)", + "Jarett Cruger (https://github.com/jcrugzz)" + ], + "keywords": [ + "buffer", + "buffers", + "stream", + "awesomesauce" + ], + "license": "MIT", + "dependencies": { + "readable-stream": "~1.0.26" + }, + "devDependencies": { + "tape": "~2.12.3", + "hash_file": "~0.1.1", + "faucet": "~0.0.1", + "brtapsauce": "~0.3.0" + }, + "gitHead": "4987a76bf6bafd7616e62c7023c955e62f3a9461", + "bugs": { + "url": "https://github.com/rvagg/bl/issues" + }, + "_id": "bl@0.9.3", + "_shasum": "c41eff3e7cb31bde107c8f10076d274eff7f7d44", + "_from": "bl@>=0.9.0 <0.10.0", + "_npmVersion": "1.4.27", + "_npmUser": { + "name": "rvagg", + "email": 
"rod@vagg.org" + }, + "maintainers": [ + { + "name": "rvagg", + "email": "rod@vagg.org" + } + ], + "dist": { + "shasum": "c41eff3e7cb31bde107c8f10076d274eff7f7d44", + "tarball": "http://registry.npmjs.org/bl/-/bl-0.9.3.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/bl/-/bl-0.9.3.tgz", + "readme": "ERROR: No README data found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,195 @@ +# bl *(BufferList)* + +**A Node.js Buffer list collector, reader and streamer thingy.** + +[![NPM](https://nodei.co/npm/bl.png?downloads=true&downloadRank=true)](https://nodei.co/npm/bl/) +[![NPM](https://nodei.co/npm-dl/bl.png?months=6&height=3)](https://nodei.co/npm/bl/) + +**bl** is a storage object for collections of Node Buffers, exposing them with the main Buffer readable API. Also works as a duplex stream so you can collect buffers from a stream that emits them and emit buffers to a stream that consumes them! + +The original buffers are kept intact and copies are only done as necessary. Any reads that require the use of a single original buffer will return a slice of that buffer only (which references the same memory as the original buffer). Reads that span buffers perform concatenation as required and return the results transparently. 
+ +```js +const BufferList = require('bl') + +var bl = new BufferList() +bl.append(new Buffer('abcd')) +bl.append(new Buffer('efg')) +bl.append('hi') // bl will also accept & convert Strings +bl.append(new Buffer('j')) +bl.append(new Buffer([ 0x3, 0x4 ])) + +console.log(bl.length) // 12 + +console.log(bl.slice(0, 10).toString('ascii')) // 'abcdefghij' +console.log(bl.slice(3, 10).toString('ascii')) // 'defghij' +console.log(bl.slice(3, 6).toString('ascii')) // 'def' +console.log(bl.slice(3, 8).toString('ascii')) // 'defgh' +console.log(bl.slice(5, 10).toString('ascii')) // 'fghij' + +// or just use toString! +console.log(bl.toString()) // 'abcdefghij\u0003\u0004' +console.log(bl.toString('ascii', 3, 8)) // 'defgh' +console.log(bl.toString('ascii', 5, 10)) // 'fghij' + +// other standard Buffer readables +console.log(bl.readUInt16BE(10)) // 0x0304 +console.log(bl.readUInt16LE(10)) // 0x0403 +``` + +Give it a callback in the constructor and use it just like **[concat-stream](https://github.com/maxogden/node-concat-stream)**: + +```js +const bl = require('bl') + , fs = require('fs') + +fs.createReadStream('README.md') + .pipe(bl(function (err, data) { // note 'new' isn't strictly required + // `data` is a complete Buffer object containing the full data + console.log(data.toString()) + })) +``` + +Note that when you use the *callback* method like this, the resulting `data` parameter is a concatenation of all `Buffer` objects in the list. If you want to avoid the overhead of this concatenation (in cases of extreme performance consciousness), then avoid the *callback* method and just listen to `'end'` instead, like a standard Stream. 
+ +Or to fetch a URL using [hyperquest](https://github.com/substack/hyperquest) (should work with [request](http://github.com/mikeal/request) and even plain Node http too!): +```js +const hyperquest = require('hyperquest') + , bl = require('bl') + , url = 'https://raw.github.com/rvagg/bl/master/README.md' + +hyperquest(url).pipe(bl(function (err, data) { + console.log(data.toString()) +})) +``` + +Or, use it as a readable stream to recompose a list of Buffers to an output source: + +```js +const BufferList = require('bl') + , fs = require('fs') + +var bl = new BufferList() +bl.append(new Buffer('abcd')) +bl.append(new Buffer('efg')) +bl.append(new Buffer('hi')) +bl.append(new Buffer('j')) + +bl.pipe(fs.createWriteStream('gibberish.txt')) +``` + +## API + + * new BufferList([ callback ]) + * bl.length + * bl.append(buffer) + * bl.get(index) + * bl.slice([ start[, end ] ]) + * bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) + * bl.duplicate() + * bl.consume(bytes) + * bl.toString([encoding, [ start, [ end ]]]) + * bl.readDoubleBE(), bl.readDoubleLE(), bl.readFloatBE(), bl.readFloatLE(), bl.readInt32BE(), bl.readInt32LE(), bl.readUInt32BE(), bl.readUInt32LE(), bl.readInt16BE(), bl.readInt16LE(), bl.readUInt16BE(), bl.readUInt16LE(), bl.readInt8(), bl.readUInt8() + * Streams + +-------------------------------------------------------- + +### new BufferList([ callback | buffer | buffer array ]) +The constructor takes an optional callback, if supplied, the callback will be called with an error argument followed by a reference to the **bl** instance, when `bl.end()` is called (i.e. from a piped stream). This is a convenient method of collecting the entire contents of a stream, particularly when the stream is *chunky*, such as a network stream. + +Normally, no arguments are required for the constructor, but you can initialise the list by passing in a single `Buffer` object or an array of `Buffer` object. 
+ +`new` is not strictly required, if you don't instantiate a new object, it will be done automatically for you so you can create a new instance simply with: + +```js +var bl = require('bl') +var myinstance = bl() + +// equivilant to: + +var BufferList = require('bl') +var myinstance = new BufferList() +``` + +-------------------------------------------------------- + +### bl.length +Get the length of the list in bytes. This is the sum of the lengths of all of the buffers contained in the list, minus any initial offset for a semi-consumed buffer at the beginning. Should accurately represent the total number of bytes that can be read from the list. + +-------------------------------------------------------- + +### bl.append(buffer) +`append(buffer)` adds an additional buffer or BufferList to the internal list. + +-------------------------------------------------------- + +### bl.get(index) +`get()` will return the byte at the specified index. + +-------------------------------------------------------- + +### bl.slice([ start, [ end ] ]) +`slice()` returns a new `Buffer` object containing the bytes within the range specified. Both `start` and `end` are optional and will default to the beginning and end of the list respectively. + +If the requested range spans a single internal buffer then a slice of that buffer will be returned which shares the original memory range of that Buffer. If the range spans multiple buffers then copy operations will likely occur to give you a uniform Buffer. + +-------------------------------------------------------- + +### bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) +`copy()` copies the content of the list in the `dest` buffer, starting from `destStart` and containing the bytes within the range specified with `srcStart` to `srcEnd`. `destStart`, `start` and `end` are optional and will default to the beginning of the `dest` buffer, and the beginning and end of the list respectively. 
+ +-------------------------------------------------------- + +### bl.duplicate() +`duplicate()` performs a **shallow-copy** of the list. The internal Buffers remains the same, so if you change the underlying Buffers, the change will be reflected in both the original and the duplicate. This method is needed if you want to call `consume()` or `pipe()` and still keep the original list.Example: + +```js +var bl = new BufferList() + +bl.append('hello') +bl.append(' world') +bl.append('\n') + +bl.duplicate().pipe(process.stdout, { end: false }) + +console.log(bl.toString()) +``` + +-------------------------------------------------------- + +### bl.consume(bytes) +`consume()` will shift bytes *off the start of the list*. The number of bytes consumed don't need to line up with the sizes of the internal Buffers—initial offsets will be calculated accordingly in order to give you a consistent view of the data. + +-------------------------------------------------------- + +### bl.toString([encoding, [ start, [ end ]]]) +`toString()` will return a string representation of the buffer. The optional `start` and `end` arguments are passed on to `slice()`, while the `encoding` is passed on to `toString()` of the resulting Buffer. See the [Buffer#toString()](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end) documentation for more information. + +-------------------------------------------------------- + +### bl.readDoubleBE(), bl.readDoubleLE(), bl.readFloatBE(), bl.readFloatLE(), bl.readInt32BE(), bl.readInt32LE(), bl.readUInt32BE(), bl.readUInt32LE(), bl.readInt16BE(), bl.readInt16LE(), bl.readUInt16BE(), bl.readUInt16LE(), bl.readInt8(), bl.readUInt8() + +All of the standard byte-reading methods of the `Buffer` interface are implemented and will operate across internal Buffer boundaries transparently. + +See the [Buffer](http://nodejs.org/docs/latest/api/buffer.html) documentation for how these work. 
+ +-------------------------------------------------------- + +### Streams +**bl** is a Node **[Duplex Stream](http://nodejs.org/docs/latest/api/stream.html#stream_class_stream_duplex)**, so it can be read from and written to like a standard Node stream. You can also `pipe()` to and from a **bl** instance. + +-------------------------------------------------------- + +## Contributors + +**bl** is brought to you by the following hackers: + + * [Rod Vagg](https://github.com/rvagg) + * [Matteo Collina](https://github.com/mcollina) + * [Jarett Cruger](https://github.com/jcrugzz) + +======= + +## License + +**bl** is Copyright (c) 2013 Rod Vagg [@rvagg](https://twitter.com/rvagg) and licenced under the MIT licence. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE.md file for more details. diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/test/basic-test.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/test/basic-test.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/test/basic-test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/test/basic-test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,541 @@ +var tape = require('tape') + , crypto = require('crypto') + , fs = require('fs') + , hash = require('hash_file') + , BufferList = require('../') + + , encodings = + ('hex utf8 utf-8 ascii binary base64' + + (process.browser ? 
'' : ' ucs2 ucs-2 utf16le utf-16le')).split(' ') + +tape('single bytes from single buffer', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abcd')) + + t.equal(bl.length, 4) + + t.equal(bl.get(0), 97) + t.equal(bl.get(1), 98) + t.equal(bl.get(2), 99) + t.equal(bl.get(3), 100) + + t.end() +}) + +tape('single bytes from multiple buffers', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.length, 10) + + t.equal(bl.get(0), 97) + t.equal(bl.get(1), 98) + t.equal(bl.get(2), 99) + t.equal(bl.get(3), 100) + t.equal(bl.get(4), 101) + t.equal(bl.get(5), 102) + t.equal(bl.get(6), 103) + t.equal(bl.get(7), 104) + t.equal(bl.get(8), 105) + t.equal(bl.get(9), 106) + t.end() +}) + +tape('multi bytes from single buffer', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abcd')) + + t.equal(bl.length, 4) + + t.equal(bl.slice(0, 4).toString('ascii'), 'abcd') + t.equal(bl.slice(0, 3).toString('ascii'), 'abc') + t.equal(bl.slice(1, 4).toString('ascii'), 'bcd') + + t.end() +}) + +tape('multiple bytes from multiple buffers', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +tape('multiple bytes from multiple buffer lists', function (t) { + var bl = new BufferList() + + bl.append(new BufferList([new Buffer('abcd'), new Buffer('efg')])) + bl.append(new BufferList([new Buffer('hi'), new Buffer('j')])) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + 
t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +tape('consuming from multiple buffers', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + bl.consume(3) + t.equal(bl.length, 7) + t.equal(bl.slice(0, 7).toString('ascii'), 'defghij') + + bl.consume(2) + t.equal(bl.length, 5) + t.equal(bl.slice(0, 5).toString('ascii'), 'fghij') + + bl.consume(1) + t.equal(bl.length, 4) + t.equal(bl.slice(0, 4).toString('ascii'), 'ghij') + + bl.consume(1) + t.equal(bl.length, 3) + t.equal(bl.slice(0, 3).toString('ascii'), 'hij') + + bl.consume(2) + t.equal(bl.length, 1) + t.equal(bl.slice(0, 1).toString('ascii'), 'j') + + t.end() +}) + +tape('test readUInt8 / readInt8', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt8(2), 0x3) + t.equal(bl.readInt8(2), 0x3) + t.equal(bl.readUInt8(3), 0x4) + t.equal(bl.readInt8(3), 0x4) + t.equal(bl.readUInt8(4), 0x23) + t.equal(bl.readInt8(4), 0x23) + t.equal(bl.readUInt8(5), 0x42) + t.equal(bl.readInt8(5), 0x42) + t.end() +}) + +tape('test readUInt16LE / readUInt16BE / readInt16LE / readInt16BE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt16BE(2), 0x0304) + t.equal(bl.readUInt16LE(2), 0x0403) + t.equal(bl.readInt16BE(2), 0x0304) + 
t.equal(bl.readInt16LE(2), 0x0403) + t.equal(bl.readUInt16BE(3), 0x0423) + t.equal(bl.readUInt16LE(3), 0x2304) + t.equal(bl.readInt16BE(3), 0x0423) + t.equal(bl.readInt16LE(3), 0x2304) + t.equal(bl.readUInt16BE(4), 0x2342) + t.equal(bl.readUInt16LE(4), 0x4223) + t.equal(bl.readInt16BE(4), 0x2342) + t.equal(bl.readInt16LE(4), 0x4223) + t.end() +}) + +tape('test readUInt32LE / readUInt32BE / readInt32LE / readInt32BE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt32BE(2), 0x03042342) + t.equal(bl.readUInt32LE(2), 0x42230403) + t.equal(bl.readInt32BE(2), 0x03042342) + t.equal(bl.readInt32LE(2), 0x42230403) + t.end() +}) + +tape('test readFloatLE / readFloatBE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x00 + buf2[2] = 0x00 + buf3[0] = 0x80 + buf3[1] = 0x3f + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readFloatLE(2), 0x01) + t.end() +}) + +tape('test readDoubleLE / readDoubleBE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(10) + , bl = new BufferList() + + buf2[1] = 0x55 + buf2[2] = 0x55 + buf3[0] = 0x55 + buf3[1] = 0x55 + buf3[2] = 0x55 + buf3[3] = 0x55 + buf3[4] = 0xd5 + buf3[5] = 0x3f + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readDoubleLE(2), 0.3333333333333333) + t.end() +}) + +tape('test toString', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.toString('ascii', 0, 10), 'abcdefghij') + t.equal(bl.toString('ascii', 3, 10), 'defghij') + t.equal(bl.toString('ascii', 3, 6), 'def') + t.equal(bl.toString('ascii', 3, 8), 'defgh') + 
t.equal(bl.toString('ascii', 5, 10), 'fghij') + + t.end() +}) + +tape('test toString encoding', function (t) { + var bl = new BufferList() + , b = new Buffer('abcdefghij\xff\x00') + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + bl.append(new Buffer('\xff\x00')) + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc), enc) + }) + + t.end() +}) + +!process.browser && tape('test stream', function (t) { + var random = crypto.randomBytes(65534) + , rndhash = hash(random, 'md5') + , md5sum = crypto.createHash('md5') + , bl = new BufferList(function (err, buf) { + t.ok(Buffer.isBuffer(buf)) + t.ok(err === null) + t.equal(rndhash, hash(bl.slice(), 'md5')) + t.equal(rndhash, hash(buf, 'md5')) + + bl.pipe(fs.createWriteStream('/tmp/bl_test_rnd_out.dat')) + .on('close', function () { + var s = fs.createReadStream('/tmp/bl_test_rnd_out.dat') + s.on('data', md5sum.update.bind(md5sum)) + s.on('end', function() { + t.equal(rndhash, md5sum.digest('hex'), 'woohoo! 
correct hash!') + t.end() + }) + }) + + }) + + fs.writeFileSync('/tmp/bl_test_rnd.dat', random) + fs.createReadStream('/tmp/bl_test_rnd.dat').pipe(bl) +}) + +tape('instantiation with Buffer', function (t) { + var buf = crypto.randomBytes(1024) + , buf2 = crypto.randomBytes(1024) + , b = BufferList(buf) + + t.equal(buf.toString('hex'), b.slice().toString('hex'), 'same buffer') + b = BufferList([ buf, buf2 ]) + t.equal(b.slice().toString('hex'), Buffer.concat([ buf, buf2 ]).toString('hex'), 'same buffer') + t.end() +}) + +tape('test String appendage', function (t) { + var bl = new BufferList() + , b = new Buffer('abcdefghij\xff\x00') + + bl.append('abcd') + bl.append('efg') + bl.append('hi') + bl.append('j') + bl.append('\xff\x00') + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc)) + }) + + t.end() +}) + +tape('write nothing, should get empty buffer', function (t) { + t.plan(3) + BufferList(function (err, data) { + t.notOk(err, 'no error') + t.ok(Buffer.isBuffer(data), 'got a buffer') + t.equal(0, data.length, 'got a zero-length buffer') + t.end() + }).end() +}) + +tape('unicode string', function (t) { + t.plan(2) + var inp1 = '\u2600' + , inp2 = '\u2603' + , exp = inp1 + ' and ' + inp2 + , bl = BufferList() + bl.write(inp1) + bl.write(' and ') + bl.write(inp2) + t.equal(exp, bl.toString()) + t.equal(new Buffer(exp).toString('hex'), bl.toString('hex')) +}) + +tape('should emit finish', function (t) { + var source = BufferList() + , dest = BufferList() + + source.write('hello') + source.pipe(dest) + + dest.on('finish', function () { + t.equal(dest.toString('utf8'), 'hello') + t.end() + }) +}) + +tape('basic copy', function (t) { + var buf = crypto.randomBytes(1024) + , buf2 = new Buffer(1024) + , b = BufferList(buf) + + b.copy(buf2) + t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy after many appends', function (t) { + var buf = crypto.randomBytes(512) + , buf2 = new Buffer(1024) + , 
b = BufferList(buf) + + b.append(buf) + b.copy(buf2) + t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy at a precise position', function (t) { + var buf = crypto.randomBytes(1004) + , buf2 = new Buffer(1024) + , b = BufferList(buf) + + b.copy(buf2, 20) + t.equal(b.slice().toString('hex'), buf2.slice(20).toString('hex'), 'same buffer') + t.end() +}) + +tape('copy starting from a precise location', function (t) { + var buf = crypto.randomBytes(10) + , buf2 = new Buffer(5) + , b = BufferList(buf) + + b.copy(buf2, 0, 5) + t.equal(b.slice(5).toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy in an interval', function (t) { + var rnd = crypto.randomBytes(10) + , b = BufferList(rnd) // put the random bytes there + , actual = new Buffer(3) + , expected = new Buffer(3) + + rnd.copy(expected, 0, 5, 8) + b.copy(actual, 0, 5, 8) + + t.equal(actual.toString('hex'), expected.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy an interval between two buffers', function (t) { + var buf = crypto.randomBytes(10) + , buf2 = new Buffer(10) + , b = BufferList(buf) + + b.append(buf) + b.copy(buf2, 0, 5, 15) + + t.equal(b.slice(5, 15).toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('duplicate', function (t) { + t.plan(2) + + var bl = new BufferList('abcdefghij\xff\x00') + , dup = bl.duplicate() + + t.equal(bl.prototype, dup.prototype) + t.equal(bl.toString('hex'), dup.toString('hex')) +}) + +tape('destroy no pipe', function (t) { + t.plan(2) + + var bl = new BufferList('alsdkfja;lsdkfja;lsdk') + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) +}) + +!process.browser && tape('destroy with pipe before read end', function (t) { + t.plan(2) + + var bl = new BufferList() + fs.createReadStream(__dirname + '/sauce.js') + .pipe(bl) + + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + +}) + +!process.browser && tape('destroy with pipe 
before read end with race', function (t) { + t.plan(2) + + var bl = new BufferList() + fs.createReadStream(__dirname + '/sauce.js') + .pipe(bl) + + setTimeout(function () { + bl.destroy() + setTimeout(function () { + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + }, 500) + }, 500) +}) + +!process.browser && tape('destroy with pipe after read end', function (t) { + t.plan(2) + + var bl = new BufferList() + fs.createReadStream(__dirname + '/sauce.js') + .on('end', onEnd) + .pipe(bl) + + function onEnd () { + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + } +}) + +!process.browser && tape('destroy with pipe while writing to a destination', function (t) { + t.plan(4) + + var bl = new BufferList() + , ds = new BufferList() + + fs.createReadStream(__dirname + '/sauce.js') + .on('end', onEnd) + .pipe(bl) + + function onEnd () { + bl.pipe(ds) + + setTimeout(function () { + bl.destroy() + + t.equals(bl._bufs.length, 0) + t.equals(bl.length, 0) + + ds.destroy() + + t.equals(bl._bufs.length, 0) + t.equals(bl.length, 0) + + }, 100) + } +}) + +!process.browser && tape('handle error', function (t) { + t.plan(2) + fs.createReadStream('/does/not/exist').pipe(BufferList(function (err, data) { + t.ok(err instanceof Error, 'has error') + t.notOk(data, 'no data') + })) +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/test/sauce.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/test/sauce.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/test/sauce.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/test/sauce.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,38 @@ +#!/usr/bin/env node + +const user = process.env.SAUCE_USER + , key = process.env.SAUCE_KEY + , path = require('path') + , brtapsauce = require('brtapsauce') + , testFile = path.join(__dirname, 'basic-test.js') + + , capabilities = [ + { browserName: 'chrome' , platform: 'Windows XP', 
version: '' } + , { browserName: 'firefox' , platform: 'Windows 8' , version: '' } + , { browserName: 'firefox' , platform: 'Windows XP', version: '4' } + , { browserName: 'internet explorer' , platform: 'Windows 8' , version: '10' } + , { browserName: 'internet explorer' , platform: 'Windows 7' , version: '9' } + , { browserName: 'internet explorer' , platform: 'Windows 7' , version: '8' } + , { browserName: 'internet explorer' , platform: 'Windows XP', version: '7' } + , { browserName: 'internet explorer' , platform: 'Windows XP', version: '6' } + , { browserName: 'safari' , platform: 'Windows 7' , version: '5' } + , { browserName: 'safari' , platform: 'OS X 10.8' , version: '6' } + , { browserName: 'opera' , platform: 'Windows 7' , version: '' } + , { browserName: 'opera' , platform: 'Windows 7' , version: '11' } + , { browserName: 'ipad' , platform: 'OS X 10.8' , version: '6' } + , { browserName: 'android' , platform: 'Linux' , version: '4.0', 'device-type': 'tablet' } + ] + +if (!user) + throw new Error('Must set a SAUCE_USER env var') +if (!key) + throw new Error('Must set a SAUCE_KEY env var') + +brtapsauce({ + name : 'Traversty' + , user : user + , key : key + , brsrc : testFile + , capabilities : capabilities + , options : { timeout: 60 * 6 } +}) \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/test/test.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/test/test.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/test/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/test/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +require('./basic-test') + +if (!process.env.SAUCE_KEY || !process.env.SAUCE_USER) + return console.log('SAUCE_KEY and/or SAUCE_USER not set, not running sauce tests') + +if (!/v0\.10/.test(process.version)) + return console.log('Not Node v0.10.x, not running sauce tests') + 
+require('./sauce.js') \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/.travis.yml nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/bl/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/bl/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +language: node_js +node_js: + - 0.8 + - "0.10" +branches: + only: + - master +notifications: + email: + - rod@vagg.org +script: npm test \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/caseless/index.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/caseless/index.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/caseless/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/caseless/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,65 @@ +function Caseless (dict) { + this.dict = dict +} +Caseless.prototype.set = function (name, value, clobber) { + if (typeof name === 'object') { + for (var i in name) { + this.set(i, name[i], value) + } + } else { + if (typeof clobber === 'undefined') clobber = true + var has = this.has(name) + + if (!clobber && has) this.dict[has] = this.dict[has] + ',' + value + else this.dict[has || name] = value + return has + } +} +Caseless.prototype.has = function (name) { + var keys = Object.keys(this.dict) + , name = name.toLowerCase() + ; + for (var i=0;i=0.6.0 <0.7.0", + "_npmVersion": "1.4.9", + "_npmUser": { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + }, + "maintainers": [ + { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + } + ], + "dist": { + "shasum": "8167c1ab8397fb5bb95f96d28e5a81c50f247ac4", + "tarball": "http://registry.npmjs.org/caseless/-/caseless-0.6.0.tgz" + }, + "directories": {}, + "_resolved": 
"https://registry.npmjs.org/caseless/-/caseless-0.6.0.tgz", + "readme": "ERROR: No README data found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/caseless/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/caseless/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/caseless/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/caseless/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +## Caseless -- wrap an object to set and get property with caseless semantics but also preserve caseing. + +This library is incredibly useful when working with HTTP headers. It allows you to get/set/check for headers in a caseless manor while also preserving the caseing of headers the first time they are set. + +## Usage + +```javascript +var headers = {} + , c = caseless(headers) + ; +c.set('a-Header', 'asdf') +c.get('a-header') === 'asdf' +``` + +## has(key) + +Has takes a name and if it finds a matching header will return that header name with the preserved caseing it was set with. + +```javascript +c.has('a-header') === 'a-Header' +``` + +## set(key, value[, clobber=true]) + +Set is fairly straight forward except that if the header exists and clobber is disabled it will add `','+value` to the existing header. + +```javascript +c.set('a-Header', 'fdas') +c.set('a-HEADER', 'more', false) +c.get('a-header') === 'fdsa,more' +``` + +## swap(key) + +Swaps the casing of a header with the new one that is passed in. 
+ +```javascript +var headers = {} + , c = caseless(headers) + ; +c.set('a-Header', 'fdas') +c.swap('a-HEADER') +c.has('a-header') === 'a-HEADER' +headers === {'a-HEADER': 'fdas'} +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/caseless/test.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/caseless/test.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/caseless/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/caseless/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +var tape = require('tape') + , caseless = require('./') + ; + +tape('set get has', function (t) { + var headers = {} + , c = caseless(headers) + ; + t.plan(14) + c.set('a-Header', 'asdf') + t.equal(c.get('a-header'), 'asdf') + t.equal(c.has('a-header'), 'a-Header') + t.ok(!c.has('nothing')) + // old bug where we used the wrong regex + t.ok(!c.has('a-hea')) + c.set('a-header', 'fdsa') + t.equal(c.get('a-header'), 'fdsa') + t.equal(c.get('a-Header'), 'fdsa') + c.set('a-HEADER', 'more', false) + t.equal(c.get('a-header'), 'fdsa,more') + + t.deepEqual(headers, {'a-Header': 'fdsa,more'}) + c.swap('a-HEADER') + t.deepEqual(headers, {'a-HEADER': 'fdsa,more'}) + + c.set('deleteme', 'foobar') + t.ok(c.has('deleteme')) + t.ok(c.del('deleteme')) + t.notOk(c.has('deleteme')) + t.notOk(c.has('idonotexist')) + t.ok(c.del('idonotexist')) + +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/forever-agent/index.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/forever-agent/index.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/forever-agent/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/forever-agent/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -24,7 +24,7 @@ self.freeSockets[name].push(socket) // if an error happens while we don't use the socket anyway, meh, throw the socket away - 
function onIdleError() { + var onIdleError = function() { socket.destroy() } socket._onIdleError = onIdleError diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/forever-agent/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/forever-agent/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/forever-agent/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/forever-agent/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "forever-agent", "description": "HTTP Agent that keeps socket connections alive between keep-alive requests. Formerly part of mikeal/request, now a standalone module.", - "version": "0.5.0", + "version": "0.5.2", "repository": { "url": "https://github.com/mikeal/forever-agent" }, @@ -17,12 +17,30 @@ "engines": { "node": "*" }, - "readme": "forever-agent\n=============\n\nHTTP Agent that keeps socket connections alive between keep-alive requests. 
Formerly part of mikeal/request, now a standalone module.\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/mikeal/forever-agent/issues" }, "homepage": "https://github.com/mikeal/forever-agent", - "_id": "forever-agent@0.5.0", - "_from": "forever-agent@~0.5.0" + "_id": "forever-agent@0.5.2", + "dist": { + "shasum": "6d0e09c4921f94a27f63d3b49c5feff1ea4c5130", + "tarball": "http://registry.npmjs.org/forever-agent/-/forever-agent-0.5.2.tgz" + }, + "_from": "forever-agent@>=0.5.0 <0.6.0", + "_npmVersion": "1.3.21", + "_npmUser": { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + }, + "maintainers": [ + { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + } + ], + "directories": {}, + "_shasum": "6d0e09c4921f94a27f63d3b49c5feff1ea4c5130", + "_resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.5.2.tgz", + "readme": "ERROR: No README data found!", + "scripts": {} } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/lib/form_data.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/lib/form_data.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/lib/form_data.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/lib/form_data.js 2015-01-20 21:22:17.000000000 +0000 @@ -79,14 +79,38 @@ this._lengthRetrievers.push(function(next) { if (value.hasOwnProperty('fd')) { - fs.stat(value.path, function(err, stat) { - if (err) { - next(err); - return; - } - next(null, stat.size); - }); + // take read range into a account + // `end` = Infinity –> read file till the end + // + // TODO: Looks like there is bug in Node fs.createReadStream + // it doesn't respect `end` options without `start` options + // Fix it when node fixes it. 
+ // https://github.com/joyent/node/issues/7819 + if (value.end != undefined && value.end != Infinity && value.start != undefined) { + + // when end specified + // no need to calculate range + // inclusive, starts with 0 + next(null, value.end+1 - (value.start ? value.start : 0)); + + // not that fast snoopy + } else { + // still need to fetch file size from fs + fs.stat(value.path, function(err, stat) { + + var fileSize; + + if (err) { + next(err); + return; + } + + // update final size based on the range options + fileSize = stat.size - (value.start ? value.start : 0); + next(null, fileSize); + }); + } // or http response } else if (value.hasOwnProperty('httpVersion')) { @@ -255,8 +279,7 @@ var request , options , defaults = { - method : 'post', - headers: this.getHeaders() + method : 'post' }; // parse provided url if it's string @@ -279,6 +302,9 @@ } } + // put that good code in getHeaders to some use + options.headers = this.getHeaders(params.headers); + // https if specified, fallback to http in any other case if (params.protocol == 'https:') { request = https.request(options); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/lib/async.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/lib/async.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/lib/async.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/lib/async.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,11 @@ +/*! 
+ * async + * https://github.com/caolan/async + * + * Copyright 2010-2014 Caolan McMahon + * Released under the MIT license + */ +/*jshint onevar: false, indent:4 */ /*global setImmediate: false, setTimeout: false, console: false */ (function () { @@ -27,6 +35,12 @@ //// cross-browser compatiblity functions //// + var _toString = Object.prototype.toString; + + var _isArray = Array.isArray || function (obj) { + return _toString.call(obj) === '[object Array]'; + }; + var _each = function (arr, iterator) { if (arr.forEach) { return arr.forEach(iterator); @@ -91,7 +105,10 @@ else { async.nextTick = process.nextTick; if (typeof setImmediate !== 'undefined') { - async.setImmediate = setImmediate; + async.setImmediate = function (fn) { + // not a direct alias for IE10 compatibility + setImmediate(fn); + }; } else { async.setImmediate = async.nextTick; @@ -105,19 +122,20 @@ } var completed = 0; _each(arr, function (x) { - iterator(x, only_once(function (err) { - if (err) { - callback(err); - callback = function () {}; - } - else { - completed += 1; - if (completed >= arr.length) { - callback(null); - } - } - })); + iterator(x, only_once(done) ); }); + function done(err) { + if (err) { + callback(err); + callback = function () {}; + } + else { + completed += 1; + if (completed >= arr.length) { + callback(); + } + } + } }; async.forEach = async.each; @@ -136,7 +154,7 @@ else { completed += 1; if (completed >= arr.length) { - callback(null); + callback(); } else { iterate(); @@ -216,18 +234,26 @@ var _asyncMap = function (eachfn, arr, iterator, callback) { - var results = []; arr = _map(arr, function (x, i) { return {index: i, value: x}; }); - eachfn(arr, function (x, callback) { - iterator(x.value, function (err, v) { - results[x.index] = v; - callback(err); + if (!callback) { + eachfn(arr, function (x, callback) { + iterator(x.value, function (err) { + callback(err); + }); }); - }, function (err) { - callback(err, results); - }); + } else { + var results = []; + eachfn(arr, 
function (x, callback) { + iterator(x.value, function (err, v) { + results[x.index] = v; + callback(err); + }); + }, function (err) { + callback(err, results); + }); + } }; async.map = doParallel(_asyncMap); async.mapSeries = doSeries(_asyncMap); @@ -393,8 +419,9 @@ async.auto = function (tasks, callback) { callback = callback || function () {}; var keys = _keys(tasks); - if (!keys.length) { - return callback(null); + var remainingTasks = keys.length + if (!remainingTasks) { + return callback(); } var results = {}; @@ -412,20 +439,24 @@ } }; var taskComplete = function () { + remainingTasks-- _each(listeners.slice(0), function (fn) { fn(); }); }; addListener(function () { - if (_keys(results).length === keys.length) { - callback(null, results); + if (!remainingTasks) { + var theCallback = callback; + // prevent final callback from calling itself if it errors callback = function () {}; + + theCallback(null, results); } }); _each(keys, function (k) { - var task = (tasks[k] instanceof Function) ? [tasks[k]]: tasks[k]; + var task = _isArray(tasks[k]) ? 
tasks[k]: [tasks[k]]; var taskCallback = function (err) { var args = Array.prototype.slice.call(arguments, 1); if (args.length <= 1) { @@ -467,9 +498,40 @@ }); }; + async.retry = function(times, task, callback) { + var DEFAULT_TIMES = 5; + var attempts = []; + // Use defaults if times not passed + if (typeof times === 'function') { + callback = task; + task = times; + times = DEFAULT_TIMES; + } + // Make sure times is a number + times = parseInt(times, 10) || DEFAULT_TIMES; + var wrappedTask = function(wrappedCallback, wrappedResults) { + var retryAttempt = function(task, finalAttempt) { + return function(seriesCallback) { + task(function(err, result){ + seriesCallback(!err || finalAttempt, {err: err, result: result}); + }, wrappedResults); + }; + }; + while (times) { + attempts.push(retryAttempt(task, !(times-=1))); + } + async.series(attempts, function(done, data){ + data = data[data.length - 1]; + (wrappedCallback || callback)(data.err, data.result); + }); + } + // If a callback is passed, run this as a controll flow + return callback ? 
wrappedTask() : wrappedTask + }; + async.waterfall = function (tasks, callback) { callback = callback || function () {}; - if (tasks.constructor !== Array) { + if (!_isArray(tasks)) { var err = new Error('First argument to waterfall must be an array of functions'); return callback(err); } @@ -502,7 +564,7 @@ var _parallel = function(eachfn, tasks, callback) { callback = callback || function () {}; - if (tasks.constructor === Array) { + if (_isArray(tasks)) { eachfn.map(tasks, function (fn, callback) { if (fn) { fn(function (err) { @@ -542,7 +604,7 @@ async.series = function (tasks, callback) { callback = callback || function () {}; - if (tasks.constructor === Array) { + if (_isArray(tasks)) { async.mapSeries(tasks, function (fn, callback) { if (fn) { fn(function (err) { @@ -630,7 +692,8 @@ if (err) { return callback(err); } - if (test()) { + var args = Array.prototype.slice.call(arguments, 1); + if (test.apply(null, args)) { async.doWhilst(iterator, test, callback); } else { @@ -658,7 +721,8 @@ if (err) { return callback(err); } - if (!test()) { + var args = Array.prototype.slice.call(arguments, 1); + if (!test.apply(null, args)) { async.doUntil(iterator, test, callback); } else { @@ -672,9 +736,20 @@ concurrency = 1; } function _insert(q, data, pos, callback) { - if(data.constructor !== Array) { + if (!q.started){ + q.started = true; + } + if (!_isArray(data)) { data = [data]; } + if(data.length == 0) { + // call drain immediately if there are no tasks + return async.setImmediate(function() { + if (q.drain) { + q.drain(); + } + }); + } _each(data, function(task) { var item = { data: task, @@ -687,7 +762,7 @@ q.tasks.push(item); } - if (q.saturated && q.tasks.length === concurrency) { + if (q.saturated && q.tasks.length === q.concurrency) { q.saturated(); } async.setImmediate(q.process); @@ -701,14 +776,20 @@ saturated: null, empty: null, drain: null, + started: false, + paused: false, push: function (data, callback) { _insert(q, data, false, callback); }, + kill: 
function () { + q.drain = null; + q.tasks = []; + }, unshift: function (data, callback) { _insert(q, data, true, callback); }, process: function () { - if (workers < q.concurrency && q.tasks.length) { + if (!q.paused && workers < q.concurrency && q.tasks.length) { var task = q.tasks.shift(); if (q.empty && q.tasks.length === 0) { q.empty(); @@ -733,10 +814,88 @@ }, running: function () { return workers; + }, + idle: function() { + return q.tasks.length + workers === 0; + }, + pause: function () { + if (q.paused === true) { return; } + q.paused = true; + q.process(); + }, + resume: function () { + if (q.paused === false) { return; } + q.paused = false; + q.process(); } }; return q; }; + + async.priorityQueue = function (worker, concurrency) { + + function _compareTasks(a, b){ + return a.priority - b.priority; + }; + + function _binarySearch(sequence, item, compare) { + var beg = -1, + end = sequence.length - 1; + while (beg < end) { + var mid = beg + ((end - beg + 1) >>> 1); + if (compare(item, sequence[mid]) >= 0) { + beg = mid; + } else { + end = mid - 1; + } + } + return beg; + } + + function _insert(q, data, priority, callback) { + if (!q.started){ + q.started = true; + } + if (!_isArray(data)) { + data = [data]; + } + if(data.length == 0) { + // call drain immediately if there are no tasks + return async.setImmediate(function() { + if (q.drain) { + q.drain(); + } + }); + } + _each(data, function(task) { + var item = { + data: task, + priority: priority, + callback: typeof callback === 'function' ? 
callback : null + }; + + q.tasks.splice(_binarySearch(q.tasks, item, _compareTasks) + 1, 0, item); + + if (q.saturated && q.tasks.length === q.concurrency) { + q.saturated(); + } + async.setImmediate(q.process); + }); + } + + // Start with a normal queue + var q = async.queue(worker, concurrency); + + // Override push to accept second parameter representing priority + q.push = function (data, priority, callback) { + _insert(q, data, priority, callback); + }; + + // Remove unshift function + delete q.unshift; + + return q; + }; async.cargo = function (worker, payload) { var working = false, @@ -748,8 +907,9 @@ saturated: null, empty: null, drain: null, + drained: true, push: function (data, callback) { - if(data.constructor !== Array) { + if (!_isArray(data)) { data = [data]; } _each(data, function(task) { @@ -757,6 +917,7 @@ data: task, callback: typeof callback === 'function' ? callback : null }); + cargo.drained = false; if (cargo.saturated && tasks.length === payload) { cargo.saturated(); } @@ -766,13 +927,14 @@ process: function process() { if (working) return; if (tasks.length === 0) { - if(cargo.drain) cargo.drain(); + if(cargo.drain && !cargo.drained) cargo.drain(); + cargo.drained = true; return; } var ts = typeof payload === 'number' ? tasks.splice(0, payload) - : tasks.splice(0); + : tasks.splice(0, tasks.length); var ds = _map(ts, function (task) { return task.data; @@ -840,7 +1002,9 @@ var callback = args.pop(); var key = hasher.apply(null, args); if (key in memo) { - callback.apply(null, memo[key]); + async.nextTick(function () { + callback.apply(null, memo[key]); + }); } else if (key in queues) { queues[key].push(callback); @@ -884,8 +1048,8 @@ return async.mapSeries(counter, iterator, callback); }; - async.compose = function (/* functions... */) { - var fns = Array.prototype.reverse.call(arguments); + async.seq = function (/* functions... 
*/) { + var fns = arguments; return function () { var that = this; var args = Array.prototype.slice.call(arguments); @@ -903,6 +1067,10 @@ }; }; + async.compose = function (/* functions... */) { + return async.seq.apply(null, Array.prototype.reverse.call(arguments)); + }; + var _applyEach = function (eachfn, fns /*args...*/) { var go = function () { var that = this; @@ -937,16 +1105,16 @@ next(); }; + // Node.js + if (typeof module !== 'undefined' && module.exports) { + module.exports = async; + } // AMD / RequireJS - if (typeof define !== 'undefined' && define.amd) { + else if (typeof define !== 'undefined' && define.amd) { define([], function () { return async; }); } - // Node.js - else if (typeof module !== 'undefined' && module.exports) { - module.exports = async; - } // included directly via \n\n```\n\n## Documentation\n\n### Collections\n\n* [each](#each)\n* [map](#map)\n* [filter](#filter)\n* [reject](#reject)\n* [reduce](#reduce)\n* [detect](#detect)\n* [sortBy](#sortBy)\n* [some](#some)\n* [every](#every)\n* [concat](#concat)\n\n### Control Flow\n\n* [series](#series)\n* [parallel](#parallel)\n* [whilst](#whilst)\n* [doWhilst](#doWhilst)\n* [until](#until)\n* [doUntil](#doUntil)\n* [forever](#forever)\n* [waterfall](#waterfall)\n* [compose](#compose)\n* [applyEach](#applyEach)\n* [queue](#queue)\n* [cargo](#cargo)\n* [auto](#auto)\n* [iterator](#iterator)\n* [apply](#apply)\n* [nextTick](#nextTick)\n* [times](#times)\n* [timesSeries](#timesSeries)\n\n### Utils\n\n* [memoize](#memoize)\n* [unmemoize](#unmemoize)\n* [log](#log)\n* [dir](#dir)\n* [noConflict](#noConflict)\n\n\n## Collections\n\n\n\n### each(arr, iterator, callback)\n\nApplies an iterator function to each item in an array, in parallel.\nThe iterator is called with an item from the list and a callback for when it\nhas finished. 
If the iterator passes an error to this callback, the main\ncallback for the each function is immediately called with the error.\n\nNote, that since this function applies the iterator to each item in parallel\nthere is no guarantee that the iterator functions will complete in order.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A function to apply to each item in the array.\n The iterator is passed a callback(err) which must be called once it has \n completed. If no error has occured, the callback should be run without \n arguments or with an explicit null argument.\n* callback(err) - A callback which is called after all the iterator functions\n have finished, or an error has occurred.\n\n__Example__\n\n```js\n// assuming openFiles is an array of file names and saveFile is a function\n// to save the modified contents of that file:\n\nasync.each(openFiles, saveFile, function(err){\n // if any of the saves produced an error, err would equal that error\n});\n```\n\n---------------------------------------\n\n\n\n### eachSeries(arr, iterator, callback)\n\nThe same as each only the iterator is applied to each item in the array in\nseries. The next iterator is only called once the current one has completed\nprocessing. This means the iterator functions will complete in order.\n\n\n---------------------------------------\n\n\n\n### eachLimit(arr, limit, iterator, callback)\n\nThe same as each only no more than \"limit\" iterators will be simultaneously \nrunning at any time.\n\nNote that the items are not processed in batches, so there is no guarantee that\n the first \"limit\" iterator functions will complete before any others are \nstarted.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* limit - The maximum number of iterators to run at any time.\n* iterator(item, callback) - A function to apply to each item in the array.\n The iterator is passed a callback(err) which must be called once it has \n completed. 
If no error has occured, the callback should be run without \n arguments or with an explicit null argument.\n* callback(err) - A callback which is called after all the iterator functions\n have finished, or an error has occurred.\n\n__Example__\n\n```js\n// Assume documents is an array of JSON objects and requestApi is a\n// function that interacts with a rate-limited REST api.\n\nasync.eachLimit(documents, 20, requestApi, function(err){\n // if any of the saves produced an error, err would equal that error\n});\n```\n\n---------------------------------------\n\n\n### map(arr, iterator, callback)\n\nProduces a new array of values by mapping each value in the given array through\nthe iterator function. The iterator is called with an item from the array and a\ncallback for when it has finished processing. The callback takes 2 arguments, \nan error and the transformed item from the array. If the iterator passes an\nerror to this callback, the main callback for the map function is immediately\ncalled with the error.\n\nNote, that since this function applies the iterator to each item in parallel\nthere is no guarantee that the iterator functions will complete in order, however\nthe results array will be in the same order as the original array.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A function to apply to each item in the array.\n The iterator is passed a callback(err, transformed) which must be called once \n it has completed with an error (which can be null) and a transformed item.\n* callback(err, results) - A callback which is called after all the iterator\n functions have finished, or an error has occurred. 
Results is an array of the\n transformed items from the original array.\n\n__Example__\n\n```js\nasync.map(['file1','file2','file3'], fs.stat, function(err, results){\n // results is now an array of stats for each file\n});\n```\n\n---------------------------------------\n\n\n### mapSeries(arr, iterator, callback)\n\nThe same as map only the iterator is applied to each item in the array in\nseries. The next iterator is only called once the current one has completed\nprocessing. The results array will be in the same order as the original.\n\n\n---------------------------------------\n\n\n### mapLimit(arr, limit, iterator, callback)\n\nThe same as map only no more than \"limit\" iterators will be simultaneously \nrunning at any time.\n\nNote that the items are not processed in batches, so there is no guarantee that\n the first \"limit\" iterator functions will complete before any others are \nstarted.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* limit - The maximum number of iterators to run at any time.\n* iterator(item, callback) - A function to apply to each item in the array.\n The iterator is passed a callback(err, transformed) which must be called once \n it has completed with an error (which can be null) and a transformed item.\n* callback(err, results) - A callback which is called after all the iterator\n functions have finished, or an error has occurred. 
Results is an array of the\n transformed items from the original array.\n\n__Example__\n\n```js\nasync.map(['file1','file2','file3'], 1, fs.stat, function(err, results){\n // results is now an array of stats for each file\n});\n```\n\n---------------------------------------\n\n\n### filter(arr, iterator, callback)\n\n__Alias:__ select\n\nReturns a new array of all the values which pass an async truth test.\n_The callback for each iterator call only accepts a single argument of true or\nfalse, it does not accept an error argument first!_ This is in-line with the\nway node libraries work with truth tests like fs.exists. This operation is\nperformed in parallel, but the results array will be in the same order as the\noriginal.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A truth test to apply to each item in the array.\n The iterator is passed a callback(truthValue) which must be called with a \n boolean argument once it has completed.\n* callback(results) - A callback which is called after all the iterator\n functions have finished.\n\n__Example__\n\n```js\nasync.filter(['file1','file2','file3'], fs.exists, function(results){\n // results now equals an array of the existing files\n});\n```\n\n---------------------------------------\n\n\n### filterSeries(arr, iterator, callback)\n\n__alias:__ selectSeries\n\nThe same as filter only the iterator is applied to each item in the array in\nseries. The next iterator is only called once the current one has completed\nprocessing. The results array will be in the same order as the original.\n\n---------------------------------------\n\n\n### reject(arr, iterator, callback)\n\nThe opposite of filter. 
Removes values that pass an async truth test.\n\n---------------------------------------\n\n\n### rejectSeries(arr, iterator, callback)\n\nThe same as reject, only the iterator is applied to each item in the array\nin series.\n\n\n---------------------------------------\n\n\n### reduce(arr, memo, iterator, callback)\n\n__aliases:__ inject, foldl\n\nReduces a list of values into a single value using an async iterator to return\neach successive step. Memo is the initial state of the reduction. This\nfunction only operates in series. For performance reasons, it may make sense to\nsplit a call to this function into a parallel map, then use the normal\nArray.prototype.reduce on the results. This function is for situations where\neach step in the reduction needs to be async, if you can get the data before\nreducing it then it's probably a good idea to do so.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* memo - The initial state of the reduction.\n* iterator(memo, item, callback) - A function applied to each item in the\n array to produce the next step in the reduction. The iterator is passed a\n callback(err, reduction) which accepts an optional error as its first \n argument, and the state of the reduction as the second. If an error is \n passed to the callback, the reduction is stopped and the main callback is \n immediately called with the error.\n* callback(err, result) - A callback which is called after all the iterator\n functions have finished. 
Result is the reduced value.\n\n__Example__\n\n```js\nasync.reduce([1,2,3], 0, function(memo, item, callback){\n // pointless async:\n process.nextTick(function(){\n callback(null, memo + item)\n });\n}, function(err, result){\n // result is now equal to the last value of memo, which is 6\n});\n```\n\n---------------------------------------\n\n\n### reduceRight(arr, memo, iterator, callback)\n\n__Alias:__ foldr\n\nSame as reduce, only operates on the items in the array in reverse order.\n\n\n---------------------------------------\n\n\n### detect(arr, iterator, callback)\n\nReturns the first value in a list that passes an async truth test. The\niterator is applied in parallel, meaning the first iterator to return true will\nfire the detect callback with that result. That means the result might not be\nthe first item in the original array (in terms of order) that passes the test.\n\nIf order within the original array is important then look at detectSeries.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A truth test to apply to each item in the array.\n The iterator is passed a callback(truthValue) which must be called with a \n boolean argument once it has completed.\n* callback(result) - A callback which is called as soon as any iterator returns\n true, or after all the iterator functions have finished. Result will be\n the first item in the array that passes the truth test (iterator) or the\n value undefined if none passed.\n\n__Example__\n\n```js\nasync.detect(['file1','file2','file3'], fs.exists, function(result){\n // result now equals the first file in the list that exists\n});\n```\n\n---------------------------------------\n\n\n### detectSeries(arr, iterator, callback)\n\nThe same as detect, only the iterator is applied to each item in the array\nin series. 
This means the result is always the first in the original array (in\nterms of array order) that passes the truth test.\n\n\n---------------------------------------\n\n\n### sortBy(arr, iterator, callback)\n\nSorts a list by the results of running each value through an async iterator.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A function to apply to each item in the array.\n The iterator is passed a callback(err, sortValue) which must be called once it\n has completed with an error (which can be null) and a value to use as the sort\n criteria.\n* callback(err, results) - A callback which is called after all the iterator\n functions have finished, or an error has occurred. Results is the items from\n the original array sorted by the values returned by the iterator calls.\n\n__Example__\n\n```js\nasync.sortBy(['file1','file2','file3'], function(file, callback){\n fs.stat(file, function(err, stats){\n callback(err, stats.mtime);\n });\n}, function(err, results){\n // results is now the original array of files sorted by\n // modified date\n});\n```\n\n---------------------------------------\n\n\n### some(arr, iterator, callback)\n\n__Alias:__ any\n\nReturns true if at least one element in the array satisfies an async test.\n_The callback for each iterator call only accepts a single argument of true or\nfalse, it does not accept an error argument first!_ This is in-line with the\nway node libraries work with truth tests like fs.exists. Once any iterator\ncall returns true, the main callback is immediately called.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A truth test to apply to each item in the array.\n The iterator is passed a callback(truthValue) which must be called with a \n boolean argument once it has completed.\n* callback(result) - A callback which is called as soon as any iterator returns\n true, or after all the iterator functions have finished. 
Result will be\n either true or false depending on the values of the async tests.\n\n__Example__\n\n```js\nasync.some(['file1','file2','file3'], fs.exists, function(result){\n // if result is true then at least one of the files exists\n});\n```\n\n---------------------------------------\n\n\n### every(arr, iterator, callback)\n\n__Alias:__ all\n\nReturns true if every element in the array satisfies an async test.\n_The callback for each iterator call only accepts a single argument of true or\nfalse, it does not accept an error argument first!_ This is in-line with the\nway node libraries work with truth tests like fs.exists.\n\n__Arguments__\n\n* arr - An array to iterate over.\n* iterator(item, callback) - A truth test to apply to each item in the array.\n The iterator is passed a callback(truthValue) which must be called with a \n boolean argument once it has completed.\n* callback(result) - A callback which is called after all the iterator\n functions have finished. Result will be either true or false depending on\n the values of the async tests.\n\n__Example__\n\n```js\nasync.every(['file1','file2','file3'], fs.exists, function(result){\n // if result is true then every file exists\n});\n```\n\n---------------------------------------\n\n\n### concat(arr, iterator, callback)\n\nApplies an iterator to each item in a list, concatenating the results. Returns the\nconcatenated list. The iterators are called in parallel, and the results are\nconcatenated as they return. 
There is no guarantee that the results array will\nbe returned in the original order of the arguments passed to the iterator function.\n\n__Arguments__\n\n* arr - An array to iterate over\n* iterator(item, callback) - A function to apply to each item in the array.\n The iterator is passed a callback(err, results) which must be called once it \n has completed with an error (which can be null) and an array of results.\n* callback(err, results) - A callback which is called after all the iterator\n functions have finished, or an error has occurred. Results is an array containing\n the concatenated results of the iterator function.\n\n__Example__\n\n```js\nasync.concat(['dir1','dir2','dir3'], fs.readdir, function(err, files){\n // files is now a list of filenames that exist in the 3 directories\n});\n```\n\n---------------------------------------\n\n\n### concatSeries(arr, iterator, callback)\n\nSame as async.concat, but executes in series instead of parallel.\n\n\n## Control Flow\n\n\n### series(tasks, [callback])\n\nRun an array of functions in series, each one running once the previous\nfunction has completed. If any functions in the series pass an error to its\ncallback, no more functions are run and the callback for the series is\nimmediately called with the value of the error. Once the tasks have completed,\nthe results are passed to the final callback as an array.\n\nIt is also possible to use an object instead of an array. Each property will be\nrun as a function and the results will be passed to the final callback as an object\ninstead of an array. This can be a more readable way of handling results from\nasync.series.\n\n\n__Arguments__\n\n* tasks - An array or object containing functions to run, each function is passed\n a callback(err, result) it must call on completion with an error (which can\n be null) and an optional result value.\n* callback(err, results) - An optional callback to run once all the functions\n have completed. 
This function gets a results array (or object) containing all \n the result arguments passed to the task callbacks.\n\n__Example__\n\n```js\nasync.series([\n function(callback){\n // do some stuff ...\n callback(null, 'one');\n },\n function(callback){\n // do some more stuff ...\n callback(null, 'two');\n }\n],\n// optional callback\nfunction(err, results){\n // results is now equal to ['one', 'two']\n});\n\n\n// an example using an object instead of an array\nasync.series({\n one: function(callback){\n setTimeout(function(){\n callback(null, 1);\n }, 200);\n },\n two: function(callback){\n setTimeout(function(){\n callback(null, 2);\n }, 100);\n }\n},\nfunction(err, results) {\n // results is now equal to: {one: 1, two: 2}\n});\n```\n\n---------------------------------------\n\n\n### parallel(tasks, [callback])\n\nRun an array of functions in parallel, without waiting until the previous\nfunction has completed. If any of the functions pass an error to its\ncallback, the main callback is immediately called with the value of the error.\nOnce the tasks have completed, the results are passed to the final callback as an\narray.\n\nIt is also possible to use an object instead of an array. Each property will be\nrun as a function and the results will be passed to the final callback as an object\ninstead of an array. This can be a more readable way of handling results from\nasync.parallel.\n\n\n__Arguments__\n\n* tasks - An array or object containing functions to run, each function is passed \n a callback(err, result) it must call on completion with an error (which can\n be null) and an optional result value.\n* callback(err, results) - An optional callback to run once all the functions\n have completed. 
This function gets a results array (or object) containing all \n the result arguments passed to the task callbacks.\n\n__Example__\n\n```js\nasync.parallel([\n function(callback){\n setTimeout(function(){\n callback(null, 'one');\n }, 200);\n },\n function(callback){\n setTimeout(function(){\n callback(null, 'two');\n }, 100);\n }\n],\n// optional callback\nfunction(err, results){\n // the results array will equal ['one','two'] even though\n // the second function had a shorter timeout.\n});\n\n\n// an example using an object instead of an array\nasync.parallel({\n one: function(callback){\n setTimeout(function(){\n callback(null, 1);\n }, 200);\n },\n two: function(callback){\n setTimeout(function(){\n callback(null, 2);\n }, 100);\n }\n},\nfunction(err, results) {\n // results is now equals to: {one: 1, two: 2}\n});\n```\n\n---------------------------------------\n\n\n### parallelLimit(tasks, limit, [callback])\n\nThe same as parallel only the tasks are executed in parallel with a maximum of \"limit\" \ntasks executing at any time.\n\nNote that the tasks are not executed in batches, so there is no guarantee that \nthe first \"limit\" tasks will complete before any others are started.\n\n__Arguments__\n\n* tasks - An array or object containing functions to run, each function is passed \n a callback(err, result) it must call on completion with an error (which can\n be null) and an optional result value.\n* limit - The maximum number of tasks to run at any time.\n* callback(err, results) - An optional callback to run once all the functions\n have completed. This function gets a results array (or object) containing all \n the result arguments passed to the task callbacks.\n\n---------------------------------------\n\n\n### whilst(test, fn, callback)\n\nRepeatedly call fn, while test returns true. 
Calls the callback when stopped,\nor an error occurs.\n\n__Arguments__\n\n* test() - synchronous truth test to perform before each execution of fn.\n* fn(callback) - A function to call each time the test passes. The function is\n passed a callback(err) which must be called once it has completed with an \n optional error argument.\n* callback(err) - A callback which is called after the test fails and repeated\n execution of fn has stopped.\n\n__Example__\n\n```js\nvar count = 0;\n\nasync.whilst(\n function () { return count < 5; },\n function (callback) {\n count++;\n setTimeout(callback, 1000);\n },\n function (err) {\n // 5 seconds have passed\n }\n);\n```\n\n---------------------------------------\n\n\n### doWhilst(fn, test, callback)\n\nThe post check version of whilst. To reflect the difference in the order of operations `test` and `fn` arguments are switched. `doWhilst` is to `whilst` as `do while` is to `while` in plain JavaScript.\n\n---------------------------------------\n\n\n### until(test, fn, callback)\n\nRepeatedly call fn, until test returns true. Calls the callback when stopped,\nor an error occurs.\n\nThe inverse of async.whilst.\n\n---------------------------------------\n\n\n### doUntil(fn, test, callback)\n\nLike doWhilst except the test is inverted. Note the argument ordering differs from `until`.\n\n---------------------------------------\n\n\n### forever(fn, callback)\n\nCalls the asynchronous function 'fn' repeatedly, in series, indefinitely.\nIf an error is passed to fn's callback then 'callback' is called with the\nerror, otherwise it will never be called.\n\n---------------------------------------\n\n\n### waterfall(tasks, [callback])\n\nRuns an array of functions in series, each passing their results to the next in\nthe array. 
However, if any of the functions pass an error to the callback, the\nnext function is not executed and the main callback is immediately called with\nthe error.\n\n__Arguments__\n\n* tasks - An array of functions to run, each function is passed a \n callback(err, result1, result2, ...) it must call on completion. The first\n argument is an error (which can be null) and any further arguments will be \n passed as arguments in order to the next task.\n* callback(err, [results]) - An optional callback to run once all the functions\n have completed. This will be passed the results of the last task's callback.\n\n\n\n__Example__\n\n```js\nasync.waterfall([\n function(callback){\n callback(null, 'one', 'two');\n },\n function(arg1, arg2, callback){\n callback(null, 'three');\n },\n function(arg1, callback){\n // arg1 now equals 'three'\n callback(null, 'done');\n }\n], function (err, result) {\n // result now equals 'done' \n});\n```\n\n---------------------------------------\n\n### compose(fn1, fn2...)\n\nCreates a function which is a composition of the passed asynchronous\nfunctions. Each function consumes the return value of the function that\nfollows. Composing functions f(), g() and h() would produce the result of\nf(g(h())), only this version uses callbacks to obtain the return values.\n\nEach function is executed with the `this` binding of the composed function.\n\n__Arguments__\n\n* functions... 
- the asynchronous functions to compose\n\n\n__Example__\n\n```js\nfunction add1(n, callback) {\n setTimeout(function () {\n callback(null, n + 1);\n }, 10);\n}\n\nfunction mul3(n, callback) {\n setTimeout(function () {\n callback(null, n * 3);\n }, 10);\n}\n\nvar add1mul3 = async.compose(mul3, add1);\n\nadd1mul3(4, function (err, result) {\n // result now equals 15\n});\n```\n\n---------------------------------------\n\n### applyEach(fns, args..., callback)\n\nApplies the provided arguments to each function in the array, calling the\ncallback after all functions have completed. If you only provide the first\nargument then it will return a function which lets you pass in the\narguments as if it were a single function call.\n\n__Arguments__\n\n* fns - the asynchronous functions to all call with the same arguments\n* args... - any number of separate arguments to pass to the function\n* callback - the final argument should be the callback, called when all\n functions have completed processing\n\n\n__Example__\n\n```js\nasync.applyEach([enableSearch, updateSchema], 'bucket', callback);\n\n// partial application example:\nasync.each(\n buckets,\n async.applyEach([enableSearch, updateSchema]),\n callback\n);\n```\n\n---------------------------------------\n\n\n### applyEachSeries(arr, iterator, callback)\n\nThe same as applyEach only the functions are applied in series.\n\n---------------------------------------\n\n\n### queue(worker, concurrency)\n\nCreates a queue object with the specified concurrency. Tasks added to the\nqueue will be processed in parallel (up to the concurrency limit). If all\nworkers are in progress, the task is queued until one is available. 
Once\na worker has completed a task, the task's callback is called.\n\n__Arguments__\n\n* worker(task, callback) - An asynchronous function for processing a queued\n task, which must call its callback(err) argument when finished, with an \n optional error as an argument.\n* concurrency - An integer for determining how many worker functions should be\n run in parallel.\n\n__Queue objects__\n\nThe queue object returned by this function has the following properties and\nmethods:\n\n* length() - a function returning the number of items waiting to be processed.\n* concurrency - an integer for determining how many worker functions should be\n run in parallel. This property can be changed after a queue is created to\n alter the concurrency on-the-fly.\n* push(task, [callback]) - add a new task to the queue, the callback is called\n once the worker has finished processing the task.\n instead of a single task, an array of tasks can be submitted. the respective callback is used for every task in the list.\n* unshift(task, [callback]) - add a new task to the front of the queue.\n* saturated - a callback that is called when the queue length hits the concurrency and further tasks will be queued\n* empty - a callback that is called when the last item from the queue is given to a worker\n* drain - a callback that is called when the last item from the queue has returned from the worker\n\n__Example__\n\n```js\n// create a queue object with concurrency 2\n\nvar q = async.queue(function (task, callback) {\n console.log('hello ' + task.name);\n callback();\n}, 2);\n\n\n// assign a callback\nq.drain = function() {\n console.log('all items have been processed');\n}\n\n// add some items to the queue\n\nq.push({name: 'foo'}, function (err) {\n console.log('finished processing foo');\n});\nq.push({name: 'bar'}, function (err) {\n console.log('finished processing bar');\n});\n\n// add some items to the queue (batch-wise)\n\nq.push([{name: 'baz'},{name: 'bay'},{name: 'bax'}], function (err) 
{\n console.log('finished processing bar');\n});\n\n// add some items to the front of the queue\n\nq.unshift({name: 'bar'}, function (err) {\n console.log('finished processing bar');\n});\n```\n\n---------------------------------------\n\n\n### cargo(worker, [payload])\n\nCreates a cargo object with the specified payload. Tasks added to the\ncargo will be processed altogether (up to the payload limit). If the\nworker is in progress, the task is queued until it is available. Once\nthe worker has completed some tasks, each callback of those tasks is called.\n\n__Arguments__\n\n* worker(tasks, callback) - An asynchronous function for processing an array of\n queued tasks, which must call its callback(err) argument when finished, with \n an optional error as an argument.\n* payload - An optional integer for determining how many tasks should be\n processed per round; if omitted, the default is unlimited.\n\n__Cargo objects__\n\nThe cargo object returned by this function has the following properties and\nmethods:\n\n* length() - a function returning the number of items waiting to be processed.\n* payload - an integer for determining how many tasks should be\n process per round. This property can be changed after a cargo is created to\n alter the payload on-the-fly.\n* push(task, [callback]) - add a new task to the queue, the callback is called\n once the worker has finished processing the task.\n instead of a single task, an array of tasks can be submitted. 
the respective callback is used for every task in the list.\n* saturated - a callback that is called when the queue length hits the concurrency and further tasks will be queued\n* empty - a callback that is called when the last item from the queue is given to a worker\n* drain - a callback that is called when the last item from the queue has returned from the worker\n\n__Example__\n\n```js\n// create a cargo object with payload 2\n\nvar cargo = async.cargo(function (tasks, callback) {\n for(var i=0; i\n### auto(tasks, [callback])\n\nDetermines the best order for running functions based on their requirements.\nEach function can optionally depend on other functions being completed first,\nand each function is run as soon as its requirements are satisfied. If any of\nthe functions pass an error to their callback, that function will not complete\n(so any other functions depending on it will not run) and the main callback\nwill be called immediately with the error. Functions also receive an object\ncontaining the results of functions which have completed so far.\n\nNote, all functions are called with a results object as a second argument, \nso it is unsafe to pass functions in the tasks object which cannot handle the\nextra argument. For example, this snippet of code:\n\n```js\nasync.auto({\n readData: async.apply(fs.readFile, 'data.txt', 'utf-8');\n}, callback);\n```\n\nwill have the effect of calling readFile with the results object as the last\nargument, which will fail:\n\n```js\nfs.readFile('data.txt', 'utf-8', cb, {});\n```\n\nInstead, wrap the call to readFile in a function which does not forward the \nresults object:\n\n```js\nasync.auto({\n readData: function(cb, results){\n fs.readFile('data.txt', 'utf-8', cb);\n }\n}, callback);\n```\n\n__Arguments__\n\n* tasks - An object literal containing named functions or an array of\n requirements, with the function itself the last item in the array. 
The key\n used for each function or array is used when specifying requirements. The \n function receives two arguments: (1) a callback(err, result) which must be \n called when finished, passing an error (which can be null) and the result of \n the function's execution, and (2) a results object, containing the results of\n the previously executed functions.\n* callback(err, results) - An optional callback which is called when all the\n tasks have been completed. The callback will receive an error as an argument\n if any tasks pass an error to their callback. Results will always be passed\n\tbut if an error occurred, no other tasks will be performed, and the results\n\tobject will only contain partial results.\n \n\n__Example__\n\n```js\nasync.auto({\n get_data: function(callback){\n // async code to get some data\n },\n make_folder: function(callback){\n // async code to create a directory to store a file in\n // this is run at the same time as getting the data\n },\n write_file: ['get_data', 'make_folder', function(callback){\n // once there is some data and the directory exists,\n // write the data to a file in the directory\n callback(null, filename);\n }],\n email_link: ['write_file', function(callback, results){\n // once the file is written let's email a link to it...\n // results.write_file contains the filename returned by write_file.\n }]\n});\n```\n\nThis is a fairly trivial example, but to do this using the basic parallel and\nseries functions would look like this:\n\n```js\nasync.parallel([\n function(callback){\n // async code to get some data\n },\n function(callback){\n // async code to create a directory to store a file in\n // this is run at the same time as getting the data\n }\n],\nfunction(err, results){\n async.series([\n function(callback){\n // once there is some data and the directory exists,\n // write the data to a file in the directory\n },\n function(callback){\n // once the file is written let's email a link to it...\n }\n 
]);\n});\n```\n\nFor a complicated series of async tasks using the auto function makes adding\nnew tasks much easier and makes the code more readable.\n\n\n---------------------------------------\n\n\n### iterator(tasks)\n\nCreates an iterator function which calls the next function in the array,\nreturning a continuation to call the next one after that. It's also possible to\n'peek' the next iterator by doing iterator.next().\n\nThis function is used internally by the async module but can be useful when\nyou want to manually control the flow of functions in series.\n\n__Arguments__\n\n* tasks - An array of functions to run.\n\n__Example__\n\n```js\nvar iterator = async.iterator([\n function(){ sys.p('one'); },\n function(){ sys.p('two'); },\n function(){ sys.p('three'); }\n]);\n\nnode> var iterator2 = iterator();\n'one'\nnode> var iterator3 = iterator2();\n'two'\nnode> iterator3();\n'three'\nnode> var nextfn = iterator2.next();\nnode> nextfn();\n'three'\n```\n\n---------------------------------------\n\n\n### apply(function, arguments..)\n\nCreates a continuation function with some arguments already applied, a useful\nshorthand when combined with other control flow functions. Any arguments\npassed to the returned function are added to the arguments originally passed\nto apply.\n\n__Arguments__\n\n* function - The function you want to eventually apply all arguments to.\n* arguments... 
- Any number of arguments to automatically apply when the\n continuation is called.\n\n__Example__\n\n```js\n// using apply\n\nasync.parallel([\n async.apply(fs.writeFile, 'testfile1', 'test1'),\n async.apply(fs.writeFile, 'testfile2', 'test2'),\n]);\n\n\n// the same process without using apply\n\nasync.parallel([\n function(callback){\n fs.writeFile('testfile1', 'test1', callback);\n },\n function(callback){\n fs.writeFile('testfile2', 'test2', callback);\n }\n]);\n```\n\nIt's possible to pass any number of additional arguments when calling the\ncontinuation:\n\n```js\nnode> var fn = async.apply(sys.puts, 'one');\nnode> fn('two', 'three');\none\ntwo\nthree\n```\n\n---------------------------------------\n\n\n### nextTick(callback)\n\nCalls the callback on a later loop around the event loop. In node.js this just\ncalls process.nextTick, in the browser it falls back to setImmediate(callback)\nif available, otherwise setTimeout(callback, 0), which means other higher priority\nevents may precede the execution of the callback.\n\nThis is used internally for browser-compatibility purposes.\n\n__Arguments__\n\n* callback - The function to call on a later loop around the event loop.\n\n__Example__\n\n```js\nvar call_order = [];\nasync.nextTick(function(){\n call_order.push('two');\n // call_order now equals ['one','two']\n});\ncall_order.push('one')\n```\n\n\n### times(n, callback)\n\nCalls the callback n times and accumulates results in the same manner\nyou would use with async.map.\n\n__Arguments__\n\n* n - The number of times to run the function.\n* callback - The function to call n times.\n\n__Example__\n\n```js\n// Pretend this is some complicated async factory\nvar createUser = function(id, callback) {\n callback(null, {\n id: 'user' + id\n })\n}\n// generate 5 users\nasync.times(5, function(n, next){\n createUser(n, function(err, user) {\n next(err, user)\n })\n}, function(err, users) {\n // we should now have 5 users\n});\n```\n\n\n### timesSeries(n, 
callback)\n\nThe same as times only the iterator is applied to each item in the array in\nseries. The next iterator is only called once the current one has completed\nprocessing. The results array will be in the same order as the original.\n\n\n## Utils\n\n\n### memoize(fn, [hasher])\n\nCaches the results of an async function. When creating a hash to store function\nresults against, the callback is omitted from the hash and an optional hash\nfunction can be used.\n\nThe cache of results is exposed as the `memo` property of the function returned\nby `memoize`.\n\n__Arguments__\n\n* fn - the function you to proxy and cache results from.\n* hasher - an optional function for generating a custom hash for storing\n results, it has all the arguments applied to it apart from the callback, and\n must be synchronous.\n\n__Example__\n\n```js\nvar slow_fn = function (name, callback) {\n // do something\n callback(null, result);\n};\nvar fn = async.memoize(slow_fn);\n\n// fn can now be used as if it were slow_fn\nfn('some name', function () {\n // callback\n});\n```\n\n\n### unmemoize(fn)\n\nUndoes a memoized function, reverting it to the original, unmemoized\nform. Comes handy in tests.\n\n__Arguments__\n\n* fn - the memoized function\n\n\n### log(function, arguments)\n\nLogs the result of an async function to the console. Only works in node.js or\nin browsers that support console.log and console.error (such as FF and Chrome).\nIf multiple arguments are returned from the async function, console.log is\ncalled on each argument in order.\n\n__Arguments__\n\n* function - The function you want to eventually apply all arguments to.\n* arguments... 
- Any number of arguments to apply to the function.\n\n__Example__\n\n```js\nvar hello = function(name, callback){\n setTimeout(function(){\n callback(null, 'hello ' + name);\n }, 1000);\n};\n```\n```js\nnode> async.log(hello, 'world');\n'hello world'\n```\n\n---------------------------------------\n\n\n### dir(function, arguments)\n\nLogs the result of an async function to the console using console.dir to\ndisplay the properties of the resulting object. Only works in node.js or\nin browsers that support console.dir and console.error (such as FF and Chrome).\nIf multiple arguments are returned from the async function, console.dir is\ncalled on each argument in order.\n\n__Arguments__\n\n* function - The function you want to eventually apply all arguments to.\n* arguments... - Any number of arguments to apply to the function.\n\n__Example__\n\n```js\nvar hello = function(name, callback){\n setTimeout(function(){\n callback(null, {hello: name});\n }, 1000);\n};\n```\n```js\nnode> async.dir(hello, 'world');\n{hello: 'world'}\n```\n\n---------------------------------------\n\n\n### noConflict()\n\nChanges the value of async back to its original value, returning a reference to the\nasync object.\n", - "readmeFilename": "README.md", "homepage": "https://github.com/caolan/async", - "_id": "async@0.2.9", - "_from": "async@~0.2.9" + "_id": "async@0.9.0", + "dist": { + "shasum": "ac3613b1da9bed1b47510bb4651b8931e47146c7", + "tarball": "http://registry.npmjs.org/async/-/async-0.9.0.tgz" + }, + "_from": "async@>=0.9.0 <0.10.0", + "_npmVersion": "1.4.3", + "_npmUser": { + "name": "caolan", + "email": "caolan.mcmahon@gmail.com" + }, + "maintainers": [ + { + "name": "caolan", + "email": "caolan@caolanmcmahon.com" + } + ], + "directories": {}, + "_shasum": "ac3613b1da9bed1b47510bb4651b8931e47146c7", + "_resolved": "https://registry.npmjs.org/async/-/async-0.9.0.tgz", + "readme": "ERROR: No README data found!" 
} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,15 +1,18 @@ # Async.js +[![Build Status via Travis CI](https://travis-ci.org/caolan/async.svg?branch=master)](https://travis-ci.org/caolan/async) + + Async is a utility module which provides straight-forward, powerful functions for working with asynchronous JavaScript. Although originally designed for -use with [node.js](http://nodejs.org), it can also be used directly in the +use with [Node.js](http://nodejs.org), it can also be used directly in the browser. Also supports [component](https://github.com/component/component). Async provides around 20 functions that include the usual 'functional' -suspects (map, reduce, filter, each…) as well as some common patterns -for asynchronous control flow (parallel, series, waterfall…). All these -functions assume you follow the node.js convention of providing a single -callback as the last argument of your async function. +suspects (`map`, `reduce`, `filter`, `each`…) as well as some common patterns +for asynchronous control flow (`parallel`, `series`, `waterfall`…). All these +functions assume you follow the Node.js convention of providing a single +callback as the last argument of your `async` function. ## Quick Examples @@ -42,8 +45,8 @@ ### Binding a context to an iterator -This section is really about bind, not about async. If you are wondering how to -make async execute your iterators in a given context, or are confused as to why +This section is really about `bind`, not about `async`. 
If you are wondering how to +make `async` execute your iterators in a given context, or are confused as to why a method of another library isn't working as an iterator, study this example: ```js @@ -78,7 +81,7 @@ The source is available for download from [GitHub](http://github.com/caolan/async). -Alternatively, you can install using Node Package Manager (npm): +Alternatively, you can install using Node Package Manager (`npm`): npm install async @@ -86,7 +89,9 @@ ## In the Browser -So far it's been tested in IE6, IE7, IE8, FF3.6 and Chrome 5. Usage: +So far it's been tested in IE6, IE7, IE8, FF3.6 and Chrome 5. + +Usage: ```html @@ -103,45 +108,59 @@ ### Collections -* [each](#each) -* [map](#map) -* [filter](#filter) -* [reject](#reject) -* [reduce](#reduce) -* [detect](#detect) -* [sortBy](#sortBy) -* [some](#some) -* [every](#every) -* [concat](#concat) +* [`each`](#each) +* [`eachSeries`](#eachSeries) +* [`eachLimit`](#eachLimit) +* [`map`](#map) +* [`mapSeries`](#mapSeries) +* [`mapLimit`](#mapLimit) +* [`filter`](#filter) +* [`filterSeries`](#filterSeries) +* [`reject`](#reject) +* [`rejectSeries`](#rejectSeries) +* [`reduce`](#reduce) +* [`reduceRight`](#reduceRight) +* [`detect`](#detect) +* [`detectSeries`](#detectSeries) +* [`sortBy`](#sortBy) +* [`some`](#some) +* [`every`](#every) +* [`concat`](#concat) +* [`concatSeries`](#concatSeries) ### Control Flow -* [series](#series) -* [parallel](#parallel) -* [whilst](#whilst) -* [doWhilst](#doWhilst) -* [until](#until) -* [doUntil](#doUntil) -* [forever](#forever) -* [waterfall](#waterfall) -* [compose](#compose) -* [applyEach](#applyEach) -* [queue](#queue) -* [cargo](#cargo) -* [auto](#auto) -* [iterator](#iterator) -* [apply](#apply) -* [nextTick](#nextTick) -* [times](#times) -* [timesSeries](#timesSeries) +* [`series`](#seriestasks-callback) +* [`parallel`](#parallel) +* [`parallelLimit`](#parallellimittasks-limit-callback) +* [`whilst`](#whilst) +* [`doWhilst`](#doWhilst) +* [`until`](#until) +* 
[`doUntil`](#doUntil) +* [`forever`](#forever) +* [`waterfall`](#waterfall) +* [`compose`](#compose) +* [`seq`](#seq) +* [`applyEach`](#applyEach) +* [`applyEachSeries`](#applyEachSeries) +* [`queue`](#queue) +* [`priorityQueue`](#priorityQueue) +* [`cargo`](#cargo) +* [`auto`](#auto) +* [`retry`](#retry) +* [`iterator`](#iterator) +* [`apply`](#apply) +* [`nextTick`](#nextTick) +* [`times`](#times) +* [`timesSeries`](#timesSeries) ### Utils -* [memoize](#memoize) -* [unmemoize](#unmemoize) -* [log](#log) -* [dir](#dir) -* [noConflict](#noConflict) +* [`memoize`](#memoize) +* [`unmemoize`](#unmemoize) +* [`log`](#log) +* [`dir`](#dir) +* [`noConflict`](#noConflict) ## Collections @@ -150,25 +169,26 @@ ### each(arr, iterator, callback) -Applies an iterator function to each item in an array, in parallel. -The iterator is called with an item from the list and a callback for when it -has finished. If the iterator passes an error to this callback, the main -callback for the each function is immediately called with the error. +Applies the function `iterator` to each item in `arr`, in parallel. +The `iterator` is called with an item from the list, and a callback for when it +has finished. If the `iterator` passes an error to its `callback`, the main +`callback` (for the `each` function) is immediately called with the error. -Note, that since this function applies the iterator to each item in parallel +Note, that since this function applies `iterator` to each item in parallel, there is no guarantee that the iterator functions will complete in order. __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A function to apply to each item in the array. - The iterator is passed a callback(err) which must be called once it has - completed. If no error has occured, the callback should be run without - arguments or with an explicit null argument. 
-* callback(err) - A callback which is called after all the iterator functions - have finished, or an error has occurred. +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A function to apply to each item in `arr`. + The iterator is passed a `callback(err)` which must be called once it has + completed. If no error has occured, the `callback` should be run without + arguments or with an explicit `null` argument. +* `callback(err)` - A callback which is called when all `iterator` functions + have finished, or an error occurs. + +__Examples__ -__Example__ ```js // assuming openFiles is an array of file names and saveFile is a function @@ -179,15 +199,43 @@ }); ``` +```js +// assuming openFiles is an array of file names + +async.each(openFiles, function( file, callback) { + + // Perform operation on file here. + console.log('Processing file ' + file); + + if( file.length > 32 ) { + console.log('This file name is too long'); + callback('File name too long'); + } else { + // Do work to process file here + console.log('File processed'); + callback(); + } +}, function(err){ + // if any of the file processing produced an error, err would equal that error + if( err ) { + // One of the iterations produced an error. + // All processing will now stop. + console.log('A file failed to process'); + } else { + console.log('All files have been processed successfully'); + } +}); +``` + --------------------------------------- ### eachSeries(arr, iterator, callback) -The same as each only the iterator is applied to each item in the array in -series. The next iterator is only called once the current one has completed -processing. This means the iterator functions will complete in order. +The same as [`each`](#each), only `iterator` is applied to each item in `arr` in +series. The next `iterator` is only called once the current one has completed. +This means the `iterator` functions will complete in order. 
--------------------------------------- @@ -196,23 +244,22 @@ ### eachLimit(arr, limit, iterator, callback) -The same as each only no more than "limit" iterators will be simultaneously +The same as [`each`](#each), only no more than `limit` `iterator`s will be simultaneously running at any time. -Note that the items are not processed in batches, so there is no guarantee that - the first "limit" iterator functions will complete before any others are -started. +Note that the items in `arr` are not processed in batches, so there is no guarantee that +the first `limit` `iterator` functions will complete before any others are started. __Arguments__ -* arr - An array to iterate over. -* limit - The maximum number of iterators to run at any time. -* iterator(item, callback) - A function to apply to each item in the array. - The iterator is passed a callback(err) which must be called once it has +* `arr` - An array to iterate over. +* `limit` - The maximum number of `iterator`s to run at any time. +* `iterator(item, callback)` - A function to apply to each item in `arr`. + The iterator is passed a `callback(err)` which must be called once it has completed. If no error has occured, the callback should be run without - arguments or with an explicit null argument. -* callback(err) - A callback which is called after all the iterator functions - have finished, or an error has occurred. + arguments or with an explicit `null` argument. +* `callback(err)` - A callback which is called when all `iterator` functions + have finished, or an error occurs. __Example__ @@ -230,26 +277,25 @@ ### map(arr, iterator, callback) -Produces a new array of values by mapping each value in the given array through -the iterator function. The iterator is called with an item from the array and a -callback for when it has finished processing. The callback takes 2 arguments, -an error and the transformed item from the array. 
If the iterator passes an -error to this callback, the main callback for the map function is immediately -called with the error. - -Note, that since this function applies the iterator to each item in parallel -there is no guarantee that the iterator functions will complete in order, however -the results array will be in the same order as the original array. +Produces a new array of values by mapping each value in `arr` through +the `iterator` function. The `iterator` is called with an item from `arr` and a +callback for when it has finished processing. Each of these callback takes 2 arguments: +an `error`, and the transformed item from `arr`. If `iterator` passes an error to this +callback, the main `callback` (for the `map` function) is immediately called with the error. + +Note, that since this function applies the `iterator` to each item in parallel, +there is no guarantee that the `iterator` functions will complete in order. +However, the results array will be in the same order as the original `arr`. __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A function to apply to each item in the array. - The iterator is passed a callback(err, transformed) which must be called once - it has completed with an error (which can be null) and a transformed item. -* callback(err, results) - A callback which is called after all the iterator - functions have finished, or an error has occurred. Results is an array of the - transformed items from the original array. +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A function to apply to each item in `arr`. + The iterator is passed a `callback(err, transformed)` which must be called once + it has completed with an error (which can be `null`) and a transformed item. +* `callback(err, results)` - A callback which is called when all `iterator` + functions have finished, or an error occurs. Results is an array of the + transformed items from the `arr`. 
__Example__ @@ -264,9 +310,9 @@ ### mapSeries(arr, iterator, callback) -The same as map only the iterator is applied to each item in the array in -series. The next iterator is only called once the current one has completed -processing. The results array will be in the same order as the original. +The same as [`map`](#map), only the `iterator` is applied to each item in `arr` in +series. The next `iterator` is only called once the current one has completed. +The results array will be in the same order as the original. --------------------------------------- @@ -274,53 +320,53 @@ ### mapLimit(arr, limit, iterator, callback) -The same as map only no more than "limit" iterators will be simultaneously +The same as [`map`](#map), only no more than `limit` `iterator`s will be simultaneously running at any time. -Note that the items are not processed in batches, so there is no guarantee that - the first "limit" iterator functions will complete before any others are -started. +Note that the items are not processed in batches, so there is no guarantee that +the first `limit` `iterator` functions will complete before any others are started. __Arguments__ -* arr - An array to iterate over. -* limit - The maximum number of iterators to run at any time. -* iterator(item, callback) - A function to apply to each item in the array. - The iterator is passed a callback(err, transformed) which must be called once - it has completed with an error (which can be null) and a transformed item. -* callback(err, results) - A callback which is called after all the iterator - functions have finished, or an error has occurred. Results is an array of the - transformed items from the original array. +* `arr` - An array to iterate over. +* `limit` - The maximum number of `iterator`s to run at any time. +* `iterator(item, callback)` - A function to apply to each item in `arr`. 
+ The iterator is passed a `callback(err, transformed)` which must be called once + it has completed with an error (which can be `null`) and a transformed item. +* `callback(err, results)` - A callback which is called when all `iterator` + calls have finished, or an error occurs. The result is an array of the + transformed items from the original `arr`. __Example__ ```js -async.map(['file1','file2','file3'], 1, fs.stat, function(err, results){ +async.mapLimit(['file1','file2','file3'], 1, fs.stat, function(err, results){ // results is now an array of stats for each file }); ``` --------------------------------------- + ### filter(arr, iterator, callback) -__Alias:__ select +__Alias:__ `select` -Returns a new array of all the values which pass an async truth test. -_The callback for each iterator call only accepts a single argument of true or -false, it does not accept an error argument first!_ This is in-line with the -way node libraries work with truth tests like fs.exists. This operation is +Returns a new array of all the values in `arr` which pass an async truth test. +_The callback for each `iterator` call only accepts a single argument of `true` or +`false`; it does not accept an error argument first!_ This is in-line with the +way node libraries work with truth tests like `fs.exists`. This operation is performed in parallel, but the results array will be in the same order as the original. __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A truth test to apply to each item in the array. - The iterator is passed a callback(truthValue) which must be called with a +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A truth test to apply to each item in `arr`. + The `iterator` is passed a `callback(truthValue)`, which must be called with a boolean argument once it has completed. 
-* callback(results) - A callback which is called after all the iterator +* `callback(results)` - A callback which is called after all the `iterator` functions have finished. __Example__ @@ -333,28 +379,29 @@ --------------------------------------- + ### filterSeries(arr, iterator, callback) -__alias:__ selectSeries +__Alias:__ `selectSeries` -The same as filter only the iterator is applied to each item in the array in -series. The next iterator is only called once the current one has completed -processing. The results array will be in the same order as the original. +The same as [`filter`](#filter) only the `iterator` is applied to each item in `arr` in +series. The next `iterator` is only called once the current one has completed. +The results array will be in the same order as the original. --------------------------------------- ### reject(arr, iterator, callback) -The opposite of filter. Removes values that pass an async truth test. +The opposite of [`filter`](#filter). Removes values that pass an `async` truth test. --------------------------------------- ### rejectSeries(arr, iterator, callback) -The same as reject, only the iterator is applied to each item in the array +The same as [`reject`](#reject), only the `iterator` is applied to each item in `arr` in series. @@ -363,27 +410,28 @@ ### reduce(arr, memo, iterator, callback) -__aliases:__ inject, foldl +__Aliases:__ `inject`, `foldl` + +Reduces `arr` into a single value using an async `iterator` to return +each successive step. `memo` is the initial state of the reduction. +This function only operates in series. -Reduces a list of values into a single value using an async iterator to return -each successive step. Memo is the initial state of the reduction. This -function only operates in series. For performance reasons, it may make sense to -split a call to this function into a parallel map, then use the normal -Array.prototype.reduce on the results. 
This function is for situations where -each step in the reduction needs to be async, if you can get the data before -reducing it then it's probably a good idea to do so. +For performance reasons, it may make sense to split a call to this function into +a parallel map, and then use the normal `Array.prototype.reduce` on the results. +This function is for situations where each step in the reduction needs to be async; +if you can get the data before reducing it, then it's probably a good idea to do so. __Arguments__ -* arr - An array to iterate over. -* memo - The initial state of the reduction. -* iterator(memo, item, callback) - A function applied to each item in the - array to produce the next step in the reduction. The iterator is passed a - callback(err, reduction) which accepts an optional error as its first +* `arr` - An array to iterate over. +* `memo` - The initial state of the reduction. +* `iterator(memo, item, callback)` - A function applied to each item in the + array to produce the next step in the reduction. The `iterator` is passed a + `callback(err, reduction)` which accepts an optional error as its first argument, and the state of the reduction as the second. If an error is - passed to the callback, the reduction is stopped and the main callback is + passed to the callback, the reduction is stopped and the main `callback` is immediately called with the error. -* callback(err, result) - A callback which is called after all the iterator +* `callback(err, result)` - A callback which is called after all the `iterator` functions have finished. Result is the reduced value. __Example__ @@ -404,9 +452,9 @@ ### reduceRight(arr, memo, iterator, callback) -__Alias:__ foldr +__Alias:__ `foldr` -Same as reduce, only operates on the items in the array in reverse order. +Same as [`reduce`](#reduce), only operates on `arr` in reverse order. 
--------------------------------------- @@ -414,23 +462,23 @@ ### detect(arr, iterator, callback) -Returns the first value in a list that passes an async truth test. The -iterator is applied in parallel, meaning the first iterator to return true will -fire the detect callback with that result. That means the result might not be -the first item in the original array (in terms of order) that passes the test. +Returns the first value in `arr` that passes an async truth test. The +`iterator` is applied in parallel, meaning the first iterator to return `true` will +fire the detect `callback` with that result. That means the result might not be +the first item in the original `arr` (in terms of order) that passes the test. -If order within the original array is important then look at detectSeries. +If order within the original `arr` is important, then look at [`detectSeries`](#detectSeries). __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A truth test to apply to each item in the array. - The iterator is passed a callback(truthValue) which must be called with a +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A truth test to apply to each item in `arr`. + The iterator is passed a `callback(truthValue)` which must be called with a boolean argument once it has completed. -* callback(result) - A callback which is called as soon as any iterator returns - true, or after all the iterator functions have finished. Result will be +* `callback(result)` - A callback which is called as soon as any iterator returns + `true`, or after all the `iterator` functions have finished. Result will be the first item in the array that passes the truth test (iterator) or the - value undefined if none passed. + value `undefined` if none passed. __Example__ @@ -445,8 +493,8 @@ ### detectSeries(arr, iterator, callback) -The same as detect, only the iterator is applied to each item in the array -in series. 
This means the result is always the first in the original array (in +The same as [`detect`](#detect), only the `iterator` is applied to each item in `arr` +in series. This means the result is always the first in the original `arr` (in terms of array order) that passes the truth test. @@ -455,18 +503,18 @@ ### sortBy(arr, iterator, callback) -Sorts a list by the results of running each value through an async iterator. +Sorts a list by the results of running each `arr` value through an async `iterator`. __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A function to apply to each item in the array. - The iterator is passed a callback(err, sortValue) which must be called once it - has completed with an error (which can be null) and a value to use as the sort +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A function to apply to each item in `arr`. + The iterator is passed a `callback(err, sortValue)` which must be called once it + has completed with an error (which can be `null`) and a value to use as the sort criteria. -* callback(err, results) - A callback which is called after all the iterator - functions have finished, or an error has occurred. Results is the items from - the original array sorted by the values returned by the iterator calls. +* `callback(err, results)` - A callback which is called after all the `iterator` + functions have finished, or an error occurs. Results is the items from + the original `arr` sorted by the values returned by the `iterator` calls. 
__Example__ @@ -481,28 +529,48 @@ }); ``` +__Sort Order__ + +By modifying the callback parameter the sorting order can be influenced: + +```js +//ascending order +async.sortBy([1,9,3,5], function(x, callback){ + callback(null, x); +}, function(err,result){ + //result callback +} ); + +//descending order +async.sortBy([1,9,3,5], function(x, callback){ + callback(null, x*-1); //<- x*-1 instead of x, turns the order around +}, function(err,result){ + //result callback +} ); +``` + --------------------------------------- ### some(arr, iterator, callback) -__Alias:__ any +__Alias:__ `any` -Returns true if at least one element in the array satisfies an async test. -_The callback for each iterator call only accepts a single argument of true or -false, it does not accept an error argument first!_ This is in-line with the -way node libraries work with truth tests like fs.exists. Once any iterator -call returns true, the main callback is immediately called. +Returns `true` if at least one element in the `arr` satisfies an async test. +_The callback for each iterator call only accepts a single argument of `true` or +`false`; it does not accept an error argument first!_ This is in-line with the +way node libraries work with truth tests like `fs.exists`. Once any iterator +call returns `true`, the main `callback` is immediately called. __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A truth test to apply to each item in the array. - The iterator is passed a callback(truthValue) which must be called with a - boolean argument once it has completed. -* callback(result) - A callback which is called as soon as any iterator returns - true, or after all the iterator functions have finished. Result will be - either true or false depending on the values of the async tests. +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A truth test to apply to each item in the array + in parallel. 
The iterator is passed a callback(truthValue) which must be + called with a boolean argument once it has completed. +* `callback(result)` - A callback which is called as soon as any iterator returns + `true`, or after all the iterator functions have finished. Result will be + either `true` or `false` depending on the values of the async tests. __Example__ @@ -517,21 +585,21 @@ ### every(arr, iterator, callback) -__Alias:__ all +__Alias:__ `all` -Returns true if every element in the array satisfies an async test. -_The callback for each iterator call only accepts a single argument of true or -false, it does not accept an error argument first!_ This is in-line with the -way node libraries work with truth tests like fs.exists. +Returns `true` if every element in `arr` satisfies an async test. +_The callback for each `iterator` call only accepts a single argument of `true` or +`false`; it does not accept an error argument first!_ This is in-line with the +way node libraries work with truth tests like `fs.exists`. __Arguments__ -* arr - An array to iterate over. -* iterator(item, callback) - A truth test to apply to each item in the array. - The iterator is passed a callback(truthValue) which must be called with a - boolean argument once it has completed. -* callback(result) - A callback which is called after all the iterator - functions have finished. Result will be either true or false depending on +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A truth test to apply to each item in the array + in parallel. The iterator is passed a callback(truthValue) which must be + called with a boolean argument once it has completed. +* `callback(result)` - A callback which is called after all the `iterator` + functions have finished. Result will be either `true` or `false` depending on the values of the async tests. __Example__ @@ -547,20 +615,20 @@ ### concat(arr, iterator, callback) -Applies an iterator to each item in a list, concatenating the results. 
Returns the -concatenated list. The iterators are called in parallel, and the results are +Applies `iterator` to each item in `arr`, concatenating the results. Returns the +concatenated list. The `iterator`s are called in parallel, and the results are concatenated as they return. There is no guarantee that the results array will -be returned in the original order of the arguments passed to the iterator function. +be returned in the original order of `arr` passed to the `iterator` function. __Arguments__ -* arr - An array to iterate over -* iterator(item, callback) - A function to apply to each item in the array. - The iterator is passed a callback(err, results) which must be called once it - has completed with an error (which can be null) and an array of results. -* callback(err, results) - A callback which is called after all the iterator - functions have finished, or an error has occurred. Results is an array containing - the concatenated results of the iterator function. +* `arr` - An array to iterate over. +* `iterator(item, callback)` - A function to apply to each item in `arr`. + The iterator is passed a `callback(err, results)` which must be called once it + has completed with an error (which can be `null`) and an array of results. +* `callback(err, results)` - A callback which is called after all the `iterator` + functions have finished, or an error occurs. Results is an array containing + the concatenated results of the `iterator` function. __Example__ @@ -575,7 +643,7 @@ ### concatSeries(arr, iterator, callback) -Same as async.concat, but executes in series instead of parallel. +Same as [`concat`](#concat), but executes in series instead of parallel. ## Control Flow @@ -583,26 +651,33 @@ ### series(tasks, [callback]) -Run an array of functions in series, each one running once the previous +Run the functions in the `tasks` array in series, each one running once the previous function has completed. 
If any functions in the series pass an error to its -callback, no more functions are run and the callback for the series is -immediately called with the value of the error. Once the tasks have completed, -the results are passed to the final callback as an array. +callback, no more functions are run, and `callback` is immediately called with the value of the error. +Otherwise, `callback` receives an array of results when `tasks` have completed. It is also possible to use an object instead of an array. Each property will be -run as a function and the results will be passed to the final callback as an object +run as a function, and the results will be passed to the final `callback` as an object instead of an array. This can be a more readable way of handling results from -async.series. +[`series`](#series). + +**Note** that while many implementations preserve the order of object properties, the +[ECMAScript Language Specifcation](http://www.ecma-international.org/ecma-262/5.1/#sec-8.6) +explicitly states that + +> The mechanics and order of enumerating the properties is not specified. +So if you rely on the order in which your series of functions are executed, and want +this to work on all platforms, consider using an array. __Arguments__ -* tasks - An array or object containing functions to run, each function is passed - a callback(err, result) it must call on completion with an error (which can - be null) and an optional result value. -* callback(err, results) - An optional callback to run once all the functions +* `tasks` - An array or object containing functions to run, each function is passed + a `callback(err, result)` it must call on completion with an error `err` (which can + be `null`) and an optional `result` value. +* `callback(err, results)` - An optional callback to run once all the functions have completed. This function gets a results array (or object) containing all - the result arguments passed to the task callbacks. 
+ the result arguments passed to the `task` callbacks. __Example__ @@ -646,24 +721,24 @@ ### parallel(tasks, [callback]) -Run an array of functions in parallel, without waiting until the previous +Run the `tasks` array of functions in parallel, without waiting until the previous function has completed. If any of the functions pass an error to its -callback, the main callback is immediately called with the value of the error. -Once the tasks have completed, the results are passed to the final callback as an +callback, the main `callback` is immediately called with the value of the error. +Once the `tasks` have completed, the results are passed to the final `callback` as an array. It is also possible to use an object instead of an array. Each property will be -run as a function and the results will be passed to the final callback as an object +run as a function and the results will be passed to the final `callback` as an object instead of an array. This can be a more readable way of handling results from -async.parallel. +[`parallel`](#parallel). __Arguments__ -* tasks - An array or object containing functions to run, each function is passed - a callback(err, result) it must call on completion with an error (which can - be null) and an optional result value. -* callback(err, results) - An optional callback to run once all the functions +* `tasks` - An array or object containing functions to run. Each function is passed + a `callback(err, result)` which it must call on completion with an error `err` + (which can be `null`) and an optional `result` value. +* `callback(err, results)` - An optional callback to run once all the functions have completed. This function gets a results array (or object) containing all the result arguments passed to the task callbacks. 
@@ -709,41 +784,41 @@ --------------------------------------- - + ### parallelLimit(tasks, limit, [callback]) -The same as parallel only the tasks are executed in parallel with a maximum of "limit" -tasks executing at any time. +The same as [`parallel`](#parallel), only `tasks` are executed in parallel +with a maximum of `limit` tasks executing at any time. -Note that the tasks are not executed in batches, so there is no guarantee that -the first "limit" tasks will complete before any others are started. +Note that the `tasks` are not executed in batches, so there is no guarantee that +the first `limit` tasks will complete before any others are started. __Arguments__ -* tasks - An array or object containing functions to run, each function is passed - a callback(err, result) it must call on completion with an error (which can - be null) and an optional result value. -* limit - The maximum number of tasks to run at any time. -* callback(err, results) - An optional callback to run once all the functions +* `tasks` - An array or object containing functions to run, each function is passed + a `callback(err, result)` it must call on completion with an error `err` (which can + be `null`) and an optional `result` value. +* `limit` - The maximum number of `tasks` to run at any time. +* `callback(err, results)` - An optional callback to run once all the functions have completed. This function gets a results array (or object) containing all - the result arguments passed to the task callbacks. + the result arguments passed to the `task` callbacks. --------------------------------------- ### whilst(test, fn, callback) -Repeatedly call fn, while test returns true. Calls the callback when stopped, +Repeatedly call `fn`, while `test` returns `true`. Calls `callback` when stopped, or an error occurs. __Arguments__ -* test() - synchronous truth test to perform before each execution of fn. -* fn(callback) - A function to call each time the test passes. 
The function is - passed a callback(err) which must be called once it has completed with an - optional error argument. -* callback(err) - A callback which is called after the test fails and repeated - execution of fn has stopped. +* `test()` - synchronous truth test to perform before each execution of `fn`. +* `fn(callback)` - A function which is called each time `test` passes. The function is + passed a `callback(err)`, which must be called once it has completed with an + optional `err` argument. +* `callback(err)` - A callback which is called after the test fails and repeated + execution of `fn` has stopped. __Example__ @@ -767,51 +842,69 @@ ### doWhilst(fn, test, callback) -The post check version of whilst. To reflect the difference in the order of operations `test` and `fn` arguments are switched. `doWhilst` is to `whilst` as `do while` is to `while` in plain JavaScript. +The post-check version of [`whilst`](#whilst). To reflect the difference in +the order of operations, the arguments `test` and `fn` are switched. + +`doWhilst` is to `whilst` as `do while` is to `while` in plain JavaScript. --------------------------------------- ### until(test, fn, callback) -Repeatedly call fn, until test returns true. Calls the callback when stopped, +Repeatedly call `fn` until `test` returns `true`. Calls `callback` when stopped, or an error occurs. -The inverse of async.whilst. +The inverse of [`whilst`](#whilst). --------------------------------------- ### doUntil(fn, test, callback) -Like doWhilst except the test is inverted. Note the argument ordering differs from `until`. +Like [`doWhilst`](#doWhilst), except the `test` is inverted. Note the argument ordering differs from `until`. --------------------------------------- -### forever(fn, callback) +### forever(fn, errback) + +Calls the asynchronous function `fn` with a callback parameter that allows it to +call itself again, in series, indefinitely. 
+ +If an error is passed to the callback then `errback` is called with the +error, and execution stops, otherwise it will never be called. -Calls the asynchronous function 'fn' repeatedly, in series, indefinitely. -If an error is passed to fn's callback then 'callback' is called with the -error, otherwise it will never be called. +```js +async.forever( + function(next) { + // next is suitable for passing to things that need a callback(err [, whatever]); + // it will result in this function being called again. + }, + function(err) { + // if next is called with a value in its first parameter, it will appear + // in here as 'err', and execution will stop. + } +); +``` --------------------------------------- ### waterfall(tasks, [callback]) -Runs an array of functions in series, each passing their results to the next in -the array. However, if any of the functions pass an error to the callback, the -next function is not executed and the main callback is immediately called with +Runs the `tasks` array of functions in series, each passing their results to the next in +the array. However, if any of the `tasks` pass an error to their own callback, the +next function is not executed, and the main `callback` is immediately called with the error. __Arguments__ -* tasks - An array of functions to run, each function is passed a - callback(err, result1, result2, ...) it must call on completion. The first - argument is an error (which can be null) and any further arguments will be +* `tasks` - An array of functions to run, each function is passed a + `callback(err, result1, result2, ...)` it must call on completion. The first + argument is an error (which can be `null`) and any further arguments will be passed as arguments in order to the next task. -* callback(err, [results]) - An optional callback to run once all the functions +* `callback(err, [results])` - An optional callback to run once all the functions have completed. 
This will be passed the results of the last task's callback. @@ -824,6 +917,7 @@ callback(null, 'one', 'two'); }, function(arg1, arg2, callback){ + // arg1 now equals 'one' and arg2 now equals 'two' callback(null, 'three'); }, function(arg1, callback){ @@ -841,14 +935,14 @@ Creates a function which is a composition of the passed asynchronous functions. Each function consumes the return value of the function that -follows. Composing functions f(), g() and h() would produce the result of -f(g(h())), only this version uses callbacks to obtain the return values. +follows. Composing functions `f()`, `g()`, and `h()` would produce the result of +`f(g(h()))`, only this version uses callbacks to obtain the return values. Each function is executed with the `this` binding of the composed function. __Arguments__ -* functions... - the asynchronous functions to compose +* `functions...` - the asynchronous functions to compose __Example__ @@ -874,19 +968,66 @@ ``` --------------------------------------- + +### seq(fn1, fn2...) + +Version of the compose function that is more natural to read. +Each following function consumes the return value of the latter function. + +Each function is executed with the `this` binding of the composed function. + +__Arguments__ + +* functions... - the asynchronous functions to compose + + +__Example__ + +```js +// Requires lodash (or underscore), express3 and dresende's orm2. +// Part of an app, that fetches cats of the logged user. +// This example uses `seq` function to avoid overnesting and error +// handling clutter. 
+app.get('/cats', function(request, response) { + function handleError(err, data, callback) { + if (err) { + console.error(err); + response.json({ status: 'error', message: err.message }); + } + else { + callback(data); + } + } + var User = request.models.User; + async.seq( + _.bind(User.get, User), // 'User.get' has signature (id, callback(err, data)) + handleError, + function(user, fn) { + user.getCats(fn); // 'getCats' has signature (callback(err, data)) + }, + handleError, + function(cats) { + response.json({ status: 'ok', message: 'Cats found', data: cats }); + } + )(req.session.user_id); + } +}); +``` + +--------------------------------------- ### applyEach(fns, args..., callback) -Applies the provided arguments to each function in the array, calling the -callback after all functions have completed. If you only provide the first -argument then it will return a function which lets you pass in the +Applies the provided arguments to each function in the array, calling +`callback` after all functions have completed. If you only provide the first +argument, then it will return a function which lets you pass in the arguments as if it were a single function call. __Arguments__ -* fns - the asynchronous functions to all call with the same arguments -* args... - any number of separate arguments to pass to the function -* callback - the final argument should be the callback, called when all +* `fns` - the asynchronous functions to all call with the same arguments +* `args...` - any number of separate arguments to pass to the function +* `callback` - the final argument should be the callback, called when all functions have completed processing @@ -908,42 +1049,50 @@ ### applyEachSeries(arr, iterator, callback) -The same as applyEach only the functions are applied in series. +The same as [`applyEach`](#applyEach) only the functions are applied in series. 
--------------------------------------- ### queue(worker, concurrency) -Creates a queue object with the specified concurrency. Tasks added to the -queue will be processed in parallel (up to the concurrency limit). If all -workers are in progress, the task is queued until one is available. Once -a worker has completed a task, the task's callback is called. +Creates a `queue` object with the specified `concurrency`. Tasks added to the +`queue` are processed in parallel (up to the `concurrency` limit). If all +`worker`s are in progress, the task is queued until one becomes available. +Once a `worker` completes a `task`, that `task`'s callback is called. __Arguments__ -* worker(task, callback) - An asynchronous function for processing a queued - task, which must call its callback(err) argument when finished, with an - optional error as an argument. -* concurrency - An integer for determining how many worker functions should be +* `worker(task, callback)` - An asynchronous function for processing a queued + task, which must call its `callback(err)` argument when finished, with an + optional `error` as an argument. +* `concurrency` - An `integer` for determining how many `worker` functions should be run in parallel. __Queue objects__ -The queue object returned by this function has the following properties and +The `queue` object returned by this function has the following properties and methods: -* length() - a function returning the number of items waiting to be processed. -* concurrency - an integer for determining how many worker functions should be - run in parallel. This property can be changed after a queue is created to +* `length()` - a function returning the number of items waiting to be processed. +* `started` - a function returning whether or not any items have been pushed and processed by the queue +* `running()` - a function returning the number of items currently being processed. 
+* `idle()` - a function returning false if there are items waiting or being processed, or true if not. +* `concurrency` - an integer for determining how many `worker` functions should be + run in parallel. This property can be changed after a `queue` is created to alter the concurrency on-the-fly. -* push(task, [callback]) - add a new task to the queue, the callback is called - once the worker has finished processing the task. - instead of a single task, an array of tasks can be submitted. the respective callback is used for every task in the list. -* unshift(task, [callback]) - add a new task to the front of the queue. -* saturated - a callback that is called when the queue length hits the concurrency and further tasks will be queued -* empty - a callback that is called when the last item from the queue is given to a worker -* drain - a callback that is called when the last item from the queue has returned from the worker +* `push(task, [callback])` - add a new task to the `queue`. Calls `callback` once + the `worker` has finished processing the task. Instead of a single task, a `tasks` array + can be submitted. The respective callback is used for every task in the list. +* `unshift(task, [callback])` - add a new task to the front of the `queue`. +* `saturated` - a callback that is called when the `queue` length hits the `concurrency` limit, + and further tasks will be queued. +* `empty` - a callback that is called when the last item from the `queue` is given to a `worker`. +* `drain` - a callback that is called when the last item from the `queue` has returned from the `worker`. +* `paused` - a boolean for determining whether the queue is in a paused state +* `pause()` - a function that pauses the processing of tasks until `resume()` is called. +* `resume()` - a function that resumes the processing of queued tasks when the queue is paused. +* `kill()` - a function that empties remaining tasks from the queue forcing it to go idle. 
__Example__ @@ -983,39 +1132,56 @@ }); ``` + +--------------------------------------- + + +### priorityQueue(worker, concurrency) + +The same as [`queue`](#queue) only tasks are assigned a priority and completed in ascending priority order. There are two differences between `queue` and `priorityQueue` objects: + +* `push(task, priority, [callback])` - `priority` should be a number. If an array of + `tasks` is given, all tasks will be assigned the same priority. +* The `unshift` method was removed. + --------------------------------------- ### cargo(worker, [payload]) -Creates a cargo object with the specified payload. Tasks added to the -cargo will be processed altogether (up to the payload limit). If the -worker is in progress, the task is queued until it is available. Once -the worker has completed some tasks, each callback of those tasks is called. +Creates a `cargo` object with the specified payload. Tasks added to the +cargo will be processed altogether (up to the `payload` limit). If the +`worker` is in progress, the task is queued until it becomes available. Once +the `worker` has completed some tasks, each callback of those tasks is called. +Check out [this animation](https://camo.githubusercontent.com/6bbd36f4cf5b35a0f11a96dcd2e97711ffc2fb37/68747470733a2f2f662e636c6f75642e6769746875622e636f6d2f6173736574732f313637363837312f36383130382f62626330636662302d356632392d313165322d393734662d3333393763363464633835382e676966) for how `cargo` and `queue` work. + +While [queue](#queue) passes only one task to one of a group of workers +at a time, cargo passes an array of tasks to a single worker, repeating +when the worker is finished. __Arguments__ -* worker(tasks, callback) - An asynchronous function for processing an array of - queued tasks, which must call its callback(err) argument when finished, with - an optional error as an argument. 
-* payload - An optional integer for determining how many tasks should be +* `worker(tasks, callback)` - An asynchronous function for processing an array of + queued tasks, which must call its `callback(err)` argument when finished, with + an optional `err` argument. +* `payload` - An optional `integer` for determining how many tasks should be processed per round; if omitted, the default is unlimited. __Cargo objects__ -The cargo object returned by this function has the following properties and +The `cargo` object returned by this function has the following properties and methods: -* length() - a function returning the number of items waiting to be processed. -* payload - an integer for determining how many tasks should be - process per round. This property can be changed after a cargo is created to +* `length()` - A function returning the number of items waiting to be processed. +* `payload` - An `integer` for determining how many tasks should be + process per round. This property can be changed after a `cargo` is created to alter the payload on-the-fly. -* push(task, [callback]) - add a new task to the queue, the callback is called - once the worker has finished processing the task. - instead of a single task, an array of tasks can be submitted. the respective callback is used for every task in the list. -* saturated - a callback that is called when the queue length hits the concurrency and further tasks will be queued -* empty - a callback that is called when the last item from the queue is given to a worker -* drain - a callback that is called when the last item from the queue has returned from the worker +* `push(task, [callback])` - Adds `task` to the `queue`. The callback is called + once the `worker` has finished processing the task. Instead of a single task, an array of `tasks` + can be submitted. The respective callback is used for every task in the list. 
+* `saturated` - A callback that is called when the `queue.length()` hits the concurrency and further tasks will be queued. +* `empty` - A callback that is called when the last item from the `queue` is given to a `worker`. +* `drain` - A callback that is called when the last item from the `queue` has returned from the `worker`. __Example__ @@ -1048,33 +1214,36 @@ ### auto(tasks, [callback]) -Determines the best order for running functions based on their requirements. -Each function can optionally depend on other functions being completed first, -and each function is run as soon as its requirements are satisfied. If any of -the functions pass an error to their callback, that function will not complete -(so any other functions depending on it will not run) and the main callback -will be called immediately with the error. Functions also receive an object -containing the results of functions which have completed so far. - -Note, all functions are called with a results object as a second argument, -so it is unsafe to pass functions in the tasks object which cannot handle the -extra argument. For example, this snippet of code: +Determines the best order for running the functions in `tasks`, based on their +requirements. Each function can optionally depend on other functions being completed +first, and each function is run as soon as its requirements are satisfied. + +If any of the functions pass an error to their callback, it will not +complete (so any other functions depending on it will not run), and the main +`callback` is immediately called with the error. Functions also receive an +object containing the results of functions which have completed so far. + +Note, all functions are called with a `results` object as a second argument, +so it is unsafe to pass functions in the `tasks` object which cannot handle the +extra argument. 
+ +For example, this snippet of code: ```js async.auto({ - readData: async.apply(fs.readFile, 'data.txt', 'utf-8'); + readData: async.apply(fs.readFile, 'data.txt', 'utf-8') }, callback); ``` -will have the effect of calling readFile with the results object as the last +will have the effect of calling `readFile` with the results object as the last argument, which will fail: ```js fs.readFile('data.txt', 'utf-8', cb, {}); ``` -Instead, wrap the call to readFile in a function which does not forward the -results object: +Instead, wrap the call to `readFile` in a function which does not forward the +`results` object: ```js async.auto({ @@ -1086,40 +1255,51 @@ __Arguments__ -* tasks - An object literal containing named functions or an array of - requirements, with the function itself the last item in the array. The key - used for each function or array is used when specifying requirements. The - function receives two arguments: (1) a callback(err, result) which must be - called when finished, passing an error (which can be null) and the result of - the function's execution, and (2) a results object, containing the results of +* `tasks` - An object. Each of its properties is either a function or an array of + requirements, with the function itself the last item in the array. The object's key + of a property serves as the name of the task defined by that property, + i.e. can be used when specifying requirements for other tasks. + The function receives two arguments: (1) a `callback(err, result)` which must be + called when finished, passing an `error` (which can be `null`) and the result of + the function's execution, and (2) a `results` object, containing the results of the previously executed functions. -* callback(err, results) - An optional callback which is called when all the - tasks have been completed. The callback will receive an error as an argument - if any tasks pass an error to their callback. 
Results will always be passed - but if an error occurred, no other tasks will be performed, and the results - object will only contain partial results. - +* `callback(err, results)` - An optional callback which is called when all the + tasks have been completed. It receives the `err` argument if any `tasks` + pass an error to their callback. Results are always returned; however, if + an error occurs, no further `tasks` will be performed, and the results + object will only contain partial results. + __Example__ ```js async.auto({ get_data: function(callback){ + console.log('in get_data'); // async code to get some data + callback(null, 'data', 'converted to array'); }, make_folder: function(callback){ + console.log('in make_folder'); // async code to create a directory to store a file in // this is run at the same time as getting the data + callback(null, 'folder'); }, - write_file: ['get_data', 'make_folder', function(callback){ + write_file: ['get_data', 'make_folder', function(callback, results){ + console.log('in write_file', JSON.stringify(results)); // once there is some data and the directory exists, // write the data to a file in the directory - callback(null, filename); + callback(null, 'filename'); }], email_link: ['write_file', function(callback, results){ + console.log('in email_link', JSON.stringify(results)); // once the file is written let's email a link to it... // results.write_file contains the filename returned by write_file. 
+ callback(null, {'file':results.write_file, 'email':'user@example.com'}); }] +}, function(err, results) { + console.log('err = ', err); + console.log('results = ', results); }); ``` @@ -1129,28 +1309,79 @@ ```js async.parallel([ function(callback){ + console.log('in get_data'); // async code to get some data + callback(null, 'data', 'converted to array'); }, function(callback){ + console.log('in make_folder'); // async code to create a directory to store a file in // this is run at the same time as getting the data + callback(null, 'folder'); } ], function(err, results){ async.series([ function(callback){ + console.log('in write_file', JSON.stringify(results)); // once there is some data and the directory exists, // write the data to a file in the directory + results.push('filename'); + callback(null); }, function(callback){ + console.log('in email_link', JSON.stringify(results)); // once the file is written let's email a link to it... + callback(null, {'file':results.pop(), 'email':'user@example.com'}); } ]); }); ``` -For a complicated series of async tasks using the auto function makes adding -new tasks much easier and makes the code more readable. +For a complicated series of `async` tasks, using the [`auto`](#auto) function makes adding +new tasks much easier (and the code more readable). + + +--------------------------------------- + + +### retry([times = 5], task, [callback]) + +Attempts to get a successful response from `task` no more than `times` times before +returning an error. If the task is successful, the `callback` will be passed the result +of the successful task. If all attempts fail, the callback will be passed the error and +result (if any) of the final attempt. + +__Arguments__ + +* `times` - An integer indicating how many times to attempt the `task` before giving up. Defaults to 5. 
+* `task(callback, results)` - A function which receives two arguments: (1) a `callback(err, result)` + which must be called when finished, passing `err` (which can be `null`) and the `result` of + the function's execution, and (2) a `results` object, containing the results of + the previously executed functions (if nested inside another control flow). +* `callback(err, results)` - An optional callback which is called when the + task has succeeded, or after the final failed attempt. It receives the `err` and `result` arguments of the last attempt at completing the `task`. + +The [`retry`](#retry) function can be used as a stand-alone control flow by passing a +callback, as shown below: + +```js +async.retry(3, apiMethod, function(err, result) { + // do something with the result +}); +``` + +It can also be embedded within other control flow functions to retry individual methods +that are not as reliable, like this: + +```js +async.auto({ + users: api.getUsers.bind(api), + payments: async.retry(3, api.getPayments.bind(api)) +}, function(err, results) { + // do something with the results +}); +``` --------------------------------------- @@ -1158,16 +1389,16 @@ ### iterator(tasks) -Creates an iterator function which calls the next function in the array, +Creates an iterator function which calls the next function in the `tasks` array, returning a continuation to call the next one after that. It's also possible to -'peek' the next iterator by doing iterator.next(). +“peek” at the next iterator with `iterator.next()`. -This function is used internally by the async module but can be useful when +This function is used internally by the `async` module, but can be useful when you want to manually control the flow of functions in series. __Arguments__ -* tasks - An array of functions to run. +* `tasks` - An array of functions to run. __Example__ @@ -1194,15 +1425,16 @@ ### apply(function, arguments..) 
-Creates a continuation function with some arguments already applied, a useful -shorthand when combined with other control flow functions. Any arguments +Creates a continuation function with some arguments already applied. + +Useful as a shorthand when combined with other control flow functions. Any arguments passed to the returned function are added to the arguments originally passed to apply. __Arguments__ -* function - The function you want to eventually apply all arguments to. -* arguments... - Any number of arguments to automatically apply when the +* `function` - The function you want to eventually apply all arguments to. +* `arguments...` - Any number of arguments to automatically apply when the continuation is called. __Example__ @@ -1244,16 +1476,16 @@ ### nextTick(callback) -Calls the callback on a later loop around the event loop. In node.js this just -calls process.nextTick, in the browser it falls back to setImmediate(callback) -if available, otherwise setTimeout(callback, 0), which means other higher priority -events may precede the execution of the callback. +Calls `callback` on a later loop around the event loop. In Node.js this just +calls `process.nextTick`; in the browser it falls back to `setImmediate(callback)` +if available, otherwise `setTimeout(callback, 0)`, which means other higher priority +events may precede the execution of `callback`. This is used internally for browser-compatibility purposes. __Arguments__ -* callback - The function to call on a later loop around the event loop. +* `callback` - The function to call on a later loop around the event loop. __Example__ @@ -1269,13 +1501,13 @@ ### times(n, callback) -Calls the callback n times and accumulates results in the same manner -you would use with async.map. +Calls the `callback` function `n` times, and accumulates results in the same manner +you would use with [`map`](#map). __Arguments__ -* n - The number of times to run the function. -* callback - The function to call n times. 
+* `n` - The number of times to run the function. +* `callback` - The function to call `n` times. __Example__ @@ -1299,9 +1531,9 @@ ### timesSeries(n, callback) -The same as times only the iterator is applied to each item in the array in -series. The next iterator is only called once the current one has completed -processing. The results array will be in the same order as the original. +The same as [`times`](#times), only the iterator is applied to each item in `arr` in +series. The next `iterator` is only called once the current one has completed. +The results array will be in the same order as the original. ## Utils @@ -1309,7 +1541,7 @@ ### memoize(fn, [hasher]) -Caches the results of an async function. When creating a hash to store function +Caches the results of an `async` function. When creating a hash to store function results against, the callback is omitted from the hash and an optional hash function can be used. @@ -1318,9 +1550,9 @@ __Arguments__ -* fn - the function you to proxy and cache results from. -* hasher - an optional function for generating a custom hash for storing - results, it has all the arguments applied to it apart from the callback, and +* `fn` - The function to proxy and cache results from. +* `hasher` - An optional function for generating a custom hash for storing + results. It has all the arguments applied to it apart from the callback, and must be synchronous. __Example__ @@ -1341,25 +1573,25 @@ ### unmemoize(fn) -Undoes a memoized function, reverting it to the original, unmemoized -form. Comes handy in tests. +Undoes a [`memoize`](#memoize)d function, reverting it to the original, unmemoized +form. Handy for testing. __Arguments__ -* fn - the memoized function +* `fn` - the memoized function ### log(function, arguments) -Logs the result of an async function to the console. Only works in node.js or -in browsers that support console.log and console.error (such as FF and Chrome). 
-If multiple arguments are returned from the async function, console.log is +Logs the result of an `async` function to the `console`. Only works in Node.js or +in browsers that support `console.log` and `console.error` (such as FF and Chrome). +If multiple arguments are returned from the async function, `console.log` is called on each argument in order. __Arguments__ -* function - The function you want to eventually apply all arguments to. -* arguments... - Any number of arguments to apply to the function. +* `function` - The function you want to eventually apply all arguments to. +* `arguments...` - Any number of arguments to apply to the function. __Example__ @@ -1380,16 +1612,16 @@ ### dir(function, arguments) -Logs the result of an async function to the console using console.dir to -display the properties of the resulting object. Only works in node.js or -in browsers that support console.dir and console.error (such as FF and Chrome). -If multiple arguments are returned from the async function, console.dir is +Logs the result of an `async` function to the `console` using `console.dir` to +display the properties of the resulting object. Only works in Node.js or +in browsers that support `console.dir` and `console.error` (such as FF and Chrome). +If multiple arguments are returned from the async function, `console.dir` is called on each argument in order. __Arguments__ -* function - The function you want to eventually apply all arguments to. -* arguments... - Any number of arguments to apply to the function. +* `function` - The function you want to eventually apply all arguments to. +* `arguments...` - Any number of arguments to apply to the function. __Example__ @@ -1410,5 +1642,5 @@ ### noConflict() -Changes the value of async back to its original value, returning a reference to the -async object. +Changes the value of `async` back to its original value, returning a reference to the +`async` object. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/.travis.yml nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/async/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +language: node_js +node_js: + - "0.10" diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/lib/combined_stream.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/lib/combined_stream.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/lib/combined_stream.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/lib/combined_stream.js 2015-01-20 21:22:17.000000000 +0000 @@ -62,6 +62,7 @@ CombinedStream.prototype.pipe = function(dest, options) { Stream.prototype.pipe.call(this, dest, options); this.resume(); + return dest; }; CombinedStream.prototype._getNext = function() { diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Makefile nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Makefile --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -SHELL := /bin/bash - -test: - @./test/run.js - -.PHONY: test - diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/node_modules/delayed-stream/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/node_modules/delayed-stream/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/node_modules/delayed-stream/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/node_modules/delayed-stream/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -21,11 +21,22 @@ "fake": "0.2.0", "far": "0.0.1" }, - "readme": "# delayed-stream\n\nBuffers events from a stream until you are ready to handle them.\n\n## Installation\n\n``` bash\nnpm install delayed-stream\n```\n\n## Usage\n\nThe following example shows how to write a http echo server that delays its\nresponse by 1000 ms.\n\n``` javascript\nvar DelayedStream = require('delayed-stream');\nvar http = require('http');\n\nhttp.createServer(function(req, res) {\n var delayed = DelayedStream.create(req);\n\n setTimeout(function() {\n res.writeHead(200);\n delayed.pipe(res);\n }, 1000);\n});\n```\n\nIf you are not using `Stream#pipe`, you can also manually release the buffered\nevents by calling `delayedStream.resume()`:\n\n``` javascript\nvar delayed = DelayedStream.create(req);\n\nsetTimeout(function() {\n // Emit all buffered events and resume underlaying source\n delayed.resume();\n}, 1000);\n```\n\n## Implementation\n\nIn order to use this meta stream properly, here are a few things you should\nknow about the implementation.\n\n### Event Buffering / Proxying\n\nAll events of the `source` stream are hijacked by overwriting the `source.emit`\nmethod. 
Until node implements a catch-all event listener, this is the only way.\n\nHowever, delayed-stream still continues to emit all events it captures on the\n`source`, regardless of whether you have released the delayed stream yet or\nnot.\n\nUpon creation, delayed-stream captures all `source` events and stores them in\nan internal event buffer. Once `delayedStream.release()` is called, all\nbuffered events are emitted on the `delayedStream`, and the event buffer is\ncleared. After that, delayed-stream merely acts as a proxy for the underlaying\nsource.\n\n### Error handling\n\nError events on `source` are buffered / proxied just like any other events.\nHowever, `delayedStream.create` attaches a no-op `'error'` listener to the\n`source`. This way you only have to handle errors on the `delayedStream`\nobject, rather than in two places.\n\n### Buffer limits\n\ndelayed-stream provides a `maxDataSize` property that can be used to limit\nthe amount of data being buffered. In order to protect you from bad `source`\nstreams that don't react to `source.pause()`, this feature is enabled by\ndefault.\n\n## API\n\n### DelayedStream.create(source, [options])\n\nReturns a new `delayedStream`. Available options are:\n\n* `pauseStream`\n* `maxDataSize`\n\nThe description for those properties can be found below.\n\n### delayedStream.source\n\nThe `source` stream managed by this object. This is useful if you are\npassing your `delayedStream` around, and you still want to access properties\non the `source` object.\n\n### delayedStream.pauseStream = true\n\nWhether to pause the underlaying `source` when calling\n`DelayedStream.create()`. 
Modifying this property afterwards has no effect.\n\n### delayedStream.maxDataSize = 1024 * 1024\n\nThe amount of data to buffer before emitting an `error`.\n\nIf the underlaying source is emitting `Buffer` objects, the `maxDataSize`\nrefers to bytes.\n\nIf the underlaying source is emitting JavaScript strings, the size refers to\ncharacters.\n\nIf you know what you are doing, you can set this property to `Infinity` to\ndisable this feature. You can also modify this property during runtime.\n\n### delayedStream.maxDataSize = 1024 * 1024\n\nThe amount of data to buffer before emitting an `error`.\n\nIf the underlaying source is emitting `Buffer` objects, the `maxDataSize`\nrefers to bytes.\n\nIf the underlaying source is emitting JavaScript strings, the size refers to\ncharacters.\n\nIf you know what you are doing, you can set this property to `Infinity` to\ndisable this feature.\n\n### delayedStream.dataSize = 0\n\nThe amount of data buffered so far.\n\n### delayedStream.readable\n\nAn ECMA5 getter that returns the value of `source.readable`.\n\n### delayedStream.resume()\n\nIf the `delayedStream` has not been released so far, `delayedStream.release()`\nis called.\n\nIn either case, `source.resume()` is called.\n\n### delayedStream.pause()\n\nCalls `source.pause()`.\n\n### delayedStream.pipe(dest)\n\nCalls `delayedStream.resume()` and then proxies the arguments to `source.pipe`.\n\n### delayedStream.release()\n\nEmits and clears all events that have been buffered up so far. 
This does not\nresume the underlaying source, use `delayedStream.resume()` instead.\n\n## License\n\ndelayed-stream is licensed under the MIT license.\n", - "readmeFilename": "Readme.md", + "_id": "delayed-stream@0.0.5", + "_engineSupported": true, + "_npmVersion": "1.0.3", + "_nodeVersion": "v0.4.9-pre", + "_defaultsLoaded": true, + "dist": { + "shasum": "d4b1f43a93e8296dfe02694f4680bc37a313c73f", + "tarball": "http://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz" + }, + "scripts": {}, + "directories": {}, + "_shasum": "d4b1f43a93e8296dfe02694f4680bc37a313c73f", + "_resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz", + "_from": "delayed-stream@0.0.5", "bugs": { "url": "https://github.com/felixge/node-delayed-stream/issues" }, - "_id": "delayed-stream@0.0.5", - "_from": "delayed-stream@0.0.5" + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/.npmignore nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/.npmignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -*.un~ -/node_modules -/test/tmp diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/package.json 2015-01-20 21:22:17.000000000 +0000 @@ 
-6,27 +6,56 @@ }, "name": "combined-stream", "description": "A stream that emits multiple other streams one after another.", - "version": "0.0.4", + "version": "0.0.5", "homepage": "https://github.com/felixge/node-combined-stream", "repository": { "type": "git", "url": "git://github.com/felixge/node-combined-stream.git" }, "main": "./lib/combined_stream", + "scripts": { + "test": "node test/run.js" + }, "engines": { - "node": "*" + "node": ">= 0.8" }, "dependencies": { "delayed-stream": "0.0.5" }, "devDependencies": { - "far": "0.0.1" + "far": "~0.0.7" }, - "readme": "# combined-stream\n\nA stream that emits multiple other streams one after another.\n\n## Installation\n\n``` bash\nnpm install combined-stream\n```\n\n## Usage\n\nHere is a simple example that shows how you can use combined-stream to combine\ntwo files into one:\n\n``` javascript\nvar CombinedStream = require('combined-stream');\nvar fs = require('fs');\n\nvar combinedStream = CombinedStream.create();\ncombinedStream.append(fs.createReadStream('file1.txt'));\ncombinedStream.append(fs.createReadStream('file2.txt'));\n\ncombinedStream.pipe(fs.createWriteStream('combined.txt'));\n```\n\nWhile the example above works great, it will pause all source streams until\nthey are needed. If you don't want that to happen, you can set `pauseStreams`\nto `false`:\n\n``` javascript\nvar CombinedStream = require('combined-stream');\nvar fs = require('fs');\n\nvar combinedStream = CombinedStream.create({pauseStreams: false});\ncombinedStream.append(fs.createReadStream('file1.txt'));\ncombinedStream.append(fs.createReadStream('file2.txt'));\n\ncombinedStream.pipe(fs.createWriteStream('combined.txt'));\n```\n\nHowever, what if you don't have all the source streams yet, or you don't want\nto allocate the resources (file descriptors, memory, etc.) 
for them right away?\nWell, in that case you can simply provide a callback that supplies the stream\nby calling a `next()` function:\n\n``` javascript\nvar CombinedStream = require('combined-stream');\nvar fs = require('fs');\n\nvar combinedStream = CombinedStream.create();\ncombinedStream.append(function(next) {\n next(fs.createReadStream('file1.txt'));\n});\ncombinedStream.append(function(next) {\n next(fs.createReadStream('file2.txt'));\n});\n\ncombinedStream.pipe(fs.createWriteStream('combined.txt'));\n```\n\n## API\n\n### CombinedStream.create([options])\n\nReturns a new combined stream object. Available options are:\n\n* `maxDataSize`\n* `pauseStreams`\n\nThe effect of those options is described below.\n\n### combinedStream.pauseStreams = true\n\nWhether to apply back pressure to the underlaying streams. If set to `false`,\nthe underlaying streams will never be paused. If set to `true`, the\nunderlaying streams will be paused right after being appended, as well as when\n`delayedStream.pipe()` wants to throttle.\n\n### combinedStream.maxDataSize = 2 * 1024 * 1024\n\nThe maximum amount of bytes (or characters) to buffer for all source streams.\nIf this value is exceeded, `combinedStream` emits an `'error'` event.\n\n### combinedStream.dataSize = 0\n\nThe amount of bytes (or characters) currently buffered by `combinedStream`.\n\n### combinedStream.append(stream)\n\nAppends the given `stream` to the combinedStream object. If `pauseStreams` is\nset to `true, this stream will also be paused right away.\n\n`streams` can also be a function that takes one parameter called `next`. 
`next`\nis a function that must be invoked in order to provide the `next` stream, see\nexample above.\n\nRegardless of how the `stream` is appended, combined-stream always attaches an\n`'error'` listener to it, so you don't have to do that manually.\n\nSpecial case: `stream` can also be a String or Buffer.\n\n### combinedStream.write(data)\n\nYou should not call this, `combinedStream` takes care of piping the appended\nstreams into itself for you.\n\n### combinedStream.resume()\n\nCauses `combinedStream` to start drain the streams it manages. The function is\nidempotent, and also emits a `'resume'` event each time which usually goes to\nthe stream that is currently being drained.\n\n### combinedStream.pause();\n\nIf `combinedStream.pauseStreams` is set to `false`, this does nothing.\nOtherwise a `'pause'` event is emitted, this goes to the stream that is\ncurrently being drained, so you can use it to apply back pressure.\n\n### combinedStream.end();\n\nSets `combinedStream.writable` to false, emits an `'end'` event, and removes\nall streams from the queue.\n\n### combinedStream.destroy();\n\nSame as `combinedStream.end()`, except it emits a `'close'` event instead of\n`'end'`.\n\n## License\n\ncombined-stream is licensed under the MIT license.\n", - "readmeFilename": "Readme.md", + "gitHead": "19d9bdd4c20f6806c2ae8adb00a53fb6fd154740", "bugs": { "url": "https://github.com/felixge/node-combined-stream/issues" }, - "_id": "combined-stream@0.0.4", - "_from": "combined-stream@~0.0.4" + "_id": "combined-stream@0.0.5", + "_shasum": "29ed76e5c9aad07c4acf9ca3d32601cce28697a2", + "_from": "combined-stream@>=0.0.4 <0.1.0", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "alexindigo", + "email": "iam@alexindigo.com" + }, + "maintainers": [ + { + "name": "felixge", + "email": "felix@debuggable.com" + }, + { + "name": "celer", + "email": "celer@scrypt.net" + }, + { + "name": "alexindigo", + "email": "iam@alexindigo.com" + } + ], + "dist": { + "shasum": 
"29ed76e5c9aad07c4acf9ca3d32601cce28697a2", + "tarball": "http://registry.npmjs.org/combined-stream/-/combined-stream-0.0.5.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.5.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Readme.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Readme.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Readme.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/Readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,4 @@ -# combined-stream +# combined-stream [![Build Status](https://travis-ci.org/felixge/node-combined-stream.svg?branch=master)](https://travis-ci.org/felixge/node-combined-stream) A stream that emits multiple other streams one after another. @@ -70,19 +70,19 @@ The effect of those options is described below. -### combinedStream.pauseStreams = true +### combinedStream.pauseStreams = `true` Whether to apply back pressure to the underlaying streams. If set to `false`, the underlaying streams will never be paused. If set to `true`, the underlaying streams will be paused right after being appended, as well as when `delayedStream.pipe()` wants to throttle. -### combinedStream.maxDataSize = 2 * 1024 * 1024 +### combinedStream.maxDataSize = `2 * 1024 * 1024` The maximum amount of bytes (or characters) to buffer for all source streams. If this value is exceeded, `combinedStream` emits an `'error'` event. -### combinedStream.dataSize = 0 +### combinedStream.dataSize = `0` The amount of bytes (or characters) currently buffered by `combinedStream`. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/common.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/common.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/common.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/common.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -var common = module.exports; - -var path = require('path'); -var fs = require('fs'); -var root = path.join(__dirname, '..'); - -common.dir = { - fixture: root + '/test/fixture', - tmp: root + '/test/tmp', -}; - -// Create tmp directory if it does not exist -// Not using fs.exists so as to be node 0.6.x compatible -try { - fs.statSync(common.dir.tmp); -} -catch (e) { - // Dir does not exist - fs.mkdirSync(common.dir.tmp); -} - -common.CombinedStream = require(root); -common.assert = require('assert'); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file1.txt nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file1.txt --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file1.txt 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file1.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,256 +0,0 @@ -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 
-10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 -10101010101010101010101010101010101010101010101010101010101010101010101010101010 -01010101010101010101010101010101010101010101010101010101010101010101010101010101 diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file2.txt nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file2.txt --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file2.txt 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/fixture/file2.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,256 +0,0 @@ -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 
-02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 -20202020202020202020202020202020202020202020202020202020202020202020202020202020 -02020202020202020202020202020202020202020202020202020202020202020202020202020202 diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-callback-streams.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-callback-streams.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-callback-streams.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-callback-streams.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var fs = require('fs'); - -var FILE1 = common.dir.fixture + '/file1.txt'; -var FILE2 = common.dir.fixture + '/file2.txt'; -var EXPECTED = fs.readFileSync(FILE1) + fs.readFileSync(FILE2); - -(function testDelayedStreams() { - var combinedStream = CombinedStream.create(); - combinedStream.append(function(next) { - 
next(fs.createReadStream(FILE1)); - }); - combinedStream.append(function(next) { - next(fs.createReadStream(FILE2)); - }); - - var tmpFile = common.dir.tmp + '/combined.txt'; - var dest = fs.createWriteStream(tmpFile); - combinedStream.pipe(dest); - - dest.on('end', function() { - var written = fs.readFileSync(tmpFile, 'utf8'); - assert.strictEqual(written, EXPECTED); - }); -})(); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-data-size.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-data-size.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-data-size.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-data-size.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; - -(function testDataSizeGetter() { - var combinedStream = CombinedStream.create(); - - assert.strictEqual(combinedStream.dataSize, 0); - - // Test one stream - combinedStream._streams.push({dataSize: 10}); - combinedStream._updateDataSize(); - assert.strictEqual(combinedStream.dataSize, 10); - - // Test two streams - combinedStream._streams.push({dataSize: 23}); - combinedStream._updateDataSize(); - assert.strictEqual(combinedStream.dataSize, 33); - - // Test currentStream - combinedStream._currentStream = {dataSize: 20}; - combinedStream._updateDataSize(); - assert.strictEqual(combinedStream.dataSize, 53); - - // Test currentStream without dataSize - combinedStream._currentStream = {}; - combinedStream._updateDataSize(); - assert.strictEqual(combinedStream.dataSize, 33); - - // Test stream function - combinedStream._streams.push(function() {}); - 
combinedStream._updateDataSize(); - assert.strictEqual(combinedStream.dataSize, 33); -})(); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams-and-buffers-and-strings.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams-and-buffers-and-strings.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams-and-buffers-and-strings.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams-and-buffers-and-strings.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var fs = require('fs'); - -var FILE1 = common.dir.fixture + '/file1.txt'; -var BUFFER = new Buffer('Bacon is delicious'); -var FILE2 = common.dir.fixture + '/file2.txt'; -var STRING = 'The € kicks the $\'s ass!'; - -var EXPECTED = - fs.readFileSync(FILE1) - + BUFFER - + fs.readFileSync(FILE2) - + STRING; -var GOT; - -(function testDelayedStreams() { - var combinedStream = CombinedStream.create(); - combinedStream.append(fs.createReadStream(FILE1)); - combinedStream.append(BUFFER); - combinedStream.append(fs.createReadStream(FILE2)); - combinedStream.append(function(next) { - next(STRING); - }); - - var tmpFile = common.dir.tmp + '/combined-file1-buffer-file2-string.txt'; - var dest = fs.createWriteStream(tmpFile); - combinedStream.pipe(dest); - - dest.on('close', function() { - GOT = fs.readFileSync(tmpFile, 'utf8'); - }); -})(); - -process.on('exit', function() { - assert.strictEqual(GOT, EXPECTED); -}); diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-delayed-streams.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var fs = require('fs'); - -var FILE1 = common.dir.fixture + '/file1.txt'; -var FILE2 = common.dir.fixture + '/file2.txt'; -var EXPECTED = fs.readFileSync(FILE1) + fs.readFileSync(FILE2); -var GOT; - -(function testDelayedStreams() { - var combinedStream = CombinedStream.create(); - combinedStream.append(fs.createReadStream(FILE1)); - combinedStream.append(fs.createReadStream(FILE2)); - - var stream1 = combinedStream._streams[0]; - var stream2 = combinedStream._streams[1]; - - stream1.on('end', function() { - assert.equal(stream2.dataSize, 0); - }); - - var tmpFile = common.dir.tmp + '/combined.txt'; - var dest = fs.createWriteStream(tmpFile); - combinedStream.pipe(dest); - - dest.on('close', function() { - GOT = fs.readFileSync(tmpFile, 'utf8'); - }); -})(); - -process.on('exit', function() { - console.error(GOT.length, EXPECTED.length); - assert.strictEqual(GOT, EXPECTED); -}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-empty-string.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-empty-string.js --- 
nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-empty-string.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-empty-string.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var util = require('util'); -var Stream = require('stream').Stream; - -var s = CombinedStream.create(); - - -function StringStream(){ - this.writable=true; - this.str="" -} -util.inherits(StringStream,Stream); - -StringStream.prototype.write=function(chunk,encoding){ - this.str+=chunk.toString(); - this.emit('data',chunk); -} - -StringStream.prototype.end=function(chunk,encoding){ - this.emit('end'); -} - -StringStream.prototype.toString=function(){ - return this.str; -} - - -s.append("foo."); -s.append(""); -s.append("bar"); - -var ss = new StringStream(); - -s.pipe(ss); -s.resume(); - -assert.equal(ss.toString(),"foo.bar"); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-is-stream-like.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-is-stream-like.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-is-stream-like.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-is-stream-like.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -var fs = require('fs'); -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var FILE1 = common.dir.fixture + '/file1.txt'; -var fileStream = fs.createReadStream(FILE1); - -var 
foo = function(){}; - -(function testIsStreamLike() { - assert(! CombinedStream.isStreamLike(true)); - assert(! CombinedStream.isStreamLike("I am a string")); - assert(! CombinedStream.isStreamLike(7)); - assert(! CombinedStream.isStreamLike(foo)); - - assert(CombinedStream.isStreamLike(fileStream)); -})(); \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-max-data-size.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-max-data-size.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-max-data-size.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-max-data-size.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var fs = require('fs'); - -var FILE1 = common.dir.fixture + '/file1.txt'; -var FILE2 = common.dir.fixture + '/file2.txt'; -var EXPECTED = fs.readFileSync(FILE1) + fs.readFileSync(FILE2); - -(function testDelayedStreams() { - var combinedStream = CombinedStream.create({pauseStreams: false, maxDataSize: 20736}); - combinedStream.append(fs.createReadStream(FILE1)); - combinedStream.append(fs.createReadStream(FILE2)); - - var gotErr = null; - combinedStream.on('error', function(err) { - gotErr = err; - }); - - process.on('exit', function() { - assert.ok(gotErr); - assert.ok(gotErr.message.match(/bytes/)); - }); -})(); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-unpaused-streams.js 
nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-unpaused-streams.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-unpaused-streams.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/integration/test-unpaused-streams.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -var common = require('../common'); -var assert = common.assert; -var CombinedStream = common.CombinedStream; -var fs = require('fs'); - -var FILE1 = common.dir.fixture + '/file1.txt'; -var FILE2 = common.dir.fixture + '/file2.txt'; -var EXPECTED = fs.readFileSync(FILE1) + fs.readFileSync(FILE2); - -(function testDelayedStreams() { - var combinedStream = CombinedStream.create({pauseStreams: false}); - combinedStream.append(fs.createReadStream(FILE1)); - combinedStream.append(fs.createReadStream(FILE2)); - - var stream1 = combinedStream._streams[0]; - var stream2 = combinedStream._streams[1]; - - stream1.on('end', function() { - assert.ok(stream2.dataSize > 0); - }); - - var tmpFile = common.dir.tmp + '/combined.txt'; - var dest = fs.createWriteStream(tmpFile); - combinedStream.pipe(dest); - - dest.on('end', function() { - var written = fs.readFileSync(tmpFile, 'utf8'); - assert.strictEqual(written, EXPECTED); - }); -})(); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/run.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/run.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/run.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/combined-stream/test/run.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ 
-#!/usr/bin/env node -var far = require('far').create(); - -far.add(__dirname); -far.include(/test-.*\.js$/); - -far.execute(); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/LICENSE nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright (c) 2010 Benjamin Thomas, Robert Kieffer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/mime.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/mime.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/mime.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/mime.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,114 @@ +var path = require('path'); +var fs = require('fs'); + +function Mime() { + // Map of extension -> mime type + this.types = Object.create(null); + + // Map of mime type -> extension + this.extensions = Object.create(null); +} + +/** + * Define mimetype -> extension mappings. Each key is a mime-type that maps + * to an array of extensions associated with the type. The first extension is + * used as the default extension for the type. + * + * e.g. mime.define({'audio/ogg', ['oga', 'ogg', 'spx']}); + * + * @param map (Object) type definitions + */ +Mime.prototype.define = function (map) { + for (var type in map) { + var exts = map[type]; + + for (var i = 0; i < exts.length; i++) { + if (process.env.DEBUG_MIME && this.types[exts]) { + console.warn(this._loading.replace(/.*\//, ''), 'changes "' + exts[i] + '" extension type from ' + + this.types[exts] + ' to ' + type); + } + + this.types[exts[i]] = type; + } + + // Default extension is the first one we encounter + if (!this.extensions[type]) { + this.extensions[type] = exts[0]; + } + } +}; + +/** + * Load an Apache2-style ".types" file + * + * This may be called multiple times (it's expected). Where files declare + * overlapping types/extensions, the last file wins. + * + * @param file (String) path of file to load. 
+ */ +Mime.prototype.load = function(file) { + + this._loading = file; + // Read file and split into lines + var map = {}, + content = fs.readFileSync(file, 'ascii'), + lines = content.split(/[\r\n]+/); + + lines.forEach(function(line) { + // Clean up whitespace/comments, and split into fields + var fields = line.replace(/\s*#.*|^\s*|\s*$/g, '').split(/\s+/); + map[fields.shift()] = fields; + }); + + this.define(map); + + this._loading = null; +}; + +/** + * Lookup a mime type based on extension + */ +Mime.prototype.lookup = function(path, fallback) { + var ext = path.replace(/.*[\.\/\\]/, '').toLowerCase(); + + return this.types[ext] || fallback || this.default_type; +}; + +/** + * Return file extension associated with a mime type + */ +Mime.prototype.extension = function(mimeType) { + var type = mimeType.match(/^\s*([^;\s]*)(?:;|\s|$)/)[1].toLowerCase(); + return this.extensions[type]; +}; + +// Default instance +var mime = new Mime(); + +// Load local copy of +// http://svn.apache.org/repos/asf/httpd/httpd/trunk/docs/conf/mime.types +mime.load(path.join(__dirname, 'types/mime.types')); + +// Load additional types from node.js community +mime.load(path.join(__dirname, 'types/node.types')); + +// Default type +mime.default_type = mime.lookup('bin'); + +// +// Additional API specific to the default instance +// + +mime.Mime = Mime; + +/** + * Lookup a charset based on mime type. + */ +mime.charsets = { + lookup: function(mimeType, fallback) { + // Assume text types are utf8 + return (/^text\//).test(mimeType) ? 
'UTF-8' : fallback; + } +}; + +module.exports = mime; diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +{ + "author": { + "name": "Robert Kieffer", + "email": "robert@broofa.com", + "url": "http://github.com/broofa" + }, + "contributors": [ + { + "name": "Benjamin Thomas", + "email": "benjamin@benjaminthomas.org", + "url": "http://github.com/bentomas" + } + ], + "dependencies": {}, + "description": "A comprehensive library for mime-type mapping", + "devDependencies": {}, + "keywords": [ + "util", + "mime" + ], + "main": "mime.js", + "name": "mime", + "repository": { + "url": "https://github.com/broofa/node-mime", + "type": "git" + }, + "version": "1.2.11", + "readme": "# mime\n\nComprehensive MIME type mapping API. Includes all 600+ types and 800+ extensions defined by the Apache project, plus additional types submitted by the node.js community.\n\n## Install\n\nInstall with [npm](http://github.com/isaacs/npm):\n\n npm install mime\n\n## API - Queries\n\n### mime.lookup(path)\nGet the mime type associated with a file, if no mime type is found `application/octet-stream` is returned. Performs a case-insensitive lookup using the extension in `path` (the substring after the last '/' or '.'). E.g.\n\n var mime = require('mime');\n\n mime.lookup('/path/to/file.txt'); // => 'text/plain'\n mime.lookup('file.txt'); // => 'text/plain'\n mime.lookup('.TXT'); // => 'text/plain'\n mime.lookup('htm'); // => 'text/html'\n\n### mime.default_type\nSets the mime type returned when `mime.lookup` fails to find the extension searched for. 
(Default is `application/octet-stream`.)\n\n### mime.extension(type)\nGet the default extension for `type`\n\n mime.extension('text/html'); // => 'html'\n mime.extension('application/octet-stream'); // => 'bin'\n\n### mime.charsets.lookup()\n\nMap mime-type to charset\n\n mime.charsets.lookup('text/plain'); // => 'UTF-8'\n\n(The logic for charset lookups is pretty rudimentary. Feel free to suggest improvements.)\n\n## API - Defining Custom Types\n\nThe following APIs allow you to add your own type mappings within your project. If you feel a type should be included as part of node-mime, see [requesting new types](https://github.com/broofa/node-mime/wiki/Requesting-New-Types).\n\n### mime.define()\n\nAdd custom mime/extension mappings\n\n mime.define({\n 'text/x-some-format': ['x-sf', 'x-sft', 'x-sfml'],\n 'application/x-my-type': ['x-mt', 'x-mtt'],\n // etc ...\n });\n\n mime.lookup('x-sft'); // => 'text/x-some-format'\n\nThe first entry in the extensions array is returned by `mime.extension()`. 
E.g.\n\n mime.extension('text/x-some-format'); // => 'x-sf'\n\n### mime.load(filepath)\n\nLoad mappings from an Apache \".types\" format file\n\n mime.load('./my_project.types');\n\nThe .types file format is simple - See the `types` dir for examples.\n", + "readmeFilename": "README.md", + "bugs": { + "url": "https://github.com/broofa/node-mime/issues" + }, + "_id": "mime@1.2.11", + "dist": { + "shasum": "58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10", + "tarball": "http://registry.npmjs.org/mime/-/mime-1.2.11.tgz" + }, + "_from": "mime@>=1.2.11 <1.3.0", + "_npmVersion": "1.3.6", + "_npmUser": { + "name": "broofa", + "email": "robert@broofa.com" + }, + "maintainers": [ + { + "name": "broofa", + "email": "robert@broofa.com" + }, + { + "name": "bentomas", + "email": "benjamin@benjaminthomas.org" + } + ], + "directories": {}, + "_shasum": "58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10", + "_resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz", + "homepage": "https://github.com/broofa/node-mime", + "scripts": {} +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +# mime + +Comprehensive MIME type mapping API. Includes all 600+ types and 800+ extensions defined by the Apache project, plus additional types submitted by the node.js community. + +## Install + +Install with [npm](http://github.com/isaacs/npm): + + npm install mime + +## API - Queries + +### mime.lookup(path) +Get the mime type associated with a file, if no mime type is found `application/octet-stream` is returned. 
Performs a case-insensitive lookup using the extension in `path` (the substring after the last '/' or '.'). E.g. + + var mime = require('mime'); + + mime.lookup('/path/to/file.txt'); // => 'text/plain' + mime.lookup('file.txt'); // => 'text/plain' + mime.lookup('.TXT'); // => 'text/plain' + mime.lookup('htm'); // => 'text/html' + +### mime.default_type +Sets the mime type returned when `mime.lookup` fails to find the extension searched for. (Default is `application/octet-stream`.) + +### mime.extension(type) +Get the default extension for `type` + + mime.extension('text/html'); // => 'html' + mime.extension('application/octet-stream'); // => 'bin' + +### mime.charsets.lookup() + +Map mime-type to charset + + mime.charsets.lookup('text/plain'); // => 'UTF-8' + +(The logic for charset lookups is pretty rudimentary. Feel free to suggest improvements.) + +## API - Defining Custom Types + +The following APIs allow you to add your own type mappings within your project. If you feel a type should be included as part of node-mime, see [requesting new types](https://github.com/broofa/node-mime/wiki/Requesting-New-Types). + +### mime.define() + +Add custom mime/extension mappings + + mime.define({ + 'text/x-some-format': ['x-sf', 'x-sft', 'x-sfml'], + 'application/x-my-type': ['x-mt', 'x-mtt'], + // etc ... + }); + + mime.lookup('x-sft'); // => 'text/x-some-format' + +The first entry in the extensions array is returned by `mime.extension()`. E.g. + + mime.extension('text/x-some-format'); // => 'x-sf' + +### mime.load(filepath) + +Load mappings from an Apache ".types" format file + + mime.load('./my_project.types'); + +The .types file format is simple - See the `types` dir for examples. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/test.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/test.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/test.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,84 @@ +/** + * Usage: node test.js + */ + +var mime = require('./mime'); +var assert = require('assert'); +var path = require('path'); + +function eq(a, b) { + console.log('Test: ' + a + ' === ' + b); + assert.strictEqual.apply(null, arguments); +} + +console.log(Object.keys(mime.extensions).length + ' types'); +console.log(Object.keys(mime.types).length + ' extensions\n'); + +// +// Test mime lookups +// + +eq('text/plain', mime.lookup('text.txt')); // normal file +eq('text/plain', mime.lookup('TEXT.TXT')); // uppercase +eq('text/plain', mime.lookup('dir/text.txt')); // dir + file +eq('text/plain', mime.lookup('.text.txt')); // hidden file +eq('text/plain', mime.lookup('.txt')); // nameless +eq('text/plain', mime.lookup('txt')); // extension-only +eq('text/plain', mime.lookup('/txt')); // extension-less () +eq('text/plain', mime.lookup('\\txt')); // Windows, extension-less +eq('application/octet-stream', mime.lookup('text.nope')); // unrecognized +eq('fallback', mime.lookup('text.fallback', 'fallback')); // alternate default + +// +// Test extensions +// + +eq('txt', mime.extension(mime.types.text)); +eq('html', mime.extension(mime.types.htm)); +eq('bin', mime.extension('application/octet-stream')); +eq('bin', mime.extension('application/octet-stream ')); +eq('html', mime.extension(' text/html; charset=UTF-8')); +eq('html', mime.extension('text/html; charset=UTF-8 ')); +eq('html', mime.extension('text/html; charset=UTF-8')); +eq('html', mime.extension('text/html ; charset=UTF-8')); +eq('html', 
mime.extension('text/html;charset=UTF-8')); +eq('html', mime.extension('text/Html;charset=UTF-8')); +eq(undefined, mime.extension('unrecognized')); + +// +// Test node.types lookups +// + +eq('application/font-woff', mime.lookup('file.woff')); +eq('application/octet-stream', mime.lookup('file.buffer')); +eq('audio/mp4', mime.lookup('file.m4a')); +eq('font/opentype', mime.lookup('file.otf')); + +// +// Test charsets +// + +eq('UTF-8', mime.charsets.lookup('text/plain')); +eq(undefined, mime.charsets.lookup(mime.types.js)); +eq('fallback', mime.charsets.lookup('application/octet-stream', 'fallback')); + +// +// Test for overlaps between mime.types and node.types +// + +var apacheTypes = new mime.Mime(), nodeTypes = new mime.Mime(); +apacheTypes.load(path.join(__dirname, 'types/mime.types')); +nodeTypes.load(path.join(__dirname, 'types/node.types')); + +var keys = [].concat(Object.keys(apacheTypes.types)) + .concat(Object.keys(nodeTypes.types)); +keys.sort(); +for (var i = 1; i < keys.length; i++) { + if (keys[i] == keys[i-1]) { + console.warn('Warning: ' + + 'node.types defines ' + keys[i] + '->' + nodeTypes.types[keys[i]] + + ', mime.types defines ' + keys[i] + '->' + apacheTypes.types[keys[i]]); + } +} + +console.log('\nOK'); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/mime.types nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/mime.types --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/mime.types 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/mime.types 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1588 @@ +# This file maps Internet media types to unique file extension(s). +# Although created for httpd, this file is used by many software systems +# and has been placed in the public domain for unlimited redisribution. 
+# +# The table below contains both registered and (common) unregistered types. +# A type that has no unique extension can be ignored -- they are listed +# here to guide configurations toward known types and to make it easier to +# identify "new" types. File extensions are also commonly used to indicate +# content languages and encodings, so choose them carefully. +# +# Internet media types should be registered as described in RFC 4288. +# The registry is at . +# +# MIME type (lowercased) Extensions +# ============================================ ========== +# application/1d-interleaved-parityfec +# application/3gpp-ims+xml +# application/activemessage +application/andrew-inset ez +# application/applefile +application/applixware aw +application/atom+xml atom +application/atomcat+xml atomcat +# application/atomicmail +application/atomsvc+xml atomsvc +# application/auth-policy+xml +# application/batch-smtp +# application/beep+xml +# application/calendar+xml +# application/cals-1840 +# application/ccmp+xml +application/ccxml+xml ccxml +application/cdmi-capability cdmia +application/cdmi-container cdmic +application/cdmi-domain cdmid +application/cdmi-object cdmio +application/cdmi-queue cdmiq +# application/cea-2018+xml +# application/cellml+xml +# application/cfw +# application/cnrp+xml +# application/commonground +# application/conference-info+xml +# application/cpl+xml +# application/csta+xml +# application/cstadata+xml +application/cu-seeme cu +# application/cybercash +application/davmount+xml davmount +# application/dca-rft +# application/dec-dx +# application/dialog-info+xml +# application/dicom +# application/dns +application/docbook+xml dbk +# application/dskpp+xml +application/dssc+der dssc +application/dssc+xml xdssc +# application/dvcs +application/ecmascript ecma +# application/edi-consent +# application/edi-x12 +# application/edifact +application/emma+xml emma +# application/epp+xml +application/epub+zip epub +# application/eshop +# application/example 
+application/exi exi +# application/fastinfoset +# application/fastsoap +# application/fits +application/font-tdpfr pfr +# application/framework-attributes+xml +application/gml+xml gml +application/gpx+xml gpx +application/gxf gxf +# application/h224 +# application/held+xml +# application/http +application/hyperstudio stk +# application/ibe-key-request+xml +# application/ibe-pkg-reply+xml +# application/ibe-pp-data +# application/iges +# application/im-iscomposing+xml +# application/index +# application/index.cmd +# application/index.obj +# application/index.response +# application/index.vnd +application/inkml+xml ink inkml +# application/iotp +application/ipfix ipfix +# application/ipp +# application/isup +application/java-archive jar +application/java-serialized-object ser +application/java-vm class +application/javascript js +application/json json +application/jsonml+json jsonml +# application/kpml-request+xml +# application/kpml-response+xml +application/lost+xml lostxml +application/mac-binhex40 hqx +application/mac-compactpro cpt +# application/macwriteii +application/mads+xml mads +application/marc mrc +application/marcxml+xml mrcx +application/mathematica ma nb mb +# application/mathml-content+xml +# application/mathml-presentation+xml +application/mathml+xml mathml +# application/mbms-associated-procedure-description+xml +# application/mbms-deregister+xml +# application/mbms-envelope+xml +# application/mbms-msk+xml +# application/mbms-msk-response+xml +# application/mbms-protection-description+xml +# application/mbms-reception-report+xml +# application/mbms-register+xml +# application/mbms-register-response+xml +# application/mbms-user-service-description+xml +application/mbox mbox +# application/media_control+xml +application/mediaservercontrol+xml mscml +application/metalink+xml metalink +application/metalink4+xml meta4 +application/mets+xml mets +# application/mikey +application/mods+xml mods +# application/moss-keys +# application/moss-signature +# 
application/mosskey-data +# application/mosskey-request +application/mp21 m21 mp21 +application/mp4 mp4s +# application/mpeg4-generic +# application/mpeg4-iod +# application/mpeg4-iod-xmt +# application/msc-ivr+xml +# application/msc-mixer+xml +application/msword doc dot +application/mxf mxf +# application/nasdata +# application/news-checkgroups +# application/news-groupinfo +# application/news-transmission +# application/nss +# application/ocsp-request +# application/ocsp-response +application/octet-stream bin dms lrf mar so dist distz pkg bpk dump elc deploy +application/oda oda +application/oebps-package+xml opf +application/ogg ogx +application/omdoc+xml omdoc +application/onenote onetoc onetoc2 onetmp onepkg +application/oxps oxps +# application/parityfec +application/patch-ops-error+xml xer +application/pdf pdf +application/pgp-encrypted pgp +# application/pgp-keys +application/pgp-signature asc sig +application/pics-rules prf +# application/pidf+xml +# application/pidf-diff+xml +application/pkcs10 p10 +application/pkcs7-mime p7m p7c +application/pkcs7-signature p7s +application/pkcs8 p8 +application/pkix-attr-cert ac +application/pkix-cert cer +application/pkix-crl crl +application/pkix-pkipath pkipath +application/pkixcmp pki +application/pls+xml pls +# application/poc-settings+xml +application/postscript ai eps ps +# application/prs.alvestrand.titrax-sheet +application/prs.cww cww +# application/prs.nprend +# application/prs.plucker +# application/prs.rdf-xml-crypt +# application/prs.xsf+xml +application/pskc+xml pskcxml +# application/qsig +application/rdf+xml rdf +application/reginfo+xml rif +application/relax-ng-compact-syntax rnc +# application/remote-printing +application/resource-lists+xml rl +application/resource-lists-diff+xml rld +# application/riscos +# application/rlmi+xml +application/rls-services+xml rs +application/rpki-ghostbusters gbr +application/rpki-manifest mft +application/rpki-roa roa +# application/rpki-updown +application/rsd+xml 
rsd +application/rss+xml rss +application/rtf rtf +# application/rtx +# application/samlassertion+xml +# application/samlmetadata+xml +application/sbml+xml sbml +application/scvp-cv-request scq +application/scvp-cv-response scs +application/scvp-vp-request spq +application/scvp-vp-response spp +application/sdp sdp +# application/set-payment +application/set-payment-initiation setpay +# application/set-registration +application/set-registration-initiation setreg +# application/sgml +# application/sgml-open-catalog +application/shf+xml shf +# application/sieve +# application/simple-filter+xml +# application/simple-message-summary +# application/simplesymbolcontainer +# application/slate +# application/smil +application/smil+xml smi smil +# application/soap+fastinfoset +# application/soap+xml +application/sparql-query rq +application/sparql-results+xml srx +# application/spirits-event+xml +application/srgs gram +application/srgs+xml grxml +application/sru+xml sru +application/ssdl+xml ssdl +application/ssml+xml ssml +# application/tamp-apex-update +# application/tamp-apex-update-confirm +# application/tamp-community-update +# application/tamp-community-update-confirm +# application/tamp-error +# application/tamp-sequence-adjust +# application/tamp-sequence-adjust-confirm +# application/tamp-status-query +# application/tamp-status-response +# application/tamp-update +# application/tamp-update-confirm +application/tei+xml tei teicorpus +application/thraud+xml tfi +# application/timestamp-query +# application/timestamp-reply +application/timestamped-data tsd +# application/tve-trigger +# application/ulpfec +# application/vcard+xml +# application/vemmi +# application/vividence.scriptfile +# application/vnd.3gpp.bsf+xml +application/vnd.3gpp.pic-bw-large plb +application/vnd.3gpp.pic-bw-small psb +application/vnd.3gpp.pic-bw-var pvb +# application/vnd.3gpp.sms +# application/vnd.3gpp2.bcmcsinfo+xml +# application/vnd.3gpp2.sms +application/vnd.3gpp2.tcap tcap 
+application/vnd.3m.post-it-notes pwn +application/vnd.accpac.simply.aso aso +application/vnd.accpac.simply.imp imp +application/vnd.acucobol acu +application/vnd.acucorp atc acutc +application/vnd.adobe.air-application-installer-package+zip air +application/vnd.adobe.formscentral.fcdt fcdt +application/vnd.adobe.fxp fxp fxpl +# application/vnd.adobe.partial-upload +application/vnd.adobe.xdp+xml xdp +application/vnd.adobe.xfdf xfdf +# application/vnd.aether.imp +# application/vnd.ah-barcode +application/vnd.ahead.space ahead +application/vnd.airzip.filesecure.azf azf +application/vnd.airzip.filesecure.azs azs +application/vnd.amazon.ebook azw +application/vnd.americandynamics.acc acc +application/vnd.amiga.ami ami +# application/vnd.amundsen.maze+xml +application/vnd.android.package-archive apk +application/vnd.anser-web-certificate-issue-initiation cii +application/vnd.anser-web-funds-transfer-initiation fti +application/vnd.antix.game-component atx +application/vnd.apple.installer+xml mpkg +application/vnd.apple.mpegurl m3u8 +# application/vnd.arastra.swi +application/vnd.aristanetworks.swi swi +application/vnd.astraea-software.iota iota +application/vnd.audiograph aep +# application/vnd.autopackage +# application/vnd.avistar+xml +application/vnd.blueice.multipass mpm +# application/vnd.bluetooth.ep.oob +application/vnd.bmi bmi +application/vnd.businessobjects rep +# application/vnd.cab-jscript +# application/vnd.canon-cpdl +# application/vnd.canon-lips +# application/vnd.cendio.thinlinc.clientconf +application/vnd.chemdraw+xml cdxml +application/vnd.chipnuts.karaoke-mmd mmd +application/vnd.cinderella cdy +# application/vnd.cirpack.isdn-ext +application/vnd.claymore cla +application/vnd.cloanto.rp9 rp9 +application/vnd.clonk.c4group c4g c4d c4f c4p c4u +application/vnd.cluetrust.cartomobile-config c11amc +application/vnd.cluetrust.cartomobile-config-pkg c11amz +# application/vnd.collection+json +# application/vnd.commerce-battelle +application/vnd.commonspace 
csp +application/vnd.contact.cmsg cdbcmsg +application/vnd.cosmocaller cmc +application/vnd.crick.clicker clkx +application/vnd.crick.clicker.keyboard clkk +application/vnd.crick.clicker.palette clkp +application/vnd.crick.clicker.template clkt +application/vnd.crick.clicker.wordbank clkw +application/vnd.criticaltools.wbs+xml wbs +application/vnd.ctc-posml pml +# application/vnd.ctct.ws+xml +# application/vnd.cups-pdf +# application/vnd.cups-postscript +application/vnd.cups-ppd ppd +# application/vnd.cups-raster +# application/vnd.cups-raw +# application/vnd.curl +application/vnd.curl.car car +application/vnd.curl.pcurl pcurl +# application/vnd.cybank +application/vnd.dart dart +application/vnd.data-vision.rdz rdz +application/vnd.dece.data uvf uvvf uvd uvvd +application/vnd.dece.ttml+xml uvt uvvt +application/vnd.dece.unspecified uvx uvvx +application/vnd.dece.zip uvz uvvz +application/vnd.denovo.fcselayout-link fe_launch +# application/vnd.dir-bi.plate-dl-nosuffix +application/vnd.dna dna +application/vnd.dolby.mlp mlp +# application/vnd.dolby.mobile.1 +# application/vnd.dolby.mobile.2 +application/vnd.dpgraph dpg +application/vnd.dreamfactory dfac +application/vnd.ds-keypoint kpxx +application/vnd.dvb.ait ait +# application/vnd.dvb.dvbj +# application/vnd.dvb.esgcontainer +# application/vnd.dvb.ipdcdftnotifaccess +# application/vnd.dvb.ipdcesgaccess +# application/vnd.dvb.ipdcesgaccess2 +# application/vnd.dvb.ipdcesgpdd +# application/vnd.dvb.ipdcroaming +# application/vnd.dvb.iptv.alfec-base +# application/vnd.dvb.iptv.alfec-enhancement +# application/vnd.dvb.notif-aggregate-root+xml +# application/vnd.dvb.notif-container+xml +# application/vnd.dvb.notif-generic+xml +# application/vnd.dvb.notif-ia-msglist+xml +# application/vnd.dvb.notif-ia-registration-request+xml +# application/vnd.dvb.notif-ia-registration-response+xml +# application/vnd.dvb.notif-init+xml +# application/vnd.dvb.pfr +application/vnd.dvb.service svc +# application/vnd.dxr 
+application/vnd.dynageo geo +# application/vnd.easykaraoke.cdgdownload +# application/vnd.ecdis-update +application/vnd.ecowin.chart mag +# application/vnd.ecowin.filerequest +# application/vnd.ecowin.fileupdate +# application/vnd.ecowin.series +# application/vnd.ecowin.seriesrequest +# application/vnd.ecowin.seriesupdate +# application/vnd.emclient.accessrequest+xml +application/vnd.enliven nml +# application/vnd.eprints.data+xml +application/vnd.epson.esf esf +application/vnd.epson.msf msf +application/vnd.epson.quickanime qam +application/vnd.epson.salt slt +application/vnd.epson.ssf ssf +# application/vnd.ericsson.quickcall +application/vnd.eszigno3+xml es3 et3 +# application/vnd.etsi.aoc+xml +# application/vnd.etsi.cug+xml +# application/vnd.etsi.iptvcommand+xml +# application/vnd.etsi.iptvdiscovery+xml +# application/vnd.etsi.iptvprofile+xml +# application/vnd.etsi.iptvsad-bc+xml +# application/vnd.etsi.iptvsad-cod+xml +# application/vnd.etsi.iptvsad-npvr+xml +# application/vnd.etsi.iptvservice+xml +# application/vnd.etsi.iptvsync+xml +# application/vnd.etsi.iptvueprofile+xml +# application/vnd.etsi.mcid+xml +# application/vnd.etsi.overload-control-policy-dataset+xml +# application/vnd.etsi.sci+xml +# application/vnd.etsi.simservs+xml +# application/vnd.etsi.tsl+xml +# application/vnd.etsi.tsl.der +# application/vnd.eudora.data +application/vnd.ezpix-album ez2 +application/vnd.ezpix-package ez3 +# application/vnd.f-secure.mobile +application/vnd.fdf fdf +application/vnd.fdsn.mseed mseed +application/vnd.fdsn.seed seed dataless +# application/vnd.ffsns +# application/vnd.fints +application/vnd.flographit gph +application/vnd.fluxtime.clip ftc +# application/vnd.font-fontforge-sfd +application/vnd.framemaker fm frame maker book +application/vnd.frogans.fnc fnc +application/vnd.frogans.ltf ltf +application/vnd.fsc.weblaunch fsc +application/vnd.fujitsu.oasys oas +application/vnd.fujitsu.oasys2 oa2 +application/vnd.fujitsu.oasys3 oa3 
+application/vnd.fujitsu.oasysgp fg5 +application/vnd.fujitsu.oasysprs bh2 +# application/vnd.fujixerox.art-ex +# application/vnd.fujixerox.art4 +# application/vnd.fujixerox.hbpl +application/vnd.fujixerox.ddd ddd +application/vnd.fujixerox.docuworks xdw +application/vnd.fujixerox.docuworks.binder xbd +# application/vnd.fut-misnet +application/vnd.fuzzysheet fzs +application/vnd.genomatix.tuxedo txd +# application/vnd.geocube+xml +application/vnd.geogebra.file ggb +application/vnd.geogebra.tool ggt +application/vnd.geometry-explorer gex gre +application/vnd.geonext gxt +application/vnd.geoplan g2w +application/vnd.geospace g3w +# application/vnd.globalplatform.card-content-mgt +# application/vnd.globalplatform.card-content-mgt-response +application/vnd.gmx gmx +application/vnd.google-earth.kml+xml kml +application/vnd.google-earth.kmz kmz +application/vnd.grafeq gqf gqs +# application/vnd.gridmp +application/vnd.groove-account gac +application/vnd.groove-help ghf +application/vnd.groove-identity-message gim +application/vnd.groove-injector grv +application/vnd.groove-tool-message gtm +application/vnd.groove-tool-template tpl +application/vnd.groove-vcard vcg +# application/vnd.hal+json +application/vnd.hal+xml hal +application/vnd.handheld-entertainment+xml zmm +application/vnd.hbci hbci +# application/vnd.hcl-bireports +application/vnd.hhe.lesson-player les +application/vnd.hp-hpgl hpgl +application/vnd.hp-hpid hpid +application/vnd.hp-hps hps +application/vnd.hp-jlyt jlt +application/vnd.hp-pcl pcl +application/vnd.hp-pclxl pclxl +# application/vnd.httphone +application/vnd.hydrostatix.sof-data sfd-hdstx +# application/vnd.hzn-3d-crossword +# application/vnd.ibm.afplinedata +# application/vnd.ibm.electronic-media +application/vnd.ibm.minipay mpy +application/vnd.ibm.modcap afp listafp list3820 +application/vnd.ibm.rights-management irm +application/vnd.ibm.secure-container sc +application/vnd.iccprofile icc icm +application/vnd.igloader igl 
+application/vnd.immervision-ivp ivp +application/vnd.immervision-ivu ivu +# application/vnd.informedcontrol.rms+xml +# application/vnd.informix-visionary +# application/vnd.infotech.project +# application/vnd.infotech.project+xml +# application/vnd.innopath.wamp.notification +application/vnd.insors.igm igm +application/vnd.intercon.formnet xpw xpx +application/vnd.intergeo i2g +# application/vnd.intertrust.digibox +# application/vnd.intertrust.nncp +application/vnd.intu.qbo qbo +application/vnd.intu.qfx qfx +# application/vnd.iptc.g2.conceptitem+xml +# application/vnd.iptc.g2.knowledgeitem+xml +# application/vnd.iptc.g2.newsitem+xml +# application/vnd.iptc.g2.newsmessage+xml +# application/vnd.iptc.g2.packageitem+xml +# application/vnd.iptc.g2.planningitem+xml +application/vnd.ipunplugged.rcprofile rcprofile +application/vnd.irepository.package+xml irp +application/vnd.is-xpr xpr +application/vnd.isac.fcs fcs +application/vnd.jam jam +# application/vnd.japannet-directory-service +# application/vnd.japannet-jpnstore-wakeup +# application/vnd.japannet-payment-wakeup +# application/vnd.japannet-registration +# application/vnd.japannet-registration-wakeup +# application/vnd.japannet-setstore-wakeup +# application/vnd.japannet-verification +# application/vnd.japannet-verification-wakeup +application/vnd.jcp.javame.midlet-rms rms +application/vnd.jisp jisp +application/vnd.joost.joda-archive joda +application/vnd.kahootz ktz ktr +application/vnd.kde.karbon karbon +application/vnd.kde.kchart chrt +application/vnd.kde.kformula kfo +application/vnd.kde.kivio flw +application/vnd.kde.kontour kon +application/vnd.kde.kpresenter kpr kpt +application/vnd.kde.kspread ksp +application/vnd.kde.kword kwd kwt +application/vnd.kenameaapp htke +application/vnd.kidspiration kia +application/vnd.kinar kne knp +application/vnd.koan skp skd skt skm +application/vnd.kodak-descriptor sse +application/vnd.las.las+xml lasxml +# application/vnd.liberty-request+xml 
+application/vnd.llamagraphics.life-balance.desktop lbd +application/vnd.llamagraphics.life-balance.exchange+xml lbe +application/vnd.lotus-1-2-3 123 +application/vnd.lotus-approach apr +application/vnd.lotus-freelance pre +application/vnd.lotus-notes nsf +application/vnd.lotus-organizer org +application/vnd.lotus-screencam scm +application/vnd.lotus-wordpro lwp +application/vnd.macports.portpkg portpkg +# application/vnd.marlin.drm.actiontoken+xml +# application/vnd.marlin.drm.conftoken+xml +# application/vnd.marlin.drm.license+xml +# application/vnd.marlin.drm.mdcf +application/vnd.mcd mcd +application/vnd.medcalcdata mc1 +application/vnd.mediastation.cdkey cdkey +# application/vnd.meridian-slingshot +application/vnd.mfer mwf +application/vnd.mfmp mfm +application/vnd.micrografx.flo flo +application/vnd.micrografx.igx igx +application/vnd.mif mif +# application/vnd.minisoft-hp3000-save +# application/vnd.mitsubishi.misty-guard.trustweb +application/vnd.mobius.daf daf +application/vnd.mobius.dis dis +application/vnd.mobius.mbk mbk +application/vnd.mobius.mqy mqy +application/vnd.mobius.msl msl +application/vnd.mobius.plc plc +application/vnd.mobius.txf txf +application/vnd.mophun.application mpn +application/vnd.mophun.certificate mpc +# application/vnd.motorola.flexsuite +# application/vnd.motorola.flexsuite.adsi +# application/vnd.motorola.flexsuite.fis +# application/vnd.motorola.flexsuite.gotap +# application/vnd.motorola.flexsuite.kmr +# application/vnd.motorola.flexsuite.ttc +# application/vnd.motorola.flexsuite.wem +# application/vnd.motorola.iprm +application/vnd.mozilla.xul+xml xul +application/vnd.ms-artgalry cil +# application/vnd.ms-asf +application/vnd.ms-cab-compressed cab +# application/vnd.ms-color.iccprofile +application/vnd.ms-excel xls xlm xla xlc xlt xlw +application/vnd.ms-excel.addin.macroenabled.12 xlam +application/vnd.ms-excel.sheet.binary.macroenabled.12 xlsb +application/vnd.ms-excel.sheet.macroenabled.12 xlsm 
+application/vnd.ms-excel.template.macroenabled.12 xltm +application/vnd.ms-fontobject eot +application/vnd.ms-htmlhelp chm +application/vnd.ms-ims ims +application/vnd.ms-lrm lrm +# application/vnd.ms-office.activex+xml +application/vnd.ms-officetheme thmx +# application/vnd.ms-opentype +# application/vnd.ms-package.obfuscated-opentype +application/vnd.ms-pki.seccat cat +application/vnd.ms-pki.stl stl +# application/vnd.ms-playready.initiator+xml +application/vnd.ms-powerpoint ppt pps pot +application/vnd.ms-powerpoint.addin.macroenabled.12 ppam +application/vnd.ms-powerpoint.presentation.macroenabled.12 pptm +application/vnd.ms-powerpoint.slide.macroenabled.12 sldm +application/vnd.ms-powerpoint.slideshow.macroenabled.12 ppsm +application/vnd.ms-powerpoint.template.macroenabled.12 potm +# application/vnd.ms-printing.printticket+xml +application/vnd.ms-project mpp mpt +# application/vnd.ms-tnef +# application/vnd.ms-wmdrm.lic-chlg-req +# application/vnd.ms-wmdrm.lic-resp +# application/vnd.ms-wmdrm.meter-chlg-req +# application/vnd.ms-wmdrm.meter-resp +application/vnd.ms-word.document.macroenabled.12 docm +application/vnd.ms-word.template.macroenabled.12 dotm +application/vnd.ms-works wps wks wcm wdb +application/vnd.ms-wpl wpl +application/vnd.ms-xpsdocument xps +application/vnd.mseq mseq +# application/vnd.msign +# application/vnd.multiad.creator +# application/vnd.multiad.creator.cif +# application/vnd.music-niff +application/vnd.musician mus +application/vnd.muvee.style msty +application/vnd.mynfc taglet +# application/vnd.ncd.control +# application/vnd.ncd.reference +# application/vnd.nervana +# application/vnd.netfpx +application/vnd.neurolanguage.nlu nlu +application/vnd.nitf ntf nitf +application/vnd.noblenet-directory nnd +application/vnd.noblenet-sealer nns +application/vnd.noblenet-web nnw +# application/vnd.nokia.catalogs +# application/vnd.nokia.conml+wbxml +# application/vnd.nokia.conml+xml +# application/vnd.nokia.isds-radio-presets +# 
application/vnd.nokia.iptv.config+xml +# application/vnd.nokia.landmark+wbxml +# application/vnd.nokia.landmark+xml +# application/vnd.nokia.landmarkcollection+xml +# application/vnd.nokia.n-gage.ac+xml +application/vnd.nokia.n-gage.data ngdat +application/vnd.nokia.n-gage.symbian.install n-gage +# application/vnd.nokia.ncd +# application/vnd.nokia.pcd+wbxml +# application/vnd.nokia.pcd+xml +application/vnd.nokia.radio-preset rpst +application/vnd.nokia.radio-presets rpss +application/vnd.novadigm.edm edm +application/vnd.novadigm.edx edx +application/vnd.novadigm.ext ext +# application/vnd.ntt-local.file-transfer +# application/vnd.ntt-local.sip-ta_remote +# application/vnd.ntt-local.sip-ta_tcp_stream +application/vnd.oasis.opendocument.chart odc +application/vnd.oasis.opendocument.chart-template otc +application/vnd.oasis.opendocument.database odb +application/vnd.oasis.opendocument.formula odf +application/vnd.oasis.opendocument.formula-template odft +application/vnd.oasis.opendocument.graphics odg +application/vnd.oasis.opendocument.graphics-template otg +application/vnd.oasis.opendocument.image odi +application/vnd.oasis.opendocument.image-template oti +application/vnd.oasis.opendocument.presentation odp +application/vnd.oasis.opendocument.presentation-template otp +application/vnd.oasis.opendocument.spreadsheet ods +application/vnd.oasis.opendocument.spreadsheet-template ots +application/vnd.oasis.opendocument.text odt +application/vnd.oasis.opendocument.text-master odm +application/vnd.oasis.opendocument.text-template ott +application/vnd.oasis.opendocument.text-web oth +# application/vnd.obn +# application/vnd.oftn.l10n+json +# application/vnd.oipf.contentaccessdownload+xml +# application/vnd.oipf.contentaccessstreaming+xml +# application/vnd.oipf.cspg-hexbinary +# application/vnd.oipf.dae.svg+xml +# application/vnd.oipf.dae.xhtml+xml +# application/vnd.oipf.mippvcontrolmessage+xml +# application/vnd.oipf.pae.gem +# application/vnd.oipf.spdiscovery+xml +# 
application/vnd.oipf.spdlist+xml +# application/vnd.oipf.ueprofile+xml +# application/vnd.oipf.userprofile+xml +application/vnd.olpc-sugar xo +# application/vnd.oma-scws-config +# application/vnd.oma-scws-http-request +# application/vnd.oma-scws-http-response +# application/vnd.oma.bcast.associated-procedure-parameter+xml +# application/vnd.oma.bcast.drm-trigger+xml +# application/vnd.oma.bcast.imd+xml +# application/vnd.oma.bcast.ltkm +# application/vnd.oma.bcast.notification+xml +# application/vnd.oma.bcast.provisioningtrigger +# application/vnd.oma.bcast.sgboot +# application/vnd.oma.bcast.sgdd+xml +# application/vnd.oma.bcast.sgdu +# application/vnd.oma.bcast.simple-symbol-container +# application/vnd.oma.bcast.smartcard-trigger+xml +# application/vnd.oma.bcast.sprov+xml +# application/vnd.oma.bcast.stkm +# application/vnd.oma.cab-address-book+xml +# application/vnd.oma.cab-feature-handler+xml +# application/vnd.oma.cab-pcc+xml +# application/vnd.oma.cab-user-prefs+xml +# application/vnd.oma.dcd +# application/vnd.oma.dcdc +application/vnd.oma.dd2+xml dd2 +# application/vnd.oma.drm.risd+xml +# application/vnd.oma.group-usage-list+xml +# application/vnd.oma.pal+xml +# application/vnd.oma.poc.detailed-progress-report+xml +# application/vnd.oma.poc.final-report+xml +# application/vnd.oma.poc.groups+xml +# application/vnd.oma.poc.invocation-descriptor+xml +# application/vnd.oma.poc.optimized-progress-report+xml +# application/vnd.oma.push +# application/vnd.oma.scidm.messages+xml +# application/vnd.oma.xcap-directory+xml +# application/vnd.omads-email+xml +# application/vnd.omads-file+xml +# application/vnd.omads-folder+xml +# application/vnd.omaloc-supl-init +application/vnd.openofficeorg.extension oxt +# application/vnd.openxmlformats-officedocument.custom-properties+xml +# application/vnd.openxmlformats-officedocument.customxmlproperties+xml +# application/vnd.openxmlformats-officedocument.drawing+xml +# 
application/vnd.openxmlformats-officedocument.drawingml.chart+xml +# application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml +# application/vnd.openxmlformats-officedocument.drawingml.diagramcolors+xml +# application/vnd.openxmlformats-officedocument.drawingml.diagramdata+xml +# application/vnd.openxmlformats-officedocument.drawingml.diagramlayout+xml +# application/vnd.openxmlformats-officedocument.drawingml.diagramstyle+xml +# application/vnd.openxmlformats-officedocument.extended-properties+xml +# application/vnd.openxmlformats-officedocument.presentationml.commentauthors+xml +# application/vnd.openxmlformats-officedocument.presentationml.comments+xml +# application/vnd.openxmlformats-officedocument.presentationml.handoutmaster+xml +# application/vnd.openxmlformats-officedocument.presentationml.notesmaster+xml +# application/vnd.openxmlformats-officedocument.presentationml.notesslide+xml +application/vnd.openxmlformats-officedocument.presentationml.presentation pptx +# application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml +# application/vnd.openxmlformats-officedocument.presentationml.presprops+xml +application/vnd.openxmlformats-officedocument.presentationml.slide sldx +# application/vnd.openxmlformats-officedocument.presentationml.slide+xml +# application/vnd.openxmlformats-officedocument.presentationml.slidelayout+xml +# application/vnd.openxmlformats-officedocument.presentationml.slidemaster+xml +application/vnd.openxmlformats-officedocument.presentationml.slideshow ppsx +# application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml +# application/vnd.openxmlformats-officedocument.presentationml.slideupdateinfo+xml +# application/vnd.openxmlformats-officedocument.presentationml.tablestyles+xml +# application/vnd.openxmlformats-officedocument.presentationml.tags+xml +application/vnd.openxmlformats-officedocument.presentationml.template potx +# 
application/vnd.openxmlformats-officedocument.presentationml.template.main+xml +# application/vnd.openxmlformats-officedocument.presentationml.viewprops+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.calcchain+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.externallink+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcachedefinition+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcacherecords+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.pivottable+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.querytable+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.revisionheaders+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.revisionlog+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.sharedstrings+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx +# application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.sheetmetadata+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.tablesinglecells+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.template xltx +# application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.usernames+xml +# application/vnd.openxmlformats-officedocument.spreadsheetml.volatiledependencies+xml +# 
application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml +# application/vnd.openxmlformats-officedocument.theme+xml +# application/vnd.openxmlformats-officedocument.themeoverride+xml +# application/vnd.openxmlformats-officedocument.vmldrawing +# application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document docx +# application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.fonttable+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.template dotx +# application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml +# application/vnd.openxmlformats-officedocument.wordprocessingml.websettings+xml +# application/vnd.openxmlformats-package.core-properties+xml +# application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml +# application/vnd.openxmlformats-package.relationships+xml +# application/vnd.quobject-quoxdocument +# application/vnd.osa.netdeploy +application/vnd.osgeo.mapguide.package mgp +# application/vnd.osgi.bundle +application/vnd.osgi.dp dp +application/vnd.osgi.subsystem esa +# application/vnd.otps.ct-kip+xml +application/vnd.palm pdb pqa oprc +# application/vnd.paos.xml +application/vnd.pawaafile paw +application/vnd.pg.format str +application/vnd.pg.osasli ei6 +# 
application/vnd.piaccess.application-licence +application/vnd.picsel efif +application/vnd.pmi.widget wg +# application/vnd.poc.group-advertisement+xml +application/vnd.pocketlearn plf +application/vnd.powerbuilder6 pbd +# application/vnd.powerbuilder6-s +# application/vnd.powerbuilder7 +# application/vnd.powerbuilder7-s +# application/vnd.powerbuilder75 +# application/vnd.powerbuilder75-s +# application/vnd.preminet +application/vnd.previewsystems.box box +application/vnd.proteus.magazine mgz +application/vnd.publishare-delta-tree qps +application/vnd.pvi.ptid1 ptid +# application/vnd.pwg-multiplexed +# application/vnd.pwg-xhtml-print+xml +# application/vnd.qualcomm.brew-app-res +application/vnd.quark.quarkxpress qxd qxt qwd qwt qxl qxb +# application/vnd.radisys.moml+xml +# application/vnd.radisys.msml+xml +# application/vnd.radisys.msml-audit+xml +# application/vnd.radisys.msml-audit-conf+xml +# application/vnd.radisys.msml-audit-conn+xml +# application/vnd.radisys.msml-audit-dialog+xml +# application/vnd.radisys.msml-audit-stream+xml +# application/vnd.radisys.msml-conf+xml +# application/vnd.radisys.msml-dialog+xml +# application/vnd.radisys.msml-dialog-base+xml +# application/vnd.radisys.msml-dialog-fax-detect+xml +# application/vnd.radisys.msml-dialog-fax-sendrecv+xml +# application/vnd.radisys.msml-dialog-group+xml +# application/vnd.radisys.msml-dialog-speech+xml +# application/vnd.radisys.msml-dialog-transform+xml +# application/vnd.rainstor.data +# application/vnd.rapid +application/vnd.realvnc.bed bed +application/vnd.recordare.musicxml mxl +application/vnd.recordare.musicxml+xml musicxml +# application/vnd.renlearn.rlprint +application/vnd.rig.cryptonote cryptonote +application/vnd.rim.cod cod +application/vnd.rn-realmedia rm +application/vnd.rn-realmedia-vbr rmvb +application/vnd.route66.link66+xml link66 +# application/vnd.rs-274x +# application/vnd.ruckus.download +# application/vnd.s3sms +application/vnd.sailingtracker.track st +# 
application/vnd.sbm.cid +# application/vnd.sbm.mid2 +# application/vnd.scribus +# application/vnd.sealed.3df +# application/vnd.sealed.csf +# application/vnd.sealed.doc +# application/vnd.sealed.eml +# application/vnd.sealed.mht +# application/vnd.sealed.net +# application/vnd.sealed.ppt +# application/vnd.sealed.tiff +# application/vnd.sealed.xls +# application/vnd.sealedmedia.softseal.html +# application/vnd.sealedmedia.softseal.pdf +application/vnd.seemail see +application/vnd.sema sema +application/vnd.semd semd +application/vnd.semf semf +application/vnd.shana.informed.formdata ifm +application/vnd.shana.informed.formtemplate itp +application/vnd.shana.informed.interchange iif +application/vnd.shana.informed.package ipk +application/vnd.simtech-mindmapper twd twds +application/vnd.smaf mmf +# application/vnd.smart.notebook +application/vnd.smart.teacher teacher +# application/vnd.software602.filler.form+xml +# application/vnd.software602.filler.form-xml-zip +application/vnd.solent.sdkm+xml sdkm sdkd +application/vnd.spotfire.dxp dxp +application/vnd.spotfire.sfs sfs +# application/vnd.sss-cod +# application/vnd.sss-dtf +# application/vnd.sss-ntf +application/vnd.stardivision.calc sdc +application/vnd.stardivision.draw sda +application/vnd.stardivision.impress sdd +application/vnd.stardivision.math smf +application/vnd.stardivision.writer sdw vor +application/vnd.stardivision.writer-global sgl +application/vnd.stepmania.package smzip +application/vnd.stepmania.stepchart sm +# application/vnd.street-stream +application/vnd.sun.xml.calc sxc +application/vnd.sun.xml.calc.template stc +application/vnd.sun.xml.draw sxd +application/vnd.sun.xml.draw.template std +application/vnd.sun.xml.impress sxi +application/vnd.sun.xml.impress.template sti +application/vnd.sun.xml.math sxm +application/vnd.sun.xml.writer sxw +application/vnd.sun.xml.writer.global sxg +application/vnd.sun.xml.writer.template stw +# application/vnd.sun.wadl+xml +application/vnd.sus-calendar sus 
susp +application/vnd.svd svd +# application/vnd.swiftview-ics +application/vnd.symbian.install sis sisx +application/vnd.syncml+xml xsm +application/vnd.syncml.dm+wbxml bdm +application/vnd.syncml.dm+xml xdm +# application/vnd.syncml.dm.notification +# application/vnd.syncml.ds.notification +application/vnd.tao.intent-module-archive tao +application/vnd.tcpdump.pcap pcap cap dmp +application/vnd.tmobile-livetv tmo +application/vnd.trid.tpt tpt +application/vnd.triscape.mxs mxs +application/vnd.trueapp tra +# application/vnd.truedoc +# application/vnd.ubisoft.webplayer +application/vnd.ufdl ufd ufdl +application/vnd.uiq.theme utz +application/vnd.umajin umj +application/vnd.unity unityweb +application/vnd.uoml+xml uoml +# application/vnd.uplanet.alert +# application/vnd.uplanet.alert-wbxml +# application/vnd.uplanet.bearer-choice +# application/vnd.uplanet.bearer-choice-wbxml +# application/vnd.uplanet.cacheop +# application/vnd.uplanet.cacheop-wbxml +# application/vnd.uplanet.channel +# application/vnd.uplanet.channel-wbxml +# application/vnd.uplanet.list +# application/vnd.uplanet.list-wbxml +# application/vnd.uplanet.listcmd +# application/vnd.uplanet.listcmd-wbxml +# application/vnd.uplanet.signal +application/vnd.vcx vcx +# application/vnd.vd-study +# application/vnd.vectorworks +# application/vnd.verimatrix.vcas +# application/vnd.vidsoft.vidconference +application/vnd.visio vsd vst vss vsw +application/vnd.visionary vis +# application/vnd.vividence.scriptfile +application/vnd.vsf vsf +# application/vnd.wap.sic +# application/vnd.wap.slc +application/vnd.wap.wbxml wbxml +application/vnd.wap.wmlc wmlc +application/vnd.wap.wmlscriptc wmlsc +application/vnd.webturbo wtb +# application/vnd.wfa.wsc +# application/vnd.wmc +# application/vnd.wmf.bootstrap +# application/vnd.wolfram.mathematica +# application/vnd.wolfram.mathematica.package +application/vnd.wolfram.player nbp +application/vnd.wordperfect wpd +application/vnd.wqd wqd +# 
application/vnd.wrq-hp3000-labelled +application/vnd.wt.stf stf +# application/vnd.wv.csp+wbxml +# application/vnd.wv.csp+xml +# application/vnd.wv.ssp+xml +application/vnd.xara xar +application/vnd.xfdl xfdl +# application/vnd.xfdl.webform +# application/vnd.xmi+xml +# application/vnd.xmpie.cpkg +# application/vnd.xmpie.dpkg +# application/vnd.xmpie.plan +# application/vnd.xmpie.ppkg +# application/vnd.xmpie.xlim +application/vnd.yamaha.hv-dic hvd +application/vnd.yamaha.hv-script hvs +application/vnd.yamaha.hv-voice hvp +application/vnd.yamaha.openscoreformat osf +application/vnd.yamaha.openscoreformat.osfpvg+xml osfpvg +# application/vnd.yamaha.remote-setup +application/vnd.yamaha.smaf-audio saf +application/vnd.yamaha.smaf-phrase spf +# application/vnd.yamaha.through-ngn +# application/vnd.yamaha.tunnel-udpencap +application/vnd.yellowriver-custom-menu cmp +application/vnd.zul zir zirz +application/vnd.zzazz.deck+xml zaz +application/voicexml+xml vxml +# application/vq-rtcpxr +# application/watcherinfo+xml +# application/whoispp-query +# application/whoispp-response +application/widget wgt +application/winhlp hlp +# application/wita +# application/wordperfect5.1 +application/wsdl+xml wsdl +application/wspolicy+xml wspolicy +application/x-7z-compressed 7z +application/x-abiword abw +application/x-ace-compressed ace +# application/x-amf +application/x-apple-diskimage dmg +application/x-authorware-bin aab x32 u32 vox +application/x-authorware-map aam +application/x-authorware-seg aas +application/x-bcpio bcpio +application/x-bittorrent torrent +application/x-blorb blb blorb +application/x-bzip bz +application/x-bzip2 bz2 boz +application/x-cbr cbr cba cbt cbz cb7 +application/x-cdlink vcd +application/x-cfs-compressed cfs +application/x-chat chat +application/x-chess-pgn pgn +application/x-conference nsc +# application/x-compress +application/x-cpio cpio +application/x-csh csh +application/x-debian-package deb udeb +application/x-dgc-compressed dgc 
+application/x-director dir dcr dxr cst cct cxt w3d fgd swa +application/x-doom wad +application/x-dtbncx+xml ncx +application/x-dtbook+xml dtb +application/x-dtbresource+xml res +application/x-dvi dvi +application/x-envoy evy +application/x-eva eva +application/x-font-bdf bdf +# application/x-font-dos +# application/x-font-framemaker +application/x-font-ghostscript gsf +# application/x-font-libgrx +application/x-font-linux-psf psf +application/x-font-otf otf +application/x-font-pcf pcf +application/x-font-snf snf +# application/x-font-speedo +# application/x-font-sunos-news +application/x-font-ttf ttf ttc +application/x-font-type1 pfa pfb pfm afm +application/font-woff woff +# application/x-font-vfont +application/x-freearc arc +application/x-futuresplash spl +application/x-gca-compressed gca +application/x-glulx ulx +application/x-gnumeric gnumeric +application/x-gramps-xml gramps +application/x-gtar gtar +# application/x-gzip +application/x-hdf hdf +application/x-install-instructions install +application/x-iso9660-image iso +application/x-java-jnlp-file jnlp +application/x-latex latex +application/x-lzh-compressed lzh lha +application/x-mie mie +application/x-mobipocket-ebook prc mobi +application/x-ms-application application +application/x-ms-shortcut lnk +application/x-ms-wmd wmd +application/x-ms-wmz wmz +application/x-ms-xbap xbap +application/x-msaccess mdb +application/x-msbinder obd +application/x-mscardfile crd +application/x-msclip clp +application/x-msdownload exe dll com bat msi +application/x-msmediaview mvb m13 m14 +application/x-msmetafile wmf wmz emf emz +application/x-msmoney mny +application/x-mspublisher pub +application/x-msschedule scd +application/x-msterminal trm +application/x-mswrite wri +application/x-netcdf nc cdf +application/x-nzb nzb +application/x-pkcs12 p12 pfx +application/x-pkcs7-certificates p7b spc +application/x-pkcs7-certreqresp p7r +application/x-rar-compressed rar +application/x-research-info-systems ris +application/x-sh 
sh +application/x-shar shar +application/x-shockwave-flash swf +application/x-silverlight-app xap +application/x-sql sql +application/x-stuffit sit +application/x-stuffitx sitx +application/x-subrip srt +application/x-sv4cpio sv4cpio +application/x-sv4crc sv4crc +application/x-t3vm-image t3 +application/x-tads gam +application/x-tar tar +application/x-tcl tcl +application/x-tex tex +application/x-tex-tfm tfm +application/x-texinfo texinfo texi +application/x-tgif obj +application/x-ustar ustar +application/x-wais-source src +application/x-x509-ca-cert der crt +application/x-xfig fig +application/x-xliff+xml xlf +application/x-xpinstall xpi +application/x-xz xz +application/x-zmachine z1 z2 z3 z4 z5 z6 z7 z8 +# application/x400-bp +application/xaml+xml xaml +# application/xcap-att+xml +# application/xcap-caps+xml +application/xcap-diff+xml xdf +# application/xcap-el+xml +# application/xcap-error+xml +# application/xcap-ns+xml +# application/xcon-conference-info-diff+xml +# application/xcon-conference-info+xml +application/xenc+xml xenc +application/xhtml+xml xhtml xht +# application/xhtml-voice+xml +application/xml xml xsl +application/xml-dtd dtd +# application/xml-external-parsed-entity +# application/xmpp+xml +application/xop+xml xop +application/xproc+xml xpl +application/xslt+xml xslt +application/xspf+xml xspf +application/xv+xml mxml xhvml xvml xvm +application/yang yang +application/yin+xml yin +application/zip zip +# audio/1d-interleaved-parityfec +# audio/32kadpcm +# audio/3gpp +# audio/3gpp2 +# audio/ac3 +audio/adpcm adp +# audio/amr +# audio/amr-wb +# audio/amr-wb+ +# audio/asc +# audio/atrac-advanced-lossless +# audio/atrac-x +# audio/atrac3 +audio/basic au snd +# audio/bv16 +# audio/bv32 +# audio/clearmode +# audio/cn +# audio/dat12 +# audio/dls +# audio/dsr-es201108 +# audio/dsr-es202050 +# audio/dsr-es202211 +# audio/dsr-es202212 +# audio/dv +# audio/dvi4 +# audio/eac3 +# audio/evrc +# audio/evrc-qcp +# audio/evrc0 +# audio/evrc1 +# audio/evrcb +# 
audio/evrcb0 +# audio/evrcb1 +# audio/evrcwb +# audio/evrcwb0 +# audio/evrcwb1 +# audio/example +# audio/fwdred +# audio/g719 +# audio/g722 +# audio/g7221 +# audio/g723 +# audio/g726-16 +# audio/g726-24 +# audio/g726-32 +# audio/g726-40 +# audio/g728 +# audio/g729 +# audio/g7291 +# audio/g729d +# audio/g729e +# audio/gsm +# audio/gsm-efr +# audio/gsm-hr-08 +# audio/ilbc +# audio/ip-mr_v2.5 +# audio/isac +# audio/l16 +# audio/l20 +# audio/l24 +# audio/l8 +# audio/lpc +audio/midi mid midi kar rmi +# audio/mobile-xmf +audio/mp4 mp4a +# audio/mp4a-latm +# audio/mpa +# audio/mpa-robust +audio/mpeg mpga mp2 mp2a mp3 m2a m3a +# audio/mpeg4-generic +# audio/musepack +audio/ogg oga ogg spx +# audio/opus +# audio/parityfec +# audio/pcma +# audio/pcma-wb +# audio/pcmu-wb +# audio/pcmu +# audio/prs.sid +# audio/qcelp +# audio/red +# audio/rtp-enc-aescm128 +# audio/rtp-midi +# audio/rtx +audio/s3m s3m +audio/silk sil +# audio/smv +# audio/smv0 +# audio/smv-qcp +# audio/sp-midi +# audio/speex +# audio/t140c +# audio/t38 +# audio/telephone-event +# audio/tone +# audio/uemclip +# audio/ulpfec +# audio/vdvi +# audio/vmr-wb +# audio/vnd.3gpp.iufp +# audio/vnd.4sb +# audio/vnd.audiokoz +# audio/vnd.celp +# audio/vnd.cisco.nse +# audio/vnd.cmles.radio-events +# audio/vnd.cns.anp1 +# audio/vnd.cns.inf1 +audio/vnd.dece.audio uva uvva +audio/vnd.digital-winds eol +# audio/vnd.dlna.adts +# audio/vnd.dolby.heaac.1 +# audio/vnd.dolby.heaac.2 +# audio/vnd.dolby.mlp +# audio/vnd.dolby.mps +# audio/vnd.dolby.pl2 +# audio/vnd.dolby.pl2x +# audio/vnd.dolby.pl2z +# audio/vnd.dolby.pulse.1 +audio/vnd.dra dra +audio/vnd.dts dts +audio/vnd.dts.hd dtshd +# audio/vnd.dvb.file +# audio/vnd.everad.plj +# audio/vnd.hns.audio +audio/vnd.lucent.voice lvp +audio/vnd.ms-playready.media.pya pya +# audio/vnd.nokia.mobile-xmf +# audio/vnd.nortel.vbk +audio/vnd.nuera.ecelp4800 ecelp4800 +audio/vnd.nuera.ecelp7470 ecelp7470 +audio/vnd.nuera.ecelp9600 ecelp9600 +# audio/vnd.octel.sbc +# audio/vnd.qcelp +# 
audio/vnd.rhetorex.32kadpcm +audio/vnd.rip rip +# audio/vnd.sealedmedia.softseal.mpeg +# audio/vnd.vmx.cvsd +# audio/vorbis +# audio/vorbis-config +audio/webm weba +audio/x-aac aac +audio/x-aiff aif aiff aifc +audio/x-caf caf +audio/x-flac flac +audio/x-matroska mka +audio/x-mpegurl m3u +audio/x-ms-wax wax +audio/x-ms-wma wma +audio/x-pn-realaudio ram ra +audio/x-pn-realaudio-plugin rmp +# audio/x-tta +audio/x-wav wav +audio/xm xm +chemical/x-cdx cdx +chemical/x-cif cif +chemical/x-cmdf cmdf +chemical/x-cml cml +chemical/x-csml csml +# chemical/x-pdb +chemical/x-xyz xyz +image/bmp bmp +image/cgm cgm +# image/example +# image/fits +image/g3fax g3 +image/gif gif +image/ief ief +# image/jp2 +image/jpeg jpeg jpg jpe +# image/jpm +# image/jpx +image/ktx ktx +# image/naplps +image/png png +image/prs.btif btif +# image/prs.pti +image/sgi sgi +image/svg+xml svg svgz +# image/t38 +image/tiff tiff tif +# image/tiff-fx +image/vnd.adobe.photoshop psd +# image/vnd.cns.inf2 +image/vnd.dece.graphic uvi uvvi uvg uvvg +image/vnd.dvb.subtitle sub +image/vnd.djvu djvu djv +image/vnd.dwg dwg +image/vnd.dxf dxf +image/vnd.fastbidsheet fbs +image/vnd.fpx fpx +image/vnd.fst fst +image/vnd.fujixerox.edmics-mmr mmr +image/vnd.fujixerox.edmics-rlc rlc +# image/vnd.globalgraphics.pgb +# image/vnd.microsoft.icon +# image/vnd.mix +image/vnd.ms-modi mdi +image/vnd.ms-photo wdp +image/vnd.net-fpx npx +# image/vnd.radiance +# image/vnd.sealed.png +# image/vnd.sealedmedia.softseal.gif +# image/vnd.sealedmedia.softseal.jpg +# image/vnd.svf +image/vnd.wap.wbmp wbmp +image/vnd.xiff xif +image/webp webp +image/x-3ds 3ds +image/x-cmu-raster ras +image/x-cmx cmx +image/x-freehand fh fhc fh4 fh5 fh7 +image/x-icon ico +image/x-mrsid-image sid +image/x-pcx pcx +image/x-pict pic pct +image/x-portable-anymap pnm +image/x-portable-bitmap pbm +image/x-portable-graymap pgm +image/x-portable-pixmap ppm +image/x-rgb rgb +image/x-tga tga +image/x-xbitmap xbm +image/x-xpixmap xpm +image/x-xwindowdump xwd +# 
message/cpim +# message/delivery-status +# message/disposition-notification +# message/example +# message/external-body +# message/feedback-report +# message/global +# message/global-delivery-status +# message/global-disposition-notification +# message/global-headers +# message/http +# message/imdn+xml +# message/news +# message/partial +message/rfc822 eml mime +# message/s-http +# message/sip +# message/sipfrag +# message/tracking-status +# message/vnd.si.simp +# model/example +model/iges igs iges +model/mesh msh mesh silo +model/vnd.collada+xml dae +model/vnd.dwf dwf +# model/vnd.flatland.3dml +model/vnd.gdl gdl +# model/vnd.gs-gdl +# model/vnd.gs.gdl +model/vnd.gtw gtw +# model/vnd.moml+xml +model/vnd.mts mts +# model/vnd.parasolid.transmit.binary +# model/vnd.parasolid.transmit.text +model/vnd.vtu vtu +model/vrml wrl vrml +model/x3d+binary x3db x3dbz +model/x3d+vrml x3dv x3dvz +model/x3d+xml x3d x3dz +# multipart/alternative +# multipart/appledouble +# multipart/byteranges +# multipart/digest +# multipart/encrypted +# multipart/example +# multipart/form-data +# multipart/header-set +# multipart/mixed +# multipart/parallel +# multipart/related +# multipart/report +# multipart/signed +# multipart/voice-message +# text/1d-interleaved-parityfec +text/cache-manifest appcache +text/calendar ics ifb +text/css css +text/csv csv +# text/directory +# text/dns +# text/ecmascript +# text/enriched +# text/example +# text/fwdred +text/html html htm +# text/javascript +text/n3 n3 +# text/parityfec +text/plain txt text conf def list log in +# text/prs.fallenstein.rst +text/prs.lines.tag dsc +# text/vnd.radisys.msml-basic-layout +# text/red +# text/rfc822-headers +text/richtext rtx +# text/rtf +# text/rtp-enc-aescm128 +# text/rtx +text/sgml sgml sgm +# text/t140 +text/tab-separated-values tsv +text/troff t tr roff man me ms +text/turtle ttl +# text/ulpfec +text/uri-list uri uris urls +text/vcard vcard +# text/vnd.abc +text/vnd.curl curl +text/vnd.curl.dcurl dcurl 
+text/vnd.curl.scurl scurl +text/vnd.curl.mcurl mcurl +# text/vnd.dmclientscript +text/vnd.dvb.subtitle sub +# text/vnd.esmertec.theme-descriptor +text/vnd.fly fly +text/vnd.fmi.flexstor flx +text/vnd.graphviz gv +text/vnd.in3d.3dml 3dml +text/vnd.in3d.spot spot +# text/vnd.iptc.newsml +# text/vnd.iptc.nitf +# text/vnd.latex-z +# text/vnd.motorola.reflex +# text/vnd.ms-mediapackage +# text/vnd.net2phone.commcenter.command +# text/vnd.si.uricatalogue +text/vnd.sun.j2me.app-descriptor jad +# text/vnd.trolltech.linguist +# text/vnd.wap.si +# text/vnd.wap.sl +text/vnd.wap.wml wml +text/vnd.wap.wmlscript wmls +text/x-asm s asm +text/x-c c cc cxx cpp h hh dic +text/x-fortran f for f77 f90 +text/x-java-source java +text/x-opml opml +text/x-pascal p pas +text/x-nfo nfo +text/x-setext etx +text/x-sfv sfv +text/x-uuencode uu +text/x-vcalendar vcs +text/x-vcard vcf +# text/xml +# text/xml-external-parsed-entity +# video/1d-interleaved-parityfec +video/3gpp 3gp +# video/3gpp-tt +video/3gpp2 3g2 +# video/bmpeg +# video/bt656 +# video/celb +# video/dv +# video/example +video/h261 h261 +video/h263 h263 +# video/h263-1998 +# video/h263-2000 +video/h264 h264 +# video/h264-rcdo +# video/h264-svc +video/jpeg jpgv +# video/jpeg2000 +video/jpm jpm jpgm +video/mj2 mj2 mjp2 +# video/mp1s +# video/mp2p +# video/mp2t +video/mp4 mp4 mp4v mpg4 +# video/mp4v-es +video/mpeg mpeg mpg mpe m1v m2v +# video/mpeg4-generic +# video/mpv +# video/nv +video/ogg ogv +# video/parityfec +# video/pointer +video/quicktime qt mov +# video/raw +# video/rtp-enc-aescm128 +# video/rtx +# video/smpte292m +# video/ulpfec +# video/vc1 +# video/vnd.cctv +video/vnd.dece.hd uvh uvvh +video/vnd.dece.mobile uvm uvvm +# video/vnd.dece.mp4 +video/vnd.dece.pd uvp uvvp +video/vnd.dece.sd uvs uvvs +video/vnd.dece.video uvv uvvv +# video/vnd.directv.mpeg +# video/vnd.directv.mpeg-tts +# video/vnd.dlna.mpeg-tts +video/vnd.dvb.file dvb +video/vnd.fvt fvt +# video/vnd.hns.video +# video/vnd.iptvforum.1dparityfec-1010 +# 
video/vnd.iptvforum.1dparityfec-2005 +# video/vnd.iptvforum.2dparityfec-1010 +# video/vnd.iptvforum.2dparityfec-2005 +# video/vnd.iptvforum.ttsavc +# video/vnd.iptvforum.ttsmpeg2 +# video/vnd.motorola.video +# video/vnd.motorola.videop +video/vnd.mpegurl mxu m4u +video/vnd.ms-playready.media.pyv pyv +# video/vnd.nokia.interleaved-multimedia +# video/vnd.nokia.videovoip +# video/vnd.objectvideo +# video/vnd.sealed.mpeg1 +# video/vnd.sealed.mpeg4 +# video/vnd.sealed.swf +# video/vnd.sealedmedia.softseal.mov +video/vnd.uvvu.mp4 uvu uvvu +video/vnd.vivo viv +video/webm webm +video/x-f4v f4v +video/x-fli fli +video/x-flv flv +video/x-m4v m4v +video/x-matroska mkv mk3d mks +video/x-mng mng +video/x-ms-asf asf asx +video/x-ms-vob vob +video/x-ms-wm wm +video/x-ms-wmv wmv +video/x-ms-wmx wmx +video/x-ms-wvx wvx +video/x-msvideo avi +video/x-sgi-movie movie +video/x-smv smv +x-conference/x-cooltalk ice diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/node.types nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/node.types --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/node.types 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/node_modules/mime/types/node.types 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,77 @@ +# What: WebVTT +# Why: To allow formats intended for marking up external text track resources. +# http://dev.w3.org/html5/webvtt/ +# Added by: niftylettuce +text/vtt vtt + +# What: Google Chrome Extension +# Why: To allow apps to (work) be served with the right content type header. 
+# http://codereview.chromium.org/2830017 +# Added by: niftylettuce +application/x-chrome-extension crx + +# What: HTC support +# Why: To properly render .htc files such as CSS3PIE +# Added by: niftylettuce +text/x-component htc + +# What: HTML5 application cache manifes ('.manifest' extension) +# Why: De-facto standard. Required by Mozilla browser when serving HTML5 apps +# per https://developer.mozilla.org/en/offline_resources_in_firefox +# Added by: louisremi +text/cache-manifest manifest + +# What: node binary buffer format +# Why: semi-standard extension w/in the node community +# Added by: tootallnate +application/octet-stream buffer + +# What: The "protected" MP-4 formats used by iTunes. +# Why: Required for streaming music to browsers (?) +# Added by: broofa +application/mp4 m4p +audio/mp4 m4a + +# What: Video format, Part of RFC1890 +# Why: See https://github.com/bentomas/node-mime/pull/6 +# Added by: mjrusso +video/MP2T ts + +# What: EventSource mime type +# Why: mime type of Server-Sent Events stream +# http://www.w3.org/TR/eventsource/#text-event-stream +# Added by: francois2metz +text/event-stream event-stream + +# What: Mozilla App manifest mime type +# Why: https://developer.mozilla.org/en/Apps/Manifest#Serving_manifests +# Added by: ednapiranha +application/x-web-app-manifest+json webapp + +# What: Lua file types +# Why: Googling around shows de-facto consensus on these +# Added by: creationix (Issue #45) +text/x-lua lua +application/x-lua-bytecode luac + +# What: Markdown files, as per http://daringfireball.net/projects/markdown/syntax +# Why: http://stackoverflow.com/questions/10701983/what-is-the-mime-type-for-markdown +# Added by: avoidwork +text/x-markdown markdown md mkd + +# What: ini files +# Why: because they're just text files +# Added by: Matthew Kastor +text/plain ini + +# What: DASH Adaptive Streaming manifest +# Why: https://developer.mozilla.org/en-US/docs/DASH_Adaptive_Streaming_for_HTML_5_Video +# Added by: eelcocramer 
+application/dash+xml mdp + +# What: OpenType font files - http://www.microsoft.com/typography/otspec/ +# Why: Browsers usually ignore the font MIME types and sniff the content, +# but Chrome, shows a warning if OpenType fonts aren't served with +# the `font/opentype` MIME type: http://i.imgur.com/8c5RN8M.png. +# Added by: alrra +font/opentype otf diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "form-data", "description": "A module to create readable \"multipart/form-data\" streams. Can be used to submit forms and file uploads to other web applications.", - "version": "0.1.2", + "version": "0.1.4", "repository": { "type": "git", "url": "git://github.com/felixge/node-form-data.git" @@ -16,12 +16,12 @@ "test": "node test/run.js" }, "engines": { - "node": ">= 0.6" + "node": ">= 0.8" }, "dependencies": { "combined-stream": "~0.0.4", "mime": "~1.2.11", - "async": "~0.2.9" + "async": "~0.9.0" }, "licenses": [ { @@ -33,14 +33,48 @@ "fake": "~0.2.2", "far": "~0.0.7", "formidable": "~1.0.14", - "request": "~2.27.0" + "request": "~2.36.0" }, - "readme": "# Form-Data [![Build Status](https://travis-ci.org/felixge/node-form-data.png?branch=master)](https://travis-ci.org/felixge/node-form-data) [![Dependency Status](https://gemnasium.com/felixge/node-form-data.png)](https://gemnasium.com/felixge/node-form-data)\n\nA module to create readable ```\"multipart/form-data\"``` streams. 
Can be used to submit forms and file uploads to other web applications.\n\nThe API of this module is inspired by the [XMLHttpRequest-2 FormData Interface][xhr2-fd].\n\n[xhr2-fd]: http://dev.w3.org/2006/webapi/XMLHttpRequest-2/Overview.html#the-formdata-interface\n[streams2-thing]: http://nodejs.org/api/stream.html#stream_compatibility_with_older_node_versions\n\n## Install\n\n```\nnpm install form-data\n```\n\n## Usage\n\nIn this example we are constructing a form with 3 fields that contain a string,\na buffer and a file stream.\n\n``` javascript\nvar FormData = require('form-data');\nvar fs = require('fs');\n\nvar form = new FormData();\nform.append('my_field', 'my value');\nform.append('my_buffer', new Buffer(10));\nform.append('my_file', fs.createReadStream('/foo/bar.jpg'));\n```\n\nAlso you can use http-response stream:\n\n``` javascript\nvar FormData = require('form-data');\nvar http = require('http');\n\nvar form = new FormData();\n\nhttp.request('http://nodejs.org/images/logo.png', function(response) {\n form.append('my_field', 'my value');\n form.append('my_buffer', new Buffer(10));\n form.append('my_logo', response);\n});\n```\n\nOr @mikeal's request stream:\n\n``` javascript\nvar FormData = require('form-data');\nvar request = require('request');\n\nvar form = new FormData();\n\nform.append('my_field', 'my value');\nform.append('my_buffer', new Buffer(10));\nform.append('my_logo', request('http://nodejs.org/images/logo.png'));\n```\n\nIn order to submit this form to a web application, call ```submit(url, [callback])``` method:\n\n``` javascript\nform.submit('http://example.org/', function(err, res) {\n // res – response object (http.IncomingMessage) //\n res.resume(); // for node-0.10.x\n});\n\n```\n\nFor more advanced request manipulations ```submit()``` method returns ```http.ClientRequest``` object, or you can choose from one of the alternative submission methods.\n\n### Alternative submission methods\n\nYou can use node's http client interface:\n\n``` 
javascript\nvar http = require('http');\n\nvar request = http.request({\n method: 'post',\n host: 'example.org',\n path: '/upload',\n headers: form.getHeaders()\n});\n\nform.pipe(request);\n\nrequest.on('response', function(res) {\n console.log(res.statusCode);\n});\n```\n\nOr if you would prefer the `'Content-Length'` header to be set for you:\n\n``` javascript\nform.submit('example.org/upload', function(err, res) {\n console.log(res.statusCode);\n});\n```\n\nTo use custom headers and pre-known length in parts:\n\n``` javascript\nvar CRLF = '\\r\\n';\nvar form = new FormData();\n\nvar options = {\n header: CRLF + '--' + form.getBoundary() + CRLF + 'X-Custom-Header: 123' + CRLF + CRLF,\n knownLength: 1\n};\n\nform.append('my_buffer', buffer, options);\n\nform.submit('http://example.com/', function(err, res) {\n if (err) throw err;\n console.log('Done');\n});\n```\n\nForm-Data can recognize and fetch all the required information from common types of streams (```fs.readStream```, ```http.response``` and ```mikeal's request```), for some other types of streams you'd need to provide \"file\"-related information manually:\n\n``` javascript\nsomeModule.stream(function(err, stdout, stderr) {\n if (err) throw err;\n\n var form = new FormData();\n\n form.append('file', stdout, {\n filename: 'unicycle.jpg',\n contentType: 'image/jpg',\n knownLength: 19806\n });\n\n form.submit('http://example.com/', function(err, res) {\n if (err) throw err;\n console.log('Done');\n });\n});\n```\n\nFor edge cases, like POST request to URL with query string or to pass HTTP auth credentials, object can be passed to `form.submit()` as first parameter:\n\n``` javascript\nform.submit({\n host: 'example.com',\n path: '/probably.php?extra=params',\n auth: 'username:password'\n}, function(err, res) {\n console.log(res.statusCode);\n});\n```\n\n## Notes\n\n- ```getLengthSync()``` method DOESN'T calculate length for streams, use ```knownLength``` options as workaround.\n- If it feels like FormData 
hangs after submit and you're on ```node-0.10```, please check [Compatibility with Older Node Versions][streams2-thing]\n\n## TODO\n\n- Add new streams (0.10) support and try really hard not to break it for 0.8.x.\n\n## License\n\nForm-Data is licensed under the MIT license.\n", - "readmeFilename": "Readme.md", + "gitHead": "5f5f4809ea685f32658809fa0f13d7eface0e45a", "bugs": { "url": "https://github.com/felixge/node-form-data/issues" }, "homepage": "https://github.com/felixge/node-form-data", - "_id": "form-data@0.1.2", - "_from": "form-data@~0.1.0" + "_id": "form-data@0.1.4", + "_shasum": "91abd788aba9702b1aabfa8bc01031a2ac9e3b12", + "_from": "form-data@>=0.1.0 <0.2.0", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "alexindigo", + "email": "iam@alexindigo.com" + }, + "maintainers": [ + { + "name": "felixge", + "email": "felix@debuggable.com" + }, + { + "name": "idralyuk", + "email": "igor@buran.us" + }, + { + "name": "alexindigo", + "email": "iam@alexindigo.com" + }, + { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + }, + { + "name": "celer", + "email": "dtyree77@gmail.com" + } + ], + "dist": { + "shasum": "91abd788aba9702b1aabfa8bc01031a2ac9e3b12", + "tarball": "http://registry.npmjs.org/form-data/-/form-data-0.1.4.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/form-data/-/form-data-0.1.4.tgz", + "readme": "ERROR: No README data found!" 
} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/Readme.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/Readme.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/form-data/Readme.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/form-data/Readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -149,6 +149,18 @@ }); ``` +In case you need to also send custom HTTP headers with the POST request, you can use the `headers` key in first parameter of `form.submit()`: + +``` javascript +form.submit({ + host: 'example.com', + path: '/surelynot.php', + headers: {'x-test-header': 'test-header-value'} +}, function(err, res) { + console.log(res.statusCode); +}); +``` + ## Notes - ```getLengthSync()``` method DOESN'T calculate length for streams, use ```knownLength``` options as workaround. diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/browser.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/browser.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/browser.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/browser.js 2015-01-20 21:22:17.000000000 +0000 @@ -24,17 +24,17 @@ uri: 'http://example.com/resource?a=b' method: HTTP verb (e.g. 
'GET', 'POST') options: { - + // Required - + credentials: { id: 'dh37fgj492je', key: 'aoijedoaijsdlaksjdl', algorithm: 'sha256' // 'sha1', 'sha256' }, - + // Optional - + ext: 'application-specific', // Application specific data sent via the ext attribute timestamp: Date.now() / 1000, // A pre-calculated timestamp in seconds nonce: '2334f34f', // A pre-generated nonce @@ -60,6 +60,7 @@ !method || typeof method !== 'string' || !options || typeof options !== 'object') { + result.err = 'Invalid argument type'; return result; } @@ -75,11 +76,12 @@ !credentials.key || !credentials.algorithm) { - // Invalid credential object + result.err = 'Invalid credential object'; return result; } if (hawk.crypto.algorithms.indexOf(credentials.algorithm) === -1) { + result.err = 'Unknown algorithm'; return result; } @@ -267,6 +269,20 @@ }; return result; + }, + + authenticateTimestamp: function (message, credentials, updateClock) { // updateClock defaults to true + + var tsm = hawk.crypto.calculateTsMac(message.ts, credentials); + if (tsm !== message.tsm) { + return false; + } + + if (updateClock !== false) { + hawk.utils.setNtpOffset(message.ts - Math.floor(Date.now() / 1000)); // Keep offset at 1 second precision + } + + return true; } }; @@ -351,7 +367,13 @@ setNtpOffset: function (offset) { - hawk.utils.storage.setItem('hawk_ntp_offset', offset); + try { + hawk.utils.storage.setItem('hawk_ntp_offset', offset); + } + catch (err) { + console.error('[hawk] could not write to storage.'); + console.error(err); + } }, getNtpOffset: function () { diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/client.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/client.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/client.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/client.js 2015-01-20 21:22:17.000000000 +0000 @@ -54,6 +54,7 @@ !method || typeof method !== 
'string' || !options || typeof options !== 'object') { + result.err = 'Invalid argument type'; return result; } @@ -69,11 +70,12 @@ !credentials.key || !credentials.algorithm) { - // Invalid credential object + result.err = 'Invalid credential object'; return result; } if (Crypto.algorithms.indexOf(credentials.algorithm) === -1) { + result.err = 'Unknown algorithm'; return result; } @@ -156,6 +158,8 @@ return false; } + // Validate server timestamp (not used to update clock since it is done via the SNPT client) + if (attributes.ts) { var tsm = Crypto.calculateTsMac(attributes.ts, credentials); if (tsm !== attributes.tsm) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/crypto.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/crypto.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/crypto.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/crypto.js 2015-01-20 21:22:17.000000000 +0000 @@ -109,3 +109,10 @@ return hmac.digest('base64'); }; + +exports.timestampMessage = function (credentials, localtimeOffsetMsec) { + + var now = Math.floor((Utils.now() + (localtimeOffsetMsec || 0)) / 1000); + var tsm = exports.calculateTsMac(now, credentials); + return { ts: now, tsm: tsm }; +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/index.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/index.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,6 +2,7 @@ exports.error = exports.Error = require('boom'); exports.sntp = require('sntp'); + exports.server = require('./server'); exports.client = require('./client'); exports.crypto = require('./crypto'); @@ -12,4 +13,3 @@ getBewit: exports.client.getBewit }; - diff -Nru 
nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/server.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/server.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/lib/server.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/lib/server.js 2015-01-20 21:22:17.000000000 +0000 @@ -191,9 +191,8 @@ // Check timestamp staleness if (Math.abs((attributes.ts * 1000) - now) > (options.timestampSkewSec * 1000)) { - var fresh = Math.floor((Utils.now() + (options.localtimeOffsetMsec || 0)) / 1000); // Get fresh now - var tsm = Crypto.calculateTsMac(fresh, credentials); - return callback(Boom.unauthorized('Stale timestamp', 'Hawk', { ts: fresh, tsm: tsm }), credentials, artifacts); + var tsm = Crypto.timestampMessage(credentials, options.localtimeOffsetMsec); + return callback(Boom.unauthorized('Stale timestamp', 'Hawk', tsm), credentials, artifacts); } // Successful authentication diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/boom/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/boom/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/boom/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/boom/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -36,12 +36,29 @@ "url": "http://github.com/spumko/boom/raw/master/LICENSE" } ], - "readme": "\n![boom Logo](https://raw.github.com/spumko/boom/master/images/boom.png)\n\nHTTP-friendly error objects\n\n[![Build Status](https://secure.travis-ci.org/spumko/boom.png)](http://travis-ci.org/spumko/boom)\n", - "readmeFilename": "README.md", + "_id": "boom@0.4.2", + "dist": { + "shasum": "7a636e9ded4efcefb19cef4947a3c67dfaee911b", + "tarball": "http://registry.npmjs.org/boom/-/boom-0.4.2.tgz" + }, + "_from": "boom@>=0.4.0 <0.5.0", + 
"_npmVersion": "1.2.18", + "_npmUser": { + "name": "hueniverse", + "email": "eran@hueniverse.com" + }, + "maintainers": [ + { + "name": "hueniverse", + "email": "eran@hueniverse.com" + } + ], + "directories": {}, + "_shasum": "7a636e9ded4efcefb19cef4947a3c67dfaee911b", + "_resolved": "https://registry.npmjs.org/boom/-/boom-0.4.2.tgz", "bugs": { "url": "https://github.com/spumko/boom/issues" }, - "homepage": "https://github.com/spumko/boom", - "_id": "boom@0.4.2", - "_from": "boom@0.4.x" + "readme": "ERROR: No README data found!", + "homepage": "https://github.com/spumko/boom" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/cryptiles/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/cryptiles/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/cryptiles/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/cryptiles/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -37,12 +37,29 @@ "url": "http://github.com/hueniverse/cryptiles/raw/master/LICENSE" } ], - "readme": "cryptiles\n=========\n\nGeneral purpose crypto utilities\n\n[![Build Status](https://secure.travis-ci.org/hueniverse/cryptiles.png)](http://travis-ci.org/hueniverse/cryptiles)\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/hueniverse/cryptiles/issues" }, - "homepage": "https://github.com/hueniverse/cryptiles", "_id": "cryptiles@0.2.2", - "_from": "cryptiles@0.2.x" + "dist": { + "shasum": "ed91ff1f17ad13d3748288594f8a48a0d26f325c", + "tarball": "http://registry.npmjs.org/cryptiles/-/cryptiles-0.2.2.tgz" + }, + "_from": "cryptiles@>=0.2.0 <0.3.0", + "_npmVersion": "1.2.24", + "_npmUser": { + "name": "hueniverse", + "email": "eran@hueniverse.com" + }, + "maintainers": [ + { + "name": "hueniverse", + "email": "eran@hueniverse.com" + } + ], + "directories": {}, + "_shasum": 
"ed91ff1f17ad13d3748288594f8a48a0d26f325c", + "_resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-0.2.2.tgz", + "readme": "ERROR: No README data found!", + "homepage": "https://github.com/hueniverse/cryptiles" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/hoek/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/hoek/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/hoek/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/hoek/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -38,12 +38,33 @@ "url": "http://github.com/spumko/hoek/raw/master/LICENSE" } ], - "readme": "\r\n![hoek Logo](https://raw.github.com/spumko/hoek/master/images/hoek.png)\r\n\r\nGeneral purpose node utilities\r\n\r\n[![Build Status](https://secure.travis-ci.org/spumko/hoek.png)](http://travis-ci.org/spumko/hoek)\r\n\r\n# Table of Contents\r\n\r\n* [Introduction](#introduction \"Introduction\")\r\n* [Object](#object \"Object\")\r\n * [clone](#cloneobj \"clone\")\r\n * [merge](#mergetarget-source-isnulloverride-ismergearrays \"merge\")\r\n * [applyToDefaults](#applytodefaultsdefaults-options \"applyToDefaults\")\r\n * [unique](#uniquearray-key \"unique\")\r\n * [mapToObject](#maptoobjectarray-key \"mapToObject\")\r\n * [intersect](#intersectarray1-array2 \"intersect\")\r\n * [matchKeys](#matchkeysobj-keys \"matchKeys\")\r\n * [flatten](#flattenarray-target \"flatten\")\r\n * [removeKeys](#removekeysobject-keys \"removeKeys\")\r\n * [reach](#reachobj-chain \"reach\")\r\n * [inheritAsync](#inheritasyncself-obj-keys \"inheritAsync\")\r\n * [rename](#renameobj-from-to \"rename\")\r\n* [Timer](#timer \"Timer\")\r\n* [Binary Encoding/Decoding](#binary \"Binary Encoding/Decoding\")\r\n * [base64urlEncode](#binary64urlEncodevalue \"binary64urlEncode\")\r\n * 
[base64urlDecode](#binary64urlDecodevalue \"binary64urlDecode\")\r\n* [Escaping Characters](#escaped \"Escaping Characters\")\r\n * [escapeHtml](#escapeHtmlstring \"escapeHtml\")\r\n * [escapeHeaderAttribute](#escapeHeaderAttributeattribute \"escapeHeaderAttribute\")\r\n * [escapeRegex](#escapeRegexstring \"escapeRegex\")\r\n* [Errors](#errors \"Errors\")\r\n * [assert](#assertmessage \"assert\")\r\n * [abort](#abortmessage \"abort\")\r\n * [displayStack](#displayStackslice \"displayStack\")\r\n * [callStack](#callStackslice \"callStack\")\r\n * [toss](#tosscondition \"toss\")\r\n* [Load files](#load-files \"Load Files\")\r\n * [loadPackage](#loadPackagedir \"loadpackage\")\r\n * [loadDirModules](#loadDirModulespath-excludefiles-target \"loaddirmodules\")\r\n\r\n\r\n\r\n# Introduction\r\n\r\nThe *Hoek* general purpose node utilities library is used to aid in a variety of manners. It comes with useful methods for Arrays (clone, merge, applyToDefaults), Objects (removeKeys, copy), Asserting and more. \r\n\r\nFor example, to use Hoek to set configuration with default options:\r\n```javascript\r\nvar Hoek = require('hoek');\r\n\r\nvar default = {url : \"www.github.com\", port : \"8000\", debug : true}\r\n\r\nvar config = Hoek.applyToDefaults(default, {port : \"3000\", admin : true});\r\n\r\n// In this case, config would be { url: 'www.github.com', port: '3000', debug: true, admin: true }\r\n```\r\n\r\nUnder each of the sections (such as Array), there are subsections which correspond to Hoek methods. Each subsection will explain how to use the corresponding method. In each js excerpt below, the var Hoek = require('hoek') is omitted for brevity.\r\n\r\n## Object\r\n\r\nHoek provides several helpful methods for objects and arrays.\r\n\r\n### clone(obj)\r\n\r\nThis method is used to clone an object or an array. A *deep copy* is made (duplicates everything, including values that are objects). 
\r\n\r\n```javascript\r\n\r\nvar nestedObj = {\r\n w: /^something$/ig,\r\n x: {\r\n a: [1, 2, 3],\r\n b: 123456,\r\n c: new Date()\r\n },\r\n y: 'y',\r\n z: new Date()\r\n };\r\n\r\nvar copy = Hoek.clone(nestedObj);\r\n\r\ncopy.x.b = 100;\r\n\r\nconsole.log(copy.y) // results in 'y'\r\nconsole.log(nestedObj.x.b) // results in 123456\r\nconsole.log(copy.x.b) // results in 100\r\n```\r\n\r\n### merge(target, source, isNullOverride, isMergeArrays)\r\nisNullOverride, isMergeArrays default to true\r\n\r\nMerge all the properties of source into target, source wins in conflic, and by default null and undefined from source are applied\r\n\r\n\r\n```javascript\r\n\r\nvar target = {a: 1, b : 2}\r\nvar source = {a: 0, c: 5}\r\nvar source2 = {a: null, c: 5}\r\n\r\nvar targetArray = [1, 2, 3];\r\nvar sourceArray = [4, 5];\r\n\r\nvar newTarget = Hoek.merge(target, source); // results in {a: 0, b: 2, c: 5}\r\nnewTarget = Hoek.merge(target, source2); // results in {a: null, b: 2, c: 5}\r\nnewTarget = Hoek.merge(target, source2, false); // results in {a: 1, b: 2, c: 5}\r\n\r\nnewTarget = Hoek.merge(targetArray, sourceArray) // results in [1, 2, 3, 4, 5]\r\nnewTarget = Hoek.merge(targetArray, sourceArray, true, false) // results in [4, 5]\r\n\r\n\r\n\r\n\r\n```\r\n\r\n### applyToDefaults(defaults, options)\r\n\r\nApply options to a copy of the defaults\r\n\r\n```javascript\r\n\r\nvar defaults = {host: \"localhost\", port: 8000};\r\nvar options = {port: 8080};\r\n\r\nvar config = Hoek.applyToDefaults(defaults, options); // results in {host: \"localhost\", port: 8080};\r\n\r\n\r\n```\r\n\r\n### unique(array, key)\r\n\r\nRemove duplicate items from Array\r\n\r\n```javascript\r\n\r\nvar array = [1, 2, 2, 3, 3, 4, 5, 6];\r\n\r\nvar newArray = Hoek.unique(array); // results in [1,2,3,4,5,6];\r\n\r\narray = [{id: 1}, {id: 1}, {id: 2}];\r\n\r\nnewArray = Hoek.unique(array, \"id\") // results in [{id: 1}, {id: 2}]\r\n\r\n```\r\n\r\n### mapToObject(array, key)\r\n\r\nConvert an Array into an 
Object\r\n\r\n```javascript\r\n\r\nvar array = [1,2,3];\r\nvar newObject = Hoek.mapToObject(array); // results in [{\"1\": true}, {\"2\": true}, {\"3\": true}]\r\n\r\narray = [{id: 1}, {id: 2}];\r\nnewObject = Hoek.mapToObject(array, \"id\") // results in [{\"id\": 1}, {\"id\": 2}]\r\n\r\n```\r\n### intersect(array1, array2)\r\n\r\nFind the common unique items in two arrays\r\n\r\n```javascript\r\n\r\nvar array1 = [1, 2, 3];\r\nvar array2 = [1, 4, 5];\r\n\r\nvar newArray = Hoek.intersect(array1, array2) // results in [1]\r\n\r\n```\r\n\r\n### matchKeys(obj, keys) \r\n\r\nFind which keys are present\r\n\r\n```javascript\r\n\r\nvar obj = {a: 1, b: 2, c: 3};\r\nvar keys = [\"a\", \"e\"];\r\n\r\nHoek.matchKeys(obj, keys) // returns [\"a\"]\r\n\r\n```\r\n\r\n### flatten(array, target)\r\n\r\nFlatten an array\r\n\r\n```javascript\r\n\r\nvar array = [1, 2, 3];\r\nvar target = [4, 5]; \r\n\r\nvar flattenedArray = Hoek.flatten(array, target) // results in [4, 5, 1, 2, 3];\r\n\r\n```\r\n\r\n### removeKeys(object, keys)\r\n\r\nRemove keys\r\n\r\n```javascript\r\n\r\nvar object = {a: 1, b: 2, c: 3, d: 4};\r\n\r\nvar keys = [\"a\", \"b\"];\r\n\r\nHoek.removeKeys(object, keys) // object is now {c: 3, d: 4}\r\n\r\n```\r\n\r\n### reach(obj, chain)\r\n\r\nConverts an object key chain string to reference\r\n\r\n```javascript\r\n\r\nvar chain = 'a.b.c';\r\nvar obj = {a : {b : { c : 1}}};\r\n\r\nHoek.reach(obj, chain) // returns 1\r\n\r\n```\r\n\r\n### inheritAsync(self, obj, keys) \r\n\r\nInherits a selected set of methods from an object, wrapping functions in asynchronous syntax and catching errors\r\n\r\n```javascript\r\n\r\nvar targetFunc = function () { };\r\n\r\nvar proto = {\r\n a: function () {\r\n return 'a!';\r\n },\r\n b: function () {\r\n return 'b!';\r\n },\r\n c: function () {\r\n throw new Error('c!');\r\n }\r\n };\r\n\r\nvar keys = ['a', 'c'];\r\n\r\nHoek.inheritAsync(targetFunc, proto, ['a', 'c']);\r\n\r\nvar target = new targetFunc();\r\n\r\ntarget.a(function(err, 
result){console.log(result)} // returns 'a!' \r\n\r\ntarget.c(function(err, result){console.log(result)} // returns undefined\r\n\r\ntarget.b(function(err, result){console.log(result)} // gives error: Object [object Object] has no method 'b'\r\n\r\n```\r\n\r\n### rename(obj, from, to)\r\n\r\nRename a key of an object\r\n\r\n```javascript\r\n\r\nvar obj = {a : 1, b : 2};\r\n\r\nHoek.rename(obj, \"a\", \"c\"); // obj is now {c : 1, b : 2}\r\n\r\n```\r\n\r\n\r\n# Timer\r\n\r\nA Timer object. Initializing a new timer object sets the ts to the number of milliseconds elapsed since 1 January 1970 00:00:00 UTC.\r\n\r\n```javascript\r\n\r\n\r\nexample : \r\n\r\n\r\nvar timerObj = new Hoek.Timer();\r\nconsole.log(\"Time is now: \" + timerObj.ts)\r\nconsole.log(\"Elapsed time from initialization: \" + timerObj.elapsed() + 'milliseconds')\r\n\r\n```\r\n\r\n# Binary Encoding/Decoding\r\n\r\n### base64urlEncode(value)\r\n\r\nEncodes value in Base64 or URL encoding\r\n\r\n### base64urlDecode(value)\r\n\r\nDecodes data in Base64 or URL encoding.\r\n# Escaping Characters\r\n\r\nHoek provides convenient methods for escaping html characters. 
The escaped characters are as followed:\r\n\r\n```javascript\r\n\r\ninternals.htmlEscaped = {\r\n '&': '&',\r\n '<': '<',\r\n '>': '>',\r\n '\"': '"',\r\n \"'\": ''',\r\n '`': '`'\r\n};\r\n\r\n```\r\n\r\n### escapeHtml(string)\r\n\r\n```javascript\r\n\r\nvar string = ' hey ';\r\nvar escapedString = Hoek.escapeHtml(string); // returns <html> hey </html>\r\n\r\n```\r\n\r\n### escapeHeaderAttribute(attribute)\r\n\r\nEscape attribute value for use in HTTP header\r\n\r\n```javascript\r\n\r\nvar a = Hoek.escapeHeaderAttribute('I said \"go w\\\\o me\"'); //returns I said \\\"go w\\\\o me\\\"\r\n\r\n\r\n```\r\n\r\n\r\n### escapeRegex(string)\r\n\r\nEscape string for Regex construction\r\n\r\n```javascript\r\n\r\nvar a = Hoek.escapeRegex('4^f$s.4*5+-_?%=#!:@|~\\\\/`\"(>)[<]d{}s,'); // returns 4\\^f\\$s\\.4\\*5\\+\\-_\\?%\\=#\\!\\:@\\|~\\\\\\/`\"\\(>\\)\\[<\\]d\\{\\}s\\,\r\n\r\n\r\n\r\n```\r\n\r\n# Errors\r\n\r\n### assert(message)\r\n\r\n```javascript\r\n\r\nvar a = 1, b =2;\r\n\r\nHoek.assert(a === b, 'a should equal b'); // ABORT: a should equal b\r\n\r\n```\r\n\r\n### abort(message)\r\n\r\nFirst checks if process.env.NODE_ENV === 'test', and if so, throws error message. 
Otherwise,\r\ndisplays most recent stack and then exits process.\r\n\r\n\r\n\r\n### displayStack(slice)\r\n\r\nDisplays the trace stack\r\n\r\n```javascript\r\n\r\nvar stack = Hoek.displayStack();\r\nconsole.log(stack) // returns something like:\r\n\r\n[ 'null (/Users/user/Desktop/hoek/test.js:4:18)',\r\n 'Module._compile (module.js:449:26)',\r\n 'Module._extensions..js (module.js:467:10)',\r\n 'Module.load (module.js:356:32)',\r\n 'Module._load (module.js:312:12)',\r\n 'Module.runMain (module.js:492:10)',\r\n 'startup.processNextTick.process._tickCallback (node.js:244:9)' ]\r\n\r\n```\r\n\r\n### callStack(slice)\r\n\r\nReturns a trace stack array.\r\n\r\n```javascript\r\n\r\nvar stack = Hoek.callStack();\r\nconsole.log(stack) // returns something like:\r\n\r\n[ [ '/Users/user/Desktop/hoek/test.js', 4, 18, null, false ],\r\n [ 'module.js', 449, 26, 'Module._compile', false ],\r\n [ 'module.js', 467, 10, 'Module._extensions..js', false ],\r\n [ 'module.js', 356, 32, 'Module.load', false ],\r\n [ 'module.js', 312, 12, 'Module._load', false ],\r\n [ 'module.js', 492, 10, 'Module.runMain', false ],\r\n [ 'node.js',\r\n 244,\r\n 9,\r\n 'startup.processNextTick.process._tickCallback',\r\n false ] ]\r\n\r\n\r\n```\r\n\r\n### toss(condition)\r\n\r\ntoss(condition /*, [message], callback */)\r\n\r\nReturn an error as first argument of a callback\r\n\r\n\r\n# Load Files\r\n\r\n### loadPackage(dir)\r\n\r\nLoad and parse package.json process root or given directory\r\n\r\n```javascript\r\n\r\nvar pack = Hoek.loadPackage(); // pack.name === 'hoek'\r\n\r\n```\r\n\r\n### loadDirModules(path, excludeFiles, target) \r\n\r\nLoads modules from a given path; option to exclude files (array).\r\n\r\n\r\n\r\n\r\n", - "readmeFilename": "README.md", + "_id": "hoek@0.9.1", + "dist": { + "shasum": "3d322462badf07716ea7eb85baf88079cddce505", + "tarball": "http://registry.npmjs.org/hoek/-/hoek-0.9.1.tgz" + }, + "_from": "hoek@>=0.9.0 <0.10.0", + "_npmVersion": "1.2.18", + "_npmUser": { + 
"name": "hueniverse", + "email": "eran@hueniverse.com" + }, + "maintainers": [ + { + "name": "hueniverse", + "email": "eran@hueniverse.com" + }, + { + "name": "thegoleffect", + "email": "thegoleffect@gmail.com" + } + ], + "directories": {}, + "_shasum": "3d322462badf07716ea7eb85baf88079cddce505", + "_resolved": "https://registry.npmjs.org/hoek/-/hoek-0.9.1.tgz", "bugs": { "url": "https://github.com/spumko/hoek/issues" }, - "homepage": "https://github.com/spumko/hoek", - "_id": "hoek@0.9.1", - "_from": "hoek@0.9.x" + "readme": "ERROR: No README data found!", + "homepage": "https://github.com/spumko/hoek" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/sntp/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/sntp/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/node_modules/sntp/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/node_modules/sntp/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -37,12 +37,29 @@ "url": "http://github.com/hueniverse/sntp/raw/master/LICENSE" } ], - "readme": "# sntp\n\nAn SNTP v4 client (RFC4330) for node. Simpy connects to the NTP or SNTP server requested and returns the server time\nalong with the roundtrip duration and clock offset. 
To adjust the local time to the NTP time, add the returned `t` offset\nto the local time.\n\n[![Build Status](https://secure.travis-ci.org/hueniverse/sntp.png)](http://travis-ci.org/hueniverse/sntp)\n\n# Usage\n\n```javascript\nvar Sntp = require('sntp');\n\n// All options are optional\n\nvar options = {\n host: 'nist1-sj.ustiming.org', // Defaults to pool.ntp.org\n port: 123, // Defaults to 123 (NTP)\n resolveReference: true, // Default to false (not resolving)\n timeout: 1000 // Defaults to zero (no timeout)\n};\n\n// Request server time\n\nSntp.time(options, function (err, time) {\n\n if (err) {\n console.log('Failed: ' + err.message);\n process.exit(1);\n }\n\n console.log('Local clock is off by: ' + time.t + ' milliseconds');\n process.exit(0);\n});\n```\n\nIf an application needs to maintain continuous time synchronization, the module provides a stateful method for\nquerying the current offset only when the last one is too old (defaults to daily).\n\n```javascript\n// Request offset once\n\nSntp.offset(function (err, offset) {\n\n console.log(offset); // New (served fresh)\n\n // Request offset again\n\n Sntp.offset(function (err, offset) {\n\n console.log(offset); // Identical (served from cache)\n });\n});\n```\n\nTo set a background offset refresh, start the interval and use the provided now() method. 
If for any reason the\nclient fails to obtain an up-to-date offset, the current system clock is used.\n\n```javascript\nvar before = Sntp.now(); // System time without offset\n\nSntp.start(function () {\n\n var now = Sntp.now(); // With offset\n Sntp.stop();\n});\n```\n\n", - "readmeFilename": "README.md", + "_id": "sntp@0.2.4", + "dist": { + "shasum": "fb885f18b0f3aad189f824862536bceeec750900", + "tarball": "http://registry.npmjs.org/sntp/-/sntp-0.2.4.tgz" + }, + "_from": "sntp@>=0.2.0 <0.3.0", + "_npmVersion": "1.2.18", + "_npmUser": { + "name": "hueniverse", + "email": "eran@hueniverse.com" + }, + "maintainers": [ + { + "name": "hueniverse", + "email": "eran@hueniverse.com" + } + ], + "directories": {}, + "_shasum": "fb885f18b0f3aad189f824862536bceeec750900", + "_resolved": "https://registry.npmjs.org/sntp/-/sntp-0.2.4.tgz", "bugs": { "url": "https://github.com/hueniverse/sntp/issues" }, - "homepage": "https://github.com/hueniverse/sntp", - "_id": "sntp@0.2.4", - "_from": "sntp@0.2.x" + "readme": "ERROR: No README data found!", + "homepage": "https://github.com/hueniverse/sntp" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/.npmignore nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -1,18 +1,18 @@ -.idea -*.iml -npm-debug.log -dump.rdb -node_modules -results.tap -results.xml -npm-shrinkwrap.json -config.json -.DS_Store -*/.DS_Store -*/*/.DS_Store -._* -*/._* -*/*/._* -coverage.* -lib-cov - +.idea +*.iml +npm-debug.log +dump.rdb +node_modules +results.tap +results.xml +npm-shrinkwrap.json +config.json +.DS_Store +*/.DS_Store +*/*/.DS_Store +._* +*/._* +*/*/._* +coverage.* +lib-cov + diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/package.json 
nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/hawk/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/hawk/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ { "name": "hawk", "description": "HTTP Hawk Authentication Scheme", - "version": "1.0.0", + "version": "1.1.1", "author": { "name": "Eran Hammer", "email": "eran@hueniverse.com", @@ -42,12 +42,29 @@ "url": "http://github.com/hueniverse/hawk/raw/master/LICENSE" } ], - "readme": "![hawk Logo](https://raw.github.com/hueniverse/hawk/master/images/hawk.png)\n\n **Hawk** is an HTTP authentication scheme using a message authentication code (MAC) algorithm to provide partial\nHTTP request cryptographic verification. For more complex use cases such as access delegation, see [Oz](https://github.com/hueniverse/oz).\n\nCurrent version: **1.0**\n\n[![Build Status](https://secure.travis-ci.org/hueniverse/hawk.png)](http://travis-ci.org/hueniverse/hawk)\n\n# Table of Content\n\n- [**Introduction**](#introduction)\n - [Replay Protection](#replay-protection)\n - [Usage Example](#usage-example)\n - [Protocol Example](#protocol-example)\n - [Payload Validation](#payload-validation)\n - [Response Payload Validation](#response-payload-validation)\n - [Browser Support and Considerations](#browser-support-and-considerations)\n

\n- [**Single URI Authorization**](#single-uri-authorization)\n - [Usage Example](#bewit-usage-example)\n

\n- [**Security Considerations**](#security-considerations)\n - [MAC Keys Transmission](#mac-keys-transmission)\n - [Confidentiality of Requests](#confidentiality-of-requests)\n - [Spoofing by Counterfeit Servers](#spoofing-by-counterfeit-servers)\n - [Plaintext Storage of Credentials](#plaintext-storage-of-credentials)\n - [Entropy of Keys](#entropy-of-keys)\n - [Coverage Limitations](#coverage-limitations)\n - [Future Time Manipulation](#future-time-manipulation)\n - [Client Clock Poisoning](#client-clock-poisoning)\n - [Bewit Limitations](#bewit-limitations)\n - [Host Header Forgery](#host-header-forgery)\n

\n- [**Frequently Asked Questions**](#frequently-asked-questions)\n

\n- [**Acknowledgements**](#acknowledgements)\n\n# Introduction\n\n**Hawk** is an HTTP authentication scheme providing mechanisms for making authenticated HTTP requests with\npartial cryptographic verification of the request and response, covering the HTTP method, request URI, host,\nand optionally the request payload.\n\nSimilar to the HTTP [Digest access authentication schemes](http://www.ietf.org/rfc/rfc2617.txt), **Hawk** uses a set of\nclient credentials which include an identifier (e.g. username) and key (e.g. password). Likewise, just as with the Digest scheme,\nthe key is never included in authenticated requests. Instead, it is used to calculate a request MAC value which is\nincluded in its place.\n\nHowever, **Hawk** has several differences from Digest. In particular, while both use a nonce to limit the possibility of\nreplay attacks, in **Hawk** the client generates the nonce and uses it in combination with a timestamp, leading to less\n\"chattiness\" (interaction with the server).\n\nAlso unlike Digest, this scheme is not intended to protect the key itself (the password in Digest) because\nthe client and server must both have access to the key material in the clear.\n\nThe primary design goals of this scheme are to:\n* simplify and improve HTTP authentication for services that are unwilling or unable to deploy TLS for all resources,\n* secure credentials against leakage (e.g., when the client uses some form of dynamic configuration to determine where\n to send an authenticated request), and\n* avoid the exposure of credentials sent to a malicious server over an unauthenticated secure channel due to client\n failure to validate the server's identity as part of its TLS handshake.\n\nIn addition, **Hawk** supports a method for granting third-parties temporary access to individual resources using\na query parameter called _bewit_ (in falconry, a leather strap used to attach a tracking device to the leg of a hawk).\n\nThe **Hawk** scheme requires the 
establishment of a shared symmetric key between the client and the server,\nwhich is beyond the scope of this module. Typically, the shared credentials are established via an initial\nTLS-protected phase or derived from some other shared confidential information available to both the client\nand the server.\n\n\n## Replay Protection\n\nWithout replay protection, an attacker can use a compromised (but otherwise valid and authenticated) request more \nthan once, gaining access to a protected resource. To mitigate this, clients include both a nonce and a timestamp when \nmaking requests. This gives the server enough information to prevent replay attacks.\n\nThe nonce is generated by the client, and is a string unique across all requests with the same timestamp and\nkey identifier combination. \n\nThe timestamp enables the server to restrict the validity period of the credentials where requests occuring afterwards\nare rejected. It also removes the need for the server to retain an unbounded number of nonce values for future checks.\nBy default, **Hawk** uses a time window of 1 minute to allow for time skew between the client and server (which in\npractice translates to a maximum of 2 minutes as the skew can be positive or negative).\n\nUsing a timestamp requires the client's clock to be in sync with the server's clock. **Hawk** requires both the client\nclock and the server clock to use NTP to ensure synchronization. However, given the limitations of some client types\n(e.g. browsers) to deploy NTP, the server provides the client with its current time (in seconds precision) in response\nto a bad timestamp.\n\nThere is no expectation that the client will adjust its system clock to match the server (in fact, this would be a\npotential attack vector). Instead, the client only uses the server's time to calculate an offset used only\nfor communications with that particular server. 
The protocol rewards clients with synchronized clocks by reducing\nthe number of round trips required to authenticate the first request.\n\n\n## Usage Example\n\nServer code:\n\n```javascript\nvar Http = require('http');\nvar Hawk = require('hawk');\n\n\n// Credentials lookup function\n\nvar credentialsFunc = function (id, callback) {\n\n var credentials = {\n key: 'werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn',\n algorithm: 'sha256',\n user: 'Steve'\n };\n\n return callback(null, credentials);\n};\n\n// Create HTTP server\n\nvar handler = function (req, res) {\n\n // Authenticate incoming request\n\n Hawk.server.authenticate(req, credentialsFunc, {}, function (err, credentials, artifacts) {\n\n // Prepare response\n\n var payload = (!err ? 'Hello ' + credentials.user + ' ' + artifacts.ext : 'Shoosh!');\n var headers = { 'Content-Type': 'text/plain' };\n\n // Generate Server-Authorization response header\n\n var header = Hawk.server.header(credentials, artifacts, { payload: payload, contentType: headers['Content-Type'] });\n headers['Server-Authorization'] = header;\n\n // Send the response back\n\n res.writeHead(!err ? 
200 : 401, headers);\n res.end(payload);\n });\n};\n\n// Start server\n\nHttp.createServer(handler).listen(8000, 'example.com');\n```\n\nClient code:\n\n```javascript\nvar Request = require('request');\nvar Hawk = require('hawk');\n\n\n// Client credentials\n\nvar credentials = {\n id: 'dh37fgj492je',\n key: 'werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn',\n algorithm: 'sha256'\n}\n\n// Request options\n\nvar requestOptions = {\n uri: 'http://example.com:8000/resource/1?b=1&a=2',\n method: 'GET',\n headers: {}\n};\n\n// Generate Authorization request header\n\nvar header = Hawk.client.header('http://example.com:8000/resource/1?b=1&a=2', 'GET', { credentials: credentials, ext: 'some-app-data' });\nrequestOptions.headers.Authorization = header.field;\n\n// Send authenticated request\n\nRequest(requestOptions, function (error, response, body) {\n\n // Authenticate the server's response\n\n var isValid = Hawk.client.authenticate(response, credentials, header.artifacts, { payload: body });\n\n // Output results\n\n console.log(response.statusCode + ': ' + body + (isValid ? ' (valid)' : ' (invalid)'));\n});\n```\n\n**Hawk** utilized the [**SNTP**](https://github.com/hueniverse/sntp) module for time sync management. By default, the local\nmachine time is used. To automatically retrieve and synchronice the clock within the application, use the SNTP 'start()' method.\n\n```javascript\nHawk.sntp.start();\n```\n\n\n## Protocol Example\n\nThe client attempts to access a protected resource without authentication, sending the following HTTP request to\nthe resource server:\n\n```\nGET /resource/1?b=1&a=2 HTTP/1.1\nHost: example.com:8000\n```\n\nThe resource server returns an authentication challenge.\n\n```\nHTTP/1.1 401 Unauthorized\nWWW-Authenticate: Hawk\n```\n\nThe client has previously obtained a set of **Hawk** credentials for accessing resources on the \"http://example.com/\"\nserver. 
The **Hawk** credentials issued to the client include the following attributes:\n\n* Key identifier: dh37fgj492je\n* Key: werxhqb98rpaxn39848xrunpaw3489ruxnpa98w4rxn\n* Algorithm: sha256\n\nThe client generates the authentication header by calculating a timestamp (e.g. the number of seconds since January 1,\n1970 00:00:00 GMT), generating a nonce, and constructing the normalized request string (each value followed by a newline\ncharacter):\n\n```\nhawk.1.header\n1353832234\nj4h3g2\nGET\n/resource/1?b=1&a=2\nexample.com\n8000\n\nsome-app-ext-data\n\n```\n\nThe request MAC is calculated using HMAC with the specified hash algorithm \"sha256\" and the key over the normalized request string.\nThe result is base64-encoded to produce the request MAC:\n\n```\n6R4rV5iE+NPoym+WwjeHzjAGXUtLNIxmo1vpMofpLAE=\n```\n\nThe client includes the **Hawk** key identifier, timestamp, nonce, application specific data, and request MAC with the request using\nthe HTTP `Authorization` request header field:\n\n```\nGET /resource/1?b=1&a=2 HTTP/1.1\nHost: example.com:8000\nAuthorization: Hawk id=\"dh37fgj492je\", ts=\"1353832234\", nonce=\"j4h3g2\", ext=\"some-app-ext-data\", mac=\"6R4rV5iE+NPoym+WwjeHzjAGXUtLNIxmo1vpMofpLAE=\"\n```\n\nThe server validates the request by calculating the request MAC again based on the request received and verifies the validity\nand scope of the **Hawk** credentials. If valid, the server responds with the requested resource.\n\n\n### Payload Validation\n\n**Hawk** provides optional payload validation. When generating the authentication header, the client calculates a payload hash\nusing the specified hash algorithm. The hash is calculated over the concatenated value of (each followed by a newline character):\n* `hawk.1.payload`\n* the content-type in lowercase, without any parameters (e.g. 
`application/json`)\n* the request payload prior to any content encoding (the exact representation requirements should be specified by the server for payloads other than simple single-part ascii to ensure interoperability)\n\nFor example:\n\n* Payload: `Thank you for flying Hawk`\n* Content Type: `text/plain`\n* Hash (sha256): `Yi9LfIIFRtBEPt74PVmbTF/xVAwPn7ub15ePICfgnuY=`\n\nResults in the following input to the payload hash function (newline terminated values):\n\n```\nhawk.1.payload\ntext/plain\nThank you for flying Hawk\n\n```\n\nWhich produces the following hash value:\n\n```\nYi9LfIIFRtBEPt74PVmbTF/xVAwPn7ub15ePICfgnuY=\n```\n\nThe client constructs the normalized request string (newline terminated values):\n\n```\nhawk.1.header\n1353832234\nj4h3g2\nPOST\n/resource/1?a=1&b=2\nexample.com\n8000\nYi9LfIIFRtBEPt74PVmbTF/xVAwPn7ub15ePICfgnuY=\nsome-app-ext-data\n\n```\n\nThen calculates the request MAC and includes the **Hawk** key identifier, timestamp, nonce, payload hash, application specific data,\nand request MAC, with the request using the HTTP `Authorization` request header field:\n\n```\nPOST /resource/1?a=1&b=2 HTTP/1.1\nHost: example.com:8000\nAuthorization: Hawk id=\"dh37fgj492je\", ts=\"1353832234\", nonce=\"j4h3g2\", hash=\"Yi9LfIIFRtBEPt74PVmbTF/xVAwPn7ub15ePICfgnuY=\", ext=\"some-app-ext-data\", mac=\"aSe1DERmZuRl3pI36/9BdZmnErTw3sNzOOAUlfeKjVw=\"\n```\n\nIt is up to the server if and when it validates the payload for any given request, based solely on it's security policy\nand the nature of the data included.\n\nIf the payload is available at the time of authentication, the server uses the hash value provided by the client to construct\nthe normalized string and validates the MAC. If the MAC is valid, the server calculates the payload hash and compares the value\nwith the provided payload hash in the header. 
In many cases, checking the MAC first is faster than calculating the payload hash.\n\nHowever, if the payload is not available at authentication time (e.g. too large to fit in memory, streamed elsewhere, or processed\nat a different stage in the application), the server may choose to defer payload validation for later by retaining the hash value\nprovided by the client after validating the MAC.\n\nIt is important to note that MAC validation does not mean the hash value provided by the client is valid, only that the value\nincluded in the header was not modified. Without calculating the payload hash on the server and comparing it to the value provided\nby the client, the payload may be modified by an attacker.\n\n\n## Response Payload Validation\n\n**Hawk** provides partial response payload validation. The server includes the `Server-Authorization` response header which enables the\nclient to authenticate the response and ensure it is talking to the right server. **Hawk** defines the HTTP `Server-Authorization` header\nas a response header using the exact same syntax as the `Authorization` request header field.\n\nThe header is contructed using the same process as the client's request header. The server uses the same credentials and other\nartifacts provided by the client to constructs the normalized request string. The `ext` and `hash` values are replaced with\nnew values based on the server response. 
The rest as identical to those used by the client.\n\nThe result MAC digest is included with the optional `hash` and `ext` values:\n\n```\nServer-Authorization: Hawk mac=\"XIJRsMl/4oL+nn+vKoeVZPdCHXB4yJkNnBbTbHFZUYE=\", hash=\"f9cDF/TDm7TkYRLnGwRMfeDzT6LixQVLvrIKhh0vgmM=\", ext=\"response-specific\"\n```\n\n\n## Browser Support and Considerations\n\nA browser script is provided for including using a ` +``` + +In [Narwhal](http://narwhaljs.org/), [Node.js](http://nodejs.org/), and [RingoJS](http://ringojs.org/): + +```js +var punycode = require('punycode'); +``` + +In [Rhino](http://www.mozilla.org/rhino/): + +```js +load('punycode.js'); +``` + +Using an AMD loader like [RequireJS](http://requirejs.org/): + +```js +require( + { + 'paths': { + 'punycode': 'path/to/punycode' + } + }, + ['punycode'], + function(punycode) { + console.log(punycode); + } +); +``` + +## API + +### `punycode.decode(string)` + +Converts a Punycode string of ASCII symbols to a string of Unicode symbols. + +```js +// decode domain name parts +punycode.decode('maana-pta'); // 'mañana' +punycode.decode('--dqo34k'); // '☃-⌘' +``` + +### `punycode.encode(string)` + +Converts a string of Unicode symbols to a Punycode string of ASCII symbols. + +```js +// encode domain name parts +punycode.encode('mañana'); // 'maana-pta' +punycode.encode('☃-⌘'); // '--dqo34k' +``` + +### `punycode.toUnicode(input)` + +Converts a Punycode string representing a domain name or an email address to Unicode. Only the Punycoded parts of the input will be converted, i.e. it doesn’t matter if you call it on a string that has already been converted to Unicode. 
+ +```js +// decode domain names +punycode.toUnicode('xn--maana-pta.com'); +// → 'mañana.com' +punycode.toUnicode('xn----dqo34k.com'); +// → '☃-⌘.com' + +// decode email addresses +punycode.toUnicode('джумла@xn--p-8sbkgc5ag7bhce.xn--ba-lmcq'); +// → 'джумла@джpумлатест.bрфa' +``` + +### `punycode.toASCII(input)` + +Converts a Unicode string representing a domain name or an email address to Punycode. Only the non-ASCII parts of the input will be converted, i.e. it doesn’t matter if you call it with a domain that's already in ASCII. + +```js +// encode domain names +punycode.toASCII('mañana.com'); +// → 'xn--maana-pta.com' +punycode.toASCII('☃-⌘.com'); +// → 'xn----dqo34k.com' + +// encode email addresses +punycode.toASCII('джумла@джpумлатест.bрфa'); +// → 'джумла@xn--p-8sbkgc5ag7bhce.xn--ba-lmcq' +``` + +### `punycode.ucs2` + +#### `punycode.ucs2.decode(string)` + +Creates an array containing the numeric code point values of each Unicode symbol in the string. While [JavaScript uses UCS-2 internally](https://mathiasbynens.be/notes/javascript-encoding), this function will convert a pair of surrogate halves (each of which UCS-2 exposes as separate characters) into a single code point, matching UTF-16. + +```js +punycode.ucs2.decode('abc'); +// → [0x61, 0x62, 0x63] +// surrogate pair for U+1D306 TETRAGRAM FOR CENTRE: +punycode.ucs2.decode('\uD834\uDF06'); +// → [0x1D306] +``` + +#### `punycode.ucs2.encode(codePoints)` + +Creates a string based on an array of numeric code point values. + +```js +punycode.ucs2.encode([0x61, 0x62, 0x63]); +// → 'abc' +punycode.ucs2.encode([0x1D306]); +// → '\uD834\uDF06' +``` + +### `punycode.version` + +A string representing the current Punycode.js version number. + +## Unit tests & code coverage + +After cloning this repository, run `npm install --dev` to install the dependencies needed for Punycode.js development and testing. You may want to install Istanbul _globally_ using `npm install istanbul -g`. 
+ +Once that’s done, you can run the unit tests in Node using `npm test` or `node tests/tests.js`. To run the tests in Rhino, Ringo, Narwhal, PhantomJS, and web browsers as well, use `grunt test`. + +To generate the code coverage report, use `grunt cover`. + +Feel free to fork if you see possible improvements! + +## Author + +| [![twitter/mathias](https://gravatar.com/avatar/24e08a9ea84deb17ae121074d0f17125?s=70)](https://twitter.com/mathias "Follow @mathias on Twitter") | +|---| +| [Mathias Bynens](https://mathiasbynens.be/) | + +## Contributors + +| [![twitter/jdalton](https://gravatar.com/avatar/299a3d891ff1920b69c364d061007043?s=70)](https://twitter.com/jdalton "Follow @jdalton on Twitter") | +|---| +| [John-David Dalton](http://allyoucanleet.com/) | + +## License + +Punycode.js is available under the [MIT](https://mths.be/mit) license. diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/parse.php nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/parse.php --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/parse.php 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/parse.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ - '../' . $file, - 'title' => 'Docdown v1.0.0', - 'url' => 'https://github.com/jdalton/docdown/blob/master/docdown.php' - )); - - // save to a .md file - file_put_contents($output . '.md', $markdown); - - // print - header('Content-Type: text/plain;charset=utf-8'); - echo $markdown . 
PHP_EOL; - -?> \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/doc/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -# Docdown v1.0.0 - - - - - - -## `docdown` -* [`docdown`](#docdown$optionsarray) - - - - - - - - - - - - -## `docdown` - - - -### `docdown([$options=array()])` -# [Ⓢ](https://github.com/jdalton/docdown/blob/master/docdown.php#L34 "View in source") [Ⓣ][1] - -Generates Markdown from JSDoc entries in a given file. - -#### Arguments -1. `[$options=array()]` *(Array)*: The options array. - -#### Returns -*(String)*: The generated Markdown. - -#### Example -```php -// specify a file path -$markdown = docdown(array( - // path to js file - 'path' => $filepath, - // url used to reference line numbers in code - 'url' => 'https://github.com/username/project/blob/master/my.js' -)); - -// or pass raw js -$markdown = docdown(array( - // raw JavaScript source - 'source' => $rawJS, - // documentation title - 'title' => 'My API Documentation', - // url used to reference line numbers in code - 'url' => 'https://github.com/username/project/blob/master/my.js' -)); -``` - -* * * - - - - - - - - - - - [1]: #docdown "Jump back to the TOC." 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/docdown.php nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/docdown.php --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/docdown.php 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/docdown.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ - - * Available under MIT license - */ -require(dirname(__FILE__) . '/src/DocDown/Generator.php'); - -/** - * Generates Markdown from JSDoc entries in a given file. - * - * @param {Array} [$options=array()] The options array. - * @returns {String} The generated Markdown. - * @example - * - * // specify a file path - * $markdown = docdown(array( - * // path to js file - * 'path' => $filepath, - * // url used to reference line numbers in code - * 'url' => 'https://github.com/username/project/blob/master/my.js' - * )); - * - * // or pass raw js - * $markdown = docdown(array( - * // raw JavaScript source - * 'source' => $rawJS, - * // documentation title - * 'title' => 'My API Documentation', - * // url used to reference line numbers in code - * 'url' => 'https://github.com/username/project/blob/master/my.js' - * )); - */ -function docdown( $options = array() ) { - $gen = new Generator($options); - return $gen->generate(); -} -?> \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/LICENSE.txt nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/LICENSE.txt --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/LICENSE.txt 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/LICENSE.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -Copyright 2011-2013 John-David Dalton - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -# Docdown v1.0.0 - -A simple JSDoc to Markdown documentation generator. 
- -## Documentation - -The documentation for Docdown can be viewed here: [/doc/README.md](https://github.com/jdalton/docdown/blob/master/doc/README.md#readme) - -For a list of upcoming features, check out our [roadmap](https://github.com/jdalton/docdown/wiki/Roadmap). - -## Installation and usage - -Usage example: - -```php -require("docdown.php"); - -// generate Markdown -$markdown = docdown(array( - "path" => $filepath, - "url" => "https://github.com/username/project/blob/master/my.js" -)); -``` - -## Author - -| [![twitter/jdalton](http://gravatar.com/avatar/299a3d891ff1920b69c364d061007043?s=70)](http://twitter.com/jdalton "Follow @jdalton on Twitter") | -|---| -| [John-David Dalton](http://allyoucanleet.com/) | - -## Contributors - -| [![twitter/mathias](http://gravatar.com/avatar/24e08a9ea84deb17ae121074d0f17125?s=70)](http://twitter.com/mathias "Follow @mathias on Twitter") | -|---| -| [Mathias Bynens](http://mathiasbynens.be/) | diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Alias.php nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Alias.php --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Alias.php 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Alias.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,226 +0,0 @@ -owner = $owner; - $this->_name = $name; - $this->_call = $owner->getCall(); - $this->_category = $owner->getCategory(); - $this->_desc = $owner->getDesc(); - $this->_example = $owner->getExample(); - $this->_isCtor = $owner->isCtor(); - $this->_isLicense = $owner->isLicense(); - $this->_isPlugin = $owner->isPlugin(); - $this->_isPrivate = $owner->isPrivate(); - $this->_isStatic = $owner->isStatic(); - 
$this->_lineNumber = $owner->getLineNumber(); - $this->_members = $owner->getMembers(); - $this->_params = $owner->getParams(); - $this->_returns = $owner->getReturns(); - $this->_type = $owner->getType(); - } - - /*--------------------------------------------------------------------------*/ - - /** - * Extracts the entry's `alias` objects. - * - * @memberOf Alias - * @param {Number} $index The index of the array value to return. - * @returns {Array|String} The entry's `alias` objects. - */ - public function getAliases( $index = null ) { - $result = array(); - return $index !== null - ? @$result[$index] - : $result; - } - - /** - * Extracts the function call from the owner entry. - * - * @memberOf Alias - * @returns {String} The function call. - */ - public function getCall() { - return $this->_call; - } - - /** - * Extracts the owner entry's `category` data. - * - * @memberOf Alias - * @returns {String} The owner entry's `category` data. - */ - public function getCategory() { - return $this->_category; - } - - /** - * Extracts the owner entry's description. - * - * @memberOf Alias - * @returns {String} The owner entry's description. - */ - public function getDesc() { - return $this->_desc; - } - - /** - * Extracts the owner entry's `example` data. - * - * @memberOf Alias - * @returns {String} The owner entry's `example` data. - */ - public function getExample() { - return $this->_example; - } - - /** - * Checks if the entry is an alias. - * - * @memberOf Alias - * @returns {Boolean} Returns `true`. - */ - public function isAlias() { - return true; - } - - /** - * Checks if the owner entry is a constructor. - * - * @memberOf Alias - * @returns {Boolean} Returns `true` if a constructor, else `false`. - */ - public function isCtor() { - return $this->_isCtor; - } - - /** - * Checks if the owner entry is a license. - * - * @memberOf Alias - * @returns {Boolean} Returns `true` if a license, else `false`. 
- */ - public function isLicense() { - return $this->_isLicense; - } - - /** - * Checks if the owner entry *is* assigned to a prototype. - * - * @memberOf Alias - * @returns {Boolean} Returns `true` if assigned to a prototype, else `false`. - */ - public function isPlugin() { - return $this->_isPlugin; - } - - /** - * Checks if the owner entry is private. - * - * @memberOf Alias - * @returns {Boolean} Returns `true` if private, else `false`. - */ - public function isPrivate() { - return $this->_isPrivate; - } - - /** - * Checks if the owner entry is *not* assigned to a prototype. - * - * @memberOf Alias - * @returns {Boolean} Returns `true` if not assigned to a prototype, else `false`. - */ - public function isStatic() { - return $this->_isStatic; - } - - /** - * Resolves the owner entry's line number. - * - * @memberOf Alias - * @returns {Number} The owner entry's line number. - */ - public function getLineNumber() { - return $this->_lineNumber; - } - - /** - * Extracts the owner entry's `member` data. - * - * @memberOf Alias - * @param {Number} $index The index of the array value to return. - * @returns {Array|String} The owner entry's `member` data. - */ - public function getMembers( $index = null ) { - return $index !== null - ? @$this->_members[$index] - : $this->_members; - } - - /** - * Extracts the owner entry's `name` data. - * - * @memberOf Alias - * @returns {String} The owner entry's `name` data. - */ - public function getName() { - return $this->_name; - } - - /** - * Extracts the owner entry's `param` data. - * - * @memberOf Alias - * @param {Number} $index The index of the array value to return. - * @returns {Array} The owner entry's `param` data. - */ - public function getParams( $index = null ) { - return $index !== null - ? @$this->_params[$index] - : $this->_params; - } - - /** - * Extracts the owner entry's `returns` data. - * - * @memberOf Alias - * @returns {String} The owner entry's `returns` data. 
- */ - public function getReturns() { - return $this->_returns; - } - - /** - * Extracts the owner entry's `type` data. - * - * @memberOf Alias - * @returns {String} The owner entry's `type` data. - */ - public function getType() { - return $this->_type; - } -} -?> \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Entry.php nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Entry.php --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Entry.php 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Entry.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,442 +0,0 @@ -entry = $entry; - $this->lang = $lang; - $this->source = str_replace(PHP_EOL, "\n", $source); - } - - /*--------------------------------------------------------------------------*/ - - /** - * Extracts the documentation entries from source code. - * - * @static - * @memberOf Entry - * @param {String} $source The source code. - * @returns {Array} The array of entries. - */ - public static function getEntries( $source ) { - preg_match_all('#/\*\*(?![-!])[\s\S]*?\*/\s*.+#', $source, $result); - return array_pop($result); - } - - /*--------------------------------------------------------------------------*/ - - /** - * Checks if the entry is a function reference. - * - * @private - * @memberOf Entry - * @returns {Boolean} Returns `true` if the entry is a function reference, else `false`. 
- */ - private function isFunction() { - if (!isset($this->_isFunction)) { - $this->_isFunction = !!( - $this->isCtor() || - count($this->getParams()) || - count($this->getReturns()) || - preg_match('/\*[\t ]*@function\b/', $this->entry) - ); - } - return $this->_isFunction; - } - - /*--------------------------------------------------------------------------*/ - - /** - * Extracts the entry's `alias` objects. - * - * @memberOf Entry - * @param {Number} $index The index of the array value to return. - * @returns {Array|String} The entry's `alias` objects. - */ - public function getAliases( $index = null ) { - if (!isset($this->_aliases)) { - preg_match('#\*[\t ]*@alias\s+(.+)#', $this->entry, $result); - - if (count($result)) { - $result = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result[1])); - $result = preg_split('/,\s*/', $result); - natsort($result); - - foreach ($result as $resultIndex => $value) { - $result[$resultIndex] = new Alias($value, $this); - } - } - $this->_aliases = $result; - } - return $index !== null - ? @$this->_aliases[$index] - : $this->_aliases; - } - - /** - * Extracts the function call from the entry. - * - * @memberOf Entry - * @returns {String} The function call. - */ - public function getCall() { - if (isset($this->_call)) { - return $this->_call; - } - - preg_match('#\*/\s*(?:function ([^(]*)|(.*?)(?=[:=,]|return\b))#', $this->entry, $result); - if ($result = array_pop($result)) { - $result = array_pop(explode('var ', trim(trim(array_pop(explode('.', $result))), "'"))); - } - // resolve name - // avoid $this->getName() because it calls $this->getCall() - preg_match('#\*[\t ]*@name\s+(.+)#', $this->entry, $name); - if (count($name)) { - $name = trim($name[1]); - } else { - $name = $result; - } - // compile function call syntax - if ($this->isFunction()) { - // compose parts - $result = array($result); - $params = $this->getParams(); - foreach ($params as $param) { - $result[] = $param[1]; - } - // format - $result = $name .'('. 
implode(array_slice($result, 1), ', ') .')'; - $result = str_replace(', [', ' [, ', str_replace('], [', ', ', $result)); - } - - $this->_call = $result ? $result : $name; - return $this->_call; - } - - /** - * Extracts the entry's `category` data. - * - * @memberOf Entry - * @returns {String} The entry's `category` data. - */ - public function getCategory() { - if (isset($this->_category)) { - return $this->_category; - } - - preg_match('#\*[\t ]*@category\s+(.+)#', $this->entry, $result); - if (count($result)) { - $result = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result[1])); - } else { - $result = $this->getType() == 'Function' ? 'Methods' : 'Properties'; - } - $this->_category = $result; - return $result; - } - - /** - * Extracts the entry's description. - * - * @memberOf Entry - * @returns {String} The entry's description. - */ - public function getDesc() { - if (isset($this->_desc)) { - return $this->_desc; - } - - preg_match('#/\*\*(?:\s*\*)?([\s\S]*?)(?=\*\s\@[a-z]|\*/)#', $this->entry, $result); - if (count($result)) { - $type = $this->getType(); - $result = preg_replace('/:\n[\t ]*\*[\t ]*/', ":
\n", $result[1]); - $result = preg_replace('/(?:^|\n)[\t ]*\*\n[\t ]*\*[\t ]*/', "\n\n", $result); - $result = preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result); - $result = trim($result); - $result = ($type == 'Function' ? '' : '(' . str_replace('|', ', ', trim($type, '{}')) . '): ') . $result; - } - $this->_desc = $result; - return $result; - } - - /** - * Extracts the entry's `example` data. - * - * @memberOf Entry - * @returns {String} The entry's `example` data. - */ - public function getExample() { - if (isset($this->_example)) { - return $this->_example; - } - - preg_match('#\*[\t ]*@example\s+([\s\S]*?)(?=\*\s\@[a-z]|\*/)#', $this->entry, $result); - if (count($result)) { - $result = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', "\n", $result[1])); - $result = '```' . $this->lang . "\n" . $result . "\n```"; - } - $this->_example = $result; - return $result; - } - - /** - * Checks if the entry is an alias. - * - * @memberOf Entry - * @returns {Boolean} Returns `false`. - */ - public function isAlias() { - return false; - } - - /** - * Checks if the entry is a constructor. - * - * @memberOf Entry - * @returns {Boolean} Returns `true` if a constructor, else `false`. - */ - public function isCtor() { - if (!isset($this->_isCtor)) { - $this->_isCtor = !!preg_match('/\*[\t ]*@constructor\b/', $this->entry); - } - return $this->_isCtor; - } - - /** - * Checks if the entry is a license. - * - * @memberOf Entry - * @returns {Boolean} Returns `true` if a license, else `false`. - */ - public function isLicense() { - if (!isset($this->_isLicense)) { - $this->_isLicense = !!preg_match('/\*[\t ]*@license\b/', $this->entry); - } - return $this->_isLicense; - } - - /** - * Checks if the entry *is* assigned to a prototype. - * - * @memberOf Entry - * @returns {Boolean} Returns `true` if assigned to a prototype, else `false`. 
- */ - public function isPlugin() { - if (!isset($this->_isPlugin)) { - $this->_isPlugin = !$this->isCtor() && !$this->isPrivate() && !$this->isStatic(); - } - return $this->_isPlugin; - } - - /** - * Checks if the entry is private. - * - * @memberOf Entry - * @returns {Boolean} Returns `true` if private, else `false`. - */ - public function isPrivate() { - if (!isset($this->_isPrivate)) { - $this->_isPrivate = $this->isLicense() || !!preg_match('/\*[\t ]*@private\b/', $this->entry) || !preg_match('/\*[\t ]*@[a-z]+\b/', $this->entry); - } - return $this->_isPrivate; - } - - /** - * Checks if the entry is *not* assigned to a prototype. - * - * @memberOf Entry - * @returns {Boolean} Returns `true` if not assigned to a prototype, else `false`. - */ - public function isStatic() { - if (isset($this->_isStatic)) { - return $this->_isStatic; - } - - $public = !$this->isPrivate(); - $result = $public && !!preg_match('/\*[\t ]*@static\b/', $this->entry); - - // set in cases where it isn't explicitly stated - if ($public && !$result) { - if ($parent = array_pop(preg_split('/[#.]/', $this->getMembers(0)))) { - foreach (Entry::getEntries($this->source) as $entry) { - $entry = new Entry($entry, $this->source); - if ($entry->getName() == $parent) { - $result = !$entry->isCtor(); - break; - } - } - } else { - $result = true; - } - } - $this->_isStatic = $result; - return $result; - } - - /** - * Resolves the entry's line number. - * - * @memberOf Entry - * @returns {Number} The entry's line number. - */ - public function getLineNumber() { - if (!isset($this->_lineNumber)) { - preg_match_all('/\n/', substr($this->source, 0, strrpos($this->source, $this->entry) + strlen($this->entry)), $lines); - $this->_lineNumber = count(array_pop($lines)) + 1; - } - return $this->_lineNumber; - } - - /** - * Extracts the entry's `member` data. - * - * @memberOf Entry - * @param {Number} $index The index of the array value to return. - * @returns {Array|String} The entry's `member` data. 
- */ - public function getMembers( $index = null ) { - if (!isset($this->_members)) { - preg_match('#\*[\t ]*@member(?:Of)?\s+(.+)#', $this->entry, $result); - if (count($result)) { - $result = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result[1])); - $result = preg_split('/,\s*/', $result); - natsort($result); - } - $this->_members = $result; - } - return $index !== null - ? @$this->_members[$index] - : $this->_members; - } - - /** - * Extracts the entry's `name` data. - * - * @memberOf Entry - * @returns {String} The entry's `name` data. - */ - public function getName() { - if (isset($this->_name)) { - return $this->_name; - } - - preg_match('#\*[\t ]*@name\s+(.+)#', $this->entry, $result); - if (count($result)) { - $result = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result[1])); - } else { - $result = array_shift(explode('(', $this->getCall())); - } - $this->_name = $result; - return $result; - } - - /** - * Extracts the entry's `param` data. - * - * @memberOf Entry - * @param {Number} $index The index of the array value to return. - * @returns {Array} The entry's `param` data. - */ - public function getParams( $index = null ) { - if (!isset($this->_params)) { - preg_match_all('#\*[\t ]*@param\s+\{([^}]+)\}\s+(\[.+\]|[$\w|]+(?:\[.+\])?)\s+([\s\S]*?)(?=\*\s\@[a-z]|\*/)#i', $this->entry, $result); - if (count($result = array_filter(array_slice($result, 1)))) { - // repurpose array - foreach ($result as $param) { - foreach ($param as $key => $value) { - if (!is_array($result[0][$key])) { - $result[0][$key] = array(); - } - $result[0][$key][] = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]*/', ' ', $value)); - } - } - $result = $result[0]; - } - $this->_params = $result; - } - return $index !== null - ? @$this->_params[$index] - : $this->_params; - } - - /** - * Extracts the entry's `returns` data. - * - * @memberOf Entry - * @returns {String} The entry's `returns` data. 
- */ - public function getReturns() { - if (isset($this->_returns)) { - return $this->_returns; - } - - preg_match('#\*[\t ]*@returns\s+\{([^}]+)\}\s+([\s\S]*?)(?=\*\s\@[a-z]|\*/)#', $this->entry, $result); - if (count($result)) { - $result = array_map('trim', array_slice($result, 1)); - $result[0] = str_replace('|', ', ', $result[0]); - $result[1] = preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result[1]); - } - $this->_returns = $result; - return $result; - } - - /** - * Extracts the entry's `type` data. - * - * @memberOf Entry - * @returns {String} The entry's `type` data. - */ - public function getType() { - if (isset($this->_type)) { - return $this->_type; - } - - preg_match('#\*[\t ]*@type\s+(.+)#', $this->entry, $result); - if (count($result)) { - $result = trim(preg_replace('/(?:^|\n)[\t ]*\*[\t ]?/', ' ', $result[1])); - } else { - $result = $this->isFunction() ? 'Function' : 'Unknown'; - } - $this->_type = $result; - return $result; - } -} -?> \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Generator.php nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Generator.php --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Generator.php 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/node_modules/punycode/vendor/docdown/src/DocDown/Generator.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,563 +0,0 @@ -\n"; - - /** - * An array of JSDoc entries. - * - * @memberOf Generator - * @type Array - */ - public $entries = array(); - - /** - * The HTML for the open tag. - * - * @static - * @memberOf Generator - * @type String - */ - public $openTag = "\n\n"; - - /** - * An options array used to configure the generator. 
- * - * @memberOf Generator - * @type Array - */ - public $options = array(); - - /** - * The file's source code. - * - * @memberOf Generator - * @type String - */ - public $source = ''; - - /*--------------------------------------------------------------------------*/ - - /** - * The Generator constructor. - * - * @constructor - * @param {String} $source The source code to parse. - * @param {Array} $options The options array. - */ - public function __construct( $source, $options = array() ) { - // juggle arguments - if (is_array($source)) { - $options = $source; - } else { - $options['source'] = $source; - } - if (isset($options['source']) && realpath($options['source'])) { - $options['path'] = $options['source']; - } - if (isset($options['path'])) { - preg_match('/(?<=\.)[a-z]+$/', $options['path'], $ext); - $options['source'] = file_get_contents($options['path']); - $ext = array_pop($ext); - - if (!isset($options['lang']) && $ext) { - $options['lang'] = $ext; - } - if (!isset($options['title'])) { - $options['title'] = ucfirst(basename($options['path'])) . ' API documentation'; - } - } - if (!isset($options['lang'])) { - $options['lang'] = 'js'; - } - if (!isset($options['toc'])) { - $options['toc'] = 'properties'; - } - - $this->options = $options; - $this->source = str_replace(PHP_EOL, "\n", $options['source']); - $this->entries = Entry::getEntries($this->source); - - foreach ($this->entries as $index => $value) { - $this->entries[$index] = new Entry($value, $this->source, $options['lang']); - } - } - - /*--------------------------------------------------------------------------*/ - - /** - * Performs common string formatting operations. - * - * @private - * @static - * @memberOf Generator - * @param {String} $string The string to format. - * @returns {String} The formatted string. 
- */ - private static function format( $string ) { - $counter = 0; - - // tokenize inline code snippets - preg_match_all('/`[^`]+`/', $string, $tokenized); - $tokenized = $tokenized[0]; - foreach ($tokenized as $snippet) { - $string = str_replace($snippet, '__token' . ($counter++) .'__', $string); - } - - // italicize parentheses - $string = preg_replace('/(^|\s)(\([^)]+\))/', '$1*$2*', $string); - - // mark numbers as inline code - $string = preg_replace('/[\t ](-?\d+(?:.\d+)?)(?!\.[^\n])/', ' `$1`', $string); - - // detokenize inline code snippets - $counter = 0; - foreach ($tokenized as $snippet) { - $string = str_replace('__token' . ($counter++) . '__', $snippet, $string); - } - - return trim($string); - } - - /** - * Modify a string by replacing named tokens with matching assoc. array values. - * - * @private - * @static - * @memberOf Generator - * @param {String} $string The string to modify. - * @param {Array|Object} $object The template object. - * @returns {String} The modified string. - */ - private static function interpolate( $string, $object ) { - preg_match_all('/#\{([^}]+)\}/', $string, $tokens); - $tokens = array_unique(array_pop($tokens)); - - foreach ($tokens as $token) { - $pattern = '/#\{' . $token . '\}/'; - $replacement = ''; - - if (is_object($object)) { - preg_match('/\(([^)]+?)\)$/', $token, $args); - $args = preg_split('/,\s*/', array_pop($args)); - $method = 'get' . 
ucfirst(str_replace('/\([^)]+?\)$/', '', $token)); - - if (method_exists($object, $method)) { - $replacement = (string) call_user_func_array(array($object, $method), $args); - } else if (isset($object->{$token})) { - $replacement = (string) $object->{$token}; - } - } else if (isset($object[$token])) { - $replacement = (string) $object[$token]; - } - $string = preg_replace($pattern, trim($replacement), $string); - } - return Generator::format($string); - } - - /*--------------------------------------------------------------------------*/ - - /** - * Adds the given `$entries` to the `$result` array. - * - * @private - * @memberOf Generator - * @param {Array} $result The result array to modify. - * @param {Array} $entries The entries to add to the `$result`. - */ - private function addEntries( &$result, $entries ) { - foreach ($entries as $entry) { - // skip aliases - if ($entry->isAlias()) { - continue; - } - // name and description - array_push( - $result, - $this->openTag, - Generator::interpolate("### `#{member}#{separator}#{call}`\n# [Ⓢ](#{href} \"View in source\") [Ⓣ][1]\n\n#{desc}", $entry) - ); - - // @alias - if (count($aliases = $entry->getAliases())) { - array_push($result, '', '#### Aliases'); - foreach ($aliases as $index => $alias) { - $aliases[$index] = $alias->getName(); - } - $result[] = '*' . implode(', ', $aliases) . '*'; - } - // @param - if (count($params = $entry->getParams())) { - array_push($result, '', '#### Arguments'); - foreach ($params as $index => $param) { - $result[] = Generator::interpolate('#{num}. 
`#{name}` (#{type}): #{desc}', array( - 'desc' => $param[2], - 'name' => $param[1], - 'num' => $index + 1, - 'type' => $param[0] - )); - } - } - // @returns - if (count($returns = $entry->getReturns())) { - array_push( - $result, '', - '#### Returns', - Generator::interpolate('(#{type}): #{desc}', array('desc' => $returns[1], 'type' => $returns[0])) - ); - } - // @example - if ($example = $entry->getExample()) { - array_push($result, '', '#### Example', $example); - } - array_push($result, "\n* * *", $this->closeTag); - } - } - - /** - * Resolves the entry's hash used to navigate the documentation. - * - * @private - * @memberOf Generator - * @param {Number|Object} $entry The entry object. - * @param {String} $member The name of the member. - * @returns {String} The url hash. - */ - private function getHash( $entry, $member = '' ) { - $entry = is_numeric($entry) ? $this->entries[$entry] : $entry; - $member = !$member ? $entry->getMembers(0) : $member; - $result = ($member ? $member . ($entry->isPlugin() ? 'prototype' : '') : '') . $entry->getCall(); - $result = preg_replace('/\(\[|\[\]/', '', $result); - $result = preg_replace('/[ =|\'"{}.()\]]/', '', $result); - $result = preg_replace('/[[#,]/', '-', $result); - return strtolower($result); - } - - /** - * Resolves the entry's url for the specific line number. - * - * @private - * @memberOf Generator - * @param {Number|Object} $entry The entry object. - * @returns {String} The url. - */ - private function getLineUrl( $entry ) { - $entry = is_numeric($entry) ? $this->entries($entry) : $entry; - return $this->options['url'] . '#L' . $entry->getLineNumber(); - } - - /** - * Extracts the character used to separate the entry's name from its member. - * - * @private - * @memberOf Generator - * @param {Number|Object} $entry The entry object. - * @returns {String} The separator. - */ - private function getSeparator( $entry ) { - $entry = is_numeric($entry) ? $this->entries($entry) : $entry; - return $entry->isPlugin() ? 
'.prototype.' : '.'; - } - - /*--------------------------------------------------------------------------*/ - - /** - * Generates Markdown from JSDoc entries. - * - * @memberOf Generator - * @returns {String} The rendered Markdown. - */ - public function generate() { - $api = array(); - $byCategory = $this->options['toc'] == 'categories'; - $categories = array(); - $closeTag = $this->closeTag; - $compiling = false; - $openTag = $this->openTag; - $result = array('# ' . $this->options['title']); - $toc = 'toc'; - - // initialize $api array - foreach ($this->entries as $entry) { - // skip invalid or private entries - $name = $entry->getName(); - if (!$name || $entry->isPrivate()) { - continue; - } - - $members = $entry->getMembers(); - $members = count($members) ? $members : array(''); - - foreach ($members as $member) { - // create api category arrays - if ($member && !isset($api[$member])) { - // create temporary entry to be replaced later - $api[$member] = new stdClass; - $api[$member]->static = array(); - $api[$member]->plugin = array(); - } - - // append entry to api member - if (!$member || $entry->isCtor() || ($entry->getType() == 'Object' && - !preg_match('/[=:]\s*(?:null|undefined)\s*[,;]?$/', $entry->entry))) { - - // assign the real entry, replacing the temporary entry if it exist - $member = ($member ? $member . ($entry->isPlugin() ? '#' : '.') : '') . $name; - $entry->static = @$api[$member] ? $api[$member]->static : array(); - $entry->plugin = @$api[$member] ? 
$api[$member]->plugin : array(); - - $api[$member] = $entry; - foreach ($entry->getAliases() as $alias) { - $api[$member]->static[] = $alias; - } - } - else if ($entry->isStatic()) { - $api[$member]->static[] = $entry; - foreach ($entry->getAliases() as $alias) { - $api[$member]->static[] = $alias; - } - } - else if (!$entry->isCtor()) { - $api[$member]->plugin[] = $entry; - foreach ($entry->getAliases() as $alias) { - $api[$member]->plugin[] = $alias; - } - } - } - } - - // add properties to each entry - foreach ($api as $entry) { - $entry->hash = $this->getHash($entry); - $entry->href = $this->getLineUrl($entry); - - $member = $entry->getMembers(0); - $member = ($member ? $member . ($entry->isPlugin() ? '.prototype.' : '.') : '') . $entry->getName(); - $entry->member = preg_replace('/' . $entry->getName() . '$/', '', $member); - - // add properties to static and plugin sub-entries - foreach (array('static', 'plugin') as $kind) { - foreach ($entry->{$kind} as $subentry) { - $subentry->hash = $this->getHash($subentry); - $subentry->href = $this->getLineUrl($subentry); - $subentry->member = $member; - $subentry->separator = $this->getSeparator($subentry); - } - } - } - - /*------------------------------------------------------------------------*/ - - // custom sort for root level entries - // TODO: see how well it handles deeper namespace traversal - function sortCompare($a, $b) { - $score = array( 'a' => 0, 'b' => 0); - foreach (array( 'a' => $a, 'b' => $b) as $key => $value) { - // capitalized properties are last - if (preg_match('/[#.][A-Z]/', $value)) { - $score[$key] = 0; - } - // lowercase prototype properties are next to last - else if (preg_match('/#[a-z]/', $value)) { - $score[$key] = 1; - } - // lowercase static properties next to first - else if (preg_match('/\.[a-z]/', $value)) { - $score[$key] = 2; - } - // root properties are first - else if (preg_match('/^[^#.]+$/', $value)) { - $score[$key] = 3; - } - } - $score = $score['b'] - $score['a']; - return 
$score ? $score : strcasecmp($a, $b); - } - - uksort($api, 'sortCompare'); - - // sort static and plugin sub-entries - foreach ($api as $entry) { - foreach (array('static', 'plugin') as $kind) { - $sortBy = array( 'a' => array(), 'b' => array(), 'c' => array() ); - foreach ($entry->{$kind} as $subentry) { - $name = $subentry->getName(); - // functions w/o ALL-CAPs names are last - $sortBy['a'][] = $subentry->getType() == 'Function' && !preg_match('/^[A-Z_]+$/', $name); - // ALL-CAPs properties first - $sortBy['b'][] = preg_match('/^[A-Z_]+$/', $name); - // lowercase alphanumeric sort - $sortBy['c'][] = strtolower($name); - } - array_multisort($sortBy['a'], SORT_ASC, $sortBy['b'], SORT_DESC, $sortBy['c'], SORT_ASC, $entry->{$kind}); - } - } - - /*------------------------------------------------------------------------*/ - - // add categories - foreach ($api as $entry) { - $categories[$entry->getCategory()][] = $entry; - foreach (array('static', 'plugin') as $kind) { - foreach ($entry->{$kind} as $subentry) { - $categories[$subentry->getCategory()][] = $subentry; - } - } - } - - // sort categories - ksort($categories); - - foreach(array('Methods', 'Properties') as $category) { - if (isset($categories[$category])) { - $entries = $categories[$category]; - unset($categories[$category]); - $categories[$category] = $entries; - } - } - - /*------------------------------------------------------------------------*/ - - // compile TOC - $result[] = $openTag; - - // compile TOC by categories - if ($byCategory) { - foreach ($categories as $category => $entries) { - if ($compiling) { - $result[] = $closeTag; - } else { - $compiling = true; - } - // assign TOC hash - if (count($result) == 2) { - $toc = $category; - } - // add category - array_push( - $result, - $openTag, '## ' . (count($result) == 2 ? '' : '') . '`' . $category . 
'`' - ); - // add entries - foreach ($entries as $entry) { - $result[] = Generator::interpolate('* [`#{member}#{separator}#{name}`](##{hash})', $entry); - } - } - } - // compile TOC by namespace - else { - foreach ($api as $entry) { - if ($compiling) { - $result[] = $closeTag; - } else { - $compiling = true; - } - $member = $entry->member . $entry->getName(); - - // assign TOC hash - if (count($result) == 2) { - $toc = $member; - } - // add root entry - array_push( - $result, - $openTag, '## ' . (count($result) == 2 ? '' : '') . '`' . $member . '`', - Generator::interpolate('* [`' . $member . '`](##{hash})', $entry) - ); - - // add static and plugin sub-entries - foreach (array('static', 'plugin') as $kind) { - if ($kind == 'plugin' && count($entry->plugin)) { - array_push( - $result, - $closeTag, - $openTag, - '## `' . $member . ($entry->isCtor() ? '.prototype`' : '`') - ); - } - foreach ($entry->{$kind} as $subentry) { - $subentry->member = $member; - $result[] = Generator::interpolate('* [`#{member}#{separator}#{name}`](##{hash})', $subentry); - } - } - } - } - - array_push($result, $closeTag, $closeTag); - - /*------------------------------------------------------------------------*/ - - // compile content - $compiling = false; - $result[] = $openTag; - - if ($byCategory) { - foreach ($categories as $category => $entries) { - if ($compiling) { - $result[] = $closeTag; - } else { - $compiling = true; - } - if ($category != 'Methods' && $category != 'Properties') { - $category = '“' . $category . '” Methods'; - } - array_push($result, $openTag, '## `' . $category . '`'); - $this->addEntries($result, $entries); - } - } - else { - foreach ($api as $entry) { - // skip aliases - if ($entry->isAlias()) { - continue; - } - if ($compiling) { - $result[] = $closeTag; - } else { - $compiling = true; - } - // add root entry name - $member = $entry->member . $entry->getName(); - array_push($result, $openTag, '## `' . $member . 
'`'); - - foreach (array($entry, 'static', 'plugin') as $kind) { - $subentries = is_string($kind) ? $entry->{$kind} : array($kind); - - // add sub-entry name - if ($kind != 'static' && $entry->getType() != 'Object' && - count($subentries) && $subentries[0] != $kind) { - if ($kind == 'plugin') { - $result[] = $closeTag; - } - array_push( - $result, - $openTag, - '## `' . $member . ($kind == 'plugin' ? '.prototype`' : '`') - ); - } - $this->addEntries($result, $subentries); - } - } - } - - // close tags add TOC link reference - array_push($result, $closeTag, $closeTag, '', ' [1]: #' . $toc . ' "Jump back to the TOC."'); - - // cleanup whitespace - return trim(preg_replace('/[\t ]+\n/', "\n", join($result, "\n"))); - } -} -?> diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,8 @@ { "author": { - "name": "Jeremy Stashewsky", - "email": "jeremy@goinstant.com", - "url": "https://github.com/stash" + "name": "GoInstant Inc., a salesforce.com company" }, + "license": "MIT", "name": "tough-cookie", "description": "RFC6265 Cookies and Cookie Jar for node.js", "keywords": [ @@ -16,14 +15,14 @@ "RFC6265", "RFC2965" ], - "version": "0.9.15", - "homepage": "https://github.com/goinstant/node-cookie", + "version": "0.12.1", + "homepage": "https://github.com/goinstant/tough-cookie", "repository": { "type": "git", - "url": "git://github.com/goinstant/node-cookie.git" + "url": "git://github.com/goinstant/tough-cookie.git" }, "bugs": { - "url": "https://github.com/goinstant/node-cookie/issues" + "url": "https://github.com/goinstant/tough-cookie/issues" }, "main": "./lib/cookie", "scripts": { @@ -36,11 
+35,33 @@ "punycode": ">=0.2.0" }, "devDependencies": { - "vows": ">=0.6.0", + "vows": "0.7.0", "async": ">=0.1.12" }, - "readme": "[RFC6265](http://tools.ietf.org/html/rfc6265) Cookies and CookieJar for Node.js\n\n![Tough Cookie](http://www.goinstant.com.s3.amazonaws.com/tough-cookie.jpg)\n\n# Synopsis\n\n``` javascript\nvar cookies = require('tough-cookie'); // note: not 'cookie', 'cookies' or 'node-cookie'\nvar Cookie = cookies.Cookie;\nvar cookie = Cookie.parse(header);\ncookie.value = 'somethingdifferent';\nheader = cookie.toString();\n\nvar cookiejar = new cookies.CookieJar();\ncookiejar.setCookie(cookie, 'http://currentdomain.example.com/path', cb);\n// ...\ncookiejar.getCookies('http://example.com/otherpath',function(err,cookies) {\n res.headers['cookie'] = cookies.join('; ');\n});\n```\n\n# Installation\n\nIt's _so_ easy!\n\n`npm install tough-cookie`\n\nRequires `punycode`, which should get installed automatically for you. Note that node.js v0.6.2+ bundles punycode by default.\n\nWhy the name? NPM modules `cookie`, `cookies` and `cookiejar` were already taken.\n\n# API\n\ncookies\n=======\n\nFunctions on the module you get from `require('tough-cookie')`. All can be used as pure functions and don't need to be \"bound\".\n\nparseDate(string[,strict])\n-----------------\n\nParse a cookie date string into a `Date`. Parses according to RFC6265 Section 5.1.1, not `Date.parse()`. If strict is set to true then leading/trailing non-seperator characters around the time part will cause the parsing to fail (e.g. \"Thu, 01 Jan 1970 00:00:010 GMT\" has an extra trailing zero but Chrome, an assumedly RFC-compliant browser, treats this as valid).\n\nformatDate(date)\n----------------\n\nFormat a Date into a RFC1123 string (the RFC6265-recommended format).\n\ncanonicalDomain(str)\n--------------------\n\nTransforms a domain-name into a canonical domain-name. 
The canonical domain-name is a trimmed, lowercased, stripped-of-leading-dot and optionally punycode-encoded domain-name (Section 5.1.2 of RFC6265). For the most part, this function is idempotent (can be run again on its output without ill effects).\n\ndomainMatch(str,domStr[,canonicalize=true])\n-------------------------------------------\n\nAnswers \"does this real domain match the domain in a cookie?\". The `str` is the \"current\" domain-name and the `domStr` is the \"cookie\" domain-name. Matches according to RFC6265 Section 5.1.3, but it helps to think of it as a \"suffix match\".\n\nThe `canonicalize` parameter will run the other two paramters through `canonicalDomain` or not.\n\ndefaultPath(path)\n-----------------\n\nGiven a current request/response path, gives the Path apropriate for storing in a cookie. This is basically the \"directory\" of a \"file\" in the path, but is specified by Section 5.1.4 of the RFC.\n\nThe `path` parameter MUST be _only_ the pathname part of a URI (i.e. excludes the hostname, query, fragment, etc.). This is the `.pathname` property of node's `uri.parse()` output.\n\npathMatch(reqPath,cookiePath)\n-----------------------------\n\nAnswers \"does the request-path path-match a given cookie-path?\" as per RFC6265 Section 5.1.4. Returns a boolean.\n\nThis is essentially a prefix-match where `cookiePath` is a prefix of `reqPath`.\n\nparse(header[,strict=false])\n----------------------------\n\nalias for `Cookie.parse(header[,strict])`\n\nfromJSON(string)\n----------------\n\nalias for `Cookie.fromJSON(string)`\n\ngetPublicSuffix(hostname)\n-------------------------\n\nReturns the public suffix of this hostname. The public suffix is the shortest domain-name upon which a cookie can be set. Returns `null` if the hostname cannot have cookies set for it.\n\nFor example: `www.example.com` and `www.subdomain.example.com` both have public suffix `example.com`.\n\nFor further information, see http://publicsuffix.org/. 
This module derives its list from that site.\n\ncookieCompare(a,b)\n------------------\n\nFor use with `.sort()`, sorts a list of cookies into the recommended order given in the RFC (Section 5.4 step 2). Longest `.path`s go first, then sorted oldest to youngest.\n\n``` javascript\nvar cookies = [ /* unsorted array of Cookie objects */ ];\ncookies = cookies.sort(cookieCompare);\n```\n\npermuteDomain(domain)\n---------------------\n\nGenerates a list of all possible domains that `domainMatch()` the parameter. May be handy for implementing cookie stores.\n\n\npermutePath(path)\n-----------------\n\nGenerates a list of all possible paths that `pathMatch()` the parameter. May be handy for implementing cookie stores.\n\nCookie\n======\n\nCookie.parse(header[,strict=false])\n-----------------------------------\n\nParses a single Cookie or Set-Cookie HTTP header into a `Cookie` object. Returns `undefined` if the string can't be parsed. If in strict mode, returns `undefined` if the cookie doesn't follow the guidelines in section 4 of RFC6265. Generally speaking, strict mode can be used to validate your own generated Set-Cookie headers, but acting as a client you want to be lenient and leave strict mode off.\n\nHere's how to process the Set-Cookie header(s) on a node HTTP/HTTPS response:\n\n``` javascript\nif (res.headers['set-cookie'] instanceof Array)\n cookies = res.headers['set-cookie'].map(Cookie.parse);\nelse\n cookies = [Cookie.parse(res.headers['set-cookie'])];\n```\n\nCookie.fromJSON(string)\n-----------------------\n\nConvert a JSON string to a `Cookie` object. Does a `JSON.parse()` and converts the `.created`, `.lastAccessed` and `.expires` properties into `Date` objects.\n\nProperties\n==========\n\n * _key_ - string - the name or key of the cookie (default \"\")\n * _value_ - string - the value of the cookie (default \"\")\n * _expires_ - `Date` - if set, the `Expires=` attribute of the cookie (defaults to the string `\"Infinity\"`). 
See `setExpires()`\n * _maxAge_ - seconds - if set, the `Max-Age=` attribute _in seconds_ of the cookie. May also be set to strings `\"Infinity\"` and `\"-Infinity\"` for non-expiry and immediate-expiry, respectively. See `setMaxAge()`\n * _domain_ - string - the `Domain=` attribute of the cookie\n * _path_ - string - the `Path=` of the cookie\n * _secure_ - boolean - the `Secure` cookie flag\n * _httpOnly_ - boolean - the `HttpOnly` cookie flag\n * _extensions_ - `Array` - any unrecognized cookie attributes as strings (even if equal-signs inside)\n \nAfter a cookie has been passed through `CookieJar.setCookie()` it will have the following additional attributes:\n\n * _hostOnly_ - boolean - is this a host-only cookie (i.e. no Domain field was set, but was instead implied)\n * _pathIsDefault_ - boolean - if true, there was no Path field on the cookie and `defaultPath()` was used to derive one.\n * _created_ - `Date` - when this cookie was added to the jar\n * _lastAccessed_ - `Date` - last time the cookie got accessed. Will affect cookie cleaning once implemented. Using `cookiejar.getCookies(...)` will update this attribute.\n\nConstruction([{options}])\n------------\n\nReceives an options object that can contain any Cookie properties, uses the default for unspecified properties.\n\n.toString()\n-----------\n\nencode to a Set-Cookie header value. The Expires cookie field is set using `formatDate()`, but is omitted entirely if `.expires` is `Infinity`.\n\n.cookieString()\n---------------\n\nencode to a Cookie header value (i.e. the `.key` and `.value` properties joined with '=').\n\n.setExpires(String)\n-------------------\n\nsets the expiry based on a date-string passed through `parseDate()`. If parseDate returns `null` (i.e. can't parse this date string), `.expires` is set to `\"Infinity\"` (a string) is set.\n\n.setMaxAge(number)\n-------------------\n\nsets the maxAge in seconds. 
Coerces `-Infinity` to `\"-Infinity\"` and `Infinity` to `\"Infinity\"` so it JSON serializes correctly.\n\n.expiryTime([now=Date.now()])\n-----------------------------\n\n.expiryDate([now=Date.now()])\n-----------------------------\n\nexpiryTime() Computes the absolute unix-epoch milliseconds that this cookie expires. expiryDate() works similarly, except it returns a `Date` object. Note that in both cases the `now` parameter should be milliseconds.\n\nMax-Age takes precedence over Expires (as per the RFC). The `.created` attribute -- or, by default, the `now` paramter -- is used to offset the `.maxAge` attribute.\n\nIf Expires (`.expires`) is set, that's returned.\n\nOtherwise, `expiryTime()` returns `Infinity` and `expiryDate()` returns a `Date` object for \"Tue, 19 Jan 2038 03:14:07 GMT\" (latest date that can be expressed by a 32-bit `time_t`; the common limit for most user-agents).\n\n.TTL([now=Date.now()])\n---------\n\ncompute the TTL relative to `now` (milliseconds). The same precedence rules as for `expiryTime`/`expiryDate` apply.\n\nThe \"number\" `Infinity` is returned for cookies without an explicit expiry and `0` is returned if the cookie is expired. Otherwise a time-to-live in milliseconds is returned.\n\n.canonicalizedDoman()\n---------------------\n\n.cdomain()\n----------\n\nreturn the canonicalized `.domain` field. This is lower-cased and punycode (RFC3490) encoded if the domain has any non-ASCII characters.\n\n.validate()\n-----------\n\nStatus: *IN PROGRESS*. Works for a few things, but is by no means comprehensive.\n\nvalidates cookie attributes for semantic correctness. Useful for \"lint\" checking any Set-Cookie headers you generate. 
For now, it returns a boolean, but eventually could return a reason string -- you can future-proof with this construct:\n\n``` javascript\nif (cookie.validate() === true) {\n // it's tasty\n} else {\n // yuck!\n}\n```\n\nCookieJar\n=========\n\nConstruction([store = new MemoryCookieStore()][, rejectPublicSuffixes])\n------------\n\nSimply use `new CookieJar()`. If you'd like to use a custom store, pass that to the constructor otherwise a `MemoryCookieStore` will be created and used.\n\n\nAttributes\n----------\n\n * _rejectPublicSuffixes_ - boolean - reject cookies with domains like \"com\" and \"co.uk\" (default: `true`)\n \nSince eventually this module would like to support database/remote/etc. CookieJars, continuation passing style is used for CookieJar methods.\n\n.setCookie(cookieOrString, currentUrl, [{options},] cb(err,cookie))\n-------------------------------------------------------------------\n\nAttempt to set the cookie in the cookie jar. If the operation fails, an error will be given to the callback `cb`, otherwise the cookie is passed through. The cookie will have updated `.created`, `.lastAccessed` and `.hostOnly` properties.\n\nThe `options` object can be omitted and can have the following properties:\n\n * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies.\n * _secure_ - boolean - autodetect from url - indicates if this is a \"Secure\" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`.\n * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies\n * _strict_ - boolean - default `false` - perform extra checks\n * _ignoreError_ - boolean - default `false` - silently ignore things like parse errors and invalid domains. 
CookieStore errors aren't ignored by this option.\n\nAs per the RFC, the `.hostOnly` property is set if there was no \"Domain=\" parameter in the cookie string (or `.domain` was null on the Cookie object). The `.domain` property is set to the fully-qualified hostname of `currentUrl` in this case. Matching this cookie requires an exact hostname match (not a `domainMatch` as per usual).\n\n.storeCookie(cookie, [{options},] cb(err,cookie))\n-------------------------------------------------\n\n__REMOVED__ removed in lieu of the CookieStore API below\n \n.getCookies(currentUrl, [{options},] cb(err,cookies))\n-----------------------------------------------------\n\nRetrieve the list of cookies that can be sent in a Cookie header for the current url.\n\nIf an error is encountered, that's passed as `err` to the callback, otherwise an `Array` of `Cookie` objects is passed. The array is sorted with `cookieCompare()` unless the `{sort:false}` option is given.\n\nThe `options` object can be omitted and can have the following properties:\n\n * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies.\n * _secure_ - boolean - autodetect from url - indicates if this is a \"Secure\" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`.\n * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies\n * _expire_ - boolean - default `true` - perform expiry-time checking of cookies and asynchronously remove expired cookies from the store. Using `false` will return expired cookies and **not** remove them from the store (which is useful for replaying Set-Cookie headers, potentially).\n * _allPaths_ - boolean - default `false` - if `true`, do not scope cookies by path. The default uses RFC-compliant path scoping. 
**Note**: may not be supported by the CookieStore `fetchCookies` function (the default MemoryCookieStore supports it).\n\nThe `.lastAccessed` property of the returned cookies will have been updated.\n\n.getCookieString(...)\n---------------------\n\nAccepts the same options as `.getCookies()` but passes a string suitable for a Cookie header rather than an array to the callback. Simply maps the `Cookie` array via `.cookieString()`.\n\n.getSetCookieStrings(...)\n-------------------------\n\nAccepts the same options as `.getCookies()` but passes an array of strings suitable for Set-Cookie headers (rather than an array of `Cookie`s) to the callback. Simply maps the cookie array via `.toString()`.\n\n# CookieStore API\n\nThe storage model for each `CookieJar` instance can be replaced with a custom implementation. The default is `MemoryCookieStore` which can be found in the `lib/memstore.js` file. The API uses continuation-passing-style to allow for asynchronous stores.\n\nAll `domain` parameters will have been normalized before calling.\n\nThe Cookie store must have all of the following methods.\n\nstore.findCookie(domain, path, key, cb(err,cookie))\n---------------------------------------------------\n\nRetrieve a cookie with the given domain, path and key (a.k.a. name). The RFC maintains that exactly one of these cookies should exist in a store. If the store is using versioning, this means that the latest/newest such cookie should be returned.\n\nCallback takes an error and the resulting `Cookie` object. If no cookie is found then `null` MUST be passed instead (i.e. not an error).\n\nstore.findCookies(domain, path, cb(err,cookies))\n------------------------------------------------\n\nLocates cookies matching the given domain and path. 
This is most often called in the context of `cookiejar.getCookies()` above.\n\nIf no cookies are found, the callback MUST be passed an empty array.\n\nThe resulting list will be checked for applicability to the current request according to the RFC (domain-match, path-match, http-only-flag, secure-flag, expiry, etc.), so it's OK to use an optimistic search algorithm when implementing this method. However, the search algorithm used SHOULD try to find cookies that `domainMatch()` the domain and `pathMatch()` the path in order to limit the amount of checking that needs to be done.\n\nAs of version 0.9.12, the `allPaths` option to `cookiejar.getCookies()` above will cause the path here to be `null`. If the path is `null`, path-matching MUST NOT be performed (i.e. domain-matching only).\n\nstore.putCookie(cookie, cb(err))\n--------------------------------\n\nAdds a new cookie to the store. The implementation SHOULD replace any existing cookie with the same `.domain`, `.path`, and `.key` properties -- depending on the nature of the implementation, it's possible that between the call to `fetchCookie` and `putCookie` that a duplicate `putCookie` can occur.\n\nThe `cookie` object MUST NOT be modified; the caller will have already updated the `.creation` and `.lastAccessed` properties.\n\nPass an error if the cookie cannot be stored.\n\nstore.updateCookie(oldCookie, newCookie, cb(err))\n-------------------------------------------------\n\nUpdate an existing cookie. The implementation MUST update the `.value` for a cookie with the same `domain`, `.path` and `.key`. The implementation SHOULD check that the old value in the store is equivalent to `oldCookie` - how the conflict is resolved is up to the store.\n\nThe `.lastAccessed` property will always be different between the two objects and `.created` will always be the same. 
Stores MAY ignore or defer the `.lastAccessed` change at the cost of affecting how cookies are sorted (or selected for deletion).\n\nStores may wish to optimize changing the `.value` of the cookie in the store versus storing a new cookie. If the implementation doesn't define this method a stub that calls `putCookie(newCookie,cb)` will be added to the store object.\n\nThe `newCookie` and `oldCookie` objects MUST NOT be modified.\n\nPass an error if the newCookie cannot be stored.\n\nstore.removeCookie(domain, path, key, cb(err))\n----------------------------------------------\n\nRemove a cookie from the store (see notes on `findCookie` about the uniqueness constraint).\n\nThe implementation MUST NOT pass an error if the cookie doesn't exist; only pass an error due to the failure to remove an existing cookie.\n\nstore.removeCookies(domain, path, cb(err))\n------------------------------------------\n\nRemoves matching cookies from the store. The `path` paramter is optional, and if missing means all paths in a domain should be removed.\n\nPass an error ONLY if removing any existing cookies failed.\n\n# TODO\n\n * _full_ RFC5890/RFC5891 canonicalization for domains in `cdomain()`\n * the optional `punycode` requirement implements RFC3492, but RFC6265 requires RFC5891\n * better tests for `validate()`?\n\n# Copyright and License\n\n(tl;dr: MIT with some MPL/1.1)\n\nCopyright GoInstant, Inc. and other contributors. 
All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\nPortions may be licensed under different licenses (in particular public-suffix.txt is MPL/1.1); please read the LICENSE file for full details.\n", + "readme": "[RFC6265](http://tools.ietf.org/html/rfc6265) Cookies and CookieJar for Node.js\n\n![Tough Cookie](http://www.goinstant.com.s3.amazonaws.com/tough-cookie.jpg)\n\n[![Build Status](https://travis-ci.org/goinstant/node-cookie.png?branch=master)](https://travis-ci.org/goinstant/node-cookie)\n\n[![NPM Stats](https://nodei.co/npm/tough-cookie.png?downloads=true&stars=true)](https://npmjs.org/package/tough-cookie)\n![NPM Downloads](https://nodei.co/npm-dl/tough-cookie.png?months=9)\n\n# Synopsis\n\n``` javascript\nvar tough = require('tough-cookie'); // note: not 'cookie', 'cookies' or 'node-cookie'\nvar Cookie = tough.Cookie;\nvar cookie = Cookie.parse(header);\ncookie.value = 'somethingdifferent';\nheader = cookie.toString();\n\nvar cookiejar = new 
tough.CookieJar();\ncookiejar.setCookie(cookie, 'http://currentdomain.example.com/path', cb);\n// ...\ncookiejar.getCookies('http://example.com/otherpath',function(err,cookies) {\n res.headers['cookie'] = cookies.join('; ');\n});\n```\n\n# Installation\n\nIt's _so_ easy!\n\n`npm install tough-cookie`\n\nRequires `punycode`, which should get installed automatically for you. Note that node.js v0.6.2+ bundles punycode by default.\n\nWhy the name? NPM modules `cookie`, `cookies` and `cookiejar` were already taken.\n\n# API\n\ntough\n=====\n\nFunctions on the module you get from `require('tough-cookie')`. All can be used as pure functions and don't need to be \"bound\".\n\nparseDate(string[,strict])\n-----------------\n\nParse a cookie date string into a `Date`. Parses according to RFC6265 Section 5.1.1, not `Date.parse()`. If strict is set to true then leading/trailing non-seperator characters around the time part will cause the parsing to fail (e.g. \"Thu, 01 Jan 1970 00:00:010 GMT\" has an extra trailing zero but Chrome, an assumedly RFC-compliant browser, treats this as valid).\n\nformatDate(date)\n----------------\n\nFormat a Date into a RFC1123 string (the RFC6265-recommended format).\n\ncanonicalDomain(str)\n--------------------\n\nTransforms a domain-name into a canonical domain-name. The canonical domain-name is a trimmed, lowercased, stripped-of-leading-dot and optionally punycode-encoded domain-name (Section 5.1.2 of RFC6265). For the most part, this function is idempotent (can be run again on its output without ill effects).\n\ndomainMatch(str,domStr[,canonicalize=true])\n-------------------------------------------\n\nAnswers \"does this real domain match the domain in a cookie?\". The `str` is the \"current\" domain-name and the `domStr` is the \"cookie\" domain-name. 
Matches according to RFC6265 Section 5.1.3, but it helps to think of it as a \"suffix match\".\n\nThe `canonicalize` parameter will run the other two paramters through `canonicalDomain` or not.\n\ndefaultPath(path)\n-----------------\n\nGiven a current request/response path, gives the Path apropriate for storing in a cookie. This is basically the \"directory\" of a \"file\" in the path, but is specified by Section 5.1.4 of the RFC.\n\nThe `path` parameter MUST be _only_ the pathname part of a URI (i.e. excludes the hostname, query, fragment, etc.). This is the `.pathname` property of node's `uri.parse()` output.\n\npathMatch(reqPath,cookiePath)\n-----------------------------\n\nAnswers \"does the request-path path-match a given cookie-path?\" as per RFC6265 Section 5.1.4. Returns a boolean.\n\nThis is essentially a prefix-match where `cookiePath` is a prefix of `reqPath`.\n\nparse(header[,strict=false])\n----------------------------\n\nalias for `Cookie.parse(header[,strict])`\n\nfromJSON(string)\n----------------\n\nalias for `Cookie.fromJSON(string)`\n\ngetPublicSuffix(hostname)\n-------------------------\n\nReturns the public suffix of this hostname. The public suffix is the shortest domain-name upon which a cookie can be set. Returns `null` if the hostname cannot have cookies set for it.\n\nFor example: `www.example.com` and `www.subdomain.example.com` both have public suffix `example.com`.\n\nFor further information, see http://publicsuffix.org/. This module derives its list from that site.\n\ncookieCompare(a,b)\n------------------\n\nFor use with `.sort()`, sorts a list of cookies into the recommended order given in the RFC (Section 5.4 step 2). Longest `.path`s go first, then sorted oldest to youngest.\n\n``` javascript\nvar cookies = [ /* unsorted array of Cookie objects */ ];\ncookies = cookies.sort(cookieCompare);\n```\n\npermuteDomain(domain)\n---------------------\n\nGenerates a list of all possible domains that `domainMatch()` the parameter. 
May be handy for implementing cookie stores.\n\n\npermutePath(path)\n-----------------\n\nGenerates a list of all possible paths that `pathMatch()` the parameter. May be handy for implementing cookie stores.\n\nCookie\n======\n\nCookie.parse(header[,strict=false])\n-----------------------------------\n\nParses a single Cookie or Set-Cookie HTTP header into a `Cookie` object. Returns `undefined` if the string can't be parsed. If in strict mode, returns `undefined` if the cookie doesn't follow the guidelines in section 4 of RFC6265. Generally speaking, strict mode can be used to validate your own generated Set-Cookie headers, but acting as a client you want to be lenient and leave strict mode off.\n\nHere's how to process the Set-Cookie header(s) on a node HTTP/HTTPS response:\n\n``` javascript\nif (res.headers['set-cookie'] instanceof Array)\n cookies = res.headers['set-cookie'].map(function (c) { return (Cookie.parse(c)); });\nelse\n cookies = [Cookie.parse(res.headers['set-cookie'])];\n```\n\nCookie.fromJSON(string)\n-----------------------\n\nConvert a JSON string to a `Cookie` object. Does a `JSON.parse()` and converts the `.created`, `.lastAccessed` and `.expires` properties into `Date` objects.\n\nProperties\n==========\n\n * _key_ - string - the name or key of the cookie (default \"\")\n * _value_ - string - the value of the cookie (default \"\")\n * _expires_ - `Date` - if set, the `Expires=` attribute of the cookie (defaults to the string `\"Infinity\"`). See `setExpires()`\n * _maxAge_ - seconds - if set, the `Max-Age=` attribute _in seconds_ of the cookie. May also be set to strings `\"Infinity\"` and `\"-Infinity\"` for non-expiry and immediate-expiry, respectively. 
See `setMaxAge()`\n * _domain_ - string - the `Domain=` attribute of the cookie\n * _path_ - string - the `Path=` of the cookie\n * _secure_ - boolean - the `Secure` cookie flag\n * _httpOnly_ - boolean - the `HttpOnly` cookie flag\n * _extensions_ - `Array` - any unrecognized cookie attributes as strings (even if equal-signs inside)\n\nAfter a cookie has been passed through `CookieJar.setCookie()` it will have the following additional attributes:\n\n * _hostOnly_ - boolean - is this a host-only cookie (i.e. no Domain field was set, but was instead implied)\n * _pathIsDefault_ - boolean - if true, there was no Path field on the cookie and `defaultPath()` was used to derive one.\n * _created_ - `Date` - when this cookie was added to the jar\n * _lastAccessed_ - `Date` - last time the cookie got accessed. Will affect cookie cleaning once implemented. Using `cookiejar.getCookies(...)` will update this attribute.\n\nConstruction([{options}])\n------------\n\nReceives an options object that can contain any Cookie properties, uses the default for unspecified properties.\n\n.toString()\n-----------\n\nencode to a Set-Cookie header value. The Expires cookie field is set using `formatDate()`, but is omitted entirely if `.expires` is `Infinity`.\n\n.cookieString()\n---------------\n\nencode to a Cookie header value (i.e. the `.key` and `.value` properties joined with '=').\n\n.setExpires(String)\n-------------------\n\nsets the expiry based on a date-string passed through `parseDate()`. If parseDate returns `null` (i.e. can't parse this date string), `.expires` is set to `\"Infinity\"` (a string) is set.\n\n.setMaxAge(number)\n-------------------\n\nsets the maxAge in seconds. 
Coerces `-Infinity` to `\"-Infinity\"` and `Infinity` to `\"Infinity\"` so it JSON serializes correctly.\n\n.expiryTime([now=Date.now()])\n-----------------------------\n\n.expiryDate([now=Date.now()])\n-----------------------------\n\nexpiryTime() Computes the absolute unix-epoch milliseconds that this cookie expires. expiryDate() works similarly, except it returns a `Date` object. Note that in both cases the `now` parameter should be milliseconds.\n\nMax-Age takes precedence over Expires (as per the RFC). The `.created` attribute -- or, by default, the `now` paramter -- is used to offset the `.maxAge` attribute.\n\nIf Expires (`.expires`) is set, that's returned.\n\nOtherwise, `expiryTime()` returns `Infinity` and `expiryDate()` returns a `Date` object for \"Tue, 19 Jan 2038 03:14:07 GMT\" (latest date that can be expressed by a 32-bit `time_t`; the common limit for most user-agents).\n\n.TTL([now=Date.now()])\n---------\n\ncompute the TTL relative to `now` (milliseconds). The same precedence rules as for `expiryTime`/`expiryDate` apply.\n\nThe \"number\" `Infinity` is returned for cookies without an explicit expiry and `0` is returned if the cookie is expired. Otherwise a time-to-live in milliseconds is returned.\n\n.canonicalizedDoman()\n---------------------\n\n.cdomain()\n----------\n\nreturn the canonicalized `.domain` field. This is lower-cased and punycode (RFC3490) encoded if the domain has any non-ASCII characters.\n\n.validate()\n-----------\n\nStatus: *IN PROGRESS*. Works for a few things, but is by no means comprehensive.\n\nvalidates cookie attributes for semantic correctness. Useful for \"lint\" checking any Set-Cookie headers you generate. 
For now, it returns a boolean, but eventually could return a reason string -- you can future-proof with this construct:\n\n``` javascript\nif (cookie.validate() === true) {\n // it's tasty\n} else {\n // yuck!\n}\n```\n\nCookieJar\n=========\n\nConstruction([store = new MemoryCookieStore()][, rejectPublicSuffixes])\n------------\n\nSimply use `new CookieJar()`. If you'd like to use a custom store, pass that to the constructor otherwise a `MemoryCookieStore` will be created and used.\n\n\nAttributes\n----------\n\n * _rejectPublicSuffixes_ - boolean - reject cookies with domains like \"com\" and \"co.uk\" (default: `true`)\n\nSince eventually this module would like to support database/remote/etc. CookieJars, continuation passing style is used for CookieJar methods.\n\n.setCookie(cookieOrString, currentUrl, [{options},] cb(err,cookie))\n-------------------------------------------------------------------\n\nAttempt to set the cookie in the cookie jar. If the operation fails, an error will be given to the callback `cb`, otherwise the cookie is passed through. The cookie will have updated `.created`, `.lastAccessed` and `.hostOnly` properties.\n\nThe `options` object can be omitted and can have the following properties:\n\n * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies.\n * _secure_ - boolean - autodetect from url - indicates if this is a \"Secure\" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`.\n * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies\n * _strict_ - boolean - default `false` - perform extra checks\n * _ignoreError_ - boolean - default `false` - silently ignore things like parse errors and invalid domains. 
CookieStore errors aren't ignored by this option.\n\nAs per the RFC, the `.hostOnly` property is set if there was no \"Domain=\" parameter in the cookie string (or `.domain` was null on the Cookie object). The `.domain` property is set to the fully-qualified hostname of `currentUrl` in this case. Matching this cookie requires an exact hostname match (not a `domainMatch` as per usual).\n\n.setCookieSync(cookieOrString, currentUrl, [{options}])\n-------------------------------------------------------\n\nSynchronous version of `setCookie`; only works with synchronous stores (e.g. the default `MemoryCookieStore`).\n\n.storeCookie(cookie, [{options},] cb(err,cookie))\n-------------------------------------------------\n\n__REMOVED__ removed in lieu of the CookieStore API below\n\n.getCookies(currentUrl, [{options},] cb(err,cookies))\n-----------------------------------------------------\n\nRetrieve the list of cookies that can be sent in a Cookie header for the current url.\n\nIf an error is encountered, that's passed as `err` to the callback, otherwise an `Array` of `Cookie` objects is passed. The array is sorted with `cookieCompare()` unless the `{sort:false}` option is given.\n\nThe `options` object can be omitted and can have the following properties:\n\n * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies.\n * _secure_ - boolean - autodetect from url - indicates if this is a \"Secure\" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`.\n * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies\n * _expire_ - boolean - default `true` - perform expiry-time checking of cookies and asynchronously remove expired cookies from the store. 
Using `false` will return expired cookies and **not** remove them from the store (which is useful for replaying Set-Cookie headers, potentially).\n * _allPaths_ - boolean - default `false` - if `true`, do not scope cookies by path. The default uses RFC-compliant path scoping. **Note**: may not be supported by the CookieStore `fetchCookies` function (the default MemoryCookieStore supports it).\n\nThe `.lastAccessed` property of the returned cookies will have been updated.\n\n.getCookiesSync(currentUrl, [{options}])\n----------------------------------------\n\nSynchronous version of `getCookies`; only works with synchronous stores (e.g. the default `MemoryCookieStore`).\n\n.getCookieString(...)\n---------------------\n\nAccepts the same options as `.getCookies()` but passes a string suitable for a Cookie header rather than an array to the callback. Simply maps the `Cookie` array via `.cookieString()`.\n\n.getCookieStringSync(...)\n-------------------------\n\nSynchronous version of `getCookieString`; only works with synchronous stores (e.g. the default `MemoryCookieStore`).\n\n.getSetCookieStrings(...)\n-------------------------\n\nReturns an array of strings suitable for **Set-Cookie** headers. Accepts the same options as `.getCookies()`. Simply maps the cookie array via `.toString()`.\n\n.getSetCookieStringsSync(...)\n-----------------------------\n\nSynchronous version of `getSetCookieStrings`; only works with synchronous stores (e.g. the default `MemoryCookieStore`).\n\nStore\n=====\n\nBase class for CookieJar stores.\n\n# CookieStore API\n\nThe storage model for each `CookieJar` instance can be replaced with a custom implementation. The default is `MemoryCookieStore` which can be found in the `lib/memstore.js` file. The API uses continuation-passing-style to allow for asynchronous stores.\n\nStores should inherit from the base `Store` class, which is available as `require('tough-cookie').Store`. 
Stores are asynchronous by default, but if `store.synchronous` is set, then the `*Sync` methods on the CookieJar can be used.\n\nAll `domain` parameters will have been normalized before calling.\n\nThe Cookie store must have all of the following methods.\n\nstore.findCookie(domain, path, key, cb(err,cookie))\n---------------------------------------------------\n\nRetrieve a cookie with the given domain, path and key (a.k.a. name). The RFC maintains that exactly one of these cookies should exist in a store. If the store is using versioning, this means that the latest/newest such cookie should be returned.\n\nCallback takes an error and the resulting `Cookie` object. If no cookie is found then `null` MUST be passed instead (i.e. not an error).\n\nstore.findCookies(domain, path, cb(err,cookies))\n------------------------------------------------\n\nLocates cookies matching the given domain and path. This is most often called in the context of `cookiejar.getCookies()` above.\n\nIf no cookies are found, the callback MUST be passed an empty array.\n\nThe resulting list will be checked for applicability to the current request according to the RFC (domain-match, path-match, http-only-flag, secure-flag, expiry, etc.), so it's OK to use an optimistic search algorithm when implementing this method. However, the search algorithm used SHOULD try to find cookies that `domainMatch()` the domain and `pathMatch()` the path in order to limit the amount of checking that needs to be done.\n\nAs of version 0.9.12, the `allPaths` option to `cookiejar.getCookies()` above will cause the path here to be `null`. If the path is `null`, path-matching MUST NOT be performed (i.e. domain-matching only).\n\nstore.putCookie(cookie, cb(err))\n--------------------------------\n\nAdds a new cookie to the store. 
The implementation SHOULD replace any existing cookie with the same `.domain`, `.path`, and `.key` properties -- depending on the nature of the implementation, it's possible that between the call to `fetchCookie` and `putCookie` that a duplicate `putCookie` can occur.\n\nThe `cookie` object MUST NOT be modified; the caller will have already updated the `.creation` and `.lastAccessed` properties.\n\nPass an error if the cookie cannot be stored.\n\nstore.updateCookie(oldCookie, newCookie, cb(err))\n-------------------------------------------------\n\nUpdate an existing cookie. The implementation MUST update the `.value` for a cookie with the same `domain`, `.path` and `.key`. The implementation SHOULD check that the old value in the store is equivalent to `oldCookie` - how the conflict is resolved is up to the store.\n\nThe `.lastAccessed` property will always be different between the two objects and `.created` will always be the same. Stores MAY ignore or defer the `.lastAccessed` change at the cost of affecting how cookies are sorted (or selected for deletion).\n\nStores may wish to optimize changing the `.value` of the cookie in the store versus storing a new cookie. If the implementation doesn't define this method a stub that calls `putCookie(newCookie,cb)` will be added to the store object.\n\nThe `newCookie` and `oldCookie` objects MUST NOT be modified.\n\nPass an error if the newCookie cannot be stored.\n\nstore.removeCookie(domain, path, key, cb(err))\n----------------------------------------------\n\nRemove a cookie from the store (see notes on `findCookie` about the uniqueness constraint).\n\nThe implementation MUST NOT pass an error if the cookie doesn't exist; only pass an error due to the failure to remove an existing cookie.\n\nstore.removeCookies(domain, path, cb(err))\n------------------------------------------\n\nRemoves matching cookies from the store. 
The `path` paramter is optional, and if missing means all paths in a domain should be removed.\n\nPass an error ONLY if removing any existing cookies failed.\n\n# TODO\n\n * _full_ RFC5890/RFC5891 canonicalization for domains in `cdomain()`\n * the optional `punycode` requirement implements RFC3492, but RFC6265 requires RFC5891\n * better tests for `validate()`?\n\n# Copyright and License\n\n(tl;dr: MIT with some MPL/1.1)\n\nCopyright 2012- GoInstant, Inc. and other contributors. All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\nPortions may be licensed under different licenses (in particular public-suffix.txt is MPL/1.1); please read the LICENSE file for full details.\n", "readmeFilename": "README.md", - "_id": "tough-cookie@0.9.15", - "_from": "tough-cookie@~0.9.15" + "_id": "tough-cookie@0.12.1", + "dist": { + "shasum": "8220c7e21abd5b13d96804254bd5a81ebf2c7d62", + "tarball": "http://registry.npmjs.org/tough-cookie/-/tough-cookie-0.12.1.tgz" + }, + "_from": "tough-cookie@>=0.12.0", + "_npmVersion": "1.3.11", + "_npmUser": { + "name": "goinstant", + "email": "support@goinstant.com" + }, + "maintainers": [ + { + "name": "jstash", + "email": "jeremy@goinstant.com" + }, + { + "name": "goinstant", + "email": "services@goinstant.com" + } + ], + "directories": {}, + "_shasum": "8220c7e21abd5b13d96804254bd5a81ebf2c7d62", + "_resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-0.12.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/README.md nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -2,16 +2,21 @@ ![Tough Cookie](http://www.goinstant.com.s3.amazonaws.com/tough-cookie.jpg) +[![Build Status](https://travis-ci.org/goinstant/node-cookie.png?branch=master)](https://travis-ci.org/goinstant/node-cookie) + +[![NPM Stats](https://nodei.co/npm/tough-cookie.png?downloads=true&stars=true)](https://npmjs.org/package/tough-cookie) +![NPM Downloads](https://nodei.co/npm-dl/tough-cookie.png?months=9) + # Synopsis ``` 
javascript -var cookies = require('tough-cookie'); // note: not 'cookie', 'cookies' or 'node-cookie' -var Cookie = cookies.Cookie; +var tough = require('tough-cookie'); // note: not 'cookie', 'cookies' or 'node-cookie' +var Cookie = tough.Cookie; var cookie = Cookie.parse(header); cookie.value = 'somethingdifferent'; header = cookie.toString(); -var cookiejar = new cookies.CookieJar(); +var cookiejar = new tough.CookieJar(); cookiejar.setCookie(cookie, 'http://currentdomain.example.com/path', cb); // ... cookiejar.getCookies('http://example.com/otherpath',function(err,cookies) { @@ -31,8 +36,8 @@ # API -cookies -======= +tough +===== Functions on the module you get from `require('tough-cookie')`. All can be used as pure functions and don't need to be "bound". @@ -124,7 +129,7 @@ ``` javascript if (res.headers['set-cookie'] instanceof Array) - cookies = res.headers['set-cookie'].map(Cookie.parse); + cookies = res.headers['set-cookie'].map(function (c) { return (Cookie.parse(c)); }); else cookies = [Cookie.parse(res.headers['set-cookie'])]; ``` @@ -146,7 +151,7 @@ * _secure_ - boolean - the `Secure` cookie flag * _httpOnly_ - boolean - the `HttpOnly` cookie flag * _extensions_ - `Array` - any unrecognized cookie attributes as strings (even if equal-signs inside) - + After a cookie has been passed through `CookieJar.setCookie()` it will have the following additional attributes: * _hostOnly_ - boolean - is this a host-only cookie (i.e. no Domain field was set, but was instead implied) @@ -236,7 +241,7 @@ ---------- * _rejectPublicSuffixes_ - boolean - reject cookies with domains like "com" and "co.uk" (default: `true`) - + Since eventually this module would like to support database/remote/etc. CookieJars, continuation passing style is used for CookieJar methods. 
.setCookie(cookieOrString, currentUrl, [{options},] cb(err,cookie)) @@ -254,11 +259,16 @@ As per the RFC, the `.hostOnly` property is set if there was no "Domain=" parameter in the cookie string (or `.domain` was null on the Cookie object). The `.domain` property is set to the fully-qualified hostname of `currentUrl` in this case. Matching this cookie requires an exact hostname match (not a `domainMatch` as per usual). +.setCookieSync(cookieOrString, currentUrl, [{options}]) +------------------------------------------------------- + +Synchronous version of `setCookie`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). + .storeCookie(cookie, [{options},] cb(err,cookie)) ------------------------------------------------- __REMOVED__ removed in lieu of the CookieStore API below - + .getCookies(currentUrl, [{options},] cb(err,cookies)) ----------------------------------------------------- @@ -276,20 +286,42 @@ The `.lastAccessed` property of the returned cookies will have been updated. +.getCookiesSync(currentUrl, [{options}]) +---------------------------------------- + +Synchronous version of `getCookies`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). + .getCookieString(...) --------------------- Accepts the same options as `.getCookies()` but passes a string suitable for a Cookie header rather than an array to the callback. Simply maps the `Cookie` array via `.cookieString()`. +.getCookieStringSync(...) +------------------------- + +Synchronous version of `getCookieString`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). + .getSetCookieStrings(...) ------------------------- -Accepts the same options as `.getCookies()` but passes an array of strings suitable for Set-Cookie headers (rather than an array of `Cookie`s) to the callback. Simply maps the cookie array via `.toString()`. +Returns an array of strings suitable for **Set-Cookie** headers. Accepts the same options as `.getCookies()`. 
Simply maps the cookie array via `.toString()`. + +.getSetCookieStringsSync(...) +----------------------------- + +Synchronous version of `getSetCookieStrings`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). + +Store +===== + +Base class for CookieJar stores. # CookieStore API The storage model for each `CookieJar` instance can be replaced with a custom implementation. The default is `MemoryCookieStore` which can be found in the `lib/memstore.js` file. The API uses continuation-passing-style to allow for asynchronous stores. +Stores should inherit from the base `Store` class, which is available as `require('tough-cookie').Store`. Stores are asynchronous by default, but if `store.synchronous` is set, then the `*Sync` methods on the CookieJar can be used. + All `domain` parameters will have been normalized before calling. The Cookie store must have all of the following methods. @@ -358,7 +390,7 @@ (tl;dr: MIT with some MPL/1.1) -Copyright GoInstant, Inc. and other contributors. All rights reserved. +Copyright 2012- GoInstant, Inc. and other contributors. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/test.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/test.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/test.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/test.js 2015-01-20 21:22:17.000000000 +0000 @@ -18,7 +18,7 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ - +'use strict'; var vows = require('vows'); var assert = require('assert'); var async = require('async'); @@ -31,7 +31,7 @@ function dateVows(table) { var theVows = { }; - var keys = Object.keys(table).forEach(function(date) { + Object.keys(table).forEach(function(date) { var expect = table[date]; theVows[date] = function() { var got = tough.parseDate(date) ? 'valid' : 'invalid'; @@ -69,7 +69,7 @@ } var atNow = Date.now(); -function at(offset) { return {now: new Date(atNow+offset)} } +function at(offset) { return {now: new Date(atNow+offset)}; } vows.describe('Cookie Jar') .addBatch({ @@ -135,8 +135,8 @@ "doesn't validate": function(c) { assert.ok(!c.validate()); }, - "to string": function(c) { - assert.equal(c.toString(), 'a="beta gamma"'); + "'garbage in, garbage out'": function(c) { + assert.equal(c.toString(), 'a=beta gamma'); }, }, "with an empty value and HttpOnly": { @@ -277,7 +277,7 @@ assert.ok(c.isPersistent()); }, "default TTL": { - topic: function() { return new Cookie() }, + topic: function() { return new Cookie(); }, "is Infinite-future": function(c) { assert.equal(c.TTL(), Infinity) }, "is a 'session' cookie": function(c) { assert.ok(!c.isPersistent()) }, }, @@ -507,7 +507,64 @@ assert.equal(c.expires.getTime(), 1397700749000); }, "httponly": function(c) { assert.ok(c.httpOnly) }, - } + }, + "spaces in value": { + "strict": { + topic: function() { + return Cookie.parse('a=one two three',true) || null; + }, + "did not parse": function(c) { assert.isNull(c) }, + }, + "non-strict": { + topic: function() { + return Cookie.parse('a=one two three',false) || null; + }, + "parsed": function(c) { assert.ok(c) }, + "key": function(c) { assert.equal(c.key, 'a') }, + "value": function(c) { assert.equal(c.value, 'one two three') }, + "no path": function(c) { assert.equal(c.path, null) }, + "no domain": function(c) { assert.equal(c.domain, null) }, + "no extensions": function(c) { assert.ok(!c.extensions) }, + }, + }, + "quoted spaces in value": { + "strict": 
{ + topic: function() { + return Cookie.parse('a="one two three"',true) || null; + }, + "did not parse": function(c) { assert.isNull(c) }, + }, + "non-strict": { + topic: function() { + return Cookie.parse('a="one two three"',false) || null; + }, + "parsed": function(c) { assert.ok(c) }, + "key": function(c) { assert.equal(c.key, 'a') }, + "value": function(c) { assert.equal(c.value, 'one two three') }, + "no path": function(c) { assert.equal(c.path, null) }, + "no domain": function(c) { assert.equal(c.domain, null) }, + "no extensions": function(c) { assert.ok(!c.extensions) }, + } + }, + "non-ASCII in value": { + "strict": { + topic: function() { + return Cookie.parse('farbe=weiß',true) || null; + }, + "did not parse": function(c) { assert.isNull(c) }, + }, + "non-strict": { + topic: function() { + return Cookie.parse('farbe=weiß',false) || null; + }, + "parsed": function(c) { assert.ok(c) }, + "key": function(c) { assert.equal(c.key, 'farbe') }, + "value": function(c) { assert.equal(c.value, 'weiß') }, + "no path": function(c) { assert.equal(c.path, null) }, + "no domain": function(c) { assert.equal(c.domain, null) }, + "no extensions": function(c) { assert.ok(!c.extensions) }, + }, + }, } }) .addBatch({ @@ -706,6 +763,20 @@ assert.equal(c.domain, 'example.com'); }, }, + "Setting a sub-path cookie on a super-domain": { + topic: function() { + var cj = new CookieJar(); + var c = Cookie.parse("a=b; Domain=example.com; Path=/subpath"); + assert.strictEqual(c.hostOnly, null); + assert.instanceOf(c.creation, Date); + assert.strictEqual(c.lastAccessed, null); + c.creation = new Date(Date.now()-10000); + cj.setCookie(c, 'http://www.example.com/index.html', this.callback); + }, + "domain is super-domain": function(c) { assert.equal(c.domain, 'example.com') }, + "path is /subpath": function(c) { assert.equal(c.path, '/subpath') }, + "path was NOT derived": function(c) { assert.strictEqual(c.pathIsDefault, null) }, + }, "Setting HttpOnly cookie over non-HTTP API": { 
topic: function() { var cj = new CookieJar(); @@ -755,10 +826,13 @@ }); }, "setup ok": function(err,cj,results) { - assert.ok(1); + assert.ok(!err); + assert.ok(cj); + assert.ok(results); }, "then retrieving for http://nodejs.org": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('http://nodejs.org',this.callback); }, "get a nodejs cookie": function(cookies) { @@ -768,7 +842,8 @@ }, }, "then retrieving for https://example.com": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('https://example.com',{secure:true},this.callback); }, "get a secure example cookie with others": function(cookies) { @@ -777,7 +852,8 @@ }, }, "then retrieving for https://example.com (missing options)": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('https://example.com',this.callback); }, "get a secure example cookie with others": function(cookies) { @@ -786,7 +862,8 @@ }, }, "then retrieving for http://example.com": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('http://example.com',this.callback); }, "get a bunch of cookies": function(cookies) { @@ -795,7 +872,8 @@ }, }, "then retrieving for http://EXAMPlE.com": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('http://EXAMPlE.com',this.callback); }, "get a bunch of cookies": function(cookies) { @@ -804,7 +882,8 @@ }, }, "then retrieving for http://example.com, non-HTTP": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('http://example.com',{http:false},this.callback); }, "get a bunch of cookies": function(cookies) { @@ -813,7 +892,8 @@ }, }, "then retrieving for http://example.com/foo/bar": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); 
cj.getCookies('http://example.com/foo/bar',this.callback); }, "get a bunch of cookies": function(cookies) { @@ -822,7 +902,8 @@ }, }, "then retrieving for http://example.com as a string": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookieString('http://example.com',this.callback); }, "get a single string": function(cookieHeader) { @@ -830,7 +911,8 @@ }, }, "then retrieving for http://example.com as a set-cookie header": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getSetCookieStrings('http://example.com',this.callback); }, "get a single string": function(cookieHeaders) { @@ -841,7 +923,8 @@ }, }, "then retrieving for http://www.example.com/": { - topic: function(cj,results) { + topic: function(cj,oldResults) { + assert.ok(oldResults); cj.getCookies('http://www.example.com/foo/bar',this.callback); }, "get a bunch of cookies": function(cookies) { @@ -909,6 +992,7 @@ topic: function() { var cb = this.callback; var next = function (err,c) { + c = null; return cb(err,cj); }; var cj = new CookieJar(); @@ -916,11 +1000,16 @@ }, "initial cookie is set": function(err,cj) { assert.ok(!err); + assert.ok(cj); }, "but when trying to overwrite": { topic: function(cj) { var cb = this.callback; - cj.setCookie('k=12; Domain=example.ca; Path=/','http://example.ca',{http:false},function(err,c) {cb(null,err)}); + var next = function(err,c) { + c = null; + cb(null,err); + }; + cj.setCookie('k=12; Domain=example.ca; Path=/','http://example.ca',{http:false},next); }, "it's an error": function(err) { assert.ok(err); @@ -1337,4 +1426,200 @@ }, } }) +.addBatch({ + "remove cookies": { + topic: function() { + var jar = new CookieJar(); + var cookie = Cookie.parse("a=b; Domain=example.com; Path=/"); + var cookie2 = Cookie.parse("a=b; Domain=foo.com; Path=/"); + var cookie3 = Cookie.parse("foo=bar; Domain=foo.com; Path=/"); + jar.setCookie(cookie, 'http://example.com/index.html', 
function(){}); + jar.setCookie(cookie2, 'http://foo.com/index.html', function(){}); + jar.setCookie(cookie3, 'http://foo.com/index.html', function(){}); + return jar; + }, + "all from matching domain": function(jar){ + jar.store.removeCookies('example.com',null, function(err) { + assert(err == null); + + jar.store.findCookies('example.com', null, function(err, cookies){ + assert(err == null); + assert(cookies != null); + assert(cookies.length === 0, 'cookie was not removed'); + }); + + jar.store.findCookies('foo.com', null, function(err, cookies){ + assert(err == null); + assert(cookies != null); + assert(cookies.length === 2, 'cookies should not have been removed'); + }); + }); + }, + "from cookie store matching domain and key": function(jar){ + jar.store.removeCookie('foo.com', '/', 'foo', function(err) { + assert(err == null); + + jar.store.findCookies('foo.com', null, function(err, cookies){ + assert(err == null); + assert(cookies != null); + assert(cookies.length === 1, 'cookie was not removed correctly'); + assert(cookies[0].key === 'a', 'wrong cookie was removed'); + }); + }); + } + } +}) +.addBatch({ + "Synchronous CookieJar": { + "setCookieSync": { + topic: function() { + var jar = new CookieJar(); + var cookie = Cookie.parse("a=b; Domain=example.com; Path=/"); + cookie = jar.setCookieSync(cookie, 'http://example.com/index.html'); + return cookie; + }, + "returns a copy of the cookie": function(cookie) { + assert.instanceOf(cookie, Cookie); + } + }, + + "setCookieSync strict parse error": { + topic: function() { + var jar = new CookieJar(); + var opts = { strict: true }; + try { + jar.setCookieSync("farbe=weiß", 'http://example.com/index.html', opts); + return false; + } catch (e) { + return e; + } + }, + "throws the error": function(err) { + assert.instanceOf(err, Error); + assert.equal(err.message, "Cookie failed to parse"); + } + }, + + "getCookiesSync": { + topic: function() { + var jar = new CookieJar(); + var url = 'http://example.com/index.html'; + 
jar.setCookieSync("a=b; Domain=example.com; Path=/", url); + jar.setCookieSync("c=d; Domain=example.com; Path=/", url); + return jar.getCookiesSync(url); + }, + "returns the cookie array": function(err, cookies) { + assert.ok(!err); + assert.ok(Array.isArray(cookies)); + assert.lengthOf(cookies, 2); + cookies.forEach(function(cookie) { + assert.instanceOf(cookie, Cookie); + }); + } + }, + + "getCookieStringSync": { + topic: function() { + var jar = new CookieJar(); + var url = 'http://example.com/index.html'; + jar.setCookieSync("a=b; Domain=example.com; Path=/", url); + jar.setCookieSync("c=d; Domain=example.com; Path=/", url); + return jar.getCookieStringSync(url); + }, + "returns the cookie header string": function(err, str) { + assert.ok(!err); + assert.typeOf(str, 'string'); + } + }, + + "getSetCookieStringsSync": { + topic: function() { + var jar = new CookieJar(); + var url = 'http://example.com/index.html'; + jar.setCookieSync("a=b; Domain=example.com; Path=/", url); + jar.setCookieSync("c=d; Domain=example.com; Path=/", url); + return jar.getSetCookieStringsSync(url); + }, + "returns the cookie header string": function(err, headers) { + assert.ok(!err); + assert.ok(Array.isArray(headers)); + assert.lengthOf(headers, 2); + headers.forEach(function(header) { + assert.typeOf(header, 'string'); + }); + } + }, + } +}) +.addBatch({ + "Synchronous API on async CookieJar": { + topic: function() { + return new tough.Store(); + }, + "setCookieSync": { + topic: function(store) { + var jar = new CookieJar(store); + try { + jar.setCookieSync("a=b", 'http://example.com/index.html'); + return false; + } catch(e) { + return e; + } + }, + "fails": function(err) { + assert.instanceOf(err, Error); + assert.equal(err.message, + 'CookieJar store is not synchronous; use async API instead.'); + } + }, + "getCookiesSync": { + topic: function(store) { + var jar = new CookieJar(store); + try { + jar.getCookiesSync('http://example.com/index.html'); + return false; + } catch(e) { + 
return e; + } + }, + "fails": function(err) { + assert.instanceOf(err, Error); + assert.equal(err.message, + 'CookieJar store is not synchronous; use async API instead.'); + } + }, + "getCookieStringSync": { + topic: function(store) { + var jar = new CookieJar(store); + try { + jar.getCookieStringSync('http://example.com/index.html'); + return false; + } catch(e) { + return e; + } + }, + "fails": function(err) { + assert.instanceOf(err, Error); + assert.equal(err.message, + 'CookieJar store is not synchronous; use async API instead.'); + } + }, + "getSetCookieStringsSync": { + topic: function(store) { + var jar = new CookieJar(store); + try { + jar.getSetCookieStringsSync('http://example.com/index.html'); + return false; + } catch(e) { + return e; + } + }, + "fails": function(err) { + assert.instanceOf(err, Error); + assert.equal(err.message, + 'CookieJar store is not synchronous; use async API instead.'); + } + }, + } +}) .export(module); diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/.travis.yml nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/.travis.yml --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tough-cookie/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tough-cookie/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +language: node_js +node_js: +- "0.10" +- "0.11" +matrix: + fast_finish: true + allow_failures: + - node_js: 0.11 diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tunnel-agent/index.js nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tunnel-agent/index.js --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tunnel-agent/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tunnel-agent/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -67,9 +67,18 @@ } util.inherits(TunnelingAgent, events.EventEmitter) 
-TunnelingAgent.prototype.addRequest = function addRequest(req, host, port) { +TunnelingAgent.prototype.addRequest = function addRequest(req, options) { var self = this + // Legacy API: addRequest(req, host, port, path) + if (typeof options === 'string') { + options = { + host: options, + port: arguments[2], + path: arguments[3] + }; + } + if (self.sockets.length >= this.maxSockets) { // We are over limit so we'll add it to the queue. self.requests.push({host: host, port: port, request: req}) @@ -77,14 +86,14 @@ } // If we are under maxSockets create a new one. - self.createSocket({host: host, port: port, request: req}, function(socket) { + self.createSocket({host: options.host, port: options.port, request: req}, function(socket) { socket.on('free', onFree) socket.on('close', onCloseOrRemove) socket.on('agentRemove', onCloseOrRemove) req.onSocket(socket) function onFree() { - self.emit('free', socket, host, port) + self.emit('free', socket, options.host, options.port) } function onCloseOrRemove(err) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tunnel-agent/.jshintrc nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tunnel-agent/.jshintrc --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tunnel-agent/.jshintrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tunnel-agent/.jshintrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "node": true, + "asi": true, + "laxcomma": true +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tunnel-agent/package.json nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tunnel-agent/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/node_modules/tunnel-agent/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/node_modules/tunnel-agent/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "tunnel-agent", "description": "HTTP proxy 
tunneling agent. Formerly part of mikeal/request, now a standalone module.", - "version": "0.3.0", + "version": "0.4.0", "repository": { "url": "https://github.com/mikeal/tunnel-agent" }, @@ -17,12 +17,30 @@ "engines": { "node": "*" }, - "readme": "tunnel-agent\n============\n\nHTTP proxy tunneling agent. Formerly part of mikeal/request, now a standalone module.\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/mikeal/tunnel-agent/issues" }, "homepage": "https://github.com/mikeal/tunnel-agent", - "_id": "tunnel-agent@0.3.0", - "_from": "tunnel-agent@~0.3.0" + "_id": "tunnel-agent@0.4.0", + "dist": { + "shasum": "b1184e312ffbcf70b3b4c78e8c219de7ebb1c550", + "tarball": "http://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.4.0.tgz" + }, + "_from": "tunnel-agent@>=0.4.0 <0.5.0", + "_npmVersion": "1.3.21", + "_npmUser": { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + }, + "maintainers": [ + { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + } + ], + "directories": {}, + "_shasum": "b1184e312ffbcf70b3b4c78e8c219de7ebb1c550", + "_resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.4.0.tgz", + "readme": "ERROR: No README data found!", + "scripts": {} } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/package.json nodejs-0.11.15/deps/npm/node_modules/request/package.json --- nodejs-0.11.13/deps/npm/node_modules/request/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -7,51 +7,75 @@ "util", "utility" ], - "version": "2.30.0", + "version": "2.46.0", "author": { "name": "Mikeal Rogers", "email": "mikeal.rogers@gmail.com" }, "repository": { "type": "git", - "url": "http://github.com/mikeal/request.git" + "url": "https://github.com/mikeal/request.git" }, "bugs": { "url": "http://github.com/mikeal/request/issues" }, - "engines": [ - "node >= 0.8.0" - ], + "license": "Apache-2.0", + "engines": { + "node": 
">=0.8.0" + }, "main": "index.js", "dependencies": { - "qs": "~0.6.0", - "json-stringify-safe": "~5.0.0", + "bl": "~0.9.0", + "caseless": "~0.6.0", "forever-agent": "~0.5.0", - "node-uuid": "~1.4.0", - "mime": "~1.2.9", - "tough-cookie": "~0.9.15", "form-data": "~0.1.0", - "tunnel-agent": "~0.3.0", - "http-signature": "~0.10.0", - "oauth-sign": "~0.3.0", - "hawk": "~1.0.0", - "aws-sign2": "~0.5.0" - }, - "optionalDependencies": { - "tough-cookie": "~0.9.15", - "form-data": "~0.1.0", - "tunnel-agent": "~0.3.0", + "json-stringify-safe": "~5.0.0", + "mime-types": "~1.0.1", + "node-uuid": "~1.4.0", + "qs": "~1.2.0", + "tunnel-agent": "~0.4.0", + "tough-cookie": ">=0.12.0", "http-signature": "~0.10.0", - "oauth-sign": "~0.3.0", - "hawk": "~1.0.0", - "aws-sign2": "~0.5.0" + "oauth-sign": "~0.4.0", + "hawk": "1.1.1", + "aws-sign2": "~0.5.0", + "stringstream": "~0.0.4" }, "scripts": { - "test": "node tests/run.js" + "test": "npm run lint && node node_modules/.bin/taper tests/test-*.js", + "lint": "node node_modules/.bin/eslint lib/ *.js tests/ && echo Lint passed." }, - "readme": "# Request -- Simplified HTTP client\n\n[![NPM](https://nodei.co/npm/request.png)](https://nodei.co/npm/request/)\n\n## Super simple to use\n\nRequest is designed to be the simplest way possible to make http calls. It supports HTTPS and follows redirects by default.\n\n```javascript\nvar request = require('request');\nrequest('http://www.google.com', function (error, response, body) {\n if (!error && response.statusCode == 200) {\n console.log(body) // Print the google web page.\n }\n})\n```\n\n## Streaming\n\nYou can stream any response to a file stream.\n\n```javascript\nrequest('http://google.com/doodle.png').pipe(fs.createWriteStream('doodle.png'))\n```\n\nYou can also stream a file to a PUT or POST request. 
This method will also check the file extension against a mapping of file extensions to content-types (in this case `application/json`) and use the proper `content-type` in the PUT request (if the headers don’t already provide one).\n\n```javascript\nfs.createReadStream('file.json').pipe(request.put('http://mysite.com/obj.json'))\n```\n\nRequest can also `pipe` to itself. When doing so, `content-type` and `content-length` are preserved in the PUT headers.\n\n```javascript\nrequest.get('http://google.com/img.png').pipe(request.put('http://mysite.com/img.png'))\n```\n\nNow let’s get fancy.\n\n```javascript\nhttp.createServer(function (req, resp) {\n if (req.url === '/doodle.png') {\n if (req.method === 'PUT') {\n req.pipe(request.put('http://mysite.com/doodle.png'))\n } else if (req.method === 'GET' || req.method === 'HEAD') {\n request.get('http://mysite.com/doodle.png').pipe(resp)\n }\n }\n})\n```\n\nYou can also `pipe()` from `http.ServerRequest` instances, as well as to `http.ServerResponse` instances. The HTTP method, headers, and entity-body data will be sent. Which means that, if you don't really care about security, you can do:\n\n```javascript\nhttp.createServer(function (req, resp) {\n if (req.url === '/doodle.png') {\n var x = request('http://mysite.com/doodle.png')\n req.pipe(x)\n x.pipe(resp)\n }\n})\n```\n\nAnd since `pipe()` returns the destination stream in ≥ Node 0.5.x you can do one line proxying. 
:)\n\n```javascript\nreq.pipe(request('http://mysite.com/doodle.png')).pipe(resp)\n```\n\nAlso, none of this new functionality conflicts with requests previous features, it just expands them.\n\n```javascript\nvar r = request.defaults({'proxy':'http://localproxy.com'})\n\nhttp.createServer(function (req, resp) {\n if (req.url === '/doodle.png') {\n r.get('http://google.com/doodle.png').pipe(resp)\n }\n})\n```\n\nYou can still use intermediate proxies, the requests will still follow HTTP forwards, etc.\n\n## Forms\n\n`request` supports `application/x-www-form-urlencoded` and `multipart/form-data` form uploads. For `multipart/related` refer to the `multipart` API.\n\nURL-encoded forms are simple.\n\n```javascript\nrequest.post('http://service.com/upload', {form:{key:'value'}})\n// or\nrequest.post('http://service.com/upload').form({key:'value'})\n```\n\nFor `multipart/form-data` we use the [form-data](https://github.com/felixge/node-form-data) library by [@felixge](https://github.com/felixge). You don’t need to worry about piping the form object or setting the headers, `request` will handle that for you.\n\n```javascript\nvar r = request.post('http://service.com/upload')\nvar form = r.form()\nform.append('my_field', 'my_value')\nform.append('my_buffer', new Buffer([1, 2, 3]))\nform.append('my_file', fs.createReadStream(path.join(__dirname, 'doodle.png'))\nform.append('remote_file', request('http://google.com/doodle.png'))\n```\n\n## HTTP Authentication\n\n```javascript\nrequest.get('http://some.server.com/').auth('username', 'password', false);\n// or\nrequest.get('http://some.server.com/', {\n 'auth': {\n 'user': 'username',\n 'pass': 'password',\n 'sendImmediately': false\n }\n});\n```\n\nIf passed as an option, `auth` should be a hash containing values `user` || `username`, `password` || `pass`, and `sendImmediately` (optional). 
The method form takes parameters `auth(username, password, sendImmediately)`.\n\n`sendImmediately` defaults to `true`, which causes a basic authentication header to be sent. If `sendImmediately` is `false`, then `request` will retry with a proper authentication header after receiving a `401` response from the server (which must contain a `WWW-Authenticate` header indicating the required authentication method).\n\nDigest authentication is supported, but it only works with `sendImmediately` set to `false`; otherwise `request` will send basic authentication on the initial request, which will probably cause the request to fail.\n\n## OAuth Signing\n\n```javascript\n// Twitter OAuth\nvar qs = require('querystring')\n , oauth =\n { callback: 'http://mysite.com/callback/'\n , consumer_key: CONSUMER_KEY\n , consumer_secret: CONSUMER_SECRET\n }\n , url = 'https://api.twitter.com/oauth/request_token'\n ;\nrequest.post({url:url, oauth:oauth}, function (e, r, body) {\n // Ideally, you would take the body in the response\n // and construct a URL that a user clicks on (like a sign in button).\n // The verifier is only available in the response after a user has\n // verified with twitter that they are authorizing your app.\n var access_token = qs.parse(body)\n , oauth =\n { consumer_key: CONSUMER_KEY\n , consumer_secret: CONSUMER_SECRET\n , token: access_token.oauth_token\n , verifier: access_token.oauth_verifier\n }\n , url = 'https://api.twitter.com/oauth/access_token'\n ;\n request.post({url:url, oauth:oauth}, function (e, r, body) {\n var perm_token = qs.parse(body)\n , oauth =\n { consumer_key: CONSUMER_KEY\n , consumer_secret: CONSUMER_SECRET\n , token: perm_token.oauth_token\n , token_secret: perm_token.oauth_token_secret\n }\n , url = 'https://api.twitter.com/1/users/show.json?'\n , params =\n { screen_name: perm_token.screen_name\n , user_id: perm_token.user_id\n }\n ;\n url += qs.stringify(params)\n request.get({url:url, oauth:oauth, json:true}, function (e, r, user) 
{\n console.log(user)\n })\n })\n})\n```\n\n### Custom HTTP Headers\n\nHTTP Headers, such as `User-Agent`, can be set in the `options` object.\nIn the example below, we call the github API to find out the number\nof stars and forks for the request repository. This requires a\ncustom `User-Agent` header as well as https.\n\n```\nvar request = require('request');\n\nvar options = {\n\turl: 'https://api.github.com/repos/mikeal/request',\n\theaders: {\n\t\t'User-Agent': 'request'\n\t}\n};\n\nfunction callback(error, response, body) {\n\tif (!error && response.statusCode == 200) {\n\t\tvar info = JSON.parse(body);\n\t\tconsole.log(info.stargazers_count + \" Stars\");\n\t\tconsole.log(info.forks_count + \" Forks\");\n\t}\n}\n\nrequest(options, callback);\n```\n\n### request(options, callback)\n\nThe first argument can be either a `url` or an `options` object. The only required option is `uri`; all others are optional.\n\n* `uri` || `url` - fully qualified uri or a parsed url object from `url.parse()`\n* `qs` - object containing querystring values to be appended to the `uri`\n* `method` - http method (default: `\"GET\"`)\n* `headers` - http headers (default: `{}`)\n* `body` - entity body for PATCH, POST and PUT requests. Must be a `Buffer` or `String`.\n* `form` - when passed an object, this sets `body` to a querystring representation of value, and adds `Content-type: application/x-www-form-urlencoded; charset=utf-8` header. When passed no options, a `FormData` instance is returned (and is piped to request).\n* `auth` - A hash containing values `user` || `username`, `password` || `pass`, and `sendImmediately` (optional). See documentation above.\n* `json` - sets `body` but to JSON representation of value and adds `Content-type: application/json` header. Additionally, parses the response body as JSON.\n* `multipart` - (experimental) array of objects which contains their own headers and `body` attribute. Sends `multipart/related` request. 
See example below.\n* `followRedirect` - follow HTTP 3xx responses as redirects (default: `true`)\n* `followAllRedirects` - follow non-GET HTTP 3xx responses as redirects (default: `false`)\n* `maxRedirects` - the maximum number of redirects to follow (default: `10`)\n* `encoding` - Encoding to be used on `setEncoding` of response data. If `null`, the `body` is returned as a `Buffer`.\n* `pool` - A hash object containing the agents for these requests. If omitted, the request will use the global pool (which is set to node's default `maxSockets`)\n* `pool.maxSockets` - Integer containing the maximum amount of sockets in the pool.\n* `timeout` - Integer containing the number of milliseconds to wait for a request to respond before aborting the request\n* `proxy` - An HTTP proxy to be used. Supports proxy Auth with Basic Auth, identical to support for the `url` parameter (by embedding the auth info in the `uri`)\n* `oauth` - Options for OAuth HMAC-SHA1 signing. See documentation above.\n* `hawk` - Options for [Hawk signing](https://github.com/hueniverse/hawk). The `credentials` key must contain the necessary signing info, [see hawk docs for details](https://github.com/hueniverse/hawk#usage-example).\n* `strictSSL` - If `true`, requires SSL certificates be valid. **Note:** to use your own certificate authority, you need to specify an agent that was created with that CA as an option.\n* `jar` - If `true`, remember cookies for future use (or define your custom cookie jar; see examples section)\n* `aws` - `object` containing AWS signing information. Should have the properties `key`, `secret`. Also requires the property `bucket`, unless you’re specifying your `bucket` as part of the path, or the request doesn’t use a bucket (i.e. GET Services)\n* `httpSignature` - Options for the [HTTP Signature Scheme](https://github.com/joyent/node-http-signature/blob/master/http_signing.md) using [Joyent's library](https://github.com/joyent/node-http-signature). 
The `keyId` and `key` properties must be specified. See the docs for other options.\n* `localAddress` - Local interface to bind for network connections.\n\n\nThe callback argument gets 3 arguments: \n\n1. An `error` when applicable (usually from the `http.Client` option, not the `http.ClientRequest` object)\n2. An `http.ClientResponse` object\n3. The third is the `response` body (`String` or `Buffer`)\n\n## Convenience methods\n\nThere are also shorthand methods for different HTTP METHODs and some other conveniences.\n\n### request.defaults(options)\n\nThis method returns a wrapper around the normal request API that defaults to whatever options you pass in to it.\n\n### request.put\n\nSame as `request()`, but defaults to `method: \"PUT\"`.\n\n```javascript\nrequest.put(url)\n```\n\n### request.patch\n\nSame as `request()`, but defaults to `method: \"PATCH\"`.\n\n```javascript\nrequest.patch(url)\n```\n\n### request.post\n\nSame as `request()`, but defaults to `method: \"POST\"`.\n\n```javascript\nrequest.post(url)\n```\n\n### request.head\n\nSame as request() but defaults to `method: \"HEAD\"`.\n\n```javascript\nrequest.head(url)\n```\n\n### request.del\n\nSame as `request()`, but defaults to `method: \"DELETE\"`.\n\n```javascript\nrequest.del(url)\n```\n\n### request.get\n\nSame as `request()` (for uniformity).\n\n```javascript\nrequest.get(url)\n```\n### request.cookie\n\nFunction that creates a new cookie.\n\n```javascript\nrequest.cookie('cookie_string_here')\n```\n### request.jar\n\nFunction that creates a new cookie jar.\n\n```javascript\nrequest.jar()\n```\n\n\n## Examples:\n\n```javascript\n var request = require('request')\n , rand = Math.floor(Math.random()*100000000).toString()\n ;\n request(\n { method: 'PUT'\n , uri: 'http://mikeal.iriscouch.com/testjs/' + rand\n , multipart:\n [ { 'content-type': 'application/json'\n , body: JSON.stringify({foo: 'bar', _attachments: {'message.txt': {follows: true, length: 18, 'content_type': 'text/plain' }}})\n }\n , 
{ body: 'I am an attachment' }\n ]\n }\n , function (error, response, body) {\n if(response.statusCode == 201){\n console.log('document saved as: http://mikeal.iriscouch.com/testjs/'+ rand)\n } else {\n console.log('error: '+ response.statusCode)\n console.log(body)\n }\n }\n )\n```\n\nCookies are disabled by default (else, they would be used in subsequent requests). To enable cookies, set `jar` to `true` (either in `defaults` or `options`).\n\n```javascript\nvar request = request.defaults({jar: true})\nrequest('http://www.google.com', function () {\n request('http://images.google.com')\n})\n```\n\nTo use a custom cookie jar (instead `request`’s global cookie jar), set `jar` to an instance of `request.jar()` (either in `defaults` or `options`)\n\n```javascript\nvar j = request.jar()\nvar request = request.defaults({jar:j})\nrequest('http://www.google.com', function () {\n request('http://images.google.com')\n})\n```\nOR\n\n```javascript\nvar j = request.jar()\nvar cookie = request.cookie('your_cookie_here')\nj.add(cookie)\nrequest({url: 'http://www.google.com', jar: j}, function () {\n request('http://images.google.com')\n})\n```\n", - "readmeFilename": "README.md", + "devDependencies": { + "eslint": "0.5.1", + "rimraf": "~2.2.8", + "tape": "~3.0.0", + "taper": "~0.3.0" + }, + "gitHead": "7cdd75ec184868bba3be88a780bfb6e10fe33be4", "homepage": "https://github.com/mikeal/request", - "_id": "request@2.30.0", - "_from": "request@latest" + "_id": "request@2.46.0", + "_shasum": "359195d52eaf720bc69742579d04ad6d265a8274", + "_from": "request@>=2.46.0 <2.47.0", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "nylen", + "email": "jnylen@gmail.com" + }, + "maintainers": [ + { + "name": "mikeal", + "email": "mikeal.rogers@gmail.com" + }, + { + "name": "nylen", + "email": "jnylen@gmail.com" + } + ], + "dist": { + "shasum": "359195d52eaf720bc69742579d04ad6d265a8274", + "tarball": "http://registry.npmjs.org/request/-/request-2.46.0.tgz" + }, + "directories": {}, + 
"_resolved": "https://registry.npmjs.org/request/-/request-2.46.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/README.md nodejs-0.11.15/deps/npm/node_modules/request/README.md --- nodejs-0.11.13/deps/npm/node_modules/request/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,7 @@ -# Request -- Simplified HTTP client +# Request — Simplified HTTP client +[![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/mikeal/request?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![NPM](https://nodei.co/npm/request.png)](https://nodei.co/npm/request/) +[![NPM](https://nodei.co/npm/request.png?downloads=true&downloadRank=true&stars=true)](https://nodei.co/npm/request/) ## Super simple to use @@ -35,6 +36,18 @@ request.get('http://google.com/img.png').pipe(request.put('http://mysite.com/img.png')) ``` +Request emits a "response" event when a response is received. The `response` argument will be an instance of [http.IncomingMessage](http://nodejs.org/api/http.html#http_http_incomingmessage). + +```javascript +request + .get('http://google.com/img.png') + .on('response', function(response) { + console.log(response.statusCode) // 200 + console.log(response.headers['content-type']) // 'image/png' + }) + .pipe(request.put('http://mysite.com/img.png')) +``` + Now let’s get fancy. ```javascript @@ -81,29 +94,216 @@ You can still use intermediate proxies, the requests will still follow HTTP forwards, etc. +## Proxies + +If you specify a `proxy` option, then the request (and any subsequent +redirects) will be sent via a connection to the proxy server. + +If your endpoint is an `https` url, and you are using a proxy, then +request will send a `CONNECT` request to the proxy server *first*, and +then use the supplied connection to connect to the endpoint. 
+ +That is, first it will make a request like: + +``` +HTTP/1.1 CONNECT endpoint-server.com:80 +Host: proxy-server.com +User-Agent: whatever user agent you specify +``` + +and then the proxy server make a TCP connection to `endpoint-server` +on port `80`, and return a response that looks like: + +``` +HTTP/1.1 200 OK +``` + +At this point, the connection is left open, and the client is +communicating directly with the `endpoint-server.com` machine. + +See [the wikipedia page on HTTP Tunneling](http://en.wikipedia.org/wiki/HTTP_tunnel) +for more information. + +By default, when proxying `http` traffic, request will simply make a +standard proxied `http` request. This is done by making the `url` +section of the initial line of the request a fully qualified url to +the endpoint. + +For example, it will make a single request that looks like: + +``` +HTTP/1.1 GET http://endpoint-server.com/some-url +Host: proxy-server.com +Other-Headers: all go here + +request body or whatever +``` + +Because a pure "http over http" tunnel offers no additional security +or other features, it is generally simpler to go with a +straightforward HTTP proxy in this case. However, if you would like +to force a tunneling proxy, you may set the `tunnel` option to `true`. + +If you are using a tunneling proxy, you may set the +`proxyHeaderWhiteList` to share certain headers with the proxy. + +By default, this set is: + +``` +accept +accept-charset +accept-encoding +accept-language +accept-ranges +cache-control +content-encoding +content-language +content-length +content-location +content-md5 +content-range +content-type +connection +date +expect +max-forwards +pragma +proxy-authorization +referer +te +transfer-encoding +user-agent +via +``` + +Note that, when using a tunneling proxy, the `proxy-authorization` +header is *never* sent to the endpoint server, but only to the proxy +server. All other headers are sent as-is over the established +connection. 
+ +### Controlling proxy behaviour using environment variables + +The following environment variables are respected by `request`: + + * `HTTP_PROXY` / `http_proxy` + * `HTTPS_PROXY` / `https_proxy` + * `NO_PROXY` / `no_proxy` + +When `HTTP_PROXY` / `http_proxy` are set, they will be used to proxy non-SSL requests that do not have an explicit `proxy` configuration option present. Similarly, `HTTPS_PROXY` / `https_proxy` will be respected for SSL requests that do not have an explicit `proxy` configuration option. It is valid to define a proxy in one of the environment variables, but then override it for a specific request, using the `proxy` configuration option. Furthermore, the `proxy` configuration option can be explicitly set to false / null to opt out of proxying altogether for that request. + +`request` is also aware of the `NO_PROXY`/`no_proxy` environment variables. These variables provide a granular way to opt out of proxying, on a per-host basis. It should contain a comma separated list of hosts to opt out of proxying. It is also possible to opt of proxying when a particular destination port is used. Finally, the variable may be set to `*` to opt out of the implicit proxy configuration of the other environment variables. + +Here's some examples of valid `no_proxy` values: + + * `google.com` - don't proxy HTTP/HTTPS requests to Google. + * `google.com:443` - don't proxy HTTPS requests to Google, but *do* proxy HTTP requests to Google. + * `google.com:443, yahoo.com:80` - don't proxy HTTPS requests to Google, and don't proxy HTTP requests to Yahoo! + * `*` - ignore `https_proxy`/`http_proxy` environment variables altogether. + +## UNIX Socket + +`request` supports making requests to [UNIX Domain Sockets](http://en.wikipedia.org/wiki/Unix_domain_socket). 
To make one, use the following URL scheme: + +```javascript +/* Pattern */ 'http://unix:SOCKET:PATH' +/* Example */ request.get('http://unix:/absolute/path/to/unix.socket:/request/path') +``` + +Note: The `SOCKET` path is assumed to be absolute to the root of the host file system. + + ## Forms `request` supports `application/x-www-form-urlencoded` and `multipart/form-data` form uploads. For `multipart/related` refer to the `multipart` API. +#### application/x-www-form-urlencoded (URL-Encoded Forms) + URL-encoded forms are simple. ```javascript request.post('http://service.com/upload', {form:{key:'value'}}) // or request.post('http://service.com/upload').form({key:'value'}) +// or +request.post({url:'http://service.com/upload', form: {key:'value'}}, function(err,httpResponse,body){ /* ... */ }) +``` + +#### multipart/form-data (Multipart Form Uploads) + +For `multipart/form-data` we use the [form-data](https://github.com/felixge/node-form-data) library by [@felixge](https://github.com/felixge). For the most cases, you can pass your upload form data via the `formData` option. 
+ + +```javascript +var formData = { + // Pass a simple key-value pair + my_field: 'my_value', + // Pass data via Buffers + my_buffer: new Buffer([1, 2, 3]), + // Pass data via Streams + my_file: fs.createReadStream(__dirname + '/unicycle.jpg'), + // Pass multiple values /w an Array + attachments: [ + fs.createReadStream(__dirname + '/attacment1.jpg') + fs.createReadStream(__dirname + '/attachment2.jpg') + ], + // Pass optional meta-data with an 'options' object with style: {value: DATA, options: OPTIONS} + // See the `form-data` README for more information about options: https://github.com/felixge/node-form-data + custom_file: { + value: fs.createReadStream('/dev/urandom'), + options: { + filename: 'topsecret.jpg', + contentType: 'image/jpg' + } + } +}; +request.post({url:'http://service.com/upload', formData: formData}, function optionalCallback(err, httpResponse, body) { + if (err) { + return console.error('upload failed:', err); + } + console.log('Upload successful! Server responded with:', body); +}); +``` + +For advanced cases, you can the form-data object itself via `r.form()`. This can be modified until the request is fired on the next cycle of the event-loop. (Note that this calling `form()` will clear the currently set form data for that request.) + +```javascript +// NOTE: Advanced use-case, for normal use see 'formData' usage above +var r = request.post('http://service.com/upload', function optionalCallback(err, httpResponse, body) { // ... + +var form = r.form(); +form.append('my_field', 'my_value'); +form.append('my_buffer', new Buffer([1, 2, 3])); +form.append('custom_file', fs.createReadStream(__dirname + '/unicycle.jpg'), {filename: 'unicycle.jpg'}); ``` +See the [form-data README](https://github.com/felixge/node-form-data) for more information & examples. -For `multipart/form-data` we use the [form-data](https://github.com/felixge/node-form-data) library by [@felixge](https://github.com/felixge). 
You don’t need to worry about piping the form object or setting the headers, `request` will handle that for you. +#### multipart/related + +Some variations in different HTTP implementations require a newline/CRLF before, after, or both before and after the boundary of a `multipart/related` request (using the multipart option). This has been observed in the .NET WebAPI version 4.0. You can turn on a boundary preambleCRLF or postamble by passing them as `true` to your request options. ```javascript -var r = request.post('http://service.com/upload') -var form = r.form() -form.append('my_field', 'my_value') -form.append('my_buffer', new Buffer([1, 2, 3])) -form.append('my_file', fs.createReadStream(path.join(__dirname, 'doodle.png')) -form.append('remote_file', request('http://google.com/doodle.png')) + request( + { method: 'PUT' + , preambleCRLF: true + , postambleCRLF: true + , uri: 'http://service.com/upload' + , multipart: + [ { 'content-type': 'application/json' + , body: JSON.stringify({foo: 'bar', _attachments: {'message.txt': {follows: true, length: 18, 'content_type': 'text/plain' }}}) + } + , { body: 'I am an attachment' } + ] + } + , function (error, response, body) { + if (err) { + return console.error('upload failed:', err); + } + console.log('Upload successful! Server responded with:', body); + } + ) ``` + ## HTTP Authentication ```javascript @@ -116,14 +316,37 @@ 'sendImmediately': false } }); +// or +request.get('http://some.server.com/').auth(null, null, true, 'bearerToken'); +// or +request.get('http://some.server.com/', { + 'auth': { + 'bearer': 'bearerToken' + } +}); ``` -If passed as an option, `auth` should be a hash containing values `user` || `username`, `password` || `pass`, and `sendImmediately` (optional). The method form takes parameters `auth(username, password, sendImmediately)`. +If passed as an option, `auth` should be a hash containing values `user` || `username`, `pass` || `password`, and `sendImmediately` (optional). 
The method form takes parameters `auth(username, password, sendImmediately)`. `sendImmediately` defaults to `true`, which causes a basic authentication header to be sent. If `sendImmediately` is `false`, then `request` will retry with a proper authentication header after receiving a `401` response from the server (which must contain a `WWW-Authenticate` header indicating the required authentication method). +Note that you can also use for basic authentication a trick using the URL itself, as specified in [RFC 1738](http://www.ietf.org/rfc/rfc1738.txt). +Simply pass the `user:password` before the host with an `@` sign. + +```javascript +var username = 'username', + password = 'password', + url = 'http://' + username + ':' + password + '@some.server.com'; + +request({url: url}, function (error, response, body) { + // Do more stuff with 'body' here +}); +``` + Digest authentication is supported, but it only works with `sendImmediately` set to `false`; otherwise `request` will send basic authentication on the initial request, which will probably cause the request to fail. +Bearer authentication is supported, and is activated when the `bearer` value is available. The value may be either a `String` or a `Function` returning a `String`. Using a function to supply the bearer token is particularly useful if used in conjuction with `defaults` to allow a single function to supply the last known token at the time or sending a request or to compute one on the fly. + ## OAuth Signing ```javascript @@ -158,7 +381,7 @@ , token: perm_token.oauth_token , token_secret: perm_token.oauth_token_secret } - , url = 'https://api.twitter.com/1/users/show.json?' + , url = 'https://api.twitter.com/1.1/users/show.json?' , params = { screen_name: perm_token.screen_name , user_id: perm_token.user_id @@ -172,14 +395,14 @@ }) ``` -### Custom HTTP Headers +## Custom HTTP Headers HTTP Headers, such as `User-Agent`, can be set in the `options` object. 
In the example below, we call the github API to find out the number of stars and forks for the request repository. This requires a custom `User-Agent` header as well as https. -``` +```javascript var request = require('request'); var options = { @@ -200,41 +423,99 @@ request(options, callback); ``` -### request(options, callback) +## TLS/SSL Protocol + +TLS/SSL Protocol options, such as `cert`, `key` and `passphrase`, can be +set in the `agentOptions` property of the `options` object. +In the example below, we call an API requires client side SSL certificate +(in PEM format) with passphrase protected private key (in PEM format) and disable the SSLv3 protocol: + +```javascript +var fs = require('fs') + , path = require('path') + , certFile = path.resolve(__dirname, 'ssl/client.crt') + , keyFile = path.resolve(__dirname, 'ssl/client.key') + , request = require('request'); + +var options = { + url: 'https://api.some-server.com/', + agentOptions: { + 'cert': fs.readFileSync(certFile), + 'key': fs.readFileSync(keyFile), + // Or use `pfx` property replacing `cert` and `key` when using private key, certificate and CA certs in PFX or PKCS12 format: + // 'pfx': fs.readFileSync(pfxFilePath), + 'passphrase': 'password', + 'securityOptions': 'SSL_OP_NO_SSLv3' + } +}; + +request.get(options); +``` + +It is able to force using SSLv3 only by specifying `secureProtocol`: + +```javascript + +request.get({ + url: 'https://api.some-server.com/', + agentOptions: { + 'secureProtocol': 'SSLv3_method' + } +}); +``` + +## request(options, callback) The first argument can be either a `url` or an `options` object. The only required option is `uri`; all others are optional. * `uri` || `url` - fully qualified uri or a parsed url object from `url.parse()` * `qs` - object containing querystring values to be appended to the `uri` +* `useQuerystring` - If true, use `querystring` to stringify and parse + querystrings, otherwise use `qs` (default: `false`). 
Set this option to + `true` if you need arrays to be serialized as `foo=bar&foo=baz` instead of the + default `foo[0]=bar&foo[1]=baz`. * `method` - http method (default: `"GET"`) * `headers` - http headers (default: `{}`) -* `body` - entity body for PATCH, POST and PUT requests. Must be a `Buffer` or `String`. -* `form` - when passed an object, this sets `body` to a querystring representation of value, and adds `Content-type: application/x-www-form-urlencoded; charset=utf-8` header. When passed no options, a `FormData` instance is returned (and is piped to request). -* `auth` - A hash containing values `user` || `username`, `password` || `pass`, and `sendImmediately` (optional). See documentation above. +* `body` - entity body for PATCH, POST and PUT requests. Must be a `Buffer` or `String`, unless `json` is `true`. If `json` is `true`, then `body` must be a JSON-serializable object. +* `form` - when passed an object or a querystring, this sets `body` to a querystring representation of value, and adds `Content-type: application/x-www-form-urlencoded` header. When passed no options, a `FormData` instance is returned (and is piped to request). See "Forms" section above. +* `formData` - Data to pass for a `multipart/form-data` request. See "Forms" section above. +* `multipart` - (experimental) Data to pass for a `multipart/related` request. See "Forms" section above +* `auth` - A hash containing values `user` || `username`, `pass` || `password`, and `sendImmediately` (optional). See documentation above. * `json` - sets `body` but to JSON representation of value and adds `Content-type: application/json` header. Additionally, parses the response body as JSON. * `multipart` - (experimental) array of objects which contains their own headers and `body` attribute. Sends `multipart/related` request. See example below. 
-* `followRedirect` - follow HTTP 3xx responses as redirects (default: `true`) +* `preambleCRLF` - append a newline/CRLF before the boundary of your `multipart/form-data` request. +* `postambleCRLF` - append a newline/CRLF at the end of the boundary of your `multipart/form-data` request. +* `followRedirect` - follow HTTP 3xx responses as redirects (default: `true`). This property can also be implemented as function which gets `response` object as a single argument and should return `true` if redirects should continue or `false` otherwise. * `followAllRedirects` - follow non-GET HTTP 3xx responses as redirects (default: `false`) * `maxRedirects` - the maximum number of redirects to follow (default: `10`) -* `encoding` - Encoding to be used on `setEncoding` of response data. If `null`, the `body` is returned as a `Buffer`. -* `pool` - A hash object containing the agents for these requests. If omitted, the request will use the global pool (which is set to node's default `maxSockets`) -* `pool.maxSockets` - Integer containing the maximum amount of sockets in the pool. +* `encoding` - Encoding to be used on `setEncoding` of response data. If `null`, the `body` is returned as a `Buffer`. Anything else **(including the default value of `undefined`)** will be passed as the [encoding](http://nodejs.org/api/buffer.html#buffer_buffer) parameter to `toString()` (meaning this is effectively `utf8` by default). +* `pool` - An object describing which agents to use for the request. If this option is omitted the request will use the global agent (as long as [your options allow for it](request.js#L747)). Otherwise, request will search the pool for your custom agent. If no custom agent is found, a new agent will be created and added to the pool. + * A `maxSockets` property can also be provided on the `pool` object to set the max number of sockets for all agents created (ex: `pool: {maxSockets: Infinity}`). 
* `timeout` - Integer containing the number of milliseconds to wait for a request to respond before aborting the request * `proxy` - An HTTP proxy to be used. Supports proxy Auth with Basic Auth, identical to support for the `url` parameter (by embedding the auth info in the `uri`) * `oauth` - Options for OAuth HMAC-SHA1 signing. See documentation above. * `hawk` - Options for [Hawk signing](https://github.com/hueniverse/hawk). The `credentials` key must contain the necessary signing info, [see hawk docs for details](https://github.com/hueniverse/hawk#usage-example). * `strictSSL` - If `true`, requires SSL certificates be valid. **Note:** to use your own certificate authority, you need to specify an agent that was created with that CA as an option. -* `jar` - If `true`, remember cookies for future use (or define your custom cookie jar; see examples section) +* `agentOptions` - Object containing user agent options. See documentation above. **Note:** [see tls API doc for TLS/SSL options](http://nodejs.org/api/tls.html#tls_tls_connect_options_callback). + +* `jar` - If `true` and `tough-cookie` is installed, remember cookies for future use (or define your custom cookie jar; see examples section) * `aws` - `object` containing AWS signing information. Should have the properties `key`, `secret`. Also requires the property `bucket`, unless you’re specifying your `bucket` as part of the path, or the request doesn’t use a bucket (i.e. GET Services) * `httpSignature` - Options for the [HTTP Signature Scheme](https://github.com/joyent/node-http-signature/blob/master/http_signing.md) using [Joyent's library](https://github.com/joyent/node-http-signature). The `keyId` and `key` properties must be specified. See the docs for other options. * `localAddress` - Local interface to bind for network connections. 
+* `gzip` - If `true`, add an `Accept-Encoding` header to request compressed content encodings from the server (if not already present) and decode supported content encodings in the response. **Note:** Automatic decoding of the response content is performed on the body data returned through `request` (both through the `request` stream and passed to the callback function) but is not performed on the `response` stream (available from the `response` event) which is the unmodified `http.IncomingMessage` object which may contain compressed data. See example below. +* `tunnel` - If `true`, then *always* use a tunneling proxy. If + `false` (default), then tunneling will only be used if the + destination is `https`, or if a previous request in the redirect + chain used a tunneling proxy. +* `proxyHeaderWhiteList` - A whitelist of headers to send to a + tunneling proxy. -The callback argument gets 3 arguments: - -1. An `error` when applicable (usually from the `http.Client` option, not the `http.ClientRequest` object) -2. An `http.ClientResponse` object -3. The third is the `response` body (`String` or `Buffer`) +The callback argument gets 3 arguments: + +1. An `error` when applicable (usually from [`http.ClientRequest`](http://nodejs.org/api/http.html#http_class_http_clientrequest) object) +2. An [`http.IncomingMessage`](http://nodejs.org/api/http.html#http_http_incomingmessage) object +3. The third is the `response` body (`String` or `Buffer`, or JSON object if the `json` option is supplied) ## Convenience methods @@ -244,6 +525,22 @@ This method returns a wrapper around the normal request API that defaults to whatever options you pass in to it. +**Note:** You can call `.defaults()` on the wrapper that is returned from `request.defaults` to add/override defaults that were previously defaulted. 
+ +For example: +```javascript +//requests using baseRequest() will set the 'x-token' header +var baseRequest = request.defaults({ + headers: {x-token: 'my-token'} +}) + +//requests using specialRequest() will include the 'x-token' header set in +//baseRequest and will also include the 'special' header +var specialRequest = baseRequest.defaults({ + headers: {special: 'special value'} +}) +``` + ### request.put Same as `request()`, but defaults to `method: "PUT"`. @@ -296,7 +593,7 @@ Function that creates a new cookie. ```javascript -request.cookie('cookie_string_here') +request.cookie('key1=value1') ``` ### request.jar @@ -334,7 +631,38 @@ ) ``` -Cookies are disabled by default (else, they would be used in subsequent requests). To enable cookies, set `jar` to `true` (either in `defaults` or `options`). +For backwards-compatibility, response compression is not supported by default. +To accept gzip-compressed responses, set the `gzip` option to `true`. Note +that the body data passed through `request` is automatically decompressed +while the response object is unmodified and will contain compressed data if +the server sent a compressed response. + +```javascript + var request = require('request') + request( + { method: 'GET' + , uri: 'http://www.google.com' + , gzip: true + } + , function (error, response, body) { + // body is the decompressed response body + console.log('server encoded the data as: ' + (response.headers['content-encoding'] || 'identity')) + console.log('the decoded data is: ' + body) + } + ).on('data', function(data) { + // decompressed data as it is received + console.log('decoded chunk: ' + data) + }) + .on('response', function(response) { + // unmodified http.IncomingMessage object + response.on('data', function(data) { + // compressed data as it is received + console.log('received ' + data.length + ' bytes of compressed data') + }) + }) +``` + +Cookies are disabled by default (else, they would be used in subsequent requests). 
To enable cookies, set `jar` to `true` (either in `defaults` or `options`) and install `tough-cookie`. ```javascript var request = request.defaults({jar: true}) @@ -343,7 +671,7 @@ }) ``` -To use a custom cookie jar (instead `request`’s global cookie jar), set `jar` to an instance of `request.jar()` (either in `defaults` or `options`) +To use a custom cookie jar (instead of `request`’s global cookie jar), set `jar` to an instance of `request.jar()` (either in `defaults` or `options`) ```javascript var j = request.jar() @@ -352,13 +680,40 @@ request('http://images.google.com') }) ``` + OR ```javascript +// `npm install --save tough-cookie` before this works +var j = request.jar(); +var cookie = request.cookie('key1=value1'); +var url = 'http://www.google.com'; +j.setCookie(cookie, url); +request({url: url, jar: j}, function () { + request('http://images.google.com') +}) +``` + +To inspect your cookie jar after a request + +```javascript var j = request.jar() -var cookie = request.cookie('your_cookie_here') -j.add(cookie) request({url: 'http://www.google.com', jar: j}, function () { - request('http://images.google.com') + var cookie_string = j.getCookieString(uri); // "key1=value1; key2=value2; ..." + var cookies = j.getCookies(uri); + // [{key: 'key1', value: 'value1', domain: "www.google.com", ...}, ...] }) ``` + +## Debugging + +There are at least three ways to debug the operation of `request`: + +1. Launch the node process like `NODE_DEBUG=request node script.js` + (`lib,request,otherlib` works too). + +2. Set `require('request').debug = true` at any time (this does the same thing + as #1). + +3. Use the [request-debug module](https://github.com/nylen/request-debug) to + view request and response headers and bodies. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/release.sh nodejs-0.11.15/deps/npm/node_modules/request/release.sh --- nodejs-0.11.13/deps/npm/node_modules/request/release.sh 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/release.sh 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +npm version minor && npm publish && npm version patch && git push --tags && git push origin master diff -Nru nodejs-0.11.13/deps/npm/node_modules/request/request.js nodejs-0.11.15/deps/npm/node_modules/request/request.js --- nodejs-0.11.13/deps/npm/node_modules/request/request.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/request/request.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,116 +1,355 @@ -var optional = require('./lib/optional') - , http = require('http') - , https = optional('https') - , tls = optional('tls') +'use strict' + +var http = require('http') + , https = require('https') , url = require('url') , util = require('util') , stream = require('stream') , qs = require('qs') , querystring = require('querystring') - , crypto = require('crypto') - - , oauth = optional('oauth-sign') - , hawk = optional('hawk') - , aws = optional('aws-sign') - , httpSignature = optional('http-signature') + , zlib = require('zlib') + , helpers = require('./lib/helpers') + , bl = require('bl') + , oauth = require('oauth-sign') + , hawk = require('hawk') + , aws = require('aws-sign2') + , httpSignature = require('http-signature') , uuid = require('node-uuid') - , mime = require('mime') - , tunnel = optional('tunnel-agent') - , _safeStringify = require('json-stringify-safe') - + , mime = require('mime-types') + , tunnel = require('tunnel-agent') + , stringstream = require('stringstream') + , caseless = require('caseless') , ForeverAgent = require('forever-agent') - , FormData = optional('form-data') - - , Cookie = optional('tough-cookie') - , CookieJar = Cookie && Cookie.CookieJar - , cookieJar = CookieJar 
&& new CookieJar - + , FormData = require('form-data') + , cookies = require('./lib/cookies') , copy = require('./lib/copy') , debug = require('./lib/debug') - , getSafe = require('./lib/getSafe') - ; + , net = require('net') + +var safeStringify = helpers.safeStringify + , md5 = helpers.md5 + , isReadStream = helpers.isReadStream + , toBase64 = helpers.toBase64 + , defer = helpers.defer + , globalCookieJar = cookies.jar() -function safeStringify (obj) { - var ret - try { ret = JSON.stringify(obj) } - catch (e) { ret = _safeStringify(obj) } - return ret -} var globalPool = {} -var isUrl = /^https?:/i + , isUrl = /^https?:/ +var defaultProxyHeaderWhiteList = [ + 'accept', + 'accept-charset', + 'accept-encoding', + 'accept-language', + 'accept-ranges', + 'cache-control', + 'content-encoding', + 'content-language', + 'content-length', + 'content-location', + 'content-md5', + 'content-range', + 'content-type', + 'connection', + 'date', + 'expect', + 'max-forwards', + 'pragma', + 'proxy-authorization', + 'referer', + 'te', + 'transfer-encoding', + 'user-agent', + 'via' +] + +function filterForNonReserved(reserved, options) { + // Filter out properties that are not reserved. + // Reserved values are passed in at call site. -// Hacky fix for pre-0.4.4 https -if (https && !https.Agent) { - https.Agent = function (options) { - http.Agent.call(this, options) - } - util.inherits(https.Agent, http.Agent) - https.Agent.prototype._getConnection = function (host, port, cb) { - var s = tls.connect(port, host, this.options, function () { - // do other checks here? - if (cb) cb() - }) - return s + var object = {} + for (var i in options) { + var notReserved = (reserved.indexOf(i) === -1) + if (notReserved) { + object[i] = options[i] + } } + return object } -function isReadStream (rs) { - if (rs.readable && rs.path && rs.mode) { - return true +function filterOutReservedFunctions(reserved, options) { + // Filter out properties that are functions and are reserved. 
+ // Reserved values are passed in at call site. + + var object = {} + for (var i in options) { + var isReserved = !(reserved.indexOf(i) === -1) + var isFunction = (typeof options[i] === 'function') + if (!(isReserved && isFunction)) { + object[i] = options[i] + } } + return object + } -function toBase64 (str) { - return (new Buffer(str || "", "ascii")).toString("base64") +function constructProxyHost(uriObject) { + var port = uriObject.portA + , protocol = uriObject.protocol + , proxyHost = uriObject.hostname + ':' + + if (port) { + proxyHost += port + } else if (protocol === 'https:') { + proxyHost += '443' + } else { + proxyHost += '80' + } + + return proxyHost } -function md5 (str) { - return crypto.createHash('md5').update(str).digest('hex') +function constructProxyHeaderWhiteList(headers, proxyHeaderWhiteList) { + return Object.keys(headers) + .filter(function (header) { + return proxyHeaderWhiteList.indexOf(header.toLowerCase()) !== -1 + }) + .reduce(function (set, header) { + set[header] = headers[header] + return set + }, {}) } -function Request (options) { - stream.Stream.call(this) - this.readable = true - this.writable = true +function construcTunnelOptions(request) { + var proxy = request.proxy + var proxyHeaders = request.proxyHeaders + var proxyAuth - if (typeof options === 'string') { - options = {uri:options} + if (proxy.auth) { + proxyAuth = proxy.auth } - var reserved = Object.keys(Request.prototype) - for (var i in options) { - if (reserved.indexOf(i) === -1) { - this[i] = options[i] - } else { - if (typeof options[i] === 'function') { - delete options[i] + if (!proxy.auth && request.proxyAuthorization) { + proxyHeaders['Proxy-Authorization'] = request.proxyAuthorization + } + + var tunnelOptions = { + proxy: { + host: proxy.hostname, + port: +proxy.port, + proxyAuth: proxyAuth, + headers: proxyHeaders + }, + rejectUnauthorized: request.rejectUnauthorized, + headers: request.headers, + ca: request.ca, + cert: request.cert, + key: request.key + } 
+ + return tunnelOptions +} + +function constructTunnelFnName(uri, proxy) { + var uriProtocol = (uri.protocol === 'https:' ? 'https' : 'http') + var proxyProtocol = (proxy.protocol === 'https:' ? 'Https' : 'Http') + return [uriProtocol, proxyProtocol].join('Over') +} + +function getTunnelFn(request) { + var uri = request.uri + var proxy = request.proxy + var tunnelFnName = constructTunnelFnName(uri, proxy) + return tunnel[tunnelFnName] +} + +// Decide the proper request proxy to use based on the request URI object and the +// environmental variables (NO_PROXY, HTTP_PROXY, etc.) +function getProxyFromURI(uri) { + // respect NO_PROXY environment variables (see: http://lynx.isc.org/current/breakout/lynx_help/keystrokes/environments.html) + var noProxy = process.env.NO_PROXY || process.env.no_proxy || null + + // easy case first - if NO_PROXY is '*' + if (noProxy === '*') { + return null + } + + // otherwise, parse the noProxy value to see if it applies to the URL + if (noProxy !== null) { + var noProxyItem, hostname, port, noProxyItemParts, noProxyHost, noProxyPort, noProxyList + + // canonicalize the hostname, so that 'oogle.com' won't match 'google.com' + hostname = uri.hostname.replace(/^\.*/, '.').toLowerCase() + noProxyList = noProxy.split(',') + + for (var i = 0, len = noProxyList.length; i < len; i++) { + noProxyItem = noProxyList[i].trim().toLowerCase() + + // no_proxy can be granular at the port level, which complicates things a bit. + if (noProxyItem.indexOf(':') > -1) { + noProxyItemParts = noProxyItem.split(':', 2) + noProxyHost = noProxyItemParts[0].replace(/^\.*/, '.') + noProxyPort = noProxyItemParts[1] + port = uri.port || (uri.protocol === 'https:' ? '443' : '80') + + // we've found a match - ports are same and host ends with no_proxy entry. 
+ if (port === noProxyPort && hostname.indexOf(noProxyHost) === hostname.length - noProxyHost.length) { + return null + } + } else { + noProxyItem = noProxyItem.replace(/^\.*/, '.') + if (hostname.indexOf(noProxyItem) === hostname.length - noProxyItem.length) { + return null + } } } } - if (options.method) { - this.explicitMethod = true + // check for HTTP(S)_PROXY environment variables + if (uri.protocol === 'http:') { + return process.env.HTTP_PROXY || process.env.http_proxy || null + } else if (uri.protocol === 'https:') { + return process.env.HTTPS_PROXY || process.env.https_proxy || process.env.HTTP_PROXY || process.env.http_proxy || null + } + + // return null if all else fails (What uri protocol are you using then?) + return null +} + +// Function for properly handling a connection error +function connectionErrorHandler(error) { + var socket = this + if (socket.res) { + if (socket.res.request) { + socket.res.request.emit('error', error) + } else { + socket.res.emit('error', error) + } + } else { + socket._httpMessage.emit('error', error) + } +} + +// Return a simpler request object to allow serialization +function requestToJSON() { + var self = this + return { + uri: self.uri, + method: self.method, + headers: self.headers + } +} + +// Return a simpler response object to allow serialization +function responseToJSON() { + var self = this + return { + statusCode: self.statusCode, + body: self.body, + headers: self.headers, + request: requestToJSON.call(self.request) } +} + +function Request (options) { + // if tunnel property of options was not given default to false + // if given the method property in options, set property explicitMethod to true + + // extend the Request instance with any non-reserved properties + // remove any reserved functions from the options object + // set Request instance to be readable and writable + // call init - this.canTunnel = options.tunnel !== false && tunnel; + var self = this + stream.Stream.call(self) + var reserved = 
Object.keys(Request.prototype) + var nonReserved = filterForNonReserved(reserved, options) - this.init(options) + stream.Stream.call(self) + util._extend(self, nonReserved) + options = filterOutReservedFunctions(reserved, options) + + self.readable = true + self.writable = true + if (typeof options.tunnel === 'undefined') { + options.tunnel = false + } + if (options.method) { + self.explicitMethod = true + } + self.canTunnel = options.tunnel !== false && tunnel + self.init(options) } + util.inherits(Request, stream.Stream) + +Request.prototype.setupTunnel = function () { + // Set up the tunneling agent if necessary + // Only send the proxy whitelisted header names. + // Turn on tunneling for the rest of request. + + var self = this + + if (typeof self.proxy === 'string') { + self.proxy = url.parse(self.proxy) + } + + if (!self.proxy) { + return false + } + + if (!self.tunnel && self.uri.protocol !== 'https:') { + return false + } + + if (!self.proxyHeaderWhiteList) { + self.proxyHeaderWhiteList = defaultProxyHeaderWhiteList + } + + var proxyHost = constructProxyHost(self.uri) + self.proxyHeaders = constructProxyHeaderWhiteList(self.headers, self.proxyHeaderWhiteList) + self.proxyHeaders.host = proxyHost + + var tunnelFn = getTunnelFn(self) + var tunnelOptions = construcTunnelOptions(self) + + self.agent = tunnelFn(tunnelOptions) + self.tunnel = true + return true +} + Request.prototype.init = function (options) { // init() contains all the code to setup the request object. // the actual outgoing request is not started until start() is called // this function is called from both the constructor and on redirect. var self = this - if (!options) options = {} + if (!options) { + options = {} + } + self.headers = self.headers ? copy(self.headers) : {} + + caseless.httpify(self, self.headers) - if (!self.method) self.method = options.method || 'GET' + // Never send proxy-auth to the endpoint! 
+ if (self.hasHeader('proxy-authorization')) { + self.proxyAuthorization = self.getHeader('proxy-authorization') + self.removeHeader('proxy-authorization') + } + + if (!self.method) { + self.method = options.method || 'GET' + } self.localAddress = options.localAddress + if (!self.qsLib) { + self.qsLib = (options.useQuerystring ? querystring : qs) + } + debug(options) - if (!self.pool && self.pool !== false) self.pool = globalPool + if (!self.pool && self.pool !== false) { + self.pool = globalPool + } self.dests = self.dests || [] self.__isRequestRequest = true @@ -118,7 +357,9 @@ if (!self._callback && self.callback) { self._callback = self.callback self.callback = function () { - if (self._callbackCalled) return // Print a warning maybe? + if (self._callbackCalled) { + return // Print a warning maybe? + } self._callbackCalled = true self._callback.apply(self, arguments) } @@ -126,48 +367,60 @@ self.on('complete', self.callback.bind(self, null)) } - if (self.url && !self.uri) { - // People use this property instead all the time so why not just support it. + // People use this property instead all the time, so support it + if (!self.uri && self.url) { self.uri = self.url delete self.url } + // A URI is needed by this point, throw if we haven't been able to get one if (!self.uri) { - // this will throw if unhandled but is handleable when in a redirect - return self.emit('error', new Error("options.uri is a required argument")) - } else { - if (typeof self.uri == "string") self.uri = url.parse(self.uri) + return self.emit('error', new Error('options.uri is a required argument')) + } + + // If a string URI/URL was given, parse it into a URL object + if(typeof self.uri === 'string') { + self.uri = url.parse(self.uri) + } + + // DEPRECATED: Warning for users of the old Unix Sockets URL Scheme + if (self.uri.protocol === 'unix:') { + return self.emit('error', new Error('`unix://` URL scheme is no longer supported. 
Please use the format `http://unix:SOCKET:PATH`')) + } + + // Support Unix Sockets + if(self.uri.host === 'unix') { + // Get the socket & request paths from the URL + var unixParts = self.uri.path.split(':') + , host = unixParts[0] + , path = unixParts[1] + // Apply unix properties to request + self.socketPath = host + self.uri.pathname = path + self.uri.path = path + self.uri.host = host + self.uri.hostname = host + self.uri.isUnix = true } if (self.strictSSL === false) { self.rejectUnauthorized = false } - if (self.proxy) { - if (typeof self.proxy == 'string') self.proxy = url.parse(self.proxy) - - // do the HTTP CONNECT dance using koichik/node-tunnel - if (http.globalAgent && self.uri.protocol === "https:" && self.canTunnel) { - var tunnelFn = self.proxy.protocol === "http:" - ? tunnel.httpsOverHttp : tunnel.httpsOverHttps - - var tunnelOptions = { proxy: { host: self.proxy.hostname - , port: +self.proxy.port - , proxyAuth: self.proxy.auth - , headers: { Host: self.uri.hostname + ':' + - (self.uri.port || self.uri.protocol === 'https:' ? 
443 : 80) }} - , rejectUnauthorized: self.rejectUnauthorized - , ca: this.ca } + if(!self.hasOwnProperty('proxy')) { + self.proxy = getProxyFromURI(self.uri) + } - self.agent = tunnelFn(tunnelOptions) - self.tunnel = true - } + // Pass in `tunnel:true` to *always* tunnel through proxies + self.tunnel = !!options.tunnel + if (self.proxy) { + self.setupTunnel() } if (!self.uri.pathname) {self.uri.pathname = '/'} - if (!self.uri.host) { - // Invalid URI: it may generate lot of bad errors, like "TypeError: Cannot call method 'indexOf' of undefined" in CookieJar + if (!(self.uri.host || (self.uri.hostname && self.uri.port)) && !self.uri.isUnix) { + // Invalid URI: it may generate lot of bad errors, like 'TypeError: Cannot call method `indexOf` of undefined' in CookieJar // Detect and reject it as soon as possible var faultyUri = url.format(self.uri) var message = 'Invalid URI "' + faultyUri + '"' @@ -177,26 +430,29 @@ // they should be warned that it can be caused by a redirection (can save some hair) message += '. This can be caused by a crappy redirection.' } - self.emit('error', new Error(message)) - return // This error was fatal + // This error was fatal + return self.emit('error', new Error(message)) } self._redirectsFollowed = self._redirectsFollowed || 0 self.maxRedirects = (self.maxRedirects !== undefined) ? self.maxRedirects : 10 - self.followRedirect = (self.followRedirect !== undefined) ? self.followRedirect : true + self.allowRedirect = (typeof self.followRedirect === 'function') ? self.followRedirect : function(response) { + return true + } + self.followRedirects = (self.followRedirect !== undefined) ? !!self.followRedirect : true self.followAllRedirects = (self.followAllRedirects !== undefined) ? self.followAllRedirects : false - if (self.followRedirect || self.followAllRedirects) + if (self.followRedirects || self.followAllRedirects) { self.redirects = self.redirects || [] - - self.headers = self.headers ? 
copy(self.headers) : {} + } self.setHost = false if (!self.hasHeader('host')) { self.setHeader('host', self.uri.hostname) if (self.uri.port) { if ( !(self.uri.port === 80 && self.uri.protocol === 'http:') && - !(self.uri.port === 443 && self.uri.protocol === 'https:') ) - self.setHeader('host', self.getHeader('host') + (':'+self.uri.port) ) + !(self.uri.port === 443 && self.uri.protocol === 'https:') ) { + self.setHeader('host', self.getHeader('host') + (':' + self.uri.port) ) + } } self.setHost = true } @@ -204,8 +460,8 @@ self.jar(self._jar || options.jar) if (!self.uri.port) { - if (self.uri.protocol == 'http:') {self.uri.port = 80} - else if (self.uri.protocol == 'https:') {self.uri.port = 443} + if (self.uri.protocol === 'http:') {self.uri.port = 80} + else if (self.uri.protocol === 'https:') {self.uri.port = 443} } if (self.proxy && !self.tunnel) { @@ -216,48 +472,47 @@ self.host = self.uri.hostname } - self.clientErrorHandler = function (error) { - if (self._aborted) return - if (self.req && self.req._reusedSocket && error.code === 'ECONNRESET' - && self.agent.addRequestNoreuse) { - self.agent = { addRequest: self.agent.addRequestNoreuse.bind(self.agent) } - self.start() - self.req.end() - return - } - if (self.timeout && self.timeoutTimer) { - clearTimeout(self.timeoutTimer) - self.timeoutTimer = null - } - self.emit('error', error) + if (options.form) { + self.form(options.form) } - self._parserErrorHandler = function (error) { - if (this.res) { - if (this.res.request) { - this.res.request.emit('error', error) + if (options.formData) { + var formData = options.formData + var requestForm = self.form() + var appendFormValue = function (key, value) { + if (value.hasOwnProperty('value') && value.hasOwnProperty('options')) { + requestForm.append(key, value.value, value.options) } else { - this.res.emit('error', error) + requestForm.append(key, value) + } + } + for (var formKey in formData) { + if (formData.hasOwnProperty(formKey)) { + var formValue = 
formData[formKey] + if (formValue instanceof Array) { + for (var j = 0; j < formValue.length; j++) { + appendFormValue(formKey, formValue[j]) + } + } else { + appendFormValue(formKey, formValue) + } } - } else { - this._httpMessage.emit('error', error) } } - if (options.form) { - self.form(options.form) + if (options.qs) { + self.qs(options.qs) } - if (options.qs) self.qs(options.qs) - if (self.uri.path) { self.path = self.uri.path } else { - self.path = self.uri.pathname + (self.uri.search || "") + self.path = self.uri.pathname + (self.uri.search || '') } - if (self.path.length === 0) self.path = '/' - + if (self.path.length === 0) { + self.path = '/' + } // Auth must happen last in case signing is dependent on other headers if (options.oauth) { @@ -277,26 +532,46 @@ } if (options.auth) { - if (Object.prototype.hasOwnProperty.call(options.auth, 'username')) options.auth.user = options.auth.username - if (Object.prototype.hasOwnProperty.call(options.auth, 'password')) options.auth.pass = options.auth.password - + if (Object.prototype.hasOwnProperty.call(options.auth, 'username')) { + options.auth.user = options.auth.username + } + if (Object.prototype.hasOwnProperty.call(options.auth, 'password')) { + options.auth.pass = options.auth.password + } + self.auth( options.auth.user, options.auth.pass, - options.auth.sendImmediately + options.auth.sendImmediately, + options.auth.bearer ) } - if (self.uri.auth && !self.hasHeader('authorization')) { - var authPieces = self.uri.auth.split(':').map(function(item){ return querystring.unescape(item) }) - self.auth(authPieces[0], authPieces.slice(1).join(':'), true) + if (self.gzip && !self.hasHeader('accept-encoding')) { + self.setHeader('accept-encoding', 'gzip') } - if (self.proxy && self.proxy.auth && !self.hasHeader('proxy-authorization') && !self.tunnel) { - self.setHeader('proxy-authorization', "Basic " + toBase64(self.proxy.auth.split(':').map(function(item){ return querystring.unescape(item)}).join(':'))) + + if 
(self.uri.auth && !self.hasHeader('authorization')) { + var uriAuthPieces = self.uri.auth.split(':').map(function(item){ return querystring.unescape(item) }) + self.auth(uriAuthPieces[0], uriAuthPieces.slice(1).join(':'), true) } + if (self.proxy && !self.tunnel) { + if (self.proxy.auth && !self.proxyAuthorization) { + var proxyAuthPieces = self.proxy.auth.split(':').map(function(item){ + return querystring.unescape(item) + }) + var authHeader = 'Basic ' + toBase64(proxyAuthPieces.join(':')) + self.proxyAuthorization = authHeader + } + if (self.proxyAuthorization) { + self.setHeader('proxy-authorization', self.proxyAuthorization) + } + } - if (self.proxy && !self.tunnel) self.path = (self.uri.protocol + '//' + self.uri.host + self.path) + if (self.proxy && !self.tunnel) { + self.path = (self.uri.protocol + '//' + self.uri.host + self.path) + } if (options.json) { self.json(options.json) @@ -320,7 +595,9 @@ length = self.body.length } if (length) { - if (!self.hasHeader('content-length')) self.setHeader('content-length', length) + if (!self.hasHeader('content-length')) { + self.setHeader('content-length', length) + } } else { throw new Error('Argument error, options.body.') } @@ -329,15 +606,21 @@ var protocol = self.proxy && !self.tunnel ? 
self.proxy.protocol : self.uri.protocol , defaultModules = {'http:':http, 'https:':https} , httpModules = self.httpModules || {} - ; + self.httpModule = httpModules[protocol] || defaultModules[protocol] - if (!self.httpModule) return this.emit('error', new Error("Invalid protocol")) + if (!self.httpModule) { + return self.emit('error', new Error('Invalid protocol: ' + protocol)) + } - if (options.ca) self.ca = options.ca + if (options.ca) { + self.ca = options.ca + } if (!self.agent) { - if (options.agentOptions) self.agentOptions = options.agentOptions + if (options.agentOptions) { + self.agentOptions = options.agentOptions + } if (options.agentClass) { self.agentClass = options.agentClass @@ -363,10 +646,14 @@ } self.on('pipe', function (src) { - if (self.ntick && self._started) throw new Error("You cannot pipe to this stream after the outbound request has started.") + if (self.ntick && self._started) { + throw new Error('You cannot pipe to this stream after the outbound request has started.') + } self.src = src if (isReadStream(src)) { - if (!self.hasHeader('content-type')) self.setHeader('content-type', mime.lookup(src.path)) + if (!self.hasHeader('content-type')) { + self.setHeader('content-type', mime.lookup(src.path)) + } } else { if (src.headers) { for (var i in src.headers) { @@ -375,45 +662,64 @@ } } } - if (self._json && !self.hasHeader('content-type')) + if (self._json && !self.hasHeader('content-type')) { self.setHeader('content-type', 'application/json') + } if (src.method && !self.explicitMethod) { self.method = src.method } } // self.on('pipe', function () { - // console.error("You have already piped to this stream. Pipeing twice is likely to break the request.") + // console.error('You have already piped to this stream. 
Pipeing twice is likely to break the request.') // }) }) - process.nextTick(function () { - if (self._aborted) return - - if (self._form) { - self.setHeaders(self._form.getHeaders()) - self._form.pipe(self) + defer(function () { + if (self._aborted) { + return } - if (self.body) { - if (Array.isArray(self.body)) { - self.body.forEach(function (part) { - self.write(part) - }) - } else { - self.write(self.body) + + var end = function () { + if (self._form) { + self._form.pipe(self) } - self.end() - } else if (self.requestBodyStream) { - console.warn("options.requestBodyStream is deprecated, please pass the request object to stream.pipe.") - self.requestBodyStream.pipe(self) - } else if (!self.src) { - if (self.method !== 'GET' && typeof self.method !== 'undefined') { - self.setHeader('content-length', 0) + if (self.body) { + if (Array.isArray(self.body)) { + self.body.forEach(function (part) { + self.write(part) + }) + } else { + self.write(self.body) + } + self.end() + } else if (self.requestBodyStream) { + console.warn('options.requestBodyStream is deprecated, please pass the request object to stream.pipe.') + self.requestBodyStream.pipe(self) + } else if (!self.src) { + if (self.method !== 'GET' && typeof self.method !== 'undefined') { + self.setHeader('content-length', 0) + } + self.end() } - self.end() } + + if (self._form && !self.hasHeader('content-length')) { + // Before ending the request, we had to compute the length of the whole form, asyncly + self.setHeader(self._form.getHeaders()) + self._form.getLength(function (err, length) { + if (!err) { + self.setHeader('content-length', length) + } + end() + }) + } else { + end() + } + self.ntick = true }) + } // Must call this when following a redirect from https to http or vice versa @@ -423,20 +729,13 @@ var self = this var protocol = self.uri.protocol - if (protocol === 'https:') { + if (protocol === 'https:' || self.tunnel) { // previously was doing http, now doing https // if it's https, then we might need 
to tunnel now. - if (self.proxy && self.canTunnel) { - self.tunnel = true - var tunnelFn = self.proxy.protocol === 'http:' - ? tunnel.httpsOverHttp : tunnel.httpsOverHttps - var tunnelOptions = { proxy: { host: self.proxy.hostname - , port: +self.proxy.port - , proxyAuth: self.proxy.auth } - , rejectUnauthorized: self.rejectUnauthorized - , ca: self.ca } - self.agent = tunnelFn(tunnelOptions) - return + if (self.proxy) { + if (self.setupTunnel()) { + return + } } self.httpModule = https @@ -453,12 +752,12 @@ } // if there's an agent, we need to get a new one. - if (self.agent) self.agent = self.getAgent() + if (self.agent) { + self.agent = self.getAgent() + } } else { // previously was doing https, now doing http - // stop any tunneling. - if (self.tunnel) self.tunnel = false self.httpModule = http switch (self.agentClass) { case ForeverAgent.SSL: @@ -481,79 +780,114 @@ } Request.prototype.getAgent = function () { - var Agent = this.agentClass + var self = this + var Agent = self.agentClass var options = {} - if (this.agentOptions) { - for (var i in this.agentOptions) { - options[i] = this.agentOptions[i] + if (self.agentOptions) { + for (var i in self.agentOptions) { + options[i] = self.agentOptions[i] } } - if (this.ca) options.ca = this.ca - if (this.ciphers) options.ciphers = this.ciphers - if (this.secureProtocol) options.secureProtocol = this.secureProtocol - if (typeof this.rejectUnauthorized !== 'undefined') options.rejectUnauthorized = this.rejectUnauthorized - - if (this.cert && this.key) { - options.key = this.key - options.cert = this.cert + if (self.ca) { + options.ca = self.ca + } + if (self.ciphers) { + options.ciphers = self.ciphers + } + if (self.secureProtocol) { + options.secureProtocol = self.secureProtocol + } + if (self.secureOptions) { + options.secureOptions = self.secureOptions + } + if (typeof self.rejectUnauthorized !== 'undefined') { + options.rejectUnauthorized = self.rejectUnauthorized + } + + if (self.cert && self.key) { + options.key 
= self.key + options.cert = self.cert } var poolKey = '' // different types of agents are in different pools - if (Agent !== this.httpModule.Agent) { + if (Agent !== self.httpModule.Agent) { poolKey += Agent.name } - if (!this.httpModule.globalAgent) { + if (!self.httpModule.globalAgent) { // node 0.4.x - options.host = this.host - options.port = this.port - if (poolKey) poolKey += ':' - poolKey += this.host + ':' + this.port + options.host = self.host + options.port = self.port + if (poolKey) { + poolKey += ':' + } + poolKey += self.host + ':' + self.port } // ca option is only relevant if proxy or destination are https - var proxy = this.proxy - if (typeof proxy === 'string') proxy = url.parse(proxy) + var proxy = self.proxy + if (typeof proxy === 'string') { + proxy = url.parse(proxy) + } var isHttps = (proxy && proxy.protocol === 'https:') || this.uri.protocol === 'https:' + if (isHttps) { if (options.ca) { - if (poolKey) poolKey += ':' + if (poolKey) { + poolKey += ':' + } poolKey += options.ca } if (typeof options.rejectUnauthorized !== 'undefined') { - if (poolKey) poolKey += ':' + if (poolKey) { + poolKey += ':' + } poolKey += options.rejectUnauthorized } - if (options.cert) + if (options.cert) { poolKey += options.cert.toString('ascii') + options.key.toString('ascii') + } if (options.ciphers) { - if (poolKey) poolKey += ':' + if (poolKey) { + poolKey += ':' + } poolKey += options.ciphers } if (options.secureProtocol) { - if (poolKey) poolKey += ':' + if (poolKey) { + poolKey += ':' + } poolKey += options.secureProtocol } + + if (options.secureOptions) { + if (poolKey) { + poolKey += ':' + } + poolKey += options.secureOptions + } } - if (this.pool === globalPool && !poolKey && Object.keys(options).length === 0 && this.httpModule.globalAgent) { + if (self.pool === globalPool && !poolKey && Object.keys(options).length === 0 && self.httpModule.globalAgent) { // not doing anything special. 
Use the globalAgent - return this.httpModule.globalAgent + return self.httpModule.globalAgent } // we're using a stored agent. Make sure it's protocol-specific - poolKey = this.uri.protocol + poolKey + poolKey = self.uri.protocol + poolKey - // already generated an agent for this setting - if (this.pool[poolKey]) return this.pool[poolKey] + // generate a new agent for this setting if none yet exists + if (!self.pool[poolKey]) { + self.pool[poolKey] = new Agent(options) + } - return this.pool[poolKey] = new Agent(options) + return self.pool[poolKey] } Request.prototype.start = function () { @@ -561,7 +895,9 @@ // this is usually called on the first write(), end() or on nextTick() var self = this - if (self._aborted) return + if (self._aborted) { + return + } self._started = true self.method = self.method || 'GET' @@ -580,14 +916,14 @@ delete reqOptions.auth debug('make request', self.uri.href) - self.req = self.httpModule.request(reqOptions, self.onResponse.bind(self)) + self.req = self.httpModule.request(reqOptions) if (self.timeout && !self.timeoutTimer) { self.timeoutTimer = setTimeout(function () { - self.req.abort() - var e = new Error("ETIMEDOUT") - e.code = "ETIMEDOUT" - self.emit("error", e) + self.abort() + var e = new Error('ETIMEDOUT') + e.code = 'ETIMEDOUT' + self.emit('error', e) }, self.timeout) // Set additional timeout on socket - in case if remote @@ -596,89 +932,128 @@ self.req.setTimeout(self.timeout, function () { if (self.req) { self.req.abort() - var e = new Error("ESOCKETTIMEDOUT") - e.code = "ESOCKETTIMEDOUT" - self.emit("error", e) + var e = new Error('ESOCKETTIMEDOUT') + e.code = 'ESOCKETTIMEDOUT' + self.emit('error', e) } }) } } - self.req.on('error', self.clientErrorHandler) + self.req.on('response', self.onRequestResponse.bind(self)) + self.req.on('error', self.onRequestError.bind(self)) self.req.on('drain', function() { self.emit('drain') }) + self.req.on('socket', function(socket) { + self.emit('socket', socket) + }) + self.on('end', 
function() { - if ( self.req.connection ) self.req.connection.removeListener('error', self._parserErrorHandler) + if ( self.req.connection ) { + self.req.connection.removeListener('error', connectionErrorHandler) + } }) self.emit('request', self.req) } -Request.prototype.onResponse = function (response) { + +Request.prototype.onRequestError = function (error) { + var self = this + if (self._aborted) { + return + } + if (self.req && self.req._reusedSocket && error.code === 'ECONNRESET' + && self.agent.addRequestNoreuse) { + self.agent = { addRequest: self.agent.addRequestNoreuse.bind(self.agent) } + self.start() + self.req.end() + return + } + if (self.timeout && self.timeoutTimer) { + clearTimeout(self.timeoutTimer) + self.timeoutTimer = null + } + self.emit('error', error) +} + +Request.prototype.onRequestResponse = function (response) { var self = this - debug('onResponse', self.uri.href, response.statusCode, response.headers) + debug('onRequestResponse', self.uri.href, response.statusCode, response.headers) response.on('end', function() { debug('response end', self.uri.href, response.statusCode, response.headers) - }); + }) - if (response.connection.listeners('error').indexOf(self._parserErrorHandler) === -1) { - response.connection.once('error', self._parserErrorHandler) + // The check on response.connection is a workaround for browserify. + if (response.connection && response.connection.listeners('error').indexOf(connectionErrorHandler) === -1) { + response.connection.setMaxListeners(0) + response.connection.once('error', connectionErrorHandler) } if (self._aborted) { debug('aborted', self.uri.href) response.resume() return } - if (self._paused) response.pause() - else response.resume() + if (self._paused) { + response.pause() + } else if (response.resume) { + // response.resume should be defined, but check anyway before calling. Workaround for browserify. 
+ response.resume() + } self.response = response response.request = self - response.toJSON = toJSON + response.toJSON = responseToJSON // XXX This is different on 0.10, because SSL is strict by default if (self.httpModule === https && - self.strictSSL && - !response.client.authorized) { + self.strictSSL && (!response.hasOwnProperty('client') || + !response.client.authorized)) { debug('strict ssl error', self.uri.href) - var sslErr = response.client.authorizationError - self.emit('error', new Error('SSL Error: '+ sslErr)) + var sslErr = response.hasOwnProperty('client') ? response.client.authorizationError : self.uri.href + ' does not support SSL' + self.emit('error', new Error('SSL Error: ' + sslErr)) return } - if (self.setHost && self.hasHeader('host')) delete self.headers[self.hasHeader('host')] + // Save the original host before any redirect (if it changes, we need to + // remove any authorization headers) + self.originalHost = self.headers.host + if (self.setHost) { + self.removeHeader('host') + } if (self.timeout && self.timeoutTimer) { clearTimeout(self.timeoutTimer) self.timeoutTimer = null } + var targetCookieJar = (self._jar && self._jar.setCookie) ? self._jar : globalCookieJar var addCookie = function (cookie) { - if (self._jar){ - var targetCookieJar = self._jar.setCookie?self._jar:cookieJar; - - //set the cookie if it's domain in the href's domain. - targetCookieJar.setCookie(cookie, self.uri.href, function(err){ - if (err){ - console.warn('set cookie failed,'+ err) - } - }) + //set the cookie if it's domain in the href's domain. 
+ try { + targetCookieJar.setCookie(cookie, self.uri.href, {ignoreError: true}) + } catch (e) { + self.emit('error', e) } - } - if (hasHeader('set-cookie', response.headers) && (!self._disableCookies)) { - var headerName = hasHeader('set-cookie', response.headers) - if (Array.isArray(response.headers[headerName])) response.headers[headerName].forEach(addCookie) - else addCookie(response.headers[headerName]) + response.caseless = caseless(response.headers) + + if (response.caseless.has('set-cookie') && (!self._disableCookies)) { + var headerName = response.caseless.has('set-cookie') + if (Array.isArray(response.headers[headerName])) { + response.headers[headerName].forEach(addCookie) + } else { + addCookie(response.headers[headerName]) + } } var redirectTo = null - if (response.statusCode >= 300 && response.statusCode < 400 && hasHeader('location', response.headers)) { - var location = response.headers[hasHeader('location', response.headers)] + if (response.statusCode >= 300 && response.statusCode < 400 && response.caseless.has('location')) { + var location = response.caseless.get('location') debug('redirect', location) if (self.followAllRedirects) { redirectTo = location - } else if (self.followRedirect) { + } else if (self.followRedirects) { switch (self.method) { case 'PATCH': case 'PUT': @@ -691,18 +1066,23 @@ break } } - } else if (response.statusCode == 401 && self._hasAuth && !self._sentAuth) { - var authHeader = response.headers[hasHeader('www-authenticate', response.headers)] - var authVerb = authHeader && authHeader.split(' ')[0] + } else if (response.statusCode === 401 && self._hasAuth && !self._sentAuth) { + var authHeader = response.caseless.get('www-authenticate') + var authVerb = authHeader && authHeader.split(' ')[0].toLowerCase() debug('reauth', authVerb) switch (authVerb) { - case 'Basic': + case 'basic': self.auth(self._user, self._pass, true) redirectTo = self.uri break - case 'Digest': + case 'bearer': + self.auth(null, null, true, self._bearer) 
+ redirectTo = self.uri + break + + case 'digest': // TODO: More complete implementation of RFC 2617. // - check challenge.algorithm // - support algorithm="MD5-sess" @@ -719,8 +1099,10 @@ var re = /([a-z0-9_-]+)=(?:"([^"]+)"|([a-z0-9_-]+))/gi for (;;) { var match = re.exec(authHeader) - if (!match) break - challenge[match[1]] = match[2] || match[3]; + if (!match) { + break + } + challenge[match[1]] = match[2] || match[3] } var ha1 = md5(self._user + ':' + challenge.realm + ':' + self._pass) @@ -744,12 +1126,12 @@ authHeader = [] for (var k in authValues) { - if (!authValues[k]) { - //ignore - } else if (k === 'qop' || k === 'nc' || k === 'algorithm') { - authHeader.push(k + '=' + authValues[k]) - } else { - authHeader.push(k + '="' + authValues[k] + '"') + if (authValues[k]) { + if (k === 'qop' || k === 'nc' || k === 'algorithm') { + authHeader.push(k + '=' + authValues[k]) + } else { + authHeader.push(k + '="' + authValues[k] + '"') + } } } authHeader = 'Digest ' + authHeader.join(', ') @@ -761,15 +1143,17 @@ } } - if (redirectTo) { + if (redirectTo && self.allowRedirect.call(self, response)) { debug('redirect to', redirectTo) // ignore any potential response body. it cannot possibly be useful // to us at this point. - if (self._paused) response.resume() + if (self._paused) { + response.resume() + } if (self._redirectsFollowed >= self.maxRedirects) { - self.emit('error', new Error("Exceeded maxRedirects. Probably stuck in a redirect loop "+self.uri.href)) + self.emit('error', new Error('Exceeded maxRedirects. 
Probably stuck in a redirect loop ' + self.uri.href)) return } self._redirectsFollowed += 1 @@ -791,25 +1175,33 @@ , redirectUri: redirectTo } ) - if (self.followAllRedirects && response.statusCode != 401) self.method = 'GET' + if (self.followAllRedirects && response.statusCode !== 401 && response.statusCode !== 307) { + self.method = 'GET' + } // self.method = 'GET' // Force all redirects to use GET || commented out fixes #215 delete self.src delete self.req delete self.agent delete self._started - if (response.statusCode != 401) { + if (response.statusCode !== 401 && response.statusCode !== 307) { // Remove parameters from the previous response, unless this is the second request // for a server that requires digest authentication. delete self.body delete self._form if (self.headers) { - if (self.hasHeader('host')) delete self.headers[self.hasHeader('host')] - if (self.hasHeader('content-type')) delete self.headers[self.hasHeader('content-type')] - if (self.hasHeader('content-length')) delete self.headers[self.hasHeader('content-length')] + self.removeHeader('host') + self.removeHeader('content-type') + self.removeHeader('content-length') + if (self.uri.hostname !== self.originalHost.split(':')[0]) { + // Remove authorization if changing hostnames (but not if just + // changing ports or protocols). This matches the behavior of curl: + // https://github.com/bagder/curl/blob/6beb0eee/lib/http.c#L710 + self.removeHeader('authorization') + } } } - self.emit('redirect'); + self.emit('redirect') self.init() return // Ignore the rest of the response @@ -818,14 +1210,45 @@ // Be a good stream and emit end when the response is finished. 
// Hack to emit end on close because of a core bug that never fires end response.on('close', function () { - if (!self._ended) self.response.emit('end') + if (!self._ended) { + self.response.emit('end') + } + }) + + response.on('end', function () { + self._ended = true }) + var dataStream + if (self.gzip) { + var contentEncoding = response.headers['content-encoding'] || 'identity' + contentEncoding = contentEncoding.trim().toLowerCase() + + if (contentEncoding === 'gzip') { + dataStream = zlib.createGunzip() + response.pipe(dataStream) + } else { + // Since previous versions didn't check for Content-Encoding header, + // ignore any invalid values to preserve backwards-compatibility + if (contentEncoding !== 'identity') { + debug('ignoring unrecognized Content-Encoding ' + contentEncoding) + } + dataStream = response + } + } else { + dataStream = response + } + if (self.encoding) { if (self.dests.length !== 0) { - console.error("Ignoring encoding parameter as this stream is being piped to another stream which makes the encoding option invalid.") + console.error('Ignoring encoding parameter as this stream is being piped to another stream which makes the encoding option invalid.') + } else if (dataStream.setEncoding) { + dataStream.setEncoding(self.encoding) } else { - response.setEncoding(self.encoding) + // Should only occur on node pre-v0.9.4 (joyent/node@9b5abe5) with + // zlib streams. + // If/When support for 0.9.4 is dropped, this should be unnecessary. 
+ dataStream = dataStream.pipe(stringstream(self.encoding)) } } @@ -835,50 +1258,52 @@ self.pipeDest(dest) }) - response.on("data", function (chunk) { + dataStream.on('data', function (chunk) { self._destdata = true - self.emit("data", chunk) + self.emit('data', chunk) }) - response.on("end", function (chunk) { - self._ended = true - self.emit("end", chunk) + dataStream.on('end', function (chunk) { + self.emit('end', chunk) + }) + dataStream.on('error', function (error) { + self.emit('error', error) }) - response.on("close", function () {self.emit("close")}) + dataStream.on('close', function () {self.emit('close')}) if (self.callback) { - var buffer = [] - var bodyLen = 0 - self.on("data", function (chunk) { - buffer.push(chunk) - bodyLen += chunk.length + var buffer = bl() + , strings = [] + + self.on('data', function (chunk) { + if (Buffer.isBuffer(chunk)) { + buffer.append(chunk) + } else { + strings.push(chunk) + } }) - self.on("end", function () { + self.on('end', function () { debug('end event', self.uri.href) if (self._aborted) { debug('aborted', self.uri.href) return } - if (buffer.length && Buffer.isBuffer(buffer[0])) { - debug('has body', self.uri.href, bodyLen) - var body = new Buffer(bodyLen) - var i = 0 - buffer.forEach(function (chunk) { - chunk.copy(body, i, 0, chunk.length) - i += chunk.length - }) + if (buffer.length) { + debug('has body', self.uri.href, buffer.length) if (self.encoding === null) { - response.body = body + // response.body = buffer + // can't move to this until https://github.com/rvagg/bl/issues/13 + response.body = buffer.slice() } else { - response.body = body.toString(self.encoding) + response.body = buffer.toString(self.encoding) } - } else if (buffer.length) { + } else if (strings.length) { // The UTF8 BOM [0xEF,0xBB,0xBF] is converted to [0xFE,0xFF] in the JS UTC16/UCS2 representation. // Strip this value out when the encoding is set to 'utf8', as upstream consumers won't expect it and it breaks JSON.parse(). 
- if (self.encoding === 'utf8' && buffer[0].length > 0 && buffer[0][0] === "\uFEFF") { - buffer[0] = buffer[0].substring(1) + if (self.encoding === 'utf8' && strings[0].length > 0 && strings[0][0] === '\uFEFF') { + strings[0] = strings[0].substring(1) } - response.body = buffer.join('') + response.body = strings.join('') } if (self._json) { @@ -887,116 +1312,112 @@ } catch (e) {} } debug('emitting complete', self.uri.href) - if(response.body == undefined && !self._json) { - response.body = ""; + if(typeof response.body === 'undefined' && !self._json) { + response.body = '' } self.emit('complete', response, response.body) }) } //if no callback else{ - self.on("end", function () { + self.on('end', function () { if (self._aborted) { debug('aborted', self.uri.href) return } - self.emit('complete', response); - }); + self.emit('complete', response) + }) } } debug('finish init function', self.uri.href) } Request.prototype.abort = function () { - this._aborted = true + var self = this + self._aborted = true - if (this.req) { - this.req.abort() + if (self.req) { + self.req.abort() } - else if (this.response) { - this.response.abort() + else if (self.response) { + self.response.abort() } - this.emit("abort") + self.emit('abort') } Request.prototype.pipeDest = function (dest) { - var response = this.response + var self = this + var response = self.response // Called after the response is received if (dest.headers && !dest.headersSent) { - if (hasHeader('content-type', response.headers)) { - var ctname = hasHeader('content-type', response.headers) - if (dest.setHeader) dest.setHeader(ctname, response.headers[ctname]) - else dest.headers[ctname] = response.headers[ctname] + if (response.caseless.has('content-type')) { + var ctname = response.caseless.has('content-type') + if (dest.setHeader) { + dest.setHeader(ctname, response.headers[ctname]) + } + else { + dest.headers[ctname] = response.headers[ctname] + } } - if (hasHeader('content-length', response.headers)) { - var 
clname = hasHeader('content-length', response.headers) - if (dest.setHeader) dest.setHeader(clname, response.headers[clname]) - else dest.headers[clname] = response.headers[clname] + if (response.caseless.has('content-length')) { + var clname = response.caseless.has('content-length') + if (dest.setHeader) { + dest.setHeader(clname, response.headers[clname]) + } else { + dest.headers[clname] = response.headers[clname] + } } } if (dest.setHeader && !dest.headersSent) { for (var i in response.headers) { - dest.setHeader(i, response.headers[i]) + // If the response content is being decoded, the Content-Encoding header + // of the response doesn't represent the piped content, so don't pass it. + if (!self.gzip || i !== 'content-encoding') { + dest.setHeader(i, response.headers[i]) + } } dest.statusCode = response.statusCode } - if (this.pipefilter) this.pipefilter(response, dest) -} - -// Composable API -Request.prototype.setHeader = function (name, value, clobber) { - if (clobber === undefined) clobber = true - if (clobber || !this.hasHeader(name)) this.headers[name] = value - else this.headers[this.hasHeader(name)] += ',' + value - return this -} -Request.prototype.setHeaders = function (headers) { - for (var i in headers) {this.setHeader(i, headers[i])} - return this -} -Request.prototype.hasHeader = function (header, headers) { - var headers = Object.keys(headers || this.headers) - , lheaders = headers.map(function (h) {return h.toLowerCase()}) - ; - header = header.toLowerCase() - for (var i=0;i=0.6.1 <0.7.0", + "_npmVersion": "1.4.9", + "_npmUser": { + "name": "tim-kos", + "email": "tim@debuggable.com" + }, + "maintainers": [ + { + "name": "tim-kos", + "email": "tim@debuggable.com" + } + ], + "dist": { + "shasum": "fdc90eed943fde11b893554b8cc63d0e899ba918", + "tarball": "http://registry.npmjs.org/retry/-/retry-0.6.1.tgz" + }, + "_resolved": "https://registry.npmjs.org/retry/-/retry-0.6.1.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/retry/Readme.md 
nodejs-0.11.15/deps/npm/node_modules/retry/Readme.md --- nodejs-0.11.13/deps/npm/node_modules/retry/Readme.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/retry/Readme.md 2015-01-20 21:22:17.000000000 +0000 @@ -14,7 +14,7 @@ The example below will retry a potentially failing `dns.resolve` operation `10` times using an exponential backoff strategy. With the default settings, this -means the last attempt is made after `34 minutes and 7 seconds`. +means the last attempt is made after `17 minutes and 3 seconds`. ``` javascript var dns = require('dns'); @@ -29,7 +29,7 @@ return; } - cb(operation.mainError(), addresses); + cb(err ? operation.mainError() : null, addresses); }); }); } @@ -69,8 +69,8 @@ * `retries`: The maximum amount of times to retry the operation. Default is `10`. * `factor`: The exponential factor to use. Default is `2`. -* `minTimeout`: The amount of time before starting the first retry. Default is `1000`. -* `maxTimeout`: The maximum amount of time between two retries. Default is `Infinity`. +* `minTimeout`: The number of milliseconds before starting the first retry. Default is `1000`. +* `maxTimeout`: The maximum number of milliseconds between two retries. Default is `Infinity`. * `randomize`: Randomizes the timeouts by multiplying with a factor between `1` to `2`. Default is `false`. 
The formula used to calculate the individual timeouts is: diff -Nru nodejs-0.11.13/deps/npm/node_modules/rimraf/package.json nodejs-0.11.15/deps/npm/node_modules/rimraf/package.json --- nodejs-0.11.13/deps/npm/node_modules/rimraf/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/rimraf/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "rimraf", - "version": "2.2.6", + "version": "2.2.8", "main": "rimraf.js", "description": "A deep deletion module for node (like `rm -rf`)", "author": { @@ -45,12 +45,29 @@ "email": "yosefd@microsoft.com" } ], - "readme": "`rm -rf` for node.\n\nInstall with `npm install rimraf`, or just drop rimraf.js somewhere.\n\n## API\n\n`rimraf(f, callback)`\n\nThe callback will be called with an error if there is one. Certain\nerrors are handled for you:\n\n* Windows: `EBUSY` and `ENOTEMPTY` - rimraf will back off a maximum of\n `opts.maxBusyTries` times before giving up.\n* `ENOENT` - If the file doesn't exist, rimraf will return\n successfully, since your desired outcome is already the case.\n\n## rimraf.sync\n\nIt can remove stuff synchronously, too. But that's not so good. Use\nthe async API. 
It's better.\n\n## CLI\n\nIf installed with `npm install rimraf -g` it can be used as a global\ncommand `rimraf ` which is useful for cross platform support.\n\n## mkdirp\n\nIf you need to create a directory recursively, check out\n[mkdirp](https://github.com/substack/node-mkdirp).\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/isaacs/rimraf/issues" }, "homepage": "https://github.com/isaacs/rimraf", - "_id": "rimraf@2.2.6", - "_from": "rimraf@~2.2.5" + "_id": "rimraf@2.2.8", + "_shasum": "e439be2aaee327321952730f99a8929e4fc50582", + "_from": "rimraf@latest", + "_npmVersion": "1.4.10", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "e439be2aaee327321952730f99a8929e4fc50582", + "tarball": "http://registry.npmjs.org/rimraf/-/rimraf-2.2.8.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.2.8.tgz", + "readme": "ERROR: No README data found!" 
} diff -Nru nodejs-0.11.13/deps/npm/node_modules/rimraf/rimraf.js nodejs-0.11.15/deps/npm/node_modules/rimraf/rimraf.js --- nodejs-0.11.13/deps/npm/node_modules/rimraf/rimraf.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/rimraf/rimraf.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,7 @@ module.exports = rimraf rimraf.sync = rimrafSync +var assert = require("assert") var path = require("path") var fs = require("fs") @@ -11,11 +12,36 @@ var isWindows = (process.platform === "win32") -function rimraf (p, cb) { +function defaults (options) { + var methods = [ + 'unlink', + 'chmod', + 'stat', + 'rmdir', + 'readdir' + ] + methods.forEach(function(m) { + options[m] = options[m] || fs[m] + m = m + 'Sync' + options[m] = options[m] || fs[m] + }) +} + +function rimraf (p, options, cb) { + if (typeof options === 'function') { + cb = options + options = {} + } + assert(p) + assert(options) + assert(typeof cb === 'function') + + defaults(options) + if (!cb) throw new Error("No callback passed to rimraf()") var busyTries = 0 - rimraf_(p, function CB (er) { + rimraf_(p, options, function CB (er) { if (er) { if (isWindows && (er.code === "EBUSY" || er.code === "ENOTEMPTY") && busyTries < exports.BUSYTRIES_MAX) { @@ -23,14 +49,14 @@ var time = busyTries * 100 // try again, with the same exact callback as this one. return setTimeout(function () { - rimraf_(p, CB) + rimraf_(p, options, CB) }, time) } // this one won't happen if graceful-fs is used. if (er.code === "EMFILE" && timeout < exports.EMFILE_MAX) { return setTimeout(function () { - rimraf_(p, CB) + rimraf_(p, options, CB) }, timeout ++) } @@ -54,64 +80,91 @@ // // If anyone ever complains about this, then I guess the strategy could // be made configurable somehow. But until then, YAGNI. 
-function rimraf_ (p, cb) { - fs.unlink(p, function (er) { +function rimraf_ (p, options, cb) { + assert(p) + assert(options) + assert(typeof cb === 'function') + + options.unlink(p, function (er) { if (er) { if (er.code === "ENOENT") return cb(null) if (er.code === "EPERM") - return (isWindows) ? fixWinEPERM(p, er, cb) : rmdir(p, er, cb) + return (isWindows) + ? fixWinEPERM(p, options, er, cb) + : rmdir(p, options, er, cb) if (er.code === "EISDIR") - return rmdir(p, er, cb) + return rmdir(p, options, er, cb) } return cb(er) }) } -function fixWinEPERM (p, er, cb) { - fs.chmod(p, 666, function (er2) { +function fixWinEPERM (p, options, er, cb) { + assert(p) + assert(options) + assert(typeof cb === 'function') + if (er) + assert(er instanceof Error) + + options.chmod(p, 666, function (er2) { if (er2) cb(er2.code === "ENOENT" ? null : er) else - fs.stat(p, function(er3, stats) { + options.stat(p, function(er3, stats) { if (er3) cb(er3.code === "ENOENT" ? null : er) else if (stats.isDirectory()) - rmdir(p, er, cb) + rmdir(p, options, er, cb) else - fs.unlink(p, cb) + options.unlink(p, cb) }) }) } -function fixWinEPERMSync (p, er, cb) { +function fixWinEPERMSync (p, options, er) { + assert(p) + assert(options) + if (er) + assert(er instanceof Error) + try { - fs.chmodSync(p, 666) + options.chmodSync(p, 666) } catch (er2) { - if (er2.code !== "ENOENT") + if (er2.code === "ENOENT") + return + else throw er } try { - var stats = fs.statSync(p) + var stats = options.statSync(p) } catch (er3) { - if (er3 !== "ENOENT") + if (er3.code === "ENOENT") + return + else throw er } if (stats.isDirectory()) - rmdirSync(p, er) + rmdirSync(p, options, er) else - fs.unlinkSync(p) + options.unlinkSync(p) } -function rmdir (p, originalEr, cb) { +function rmdir (p, options, originalEr, cb) { + assert(p) + assert(options) + if (originalEr) + assert(originalEr instanceof Error) + assert(typeof cb === 'function') + // try to rmdir first, and only readdir on ENOTEMPTY or EEXIST (SunOS) // if we 
guessed wrong, and it's not a directory, then // raise the original error. - fs.rmdir(p, function (er) { + options.rmdir(p, function (er) { if (er && (er.code === "ENOTEMPTY" || er.code === "EEXIST" || er.code === "EPERM")) - rmkids(p, cb) + rmkids(p, options, cb) else if (er && er.code === "ENOTDIR") cb(originalEr) else @@ -119,22 +172,26 @@ }) } -function rmkids(p, cb) { - fs.readdir(p, function (er, files) { +function rmkids(p, options, cb) { + assert(p) + assert(options) + assert(typeof cb === 'function') + + options.readdir(p, function (er, files) { if (er) return cb(er) var n = files.length if (n === 0) - return fs.rmdir(p, cb) + return options.rmdir(p, cb) var errState files.forEach(function (f) { - rimraf(path.join(p, f), function (er) { + rimraf(path.join(p, f), options, function (er) { if (errState) return if (er) return cb(errState = er) if (--n === 0) - fs.rmdir(p, cb) + options.rmdir(p, cb) }) }) }) @@ -143,36 +200,49 @@ // this looks simpler, and is strictly *faster*, but will // tie up the JavaScript thread and fail on excessively // deep directory trees. -function rimrafSync (p) { +function rimrafSync (p, options) { + options = options || {} + defaults(options) + + assert(p) + assert(options) + try { - fs.unlinkSync(p) + options.unlinkSync(p) } catch (er) { if (er.code === "ENOENT") return if (er.code === "EPERM") - return isWindows ? fixWinEPERMSync(p, er) : rmdirSync(p, er) + return isWindows ? 
fixWinEPERMSync(p, options, er) : rmdirSync(p, options, er) if (er.code !== "EISDIR") throw er - rmdirSync(p, er) + rmdirSync(p, options, er) } } -function rmdirSync (p, originalEr) { +function rmdirSync (p, options, originalEr) { + assert(p) + assert(options) + if (originalEr) + assert(originalEr instanceof Error) + try { - fs.rmdirSync(p) + options.rmdirSync(p) } catch (er) { if (er.code === "ENOENT") return if (er.code === "ENOTDIR") throw originalEr if (er.code === "ENOTEMPTY" || er.code === "EEXIST" || er.code === "EPERM") - rmkidsSync(p) + rmkidsSync(p, options) } } -function rmkidsSync (p) { - fs.readdirSync(p).forEach(function (f) { - rimrafSync(path.join(p, f)) +function rmkidsSync (p, options) { + assert(p) + assert(options) + options.readdirSync(p).forEach(function (f) { + rimrafSync(path.join(p, f), options) }) - fs.rmdirSync(p) + options.rmdirSync(p, options) } diff -Nru nodejs-0.11.13/deps/npm/node_modules/rimraf/test/run.sh nodejs-0.11.15/deps/npm/node_modules/rimraf/test/run.sh --- nodejs-0.11.13/deps/npm/node_modules/rimraf/test/run.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/rimraf/test/run.sh 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,16 @@ #!/bin/bash set -e +code=0 for i in test-*.js; do echo -n $i ... bash setup.sh node $i - ! 
[ -d target ] - echo "pass" + if [ -d target ]; then + echo "fail" + code=1 + else + echo "pass" + fi done rm -rf target +exit $code diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/bin/semver nodejs-0.11.15/deps/npm/node_modules/semver/bin/semver --- nodejs-0.11.13/deps/npm/node_modules/semver/bin/semver 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/bin/semver 2015-01-20 21:22:17.000000000 +0000 @@ -12,6 +12,7 @@ , inc = null , version = require("../package.json").version , loose = false + , identifier = undefined , semver = require("../semver") , reverse = false @@ -39,6 +40,7 @@ case "-i": case "--inc": case "--increment": switch (argv[0]) { case "major": case "minor": case "patch": case "prerelease": + case "premajor": case "preminor": case "prepatch": inc = argv.shift() break default: @@ -46,6 +48,9 @@ break } break + case "--preid": + identifier = argv.shift() + break case "-r": case "--range": range.push(argv.shift()) break @@ -87,7 +92,7 @@ }).map(function (v) { return semver.clean(v, loose) }).map(function (v) { - return inc ? semver.inc(v, inc, loose) : v + return inc ? semver.inc(v, inc, loose, identifier) : v }).forEach(function (v,i,_) { console.log(v) }) } @@ -106,10 +111,14 @@ ,"" ,"-i --increment []" ," Increment a version by the specified level. Level can" - ," be one of: major, minor, patch, or prerelease" - ," Default level is 'patch'." + ," be one of: major, minor, patch, premajor, preminor," + ," prepatch, or prerelease. Default level is 'patch'." ," Only one version may be specified." ,"" + ,"--preid " + ," Identifier to be used to prefix premajor, preminor," + ," prepatch or prerelease version increments." 
+ ,"" ,"-l --loose" ," Interpret versions and ranges loosely" ,"" diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/foot.js nodejs-0.11.15/deps/npm/node_modules/semver/foot.js --- nodejs-0.11.13/deps/npm/node_modules/semver/foot.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/foot.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ - -})( - typeof exports === 'object' ? exports : - typeof define === 'function' && define.amd ? {} : - semver = {} -); diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/foot.js.txt nodejs-0.11.15/deps/npm/node_modules/semver/foot.js.txt --- nodejs-0.11.13/deps/npm/node_modules/semver/foot.js.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/foot.js.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ + +})( + typeof exports === 'object' ? exports : + typeof define === 'function' && define.amd ? {} : + semver = {} +); diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/head.js nodejs-0.11.15/deps/npm/node_modules/semver/head.js --- nodejs-0.11.13/deps/npm/node_modules/semver/head.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/head.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -;(function(exports) { - diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/head.js.txt nodejs-0.11.15/deps/npm/node_modules/semver/head.js.txt --- nodejs-0.11.13/deps/npm/node_modules/semver/head.js.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/head.js.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +;(function(exports) { + diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/Makefile nodejs-0.11.15/deps/npm/node_modules/semver/Makefile --- nodejs-0.11.13/deps/npm/node_modules/semver/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -8,12 +8,12 @@ clean: rm -f $(files) 
-semver.browser.js: head.js semver.js foot.js - ( cat head.js; \ +semver.browser.js: head.js.txt semver.js foot.js.txt + ( cat head.js.txt; \ cat semver.js | \ egrep -v '^ *\/\* nomin \*\/' | \ perl -pi -e 's/debug\([^\)]+\)//g'; \ - cat foot.js ) > semver.browser.js + cat foot.js.txt ) > semver.browser.js semver.min.js: semver.browser.js uglifyjs -m semver.min.js diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/package.json nodejs-0.11.15/deps/npm/node_modules/semver/package.json --- nodejs-0.11.13/deps/npm/node_modules/semver/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "semver", - "version": "2.2.1", + "version": "4.1.0", "description": "The semantic version parser used by npm.", "main": "semver.js", "browser": "semver.browser.js", @@ -21,12 +21,35 @@ "bin": { "semver": "./bin/semver" }, - "readme": "semver(1) -- The semantic versioner for npm\n===========================================\n\n## Usage\n\n $ npm install semver\n\n semver.valid('1.2.3') // '1.2.3'\n semver.valid('a.b.c') // null\n semver.clean(' =v1.2.3 ') // '1.2.3'\n semver.satisfies('1.2.3', '1.x || >=2.5.0 || 5.0.0 - 7.2.3') // true\n semver.gt('1.2.3', '9.8.7') // false\n semver.lt('1.2.3', '9.8.7') // true\n\nAs a command-line utility:\n\n $ semver -h\n\n Usage: semver [ [...]] [-r | -i | -d ]\n Test if version(s) satisfy the supplied range(s), and sort them.\n\n Multiple versions or ranges may be supplied, unless increment\n or decrement options are specified. 
In that case, only a single\n version may be used, and it is incremented by the specified level\n\n Program exits successfully if any valid version satisfies\n all supplied ranges, and prints all satisfying versions.\n\n If no versions are valid, or ranges are not satisfied,\n then exits failure.\n\n Versions are printed in ascending order, so supplying\n multiple versions to the utility will just sort them.\n\n## Versions\n\nA \"version\" is described by the v2.0.0 specification found at\n.\n\nA leading `\"=\"` or `\"v\"` character is stripped off and ignored.\n\n## Ranges\n\nThe following range styles are supported:\n\n* `1.2.3` A specific version. When nothing else will do. Note that\n build metadata is still ignored, so `1.2.3+build2012` will satisfy\n this range.\n* `>1.2.3` Greater than a specific version.\n* `<1.2.3` Less than a specific version. If there is no prerelease\n tag on the version range, then no prerelease version will be allowed\n either, even though these are technically \"less than\".\n* `>=1.2.3` Greater than or equal to. Note that prerelease versions\n are NOT equal to their \"normal\" equivalents, so `1.2.3-beta` will\n not satisfy this range, but `2.3.0-beta` will.\n* `<=1.2.3` Less than or equal to. In this case, prerelease versions\n ARE allowed, so `1.2.3-beta` would satisfy.\n* `1.2.3 - 2.3.4` := `>=1.2.3 <=2.3.4`\n* `~1.2.3` := `>=1.2.3-0 <1.3.0-0` \"Reasonably close to 1.2.3\". When\n using tilde operators, prerelease versions are supported as well,\n but a prerelease of the next significant digit will NOT be\n satisfactory, so `1.3.0-beta` will not satisfy `~1.2.3`.\n* `^1.2.3` := `>=1.2.3-0 <2.0.0-0` \"Compatible with 1.2.3\". When\n using caret operators, anything from the specified version (including\n prerelease) will be supported up to, but not including, the next\n major version (or its prereleases). 
`1.5.1` will satisfy `^1.2.3`,\n while `1.2.2` and `2.0.0-beta` will not.\n* `^0.1.3` := `>=0.1.3-0 <0.2.0-0` \"Compatible with 0.1.3\". 0.x.x versions are\n special: the first non-zero component indicates potentially breaking changes,\n meaning the caret operator matches any version with the same first non-zero\n component starting at the specified version.\n* `^0.0.2` := `=0.0.2` \"Only the version 0.0.2 is considered compatible\"\n* `~1.2` := `>=1.2.0-0 <1.3.0-0` \"Any version starting with 1.2\"\n* `^1.2` := `>=1.2.0-0 <2.0.0-0` \"Any version compatible with 1.2\"\n* `1.2.x` := `>=1.2.0-0 <1.3.0-0` \"Any version starting with 1.2\"\n* `~1` := `>=1.0.0-0 <2.0.0-0` \"Any version starting with 1\"\n* `^1` := `>=1.0.0-0 <2.0.0-0` \"Any version compatible with 1\"\n* `1.x` := `>=1.0.0-0 <2.0.0-0` \"Any version starting with 1\"\n\n\nRanges can be joined with either a space (which implies \"and\") or a\n`||` (which implies \"or\").\n\n## Functions\n\nAll methods and classes take a final `loose` boolean argument that, if\ntrue, will be more forgiving about not-quite-valid semver strings.\nThe resulting output will always be 100% strict, of course.\n\nStrict-mode Comparators and Ranges will be strict about the SemVer\nstrings that they parse.\n\n* valid(v): Return the parsed version, or null if it's not valid.\n* inc(v, release): Return the version incremented by the release type\n (major, minor, patch, or prerelease), or null if it's not valid.\n\n### Comparison\n\n* gt(v1, v2): `v1 > v2`\n* gte(v1, v2): `v1 >= v2`\n* lt(v1, v2): `v1 < v2`\n* lte(v1, v2): `v1 <= v2`\n* eq(v1, v2): `v1 == v2` This is true if they're logically equivalent,\n even if they're not the exact same string. You already know how to\n compare strings.\n* neq(v1, v2): `v1 != v2` The opposite of eq.\n* cmp(v1, comparator, v2): Pass in a comparison string, and it'll call\n the corresponding function above. `\"===\"` and `\"!==\"` do simple\n string comparison, but are included for completeness. 
Throws if an\n invalid comparison string is provided.\n* compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or -1 if\n v2 is greater. Sorts in ascending order if passed to Array.sort().\n* rcompare(v1, v2): The reverse of compare. Sorts an array of versions\n in descending order when passed to Array.sort().\n\n\n### Ranges\n\n* validRange(range): Return the valid range or null if it's not valid\n* satisfies(version, range): Return true if the version satisfies the\n range.\n* maxSatisfying(versions, range): Return the highest version in the list\n that satisfies the range, or null if none of them do.\n* gtr(version, range): Return true if version is greater than all the\n versions possible in the range.\n* ltr(version, range): Return true if version is less than all the\n versions possible in the range.\n* outside(version, range, hilo): Return true if the version is outside\n the bounds of the range in either the high or low direction. The\n `hilo` argument must be either the string `'>'` or `'<'`. (This is\n the function called by `gtr` and `ltr`.)\n\nNote that, since ranges may be non-contiguous, a version might not be\ngreater than a range, less than a range, *or* satisfy a range! 
For\nexample, the range `1.2 <1.2.9 || >2.0.0` would have a hole from `1.2.9`\nuntil `2.0.0`, so the version `1.2.10` would not be greater than the\nrange (because 2.0.1 satisfies, which is higher), nor less than the\nrange (since 1.2.8 satisfies, which is lower), and it also does not\nsatisfy the range.\n\nIf you want to know if a version satisfies or does not satisfy a\nrange, use the `satisfies(version, range)` function.\n", - "readmeFilename": "README.md", + "gitHead": "f8db569b9fd00788d14064aaf81854ed81e1337a", "bugs": { "url": "https://github.com/isaacs/node-semver/issues" }, "homepage": "https://github.com/isaacs/node-semver", - "_id": "semver@2.2.1", - "_from": "semver@latest" + "_id": "semver@4.1.0", + "_shasum": "bc80a9ff68532814362cc3cfda3c7b75ed9c321c", + "_from": "semver@>=4.1.0 <5.0.0", + "_npmVersion": "2.1.3", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + }, + { + "name": "othiym23", + "email": "ogd@aoaioxxysz.net" + } + ], + "dist": { + "shasum": "bc80a9ff68532814362cc3cfda3c7b75ed9c321c", + "tarball": "http://registry.npmjs.org/semver/-/semver-4.1.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/semver/-/semver-4.1.0.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/README.md nodejs-0.11.15/deps/npm/node_modules/semver/README.md --- nodejs-0.11.13/deps/npm/node_modules/semver/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -34,52 +34,177 @@ ## Versions -A "version" is described by the v2.0.0 specification found at +A "version" is described by the `v2.0.0` specification found at . A leading `"="` or `"v"` character is stripped off and ignored. 
## Ranges -The following range styles are supported: +A `version range` is a set of `comparators` which specify versions +that satisfy the range. + +A `comparator` is composed of an `operator` and a `version`. The set +of primitive `operators` is: + +* `<` Less than +* `<=` Less than or equal to +* `>` Greater than +* `>=` Greater than or equal to +* `=` Equal. If no operator is specified, then equality is assumed, + so this operator is optional, but MAY be included. + +For example, the comparator `>=1.2.7` would match the versions +`1.2.7`, `1.2.8`, `2.5.3`, and `1.3.9`, but not the versions `1.2.6` +or `1.1.0`. + +Comparators can be joined by whitespace to form a `comparator set`, +which is satisfied by the **intersection** of all of the comparators +it includes. + +A range is composed of one or more comparator sets, joined by `||`. A +version matches a range if and only if every comparator in at least +one of the `||`-separated comparator sets is satisfied by the version. + +For example, the range `>=1.2.7 <1.3.0` would match the versions +`1.2.7`, `1.2.8`, and `1.2.99`, but not the versions `1.2.6`, `1.3.0`, +or `1.1.0`. + +The range `1.2.7 || >=1.2.9 <2.0.0` would match the versions `1.2.7`, +`1.2.9`, and `1.4.6`, but not the versions `1.2.8` or `2.0.0`. + +### Prerelease Tags + +If a version has a prerelease tag (for example, `1.2.3-alpha.3`) then +it will only be allowed to satisfy comparator sets if at least one +comparator with the same `[major, minor, patch]` tuple also has a +prerelease tag. + +For example, the range `>1.2.3-alpha.3` would be allowed to match the +version `1.2.3-alpha.7`, but it would *not* be satisfied by +`3.4.5-alpha.9`, even though `3.4.5-alpha.9` is technically "greater +than" `1.2.3-alpha.3` according to the SemVer sort rules. The version +range only accepts prerelease tags on the `1.2.3` version. 
The +version `3.4.5` *would* satisfy the range, because it does not have a +prerelease flag, and `3.4.5` is greater than `1.2.3-alpha.7`. + +The purpose for this behavior is twofold. First, prerelease versions +frequently are updated very quickly, and contain many breaking changes +that are (by the author's design) not yet fit for public consumption. +Therefore, by default, they are excluded from range matching +semantics. + +Second, a user who has opted into using a prerelease version has +clearly indicated the intent to use *that specific* set of +alpha/beta/rc versions. By including a prerelease tag in the range, +the user is indicating that they are aware of the risk. However, it +is still not appropriate to assume that they have opted into taking a +similar risk on the *next* set of prerelease versions. + +### Advanced Range Syntax + +Advanced range syntax desugars to primitive comparators in +deterministic ways. + +Advanced ranges may be combined in the same way as primitive +comparators using white space or `||`. + +#### Hyphen Ranges `X.Y.Z - A.B.C` + +Specifies an inclusive set. -* `1.2.3` A specific version. When nothing else will do. Note that - build metadata is still ignored, so `1.2.3+build2012` will satisfy - this range. -* `>1.2.3` Greater than a specific version. -* `<1.2.3` Less than a specific version. If there is no prerelease - tag on the version range, then no prerelease version will be allowed - either, even though these are technically "less than". -* `>=1.2.3` Greater than or equal to. Note that prerelease versions - are NOT equal to their "normal" equivalents, so `1.2.3-beta` will - not satisfy this range, but `2.3.0-beta` will. -* `<=1.2.3` Less than or equal to. In this case, prerelease versions - ARE allowed, so `1.2.3-beta` would satisfy. * `1.2.3 - 2.3.4` := `>=1.2.3 <=2.3.4` -* `~1.2.3` := `>=1.2.3-0 <1.3.0-0` "Reasonably close to 1.2.3". 
When - using tilde operators, prerelease versions are supported as well, - but a prerelease of the next significant digit will NOT be - satisfactory, so `1.3.0-beta` will not satisfy `~1.2.3`. -* `^1.2.3` := `>=1.2.3-0 <2.0.0-0` "Compatible with 1.2.3". When - using caret operators, anything from the specified version (including - prerelease) will be supported up to, but not including, the next - major version (or its prereleases). `1.5.1` will satisfy `^1.2.3`, - while `1.2.2` and `2.0.0-beta` will not. -* `^0.1.3` := `>=0.1.3-0 <0.2.0-0` "Compatible with 0.1.3". 0.x.x versions are - special: the first non-zero component indicates potentially breaking changes, - meaning the caret operator matches any version with the same first non-zero - component starting at the specified version. -* `^0.0.2` := `=0.0.2` "Only the version 0.0.2 is considered compatible" -* `~1.2` := `>=1.2.0-0 <1.3.0-0` "Any version starting with 1.2" -* `^1.2` := `>=1.2.0-0 <2.0.0-0` "Any version compatible with 1.2" -* `1.2.x` := `>=1.2.0-0 <1.3.0-0` "Any version starting with 1.2" -* `~1` := `>=1.0.0-0 <2.0.0-0` "Any version starting with 1" -* `^1` := `>=1.0.0-0 <2.0.0-0` "Any version compatible with 1" -* `1.x` := `>=1.0.0-0 <2.0.0-0` "Any version starting with 1" +If a partial version is provided as the first version in the inclusive +range, then the missing pieces are replaced with zeroes. + +* `1.2 - 2.3.4` := `>=1.2.0 <=2.3.4` + +If a partial version is provided as the second version in the +inclusive range, then all versions that start with the supplied parts +of the tuple are accepted, but nothing that would be greater than the +provided tuple parts. + +* `1.2.3 - 2.3` := `>=1.2.3 <2.4.0` +* `1.2.3 - 2` := `>=1.2.3 <3.0.0` + +#### X-Ranges `1.2.x` `1.X` `1.2.*` `*` + +Any of `X`, `x`, or `*` may be used to "stand in" for one of the +numeric values in the `[major, minor, patch]` tuple. 
+ +* `*` := `>=0.0.0` (Any version satisfies) +* `1.x` := `>=1.0.0 <2.0.0` (Matching major version) +* `1.2.x` := `>=1.2.0 <1.3.0` (Matching major and minor versions) + +A partial version range is treated as an X-Range, so the special +character is in fact optional. + +* `""` (empty string) := `*` := `>=0.0.0` +* `1` := `1.x.x` := `>=1.0.0 <2.0.0` +* `1.2` := `1.2.x` := `>=1.2.0 <1.3.0` + +#### Tilde Ranges `~1.2.3` `~1.2` `~1` + +Allows patch-level changes if a minor version is specified on the +comparator. Allows minor-level changes if not. + +* `~1.2.3` := `>=1.2.3 <1.(2+1).0` := `>=1.2.3 <1.3.0` +* `~1.2` := `>=1.2.0 <1.(2+1).0` := `>=1.2.0 <1.3.0` (Same as `1.2.x`) +* `~1` := `>=1.0.0 <(1+1).0.0` := `>=1.0.0 <2.0.0` (Same as `1.x`) +* `~0.2.3` := `>=0.2.3 <0.(2+1).0` := `>=0.2.3 <0.3.0` +* `~0.2` := `>=0.2.0 <0.(2+1).0` := `>=0.2.0 <0.3.0` (Same as `0.2.x`) +* `~0` := `>=0.0.0 <(0+1).0.0` := `>=0.0.0 <1.0.0` (Same as `0.x`) +* `~1.2.3-beta.2` := `>=1.2.3-beta.2 <1.3.0` Note that prereleases in + the `1.2.3` version will be allowed, if they are greater than or + equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but + `1.2.4-beta.2` would not, because it is a prerelease of a + different `[major, minor, patch]` tuple. + +Note: this is the same as the `~>` operator in rubygems. + +#### Caret Ranges `^1.2.3` `^0.2.5` `^0.0.4` + +Allows changes that do not modify the left-most non-zero digit in the +`[major, minor, patch]` tuple. In other words, this allows patch and +minor updates for versions `1.0.0` and above, patch updates for +versions `0.X >=0.1.0`, and *no* updates for versions `0.0.X`. + +Many authors treat a `0.x` version as if the `x` were the major +"breaking-change" indicator. + +Caret ranges are ideal when an author may make breaking changes +between `0.2.4` and `0.3.0` releases, which is a common practice. +However, it presumes that there will *not* be breaking changes between +`0.2.4` and `0.2.5`. 
It allows for changes that are presumed to be +additive (but non-breaking), according to commonly observed practices. + +* `^1.2.3` := `>=1.2.3 <2.0.0` +* `^0.2.3` := `>=0.2.3 <0.3.0` +* `^0.0.3` := `>=0.0.3 <0.0.4` +* `^1.2.3-beta.2` := `>=1.2.3-beta.2 <2.0.0` Note that prereleases in + the `1.2.3` version will be allowed, if they are greater than or + equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but + `1.2.4-beta.2` would not, because it is a prerelease of a + different `[major, minor, patch]` tuple. +* `^0.0.3-beta` := `>=0.0.3-beta <0.0.4` Note that prereleases in the + `0.0.3` version *only* will be allowed, if they are greater than or + equal to `beta`. So, `0.0.3-pr.2` would be allowed. + +When parsing caret ranges, a missing `patch` value desugars to the +number `0`, but will allow flexibility within that value, even if the +major and minor versions are both `0`. + +* `^1.2.x` := `>=1.2.0 <2.0.0` +* `^0.0.x` := `>=0.0.0 <0.1.0` +* `^0.0` := `>=0.0.0 <0.1.0` + +A missing `minor` and `patch` values will desugar to zero, but also +allow flexibility within those values, even if the major version is +zero. -Ranges can be joined with either a space (which implies "and") or a -`||` (which implies "or"). +* `^1.x` := `>=1.0.0 <2.0.0` +* `^0.x` := `>=0.0.0 <1.0.0` ## Functions @@ -90,42 +215,50 @@ Strict-mode Comparators and Ranges will be strict about the SemVer strings that they parse. -* valid(v): Return the parsed version, or null if it's not valid. -* inc(v, release): Return the version incremented by the release type - (major, minor, patch, or prerelease), or null if it's not valid. +* `valid(v)`: Return the parsed version, or null if it's not valid. 
+* `inc(v, release)`: Return the version incremented by the release + type (`major`, `premajor`, `minor`, `preminor`, `patch`, + `prepatch`, or `prerelease`), or null if it's not valid + * `premajor` in one call will bump the version up to the next major + version and down to a prerelease of that major version. + `preminor`, and `prepatch` work the same way. + * If called from a non-prerelease version, the `prerelease` will work the + same as `prepatch`. It increments the patch version, then makes a + prerelease. If the input version is already a prerelease it simply + increments it. ### Comparison -* gt(v1, v2): `v1 > v2` -* gte(v1, v2): `v1 >= v2` -* lt(v1, v2): `v1 < v2` -* lte(v1, v2): `v1 <= v2` -* eq(v1, v2): `v1 == v2` This is true if they're logically equivalent, +* `gt(v1, v2)`: `v1 > v2` +* `gte(v1, v2)`: `v1 >= v2` +* `lt(v1, v2)`: `v1 < v2` +* `lte(v1, v2)`: `v1 <= v2` +* `eq(v1, v2)`: `v1 == v2` This is true if they're logically equivalent, even if they're not the exact same string. You already know how to compare strings. -* neq(v1, v2): `v1 != v2` The opposite of eq. -* cmp(v1, comparator, v2): Pass in a comparison string, and it'll call +* `neq(v1, v2)`: `v1 != v2` The opposite of `eq`. +* `cmp(v1, comparator, v2)`: Pass in a comparison string, and it'll call the corresponding function above. `"==="` and `"!=="` do simple string comparison, but are included for completeness. Throws if an invalid comparison string is provided. -* compare(v1, v2): Return 0 if v1 == v2, or 1 if v1 is greater, or -1 if - v2 is greater. Sorts in ascending order if passed to Array.sort(). -* rcompare(v1, v2): The reverse of compare. Sorts an array of versions - in descending order when passed to Array.sort(). +* `compare(v1, v2)`: Return `0` if `v1 == v2`, or `1` if `v1` is greater, or `-1` if + `v2` is greater. Sorts in ascending order if passed to `Array.sort()`. +* `rcompare(v1, v2)`: The reverse of compare. 
Sorts an array of versions + in descending order when passed to `Array.sort()`. ### Ranges -* validRange(range): Return the valid range or null if it's not valid -* satisfies(version, range): Return true if the version satisfies the +* `validRange(range)`: Return the valid range or null if it's not valid +* `satisfies(version, range)`: Return true if the version satisfies the range. -* maxSatisfying(versions, range): Return the highest version in the list - that satisfies the range, or null if none of them do. -* gtr(version, range): Return true if version is greater than all the +* `maxSatisfying(versions, range)`: Return the highest version in the list + that satisfies the range, or `null` if none of them do. +* `gtr(version, range)`: Return `true` if version is greater than all the versions possible in the range. -* ltr(version, range): Return true if version is less than all the +* `ltr(version, range)`: Return `true` if version is less than all the versions possible in the range. -* outside(version, range, hilo): Return true if the version is outside +* `outside(version, range, hilo)`: Return true if the version is outside the bounds of the range in either the high or low direction. The `hilo` argument must be either the string `'>'` or `'<'`. (This is the function called by `gtr` and `ltr`.) @@ -134,8 +267,8 @@ greater than a range, less than a range, *or* satisfy a range! For example, the range `1.2 <1.2.9 || >2.0.0` would have a hole from `1.2.9` until `2.0.0`, so the version `1.2.10` would not be greater than the -range (because 2.0.1 satisfies, which is higher), nor less than the -range (since 1.2.8 satisfies, which is lower), and it also does not +range (because `2.0.1` satisfies, which is higher), nor less than the +range (since `1.2.8` satisfies, which is lower), and it also does not satisfy the range. 
If you want to know if a version satisfies or does not satisfy a diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/semver.browser.js nodejs-0.11.15/deps/npm/node_modules/semver/semver.browser.js --- nodejs-0.11.13/deps/npm/node_modules/semver/semver.browser.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/semver.browser.js 2015-01-20 21:22:17.000000000 +0000 @@ -128,18 +128,18 @@ src[XRANGEPLAIN] = '[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + - '(?:(' + src[PRERELEASE] + ')' + - ')?)?)?'; + '(?:' + src[PRERELEASE] + ')?' + + src[BUILD] + '?' + + ')?)?'; var XRANGEPLAINLOOSE = R++; src[XRANGEPLAINLOOSE] = '[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:(' + src[PRERELEASELOOSE] + ')' + - ')?)?)?'; + '(?:' + src[PRERELEASELOOSE] + ')?' + + src[BUILD] + '?' + + ')?)?'; -// >=2.x, for example, means >=2.0.0-0 -// <1.x would be the same as "<1.0.0-0", though. var XRANGE = R++; src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'; var XRANGELOOSE = R++; @@ -236,7 +236,7 @@ exports.clean = clean; function clean(version, loose) { - var s = parse(version, loose); + var s = parse(version.trim().replace(/^[=v]+/, ''), loose); return s ? 
s.version : null; } @@ -248,6 +248,8 @@ return version; else version = version.version; + } else if (typeof version !== 'string') { + throw new TypeError('Invalid Version: ' + version); } if (!(this instanceof SemVer)) @@ -320,7 +322,7 @@ return -1; else if (!this.prerelease.length && other.prerelease.length) return 1; - else if (!this.prerelease.lenth && !other.prerelease.length) + else if (!this.prerelease.length && !other.prerelease.length) return 0; var i = 0; @@ -341,19 +343,72 @@ } while (++i); }; -SemVer.prototype.inc = function(release) { +// preminor will bump the version up to the next minor release, and immediately +// down to pre-release. premajor and prepatch work the same way. +SemVer.prototype.inc = function(release, identifier) { switch (release) { - case 'major': + case 'premajor': + this.prerelease.length = 0; + this.patch = 0; + this.minor = 0; this.major++; - this.minor = -1; - case 'minor': + this.inc('pre', identifier); + break; + case 'preminor': + this.prerelease.length = 0; + this.patch = 0; this.minor++; - this.patch = -1; + this.inc('pre', identifier); + break; + case 'prepatch': + // If this is already a prerelease, it will bump to the next version + // drop any prereleases that might already exist, since they are not + // relevant at this point. + this.prerelease.length = 0; + this.inc('patch', identifier); + this.inc('pre', identifier); + break; + // If the input is a non-prerelease version, this acts the same as + // prepatch. + case 'prerelease': + if (this.prerelease.length === 0) + this.inc('patch', identifier); + this.inc('pre', identifier); + break; + + case 'major': + // If this is a pre-major version, bump up to the same major version. + // Otherwise increment major. 
+ // 1.0.0-5 bumps to 1.0.0 + // 1.1.0 bumps to 2.0.0 + if (this.minor !== 0 || this.patch !== 0 || this.prerelease.length === 0) + this.major++; + this.minor = 0; + this.patch = 0; + this.prerelease = []; + break; + case 'minor': + // If this is a pre-minor version, bump up to the same minor version. + // Otherwise increment minor. + // 1.2.0-5 bumps to 1.2.0 + // 1.2.1 bumps to 1.3.0 + if (this.patch !== 0 || this.prerelease.length === 0) + this.minor++; + this.patch = 0; + this.prerelease = []; + break; case 'patch': - this.patch++; + // If this is not a pre-release version, it will increment the patch. + // If it is a pre-release it will bump up to the same patch version. + // 1.2.0-5 patches to 1.2.0 + // 1.2.0 patches to 1.2.1 + if (this.prerelease.length === 0) + this.patch++; this.prerelease = []; break; - case 'prerelease': + // This probably shouldn't be used publicly. + // 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction. + case 'pre': if (this.prerelease.length === 0) this.prerelease = [0]; else { @@ -367,6 +422,15 @@ if (i === -1) // didn't increment anything this.prerelease.push(0); } + if (identifier) { + // 1.2.0-beta.1 bumps to 1.2.0-beta.2, + // 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0 + if (this.prerelease[0] === identifier) { + if (isNaN(this.prerelease[1])) + this.prerelease = [identifier, 0]; + } else + this.prerelease = [identifier, 0]; + } break; default: @@ -377,9 +441,14 @@ }; exports.inc = inc; -function inc(version, release, loose) { +function inc(version, release, loose, identifier) { + if (typeof(loose) === 'string') { + identifier = loose; + loose = undefined; + } + try { - return new SemVer(version, loose).inc(release).version; + return new SemVer(version, loose).inc(release, identifier).version; } catch (er) { return null; } @@ -472,8 +541,16 @@ function cmp(a, op, b, loose) { var ret; switch (op) { - case '===': ret = a === b; break; - case '!==': ret = a !== b; break; + case '===': + if (typeof a === 
'object') a = a.version; + if (typeof b === 'object') b = b.version; + ret = a === b; + break; + case '!==': + if (typeof a === 'object') a = a.version; + if (typeof b === 'object') b = b.version; + ret = a !== b; + break; case '': case '=': case '==': ret = eq(a, b, loose); break; case '!=': ret = neq(a, b, loose); break; case '>': ret = gt(a, b, loose); break; @@ -505,6 +582,8 @@ this.value = ''; else this.value = this.operator + this.semver.version; + + ; } var ANY = {}; @@ -516,24 +595,14 @@ throw new TypeError('Invalid comparator: ' + comp); this.operator = m[1]; + if (this.operator === '=') + this.operator = ''; + // if it literally is just '>' or '' then allow anything. if (!m[2]) this.semver = ANY; - else { + else this.semver = new SemVer(m[2], this.loose); - - // <1.2.3-rc DOES allow 1.2.3-beta (has prerelease) - // >=1.2.3 DOES NOT allow 1.2.3-beta - // <=1.2.3 DOES allow 1.2.3-beta - // However, <1.2.3 does NOT allow 1.2.3-beta, - // even though `1.2.3-beta < 1.2.3` - // The assumption is that the 1.2.3 version has something you - // *don't* want, so we push the prerelease down to the minimum. - if (this.operator === '<' && !this.semver.prerelease.length) { - this.semver.prerelease = ['0']; - this.semver.format(); - } - } }; Comparator.prototype.inspect = function() { @@ -546,8 +615,14 @@ Comparator.prototype.test = function(version) { ; - return (this.semver === ANY) ? true : - cmp(version, this.operator, this.semver, this.loose); + + if (this.semver === ANY) + return true; + + if (typeof version === 'string') + version = new SemVer(version, this.loose); + + return cmp(version, this.operator, this.semver, this.loose); }; @@ -684,20 +759,20 @@ if (isX(M)) ret = ''; else if (isX(m)) - ret = '>=' + M + '.0.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; else if (isX(p)) // ~1.2 == >=1.2.0- <1.3.0- - ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + M + '.' 
+ (+m + 1) + '.0'; else if (pr) { ; if (pr.charAt(0) !== '-') pr = '-' + pr; ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0-0'; + ' <' + M + '.' + (+m + 1) + '.0'; } else - // ~1.2.3 == >=1.2.3-0 <1.3.0-0 - ret = '>=' + M + '.' + m + '.' + p + '-0' + - ' <' + M + '.' + (+m + 1) + '.0-0'; + // ~1.2.3 == >=1.2.3 <1.3.0 + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + (+m + 1) + '.0'; ; return ret; @@ -717,6 +792,7 @@ } function replaceCaret(comp, loose) { + ; var r = loose ? re[CARETLOOSE] : re[CARET]; return comp.replace(r, function(_, M, m, p, pr) { ; @@ -725,35 +801,38 @@ if (isX(M)) ret = ''; else if (isX(m)) - ret = '>=' + M + '.0.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; else if (isX(p)) { if (M === '0') - ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; else - ret = '>=' + M + '.' + m + '.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + (+M + 1) + '.0.0'; } else if (pr) { ; if (pr.charAt(0) !== '-') pr = '-' + pr; if (M === '0') { if (m === '0') - ret = '=' + M + '.' + m + '.' + p + pr; + ret = '>=' + M + '.' + m + '.' + p + pr + + ' <' + M + '.' + m + '.' + (+p + 1); else ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0-0'; + ' <' + M + '.' + (+m + 1) + '.0'; } else ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + (+M + 1) + '.0.0-0'; + ' <' + (+M + 1) + '.0.0'; } else { + ; if (M === '0') { if (m === '0') - ret = '=' + M + '.' + m + '.' + p; + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + m + '.' + (+p + 1); else - ret = '>=' + M + '.' + m + '.' + p + '-0' + - ' <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + (+m + 1) + '.0'; } else - ret = '>=' + M + '.' + m + '.' + p + '-0' + - ' <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.' + m + '.' 
+ p + + ' <' + (+M + 1) + '.0.0'; } ; @@ -781,23 +860,27 @@ if (gtlt === '=' && anyX) gtlt = ''; - if (gtlt && anyX) { - // replace X with 0, and then append the -0 min-prerelease - if (xM) - M = 0; + if (xM) { + if (gtlt === '>' || gtlt === '<') { + // nothing is allowed + ret = '<0.0.0'; + } else { + // nothing is forbidden + ret = '*'; + } + } else if (gtlt && anyX) { + // replace X with 0 if (xm) m = 0; if (xp) p = 0; if (gtlt === '>') { - // >1 => >=2.0.0-0 - // >1.2 => >=1.3.0-0 - // >1.2.3 => >= 1.2.4-0 + // >1 => >=2.0.0 + // >1.2 => >=1.3.0 + // >1.2.3 => >= 1.2.4 gtlt = '>='; - if (xM) { - // no change - } else if (xm) { + if (xm) { M = +M + 1; m = 0; p = 0; @@ -805,20 +888,21 @@ m = +m + 1; p = 0; } + } else if (gtlt === '<=') { + // <=0.7.x is actually <0.8.0, since any 0.7.x should + // pass. Similarly, <=7.x is actually <8.0.0, etc. + gtlt = '<' + if (xm) + M = +M + 1 + else + m = +m + 1 } - - ret = gtlt + M + '.' + m + '.' + p + '-0'; - } else if (xM) { - // allow any - ret = '*'; + ret = gtlt + M + '.' + m + '.' + p; } else if (xm) { - // append '-0' onto the version, otherwise - // '1.x.x' matches '2.0.0-beta', since the tag - // *lowers* the version value - ret = '>=' + M + '.0.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; } else if (xp) { - ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + M + '.' 
+ (+m + 1) + '.0'; } ; @@ -837,9 +921,9 @@ // This function is passed to string.replace(re[HYPHENRANGE]) // M, m, patch, prerelease, build -// 1.2 - 3.4.5 => >=1.2.0-0 <=3.4.5 -// 1.2.3 - 3.4 => >=1.2.0-0 <3.5.0-0 Any 3.4.x will do -// 1.2 - 3.4 => >=1.2.0-0 <3.5.0-0 +// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5 +// 1.2.3 - 3.4 => >=1.2.0 <3.5.0 Any 3.4.x will do +// 1.2 - 3.4 => >=1.2.0 <3.5.0 function hyphenReplace($0, from, fM, fm, fp, fpr, fb, to, tM, tm, tp, tpr, tb) { @@ -847,18 +931,18 @@ if (isX(fM)) from = ''; else if (isX(fm)) - from = '>=' + fM + '.0.0-0'; + from = '>=' + fM + '.0.0'; else if (isX(fp)) - from = '>=' + fM + '.' + fm + '.0-0'; + from = '>=' + fM + '.' + fm + '.0'; else from = '>=' + from; if (isX(tM)) to = ''; else if (isX(tm)) - to = '<' + (+tM + 1) + '.0.0-0'; + to = '<' + (+tM + 1) + '.0.0'; else if (isX(tp)) - to = '<' + tM + '.' + (+tm + 1) + '.0-0'; + to = '<' + tM + '.' + (+tm + 1) + '.0'; else if (tpr) to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr; else @@ -872,6 +956,10 @@ Range.prototype.test = function(version) { if (!version) return false; + + if (typeof version === 'string') + version = new SemVer(version, this.loose); + for (var i = 0; i < this.set.length; i++) { if (testSet(this.set[i], version)) return true; @@ -884,6 +972,31 @@ if (!set[i].test(version)) return false; } + + if (version.prerelease.length) { + // Find the set of versions that are allowed to have prereleases + // For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0 + // That should allow `1.2.3-pr.2` to pass. + // However, `1.2.4-alpha.notready` should NOT be allowed, + // even though it's within the range set by the comparators. 
+ for (var i = 0; i < set.length; i++) { + ; + if (set[i].semver === ANY) + return true; + + if (set[i].semver.prerelease.length > 0) { + var allowed = set[i].semver; + if (allowed.major === version.major && + allowed.minor === version.minor && + allowed.patch === version.patch) + return true; + } + } + + // Version has a -pre, but it's not one of the ones we like. + return false; + } + return true; } Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/node_modules/semver/semver.browser.js.gz and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/node_modules/semver/semver.browser.js.gz differ diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/semver.js nodejs-0.11.15/deps/npm/node_modules/semver/semver.js --- nodejs-0.11.13/deps/npm/node_modules/semver/semver.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/semver.js 2015-01-20 21:22:17.000000000 +0000 @@ -138,18 +138,18 @@ src[XRANGEPLAIN] = '[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + - '(?:(' + src[PRERELEASE] + ')' + - ')?)?)?'; + '(?:' + src[PRERELEASE] + ')?' + + src[BUILD] + '?' + + ')?)?'; var XRANGEPLAINLOOSE = R++; src[XRANGEPLAINLOOSE] = '[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:(' + src[PRERELEASELOOSE] + ')' + - ')?)?)?'; + '(?:' + src[PRERELEASELOOSE] + ')?' + + src[BUILD] + '?' + + ')?)?'; -// >=2.x, for example, means >=2.0.0-0 -// <1.x would be the same as "<1.0.0-0", though. var XRANGE = R++; src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'; var XRANGELOOSE = R++; @@ -246,7 +246,7 @@ exports.clean = clean; function clean(version, loose) { - var s = parse(version, loose); + var s = parse(version.trim().replace(/^[=v]+/, ''), loose); return s ? 
s.version : null; } @@ -258,6 +258,8 @@ return version; else version = version.version; + } else if (typeof version !== 'string') { + throw new TypeError('Invalid Version: ' + version); } if (!(this instanceof SemVer)) @@ -330,7 +332,7 @@ return -1; else if (!this.prerelease.length && other.prerelease.length) return 1; - else if (!this.prerelease.lenth && !other.prerelease.length) + else if (!this.prerelease.length && !other.prerelease.length) return 0; var i = 0; @@ -351,19 +353,72 @@ } while (++i); }; -SemVer.prototype.inc = function(release) { +// preminor will bump the version up to the next minor release, and immediately +// down to pre-release. premajor and prepatch work the same way. +SemVer.prototype.inc = function(release, identifier) { switch (release) { - case 'major': + case 'premajor': + this.prerelease.length = 0; + this.patch = 0; + this.minor = 0; this.major++; - this.minor = -1; - case 'minor': + this.inc('pre', identifier); + break; + case 'preminor': + this.prerelease.length = 0; + this.patch = 0; this.minor++; - this.patch = -1; + this.inc('pre', identifier); + break; + case 'prepatch': + // If this is already a prerelease, it will bump to the next version + // drop any prereleases that might already exist, since they are not + // relevant at this point. + this.prerelease.length = 0; + this.inc('patch', identifier); + this.inc('pre', identifier); + break; + // If the input is a non-prerelease version, this acts the same as + // prepatch. + case 'prerelease': + if (this.prerelease.length === 0) + this.inc('patch', identifier); + this.inc('pre', identifier); + break; + + case 'major': + // If this is a pre-major version, bump up to the same major version. + // Otherwise increment major. 
+ // 1.0.0-5 bumps to 1.0.0 + // 1.1.0 bumps to 2.0.0 + if (this.minor !== 0 || this.patch !== 0 || this.prerelease.length === 0) + this.major++; + this.minor = 0; + this.patch = 0; + this.prerelease = []; + break; + case 'minor': + // If this is a pre-minor version, bump up to the same minor version. + // Otherwise increment minor. + // 1.2.0-5 bumps to 1.2.0 + // 1.2.1 bumps to 1.3.0 + if (this.patch !== 0 || this.prerelease.length === 0) + this.minor++; + this.patch = 0; + this.prerelease = []; + break; case 'patch': - this.patch++; + // If this is not a pre-release version, it will increment the patch. + // If it is a pre-release it will bump up to the same patch version. + // 1.2.0-5 patches to 1.2.0 + // 1.2.0 patches to 1.2.1 + if (this.prerelease.length === 0) + this.patch++; this.prerelease = []; break; - case 'prerelease': + // This probably shouldn't be used publicly. + // 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction. + case 'pre': if (this.prerelease.length === 0) this.prerelease = [0]; else { @@ -377,6 +432,15 @@ if (i === -1) // didn't increment anything this.prerelease.push(0); } + if (identifier) { + // 1.2.0-beta.1 bumps to 1.2.0-beta.2, + // 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0 + if (this.prerelease[0] === identifier) { + if (isNaN(this.prerelease[1])) + this.prerelease = [identifier, 0]; + } else + this.prerelease = [identifier, 0]; + } break; default: @@ -387,9 +451,14 @@ }; exports.inc = inc; -function inc(version, release, loose) { +function inc(version, release, loose, identifier) { + if (typeof(loose) === 'string') { + identifier = loose; + loose = undefined; + } + try { - return new SemVer(version, loose).inc(release).version; + return new SemVer(version, loose).inc(release, identifier).version; } catch (er) { return null; } @@ -482,8 +551,16 @@ function cmp(a, op, b, loose) { var ret; switch (op) { - case '===': ret = a === b; break; - case '!==': ret = a !== b; break; + case '===': + if (typeof a === 
'object') a = a.version; + if (typeof b === 'object') b = b.version; + ret = a === b; + break; + case '!==': + if (typeof a === 'object') a = a.version; + if (typeof b === 'object') b = b.version; + ret = a !== b; + break; case '': case '=': case '==': ret = eq(a, b, loose); break; case '!=': ret = neq(a, b, loose); break; case '>': ret = gt(a, b, loose); break; @@ -515,6 +592,8 @@ this.value = ''; else this.value = this.operator + this.semver.version; + + debug('comp', this); } var ANY = {}; @@ -526,24 +605,14 @@ throw new TypeError('Invalid comparator: ' + comp); this.operator = m[1]; + if (this.operator === '=') + this.operator = ''; + // if it literally is just '>' or '' then allow anything. if (!m[2]) this.semver = ANY; - else { + else this.semver = new SemVer(m[2], this.loose); - - // <1.2.3-rc DOES allow 1.2.3-beta (has prerelease) - // >=1.2.3 DOES NOT allow 1.2.3-beta - // <=1.2.3 DOES allow 1.2.3-beta - // However, <1.2.3 does NOT allow 1.2.3-beta, - // even though `1.2.3-beta < 1.2.3` - // The assumption is that the 1.2.3 version has something you - // *don't* want, so we push the prerelease down to the minimum. - if (this.operator === '<' && !this.semver.prerelease.length) { - this.semver.prerelease = ['0']; - this.semver.format(); - } - } }; Comparator.prototype.inspect = function() { @@ -556,8 +625,14 @@ Comparator.prototype.test = function(version) { debug('Comparator.test', version, this.loose); - return (this.semver === ANY) ? true : - cmp(version, this.operator, this.semver, this.loose); + + if (this.semver === ANY) + return true; + + if (typeof version === 'string') + version = new SemVer(version, this.loose); + + return cmp(version, this.operator, this.semver, this.loose); }; @@ -694,20 +769,20 @@ if (isX(M)) ret = ''; else if (isX(m)) - ret = '>=' + M + '.0.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; else if (isX(p)) // ~1.2 == >=1.2.0- <1.3.0- - ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' 
+ (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; else if (pr) { debug('replaceTilde pr', pr); if (pr.charAt(0) !== '-') pr = '-' + pr; ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0-0'; + ' <' + M + '.' + (+m + 1) + '.0'; } else - // ~1.2.3 == >=1.2.3-0 <1.3.0-0 - ret = '>=' + M + '.' + m + '.' + p + '-0' + - ' <' + M + '.' + (+m + 1) + '.0-0'; + // ~1.2.3 == >=1.2.3 <1.3.0 + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + (+m + 1) + '.0'; debug('tilde return', ret); return ret; @@ -727,6 +802,7 @@ } function replaceCaret(comp, loose) { + debug('caret', comp, loose); var r = loose ? re[CARETLOOSE] : re[CARET]; return comp.replace(r, function(_, M, m, p, pr) { debug('caret', comp, _, M, m, p, pr); @@ -735,35 +811,38 @@ if (isX(M)) ret = ''; else if (isX(m)) - ret = '>=' + M + '.0.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; else if (isX(p)) { if (M === '0') - ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; else - ret = '>=' + M + '.' + m + '.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + (+M + 1) + '.0.0'; } else if (pr) { debug('replaceCaret pr', pr); if (pr.charAt(0) !== '-') pr = '-' + pr; if (M === '0') { if (m === '0') - ret = '=' + M + '.' + m + '.' + p + pr; + ret = '>=' + M + '.' + m + '.' + p + pr + + ' <' + M + '.' + m + '.' + (+p + 1); else ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0-0'; + ' <' + M + '.' + (+m + 1) + '.0'; } else ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + (+M + 1) + '.0.0-0'; + ' <' + (+M + 1) + '.0.0'; } else { + debug('no pr'); if (M === '0') { if (m === '0') - ret = '=' + M + '.' + m + '.' + p; + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + m + '.' + (+p + 1); else - ret = '>=' + M + '.' + m + '.' + p + '-0' + - ' <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' 
+ m + '.' + p + + ' <' + M + '.' + (+m + 1) + '.0'; } else - ret = '>=' + M + '.' + m + '.' + p + '-0' + - ' <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.' + m + '.' + p + + ' <' + (+M + 1) + '.0.0'; } debug('caret return', ret); @@ -791,23 +870,27 @@ if (gtlt === '=' && anyX) gtlt = ''; - if (gtlt && anyX) { - // replace X with 0, and then append the -0 min-prerelease - if (xM) - M = 0; + if (xM) { + if (gtlt === '>' || gtlt === '<') { + // nothing is allowed + ret = '<0.0.0'; + } else { + // nothing is forbidden + ret = '*'; + } + } else if (gtlt && anyX) { + // replace X with 0 if (xm) m = 0; if (xp) p = 0; if (gtlt === '>') { - // >1 => >=2.0.0-0 - // >1.2 => >=1.3.0-0 - // >1.2.3 => >= 1.2.4-0 + // >1 => >=2.0.0 + // >1.2 => >=1.3.0 + // >1.2.3 => >= 1.2.4 gtlt = '>='; - if (xM) { - // no change - } else if (xm) { + if (xm) { M = +M + 1; m = 0; p = 0; @@ -815,20 +898,21 @@ m = +m + 1; p = 0; } + } else if (gtlt === '<=') { + // <=0.7.x is actually <0.8.0, since any 0.7.x should + // pass. Similarly, <=7.x is actually <8.0.0, etc. + gtlt = '<' + if (xm) + M = +M + 1 + else + m = +m + 1 } - - ret = gtlt + M + '.' + m + '.' + p + '-0'; - } else if (xM) { - // allow any - ret = '*'; + ret = gtlt + M + '.' + m + '.' + p; } else if (xm) { - // append '-0' onto the version, otherwise - // '1.x.x' matches '2.0.0-beta', since the tag - // *lowers* the version value - ret = '>=' + M + '.0.0-0 <' + (+M + 1) + '.0.0-0'; + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; } else if (xp) { - ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + (+m + 1) + '.0-0'; + ret = '>=' + M + '.' + m + '.0 <' + M + '.' 
+ (+m + 1) + '.0'; } debug('xRange return', ret); @@ -847,9 +931,9 @@ // This function is passed to string.replace(re[HYPHENRANGE]) // M, m, patch, prerelease, build -// 1.2 - 3.4.5 => >=1.2.0-0 <=3.4.5 -// 1.2.3 - 3.4 => >=1.2.0-0 <3.5.0-0 Any 3.4.x will do -// 1.2 - 3.4 => >=1.2.0-0 <3.5.0-0 +// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5 +// 1.2.3 - 3.4 => >=1.2.0 <3.5.0 Any 3.4.x will do +// 1.2 - 3.4 => >=1.2.0 <3.5.0 function hyphenReplace($0, from, fM, fm, fp, fpr, fb, to, tM, tm, tp, tpr, tb) { @@ -857,18 +941,18 @@ if (isX(fM)) from = ''; else if (isX(fm)) - from = '>=' + fM + '.0.0-0'; + from = '>=' + fM + '.0.0'; else if (isX(fp)) - from = '>=' + fM + '.' + fm + '.0-0'; + from = '>=' + fM + '.' + fm + '.0'; else from = '>=' + from; if (isX(tM)) to = ''; else if (isX(tm)) - to = '<' + (+tM + 1) + '.0.0-0'; + to = '<' + (+tM + 1) + '.0.0'; else if (isX(tp)) - to = '<' + tM + '.' + (+tm + 1) + '.0-0'; + to = '<' + tM + '.' + (+tm + 1) + '.0'; else if (tpr) to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr; else @@ -882,6 +966,10 @@ Range.prototype.test = function(version) { if (!version) return false; + + if (typeof version === 'string') + version = new SemVer(version, this.loose); + for (var i = 0; i < this.set.length; i++) { if (testSet(this.set[i], version)) return true; @@ -894,6 +982,31 @@ if (!set[i].test(version)) return false; } + + if (version.prerelease.length) { + // Find the set of versions that are allowed to have prereleases + // For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0 + // That should allow `1.2.3-pr.2` to pass. + // However, `1.2.4-alpha.notready` should NOT be allowed, + // even though it's within the range set by the comparators. 
+ for (var i = 0; i < set.length; i++) { + debug(set[i].semver); + if (set[i].semver === ANY) + return true; + + if (set[i].semver.prerelease.length > 0) { + var allowed = set[i].semver; + if (allowed.major === version.major && + allowed.minor === version.minor && + allowed.patch === version.patch) + return true; + } + } + + // Version has a -pre, but it's not one of the ones we like. + return false; + } + return true; } diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/semver.min.js nodejs-0.11.15/deps/npm/node_modules/semver/semver.min.js --- nodejs-0.11.13/deps/npm/node_modules/semver/semver.min.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/semver.min.js 2015-01-20 21:22:17.000000000 +0000 @@ -1 +1 @@ -(function(e){if(typeof module==="object"&&module.exports===e)e=module.exports=H;e.SEMVER_SPEC_VERSION="2.0.0";var r=e.re=[];var t=e.src=[];var n=0;var i=n++;t[i]="0|[1-9]\\d*";var s=n++;t[s]="[0-9]+";var o=n++;t[o]="\\d*[a-zA-Z-][a-zA-Z0-9-]*";var a=n++;t[a]="("+t[i]+")\\."+"("+t[i]+")\\."+"("+t[i]+")";var f=n++;t[f]="("+t[s]+")\\."+"("+t[s]+")\\."+"("+t[s]+")";var u=n++;t[u]="(?:"+t[i]+"|"+t[o]+")";var l=n++;t[l]="(?:"+t[s]+"|"+t[o]+")";var c=n++;t[c]="(?:-("+t[u]+"(?:\\."+t[u]+")*))";var p=n++;t[p]="(?:-?("+t[l]+"(?:\\."+t[l]+")*))";var h=n++;t[h]="[0-9A-Za-z-]+";var v=n++;t[v]="(?:\\+("+t[h]+"(?:\\."+t[h]+")*))";var m=n++;var g="v?"+t[a]+t[c]+"?"+t[v]+"?";t[m]="^"+g+"$";var w="[v=\\s]*"+t[f]+t[p]+"?"+t[v]+"?";var d=n++;t[d]="^"+w+"$";var y=n++;t[y]="((?:<|>)?=?)";var $=n++;t[$]=t[s]+"|x|X|\\*";var j=n++;t[j]=t[i]+"|x|X|\\*";var b=n++;t[b]="[v=\\s]*("+t[j]+")"+"(?:\\.("+t[j]+")"+"(?:\\.("+t[j]+")"+"(?:("+t[c]+")"+")?)?)?";var E=n++;t[E]="[v=\\s]*("+t[$]+")"+"(?:\\.("+t[$]+")"+"(?:\\.("+t[$]+")"+"(?:("+t[p]+")"+")?)?)?";var k=n++;t[k]="^"+t[y]+"\\s*"+t[b]+"$";var x=n++;t[x]="^"+t[y]+"\\s*"+t[E]+"$";var R=n++;t[R]="(?:~>?)";var S=n++;t[S]="(\\s*)"+t[R]+"\\s+";r[S]=new RegExp(t[S],"g");var V="$1~";var 
I=n++;t[I]="^"+t[R]+t[b]+"$";var A=n++;t[A]="^"+t[R]+t[E]+"$";var C=n++;t[C]="(?:\\^)";var T=n++;t[T]="(\\s*)"+t[C]+"\\s+";r[T]=new RegExp(t[T],"g");var M="$1^";var z=n++;t[z]="^"+t[C]+t[b]+"$";var P=n++;t[P]="^"+t[C]+t[E]+"$";var Z=n++;t[Z]="^"+t[y]+"\\s*("+w+")$|^$";var q=n++;t[q]="^"+t[y]+"\\s*("+g+")$|^$";var L=n++;t[L]="(\\s*)"+t[y]+"\\s*("+w+"|"+t[b]+")";r[L]=new RegExp(t[L],"g");var X="$1$2$3";var _=n++;t[_]="^\\s*("+t[b]+")"+"\\s+-\\s+"+"("+t[b]+")"+"\\s*$";var N=n++;t[N]="^\\s*("+t[E]+")"+"\\s+-\\s+"+"("+t[E]+")"+"\\s*$";var O=n++;t[O]="(<|>)?=?\\s*\\*";for(var B=0;B'};H.prototype.toString=function(){return this.version};H.prototype.compare=function(e){if(!(e instanceof H))e=new H(e,this.loose);return this.compareMain(e)||this.comparePre(e)};H.prototype.compareMain=function(e){if(!(e instanceof H))e=new H(e,this.loose);return Q(this.major,e.major)||Q(this.minor,e.minor)||Q(this.patch,e.patch)};H.prototype.comparePre=function(e){if(!(e instanceof H))e=new H(e,this.loose);if(this.prerelease.length&&!e.prerelease.length)return-1;else if(!this.prerelease.length&&e.prerelease.length)return 1;else if(!this.prerelease.lenth&&!e.prerelease.length)return 0;var r=0;do{var t=this.prerelease[r];var n=e.prerelease[r];if(t===undefined&&n===undefined)return 0;else if(n===undefined)return 1;else if(t===undefined)return-1;else if(t===n)continue;else return Q(t,n)}while(++r)};H.prototype.inc=function(e){switch(e){case"major":this.major++;this.minor=-1;case"minor":this.minor++;this.patch=-1;case"patch":this.patch++;this.prerelease=[];break;case"prerelease":if(this.prerelease.length===0)this.prerelease=[0];else{var r=this.prerelease.length;while(--r>=0){if(typeof this.prerelease[r]==="number"){this.prerelease[r]++;r=-2}}if(r===-1)this.prerelease.push(0)}break;default:throw new Error("invalid increment argument: "+e)}this.format();return this};e.inc=J;function J(e,r,t){try{return new H(e,t).inc(r).version}catch(n){return null}}e.compareIdentifiers=Q;var K=/^[0-9]+$/;function 
Q(e,r){var t=K.test(e);var n=K.test(r);if(t&&n){e=+e;r=+r}return t&&!n?-1:n&&!t?1:er?1:0}e.rcompareIdentifiers=U;function U(e,r){return Q(r,e)}e.compare=W;function W(e,r,t){return new H(e,t).compare(r)}e.compareLoose=Y;function Y(e,r){return W(e,r,true)}e.rcompare=er;function er(e,r,t){return W(r,e,t)}e.sort=rr;function rr(r,t){return r.sort(function(r,n){return e.compare(r,n,t)})}e.rsort=tr;function tr(r,t){return r.sort(function(r,n){return e.rcompare(r,n,t)})}e.gt=nr;function nr(e,r,t){return W(e,r,t)>0}e.lt=ir;function ir(e,r,t){return W(e,r,t)<0}e.eq=sr;function sr(e,r,t){return W(e,r,t)===0}e.neq=or;function or(e,r,t){return W(e,r,t)!==0}e.gte=ar;function ar(e,r,t){return W(e,r,t)>=0}e.lte=fr;function fr(e,r,t){return W(e,r,t)<=0}e.cmp=ur;function ur(e,r,t,n){var i;switch(r){case"===":i=e===t;break;case"!==":i=e!==t;break;case"":case"=":case"==":i=sr(e,t,n);break;case"!=":i=or(e,t,n);break;case">":i=nr(e,t,n);break;case">=":i=ar(e,t,n);break;case"<":i=ir(e,t,n);break;case"<=":i=fr(e,t,n);break;default:throw new TypeError("Invalid operator: "+r)}return i}e.Comparator=lr;function lr(e,r){if(e instanceof lr){if(e.loose===r)return e;else e=e.value}if(!(this instanceof lr))return new lr(e,r);this.loose=r;this.parse(e);if(this.semver===cr)this.value="";else this.value=this.operator+this.semver.version}var cr={};lr.prototype.parse=function(e){var t=this.loose?r[Z]:r[q];var n=e.match(t);if(!n)throw new TypeError("Invalid comparator: "+e);this.operator=n[1];if(!n[2])this.semver=cr;else{this.semver=new H(n[2],this.loose);if(this.operator==="<"&&!this.semver.prerelease.length){this.semver.prerelease=["0"];this.semver.format()}}};lr.prototype.inspect=function(){return''};lr.prototype.toString=function(){return this.value};lr.prototype.test=function(e){return this.semver===cr?true:ur(e,this.operator,this.semver,this.loose)};e.Range=pr;function pr(e,r){if(e instanceof pr&&e.loose===r)return e;if(!(this instanceof pr))return new 
pr(e,r);this.loose=r;this.raw=e;this.set=e.split(/\s*\|\|\s*/).map(function(e){return this.parseRange(e.trim())},this).filter(function(e){return e.length});if(!this.set.length){throw new TypeError("Invalid SemVer Range: "+e)}this.format()}pr.prototype.inspect=function(){return''};pr.prototype.format=function(){this.range=this.set.map(function(e){return e.join(" ").trim()}).join("||").trim();return this.range};pr.prototype.toString=function(){return this.range};pr.prototype.parseRange=function(e){var t=this.loose;e=e.trim();var n=t?r[N]:r[_];e=e.replace(n,Er);e=e.replace(r[L],X);e=e.replace(r[S],V);e=e.replace(r[T],M);e=e.split(/\s+/).join(" ");var i=t?r[Z]:r[q];var s=e.split(" ").map(function(e){return vr(e,t)}).join(" ").split(/\s+/);if(this.loose){s=s.filter(function(e){return!!e.match(i)})}s=s.map(function(e){return new lr(e,t)});return s};e.toComparators=hr;function hr(e,r){return new pr(e,r).set.map(function(e){return e.map(function(e){return e.value}).join(" ").trim().split(" ")})}function vr(e,r){e=dr(e,r);e=gr(e,r);e=$r(e,r);e=br(e,r);return e}function mr(e){return!e||e.toLowerCase()==="x"||e==="*"}function gr(e,r){return e.trim().split(/\s+/).map(function(e){return wr(e,r)}).join(" ")}function wr(e,t){var n=t?r[A]:r[I];return e.replace(n,function(e,r,t,n,i){var s;if(mr(r))s="";else if(mr(t))s=">="+r+".0.0-0 <"+(+r+1)+".0.0-0";else if(mr(n))s=">="+r+"."+t+".0-0 <"+r+"."+(+t+1)+".0-0";else if(i){if(i.charAt(0)!=="-")i="-"+i;s=">="+r+"."+t+"."+n+i+" <"+r+"."+(+t+1)+".0-0"}else s=">="+r+"."+t+"."+n+"-0"+" <"+r+"."+(+t+1)+".0-0";return s})}function dr(e,r){return e.trim().split(/\s+/).map(function(e){return yr(e,r)}).join(" ")}function yr(e,t){var n=t?r[P]:r[z];return e.replace(n,function(e,r,t,n,i){var s;if(mr(r))s="";else if(mr(t))s=">="+r+".0.0-0 <"+(+r+1)+".0.0-0";else if(mr(n)){if(r==="0")s=">="+r+"."+t+".0-0 <"+r+"."+(+t+1)+".0-0";else s=">="+r+"."+t+".0-0 <"+(+r+1)+".0.0-0"}else 
if(i){if(i.charAt(0)!=="-")i="-"+i;if(r==="0"){if(t==="0")s="="+r+"."+t+"."+n+i;else s=">="+r+"."+t+"."+n+i+" <"+r+"."+(+t+1)+".0-0"}else s=">="+r+"."+t+"."+n+i+" <"+(+r+1)+".0.0-0"}else{if(r==="0"){if(t==="0")s="="+r+"."+t+"."+n;else s=">="+r+"."+t+"."+n+"-0"+" <"+r+"."+(+t+1)+".0-0"}else s=">="+r+"."+t+"."+n+"-0"+" <"+(+r+1)+".0.0-0"}return s})}function $r(e,r){return e.split(/\s+/).map(function(e){return jr(e,r)}).join(" ")}function jr(e,t){e=e.trim();var n=t?r[x]:r[k];return e.replace(n,function(e,r,t,n,i,s){var o=mr(t);var a=o||mr(n);var f=a||mr(i);var u=f;if(r==="="&&u)r="";if(r&&u){if(o)t=0;if(a)n=0;if(f)i=0;if(r===">"){r=">=";if(o){}else if(a){t=+t+1;n=0;i=0}else if(f){n=+n+1;i=0}}e=r+t+"."+n+"."+i+"-0"}else if(o){e="*"}else if(a){e=">="+t+".0.0-0 <"+(+t+1)+".0.0-0"}else if(f){e=">="+t+"."+n+".0-0 <"+t+"."+(+n+1)+".0-0"}return e})}function br(e,t){return e.trim().replace(r[O],"")}function Er(e,r,t,n,i,s,o,a,f,u,l,c,p){if(mr(t))r="";else if(mr(n))r=">="+t+".0.0-0";else if(mr(i))r=">="+t+"."+n+".0-0";else r=">="+r;if(mr(f))a="";else if(mr(u))a="<"+(+f+1)+".0.0-0";else if(mr(l))a="<"+f+"."+(+u+1)+".0-0";else if(c)a="<="+f+"."+u+"."+l+"-"+c;else a="<="+a;return(r+" "+a).trim()}pr.prototype.test=function(e){if(!e)return false;for(var r=0;r",t)}e.outside=Ar;function Ar(e,r,t,n){e=new H(e,n);r=new pr(r,n);var i,s,o,a,f;switch(t){case">":i=nr;s=fr;o=ir;a=">";f=">=";break;case"<":i=ir;s=ar;o=nr;a="<";f="<=";break;default:throw new TypeError('Must provide a hilo val of "<" or ">"')}if(xr(e,r,n)){return false}for(var u=0;u)?=?)";var b=n++;t[b]=t[s]+"|x|X|\\*";var j=n++;t[j]=t[i]+"|x|X|\\*";var $=n++;t[$]="[v=\\s]*("+t[j]+")"+"(?:\\.("+t[j]+")"+"(?:\\.("+t[j]+")"+"(?:"+t[p]+")?"+t[v]+"?"+")?)?";var k=n++;t[k]="[v=\\s]*("+t[b]+")"+"(?:\\.("+t[b]+")"+"(?:\\.("+t[b]+")"+"(?:"+t[c]+")?"+t[v]+"?"+")?)?";var E=n++;t[E]="^"+t[y]+"\\s*"+t[$]+"$";var x=n++;t[x]="^"+t[y]+"\\s*"+t[k]+"$";var R=n++;t[R]="(?:~>?)";var S=n++;t[S]="(\\s*)"+t[R]+"\\s+";r[S]=new RegExp(t[S],"g");var 
V="$1~";var I=n++;t[I]="^"+t[R]+t[$]+"$";var T=n++;t[T]="^"+t[R]+t[k]+"$";var A=n++;t[A]="(?:\\^)";var C=n++;t[C]="(\\s*)"+t[A]+"\\s+";r[C]=new RegExp(t[C],"g");var M="$1^";var z=n++;t[z]="^"+t[A]+t[$]+"$";var N=n++;t[N]="^"+t[A]+t[k]+"$";var P=n++;t[P]="^"+t[y]+"\\s*("+w+")$|^$";var Z=n++;t[Z]="^"+t[y]+"\\s*("+g+")$|^$";var q=n++;t[q]="(\\s*)"+t[y]+"\\s*("+w+"|"+t[$]+")";r[q]=new RegExp(t[q],"g");var L="$1$2$3";var X=n++;t[X]="^\\s*("+t[$]+")"+"\\s+-\\s+"+"("+t[$]+")"+"\\s*$";var _=n++;t[_]="^\\s*("+t[k]+")"+"\\s+-\\s+"+"("+t[k]+")"+"\\s*$";var O=n++;t[O]="(<|>)?=?\\s*\\*";for(var B=0;B'};H.prototype.toString=function(){return this.version};H.prototype.compare=function(e){if(!(e instanceof H))e=new H(e,this.loose);return this.compareMain(e)||this.comparePre(e)};H.prototype.compareMain=function(e){if(!(e instanceof H))e=new H(e,this.loose);return Q(this.major,e.major)||Q(this.minor,e.minor)||Q(this.patch,e.patch)};H.prototype.comparePre=function(e){if(!(e instanceof H))e=new H(e,this.loose);if(this.prerelease.length&&!e.prerelease.length)return-1;else if(!this.prerelease.length&&e.prerelease.length)return 1;else if(!this.prerelease.length&&!e.prerelease.length)return 0;var r=0;do{var t=this.prerelease[r];var n=e.prerelease[r];if(t===undefined&&n===undefined)return 0;else if(n===undefined)return 1;else if(t===undefined)return-1;else if(t===n)continue;else return 
Q(t,n)}while(++r)};H.prototype.inc=function(e,r){switch(e){case"premajor":this.prerelease.length=0;this.patch=0;this.minor=0;this.major++;this.inc("pre",r);break;case"preminor":this.prerelease.length=0;this.patch=0;this.minor++;this.inc("pre",r);break;case"prepatch":this.prerelease.length=0;this.inc("patch",r);this.inc("pre",r);break;case"prerelease":if(this.prerelease.length===0)this.inc("patch",r);this.inc("pre",r);break;case"major":if(this.minor!==0||this.patch!==0||this.prerelease.length===0)this.major++;this.minor=0;this.patch=0;this.prerelease=[];break;case"minor":if(this.patch!==0||this.prerelease.length===0)this.minor++;this.patch=0;this.prerelease=[];break;case"patch":if(this.prerelease.length===0)this.patch++;this.prerelease=[];break;case"pre":if(this.prerelease.length===0)this.prerelease=[0];else{var t=this.prerelease.length;while(--t>=0){if(typeof this.prerelease[t]==="number"){this.prerelease[t]++;t=-2}}if(t===-1)this.prerelease.push(0)}if(r){if(this.prerelease[0]===r){if(isNaN(this.prerelease[1]))this.prerelease=[r,0]}else this.prerelease=[r,0]}break;default:throw new Error("invalid increment argument: "+e)}this.format();return this};e.inc=J;function J(e,r,t,n){if(typeof t==="string"){n=t;t=undefined}try{return new H(e,t).inc(r,n).version}catch(i){return null}}e.compareIdentifiers=Q;var K=/^[0-9]+$/;function Q(e,r){var t=K.test(e);var n=K.test(r);if(t&&n){e=+e;r=+r}return t&&!n?-1:n&&!t?1:er?1:0}e.rcompareIdentifiers=U;function U(e,r){return Q(r,e)}e.compare=W;function W(e,r,t){return new H(e,t).compare(r)}e.compareLoose=Y;function Y(e,r){return W(e,r,true)}e.rcompare=er;function er(e,r,t){return W(r,e,t)}e.sort=rr;function rr(r,t){return r.sort(function(r,n){return e.compare(r,n,t)})}e.rsort=tr;function tr(r,t){return r.sort(function(r,n){return e.rcompare(r,n,t)})}e.gt=nr;function nr(e,r,t){return W(e,r,t)>0}e.lt=ir;function ir(e,r,t){return W(e,r,t)<0}e.eq=sr;function sr(e,r,t){return W(e,r,t)===0}e.neq=ar;function ar(e,r,t){return 
W(e,r,t)!==0}e.gte=or;function or(e,r,t){return W(e,r,t)>=0}e.lte=fr;function fr(e,r,t){return W(e,r,t)<=0}e.cmp=ur;function ur(e,r,t,n){var i;switch(r){case"===":if(typeof e==="object")e=e.version;if(typeof t==="object")t=t.version;i=e===t;break;case"!==":if(typeof e==="object")e=e.version;if(typeof t==="object")t=t.version;i=e!==t;break;case"":case"=":case"==":i=sr(e,t,n);break;case"!=":i=ar(e,t,n);break;case">":i=nr(e,t,n);break;case">=":i=or(e,t,n);break;case"<":i=ir(e,t,n);break;case"<=":i=fr(e,t,n);break;default:throw new TypeError("Invalid operator: "+r)}return i}e.Comparator=lr;function lr(e,r){if(e instanceof lr){if(e.loose===r)return e;else e=e.value}if(!(this instanceof lr))return new lr(e,r);this.loose=r;this.parse(e);if(this.semver===pr)this.value="";else this.value=this.operator+this.semver.version}var pr={};lr.prototype.parse=function(e){var t=this.loose?r[P]:r[Z];var n=e.match(t);if(!n)throw new TypeError("Invalid comparator: "+e);this.operator=n[1];if(this.operator==="=")this.operator="";if(!n[2])this.semver=pr;else this.semver=new H(n[2],this.loose)};lr.prototype.inspect=function(){return''};lr.prototype.toString=function(){return this.value};lr.prototype.test=function(e){if(this.semver===pr)return true;if(typeof e==="string")e=new H(e,this.loose);return ur(e,this.operator,this.semver,this.loose)};e.Range=cr;function cr(e,r){if(e instanceof cr&&e.loose===r)return e;if(!(this instanceof cr))return new cr(e,r);this.loose=r;this.raw=e;this.set=e.split(/\s*\|\|\s*/).map(function(e){return this.parseRange(e.trim())},this).filter(function(e){return e.length});if(!this.set.length){throw new TypeError("Invalid SemVer Range: "+e)}this.format()}cr.prototype.inspect=function(){return''};cr.prototype.format=function(){this.range=this.set.map(function(e){return e.join(" ").trim()}).join("||").trim();return this.range};cr.prototype.toString=function(){return this.range};cr.prototype.parseRange=function(e){var t=this.loose;e=e.trim();var 
n=t?r[_]:r[X];e=e.replace(n,kr);e=e.replace(r[q],L);e=e.replace(r[S],V);e=e.replace(r[C],M);e=e.split(/\s+/).join(" ");var i=t?r[P]:r[Z];var s=e.split(" ").map(function(e){return vr(e,t)}).join(" ").split(/\s+/);if(this.loose){s=s.filter(function(e){return!!e.match(i)})}s=s.map(function(e){return new lr(e,t)});return s};e.toComparators=hr;function hr(e,r){return new cr(e,r).set.map(function(e){return e.map(function(e){return e.value}).join(" ").trim().split(" ")})}function vr(e,r){e=dr(e,r);e=gr(e,r);e=br(e,r);e=$r(e,r);return e}function mr(e){return!e||e.toLowerCase()==="x"||e==="*"}function gr(e,r){return e.trim().split(/\s+/).map(function(e){return wr(e,r)}).join(" ")}function wr(e,t){var n=t?r[T]:r[I];return e.replace(n,function(e,r,t,n,i){var s;if(mr(r))s="";else if(mr(t))s=">="+r+".0.0 <"+(+r+1)+".0.0";else if(mr(n))s=">="+r+"."+t+".0 <"+r+"."+(+t+1)+".0";else if(i){if(i.charAt(0)!=="-")i="-"+i;s=">="+r+"."+t+"."+n+i+" <"+r+"."+(+t+1)+".0"}else s=">="+r+"."+t+"."+n+" <"+r+"."+(+t+1)+".0";return s})}function dr(e,r){return e.trim().split(/\s+/).map(function(e){return yr(e,r)}).join(" ")}function yr(e,t){var n=t?r[N]:r[z];return e.replace(n,function(e,r,t,n,i){var s;if(mr(r))s="";else if(mr(t))s=">="+r+".0.0 <"+(+r+1)+".0.0";else if(mr(n)){if(r==="0")s=">="+r+"."+t+".0 <"+r+"."+(+t+1)+".0";else s=">="+r+"."+t+".0 <"+(+r+1)+".0.0"}else if(i){if(i.charAt(0)!=="-")i="-"+i;if(r==="0"){if(t==="0")s=">="+r+"."+t+"."+n+i+" <"+r+"."+t+"."+(+n+1);else s=">="+r+"."+t+"."+n+i+" <"+r+"."+(+t+1)+".0"}else s=">="+r+"."+t+"."+n+i+" <"+(+r+1)+".0.0"}else{if(r==="0"){if(t==="0")s=">="+r+"."+t+"."+n+" <"+r+"."+t+"."+(+n+1);else s=">="+r+"."+t+"."+n+" <"+r+"."+(+t+1)+".0"}else s=">="+r+"."+t+"."+n+" <"+(+r+1)+".0.0"}return s})}function br(e,r){return e.split(/\s+/).map(function(e){return jr(e,r)}).join(" ")}function jr(e,t){e=e.trim();var n=t?r[x]:r[E];return e.replace(n,function(e,r,t,n,i,s){var a=mr(t);var o=a||mr(n);var f=o||mr(i);var 
u=f;if(r==="="&&u)r="";if(a){if(r===">"||r==="<"){e="<0.0.0"}else{e="*"}}else if(r&&u){if(o)n=0;if(f)i=0;if(r===">"){r=">=";if(o){t=+t+1;n=0;i=0}else if(f){n=+n+1;i=0}}else if(r==="<="){r="<";if(o)t=+t+1;else n=+n+1}e=r+t+"."+n+"."+i}else if(o){e=">="+t+".0.0 <"+(+t+1)+".0.0"}else if(f){e=">="+t+"."+n+".0 <"+t+"."+(+n+1)+".0"}return e})}function $r(e,t){return e.trim().replace(r[O],"")}function kr(e,r,t,n,i,s,a,o,f,u,l,p,c){if(mr(t))r="";else if(mr(n))r=">="+t+".0.0";else if(mr(i))r=">="+t+"."+n+".0";else r=">="+r;if(mr(f))o="";else if(mr(u))o="<"+(+f+1)+".0.0";else if(mr(l))o="<"+f+"."+(+u+1)+".0";else if(p)o="<="+f+"."+u+"."+l+"-"+p;else o="<="+o;return(r+" "+o).trim()}cr.prototype.test=function(e){if(!e)return false;if(typeof e==="string")e=new H(e,this.loose);for(var r=0;r0){var n=e[t].semver;if(n.major===r.major&&n.minor===r.minor&&n.patch===r.patch)return true}}return false}return true}e.satisfies=xr;function xr(e,r,t){try{r=new cr(r,t)}catch(n){return false}return r.test(e)}e.maxSatisfying=Rr;function Rr(e,r,t){return e.filter(function(e){return xr(e,r,t)}).sort(function(e,r){return er(e,r,t)})[0]||null}e.validRange=Sr;function Sr(e,r){try{return new cr(e,r).range||"*"}catch(t){return null}}e.ltr=Vr;function Vr(e,r,t){return Tr(e,r,"<",t)}e.gtr=Ir;function Ir(e,r,t){return Tr(e,r,">",t)}e.outside=Tr;function Tr(e,r,t,n){e=new H(e,n);r=new cr(r,n);var i,s,a,o,f;switch(t){case">":i=nr;s=fr;a=ir;o=">";f=">=";break;case"<":i=ir;s=or;a=nr;o="<";f="<=";break;default:throw new TypeError('Must provide a hilo val of "<" or ">"')}if(xr(e,r,n)){return false}for(var u=0;u1.2.3', null], + ['~1.2.3', null], + ['<=1.2.3', null], + ['1.2.x', null] + ].forEach(function(tuple) { + var range = tuple[0]; + var version = tuple[1]; + var msg = 'clean(' + range + ') = ' + version; + t.equal(clean(range), version, msg); + }); + t.end(); +}); diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/test/gtr.js nodejs-0.11.15/deps/npm/node_modules/semver/test/gtr.js --- 
nodejs-0.11.13/deps/npm/node_modules/semver/test/gtr.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/test/gtr.js 2015-01-20 21:22:17.000000000 +0000 @@ -39,7 +39,7 @@ ['~v0.5.4-pre', '0.6.1-pre'], ['=0.7.x', '0.8.0'], ['=0.7.x', '0.8.0-asdf'], - ['<=0.7.x', '0.7.0'], + ['<0.7.x', '0.7.0'], ['~1.2.2', '1.3.0'], ['1.0.0 - 2.0.0', '2.2.3'], ['1.0.0', '1.0.1'], @@ -66,7 +66,7 @@ ['<1', '1.0.0beta', true], ['< 1', '1.0.0beta', true], ['=0.7.x', '0.8.2'], - ['<=0.7.x', '0.7.2'] + ['<0.7.x', '0.7.2'] ].forEach(function(tuple) { var range = tuple[0]; var version = tuple[1]; diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/test/index.js nodejs-0.11.15/deps/npm/node_modules/semver/test/index.js --- nodejs-0.11.13/deps/npm/node_modules/semver/test/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/test/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,5 @@ +'use strict'; + var tap = require('tap'); var test = tap.test; var semver = require('../semver.js'); @@ -130,6 +132,15 @@ // [range, version] // version should be included by range [['1.0.0 - 2.0.0', '1.2.3'], + ['^1.2.3+build', '1.2.3'], + ['^1.2.3+build', '1.3.0'], + ['1.2.3-pre+asdf - 2.4.3-pre+asdf', '1.2.3'], + ['1.2.3pre+asdf - 2.4.3-pre+asdf', '1.2.3', true], + ['1.2.3-pre+asdf - 2.4.3pre+asdf', '1.2.3', true], + ['1.2.3pre+asdf - 2.4.3pre+asdf', '1.2.3', true], + ['1.2.3-pre+asdf - 2.4.3-pre+asdf', '1.2.3-pre.2'], + ['1.2.3-pre+asdf - 2.4.3-pre+asdf', '2.4.3-alpha'], + ['1.2.3+asdf - 2.4.3+asdf', '1.2.3'], ['1.0.0', '1.0.0'], ['>=*', '0.2.4'], ['', '1.0.0'], @@ -187,13 +198,11 @@ ['>= 1', '1.0.0'], ['<1.2', '1.1.1'], ['< 1.2', '1.1.1'], - ['1', '1.0.0beta', true], ['~v0.5.4-pre', '0.5.5'], ['~v0.5.4-pre', '0.5.4'], ['=0.7.x', '0.7.2'], + ['<=0.7.x', '0.7.2'], ['>=0.7.x', '0.7.2'], - ['=0.7.x', '0.7.0-asdf'], - ['>=0.7.x', '0.7.0-asdf'], ['<=0.7.x', '0.6.2'], ['~1.2.1 >=1.2.3', '1.2.3'], ['~1.2.1 =1.2.3', '1.2.3'], @@ -205,17 
+214,15 @@ ['1.2.3 >=1.2.1', '1.2.3'], ['>=1.2.3 >=1.2.1', '1.2.3'], ['>=1.2.1 >=1.2.3', '1.2.3'], - ['<=1.2.3', '1.2.3-beta'], - ['>1.2', '1.3.0-beta'], ['>=1.2', '1.2.8'], ['^1.2.3', '1.8.1'], - ['^1.2.3', '1.2.3-beta'], ['^0.1.2', '0.1.2'], ['^0.1', '0.1.2'], ['^1.2', '1.4.2'], ['^1.2 ^1', '1.4.2'], - ['^1.2', '1.2.0-pre'], - ['^1.2.3', '1.2.3-pre'] + ['^1.2.3-alpha', '1.2.3-pre'], + ['^1.2.0-alpha', '1.2.0-pre'], + ['^0.0.1-alpha', '0.0.1-beta'] ].forEach(function(v) { var range = v[0]; var ver = v[1]; @@ -229,6 +236,20 @@ // [range, version] // version should not be included by range [['1.0.0 - 2.0.0', '2.2.3'], + ['1.2.3+asdf - 2.4.3+asdf', '1.2.3-pre.2'], + ['1.2.3+asdf - 2.4.3+asdf', '2.4.3-alpha'], + ['^1.2.3+build', '2.0.0'], + ['^1.2.3+build', '1.2.0'], + ['^1.2.3', '1.2.3-pre'], + ['^1.2', '1.2.0-pre'], + ['>1.2', '1.3.0-beta'], + ['<=1.2.3', '1.2.3-beta'], + ['^1.2.3', '1.2.3-beta'], + ['=0.7.x', '0.7.0-asdf'], + ['>=0.7.x', '0.7.0-asdf'], + ['1', '1.0.0beta', true], + ['<1', '1.0.0beta', true], + ['< 1', '1.0.0beta', true], ['1.0.0', '1.0.1'], ['>=1.0.0', '0.0.0'], ['>=1.0.0', '0.0.1'], @@ -268,11 +289,9 @@ ['>=1.2', '1.1.1'], ['1', '2.0.0beta', true], ['~v0.5.4-beta', '0.5.4-alpha'], - ['<1', '1.0.0beta', true], - ['< 1', '1.0.0beta', true], ['=0.7.x', '0.8.2'], ['>=0.7.x', '0.6.2'], - ['<=0.7.x', '0.7.2'], + ['<0.7.x', '0.7.2'], ['<1.2.3', '1.2.3-beta'], ['=1.2.3', '1.2.3-beta'], ['>1.2', '1.2.8'], @@ -294,16 +313,23 @@ }); test('\nincrement versions test', function(t) { - // [version, inc, result] - // inc(version, inc) -> result +// [version, inc, result, identifier] +// inc(version, inc) -> result [['1.2.3', 'major', '2.0.0'], ['1.2.3', 'minor', '1.3.0'], ['1.2.3', 'patch', '1.2.4'], ['1.2.3tag', 'major', '2.0.0', true], ['1.2.3-tag', 'major', '2.0.0'], ['1.2.3', 'fake', null], + ['1.2.0-0', 'patch', '1.2.0'], ['fake', 'major', null], - ['1.2.3', 'prerelease', '1.2.3-0'], + ['1.2.3-4', 'major', '2.0.0'], + ['1.2.3-4', 'minor', '1.3.0'], + 
['1.2.3-4', 'patch', '1.2.3'], + ['1.2.3-alpha.0.beta', 'major', '2.0.0'], + ['1.2.3-alpha.0.beta', 'minor', '1.3.0'], + ['1.2.3-alpha.0.beta', 'patch', '1.2.3'], + ['1.2.4', 'prerelease', '1.2.5-0'], ['1.2.3-0', 'prerelease', '1.2.3-1'], ['1.2.3-alpha.0', 'prerelease', '1.2.3-alpha.1'], ['1.2.3-alpha.1', 'prerelease', '1.2.3-alpha.2'], @@ -319,14 +345,67 @@ ['1.2.3-alpha.10.beta.2', 'prerelease', '1.2.3-alpha.10.beta.3'], ['1.2.3-alpha.9.beta', 'prerelease', '1.2.3-alpha.10.beta'], ['1.2.3-alpha.10.beta', 'prerelease', '1.2.3-alpha.11.beta'], - ['1.2.3-alpha.11.beta', 'prerelease', '1.2.3-alpha.12.beta'] + ['1.2.3-alpha.11.beta', 'prerelease', '1.2.3-alpha.12.beta'], + ['1.2.0', 'prepatch', '1.2.1-0'], + ['1.2.0-1', 'prepatch', '1.2.1-0'], + ['1.2.0', 'preminor', '1.3.0-0'], + ['1.2.3-1', 'preminor', '1.3.0-0'], + ['1.2.0', 'premajor', '2.0.0-0'], + ['1.2.3-1', 'premajor', '2.0.0-0'], + ['1.2.0-1', 'minor', '1.2.0'], + ['1.0.0-1', 'major', '1.0.0'], + + ['1.2.3', 'major', '2.0.0', false, 'dev'], + ['1.2.3', 'minor', '1.3.0', false, 'dev'], + ['1.2.3', 'patch', '1.2.4', false, 'dev'], + ['1.2.3tag', 'major', '2.0.0', true, 'dev'], + ['1.2.3-tag', 'major', '2.0.0', false, 'dev'], + ['1.2.3', 'fake', null, false, 'dev'], + ['1.2.0-0', 'patch', '1.2.0', false, 'dev'], + ['fake', 'major', null, false, 'dev'], + ['1.2.3-4', 'major', '2.0.0', false, 'dev'], + ['1.2.3-4', 'minor', '1.3.0', false, 'dev'], + ['1.2.3-4', 'patch', '1.2.3', false, 'dev'], + ['1.2.3-alpha.0.beta', 'major', '2.0.0', false, 'dev'], + ['1.2.3-alpha.0.beta', 'minor', '1.3.0', false, 'dev'], + ['1.2.3-alpha.0.beta', 'patch', '1.2.3', false, 'dev'], + ['1.2.4', 'prerelease', '1.2.5-dev.0', false, 'dev'], + ['1.2.3-0', 'prerelease', '1.2.3-dev.0', false, 'dev'], + ['1.2.3-alpha.0', 'prerelease', '1.2.3-dev.0', false, 'dev'], + ['1.2.3-alpha.0', 'prerelease', '1.2.3-alpha.1', false, 'alpha'], + ['1.2.3-alpha.0.beta', 'prerelease', '1.2.3-dev.0', false, 'dev'], + ['1.2.3-alpha.0.beta', 'prerelease', 
'1.2.3-alpha.1.beta', false, 'alpha'], + ['1.2.3-alpha.10.0.beta', 'prerelease', '1.2.3-dev.0', false, 'dev'], + ['1.2.3-alpha.10.0.beta', 'prerelease', '1.2.3-alpha.10.1.beta', false, 'alpha'], + ['1.2.3-alpha.10.1.beta', 'prerelease', '1.2.3-alpha.10.2.beta', false, 'alpha'], + ['1.2.3-alpha.10.2.beta', 'prerelease', '1.2.3-alpha.10.3.beta', false, 'alpha'], + ['1.2.3-alpha.10.beta.0', 'prerelease', '1.2.3-dev.0', false, 'dev'], + ['1.2.3-alpha.10.beta.0', 'prerelease', '1.2.3-alpha.10.beta.1', false, 'alpha'], + ['1.2.3-alpha.10.beta.1', 'prerelease', '1.2.3-alpha.10.beta.2', false, 'alpha'], + ['1.2.3-alpha.10.beta.2', 'prerelease', '1.2.3-alpha.10.beta.3', false, 'alpha'], + ['1.2.3-alpha.9.beta', 'prerelease', '1.2.3-dev.0', false, 'dev'], + ['1.2.3-alpha.9.beta', 'prerelease', '1.2.3-alpha.10.beta', false, 'alpha'], + ['1.2.3-alpha.10.beta', 'prerelease', '1.2.3-alpha.11.beta', false, 'alpha'], + ['1.2.3-alpha.11.beta', 'prerelease', '1.2.3-alpha.12.beta', false, 'alpha'], + ['1.2.0', 'prepatch', '1.2.1-dev.0', 'dev'], + ['1.2.0-1', 'prepatch', '1.2.1-dev.0', 'dev'], + ['1.2.0', 'preminor', '1.3.0-dev.0', 'dev'], + ['1.2.3-1', 'preminor', '1.3.0-dev.0', 'dev'], + ['1.2.0', 'premajor', '2.0.0-dev.0', 'dev'], + ['1.2.3-1', 'premajor', '2.0.0-dev.0', 'dev'], + ['1.2.0-1', 'minor', '1.2.0', 'dev'], + ['1.0.0-1', 'major', '1.0.0', 'dev'], + ['1.2.3-dev.bar', 'prerelease', '1.2.3-dev.0', false, 'dev'] + ].forEach(function(v) { var pre = v[0]; var what = v[1]; var wanted = v[2]; var loose = v[3]; - var found = inc(pre, what, loose); - t.equal(found, wanted, 'inc(' + pre + ', ' + what + ') === ' + wanted); + var id = v[4]; + var found = inc(pre, what, loose, id); + var cmd = 'inc(' + pre + ', ' + what + ', ' + id + ')'; + t.equal(found, wanted, cmd + ' === ' + wanted); }); t.end(); @@ -338,18 +417,18 @@ // translate ranges into their canonical form [['1.0.0 - 2.0.0', '>=1.0.0 <=2.0.0'], ['1.0.0', '1.0.0'], - ['>=*', '>=0.0.0-0'], + ['>=*', '*'], ['', '*'], ['*', 
'*'], ['*', '*'], ['>=1.0.0', '>=1.0.0'], ['>1.0.0', '>1.0.0'], ['<=2.0.0', '<=2.0.0'], - ['1', '>=1.0.0-0 <2.0.0-0'], + ['1', '>=1.0.0 <2.0.0'], ['<=2.0.0', '<=2.0.0'], ['<=2.0.0', '<=2.0.0'], - ['<2.0.0', '<2.0.0-0'], - ['<2.0.0', '<2.0.0-0'], + ['<2.0.0', '<2.0.0'], + ['<2.0.0', '<2.0.0'], ['>= 1.0.0', '>=1.0.0'], ['>= 1.0.0', '>=1.0.0'], ['>= 1.0.0', '>=1.0.0'], @@ -358,56 +437,56 @@ ['<= 2.0.0', '<=2.0.0'], ['<= 2.0.0', '<=2.0.0'], ['<= 2.0.0', '<=2.0.0'], - ['< 2.0.0', '<2.0.0-0'], - ['< 2.0.0', '<2.0.0-0'], + ['< 2.0.0', '<2.0.0'], + ['< 2.0.0', '<2.0.0'], ['>=0.1.97', '>=0.1.97'], ['>=0.1.97', '>=0.1.97'], ['0.1.20 || 1.2.4', '0.1.20||1.2.4'], - ['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1-0'], - ['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1-0'], - ['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1-0'], + ['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1'], + ['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1'], + ['>=0.2.3 || <0.0.1', '>=0.2.3||<0.0.1'], ['||', '||'], - ['2.x.x', '>=2.0.0-0 <3.0.0-0'], - ['1.2.x', '>=1.2.0-0 <1.3.0-0'], - ['1.2.x || 2.x', '>=1.2.0-0 <1.3.0-0||>=2.0.0-0 <3.0.0-0'], - ['1.2.x || 2.x', '>=1.2.0-0 <1.3.0-0||>=2.0.0-0 <3.0.0-0'], + ['2.x.x', '>=2.0.0 <3.0.0'], + ['1.2.x', '>=1.2.0 <1.3.0'], + ['1.2.x || 2.x', '>=1.2.0 <1.3.0||>=2.0.0 <3.0.0'], + ['1.2.x || 2.x', '>=1.2.0 <1.3.0||>=2.0.0 <3.0.0'], ['x', '*'], - ['2.*.*', '>=2.0.0-0 <3.0.0-0'], - ['1.2.*', '>=1.2.0-0 <1.3.0-0'], - ['1.2.* || 2.*', '>=1.2.0-0 <1.3.0-0||>=2.0.0-0 <3.0.0-0'], + ['2.*.*', '>=2.0.0 <3.0.0'], + ['1.2.*', '>=1.2.0 <1.3.0'], + ['1.2.* || 2.*', '>=1.2.0 <1.3.0||>=2.0.0 <3.0.0'], ['*', '*'], - ['2', '>=2.0.0-0 <3.0.0-0'], - ['2.3', '>=2.3.0-0 <2.4.0-0'], - ['~2.4', '>=2.4.0-0 <2.5.0-0'], - ['~2.4', '>=2.4.0-0 <2.5.0-0'], - ['~>3.2.1', '>=3.2.1-0 <3.3.0-0'], - ['~1', '>=1.0.0-0 <2.0.0-0'], - ['~>1', '>=1.0.0-0 <2.0.0-0'], - ['~> 1', '>=1.0.0-0 <2.0.0-0'], - ['~1.0', '>=1.0.0-0 <1.1.0-0'], - ['~ 1.0', '>=1.0.0-0 <1.1.0-0'], - ['^0', '>=0.0.0-0 <1.0.0-0'], - ['^ 1', '>=1.0.0-0 <2.0.0-0'], - ['^0.1', 
'>=0.1.0-0 <0.2.0-0'], - ['^1.0', '>=1.0.0-0 <2.0.0-0'], - ['^1.2', '>=1.2.0-0 <2.0.0-0'], - ['^0.0.1', '=0.0.1'], - ['^0.0.1-beta', '=0.0.1-beta'], - ['^0.1.2', '>=0.1.2-0 <0.2.0-0'], - ['^1.2.3', '>=1.2.3-0 <2.0.0-0'], - ['^1.2.3-beta.4', '>=1.2.3-beta.4 <2.0.0-0'], - ['<1', '<1.0.0-0'], - ['< 1', '<1.0.0-0'], - ['>=1', '>=1.0.0-0'], - ['>= 1', '>=1.0.0-0'], - ['<1.2', '<1.2.0-0'], - ['< 1.2', '<1.2.0-0'], - ['1', '>=1.0.0-0 <2.0.0-0'], + ['2', '>=2.0.0 <3.0.0'], + ['2.3', '>=2.3.0 <2.4.0'], + ['~2.4', '>=2.4.0 <2.5.0'], + ['~2.4', '>=2.4.0 <2.5.0'], + ['~>3.2.1', '>=3.2.1 <3.3.0'], + ['~1', '>=1.0.0 <2.0.0'], + ['~>1', '>=1.0.0 <2.0.0'], + ['~> 1', '>=1.0.0 <2.0.0'], + ['~1.0', '>=1.0.0 <1.1.0'], + ['~ 1.0', '>=1.0.0 <1.1.0'], + ['^0', '>=0.0.0 <1.0.0'], + ['^ 1', '>=1.0.0 <2.0.0'], + ['^0.1', '>=0.1.0 <0.2.0'], + ['^1.0', '>=1.0.0 <2.0.0'], + ['^1.2', '>=1.2.0 <2.0.0'], + ['^0.0.1', '>=0.0.1 <0.0.2'], + ['^0.0.1-beta', '>=0.0.1-beta <0.0.2'], + ['^0.1.2', '>=0.1.2 <0.2.0'], + ['^1.2.3', '>=1.2.3 <2.0.0'], + ['^1.2.3-beta.4', '>=1.2.3-beta.4 <2.0.0'], + ['<1', '<1.0.0'], + ['< 1', '<1.0.0'], + ['>=1', '>=1.0.0'], + ['>= 1', '>=1.0.0'], + ['<1.2', '<1.2.0'], + ['< 1.2', '<1.2.0'], + ['1', '>=1.0.0 <2.0.0'], ['>01.02.03', '>1.2.3', true], ['>01.02.03', null], - ['~1.2.3beta', '>=1.2.3-beta <1.3.0-0', true], + ['~1.2.3beta', '>=1.2.3-beta <1.3.0', true], ['~1.2.3beta', null], - ['^ 1.2 ^ 1', '>=1.2.0-0 <2.0.0-0 >=1.0.0-0 <2.0.0-0'] + ['^ 1.2 ^ 1', '>=1.2.0 <2.0.0 >=1.0.0 <2.0.0'] ].forEach(function(v) { var pre = v[0]; var wanted = v[1]; @@ -425,7 +504,7 @@ // turn range into a set of individual comparators [['1.0.0 - 2.0.0', [['>=1.0.0', '<=2.0.0']]], ['1.0.0', [['1.0.0']]], - ['>=*', [['>=0.0.0-0']]], + ['>=*', [['']]], ['', [['']]], ['*', [['']]], ['*', [['']]], @@ -435,11 +514,11 @@ ['>1.0.0', [['>1.0.0']]], ['>1.0.0', [['>1.0.0']]], ['<=2.0.0', [['<=2.0.0']]], - ['1', [['>=1.0.0-0', '<2.0.0-0']]], + ['1', [['>=1.0.0', '<2.0.0']]], ['<=2.0.0', [['<=2.0.0']]], 
['<=2.0.0', [['<=2.0.0']]], - ['<2.0.0', [['<2.0.0-0']]], - ['<2.0.0', [['<2.0.0-0']]], + ['<2.0.0', [['<2.0.0']]], + ['<2.0.0', [['<2.0.0']]], ['>= 1.0.0', [['>=1.0.0']]], ['>= 1.0.0', [['>=1.0.0']]], ['>= 1.0.0', [['>=1.0.0']]], @@ -448,47 +527,50 @@ ['<= 2.0.0', [['<=2.0.0']]], ['<= 2.0.0', [['<=2.0.0']]], ['<= 2.0.0', [['<=2.0.0']]], - ['< 2.0.0', [['<2.0.0-0']]], - ['<\t2.0.0', [['<2.0.0-0']]], + ['< 2.0.0', [['<2.0.0']]], + ['<\t2.0.0', [['<2.0.0']]], ['>=0.1.97', [['>=0.1.97']]], ['>=0.1.97', [['>=0.1.97']]], ['0.1.20 || 1.2.4', [['0.1.20'], ['1.2.4']]], - ['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1-0']]], - ['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1-0']]], - ['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1-0']]], + ['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1']]], + ['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1']]], + ['>=0.2.3 || <0.0.1', [['>=0.2.3'], ['<0.0.1']]], ['||', [[''], ['']]], - ['2.x.x', [['>=2.0.0-0', '<3.0.0-0']]], - ['1.2.x', [['>=1.2.0-0', '<1.3.0-0']]], - ['1.2.x || 2.x', [['>=1.2.0-0', '<1.3.0-0'], ['>=2.0.0-0', '<3.0.0-0']]], - ['1.2.x || 2.x', [['>=1.2.0-0', '<1.3.0-0'], ['>=2.0.0-0', '<3.0.0-0']]], + ['2.x.x', [['>=2.0.0', '<3.0.0']]], + ['1.2.x', [['>=1.2.0', '<1.3.0']]], + ['1.2.x || 2.x', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]], + ['1.2.x || 2.x', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]], ['x', [['']]], - ['2.*.*', [['>=2.0.0-0', '<3.0.0-0']]], - ['1.2.*', [['>=1.2.0-0', '<1.3.0-0']]], - ['1.2.* || 2.*', [['>=1.2.0-0', '<1.3.0-0'], ['>=2.0.0-0', '<3.0.0-0']]], - ['1.2.* || 2.*', [['>=1.2.0-0', '<1.3.0-0'], ['>=2.0.0-0', '<3.0.0-0']]], + ['2.*.*', [['>=2.0.0', '<3.0.0']]], + ['1.2.*', [['>=1.2.0', '<1.3.0']]], + ['1.2.* || 2.*', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]], + ['1.2.* || 2.*', [['>=1.2.0', '<1.3.0'], ['>=2.0.0', '<3.0.0']]], ['*', [['']]], - ['2', [['>=2.0.0-0', '<3.0.0-0']]], - ['2.3', [['>=2.3.0-0', '<2.4.0-0']]], - ['~2.4', [['>=2.4.0-0', '<2.5.0-0']]], - ['~2.4', [['>=2.4.0-0', '<2.5.0-0']]], - 
['~>3.2.1', [['>=3.2.1-0', '<3.3.0-0']]], - ['~1', [['>=1.0.0-0', '<2.0.0-0']]], - ['~>1', [['>=1.0.0-0', '<2.0.0-0']]], - ['~> 1', [['>=1.0.0-0', '<2.0.0-0']]], - ['~1.0', [['>=1.0.0-0', '<1.1.0-0']]], - ['~ 1.0', [['>=1.0.0-0', '<1.1.0-0']]], - ['~ 1.0.3', [['>=1.0.3-0', '<1.1.0-0']]], - ['~> 1.0.3', [['>=1.0.3-0', '<1.1.0-0']]], - ['<1', [['<1.0.0-0']]], - ['< 1', [['<1.0.0-0']]], - ['>=1', [['>=1.0.0-0']]], - ['>= 1', [['>=1.0.0-0']]], - ['<1.2', [['<1.2.0-0']]], - ['< 1.2', [['<1.2.0-0']]], - ['1', [['>=1.0.0-0', '<2.0.0-0']]], - ['1 2', [['>=1.0.0-0', '<2.0.0-0', '>=2.0.0-0', '<3.0.0-0']]], - ['1.2 - 3.4.5', [['>=1.2.0-0', '<=3.4.5']]], - ['1.2.3 - 3.4', [['>=1.2.3', '<3.5.0-0']]] + ['2', [['>=2.0.0', '<3.0.0']]], + ['2.3', [['>=2.3.0', '<2.4.0']]], + ['~2.4', [['>=2.4.0', '<2.5.0']]], + ['~2.4', [['>=2.4.0', '<2.5.0']]], + ['~>3.2.1', [['>=3.2.1', '<3.3.0']]], + ['~1', [['>=1.0.0', '<2.0.0']]], + ['~>1', [['>=1.0.0', '<2.0.0']]], + ['~> 1', [['>=1.0.0', '<2.0.0']]], + ['~1.0', [['>=1.0.0', '<1.1.0']]], + ['~ 1.0', [['>=1.0.0', '<1.1.0']]], + ['~ 1.0.3', [['>=1.0.3', '<1.1.0']]], + ['~> 1.0.3', [['>=1.0.3', '<1.1.0']]], + ['<1', [['<1.0.0']]], + ['< 1', [['<1.0.0']]], + ['>=1', [['>=1.0.0']]], + ['>= 1', [['>=1.0.0']]], + ['<1.2', [['<1.2.0']]], + ['< 1.2', [['<1.2.0']]], + ['1', [['>=1.0.0', '<2.0.0']]], + ['1 2', [['>=1.0.0', '<2.0.0', '>=2.0.0', '<3.0.0']]], + ['1.2 - 3.4.5', [['>=1.2.0', '<=3.4.5']]], + ['1.2.3 - 3.4', [['>=1.2.3', '<3.5.0']]], + ['1.2.3 - 3', [['>=1.2.3', '<4.0.0']]], + ['>*', [['<0.0.0']]], + ['<*', [['<0.0.0']]] ].forEach(function(v) { var pre = v[0]; var wanted = v[1]; @@ -500,6 +582,21 @@ t.end(); }); +test('\ninvalid version numbers', function(t) { + ['1.2.3.4', + 'NOT VALID', + 1.2, + null, + 'Infinity.NaN.Infinity' + ].forEach(function(v) { + t.throws(function() { + new SemVer(v); + }, {name:'TypeError', message:'Invalid Version: ' + v}); + }); + + t.end(); +}); + test('\nstrict vs loose version numbers', function(t) { [['=1.2.3', 
'1.2.3'], ['01.02.03', '1.2.3'], @@ -527,7 +624,7 @@ test('\nstrict vs loose ranges', function(t) { [['>=01.02.03', '>=1.2.3'], - ['~1.02.03beta', '>=1.2.3-beta <1.3.0-0'] + ['~1.02.03beta', '>=1.2.3-beta <1.3.0'] ].forEach(function(v) { var loose = v[0]; var comps = v[1]; diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/test/ltr.js nodejs-0.11.15/deps/npm/node_modules/semver/test/ltr.js --- nodejs-0.11.13/deps/npm/node_modules/semver/test/ltr.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/test/ltr.js 2015-01-20 21:22:17.000000000 +0000 @@ -66,6 +66,10 @@ ['>1', '1.0.0beta', true], ['> 1', '1.0.0beta', true], ['=0.7.x', '0.6.2'], + ['=0.7.x', '0.7.0-asdf'], + ['^1', '1.0.0-0'], + ['>=0.7.x', '0.7.0-asdf'], + ['1', '1.0.0beta', true], ['>=0.7.x', '0.6.2'] ].forEach(function(tuple) { var range = tuple[0]; @@ -145,24 +149,27 @@ ['>= 1', '1.0.0'], ['<1.2', '1.1.1'], ['< 1.2', '1.1.1'], - ['1', '1.0.0beta', true], ['~v0.5.4-pre', '0.5.5'], ['~v0.5.4-pre', '0.5.4'], ['=0.7.x', '0.7.2'], ['>=0.7.x', '0.7.2'], - ['=0.7.x', '0.7.0-asdf'], - ['>=0.7.x', '0.7.0-asdf'], ['<=0.7.x', '0.6.2'], ['>0.2.3 >0.2.4 <=0.2.5', '0.2.5'], ['>=0.2.3 <=0.2.4', '0.2.4'], ['1.0.0 - 2.0.0', '2.0.0'], - ['^1', '1.0.0-0'], ['^3.0.0', '4.0.0'], ['^1.0.0 || ~2.0.1', '2.0.0'], ['^0.1.0 || ~3.0.1 || 5.0.0', '3.2.0'], ['^0.1.0 || ~3.0.1 || 5.0.0', '1.0.0beta', true], ['^0.1.0 || ~3.0.1 || 5.0.0', '5.0.0-0', true], - ['^0.1.0 || ~3.0.1 || >4 <=5.0.0', '3.5.0'] + ['^0.1.0 || ~3.0.1 || >4 <=5.0.0', '3.5.0'], + ['^1.0.0alpha', '1.0.0beta', true], + ['~1.0.0alpha', '1.0.0beta', true], + ['^1.0.0-alpha', '1.0.0beta', true], + ['~1.0.0-alpha', '1.0.0beta', true], + ['^1.0.0-alpha', '1.0.0-beta'], + ['~1.0.0-alpha', '1.0.0-beta'], + ['=0.1.0', '1.0.0'] ].forEach(function(tuple) { var range = tuple[0]; var version = tuple[1]; diff -Nru nodejs-0.11.13/deps/npm/node_modules/semver/test/no-module.js nodejs-0.11.15/deps/npm/node_modules/semver/test/no-module.js --- 
nodejs-0.11.13/deps/npm/node_modules/semver/test/no-module.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/semver/test/no-module.js 2015-01-20 21:22:17.000000000 +0000 @@ -4,9 +4,9 @@ test('no module system', function(t) { var fs = require('fs'); var vm = require('vm'); - var head = fs.readFileSync(require.resolve('../head.js'), 'utf8'); + var head = fs.readFileSync(require.resolve('../head.js.txt'), 'utf8'); var src = fs.readFileSync(require.resolve('../'), 'utf8'); - var foot = fs.readFileSync(require.resolve('../foot.js'), 'utf8'); + var foot = fs.readFileSync(require.resolve('../foot.js.txt'), 'utf8'); vm.runInThisContext(head + src + foot, 'semver.js'); // just some basic poking to see if it did some stuff diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/index.js nodejs-0.11.15/deps/npm/node_modules/sha/index.js --- nodejs-0.11.13/deps/npm/node_modules/sha/index.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,120 +1,120 @@ -'use strict' - -var Transform = require('stream').Transform || require('readable-stream').Transform -var crypto = require('crypto') -var fs -try { - fs = require('graceful-fs') -} catch (ex) { - fs = require('fs') -} -try { - process.binding('crypto') -} catch (e) { - var er = new Error( 'crypto binding not found.\n' - + 'Please build node with openssl.\n' - + e.message ) - throw er -} - -exports.check = check -exports.checkSync = checkSync -exports.get = get -exports.getSync = getSync -exports.stream = stream - -function check(file, expected, options, cb) { - if (typeof options === 'function') { - cb = options - options = undefined - } - expected = expected.toLowerCase().trim() - get(file, options, function (er, actual) { - if (er) { - if (er.message) er.message += ' while getting shasum for ' + file - return cb(er) - } - if (actual === expected) return cb(null) - cb(new Error( - 'shasum check failed for ' + file + '\n' 
- + 'Expected: ' + expected + '\n' - + 'Actual: ' + actual)) - }) -} -function checkSync(file, expected, options) { - expected = expected.toLowerCase().trim() - var actual - try { - actual = getSync(file, options) - } catch (er) { - if (er.message) er.message += ' while getting shasum for ' + file - throw er - } - if (actual !== expected) { - var ex = new Error( - 'shasum check failed for ' + file + '\n' - + 'Expected: ' + expected + '\n' - + 'Actual: ' + actual) - throw ex - } -} - - -function get(file, options, cb) { - if (typeof options === 'function') { - cb = options - options = undefined - } - options = options || {} - var algorithm = options.algorithm || 'sha1' - var hash = crypto.createHash(algorithm) - var source = fs.createReadStream(file) - var errState = null - source - .on('error', function (er) { - if (errState) return - return cb(errState = er) - }) - .on('data', function (chunk) { - if (errState) return - hash.update(chunk) - }) - .on('end', function () { - if (errState) return - var actual = hash.digest("hex").toLowerCase().trim() - cb(null, actual) - }) -} - -function getSync(file, options) { - options = options || {} - var algorithm = options.algorithm || 'sha1' - var hash = crypto.createHash(algorithm) - var source = fs.readFileSync(file) - hash.update(source) - return hash.digest("hex").toLowerCase().trim() -} - -function stream(expected, options) { - expected = expected.toLowerCase().trim() - options = options || {} - var algorithm = options.algorithm || 'sha1' - var hash = crypto.createHash(algorithm) - - var stream = new Transform() - stream._transform = function (chunk, encoding, callback) { - hash.update(chunk) - stream.push(chunk) - callback() - } - stream._flush = function (cb) { - var actual = hash.digest("hex").toLowerCase().trim() - if (actual === expected) return cb(null) - cb(new Error( - 'shasum check failed for:\n' - + ' Expected: ' + expected + '\n' - + ' Actual: ' + actual)) - this.push(null) - } - return stream +'use strict' + 
+var Transform = require('stream').Transform || require('readable-stream').Transform +var crypto = require('crypto') +var fs +try { + fs = require('graceful-fs') +} catch (ex) { + fs = require('fs') +} +try { + process.binding('crypto') +} catch (e) { + var er = new Error( 'crypto binding not found.\n' + + 'Please build node with openssl.\n' + + e.message ) + throw er +} + +exports.check = check +exports.checkSync = checkSync +exports.get = get +exports.getSync = getSync +exports.stream = stream + +function check(file, expected, options, cb) { + if (typeof options === 'function') { + cb = options + options = undefined + } + expected = expected.toLowerCase().trim() + get(file, options, function (er, actual) { + if (er) { + if (er.message) er.message += ' while getting shasum for ' + file + return cb(er) + } + if (actual === expected) return cb(null) + cb(new Error( + 'shasum check failed for ' + file + '\n' + + 'Expected: ' + expected + '\n' + + 'Actual: ' + actual)) + }) +} +function checkSync(file, expected, options) { + expected = expected.toLowerCase().trim() + var actual + try { + actual = getSync(file, options) + } catch (er) { + if (er.message) er.message += ' while getting shasum for ' + file + throw er + } + if (actual !== expected) { + var ex = new Error( + 'shasum check failed for ' + file + '\n' + + 'Expected: ' + expected + '\n' + + 'Actual: ' + actual) + throw ex + } +} + + +function get(file, options, cb) { + if (typeof options === 'function') { + cb = options + options = undefined + } + options = options || {} + var algorithm = options.algorithm || 'sha1' + var hash = crypto.createHash(algorithm) + var source = fs.createReadStream(file) + var errState = null + source + .on('error', function (er) { + if (errState) return + return cb(errState = er) + }) + .on('data', function (chunk) { + if (errState) return + hash.update(chunk) + }) + .on('end', function () { + if (errState) return + var actual = hash.digest("hex").toLowerCase().trim() + cb(null, 
actual) + }) +} + +function getSync(file, options) { + options = options || {} + var algorithm = options.algorithm || 'sha1' + var hash = crypto.createHash(algorithm) + var source = fs.readFileSync(file) + hash.update(source) + return hash.digest("hex").toLowerCase().trim() +} + +function stream(expected, options) { + expected = expected.toLowerCase().trim() + options = options || {} + var algorithm = options.algorithm || 'sha1' + var hash = crypto.createHash(algorithm) + + var stream = new Transform() + stream._transform = function (chunk, encoding, callback) { + hash.update(chunk) + stream.push(chunk) + callback() + } + stream._flush = function (cb) { + var actual = hash.digest("hex").toLowerCase().trim() + if (actual === expected) return cb(null) + cb(new Error( + 'shasum check failed for:\n' + + ' Expected: ' + expected + '\n' + + ' Actual: ' + actual)) + this.push(null) + } + return stream } \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/LICENSE nodejs-0.11.15/deps/npm/node_modules/sha/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/sha/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,46 @@ -Copyright (c) 2013 Forbes Lindesay - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +Copyright (c) 2013 Forbes Lindesay + +The BSD License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/float.patch nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/float.patch --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/float.patch 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/float.patch 2015-01-20 21:22:17.000000000 +0000 @@ -27,37 +27,92 @@ util.inherits(PassThrough, Transform); diff --git a/lib/_stream_readable.js b/lib/_stream_readable.js -index 2259d2e..e6681ee 100644 +index 0c3fe3e..90a8298 100644 --- a/lib/_stream_readable.js +++ b/lib/_stream_readable.js -@@ -23,6 +23,9 @@ module.exports = Readable; +@@ -23,10 +23,34 @@ module.exports = Readable; Readable.ReadableState = ReadableState; var EE = require('events').EventEmitter; +if (!EE.listenerCount) EE.listenerCount = function(emitter, type) { + return emitter.listeners(type).length; +}; ++ ++if (!global.setImmediate) global.setImmediate = function setImmediate(fn) { ++ return setTimeout(fn, 0); ++}; ++if (!global.clearImmediate) global.clearImmediate = function clearImmediate(i) { ++ return clearTimeout(i); ++}; ++ var Stream = require('stream'); var util = require('util'); ++if (!util.isUndefined) { ++ var utilIs = require('core-util-is'); ++ for (var f in utilIs) { ++ util[f] = utilIs[f]; ++ } ++} var StringDecoder; +-var debug = util.debuglog('stream'); ++var debug; ++if (util.debuglog) ++ debug = util.debuglog('stream'); ++else try { ++ debug = require('debuglog')('stream'); ++} catch (er) { ++ debug = function() {}; ++} + + util.inherits(Readable, Stream); + +@@ -380,7 +404,7 @@ function chunkInvalid(state, chunk) { + + + function onEofChunk(stream, state) { +- if (state.decoder && !state.ended) { ++ if (state.decoder && !state.ended && state.decoder.end) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) { + state.buffer.push(chunk); diff --git 
a/lib/_stream_transform.js b/lib/_stream_transform.js -index e925b4b..f08b05e 100644 +index b1f9fcc..b0caf57 100644 --- a/lib/_stream_transform.js +++ b/lib/_stream_transform.js -@@ -64,7 +64,7 @@ +@@ -64,8 +64,14 @@ module.exports = Transform; -var Duplex = require('_stream_duplex'); +var Duplex = require('./_stream_duplex'); var util = require('util'); ++if (!util.isUndefined) { ++ var utilIs = require('core-util-is'); ++ for (var f in utilIs) { ++ util[f] = utilIs[f]; ++ } ++} util.inherits(Transform, Duplex); + diff --git a/lib/_stream_writable.js b/lib/_stream_writable.js -index a26f711..56ca47d 100644 +index ba2e920..f49288b 100644 --- a/lib/_stream_writable.js +++ b/lib/_stream_writable.js -@@ -109,7 +109,7 @@ function WritableState(options, stream) { +@@ -27,6 +27,12 @@ module.exports = Writable; + Writable.WritableState = WritableState; + + var util = require('util'); ++if (!util.isUndefined) { ++ var utilIs = require('core-util-is'); ++ for (var f in utilIs) { ++ util[f] = utilIs[f]; ++ } ++} + var Stream = require('stream'); + + util.inherits(Writable, Stream); +@@ -119,7 +125,7 @@ function WritableState(options, stream) { function Writable(options) { // Writable ctor is applied to Duplexes, though they're not // instanceof Writable, they're instanceof Readable. 
@@ -66,3 +121,803 @@ return new Writable(options); this._writableState = new WritableState(options, this); +diff --git a/test/simple/test-stream-big-push.js b/test/simple/test-stream-big-push.js +index e3787e4..8cd2127 100644 +--- a/test/simple/test-stream-big-push.js ++++ b/test/simple/test-stream-big-push.js +@@ -21,7 +21,7 @@ + + var common = require('../common'); + var assert = require('assert'); +-var stream = require('stream'); ++var stream = require('../../'); + var str = 'asdfasdfasdfasdfasdf'; + + var r = new stream.Readable({ +diff --git a/test/simple/test-stream-end-paused.js b/test/simple/test-stream-end-paused.js +index bb73777..d40efc7 100644 +--- a/test/simple/test-stream-end-paused.js ++++ b/test/simple/test-stream-end-paused.js +@@ -25,7 +25,7 @@ var gotEnd = false; + + // Make sure we don't miss the end event for paused 0-length streams + +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + var stream = new Readable(); + var calledRead = false; + stream._read = function() { +diff --git a/test/simple/test-stream-pipe-after-end.js b/test/simple/test-stream-pipe-after-end.js +index b46ee90..0be8366 100644 +--- a/test/simple/test-stream-pipe-after-end.js ++++ b/test/simple/test-stream-pipe-after-end.js +@@ -22,8 +22,8 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Readable = require('_stream_readable'); +-var Writable = require('_stream_writable'); ++var Readable = require('../../lib/_stream_readable'); ++var Writable = require('../../lib/_stream_writable'); + var util = require('util'); + + util.inherits(TestReadable, Readable); +diff --git a/test/simple/test-stream-pipe-cleanup.js b/test/simple/test-stream-pipe-cleanup.js +deleted file mode 100644 +index f689358..0000000 +--- a/test/simple/test-stream-pipe-cleanup.js ++++ /dev/null +@@ -1,122 +0,0 @@ +-// Copyright Joyent, Inc. and other Node contributors. 
+-// +-// Permission is hereby granted, free of charge, to any person obtaining a +-// copy of this software and associated documentation files (the +-// "Software"), to deal in the Software without restriction, including +-// without limitation the rights to use, copy, modify, merge, publish, +-// distribute, sublicense, and/or sell copies of the Software, and to permit +-// persons to whom the Software is furnished to do so, subject to the +-// following conditions: +-// +-// The above copyright notice and this permission notice shall be included +-// in all copies or substantial portions of the Software. +-// +-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +-// USE OR OTHER DEALINGS IN THE SOFTWARE. +- +-// This test asserts that Stream.prototype.pipe does not leave listeners +-// hanging on the source or dest. 
+- +-var common = require('../common'); +-var stream = require('stream'); +-var assert = require('assert'); +-var util = require('util'); +- +-function Writable() { +- this.writable = true; +- this.endCalls = 0; +- stream.Stream.call(this); +-} +-util.inherits(Writable, stream.Stream); +-Writable.prototype.end = function() { +- this.endCalls++; +-}; +- +-Writable.prototype.destroy = function() { +- this.endCalls++; +-}; +- +-function Readable() { +- this.readable = true; +- stream.Stream.call(this); +-} +-util.inherits(Readable, stream.Stream); +- +-function Duplex() { +- this.readable = true; +- Writable.call(this); +-} +-util.inherits(Duplex, Writable); +- +-var i = 0; +-var limit = 100; +- +-var w = new Writable(); +- +-var r; +- +-for (i = 0; i < limit; i++) { +- r = new Readable(); +- r.pipe(w); +- r.emit('end'); +-} +-assert.equal(0, r.listeners('end').length); +-assert.equal(limit, w.endCalls); +- +-w.endCalls = 0; +- +-for (i = 0; i < limit; i++) { +- r = new Readable(); +- r.pipe(w); +- r.emit('close'); +-} +-assert.equal(0, r.listeners('close').length); +-assert.equal(limit, w.endCalls); +- +-w.endCalls = 0; +- +-r = new Readable(); +- +-for (i = 0; i < limit; i++) { +- w = new Writable(); +- r.pipe(w); +- w.emit('close'); +-} +-assert.equal(0, w.listeners('close').length); +- +-r = new Readable(); +-w = new Writable(); +-var d = new Duplex(); +-r.pipe(d); // pipeline A +-d.pipe(w); // pipeline B +-assert.equal(r.listeners('end').length, 2); // A.onend, A.cleanup +-assert.equal(r.listeners('close').length, 2); // A.onclose, A.cleanup +-assert.equal(d.listeners('end').length, 2); // B.onend, B.cleanup +-assert.equal(d.listeners('close').length, 3); // A.cleanup, B.onclose, B.cleanup +-assert.equal(w.listeners('end').length, 0); +-assert.equal(w.listeners('close').length, 1); // B.cleanup +- +-r.emit('end'); +-assert.equal(d.endCalls, 1); +-assert.equal(w.endCalls, 0); +-assert.equal(r.listeners('end').length, 0); +-assert.equal(r.listeners('close').length, 
0); +-assert.equal(d.listeners('end').length, 2); // B.onend, B.cleanup +-assert.equal(d.listeners('close').length, 2); // B.onclose, B.cleanup +-assert.equal(w.listeners('end').length, 0); +-assert.equal(w.listeners('close').length, 1); // B.cleanup +- +-d.emit('end'); +-assert.equal(d.endCalls, 1); +-assert.equal(w.endCalls, 1); +-assert.equal(r.listeners('end').length, 0); +-assert.equal(r.listeners('close').length, 0); +-assert.equal(d.listeners('end').length, 0); +-assert.equal(d.listeners('close').length, 0); +-assert.equal(w.listeners('end').length, 0); +-assert.equal(w.listeners('close').length, 0); +diff --git a/test/simple/test-stream-pipe-error-handling.js b/test/simple/test-stream-pipe-error-handling.js +index c5d724b..c7d6b7d 100644 +--- a/test/simple/test-stream-pipe-error-handling.js ++++ b/test/simple/test-stream-pipe-error-handling.js +@@ -21,7 +21,7 @@ + + var common = require('../common'); + var assert = require('assert'); +-var Stream = require('stream').Stream; ++var Stream = require('../../').Stream; + + (function testErrorListenerCatches() { + var source = new Stream(); +diff --git a/test/simple/test-stream-pipe-event.js b/test/simple/test-stream-pipe-event.js +index cb9d5fe..56f8d61 100644 +--- a/test/simple/test-stream-pipe-event.js ++++ b/test/simple/test-stream-pipe-event.js +@@ -20,7 +20,7 @@ + // USE OR OTHER DEALINGS IN THE SOFTWARE. + + var common = require('../common'); +-var stream = require('stream'); ++var stream = require('../../'); + var assert = require('assert'); + var util = require('util'); + +diff --git a/test/simple/test-stream-push-order.js b/test/simple/test-stream-push-order.js +index f2e6ec2..a5c9bf9 100644 +--- a/test/simple/test-stream-push-order.js ++++ b/test/simple/test-stream-push-order.js +@@ -20,7 +20,7 @@ + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + var common = require('../common.js'); +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + var assert = require('assert'); + + var s = new Readable({ +diff --git a/test/simple/test-stream-push-strings.js b/test/simple/test-stream-push-strings.js +index 06f43dc..1701a9a 100644 +--- a/test/simple/test-stream-push-strings.js ++++ b/test/simple/test-stream-push-strings.js +@@ -22,7 +22,7 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + var util = require('util'); + + util.inherits(MyStream, Readable); +diff --git a/test/simple/test-stream-readable-event.js b/test/simple/test-stream-readable-event.js +index ba6a577..a8e6f7b 100644 +--- a/test/simple/test-stream-readable-event.js ++++ b/test/simple/test-stream-readable-event.js +@@ -22,7 +22,7 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + + (function first() { + // First test, not reading when the readable is added. +diff --git a/test/simple/test-stream-readable-flow-recursion.js b/test/simple/test-stream-readable-flow-recursion.js +index 2891ad6..11689ba 100644 +--- a/test/simple/test-stream-readable-flow-recursion.js ++++ b/test/simple/test-stream-readable-flow-recursion.js +@@ -27,7 +27,7 @@ var assert = require('assert'); + // more data continuously, but without triggering a nextTick + // warning or RangeError. + +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + + // throw an error if we trigger a nextTick warning. 
+ process.throwDeprecation = true; +diff --git a/test/simple/test-stream-unshift-empty-chunk.js b/test/simple/test-stream-unshift-empty-chunk.js +index 0c96476..7827538 100644 +--- a/test/simple/test-stream-unshift-empty-chunk.js ++++ b/test/simple/test-stream-unshift-empty-chunk.js +@@ -24,7 +24,7 @@ var assert = require('assert'); + + // This test verifies that stream.unshift(Buffer(0)) or + // stream.unshift('') does not set state.reading=false. +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + + var r = new Readable(); + var nChunks = 10; +diff --git a/test/simple/test-stream-unshift-read-race.js b/test/simple/test-stream-unshift-read-race.js +index 83fd9fa..17c18aa 100644 +--- a/test/simple/test-stream-unshift-read-race.js ++++ b/test/simple/test-stream-unshift-read-race.js +@@ -29,7 +29,7 @@ var assert = require('assert'); + // 3. push() after the EOF signaling null is an error. + // 4. _read() is not called after pushing the EOF null chunk. + +-var stream = require('stream'); ++var stream = require('../../'); + var hwm = 10; + var r = stream.Readable({ highWaterMark: hwm }); + var chunks = 10; +@@ -51,7 +51,14 @@ r._read = function(n) { + + function push(fast) { + assert(!pushedNull, 'push() after null push'); +- var c = pos >= data.length ? 
null : data.slice(pos, pos + n); ++ var c; ++ if (pos >= data.length) ++ c = null; ++ else { ++ if (n + pos > data.length) ++ n = data.length - pos; ++ c = data.slice(pos, pos + n); ++ } + pushedNull = c === null; + if (fast) { + pos += n; +diff --git a/test/simple/test-stream-writev.js b/test/simple/test-stream-writev.js +index 5b49e6e..b5321f3 100644 +--- a/test/simple/test-stream-writev.js ++++ b/test/simple/test-stream-writev.js +@@ -22,7 +22,7 @@ + var common = require('../common'); + var assert = require('assert'); + +-var stream = require('stream'); ++var stream = require('../../'); + + var queue = []; + for (var decode = 0; decode < 2; decode++) { +diff --git a/test/simple/test-stream2-basic.js b/test/simple/test-stream2-basic.js +index 3814bf0..248c1be 100644 +--- a/test/simple/test-stream2-basic.js ++++ b/test/simple/test-stream2-basic.js +@@ -21,7 +21,7 @@ + + + var common = require('../common.js'); +-var R = require('_stream_readable'); ++var R = require('../../lib/_stream_readable'); + var assert = require('assert'); + + var util = require('util'); +diff --git a/test/simple/test-stream2-compatibility.js b/test/simple/test-stream2-compatibility.js +index 6cdd4e9..f0fa84b 100644 +--- a/test/simple/test-stream2-compatibility.js ++++ b/test/simple/test-stream2-compatibility.js +@@ -21,7 +21,7 @@ + + + var common = require('../common.js'); +-var R = require('_stream_readable'); ++var R = require('../../lib/_stream_readable'); + var assert = require('assert'); + + var util = require('util'); +diff --git a/test/simple/test-stream2-finish-pipe.js b/test/simple/test-stream2-finish-pipe.js +index 39b274f..006a19b 100644 +--- a/test/simple/test-stream2-finish-pipe.js ++++ b/test/simple/test-stream2-finish-pipe.js +@@ -20,7 +20,7 @@ + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + var common = require('../common.js'); +-var stream = require('stream'); ++var stream = require('../../'); + var Buffer = require('buffer').Buffer; + + var r = new stream.Readable(); +diff --git a/test/simple/test-stream2-fs.js b/test/simple/test-stream2-fs.js +deleted file mode 100644 +index e162406..0000000 +--- a/test/simple/test-stream2-fs.js ++++ /dev/null +@@ -1,72 +0,0 @@ +-// Copyright Joyent, Inc. and other Node contributors. +-// +-// Permission is hereby granted, free of charge, to any person obtaining a +-// copy of this software and associated documentation files (the +-// "Software"), to deal in the Software without restriction, including +-// without limitation the rights to use, copy, modify, merge, publish, +-// distribute, sublicense, and/or sell copies of the Software, and to permit +-// persons to whom the Software is furnished to do so, subject to the +-// following conditions: +-// +-// The above copyright notice and this permission notice shall be included +-// in all copies or substantial portions of the Software. +-// +-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +-// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+- +- +-var common = require('../common.js'); +-var R = require('_stream_readable'); +-var assert = require('assert'); +- +-var fs = require('fs'); +-var FSReadable = fs.ReadStream; +- +-var path = require('path'); +-var file = path.resolve(common.fixturesDir, 'x1024.txt'); +- +-var size = fs.statSync(file).size; +- +-var expectLengths = [1024]; +- +-var util = require('util'); +-var Stream = require('stream'); +- +-util.inherits(TestWriter, Stream); +- +-function TestWriter() { +- Stream.apply(this); +- this.buffer = []; +- this.length = 0; +-} +- +-TestWriter.prototype.write = function(c) { +- this.buffer.push(c.toString()); +- this.length += c.length; +- return true; +-}; +- +-TestWriter.prototype.end = function(c) { +- if (c) this.buffer.push(c.toString()); +- this.emit('results', this.buffer); +-} +- +-var r = new FSReadable(file); +-var w = new TestWriter(); +- +-w.on('results', function(res) { +- console.error(res, w.length); +- assert.equal(w.length, size); +- var l = 0; +- assert.deepEqual(res.map(function (c) { +- return c.length; +- }), expectLengths); +- console.log('ok'); +-}); +- +-r.pipe(w); +diff --git a/test/simple/test-stream2-httpclient-response-end.js b/test/simple/test-stream2-httpclient-response-end.js +deleted file mode 100644 +index 15cffc2..0000000 +--- a/test/simple/test-stream2-httpclient-response-end.js ++++ /dev/null +@@ -1,52 +0,0 @@ +-// Copyright Joyent, Inc. and other Node contributors. 
+-// +-// Permission is hereby granted, free of charge, to any person obtaining a +-// copy of this software and associated documentation files (the +-// "Software"), to deal in the Software without restriction, including +-// without limitation the rights to use, copy, modify, merge, publish, +-// distribute, sublicense, and/or sell copies of the Software, and to permit +-// persons to whom the Software is furnished to do so, subject to the +-// following conditions: +-// +-// The above copyright notice and this permission notice shall be included +-// in all copies or substantial portions of the Software. +-// +-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +-// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+- +-var common = require('../common.js'); +-var assert = require('assert'); +-var http = require('http'); +-var msg = 'Hello'; +-var readable_event = false; +-var end_event = false; +-var server = http.createServer(function(req, res) { +- res.writeHead(200, {'Content-Type': 'text/plain'}); +- res.end(msg); +-}).listen(common.PORT, function() { +- http.get({port: common.PORT}, function(res) { +- var data = ''; +- res.on('readable', function() { +- console.log('readable event'); +- readable_event = true; +- data += res.read(); +- }); +- res.on('end', function() { +- console.log('end event'); +- end_event = true; +- assert.strictEqual(msg, data); +- server.close(); +- }); +- }); +-}); +- +-process.on('exit', function() { +- assert(readable_event); +- assert(end_event); +-}); +- +diff --git a/test/simple/test-stream2-large-read-stall.js b/test/simple/test-stream2-large-read-stall.js +index 2fbfbca..667985b 100644 +--- a/test/simple/test-stream2-large-read-stall.js ++++ b/test/simple/test-stream2-large-read-stall.js +@@ -30,7 +30,7 @@ var PUSHSIZE = 20; + var PUSHCOUNT = 1000; + var HWM = 50; + +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + var r = new Readable({ + highWaterMark: HWM + }); +@@ -39,23 +39,23 @@ var rs = r._readableState; + r._read = push; + + r.on('readable', function() { +- console.error('>> readable'); ++ //console.error('>> readable'); + do { +- console.error(' > read(%d)', READSIZE); ++ //console.error(' > read(%d)', READSIZE); + var ret = r.read(READSIZE); +- console.error(' < %j (%d remain)', ret && ret.length, rs.length); ++ //console.error(' < %j (%d remain)', ret && ret.length, rs.length); + } while (ret && ret.length === READSIZE); + +- console.error('<< after read()', +- ret && ret.length, +- rs.needReadable, +- rs.length); ++ //console.error('<< after read()', ++ // ret && ret.length, ++ // rs.needReadable, ++ // rs.length); + }); + + var endEmitted = false; + r.on('end', function() { + endEmitted 
= true; +- console.error('end'); ++ //console.error('end'); + }); + + var pushes = 0; +@@ -64,11 +64,11 @@ function push() { + return; + + if (pushes++ === PUSHCOUNT) { +- console.error(' push(EOF)'); ++ //console.error(' push(EOF)'); + return r.push(null); + } + +- console.error(' push #%d', pushes); ++ //console.error(' push #%d', pushes); + if (r.push(new Buffer(PUSHSIZE))) + setTimeout(push); + } +diff --git a/test/simple/test-stream2-objects.js b/test/simple/test-stream2-objects.js +index 3e6931d..ff47d89 100644 +--- a/test/simple/test-stream2-objects.js ++++ b/test/simple/test-stream2-objects.js +@@ -21,8 +21,8 @@ + + + var common = require('../common.js'); +-var Readable = require('_stream_readable'); +-var Writable = require('_stream_writable'); ++var Readable = require('../../lib/_stream_readable'); ++var Writable = require('../../lib/_stream_writable'); + var assert = require('assert'); + + // tiny node-tap lookalike. +diff --git a/test/simple/test-stream2-pipe-error-handling.js b/test/simple/test-stream2-pipe-error-handling.js +index cf7531c..e3f3e4e 100644 +--- a/test/simple/test-stream2-pipe-error-handling.js ++++ b/test/simple/test-stream2-pipe-error-handling.js +@@ -21,7 +21,7 @@ + + var common = require('../common'); + var assert = require('assert'); +-var stream = require('stream'); ++var stream = require('../../'); + + (function testErrorListenerCatches() { + var count = 1000; +diff --git a/test/simple/test-stream2-pipe-error-once-listener.js b/test/simple/test-stream2-pipe-error-once-listener.js +index 5e8e3cb..53b2616 100755 +--- a/test/simple/test-stream2-pipe-error-once-listener.js ++++ b/test/simple/test-stream2-pipe-error-once-listener.js +@@ -24,7 +24,7 @@ var common = require('../common.js'); + var assert = require('assert'); + + var util = require('util'); +-var stream = require('stream'); ++var stream = require('../../'); + + + var Read = function() { +diff --git a/test/simple/test-stream2-push.js b/test/simple/test-stream2-push.js 
+index b63edc3..eb2b0e9 100644 +--- a/test/simple/test-stream2-push.js ++++ b/test/simple/test-stream2-push.js +@@ -20,7 +20,7 @@ + // USE OR OTHER DEALINGS IN THE SOFTWARE. + + var common = require('../common.js'); +-var stream = require('stream'); ++var stream = require('../../'); + var Readable = stream.Readable; + var Writable = stream.Writable; + var assert = require('assert'); +diff --git a/test/simple/test-stream2-read-sync-stack.js b/test/simple/test-stream2-read-sync-stack.js +index e8a7305..9740a47 100644 +--- a/test/simple/test-stream2-read-sync-stack.js ++++ b/test/simple/test-stream2-read-sync-stack.js +@@ -21,7 +21,7 @@ + + var common = require('../common'); + var assert = require('assert'); +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + var r = new Readable(); + var N = 256 * 1024; + +diff --git a/test/simple/test-stream2-readable-empty-buffer-no-eof.js b/test/simple/test-stream2-readable-empty-buffer-no-eof.js +index cd30178..4b1659d 100644 +--- a/test/simple/test-stream2-readable-empty-buffer-no-eof.js ++++ b/test/simple/test-stream2-readable-empty-buffer-no-eof.js +@@ -22,10 +22,9 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Readable = require('stream').Readable; ++var Readable = require('../../').Readable; + + test1(); +-test2(); + + function test1() { + var r = new Readable(); +@@ -88,31 +87,3 @@ function test1() { + console.log('ok'); + }); + } +- +-function test2() { +- var r = new Readable({ encoding: 'base64' }); +- var reads = 5; +- r._read = function(n) { +- if (!reads--) +- return r.push(null); // EOF +- else +- return r.push(new Buffer('x')); +- }; +- +- var results = []; +- function flow() { +- var chunk; +- while (null !== (chunk = r.read())) +- results.push(chunk + ''); +- } +- r.on('readable', flow); +- r.on('end', function() { +- results.push('EOF'); +- }); +- flow(); +- +- process.on('exit', function() { +- assert.deepEqual(results, [ 'eHh4', 'eHg=', 
'EOF' ]); +- console.log('ok'); +- }); +-} +diff --git a/test/simple/test-stream2-readable-from-list.js b/test/simple/test-stream2-readable-from-list.js +index 7c96ffe..04a96f5 100644 +--- a/test/simple/test-stream2-readable-from-list.js ++++ b/test/simple/test-stream2-readable-from-list.js +@@ -21,7 +21,7 @@ + + var assert = require('assert'); + var common = require('../common.js'); +-var fromList = require('_stream_readable')._fromList; ++var fromList = require('../../lib/_stream_readable')._fromList; + + // tiny node-tap lookalike. + var tests = []; +diff --git a/test/simple/test-stream2-readable-legacy-drain.js b/test/simple/test-stream2-readable-legacy-drain.js +index 675da8e..51fd3d5 100644 +--- a/test/simple/test-stream2-readable-legacy-drain.js ++++ b/test/simple/test-stream2-readable-legacy-drain.js +@@ -22,7 +22,7 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Stream = require('stream'); ++var Stream = require('../../'); + var Readable = Stream.Readable; + + var r = new Readable(); +diff --git a/test/simple/test-stream2-readable-non-empty-end.js b/test/simple/test-stream2-readable-non-empty-end.js +index 7314ae7..c971898 100644 +--- a/test/simple/test-stream2-readable-non-empty-end.js ++++ b/test/simple/test-stream2-readable-non-empty-end.js +@@ -21,7 +21,7 @@ + + var assert = require('assert'); + var common = require('../common.js'); +-var Readable = require('_stream_readable'); ++var Readable = require('../../lib/_stream_readable'); + + var len = 0; + var chunks = new Array(10); +diff --git a/test/simple/test-stream2-readable-wrap-empty.js b/test/simple/test-stream2-readable-wrap-empty.js +index 2e5cf25..fd8a3dc 100644 +--- a/test/simple/test-stream2-readable-wrap-empty.js ++++ b/test/simple/test-stream2-readable-wrap-empty.js +@@ -22,7 +22,7 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Readable = require('_stream_readable'); ++var Readable = require('../../lib/_stream_readable'); + 
var EE = require('events').EventEmitter; + + var oldStream = new EE(); +diff --git a/test/simple/test-stream2-readable-wrap.js b/test/simple/test-stream2-readable-wrap.js +index 90eea01..6b177f7 100644 +--- a/test/simple/test-stream2-readable-wrap.js ++++ b/test/simple/test-stream2-readable-wrap.js +@@ -22,8 +22,8 @@ + var common = require('../common'); + var assert = require('assert'); + +-var Readable = require('_stream_readable'); +-var Writable = require('_stream_writable'); ++var Readable = require('../../lib/_stream_readable'); ++var Writable = require('../../lib/_stream_writable'); + var EE = require('events').EventEmitter; + + var testRuns = 0, completedRuns = 0; +diff --git a/test/simple/test-stream2-set-encoding.js b/test/simple/test-stream2-set-encoding.js +index 5d2c32a..685531b 100644 +--- a/test/simple/test-stream2-set-encoding.js ++++ b/test/simple/test-stream2-set-encoding.js +@@ -22,7 +22,7 @@ + + var common = require('../common.js'); + var assert = require('assert'); +-var R = require('_stream_readable'); ++var R = require('../../lib/_stream_readable'); + var util = require('util'); + + // tiny node-tap lookalike. +diff --git a/test/simple/test-stream2-transform.js b/test/simple/test-stream2-transform.js +index 9c9ddd8..a0cacc6 100644 +--- a/test/simple/test-stream2-transform.js ++++ b/test/simple/test-stream2-transform.js +@@ -21,8 +21,8 @@ + + var assert = require('assert'); + var common = require('../common.js'); +-var PassThrough = require('_stream_passthrough'); +-var Transform = require('_stream_transform'); ++var PassThrough = require('../../').PassThrough; ++var Transform = require('../../').Transform; + + // tiny node-tap lookalike. 
+ var tests = []; +diff --git a/test/simple/test-stream2-unpipe-drain.js b/test/simple/test-stream2-unpipe-drain.js +index d66dc3c..365b327 100644 +--- a/test/simple/test-stream2-unpipe-drain.js ++++ b/test/simple/test-stream2-unpipe-drain.js +@@ -22,7 +22,7 @@ + + var common = require('../common.js'); + var assert = require('assert'); +-var stream = require('stream'); ++var stream = require('../../'); + var crypto = require('crypto'); + + var util = require('util'); +diff --git a/test/simple/test-stream2-unpipe-leak.js b/test/simple/test-stream2-unpipe-leak.js +index 99f8746..17c92ae 100644 +--- a/test/simple/test-stream2-unpipe-leak.js ++++ b/test/simple/test-stream2-unpipe-leak.js +@@ -22,7 +22,7 @@ + + var common = require('../common.js'); + var assert = require('assert'); +-var stream = require('stream'); ++var stream = require('../../'); + + var chunk = new Buffer('hallo'); + +diff --git a/test/simple/test-stream2-writable.js b/test/simple/test-stream2-writable.js +index 704100c..209c3a6 100644 +--- a/test/simple/test-stream2-writable.js ++++ b/test/simple/test-stream2-writable.js +@@ -20,8 +20,8 @@ + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + var common = require('../common.js'); +-var W = require('_stream_writable'); +-var D = require('_stream_duplex'); ++var W = require('../../').Writable; ++var D = require('../../').Duplex; + var assert = require('assert'); + + var util = require('util'); +diff --git a/test/simple/test-stream3-pause-then-read.js b/test/simple/test-stream3-pause-then-read.js +index b91bde3..2f72c15 100644 +--- a/test/simple/test-stream3-pause-then-read.js ++++ b/test/simple/test-stream3-pause-then-read.js +@@ -22,7 +22,7 @@ + var common = require('../common'); + var assert = require('assert'); + +-var stream = require('stream'); ++var stream = require('../../'); + var Readable = stream.Readable; + var Writable = stream.Writable; + diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_duplex.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_duplex.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_duplex.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_duplex.js 2015-01-20 21:22:17.000000000 +0000 @@ -25,13 +25,27 @@ // Writable. module.exports = Duplex; -var util = require('util'); + +/**/ +var objectKeys = Object.keys || function (obj) { + var keys = []; + for (var key in obj) keys.push(key); + return keys; +} +/**/ + + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + var Readable = require('./_stream_readable'); var Writable = require('./_stream_writable'); util.inherits(Duplex, Readable); -Object.keys(Writable.prototype).forEach(function(method) { +forEach(objectKeys(Writable.prototype), function(method) { if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method]; }); @@ -67,3 +81,9 @@ // But allow more writes to happen in this tick. 
process.nextTick(this.end.bind(this)); } + +function forEach (xs, f) { + for (var i = 0, l = xs.length; i < l; i++) { + f(xs[i], i); + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_passthrough.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_passthrough.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_passthrough.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_passthrough.js 2015-01-20 21:22:17.000000000 +0000 @@ -26,7 +26,12 @@ module.exports = PassThrough; var Transform = require('./_stream_transform'); -var util = require('util'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + util.inherits(PassThrough, Transform); function PassThrough(options) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_readable.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_readable.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_readable.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_readable.js 2015-01-20 21:22:17.000000000 +0000 @@ -20,25 +20,58 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
module.exports = Readable; + +/**/ +var isArray = require('isarray'); +/**/ + + +/**/ +var Buffer = require('buffer').Buffer; +/**/ + Readable.ReadableState = ReadableState; var EE = require('events').EventEmitter; + +/**/ if (!EE.listenerCount) EE.listenerCount = function(emitter, type) { return emitter.listeners(type).length; }; +/**/ + var Stream = require('stream'); -var util = require('util'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + var StringDecoder; + +/**/ +var debug = require('util'); +if (debug && debug.debuglog) { + debug = debug.debuglog('stream'); +} else { + debug = function () {}; +} +/**/ + + util.inherits(Readable, Stream); function ReadableState(options, stream) { + var Duplex = require('./_stream_duplex'); + options = options || {}; // the point at which it stops calling _read() to fill the buffer // Note: 0 is a valid value, means "don't call _read preemptively ever" var hwm = options.highWaterMark; - this.highWaterMark = (hwm || hwm === 0) ? hwm : 16 * 1024; + var defaultHwm = options.objectMode ? 16 : 16 * 1024; + this.highWaterMark = (hwm || hwm === 0) ? hwm : defaultHwm; // cast to ints. this.highWaterMark = ~~this.highWaterMark; @@ -47,19 +80,13 @@ this.length = 0; this.pipes = null; this.pipesCount = 0; - this.flowing = false; + this.flowing = null; this.ended = false; this.endEmitted = false; this.reading = false; - // In streams that never have any data, and do push(null) right away, - // the consumer can miss the 'end' event if they do some I/O before - // consuming the stream. So, we don't emit('end') until some reading - // happens. - this.calledRead = false; - // a flag to be able to tell if the onwrite cb is called immediately, - // or on a later tick. We set this to true at first, becuase any + // or on a later tick. We set this to true at first, because any // actions that shouldn't happen until "later" should generally also // not happen before the first write call. 
this.sync = true; @@ -75,6 +102,9 @@ // make all the buffer merging and length checks go away this.objectMode = !!options.objectMode; + if (stream instanceof Duplex) + this.objectMode = this.objectMode || !!options.readableObjectMode; + // Crypto is kind of old and crusty. Historically, its default string // encoding is 'binary' so we have to make this configurable. // Everything else in the universe uses 'utf8', though. @@ -94,13 +124,15 @@ this.encoding = null; if (options.encoding) { if (!StringDecoder) - StringDecoder = require('string_decoder').StringDecoder; + StringDecoder = require('string_decoder/').StringDecoder; this.decoder = new StringDecoder(options.encoding); this.encoding = options.encoding; } } function Readable(options) { + var Duplex = require('./_stream_duplex'); + if (!(this instanceof Readable)) return new Readable(options); @@ -119,7 +151,7 @@ Readable.prototype.push = function(chunk, encoding) { var state = this._readableState; - if (typeof chunk === 'string' && !state.objectMode) { + if (util.isString(chunk) && !state.objectMode) { encoding = encoding || state.defaultEncoding; if (encoding !== state.encoding) { chunk = new Buffer(chunk, encoding); @@ -140,7 +172,7 @@ var er = chunkInvalid(state, chunk); if (er) { stream.emit('error', er); - } else if (chunk === null || chunk === undefined) { + } else if (util.isNullOrUndefined(chunk)) { state.reading = false; if (!state.ended) onEofChunk(stream, state); @@ -155,17 +187,24 @@ if (state.decoder && !addToFront && !encoding) chunk = state.decoder.write(chunk); - // update the buffer info. - state.length += state.objectMode ? 1 : chunk.length; - if (addToFront) { - state.buffer.unshift(chunk); - } else { + if (!addToFront) state.reading = false; - state.buffer.push(chunk); - } - if (state.needReadable) - emitReadable(stream); + // if we want the data now, just emit it. 
+ if (state.flowing && state.length === 0 && !state.sync) { + stream.emit('data', chunk); + stream.read(0); + } else { + // update the buffer info. + state.length += state.objectMode ? 1 : chunk.length; + if (addToFront) + state.buffer.unshift(chunk); + else + state.buffer.push(chunk); + + if (state.needReadable) + emitReadable(stream); + } maybeReadMore(stream, state); } @@ -195,9 +234,10 @@ // backwards compatibility. Readable.prototype.setEncoding = function(enc) { if (!StringDecoder) - StringDecoder = require('string_decoder').StringDecoder; + StringDecoder = require('string_decoder/').StringDecoder; this._readableState.decoder = new StringDecoder(enc); this._readableState.encoding = enc; + return this; }; // Don't raise the hwm > 128MB @@ -221,7 +261,7 @@ if (state.objectMode) return n === 0 ? 0 : 1; - if (isNaN(n) || n === null) { + if (isNaN(n) || util.isNull(n)) { // only flow one buffer at a time if (state.flowing && state.buffer.length) return state.buffer[0].length; @@ -253,11 +293,11 @@ // you can override either this method, or the async _read(n) below. Readable.prototype.read = function(n) { + debug('read', n); var state = this._readableState; - state.calledRead = true; var nOrig = n; - if (typeof n !== 'number' || n > 0) + if (!util.isNumber(n) || n > 0) state.emittedReadable = false; // if we're doing read(0) to trigger a readable event, but we @@ -266,7 +306,11 @@ if (n === 0 && state.needReadable && (state.length >= state.highWaterMark || state.ended)) { - emitReadable(this); + debug('read: emitReadable', state.length, state.ended); + if (state.length === 0 && state.ended) + endReadable(this); + else + emitReadable(this); return null; } @@ -303,17 +347,23 @@ // if we need a readable event, then we need to do some reading. 
var doRead = state.needReadable; + debug('need readable', doRead); // if we currently have less than the highWaterMark, then also read some - if (state.length - n <= state.highWaterMark) + if (state.length === 0 || state.length - n < state.highWaterMark) { doRead = true; + debug('length less than watermark', doRead); + } // however, if we've ended, then there's no point, and if we're already // reading, then it's unnecessary. - if (state.ended || state.reading) + if (state.ended || state.reading) { doRead = false; + debug('reading or ended', doRead); + } if (doRead) { + debug('do read'); state.reading = true; state.sync = true; // if the length is currently zero, then we *need* a readable event. @@ -324,9 +374,8 @@ state.sync = false; } - // If _read called its callback synchronously, then `reading` - // will be false, and we need to re-evaluate how much data we - // can return to the user. + // If _read pushed data synchronously, then `reading` will be false, + // and we need to re-evaluate how much data we can return to the user. if (doRead && !state.reading) n = howMuchToRead(nOrig, state); @@ -336,7 +385,7 @@ else ret = null; - if (ret === null) { + if (util.isNull(ret)) { state.needReadable = true; n = 0; } @@ -348,23 +397,22 @@ if (state.length === 0 && !state.ended) state.needReadable = true; - // If we happened to read() exactly the remaining amount in the - // buffer, and the EOF has been seen at this point, then make sure - // that we emit 'end' on the very next tick. - if (state.ended && !state.endEmitted && state.length === 0) + // If we tried to read() past the EOF, then emit end on the next tick. 
+ if (nOrig !== n && state.ended && state.length === 0) endReadable(this); + if (!util.isNull(ret)) + this.emit('data', ret); + return ret; }; function chunkInvalid(state, chunk) { var er = null; - if (!Buffer.isBuffer(chunk) && - 'string' !== typeof chunk && - chunk !== null && - chunk !== undefined && - !state.objectMode && - !er) { + if (!util.isBuffer(chunk) && + !util.isString(chunk) && + !util.isNullOrUndefined(chunk) && + !state.objectMode) { er = new TypeError('Invalid non-string/buffer chunk'); } return er; @@ -372,7 +420,7 @@ function onEofChunk(stream, state) { - if (state.decoder && !state.ended && state.decoder.end) { + if (state.decoder && !state.ended) { var chunk = state.decoder.end(); if (chunk && chunk.length) { state.buffer.push(chunk); @@ -381,12 +429,8 @@ } state.ended = true; - // if we've ended and we have some data left, then emit - // 'readable' now to make sure it gets picked up. - if (state.length > 0) - emitReadable(stream); - else - endReadable(stream); + // emit 'readable' now to make sure it gets picked up. 
+ emitReadable(stream); } // Don't emit readable right away in sync mode, because this can trigger @@ -395,20 +439,22 @@ function emitReadable(stream) { var state = stream._readableState; state.needReadable = false; - if (state.emittedReadable) - return; - - state.emittedReadable = true; - if (state.sync) - process.nextTick(function() { + if (!state.emittedReadable) { + debug('emitReadable', state.flowing); + state.emittedReadable = true; + if (state.sync) + process.nextTick(function() { + emitReadable_(stream); + }); + else emitReadable_(stream); - }); - else - emitReadable_(stream); + } } function emitReadable_(stream) { + debug('emit readable'); stream.emit('readable'); + flow(stream); } @@ -431,6 +477,7 @@ var len = state.length; while (!state.reading && !state.flowing && !state.ended && state.length < state.highWaterMark) { + debug('maybeReadMore read 0'); stream.read(0); if (len === state.length) // didn't get any data, stop spinning. @@ -465,6 +512,7 @@ break; } state.pipesCount += 1; + debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts); var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && @@ -478,11 +526,14 @@ dest.on('unpipe', onunpipe); function onunpipe(readable) { - if (readable !== src) return; - cleanup(); + debug('onunpipe'); + if (readable === src) { + cleanup(); + } } function onend() { + debug('onend'); dest.end(); } @@ -494,6 +545,7 @@ dest.on('drain', ondrain); function cleanup() { + debug('cleanup'); // cleanup event handlers once the pipe is broken dest.removeListener('close', onclose); dest.removeListener('finish', onfinish); @@ -502,19 +554,34 @@ dest.removeListener('unpipe', onunpipe); src.removeListener('end', onend); src.removeListener('end', cleanup); + src.removeListener('data', ondata); // if the reader is waiting for a drain event from this // specific writer, then it would cause it to never start // flowing again. // So, if this is awaiting a drain, then we just call it now. 
// If we don't know, then assume that we are waiting for one. - if (!dest._writableState || dest._writableState.needDrain) + if (state.awaitDrain && + (!dest._writableState || dest._writableState.needDrain)) ondrain(); } + src.on('data', ondata); + function ondata(chunk) { + debug('ondata'); + var ret = dest.write(chunk); + if (false === ret) { + debug('false write response, pause', + src._readableState.awaitDrain); + src._readableState.awaitDrain++; + src.pause(); + } + } + // if the dest has an error, then stop piping into it. // however, don't suppress the throwing behavior for this. function onerror(er) { + debug('onerror', er); unpipe(); dest.removeListener('error', onerror); if (EE.listenerCount(dest, 'error') === 0) @@ -524,7 +591,7 @@ // is attached before any userland ones. NEVER DO THIS. if (!dest._events || !dest._events.error) dest.on('error', onerror); - else if (Array.isArray(dest._events.error)) + else if (isArray(dest._events.error)) dest._events.error.unshift(onerror); else dest._events.error = [onerror, dest._events.error]; @@ -538,12 +605,14 @@ } dest.once('close', onclose); function onfinish() { + debug('onfinish'); dest.removeListener('close', onclose); unpipe(); } dest.once('finish', onfinish); function unpipe() { + debug('unpipe'); src.unpipe(dest); } @@ -552,16 +621,8 @@ // start the flow if it hasn't been started already. if (!state.flowing) { - // the handler that waits for readable events after all - // the data gets sucked out in flow. - // This would be easier to follow with a .once() handler - // in flow(), but that is too slow. 
- this.on('readable', pipeOnReadable); - - state.flowing = true; - process.nextTick(function() { - flow(src); - }); + debug('pipe resume'); + src.resume(); } return dest; @@ -569,63 +630,15 @@ function pipeOnDrain(src) { return function() { - var dest = this; var state = src._readableState; - state.awaitDrain--; - if (state.awaitDrain === 0) + debug('pipeOnDrain', state.awaitDrain); + if (state.awaitDrain) + state.awaitDrain--; + if (state.awaitDrain === 0 && EE.listenerCount(src, 'data')) { + state.flowing = true; flow(src); - }; -} - -function flow(src) { - var state = src._readableState; - var chunk; - state.awaitDrain = 0; - - function write(dest, i, list) { - var written = dest.write(chunk); - if (false === written) { - state.awaitDrain++; } - } - - while (state.pipesCount && null !== (chunk = src.read())) { - - if (state.pipesCount === 1) - write(state.pipes, 0, null); - else - state.pipes.forEach(write); - - src.emit('data', chunk); - - // if anyone needs a drain, then we have to wait for that. - if (state.awaitDrain > 0) - return; - } - - // if every destination was unpiped, either before entering this - // function, or in the while loop, then stop flowing. - // - // NB: This is a pretty rare edge case. - if (state.pipesCount === 0) { - state.flowing = false; - - // if there were data event listeners added, then switch to old mode. - if (EE.listenerCount(src, 'data') > 0) - emitDataEvents(src); - return; - } - - // at this point, no one needed a drain, so we just ran out of data - // on the next readable event, start it over again. - state.ranOut = true; -} - -function pipeOnReadable() { - if (this._readableState.ranOut) { - this._readableState.ranOut = false; - flow(this); - } + }; } @@ -648,7 +661,6 @@ // got a match. 
state.pipes = null; state.pipesCount = 0; - this.removeListener('readable', pipeOnReadable); state.flowing = false; if (dest) dest.emit('unpipe', this); @@ -663,7 +675,6 @@ var len = state.pipesCount; state.pipes = null; state.pipesCount = 0; - this.removeListener('readable', pipeOnReadable); state.flowing = false; for (var i = 0; i < len; i++) @@ -672,7 +683,7 @@ } // try to find the right one. - var i = state.pipes.indexOf(dest); + var i = indexOf(state.pipes, dest); if (i === -1) return this; @@ -691,8 +702,11 @@ Readable.prototype.on = function(ev, fn) { var res = Stream.prototype.on.call(this, ev, fn); - if (ev === 'data' && !this._readableState.flowing) - emitDataEvents(this); + // If listening to data, and it has not explicitly been paused, + // then call resume to start the flow of data on the next tick. + if (ev === 'data' && false !== this._readableState.flowing) { + this.resume(); + } if (ev === 'readable' && this.readable) { var state = this._readableState; @@ -701,7 +715,11 @@ state.emittedReadable = false; state.needReadable = true; if (!state.reading) { - this.read(0); + var self = this; + process.nextTick(function() { + debug('readable nexttick read 0'); + self.read(0); + }); } else if (state.length) { emitReadable(this, state); } @@ -715,63 +733,54 @@ // pause() and resume() are remnants of the legacy readable stream API // If the user uses them, then switch into old mode. 
Readable.prototype.resume = function() { - emitDataEvents(this); - this.read(0); - this.emit('resume'); + var state = this._readableState; + if (!state.flowing) { + debug('resume'); + state.flowing = true; + if (!state.reading) { + debug('resume read 0'); + this.read(0); + } + resume(this, state); + } + return this; }; +function resume(stream, state) { + if (!state.resumeScheduled) { + state.resumeScheduled = true; + process.nextTick(function() { + resume_(stream, state); + }); + } +} + +function resume_(stream, state) { + state.resumeScheduled = false; + stream.emit('resume'); + flow(stream); + if (state.flowing && !state.reading) + stream.read(0); +} + Readable.prototype.pause = function() { - emitDataEvents(this, true); - this.emit('pause'); + debug('call pause flowing=%j', this._readableState.flowing); + if (false !== this._readableState.flowing) { + debug('pause'); + this._readableState.flowing = false; + this.emit('pause'); + } + return this; }; -function emitDataEvents(stream, startPaused) { +function flow(stream) { var state = stream._readableState; - + debug('flow', state.flowing); if (state.flowing) { - // https://github.com/isaacs/readable-stream/issues/16 - throw new Error('Cannot switch to old mode now.'); + do { + var chunk = stream.read(); + } while (null !== chunk && state.flowing); } - - var paused = startPaused || false; - var readable = false; - - // convert to an old-style stream. 
- stream.readable = true; - stream.pipe = Stream.prototype.pipe; - stream.on = stream.addListener = Stream.prototype.on; - - stream.on('readable', function() { - readable = true; - - var c; - while (!paused && (null !== (c = stream.read()))) - stream.emit('data', c); - - if (c === null) { - readable = false; - stream._readableState.needReadable = true; - } - }); - - stream.pause = function() { - paused = true; - this.emit('pause'); - }; - - stream.resume = function() { - paused = false; - if (readable) - process.nextTick(function() { - stream.emit('readable'); - }); - else - this.read(0); - this.emit('resume'); - }; - - // now make it start, just in case it hadn't already. - stream.emit('readable'); } // wrap an old-style stream as the async data source. @@ -783,6 +792,7 @@ var self = this; stream.on('end', function() { + debug('wrapped end'); if (state.decoder && !state.ended) { var chunk = state.decoder.end(); if (chunk && chunk.length) @@ -793,6 +803,7 @@ }); stream.on('data', function(chunk) { + debug('wrapped data'); if (state.decoder) chunk = state.decoder.write(chunk); if (!chunk || !state.objectMode && !chunk.length) @@ -808,8 +819,7 @@ // proxy all the other methods. // important when wrapping filters and duplexes. for (var i in stream) { - if (typeof stream[i] === 'function' && - typeof this[i] === 'undefined') { + if (util.isFunction(stream[i]) && util.isUndefined(this[i])) { this[i] = function(method) { return function() { return stream[method].apply(stream, arguments); }}(i); @@ -818,13 +828,14 @@ // proxy certain important events. var events = ['error', 'close', 'destroy', 'pause', 'resume']; - events.forEach(function(ev) { + forEach(events, function(ev) { stream.on(ev, self.emit.bind(self, ev)); }); // when we try to consume some more bytes, simply unpause the // underlying stream. 
self._read = function(n) { + debug('wrapped _read', n); if (paused) { paused = false; stream.resume(); @@ -913,7 +924,7 @@ if (state.length > 0) throw new Error('endReadable called on non-empty stream'); - if (!state.endEmitted && state.calledRead) { + if (!state.endEmitted) { state.ended = true; process.nextTick(function() { // Check that we didn't get one last unshift. @@ -925,3 +936,16 @@ }); } } + +function forEach (xs, f) { + for (var i = 0, l = xs.length; i < l; i++) { + f(xs[i], i); + } +} + +function indexOf (xs, x) { + for (var i = 0, l = xs.length; i < l; i++) { + if (xs[i] === x) return i; + } + return -1; +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_transform.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_transform.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_transform.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_transform.js 2015-01-20 21:22:17.000000000 +0000 @@ -65,7 +65,12 @@ module.exports = Transform; var Duplex = require('./_stream_duplex'); -var util = require('util'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + util.inherits(Transform, Duplex); @@ -92,7 +97,7 @@ ts.writechunk = null; ts.writecb = null; - if (data !== null && data !== undefined) + if (!util.isNullOrUndefined(data)) stream.push(data); if (cb) @@ -112,7 +117,7 @@ Duplex.call(this, options); - var ts = this._transformState = new TransformState(options, this); + this._transformState = new TransformState(options, this); // when the writable side finishes, then flush out anything remaining. var stream = this; @@ -125,8 +130,8 @@ // sync guard flag. 
this._readableState.sync = false; - this.once('finish', function() { - if ('function' === typeof this._flush) + this.once('prefinish', function() { + if (util.isFunction(this._flush)) this._flush(function(er) { done(stream, er); }); @@ -174,7 +179,7 @@ Transform.prototype._read = function(n) { var ts = this._transformState; - if (ts.writechunk !== null && ts.writecb && !ts.transforming) { + if (!util.isNull(ts.writechunk) && ts.writecb && !ts.transforming) { ts.transforming = true; this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); } else { @@ -192,7 +197,6 @@ // if there's nothing in the write buffer, then that means // that nothing more will ever be provided var ws = stream._writableState; - var rs = stream._readableState; var ts = stream._transformState; if (ws.length) diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_writable.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_writable.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_writable.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/lib/_stream_writable.js 2015-01-20 21:22:17.000000000 +0000 @@ -24,10 +24,19 @@ // the drain event emission and buffering. 
module.exports = Writable; + +/**/ +var Buffer = require('buffer').Buffer; +/**/ + Writable.WritableState = WritableState; -var util = require('util'); -var assert = require('assert'); + +/**/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/**/ + var Stream = require('stream'); util.inherits(Writable, Stream); @@ -39,18 +48,24 @@ } function WritableState(options, stream) { + var Duplex = require('./_stream_duplex'); + options = options || {}; // the point at which write() starts returning false // Note: 0 is a valid value, means that we always return false if // the entire buffer is not flushed immediately on write() var hwm = options.highWaterMark; - this.highWaterMark = (hwm || hwm === 0) ? hwm : 16 * 1024; + var defaultHwm = options.objectMode ? 16 : 16 * 1024; + this.highWaterMark = (hwm || hwm === 0) ? hwm : defaultHwm; // object stream flag to indicate whether or not this stream // contains buffers or objects. this.objectMode = !!options.objectMode; + if (stream instanceof Duplex) + this.objectMode = this.objectMode || !!options.writableObjectMode; + // cast to ints. this.highWaterMark = ~~this.highWaterMark; @@ -81,8 +96,11 @@ // a flag to see when we're in the middle of a write. this.writing = false; + // when true all writes will be buffered until .uncork() call + this.corked = 0; + // a flag to be able to tell if the onwrite cb is called immediately, - // or on a later tick. We set this to true at first, becuase any + // or on a later tick. We set this to true at first, because any // actions that shouldn't happen until "later" should generally also // not happen before the first write call. 
this.sync = true; @@ -104,12 +122,25 @@ this.writelen = 0; this.buffer = []; + + // number of pending user-supplied write callbacks + // this must be 0 before 'finish' can be emitted + this.pendingcb = 0; + + // emit prefinish if the only thing we're waiting for is _write cbs + // This is relevant for synchronous Transform streams + this.prefinished = false; + + // True if the error was already emitted and should not be thrown again + this.errorEmitted = false; } function Writable(options) { + var Duplex = require('./_stream_duplex'); + // Writable ctor is applied to Duplexes, though they're not // instanceof Writable, they're instanceof Readable. - if (!(this instanceof Writable) && !(this instanceof require('./_stream_duplex'))) + if (!(this instanceof Writable) && !(this instanceof Duplex)) return new Writable(options); this._writableState = new WritableState(options, this); @@ -142,10 +173,9 @@ // how many bytes or characters. function validChunk(stream, state, chunk, cb) { var valid = true; - if (!Buffer.isBuffer(chunk) && - 'string' !== typeof chunk && - chunk !== null && - chunk !== undefined && + if (!util.isBuffer(chunk) && + !util.isString(chunk) && + !util.isNullOrUndefined(chunk) && !state.objectMode) { var er = new TypeError('Invalid non-string/buffer chunk'); stream.emit('error', er); @@ -161,31 +191,54 @@ var state = this._writableState; var ret = false; - if (typeof encoding === 'function') { + if (util.isFunction(encoding)) { cb = encoding; encoding = null; } - if (Buffer.isBuffer(chunk)) + if (util.isBuffer(chunk)) encoding = 'buffer'; else if (!encoding) encoding = state.defaultEncoding; - if (typeof cb !== 'function') + if (!util.isFunction(cb)) cb = function() {}; if (state.ended) writeAfterEnd(this, state, cb); - else if (validChunk(this, state, chunk, cb)) + else if (validChunk(this, state, chunk, cb)) { + state.pendingcb++; ret = writeOrBuffer(this, state, chunk, encoding, cb); + } return ret; }; +Writable.prototype.cork = function() { + var 
state = this._writableState; + + state.corked++; +}; + +Writable.prototype.uncork = function() { + var state = this._writableState; + + if (state.corked) { + state.corked--; + + if (!state.writing && + !state.corked && + !state.finished && + !state.bufferProcessing && + state.buffer.length) + clearBuffer(this, state); + } +}; + function decodeChunk(state, chunk, encoding) { if (!state.objectMode && state.decodeStrings !== false && - typeof chunk === 'string') { + util.isString(chunk)) { chunk = new Buffer(chunk, encoding); } return chunk; @@ -196,40 +249,49 @@ // If we return false, then we need a drain event, so set that flag. function writeOrBuffer(stream, state, chunk, encoding, cb) { chunk = decodeChunk(state, chunk, encoding); - if (Buffer.isBuffer(chunk)) + if (util.isBuffer(chunk)) encoding = 'buffer'; var len = state.objectMode ? 1 : chunk.length; state.length += len; var ret = state.length < state.highWaterMark; - state.needDrain = !ret; + // we must ensure that previous needDrain will not be reset to false. 
+ if (!ret) + state.needDrain = true; - if (state.writing) + if (state.writing || state.corked) state.buffer.push(new WriteReq(chunk, encoding, cb)); else - doWrite(stream, state, len, chunk, encoding, cb); + doWrite(stream, state, false, len, chunk, encoding, cb); return ret; } -function doWrite(stream, state, len, chunk, encoding, cb) { +function doWrite(stream, state, writev, len, chunk, encoding, cb) { state.writelen = len; state.writecb = cb; state.writing = true; state.sync = true; - stream._write(chunk, encoding, state.onwrite); + if (writev) + stream._writev(chunk, state.onwrite); + else + stream._write(chunk, encoding, state.onwrite); state.sync = false; } function onwriteError(stream, state, sync, er, cb) { if (sync) process.nextTick(function() { + state.pendingcb--; cb(er); }); - else + else { + state.pendingcb--; cb(er); + } + stream._writableState.errorEmitted = true; stream.emit('error', er); } @@ -253,8 +315,12 @@ // Check if we're actually ready to finish, but don't emit yet var finished = needFinish(stream, state); - if (!finished && !state.bufferProcessing && state.buffer.length) + if (!finished && + !state.corked && + !state.bufferProcessing && + state.buffer.length) { clearBuffer(stream, state); + } if (sync) { process.nextTick(function() { @@ -269,9 +335,9 @@ function afterWrite(stream, state, finished, cb) { if (!finished) onwriteDrain(stream, state); + state.pendingcb--; cb(); - if (finished) - finishMaybe(stream, state); + finishMaybe(stream, state); } // Must force callback to be called on nextTick, so that we don't @@ -289,51 +355,82 @@ function clearBuffer(stream, state) { state.bufferProcessing = true; - for (var c = 0; c < state.buffer.length; c++) { - var entry = state.buffer[c]; - var chunk = entry.chunk; - var encoding = entry.encoding; - var cb = entry.callback; - var len = state.objectMode ? 
1 : chunk.length; - - doWrite(stream, state, len, chunk, encoding, cb); - - // if we didn't call the onwrite immediately, then - // it means that we need to wait until it does. - // also, that means that the chunk and cb are currently - // being processed, so move the buffer counter past them. - if (state.writing) { - c++; - break; + if (stream._writev && state.buffer.length > 1) { + // Fast case, write everything using _writev() + var cbs = []; + for (var c = 0; c < state.buffer.length; c++) + cbs.push(state.buffer[c].callback); + + // count the one we are adding, as well. + // TODO(isaacs) clean this up + state.pendingcb++; + doWrite(stream, state, true, state.length, state.buffer, '', function(err) { + for (var i = 0; i < cbs.length; i++) { + state.pendingcb--; + cbs[i](err); + } + }); + + // Clear buffer + state.buffer = []; + } else { + // Slow case, write chunks one-by-one + for (var c = 0; c < state.buffer.length; c++) { + var entry = state.buffer[c]; + var chunk = entry.chunk; + var encoding = entry.encoding; + var cb = entry.callback; + var len = state.objectMode ? 1 : chunk.length; + + doWrite(stream, state, false, len, chunk, encoding, cb); + + // if we didn't call the onwrite immediately, then + // it means that we need to wait until it does. + // also, that means that the chunk and cb are currently + // being processed, so move the buffer counter past them. 
+ if (state.writing) { + c++; + break; + } } + + if (c < state.buffer.length) + state.buffer = state.buffer.slice(c); + else + state.buffer.length = 0; } state.bufferProcessing = false; - if (c < state.buffer.length) - state.buffer = state.buffer.slice(c); - else - state.buffer.length = 0; } Writable.prototype._write = function(chunk, encoding, cb) { cb(new Error('not implemented')); + }; +Writable.prototype._writev = null; + Writable.prototype.end = function(chunk, encoding, cb) { var state = this._writableState; - if (typeof chunk === 'function') { + if (util.isFunction(chunk)) { cb = chunk; chunk = null; encoding = null; - } else if (typeof encoding === 'function') { + } else if (util.isFunction(encoding)) { cb = encoding; encoding = null; } - if (typeof chunk !== 'undefined' && chunk !== null) + if (!util.isNullOrUndefined(chunk)) this.write(chunk, encoding); + // .end() fully uncorks + if (state.corked) { + state.corked = 1; + this.uncork(); + } + // ignore unnecessary end() calls. if (!state.ending && !state.finished) endWritable(this, state, cb); @@ -347,11 +444,22 @@ !state.writing); } +function prefinish(stream, state) { + if (!state.prefinished) { + state.prefinished = true; + stream.emit('prefinish'); + } +} + function finishMaybe(stream, state) { var need = needFinish(stream, state); if (need) { - state.finished = true; - stream.emit('finish'); + if (state.pendingcb === 0) { + prefinish(stream, state); + state.finished = true; + stream.emit('finish'); + } else + prefinish(stream, state); } return need; } diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/LICENSE nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -1,27 +1,18 @@ -Copyright (c) Isaac Z. 
Schlueter ("Author") -All rights reserved. +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -The BSD License +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/float.patch nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/float.patch --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/float.patch 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/float.patch 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,604 @@ +diff --git a/lib/util.js b/lib/util.js +index a03e874..9074e8e 100644 +--- a/lib/util.js ++++ b/lib/util.js +@@ -19,430 +19,6 @@ + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-var formatRegExp = /%[sdj%]/g; +-exports.format = function(f) { +- if (!isString(f)) { +- var objects = []; +- for (var i = 0; i < arguments.length; i++) { +- objects.push(inspect(arguments[i])); +- } +- return objects.join(' '); +- } +- +- var i = 1; +- var args = arguments; +- var len = args.length; +- var str = String(f).replace(formatRegExp, function(x) { +- if (x === '%%') return '%'; +- if (i >= len) return x; +- switch (x) { +- case '%s': return String(args[i++]); +- case '%d': return Number(args[i++]); +- case '%j': +- try { +- return JSON.stringify(args[i++]); +- } catch (_) { +- return '[Circular]'; +- } +- default: +- return x; +- } +- }); +- for (var x = args[i]; i < len; x = args[++i]) { +- if (isNull(x) || !isObject(x)) { +- str += ' ' + x; +- } else { +- str += ' ' + inspect(x); +- } +- } +- return str; +-}; +- +- +-// Mark that a method should not be used. +-// Returns a modified function which warns once by default. +-// If --no-deprecation is set, then it is a no-op. +-exports.deprecate = function(fn, msg) { +- // Allow for deprecating things in the process of starting up. 
+- if (isUndefined(global.process)) { +- return function() { +- return exports.deprecate(fn, msg).apply(this, arguments); +- }; +- } +- +- if (process.noDeprecation === true) { +- return fn; +- } +- +- var warned = false; +- function deprecated() { +- if (!warned) { +- if (process.throwDeprecation) { +- throw new Error(msg); +- } else if (process.traceDeprecation) { +- console.trace(msg); +- } else { +- console.error(msg); +- } +- warned = true; +- } +- return fn.apply(this, arguments); +- } +- +- return deprecated; +-}; +- +- +-var debugs = {}; +-var debugEnviron; +-exports.debuglog = function(set) { +- if (isUndefined(debugEnviron)) +- debugEnviron = process.env.NODE_DEBUG || ''; +- set = set.toUpperCase(); +- if (!debugs[set]) { +- if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) { +- var pid = process.pid; +- debugs[set] = function() { +- var msg = exports.format.apply(exports, arguments); +- console.error('%s %d: %s', set, pid, msg); +- }; +- } else { +- debugs[set] = function() {}; +- } +- } +- return debugs[set]; +-}; +- +- +-/** +- * Echos the value of a value. Trys to print the value out +- * in the best way possible given the different types. +- * +- * @param {Object} obj The object to print out. +- * @param {Object} opts Optional options object that alters the output. +- */ +-/* legacy: obj, showHidden, depth, colors*/ +-function inspect(obj, opts) { +- // default options +- var ctx = { +- seen: [], +- stylize: stylizeNoColor +- }; +- // legacy... +- if (arguments.length >= 3) ctx.depth = arguments[2]; +- if (arguments.length >= 4) ctx.colors = arguments[3]; +- if (isBoolean(opts)) { +- // legacy... 
+- ctx.showHidden = opts; +- } else if (opts) { +- // got an "options" object +- exports._extend(ctx, opts); +- } +- // set default options +- if (isUndefined(ctx.showHidden)) ctx.showHidden = false; +- if (isUndefined(ctx.depth)) ctx.depth = 2; +- if (isUndefined(ctx.colors)) ctx.colors = false; +- if (isUndefined(ctx.customInspect)) ctx.customInspect = true; +- if (ctx.colors) ctx.stylize = stylizeWithColor; +- return formatValue(ctx, obj, ctx.depth); +-} +-exports.inspect = inspect; +- +- +-// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics +-inspect.colors = { +- 'bold' : [1, 22], +- 'italic' : [3, 23], +- 'underline' : [4, 24], +- 'inverse' : [7, 27], +- 'white' : [37, 39], +- 'grey' : [90, 39], +- 'black' : [30, 39], +- 'blue' : [34, 39], +- 'cyan' : [36, 39], +- 'green' : [32, 39], +- 'magenta' : [35, 39], +- 'red' : [31, 39], +- 'yellow' : [33, 39] +-}; +- +-// Don't use 'blue' not visible on cmd.exe +-inspect.styles = { +- 'special': 'cyan', +- 'number': 'yellow', +- 'boolean': 'yellow', +- 'undefined': 'grey', +- 'null': 'bold', +- 'string': 'green', +- 'date': 'magenta', +- // "name": intentionally not styling +- 'regexp': 'red' +-}; +- +- +-function stylizeWithColor(str, styleType) { +- var style = inspect.styles[styleType]; +- +- if (style) { +- return '\u001b[' + inspect.colors[style][0] + 'm' + str + +- '\u001b[' + inspect.colors[style][1] + 'm'; +- } else { +- return str; +- } +-} +- +- +-function stylizeNoColor(str, styleType) { +- return str; +-} +- +- +-function arrayToHash(array) { +- var hash = {}; +- +- array.forEach(function(val, idx) { +- hash[val] = true; +- }); +- +- return hash; +-} +- +- +-function formatValue(ctx, value, recurseTimes) { +- // Provide a hook for user-specified inspect functions. 
+- // Check that value is an object with an inspect function on it +- if (ctx.customInspect && +- value && +- isFunction(value.inspect) && +- // Filter out the util module, it's inspect function is special +- value.inspect !== exports.inspect && +- // Also filter out any prototype objects using the circular check. +- !(value.constructor && value.constructor.prototype === value)) { +- var ret = value.inspect(recurseTimes, ctx); +- if (!isString(ret)) { +- ret = formatValue(ctx, ret, recurseTimes); +- } +- return ret; +- } +- +- // Primitive types cannot have properties +- var primitive = formatPrimitive(ctx, value); +- if (primitive) { +- return primitive; +- } +- +- // Look up the keys of the object. +- var keys = Object.keys(value); +- var visibleKeys = arrayToHash(keys); +- +- if (ctx.showHidden) { +- keys = Object.getOwnPropertyNames(value); +- } +- +- // Some type of object without properties can be shortcutted. +- if (keys.length === 0) { +- if (isFunction(value)) { +- var name = value.name ? ': ' + value.name : ''; +- return ctx.stylize('[Function' + name + ']', 'special'); +- } +- if (isRegExp(value)) { +- return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); +- } +- if (isDate(value)) { +- return ctx.stylize(Date.prototype.toString.call(value), 'date'); +- } +- if (isError(value)) { +- return formatError(value); +- } +- } +- +- var base = '', array = false, braces = ['{', '}']; +- +- // Make Array say that they are Array +- if (isArray(value)) { +- array = true; +- braces = ['[', ']']; +- } +- +- // Make functions say that they are functions +- if (isFunction(value)) { +- var n = value.name ? 
': ' + value.name : ''; +- base = ' [Function' + n + ']'; +- } +- +- // Make RegExps say that they are RegExps +- if (isRegExp(value)) { +- base = ' ' + RegExp.prototype.toString.call(value); +- } +- +- // Make dates with properties first say the date +- if (isDate(value)) { +- base = ' ' + Date.prototype.toUTCString.call(value); +- } +- +- // Make error with message first say the error +- if (isError(value)) { +- base = ' ' + formatError(value); +- } +- +- if (keys.length === 0 && (!array || value.length == 0)) { +- return braces[0] + base + braces[1]; +- } +- +- if (recurseTimes < 0) { +- if (isRegExp(value)) { +- return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); +- } else { +- return ctx.stylize('[Object]', 'special'); +- } +- } +- +- ctx.seen.push(value); +- +- var output; +- if (array) { +- output = formatArray(ctx, value, recurseTimes, visibleKeys, keys); +- } else { +- output = keys.map(function(key) { +- return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array); +- }); +- } +- +- ctx.seen.pop(); +- +- return reduceToSingleString(output, base, braces); +-} +- +- +-function formatPrimitive(ctx, value) { +- if (isUndefined(value)) +- return ctx.stylize('undefined', 'undefined'); +- if (isString(value)) { +- var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '') +- .replace(/'/g, "\\'") +- .replace(/\\"/g, '"') + '\''; +- return ctx.stylize(simple, 'string'); +- } +- if (isNumber(value)) { +- // Format -0 as '-0'. Strict equality won't distinguish 0 from -0, +- // so instead we use the fact that 1 / -0 < 0 whereas 1 / 0 > 0 . +- if (value === 0 && 1 / value < 0) +- return ctx.stylize('-0', 'number'); +- return ctx.stylize('' + value, 'number'); +- } +- if (isBoolean(value)) +- return ctx.stylize('' + value, 'boolean'); +- // For some reason typeof null is "object", so special case here. 
+- if (isNull(value)) +- return ctx.stylize('null', 'null'); +-} +- +- +-function formatError(value) { +- return '[' + Error.prototype.toString.call(value) + ']'; +-} +- +- +-function formatArray(ctx, value, recurseTimes, visibleKeys, keys) { +- var output = []; +- for (var i = 0, l = value.length; i < l; ++i) { +- if (hasOwnProperty(value, String(i))) { +- output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, +- String(i), true)); +- } else { +- output.push(''); +- } +- } +- keys.forEach(function(key) { +- if (!key.match(/^\d+$/)) { +- output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, +- key, true)); +- } +- }); +- return output; +-} +- +- +-function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) { +- var name, str, desc; +- desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] }; +- if (desc.get) { +- if (desc.set) { +- str = ctx.stylize('[Getter/Setter]', 'special'); +- } else { +- str = ctx.stylize('[Getter]', 'special'); +- } +- } else { +- if (desc.set) { +- str = ctx.stylize('[Setter]', 'special'); +- } +- } +- if (!hasOwnProperty(visibleKeys, key)) { +- name = '[' + key + ']'; +- } +- if (!str) { +- if (ctx.seen.indexOf(desc.value) < 0) { +- if (isNull(recurseTimes)) { +- str = formatValue(ctx, desc.value, null); +- } else { +- str = formatValue(ctx, desc.value, recurseTimes - 1); +- } +- if (str.indexOf('\n') > -1) { +- if (array) { +- str = str.split('\n').map(function(line) { +- return ' ' + line; +- }).join('\n').substr(2); +- } else { +- str = '\n' + str.split('\n').map(function(line) { +- return ' ' + line; +- }).join('\n'); +- } +- } +- } else { +- str = ctx.stylize('[Circular]', 'special'); +- } +- } +- if (isUndefined(name)) { +- if (array && key.match(/^\d+$/)) { +- return str; +- } +- name = JSON.stringify('' + key); +- if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) { +- name = name.substr(1, name.length - 2); +- name = ctx.stylize(name, 'name'); +- } else { +- name = 
name.replace(/'/g, "\\'") +- .replace(/\\"/g, '"') +- .replace(/(^"|"$)/g, "'"); +- name = ctx.stylize(name, 'string'); +- } +- } +- +- return name + ': ' + str; +-} +- +- +-function reduceToSingleString(output, base, braces) { +- var numLinesEst = 0; +- var length = output.reduce(function(prev, cur) { +- numLinesEst++; +- if (cur.indexOf('\n') >= 0) numLinesEst++; +- return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1; +- }, 0); +- +- if (length > 60) { +- return braces[0] + +- (base === '' ? '' : base + '\n ') + +- ' ' + +- output.join(',\n ') + +- ' ' + +- braces[1]; +- } +- +- return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1]; +-} +- +- + // NOTE: These type checking functions intentionally don't use `instanceof` + // because it is fragile and can be easily faked with `Object.create()`. + function isArray(ar) { +@@ -522,166 +98,10 @@ function isPrimitive(arg) { + exports.isPrimitive = isPrimitive; + + function isBuffer(arg) { +- return arg instanceof Buffer; ++ return Buffer.isBuffer(arg); + } + exports.isBuffer = isBuffer; + + function objectToString(o) { + return Object.prototype.toString.call(o); +-} +- +- +-function pad(n) { +- return n < 10 ? '0' + n.toString(10) : n.toString(10); +-} +- +- +-var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', +- 'Oct', 'Nov', 'Dec']; +- +-// 26 Feb 16:19:34 +-function timestamp() { +- var d = new Date(); +- var time = [pad(d.getHours()), +- pad(d.getMinutes()), +- pad(d.getSeconds())].join(':'); +- return [d.getDate(), months[d.getMonth()], time].join(' '); +-} +- +- +-// log is just a thin wrapper to console.log that prepends a timestamp +-exports.log = function() { +- console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments)); +-}; +- +- +-/** +- * Inherit the prototype methods from one constructor into another. +- * +- * The Function.prototype.inherits from lang.js rewritten as a standalone +- * function (not on Function.prototype). 
NOTE: If this file is to be loaded +- * during bootstrapping this function needs to be rewritten using some native +- * functions as prototype setup using normal JavaScript does not work as +- * expected during bootstrapping (see mirror.js in r114903). +- * +- * @param {function} ctor Constructor function which needs to inherit the +- * prototype. +- * @param {function} superCtor Constructor function to inherit prototype from. +- */ +-exports.inherits = function(ctor, superCtor) { +- ctor.super_ = superCtor; +- ctor.prototype = Object.create(superCtor.prototype, { +- constructor: { +- value: ctor, +- enumerable: false, +- writable: true, +- configurable: true +- } +- }); +-}; +- +-exports._extend = function(origin, add) { +- // Don't do anything if add isn't an object +- if (!add || !isObject(add)) return origin; +- +- var keys = Object.keys(add); +- var i = keys.length; +- while (i--) { +- origin[keys[i]] = add[keys[i]]; +- } +- return origin; +-}; +- +-function hasOwnProperty(obj, prop) { +- return Object.prototype.hasOwnProperty.call(obj, prop); +-} +- +- +-// Deprecated old stuff. 
+- +-exports.p = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- console.error(exports.inspect(arguments[i])); +- } +-}, 'util.p: Use console.error() instead'); +- +- +-exports.exec = exports.deprecate(function() { +- return require('child_process').exec.apply(this, arguments); +-}, 'util.exec is now called `child_process.exec`.'); +- +- +-exports.print = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stdout.write(String(arguments[i])); +- } +-}, 'util.print: Use console.log instead'); +- +- +-exports.puts = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stdout.write(arguments[i] + '\n'); +- } +-}, 'util.puts: Use console.log instead'); +- +- +-exports.debug = exports.deprecate(function(x) { +- process.stderr.write('DEBUG: ' + x + '\n'); +-}, 'util.debug: Use console.error instead'); +- +- +-exports.error = exports.deprecate(function(x) { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stderr.write(arguments[i] + '\n'); +- } +-}, 'util.error: Use console.error instead'); +- +- +-exports.pump = exports.deprecate(function(readStream, writeStream, callback) { +- var callbackCalled = false; +- +- function call(a, b, c) { +- if (callback && !callbackCalled) { +- callback(a, b, c); +- callbackCalled = true; +- } +- } +- +- readStream.addListener('data', function(chunk) { +- if (writeStream.write(chunk) === false) readStream.pause(); +- }); +- +- writeStream.addListener('drain', function() { +- readStream.resume(); +- }); +- +- readStream.addListener('end', function() { +- writeStream.end(); +- }); +- +- readStream.addListener('close', function() { +- call(); +- }); +- +- readStream.addListener('error', function(err) { +- writeStream.end(); +- call(err); +- }); +- +- writeStream.addListener('error', function(err) { +- readStream.destroy(); +- call(err); +- }); +-}, 'util.pump(): Use 
readableStream.pipe() instead'); +- +- +-var uv; +-exports._errnoException = function(err, syscall) { +- if (isUndefined(uv)) uv = process.binding('uv'); +- var errname = uv.errname(err); +- var e = new Error(syscall + ' ' + errname); +- e.code = errname; +- e.errno = errname; +- e.syscall = syscall; +- return e; +-}; ++} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/lib/util.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/lib/util.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/lib/util.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/lib/util.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,107 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. +function isArray(ar) { + return Array.isArray(ar); +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return isObject(re) && objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return isObject(d) && objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return isObject(e) && + (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; 
+} +exports.isPrimitive = isPrimitive; + +function isBuffer(arg) { + return Buffer.isBuffer(arg); +} +exports.isBuffer = isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/package.json nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/package.json --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "core-util-is", + "version": "1.0.1", + "description": "The `util.is*` functions introduced in Node v0.12.", + "main": "lib/util.js", + "repository": { + "type": "git", + "url": "git://github.com/isaacs/core-util-is" + }, + "keywords": [ + "util", + "isBuffer", + "isArray", + "isNumber", + "isString", + "isRegExp", + "isThis", + "isThat", + "polyfill" + ], + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/isaacs/core-util-is/issues" + }, + "readme": "# core-util-is\n\nThe `util.is*` functions introduced in Node v0.12.\n", + "readmeFilename": "README.md", + "homepage": "https://github.com/isaacs/core-util-is", + "_id": "core-util-is@1.0.1", + "dist": { + "shasum": "6b07085aef9a3ccac6ee53bf9d3df0c1521a5538", + "tarball": "http://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz" + }, + "_from": "core-util-is@>=1.0.0 <1.1.0", + "_npmVersion": "1.3.23", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "directories": {}, + "_shasum": "6b07085aef9a3ccac6ee53bf9d3df0c1521a5538", + "_resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.1.tgz", + "scripts": {} +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/README.md nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/README.md --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +# core-util-is + +The `util.is*` functions introduced in Node v0.12. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/util.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/util.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/util.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/core-util-is/util.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,106 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. 
+function isArray(ar) { + return Array.isArray(ar); +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return isObject(re) && objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return isObject(d) && objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return isObject(e) && objectToString(e) === '[object Error]'; +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +function isBuffer(arg) { + return arg instanceof Buffer; +} +exports.isBuffer = isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/build/build.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/build/build.js --- 
nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/build/build.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/build/build.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,209 @@ + +/** + * Require the given path. + * + * @param {String} path + * @return {Object} exports + * @api public + */ + +function require(path, parent, orig) { + var resolved = require.resolve(path); + + // lookup failed + if (null == resolved) { + orig = orig || path; + parent = parent || 'root'; + var err = new Error('Failed to require "' + orig + '" from "' + parent + '"'); + err.path = orig; + err.parent = parent; + err.require = true; + throw err; + } + + var module = require.modules[resolved]; + + // perform real require() + // by invoking the module's + // registered function + if (!module.exports) { + module.exports = {}; + module.client = module.component = true; + module.call(this, module.exports, require.relative(resolved), module); + } + + return module.exports; +} + +/** + * Registered modules. + */ + +require.modules = {}; + +/** + * Registered aliases. + */ + +require.aliases = {}; + +/** + * Resolve `path`. + * + * Lookup: + * + * - PATH/index.js + * - PATH.js + * - PATH + * + * @param {String} path + * @return {String} path or null + * @api private + */ + +require.resolve = function(path) { + if (path.charAt(0) === '/') path = path.slice(1); + var index = path + '/index.js'; + + var paths = [ + path, + path + '.js', + path + '.json', + path + '/index.js', + path + '/index.json' + ]; + + for (var i = 0; i < paths.length; i++) { + var path = paths[i]; + if (require.modules.hasOwnProperty(path)) return path; + } + + if (require.aliases.hasOwnProperty(index)) { + return require.aliases[index]; + } +}; + +/** + * Normalize `path` relative to the current path. 
+ * + * @param {String} curr + * @param {String} path + * @return {String} + * @api private + */ + +require.normalize = function(curr, path) { + var segs = []; + + if ('.' != path.charAt(0)) return path; + + curr = curr.split('/'); + path = path.split('/'); + + for (var i = 0; i < path.length; ++i) { + if ('..' == path[i]) { + curr.pop(); + } else if ('.' != path[i] && '' != path[i]) { + segs.push(path[i]); + } + } + + return curr.concat(segs).join('/'); +}; + +/** + * Register module at `path` with callback `definition`. + * + * @param {String} path + * @param {Function} definition + * @api private + */ + +require.register = function(path, definition) { + require.modules[path] = definition; +}; + +/** + * Alias a module definition. + * + * @param {String} from + * @param {String} to + * @api private + */ + +require.alias = function(from, to) { + if (!require.modules.hasOwnProperty(from)) { + throw new Error('Failed to alias "' + from + '", it does not exist'); + } + require.aliases[to] = from; +}; + +/** + * Return a require function relative to the `parent` path. + * + * @param {String} parent + * @return {Function} + * @api private + */ + +require.relative = function(parent) { + var p = require.normalize(parent, '..'); + + /** + * lastIndexOf helper. + */ + + function lastIndexOf(arr, obj) { + var i = arr.length; + while (i--) { + if (arr[i] === obj) return i; + } + return -1; + } + + /** + * The relative require() itself. + */ + + function localRequire(path) { + var resolved = localRequire.resolve(path); + return require(resolved, parent, path); + } + + /** + * Resolve relative to the parent. + */ + + localRequire.resolve = function(path) { + var c = path.charAt(0); + if ('/' == c) return path.slice(1); + if ('.' 
== c) return require.normalize(p, path); + + // resolve deps by returning + // the dep in the nearest "deps" + // directory + var segs = parent.split('/'); + var i = lastIndexOf(segs, 'deps') + 1; + if (!i) i = 0; + path = segs.slice(0, i + 1).join('/') + '/deps/' + path; + return path; + }; + + /** + * Check if module is defined at `path`. + */ + + localRequire.exists = function(path) { + return require.modules.hasOwnProperty(localRequire.resolve(path)); + }; + + return localRequire; +}; +require.register("isarray/index.js", function(exports, require, module){ +module.exports = Array.isArray || function (arr) { + return Object.prototype.toString.call(arr) == '[object Array]'; +}; + +}); +require.alias("isarray/index.js", "isarray/index.js"); + diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/component.json nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/component.json --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/component.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/component.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +{ + "name" : "isarray", + "description" : "Array#isArray for older browsers", + "version" : "0.0.1", + "repository" : "juliangruber/isarray", + "homepage": "https://github.com/juliangruber/isarray", + "main" : "index.js", + "scripts" : [ + "index.js" + ], + "dependencies" : {}, + "keywords": ["browser","isarray","array"], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/index.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/index.js --- 
nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +module.exports = Array.isArray || function (arr) { + return Object.prototype.toString.call(arr) == '[object Array]'; +}; diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/package.json nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/package.json --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "isarray", + "description": "Array#isArray for older browsers", + "version": "0.0.1", + "repository": { + "type": "git", + "url": "git://github.com/juliangruber/isarray.git" + }, + "homepage": "https://github.com/juliangruber/isarray", + "main": "index.js", + "scripts": { + "test": "tap test/*.js" + }, + "dependencies": {}, + "devDependencies": { + "tap": "*" + }, + "keywords": [ + "browser", + "isarray", + "array" + ], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT", + "readme": "\n# isarray\n\n`Array#isArray` for older browsers.\n\n## Usage\n\n```js\nvar isArray = require('isarray');\n\nconsole.log(isArray([])); // => true\nconsole.log(isArray({})); // => false\n```\n\n## Installation\n\nWith [npm](http://npmjs.org) do\n\n```bash\n$ npm install isarray\n```\n\nThen bundle for the browser with\n[browserify](https://github.com/substack/browserify).\n\nWith [component](http://component.io) do\n\n```bash\n$ component install 
juliangruber/isarray\n```\n\n## License\n\n(MIT)\n\nCopyright (c) 2013 Julian Gruber <julian@juliangruber.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n", + "readmeFilename": "README.md", + "_id": "isarray@0.0.1", + "dist": { + "shasum": "8a18acfca9a8f4177e09abfc6038939b05d1eedf", + "tarball": "http://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" + }, + "_from": "isarray@0.0.1", + "_npmVersion": "1.2.18", + "_npmUser": { + "name": "juliangruber", + "email": "julian@juliangruber.com" + }, + "maintainers": [ + { + "name": "juliangruber", + "email": "julian@juliangruber.com" + } + ], + "directories": {}, + "_shasum": "8a18acfca9a8f4177e09abfc6038939b05d1eedf", + "_resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "bugs": { + "url": "https://github.com/juliangruber/isarray/issues" + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/README.md 
nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/README.md --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/isarray/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ + +# isarray + +`Array#isArray` for older browsers. + +## Usage + +```js +var isArray = require('isarray'); + +console.log(isArray([])); // => true +console.log(isArray({})); // => false +``` + +## Installation + +With [npm](http://npmjs.org) do + +```bash +$ npm install isarray +``` + +Then bundle for the browser with +[browserify](https://github.com/substack/browserify). + +With [component](http://component.io) do + +```bash +$ component install juliangruber/isarray +``` + +## License + +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/index.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/index.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,221 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +var Buffer = require('buffer').Buffer; + +var isBufferEncoding = Buffer.isEncoding + || function(encoding) { + switch (encoding && encoding.toLowerCase()) { + case 'hex': case 'utf8': case 'utf-8': case 'ascii': case 'binary': case 'base64': case 'ucs2': case 'ucs-2': case 'utf16le': case 'utf-16le': case 'raw': return true; + default: return false; + } + } + + +function assertEncoding(encoding) { + if (encoding && !isBufferEncoding(encoding)) { + throw new Error('Unknown encoding: ' + encoding); + } +} + +// StringDecoder provides an interface for efficiently splitting a series of +// buffers into a series of JS strings without breaking apart multi-byte +// characters. CESU-8 is handled as part of the UTF-8 encoding. +// +// @TODO Handling all encodings inside a single object makes it very difficult +// to reason about this code, so it should be split up in the future. +// @TODO There should be a utf8-strict encoding that rejects invalid UTF-8 code +// points as used by CESU-8. +var StringDecoder = exports.StringDecoder = function(encoding) { + this.encoding = (encoding || 'utf8').toLowerCase().replace(/[-_]/, ''); + assertEncoding(encoding); + switch (this.encoding) { + case 'utf8': + // CESU-8 represents each of Surrogate Pair by 3-bytes + this.surrogateSize = 3; + break; + case 'ucs2': + case 'utf16le': + // UTF-16 represents each of Surrogate Pair by 2-bytes + this.surrogateSize = 2; + this.detectIncompleteChar = utf16DetectIncompleteChar; + break; + case 'base64': + // Base-64 stores 3 bytes in 4 chars, and pads the remainder. + this.surrogateSize = 3; + this.detectIncompleteChar = base64DetectIncompleteChar; + break; + default: + this.write = passThroughWrite; + return; + } + + // Enough space to store all bytes of a single character. UTF-8 needs 4 + // bytes, but CESU-8 may require up to 6 (3 bytes per surrogate). + this.charBuffer = new Buffer(6); + // Number of bytes received for the current incomplete multi-byte character. 
+ this.charReceived = 0; + // Number of bytes expected for the current incomplete multi-byte character. + this.charLength = 0; +}; + + +// write decodes the given buffer and returns it as JS string that is +// guaranteed to not contain any partial multi-byte characters. Any partial +// character found at the end of the buffer is buffered up, and will be +// returned when calling write again with the remaining bytes. +// +// Note: Converting a Buffer containing an orphan surrogate to a String +// currently works, but converting a String to a Buffer (via `new Buffer`, or +// Buffer#write) will replace incomplete surrogates with the unicode +// replacement character. See https://codereview.chromium.org/121173009/ . +StringDecoder.prototype.write = function(buffer) { + var charStr = ''; + // if our last write ended with an incomplete multibyte character + while (this.charLength) { + // determine how many remaining bytes this buffer has to offer for this char + var available = (buffer.length >= this.charLength - this.charReceived) ? + this.charLength - this.charReceived : + buffer.length; + + // add the new bytes to the char buffer + buffer.copy(this.charBuffer, this.charReceived, 0, available); + this.charReceived += available; + + if (this.charReceived < this.charLength) { + // still not enough chars in this buffer? wait for more ... 
+ return ''; + } + + // remove bytes belonging to the current character from the buffer + buffer = buffer.slice(available, buffer.length); + + // get the character that was split + charStr = this.charBuffer.slice(0, this.charLength).toString(this.encoding); + + // CESU-8: lead surrogate (D800-DBFF) is also the incomplete character + var charCode = charStr.charCodeAt(charStr.length - 1); + if (charCode >= 0xD800 && charCode <= 0xDBFF) { + this.charLength += this.surrogateSize; + charStr = ''; + continue; + } + this.charReceived = this.charLength = 0; + + // if there are no more bytes in this buffer, just emit our char + if (buffer.length === 0) { + return charStr; + } + break; + } + + // determine and set charLength / charReceived + this.detectIncompleteChar(buffer); + + var end = buffer.length; + if (this.charLength) { + // buffer the incomplete character bytes we got + buffer.copy(this.charBuffer, 0, buffer.length - this.charReceived, end); + end -= this.charReceived; + } + + charStr += buffer.toString(this.encoding, 0, end); + + var end = charStr.length - 1; + var charCode = charStr.charCodeAt(end); + // CESU-8: lead surrogate (D800-DBFF) is also the incomplete character + if (charCode >= 0xD800 && charCode <= 0xDBFF) { + var size = this.surrogateSize; + this.charLength += size; + this.charReceived += size; + this.charBuffer.copy(this.charBuffer, size, 0, size); + buffer.copy(this.charBuffer, 0, 0, size); + return charStr.substring(0, end); + } + + // or just emit the charStr + return charStr; +}; + +// detectIncompleteChar determines if there is an incomplete UTF-8 character at +// the end of the given buffer. If so, it sets this.charLength to the byte +// length that character, and sets this.charReceived to the number of bytes +// that are available for this character. +StringDecoder.prototype.detectIncompleteChar = function(buffer) { + // determine how many bytes we have to check at the end of this buffer + var i = (buffer.length >= 3) ? 
3 : buffer.length; + + // Figure out if one of the last i bytes of our buffer announces an + // incomplete char. + for (; i > 0; i--) { + var c = buffer[buffer.length - i]; + + // See http://en.wikipedia.org/wiki/UTF-8#Description + + // 110XXXXX + if (i == 1 && c >> 5 == 0x06) { + this.charLength = 2; + break; + } + + // 1110XXXX + if (i <= 2 && c >> 4 == 0x0E) { + this.charLength = 3; + break; + } + + // 11110XXX + if (i <= 3 && c >> 3 == 0x1E) { + this.charLength = 4; + break; + } + } + this.charReceived = i; +}; + +StringDecoder.prototype.end = function(buffer) { + var res = ''; + if (buffer && buffer.length) + res = this.write(buffer); + + if (this.charReceived) { + var cr = this.charReceived; + var buf = this.charBuffer; + var enc = this.encoding; + res += buf.slice(0, cr).toString(enc); + } + + return res; +}; + +function passThroughWrite(buffer) { + return buffer.toString(this.encoding); +} + +function utf16DetectIncompleteChar(buffer) { + this.charReceived = buffer.length % 2; + this.charLength = this.charReceived ? 2 : 0; +} + +function base64DetectIncompleteChar(buffer) { + this.charReceived = buffer.length % 3; + this.charLength = this.charReceived ? 3 : 0; +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/LICENSE nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +Copyright Joyent, Inc. and other Node contributors. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/.npmignore nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/.npmignore 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +build +test diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/package.json nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/package.json --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +{ + "name": "string_decoder", + "version": "0.10.31", + "description": "The string_decoder module from Node core", + "main": "index.js", + "dependencies": {}, + "devDependencies": { + "tap": "~0.4.8" + }, + "scripts": { + "test": "tap test/simple/*.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/rvagg/string_decoder.git" + }, + "homepage": "https://github.com/rvagg/string_decoder", + "keywords": [ + "string", + "decoder", + "browser", + "browserify" + ], + "license": "MIT", + "gitHead": "d46d4fd87cf1d06e031c23f1ba170ca7d4ade9a0", + "bugs": { + "url": "https://github.com/rvagg/string_decoder/issues" + }, + "_id": "string_decoder@0.10.31", + "_shasum": "62e203bc41766c6c28c9fc84301dab1c5310fa94", + "_from": "string_decoder@>=0.10.0 <0.11.0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "rvagg", + "email": "rod@vagg.org" + }, + "maintainers": [ + { + 
"name": "substack", + "email": "mail@substack.net" + }, + { + "name": "rvagg", + "email": "rod@vagg.org" + } + ], + "dist": { + "shasum": "62e203bc41766c6c28c9fc84301dab1c5310fa94", + "tarball": "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "readme": "ERROR: No README data found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/README.md nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/README.md --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/node_modules/string_decoder/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,7 @@ +**string_decoder.js** (`require('string_decoder')`) from Node.js core + +Copyright Joyent, Inc. and other Node contributors. See LICENCE file for details. + +Version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10. **Prefer the stable version over the unstable.** + +The *build/* directory contains a build script that will scrape the source from the [joyent/node](https://github.com/joyent/node) repo given a specific Node version. 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/package.json nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/package.json --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,14 @@ { "name": "readable-stream", - "version": "1.0.24", - "description": "An exploration of a new kind of readable streams for Node.js", + "version": "1.1.13", + "description": "Streams3, a user-land copy of the stream library from Node.js v0.11.x", "main": "readable.js", - "dependencies": {}, + "dependencies": { + "core-util-is": "~1.0.0", + "isarray": "0.0.1", + "string_decoder": "~0.10.x", + "inherits": "~2.0.1" + }, "devDependencies": { "tap": "~0.2.6" }, @@ -19,18 +24,47 @@ "stream", "pipe" ], + "browser": { + "util": false + }, "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me", "url": "http://blog.izs.me/" }, - "license": "BSD", - "readme": "# readable-stream\n\nA new class of streams for Node.js\n\nThis module provides the new Stream base classes introduced in Node\nv0.10, for use in Node v0.8. You can use it to have programs that\nhave to work with node v0.8, while being forward-compatible for v0.10\nand beyond. When you drop support for v0.8, you can remove this\nmodule, and only use the native streams.\n\nThis is almost exactly the same codebase as appears in Node v0.10.\nHowever:\n\n1. The exported object is actually the Readable class. Decorating the\n native `stream` module would be global pollution.\n2. In v0.10, you can safely use `base64` as an argument to\n `setEncoding` in Readable streams. However, in v0.8, the\n StringDecoder class has no `end()` method, which is problematic for\n Base64. 
So, don't use that, because it'll break and be weird.\n\nOther than that, the API is the same as `require('stream')` in v0.10,\nso the API docs are reproduced below.\n\n----------\n\n Stability: 2 - Unstable\n\nA stream is an abstract interface implemented by various objects in\nNode. For example a request to an HTTP server is a stream, as is\nstdout. Streams are readable, writable, or both. All streams are\ninstances of [EventEmitter][]\n\nYou can load the Stream base classes by doing `require('stream')`.\nThere are base classes provided for Readable streams, Writable\nstreams, Duplex streams, and Transform streams.\n\n## Compatibility\n\nIn earlier versions of Node, the Readable stream interface was\nsimpler, but also less powerful and less useful.\n\n* Rather than waiting for you to call the `read()` method, `'data'`\n events would start emitting immediately. If you needed to do some\n I/O to decide how to handle data, then you had to store the chunks\n in some kind of buffer so that they would not be lost.\n* The `pause()` method was advisory, rather than guaranteed. This\n meant that you still had to be prepared to receive `'data'` events\n even when the stream was in a paused state.\n\nIn Node v0.10, the Readable class described below was added. For\nbackwards compatibility with older Node programs, Readable streams\nswitch into \"old mode\" when a `'data'` event handler is added, or when\nthe `pause()` or `resume()` methods are called. The effect is that,\neven if you are not using the new `read()` method and `'readable'`\nevent, you no longer have to worry about losing `'data'` chunks.\n\nMost programs will continue to function normally. However, this\nintroduces an edge case in the following conditions:\n\n* No `'data'` event handler is added.\n* The `pause()` and `resume()` methods are never called.\n\nFor example, consider the following code:\n\n```javascript\n// WARNING! 
BROKEN!\nnet.createServer(function(socket) {\n\n // we add an 'end' method, but never consume the data\n socket.on('end', function() {\n // It will never get here.\n socket.end('I got your message (but didnt read it)\\n');\n });\n\n}).listen(1337);\n```\n\nIn versions of node prior to v0.10, the incoming message data would be\nsimply discarded. However, in Node v0.10 and beyond, the socket will\nremain paused forever.\n\nThe workaround in this situation is to call the `resume()` method to\ntrigger \"old mode\" behavior:\n\n```javascript\n// Workaround\nnet.createServer(function(socket) {\n\n socket.on('end', function() {\n socket.end('I got your message (but didnt read it)\\n');\n });\n\n // start the flow of data, discarding it.\n socket.resume();\n\n}).listen(1337);\n```\n\nIn addition to new Readable streams switching into old-mode, pre-v0.10\nstyle streams can be wrapped in a Readable class using the `wrap()`\nmethod.\n\n## Class: stream.Readable\n\n\n\nA `Readable Stream` has the following methods, members, and events.\n\nNote that `stream.Readable` is an abstract class designed to be\nextended with an underlying implementation of the `_read(size)`\nmethod. (See below.)\n\n### new stream.Readable([options])\n\n* `options` {Object}\n * `highWaterMark` {Number} The maximum number of bytes to store in\n the internal buffer before ceasing to read from the underlying\n resource. Default=16kb\n * `encoding` {String} If specified, then buffers will be decoded to\n strings using the specified encoding. Default=null\n * `objectMode` {Boolean} Whether this stream should behave\n as a stream of objects. 
Meaning that stream.read(n) returns\n a single value instead of a Buffer of size n\n\nIn classes that extend the Readable class, make sure to call the\nconstructor so that the buffering settings can be properly\ninitialized.\n\n### readable.\\_read(size)\n\n* `size` {Number} Number of bytes to read asynchronously\n\nNote: **This function should NOT be called directly.** It should be\nimplemented by child classes, and called by the internal Readable\nclass methods only.\n\nAll Readable stream implementations must provide a `_read` method\nto fetch data from the underlying resource.\n\nThis method is prefixed with an underscore because it is internal to\nthe class that defines it, and should not be called directly by user\nprograms. However, you **are** expected to override this method in\nyour own extension classes.\n\nWhen data is available, put it into the read queue by calling\n`readable.push(chunk)`. If `push` returns false, then you should stop\nreading. When `_read` is called again, you should start pushing more\ndata.\n\nThe `size` argument is advisory. Implementations where a \"read\" is a\nsingle call that returns data can use this to know how much data to\nfetch. Implementations where that is not relevant, such as TCP or\nTLS, may ignore this argument, and simply provide data whenever it\nbecomes available. There is no need, for example to \"wait\" until\n`size` bytes are available before calling `stream.push(chunk)`.\n\n### readable.push(chunk)\n\n* `chunk` {Buffer | null | String} Chunk of data to push into the read queue\n* return {Boolean} Whether or not more pushes should be performed\n\nNote: **This function should be called by Readable implementors, NOT\nby consumers of Readable subclasses.** The `_read()` function will not\nbe called again until at least one `push(chunk)` call is made. 
If no\ndata is available, then you MAY call `push('')` (an empty string) to\nallow a future `_read` call, without adding any data to the queue.\n\nThe `Readable` class works by putting data into a read queue to be\npulled out later by calling the `read()` method when the `'readable'`\nevent fires.\n\nThe `push()` method will explicitly insert some data into the read\nqueue. If it is called with `null` then it will signal the end of the\ndata.\n\nIn some cases, you may be wrapping a lower-level source which has some\nsort of pause/resume mechanism, and a data callback. In those cases,\nyou could wrap the low-level source object by doing something like\nthis:\n\n```javascript\n// source is an object with readStop() and readStart() methods,\n// and an `ondata` member that gets called when it has data, and\n// an `onend` member that gets called when the data is over.\n\nvar stream = new Readable();\n\nsource.ondata = function(chunk) {\n // if push() returns false, then we need to stop reading from source\n if (!stream.push(chunk))\n source.readStop();\n};\n\nsource.onend = function() {\n stream.push(null);\n};\n\n// _read will be called when the stream wants to pull more data in\n// the advisory size argument is ignored in this case.\nstream._read = function(n) {\n source.readStart();\n};\n```\n\n### readable.unshift(chunk)\n\n* `chunk` {Buffer | null | String} Chunk of data to unshift onto the read queue\n* return {Boolean} Whether or not more pushes should be performed\n\nThis is the corollary of `readable.push(chunk)`. 
Rather than putting\nthe data at the *end* of the read queue, it puts it at the *front* of\nthe read queue.\n\nThis is useful in certain use-cases where a stream is being consumed\nby a parser, which needs to \"un-consume\" some data that it has\noptimistically pulled out of the source.\n\n```javascript\n// A parser for a simple data protocol.\n// The \"header\" is a JSON object, followed by 2 \\n characters, and\n// then a message body.\n//\n// Note: This can be done more simply as a Transform stream. See below.\n\nfunction SimpleProtocol(source, options) {\n if (!(this instanceof SimpleProtocol))\n return new SimpleProtocol(options);\n\n Readable.call(this, options);\n this._inBody = false;\n this._sawFirstCr = false;\n\n // source is a readable stream, such as a socket or file\n this._source = source;\n\n var self = this;\n source.on('end', function() {\n self.push(null);\n });\n\n // give it a kick whenever the source is readable\n // read(0) will not consume any bytes\n source.on('readable', function() {\n self.read(0);\n });\n\n this._rawHeader = [];\n this.header = null;\n}\n\nSimpleProtocol.prototype = Object.create(\n Readable.prototype, { constructor: { value: SimpleProtocol }});\n\nSimpleProtocol.prototype._read = function(n) {\n if (!this._inBody) {\n var chunk = this._source.read();\n\n // if the source doesn't have data, we don't have data yet.\n if (chunk === null)\n return this.push('');\n\n // check if the chunk has a \\n\\n\n var split = -1;\n for (var i = 0; i < chunk.length; i++) {\n if (chunk[i] === 10) { // '\\n'\n if (this._sawFirstCr) {\n split = i;\n break;\n } else {\n this._sawFirstCr = true;\n }\n } else {\n this._sawFirstCr = false;\n }\n }\n\n if (split === -1) {\n // still waiting for the \\n\\n\n // stash the chunk, and try again.\n this._rawHeader.push(chunk);\n this.push('');\n } else {\n this._inBody = true;\n var h = chunk.slice(0, split);\n this._rawHeader.push(h);\n var header = Buffer.concat(this._rawHeader).toString();\n try 
{\n this.header = JSON.parse(header);\n } catch (er) {\n this.emit('error', new Error('invalid simple protocol data'));\n return;\n }\n // now, because we got some extra data, unshift the rest\n // back into the read queue so that our consumer will see it.\n var b = chunk.slice(split);\n this.unshift(b);\n\n // and let them know that we are done parsing the header.\n this.emit('header', this.header);\n }\n } else {\n // from there on, just provide the data to our consumer.\n // careful not to push(null), since that would indicate EOF.\n var chunk = this._source.read();\n if (chunk) this.push(chunk);\n }\n};\n\n// Usage:\nvar parser = new SimpleProtocol(source);\n// Now parser is a readable stream that will emit 'header'\n// with the parsed header data.\n```\n\n### readable.wrap(stream)\n\n* `stream` {Stream} An \"old style\" readable stream\n\nIf you are using an older Node library that emits `'data'` events and\nhas a `pause()` method that is advisory only, then you can use the\n`wrap()` method to create a Readable stream that uses the old stream\nas its data source.\n\nFor example:\n\n```javascript\nvar OldReader = require('./old-api-module.js').OldReader;\nvar oreader = new OldReader;\nvar Readable = require('stream').Readable;\nvar myReader = new Readable().wrap(oreader);\n\nmyReader.on('readable', function() {\n myReader.read(); // etc.\n});\n```\n\n### Event: 'readable'\n\nWhen there is data ready to be consumed, this event will fire.\n\nWhen this event emits, call the `read()` method to consume the data.\n\n### Event: 'end'\n\nEmitted when the stream has received an EOF (FIN in TCP terminology).\nIndicates that no more `'data'` events will happen. 
If the stream is\nalso writable, it may be possible to continue writing.\n\n### Event: 'data'\n\nThe `'data'` event emits either a `Buffer` (by default) or a string if\n`setEncoding()` was used.\n\nNote that adding a `'data'` event listener will switch the Readable\nstream into \"old mode\", where data is emitted as soon as it is\navailable, rather than waiting for you to call `read()` to consume it.\n\n### Event: 'error'\n\nEmitted if there was an error receiving data.\n\n### Event: 'close'\n\nEmitted when the underlying resource (for example, the backing file\ndescriptor) has been closed. Not all streams will emit this.\n\n### readable.setEncoding(encoding)\n\nMakes the `'data'` event emit a string instead of a `Buffer`. `encoding`\ncan be `'utf8'`, `'utf16le'` (`'ucs2'`), `'ascii'`, or `'hex'`.\n\nThe encoding can also be set by specifying an `encoding` field to the\nconstructor.\n\n### readable.read([size])\n\n* `size` {Number | null} Optional number of bytes to read.\n* Return: {Buffer | String | null}\n\nNote: **This function SHOULD be called by Readable stream users.**\n\nCall this method to consume data once the `'readable'` event is\nemitted.\n\nThe `size` argument will set a minimum number of bytes that you are\ninterested in. If not set, then the entire content of the internal\nbuffer is returned.\n\nIf there is no data to consume, or if there are fewer bytes in the\ninternal buffer than the `size` argument, then `null` is returned, and\na future `'readable'` event will be emitted when more is available.\n\nCalling `stream.read(0)` will always return `null`, and will trigger a\nrefresh of the internal buffer, but otherwise be a no-op.\n\n### readable.pipe(destination, [options])\n\n* `destination` {Writable Stream}\n* `options` {Object} Optional\n * `end` {Boolean} Default=true\n\nConnects this readable stream to `destination` WriteStream. Incoming\ndata on this stream gets written to `destination`. 
Properly manages\nback-pressure so that a slow destination will not be overwhelmed by a\nfast readable stream.\n\nThis function returns the `destination` stream.\n\nFor example, emulating the Unix `cat` command:\n\n process.stdin.pipe(process.stdout);\n\nBy default `end()` is called on the destination when the source stream\nemits `end`, so that `destination` is no longer writable. Pass `{ end:\nfalse }` as `options` to keep the destination stream open.\n\nThis keeps `writer` open so that \"Goodbye\" can be written at the\nend.\n\n reader.pipe(writer, { end: false });\n reader.on(\"end\", function() {\n writer.end(\"Goodbye\\n\");\n });\n\nNote that `process.stderr` and `process.stdout` are never closed until\nthe process exits, regardless of the specified options.\n\n### readable.unpipe([destination])\n\n* `destination` {Writable Stream} Optional\n\nUndo a previously established `pipe()`. If no destination is\nprovided, then all previously established pipes are removed.\n\n### readable.pause()\n\nSwitches the readable stream into \"old mode\", where data is emitted\nusing a `'data'` event rather than being buffered for consumption via\nthe `read()` method.\n\nCeases the flow of data. No `'data'` events are emitted while the\nstream is in a paused state.\n\n### readable.resume()\n\nSwitches the readable stream into \"old mode\", where data is emitted\nusing a `'data'` event rather than being buffered for consumption via\nthe `read()` method.\n\nResumes the incoming `'data'` events after a `pause()`.\n\n\n## Class: stream.Writable\n\n\n\nA `Writable` Stream has the following methods, members, and events.\n\nNote that `stream.Writable` is an abstract class designed to be\nextended with an underlying implementation of the\n`_write(chunk, encoding, cb)` method. (See below.)\n\n### new stream.Writable([options])\n\n* `options` {Object}\n * `highWaterMark` {Number} Buffer level when `write()` starts\n returning false. 
Default=16kb\n * `decodeStrings` {Boolean} Whether or not to decode strings into\n Buffers before passing them to `_write()`. Default=true\n\nIn classes that extend the Writable class, make sure to call the\nconstructor so that the buffering settings can be properly\ninitialized.\n\n### writable.\\_write(chunk, encoding, callback)\n\n* `chunk` {Buffer | String} The chunk to be written. Will always\n be a buffer unless the `decodeStrings` option was set to `false`.\n* `encoding` {String} If the chunk is a string, then this is the\n encoding type. Ignore chunk is a buffer. Note that chunk will\n **always** be a buffer unless the `decodeStrings` option is\n explicitly set to `false`.\n* `callback` {Function} Call this function (optionally with an error\n argument) when you are done processing the supplied chunk.\n\nAll Writable stream implementations must provide a `_write` method to\nsend data to the underlying resource.\n\nNote: **This function MUST NOT be called directly.** It should be\nimplemented by child classes, and called by the internal Writable\nclass methods only.\n\nCall the callback using the standard `callback(error)` pattern to\nsignal that the write completed successfully or with an error.\n\nIf the `decodeStrings` flag is set in the constructor options, then\n`chunk` may be a string rather than a Buffer, and `encoding` will\nindicate the sort of string that it is. This is to support\nimplementations that have an optimized handling for certain string\ndata encodings. If you do not explicitly set the `decodeStrings`\noption to `false`, then you can safely ignore the `encoding` argument,\nand assume that `chunk` will always be a Buffer.\n\nThis method is prefixed with an underscore because it is internal to\nthe class that defines it, and should not be called directly by user\nprograms. 
However, you **are** expected to override this method in\nyour own extension classes.\n\n\n### writable.write(chunk, [encoding], [callback])\n\n* `chunk` {Buffer | String} Data to be written\n* `encoding` {String} Optional. If `chunk` is a string, then encoding\n defaults to `'utf8'`\n* `callback` {Function} Optional. Called when this chunk is\n successfully written.\n* Returns {Boolean}\n\nWrites `chunk` to the stream. Returns `true` if the data has been\nflushed to the underlying resource. Returns `false` to indicate that\nthe buffer is full, and the data will be sent out in the future. The\n`'drain'` event will indicate when the buffer is empty again.\n\nThe specifics of when `write()` will return false, is determined by\nthe `highWaterMark` option provided to the constructor.\n\n### writable.end([chunk], [encoding], [callback])\n\n* `chunk` {Buffer | String} Optional final data to be written\n* `encoding` {String} Optional. If `chunk` is a string, then encoding\n defaults to `'utf8'`\n* `callback` {Function} Optional. Called when the final chunk is\n successfully written.\n\nCall this method to signal the end of the data being written to the\nstream.\n\n### Event: 'drain'\n\nEmitted when the stream's write queue empties and it's safe to write\nwithout buffering again. Listen for it when `stream.write()` returns\n`false`.\n\n### Event: 'close'\n\nEmitted when the underlying resource (for example, the backing file\ndescriptor) has been closed. 
Not all streams will emit this.\n\n### Event: 'finish'\n\nWhen `end()` is called and there are no more chunks to write, this\nevent is emitted.\n\n### Event: 'pipe'\n\n* `source` {Readable Stream}\n\nEmitted when the stream is passed to a readable stream's pipe method.\n\n### Event 'unpipe'\n\n* `source` {Readable Stream}\n\nEmitted when a previously established `pipe()` is removed using the\nsource Readable stream's `unpipe()` method.\n\n## Class: stream.Duplex\n\n\n\nA \"duplex\" stream is one that is both Readable and Writable, such as a\nTCP socket connection.\n\nNote that `stream.Duplex` is an abstract class designed to be\nextended with an underlying implementation of the `_read(size)`\nand `_write(chunk, encoding, callback)` methods as you would with a Readable or\nWritable stream class.\n\nSince JavaScript doesn't have multiple prototypal inheritance, this\nclass prototypally inherits from Readable, and then parasitically from\nWritable. It is thus up to the user to implement both the lowlevel\n`_read(n)` method as well as the lowlevel `_write(chunk, encoding, cb)` method\non extension duplex classes.\n\n### new stream.Duplex(options)\n\n* `options` {Object} Passed to both Writable and Readable\n constructors. Also has the following fields:\n * `allowHalfOpen` {Boolean} Default=true. If set to `false`, then\n the stream will automatically end the readable side when the\n writable side ends and vice versa.\n\nIn classes that extend the Duplex class, make sure to call the\nconstructor so that the buffering settings can be properly\ninitialized.\n\n## Class: stream.Transform\n\nA \"transform\" stream is a duplex stream where the output is causally\nconnected in some way to the input, such as a zlib stream or a crypto\nstream.\n\nThere is no requirement that the output be the same size as the input,\nthe same number of chunks, or arrive at the same time. 
For example, a\nHash stream will only ever have a single chunk of output which is\nprovided when the input is ended. A zlib stream will either produce\nmuch smaller or much larger than its input.\n\nRather than implement the `_read()` and `_write()` methods, Transform\nclasses must implement the `_transform()` method, and may optionally\nalso implement the `_flush()` method. (See below.)\n\n### new stream.Transform([options])\n\n* `options` {Object} Passed to both Writable and Readable\n constructors.\n\nIn classes that extend the Transform class, make sure to call the\nconstructor so that the buffering settings can be properly\ninitialized.\n\n### transform.\\_transform(chunk, encoding, callback)\n\n* `chunk` {Buffer | String} The chunk to be transformed. Will always\n be a buffer unless the `decodeStrings` option was set to `false`.\n* `encoding` {String} If the chunk is a string, then this is the\n encoding type. (Ignore if `decodeStrings` chunk is a buffer.)\n* `callback` {Function} Call this function (optionally with an error\n argument) when you are done processing the supplied chunk.\n\nNote: **This function MUST NOT be called directly.** It should be\nimplemented by child classes, and called by the internal Transform\nclass methods only.\n\nAll Transform stream implementations must provide a `_transform`\nmethod to accept input and produce output.\n\n`_transform` should do whatever has to be done in this specific\nTransform class, to handle the bytes being written, and pass them off\nto the readable portion of the interface. Do asynchronous I/O,\nprocess things, and so on.\n\nCall `transform.push(outputChunk)` 0 or more times to generate output\nfrom this input chunk, depending on how much data you want to output\nas a result of this chunk.\n\nCall the callback function only when the current chunk is completely\nconsumed. 
Note that there may or may not be output as a result of any\nparticular input chunk.\n\nThis method is prefixed with an underscore because it is internal to\nthe class that defines it, and should not be called directly by user\nprograms. However, you **are** expected to override this method in\nyour own extension classes.\n\n### transform.\\_flush(callback)\n\n* `callback` {Function} Call this function (optionally with an error\n argument) when you are done flushing any remaining data.\n\nNote: **This function MUST NOT be called directly.** It MAY be implemented\nby child classes, and if so, will be called by the internal Transform\nclass methods only.\n\nIn some cases, your transform operation may need to emit a bit more\ndata at the end of the stream. For example, a `Zlib` compression\nstream will store up some internal state so that it can optimally\ncompress the output. At the end, however, it needs to do the best it\ncan with what is left, so that the data will be complete.\n\nIn those cases, you can implement a `_flush` method, which will be\ncalled at the very end, after all the written data is consumed, but\nbefore emitting `end` to signal the end of the readable side. Just\nlike with `_transform`, call `transform.push(chunk)` zero or more\ntimes, as appropriate, and call `callback` when the flush operation is\ncomplete.\n\nThis method is prefixed with an underscore because it is internal to\nthe class that defines it, and should not be called directly by user\nprograms. 
However, you **are** expected to override this method in\nyour own extension classes.\n\n### Example: `SimpleProtocol` parser\n\nThe example above of a simple protocol parser can be implemented much\nmore simply by using the higher level `Transform` stream class.\n\nIn this example, rather than providing the input as an argument, it\nwould be piped into the parser, which is a more idiomatic Node stream\napproach.\n\n```javascript\nfunction SimpleProtocol(options) {\n if (!(this instanceof SimpleProtocol))\n return new SimpleProtocol(options);\n\n Transform.call(this, options);\n this._inBody = false;\n this._sawFirstCr = false;\n this._rawHeader = [];\n this.header = null;\n}\n\nSimpleProtocol.prototype = Object.create(\n Transform.prototype, { constructor: { value: SimpleProtocol }});\n\nSimpleProtocol.prototype._transform = function(chunk, encoding, done) {\n if (!this._inBody) {\n // check if the chunk has a \\n\\n\n var split = -1;\n for (var i = 0; i < chunk.length; i++) {\n if (chunk[i] === 10) { // '\\n'\n if (this._sawFirstCr) {\n split = i;\n break;\n } else {\n this._sawFirstCr = true;\n }\n } else {\n this._sawFirstCr = false;\n }\n }\n\n if (split === -1) {\n // still waiting for the \\n\\n\n // stash the chunk, and try again.\n this._rawHeader.push(chunk);\n } else {\n this._inBody = true;\n var h = chunk.slice(0, split);\n this._rawHeader.push(h);\n var header = Buffer.concat(this._rawHeader).toString();\n try {\n this.header = JSON.parse(header);\n } catch (er) {\n this.emit('error', new Error('invalid simple protocol data'));\n return;\n }\n // and let them know that we are done parsing the header.\n this.emit('header', this.header);\n\n // now, because we got some extra data, emit this first.\n this.push(b);\n }\n } else {\n // from there on, just provide the data to our consumer as-is.\n this.push(b);\n }\n done();\n};\n\nvar parser = new SimpleProtocol();\nsource.pipe(parser)\n\n// Now parser is a readable stream that will emit 'header'\n// with 
the parsed header data.\n```\n\n\n## Class: stream.PassThrough\n\nThis is a trivial implementation of a `Transform` stream that simply\npasses the input bytes across to the output. Its purpose is mainly\nfor examples and testing, but there are occasionally use cases where\nit can come in handy.\n\n\n[EventEmitter]: events.html#events_class_events_eventemitter\n", - "readmeFilename": "README.md", + "license": "MIT", + "gitHead": "3b672fd7ae92acf5b4ffdbabf74b372a0a56b051", "bugs": { "url": "https://github.com/isaacs/readable-stream/issues" }, "homepage": "https://github.com/isaacs/readable-stream", - "_id": "readable-stream@1.0.24", - "_from": "readable-stream@1.0" + "_id": "readable-stream@1.1.13", + "_shasum": "f6eef764f514c89e2b9e23146a75ba106756d23e", + "_from": "readable-stream@>=1.1.0 <1.2.0", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "rvagg", + "email": "rod@vagg.org" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + }, + { + "name": "tootallnate", + "email": "nathan@tootallnate.net" + }, + { + "name": "rvagg", + "email": "rod@vagg.org" + } + ], + "dist": { + "shasum": "f6eef764f514c89e2b9e23146a75ba106756d23e", + "tarball": "http://registry.npmjs.org/readable-stream/-/readable-stream-1.1.13.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.13.tgz", + "readme": "ERROR: No README data found!" 
} diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/readable.js nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/readable.js --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/readable.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/readable.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,5 @@ exports = module.exports = require('./lib/_stream_readable.js'); +exports.Stream = require('stream'); exports.Readable = exports; exports.Writable = require('./lib/_stream_writable.js'); exports.Duplex = require('./lib/_stream_duplex.js'); diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/README.md nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/README.md --- nodejs-0.11.13/deps/npm/node_modules/sha/node_modules/readable-stream/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/node_modules/readable-stream/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,768 +1,15 @@ # readable-stream -A new class of streams for Node.js +***Node-core streams for userland*** -This module provides the new Stream base classes introduced in Node -v0.10, for use in Node v0.8. You can use it to have programs that -have to work with node v0.8, while being forward-compatible for v0.10 -and beyond. When you drop support for v0.8, you can remove this -module, and only use the native streams. +[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true&downloadRank=true)](https://nodei.co/npm/readable-stream/) +[![NPM](https://nodei.co/npm-dl/readable-stream.png&months=6&height=3)](https://nodei.co/npm/readable-stream/) -This is almost exactly the same codebase as appears in Node v0.10. -However: +This package is a mirror of the Streams2 and Streams3 implementations in Node-core. -1. The exported object is actually the Readable class. 
Decorating the - native `stream` module would be global pollution. -2. In v0.10, you can safely use `base64` as an argument to - `setEncoding` in Readable streams. However, in v0.8, the - StringDecoder class has no `end()` method, which is problematic for - Base64. So, don't use that, because it'll break and be weird. +If you want to guarantee a stable streams base, regardless of what version of Node you, or the users of your libraries are using, use **readable-stream** *only* and avoid the *"stream"* module in Node-core. -Other than that, the API is the same as `require('stream')` in v0.10, -so the API docs are reproduced below. +**readable-stream** comes in two major versions, v1.0.x and v1.1.x. The former tracks the Streams2 implementation in Node 0.10, including bug-fixes and minor improvements as they are added. The latter tracks Streams3 as it develops in Node 0.11; we will likely see a v1.2.x branch for Node 0.12. ----------- +**readable-stream** uses proper patch-level versioning so if you pin to `"~1.0.0"` you’ll get the latest Node 0.10 Streams2 implementation, including any fixes and minor non-breaking improvements. The patch-level versions of 1.0.x and 1.1.x should mirror the patch-level versions of Node-core releases. You should prefer the **1.0.x** releases for now and when you’re ready to start using Streams3, pin to `"~1.1.0"` - Stability: 2 - Unstable - -A stream is an abstract interface implemented by various objects in -Node. For example a request to an HTTP server is a stream, as is -stdout. Streams are readable, writable, or both. All streams are -instances of [EventEmitter][] - -You can load the Stream base classes by doing `require('stream')`. -There are base classes provided for Readable streams, Writable -streams, Duplex streams, and Transform streams. - -## Compatibility - -In earlier versions of Node, the Readable stream interface was -simpler, but also less powerful and less useful. 
- -* Rather than waiting for you to call the `read()` method, `'data'` - events would start emitting immediately. If you needed to do some - I/O to decide how to handle data, then you had to store the chunks - in some kind of buffer so that they would not be lost. -* The `pause()` method was advisory, rather than guaranteed. This - meant that you still had to be prepared to receive `'data'` events - even when the stream was in a paused state. - -In Node v0.10, the Readable class described below was added. For -backwards compatibility with older Node programs, Readable streams -switch into "old mode" when a `'data'` event handler is added, or when -the `pause()` or `resume()` methods are called. The effect is that, -even if you are not using the new `read()` method and `'readable'` -event, you no longer have to worry about losing `'data'` chunks. - -Most programs will continue to function normally. However, this -introduces an edge case in the following conditions: - -* No `'data'` event handler is added. -* The `pause()` and `resume()` methods are never called. - -For example, consider the following code: - -```javascript -// WARNING! BROKEN! -net.createServer(function(socket) { - - // we add an 'end' method, but never consume the data - socket.on('end', function() { - // It will never get here. - socket.end('I got your message (but didnt read it)\n'); - }); - -}).listen(1337); -``` - -In versions of node prior to v0.10, the incoming message data would be -simply discarded. However, in Node v0.10 and beyond, the socket will -remain paused forever. - -The workaround in this situation is to call the `resume()` method to -trigger "old mode" behavior: - -```javascript -// Workaround -net.createServer(function(socket) { - - socket.on('end', function() { - socket.end('I got your message (but didnt read it)\n'); - }); - - // start the flow of data, discarding it. 
- socket.resume(); - -}).listen(1337); -``` - -In addition to new Readable streams switching into old-mode, pre-v0.10 -style streams can be wrapped in a Readable class using the `wrap()` -method. - -## Class: stream.Readable - - - -A `Readable Stream` has the following methods, members, and events. - -Note that `stream.Readable` is an abstract class designed to be -extended with an underlying implementation of the `_read(size)` -method. (See below.) - -### new stream.Readable([options]) - -* `options` {Object} - * `highWaterMark` {Number} The maximum number of bytes to store in - the internal buffer before ceasing to read from the underlying - resource. Default=16kb - * `encoding` {String} If specified, then buffers will be decoded to - strings using the specified encoding. Default=null - * `objectMode` {Boolean} Whether this stream should behave - as a stream of objects. Meaning that stream.read(n) returns - a single value instead of a Buffer of size n - -In classes that extend the Readable class, make sure to call the -constructor so that the buffering settings can be properly -initialized. - -### readable.\_read(size) - -* `size` {Number} Number of bytes to read asynchronously - -Note: **This function should NOT be called directly.** It should be -implemented by child classes, and called by the internal Readable -class methods only. - -All Readable stream implementations must provide a `_read` method -to fetch data from the underlying resource. - -This method is prefixed with an underscore because it is internal to -the class that defines it, and should not be called directly by user -programs. However, you **are** expected to override this method in -your own extension classes. - -When data is available, put it into the read queue by calling -`readable.push(chunk)`. If `push` returns false, then you should stop -reading. When `_read` is called again, you should start pushing more -data. - -The `size` argument is advisory. 
Implementations where a "read" is a -single call that returns data can use this to know how much data to -fetch. Implementations where that is not relevant, such as TCP or -TLS, may ignore this argument, and simply provide data whenever it -becomes available. There is no need, for example to "wait" until -`size` bytes are available before calling `stream.push(chunk)`. - -### readable.push(chunk) - -* `chunk` {Buffer | null | String} Chunk of data to push into the read queue -* return {Boolean} Whether or not more pushes should be performed - -Note: **This function should be called by Readable implementors, NOT -by consumers of Readable subclasses.** The `_read()` function will not -be called again until at least one `push(chunk)` call is made. If no -data is available, then you MAY call `push('')` (an empty string) to -allow a future `_read` call, without adding any data to the queue. - -The `Readable` class works by putting data into a read queue to be -pulled out later by calling the `read()` method when the `'readable'` -event fires. - -The `push()` method will explicitly insert some data into the read -queue. If it is called with `null` then it will signal the end of the -data. - -In some cases, you may be wrapping a lower-level source which has some -sort of pause/resume mechanism, and a data callback. In those cases, -you could wrap the low-level source object by doing something like -this: - -```javascript -// source is an object with readStop() and readStart() methods, -// and an `ondata` member that gets called when it has data, and -// an `onend` member that gets called when the data is over. 
- -var stream = new Readable(); - -source.ondata = function(chunk) { - // if push() returns false, then we need to stop reading from source - if (!stream.push(chunk)) - source.readStop(); -}; - -source.onend = function() { - stream.push(null); -}; - -// _read will be called when the stream wants to pull more data in -// the advisory size argument is ignored in this case. -stream._read = function(n) { - source.readStart(); -}; -``` - -### readable.unshift(chunk) - -* `chunk` {Buffer | null | String} Chunk of data to unshift onto the read queue -* return {Boolean} Whether or not more pushes should be performed - -This is the corollary of `readable.push(chunk)`. Rather than putting -the data at the *end* of the read queue, it puts it at the *front* of -the read queue. - -This is useful in certain use-cases where a stream is being consumed -by a parser, which needs to "un-consume" some data that it has -optimistically pulled out of the source. - -```javascript -// A parser for a simple data protocol. -// The "header" is a JSON object, followed by 2 \n characters, and -// then a message body. -// -// Note: This can be done more simply as a Transform stream. See below. 
- -function SimpleProtocol(source, options) { - if (!(this instanceof SimpleProtocol)) - return new SimpleProtocol(options); - - Readable.call(this, options); - this._inBody = false; - this._sawFirstCr = false; - - // source is a readable stream, such as a socket or file - this._source = source; - - var self = this; - source.on('end', function() { - self.push(null); - }); - - // give it a kick whenever the source is readable - // read(0) will not consume any bytes - source.on('readable', function() { - self.read(0); - }); - - this._rawHeader = []; - this.header = null; -} - -SimpleProtocol.prototype = Object.create( - Readable.prototype, { constructor: { value: SimpleProtocol }}); - -SimpleProtocol.prototype._read = function(n) { - if (!this._inBody) { - var chunk = this._source.read(); - - // if the source doesn't have data, we don't have data yet. - if (chunk === null) - return this.push(''); - - // check if the chunk has a \n\n - var split = -1; - for (var i = 0; i < chunk.length; i++) { - if (chunk[i] === 10) { // '\n' - if (this._sawFirstCr) { - split = i; - break; - } else { - this._sawFirstCr = true; - } - } else { - this._sawFirstCr = false; - } - } - - if (split === -1) { - // still waiting for the \n\n - // stash the chunk, and try again. - this._rawHeader.push(chunk); - this.push(''); - } else { - this._inBody = true; - var h = chunk.slice(0, split); - this._rawHeader.push(h); - var header = Buffer.concat(this._rawHeader).toString(); - try { - this.header = JSON.parse(header); - } catch (er) { - this.emit('error', new Error('invalid simple protocol data')); - return; - } - // now, because we got some extra data, unshift the rest - // back into the read queue so that our consumer will see it. - var b = chunk.slice(split); - this.unshift(b); - - // and let them know that we are done parsing the header. - this.emit('header', this.header); - } - } else { - // from there on, just provide the data to our consumer. 
- // careful not to push(null), since that would indicate EOF. - var chunk = this._source.read(); - if (chunk) this.push(chunk); - } -}; - -// Usage: -var parser = new SimpleProtocol(source); -// Now parser is a readable stream that will emit 'header' -// with the parsed header data. -``` - -### readable.wrap(stream) - -* `stream` {Stream} An "old style" readable stream - -If you are using an older Node library that emits `'data'` events and -has a `pause()` method that is advisory only, then you can use the -`wrap()` method to create a Readable stream that uses the old stream -as its data source. - -For example: - -```javascript -var OldReader = require('./old-api-module.js').OldReader; -var oreader = new OldReader; -var Readable = require('stream').Readable; -var myReader = new Readable().wrap(oreader); - -myReader.on('readable', function() { - myReader.read(); // etc. -}); -``` - -### Event: 'readable' - -When there is data ready to be consumed, this event will fire. - -When this event emits, call the `read()` method to consume the data. - -### Event: 'end' - -Emitted when the stream has received an EOF (FIN in TCP terminology). -Indicates that no more `'data'` events will happen. If the stream is -also writable, it may be possible to continue writing. - -### Event: 'data' - -The `'data'` event emits either a `Buffer` (by default) or a string if -`setEncoding()` was used. - -Note that adding a `'data'` event listener will switch the Readable -stream into "old mode", where data is emitted as soon as it is -available, rather than waiting for you to call `read()` to consume it. - -### Event: 'error' - -Emitted if there was an error receiving data. - -### Event: 'close' - -Emitted when the underlying resource (for example, the backing file -descriptor) has been closed. Not all streams will emit this. - -### readable.setEncoding(encoding) - -Makes the `'data'` event emit a string instead of a `Buffer`. 
`encoding` -can be `'utf8'`, `'utf16le'` (`'ucs2'`), `'ascii'`, or `'hex'`. - -The encoding can also be set by specifying an `encoding` field to the -constructor. - -### readable.read([size]) - -* `size` {Number | null} Optional number of bytes to read. -* Return: {Buffer | String | null} - -Note: **This function SHOULD be called by Readable stream users.** - -Call this method to consume data once the `'readable'` event is -emitted. - -The `size` argument will set a minimum number of bytes that you are -interested in. If not set, then the entire content of the internal -buffer is returned. - -If there is no data to consume, or if there are fewer bytes in the -internal buffer than the `size` argument, then `null` is returned, and -a future `'readable'` event will be emitted when more is available. - -Calling `stream.read(0)` will always return `null`, and will trigger a -refresh of the internal buffer, but otherwise be a no-op. - -### readable.pipe(destination, [options]) - -* `destination` {Writable Stream} -* `options` {Object} Optional - * `end` {Boolean} Default=true - -Connects this readable stream to `destination` WriteStream. Incoming -data on this stream gets written to `destination`. Properly manages -back-pressure so that a slow destination will not be overwhelmed by a -fast readable stream. - -This function returns the `destination` stream. - -For example, emulating the Unix `cat` command: - - process.stdin.pipe(process.stdout); - -By default `end()` is called on the destination when the source stream -emits `end`, so that `destination` is no longer writable. Pass `{ end: -false }` as `options` to keep the destination stream open. - -This keeps `writer` open so that "Goodbye" can be written at the -end. - - reader.pipe(writer, { end: false }); - reader.on("end", function() { - writer.end("Goodbye\n"); - }); - -Note that `process.stderr` and `process.stdout` are never closed until -the process exits, regardless of the specified options. 
- -### readable.unpipe([destination]) - -* `destination` {Writable Stream} Optional - -Undo a previously established `pipe()`. If no destination is -provided, then all previously established pipes are removed. - -### readable.pause() - -Switches the readable stream into "old mode", where data is emitted -using a `'data'` event rather than being buffered for consumption via -the `read()` method. - -Ceases the flow of data. No `'data'` events are emitted while the -stream is in a paused state. - -### readable.resume() - -Switches the readable stream into "old mode", where data is emitted -using a `'data'` event rather than being buffered for consumption via -the `read()` method. - -Resumes the incoming `'data'` events after a `pause()`. - - -## Class: stream.Writable - - - -A `Writable` Stream has the following methods, members, and events. - -Note that `stream.Writable` is an abstract class designed to be -extended with an underlying implementation of the -`_write(chunk, encoding, cb)` method. (See below.) - -### new stream.Writable([options]) - -* `options` {Object} - * `highWaterMark` {Number} Buffer level when `write()` starts - returning false. Default=16kb - * `decodeStrings` {Boolean} Whether or not to decode strings into - Buffers before passing them to `_write()`. Default=true - -In classes that extend the Writable class, make sure to call the -constructor so that the buffering settings can be properly -initialized. - -### writable.\_write(chunk, encoding, callback) - -* `chunk` {Buffer | String} The chunk to be written. Will always - be a buffer unless the `decodeStrings` option was set to `false`. -* `encoding` {String} If the chunk is a string, then this is the - encoding type. Ignore chunk is a buffer. Note that chunk will - **always** be a buffer unless the `decodeStrings` option is - explicitly set to `false`. -* `callback` {Function} Call this function (optionally with an error - argument) when you are done processing the supplied chunk. 
- -All Writable stream implementations must provide a `_write` method to -send data to the underlying resource. - -Note: **This function MUST NOT be called directly.** It should be -implemented by child classes, and called by the internal Writable -class methods only. - -Call the callback using the standard `callback(error)` pattern to -signal that the write completed successfully or with an error. - -If the `decodeStrings` flag is set in the constructor options, then -`chunk` may be a string rather than a Buffer, and `encoding` will -indicate the sort of string that it is. This is to support -implementations that have an optimized handling for certain string -data encodings. If you do not explicitly set the `decodeStrings` -option to `false`, then you can safely ignore the `encoding` argument, -and assume that `chunk` will always be a Buffer. - -This method is prefixed with an underscore because it is internal to -the class that defines it, and should not be called directly by user -programs. However, you **are** expected to override this method in -your own extension classes. - - -### writable.write(chunk, [encoding], [callback]) - -* `chunk` {Buffer | String} Data to be written -* `encoding` {String} Optional. If `chunk` is a string, then encoding - defaults to `'utf8'` -* `callback` {Function} Optional. Called when this chunk is - successfully written. -* Returns {Boolean} - -Writes `chunk` to the stream. Returns `true` if the data has been -flushed to the underlying resource. Returns `false` to indicate that -the buffer is full, and the data will be sent out in the future. The -`'drain'` event will indicate when the buffer is empty again. - -The specifics of when `write()` will return false, is determined by -the `highWaterMark` option provided to the constructor. - -### writable.end([chunk], [encoding], [callback]) - -* `chunk` {Buffer | String} Optional final data to be written -* `encoding` {String} Optional. 
If `chunk` is a string, then encoding - defaults to `'utf8'` -* `callback` {Function} Optional. Called when the final chunk is - successfully written. - -Call this method to signal the end of the data being written to the -stream. - -### Event: 'drain' - -Emitted when the stream's write queue empties and it's safe to write -without buffering again. Listen for it when `stream.write()` returns -`false`. - -### Event: 'close' - -Emitted when the underlying resource (for example, the backing file -descriptor) has been closed. Not all streams will emit this. - -### Event: 'finish' - -When `end()` is called and there are no more chunks to write, this -event is emitted. - -### Event: 'pipe' - -* `source` {Readable Stream} - -Emitted when the stream is passed to a readable stream's pipe method. - -### Event 'unpipe' - -* `source` {Readable Stream} - -Emitted when a previously established `pipe()` is removed using the -source Readable stream's `unpipe()` method. - -## Class: stream.Duplex - - - -A "duplex" stream is one that is both Readable and Writable, such as a -TCP socket connection. - -Note that `stream.Duplex` is an abstract class designed to be -extended with an underlying implementation of the `_read(size)` -and `_write(chunk, encoding, callback)` methods as you would with a Readable or -Writable stream class. - -Since JavaScript doesn't have multiple prototypal inheritance, this -class prototypally inherits from Readable, and then parasitically from -Writable. It is thus up to the user to implement both the lowlevel -`_read(n)` method as well as the lowlevel `_write(chunk, encoding, cb)` method -on extension duplex classes. - -### new stream.Duplex(options) - -* `options` {Object} Passed to both Writable and Readable - constructors. Also has the following fields: - * `allowHalfOpen` {Boolean} Default=true. If set to `false`, then - the stream will automatically end the readable side when the - writable side ends and vice versa. 
- -In classes that extend the Duplex class, make sure to call the -constructor so that the buffering settings can be properly -initialized. - -## Class: stream.Transform - -A "transform" stream is a duplex stream where the output is causally -connected in some way to the input, such as a zlib stream or a crypto -stream. - -There is no requirement that the output be the same size as the input, -the same number of chunks, or arrive at the same time. For example, a -Hash stream will only ever have a single chunk of output which is -provided when the input is ended. A zlib stream will either produce -much smaller or much larger than its input. - -Rather than implement the `_read()` and `_write()` methods, Transform -classes must implement the `_transform()` method, and may optionally -also implement the `_flush()` method. (See below.) - -### new stream.Transform([options]) - -* `options` {Object} Passed to both Writable and Readable - constructors. - -In classes that extend the Transform class, make sure to call the -constructor so that the buffering settings can be properly -initialized. - -### transform.\_transform(chunk, encoding, callback) - -* `chunk` {Buffer | String} The chunk to be transformed. Will always - be a buffer unless the `decodeStrings` option was set to `false`. -* `encoding` {String} If the chunk is a string, then this is the - encoding type. (Ignore if `decodeStrings` chunk is a buffer.) -* `callback` {Function} Call this function (optionally with an error - argument) when you are done processing the supplied chunk. - -Note: **This function MUST NOT be called directly.** It should be -implemented by child classes, and called by the internal Transform -class methods only. - -All Transform stream implementations must provide a `_transform` -method to accept input and produce output. 
- -`_transform` should do whatever has to be done in this specific -Transform class, to handle the bytes being written, and pass them off -to the readable portion of the interface. Do asynchronous I/O, -process things, and so on. - -Call `transform.push(outputChunk)` 0 or more times to generate output -from this input chunk, depending on how much data you want to output -as a result of this chunk. - -Call the callback function only when the current chunk is completely -consumed. Note that there may or may not be output as a result of any -particular input chunk. - -This method is prefixed with an underscore because it is internal to -the class that defines it, and should not be called directly by user -programs. However, you **are** expected to override this method in -your own extension classes. - -### transform.\_flush(callback) - -* `callback` {Function} Call this function (optionally with an error - argument) when you are done flushing any remaining data. - -Note: **This function MUST NOT be called directly.** It MAY be implemented -by child classes, and if so, will be called by the internal Transform -class methods only. - -In some cases, your transform operation may need to emit a bit more -data at the end of the stream. For example, a `Zlib` compression -stream will store up some internal state so that it can optimally -compress the output. At the end, however, it needs to do the best it -can with what is left, so that the data will be complete. - -In those cases, you can implement a `_flush` method, which will be -called at the very end, after all the written data is consumed, but -before emitting `end` to signal the end of the readable side. Just -like with `_transform`, call `transform.push(chunk)` zero or more -times, as appropriate, and call `callback` when the flush operation is -complete. - -This method is prefixed with an underscore because it is internal to -the class that defines it, and should not be called directly by user -programs. 
However, you **are** expected to override this method in -your own extension classes. - -### Example: `SimpleProtocol` parser - -The example above of a simple protocol parser can be implemented much -more simply by using the higher level `Transform` stream class. - -In this example, rather than providing the input as an argument, it -would be piped into the parser, which is a more idiomatic Node stream -approach. - -```javascript -function SimpleProtocol(options) { - if (!(this instanceof SimpleProtocol)) - return new SimpleProtocol(options); - - Transform.call(this, options); - this._inBody = false; - this._sawFirstCr = false; - this._rawHeader = []; - this.header = null; -} - -SimpleProtocol.prototype = Object.create( - Transform.prototype, { constructor: { value: SimpleProtocol }}); - -SimpleProtocol.prototype._transform = function(chunk, encoding, done) { - if (!this._inBody) { - // check if the chunk has a \n\n - var split = -1; - for (var i = 0; i < chunk.length; i++) { - if (chunk[i] === 10) { // '\n' - if (this._sawFirstCr) { - split = i; - break; - } else { - this._sawFirstCr = true; - } - } else { - this._sawFirstCr = false; - } - } - - if (split === -1) { - // still waiting for the \n\n - // stash the chunk, and try again. - this._rawHeader.push(chunk); - } else { - this._inBody = true; - var h = chunk.slice(0, split); - this._rawHeader.push(h); - var header = Buffer.concat(this._rawHeader).toString(); - try { - this.header = JSON.parse(header); - } catch (er) { - this.emit('error', new Error('invalid simple protocol data')); - return; - } - // and let them know that we are done parsing the header. - this.emit('header', this.header); - - // now, because we got some extra data, emit this first. - this.push(b); - } - } else { - // from there on, just provide the data to our consumer as-is. 
- this.push(b); - } - done(); -}; - -var parser = new SimpleProtocol(); -source.pipe(parser) - -// Now parser is a readable stream that will emit 'header' -// with the parsed header data. -``` - - -## Class: stream.PassThrough - -This is a trivial implementation of a `Transform` stream that simply -passes the input bytes across to the output. Its purpose is mainly -for examples and testing, but there are occasionally use cases where -it can come in handy. - - -[EventEmitter]: events.html#events_class_events_eventemitter diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/.npmignore nodejs-0.11.15/deps/npm/node_modules/sha/.npmignore --- nodejs-0.11.13/deps/npm/node_modules/sha/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,4 @@ -node_modules -test -.gitignore +node_modules +test +.gitignore .travis.yml \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/package.json nodejs-0.11.15/deps/npm/node_modules/sha/package.json --- nodejs-0.11.13/deps/npm/node_modules/sha/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "sha", - "version": "1.2.3", + "version": "1.3.0", "description": "Check and get file hashes", "scripts": { "test": "mocha -R spec" @@ -11,22 +11,39 @@ }, "license": "BSD", "optionalDependencies": { - "graceful-fs": "2", - "readable-stream": "1.0" + "graceful-fs": "2 || 3", + "readable-stream": "~1.1" }, "devDependencies": { "mocha": "~1.9.0" }, - "readme": "# sha\r\n\r\nCheck and get file hashes (using any algorithm)\r\n\r\n[![Build Status](https://travis-ci.org/ForbesLindesay/sha.png?branch=master)](https://travis-ci.org/ForbesLindesay/sha)\r\n[![Dependency Status](https://gemnasium.com/ForbesLindesay/sha.png)](https://gemnasium.com/ForbesLindesay/sha)\r\n[![NPM 
version](https://badge.fury.io/js/sha.png)](http://badge.fury.io/js/sha)\r\n\r\n## Installation\r\n\r\n $ npm install sha\r\n\r\n## API\r\n\r\n### check(fileName, expected, [options,] cb) / checkSync(filename, expected, [options])\r\n\r\nAsynchronously check that `fileName` has a \"hash\" of `expected`. The callback will be called with either `null` or an error (indicating that they did not match).\r\n\r\nOptions:\r\n\r\n- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash`\r\n\r\n### get(fileName, [options,] cb) / getSync(filename, [options])\r\n\r\nAsynchronously get the \"hash\" of `fileName`. The callback will be called with an optional `error` object and the (lower cased) hex digest of the hash.\r\n\r\nOptions:\r\n\r\n- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash`\r\n\r\n### stream(expected, [options])\r\n\r\nCheck the hash of a stream without ever buffering it. This is a pass through stream so you can do things like:\r\n\r\n```js\r\nfs.createReadStream('src')\r\n .pipe(sha.stream('expected'))\r\n .pipe(fs.createWriteStream('dest'))\r\n```\r\n\r\n`dest` will be a complete copy of `src` and an error will be emitted if the hash did not match `'expected'`.\r\n\r\nOptions:\r\n\r\n- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash`\r\n\r\n## License\r\n\r\nYou may use this software under the BSD or MIT. Take your pick. 
If you want me to release it under another license, open a pull request.", - "readmeFilename": "README.md", + "gitHead": "f1985eefbf7538e5809a2157c728d2f740901600", "bugs": { "url": "https://github.com/ForbesLindesay/sha/issues" }, "homepage": "https://github.com/ForbesLindesay/sha", "dependencies": { - "graceful-fs": "2", - "readable-stream": "1.0" + "graceful-fs": "2 || 3", + "readable-stream": "~1.1" }, - "_id": "sha@1.2.3", - "_from": "sha@~1.2.1" + "_id": "sha@1.3.0", + "_shasum": "79f4787045d0ede7327d702c25c443460dbc6764", + "_from": "sha@>=1.3.0 <1.4.0", + "_npmVersion": "1.5.0-alpha-4", + "_npmUser": { + "name": "forbeslindesay", + "email": "forbes@lindesay.co.uk" + }, + "maintainers": [ + { + "name": "forbeslindesay", + "email": "forbes@lindesay.co.uk" + } + ], + "dist": { + "shasum": "79f4787045d0ede7327d702c25c443460dbc6764", + "tarball": "http://registry.npmjs.org/sha/-/sha-1.3.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/sha/-/sha-1.3.0.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/sha/README.md nodejs-0.11.15/deps/npm/node_modules/sha/README.md --- nodejs-0.11.13/deps/npm/node_modules/sha/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/sha/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,49 @@ -# sha - -Check and get file hashes (using any algorithm) - -[![Build Status](https://travis-ci.org/ForbesLindesay/sha.png?branch=master)](https://travis-ci.org/ForbesLindesay/sha) -[![Dependency Status](https://gemnasium.com/ForbesLindesay/sha.png)](https://gemnasium.com/ForbesLindesay/sha) -[![NPM version](https://badge.fury.io/js/sha.png)](http://badge.fury.io/js/sha) - -## Installation - - $ npm install sha - -## API - -### check(fileName, expected, [options,] cb) / checkSync(filename, expected, [options]) - -Asynchronously check that `fileName` has a "hash" of `expected`. The callback will be called with either `null` or an error (indicating that they did not match). 
- -Options: - -- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash` - -### get(fileName, [options,] cb) / getSync(filename, [options]) - -Asynchronously get the "hash" of `fileName`. The callback will be called with an optional `error` object and the (lower cased) hex digest of the hash. - -Options: - -- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash` - -### stream(expected, [options]) - -Check the hash of a stream without ever buffering it. This is a pass through stream so you can do things like: - -```js -fs.createReadStream('src') - .pipe(sha.stream('expected')) - .pipe(fs.createWriteStream('dest')) -``` - -`dest` will be a complete copy of `src` and an error will be emitted if the hash did not match `'expected'`. - -Options: - -- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash` - -## License - +# sha + +Check and get file hashes (using any algorithm) + +[![Build Status](https://img.shields.io/travis/ForbesLindesay/sha/master.svg)](https://travis-ci.org/ForbesLindesay/sha) +[![Dependency Status](https://img.shields.io/gemnasium/ForbesLindesay/sha.svg)](https://gemnasium.com/ForbesLindesay/sha) +[![NPM version](https://img.shields.io/npm/v/sha.svg)](http://badge.fury.io/js/sha) + +## Installation + + $ npm install sha + +## API + +### check(fileName, expected, [options,] cb) / checkSync(filename, expected, [options]) + +Asynchronously check that `fileName` has a "hash" of `expected`. The callback will be called with either `null` or an error (indicating that they did not match). + +Options: + +- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash` + +### get(fileName, [options,] cb) / getSync(filename, [options]) + +Asynchronously get the "hash" of `fileName`. The callback will be called with an optional `error` object and the (lower cased) hex digest of the hash. 
+ +Options: + +- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash` + +### stream(expected, [options]) + +Check the hash of a stream without ever buffering it. This is a pass through stream so you can do things like: + +```js +fs.createReadStream('src') + .pipe(sha.stream('expected')) + .pipe(fs.createWriteStream('dest')) +``` + +`dest` will be a complete copy of `src` and an error will be emitted if the hash did not match `'expected'`. + +Options: + +- algorithm: defaults to `sha1` and can be any of the algorithms supported by `crypto.createHash` + +## License + You may use this software under the BSD or MIT. Take your pick. If you want me to release it under another license, open a pull request. \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/slide/lib/async-map.js nodejs-0.11.15/deps/npm/node_modules/slide/lib/async-map.js --- nodejs-0.11.13/deps/npm/node_modules/slide/lib/async-map.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/slide/lib/async-map.js 2015-01-20 21:22:17.000000000 +0000 @@ -26,7 +26,8 @@ , a = l * n if (!a) return cb_(null, []) function cb (er) { - if (errState) return + if (er && !errState) errState = er + var argLen = arguments.length for (var i = 1; i < argLen; i ++) if (arguments[i] !== undefined) { data[i - 1] = (data[i - 1] || []).concat(arguments[i]) @@ -43,10 +44,7 @@ }) } - if (er || --a === 0) { - errState = er - cb_.apply(null, [errState].concat(data)) - } + if (--a === 0) cb_.apply(null, [errState].concat(data)) } // expect the supplied cb function to be called // "n" times for each thing in the array. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/slide/package.json nodejs-0.11.15/deps/npm/node_modules/slide/package.json --- nodejs-0.11.13/deps/npm/node_modules/slide/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/slide/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ { "name": "slide", - "version": "1.1.5", + "version": "1.1.6", "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me", @@ -25,11 +25,30 @@ "url": "git://github.com/isaacs/slide-flow-control.git" }, "license": "ISC", - "readme": "# Controlling Flow: callbacks are easy\n\n## What's actually hard?\n\n- Doing a bunch of things in a specific order.\n- Knowing when stuff is done.\n- Handling failures.\n- Breaking up functionality into parts (avoid nested inline callbacks)\n\n\n## Common Mistakes\n\n- Abandoning convention and consistency.\n- Putting all callbacks inline.\n- Using libraries without grokking them.\n- Trying to make async code look sync.\n\n## Define Conventions\n\n- Two kinds of functions: *actors* take action, *callbacks* get results.\n- Essentially the continuation pattern. Resulting code *looks* similar\n to fibers, but is *much* simpler to implement.\n- Node works this way in the lowlevel APIs already, and it's very flexible.\n\n## Callbacks\n\n- Simple responders\n- Must always be prepared to handle errors, that's why it's the first argument.\n- Often inline anonymous, but not always.\n- Can trap and call other callbacks with modified data, or pass errors upwards.\n\n## Actors\n\n- Last argument is a callback.\n- If any error occurs, and can't be handled, pass it to the callback and return.\n- Must not throw. 
Return value ignored.\n- return x ==> return cb(null, x)\n- throw er ==> return cb(er)\n\n```javascript\n// return true if a path is either\n// a symlink or a directory.\nfunction isLinkOrDir (path, cb) {\n fs.lstat(path, function (er, s) {\n if (er) return cb(er)\n return cb(null, s.isDirectory() || s.isSymbolicLink())\n })\n}\n```\n\n# asyncMap\n\n## Usecases\n\n- I have a list of 10 files, and need to read all of them, and then continue when they're all done.\n- I have a dozen URLs, and need to fetch them all, and then continue when they're all done.\n- I have 4 connected users, and need to send a message to all of them, and then continue when that's done.\n- I have a list of n things, and I need to dosomething with all of them, in parallel, and get the results once they're all complete.\n\n\n## Solution\n\n```javascript\nvar asyncMap = require(\"slide\").asyncMap\nfunction writeFiles (files, what, cb) {\n asyncMap(files, function (f, cb) {\n fs.writeFile(f, what, cb)\n }, cb)\n}\nwriteFiles([my, file, list], \"foo\", cb)\n```\n\n# chain\n\n## Usecases\n\n- I have to do a bunch of things, in order. 
Get db credentials out of a file,\n read the data from the db, write that data to another file.\n- If anything fails, do not continue.\n- I still have to provide an array of functions, which is a lot of boilerplate,\n and a pita if your functions take args like\n\n```javascript\nfunction (cb) {\n blah(a, b, c, cb)\n}\n```\n\n- Results are discarded, which is a bit lame.\n- No way to branch.\n\n## Solution\n\n- reduces boilerplate by converting an array of [fn, args] to an actor\n that takes no arguments (except cb)\n- A bit like Function#bind, but tailored for our use-case.\n- bindActor(obj, \"method\", a, b, c)\n- bindActor(fn, a, b, c)\n- bindActor(obj, fn, a, b, c)\n- branching, skipping over falsey arguments\n\n```javascript\nchain([\n doThing && [thing, a, b, c]\n, isFoo && [doFoo, \"foo\"]\n, subChain && [chain, [one, two]]\n], cb)\n```\n\n- tracking results: results are stored in an optional array passed as argument,\n last result is always in results[results.length - 1].\n- treat chain.first and chain.last as placeholders for the first/last\n result up until that point.\n\n\n## Non-trivial example\n\n- Read number files in a directory\n- Add the results together\n- Ping a web service with the result\n- Write the response to a file\n- Delete the number files\n\n```javascript\nvar chain = require(\"slide\").chain\nfunction myProgram (cb) {\n var res = [], last = chain.last, first = chain.first\n chain([\n [fs, \"readdir\", \"the-directory\"]\n , [readFiles, \"the-directory\", last]\n , [sum, last]\n , [ping, \"POST\", \"example.com\", 80, \"/foo\", last]\n , [fs, \"writeFile\", \"result.txt\", last]\n , [rmFiles, \"./the-directory\", first]\n ], res, cb)\n}\n```\n\n# Conclusion: Convention Profits\n\n- Consistent API from top to bottom.\n- Sneak in at any point to inject functionality. 
Testable, reusable, ...\n- When ruby and python users whine, you can smile condescendingly.\n", - "readmeFilename": "README.md", + "gitHead": "8345e51ee41e35825abc1a40750ea11462f57028", "bugs": { "url": "https://github.com/isaacs/slide-flow-control/issues" }, - "_id": "slide@1.1.5", - "_from": "slide@latest" + "homepage": "https://github.com/isaacs/slide-flow-control", + "_id": "slide@1.1.6", + "scripts": {}, + "_shasum": "56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707", + "_from": "slide@>=1.1.6 <1.2.0", + "_npmVersion": "2.0.0-beta.3", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707", + "tarball": "http://registry.npmjs.org/slide/-/slide-1.1.6.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/slide/-/slide-1.1.6.tgz" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/examples/packer.js nodejs-0.11.15/deps/npm/node_modules/tar/examples/packer.js --- nodejs-0.11.13/deps/npm/node_modules/tar/examples/packer.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/examples/packer.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +var tar = require("../tar.js") + , fstream = require("fstream") + , fs = require("fs") + +var dir_destination = fs.createWriteStream('dir.tar') + +// This must be a "directory" +fstream.Reader({ path: __dirname, type: "Directory" }) + .pipe(tar.Pack({ noProprietary: true })) + .pipe(dir_destination) \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/lib/parse.js nodejs-0.11.15/deps/npm/node_modules/tar/lib/parse.js --- nodejs-0.11.13/deps/npm/node_modules/tar/lib/parse.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/lib/parse.js 2015-01-20 21:22:17.000000000 +0000 @@ -37,6 +37,7 @@ me.readable = true me._stream = new BlockStream(512) me.position = 0 + me._ended = false 
me._stream.on("error", function (e) { me.emit("error", e) @@ -118,13 +119,13 @@ // so appending one tarball to another is technically valid. // ending without the eof null blocks is not allowed, however. if (zero) { - this._ended = this._eofStarted + if (this._eofStarted) + this._ended = true this._eofStarted = true } else { - this._ended = this._eofStarted = false + this._eofStarted = false this._startEntry(c) } - } this.position += 512 diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/package.json nodejs-0.11.15/deps/npm/node_modules/tar/package.json --- nodejs-0.11.13/deps/npm/node_modules/tar/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,7 @@ }, "name": "tar", "description": "tar for node", - "version": "0.1.19", + "version": "1.0.1", "repository": { "type": "git", "url": "git://github.com/isaacs/node-tar.git" @@ -16,25 +16,40 @@ "test": "tap test/*.js" }, "dependencies": { - "inherits": "2", "block-stream": "*", - "fstream": "~0.1.8" + "fstream": "^1.0.2", + "inherits": "2" }, "devDependencies": { - "tap": "0.x", - "rimraf": "1.x" + "graceful-fs": "^3.0.2", + "rimraf": "1.x", + "tap": "0.x" }, "license": "BSD", - "readme": "# node-tar\n\nTar for Node.js.\n\n[![NPM](https://nodei.co/npm/tar.png)](https://nodei.co/npm/tar/)\n\n## API\n\nSee `examples/` for usage examples.\n\n### var tar = require('tar')\n\nReturns an object with `.Pack`, `.Extract` and `.Parse` methods.\n\n### tar.Pack([properties])\n\nReturns a through stream. Use\n[fstream](https://npmjs.org/package/fstream) to write files into the\npack stream and you will receive tar archive data from the pack\nstream.\n\nThe optional `properties` object are used to set properties in the tar\n'Global Extended Header'.\n\n### tar.Extract([options])\n\nReturns a through stream. 
Write tar data to the stream and the files\nin the tarball will be extracted onto the filesystem.\n\n`options` can be:\n\n```js\n{\n path: '/path/to/extract/tar/into',\n strip: 0, // how many path segments to strip from the root when extracting\n}\n```\n\n`options` also get passed to the `fstream.Writer` instance that `tar`\nuses internally.\n\n### tar.Parse()\n\nReturns a writable stream. Write tar data to it and it will emit\n`entry` events for each entry parsed from the tarball. This is used by\n`tar.Extract`.\n", - "readmeFilename": "README.md", + "gitHead": "476bf6f5882b9c33d1cbf66f175d0f25e3981044", "bugs": { "url": "https://github.com/isaacs/node-tar/issues" }, "homepage": "https://github.com/isaacs/node-tar", - "_id": "tar@0.1.19", + "_id": "tar@1.0.1", + "_shasum": "6075b5a1f236defe0c7e3756d3d9b3ebdad0f19a", + "_from": "tar@1.0.1", + "_npmVersion": "1.4.23", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], "dist": { - "shasum": "fe45941799e660ce1ea52d875d37481b4bf13eac" + "shasum": "6075b5a1f236defe0c7e3756d3d9b3ebdad0f19a", + "tarball": "http://registry.npmjs.org/tar/-/tar-1.0.1.tgz" }, - "_from": "tar@0.1.19", - "_resolved": "https://registry.npmjs.org/tar/-/tar-0.1.19.tgz" + "directories": {}, + "_resolved": "https://registry.npmjs.org/tar/-/tar-1.0.1.tgz", + "readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/README.md nodejs-0.11.15/deps/npm/node_modules/tar/README.md --- nodejs-0.11.13/deps/npm/node_modules/tar/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -19,6 +19,8 @@ pack stream and you will receive tar archive data from the pack stream. +This only works with directories, it does not work with individual files. + The optional `properties` object are used to set properties in the tar 'Global Extended Header'. 
diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/test/extract.js nodejs-0.11.15/deps/npm/node_modules/tar/test/extract.js --- nodejs-0.11.13/deps/npm/node_modules/tar/test/extract.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/test/extract.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,6 @@ +// Set the umask, so that it works the same everywhere. +process.umask(parseInt('22', 8)) + var tap = require("tap") , tar = require("../tar.js") , fs = require("fs") @@ -114,6 +117,13 @@ size: 200, linkpath: undefined, nlink: 2 }, + { path: '/200LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', + mode: '120755', + type: 'SymbolicLink', + depth: 1, + size: 200, + linkpath: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', + nlink: 1 }, { path: '/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', mode: '100644', type: 'Link', @@ -121,13 +131,6 @@ size: 200, linkpath: path.join(target, '200-hard'), nlink: 2 }, - { path: '/200LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', - mode: '120777', - type: 'SymbolicLink', - depth: 1, - size: 200, - linkpath: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', - nlink: 1 }, { path: '/c.txt', mode: '100644', type: 'File', @@ -280,6 +283,12 @@ // So, this 
is as much a test of fstream.Reader and fstream.Writer // as it is of tar.Extract, but it sort of makes sense. +tap.test("preclean", function (t) { + require("rimraf").sync(__dirname + "/tmp/extract-test") + t.pass("cleaned!") + t.end() +}) + tap.test("extract test", function (t) { var extract = tar.Extract(target) var inp = fs.createReadStream(file) diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/test/extract-move.js nodejs-0.11.15/deps/npm/node_modules/tar/test/extract-move.js --- nodejs-0.11.13/deps/npm/node_modules/tar/test/extract-move.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/test/extract-move.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,132 @@ +// Set the umask, so that it works the same everywhere. +process.umask(parseInt('22', 8)) + +var tap = require("tap") + , tar = require("../tar.js") + , fs = require("fs") + , gfs = require("graceful-fs") + , path = require("path") + , file = path.resolve(__dirname, "fixtures/dir.tar") + , target = path.resolve(__dirname, "tmp/extract-test") + , index = 0 + , fstream = require("fstream") + , rimraf = require("rimraf") + , mkdirp = require("mkdirp") + + , ee = 0 + , expectEntries = [ + { + "path" : "dir/", + "mode" : "750", + "type" : "5", + "depth" : undefined, + "size" : 0, + "linkpath" : "", + "nlink" : undefined, + "dev" : undefined, + "ino" : undefined + }, + { + "path" : "dir/sub/", + "mode" : "750", + "type" : "5", + "depth" : undefined, + "size" : 0, + "linkpath" : "", + "nlink" : undefined, + "dev" : undefined, + "ino" : undefined + } ] + +function slow (fs, method, t1, t2) { + var orig = fs[method] + if (!orig) return null + fs[method] = function () { + var args = [].slice.call(arguments) + console.error("slow", method, args[0]) + var cb = args.pop() + + setTimeout(function () { + orig.apply(fs, args.concat(function(er, data) { + setTimeout(function() { + cb(er, data) + }, t2) + })) + }, t1) + } +} + +// Make sure we get the graceful-fs that fstream is using. 
+var gfs2 +try { + gfs2 = require("fstream/node_modules/graceful-fs") +} catch (er) {} + +var slowMethods = ["chown", "chmod", "utimes", "lutimes"] +slowMethods.forEach(function (method) { + var t1 = 500 + var t2 = 0 + slow(fs, method, t1, t2) + slow(gfs, method, t1, t2) + if (gfs2) { + slow(gfs2, method, t1, t2) + } +}) + + + +// The extract class basically just pipes the input +// to a Reader, and then to a fstream.DirWriter + +// So, this is as much a test of fstream.Reader and fstream.Writer +// as it is of tar.Extract, but it sort of makes sense. + +tap.test("preclean", function (t) { + rimraf.sync(target) + mkdirp.sync(target) + t.pass("cleaned!") + t.end() +}) + +tap.test("extract test", function (t) { + var extract = tar.Extract(target) + var inp = fs.createReadStream(file) + + // give it a weird buffer size to try to break in odd places + inp.bufferSize = 1234 + + inp.pipe(extract) + + extract.on("end", function () { + rimraf.sync(target) + + t.equal(ee, expectEntries.length, "should see "+ee+" entries") + + // should get no more entries after end + extract.removeAllListeners("entry") + extract.on("entry", function (e) { + t.fail("Should not get entries after end!") + }) + + t.end() + }) + + + extract.on("entry", function (entry) { + var found = + { path: entry.path + , mode: entry.props.mode.toString(8) + , type: entry.props.type + , depth: entry.props.depth + , size: entry.props.size + , linkpath: entry.props.linkpath + , nlink: entry.props.nlink + , dev: entry.props.dev + , ino: entry.props.ino + } + + var wanted = expectEntries[ee ++] + + t.equivalent(found, wanted, "tar entry " + ee + " " + wanted.path) + }) +}) Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/node_modules/tar/test/fixtures.tgz and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/node_modules/tar/test/fixtures.tgz differ diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/test/pack.js nodejs-0.11.15/deps/npm/node_modules/tar/test/pack.js --- 
nodejs-0.11.13/deps/npm/node_modules/tar/test/pack.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/test/pack.js 2015-01-20 21:22:17.000000000 +0000 @@ -178,6 +178,39 @@ fill: '' } ] , [ 'entry', + { path: 'fixtures/dir/', + mode: 488, + uid: uid, + gid: gid, + size: 0, + type: '5', + linkpath: '', + ustar: 'ustar\u0000', + ustarver: '00', + uname: '', + gname: '', + devmaj: 0, + devmin: 0, + fill: '' } ] + + , [ 'entry', + { path: 'fixtures/dir/sub/', + mode: 488, + uid: uid, + gid: gid, + size: 0, + type: '5', + linkpath: '', + ustar: 'ustar\u0000', + ustarver: '00', + uname: '', + gname: '', + devmaj: 0, + devmin: 0, + fill: '' } ] + + + , [ 'entry', { path: 'fixtures/foo.js', mode: 420, uid: uid, @@ -868,11 +901,15 @@ } t.equal(ev, wanted[0], "event type should be "+wanted[0]) - // if (ev !== wanted[0] || e.path !== wanted[1].path) { - // console.error(wanted) - // console.error([ev, e.props]) - // throw "break" - // } + if (ev !== wanted[0] || e.path !== wanted[1].path) { + console.error("wanted", wanted) + console.error([ev, e.props]) + e.on("end", function () { + console.error(e.fields) + throw "break" + }) + } + t.has(e.props, wanted[1], "properties "+wanted[1].path) if (wanted[2]) { diff -Nru nodejs-0.11.13/deps/npm/node_modules/tar/test/pack-no-proprietary.js nodejs-0.11.15/deps/npm/node_modules/tar/test/pack-no-proprietary.js --- nodejs-0.11.13/deps/npm/node_modules/tar/test/pack-no-proprietary.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/tar/test/pack-no-proprietary.js 2015-01-20 21:22:17.000000000 +0000 @@ -150,6 +150,38 @@ fill: '' } ] , [ 'entry', + { path: 'fixtures/dir/', + mode: 488, + uid: uid, + gid: gid, + size: 0, + type: '5', + linkpath: '', + ustar: 'ustar\u0000', + ustarver: '00', + uname: '', + gname: '', + devmaj: 0, + devmin: 0, + fill: '' } ] + + , [ 'entry', + { path: 'fixtures/dir/sub/', + mode: 488, + uid: uid, + gid: gid, + size: 0, + type: '5', + linkpath: '', + 
ustar: 'ustar\u0000', + ustarver: '00', + uname: '', + gname: '', + devmaj: 0, + devmin: 0, + fill: '' } ] + + , [ 'entry', { path: 'fixtures/foo.js', mode: 420, uid: uid, @@ -823,7 +855,7 @@ t.equal(ev, wanted[0], "event type should be "+wanted[0]) if (ev !== wanted[0] || e.path !== wanted[1].path) { - console.error(wanted) + console.error("wanted", wanted) console.error([ev, e.props]) e.on("end", function () { console.error(e.fields) diff -Nru nodejs-0.11.13/deps/npm/node_modules/uid-number/LICENCE nodejs-0.11.15/deps/npm/node_modules/uid-number/LICENCE --- nodejs-0.11.13/deps/npm/node_modules/uid-number/LICENCE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/uid-number/LICENCE 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) Isaac Z. Schlueter -All rights reserved. - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS -``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/uid-number/package.json nodejs-0.11.15/deps/npm/node_modules/uid-number/package.json --- nodejs-0.11.13/deps/npm/node_modules/uid-number/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/uid-number/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -19,12 +19,29 @@ "node": "*" }, "license": "ISC", - "readme": "Use this module to convert a username/groupname to a uid/gid number.\n\nUsage:\n\n```\nnpm install uid-number\n```\n\nThen, in your node program:\n\n```javascript\nvar uidNumber = require(\"uid-number\")\nuidNumber(\"isaacs\", function (er, uid, gid) {\n // gid is null because we didn't ask for a group name\n // uid === 24561 because that's my number.\n})\n```\n", - "readmeFilename": "README.md", "bugs": { "url": "https://github.com/isaacs/uid-number/issues" }, "homepage": "https://github.com/isaacs/uid-number", "_id": "uid-number@0.0.5", - "_from": "uid-number@latest" + "dist": { + "shasum": "5a3db23ef5dbd55b81fce0ec9a2ac6fccdebb81e", + "tarball": "http://registry.npmjs.org/uid-number/-/uid-number-0.0.5.tgz" + }, + "_from": "uid-number@latest", + "_npmVersion": "1.4.3", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "directories": {}, + "_shasum": "5a3db23ef5dbd55b81fce0ec9a2ac6fccdebb81e", + "_resolved": "https://registry.npmjs.org/uid-number/-/uid-number-0.0.5.tgz", + 
"readme": "ERROR: No README data found!" } diff -Nru nodejs-0.11.13/deps/npm/node_modules/wrappy/LICENSE nodejs-0.11.15/deps/npm/node_modules/wrappy/LICENSE --- nodejs-0.11.13/deps/npm/node_modules/wrappy/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/wrappy/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff -Nru nodejs-0.11.13/deps/npm/node_modules/wrappy/package.json nodejs-0.11.15/deps/npm/node_modules/wrappy/package.json --- nodejs-0.11.13/deps/npm/node_modules/wrappy/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/wrappy/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +{ + "name": "wrappy", + "version": "1.0.1", + "description": "Callback wrapping utility", + "main": "wrappy.js", + "directories": { + "test": "test" + }, + "dependencies": {}, + "devDependencies": { + "tap": "^0.4.12" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/wrappy" + }, + "author": { + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/wrappy/issues" + }, + "homepage": "https://github.com/npm/wrappy", + "gitHead": "006a8cbac6b99988315834c207896eed71fd069a", + "_id": "wrappy@1.0.1", + "_shasum": "1e65969965ccbc2db4548c6b84a6f2c5aedd4739", + "_from": "wrappy@1.0.1", + "_npmVersion": "2.0.0", + "_nodeVersion": "0.10.31", + "_npmUser": { + "name": "isaacs", + "email": "i@izs.me" + }, + "maintainers": [ + { + "name": "isaacs", + "email": "i@izs.me" + } + ], + "dist": { + "shasum": "1e65969965ccbc2db4548c6b84a6f2c5aedd4739", + "tarball": "http://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz" + }, + "_resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.1.tgz", + "readme": "ERROR: No README data found!" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/wrappy/README.md nodejs-0.11.15/deps/npm/node_modules/wrappy/README.md --- nodejs-0.11.13/deps/npm/node_modules/wrappy/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/wrappy/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +# wrappy + +Callback wrapping utility + +## USAGE + +```javascript +var wrappy = require("wrappy") + +// var wrapper = wrappy(wrapperFunction) + +// make sure a cb is called only once +// See also: http://npm.im/once for this specific use case +var once = wrappy(function (cb) { + var called = false + return function () { + if (called) return + called = true + return cb.apply(this, arguments) + } +}) + +function printBoo () { + console.log('boo') +} +// has some rando property +printBoo.iAmBooPrinter = true + +var onlyPrintOnce = once(printBoo) + +onlyPrintOnce() // prints 'boo' +onlyPrintOnce() // does nothing + +// random property is retained! 
+assert.equal(onlyPrintOnce.iAmBooPrinter, true) +``` diff -Nru nodejs-0.11.13/deps/npm/node_modules/wrappy/test/basic.js nodejs-0.11.15/deps/npm/node_modules/wrappy/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/wrappy/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/wrappy/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +var test = require('tap').test +var wrappy = require('../wrappy.js') + +test('basic', function (t) { + function onceifier (cb) { + var called = false + return function () { + if (called) return + called = true + return cb.apply(this, arguments) + } + } + onceifier.iAmOnce = {} + var once = wrappy(onceifier) + t.equal(once.iAmOnce, onceifier.iAmOnce) + + var called = 0 + function boo () { + t.equal(called, 0) + called++ + } + // has some rando property + boo.iAmBoo = true + + var onlyPrintOnce = once(boo) + + onlyPrintOnce() // prints 'boo' + onlyPrintOnce() // does nothing + t.equal(called, 1) + + // random property is retained! + t.equal(onlyPrintOnce.iAmBoo, true) + + var logs = [] + var logwrap = wrappy(function (msg, cb) { + logs.push(msg + ' wrapping cb') + return function () { + logs.push(msg + ' before cb') + var ret = cb.apply(this, arguments) + logs.push(msg + ' after cb') + } + }) + + var c = logwrap('foo', function () { + t.same(logs, [ 'foo wrapping cb', 'foo before cb' ]) + }) + c() + t.same(logs, [ 'foo wrapping cb', 'foo before cb', 'foo after cb' ]) + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/node_modules/wrappy/wrappy.js nodejs-0.11.15/deps/npm/node_modules/wrappy/wrappy.js --- nodejs-0.11.13/deps/npm/node_modules/wrappy/wrappy.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/wrappy/wrappy.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +// Returns a wrapper function that returns a wrapped callback +// The wrapper function should do some stuff, and return a +// presumably different callback function. 
+// This makes sure that own properties are retained, so that +// decorations and such are not lost along the way. +module.exports = wrappy +function wrappy (fn, cb) { + if (fn && cb) return wrappy(fn)(cb) + + if (typeof fn !== 'function') + throw new TypeError('need wrapper function') + + Object.keys(fn).forEach(function (k) { + wrapper[k] = fn[k] + }) + + return wrapper + + function wrapper() { + var args = new Array(arguments.length) + for (var i = 0; i < args.length; i++) { + args[i] = arguments[i] + } + var ret = fn.apply(this, args) + var cb = args[args.length-1] + if (typeof ret === 'function' && ret !== cb) { + Object.keys(cb).forEach(function (k) { + ret[k] = cb[k] + }) + } + return ret + } +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/write-file-atomic/index.js nodejs-0.11.15/deps/npm/node_modules/write-file-atomic/index.js --- nodejs-0.11.13/deps/npm/node_modules/write-file-atomic/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/write-file-atomic/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +'use strict' +var fs = require('graceful-fs'); +var chain = require('slide').chain; +var crypto = require('crypto'); + +var md5hex = function () { + var hash = crypto.createHash('md5'); + for (var ii=0; ii=1.1.0 <2.0.0", + "_npmVersion": "1.4.28", + "_npmUser": { + "name": "iarna", + "email": "me@re-becca.org" + }, + "maintainers": [ + { + "name": "iarna", + "email": "me@re-becca.org" + } + ], + "dist": { + "shasum": "e114cfb8f82188353f98217c5945451c9b4dc060", + "tarball": "http://registry.npmjs.org/write-file-atomic/-/write-file-atomic-1.1.0.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-1.1.0.tgz" +} diff -Nru nodejs-0.11.13/deps/npm/node_modules/write-file-atomic/README.md nodejs-0.11.15/deps/npm/node_modules/write-file-atomic/README.md --- nodejs-0.11.13/deps/npm/node_modules/write-file-atomic/README.md 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/node_modules/write-file-atomic/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +write-file-atomic +----------------- + +This is an extension for node's `fs.writeFile` that makes its operation +atomic and allows you set ownership (uid/gid of the file). + +### var writeFileAtomic = require('write-file-atomic')
writeFileAtomic(filename, data, [options], callback) + +* filename **String** +* data **String** | **Buffer** +* options **Object** + * chown **Object** + * uid **Number** + * gid **Number** + * encoding **String** | **Null** default = 'utf8' + * mode **Number** default = 438 (aka 0666 in Octal) +callback **Function** + +Atomically and asynchronously writes data to a file, replacing the file if it already +exists. data can be a string or a buffer. + +The file is initially named `filename + "." + md5hex(__filename, process.pid, ++invocations)`. +If writeFile completes successfully then, if passed the **chown** option it will change +the ownership of the file. Finally it renames the file back to the filename you specified. If +it encounters errors at any of these steps it will attempt to unlink the temporary file and then +pass the error back to the caller. + +If provided, the **chown** option requires both **uid** and **gid** properties or else +you'll get an error. + +The **encoding** option is ignored if **data** is a buffer. It defaults to 'utf8'. + +Example: + +```javascript +writeFileAtomic('message.txt', 'Hello Node', {chown:{uid:100,gid:50}}, function (err) { + if (err) throw err; + console.log('It\'s saved!'); +}); +``` + +### var writeFileAtomicSync = require('write-file-atomic').sync
writeFileAtomicSync(filename, data, [options]) + +The synchronous version of **writeFileAtomic**. diff -Nru nodejs-0.11.13/deps/npm/node_modules/write-file-atomic/test/basic.js nodejs-0.11.15/deps/npm/node_modules/write-file-atomic/test/basic.js --- nodejs-0.11.13/deps/npm/node_modules/write-file-atomic/test/basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/node_modules/write-file-atomic/test/basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,97 @@ +"use strict"; +var test = require('tap').test; +var requireInject = require('require-inject'); +var writeFileAtomic = requireInject('../index', { + 'graceful-fs': { + writeFile: function (tmpfile, data, options, cb) { + if (/nowrite/.test(tmpfile)) return cb('ENOWRITE'); + cb(); + }, + chown: function (tmpfile, uid, gid, cb) { + if (/nochown/.test(tmpfile)) return cb('ENOCHOWN'); + cb(); + }, + rename: function (tmpfile, filename, cb) { + if (/norename/.test(tmpfile)) return cb('ENORENAME'); + cb(); + }, + unlink: function (tmpfile, cb) { + if (/nounlink/.test(tmpfile)) return cb('ENOUNLINK'); + cb(); + }, + writeFileSync: function (tmpfile, data, options) { + if (/nowrite/.test(tmpfile)) throw 'ENOWRITE'; + }, + chownSync: function (tmpfile, uid, gid) { + if (/nochown/.test(tmpfile)) throw 'ENOCHOWN'; + }, + renameSync: function (tmpfile, filename) { + if (/norename/.test(tmpfile)) throw 'ENORENAME'; + }, + unlinkSync: function (tmpfile) { + if (/nounlink/.test(tmpfile)) throw 'ENOUNLINK'; + }, + } +}); +var writeFileAtomicSync = writeFileAtomic.sync; + +test('async tests', function (t) { + t.plan(7); + writeFileAtomic('good', 'test', {mode: '0777'}, function (err) { + t.notOk(err, 'No errors occur when passing in options'); + }); + writeFileAtomic('good', 'test', function (err) { + t.notOk(err, 'No errors occur when NOT passing in options'); + }); + writeFileAtomic('nowrite', 'test', function (err) { + t.is(err, 'ENOWRITE', 'writeFile failures propagate'); + }); + writeFileAtomic('nochown', 
'test', {chown: {uid:100,gid:100}}, function (err) { + t.is(err, 'ENOCHOWN', 'Chown failures propagate'); + }); + writeFileAtomic('nochown', 'test', function (err) { + t.notOk(err, 'No attempt to chown when no uid/gid passed in'); + }); + writeFileAtomic('norename', 'test', function (err) { + t.is(err, 'ENORENAME', 'Rename errors propagate'); + }); + writeFileAtomic('norename nounlink', 'test', function (err) { + t.is(err, 'ENORENAME', 'Failure to unlink the temp file does not clobber the original error'); + }); +}); + +test('sync tests', function (t) { + t.plan(7); + var throws = function (shouldthrow, msg, todo) { + var err; + try { todo() } catch (e) { err = e } + t.is(shouldthrow,err,msg); + } + var noexception = function (msg, todo) { + var err; + try { todo() } catch (e) { err = e } + t.notOk(err,msg); + } + + noexception('No errors occur when passing in options',function (){ + writeFileAtomicSync('good', 'test', {mode: '0777'}); + }) + noexception('No errors occur when NOT passing in options',function (){ + writeFileAtomicSync('good', 'test'); + }); + throws('ENOWRITE', 'writeFile failures propagate', function () { + writeFileAtomicSync('nowrite', 'test'); + }); + throws('ENOCHOWN', 'Chown failures propagate', function () { + writeFileAtomicSync('nochown', 'test', {chown: {uid:100,gid:100}}); + }); + noexception('No attempt to chown when no uid/gid passed in', function (){ + writeFileAtomicSync('nochown', 'test'); + }); + throws('ENORENAME', 'Rename errors propagate', function (){ + writeFileAtomicSync('norename', 'test'); + }); + throws('ENORENAME', 'Failure to unlink the temp file does not clobber the original error', function (){ + writeFileAtomicSync('norename nounlink', 'test'); + }); +}); diff -Nru nodejs-0.11.13/deps/npm/.npmignore nodejs-0.11.15/deps/npm/.npmignore --- nodejs-0.11.13/deps/npm/.npmignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/.npmignore 2015-01-20 21:22:17.000000000 +0000 @@ -7,6 +7,7 @@ 
/test/packages/npm-test-depends-on-spark/which-spark.log /test/packages/test-package/random-data.txt /test/root +node_modules/marked node_modules/ronn node_modules/tap node_modules/.bin @@ -24,3 +25,5 @@ /npm-*.tgz *.pyc + +/test/tap/builtin-config diff -Nru nodejs-0.11.13/deps/npm/.npmrc nodejs-0.11.15/deps/npm/.npmrc --- nodejs-0.11.13/deps/npm/.npmrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/.npmrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +save-prefix = ~ +proprietary-attribs = false diff -Nru nodejs-0.11.13/deps/npm/package.json nodejs-0.11.15/deps/npm/package.json --- nodejs-0.11.13/deps/npm/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,6 @@ { - "version": "1.4.9", + "version": "2.1.6", "name": "npm", - "publishConfig": { - "proprietary-attribs": false - }, "description": "A package manager for node", "keywords": [ "package manager", @@ -34,55 +31,69 @@ "main": "./lib/npm.js", "bin": "./bin/npm-cli.js", "dependencies": { - "abbrev": "~1.0.4", - "ansi": "~0.2.1", + "abbrev": "~1.0.5", + "ansi": "~0.3.0", "ansicolors": "~0.3.2", "ansistyles": "~0.1.3", - "archy": "0", + "archy": "~1.0.0", + "async-some": "~1.0.1", "block-stream": "0.0.7", + "char-spinner": "~1.0.1", "child-process-close": "~0.1.1", "chmodr": "~0.1.0", "chownr": "0", - "cmd-shim": "~1.1.1", - "columnify": "0.1.2", - "editor": "0.0.5", - "fstream": "~0.1.25", - "fstream-npm": "~0.1.6", - "github-url-from-git": "1.1.1", - "github-url-from-username-repo": "~0.1.0", - "glob": "~3.2.9", - "graceful-fs": "~2.0.2", - "ini": "~1.1.0", - "init-package-json": "0.0.16", - "lockfile": "~0.4.0", + "cmd-shim": "~2.0.1", + "columnify": "~1.2.1", + "config-chain": "~1.1.8", + "dezalgo": "~1.0.1", + "editor": "~0.1.0", + "fs-vacuum": "~1.2.1", + "fs-write-stream-atomic": "~1.0.2", + "fstream": "~1.0.2", + "fstream-npm": "~1.0.1", + "github-url-from-git": "~1.4.0", + 
"github-url-from-username-repo": "~1.0.2", + "glob": "~4.0.6", + "graceful-fs": "~3.0.4", + "inflight": "~1.0.4", + "inherits": "~2.0.1", + "ini": "~1.3.0", + "init-package-json": "~1.1.1", + "lockfile": "~1.0.0", "lru-cache": "~2.5.0", - "minimatch": "~0.2.14", - "mkdirp": "~0.3.5", - "node-gyp": "~0.13.0", - "nopt": "~2.2.1", - "npm-install-checks": "~1.0.0", - "npm-registry-client": "~0.4.8", - "npm-user-validate": "0.0.3", - "npmconf": "~0.1.15", - "npmlog": "0.0.6", - "once": "~1.3.0", - "opener": "~1.3.0", - "osenv": "0", + "minimatch": "~1.0.0", + "mkdirp": "~0.5.0", + "node-gyp": "~1.0.1", + "nopt": "~3.0.1", + "normalize-package-data": "~1.0.3", + "npm-cache-filename": "~1.0.1", + "npm-install-checks": "~1.0.2", + "npm-package-arg": "~2.1.3", + "npm-registry-client": "~3.2.4", + "npm-user-validate": "~0.1.1", + "npmlog": "~0.1.1", + "once": "~1.3.1", + "opener": "~1.4.0", + "osenv": "~0.1.0", "path-is-inside": "~1.0.0", "read": "~1.0.4", - "read-installed": "~2.0.1", - "read-package-json": "~1.1.9", - "request": "~2.30.0", - "retry": "~0.6.0", - "rimraf": "~2.2.5", - "semver": "~2.2.1", - "sha": "~1.2.1", - "slide": "~1.1.5", + "read-installed": "~3.1.2", + "read-package-json": "~1.2.7", + "readable-stream": "~1.0.32", + "realize-package-specifier": "~1.2.0", + "request": "~2.46.0", + "retry": "~0.6.1", + "rimraf": "~2.2.8", + "semver": "~4.1.0", + "sha": "~1.3.0", + "slide": "~1.1.6", "sorted-object": "~1.0.0", - "tar": "~0.1.19", + "tar": "~1.0.1", "text-table": "~0.2.0", "uid-number": "0.0.5", - "which": "1" + "which": "1", + "wrappy": "~1.0.1", + "write-file-atomic": "~1.1.0" }, "bundleDependencies": [ "abbrev", @@ -90,19 +101,26 @@ "ansicolors", "ansistyles", "archy", + "async-some", "block-stream", + "char-spinner", "child-process-close", "chmodr", "chownr", "cmd-shim", "columnify", + "config-chain", + "dezalgo", "editor", + "fs-vacuum", + "fs-write-stream-atomic", "fstream", "fstream-npm", "github-url-from-git", "github-url-from-username-repo", 
"glob", "graceful-fs", + "inflight", "inherits", "ini", "init-package-json", @@ -113,10 +131,11 @@ "node-gyp", "nopt", "normalize-package-data", + "npm-cache-filename", "npm-install-checks", + "npm-package-arg", "npm-registry-client", "npm-user-validate", - "npmconf", "npmlog", "once", "opener", @@ -125,6 +144,8 @@ "read", "read-installed", "read-package-json", + "readable-stream", + "realize-package-specifier", "request", "retry", "rimraf", @@ -135,23 +156,25 @@ "tar", "text-table", "uid-number", - "which" + "which", + "wrappy", + "write-file-atomic" ], "devDependencies": { + "marked": "~0.3.2", + "marked-man": "~0.1.4", + "nock": "~0.48.1", + "npm-registry-couchapp": "~2.6.2", "npm-registry-mock": "~0.6.3", - "ronn": "~0.3.6", - "tap": "~0.4.9" - }, - "engines": { - "node": ">=0.8", - "npm": "1" + "require-inject": "~1.1.0", + "tap": "~0.4.12" }, "scripts": { "test-legacy": "node ./test/run.js", "test": "tap --timeout 120 test/tap/*.js", "tap": "tap --timeout 120 test/tap/*.js", "test-all": "node ./test/run.js && tap test/tap/*.js", - "prepublish": "node bin/npm-cli.js prune --prefix=. --no-global && rm -rf test/*/*/node_modules && make -j32 doc", + "prepublish": "node bin/npm-cli.js prune --prefix=. --no-global && rm -rf test/*/*/node_modules && make -j8 doc", "dumpconf": "env | grep npm | sort | uniq" }, "license": "Artistic-2.0" diff -Nru nodejs-0.11.13/deps/npm/README.md nodejs-0.11.15/deps/npm/README.md --- nodejs-0.11.13/deps/npm/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -16,15 +16,15 @@ ## Super Easy Install -npm comes with node now. +npm comes with [node](http://nodejs.org/download/) now. ### Windows Computers -Get the MSI. npm is in it. +[Get the MSI](http://nodejs.org/download/). npm is in it. ### Apple Macintosh Computers -Get the pkg. npm is in it. +[Get the pkg](http://nodejs.org/download/). npm is in it. 
### Other Sorts of Unices @@ -154,7 +154,7 @@ if (er) return commandFailed(er) // command succeeded, and data might have some info }) - npm.on("log", function (message) { .... }) + npm.registry.log.on("log", function (message) { .... }) }) The `load` function takes an object hash of the command-line configs. diff -Nru nodejs-0.11.13/deps/npm/scripts/doc-build.sh nodejs-0.11.15/deps/npm/scripts/doc-build.sh --- nodejs-0.11.13/deps/npm/scripts/doc-build.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/scripts/doc-build.sh 2015-01-20 21:22:17.000000000 +0000 @@ -6,26 +6,52 @@ set -o errexit set -o pipefail -if ! [ -x node_modules/.bin/ronn ]; then +if ! [ -x node_modules/.bin/marked-man ]; then ps=0 - if [ -f .building_ronn ]; then - pid=$(cat .building_ronn) + if [ -f .building_marked-man ]; then + pid=$(cat .building_marked-man) ps=$(ps -p $pid | grep $pid | wc -l) || true fi - if [ -f .building_ronn ] && [ $ps != 0 ]; then - while [ -f .building_ronn ]; do + if [ -f .building_marked-man ] && [ $ps != 0 ]; then + while [ -f .building_marked-man ]; do sleep 1 done else - # a race to see which make process will be the one to install ronn - echo $$ > .building_ronn + # a race to see which make process will be the one to install marked-man + echo $$ > .building_marked-man sleep 1 - if [ $(cat .building_ronn) == $$ ]; then - make node_modules/.bin/ronn - rm .building_ronn + if [ $(cat .building_marked-man) == $$ ]; then + make node_modules/.bin/marked-man + rm .building_marked-man else - while [ -f .building_ronn ]; do + while [ -f .building_marked-man ]; do + sleep 1 + done + fi + fi +fi + +if ! 
[ -x node_modules/.bin/marked ]; then + ps=0 + if [ -f .building_marked ]; then + pid=$(cat .building_marked) + ps=$(ps -p $pid | grep $pid | wc -l) || true + fi + + if [ -f .building_marked ] && [ $ps != 0 ]; then + while [ -f .building_marked ]; do + sleep 1 + done + else + # a race to see which make process will be the one to install marked + echo $$ > .building_marked + sleep 1 + if [ $(cat .building_marked) == $$ ]; then + make node_modules/.bin/marked + rm .building_marked + else + while [ -f .building_marked ]; do sleep 1 done fi @@ -40,44 +66,59 @@ mkdir -p $(dirname $dest) +html_replace_tokens () { + local url=$1 + sed "s|@NAME@|$name|g" \ + | sed "s|@DATE@|$date|g" \ + | sed "s|@URL@|$url|g" \ + | sed "s|@VERSION@|$version|g" \ + | perl -p -e 's/]*)>([^\(]*\([0-9]\)) -- (.*?)<\/h1>/

\2<\/h1>

\3<\/p>/g' \ + | perl -p -e 's/npm-npm/npm/g' \ + | perl -p -e 's/([^"-])(npm-)?README(?!\.html)(\(1\))?/\1README<\/a>/g' \ + | perl -p -e 's/<a href="[^"]+README.html">README<\/a><\/title>/<title>README<\/title>/g' \ + | perl -p -e 's/([^"-])([^\(> ]+)(\(1\))/\1<a href="..\/cli\/\2.html">\2\3<\/a>/g' \ + | perl -p -e 's/([^"-])([^\(> ]+)(\(3\))/\1<a href="..\/api\/\2.html">\2\3<\/a>/g' \ + | perl -p -e 's/([^"-])([^\(> ]+)(\(5\))/\1<a href="..\/files\/\2.html">\2\3<\/a>/g' \ + | perl -p -e 's/([^"-])([^\(> ]+)(\(7\))/\1<a href="..\/misc\/\2.html">\2\3<\/a>/g' \ + | perl -p -e 's/\([1357]\)<\/a><\/h1>/<\/a><\/h1>/g' \ + | (if [ $(basename $(dirname $dest)) == "doc" ]; then + perl -p -e 's/ href="\.\.\// href="/g' + else + cat + fi) +} + +man_replace_tokens () { + sed "s|@VERSION@|$version|g" \ + | perl -p -e 's/(npm\\-)?([a-zA-Z\\\.\-]*)\(1\)/npm help \2/g' \ + | perl -p -e 's/(npm\\-)?([a-zA-Z\\\.\-]*)\(([57])\)/npm help \3 \2/g' \ + | perl -p -e 's/(npm\\-)?([a-zA-Z\\\.\-]*)\(3\)/npm apihelp \2/g' \ + | perl -p -e 's/npm\(1\)/npm help npm/g' \ + | perl -p -e 's/npm\(3\)/npm apihelp npm/g' +} + case $dest in *.[1357]) - ./node_modules/.bin/ronn --roff $src \ - | sed "s|@VERSION@|$version|g" \ - | perl -pi -e 's/(npm\\-)?([^\(]*)\(1\)/npm help \2/g' \ - | perl -pi -e 's/(npm\\-)?([^\(]*)\([57]\)/npm help \3 \2/g' \ - | perl -pi -e 's/(npm\\-)?([^\(]*)\(3\)/npm apihelp \2/g' \ - | perl -pi -e 's/npm\(1\)/npm help npm/g' \ - | perl -pi -e 's/npm\(3\)/npm apihelp npm/g' \ - > $dest + ./node_modules/.bin/marked-man --roff $src \ + | man_replace_tokens > $dest exit $? 
;; - *.html) + + html/partial/*.html) + url=${dest/html\/partial\//} + cat $src | ./node_modules/.bin/marked | html_replace_tokens $url > $dest + ;; + + html/*.html) url=${dest/html\//} (cat html/dochead.html && \ - ./node_modules/.bin/ronn -f $src && + cat $src && \ cat html/docfoot.html)\ - | sed "s|@NAME@|$name|g" \ - | sed "s|@DATE@|$date|g" \ - | sed "s|@URL@|$url|g" \ - | sed "s|@VERSION@|$version|g" \ - | perl -pi -e 's/<h1>([^\(]*\([0-9]\)) -- (.*?)<\/h1>/<h1>\1<\/h1> <p>\2<\/p>/g' \ - | perl -pi -e 's/npm-npm/npm/g' \ - | perl -pi -e 's/([^"-])(npm-)?README(?!\.html)(\(1\))?/\1<a href="..\/..\/doc\/README.html">README<\/a>/g' \ - | perl -pi -e 's/<title><a href="[^"]+README.html">README<\/a><\/title>/<title>README<\/title>/g' \ - | perl -pi -e 's/([^"-])([^\(> ]+)(\(1\))/\1<a href="..\/cli\/\2.html">\2\3<\/a>/g' \ - | perl -pi -e 's/([^"-])([^\(> ]+)(\(3\))/\1<a href="..\/api\/\2.html">\2\3<\/a>/g' \ - | perl -pi -e 's/([^"-])([^\(> ]+)(\(5\))/\1<a href="..\/files\/\2.html">\2\3<\/a>/g' \ - | perl -pi -e 's/([^"-])([^\(> ]+)(\(7\))/\1<a href="..\/misc\/\2.html">\2\3<\/a>/g' \ - | perl -pi -e 's/\([1357]\)<\/a><\/h1>/<\/a><\/h1>/g' \ - | (if [ $(basename $(dirname $dest)) == "doc" ]; then - perl -pi -e 's/ href="\.\.\// href="/g' - else - cat - fi) \ + | html_replace_tokens $url \ > $dest exit $? 
;; + *) echo "Invalid destination type: $dest" >&2 exit 1 diff -Nru nodejs-0.11.13/deps/npm/scripts/index-build.js nodejs-0.11.15/deps/npm/scripts/index-build.js --- nodejs-0.11.13/deps/npm/scripts/index-build.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/scripts/index-build.js 2015-01-20 21:22:17.000000000 +0000 @@ -35,15 +35,16 @@ "==============================================\n") writeLines(files, 0) - writeLines(files, 1, "Command Line Documentation") - writeLines(files, 3, "API Documentation") - writeLines(files, 5, "Files") - writeLines(files, 7, "Misc") + writeLines(files, 1, "Command Line Documentation", "Using npm on the command line") + writeLines(files, 3, "API Documentation", "Using npm in your Node programs") + writeLines(files, 5, "Files", "File system structures npm uses") + writeLines(files, 7, "Misc", "Various other bits and bobs") } -function writeLines (files, sxn, heading) { - if (heading) - console.log("# %s\n", heading) +function writeLines (files, sxn, heading, desc) { + if (heading) { + console.log("## %s\n\n%s\n", heading, desc) + } files.filter(function (f) { return f[0] === sxn }).forEach(writeLine) @@ -57,6 +58,6 @@ var content = fs.readFileSync(doc, "utf8").split("\n")[0].split("-- ")[1] - console.log("## %s(%d)\n", d, sxn) + console.log("### %s(%d)\n", d, sxn) console.log(content + "\n") } diff -Nru nodejs-0.11.13/deps/npm/scripts/publish-tag.js nodejs-0.11.15/deps/npm/scripts/publish-tag.js --- nodejs-0.11.13/deps/npm/scripts/publish-tag.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/scripts/publish-tag.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +var semver = require("semver") +var version = semver.parse(require("../package.json").version) +console.log('v%s.%s-next', version.major, version.minor) diff -Nru nodejs-0.11.13/deps/npm/.tern-project nodejs-0.11.15/deps/npm/.tern-project --- nodejs-0.11.13/deps/npm/.tern-project 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/.tern-project 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -{ - "libs": [ - ], - "plugins": { - "node": {} - } -} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/test/common-tap.js nodejs-0.11.15/deps/npm/test/common-tap.js --- nodejs-0.11.13/deps/npm/test/common-tap.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/common-tap.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,16 +1,27 @@ var spawn = require("child_process").spawn +var path = require("path") +var fs = require("fs") var port = exports.port = 1337 exports.registry = "http://localhost:" + port process.env.npm_config_loglevel = "error" +var npm_config_cache = path.resolve(__dirname, "npm_cache") +exports.npm_config_cache = npm_config_cache + var bin = exports.bin = require.resolve("../bin/npm-cli.js") var once = require("once") + exports.npm = function (cmd, opts, cb) { cb = once(cb) cmd = [bin].concat(cmd) opts = opts || {} + opts.env = opts.env ? opts.env : process.env + if (!opts.env.npm_config_cache) { + opts.env.npm_config_cache = npm_config_cache + } + var stdout = "" , stderr = "" , node = process.execPath @@ -26,7 +37,31 @@ child.on("error", cb) - child.on("close", function (code, signal) { + child.on("close", function (code) { cb(null, code, stdout, stderr) }) + return child +} + +// based on http://bit.ly/1tkI6DJ +function deleteNpmCacheRecursivelySync(cache) { + cache = cache ? 
cache : npm_config_cache + var files = [] + var res + if( fs.existsSync(cache) ) { + files = fs.readdirSync(cache) + files.forEach(function(file,index) { + var curPath = path.resolve(cache, file) + if(fs.lstatSync(curPath).isDirectory()) { // recurse + deleteNpmCacheRecursivelySync(curPath) + } else { // delete file + if (res = fs.unlinkSync(curPath)) + throw Error("Failed to delete file " + curPath + ", error " + res) + } + }) + if (res = fs.rmdirSync(cache)) + throw Error("Failed to delete directory " + cache + ", error " + res) + } + return 0 } +exports.deleteNpmCacheRecursivelySync = deleteNpmCacheRecursivelySync \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/test/fixtures/config/builtin nodejs-0.11.15/deps/npm/test/fixtures/config/builtin --- nodejs-0.11.13/deps/npm/test/fixtures/config/builtin 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/builtin 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +builtin-config = true diff -Nru nodejs-0.11.13/deps/npm/test/fixtures/config/globalconfig nodejs-0.11.15/deps/npm/test/fixtures/config/globalconfig --- nodejs-0.11.13/deps/npm/test/fixtures/config/globalconfig 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/globalconfig 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +package-config:foo = boo diff -Nru nodejs-0.11.13/deps/npm/test/fixtures/config/malformed nodejs-0.11.15/deps/npm/test/fixtures/config/malformed --- nodejs-0.11.13/deps/npm/test/fixtures/config/malformed 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/malformed 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +email = """ \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/test/fixtures/config/multi-ca nodejs-0.11.15/deps/npm/test/fixtures/config/multi-ca --- nodejs-0.11.13/deps/npm/test/fixtures/config/multi-ca 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/multi-ca 2015-01-20 
21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIICjTCCAfigAwIBAgIEMaYgRzALBgkqhkiG9w0BAQQwRTELMAkGA1UEBhMCVVMx +NjA0BgNVBAoTLU5hdGlvbmFsIEFlcm9uYXV0aWNzIGFuZCBTcGFjZSBBZG1pbmlz +dHJhdGlvbjAmFxE5NjA1MjgxMzQ5MDUrMDgwMBcROTgwNTI4MTM0OTA1KzA4MDAw +ZzELMAkGA1UEBhMCVVMxNjA0BgNVBAoTLU5hdGlvbmFsIEFlcm9uYXV0aWNzIGFu +ZCBTcGFjZSBBZG1pbmlzdHJhdGlvbjEgMAkGA1UEBRMCMTYwEwYDVQQDEwxTdGV2 +ZSBTY2hvY2gwWDALBgkqhkiG9w0BAQEDSQAwRgJBALrAwyYdgxmzNP/ts0Uyf6Bp +miJYktU/w4NG67ULaN4B5CnEz7k57s9o3YY3LecETgQ5iQHmkwlYDTL2fTgVfw0C +AQOjgaswgagwZAYDVR0ZAQH/BFowWDBWMFQxCzAJBgNVBAYTAlVTMTYwNAYDVQQK +Ey1OYXRpAAAAACBBZXJvbmF1dGljcyBhbmQgU3BhY2UgQWRtaW5pc3RyYXRpb24x +DTALBgNVBAMTBENSTDEwFwYDVR0BAQH/BA0wC4AJODMyOTcwODEwMBgGA1UdAgQR +MA8ECTgzMjk3MDgyM4ACBSAwDQYDVR0KBAYwBAMCBkAwCwYJKoZIhvcNAQEEA4GB +AH2y1VCEw/A4zaXzSYZJTTUi3uawbbFiS2yxHvgf28+8Js0OHXk1H1w2d6qOHH21 +X82tZXd/0JtG0g1T9usFFBDvYK8O0ebgz/P5ELJnBL2+atObEuJy1ZZ0pBDWINR3 +WkDNLCGiTkCKp0F5EWIrVDwh54NNevkCQRZita+z4IBO +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +AAAAAACCAfigAwIBAgIEMaYgRzALBgkqhkiG9w0BAQQwRTELMAkGA1UEBhMCVVMx +NjA0BgNVBAoTLU5hdGlvbmFsIEFlcm9uYXV0aWNzIGFuZCBTcGFjZSBBZG1pbmlz +dHJhdGlvbjAmFxE5NjA1MjgxMzQ5MDUrMDgwMBcROTgwNTI4MTM0OTA1KzA4MDAw +ZzELMAkGA1UEBhMCVVMxNjA0BgNVBAoTLU5hdGlvbmFsIEFlcm9uYXV0aWNzIGFu +ZCBTcGFjZSBBZG1pbmlzdHJhdGlvbjEgMAkGA1UEBRMCMTYwEwYDVQQDEwxTdGV2 +ZSBTY2hvY2gwWDALBgkqhkiG9w0BAQEDSQAwRgJBALrAwyYdgxmzNP/ts0Uyf6Bp +miJYktU/w4NG67ULaN4B5CnEz7k57s9o3YY3LecETgQ5iQHmkwlYDTL2fTgVfw0C +AQOjgaswgagwZAYDVR0ZAQH/BFowWDBWMFQxCzAJBgNVBAYTAlVTMTYwNAYDVQQK +Ey1OYXRpb25hbCBBZXJvbmF1dGljcyBhbmQgU3BhY2UgQWRtaW5pc3RyYXRpb24x +DTALBgNVBAMTBENSTDEwFwYDVR0BAQH/BA0wC4AJODMyOTcwODEwMBgGA1UdAgQR +MA8ECTgzMjk3MDgyM4ACBSAwDQYDVR0KBAYwBAMCBkAwCwYJKoZIhvcNAQEEA4GB +AH2y1VCEw/A4zaXzSYZJTTUi3uawbbFiS2yxHvgf28+8Js0OHXk1H1w2d6qOHH21 +X82tZXd/0JtG0g1T9usFFBDvYK8O0ebgz/P5ELJnBL2+atObEuJy1ZZ0pBDWINR3 +WkDNLCGiTkCKp0F5EWIrVDwh54NNevkCQRZita+z4IBO +-----END CERTIFICATE----- diff -Nru 
nodejs-0.11.13/deps/npm/test/fixtures/config/.npmrc nodejs-0.11.15/deps/npm/test/fixtures/config/.npmrc --- nodejs-0.11.13/deps/npm/test/fixtures/config/.npmrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/.npmrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +just = testing diff -Nru nodejs-0.11.13/deps/npm/test/fixtures/config/userconfig nodejs-0.11.15/deps/npm/test/fixtures/config/userconfig --- nodejs-0.11.13/deps/npm/test/fixtures/config/userconfig 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/userconfig 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,23 @@ +email = i@izs.me +env-thing = ${random_env_var} +init.author.name = Isaac Z. Schlueter +init.author.email = i@izs.me +init.author.url = http://blog.izs.me/ +init.version = 1.2.3 +proprietary-attribs = false +npm:publishtest = true +_npmjs.org:couch = https://admin:password@localhost:5984/registry +npm-www:nocache = 1 +nodedir = /Users/isaacs/dev/js/node-v0.8 +sign-git-tag = true +message = v%s +strict-ssl = false +tmp = ~/.tmp +_auth = dXNlcm5hbWU6cGFzc3dvcmQ= + +[_token] +AuthSession = yabba-dabba-doodle +version = 1 +expires = 1345001053415 +path = / +httponly = true diff -Nru nodejs-0.11.13/deps/npm/test/fixtures/config/userconfig-with-gc nodejs-0.11.15/deps/npm/test/fixtures/config/userconfig-with-gc --- nodejs-0.11.13/deps/npm/test/fixtures/config/userconfig-with-gc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/fixtures/config/userconfig-with-gc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,22 @@ +globalconfig=/Users/ogd/Documents/projects/npm/npm/test/fixtures/config/globalconfig +email=i@izs.me +env-thing=asdf +init.author.name=Isaac Z. 
Schlueter +init.author.email=i@izs.me +init.author.url=http://blog.izs.me/ +init.version=1.2.3 +proprietary-attribs=false +npm:publishtest=true +_npmjs.org:couch=https://admin:password@localhost:5984/registry +npm-www:nocache=1 +sign-git-tag=false +message=v%s +strict-ssl=false +_auth=dXNlcm5hbWU6cGFzc3dvcmQ= + +[_token] +AuthSession=yabba-dabba-doodle +version=1 +expires=1345001053415 +path=/ +httponly=true diff -Nru nodejs-0.11.13/deps/npm/test/packages/npm-test-optional-deps/package.json nodejs-0.11.15/deps/npm/test/packages/npm-test-optional-deps/package.json --- nodejs-0.11.13/deps/npm/test/packages/npm-test-optional-deps/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/packages/npm-test-optional-deps/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -5,7 +5,6 @@ { "npm-test-foobarzaaakakaka": "http://example.com/" , "dnode": "10.999.14234" , "sax": "0.3.5" - , "999 invalid name": "1.2.3" , "glob": "some invalid version 99 #! $$ x y z" , "npm-test-failer":"*" } diff -Nru nodejs-0.11.13/deps/npm/test/run.js nodejs-0.11.15/deps/npm/test/run.js --- nodejs-0.11.13/deps/npm/test/run.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/run.js 2015-01-20 21:22:17.000000000 +0000 @@ -7,7 +7,7 @@ , testdir = __dirname , fs = require("graceful-fs") , npmpkg = path.dirname(testdir) - , npmcli = path.join(__dirname, "bin", "npm-cli.js") + , npmcli = path.resolve(npmpkg, "bin", "npm-cli.js") var temp = process.env.TMPDIR || process.env.TMP @@ -63,7 +63,7 @@ } var execCount = 0 -function exec (cmd, shouldFail, cb) { +function exec (cmd, cwd, shouldFail, cb) { if (typeof shouldFail === "function") { cb = shouldFail, shouldFail = false } @@ -81,7 +81,10 @@ cmd = cmd.replace(/^npm /, npmReplace + " ") cmd = cmd.replace(/^node /, nodeReplace + " ") - child_process.exec(cmd, {env: env}, function (er, stdout, stderr) { + console.error("$$$$$$ cd %s; PATH=%s %s", cwd, env.PATH, cmd) + + child_process.exec(cmd, {cwd: cwd, env: 
env}, function (er, stdout, stderr) { + console.error("$$$$$$ after command", cmd, cwd) if (stdout) { console.error(prefix(stdout, " 1> ")) } @@ -102,10 +105,8 @@ } function execChain (cmds, cb) { - chain(cmds.reduce(function (l, r) { - return l.concat(r) - }, []).map(function (cmd) { - return [exec, cmd] + chain(cmds.map(function (args) { + return [exec].concat(args) }), cb) } @@ -118,9 +119,8 @@ function setup (cb) { cleanup(function (er) { if (er) return cb(er) - execChain([ "node \""+path.resolve(npmpkg, "bin", "npm-cli.js") - + "\" install \""+npmpkg+"\"" - , "npm config set package-config:foo boo" + execChain([ [ "node \""+npmcli+"\" install \""+npmpkg+"\"", root ], + [ "npm config set package-config:foo boo", root ] ], cb) }) } @@ -134,6 +134,7 @@ failures = 0 process.chdir(testdir) + var base = path.resolve(root, path.join("lib", "node_modules")) // get the list of packages var packages = fs.readdirSync(path.resolve(testdir, "packages")) @@ -150,17 +151,17 @@ packagesToRm.push("npm") } - chain - ( [ setup - , [ exec, "npm install "+npmpkg ] + chain( + [ setup + , [ exec, "npm install "+npmpkg, testdir ] , [ execChain, packages.map(function (p) { - return "npm install packages/"+p + return [ "npm install packages/"+p, testdir ] }) ] , [ execChain, packages.map(function (p) { - return "npm test "+p + return [ "npm test -ddd", path.resolve(base, p) ] }) ] , [ execChain, packagesToRm.map(function (p) { - return "npm rm " + p + return [ "npm rm "+p, root ] }) ] , installAndTestEach ] @@ -171,15 +172,15 @@ function installAndTestEach (cb) { var thingsToChain = [ setup - , [ execChain, packages.map(function (p) { - return [ "npm install packages/"+p - , "npm test "+p - , "npm rm "+p ] - }) ] + , [ execChain, flatten(packages.map(function (p) { + return [ [ "npm install packages/"+p, testdir ] + , [ "npm test", path.resolve(base, p) ] + , [ "npm rm "+p, root ] ] + })) ] ] if (process.platform !== "win32") { // Windows can't handle npm rm npm due to file-in-use 
issues. - thingsToChain.push([exec, "npm rm npm"]) + thingsToChain.push([exec, "npm rm npm", testdir]) } chain(thingsToChain, cb) diff -Nru nodejs-0.11.13/deps/npm/test/tap/00-check-mock-dep.js nodejs-0.11.15/deps/npm/test/tap/00-check-mock-dep.js --- nodejs-0.11.13/deps/npm/test/tap/00-check-mock-dep.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/00-check-mock-dep.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,7 @@ console.log("TAP Version 13") -process.on("uncaughtException", function(er) { +process.on("uncaughtException", function (er) { + if (er) { throw er } console.log("not ok - Failed checking mock registry dep. Expect much fail!") console.log("1..1") process.exit(1) @@ -10,6 +11,7 @@ var semver = require("semver") var mock = require("npm-registry-mock/package.json").version var req = require("../../package.json").devDependencies["npm-registry-mock"] + assert(semver.satisfies(mock, req)) console.log("ok") console.log("1..1") diff -Nru nodejs-0.11.13/deps/npm/test/tap/00-config-setup.js nodejs-0.11.15/deps/npm/test/tap/00-config-setup.js --- nodejs-0.11.13/deps/npm/test/tap/00-config-setup.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/00-config-setup.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +var path = require("path") +var userconfigSrc = path.resolve(__dirname, "..", "fixtures", "config", "userconfig") +exports.userconfig = userconfigSrc + "-with-gc" +exports.globalconfig = path.resolve(__dirname, "..", "fixtures", "config", "globalconfig") +exports.builtin = path.resolve(__dirname, "..", "fixtures", "config", "builtin") +exports.malformed = path.resolve(__dirname, "..", "fixtures", "config", "malformed") +exports.ucData = + { globalconfig: exports.globalconfig, + email: "i@izs.me", + "env-thing": "asdf", + "init.author.name": "Isaac Z. 
Schlueter", + "init.author.email": "i@izs.me", + "init.author.url": "http://blog.izs.me/", + "init.version": "1.2.3", + "proprietary-attribs": false, + "npm:publishtest": true, + "_npmjs.org:couch": "https://admin:password@localhost:5984/registry", + "npm-www:nocache": "1", + nodedir: "/Users/isaacs/dev/js/node-v0.8", + "sign-git-tag": true, + message: "v%s", + "strict-ssl": false, + "tmp": process.env.HOME + "/.tmp", + _auth: "dXNlcm5hbWU6cGFzc3dvcmQ=", + _token: + { AuthSession: "yabba-dabba-doodle", + version: "1", + expires: "1345001053415", + path: "/", + httponly: true } } + +// set the userconfig in the env +// unset anything else that npm might be trying to foist on us +Object.keys(process.env).forEach(function (k) { + if (k.match(/^npm_config_/i)) { + delete process.env[k] + } +}) +process.env.npm_config_userconfig = exports.userconfig +process.env.npm_config_other_env_thing = 1000 +process.env.random_env_var = "asdf" +process.env.npm_config__underbar_env_thing = "underful" +process.env.NPM_CONFIG_UPPERCASE_ENV_THING = 42 + +exports.envData = { + userconfig: exports.userconfig, + "_underbar-env-thing": "underful", + "uppercase-env-thing": "42", + "other-env-thing": "1000" +} +exports.envDataFix = { + userconfig: exports.userconfig, + "_underbar-env-thing": "underful", + "uppercase-env-thing": 42, + "other-env-thing": 1000 +} + + +if (module === require.main) { + // set the globalconfig in the userconfig + var fs = require("fs") + var uc = fs.readFileSync(userconfigSrc) + var gcini = "globalconfig = " + exports.globalconfig + "\n" + fs.writeFileSync(exports.userconfig, gcini + uc) + + console.log("0..1") + console.log("ok 1 setup done") +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/00-verify-bundle-deps.js nodejs-0.11.15/deps/npm/test/tap/00-verify-bundle-deps.js --- nodejs-0.11.13/deps/npm/test/tap/00-verify-bundle-deps.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/00-verify-bundle-deps.js 2015-01-20 21:22:17.000000000 +0000 
@@ -0,0 +1,27 @@ +var fs = require("fs") +var path = require("path") +var test = require("tap").test + +var manifest = require("../../package.json") +var deps = Object.keys(manifest.dependencies) +var dev = Object.keys(manifest.devDependencies) +var bundled = manifest.bundleDependencies + +test("all deps are bundled deps or dev deps", function (t) { + deps.forEach(function (name) { + t.assert( + bundled.indexOf(name) !== -1, + name + " is in bundledDependencies" + ) + }) + + t.same( + fs.readdirSync(path.resolve(__dirname, "../../node_modules")).filter(function (name) { + return (dev.indexOf(name) === -1) && (name !== ".bin") + }).sort(), + bundled.sort(), + "bundleDependencies matches what's in node_modules" + ) + + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/00-verify-ls-ok.js nodejs-0.11.15/deps/npm/test/tap/00-verify-ls-ok.js --- nodejs-0.11.13/deps/npm/test/tap/00-verify-ls-ok.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/00-verify-ls-ok.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +var common = require("../common-tap") +var test = require("tap").test +var path = require("path") +var cwd = path.resolve(__dirname, "..", "..") +var fs = require("fs") + +test("npm ls in npm", function (t) { + t.ok(fs.existsSync(cwd), "ensure that the path we are calling ls within exists") + var files = fs.readdirSync(cwd) + t.notEqual(files.length, 0, "ensure there are files in the directory we are to ls") + + var opt = { cwd: cwd, stdio: [ "ignore", "ignore", 2 ] } + common.npm(["ls"], opt, function (err, code) { + t.ifError(err, "error should not exist") + t.equal(code, 0, "npm ls exited with code") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/404-parent.js nodejs-0.11.15/deps/npm/test/tap/404-parent.js --- nodejs-0.11.13/deps/npm/test/tap/404-parent.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/404-parent.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,26 +1,25 @@ -var common = 
require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var osenv = require('osenv') -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = require('mkdirp') -var pkg = path.resolve(__dirname, '404-parent') +var common = require("../common-tap.js") +var test = require("tap").test +var npm = require("../../") +var osenv = require("osenv") +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var pkg = path.resolve(__dirname, "404-parent") var mr = require("npm-registry-mock") -test('404-parent: if parent exists, specify parent in error message', function(t) { +test("404-parent: if parent exists, specify parent in error message", function (t) { setup() - rimraf.sync(path.resolve(pkg, 'node_modules')) - performInstall(function(err) { - t.ok(err instanceof Error) - t.pass('error was returned') - t.ok(err.parent === '404-parent-test') + rimraf.sync(path.resolve(pkg, "node_modules")) + performInstall(function (err) { + t.ok(err instanceof Error, "error was returned") + t.ok(err.parent === "404-parent-test", "error's parent set") t.end() }) }) -test('cleanup', function(t) { +test("cleanup", function (t) { process.chdir(osenv.tmpdir()) rimraf.sync(pkg) t.end() @@ -28,23 +27,23 @@ function setup() { mkdirp.sync(pkg) - mkdirp.sync(path.resolve(pkg, 'cache')) - fs.writeFileSync(path.resolve(pkg, 'package.json'), JSON.stringify({ - author: 'Evan Lucas', - name: '404-parent-test', - version: '0.0.0', - description: 'Test for 404-parent', + mkdirp.sync(path.resolve(pkg, "cache")) + fs.writeFileSync(path.resolve(pkg, "package.json"), JSON.stringify({ + author: "Evan Lucas", + name: "404-parent-test", + version: "0.0.0", + description: "Test for 404-parent", dependencies: { - 'test-npm-404-parent-test': '*' + "test-npm-404-parent-test": "*" } - }), 'utf8') + }), "utf8") process.chdir(pkg) } function performInstall(cb) { mr(common.port, 
function (s) { // create mock registry. - npm.load({registry: common.registry}, function() { - npm.commands.install(pkg, [], function(err) { + npm.load({registry: common.registry}, function () { + npm.commands.install(pkg, [], function (err) { cb(err) s.close() // shutdown mock npm server. }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/builtin-config.js nodejs-0.11.15/deps/npm/test/tap/builtin-config.js --- nodejs-0.11.13/deps/npm/test/tap/builtin-config.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/builtin-config.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,125 @@ +var fs = require("fs") + +if (process.argv[2] === "write-builtin") { + var pid = process.argv[3] + fs.writeFileSync("npmrc", "foo=bar\npid=" + pid + "\n") + return +} + +var rcdata = "foo=bar\npid=" + process.pid + "\n" +var common = require("../common-tap.js") +var path = require("path") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var folder = path.resolve(__dirname, "builtin-config") +var test = require("tap").test +var npm = path.resolve(__dirname, "../..") +var spawn = require("child_process").spawn +var node = process.execPath + +test("setup", function (t) { + rimraf.sync(folder) + mkdirp.sync(folder + "/first") + mkdirp.sync(folder + "/second") + mkdirp.sync(folder + "/cache") + mkdirp.sync(folder + "/tmp") + + t.pass("finished setup") + t.end() +}) + + +test("install npm into first folder", function (t) { + var args = ["install", npm, "-g", + "--prefix=" + folder + "/first", + "--cache=" + folder + "/cache", + "--no-spin", + "--loglevel=silent", + "--tmp=" + folder + "/tmp"] + common.npm(args, {stdio: "inherit"}, function (er, code) { + if (er) throw er + t.equal(code, 0) + t.end() + }) +}) + +test("write npmrc file", function (t) { + common.npm(["explore", "npm", "-g", + "--prefix=" + folder + "/first", + "--cache=" + folder + "/cache", + "--tmp=" + folder + "/tmp", + "--no-spin", + "--", + node, __filename, "write-builtin", process.pid + ], 
+ {"stdio": "inherit"}, + function (er, code) { + if (er) throw er + t.equal(code, 0) + t.end() + }) +}) + +test("use first npm to install second npm", function (t) { + // get the root location + common.npm([ "root", "-g", + "--prefix=" + folder + "/first", + "--cache=" + folder + "/cache", + "--tmp=" + folder + "/tmp", + "--no-spin" + ], {}, function (er, code, so) { + if (er) throw er + t.equal(code, 0) + var root = so.trim() + t.ok(fs.statSync(root).isDirectory()) + + var bin = path.resolve(root, "npm/bin/npm-cli.js") + spawn( node + , [ bin + , "install", npm + , "-g" + , "--prefix=" + folder + "/second" + , "--cache=" + folder + "/cache" + , "--tmp=" + folder + "/tmp" + , "--no-spin" + ]) + .on("error", function (er) { throw er }) + .on("close", function (code) { + t.equal(code, 0, "code is zero") + t.end() + }) + }) +}) + +test("verify that the builtin config matches", function (t) { + common.npm([ "root", "-g", + "--prefix=" + folder + "/first", + "--cache=" + folder + "/cache", + "--tmp=" + folder + "/tmp" + ], {}, function (er, code, so) { + if (er) throw er + t.equal(code, 0) + var firstRoot = so.trim() + common.npm([ "root", "-g", + "--prefix=" + folder + "/second", + "--cache=" + folder + "/cache", + "--tmp=" + folder + "/tmp" + ], {}, function (er, code, so) { + if (er) throw er + t.equal(code, 0) + var secondRoot = so.trim() + var firstRc = path.resolve(firstRoot, "npm", "npmrc") + var secondRc = path.resolve(secondRoot, "npm", "npmrc") + var firstData = fs.readFileSync(firstRc, "utf8") + var secondData = fs.readFileSync(secondRc, "utf8") + t.equal(firstData, secondData) + t.end() + }) + }) +}) + + +test("clean", function (t) { + rimraf.sync(folder) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/cache-add-localdir-fallback.js nodejs-0.11.15/deps/npm/test/tap/cache-add-localdir-fallback.js --- nodejs-0.11.13/deps/npm/test/tap/cache-add-localdir-fallback.js 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/test/tap/cache-add-localdir-fallback.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,84 @@ +var path = require("path") +var test = require("tap").test +var npm = require("../../lib/npm.js") +var requireInject = require("require-inject") + +var realizePackageSpecifier = requireInject("realize-package-specifier", { + "fs": { + stat: function (file, cb) { + process.nextTick(function () { + switch (file) { + case path.resolve("named"): + cb(new Error("ENOENT")) + break + case path.resolve("file.tgz"): + cb(null, { isDirectory: function () { return false } }) + break + case path.resolve("dir-no-package"): + cb(null, { isDirectory: function () { return true } }) + break + case path.resolve("dir-no-package/package.json"): + cb(new Error("ENOENT")) + break + case path.resolve("dir-with-package"): + cb(null, { isDirectory: function () { return true } }) + break + case path.resolve("dir-with-package/package.json"): + cb(null, {}) + break + case path.resolve(__dirname, "dir-with-package"): + cb(null, { isDirectory: function () { return true } }) + break + case path.join(__dirname, "dir-with-package", "package.json"): + cb(null, {}) + break + case path.resolve(__dirname, "file.tgz"): + cb(null, { isDirectory: function () { return false } }) + break + default: + throw new Error("Unknown test file passed to stat: " + file) + } + }) + } + } +}) + +npm.load({loglevel : "silent"}, function () { + var cache = requireInject("../../lib/cache.js", { + "realize-package-specifier": realizePackageSpecifier, + "../../lib/cache/add-named.js": function addNamed (name, version, data, cb) { + cb(null, "addNamed") + }, + "../../lib/cache/add-local.js": function addLocal (name, data, cb) { + cb(null, "addLocal") + } + }) + + test("npm install localdir fallback", function (t) { + t.plan(12) + cache.add("named", null, null, false, function (er, which) { + t.ifError(er, "named was cached") + t.is(which, "addNamed", "registry package name") + }) + cache.add("file.tgz", 
null, null, false, function (er, which) { + t.ifError(er, "file.tgz was cached") + t.is(which, "addLocal", "local file") + }) + cache.add("dir-no-package", null, null, false, function (er, which) { + t.ifError(er, "local directory was cached") + t.is(which, "addNamed", "local directory w/o package.json") + }) + cache.add("dir-with-package", null, null, false, function (er, which) { + t.ifError(er, "local directory with package was cached") + t.is(which,"addLocal", "local directory with package.json") + }) + cache.add("file:./dir-with-package", null, __dirname, false, function (er, which) { + t.ifError(er, "local directory (as URI) with package was cached") + t.is(which, "addLocal", "file: URI to local directory with package.json") + }) + cache.add("file:./file.tgz", null, __dirname, false, function (er, which) { + t.ifError(er, "local file (as URI) with package was cached") + t.is(which, "addLocal", "file: URI to local file with package.json") + }) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/cache-add-unpublished.js nodejs-0.11.15/deps/npm/test/tap/cache-add-unpublished.js --- nodejs-0.11.13/deps/npm/test/tap/cache-add-unpublished.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/cache-add-unpublished.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,61 +1,12 @@ -var common = require('../common-tap.js') -var test = require('tap').test - -var server - -var port = common.port -var http = require("http") - -var doc = { - "_id": "superfoo", - "_rev": "5-d11adeec0fdfea6b96b120610d2bed71", - "name": "superfoo", - "time": { - "modified": "2014-02-18T18:35:02.930Z", - "created": "2014-02-18T18:34:08.437Z", - "1.1.0": "2014-02-18T18:34:08.437Z", - "unpublished": { - "name": "isaacs", - "time": "2014-04-30T18:26:45.584Z", - "tags": { - "latest": "1.1.0" - }, - "maintainers": [ - { - "name": "foo", - "email": "foo@foo.com" - } - ], - "description": "do lots a foo", - "versions": [ - "1.1.0" - ] - } - }, - "_attachments": {} -} - -test("setup", 
function (t) { - server = http.createServer(function(req, res) { - res.end(JSON.stringify(doc)) - }) - server.listen(port, function() { - t.end() - }) -}) +var common = require("../common-tap.js") +var test = require("tap").test test("cache add", function (t) { common.npm(["cache", "add", "superfoo"], {}, function (er, c, so, se) { if (er) throw er - t.ok(c) - t.equal(so, "") - t.similar(se, /404 Not Found: superfoo/) - t.end() - }) -}) - -test("cleanup", function (t) { - server.close(function() { + t.ok(c, "got non-zero exit code") + t.equal(so, "", "nothing printed to stdout") + t.similar(se, /404 Not Found: superfoo/, "got expected error") t.end() }) }) Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/test/tap/cache-shasum-fork/underscore-1.5.1.tgz and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/test/tap/cache-shasum-fork/underscore-1.5.1.tgz differ diff -Nru nodejs-0.11.13/deps/npm/test/tap/cache-shasum-fork.js nodejs-0.11.15/deps/npm/test/tap/cache-shasum-fork.js --- nodejs-0.11.13/deps/npm/test/tap/cache-shasum-fork.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/cache-shasum-fork.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var mr = require("npm-registry-mock") +var common = require("../common-tap.js") +var cache = path.resolve(__dirname, "cache-shasum-fork", "CACHE") +var cwd = path.resolve(__dirname, "cache-shasum-fork", "CWD") +var server + +// Test for https://github.com/npm/npm/issues/3265 + +test("mock reg", function (t) { + rimraf.sync(cache) + mkdirp.sync(cache) + rimraf.sync(cwd) + mkdirp.sync(path.join(cwd, "node_modules")) + mr(common.port, function (s) { + server = s + t.pass("ok") + t.end() + }) +}) + +test("npm cache - install from fork", function (t) { + // Install from a tarball that thinks it is underscore@1.5.1 + // (but is actually a 
fork) + var forkPath = path.resolve( + __dirname, "cache-shasum-fork", "underscore-1.5.1.tgz") + common.npm(["install", forkPath], { + cwd: cwd, + env: { + "npm_config_cache" : cache, + "npm_config_registry" : common.registry, + "npm_config_loglevel" : "silent" + } + }, function (err, code, stdout, stderr) { + t.ifErr(err, "install finished without error") + t.notOk(stderr, "Should not get data on stderr: " + stderr) + t.equal(code, 0, "install finished successfully") + + t.equal(stdout, "underscore@1.5.1 node_modules/underscore\n") + var index = fs.readFileSync( + path.join(cwd, "node_modules", "underscore", "index.js"), + "utf8" + ) + t.equal(index, 'console.log("This is the fork");\n\n') + t.end() + }) +}) + +test("npm cache - install from origin", function (t) { + // Now install the real 1.5.1. + rimraf.sync(path.join(cwd, "node_modules")) + mkdirp.sync(path.join(cwd, "node_modules")) + common.npm(["install", "underscore"], { + cwd: cwd, + env: { + "npm_config_cache" : cache, + "npm_config_registry" : common.registry, + "npm_config_loglevel" : "silent" + } + }, function (err, code, stdout, stderr) { + t.ifErr(err, "install finished without error") + t.equal(code, 0, "install finished successfully") + t.notOk(stderr, "Should not get data on stderr: " + stderr) + t.equal(stdout, "underscore@1.5.1 node_modules/underscore\n") + var index = fs.readFileSync( + path.join(cwd, "node_modules", "underscore", "index.js"), + "utf8" + ) + t.equal(index, "module.exports = require('./underscore');\n") + t.end() + }) +}) + +test("cleanup", function (t) { + server.close() + rimraf.sync(cache) + rimraf.sync(cwd) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/cache-shasum.js nodejs-0.11.15/deps/npm/test/tap/cache-shasum.js --- nodejs-0.11.13/deps/npm/test/tap/cache-shasum.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/cache-shasum.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,6 @@ var npm = require.resolve("../../") var test = 
require("tap").test var path = require("path") -var fs = require("fs") var rimraf = require("rimraf") var mkdirp = require("mkdirp") var mr = require("npm-registry-mock") @@ -11,7 +10,7 @@ var sha = require("sha") var server -test("mock reg", function(t) { +test("mock reg", function (t) { rimraf.sync(cache) mkdirp.sync(cache) mr(common.port, function (s) { @@ -21,7 +20,7 @@ }) }) -test("npm cache add request", function(t) { +test("npm cache add request", function (t) { var c = spawn(process.execPath, [ npm, "cache", "add", "request@2.27.0", "--cache=" + cache, @@ -30,21 +29,21 @@ ]) c.stderr.pipe(process.stderr) - c.stdout.on("data", function(d) { + c.stdout.on("data", function (d) { t.fail("Should not get data on stdout: " + d) }) - c.on("close", function(code) { + c.on("close", function (code) { t.notOk(code, "exit ok") t.end() }) }) -test("compare", function(t) { +test("compare", function (t) { var d = path.resolve(__dirname, "cache-shasum/request") var p = path.resolve(d, "2.27.0/package.tgz") - var r = require(path.resolve(d, ".cache.json")) - var rshasum = r.versions['2.27.0'].dist.shasum + var r = require("./cache-shasum/localhost_1337/request/.cache.json") + var rshasum = r.versions["2.27.0"].dist.shasum sha.get(p, function (er, pshasum) { if (er) throw er @@ -53,7 +52,7 @@ }) }) -test("cleanup", function(t) { +test("cleanup", function (t) { server.close() rimraf.sync(cache) t.end() diff -Nru nodejs-0.11.13/deps/npm/test/tap/circular-dep.js nodejs-0.11.15/deps/npm/test/tap/circular-dep.js --- nodejs-0.11.13/deps/npm/test/tap/circular-dep.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/circular-dep.js 2015-01-20 21:22:17.000000000 +0000 @@ -17,12 +17,12 @@ setup(function () { npm.install("optimist", function (err) { if (err) return t.fail(err) - npm.dedupe(function(err) { + npm.dedupe(function (err) { if (err) return t.fail(err) t.ok(existsSync(path.resolve(pkg, "minimist", "node_modules", "optimist", "node_modules", "minimist" - 
))) + )), "circular dependency uncircled") cleanup() server.close() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-basic.js nodejs-0.11.15/deps/npm/test/tap/config-basic.js --- nodejs-0.11.13/deps/npm/test/tap/config-basic.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-basic.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +var test = require("tap").test +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") +var path = require("path") + +var projectData = { + "save-prefix": "~", + "proprietary-attribs": false +} + +var ucData = common.ucData +var envData = common.envData +var envDataFix = common.envDataFix + +var gcData = { "package-config:foo": "boo" } + +var biData = {} + +var cli = { foo: "bar", umask: 022 } + +var expectList = +[ cli, + envDataFix, + projectData, + ucData, + gcData, + biData ] + +var expectSources = { + cli: { data: cli }, + env: { + data: envDataFix, + source: envData, + prefix: "" + }, + project: { + path: path.resolve(__dirname, "..", "..", ".npmrc"), + type: "ini", + data: projectData + }, + user: { + path: common.userconfig, + type: "ini", + data: ucData + }, + global: { + path: common.globalconfig, + type: "ini", + data: gcData + }, + builtin: { data: biData } +} + +test("no builtin", function (t) { + npmconf.load(cli, function (er, conf) { + if (er) throw er + t.same(conf.list, expectList) + t.same(conf.sources, expectSources) + t.same(npmconf.rootConf.list, []) + t.equal(npmconf.rootConf.root, npmconf.defs.defaults) + t.equal(conf.root, npmconf.defs.defaults) + t.equal(conf.get("umask"), 022) + t.equal(conf.get("heading"), "npm") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-builtin.js nodejs-0.11.15/deps/npm/test/tap/config-builtin.js --- nodejs-0.11.13/deps/npm/test/tap/config-builtin.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-builtin.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 
+1,68 @@ +var test = require("tap").test +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") +var path = require("path") + +var ucData = common.ucData + +var envData = common.envData +var envDataFix = common.envDataFix + +var gcData = { "package-config:foo": "boo" } + +var biData = { "builtin-config": true } + +var cli = { foo: "bar", heading: "foo", "git-tag-version": false } + +var projectData = { + "save-prefix": "~", + "proprietary-attribs": false +} + +var expectList = [ + cli, + envDataFix, + projectData, + ucData, + gcData, + biData +] + +var expectSources = { + cli: { data: cli }, + env: { + data: envDataFix, + source: envData, + prefix: "" + }, + project: { + path: path.resolve(__dirname, "..", "..", ".npmrc"), + type: "ini", + data: projectData + }, + user: { + path: common.userconfig, + type: "ini", + data: ucData + }, + global: { + path: common.globalconfig, + type: "ini", + data: gcData + }, + builtin: { data: biData } +} + +test("with builtin", function (t) { + npmconf.load(cli, common.builtin, function (er, conf) { + if (er) throw er + t.same(conf.list, expectList) + t.same(conf.sources, expectSources) + t.same(npmconf.rootConf.list, []) + t.equal(npmconf.rootConf.root, npmconf.defs.defaults) + t.equal(conf.root, npmconf.defs.defaults) + t.equal(conf.get("heading"), "foo") + t.equal(conf.get("git-tag-version"), false) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-certfile.js nodejs-0.11.15/deps/npm/test/tap/config-certfile.js --- nodejs-0.11.13/deps/npm/test/tap/config-certfile.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-certfile.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +require("./00-config-setup.js") + +var path = require("path") +var fs = require("fs") +var test = require("tap").test +var npmconf = require("../../lib/config/core.js") + +test("cafile loads as ca", function (t) { + var cafile = path.join(__dirname, "..", 
"fixtures", "config", "multi-ca") + + npmconf.load({cafile: cafile}, function (er, conf) { + if (er) throw er + + t.same(conf.get("cafile"), cafile) + t.same(conf.get("ca").join("\n"), fs.readFileSync(cafile, "utf8").trim()) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-credentials.js nodejs-0.11.15/deps/npm/test/tap/config-credentials.js --- nodejs-0.11.13/deps/npm/test/tap/config-credentials.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-credentials.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,295 @@ +var test = require("tap").test + +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") + +var URI = "https://registry.lvh.me:8661/" + +test("getting scope with no credentials set", function (t) { + npmconf.load({}, function (er, conf) { + t.ifError(er, "configuration loaded") + + var basic = conf.getCredentialsByURI(URI) + t.equal(basic.scope, "//registry.lvh.me:8661/", "nerfed URL extracted") + + t.end() + }) +}) + +test("trying to set credentials with no URI", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + t.throws(function () { + conf.setCredentialsByURI() + }, "enforced missing URI") + + t.end() + }) +}) + +test("set with missing credentials object", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + t.throws(function () { + conf.setCredentialsByURI(URI) + }, "enforced missing credentials") + + t.end() + }) +}) + +test("set with empty credentials object", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + t.throws(function () { + conf.setCredentialsByURI(URI, {}) + }, "enforced missing credentials") + + t.end() + }) +}) + +test("set with token", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + 
t.doesNotThrow(function () { + conf.setCredentialsByURI(URI, {token : "simple-token"}) + }, "needs only token") + + var expected = { + scope : "//registry.lvh.me:8661/", + token : "simple-token", + username : undefined, + password : undefined, + email : undefined, + auth : undefined, + alwaysAuth : undefined + } + + t.same(conf.getCredentialsByURI(URI), expected, "got bearer token and scope") + + t.end() + }) +}) + +test("set with missing username", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + var credentials = { + password : "password", + email : "ogd@aoaioxxysz.net" + } + + t.throws(function () { + conf.setCredentialsByURI(URI, credentials) + }, "enforced missing email") + + t.end() + }) +}) + +test("set with missing password", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + var credentials = { + username : "username", + email : "ogd@aoaioxxysz.net" + } + + t.throws(function () { + conf.setCredentialsByURI(URI, credentials) + }, "enforced missing email") + + t.end() + }) +}) + +test("set with missing email", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + var credentials = { + username : "username", + password : "password" + } + + t.throws(function () { + conf.setCredentialsByURI(URI, credentials) + }, "enforced missing email") + + t.end() + }) +}) + +test("set with old-style credentials", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + var credentials = { + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net" + } + + t.doesNotThrow(function () { + conf.setCredentialsByURI(URI, credentials) + }, "requires all of username, password, and email") + + var expected = { + scope : "//registry.lvh.me:8661/", + token : undefined, + username : "username", + password : "password", + email : 
"ogd@aoaioxxysz.net", + auth : "dXNlcm5hbWU6cGFzc3dvcmQ=", + alwaysAuth : false + } + + t.same(conf.getCredentialsByURI(URI), expected, "got credentials") + + t.end() + }) +}) + +test("get old-style credentials for default registry", function (t) { + npmconf.load(common.builtin, function (er, conf) { + var actual = conf.getCredentialsByURI(conf.get("registry")) + var expected = { + scope : "//registry.npmjs.org/", + token : undefined, + password : "password", + username : "username", + email : "i@izs.me", + auth : "dXNlcm5hbWU6cGFzc3dvcmQ=", + alwaysAuth : false + } + t.same(actual, expected) + t.end() + }) +}) + +test("set with always-auth enabled", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + var credentials = { + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net", + alwaysAuth : true + } + + conf.setCredentialsByURI(URI, credentials) + + var expected = { + scope : "//registry.lvh.me:8661/", + token : undefined, + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net", + auth : "dXNlcm5hbWU6cGFzc3dvcmQ=", + alwaysAuth : true + } + + t.same(conf.getCredentialsByURI(URI), expected, "got credentials") + + t.end() + }) +}) + +test("set with always-auth disabled", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + + var credentials = { + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net", + alwaysAuth : false + } + + conf.setCredentialsByURI(URI, credentials) + + var expected = { + scope : "//registry.lvh.me:8661/", + token : undefined, + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net", + auth : "dXNlcm5hbWU6cGFzc3dvcmQ=", + alwaysAuth : false + } + + t.same(conf.getCredentialsByURI(URI), expected, "got credentials") + + t.end() + }) +}) + +test("set with global always-auth enabled", function (t) { + npmconf.load(common.builtin, 
function (er, conf) { + t.ifError(er, "configuration loaded") + var original = conf.get("always-auth") + conf.set("always-auth", true) + + var credentials = { + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net" + } + + conf.setCredentialsByURI(URI, credentials) + + var expected = { + scope : "//registry.lvh.me:8661/", + token : undefined, + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net", + auth : "dXNlcm5hbWU6cGFzc3dvcmQ=", + alwaysAuth : true + } + + t.same(conf.getCredentialsByURI(URI), expected, "got credentials") + + conf.set("always-auth", original) + t.end() + }) +}) + +test("set with global always-auth disabled", function (t) { + npmconf.load(common.builtin, function (er, conf) { + t.ifError(er, "configuration loaded") + var original = conf.get("always-auth") + conf.set("always-auth", false) + + var credentials = { + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net" + } + + conf.setCredentialsByURI(URI, credentials) + + var expected = { + scope : "//registry.lvh.me:8661/", + token : undefined, + username : "username", + password : "password", + email : "ogd@aoaioxxysz.net", + auth : "dXNlcm5hbWU6cGFzc3dvcmQ=", + alwaysAuth : false + } + + t.same(conf.getCredentialsByURI(URI), expected, "got credentials") + + conf.set("always-auth", original) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-malformed.js nodejs-0.11.15/deps/npm/test/tap/config-malformed.js --- nodejs-0.11.13/deps/npm/test/tap/config-malformed.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-malformed.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +var test = require('tap').test + +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") + +test('with malformed', function (t) { + npmconf.load({}, common.malformed, function (er, conf) { + t.ok(er, 'Expected parse error') + if (!(er && /Failed parsing JSON 
config key email/.test(er.message))) { + throw er + } + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-meta.js nodejs-0.11.15/deps/npm/test/tap/config-meta.js --- nodejs-0.11.13/deps/npm/test/tap/config-meta.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-meta.js 2015-01-20 21:22:17.000000000 +0000 @@ -16,8 +16,13 @@ var DOC = {} var exceptions = [ + path.resolve(lib, "adduser.js"), path.resolve(lib, "config.js"), - path.resolve(lib, "utils", "lifecycle.js") + path.resolve(lib, "publish.js"), + path.resolve(lib, "utils", "lifecycle.js"), + path.resolve(lib, "utils", "map-to-registry.js"), + path.resolve(nm, "npm-registry-client", "lib", "publish.js"), + path.resolve(nm, "npm-registry-client", "lib", "request.js") ] test("get files", function (t) { @@ -31,7 +36,12 @@ return path.resolve(lib, f) }) files.forEach(function (f) { - if (fs.statSync(f).isDirectory()) + try { + var s = fs.statSync(f) + } catch (er) { + return + } + if (s.isDirectory()) walk(f) else if (f.match(/\.js$/)) FILES.push(f) @@ -41,16 +51,16 @@ test("get lines", function (t) { FILES.forEach(function (f) { - var lines = fs.readFileSync(f, 'utf8').split('\n') + var lines = fs.readFileSync(f, "utf8").split(/\r|\n/) lines.forEach(function (l, i) { var matches = l.split(/conf(?:ig)?\.get\(/g) matches.shift() matches.forEach(function (m) { - m = m.split(')').shift() + m = m.split(")").shift() var literal = m.match(/^['"].+['"]$/) if (literal) { m = m.slice(1, -1) - if (!m.match(/^\_/) && m !== 'argv') + if (!m.match(/^\_/) && m !== "argv") CONFS[m] = { file: f, line: i @@ -66,53 +76,51 @@ }) test("get docs", function (t) { - var d = fs.readFileSync(doc, "utf8").split("\n") + var d = fs.readFileSync(doc, "utf8").split(/\r|\n/) // walk down until the "## Config Settings" section for (var i = 0; i < d.length && d[i] !== "## Config Settings"; i++); i++ // now gather up all the ^###\s lines until the next ^##\s - var doclines = [] for (; i < d.length && 
!d[i].match(/^## /); i++) { if (d[i].match(/^### /)) - DOC[ d[i].replace(/^### /, '').trim() ] = true + DOC[ d[i].replace(/^### /, "").trim() ] = true } t.pass("read the docs") t.end() }) test("check configs", function (t) { - var defs = require("npmconf/config-defs.js") + var defs = require("../../lib/config/defaults.js") var types = Object.keys(defs.types) var defaults = Object.keys(defs.defaults) - - for (var c in CONFS) { - if (CONFS[c].file.indexOf(lib) === 0) { - t.ok(DOC[c], "should be documented " + c + " " - + CONFS[c].file + ":" + CONFS[c].line) - t.ok(types.indexOf(c) !== -1, "should be defined in npmconf " + c) - t.ok(defaults.indexOf(c) !== -1, "should have default in npmconf " + c) + for (var c1 in CONFS) { + if (CONFS[c1].file.indexOf(lib) === 0) { + t.ok(DOC[c1], "should be documented " + c1 + " " + + CONFS[c1].file + ":" + CONFS[c1].line) + t.ok(types.indexOf(c1) !== -1, "should be defined in npmconf " + c1) + t.ok(defaults.indexOf(c1) !== -1, "should have default in npmconf " + c1) } } - for (var c in DOC) { - if (c !== "versions" && c !== "version") { - t.ok(CONFS[c], "config in doc should be used somewhere " + c) - t.ok(types.indexOf(c) !== -1, "should be defined in npmconf " + c) - t.ok(defaults.indexOf(c) !== -1, "should have default in npmconf " + c) + for (var c2 in DOC) { + if (c2 !== "versions" && c2 !== "version" && c2 !== "init.version") { + t.ok(CONFS[c2], "config in doc should be used somewhere " + c2) + t.ok(types.indexOf(c2) !== -1, "should be defined in npmconf " + c2) + t.ok(defaults.indexOf(c2) !== -1, "should have default in npmconf " + c2) } } - types.forEach(function(c) { - if (!c.match(/^\_/) && c !== 'argv' && !c.match(/^versions?$/)) { - t.ok(DOC[c], 'defined type should be documented ' + c) - t.ok(CONFS[c], 'defined type should be used ' + c) + types.forEach(function (c) { + if (!c.match(/^\_/) && c !== "argv" && !c.match(/^versions?$/)) { + t.ok(DOC[c], "defined type should be documented " + c) + t.ok(CONFS[c], "defined 
type should be used " + c) } }) - defaults.forEach(function(c) { - if (!c.match(/^\_/) && c !== 'argv' && !c.match(/^versions?$/)) { - t.ok(DOC[c], 'defaulted type should be documented ' + c) - t.ok(CONFS[c], 'defaulted type should be used ' + c) + defaults.forEach(function (c) { + if (!c.match(/^\_/) && c !== "argv" && !c.match(/^versions?$/)) { + t.ok(DOC[c], "defaulted type should be documented " + c) + t.ok(CONFS[c], "defaulted type should be used " + c) } }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-project.js nodejs-0.11.15/deps/npm/test/tap/config-project.js --- nodejs-0.11.13/deps/npm/test/tap/config-project.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-project.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +var test = require("tap").test +var path = require("path") +var fix = path.resolve(__dirname, "..", "fixtures", "config") +var projectRc = path.resolve(fix, ".npmrc") +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") + +var projectData = { just: "testing" } + +var ucData = common.ucData +var envData = common.envData +var envDataFix = common.envDataFix + +var gcData = { "package-config:foo": "boo" } + +var biData = {} + +var cli = { foo: "bar", umask: 022, prefix: fix } + +var expectList = [ + cli, + envDataFix, + projectData, + ucData, + gcData, + biData +] + +var expectSources = { + cli: { data: cli }, + env: { + data: envDataFix, + source: envData, + prefix: "" + }, + project: { + path: projectRc, + type: "ini", + data: projectData + }, + user: { + path: common.userconfig, + type: "ini", + data: ucData + }, + global: { + path: common.globalconfig, + type: "ini", + data: gcData + }, + builtin: { data: biData } +} + +test("no builtin", function (t) { + npmconf.load(cli, function (er, conf) { + if (er) throw er + t.same(conf.list, expectList) + t.same(conf.sources, expectSources) + t.same(npmconf.rootConf.list, []) + t.equal(npmconf.rootConf.root, 
npmconf.defs.defaults) + t.equal(conf.root, npmconf.defs.defaults) + t.equal(conf.get("umask"), 022) + t.equal(conf.get("heading"), "npm") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-save.js nodejs-0.11.15/deps/npm/test/tap/config-save.js --- nodejs-0.11.13/deps/npm/test/tap/config-save.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-save.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,88 @@ +var fs = require("fs") +var ini = require("ini") +var test = require("tap").test +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") + +var expectConf = [ + "globalconfig = " + common.globalconfig, + "email = i@izs.me", + "env-thing = asdf", + "init.author.name = Isaac Z. Schlueter", + "init.author.email = i@izs.me", + "init.author.url = http://blog.izs.me/", + "init.version = 1.2.3", + "proprietary-attribs = false", + "npm:publishtest = true", + "_npmjs.org:couch = https://admin:password@localhost:5984/registry", + "npm-www:nocache = 1", + "sign-git-tag = false", + "message = v%s", + "strict-ssl = false", + "_auth = dXNlcm5hbWU6cGFzc3dvcmQ=", + "", + "[_token]", + "AuthSession = yabba-dabba-doodle", + "version = 1", + "expires = 1345001053415", + "path = /", + "httponly = true", + "" +].join("\n") + +var expectFile = [ + "globalconfig = " + common.globalconfig, + "email = i@izs.me", + "env-thing = asdf", + "init.author.name = Isaac Z. 
Schlueter", + "init.author.email = i@izs.me", + "init.author.url = http://blog.izs.me/", + "init.version = 1.2.3", + "proprietary-attribs = false", + "npm:publishtest = true", + "_npmjs.org:couch = https://admin:password@localhost:5984/registry", + "npm-www:nocache = 1", + "sign-git-tag = false", + "message = v%s", + "strict-ssl = false", + "_auth = dXNlcm5hbWU6cGFzc3dvcmQ=", + "", + "[_token]", + "AuthSession = yabba-dabba-doodle", + "version = 1", + "expires = 1345001053415", + "path = /", + "httponly = true", + "" +].join("\n") + +test("saving configs", function (t) { + npmconf.load(function (er, conf) { + if (er) + throw er + conf.set("sign-git-tag", false, "user") + conf.del("nodedir") + conf.del("tmp") + var foundConf = ini.stringify(conf.sources.user.data) + t.same(ini.parse(foundConf), ini.parse(expectConf)) + fs.unlinkSync(common.userconfig) + conf.save("user", function (er) { + if (er) + throw er + var uc = fs.readFileSync(conf.get("userconfig"), "utf8") + t.same(ini.parse(uc), ini.parse(expectFile)) + t.end() + }) + }) +}) + +test("setting prefix", function (t) { + npmconf.load(function (er, conf) { + if (er) + throw er + + conf.prefix = "newvalue" + t.same(conf.prefix, "newvalue") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/config-semver-tag.js nodejs-0.11.15/deps/npm/test/tap/config-semver-tag.js --- nodejs-0.11.13/deps/npm/test/tap/config-semver-tag.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/config-semver-tag.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ +var util = require("util") +var test = require("tap").test +var npmconf = require("../../lib/config/core.js") +var common = require("./00-config-setup.js") + +var cli = { tag: "v2.x" } + +var log = require("npmlog") + +test("tag cannot be a SemVer", function (t) { + var messages = [] + log.warn = function (m) { + messages.push(m + " " + util.format.apply(util, [].slice.call(arguments, 1))) + } + + var expect = [ + 'invalid config 
tag="v2.x"', + "invalid config Tag must not be a SemVer range" + ] + + npmconf.load(cli, common.builtin, function (er, conf) { + if (er) throw er + t.equal(conf.get("tag"), "latest") + t.same(messages, expect) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/dedupe.js nodejs-0.11.15/deps/npm/test/tap/dedupe.js --- nodejs-0.11.13/deps/npm/test/tap/dedupe.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/dedupe.js 2015-01-20 21:22:17.000000000 +0000 @@ -2,17 +2,26 @@ , fs = require("fs") , path = require("path") , existsSync = fs.existsSync || path.existsSync - , npm = require("../../") , rimraf = require("rimraf") , mr = require("npm-registry-mock") - , common = require('../common-tap.js') + , common = require("../common-tap.js") + +var EXEC_OPTS = {} test("dedupe finds the common module and moves it up one level", function (t) { setup(function (s) { - npm.install(".", function (err) { - if (err) return t.fail(err) - npm.dedupe(function(err) { - if (err) return t.fail(err) + common.npm( + [ + "install", ".", + "--registry", common.registry + ], + EXEC_OPTS, + function (err, code) { + t.ifError(err, "successfully installed directory") + t.equal(code, 0, "npm install exited with code") + common.npm(["dedupe"], {}, function (err, code) { + t.ifError(err, "successfully deduped against previous install") + t.notOk(code, "npm dedupe exited with code") t.ok(existsSync(path.join(__dirname, "dedupe", "node_modules", "minimist"))) t.ok(!existsSync(path.join(__dirname, "dedupe", "node_modules", "checker"))) s.close() // shutdown mock registry. @@ -25,10 +34,8 @@ function setup (cb) { process.chdir(path.join(__dirname, "dedupe")) mr(common.port, function (s) { // create mock registry. 
- npm.load({registry: common.registry}, function() { - rimraf.sync(path.join(__dirname, "dedupe", "node_modules")) - fs.mkdirSync(path.join(__dirname, "dedupe", "node_modules")) - cb(s) - }) + rimraf.sync(path.join(__dirname, "dedupe", "node_modules")) + fs.mkdirSync(path.join(__dirname, "dedupe", "node_modules")) + cb(s) }) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/dev-dep-duplicate/desired-ls-results.json nodejs-0.11.15/deps/npm/test/tap/dev-dep-duplicate/desired-ls-results.json --- nodejs-0.11.13/deps/npm/test/tap/dev-dep-duplicate/desired-ls-results.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/dev-dep-duplicate/desired-ls-results.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "name": "dev-dep-duplicate", + "version": "0.0.0", + "dependencies": { + "underscore": { + "version": "1.5.1" + } + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/dev-dep-duplicate/package.json nodejs-0.11.15/deps/npm/test/tap/dev-dep-duplicate/package.json --- nodejs-0.11.13/deps/npm/test/tap/dev-dep-duplicate/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/dev-dep-duplicate/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "author": "Anders Janmyr", + "name": "dev-dep-duplicate", + "version": "0.0.0", + "dependencies": { + "underscore": "1.5.1" + }, + "devDependencies": { + "underscore": "1.3.1" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/false_name.js nodejs-0.11.15/deps/npm/test/tap/false_name.js --- nodejs-0.11.13/deps/npm/test/tap/false_name.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/false_name.js 2015-01-20 21:22:17.000000000 +0000 @@ -11,41 +11,45 @@ , fs = require("fs") , path = require("path") , existsSync = fs.existsSync || path.existsSync - , spawn = require("child_process").spawn - , npm = require("../../") , rimraf = require("rimraf") , common = require("../common-tap.js") , mr = require("npm-registry-mock") - , pkg = __dirname 
+ "/false_name" + , pkg = path.resolve(__dirname, "false_name") + , cache = path.resolve(pkg, "cache") + , nodeModules = path.resolve(pkg, "node_modules") -test("not every pkg.name can be required", function (t) { - rimraf.sync(pkg + "/cache") +var EXEC_OPTS = { cwd: pkg } - t.plan(1) +test("setup", function(t) { + cleanup() + fs.mkdirSync(nodeModules) + t.end() +}) + +test("not every pkg.name can be required", function (t) { + t.plan(3) mr(common.port, function (s) { - setup(function () { - npm.install(".", function (err) { - if (err) return t.fail(err) - s.close() - t.ok(existsSync(pkg + "/node_modules/test-package-with-one-dep/" + - "node_modules/test-package")) - }) + common.npm([ + "install", ".", + "--cache", cache, + "--registry", common.registry + ], EXEC_OPTS, function (err, code) { + s.close() + t.ifErr(err, "install finished without error") + t.equal(code, 0, "install exited ok") + t.ok(existsSync(path.resolve(pkg, + "node_modules/test-package-with-one-dep", + "node_modules/test-package"))) }) }) }) +function cleanup() { + rimraf.sync(cache) + rimraf.sync(nodeModules) +} + test("cleanup", function (t) { - rimraf.sync(pkg + "/cache") - rimraf.sync(pkg + "/node_modules") + cleanup() t.end() }) - -function setup (cb) { - process.chdir(pkg) - npm.load({cache: pkg + "/cache", registry: common.registry}, - function () { - rimraf.sync(pkg + "/node_modules") - fs.mkdirSync(pkg + "/node_modules") - cb() - }) -} diff -Nru nodejs-0.11.13/deps/npm/test/tap/git-cache-locking.js nodejs-0.11.15/deps/npm/test/tap/git-cache-locking.js --- nodejs-0.11.13/deps/npm/test/tap/git-cache-locking.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/git-cache-locking.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,8 @@ var test = require("tap").test + , common = require("../common-tap") , path = require("path") , rimraf = require("rimraf") , mkdirp = require("mkdirp") - , spawn = require("child_process").spawn - , npm = 
require.resolve("../../bin/npm-cli.js") - , node = process.execPath , pkg = path.resolve(__dirname, "git-cache-locking") , tmp = path.join(pkg, "tmp") , cache = path.join(pkg, "cache") @@ -12,10 +10,7 @@ test("setup", function (t) { rimraf.sync(pkg) - mkdirp.sync(pkg) - mkdirp.sync(cache) - mkdirp.sync(tmp) - mkdirp.sync(path.resolve(pkg, 'node_modules')) + mkdirp.sync(path.resolve(pkg, "node_modules")) t.end() }) @@ -26,27 +21,28 @@ // package c depends on a.git#master and b.git#master // package b depends on a.git#master - var child = spawn(node, [npm, "install", "git://github.com/nigelzor/npm-4503-c.git"], { + common.npm([ + "install", + "git://github.com/nigelzor/npm-4503-c.git" + ], { cwd: pkg, env: { - npm_config_cache: cache, - npm_config_tmp: tmp, - npm_config_prefix: pkg, - npm_config_global: "false", + "npm_config_cache": cache, + "npm_config_tmp": tmp, + "npm_config_prefix": pkg, + "npm_config_global": "false", HOME: process.env.HOME, Path: process.env.PATH, PATH: process.env.PATH - }, - stdio: "inherit" - }) - - child.on("close", function (code) { + } + }, function (err, code) { + t.ifErr(err, "npm install finished without error") t.equal(0, code, "npm install should succeed") t.end() }) }) -test('cleanup', function(t) { +test("cleanup", function(t) { rimraf.sync(pkg) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/git-cache-no-hooks.js nodejs-0.11.15/deps/npm/test/tap/git-cache-no-hooks.js --- nodejs-0.11.13/deps/npm/test/tap/git-cache-no-hooks.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/git-cache-no-hooks.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,63 @@ +var test = require("tap").test + , fs = require("fs") + , path = require("path") + , rimraf = require("rimraf") + , mkdirp = require("mkdirp") + , spawn = require("child_process").spawn + , npmCli = require.resolve("../../bin/npm-cli.js") + , node = process.execPath + , pkg = path.resolve(__dirname, "git-cache-no-hooks") + , tmp = path.join(pkg, "tmp") + , 
cache = path.join(pkg, "cache") + + +test("setup", function (t) { + rimraf.sync(pkg) + mkdirp.sync(pkg) + mkdirp.sync(cache) + mkdirp.sync(tmp) + mkdirp.sync(path.resolve(pkg, "node_modules")) + t.end() +}) + +test("git-cache-no-hooks: install a git dependency", function (t) { + + // disable git integration tests on Travis. + if (process.env.TRAVIS) return t.end() + + var command = [ npmCli + , "install" + , "git://github.com/nigelzor/npm-4503-a.git" + ] + var child = spawn(node, command, { + cwd: pkg, + env: { + "npm_config_cache" : cache, + "npm_config_tmp" : tmp, + "npm_config_prefix" : pkg, + "npm_config_global" : "false", + "npm_config_umask" : "00", + HOME : process.env.HOME, + Path : process.env.PATH, + PATH : process.env.PATH + }, + stdio: "inherit" + }) + + child.on("close", function (code) { + t.equal(code, 0, "npm install should succeed") + + // verify permissions on git hooks + var repoDir = "git-github-com-nigelzor-npm-4503-a-git-40c5cb24" + var hooksPath = path.join(cache, "_git-remotes", repoDir, "hooks") + fs.readdir(hooksPath, function (err) { + t.equal(err && err.code, "ENOENT", "hooks are not brought along with repo") + t.end() + }) + }) +}) + +test("cleanup", function (t) { + rimraf.sync(pkg) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/global-prefix-set-in-userconfig.js nodejs-0.11.15/deps/npm/test/tap/global-prefix-set-in-userconfig.js --- nodejs-0.11.13/deps/npm/test/tap/global-prefix-set-in-userconfig.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/global-prefix-set-in-userconfig.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var rimraf = require("rimraf") +var prefix = __filename.replace(/\.js$/, "") +var rcfile = __filename.replace(/\.js$/, ".npmrc") +var fs = require("fs") +var conf = "prefix = " + prefix + "\n" + +test("setup", function (t) { + rimraf.sync(prefix) + fs.writeFileSync(rcfile, conf) + t.pass("ready") + 
t.end() +}) + +test("run command", function (t) { + var args = ["prefix", "-g", "--userconfig=" + rcfile] + common.npm(args, {env: {}}, function (er, code, so) { + if (er) throw er + t.notOk(code, "npm prefix exited with code 0") + t.equal(so.trim(), prefix) + t.end() + }) +}) + +test("made dir", function (t) { + t.ok(fs.statSync(prefix).isDirectory()) + t.end() +}) + +test("cleanup", function (t) { + rimraf.sync(prefix) + rimraf.sync(rcfile) + t.pass("clean") + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/ignore-install-link.js nodejs-0.11.15/deps/npm/test/tap/ignore-install-link.js --- nodejs-0.11.13/deps/npm/test/tap/ignore-install-link.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ignore-install-link.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,122 +1,69 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var osenv = require('osenv') -var npm = require.resolve("../../bin/npm-cli.js") -var node = process.execPath -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = require('mkdirp') -var pkg = path.resolve(__dirname, 'ignore-install-link') -var spawn = require('child_process').spawn -var linkDir = path.resolve(osenv.tmpdir(), 'npm-link-issue') - -test('ignore-install-link: ignore install if a package is linked', function(t) { - setup(function(err) { - if (err) { - t.ifError(err) - t.end() - return - } - - var p = path.resolve(pkg, 'node_modules', 'npm-link-issue') - fs.lstat(p, function(err, s) { - t.ifError(err) - - t.ok(true === s.isSymbolicLink(), 'child is a symlink') - t.end() - }) - }) -}) - -test('cleanup', function(t) { - process.chdir(osenv.tmpdir()) - rimraf.sync(pkg) - rimraf.sync(linkDir) +if (process.platform === "win32") { + console.log("ok - symlinks are weird on windows, skip this test") + return +} +var common = require("../common-tap.js") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var rimraf = 
require("rimraf") +var mkdirp = require("mkdirp") + +var root = path.resolve(__dirname, "ignore-install-link") +var pkg = path.resolve(root, "pkg") +var dep = path.resolve(root, "dep") +var target = path.resolve(pkg, "node_modules", "dep") +var cache = path.resolve(root, "cache") +var globalPath = path.resolve(root, "global") + +var pkgj = { "name":"pkg", "version": "1.2.3" + , "dependencies": { "dep": "1.2.3" } } +var depj = { "name": "dep", "version": "1.2.3" } + +var myreg = require("http").createServer(function (q, s) { + s.statusCode = 403 + s.end(JSON.stringify({"error":"forbidden"}) + "\n") +}).listen(common.port) + +test("setup", function (t) { + rimraf.sync(root) + mkdirp.sync(root) + mkdirp.sync(path.resolve(pkg, "node_modules")) + mkdirp.sync(dep) + mkdirp.sync(cache) + mkdirp.sync(globalPath) + fs.writeFileSync(path.resolve(pkg, "package.json"), JSON.stringify(pkgj)) + fs.writeFileSync(path.resolve(dep, "package.json"), JSON.stringify(depj)) + fs.symlinkSync(dep, target, "dir") t.end() }) - -function setup(cb) { - rimraf.sync(linkDir) - mkdirp.sync(pkg) - mkdirp.sync(path.resolve(pkg, 'cache')) - mkdirp.sync(path.resolve(pkg, 'node_modules')) - mkdirp.sync(linkDir) - fs.writeFileSync(path.resolve(pkg, 'package.json'), JSON.stringify({ - author: 'Evan Lucas', - name: 'ignore-install-link', - version: '0.0.0', - description: 'Test for ignoring install when a package has been linked', - dependencies: { - 'npm-link-issue': 'git+https://github.com/lancefisher/npm-link-issue.git#0.0.1' - } - }), 'utf8') - fs.writeFileSync(path.resolve(linkDir, 'package.json'), JSON.stringify({ - author: 'lancefisher', - name: 'npm-link-issue', - version: '0.0.1', - description: 'Sample Dependency' - }), 'utf8') - - clone(cb) -} - -function clone (cb) { - var child = createChild(process.cwd(), 'git', ['--git-dir', linkDir, 'init']) - child.on('close', function(c) { - if (c !== 0) - return cb(new Error('Failed to init the git repository')) - - process.chdir(linkDir) - 
performLink(cb) +test("ignore install if package is linked", function (t) { + common.npm(["install"], { + cwd: pkg, + env: { + PATH: process.env.PATH || process.env.Path, + HOME: process.env.HOME, + "npm_config_prefix": globalPath, + "npm_config_cache": cache, + "npm_config_registry": common.registry, + "npm_config_loglevel": "silent" + }, + stdio: "inherit" + }, function (er, code) { + if (er) throw er + t.equal(code, 0, "npm install exited with code") + t.end() }) -} - -function performLink (cb) { - var child = createChild(linkDir, node, [npm, 'link', '.']) - child.on('close', function(c) { - if (c !== 0) - return cb(new Error('Failed to link ' + linkDir + ' globally')) - - performLink2(cb) - }) -} - -function performLink2 (cb) { - var child = createChild(pkg, node, [npm, 'link', 'npm-link-issue']) - child.on('close', function(c) { - if (c !== 0) - return cb(new Error('Failed to link ' + linkDir + ' to local node_modules')) - - performInstall(cb) - }) -} - -function performInstall (cb) { - var child = createChild(pkg, node, [npm, 'install']) - child.on('close', function(c) { - if (c !== 0) - return cb(new Error('Failed to install')) +}) - cb() - }) -} +test("still a symlink", function (t) { + t.equal(true, fs.lstatSync(target).isSymbolicLink()) + t.end() +}) -function createChild (cwd, cmd, args) { - var env = { - HOME: process.env.HOME, - Path: process.env.PATH, - PATH: process.env.PATH, - npm_config_loglevel: "silent" - } - - if (process.platform === "win32") - env.npm_config_cache = "%APPDATA%\\npm-cache" - - return spawn(cmd, args, { - cwd: cwd, - stdio: "pipe", - env: env - }) -} +test("cleanup", function (t) { + rimraf.sync(root) + myreg.close() + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/ignore-scripts.js nodejs-0.11.15/deps/npm/test/tap/ignore-scripts.js --- nodejs-0.11.13/deps/npm/test/tap/ignore-scripts.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ignore-scripts.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,24 
+1,24 @@ +var common = require("../common-tap") var test = require("tap").test -var npm = require.resolve("../../bin/npm-cli.js") - -var spawn = require("child_process").spawn -var node = process.execPath +var path = require("path") // ignore-scripts/package.json has scripts that always exit with non-zero error // codes. The "install" script is omitted so that npm tries to run node-gyp, // which should also fail. -var pkg = __dirname + "/ignore-scripts" +var pkg = path.resolve(__dirname, "ignore-scripts") -test("ignore-scripts: install using the option", function(t) { - createChild([npm, "install", "--ignore-scripts"]).on("close", function(code) { - t.equal(code, 0) +test("ignore-scripts: install using the option", function (t) { + createChild(["install", "--ignore-scripts"], function (err, code) { + t.ifError(err, "install with scripts ignored finished successfully") + t.equal(code, 0, "npm install exited with code") t.end() }) }) -test("ignore-scripts: install NOT using the option", function(t) { - createChild([npm, "install"]).on("close", function(code) { - t.notEqual(code, 0) +test("ignore-scripts: install NOT using the option", function (t) { + createChild(["install"], function (err, code) { + t.ifError(err, "install with scripts successful") + t.notEqual(code, 0, "npm install exited with code") t.end() }) }) @@ -34,39 +34,40 @@ "prerestart", "restart", "postrestart" ] -scripts.forEach(function(script) { - test("ignore-scripts: run-script "+script+" using the option", function(t) { - createChild([npm, "--ignore-scripts", "run-script", script]) - .on("close", function(code) { - t.equal(code, 0) - t.end() - }) +scripts.forEach(function (script) { + test("ignore-scripts: run-script "+script+" using the option", function (t) { + createChild(["--ignore-scripts", "run-script", script], function (err, code) { + t.ifError(err, "run-script " + script + " with ignore-scripts successful") + t.equal(code, 0, "npm run-script exited with code") + t.end() + }) }) }) 
-scripts.forEach(function(script) { - test("ignore-scripts: run-script "+script+" NOT using the option", function(t) { - createChild([npm, "run-script", script]).on("close", function(code) { - t.notEqual(code, 0) +scripts.forEach(function (script) { + test("ignore-scripts: run-script "+script+" NOT using the option", function (t) { + createChild(["run-script", script], function (err, code) { + t.ifError(err, "run-script " + script + " finished successfully") + t.notEqual(code, 0, "npm run-script exited with code") t.end() }) }) }) -function createChild (args) { +function createChild (args, cb) { var env = { HOME: process.env.HOME, Path: process.env.PATH, PATH: process.env.PATH, - npm_config_loglevel: "silent" + "npm_config_loglevel": "silent" } if (process.platform === "win32") env.npm_config_cache = "%APPDATA%\\npm-cache" - return spawn(node, args, { + return common.npm(args, { cwd: pkg, stdio: "inherit", env: env - }) + }, cb) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/ignore-shrinkwrap.js nodejs-0.11.15/deps/npm/test/tap/ignore-shrinkwrap.js --- nodejs-0.11.13/deps/npm/test/tap/ignore-shrinkwrap.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ignore-shrinkwrap.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,9 @@ var common = require("../common-tap.js") var test = require("tap").test -var pkg = './ignore-shrinkwrap' +var pkg = require("path").join(__dirname,"ignore-shrinkwrap") var mr = require("npm-registry-mock") -var child var spawn = require("child_process").spawn var npm = require.resolve("../../bin/npm-cli.js") var node = process.execPath @@ -18,7 +17,7 @@ test("ignore-shrinkwrap: using the option", function (t) { mr({port: common.port, mocks: customMocks}, function (s) { - s._server.on("request", function (req, res) { + s._server.on("request", function (req) { switch (req.url) { case "/shrinkwrap.js": t.fail() @@ -28,7 +27,7 @@ } }) var child = createChild(true) - child.on("close", function (m) { + child.on("close", 
function () { s.close() t.end() }) @@ -37,7 +36,7 @@ test("ignore-shrinkwrap: NOT using the option", function (t) { mr({port: common.port, mocks: customMocks}, function (s) { - s._server.on("request", function (req, res) { + s._server.on("request", function (req) { switch (req.url) { case "/shrinkwrap.js": t.pass("shrinkwrap used") @@ -47,7 +46,7 @@ } }) var child = createChild(false) - child.on("close", function (m) { + child.on("close", function () { s.close() t.end() }) @@ -65,13 +64,12 @@ return spawn(node, args, { cwd: pkg, env: { - npm_config_registry: common.registry, - npm_config_cache_lock_stale: 1000, - npm_config_cache_lock_wait: 1000, + "npm_config_registry": common.registry, + "npm_config_cache_lock_stale": 1000, + "npm_config_cache_lock_wait": 1000, HOME: process.env.HOME, Path: process.env.PATH, PATH: process.env.PATH } }) - } diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-at-locally.js nodejs-0.11.15/deps/npm/test/tap/install-at-locally.js --- nodejs-0.11.13/deps/npm/test/tap/install-at-locally.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-at-locally.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,42 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var osenv = require('osenv') -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = require('mkdirp') -var pkg = path.join(__dirname, 'install-at-locally') +var common = require("../common-tap.js") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var pkg = path.join(__dirname, "install-at-locally") + +var EXEC_OPTS = { } test("setup", function (t) { mkdirp.sync(pkg) - mkdirp.sync(path.resolve(pkg, 'node_modules')) + mkdirp.sync(path.resolve(pkg, "node_modules")) process.chdir(pkg) t.end() }) -test('"npm install ./package@1.2.3" should install local pkg', 
function(t) { - npm.load(function() { - npm.commands.install(['./package@1.2.3'], function(err) { - var p = path.resolve(pkg, 'node_modules/install-at-locally/package.json') - t.ok(JSON.parse(fs.readFileSync(p, 'utf8'))) - t.end() - }) +test("\"npm install ./package@1.2.3\" should install local pkg", function(t) { + common.npm(["install", "./package@1.2.3"], EXEC_OPTS, function(err, code) { + var p = path.resolve(pkg, "node_modules/install-at-locally/package.json") + t.ifError(err, "install local package successful") + t.equal(code, 0, "npm install exited with code") + t.ok(JSON.parse(fs.readFileSync(p, "utf8"))) + t.end() }) }) -test('"npm install install/at/locally@./package@1.2.3" should install local pkg', function(t) { - npm.load(function() { - npm.commands.install(['./package@1.2.3'], function(err) { - var p = path.resolve(pkg, 'node_modules/install-at-locally/package.json') - t.ok(JSON.parse(fs.readFileSync(p, 'utf8'))) - t.end() - }) +test("\"npm install install/at/locally@./package@1.2.3\" should install local pkg", function(t) { + common.npm(["install", "./package@1.2.3"], EXEC_OPTS, function(err, code) { + var p = path.resolve(pkg, "node_modules/install-at-locally/package.json") + t.ifError(err, "install local package in explicit directory successful") + t.equal(code, 0, "npm install exited with code") + t.ok(JSON.parse(fs.readFileSync(p, "utf8"))) + t.end() }) }) -test('cleanup', function(t) { +test("cleanup", function(t) { process.chdir(__dirname) - rimraf.sync(path.resolve(pkg, 'node_modules')) + rimraf.sync(path.resolve(pkg, "node_modules")) t.end() }) - diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-cli-production/dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-cli-production/dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-cli-production/dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-cli-production/dependency/package.json 2015-01-20 21:22:17.000000000 
+0000 @@ -0,0 +1,5 @@ +{ + "name": "dependency", + "description": "fixture", + "version": "0.0.0" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-cli-production/dev-dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-cli-production/dev-dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-cli-production/dev-dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-cli-production/dev-dependency/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "name": "dev-dependency", + "description": "fixture", + "version": "0.0.0" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-cli-production/package.json nodejs-0.11.15/deps/npm/test/tap/install-cli-production/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-cli-production/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-cli-production/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "name": "install-cli-production", + "description": "fixture", + "version": "0.0.0", + "scripts": { + "prepublish": "exit 123" + }, + "dependencies": { + "dependency": "file:./dependency" + }, + "devDependencies": { + "dev-dependency": "file:./dev-dependency" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-cli-production.js nodejs-0.11.15/deps/npm/test/tap/install-cli-production.js --- nodejs-0.11.13/deps/npm/test/tap/install-cli-production.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-cli-production.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var pkg = path.join(__dirname, "install-cli-production") + +var EXEC_OPTS = { + cwd: pkg +} + +test("setup", function(t) { + mkdirp.sync(pkg) + mkdirp.sync(path.resolve(pkg, 
"node_modules")) + process.chdir(pkg) + t.end() +}) + +test("\"npm install --production\" should install dependencies", function(t) { + common.npm(["install", "--production"], EXEC_OPTS, function(err, code) { + t.ifError(err, "install production successful") + t.equal(code, 0, "npm install exited with code") + var p = path.resolve(pkg, "node_modules/dependency/package.json") + t.ok(JSON.parse(fs.readFileSync(p, "utf8"))) + t.end() + }) +}) + +test("\"npm install --production\" should not install dev dependencies", function(t) { + common.npm(["install", "--production"], EXEC_OPTS, function(err, code) { + t.ifError(err, "install production successful") + t.equal(code, 0, "npm install exited with code") + var p = path.resolve(pkg, "node_modules/dev-dependency/package.json") + t.ok(!fs.existsSync(p), "") + t.end() + }) +}) + +test("cleanup", function(t) { + process.chdir(__dirname) + rimraf.sync(path.resolve(pkg, "node_modules")) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-cli-unicode.js nodejs-0.11.15/deps/npm/test/tap/install-cli-unicode.js --- nodejs-0.11.13/deps/npm/test/tap/install-cli-unicode.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-cli-unicode.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,23 +1,24 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var mkdirp = require('mkdirp') -var mr = require('npm-registry-mock') -var exec = require('child_process').exec +var common = require("../common-tap.js") +var test = require("tap").test +var mr = require("npm-registry-mock") +var path = require("path") -var pkg = __dirname + '/install-cli' -var NPM_BIN = __dirname + '/../../bin/npm-cli.js' +var pkg = path.resolve(__dirname, "install-cli") function hasOnlyAscii (s) { - return /^[\000-\177]*$/.test(s) ; + return /^[\000-\177]*$/.test(s) } -test('does not use unicode with --unicode false', function (t) { - t.plan(3) +var EXEC_OPTS = { + cwd : pkg +} + 
+test("does not use unicode with --unicode false", function (t) { + t.plan(5) mr(common.port, function (s) { - exec('node ' + NPM_BIN + ' install --unicode false read', { - cwd: pkg - }, function(err, stdout) { + common.npm(["install", "--unicode", "false", "read"], EXEC_OPTS, function (err, code, stdout) { + t.ifError(err, "install package read without unicode success") + t.notOk(code, "npm install exited with code 0") t.ifError(err) t.ok(stdout, stdout.length) t.ok(hasOnlyAscii(stdout)) @@ -26,11 +27,11 @@ }) }) -test('cleanup', function (t) { +test("cleanup", function (t) { mr(common.port, function (s) { - exec('node ' + NPM_BIN + ' uninstall read', { - cwd: pkg - }, function(err, stdout) { + common.npm(["uninstall", "read"], EXEC_OPTS, function (err, code) { + t.ifError(err, "uninstall read package success") + t.notOk(code, "npm uninstall exited with code 0") s.close() }) }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-local-dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-local-dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-local-dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-local-dependency/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "name": "package-local-dependency", + "version": "0.0.0", + "description": "Test for local installs" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-local-dev-dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-local-dev-dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-local-dev-dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-local-dev-dependency/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "name": "package-local-dev-dependency", + 
"version": "0.0.0", + "description": "Test for local installs" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-scoped-dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-scoped-dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-scoped-dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-scoped-dependency/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "name": "@scoped/package", + "version": "0.0.0", + "description": "Test for local installs" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-with-local-paths/package.json nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-with-local-paths/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-with-local-paths/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-with-local-paths/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "package-with-local-paths", + "version": "0.0.0", + "dependencies": { + "package-local-dependency": "file:../package-local-dependency" + }, + "devDependencies": { + "package-local-dev-dependency": "file:../package-local-dev-dependency" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-with-scoped-paths/package.json nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-with-scoped-paths/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-from-local/package-with-scoped-paths/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-from-local/package-with-scoped-paths/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "name": "package-with-scoped-paths", + "version": "0.0.0", + "dependencies": { + "package-local-dependency": "file:../package-local-dependency", + 
"@scoped/package-scoped-dependency": "file:../package-scoped-dependency" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-from-local.js nodejs-0.11.15/deps/npm/test/tap/install-from-local.js --- nodejs-0.11.13/deps/npm/test/tap/install-from-local.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-from-local.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,39 @@ +var common = require("../common-tap") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var pkg = path.join(__dirname, "install-from-local", "package-with-local-paths") + +var EXEC_OPTS = { } + +test("setup", function (t) { + process.chdir(pkg) + t.end() +}) + +test('"npm install" should install local packages', function (t) { + common.npm(["install", "."], EXEC_OPTS, function (err, code) { + t.ifError(err, "error should not exist") + t.notOk(code, "npm install exited with code 0") + var dependencyPackageJson = path.resolve(pkg, "node_modules/package-local-dependency/package.json") + t.ok( + JSON.parse(fs.readFileSync(dependencyPackageJson, "utf8")), + "package with local dependency installed" + ) + + var devDependencyPackageJson = path.resolve(pkg, "node_modules/package-local-dev-dependency/package.json") + t.ok( + JSON.parse(fs.readFileSync(devDependencyPackageJson, "utf8")), + "package with local dev dependency installed" + ) + + t.end() + }) +}) + +test("cleanup", function (t) { + process.chdir(__dirname) + rimraf.sync(path.resolve(pkg, "node_modules")) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-save-exact.js nodejs-0.11.15/deps/npm/test/tap/install-save-exact.js --- nodejs-0.11.13/deps/npm/test/tap/install-save-exact.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-save-exact.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,41 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var 
osenv = require('osenv') -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = require('mkdirp') -var pkg = path.join(__dirname, 'install-save-exact') +var common = require("../common-tap.js") +var test = require("tap").test +var npm = require("../../") +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var pkg = path.join(__dirname, "install-save-exact") var mr = require("npm-registry-mock") test("setup", function (t) { mkdirp.sync(pkg) - mkdirp.sync(path.resolve(pkg, 'node_modules')) + mkdirp.sync(path.resolve(pkg, "node_modules")) process.chdir(pkg) t.end() }) -test('"npm install --save --save-exact should install local pkg', function(t) { +test("\"npm install --save --save-exact\" should install local pkg", function (t) { resetPackageJSON(pkg) mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', - registry: common.registry }, function(err) { + loglevel: "silent", + registry: common.registry }, function (err) { t.ifError(err) - npm.config.set('save', true) - npm.config.set('save-exact', true) - npm.commands.install(['underscore@1.3.1'], function(err) { + npm.config.set("save", true) + npm.config.set("save-exact", true) + npm.commands.install(["underscore@1.3.1"], function (err) { t.ifError(err) - var p = path.resolve(pkg, 'node_modules/underscore/package.json') + var p = path.resolve(pkg, "node_modules/underscore/package.json") t.ok(JSON.parse(fs.readFileSync(p))) - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + p = path.resolve(pkg, "package.json") + var pkgJson = JSON.parse(fs.readFileSync(p, "utf8")) t.deepEqual(pkgJson.dependencies, { - 'underscore': '1.3.1' - }, 'Underscore dependency should specify exactly 1.3.1') - npm.config.set('save', undefined) - npm.config.set('save-exact', undefined) + "underscore": "1.3.1" + }, "Underscore dependency should specify exactly 1.3.1") + 
npm.config.set("save", undefined) + npm.config.set("save-exact", undefined) s.close() t.end() }) @@ -43,50 +43,50 @@ }) }) -test('"npm install --save-dev --save-exact should install local pkg', function(t) { +test("\"npm install --save-dev --save-exact\" should install local pkg", function (t) { resetPackageJSON(pkg) mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', - registry: common.registry }, function(err) { + loglevel: "silent", + registry: common.registry }, function (err) { t.ifError(err) - npm.config.set('save-dev', true) - npm.config.set('save-exact', true) - npm.commands.install(['underscore@1.3.1'], function(err) { + npm.config.set("save-dev", true) + npm.config.set("save-exact", true) + npm.commands.install(["underscore@1.3.1"], function (err) { t.ifError(err) - var p = path.resolve(pkg, 'node_modules/underscore/package.json') + var p = path.resolve(pkg, "node_modules/underscore/package.json") t.ok(JSON.parse(fs.readFileSync(p))) - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + p = path.resolve(pkg, "package.json") + var pkgJson = JSON.parse(fs.readFileSync(p, "utf8")) console.log(pkgJson) t.deepEqual(pkgJson.devDependencies, { - 'underscore': '1.3.1' - }, 'underscore devDependency should specify exactly 1.3.1') + "underscore": "1.3.1" + }, "underscore devDependency should specify exactly 1.3.1") s.close() - npm.config.set('save-dev', undefined) - npm.config.set('save-exact', undefined) + npm.config.set("save-dev", undefined) + npm.config.set("save-exact", undefined) t.end() }) }) }) }) -test('cleanup', function(t) { +test("cleanup", function (t) { process.chdir(__dirname) - rimraf.sync(path.resolve(pkg, 'node_modules')) - rimraf.sync(path.resolve(pkg, 'cache')) + rimraf.sync(path.resolve(pkg, "node_modules")) + rimraf.sync(path.resolve(pkg, "cache")) resetPackageJSON(pkg) t.end() }) function resetPackageJSON(pkg) { - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) 
+ var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) delete pkgJson.dependencies delete pkgJson.devDependencies delete pkgJson.optionalDependencies var json = JSON.stringify(pkgJson, null, 2) + "\n" - fs.writeFileSync(pkg + '/package.json', json, "ascii") + var p = path.resolve(pkg, "package.json") + fs.writeFileSync(p, json, "ascii") } - - diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-save-local/package/package.json nodejs-0.11.15/deps/npm/test/tap/install-save-local/package/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-save-local/package/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-save-local/package/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +{ + "name": "package", + "version": "0.0.0" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-save-local/package-local-dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-save-local/package-local-dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-save-local/package-local-dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-save-local/package-local-dependency/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "name": "package-local-dependency", + "version": "0.0.0", + "description": "Test for local installs" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-save-local/package-local-dev-dependency/package.json nodejs-0.11.15/deps/npm/test/tap/install-save-local/package-local-dev-dependency/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-save-local/package-local-dev-dependency/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-save-local/package-local-dev-dependency/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "name": "package-local-dev-dependency", + "version": "0.0.0", + "description": "Test for local installs" +} diff -Nru 
nodejs-0.11.13/deps/npm/test/tap/install-save-local.js nodejs-0.11.15/deps/npm/test/tap/install-save-local.js --- nodejs-0.11.13/deps/npm/test/tap/install-save-local.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-save-local.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,65 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var pkg = path.join(__dirname, "install-save-local", "package") + +var EXEC_OPTS = { } + +test("setup", function (t) { + resetPackageJSON(pkg) + process.chdir(pkg) + t.end() +}) + +test('"npm install --save ../local/path" should install local package and save to package.json', function (t) { + resetPackageJSON(pkg) + common.npm(["install", "--save", "../package-local-dependency"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm install exited with code 0") + + var dependencyPackageJson = path.resolve(pkg, "node_modules/package-local-dependency/package.json") + t.ok(JSON.parse(fs.readFileSync(dependencyPackageJson, "utf8"))) + + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) + t.deepEqual(pkgJson.dependencies, { + "package-local-dependency": "file:../package-local-dependency" + }) + t.end() + }) +}) + +test('"npm install --save-dev ../local/path" should install local package and save to package.json', function (t) { + resetPackageJSON(pkg) + common.npm(["install", "--save-dev", "../package-local-dev-dependency"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm install exited with code 0") + + var dependencyPackageJson = path.resolve(pkg, "node_modules/package-local-dev-dependency/package.json") + t.ok(JSON.parse(fs.readFileSync(dependencyPackageJson, "utf8"))) + + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) + t.deepEqual(pkgJson.devDependencies, { + "package-local-dev-dependency": 
"file:../package-local-dev-dependency" + }) + + t.end() + }) +}) + + +test("cleanup", function (t) { + resetPackageJSON(pkg) + process.chdir(__dirname) + rimraf.sync(path.resolve(pkg, "node_modules")) + t.end() +}) + +function resetPackageJSON(pkg) { + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) + delete pkgJson.dependencies + delete pkgJson.devDependencies + var json = JSON.stringify(pkgJson, null, 2) + "\n" + fs.writeFileSync(pkg + "/package.json", json, "ascii") +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-save-prefix.js nodejs-0.11.15/deps/npm/test/tap/install-save-prefix.js --- nodejs-0.11.13/deps/npm/test/tap/install-save-prefix.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-save-prefix.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,39 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var osenv = require('osenv') -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = require('mkdirp') -var pkg = path.join(__dirname, 'install-save-prefix') +var common = require("../common-tap.js") +var test = require("tap").test +var npm = require("../../") +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var pkg = path.join(__dirname, "install-save-prefix") var mr = require("npm-registry-mock") test("setup", function (t) { mkdirp.sync(pkg) - mkdirp.sync(path.resolve(pkg, 'node_modules')) + mkdirp.sync(path.resolve(pkg, "node_modules")) process.chdir(pkg) t.end() }) -test('"npm install --save with default save-prefix should install local pkg versioned to allow minor updates', function(t) { +test("npm install --save with default save-prefix should install local pkg versioned to allow minor updates", function (t) { resetPackageJSON(pkg) mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', - registry: 
common.registry }, function(err) { + loglevel: "silent", + "save-prefix": "^", + registry: common.registry }, function (err) { t.ifError(err) - npm.config.set('save', true) - npm.commands.install(['underscore@latest'], function(err) { + npm.config.set("save", true) + npm.commands.install(["underscore@latest"], function (err) { t.ifError(err) - var p = path.resolve(pkg, 'node_modules/underscore/package.json') + var p = path.resolve(pkg, "node_modules/underscore/package.json") t.ok(JSON.parse(fs.readFileSync(p))) - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) t.deepEqual(pkgJson.dependencies, { - 'underscore': '^1.5.1' - }, 'Underscore dependency should specify ^1.3.1') - npm.config.set('save', undefined) + "underscore": "^1.5.1" + }, "Underscore dependency should specify ^1.5.1") + npm.config.set("save", undefined) s.close() t.end() }) @@ -41,24 +41,25 @@ }) }) -test('"npm install --save-dev with default save-prefix should install local pkg to dev dependencies versioned to allow minor updates', function(t) { +test("npm install --save-dev with default save-prefix should install local pkg to dev dependencies versioned to allow minor updates", function (t) { resetPackageJSON(pkg) mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', - registry: common.registry }, function(err) { + loglevel: "silent", + "save-prefix": "^", + registry: common.registry }, function (err) { t.ifError(err) - npm.config.set('save-dev', true) - npm.commands.install(['underscore@1.3.1'], function(err) { + npm.config.set("save-dev", true) + npm.commands.install(["underscore@1.3.1"], function (err) { t.ifError(err) - var p = path.resolve(pkg, 'node_modules/underscore/package.json') + var p = path.resolve(pkg, "node_modules/underscore/package.json") t.ok(JSON.parse(fs.readFileSync(p))) - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + 
var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) t.deepEqual(pkgJson.devDependencies, { - 'underscore': '^1.3.1' - }, 'Underscore devDependency should specify ^1.3.1') - npm.config.set('save-dev', undefined) + "underscore": "^1.3.1" + }, "Underscore devDependency should specify ^1.3.1") + npm.config.set("save-dev", undefined) s.close() t.end() }) @@ -66,26 +67,26 @@ }) }) -test('"npm install --save with "~" save-prefix should install local pkg versioned to allow patch updates', function(t) { +test("npm install --save with \"~\" save-prefix should install local pkg versioned to allow patch updates", function (t) { resetPackageJSON(pkg) mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', - registry: common.registry }, function(err) { + loglevel: "silent", + registry: common.registry }, function (err) { t.ifError(err) - npm.config.set('save', true) - npm.config.set('save-prefix', '~') - npm.commands.install(['underscore@1.3.1'], function(err) { + npm.config.set("save", true) + npm.config.set("save-prefix", "~") + npm.commands.install(["underscore@1.3.1"], function (err) { t.ifError(err) - var p = path.resolve(pkg, 'node_modules/underscore/package.json') + var p = path.resolve(pkg, "node_modules/underscore/package.json") t.ok(JSON.parse(fs.readFileSync(p))) - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) t.deepEqual(pkgJson.dependencies, { - 'underscore': '~1.3.1' - }, 'Underscore dependency should specify ~1.3.1') - npm.config.set('save', undefined) - npm.config.set('save-prefix', undefined) + "underscore": "~1.3.1" + }, "Underscore dependency should specify ~1.3.1") + npm.config.set("save", undefined) + npm.config.set("save-prefix", undefined) s.close() t.end() }) @@ -93,26 +94,26 @@ }) }) -test('"npm install --save-dev with "~" save-prefix should install local pkg to dev dependencies versioned to allow patch 
updates', function(t) { +test("npm install --save-dev with \"~\" save-prefix should install local pkg to dev dependencies versioned to allow patch updates", function (t) { resetPackageJSON(pkg) mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', - registry: common.registry }, function(err) { + loglevel: "silent", + registry: common.registry }, function (err) { t.ifError(err) - npm.config.set('save-dev', true) - npm.config.set('save-prefix', '~') - npm.commands.install(['underscore@1.3.1'], function(err) { + npm.config.set("save-dev", true) + npm.config.set("save-prefix", "~") + npm.commands.install(["underscore@1.3.1"], function (err) { t.ifError(err) - var p = path.resolve(pkg, 'node_modules/underscore/package.json') + var p = path.resolve(pkg, "node_modules/underscore/package.json") t.ok(JSON.parse(fs.readFileSync(p))) - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) t.deepEqual(pkgJson.devDependencies, { - 'underscore': '~1.3.1' - }, 'Underscore devDependency should specify ~1.3.1') - npm.config.set('save-dev', undefined) - npm.config.set('save-prefix', undefined) + "underscore": "~1.3.1" + }, "Underscore devDependency should specify ~1.3.1") + npm.config.set("save-dev", undefined) + npm.config.set("save-prefix", undefined) s.close() t.end() }) @@ -120,21 +121,19 @@ }) }) -test('cleanup', function(t) { +test("cleanup", function (t) { process.chdir(__dirname) - rimraf.sync(path.resolve(pkg, 'node_modules')) - rimraf.sync(path.resolve(pkg, 'cache')) + rimraf.sync(path.resolve(pkg, "node_modules")) + rimraf.sync(path.resolve(pkg, "cache")) resetPackageJSON(pkg) t.end() }) function resetPackageJSON(pkg) { - var pkgJson = JSON.parse(fs.readFileSync(pkg + '/package.json', 'utf8')) + var pkgJson = JSON.parse(fs.readFileSync(pkg + "/package.json", "utf8")) delete pkgJson.dependencies delete pkgJson.devDependencies delete 
pkgJson.optionalDependencies var json = JSON.stringify(pkgJson, null, 2) + "\n" - fs.writeFileSync(pkg + '/package.json', json, "ascii") + fs.writeFileSync(pkg + "/package.json", json, "ascii") } - - diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-scoped/package.json nodejs-0.11.15/deps/npm/test/tap/install-scoped/package.json --- nodejs-0.11.13/deps/npm/test/tap/install-scoped/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-scoped/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "name": "@scoped/package", + "version": "0.0.0", + "bin": { + "hello": "./world.js" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-scoped/world.js nodejs-0.11.15/deps/npm/test/tap/install-scoped/world.js --- nodejs-0.11.13/deps/npm/test/tap/install-scoped/world.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-scoped/world.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +console.log("hello blrbld") diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-scoped-already-installed.js nodejs-0.11.15/deps/npm/test/tap/install-scoped-already-installed.js --- nodejs-0.11.13/deps/npm/test/tap/install-scoped-already-installed.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-scoped-already-installed.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,86 @@ +var common = require("../common-tap") +var existsSync = require("fs").existsSync +var join = require("path").join + +var test = require("tap").test +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") + +var pkg = join(__dirname, "install-from-local", "package-with-scoped-paths") +var modules = join(pkg, "node_modules") + +var EXEC_OPTS = { + cwd : pkg +} + +test("setup", function (t) { + rimraf.sync(modules) + rimraf.sync(join(pkg, "cache")) + process.chdir(pkg) + mkdirp.sync(modules) + t.end() +}) + +test("installing already installed local scoped package", function (t) { + 
common.npm(["install", "--loglevel", "silent"], EXEC_OPTS, function (err, code, stdout) { + var installed = parseNpmInstallOutput(stdout) + t.ifError(err, "error should not exist") + t.notOk(code, "npm install exited with code 0") + t.ifError(err, "install ran to completion without error") + t.ok( + existsSync(join(modules, "@scoped", "package", "package.json")), + "package installed" + ) + t.ok( + contains(installed, "node_modules/@scoped/package"), + "installed @scoped/package" + ) + t.ok( + contains(installed, "node_modules/package-local-dependency"), + "installed package-local-dependency" + ) + + common.npm(["install", "--loglevel", "silent"], EXEC_OPTS, function (err, code, stdout) { + installed = parseNpmInstallOutput(stdout) + t.ifError(err, "error should not exist") + t.notOk(code, "npm install exited with code 0") + + t.ifError(err, "install ran to completion without error") + + t.ok( + existsSync(join(modules, "@scoped", "package", "package.json")), + "package installed" + ) + + t.notOk( + contains(installed, "node_modules/@scoped/package"), + "did not reinstall @scoped/package" + ) + t.notOk( + contains(installed, "node_modules/package-local-dependency"), + "did not reinstall package-local-dependency" + ) + t.end() + }) + }) +}) + +test("cleanup", function (t) { + process.chdir(__dirname) + rimraf.sync(join(modules)) + rimraf.sync(join(pkg, "cache")) + t.end() +}) + +function contains(list, element) { + for (var i=0; i < list.length; ++i) { + if (list[i] === element) { + return true + } + } + return false +} + +function parseNpmInstallOutput(stdout) { + return stdout.trim().split(/\n\n|\s+/) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-scoped-link.js nodejs-0.11.15/deps/npm/test/tap/install-scoped-link.js --- nodejs-0.11.13/deps/npm/test/tap/install-scoped-link.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-scoped-link.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +var common = 
require("../common-tap.js") +var existsSync = require("fs").existsSync +var join = require("path").join +var exec = require("child_process").exec + +var test = require("tap").test +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") + +var pkg = join(__dirname, "install-scoped") +var work = join(__dirname, "install-scoped-TEST") +var modules = join(work, "node_modules") + +var EXEC_OPTS = {} + +test("setup", function (t) { + mkdirp.sync(modules) + process.chdir(work) + + t.end() +}) + +test("installing package with links", function (t) { + common.npm(["install", pkg], EXEC_OPTS, function (err, code) { + t.ifError(err, "install ran to completion without error") + t.notOk(code, "npm install exited with code 0") + + t.ok( + existsSync(join(modules, "@scoped", "package", "package.json")), + "package installed" + ) + t.ok(existsSync(join(modules, ".bin")), "binary link directory exists") + + var hello = join(modules, ".bin", "hello") + t.ok(existsSync(hello), "binary link exists") + + exec("node " + hello, function (err, stdout, stderr) { + t.ifError(err, "command ran fine") + t.notOk(stderr, "got no error output back") + t.equal(stdout, "hello blrbld\n", "output was as expected") + + t.end() + }) + }) +}) + +test("cleanup", function (t) { + process.chdir(__dirname) + rimraf.sync(work) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/install-with-dev-dep-duplicate.js nodejs-0.11.15/deps/npm/test/tap/install-with-dev-dep-duplicate.js --- nodejs-0.11.13/deps/npm/test/tap/install-with-dev-dep-duplicate.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/install-with-dev-dep-duplicate.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,57 @@ +var npm = npm = require("../../") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var osenv = require("osenv") +var rimraf = require("rimraf") +var mr = require("npm-registry-mock") +var common = require("../common-tap.js") + +var pkg = 
path.resolve(__dirname, "dev-dep-duplicate") +var desiredResultsPath = path.resolve(pkg, "desired-ls-results.json") + +test("prefers version from dependencies over devDependencies", function (t) { + t.plan(1) + + mr(common.port, function (s) { + setup(function (err) { + if (err) return t.fail(err) + + npm.install(".", function (err) { + if (err) return t.fail(err) + + npm.commands.ls([], true, function (err, _, results) { + if (err) return t.fail(err) + + fs.readFile(desiredResultsPath, function (err, desired) { + if (err) return t.fail(err) + + t.deepEqual(results, JSON.parse(desired)) + s.close() + t.end() + }) + }) + }) + }) + }) +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) + + +function setup (cb) { + cleanup() + process.chdir(pkg) + + var opts = { cache: path.resolve(pkg, "cache"), registry: common.registry} + npm.load(opts, cb) +} + +function cleanup () { + process.chdir(osenv.tmpdir()) + rimraf.sync(path.resolve(pkg, "node_modules")) + rimraf.sync(path.resolve(pkg, "cache")) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/invalid-cmd-exit-code.js nodejs-0.11.15/deps/npm/test/tap/invalid-cmd-exit-code.js --- nodejs-0.11.13/deps/npm/test/tap/invalid-cmd-exit-code.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/invalid-cmd-exit-code.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,9 @@ var test = require("tap").test -var node = process.execPath var common = require("../common-tap.js") var opts = { cwd: process.cwd() } -test("npm asdf should return exit code 1", function(t) { +test("npm asdf should return exit code 1", function (t) { common.npm(["asdf"], opts, function (er, c) { if (er) throw er t.ok(c, "exit code should not be zero") @@ -12,7 +11,7 @@ }) }) -test("npm help should return exit code 0", function(t) { +test("npm help should return exit code 0", function (t) { common.npm(["help"], opts, function (er, c) { if (er) throw er t.equal(c, 0, "exit code should be 0") @@ -20,7 +19,7 @@ }) }) -test("npm help 
fadf should return exit code 0", function(t) { +test("npm help fadf should return exit code 0", function (t) { common.npm(["help", "fadf"], opts, function (er, c) { if (er) throw er t.equal(c, 0, "exit code should be 0") diff -Nru nodejs-0.11.13/deps/npm/test/tap/lifecycle.js nodejs-0.11.15/deps/npm/test/tap/lifecycle.js --- nodejs-0.11.13/deps/npm/test/tap/lifecycle.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/lifecycle.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,12 +1,12 @@ var test = require("tap").test -var npm = require('../../') -var lifecycle = require('../../lib/utils/lifecycle') +var npm = require("../../") +var lifecycle = require("../../lib/utils/lifecycle") test("lifecycle: make env correctly", function (t) { - npm.load({enteente: Infinity}, function() { + npm.load({enteente: Infinity}, function () { var env = lifecycle.makeEnv({}, null, process.env) - t.equal('Infinity', env.npm_config_enteente) + t.equal("Infinity", env.npm_config_enteente) t.end() }) }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/lifecycle-path/package.json nodejs-0.11.15/deps/npm/test/tap/lifecycle-path/package.json --- nodejs-0.11.13/deps/npm/test/tap/lifecycle-path/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/lifecycle-path/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +{"name":"glorb","version":"1.2.3","scripts":{"path":"./node-bin/node print-path.js"}} diff -Nru nodejs-0.11.13/deps/npm/test/tap/lifecycle-path/print-path.js nodejs-0.11.15/deps/npm/test/tap/lifecycle-path/print-path.js --- nodejs-0.11.13/deps/npm/test/tap/lifecycle-path/print-path.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/lifecycle-path/print-path.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +console.log(process.env.PATH) diff -Nru nodejs-0.11.13/deps/npm/test/tap/lifecycle-path.js nodejs-0.11.15/deps/npm/test/tap/lifecycle-path.js --- nodejs-0.11.13/deps/npm/test/tap/lifecycle-path.js 
1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/lifecycle-path.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +var test = require("tap").test +var common = require("../common-tap.js") +var path = require("path") +var rimraf = require("rimraf") +var pkg = path.resolve(__dirname, "lifecycle-path") +var fs = require("fs") +var link = path.resolve(pkg, "node-bin") + +// Without the path to the shell, nothing works usually. +var PATH +if (process.platform === "win32") { + PATH = "C:\\Windows\\system32;C:\\Windows" +} else { + PATH = "/bin:/usr/bin" +} + +test("setup", function (t) { + rimraf.sync(link) + fs.symlinkSync(path.dirname(process.execPath), link, "dir") + t.end() +}) + +test("make sure the path is correct", function (t) { + common.npm(["run-script", "path"], { + cwd: pkg, + env: { + PATH: PATH, + stdio: [ 0, "pipe", 2 ] + } + }, function (er, code, stdout) { + if (er) throw er + t.equal(code, 0, "exit code") + // remove the banner, we just care about the last line + stdout = stdout.trim().split(/\r|\n/).pop() + var pathSplit = process.platform === "win32" ? 
";" : ":" + var root = path.resolve(__dirname, "../..") + var actual = stdout.split(pathSplit).map(function (p) { + if (p.indexOf(root) === 0) { + p = "{{ROOT}}" + p.substr(root.length) + } + return p.replace(/\\/g, "/") + }) + + // get the ones we tacked on, then the system-specific requirements + var expect = [ + "{{ROOT}}/bin/node-gyp-bin", + "{{ROOT}}/test/tap/lifecycle-path/node_modules/.bin" + ].concat(PATH.split(pathSplit).map(function (p) { + return p.replace(/\\/g, "/") + })) + t.same(actual, expect) + t.end() + }) +}) + +test("clean", function (t) { + rimraf.sync(link) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/locker.js nodejs-0.11.15/deps/npm/test/tap/locker.js --- nodejs-0.11.13/deps/npm/test/tap/locker.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/locker.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,89 @@ +var test = require("tap").test + , path = require("path") + , fs = require("graceful-fs") + , crypto = require("crypto") + , rimraf = require("rimraf") + , osenv = require("osenv") + , mkdirp = require("mkdirp") + , npm = require("../../") + , locker = require("../../lib/utils/locker.js") + , lock = locker.lock + , unlock = locker.unlock + +var pkg = path.join(__dirname, "/locker") + , cache = path.join(pkg, "/cache") + , tmp = path.join(pkg, "/tmp") + , nm = path.join(pkg, "/node_modules") + +function cleanup () { + process.chdir(osenv.tmpdir()) + rimraf.sync(pkg) +} + +test("setup", function (t) { + cleanup() + mkdirp.sync(cache) + mkdirp.sync(tmp) + t.end() +}) + +test("locking file puts lock in correct place", function (t) { + npm.load({cache: cache, tmpdir: tmp}, function (er) { + t.ifError(er, "npm bootstrapped OK") + + var n = "correct" + , c = n.replace(/[^a-zA-Z0-9]+/g, "-").replace(/^-+|-+$/g, "") + , p = path.resolve(nm, n) + , h = crypto.createHash("sha1").update(p).digest("hex") + , l = c.substr(0, 24)+"-"+h.substr(0, 16)+".lock" + , v = path.join(cache, "_locks", l) + + lock(nm, n, 
function (er) { + t.ifError(er, "locked path") + + fs.exists(v, function (found) { + t.ok(found, "lock found OK") + + unlock(nm, n, function (er) { + t.ifError(er, "unlocked path") + + fs.exists(v, function (found) { + t.notOk(found, "lock deleted OK") + t.end() + }) + }) + }) + }) + }) +}) + +test("unlocking out of order errors out", function (t) { + npm.load({cache: cache, tmpdir: tmp}, function (er) { + t.ifError(er, "npm bootstrapped OK") + + var n = "busted" + , c = n.replace(/[^a-zA-Z0-9]+/g, "-").replace(/^-+|-+$/g, "") + , p = path.resolve(nm, n) + , h = crypto.createHash("sha1").update(p).digest("hex") + , l = c.substr(0, 24)+"-"+h.substr(0, 16)+".lock" + , v = path.join(cache, "_locks", l) + + fs.exists(v, function (found) { + t.notOk(found, "no lock to unlock") + + t.throws(function () { + unlock(nm, n, function () { + t.fail("shouldn't get here") + t.end() + }) + }, "blew up as expected") + + t.end() + }) + }) +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/login-always-auth.js nodejs-0.11.15/deps/npm/test/tap/login-always-auth.js --- nodejs-0.11.13/deps/npm/test/tap/login-always-auth.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/login-always-auth.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,142 @@ +var fs = require("fs") +var path = require("path") +var rimraf = require("rimraf") +var mr = require("npm-registry-mock") + +var test = require("tap").test +var common = require("../common-tap.js") + +var opts = {cwd : __dirname} +var outfile = path.resolve(__dirname, "_npmrc") +var responses = { + "Username" : "u\n", + "Password" : "p\n", + "Email" : "u@p.me\n" +} + +function mocks(server) { + server.filteringRequestBody(function (r) { + if (r.match(/\"_id\":\"org\.couchdb\.user:u\"/)) { + return "auth" + } + }) + server.put("/-/user/org.couchdb.user:u", "auth") + .reply(201, {username : "u", password : "p", email : "u@p.me"}) +} + +test("npm login", function (t) { 
+ mr({port : common.port, mocks : mocks}, function (s) { + var runner = common.npm( + [ + "login", + "--registry", common.registry, + "--loglevel", "silent", + "--userconfig", outfile + ], + opts, + function (err, code) { + t.notOk(code, "exited OK") + t.notOk(err, "no error output") + var config = fs.readFileSync(outfile, "utf8") + t.like(config, /:always-auth=false/, "always-auth is scoped and false (by default)") + s.close() + rimraf(outfile, function (err) { + t.ifError(err, "removed config file OK") + t.end() + }) + }) + + var o = "", e = "", remaining = Object.keys(responses).length + runner.stdout.on("data", function (chunk) { + remaining-- + o += chunk + + var label = chunk.toString("utf8").split(":")[0] + runner.stdin.write(responses[label]) + + if (remaining === 0) runner.stdin.end() + }) + runner.stderr.on("data", function (chunk) { e += chunk }) + }) +}) + +test("npm login --always-auth", function (t) { + mr({port : common.port, mocks : mocks}, function (s) { + var runner = common.npm( + [ + "login", + "--registry", common.registry, + "--loglevel", "silent", + "--userconfig", outfile, + "--always-auth" + ], + opts, + function (err, code) { + t.notOk(code, "exited OK") + t.notOk(err, "no error output") + var config = fs.readFileSync(outfile, "utf8") + t.like(config, /:always-auth=true/, "always-auth is scoped and true") + s.close() + rimraf(outfile, function (err) { + t.ifError(err, "removed config file OK") + t.end() + }) + }) + + var o = "", e = "", remaining = Object.keys(responses).length + runner.stdout.on("data", function (chunk) { + remaining-- + o += chunk + + var label = chunk.toString("utf8").split(":")[0] + runner.stdin.write(responses[label]) + + if (remaining === 0) runner.stdin.end() + }) + runner.stderr.on("data", function (chunk) { e += chunk }) + }) +}) + +test("npm login --no-always-auth", function (t) { + mr({port : common.port, mocks : mocks}, function (s) { + var runner = common.npm( + [ + "login", + "--registry", common.registry, + 
"--loglevel", "silent", + "--userconfig", outfile, + "--no-always-auth" + ], + opts, + function (err, code) { + t.notOk(code, "exited OK") + t.notOk(err, "no error output") + var config = fs.readFileSync(outfile, "utf8") + t.like(config, /:always-auth=false/, "always-auth is scoped and false") + s.close() + rimraf(outfile, function (err) { + t.ifError(err, "removed config file OK") + t.end() + }) + }) + + var o = "", e = "", remaining = Object.keys(responses).length + runner.stdout.on("data", function (chunk) { + remaining-- + o += chunk + + var label = chunk.toString("utf8").split(":")[0] + runner.stdin.write(responses[label]) + + if (remaining === 0) runner.stdin.end() + }) + runner.stderr.on("data", function (chunk) { e += chunk }) + }) +}) + + +test("cleanup", function (t) { + rimraf.sync(outfile) + t.pass("cleaned up") + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/ls-depth-cli.js nodejs-0.11.15/deps/npm/test/tap/ls-depth-cli.js --- nodejs-0.11.13/deps/npm/test/tap/ls-depth-cli.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ls-depth-cli.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,27 @@ -var common = require('../common-tap') - , test = require('tap').test - , path = require('path') - , rimraf = require('rimraf') - , osenv = require('osenv') - , mkdirp = require('mkdirp') - , pkg = __dirname + '/ls-depth' - , cache = pkg + '/cache' - , tmp = pkg + '/tmp' - , node = process.execPath - , npm = path.resolve(__dirname, '../../cli.js') - , mr = require('npm-registry-mock') +var common = require("../common-tap") + , test = require("tap").test + , path = require("path") + , rimraf = require("rimraf") + , osenv = require("osenv") + , mkdirp = require("mkdirp") + , pkg = path.resolve(__dirname, "ls-depth") + , mr = require("npm-registry-mock") , opts = {cwd: pkg} function cleanup () { process.chdir(osenv.tmpdir()) - rimraf.sync(pkg + '/cache') - rimraf.sync(pkg + '/tmp') - rimraf.sync(pkg + '/node_modules') + rimraf.sync(pkg 
+ "/cache") + rimraf.sync(pkg + "/tmp") + rimraf.sync(pkg + "/node_modules") } -test('setup', function (t) { +test("setup", function (t) { cleanup() - mkdirp.sync(pkg + '/cache') - mkdirp.sync(pkg + '/tmp') + mkdirp.sync(pkg + "/cache") + mkdirp.sync(pkg + "/tmp") mr(common.port, function (s) { - var cmd = ['install', '--registry=' + common.registry] + var cmd = ["install", "--registry=" + common.registry] common.npm(cmd, opts, function (er, c) { if (er) throw er t.equal(c, 0) @@ -35,8 +31,8 @@ }) }) -test('npm ls --depth=0', function (t) { - common.npm(['ls', '--depth=0'], opts, function (er, c, out) { +test("npm ls --depth=0", function (t) { + common.npm(["ls", "--depth=0"], opts, function (er, c, out) { if (er) throw er t.equal(c, 0) t.has(out, /test-package-with-one-dep@0\.0\.0/ @@ -47,8 +43,8 @@ }) }) -test('npm ls --depth=1', function (t) { - common.npm(['ls', '--depth=1'], opts, function (er, c, out) { +test("npm ls --depth=1", function (t) { + common.npm(["ls", "--depth=1"], opts, function (er, c, out) { if (er) throw er t.equal(c, 0) t.has(out, /test-package-with-one-dep@0\.0\.0/ @@ -59,10 +55,10 @@ }) }) -test('npm ls --depth=Infinity', function (t) { +test("npm ls --depth=Infinity", function (t) { // travis has a preconfigured depth=0, in general we can not depend // on the default value in all environments, so explictly set it here - common.npm(['ls', '--depth=Infinity'], opts, function (er, c, out) { + common.npm(["ls", "--depth=Infinity"], opts, function (er, c, out) { if (er) throw er t.equal(c, 0) t.has(out, /test-package-with-one-dep@0\.0\.0/ @@ -73,7 +69,7 @@ }) }) -test('cleanup', function (t) { +test("cleanup", function (t) { cleanup() t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/ls-depth-unmet/package.json nodejs-0.11.15/deps/npm/test/tap/ls-depth-unmet/package.json --- nodejs-0.11.13/deps/npm/test/tap/ls-depth-unmet/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ls-depth-unmet/package.json 
2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "ls-depth-umnet", + "author": "Evan You", + "version": "0.0.0", + "dependencies": { + "test-package-with-one-dep": "0.0.0", + "underscore": "1.5.1", + "optimist": "0.6.0" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/test/tap/ls-depth-unmet.js nodejs-0.11.15/deps/npm/test/tap/ls-depth-unmet.js --- nodejs-0.11.13/deps/npm/test/tap/ls-depth-unmet.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ls-depth-unmet.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,96 @@ +var common = require("../common-tap") + , test = require("tap").test + , path = require("path") + , rimraf = require("rimraf") + , osenv = require("osenv") + , mkdirp = require("mkdirp") + , pkg = path.resolve(__dirname, "ls-depth-unmet") + , mr = require("npm-registry-mock") + , opts = {cwd: pkg} + , cache = path.resolve(pkg, "cache") + , tmp = path.resolve(pkg, "tmp") + , nodeModules = path.resolve(pkg, "node_modules") + + +function cleanup () { + process.chdir(osenv.tmpdir()) + rimraf.sync(cache) + rimraf.sync(tmp) + rimraf.sync(nodeModules) +} + +test("setup", function (t) { + cleanup() + mkdirp.sync(cache) + mkdirp.sync(tmp) + mr(common.port, function (s) { + var cmd = ["install", "underscore@1.3.1", "mkdirp", "test-package-with-one-dep", "--registry=" + common.registry] + common.npm(cmd, opts, function (er, c) { + if (er) throw er + t.equal(c, 0) + s.close() + t.end() + }) + }) +}) + +test("npm ls --depth=0", function (t) { + common.npm(["ls", "--depth=0"], opts, function (er, c, out) { + if (er) throw er + t.equal(c, 1) + t.has(out, /UNMET DEPENDENCY optimist@0\.6\.0/ + , "output contains optimist@0.6.0 and labeled as unmet dependency") + t.has(out, /mkdirp@0\.3\.5 extraneous/ + , "output contains mkdirp@0.3.5 and labeled as extraneous") + t.has(out, /underscore@1\.3\.1 invalid/ + , "output contains underscore@1.3.1 and labeled as invalid") + t.has(out, 
/test-package-with-one-dep@0\.0\.0\n/ + , "output contains test-package-with-one-dep@0.0.0 and has no labels") + t.doesNotHave(out, /test-package@0\.0\.0/ + , "output does not contain test-package@0.0.0 which is at depth=1") + t.end() + }) +}) + +test("npm ls --depth=1", function (t) { + common.npm(["ls", "--depth=1"], opts, function (er, c, out) { + if (er) throw er + t.equal(c, 1) + t.has(out, /UNMET DEPENDENCY optimist@0\.6\.0/ + , "output contains optimist@0.6.0 and labeled as unmet dependency") + t.has(out, /mkdirp@0\.3\.5 extraneous/ + , "output contains mkdirp@0.3.5 and labeled as extraneous") + t.has(out, /underscore@1\.3\.1 invalid/ + , "output contains underscore@1.3.1 and labeled as invalid") + t.has(out, /test-package-with-one-dep@0\.0\.0\n/ + , "output contains test-package-with-one-dep@0.0.0 and has no labels") + t.has(out, /test-package@0\.0\.0/ + , "output contains test-package@0.0.0 which is at depth=1") + t.end() + }) +}) + +test("npm ls --depth=Infinity", function (t) { + // travis has a preconfigured depth=0, in general we can not depend + // on the default value in all environments, so explictly set it here + common.npm(["ls", "--depth=Infinity"], opts, function (er, c, out) { + if (er) throw er + t.equal(c, 1) + t.has(out, /UNMET DEPENDENCY optimist@0\.6\.0/ + , "output contains optimist@0.6.0 and labeled as unmet dependency") + t.has(out, /mkdirp@0\.3\.5 extraneous/ + , "output contains mkdirp@0.3.5 and labeled as extraneous") + t.has(out, /underscore@1\.3\.1 invalid/ + , "output contains underscore@1.3.1 and labeled as invalid") + t.has(out, /test-package-with-one-dep@0\.0\.0\n/ + , "output contains test-package-with-one-dep@0.0.0 and has no labels") + t.has(out, /test-package@0\.0\.0/ + , "output contains test-package@0.0.0 which is at depth=1") + t.end() + }) +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/ls-no-results.js nodejs-0.11.15/deps/npm/test/tap/ls-no-results.js --- 
nodejs-0.11.13/deps/npm/test/tap/ls-no-results.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/ls-no-results.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,11 +1,11 @@ -var test = require('tap').test -var spawn = require('child_process').spawn +var test = require("tap").test +var spawn = require("child_process").spawn var node = process.execPath -var npm = require.resolve('../../') -var args = [ npm, 'ls', 'ceci n’est pas une package' ] -test('ls exits non-zero when nothing found', function (t) { +var npm = require.resolve("../../") +var args = [ npm, "ls", "ceci n’est pas une package" ] +test("ls exits non-zero when nothing found", function (t) { var child = spawn(node, args) - child.on('exit', function (code) { + child.on("exit", function (code) { t.notEqual(code, 0) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/maybe-github.js nodejs-0.11.15/deps/npm/test/tap/maybe-github.js --- nodejs-0.11.13/deps/npm/test/tap/maybe-github.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/maybe-github.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,72 @@ +require("../common-tap.js") +var test = require("tap").test +var npm = require("../../lib/npm.js") + +// this is the narrowest way to replace a function in the module cache +var found = true +var remoteGitPath = require.resolve("../../lib/cache/add-remote-git.js") +require("module")._cache[remoteGitPath] = { + id: remoteGitPath, + exports: function stub(_, __, cb) { + if (found) { + cb(null, {}) + } + else { + cb(new Error("not on filesystem")) + } + } +} + +// only load maybeGithub now, so it gets the stub from cache +var maybeGithub = require("../../lib/cache/maybe-github.js") + +test("should throw with no parameters", function (t) { + t.plan(1) + + t.throws(function () { + maybeGithub() + }, "throws when called without parameters") +}) + +test("should throw with wrong parameter types", function (t) { + t.plan(2) + + t.throws(function () { + maybeGithub({}, 
function () {}) + }, "expects only a package name") + + t.throws(function () { + maybeGithub("npm/xxx-noexist", "ham") + }, "is always async") +}) + +test("should find an existing package on Github", function (t) { + found = true + npm.load({}, function (error) { + t.notOk(error, "bootstrapping succeeds") + t.doesNotThrow(function () { + maybeGithub("npm/npm", function (error, data) { + t.notOk(error, "no issues in looking things up") + t.ok(data, "received metadata from Github") + t.end() + }) + }) + }) +}) + +test("shouldn't find a nonexistent package on Github", function (t) { + found = false + npm.load({}, function () { + t.doesNotThrow(function () { + maybeGithub("npm/xxx-noexist", function (error, data) { + t.equal( + error.message, + "not on filesystem", + "passed through original error message" + ) + t.notOk(data, "didn't pass any metadata") + t.end() + }) + }) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/nested-extraneous.js nodejs-0.11.15/deps/npm/test/tap/nested-extraneous.js --- nodejs-0.11.13/deps/npm/test/tap/nested-extraneous.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/nested-extraneous.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var mkdirp = require("mkdirp") +var fs = require("fs") +var rimraf = require("rimraf") +var path = require("path") + +var pkg = path.resolve(__dirname, "nested-extraneous") +var pj = { + name: "nested-extraneous", + version: "1.2.3" +} + +var dep = path.resolve(pkg, "node_modules", "dep") +var deppj = { + name: "nested-extraneous-dep", + version: "1.2.3", + dependencies: { + "nested-extra-depdep": "*" + } +} + +var depdep = path.resolve(dep, "node_modules", "depdep") +var depdeppj = { + name: "nested-extra-depdep", + version: "1.2.3" +} + +test("setup", function (t) { + rimraf.sync(pkg) + mkdirp.sync(depdep) + fs.writeFileSync(path.resolve(pkg, "package.json"), JSON.stringify(pj)) + 
fs.writeFileSync(path.resolve(dep, "package.json"), JSON.stringify(deppj)) + fs.writeFileSync(path.resolve(depdep, "package.json"), JSON.stringify(depdeppj)) + t.end() +}) + +test("test", function (t) { + common.npm(["ls"], { + cwd: pkg + }, function (er, code, sto, ste) { + if (er) throw er + t.notEqual(code, 0) + t.notSimilar(ste, /depdep/) + t.notSimilar(sto, /depdep/) + t.end() + }) +}) + +test("clean", function (t) { + rimraf.sync(pkg) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/noargs-install-config-save.js nodejs-0.11.15/deps/npm/test/tap/noargs-install-config-save.js --- nodejs-0.11.13/deps/npm/test/tap/noargs-install-config-save.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/noargs-install-config-save.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,6 @@ var common = require("../common-tap.js") var test = require("tap").test var npm = require.resolve("../../bin/npm-cli.js") -var osenv = require("osenv") var path = require("path") var fs = require("fs") var rimraf = require("rimraf") @@ -9,12 +8,11 @@ var mr = require("npm-registry-mock") -var child var spawn = require("child_process").spawn var node = process.execPath -var pkg = process.env.npm_config_tmp || "/tmp" -pkg += path.sep + "noargs-install-config-save" +var pkg = path.resolve(process.env.npm_config_tmp || "/tmp", + "noargs-install-config-save") function writePackageJson() { rimraf.sync(pkg) @@ -28,14 +26,14 @@ "devDependencies": { "underscore": "1.3.1" } - }), 'utf8') + }), "utf8") } function createChild (args) { var env = { - npm_config_save: true, - npm_config_registry: common.registry, - npm_config_cache: pkg + "/cache", + "npm_config_save": true, + "npm_config_registry": common.registry, + "npm_config_cache": pkg + "/cache", HOME: process.env.HOME, Path: process.env.PATH, PATH: process.env.PATH @@ -56,9 +54,9 @@ mr(common.port, function (s) { var child = createChild([npm, "install"]) - child.on("close", function (m) { + child.on("close", function () 
{ var text = JSON.stringify(fs.readFileSync(pkg + "/package.json", "utf8")) - t.ok(text.indexOf('"dependencies') === -1) + t.ok(text.indexOf("\"dependencies") === -1) s.close() t.end() }) @@ -71,9 +69,9 @@ mr(common.port, function (s) { var child = createChild([npm, "install", "underscore"]) - child.on("close", function (m) { + child.on("close", function () { var text = JSON.stringify(fs.readFileSync(pkg + "/package.json", "utf8")) - t.ok(text.indexOf('"dependencies') !== -1) + t.ok(text.indexOf("\"dependencies") !== -1) s.close() t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/npm-api-not-loaded-error.js nodejs-0.11.15/deps/npm/test/tap/npm-api-not-loaded-error.js --- nodejs-0.11.13/deps/npm/test/tap/npm-api-not-loaded-error.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/npm-api-not-loaded-error.js 2015-01-20 21:22:17.000000000 +0000 @@ -21,7 +21,7 @@ t.ok(threw, "get before load should throw") } - var threw = true + threw = true try { npm.config.set("foo", "bar") threw = false diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d1/package.json nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d1/package.json --- nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d1/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d1/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "name": "d1", + "version": "1.0.0", + "description": "I FAIL CONSTANTLY", + "scripts": { + "preinstall": "sleep 1" + }, + "dependencies": { + "foo": "http://localhost:8080/" + }, + "author": "Forrest L Norvell <ogd@aoaioxxysz.net>", + "license": "ISC" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/blart.js nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/blart.js --- 
nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/blart.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/blart.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +var rando = require("crypto").randomBytes +var resolve = require("path").resolve + +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") +var writeFile = require("graceful-fs").writeFile + +var BASEDIR = resolve(__dirname, "arena") + +var keepItGoingLouder = {} +var writers = 0 +var errors = 0 + +function gensym() { return rando(16).toString("hex") } + +function writeAlmostForever(filename) { + if (!keepItGoingLouder[filename]) { + writers-- + if (writers < 1) return done() + } + else { + writeFile(filename, keepItGoingLouder[filename], function (err) { + if (err) errors++ + + writeAlmostForever(filename) + }) + } +} + +function done() { + rimraf(BASEDIR, function () { + if (errors > 0) console.log("not ok - %d errors", errors) + else console.log("ok") + }) +} + +mkdirp(BASEDIR, function go() { + for (var i = 0; i < 16; i++) { + var filename = resolve(BASEDIR, gensym() + ".txt") + + keepItGoingLouder[filename] = "" + for (var j = 0; j < 512; j++) keepItGoingLouder[filename] += filename + + writers++ + writeAlmostForever(filename) + } + + setTimeout(function viktor() { + // kill all the writers + keepItGoingLouder = {} + }, 3 * 1000) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/package.json nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/package.json --- nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/d2/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "name": "d2", + "version": "1.0.0", + "description": "how do you *really* know you exist?", + 
"scripts": { + "postinstall": "node blart.js" + }, + "dependencies": { + "graceful-fs": "^3.0.2", + "mkdirp": "^0.5.0", + "rimraf": "^2.2.8" + }, + "author": "Forrest L Norvell <ogd@aoaioxxysz.net>", + "license": "ISC" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/bad-server.js nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/bad-server.js --- nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/bad-server.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/bad-server.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +var createServer = require("http").createServer +var spawn = require("child_process").spawn +var fs = require("fs") +var path = require("path") +var pidfile = path.resolve(__dirname, "..", "..", "child.pid") + +if (process.argv[2]) { + console.log("ok") + createServer(function (req, res) { + setTimeout(function () { + res.writeHead(404) + res.end() + }, 1000) + this.close() + }).listen(8080) +} +else { + var child = spawn( + process.execPath, + [__filename, "whatever"], + { + stdio: [0, 1, 2], + detached: true + } + ) + child.unref() + + // kill any prior children, if existing. 
+ try { + var pid = +fs.readFileSync(pidfile) + process.kill(pid, "SIGKILL") + } catch (er) {} + + fs.writeFileSync(pidfile, child.pid + "\n") +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/package.json nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/package.json --- nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/deps/opdep/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "name": "opdep", + "version": "1.0.0", + "description": "To explode, of course!", + "main": "index.js", + "scripts": { + "preinstall": "node bad-server.js" + }, + "dependencies": { + "d1": "file:../d1", + "d2": "file:../d2" + }, + "author": "Forrest L Norvell <ogd@aoaioxxysz.net>", + "license": "ISC" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/package.json nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/package.json --- nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "optional-metadep-rollback-collision", + "version": "1.0.0", + "description": "let's just see about that race condition", + "optionalDependencies": { + "opdep": "file:./deps/opdep" + }, + "author": "Forrest L Norvell <ogd@aoaioxxysz.net>", + "license": "ISC" +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision.js nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision.js --- nodejs-0.11.13/deps/npm/test/tap/optional-metadep-rollback-collision.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/optional-metadep-rollback-collision.js 2015-01-20 
21:22:17.000000000 +0000 @@ -0,0 +1,56 @@ +var test = require("tap").test +var rimraf = require("rimraf") +var common = require("../common-tap.js") +var path = require("path") +var fs = require("fs") + +var pkg = path.resolve(__dirname, "optional-metadep-rollback-collision") +var nm = path.resolve(pkg, "node_modules") +var cache = path.resolve(pkg, "cache") +var pidfile = path.resolve(pkg, "child.pid") + +test("setup", function (t) { + cleanup() + t.end() +}) + +test("go go test racer", function (t) { + common.npm(["install", "--prefix=" + pkg, "--fetch-retries=0", "--cache=" + cache], { + cwd: pkg, + env: { + PATH: process.env.PATH, + Path: process.env.Path, + "npm_config_loglevel": "silent" + }, + stdio: [ 0, "pipe", 2 ] + }, function (er, code, sout) { + if (er) throw er + t.notOk(code, "npm install exited with code 0") + t.equal(sout, "ok\nok\n") + t.notOk(/not ok/.test(sout), "should not contain the string 'not ok'") + t.end() + }) +}) + +test("verify results", function (t) { + t.throws(function () { + fs.statSync(nm) + }) + t.end() +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) + +function cleanup () { + try { + var pid = +fs.readFileSync(pidfile) + process.kill(pid, "SIGKILL") + } catch (er) {} + + rimraf.sync(cache) + rimraf.sync(nm) + rimraf.sync(pidfile) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-color.js nodejs-0.11.15/deps/npm/test/tap/outdated-color.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-color.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-color.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,15 +1,17 @@ var common = require("../common-tap.js") var test = require("tap").test -var npm = require("../../") var mkdirp = require("mkdirp") var rimraf = require("rimraf") var mr = require("npm-registry-mock") -var exec = require('child_process').exec -var mr = require("npm-registry-mock") +var path = require("path") + +var pkg = path.resolve(__dirname, "outdated") +var cache = 
path.resolve(pkg, "cache") +mkdirp.sync(cache) -var pkg = __dirname + '/outdated' -var NPM_BIN = __dirname + '/../../bin/npm-cli.js' -mkdirp.sync(pkg + "/cache") +var EXEC_OPTS = { + cwd: pkg +} function hasControlCodes(str) { return str.length !== ansiTrim(str).length @@ -17,20 +19,26 @@ function ansiTrim (str) { var r = new RegExp("\x1b(?:\\[(?:\\d+[ABCDEFGJKSTm]|\\d+;\\d+[Hfm]|" + - "\\d+;\\d+;\\d+m|6n|s|u|\\?25[lh])|\\w)", "g"); + "\\d+;\\d+;\\d+m|6n|s|u|\\?25[lh])|\\w)", "g") return str.replace(r, "") } // note hard to automate tests for color = true // as npm kills the color config when it detects -// it's not running in a tty +// it"s not running in a tty test("does not use ansi styling", function (t) { - t.plan(3) + t.plan(4) mr(common.port, function (s) { // create mock registry. - exec('node ' + NPM_BIN + ' outdated --registry ' + common.registry + ' --color false underscore', { - cwd: pkg - }, function(err, stdout) { + common.npm( + [ + "outdated", + "--registry", common.registry, + "underscore" + ], + EXEC_OPTS, + function (err, code, stdout) { t.ifError(err) + t.notOk(code, "npm outdated exited with code 0") t.ok(stdout, stdout.length) t.ok(!hasControlCodes(stdout)) s.close() @@ -39,6 +47,6 @@ }) test("cleanup", function (t) { - rimraf.sync(pkg + "/cache") + rimraf.sync(cache) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-depth.js nodejs-0.11.15/deps/npm/test/tap/outdated-depth.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-depth.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-depth.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,39 @@ -var common = require('../common-tap') - , path = require('path') - , test = require('tap').test - , rimraf = require('rimraf') - , npm = require('../../') - , mr = require('npm-registry-mock') - , pkg = path.resolve(__dirname, 'outdated-depth') +var common = require("../common-tap") + , path = require("path") + , test = require("tap").test + , rimraf = 
require("rimraf") + , npm = require("../../") + , mr = require("npm-registry-mock") + , pkg = path.resolve(__dirname, "outdated-depth") + , cache = path.resolve(pkg, "cache") + , nodeModules = path.resolve(pkg, "node_modules") function cleanup () { - rimraf.sync(pkg + '/node_modules') - rimraf.sync(pkg + '/cache') + rimraf.sync(nodeModules) + rimraf.sync(cache) } -test('outdated depth zero', function (t) { +test("outdated depth zero", function (t) { var expected = [ pkg, - 'underscore', - '1.3.1', - '1.3.1', - '1.5.1', - '1.3.1' + "underscore", + "1.3.1", + "1.3.1", + "1.5.1", + "1.3.1" ] process.chdir(pkg) mr(common.port, function (s) { npm.load({ - cache: pkg + '/cache' - , loglevel: 'silent' + cache: cache + , loglevel: "silent" , registry: common.registry , depth: 0 } , function () { - npm.install('.', function (er) { + npm.install(".", function (er) { if (er) throw new Error(er) npm.outdated(function (err, d) { if (err) throw new Error(err) diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-git.js nodejs-0.11.15/deps/npm/test/tap/outdated-git.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-git.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-git.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,28 +3,29 @@ var npm = require("../../") var mkdirp = require("mkdirp") var rimraf = require("rimraf") -var mr = require("npm-registry-mock") +var path = require("path") // config -var pkg = __dirname + "/outdated-git" -mkdirp.sync(pkg + "/cache") +var pkg = path.resolve(__dirname, "outdated-git") +var cache = path.resolve(pkg, "cache") +mkdirp.sync(cache) test("dicovers new versions in outdated", function (t) { process.chdir(pkg) t.plan(5) - npm.load({cache: pkg + "/cache", registry: common.registry}, function () { - npm.outdated(function (er, d) { - t.equal('git', d[0][3]) - t.equal('git', d[0][4]) - t.equal('git://github.com/robertkowalski/foo-private.git', d[0][5]) - t.equal('git://user:pass@github.com/robertkowalski/foo-private.git', 
d[1][5]) - t.equal('git://github.com/robertkowalski/foo', d[2][5]) + npm.load({cache: cache, registry: common.registry}, function () { + npm.commands.outdated([], function (er, d) { + t.equal("git", d[0][3]) + t.equal("git", d[0][4]) + t.equal("git://github.com/robertkowalski/foo-private.git", d[0][5]) + t.equal("git://user:pass@github.com/robertkowalski/foo-private.git", d[1][5]) + t.equal("git+https://github.com/robertkowalski/foo", d[2][5]) }) }) }) test("cleanup", function (t) { - rimraf.sync(pkg + "/cache") + rimraf.sync(cache) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-include-devdependencies.js nodejs-0.11.15/deps/npm/test/tap/outdated-include-devdependencies.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-include-devdependencies.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-include-devdependencies.js 2015-01-20 21:22:17.000000000 +0000 @@ -4,15 +4,17 @@ var mkdirp = require("mkdirp") var rimraf = require("rimraf") var mr = require("npm-registry-mock") +var path = require("path") // config -var pkg = __dirname + '/outdated-include-devdependencies' -mkdirp.sync(pkg + "/cache") +var pkg = path.resolve(__dirname, "outdated-include-devdependencies") +var cache = path.resolve(pkg, "cache") +mkdirp.sync(cache) test("includes devDependencies in outdated", function (t) { process.chdir(pkg) mr(common.port, function (s) { - npm.load({cache: pkg + "/cache", registry: common.registry}, function () { + npm.load({cache: cache, registry: common.registry}, function () { npm.outdated(function (er, d) { t.equal("1.5.1", d[0][3]) s.close() @@ -23,6 +25,6 @@ }) test("cleanup", function (t) { - rimraf.sync(pkg + "/cache") + rimraf.sync(cache) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated.js nodejs-0.11.15/deps/npm/test/tap/outdated.js --- nodejs-0.11.13/deps/npm/test/tap/outdated.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated.js 2015-01-20 21:22:17.000000000 +0000 
@@ -1,14 +1,14 @@ var common = require("../common-tap.js") -var fs = require("fs") var test = require("tap").test var rimraf = require("rimraf") var npm = require("../../") +var path = require("path") var mr = require("npm-registry-mock") // config -var pkg = __dirname + '/outdated' - -var path = require("path") +var pkg = path.resolve(__dirname, "outdated") +var cache = path.resolve(pkg, "cache") +var nodeModules = path.resolve(pkg, "node_modules") test("it should not throw", function (t) { cleanup() @@ -33,13 +33,15 @@ } mr(common.port, function (s) { npm.load({ - cache: pkg + "/cache", - loglevel: 'silent', + cache: "cache", + loglevel: "silent", parseable: true, registry: common.registry } , function () { npm.install(".", function (err) { + t.ifError(err, "install success") npm.outdated(function (er, d) { + t.ifError(er, "outdated success") console.log = originalLog t.same(output, expOut) t.same(d, expData) @@ -57,6 +59,6 @@ }) function cleanup () { - rimraf.sync(pkg + "/node_modules") - rimraf.sync(pkg + "/cache") + rimraf.sync(nodeModules) + rimraf.sync(cache) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-json.js nodejs-0.11.15/deps/npm/test/tap/outdated-json.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-json.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-json.js 2015-01-20 21:22:17.000000000 +0000 @@ -5,29 +5,29 @@ , mr = require("npm-registry-mock") , path = require("path") , osenv = require("osenv") - , spawn = require('child_process').spawn + , spawn = require("child_process").spawn , node = process.execPath - , npmc = require.resolve('../../') - , pkg = path.resolve(__dirname, 'outdated-new-versions') + , npmc = require.resolve("../../") + , pkg = path.resolve(__dirname, "outdated-new-versions") , args = [ npmc - , 'outdated' - , '--json' - , '--silent' - , '--registry=' + common.registry - , '--cache=' + pkg + '/cache' + , "outdated" + , "--json" + , "--silent" + , "--registry=" + common.registry + , 
"--cache=" + pkg + "/cache" ] var expected = { underscore: - { current: '1.3.3' - , wanted: '1.3.3' - , latest: '1.5.1' - , location: 'node_modules' + path.sep + 'underscore' + { current: "1.3.3" + , wanted: "1.3.3" + , latest: "1.5.1" + , location: "node_modules" + path.sep + "underscore" } , request: - { current: '0.9.5' - , wanted: '0.9.5' - , latest: '2.27.0' - , location: 'node_modules' + path.sep + 'request' + { current: "0.9.5" + , wanted: "0.9.5" + , latest: "2.27.0" + , location: "node_modules" + path.sep + "request" } } @@ -38,18 +38,19 @@ mr(common.port, function (s) { npm.load({ cache: pkg + "/cache", - loglevel: 'silent', + loglevel: "silent", registry: common.registry } , function () { npm.install(".", function (err) { + t.ifError(err, "error should not exist") var child = spawn(node, args) - , out = '' + , out = "" child.stdout - .on('data', function (buf) { + .on("data", function (buf) { out += buf.toString() }) .pipe(process.stdout) - child.on('exit', function () { + child.on("exit", function () { out = JSON.parse(out) t.deepEqual(out, expected) s.close() diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-new-versions.js nodejs-0.11.15/deps/npm/test/tap/outdated-new-versions.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-new-versions.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-new-versions.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,11 +3,13 @@ var npm = require("../../") var mkdirp = require("mkdirp") var rimraf = require("rimraf") +var path = require("path") var mr = require("npm-registry-mock") -var pkg = __dirname + "/outdated-new-versions" -mkdirp.sync(pkg + "/cache") +var pkg = path.resolve(__dirname, "outdated-new-versions") +var cache = path.resolve(pkg, "cache") +mkdirp.sync(cache) test("dicovers new versions in outdated", function (t) { @@ -15,7 +17,7 @@ t.plan(2) mr(common.port, function (s) { - npm.load({cache: pkg + "/cache", registry: common.registry}, function () { + npm.load({cache: 
cache, registry: common.registry}, function () { npm.outdated(function (er, d) { for (var i = 0; i < d.length; i++) { if (d[i][1] === "underscore") @@ -31,6 +33,6 @@ }) test("cleanup", function (t) { - rimraf.sync(pkg + "/cache") + rimraf.sync(cache) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/outdated-notarget.js nodejs-0.11.15/deps/npm/test/tap/outdated-notarget.js --- nodejs-0.11.13/deps/npm/test/tap/outdated-notarget.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/outdated-notarget.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,23 +1,23 @@ // Fixes Issue #1770 -var common = require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var osenv = require('osenv') -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = require('mkdirp') -var pkg = path.resolve(__dirname, 'outdated-notarget') -var cache = path.resolve(pkg, 'cache') -var mr = require('npm-registry-mock') +var common = require("../common-tap.js") +var test = require("tap").test +var npm = require("../../") +var osenv = require("osenv") +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var pkg = path.resolve(__dirname, "outdated-notarget") +var cache = path.resolve(pkg, "cache") +var mr = require("npm-registry-mock") -test('outdated-target: if no viable version is found, show error', function(t) { +test("outdated-target: if no viable version is found, show error", function (t) { t.plan(1) setup() - mr({port: common.port}, function(s) { - npm.load({ cache: cache, registry: common.registry}, function() { - npm.commands.update(function(er, d) { - t.equal(er.code, 'ETARGET') + mr({port: common.port}, function (s) { + npm.load({ cache: cache, registry: common.registry}, function () { + npm.commands.update(function (er) { + t.equal(er.code, "ETARGET") s.close() t.end() }) @@ -25,7 +25,7 @@ }) }) -test('cleanup', function(t) { 
+test("cleanup", function (t) { process.chdir(osenv.tmpdir()) rimraf.sync(pkg) t.end() @@ -34,14 +34,14 @@ function setup() { mkdirp.sync(pkg) mkdirp.sync(cache) - fs.writeFileSync(path.resolve(pkg, 'package.json'), JSON.stringify({ - author: 'Evan Lucas', - name: 'outdated-notarget', - version: '0.0.0', - description: 'Test for outdated-target', + fs.writeFileSync(path.resolve(pkg, "package.json"), JSON.stringify({ + author: "Evan Lucas", + name: "outdated-notarget", + version: "0.0.0", + description: "Test for outdated-target", dependencies: { - underscore: '~199.7.1' + underscore: "~199.7.1" } - }), 'utf8') + }), "utf8") process.chdir(pkg) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/pack-scoped.js nodejs-0.11.15/deps/npm/test/tap/pack-scoped.js --- nodejs-0.11.13/deps/npm/test/tap/pack-scoped.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/pack-scoped.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ +// verify that prepublish runs on pack and publish +var test = require("tap").test +var common = require("../common-tap") +var fs = require("graceful-fs") +var join = require("path").join +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") + +var pkg = join(__dirname, "scoped_package") +var manifest = join(pkg, "package.json") +var tmp = join(pkg, "tmp") +var cache = join(pkg, "cache") + +var data = { + name : "@scope/generic-package", + version : "90000.100001.5" +} + +test("setup", function (t) { + var n = 0 + + rimraf.sync(pkg) + + mkdirp(pkg, then()) + mkdirp(cache, then()) + mkdirp(tmp, then()) + + function then () { + n++ + return function (er) { + t.ifError(er) + if (--n === 0) next() + } + } + + function next () { + fs.writeFile(manifest, JSON.stringify(data), "ascii", done) + } + + function done (er) { + t.ifError(er) + + t.pass("setup done") + t.end() + } +}) + +test("test", function (t) { + var env = { + "npm_config_cache" : cache, + "npm_config_tmp" : tmp, + "npm_config_prefix" : pkg, + "npm_config_global" 
: "false" + } + + for (var i in process.env) { + if (!/^npm_config_/.test(i)) env[i] = process.env[i] + } + + common.npm([ + "pack", + "--loglevel", "warn" + ], { + cwd: pkg, + env: env + }, function(err, code, stdout, stderr) { + t.ifErr(err, "npm pack finished without error") + t.equal(code, 0, "npm pack exited ok") + t.notOk(stderr, "got stderr data: " + JSON.stringify("" + stderr)) + stdout = stdout.trim() + var regex = new RegExp("scope-generic-package-90000.100001.5.tgz", "ig") + t.ok(stdout.match(regex), "found package") + t.end() + }) +}) + +test("cleanup", function (t) { + rimraf.sync(pkg) + t.pass("cleaned up") + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/peer-deps-invalid.js nodejs-0.11.15/deps/npm/test/tap/peer-deps-invalid.js --- nodejs-0.11.13/deps/npm/test/tap/peer-deps-invalid.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/peer-deps-invalid.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,18 +1,20 @@ -var common = require('../common-tap.js') +var common = require("../common-tap") var fs = require("fs") var path = require("path") var test = require("tap").test var rimraf = require("rimraf") var npm = require("../../") var mr = require("npm-registry-mock") -var pkg = __dirname + "/peer-deps-invalid" +var pkg = path.resolve(__dirname, "peer-deps-invalid") +var cache = path.resolve(pkg, "cache") +var nodeModules = path.resolve(pkg, "node_modules") var okFile = fs.readFileSync(path.join(pkg, "file-ok.js"), "utf8") var failFile = fs.readFileSync(path.join(pkg, "file-fail.js"), "utf8") test("installing dependencies that have conflicting peerDependencies", function (t) { - rimraf.sync(pkg + "/node_modules") - rimraf.sync(pkg + "/cache") + rimraf.sync(nodeModules) + rimraf.sync(cache) process.chdir(pkg) var customMocks = { @@ -23,7 +25,7 @@ } mr({port: common.port, mocks: customMocks}, function (s) { // create mock registry. 
npm.load({ - cache: pkg + "/cache", + cache: cache, registry: common.registry }, function () { npm.commands.install([], function (err) { @@ -40,7 +42,7 @@ }) test("cleanup", function (t) { - rimraf.sync(pkg + "/node_modules") - rimraf.sync(pkg + "/cache") + rimraf.sync(nodeModules) + rimraf.sync(cache) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/peer-deps.js nodejs-0.11.15/deps/npm/test/tap/peer-deps.js --- nodejs-0.11.13/deps/npm/test/tap/peer-deps.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/peer-deps.js 2015-01-20 21:22:17.000000000 +0000 @@ -11,8 +11,6 @@ var desiredResultsPath = path.resolve(pkg, "desired-ls-results.json") test("installs the peer dependency directory structure", function (t) { - t.plan(1) - mr(common.port, function (s) { setup(function (err) { if (err) return t.fail(err) @@ -46,7 +44,7 @@ cleanup() process.chdir(pkg) - var opts = { cache: path.resolve(pkg, "cache"), registry: common.registry}; + var opts = { cache: path.resolve(pkg, "cache"), registry: common.registry} npm.load(opts, cb) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/peer-deps-toplevel/desired-ls-results.json nodejs-0.11.15/deps/npm/test/tap/peer-deps-toplevel/desired-ls-results.json --- nodejs-0.11.13/deps/npm/test/tap/peer-deps-toplevel/desired-ls-results.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/peer-deps-toplevel/desired-ls-results.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "name": "npm-test-peer-deps-toplevel", + "version": "0.0.0", + "dependencies": { + "npm-test-peer-deps": { + "version": "0.0.0", + "dependencies": { + "underscore": { + "version": "1.3.1" + } + } + }, + "mkdirp": { + "version": "0.3.5" + }, + "request": { + "version": "0.9.5" + } + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/peer-deps-toplevel/package.json nodejs-0.11.15/deps/npm/test/tap/peer-deps-toplevel/package.json --- nodejs-0.11.13/deps/npm/test/tap/peer-deps-toplevel/package.json 1970-01-01 
00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/peer-deps-toplevel/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "author": "Domenic Denicola", + "name": "npm-test-peer-deps-toplevel", + "version": "0.0.0", + "dependencies": { + "npm-test-peer-deps": "*" + }, + "peerDependencies": { + "mkdirp": "*" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/peer-deps-toplevel.js nodejs-0.11.15/deps/npm/test/tap/peer-deps-toplevel.js --- nodejs-0.11.13/deps/npm/test/tap/peer-deps-toplevel.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/peer-deps-toplevel.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,55 @@ +var npm = npm = require("../../") +var test = require("tap").test +var path = require("path") +var fs = require("fs") +var osenv = require("osenv") +var rimraf = require("rimraf") +var mr = require("npm-registry-mock") +var common = require("../common-tap.js") + +var pkg = path.resolve(__dirname, "peer-deps-toplevel") +var desiredResultsPath = path.resolve(pkg, "desired-ls-results.json") + +test("installs the peer dependency directory structure", function (t) { + mr(common.port, function (s) { + setup(function (err) { + t.ifError(err, "setup ran successfully") + + npm.install(".", function (err) { + t.ifError(err, "packages were installed") + + npm.commands.ls([], true, function (err, _, results) { + t.ifError(err, "listed tree without problems") + + fs.readFile(desiredResultsPath, function (err, desired) { + t.ifError(err, "read desired results") + + t.deepEqual(results, JSON.parse(desired), "got expected output from ls") + s.close() + t.end() + }) + }) + }) + }) + }) +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) + + +function setup (cb) { + cleanup() + process.chdir(pkg) + + var opts = { cache: path.resolve(pkg, "cache"), registry: common.registry} + npm.load(opts, cb) +} + +function cleanup () { + process.chdir(osenv.tmpdir()) + rimraf.sync(path.resolve(pkg, "node_modules")) + 
rimraf.sync(path.resolve(pkg, "cache")) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/peer-deps-without-package-json.js nodejs-0.11.15/deps/npm/test/tap/peer-deps-without-package-json.js --- nodejs-0.11.13/deps/npm/test/tap/peer-deps-without-package-json.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/peer-deps-without-package-json.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,39 @@ -var common = require('../common-tap.js') +var common = require("../common-tap") var fs = require("fs") var path = require("path") var test = require("tap").test var rimraf = require("rimraf") var npm = require("../../") var mr = require("npm-registry-mock") -var pkg = __dirname + "/peer-deps-without-package-json" +var pkg = path.resolve(__dirname, "peer-deps-without-package-json") +var cache = path.resolve(pkg, "cache") +var nodeModules = path.resolve(pkg, "node_modules") var js = fs.readFileSync(path.join(pkg, "file-js.js"), "utf8") test("installing a peerDependencies-using package without a package.json present (GH-3049)", function (t) { - rimraf.sync(pkg + "/node_modules") - rimraf.sync(pkg + "/cache") + rimraf.sync(nodeModules) + rimraf.sync(cache) - fs.mkdirSync(pkg + "/node_modules") + fs.mkdirSync(nodeModules) process.chdir(pkg) var customMocks = { "get": { - "/ok.js": [200, js], + "/ok.js": [200, js] } } mr({port: common.port, mocks: customMocks}, function (s) { // create mock registry. npm.load({ registry: common.registry, - cache: pkg + "/cache" + cache: cache }, function () { npm.install(common.registry + "/ok.js", function (err) { if (err) { t.fail(err) } else { - t.ok(fs.existsSync(pkg + "/node_modules/npm-test-peer-deps-file")) - t.ok(fs.existsSync(pkg + "/node_modules/underscore")) + t.ok(fs.existsSync(path.join(nodeModules, "/npm-test-peer-deps-file"))) + t.ok(fs.existsSync(path.join(nodeModules, "/underscore"))) } t.end() s.close() // shutdown mock registry. 
@@ -41,7 +43,7 @@ }) test("cleanup", function (t) { - rimraf.sync(pkg + "/node_modules") - rimraf.sync(pkg + "/cache") + rimraf.sync(nodeModules) + rimraf.sync(cache) t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/prepublish.js nodejs-0.11.15/deps/npm/test/tap/prepublish.js --- nodejs-0.11.13/deps/npm/test/tap/prepublish.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/prepublish.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,79 +1,64 @@ // verify that prepublish runs on pack and publish -var test = require('tap').test -var npm = require('../../') -var fs = require('fs') -var pkg = __dirname + '/prepublish_package' -var tmp = pkg + '/tmp' -var cache = pkg + '/cache' -var mkdirp = require('mkdirp') -var rimraf = require('rimraf') -var path = require('path') -var os = require('os') +var common = require("../common-tap") +var test = require("tap").test +var fs = require("graceful-fs") +var join = require("path").join +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") + +var pkg = join(__dirname, "prepublish_package") +var tmp = join(pkg, "tmp") +var cache = join(pkg, "cache") -test('setup', function (t) { +test("setup", function (t) { var n = 0 + cleanup() mkdirp(pkg, then()) mkdirp(cache, then()) mkdirp(tmp, then()) - function then (er) { - n ++ + function then () { + n++ return function (er) { - if (er) - throw er - if (--n === 0) - next() + if (er) throw er + if (--n === 0) next() } } function next () { - fs.writeFile(pkg + '/package.json', JSON.stringify({ - name: 'npm-test-prepublish', - version: '1.2.5', - scripts: { prepublish: 'echo ok' } - }), 'ascii', function (er) { - if (er) - throw er - t.pass('setup done') + fs.writeFile(join(pkg, "package.json"), JSON.stringify({ + name: "npm-test-prepublish", + version: "1.2.5", + scripts: { prepublish: "echo ok" } + }), "ascii", function (er) { + if (er) throw er + + t.pass("setup done") t.end() }) } }) -test('test', function (t) { - var spawn = 
require('child_process').spawn - var node = process.execPath - var npm = path.resolve(__dirname, '../../cli.js') +test("test", function (t) { var env = { - npm_config_cache: cache, - npm_config_tmp: tmp, - npm_config_prefix: pkg, - npm_config_global: 'false' + "npm_config_cache" : cache, + "npm_config_tmp" : tmp, + "npm_config_prefix" : pkg, + "npm_config_global" : "false" } for (var i in process.env) { if (!/^npm_config_/.test(i)) env[i] = process.env[i] } - var child = spawn(node, [npm, 'pack'], { - cwd: pkg, - env: env - }) - child.stdout.setEncoding('utf8') - child.stderr.on('data', onerr) - child.stdout.on('data', ondata) - child.on('close', onend) - var c = '' - , e = '' - function ondata (chunk) { - c += chunk - } - function onerr (chunk) { - e += chunk - } - function onend () { - if (e) { - throw new Error('got stderr data: ' + JSON.stringify('' + e)) - } - c = c.trim() + + common.npm([ + "pack", + "--loglevel", "warn" + ], { cwd: pkg, env: env }, function(err, code, stdout, stderr) { + t.equal(code, 0, "pack finished successfully") + t.ifErr(err, "pack finished successfully") + + t.notOk(stderr, "got stderr data:" + JSON.stringify("" + stderr)) + var c = stdout.trim() var regex = new RegExp("" + "> npm-test-prepublish@1.2.5 prepublish [^\\r\\n]+\\r?\\n" + "> echo ok\\r?\\n" + @@ -83,15 +68,15 @@ t.ok(c.match(regex)) t.end() - } + }) }) -test('cleanup', function (t) { - rimraf(pkg, function(er) { - if (er) - throw er - t.pass('cleaned up') - t.end() - }) +test("cleanup", function (t) { + cleanup() + t.pass("cleaned up") + t.end() }) +function cleanup() { + rimraf.sync(pkg) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/prune.js nodejs-0.11.15/deps/npm/test/tap/prune.js --- nodejs-0.11.13/deps/npm/test/tap/prune.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/prune.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,16 +1,17 @@ var test = require("tap").test +var common = require("../common-tap") var fs = require("fs") -var node = 
process.execPath -var npm = require.resolve("../../bin/npm-cli.js") var rimraf = require("rimraf") var mr = require("npm-registry-mock") -var common = require("../common-tap.js") -var spawn = require("child_process").spawn var env = process.env -process.env.npm_config_depth = "Infinity" +var path = require("path") -var pkg = __dirname + "/prune" -var cache = pkg + "/cache" +var pkg = path.resolve(__dirname, "prune") +var cache = path.resolve(pkg, "cache") +var nodeModules = path.resolve(pkg, "node_modules") + +var EXEC_OPTS = { cwd: pkg, env: env } +EXEC_OPTS.env.npm_config_depth = "Infinity" var server @@ -22,38 +23,43 @@ }) }) +function cleanup () { + rimraf.sync(cache) + rimraf.sync(nodeModules) +} + +test("setup", function (t) { + cleanup() + t.pass("setup") + t.end() +}) test("npm install", function (t) { - rimraf.sync(pkg + "/node_modules") - var c = spawn(node, [ - npm, "install", - "--cache=" + cache, - "--registry=" + common.registry, - "--loglevel=silent", - "--production=false" - ], { cwd: pkg, env: env }) - c.stderr.on("data", function(d) { - t.fail("Should not get data on stderr: " + d) - }) - c.on("close", function(code) { + common.npm([ + "install", + "--cache", cache, + "--registry", common.registry, + "--loglevel", "silent", + "--production", "false" + ], EXEC_OPTS, function (err, code, stdout, stderr) { + t.ifErr(err, "install finished successfully") t.notOk(code, "exit ok") + t.notOk(stderr, "Should not get data on stderr: " + stderr) t.end() }) }) test("npm install test-package", function (t) { - var c = spawn(node, [ - npm, "install", "test-package", - "--cache=" + cache, - "--registry=" + common.registry, - "--loglevel=silent", - "--production=false" - ], { cwd: pkg, env: env }) - c.stderr.on("data", function(d) { - t.fail("Should not get data on stderr: " + d) - }) - c.on("close", function(code) { + common.npm([ + "install", "test-package", + "--cache", cache, + "--registry", common.registry, + "--loglevel", "silent", + "--production", 
"false" + ], EXEC_OPTS, function (err, code, stdout, stderr) { + t.ifErr(err, "install finished successfully") t.notOk(code, "exit ok") + t.notOk(stderr, "Should not get data on stderr: " + stderr) t.end() }) }) @@ -65,16 +71,14 @@ }) test("npm prune", function (t) { - var c = spawn(node, [ - npm, "prune", - "--loglevel=silent", - "--production=false" - ], { cwd: pkg, env: env }) - c.stderr.on("data", function(d) { - t.fail("Should not get data on stderr: " + d) - }) - c.on("close", function(code) { + common.npm([ + "prune", + "--loglevel", "silent", + "--production", "false" + ], EXEC_OPTS, function (err, code, stdout, stderr) { + t.ifErr(err, "prune finished successfully") t.notOk(code, "exit ok") + t.notOk(stderr, "Should not get data on stderr: " + stderr) t.end() }) }) @@ -86,16 +90,14 @@ }) test("npm prune", function (t) { - var c = spawn(node, [ - npm, "prune", - "--loglevel=silent", + common.npm([ + "prune", + "--loglevel", "silent", "--production" - ], { cwd: pkg, env: env }) - c.stderr.on("data", function(d) { - t.fail("Should not get data on stderr: " + d) - }) - c.on("close", function(code) { + ], EXEC_OPTS, function (err, code, stderr) { + t.ifErr(err, "prune finished successfully") t.notOk(code, "exit ok") + t.equal(stderr, "unbuild mkdirp@0.3.5\n") t.end() }) }) @@ -108,8 +110,7 @@ test("cleanup", function (t) { server.close() - rimraf.sync(pkg + "/node_modules") - rimraf.sync(pkg + "/cache") + cleanup() t.pass("cleaned up") t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/publish-config.js nodejs-0.11.15/deps/npm/test/tap/publish-config.js --- nodejs-0.11.13/deps/npm/test/tap/publish-config.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/publish-config.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,37 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var fs = require('fs') -var osenv = require('osenv') -var pkg = process.env.npm_config_tmp || '/tmp' -pkg += '/npm-test-publish-config' 
+var common = require("../common-tap.js") +var test = require("tap").test +var fs = require("fs") +var osenv = require("osenv") +var pkg = process.env.npm_config_tmp || "/tmp" +pkg += "/npm-test-publish-config" -require('mkdirp').sync(pkg) +require("mkdirp").sync(pkg) -fs.writeFileSync(pkg + '/package.json', JSON.stringify({ - name: 'npm-test-publish-config', - version: '1.2.3', +fs.writeFileSync(pkg + "/package.json", JSON.stringify({ + name: "npm-test-publish-config", + version: "1.2.3", publishConfig: { registry: common.registry } -}), 'utf8') +}), "utf8") -var spawn = require('child_process').spawn -var npm = require.resolve('../../bin/npm-cli.js') -var node = process.execPath +fs.writeFileSync(pkg + "/fixture_npmrc", + "//localhost:1337/:email = fancy@feast.net\n" + + "//localhost:1337/:username = fancy\n" + + "//localhost:1337/:_password = " + new Buffer("feast").toString("base64") + "\n" + + "registry = http://localhost:1337/") test(function (t) { var child - require('http').createServer(function (req, res) { - t.pass('got request on the fakey fake registry') + require("http").createServer(function (req, res) { + t.pass("got request on the fakey fake registry") t.end() this.close() res.statusCode = 500 - res.end('{"error":"sshhh. naptime nao. \\^O^/ <(YAWWWWN!)"}') + res.end(JSON.stringify({ + error: "sshhh. naptime nao. \\^O^/ <(YAWWWWN!)" + })) child.kill() }).listen(common.port, function () { - t.pass('server is listening') + t.pass("server is listening") // don't much care about listening to the child's results // just wanna make sure it hits the server we just set up. @@ -36,16 +40,20 @@ // itself functions normally. 
// // Make sure that we don't sit around waiting for lock files - child = spawn(node, [npm, 'publish', '--email=fancy', '--_auth=feast'], { + child = common.npm(["publish", "--userconfig=" + pkg + "/fixture_npmrc"], { cwd: pkg, + stdio: "inherit", env: { - npm_config_cache_lock_stale: 1000, - npm_config_cache_lock_wait: 1000, + "npm_config_cache_lock_stale": 1000, + "npm_config_cache_lock_wait": 1000, HOME: process.env.HOME, Path: process.env.PATH, PATH: process.env.PATH, USERPROFILE: osenv.home() } + }, function (err, code) { + t.ifError(err, "publish command finished successfully") + t.notOk(code, "npm install exited with code 0") }) }) }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/publish-scoped.js nodejs-0.11.15/deps/npm/test/tap/publish-scoped.js --- nodejs-0.11.13/deps/npm/test/tap/publish-scoped.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/publish-scoped.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,101 @@ +var fs = require("fs") +var path = require("path") + +var test = require("tap").test +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") +var nock = require("nock") + +var npm = require("../../") +var common = require("../common-tap.js") + +var pkg = path.join(__dirname, "prepublish_package") + +// TODO: nock uses setImmediate, breaks 0.8: replace with mockRegistry +if (!global.setImmediate) { + global.setImmediate = function () { + var args = [arguments[0], 0].concat([].slice.call(arguments, 1)) + setTimeout.apply(this, args) + } +} + +test("setup", function (t) { + mkdirp(path.join(pkg, "cache"), next) + + function next () { + process.chdir(pkg) + fs.writeFile( + path.join(pkg, "package.json"), + JSON.stringify({ + name: "@bigco/publish-organized", + version: "1.2.5" + }), + "ascii", + function (er) { + t.ifError(er) + + t.pass("setup done") + t.end() + } + ) + } +}) + +test("npm publish should honor scoping", function (t) { + var put = nock(common.registry) + .put("/@bigco%2fpublish-organized") + 
.reply(201, verify) + + var configuration = { + cache : path.join(pkg, "cache"), + loglevel : "silent", + registry : "http://nonexistent.lvh.me", + "//localhost:1337/:username" : "username", + "//localhost:1337/:_password" : new Buffer("password").toString("base64"), + "//localhost:1337/:email" : "ogd@aoaioxxysz.net" + } + + npm.load(configuration, onload) + + function onload (er) { + t.ifError(er, "npm bootstrapped successfully") + + npm.config.set("@bigco:registry", common.registry) + npm.commands.publish([], false, function (er) { + t.ifError(er, "published without error") + + put.done() + + t.end() + }) + } + + function verify (_, body) { + t.doesNotThrow(function () { + var parsed = JSON.parse(body) + var current = parsed.versions["1.2.5"] + t.equal( + current._npmVersion, + require(path.resolve(__dirname, "../../package.json")).version, + "npm version is correct" + ) + + t.equal( + current._nodeVersion, + process.versions.node, + "node version is correct" + ) + }, "converted body back into object") + + return {ok: true} + } +}) + +test("cleanup", function (t) { + process.chdir(__dirname) + rimraf(pkg, function (er) { + t.ifError(er) + + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/pwd-prefix.js nodejs-0.11.15/deps/npm/test/tap/pwd-prefix.js --- nodejs-0.11.13/deps/npm/test/tap/pwd-prefix.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/pwd-prefix.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +// This test ensures that a few commands do the same +// thing when the cwd is where package.json is, and when +// the package.json is one level up. 
+ +var test = require("tap").test +var common = require("../common-tap.js") +var path = require("path") +var root = path.resolve(__dirname, "../..") +var lib = path.resolve(root, "lib") +var commands = ["run", "version"] + +commands.forEach(function (cmd) { + // Should get the same stdout and stderr each time + var stdout, stderr + + test(cmd + " in root", function (t) { + common.npm([cmd], {cwd: root}, function (er, code, so, se) { + if (er) throw er + t.notOk(code, "npm " + cmd + " exited with code 0") + stdout = so + stderr = se + t.end() + }) + }) + + test(cmd + " in lib", function (t) { + common.npm([cmd], {cwd: lib}, function (er, code, so, se) { + if (er) throw er + t.notOk(code, "npm " + cmd + " exited with code 0") + t.equal(so, stdout) + t.equal(se, stderr) + t.end() + }) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/referer.js nodejs-0.11.15/deps/npm/test/tap/referer.js --- nodejs-0.11.13/deps/npm/test/tap/referer.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/referer.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,18 +1,17 @@ var common = require("../common-tap.js") var test = require("tap").test var http = require("http") -var server test("should send referer http header", function (t) { - var server = http.createServer(function (q, s) { + http.createServer(function (q, s) { t.equal(q.headers.referer, "install foo") s.statusCode = 404 s.end(JSON.stringify({error: "whatever"})) this.close() }).listen(common.port, function () { - var reg = "--registry=http://localhost:" + common.port - var args = [ "install", "foo", reg ] - common.npm(args, {}, function (er, code, so, se) { + var reg = "http://localhost:" + common.port + var args = [ "install", "foo", "--registry", reg ] + common.npm(args, {}, function (er, code) { if (er) { throw er } diff -Nru nodejs-0.11.13/deps/npm/test/tap/registry.js nodejs-0.11.15/deps/npm/test/tap/registry.js --- nodejs-0.11.13/deps/npm/test/tap/registry.js 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/npm/test/tap/registry.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,75 @@ +// Run all the tests in the `npm-registry-couchapp` suite +// This verifies that the server-side stuff still works. + +var common = require("../common-tap") +var test = require("tap").test + +var npmExec = require.resolve("../../bin/npm-cli.js") +var path = require("path") +var ca = path.resolve(__dirname, "../../node_modules/npm-registry-couchapp") + +var which = require("which") + +var v = process.versions.node.split(".").map(function (n) { return parseInt(n, 10) }) +if (v[0] === 0 && v[1] < 10) { + console.error( + "WARNING: need a recent Node for npm-registry-couchapp tests to run, have", + process.versions.node + ) +} +else { + which("couchdb", function (er) { + if (er) { + console.error("WARNING: need couch to run test: " + er.message) + } + else { + runTests() + } + }) +} + + +function runTests () { + var env = {} + for (var i in process.env) env[i] = process.env[i] + env.npm = npmExec + + var opts = { + cwd: ca, + stdio: "inherit" + } + common.npm(["install"], opts, function (err, code) { + if (err) { throw err } + if (code) { + return test("need install to work", function (t) { + t.fail("install failed with: " + code) + t.end() + }) + + } else { + opts = { + cwd: ca, + env: env, + stdio: "inherit" + } + common.npm(["test"], opts, function (err, code) { + if (err) { throw err } + if (code) { + return test("need test to work", function (t) { + t.fail("test failed with: " + code) + t.end() + }) + } + opts = { + cwd: ca, + env: env, + stdio: "inherit" + } + common.npm(["prune", "--production"], opts, function (err, code) { + if (err) { throw err } + process.exit(code || 0) + }) + }) + } + }) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/repo.js nodejs-0.11.15/deps/npm/test/tap/repo.js --- nodejs-0.11.13/deps/npm/test/tap/repo.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/repo.js 2015-01-20 21:22:17.000000000 +0000 @@ -12,6 +12,10 @@ 
var rimraf = require("rimraf") var spawn = require("child_process").spawn var fs = require("fs") +var path = require('path') +var outFile = path.join(__dirname, '/_output') + +var opts = { cwd: __dirname } test("setup", function (t) { var s = "#!/usr/bin/env bash\n" + @@ -24,21 +28,105 @@ test("npm repo underscore", function (t) { mr(common.port, function (s) { - var c = spawn(node, [ - npm, "repo", "underscore", - "--registry=" + common.registry, - "--loglevel=silent", - "--browser=" + __dirname + "/_script.sh", - ]) - c.stdout.on("data", function(d) { - t.fail("Should not get data on stdout: " + d) - }) - c.stderr.pipe(process.stderr) - c.on("close", function(code) { - t.equal(code, 0, "exit ok") - var res = fs.readFileSync(__dirname + "/_output", "ascii") + common.npm([ + 'repo', 'underscore', + '--registry=' + common.registry, + '--loglevel=silent', + '--browser=' + __dirname + '/_script.sh' + ], opts, function (err, code, stdout, stderr) { + t.equal(code, 0, 'exit ok') + var res = fs.readFileSync(outFile, 'ascii') s.close() t.equal(res, "https://github.com/jashkenas/underscore\n") + rimraf.sync(outFile) + t.end() + }) + }) +}) + + +test('npm repo optimist - github (https://)', function (t) { + mr(common.port, function (s) { + common.npm([ + 'repo', 'optimist', + '--registry=' + common.registry, + '--loglevel=silent', + '--browser=' + __dirname + '/_script.sh' + ], opts, function (err, code, stdout, stderr) { + t.equal(code, 0, 'exit ok') + var res = fs.readFileSync(outFile, 'ascii') + s.close() + t.equal(res, "https://github.com/substack/node-optimist\n") + rimraf.sync(outFile) + t.end() + }) + }) +}) + +test("npm repo npm-test-peer-deps - no repo", function (t) { + mr(common.port, function (s) { + common.npm([ + 'repo', 'npm-test-peer-deps', + '--registry=' + common.registry, + '--loglevel=silent', + '--browser=' + __dirname + '/_script.sh' + ], opts, function (err, code, stdout, stderr) { + t.equal(code, 1, 'exit not ok') + s.close() + t.end() + }) + }) +}) 
+ +test("npm repo test-repo-url-http - non-github (http://)", function (t) { + mr(common.port, function (s) { + common.npm([ + 'repo', 'test-repo-url-http', + '--registry=' + common.registry, + '--loglevel=silent', + '--browser=' + __dirname + '/_script.sh' + ], opts, function (err, code, stdout, stderr) { + t.equal(code, 0, 'exit ok') + var res = fs.readFileSync(outFile, 'ascii') + s.close() + t.equal(res, "http://gitlab.com/evanlucas/test-repo-url-http\n") + rimraf.sync(outFile) + t.end() + }) + }) +}) + +test("npm repo test-repo-url-https - non-github (https://)", function (t) { + mr(common.port, function (s) { + common.npm([ + 'repo', 'test-repo-url-https', + '--registry=' + common.registry, + '--loglevel=silent', + '--browser=' + __dirname + '/_script.sh' + ], opts, function (err, code, stdout, stderr) { + t.equal(code, 0, 'exit ok') + var res = fs.readFileSync(outFile, 'ascii') + s.close() + t.equal(res, "https://gitlab.com/evanlucas/test-repo-url-https\n") + rimraf.sync(outFile) + t.end() + }) + }) +}) + +test("npm repo test-repo-url-ssh - non-github (ssh://)", function (t) { + mr(common.port, function (s) { + common.npm([ + 'repo', 'test-repo-url-ssh', + '--registry=' + common.registry, + '--loglevel=silent', + '--browser=' + __dirname + '/_script.sh' + ], opts, function (err, code, stdout, stderr) { + t.equal(code, 0, 'exit ok') + var res = fs.readFileSync(outFile, 'ascii') + s.close() + t.equal(res, "http://gitlab.com/evanlucas/test-repo-url-ssh\n") + rimraf.sync(outFile) t.end() }) }) @@ -46,7 +134,6 @@ test("cleanup", function (t) { fs.unlinkSync(__dirname + "/_script.sh") - fs.unlinkSync(__dirname + "/_output") t.pass("cleaned up") t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/run-script/package.json nodejs-0.11.15/deps/npm/test/tap/run-script/package.json --- nodejs-0.11.13/deps/npm/test/tap/run-script/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/run-script/package.json 2015-01-20 21:22:17.000000000 
+0000 @@ -0,0 +1,13 @@ +{"name":"runscript" +,"version":"1.2.3" +,"scripts":{ + "start":"node -e 'console.log(process.argv[1] || \"start\")'", + "prewith-pre":"node -e 'console.log(process.argv[1] || \"pre\")'", + "with-pre":"node -e 'console.log(process.argv[1] || \"main\")'", + "with-post":"node -e 'console.log(process.argv[1] || \"main\")'", + "postwith-post":"node -e 'console.log(process.argv[1] || \"post\")'", + "prewith-both":"node -e 'console.log(process.argv[1] || \"pre\")'", + "with-both":"node -e 'console.log(process.argv[1] || \"main\")'", + "postwith-both":"node -e 'console.log(process.argv[1] || \"post\")'" + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/run-script.js nodejs-0.11.15/deps/npm/test/tap/run-script.js --- nodejs-0.11.13/deps/npm/test/tap/run-script.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/run-script.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,84 @@ +var common = require("../common-tap") + , test = require("tap").test + , path = require("path") + , rimraf = require("rimraf") + , mkdirp = require("mkdirp") + , pkg = path.resolve(__dirname, "run-script") + , cache = path.resolve(pkg, "cache") + , tmp = path.resolve(pkg, "tmp") + , opts = { cwd: pkg } + +function testOutput (t, command, er, code, stdout, stderr) { + var lines + + if (er) + throw er + + if (stderr) + throw new Error("npm " + command + " stderr: " + stderr.toString()) + + lines = stdout.trim().split("\n") + stdout = lines.filter(function(line) { + return line.trim() !== "" && line[0] !== '>' + }).join(';') + + t.equal(stdout, command) + t.end() +} + +function cleanup () { + rimraf.sync(cache) + rimraf.sync(tmp) +} + +test("setup", function (t) { + cleanup() + mkdirp.sync(cache) + mkdirp.sync(tmp) + t.end() +}) + +test("npm run-script", function (t) { + common.npm(["run-script", "start"], opts, testOutput.bind(null, t, "start")) +}) + +test("npm run-script with args", function (t) { + common.npm(["run-script", "start", "--", "stop"], 
opts, testOutput.bind(null, t, "stop")) +}) + +test("npm run-script with args that contain spaces", function (t) { + common.npm(["run-script", "start", "--", "hello world"], opts, testOutput.bind(null, t, "hello world")) +}) + +test("npm run-script with args that contain single quotes", function (t) { + common.npm(["run-script", "start", "--", "they're awesome"], opts, testOutput.bind(null, t, "they're awesome")) +}) + +test("npm run-script with args that contain double quotes", function (t) { + common.npm(["run-script", "start", "--", "what's \"up\"?"], opts, testOutput.bind(null, t, "what's \"up\"?")) +}) + +test("npm run-script with pre script", function (t) { + common.npm(["run-script", "with-post"], opts, testOutput.bind(null, t, "main;post")) +}) + +test("npm run-script with post script", function (t) { + common.npm(["run-script", "with-pre"], opts, testOutput.bind(null, t, "pre;main")) +}) + +test("npm run-script with both pre and post script", function (t) { + common.npm(["run-script", "with-both"], opts, testOutput.bind(null, t, "pre;main;post")) +}) + +test("npm run-script with both pre and post script and with args", function (t) { + common.npm(["run-script", "with-both", "--", "an arg"], opts, testOutput.bind(null, t, "pre;an arg;post")) +}) + +test("npm run-script explicitly call pre script with arg", function (t) { + common.npm(["run-script", "prewith-pre", "--", "an arg"], opts, testOutput.bind(null, t, "an arg")) +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/scripts-whitespace-windows.js nodejs-0.11.15/deps/npm/test/tap/scripts-whitespace-windows.js --- nodejs-0.11.13/deps/npm/test/tap/scripts-whitespace-windows.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/scripts-whitespace-windows.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,71 +1,54 @@ -var test = require('tap').test -var path = require('path') -var npm = path.resolve(__dirname, '../../cli.js') -var pkg = 
__dirname + '/scripts-whitespace-windows' -var tmp = pkg + '/tmp' -var cache = pkg + '/cache' -var modules = pkg + '/node_modules' -var dep = pkg + '/dep' - -var mkdirp = require('mkdirp') -var rimraf = require('rimraf') -var node = process.execPath -var spawn = require('child_process').spawn +var test = require("tap").test +var common = require("../common-tap") +var path = require("path") +var pkg = path.resolve(__dirname, "scripts-whitespace-windows") +var tmp = path.resolve(pkg, "tmp") +var cache = path.resolve(pkg, "cache") +var modules = path.resolve(pkg, "node_modules") +var dep = path.resolve(pkg, "dep") -test('setup', function (t) { +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") + +test("setup", function (t) { + cleanup() mkdirp.sync(cache) mkdirp.sync(tmp) - rimraf.sync(modules) - var env = { - npm_config_cache: cache, - npm_config_tmp: tmp, - npm_config_prefix: pkg, - npm_config_global: 'false' - } - - var child = spawn(node, [npm, 'i', dep], { + common.npm(["i", dep], { cwd: pkg, - env: env - }) - - child.stdout.setEncoding('utf8') - child.stderr.on('data', function(chunk) { - throw new Error('got stderr data: ' + JSON.stringify('' + chunk)) - }) - child.on('close', function () { + env: { + "npm_config_cache": cache, + "npm_config_tmp": tmp, + "npm_config_prefix": pkg, + "npm_config_global": "false" + } + }, function (err, code, stdout, stderr) { + t.ifErr(err, "npm i " + dep + " finished without error") + t.equal(code, 0, "npm i " + dep + " exited ok") + t.notOk(stderr, "no output stderr") t.end() }) }) -test('test', function (t) { - - var child = spawn(node, [npm, 'run', 'foo'], { - cwd: pkg, - env: process.env - }) - - child.stdout.setEncoding('utf8') - child.stderr.on('data', function(chunk) { - throw new Error('got stderr data: ' + JSON.stringify('' + chunk)) +test("test", function (t) { + common.npm(["run", "foo"], { cwd: pkg }, function (err, code, stdout, stderr) { + t.ifErr(err, "npm run finished without error") + t.equal(code, 
0, "npm run exited ok") + t.notOk(stderr, "no output stderr: ", stderr) + stdout = stdout.trim() + t.ok(/npm-test-fine/.test(stdout)) + t.end() }) - child.stdout.on('data', ondata) - child.on('close', onend) - var c = '' - function ondata (chunk) { - c += chunk - } - function onend () { - c = c.trim() +}) - t.ok(/npm-test-fine/.test(c)) - t.end() - } +test("cleanup", function (t) { + cleanup() + t.end() }) -test('cleanup', function (t) { +function cleanup() { rimraf.sync(cache) rimraf.sync(tmp) rimraf.sync(modules) - t.end() -}) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/search.js nodejs-0.11.15/deps/npm/test/tap/search.js --- nodejs-0.11.13/deps/npm/test/tap/search.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/search.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,265 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var rimraf = require("rimraf") +var mr = require("npm-registry-mock") +var fs = require("fs") +var path = require("path") +var pkg = path.resolve(__dirname, "search") +var cache = path.resolve(pkg, "cache") +var registryCache = path.resolve(cache, "localhost_1337", "-", "all") +var cacheJsonFile = path.resolve(registryCache, ".cache.json") +var mkdirp = require("mkdirp") + +var timeMock = { + epoch: 1411727900, + future: 1411727900+100, + all: 1411727900+25, + since: 0 // filled by since server callback +} + +var EXEC_OPTS = {} + +function cleanupCache() { + rimraf.sync(cache) +} +function cleanup () { cleanupCache() } + +function setupCache() { + mkdirp.sync(cache) + mkdirp.sync(registryCache) + var res = fs.writeFileSync(cacheJsonFile, stringifyUpdated(timeMock.epoch)) + if (res) throw new Error("Creating cache file failed") +} + +var mocks = { + /* Since request, always response with an _update time > the time requested */ + sinceFuture: function(server) { + server.filteringPathRegEx(/startkey=[^&]*/g, function(s) { + var _allMock = JSON.parse(JSON.stringify(allMock)) + timeMock.since = 
_allMock._updated = s.replace("startkey=", "") + server.get("/-/all/since?stale=update_after&" + s) + .reply(200, _allMock) + return s + }) + }, + allFutureUpdatedOnly: function(server) { + server.get("/-/all") + .reply(200, stringifyUpdated(timeMock.future)) + }, + all: function(server) { + server.get("/-/all") + .reply(200, allMock) + } +} + + +test("No previous cache, init cache triggered by first search", function(t) { + cleanupCache() + + mr({ port: common.port, mocks: mocks.allFutureUpdatedOnly }, function (s) { + common.npm([ + "search", "do not do extra search work on my behalf", + "--registry", common.registry, + "--cache", cache, + "--loglevel", "silent", + "--color", "always" + ], + EXEC_OPTS, + function(err, code) { + s.close() + t.equal(code, 0, "search finished successfully") + t.ifErr(err, "search finished successfully") + + t.ok(fs.existsSync(cacheJsonFile), + cacheJsonFile + " expected to have been created") + var cacheData = JSON.parse(fs.readFileSync(cacheJsonFile, "utf8")) + t.equal(cacheData._updated, String(timeMock.future)) + t.end() + }) + }) +}) + +test("previous cache, _updated set, should trigger since request", function(t) { + cleanupCache() + setupCache() + + function m(server) { + [ mocks.all, mocks.sinceFuture ].forEach(function(m) { + m(server) + }) + } + mr({ port: common.port, mocks: m }, function (s) { + common.npm([ + "search", "do not do extra search work on my behalf", + "--registry", common.registry, + "--cache", cache, + "--loglevel", "silly", + "--color", "always" + ], + EXEC_OPTS, + function(err, code) { + s.close() + t.equal(code, 0, "search finished successfully") + t.ifErr(err, "search finished successfully") + + var cacheData = JSON.parse(fs.readFileSync(cacheJsonFile, "utf8")) + t.equal(cacheData._updated, + timeMock.since, + "cache update time gotten from since response") + cleanupCache() + t.end() + }) + }) +}) + + +var searches = [ + { + term: "f36b6a6123da50959741e2ce4d634f96ec668c56", + description: "non-regex", + 
location: 241 + }, + { + term: "/f36b6a6123da50959741e2ce4d634f96ec668c56/", + description: "regex", + location: 241 + } +] + +searches.forEach(function(search) { + test(search.description + " search in color", function(t) { + cleanupCache() + mr({ port: common.port, mocks: mocks.all }, function (s) { + common.npm([ + "search", search.term, + "--registry", common.registry, + "--cache", cache, + "--loglevel", "silent", + "--color", "always" + ], + EXEC_OPTS, + function(err, code, stdout) { + s.close() + t.equal(code, 0, "search finished successfully") + t.ifErr(err, "search finished successfully") + // \033 == \u001B + var markStart = "\u001B\\[[0-9][0-9]m" + var markEnd = "\u001B\\[0m" + + var re = new RegExp(markStart + ".*?" + markEnd) + + var cnt = stdout.search(re) + t.equal(cnt, search.location, + search.description + " search for " + search.term) + t.end() + }) + }) + }) +}) + +test("cleanup", function (t) { + cleanup() + t.end() +}) + +function stringifyUpdated(time) { + return JSON.stringify({ _updated : String(time) }) +} + +var allMock = { + "_updated": timeMock.all, + "generator-frontcow": { + "name": "generator-frontcow", + "description": "f36b6a6123da50959741e2ce4d634f96ec668c56 This is a fake description to ensure we do not accidentally search the real npm registry or use some kind of cache", + "dist-tags": { + "latest": "0.1.19" + }, + "maintainers": [ + { + "name": "bcabanes", + "email": "contact@benjamincabanes.com" + } + ], + "homepage": "https://github.com/bcabanes/generator-frontcow", + "keywords": [ + "sass", + "frontend", + "yeoman-generator", + "atomic", + "design", + "sass", + "foundation", + "foundation5", + "atomic design", + "bourbon", + "polyfill", + "font awesome" + ], + "repository": { + "type": "git", + "url": "https://github.com/bcabanes/generator-frontcow" + }, + "author": { + "name": "ben", + "email": "contact@benjamincabanes.com", + "url": "https://github.com/bcabanes" + }, + "bugs": { + "url": 
"https://github.com/bcabanes/generator-frontcow/issues" + }, + "license": "MIT", + "readmeFilename": "README.md", + "time": { + "modified": "2014-10-03T02:26:18.406Z" + }, + "versions": { + "0.1.19": "latest" + } + }, + "marko": { + "name": "marko", + "description": "Marko is an extensible, streaming, asynchronous, high performance, HTML-based templating language that can be used in Node.js or in the browser.", + "dist-tags": { + "latest": "1.2.16" + }, + "maintainers": [ + { + "name": "pnidem", + "email": "pnidem@gmail.com" + }, + { + "name": "philidem", + "email": "phillip.idem@gmail.com" + } + ], + "homepage": "https://github.com/raptorjs/marko", + "keywords": [ + "templating", + "template", + "async", + "streaming" + ], + "repository": { + "type": "git", + "url": "https://github.com/raptorjs/marko.git" + }, + "author": { + "name": "Patrick Steele-Idem", + "email": "pnidem@gmail.com" + }, + "bugs": { + "url": "https://github.com/raptorjs/marko/issues" + }, + "license": "Apache License v2.0", + "readmeFilename": "README.md", + "users": { + "pnidem": true + }, + "time": { + "modified": "2014-10-03T02:27:31.775Z" + }, + "versions": { + "1.2.16": "latest" + } + } +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/semver-doc.js nodejs-0.11.15/deps/npm/test/tap/semver-doc.js --- nodejs-0.11.13/deps/npm/test/tap/semver-doc.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/semver-doc.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,11 +1,11 @@ var test = require("tap").test -test("semver doc is up to date", function(t) { +test("semver doc is up to date", function (t) { var path = require("path") var moddoc = path.join(__dirname, "../../node_modules/semver/README.md") var mydoc = path.join(__dirname, "../../doc/misc/semver.md") var fs = require("fs") - var mod = fs.readFileSync(moddoc, "utf8").replace(/semver\(1\)/, 'semver(7)') + var mod = fs.readFileSync(moddoc, "utf8").replace(/semver\(1\)/, "semver(7)") var my = fs.readFileSync(mydoc, "utf8") 
t.equal(my, mod) t.end() diff -Nru nodejs-0.11.13/deps/npm/test/tap/semver-tag.js nodejs-0.11.15/deps/npm/test/tap/semver-tag.js --- nodejs-0.11.13/deps/npm/test/tap/semver-tag.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/semver-tag.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +// should not allow tagging with a valid semver range +var common = require("../common-tap.js") +var test = require("tap").test + +test("try to tag with semver range as tag name", function (t) { + var cmd = ["tag", "zzzz@1.2.3", "v2.x", "--registry=http://localhost"] + common.npm(cmd, { + stdio: "pipe" + }, function (er, code, so, se) { + if (er) throw er + t.similar(se, /Tag name must not be a valid SemVer range: v2.x\n/) + t.equal(code, 1) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/shrinkwrap-empty-deps.js nodejs-0.11.15/deps/npm/test/tap/shrinkwrap-empty-deps.js --- nodejs-0.11.13/deps/npm/test/tap/shrinkwrap-empty-deps.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/shrinkwrap-empty-deps.js 2015-01-20 21:22:17.000000000 +0000 @@ -6,7 +6,8 @@ , fs = require("fs") , osenv = require("osenv") , rimraf = require("rimraf") - , pkg = __dirname + "/shrinkwrap-empty-deps" + , pkg = path.resolve(__dirname, "shrinkwrap-empty-deps") + , cache = path.resolve(pkg, "cache") test("returns a list of removed items", function (t) { var desiredResultsPath = path.resolve(pkg, "npm-shrinkwrap.json") @@ -36,7 +37,7 @@ function setup (cb) { cleanup() process.chdir(pkg) - npm.load({cache: pkg + "/cache", registry: common.registry}, function () { + npm.load({cache: cache, registry: common.registry}, function () { cb() }) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/sorted-package-json.js nodejs-0.11.15/deps/npm/test/tap/sorted-package-json.js --- nodejs-0.11.13/deps/npm/test/tap/sorted-package-json.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/sorted-package-json.js 2015-01-20 21:22:17.000000000 
+0000 @@ -30,11 +30,11 @@ var child = spawn(node, [npm, "install", "--save", "underscore@1.3.3"], { cwd: pkg, env: { - npm_config_registry: common.registry, - npm_config_cache: cache, - npm_config_tmp: tmp, - npm_config_prefix: pkg, - npm_config_global: "false", + "npm_config_registry": common.registry, + "npm_config_cache": cache, + "npm_config_tmp": tmp, + "npm_config_prefix": pkg, + "npm_config_global": "false", HOME: process.env.HOME, Path: process.env.PATH, PATH: process.env.PATH @@ -42,6 +42,7 @@ }) child.on("close", function (code) { + t.equal(code, 0, "npm install exited with code") var result = fs.readFileSync(packageJson).toString() , resultAsJson = JSON.parse(result) @@ -83,7 +84,7 @@ "underscore": "^1.3.3", "request": "^0.9.0" } - }, null, 2), 'utf8') + }, null, 2), "utf8") } function cleanup() { diff -Nru nodejs-0.11.13/deps/npm/test/tap/spawn-enoent.js nodejs-0.11.15/deps/npm/test/tap/spawn-enoent.js --- nodejs-0.11.13/deps/npm/test/tap/spawn-enoent.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/spawn-enoent.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,40 @@ +var path = require("path") +var test = require("tap").test +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var common = require("../common-tap.js") + +var pkg = path.resolve(__dirname, "spawn-enoent") +var pj = JSON.stringify({ + name:"x", + version: "1.2.3", + scripts: { start: "wharble-garble-blorst" } +}, null, 2) + "\n" + + +test("setup", function (t) { + rimraf.sync(pkg) + mkdirp.sync(pkg) + fs.writeFileSync(pkg + "/package.json", pj) + t.end() +}) + +test("enoent script", function (t) { + common.npm(["start"], { + cwd: pkg, + env: { + PATH: process.env.PATH, + Path: process.env.Path, + "npm_config_loglevel": "warn" + } + }, function (er, code, sout, serr) { + t.similar(serr, /npm ERR! 
Failed at the x@1\.2\.3 start script\./) + t.end() + }) +}) + +test("clean", function (t) { + rimraf.sync(pkg) + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/startstop/package.json nodejs-0.11.15/deps/npm/test/tap/startstop/package.json --- nodejs-0.11.13/deps/npm/test/tap/startstop/package.json 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/startstop/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ {"name":"startstop" ,"version":"1.2.3" ,"scripts":{ - "start":"node -e 'console.log(\"start\")'", - "stop":"node -e 'console.log(\"stop\")'" + "start":"node -e \"console.log('start')\"", + "stop":"node -e \"console.log('stop')\"" } } diff -Nru nodejs-0.11.13/deps/npm/test/tap/startstop.js nodejs-0.11.15/deps/npm/test/tap/startstop.js --- nodejs-0.11.13/deps/npm/test/tap/startstop.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/startstop.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,64 +1,60 @@ -var common = require('../common-tap') - , test = require('tap').test - , path = require('path') - , spawn = require('child_process').spawn - , rimraf = require('rimraf') - , mkdirp = require('mkdirp') - , pkg = __dirname + '/startstop' - , cache = pkg + '/cache' - , tmp = pkg + '/tmp' - , node = process.execPath - , npm = path.resolve(__dirname, '../../cli.js') +var common = require("../common-tap") + , test = require("tap").test + , path = require("path") + , rimraf = require("rimraf") + , mkdirp = require("mkdirp") + , pkg = path.resolve(__dirname, "startstop") + , cache = path.resolve(pkg, "cache") + , tmp = path.resolve(pkg, "tmp") , opts = { cwd: pkg } function testOutput (t, command, er, code, stdout, stderr) { - if (er) - throw er + t.notOk(code, "npm " + command + " exited with code 0") if (stderr) - throw new Error('npm ' + command + ' stderr: ' + stderr.toString()) + throw new Error("npm " + command + " stderr: " + stderr.toString()) - stdout = stdout.trim().split('\n') + stdout = 
stdout.trim().split(/\n|\r/) stdout = stdout[stdout.length - 1] t.equal(stdout, command) t.end() } function cleanup () { - rimraf.sync(pkg + '/cache') - rimraf.sync(pkg + '/tmp') + rimraf.sync(cache) + rimraf.sync(tmp) } -test('setup', function (t) { +test("setup", function (t) { cleanup() - mkdirp.sync(pkg + '/cache') - mkdirp.sync(pkg + '/tmp') + mkdirp.sync(cache) + mkdirp.sync(tmp) t.end() }) -test('npm start', function (t) { - common.npm(['start'], opts, testOutput.bind(null, t, "start")) +test("npm start", function (t) { + common.npm(["start"], opts, testOutput.bind(null, t, "start")) }) -test('npm stop', function (t) { - common.npm(['stop'], opts, testOutput.bind(null, t, "stop")) +test("npm stop", function (t) { + common.npm(["stop"], opts, testOutput.bind(null, t, "stop")) }) -test('npm restart', function (t) { - common.npm(['restart'], opts, function (er, c, stdout, stderr) { +test("npm restart", function (t) { + common.npm(["restart"], opts, function (er, c, stdout) { if (er) throw er - var output = stdout.split('\n').filter(function (val) { + var output = stdout.split("\n").filter(function (val) { return val.match(/^s/) }) - t.same(output.sort(), ['start', 'stop'].sort()) + t.same(output.sort(), ["start", "stop"].sort()) t.end() }) }) -test('cleanup', function (t) { +test("cleanup", function (t) { cleanup() t.end() }) diff -Nru nodejs-0.11.13/deps/npm/test/tap/test-run-ls.js nodejs-0.11.15/deps/npm/test/tap/test-run-ls.js --- nodejs-0.11.13/deps/npm/test/tap/test-run-ls.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/test-run-ls.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var path = require("path") +var cwd = path.resolve(__dirname, "..", "..") +var testscript = require("../../package.json").scripts.test +var tsregexp = testscript.replace(/([\[\.\*\]])/g, "\\$1") + +test("default", function (t) { + common.npm(["run"], { cwd: cwd }, function 
(er, code, so) { + if (er) throw er + t.notOk(code) + t.similar(so, new RegExp("\\n test\\n " + tsregexp + "\\n")) + t.end() + }) +}) + +test("parseable", function (t) { + common.npm(["run", "-p"], { cwd: cwd }, function (er, code, so) { + if (er) throw er + t.notOk(code) + t.similar(so, new RegExp("\\ntest:" + tsregexp + "\\n")) + t.end() + }) +}) + +test("parseable", function (t) { + common.npm(["run", "--json"], { cwd: cwd }, function (er, code, so) { + if (er) throw er + t.notOk(code) + t.equal(JSON.parse(so).test, testscript) + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/uninstall-package.js nodejs-0.11.15/deps/npm/test/tap/uninstall-package.js --- nodejs-0.11.13/deps/npm/test/tap/uninstall-package.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/uninstall-package.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,7 +3,8 @@ , rimraf = require("rimraf") , mr = require("npm-registry-mock") , common = require("../common-tap.js") - , pkg = __dirname + "/uninstall-package" + , path = require("path") + , pkg = path.join(__dirname, "uninstall-package") test("returns a list of removed items", function (t) { t.plan(1) Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/test/tap/unpack-foreign-tarball/gitignore-and-npmignore.tar and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/test/tap/unpack-foreign-tarball/gitignore-and-npmignore.tar differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/test/tap/unpack-foreign-tarball/gitignore-and-npmignore.tgz and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/test/tap/unpack-foreign-tarball/gitignore-and-npmignore.tgz differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/test/tap/unpack-foreign-tarball/gitignore.tgz and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/test/tap/unpack-foreign-tarball/gitignore.tgz differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/npm/test/tap/unpack-foreign-tarball/npmignore.tgz and 
/tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/npm/test/tap/unpack-foreign-tarball/npmignore.tgz differ diff -Nru nodejs-0.11.13/deps/npm/test/tap/unpack-foreign-tarball.js nodejs-0.11.15/deps/npm/test/tap/unpack-foreign-tarball.js --- nodejs-0.11.13/deps/npm/test/tap/unpack-foreign-tarball.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/unpack-foreign-tarball.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,76 @@ +var test = require("tap").test +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var common = require("../common-tap.js") +var path = require("path") +var fs = require("fs") +var dir = path.resolve(__dirname, "unpack-foreign-tarball") +var root = path.resolve(dir, "root") +var nm = path.resolve(root, "node_modules") +var cache = path.resolve(dir, "cache") +var tmp = path.resolve(dir, "tmp") +var pkg = path.resolve(nm, "npm-test-gitignore") + +var env = { + "npm_config_cache": cache, + "npm_config_tmp": tmp +} + +var conf = { + env: env, + cwd: root, + stdio: [ "pipe", "pipe", 2 ] +} + +function verify (t, files, err, code) { + if (code) { + t.fail("exited with failure: " + code) + return t.end() + } + var actual = fs.readdirSync(pkg).sort() + var expect = files.concat([".npmignore", "package.json"]).sort() + t.same(actual, expect) + t.end() +} + +test("npmignore only", function (t) { + setup() + var file = path.resolve(dir, "npmignore.tgz") + common.npm(["install", file], conf, verify.bind(null, t, ["foo"])) +}) + +test("gitignore only", function (t) { + setup() + var file = path.resolve(dir, "gitignore.tgz") + common.npm(["install", file], conf, verify.bind(null, t, ["foo"])) +}) + +test("gitignore and npmignore", function (t) { + setup() + var file = path.resolve(dir, "gitignore-and-npmignore.tgz") + common.npm(["install", file], conf, verify.bind(null, t, ["foo", "bar"])) +}) + +test("gitignore and npmignore, not gzipped", function (t) { + setup() + var file = path.resolve(dir, "gitignore-and-npmignore.tar") 
+ common.npm(["install", file], conf, verify.bind(null, t, ["foo", "bar"])) +}) + +test("clean", function (t) { + clean() + t.end() +}) + +function setup () { + clean() + mkdirp.sync(nm) + mkdirp.sync(cache) + mkdirp.sync(tmp) +} + +function clean () { + rimraf.sync(root) + rimraf.sync(cache) + rimraf.sync(tmp) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/update-save/index.js nodejs-0.11.15/deps/npm/test/tap/update-save/index.js --- nodejs-0.11.13/deps/npm/test/tap/update-save/index.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/update-save/index.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +module.exports = true diff -Nru nodejs-0.11.13/deps/npm/test/tap/update-save/package.json nodejs-0.11.15/deps/npm/test/tap/update-save/package.json --- nodejs-0.11.13/deps/npm/test/tap/update-save/package.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/update-save/package.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "update-save-example", + "version": "1.2.3", + "dependencies": { + "mkdirp": "~0.3.0" + }, + "devDependencies": { + "underscore": "~1.3.1" + } +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/npm/test/tap/update-save/README.md nodejs-0.11.15/deps/npm/test/tap/update-save/README.md --- nodejs-0.11.13/deps/npm/test/tap/update-save/README.md 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/update-save/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +# just a test diff -Nru nodejs-0.11.13/deps/npm/test/tap/update-save.js nodejs-0.11.15/deps/npm/test/tap/update-save.js --- nodejs-0.11.13/deps/npm/test/tap/update-save.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/update-save.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,160 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var npm = require("../../") +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") +var fs 
= require("fs") +var path = require("path") +var mr = require("npm-registry-mock") + +var PKG_DIR = path.resolve(__dirname, "update-save") +var PKG = path.resolve(PKG_DIR, "package.json") +var CACHE_DIR = path.resolve(PKG_DIR, "cache") +var MODULES_DIR = path.resolve(PKG_DIR, "node_modules") + +var EXEC_OPTS = { + cwd: PKG_DIR, + stdio: "ignore", + env: { + "npm_config_registry": common.registry, + "npm_config_loglevel": "verbose" + } +} + +var DEFAULT_PKG = { + "name": "update-save-example", + "version": "1.2.3", + "dependencies": { + "mkdirp": "~0.3.0" + }, + "devDependencies": { + "underscore": "~1.3.1" + } +} + +var s // mock server reference + +test("setup", function (t) { + resetPackage() + + mr(common.port, function (server) { + npm.load({cache: CACHE_DIR, registry: common.registry}, function (err) { + t.ifError(err) + s = server + t.end() + }) + }) +}) + +test("update regular dependencies only", function (t) { + resetPackage() + + common.npm(["update", "--save"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm update exited with code 0") + + var pkgdata = JSON.parse(fs.readFileSync(PKG, "utf8")) + t.deepEqual(pkgdata.dependencies, {mkdirp: "^0.3.5"}, "only dependencies updated") + t.deepEqual(pkgdata.devDependencies, DEFAULT_PKG.devDependencies, "dev dependencies should be untouched") + t.deepEqual(pkgdata.optionalDependencies, DEFAULT_PKG.optionalDependencies, "optional dependencies should be untouched") + t.end() + }) +}) + +test("update devDependencies only", function (t) { + resetPackage() + + common.npm(["update", "--save-dev"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm update exited with code 0") + + var pkgdata = JSON.parse(fs.readFileSync(PKG, "utf8")) + t.deepEqual(pkgdata.dependencies, DEFAULT_PKG.dependencies, "dependencies should be untouched") + t.deepEqual(pkgdata.devDependencies, {underscore: "^1.3.3"}, "dev dependencies should be updated") + t.deepEqual(pkgdata.optionalDependencies, 
DEFAULT_PKG.optionalDependencies, "optional dependencies should be untouched") + t.end() + }) +}) + +test("update optionalDependencies only", function (t) { + resetPackage({ + "optionalDependencies": { + "underscore": "~1.3.1" + } + }) + + common.npm(["update", "--save-optional"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm update exited with code 0") + + var pkgdata = JSON.parse(fs.readFileSync(PKG, "utf8")) + t.deepEqual(pkgdata.dependencies, DEFAULT_PKG.dependencies, "dependencies should be untouched") + t.deepEqual(pkgdata.devDependencies, DEFAULT_PKG.devDependencies, "dev dependencies should be untouched") + t.deepEqual(pkgdata.optionalDependencies, {underscore: "^1.3.3"}, "optional dependencies should be updated") + t.end() + }) +}) + +test("optionalDependencies are merged into dependencies during --save", function (t) { + var pkg = resetPackage({ + "optionalDependencies": { + "underscore": "~1.3.1" + } + }) + + common.npm(["update", "--save"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm update exited with code 0") + + var pkgdata = JSON.parse(fs.readFileSync(PKG, "utf8")) + t.deepEqual(pkgdata.dependencies, {mkdirp: "^0.3.5"}, "dependencies should not include optional dependencies") + t.deepEqual(pkgdata.devDependencies, pkg.devDependencies, "dev dependencies should be untouched") + t.deepEqual(pkgdata.optionalDependencies, pkg.optionalDependencies, "optional dependencies should be untouched") + t.end() + }) +}) + +test("semver prefix is replaced with configured save-prefix", function (t) { + resetPackage() + + common.npm(["update", "--save", "--save-prefix", "~"], EXEC_OPTS, function (err, code) { + t.ifError(err) + t.notOk(code, "npm update exited with code 0") + + var pkgdata = JSON.parse(fs.readFileSync(PKG, "utf8")) + t.deepEqual(pkgdata.dependencies, { + mkdirp: "~0.3.5" + }, "dependencies should be updated") + t.deepEqual(pkgdata.devDependencies, DEFAULT_PKG.devDependencies, "dev dependencies 
should be untouched") + t.deepEqual(pkgdata.optionalDependencies, DEFAULT_PKG.optionalDependencies, "optional dependencies should be updated") + t.end() + }) +}) + +function resetPackage(extendWith) { + rimraf.sync(CACHE_DIR) + rimraf.sync(MODULES_DIR) + mkdirp.sync(CACHE_DIR) + var pkg = clone(DEFAULT_PKG) + extend(pkg, extendWith) + for (var key in extend) { pkg[key] = extend[key]} + fs.writeFileSync(PKG, JSON.stringify(pkg, null, 2), "ascii") + return pkg +} + +test("cleanup", function (t) { + s.close() + resetPackage() // restore package.json + rimraf.sync(CACHE_DIR) + rimraf.sync(MODULES_DIR) + t.end() +}) + +function clone(a) { + return extend({}, a) +} + +function extend(a, b) { + for (var key in b) { a[key] = b[key]} + return a +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/url-dependencies.js nodejs-0.11.15/deps/npm/test/tap/url-dependencies.js --- nodejs-0.11.13/deps/npm/test/tap/url-dependencies.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/url-dependencies.js 2015-01-20 21:22:17.000000000 +0000 @@ -3,11 +3,8 @@ var path = require("path") var osenv = require("osenv") var mr = require("npm-registry-mock") -var spawn = require("child_process").spawn -var npm = require.resolve("../../bin/npm-cli.js") -var node = process.execPath var pkg = path.resolve(__dirname, "url-dependencies") -var common = require('../common-tap') +var common = require("../common-tap") var mockRoutes = { "get": { @@ -15,27 +12,27 @@ } } -test("url-dependencies: download first time", function(t) { +test("url-dependencies: download first time", function (t) { cleanup() - performInstall(function(output){ - if(!tarballWasFetched(output)){ + performInstall(t, function (output){ + if (!tarballWasFetched(output)){ t.fail("Tarball was not fetched") - }else{ + } else { t.pass("Tarball was fetched") } t.end() }) }) -test("url-dependencies: do not download subsequent times", function(t) { +test("url-dependencies: do not download subsequent times", function (t) { 
cleanup() - performInstall(function(){ - performInstall(function(output){ - if(tarballWasFetched(output)){ + performInstall(t, function () { + performInstall(t, function (output) { + if (tarballWasFetched(output)){ t.fail("Tarball was fetched second time around") - }else{ + } else { t.pass("Tarball was not fetched") } t.end() @@ -44,30 +41,28 @@ }) function tarballWasFetched(output){ - return output.indexOf("http GET " + common.registry + "/underscore/-/underscore-1.3.1.tgz") > -1 + return output.indexOf("http fetch GET " + common.registry + "/underscore/-/underscore-1.3.1.tgz") > -1 } -function performInstall (cb) { - mr({port: common.port, mocks: mockRoutes}, function(s){ - var output = "" - , child = spawn(node, [npm, "install"], { - cwd: pkg, - env: { - npm_config_registry: common.registry, - npm_config_cache_lock_stale: 1000, - npm_config_cache_lock_wait: 1000, - HOME: process.env.HOME, - Path: process.env.PATH, - PATH: process.env.PATH - } - }) - - child.stderr.on("data", function(data){ - output += data.toString() - }) - child.on("close", function () { +function performInstall (t, cb) { + mr({port: common.port, mocks: mockRoutes}, function (s) { + var opts = { + cwd : pkg, + env: { + "npm_config_registry": common.registry, + "npm_config_cache_lock_stale": 1000, + "npm_config_cache_lock_wait": 1000, + "npm_config_loglevel": "http", + HOME: process.env.HOME, + Path: process.env.PATH, + PATH: process.env.PATH + } + } + common.npm(["install"], opts, function (err, code, stdout, stderr) { + t.ifError(err, "install success") + t.notOk(code, "npm install exited with code 0") s.close() - cb(output) + cb(stderr) }) }) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/version-no-git.js nodejs-0.11.15/deps/npm/test/tap/version-no-git.js --- nodejs-0.11.13/deps/npm/test/tap/version-no-git.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/version-no-git.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +var common = require("../common-tap.js") 
+var test = require("tap").test +var npm = require("../../") +var osenv = require("osenv") +var path = require("path") +var fs = require("fs") +var mkdirp = require("mkdirp") +var rimraf = require("rimraf") +var requireInject = require("require-inject") + +var pkg = path.resolve(__dirname, "version-no-git") +var cache = path.resolve(pkg, "cache") +var gitDir = path.resolve(pkg, ".git") + +test("npm version <semver> in a git repo without the git binary", function(t) { + setup() + npm.load({cache: cache, registry: common.registry}, function() { + var version = requireInject("../../lib/version", { + which: function(cmd, cb) { + process.nextTick(function() { + cb(new Error('ENOGIT!')) + }) + } + }) + + version(["patch"], function(err) { + if (err) return t.fail("Error performing version patch") + var p = path.resolve(pkg, "package") + var testPkg = require(p) + t.equal("0.0.1", testPkg.version, "\"" + testPkg.version+"\" === \"0.0.1\"") + t.end() + }) + }) +}) + +test("cleanup", function(t) { + process.chdir(osenv.tmpdir()) + + rimraf.sync(pkg) + t.end() +}) + +function setup() { + mkdirp.sync(pkg) + mkdirp.sync(cache) + mkdirp.sync(gitDir) + fs.writeFileSync(path.resolve(pkg, "package.json"), JSON.stringify({ + author: "Terin Stock", + name: "version-no-git-test", + version: "0.0.0", + description: "Test for npm version if git binary doesn't exist" + }), "utf8") + process.chdir(pkg) +} diff -Nru nodejs-0.11.13/deps/npm/test/tap/version-no-tags.js nodejs-0.11.15/deps/npm/test/tap/version-no-tags.js --- nodejs-0.11.13/deps/npm/test/tap/version-no-tags.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/version-no-tags.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,47 @@ -var common = require('../common-tap.js') -var test = require('tap').test -var npm = require('../../') -var npmc = require.resolve('../../') -var osenv = require('osenv') -var path = require('path') -var fs = require('fs') -var rimraf = require('rimraf') -var mkdirp = 
require('mkdirp') -var which = require('which') -var util = require('util') -var spawn = require('child_process').spawn -var args = [ npmc - , 'version' - , 'patch' - , '--no-git-tag-version' - ] -var pkg = __dirname + '/version-no-tags' +var common = require("../common-tap.js") +var test = require("tap").test +var npm = require("../../") +var osenv = require("osenv") +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var which = require("which") +var spawn = require("child_process").spawn + +var pkg = path.resolve(__dirname, "version-no-tags") +var cache = path.resolve(pkg, "cache") test("npm version <semver> without git tag", function (t) { setup() - npm.load({ cache: pkg + '/cache', registry: common.registry}, function () { - which('git', function(err, git) { + npm.load({ cache: cache, registry: common.registry}, function () { + which("git", function (err, git) { + t.ifError(err, "git found on system") function tagExists(tag, _cb) { - var child = spawn(git, ['tag', '-l', tag]) - var out = '' - child.stdout.on('data', function(d) { - out += data.toString() + var child1 = spawn(git, ["tag", "-l", tag]) + var out = "" + child1.stdout.on("data", function (d) { + out += d.toString() }) - child.on('exit', function() { + child1.on("exit", function () { return _cb(null, Boolean(~out.indexOf(tag))) }) } - var child = spawn(git, ['init']) - child.stdout.pipe(process.stdout) - child.on('exit', function() { - npm.config.set('git-tag-version', false) - npm.commands.version(['patch'], function(err) { - if (err) return t.fail('Error perform version patch') - var testPkg = require(pkg+'/package') - if (testPkg.version !== '0.0.1') t.fail(testPkg.version+' !== \'0.0.1\'') - t.ok('0.0.1' === testPkg.version) - tagExists('v0.0.1', function(err, exists) { - t.equal(exists, false, 'git tag DOES exist') - t.pass('git tag does not exist') + var child2 = spawn(git, ["init"]) + child2.stdout.pipe(process.stdout) + 
child2.on("exit", function () { + npm.config.set("git-tag-version", false) + npm.commands.version(["patch"], function (err) { + if (err) return t.fail("Error perform version patch") + var p = path.resolve(pkg, "package") + var testPkg = require(p) + if (testPkg.version !== "0.0.1") t.fail(testPkg.version+" !== \"0.0.1\"") + t.equal("0.0.1", testPkg.version) + tagExists("v0.0.1", function (err, exists) { + t.ifError(err, "tag found to exist") + t.equal(exists, false, "git tag DOES exist") + t.pass("git tag does not exist") t.end() }) }) @@ -52,7 +50,7 @@ }) }) -test('cleanup', function(t) { +test("cleanup", function (t) { // windows fix for locked files process.chdir(osenv.tmpdir()) @@ -62,12 +60,12 @@ function setup() { mkdirp.sync(pkg) - mkdirp.sync(pkg + '/cache') - fs.writeFileSync(pkg + '/package.json', JSON.stringify({ + mkdirp.sync(cache) + fs.writeFileSync(path.resolve(pkg, "package.json"), JSON.stringify({ author: "Evan Lucas", name: "version-no-tags-test", version: "0.0.0", description: "Test for git-tag-version flag" - }), 'utf8') + }), "utf8") process.chdir(pkg) } diff -Nru nodejs-0.11.13/deps/npm/test/tap/view.js nodejs-0.11.15/deps/npm/test/tap/view.js --- nodejs-0.11.13/deps/npm/test/tap/view.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/view.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,253 @@ +var common = require("../common-tap.js") +var test = require("tap").test +var osenv = require("osenv") +var path = require("path") +var fs = require("fs") +var rimraf = require("rimraf") +var mkdirp = require("mkdirp") +var tmp = osenv.tmpdir() +var t1dir = path.resolve(tmp, "view-local-no-pkg") +var t2dir = path.resolve(tmp, "view-local-notmine") +var t3dir = path.resolve(tmp, "view-local-mine") +var mr = require("npm-registry-mock") + +test("setup", function (t) { + mkdirp.sync(t1dir) + mkdirp.sync(t2dir) + mkdirp.sync(t3dir) + + fs.writeFileSync(t2dir + "/package.json", JSON.stringify({ + author: "Evan Lucas" + , name: 
"test-repo-url-https" + , version: "0.0.1" + }), "utf8") + + fs.writeFileSync(t3dir + "/package.json", JSON.stringify({ + author: "Evan Lucas" + , name: "biscuits" + , version: "0.0.1" + }), "utf8") + + t.pass("created fixtures") + t.end() +}) + +test("npm view . in global mode", function (t) { + process.chdir(t1dir) + common.npm([ + "view" + , "." + , "--registry=" + common.registry + , "--global" + ], { cwd: t1dir }, function (err, code, stdout, stderr) { + t.ifError(err, "view command finished successfully") + t.equal(code, 1, "exit not ok") + t.similar(stderr, /Cannot use view command in global mode./m) + t.end() + }) +}) + +test("npm view --global", function(t) { + process.chdir(t1dir) + common.npm([ + "view" + , "--registry=" + common.registry + , "--global" + ], { cwd: t1dir }, function(err, code, stdout, stderr) { + t.ifError(err, "view command finished successfully") + t.equal(code, 1, "exit not ok") + t.similar(stderr, /Cannot use view command in global mode./m) + t.end() + }) +}) + +test("npm view . with no package.json", function(t) { + process.chdir(t1dir) + common.npm([ + "view" + , "." + , "--registry=" + common.registry + ], { cwd: t1dir }, function (err, code, stdout, stderr) { + t.ifError(err, "view command finished successfully") + t.equal(code, 1, "exit not ok") + t.similar(stderr, /Invalid package.json/m) + t.end() + }) +}) + +test("npm view . with no published package", function (t) { + process.chdir(t3dir) + mr(common.port, function (s) { + common.npm([ + "view" + , "." + , "--registry=" + common.registry + ], { cwd: t3dir }, function (err, code, stdout, stderr) { + t.ifError(err, "view command finished successfully") + t.equal(code, 1, "exit not ok") + t.similar(stderr, /version not found/m) + s.close() + t.end() + }) + }) +}) + +test("npm view .", function (t) { + process.chdir(t2dir) + mr(common.port, function (s) { + common.npm([ + "view" + , "." 
+ , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + var re = new RegExp("name: 'test-repo-url-https'") + t.similar(stdout, re) + s.close() + t.end() + }) + }) +}) + +test("npm view . select fields", function (t) { + process.chdir(t2dir) + mr(common.port, function (s) { + common.npm([ + "view" + , "." + , "main" + , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + t.equal(stdout.trim(), "index.js", "should print `index.js`") + s.close() + t.end() + }) + }) +}) + +test("npm view .@<version>", function (t) { + process.chdir(t2dir) + mr(common.port, function (s) { + common.npm([ + "view" + , ".@0.0.0" + , "version" + , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + t.equal(stdout.trim(), "0.0.0", "should print `0.0.0`") + s.close() + t.end() + }) + }) +}) + +test("npm view .@<version> --json", function (t) { + process.chdir(t2dir) + mr(common.port, function (s) { + common.npm([ + "view" + , ".@0.0.0" + , "version" + , "--json" + , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + t.equal(stdout.trim(), "\"0.0.0\"", "should print `\"0.0.0\"`") + s.close() + t.end() + }) + }) +}) + +test("npm view <package name>", function (t) { + mr(common.port, function (s) { + common.npm([ + "view" + , "underscore" + , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + var re = new RegExp("name: 'underscore'") + t.similar(stdout, re, "should have name `underscore`") + s.close() + 
t.end() + }) + }) +}) + +test("npm view <package name> --global", function(t) { + mr(common.port, function(s) { + common.npm([ + "view" + , "underscore" + , "--global" + , "--registry=" + common.registry + ], { cwd: t2dir }, function(err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + var re = new RegExp("name: 'underscore'") + t.similar(stdout, re, "should have name `underscore`") + s.close() + t.end() + }) + }) +}) + +test("npm view <package name> --json", function(t) { + t.plan(3) + mr(common.port, function (s) { + common.npm([ + "view" + , "underscore" + , "--json" + , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + s.close() + try { + var out = JSON.parse(stdout.trim()) + t.similar(out, { + maintainers: "jashkenas <jashkenas@gmail.com>" + }, "should have the same maintainer") + } + catch (er) { + t.fail("Unable to parse JSON") + } + }) + }) +}) + +test("npm view <package name> <field>", function (t) { + mr(common.port, function (s) { + common.npm([ + "view" + , "underscore" + , "homepage" + , "--registry=" + common.registry + ], { cwd: t2dir }, function (err, code, stdout) { + t.ifError(err, "view command finished successfully") + t.equal(code, 0, "exit ok") + t.equal(stdout.trim(), "http://underscorejs.org", + "homepage should equal `http://underscorejs.org`") + s.close() + t.end() + }) + }) +}) + +test("cleanup", function (t) { + process.chdir(osenv.tmpdir()) + rimraf.sync(t1dir) + rimraf.sync(t2dir) + rimraf.sync(t3dir) + t.pass("cleaned up") + t.end() +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/whoami.js nodejs-0.11.15/deps/npm/test/tap/whoami.js --- nodejs-0.11.13/deps/npm/test/tap/whoami.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/whoami.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,77 @@ +var common = require("../common-tap.js") + +var fs 
= require("fs") +var path = require("path") +var createServer = require("http").createServer + +var test = require("tap").test +var rimraf = require("rimraf") + +var opts = { cwd: __dirname } + +var FIXTURE_PATH = path.resolve(__dirname, "fixture_npmrc") + +test("npm whoami with basic auth", function (t) { + var s = "//registry.lvh.me/:username = wombat\n" + + "//registry.lvh.me/:_password = YmFkIHBhc3N3b3Jk\n" + + "//registry.lvh.me/:email = lindsay@wdu.org.au\n" + fs.writeFileSync(FIXTURE_PATH, s, "ascii") + fs.chmodSync(FIXTURE_PATH, "0444") + + common.npm( + [ + "whoami", + "--userconfig=" + FIXTURE_PATH, + "--registry=http://registry.lvh.me/" + ], + opts, + function (err, code, stdout, stderr) { + t.ifError(err) + + t.equal(stderr, "", "got nothing on stderr") + t.equal(code, 0, "exit ok") + t.equal(stdout, "wombat\n", "got username") + rimraf.sync(FIXTURE_PATH) + t.end() + } + ) +}) + +test("npm whoami with bearer auth", {timeout : 2 * 1000}, function (t) { + var s = "//localhost:" + common.port + + "/:_authToken = wombat-developers-union\n" + fs.writeFileSync(FIXTURE_PATH, s, "ascii") + fs.chmodSync(FIXTURE_PATH, "0444") + + function verify(req, res) { + t.equal(req.method, "GET") + t.equal(req.url, "/whoami") + + res.setHeader("content-type", "application/json") + res.writeHeader(200) + res.end(JSON.stringify({username : "wombat"}), "utf8") + } + + var server = createServer(verify) + + server.listen(common.port, function () { + common.npm( + [ + "whoami", + "--userconfig=" + FIXTURE_PATH, + "--registry=http://localhost:" + common.port + "/" + ], + opts, + function (err, code, stdout, stderr) { + t.ifError(err) + + t.equal(stderr, "", "got nothing on stderr") + t.equal(code, 0, "exit ok") + t.equal(stdout, "wombat\n", "got username") + rimraf.sync(FIXTURE_PATH) + server.close() + t.end() + } + ) + }) +}) diff -Nru nodejs-0.11.13/deps/npm/test/tap/zz-cleanup.js nodejs-0.11.15/deps/npm/test/tap/zz-cleanup.js --- nodejs-0.11.13/deps/npm/test/tap/zz-cleanup.js 
1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/npm/test/tap/zz-cleanup.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,15 @@ +var common = require("../common-tap") +var test = require("tap").test +var fs = require("fs") + +test("cleanup", function (t) { + var res = common.deleteNpmCacheRecursivelySync() + t.equal(res, 0, "Deleted test npm cache successfully") + + // ensure cache is clean + fs.readdir(common.npm_config_cache, function (err) { + t.ok(err, "error expected") + t.equal(err.code, "ENOENT", "npm cache directory no longer exists") + t.end() + }) +}) diff -Nru nodejs-0.11.13/deps/npm/.travis.yml nodejs-0.11.15/deps/npm/.travis.yml --- nodejs-0.11.13/deps/npm/.travis.yml 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/npm/.travis.yml 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,11 @@ language: node_js -script: "npm run-script tap" node_js: - "0.11" - "0.10" +env: + - DEPLOY_VERSION=testing +before_install: + - "npm config set spin false" + - "npm install -g npm@^2" + - "sudo mkdir -p /var/run/couchdb" +script: "npm run-script tap" diff -Nru nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1071 @@ +#include "arm_arch.h" +.text +.code 32 + +.type AES_Te,%object +.align 5 +AES_Te: +.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d +.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554 +.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d +.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a +.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87 +.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b +.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea +.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b +.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 
0x4c26266a +.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f +.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108 +.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f +.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e +.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5 +.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d +.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f +.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e +.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb +.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce +.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497 +.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c +.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed +.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b +.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a +.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16 +.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594 +.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81 +.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3 +.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a +.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504 +.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163 +.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d +.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f +.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739 +.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47 +.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395 +.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f +.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883 +.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c +.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76 +.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e +.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4 +.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6 +.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b +.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7 +.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 
0x49a9a9e0 +.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25 +.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818 +.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72 +.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651 +.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21 +.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85 +.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa +.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12 +.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0 +.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9 +.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133 +.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7 +.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920 +.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a +.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17 +.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8 +.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11 +.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a +@ Te4[256] +.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 +.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 +.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0 +.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 +.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc +.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 +.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a +.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 +.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0 +.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 +.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b +.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf +.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85 +.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 +.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5 +.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 +.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17 +.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 +.byte 0x60, 0x81, 0x4f, 0xdc, 
0x22, 0x2a, 0x90, 0x88 +.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb +.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c +.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 +.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9 +.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 +.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6 +.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a +.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e +.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e +.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94 +.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf +.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68 +.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 +@ rcon[] +.word 0x01000000, 0x02000000, 0x04000000, 0x08000000 +.word 0x10000000, 0x20000000, 0x40000000, 0x80000000 +.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0 +.size AES_Te,.-AES_Te + +@ void AES_encrypt(const unsigned char *in, unsigned char *out, +@ const AES_KEY *key) { +.global AES_encrypt +.type AES_encrypt,%function +.align 5 +AES_encrypt: + sub r3,pc,#8 @ AES_encrypt + stmdb sp!,{r1,r4-r12,lr} + mov r12,r0 @ inp + mov r11,r2 + sub r10,r3,#AES_encrypt-AES_Te @ Te +#if __ARM_ARCH__<7 + ldrb r0,[r12,#3] @ load input data in endian-neutral + ldrb r4,[r12,#2] @ manner... 
+ ldrb r5,[r12,#1] + ldrb r6,[r12,#0] + orr r0,r0,r4,lsl#8 + ldrb r1,[r12,#7] + orr r0,r0,r5,lsl#16 + ldrb r4,[r12,#6] + orr r0,r0,r6,lsl#24 + ldrb r5,[r12,#5] + ldrb r6,[r12,#4] + orr r1,r1,r4,lsl#8 + ldrb r2,[r12,#11] + orr r1,r1,r5,lsl#16 + ldrb r4,[r12,#10] + orr r1,r1,r6,lsl#24 + ldrb r5,[r12,#9] + ldrb r6,[r12,#8] + orr r2,r2,r4,lsl#8 + ldrb r3,[r12,#15] + orr r2,r2,r5,lsl#16 + ldrb r4,[r12,#14] + orr r2,r2,r6,lsl#24 + ldrb r5,[r12,#13] + ldrb r6,[r12,#12] + orr r3,r3,r4,lsl#8 + orr r3,r3,r5,lsl#16 + orr r3,r3,r6,lsl#24 +#else + ldr r0,[r12,#0] + ldr r1,[r12,#4] + ldr r2,[r12,#8] + ldr r3,[r12,#12] +#ifdef __ARMEL__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif +#endif + bl _armv4_AES_encrypt + + ldr r12,[sp],#4 @ pop out +#if __ARM_ARCH__>=7 +#ifdef __ARMEL__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif + str r0,[r12,#0] + str r1,[r12,#4] + str r2,[r12,#8] + str r3,[r12,#12] +#else + mov r4,r0,lsr#24 @ write output in endian-neutral + mov r5,r0,lsr#16 @ manner... + mov r6,r0,lsr#8 + strb r4,[r12,#0] + strb r5,[r12,#1] + mov r4,r1,lsr#24 + strb r6,[r12,#2] + mov r5,r1,lsr#16 + strb r0,[r12,#3] + mov r6,r1,lsr#8 + strb r4,[r12,#4] + strb r5,[r12,#5] + mov r4,r2,lsr#24 + strb r6,[r12,#6] + mov r5,r2,lsr#16 + strb r1,[r12,#7] + mov r6,r2,lsr#8 + strb r4,[r12,#8] + strb r5,[r12,#9] + mov r4,r3,lsr#24 + strb r6,[r12,#10] + mov r5,r3,lsr#16 + strb r2,[r12,#11] + mov r6,r3,lsr#8 + strb r4,[r12,#12] + strb r5,[r12,#13] + strb r6,[r12,#14] + strb r3,[r12,#15] +#endif +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size AES_encrypt,.-AES_encrypt + +.type _armv4_AES_encrypt,%function +.align 2 +_armv4_AES_encrypt: + str lr,[sp,#-4]! 
@ push lr + ldmia r11!,{r4-r7} + eor r0,r0,r4 + ldr r12,[r11,#240-16] + eor r1,r1,r5 + eor r2,r2,r6 + eor r3,r3,r7 + sub r12,r12,#1 + mov lr,#255 + + and r7,lr,r0 + and r8,lr,r0,lsr#8 + and r9,lr,r0,lsr#16 + mov r0,r0,lsr#24 +.Lenc_loop: + ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0] + and r7,lr,r1,lsr#16 @ i0 + ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8] + and r8,lr,r1 + ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16] + and r9,lr,r1,lsr#8 + ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24] + mov r1,r1,lsr#24 + + ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16] + ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0] + ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8] + eor r0,r0,r7,ror#8 + ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24] + and r7,lr,r2,lsr#8 @ i0 + eor r5,r5,r8,ror#8 + and r8,lr,r2,lsr#16 @ i1 + eor r6,r6,r9,ror#8 + and r9,lr,r2 + ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8] + eor r1,r1,r4,ror#24 + ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16] + mov r2,r2,lsr#24 + + ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0] + eor r0,r0,r7,ror#16 + ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24] + and r7,lr,r3 @ i0 + eor r1,r1,r8,ror#8 + and r8,lr,r3,lsr#8 @ i1 + eor r6,r6,r9,ror#16 + and r9,lr,r3,lsr#16 @ i2 + ldr r7,[r10,r7,lsl#2] @ Te3[s3>>0] + eor r2,r2,r5,ror#16 + ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8] + mov r3,r3,lsr#24 + + ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16] + eor r0,r0,r7,ror#24 + ldr r7,[r11],#16 + eor r1,r1,r8,ror#16 + ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24] + eor r2,r2,r9,ror#8 + ldr r4,[r11,#-12] + eor r3,r3,r6,ror#8 + + ldr r5,[r11,#-8] + eor r0,r0,r7 + ldr r6,[r11,#-4] + and r7,lr,r0 + eor r1,r1,r4 + and r8,lr,r0,lsr#8 + eor r2,r2,r5 + and r9,lr,r0,lsr#16 + eor r3,r3,r6 + mov r0,r0,lsr#24 + + subs r12,r12,#1 + bne .Lenc_loop + + add r10,r10,#2 + + ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0] + and r7,lr,r1,lsr#16 @ i0 + ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8] + and r8,lr,r1 + ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16] + and r9,lr,r1,lsr#8 + ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24] + mov r1,r1,lsr#24 + + ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16] + ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0] + ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8] + eor 
r0,r7,r0,lsl#8 + ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24] + and r7,lr,r2,lsr#8 @ i0 + eor r5,r8,r5,lsl#8 + and r8,lr,r2,lsr#16 @ i1 + eor r6,r9,r6,lsl#8 + and r9,lr,r2 + ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8] + eor r1,r4,r1,lsl#24 + ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16] + mov r2,r2,lsr#24 + + ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0] + eor r0,r7,r0,lsl#8 + ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24] + and r7,lr,r3 @ i0 + eor r1,r1,r8,lsl#16 + and r8,lr,r3,lsr#8 @ i1 + eor r6,r9,r6,lsl#8 + and r9,lr,r3,lsr#16 @ i2 + ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0] + eor r2,r5,r2,lsl#24 + ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8] + mov r3,r3,lsr#24 + + ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16] + eor r0,r7,r0,lsl#8 + ldr r7,[r11,#0] + ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24] + eor r1,r1,r8,lsl#8 + ldr r4,[r11,#4] + eor r2,r2,r9,lsl#16 + ldr r5,[r11,#8] + eor r3,r6,r3,lsl#24 + ldr r6,[r11,#12] + + eor r0,r0,r7 + eor r1,r1,r4 + eor r2,r2,r5 + eor r3,r3,r6 + + sub r10,r10,#2 + ldr pc,[sp],#4 @ pop and return +.size _armv4_AES_encrypt,.-_armv4_AES_encrypt + +.global private_AES_set_encrypt_key +.type private_AES_set_encrypt_key,%function +.align 5 +private_AES_set_encrypt_key: +_armv4_AES_set_encrypt_key: + sub r3,pc,#8 @ AES_set_encrypt_key + teq r0,#0 + moveq r0,#-1 + beq .Labrt + teq r2,#0 + moveq r0,#-1 + beq .Labrt + + teq r1,#128 + beq .Lok + teq r1,#192 + beq .Lok + teq r1,#256 + movne r0,#-1 + bne .Labrt + +.Lok: stmdb sp!,{r4-r12,lr} + sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4 + + mov r12,r0 @ inp + mov lr,r1 @ bits + mov r11,r2 @ key + +#if __ARM_ARCH__<7 + ldrb r0,[r12,#3] @ load input data in endian-neutral + ldrb r4,[r12,#2] @ manner... 
+ ldrb r5,[r12,#1] + ldrb r6,[r12,#0] + orr r0,r0,r4,lsl#8 + ldrb r1,[r12,#7] + orr r0,r0,r5,lsl#16 + ldrb r4,[r12,#6] + orr r0,r0,r6,lsl#24 + ldrb r5,[r12,#5] + ldrb r6,[r12,#4] + orr r1,r1,r4,lsl#8 + ldrb r2,[r12,#11] + orr r1,r1,r5,lsl#16 + ldrb r4,[r12,#10] + orr r1,r1,r6,lsl#24 + ldrb r5,[r12,#9] + ldrb r6,[r12,#8] + orr r2,r2,r4,lsl#8 + ldrb r3,[r12,#15] + orr r2,r2,r5,lsl#16 + ldrb r4,[r12,#14] + orr r2,r2,r6,lsl#24 + ldrb r5,[r12,#13] + ldrb r6,[r12,#12] + orr r3,r3,r4,lsl#8 + str r0,[r11],#16 + orr r3,r3,r5,lsl#16 + str r1,[r11,#-12] + orr r3,r3,r6,lsl#24 + str r2,[r11,#-8] + str r3,[r11,#-4] +#else + ldr r0,[r12,#0] + ldr r1,[r12,#4] + ldr r2,[r12,#8] + ldr r3,[r12,#12] +#ifdef __ARMEL__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif + str r0,[r11],#16 + str r1,[r11,#-12] + str r2,[r11,#-8] + str r3,[r11,#-4] +#endif + + teq lr,#128 + bne .Lnot128 + mov r12,#10 + str r12,[r11,#240-16] + add r6,r10,#256 @ rcon + mov lr,#255 + +.L128_loop: + and r5,lr,r3,lsr#24 + and r7,lr,r3,lsr#16 + ldrb r5,[r10,r5] + and r8,lr,r3,lsr#8 + ldrb r7,[r10,r7] + and r9,lr,r3 + ldrb r8,[r10,r8] + orr r5,r5,r7,lsl#24 + ldrb r9,[r10,r9] + orr r5,r5,r8,lsl#16 + ldr r4,[r6],#4 @ rcon[i++] + orr r5,r5,r9,lsl#8 + eor r5,r5,r4 + eor r0,r0,r5 @ rk[4]=rk[0]^... 
+ eor r1,r1,r0 @ rk[5]=rk[1]^rk[4] + str r0,[r11],#16 + eor r2,r2,r1 @ rk[6]=rk[2]^rk[5] + str r1,[r11,#-12] + eor r3,r3,r2 @ rk[7]=rk[3]^rk[6] + str r2,[r11,#-8] + subs r12,r12,#1 + str r3,[r11,#-4] + bne .L128_loop + sub r2,r11,#176 + b .Ldone + +.Lnot128: +#if __ARM_ARCH__<7 + ldrb r8,[r12,#19] + ldrb r4,[r12,#18] + ldrb r5,[r12,#17] + ldrb r6,[r12,#16] + orr r8,r8,r4,lsl#8 + ldrb r9,[r12,#23] + orr r8,r8,r5,lsl#16 + ldrb r4,[r12,#22] + orr r8,r8,r6,lsl#24 + ldrb r5,[r12,#21] + ldrb r6,[r12,#20] + orr r9,r9,r4,lsl#8 + orr r9,r9,r5,lsl#16 + str r8,[r11],#8 + orr r9,r9,r6,lsl#24 + str r9,[r11,#-4] +#else + ldr r8,[r12,#16] + ldr r9,[r12,#20] +#ifdef __ARMEL__ + rev r8,r8 + rev r9,r9 +#endif + str r8,[r11],#8 + str r9,[r11,#-4] +#endif + + teq lr,#192 + bne .Lnot192 + mov r12,#12 + str r12,[r11,#240-24] + add r6,r10,#256 @ rcon + mov lr,#255 + mov r12,#8 + +.L192_loop: + and r5,lr,r9,lsr#24 + and r7,lr,r9,lsr#16 + ldrb r5,[r10,r5] + and r8,lr,r9,lsr#8 + ldrb r7,[r10,r7] + and r9,lr,r9 + ldrb r8,[r10,r8] + orr r5,r5,r7,lsl#24 + ldrb r9,[r10,r9] + orr r5,r5,r8,lsl#16 + ldr r4,[r6],#4 @ rcon[i++] + orr r5,r5,r9,lsl#8 + eor r9,r5,r4 + eor r0,r0,r9 @ rk[6]=rk[0]^... 
+ eor r1,r1,r0 @ rk[7]=rk[1]^rk[6] + str r0,[r11],#24 + eor r2,r2,r1 @ rk[8]=rk[2]^rk[7] + str r1,[r11,#-20] + eor r3,r3,r2 @ rk[9]=rk[3]^rk[8] + str r2,[r11,#-16] + subs r12,r12,#1 + str r3,[r11,#-12] + subeq r2,r11,#216 + beq .Ldone + + ldr r7,[r11,#-32] + ldr r8,[r11,#-28] + eor r7,r7,r3 @ rk[10]=rk[4]^rk[9] + eor r9,r8,r7 @ rk[11]=rk[5]^rk[10] + str r7,[r11,#-8] + str r9,[r11,#-4] + b .L192_loop + +.Lnot192: +#if __ARM_ARCH__<7 + ldrb r8,[r12,#27] + ldrb r4,[r12,#26] + ldrb r5,[r12,#25] + ldrb r6,[r12,#24] + orr r8,r8,r4,lsl#8 + ldrb r9,[r12,#31] + orr r8,r8,r5,lsl#16 + ldrb r4,[r12,#30] + orr r8,r8,r6,lsl#24 + ldrb r5,[r12,#29] + ldrb r6,[r12,#28] + orr r9,r9,r4,lsl#8 + orr r9,r9,r5,lsl#16 + str r8,[r11],#8 + orr r9,r9,r6,lsl#24 + str r9,[r11,#-4] +#else + ldr r8,[r12,#24] + ldr r9,[r12,#28] +#ifdef __ARMEL__ + rev r8,r8 + rev r9,r9 +#endif + str r8,[r11],#8 + str r9,[r11,#-4] +#endif + + mov r12,#14 + str r12,[r11,#240-32] + add r6,r10,#256 @ rcon + mov lr,#255 + mov r12,#7 + +.L256_loop: + and r5,lr,r9,lsr#24 + and r7,lr,r9,lsr#16 + ldrb r5,[r10,r5] + and r8,lr,r9,lsr#8 + ldrb r7,[r10,r7] + and r9,lr,r9 + ldrb r8,[r10,r8] + orr r5,r5,r7,lsl#24 + ldrb r9,[r10,r9] + orr r5,r5,r8,lsl#16 + ldr r4,[r6],#4 @ rcon[i++] + orr r5,r5,r9,lsl#8 + eor r9,r5,r4 + eor r0,r0,r9 @ rk[8]=rk[0]^... + eor r1,r1,r0 @ rk[9]=rk[1]^rk[8] + str r0,[r11],#32 + eor r2,r2,r1 @ rk[10]=rk[2]^rk[9] + str r1,[r11,#-28] + eor r3,r3,r2 @ rk[11]=rk[3]^rk[10] + str r2,[r11,#-24] + subs r12,r12,#1 + str r3,[r11,#-20] + subeq r2,r11,#256 + beq .Ldone + + and r5,lr,r3 + and r7,lr,r3,lsr#8 + ldrb r5,[r10,r5] + and r8,lr,r3,lsr#16 + ldrb r7,[r10,r7] + and r9,lr,r3,lsr#24 + ldrb r8,[r10,r8] + orr r5,r5,r7,lsl#8 + ldrb r9,[r10,r9] + orr r5,r5,r8,lsl#16 + ldr r4,[r11,#-48] + orr r5,r5,r9,lsl#24 + + ldr r7,[r11,#-44] + ldr r8,[r11,#-40] + eor r4,r4,r5 @ rk[12]=rk[4]^... 
+ ldr r9,[r11,#-36] + eor r7,r7,r4 @ rk[13]=rk[5]^rk[12] + str r4,[r11,#-16] + eor r8,r8,r7 @ rk[14]=rk[6]^rk[13] + str r7,[r11,#-12] + eor r9,r9,r8 @ rk[15]=rk[7]^rk[14] + str r8,[r11,#-8] + str r9,[r11,#-4] + b .L256_loop + +.Ldone: mov r0,#0 + ldmia sp!,{r4-r12,lr} +.Labrt: tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key + +.global private_AES_set_decrypt_key +.type private_AES_set_decrypt_key,%function +.align 5 +private_AES_set_decrypt_key: + str lr,[sp,#-4]! @ push lr + bl _armv4_AES_set_encrypt_key + teq r0,#0 + ldrne lr,[sp],#4 @ pop lr + bne .Labrt + + stmdb sp!,{r4-r12} + + ldr r12,[r2,#240] @ AES_set_encrypt_key preserves r2, + mov r11,r2 @ which is AES_KEY *key + mov r7,r2 + add r8,r2,r12,lsl#4 + +.Linv: ldr r0,[r7] + ldr r1,[r7,#4] + ldr r2,[r7,#8] + ldr r3,[r7,#12] + ldr r4,[r8] + ldr r5,[r8,#4] + ldr r6,[r8,#8] + ldr r9,[r8,#12] + str r0,[r8],#-16 + str r1,[r8,#16+4] + str r2,[r8,#16+8] + str r3,[r8,#16+12] + str r4,[r7],#16 + str r5,[r7,#-12] + str r6,[r7,#-8] + str r9,[r7,#-4] + teq r7,r8 + bne .Linv + ldr r0,[r11,#16]! 
@ prefetch tp1 + mov r7,#0x80 + mov r8,#0x1b + orr r7,r7,#0x8000 + orr r8,r8,#0x1b00 + orr r7,r7,r7,lsl#16 + orr r8,r8,r8,lsl#16 + sub r12,r12,#1 + mvn r9,r7 + mov r12,r12,lsl#2 @ (rounds-1)*4 + +.Lmix: and r4,r0,r7 + and r1,r0,r9 + sub r4,r4,r4,lsr#7 + and r4,r4,r8 + eor r1,r4,r1,lsl#1 @ tp2 + + and r4,r1,r7 + and r2,r1,r9 + sub r4,r4,r4,lsr#7 + and r4,r4,r8 + eor r2,r4,r2,lsl#1 @ tp4 + + and r4,r2,r7 + and r3,r2,r9 + sub r4,r4,r4,lsr#7 + and r4,r4,r8 + eor r3,r4,r3,lsl#1 @ tp8 + + eor r4,r1,r2 + eor r5,r0,r3 @ tp9 + eor r4,r4,r3 @ tpe + eor r4,r4,r1,ror#24 + eor r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8) + eor r4,r4,r2,ror#16 + eor r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16) + eor r4,r4,r5,ror#8 @ ^= ROTATE(tp9,24) + + ldr r0,[r11,#4] @ prefetch tp1 + str r4,[r11],#4 + subs r12,r12,#1 + bne .Lmix + + mov r0,#0 +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key + +.type AES_Td,%object +.align 5 +AES_Td: +.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96 +.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393 +.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25 +.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f +.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1 +.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6 +.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da +.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844 +.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd +.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4 +.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45 +.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94 +.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7 +.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a +.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5 +.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c 
+.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1 +.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a +.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75 +.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051 +.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46 +.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff +.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77 +.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb +.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000 +.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e +.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927 +.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a +.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e +.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16 +.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d +.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8 +.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd +.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34 +.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163 +.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120 +.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d +.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0 +.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422 +.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef +.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36 +.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4 +.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662 +.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5 +.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3 +.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b +.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8 +.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6 +.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6 +.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0 +.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815 +.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f +.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df 
+.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f +.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e +.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713 +.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89 +.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c +.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf +.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86 +.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f +.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541 +.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190 +.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 +@ Td4[256] +.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 +.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb +.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 +.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb +.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d +.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e +.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 +.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 +.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 +.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 +.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda +.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 +.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a +.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 +.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 +.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b +.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea +.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 +.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 +.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e +.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 +.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b +.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 +.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 +.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 +.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 
0xec, 0x5f +.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d +.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef +.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 +.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 +.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 +.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d +.size AES_Td,.-AES_Td + +@ void AES_decrypt(const unsigned char *in, unsigned char *out, +@ const AES_KEY *key) { +.global AES_decrypt +.type AES_decrypt,%function +.align 5 +AES_decrypt: + sub r3,pc,#8 @ AES_decrypt + stmdb sp!,{r1,r4-r12,lr} + mov r12,r0 @ inp + mov r11,r2 + sub r10,r3,#AES_decrypt-AES_Td @ Td +#if __ARM_ARCH__<7 + ldrb r0,[r12,#3] @ load input data in endian-neutral + ldrb r4,[r12,#2] @ manner... + ldrb r5,[r12,#1] + ldrb r6,[r12,#0] + orr r0,r0,r4,lsl#8 + ldrb r1,[r12,#7] + orr r0,r0,r5,lsl#16 + ldrb r4,[r12,#6] + orr r0,r0,r6,lsl#24 + ldrb r5,[r12,#5] + ldrb r6,[r12,#4] + orr r1,r1,r4,lsl#8 + ldrb r2,[r12,#11] + orr r1,r1,r5,lsl#16 + ldrb r4,[r12,#10] + orr r1,r1,r6,lsl#24 + ldrb r5,[r12,#9] + ldrb r6,[r12,#8] + orr r2,r2,r4,lsl#8 + ldrb r3,[r12,#15] + orr r2,r2,r5,lsl#16 + ldrb r4,[r12,#14] + orr r2,r2,r6,lsl#24 + ldrb r5,[r12,#13] + ldrb r6,[r12,#12] + orr r3,r3,r4,lsl#8 + orr r3,r3,r5,lsl#16 + orr r3,r3,r6,lsl#24 +#else + ldr r0,[r12,#0] + ldr r1,[r12,#4] + ldr r2,[r12,#8] + ldr r3,[r12,#12] +#ifdef __ARMEL__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif +#endif + bl _armv4_AES_decrypt + + ldr r12,[sp],#4 @ pop out +#if __ARM_ARCH__>=7 +#ifdef __ARMEL__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +#endif + str r0,[r12,#0] + str r1,[r12,#4] + str r2,[r12,#8] + str r3,[r12,#12] +#else + mov r4,r0,lsr#24 @ write output in endian-neutral + mov r5,r0,lsr#16 @ manner... 
+ mov r6,r0,lsr#8 + strb r4,[r12,#0] + strb r5,[r12,#1] + mov r4,r1,lsr#24 + strb r6,[r12,#2] + mov r5,r1,lsr#16 + strb r0,[r12,#3] + mov r6,r1,lsr#8 + strb r4,[r12,#4] + strb r5,[r12,#5] + mov r4,r2,lsr#24 + strb r6,[r12,#6] + mov r5,r2,lsr#16 + strb r1,[r12,#7] + mov r6,r2,lsr#8 + strb r4,[r12,#8] + strb r5,[r12,#9] + mov r4,r3,lsr#24 + strb r6,[r12,#10] + mov r5,r3,lsr#16 + strb r2,[r12,#11] + mov r6,r3,lsr#8 + strb r4,[r12,#12] + strb r5,[r12,#13] + strb r6,[r12,#14] + strb r3,[r12,#15] +#endif +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size AES_decrypt,.-AES_decrypt + +.type _armv4_AES_decrypt,%function +.align 2 +_armv4_AES_decrypt: + str lr,[sp,#-4]! @ push lr + ldmia r11!,{r4-r7} + eor r0,r0,r4 + ldr r12,[r11,#240-16] + eor r1,r1,r5 + eor r2,r2,r6 + eor r3,r3,r7 + sub r12,r12,#1 + mov lr,#255 + + and r7,lr,r0,lsr#16 + and r8,lr,r0,lsr#8 + and r9,lr,r0 + mov r0,r0,lsr#24 +.Ldec_loop: + ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16] + and r7,lr,r1 @ i0 + ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8] + and r8,lr,r1,lsr#16 + ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0] + and r9,lr,r1,lsr#8 + ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24] + mov r1,r1,lsr#24 + + ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0] + ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16] + ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8] + eor r0,r0,r7,ror#24 + ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24] + and r7,lr,r2,lsr#8 @ i0 + eor r5,r8,r5,ror#8 + and r8,lr,r2 @ i1 + eor r6,r9,r6,ror#8 + and r9,lr,r2,lsr#16 + ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8] + eor r1,r1,r4,ror#8 + ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0] + mov r2,r2,lsr#24 + + ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16] + eor r0,r0,r7,ror#16 + ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24] + and r7,lr,r3,lsr#16 @ i0 + eor r1,r1,r8,ror#24 + and r8,lr,r3,lsr#8 @ i1 + eor r6,r9,r6,ror#8 + and r9,lr,r3 @ i2 + ldr r7,[r10,r7,lsl#2] @ Td1[s3>>16] + eor r2,r2,r5,ror#8 + ldr r8,[r10,r8,lsl#2] @ 
Td2[s3>>8] + mov r3,r3,lsr#24 + + ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0] + eor r0,r0,r7,ror#8 + ldr r7,[r11],#16 + eor r1,r1,r8,ror#16 + ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24] + eor r2,r2,r9,ror#24 + + ldr r4,[r11,#-12] + eor r0,r0,r7 + ldr r5,[r11,#-8] + eor r3,r3,r6,ror#8 + ldr r6,[r11,#-4] + and r7,lr,r0,lsr#16 + eor r1,r1,r4 + and r8,lr,r0,lsr#8 + eor r2,r2,r5 + and r9,lr,r0 + eor r3,r3,r6 + mov r0,r0,lsr#24 + + subs r12,r12,#1 + bne .Ldec_loop + + add r10,r10,#1024 + + ldr r5,[r10,#0] @ prefetch Td4 + ldr r6,[r10,#32] + ldr r4,[r10,#64] + ldr r5,[r10,#96] + ldr r6,[r10,#128] + ldr r4,[r10,#160] + ldr r5,[r10,#192] + ldr r6,[r10,#224] + + ldrb r0,[r10,r0] @ Td4[s0>>24] + ldrb r4,[r10,r7] @ Td4[s0>>16] + and r7,lr,r1 @ i0 + ldrb r5,[r10,r8] @ Td4[s0>>8] + and r8,lr,r1,lsr#16 + ldrb r6,[r10,r9] @ Td4[s0>>0] + and r9,lr,r1,lsr#8 + + ldrb r7,[r10,r7] @ Td4[s1>>0] + ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24] + ldrb r8,[r10,r8] @ Td4[s1>>16] + eor r0,r7,r0,lsl#24 + ldrb r9,[r10,r9] @ Td4[s1>>8] + eor r1,r4,r1,lsl#8 + and r7,lr,r2,lsr#8 @ i0 + eor r5,r5,r8,lsl#8 + and r8,lr,r2 @ i1 + ldrb r7,[r10,r7] @ Td4[s2>>8] + eor r6,r6,r9,lsl#8 + ldrb r8,[r10,r8] @ Td4[s2>>0] + and r9,lr,r2,lsr#16 + + ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24] + eor r0,r0,r7,lsl#8 + ldrb r9,[r10,r9] @ Td4[s2>>16] + eor r1,r8,r1,lsl#16 + and r7,lr,r3,lsr#16 @ i0 + eor r2,r5,r2,lsl#16 + and r8,lr,r3,lsr#8 @ i1 + ldrb r7,[r10,r7] @ Td4[s3>>16] + eor r6,r6,r9,lsl#16 + ldrb r8,[r10,r8] @ Td4[s3>>8] + and r9,lr,r3 @ i2 + + ldrb r9,[r10,r9] @ Td4[s3>>0] + ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24] + eor r0,r0,r7,lsl#16 + ldr r7,[r11,#0] + eor r1,r1,r8,lsl#8 + ldr r4,[r11,#4] + eor r2,r9,r2,lsl#8 + ldr r5,[r11,#8] + eor r3,r6,r3,lsl#24 + ldr r6,[r11,#12] + + eor r0,r0,r7 + eor r1,r1,r4 + eor r2,r2,r5 + eor r3,r3,r6 + + sub r10,r10,#1024 + ldr pc,[sp],#4 @ pop and return +.size _armv4_AES_decrypt,.-_armv4_AES_decrypt +.asciz "AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>" +.align 2 diff -Nru 
nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,214 @@ +#include "arm_arch.h" + +.text +.code 32 + +#if __ARM_ARCH__>=7 +.fpu neon + +.type mul_1x1_neon,%function +.align 5 +mul_1x1_neon: + vshl.u64 d2,d16,#8 @ q1-q3 are slided + + vmull.p8 q0,d16,d17 @ abb + vshl.u64 d4,d16,#16 + vmull.p8 q1,d2,d17 @ a<<8bb + vshl.u64 d6,d16,#24 + vmull.p8 q2,d4,d17 @ a<<16bb + vshr.u64 d2,#8 + vmull.p8 q3,d6,d17 @ a<<24bb + vshl.u64 d3,#24 + veor d0,d2 + vshr.u64 d4,#16 + veor d0,d3 + vshl.u64 d5,#16 + veor d0,d4 + vshr.u64 d6,#24 + veor d0,d5 + vshl.u64 d7,#8 + veor d0,d6 + veor d0,d7 + .word 0xe12fff1e +.size mul_1x1_neon,.-mul_1x1_neon +#endif +.type mul_1x1_ialu,%function +.align 5 +mul_1x1_ialu: + mov r4,#0 + bic r5,r1,#3<<30 @ a1=a&0x3fffffff + str r4,[sp,#0] @ tab[0]=0 + add r6,r5,r5 @ a2=a1<<1 + str r5,[sp,#4] @ tab[1]=a1 + eor r7,r5,r6 @ a1^a2 + str r6,[sp,#8] @ tab[2]=a2 + mov r8,r5,lsl#2 @ a4=a1<<2 + str r7,[sp,#12] @ tab[3]=a1^a2 + eor r9,r5,r8 @ a1^a4 + str r8,[sp,#16] @ tab[4]=a4 + eor r4,r6,r8 @ a2^a4 + str r9,[sp,#20] @ tab[5]=a1^a4 + eor r7,r7,r8 @ a1^a2^a4 + str r4,[sp,#24] @ tab[6]=a2^a4 + and r8,r12,r0,lsl#2 + str r7,[sp,#28] @ tab[7]=a1^a2^a4 + + and r9,r12,r0,lsr#1 + ldr r5,[sp,r8] @ tab[b & 0x7] + and r8,r12,r0,lsr#4 + ldr r7,[sp,r9] @ tab[b >> 3 & 0x7] + and r9,r12,r0,lsr#7 + ldr r6,[sp,r8] @ tab[b >> 6 & 0x7] + eor r5,r5,r7,lsl#3 @ stall + mov r4,r7,lsr#29 + ldr r7,[sp,r9] @ tab[b >> 9 & 0x7] + + and r8,r12,r0,lsr#10 + eor r5,r5,r6,lsl#6 + eor r4,r4,r6,lsr#26 + ldr r6,[sp,r8] @ tab[b >> 12 & 0x7] + + and r9,r12,r0,lsr#13 + eor r5,r5,r7,lsl#9 + eor r4,r4,r7,lsr#23 + ldr r7,[sp,r9] @ tab[b >> 15 & 0x7] + + and r8,r12,r0,lsr#16 + eor r5,r5,r6,lsl#12 + eor r4,r4,r6,lsr#20 + ldr 
r6,[sp,r8] @ tab[b >> 18 & 0x7] + + and r9,r12,r0,lsr#19 + eor r5,r5,r7,lsl#15 + eor r4,r4,r7,lsr#17 + ldr r7,[sp,r9] @ tab[b >> 21 & 0x7] + + and r8,r12,r0,lsr#22 + eor r5,r5,r6,lsl#18 + eor r4,r4,r6,lsr#14 + ldr r6,[sp,r8] @ tab[b >> 24 & 0x7] + + and r9,r12,r0,lsr#25 + eor r5,r5,r7,lsl#21 + eor r4,r4,r7,lsr#11 + ldr r7,[sp,r9] @ tab[b >> 27 & 0x7] + + tst r1,#1<<30 + and r8,r12,r0,lsr#28 + eor r5,r5,r6,lsl#24 + eor r4,r4,r6,lsr#8 + ldr r6,[sp,r8] @ tab[b >> 30 ] + + eorne r5,r5,r0,lsl#30 + eorne r4,r4,r0,lsr#2 + tst r1,#1<<31 + eor r5,r5,r7,lsl#27 + eor r4,r4,r7,lsr#5 + eorne r5,r5,r0,lsl#31 + eorne r4,r4,r0,lsr#1 + eor r5,r5,r6,lsl#30 + eor r4,r4,r6,lsr#2 + + mov pc,lr +.size mul_1x1_ialu,.-mul_1x1_ialu +.global bn_GF2m_mul_2x2 +.type bn_GF2m_mul_2x2,%function +.align 5 +bn_GF2m_mul_2x2: +#if __ARM_ARCH__>=7 + ldr r12,.LOPENSSL_armcap +.Lpic: ldr r12,[pc,r12] + tst r12,#1 + beq .Lialu + + veor d18,d18 + vmov.32 d19,r3,r3 @ two copies of b1 + vmov.32 d18[0],r1 @ a1 + + veor d20,d20 + vld1.32 d21[],[sp,:32] @ two copies of b0 + vmov.32 d20[0],r2 @ a0 + mov r12,lr + + vmov d16,d18 + vmov d17,d19 + bl mul_1x1_neon @ a1b1 + vmov d22,d0 + + vmov d16,d20 + vmov d17,d21 + bl mul_1x1_neon @ a0b0 + vmov d23,d0 + + veor d16,d20,d18 + veor d17,d21,d19 + veor d20,d23,d22 + bl mul_1x1_neon @ (a0+a1)(b0+b1) + + veor d0,d20 @ (a0+a1)(b0+b1)-a0b0-a1b1 + vshl.u64 d1,d0,#32 + vshr.u64 d0,d0,#32 + veor d23,d1 + veor d22,d0 + vst1.32 {d23[0]},[r0,:32]! + vst1.32 {d23[1]},[r0,:32]! + vst1.32 {d22[0]},[r0,:32]! 
+ vst1.32 {d22[1]},[r0,:32] + bx r12 +.align 4 +.Lialu: +#endif + stmdb sp!,{r4-r10,lr} + mov r10,r0 @ reassign 1st argument + mov r0,r3 @ r0=b1 + ldr r3,[sp,#32] @ load b0 + mov r12,#7<<2 + sub sp,sp,#32 @ allocate tab[8] + + bl mul_1x1_ialu @ a1b1 + str r5,[r10,#8] + str r4,[r10,#12] + + eor r0,r0,r3 @ flip b0 and b1 + eor r1,r1,r2 @ flip a0 and a1 + eor r3,r3,r0 + eor r2,r2,r1 + eor r0,r0,r3 + eor r1,r1,r2 + bl mul_1x1_ialu @ a0b0 + str r5,[r10] + str r4,[r10,#4] + + eor r1,r1,r2 + eor r0,r0,r3 + bl mul_1x1_ialu @ (a1+a0)(b1+b0) + ldmia r10,{r6-r9} + eor r5,r5,r4 + eor r4,r4,r7 + eor r5,r5,r6 + eor r4,r4,r8 + eor r5,r5,r9 + eor r4,r4,r9 + str r4,[r10,#8] + eor r5,r5,r4 + add sp,sp,#32 @ destroy tab[8] + str r5,[r10,#4] + +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r10,pc} +#else + ldmia sp!,{r4-r10,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2 +#if __ARM_ARCH__>=7 +.align 5 +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-(.Lpic+8) +#endif +.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" +.align 5 + +.comm OPENSSL_armcap_P,4,4 diff -Nru nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,147 @@ +.text + +.global bn_mul_mont +.type bn_mul_mont,%function + +.align 2 +bn_mul_mont: + stmdb sp!,{r0,r2} @ sp points at argument block + ldr r0,[sp,#3*4] @ load num + cmp r0,#2 + movlt r0,#0 + addlt sp,sp,#2*4 + blt .Labrt + + stmdb sp!,{r4-r12,lr} @ save 10 registers + + mov r0,r0,lsl#2 @ rescale r0 for byte count + sub sp,sp,r0 @ alloca(4*num) + sub sp,sp,#4 @ +extra dword + sub r0,r0,#4 @ "num=num-1" + add r4,r2,r0 @ &bp[num-1] + + add r0,sp,r0 @ r0 to 
point at &tp[num-1] + ldr r8,[r0,#14*4] @ &n0 + ldr r2,[r2] @ bp[0] + ldr r5,[r1],#4 @ ap[0],ap++ + ldr r6,[r3],#4 @ np[0],np++ + ldr r8,[r8] @ *n0 + str r4,[r0,#15*4] @ save &bp[num] + + umull r10,r11,r5,r2 @ ap[0]*bp[0] + str r8,[r0,#14*4] @ save n0 value + mul r8,r10,r8 @ "tp[0]"*n0 + mov r12,#0 + umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" + mov r4,sp + +.L1st: + ldr r5,[r1],#4 @ ap[j],ap++ + mov r10,r11 + ldr r6,[r3],#4 @ np[j],np++ + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[j]*bp[0] + mov r14,#0 + umlal r12,r14,r6,r8 @ np[j]*n0 + adds r12,r12,r10 + str r12,[r4],#4 @ tp[j-1]=,tp++ + adc r12,r14,#0 + cmp r4,r0 + bne .L1st + + adds r12,r12,r11 + ldr r4,[r0,#13*4] @ restore bp + mov r14,#0 + ldr r8,[r0,#14*4] @ restore n0 + adc r14,r14,#0 + str r12,[r0] @ tp[num-1]= + str r14,[r0,#4] @ tp[num]= + + +.Louter: + sub r7,r0,sp @ "original" r0-1 value + sub r1,r1,r7 @ "rewind" ap to &ap[1] + ldr r2,[r4,#4]! @ *(++bp) + sub r3,r3,r7 @ "rewind" np to &np[1] + ldr r5,[r1,#-4] @ ap[0] + ldr r10,[sp] @ tp[0] + ldr r6,[r3,#-4] @ np[0] + ldr r7,[sp,#4] @ tp[1] + + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] + str r4,[r0,#13*4] @ save bp + mul r8,r10,r8 + mov r12,#0 + umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" + mov r4,sp + +.Linner: + ldr r5,[r1],#4 @ ap[j],ap++ + adds r10,r11,r7 @ +=tp[j] + ldr r6,[r3],#4 @ np[j],np++ + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[j]*bp[i] + mov r14,#0 + umlal r12,r14,r6,r8 @ np[j]*n0 + adc r11,r11,#0 + ldr r7,[r4,#8] @ tp[j+1] + adds r12,r12,r10 + str r12,[r4],#4 @ tp[j-1]=,tp++ + adc r12,r14,#0 + cmp r4,r0 + bne .Linner + + adds r12,r12,r11 + mov r14,#0 + ldr r4,[r0,#13*4] @ restore bp + adc r14,r14,#0 + ldr r8,[r0,#14*4] @ restore n0 + adds r12,r12,r7 + ldr r7,[r0,#15*4] @ restore &bp[num] + adc r14,r14,#0 + str r12,[r0] @ tp[num-1]= + str r14,[r0,#4] @ tp[num]= + + cmp r4,r7 + bne .Louter + + + ldr r2,[r0,#12*4] @ pull rp + add r0,r0,#4 @ r0 to point at &tp[num] + sub r5,r0,sp @ "original" num value + mov r4,sp @ "rewind" r4 + mov r1,r4 @ "borrow" 
r1 + sub r3,r3,r5 @ "rewind" r3 to &np[0] + + subs r7,r7,r7 @ "clear" carry flag +.Lsub: ldr r7,[r4],#4 + ldr r6,[r3],#4 + sbcs r7,r7,r6 @ tp[j]-np[j] + str r7,[r2],#4 @ rp[j]= + teq r4,r0 @ preserve carry + bne .Lsub + sbcs r14,r14,#0 @ upmost carry + mov r4,sp @ "rewind" r4 + sub r2,r2,r5 @ "rewind" r2 + + and r1,r4,r14 + bic r3,r2,r14 + orr r1,r1,r3 @ ap=borrow?tp:rp + +.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh + str sp,[r4],#4 @ zap tp + str r7,[r2],#4 + cmp r4,r0 + bne .Lcopy + + add sp,r0,#4 @ skip over tp[num+1] + ldmia sp!,{r4-r12,lr} @ restore registers + add sp,sp,#2*4 @ skip over {r0,r2} + mov r0,#1 +.Labrt: tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro@openssl.org>" +.align 2 diff -Nru nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,415 @@ +#include "arm_arch.h" + +.text +.code 32 + +.type rem_4bit,%object +.align 5 +rem_4bit: +.short 0x0000,0x1C20,0x3840,0x2460 +.short 0x7080,0x6CA0,0x48C0,0x54E0 +.short 0xE100,0xFD20,0xD940,0xC560 +.short 0x9180,0x8DA0,0xA9C0,0xB5E0 +.size rem_4bit,.-rem_4bit + +.type rem_4bit_get,%function +rem_4bit_get: + sub r2,pc,#8 + sub r2,r2,#32 @ &rem_4bit + b .Lrem_4bit_got + nop +.size rem_4bit_get,.-rem_4bit_get + +.global gcm_ghash_4bit +.type gcm_ghash_4bit,%function +gcm_ghash_4bit: + sub r12,pc,#8 + add r3,r2,r3 @ r3 to point at the end + stmdb sp!,{r3-r11,lr} @ save r3/end too + sub r12,r12,#48 @ &rem_4bit + + ldmia r12,{r4-r11} @ copy rem_4bit ... + stmdb sp!,{r4-r11} @ ... 
to stack + + ldrb r12,[r2,#15] + ldrb r14,[r0,#15] +.Louter: + eor r12,r12,r14 + and r14,r12,#0xf0 + and r12,r12,#0x0f + mov r3,#14 + + add r7,r1,r12,lsl#4 + ldmia r7,{r4-r7} @ load Htbl[nlo] + add r11,r1,r14 + ldrb r12,[r2,#14] + + and r14,r4,#0xf @ rem + ldmia r11,{r8-r11} @ load Htbl[nhi] + add r14,r14,r14 + eor r4,r8,r4,lsr#4 + ldrh r8,[sp,r14] @ rem_4bit[rem] + eor r4,r4,r5,lsl#28 + ldrb r14,[r0,#14] + eor r5,r9,r5,lsr#4 + eor r5,r5,r6,lsl#28 + eor r6,r10,r6,lsr#4 + eor r6,r6,r7,lsl#28 + eor r7,r11,r7,lsr#4 + eor r12,r12,r14 + and r14,r12,#0xf0 + and r12,r12,#0x0f + eor r7,r7,r8,lsl#16 + +.Linner: + add r11,r1,r12,lsl#4 + and r12,r4,#0xf @ rem + subs r3,r3,#1 + add r12,r12,r12 + ldmia r11,{r8-r11} @ load Htbl[nlo] + eor r4,r8,r4,lsr#4 + eor r4,r4,r5,lsl#28 + eor r5,r9,r5,lsr#4 + eor r5,r5,r6,lsl#28 + ldrh r8,[sp,r12] @ rem_4bit[rem] + eor r6,r10,r6,lsr#4 + ldrplb r12,[r2,r3] + eor r6,r6,r7,lsl#28 + eor r7,r11,r7,lsr#4 + + add r11,r1,r14 + and r14,r4,#0xf @ rem + eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] + add r14,r14,r14 + ldmia r11,{r8-r11} @ load Htbl[nhi] + eor r4,r8,r4,lsr#4 + ldrplb r8,[r0,r3] + eor r4,r4,r5,lsl#28 + eor r5,r9,r5,lsr#4 + ldrh r9,[sp,r14] + eor r5,r5,r6,lsl#28 + eor r6,r10,r6,lsr#4 + eor r6,r6,r7,lsl#28 + eorpl r12,r12,r8 + eor r7,r11,r7,lsr#4 + andpl r14,r12,#0xf0 + andpl r12,r12,#0x0f + eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem] + bpl .Linner + + ldr r3,[sp,#32] @ re-load r3/end + add r2,r2,#16 + mov r14,r4 +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r4,r4 + str r4,[r0,#12] +#elif defined(__ARMEB__) + str r4,[r0,#12] +#else + mov r9,r4,lsr#8 + strb r4,[r0,#12+3] + mov r10,r4,lsr#16 + strb r9,[r0,#12+2] + mov r11,r4,lsr#24 + strb r10,[r0,#12+1] + strb r11,[r0,#12] +#endif + cmp r2,r3 +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r5,r5 + str r5,[r0,#8] +#elif defined(__ARMEB__) + str r5,[r0,#8] +#else + mov r9,r5,lsr#8 + strb r5,[r0,#8+3] + mov r10,r5,lsr#16 + strb r9,[r0,#8+2] + mov r11,r5,lsr#24 + strb r10,[r0,#8+1] + strb r11,[r0,#8] 
+#endif + ldrneb r12,[r2,#15] +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r6,r6 + str r6,[r0,#4] +#elif defined(__ARMEB__) + str r6,[r0,#4] +#else + mov r9,r6,lsr#8 + strb r6,[r0,#4+3] + mov r10,r6,lsr#16 + strb r9,[r0,#4+2] + mov r11,r6,lsr#24 + strb r10,[r0,#4+1] + strb r11,[r0,#4] +#endif + + +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r7,r7 + str r7,[r0,#0] +#elif defined(__ARMEB__) + str r7,[r0,#0] +#else + mov r9,r7,lsr#8 + strb r7,[r0,#0+3] + mov r10,r7,lsr#16 + strb r9,[r0,#0+2] + mov r11,r7,lsr#24 + strb r10,[r0,#0+1] + strb r11,[r0,#0] +#endif + + + bne .Louter + + add sp,sp,#36 +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r11,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size gcm_ghash_4bit,.-gcm_ghash_4bit + +.global gcm_gmult_4bit +.type gcm_gmult_4bit,%function +gcm_gmult_4bit: + stmdb sp!,{r4-r11,lr} + ldrb r12,[r0,#15] + b rem_4bit_get +.Lrem_4bit_got: + and r14,r12,#0xf0 + and r12,r12,#0x0f + mov r3,#14 + + add r7,r1,r12,lsl#4 + ldmia r7,{r4-r7} @ load Htbl[nlo] + ldrb r12,[r0,#14] + + add r11,r1,r14 + and r14,r4,#0xf @ rem + ldmia r11,{r8-r11} @ load Htbl[nhi] + add r14,r14,r14 + eor r4,r8,r4,lsr#4 + ldrh r8,[r2,r14] @ rem_4bit[rem] + eor r4,r4,r5,lsl#28 + eor r5,r9,r5,lsr#4 + eor r5,r5,r6,lsl#28 + eor r6,r10,r6,lsr#4 + eor r6,r6,r7,lsl#28 + eor r7,r11,r7,lsr#4 + and r14,r12,#0xf0 + eor r7,r7,r8,lsl#16 + and r12,r12,#0x0f + +.Loop: + add r11,r1,r12,lsl#4 + and r12,r4,#0xf @ rem + subs r3,r3,#1 + add r12,r12,r12 + ldmia r11,{r8-r11} @ load Htbl[nlo] + eor r4,r8,r4,lsr#4 + eor r4,r4,r5,lsl#28 + eor r5,r9,r5,lsr#4 + eor r5,r5,r6,lsl#28 + ldrh r8,[r2,r12] @ rem_4bit[rem] + eor r6,r10,r6,lsr#4 + ldrplb r12,[r0,r3] + eor r6,r6,r7,lsl#28 + eor r7,r11,r7,lsr#4 + + add r11,r1,r14 + and r14,r4,#0xf @ rem + eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] + add r14,r14,r14 + ldmia r11,{r8-r11} @ load Htbl[nhi] + eor r4,r8,r4,lsr#4 + eor r4,r4,r5,lsl#28 + 
eor r5,r9,r5,lsr#4 + ldrh r8,[r2,r14] @ rem_4bit[rem] + eor r5,r5,r6,lsl#28 + eor r6,r10,r6,lsr#4 + eor r6,r6,r7,lsl#28 + eor r7,r11,r7,lsr#4 + andpl r14,r12,#0xf0 + andpl r12,r12,#0x0f + eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem] + bpl .Loop +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r4,r4 + str r4,[r0,#12] +#elif defined(__ARMEB__) + str r4,[r0,#12] +#else + mov r9,r4,lsr#8 + strb r4,[r0,#12+3] + mov r10,r4,lsr#16 + strb r9,[r0,#12+2] + mov r11,r4,lsr#24 + strb r10,[r0,#12+1] + strb r11,[r0,#12] +#endif + + +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r5,r5 + str r5,[r0,#8] +#elif defined(__ARMEB__) + str r5,[r0,#8] +#else + mov r9,r5,lsr#8 + strb r5,[r0,#8+3] + mov r10,r5,lsr#16 + strb r9,[r0,#8+2] + mov r11,r5,lsr#24 + strb r10,[r0,#8+1] + strb r11,[r0,#8] +#endif + + +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r6,r6 + str r6,[r0,#4] +#elif defined(__ARMEB__) + str r6,[r0,#4] +#else + mov r9,r6,lsr#8 + strb r6,[r0,#4+3] + mov r10,r6,lsr#16 + strb r9,[r0,#4+2] + mov r11,r6,lsr#24 + strb r10,[r0,#4+1] + strb r11,[r0,#4] +#endif + + +#if __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r7,r7 + str r7,[r0,#0] +#elif defined(__ARMEB__) + str r7,[r0,#0] +#else + mov r9,r7,lsr#8 + strb r7,[r0,#0+3] + mov r10,r7,lsr#16 + strb r9,[r0,#0+2] + mov r11,r7,lsr#24 + strb r10,[r0,#0+1] + strb r11,[r0,#0] +#endif + + +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r11,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size gcm_gmult_4bit,.-gcm_gmult_4bit +#if __ARM_ARCH__>=7 +.fpu neon + +.global gcm_gmult_neon +.type gcm_gmult_neon,%function +.align 4 +gcm_gmult_neon: + sub r1,#16 @ point at H in GCM128_CTX + vld1.64 d29,[r0,:64]!@ load Xi + vmov.i32 d5,#0xe1 @ our irreducible polynomial + vld1.64 d28,[r0,:64]! 
+ vshr.u64 d5,#32 + vldmia r1,{d0-d1} @ load H + veor q12,q12 +#ifdef __ARMEL__ + vrev64.8 q14,q14 +#endif + veor q13,q13 + veor q11,q11 + mov r1,#16 + veor q10,q10 + mov r3,#16 + veor d2,d2 + vdup.8 d4,d28[0] @ broadcast lowest byte + b .Linner_neon +.size gcm_gmult_neon,.-gcm_gmult_neon + +.global gcm_ghash_neon +.type gcm_ghash_neon,%function +.align 4 +gcm_ghash_neon: + vld1.64 d21,[r0,:64]! @ load Xi + vmov.i32 d5,#0xe1 @ our irreducible polynomial + vld1.64 d20,[r0,:64]! + vshr.u64 d5,#32 + vldmia r0,{d0-d1} @ load H + veor q12,q12 + nop +#ifdef __ARMEL__ + vrev64.8 q10,q10 +#endif +.Louter_neon: + vld1.64 d29,[r2]! @ load inp + veor q13,q13 + vld1.64 d28,[r2]! + veor q11,q11 + mov r1,#16 +#ifdef __ARMEL__ + vrev64.8 q14,q14 +#endif + veor d2,d2 + veor q14,q10 @ inp^=Xi + veor q10,q10 + vdup.8 d4,d28[0] @ broadcast lowest byte +.Linner_neon: + subs r1,r1,#1 + vmull.p8 q9,d1,d4 @ H.loXi[i] + vmull.p8 q8,d0,d4 @ H.hiXi[i] + vext.8 q14,q12,#1 @ IN>>=8 + + veor q10,q13 @ modulo-scheduled part + vshl.i64 d22,#48 + vdup.8 d4,d28[0] @ broadcast lowest byte + veor d3,d18,d20 + + veor d21,d22 + vuzp.8 q9,q8 + vsli.8 d2,d3,#1 @ compose the "carry" byte + vext.8 q10,q12,#1 @ Z>>=8 + + vmull.p8 q11,d2,d5 @ "carry"0xe1 + vshr.u8 d2,d3,#7 @ save Z's bottom bit + vext.8 q13,q9,q12,#1 @ Qlo>>=8 + veor q10,q8 + bne .Linner_neon + + veor q10,q13 @ modulo-scheduled artefact + vshl.i64 d22,#48 + veor d21,d22 + + @ finalization, normalize Z:Zo + vand d2,d5 @ suffices to mask the bit + vshr.u64 d3,d20,#63 + vshl.i64 q10,#1 + subs r3,#16 + vorr q10,q1 @ Z=Z:Zo<<1 + bne .Louter_neon + +#ifdef __ARMEL__ + vrev64.8 q10,q10 +#endif + sub r0,#16 + + vst1.64 d21,[r0,:64]! 
@ write out Xi + vst1.64 d20,[r0,:64] + + .word 0xe12fff1e +.size gcm_ghash_neon,.-gcm_ghash_neon +#endif +.asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" +.align 2 diff -Nru nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,452 @@ +#include "arm_arch.h" + +.text + +.global sha1_block_data_order +.type sha1_block_data_order,%function + +.align 2 +sha1_block_data_order: + stmdb sp!,{r4-r12,lr} + add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 + ldmia r0,{r3,r4,r5,r6,r7} +.Lloop: + ldr r8,.LK_00_19 + mov r14,sp + sub sp,sp,#15*4 + mov r5,r5,ror#30 + mov r6,r6,ror#30 + mov r7,r7,ror#30 @ [6] +.L_00_15: +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] + add r7,r8,r7,ror#2 @ E+=K_00_19 + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r5,r6 @ F_xx_xx + orr r9,r9,r11,lsl#16 + add r7,r7,r3,ror#27 @ E+=ROR(A,27) + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r7,r8,r7,ror#2 @ E+=K_00_19 + eor r10,r5,r6 @ F_xx_xx + add r7,r7,r3,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif + and r10,r4,r10,ror#2 + add r7,r7,r9 @ E+=X[i] + eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) + str r9,[r14,#-4]! 
+ add r7,r7,r10 @ E+=F_00_19(B,C,D) +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] + add r6,r8,r6,ror#2 @ E+=K_00_19 + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r4,r5 @ F_xx_xx + orr r9,r9,r11,lsl#16 + add r6,r6,r7,ror#27 @ E+=ROR(A,27) + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r6,r8,r6,ror#2 @ E+=K_00_19 + eor r10,r4,r5 @ F_xx_xx + add r6,r6,r7,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif + and r10,r3,r10,ror#2 + add r6,r6,r9 @ E+=X[i] + eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) + str r9,[r14,#-4]! + add r6,r6,r10 @ E+=F_00_19(B,C,D) +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] + add r5,r8,r5,ror#2 @ E+=K_00_19 + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r3,r4 @ F_xx_xx + orr r9,r9,r11,lsl#16 + add r5,r5,r6,ror#27 @ E+=ROR(A,27) + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r5,r8,r5,ror#2 @ E+=K_00_19 + eor r10,r3,r4 @ F_xx_xx + add r5,r5,r6,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif + and r10,r7,r10,ror#2 + add r5,r5,r9 @ E+=X[i] + eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) + str r9,[r14,#-4]! + add r5,r5,r10 @ E+=F_00_19(B,C,D) +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] + add r4,r8,r4,ror#2 @ E+=K_00_19 + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r7,r3 @ F_xx_xx + orr r9,r9,r11,lsl#16 + add r4,r4,r5,ror#27 @ E+=ROR(A,27) + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r4,r8,r4,ror#2 @ E+=K_00_19 + eor r10,r7,r3 @ F_xx_xx + add r4,r4,r5,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif + and r10,r6,r10,ror#2 + add r4,r4,r9 @ E+=X[i] + eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) + str r9,[r14,#-4]! 
+ add r4,r4,r10 @ E+=F_00_19(B,C,D) +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] + add r3,r8,r3,ror#2 @ E+=K_00_19 + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r6,r7 @ F_xx_xx + orr r9,r9,r11,lsl#16 + add r3,r3,r4,ror#27 @ E+=ROR(A,27) + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r3,r8,r3,ror#2 @ E+=K_00_19 + eor r10,r6,r7 @ F_xx_xx + add r3,r3,r4,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif + and r10,r5,r10,ror#2 + add r3,r3,r9 @ E+=X[i] + eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) + str r9,[r14,#-4]! + add r3,r3,r10 @ E+=F_00_19(B,C,D) + teq r14,sp + bne .L_00_15 @ [((11+4)*5+2)*3] + sub sp,sp,#25*4 +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] + add r7,r8,r7,ror#2 @ E+=K_00_19 + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r5,r6 @ F_xx_xx + orr r9,r9,r11,lsl#16 + add r7,r7,r3,ror#27 @ E+=ROR(A,27) + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r7,r8,r7,ror#2 @ E+=K_00_19 + eor r10,r5,r6 @ F_xx_xx + add r7,r7,r3,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif + and r10,r4,r10,ror#2 + add r7,r7,r9 @ E+=X[i] + eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) + str r9,[r14,#-4]! + add r7,r7,r10 @ E+=F_00_19(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r6,r8,r6,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r4,r5 @ F_xx_xx + mov r9,r9,ror#31 + add r6,r6,r7,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! 
+ and r10,r3,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r6,r6,r9 @ E+=X[i] + eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) + add r6,r6,r10 @ E+=F_00_19(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r5,r8,r5,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r3,r4 @ F_xx_xx + mov r9,r9,ror#31 + add r5,r5,r6,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r7,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r5,r5,r9 @ E+=X[i] + eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) + add r5,r5,r10 @ E+=F_00_19(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r4,r8,r4,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r7,r3 @ F_xx_xx + mov r9,r9,ror#31 + add r4,r4,r5,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r6,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r4,r4,r9 @ E+=X[i] + eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) + add r4,r4,r10 @ E+=F_00_19(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r3,r8,r3,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r6,r7 @ F_xx_xx + mov r9,r9,ror#31 + add r3,r3,r4,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r5,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r3,r3,r9 @ E+=X[i] + eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) + add r3,r3,r10 @ E+=F_00_19(B,C,D) + + ldr r8,.LK_20_39 @ [+15+16*4] + cmn sp,#0 @ [+3], clear carry to denote 20_39 +.L_20_39_or_60_79: + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r7,r8,r7,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r5,r6 @ F_xx_xx + mov r9,r9,ror#31 + add r7,r7,r3,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! 
+ eor r10,r4,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r7,r7,r9 @ E+=X[i] + add r7,r7,r10 @ E+=F_20_39(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r6,r8,r6,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r4,r5 @ F_xx_xx + mov r9,r9,ror#31 + add r6,r6,r7,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + eor r10,r3,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r6,r6,r9 @ E+=X[i] + add r6,r6,r10 @ E+=F_20_39(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r5,r8,r5,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r3,r4 @ F_xx_xx + mov r9,r9,ror#31 + add r5,r5,r6,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + eor r10,r7,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r5,r5,r9 @ E+=X[i] + add r5,r5,r10 @ E+=F_20_39(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r4,r8,r4,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r7,r3 @ F_xx_xx + mov r9,r9,ror#31 + add r4,r4,r5,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + eor r10,r6,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r4,r4,r9 @ E+=X[i] + add r4,r4,r10 @ E+=F_20_39(B,C,D) + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r3,r8,r3,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r6,r7 @ F_xx_xx + mov r9,r9,ror#31 + add r3,r3,r4,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! 
+ eor r10,r5,r10,ror#2 @ F_xx_xx + @ F_xx_xx + add r3,r3,r9 @ E+=X[i] + add r3,r3,r10 @ E+=F_20_39(B,C,D) + teq r14,sp @ preserve carry + bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] + bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes + + ldr r8,.LK_40_59 + sub sp,sp,#20*4 @ [+2] +.L_40_59: + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r7,r8,r7,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r5,r6 @ F_xx_xx + mov r9,r9,ror#31 + add r7,r7,r3,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r4,r10,ror#2 @ F_xx_xx + and r11,r5,r6 @ F_xx_xx + add r7,r7,r9 @ E+=X[i] + add r7,r7,r10 @ E+=F_40_59(B,C,D) + add r7,r7,r11,ror#2 + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r6,r8,r6,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r4,r5 @ F_xx_xx + mov r9,r9,ror#31 + add r6,r6,r7,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r3,r10,ror#2 @ F_xx_xx + and r11,r4,r5 @ F_xx_xx + add r6,r6,r9 @ E+=X[i] + add r6,r6,r10 @ E+=F_40_59(B,C,D) + add r6,r6,r11,ror#2 + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r5,r8,r5,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r3,r4 @ F_xx_xx + mov r9,r9,ror#31 + add r5,r5,r6,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r7,r10,ror#2 @ F_xx_xx + and r11,r3,r4 @ F_xx_xx + add r5,r5,r9 @ E+=X[i] + add r5,r5,r10 @ E+=F_40_59(B,C,D) + add r5,r5,r11,ror#2 + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r4,r8,r4,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r7,r3 @ F_xx_xx + mov r9,r9,ror#31 + add r4,r4,r5,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! 
+ and r10,r6,r10,ror#2 @ F_xx_xx + and r11,r7,r3 @ F_xx_xx + add r4,r4,r9 @ E+=X[i] + add r4,r4,r10 @ E+=F_40_59(B,C,D) + add r4,r4,r11,ror#2 + ldr r9,[r14,#15*4] + ldr r10,[r14,#13*4] + ldr r11,[r14,#7*4] + add r3,r8,r3,ror#2 @ E+=K_xx_xx + ldr r12,[r14,#2*4] + eor r9,r9,r10 + eor r11,r11,r12 @ 1 cycle stall + eor r10,r6,r7 @ F_xx_xx + mov r9,r9,ror#31 + add r3,r3,r4,ror#27 @ E+=ROR(A,27) + eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! + and r10,r5,r10,ror#2 @ F_xx_xx + and r11,r6,r7 @ F_xx_xx + add r3,r3,r9 @ E+=X[i] + add r3,r3,r10 @ E+=F_40_59(B,C,D) + add r3,r3,r11,ror#2 + teq r14,sp + bne .L_40_59 @ [+((12+5)*5+2)*4] + + ldr r8,.LK_60_79 + sub sp,sp,#20*4 + cmp sp,#0 @ set carry to denote 60_79 + b .L_20_39_or_60_79 @ [+4], spare 300 bytes +.L_done: + add sp,sp,#80*4 @ "deallocate" stack frame + ldmia r0,{r8,r9,r10,r11,r12} + add r3,r8,r3 + add r4,r9,r4 + add r5,r10,r5,ror#2 + add r6,r11,r6,ror#2 + add r7,r12,r7,ror#2 + stmia r0,{r3,r4,r5,r6,r7} + teq r1,r2 + bne .Lloop @ [+18], total 1307 + +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.align 2 +.LK_00_19: .word 0x5a827999 +.LK_20_39: .word 0x6ed9eba1 +.LK_40_59: .word 0x8f1bbcdc +.LK_60_79: .word 0xca62c1d6 +.size sha1_block_data_order,.-sha1_block_data_order +.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>" +.align 2 diff -Nru nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1517 @@ +#include "arm_arch.h" + +.text +.code 32 + +.type K256,%object +.align 5 +K256: +.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 +.word 
0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 +.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 +.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 +.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc +.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da +.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 +.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 +.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 +.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 +.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 +.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 +.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 +.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 +.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 +.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 +.size K256,.-K256 + +.global sha256_block_data_order +.type sha256_block_data_order,%function +sha256_block_data_order: + sub r3,pc,#8 @ sha256_block_data_order + add r2,r1,r2,lsl#6 @ len to point at the end of inp + stmdb sp!,{r0,r1,r2,r4-r11,lr} + ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} + sub r14,r3,#256 @ K256 + sub sp,sp,#16*4 @ alloca(X[16]) +.Loop: +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 0 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r8,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r8,ror#11 + eor r2,r9,r10 +#if 0>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 0==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) + and r2,r2,r8 + str r3,[sp,#0*4] + add r3,r3,r0 + eor r2,r2,r10 @ Ch(e,f,g) + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 0>=15 + ldr r1,[sp,#2*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 +#if __ARM_ARCH__>=7 + 
ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 1 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 + eor r2,r8,r9 +#if 1>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 1==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) + and r2,r2,r7 + str r3,[sp,#1*4] + add r3,r3,r0 + eor r2,r2,r9 @ Ch(e,f,g) + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 1>=15 + ldr r1,[sp,#3*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 2 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r6,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r6,ror#11 + eor r2,r7,r8 +#if 2>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 2==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) + and r2,r2,r6 + str r3,[sp,#2*4] + add r3,r3,r0 + eor r2,r2,r8 @ Ch(e,f,g) + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 2>=15 + ldr r1,[sp,#4*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 3 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 + eor r2,r6,r7 +#if 3>=16 + add 
r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 3==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) + and r2,r2,r5 + str r3,[sp,#3*4] + add r3,r3,r0 + eor r2,r2,r7 @ Ch(e,f,g) + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 3>=15 + ldr r1,[sp,#5*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 4 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r4,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r4,ror#11 + eor r2,r5,r6 +#if 4>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 4==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) + and r2,r2,r4 + str r3,[sp,#4*4] + add r3,r3,r0 + eor r2,r2,r6 @ Ch(e,f,g) + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 4>=15 + ldr r1,[sp,#6*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 5 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 + eor r2,r4,r5 +#if 5>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 5==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) + and r2,r2,r11 + str r3,[sp,#5*4] + add r3,r3,r0 + eor r2,r2,r5 @ Ch(e,f,g) + add r3,r3,r6 + mov 
r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 5>=15 + ldr r1,[sp,#7*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 6 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r10,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r10,ror#11 + eor r2,r11,r4 +#if 6>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 6==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) + and r2,r2,r10 + str r3,[sp,#6*4] + add r3,r3,r0 + eor r2,r2,r4 @ Ch(e,f,g) + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 6>=15 + ldr r1,[sp,#8*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 7 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 + eor r2,r10,r11 +#if 7>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 7==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) + and r2,r2,r9 + str r3,[sp,#7*4] + add r3,r3,r0 + eor r2,r2,r11 @ Ch(e,f,g) + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 7>=15 + ldr r1,[sp,#9*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 +#if 
__ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 8 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r8,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r8,ror#11 + eor r2,r9,r10 +#if 8>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 8==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) + and r2,r2,r8 + str r3,[sp,#8*4] + add r3,r3,r0 + eor r2,r2,r10 @ Ch(e,f,g) + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 8>=15 + ldr r1,[sp,#10*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 9 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 + eor r2,r8,r9 +#if 9>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 9==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) + and r2,r2,r7 + str r3,[sp,#9*4] + add r3,r3,r0 + eor r2,r2,r9 @ Ch(e,f,g) + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 9>=15 + ldr r1,[sp,#11*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 10 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r6,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r6,ror#11 + eor 
r2,r7,r8 +#if 10>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 10==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) + and r2,r2,r6 + str r3,[sp,#10*4] + add r3,r3,r0 + eor r2,r2,r8 @ Ch(e,f,g) + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 10>=15 + ldr r1,[sp,#12*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 11 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 + eor r2,r6,r7 +#if 11>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 11==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) + and r2,r2,r5 + str r3,[sp,#11*4] + add r3,r3,r0 + eor r2,r2,r7 @ Ch(e,f,g) + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 11>=15 + ldr r1,[sp,#13*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 12 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r4,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r4,ror#11 + eor r2,r5,r6 +#if 12>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 12==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) + and r2,r2,r4 + str r3,[sp,#12*4] + add r3,r3,r0 + eor 
r2,r2,r6 @ Ch(e,f,g) + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 12>=15 + ldr r1,[sp,#14*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 13 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 + eor r2,r4,r5 +#if 13>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 13==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) + and r2,r2,r11 + str r3,[sp,#13*4] + add r3,r3,r0 + eor r2,r2,r5 @ Ch(e,f,g) + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 13>=15 + ldr r1,[sp,#15*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 14 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r10,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r10,ror#11 + eor r2,r11,r4 +#if 14>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 14==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) + and r2,r2,r10 + str r3,[sp,#14*4] + add r3,r3,r0 + eor r2,r2,r4 @ Ch(e,f,g) + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 14>=15 + ldr r1,[sp,#0*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr 
r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else + ldrb r3,[r1,#3] @ 15 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 + eor r2,r10,r11 +#if 15>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 15==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) + and r2,r2,r9 + str r3,[sp,#15*4] + add r3,r3,r0 + eor r2,r2,r11 @ Ch(e,f,g) + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 15>=15 + ldr r1,[sp,#1*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 +.Lrounds_16_xx: + @ ldr r1,[sp,#1*4] @ 16 + ldr r12,[sp,#14*4] + mov r0,r1,ror#7 + ldr r3,[sp,#0*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#9*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r8,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r8,ror#11 + eor r2,r9,r10 +#if 16>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 16==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) + and r2,r2,r8 + str r3,[sp,#0*4] + add r3,r3,r0 + eor r2,r2,r10 @ Ch(e,f,g) + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 16>=15 + ldr r1,[sp,#2*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 + @ ldr r1,[sp,#2*4] @ 17 + ldr r12,[sp,#15*4] + mov r0,r1,ror#7 + ldr r3,[sp,#1*4] + eor r0,r0,r1,ror#18 
+ ldr r2,[sp,#10*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 + eor r2,r8,r9 +#if 17>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 17==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) + and r2,r2,r7 + str r3,[sp,#1*4] + add r3,r3,r0 + eor r2,r2,r9 @ Ch(e,f,g) + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 17>=15 + ldr r1,[sp,#3*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 + @ ldr r1,[sp,#3*4] @ 18 + ldr r12,[sp,#0*4] + mov r0,r1,ror#7 + ldr r3,[sp,#2*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#11*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r6,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r6,ror#11 + eor r2,r7,r8 +#if 18>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 18==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) + and r2,r2,r6 + str r3,[sp,#2*4] + add r3,r3,r0 + eor r2,r2,r8 @ Ch(e,f,g) + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 18>=15 + ldr r1,[sp,#4*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 + @ ldr r1,[sp,#4*4] @ 19 + ldr r12,[sp,#1*4] + mov r0,r1,ror#7 + ldr r3,[sp,#3*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#12*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov 
r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 + eor r2,r6,r7 +#if 19>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 19==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) + and r2,r2,r5 + str r3,[sp,#3*4] + add r3,r3,r0 + eor r2,r2,r7 @ Ch(e,f,g) + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 19>=15 + ldr r1,[sp,#5*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 + @ ldr r1,[sp,#5*4] @ 20 + ldr r12,[sp,#2*4] + mov r0,r1,ror#7 + ldr r3,[sp,#4*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#13*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r4,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r4,ror#11 + eor r2,r5,r6 +#if 20>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 20==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) + and r2,r2,r4 + str r3,[sp,#4*4] + add r3,r3,r0 + eor r2,r2,r6 @ Ch(e,f,g) + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 20>=15 + ldr r1,[sp,#6*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 + @ ldr r1,[sp,#6*4] @ 21 + ldr r12,[sp,#3*4] + mov r0,r1,ror#7 + ldr r3,[sp,#5*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#14*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor 
r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 + eor r2,r4,r5 +#if 21>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 21==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) + and r2,r2,r11 + str r3,[sp,#5*4] + add r3,r3,r0 + eor r2,r2,r5 @ Ch(e,f,g) + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 21>=15 + ldr r1,[sp,#7*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 + @ ldr r1,[sp,#7*4] @ 22 + ldr r12,[sp,#4*4] + mov r0,r1,ror#7 + ldr r3,[sp,#6*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#15*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r10,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r10,ror#11 + eor r2,r11,r4 +#if 22>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 22==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) + and r2,r2,r10 + str r3,[sp,#6*4] + add r3,r3,r0 + eor r2,r2,r4 @ Ch(e,f,g) + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 22>=15 + ldr r1,[sp,#8*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 + @ ldr r1,[sp,#8*4] @ 23 + ldr r12,[sp,#5*4] + mov r0,r1,ror#7 + ldr r3,[sp,#7*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#0*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r9,ror#6 + ldr 
r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 + eor r2,r10,r11 +#if 23>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 23==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) + and r2,r2,r9 + str r3,[sp,#7*4] + add r3,r3,r0 + eor r2,r2,r11 @ Ch(e,f,g) + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 23>=15 + ldr r1,[sp,#9*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 + @ ldr r1,[sp,#9*4] @ 24 + ldr r12,[sp,#6*4] + mov r0,r1,ror#7 + ldr r3,[sp,#8*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#1*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r8,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r8,ror#11 + eor r2,r9,r10 +#if 24>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 24==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) + and r2,r2,r8 + str r3,[sp,#8*4] + add r3,r3,r0 + eor r2,r2,r10 @ Ch(e,f,g) + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 24>=15 + ldr r1,[sp,#10*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 + @ ldr r1,[sp,#10*4] @ 25 + ldr r12,[sp,#7*4] + mov r0,r1,ror#7 + ldr r3,[sp,#9*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#2*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 + eor r2,r8,r9 +#if 25>=16 + add 
r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 25==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) + and r2,r2,r7 + str r3,[sp,#9*4] + add r3,r3,r0 + eor r2,r2,r9 @ Ch(e,f,g) + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 25>=15 + ldr r1,[sp,#11*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 + @ ldr r1,[sp,#11*4] @ 26 + ldr r12,[sp,#8*4] + mov r0,r1,ror#7 + ldr r3,[sp,#10*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#3*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r6,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r6,ror#11 + eor r2,r7,r8 +#if 26>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 26==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) + and r2,r2,r6 + str r3,[sp,#10*4] + add r3,r3,r0 + eor r2,r2,r8 @ Ch(e,f,g) + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 26>=15 + ldr r1,[sp,#12*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 + @ ldr r1,[sp,#12*4] @ 27 + ldr r12,[sp,#9*4] + mov r0,r1,ror#7 + ldr r3,[sp,#11*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#4*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 + eor r2,r6,r7 +#if 27>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && 
defined(__ARMEL__) + rev r3,r3 +#endif +#if 27==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) + and r2,r2,r5 + str r3,[sp,#11*4] + add r3,r3,r0 + eor r2,r2,r7 @ Ch(e,f,g) + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 27>=15 + ldr r1,[sp,#13*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 + @ ldr r1,[sp,#13*4] @ 28 + ldr r12,[sp,#10*4] + mov r0,r1,ror#7 + ldr r3,[sp,#12*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#5*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r4,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r4,ror#11 + eor r2,r5,r6 +#if 28>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 28==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) + and r2,r2,r4 + str r3,[sp,#12*4] + add r3,r3,r0 + eor r2,r2,r6 @ Ch(e,f,g) + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 28>=15 + ldr r1,[sp,#14*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 + @ ldr r1,[sp,#14*4] @ 29 + ldr r12,[sp,#11*4] + mov r0,r1,ror#7 + ldr r3,[sp,#13*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#6*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 + eor r2,r4,r5 +#if 29>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 29==15 + str r1,[sp,#17*4] @ 
leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) + and r2,r2,r11 + str r3,[sp,#13*4] + add r3,r3,r0 + eor r2,r2,r5 @ Ch(e,f,g) + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 29>=15 + ldr r1,[sp,#15*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 + @ ldr r1,[sp,#15*4] @ 30 + ldr r12,[sp,#12*4] + mov r0,r1,ror#7 + ldr r3,[sp,#14*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#7*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r10,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r10,ror#11 + eor r2,r11,r4 +#if 30>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 30==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) + and r2,r2,r10 + str r3,[sp,#14*4] + add r3,r3,r0 + eor r2,r2,r4 @ Ch(e,f,g) + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 30>=15 + ldr r1,[sp,#0*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 + @ ldr r1,[sp,#0*4] @ 31 + ldr r12,[sp,#13*4] + mov r0,r1,ror#7 + ldr r3,[sp,#15*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#8*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 + eor r2,r10,r11 +#if 31>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 31==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) + and r2,r2,r9 + 
str r3,[sp,#15*4] + add r3,r3,r0 + eor r2,r2,r11 @ Ch(e,f,g) + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 31>=15 + ldr r1,[sp,#1*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 + and r12,r12,#0xff + cmp r12,#0xf2 + bne .Lrounds_16_xx + + ldr r3,[sp,#16*4] @ pull ctx + ldr r0,[r3,#0] + ldr r2,[r3,#4] + ldr r12,[r3,#8] + add r4,r4,r0 + ldr r0,[r3,#12] + add r5,r5,r2 + ldr r2,[r3,#16] + add r6,r6,r12 + ldr r12,[r3,#20] + add r7,r7,r0 + ldr r0,[r3,#24] + add r8,r8,r2 + ldr r2,[r3,#28] + add r9,r9,r12 + ldr r1,[sp,#17*4] @ pull inp + ldr r12,[sp,#18*4] @ pull inp+len + add r10,r10,r0 + add r11,r11,r2 + stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} + cmp r1,r12 + sub r14,r14,#256 @ rewind Ktbl + bne .Loop + + add sp,sp,#19*4 @ destroy frame +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r11,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size sha256_block_data_order,.-sha256_block_data_order +.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>" +.align 2 diff -Nru nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S --- nodejs-0.11.13/deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1783 @@ +#include "arm_arch.h" +#ifdef __ARMEL__ +# define LO 0 +# define HI 4 +# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 +#else +# define HI 0 +# define LO 4 +# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 +#endif + +.text +.code 32 +.type K512,%object +.align 5 +K512: +WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) +WORD64(0xb5c0fbcf,0xec4d3b2f, 
0xe9b5dba5,0x8189dbbc) +WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) +WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) +WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) +WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) +WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) +WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) +WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) +WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) +WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) +WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) +WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) +WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) +WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) +WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) +WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) +WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) +WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) +WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) +WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) +WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) +WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) +WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) +WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) +WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) +WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) +WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) +WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) +WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) +WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) +WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) +WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) +WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) +WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) +WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) +WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) +WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) +WORD64(0x4cc5d4be,0xcb3e42b6, 
0x597f299c,0xfc657e2a) +WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) +.size K512,.-K512 +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-sha512_block_data_order +.skip 32-4 + +.global sha512_block_data_order +.type sha512_block_data_order,%function +sha512_block_data_order: + sub r3,pc,#8 @ sha512_block_data_order + add r2,r1,r2,lsl#7 @ len to point at the end of inp +#if __ARM_ARCH__>=7 + ldr r12,.LOPENSSL_armcap + ldr r12,[r3,r12] @ OPENSSL_armcap_P + tst r12,#1 + bne .LNEON +#endif + stmdb sp!,{r4-r12,lr} + sub r14,r3,#672 @ K512 + sub sp,sp,#9*8 + + ldr r7,[r0,#32+LO] + ldr r8,[r0,#32+HI] + ldr r9, [r0,#48+LO] + ldr r10, [r0,#48+HI] + ldr r11, [r0,#56+LO] + ldr r12, [r0,#56+HI] +.Loop: + str r9, [sp,#48+0] + str r10, [sp,#48+4] + str r11, [sp,#56+0] + str r12, [sp,#56+4] + ldr r5,[r0,#0+LO] + ldr r6,[r0,#0+HI] + ldr r3,[r0,#8+LO] + ldr r4,[r0,#8+HI] + ldr r9, [r0,#16+LO] + ldr r10, [r0,#16+HI] + ldr r11, [r0,#24+LO] + ldr r12, [r0,#24+HI] + str r3,[sp,#8+0] + str r4,[sp,#8+4] + str r9, [sp,#16+0] + str r10, [sp,#16+4] + str r11, [sp,#24+0] + str r12, [sp,#24+4] + ldr r3,[r0,#40+LO] + ldr r4,[r0,#40+HI] + str r3,[sp,#40+0] + str r4,[sp,#40+4] + +.L00_15: +#if __ARM_ARCH__<7 + ldrb r3,[r1,#7] + ldrb r9, [r1,#6] + ldrb r10, [r1,#5] + ldrb r11, [r1,#4] + ldrb r4,[r1,#3] + ldrb r12, [r1,#2] + orr r3,r3,r9,lsl#8 + ldrb r9, [r1,#1] + orr r3,r3,r10,lsl#16 + ldrb r10, [r1],#8 + orr r3,r3,r11,lsl#24 + orr r4,r4,r12,lsl#8 + orr r4,r4,r9,lsl#16 + orr r4,r4,r10,lsl#24 +#else + ldr r3,[r1,#4] + ldr r4,[r1],#8 +#ifdef __ARMEL__ + rev r3,r3 + rev r4,r4 +#endif +#endif + @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) + @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 + @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 + mov r9,r7,lsr#14 + str r3,[sp,#64+0] + mov r10,r8,lsr#14 + str r4,[sp,#64+4] + eor r9,r9,r8,lsl#18 + ldr r11,[sp,#56+0] @ h.lo + eor r10,r10,r7,lsl#18 + ldr r12,[sp,#56+4] @ h.hi + eor r9,r9,r7,lsr#18 + eor r10,r10,r8,lsr#18 + eor r9,r9,r8,lsl#14 + 
eor r10,r10,r7,lsl#14 + eor r9,r9,r8,lsr#9 + eor r10,r10,r7,lsr#9 + eor r9,r9,r7,lsl#23 + eor r10,r10,r8,lsl#23 @ Sigma1(e) + adds r3,r3,r9 + ldr r9,[sp,#40+0] @ f.lo + adc r4,r4,r10 @ T += Sigma1(e) + ldr r10,[sp,#40+4] @ f.hi + adds r3,r3,r11 + ldr r11,[sp,#48+0] @ g.lo + adc r4,r4,r12 @ T += h + ldr r12,[sp,#48+4] @ g.hi + + eor r9,r9,r11 + str r7,[sp,#32+0] + eor r10,r10,r12 + str r8,[sp,#32+4] + and r9,r9,r7 + str r5,[sp,#0+0] + and r10,r10,r8 + str r6,[sp,#0+4] + eor r9,r9,r11 + ldr r11,[r14,#LO] @ K[i].lo + eor r10,r10,r12 @ Ch(e,f,g) + ldr r12,[r14,#HI] @ K[i].hi + + adds r3,r3,r9 + ldr r7,[sp,#24+0] @ d.lo + adc r4,r4,r10 @ T += Ch(e,f,g) + ldr r8,[sp,#24+4] @ d.hi + adds r3,r3,r11 + and r9,r11,#0xff + adc r4,r4,r12 @ T += K[i] + adds r7,r7,r3 + ldr r11,[sp,#8+0] @ b.lo + adc r8,r8,r4 @ d += T + teq r9,#148 + + ldr r12,[sp,#16+0] @ c.lo + orreq r14,r14,#1 + @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) + @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 + @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 + mov r9,r5,lsr#28 + mov r10,r6,lsr#28 + eor r9,r9,r6,lsl#4 + eor r10,r10,r5,lsl#4 + eor r9,r9,r6,lsr#2 + eor r10,r10,r5,lsr#2 + eor r9,r9,r5,lsl#30 + eor r10,r10,r6,lsl#30 + eor r9,r9,r6,lsr#7 + eor r10,r10,r5,lsr#7 + eor r9,r9,r5,lsl#25 + eor r10,r10,r6,lsl#25 @ Sigma0(a) + adds r3,r3,r9 + and r9,r5,r11 + adc r4,r4,r10 @ T += Sigma0(a) + + ldr r10,[sp,#8+4] @ b.hi + orr r5,r5,r11 + ldr r11,[sp,#16+4] @ c.hi + and r5,r5,r12 + and r12,r6,r10 + orr r6,r6,r10 + orr r5,r5,r9 @ Maj(a,b,c).lo + and r6,r6,r11 + adds r5,r5,r3 + orr r6,r6,r12 @ Maj(a,b,c).hi + sub sp,sp,#8 + adc r6,r6,r4 @ h += T + tst r14,#1 + add r14,r14,#8 + tst r14,#1 + beq .L00_15 + ldr r9,[sp,#184+0] + ldr r10,[sp,#184+4] + bic r14,r14,#1 +.L16_79: + @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) + @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 + @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 + mov r3,r9,lsr#1 + ldr r11,[sp,#80+0] + mov r4,r10,lsr#1 + ldr r12,[sp,#80+4] + eor 
r3,r3,r10,lsl#31 + eor r4,r4,r9,lsl#31 + eor r3,r3,r9,lsr#8 + eor r4,r4,r10,lsr#8 + eor r3,r3,r10,lsl#24 + eor r4,r4,r9,lsl#24 + eor r3,r3,r9,lsr#7 + eor r4,r4,r10,lsr#7 + eor r3,r3,r10,lsl#25 + + @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) + @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 + @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 + mov r9,r11,lsr#19 + mov r10,r12,lsr#19 + eor r9,r9,r12,lsl#13 + eor r10,r10,r11,lsl#13 + eor r9,r9,r12,lsr#29 + eor r10,r10,r11,lsr#29 + eor r9,r9,r11,lsl#3 + eor r10,r10,r12,lsl#3 + eor r9,r9,r11,lsr#6 + eor r10,r10,r12,lsr#6 + ldr r11,[sp,#120+0] + eor r9,r9,r12,lsl#26 + + ldr r12,[sp,#120+4] + adds r3,r3,r9 + ldr r9,[sp,#192+0] + adc r4,r4,r10 + + ldr r10,[sp,#192+4] + adds r3,r3,r11 + adc r4,r4,r12 + adds r3,r3,r9 + adc r4,r4,r10 + @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) + @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 + @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 + mov r9,r7,lsr#14 + str r3,[sp,#64+0] + mov r10,r8,lsr#14 + str r4,[sp,#64+4] + eor r9,r9,r8,lsl#18 + ldr r11,[sp,#56+0] @ h.lo + eor r10,r10,r7,lsl#18 + ldr r12,[sp,#56+4] @ h.hi + eor r9,r9,r7,lsr#18 + eor r10,r10,r8,lsr#18 + eor r9,r9,r8,lsl#14 + eor r10,r10,r7,lsl#14 + eor r9,r9,r8,lsr#9 + eor r10,r10,r7,lsr#9 + eor r9,r9,r7,lsl#23 + eor r10,r10,r8,lsl#23 @ Sigma1(e) + adds r3,r3,r9 + ldr r9,[sp,#40+0] @ f.lo + adc r4,r4,r10 @ T += Sigma1(e) + ldr r10,[sp,#40+4] @ f.hi + adds r3,r3,r11 + ldr r11,[sp,#48+0] @ g.lo + adc r4,r4,r12 @ T += h + ldr r12,[sp,#48+4] @ g.hi + + eor r9,r9,r11 + str r7,[sp,#32+0] + eor r10,r10,r12 + str r8,[sp,#32+4] + and r9,r9,r7 + str r5,[sp,#0+0] + and r10,r10,r8 + str r6,[sp,#0+4] + eor r9,r9,r11 + ldr r11,[r14,#LO] @ K[i].lo + eor r10,r10,r12 @ Ch(e,f,g) + ldr r12,[r14,#HI] @ K[i].hi + + adds r3,r3,r9 + ldr r7,[sp,#24+0] @ d.lo + adc r4,r4,r10 @ T += Ch(e,f,g) + ldr r8,[sp,#24+4] @ d.hi + adds r3,r3,r11 + and r9,r11,#0xff + adc r4,r4,r12 @ T += K[i] + adds r7,r7,r3 + ldr r11,[sp,#8+0] @ b.lo + adc 
r8,r8,r4 @ d += T + teq r9,#23 + + ldr r12,[sp,#16+0] @ c.lo + orreq r14,r14,#1 + @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) + @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 + @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 + mov r9,r5,lsr#28 + mov r10,r6,lsr#28 + eor r9,r9,r6,lsl#4 + eor r10,r10,r5,lsl#4 + eor r9,r9,r6,lsr#2 + eor r10,r10,r5,lsr#2 + eor r9,r9,r5,lsl#30 + eor r10,r10,r6,lsl#30 + eor r9,r9,r6,lsr#7 + eor r10,r10,r5,lsr#7 + eor r9,r9,r5,lsl#25 + eor r10,r10,r6,lsl#25 @ Sigma0(a) + adds r3,r3,r9 + and r9,r5,r11 + adc r4,r4,r10 @ T += Sigma0(a) + + ldr r10,[sp,#8+4] @ b.hi + orr r5,r5,r11 + ldr r11,[sp,#16+4] @ c.hi + and r5,r5,r12 + and r12,r6,r10 + orr r6,r6,r10 + orr r5,r5,r9 @ Maj(a,b,c).lo + and r6,r6,r11 + adds r5,r5,r3 + orr r6,r6,r12 @ Maj(a,b,c).hi + sub sp,sp,#8 + adc r6,r6,r4 @ h += T + tst r14,#1 + add r14,r14,#8 + ldreq r9,[sp,#184+0] + ldreq r10,[sp,#184+4] + beq .L16_79 + bic r14,r14,#1 + + ldr r3,[sp,#8+0] + ldr r4,[sp,#8+4] + ldr r9, [r0,#0+LO] + ldr r10, [r0,#0+HI] + ldr r11, [r0,#8+LO] + ldr r12, [r0,#8+HI] + adds r9,r5,r9 + str r9, [r0,#0+LO] + adc r10,r6,r10 + str r10, [r0,#0+HI] + adds r11,r3,r11 + str r11, [r0,#8+LO] + adc r12,r4,r12 + str r12, [r0,#8+HI] + + ldr r5,[sp,#16+0] + ldr r6,[sp,#16+4] + ldr r3,[sp,#24+0] + ldr r4,[sp,#24+4] + ldr r9, [r0,#16+LO] + ldr r10, [r0,#16+HI] + ldr r11, [r0,#24+LO] + ldr r12, [r0,#24+HI] + adds r9,r5,r9 + str r9, [r0,#16+LO] + adc r10,r6,r10 + str r10, [r0,#16+HI] + adds r11,r3,r11 + str r11, [r0,#24+LO] + adc r12,r4,r12 + str r12, [r0,#24+HI] + + ldr r3,[sp,#40+0] + ldr r4,[sp,#40+4] + ldr r9, [r0,#32+LO] + ldr r10, [r0,#32+HI] + ldr r11, [r0,#40+LO] + ldr r12, [r0,#40+HI] + adds r7,r7,r9 + str r7,[r0,#32+LO] + adc r8,r8,r10 + str r8,[r0,#32+HI] + adds r11,r3,r11 + str r11, [r0,#40+LO] + adc r12,r4,r12 + str r12, [r0,#40+HI] + + ldr r5,[sp,#48+0] + ldr r6,[sp,#48+4] + ldr r3,[sp,#56+0] + ldr r4,[sp,#56+4] + ldr r9, [r0,#48+LO] + ldr r10, [r0,#48+HI] + ldr r11, [r0,#56+LO] + 
ldr r12, [r0,#56+HI] + adds r9,r5,r9 + str r9, [r0,#48+LO] + adc r10,r6,r10 + str r10, [r0,#48+HI] + adds r11,r3,r11 + str r11, [r0,#56+LO] + adc r12,r4,r12 + str r12, [r0,#56+HI] + + add sp,sp,#640 + sub r14,r14,#640 + + teq r1,r2 + bne .Loop + + add sp,sp,#8*9 @ destroy frame +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +#if __ARM_ARCH__>=7 +.fpu neon + +.align 4 +.LNEON: + dmb @ errata #451034 on early Cortex A8 + vstmdb sp!,{d8-d15} @ ABI specification says so + sub r3,r3,#672 @ K512 + vldmia r0,{d16-d23} @ load context +.Loop_neon: + vshr.u64 d24,d20,#14 @ 0 +#if 0<16 + vld1.64 {d0},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d20,#18 + vshr.u64 d26,d20,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 0<16 && defined(__ARMEL__) + vrev64.8 d0,d0 +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d0 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 1 +#if 1<16 + vld1.64 {d1},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 1<16 && defined(__ARMEL__) + vrev64.8 d1,d1 +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d1 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 d24,d18,#14 @ 2 +#if 2<16 + vld1.64 {d2},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d18,#18 + vshr.u64 d26,d18,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 2<16 && defined(__ARMEL__) + vrev64.8 d2,d2 +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d2 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 3 +#if 3<16 + vld1.64 {d3},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 3<16 && defined(__ARMEL__) + vrev64.8 d3,d3 +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d3 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 d24,d16,#14 @ 4 +#if 4<16 + vld1.64 {d4},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d16,#18 + vshr.u64 d26,d16,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 4<16 && defined(__ARMEL__) + vrev64.8 d4,d4 +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d4 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 5 +#if 5<16 + vld1.64 {d5},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 5<16 && defined(__ARMEL__) + vrev64.8 d5,d5 +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d5 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 d24,d22,#14 @ 6 +#if 6<16 + vld1.64 {d6},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d22,#18 + vshr.u64 d26,d22,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 6<16 && defined(__ARMEL__) + vrev64.8 d6,d6 +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d6 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 7 +#if 7<16 + vld1.64 {d7},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 7<16 && defined(__ARMEL__) + vrev64.8 d7,d7 +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d7 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + vshr.u64 d24,d20,#14 @ 8 +#if 8<16 + vld1.64 {d8},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d20,#18 + vshr.u64 d26,d20,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 8<16 && defined(__ARMEL__) + vrev64.8 d8,d8 +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d8 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 9 +#if 9<16 + vld1.64 {d9},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 9<16 && defined(__ARMEL__) + vrev64.8 d9,d9 +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d9 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 d24,d18,#14 @ 10 +#if 10<16 + vld1.64 {d10},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d18,#18 + vshr.u64 d26,d18,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 10<16 && defined(__ARMEL__) + vrev64.8 d10,d10 +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d10 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 11 +#if 11<16 + vld1.64 {d11},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 11<16 && defined(__ARMEL__) + vrev64.8 d11,d11 +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d11 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 d24,d16,#14 @ 12 +#if 12<16 + vld1.64 {d12},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d16,#18 + vshr.u64 d26,d16,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 12<16 && defined(__ARMEL__) + vrev64.8 d12,d12 +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d12 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 13 +#if 13<16 + vld1.64 {d13},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 13<16 && defined(__ARMEL__) + vrev64.8 d13,d13 +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d13 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 d24,d22,#14 @ 14 +#if 14<16 + vld1.64 {d14},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d22,#18 + vshr.u64 d26,d22,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 14<16 && defined(__ARMEL__) + vrev64.8 d14,d14 +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d14 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 15 +#if 15<16 + vld1.64 {d15},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 15<16 && defined(__ARMEL__) + vrev64.8 d15,d15 +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d15 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + mov r12,#4 +.L16_79_neon: + subs r12,#1 + vshr.u64 q12,q7,#19 + vshr.u64 q13,q7,#61 + vshr.u64 q15,q7,#6 + vsli.64 q12,q7,#45 + vext.8 q14,q0,q1,#8 @ X[i+1] + vsli.64 q13,q7,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q0,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q4,q5,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d20,#14 @ from NEON_00_15 + vadd.i64 q0,q14 + vshr.u64 d25,d20,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d20,#41 @ from NEON_00_15 + vadd.i64 q0,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 16<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d0 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 17 +#if 17<16 + vld1.64 {d1},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 17<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d1 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 q12,q0,#19 + vshr.u64 q13,q0,#61 + vshr.u64 q15,q0,#6 + vsli.64 q12,q0,#45 + vext.8 q14,q1,q2,#8 @ X[i+1] + vsli.64 q13,q0,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q1,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q5,q6,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d18,#14 @ from NEON_00_15 + vadd.i64 q1,q14 + vshr.u64 d25,d18,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d18,#41 @ from NEON_00_15 + vadd.i64 q1,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 18<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d2 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 19 +#if 19<16 + vld1.64 {d3},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 19<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d3 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 q12,q1,#19 + vshr.u64 q13,q1,#61 + vshr.u64 q15,q1,#6 + vsli.64 q12,q1,#45 + vext.8 q14,q2,q3,#8 @ X[i+1] + vsli.64 q13,q1,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q2,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q6,q7,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d16,#14 @ from NEON_00_15 + vadd.i64 q2,q14 + vshr.u64 d25,d16,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d16,#41 @ from NEON_00_15 + vadd.i64 q2,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 20<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d4 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 21 +#if 21<16 + vld1.64 {d5},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 21<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d5 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 q12,q2,#19 + vshr.u64 q13,q2,#61 + vshr.u64 q15,q2,#6 + vsli.64 q12,q2,#45 + vext.8 q14,q3,q4,#8 @ X[i+1] + vsli.64 q13,q2,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q3,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q7,q0,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d22,#14 @ from NEON_00_15 + vadd.i64 q3,q14 + vshr.u64 d25,d22,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d22,#41 @ from NEON_00_15 + vadd.i64 q3,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 22<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d6 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 23 +#if 23<16 + vld1.64 {d7},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 23<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d7 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + vshr.u64 q12,q3,#19 + vshr.u64 q13,q3,#61 + vshr.u64 q15,q3,#6 + vsli.64 q12,q3,#45 + vext.8 q14,q4,q5,#8 @ X[i+1] + vsli.64 q13,q3,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q4,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q0,q1,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d20,#14 @ from NEON_00_15 + vadd.i64 q4,q14 + vshr.u64 d25,d20,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d20,#41 @ from NEON_00_15 + vadd.i64 q4,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 24<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d8 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 25 +#if 25<16 + vld1.64 {d9},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 25<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d9 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 q12,q4,#19 + vshr.u64 q13,q4,#61 + vshr.u64 q15,q4,#6 + vsli.64 q12,q4,#45 + vext.8 q14,q5,q6,#8 @ X[i+1] + vsli.64 q13,q4,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q5,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q1,q2,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d18,#14 @ from NEON_00_15 + vadd.i64 q5,q14 + vshr.u64 d25,d18,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d18,#41 @ from NEON_00_15 + vadd.i64 q5,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 26<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d10 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 27 +#if 27<16 + vld1.64 {d11},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 27<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d11 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 q12,q5,#19 + vshr.u64 q13,q5,#61 + vshr.u64 q15,q5,#6 + vsli.64 q12,q5,#45 + vext.8 q14,q6,q7,#8 @ X[i+1] + vsli.64 q13,q5,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q6,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q2,q3,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d16,#14 @ from NEON_00_15 + vadd.i64 q6,q14 + vshr.u64 d25,d16,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d16,#41 @ from NEON_00_15 + vadd.i64 q6,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 28<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d12 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 29 +#if 29<16 + vld1.64 {d13},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 29<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d13 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 q12,q6,#19 + vshr.u64 q13,q6,#61 + vshr.u64 q15,q6,#6 + vsli.64 q12,q6,#45 + vext.8 q14,q7,q0,#8 @ X[i+1] + vsli.64 q13,q6,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q7,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q3,q4,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d22,#14 @ from NEON_00_15 + vadd.i64 q7,q14 + vshr.u64 d25,d22,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d22,#41 @ from NEON_00_15 + vadd.i64 q7,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 30<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d14 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 31 +#if 31<16 + vld1.64 {d15},[r1]! 
@ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 31<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d15 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + bne .L16_79_neon + + vldmia r0,{d24-d31} @ load context to temp + vadd.i64 q8,q12 @ vectorized accumulate + vadd.i64 q9,q13 + vadd.i64 q10,q14 + vadd.i64 q11,q15 + vstmia r0,{d16-d23} @ save context + teq r1,r2 + sub r3,#640 @ rewind K512 + bne .Loop_neon + + vldmia sp!,{d8-d15} @ epilogue + .word 0xe12fff1e +#endif +.size sha512_block_data_order,.-sha512_block_data_order +.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" +.align 2 +.comm OPENSSL_armcap_P,4,4 diff -Nru nodejs-0.11.13/deps/openssl/asm/Makefile nodejs-0.11.15/deps/openssl/asm/Makefile --- nodejs-0.11.13/deps/openssl/asm/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -42,6 +42,13 @@ x64-elf-gas/whrlpool/wp-x86_64.s \ x64-elf-gas/modes/ghash-x86_64.s \ x64-elf-gas/x86_64cpuid.s \ + arm-elf-gas/aes/aes-armv4.S \ + arm-elf-gas/bn/armv4-mont.S \ + arm-elf-gas/bn/armv4-gf2m.S \ + arm-elf-gas/sha/sha1-armv4-large.S \ + arm-elf-gas/sha/sha256-armv4.S \ + arm-elf-gas/sha/sha512-armv4.S \ + arm-elf-gas/modes/ghash-armv4.S \ x86-macosx-gas/aes/aes-586.s \ x86-macosx-gas/aes/aesni-x86.s \ x86-macosx-gas/aes/vpaes-x86.s \ @@ -121,7 +128,7 @@ x64-win32-masm/modes/ghash-x86_64.asm \ 
x64-win32-masm/x86_64cpuid.asm \ -x64-elf-gas/%.s x86-elf-gas/%.s: +arm-elf-gas/%.S x64-elf-gas/%.s x86-elf-gas/%.s: $(PERL) $< elf > $@ x64-macosx-gas/%.s x86-macosx-gas/%.s: @@ -262,3 +269,10 @@ x86-win32-masm/whrlpool/wp-mmx.asm: ../openssl/crypto/whrlpool/asm/wp-mmx.pl x86-win32-masm/modes/ghash-x86.asm: ../openssl/crypto/modes/asm/ghash-x86.pl x86-win32-masm/x86cpuid.asm: ../openssl/crypto/x86cpuid.pl +arm-elf-gas/aes/aes-armv4.S: ../openssl/crypto/aes/asm/aes-armv4.pl +arm-elf-gas/bn/armv4-mont.S: ../openssl/crypto/bn/asm/armv4-mont.pl +arm-elf-gas/bn/armv4-gf2m.S: ../openssl/crypto/bn/asm/armv4-gf2m.pl +arm-elf-gas/sha/sha1-armv4-large.S: ../openssl/crypto/sha/asm/sha1-armv4-large.pl +arm-elf-gas/sha/sha512-armv4.S: ../openssl/crypto/sha/asm/sha512-armv4.pl +arm-elf-gas/sha/sha256-armv4.S: ../openssl/crypto/sha/asm/sha256-armv4.pl +arm-elf-gas/modes/ghash-armv4.S: ../openssl/crypto/modes/asm/ghash-armv4.pl diff -Nru nodejs-0.11.13/deps/openssl/asm/x64-win32-masm/aes/aesni-x86_64.asm nodejs-0.11.15/deps/openssl/asm/x64-win32-masm/aes/aesni-x86_64.asm --- nodejs-0.11.13/deps/openssl/asm/x64-win32-masm/aes/aesni-x86_64.asm 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/asm/x64-win32-masm/aes/aesni-x86_64.asm 2015-01-20 21:22:17.000000000 +0000 @@ -454,6 +454,12 @@ mov r8,QWORD PTR[40+rsp] + lea rsp,QWORD PTR[((-88))+rsp] + movaps XMMWORD PTR[rsp],xmm6 + movaps XMMWORD PTR[16+rsp],xmm7 + movaps XMMWORD PTR[32+rsp],xmm8 + movaps XMMWORD PTR[48+rsp],xmm9 +$L$ecb_enc_body:: and rdx,-16 jz $L$ecb_ret @@ -752,6 +758,12 @@ movups XMMWORD PTR[80+rsi],xmm7 $L$ecb_ret:: + movaps xmm6,XMMWORD PTR[rsp] + movaps xmm7,XMMWORD PTR[16+rsp] + movaps xmm8,XMMWORD PTR[32+rsp] + movaps xmm9,XMMWORD PTR[48+rsp] + lea rsp,QWORD PTR[88+rsp] +$L$ecb_enc_ret:: mov rdi,QWORD PTR[8+rsp] ;WIN64 epilogue mov rsi,QWORD PTR[16+rsp] DB 0F3h,0C3h ;repret @@ -2766,26 +2778,7 @@ EXTERN __imp_RtlVirtualUnwind:NEAR ALIGN 16 -ecb_se_handler PROC PRIVATE - push rsi - push rdi - 
push rbx - push rbp - push r12 - push r13 - push r14 - push r15 - pushfq - sub rsp,64 - - mov rax,QWORD PTR[152+r8] - - jmp $L$common_seh_tail -ecb_se_handler ENDP - - -ALIGN 16 -ccm64_se_handler PROC PRIVATE +ecb_ccm64_se_handler PROC PRIVATE push rsi push rdi push rbx @@ -2823,7 +2816,7 @@ lea rax,QWORD PTR[88+rax] jmp $L$common_seh_tail -ccm64_se_handler ENDP +ecb_ccm64_se_handler ENDP ALIGN 16 @@ -3026,15 +3019,17 @@ ALIGN 8 $L$SEH_info_ecb:: DB 9,0,0,0 - DD imagerel ecb_se_handler + DD imagerel ecb_ccm64_se_handler + DD imagerel $L$ecb_enc_body,imagerel $L$ecb_enc_ret + $L$SEH_info_ccm64_enc:: DB 9,0,0,0 - DD imagerel ccm64_se_handler + DD imagerel ecb_ccm64_se_handler DD imagerel $L$ccm64_enc_body,imagerel $L$ccm64_enc_ret $L$SEH_info_ccm64_dec:: DB 9,0,0,0 - DD imagerel ccm64_se_handler + DD imagerel ecb_ccm64_se_handler DD imagerel $L$ccm64_dec_body,imagerel $L$ccm64_dec_ret $L$SEH_info_ctr32:: diff -Nru nodejs-0.11.13/deps/openssl/openssl/ACKNOWLEDGMENTS nodejs-0.11.15/deps/openssl/openssl/ACKNOWLEDGMENTS --- nodejs-0.11.13/deps/openssl/openssl/ACKNOWLEDGMENTS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ACKNOWLEDGMENTS 2015-01-20 21:22:17.000000000 +0000 @@ -10,13 +10,18 @@ We would like to identify and thank the following such sponsors for their past or current significant support of the OpenSSL project: +Major support: + + Qualys http://www.qualys.com/ + Very significant support: - OpenGear: www.opengear.com + OpenGear: http://www.opengear.com/ Significant support: - PSW Group: www.psw.net + PSW Group: http://www.psw.net/ + Acano Ltd. http://acano.com/ Please note that we ask permission to identify sponsors and that some sponsors we consider eligible for inclusion here have requested to remain anonymous. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/apps.c nodejs-0.11.15/deps/openssl/openssl/apps/apps.c --- nodejs-0.11.13/deps/openssl/openssl/apps/apps.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/apps.c 2015-01-20 21:22:17.000000000 +0000 @@ -390,6 +390,8 @@ { arg->count=20; arg->data=(char **)OPENSSL_malloc(sizeof(char *)*arg->count); + if (arg->data == NULL) + return 0; } for (i=0; i<arg->count; i++) arg->data[i]=NULL; @@ -1542,6 +1544,8 @@ len=strlen(t)+strlen(OPENSSL_CONF)+2; p=OPENSSL_malloc(len); + if (p == NULL) + return NULL; BUF_strlcpy(p,t,len); #ifndef OPENSSL_SYS_VMS BUF_strlcat(p,"/",len); diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/ca.c nodejs-0.11.15/deps/openssl/openssl/apps/ca.c --- nodejs-0.11.13/deps/openssl/openssl/apps/ca.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/ca.c 2015-01-20 21:22:17.000000000 +0000 @@ -1620,12 +1620,14 @@ { ok=0; BIO_printf(bio_err,"Signature verification problems....\n"); + ERR_print_errors(bio_err); goto err; } if (i == 0) { ok=0; BIO_printf(bio_err,"Signature did not match the certificate request\n"); + ERR_print_errors(bio_err); goto err; } else @@ -2777,6 +2779,9 @@ revtm = X509_gmtime_adj(NULL, 0); + if (!revtm) + return NULL; + i = revtm->length + 1; if (reason) i += strlen(reason) + 1; diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/ciphers.c nodejs-0.11.15/deps/openssl/openssl/apps/ciphers.c --- nodejs-0.11.13/deps/openssl/openssl/apps/ciphers.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/ciphers.c 2015-01-20 21:22:17.000000000 +0000 @@ -96,13 +96,7 @@ char buf[512]; BIO *STDout=NULL; -#if !defined(OPENSSL_NO_SSL2) && !defined(OPENSSL_NO_SSL3) meth=SSLv23_server_method(); -#elif !defined(OPENSSL_NO_SSL3) - meth=SSLv3_server_method(); -#elif !defined(OPENSSL_NO_SSL2) - meth=SSLv2_server_method(); -#endif apps_startup(); diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/crl2p7.c 
nodejs-0.11.15/deps/openssl/openssl/apps/crl2p7.c --- nodejs-0.11.13/deps/openssl/openssl/apps/crl2p7.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/crl2p7.c 2015-01-20 21:22:17.000000000 +0000 @@ -141,7 +141,13 @@ { if (--argc < 1) goto bad; if(!certflst) certflst = sk_OPENSSL_STRING_new_null(); - sk_OPENSSL_STRING_push(certflst,*(++argv)); + if (!certflst) + goto end; + if (!sk_OPENSSL_STRING_push(certflst,*(++argv))) + { + sk_OPENSSL_STRING_free(certflst); + goto end; + } } else { diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/enc.c nodejs-0.11.15/deps/openssl/openssl/apps/enc.c --- nodejs-0.11.13/deps/openssl/openssl/apps/enc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/enc.c 2015-01-20 21:22:17.000000000 +0000 @@ -67,7 +67,9 @@ #include <openssl/x509.h> #include <openssl/rand.h> #include <openssl/pem.h> +#ifndef OPENSSL_NO_COMP #include <openssl/comp.h> +#endif #include <ctype.h> int set_hex(char *in,unsigned char *out,int size); @@ -331,6 +333,18 @@ setup_engine(bio_err, engine, 0); #endif + if (cipher && EVP_CIPHER_flags(cipher) & EVP_CIPH_FLAG_AEAD_CIPHER) + { + BIO_printf(bio_err, "AEAD ciphers not supported by the enc utility\n"); + goto end; + } + + if (cipher && (EVP_CIPHER_mode(cipher) == EVP_CIPH_XTS_MODE)) + { + BIO_printf(bio_err, "Ciphers in XTS mode are not supported by the enc utility\n"); + goto end; + } + if (md && (dgst=EVP_get_digestbyname(md)) == NULL) { BIO_printf(bio_err,"%s is an unsupported message digest type\n",md); diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/makeapps.com nodejs-0.11.15/deps/openssl/openssl/apps/makeapps.com --- nodejs-0.11.13/deps/openssl/openssl/apps/makeapps.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/makeapps.com 2015-01-20 21:22:17.000000000 +0000 @@ -773,9 +773,12 @@ $ IF F$TYPE(USER_CCDEFS) .NES. 
"" THEN CCDEFS = CCDEFS + "," + USER_CCDEFS $ CCEXTRAFLAGS = "" $ IF F$TYPE(USER_CCFLAGS) .NES. "" THEN CCEXTRAFLAGS = USER_CCFLAGS -$ CCDISABLEWARNINGS = "" !!! "LONGLONGTYPE,LONGLONGSUFX,FOUNDCR" -$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" THEN - - CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," + USER_CCDISABLEWARNINGS +$ CCDISABLEWARNINGS = "" !!! "MAYLOSEDATA3" !!! "LONGLONGTYPE,LONGLONGSUFX,FOUNDCR" +$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS + USER_CCDISABLEWARNINGS +$ ENDIF $! $! Check To See If We Have A ZLIB Option. $! @@ -1064,6 +1067,18 @@ $! $ IF COMPILER .EQS. "DECC" $ THEN +$! Not all compiler versions support MAYLOSEDATA3. +$ OPT_TEST = "MAYLOSEDATA3" +$ DEFINE /USER_MODE SYS$ERROR NL: +$ DEFINE /USER_MODE SYS$OUTPUT NL: +$ 'CC' /NOCROSS_REFERENCE /NOLIST /NOOBJECT - + /WARNINGS = DISABLE = ('OPT_TEST', EMPTYFILE) NL: +$ IF ($SEVERITY) +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN - + CCDISABLEWARNINGS = CCDISABLEWARNINGS+ "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS+ OPT_TEST +$ ENDIF $ IF CCDISABLEWARNINGS .NES. "" $ THEN $ CCDISABLEWARNINGS = " /WARNING=(DISABLE=(" + CCDISABLEWARNINGS + "))" diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/Makefile.save nodejs-0.11.15/deps/openssl/openssl/apps/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/apps/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,1056 +0,0 @@ -# -# apps/Makefile -# - -DIR= apps -TOP= .. -CC= cc -INCLUDES= -I$(TOP) -I../include $(KRB5_INCLUDES) -CFLAG= -g -static -MAKEFILE= Makefile -PERL= perl -RM= rm -f -# KRB5 stuff -KRB5_INCLUDES= -LIBKRB5= - -PEX_LIBS= -EX_LIBS= -EXE_EXT= - -SHLIB_TARGET= - -CFLAGS= -DMONOLITH $(INCLUDES) $(CFLAG) - -GENERAL=Makefile makeapps.com install.com - -DLIBCRYPTO=../libcrypto.a -DLIBSSL=../libssl.a -LIBCRYPTO=-L.. 
-lcrypto -LIBSSL=-L.. -lssl - -PROGRAM= openssl - -SCRIPTS=CA.sh CA.pl tsget - -EXE= $(PROGRAM)$(EXE_EXT) - -E_EXE= verify asn1pars req dgst dh dhparam enc passwd gendh errstr \ - ca crl rsa rsautl dsa dsaparam ec ecparam \ - x509 genrsa gendsa genpkey s_server s_client speed \ - s_time version pkcs7 cms crl2pkcs7 sess_id ciphers nseq pkcs12 \ - pkcs8 pkey pkeyparam pkeyutl spkac smime rand engine ocsp prime ts srp - -PROGS= $(PROGRAM).c - -A_OBJ=apps.o -A_SRC=apps.c -S_OBJ= s_cb.o s_socket.o -S_SRC= s_cb.c s_socket.c -RAND_OBJ=app_rand.o -RAND_SRC=app_rand.c - -E_OBJ= verify.o asn1pars.o req.o dgst.o dh.o dhparam.o enc.o passwd.o gendh.o errstr.o \ - ca.o pkcs7.o crl2p7.o crl.o \ - rsa.o rsautl.o dsa.o dsaparam.o ec.o ecparam.o \ - x509.o genrsa.o gendsa.o genpkey.o s_server.o s_client.o speed.o \ - s_time.o $(A_OBJ) $(S_OBJ) $(RAND_OBJ) version.o sess_id.o \ - ciphers.o nseq.o pkcs12.o pkcs8.o pkey.o pkeyparam.o pkeyutl.o \ - spkac.o smime.o cms.o rand.o engine.o ocsp.o prime.o ts.o srp.o - -E_SRC= verify.c asn1pars.c req.c dgst.c dh.c enc.c passwd.c gendh.c errstr.c ca.c \ - pkcs7.c crl2p7.c crl.c \ - rsa.c rsautl.c dsa.c dsaparam.c ec.c ecparam.c \ - x509.c genrsa.c gendsa.c genpkey.c s_server.c s_client.c speed.c \ - s_time.c $(A_SRC) $(S_SRC) $(RAND_SRC) version.c sess_id.c \ - ciphers.c nseq.c pkcs12.c pkcs8.c pkey.c pkeyparam.c pkeyutl.c \ - spkac.c smime.c cms.c rand.c engine.c ocsp.c prime.c ts.c srp.c - -SRC=$(E_SRC) - -EXHEADER= -HEADER= apps.h progs.h s_apps.h \ - testdsa.h testrsa.h \ - $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - @(cd ..; $(MAKE) DIRS=$(DIR) all) - -all: exe - -exe: $(EXE) - -req: sreq.o $(A_OBJ) $(DLIBCRYPTO) - shlib_target=; if [ -n "$(SHARED_LIBS)" ]; then \ - shlib_target="$(SHLIB_TARGET)"; \ - fi; \ - $(MAKE) -f $(TOP)/Makefile.shared -e \ - APPNAME=req OBJECTS="sreq.o $(A_OBJ) $(RAND_OBJ)" \ - LIBDEPS="$(PEX_LIBS) $(LIBCRYPTO) $(EX_LIBS)" \ - link_app.$${shlib_target} - -sreq.o: req.c - $(CC) -c $(INCLUDES) 
$(CFLAG) -o sreq.o req.c - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @set -e; for i in $(EXE); \ - do \ - (echo installing $$i; \ - cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/bin/$$i.new; \ - chmod 755 $(INSTALL_PREFIX)$(INSTALLTOP)/bin/$$i.new; \ - mv -f $(INSTALL_PREFIX)$(INSTALLTOP)/bin/$$i.new $(INSTALL_PREFIX)$(INSTALLTOP)/bin/$$i ); \ - done; - @set -e; for i in $(SCRIPTS); \ - do \ - (echo installing $$i; \ - cp $$i $(INSTALL_PREFIX)$(OPENSSLDIR)/misc/$$i.new; \ - chmod 755 $(INSTALL_PREFIX)$(OPENSSLDIR)/misc/$$i.new; \ - mv -f $(INSTALL_PREFIX)$(OPENSSLDIR)/misc/$$i.new $(INSTALL_PREFIX)$(OPENSSLDIR)/misc/$$i ); \ - done - @cp openssl.cnf $(INSTALL_PREFIX)$(OPENSSLDIR)/openssl.cnf.new; \ - chmod 644 $(INSTALL_PREFIX)$(OPENSSLDIR)/openssl.cnf.new; \ - mv -f $(INSTALL_PREFIX)$(OPENSSLDIR)/openssl.cnf.new $(INSTALL_PREFIX)$(OPENSSLDIR)/openssl.cnf - -tags: - ctags $(SRC) - -tests: - -links: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @if [ -z "$(THIS)" ]; then \ - $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; \ - else \ - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(SRC); \ - fi - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - rm -f CA.pl - -clean: - rm -f *.o *.obj *.dll lib tags core .pure .nfs* *.old *.bak fluff $(EXE) - rm -f req - -$(DLIBSSL): - (cd ..; $(MAKE) DIRS=ssl all) - -$(DLIBCRYPTO): - (cd ..; $(MAKE) DIRS=crypto all) - -$(EXE): progs.h $(E_OBJ) $(PROGRAM).o $(DLIBCRYPTO) $(DLIBSSL) - $(RM) $(EXE) - shlib_target=; if [ -n "$(SHARED_LIBS)" ]; then \ - shlib_target="$(SHLIB_TARGET)"; \ - elif [ -n "$(FIPSCANLIB)" ]; then \ - FIPSLD_CC="$(CC)"; CC=$(FIPSDIR)/bin/fipsld; export CC FIPSLD_CC; \ - fi; \ - LIBRARIES="$(LIBSSL) $(LIBKRB5) $(LIBCRYPTO)" ; \ - $(MAKE) -f $(TOP)/Makefile.shared -e \ - APPNAME=$(EXE) OBJECTS="$(PROGRAM).o 
$(E_OBJ)" \ - LIBDEPS="$(PEX_LIBS) $$LIBRARIES $(EX_LIBS)" \ - link_app.$${shlib_target} - @(cd ..; $(MAKE) rehash) - -progs.h: progs.pl - $(PERL) progs.pl $(E_EXE) >progs.h - $(RM) $(PROGRAM).o - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -app_rand.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -app_rand.o: ../include/openssl/buffer.h ../include/openssl/conf.h -app_rand.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -app_rand.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -app_rand.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -app_rand.o: ../include/openssl/evp.h ../include/openssl/lhash.h -app_rand.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -app_rand.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -app_rand.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -app_rand.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -app_rand.o: ../include/openssl/safestack.h ../include/openssl/sha.h -app_rand.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -app_rand.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -app_rand.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h -app_rand.o: app_rand.c apps.h -apps.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -apps.o: ../include/openssl/bn.h ../include/openssl/buffer.h -apps.o: ../include/openssl/conf.h ../include/openssl/crypto.h -apps.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -apps.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -apps.o: ../include/openssl/engine.h ../include/openssl/err.h -apps.o: ../include/openssl/evp.h ../include/openssl/lhash.h -apps.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -apps.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -apps.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -apps.o: ../include/openssl/pem.h ../include/openssl/pem2.h -apps.o: ../include/openssl/pkcs12.h 
../include/openssl/pkcs7.h -apps.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -apps.o: ../include/openssl/sha.h ../include/openssl/stack.h -apps.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -apps.o: ../include/openssl/ui.h ../include/openssl/x509.h -apps.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.c apps.h -asn1pars.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -asn1pars.o: ../include/openssl/buffer.h ../include/openssl/conf.h -asn1pars.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -asn1pars.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -asn1pars.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -asn1pars.o: ../include/openssl/err.h ../include/openssl/evp.h -asn1pars.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -asn1pars.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -asn1pars.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -asn1pars.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -asn1pars.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -asn1pars.o: ../include/openssl/safestack.h ../include/openssl/sha.h -asn1pars.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -asn1pars.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -asn1pars.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -asn1pars.o: asn1pars.c -ca.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ca.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ca.o: ../include/openssl/conf.h ../include/openssl/crypto.h -ca.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ca.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ca.o: ../include/openssl/engine.h ../include/openssl/err.h -ca.o: ../include/openssl/evp.h ../include/openssl/lhash.h -ca.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ca.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -ca.o: 
../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ca.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ca.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -ca.o: ../include/openssl/sha.h ../include/openssl/stack.h -ca.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -ca.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -ca.o: ../include/openssl/x509v3.h apps.h ca.c -ciphers.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ciphers.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ciphers.o: ../include/openssl/conf.h ../include/openssl/crypto.h -ciphers.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ciphers.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ciphers.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ciphers.o: ../include/openssl/err.h ../include/openssl/evp.h -ciphers.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ciphers.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ciphers.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -ciphers.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ciphers.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ciphers.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ciphers.o: ../include/openssl/pqueue.h ../include/openssl/safestack.h -ciphers.o: ../include/openssl/sha.h ../include/openssl/srtp.h -ciphers.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -ciphers.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -ciphers.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ciphers.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -ciphers.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -ciphers.o: ../include/openssl/x509v3.h apps.h ciphers.c -cms.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -cms.o: ../include/openssl/buffer.h ../include/openssl/cms.h -cms.o: ../include/openssl/conf.h ../include/openssl/crypto.h 
-cms.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -cms.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -cms.o: ../include/openssl/engine.h ../include/openssl/err.h -cms.o: ../include/openssl/evp.h ../include/openssl/lhash.h -cms.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -cms.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -cms.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -cms.o: ../include/openssl/pem.h ../include/openssl/pem2.h -cms.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -cms.o: ../include/openssl/sha.h ../include/openssl/stack.h -cms.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -cms.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -cms.o: ../include/openssl/x509v3.h apps.h cms.c -crl.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -crl.o: ../include/openssl/buffer.h ../include/openssl/conf.h -crl.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -crl.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -crl.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -crl.o: ../include/openssl/err.h ../include/openssl/evp.h -crl.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -crl.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -crl.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -crl.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -crl.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -crl.o: ../include/openssl/safestack.h ../include/openssl/sha.h -crl.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -crl.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -crl.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h crl.c -crl2p7.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -crl2p7.o: ../include/openssl/buffer.h ../include/openssl/conf.h -crl2p7.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -crl2p7.o: 
../include/openssl/ec.h ../include/openssl/ecdh.h -crl2p7.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -crl2p7.o: ../include/openssl/err.h ../include/openssl/evp.h -crl2p7.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -crl2p7.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -crl2p7.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -crl2p7.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -crl2p7.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -crl2p7.o: ../include/openssl/safestack.h ../include/openssl/sha.h -crl2p7.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -crl2p7.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -crl2p7.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -crl2p7.o: crl2p7.c -dgst.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -dgst.o: ../include/openssl/buffer.h ../include/openssl/conf.h -dgst.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -dgst.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -dgst.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -dgst.o: ../include/openssl/err.h ../include/openssl/evp.h -dgst.o: ../include/openssl/hmac.h ../include/openssl/lhash.h -dgst.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -dgst.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -dgst.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -dgst.o: ../include/openssl/pem.h ../include/openssl/pem2.h -dgst.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -dgst.o: ../include/openssl/sha.h ../include/openssl/stack.h -dgst.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -dgst.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -dgst.o: ../include/openssl/x509v3.h apps.h dgst.c -dh.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -dh.o: ../include/openssl/bn.h ../include/openssl/buffer.h -dh.o: ../include/openssl/conf.h 
../include/openssl/crypto.h -dh.o: ../include/openssl/dh.h ../include/openssl/e_os2.h -dh.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -dh.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -dh.o: ../include/openssl/err.h ../include/openssl/evp.h -dh.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -dh.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -dh.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -dh.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -dh.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -dh.o: ../include/openssl/safestack.h ../include/openssl/sha.h -dh.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -dh.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -dh.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h dh.c -dsa.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -dsa.o: ../include/openssl/bn.h ../include/openssl/buffer.h -dsa.o: ../include/openssl/conf.h ../include/openssl/crypto.h -dsa.o: ../include/openssl/dsa.h ../include/openssl/e_os2.h -dsa.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -dsa.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -dsa.o: ../include/openssl/err.h ../include/openssl/evp.h -dsa.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -dsa.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -dsa.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -dsa.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -dsa.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -dsa.o: ../include/openssl/safestack.h ../include/openssl/sha.h -dsa.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -dsa.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -dsa.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h dsa.c -dsaparam.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -dsaparam.o: ../include/openssl/bn.h 
../include/openssl/buffer.h -dsaparam.o: ../include/openssl/conf.h ../include/openssl/crypto.h -dsaparam.o: ../include/openssl/dh.h ../include/openssl/dsa.h -dsaparam.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -dsaparam.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -dsaparam.o: ../include/openssl/engine.h ../include/openssl/err.h -dsaparam.o: ../include/openssl/evp.h ../include/openssl/lhash.h -dsaparam.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -dsaparam.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -dsaparam.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -dsaparam.o: ../include/openssl/pem.h ../include/openssl/pem2.h -dsaparam.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -dsaparam.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -dsaparam.o: ../include/openssl/sha.h ../include/openssl/stack.h -dsaparam.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -dsaparam.o: ../include/openssl/ui.h ../include/openssl/x509.h -dsaparam.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -dsaparam.o: dsaparam.c -ec.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ec.o: ../include/openssl/buffer.h ../include/openssl/conf.h -ec.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -ec.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ec.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ec.o: ../include/openssl/err.h ../include/openssl/evp.h -ec.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ec.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -ec.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ec.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ec.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ec.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ec.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ec.o: ../include/openssl/txt_db.h 
../include/openssl/x509.h -ec.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h ec.c -ecparam.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ecparam.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ecparam.o: ../include/openssl/conf.h ../include/openssl/crypto.h -ecparam.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ecparam.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ecparam.o: ../include/openssl/engine.h ../include/openssl/err.h -ecparam.o: ../include/openssl/evp.h ../include/openssl/lhash.h -ecparam.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ecparam.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -ecparam.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ecparam.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ecparam.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -ecparam.o: ../include/openssl/sha.h ../include/openssl/stack.h -ecparam.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -ecparam.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -ecparam.o: ../include/openssl/x509v3.h apps.h ecparam.c -enc.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -enc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -enc.o: ../include/openssl/conf.h ../include/openssl/crypto.h -enc.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -enc.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -enc.o: ../include/openssl/engine.h ../include/openssl/err.h -enc.o: ../include/openssl/evp.h ../include/openssl/lhash.h -enc.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -enc.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -enc.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -enc.o: ../include/openssl/pem.h ../include/openssl/pem2.h -enc.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -enc.o: ../include/openssl/safestack.h ../include/openssl/sha.h -enc.o: 
../include/openssl/stack.h ../include/openssl/symhacks.h -enc.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -enc.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h enc.c -engine.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -engine.o: ../include/openssl/buffer.h ../include/openssl/comp.h -engine.o: ../include/openssl/conf.h ../include/openssl/crypto.h -engine.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -engine.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -engine.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -engine.o: ../include/openssl/err.h ../include/openssl/evp.h -engine.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -engine.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -engine.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -engine.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -engine.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -engine.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -engine.o: ../include/openssl/pqueue.h ../include/openssl/safestack.h -engine.o: ../include/openssl/sha.h ../include/openssl/srtp.h -engine.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -engine.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -engine.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -engine.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -engine.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -engine.o: ../include/openssl/x509v3.h apps.h engine.c -errstr.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -errstr.o: ../include/openssl/buffer.h ../include/openssl/comp.h -errstr.o: ../include/openssl/conf.h ../include/openssl/crypto.h -errstr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -errstr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -errstr.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -errstr.o: ../include/openssl/err.h 
../include/openssl/evp.h -errstr.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -errstr.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -errstr.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -errstr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -errstr.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -errstr.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -errstr.o: ../include/openssl/pqueue.h ../include/openssl/safestack.h -errstr.o: ../include/openssl/sha.h ../include/openssl/srtp.h -errstr.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -errstr.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -errstr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -errstr.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -errstr.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -errstr.o: ../include/openssl/x509v3.h apps.h errstr.c -gendh.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -gendh.o: ../include/openssl/bn.h ../include/openssl/buffer.h -gendh.o: ../include/openssl/conf.h ../include/openssl/crypto.h -gendh.o: ../include/openssl/dh.h ../include/openssl/dsa.h -gendh.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -gendh.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -gendh.o: ../include/openssl/engine.h ../include/openssl/err.h -gendh.o: ../include/openssl/evp.h ../include/openssl/lhash.h -gendh.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -gendh.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -gendh.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -gendh.o: ../include/openssl/pem.h ../include/openssl/pem2.h -gendh.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -gendh.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -gendh.o: ../include/openssl/sha.h ../include/openssl/stack.h -gendh.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -gendh.o: 
../include/openssl/ui.h ../include/openssl/x509.h -gendh.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -gendh.o: gendh.c -gendsa.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -gendsa.o: ../include/openssl/bn.h ../include/openssl/buffer.h -gendsa.o: ../include/openssl/conf.h ../include/openssl/crypto.h -gendsa.o: ../include/openssl/dsa.h ../include/openssl/e_os2.h -gendsa.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -gendsa.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -gendsa.o: ../include/openssl/err.h ../include/openssl/evp.h -gendsa.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -gendsa.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -gendsa.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -gendsa.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -gendsa.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -gendsa.o: ../include/openssl/safestack.h ../include/openssl/sha.h -gendsa.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -gendsa.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -gendsa.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -gendsa.o: gendsa.c -genpkey.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -genpkey.o: ../include/openssl/buffer.h ../include/openssl/conf.h -genpkey.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -genpkey.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -genpkey.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -genpkey.o: ../include/openssl/err.h ../include/openssl/evp.h -genpkey.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -genpkey.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -genpkey.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -genpkey.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -genpkey.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -genpkey.o: 
../include/openssl/safestack.h ../include/openssl/sha.h -genpkey.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -genpkey.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -genpkey.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -genpkey.o: genpkey.c -genrsa.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -genrsa.o: ../include/openssl/bn.h ../include/openssl/buffer.h -genrsa.o: ../include/openssl/conf.h ../include/openssl/crypto.h -genrsa.o: ../include/openssl/dh.h ../include/openssl/dsa.h -genrsa.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -genrsa.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -genrsa.o: ../include/openssl/engine.h ../include/openssl/err.h -genrsa.o: ../include/openssl/evp.h ../include/openssl/lhash.h -genrsa.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -genrsa.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -genrsa.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -genrsa.o: ../include/openssl/pem.h ../include/openssl/pem2.h -genrsa.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -genrsa.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -genrsa.o: ../include/openssl/sha.h ../include/openssl/stack.h -genrsa.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -genrsa.o: ../include/openssl/ui.h ../include/openssl/x509.h -genrsa.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -genrsa.o: genrsa.c -nseq.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -nseq.o: ../include/openssl/buffer.h ../include/openssl/conf.h -nseq.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -nseq.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -nseq.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -nseq.o: ../include/openssl/err.h ../include/openssl/evp.h -nseq.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -nseq.o: ../include/openssl/objects.h 
../include/openssl/ocsp.h -nseq.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -nseq.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -nseq.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -nseq.o: ../include/openssl/safestack.h ../include/openssl/sha.h -nseq.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -nseq.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -nseq.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h nseq.c -ocsp.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ocsp.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ocsp.o: ../include/openssl/comp.h ../include/openssl/conf.h -ocsp.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -ocsp.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ocsp.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ocsp.o: ../include/openssl/engine.h ../include/openssl/err.h -ocsp.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ocsp.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ocsp.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ocsp.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -ocsp.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ocsp.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ocsp.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -ocsp.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ocsp.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ocsp.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ocsp.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ocsp.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ocsp.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -ocsp.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h ocsp.c -openssl.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -openssl.o: ../include/openssl/buffer.h ../include/openssl/comp.h -openssl.o: 
../include/openssl/conf.h ../include/openssl/crypto.h -openssl.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -openssl.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -openssl.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -openssl.o: ../include/openssl/err.h ../include/openssl/evp.h -openssl.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -openssl.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -openssl.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -openssl.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -openssl.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -openssl.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -openssl.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -openssl.o: ../include/openssl/safestack.h ../include/openssl/sha.h -openssl.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -openssl.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -openssl.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -openssl.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -openssl.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -openssl.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -openssl.o: openssl.c progs.h s_apps.h -passwd.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -passwd.o: ../include/openssl/buffer.h ../include/openssl/conf.h -passwd.o: ../include/openssl/crypto.h ../include/openssl/des.h -passwd.o: ../include/openssl/des_old.h ../include/openssl/e_os2.h -passwd.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -passwd.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -passwd.o: ../include/openssl/err.h ../include/openssl/evp.h -passwd.o: ../include/openssl/lhash.h ../include/openssl/md5.h -passwd.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -passwd.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -passwd.o: ../include/openssl/opensslv.h 
../include/openssl/ossl_typ.h -passwd.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -passwd.o: ../include/openssl/safestack.h ../include/openssl/sha.h -passwd.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -passwd.o: ../include/openssl/txt_db.h ../include/openssl/ui.h -passwd.o: ../include/openssl/ui_compat.h ../include/openssl/x509.h -passwd.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -passwd.o: passwd.c -pkcs12.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -pkcs12.o: ../include/openssl/buffer.h ../include/openssl/conf.h -pkcs12.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -pkcs12.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -pkcs12.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -pkcs12.o: ../include/openssl/err.h ../include/openssl/evp.h -pkcs12.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -pkcs12.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -pkcs12.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -pkcs12.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -pkcs12.o: ../include/openssl/pem2.h ../include/openssl/pkcs12.h -pkcs12.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -pkcs12.o: ../include/openssl/sha.h ../include/openssl/stack.h -pkcs12.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -pkcs12.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -pkcs12.o: ../include/openssl/x509v3.h apps.h pkcs12.c -pkcs7.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -pkcs7.o: ../include/openssl/buffer.h ../include/openssl/conf.h -pkcs7.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -pkcs7.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -pkcs7.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -pkcs7.o: ../include/openssl/err.h ../include/openssl/evp.h -pkcs7.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -pkcs7.o: ../include/openssl/objects.h 
../include/openssl/ocsp.h -pkcs7.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -pkcs7.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -pkcs7.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -pkcs7.o: ../include/openssl/safestack.h ../include/openssl/sha.h -pkcs7.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -pkcs7.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -pkcs7.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -pkcs7.o: pkcs7.c -pkcs8.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -pkcs8.o: ../include/openssl/buffer.h ../include/openssl/conf.h -pkcs8.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -pkcs8.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -pkcs8.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -pkcs8.o: ../include/openssl/err.h ../include/openssl/evp.h -pkcs8.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -pkcs8.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -pkcs8.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -pkcs8.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -pkcs8.o: ../include/openssl/pem2.h ../include/openssl/pkcs12.h -pkcs8.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -pkcs8.o: ../include/openssl/sha.h ../include/openssl/stack.h -pkcs8.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -pkcs8.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -pkcs8.o: ../include/openssl/x509v3.h apps.h pkcs8.c -pkey.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -pkey.o: ../include/openssl/buffer.h ../include/openssl/conf.h -pkey.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -pkey.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -pkey.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -pkey.o: ../include/openssl/err.h ../include/openssl/evp.h -pkey.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h 
-pkey.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -pkey.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -pkey.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -pkey.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -pkey.o: ../include/openssl/safestack.h ../include/openssl/sha.h -pkey.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -pkey.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -pkey.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h pkey.c -pkeyparam.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -pkeyparam.o: ../include/openssl/buffer.h ../include/openssl/conf.h -pkeyparam.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -pkeyparam.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -pkeyparam.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -pkeyparam.o: ../include/openssl/err.h ../include/openssl/evp.h -pkeyparam.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -pkeyparam.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -pkeyparam.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -pkeyparam.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -pkeyparam.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -pkeyparam.o: ../include/openssl/safestack.h ../include/openssl/sha.h -pkeyparam.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -pkeyparam.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -pkeyparam.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -pkeyparam.o: pkeyparam.c -pkeyutl.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -pkeyutl.o: ../include/openssl/buffer.h ../include/openssl/conf.h -pkeyutl.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -pkeyutl.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -pkeyutl.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -pkeyutl.o: ../include/openssl/err.h 
../include/openssl/evp.h -pkeyutl.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -pkeyutl.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -pkeyutl.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -pkeyutl.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -pkeyutl.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -pkeyutl.o: ../include/openssl/safestack.h ../include/openssl/sha.h -pkeyutl.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -pkeyutl.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -pkeyutl.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -pkeyutl.o: pkeyutl.c -prime.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -prime.o: ../include/openssl/bn.h ../include/openssl/buffer.h -prime.o: ../include/openssl/conf.h ../include/openssl/crypto.h -prime.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -prime.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -prime.o: ../include/openssl/engine.h ../include/openssl/evp.h -prime.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -prime.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -prime.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -prime.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -prime.o: ../include/openssl/safestack.h ../include/openssl/sha.h -prime.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -prime.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -prime.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -prime.o: prime.c -rand.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -rand.o: ../include/openssl/buffer.h ../include/openssl/conf.h -rand.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -rand.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -rand.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -rand.o: ../include/openssl/err.h ../include/openssl/evp.h 
-rand.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -rand.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -rand.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -rand.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -rand.o: ../include/openssl/rand.h ../include/openssl/safestack.h -rand.o: ../include/openssl/sha.h ../include/openssl/stack.h -rand.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -rand.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -rand.o: ../include/openssl/x509v3.h apps.h rand.c -req.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -req.o: ../include/openssl/bn.h ../include/openssl/buffer.h -req.o: ../include/openssl/conf.h ../include/openssl/crypto.h -req.o: ../include/openssl/dh.h ../include/openssl/dsa.h -req.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -req.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -req.o: ../include/openssl/engine.h ../include/openssl/err.h -req.o: ../include/openssl/evp.h ../include/openssl/lhash.h -req.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -req.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -req.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -req.o: ../include/openssl/pem.h ../include/openssl/pem2.h -req.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -req.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -req.o: ../include/openssl/sha.h ../include/openssl/stack.h -req.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -req.o: ../include/openssl/ui.h ../include/openssl/x509.h -req.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h req.c -rsa.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -rsa.o: ../include/openssl/bn.h ../include/openssl/buffer.h -rsa.o: ../include/openssl/conf.h ../include/openssl/crypto.h -rsa.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -rsa.o: ../include/openssl/ecdh.h 
../include/openssl/ecdsa.h -rsa.o: ../include/openssl/engine.h ../include/openssl/err.h -rsa.o: ../include/openssl/evp.h ../include/openssl/lhash.h -rsa.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -rsa.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -rsa.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -rsa.o: ../include/openssl/pem.h ../include/openssl/pem2.h -rsa.o: ../include/openssl/pkcs7.h ../include/openssl/rsa.h -rsa.o: ../include/openssl/safestack.h ../include/openssl/sha.h -rsa.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -rsa.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -rsa.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h rsa.c -rsautl.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -rsautl.o: ../include/openssl/buffer.h ../include/openssl/conf.h -rsautl.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -rsautl.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -rsautl.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -rsautl.o: ../include/openssl/err.h ../include/openssl/evp.h -rsautl.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -rsautl.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -rsautl.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -rsautl.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -rsautl.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -rsautl.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -rsautl.o: ../include/openssl/sha.h ../include/openssl/stack.h -rsautl.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -rsautl.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -rsautl.o: ../include/openssl/x509v3.h apps.h rsautl.c -s_cb.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s_cb.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s_cb.o: ../include/openssl/conf.h ../include/openssl/crypto.h -s_cb.o: 
../include/openssl/dtls1.h ../include/openssl/e_os2.h -s_cb.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s_cb.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -s_cb.o: ../include/openssl/err.h ../include/openssl/evp.h -s_cb.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -s_cb.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -s_cb.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -s_cb.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s_cb.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s_cb.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s_cb.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s_cb.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s_cb.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s_cb.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s_cb.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s_cb.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s_cb.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -s_cb.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -s_cb.o: s_apps.h s_cb.c -s_client.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s_client.o: ../include/openssl/bn.h ../include/openssl/buffer.h -s_client.o: ../include/openssl/comp.h ../include/openssl/conf.h -s_client.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -s_client.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -s_client.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -s_client.o: ../include/openssl/engine.h ../include/openssl/err.h -s_client.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s_client.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s_client.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s_client.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -s_client.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -s_client.o: 
../include/openssl/pem.h ../include/openssl/pem2.h -s_client.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -s_client.o: ../include/openssl/rand.h ../include/openssl/safestack.h -s_client.o: ../include/openssl/sha.h ../include/openssl/srp.h -s_client.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s_client.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s_client.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s_client.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s_client.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -s_client.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -s_client.o: s_apps.h s_client.c timeouts.h -s_server.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s_server.o: ../include/openssl/bn.h ../include/openssl/buffer.h -s_server.o: ../include/openssl/comp.h ../include/openssl/conf.h -s_server.o: ../include/openssl/crypto.h ../include/openssl/dh.h -s_server.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h -s_server.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -s_server.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -s_server.o: ../include/openssl/engine.h ../include/openssl/err.h -s_server.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s_server.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s_server.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s_server.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -s_server.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -s_server.o: ../include/openssl/pem.h ../include/openssl/pem2.h -s_server.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -s_server.o: ../include/openssl/rand.h ../include/openssl/rsa.h -s_server.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s_server.o: ../include/openssl/srp.h ../include/openssl/srtp.h -s_server.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s_server.o: 
../include/openssl/ssl23.h ../include/openssl/ssl3.h -s_server.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s_server.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -s_server.o: ../include/openssl/ui.h ../include/openssl/x509.h -s_server.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -s_server.o: s_apps.h s_server.c timeouts.h -s_socket.o: ../e_os.h ../e_os2.h ../include/openssl/asn1.h -s_socket.o: ../include/openssl/bio.h ../include/openssl/buffer.h -s_socket.o: ../include/openssl/comp.h ../include/openssl/conf.h -s_socket.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -s_socket.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -s_socket.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -s_socket.o: ../include/openssl/engine.h ../include/openssl/evp.h -s_socket.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -s_socket.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -s_socket.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -s_socket.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s_socket.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s_socket.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s_socket.o: ../include/openssl/pqueue.h ../include/openssl/safestack.h -s_socket.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s_socket.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s_socket.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s_socket.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s_socket.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -s_socket.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -s_socket.o: ../include/openssl/x509v3.h apps.h s_apps.h s_socket.c -s_time.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s_time.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s_time.o: ../include/openssl/conf.h ../include/openssl/crypto.h -s_time.o: 
../include/openssl/dtls1.h ../include/openssl/e_os2.h -s_time.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s_time.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -s_time.o: ../include/openssl/err.h ../include/openssl/evp.h -s_time.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -s_time.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -s_time.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -s_time.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s_time.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s_time.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s_time.o: ../include/openssl/pqueue.h ../include/openssl/safestack.h -s_time.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s_time.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s_time.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s_time.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s_time.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -s_time.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -s_time.o: ../include/openssl/x509v3.h apps.h s_apps.h s_time.c -sess_id.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -sess_id.o: ../include/openssl/buffer.h ../include/openssl/comp.h -sess_id.o: ../include/openssl/conf.h ../include/openssl/crypto.h -sess_id.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -sess_id.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -sess_id.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -sess_id.o: ../include/openssl/err.h ../include/openssl/evp.h -sess_id.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -sess_id.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -sess_id.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -sess_id.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -sess_id.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -sess_id.o: 
../include/openssl/pem2.h ../include/openssl/pkcs7.h -sess_id.o: ../include/openssl/pqueue.h ../include/openssl/safestack.h -sess_id.o: ../include/openssl/sha.h ../include/openssl/srtp.h -sess_id.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -sess_id.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -sess_id.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -sess_id.o: ../include/openssl/tls1.h ../include/openssl/txt_db.h -sess_id.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -sess_id.o: ../include/openssl/x509v3.h apps.h sess_id.c -smime.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -smime.o: ../include/openssl/buffer.h ../include/openssl/conf.h -smime.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -smime.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -smime.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -smime.o: ../include/openssl/err.h ../include/openssl/evp.h -smime.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -smime.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -smime.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -smime.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -smime.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -smime.o: ../include/openssl/safestack.h ../include/openssl/sha.h -smime.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -smime.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -smime.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -smime.o: smime.c -speed.o: ../e_os.h ../include/openssl/aes.h ../include/openssl/asn1.h -speed.o: ../include/openssl/bio.h ../include/openssl/blowfish.h -speed.o: ../include/openssl/bn.h ../include/openssl/buffer.h -speed.o: ../include/openssl/camellia.h ../include/openssl/cast.h -speed.o: ../include/openssl/conf.h ../include/openssl/crypto.h -speed.o: ../include/openssl/des.h ../include/openssl/des_old.h -speed.o: 
../include/openssl/dsa.h ../include/openssl/e_os2.h -speed.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -speed.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -speed.o: ../include/openssl/err.h ../include/openssl/evp.h -speed.o: ../include/openssl/hmac.h ../include/openssl/idea.h -speed.o: ../include/openssl/lhash.h ../include/openssl/md4.h -speed.o: ../include/openssl/md5.h ../include/openssl/mdc2.h -speed.o: ../include/openssl/modes.h ../include/openssl/obj_mac.h -speed.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -speed.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -speed.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -speed.o: ../include/openssl/rand.h ../include/openssl/rc2.h -speed.o: ../include/openssl/rc4.h ../include/openssl/ripemd.h -speed.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -speed.o: ../include/openssl/seed.h ../include/openssl/sha.h -speed.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -speed.o: ../include/openssl/txt_db.h ../include/openssl/ui.h -speed.o: ../include/openssl/ui_compat.h ../include/openssl/whrlpool.h -speed.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -speed.o: ../include/openssl/x509v3.h apps.h speed.c testdsa.h testrsa.h -spkac.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -spkac.o: ../include/openssl/buffer.h ../include/openssl/conf.h -spkac.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -spkac.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -spkac.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -spkac.o: ../include/openssl/err.h ../include/openssl/evp.h -spkac.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -spkac.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -spkac.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -spkac.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -spkac.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h 
-spkac.o: ../include/openssl/safestack.h ../include/openssl/sha.h -spkac.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -spkac.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -spkac.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -spkac.o: spkac.c -srp.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -srp.o: ../include/openssl/bn.h ../include/openssl/buffer.h -srp.o: ../include/openssl/conf.h ../include/openssl/crypto.h -srp.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -srp.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -srp.o: ../include/openssl/engine.h ../include/openssl/err.h -srp.o: ../include/openssl/evp.h ../include/openssl/lhash.h -srp.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -srp.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -srp.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -srp.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -srp.o: ../include/openssl/sha.h ../include/openssl/srp.h -srp.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -srp.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -srp.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h srp.c -ts.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ts.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ts.o: ../include/openssl/conf.h ../include/openssl/crypto.h -ts.o: ../include/openssl/dh.h ../include/openssl/dsa.h -ts.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ts.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ts.o: ../include/openssl/engine.h ../include/openssl/err.h -ts.o: ../include/openssl/evp.h ../include/openssl/lhash.h -ts.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ts.o: ../include/openssl/ocsp.h ../include/openssl/opensslconf.h -ts.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ts.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ts.o: 
../include/openssl/pkcs7.h ../include/openssl/rand.h -ts.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -ts.o: ../include/openssl/sha.h ../include/openssl/stack.h -ts.o: ../include/openssl/symhacks.h ../include/openssl/ts.h -ts.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -ts.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h ts.c -verify.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -verify.o: ../include/openssl/buffer.h ../include/openssl/conf.h -verify.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -verify.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -verify.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -verify.o: ../include/openssl/err.h ../include/openssl/evp.h -verify.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -verify.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -verify.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -verify.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -verify.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -verify.o: ../include/openssl/safestack.h ../include/openssl/sha.h -verify.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -verify.o: ../include/openssl/txt_db.h ../include/openssl/x509.h -verify.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h apps.h -verify.o: verify.c -version.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -version.o: ../include/openssl/blowfish.h ../include/openssl/bn.h -version.o: ../include/openssl/buffer.h ../include/openssl/conf.h -version.o: ../include/openssl/crypto.h ../include/openssl/des.h -version.o: ../include/openssl/des_old.h ../include/openssl/e_os2.h -version.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -version.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -version.o: ../include/openssl/evp.h ../include/openssl/idea.h -version.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h 
-version.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -version.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -version.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -version.o: ../include/openssl/rc4.h ../include/openssl/safestack.h -version.o: ../include/openssl/sha.h ../include/openssl/stack.h -version.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -version.o: ../include/openssl/ui.h ../include/openssl/ui_compat.h -version.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -version.o: ../include/openssl/x509v3.h apps.h version.c -x509.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -x509.o: ../include/openssl/bn.h ../include/openssl/buffer.h -x509.o: ../include/openssl/conf.h ../include/openssl/crypto.h -x509.o: ../include/openssl/dsa.h ../include/openssl/e_os2.h -x509.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -x509.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -x509.o: ../include/openssl/err.h ../include/openssl/evp.h -x509.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -x509.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -x509.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -x509.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -x509.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -x509.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -x509.o: ../include/openssl/sha.h ../include/openssl/stack.h -x509.o: ../include/openssl/symhacks.h ../include/openssl/txt_db.h -x509.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -x509.o: ../include/openssl/x509v3.h apps.h x509.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/ocsp.c nodejs-0.11.15/deps/openssl/openssl/apps/ocsp.c --- nodejs-0.11.13/deps/openssl/openssl/apps/ocsp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/ocsp.c 2015-01-20 21:22:17.000000000 +0000 @@ -127,6 +127,7 @@ ENGINE *e = NULL; 
char **args; char *host = NULL, *port = NULL, *path = "/"; + char *thost = NULL, *tport = NULL, *tpath = NULL; char *reqin = NULL, *respin = NULL; char *reqout = NULL, *respout = NULL; char *signfile = NULL, *keyfile = NULL; @@ -204,6 +205,12 @@ } else if (!strcmp(*args, "-url")) { + if (thost) + OPENSSL_free(thost); + if (tport) + OPENSSL_free(tport); + if (tpath) + OPENSSL_free(tpath); if (args[1]) { args++; @@ -212,6 +219,9 @@ BIO_printf(bio_err, "Error parsing URL\n"); badarg = 1; } + thost = host; + tport = port; + tpath = path; } else badarg = 1; } @@ -920,12 +930,12 @@ sk_X509_pop_free(verify_other, X509_free); sk_CONF_VALUE_pop_free(headers, X509V3_conf_free); - if (use_ssl != -1) - { - OPENSSL_free(host); - OPENSSL_free(port); - OPENSSL_free(path); - } + if (thost) + OPENSSL_free(thost); + if (tport) + OPENSSL_free(tport); + if (tpath) + OPENSSL_free(tpath); OPENSSL_EXIT(ret); } @@ -1409,7 +1419,7 @@ } resp = query_responder(err, cbio, path, headers, req, req_timeout); if (!resp) - BIO_printf(bio_err, "Error querying OCSP responsder\n"); + BIO_printf(bio_err, "Error querying OCSP responder\n"); end: if (cbio) BIO_free_all(cbio); diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/progs.h nodejs-0.11.15/deps/openssl/openssl/apps/progs.h --- nodejs-0.11.13/deps/openssl/openssl/apps/progs.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/progs.h 2015-01-20 21:22:17.000000000 +0000 @@ -107,16 +107,16 @@ {FUNC_TYPE_GENERAL,"gendsa",gendsa_main}, #endif {FUNC_TYPE_GENERAL,"genpkey",genpkey_main}, -#if !defined(OPENSSL_NO_SOCK) && !(defined(OPENSSL_NO_SSL2) && defined(OPENSSL_NO_SSL3)) +#if !defined(OPENSSL_NO_SOCK) {FUNC_TYPE_GENERAL,"s_server",s_server_main}, #endif -#if !defined(OPENSSL_NO_SOCK) && !(defined(OPENSSL_NO_SSL2) && defined(OPENSSL_NO_SSL3)) +#if !defined(OPENSSL_NO_SOCK) {FUNC_TYPE_GENERAL,"s_client",s_client_main}, #endif #ifndef OPENSSL_NO_SPEED {FUNC_TYPE_GENERAL,"speed",speed_main}, #endif -#if 
!defined(OPENSSL_NO_SOCK) && !(defined(OPENSSL_NO_SSL2) && defined(OPENSSL_NO_SSL3)) +#if !defined(OPENSSL_NO_SOCK) {FUNC_TYPE_GENERAL,"s_time",s_time_main}, #endif {FUNC_TYPE_GENERAL,"version",version_main}, @@ -126,7 +126,7 @@ #endif {FUNC_TYPE_GENERAL,"crl2pkcs7",crl2pkcs7_main}, {FUNC_TYPE_GENERAL,"sess_id",sess_id_main}, -#if !defined(OPENSSL_NO_SOCK) && !(defined(OPENSSL_NO_SSL2) && defined(OPENSSL_NO_SSL3)) +#if !defined(OPENSSL_NO_SOCK) {FUNC_TYPE_GENERAL,"ciphers",ciphers_main}, #endif {FUNC_TYPE_GENERAL,"nseq",nseq_main}, diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/progs.pl nodejs-0.11.15/deps/openssl/openssl/apps/progs.pl --- nodejs-0.11.13/deps/openssl/openssl/apps/progs.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/progs.pl 2015-01-20 21:22:17.000000000 +0000 @@ -32,7 +32,7 @@ push(@files,$_); $str="\t{FUNC_TYPE_GENERAL,\"$_\",${_}_main},\n"; if (($_ =~ /^s_/) || ($_ =~ /^ciphers$/)) - { print "#if !defined(OPENSSL_NO_SOCK) && !(defined(OPENSSL_NO_SSL2) && defined(OPENSSL_NO_SSL3))\n${str}#endif\n"; } + { print "#if !defined(OPENSSL_NO_SOCK)\n${str}#endif\n"; } elsif ( ($_ =~ /^speed$/)) { print "#ifndef OPENSSL_NO_SPEED\n${str}#endif\n"; } elsif ( ($_ =~ /^engine$/)) diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/req.c nodejs-0.11.15/deps/openssl/openssl/apps/req.c --- nodejs-0.11.13/deps/openssl/openssl/apps/req.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/req.c 2015-01-20 21:22:17.000000000 +0000 @@ -1489,7 +1489,13 @@ #ifdef CHARSET_EBCDIC ebcdic2ascii(buf, buf, i); #endif - if(!req_check_len(i, n_min, n_max)) goto start; + if(!req_check_len(i, n_min, n_max)) + { + if (batch || value) + return 0; + goto start; + } + if (!X509_NAME_add_entry_by_NID(n,nid, chtype, (unsigned char *) buf, -1,-1,mval)) goto err; ret=1; @@ -1548,7 +1554,12 @@ #ifdef CHARSET_EBCDIC ebcdic2ascii(buf, buf, i); #endif - if(!req_check_len(i, n_min, n_max)) goto start; + if(!req_check_len(i, 
n_min, n_max)) + { + if (batch || value) + return 0; + goto start; + } if(!X509_REQ_add1_attr_by_NID(req, nid, chtype, (unsigned char *)buf, -1)) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/s_cb.c nodejs-0.11.15/deps/openssl/openssl/apps/s_cb.c --- nodejs-0.11.13/deps/openssl/openssl/apps/s_cb.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/s_cb.c 2015-01-20 21:22:17.000000000 +0000 @@ -747,6 +747,10 @@ break; #endif + case TLSEXT_TYPE_padding: + extname = "TLS padding"; + break; + default: extname = "unknown"; break; diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/s_client.c nodejs-0.11.15/deps/openssl/openssl/apps/s_client.c --- nodejs-0.11.13/deps/openssl/openssl/apps/s_client.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/s_client.c 2015-01-20 21:22:17.000000000 +0000 @@ -297,6 +297,7 @@ BIO_printf(bio_err," -connect host:port - who to connect to (default is %s:%s)\n",SSL_HOST_NAME,PORT_STR); BIO_printf(bio_err," -verify arg - turn on peer certificate verification\n"); + BIO_printf(bio_err," -verify_return_error - return verification errors\n"); BIO_printf(bio_err," -cert arg - certificate file to use, PEM format assumed\n"); BIO_printf(bio_err," -certform arg - certificate format (PEM or DER) PEM default\n"); BIO_printf(bio_err," -key arg - Private key file to use, in cert file if\n"); @@ -307,6 +308,7 @@ BIO_printf(bio_err," -CAfile arg - PEM format file of CA's\n"); BIO_printf(bio_err," -reconnect - Drop and re-make the connection with the same Session-ID\n"); BIO_printf(bio_err," -pause - sleep(1) after each read(2) and write(2) system call\n"); + BIO_printf(bio_err," -prexit - print session information even on connection failure\n"); BIO_printf(bio_err," -showcerts - show all certificates in the chain\n"); BIO_printf(bio_err," -debug - extra output\n"); #ifdef WATT32 @@ -342,6 +344,7 @@ BIO_printf(bio_err," -tls1_1 - just use TLSv1.1\n"); BIO_printf(bio_err," -tls1 - just 
use TLSv1\n"); BIO_printf(bio_err," -dtls1 - just use DTLSv1\n"); + BIO_printf(bio_err," -fallback_scsv - send TLS_FALLBACK_SCSV\n"); BIO_printf(bio_err," -mtu - set the link layer MTU\n"); BIO_printf(bio_err," -no_tls1_2/-no_tls1_1/-no_tls1/-no_ssl3/-no_ssl2 - turn off that protocol\n"); BIO_printf(bio_err," -bugs - Switch on all SSL implementation bug workarounds\n"); @@ -622,6 +625,7 @@ char *sess_out = NULL; struct sockaddr peer; int peerlen = sizeof(peer); + int fallback_scsv = 0; int enable_timeouts = 0 ; long socket_mtu = 0; #ifndef OPENSSL_NO_JPAKE @@ -828,6 +832,10 @@ meth=DTLSv1_client_method(); socket_type=SOCK_DGRAM; } + else if (strcmp(*argv,"-fallback_scsv") == 0) + { + fallback_scsv = 1; + } else if (strcmp(*argv,"-timeout") == 0) enable_timeouts=1; else if (strcmp(*argv,"-mtu") == 0) @@ -1240,6 +1248,10 @@ SSL_set_session(con, sess); SSL_SESSION_free(sess); } + + if (fallback_scsv) + SSL_set_mode(con, SSL_MODE_SEND_FALLBACK_SCSV); + #ifndef OPENSSL_NO_TLSEXT if (servername != NULL) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/smime.c nodejs-0.11.15/deps/openssl/openssl/apps/smime.c --- nodejs-0.11.13/deps/openssl/openssl/apps/smime.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/smime.c 2015-01-20 21:22:17.000000000 +0000 @@ -541,8 +541,8 @@ { if (!cipher) { -#ifndef OPENSSL_NO_RC2 - cipher = EVP_rc2_40_cbc(); +#ifndef OPENSSL_NO_DES + cipher = EVP_des_ede3_cbc(); #else BIO_printf(bio_err, "No cipher selected\n"); goto end; diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/s_server.c nodejs-0.11.15/deps/openssl/openssl/apps/s_server.c --- nodejs-0.11.13/deps/openssl/openssl/apps/s_server.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/s_server.c 2015-01-20 21:22:17.000000000 +0000 @@ -463,6 +463,7 @@ BIO_printf(bio_err," -context arg - set session ID context\n"); BIO_printf(bio_err," -verify arg - turn on peer certificate verification\n"); BIO_printf(bio_err," -Verify arg 
- turn on peer certificate verification, must have a cert.\n"); + BIO_printf(bio_err," -verify_return_error - return verification errors\n"); BIO_printf(bio_err," -cert arg - certificate file to use\n"); BIO_printf(bio_err," (default is %s)\n",TEST_CERT); BIO_printf(bio_err," -crl_check - check the peer certificate has not been revoked by its CA.\n" \ @@ -534,6 +535,7 @@ BIO_printf(bio_err," -no_ecdhe - Disable ephemeral ECDH\n"); #endif BIO_printf(bio_err," -bugs - Turn on SSL bug compatibility\n"); + BIO_printf(bio_err," -hack - workaround for early Netscape code\n"); BIO_printf(bio_err," -www - Respond to a 'GET /' with a status page\n"); BIO_printf(bio_err," -WWW - Respond to a 'GET /<path> HTTP/1.0' with file ./<path>\n"); BIO_printf(bio_err," -HTTP - Respond to a 'GET /<path> HTTP/1.0' with file ./<path>\n"); @@ -562,6 +564,10 @@ #endif BIO_printf(bio_err," -keymatexport label - Export keying material using label\n"); BIO_printf(bio_err," -keymatexportlen len - Export len bytes of keying material (default 20)\n"); + BIO_printf(bio_err," -status - respond to certificate status requests\n"); + BIO_printf(bio_err," -status_verbose - enable status request verbose printout\n"); + BIO_printf(bio_err," -status_timeout n - status request responder timeout\n"); + BIO_printf(bio_err," -status_url URL - status request fallback URL\n"); } static int local_argc=0; @@ -739,7 +745,7 @@ if (servername) { - if (strcmp(servername,p->servername)) + if (strcasecmp(servername,p->servername)) return p->extension_error; if (ctx2) { @@ -1356,6 +1362,14 @@ sv_usage(); goto end; } +#ifndef OPENSSL_NO_DTLS1 + if (www && socket_type == SOCK_DGRAM) + { + BIO_printf(bio_err, + "Can't use -HTTP, -www or -WWW with DTLS\n"); + goto end; + } +#endif #if !defined(OPENSSL_NO_JPAKE) && !defined(OPENSSL_NO_PSK) if (jpake_secret) diff -Nru nodejs-0.11.13/deps/openssl/openssl/apps/s_socket.c nodejs-0.11.15/deps/openssl/openssl/apps/s_socket.c --- nodejs-0.11.13/deps/openssl/openssl/apps/s_socket.c 
2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/apps/s_socket.c 2015-01-20 21:22:17.000000000 +0000 @@ -274,7 +274,7 @@ { i=0; i=setsockopt(s,SOL_SOCKET,SO_KEEPALIVE,(char *)&i,sizeof(i)); - if (i < 0) { perror("keepalive"); return(0); } + if (i < 0) { closesocket(s); perror("keepalive"); return(0); } } #endif @@ -450,6 +450,7 @@ if ((*host=(char *)OPENSSL_malloc(strlen(h1->h_name)+1)) == NULL) { perror("OPENSSL_malloc"); + closesocket(ret); return(0); } BUF_strlcpy(*host,h1->h_name,strlen(h1->h_name)+1); @@ -458,11 +459,13 @@ if (h2 == NULL) { BIO_printf(bio_err,"gethostbyname failure\n"); + closesocket(ret); return(0); } if (h2->h_addrtype != AF_INET) { BIO_printf(bio_err,"gethostbyname addr is not AF_INET\n"); + closesocket(ret); return(0); } } diff -Nru nodejs-0.11.13/deps/openssl/openssl/CHANGES nodejs-0.11.15/deps/openssl/openssl/CHANGES --- nodejs-0.11.13/deps/openssl/openssl/CHANGES 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/CHANGES 2015-01-20 21:22:17.000000000 +0000 @@ -2,6 +2,187 @@ OpenSSL CHANGES _______________ + Changes between 1.0.1i and 1.0.1j [15 Oct 2014] + + *) SRTP Memory Leak. + + A flaw in the DTLS SRTP extension parsing code allows an attacker, who + sends a carefully crafted handshake message, to cause OpenSSL to fail + to free up to 64k of memory causing a memory leak. This could be + exploited in a Denial Of Service attack. This issue affects OpenSSL + 1.0.1 server implementations for both SSL/TLS and DTLS regardless of + whether SRTP is used or configured. Implementations of OpenSSL that + have been compiled with OPENSSL_NO_SRTP defined are not affected. + + The fix was developed by the OpenSSL team. + (CVE-2014-3513) + [OpenSSL team] + + *) Session Ticket Memory Leak. + + When an OpenSSL SSL/TLS/DTLS server receives a session ticket the + integrity of that ticket is first verified. 
In the event of a session + ticket integrity check failing, OpenSSL will fail to free memory + causing a memory leak. By sending a large number of invalid session + tickets an attacker could exploit this issue in a Denial Of Service + attack. + (CVE-2014-3567) + [Steve Henson] + + *) Build option no-ssl3 is incomplete. + + When OpenSSL is configured with "no-ssl3" as a build option, servers + could accept and complete a SSL 3.0 handshake, and clients could be + configured to send them. + (CVE-2014-3568) + [Akamai and the OpenSSL team] + + *) Add support for TLS_FALLBACK_SCSV. + Client applications doing fallback retries should call + SSL_set_mode(s, SSL_MODE_SEND_FALLBACK_SCSV). + (CVE-2014-3566) + [Adam Langley, Bodo Moeller] + + *) Add additional DigestInfo checks. + + Reencode DigestInto in DER and check against the original when + verifying RSA signature: this will reject any improperly encoded + DigestInfo structures. + + Note: this is a precautionary measure and no attacks are currently known. + + [Steve Henson] + + Changes between 1.0.1h and 1.0.1i [6 Aug 2014] + + *) Fix SRP buffer overrun vulnerability. Invalid parameters passed to the + SRP code can be overrun an internal buffer. Add sanity check that + g, A, B < N to SRP code. + + Thanks to Sean Devlin and Watson Ladd of Cryptography Services, NCC + Group for discovering this issue. + (CVE-2014-3512) + [Steve Henson] + + *) A flaw in the OpenSSL SSL/TLS server code causes the server to negotiate + TLS 1.0 instead of higher protocol versions when the ClientHello message + is badly fragmented. This allows a man-in-the-middle attacker to force a + downgrade to TLS 1.0 even if both the server and the client support a + higher protocol version, by modifying the client's TLS records. + + Thanks to David Benjamin and Adam Langley (Google) for discovering and + researching this issue. 
+ (CVE-2014-3511) + [David Benjamin] + + *) OpenSSL DTLS clients enabling anonymous (EC)DH ciphersuites are subject + to a denial of service attack. A malicious server can crash the client + with a null pointer dereference (read) by specifying an anonymous (EC)DH + ciphersuite and sending carefully crafted handshake messages. + + Thanks to Felix Grbert (Google) for discovering and researching this + issue. + (CVE-2014-3510) + [Emilia Ksper] + + *) By sending carefully crafted DTLS packets an attacker could cause openssl + to leak memory. This can be exploited through a Denial of Service attack. + Thanks to Adam Langley for discovering and researching this issue. + (CVE-2014-3507) + [Adam Langley] + + *) An attacker can force openssl to consume large amounts of memory whilst + processing DTLS handshake messages. This can be exploited through a + Denial of Service attack. + Thanks to Adam Langley for discovering and researching this issue. + (CVE-2014-3506) + [Adam Langley] + + *) An attacker can force an error condition which causes openssl to crash + whilst processing DTLS packets due to memory being freed twice. This + can be exploited through a Denial of Service attack. + Thanks to Adam Langley and Wan-Teh Chang for discovering and researching + this issue. + (CVE-2014-3505) + [Adam Langley] + + *) If a multithreaded client connects to a malicious server using a resumed + session and the server sends an ec point format extension it could write + up to 255 bytes to freed memory. + + Thanks to Gabor Tyukasz (LogMeIn Inc) for discovering and researching this + issue. + (CVE-2014-3509) + [Gabor Tyukasz] + + *) A malicious server can crash an OpenSSL client with a null pointer + dereference (read) by specifying an SRP ciphersuite even though it was not + properly negotiated with the client. This can be exploited through a + Denial of Service attack. + + Thanks to Joonas Kuorilehto and Riku Hietamki (Codenomicon) for + discovering and researching this issue. 
+ (CVE-2014-5139) + [Steve Henson] + + *) A flaw in OBJ_obj2txt may cause pretty printing functions such as + X509_name_oneline, X509_name_print_ex et al. to leak some information + from the stack. Applications may be affected if they echo pretty printing + output to the attacker. + + Thanks to Ivan Fratric (Google) for discovering this issue. + (CVE-2014-3508) + [Emilia Ksper, and Steve Henson] + + *) Fix ec_GFp_simple_points_make_affine (thus, EC_POINTs_mul etc.) + for corner cases. (Certain input points at infinity could lead to + bogus results, with non-infinity inputs mapped to infinity too.) + [Bodo Moeller] + + Changes between 1.0.1g and 1.0.1h [5 Jun 2014] + + *) Fix for SSL/TLS MITM flaw. An attacker using a carefully crafted + handshake can force the use of weak keying material in OpenSSL + SSL/TLS clients and servers. + + Thanks to KIKUCHI Masashi (Lepidum Co. Ltd.) for discovering and + researching this issue. (CVE-2014-0224) + [KIKUCHI Masashi, Steve Henson] + + *) Fix DTLS recursion flaw. By sending an invalid DTLS handshake to an + OpenSSL DTLS client the code can be made to recurse eventually crashing + in a DoS attack. + + Thanks to Imre Rad (Search-Lab Ltd.) for discovering this issue. + (CVE-2014-0221) + [Imre Rad, Steve Henson] + + *) Fix DTLS invalid fragment vulnerability. A buffer overrun attack can + be triggered by sending invalid DTLS fragments to an OpenSSL DTLS + client or server. This is potentially exploitable to run arbitrary + code on a vulnerable client or server. + + Thanks to Jri Aedla for reporting this issue. (CVE-2014-0195) + [Jri Aedla, Steve Henson] + + *) Fix bug in TLS code where clients enable anonymous ECDH ciphersuites + are subject to a denial of service attack. + + Thanks to Felix Grbert and Ivan Fratric at Google for discovering + this issue. (CVE-2014-3470) + [Felix Grbert, Ivan Fratric, Steve Henson] + + *) Harmonize version and its documentation. -f flag is used to display + compilation flags. 
+ [mancha <mancha1@zoho.com>] + + *) Fix eckey_priv_encode so it immediately returns an error upon a failure + in i2d_ECPrivateKey. + [mancha <mancha1@zoho.com>] + + *) Fix some double frees. These are not thought to be exploitable. + [mancha <mancha1@zoho.com>] + Changes between 1.0.1f and 1.0.1g [7 Apr 2014] *) A missing bounds check in the handling of the TLS heartbeat extension diff -Nru nodejs-0.11.13/deps/openssl/openssl/Configure nodejs-0.11.15/deps/openssl/openssl/Configure --- nodejs-0.11.13/deps/openssl/openssl/Configure 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/Configure 2015-01-20 21:22:17.000000000 +0000 @@ -720,6 +720,7 @@ "sctp" => "default", "shared" => "default", "store" => "experimental", + "unit-test" => "default", "zlib" => "default", "zlib-dynamic" => "default" ); @@ -727,7 +728,7 @@ # This is what $depflags will look like with the above defaults # (we need this to see if we should advise the user to run "make depend"): -my $default_depflags = " -DOPENSSL_NO_EC_NISTP_64_GCC_128 -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MD2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SCTP -DOPENSSL_NO_STORE"; +my $default_depflags = " -DOPENSSL_NO_EC_NISTP_64_GCC_128 -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MD2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SCTP -DOPENSSL_NO_STORE -DOPENSSL_NO_UNIT_TEST"; # Explicit "no-..." options will be collected in %disabled along with the defaults. # To remove something from %disabled, use "enable-foo" (unless it's experimental). @@ -1766,6 +1767,9 @@ print OUT "/* opensslconf.h */\n"; print OUT "/* WARNING: Generated automatically from opensslconf.h.in by Configure. 
*/\n\n"; +print OUT "#ifdef __cplusplus\n"; +print OUT "extern \"C\" {\n"; +print OUT "#endif\n"; print OUT "/* OpenSSL was configured with the following options: */\n"; my $openssl_algorithm_defines_trans = $openssl_algorithm_defines; $openssl_experimental_defines =~ s/^\s*#\s*define\s+OPENSSL_NO_(.*)/#ifndef OPENSSL_EXPERIMENTAL_$1\n# ifndef OPENSSL_NO_$1\n# define OPENSSL_NO_$1\n# endif\n#endif/mg; @@ -1870,6 +1874,9 @@ { print OUT $_; } } close(IN); +print OUT "#ifdef __cplusplus\n"; +print OUT "}\n"; +print OUT "#endif\n"; close(OUT); rename("crypto/opensslconf.h","crypto/opensslconf.h.bak") || die "unable to rename crypto/opensslconf.h\n" if -e "crypto/opensslconf.h"; rename("crypto/opensslconf.h.new","crypto/opensslconf.h") || die "unable to rename crypto/opensslconf.h.new\n"; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/aes/asm/aesni-x86_64.pl nodejs-0.11.15/deps/openssl/openssl/crypto/aes/asm/aesni-x86_64.pl --- nodejs-0.11.13/deps/openssl/openssl/crypto/aes/asm/aesni-x86_64.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/aes/asm/aesni-x86_64.pl 2015-01-20 21:22:17.000000000 +0000 @@ -525,6 +525,16 @@ .type aesni_ecb_encrypt,\@function,5 .align 16 aesni_ecb_encrypt: +___ +$code.=<<___ if ($win64); + lea -0x58(%rsp),%rsp + movaps %xmm6,(%rsp) + movaps %xmm7,0x10(%rsp) + movaps %xmm8,0x20(%rsp) + movaps %xmm9,0x30(%rsp) +.Lecb_enc_body: +___ +$code.=<<___; and \$-16,$len jz .Lecb_ret @@ -805,6 +815,16 @@ movups $inout5,0x50($out) .Lecb_ret: +___ +$code.=<<___ if ($win64); + movaps (%rsp),%xmm6 + movaps 0x10(%rsp),%xmm7 + movaps 0x20(%rsp),%xmm8 + movaps 0x30(%rsp),%xmm9 + lea 0x58(%rsp),%rsp +.Lecb_enc_ret: +___ +$code.=<<___; ret .size aesni_ecb_encrypt,.-aesni_ecb_encrypt ___ @@ -2730,28 +2750,9 @@ .extern __imp_RtlVirtualUnwind ___ $code.=<<___ if ($PREFIX eq "aesni"); -.type ecb_se_handler,\@abi-omnipotent -.align 16 -ecb_se_handler: - push %rsi - push %rdi - push %rbx - push %rbp - push %r12 - push %r13 - 
push %r14 - push %r15 - pushfq - sub \$64,%rsp - - mov 152($context),%rax # pull context->Rsp - - jmp .Lcommon_seh_tail -.size ecb_se_handler,.-ecb_se_handler - -.type ccm64_se_handler,\@abi-omnipotent +.type ecb_ccm64_se_handler,\@abi-omnipotent .align 16 -ccm64_se_handler: +ecb_ccm64_se_handler: push %rsi push %rdi push %rbx @@ -2788,7 +2789,7 @@ lea 0x58(%rax),%rax # adjust stack pointer jmp .Lcommon_seh_tail -.size ccm64_se_handler,.-ccm64_se_handler +.size ecb_ccm64_se_handler,.-ecb_ccm64_se_handler .type ctr32_se_handler,\@abi-omnipotent .align 16 @@ -2993,14 +2994,15 @@ $code.=<<___ if ($PREFIX eq "aesni"); .LSEH_info_ecb: .byte 9,0,0,0 - .rva ecb_se_handler + .rva ecb_ccm64_se_handler + .rva .Lecb_enc_body,.Lecb_enc_ret # HandlerData[] .LSEH_info_ccm64_enc: .byte 9,0,0,0 - .rva ccm64_se_handler + .rva ecb_ccm64_se_handler .rva .Lccm64_enc_body,.Lccm64_enc_ret # HandlerData[] .LSEH_info_ccm64_dec: .byte 9,0,0,0 - .rva ccm64_se_handler + .rva ecb_ccm64_se_handler .rva .Lccm64_dec_body,.Lccm64_dec_ret # HandlerData[] .LSEH_info_ctr32: .byte 9,0,0,0 diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/aes/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/aes/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/aes/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/aes/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -# -# crypto/aes/Makefile -# - -DIR= aes -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -AES_ENC=aes_core.o aes_cbc.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -#TEST=aestest.c -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=aes_core.c aes_misc.c aes_ecb.c aes_cbc.c aes_cfb.c aes_ofb.c \ - aes_ctr.c aes_ige.c aes_wrap.c -LIBOBJ=aes_misc.o aes_ecb.o aes_cfb.o aes_ofb.o aes_ctr.o aes_ige.o aes_wrap.o \ - $(AES_ENC) - -SRC= $(LIBSRC) - -EXHEADER= aes.h -HEADER= aes_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -aes-ia64.s: asm/aes-ia64.S - $(CC) $(CFLAGS) -E asm/aes-ia64.S > $@ - -aes-586.s: asm/aes-586.pl ../perlasm/x86asm.pl - $(PERL) asm/aes-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -vpaes-x86.s: asm/vpaes-x86.pl ../perlasm/x86asm.pl - $(PERL) asm/vpaes-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -aesni-x86.s: asm/aesni-x86.pl ../perlasm/x86asm.pl - $(PERL) asm/aesni-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ - -aes-x86_64.s: asm/aes-x86_64.pl - $(PERL) asm/aes-x86_64.pl $(PERLASM_SCHEME) > $@ -vpaes-x86_64.s: asm/vpaes-x86_64.pl - $(PERL) asm/vpaes-x86_64.pl $(PERLASM_SCHEME) > $@ -bsaes-x86_64.s: asm/bsaes-x86_64.pl - $(PERL) asm/bsaes-x86_64.pl $(PERLASM_SCHEME) > $@ -aesni-x86_64.s: asm/aesni-x86_64.pl - $(PERL) asm/aesni-x86_64.pl $(PERLASM_SCHEME) > $@ -aesni-sha1-x86_64.s: asm/aesni-sha1-x86_64.pl - $(PERL) asm/aesni-sha1-x86_64.pl $(PERLASM_SCHEME) > $@ - -aes-sparcv9.s: asm/aes-sparcv9.pl - $(PERL) asm/aes-sparcv9.pl $(CFLAGS) > $@ - -aes-ppc.s: asm/aes-ppc.pl - $(PERL) asm/aes-ppc.pl $(PERLASM_SCHEME) $@ - -aes-parisc.s: asm/aes-parisc.pl - $(PERL) asm/aes-parisc.pl $(PERLASM_SCHEME) $@ - -aes-mips.S: asm/aes-mips.pl - $(PERL) asm/aes-mips.pl $(PERLASM_SCHEME) $@ - -# GNU make "catch all" -aes-%.S: 
asm/aes-%.pl; $(PERL) $< $(PERLASM_SCHEME) > $@ -aes-armv4.o: aes-armv4.S - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -aes_cbc.o: ../../include/openssl/aes.h ../../include/openssl/modes.h -aes_cbc.o: ../../include/openssl/opensslconf.h aes_cbc.c -aes_cfb.o: ../../include/openssl/aes.h ../../include/openssl/modes.h -aes_cfb.o: ../../include/openssl/opensslconf.h aes_cfb.c -aes_core.o: ../../include/openssl/aes.h ../../include/openssl/e_os2.h -aes_core.o: ../../include/openssl/opensslconf.h aes_core.c aes_locl.h -aes_ctr.o: ../../include/openssl/aes.h ../../include/openssl/modes.h -aes_ctr.o: ../../include/openssl/opensslconf.h aes_ctr.c -aes_ecb.o: ../../include/openssl/aes.h ../../include/openssl/e_os2.h -aes_ecb.o: ../../include/openssl/opensslconf.h aes_ecb.c aes_locl.h -aes_ige.o: ../../e_os.h ../../include/openssl/aes.h ../../include/openssl/bio.h -aes_ige.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -aes_ige.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -aes_ige.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -aes_ige.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -aes_ige.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -aes_ige.o: ../../include/openssl/symhacks.h ../cryptlib.h aes_ige.c aes_locl.h -aes_misc.o: ../../include/openssl/aes.h ../../include/openssl/crypto.h -aes_misc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -aes_misc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -aes_misc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -aes_misc.o: ../../include/openssl/symhacks.h aes_locl.h aes_misc.c -aes_ofb.o: ../../include/openssl/aes.h ../../include/openssl/modes.h -aes_ofb.o: ../../include/openssl/opensslconf.h aes_ofb.c -aes_wrap.o: ../../e_os.h ../../include/openssl/aes.h -aes_wrap.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -aes_wrap.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -aes_wrap.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -aes_wrap.o: 
../../include/openssl/opensslconf.h -aes_wrap.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -aes_wrap.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -aes_wrap.o: ../../include/openssl/symhacks.h ../cryptlib.h aes_wrap.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/ameth_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/ameth_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/ameth_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/ameth_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -258,7 +258,12 @@ if (!ameth) return 0; ameth->pkey_base_id = to; - return EVP_PKEY_asn1_add0(ameth); + if (!EVP_PKEY_asn1_add0(ameth)) + { + EVP_PKEY_asn1_free(ameth); + return 0; + } + return 1; } int EVP_PKEY_asn1_get0_info(int *ppkey_id, int *ppkey_base_id, int *ppkey_flags, diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_object.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_object.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_object.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_object.c 2015-01-20 21:22:17.000000000 +0000 @@ -283,17 +283,29 @@ ASN1err(ASN1_F_D2I_ASN1_OBJECT,i); return(NULL); } + ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp, long len) { ASN1_OBJECT *ret=NULL; const unsigned char *p; unsigned char *data; - int i; - /* Sanity check OID encoding: can't have leading 0x80 in - * subidentifiers, see: X.690 8.19.2 + int i, length; + + /* Sanity check OID encoding. + * Need at least one content octet. + * MSB must be clear in the last octet. 
+ * can't have leading 0x80 in subidentifiers, see: X.690 8.19.2 */ - for (i = 0, p = *pp; i < len; i++, p++) + if (len <= 0 || len > INT_MAX || pp == NULL || (p = *pp) == NULL || + p[len - 1] & 0x80) + { + ASN1err(ASN1_F_C2I_ASN1_OBJECT,ASN1_R_INVALID_OBJECT_ENCODING); + return NULL; + } + /* Now 0 < len <= INT_MAX, so the cast is safe. */ + length = (int)len; + for (i = 0; i < length; i++, p++) { if (*p == 0x80 && (!i || !(p[-1] & 0x80))) { @@ -316,23 +328,23 @@ data = (unsigned char *)ret->data; ret->data = NULL; /* once detached we can change it */ - if ((data == NULL) || (ret->length < len)) + if ((data == NULL) || (ret->length < length)) { ret->length=0; if (data != NULL) OPENSSL_free(data); - data=(unsigned char *)OPENSSL_malloc(len ? (int)len : 1); + data=(unsigned char *)OPENSSL_malloc(length); if (data == NULL) { i=ERR_R_MALLOC_FAILURE; goto err; } ret->flags|=ASN1_OBJECT_FLAG_DYNAMIC_DATA; } - memcpy(data,p,(int)len); + memcpy(data,p,length); /* reattach data to object, after which it remains const */ ret->data =data; - ret->length=(int)len; + ret->length=length; ret->sn=NULL; ret->ln=NULL; /* ret->flags=ASN1_OBJECT_FLAG_DYNAMIC; we know it is dynamic */ - p+=len; + p+=length; if (a != NULL) (*a)=ret; *pp=p; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/asn1_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/asn1_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/asn1_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/asn1_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -131,6 +131,9 @@ *pclass=xclass; if (!asn1_get_length(&p,&inf,plength,(int)max)) goto err; + if (inf && !(ret & V_ASN1_CONSTRUCTED)) + goto err; + #if 0 fprintf(stderr,"p=%d + *plength=%ld > omax=%ld + *pp=%d (%d > %d)\n", (int)p,*plength,omax,(int)*pp,(int)(p+ *plength), diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/asn_mime.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/asn_mime.c --- 
nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/asn_mime.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/asn_mime.c 2015-01-20 21:22:17.000000000 +0000 @@ -667,6 +667,8 @@ int len, state, save_state = 0; headers = sk_MIME_HEADER_new(mime_hdr_cmp); + if (!headers) + return NULL; while ((len = BIO_gets(bio, linebuf, MAX_SMLEN)) > 0) { /* If whitespace at line start then continuation line */ if(mhdr && isspace((unsigned char)linebuf[0])) state = MIME_NAME; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/asn_pack.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/asn_pack.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/asn_pack.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/asn_pack.c 2015-01-20 21:22:17.000000000 +0000 @@ -134,15 +134,23 @@ if (!(octmp->length = i2d(obj, NULL))) { ASN1err(ASN1_F_ASN1_PACK_STRING,ASN1_R_ENCODE_ERROR); - return NULL; + goto err; } if (!(p = OPENSSL_malloc (octmp->length))) { ASN1err(ASN1_F_ASN1_PACK_STRING,ERR_R_MALLOC_FAILURE); - return NULL; + goto err; } octmp->data = p; i2d (obj, &p); return octmp; + err: + if (!oct || !*oct) + { + ASN1_STRING_free(octmp); + if (oct) + *oct = NULL; + } + return NULL; } #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_strex.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_strex.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_strex.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_strex.c 2015-01-20 21:22:17.000000000 +0000 @@ -568,6 +568,7 @@ mbflag |= MBSTRING_FLAG; stmp.data = NULL; stmp.length = 0; + stmp.flags = 0; ret = ASN1_mbstring_copy(&str, in->data, in->length, mbflag, B_ASN1_UTF8STRING); if(ret < 0) return ret; *out = stmp.data; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_strnid.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_strnid.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_strnid.c 
2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_strnid.c 2015-01-20 21:22:17.000000000 +0000 @@ -74,7 +74,7 @@ * certain software (e.g. Netscape) has problems with them. */ -static unsigned long global_mask = 0xFFFFFFFFL; +static unsigned long global_mask = B_ASN1_UTF8STRING; void ASN1_STRING_set_default_mask(unsigned long mask) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_utctm.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_utctm.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/a_utctm.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/a_utctm.c 2015-01-20 21:22:17.000000000 +0000 @@ -196,24 +196,29 @@ struct tm *ts; struct tm data; size_t len = 20; + int free_s = 0; if (s == NULL) + { + free_s = 1; s=M_ASN1_UTCTIME_new(); + } if (s == NULL) - return(NULL); + goto err; + ts=OPENSSL_gmtime(&t, &data); if (ts == NULL) - return(NULL); + goto err; if (offset_day || offset_sec) { if (!OPENSSL_gmtime_adj(ts, offset_day, offset_sec)) - return NULL; + goto err; } if((ts->tm_year < 50) || (ts->tm_year >= 150)) - return NULL; + goto err; p=(char *)s->data; if ((p == NULL) || ((size_t)s->length < len)) @@ -222,7 +227,7 @@ if (p == NULL) { ASN1err(ASN1_F_ASN1_UTCTIME_ADJ,ERR_R_MALLOC_FAILURE); - return(NULL); + goto err; } if (s->data != NULL) OPENSSL_free(s->data); @@ -237,6 +242,10 @@ ebcdic2ascii(s->data, s->data, s->length); #endif return(s); + err: + if (free_s && s) + M_ASN1_UTCTIME_free(s); + return NULL; } @@ -261,6 +270,11 @@ t -= offset*60; /* FIXME: may overflow in extreme cases */ tm = OPENSSL_gmtime(&t, &data); + /* NB: -1, 0, 1 already valid return values so use -2 to + * indicate error. 
+ */ + if (tm == NULL) + return -2; #define return_cmp(a,b) if ((a)<(b)) return -1; else if ((a)>(b)) return 1 year = g2(s->data); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/bio_asn1.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/bio_asn1.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/bio_asn1.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/bio_asn1.c 2015-01-20 21:22:17.000000000 +0000 @@ -154,7 +154,10 @@ if (!ctx) return 0; if (!asn1_bio_init(ctx, DEFAULT_ASN1_BUF_SIZE)) + { + OPENSSL_free(ctx); return 0; + } b->init = 1; b->ptr = (char *)ctx; b->flags = 0; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/charmap.pl nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/charmap.pl --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/charmap.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/charmap.pl 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,8 @@ #!/usr/local/bin/perl -w +# Written by Dr Stephen N Henson (steve@openssl.org). +# Licensed under the terms of the OpenSSL license. 
+ use strict; my ($i, @arr); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/evp_asn1.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/evp_asn1.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/evp_asn1.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/evp_asn1.c 2015-01-20 21:22:17.000000000 +0000 @@ -66,7 +66,11 @@ ASN1_STRING *os; if ((os=M_ASN1_OCTET_STRING_new()) == NULL) return(0); - if (!M_ASN1_OCTET_STRING_set(os,data,len)) return(0); + if (!M_ASN1_OCTET_STRING_set(os,data,len)) + { + M_ASN1_OCTET_STRING_free(os); + return 0; + } ASN1_TYPE_set(a,V_ASN1_OCTET_STRING,os); return(1); } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,930 +0,0 @@ -# -# OpenSSL/crypto/asn1/Makefile -# - -DIR= asn1 -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= a_object.c a_bitstr.c a_utctm.c a_gentm.c a_time.c a_int.c a_octet.c \ - a_print.c a_type.c a_set.c a_dup.c a_d2i_fp.c a_i2d_fp.c \ - a_enum.c a_utf8.c a_sign.c a_digest.c a_verify.c a_mbstr.c a_strex.c \ - x_algor.c x_val.c x_pubkey.c x_sig.c x_req.c x_attrib.c x_bignum.c \ - x_long.c x_name.c x_x509.c x_x509a.c x_crl.c x_info.c x_spki.c nsseq.c \ - x_nx509.c d2i_pu.c d2i_pr.c i2d_pu.c i2d_pr.c\ - t_req.c t_x509.c t_x509a.c t_crl.c t_pkey.c t_spki.c t_bitst.c \ - tasn_new.c tasn_fre.c tasn_enc.c tasn_dec.c tasn_utl.c tasn_typ.c \ - tasn_prn.c ameth_lib.c \ - f_int.c f_string.c n_pkey.c \ - f_enum.c x_pkey.c a_bool.c x_exten.c bio_asn1.c bio_ndef.c asn_mime.c \ - asn1_gen.c asn1_par.c asn1_lib.c asn1_err.c a_bytes.c a_strnid.c \ - evp_asn1.c asn_pack.c p5_pbe.c p5_pbev2.c p8_pkey.c asn_moid.c -LIBOBJ= a_object.o a_bitstr.o a_utctm.o a_gentm.o a_time.o a_int.o a_octet.o \ - a_print.o a_type.o a_set.o a_dup.o a_d2i_fp.o a_i2d_fp.o \ - a_enum.o a_utf8.o a_sign.o a_digest.o a_verify.o a_mbstr.o a_strex.o \ - x_algor.o x_val.o x_pubkey.o x_sig.o x_req.o x_attrib.o x_bignum.o \ - x_long.o x_name.o x_x509.o x_x509a.o x_crl.o x_info.o x_spki.o nsseq.o \ - x_nx509.o d2i_pu.o d2i_pr.o i2d_pu.o i2d_pr.o \ - t_req.o t_x509.o t_x509a.o t_crl.o t_pkey.o t_spki.o t_bitst.o \ - tasn_new.o tasn_fre.o tasn_enc.o tasn_dec.o tasn_utl.o tasn_typ.o \ - tasn_prn.o ameth_lib.o \ - f_int.o f_string.o n_pkey.o \ - f_enum.o x_pkey.o a_bool.o x_exten.o bio_asn1.o bio_ndef.o asn_mime.o \ - asn1_gen.o asn1_par.o asn1_lib.o asn1_err.o a_bytes.o a_strnid.o \ - evp_asn1.o asn_pack.o p5_pbe.o p5_pbev2.o p8_pkey.o asn_moid.o - -SRC= $(LIBSRC) - -EXHEADER= asn1.h asn1_mac.h asn1t.h -HEADER= $(EXHEADER) asn1_locl.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -test: test.c - 
cc -g -I../../include -c test.c - cc -g -I../../include -o test test.o -L../.. -lcrypto - -pk: pk.c - cc -g -I../../include -c pk.c - cc -g -I../../include -o pk pk.o -L../.. -lcrypto - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by top Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -a_bitstr.o: ../../e_os.h ../../include/openssl/asn1.h -a_bitstr.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_bitstr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_bitstr.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_bitstr.o: ../../include/openssl/opensslconf.h -a_bitstr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_bitstr.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_bitstr.o: ../../include/openssl/symhacks.h ../cryptlib.h a_bitstr.c -a_bool.o: ../../e_os.h ../../include/openssl/asn1.h -a_bool.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -a_bool.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_bool.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_bool.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -a_bool.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_bool.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_bool.o: ../../include/openssl/symhacks.h ../cryptlib.h a_bool.c -a_bytes.o: ../../e_os.h ../../include/openssl/asn1.h -a_bytes.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_bytes.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_bytes.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_bytes.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_bytes.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_bytes.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_bytes.o: ../cryptlib.h a_bytes.c -a_d2i_fp.o: ../../e_os.h ../../include/openssl/asn1.h -a_d2i_fp.o: ../../include/openssl/asn1_mac.h ../../include/openssl/bio.h -a_d2i_fp.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_d2i_fp.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_d2i_fp.o: ../../include/openssl/lhash.h 
../../include/openssl/opensslconf.h -a_d2i_fp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_d2i_fp.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_d2i_fp.o: ../../include/openssl/symhacks.h ../cryptlib.h a_d2i_fp.c -a_digest.o: ../../e_os.h ../../include/openssl/asn1.h -a_digest.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_digest.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_digest.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -a_digest.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -a_digest.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -a_digest.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -a_digest.o: ../../include/openssl/opensslconf.h -a_digest.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_digest.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -a_digest.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -a_digest.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -a_digest.o: ../../include/openssl/x509_vfy.h ../cryptlib.h a_digest.c -a_dup.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -a_dup.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_dup.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_dup.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -a_dup.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_dup.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_dup.o: ../../include/openssl/symhacks.h ../cryptlib.h a_dup.c -a_enum.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -a_enum.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -a_enum.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_enum.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_enum.o: 
../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_enum.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_enum.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_enum.o: ../cryptlib.h a_enum.c -a_gentm.o: ../../e_os.h ../../include/openssl/asn1.h -a_gentm.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_gentm.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_gentm.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_gentm.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_gentm.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_gentm.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_gentm.o: ../cryptlib.h ../o_time.h a_gentm.c -a_i2d_fp.o: ../../e_os.h ../../include/openssl/asn1.h -a_i2d_fp.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_i2d_fp.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_i2d_fp.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_i2d_fp.o: ../../include/openssl/opensslconf.h -a_i2d_fp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_i2d_fp.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_i2d_fp.o: ../../include/openssl/symhacks.h ../cryptlib.h a_i2d_fp.c -a_int.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -a_int.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -a_int.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_int.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_int.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_int.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_int.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_int.o: ../cryptlib.h a_int.c -a_mbstr.o: ../../e_os.h ../../include/openssl/asn1.h -a_mbstr.o: ../../include/openssl/bio.h 
../../include/openssl/buffer.h -a_mbstr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_mbstr.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_mbstr.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_mbstr.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_mbstr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_mbstr.o: ../cryptlib.h a_mbstr.c -a_object.o: ../../e_os.h ../../include/openssl/asn1.h -a_object.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -a_object.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_object.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_object.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -a_object.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -a_object.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_object.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_object.o: ../../include/openssl/symhacks.h ../cryptlib.h a_object.c -a_octet.o: ../../e_os.h ../../include/openssl/asn1.h -a_octet.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_octet.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_octet.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_octet.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_octet.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_octet.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_octet.o: ../cryptlib.h a_octet.c -a_print.o: ../../e_os.h ../../include/openssl/asn1.h -a_print.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_print.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_print.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_print.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_print.o: 
../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_print.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_print.o: ../cryptlib.h a_print.c -a_set.o: ../../e_os.h ../../include/openssl/asn1.h -a_set.o: ../../include/openssl/asn1_mac.h ../../include/openssl/bio.h -a_set.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_set.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_set.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -a_set.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_set.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_set.o: ../../include/openssl/symhacks.h ../cryptlib.h a_set.c -a_sign.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -a_sign.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -a_sign.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_sign.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -a_sign.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -a_sign.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -a_sign.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -a_sign.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_sign.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -a_sign.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -a_sign.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_sign.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -a_sign.o: ../cryptlib.h a_sign.c asn1_locl.h -a_strex.o: ../../e_os.h ../../include/openssl/asn1.h -a_strex.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_strex.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_strex.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -a_strex.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h 
-a_strex.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -a_strex.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -a_strex.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_strex.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -a_strex.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -a_strex.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_strex.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -a_strex.o: ../cryptlib.h a_strex.c charmap.h -a_strnid.o: ../../e_os.h ../../include/openssl/asn1.h -a_strnid.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_strnid.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_strnid.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_strnid.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -a_strnid.o: ../../include/openssl/opensslconf.h -a_strnid.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_strnid.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_strnid.o: ../../include/openssl/symhacks.h ../cryptlib.h a_strnid.c -a_time.o: ../../e_os.h ../../include/openssl/asn1.h -a_time.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -a_time.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_time.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_time.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -a_time.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_time.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_time.o: ../../include/openssl/symhacks.h ../cryptlib.h ../o_time.h a_time.c -a_type.o: ../../e_os.h ../../include/openssl/asn1.h -a_type.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -a_type.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_type.o: 
../../include/openssl/e_os2.h ../../include/openssl/err.h -a_type.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -a_type.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -a_type.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_type.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_type.o: ../../include/openssl/symhacks.h ../cryptlib.h a_type.c -a_utctm.o: ../../e_os.h ../../include/openssl/asn1.h -a_utctm.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -a_utctm.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -a_utctm.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -a_utctm.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -a_utctm.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -a_utctm.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -a_utctm.o: ../cryptlib.h ../o_time.h a_utctm.c -a_utf8.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -a_utf8.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_utf8.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -a_utf8.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -a_utf8.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_utf8.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -a_utf8.o: ../../include/openssl/symhacks.h ../cryptlib.h a_utf8.c -a_verify.o: ../../e_os.h ../../include/openssl/asn1.h -a_verify.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -a_verify.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -a_verify.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -a_verify.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -a_verify.o: ../../include/openssl/err.h ../../include/openssl/evp.h -a_verify.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -a_verify.o: 
../../include/openssl/objects.h ../../include/openssl/opensslconf.h -a_verify.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -a_verify.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -a_verify.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -a_verify.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -a_verify.o: ../../include/openssl/x509_vfy.h ../cryptlib.h a_verify.c -a_verify.o: asn1_locl.h -ameth_lib.o: ../../e_os.h ../../include/openssl/asn1.h -ameth_lib.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -ameth_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ameth_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ameth_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ameth_lib.o: ../../include/openssl/engine.h ../../include/openssl/err.h -ameth_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ameth_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ameth_lib.o: ../../include/openssl/opensslconf.h -ameth_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ameth_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ameth_lib.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ameth_lib.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ameth_lib.o: ../../include/openssl/x509_vfy.h ../cryptlib.h ameth_lib.c -ameth_lib.o: asn1_locl.h -asn1_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -asn1_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -asn1_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -asn1_err.o: ../../include/openssl/opensslconf.h -asn1_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -asn1_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -asn1_err.o: ../../include/openssl/symhacks.h asn1_err.c -asn1_gen.o: ../../e_os.h 
../../include/openssl/asn1.h -asn1_gen.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -asn1_gen.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -asn1_gen.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -asn1_gen.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -asn1_gen.o: ../../include/openssl/err.h ../../include/openssl/evp.h -asn1_gen.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -asn1_gen.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -asn1_gen.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -asn1_gen.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -asn1_gen.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -asn1_gen.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -asn1_gen.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -asn1_gen.o: ../cryptlib.h asn1_gen.c -asn1_lib.o: ../../e_os.h ../../include/openssl/asn1.h -asn1_lib.o: ../../include/openssl/asn1_mac.h ../../include/openssl/bio.h -asn1_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -asn1_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -asn1_lib.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -asn1_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -asn1_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -asn1_lib.o: ../../include/openssl/symhacks.h ../cryptlib.h asn1_lib.c -asn1_par.o: ../../e_os.h ../../include/openssl/asn1.h -asn1_par.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -asn1_par.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -asn1_par.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -asn1_par.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -asn1_par.o: ../../include/openssl/opensslconf.h -asn1_par.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -asn1_par.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -asn1_par.o: ../../include/openssl/symhacks.h ../cryptlib.h asn1_par.c -asn_mime.o: ../../e_os.h ../../include/openssl/asn1.h -asn_mime.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -asn_mime.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -asn_mime.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -asn_mime.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -asn_mime.o: ../../include/openssl/err.h ../../include/openssl/evp.h -asn_mime.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -asn_mime.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -asn_mime.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -asn_mime.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -asn_mime.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -asn_mime.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -asn_mime.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -asn_mime.o: ../cryptlib.h asn1_locl.h asn_mime.c -asn_moid.o: ../../e_os.h ../../include/openssl/asn1.h -asn_moid.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -asn_moid.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -asn_moid.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -asn_moid.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -asn_moid.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -asn_moid.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -asn_moid.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -asn_moid.o: ../../include/openssl/opensslconf.h -asn_moid.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -asn_moid.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -asn_moid.o: ../../include/openssl/sha.h 
../../include/openssl/stack.h -asn_moid.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -asn_moid.o: ../../include/openssl/x509_vfy.h ../cryptlib.h asn_moid.c -asn_pack.o: ../../e_os.h ../../include/openssl/asn1.h -asn_pack.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -asn_pack.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -asn_pack.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -asn_pack.o: ../../include/openssl/opensslconf.h -asn_pack.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -asn_pack.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -asn_pack.o: ../../include/openssl/symhacks.h ../cryptlib.h asn_pack.c -bio_asn1.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -bio_asn1.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -bio_asn1.o: ../../include/openssl/opensslconf.h -bio_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_asn1.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_asn1.o: ../../include/openssl/symhacks.h bio_asn1.c -bio_ndef.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -bio_ndef.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -bio_ndef.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bio_ndef.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bio_ndef.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_ndef.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_ndef.o: ../../include/openssl/symhacks.h bio_ndef.c -d2i_pr.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -d2i_pr.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -d2i_pr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -d2i_pr.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -d2i_pr.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h 
-d2i_pr.o: ../../include/openssl/err.h ../../include/openssl/evp.h -d2i_pr.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -d2i_pr.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -d2i_pr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -d2i_pr.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -d2i_pr.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -d2i_pr.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -d2i_pr.o: ../../include/openssl/x509_vfy.h ../cryptlib.h asn1_locl.h d2i_pr.c -d2i_pu.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -d2i_pu.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -d2i_pu.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -d2i_pu.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -d2i_pu.o: ../../include/openssl/err.h ../../include/openssl/evp.h -d2i_pu.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -d2i_pu.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -d2i_pu.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -d2i_pu.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -d2i_pu.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -d2i_pu.o: ../cryptlib.h d2i_pu.c -evp_asn1.o: ../../e_os.h ../../include/openssl/asn1.h -evp_asn1.o: ../../include/openssl/asn1_mac.h ../../include/openssl/bio.h -evp_asn1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -evp_asn1.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -evp_asn1.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -evp_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_asn1.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -evp_asn1.o: ../../include/openssl/symhacks.h ../cryptlib.h evp_asn1.c -f_enum.o: ../../e_os.h ../../include/openssl/asn1.h 
../../include/openssl/bio.h -f_enum.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -f_enum.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -f_enum.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -f_enum.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -f_enum.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -f_enum.o: ../../include/openssl/symhacks.h ../cryptlib.h f_enum.c -f_int.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -f_int.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -f_int.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -f_int.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -f_int.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -f_int.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -f_int.o: ../../include/openssl/symhacks.h ../cryptlib.h f_int.c -f_string.o: ../../e_os.h ../../include/openssl/asn1.h -f_string.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -f_string.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -f_string.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -f_string.o: ../../include/openssl/opensslconf.h -f_string.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -f_string.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -f_string.o: ../../include/openssl/symhacks.h ../cryptlib.h f_string.c -i2d_pr.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -i2d_pr.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -i2d_pr.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -i2d_pr.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -i2d_pr.o: ../../include/openssl/err.h ../../include/openssl/evp.h -i2d_pr.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -i2d_pr.o: ../../include/openssl/objects.h 
../../include/openssl/opensslconf.h -i2d_pr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -i2d_pr.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -i2d_pr.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -i2d_pr.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -i2d_pr.o: ../../include/openssl/x509_vfy.h ../cryptlib.h asn1_locl.h i2d_pr.c -i2d_pu.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -i2d_pu.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -i2d_pu.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -i2d_pu.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -i2d_pu.o: ../../include/openssl/err.h ../../include/openssl/evp.h -i2d_pu.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -i2d_pu.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -i2d_pu.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -i2d_pu.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -i2d_pu.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -i2d_pu.o: ../cryptlib.h i2d_pu.c -n_pkey.o: ../../e_os.h ../../include/openssl/asn1.h -n_pkey.o: ../../include/openssl/asn1_mac.h ../../include/openssl/asn1t.h -n_pkey.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -n_pkey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -n_pkey.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -n_pkey.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -n_pkey.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -n_pkey.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -n_pkey.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -n_pkey.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -n_pkey.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -n_pkey.o: ../../include/openssl/sha.h 
../../include/openssl/stack.h -n_pkey.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -n_pkey.o: ../../include/openssl/x509_vfy.h ../cryptlib.h n_pkey.c -nsseq.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -nsseq.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -nsseq.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -nsseq.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -nsseq.o: ../../include/openssl/ecdsa.h ../../include/openssl/evp.h -nsseq.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -nsseq.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -nsseq.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -nsseq.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -nsseq.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -nsseq.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -nsseq.o: ../../include/openssl/x509_vfy.h nsseq.c -p5_pbe.o: ../../e_os.h ../../include/openssl/asn1.h -p5_pbe.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -p5_pbe.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p5_pbe.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p5_pbe.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p5_pbe.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p5_pbe.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p5_pbe.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p5_pbe.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p5_pbe.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -p5_pbe.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p5_pbe.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p5_pbe.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p5_pbe.o: ../cryptlib.h p5_pbe.c -p5_pbev2.o: ../../e_os.h 
../../include/openssl/asn1.h -p5_pbev2.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -p5_pbev2.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p5_pbev2.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p5_pbev2.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p5_pbev2.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p5_pbev2.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p5_pbev2.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p5_pbev2.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p5_pbev2.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -p5_pbev2.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p5_pbev2.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p5_pbev2.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p5_pbev2.o: ../cryptlib.h p5_pbev2.c -p8_pkey.o: ../../e_os.h ../../include/openssl/asn1.h -p8_pkey.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -p8_pkey.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p8_pkey.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p8_pkey.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p8_pkey.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p8_pkey.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p8_pkey.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p8_pkey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p8_pkey.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p8_pkey.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p8_pkey.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p8_pkey.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p8_pkey.c -t_bitst.o: ../../e_os.h ../../include/openssl/asn1.h -t_bitst.o: ../../include/openssl/bio.h 
../../include/openssl/buffer.h -t_bitst.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -t_bitst.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -t_bitst.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -t_bitst.o: ../../include/openssl/err.h ../../include/openssl/evp.h -t_bitst.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -t_bitst.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -t_bitst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -t_bitst.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -t_bitst.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -t_bitst.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -t_bitst.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -t_bitst.o: ../cryptlib.h t_bitst.c -t_crl.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -t_crl.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -t_crl.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -t_crl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -t_crl.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -t_crl.o: ../../include/openssl/err.h ../../include/openssl/evp.h -t_crl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -t_crl.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -t_crl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -t_crl.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -t_crl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -t_crl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -t_crl.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -t_crl.o: ../cryptlib.h t_crl.c -t_pkey.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -t_pkey.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h 
-t_pkey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -t_pkey.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -t_pkey.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -t_pkey.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -t_pkey.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -t_pkey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -t_pkey.o: ../cryptlib.h t_pkey.c -t_req.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -t_req.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -t_req.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -t_req.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -t_req.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -t_req.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -t_req.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -t_req.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -t_req.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -t_req.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -t_req.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -t_req.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -t_req.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -t_req.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -t_req.o: ../cryptlib.h t_req.c -t_spki.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -t_spki.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -t_spki.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -t_spki.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -t_spki.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -t_spki.o: ../../include/openssl/err.h ../../include/openssl/evp.h -t_spki.o: ../../include/openssl/lhash.h 
../../include/openssl/obj_mac.h -t_spki.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -t_spki.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -t_spki.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -t_spki.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -t_spki.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -t_spki.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -t_spki.o: ../cryptlib.h t_spki.c -t_x509.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -t_x509.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -t_x509.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -t_x509.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -t_x509.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -t_x509.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -t_x509.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -t_x509.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -t_x509.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -t_x509.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -t_x509.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -t_x509.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -t_x509.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -t_x509.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -t_x509.o: ../cryptlib.h asn1_locl.h t_x509.c -t_x509a.o: ../../e_os.h ../../include/openssl/asn1.h -t_x509a.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -t_x509a.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -t_x509a.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -t_x509a.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -t_x509a.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -t_x509a.o: 
../../include/openssl/obj_mac.h ../../include/openssl/objects.h -t_x509a.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -t_x509a.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -t_x509a.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -t_x509a.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -t_x509a.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -t_x509a.o: ../cryptlib.h t_x509a.c -tasn_dec.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -tasn_dec.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tasn_dec.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tasn_dec.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -tasn_dec.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -tasn_dec.o: ../../include/openssl/opensslconf.h -tasn_dec.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_dec.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -tasn_dec.o: ../../include/openssl/symhacks.h tasn_dec.c -tasn_enc.o: ../../e_os.h ../../include/openssl/asn1.h -tasn_enc.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -tasn_enc.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -tasn_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -tasn_enc.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tasn_enc.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tasn_enc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_enc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -tasn_enc.o: ../../include/openssl/symhacks.h ../cryptlib.h tasn_enc.c -tasn_fre.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -tasn_fre.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -tasn_fre.o: ../../include/openssl/e_os2.h ../../include/openssl/obj_mac.h -tasn_fre.o: 
../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tasn_fre.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_fre.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -tasn_fre.o: ../../include/openssl/symhacks.h tasn_fre.c -tasn_new.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -tasn_new.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -tasn_new.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -tasn_new.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tasn_new.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tasn_new.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_new.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -tasn_new.o: ../../include/openssl/symhacks.h tasn_new.c -tasn_prn.o: ../../e_os.h ../../include/openssl/asn1.h -tasn_prn.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -tasn_prn.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -tasn_prn.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tasn_prn.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tasn_prn.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -tasn_prn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -tasn_prn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -tasn_prn.o: ../../include/openssl/opensslconf.h -tasn_prn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_prn.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tasn_prn.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tasn_prn.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tasn_prn.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -tasn_prn.o: ../cryptlib.h asn1_locl.h tasn_prn.c -tasn_typ.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -tasn_typ.o: 
../../include/openssl/bio.h ../../include/openssl/crypto.h -tasn_typ.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -tasn_typ.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_typ.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -tasn_typ.o: ../../include/openssl/symhacks.h tasn_typ.c -tasn_utl.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -tasn_utl.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -tasn_utl.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -tasn_utl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tasn_utl.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tasn_utl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tasn_utl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -tasn_utl.o: ../../include/openssl/symhacks.h tasn_utl.c -x_algor.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -x_algor.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x_algor.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_algor.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_algor.o: ../../include/openssl/ecdsa.h ../../include/openssl/evp.h -x_algor.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_algor.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_algor.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_algor.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_algor.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_algor.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_algor.o: ../../include/openssl/x509_vfy.h x_algor.c -x_attrib.o: ../../e_os.h ../../include/openssl/asn1.h -x_attrib.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_attrib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h 
-x_attrib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_attrib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_attrib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_attrib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_attrib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_attrib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_attrib.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_attrib.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_attrib.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_attrib.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_attrib.c -x_bignum.o: ../../e_os.h ../../include/openssl/asn1.h -x_bignum.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_bignum.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -x_bignum.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_bignum.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -x_bignum.o: ../../include/openssl/opensslconf.h -x_bignum.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_bignum.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -x_bignum.o: ../../include/openssl/symhacks.h ../cryptlib.h x_bignum.c -x_crl.o: ../../e_os.h ../../include/openssl/asn1.h -x_crl.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_crl.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -x_crl.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_crl.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_crl.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x_crl.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x_crl.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x_crl.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -x_crl.o: 
../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -x_crl.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -x_crl.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -x_crl.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -x_crl.o: ../../include/openssl/x509v3.h ../cryptlib.h asn1_locl.h x_crl.c -x_exten.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -x_exten.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x_exten.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_exten.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_exten.o: ../../include/openssl/ecdsa.h ../../include/openssl/evp.h -x_exten.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_exten.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_exten.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_exten.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_exten.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_exten.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_exten.o: ../../include/openssl/x509_vfy.h x_exten.c -x_info.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -x_info.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_info.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_info.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_info.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_info.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_info.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_info.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_info.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_info.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_info.o: ../../include/openssl/symhacks.h 
../../include/openssl/x509.h -x_info.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_info.c -x_long.o: ../../e_os.h ../../include/openssl/asn1.h -x_long.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_long.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -x_long.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_long.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -x_long.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -x_long.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -x_long.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -x_long.o: ../cryptlib.h x_long.c -x_name.o: ../../e_os.h ../../include/openssl/asn1.h -x_name.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_name.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_name.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_name.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_name.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_name.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_name.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_name.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_name.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_name.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_name.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_name.o: ../../include/openssl/x509_vfy.h ../cryptlib.h asn1_locl.h x_name.c -x_nx509.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -x_nx509.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x_nx509.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_nx509.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_nx509.o: ../../include/openssl/ecdsa.h ../../include/openssl/evp.h -x_nx509.o: 
../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_nx509.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_nx509.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_nx509.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_nx509.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_nx509.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_nx509.o: ../../include/openssl/x509_vfy.h x_nx509.c -x_pkey.o: ../../e_os.h ../../include/openssl/asn1.h -x_pkey.o: ../../include/openssl/asn1_mac.h ../../include/openssl/bio.h -x_pkey.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_pkey.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_pkey.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_pkey.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_pkey.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_pkey.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_pkey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_pkey.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_pkey.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_pkey.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_pkey.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_pkey.c -x_pubkey.o: ../../e_os.h ../../include/openssl/asn1.h -x_pubkey.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_pubkey.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_pubkey.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -x_pubkey.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_pubkey.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x_pubkey.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x_pubkey.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x_pubkey.o: 
../../include/openssl/opensslconf.h -x_pubkey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_pubkey.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -x_pubkey.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -x_pubkey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -x_pubkey.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -x_pubkey.o: ../cryptlib.h asn1_locl.h x_pubkey.c -x_req.o: ../../e_os.h ../../include/openssl/asn1.h -x_req.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_req.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_req.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_req.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_req.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_req.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_req.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_req.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_req.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_req.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_req.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_req.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_req.c -x_sig.o: ../../e_os.h ../../include/openssl/asn1.h -x_sig.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_sig.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_sig.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_sig.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_sig.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_sig.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_sig.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_sig.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_sig.o: 
../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_sig.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_sig.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_sig.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_sig.c -x_spki.o: ../../e_os.h ../../include/openssl/asn1.h -x_spki.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_spki.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_spki.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_spki.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_spki.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_spki.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_spki.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_spki.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_spki.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_spki.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_spki.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_spki.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_spki.c -x_val.o: ../../e_os.h ../../include/openssl/asn1.h -x_val.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_val.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_val.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_val.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_val.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_val.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_val.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_val.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_val.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_val.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_val.o: ../../include/openssl/symhacks.h 
../../include/openssl/x509.h -x_val.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_val.c -x_x509.o: ../../e_os.h ../../include/openssl/asn1.h -x_x509.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_x509.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -x_x509.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x_x509.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_x509.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x_x509.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x_x509.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x_x509.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -x_x509.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -x_x509.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -x_x509.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -x_x509.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -x_x509.o: ../../include/openssl/x509v3.h ../cryptlib.h x_x509.c -x_x509a.o: ../../e_os.h ../../include/openssl/asn1.h -x_x509a.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x_x509a.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_x509a.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x_x509a.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x_x509a.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x_x509a.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x_x509a.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x_x509a.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x_x509a.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x_x509a.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_x509a.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_x509a.o: ../../include/openssl/x509_vfy.h ../cryptlib.h 
x_x509a.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/tasn_enc.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/tasn_enc.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/tasn_enc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/tasn_enc.c 2015-01-20 21:22:17.000000000 +0000 @@ -453,9 +453,14 @@ { derlst = OPENSSL_malloc(sk_ASN1_VALUE_num(sk) * sizeof(*derlst)); + if (!derlst) + return 0; tmpdat = OPENSSL_malloc(skcontlen); - if (!derlst || !tmpdat) + if (!tmpdat) + { + OPENSSL_free(derlst); return 0; + } } } /* If not sorting just output each item */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/t_x509.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/t_x509.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/t_x509.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/t_x509.c 2015-01-20 21:22:17.000000000 +0000 @@ -475,6 +475,8 @@ l=80-2-obase; b=X509_NAME_oneline(name,NULL,0); + if (!b) + return 0; if (!*b) { OPENSSL_free(b); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/x_crl.c nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/x_crl.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/asn1/x_crl.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/asn1/x_crl.c 2015-01-20 21:22:17.000000000 +0000 @@ -270,6 +270,7 @@ { /* We handle IDP and deltas */ if ((nid == NID_issuing_distribution_point) + || (nid == NID_authority_key_identifier) || (nid == NID_delta_crl)) break;; crl->flags |= EXFLAG_CRITICAL; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bf/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/bf/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/bf/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bf/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -# -# OpenSSL/crypto/blowfish/Makefile -# - -DIR= bf -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -BF_ENC= bf_enc.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=bftest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=bf_skey.c bf_ecb.c bf_enc.c bf_cfb64.c bf_ofb64.c -LIBOBJ=bf_skey.o bf_ecb.o $(BF_ENC) bf_cfb64.o bf_ofb64.o - -SRC= $(LIBSRC) - -EXHEADER= blowfish.h -HEADER= bf_pi.h bf_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -bf-586.s: asm/bf-586.pl ../perlasm/x86asm.pl ../perlasm/cbc.pl - $(PERL) asm/bf-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -# We need to use force because 'install' matches 'INSTALL' on case -# insensitive systems -FRC.install: -install: FRC.install - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -bf_cfb64.o: ../../include/openssl/blowfish.h ../../include/openssl/e_os2.h -bf_cfb64.o: ../../include/openssl/opensslconf.h bf_cfb64.c bf_locl.h -bf_ecb.o: ../../include/openssl/blowfish.h ../../include/openssl/e_os2.h -bf_ecb.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -bf_ecb.o: bf_ecb.c bf_locl.h -bf_enc.o: ../../include/openssl/blowfish.h ../../include/openssl/e_os2.h -bf_enc.o: ../../include/openssl/opensslconf.h bf_enc.c bf_locl.h -bf_ofb64.o: ../../include/openssl/blowfish.h ../../include/openssl/e_os2.h -bf_ofb64.o: ../../include/openssl/opensslconf.h bf_locl.h bf_ofb64.c -bf_skey.o: ../../include/openssl/blowfish.h ../../include/openssl/crypto.h -bf_skey.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -bf_skey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bf_skey.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bf_skey.o: ../../include/openssl/symhacks.h bf_locl.h bf_pi.h bf_skey.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bio/bio_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/bio/bio_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bio/bio_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bio/bio_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -132,8 +132,8 @@ CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, a, &a->ex_data); - if ((a->method == NULL) || (a->method->destroy == NULL)) return(1); - a->method->destroy(a); + if ((a->method != NULL) && (a->method->destroy != NULL)) + a->method->destroy(a); OPENSSL_free(a); return(1); } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bio/bss_dgram.c nodejs-0.11.15/deps/openssl/openssl/crypto/bio/bss_dgram.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bio/bss_dgram.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bio/bss_dgram.c 2015-01-20 21:22:17.000000000 +0000 @@ -1333,7 +1333,7 @@ bio_dgram_sctp_data *data = NULL; socklen_t 
sockopt_len = 0; struct sctp_authkeyid authkeyid; - struct sctp_authkey *authkey; + struct sctp_authkey *authkey = NULL; data = (bio_dgram_sctp_data *)b->ptr; @@ -1388,6 +1388,11 @@ /* Add new key */ sockopt_len = sizeof(struct sctp_authkey) + 64 * sizeof(uint8_t); authkey = OPENSSL_malloc(sockopt_len); + if (authkey == NULL) + { + ret = -1; + break; + } memset(authkey, 0x00, sockopt_len); authkey->sca_keynumber = authkeyid.scact_keynumber + 1; #ifndef __FreeBSD__ @@ -1399,6 +1404,8 @@ memcpy(&authkey->sca_key[0], ptr, 64 * sizeof(uint8_t)); ret = setsockopt(b->num, IPPROTO_SCTP, SCTP_AUTH_KEY, authkey, sockopt_len); + OPENSSL_free(authkey); + authkey = NULL; if (ret < 0) break; /* Reset active key */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bio/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/bio/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/bio/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bio/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,222 +0,0 @@ -# -# OpenSSL/crypto/bio/Makefile -# - -DIR= bio -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= bio_lib.c bio_cb.c bio_err.c \ - bss_mem.c bss_null.c bss_fd.c \ - bss_file.c bss_sock.c bss_conn.c \ - bf_null.c bf_buff.c b_print.c b_dump.c \ - b_sock.c bss_acpt.c bf_nbio.c bss_log.c bss_bio.c \ - bss_dgram.c -# bf_lbuf.c -LIBOBJ= bio_lib.o bio_cb.o bio_err.o \ - bss_mem.o bss_null.o bss_fd.o \ - bss_file.o bss_sock.o bss_conn.o \ - bf_null.o bf_buff.o b_print.o b_dump.o \ - b_sock.o bss_acpt.o bf_nbio.o bss_log.o bss_bio.o \ - bss_dgram.o -# bf_lbuf.o - -SRC= $(LIBSRC) - -EXHEADER= bio.h -HEADER= bio_lcl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -b_dump.o: ../../e_os.h ../../include/openssl/bio.h -b_dump.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -b_dump.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -b_dump.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -b_dump.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -b_dump.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -b_dump.o: ../../include/openssl/symhacks.h ../cryptlib.h b_dump.c bio_lcl.h -b_print.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -b_print.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -b_print.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -b_print.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -b_print.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -b_print.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -b_print.o: ../../include/openssl/symhacks.h ../cryptlib.h b_print.c -b_sock.o: ../../e_os.h ../../include/openssl/bio.h -b_sock.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -b_sock.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -b_sock.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -b_sock.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -b_sock.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -b_sock.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -b_sock.o: ../cryptlib.h b_sock.c -bf_buff.o: ../../e_os.h ../../include/openssl/bio.h -bf_buff.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bf_buff.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bf_buff.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bf_buff.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bf_buff.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bf_buff.o: 
../../include/openssl/symhacks.h ../cryptlib.h bf_buff.c -bf_nbio.o: ../../e_os.h ../../include/openssl/bio.h -bf_nbio.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bf_nbio.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bf_nbio.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bf_nbio.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bf_nbio.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -bf_nbio.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bf_nbio.o: ../cryptlib.h bf_nbio.c -bf_null.o: ../../e_os.h ../../include/openssl/bio.h -bf_null.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bf_null.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bf_null.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bf_null.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bf_null.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bf_null.o: ../../include/openssl/symhacks.h ../cryptlib.h bf_null.c -bio_cb.o: ../../e_os.h ../../include/openssl/bio.h -bio_cb.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bio_cb.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bio_cb.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bio_cb.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_cb.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_cb.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_cb.c -bio_err.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -bio_err.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bio_err.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bio_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_err.o: 
../../include/openssl/symhacks.h bio_err.c -bio_lib.o: ../../e_os.h ../../include/openssl/bio.h -bio_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bio_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bio_lib.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bio_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_lib.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_lib.c -bss_acpt.o: ../../e_os.h ../../include/openssl/bio.h -bss_acpt.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_acpt.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_acpt.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_acpt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_acpt.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_acpt.o: ../../include/openssl/symhacks.h ../cryptlib.h bss_acpt.c -bss_bio.o: ../../e_os.h ../../include/openssl/bio.h -bss_bio.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -bss_bio.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -bss_bio.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -bss_bio.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -bss_bio.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bss_bio.o: bss_bio.c -bss_conn.o: ../../e_os.h ../../include/openssl/bio.h -bss_conn.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_conn.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_conn.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_conn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_conn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_conn.o: ../../include/openssl/symhacks.h ../cryptlib.h 
bss_conn.c -bss_dgram.o: ../../e_os.h ../../include/openssl/bio.h -bss_dgram.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_dgram.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_dgram.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_dgram.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_dgram.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_dgram.o: ../../include/openssl/symhacks.h ../cryptlib.h bss_dgram.c -bss_fd.o: ../../e_os.h ../../include/openssl/bio.h -bss_fd.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_fd.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_fd.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_fd.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_fd.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_fd.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_lcl.h bss_fd.c -bss_file.o: ../../e_os.h ../../include/openssl/bio.h -bss_file.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_file.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_file.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_file.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_file.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_file.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_lcl.h bss_file.c -bss_log.o: ../../e_os.h ../../include/openssl/bio.h -bss_log.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_log.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_log.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_log.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_log.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_log.o: 
../../include/openssl/symhacks.h ../cryptlib.h bss_log.c -bss_mem.o: ../../e_os.h ../../include/openssl/bio.h -bss_mem.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_mem.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_mem.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_mem.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_mem.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_mem.o: ../../include/openssl/symhacks.h ../cryptlib.h bss_mem.c -bss_null.o: ../../e_os.h ../../include/openssl/bio.h -bss_null.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_null.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_null.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_null.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_null.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_null.o: ../../include/openssl/symhacks.h ../cryptlib.h bss_null.c -bss_sock.o: ../../e_os.h ../../include/openssl/bio.h -bss_sock.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bss_sock.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bss_sock.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bss_sock.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bss_sock.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bss_sock.o: ../../include/openssl/symhacks.h ../cryptlib.h bss_sock.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/asm/x86_64-gcc.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/asm/x86_64-gcc.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/asm/x86_64-gcc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/asm/x86_64-gcc.c 2015-01-20 21:22:17.000000000 +0000 @@ -189,7 +189,7 @@ if (n <= 0) return 0; - asm ( + asm volatile ( " subq %2,%2 \n" 
".p2align 4 \n" "1: movq (%4,%2,8),%0 \n" @@ -200,7 +200,7 @@ " sbbq %0,%0 \n" : "=&a"(ret),"+c"(n),"=&r"(i) : "r"(rp),"r"(ap),"r"(bp) - : "cc" + : "cc", "memory" ); return ret&1; @@ -212,7 +212,7 @@ if (n <= 0) return 0; - asm ( + asm volatile ( " subq %2,%2 \n" ".p2align 4 \n" "1: movq (%4,%2,8),%0 \n" @@ -223,7 +223,7 @@ " sbbq %0,%0 \n" : "=&a"(ret),"+c"(n),"=&r"(i) : "r"(rp),"r"(ap),"r"(bp) - : "cc" + : "cc", "memory" ); return ret&1; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_exp.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_exp.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_exp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_exp.c 2015-01-20 21:22:17.000000000 +0000 @@ -680,7 +680,7 @@ /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as * 512-bit RSA is hardly relevant, we omit it to spare size... */ - if (window==5) + if (window==5 && top>1) { void bn_mul_mont_gather5(BN_ULONG *rp,const BN_ULONG *ap, const void *table,const BN_ULONG *np, @@ -874,7 +874,14 @@ bits = BN_num_bits(p); if (bits == 0) { - ret = BN_one(rr); + /* x**0 mod 1 is still zero. */ + if (BN_is_one(m)) + { + ret = 1; + BN_zero(rr); + } + else + ret = BN_one(rr); return ret; } if (a == 0) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -320,6 +320,15 @@ BNerr(BN_F_BN_EXPAND_INTERNAL,ERR_R_MALLOC_FAILURE); return(NULL); } +#ifdef PURIFY + /* Valgrind complains in BN_consttime_swap because we process the whole + * array even if it's not initialised yet. 
This doesn't matter in that + * function - what's important is constant time operation (we're not + * actually going to use the data) + */ + memset(a, 0, sizeof(BN_ULONG)*words); +#endif + #if 1 B=b->d; /* Check if the previous number needs to be copied */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_mont.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_mont.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_mont.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_mont.c 2015-01-20 21:22:17.000000000 +0000 @@ -478,32 +478,38 @@ BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock, const BIGNUM *mod, BN_CTX *ctx) { - int got_write_lock = 0; BN_MONT_CTX *ret; CRYPTO_r_lock(lock); - if (!*pmont) + ret = *pmont; + CRYPTO_r_unlock(lock); + if (ret) + return ret; + + /* We don't want to serialise globally while doing our lazy-init math in + * BN_MONT_CTX_set. That punishes threads that are doing independent + * things. Instead, punish the case where more than one thread tries to + * lazy-init the same 'pmont', by having each do the lazy-init math work + * independently and only use the one from the thread that wins the race + * (the losers throw away the work they've done). */ + ret = BN_MONT_CTX_new(); + if (!ret) + return NULL; + if (!BN_MONT_CTX_set(ret, mod, ctx)) { - CRYPTO_r_unlock(lock); - CRYPTO_w_lock(lock); - got_write_lock = 1; + BN_MONT_CTX_free(ret); + return NULL; + } - if (!*pmont) - { - ret = BN_MONT_CTX_new(); - if (ret && !BN_MONT_CTX_set(ret, mod, ctx)) - BN_MONT_CTX_free(ret); - else - *pmont = ret; - } + /* The locked compare-and-set, after the local work is done. 
*/ + CRYPTO_w_lock(lock); + if (*pmont) + { + BN_MONT_CTX_free(ret); + ret = *pmont; } - - ret = *pmont; - - if (got_write_lock) - CRYPTO_w_unlock(lock); else - CRYPTO_r_unlock(lock); - + *pmont = ret; + CRYPTO_w_unlock(lock); return ret; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_nist.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_nist.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_nist.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_nist.c 2015-01-20 21:22:17.000000000 +0000 @@ -1088,9 +1088,9 @@ /* ... and right shift */ for (val=t_d[0],i=0; i<BN_NIST_521_TOP-1; i++) { - tmp = val>>BN_NIST_521_RSHIFT; - val = t_d[i+1]; - t_d[i] = (tmp | val<<BN_NIST_521_LSHIFT) & BN_MASK2; + t_d[i] = ( val>>BN_NIST_521_RSHIFT | + (tmp=t_d[i+1])<<BN_NIST_521_LSHIFT ) & BN_MASK2; + val=tmp; } t_d[i] = val>>BN_NIST_521_RSHIFT; /* lower 521 bits */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_sqr.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_sqr.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/bn_sqr.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/bn_sqr.c 2015-01-20 21:22:17.000000000 +0000 @@ -77,6 +77,7 @@ if (al <= 0) { r->top=0; + r->neg = 0; return 1; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/exptest.c nodejs-0.11.15/deps/openssl/openssl/crypto/bn/exptest.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/exptest.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/exptest.c 2015-01-20 21:22:17.000000000 +0000 @@ -71,6 +71,43 @@ static const char rnd_seed[] = "string to make the random number generator think it has entropy"; +/* test_exp_mod_zero tests that x**0 mod 1 == 0. It returns zero on success. 
*/ +static int test_exp_mod_zero() { + BIGNUM a, p, m; + BIGNUM r; + BN_CTX *ctx = BN_CTX_new(); + int ret = 1; + + BN_init(&m); + BN_one(&m); + + BN_init(&a); + BN_one(&a); + + BN_init(&p); + BN_zero(&p); + + BN_init(&r); + BN_mod_exp(&r, &a, &p, &m, ctx); + BN_CTX_free(ctx); + + if (BN_is_zero(&r)) + ret = 0; + else + { + printf("1**0 mod 1 = "); + BN_print_fp(stdout, &r); + printf(", should be 0\n"); + } + + BN_free(&r); + BN_free(&a); + BN_free(&p); + BN_free(&m); + + return ret; +} + int main(int argc, char *argv[]) { BN_CTX *ctx; @@ -190,7 +227,13 @@ ERR_remove_thread_state(NULL); CRYPTO_mem_leaks(out); BIO_free(out); - printf(" done\n"); + printf("\n"); + + if (test_exp_mod_zero() != 0) + goto err; + + printf("done\n"); + EXIT(0); err: ERR_load_crypto_strings(); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/bn/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/bn/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/bn/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/bn/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,377 +0,0 @@ -# -# OpenSSL/crypto/bn/Makefile -# - -DIR= bn -TOP= ../.. -CC= cc -CPP= $(CC) -E -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -BN_ASM= bn_asm.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=bntest.c exptest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= bn_add.c bn_div.c bn_exp.c bn_lib.c bn_ctx.c bn_mul.c bn_mod.c \ - bn_print.c bn_rand.c bn_shift.c bn_word.c bn_blind.c \ - bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_err.c bn_sqr.c bn_asm.c \ - bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \ - bn_depr.c bn_const.c bn_x931p.c - -LIBOBJ= bn_add.o bn_div.o bn_exp.o bn_lib.o bn_ctx.o bn_mul.o bn_mod.o \ - bn_print.o bn_rand.o bn_shift.o bn_word.o bn_blind.o \ - bn_kron.o bn_sqrt.o bn_gcd.o bn_prime.o bn_err.o bn_sqr.o $(BN_ASM) \ - bn_recp.o bn_mont.o bn_mpi.o bn_exp2.o bn_gf2m.o bn_nist.o \ - bn_depr.o bn_const.o bn_x931p.o - -SRC= $(LIBSRC) - -EXHEADER= bn.h -HEADER= bn_lcl.h bn_prime.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -bn_prime.h: bn_prime.pl - $(PERL) bn_prime.pl >bn_prime.h - -divtest: divtest.c ../../libcrypto.a - cc -I../../include divtest.c -o divtest ../../libcrypto.a - -bnbug: bnbug.c ../../libcrypto.a top - cc -g -I../../include bnbug.c -o bnbug ../../libcrypto.a - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -bn-586.s: asm/bn-586.pl ../perlasm/x86asm.pl - $(PERL) asm/bn-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -co-586.s: asm/co-586.pl ../perlasm/x86asm.pl - $(PERL) asm/co-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -x86-mont.s: asm/x86-mont.pl ../perlasm/x86asm.pl - $(PERL) asm/x86-mont.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -x86-gf2m.s: asm/x86-gf2m.pl ../perlasm/x86asm.pl - $(PERL) asm/x86-gf2m.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ - -sparcv8.o: asm/sparcv8.S - $(CC) $(CFLAGS) -c asm/sparcv8.S -bn-sparcv9.o: asm/sparcv8plus.S - $(CC) $(CFLAGS) -c -o $@ asm/sparcv8plus.S -sparcv9a-mont.s: asm/sparcv9a-mont.pl - $(PERL) asm/sparcv9a-mont.pl $(CFLAGS) > $@ -sparcv9-mont.s: asm/sparcv9-mont.pl - $(PERL) asm/sparcv9-mont.pl $(CFLAGS) > $@ - -bn-mips3.o: asm/mips3.s - @if [ "$(CC)" = "gcc" ]; then \ - ABI=`expr "$(CFLAGS)" : ".*-mabi=\([n3264]*\)"` && \ - as -$$ABI -O -o $@ asm/mips3.s; \ - else $(CC) -c $(CFLAGS) -o $@ asm/mips3.s; fi - -bn-mips.s: asm/mips.pl - $(PERL) asm/mips.pl $(PERLASM_SCHEME) $@ -mips-mont.s: asm/mips-mont.pl - $(PERL) asm/mips-mont.pl $(PERLASM_SCHEME) $@ - -bn-s390x.o: asm/s390x.S - $(CC) $(CFLAGS) -c -o $@ asm/s390x.S -s390x-gf2m.s: asm/s390x-gf2m.pl - $(PERL) asm/s390x-gf2m.pl $(PERLASM_SCHEME) $@ - -x86_64-gcc.o: asm/x86_64-gcc.c - $(CC) $(CFLAGS) -c -o $@ asm/x86_64-gcc.c -x86_64-mont.s: asm/x86_64-mont.pl - $(PERL) asm/x86_64-mont.pl $(PERLASM_SCHEME) > $@ -x86_64-mont5.s: asm/x86_64-mont5.pl - $(PERL) asm/x86_64-mont5.pl $(PERLASM_SCHEME) > $@ -x86_64-gf2m.s: asm/x86_64-gf2m.pl - $(PERL) asm/x86_64-gf2m.pl $(PERLASM_SCHEME) > $@ -modexp512-x86_64.s: asm/modexp512-x86_64.pl - $(PERL) asm/modexp512-x86_64.pl $(PERLASM_SCHEME) > $@ - -bn-ia64.s: asm/ia64.S - $(CC) $(CFLAGS) -E asm/ia64.S > $@ -ia64-mont.s: asm/ia64-mont.pl - $(PERL) asm/ia64-mont.pl $@ $(CFLAGS) - -# GNU assembler fails to compile PA-RISC2 modules, insist on calling -# vendor assembler... 
-pa-risc2W.o: asm/pa-risc2W.s - /usr/ccs/bin/as -o pa-risc2W.o asm/pa-risc2W.s -pa-risc2.o: asm/pa-risc2.s - /usr/ccs/bin/as -o pa-risc2.o asm/pa-risc2.s -parisc-mont.s: asm/parisc-mont.pl - $(PERL) asm/parisc-mont.pl $(PERLASM_SCHEME) $@ - -# ppc - AIX, Linux, MacOS X... -bn-ppc.s: asm/ppc.pl; $(PERL) asm/ppc.pl $(PERLASM_SCHEME) $@ -ppc-mont.s: asm/ppc-mont.pl;$(PERL) asm/ppc-mont.pl $(PERLASM_SCHEME) $@ -ppc64-mont.s: asm/ppc64-mont.pl;$(PERL) asm/ppc64-mont.pl $(PERLASM_SCHEME) $@ - -alpha-mont.s: asm/alpha-mont.pl - (preproc=/tmp/$$$$.$@; trap "rm $$preproc" INT; \ - $(PERL) asm/alpha-mont.pl > $$preproc && \ - $(CC) -E $$preproc > $@ && rm $$preproc) - -# GNU make "catch all" -%-mont.s: asm/%-mont.pl; $(PERL) $< $(PERLASM_SCHEME) $@ -%-gf2m.S: asm/%-gf2m.pl; $(PERL) $< $(PERLASM_SCHEME) $@ - -armv4-gf2m.o: armv4-gf2m.S - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -exptest: - rm -f exptest - gcc -I../../include -g2 -ggdb -o exptest exptest.c ../../libcrypto.a - -div: - rm -f a.out - gcc -I.. -g div.c ../../libcrypto.a - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -bn_add.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_add.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_add.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_add.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_add.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_add.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_add.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_add.c bn_lcl.h -bn_asm.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_asm.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_asm.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_asm.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_asm.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_asm.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_asm.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_asm.c bn_lcl.h -bn_blind.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_blind.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_blind.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_blind.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_blind.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_blind.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_blind.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_blind.c bn_lcl.h -bn_const.o: ../../include/openssl/crypto.h 
../../include/openssl/e_os2.h -bn_const.o: ../../include/openssl/opensslconf.h -bn_const.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_const.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_const.o: ../../include/openssl/symhacks.h bn.h bn_const.c -bn_ctx.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_ctx.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_ctx.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_ctx.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_ctx.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_ctx.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_ctx.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_ctx.c bn_lcl.h -bn_depr.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_depr.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_depr.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_depr.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_depr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_depr.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -bn_depr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bn_depr.o: ../cryptlib.h bn_depr.c bn_lcl.h -bn_div.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_div.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_div.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_div.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_div.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_div.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_div.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_div.c bn_lcl.h -bn_err.o: ../../include/openssl/bio.h ../../include/openssl/bn.h 
-bn_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -bn_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -bn_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -bn_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -bn_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bn_err.o: bn_err.c -bn_exp.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_exp.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_exp.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_exp.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_exp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_exp.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_exp.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_exp.c bn_lcl.h -bn_exp2.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_exp2.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_exp2.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_exp2.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_exp2.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_exp2.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_exp2.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_exp2.c bn_lcl.h -bn_gcd.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_gcd.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_gcd.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_gcd.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_gcd.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_gcd.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_gcd.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_gcd.c bn_lcl.h -bn_gf2m.o: 
../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_gf2m.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_gf2m.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_gf2m.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_gf2m.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_gf2m.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_gf2m.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_gf2m.c bn_lcl.h -bn_kron.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_kron.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_kron.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_kron.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_kron.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_kron.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_kron.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_kron.c bn_lcl.h -bn_lib.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_lib.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_lib.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_lib.c -bn_mod.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_mod.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_mod.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_mod.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_mod.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_mod.o: 
../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_mod.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_mod.c -bn_mont.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_mont.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_mont.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_mont.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_mont.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_mont.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_mont.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_mont.c -bn_mpi.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_mpi.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_mpi.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_mpi.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_mpi.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_mpi.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_mpi.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_mpi.c -bn_mul.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_mul.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_mul.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_mul.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_mul.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_mul.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_mul.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_mul.c -bn_nist.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_nist.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_nist.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_nist.o: ../../include/openssl/lhash.h 
../../include/openssl/opensslconf.h -bn_nist.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_nist.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_nist.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_nist.c -bn_prime.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_prime.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_prime.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_prime.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_prime.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_prime.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -bn_prime.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bn_prime.o: ../cryptlib.h bn_lcl.h bn_prime.c bn_prime.h -bn_print.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_print.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_print.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_print.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_print.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_print.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_print.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_print.c -bn_rand.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_rand.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_rand.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_rand.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_rand.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_rand.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -bn_rand.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bn_rand.o: ../cryptlib.h bn_lcl.h bn_rand.c -bn_recp.o: 
../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_recp.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_recp.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_recp.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_recp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_recp.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_recp.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_recp.c -bn_shift.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_shift.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_shift.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_shift.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_shift.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_shift.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_shift.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_shift.c -bn_sqr.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_sqr.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_sqr.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_sqr.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_sqr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_sqr.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_sqr.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_sqr.c -bn_sqrt.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_sqrt.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_sqrt.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_sqrt.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_sqrt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_sqrt.o: 
../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_sqrt.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_sqrt.c -bn_word.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -bn_word.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bn_word.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bn_word.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -bn_word.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_word.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_word.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_word.c -bn_x931p.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -bn_x931p.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -bn_x931p.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bn_x931p.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bn_x931p.o: ../../include/openssl/symhacks.h bn_x931p.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/buffer/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/buffer/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/buffer/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/buffer/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -# -# OpenSSL/crypto/buffer/Makefile -# - -DIR= buffer -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= buffer.c buf_str.c buf_err.c -LIBOBJ= buffer.o buf_str.o buf_err.o - -SRC= $(LIBSRC) - -EXHEADER= buffer.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -buf_err.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -buf_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -buf_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -buf_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -buf_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -buf_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -buf_err.o: buf_err.c -buf_str.o: ../../e_os.h ../../include/openssl/bio.h -buf_str.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -buf_str.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -buf_str.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -buf_str.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -buf_str.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -buf_str.o: ../../include/openssl/symhacks.h ../cryptlib.h buf_str.c -buffer.o: ../../e_os.h ../../include/openssl/bio.h -buffer.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -buffer.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -buffer.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -buffer.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -buffer.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -buffer.o: ../../include/openssl/symhacks.h ../cryptlib.h buffer.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/camellia/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/camellia/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/camellia/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/camellia/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -# -# crypto/camellia/Makefile -# - -DIR= camellia -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CMLL_ENC= camellia.o cmll_misc.o cmll_cbc.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -#TEST=camelliatest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=camellia.c cmll_misc.c cmll_ecb.c cmll_cbc.c cmll_ofb.c \ - cmll_cfb.c cmll_ctr.c cmll_utl.c - -LIBOBJ= cmll_ecb.o cmll_ofb.o cmll_cfb.o cmll_ctr.o cmll_utl.o $(CMLL_ENC) - -SRC= $(LIBSRC) - -EXHEADER= camellia.h -HEADER= cmll_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -cmll-x86.s: asm/cmll-x86.pl ../perlasm/x86asm.pl - $(PERL) asm/cmll-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -cmll-x86_64.s: asm/cmll-x86_64.pl - $(PERL) asm/cmll-x86_64.pl $(PERLASM_SCHEME) > $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -camellia.o: ../../include/openssl/opensslconf.h camellia.c camellia.h -camellia.o: cmll_locl.h -cmll_cbc.o: ../../include/openssl/camellia.h ../../include/openssl/modes.h -cmll_cbc.o: ../../include/openssl/opensslconf.h cmll_cbc.c -cmll_cfb.o: ../../include/openssl/camellia.h ../../include/openssl/modes.h -cmll_cfb.o: ../../include/openssl/opensslconf.h cmll_cfb.c -cmll_ctr.o: ../../include/openssl/camellia.h ../../include/openssl/modes.h -cmll_ctr.o: ../../include/openssl/opensslconf.h cmll_ctr.c -cmll_ecb.o: ../../include/openssl/camellia.h -cmll_ecb.o: ../../include/openssl/opensslconf.h cmll_ecb.c cmll_locl.h -cmll_misc.o: ../../include/openssl/camellia.h ../../include/openssl/crypto.h -cmll_misc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -cmll_misc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cmll_misc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cmll_misc.o: ../../include/openssl/symhacks.h cmll_locl.h cmll_misc.c -cmll_ofb.o: ../../include/openssl/camellia.h ../../include/openssl/modes.h -cmll_ofb.o: ../../include/openssl/opensslconf.h cmll_ofb.c -cmll_utl.o: ../../include/openssl/camellia.h ../../include/openssl/crypto.h -cmll_utl.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -cmll_utl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cmll_utl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cmll_utl.o: ../../include/openssl/symhacks.h cmll_locl.h cmll_utl.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cast/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/cast/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/cast/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cast/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -# -# OpenSSL/crypto/cast/Makefile -# - -DIR= cast -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CAST_ENC=c_enc.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=casttest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=c_skey.c c_ecb.c c_enc.c c_cfb64.c c_ofb64.c -LIBOBJ=c_skey.o c_ecb.o $(CAST_ENC) c_cfb64.o c_ofb64.o - -SRC= $(LIBSRC) - -EXHEADER= cast.h -HEADER= cast_s.h cast_lcl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -cast-586.s: asm/cast-586.pl ../perlasm/x86asm.pl ../perlasm/cbc.pl - $(PERL) asm/cast-586.pl $(PERLASM_SCHEME) $(CLAGS) $(PROCESSOR) > $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -c_cfb64.o: ../../e_os.h ../../include/openssl/cast.h -c_cfb64.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -c_cfb64.o: c_cfb64.c cast_lcl.h -c_ecb.o: ../../e_os.h ../../include/openssl/cast.h -c_ecb.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -c_ecb.o: ../../include/openssl/opensslv.h c_ecb.c cast_lcl.h -c_enc.o: ../../e_os.h ../../include/openssl/cast.h -c_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -c_enc.o: c_enc.c cast_lcl.h -c_ofb64.o: ../../e_os.h ../../include/openssl/cast.h -c_ofb64.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -c_ofb64.o: c_ofb64.c cast_lcl.h -c_skey.o: ../../e_os.h ../../include/openssl/cast.h -c_skey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -c_skey.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -c_skey.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -c_skey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -c_skey.o: c_skey.c cast_lcl.h cast_s.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cmac/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/cmac/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/cmac/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cmac/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -# -# OpenSSL/crypto/cmac/Makefile -# - -DIR= cmac -TOP= ../.. -CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=cmac.c cm_ameth.c cm_pmeth.c -LIBOBJ=cmac.o cm_ameth.o cm_pmeth.o - -SRC= $(LIBSRC) - -EXHEADER= cmac.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -cm_ameth.o: ../../e_os.h ../../include/openssl/asn1.h -cm_ameth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cm_ameth.o: ../../include/openssl/cmac.h ../../include/openssl/crypto.h -cm_ameth.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -cm_ameth.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cm_ameth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cm_ameth.o: ../../include/openssl/opensslconf.h -cm_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cm_ameth.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cm_ameth.o: ../../include/openssl/symhacks.h ../asn1/asn1_locl.h ../cryptlib.h -cm_ameth.o: cm_ameth.c -cm_pmeth.o: ../../e_os.h ../../include/openssl/asn1.h -cm_pmeth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cm_pmeth.o: ../../include/openssl/cmac.h ../../include/openssl/conf.h -cm_pmeth.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cm_pmeth.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -cm_pmeth.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cm_pmeth.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cm_pmeth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cm_pmeth.o: ../../include/openssl/opensslconf.h -cm_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cm_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -cm_pmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -cm_pmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -cm_pmeth.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cm_pmeth.o: ../cryptlib.h ../evp/evp_locl.h cm_pmeth.c -cmac.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -cmac.o: ../../include/openssl/buffer.h ../../include/openssl/cmac.h -cmac.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h 
-cmac.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cmac.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cmac.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -cmac.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cmac.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cmac.o: ../../include/openssl/symhacks.h ../cryptlib.h cmac.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_env.c nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_env.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_env.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_env.c 2015-01-20 21:22:17.000000000 +0000 @@ -185,6 +185,8 @@ if (flags & CMS_USE_KEYID) { ktri->version = 2; + if (env->version < 2) + env->version = 2; type = CMS_RECIPINFO_KEYIDENTIFIER; } else diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_pwri.c nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_pwri.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_pwri.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_pwri.c 2015-01-20 21:22:17.000000000 +0000 @@ -93,9 +93,10 @@ X509_ALGOR *encalg = NULL; unsigned char iv[EVP_MAX_IV_LENGTH]; int ivlen; + env = cms_get0_enveloped(cms); if (!env) - goto err; + return NULL; if (wrap_nid <= 0) wrap_nid = NID_id_alg_PWRI_KEK; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_sd.c nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_sd.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_sd.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_sd.c 2015-01-20 21:22:17.000000000 +0000 @@ -158,8 +158,8 @@ if (sd->version < 3) sd->version = 3; } - else - sd->version = 1; + else if (si->version < 1) + si->version = 1; } if (sd->version < 1) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_smime.c 
nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_smime.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/cms/cms_smime.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cms/cms_smime.c 2015-01-20 21:22:17.000000000 +0000 @@ -611,7 +611,7 @@ STACK_OF(CMS_RecipientInfo) *ris; CMS_RecipientInfo *ri; int i, r; - int debug = 0; + int debug = 0, ri_match = 0; ris = CMS_get0_RecipientInfos(cms); if (ris) debug = cms->d.envelopedData->encryptedContentInfo->debug; @@ -620,6 +620,7 @@ ri = sk_CMS_RecipientInfo_value(ris, i); if (CMS_RecipientInfo_type(ri) != CMS_RECIPINFO_TRANS) continue; + ri_match = 1; /* If we have a cert try matching RecipientInfo * otherwise try them all. */ @@ -655,7 +656,7 @@ } } /* If no cert and not debugging always return success */ - if (!cert && !debug) + if (ri_match && !cert && !debug) { ERR_clear_error(); return 1; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/cms/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/cms/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/cms/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/cms/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,284 +0,0 @@ -# -# OpenSSL/crypto/cms/Makefile -# - -DIR= cms -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= cms_lib.c cms_asn1.c cms_att.c cms_io.c cms_smime.c cms_err.c \ - cms_sd.c cms_dd.c cms_cd.c cms_env.c cms_enc.c cms_ess.c \ - cms_pwri.c -LIBOBJ= cms_lib.o cms_asn1.o cms_att.o cms_io.o cms_smime.o cms_err.o \ - cms_sd.o cms_dd.o cms_cd.o cms_env.o cms_enc.o cms_ess.o \ - cms_pwri.o - -SRC= $(LIBSRC) - -EXHEADER= cms.h -HEADER= cms_lcl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -test: - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -cms_asn1.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -cms_asn1.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cms_asn1.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_asn1.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_asn1.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -cms_asn1.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_asn1.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_asn1.o: ../../include/openssl/opensslconf.h -cms_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_asn1.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -cms_asn1.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -cms_asn1.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -cms_asn1.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -cms_asn1.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cms_asn1.o: cms.h cms_asn1.c cms_lcl.h -cms_att.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -cms_att.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cms_att.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_att.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_att.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -cms_att.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cms_att.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cms_att.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -cms_att.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_att.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -cms_att.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -cms_att.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -cms_att.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h 
-cms_att.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cms_att.o: cms.h cms_att.c cms_lcl.h -cms_cd.o: ../../e_os.h ../../include/openssl/asn1.h -cms_cd.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -cms_cd.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_cd.o: ../../include/openssl/comp.h ../../include/openssl/conf.h -cms_cd.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cms_cd.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -cms_cd.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cms_cd.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_cd.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_cd.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -cms_cd.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -cms_cd.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -cms_cd.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_cd.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_cd.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -cms_cd.o: ../../include/openssl/x509v3.h ../cryptlib.h cms_cd.c cms_lcl.h -cms_dd.o: ../../e_os.h ../../include/openssl/asn1.h -cms_dd.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -cms_dd.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_dd.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_dd.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_dd.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -cms_dd.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cms_dd.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cms_dd.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -cms_dd.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_dd.o: ../../include/openssl/pem.h 
../../include/openssl/pem2.h -cms_dd.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -cms_dd.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -cms_dd.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -cms_dd.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cms_dd.o: ../cryptlib.h cms_dd.c cms_lcl.h -cms_enc.o: ../../e_os.h ../../include/openssl/asn1.h -cms_enc.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -cms_enc.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_enc.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_enc.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -cms_enc.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cms_enc.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cms_enc.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -cms_enc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_enc.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -cms_enc.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -cms_enc.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_enc.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -cms_enc.o: ../../include/openssl/x509v3.h ../cryptlib.h cms_enc.c cms_lcl.h -cms_env.o: ../../e_os.h ../../include/openssl/aes.h -cms_env.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -cms_env.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cms_env.o: ../../include/openssl/cms.h ../../include/openssl/conf.h -cms_env.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cms_env.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -cms_env.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cms_env.o: 
../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_env.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_env.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -cms_env.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -cms_env.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -cms_env.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -cms_env.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -cms_env.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -cms_env.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cms_env.o: ../asn1/asn1_locl.h ../cryptlib.h cms_env.c cms_lcl.h -cms_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -cms_err.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cms_err.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -cms_err.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cms_err.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_err.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -cms_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -cms_err.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_err.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -cms_err.o: cms_err.c -cms_ess.o: ../../e_os.h ../../include/openssl/asn1.h -cms_ess.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -cms_ess.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_ess.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_ess.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_ess.o: ../../include/openssl/ecdh.h 
../../include/openssl/ecdsa.h -cms_ess.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cms_ess.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cms_ess.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -cms_ess.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_ess.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -cms_ess.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -cms_ess.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_ess.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_ess.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -cms_ess.o: ../../include/openssl/x509v3.h ../cryptlib.h cms_ess.c cms_lcl.h -cms_io.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -cms_io.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cms_io.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cms_io.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -cms_io.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cms_io.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_io.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_io.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -cms_io.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -cms_io.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -cms_io.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_io.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_io.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h cms.h -cms_io.o: cms_io.c cms_lcl.h -cms_lib.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -cms_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cms_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cms_lib.o: ../../include/openssl/ec.h 
../../include/openssl/ecdh.h -cms_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cms_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -cms_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -cms_lib.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -cms_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h cms.h -cms_lib.o: cms_lcl.h cms_lib.c -cms_pwri.o: ../../e_os.h ../../include/openssl/aes.h -cms_pwri.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -cms_pwri.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -cms_pwri.o: ../../include/openssl/cms.h ../../include/openssl/conf.h -cms_pwri.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cms_pwri.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -cms_pwri.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -cms_pwri.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -cms_pwri.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -cms_pwri.o: ../../include/openssl/opensslconf.h -cms_pwri.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_pwri.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -cms_pwri.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -cms_pwri.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -cms_pwri.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cms_pwri.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -cms_pwri.o: ../../include/openssl/x509v3.h ../asn1/asn1_locl.h ../cryptlib.h -cms_pwri.o: cms_lcl.h cms_pwri.c -cms_sd.o: ../../e_os.h 
../../include/openssl/asn1.h -cms_sd.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -cms_sd.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_sd.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_sd.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_sd.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -cms_sd.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cms_sd.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cms_sd.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -cms_sd.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_sd.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -cms_sd.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -cms_sd.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -cms_sd.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -cms_sd.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cms_sd.o: ../asn1/asn1_locl.h ../cryptlib.h cms_lcl.h cms_sd.c -cms_smime.o: ../../e_os.h ../../include/openssl/asn1.h -cms_smime.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -cms_smime.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -cms_smime.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -cms_smime.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -cms_smime.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -cms_smime.o: ../../include/openssl/err.h ../../include/openssl/evp.h -cms_smime.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -cms_smime.o: ../../include/openssl/objects.h -cms_smime.o: ../../include/openssl/opensslconf.h -cms_smime.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cms_smime.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -cms_smime.o: ../../include/openssl/sha.h ../../include/openssl/stack.h 
-cms_smime.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -cms_smime.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -cms_smime.o: ../cryptlib.h cms_lcl.h cms_smime.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/comp/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/comp/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/comp/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/comp/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -# -# OpenSSL/crypto/comp/Makefile -# - -DIR= comp -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= comp_lib.c comp_err.c \ - c_rle.c c_zlib.c - -LIBOBJ= comp_lib.o comp_err.o \ - c_rle.o c_zlib.o - -SRC= $(LIBSRC) - -EXHEADER= comp.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -c_rle.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -c_rle.o: ../../include/openssl/comp.h ../../include/openssl/crypto.h -c_rle.o: ../../include/openssl/e_os2.h ../../include/openssl/obj_mac.h -c_rle.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -c_rle.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -c_rle.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -c_rle.o: ../../include/openssl/symhacks.h c_rle.c -c_zlib.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -c_zlib.o: ../../include/openssl/comp.h ../../include/openssl/crypto.h -c_zlib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -c_zlib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -c_zlib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -c_zlib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -c_zlib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -c_zlib.o: ../../include/openssl/symhacks.h c_zlib.c -comp_err.o: ../../include/openssl/bio.h ../../include/openssl/comp.h -comp_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -comp_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -comp_err.o: ../../include/openssl/opensslconf.h -comp_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -comp_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -comp_err.o: ../../include/openssl/symhacks.h comp_err.c -comp_lib.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -comp_lib.o: ../../include/openssl/comp.h 
../../include/openssl/crypto.h -comp_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/obj_mac.h -comp_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -comp_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -comp_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -comp_lib.o: ../../include/openssl/symhacks.h comp_lib.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/conf/conf_def.c nodejs-0.11.15/deps/openssl/openssl/crypto/conf/conf_def.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/conf/conf_def.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/conf/conf_def.c 2015-01-20 21:22:17.000000000 +0000 @@ -321,7 +321,7 @@ p=eat_ws(conf, end); if (*p != ']') { - if (*p != '\0') + if (*p != '\0' && ss != p) { ss=p; goto again; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/conf/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/conf/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/conf/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/conf/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,152 +0,0 @@ -# -# OpenSSL/crypto/conf/Makefile -# - -DIR= conf -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= conf_err.c conf_lib.c conf_api.c conf_def.c conf_mod.c \ - conf_mall.c conf_sap.c - -LIBOBJ= conf_err.o conf_lib.o conf_api.o conf_def.o conf_mod.o \ - conf_mall.o conf_sap.o - -SRC= $(LIBSRC) - -EXHEADER= conf.h conf_api.h -HEADER= conf_def.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -conf_api.o: ../../e_os.h ../../include/openssl/bio.h -conf_api.o: ../../include/openssl/conf.h ../../include/openssl/conf_api.h -conf_api.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -conf_api.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -conf_api.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_api.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -conf_api.o: ../../include/openssl/symhacks.h conf_api.c -conf_def.o: ../../e_os.h ../../include/openssl/bio.h -conf_def.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -conf_def.o: ../../include/openssl/conf_api.h ../../include/openssl/crypto.h -conf_def.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -conf_def.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -conf_def.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_def.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -conf_def.o: ../../include/openssl/symhacks.h ../cryptlib.h conf_def.c -conf_def.o: conf_def.h -conf_err.o: ../../include/openssl/bio.h ../../include/openssl/conf.h -conf_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -conf_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -conf_err.o: ../../include/openssl/opensslconf.h -conf_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -conf_err.o: ../../include/openssl/symhacks.h conf_err.c -conf_lib.o: ../../include/openssl/bio.h ../../include/openssl/conf.h -conf_lib.o: ../../include/openssl/conf_api.h ../../include/openssl/crypto.h -conf_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -conf_lib.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -conf_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_lib.o: 
../../include/openssl/safestack.h ../../include/openssl/stack.h -conf_lib.o: ../../include/openssl/symhacks.h conf_lib.c -conf_mall.o: ../../e_os.h ../../include/openssl/asn1.h -conf_mall.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -conf_mall.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -conf_mall.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -conf_mall.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -conf_mall.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -conf_mall.o: ../../include/openssl/err.h ../../include/openssl/evp.h -conf_mall.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -conf_mall.o: ../../include/openssl/objects.h -conf_mall.o: ../../include/openssl/opensslconf.h -conf_mall.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_mall.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -conf_mall.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -conf_mall.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -conf_mall.o: ../../include/openssl/x509_vfy.h ../cryptlib.h conf_mall.c -conf_mod.o: ../../e_os.h ../../include/openssl/asn1.h -conf_mod.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -conf_mod.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -conf_mod.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -conf_mod.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -conf_mod.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -conf_mod.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -conf_mod.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -conf_mod.o: ../../include/openssl/opensslconf.h -conf_mod.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_mod.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -conf_mod.o: ../../include/openssl/sha.h ../../include/openssl/stack.h 
-conf_mod.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -conf_mod.o: ../../include/openssl/x509_vfy.h ../cryptlib.h conf_mod.c -conf_sap.o: ../../e_os.h ../../include/openssl/asn1.h -conf_sap.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -conf_sap.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -conf_sap.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -conf_sap.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -conf_sap.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -conf_sap.o: ../../include/openssl/err.h ../../include/openssl/evp.h -conf_sap.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -conf_sap.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -conf_sap.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -conf_sap.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -conf_sap.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -conf_sap.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -conf_sap.o: ../../include/openssl/x509_vfy.h ../cryptlib.h conf_sap.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/constant_time_locl.h nodejs-0.11.15/deps/openssl/openssl/crypto/constant_time_locl.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/constant_time_locl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/constant_time_locl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,216 @@ +/* crypto/constant_time_locl.h */ +/* + * Utilities for constant-time cryptography. + * + * Author: Emilia Kasper (emilia@openssl.org) + * Based on previous work by Bodo Moeller, Emilia Kasper, Adam Langley + * (Google). + * ==================================================================== + * Copyright (c) 2014 The OpenSSL Project. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. 
this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#ifndef HEADER_CONSTANT_TIME_LOCL_H +#define HEADER_CONSTANT_TIME_LOCL_H + +#include "e_os.h" /* For 'inline' */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The boolean methods return a bitmask of all ones (0xff...f) for true + * and 0 for false. This is useful for choosing a value based on the result + * of a conditional in constant time. For example, + * + * if (a < b) { + * c = a; + * } else { + * c = b; + * } + * + * can be written as + * + * unsigned int lt = constant_time_lt(a, b); + * c = constant_time_select(lt, a, b); + */ + +/* + * Returns the given value with the MSB copied to all the other + * bits. Uses the fact that arithmetic shift shifts-in the sign bit. + * However, this is not ensured by the C standard so you may need to + * replace this with something else on odd CPUs. + */ +static inline unsigned int constant_time_msb(unsigned int a); + +/* + * Returns 0xff..f if a < b and 0 otherwise. + */ +static inline unsigned int constant_time_lt(unsigned int a, unsigned int b); +/* Convenience method for getting an 8-bit mask. */ +static inline unsigned char constant_time_lt_8(unsigned int a, unsigned int b); + +/* + * Returns 0xff..f if a >= b and 0 otherwise. + */ +static inline unsigned int constant_time_ge(unsigned int a, unsigned int b); +/* Convenience method for getting an 8-bit mask. */ +static inline unsigned char constant_time_ge_8(unsigned int a, unsigned int b); + +/* + * Returns 0xff..f if a == 0 and 0 otherwise. + */ +static inline unsigned int constant_time_is_zero(unsigned int a); +/* Convenience method for getting an 8-bit mask. */ +static inline unsigned char constant_time_is_zero_8(unsigned int a); + + +/* + * Returns 0xff..f if a == b and 0 otherwise. + */ +static inline unsigned int constant_time_eq(unsigned int a, unsigned int b); +/* Convenience method for getting an 8-bit mask. 
*/ +static inline unsigned char constant_time_eq_8(unsigned int a, unsigned int b); +/* Signed integers. */ +static inline unsigned int constant_time_eq_int(int a, int b); +/* Convenience method for getting an 8-bit mask. */ +static inline unsigned char constant_time_eq_int_8(int a, int b); + + +/* + * Returns (mask & a) | (~mask & b). + * + * When |mask| is all 1s or all 0s (as returned by the methods above), + * the select methods return either |a| (if |mask| is nonzero) or |b| + * (if |mask| is zero). + */ +static inline unsigned int constant_time_select(unsigned int mask, + unsigned int a, unsigned int b); +/* Convenience method for unsigned chars. */ +static inline unsigned char constant_time_select_8(unsigned char mask, + unsigned char a, unsigned char b); +/* Convenience method for signed integers. */ +static inline int constant_time_select_int(unsigned int mask, int a, int b); + +static inline unsigned int constant_time_msb(unsigned int a) + { + return (unsigned int)((int)(a) >> (sizeof(int) * 8 - 1)); + } + +static inline unsigned int constant_time_lt(unsigned int a, unsigned int b) + { + unsigned int lt; + /* Case 1: msb(a) == msb(b). a < b iff the MSB of a - b is set.*/ + lt = ~(a ^ b) & (a - b); + /* Case 2: msb(a) != msb(b). a < b iff the MSB of b is set. */ + lt |= ~a & b; + return constant_time_msb(lt); + } + +static inline unsigned char constant_time_lt_8(unsigned int a, unsigned int b) + { + return (unsigned char)(constant_time_lt(a, b)); + } + +static inline unsigned int constant_time_ge(unsigned int a, unsigned int b) + { + unsigned int ge; + /* Case 1: msb(a) == msb(b). a >= b iff the MSB of a - b is not set.*/ + ge = ~((a ^ b) | (a - b)); + /* Case 2: msb(a) != msb(b). a >= b iff the MSB of a is set. 
*/ + ge |= a & ~b; + return constant_time_msb(ge); + } + +static inline unsigned char constant_time_ge_8(unsigned int a, unsigned int b) + { + return (unsigned char)(constant_time_ge(a, b)); + } + +static inline unsigned int constant_time_is_zero(unsigned int a) + { + return constant_time_msb(~a & (a - 1)); + } + +static inline unsigned char constant_time_is_zero_8(unsigned int a) + { + return (unsigned char)(constant_time_is_zero(a)); + } + +static inline unsigned int constant_time_eq(unsigned int a, unsigned int b) + { + return constant_time_is_zero(a ^ b); + } + +static inline unsigned char constant_time_eq_8(unsigned int a, unsigned int b) + { + return (unsigned char)(constant_time_eq(a, b)); + } + +static inline unsigned int constant_time_eq_int(int a, int b) + { + return constant_time_eq((unsigned)(a), (unsigned)(b)); + } + +static inline unsigned char constant_time_eq_int_8(int a, int b) + { + return constant_time_eq_8((unsigned)(a), (unsigned)(b)); + } + +static inline unsigned int constant_time_select(unsigned int mask, + unsigned int a, unsigned int b) + { + return (mask & a) | (~mask & b); + } + +static inline unsigned char constant_time_select_8(unsigned char mask, + unsigned char a, unsigned char b) + { + return (unsigned char)(constant_time_select(mask, a, b)); + } + +inline int constant_time_select_int(unsigned int mask, int a, int b) + { + return (int)(constant_time_select(mask, (unsigned)(a), (unsigned)(b))); + } + +#ifdef __cplusplus +} +#endif + +#endif /* HEADER_CONSTANT_TIME_LOCL_H */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/constant_time_test.c nodejs-0.11.15/deps/openssl/openssl/crypto/constant_time_test.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/constant_time_test.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/constant_time_test.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,330 @@ +/* crypto/constant_time_test.c */ +/* + * Utilities for constant-time cryptography. 
+ * + * Author: Emilia Kasper (emilia@openssl.org) + * Based on previous work by Bodo Moeller, Emilia Kasper, Adam Langley + * (Google). + * ==================================================================== + * Copyright (c) 2014 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include "../crypto/constant_time_locl.h" + +#include <limits.h> +#include <stdio.h> +#include <stdlib.h> + +static const unsigned int CONSTTIME_TRUE = (unsigned)(~0); +static const unsigned int CONSTTIME_FALSE = 0; +static const unsigned char CONSTTIME_TRUE_8 = 0xff; +static const unsigned char CONSTTIME_FALSE_8 = 0; + +static int test_binary_op(unsigned int (*op)(unsigned int a, unsigned int b), + const char* op_name, unsigned int a, unsigned int b, int is_true) + { + unsigned c = op(a, b); + if (is_true && c != CONSTTIME_TRUE) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %du " + "(TRUE), got %du\n", op_name, a, b, CONSTTIME_TRUE, c); + return 1; + } + else if (!is_true && c != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %du " + "(FALSE), got %du\n", op_name, a, b, CONSTTIME_FALSE, + c); + return 1; + } + return 0; + } + +static int test_binary_op_8(unsigned char (*op)(unsigned int a, unsigned int b), + const char* op_name, unsigned int a, unsigned int b, int is_true) + { + unsigned char c = op(a, b); + if (is_true && c != CONSTTIME_TRUE_8) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %u " + "(TRUE), got %u\n", 
op_name, a, b, CONSTTIME_TRUE_8, c); + return 1; + } + else if (!is_true && c != CONSTTIME_FALSE_8) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %u " + "(FALSE), got %u\n", op_name, a, b, CONSTTIME_FALSE_8, + c); + return 1; + } + return 0; + } + +static int test_is_zero(unsigned int a) + { + unsigned int c = constant_time_is_zero(a); + if (a == 0 && c != CONSTTIME_TRUE) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %du (TRUE), got %du\n", a, CONSTTIME_TRUE, c); + return 1; + } + else if (a != 0 && c != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %du (FALSE), got %du\n", a, CONSTTIME_FALSE, + c); + return 1; + } + return 0; + } + +static int test_is_zero_8(unsigned int a) + { + unsigned char c = constant_time_is_zero_8(a); + if (a == 0 && c != CONSTTIME_TRUE_8) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %u (TRUE), got %u\n", a, CONSTTIME_TRUE_8, c); + return 1; + } + else if (a != 0 && c != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %u (FALSE), got %u\n", a, CONSTTIME_FALSE_8, + c); + return 1; + } + return 0; + } + +static int test_select(unsigned int a, unsigned int b) + { + unsigned int selected = constant_time_select(CONSTTIME_TRUE, a, b); + if (selected != a) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %du," + "%du): expected %du(first value), got %du\n", + CONSTTIME_TRUE, a, b, a, selected); + return 1; + } + selected = constant_time_select(CONSTTIME_FALSE, a, b); + if (selected != b) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %du," + "%du): expected %du(second value), got %du\n", + CONSTTIME_FALSE, a, b, b, selected); + return 1; + } + return 0; + } + +static int test_select_8(unsigned char a, unsigned char b) + { + unsigned char selected = constant_time_select_8(CONSTTIME_TRUE_8, a, b); + if (selected != a) + { + 
fprintf(stderr, "Test failed for constant_time_select(%u, %u," + "%u): expected %u(first value), got %u\n", + CONSTTIME_TRUE, a, b, a, selected); + return 1; + } + selected = constant_time_select_8(CONSTTIME_FALSE_8, a, b); + if (selected != b) + { + fprintf(stderr, "Test failed for constant_time_select(%u, %u," + "%u): expected %u(second value), got %u\n", + CONSTTIME_FALSE, a, b, b, selected); + return 1; + } + return 0; + } + +static int test_select_int(int a, int b) + { + int selected = constant_time_select_int(CONSTTIME_TRUE, a, b); + if (selected != a) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %d," + "%d): expected %d(first value), got %d\n", + CONSTTIME_TRUE, a, b, a, selected); + return 1; + } + selected = constant_time_select_int(CONSTTIME_FALSE, a, b); + if (selected != b) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %d," + "%d): expected %d(second value), got %d\n", + CONSTTIME_FALSE, a, b, b, selected); + return 1; + } + return 0; + } + +static int test_eq_int(int a, int b) + { + unsigned int equal = constant_time_eq_int(a, b); + if (a == b && equal != CONSTTIME_TRUE) + { + fprintf(stderr, "Test failed for constant_time_eq_int(%d, %d): " + "expected %du(TRUE), got %du\n", + a, b, CONSTTIME_TRUE, equal); + return 1; + } + else if (a != b && equal != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for constant_time_eq_int(%d, %d): " + "expected %du(FALSE), got %du\n", + a, b, CONSTTIME_FALSE, equal); + return 1; + } + return 0; + } + +static int test_eq_int_8(int a, int b) + { + unsigned char equal = constant_time_eq_int_8(a, b); + if (a == b && equal != CONSTTIME_TRUE_8) + { + fprintf(stderr, "Test failed for constant_time_eq_int_8(%d, %d): " + "expected %u(TRUE), got %u\n", + a, b, CONSTTIME_TRUE_8, equal); + return 1; + } + else if (a != b && equal != CONSTTIME_FALSE_8) + { + fprintf(stderr, "Test failed for constant_time_eq_int_8(%d, %d): " + "expected %u(FALSE), got %u\n", + a, b, CONSTTIME_FALSE_8, 
equal); + return 1; + } + return 0; + } + +static unsigned int test_values[] = {0, 1, 1024, 12345, 32000, UINT_MAX/2-1, + UINT_MAX/2, UINT_MAX/2+1, UINT_MAX-1, + UINT_MAX}; + +static unsigned char test_values_8[] = {0, 1, 2, 20, 32, 127, 128, 129, 255}; + +static int signed_test_values[] = {0, 1, -1, 1024, -1024, 12345, -12345, + 32000, -32000, INT_MAX, INT_MIN, INT_MAX-1, + INT_MIN+1}; + + +int main(int argc, char *argv[]) + { + unsigned int a, b, i, j; + int c, d; + unsigned char e, f; + int num_failed = 0, num_all = 0; + fprintf(stdout, "Testing constant time operations...\n"); + + for (i = 0; i < sizeof(test_values)/sizeof(int); ++i) + { + a = test_values[i]; + num_failed += test_is_zero(a); + num_failed += test_is_zero_8(a); + num_all += 2; + for (j = 0; j < sizeof(test_values)/sizeof(int); ++j) + { + b = test_values[j]; + num_failed += test_binary_op(&constant_time_lt, + "constant_time_lt", a, b, a < b); + num_failed += test_binary_op_8(&constant_time_lt_8, + "constant_time_lt_8", a, b, a < b); + num_failed += test_binary_op(&constant_time_lt, + "constant_time_lt_8", b, a, b < a); + num_failed += test_binary_op_8(&constant_time_lt_8, + "constant_time_lt_8", b, a, b < a); + num_failed += test_binary_op(&constant_time_ge, + "constant_time_ge", a, b, a >= b); + num_failed += test_binary_op_8(&constant_time_ge_8, + "constant_time_ge_8", a, b, a >= b); + num_failed += test_binary_op(&constant_time_ge, + "constant_time_ge", b, a, b >= a); + num_failed += test_binary_op_8(&constant_time_ge_8, + "constant_time_ge_8", b, a, b >= a); + num_failed += test_binary_op(&constant_time_eq, + "constant_time_eq", a, b, a == b); + num_failed += test_binary_op_8(&constant_time_eq_8, + "constant_time_eq_8", a, b, a == b); + num_failed += test_binary_op(&constant_time_eq, + "constant_time_eq", b, a, b == a); + num_failed += test_binary_op_8(&constant_time_eq_8, + "constant_time_eq_8", b, a, b == a); + num_failed += test_select(a, b); + num_all += 13; + } + } + + for (i = 0; i < 
sizeof(signed_test_values)/sizeof(int); ++i) + { + c = signed_test_values[i]; + for (j = 0; j < sizeof(signed_test_values)/sizeof(int); ++j) + { + d = signed_test_values[j]; + num_failed += test_select_int(c, d); + num_failed += test_eq_int(c, d); + num_failed += test_eq_int_8(c, d); + num_all += 3; + } + } + + for (i = 0; i < sizeof(test_values_8); ++i) + { + e = test_values_8[i]; + for (j = 0; j < sizeof(test_values_8); ++j) + { + f = test_values_8[j]; + num_failed += test_select_8(e, f); + num_all += 1; + } + } + + if (!num_failed) + { + fprintf(stdout, "ok (ran %d tests)\n", num_all); + return EXIT_SUCCESS; + } + else + { + fprintf(stdout, "%d of %d tests failed!\n", num_failed, num_all); + return EXIT_FAILURE; + } + } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/crypto-lib.com nodejs-0.11.15/deps/openssl/openssl/crypto/crypto-lib.com --- nodejs-0.11.13/deps/openssl/openssl/crypto/crypto-lib.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/crypto-lib.com 2015-01-20 21:22:17.000000000 +0000 @@ -111,8 +111,8 @@ $ IF ARCH .EQS. "VAX" THEN ET_WHIRLPOOL = "" $ ENCRYPT_TYPES = "Basic,"+ - "OBJECTS,"+ - - "MD2,MD4,MD5,SHA,MDC2,HMAC,RIPEMD,"+ET_WHIRLPOOL+","+ - - "DES,AES,RC2,RC4,RC5,IDEA,BF,CAST,CAMELLIA,SEED,MODES,"+ - + "MD4,MD5,SHA,MDC2,HMAC,RIPEMD,"+ET_WHIRLPOOL+","+ - + "DES,AES,RC2,RC4,IDEA,BF,CAST,CAMELLIA,SEED,MODES,"+ - "BN,EC,RSA,DSA,ECDSA,DH,ECDH,DSO,ENGINE,"+ - "BUFFER,BIO,STACK,LHASH,RAND,ERR,"+ - "EVP,EVP_2,EVP_3,ASN1,ASN1_2,PEM,X509,X509V3,"+ - @@ -204,11 +204,18 @@ $! $! Define The Different Encryption "library" Strings. $! -$ APPS_DES = "DES/DES,CBC3_ENC" -$ APPS_PKCS7 = "ENC/ENC;DEC/DEC;SIGN/SIGN;VERIFY/VERIFY,EXAMPLE" +$!!! Test apps disabled, as they aren't supported at all, +$!!! not even in the unix build +$!!! APPS_DES = "DES/DES,CBC3_ENC" +$!!! 
APPS_PKCS7 = "ENC/ENC;DEC/DEC;SIGN/SIGN;VERIFY/VERIFY,EXAMPLE" $ -$ LIB_ = "cryptlib,mem,mem_clr,mem_dbg,cversion,ex_data,cpt_err,"+ - - "ebcdic,uid,o_time,o_str,o_dir,o_fips.c,o_init,fips_ers" +$! These variables are ordered as the SDIRS variable from the top Makefile.org +$! The contents of these variables are copied from the LIBOBJ variable in the +$! corresponding Makefile from each corresponding subdirectory, with .o stripped +$! and spaces replaced with commas. +$ LIB_ = "cryptlib,mem,mem_dbg,cversion,ex_data,cpt_err,ebcdic,"+ - + "uid,o_time,o_str,o_dir,o_fips.c,o_init,fips_ers,mem_clr" +$ LIB_OBJECTS = "o_names,obj_dat,obj_lib,obj_err,obj_xref" $ LIB_MD2 = "md2_dgst,md2_one" $ LIB_MD4 = "md4_dgst,md4_one" $ LIB_MD5 = "md5_dgst,md5_one" @@ -225,13 +232,13 @@ "fcrypt,xcbc_enc,rpc_enc,cbc_cksm,"+ - "ede_cbcm_enc,des_old,des_old2,read2pwd" $ LIB_RC2 = "rc2_ecb,rc2_skey,rc2_cbc,rc2cfb64,rc2ofb64" -$ LIB_RC4 = "rc4_skey,rc4_enc,rc4_utl" +$ LIB_RC4 = "rc4_enc,rc4_skey,rc4_utl" $ LIB_RC5 = "rc5_skey,rc5_ecb,rc5_enc,rc5cfb64,rc5ofb64" $ LIB_IDEA = "i_cbc,i_cfb64,i_ofb64,i_ecb,i_skey" $ LIB_BF = "bf_skey,bf_ecb,bf_enc,bf_cfb64,bf_ofb64" $ LIB_CAST = "c_skey,c_ecb,c_enc,c_cfb64,c_ofb64" -$ LIB_CAMELLIA = "camellia,cmll_misc,cmll_ecb,cmll_cbc,cmll_ofb,"+ - - "cmll_cfb,cmll_ctr,cmll_utl" +$ LIB_CAMELLIA = "cmll_ecb,cmll_ofb,cmll_cfb,cmll_ctr,cmll_utl,"+ - + "camellia,cmll_misc,cmll_cbc" $ LIB_SEED = "seed,seed_ecb,seed_cbc,seed_cfb,seed_ofb" $ LIB_MODES = "cbc128,ctr128,cts128,cfb128,ofb128,gcm128,"+ - "ccm128,xts128" @@ -264,24 +271,23 @@ "eng_table,eng_pkey,eng_fat,eng_all,"+ - "tb_rsa,tb_dsa,tb_ecdsa,tb_dh,tb_ecdh,tb_rand,tb_store,"+ - "tb_cipher,tb_digest,tb_pkmeth,tb_asnmth,"+ - - "eng_openssl,eng_dyn,eng_cnf,eng_cryptodev,"+ - + "eng_openssl,eng_cnf,eng_dyn,eng_cryptodev,"+ - "eng_rsax,eng_rdrand" -$ LIB_AES = "aes_core,aes_misc,aes_ecb,aes_cbc,aes_cfb,aes_ofb,aes_ctr,"+ - - "aes_ige,aes_wrap" +$ LIB_AES = 
"aes_misc,aes_ecb,aes_cfb,aes_ofb,aes_ctr,aes_ige,aes_wrap,"+ - + "aes_core,aes_cbc" $ LIB_BUFFER = "buffer,buf_str,buf_err" $ LIB_BIO = "bio_lib,bio_cb,bio_err,"+ - "bss_mem,bss_null,bss_fd,"+ - "bss_file,bss_sock,bss_conn,"+ - "bf_null,bf_buff,b_print,b_dump,"+ - - "b_sock,bss_acpt,bf_nbio,bss_rtcp,bss_bio,bss_log,"+ - + "b_sock,bss_acpt,bf_nbio,bss_log,bss_bio,"+ - "bss_dgram,"+ - - "bf_lbuf" + "bf_lbuf,bss_rtcp" ! The last two are VMS specific $ LIB_STACK = "stack" $ LIB_LHASH = "lhash,lh_stats" $ LIB_RAND = "md_rand,randfile,rand_lib,rand_err,rand_egd,"+ - - "rand_vms" + "rand_vms" ! The last one is VMS specific $ LIB_ERR = "err,err_all,err_prn" -$ LIB_OBJECTS = "o_names,obj_dat,obj_lib,obj_err,obj_xref" $ LIB_EVP = "encode,digest,evp_enc,evp_key,evp_acnf,evp_cnf,"+ - "e_des,e_bf,e_idea,e_des3,e_camellia,"+ - "e_rc4,e_aes,names,e_seed,"+ - @@ -345,7 +351,7 @@ $ LIB_JPAKE = "jpake,jpake_err" $ LIB_SRP = "srp_lib,srp_vfy" $ LIB_STORE = "str_err,str_lib,str_meth,str_mem" -$ LIB_CMAC = "cmac,cm_ameth.c,cm_pmeth" +$ LIB_CMAC = "cmac,cm_ameth,cm_pmeth" $! $! Setup exceptional compilations $! @@ -381,7 +387,7 @@ $! $! Extract The Module Name From The Encryption List. $! -$ MODULE_NAME = F$ELEMENT(MODULE_COUNTER,",",ENCRYPT_TYPES) +$ MODULE_NAME = F$EDIT(F$ELEMENT(MODULE_COUNTER,",",ENCRYPT_TYPES),"COLLAPSE") $ IF MODULE_NAME.EQS."Basic" THEN MODULE_NAME = "" $ MODULE_NAME1 = MODULE_NAME $! @@ -465,7 +471,7 @@ $! $! O.K, Extract The File Name From The File List. $! -$ FILE_NAME = F$ELEMENT(FILE_COUNTER,",",'LIB_MODULE') +$ FILE_NAME = F$EDIT(F$ELEMENT(FILE_COUNTER,",",'LIB_MODULE'),"COLLAPSE") $! $! else $! @@ -492,7 +498,7 @@ $! $ IF APPLICATION .NES. ";" $ THEN -$ FILE_NAME = F$ELEMENT(FILE_COUNTER,",",APPLICATION_OBJECTS) +$ FILE_NAME = F$EDIT(F$ELEMENT(FILE_COUNTER,",",APPLICATION_OBJECTS),"COLLAPSE") $ IF FILE_NAME .EQS. "," $ THEN $ APPLICATION = "" @@ -1132,9 +1138,12 @@ $ IF F$TYPE(USER_CCDEFS) .NES. 
"" THEN CCDEFS = CCDEFS + "," + USER_CCDEFS $ CCEXTRAFLAGS = "" $ IF F$TYPE(USER_CCFLAGS) .NES. "" THEN CCEXTRAFLAGS = USER_CCFLAGS -$ CCDISABLEWARNINGS = "" !!! "LONGLONGTYPE,LONGLONGSUFX,FOUNDCR" -$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" THEN - - CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," + USER_CCDISABLEWARNINGS +$ CCDISABLEWARNINGS = "" !!! "MAYLOSEDATA3" !!! "LONGLONGTYPE,LONGLONGSUFX,FOUNDCR" +$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS + USER_CCDISABLEWARNINGS +$ ENDIF $! $! Check To See If We Have A ZLIB Option. $! @@ -1299,6 +1308,18 @@ $! $ IF COMPILER .EQS. "DECC" $ THEN +$! Not all compiler versions support MAYLOSEDATA3. +$ OPT_TEST = "MAYLOSEDATA3" +$ DEFINE /USER_MODE SYS$ERROR NL: +$ DEFINE /USER_MODE SYS$OUTPUT NL: +$ 'CC' /NOCROSS_REFERENCE /NOLIST /NOOBJECT - + /WARNINGS = DISABLE = ('OPT_TEST', EMPTYFILE) NL: +$ IF ($SEVERITY) +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN - + CCDISABLEWARNINGS = CCDISABLEWARNINGS+ "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS+ OPT_TEST +$ ENDIF $ IF CCDISABLEWARNINGS .EQS. "" $ THEN $ CC4DISABLEWARNINGS = "DOLLARID" diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/des/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/des/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/des/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/des/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,279 +0,0 @@ -# -# OpenSSL/crypto/des/Makefile -# - -DIR= des -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES=-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r -RANLIB= ranlib -DES_ENC= des_enc.o fcrypt_b.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=destest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= cbc_cksm.c cbc_enc.c cfb64enc.c cfb_enc.c \ - ecb3_enc.c ecb_enc.c enc_read.c enc_writ.c \ - fcrypt.c ofb64enc.c ofb_enc.c pcbc_enc.c \ - qud_cksm.c rand_key.c rpc_enc.c set_key.c \ - des_enc.c fcrypt_b.c \ - xcbc_enc.c \ - str2key.c cfb64ede.c ofb64ede.c ede_cbcm_enc.c des_old.c des_old2.c \ - read2pwd.c - -LIBOBJ= set_key.o ecb_enc.o cbc_enc.o \ - ecb3_enc.o cfb64enc.o cfb64ede.o cfb_enc.o ofb64ede.o \ - enc_read.o enc_writ.o ofb64enc.o \ - ofb_enc.o str2key.o pcbc_enc.o qud_cksm.o rand_key.o \ - ${DES_ENC} \ - fcrypt.o xcbc_enc.o rpc_enc.o cbc_cksm.o \ - ede_cbcm_enc.o des_old.o des_old2.o read2pwd.o - -SRC= $(LIBSRC) - -EXHEADER= des.h des_old.h -HEADER= des_locl.h rpc_des.h spr.h des_ver.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -des: des.o cbc3_enc.o lib - $(CC) $(CFLAGS) -o des des.o cbc3_enc.o $(LIB) - -des_enc-sparc.S: asm/des_enc.m4 - m4 -B 8192 asm/des_enc.m4 > des_enc-sparc.S - -des-586.s: asm/des-586.pl ../perlasm/x86asm.pl ../perlasm/cbc.pl - $(PERL) asm/des-586.pl $(PERLASM_SCHEME) $(CFLAGS) > $@ -crypt586.s: asm/crypt586.pl ../perlasm/x86asm.pl ../perlasm/cbc.pl - $(PERL) asm/crypt586.pl $(PERLASM_SCHEME) $(CFLAGS) > $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -# We need to use force because 'install' matches 'INSTALL' on case -# insensitive systems -FRC.install: -install: FRC.install - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj des lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -cbc_cksm.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -cbc_cksm.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -cbc_cksm.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -cbc_cksm.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cbc_cksm.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -cbc_cksm.o: cbc_cksm.c des_locl.h -cbc_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -cbc_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -cbc_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -cbc_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cbc_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -cbc_enc.o: cbc_enc.c des_locl.h ncbc_enc.c -cfb64ede.o: ../../e_os.h ../../include/openssl/des.h -cfb64ede.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -cfb64ede.o: ../../include/openssl/opensslconf.h -cfb64ede.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -cfb64ede.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cfb64ede.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -cfb64ede.o: cfb64ede.c des_locl.h -cfb64enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -cfb64enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -cfb64enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -cfb64enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -cfb64enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -cfb64enc.o: cfb64enc.c des_locl.h -cfb_enc.o: ../../e_os.h ../../include/openssl/des.h -cfb_enc.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -cfb_enc.o: ../../include/openssl/opensslconf.h ../../include/openssl/ossl_typ.h -cfb_enc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cfb_enc.o: 
../../include/openssl/symhacks.h ../../include/openssl/ui.h -cfb_enc.o: ../../include/openssl/ui_compat.h cfb_enc.c des_locl.h -des_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -des_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -des_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -des_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -des_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -des_enc.o: des_enc.c des_locl.h ncbc_enc.c spr.h -des_old.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -des_old.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -des_old.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -des_old.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -des_old.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -des_old.o: ../../include/openssl/ui_compat.h des_old.c -des_old2.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -des_old2.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -des_old2.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -des_old2.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -des_old2.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -des_old2.o: ../../include/openssl/ui_compat.h des_old2.c -ecb3_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -ecb3_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -ecb3_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ecb3_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ecb3_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -ecb3_enc.o: des_locl.h ecb3_enc.c -ecb_enc.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -ecb_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -ecb_enc.o: 
../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -ecb_enc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecb_enc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecb_enc.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -ecb_enc.o: ../../include/openssl/ui_compat.h des_locl.h des_ver.h ecb_enc.c -ede_cbcm_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -ede_cbcm_enc.o: ../../include/openssl/e_os2.h -ede_cbcm_enc.o: ../../include/openssl/opensslconf.h -ede_cbcm_enc.o: ../../include/openssl/ossl_typ.h -ede_cbcm_enc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ede_cbcm_enc.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -ede_cbcm_enc.o: ../../include/openssl/ui_compat.h des_locl.h ede_cbcm_enc.c -enc_read.o: ../../e_os.h ../../include/openssl/bio.h -enc_read.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -enc_read.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -enc_read.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -enc_read.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -enc_read.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -enc_read.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -enc_read.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -enc_read.o: ../../include/openssl/ui_compat.h ../cryptlib.h des_locl.h -enc_read.o: enc_read.c -enc_writ.o: ../../e_os.h ../../include/openssl/bio.h -enc_writ.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -enc_writ.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -enc_writ.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -enc_writ.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -enc_writ.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -enc_writ.o: ../../include/openssl/rand.h 
../../include/openssl/safestack.h -enc_writ.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -enc_writ.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -enc_writ.o: ../cryptlib.h des_locl.h enc_writ.c -fcrypt.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -fcrypt.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -fcrypt.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -fcrypt.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -fcrypt.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -fcrypt.o: des_locl.h fcrypt.c -fcrypt_b.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -fcrypt_b.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -fcrypt_b.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -fcrypt_b.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -fcrypt_b.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -fcrypt_b.o: des_locl.h fcrypt_b.c -ofb64ede.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -ofb64ede.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -ofb64ede.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ofb64ede.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ofb64ede.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -ofb64ede.o: des_locl.h ofb64ede.c -ofb64enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -ofb64enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -ofb64enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ofb64enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ofb64enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -ofb64enc.o: des_locl.h ofb64enc.c -ofb_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -ofb_enc.o: 
../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -ofb_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ofb_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ofb_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -ofb_enc.o: des_locl.h ofb_enc.c -pcbc_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -pcbc_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -pcbc_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -pcbc_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pcbc_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -pcbc_enc.o: des_locl.h pcbc_enc.c -qud_cksm.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -qud_cksm.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -qud_cksm.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -qud_cksm.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -qud_cksm.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -qud_cksm.o: des_locl.h qud_cksm.c -rand_key.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -rand_key.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -rand_key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -rand_key.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rand_key.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -rand_key.o: ../../include/openssl/ui_compat.h rand_key.c -read2pwd.o: ../../include/openssl/crypto.h ../../include/openssl/des.h -read2pwd.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -read2pwd.o: ../../include/openssl/opensslconf.h -read2pwd.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -read2pwd.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -read2pwd.o: ../../include/openssl/symhacks.h 
../../include/openssl/ui.h -read2pwd.o: ../../include/openssl/ui_compat.h read2pwd.c -rpc_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -rpc_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -rpc_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -rpc_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rpc_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -rpc_enc.o: des_locl.h des_ver.h rpc_des.h rpc_enc.c -set_key.o: ../../include/openssl/crypto.h ../../include/openssl/des.h -set_key.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -set_key.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -set_key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -set_key.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -set_key.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -set_key.o: des_locl.h set_key.c -str2key.o: ../../include/openssl/crypto.h ../../include/openssl/des.h -str2key.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -str2key.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -str2key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -str2key.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -str2key.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -str2key.o: des_locl.h str2key.c -xcbc_enc.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -xcbc_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -xcbc_enc.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -xcbc_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -xcbc_enc.o: ../../include/openssl/ui.h ../../include/openssl/ui_compat.h -xcbc_enc.o: des_locl.h xcbc_enc.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/dh/Makefile.save 
nodejs-0.11.15/deps/openssl/openssl/crypto/dh/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/dh/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/dh/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -# -# OpenSSL/crypto/dh/Makefile -# - -DIR= dh -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= dhtest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= dh_asn1.c dh_gen.c dh_key.c dh_lib.c dh_check.c dh_err.c dh_depr.c \ - dh_ameth.c dh_pmeth.c dh_prn.c -LIBOBJ= dh_asn1.o dh_gen.o dh_key.o dh_lib.o dh_check.o dh_err.o dh_depr.o \ - dh_ameth.o dh_pmeth.o dh_prn.o - -SRC= $(LIBSRC) - -EXHEADER= dh.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -dh_ameth.o: ../../e_os.h ../../include/openssl/asn1.h -dh_ameth.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -dh_ameth.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dh_ameth.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_ameth.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -dh_ameth.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -dh_ameth.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -dh_ameth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -dh_ameth.o: ../../include/openssl/opensslconf.h -dh_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dh_ameth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -dh_ameth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dh_ameth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -dh_ameth.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -dh_ameth.o: dh_ameth.c -dh_asn1.o: ../../e_os.h ../../include/openssl/asn1.h -dh_asn1.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -dh_asn1.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -dh_asn1.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -dh_asn1.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -dh_asn1.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dh_asn1.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -dh_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dh_asn1.o: ../../include/openssl/safestack.h 
../../include/openssl/stack.h -dh_asn1.o: ../../include/openssl/symhacks.h ../cryptlib.h dh_asn1.c -dh_check.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -dh_check.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dh_check.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_check.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dh_check.o: ../../include/openssl/opensslconf.h -dh_check.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dh_check.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dh_check.o: ../../include/openssl/symhacks.h ../cryptlib.h dh_check.c -dh_depr.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -dh_depr.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dh_depr.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_depr.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dh_depr.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dh_depr.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dh_depr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dh_depr.o: ../cryptlib.h dh_depr.c -dh_err.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -dh_err.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dh_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dh_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dh_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dh_err.o: dh_err.c -dh_gen.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -dh_gen.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dh_gen.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_gen.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dh_gen.o: 
../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dh_gen.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dh_gen.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dh_gen.o: ../cryptlib.h dh_gen.c -dh_key.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -dh_key.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dh_key.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_key.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dh_key.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dh_key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -dh_key.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dh_key.o: ../../include/openssl/symhacks.h ../cryptlib.h dh_key.c -dh_lib.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -dh_lib.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -dh_lib.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -dh_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -dh_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -dh_lib.o: ../../include/openssl/engine.h ../../include/openssl/err.h -dh_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -dh_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -dh_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dh_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -dh_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -dh_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dh_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -dh_lib.o: ../cryptlib.h dh_lib.c -dh_pmeth.o: ../../e_os.h ../../include/openssl/asn1.h -dh_pmeth.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -dh_pmeth.o: ../../include/openssl/bn.h 
../../include/openssl/buffer.h -dh_pmeth.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -dh_pmeth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -dh_pmeth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -dh_pmeth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -dh_pmeth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dh_pmeth.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -dh_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dh_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -dh_pmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dh_pmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -dh_pmeth.o: ../../include/openssl/x509_vfy.h ../cryptlib.h ../evp/evp_locl.h -dh_pmeth.o: dh_pmeth.c -dh_prn.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -dh_prn.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dh_prn.o: ../../include/openssl/dh.h ../../include/openssl/e_os2.h -dh_prn.o: ../../include/openssl/err.h ../../include/openssl/evp.h -dh_prn.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dh_prn.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -dh_prn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dh_prn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dh_prn.o: ../../include/openssl/symhacks.h ../cryptlib.h dh_prn.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/dsa/dsa_ameth.c nodejs-0.11.15/deps/openssl/openssl/crypto/dsa/dsa_ameth.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/dsa/dsa_ameth.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/dsa/dsa_ameth.c 2015-01-20 21:22:17.000000000 +0000 @@ -307,6 +307,12 @@ unsigned char *dp = NULL; int dplen; + if (!pkey->pkey.dsa || !pkey->pkey.dsa->priv_key) + { + 
DSAerr(DSA_F_DSA_PRIV_ENCODE,DSA_R_MISSING_PARAMETERS); + goto err; + } + params = ASN1_STRING_new(); if (!params) @@ -701,4 +707,3 @@ old_dsa_priv_encode } }; - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/dsa/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/dsa/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/dsa/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/dsa/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -# -# OpenSSL/crypto/dsa/Makefile -# - -DIR= dsa -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=dsatest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= dsa_gen.c dsa_key.c dsa_lib.c dsa_asn1.c dsa_vrf.c dsa_sign.c \ - dsa_err.c dsa_ossl.c dsa_depr.c dsa_ameth.c dsa_pmeth.c dsa_prn.c -LIBOBJ= dsa_gen.o dsa_key.o dsa_lib.o dsa_asn1.o dsa_vrf.o dsa_sign.o \ - dsa_err.o dsa_ossl.o dsa_depr.o dsa_ameth.o dsa_pmeth.o dsa_prn.o - -SRC= $(LIBSRC) - -EXHEADER= dsa.h -HEADER= dsa_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -dsa_ameth.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_ameth.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_ameth.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -dsa_ameth.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -dsa_ameth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -dsa_ameth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -dsa_ameth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -dsa_ameth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dsa_ameth.o: ../../include/openssl/objects.h -dsa_ameth.o: ../../include/openssl/opensslconf.h -dsa_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_ameth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -dsa_ameth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dsa_ameth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -dsa_ameth.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -dsa_ameth.o: dsa_ameth.c -dsa_asn1.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_asn1.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -dsa_asn1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_asn1.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_asn1.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dsa_asn1.o: ../../include/openssl/opensslconf.h -dsa_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_asn1.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -dsa_asn1.o: 
../../include/openssl/stack.h ../../include/openssl/symhacks.h -dsa_asn1.o: ../cryptlib.h dsa_asn1.c -dsa_depr.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_depr.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_depr.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_depr.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_depr.o: ../../include/openssl/err.h ../../include/openssl/evp.h -dsa_depr.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dsa_depr.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -dsa_depr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_depr.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -dsa_depr.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dsa_depr.o: ../../include/openssl/symhacks.h ../cryptlib.h dsa_depr.c -dsa_err.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -dsa_err.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dsa_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dsa_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dsa_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dsa_err.o: dsa_err.c -dsa_gen.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_gen.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_gen.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_gen.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_gen.o: ../../include/openssl/err.h ../../include/openssl/evp.h -dsa_gen.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dsa_gen.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -dsa_gen.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_gen.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h 
-dsa_gen.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dsa_gen.o: ../../include/openssl/symhacks.h ../cryptlib.h dsa_gen.c dsa_locl.h -dsa_key.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_key.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_key.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_key.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dsa_key.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dsa_key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -dsa_key.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dsa_key.o: ../../include/openssl/symhacks.h ../cryptlib.h dsa_key.c -dsa_lib.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_lib.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_lib.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -dsa_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -dsa_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -dsa_lib.o: ../../include/openssl/engine.h ../../include/openssl/err.h -dsa_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -dsa_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -dsa_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dsa_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -dsa_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -dsa_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dsa_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -dsa_lib.o: ../cryptlib.h dsa_lib.c -dsa_ossl.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_ossl.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_ossl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_ossl.o: 
../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_ossl.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dsa_ossl.o: ../../include/openssl/opensslconf.h -dsa_ossl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_ossl.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -dsa_ossl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dsa_ossl.o: ../../include/openssl/symhacks.h ../cryptlib.h dsa_ossl.c -dsa_pmeth.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_pmeth.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -dsa_pmeth.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -dsa_pmeth.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -dsa_pmeth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -dsa_pmeth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -dsa_pmeth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -dsa_pmeth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -dsa_pmeth.o: ../../include/openssl/objects.h -dsa_pmeth.o: ../../include/openssl/opensslconf.h -dsa_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -dsa_pmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -dsa_pmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -dsa_pmeth.o: ../../include/openssl/x509_vfy.h ../cryptlib.h ../evp/evp_locl.h -dsa_pmeth.o: dsa_locl.h dsa_pmeth.c -dsa_prn.o: ../../e_os.h ../../include/openssl/asn1.h -dsa_prn.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -dsa_prn.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -dsa_prn.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -dsa_prn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -dsa_prn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -dsa_prn.o: 
../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dsa_prn.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dsa_prn.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dsa_prn.o: ../cryptlib.h dsa_prn.c -dsa_sign.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -dsa_sign.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_sign.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_sign.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dsa_sign.o: ../../include/openssl/opensslconf.h -dsa_sign.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dsa_sign.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -dsa_sign.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dsa_sign.o: ../cryptlib.h dsa_sign.c -dsa_vrf.o: ../../e_os.h ../../include/openssl/bio.h -dsa_vrf.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dsa_vrf.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -dsa_vrf.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dsa_vrf.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dsa_vrf.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dsa_vrf.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dsa_vrf.o: ../cryptlib.h dsa_vrf.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/dso/dso_dlfcn.c nodejs-0.11.15/deps/openssl/openssl/crypto/dso/dso_dlfcn.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/dso/dso_dlfcn.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/dso/dso_dlfcn.c 2015-01-20 21:22:17.000000000 +0000 @@ -464,7 +464,7 @@ return len; } - ERR_add_error_data(4, "dlfcn_pathbyaddr(): ", dlerror()); + ERR_add_error_data(2, "dlfcn_pathbyaddr(): ", dlerror()); #endif return -1; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/dso/dso_vms.c 
nodejs-0.11.15/deps/openssl/openssl/crypto/dso/dso_vms.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/dso/dso_vms.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/dso/dso_vms.c 2015-01-20 21:22:17.000000000 +0000 @@ -61,7 +61,14 @@ #include <errno.h> #include "cryptlib.h" #include <openssl/dso.h> -#ifdef OPENSSL_SYS_VMS + +#ifndef OPENSSL_SYS_VMS +DSO_METHOD *DSO_METHOD_vms(void) + { + return NULL; + } +#else + #pragma message disable DOLLARID #include <rms.h> #include <lib$routines.h> @@ -69,7 +76,6 @@ #include <descrip.h> #include <starlet.h> #include "vms_rms.h" -#endif /* Some compiler options may mask the declaration of "_malloc32". */ #if __INITIAL_POINTER_SIZE && defined _ANSI_C_SOURCE @@ -82,12 +88,6 @@ #endif /* __INITIAL_POINTER_SIZE && defined _ANSI_C_SOURCE */ -#ifndef OPENSSL_SYS_VMS -DSO_METHOD *DSO_METHOD_vms(void) - { - return NULL; - } -#else #pragma message disable DOLLARID static int vms_load(DSO *dso); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/dso/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/dso/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/dso/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/dso/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,150 +0,0 @@ -# -# OpenSSL/crypto/dso/Makefile -# - -DIR= dso -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= dso_dl.c dso_dlfcn.c dso_err.c dso_lib.c dso_null.c \ - dso_openssl.c dso_win32.c dso_vms.c dso_beos.c -LIBOBJ= dso_dl.o dso_dlfcn.o dso_err.o dso_lib.o dso_null.o \ - dso_openssl.o dso_win32.o dso_vms.o dso_beos.o - -SRC= $(LIBSRC) - -EXHEADER= dso.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -dso_beos.o: ../../e_os.h ../../include/openssl/bio.h -dso_beos.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_beos.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_beos.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_beos.o: ../../include/openssl/opensslconf.h -dso_beos.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dso_beos.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dso_beos.o: ../../include/openssl/symhacks.h ../cryptlib.h dso_beos.c -dso_dl.o: ../../e_os.h ../../include/openssl/bio.h -dso_dl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_dl.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_dl.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_dl.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dso_dl.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dso_dl.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dso_dl.o: ../cryptlib.h dso_dl.c -dso_dlfcn.o: ../../e_os.h ../../include/openssl/bio.h -dso_dlfcn.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_dlfcn.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_dlfcn.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_dlfcn.o: ../../include/openssl/opensslconf.h -dso_dlfcn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dso_dlfcn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dso_dlfcn.o: ../../include/openssl/symhacks.h ../cryptlib.h dso_dlfcn.c -dso_err.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -dso_err.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dso_err.o: ../../include/openssl/ossl_typ.h 
../../include/openssl/safestack.h -dso_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dso_err.o: dso_err.c -dso_lib.o: ../../e_os.h ../../include/openssl/bio.h -dso_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_lib.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_lib.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dso_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dso_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dso_lib.o: ../cryptlib.h dso_lib.c -dso_null.o: ../../e_os.h ../../include/openssl/bio.h -dso_null.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_null.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_null.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_null.o: ../../include/openssl/opensslconf.h -dso_null.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dso_null.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dso_null.o: ../../include/openssl/symhacks.h ../cryptlib.h dso_null.c -dso_openssl.o: ../../e_os.h ../../include/openssl/bio.h -dso_openssl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_openssl.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_openssl.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_openssl.o: ../../include/openssl/opensslconf.h -dso_openssl.o: ../../include/openssl/opensslv.h -dso_openssl.o: ../../include/openssl/ossl_typ.h -dso_openssl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dso_openssl.o: ../../include/openssl/symhacks.h ../cryptlib.h dso_openssl.c -dso_vms.o: ../../e_os.h ../../include/openssl/bio.h -dso_vms.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_vms.o: ../../include/openssl/dso.h 
../../include/openssl/e_os2.h -dso_vms.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_vms.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -dso_vms.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -dso_vms.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -dso_vms.o: ../cryptlib.h dso_vms.c -dso_win32.o: ../../e_os.h ../../include/openssl/bio.h -dso_win32.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -dso_win32.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -dso_win32.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -dso_win32.o: ../../include/openssl/opensslconf.h -dso_win32.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -dso_win32.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -dso_win32.o: ../../include/openssl/symhacks.h ../cryptlib.h dso_win32.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ebcdic.h nodejs-0.11.15/deps/openssl/openssl/crypto/ebcdic.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/ebcdic.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ebcdic.h 2015-01-20 21:22:17.000000000 +0000 @@ -5,6 +5,10 @@ #include <sys/types.h> +#ifdef __cplusplus +extern "C" { +#endif + /* Avoid name clashes with other applications */ #define os_toascii _openssl_os_toascii #define os_toebcdic _openssl_os_toebcdic @@ -16,4 +20,7 @@ void *ebcdic2ascii(void *dest, const void *srce, size_t count); void *ascii2ebcdic(void *dest, const void *srce, size_t count); +#ifdef __cplusplus +} +#endif #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec2_smpl.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec2_smpl.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec2_smpl.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec2_smpl.c 2015-01-20 21:22:17.000000000 +0000 @@ -80,9 +80,6 @@ const EC_METHOD 
*EC_GF2m_simple_method(void) { -#ifdef OPENSSL_FIPS - return fips_ec_gf2m_simple_method(); -#else static const EC_METHOD ret = { EC_FLAGS_DEFAULT_OCT, NID_X9_62_characteristic_two_field, @@ -125,8 +122,12 @@ 0 /* field_decode */, 0 /* field_set_to_one */ }; - return &ret; +#ifdef OPENSSL_FIPS + if (FIPS_mode()) + return fips_ec_gf2m_simple_method(); #endif + + return &ret; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_ameth.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_ameth.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_ameth.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_ameth.c 2015-01-20 21:22:17.000000000 +0000 @@ -352,6 +352,7 @@ EC_KEY_set_enc_flags(ec_key, old_flags); OPENSSL_free(ep); ECerr(EC_F_ECKEY_PRIV_ENCODE, ERR_R_EC_LIB); + return 0; } /* restore old encoding flags */ EC_KEY_set_enc_flags(ec_key, old_flags); @@ -452,14 +453,16 @@ if (ktype > 0) { public_key = EC_KEY_get0_public_key(x); - if ((pub_key = EC_POINT_point2bn(group, public_key, - EC_KEY_get_conv_form(x), NULL, ctx)) == NULL) + if (public_key != NULL) { - reason = ERR_R_EC_LIB; - goto err; - } - if (pub_key) + if ((pub_key = EC_POINT_point2bn(group, public_key, + EC_KEY_get_conv_form(x), NULL, ctx)) == NULL) + { + reason = ERR_R_EC_LIB; + goto err; + } buf_len = (size_t)BN_num_bytes(pub_key); + } } if (ktype == 2) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_asn1.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_asn1.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_asn1.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_asn1.c 2015-01-20 21:22:17.000000000 +0000 @@ -1183,30 +1183,47 @@ goto err; } + if (ret->pub_key) + EC_POINT_clear_free(ret->pub_key); + ret->pub_key = EC_POINT_new(ret->group); + if (ret->pub_key == NULL) + { + ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB); + goto err; + } + if (priv_key->publicKey) { const unsigned char *pub_oct; - size_t 
pub_oct_len; + int pub_oct_len; - if (ret->pub_key) - EC_POINT_clear_free(ret->pub_key); - ret->pub_key = EC_POINT_new(ret->group); - if (ret->pub_key == NULL) + pub_oct = M_ASN1_STRING_data(priv_key->publicKey); + pub_oct_len = M_ASN1_STRING_length(priv_key->publicKey); + /* The first byte - point conversion form - must be present. */ + if (pub_oct_len <= 0) { - ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB); + ECerr(EC_F_D2I_ECPRIVATEKEY, EC_R_BUFFER_TOO_SMALL); goto err; } - pub_oct = M_ASN1_STRING_data(priv_key->publicKey); - pub_oct_len = M_ASN1_STRING_length(priv_key->publicKey); - /* save the point conversion form */ + /* Save the point conversion form. */ ret->conv_form = (point_conversion_form_t)(pub_oct[0] & ~0x01); if (!EC_POINT_oct2point(ret->group, ret->pub_key, - pub_oct, pub_oct_len, NULL)) + pub_oct, (size_t)(pub_oct_len), NULL)) { ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB); goto err; } } + else + { + if (!EC_POINT_mul(ret->group, ret->pub_key, ret->priv_key, NULL, NULL, NULL)) + { + ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB); + goto err; + } + /* Remember the original private-key-only encoding. 
*/ + ret->enc_flag |= EC_PKEY_NO_PUBKEY; + } ok = 1; err: @@ -1230,7 +1247,8 @@ size_t buf_len=0, tmp_len; EC_PRIVATEKEY *priv_key=NULL; - if (a == NULL || a->group == NULL || a->priv_key == NULL) + if (a == NULL || a->group == NULL || a->priv_key == NULL || + (!(a->enc_flag & EC_PKEY_NO_PUBKEY) && a->pub_key == NULL)) { ECerr(EC_F_I2D_ECPRIVATEKEY, ERR_R_PASSED_NULL_PARAMETER); @@ -1435,8 +1453,11 @@ *out, buf_len, NULL)) { ECerr(EC_F_I2O_ECPUBLICKEY, ERR_R_EC_LIB); - OPENSSL_free(*out); - *out = NULL; + if (new_buffer) + { + OPENSSL_free(*out); + *out = NULL; + } return 0; } if (!new_buffer) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec.h nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec.h 2015-01-20 21:22:17.000000000 +0000 @@ -629,7 +629,7 @@ int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx); int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], BN_CTX *ctx); -/** Computes r = generator * n sum_{i=0}^num p[i] * m[i] +/** Computes r = generator * n sum_{i=0}^{num-1} p[i] * m[i] * \param group underlying EC_GROUP object * \param r EC_POINT object for the result * \param n BIGNUM with the multiplier for the group generator (optional) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_lcl.h nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_lcl.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_lcl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_lcl.h 2015-01-20 21:22:17.000000000 +0000 @@ -404,7 +404,7 @@ int ec_GF2m_precompute_mult(EC_GROUP *group, BN_CTX *ctx); int ec_GF2m_have_precompute_mult(const EC_GROUP *group); -#ifndef OPENSSL_EC_NISTP_64_GCC_128 +#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128 /* method functions in ecp_nistp224.c */ int ec_GFp_nistp224_group_init(EC_GROUP *group); int 
ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *n, BN_CTX *); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ec_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ec_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -942,7 +942,7 @@ int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx) { - if (group->meth->dbl == 0) + if (group->meth->invert == 0) { ECerr(EC_F_EC_POINT_INVERT, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ecp_mont.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ecp_mont.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ecp_mont.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ecp_mont.c 2015-01-20 21:22:17.000000000 +0000 @@ -72,9 +72,6 @@ const EC_METHOD *EC_GFp_mont_method(void) { -#ifdef OPENSSL_FIPS - return fips_ec_gfp_mont_method(); -#else static const EC_METHOD ret = { EC_FLAGS_DEFAULT_OCT, NID_X9_62_prime_field, @@ -114,8 +111,12 @@ ec_GFp_mont_field_decode, ec_GFp_mont_field_set_to_one }; - return &ret; +#ifdef OPENSSL_FIPS + if (FIPS_mode()) + return fips_ec_gfp_mont_method(); #endif + + return &ret; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ecp_nist.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ecp_nist.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ecp_nist.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ecp_nist.c 2015-01-20 21:22:17.000000000 +0000 @@ -73,9 +73,6 @@ const EC_METHOD *EC_GFp_nist_method(void) { -#ifdef OPENSSL_FIPS - return fips_ec_gfp_nist_method(); -#else static const EC_METHOD ret = { EC_FLAGS_DEFAULT_OCT, NID_X9_62_prime_field, @@ -115,8 +112,12 @@ 0 /* field_decode */, 0 /* field_set_to_one */ }; - return &ret; +#ifdef OPENSSL_FIPS + if 
(FIPS_mode()) + return fips_ec_gfp_nist_method(); #endif + + return &ret; } int ec_GFp_nist_group_copy(EC_GROUP *dest, const EC_GROUP *src) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ecp_smpl.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ecp_smpl.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ecp_smpl.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ecp_smpl.c 2015-01-20 21:22:17.000000000 +0000 @@ -73,9 +73,6 @@ const EC_METHOD *EC_GFp_simple_method(void) { -#ifdef OPENSSL_FIPS - return fips_ec_gfp_simple_method(); -#else static const EC_METHOD ret = { EC_FLAGS_DEFAULT_OCT, NID_X9_62_prime_field, @@ -115,8 +112,12 @@ 0 /* field_decode */, 0 /* field_set_to_one */ }; - return &ret; +#ifdef OPENSSL_FIPS + if (FIPS_mode()) + return fips_ec_gfp_simple_method(); #endif + + return &ret; } @@ -1181,9 +1182,8 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], BN_CTX *ctx) { BN_CTX *new_ctx = NULL; - BIGNUM *tmp0, *tmp1; - size_t pow2 = 0; - BIGNUM **heap = NULL; + BIGNUM *tmp, *tmp_Z; + BIGNUM **prod_Z = NULL; size_t i; int ret = 0; @@ -1198,124 +1198,104 @@ } BN_CTX_start(ctx); - tmp0 = BN_CTX_get(ctx); - tmp1 = BN_CTX_get(ctx); - if (tmp0 == NULL || tmp1 == NULL) goto err; - - /* Before converting the individual points, compute inverses of all Z values. - * Modular inversion is rather slow, but luckily we can do with a single - * explicit inversion, plus about 3 multiplications per input value. - */ + tmp = BN_CTX_get(ctx); + tmp_Z = BN_CTX_get(ctx); + if (tmp == NULL || tmp_Z == NULL) goto err; - pow2 = 1; - while (num > pow2) - pow2 <<= 1; - /* Now pow2 is the smallest power of 2 satifsying pow2 >= num. - * We need twice that. 
*/ - pow2 <<= 1; - - heap = OPENSSL_malloc(pow2 * sizeof heap[0]); - if (heap == NULL) goto err; - - /* The array is used as a binary tree, exactly as in heapsort: - * - * heap[1] - * heap[2] heap[3] - * heap[4] heap[5] heap[6] heap[7] - * heap[8]heap[9] heap[10]heap[11] heap[12]heap[13] heap[14] heap[15] - * - * We put the Z's in the last line; - * then we set each other node to the product of its two child-nodes (where - * empty or 0 entries are treated as ones); - * then we invert heap[1]; - * then we invert each other node by replacing it by the product of its - * parent (after inversion) and its sibling (before inversion). - */ - heap[0] = NULL; - for (i = pow2/2 - 1; i > 0; i--) - heap[i] = NULL; + prod_Z = OPENSSL_malloc(num * sizeof prod_Z[0]); + if (prod_Z == NULL) goto err; for (i = 0; i < num; i++) - heap[pow2/2 + i] = &points[i]->Z; - for (i = pow2/2 + num; i < pow2; i++) - heap[i] = NULL; - - /* set each node to the product of its children */ - for (i = pow2/2 - 1; i > 0; i--) { - heap[i] = BN_new(); - if (heap[i] == NULL) goto err; - - if (heap[2*i] != NULL) + prod_Z[i] = BN_new(); + if (prod_Z[i] == NULL) goto err; + } + + /* Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z, + * skipping any zero-valued inputs (pretend that they're 1). 
*/ + + if (!BN_is_zero(&points[0]->Z)) + { + if (!BN_copy(prod_Z[0], &points[0]->Z)) goto err; + } + else + { + if (group->meth->field_set_to_one != 0) { - if ((heap[2*i + 1] == NULL) || BN_is_zero(heap[2*i + 1])) - { - if (!BN_copy(heap[i], heap[2*i])) goto err; - } - else - { - if (BN_is_zero(heap[2*i])) - { - if (!BN_copy(heap[i], heap[2*i + 1])) goto err; - } - else - { - if (!group->meth->field_mul(group, heap[i], - heap[2*i], heap[2*i + 1], ctx)) goto err; - } - } + if (!group->meth->field_set_to_one(group, prod_Z[0], ctx)) goto err; + } + else + { + if (!BN_one(prod_Z[0])) goto err; } } - /* invert heap[1] */ - if (!BN_is_zero(heap[1])) + for (i = 1; i < num; i++) { - if (!BN_mod_inverse(heap[1], heap[1], &group->field, ctx)) + if (!BN_is_zero(&points[i]->Z)) { - ECerr(EC_F_EC_GFP_SIMPLE_POINTS_MAKE_AFFINE, ERR_R_BN_LIB); - goto err; + if (!group->meth->field_mul(group, prod_Z[i], prod_Z[i - 1], &points[i]->Z, ctx)) goto err; + } + else + { + if (!BN_copy(prod_Z[i], prod_Z[i - 1])) goto err; } } + + /* Now use a single explicit inversion to replace every + * non-zero points[i]->Z by its inverse. */ + + if (!BN_mod_inverse(tmp, prod_Z[num - 1], &group->field, ctx)) + { + ECerr(EC_F_EC_GFP_SIMPLE_POINTS_MAKE_AFFINE, ERR_R_BN_LIB); + goto err; + } if (group->meth->field_encode != 0) { - /* in the Montgomery case, we just turned R*H (representing H) + /* In the Montgomery case, we just turned R*H (representing H) * into 1/(R*H), but we need R*(1/H) (representing 1/H); - * i.e. we have need to multiply by the Montgomery factor twice */ - if (!group->meth->field_encode(group, heap[1], heap[1], ctx)) goto err; - if (!group->meth->field_encode(group, heap[1], heap[1], ctx)) goto err; + * i.e. we need to multiply by the Montgomery factor twice. 
*/ + if (!group->meth->field_encode(group, tmp, tmp, ctx)) goto err; + if (!group->meth->field_encode(group, tmp, tmp, ctx)) goto err; } - /* set other heap[i]'s to their inverses */ - for (i = 2; i < pow2/2 + num; i += 2) + for (i = num - 1; i > 0; --i) { - /* i is even */ - if ((heap[i + 1] != NULL) && !BN_is_zero(heap[i + 1])) - { - if (!group->meth->field_mul(group, tmp0, heap[i/2], heap[i + 1], ctx)) goto err; - if (!group->meth->field_mul(group, tmp1, heap[i/2], heap[i], ctx)) goto err; - if (!BN_copy(heap[i], tmp0)) goto err; - if (!BN_copy(heap[i + 1], tmp1)) goto err; - } - else + /* Loop invariant: tmp is the product of the inverses of + * points[0]->Z .. points[i]->Z (zero-valued inputs skipped). */ + if (!BN_is_zero(&points[i]->Z)) { - if (!BN_copy(heap[i], heap[i/2])) goto err; + /* Set tmp_Z to the inverse of points[i]->Z (as product + * of Z inverses 0 .. i, Z values 0 .. i - 1). */ + if (!group->meth->field_mul(group, tmp_Z, prod_Z[i - 1], tmp, ctx)) goto err; + /* Update tmp to satisfy the loop invariant for i - 1. */ + if (!group->meth->field_mul(group, tmp, tmp, &points[i]->Z, ctx)) goto err; + /* Replace points[i]->Z by its inverse. */ + if (!BN_copy(&points[i]->Z, tmp_Z)) goto err; } } - /* we have replaced all non-zero Z's by their inverses, now fix up all the points */ + if (!BN_is_zero(&points[0]->Z)) + { + /* Replace points[0]->Z by its inverse. */ + if (!BN_copy(&points[0]->Z, tmp)) goto err; + } + + /* Finally, fix up the X and Y coordinates for all points. 
*/ + for (i = 0; i < num; i++) { EC_POINT *p = points[i]; - + if (!BN_is_zero(&p->Z)) { /* turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1) */ - if (!group->meth->field_sqr(group, tmp1, &p->Z, ctx)) goto err; - if (!group->meth->field_mul(group, &p->X, &p->X, tmp1, ctx)) goto err; + if (!group->meth->field_sqr(group, tmp, &p->Z, ctx)) goto err; + if (!group->meth->field_mul(group, &p->X, &p->X, tmp, ctx)) goto err; + + if (!group->meth->field_mul(group, tmp, tmp, &p->Z, ctx)) goto err; + if (!group->meth->field_mul(group, &p->Y, &p->Y, tmp, ctx)) goto err; - if (!group->meth->field_mul(group, tmp1, tmp1, &p->Z, ctx)) goto err; - if (!group->meth->field_mul(group, &p->Y, &p->Y, tmp1, ctx)) goto err; - if (group->meth->field_set_to_one != 0) { if (!group->meth->field_set_to_one(group, &p->Z, ctx)) goto err; @@ -1329,20 +1309,19 @@ } ret = 1; - + err: BN_CTX_end(ctx); if (new_ctx != NULL) BN_CTX_free(new_ctx); - if (heap != NULL) + if (prod_Z != NULL) { - /* heap[pow2/2] .. heap[pow2-1] have not been allocated locally! */ - for (i = pow2/2 - 1; i > 0; i--) + for (i = 0; i < num; i++) { - if (heap[i] != NULL) - BN_clear_free(heap[i]); + if (prod_Z[i] == NULL) break; + BN_clear_free(prod_Z[i]); } - OPENSSL_free(heap); + OPENSSL_free(prod_Z); } return ret; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ectest.c nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ectest.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/ectest.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/ectest.c 2015-01-20 21:22:17.000000000 +0000 @@ -199,6 +199,7 @@ EC_POINT *P = EC_POINT_new(group); EC_POINT *Q = EC_POINT_new(group); BN_CTX *ctx = BN_CTX_new(); + int i; n1 = BN_new(); n2 = BN_new(); order = BN_new(); fprintf(stdout, "verify group order ..."); @@ -212,21 +213,56 @@ if (!EC_POINT_mul(group, Q, order, NULL, NULL, ctx)) ABORT; if (!EC_POINT_is_at_infinity(group, Q)) ABORT; fprintf(stdout, " ok\n"); - fprintf(stdout, "long/negative scalar tests ... 
"); - if (!BN_one(n1)) ABORT; - /* n1 = 1 - order */ - if (!BN_sub(n1, n1, order)) ABORT; - if(!EC_POINT_mul(group, Q, NULL, P, n1, ctx)) ABORT; - if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; - /* n2 = 1 + order */ - if (!BN_add(n2, order, BN_value_one())) ABORT; - if(!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; - if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; - /* n2 = (1 - order) * (1 + order) */ - if (!BN_mul(n2, n1, n2, ctx)) ABORT; - if(!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; - if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + fprintf(stdout, "long/negative scalar tests "); + for (i = 1; i <= 2; i++) + { + const BIGNUM *scalars[6]; + const EC_POINT *points[6]; + + fprintf(stdout, i == 1 ? + "allowing precomputation ... " : + "without precomputation ... "); + if (!BN_set_word(n1, i)) ABORT; + /* If i == 1, P will be the predefined generator for which + * EC_GROUP_precompute_mult has set up precomputation. */ + if (!EC_POINT_mul(group, P, n1, NULL, NULL, ctx)) ABORT; + + if (!BN_one(n1)) ABORT; + /* n1 = 1 - order */ + if (!BN_sub(n1, n1, order)) ABORT; + if (!EC_POINT_mul(group, Q, NULL, P, n1, ctx)) ABORT; + if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + + /* n2 = 1 + order */ + if (!BN_add(n2, order, BN_value_one())) ABORT; + if (!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; + if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + + /* n2 = (1 - order) * (1 + order) = 1 - order^2 */ + if (!BN_mul(n2, n1, n2, ctx)) ABORT; + if (!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; + if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + + /* n2 = order^2 - 1 */ + BN_set_negative(n2, 0); + if (!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; + /* Add P to verify the result. */ + if (!EC_POINT_add(group, Q, Q, P, ctx)) ABORT; + if (!EC_POINT_is_at_infinity(group, Q)) ABORT; + + /* Exercise EC_POINTs_mul, including corner cases. 
*/ + if (EC_POINT_is_at_infinity(group, P)) ABORT; + scalars[0] = n1; points[0] = Q; /* => infinity */ + scalars[1] = n2; points[1] = P; /* => -P */ + scalars[2] = n1; points[2] = Q; /* => infinity */ + scalars[3] = n2; points[3] = Q; /* => infinity */ + scalars[4] = n1; points[4] = P; /* => P */ + scalars[5] = n2; points[5] = Q; /* => infinity */ + if (!EC_POINTs_mul(group, P, NULL, 6, points, scalars, ctx)) ABORT; + if (!EC_POINT_is_at_infinity(group, P)) ABORT; + } fprintf(stdout, "ok\n"); + EC_POINT_free(P); EC_POINT_free(Q); BN_free(n1); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ec/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ec/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/ec/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ec/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,263 +0,0 @@ -# -# crypto/ec/Makefile -# - -DIR= ec -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=ectest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= ec_lib.c ecp_smpl.c ecp_mont.c ecp_nist.c ec_cvt.c ec_mult.c\ - ec_err.c ec_curve.c ec_check.c ec_print.c ec_asn1.c ec_key.c\ - ec2_smpl.c ec2_mult.c ec_ameth.c ec_pmeth.c eck_prn.c \ - ecp_nistp224.c ecp_nistp256.c ecp_nistp521.c ecp_nistputil.c \ - ecp_oct.c ec2_oct.c ec_oct.c - -LIBOBJ= ec_lib.o ecp_smpl.o ecp_mont.o ecp_nist.o ec_cvt.o ec_mult.o\ - ec_err.o ec_curve.o ec_check.o ec_print.o ec_asn1.o ec_key.o\ - ec2_smpl.o ec2_mult.o ec_ameth.o ec_pmeth.o eck_prn.o \ - ecp_nistp224.o ecp_nistp256.o ecp_nistp521.o ecp_nistputil.o \ - ecp_oct.o ec2_oct.o ec_oct.o - -SRC= $(LIBSRC) - -EXHEADER= ec.h -HEADER= ec_lcl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -ec2_mult.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec2_mult.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec2_mult.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec2_mult.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec2_mult.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec2_mult.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec2_mult.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec2_mult.o: ../../include/openssl/symhacks.h ec2_mult.c ec_lcl.h -ec2_oct.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec2_oct.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec2_oct.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec2_oct.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec2_oct.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec2_oct.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec2_oct.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec2_oct.o: ../../include/openssl/symhacks.h ec2_oct.c ec_lcl.h -ec2_smpl.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec2_smpl.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec2_smpl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec2_smpl.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec2_smpl.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec2_smpl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec2_smpl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec2_smpl.o: ../../include/openssl/symhacks.h ec2_smpl.c ec_lcl.h -ec_ameth.o: ../../e_os.h ../../include/openssl/asn1.h -ec_ameth.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -ec_ameth.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -ec_ameth.o: ../../include/openssl/crypto.h 
../../include/openssl/e_os2.h -ec_ameth.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ec_ameth.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ec_ameth.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ec_ameth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ec_ameth.o: ../../include/openssl/opensslconf.h -ec_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_ameth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ec_ameth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ec_ameth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ec_ameth.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -ec_ameth.o: ec_ameth.c -ec_asn1.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -ec_asn1.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -ec_asn1.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ec_asn1.o: ../../include/openssl/ec.h ../../include/openssl/err.h -ec_asn1.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ec_asn1.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -ec_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_asn1.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_asn1.o: ../../include/openssl/symhacks.h ec_asn1.c ec_lcl.h -ec_check.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_check.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_check.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_check.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_check.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_check.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_check.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_check.o: ../../include/openssl/symhacks.h ec_check.c 
ec_lcl.h -ec_curve.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_curve.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_curve.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_curve.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_curve.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_curve.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_curve.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_curve.o: ../../include/openssl/symhacks.h ec_curve.c ec_lcl.h -ec_cvt.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_cvt.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_cvt.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_cvt.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_cvt.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_cvt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_cvt.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_cvt.o: ../../include/openssl/symhacks.h ec_cvt.c ec_lcl.h -ec_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ec_err.o: ../../include/openssl/ec.h ../../include/openssl/err.h -ec_err.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -ec_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_err.o: ../../include/openssl/symhacks.h ec_err.c -ec_key.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_key.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_key.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_key.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_key.o: ../../include/openssl/obj_mac.h 
../../include/openssl/opensslconf.h -ec_key.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_key.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_key.o: ../../include/openssl/symhacks.h ec_key.c ec_lcl.h -ec_lib.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_lib.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_lib.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_lib.o: ../../include/openssl/symhacks.h ec_lcl.h ec_lib.c -ec_mult.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_mult.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_mult.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_mult.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_mult.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_mult.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_mult.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_mult.o: ../../include/openssl/symhacks.h ec_lcl.h ec_mult.c -ec_oct.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_oct.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_oct.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_oct.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ec_oct.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_oct.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_oct.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_oct.o: ../../include/openssl/symhacks.h ec_lcl.h ec_oct.c -ec_pmeth.o: ../../e_os.h 
../../include/openssl/asn1.h -ec_pmeth.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -ec_pmeth.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ec_pmeth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_pmeth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ec_pmeth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ec_pmeth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ec_pmeth.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -ec_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ec_pmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ec_pmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ec_pmeth.o: ../../include/openssl/x509_vfy.h ../cryptlib.h ../evp/evp_locl.h -ec_pmeth.o: ec_pmeth.c -ec_print.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ec_print.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ec_print.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ec_print.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ec_print.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ec_print.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ec_print.o: ../../include/openssl/symhacks.h ec_lcl.h ec_print.c -eck_prn.o: ../../e_os.h ../../include/openssl/asn1.h -eck_prn.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -eck_prn.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -eck_prn.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -eck_prn.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eck_prn.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eck_prn.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eck_prn.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -eck_prn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -eck_prn.o: ../../include/openssl/symhacks.h ../cryptlib.h eck_prn.c -ecp_mont.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecp_mont.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ecp_mont.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecp_mont.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ecp_mont.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ecp_mont.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecp_mont.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecp_mont.o: ../../include/openssl/symhacks.h ec_lcl.h ecp_mont.c -ecp_nist.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecp_nist.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ecp_nist.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecp_nist.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ecp_nist.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ecp_nist.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecp_nist.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecp_nist.o: ../../include/openssl/symhacks.h ec_lcl.h ecp_nist.c -ecp_nistp224.o: ../../include/openssl/opensslconf.h ecp_nistp224.c -ecp_nistp256.o: ../../include/openssl/opensslconf.h ecp_nistp256.c -ecp_nistp521.o: ../../include/openssl/opensslconf.h ecp_nistp521.c -ecp_nistputil.o: ../../include/openssl/opensslconf.h ecp_nistputil.c -ecp_oct.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecp_oct.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ecp_oct.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecp_oct.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ecp_oct.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ecp_oct.o: 
../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecp_oct.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecp_oct.o: ../../include/openssl/symhacks.h ec_lcl.h ecp_oct.c -ecp_smpl.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecp_smpl.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ecp_smpl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecp_smpl.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ecp_smpl.o: ../../include/openssl/obj_mac.h ../../include/openssl/opensslconf.h -ecp_smpl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecp_smpl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecp_smpl.o: ../../include/openssl/symhacks.h ec_lcl.h ecp_smpl.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ecdh/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ecdh/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/ecdh/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ecdh/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,116 +0,0 @@ -# -# crypto/ecdh/Makefile -# - -DIR= ecdh -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -Wall -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=ecdhtest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= ech_lib.c ech_ossl.c ech_key.c ech_err.c - -LIBOBJ= ech_lib.o ech_ossl.o ech_key.o ech_err.o - -SRC= $(LIBSRC) - -EXHEADER= ecdh.h -HEADER= ech_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -ech_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ech_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ech_err.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ech_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ech_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ech_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ech_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ech_err.o: ech_err.c -ech_key.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ech_key.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ech_key.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ech_key.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ech_key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ech_key.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ech_key.o: ech_key.c ech_locl.h -ech_lib.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ech_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ech_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ech_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ech_lib.o: ../../include/openssl/engine.h ../../include/openssl/err.h -ech_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ech_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ech_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ech_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ech_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ech_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ech_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ech_lib.o: ech_lib.c ech_locl.h -ech_ossl.o: ../../e_os.h ../../include/openssl/asn1.h -ech_ossl.o: 
../../include/openssl/bio.h ../../include/openssl/bn.h -ech_ossl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ech_ossl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ech_ossl.o: ../../include/openssl/ecdh.h ../../include/openssl/err.h -ech_ossl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ech_ossl.o: ../../include/openssl/opensslconf.h -ech_ossl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ech_ossl.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ech_ossl.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ech_ossl.o: ../cryptlib.h ech_locl.h ech_ossl.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ecdsa/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ecdsa/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/ecdsa/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ecdsa/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -# -# crypto/ecdsa/Makefile -# - -DIR= ecdsa -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -Wall -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=ecdsatest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= ecs_lib.c ecs_asn1.c ecs_ossl.c ecs_sign.c ecs_vrf.c ecs_err.c - -LIBOBJ= ecs_lib.o ecs_asn1.o ecs_ossl.o ecs_sign.o ecs_vrf.o ecs_err.o - -SRC= $(LIBSRC) - -EXHEADER= ecdsa.h -HEADER= ecs_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -ecs_asn1.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -ecs_asn1.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -ecs_asn1.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecs_asn1.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ecs_asn1.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -ecs_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecs_asn1.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecs_asn1.o: ../../include/openssl/symhacks.h ecs_asn1.c ecs_locl.h -ecs_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecs_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ecs_err.o: ../../include/openssl/ec.h ../../include/openssl/ecdsa.h -ecs_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -ecs_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ecs_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ecs_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ecs_err.o: ecs_err.c -ecs_lib.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecs_lib.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -ecs_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ecs_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ecs_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -ecs_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ecs_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ecs_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -ecs_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecs_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ecs_lib.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ecs_lib.o: ../../include/openssl/symhacks.h 
../../include/openssl/x509.h -ecs_lib.o: ../../include/openssl/x509_vfy.h ecs_lib.c ecs_locl.h -ecs_ossl.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecs_ossl.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -ecs_ossl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecs_ossl.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ecs_ossl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ecs_ossl.o: ../../include/openssl/opensslconf.h -ecs_ossl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecs_ossl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ecs_ossl.o: ../../include/openssl/symhacks.h ecs_locl.h ecs_ossl.c -ecs_sign.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecs_sign.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ecs_sign.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecs_sign.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ecs_sign.o: ../../include/openssl/engine.h ../../include/openssl/evp.h -ecs_sign.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ecs_sign.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -ecs_sign.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecs_sign.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -ecs_sign.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ecs_sign.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ecs_sign.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ecs_sign.o: ecs_locl.h ecs_sign.c -ecs_vrf.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ecs_vrf.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ecs_vrf.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ecs_vrf.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ecs_vrf.o: ../../include/openssl/engine.h 
../../include/openssl/evp.h -ecs_vrf.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ecs_vrf.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -ecs_vrf.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ecs_vrf.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ecs_vrf.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ecs_vrf.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ecs_vrf.o: ../../include/openssl/x509_vfy.h ecs_locl.h ecs_vrf.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/engine/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/engine/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/engine/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/engine/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,447 +0,0 @@ -# -# OpenSSL/crypto/engine/Makefile -# - -DIR= engine -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= enginetest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= eng_err.c eng_lib.c eng_list.c eng_init.c eng_ctrl.c \ - eng_table.c eng_pkey.c eng_fat.c eng_all.c \ - tb_rsa.c tb_dsa.c tb_ecdsa.c tb_dh.c tb_ecdh.c tb_rand.c tb_store.c \ - tb_cipher.c tb_digest.c tb_pkmeth.c tb_asnmth.c \ - eng_openssl.c eng_cnf.c eng_dyn.c eng_cryptodev.c \ - eng_rsax.c eng_rdrand.c -LIBOBJ= eng_err.o eng_lib.o eng_list.o eng_init.o eng_ctrl.o \ - eng_table.o eng_pkey.o eng_fat.o eng_all.o \ - tb_rsa.o tb_dsa.o tb_ecdsa.o tb_dh.o tb_ecdh.o tb_rand.o tb_store.o \ - tb_cipher.o tb_digest.o tb_pkmeth.o tb_asnmth.o \ - eng_openssl.o eng_cnf.o eng_dyn.o eng_cryptodev.o \ - eng_rsax.o eng_rdrand.o - -SRC= $(LIBSRC) - -EXHEADER= engine.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) 
$(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -eng_all.o: ../../e_os.h ../../include/openssl/asn1.h -eng_all.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_all.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_all.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_all.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_all.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_all.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_all.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_all.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_all.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -eng_all.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_all.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_all.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_all.c eng_int.h -eng_cnf.o: ../../e_os.h ../../include/openssl/asn1.h -eng_cnf.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_cnf.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -eng_cnf.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -eng_cnf.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -eng_cnf.o: ../../include/openssl/engine.h ../../include/openssl/err.h -eng_cnf.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_cnf.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -eng_cnf.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -eng_cnf.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -eng_cnf.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_cnf.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_cnf.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_cnf.o: ../cryptlib.h eng_cnf.c eng_int.h -eng_cryptodev.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h 
-eng_cryptodev.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -eng_cryptodev.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_cryptodev.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_cryptodev.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_cryptodev.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_cryptodev.o: ../../include/openssl/obj_mac.h -eng_cryptodev.o: ../../include/openssl/objects.h -eng_cryptodev.o: ../../include/openssl/opensslconf.h -eng_cryptodev.o: ../../include/openssl/opensslv.h -eng_cryptodev.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -eng_cryptodev.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_cryptodev.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_cryptodev.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_cryptodev.o: eng_cryptodev.c -eng_ctrl.o: ../../e_os.h ../../include/openssl/asn1.h -eng_ctrl.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_ctrl.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_ctrl.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_ctrl.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_ctrl.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_ctrl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_ctrl.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_ctrl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_ctrl.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -eng_ctrl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_ctrl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_ctrl.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_ctrl.c eng_int.h -eng_dyn.o: ../../e_os.h ../../include/openssl/asn1.h -eng_dyn.o: ../../include/openssl/bio.h 
../../include/openssl/buffer.h -eng_dyn.o: ../../include/openssl/crypto.h ../../include/openssl/dso.h -eng_dyn.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -eng_dyn.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -eng_dyn.o: ../../include/openssl/engine.h ../../include/openssl/err.h -eng_dyn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_dyn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -eng_dyn.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -eng_dyn.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -eng_dyn.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_dyn.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_dyn.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_dyn.o: ../cryptlib.h eng_dyn.c eng_int.h -eng_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -eng_err.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -eng_err.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -eng_err.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -eng_err.o: ../../include/openssl/engine.h ../../include/openssl/err.h -eng_err.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_err.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -eng_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -eng_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -eng_err.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_err.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_err.o: eng_err.c -eng_fat.o: ../../e_os.h ../../include/openssl/asn1.h -eng_fat.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_fat.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -eng_fat.o: 
../../include/openssl/e_os2.h ../../include/openssl/ec.h -eng_fat.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -eng_fat.o: ../../include/openssl/engine.h ../../include/openssl/err.h -eng_fat.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_fat.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -eng_fat.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -eng_fat.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -eng_fat.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_fat.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_fat.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_fat.o: ../cryptlib.h eng_fat.c eng_int.h -eng_init.o: ../../e_os.h ../../include/openssl/asn1.h -eng_init.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_init.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_init.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_init.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_init.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_init.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_init.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_init.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_init.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -eng_init.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_init.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_init.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_init.c eng_int.h -eng_lib.o: ../../e_os.h ../../include/openssl/asn1.h -eng_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_lib.o: 
../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -eng_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_lib.o: ../cryptlib.h eng_int.h eng_lib.c -eng_list.o: ../../e_os.h ../../include/openssl/asn1.h -eng_list.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_list.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_list.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_list.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_list.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_list.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_list.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_list.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_list.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -eng_list.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_list.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_list.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h eng_list.c -eng_openssl.o: ../../e_os.h ../../include/openssl/asn1.h -eng_openssl.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_openssl.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -eng_openssl.o: ../../include/openssl/dsa.h ../../include/openssl/dso.h -eng_openssl.o: ../../include/openssl/e_os2.h 
../../include/openssl/ec.h -eng_openssl.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -eng_openssl.o: ../../include/openssl/engine.h ../../include/openssl/err.h -eng_openssl.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_openssl.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -eng_openssl.o: ../../include/openssl/opensslconf.h -eng_openssl.o: ../../include/openssl/opensslv.h -eng_openssl.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -eng_openssl.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -eng_openssl.o: ../../include/openssl/rand.h ../../include/openssl/rc4.h -eng_openssl.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -eng_openssl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_openssl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_openssl.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_openssl.c -eng_pkey.o: ../../e_os.h ../../include/openssl/asn1.h -eng_pkey.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -eng_pkey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_pkey.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_pkey.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_pkey.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_pkey.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_pkey.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_pkey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_pkey.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -eng_pkey.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_pkey.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_pkey.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h eng_pkey.c -eng_rdrand.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h 
-eng_rdrand.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -eng_rdrand.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -eng_rdrand.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -eng_rdrand.o: ../../include/openssl/engine.h ../../include/openssl/err.h -eng_rdrand.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -eng_rdrand.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -eng_rdrand.o: ../../include/openssl/opensslconf.h -eng_rdrand.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_rdrand.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -eng_rdrand.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_rdrand.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_rdrand.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_rdrand.o: eng_rdrand.c -eng_rsax.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -eng_rsax.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -eng_rsax.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_rsax.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_rsax.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_rsax.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_rsax.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_rsax.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -eng_rsax.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_rsax.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -eng_rsax.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -eng_rsax.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -eng_rsax.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -eng_rsax.o: eng_rsax.c -eng_table.o: ../../e_os.h ../../include/openssl/asn1.h -eng_table.o: ../../include/openssl/bio.h 
../../include/openssl/buffer.h -eng_table.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -eng_table.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -eng_table.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -eng_table.o: ../../include/openssl/err.h ../../include/openssl/evp.h -eng_table.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -eng_table.o: ../../include/openssl/objects.h -eng_table.o: ../../include/openssl/opensslconf.h -eng_table.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -eng_table.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -eng_table.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -eng_table.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -eng_table.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h -eng_table.o: eng_table.c -tb_asnmth.o: ../../e_os.h ../../include/openssl/asn1.h -tb_asnmth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_asnmth.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_asnmth.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_asnmth.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_asnmth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_asnmth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_asnmth.o: ../../include/openssl/objects.h -tb_asnmth.o: ../../include/openssl/opensslconf.h -tb_asnmth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_asnmth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_asnmth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_asnmth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_asnmth.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -tb_asnmth.o: eng_int.h tb_asnmth.c -tb_cipher.o: ../../e_os.h ../../include/openssl/asn1.h -tb_cipher.o: 
../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_cipher.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_cipher.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_cipher.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_cipher.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_cipher.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_cipher.o: ../../include/openssl/objects.h -tb_cipher.o: ../../include/openssl/opensslconf.h -tb_cipher.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_cipher.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_cipher.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_cipher.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_cipher.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h -tb_cipher.o: tb_cipher.c -tb_dh.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -tb_dh.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -tb_dh.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -tb_dh.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -tb_dh.o: ../../include/openssl/engine.h ../../include/openssl/err.h -tb_dh.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -tb_dh.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -tb_dh.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -tb_dh.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -tb_dh.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -tb_dh.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -tb_dh.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -tb_dh.o: ../cryptlib.h eng_int.h tb_dh.c -tb_digest.o: ../../e_os.h ../../include/openssl/asn1.h -tb_digest.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_digest.o: 
../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_digest.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_digest.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_digest.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_digest.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_digest.o: ../../include/openssl/objects.h -tb_digest.o: ../../include/openssl/opensslconf.h -tb_digest.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_digest.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_digest.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_digest.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_digest.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h -tb_digest.o: tb_digest.c -tb_dsa.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -tb_dsa.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -tb_dsa.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -tb_dsa.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -tb_dsa.o: ../../include/openssl/engine.h ../../include/openssl/err.h -tb_dsa.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -tb_dsa.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -tb_dsa.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -tb_dsa.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -tb_dsa.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -tb_dsa.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -tb_dsa.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -tb_dsa.o: ../cryptlib.h eng_int.h tb_dsa.c -tb_ecdh.o: ../../e_os.h ../../include/openssl/asn1.h -tb_ecdh.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_ecdh.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_ecdh.o: 
../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_ecdh.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_ecdh.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_ecdh.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_ecdh.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tb_ecdh.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_ecdh.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_ecdh.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_ecdh.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_ecdh.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h tb_ecdh.c -tb_ecdsa.o: ../../e_os.h ../../include/openssl/asn1.h -tb_ecdsa.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_ecdsa.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_ecdsa.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_ecdsa.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_ecdsa.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_ecdsa.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_ecdsa.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tb_ecdsa.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_ecdsa.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_ecdsa.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_ecdsa.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_ecdsa.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h tb_ecdsa.c -tb_pkmeth.o: ../../e_os.h ../../include/openssl/asn1.h -tb_pkmeth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_pkmeth.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_pkmeth.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_pkmeth.o: ../../include/openssl/ecdsa.h 
../../include/openssl/engine.h -tb_pkmeth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_pkmeth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_pkmeth.o: ../../include/openssl/objects.h -tb_pkmeth.o: ../../include/openssl/opensslconf.h -tb_pkmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_pkmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_pkmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_pkmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_pkmeth.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h -tb_pkmeth.o: tb_pkmeth.c -tb_rand.o: ../../e_os.h ../../include/openssl/asn1.h -tb_rand.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_rand.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_rand.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_rand.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_rand.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_rand.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_rand.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tb_rand.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_rand.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_rand.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_rand.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_rand.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h tb_rand.c -tb_rsa.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -tb_rsa.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -tb_rsa.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -tb_rsa.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -tb_rsa.o: ../../include/openssl/engine.h ../../include/openssl/err.h -tb_rsa.o: 
../../include/openssl/evp.h ../../include/openssl/lhash.h -tb_rsa.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -tb_rsa.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -tb_rsa.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -tb_rsa.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -tb_rsa.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -tb_rsa.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -tb_rsa.o: ../cryptlib.h eng_int.h tb_rsa.c -tb_store.o: ../../e_os.h ../../include/openssl/asn1.h -tb_store.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -tb_store.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -tb_store.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -tb_store.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -tb_store.o: ../../include/openssl/err.h ../../include/openssl/evp.h -tb_store.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -tb_store.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -tb_store.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -tb_store.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -tb_store.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -tb_store.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -tb_store.o: ../../include/openssl/x509_vfy.h ../cryptlib.h eng_int.h tb_store.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/err/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/err/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/err/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/err/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -# -# OpenSSL/crypto/err/Makefile -# - -DIR= err -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=err.c err_all.c err_prn.c -LIBOBJ=err.o err_all.o err_prn.o - -SRC= $(LIBSRC) - -EXHEADER= err.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -err.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/buffer.h -err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -err.o: ../cryptlib.h err.c -err_all.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -err_all.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -err_all.o: ../../include/openssl/cms.h ../../include/openssl/comp.h -err_all.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -err_all.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -err_all.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -err_all.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -err_all.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -err_all.o: ../../include/openssl/err.h ../../include/openssl/evp.h -err_all.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -err_all.o: ../../include/openssl/objects.h ../../include/openssl/ocsp.h -err_all.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -err_all.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem2.h -err_all.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -err_all.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -err_all.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -err_all.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -err_all.o: ../../include/openssl/ts.h ../../include/openssl/ui.h -err_all.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -err_all.o: ../../include/openssl/x509v3.h err_all.c -err_prn.o: ../../e_os.h ../../include/openssl/bio.h -err_prn.o: ../../include/openssl/buffer.h 
../../include/openssl/crypto.h -err_prn.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -err_prn.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -err_prn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -err_prn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -err_prn.o: ../../include/openssl/symhacks.h ../cryptlib.h err_prn.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/err/openssl.ec nodejs-0.11.15/deps/openssl/openssl/crypto/err/openssl.ec --- nodejs-0.11.13/deps/openssl/openssl/crypto/err/openssl.ec 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/err/openssl.ec 2015-01-20 21:22:17.000000000 +0000 @@ -71,6 +71,7 @@ R SSL_R_TLSV1_ALERT_PROTOCOL_VERSION 1070 R SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY 1071 R SSL_R_TLSV1_ALERT_INTERNAL_ERROR 1080 +R SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK 1086 R SSL_R_TLSV1_ALERT_USER_CANCELLED 1090 R SSL_R_TLSV1_ALERT_NO_RENEGOTIATION 1100 R SSL_R_TLSV1_UNSUPPORTED_EXTENSION 1110 diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/bio_b64.c nodejs-0.11.15/deps/openssl/openssl/crypto/evp/bio_b64.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/bio_b64.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/bio_b64.c 2015-01-20 21:22:17.000000000 +0000 @@ -226,6 +226,7 @@ else if (ctx->start) { q=p=(unsigned char *)ctx->tmp; + num = 0; for (j=0; j<i; j++) { if (*(q++) != '\n') continue; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/e_aes.c nodejs-0.11.15/deps/openssl/openssl/crypto/evp/e_aes.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/e_aes.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/e_aes.c 2015-01-20 21:22:17.000000000 +0000 @@ -166,7 +166,7 @@ #define VPAES_CAPABLE (OPENSSL_ia32cap_P[1]&(1<<(41-32))) #endif #ifdef BSAES_ASM -#define BSAES_CAPABLE VPAES_CAPABLE +#define BSAES_CAPABLE 
(OPENSSL_ia32cap_P[1]&(1<<(41-32))) #endif /* * AES-NI section @@ -808,6 +808,28 @@ /* Extra padding: tag appended to record */ return EVP_GCM_TLS_TAG_LEN; + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_AES_GCM_CTX *gctx_out = out->cipher_data; + if (gctx->gcm.key) + { + if (gctx->gcm.key != &gctx->ks) + return 0; + gctx_out->gcm.key = &gctx_out->ks; + } + if (gctx->iv == c->iv) + gctx_out->iv = out->iv; + else + { + gctx_out->iv = OPENSSL_malloc(gctx->ivlen); + if (!gctx_out->iv) + return 0; + memcpy(gctx_out->iv, gctx->iv, gctx->ivlen); + } + return 1; + } + default: return -1; @@ -1032,7 +1054,8 @@ #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ - | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT) + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_CUSTOM_COPY) BLOCK_CIPHER_custom(NID_aes,128,1,12,gcm,GCM, EVP_CIPH_FLAG_FIPS|EVP_CIPH_FLAG_AEAD_CIPHER|CUSTOM_FLAGS) @@ -1044,7 +1067,25 @@ static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { EVP_AES_XTS_CTX *xctx = c->cipher_data; - if (type != EVP_CTRL_INIT) + if (type == EVP_CTRL_COPY) + { + EVP_CIPHER_CTX *out = ptr; + EVP_AES_XTS_CTX *xctx_out = out->cipher_data; + if (xctx->xts.key1) + { + if (xctx->xts.key1 != &xctx->ks1) + return 0; + xctx_out->xts.key1 = &xctx_out->ks1; + } + if (xctx->xts.key2) + { + if (xctx->xts.key2 != &xctx->ks2) + return 0; + xctx_out->xts.key2 = &xctx_out->ks2; + } + return 1; + } + else if (type != EVP_CTRL_INIT) return -1; /* key1 and key2 are used as an indicator both key and IV are set */ xctx->xts.key1 = NULL; @@ -1153,7 +1194,8 @@ #define aes_xts_cleanup NULL #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \ - | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT) + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_CUSTOM_COPY) BLOCK_CIPHER_custom(NID_aes,128,1,16,xts,XTS,EVP_CIPH_FLAG_FIPS|XTS_FLAGS) 
BLOCK_CIPHER_custom(NID_aes,256,1,16,xts,XTS,EVP_CIPH_FLAG_FIPS|XTS_FLAGS) @@ -1203,6 +1245,19 @@ cctx->len_set = 0; return 1; + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_AES_CCM_CTX *cctx_out = out->cipher_data; + if (cctx->ccm.key) + { + if (cctx->ccm.key != &cctx->ks) + return 0; + cctx_out->ccm.key = &cctx_out->ks; + } + return 1; + } + default: return -1; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/encode.c nodejs-0.11.15/deps/openssl/openssl/crypto/evp/encode.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/encode.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/encode.c 2015-01-20 21:22:17.000000000 +0000 @@ -324,6 +324,7 @@ v=EVP_DecodeBlock(out,d,n); n=0; if (v < 0) { rv=0; goto end; } + if (eof > v) { rv=-1; goto end; } ret+=(v-eof); } else diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/evp_enc.c nodejs-0.11.15/deps/openssl/openssl/crypto/evp/evp_enc.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/evp_enc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/evp_enc.c 2015-01-20 21:22:17.000000000 +0000 @@ -67,6 +67,7 @@ #ifdef OPENSSL_FIPS #include <openssl/fips.h> #endif +#include "constant_time_locl.h" #include "evp_locl.h" #ifdef OPENSSL_FIPS @@ -500,21 +501,21 @@ int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl) { - int i,n; - unsigned int b; + unsigned int i, b; + unsigned char pad, padding_good; *outl=0; if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { - i = M_do_cipher(ctx, out, NULL, 0); - if (i < 0) + int ret = M_do_cipher(ctx, out, NULL, 0); + if (ret < 0) return 0; else - *outl = i; + *outl = ret; return 1; } - b=ctx->cipher->block_size; + b=(unsigned int)(ctx->cipher->block_size); if (ctx->flags & EVP_CIPH_NO_PADDING) { if(ctx->buf_len) @@ -533,28 +534,34 @@ return(0); } OPENSSL_assert(b <= sizeof ctx->final); - n=ctx->final[b-1]; - if (n == 0 || n > (int)b) - { - 
EVPerr(EVP_F_EVP_DECRYPTFINAL_EX,EVP_R_BAD_DECRYPT); - return(0); - } - for (i=0; i<n; i++) + pad=ctx->final[b-1]; + + padding_good = (unsigned char)(~constant_time_is_zero_8(pad)); + padding_good &= constant_time_ge_8(b, pad); + + for (i = 1; i < b; ++i) { - if (ctx->final[--b] != n) - { - EVPerr(EVP_F_EVP_DECRYPTFINAL_EX,EVP_R_BAD_DECRYPT); - return(0); - } - } - n=ctx->cipher->block_size-n; - for (i=0; i<n; i++) - out[i]=ctx->final[i]; - *outl=n; + unsigned char is_pad_index = constant_time_lt_8(i, pad); + unsigned char pad_byte_good = constant_time_eq_8(ctx->final[b-i-1], pad); + padding_good &= constant_time_select_8(is_pad_index, pad_byte_good, 0xff); + } + + /* + * At least 1 byte is always padding, so we always write b - 1 + * bytes to avoid a timing leak. The caller is required to have |b| + * bytes space in |out| by the API contract. + */ + for (i = 0; i < b - 1; ++i) + out[i] = ctx->final[i] & padding_good; + /* Safe cast: for a good padding, EVP_MAX_IV_LENGTH >= b >= pad */ + *outl = padding_good & ((unsigned char)(b - pad)); + return padding_good & 1; } else - *outl=0; - return(1); + { + *outl = 0; + return 1; + } } void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) @@ -678,4 +685,3 @@ return in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY, 0, out); return 1; } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/evp_pbe.c nodejs-0.11.15/deps/openssl/openssl/crypto/evp/evp_pbe.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/evp_pbe.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/evp_pbe.c 2015-01-20 21:22:17.000000000 +0000 @@ -259,7 +259,7 @@ { int cipher_nid, md_nid; if (cipher) - cipher_nid = EVP_CIPHER_type(cipher); + cipher_nid = EVP_CIPHER_nid(cipher); else cipher_nid = -1; if (md) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/Makefile nodejs-0.11.15/deps/openssl/openssl/crypto/evp/Makefile --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/Makefile 2014-05-02 00:49:51.000000000 +0000 
+++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -383,7 +383,7 @@ evp_enc.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h evp_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h evp_enc.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -evp_enc.o: ../cryptlib.h evp_enc.c evp_locl.h +evp_enc.o: ../constant_time_locl.h ../cryptlib.h evp_enc.c evp_locl.h evp_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h evp_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h evp_err.o: ../../include/openssl/err.h ../../include/openssl/evp.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/evp/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/evp/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/evp/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/evp/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,776 +0,0 @@ -# -# OpenSSL/crypto/evp/Makefile -# - -DIR= evp -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=evp_test.c -TESTDATA=evptests.txt -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= encode.c digest.c evp_enc.c evp_key.c evp_acnf.c evp_cnf.c \ - e_des.c e_bf.c e_idea.c e_des3.c e_camellia.c\ - e_rc4.c e_aes.c names.c e_seed.c \ - e_xcbc_d.c e_rc2.c e_cast.c e_rc5.c \ - m_null.c m_md2.c m_md4.c m_md5.c m_sha.c m_sha1.c m_wp.c \ - m_dss.c m_dss1.c m_mdc2.c m_ripemd.c m_ecdsa.c\ - p_open.c p_seal.c p_sign.c p_verify.c p_lib.c p_enc.c p_dec.c \ - bio_md.c bio_b64.c bio_enc.c evp_err.c e_null.c \ - c_all.c c_allc.c c_alld.c evp_lib.c bio_ok.c \ - evp_pkey.c evp_pbe.c p5_crpt.c p5_crpt2.c \ - e_old.c pmeth_lib.c pmeth_fn.c pmeth_gn.c m_sigver.c evp_fips.c \ - e_aes_cbc_hmac_sha1.c e_rc4_hmac_md5.c - -LIBOBJ= encode.o digest.o evp_enc.o evp_key.o evp_acnf.o evp_cnf.o \ - e_des.o e_bf.o e_idea.o e_des3.o e_camellia.o\ - e_rc4.o e_aes.o names.o e_seed.o \ - e_xcbc_d.o e_rc2.o e_cast.o e_rc5.o \ - m_null.o m_md2.o m_md4.o m_md5.o m_sha.o m_sha1.o m_wp.o \ - m_dss.o m_dss1.o m_mdc2.o m_ripemd.o m_ecdsa.o\ - p_open.o p_seal.o p_sign.o p_verify.o p_lib.o p_enc.o p_dec.o \ - bio_md.o bio_b64.o bio_enc.o evp_err.o e_null.o \ - c_all.o c_allc.o c_alld.o evp_lib.o bio_ok.o \ - evp_pkey.o evp_pbe.o p5_crpt.o p5_crpt2.o \ - e_old.o pmeth_lib.o pmeth_fn.o pmeth_gn.o m_sigver.o evp_fips.o \ - e_aes_cbc_hmac_sha1.o e_rc4_hmac_md5.o - -SRC= $(LIBSRC) - -EXHEADER= evp.h -HEADER= evp_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @[ -f $(TESTDATA) ] && cp $(TESTDATA) ../../test && echo "$(TESTDATA) -> ../../test/$(TESTDATA)" - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -bio_b64.o: ../../e_os.h ../../include/openssl/asn1.h -bio_b64.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -bio_b64.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -bio_b64.o: ../../include/openssl/err.h ../../include/openssl/evp.h -bio_b64.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -bio_b64.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -bio_b64.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_b64.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_b64.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_b64.c -bio_enc.o: ../../e_os.h ../../include/openssl/asn1.h -bio_enc.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -bio_enc.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -bio_enc.o: ../../include/openssl/err.h ../../include/openssl/evp.h -bio_enc.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -bio_enc.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -bio_enc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -bio_enc.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_enc.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_enc.c -bio_md.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -bio_md.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bio_md.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bio_md.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -bio_md.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -bio_md.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -bio_md.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -bio_md.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -bio_md.o: ../cryptlib.h bio_md.c -bio_ok.o: ../../e_os.h ../../include/openssl/asn1.h 
../../include/openssl/bio.h -bio_ok.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -bio_ok.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -bio_ok.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -bio_ok.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -bio_ok.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -bio_ok.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -bio_ok.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_ok.o: ../../include/openssl/symhacks.h ../cryptlib.h bio_ok.c -c_all.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -c_all.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -c_all.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -c_all.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -c_all.o: ../../include/openssl/engine.h ../../include/openssl/err.h -c_all.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -c_all.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -c_all.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -c_all.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -c_all.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -c_all.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -c_all.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -c_all.o: ../cryptlib.h c_all.c -c_allc.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -c_allc.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -c_allc.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -c_allc.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -c_allc.o: ../../include/openssl/err.h ../../include/openssl/evp.h -c_allc.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -c_allc.o: ../../include/openssl/objects.h 
../../include/openssl/opensslconf.h -c_allc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -c_allc.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -c_allc.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -c_allc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -c_allc.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -c_allc.o: ../cryptlib.h c_allc.c -c_alld.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -c_alld.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -c_alld.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -c_alld.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -c_alld.o: ../../include/openssl/err.h ../../include/openssl/evp.h -c_alld.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -c_alld.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -c_alld.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -c_alld.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -c_alld.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -c_alld.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -c_alld.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -c_alld.o: ../cryptlib.h c_alld.c -digest.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -digest.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -digest.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -digest.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -digest.o: ../../include/openssl/engine.h ../../include/openssl/err.h -digest.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -digest.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -digest.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -digest.o: ../../include/openssl/ossl_typ.h 
../../include/openssl/pkcs7.h -digest.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -digest.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -digest.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -digest.o: ../cryptlib.h digest.c -e_aes.o: ../../include/openssl/aes.h ../../include/openssl/asn1.h -e_aes.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -e_aes.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_aes.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_aes.o: ../../include/openssl/modes.h ../../include/openssl/obj_mac.h -e_aes.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -e_aes.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_aes.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -e_aes.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -e_aes.o: ../modes/modes_lcl.h e_aes.c evp_locl.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/aes.h ../../include/openssl/asn1.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/bio.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/crypto.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/e_os2.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/evp.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/obj_mac.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/objects.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/opensslconf.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/opensslv.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/ossl_typ.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/safestack.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/sha.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/stack.h -e_aes_cbc_hmac_sha1.o: ../../include/openssl/symhacks.h e_aes_cbc_hmac_sha1.c -e_aes_cbc_hmac_sha1.o: evp_locl.h -e_bf.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_bf.o: ../../include/openssl/blowfish.h ../../include/openssl/buffer.h -e_bf.o: 
../../include/openssl/crypto.h ../../include/openssl/e_os2.h -e_bf.o: ../../include/openssl/err.h ../../include/openssl/evp.h -e_bf.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -e_bf.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -e_bf.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_bf.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_bf.o: ../../include/openssl/symhacks.h ../cryptlib.h e_bf.c evp_locl.h -e_camellia.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_camellia.o: ../../include/openssl/camellia.h ../../include/openssl/crypto.h -e_camellia.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_camellia.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_camellia.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -e_camellia.o: ../../include/openssl/opensslconf.h -e_camellia.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_camellia.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_camellia.o: ../../include/openssl/symhacks.h e_camellia.c evp_locl.h -e_cast.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_cast.o: ../../include/openssl/buffer.h ../../include/openssl/cast.h -e_cast.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -e_cast.o: ../../include/openssl/err.h ../../include/openssl/evp.h -e_cast.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -e_cast.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -e_cast.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_cast.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_cast.o: ../../include/openssl/symhacks.h ../cryptlib.h e_cast.c evp_locl.h -e_des.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_des.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_des.o: 
../../include/openssl/des.h ../../include/openssl/des_old.h -e_des.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_des.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_des.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -e_des.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -e_des.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -e_des.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_des.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -e_des.o: ../../include/openssl/ui_compat.h ../cryptlib.h e_des.c evp_locl.h -e_des3.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_des3.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_des3.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -e_des3.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_des3.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_des3.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -e_des3.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -e_des3.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -e_des3.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_des3.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -e_des3.o: ../../include/openssl/ui_compat.h ../cryptlib.h e_des3.c evp_locl.h -e_idea.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_idea.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_idea.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_idea.o: ../../include/openssl/evp.h ../../include/openssl/idea.h -e_idea.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -e_idea.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -e_idea.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_idea.o: 
../../include/openssl/safestack.h ../../include/openssl/stack.h -e_idea.o: ../../include/openssl/symhacks.h ../cryptlib.h e_idea.c evp_locl.h -e_null.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_null.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_null.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_null.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_null.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -e_null.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -e_null.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -e_null.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -e_null.o: ../cryptlib.h e_null.c -e_old.o: e_old.c -e_rc2.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_rc2.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_rc2.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_rc2.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_rc2.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -e_rc2.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -e_rc2.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rc2.h -e_rc2.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_rc2.o: ../../include/openssl/symhacks.h ../cryptlib.h e_rc2.c evp_locl.h -e_rc4.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_rc4.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_rc4.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_rc4.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -e_rc4.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -e_rc4.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -e_rc4.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rc4.h -e_rc4.o: 
../../include/openssl/safestack.h ../../include/openssl/stack.h -e_rc4.o: ../../include/openssl/symhacks.h ../cryptlib.h e_rc4.c evp_locl.h -e_rc4_hmac_md5.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_rc4_hmac_md5.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -e_rc4_hmac_md5.o: ../../include/openssl/evp.h ../../include/openssl/md5.h -e_rc4_hmac_md5.o: ../../include/openssl/obj_mac.h -e_rc4_hmac_md5.o: ../../include/openssl/objects.h -e_rc4_hmac_md5.o: ../../include/openssl/opensslconf.h -e_rc4_hmac_md5.o: ../../include/openssl/opensslv.h -e_rc4_hmac_md5.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rc4.h -e_rc4_hmac_md5.o: ../../include/openssl/safestack.h -e_rc4_hmac_md5.o: ../../include/openssl/stack.h -e_rc4_hmac_md5.o: ../../include/openssl/symhacks.h e_rc4_hmac_md5.c -e_rc5.o: ../../e_os.h ../../include/openssl/bio.h -e_rc5.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -e_rc5.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -e_rc5.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -e_rc5.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_rc5.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_rc5.o: ../../include/openssl/symhacks.h ../cryptlib.h e_rc5.c -e_seed.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -e_seed.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -e_seed.o: ../../include/openssl/err.h ../../include/openssl/evp.h -e_seed.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -e_seed.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -e_seed.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_seed.o: ../../include/openssl/safestack.h ../../include/openssl/seed.h -e_seed.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -e_seed.o: e_seed.c evp_locl.h -e_xcbc_d.o: ../../e_os.h ../../include/openssl/asn1.h -e_xcbc_d.o: 
../../include/openssl/bio.h ../../include/openssl/buffer.h -e_xcbc_d.o: ../../include/openssl/crypto.h ../../include/openssl/des.h -e_xcbc_d.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -e_xcbc_d.o: ../../include/openssl/err.h ../../include/openssl/evp.h -e_xcbc_d.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -e_xcbc_d.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -e_xcbc_d.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -e_xcbc_d.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -e_xcbc_d.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -e_xcbc_d.o: ../../include/openssl/ui_compat.h ../cryptlib.h e_xcbc_d.c -e_xcbc_d.o: evp_locl.h -encode.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -encode.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -encode.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -encode.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -encode.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -encode.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -encode.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -encode.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -encode.o: ../cryptlib.h encode.c -evp_acnf.o: ../../e_os.h ../../include/openssl/asn1.h -evp_acnf.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_acnf.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -evp_acnf.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -evp_acnf.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -evp_acnf.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -evp_acnf.o: ../../include/openssl/opensslconf.h -evp_acnf.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_acnf.o: ../../include/openssl/safestack.h 
../../include/openssl/stack.h -evp_acnf.o: ../../include/openssl/symhacks.h ../cryptlib.h evp_acnf.c -evp_cnf.o: ../../e_os.h ../../include/openssl/asn1.h -evp_cnf.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_cnf.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -evp_cnf.o: ../../include/openssl/dso.h ../../include/openssl/e_os2.h -evp_cnf.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -evp_cnf.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -evp_cnf.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -evp_cnf.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -evp_cnf.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -evp_cnf.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -evp_cnf.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -evp_cnf.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -evp_cnf.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -evp_cnf.o: ../../include/openssl/x509v3.h ../cryptlib.h evp_cnf.c -evp_enc.o: ../../e_os.h ../../include/openssl/asn1.h -evp_enc.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_enc.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_enc.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -evp_enc.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -evp_enc.o: ../../include/openssl/err.h ../../include/openssl/evp.h -evp_enc.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -evp_enc.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -evp_enc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_enc.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -evp_enc.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -evp_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -evp_enc.o: ../../include/openssl/x509.h 
../../include/openssl/x509_vfy.h -evp_enc.o: ../cryptlib.h evp_enc.c evp_locl.h -evp_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -evp_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_err.o: ../../include/openssl/err.h ../../include/openssl/evp.h -evp_err.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -evp_err.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -evp_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -evp_err.o: ../../include/openssl/symhacks.h evp_err.c -evp_fips.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -evp_fips.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_fips.o: ../../include/openssl/evp.h ../../include/openssl/obj_mac.h -evp_fips.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -evp_fips.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_fips.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -evp_fips.o: ../../include/openssl/symhacks.h evp_fips.c -evp_key.o: ../../e_os.h ../../include/openssl/asn1.h -evp_key.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_key.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_key.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -evp_key.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -evp_key.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -evp_key.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -evp_key.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -evp_key.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -evp_key.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -evp_key.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -evp_key.o: ../../include/openssl/ui.h 
../../include/openssl/x509.h -evp_key.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_key.c -evp_lib.o: ../../e_os.h ../../include/openssl/asn1.h -evp_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -evp_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -evp_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -evp_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -evp_lib.o: ../../include/openssl/symhacks.h ../cryptlib.h evp_lib.c -evp_pbe.o: ../../e_os.h ../../include/openssl/asn1.h -evp_pbe.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_pbe.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_pbe.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -evp_pbe.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -evp_pbe.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -evp_pbe.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -evp_pbe.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -evp_pbe.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs12.h -evp_pbe.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -evp_pbe.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -evp_pbe.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -evp_pbe.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h evp_pbe.c -evp_pkey.o: ../../e_os.h ../../include/openssl/asn1.h -evp_pkey.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -evp_pkey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -evp_pkey.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -evp_pkey.o: ../../include/openssl/ecdsa.h 
../../include/openssl/err.h -evp_pkey.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -evp_pkey.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -evp_pkey.o: ../../include/openssl/opensslconf.h -evp_pkey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -evp_pkey.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -evp_pkey.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -evp_pkey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -evp_pkey.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -evp_pkey.o: ../asn1/asn1_locl.h ../cryptlib.h evp_pkey.c -m_dss.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_dss.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_dss.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -m_dss.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_dss.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -m_dss.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -m_dss.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_dss.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -m_dss.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -m_dss.o: ../cryptlib.h m_dss.c -m_dss1.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_dss1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_dss1.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -m_dss1.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_dss1.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -m_dss1.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -m_dss1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_dss1.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -m_dss1.o: ../../include/openssl/stack.h 
../../include/openssl/symhacks.h -m_dss1.o: ../cryptlib.h m_dss1.c -m_ecdsa.o: ../../e_os.h ../../include/openssl/asn1.h -m_ecdsa.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -m_ecdsa.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -m_ecdsa.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -m_ecdsa.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -m_ecdsa.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -m_ecdsa.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_ecdsa.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -m_ecdsa.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -m_ecdsa.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -m_ecdsa.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -m_ecdsa.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -m_ecdsa.o: ../cryptlib.h m_ecdsa.c -m_md2.o: ../../e_os.h ../../include/openssl/bio.h -m_md2.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_md2.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -m_md2.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -m_md2.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_md2.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -m_md2.o: ../../include/openssl/symhacks.h ../cryptlib.h m_md2.c -m_md4.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_md4.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_md4.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -m_md4.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -m_md4.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_md4.o: ../../include/openssl/lhash.h ../../include/openssl/md4.h -m_md4.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_md4.o: ../../include/openssl/opensslconf.h 
../../include/openssl/opensslv.h -m_md4.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -m_md4.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -m_md4.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_md4.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -m_md4.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h m_md4.c -m_md5.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_md5.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_md5.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -m_md5.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -m_md5.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_md5.o: ../../include/openssl/lhash.h ../../include/openssl/md5.h -m_md5.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_md5.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -m_md5.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -m_md5.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -m_md5.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_md5.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -m_md5.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h m_md5.c -m_mdc2.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_mdc2.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_mdc2.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -m_mdc2.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -m_mdc2.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -m_mdc2.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_mdc2.o: ../../include/openssl/lhash.h ../../include/openssl/mdc2.h -m_mdc2.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_mdc2.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -m_mdc2.o: 
../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -m_mdc2.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -m_mdc2.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_mdc2.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -m_mdc2.o: ../../include/openssl/ui_compat.h ../../include/openssl/x509.h -m_mdc2.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h m_mdc2.c -m_null.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_null.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_null.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -m_null.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -m_null.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_null.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -m_null.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -m_null.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_null.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -m_null.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_null.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -m_null.o: ../../include/openssl/x509_vfy.h ../cryptlib.h m_null.c -m_ripemd.o: ../../e_os.h ../../include/openssl/asn1.h -m_ripemd.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -m_ripemd.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -m_ripemd.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -m_ripemd.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -m_ripemd.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -m_ripemd.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_ripemd.o: ../../include/openssl/opensslconf.h -m_ripemd.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_ripemd.o: ../../include/openssl/pkcs7.h ../../include/openssl/ripemd.h 
-m_ripemd.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -m_ripemd.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_ripemd.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -m_ripemd.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h -m_ripemd.o: m_ripemd.c -m_sha.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_sha.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_sha.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -m_sha.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -m_sha.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_sha.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -m_sha.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -m_sha.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_sha.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -m_sha.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -m_sha.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -m_sha.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -m_sha.o: ../cryptlib.h evp_locl.h m_sha.c -m_sha1.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_sha1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_sha1.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -m_sha1.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -m_sha1.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_sha1.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -m_sha1.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rsa.h -m_sha1.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -m_sha1.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -m_sha1.o: ../cryptlib.h m_sha1.c -m_sigver.o: ../../e_os.h ../../include/openssl/asn1.h -m_sigver.o: 
../../include/openssl/bio.h ../../include/openssl/buffer.h -m_sigver.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -m_sigver.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -m_sigver.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -m_sigver.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -m_sigver.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -m_sigver.o: ../../include/openssl/opensslconf.h -m_sigver.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_sigver.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -m_sigver.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_sigver.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -m_sigver.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h -m_sigver.o: m_sigver.c -m_wp.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -m_wp.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -m_wp.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -m_wp.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -m_wp.o: ../../include/openssl/err.h ../../include/openssl/evp.h -m_wp.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -m_wp.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -m_wp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -m_wp.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -m_wp.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -m_wp.o: ../../include/openssl/symhacks.h ../../include/openssl/whrlpool.h -m_wp.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -m_wp.o: ../cryptlib.h evp_locl.h m_wp.c -names.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -names.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -names.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -names.o: 
../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -names.o: ../../include/openssl/err.h ../../include/openssl/evp.h -names.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -names.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -names.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -names.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -names.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -names.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -names.o: ../../include/openssl/x509_vfy.h ../cryptlib.h names.c -p5_crpt.o: ../../e_os.h ../../include/openssl/asn1.h -p5_crpt.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p5_crpt.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p5_crpt.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p5_crpt.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p5_crpt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p5_crpt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p5_crpt.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -p5_crpt.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -p5_crpt.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p5_crpt.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p5_crpt.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p5_crpt.o: ../cryptlib.h p5_crpt.c -p5_crpt2.o: ../../e_os.h ../../include/openssl/asn1.h -p5_crpt2.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p5_crpt2.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p5_crpt2.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p5_crpt2.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p5_crpt2.o: ../../include/openssl/evp.h ../../include/openssl/hmac.h -p5_crpt2.o: ../../include/openssl/lhash.h 
../../include/openssl/obj_mac.h -p5_crpt2.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p5_crpt2.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p5_crpt2.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p5_crpt2.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p5_crpt2.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p5_crpt2.o: ../../include/openssl/x509_vfy.h ../cryptlib.h evp_locl.h -p5_crpt2.o: p5_crpt2.c -p_dec.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -p_dec.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p_dec.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p_dec.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p_dec.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p_dec.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p_dec.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p_dec.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p_dec.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -p_dec.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -p_dec.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p_dec.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p_dec.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p_dec.c -p_enc.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -p_enc.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p_enc.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p_enc.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p_enc.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p_enc.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p_enc.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -p_enc.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -p_enc.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -p_enc.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p_enc.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p_enc.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p_enc.c -p_lib.o: ../../e_os.h ../../include/openssl/asn1.h -p_lib.o: ../../include/openssl/asn1_mac.h ../../include/openssl/bio.h -p_lib.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -p_lib.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -p_lib.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -p_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -p_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -p_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p_lib.o: ../asn1/asn1_locl.h ../cryptlib.h p_lib.c -p_open.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -p_open.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p_open.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p_open.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p_open.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p_open.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p_open.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p_open.o: 
../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p_open.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -p_open.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p_open.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p_open.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p_open.o: ../cryptlib.h p_open.c -p_seal.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -p_seal.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p_seal.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p_seal.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p_seal.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p_seal.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p_seal.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p_seal.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p_seal.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -p_seal.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -p_seal.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p_seal.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p_seal.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p_seal.c -p_sign.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -p_sign.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p_sign.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p_sign.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p_sign.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p_sign.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p_sign.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p_sign.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p_sign.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h 
-p_sign.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p_sign.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p_sign.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p_sign.c -p_verify.o: ../../e_os.h ../../include/openssl/asn1.h -p_verify.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p_verify.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p_verify.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p_verify.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p_verify.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p_verify.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p_verify.o: ../../include/openssl/opensslconf.h -p_verify.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p_verify.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p_verify.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p_verify.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p_verify.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p_verify.c -pmeth_fn.o: ../../e_os.h ../../include/openssl/asn1.h -pmeth_fn.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pmeth_fn.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pmeth_fn.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pmeth_fn.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pmeth_fn.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pmeth_fn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pmeth_fn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -pmeth_fn.o: ../../include/openssl/symhacks.h ../cryptlib.h evp_locl.h -pmeth_fn.o: pmeth_fn.c -pmeth_gn.o: ../../e_os.h ../../include/openssl/asn1.h -pmeth_gn.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -pmeth_gn.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h 
-pmeth_gn.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -pmeth_gn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pmeth_gn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pmeth_gn.o: ../../include/openssl/opensslconf.h -pmeth_gn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pmeth_gn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -pmeth_gn.o: ../../include/openssl/symhacks.h ../cryptlib.h evp_locl.h -pmeth_gn.o: pmeth_gn.c -pmeth_lib.o: ../../e_os.h ../../include/openssl/asn1.h -pmeth_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pmeth_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pmeth_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pmeth_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -pmeth_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pmeth_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pmeth_lib.o: ../../include/openssl/objects.h -pmeth_lib.o: ../../include/openssl/opensslconf.h -pmeth_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pmeth_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pmeth_lib.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pmeth_lib.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pmeth_lib.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -pmeth_lib.o: evp_locl.h pmeth_lib.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/hmac/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/hmac/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/hmac/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/hmac/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -# -# OpenSSL/crypto/md/Makefile -# - -DIR= hmac -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=hmactest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=hmac.c hm_ameth.c hm_pmeth.c -LIBOBJ=hmac.o hm_ameth.o hm_pmeth.o - -SRC= $(LIBSRC) - -EXHEADER= hmac.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -hm_ameth.o: ../../e_os.h ../../include/openssl/asn1.h -hm_ameth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -hm_ameth.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -hm_ameth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -hm_ameth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -hm_ameth.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -hm_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -hm_ameth.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -hm_ameth.o: ../../include/openssl/symhacks.h ../asn1/asn1_locl.h ../cryptlib.h -hm_ameth.o: hm_ameth.c -hm_pmeth.o: ../../e_os.h ../../include/openssl/asn1.h -hm_pmeth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -hm_pmeth.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -hm_pmeth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -hm_pmeth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -hm_pmeth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -hm_pmeth.o: ../../include/openssl/hmac.h ../../include/openssl/lhash.h -hm_pmeth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -hm_pmeth.o: ../../include/openssl/opensslconf.h -hm_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -hm_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -hm_pmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -hm_pmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -hm_pmeth.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -hm_pmeth.o: ../cryptlib.h ../evp/evp_locl.h hm_pmeth.c -hmac.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -hmac.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -hmac.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -hmac.o: ../../include/openssl/evp.h 
../../include/openssl/hmac.h -hmac.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -hmac.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -hmac.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -hmac.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -hmac.o: ../../include/openssl/symhacks.h ../cryptlib.h hmac.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/idea/ideatest.c nodejs-0.11.15/deps/openssl/openssl/crypto/idea/ideatest.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/idea/ideatest.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/idea/ideatest.c 2015-01-20 21:22:17.000000000 +0000 @@ -199,10 +199,10 @@ } memcpy(cfb_tmp,cfb_iv,8); n=0; - idea_cfb64_encrypt(cfb_buf1,cfb_buf2,(long)17,&eks, + idea_cfb64_encrypt(cfb_buf1,cfb_buf2,(long)13,&eks, cfb_tmp,&n,IDEA_DECRYPT); - idea_cfb64_encrypt(&(cfb_buf1[17]),&(cfb_buf2[17]), - (long)CFB_TEST_SIZE-17,&dks, + idea_cfb64_encrypt(&(cfb_buf1[13]),&(cfb_buf2[13]), + (long)CFB_TEST_SIZE-13,&eks, cfb_tmp,&n,IDEA_DECRYPT); if (memcmp(plain,cfb_buf2,CFB_TEST_SIZE) != 0) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/idea/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/idea/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/idea/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/idea/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -# -# OpenSSL/crypto/idea/Makefile -# - -DIR= idea -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=ideatest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=i_cbc.c i_cfb64.c i_ofb64.c i_ecb.c i_skey.c -LIBOBJ=i_cbc.o i_cfb64.o i_ofb64.o i_ecb.o i_skey.o - -SRC= $(LIBSRC) - -EXHEADER= idea.h -HEADER= idea_lcl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -i_cbc.o: ../../include/openssl/idea.h ../../include/openssl/opensslconf.h -i_cbc.o: i_cbc.c idea_lcl.h -i_cfb64.o: ../../include/openssl/idea.h ../../include/openssl/opensslconf.h -i_cfb64.o: i_cfb64.c idea_lcl.h -i_ecb.o: ../../include/openssl/idea.h ../../include/openssl/opensslconf.h -i_ecb.o: ../../include/openssl/opensslv.h i_ecb.c idea_lcl.h -i_ofb64.o: ../../include/openssl/idea.h ../../include/openssl/opensslconf.h -i_ofb64.o: i_ofb64.c idea_lcl.h -i_skey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -i_skey.o: ../../include/openssl/idea.h ../../include/openssl/opensslconf.h -i_skey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -i_skey.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -i_skey.o: ../../include/openssl/symhacks.h i_skey.c idea_lcl.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/install-crypto.com nodejs-0.11.15/deps/openssl/openssl/crypto/install-crypto.com --- nodejs-0.11.13/deps/openssl/openssl/crypto/install-crypto.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/install-crypto.com 2015-01-20 21:22:17.000000000 +0000 @@ -75,13 +75,13 @@ $ sdirs := , - 'archd', - objects, - - md2, md4, md5, sha, mdc2, hmac, ripemd, whrlpool, - - des, aes, rc2, rc4, rc5, idea, bf, cast, camellia, seed, - + md4, md5, sha, mdc2, hmac, ripemd, whrlpool, - + des, aes, rc2, rc4, idea, bf, cast, camellia, seed, - bn, ec, rsa, dsa, ecdsa, dh, ecdh, dso, engine, - buffer, bio, stack, lhash, rand, err, - evp, asn1, pem, x509, x509v3, conf, txt_db, pkcs7, pkcs12, comp, ocsp, - ui, krb5, - - store, cms, pqueue, ts, jpake + cms, pqueue, ts, jpake, srp, store, cmac $! 
$ exheader_ := crypto.h, opensslv.h, ebcdic.h, symhacks.h, ossl_typ.h $ exheader_'archd' := opensslconf.h @@ -139,6 +139,9 @@ $ exheader_pqueue := pqueue.h $ exheader_ts := ts.h $ exheader_jpake := jpake.h +$ exheader_srp := srp.h +$ exheader_store := store.h +$ exheader_cmac := cmac.h $ libs := ssl_libcrypto $! $ exe_dir := [-.'archd'.exe.crypto] diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/krb5/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/krb5/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/krb5/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/krb5/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -# -# OpenSSL/krb5/Makefile -# - -DIR= krb5 -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= krb5_asn.c - -LIBOBJ= krb5_asn.o - -SRC= $(LIBSRC) - -EXHEADER= krb5_asn.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -krb5_asn.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -krb5_asn.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -krb5_asn.o: ../../include/openssl/e_os2.h ../../include/openssl/krb5_asn.h -krb5_asn.o: ../../include/openssl/opensslconf.h -krb5_asn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -krb5_asn.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -krb5_asn.o: ../../include/openssl/symhacks.h krb5_asn.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/lhash/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/lhash/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/lhash/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/lhash/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -# -# OpenSSL/crypto/lhash/Makefile -# - -DIR= lhash -TOP= ../.. -CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=lhash.c lh_stats.c -LIBOBJ=lhash.o lh_stats.o - -SRC= $(LIBSRC) - -EXHEADER= lhash.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -lh_stats.o: ../../e_os.h ../../include/openssl/bio.h -lh_stats.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -lh_stats.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -lh_stats.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -lh_stats.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -lh_stats.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -lh_stats.o: ../../include/openssl/symhacks.h ../cryptlib.h lh_stats.c -lhash.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -lhash.o: ../../include/openssl/e_os2.h ../../include/openssl/lhash.h -lhash.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -lhash.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -lhash.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h lhash.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/LPdir_vms.c nodejs-0.11.15/deps/openssl/openssl/crypto/LPdir_vms.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/LPdir_vms.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/LPdir_vms.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,3 @@ -/* $LP: LPlib/source/LPdir_vms.c,v 1.20 2004/08/26 13:36:05 _cvs_levitte Exp $ */ /* * Copyright (c) 2004, Richard Levitte <richard@levitte.org> * All rights reserved. @@ -88,6 +87,12 @@ size_t filespeclen = strlen(directory); char *filespec = NULL; + if (filespeclen == 0) + { + errno = ENOENT; + return 0; + } + /* MUST be a VMS directory specification! Let's estimate if it is. 
*/ if (directory[filespeclen-1] != ']' && directory[filespeclen-1] != '>' diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/LPdir_win.c nodejs-0.11.15/deps/openssl/openssl/crypto/LPdir_win.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/LPdir_win.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/LPdir_win.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,3 @@ -/* $LP: LPlib/source/LPdir_win.c,v 1.10 2004/08/26 13:36:05 _cvs_levitte Exp $ */ /* * Copyright (c) 2004, Richard Levitte <richard@levitte.org> * All rights reserved. @@ -63,6 +62,16 @@ errno = 0; if (*ctx == NULL) { + const char *extdir = directory; + char *extdirbuf = NULL; + size_t dirlen = strlen (directory); + + if (dirlen == 0) + { + errno = ENOENT; + return 0; + } + *ctx = (LP_DIR_CTX *)malloc(sizeof(LP_DIR_CTX)); if (*ctx == NULL) { @@ -71,15 +80,35 @@ } memset(*ctx, '\0', sizeof(LP_DIR_CTX)); + if (directory[dirlen-1] != '*') + { + extdirbuf = (char *)malloc(dirlen + 3); + if (extdirbuf == NULL) + { + free(*ctx); + *ctx = NULL; + errno = ENOMEM; + return 0; + } + if (directory[dirlen-1] != '/' && directory[dirlen-1] != '\\') + extdir = strcat(strcpy (extdirbuf,directory),"/*"); + else + extdir = strcat(strcpy (extdirbuf,directory),"*"); + } + if (sizeof(TCHAR) != sizeof(char)) { TCHAR *wdir = NULL; /* len_0 denotes string length *with* trailing 0 */ - size_t index = 0,len_0 = strlen(directory) + 1; + size_t index = 0,len_0 = strlen(extdir) + 1; - wdir = (TCHAR *)malloc(len_0 * sizeof(TCHAR)); + wdir = (TCHAR *)calloc(len_0, sizeof(TCHAR)); if (wdir == NULL) { + if (extdirbuf != NULL) + { + free (extdirbuf); + } free(*ctx); *ctx = NULL; errno = ENOMEM; @@ -87,17 +116,23 @@ } #ifdef LP_MULTIBYTE_AVAILABLE - if (!MultiByteToWideChar(CP_ACP, 0, directory, len_0, (WCHAR *)wdir, len_0)) + if (!MultiByteToWideChar(CP_ACP, 0, extdir, len_0, (WCHAR *)wdir, len_0)) #endif for (index = 0; index < len_0; index++) - wdir[index] = (TCHAR)directory[index]; + wdir[index] = 
(TCHAR)extdir[index]; (*ctx)->handle = FindFirstFile(wdir, &(*ctx)->ctx); free(wdir); } else - (*ctx)->handle = FindFirstFile((TCHAR *)directory, &(*ctx)->ctx); + { + (*ctx)->handle = FindFirstFile((TCHAR *)extdir, &(*ctx)->ctx); + } + if (extdirbuf != NULL) + { + free (extdirbuf); + } if ((*ctx)->handle == INVALID_HANDLE_VALUE) { @@ -114,7 +149,6 @@ return 0; } } - if (sizeof(TCHAR) != sizeof(char)) { TCHAR *wdir = (*ctx)->ctx.cFileName; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/Makefile nodejs-0.11.15/deps/openssl/openssl/crypto/Makefile --- nodejs-0.11.13/deps/openssl/openssl/crypto/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -31,6 +31,7 @@ LIBS= GENERAL=Makefile README crypto-lib.com install.com +TEST=constant_time_test.c LIB= $(TOP)/libcrypto.a SHARED_LIB= libcrypto$(SHLIB_EXT) @@ -43,7 +44,8 @@ EXHEADER= crypto.h opensslv.h opensslconf.h ebcdic.h symhacks.h \ ossl_typ.h -HEADER= cryptlib.h buildinf.h md32_common.h o_time.h o_str.h o_dir.h $(EXHEADER) +HEADER= cryptlib.h buildinf.h md32_common.h o_time.h o_str.h o_dir.h \ + constant_time_locl.h $(EXHEADER) ALL= $(GENERAL) $(SRC) $(HEADER) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,219 +0,0 @@ -# -# OpenSSL/crypto/Makefile -# - -DIR= crypto -TOP= .. -CC= cc -INCLUDE= -I. -I$(TOP) -I../include $(ZLIB_INCLUDE) -# INCLUDES targets sudbirs! -INCLUDES= -I.. -I../.. 
-I../modes -I../asn1 -I../evp -I../../include $(ZLIB_INCLUDE) -CFLAG= -g -MAKEDEPPROG= makedepend -MAKEDEPEND= $(TOP)/util/domd $(TOP) -MD $(MAKEDEPPROG) -MAKEFILE= Makefile -RM= rm -f -AR= ar r - -RECURSIVE_MAKE= [ -n "$(SDIRS)" ] && for i in $(SDIRS) ; do \ - (cd $$i && echo "making $$target in $(DIR)/$$i..." && \ - $(MAKE) -e TOP=../.. DIR=$$i INCLUDES='$(INCLUDES)' $$target ) || exit 1; \ - done; - -PEX_LIBS= -EX_LIBS= - -CFLAGS= $(INCLUDE) $(CFLAG) -ASFLAGS= $(INCLUDE) $(ASFLAG) -AFLAGS=$(ASFLAGS) -CPUID_OBJ=mem_clr.o - -LIBS= - -GENERAL=Makefile README crypto-lib.com install.com - -LIB= $(TOP)/libcrypto.a -SHARED_LIB= libcrypto$(SHLIB_EXT) -LIBSRC= cryptlib.c mem.c mem_clr.c mem_dbg.c cversion.c ex_data.c cpt_err.c \ - ebcdic.c uid.c o_time.c o_str.c o_dir.c o_fips.c o_init.c fips_ers.c -LIBOBJ= cryptlib.o mem.o mem_dbg.o cversion.o ex_data.o cpt_err.o ebcdic.o \ - uid.o o_time.o o_str.o o_dir.o o_fips.o o_init.o fips_ers.o $(CPUID_OBJ) - -SRC= $(LIBSRC) - -EXHEADER= crypto.h opensslv.h opensslconf.h ebcdic.h symhacks.h \ - ossl_typ.h -HEADER= cryptlib.h buildinf.h md32_common.h o_time.h o_str.h o_dir.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - @(cd ..; $(MAKE) DIRS=$(DIR) all) - -all: shared - -buildinf.h: ../Makefile - ( echo "#ifndef MK1MF_BUILD"; \ - echo ' /* auto-generated by crypto/Makefile for crypto/cversion.c */'; \ - echo ' #define CFLAGS "$(CC) $(CFLAG)"'; \ - echo ' #define PLATFORM "$(PLATFORM)"'; \ - echo " #define DATE \"`LC_ALL=C LC_TIME=C date`\""; \ - echo '#endif' ) >buildinf.h - -x86cpuid.s: x86cpuid.pl perlasm/x86asm.pl - $(PERL) x86cpuid.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ - -applink.o: $(TOP)/ms/applink.c - $(CC) $(CFLAGS) -c -o $@ $(TOP)/ms/applink.c - -uplink.o: $(TOP)/ms/uplink.c applink.o - $(CC) $(CFLAGS) -c -o $@ $(TOP)/ms/uplink.c - -uplink-x86.s: $(TOP)/ms/uplink-x86.pl - $(PERL) $(TOP)/ms/uplink-x86.pl $(PERLASM_SCHEME) > $@ - -x86_64cpuid.s: x86_64cpuid.pl; $(PERL) x86_64cpuid.pl $(PERLASM_SCHEME) 
> $@ -ia64cpuid.s: ia64cpuid.S; $(CC) $(CFLAGS) -E ia64cpuid.S > $@ -ppccpuid.s: ppccpuid.pl; $(PERL) ppccpuid.pl $(PERLASM_SCHEME) $@ -pariscid.s: pariscid.pl; $(PERL) pariscid.pl $(PERLASM_SCHEME) $@ -alphacpuid.s: alphacpuid.pl - (preproc=/tmp/$$$$.$@; trap "rm $$preproc" INT; \ - $(PERL) alphacpuid.pl > $$preproc && \ - $(CC) -E $$preproc > $@ && rm $$preproc) - -testapps: - [ -z "$(THIS)" ] || ( if echo $(SDIRS) | fgrep ' des '; \ - then cd des && $(MAKE) -e des; fi ) - [ -z "$(THIS)" ] || ( cd pkcs7 && $(MAKE) -e testapps ); - @if [ -z "$(THIS)" ]; then $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; fi - -subdirs: - @target=all; $(RECURSIVE_MAKE) - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - @target=files; $(RECURSIVE_MAKE) - -links: - @$(PERL) $(TOP)/util/mklink.pl ../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../apps $(APPS) - @target=links; $(RECURSIVE_MAKE) - -# lib: $(LIB): are splitted to avoid end-less loop -lib: $(LIB) - @touch lib -$(LIB): $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - [ -z "$(FIPSLIBDIR)" ] || $(AR) $(LIB) $(FIPSLIBDIR)fipscanister.o - $(RANLIB) $(LIB) || echo Never mind. - -shared: buildinf.h lib subdirs - if [ -n "$(SHARED_LIBS)" ]; then \ - (cd ..; $(MAKE) $(SHARED_LIB)); \ - fi - -libs: - @target=lib; $(RECURSIVE_MAKE) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist ;\ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - @target=install; $(RECURSIVE_MAKE) - -lint: - @target=lint; $(RECURSIVE_MAKE) - -depend: - @[ -z "$(THIS)" -o -f buildinf.h ] || touch buildinf.h # fake buildinf.h if it does not exist - @[ -z "$(THIS)" ] || $(MAKEDEPEND) -- $(CFLAG) $(INCLUDE) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - @[ -z "$(THIS)" -o -s buildinf.h ] || rm buildinf.h - @[ -z "$(THIS)" ] || (set -e; target=depend; $(RECURSIVE_MAKE) ) - @if [ -z "$(THIS)" ]; then $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; fi - -clean: - rm -f buildinf.h *.s *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - @target=clean; $(RECURSIVE_MAKE) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - rm -f opensslconf.h - @target=dclean; $(RECURSIVE_MAKE) - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -cpt_err.o: ../include/openssl/bio.h ../include/openssl/crypto.h -cpt_err.o: ../include/openssl/e_os2.h ../include/openssl/err.h -cpt_err.o: ../include/openssl/lhash.h ../include/openssl/opensslconf.h -cpt_err.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -cpt_err.o: ../include/openssl/safestack.h ../include/openssl/stack.h -cpt_err.o: ../include/openssl/symhacks.h cpt_err.c -cryptlib.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/buffer.h -cryptlib.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -cryptlib.o: ../include/openssl/err.h ../include/openssl/lhash.h -cryptlib.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -cryptlib.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -cryptlib.o: ../include/openssl/stack.h ../include/openssl/symhacks.h cryptlib.c -cryptlib.o: cryptlib.h -cversion.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/buffer.h -cversion.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -cversion.o: ../include/openssl/err.h ../include/openssl/lhash.h -cversion.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -cversion.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -cversion.o: ../include/openssl/stack.h ../include/openssl/symhacks.h buildinf.h -cversion.o: cryptlib.h cversion.c -ebcdic.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h ebcdic.c -ex_data.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/buffer.h -ex_data.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -ex_data.o: ../include/openssl/err.h ../include/openssl/lhash.h -ex_data.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ex_data.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -ex_data.o: ../include/openssl/stack.h ../include/openssl/symhacks.h cryptlib.h -ex_data.o: ex_data.c -fips_ers.o: ../include/openssl/opensslconf.h fips_ers.c -mem.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/buffer.h 
-mem.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -mem.o: ../include/openssl/err.h ../include/openssl/lhash.h -mem.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -mem.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -mem.o: ../include/openssl/stack.h ../include/openssl/symhacks.h cryptlib.h -mem.o: mem.c -mem_clr.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -mem_clr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -mem_clr.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -mem_clr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h mem_clr.c -mem_dbg.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/buffer.h -mem_dbg.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -mem_dbg.o: ../include/openssl/err.h ../include/openssl/lhash.h -mem_dbg.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -mem_dbg.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -mem_dbg.o: ../include/openssl/stack.h ../include/openssl/symhacks.h cryptlib.h -mem_dbg.o: mem_dbg.c -o_dir.o: ../e_os.h ../include/openssl/e_os2.h ../include/openssl/opensslconf.h -o_dir.o: LPdir_unix.c o_dir.c o_dir.h -o_fips.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/buffer.h -o_fips.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -o_fips.o: ../include/openssl/err.h ../include/openssl/lhash.h -o_fips.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -o_fips.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -o_fips.o: ../include/openssl/stack.h ../include/openssl/symhacks.h cryptlib.h -o_fips.o: o_fips.c -o_init.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/crypto.h -o_init.o: ../include/openssl/e_os2.h ../include/openssl/err.h -o_init.o: ../include/openssl/lhash.h ../include/openssl/opensslconf.h -o_init.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -o_init.o: ../include/openssl/safestack.h 
../include/openssl/stack.h -o_init.o: ../include/openssl/symhacks.h o_init.c -o_str.o: ../e_os.h ../include/openssl/e_os2.h ../include/openssl/opensslconf.h -o_str.o: o_str.c o_str.h -o_time.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h o_time.c -o_time.o: o_time.h -uid.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -uid.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -uid.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -uid.o: ../include/openssl/stack.h ../include/openssl/symhacks.h uid.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/md4/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/md4/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/md4/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/md4/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -# -# OpenSSL/crypto/md4/Makefile -# - -DIR= md4 -TOP= ../.. -CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=md4test.c -APPS=md4.c - -LIB=$(TOP)/libcrypto.a -LIBSRC=md4_dgst.c md4_one.c -LIBOBJ=md4_dgst.o md4_one.o - -SRC= $(LIBSRC) - -EXHEADER= md4.h -HEADER= md4_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - rm -f ../../include/openssl/$(EXHEADER) ../../test/$(TEST) ../../apps/$(APPS) - -clean: - rm -f asm/mx86unix.cpp *.o asm/*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -md4_dgst.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -md4_dgst.o: ../../include/openssl/md4.h ../../include/openssl/opensslconf.h -md4_dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -md4_dgst.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -md4_dgst.o: ../../include/openssl/symhacks.h ../md32_common.h md4_dgst.c -md4_dgst.o: md4_locl.h -md4_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -md4_one.o: ../../include/openssl/md4.h ../../include/openssl/opensslconf.h -md4_one.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -md4_one.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -md4_one.o: ../../include/openssl/symhacks.h md4_one.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/md5/asm/md5-x86_64.pl nodejs-0.11.15/deps/openssl/openssl/crypto/md5/asm/md5-x86_64.pl --- nodejs-0.11.13/deps/openssl/openssl/crypto/md5/asm/md5-x86_64.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/md5/asm/md5-x86_64.pl 2015-01-20 21:22:17.000000000 +0000 @@ -108,6 +108,7 @@ EOF } +no warnings qw(uninitialized); my $flavour = shift; 
my $output = shift; if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } @@ -119,7 +120,6 @@ ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or die "can't locate x86_64-xlate.pl"; -no warnings qw(uninitialized); open OUT,"| \"$^X\" $xlate $flavour $output"; *STDOUT=*OUT; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/md5/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/md5/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/md5/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/md5/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -# -# OpenSSL/crypto/md5/Makefile -# - -DIR= md5 -TOP= ../.. -CC= cc -CPP= $(CC) -E -INCLUDES=-I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -MD5_ASM_OBJ= - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=md5test.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=md5_dgst.c md5_one.c -LIBOBJ=md5_dgst.o md5_one.o $(MD5_ASM_OBJ) - -SRC= $(LIBSRC) - -EXHEADER= md5.h -HEADER= md5_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -md5-586.s: asm/md5-586.pl ../perlasm/x86asm.pl - $(PERL) asm/md5-586.pl $(PERLASM_SCHEME) $(CFLAGS) > $@ - -md5-x86_64.s: asm/md5-x86_64.pl - $(PERL) asm/md5-x86_64.pl $(PERLASM_SCHEME) > $@ - -md5-ia64.s: asm/md5-ia64.S - $(CC) $(CFLAGS) -E asm/md5-ia64.S | \ - $(PERL) -ne 's/;\s+/;\n/g; print;' > $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -md5_dgst.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -md5_dgst.o: ../../include/openssl/md5.h ../../include/openssl/opensslconf.h -md5_dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -md5_dgst.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -md5_dgst.o: ../../include/openssl/symhacks.h ../md32_common.h md5_dgst.c -md5_dgst.o: md5_locl.h -md5_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -md5_one.o: ../../include/openssl/md5.h ../../include/openssl/opensslconf.h -md5_one.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -md5_one.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -md5_one.o: ../../include/openssl/symhacks.h md5_one.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/mdc2/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/mdc2/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/mdc2/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/mdc2/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -# -# OpenSSL/crypto/mdc2/Makefile -# - -DIR= mdc2 -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= mdc2test.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=mdc2dgst.c mdc2_one.c -LIBOBJ=mdc2dgst.o mdc2_one.o - -SRC= $(LIBSRC) - -EXHEADER= mdc2.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -mdc2_one.o: ../../e_os.h ../../include/openssl/bio.h -mdc2_one.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -mdc2_one.o: ../../include/openssl/des.h ../../include/openssl/des_old.h -mdc2_one.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -mdc2_one.o: ../../include/openssl/lhash.h ../../include/openssl/mdc2.h -mdc2_one.o: ../../include/openssl/opensslconf.h -mdc2_one.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -mdc2_one.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -mdc2_one.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -mdc2_one.o: ../../include/openssl/ui_compat.h ../cryptlib.h mdc2_one.c -mdc2dgst.o: ../../include/openssl/crypto.h ../../include/openssl/des.h -mdc2dgst.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -mdc2dgst.o: ../../include/openssl/mdc2.h ../../include/openssl/opensslconf.h -mdc2dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -mdc2dgst.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -mdc2dgst.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -mdc2dgst.o: ../../include/openssl/ui_compat.h mdc2dgst.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/modes/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/modes/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/modes/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/modes/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -# -# OpenSSL/crypto/modes/Makefile -# - -DIR= modes -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -MODES_ASM_OBJ= - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= cbc128.c ctr128.c cts128.c cfb128.c ofb128.c gcm128.c \ - ccm128.c xts128.c -LIBOBJ= cbc128.o ctr128.o cts128.o cfb128.o ofb128.o gcm128.o \ - ccm128.o xts128.o $(MODES_ASM_OBJ) - -SRC= $(LIBSRC) - -#EXHEADER= store.h str_compat.h -EXHEADER= modes.h -HEADER= modes_lcl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -ghash-ia64.s: asm/ghash-ia64.pl - $(PERL) asm/ghash-ia64.pl $@ $(CFLAGS) -ghash-x86.s: asm/ghash-x86.pl - $(PERL) asm/ghash-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -ghash-x86_64.s: asm/ghash-x86_64.pl - $(PERL) asm/ghash-x86_64.pl $(PERLASM_SCHEME) > $@ -ghash-sparcv9.s: asm/ghash-sparcv9.pl - $(PERL) asm/ghash-sparcv9.pl $@ $(CFLAGS) -ghash-alpha.s: asm/ghash-alpha.pl - (preproc=/tmp/$$$$.$@; trap "rm $$preproc" INT; \ - $(PERL) asm/ghash-alpha.pl > $$preproc && \ - $(CC) -E $$preproc > $@ && rm $$preproc) - -ghash-parisc.s: asm/ghash-parisc.pl - $(PERL) asm/ghash-parisc.pl $(PERLASM_SCHEME) $@ - -# GNU make "catch all" -ghash-%.S: asm/ghash-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@ - -ghash-armv4.o: ghash-armv4.S - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -cbc128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cbc128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -cbc128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cbc128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cbc128.o: ../../include/openssl/symhacks.h cbc128.c modes_lcl.h -ccm128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ccm128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -ccm128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ccm128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ccm128.o: ../../include/openssl/symhacks.h ccm128.c modes_lcl.h -cfb128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cfb128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -cfb128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cfb128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cfb128.o: ../../include/openssl/symhacks.h cfb128.c modes_lcl.h -ctr128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ctr128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -ctr128.o: 
../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ctr128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ctr128.o: ../../include/openssl/symhacks.h ctr128.c modes_lcl.h -cts128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -cts128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -cts128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -cts128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -cts128.o: ../../include/openssl/symhacks.h cts128.c modes_lcl.h -gcm128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -gcm128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -gcm128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gcm128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -gcm128.o: ../../include/openssl/symhacks.h gcm128.c modes_lcl.h -ofb128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ofb128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -ofb128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ofb128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ofb128.o: ../../include/openssl/symhacks.h modes_lcl.h ofb128.c -xts128.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -xts128.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -xts128.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -xts128.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -xts128.o: ../../include/openssl/symhacks.h modes_lcl.h xts128.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/modes/modes.h nodejs-0.11.15/deps/openssl/openssl/crypto/modes/modes.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/modes/modes.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/modes/modes.h 2015-01-20 21:22:17.000000000 +0000 @@ -7,6 
+7,9 @@ #include <stddef.h> +#ifdef __cplusplus +extern "C" { +#endif typedef void (*block128_f)(const unsigned char in[16], unsigned char out[16], const void *key); @@ -133,3 +136,6 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16], const unsigned char *inp, unsigned char *out, size_t len, int enc); +#ifdef __cplusplus +} +#endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/objects/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/objects/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/objects/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/objects/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ -# -# OpenSSL/crypto/objects/Makefile -# - -DIR= objects -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r -PERL= perl - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= o_names.c obj_dat.c obj_lib.c obj_err.c obj_xref.c -LIBOBJ= o_names.o obj_dat.o obj_lib.o obj_err.o obj_xref.o - -SRC= $(LIBSRC) - -EXHEADER= objects.h obj_mac.h -HEADER= $(EXHEADER) obj_dat.h obj_xref.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: obj_dat.h obj_xref.h lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -obj_dat.h: obj_dat.pl obj_mac.h - $(PERL) obj_dat.pl obj_mac.h obj_dat.h - -# objects.pl both reads and writes obj_mac.num -obj_mac.h: objects.pl objects.txt obj_mac.num - $(PERL) objects.pl objects.txt obj_mac.num obj_mac.h - @sleep 1; touch obj_mac.h; sleep 1 - -obj_xref.h: objxref.pl obj_xref.txt obj_mac.num - $(PERL) objxref.pl obj_mac.num obj_xref.txt > obj_xref.h - @sleep 1; touch obj_xref.h; sleep 1 - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -o_names.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -o_names.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -o_names.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -o_names.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -o_names.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -o_names.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -o_names.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -o_names.o: o_names.c -obj_dat.o: ../../e_os.h ../../include/openssl/asn1.h -obj_dat.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -obj_dat.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -obj_dat.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -obj_dat.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -obj_dat.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -obj_dat.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -obj_dat.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -obj_dat.o: ../../include/openssl/symhacks.h ../cryptlib.h obj_dat.c obj_dat.h -obj_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -obj_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -obj_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -obj_err.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -obj_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -obj_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -obj_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -obj_err.o: obj_err.c -obj_lib.o: ../../e_os.h ../../include/openssl/asn1.h -obj_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -obj_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -obj_lib.o: ../../include/openssl/err.h 
../../include/openssl/lhash.h -obj_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -obj_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -obj_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -obj_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -obj_lib.o: ../cryptlib.h obj_lib.c -obj_xref.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -obj_xref.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -obj_xref.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -obj_xref.o: ../../include/openssl/opensslconf.h -obj_xref.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -obj_xref.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -obj_xref.o: ../../include/openssl/symhacks.h obj_xref.c obj_xref.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/objects/obj_dat.c nodejs-0.11.15/deps/openssl/openssl/crypto/objects/obj_dat.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/objects/obj_dat.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/objects/obj_dat.c 2015-01-20 21:22:17.000000000 +0000 @@ -471,11 +471,12 @@ const unsigned char *p; char tbuf[DECIMAL_SIZE(i)+DECIMAL_SIZE(l)+2]; - if ((a == NULL) || (a->data == NULL)) { - buf[0]='\0'; - return(0); - } + /* Ensure that, at every state, |buf| is NUL-terminated. 
*/ + if (buf && buf_len > 0) + buf[0] = '\0'; + if ((a == NULL) || (a->data == NULL)) + return(0); if (!no_name && (nid=OBJ_obj2nid(a)) != NID_undef) { @@ -554,9 +555,10 @@ i=(int)(l/40); l-=(long)(i*40); } - if (buf && (buf_len > 0)) + if (buf && (buf_len > 1)) { *buf++ = i + '0'; + *buf = '\0'; buf_len--; } n++; @@ -571,9 +573,10 @@ i = strlen(bndec); if (buf) { - if (buf_len > 0) + if (buf_len > 1) { *buf++ = '.'; + *buf = '\0'; buf_len--; } BUF_strlcpy(buf,bndec,buf_len); @@ -807,4 +810,3 @@ OPENSSL_free(buf); return(ok); } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/objects/obj_dat.h nodejs-0.11.15/deps/openssl/openssl/crypto/objects/obj_dat.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/objects/obj_dat.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/objects/obj_dat.h 2015-01-20 21:22:17.000000000 +0000 @@ -67,1908 +67,1901 @@ #define NUM_LN 913 #define NUM_OBJ 857 -static const unsigned char lvalues[5980]={ -0x00, /* [ 0] OBJ_undef */ -0x2A,0x86,0x48,0x86,0xF7,0x0D, /* [ 1] OBJ_rsadsi */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01, /* [ 7] OBJ_pkcs */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x02, /* [ 14] OBJ_md2 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x05, /* [ 22] OBJ_md5 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x04, /* [ 30] OBJ_rc4 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,/* [ 38] OBJ_rsaEncryption */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x02,/* [ 47] OBJ_md2WithRSAEncryption */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x04,/* [ 56] OBJ_md5WithRSAEncryption */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x01,/* [ 65] OBJ_pbeWithMD2AndDES_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x03,/* [ 74] OBJ_pbeWithMD5AndDES_CBC */ -0x55, /* [ 83] OBJ_X500 */ -0x55,0x04, /* [ 84] OBJ_X509 */ -0x55,0x04,0x03, /* [ 86] OBJ_commonName */ -0x55,0x04,0x06, /* [ 89] OBJ_countryName */ -0x55,0x04,0x07, /* [ 92] OBJ_localityName */ -0x55,0x04,0x08, /* [ 95] OBJ_stateOrProvinceName */ -0x55,0x04,0x0A, /* [ 98] 
OBJ_organizationName */ -0x55,0x04,0x0B, /* [101] OBJ_organizationalUnitName */ -0x55,0x08,0x01,0x01, /* [104] OBJ_rsa */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07, /* [108] OBJ_pkcs7 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x01,/* [116] OBJ_pkcs7_data */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x02,/* [125] OBJ_pkcs7_signed */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x03,/* [134] OBJ_pkcs7_enveloped */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x04,/* [143] OBJ_pkcs7_signedAndEnveloped */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x05,/* [152] OBJ_pkcs7_digest */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x06,/* [161] OBJ_pkcs7_encrypted */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x03, /* [170] OBJ_pkcs3 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x03,0x01,/* [178] OBJ_dhKeyAgreement */ -0x2B,0x0E,0x03,0x02,0x06, /* [187] OBJ_des_ecb */ -0x2B,0x0E,0x03,0x02,0x09, /* [192] OBJ_des_cfb64 */ -0x2B,0x0E,0x03,0x02,0x07, /* [197] OBJ_des_cbc */ -0x2B,0x0E,0x03,0x02,0x11, /* [202] OBJ_des_ede_ecb */ -0x2B,0x06,0x01,0x04,0x01,0x81,0x3C,0x07,0x01,0x01,0x02,/* [207] OBJ_idea_cbc */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x02, /* [218] OBJ_rc2_cbc */ -0x2B,0x0E,0x03,0x02,0x12, /* [226] OBJ_sha */ -0x2B,0x0E,0x03,0x02,0x0F, /* [231] OBJ_shaWithRSAEncryption */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x07, /* [236] OBJ_des_ede3_cbc */ -0x2B,0x0E,0x03,0x02,0x08, /* [244] OBJ_des_ofb64 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09, /* [249] OBJ_pkcs9 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x01,/* [257] OBJ_pkcs9_emailAddress */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x02,/* [266] OBJ_pkcs9_unstructuredName */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x03,/* [275] OBJ_pkcs9_contentType */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x04,/* [284] OBJ_pkcs9_messageDigest */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x05,/* [293] OBJ_pkcs9_signingTime */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x06,/* [302] OBJ_pkcs9_countersignature */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x07,/* [311] 
OBJ_pkcs9_challengePassword */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x08,/* [320] OBJ_pkcs9_unstructuredAddress */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x09,/* [329] OBJ_pkcs9_extCertAttributes */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42, /* [338] OBJ_netscape */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01, /* [345] OBJ_netscape_cert_extension */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x02, /* [353] OBJ_netscape_data_type */ -0x2B,0x0E,0x03,0x02,0x1A, /* [361] OBJ_sha1 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,/* [366] OBJ_sha1WithRSAEncryption */ -0x2B,0x0E,0x03,0x02,0x0D, /* [375] OBJ_dsaWithSHA */ -0x2B,0x0E,0x03,0x02,0x0C, /* [380] OBJ_dsa_2 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0B,/* [385] OBJ_pbeWithSHA1AndRC2_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0C,/* [394] OBJ_id_pbkdf2 */ -0x2B,0x0E,0x03,0x02,0x1B, /* [403] OBJ_dsaWithSHA1_2 */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x01,/* [408] OBJ_netscape_cert_type */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x02,/* [417] OBJ_netscape_base_url */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x03,/* [426] OBJ_netscape_revocation_url */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x04,/* [435] OBJ_netscape_ca_revocation_url */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x07,/* [444] OBJ_netscape_renewal_url */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x08,/* [453] OBJ_netscape_ca_policy_url */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x0C,/* [462] OBJ_netscape_ssl_server_name */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x0D,/* [471] OBJ_netscape_comment */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x02,0x05,/* [480] OBJ_netscape_cert_sequence */ -0x55,0x1D, /* [489] OBJ_id_ce */ -0x55,0x1D,0x0E, /* [491] OBJ_subject_key_identifier */ -0x55,0x1D,0x0F, /* [494] OBJ_key_usage */ -0x55,0x1D,0x10, /* [497] OBJ_private_key_usage_period */ -0x55,0x1D,0x11, /* [500] OBJ_subject_alt_name */ -0x55,0x1D,0x12, /* [503] OBJ_issuer_alt_name */ -0x55,0x1D,0x13, /* [506] OBJ_basic_constraints */ -0x55,0x1D,0x14, /* [509] 
OBJ_crl_number */ -0x55,0x1D,0x20, /* [512] OBJ_certificate_policies */ -0x55,0x1D,0x23, /* [515] OBJ_authority_key_identifier */ -0x2B,0x06,0x01,0x04,0x01,0x97,0x55,0x01,0x02,/* [518] OBJ_bf_cbc */ -0x55,0x08,0x03,0x65, /* [527] OBJ_mdc2 */ -0x55,0x08,0x03,0x64, /* [531] OBJ_mdc2WithRSA */ -0x55,0x04,0x2A, /* [535] OBJ_givenName */ -0x55,0x04,0x04, /* [538] OBJ_surname */ -0x55,0x04,0x2B, /* [541] OBJ_initials */ -0x55,0x1D,0x1F, /* [544] OBJ_crl_distribution_points */ -0x2B,0x0E,0x03,0x02,0x03, /* [547] OBJ_md5WithRSA */ -0x55,0x04,0x05, /* [552] OBJ_serialNumber */ -0x55,0x04,0x0C, /* [555] OBJ_title */ -0x55,0x04,0x0D, /* [558] OBJ_description */ -0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x0A,/* [561] OBJ_cast5_cbc */ -0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x0C,/* [570] OBJ_pbeWithMD5AndCast5_CBC */ -0x2A,0x86,0x48,0xCE,0x38,0x04,0x03, /* [579] OBJ_dsaWithSHA1 */ -0x2B,0x0E,0x03,0x02,0x1D, /* [586] OBJ_sha1WithRSA */ -0x2A,0x86,0x48,0xCE,0x38,0x04,0x01, /* [591] OBJ_dsa */ -0x2B,0x24,0x03,0x02,0x01, /* [598] OBJ_ripemd160 */ -0x2B,0x24,0x03,0x03,0x01,0x02, /* [603] OBJ_ripemd160WithRSA */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x08, /* [609] OBJ_rc5_cbc */ -0x29,0x01,0x01,0x85,0x1A,0x01, /* [617] OBJ_rle_compression */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x08,/* [623] OBJ_zlib_compression */ -0x55,0x1D,0x25, /* [634] OBJ_ext_key_usage */ -0x2B,0x06,0x01,0x05,0x05,0x07, /* [637] OBJ_id_pkix */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03, /* [643] OBJ_id_kp */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x01, /* [650] OBJ_server_auth */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x02, /* [658] OBJ_client_auth */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x03, /* [666] OBJ_code_sign */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x04, /* [674] OBJ_email_protect */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x08, /* [682] OBJ_time_stamp */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x02,0x01,0x15,/* [690] OBJ_ms_code_ind */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x02,0x01,0x16,/* [700] OBJ_ms_code_com */ 
-0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x0A,0x03,0x01,/* [710] OBJ_ms_ctl_sign */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x0A,0x03,0x03,/* [720] OBJ_ms_sgc */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x0A,0x03,0x04,/* [730] OBJ_ms_efs */ -0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x04,0x01,/* [740] OBJ_ns_sgc */ -0x55,0x1D,0x1B, /* [749] OBJ_delta_crl */ -0x55,0x1D,0x15, /* [752] OBJ_crl_reason */ -0x55,0x1D,0x18, /* [755] OBJ_invalidity_date */ -0x2B,0x65,0x01,0x04,0x01, /* [758] OBJ_sxnet */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x01,/* [763] OBJ_pbe_WithSHA1And128BitRC4 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x02,/* [773] OBJ_pbe_WithSHA1And40BitRC4 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x03,/* [783] OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x04,/* [793] OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x05,/* [803] OBJ_pbe_WithSHA1And128BitRC2_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x06,/* [813] OBJ_pbe_WithSHA1And40BitRC2_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x01,/* [823] OBJ_keyBag */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x02,/* [834] OBJ_pkcs8ShroudedKeyBag */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x03,/* [845] OBJ_certBag */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x04,/* [856] OBJ_crlBag */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x05,/* [867] OBJ_secretBag */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x06,/* [878] OBJ_safeContentsBag */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x14,/* [889] OBJ_friendlyName */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x15,/* [898] OBJ_localKeyID */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x16,0x01,/* [907] OBJ_x509Certificate */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x16,0x02,/* [917] OBJ_sdsiCertificate */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x17,0x01,/* [927] OBJ_x509Crl */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0D,/* 
[937] OBJ_pbes2 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0E,/* [946] OBJ_pbmac1 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x07, /* [955] OBJ_hmacWithSHA1 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x02,0x01, /* [963] OBJ_id_qt_cps */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x02,0x02, /* [971] OBJ_id_qt_unotice */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x0F,/* [979] OBJ_SMIMECapabilities */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x04,/* [988] OBJ_pbeWithMD2AndRC2_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x06,/* [997] OBJ_pbeWithMD5AndRC2_CBC */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0A,/* [1006] OBJ_pbeWithSHA1AndDES_CBC */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x02,0x01,0x0E,/* [1015] OBJ_ms_ext_req */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x0E,/* [1025] OBJ_ext_req */ -0x55,0x04,0x29, /* [1034] OBJ_name */ -0x55,0x04,0x2E, /* [1037] OBJ_dnQualifier */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01, /* [1040] OBJ_id_pe */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30, /* [1047] OBJ_id_ad */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x01, /* [1054] OBJ_info_access */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01, /* [1062] OBJ_ad_OCSP */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x02, /* [1070] OBJ_ad_ca_issuers */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x09, /* [1078] OBJ_OCSP_sign */ -0x28, /* [1086] OBJ_iso */ -0x2A, /* [1087] OBJ_member_body */ -0x2A,0x86,0x48, /* [1088] OBJ_ISO_US */ -0x2A,0x86,0x48,0xCE,0x38, /* [1091] OBJ_X9_57 */ -0x2A,0x86,0x48,0xCE,0x38,0x04, /* [1096] OBJ_X9cm */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01, /* [1102] OBJ_pkcs1 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05, /* [1110] OBJ_pkcs5 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,/* [1118] OBJ_SMIME */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,/* [1127] OBJ_id_smime_mod */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,/* [1137] OBJ_id_smime_ct */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,/* [1147] OBJ_id_smime_aa */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,/* [1157] OBJ_id_smime_alg */ 
-0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x04,/* [1167] OBJ_id_smime_cd */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x05,/* [1177] OBJ_id_smime_spq */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,/* [1187] OBJ_id_smime_cti */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x01,/* [1197] OBJ_id_smime_mod_cms */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x02,/* [1208] OBJ_id_smime_mod_ess */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x03,/* [1219] OBJ_id_smime_mod_oid */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x04,/* [1230] OBJ_id_smime_mod_msg_v3 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x05,/* [1241] OBJ_id_smime_mod_ets_eSignature_88 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x06,/* [1252] OBJ_id_smime_mod_ets_eSignature_97 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x07,/* [1263] OBJ_id_smime_mod_ets_eSigPolicy_88 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x08,/* [1274] OBJ_id_smime_mod_ets_eSigPolicy_97 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x01,/* [1285] OBJ_id_smime_ct_receipt */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x02,/* [1296] OBJ_id_smime_ct_authData */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x03,/* [1307] OBJ_id_smime_ct_publishCert */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x04,/* [1318] OBJ_id_smime_ct_TSTInfo */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x05,/* [1329] OBJ_id_smime_ct_TDTInfo */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x06,/* [1340] OBJ_id_smime_ct_contentInfo */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x07,/* [1351] OBJ_id_smime_ct_DVCSRequestData */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x08,/* [1362] OBJ_id_smime_ct_DVCSResponseData */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x01,/* [1373] OBJ_id_smime_aa_receiptRequest */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x02,/* [1384] OBJ_id_smime_aa_securityLabel */ 
-0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x03,/* [1395] OBJ_id_smime_aa_mlExpandHistory */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x04,/* [1406] OBJ_id_smime_aa_contentHint */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x05,/* [1417] OBJ_id_smime_aa_msgSigDigest */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x06,/* [1428] OBJ_id_smime_aa_encapContentType */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x07,/* [1439] OBJ_id_smime_aa_contentIdentifier */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x08,/* [1450] OBJ_id_smime_aa_macValue */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x09,/* [1461] OBJ_id_smime_aa_equivalentLabels */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0A,/* [1472] OBJ_id_smime_aa_contentReference */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0B,/* [1483] OBJ_id_smime_aa_encrypKeyPref */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0C,/* [1494] OBJ_id_smime_aa_signingCertificate */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0D,/* [1505] OBJ_id_smime_aa_smimeEncryptCerts */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0E,/* [1516] OBJ_id_smime_aa_timeStampToken */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0F,/* [1527] OBJ_id_smime_aa_ets_sigPolicyId */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x10,/* [1538] OBJ_id_smime_aa_ets_commitmentType */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x11,/* [1549] OBJ_id_smime_aa_ets_signerLocation */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x12,/* [1560] OBJ_id_smime_aa_ets_signerAttr */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x13,/* [1571] OBJ_id_smime_aa_ets_otherSigCert */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x14,/* [1582] OBJ_id_smime_aa_ets_contentTimestamp */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x15,/* [1593] OBJ_id_smime_aa_ets_CertificateRefs */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x16,/* [1604] 
OBJ_id_smime_aa_ets_RevocationRefs */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x17,/* [1615] OBJ_id_smime_aa_ets_certValues */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x18,/* [1626] OBJ_id_smime_aa_ets_revocationValues */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x19,/* [1637] OBJ_id_smime_aa_ets_escTimeStamp */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1A,/* [1648] OBJ_id_smime_aa_ets_certCRLTimestamp */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1B,/* [1659] OBJ_id_smime_aa_ets_archiveTimeStamp */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1C,/* [1670] OBJ_id_smime_aa_signatureType */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1D,/* [1681] OBJ_id_smime_aa_dvcs_dvc */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x01,/* [1692] OBJ_id_smime_alg_ESDHwith3DES */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x02,/* [1703] OBJ_id_smime_alg_ESDHwithRC2 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x03,/* [1714] OBJ_id_smime_alg_3DESwrap */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x04,/* [1725] OBJ_id_smime_alg_RC2wrap */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x05,/* [1736] OBJ_id_smime_alg_ESDH */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x06,/* [1747] OBJ_id_smime_alg_CMS3DESwrap */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x07,/* [1758] OBJ_id_smime_alg_CMSRC2wrap */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x04,0x01,/* [1769] OBJ_id_smime_cd_ldap */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x05,0x01,/* [1780] OBJ_id_smime_spq_ets_sqt_uri */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x05,0x02,/* [1791] OBJ_id_smime_spq_ets_sqt_unotice */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x01,/* [1802] OBJ_id_smime_cti_ets_proofOfOrigin */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x02,/* [1813] OBJ_id_smime_cti_ets_proofOfReceipt */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x03,/* [1824] 
OBJ_id_smime_cti_ets_proofOfDelivery */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x04,/* [1835] OBJ_id_smime_cti_ets_proofOfSender */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x05,/* [1846] OBJ_id_smime_cti_ets_proofOfApproval */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x06,/* [1857] OBJ_id_smime_cti_ets_proofOfCreation */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x04, /* [1868] OBJ_md4 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00, /* [1876] OBJ_id_pkix_mod */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x02, /* [1883] OBJ_id_qt */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04, /* [1890] OBJ_id_it */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05, /* [1897] OBJ_id_pkip */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x06, /* [1904] OBJ_id_alg */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07, /* [1911] OBJ_id_cmc */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x08, /* [1918] OBJ_id_on */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x09, /* [1925] OBJ_id_pda */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0A, /* [1932] OBJ_id_aca */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0B, /* [1939] OBJ_id_qcs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0C, /* [1946] OBJ_id_cct */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x01, /* [1953] OBJ_id_pkix1_explicit_88 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x02, /* [1961] OBJ_id_pkix1_implicit_88 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x03, /* [1969] OBJ_id_pkix1_explicit_93 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x04, /* [1977] OBJ_id_pkix1_implicit_93 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x05, /* [1985] OBJ_id_mod_crmf */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x06, /* [1993] OBJ_id_mod_cmc */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x07, /* [2001] OBJ_id_mod_kea_profile_88 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x08, /* [2009] OBJ_id_mod_kea_profile_93 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x09, /* [2017] OBJ_id_mod_cmp */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0A, /* [2025] OBJ_id_mod_qualified_cert_88 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0B, /* [2033] OBJ_id_mod_qualified_cert_93 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0C, /* 
[2041] OBJ_id_mod_attribute_cert */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0D, /* [2049] OBJ_id_mod_timestamp_protocol */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0E, /* [2057] OBJ_id_mod_ocsp */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0F, /* [2065] OBJ_id_mod_dvcs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x10, /* [2073] OBJ_id_mod_cmp2000 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x02, /* [2081] OBJ_biometricInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x03, /* [2089] OBJ_qcStatements */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x04, /* [2097] OBJ_ac_auditEntity */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x05, /* [2105] OBJ_ac_targeting */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x06, /* [2113] OBJ_aaControls */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x07, /* [2121] OBJ_sbgp_ipAddrBlock */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x08, /* [2129] OBJ_sbgp_autonomousSysNum */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x09, /* [2137] OBJ_sbgp_routerIdentifier */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x02,0x03, /* [2145] OBJ_textNotice */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x05, /* [2153] OBJ_ipsecEndSystem */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x06, /* [2161] OBJ_ipsecTunnel */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x07, /* [2169] OBJ_ipsecUser */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x0A, /* [2177] OBJ_dvcs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x01, /* [2185] OBJ_id_it_caProtEncCert */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x02, /* [2193] OBJ_id_it_signKeyPairTypes */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x03, /* [2201] OBJ_id_it_encKeyPairTypes */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x04, /* [2209] OBJ_id_it_preferredSymmAlg */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x05, /* [2217] OBJ_id_it_caKeyUpdateInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x06, /* [2225] OBJ_id_it_currentCRL */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x07, /* [2233] OBJ_id_it_unsupportedOIDs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x08, /* [2241] OBJ_id_it_subscriptionRequest */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x09, /* [2249] 
OBJ_id_it_subscriptionResponse */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0A, /* [2257] OBJ_id_it_keyPairParamReq */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0B, /* [2265] OBJ_id_it_keyPairParamRep */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0C, /* [2273] OBJ_id_it_revPassphrase */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0D, /* [2281] OBJ_id_it_implicitConfirm */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0E, /* [2289] OBJ_id_it_confirmWaitTime */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0F, /* [2297] OBJ_id_it_origPKIMessage */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01, /* [2305] OBJ_id_regCtrl */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x02, /* [2313] OBJ_id_regInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x01,/* [2321] OBJ_id_regCtrl_regToken */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x02,/* [2330] OBJ_id_regCtrl_authenticator */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x03,/* [2339] OBJ_id_regCtrl_pkiPublicationInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x04,/* [2348] OBJ_id_regCtrl_pkiArchiveOptions */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x05,/* [2357] OBJ_id_regCtrl_oldCertID */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x06,/* [2366] OBJ_id_regCtrl_protocolEncrKey */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x02,0x01,/* [2375] OBJ_id_regInfo_utf8Pairs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x02,0x02,/* [2384] OBJ_id_regInfo_certReq */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x01, /* [2393] OBJ_id_alg_des40 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x02, /* [2401] OBJ_id_alg_noSignature */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x03, /* [2409] OBJ_id_alg_dh_sig_hmac_sha1 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x04, /* [2417] OBJ_id_alg_dh_pop */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x01, /* [2425] OBJ_id_cmc_statusInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x02, /* [2433] OBJ_id_cmc_identification */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x03, /* [2441] OBJ_id_cmc_identityProof */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x04, /* [2449] OBJ_id_cmc_dataReturn */ 
-0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x05, /* [2457] OBJ_id_cmc_transactionId */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x06, /* [2465] OBJ_id_cmc_senderNonce */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x07, /* [2473] OBJ_id_cmc_recipientNonce */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x08, /* [2481] OBJ_id_cmc_addExtensions */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x09, /* [2489] OBJ_id_cmc_encryptedPOP */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x0A, /* [2497] OBJ_id_cmc_decryptedPOP */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x0B, /* [2505] OBJ_id_cmc_lraPOPWitness */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x0F, /* [2513] OBJ_id_cmc_getCert */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x10, /* [2521] OBJ_id_cmc_getCRL */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x11, /* [2529] OBJ_id_cmc_revokeRequest */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x12, /* [2537] OBJ_id_cmc_regInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x13, /* [2545] OBJ_id_cmc_responseInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x15, /* [2553] OBJ_id_cmc_queryPending */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x16, /* [2561] OBJ_id_cmc_popLinkRandom */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x17, /* [2569] OBJ_id_cmc_popLinkWitness */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x18, /* [2577] OBJ_id_cmc_confirmCertAcceptance */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x08,0x01, /* [2585] OBJ_id_on_personalData */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x01, /* [2593] OBJ_id_pda_dateOfBirth */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x02, /* [2601] OBJ_id_pda_placeOfBirth */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x03, /* [2609] OBJ_id_pda_gender */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x04, /* [2617] OBJ_id_pda_countryOfCitizenship */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x05, /* [2625] OBJ_id_pda_countryOfResidence */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x01, /* [2633] OBJ_id_aca_authenticationInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x02, /* [2641] OBJ_id_aca_accessIdentity */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x03, /* [2649] OBJ_id_aca_chargingIdentity */ 
-0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x04, /* [2657] OBJ_id_aca_group */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x05, /* [2665] OBJ_id_aca_role */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0B,0x01, /* [2673] OBJ_id_qcs_pkixQCSyntax_v1 */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0C,0x01, /* [2681] OBJ_id_cct_crs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0C,0x02, /* [2689] OBJ_id_cct_PKIData */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0C,0x03, /* [2697] OBJ_id_cct_PKIResponse */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x03, /* [2705] OBJ_ad_timeStamping */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x04, /* [2713] OBJ_ad_dvcs */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x01,/* [2721] OBJ_id_pkix_OCSP_basic */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x02,/* [2730] OBJ_id_pkix_OCSP_Nonce */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x03,/* [2739] OBJ_id_pkix_OCSP_CrlID */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x04,/* [2748] OBJ_id_pkix_OCSP_acceptableResponses */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x05,/* [2757] OBJ_id_pkix_OCSP_noCheck */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x06,/* [2766] OBJ_id_pkix_OCSP_archiveCutoff */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x07,/* [2775] OBJ_id_pkix_OCSP_serviceLocator */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x08,/* [2784] OBJ_id_pkix_OCSP_extendedStatus */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x09,/* [2793] OBJ_id_pkix_OCSP_valid */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x0A,/* [2802] OBJ_id_pkix_OCSP_path */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x0B,/* [2811] OBJ_id_pkix_OCSP_trustRoot */ -0x2B,0x0E,0x03,0x02, /* [2820] OBJ_algorithm */ -0x2B,0x0E,0x03,0x02,0x0B, /* [2824] OBJ_rsaSignature */ -0x55,0x08, /* [2829] OBJ_X500algorithms */ -0x2B, /* [2831] OBJ_org */ -0x2B,0x06, /* [2832] OBJ_dod */ -0x2B,0x06,0x01, /* [2834] OBJ_iana */ -0x2B,0x06,0x01,0x01, /* [2837] OBJ_Directory */ -0x2B,0x06,0x01,0x02, /* [2841] OBJ_Management */ -0x2B,0x06,0x01,0x03, /* [2845] OBJ_Experimental */ -0x2B,0x06,0x01,0x04, /* [2849] OBJ_Private */ 
-0x2B,0x06,0x01,0x05, /* [2853] OBJ_Security */ -0x2B,0x06,0x01,0x06, /* [2857] OBJ_SNMPv2 */ -0x2B,0x06,0x01,0x07, /* [2861] OBJ_Mail */ -0x2B,0x06,0x01,0x04,0x01, /* [2865] OBJ_Enterprises */ -0x2B,0x06,0x01,0x04,0x01,0x8B,0x3A,0x82,0x58,/* [2870] OBJ_dcObject */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x19,/* [2879] OBJ_domainComponent */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x0D,/* [2889] OBJ_Domain */ -0x00, /* [2899] OBJ_joint_iso_ccitt */ -0x55,0x01,0x05, /* [2900] OBJ_selected_attribute_types */ -0x55,0x01,0x05,0x37, /* [2903] OBJ_clearance */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x03,/* [2907] OBJ_md4WithRSAEncryption */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0A, /* [2916] OBJ_ac_proxying */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0B, /* [2924] OBJ_sinfo_access */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x06, /* [2932] OBJ_id_aca_encAttrs */ -0x55,0x04,0x48, /* [2940] OBJ_role */ -0x55,0x1D,0x24, /* [2943] OBJ_policy_constraints */ -0x55,0x1D,0x37, /* [2946] OBJ_target_information */ -0x55,0x1D,0x38, /* [2949] OBJ_no_rev_avail */ -0x00, /* [2952] OBJ_ccitt */ -0x2A,0x86,0x48,0xCE,0x3D, /* [2953] OBJ_ansi_X9_62 */ -0x2A,0x86,0x48,0xCE,0x3D,0x01,0x01, /* [2958] OBJ_X9_62_prime_field */ -0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02, /* [2965] OBJ_X9_62_characteristic_two_field */ -0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01, /* [2972] OBJ_X9_62_id_ecPublicKey */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x01, /* [2979] OBJ_X9_62_prime192v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x02, /* [2987] OBJ_X9_62_prime192v2 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x03, /* [2995] OBJ_X9_62_prime192v3 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x04, /* [3003] OBJ_X9_62_prime239v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x05, /* [3011] OBJ_X9_62_prime239v2 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x06, /* [3019] OBJ_X9_62_prime239v3 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x07, /* [3027] OBJ_X9_62_prime256v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x01, /* [3035] OBJ_ecdsa_with_SHA1 */ 
-0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x11,0x01,/* [3042] OBJ_ms_csp_name */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x01,/* [3051] OBJ_aes_128_ecb */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x02,/* [3060] OBJ_aes_128_cbc */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x03,/* [3069] OBJ_aes_128_ofb128 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x04,/* [3078] OBJ_aes_128_cfb128 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x15,/* [3087] OBJ_aes_192_ecb */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x16,/* [3096] OBJ_aes_192_cbc */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x17,/* [3105] OBJ_aes_192_ofb128 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x18,/* [3114] OBJ_aes_192_cfb128 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x29,/* [3123] OBJ_aes_256_ecb */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2A,/* [3132] OBJ_aes_256_cbc */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2B,/* [3141] OBJ_aes_256_ofb128 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2C,/* [3150] OBJ_aes_256_cfb128 */ -0x55,0x1D,0x17, /* [3159] OBJ_hold_instruction_code */ -0x2A,0x86,0x48,0xCE,0x38,0x02,0x01, /* [3162] OBJ_hold_instruction_none */ -0x2A,0x86,0x48,0xCE,0x38,0x02,0x02, /* [3169] OBJ_hold_instruction_call_issuer */ -0x2A,0x86,0x48,0xCE,0x38,0x02,0x03, /* [3176] OBJ_hold_instruction_reject */ -0x09, /* [3183] OBJ_data */ -0x09,0x92,0x26, /* [3184] OBJ_pss */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C, /* [3187] OBJ_ucl */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64, /* [3194] OBJ_pilot */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,/* [3202] OBJ_pilotAttributeType */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x03,/* [3211] OBJ_pilotAttributeSyntax */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,/* [3220] OBJ_pilotObjectClass */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x0A,/* [3229] OBJ_pilotGroups */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x03,0x04,/* [3238] OBJ_iA5StringSyntax */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x03,0x05,/* [3248] OBJ_caseIgnoreIA5StringSyntax */ 
-0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x03,/* [3258] OBJ_pilotObject */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x04,/* [3268] OBJ_pilotPerson */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x05,/* [3278] OBJ_account */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x06,/* [3288] OBJ_document */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x07,/* [3298] OBJ_room */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x09,/* [3308] OBJ_documentSeries */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x0E,/* [3318] OBJ_rFC822localPart */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x0F,/* [3328] OBJ_dNSDomain */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x11,/* [3338] OBJ_domainRelatedObject */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x12,/* [3348] OBJ_friendlyCountry */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x13,/* [3358] OBJ_simpleSecurityObject */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x14,/* [3368] OBJ_pilotOrganization */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x15,/* [3378] OBJ_pilotDSA */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x16,/* [3388] OBJ_qualityLabelledData */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x01,/* [3398] OBJ_userId */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x02,/* [3408] OBJ_textEncodedORAddress */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x03,/* [3418] OBJ_rfc822Mailbox */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x04,/* [3428] OBJ_info */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x05,/* [3438] OBJ_favouriteDrink */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x06,/* [3448] OBJ_roomNumber */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x07,/* [3458] OBJ_photo */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x08,/* [3468] OBJ_userClass */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x09,/* [3478] OBJ_host */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0A,/* [3488] OBJ_manager */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0B,/* [3498] 
OBJ_documentIdentifier */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0C,/* [3508] OBJ_documentTitle */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0D,/* [3518] OBJ_documentVersion */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0E,/* [3528] OBJ_documentAuthor */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0F,/* [3538] OBJ_documentLocation */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x14,/* [3548] OBJ_homeTelephoneNumber */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x15,/* [3558] OBJ_secretary */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x16,/* [3568] OBJ_otherMailbox */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x17,/* [3578] OBJ_lastModifiedTime */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x18,/* [3588] OBJ_lastModifiedBy */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1A,/* [3598] OBJ_aRecord */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1B,/* [3608] OBJ_pilotAttributeType27 */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1C,/* [3618] OBJ_mXRecord */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1D,/* [3628] OBJ_nSRecord */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1E,/* [3638] OBJ_sOARecord */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1F,/* [3648] OBJ_cNAMERecord */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x25,/* [3658] OBJ_associatedDomain */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x26,/* [3668] OBJ_associatedName */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x27,/* [3678] OBJ_homePostalAddress */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x28,/* [3688] OBJ_personalTitle */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x29,/* [3698] OBJ_mobileTelephoneNumber */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2A,/* [3708] OBJ_pagerTelephoneNumber */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2B,/* [3718] OBJ_friendlyCountryName */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2D,/* [3728] OBJ_organizationalStatus */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2E,/* 
[3738] OBJ_janetMailbox */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2F,/* [3748] OBJ_mailPreferenceOption */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x30,/* [3758] OBJ_buildingName */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x31,/* [3768] OBJ_dSAQuality */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x32,/* [3778] OBJ_singleLevelQuality */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x33,/* [3788] OBJ_subtreeMinimumQuality */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x34,/* [3798] OBJ_subtreeMaximumQuality */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x35,/* [3808] OBJ_personalSignature */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x36,/* [3818] OBJ_dITRedirect */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x37,/* [3828] OBJ_audio */ -0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x38,/* [3838] OBJ_documentPublisher */ -0x55,0x04,0x2D, /* [3848] OBJ_x500UniqueIdentifier */ -0x2B,0x06,0x01,0x07,0x01, /* [3851] OBJ_mime_mhs */ -0x2B,0x06,0x01,0x07,0x01,0x01, /* [3856] OBJ_mime_mhs_headings */ -0x2B,0x06,0x01,0x07,0x01,0x02, /* [3862] OBJ_mime_mhs_bodies */ -0x2B,0x06,0x01,0x07,0x01,0x01,0x01, /* [3868] OBJ_id_hex_partial_message */ -0x2B,0x06,0x01,0x07,0x01,0x01,0x02, /* [3875] OBJ_id_hex_multipart_message */ -0x55,0x04,0x2C, /* [3882] OBJ_generationQualifier */ -0x55,0x04,0x41, /* [3885] OBJ_pseudonym */ -0x67,0x2A, /* [3888] OBJ_id_set */ -0x67,0x2A,0x00, /* [3890] OBJ_set_ctype */ -0x67,0x2A,0x01, /* [3893] OBJ_set_msgExt */ -0x67,0x2A,0x03, /* [3896] OBJ_set_attr */ -0x67,0x2A,0x05, /* [3899] OBJ_set_policy */ -0x67,0x2A,0x07, /* [3902] OBJ_set_certExt */ -0x67,0x2A,0x08, /* [3905] OBJ_set_brand */ -0x67,0x2A,0x00,0x00, /* [3908] OBJ_setct_PANData */ -0x67,0x2A,0x00,0x01, /* [3912] OBJ_setct_PANToken */ -0x67,0x2A,0x00,0x02, /* [3916] OBJ_setct_PANOnly */ -0x67,0x2A,0x00,0x03, /* [3920] OBJ_setct_OIData */ -0x67,0x2A,0x00,0x04, /* [3924] OBJ_setct_PI */ -0x67,0x2A,0x00,0x05, /* [3928] OBJ_setct_PIData */ -0x67,0x2A,0x00,0x06, 
/* [3932] OBJ_setct_PIDataUnsigned */ -0x67,0x2A,0x00,0x07, /* [3936] OBJ_setct_HODInput */ -0x67,0x2A,0x00,0x08, /* [3940] OBJ_setct_AuthResBaggage */ -0x67,0x2A,0x00,0x09, /* [3944] OBJ_setct_AuthRevReqBaggage */ -0x67,0x2A,0x00,0x0A, /* [3948] OBJ_setct_AuthRevResBaggage */ -0x67,0x2A,0x00,0x0B, /* [3952] OBJ_setct_CapTokenSeq */ -0x67,0x2A,0x00,0x0C, /* [3956] OBJ_setct_PInitResData */ -0x67,0x2A,0x00,0x0D, /* [3960] OBJ_setct_PI_TBS */ -0x67,0x2A,0x00,0x0E, /* [3964] OBJ_setct_PResData */ -0x67,0x2A,0x00,0x10, /* [3968] OBJ_setct_AuthReqTBS */ -0x67,0x2A,0x00,0x11, /* [3972] OBJ_setct_AuthResTBS */ -0x67,0x2A,0x00,0x12, /* [3976] OBJ_setct_AuthResTBSX */ -0x67,0x2A,0x00,0x13, /* [3980] OBJ_setct_AuthTokenTBS */ -0x67,0x2A,0x00,0x14, /* [3984] OBJ_setct_CapTokenData */ -0x67,0x2A,0x00,0x15, /* [3988] OBJ_setct_CapTokenTBS */ -0x67,0x2A,0x00,0x16, /* [3992] OBJ_setct_AcqCardCodeMsg */ -0x67,0x2A,0x00,0x17, /* [3996] OBJ_setct_AuthRevReqTBS */ -0x67,0x2A,0x00,0x18, /* [4000] OBJ_setct_AuthRevResData */ -0x67,0x2A,0x00,0x19, /* [4004] OBJ_setct_AuthRevResTBS */ -0x67,0x2A,0x00,0x1A, /* [4008] OBJ_setct_CapReqTBS */ -0x67,0x2A,0x00,0x1B, /* [4012] OBJ_setct_CapReqTBSX */ -0x67,0x2A,0x00,0x1C, /* [4016] OBJ_setct_CapResData */ -0x67,0x2A,0x00,0x1D, /* [4020] OBJ_setct_CapRevReqTBS */ -0x67,0x2A,0x00,0x1E, /* [4024] OBJ_setct_CapRevReqTBSX */ -0x67,0x2A,0x00,0x1F, /* [4028] OBJ_setct_CapRevResData */ -0x67,0x2A,0x00,0x20, /* [4032] OBJ_setct_CredReqTBS */ -0x67,0x2A,0x00,0x21, /* [4036] OBJ_setct_CredReqTBSX */ -0x67,0x2A,0x00,0x22, /* [4040] OBJ_setct_CredResData */ -0x67,0x2A,0x00,0x23, /* [4044] OBJ_setct_CredRevReqTBS */ -0x67,0x2A,0x00,0x24, /* [4048] OBJ_setct_CredRevReqTBSX */ -0x67,0x2A,0x00,0x25, /* [4052] OBJ_setct_CredRevResData */ -0x67,0x2A,0x00,0x26, /* [4056] OBJ_setct_PCertReqData */ -0x67,0x2A,0x00,0x27, /* [4060] OBJ_setct_PCertResTBS */ -0x67,0x2A,0x00,0x28, /* [4064] OBJ_setct_BatchAdminReqData */ -0x67,0x2A,0x00,0x29, /* [4068] 
OBJ_setct_BatchAdminResData */ -0x67,0x2A,0x00,0x2A, /* [4072] OBJ_setct_CardCInitResTBS */ -0x67,0x2A,0x00,0x2B, /* [4076] OBJ_setct_MeAqCInitResTBS */ -0x67,0x2A,0x00,0x2C, /* [4080] OBJ_setct_RegFormResTBS */ -0x67,0x2A,0x00,0x2D, /* [4084] OBJ_setct_CertReqData */ -0x67,0x2A,0x00,0x2E, /* [4088] OBJ_setct_CertReqTBS */ -0x67,0x2A,0x00,0x2F, /* [4092] OBJ_setct_CertResData */ -0x67,0x2A,0x00,0x30, /* [4096] OBJ_setct_CertInqReqTBS */ -0x67,0x2A,0x00,0x31, /* [4100] OBJ_setct_ErrorTBS */ -0x67,0x2A,0x00,0x32, /* [4104] OBJ_setct_PIDualSignedTBE */ -0x67,0x2A,0x00,0x33, /* [4108] OBJ_setct_PIUnsignedTBE */ -0x67,0x2A,0x00,0x34, /* [4112] OBJ_setct_AuthReqTBE */ -0x67,0x2A,0x00,0x35, /* [4116] OBJ_setct_AuthResTBE */ -0x67,0x2A,0x00,0x36, /* [4120] OBJ_setct_AuthResTBEX */ -0x67,0x2A,0x00,0x37, /* [4124] OBJ_setct_AuthTokenTBE */ -0x67,0x2A,0x00,0x38, /* [4128] OBJ_setct_CapTokenTBE */ -0x67,0x2A,0x00,0x39, /* [4132] OBJ_setct_CapTokenTBEX */ -0x67,0x2A,0x00,0x3A, /* [4136] OBJ_setct_AcqCardCodeMsgTBE */ -0x67,0x2A,0x00,0x3B, /* [4140] OBJ_setct_AuthRevReqTBE */ -0x67,0x2A,0x00,0x3C, /* [4144] OBJ_setct_AuthRevResTBE */ -0x67,0x2A,0x00,0x3D, /* [4148] OBJ_setct_AuthRevResTBEB */ -0x67,0x2A,0x00,0x3E, /* [4152] OBJ_setct_CapReqTBE */ -0x67,0x2A,0x00,0x3F, /* [4156] OBJ_setct_CapReqTBEX */ -0x67,0x2A,0x00,0x40, /* [4160] OBJ_setct_CapResTBE */ -0x67,0x2A,0x00,0x41, /* [4164] OBJ_setct_CapRevReqTBE */ -0x67,0x2A,0x00,0x42, /* [4168] OBJ_setct_CapRevReqTBEX */ -0x67,0x2A,0x00,0x43, /* [4172] OBJ_setct_CapRevResTBE */ -0x67,0x2A,0x00,0x44, /* [4176] OBJ_setct_CredReqTBE */ -0x67,0x2A,0x00,0x45, /* [4180] OBJ_setct_CredReqTBEX */ -0x67,0x2A,0x00,0x46, /* [4184] OBJ_setct_CredResTBE */ -0x67,0x2A,0x00,0x47, /* [4188] OBJ_setct_CredRevReqTBE */ -0x67,0x2A,0x00,0x48, /* [4192] OBJ_setct_CredRevReqTBEX */ -0x67,0x2A,0x00,0x49, /* [4196] OBJ_setct_CredRevResTBE */ -0x67,0x2A,0x00,0x4A, /* [4200] OBJ_setct_BatchAdminReqTBE */ -0x67,0x2A,0x00,0x4B, /* [4204] 
OBJ_setct_BatchAdminResTBE */ -0x67,0x2A,0x00,0x4C, /* [4208] OBJ_setct_RegFormReqTBE */ -0x67,0x2A,0x00,0x4D, /* [4212] OBJ_setct_CertReqTBE */ -0x67,0x2A,0x00,0x4E, /* [4216] OBJ_setct_CertReqTBEX */ -0x67,0x2A,0x00,0x4F, /* [4220] OBJ_setct_CertResTBE */ -0x67,0x2A,0x00,0x50, /* [4224] OBJ_setct_CRLNotificationTBS */ -0x67,0x2A,0x00,0x51, /* [4228] OBJ_setct_CRLNotificationResTBS */ -0x67,0x2A,0x00,0x52, /* [4232] OBJ_setct_BCIDistributionTBS */ -0x67,0x2A,0x01,0x01, /* [4236] OBJ_setext_genCrypt */ -0x67,0x2A,0x01,0x03, /* [4240] OBJ_setext_miAuth */ -0x67,0x2A,0x01,0x04, /* [4244] OBJ_setext_pinSecure */ -0x67,0x2A,0x01,0x05, /* [4248] OBJ_setext_pinAny */ -0x67,0x2A,0x01,0x07, /* [4252] OBJ_setext_track2 */ -0x67,0x2A,0x01,0x08, /* [4256] OBJ_setext_cv */ -0x67,0x2A,0x05,0x00, /* [4260] OBJ_set_policy_root */ -0x67,0x2A,0x07,0x00, /* [4264] OBJ_setCext_hashedRoot */ -0x67,0x2A,0x07,0x01, /* [4268] OBJ_setCext_certType */ -0x67,0x2A,0x07,0x02, /* [4272] OBJ_setCext_merchData */ -0x67,0x2A,0x07,0x03, /* [4276] OBJ_setCext_cCertRequired */ -0x67,0x2A,0x07,0x04, /* [4280] OBJ_setCext_tunneling */ -0x67,0x2A,0x07,0x05, /* [4284] OBJ_setCext_setExt */ -0x67,0x2A,0x07,0x06, /* [4288] OBJ_setCext_setQualf */ -0x67,0x2A,0x07,0x07, /* [4292] OBJ_setCext_PGWYcapabilities */ -0x67,0x2A,0x07,0x08, /* [4296] OBJ_setCext_TokenIdentifier */ -0x67,0x2A,0x07,0x09, /* [4300] OBJ_setCext_Track2Data */ -0x67,0x2A,0x07,0x0A, /* [4304] OBJ_setCext_TokenType */ -0x67,0x2A,0x07,0x0B, /* [4308] OBJ_setCext_IssuerCapabilities */ -0x67,0x2A,0x03,0x00, /* [4312] OBJ_setAttr_Cert */ -0x67,0x2A,0x03,0x01, /* [4316] OBJ_setAttr_PGWYcap */ -0x67,0x2A,0x03,0x02, /* [4320] OBJ_setAttr_TokenType */ -0x67,0x2A,0x03,0x03, /* [4324] OBJ_setAttr_IssCap */ -0x67,0x2A,0x03,0x00,0x00, /* [4328] OBJ_set_rootKeyThumb */ -0x67,0x2A,0x03,0x00,0x01, /* [4333] OBJ_set_addPolicy */ -0x67,0x2A,0x03,0x02,0x01, /* [4338] OBJ_setAttr_Token_EMV */ -0x67,0x2A,0x03,0x02,0x02, /* [4343] OBJ_setAttr_Token_B0Prime */ 
-0x67,0x2A,0x03,0x03,0x03, /* [4348] OBJ_setAttr_IssCap_CVM */ -0x67,0x2A,0x03,0x03,0x04, /* [4353] OBJ_setAttr_IssCap_T2 */ -0x67,0x2A,0x03,0x03,0x05, /* [4358] OBJ_setAttr_IssCap_Sig */ -0x67,0x2A,0x03,0x03,0x03,0x01, /* [4363] OBJ_setAttr_GenCryptgrm */ -0x67,0x2A,0x03,0x03,0x04,0x01, /* [4369] OBJ_setAttr_T2Enc */ -0x67,0x2A,0x03,0x03,0x04,0x02, /* [4375] OBJ_setAttr_T2cleartxt */ -0x67,0x2A,0x03,0x03,0x05,0x01, /* [4381] OBJ_setAttr_TokICCsig */ -0x67,0x2A,0x03,0x03,0x05,0x02, /* [4387] OBJ_setAttr_SecDevSig */ -0x67,0x2A,0x08,0x01, /* [4393] OBJ_set_brand_IATA_ATA */ -0x67,0x2A,0x08,0x1E, /* [4397] OBJ_set_brand_Diners */ -0x67,0x2A,0x08,0x22, /* [4401] OBJ_set_brand_AmericanExpress */ -0x67,0x2A,0x08,0x23, /* [4405] OBJ_set_brand_JCB */ -0x67,0x2A,0x08,0x04, /* [4409] OBJ_set_brand_Visa */ -0x67,0x2A,0x08,0x05, /* [4413] OBJ_set_brand_MasterCard */ -0x67,0x2A,0x08,0xAE,0x7B, /* [4417] OBJ_set_brand_Novus */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x0A, /* [4422] OBJ_des_cdmf */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x06,/* [4430] OBJ_rsaOAEPEncryptionSET */ -0x00, /* [4439] OBJ_itu_t */ -0x50, /* [4440] OBJ_joint_iso_itu_t */ -0x67, /* [4441] OBJ_international_organizations */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x14,0x02,0x02,/* [4442] OBJ_ms_smartcard_login */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x14,0x02,0x03,/* [4452] OBJ_ms_upn */ -0x55,0x04,0x09, /* [4462] OBJ_streetAddress */ -0x55,0x04,0x11, /* [4465] OBJ_postalCode */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x15, /* [4468] OBJ_id_ppl */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0E, /* [4475] OBJ_proxyCertInfo */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x15,0x00, /* [4483] OBJ_id_ppl_anyLanguage */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x15,0x01, /* [4491] OBJ_id_ppl_inheritAll */ -0x55,0x1D,0x1E, /* [4499] OBJ_name_constraints */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x15,0x02, /* [4502] OBJ_Independent */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,/* [4510] OBJ_sha256WithRSAEncryption */ 
-0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,/* [4519] OBJ_sha384WithRSAEncryption */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0D,/* [4528] OBJ_sha512WithRSAEncryption */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0E,/* [4537] OBJ_sha224WithRSAEncryption */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x01,/* [4546] OBJ_sha256 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x02,/* [4555] OBJ_sha384 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x03,/* [4564] OBJ_sha512 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x04,/* [4573] OBJ_sha224 */ -0x2B, /* [4582] OBJ_identified_organization */ -0x2B,0x81,0x04, /* [4583] OBJ_certicom_arc */ -0x67,0x2B, /* [4586] OBJ_wap */ -0x67,0x2B,0x01, /* [4588] OBJ_wap_wsg */ -0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03, /* [4591] OBJ_X9_62_id_characteristic_two_basis */ -0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03,0x01,/* [4599] OBJ_X9_62_onBasis */ -0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03,0x02,/* [4608] OBJ_X9_62_tpBasis */ -0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03,0x03,/* [4617] OBJ_X9_62_ppBasis */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x01, /* [4626] OBJ_X9_62_c2pnb163v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x02, /* [4634] OBJ_X9_62_c2pnb163v2 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x03, /* [4642] OBJ_X9_62_c2pnb163v3 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x04, /* [4650] OBJ_X9_62_c2pnb176v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x05, /* [4658] OBJ_X9_62_c2tnb191v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x06, /* [4666] OBJ_X9_62_c2tnb191v2 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x07, /* [4674] OBJ_X9_62_c2tnb191v3 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x08, /* [4682] OBJ_X9_62_c2onb191v4 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x09, /* [4690] OBJ_X9_62_c2onb191v5 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0A, /* [4698] OBJ_X9_62_c2pnb208w1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0B, /* [4706] OBJ_X9_62_c2tnb239v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0C, /* [4714] OBJ_X9_62_c2tnb239v2 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0D, /* [4722] 
OBJ_X9_62_c2tnb239v3 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0E, /* [4730] OBJ_X9_62_c2onb239v4 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0F, /* [4738] OBJ_X9_62_c2onb239v5 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x10, /* [4746] OBJ_X9_62_c2pnb272w1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x11, /* [4754] OBJ_X9_62_c2pnb304w1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x12, /* [4762] OBJ_X9_62_c2tnb359v1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x13, /* [4770] OBJ_X9_62_c2pnb368w1 */ -0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x14, /* [4778] OBJ_X9_62_c2tnb431r1 */ -0x2B,0x81,0x04,0x00,0x06, /* [4786] OBJ_secp112r1 */ -0x2B,0x81,0x04,0x00,0x07, /* [4791] OBJ_secp112r2 */ -0x2B,0x81,0x04,0x00,0x1C, /* [4796] OBJ_secp128r1 */ -0x2B,0x81,0x04,0x00,0x1D, /* [4801] OBJ_secp128r2 */ -0x2B,0x81,0x04,0x00,0x09, /* [4806] OBJ_secp160k1 */ -0x2B,0x81,0x04,0x00,0x08, /* [4811] OBJ_secp160r1 */ -0x2B,0x81,0x04,0x00,0x1E, /* [4816] OBJ_secp160r2 */ -0x2B,0x81,0x04,0x00,0x1F, /* [4821] OBJ_secp192k1 */ -0x2B,0x81,0x04,0x00,0x20, /* [4826] OBJ_secp224k1 */ -0x2B,0x81,0x04,0x00,0x21, /* [4831] OBJ_secp224r1 */ -0x2B,0x81,0x04,0x00,0x0A, /* [4836] OBJ_secp256k1 */ -0x2B,0x81,0x04,0x00,0x22, /* [4841] OBJ_secp384r1 */ -0x2B,0x81,0x04,0x00,0x23, /* [4846] OBJ_secp521r1 */ -0x2B,0x81,0x04,0x00,0x04, /* [4851] OBJ_sect113r1 */ -0x2B,0x81,0x04,0x00,0x05, /* [4856] OBJ_sect113r2 */ -0x2B,0x81,0x04,0x00,0x16, /* [4861] OBJ_sect131r1 */ -0x2B,0x81,0x04,0x00,0x17, /* [4866] OBJ_sect131r2 */ -0x2B,0x81,0x04,0x00,0x01, /* [4871] OBJ_sect163k1 */ -0x2B,0x81,0x04,0x00,0x02, /* [4876] OBJ_sect163r1 */ -0x2B,0x81,0x04,0x00,0x0F, /* [4881] OBJ_sect163r2 */ -0x2B,0x81,0x04,0x00,0x18, /* [4886] OBJ_sect193r1 */ -0x2B,0x81,0x04,0x00,0x19, /* [4891] OBJ_sect193r2 */ -0x2B,0x81,0x04,0x00,0x1A, /* [4896] OBJ_sect233k1 */ -0x2B,0x81,0x04,0x00,0x1B, /* [4901] OBJ_sect233r1 */ -0x2B,0x81,0x04,0x00,0x03, /* [4906] OBJ_sect239k1 */ -0x2B,0x81,0x04,0x00,0x10, /* [4911] OBJ_sect283k1 */ -0x2B,0x81,0x04,0x00,0x11, /* [4916] 
OBJ_sect283r1 */ -0x2B,0x81,0x04,0x00,0x24, /* [4921] OBJ_sect409k1 */ -0x2B,0x81,0x04,0x00,0x25, /* [4926] OBJ_sect409r1 */ -0x2B,0x81,0x04,0x00,0x26, /* [4931] OBJ_sect571k1 */ -0x2B,0x81,0x04,0x00,0x27, /* [4936] OBJ_sect571r1 */ -0x67,0x2B,0x01,0x04,0x01, /* [4941] OBJ_wap_wsg_idm_ecid_wtls1 */ -0x67,0x2B,0x01,0x04,0x03, /* [4946] OBJ_wap_wsg_idm_ecid_wtls3 */ -0x67,0x2B,0x01,0x04,0x04, /* [4951] OBJ_wap_wsg_idm_ecid_wtls4 */ -0x67,0x2B,0x01,0x04,0x05, /* [4956] OBJ_wap_wsg_idm_ecid_wtls5 */ -0x67,0x2B,0x01,0x04,0x06, /* [4961] OBJ_wap_wsg_idm_ecid_wtls6 */ -0x67,0x2B,0x01,0x04,0x07, /* [4966] OBJ_wap_wsg_idm_ecid_wtls7 */ -0x67,0x2B,0x01,0x04,0x08, /* [4971] OBJ_wap_wsg_idm_ecid_wtls8 */ -0x67,0x2B,0x01,0x04,0x09, /* [4976] OBJ_wap_wsg_idm_ecid_wtls9 */ -0x67,0x2B,0x01,0x04,0x0A, /* [4981] OBJ_wap_wsg_idm_ecid_wtls10 */ -0x67,0x2B,0x01,0x04,0x0B, /* [4986] OBJ_wap_wsg_idm_ecid_wtls11 */ -0x67,0x2B,0x01,0x04,0x0C, /* [4991] OBJ_wap_wsg_idm_ecid_wtls12 */ -0x55,0x1D,0x20,0x00, /* [4996] OBJ_any_policy */ -0x55,0x1D,0x21, /* [5000] OBJ_policy_mappings */ -0x55,0x1D,0x36, /* [5003] OBJ_inhibit_any_policy */ -0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x01,0x02,/* [5006] OBJ_camellia_128_cbc */ -0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x01,0x03,/* [5017] OBJ_camellia_192_cbc */ -0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x01,0x04,/* [5028] OBJ_camellia_256_cbc */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x01, /* [5039] OBJ_camellia_128_ecb */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x15, /* [5047] OBJ_camellia_192_ecb */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x29, /* [5055] OBJ_camellia_256_ecb */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x04, /* [5063] OBJ_camellia_128_cfb128 */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x18, /* [5071] OBJ_camellia_192_cfb128 */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x2C, /* [5079] OBJ_camellia_256_cfb128 */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x03, /* [5087] OBJ_camellia_128_ofb128 */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x17, /* [5095] 
OBJ_camellia_192_ofb128 */ -0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x2B, /* [5103] OBJ_camellia_256_ofb128 */ -0x55,0x1D,0x09, /* [5111] OBJ_subject_directory_attributes */ -0x55,0x1D,0x1C, /* [5114] OBJ_issuing_distribution_point */ -0x55,0x1D,0x1D, /* [5117] OBJ_certificate_issuer */ -0x2A,0x83,0x1A,0x8C,0x9A,0x44, /* [5120] OBJ_kisa */ -0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x03, /* [5126] OBJ_seed_ecb */ -0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x04, /* [5134] OBJ_seed_cbc */ -0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x06, /* [5142] OBJ_seed_ofb128 */ -0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x05, /* [5150] OBJ_seed_cfb128 */ -0x2B,0x06,0x01,0x05,0x05,0x08,0x01,0x01, /* [5158] OBJ_hmac_md5 */ -0x2B,0x06,0x01,0x05,0x05,0x08,0x01,0x02, /* [5166] OBJ_hmac_sha1 */ -0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x0D,/* [5174] OBJ_id_PasswordBasedMAC */ -0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x1E,/* [5183] OBJ_id_DHBasedMac */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x10, /* [5192] OBJ_id_it_suppLangTags */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x05, /* [5200] OBJ_caRepository */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x09,/* [5208] OBJ_id_smime_ct_compressedData */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x1B,/* [5219] OBJ_id_ct_asciiTextWithCRLF */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x05,/* [5230] OBJ_id_aes128_wrap */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x19,/* [5239] OBJ_id_aes192_wrap */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2D,/* [5248] OBJ_id_aes256_wrap */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x02, /* [5257] OBJ_ecdsa_with_Recommended */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03, /* [5264] OBJ_ecdsa_with_Specified */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x01, /* [5271] OBJ_ecdsa_with_SHA224 */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x02, /* [5279] OBJ_ecdsa_with_SHA256 */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03, /* [5287] OBJ_ecdsa_with_SHA384 */ -0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x04, /* [5295] OBJ_ecdsa_with_SHA512 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x06, /* 
[5303] OBJ_hmacWithMD5 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x08, /* [5311] OBJ_hmacWithSHA224 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x09, /* [5319] OBJ_hmacWithSHA256 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x0A, /* [5327] OBJ_hmacWithSHA384 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x0B, /* [5335] OBJ_hmacWithSHA512 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x03,0x01,/* [5343] OBJ_dsa_with_SHA224 */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x03,0x02,/* [5352] OBJ_dsa_with_SHA256 */ -0x28,0xCF,0x06,0x03,0x00,0x37, /* [5361] OBJ_whirlpool */ -0x2A,0x85,0x03,0x02,0x02, /* [5367] OBJ_cryptopro */ -0x2A,0x85,0x03,0x02,0x09, /* [5372] OBJ_cryptocom */ -0x2A,0x85,0x03,0x02,0x02,0x03, /* [5377] OBJ_id_GostR3411_94_with_GostR3410_2001 */ -0x2A,0x85,0x03,0x02,0x02,0x04, /* [5383] OBJ_id_GostR3411_94_with_GostR3410_94 */ -0x2A,0x85,0x03,0x02,0x02,0x09, /* [5389] OBJ_id_GostR3411_94 */ -0x2A,0x85,0x03,0x02,0x02,0x0A, /* [5395] OBJ_id_HMACGostR3411_94 */ -0x2A,0x85,0x03,0x02,0x02,0x13, /* [5401] OBJ_id_GostR3410_2001 */ -0x2A,0x85,0x03,0x02,0x02,0x14, /* [5407] OBJ_id_GostR3410_94 */ -0x2A,0x85,0x03,0x02,0x02,0x15, /* [5413] OBJ_id_Gost28147_89 */ -0x2A,0x85,0x03,0x02,0x02,0x16, /* [5419] OBJ_id_Gost28147_89_MAC */ -0x2A,0x85,0x03,0x02,0x02,0x17, /* [5425] OBJ_id_GostR3411_94_prf */ -0x2A,0x85,0x03,0x02,0x02,0x62, /* [5431] OBJ_id_GostR3410_2001DH */ -0x2A,0x85,0x03,0x02,0x02,0x63, /* [5437] OBJ_id_GostR3410_94DH */ -0x2A,0x85,0x03,0x02,0x02,0x0E,0x01, /* [5443] OBJ_id_Gost28147_89_CryptoPro_KeyMeshing */ -0x2A,0x85,0x03,0x02,0x02,0x0E,0x00, /* [5450] OBJ_id_Gost28147_89_None_KeyMeshing */ -0x2A,0x85,0x03,0x02,0x02,0x1E,0x00, /* [5457] OBJ_id_GostR3411_94_TestParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1E,0x01, /* [5464] OBJ_id_GostR3411_94_CryptoProParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x00, /* [5471] OBJ_id_Gost28147_89_TestParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x01, /* [5478] OBJ_id_Gost28147_89_CryptoPro_A_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x02, /* [5485] 
OBJ_id_Gost28147_89_CryptoPro_B_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x03, /* [5492] OBJ_id_Gost28147_89_CryptoPro_C_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x04, /* [5499] OBJ_id_Gost28147_89_CryptoPro_D_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x05, /* [5506] OBJ_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x06, /* [5513] OBJ_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x1F,0x07, /* [5520] OBJ_id_Gost28147_89_CryptoPro_RIC_1_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x20,0x00, /* [5527] OBJ_id_GostR3410_94_TestParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x20,0x02, /* [5534] OBJ_id_GostR3410_94_CryptoPro_A_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x20,0x03, /* [5541] OBJ_id_GostR3410_94_CryptoPro_B_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x20,0x04, /* [5548] OBJ_id_GostR3410_94_CryptoPro_C_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x20,0x05, /* [5555] OBJ_id_GostR3410_94_CryptoPro_D_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x21,0x01, /* [5562] OBJ_id_GostR3410_94_CryptoPro_XchA_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x21,0x02, /* [5569] OBJ_id_GostR3410_94_CryptoPro_XchB_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x21,0x03, /* [5576] OBJ_id_GostR3410_94_CryptoPro_XchC_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x23,0x00, /* [5583] OBJ_id_GostR3410_2001_TestParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x23,0x01, /* [5590] OBJ_id_GostR3410_2001_CryptoPro_A_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x23,0x02, /* [5597] OBJ_id_GostR3410_2001_CryptoPro_B_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x23,0x03, /* [5604] OBJ_id_GostR3410_2001_CryptoPro_C_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x24,0x00, /* [5611] OBJ_id_GostR3410_2001_CryptoPro_XchA_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x24,0x01, /* [5618] OBJ_id_GostR3410_2001_CryptoPro_XchB_ParamSet */ -0x2A,0x85,0x03,0x02,0x02,0x14,0x01, /* [5625] OBJ_id_GostR3410_94_a */ -0x2A,0x85,0x03,0x02,0x02,0x14,0x02, /* [5632] OBJ_id_GostR3410_94_aBis */ -0x2A,0x85,0x03,0x02,0x02,0x14,0x03, /* [5639] 
OBJ_id_GostR3410_94_b */ -0x2A,0x85,0x03,0x02,0x02,0x14,0x04, /* [5646] OBJ_id_GostR3410_94_bBis */ -0x2A,0x85,0x03,0x02,0x09,0x01,0x06,0x01, /* [5653] OBJ_id_Gost28147_89_cc */ -0x2A,0x85,0x03,0x02,0x09,0x01,0x05,0x03, /* [5661] OBJ_id_GostR3410_94_cc */ -0x2A,0x85,0x03,0x02,0x09,0x01,0x05,0x04, /* [5669] OBJ_id_GostR3410_2001_cc */ -0x2A,0x85,0x03,0x02,0x09,0x01,0x03,0x03, /* [5677] OBJ_id_GostR3411_94_with_GostR3410_94_cc */ -0x2A,0x85,0x03,0x02,0x09,0x01,0x03,0x04, /* [5685] OBJ_id_GostR3411_94_with_GostR3410_2001_cc */ -0x2A,0x85,0x03,0x02,0x09,0x01,0x08,0x01, /* [5693] OBJ_id_GostR3410_2001_ParamSet_cc */ -0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x11,0x02,/* [5701] OBJ_LocalKeySet */ -0x55,0x1D,0x2E, /* [5710] OBJ_freshest_crl */ -0x2B,0x06,0x01,0x05,0x05,0x07,0x08,0x03, /* [5713] OBJ_id_on_permanentIdentifier */ -0x55,0x04,0x0E, /* [5721] OBJ_searchGuide */ -0x55,0x04,0x0F, /* [5724] OBJ_businessCategory */ -0x55,0x04,0x10, /* [5727] OBJ_postalAddress */ -0x55,0x04,0x12, /* [5730] OBJ_postOfficeBox */ -0x55,0x04,0x13, /* [5733] OBJ_physicalDeliveryOfficeName */ -0x55,0x04,0x14, /* [5736] OBJ_telephoneNumber */ -0x55,0x04,0x15, /* [5739] OBJ_telexNumber */ -0x55,0x04,0x16, /* [5742] OBJ_teletexTerminalIdentifier */ -0x55,0x04,0x17, /* [5745] OBJ_facsimileTelephoneNumber */ -0x55,0x04,0x18, /* [5748] OBJ_x121Address */ -0x55,0x04,0x19, /* [5751] OBJ_internationaliSDNNumber */ -0x55,0x04,0x1A, /* [5754] OBJ_registeredAddress */ -0x55,0x04,0x1B, /* [5757] OBJ_destinationIndicator */ -0x55,0x04,0x1C, /* [5760] OBJ_preferredDeliveryMethod */ -0x55,0x04,0x1D, /* [5763] OBJ_presentationAddress */ -0x55,0x04,0x1E, /* [5766] OBJ_supportedApplicationContext */ -0x55,0x04,0x1F, /* [5769] OBJ_member */ -0x55,0x04,0x20, /* [5772] OBJ_owner */ -0x55,0x04,0x21, /* [5775] OBJ_roleOccupant */ -0x55,0x04,0x22, /* [5778] OBJ_seeAlso */ -0x55,0x04,0x23, /* [5781] OBJ_userPassword */ -0x55,0x04,0x24, /* [5784] OBJ_userCertificate */ -0x55,0x04,0x25, /* [5787] OBJ_cACertificate */ 
-0x55,0x04,0x26, /* [5790] OBJ_authorityRevocationList */ -0x55,0x04,0x27, /* [5793] OBJ_certificateRevocationList */ -0x55,0x04,0x28, /* [5796] OBJ_crossCertificatePair */ -0x55,0x04,0x2F, /* [5799] OBJ_enhancedSearchGuide */ -0x55,0x04,0x30, /* [5802] OBJ_protocolInformation */ -0x55,0x04,0x31, /* [5805] OBJ_distinguishedName */ -0x55,0x04,0x32, /* [5808] OBJ_uniqueMember */ -0x55,0x04,0x33, /* [5811] OBJ_houseIdentifier */ -0x55,0x04,0x34, /* [5814] OBJ_supportedAlgorithms */ -0x55,0x04,0x35, /* [5817] OBJ_deltaRevocationList */ -0x55,0x04,0x36, /* [5820] OBJ_dmdName */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x09,/* [5823] OBJ_id_alg_PWRI_KEK */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x06,/* [5834] OBJ_aes_128_gcm */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x07,/* [5843] OBJ_aes_128_ccm */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x08,/* [5852] OBJ_id_aes128_wrap_pad */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x1A,/* [5861] OBJ_aes_192_gcm */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x1B,/* [5870] OBJ_aes_192_ccm */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x1C,/* [5879] OBJ_id_aes192_wrap_pad */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2E,/* [5888] OBJ_aes_256_gcm */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2F,/* [5897] OBJ_aes_256_ccm */ -0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x30,/* [5906] OBJ_id_aes256_wrap_pad */ -0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x03,0x02,/* [5915] OBJ_id_camellia128_wrap */ -0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x03,0x03,/* [5926] OBJ_id_camellia192_wrap */ -0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x03,0x04,/* [5937] OBJ_id_camellia256_wrap */ -0x55,0x1D,0x25,0x00, /* [5948] OBJ_anyExtendedKeyUsage */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x08,/* [5952] OBJ_mgf1 */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0A,/* [5961] OBJ_rsassaPss */ -0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x07,/* [5970] OBJ_rsaesOaep */ +static const unsigned char lvalues[5974]={ +0x2A,0x86,0x48,0x86,0xF7,0x0D, /* [ 
0] OBJ_rsadsi */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01, /* [ 6] OBJ_pkcs */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x02, /* [ 13] OBJ_md2 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x05, /* [ 21] OBJ_md5 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x04, /* [ 29] OBJ_rc4 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,/* [ 37] OBJ_rsaEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x02,/* [ 46] OBJ_md2WithRSAEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x04,/* [ 55] OBJ_md5WithRSAEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x01,/* [ 64] OBJ_pbeWithMD2AndDES_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x03,/* [ 73] OBJ_pbeWithMD5AndDES_CBC */ +0x55, /* [ 82] OBJ_X500 */ +0x55,0x04, /* [ 83] OBJ_X509 */ +0x55,0x04,0x03, /* [ 85] OBJ_commonName */ +0x55,0x04,0x06, /* [ 88] OBJ_countryName */ +0x55,0x04,0x07, /* [ 91] OBJ_localityName */ +0x55,0x04,0x08, /* [ 94] OBJ_stateOrProvinceName */ +0x55,0x04,0x0A, /* [ 97] OBJ_organizationName */ +0x55,0x04,0x0B, /* [100] OBJ_organizationalUnitName */ +0x55,0x08,0x01,0x01, /* [103] OBJ_rsa */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07, /* [107] OBJ_pkcs7 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x01,/* [115] OBJ_pkcs7_data */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x02,/* [124] OBJ_pkcs7_signed */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x03,/* [133] OBJ_pkcs7_enveloped */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x04,/* [142] OBJ_pkcs7_signedAndEnveloped */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x05,/* [151] OBJ_pkcs7_digest */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x07,0x06,/* [160] OBJ_pkcs7_encrypted */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x03, /* [169] OBJ_pkcs3 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x03,0x01,/* [177] OBJ_dhKeyAgreement */ +0x2B,0x0E,0x03,0x02,0x06, /* [186] OBJ_des_ecb */ +0x2B,0x0E,0x03,0x02,0x09, /* [191] OBJ_des_cfb64 */ +0x2B,0x0E,0x03,0x02,0x07, /* [196] OBJ_des_cbc */ +0x2B,0x0E,0x03,0x02,0x11, /* [201] OBJ_des_ede_ecb */ 
+0x2B,0x06,0x01,0x04,0x01,0x81,0x3C,0x07,0x01,0x01,0x02,/* [206] OBJ_idea_cbc */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x02, /* [217] OBJ_rc2_cbc */ +0x2B,0x0E,0x03,0x02,0x12, /* [225] OBJ_sha */ +0x2B,0x0E,0x03,0x02,0x0F, /* [230] OBJ_shaWithRSAEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x07, /* [235] OBJ_des_ede3_cbc */ +0x2B,0x0E,0x03,0x02,0x08, /* [243] OBJ_des_ofb64 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09, /* [248] OBJ_pkcs9 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x01,/* [256] OBJ_pkcs9_emailAddress */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x02,/* [265] OBJ_pkcs9_unstructuredName */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x03,/* [274] OBJ_pkcs9_contentType */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x04,/* [283] OBJ_pkcs9_messageDigest */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x05,/* [292] OBJ_pkcs9_signingTime */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x06,/* [301] OBJ_pkcs9_countersignature */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x07,/* [310] OBJ_pkcs9_challengePassword */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x08,/* [319] OBJ_pkcs9_unstructuredAddress */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x09,/* [328] OBJ_pkcs9_extCertAttributes */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42, /* [337] OBJ_netscape */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01, /* [344] OBJ_netscape_cert_extension */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x02, /* [352] OBJ_netscape_data_type */ +0x2B,0x0E,0x03,0x02,0x1A, /* [360] OBJ_sha1 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,/* [365] OBJ_sha1WithRSAEncryption */ +0x2B,0x0E,0x03,0x02,0x0D, /* [374] OBJ_dsaWithSHA */ +0x2B,0x0E,0x03,0x02,0x0C, /* [379] OBJ_dsa_2 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0B,/* [384] OBJ_pbeWithSHA1AndRC2_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0C,/* [393] OBJ_id_pbkdf2 */ +0x2B,0x0E,0x03,0x02,0x1B, /* [402] OBJ_dsaWithSHA1_2 */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x01,/* [407] OBJ_netscape_cert_type */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x02,/* 
[416] OBJ_netscape_base_url */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x03,/* [425] OBJ_netscape_revocation_url */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x04,/* [434] OBJ_netscape_ca_revocation_url */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x07,/* [443] OBJ_netscape_renewal_url */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x08,/* [452] OBJ_netscape_ca_policy_url */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x0C,/* [461] OBJ_netscape_ssl_server_name */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x01,0x0D,/* [470] OBJ_netscape_comment */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x02,0x05,/* [479] OBJ_netscape_cert_sequence */ +0x55,0x1D, /* [488] OBJ_id_ce */ +0x55,0x1D,0x0E, /* [490] OBJ_subject_key_identifier */ +0x55,0x1D,0x0F, /* [493] OBJ_key_usage */ +0x55,0x1D,0x10, /* [496] OBJ_private_key_usage_period */ +0x55,0x1D,0x11, /* [499] OBJ_subject_alt_name */ +0x55,0x1D,0x12, /* [502] OBJ_issuer_alt_name */ +0x55,0x1D,0x13, /* [505] OBJ_basic_constraints */ +0x55,0x1D,0x14, /* [508] OBJ_crl_number */ +0x55,0x1D,0x20, /* [511] OBJ_certificate_policies */ +0x55,0x1D,0x23, /* [514] OBJ_authority_key_identifier */ +0x2B,0x06,0x01,0x04,0x01,0x97,0x55,0x01,0x02,/* [517] OBJ_bf_cbc */ +0x55,0x08,0x03,0x65, /* [526] OBJ_mdc2 */ +0x55,0x08,0x03,0x64, /* [530] OBJ_mdc2WithRSA */ +0x55,0x04,0x2A, /* [534] OBJ_givenName */ +0x55,0x04,0x04, /* [537] OBJ_surname */ +0x55,0x04,0x2B, /* [540] OBJ_initials */ +0x55,0x1D,0x1F, /* [543] OBJ_crl_distribution_points */ +0x2B,0x0E,0x03,0x02,0x03, /* [546] OBJ_md5WithRSA */ +0x55,0x04,0x05, /* [551] OBJ_serialNumber */ +0x55,0x04,0x0C, /* [554] OBJ_title */ +0x55,0x04,0x0D, /* [557] OBJ_description */ +0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x0A,/* [560] OBJ_cast5_cbc */ +0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x0C,/* [569] OBJ_pbeWithMD5AndCast5_CBC */ +0x2A,0x86,0x48,0xCE,0x38,0x04,0x03, /* [578] OBJ_dsaWithSHA1 */ +0x2B,0x0E,0x03,0x02,0x1D, /* [585] OBJ_sha1WithRSA */ +0x2A,0x86,0x48,0xCE,0x38,0x04,0x01, /* [590] OBJ_dsa */ 
+0x2B,0x24,0x03,0x02,0x01, /* [597] OBJ_ripemd160 */ +0x2B,0x24,0x03,0x03,0x01,0x02, /* [602] OBJ_ripemd160WithRSA */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x08, /* [608] OBJ_rc5_cbc */ +0x29,0x01,0x01,0x85,0x1A,0x01, /* [616] OBJ_rle_compression */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x08,/* [622] OBJ_zlib_compression */ +0x55,0x1D,0x25, /* [633] OBJ_ext_key_usage */ +0x2B,0x06,0x01,0x05,0x05,0x07, /* [636] OBJ_id_pkix */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03, /* [642] OBJ_id_kp */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x01, /* [649] OBJ_server_auth */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x02, /* [657] OBJ_client_auth */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x03, /* [665] OBJ_code_sign */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x04, /* [673] OBJ_email_protect */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x08, /* [681] OBJ_time_stamp */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x02,0x01,0x15,/* [689] OBJ_ms_code_ind */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x02,0x01,0x16,/* [699] OBJ_ms_code_com */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x0A,0x03,0x01,/* [709] OBJ_ms_ctl_sign */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x0A,0x03,0x03,/* [719] OBJ_ms_sgc */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x0A,0x03,0x04,/* [729] OBJ_ms_efs */ +0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x04,0x01,/* [739] OBJ_ns_sgc */ +0x55,0x1D,0x1B, /* [748] OBJ_delta_crl */ +0x55,0x1D,0x15, /* [751] OBJ_crl_reason */ +0x55,0x1D,0x18, /* [754] OBJ_invalidity_date */ +0x2B,0x65,0x01,0x04,0x01, /* [757] OBJ_sxnet */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x01,/* [762] OBJ_pbe_WithSHA1And128BitRC4 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x02,/* [772] OBJ_pbe_WithSHA1And40BitRC4 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x03,/* [782] OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x04,/* [792] OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x05,/* [802] OBJ_pbe_WithSHA1And128BitRC2_CBC */ 
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x01,0x06,/* [812] OBJ_pbe_WithSHA1And40BitRC2_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x01,/* [822] OBJ_keyBag */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x02,/* [833] OBJ_pkcs8ShroudedKeyBag */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x03,/* [844] OBJ_certBag */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x04,/* [855] OBJ_crlBag */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x05,/* [866] OBJ_secretBag */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x0C,0x0A,0x01,0x06,/* [877] OBJ_safeContentsBag */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x14,/* [888] OBJ_friendlyName */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x15,/* [897] OBJ_localKeyID */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x16,0x01,/* [906] OBJ_x509Certificate */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x16,0x02,/* [916] OBJ_sdsiCertificate */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x17,0x01,/* [926] OBJ_x509Crl */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0D,/* [936] OBJ_pbes2 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0E,/* [945] OBJ_pbmac1 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x07, /* [954] OBJ_hmacWithSHA1 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x02,0x01, /* [962] OBJ_id_qt_cps */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x02,0x02, /* [970] OBJ_id_qt_unotice */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x0F,/* [978] OBJ_SMIMECapabilities */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x04,/* [987] OBJ_pbeWithMD2AndRC2_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x06,/* [996] OBJ_pbeWithMD5AndRC2_CBC */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05,0x0A,/* [1005] OBJ_pbeWithSHA1AndDES_CBC */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x02,0x01,0x0E,/* [1014] OBJ_ms_ext_req */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x0E,/* [1024] OBJ_ext_req */ +0x55,0x04,0x29, /* [1033] OBJ_name */ +0x55,0x04,0x2E, /* [1036] OBJ_dnQualifier */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01, /* [1039] OBJ_id_pe */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30, /* 
[1046] OBJ_id_ad */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x01, /* [1053] OBJ_info_access */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01, /* [1061] OBJ_ad_OCSP */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x02, /* [1069] OBJ_ad_ca_issuers */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x09, /* [1077] OBJ_OCSP_sign */ +0x2A, /* [1085] OBJ_member_body */ +0x2A,0x86,0x48, /* [1086] OBJ_ISO_US */ +0x2A,0x86,0x48,0xCE,0x38, /* [1089] OBJ_X9_57 */ +0x2A,0x86,0x48,0xCE,0x38,0x04, /* [1094] OBJ_X9cm */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01, /* [1100] OBJ_pkcs1 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x05, /* [1108] OBJ_pkcs5 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,/* [1116] OBJ_SMIME */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,/* [1125] OBJ_id_smime_mod */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,/* [1135] OBJ_id_smime_ct */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,/* [1145] OBJ_id_smime_aa */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,/* [1155] OBJ_id_smime_alg */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x04,/* [1165] OBJ_id_smime_cd */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x05,/* [1175] OBJ_id_smime_spq */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,/* [1185] OBJ_id_smime_cti */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x01,/* [1195] OBJ_id_smime_mod_cms */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x02,/* [1206] OBJ_id_smime_mod_ess */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x03,/* [1217] OBJ_id_smime_mod_oid */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x04,/* [1228] OBJ_id_smime_mod_msg_v3 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x05,/* [1239] OBJ_id_smime_mod_ets_eSignature_88 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x06,/* [1250] OBJ_id_smime_mod_ets_eSignature_97 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x07,/* [1261] OBJ_id_smime_mod_ets_eSigPolicy_88 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x00,0x08,/* [1272] 
OBJ_id_smime_mod_ets_eSigPolicy_97 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x01,/* [1283] OBJ_id_smime_ct_receipt */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x02,/* [1294] OBJ_id_smime_ct_authData */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x03,/* [1305] OBJ_id_smime_ct_publishCert */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x04,/* [1316] OBJ_id_smime_ct_TSTInfo */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x05,/* [1327] OBJ_id_smime_ct_TDTInfo */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x06,/* [1338] OBJ_id_smime_ct_contentInfo */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x07,/* [1349] OBJ_id_smime_ct_DVCSRequestData */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x08,/* [1360] OBJ_id_smime_ct_DVCSResponseData */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x01,/* [1371] OBJ_id_smime_aa_receiptRequest */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x02,/* [1382] OBJ_id_smime_aa_securityLabel */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x03,/* [1393] OBJ_id_smime_aa_mlExpandHistory */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x04,/* [1404] OBJ_id_smime_aa_contentHint */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x05,/* [1415] OBJ_id_smime_aa_msgSigDigest */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x06,/* [1426] OBJ_id_smime_aa_encapContentType */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x07,/* [1437] OBJ_id_smime_aa_contentIdentifier */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x08,/* [1448] OBJ_id_smime_aa_macValue */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x09,/* [1459] OBJ_id_smime_aa_equivalentLabels */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0A,/* [1470] OBJ_id_smime_aa_contentReference */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0B,/* [1481] OBJ_id_smime_aa_encrypKeyPref */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0C,/* [1492] 
OBJ_id_smime_aa_signingCertificate */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0D,/* [1503] OBJ_id_smime_aa_smimeEncryptCerts */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0E,/* [1514] OBJ_id_smime_aa_timeStampToken */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x0F,/* [1525] OBJ_id_smime_aa_ets_sigPolicyId */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x10,/* [1536] OBJ_id_smime_aa_ets_commitmentType */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x11,/* [1547] OBJ_id_smime_aa_ets_signerLocation */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x12,/* [1558] OBJ_id_smime_aa_ets_signerAttr */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x13,/* [1569] OBJ_id_smime_aa_ets_otherSigCert */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x14,/* [1580] OBJ_id_smime_aa_ets_contentTimestamp */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x15,/* [1591] OBJ_id_smime_aa_ets_CertificateRefs */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x16,/* [1602] OBJ_id_smime_aa_ets_RevocationRefs */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x17,/* [1613] OBJ_id_smime_aa_ets_certValues */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x18,/* [1624] OBJ_id_smime_aa_ets_revocationValues */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x19,/* [1635] OBJ_id_smime_aa_ets_escTimeStamp */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1A,/* [1646] OBJ_id_smime_aa_ets_certCRLTimestamp */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1B,/* [1657] OBJ_id_smime_aa_ets_archiveTimeStamp */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1C,/* [1668] OBJ_id_smime_aa_signatureType */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x02,0x1D,/* [1679] OBJ_id_smime_aa_dvcs_dvc */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x01,/* [1690] OBJ_id_smime_alg_ESDHwith3DES */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x02,/* [1701] OBJ_id_smime_alg_ESDHwithRC2 */ 
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x03,/* [1712] OBJ_id_smime_alg_3DESwrap */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x04,/* [1723] OBJ_id_smime_alg_RC2wrap */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x05,/* [1734] OBJ_id_smime_alg_ESDH */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x06,/* [1745] OBJ_id_smime_alg_CMS3DESwrap */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x07,/* [1756] OBJ_id_smime_alg_CMSRC2wrap */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x04,0x01,/* [1767] OBJ_id_smime_cd_ldap */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x05,0x01,/* [1778] OBJ_id_smime_spq_ets_sqt_uri */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x05,0x02,/* [1789] OBJ_id_smime_spq_ets_sqt_unotice */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x01,/* [1800] OBJ_id_smime_cti_ets_proofOfOrigin */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x02,/* [1811] OBJ_id_smime_cti_ets_proofOfReceipt */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x03,/* [1822] OBJ_id_smime_cti_ets_proofOfDelivery */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x04,/* [1833] OBJ_id_smime_cti_ets_proofOfSender */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x05,/* [1844] OBJ_id_smime_cti_ets_proofOfApproval */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x06,0x06,/* [1855] OBJ_id_smime_cti_ets_proofOfCreation */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x04, /* [1866] OBJ_md4 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00, /* [1874] OBJ_id_pkix_mod */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x02, /* [1881] OBJ_id_qt */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04, /* [1888] OBJ_id_it */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05, /* [1895] OBJ_id_pkip */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x06, /* [1902] OBJ_id_alg */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07, /* [1909] OBJ_id_cmc */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x08, /* [1916] OBJ_id_on */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x09, /* [1923] OBJ_id_pda */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A, /* [1930] 
OBJ_id_aca */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0B, /* [1937] OBJ_id_qcs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0C, /* [1944] OBJ_id_cct */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x01, /* [1951] OBJ_id_pkix1_explicit_88 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x02, /* [1959] OBJ_id_pkix1_implicit_88 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x03, /* [1967] OBJ_id_pkix1_explicit_93 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x04, /* [1975] OBJ_id_pkix1_implicit_93 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x05, /* [1983] OBJ_id_mod_crmf */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x06, /* [1991] OBJ_id_mod_cmc */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x07, /* [1999] OBJ_id_mod_kea_profile_88 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x08, /* [2007] OBJ_id_mod_kea_profile_93 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x09, /* [2015] OBJ_id_mod_cmp */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0A, /* [2023] OBJ_id_mod_qualified_cert_88 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0B, /* [2031] OBJ_id_mod_qualified_cert_93 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0C, /* [2039] OBJ_id_mod_attribute_cert */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0D, /* [2047] OBJ_id_mod_timestamp_protocol */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0E, /* [2055] OBJ_id_mod_ocsp */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x0F, /* [2063] OBJ_id_mod_dvcs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x00,0x10, /* [2071] OBJ_id_mod_cmp2000 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x02, /* [2079] OBJ_biometricInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x03, /* [2087] OBJ_qcStatements */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x04, /* [2095] OBJ_ac_auditEntity */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x05, /* [2103] OBJ_ac_targeting */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x06, /* [2111] OBJ_aaControls */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x07, /* [2119] OBJ_sbgp_ipAddrBlock */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x08, /* [2127] OBJ_sbgp_autonomousSysNum */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x09, /* [2135] OBJ_sbgp_routerIdentifier */ 
+0x2B,0x06,0x01,0x05,0x05,0x07,0x02,0x03, /* [2143] OBJ_textNotice */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x05, /* [2151] OBJ_ipsecEndSystem */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x06, /* [2159] OBJ_ipsecTunnel */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x07, /* [2167] OBJ_ipsecUser */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x0A, /* [2175] OBJ_dvcs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x01, /* [2183] OBJ_id_it_caProtEncCert */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x02, /* [2191] OBJ_id_it_signKeyPairTypes */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x03, /* [2199] OBJ_id_it_encKeyPairTypes */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x04, /* [2207] OBJ_id_it_preferredSymmAlg */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x05, /* [2215] OBJ_id_it_caKeyUpdateInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x06, /* [2223] OBJ_id_it_currentCRL */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x07, /* [2231] OBJ_id_it_unsupportedOIDs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x08, /* [2239] OBJ_id_it_subscriptionRequest */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x09, /* [2247] OBJ_id_it_subscriptionResponse */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0A, /* [2255] OBJ_id_it_keyPairParamReq */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0B, /* [2263] OBJ_id_it_keyPairParamRep */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0C, /* [2271] OBJ_id_it_revPassphrase */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0D, /* [2279] OBJ_id_it_implicitConfirm */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0E, /* [2287] OBJ_id_it_confirmWaitTime */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x0F, /* [2295] OBJ_id_it_origPKIMessage */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01, /* [2303] OBJ_id_regCtrl */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x02, /* [2311] OBJ_id_regInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x01,/* [2319] OBJ_id_regCtrl_regToken */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x02,/* [2328] OBJ_id_regCtrl_authenticator */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x03,/* [2337] OBJ_id_regCtrl_pkiPublicationInfo */ 
+0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x04,/* [2346] OBJ_id_regCtrl_pkiArchiveOptions */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x05,/* [2355] OBJ_id_regCtrl_oldCertID */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x01,0x06,/* [2364] OBJ_id_regCtrl_protocolEncrKey */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x02,0x01,/* [2373] OBJ_id_regInfo_utf8Pairs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x05,0x02,0x02,/* [2382] OBJ_id_regInfo_certReq */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x01, /* [2391] OBJ_id_alg_des40 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x02, /* [2399] OBJ_id_alg_noSignature */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x03, /* [2407] OBJ_id_alg_dh_sig_hmac_sha1 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x06,0x04, /* [2415] OBJ_id_alg_dh_pop */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x01, /* [2423] OBJ_id_cmc_statusInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x02, /* [2431] OBJ_id_cmc_identification */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x03, /* [2439] OBJ_id_cmc_identityProof */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x04, /* [2447] OBJ_id_cmc_dataReturn */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x05, /* [2455] OBJ_id_cmc_transactionId */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x06, /* [2463] OBJ_id_cmc_senderNonce */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x07, /* [2471] OBJ_id_cmc_recipientNonce */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x08, /* [2479] OBJ_id_cmc_addExtensions */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x09, /* [2487] OBJ_id_cmc_encryptedPOP */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x0A, /* [2495] OBJ_id_cmc_decryptedPOP */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x0B, /* [2503] OBJ_id_cmc_lraPOPWitness */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x0F, /* [2511] OBJ_id_cmc_getCert */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x10, /* [2519] OBJ_id_cmc_getCRL */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x11, /* [2527] OBJ_id_cmc_revokeRequest */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x12, /* [2535] OBJ_id_cmc_regInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x13, /* [2543] OBJ_id_cmc_responseInfo */ 
+0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x15, /* [2551] OBJ_id_cmc_queryPending */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x16, /* [2559] OBJ_id_cmc_popLinkRandom */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x17, /* [2567] OBJ_id_cmc_popLinkWitness */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x07,0x18, /* [2575] OBJ_id_cmc_confirmCertAcceptance */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x08,0x01, /* [2583] OBJ_id_on_personalData */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x01, /* [2591] OBJ_id_pda_dateOfBirth */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x02, /* [2599] OBJ_id_pda_placeOfBirth */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x03, /* [2607] OBJ_id_pda_gender */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x04, /* [2615] OBJ_id_pda_countryOfCitizenship */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x09,0x05, /* [2623] OBJ_id_pda_countryOfResidence */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x01, /* [2631] OBJ_id_aca_authenticationInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x02, /* [2639] OBJ_id_aca_accessIdentity */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x03, /* [2647] OBJ_id_aca_chargingIdentity */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x04, /* [2655] OBJ_id_aca_group */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x05, /* [2663] OBJ_id_aca_role */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0B,0x01, /* [2671] OBJ_id_qcs_pkixQCSyntax_v1 */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0C,0x01, /* [2679] OBJ_id_cct_crs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0C,0x02, /* [2687] OBJ_id_cct_PKIData */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0C,0x03, /* [2695] OBJ_id_cct_PKIResponse */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x03, /* [2703] OBJ_ad_timeStamping */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x04, /* [2711] OBJ_ad_dvcs */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x01,/* [2719] OBJ_id_pkix_OCSP_basic */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x02,/* [2728] OBJ_id_pkix_OCSP_Nonce */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x03,/* [2737] OBJ_id_pkix_OCSP_CrlID */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x04,/* [2746] OBJ_id_pkix_OCSP_acceptableResponses */ 
+0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x05,/* [2755] OBJ_id_pkix_OCSP_noCheck */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x06,/* [2764] OBJ_id_pkix_OCSP_archiveCutoff */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x07,/* [2773] OBJ_id_pkix_OCSP_serviceLocator */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x08,/* [2782] OBJ_id_pkix_OCSP_extendedStatus */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x09,/* [2791] OBJ_id_pkix_OCSP_valid */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x0A,/* [2800] OBJ_id_pkix_OCSP_path */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x01,0x0B,/* [2809] OBJ_id_pkix_OCSP_trustRoot */ +0x2B,0x0E,0x03,0x02, /* [2818] OBJ_algorithm */ +0x2B,0x0E,0x03,0x02,0x0B, /* [2822] OBJ_rsaSignature */ +0x55,0x08, /* [2827] OBJ_X500algorithms */ +0x2B, /* [2829] OBJ_org */ +0x2B,0x06, /* [2830] OBJ_dod */ +0x2B,0x06,0x01, /* [2832] OBJ_iana */ +0x2B,0x06,0x01,0x01, /* [2835] OBJ_Directory */ +0x2B,0x06,0x01,0x02, /* [2839] OBJ_Management */ +0x2B,0x06,0x01,0x03, /* [2843] OBJ_Experimental */ +0x2B,0x06,0x01,0x04, /* [2847] OBJ_Private */ +0x2B,0x06,0x01,0x05, /* [2851] OBJ_Security */ +0x2B,0x06,0x01,0x06, /* [2855] OBJ_SNMPv2 */ +0x2B,0x06,0x01,0x07, /* [2859] OBJ_Mail */ +0x2B,0x06,0x01,0x04,0x01, /* [2863] OBJ_Enterprises */ +0x2B,0x06,0x01,0x04,0x01,0x8B,0x3A,0x82,0x58,/* [2868] OBJ_dcObject */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x19,/* [2877] OBJ_domainComponent */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x0D,/* [2887] OBJ_Domain */ +0x55,0x01,0x05, /* [2897] OBJ_selected_attribute_types */ +0x55,0x01,0x05,0x37, /* [2900] OBJ_clearance */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x03,/* [2904] OBJ_md4WithRSAEncryption */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0A, /* [2913] OBJ_ac_proxying */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0B, /* [2921] OBJ_sinfo_access */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x0A,0x06, /* [2929] OBJ_id_aca_encAttrs */ +0x55,0x04,0x48, /* [2937] OBJ_role */ +0x55,0x1D,0x24, /* [2940] OBJ_policy_constraints */ +0x55,0x1D,0x37, /* 
[2943] OBJ_target_information */ +0x55,0x1D,0x38, /* [2946] OBJ_no_rev_avail */ +0x2A,0x86,0x48,0xCE,0x3D, /* [2949] OBJ_ansi_X9_62 */ +0x2A,0x86,0x48,0xCE,0x3D,0x01,0x01, /* [2954] OBJ_X9_62_prime_field */ +0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02, /* [2961] OBJ_X9_62_characteristic_two_field */ +0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01, /* [2968] OBJ_X9_62_id_ecPublicKey */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x01, /* [2975] OBJ_X9_62_prime192v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x02, /* [2983] OBJ_X9_62_prime192v2 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x03, /* [2991] OBJ_X9_62_prime192v3 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x04, /* [2999] OBJ_X9_62_prime239v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x05, /* [3007] OBJ_X9_62_prime239v2 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x06, /* [3015] OBJ_X9_62_prime239v3 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x01,0x07, /* [3023] OBJ_X9_62_prime256v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x01, /* [3031] OBJ_ecdsa_with_SHA1 */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x11,0x01,/* [3038] OBJ_ms_csp_name */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x01,/* [3047] OBJ_aes_128_ecb */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x02,/* [3056] OBJ_aes_128_cbc */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x03,/* [3065] OBJ_aes_128_ofb128 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x04,/* [3074] OBJ_aes_128_cfb128 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x15,/* [3083] OBJ_aes_192_ecb */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x16,/* [3092] OBJ_aes_192_cbc */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x17,/* [3101] OBJ_aes_192_ofb128 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x18,/* [3110] OBJ_aes_192_cfb128 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x29,/* [3119] OBJ_aes_256_ecb */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2A,/* [3128] OBJ_aes_256_cbc */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2B,/* [3137] OBJ_aes_256_ofb128 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2C,/* [3146] OBJ_aes_256_cfb128 */ +0x55,0x1D,0x17, /* [3155] 
OBJ_hold_instruction_code */ +0x2A,0x86,0x48,0xCE,0x38,0x02,0x01, /* [3158] OBJ_hold_instruction_none */ +0x2A,0x86,0x48,0xCE,0x38,0x02,0x02, /* [3165] OBJ_hold_instruction_call_issuer */ +0x2A,0x86,0x48,0xCE,0x38,0x02,0x03, /* [3172] OBJ_hold_instruction_reject */ +0x09, /* [3179] OBJ_data */ +0x09,0x92,0x26, /* [3180] OBJ_pss */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C, /* [3183] OBJ_ucl */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64, /* [3190] OBJ_pilot */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,/* [3198] OBJ_pilotAttributeType */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x03,/* [3207] OBJ_pilotAttributeSyntax */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,/* [3216] OBJ_pilotObjectClass */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x0A,/* [3225] OBJ_pilotGroups */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x03,0x04,/* [3234] OBJ_iA5StringSyntax */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x03,0x05,/* [3244] OBJ_caseIgnoreIA5StringSyntax */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x03,/* [3254] OBJ_pilotObject */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x04,/* [3264] OBJ_pilotPerson */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x05,/* [3274] OBJ_account */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x06,/* [3284] OBJ_document */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x07,/* [3294] OBJ_room */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x09,/* [3304] OBJ_documentSeries */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x0E,/* [3314] OBJ_rFC822localPart */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x0F,/* [3324] OBJ_dNSDomain */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x11,/* [3334] OBJ_domainRelatedObject */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x12,/* [3344] OBJ_friendlyCountry */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x13,/* [3354] OBJ_simpleSecurityObject */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x14,/* [3364] OBJ_pilotOrganization */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x15,/* [3374] 
OBJ_pilotDSA */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x04,0x16,/* [3384] OBJ_qualityLabelledData */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x01,/* [3394] OBJ_userId */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x02,/* [3404] OBJ_textEncodedORAddress */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x03,/* [3414] OBJ_rfc822Mailbox */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x04,/* [3424] OBJ_info */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x05,/* [3434] OBJ_favouriteDrink */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x06,/* [3444] OBJ_roomNumber */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x07,/* [3454] OBJ_photo */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x08,/* [3464] OBJ_userClass */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x09,/* [3474] OBJ_host */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0A,/* [3484] OBJ_manager */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0B,/* [3494] OBJ_documentIdentifier */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0C,/* [3504] OBJ_documentTitle */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0D,/* [3514] OBJ_documentVersion */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0E,/* [3524] OBJ_documentAuthor */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x0F,/* [3534] OBJ_documentLocation */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x14,/* [3544] OBJ_homeTelephoneNumber */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x15,/* [3554] OBJ_secretary */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x16,/* [3564] OBJ_otherMailbox */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x17,/* [3574] OBJ_lastModifiedTime */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x18,/* [3584] OBJ_lastModifiedBy */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1A,/* [3594] OBJ_aRecord */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1B,/* [3604] OBJ_pilotAttributeType27 */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1C,/* [3614] OBJ_mXRecord */ 
+0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1D,/* [3624] OBJ_nSRecord */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1E,/* [3634] OBJ_sOARecord */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x1F,/* [3644] OBJ_cNAMERecord */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x25,/* [3654] OBJ_associatedDomain */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x26,/* [3664] OBJ_associatedName */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x27,/* [3674] OBJ_homePostalAddress */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x28,/* [3684] OBJ_personalTitle */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x29,/* [3694] OBJ_mobileTelephoneNumber */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2A,/* [3704] OBJ_pagerTelephoneNumber */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2B,/* [3714] OBJ_friendlyCountryName */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2D,/* [3724] OBJ_organizationalStatus */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2E,/* [3734] OBJ_janetMailbox */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x2F,/* [3744] OBJ_mailPreferenceOption */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x30,/* [3754] OBJ_buildingName */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x31,/* [3764] OBJ_dSAQuality */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x32,/* [3774] OBJ_singleLevelQuality */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x33,/* [3784] OBJ_subtreeMinimumQuality */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x34,/* [3794] OBJ_subtreeMaximumQuality */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x35,/* [3804] OBJ_personalSignature */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x36,/* [3814] OBJ_dITRedirect */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x37,/* [3824] OBJ_audio */ +0x09,0x92,0x26,0x89,0x93,0xF2,0x2C,0x64,0x01,0x38,/* [3834] OBJ_documentPublisher */ +0x55,0x04,0x2D, /* [3844] OBJ_x500UniqueIdentifier */ +0x2B,0x06,0x01,0x07,0x01, /* [3847] OBJ_mime_mhs */ +0x2B,0x06,0x01,0x07,0x01,0x01, /* [3852] 
OBJ_mime_mhs_headings */ +0x2B,0x06,0x01,0x07,0x01,0x02, /* [3858] OBJ_mime_mhs_bodies */ +0x2B,0x06,0x01,0x07,0x01,0x01,0x01, /* [3864] OBJ_id_hex_partial_message */ +0x2B,0x06,0x01,0x07,0x01,0x01,0x02, /* [3871] OBJ_id_hex_multipart_message */ +0x55,0x04,0x2C, /* [3878] OBJ_generationQualifier */ +0x55,0x04,0x41, /* [3881] OBJ_pseudonym */ +0x67,0x2A, /* [3884] OBJ_id_set */ +0x67,0x2A,0x00, /* [3886] OBJ_set_ctype */ +0x67,0x2A,0x01, /* [3889] OBJ_set_msgExt */ +0x67,0x2A,0x03, /* [3892] OBJ_set_attr */ +0x67,0x2A,0x05, /* [3895] OBJ_set_policy */ +0x67,0x2A,0x07, /* [3898] OBJ_set_certExt */ +0x67,0x2A,0x08, /* [3901] OBJ_set_brand */ +0x67,0x2A,0x00,0x00, /* [3904] OBJ_setct_PANData */ +0x67,0x2A,0x00,0x01, /* [3908] OBJ_setct_PANToken */ +0x67,0x2A,0x00,0x02, /* [3912] OBJ_setct_PANOnly */ +0x67,0x2A,0x00,0x03, /* [3916] OBJ_setct_OIData */ +0x67,0x2A,0x00,0x04, /* [3920] OBJ_setct_PI */ +0x67,0x2A,0x00,0x05, /* [3924] OBJ_setct_PIData */ +0x67,0x2A,0x00,0x06, /* [3928] OBJ_setct_PIDataUnsigned */ +0x67,0x2A,0x00,0x07, /* [3932] OBJ_setct_HODInput */ +0x67,0x2A,0x00,0x08, /* [3936] OBJ_setct_AuthResBaggage */ +0x67,0x2A,0x00,0x09, /* [3940] OBJ_setct_AuthRevReqBaggage */ +0x67,0x2A,0x00,0x0A, /* [3944] OBJ_setct_AuthRevResBaggage */ +0x67,0x2A,0x00,0x0B, /* [3948] OBJ_setct_CapTokenSeq */ +0x67,0x2A,0x00,0x0C, /* [3952] OBJ_setct_PInitResData */ +0x67,0x2A,0x00,0x0D, /* [3956] OBJ_setct_PI_TBS */ +0x67,0x2A,0x00,0x0E, /* [3960] OBJ_setct_PResData */ +0x67,0x2A,0x00,0x10, /* [3964] OBJ_setct_AuthReqTBS */ +0x67,0x2A,0x00,0x11, /* [3968] OBJ_setct_AuthResTBS */ +0x67,0x2A,0x00,0x12, /* [3972] OBJ_setct_AuthResTBSX */ +0x67,0x2A,0x00,0x13, /* [3976] OBJ_setct_AuthTokenTBS */ +0x67,0x2A,0x00,0x14, /* [3980] OBJ_setct_CapTokenData */ +0x67,0x2A,0x00,0x15, /* [3984] OBJ_setct_CapTokenTBS */ +0x67,0x2A,0x00,0x16, /* [3988] OBJ_setct_AcqCardCodeMsg */ +0x67,0x2A,0x00,0x17, /* [3992] OBJ_setct_AuthRevReqTBS */ +0x67,0x2A,0x00,0x18, /* [3996] OBJ_setct_AuthRevResData 
*/ +0x67,0x2A,0x00,0x19, /* [4000] OBJ_setct_AuthRevResTBS */ +0x67,0x2A,0x00,0x1A, /* [4004] OBJ_setct_CapReqTBS */ +0x67,0x2A,0x00,0x1B, /* [4008] OBJ_setct_CapReqTBSX */ +0x67,0x2A,0x00,0x1C, /* [4012] OBJ_setct_CapResData */ +0x67,0x2A,0x00,0x1D, /* [4016] OBJ_setct_CapRevReqTBS */ +0x67,0x2A,0x00,0x1E, /* [4020] OBJ_setct_CapRevReqTBSX */ +0x67,0x2A,0x00,0x1F, /* [4024] OBJ_setct_CapRevResData */ +0x67,0x2A,0x00,0x20, /* [4028] OBJ_setct_CredReqTBS */ +0x67,0x2A,0x00,0x21, /* [4032] OBJ_setct_CredReqTBSX */ +0x67,0x2A,0x00,0x22, /* [4036] OBJ_setct_CredResData */ +0x67,0x2A,0x00,0x23, /* [4040] OBJ_setct_CredRevReqTBS */ +0x67,0x2A,0x00,0x24, /* [4044] OBJ_setct_CredRevReqTBSX */ +0x67,0x2A,0x00,0x25, /* [4048] OBJ_setct_CredRevResData */ +0x67,0x2A,0x00,0x26, /* [4052] OBJ_setct_PCertReqData */ +0x67,0x2A,0x00,0x27, /* [4056] OBJ_setct_PCertResTBS */ +0x67,0x2A,0x00,0x28, /* [4060] OBJ_setct_BatchAdminReqData */ +0x67,0x2A,0x00,0x29, /* [4064] OBJ_setct_BatchAdminResData */ +0x67,0x2A,0x00,0x2A, /* [4068] OBJ_setct_CardCInitResTBS */ +0x67,0x2A,0x00,0x2B, /* [4072] OBJ_setct_MeAqCInitResTBS */ +0x67,0x2A,0x00,0x2C, /* [4076] OBJ_setct_RegFormResTBS */ +0x67,0x2A,0x00,0x2D, /* [4080] OBJ_setct_CertReqData */ +0x67,0x2A,0x00,0x2E, /* [4084] OBJ_setct_CertReqTBS */ +0x67,0x2A,0x00,0x2F, /* [4088] OBJ_setct_CertResData */ +0x67,0x2A,0x00,0x30, /* [4092] OBJ_setct_CertInqReqTBS */ +0x67,0x2A,0x00,0x31, /* [4096] OBJ_setct_ErrorTBS */ +0x67,0x2A,0x00,0x32, /* [4100] OBJ_setct_PIDualSignedTBE */ +0x67,0x2A,0x00,0x33, /* [4104] OBJ_setct_PIUnsignedTBE */ +0x67,0x2A,0x00,0x34, /* [4108] OBJ_setct_AuthReqTBE */ +0x67,0x2A,0x00,0x35, /* [4112] OBJ_setct_AuthResTBE */ +0x67,0x2A,0x00,0x36, /* [4116] OBJ_setct_AuthResTBEX */ +0x67,0x2A,0x00,0x37, /* [4120] OBJ_setct_AuthTokenTBE */ +0x67,0x2A,0x00,0x38, /* [4124] OBJ_setct_CapTokenTBE */ +0x67,0x2A,0x00,0x39, /* [4128] OBJ_setct_CapTokenTBEX */ +0x67,0x2A,0x00,0x3A, /* [4132] OBJ_setct_AcqCardCodeMsgTBE */ 
+0x67,0x2A,0x00,0x3B, /* [4136] OBJ_setct_AuthRevReqTBE */ +0x67,0x2A,0x00,0x3C, /* [4140] OBJ_setct_AuthRevResTBE */ +0x67,0x2A,0x00,0x3D, /* [4144] OBJ_setct_AuthRevResTBEB */ +0x67,0x2A,0x00,0x3E, /* [4148] OBJ_setct_CapReqTBE */ +0x67,0x2A,0x00,0x3F, /* [4152] OBJ_setct_CapReqTBEX */ +0x67,0x2A,0x00,0x40, /* [4156] OBJ_setct_CapResTBE */ +0x67,0x2A,0x00,0x41, /* [4160] OBJ_setct_CapRevReqTBE */ +0x67,0x2A,0x00,0x42, /* [4164] OBJ_setct_CapRevReqTBEX */ +0x67,0x2A,0x00,0x43, /* [4168] OBJ_setct_CapRevResTBE */ +0x67,0x2A,0x00,0x44, /* [4172] OBJ_setct_CredReqTBE */ +0x67,0x2A,0x00,0x45, /* [4176] OBJ_setct_CredReqTBEX */ +0x67,0x2A,0x00,0x46, /* [4180] OBJ_setct_CredResTBE */ +0x67,0x2A,0x00,0x47, /* [4184] OBJ_setct_CredRevReqTBE */ +0x67,0x2A,0x00,0x48, /* [4188] OBJ_setct_CredRevReqTBEX */ +0x67,0x2A,0x00,0x49, /* [4192] OBJ_setct_CredRevResTBE */ +0x67,0x2A,0x00,0x4A, /* [4196] OBJ_setct_BatchAdminReqTBE */ +0x67,0x2A,0x00,0x4B, /* [4200] OBJ_setct_BatchAdminResTBE */ +0x67,0x2A,0x00,0x4C, /* [4204] OBJ_setct_RegFormReqTBE */ +0x67,0x2A,0x00,0x4D, /* [4208] OBJ_setct_CertReqTBE */ +0x67,0x2A,0x00,0x4E, /* [4212] OBJ_setct_CertReqTBEX */ +0x67,0x2A,0x00,0x4F, /* [4216] OBJ_setct_CertResTBE */ +0x67,0x2A,0x00,0x50, /* [4220] OBJ_setct_CRLNotificationTBS */ +0x67,0x2A,0x00,0x51, /* [4224] OBJ_setct_CRLNotificationResTBS */ +0x67,0x2A,0x00,0x52, /* [4228] OBJ_setct_BCIDistributionTBS */ +0x67,0x2A,0x01,0x01, /* [4232] OBJ_setext_genCrypt */ +0x67,0x2A,0x01,0x03, /* [4236] OBJ_setext_miAuth */ +0x67,0x2A,0x01,0x04, /* [4240] OBJ_setext_pinSecure */ +0x67,0x2A,0x01,0x05, /* [4244] OBJ_setext_pinAny */ +0x67,0x2A,0x01,0x07, /* [4248] OBJ_setext_track2 */ +0x67,0x2A,0x01,0x08, /* [4252] OBJ_setext_cv */ +0x67,0x2A,0x05,0x00, /* [4256] OBJ_set_policy_root */ +0x67,0x2A,0x07,0x00, /* [4260] OBJ_setCext_hashedRoot */ +0x67,0x2A,0x07,0x01, /* [4264] OBJ_setCext_certType */ +0x67,0x2A,0x07,0x02, /* [4268] OBJ_setCext_merchData */ +0x67,0x2A,0x07,0x03, /* [4272] 
OBJ_setCext_cCertRequired */ +0x67,0x2A,0x07,0x04, /* [4276] OBJ_setCext_tunneling */ +0x67,0x2A,0x07,0x05, /* [4280] OBJ_setCext_setExt */ +0x67,0x2A,0x07,0x06, /* [4284] OBJ_setCext_setQualf */ +0x67,0x2A,0x07,0x07, /* [4288] OBJ_setCext_PGWYcapabilities */ +0x67,0x2A,0x07,0x08, /* [4292] OBJ_setCext_TokenIdentifier */ +0x67,0x2A,0x07,0x09, /* [4296] OBJ_setCext_Track2Data */ +0x67,0x2A,0x07,0x0A, /* [4300] OBJ_setCext_TokenType */ +0x67,0x2A,0x07,0x0B, /* [4304] OBJ_setCext_IssuerCapabilities */ +0x67,0x2A,0x03,0x00, /* [4308] OBJ_setAttr_Cert */ +0x67,0x2A,0x03,0x01, /* [4312] OBJ_setAttr_PGWYcap */ +0x67,0x2A,0x03,0x02, /* [4316] OBJ_setAttr_TokenType */ +0x67,0x2A,0x03,0x03, /* [4320] OBJ_setAttr_IssCap */ +0x67,0x2A,0x03,0x00,0x00, /* [4324] OBJ_set_rootKeyThumb */ +0x67,0x2A,0x03,0x00,0x01, /* [4329] OBJ_set_addPolicy */ +0x67,0x2A,0x03,0x02,0x01, /* [4334] OBJ_setAttr_Token_EMV */ +0x67,0x2A,0x03,0x02,0x02, /* [4339] OBJ_setAttr_Token_B0Prime */ +0x67,0x2A,0x03,0x03,0x03, /* [4344] OBJ_setAttr_IssCap_CVM */ +0x67,0x2A,0x03,0x03,0x04, /* [4349] OBJ_setAttr_IssCap_T2 */ +0x67,0x2A,0x03,0x03,0x05, /* [4354] OBJ_setAttr_IssCap_Sig */ +0x67,0x2A,0x03,0x03,0x03,0x01, /* [4359] OBJ_setAttr_GenCryptgrm */ +0x67,0x2A,0x03,0x03,0x04,0x01, /* [4365] OBJ_setAttr_T2Enc */ +0x67,0x2A,0x03,0x03,0x04,0x02, /* [4371] OBJ_setAttr_T2cleartxt */ +0x67,0x2A,0x03,0x03,0x05,0x01, /* [4377] OBJ_setAttr_TokICCsig */ +0x67,0x2A,0x03,0x03,0x05,0x02, /* [4383] OBJ_setAttr_SecDevSig */ +0x67,0x2A,0x08,0x01, /* [4389] OBJ_set_brand_IATA_ATA */ +0x67,0x2A,0x08,0x1E, /* [4393] OBJ_set_brand_Diners */ +0x67,0x2A,0x08,0x22, /* [4397] OBJ_set_brand_AmericanExpress */ +0x67,0x2A,0x08,0x23, /* [4401] OBJ_set_brand_JCB */ +0x67,0x2A,0x08,0x04, /* [4405] OBJ_set_brand_Visa */ +0x67,0x2A,0x08,0x05, /* [4409] OBJ_set_brand_MasterCard */ +0x67,0x2A,0x08,0xAE,0x7B, /* [4413] OBJ_set_brand_Novus */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x03,0x0A, /* [4418] OBJ_des_cdmf */ 
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x06,/* [4426] OBJ_rsaOAEPEncryptionSET */ +0x67, /* [4435] OBJ_international_organizations */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x14,0x02,0x02,/* [4436] OBJ_ms_smartcard_login */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x14,0x02,0x03,/* [4446] OBJ_ms_upn */ +0x55,0x04,0x09, /* [4456] OBJ_streetAddress */ +0x55,0x04,0x11, /* [4459] OBJ_postalCode */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x15, /* [4462] OBJ_id_ppl */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0E, /* [4469] OBJ_proxyCertInfo */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x15,0x00, /* [4477] OBJ_id_ppl_anyLanguage */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x15,0x01, /* [4485] OBJ_id_ppl_inheritAll */ +0x55,0x1D,0x1E, /* [4493] OBJ_name_constraints */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x15,0x02, /* [4496] OBJ_Independent */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,/* [4504] OBJ_sha256WithRSAEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,/* [4513] OBJ_sha384WithRSAEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0D,/* [4522] OBJ_sha512WithRSAEncryption */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0E,/* [4531] OBJ_sha224WithRSAEncryption */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x01,/* [4540] OBJ_sha256 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x02,/* [4549] OBJ_sha384 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x03,/* [4558] OBJ_sha512 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x04,/* [4567] OBJ_sha224 */ +0x2B, /* [4576] OBJ_identified_organization */ +0x2B,0x81,0x04, /* [4577] OBJ_certicom_arc */ +0x67,0x2B, /* [4580] OBJ_wap */ +0x67,0x2B,0x01, /* [4582] OBJ_wap_wsg */ +0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03, /* [4585] OBJ_X9_62_id_characteristic_two_basis */ +0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03,0x01,/* [4593] OBJ_X9_62_onBasis */ +0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03,0x02,/* [4602] OBJ_X9_62_tpBasis */ +0x2A,0x86,0x48,0xCE,0x3D,0x01,0x02,0x03,0x03,/* [4611] OBJ_X9_62_ppBasis */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x01, /* [4620] OBJ_X9_62_c2pnb163v1 */ 
+0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x02, /* [4628] OBJ_X9_62_c2pnb163v2 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x03, /* [4636] OBJ_X9_62_c2pnb163v3 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x04, /* [4644] OBJ_X9_62_c2pnb176v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x05, /* [4652] OBJ_X9_62_c2tnb191v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x06, /* [4660] OBJ_X9_62_c2tnb191v2 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x07, /* [4668] OBJ_X9_62_c2tnb191v3 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x08, /* [4676] OBJ_X9_62_c2onb191v4 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x09, /* [4684] OBJ_X9_62_c2onb191v5 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0A, /* [4692] OBJ_X9_62_c2pnb208w1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0B, /* [4700] OBJ_X9_62_c2tnb239v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0C, /* [4708] OBJ_X9_62_c2tnb239v2 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0D, /* [4716] OBJ_X9_62_c2tnb239v3 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0E, /* [4724] OBJ_X9_62_c2onb239v4 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x0F, /* [4732] OBJ_X9_62_c2onb239v5 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x10, /* [4740] OBJ_X9_62_c2pnb272w1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x11, /* [4748] OBJ_X9_62_c2pnb304w1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x12, /* [4756] OBJ_X9_62_c2tnb359v1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x13, /* [4764] OBJ_X9_62_c2pnb368w1 */ +0x2A,0x86,0x48,0xCE,0x3D,0x03,0x00,0x14, /* [4772] OBJ_X9_62_c2tnb431r1 */ +0x2B,0x81,0x04,0x00,0x06, /* [4780] OBJ_secp112r1 */ +0x2B,0x81,0x04,0x00,0x07, /* [4785] OBJ_secp112r2 */ +0x2B,0x81,0x04,0x00,0x1C, /* [4790] OBJ_secp128r1 */ +0x2B,0x81,0x04,0x00,0x1D, /* [4795] OBJ_secp128r2 */ +0x2B,0x81,0x04,0x00,0x09, /* [4800] OBJ_secp160k1 */ +0x2B,0x81,0x04,0x00,0x08, /* [4805] OBJ_secp160r1 */ +0x2B,0x81,0x04,0x00,0x1E, /* [4810] OBJ_secp160r2 */ +0x2B,0x81,0x04,0x00,0x1F, /* [4815] OBJ_secp192k1 */ +0x2B,0x81,0x04,0x00,0x20, /* [4820] OBJ_secp224k1 */ +0x2B,0x81,0x04,0x00,0x21, /* [4825] OBJ_secp224r1 */ 
+0x2B,0x81,0x04,0x00,0x0A, /* [4830] OBJ_secp256k1 */ +0x2B,0x81,0x04,0x00,0x22, /* [4835] OBJ_secp384r1 */ +0x2B,0x81,0x04,0x00,0x23, /* [4840] OBJ_secp521r1 */ +0x2B,0x81,0x04,0x00,0x04, /* [4845] OBJ_sect113r1 */ +0x2B,0x81,0x04,0x00,0x05, /* [4850] OBJ_sect113r2 */ +0x2B,0x81,0x04,0x00,0x16, /* [4855] OBJ_sect131r1 */ +0x2B,0x81,0x04,0x00,0x17, /* [4860] OBJ_sect131r2 */ +0x2B,0x81,0x04,0x00,0x01, /* [4865] OBJ_sect163k1 */ +0x2B,0x81,0x04,0x00,0x02, /* [4870] OBJ_sect163r1 */ +0x2B,0x81,0x04,0x00,0x0F, /* [4875] OBJ_sect163r2 */ +0x2B,0x81,0x04,0x00,0x18, /* [4880] OBJ_sect193r1 */ +0x2B,0x81,0x04,0x00,0x19, /* [4885] OBJ_sect193r2 */ +0x2B,0x81,0x04,0x00,0x1A, /* [4890] OBJ_sect233k1 */ +0x2B,0x81,0x04,0x00,0x1B, /* [4895] OBJ_sect233r1 */ +0x2B,0x81,0x04,0x00,0x03, /* [4900] OBJ_sect239k1 */ +0x2B,0x81,0x04,0x00,0x10, /* [4905] OBJ_sect283k1 */ +0x2B,0x81,0x04,0x00,0x11, /* [4910] OBJ_sect283r1 */ +0x2B,0x81,0x04,0x00,0x24, /* [4915] OBJ_sect409k1 */ +0x2B,0x81,0x04,0x00,0x25, /* [4920] OBJ_sect409r1 */ +0x2B,0x81,0x04,0x00,0x26, /* [4925] OBJ_sect571k1 */ +0x2B,0x81,0x04,0x00,0x27, /* [4930] OBJ_sect571r1 */ +0x67,0x2B,0x01,0x04,0x01, /* [4935] OBJ_wap_wsg_idm_ecid_wtls1 */ +0x67,0x2B,0x01,0x04,0x03, /* [4940] OBJ_wap_wsg_idm_ecid_wtls3 */ +0x67,0x2B,0x01,0x04,0x04, /* [4945] OBJ_wap_wsg_idm_ecid_wtls4 */ +0x67,0x2B,0x01,0x04,0x05, /* [4950] OBJ_wap_wsg_idm_ecid_wtls5 */ +0x67,0x2B,0x01,0x04,0x06, /* [4955] OBJ_wap_wsg_idm_ecid_wtls6 */ +0x67,0x2B,0x01,0x04,0x07, /* [4960] OBJ_wap_wsg_idm_ecid_wtls7 */ +0x67,0x2B,0x01,0x04,0x08, /* [4965] OBJ_wap_wsg_idm_ecid_wtls8 */ +0x67,0x2B,0x01,0x04,0x09, /* [4970] OBJ_wap_wsg_idm_ecid_wtls9 */ +0x67,0x2B,0x01,0x04,0x0A, /* [4975] OBJ_wap_wsg_idm_ecid_wtls10 */ +0x67,0x2B,0x01,0x04,0x0B, /* [4980] OBJ_wap_wsg_idm_ecid_wtls11 */ +0x67,0x2B,0x01,0x04,0x0C, /* [4985] OBJ_wap_wsg_idm_ecid_wtls12 */ +0x55,0x1D,0x20,0x00, /* [4990] OBJ_any_policy */ +0x55,0x1D,0x21, /* [4994] OBJ_policy_mappings */ +0x55,0x1D,0x36, /* 
[4997] OBJ_inhibit_any_policy */ +0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x01,0x02,/* [5000] OBJ_camellia_128_cbc */ +0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x01,0x03,/* [5011] OBJ_camellia_192_cbc */ +0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x01,0x04,/* [5022] OBJ_camellia_256_cbc */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x01, /* [5033] OBJ_camellia_128_ecb */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x15, /* [5041] OBJ_camellia_192_ecb */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x29, /* [5049] OBJ_camellia_256_ecb */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x04, /* [5057] OBJ_camellia_128_cfb128 */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x18, /* [5065] OBJ_camellia_192_cfb128 */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x2C, /* [5073] OBJ_camellia_256_cfb128 */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x03, /* [5081] OBJ_camellia_128_ofb128 */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x17, /* [5089] OBJ_camellia_192_ofb128 */ +0x03,0xA2,0x31,0x05,0x03,0x01,0x09,0x2B, /* [5097] OBJ_camellia_256_ofb128 */ +0x55,0x1D,0x09, /* [5105] OBJ_subject_directory_attributes */ +0x55,0x1D,0x1C, /* [5108] OBJ_issuing_distribution_point */ +0x55,0x1D,0x1D, /* [5111] OBJ_certificate_issuer */ +0x2A,0x83,0x1A,0x8C,0x9A,0x44, /* [5114] OBJ_kisa */ +0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x03, /* [5120] OBJ_seed_ecb */ +0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x04, /* [5128] OBJ_seed_cbc */ +0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x06, /* [5136] OBJ_seed_ofb128 */ +0x2A,0x83,0x1A,0x8C,0x9A,0x44,0x01,0x05, /* [5144] OBJ_seed_cfb128 */ +0x2B,0x06,0x01,0x05,0x05,0x08,0x01,0x01, /* [5152] OBJ_hmac_md5 */ +0x2B,0x06,0x01,0x05,0x05,0x08,0x01,0x02, /* [5160] OBJ_hmac_sha1 */ +0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x0D,/* [5168] OBJ_id_PasswordBasedMAC */ +0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x42,0x1E,/* [5177] OBJ_id_DHBasedMac */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x04,0x10, /* [5186] OBJ_id_it_suppLangTags */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x30,0x05, /* [5194] OBJ_caRepository */ 
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x09,/* [5202] OBJ_id_smime_ct_compressedData */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x01,0x1B,/* [5213] OBJ_id_ct_asciiTextWithCRLF */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x05,/* [5224] OBJ_id_aes128_wrap */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x19,/* [5233] OBJ_id_aes192_wrap */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2D,/* [5242] OBJ_id_aes256_wrap */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x02, /* [5251] OBJ_ecdsa_with_Recommended */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03, /* [5258] OBJ_ecdsa_with_Specified */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x01, /* [5265] OBJ_ecdsa_with_SHA224 */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x02, /* [5273] OBJ_ecdsa_with_SHA256 */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03, /* [5281] OBJ_ecdsa_with_SHA384 */ +0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x04, /* [5289] OBJ_ecdsa_with_SHA512 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x06, /* [5297] OBJ_hmacWithMD5 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x08, /* [5305] OBJ_hmacWithSHA224 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x09, /* [5313] OBJ_hmacWithSHA256 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x0A, /* [5321] OBJ_hmacWithSHA384 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x02,0x0B, /* [5329] OBJ_hmacWithSHA512 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x03,0x01,/* [5337] OBJ_dsa_with_SHA224 */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x03,0x02,/* [5346] OBJ_dsa_with_SHA256 */ +0x28,0xCF,0x06,0x03,0x00,0x37, /* [5355] OBJ_whirlpool */ +0x2A,0x85,0x03,0x02,0x02, /* [5361] OBJ_cryptopro */ +0x2A,0x85,0x03,0x02,0x09, /* [5366] OBJ_cryptocom */ +0x2A,0x85,0x03,0x02,0x02,0x03, /* [5371] OBJ_id_GostR3411_94_with_GostR3410_2001 */ +0x2A,0x85,0x03,0x02,0x02,0x04, /* [5377] OBJ_id_GostR3411_94_with_GostR3410_94 */ +0x2A,0x85,0x03,0x02,0x02,0x09, /* [5383] OBJ_id_GostR3411_94 */ +0x2A,0x85,0x03,0x02,0x02,0x0A, /* [5389] OBJ_id_HMACGostR3411_94 */ +0x2A,0x85,0x03,0x02,0x02,0x13, /* [5395] OBJ_id_GostR3410_2001 */ +0x2A,0x85,0x03,0x02,0x02,0x14, /* [5401] 
OBJ_id_GostR3410_94 */ +0x2A,0x85,0x03,0x02,0x02,0x15, /* [5407] OBJ_id_Gost28147_89 */ +0x2A,0x85,0x03,0x02,0x02,0x16, /* [5413] OBJ_id_Gost28147_89_MAC */ +0x2A,0x85,0x03,0x02,0x02,0x17, /* [5419] OBJ_id_GostR3411_94_prf */ +0x2A,0x85,0x03,0x02,0x02,0x62, /* [5425] OBJ_id_GostR3410_2001DH */ +0x2A,0x85,0x03,0x02,0x02,0x63, /* [5431] OBJ_id_GostR3410_94DH */ +0x2A,0x85,0x03,0x02,0x02,0x0E,0x01, /* [5437] OBJ_id_Gost28147_89_CryptoPro_KeyMeshing */ +0x2A,0x85,0x03,0x02,0x02,0x0E,0x00, /* [5444] OBJ_id_Gost28147_89_None_KeyMeshing */ +0x2A,0x85,0x03,0x02,0x02,0x1E,0x00, /* [5451] OBJ_id_GostR3411_94_TestParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1E,0x01, /* [5458] OBJ_id_GostR3411_94_CryptoProParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x00, /* [5465] OBJ_id_Gost28147_89_TestParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x01, /* [5472] OBJ_id_Gost28147_89_CryptoPro_A_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x02, /* [5479] OBJ_id_Gost28147_89_CryptoPro_B_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x03, /* [5486] OBJ_id_Gost28147_89_CryptoPro_C_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x04, /* [5493] OBJ_id_Gost28147_89_CryptoPro_D_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x05, /* [5500] OBJ_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x06, /* [5507] OBJ_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x1F,0x07, /* [5514] OBJ_id_Gost28147_89_CryptoPro_RIC_1_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x20,0x00, /* [5521] OBJ_id_GostR3410_94_TestParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x20,0x02, /* [5528] OBJ_id_GostR3410_94_CryptoPro_A_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x20,0x03, /* [5535] OBJ_id_GostR3410_94_CryptoPro_B_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x20,0x04, /* [5542] OBJ_id_GostR3410_94_CryptoPro_C_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x20,0x05, /* [5549] OBJ_id_GostR3410_94_CryptoPro_D_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x21,0x01, /* [5556] OBJ_id_GostR3410_94_CryptoPro_XchA_ParamSet */ 
+0x2A,0x85,0x03,0x02,0x02,0x21,0x02, /* [5563] OBJ_id_GostR3410_94_CryptoPro_XchB_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x21,0x03, /* [5570] OBJ_id_GostR3410_94_CryptoPro_XchC_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x23,0x00, /* [5577] OBJ_id_GostR3410_2001_TestParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x23,0x01, /* [5584] OBJ_id_GostR3410_2001_CryptoPro_A_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x23,0x02, /* [5591] OBJ_id_GostR3410_2001_CryptoPro_B_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x23,0x03, /* [5598] OBJ_id_GostR3410_2001_CryptoPro_C_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x24,0x00, /* [5605] OBJ_id_GostR3410_2001_CryptoPro_XchA_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x24,0x01, /* [5612] OBJ_id_GostR3410_2001_CryptoPro_XchB_ParamSet */ +0x2A,0x85,0x03,0x02,0x02,0x14,0x01, /* [5619] OBJ_id_GostR3410_94_a */ +0x2A,0x85,0x03,0x02,0x02,0x14,0x02, /* [5626] OBJ_id_GostR3410_94_aBis */ +0x2A,0x85,0x03,0x02,0x02,0x14,0x03, /* [5633] OBJ_id_GostR3410_94_b */ +0x2A,0x85,0x03,0x02,0x02,0x14,0x04, /* [5640] OBJ_id_GostR3410_94_bBis */ +0x2A,0x85,0x03,0x02,0x09,0x01,0x06,0x01, /* [5647] OBJ_id_Gost28147_89_cc */ +0x2A,0x85,0x03,0x02,0x09,0x01,0x05,0x03, /* [5655] OBJ_id_GostR3410_94_cc */ +0x2A,0x85,0x03,0x02,0x09,0x01,0x05,0x04, /* [5663] OBJ_id_GostR3410_2001_cc */ +0x2A,0x85,0x03,0x02,0x09,0x01,0x03,0x03, /* [5671] OBJ_id_GostR3411_94_with_GostR3410_94_cc */ +0x2A,0x85,0x03,0x02,0x09,0x01,0x03,0x04, /* [5679] OBJ_id_GostR3411_94_with_GostR3410_2001_cc */ +0x2A,0x85,0x03,0x02,0x09,0x01,0x08,0x01, /* [5687] OBJ_id_GostR3410_2001_ParamSet_cc */ +0x2B,0x06,0x01,0x04,0x01,0x82,0x37,0x11,0x02,/* [5695] OBJ_LocalKeySet */ +0x55,0x1D,0x2E, /* [5704] OBJ_freshest_crl */ +0x2B,0x06,0x01,0x05,0x05,0x07,0x08,0x03, /* [5707] OBJ_id_on_permanentIdentifier */ +0x55,0x04,0x0E, /* [5715] OBJ_searchGuide */ +0x55,0x04,0x0F, /* [5718] OBJ_businessCategory */ +0x55,0x04,0x10, /* [5721] OBJ_postalAddress */ +0x55,0x04,0x12, /* [5724] OBJ_postOfficeBox */ +0x55,0x04,0x13, /* [5727] 
OBJ_physicalDeliveryOfficeName */ +0x55,0x04,0x14, /* [5730] OBJ_telephoneNumber */ +0x55,0x04,0x15, /* [5733] OBJ_telexNumber */ +0x55,0x04,0x16, /* [5736] OBJ_teletexTerminalIdentifier */ +0x55,0x04,0x17, /* [5739] OBJ_facsimileTelephoneNumber */ +0x55,0x04,0x18, /* [5742] OBJ_x121Address */ +0x55,0x04,0x19, /* [5745] OBJ_internationaliSDNNumber */ +0x55,0x04,0x1A, /* [5748] OBJ_registeredAddress */ +0x55,0x04,0x1B, /* [5751] OBJ_destinationIndicator */ +0x55,0x04,0x1C, /* [5754] OBJ_preferredDeliveryMethod */ +0x55,0x04,0x1D, /* [5757] OBJ_presentationAddress */ +0x55,0x04,0x1E, /* [5760] OBJ_supportedApplicationContext */ +0x55,0x04,0x1F, /* [5763] OBJ_member */ +0x55,0x04,0x20, /* [5766] OBJ_owner */ +0x55,0x04,0x21, /* [5769] OBJ_roleOccupant */ +0x55,0x04,0x22, /* [5772] OBJ_seeAlso */ +0x55,0x04,0x23, /* [5775] OBJ_userPassword */ +0x55,0x04,0x24, /* [5778] OBJ_userCertificate */ +0x55,0x04,0x25, /* [5781] OBJ_cACertificate */ +0x55,0x04,0x26, /* [5784] OBJ_authorityRevocationList */ +0x55,0x04,0x27, /* [5787] OBJ_certificateRevocationList */ +0x55,0x04,0x28, /* [5790] OBJ_crossCertificatePair */ +0x55,0x04,0x2F, /* [5793] OBJ_enhancedSearchGuide */ +0x55,0x04,0x30, /* [5796] OBJ_protocolInformation */ +0x55,0x04,0x31, /* [5799] OBJ_distinguishedName */ +0x55,0x04,0x32, /* [5802] OBJ_uniqueMember */ +0x55,0x04,0x33, /* [5805] OBJ_houseIdentifier */ +0x55,0x04,0x34, /* [5808] OBJ_supportedAlgorithms */ +0x55,0x04,0x35, /* [5811] OBJ_deltaRevocationList */ +0x55,0x04,0x36, /* [5814] OBJ_dmdName */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x09,0x10,0x03,0x09,/* [5817] OBJ_id_alg_PWRI_KEK */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x06,/* [5828] OBJ_aes_128_gcm */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x07,/* [5837] OBJ_aes_128_ccm */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x08,/* [5846] OBJ_id_aes128_wrap_pad */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x1A,/* [5855] OBJ_aes_192_gcm */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x1B,/* [5864] OBJ_aes_192_ccm */ 
+0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x1C,/* [5873] OBJ_id_aes192_wrap_pad */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2E,/* [5882] OBJ_aes_256_gcm */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x2F,/* [5891] OBJ_aes_256_ccm */ +0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x01,0x30,/* [5900] OBJ_id_aes256_wrap_pad */ +0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x03,0x02,/* [5909] OBJ_id_camellia128_wrap */ +0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x03,0x03,/* [5920] OBJ_id_camellia192_wrap */ +0x2A,0x83,0x08,0x8C,0x9A,0x4B,0x3D,0x01,0x01,0x03,0x04,/* [5931] OBJ_id_camellia256_wrap */ +0x55,0x1D,0x25,0x00, /* [5942] OBJ_anyExtendedKeyUsage */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x08,/* [5946] OBJ_mgf1 */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0A,/* [5955] OBJ_rsassaPss */ +0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x07,/* [5964] OBJ_rsaesOaep */ }; static const ASN1_OBJECT nid_objs[NUM_NID]={ -{"UNDEF","undefined",NID_undef,1,&(lvalues[0]),0}, -{"rsadsi","RSA Data Security, Inc.",NID_rsadsi,6,&(lvalues[1]),0}, -{"pkcs","RSA Data Security, Inc. PKCS",NID_pkcs,7,&(lvalues[7]),0}, -{"MD2","md2",NID_md2,8,&(lvalues[14]),0}, -{"MD5","md5",NID_md5,8,&(lvalues[22]),0}, -{"RC4","rc4",NID_rc4,8,&(lvalues[30]),0}, -{"rsaEncryption","rsaEncryption",NID_rsaEncryption,9,&(lvalues[38]),0}, +{"UNDEF","undefined",NID_undef,0,NULL,0}, +{"rsadsi","RSA Data Security, Inc.",NID_rsadsi,6,&(lvalues[0]),0}, +{"pkcs","RSA Data Security, Inc. 
PKCS",NID_pkcs,7,&(lvalues[6]),0}, +{"MD2","md2",NID_md2,8,&(lvalues[13]),0}, +{"MD5","md5",NID_md5,8,&(lvalues[21]),0}, +{"RC4","rc4",NID_rc4,8,&(lvalues[29]),0}, +{"rsaEncryption","rsaEncryption",NID_rsaEncryption,9,&(lvalues[37]),0}, {"RSA-MD2","md2WithRSAEncryption",NID_md2WithRSAEncryption,9, - &(lvalues[47]),0}, + &(lvalues[46]),0}, {"RSA-MD5","md5WithRSAEncryption",NID_md5WithRSAEncryption,9, - &(lvalues[56]),0}, + &(lvalues[55]),0}, {"PBE-MD2-DES","pbeWithMD2AndDES-CBC",NID_pbeWithMD2AndDES_CBC,9, - &(lvalues[65]),0}, + &(lvalues[64]),0}, {"PBE-MD5-DES","pbeWithMD5AndDES-CBC",NID_pbeWithMD5AndDES_CBC,9, - &(lvalues[74]),0}, -{"X500","directory services (X.500)",NID_X500,1,&(lvalues[83]),0}, -{"X509","X509",NID_X509,2,&(lvalues[84]),0}, -{"CN","commonName",NID_commonName,3,&(lvalues[86]),0}, -{"C","countryName",NID_countryName,3,&(lvalues[89]),0}, -{"L","localityName",NID_localityName,3,&(lvalues[92]),0}, -{"ST","stateOrProvinceName",NID_stateOrProvinceName,3,&(lvalues[95]),0}, -{"O","organizationName",NID_organizationName,3,&(lvalues[98]),0}, + &(lvalues[73]),0}, +{"X500","directory services (X.500)",NID_X500,1,&(lvalues[82]),0}, +{"X509","X509",NID_X509,2,&(lvalues[83]),0}, +{"CN","commonName",NID_commonName,3,&(lvalues[85]),0}, +{"C","countryName",NID_countryName,3,&(lvalues[88]),0}, +{"L","localityName",NID_localityName,3,&(lvalues[91]),0}, +{"ST","stateOrProvinceName",NID_stateOrProvinceName,3,&(lvalues[94]),0}, +{"O","organizationName",NID_organizationName,3,&(lvalues[97]),0}, {"OU","organizationalUnitName",NID_organizationalUnitName,3, - &(lvalues[101]),0}, -{"RSA","rsa",NID_rsa,4,&(lvalues[104]),0}, -{"pkcs7","pkcs7",NID_pkcs7,8,&(lvalues[108]),0}, -{"pkcs7-data","pkcs7-data",NID_pkcs7_data,9,&(lvalues[116]),0}, + &(lvalues[100]),0}, +{"RSA","rsa",NID_rsa,4,&(lvalues[103]),0}, +{"pkcs7","pkcs7",NID_pkcs7,8,&(lvalues[107]),0}, +{"pkcs7-data","pkcs7-data",NID_pkcs7_data,9,&(lvalues[115]),0}, {"pkcs7-signedData","pkcs7-signedData",NID_pkcs7_signed,9, - 
&(lvalues[125]),0}, + &(lvalues[124]),0}, {"pkcs7-envelopedData","pkcs7-envelopedData",NID_pkcs7_enveloped,9, - &(lvalues[134]),0}, + &(lvalues[133]),0}, {"pkcs7-signedAndEnvelopedData","pkcs7-signedAndEnvelopedData", - NID_pkcs7_signedAndEnveloped,9,&(lvalues[143]),0}, + NID_pkcs7_signedAndEnveloped,9,&(lvalues[142]),0}, {"pkcs7-digestData","pkcs7-digestData",NID_pkcs7_digest,9, - &(lvalues[152]),0}, + &(lvalues[151]),0}, {"pkcs7-encryptedData","pkcs7-encryptedData",NID_pkcs7_encrypted,9, - &(lvalues[161]),0}, -{"pkcs3","pkcs3",NID_pkcs3,8,&(lvalues[170]),0}, + &(lvalues[160]),0}, +{"pkcs3","pkcs3",NID_pkcs3,8,&(lvalues[169]),0}, {"dhKeyAgreement","dhKeyAgreement",NID_dhKeyAgreement,9, - &(lvalues[178]),0}, -{"DES-ECB","des-ecb",NID_des_ecb,5,&(lvalues[187]),0}, -{"DES-CFB","des-cfb",NID_des_cfb64,5,&(lvalues[192]),0}, -{"DES-CBC","des-cbc",NID_des_cbc,5,&(lvalues[197]),0}, -{"DES-EDE","des-ede",NID_des_ede_ecb,5,&(lvalues[202]),0}, + &(lvalues[177]),0}, +{"DES-ECB","des-ecb",NID_des_ecb,5,&(lvalues[186]),0}, +{"DES-CFB","des-cfb",NID_des_cfb64,5,&(lvalues[191]),0}, +{"DES-CBC","des-cbc",NID_des_cbc,5,&(lvalues[196]),0}, +{"DES-EDE","des-ede",NID_des_ede_ecb,5,&(lvalues[201]),0}, {"DES-EDE3","des-ede3",NID_des_ede3_ecb,0,NULL,0}, -{"IDEA-CBC","idea-cbc",NID_idea_cbc,11,&(lvalues[207]),0}, +{"IDEA-CBC","idea-cbc",NID_idea_cbc,11,&(lvalues[206]),0}, {"IDEA-CFB","idea-cfb",NID_idea_cfb64,0,NULL,0}, {"IDEA-ECB","idea-ecb",NID_idea_ecb,0,NULL,0}, -{"RC2-CBC","rc2-cbc",NID_rc2_cbc,8,&(lvalues[218]),0}, +{"RC2-CBC","rc2-cbc",NID_rc2_cbc,8,&(lvalues[217]),0}, {"RC2-ECB","rc2-ecb",NID_rc2_ecb,0,NULL,0}, {"RC2-CFB","rc2-cfb",NID_rc2_cfb64,0,NULL,0}, {"RC2-OFB","rc2-ofb",NID_rc2_ofb64,0,NULL,0}, -{"SHA","sha",NID_sha,5,&(lvalues[226]),0}, +{"SHA","sha",NID_sha,5,&(lvalues[225]),0}, {"RSA-SHA","shaWithRSAEncryption",NID_shaWithRSAEncryption,5, - &(lvalues[231]),0}, + &(lvalues[230]),0}, {"DES-EDE-CBC","des-ede-cbc",NID_des_ede_cbc,0,NULL,0}, 
-{"DES-EDE3-CBC","des-ede3-cbc",NID_des_ede3_cbc,8,&(lvalues[236]),0}, -{"DES-OFB","des-ofb",NID_des_ofb64,5,&(lvalues[244]),0}, +{"DES-EDE3-CBC","des-ede3-cbc",NID_des_ede3_cbc,8,&(lvalues[235]),0}, +{"DES-OFB","des-ofb",NID_des_ofb64,5,&(lvalues[243]),0}, {"IDEA-OFB","idea-ofb",NID_idea_ofb64,0,NULL,0}, -{"pkcs9","pkcs9",NID_pkcs9,8,&(lvalues[249]),0}, +{"pkcs9","pkcs9",NID_pkcs9,8,&(lvalues[248]),0}, {"emailAddress","emailAddress",NID_pkcs9_emailAddress,9, - &(lvalues[257]),0}, + &(lvalues[256]),0}, {"unstructuredName","unstructuredName",NID_pkcs9_unstructuredName,9, - &(lvalues[266]),0}, -{"contentType","contentType",NID_pkcs9_contentType,9,&(lvalues[275]),0}, + &(lvalues[265]),0}, +{"contentType","contentType",NID_pkcs9_contentType,9,&(lvalues[274]),0}, {"messageDigest","messageDigest",NID_pkcs9_messageDigest,9, - &(lvalues[284]),0}, -{"signingTime","signingTime",NID_pkcs9_signingTime,9,&(lvalues[293]),0}, + &(lvalues[283]),0}, +{"signingTime","signingTime",NID_pkcs9_signingTime,9,&(lvalues[292]),0}, {"countersignature","countersignature",NID_pkcs9_countersignature,9, - &(lvalues[302]),0}, + &(lvalues[301]),0}, {"challengePassword","challengePassword",NID_pkcs9_challengePassword, - 9,&(lvalues[311]),0}, + 9,&(lvalues[310]),0}, {"unstructuredAddress","unstructuredAddress", - NID_pkcs9_unstructuredAddress,9,&(lvalues[320]),0}, + NID_pkcs9_unstructuredAddress,9,&(lvalues[319]),0}, {"extendedCertificateAttributes","extendedCertificateAttributes", - NID_pkcs9_extCertAttributes,9,&(lvalues[329]),0}, + NID_pkcs9_extCertAttributes,9,&(lvalues[328]),0}, {"Netscape","Netscape Communications Corp.",NID_netscape,7, - &(lvalues[338]),0}, + &(lvalues[337]),0}, {"nsCertExt","Netscape Certificate Extension", - NID_netscape_cert_extension,8,&(lvalues[345]),0}, + NID_netscape_cert_extension,8,&(lvalues[344]),0}, {"nsDataType","Netscape Data Type",NID_netscape_data_type,8, - &(lvalues[353]),0}, + &(lvalues[352]),0}, {"DES-EDE-CFB","des-ede-cfb",NID_des_ede_cfb64,0,NULL,0}, 
{"DES-EDE3-CFB","des-ede3-cfb",NID_des_ede3_cfb64,0,NULL,0}, {"DES-EDE-OFB","des-ede-ofb",NID_des_ede_ofb64,0,NULL,0}, {"DES-EDE3-OFB","des-ede3-ofb",NID_des_ede3_ofb64,0,NULL,0}, -{"SHA1","sha1",NID_sha1,5,&(lvalues[361]),0}, +{"SHA1","sha1",NID_sha1,5,&(lvalues[360]),0}, {"RSA-SHA1","sha1WithRSAEncryption",NID_sha1WithRSAEncryption,9, - &(lvalues[366]),0}, -{"DSA-SHA","dsaWithSHA",NID_dsaWithSHA,5,&(lvalues[375]),0}, -{"DSA-old","dsaEncryption-old",NID_dsa_2,5,&(lvalues[380]),0}, + &(lvalues[365]),0}, +{"DSA-SHA","dsaWithSHA",NID_dsaWithSHA,5,&(lvalues[374]),0}, +{"DSA-old","dsaEncryption-old",NID_dsa_2,5,&(lvalues[379]),0}, {"PBE-SHA1-RC2-64","pbeWithSHA1AndRC2-CBC",NID_pbeWithSHA1AndRC2_CBC, - 9,&(lvalues[385]),0}, -{"PBKDF2","PBKDF2",NID_id_pbkdf2,9,&(lvalues[394]),0}, -{"DSA-SHA1-old","dsaWithSHA1-old",NID_dsaWithSHA1_2,5,&(lvalues[403]),0}, + 9,&(lvalues[384]),0}, +{"PBKDF2","PBKDF2",NID_id_pbkdf2,9,&(lvalues[393]),0}, +{"DSA-SHA1-old","dsaWithSHA1-old",NID_dsaWithSHA1_2,5,&(lvalues[402]),0}, {"nsCertType","Netscape Cert Type",NID_netscape_cert_type,9, - &(lvalues[408]),0}, + &(lvalues[407]),0}, {"nsBaseUrl","Netscape Base Url",NID_netscape_base_url,9, - &(lvalues[417]),0}, + &(lvalues[416]),0}, {"nsRevocationUrl","Netscape Revocation Url", - NID_netscape_revocation_url,9,&(lvalues[426]),0}, + NID_netscape_revocation_url,9,&(lvalues[425]),0}, {"nsCaRevocationUrl","Netscape CA Revocation Url", - NID_netscape_ca_revocation_url,9,&(lvalues[435]),0}, + NID_netscape_ca_revocation_url,9,&(lvalues[434]),0}, {"nsRenewalUrl","Netscape Renewal Url",NID_netscape_renewal_url,9, - &(lvalues[444]),0}, + &(lvalues[443]),0}, {"nsCaPolicyUrl","Netscape CA Policy Url",NID_netscape_ca_policy_url, - 9,&(lvalues[453]),0}, + 9,&(lvalues[452]),0}, {"nsSslServerName","Netscape SSL Server Name", - NID_netscape_ssl_server_name,9,&(lvalues[462]),0}, -{"nsComment","Netscape Comment",NID_netscape_comment,9,&(lvalues[471]),0}, + NID_netscape_ssl_server_name,9,&(lvalues[461]),0}, 
+{"nsComment","Netscape Comment",NID_netscape_comment,9,&(lvalues[470]),0}, {"nsCertSequence","Netscape Certificate Sequence", - NID_netscape_cert_sequence,9,&(lvalues[480]),0}, + NID_netscape_cert_sequence,9,&(lvalues[479]),0}, {"DESX-CBC","desx-cbc",NID_desx_cbc,0,NULL,0}, -{"id-ce","id-ce",NID_id_ce,2,&(lvalues[489]),0}, +{"id-ce","id-ce",NID_id_ce,2,&(lvalues[488]),0}, {"subjectKeyIdentifier","X509v3 Subject Key Identifier", - NID_subject_key_identifier,3,&(lvalues[491]),0}, -{"keyUsage","X509v3 Key Usage",NID_key_usage,3,&(lvalues[494]),0}, + NID_subject_key_identifier,3,&(lvalues[490]),0}, +{"keyUsage","X509v3 Key Usage",NID_key_usage,3,&(lvalues[493]),0}, {"privateKeyUsagePeriod","X509v3 Private Key Usage Period", - NID_private_key_usage_period,3,&(lvalues[497]),0}, + NID_private_key_usage_period,3,&(lvalues[496]),0}, {"subjectAltName","X509v3 Subject Alternative Name", - NID_subject_alt_name,3,&(lvalues[500]),0}, + NID_subject_alt_name,3,&(lvalues[499]),0}, {"issuerAltName","X509v3 Issuer Alternative Name",NID_issuer_alt_name, - 3,&(lvalues[503]),0}, + 3,&(lvalues[502]),0}, {"basicConstraints","X509v3 Basic Constraints",NID_basic_constraints, - 3,&(lvalues[506]),0}, -{"crlNumber","X509v3 CRL Number",NID_crl_number,3,&(lvalues[509]),0}, + 3,&(lvalues[505]),0}, +{"crlNumber","X509v3 CRL Number",NID_crl_number,3,&(lvalues[508]),0}, {"certificatePolicies","X509v3 Certificate Policies", - NID_certificate_policies,3,&(lvalues[512]),0}, + NID_certificate_policies,3,&(lvalues[511]),0}, {"authorityKeyIdentifier","X509v3 Authority Key Identifier", - NID_authority_key_identifier,3,&(lvalues[515]),0}, -{"BF-CBC","bf-cbc",NID_bf_cbc,9,&(lvalues[518]),0}, + NID_authority_key_identifier,3,&(lvalues[514]),0}, +{"BF-CBC","bf-cbc",NID_bf_cbc,9,&(lvalues[517]),0}, {"BF-ECB","bf-ecb",NID_bf_ecb,0,NULL,0}, {"BF-CFB","bf-cfb",NID_bf_cfb64,0,NULL,0}, {"BF-OFB","bf-ofb",NID_bf_ofb64,0,NULL,0}, -{"MDC2","mdc2",NID_mdc2,4,&(lvalues[527]),0}, 
-{"RSA-MDC2","mdc2WithRSA",NID_mdc2WithRSA,4,&(lvalues[531]),0}, +{"MDC2","mdc2",NID_mdc2,4,&(lvalues[526]),0}, +{"RSA-MDC2","mdc2WithRSA",NID_mdc2WithRSA,4,&(lvalues[530]),0}, {"RC4-40","rc4-40",NID_rc4_40,0,NULL,0}, {"RC2-40-CBC","rc2-40-cbc",NID_rc2_40_cbc,0,NULL,0}, -{"GN","givenName",NID_givenName,3,&(lvalues[535]),0}, -{"SN","surname",NID_surname,3,&(lvalues[538]),0}, -{"initials","initials",NID_initials,3,&(lvalues[541]),0}, +{"GN","givenName",NID_givenName,3,&(lvalues[534]),0}, +{"SN","surname",NID_surname,3,&(lvalues[537]),0}, +{"initials","initials",NID_initials,3,&(lvalues[540]),0}, {NULL,NULL,NID_undef,0,NULL,0}, {"crlDistributionPoints","X509v3 CRL Distribution Points", - NID_crl_distribution_points,3,&(lvalues[544]),0}, -{"RSA-NP-MD5","md5WithRSA",NID_md5WithRSA,5,&(lvalues[547]),0}, -{"serialNumber","serialNumber",NID_serialNumber,3,&(lvalues[552]),0}, -{"title","title",NID_title,3,&(lvalues[555]),0}, -{"description","description",NID_description,3,&(lvalues[558]),0}, -{"CAST5-CBC","cast5-cbc",NID_cast5_cbc,9,&(lvalues[561]),0}, + NID_crl_distribution_points,3,&(lvalues[543]),0}, +{"RSA-NP-MD5","md5WithRSA",NID_md5WithRSA,5,&(lvalues[546]),0}, +{"serialNumber","serialNumber",NID_serialNumber,3,&(lvalues[551]),0}, +{"title","title",NID_title,3,&(lvalues[554]),0}, +{"description","description",NID_description,3,&(lvalues[557]),0}, +{"CAST5-CBC","cast5-cbc",NID_cast5_cbc,9,&(lvalues[560]),0}, {"CAST5-ECB","cast5-ecb",NID_cast5_ecb,0,NULL,0}, {"CAST5-CFB","cast5-cfb",NID_cast5_cfb64,0,NULL,0}, {"CAST5-OFB","cast5-ofb",NID_cast5_ofb64,0,NULL,0}, {"pbeWithMD5AndCast5CBC","pbeWithMD5AndCast5CBC", - NID_pbeWithMD5AndCast5_CBC,9,&(lvalues[570]),0}, -{"DSA-SHA1","dsaWithSHA1",NID_dsaWithSHA1,7,&(lvalues[579]),0}, + NID_pbeWithMD5AndCast5_CBC,9,&(lvalues[569]),0}, +{"DSA-SHA1","dsaWithSHA1",NID_dsaWithSHA1,7,&(lvalues[578]),0}, {"MD5-SHA1","md5-sha1",NID_md5_sha1,0,NULL,0}, -{"RSA-SHA1-2","sha1WithRSA",NID_sha1WithRSA,5,&(lvalues[586]),0}, 
-{"DSA","dsaEncryption",NID_dsa,7,&(lvalues[591]),0}, -{"RIPEMD160","ripemd160",NID_ripemd160,5,&(lvalues[598]),0}, +{"RSA-SHA1-2","sha1WithRSA",NID_sha1WithRSA,5,&(lvalues[585]),0}, +{"DSA","dsaEncryption",NID_dsa,7,&(lvalues[590]),0}, +{"RIPEMD160","ripemd160",NID_ripemd160,5,&(lvalues[597]),0}, {NULL,NULL,NID_undef,0,NULL,0}, {"RSA-RIPEMD160","ripemd160WithRSA",NID_ripemd160WithRSA,6, - &(lvalues[603]),0}, -{"RC5-CBC","rc5-cbc",NID_rc5_cbc,8,&(lvalues[609]),0}, + &(lvalues[602]),0}, +{"RC5-CBC","rc5-cbc",NID_rc5_cbc,8,&(lvalues[608]),0}, {"RC5-ECB","rc5-ecb",NID_rc5_ecb,0,NULL,0}, {"RC5-CFB","rc5-cfb",NID_rc5_cfb64,0,NULL,0}, {"RC5-OFB","rc5-ofb",NID_rc5_ofb64,0,NULL,0}, -{"RLE","run length compression",NID_rle_compression,6,&(lvalues[617]),0}, -{"ZLIB","zlib compression",NID_zlib_compression,11,&(lvalues[623]),0}, +{"RLE","run length compression",NID_rle_compression,6,&(lvalues[616]),0}, +{"ZLIB","zlib compression",NID_zlib_compression,11,&(lvalues[622]),0}, {"extendedKeyUsage","X509v3 Extended Key Usage",NID_ext_key_usage,3, - &(lvalues[634]),0}, -{"PKIX","PKIX",NID_id_pkix,6,&(lvalues[637]),0}, -{"id-kp","id-kp",NID_id_kp,7,&(lvalues[643]),0}, + &(lvalues[633]),0}, +{"PKIX","PKIX",NID_id_pkix,6,&(lvalues[636]),0}, +{"id-kp","id-kp",NID_id_kp,7,&(lvalues[642]),0}, {"serverAuth","TLS Web Server Authentication",NID_server_auth,8, - &(lvalues[650]),0}, + &(lvalues[649]),0}, {"clientAuth","TLS Web Client Authentication",NID_client_auth,8, - &(lvalues[658]),0}, -{"codeSigning","Code Signing",NID_code_sign,8,&(lvalues[666]),0}, + &(lvalues[657]),0}, +{"codeSigning","Code Signing",NID_code_sign,8,&(lvalues[665]),0}, {"emailProtection","E-mail Protection",NID_email_protect,8, - &(lvalues[674]),0}, -{"timeStamping","Time Stamping",NID_time_stamp,8,&(lvalues[682]),0}, + &(lvalues[673]),0}, +{"timeStamping","Time Stamping",NID_time_stamp,8,&(lvalues[681]),0}, {"msCodeInd","Microsoft Individual Code Signing",NID_ms_code_ind,10, - &(lvalues[690]),0}, + &(lvalues[689]),0}, 
{"msCodeCom","Microsoft Commercial Code Signing",NID_ms_code_com,10, - &(lvalues[700]),0}, + &(lvalues[699]),0}, {"msCTLSign","Microsoft Trust List Signing",NID_ms_ctl_sign,10, - &(lvalues[710]),0}, -{"msSGC","Microsoft Server Gated Crypto",NID_ms_sgc,10,&(lvalues[720]),0}, + &(lvalues[709]),0}, +{"msSGC","Microsoft Server Gated Crypto",NID_ms_sgc,10,&(lvalues[719]),0}, {"msEFS","Microsoft Encrypted File System",NID_ms_efs,10, - &(lvalues[730]),0}, -{"nsSGC","Netscape Server Gated Crypto",NID_ns_sgc,9,&(lvalues[740]),0}, + &(lvalues[729]),0}, +{"nsSGC","Netscape Server Gated Crypto",NID_ns_sgc,9,&(lvalues[739]),0}, {"deltaCRL","X509v3 Delta CRL Indicator",NID_delta_crl,3, - &(lvalues[749]),0}, -{"CRLReason","X509v3 CRL Reason Code",NID_crl_reason,3,&(lvalues[752]),0}, + &(lvalues[748]),0}, +{"CRLReason","X509v3 CRL Reason Code",NID_crl_reason,3,&(lvalues[751]),0}, {"invalidityDate","Invalidity Date",NID_invalidity_date,3, - &(lvalues[755]),0}, -{"SXNetID","Strong Extranet ID",NID_sxnet,5,&(lvalues[758]),0}, + &(lvalues[754]),0}, +{"SXNetID","Strong Extranet ID",NID_sxnet,5,&(lvalues[757]),0}, {"PBE-SHA1-RC4-128","pbeWithSHA1And128BitRC4", - NID_pbe_WithSHA1And128BitRC4,10,&(lvalues[763]),0}, + NID_pbe_WithSHA1And128BitRC4,10,&(lvalues[762]),0}, {"PBE-SHA1-RC4-40","pbeWithSHA1And40BitRC4", - NID_pbe_WithSHA1And40BitRC4,10,&(lvalues[773]),0}, + NID_pbe_WithSHA1And40BitRC4,10,&(lvalues[772]),0}, {"PBE-SHA1-3DES","pbeWithSHA1And3-KeyTripleDES-CBC", - NID_pbe_WithSHA1And3_Key_TripleDES_CBC,10,&(lvalues[783]),0}, + NID_pbe_WithSHA1And3_Key_TripleDES_CBC,10,&(lvalues[782]),0}, {"PBE-SHA1-2DES","pbeWithSHA1And2-KeyTripleDES-CBC", - NID_pbe_WithSHA1And2_Key_TripleDES_CBC,10,&(lvalues[793]),0}, + NID_pbe_WithSHA1And2_Key_TripleDES_CBC,10,&(lvalues[792]),0}, {"PBE-SHA1-RC2-128","pbeWithSHA1And128BitRC2-CBC", - NID_pbe_WithSHA1And128BitRC2_CBC,10,&(lvalues[803]),0}, + NID_pbe_WithSHA1And128BitRC2_CBC,10,&(lvalues[802]),0}, {"PBE-SHA1-RC2-40","pbeWithSHA1And40BitRC2-CBC", - 
NID_pbe_WithSHA1And40BitRC2_CBC,10,&(lvalues[813]),0}, -{"keyBag","keyBag",NID_keyBag,11,&(lvalues[823]),0}, + NID_pbe_WithSHA1And40BitRC2_CBC,10,&(lvalues[812]),0}, +{"keyBag","keyBag",NID_keyBag,11,&(lvalues[822]),0}, {"pkcs8ShroudedKeyBag","pkcs8ShroudedKeyBag",NID_pkcs8ShroudedKeyBag, - 11,&(lvalues[834]),0}, -{"certBag","certBag",NID_certBag,11,&(lvalues[845]),0}, -{"crlBag","crlBag",NID_crlBag,11,&(lvalues[856]),0}, -{"secretBag","secretBag",NID_secretBag,11,&(lvalues[867]),0}, + 11,&(lvalues[833]),0}, +{"certBag","certBag",NID_certBag,11,&(lvalues[844]),0}, +{"crlBag","crlBag",NID_crlBag,11,&(lvalues[855]),0}, +{"secretBag","secretBag",NID_secretBag,11,&(lvalues[866]),0}, {"safeContentsBag","safeContentsBag",NID_safeContentsBag,11, - &(lvalues[878]),0}, -{"friendlyName","friendlyName",NID_friendlyName,9,&(lvalues[889]),0}, -{"localKeyID","localKeyID",NID_localKeyID,9,&(lvalues[898]),0}, + &(lvalues[877]),0}, +{"friendlyName","friendlyName",NID_friendlyName,9,&(lvalues[888]),0}, +{"localKeyID","localKeyID",NID_localKeyID,9,&(lvalues[897]),0}, {"x509Certificate","x509Certificate",NID_x509Certificate,10, - &(lvalues[907]),0}, + &(lvalues[906]),0}, {"sdsiCertificate","sdsiCertificate",NID_sdsiCertificate,10, - &(lvalues[917]),0}, -{"x509Crl","x509Crl",NID_x509Crl,10,&(lvalues[927]),0}, -{"PBES2","PBES2",NID_pbes2,9,&(lvalues[937]),0}, -{"PBMAC1","PBMAC1",NID_pbmac1,9,&(lvalues[946]),0}, -{"hmacWithSHA1","hmacWithSHA1",NID_hmacWithSHA1,8,&(lvalues[955]),0}, -{"id-qt-cps","Policy Qualifier CPS",NID_id_qt_cps,8,&(lvalues[963]),0}, + &(lvalues[916]),0}, +{"x509Crl","x509Crl",NID_x509Crl,10,&(lvalues[926]),0}, +{"PBES2","PBES2",NID_pbes2,9,&(lvalues[936]),0}, +{"PBMAC1","PBMAC1",NID_pbmac1,9,&(lvalues[945]),0}, +{"hmacWithSHA1","hmacWithSHA1",NID_hmacWithSHA1,8,&(lvalues[954]),0}, +{"id-qt-cps","Policy Qualifier CPS",NID_id_qt_cps,8,&(lvalues[962]),0}, {"id-qt-unotice","Policy Qualifier User Notice",NID_id_qt_unotice,8, - &(lvalues[971]),0}, + &(lvalues[970]),0}, 
{"RC2-64-CBC","rc2-64-cbc",NID_rc2_64_cbc,0,NULL,0}, {"SMIME-CAPS","S/MIME Capabilities",NID_SMIMECapabilities,9, - &(lvalues[979]),0}, + &(lvalues[978]),0}, {"PBE-MD2-RC2-64","pbeWithMD2AndRC2-CBC",NID_pbeWithMD2AndRC2_CBC,9, - &(lvalues[988]),0}, + &(lvalues[987]),0}, {"PBE-MD5-RC2-64","pbeWithMD5AndRC2-CBC",NID_pbeWithMD5AndRC2_CBC,9, - &(lvalues[997]),0}, + &(lvalues[996]),0}, {"PBE-SHA1-DES","pbeWithSHA1AndDES-CBC",NID_pbeWithSHA1AndDES_CBC,9, - &(lvalues[1006]),0}, + &(lvalues[1005]),0}, {"msExtReq","Microsoft Extension Request",NID_ms_ext_req,10, - &(lvalues[1015]),0}, -{"extReq","Extension Request",NID_ext_req,9,&(lvalues[1025]),0}, -{"name","name",NID_name,3,&(lvalues[1034]),0}, -{"dnQualifier","dnQualifier",NID_dnQualifier,3,&(lvalues[1037]),0}, -{"id-pe","id-pe",NID_id_pe,7,&(lvalues[1040]),0}, -{"id-ad","id-ad",NID_id_ad,7,&(lvalues[1047]),0}, + &(lvalues[1014]),0}, +{"extReq","Extension Request",NID_ext_req,9,&(lvalues[1024]),0}, +{"name","name",NID_name,3,&(lvalues[1033]),0}, +{"dnQualifier","dnQualifier",NID_dnQualifier,3,&(lvalues[1036]),0}, +{"id-pe","id-pe",NID_id_pe,7,&(lvalues[1039]),0}, +{"id-ad","id-ad",NID_id_ad,7,&(lvalues[1046]),0}, {"authorityInfoAccess","Authority Information Access",NID_info_access, - 8,&(lvalues[1054]),0}, -{"OCSP","OCSP",NID_ad_OCSP,8,&(lvalues[1062]),0}, -{"caIssuers","CA Issuers",NID_ad_ca_issuers,8,&(lvalues[1070]),0}, -{"OCSPSigning","OCSP Signing",NID_OCSP_sign,8,&(lvalues[1078]),0}, -{"ISO","iso",NID_iso,1,&(lvalues[1086]),0}, -{"member-body","ISO Member Body",NID_member_body,1,&(lvalues[1087]),0}, -{"ISO-US","ISO US Member Body",NID_ISO_US,3,&(lvalues[1088]),0}, -{"X9-57","X9.57",NID_X9_57,5,&(lvalues[1091]),0}, -{"X9cm","X9.57 CM ?",NID_X9cm,6,&(lvalues[1096]),0}, -{"pkcs1","pkcs1",NID_pkcs1,8,&(lvalues[1102]),0}, -{"pkcs5","pkcs5",NID_pkcs5,8,&(lvalues[1110]),0}, -{"SMIME","S/MIME",NID_SMIME,9,&(lvalues[1118]),0}, -{"id-smime-mod","id-smime-mod",NID_id_smime_mod,10,&(lvalues[1127]),0}, 
-{"id-smime-ct","id-smime-ct",NID_id_smime_ct,10,&(lvalues[1137]),0}, -{"id-smime-aa","id-smime-aa",NID_id_smime_aa,10,&(lvalues[1147]),0}, -{"id-smime-alg","id-smime-alg",NID_id_smime_alg,10,&(lvalues[1157]),0}, -{"id-smime-cd","id-smime-cd",NID_id_smime_cd,10,&(lvalues[1167]),0}, -{"id-smime-spq","id-smime-spq",NID_id_smime_spq,10,&(lvalues[1177]),0}, -{"id-smime-cti","id-smime-cti",NID_id_smime_cti,10,&(lvalues[1187]),0}, + 8,&(lvalues[1053]),0}, +{"OCSP","OCSP",NID_ad_OCSP,8,&(lvalues[1061]),0}, +{"caIssuers","CA Issuers",NID_ad_ca_issuers,8,&(lvalues[1069]),0}, +{"OCSPSigning","OCSP Signing",NID_OCSP_sign,8,&(lvalues[1077]),0}, +{"ISO","iso",NID_iso,0,NULL,0}, +{"member-body","ISO Member Body",NID_member_body,1,&(lvalues[1085]),0}, +{"ISO-US","ISO US Member Body",NID_ISO_US,3,&(lvalues[1086]),0}, +{"X9-57","X9.57",NID_X9_57,5,&(lvalues[1089]),0}, +{"X9cm","X9.57 CM ?",NID_X9cm,6,&(lvalues[1094]),0}, +{"pkcs1","pkcs1",NID_pkcs1,8,&(lvalues[1100]),0}, +{"pkcs5","pkcs5",NID_pkcs5,8,&(lvalues[1108]),0}, +{"SMIME","S/MIME",NID_SMIME,9,&(lvalues[1116]),0}, +{"id-smime-mod","id-smime-mod",NID_id_smime_mod,10,&(lvalues[1125]),0}, +{"id-smime-ct","id-smime-ct",NID_id_smime_ct,10,&(lvalues[1135]),0}, +{"id-smime-aa","id-smime-aa",NID_id_smime_aa,10,&(lvalues[1145]),0}, +{"id-smime-alg","id-smime-alg",NID_id_smime_alg,10,&(lvalues[1155]),0}, +{"id-smime-cd","id-smime-cd",NID_id_smime_cd,10,&(lvalues[1165]),0}, +{"id-smime-spq","id-smime-spq",NID_id_smime_spq,10,&(lvalues[1175]),0}, +{"id-smime-cti","id-smime-cti",NID_id_smime_cti,10,&(lvalues[1185]),0}, {"id-smime-mod-cms","id-smime-mod-cms",NID_id_smime_mod_cms,11, - &(lvalues[1197]),0}, + &(lvalues[1195]),0}, {"id-smime-mod-ess","id-smime-mod-ess",NID_id_smime_mod_ess,11, - &(lvalues[1208]),0}, + &(lvalues[1206]),0}, {"id-smime-mod-oid","id-smime-mod-oid",NID_id_smime_mod_oid,11, - &(lvalues[1219]),0}, + &(lvalues[1217]),0}, {"id-smime-mod-msg-v3","id-smime-mod-msg-v3",NID_id_smime_mod_msg_v3, - 11,&(lvalues[1230]),0}, 
+ 11,&(lvalues[1228]),0}, {"id-smime-mod-ets-eSignature-88","id-smime-mod-ets-eSignature-88", - NID_id_smime_mod_ets_eSignature_88,11,&(lvalues[1241]),0}, + NID_id_smime_mod_ets_eSignature_88,11,&(lvalues[1239]),0}, {"id-smime-mod-ets-eSignature-97","id-smime-mod-ets-eSignature-97", - NID_id_smime_mod_ets_eSignature_97,11,&(lvalues[1252]),0}, + NID_id_smime_mod_ets_eSignature_97,11,&(lvalues[1250]),0}, {"id-smime-mod-ets-eSigPolicy-88","id-smime-mod-ets-eSigPolicy-88", - NID_id_smime_mod_ets_eSigPolicy_88,11,&(lvalues[1263]),0}, + NID_id_smime_mod_ets_eSigPolicy_88,11,&(lvalues[1261]),0}, {"id-smime-mod-ets-eSigPolicy-97","id-smime-mod-ets-eSigPolicy-97", - NID_id_smime_mod_ets_eSigPolicy_97,11,&(lvalues[1274]),0}, + NID_id_smime_mod_ets_eSigPolicy_97,11,&(lvalues[1272]),0}, {"id-smime-ct-receipt","id-smime-ct-receipt",NID_id_smime_ct_receipt, - 11,&(lvalues[1285]),0}, + 11,&(lvalues[1283]),0}, {"id-smime-ct-authData","id-smime-ct-authData", - NID_id_smime_ct_authData,11,&(lvalues[1296]),0}, + NID_id_smime_ct_authData,11,&(lvalues[1294]),0}, {"id-smime-ct-publishCert","id-smime-ct-publishCert", - NID_id_smime_ct_publishCert,11,&(lvalues[1307]),0}, + NID_id_smime_ct_publishCert,11,&(lvalues[1305]),0}, {"id-smime-ct-TSTInfo","id-smime-ct-TSTInfo",NID_id_smime_ct_TSTInfo, - 11,&(lvalues[1318]),0}, + 11,&(lvalues[1316]),0}, {"id-smime-ct-TDTInfo","id-smime-ct-TDTInfo",NID_id_smime_ct_TDTInfo, - 11,&(lvalues[1329]),0}, + 11,&(lvalues[1327]),0}, {"id-smime-ct-contentInfo","id-smime-ct-contentInfo", - NID_id_smime_ct_contentInfo,11,&(lvalues[1340]),0}, + NID_id_smime_ct_contentInfo,11,&(lvalues[1338]),0}, {"id-smime-ct-DVCSRequestData","id-smime-ct-DVCSRequestData", - NID_id_smime_ct_DVCSRequestData,11,&(lvalues[1351]),0}, + NID_id_smime_ct_DVCSRequestData,11,&(lvalues[1349]),0}, {"id-smime-ct-DVCSResponseData","id-smime-ct-DVCSResponseData", - NID_id_smime_ct_DVCSResponseData,11,&(lvalues[1362]),0}, + NID_id_smime_ct_DVCSResponseData,11,&(lvalues[1360]),0}, 
{"id-smime-aa-receiptRequest","id-smime-aa-receiptRequest", - NID_id_smime_aa_receiptRequest,11,&(lvalues[1373]),0}, + NID_id_smime_aa_receiptRequest,11,&(lvalues[1371]),0}, {"id-smime-aa-securityLabel","id-smime-aa-securityLabel", - NID_id_smime_aa_securityLabel,11,&(lvalues[1384]),0}, + NID_id_smime_aa_securityLabel,11,&(lvalues[1382]),0}, {"id-smime-aa-mlExpandHistory","id-smime-aa-mlExpandHistory", - NID_id_smime_aa_mlExpandHistory,11,&(lvalues[1395]),0}, + NID_id_smime_aa_mlExpandHistory,11,&(lvalues[1393]),0}, {"id-smime-aa-contentHint","id-smime-aa-contentHint", - NID_id_smime_aa_contentHint,11,&(lvalues[1406]),0}, + NID_id_smime_aa_contentHint,11,&(lvalues[1404]),0}, {"id-smime-aa-msgSigDigest","id-smime-aa-msgSigDigest", - NID_id_smime_aa_msgSigDigest,11,&(lvalues[1417]),0}, + NID_id_smime_aa_msgSigDigest,11,&(lvalues[1415]),0}, {"id-smime-aa-encapContentType","id-smime-aa-encapContentType", - NID_id_smime_aa_encapContentType,11,&(lvalues[1428]),0}, + NID_id_smime_aa_encapContentType,11,&(lvalues[1426]),0}, {"id-smime-aa-contentIdentifier","id-smime-aa-contentIdentifier", - NID_id_smime_aa_contentIdentifier,11,&(lvalues[1439]),0}, + NID_id_smime_aa_contentIdentifier,11,&(lvalues[1437]),0}, {"id-smime-aa-macValue","id-smime-aa-macValue", - NID_id_smime_aa_macValue,11,&(lvalues[1450]),0}, + NID_id_smime_aa_macValue,11,&(lvalues[1448]),0}, {"id-smime-aa-equivalentLabels","id-smime-aa-equivalentLabels", - NID_id_smime_aa_equivalentLabels,11,&(lvalues[1461]),0}, + NID_id_smime_aa_equivalentLabels,11,&(lvalues[1459]),0}, {"id-smime-aa-contentReference","id-smime-aa-contentReference", - NID_id_smime_aa_contentReference,11,&(lvalues[1472]),0}, + NID_id_smime_aa_contentReference,11,&(lvalues[1470]),0}, {"id-smime-aa-encrypKeyPref","id-smime-aa-encrypKeyPref", - NID_id_smime_aa_encrypKeyPref,11,&(lvalues[1483]),0}, + NID_id_smime_aa_encrypKeyPref,11,&(lvalues[1481]),0}, {"id-smime-aa-signingCertificate","id-smime-aa-signingCertificate", - 
NID_id_smime_aa_signingCertificate,11,&(lvalues[1494]),0}, + NID_id_smime_aa_signingCertificate,11,&(lvalues[1492]),0}, {"id-smime-aa-smimeEncryptCerts","id-smime-aa-smimeEncryptCerts", - NID_id_smime_aa_smimeEncryptCerts,11,&(lvalues[1505]),0}, + NID_id_smime_aa_smimeEncryptCerts,11,&(lvalues[1503]),0}, {"id-smime-aa-timeStampToken","id-smime-aa-timeStampToken", - NID_id_smime_aa_timeStampToken,11,&(lvalues[1516]),0}, + NID_id_smime_aa_timeStampToken,11,&(lvalues[1514]),0}, {"id-smime-aa-ets-sigPolicyId","id-smime-aa-ets-sigPolicyId", - NID_id_smime_aa_ets_sigPolicyId,11,&(lvalues[1527]),0}, + NID_id_smime_aa_ets_sigPolicyId,11,&(lvalues[1525]),0}, {"id-smime-aa-ets-commitmentType","id-smime-aa-ets-commitmentType", - NID_id_smime_aa_ets_commitmentType,11,&(lvalues[1538]),0}, + NID_id_smime_aa_ets_commitmentType,11,&(lvalues[1536]),0}, {"id-smime-aa-ets-signerLocation","id-smime-aa-ets-signerLocation", - NID_id_smime_aa_ets_signerLocation,11,&(lvalues[1549]),0}, + NID_id_smime_aa_ets_signerLocation,11,&(lvalues[1547]),0}, {"id-smime-aa-ets-signerAttr","id-smime-aa-ets-signerAttr", - NID_id_smime_aa_ets_signerAttr,11,&(lvalues[1560]),0}, + NID_id_smime_aa_ets_signerAttr,11,&(lvalues[1558]),0}, {"id-smime-aa-ets-otherSigCert","id-smime-aa-ets-otherSigCert", - NID_id_smime_aa_ets_otherSigCert,11,&(lvalues[1571]),0}, + NID_id_smime_aa_ets_otherSigCert,11,&(lvalues[1569]),0}, {"id-smime-aa-ets-contentTimestamp", "id-smime-aa-ets-contentTimestamp", - NID_id_smime_aa_ets_contentTimestamp,11,&(lvalues[1582]),0}, + NID_id_smime_aa_ets_contentTimestamp,11,&(lvalues[1580]),0}, {"id-smime-aa-ets-CertificateRefs","id-smime-aa-ets-CertificateRefs", - NID_id_smime_aa_ets_CertificateRefs,11,&(lvalues[1593]),0}, + NID_id_smime_aa_ets_CertificateRefs,11,&(lvalues[1591]),0}, {"id-smime-aa-ets-RevocationRefs","id-smime-aa-ets-RevocationRefs", - NID_id_smime_aa_ets_RevocationRefs,11,&(lvalues[1604]),0}, + NID_id_smime_aa_ets_RevocationRefs,11,&(lvalues[1602]),0}, 
{"id-smime-aa-ets-certValues","id-smime-aa-ets-certValues", - NID_id_smime_aa_ets_certValues,11,&(lvalues[1615]),0}, + NID_id_smime_aa_ets_certValues,11,&(lvalues[1613]),0}, {"id-smime-aa-ets-revocationValues", "id-smime-aa-ets-revocationValues", - NID_id_smime_aa_ets_revocationValues,11,&(lvalues[1626]),0}, + NID_id_smime_aa_ets_revocationValues,11,&(lvalues[1624]),0}, {"id-smime-aa-ets-escTimeStamp","id-smime-aa-ets-escTimeStamp", - NID_id_smime_aa_ets_escTimeStamp,11,&(lvalues[1637]),0}, + NID_id_smime_aa_ets_escTimeStamp,11,&(lvalues[1635]),0}, {"id-smime-aa-ets-certCRLTimestamp", "id-smime-aa-ets-certCRLTimestamp", - NID_id_smime_aa_ets_certCRLTimestamp,11,&(lvalues[1648]),0}, + NID_id_smime_aa_ets_certCRLTimestamp,11,&(lvalues[1646]),0}, {"id-smime-aa-ets-archiveTimeStamp", "id-smime-aa-ets-archiveTimeStamp", - NID_id_smime_aa_ets_archiveTimeStamp,11,&(lvalues[1659]),0}, + NID_id_smime_aa_ets_archiveTimeStamp,11,&(lvalues[1657]),0}, {"id-smime-aa-signatureType","id-smime-aa-signatureType", - NID_id_smime_aa_signatureType,11,&(lvalues[1670]),0}, + NID_id_smime_aa_signatureType,11,&(lvalues[1668]),0}, {"id-smime-aa-dvcs-dvc","id-smime-aa-dvcs-dvc", - NID_id_smime_aa_dvcs_dvc,11,&(lvalues[1681]),0}, + NID_id_smime_aa_dvcs_dvc,11,&(lvalues[1679]),0}, {"id-smime-alg-ESDHwith3DES","id-smime-alg-ESDHwith3DES", - NID_id_smime_alg_ESDHwith3DES,11,&(lvalues[1692]),0}, + NID_id_smime_alg_ESDHwith3DES,11,&(lvalues[1690]),0}, {"id-smime-alg-ESDHwithRC2","id-smime-alg-ESDHwithRC2", - NID_id_smime_alg_ESDHwithRC2,11,&(lvalues[1703]),0}, + NID_id_smime_alg_ESDHwithRC2,11,&(lvalues[1701]),0}, {"id-smime-alg-3DESwrap","id-smime-alg-3DESwrap", - NID_id_smime_alg_3DESwrap,11,&(lvalues[1714]),0}, + NID_id_smime_alg_3DESwrap,11,&(lvalues[1712]),0}, {"id-smime-alg-RC2wrap","id-smime-alg-RC2wrap", - NID_id_smime_alg_RC2wrap,11,&(lvalues[1725]),0}, + NID_id_smime_alg_RC2wrap,11,&(lvalues[1723]),0}, {"id-smime-alg-ESDH","id-smime-alg-ESDH",NID_id_smime_alg_ESDH,11, - 
&(lvalues[1736]),0}, + &(lvalues[1734]),0}, {"id-smime-alg-CMS3DESwrap","id-smime-alg-CMS3DESwrap", - NID_id_smime_alg_CMS3DESwrap,11,&(lvalues[1747]),0}, + NID_id_smime_alg_CMS3DESwrap,11,&(lvalues[1745]),0}, {"id-smime-alg-CMSRC2wrap","id-smime-alg-CMSRC2wrap", - NID_id_smime_alg_CMSRC2wrap,11,&(lvalues[1758]),0}, + NID_id_smime_alg_CMSRC2wrap,11,&(lvalues[1756]),0}, {"id-smime-cd-ldap","id-smime-cd-ldap",NID_id_smime_cd_ldap,11, - &(lvalues[1769]),0}, + &(lvalues[1767]),0}, {"id-smime-spq-ets-sqt-uri","id-smime-spq-ets-sqt-uri", - NID_id_smime_spq_ets_sqt_uri,11,&(lvalues[1780]),0}, + NID_id_smime_spq_ets_sqt_uri,11,&(lvalues[1778]),0}, {"id-smime-spq-ets-sqt-unotice","id-smime-spq-ets-sqt-unotice", - NID_id_smime_spq_ets_sqt_unotice,11,&(lvalues[1791]),0}, + NID_id_smime_spq_ets_sqt_unotice,11,&(lvalues[1789]),0}, {"id-smime-cti-ets-proofOfOrigin","id-smime-cti-ets-proofOfOrigin", - NID_id_smime_cti_ets_proofOfOrigin,11,&(lvalues[1802]),0}, + NID_id_smime_cti_ets_proofOfOrigin,11,&(lvalues[1800]),0}, {"id-smime-cti-ets-proofOfReceipt","id-smime-cti-ets-proofOfReceipt", - NID_id_smime_cti_ets_proofOfReceipt,11,&(lvalues[1813]),0}, + NID_id_smime_cti_ets_proofOfReceipt,11,&(lvalues[1811]),0}, {"id-smime-cti-ets-proofOfDelivery", "id-smime-cti-ets-proofOfDelivery", - NID_id_smime_cti_ets_proofOfDelivery,11,&(lvalues[1824]),0}, + NID_id_smime_cti_ets_proofOfDelivery,11,&(lvalues[1822]),0}, {"id-smime-cti-ets-proofOfSender","id-smime-cti-ets-proofOfSender", - NID_id_smime_cti_ets_proofOfSender,11,&(lvalues[1835]),0}, + NID_id_smime_cti_ets_proofOfSender,11,&(lvalues[1833]),0}, {"id-smime-cti-ets-proofOfApproval", "id-smime-cti-ets-proofOfApproval", - NID_id_smime_cti_ets_proofOfApproval,11,&(lvalues[1846]),0}, + NID_id_smime_cti_ets_proofOfApproval,11,&(lvalues[1844]),0}, {"id-smime-cti-ets-proofOfCreation", "id-smime-cti-ets-proofOfCreation", - NID_id_smime_cti_ets_proofOfCreation,11,&(lvalues[1857]),0}, -{"MD4","md4",NID_md4,8,&(lvalues[1868]),0}, 
-{"id-pkix-mod","id-pkix-mod",NID_id_pkix_mod,7,&(lvalues[1876]),0}, -{"id-qt","id-qt",NID_id_qt,7,&(lvalues[1883]),0}, -{"id-it","id-it",NID_id_it,7,&(lvalues[1890]),0}, -{"id-pkip","id-pkip",NID_id_pkip,7,&(lvalues[1897]),0}, -{"id-alg","id-alg",NID_id_alg,7,&(lvalues[1904]),0}, -{"id-cmc","id-cmc",NID_id_cmc,7,&(lvalues[1911]),0}, -{"id-on","id-on",NID_id_on,7,&(lvalues[1918]),0}, -{"id-pda","id-pda",NID_id_pda,7,&(lvalues[1925]),0}, -{"id-aca","id-aca",NID_id_aca,7,&(lvalues[1932]),0}, -{"id-qcs","id-qcs",NID_id_qcs,7,&(lvalues[1939]),0}, -{"id-cct","id-cct",NID_id_cct,7,&(lvalues[1946]),0}, + NID_id_smime_cti_ets_proofOfCreation,11,&(lvalues[1855]),0}, +{"MD4","md4",NID_md4,8,&(lvalues[1866]),0}, +{"id-pkix-mod","id-pkix-mod",NID_id_pkix_mod,7,&(lvalues[1874]),0}, +{"id-qt","id-qt",NID_id_qt,7,&(lvalues[1881]),0}, +{"id-it","id-it",NID_id_it,7,&(lvalues[1888]),0}, +{"id-pkip","id-pkip",NID_id_pkip,7,&(lvalues[1895]),0}, +{"id-alg","id-alg",NID_id_alg,7,&(lvalues[1902]),0}, +{"id-cmc","id-cmc",NID_id_cmc,7,&(lvalues[1909]),0}, +{"id-on","id-on",NID_id_on,7,&(lvalues[1916]),0}, +{"id-pda","id-pda",NID_id_pda,7,&(lvalues[1923]),0}, +{"id-aca","id-aca",NID_id_aca,7,&(lvalues[1930]),0}, +{"id-qcs","id-qcs",NID_id_qcs,7,&(lvalues[1937]),0}, +{"id-cct","id-cct",NID_id_cct,7,&(lvalues[1944]),0}, {"id-pkix1-explicit-88","id-pkix1-explicit-88", - NID_id_pkix1_explicit_88,8,&(lvalues[1953]),0}, + NID_id_pkix1_explicit_88,8,&(lvalues[1951]),0}, {"id-pkix1-implicit-88","id-pkix1-implicit-88", - NID_id_pkix1_implicit_88,8,&(lvalues[1961]),0}, + NID_id_pkix1_implicit_88,8,&(lvalues[1959]),0}, {"id-pkix1-explicit-93","id-pkix1-explicit-93", - NID_id_pkix1_explicit_93,8,&(lvalues[1969]),0}, + NID_id_pkix1_explicit_93,8,&(lvalues[1967]),0}, {"id-pkix1-implicit-93","id-pkix1-implicit-93", - NID_id_pkix1_implicit_93,8,&(lvalues[1977]),0}, -{"id-mod-crmf","id-mod-crmf",NID_id_mod_crmf,8,&(lvalues[1985]),0}, -{"id-mod-cmc","id-mod-cmc",NID_id_mod_cmc,8,&(lvalues[1993]),0}, + 
NID_id_pkix1_implicit_93,8,&(lvalues[1975]),0}, +{"id-mod-crmf","id-mod-crmf",NID_id_mod_crmf,8,&(lvalues[1983]),0}, +{"id-mod-cmc","id-mod-cmc",NID_id_mod_cmc,8,&(lvalues[1991]),0}, {"id-mod-kea-profile-88","id-mod-kea-profile-88", - NID_id_mod_kea_profile_88,8,&(lvalues[2001]),0}, + NID_id_mod_kea_profile_88,8,&(lvalues[1999]),0}, {"id-mod-kea-profile-93","id-mod-kea-profile-93", - NID_id_mod_kea_profile_93,8,&(lvalues[2009]),0}, -{"id-mod-cmp","id-mod-cmp",NID_id_mod_cmp,8,&(lvalues[2017]),0}, + NID_id_mod_kea_profile_93,8,&(lvalues[2007]),0}, +{"id-mod-cmp","id-mod-cmp",NID_id_mod_cmp,8,&(lvalues[2015]),0}, {"id-mod-qualified-cert-88","id-mod-qualified-cert-88", - NID_id_mod_qualified_cert_88,8,&(lvalues[2025]),0}, + NID_id_mod_qualified_cert_88,8,&(lvalues[2023]),0}, {"id-mod-qualified-cert-93","id-mod-qualified-cert-93", - NID_id_mod_qualified_cert_93,8,&(lvalues[2033]),0}, + NID_id_mod_qualified_cert_93,8,&(lvalues[2031]),0}, {"id-mod-attribute-cert","id-mod-attribute-cert", - NID_id_mod_attribute_cert,8,&(lvalues[2041]),0}, + NID_id_mod_attribute_cert,8,&(lvalues[2039]),0}, {"id-mod-timestamp-protocol","id-mod-timestamp-protocol", - NID_id_mod_timestamp_protocol,8,&(lvalues[2049]),0}, -{"id-mod-ocsp","id-mod-ocsp",NID_id_mod_ocsp,8,&(lvalues[2057]),0}, -{"id-mod-dvcs","id-mod-dvcs",NID_id_mod_dvcs,8,&(lvalues[2065]),0}, + NID_id_mod_timestamp_protocol,8,&(lvalues[2047]),0}, +{"id-mod-ocsp","id-mod-ocsp",NID_id_mod_ocsp,8,&(lvalues[2055]),0}, +{"id-mod-dvcs","id-mod-dvcs",NID_id_mod_dvcs,8,&(lvalues[2063]),0}, {"id-mod-cmp2000","id-mod-cmp2000",NID_id_mod_cmp2000,8, - &(lvalues[2073]),0}, -{"biometricInfo","Biometric Info",NID_biometricInfo,8,&(lvalues[2081]),0}, -{"qcStatements","qcStatements",NID_qcStatements,8,&(lvalues[2089]),0}, + &(lvalues[2071]),0}, +{"biometricInfo","Biometric Info",NID_biometricInfo,8,&(lvalues[2079]),0}, +{"qcStatements","qcStatements",NID_qcStatements,8,&(lvalues[2087]),0}, {"ac-auditEntity","ac-auditEntity",NID_ac_auditEntity,8, 
- &(lvalues[2097]),0}, -{"ac-targeting","ac-targeting",NID_ac_targeting,8,&(lvalues[2105]),0}, -{"aaControls","aaControls",NID_aaControls,8,&(lvalues[2113]),0}, + &(lvalues[2095]),0}, +{"ac-targeting","ac-targeting",NID_ac_targeting,8,&(lvalues[2103]),0}, +{"aaControls","aaControls",NID_aaControls,8,&(lvalues[2111]),0}, {"sbgp-ipAddrBlock","sbgp-ipAddrBlock",NID_sbgp_ipAddrBlock,8, - &(lvalues[2121]),0}, + &(lvalues[2119]),0}, {"sbgp-autonomousSysNum","sbgp-autonomousSysNum", - NID_sbgp_autonomousSysNum,8,&(lvalues[2129]),0}, + NID_sbgp_autonomousSysNum,8,&(lvalues[2127]),0}, {"sbgp-routerIdentifier","sbgp-routerIdentifier", - NID_sbgp_routerIdentifier,8,&(lvalues[2137]),0}, -{"textNotice","textNotice",NID_textNotice,8,&(lvalues[2145]),0}, + NID_sbgp_routerIdentifier,8,&(lvalues[2135]),0}, +{"textNotice","textNotice",NID_textNotice,8,&(lvalues[2143]),0}, {"ipsecEndSystem","IPSec End System",NID_ipsecEndSystem,8, - &(lvalues[2153]),0}, -{"ipsecTunnel","IPSec Tunnel",NID_ipsecTunnel,8,&(lvalues[2161]),0}, -{"ipsecUser","IPSec User",NID_ipsecUser,8,&(lvalues[2169]),0}, -{"DVCS","dvcs",NID_dvcs,8,&(lvalues[2177]),0}, + &(lvalues[2151]),0}, +{"ipsecTunnel","IPSec Tunnel",NID_ipsecTunnel,8,&(lvalues[2159]),0}, +{"ipsecUser","IPSec User",NID_ipsecUser,8,&(lvalues[2167]),0}, +{"DVCS","dvcs",NID_dvcs,8,&(lvalues[2175]),0}, {"id-it-caProtEncCert","id-it-caProtEncCert",NID_id_it_caProtEncCert, - 8,&(lvalues[2185]),0}, + 8,&(lvalues[2183]),0}, {"id-it-signKeyPairTypes","id-it-signKeyPairTypes", - NID_id_it_signKeyPairTypes,8,&(lvalues[2193]),0}, + NID_id_it_signKeyPairTypes,8,&(lvalues[2191]),0}, {"id-it-encKeyPairTypes","id-it-encKeyPairTypes", - NID_id_it_encKeyPairTypes,8,&(lvalues[2201]),0}, + NID_id_it_encKeyPairTypes,8,&(lvalues[2199]),0}, {"id-it-preferredSymmAlg","id-it-preferredSymmAlg", - NID_id_it_preferredSymmAlg,8,&(lvalues[2209]),0}, + NID_id_it_preferredSymmAlg,8,&(lvalues[2207]),0}, {"id-it-caKeyUpdateInfo","id-it-caKeyUpdateInfo", - 
NID_id_it_caKeyUpdateInfo,8,&(lvalues[2217]),0}, + NID_id_it_caKeyUpdateInfo,8,&(lvalues[2215]),0}, {"id-it-currentCRL","id-it-currentCRL",NID_id_it_currentCRL,8, - &(lvalues[2225]),0}, + &(lvalues[2223]),0}, {"id-it-unsupportedOIDs","id-it-unsupportedOIDs", - NID_id_it_unsupportedOIDs,8,&(lvalues[2233]),0}, + NID_id_it_unsupportedOIDs,8,&(lvalues[2231]),0}, {"id-it-subscriptionRequest","id-it-subscriptionRequest", - NID_id_it_subscriptionRequest,8,&(lvalues[2241]),0}, + NID_id_it_subscriptionRequest,8,&(lvalues[2239]),0}, {"id-it-subscriptionResponse","id-it-subscriptionResponse", - NID_id_it_subscriptionResponse,8,&(lvalues[2249]),0}, + NID_id_it_subscriptionResponse,8,&(lvalues[2247]),0}, {"id-it-keyPairParamReq","id-it-keyPairParamReq", - NID_id_it_keyPairParamReq,8,&(lvalues[2257]),0}, + NID_id_it_keyPairParamReq,8,&(lvalues[2255]),0}, {"id-it-keyPairParamRep","id-it-keyPairParamRep", - NID_id_it_keyPairParamRep,8,&(lvalues[2265]),0}, + NID_id_it_keyPairParamRep,8,&(lvalues[2263]),0}, {"id-it-revPassphrase","id-it-revPassphrase",NID_id_it_revPassphrase, - 8,&(lvalues[2273]),0}, + 8,&(lvalues[2271]),0}, {"id-it-implicitConfirm","id-it-implicitConfirm", - NID_id_it_implicitConfirm,8,&(lvalues[2281]),0}, + NID_id_it_implicitConfirm,8,&(lvalues[2279]),0}, {"id-it-confirmWaitTime","id-it-confirmWaitTime", - NID_id_it_confirmWaitTime,8,&(lvalues[2289]),0}, + NID_id_it_confirmWaitTime,8,&(lvalues[2287]),0}, {"id-it-origPKIMessage","id-it-origPKIMessage", - NID_id_it_origPKIMessage,8,&(lvalues[2297]),0}, -{"id-regCtrl","id-regCtrl",NID_id_regCtrl,8,&(lvalues[2305]),0}, -{"id-regInfo","id-regInfo",NID_id_regInfo,8,&(lvalues[2313]),0}, + NID_id_it_origPKIMessage,8,&(lvalues[2295]),0}, +{"id-regCtrl","id-regCtrl",NID_id_regCtrl,8,&(lvalues[2303]),0}, +{"id-regInfo","id-regInfo",NID_id_regInfo,8,&(lvalues[2311]),0}, {"id-regCtrl-regToken","id-regCtrl-regToken",NID_id_regCtrl_regToken, - 9,&(lvalues[2321]),0}, + 9,&(lvalues[2319]),0}, 
{"id-regCtrl-authenticator","id-regCtrl-authenticator", - NID_id_regCtrl_authenticator,9,&(lvalues[2330]),0}, + NID_id_regCtrl_authenticator,9,&(lvalues[2328]),0}, {"id-regCtrl-pkiPublicationInfo","id-regCtrl-pkiPublicationInfo", - NID_id_regCtrl_pkiPublicationInfo,9,&(lvalues[2339]),0}, + NID_id_regCtrl_pkiPublicationInfo,9,&(lvalues[2337]),0}, {"id-regCtrl-pkiArchiveOptions","id-regCtrl-pkiArchiveOptions", - NID_id_regCtrl_pkiArchiveOptions,9,&(lvalues[2348]),0}, + NID_id_regCtrl_pkiArchiveOptions,9,&(lvalues[2346]),0}, {"id-regCtrl-oldCertID","id-regCtrl-oldCertID", - NID_id_regCtrl_oldCertID,9,&(lvalues[2357]),0}, + NID_id_regCtrl_oldCertID,9,&(lvalues[2355]),0}, {"id-regCtrl-protocolEncrKey","id-regCtrl-protocolEncrKey", - NID_id_regCtrl_protocolEncrKey,9,&(lvalues[2366]),0}, + NID_id_regCtrl_protocolEncrKey,9,&(lvalues[2364]),0}, {"id-regInfo-utf8Pairs","id-regInfo-utf8Pairs", - NID_id_regInfo_utf8Pairs,9,&(lvalues[2375]),0}, + NID_id_regInfo_utf8Pairs,9,&(lvalues[2373]),0}, {"id-regInfo-certReq","id-regInfo-certReq",NID_id_regInfo_certReq,9, - &(lvalues[2384]),0}, -{"id-alg-des40","id-alg-des40",NID_id_alg_des40,8,&(lvalues[2393]),0}, + &(lvalues[2382]),0}, +{"id-alg-des40","id-alg-des40",NID_id_alg_des40,8,&(lvalues[2391]),0}, {"id-alg-noSignature","id-alg-noSignature",NID_id_alg_noSignature,8, - &(lvalues[2401]),0}, + &(lvalues[2399]),0}, {"id-alg-dh-sig-hmac-sha1","id-alg-dh-sig-hmac-sha1", - NID_id_alg_dh_sig_hmac_sha1,8,&(lvalues[2409]),0}, -{"id-alg-dh-pop","id-alg-dh-pop",NID_id_alg_dh_pop,8,&(lvalues[2417]),0}, + NID_id_alg_dh_sig_hmac_sha1,8,&(lvalues[2407]),0}, +{"id-alg-dh-pop","id-alg-dh-pop",NID_id_alg_dh_pop,8,&(lvalues[2415]),0}, {"id-cmc-statusInfo","id-cmc-statusInfo",NID_id_cmc_statusInfo,8, - &(lvalues[2425]),0}, + &(lvalues[2423]),0}, {"id-cmc-identification","id-cmc-identification", - NID_id_cmc_identification,8,&(lvalues[2433]),0}, + NID_id_cmc_identification,8,&(lvalues[2431]),0}, {"id-cmc-identityProof","id-cmc-identityProof", - 
NID_id_cmc_identityProof,8,&(lvalues[2441]),0}, + NID_id_cmc_identityProof,8,&(lvalues[2439]),0}, {"id-cmc-dataReturn","id-cmc-dataReturn",NID_id_cmc_dataReturn,8, - &(lvalues[2449]),0}, + &(lvalues[2447]),0}, {"id-cmc-transactionId","id-cmc-transactionId", - NID_id_cmc_transactionId,8,&(lvalues[2457]),0}, + NID_id_cmc_transactionId,8,&(lvalues[2455]),0}, {"id-cmc-senderNonce","id-cmc-senderNonce",NID_id_cmc_senderNonce,8, - &(lvalues[2465]),0}, + &(lvalues[2463]),0}, {"id-cmc-recipientNonce","id-cmc-recipientNonce", - NID_id_cmc_recipientNonce,8,&(lvalues[2473]),0}, + NID_id_cmc_recipientNonce,8,&(lvalues[2471]),0}, {"id-cmc-addExtensions","id-cmc-addExtensions", - NID_id_cmc_addExtensions,8,&(lvalues[2481]),0}, + NID_id_cmc_addExtensions,8,&(lvalues[2479]),0}, {"id-cmc-encryptedPOP","id-cmc-encryptedPOP",NID_id_cmc_encryptedPOP, - 8,&(lvalues[2489]),0}, + 8,&(lvalues[2487]),0}, {"id-cmc-decryptedPOP","id-cmc-decryptedPOP",NID_id_cmc_decryptedPOP, - 8,&(lvalues[2497]),0}, + 8,&(lvalues[2495]),0}, {"id-cmc-lraPOPWitness","id-cmc-lraPOPWitness", - NID_id_cmc_lraPOPWitness,8,&(lvalues[2505]),0}, + NID_id_cmc_lraPOPWitness,8,&(lvalues[2503]),0}, {"id-cmc-getCert","id-cmc-getCert",NID_id_cmc_getCert,8, - &(lvalues[2513]),0}, -{"id-cmc-getCRL","id-cmc-getCRL",NID_id_cmc_getCRL,8,&(lvalues[2521]),0}, + &(lvalues[2511]),0}, +{"id-cmc-getCRL","id-cmc-getCRL",NID_id_cmc_getCRL,8,&(lvalues[2519]),0}, {"id-cmc-revokeRequest","id-cmc-revokeRequest", - NID_id_cmc_revokeRequest,8,&(lvalues[2529]),0}, + NID_id_cmc_revokeRequest,8,&(lvalues[2527]),0}, {"id-cmc-regInfo","id-cmc-regInfo",NID_id_cmc_regInfo,8, - &(lvalues[2537]),0}, + &(lvalues[2535]),0}, {"id-cmc-responseInfo","id-cmc-responseInfo",NID_id_cmc_responseInfo, - 8,&(lvalues[2545]),0}, + 8,&(lvalues[2543]),0}, {"id-cmc-queryPending","id-cmc-queryPending",NID_id_cmc_queryPending, - 8,&(lvalues[2553]),0}, + 8,&(lvalues[2551]),0}, {"id-cmc-popLinkRandom","id-cmc-popLinkRandom", - 
NID_id_cmc_popLinkRandom,8,&(lvalues[2561]),0}, + NID_id_cmc_popLinkRandom,8,&(lvalues[2559]),0}, {"id-cmc-popLinkWitness","id-cmc-popLinkWitness", - NID_id_cmc_popLinkWitness,8,&(lvalues[2569]),0}, + NID_id_cmc_popLinkWitness,8,&(lvalues[2567]),0}, {"id-cmc-confirmCertAcceptance","id-cmc-confirmCertAcceptance", - NID_id_cmc_confirmCertAcceptance,8,&(lvalues[2577]),0}, + NID_id_cmc_confirmCertAcceptance,8,&(lvalues[2575]),0}, {"id-on-personalData","id-on-personalData",NID_id_on_personalData,8, - &(lvalues[2585]),0}, + &(lvalues[2583]),0}, {"id-pda-dateOfBirth","id-pda-dateOfBirth",NID_id_pda_dateOfBirth,8, - &(lvalues[2593]),0}, + &(lvalues[2591]),0}, {"id-pda-placeOfBirth","id-pda-placeOfBirth",NID_id_pda_placeOfBirth, - 8,&(lvalues[2601]),0}, + 8,&(lvalues[2599]),0}, {NULL,NULL,NID_undef,0,NULL,0}, -{"id-pda-gender","id-pda-gender",NID_id_pda_gender,8,&(lvalues[2609]),0}, +{"id-pda-gender","id-pda-gender",NID_id_pda_gender,8,&(lvalues[2607]),0}, {"id-pda-countryOfCitizenship","id-pda-countryOfCitizenship", - NID_id_pda_countryOfCitizenship,8,&(lvalues[2617]),0}, + NID_id_pda_countryOfCitizenship,8,&(lvalues[2615]),0}, {"id-pda-countryOfResidence","id-pda-countryOfResidence", - NID_id_pda_countryOfResidence,8,&(lvalues[2625]),0}, + NID_id_pda_countryOfResidence,8,&(lvalues[2623]),0}, {"id-aca-authenticationInfo","id-aca-authenticationInfo", - NID_id_aca_authenticationInfo,8,&(lvalues[2633]),0}, + NID_id_aca_authenticationInfo,8,&(lvalues[2631]),0}, {"id-aca-accessIdentity","id-aca-accessIdentity", - NID_id_aca_accessIdentity,8,&(lvalues[2641]),0}, + NID_id_aca_accessIdentity,8,&(lvalues[2639]),0}, {"id-aca-chargingIdentity","id-aca-chargingIdentity", - NID_id_aca_chargingIdentity,8,&(lvalues[2649]),0}, -{"id-aca-group","id-aca-group",NID_id_aca_group,8,&(lvalues[2657]),0}, -{"id-aca-role","id-aca-role",NID_id_aca_role,8,&(lvalues[2665]),0}, + NID_id_aca_chargingIdentity,8,&(lvalues[2647]),0}, +{"id-aca-group","id-aca-group",NID_id_aca_group,8,&(lvalues[2655]),0}, 
+{"id-aca-role","id-aca-role",NID_id_aca_role,8,&(lvalues[2663]),0}, {"id-qcs-pkixQCSyntax-v1","id-qcs-pkixQCSyntax-v1", - NID_id_qcs_pkixQCSyntax_v1,8,&(lvalues[2673]),0}, -{"id-cct-crs","id-cct-crs",NID_id_cct_crs,8,&(lvalues[2681]),0}, + NID_id_qcs_pkixQCSyntax_v1,8,&(lvalues[2671]),0}, +{"id-cct-crs","id-cct-crs",NID_id_cct_crs,8,&(lvalues[2679]),0}, {"id-cct-PKIData","id-cct-PKIData",NID_id_cct_PKIData,8, - &(lvalues[2689]),0}, + &(lvalues[2687]),0}, {"id-cct-PKIResponse","id-cct-PKIResponse",NID_id_cct_PKIResponse,8, - &(lvalues[2697]),0}, + &(lvalues[2695]),0}, {"ad_timestamping","AD Time Stamping",NID_ad_timeStamping,8, - &(lvalues[2705]),0}, -{"AD_DVCS","ad dvcs",NID_ad_dvcs,8,&(lvalues[2713]),0}, + &(lvalues[2703]),0}, +{"AD_DVCS","ad dvcs",NID_ad_dvcs,8,&(lvalues[2711]),0}, {"basicOCSPResponse","Basic OCSP Response",NID_id_pkix_OCSP_basic,9, - &(lvalues[2721]),0}, -{"Nonce","OCSP Nonce",NID_id_pkix_OCSP_Nonce,9,&(lvalues[2730]),0}, -{"CrlID","OCSP CRL ID",NID_id_pkix_OCSP_CrlID,9,&(lvalues[2739]),0}, + &(lvalues[2719]),0}, +{"Nonce","OCSP Nonce",NID_id_pkix_OCSP_Nonce,9,&(lvalues[2728]),0}, +{"CrlID","OCSP CRL ID",NID_id_pkix_OCSP_CrlID,9,&(lvalues[2737]),0}, {"acceptableResponses","Acceptable OCSP Responses", - NID_id_pkix_OCSP_acceptableResponses,9,&(lvalues[2748]),0}, -{"noCheck","OCSP No Check",NID_id_pkix_OCSP_noCheck,9,&(lvalues[2757]),0}, + NID_id_pkix_OCSP_acceptableResponses,9,&(lvalues[2746]),0}, +{"noCheck","OCSP No Check",NID_id_pkix_OCSP_noCheck,9,&(lvalues[2755]),0}, {"archiveCutoff","OCSP Archive Cutoff",NID_id_pkix_OCSP_archiveCutoff, - 9,&(lvalues[2766]),0}, + 9,&(lvalues[2764]),0}, {"serviceLocator","OCSP Service Locator", - NID_id_pkix_OCSP_serviceLocator,9,&(lvalues[2775]),0}, + NID_id_pkix_OCSP_serviceLocator,9,&(lvalues[2773]),0}, {"extendedStatus","Extended OCSP Status", - NID_id_pkix_OCSP_extendedStatus,9,&(lvalues[2784]),0}, -{"valid","valid",NID_id_pkix_OCSP_valid,9,&(lvalues[2793]),0}, 
-{"path","path",NID_id_pkix_OCSP_path,9,&(lvalues[2802]),0}, + NID_id_pkix_OCSP_extendedStatus,9,&(lvalues[2782]),0}, +{"valid","valid",NID_id_pkix_OCSP_valid,9,&(lvalues[2791]),0}, +{"path","path",NID_id_pkix_OCSP_path,9,&(lvalues[2800]),0}, {"trustRoot","Trust Root",NID_id_pkix_OCSP_trustRoot,9, - &(lvalues[2811]),0}, -{"algorithm","algorithm",NID_algorithm,4,&(lvalues[2820]),0}, -{"rsaSignature","rsaSignature",NID_rsaSignature,5,&(lvalues[2824]),0}, + &(lvalues[2809]),0}, +{"algorithm","algorithm",NID_algorithm,4,&(lvalues[2818]),0}, +{"rsaSignature","rsaSignature",NID_rsaSignature,5,&(lvalues[2822]),0}, {"X500algorithms","directory services - algorithms", - NID_X500algorithms,2,&(lvalues[2829]),0}, -{"ORG","org",NID_org,1,&(lvalues[2831]),0}, -{"DOD","dod",NID_dod,2,&(lvalues[2832]),0}, -{"IANA","iana",NID_iana,3,&(lvalues[2834]),0}, -{"directory","Directory",NID_Directory,4,&(lvalues[2837]),0}, -{"mgmt","Management",NID_Management,4,&(lvalues[2841]),0}, -{"experimental","Experimental",NID_Experimental,4,&(lvalues[2845]),0}, -{"private","Private",NID_Private,4,&(lvalues[2849]),0}, -{"security","Security",NID_Security,4,&(lvalues[2853]),0}, -{"snmpv2","SNMPv2",NID_SNMPv2,4,&(lvalues[2857]),0}, -{"Mail","Mail",NID_Mail,4,&(lvalues[2861]),0}, -{"enterprises","Enterprises",NID_Enterprises,5,&(lvalues[2865]),0}, -{"dcobject","dcObject",NID_dcObject,9,&(lvalues[2870]),0}, -{"DC","domainComponent",NID_domainComponent,10,&(lvalues[2879]),0}, -{"domain","Domain",NID_Domain,10,&(lvalues[2889]),0}, -{"NULL","NULL",NID_joint_iso_ccitt,1,&(lvalues[2899]),0}, + NID_X500algorithms,2,&(lvalues[2827]),0}, +{"ORG","org",NID_org,1,&(lvalues[2829]),0}, +{"DOD","dod",NID_dod,2,&(lvalues[2830]),0}, +{"IANA","iana",NID_iana,3,&(lvalues[2832]),0}, +{"directory","Directory",NID_Directory,4,&(lvalues[2835]),0}, +{"mgmt","Management",NID_Management,4,&(lvalues[2839]),0}, +{"experimental","Experimental",NID_Experimental,4,&(lvalues[2843]),0}, 
+{"private","Private",NID_Private,4,&(lvalues[2847]),0}, +{"security","Security",NID_Security,4,&(lvalues[2851]),0}, +{"snmpv2","SNMPv2",NID_SNMPv2,4,&(lvalues[2855]),0}, +{"Mail","Mail",NID_Mail,4,&(lvalues[2859]),0}, +{"enterprises","Enterprises",NID_Enterprises,5,&(lvalues[2863]),0}, +{"dcobject","dcObject",NID_dcObject,9,&(lvalues[2868]),0}, +{"DC","domainComponent",NID_domainComponent,10,&(lvalues[2877]),0}, +{"domain","Domain",NID_Domain,10,&(lvalues[2887]),0}, +{"NULL","NULL",NID_joint_iso_ccitt,0,NULL,0}, {"selected-attribute-types","Selected Attribute Types", - NID_selected_attribute_types,3,&(lvalues[2900]),0}, -{"clearance","clearance",NID_clearance,4,&(lvalues[2903]),0}, + NID_selected_attribute_types,3,&(lvalues[2897]),0}, +{"clearance","clearance",NID_clearance,4,&(lvalues[2900]),0}, {"RSA-MD4","md4WithRSAEncryption",NID_md4WithRSAEncryption,9, - &(lvalues[2907]),0}, -{"ac-proxying","ac-proxying",NID_ac_proxying,8,&(lvalues[2916]),0}, + &(lvalues[2904]),0}, +{"ac-proxying","ac-proxying",NID_ac_proxying,8,&(lvalues[2913]),0}, {"subjectInfoAccess","Subject Information Access",NID_sinfo_access,8, - &(lvalues[2924]),0}, + &(lvalues[2921]),0}, {"id-aca-encAttrs","id-aca-encAttrs",NID_id_aca_encAttrs,8, - &(lvalues[2932]),0}, -{"role","role",NID_role,3,&(lvalues[2940]),0}, + &(lvalues[2929]),0}, +{"role","role",NID_role,3,&(lvalues[2937]),0}, {"policyConstraints","X509v3 Policy Constraints", - NID_policy_constraints,3,&(lvalues[2943]),0}, + NID_policy_constraints,3,&(lvalues[2940]),0}, {"targetInformation","X509v3 AC Targeting",NID_target_information,3, - &(lvalues[2946]),0}, + &(lvalues[2943]),0}, {"noRevAvail","X509v3 No Revocation Available",NID_no_rev_avail,3, - &(lvalues[2949]),0}, -{"NULL","NULL",NID_ccitt,1,&(lvalues[2952]),0}, -{"ansi-X9-62","ANSI X9.62",NID_ansi_X9_62,5,&(lvalues[2953]),0}, -{"prime-field","prime-field",NID_X9_62_prime_field,7,&(lvalues[2958]),0}, + &(lvalues[2946]),0}, +{"NULL","NULL",NID_ccitt,0,NULL,0}, +{"ansi-X9-62","ANSI 
X9.62",NID_ansi_X9_62,5,&(lvalues[2949]),0}, +{"prime-field","prime-field",NID_X9_62_prime_field,7,&(lvalues[2954]),0}, {"characteristic-two-field","characteristic-two-field", - NID_X9_62_characteristic_two_field,7,&(lvalues[2965]),0}, + NID_X9_62_characteristic_two_field,7,&(lvalues[2961]),0}, {"id-ecPublicKey","id-ecPublicKey",NID_X9_62_id_ecPublicKey,7, - &(lvalues[2972]),0}, -{"prime192v1","prime192v1",NID_X9_62_prime192v1,8,&(lvalues[2979]),0}, -{"prime192v2","prime192v2",NID_X9_62_prime192v2,8,&(lvalues[2987]),0}, -{"prime192v3","prime192v3",NID_X9_62_prime192v3,8,&(lvalues[2995]),0}, -{"prime239v1","prime239v1",NID_X9_62_prime239v1,8,&(lvalues[3003]),0}, -{"prime239v2","prime239v2",NID_X9_62_prime239v2,8,&(lvalues[3011]),0}, -{"prime239v3","prime239v3",NID_X9_62_prime239v3,8,&(lvalues[3019]),0}, -{"prime256v1","prime256v1",NID_X9_62_prime256v1,8,&(lvalues[3027]),0}, + &(lvalues[2968]),0}, +{"prime192v1","prime192v1",NID_X9_62_prime192v1,8,&(lvalues[2975]),0}, +{"prime192v2","prime192v2",NID_X9_62_prime192v2,8,&(lvalues[2983]),0}, +{"prime192v3","prime192v3",NID_X9_62_prime192v3,8,&(lvalues[2991]),0}, +{"prime239v1","prime239v1",NID_X9_62_prime239v1,8,&(lvalues[2999]),0}, +{"prime239v2","prime239v2",NID_X9_62_prime239v2,8,&(lvalues[3007]),0}, +{"prime239v3","prime239v3",NID_X9_62_prime239v3,8,&(lvalues[3015]),0}, +{"prime256v1","prime256v1",NID_X9_62_prime256v1,8,&(lvalues[3023]),0}, {"ecdsa-with-SHA1","ecdsa-with-SHA1",NID_ecdsa_with_SHA1,7, - &(lvalues[3035]),0}, -{"CSPName","Microsoft CSP Name",NID_ms_csp_name,9,&(lvalues[3042]),0}, -{"AES-128-ECB","aes-128-ecb",NID_aes_128_ecb,9,&(lvalues[3051]),0}, -{"AES-128-CBC","aes-128-cbc",NID_aes_128_cbc,9,&(lvalues[3060]),0}, -{"AES-128-OFB","aes-128-ofb",NID_aes_128_ofb128,9,&(lvalues[3069]),0}, -{"AES-128-CFB","aes-128-cfb",NID_aes_128_cfb128,9,&(lvalues[3078]),0}, -{"AES-192-ECB","aes-192-ecb",NID_aes_192_ecb,9,&(lvalues[3087]),0}, -{"AES-192-CBC","aes-192-cbc",NID_aes_192_cbc,9,&(lvalues[3096]),0}, 
-{"AES-192-OFB","aes-192-ofb",NID_aes_192_ofb128,9,&(lvalues[3105]),0}, -{"AES-192-CFB","aes-192-cfb",NID_aes_192_cfb128,9,&(lvalues[3114]),0}, -{"AES-256-ECB","aes-256-ecb",NID_aes_256_ecb,9,&(lvalues[3123]),0}, -{"AES-256-CBC","aes-256-cbc",NID_aes_256_cbc,9,&(lvalues[3132]),0}, -{"AES-256-OFB","aes-256-ofb",NID_aes_256_ofb128,9,&(lvalues[3141]),0}, -{"AES-256-CFB","aes-256-cfb",NID_aes_256_cfb128,9,&(lvalues[3150]),0}, + &(lvalues[3031]),0}, +{"CSPName","Microsoft CSP Name",NID_ms_csp_name,9,&(lvalues[3038]),0}, +{"AES-128-ECB","aes-128-ecb",NID_aes_128_ecb,9,&(lvalues[3047]),0}, +{"AES-128-CBC","aes-128-cbc",NID_aes_128_cbc,9,&(lvalues[3056]),0}, +{"AES-128-OFB","aes-128-ofb",NID_aes_128_ofb128,9,&(lvalues[3065]),0}, +{"AES-128-CFB","aes-128-cfb",NID_aes_128_cfb128,9,&(lvalues[3074]),0}, +{"AES-192-ECB","aes-192-ecb",NID_aes_192_ecb,9,&(lvalues[3083]),0}, +{"AES-192-CBC","aes-192-cbc",NID_aes_192_cbc,9,&(lvalues[3092]),0}, +{"AES-192-OFB","aes-192-ofb",NID_aes_192_ofb128,9,&(lvalues[3101]),0}, +{"AES-192-CFB","aes-192-cfb",NID_aes_192_cfb128,9,&(lvalues[3110]),0}, +{"AES-256-ECB","aes-256-ecb",NID_aes_256_ecb,9,&(lvalues[3119]),0}, +{"AES-256-CBC","aes-256-cbc",NID_aes_256_cbc,9,&(lvalues[3128]),0}, +{"AES-256-OFB","aes-256-ofb",NID_aes_256_ofb128,9,&(lvalues[3137]),0}, +{"AES-256-CFB","aes-256-cfb",NID_aes_256_cfb128,9,&(lvalues[3146]),0}, {"holdInstructionCode","Hold Instruction Code", - NID_hold_instruction_code,3,&(lvalues[3159]),0}, + NID_hold_instruction_code,3,&(lvalues[3155]),0}, {"holdInstructionNone","Hold Instruction None", - NID_hold_instruction_none,7,&(lvalues[3162]),0}, + NID_hold_instruction_none,7,&(lvalues[3158]),0}, {"holdInstructionCallIssuer","Hold Instruction Call Issuer", - NID_hold_instruction_call_issuer,7,&(lvalues[3169]),0}, + NID_hold_instruction_call_issuer,7,&(lvalues[3165]),0}, {"holdInstructionReject","Hold Instruction Reject", - NID_hold_instruction_reject,7,&(lvalues[3176]),0}, -{"data","data",NID_data,1,&(lvalues[3183]),0}, 
-{"pss","pss",NID_pss,3,&(lvalues[3184]),0}, -{"ucl","ucl",NID_ucl,7,&(lvalues[3187]),0}, -{"pilot","pilot",NID_pilot,8,&(lvalues[3194]),0}, + NID_hold_instruction_reject,7,&(lvalues[3172]),0}, +{"data","data",NID_data,1,&(lvalues[3179]),0}, +{"pss","pss",NID_pss,3,&(lvalues[3180]),0}, +{"ucl","ucl",NID_ucl,7,&(lvalues[3183]),0}, +{"pilot","pilot",NID_pilot,8,&(lvalues[3190]),0}, {"pilotAttributeType","pilotAttributeType",NID_pilotAttributeType,9, - &(lvalues[3202]),0}, + &(lvalues[3198]),0}, {"pilotAttributeSyntax","pilotAttributeSyntax", - NID_pilotAttributeSyntax,9,&(lvalues[3211]),0}, + NID_pilotAttributeSyntax,9,&(lvalues[3207]),0}, {"pilotObjectClass","pilotObjectClass",NID_pilotObjectClass,9, - &(lvalues[3220]),0}, -{"pilotGroups","pilotGroups",NID_pilotGroups,9,&(lvalues[3229]),0}, + &(lvalues[3216]),0}, +{"pilotGroups","pilotGroups",NID_pilotGroups,9,&(lvalues[3225]),0}, {"iA5StringSyntax","iA5StringSyntax",NID_iA5StringSyntax,10, - &(lvalues[3238]),0}, + &(lvalues[3234]),0}, {"caseIgnoreIA5StringSyntax","caseIgnoreIA5StringSyntax", - NID_caseIgnoreIA5StringSyntax,10,&(lvalues[3248]),0}, -{"pilotObject","pilotObject",NID_pilotObject,10,&(lvalues[3258]),0}, -{"pilotPerson","pilotPerson",NID_pilotPerson,10,&(lvalues[3268]),0}, -{"account","account",NID_account,10,&(lvalues[3278]),0}, -{"document","document",NID_document,10,&(lvalues[3288]),0}, -{"room","room",NID_room,10,&(lvalues[3298]),0}, + NID_caseIgnoreIA5StringSyntax,10,&(lvalues[3244]),0}, +{"pilotObject","pilotObject",NID_pilotObject,10,&(lvalues[3254]),0}, +{"pilotPerson","pilotPerson",NID_pilotPerson,10,&(lvalues[3264]),0}, +{"account","account",NID_account,10,&(lvalues[3274]),0}, +{"document","document",NID_document,10,&(lvalues[3284]),0}, +{"room","room",NID_room,10,&(lvalues[3294]),0}, {"documentSeries","documentSeries",NID_documentSeries,10, - &(lvalues[3308]),0}, + &(lvalues[3304]),0}, {"rFC822localPart","rFC822localPart",NID_rFC822localPart,10, - &(lvalues[3318]),0}, 
-{"dNSDomain","dNSDomain",NID_dNSDomain,10,&(lvalues[3328]),0}, + &(lvalues[3314]),0}, +{"dNSDomain","dNSDomain",NID_dNSDomain,10,&(lvalues[3324]),0}, {"domainRelatedObject","domainRelatedObject",NID_domainRelatedObject, - 10,&(lvalues[3338]),0}, + 10,&(lvalues[3334]),0}, {"friendlyCountry","friendlyCountry",NID_friendlyCountry,10, - &(lvalues[3348]),0}, + &(lvalues[3344]),0}, {"simpleSecurityObject","simpleSecurityObject", - NID_simpleSecurityObject,10,&(lvalues[3358]),0}, + NID_simpleSecurityObject,10,&(lvalues[3354]),0}, {"pilotOrganization","pilotOrganization",NID_pilotOrganization,10, - &(lvalues[3368]),0}, -{"pilotDSA","pilotDSA",NID_pilotDSA,10,&(lvalues[3378]),0}, + &(lvalues[3364]),0}, +{"pilotDSA","pilotDSA",NID_pilotDSA,10,&(lvalues[3374]),0}, {"qualityLabelledData","qualityLabelledData",NID_qualityLabelledData, - 10,&(lvalues[3388]),0}, -{"UID","userId",NID_userId,10,&(lvalues[3398]),0}, + 10,&(lvalues[3384]),0}, +{"UID","userId",NID_userId,10,&(lvalues[3394]),0}, {"textEncodedORAddress","textEncodedORAddress", - NID_textEncodedORAddress,10,&(lvalues[3408]),0}, -{"mail","rfc822Mailbox",NID_rfc822Mailbox,10,&(lvalues[3418]),0}, -{"info","info",NID_info,10,&(lvalues[3428]),0}, + NID_textEncodedORAddress,10,&(lvalues[3404]),0}, +{"mail","rfc822Mailbox",NID_rfc822Mailbox,10,&(lvalues[3414]),0}, +{"info","info",NID_info,10,&(lvalues[3424]),0}, {"favouriteDrink","favouriteDrink",NID_favouriteDrink,10, - &(lvalues[3438]),0}, -{"roomNumber","roomNumber",NID_roomNumber,10,&(lvalues[3448]),0}, -{"photo","photo",NID_photo,10,&(lvalues[3458]),0}, -{"userClass","userClass",NID_userClass,10,&(lvalues[3468]),0}, -{"host","host",NID_host,10,&(lvalues[3478]),0}, -{"manager","manager",NID_manager,10,&(lvalues[3488]),0}, + &(lvalues[3434]),0}, +{"roomNumber","roomNumber",NID_roomNumber,10,&(lvalues[3444]),0}, +{"photo","photo",NID_photo,10,&(lvalues[3454]),0}, +{"userClass","userClass",NID_userClass,10,&(lvalues[3464]),0}, +{"host","host",NID_host,10,&(lvalues[3474]),0}, 
+{"manager","manager",NID_manager,10,&(lvalues[3484]),0}, {"documentIdentifier","documentIdentifier",NID_documentIdentifier,10, - &(lvalues[3498]),0}, -{"documentTitle","documentTitle",NID_documentTitle,10,&(lvalues[3508]),0}, + &(lvalues[3494]),0}, +{"documentTitle","documentTitle",NID_documentTitle,10,&(lvalues[3504]),0}, {"documentVersion","documentVersion",NID_documentVersion,10, - &(lvalues[3518]),0}, + &(lvalues[3514]),0}, {"documentAuthor","documentAuthor",NID_documentAuthor,10, - &(lvalues[3528]),0}, + &(lvalues[3524]),0}, {"documentLocation","documentLocation",NID_documentLocation,10, - &(lvalues[3538]),0}, + &(lvalues[3534]),0}, {"homeTelephoneNumber","homeTelephoneNumber",NID_homeTelephoneNumber, - 10,&(lvalues[3548]),0}, -{"secretary","secretary",NID_secretary,10,&(lvalues[3558]),0}, -{"otherMailbox","otherMailbox",NID_otherMailbox,10,&(lvalues[3568]),0}, + 10,&(lvalues[3544]),0}, +{"secretary","secretary",NID_secretary,10,&(lvalues[3554]),0}, +{"otherMailbox","otherMailbox",NID_otherMailbox,10,&(lvalues[3564]),0}, {"lastModifiedTime","lastModifiedTime",NID_lastModifiedTime,10, - &(lvalues[3578]),0}, + &(lvalues[3574]),0}, {"lastModifiedBy","lastModifiedBy",NID_lastModifiedBy,10, - &(lvalues[3588]),0}, -{"aRecord","aRecord",NID_aRecord,10,&(lvalues[3598]),0}, + &(lvalues[3584]),0}, +{"aRecord","aRecord",NID_aRecord,10,&(lvalues[3594]),0}, {"pilotAttributeType27","pilotAttributeType27", - NID_pilotAttributeType27,10,&(lvalues[3608]),0}, -{"mXRecord","mXRecord",NID_mXRecord,10,&(lvalues[3618]),0}, -{"nSRecord","nSRecord",NID_nSRecord,10,&(lvalues[3628]),0}, -{"sOARecord","sOARecord",NID_sOARecord,10,&(lvalues[3638]),0}, -{"cNAMERecord","cNAMERecord",NID_cNAMERecord,10,&(lvalues[3648]),0}, + NID_pilotAttributeType27,10,&(lvalues[3604]),0}, +{"mXRecord","mXRecord",NID_mXRecord,10,&(lvalues[3614]),0}, +{"nSRecord","nSRecord",NID_nSRecord,10,&(lvalues[3624]),0}, +{"sOARecord","sOARecord",NID_sOARecord,10,&(lvalues[3634]),0}, 
+{"cNAMERecord","cNAMERecord",NID_cNAMERecord,10,&(lvalues[3644]),0}, {"associatedDomain","associatedDomain",NID_associatedDomain,10, - &(lvalues[3658]),0}, + &(lvalues[3654]),0}, {"associatedName","associatedName",NID_associatedName,10, - &(lvalues[3668]),0}, + &(lvalues[3664]),0}, {"homePostalAddress","homePostalAddress",NID_homePostalAddress,10, - &(lvalues[3678]),0}, -{"personalTitle","personalTitle",NID_personalTitle,10,&(lvalues[3688]),0}, + &(lvalues[3674]),0}, +{"personalTitle","personalTitle",NID_personalTitle,10,&(lvalues[3684]),0}, {"mobileTelephoneNumber","mobileTelephoneNumber", - NID_mobileTelephoneNumber,10,&(lvalues[3698]),0}, + NID_mobileTelephoneNumber,10,&(lvalues[3694]),0}, {"pagerTelephoneNumber","pagerTelephoneNumber", - NID_pagerTelephoneNumber,10,&(lvalues[3708]),0}, + NID_pagerTelephoneNumber,10,&(lvalues[3704]),0}, {"friendlyCountryName","friendlyCountryName",NID_friendlyCountryName, - 10,&(lvalues[3718]),0}, + 10,&(lvalues[3714]),0}, {"organizationalStatus","organizationalStatus", - NID_organizationalStatus,10,&(lvalues[3728]),0}, -{"janetMailbox","janetMailbox",NID_janetMailbox,10,&(lvalues[3738]),0}, + NID_organizationalStatus,10,&(lvalues[3724]),0}, +{"janetMailbox","janetMailbox",NID_janetMailbox,10,&(lvalues[3734]),0}, {"mailPreferenceOption","mailPreferenceOption", - NID_mailPreferenceOption,10,&(lvalues[3748]),0}, -{"buildingName","buildingName",NID_buildingName,10,&(lvalues[3758]),0}, -{"dSAQuality","dSAQuality",NID_dSAQuality,10,&(lvalues[3768]),0}, + NID_mailPreferenceOption,10,&(lvalues[3744]),0}, +{"buildingName","buildingName",NID_buildingName,10,&(lvalues[3754]),0}, +{"dSAQuality","dSAQuality",NID_dSAQuality,10,&(lvalues[3764]),0}, {"singleLevelQuality","singleLevelQuality",NID_singleLevelQuality,10, - &(lvalues[3778]),0}, + &(lvalues[3774]),0}, {"subtreeMinimumQuality","subtreeMinimumQuality", - NID_subtreeMinimumQuality,10,&(lvalues[3788]),0}, + NID_subtreeMinimumQuality,10,&(lvalues[3784]),0}, 
{"subtreeMaximumQuality","subtreeMaximumQuality", - NID_subtreeMaximumQuality,10,&(lvalues[3798]),0}, + NID_subtreeMaximumQuality,10,&(lvalues[3794]),0}, {"personalSignature","personalSignature",NID_personalSignature,10, - &(lvalues[3808]),0}, -{"dITRedirect","dITRedirect",NID_dITRedirect,10,&(lvalues[3818]),0}, -{"audio","audio",NID_audio,10,&(lvalues[3828]),0}, + &(lvalues[3804]),0}, +{"dITRedirect","dITRedirect",NID_dITRedirect,10,&(lvalues[3814]),0}, +{"audio","audio",NID_audio,10,&(lvalues[3824]),0}, {"documentPublisher","documentPublisher",NID_documentPublisher,10, - &(lvalues[3838]),0}, + &(lvalues[3834]),0}, {"x500UniqueIdentifier","x500UniqueIdentifier", - NID_x500UniqueIdentifier,3,&(lvalues[3848]),0}, -{"mime-mhs","MIME MHS",NID_mime_mhs,5,&(lvalues[3851]),0}, + NID_x500UniqueIdentifier,3,&(lvalues[3844]),0}, +{"mime-mhs","MIME MHS",NID_mime_mhs,5,&(lvalues[3847]),0}, {"mime-mhs-headings","mime-mhs-headings",NID_mime_mhs_headings,6, - &(lvalues[3856]),0}, + &(lvalues[3852]),0}, {"mime-mhs-bodies","mime-mhs-bodies",NID_mime_mhs_bodies,6, - &(lvalues[3862]),0}, + &(lvalues[3858]),0}, {"id-hex-partial-message","id-hex-partial-message", - NID_id_hex_partial_message,7,&(lvalues[3868]),0}, + NID_id_hex_partial_message,7,&(lvalues[3864]),0}, {"id-hex-multipart-message","id-hex-multipart-message", - NID_id_hex_multipart_message,7,&(lvalues[3875]),0}, + NID_id_hex_multipart_message,7,&(lvalues[3871]),0}, {"generationQualifier","generationQualifier",NID_generationQualifier, - 3,&(lvalues[3882]),0}, -{"pseudonym","pseudonym",NID_pseudonym,3,&(lvalues[3885]),0}, + 3,&(lvalues[3878]),0}, +{"pseudonym","pseudonym",NID_pseudonym,3,&(lvalues[3881]),0}, {NULL,NULL,NID_undef,0,NULL,0}, {"id-set","Secure Electronic Transactions",NID_id_set,2, - &(lvalues[3888]),0}, -{"set-ctype","content types",NID_set_ctype,3,&(lvalues[3890]),0}, -{"set-msgExt","message extensions",NID_set_msgExt,3,&(lvalues[3893]),0}, -{"set-attr","set-attr",NID_set_attr,3,&(lvalues[3896]),0}, 
-{"set-policy","set-policy",NID_set_policy,3,&(lvalues[3899]),0}, + &(lvalues[3884]),0}, +{"set-ctype","content types",NID_set_ctype,3,&(lvalues[3886]),0}, +{"set-msgExt","message extensions",NID_set_msgExt,3,&(lvalues[3889]),0}, +{"set-attr","set-attr",NID_set_attr,3,&(lvalues[3892]),0}, +{"set-policy","set-policy",NID_set_policy,3,&(lvalues[3895]),0}, {"set-certExt","certificate extensions",NID_set_certExt,3, - &(lvalues[3902]),0}, -{"set-brand","set-brand",NID_set_brand,3,&(lvalues[3905]),0}, -{"setct-PANData","setct-PANData",NID_setct_PANData,4,&(lvalues[3908]),0}, + &(lvalues[3898]),0}, +{"set-brand","set-brand",NID_set_brand,3,&(lvalues[3901]),0}, +{"setct-PANData","setct-PANData",NID_setct_PANData,4,&(lvalues[3904]),0}, {"setct-PANToken","setct-PANToken",NID_setct_PANToken,4, - &(lvalues[3912]),0}, -{"setct-PANOnly","setct-PANOnly",NID_setct_PANOnly,4,&(lvalues[3916]),0}, -{"setct-OIData","setct-OIData",NID_setct_OIData,4,&(lvalues[3920]),0}, -{"setct-PI","setct-PI",NID_setct_PI,4,&(lvalues[3924]),0}, -{"setct-PIData","setct-PIData",NID_setct_PIData,4,&(lvalues[3928]),0}, + &(lvalues[3908]),0}, +{"setct-PANOnly","setct-PANOnly",NID_setct_PANOnly,4,&(lvalues[3912]),0}, +{"setct-OIData","setct-OIData",NID_setct_OIData,4,&(lvalues[3916]),0}, +{"setct-PI","setct-PI",NID_setct_PI,4,&(lvalues[3920]),0}, +{"setct-PIData","setct-PIData",NID_setct_PIData,4,&(lvalues[3924]),0}, {"setct-PIDataUnsigned","setct-PIDataUnsigned", - NID_setct_PIDataUnsigned,4,&(lvalues[3932]),0}, + NID_setct_PIDataUnsigned,4,&(lvalues[3928]),0}, {"setct-HODInput","setct-HODInput",NID_setct_HODInput,4, - &(lvalues[3936]),0}, + &(lvalues[3932]),0}, {"setct-AuthResBaggage","setct-AuthResBaggage", - NID_setct_AuthResBaggage,4,&(lvalues[3940]),0}, + NID_setct_AuthResBaggage,4,&(lvalues[3936]),0}, {"setct-AuthRevReqBaggage","setct-AuthRevReqBaggage", - NID_setct_AuthRevReqBaggage,4,&(lvalues[3944]),0}, + NID_setct_AuthRevReqBaggage,4,&(lvalues[3940]),0}, 
{"setct-AuthRevResBaggage","setct-AuthRevResBaggage", - NID_setct_AuthRevResBaggage,4,&(lvalues[3948]),0}, + NID_setct_AuthRevResBaggage,4,&(lvalues[3944]),0}, {"setct-CapTokenSeq","setct-CapTokenSeq",NID_setct_CapTokenSeq,4, - &(lvalues[3952]),0}, + &(lvalues[3948]),0}, {"setct-PInitResData","setct-PInitResData",NID_setct_PInitResData,4, - &(lvalues[3956]),0}, -{"setct-PI-TBS","setct-PI-TBS",NID_setct_PI_TBS,4,&(lvalues[3960]),0}, + &(lvalues[3952]),0}, +{"setct-PI-TBS","setct-PI-TBS",NID_setct_PI_TBS,4,&(lvalues[3956]),0}, {"setct-PResData","setct-PResData",NID_setct_PResData,4, - &(lvalues[3964]),0}, + &(lvalues[3960]),0}, {"setct-AuthReqTBS","setct-AuthReqTBS",NID_setct_AuthReqTBS,4, - &(lvalues[3968]),0}, + &(lvalues[3964]),0}, {"setct-AuthResTBS","setct-AuthResTBS",NID_setct_AuthResTBS,4, - &(lvalues[3972]),0}, + &(lvalues[3968]),0}, {"setct-AuthResTBSX","setct-AuthResTBSX",NID_setct_AuthResTBSX,4, - &(lvalues[3976]),0}, + &(lvalues[3972]),0}, {"setct-AuthTokenTBS","setct-AuthTokenTBS",NID_setct_AuthTokenTBS,4, - &(lvalues[3980]),0}, + &(lvalues[3976]),0}, {"setct-CapTokenData","setct-CapTokenData",NID_setct_CapTokenData,4, - &(lvalues[3984]),0}, + &(lvalues[3980]),0}, {"setct-CapTokenTBS","setct-CapTokenTBS",NID_setct_CapTokenTBS,4, - &(lvalues[3988]),0}, + &(lvalues[3984]),0}, {"setct-AcqCardCodeMsg","setct-AcqCardCodeMsg", - NID_setct_AcqCardCodeMsg,4,&(lvalues[3992]),0}, + NID_setct_AcqCardCodeMsg,4,&(lvalues[3988]),0}, {"setct-AuthRevReqTBS","setct-AuthRevReqTBS",NID_setct_AuthRevReqTBS, - 4,&(lvalues[3996]),0}, + 4,&(lvalues[3992]),0}, {"setct-AuthRevResData","setct-AuthRevResData", - NID_setct_AuthRevResData,4,&(lvalues[4000]),0}, + NID_setct_AuthRevResData,4,&(lvalues[3996]),0}, {"setct-AuthRevResTBS","setct-AuthRevResTBS",NID_setct_AuthRevResTBS, - 4,&(lvalues[4004]),0}, + 4,&(lvalues[4000]),0}, {"setct-CapReqTBS","setct-CapReqTBS",NID_setct_CapReqTBS,4, - &(lvalues[4008]),0}, + &(lvalues[4004]),0}, 
{"setct-CapReqTBSX","setct-CapReqTBSX",NID_setct_CapReqTBSX,4, - &(lvalues[4012]),0}, + &(lvalues[4008]),0}, {"setct-CapResData","setct-CapResData",NID_setct_CapResData,4, - &(lvalues[4016]),0}, + &(lvalues[4012]),0}, {"setct-CapRevReqTBS","setct-CapRevReqTBS",NID_setct_CapRevReqTBS,4, - &(lvalues[4020]),0}, + &(lvalues[4016]),0}, {"setct-CapRevReqTBSX","setct-CapRevReqTBSX",NID_setct_CapRevReqTBSX, - 4,&(lvalues[4024]),0}, + 4,&(lvalues[4020]),0}, {"setct-CapRevResData","setct-CapRevResData",NID_setct_CapRevResData, - 4,&(lvalues[4028]),0}, + 4,&(lvalues[4024]),0}, {"setct-CredReqTBS","setct-CredReqTBS",NID_setct_CredReqTBS,4, - &(lvalues[4032]),0}, + &(lvalues[4028]),0}, {"setct-CredReqTBSX","setct-CredReqTBSX",NID_setct_CredReqTBSX,4, - &(lvalues[4036]),0}, + &(lvalues[4032]),0}, {"setct-CredResData","setct-CredResData",NID_setct_CredResData,4, - &(lvalues[4040]),0}, + &(lvalues[4036]),0}, {"setct-CredRevReqTBS","setct-CredRevReqTBS",NID_setct_CredRevReqTBS, - 4,&(lvalues[4044]),0}, + 4,&(lvalues[4040]),0}, {"setct-CredRevReqTBSX","setct-CredRevReqTBSX", - NID_setct_CredRevReqTBSX,4,&(lvalues[4048]),0}, + NID_setct_CredRevReqTBSX,4,&(lvalues[4044]),0}, {"setct-CredRevResData","setct-CredRevResData", - NID_setct_CredRevResData,4,&(lvalues[4052]),0}, + NID_setct_CredRevResData,4,&(lvalues[4048]),0}, {"setct-PCertReqData","setct-PCertReqData",NID_setct_PCertReqData,4, - &(lvalues[4056]),0}, + &(lvalues[4052]),0}, {"setct-PCertResTBS","setct-PCertResTBS",NID_setct_PCertResTBS,4, - &(lvalues[4060]),0}, + &(lvalues[4056]),0}, {"setct-BatchAdminReqData","setct-BatchAdminReqData", - NID_setct_BatchAdminReqData,4,&(lvalues[4064]),0}, + NID_setct_BatchAdminReqData,4,&(lvalues[4060]),0}, {"setct-BatchAdminResData","setct-BatchAdminResData", - NID_setct_BatchAdminResData,4,&(lvalues[4068]),0}, + NID_setct_BatchAdminResData,4,&(lvalues[4064]),0}, {"setct-CardCInitResTBS","setct-CardCInitResTBS", - NID_setct_CardCInitResTBS,4,&(lvalues[4072]),0}, + 
NID_setct_CardCInitResTBS,4,&(lvalues[4068]),0}, {"setct-MeAqCInitResTBS","setct-MeAqCInitResTBS", - NID_setct_MeAqCInitResTBS,4,&(lvalues[4076]),0}, + NID_setct_MeAqCInitResTBS,4,&(lvalues[4072]),0}, {"setct-RegFormResTBS","setct-RegFormResTBS",NID_setct_RegFormResTBS, - 4,&(lvalues[4080]),0}, + 4,&(lvalues[4076]),0}, {"setct-CertReqData","setct-CertReqData",NID_setct_CertReqData,4, - &(lvalues[4084]),0}, + &(lvalues[4080]),0}, {"setct-CertReqTBS","setct-CertReqTBS",NID_setct_CertReqTBS,4, - &(lvalues[4088]),0}, + &(lvalues[4084]),0}, {"setct-CertResData","setct-CertResData",NID_setct_CertResData,4, - &(lvalues[4092]),0}, + &(lvalues[4088]),0}, {"setct-CertInqReqTBS","setct-CertInqReqTBS",NID_setct_CertInqReqTBS, - 4,&(lvalues[4096]),0}, + 4,&(lvalues[4092]),0}, {"setct-ErrorTBS","setct-ErrorTBS",NID_setct_ErrorTBS,4, - &(lvalues[4100]),0}, + &(lvalues[4096]),0}, {"setct-PIDualSignedTBE","setct-PIDualSignedTBE", - NID_setct_PIDualSignedTBE,4,&(lvalues[4104]),0}, + NID_setct_PIDualSignedTBE,4,&(lvalues[4100]),0}, {"setct-PIUnsignedTBE","setct-PIUnsignedTBE",NID_setct_PIUnsignedTBE, - 4,&(lvalues[4108]),0}, + 4,&(lvalues[4104]),0}, {"setct-AuthReqTBE","setct-AuthReqTBE",NID_setct_AuthReqTBE,4, - &(lvalues[4112]),0}, + &(lvalues[4108]),0}, {"setct-AuthResTBE","setct-AuthResTBE",NID_setct_AuthResTBE,4, - &(lvalues[4116]),0}, + &(lvalues[4112]),0}, {"setct-AuthResTBEX","setct-AuthResTBEX",NID_setct_AuthResTBEX,4, - &(lvalues[4120]),0}, + &(lvalues[4116]),0}, {"setct-AuthTokenTBE","setct-AuthTokenTBE",NID_setct_AuthTokenTBE,4, - &(lvalues[4124]),0}, + &(lvalues[4120]),0}, {"setct-CapTokenTBE","setct-CapTokenTBE",NID_setct_CapTokenTBE,4, - &(lvalues[4128]),0}, + &(lvalues[4124]),0}, {"setct-CapTokenTBEX","setct-CapTokenTBEX",NID_setct_CapTokenTBEX,4, - &(lvalues[4132]),0}, + &(lvalues[4128]),0}, {"setct-AcqCardCodeMsgTBE","setct-AcqCardCodeMsgTBE", - NID_setct_AcqCardCodeMsgTBE,4,&(lvalues[4136]),0}, + NID_setct_AcqCardCodeMsgTBE,4,&(lvalues[4132]),0}, 
{"setct-AuthRevReqTBE","setct-AuthRevReqTBE",NID_setct_AuthRevReqTBE, - 4,&(lvalues[4140]),0}, + 4,&(lvalues[4136]),0}, {"setct-AuthRevResTBE","setct-AuthRevResTBE",NID_setct_AuthRevResTBE, - 4,&(lvalues[4144]),0}, + 4,&(lvalues[4140]),0}, {"setct-AuthRevResTBEB","setct-AuthRevResTBEB", - NID_setct_AuthRevResTBEB,4,&(lvalues[4148]),0}, + NID_setct_AuthRevResTBEB,4,&(lvalues[4144]),0}, {"setct-CapReqTBE","setct-CapReqTBE",NID_setct_CapReqTBE,4, - &(lvalues[4152]),0}, + &(lvalues[4148]),0}, {"setct-CapReqTBEX","setct-CapReqTBEX",NID_setct_CapReqTBEX,4, - &(lvalues[4156]),0}, + &(lvalues[4152]),0}, {"setct-CapResTBE","setct-CapResTBE",NID_setct_CapResTBE,4, - &(lvalues[4160]),0}, + &(lvalues[4156]),0}, {"setct-CapRevReqTBE","setct-CapRevReqTBE",NID_setct_CapRevReqTBE,4, - &(lvalues[4164]),0}, + &(lvalues[4160]),0}, {"setct-CapRevReqTBEX","setct-CapRevReqTBEX",NID_setct_CapRevReqTBEX, - 4,&(lvalues[4168]),0}, + 4,&(lvalues[4164]),0}, {"setct-CapRevResTBE","setct-CapRevResTBE",NID_setct_CapRevResTBE,4, - &(lvalues[4172]),0}, + &(lvalues[4168]),0}, {"setct-CredReqTBE","setct-CredReqTBE",NID_setct_CredReqTBE,4, - &(lvalues[4176]),0}, + &(lvalues[4172]),0}, {"setct-CredReqTBEX","setct-CredReqTBEX",NID_setct_CredReqTBEX,4, - &(lvalues[4180]),0}, + &(lvalues[4176]),0}, {"setct-CredResTBE","setct-CredResTBE",NID_setct_CredResTBE,4, - &(lvalues[4184]),0}, + &(lvalues[4180]),0}, {"setct-CredRevReqTBE","setct-CredRevReqTBE",NID_setct_CredRevReqTBE, - 4,&(lvalues[4188]),0}, + 4,&(lvalues[4184]),0}, {"setct-CredRevReqTBEX","setct-CredRevReqTBEX", - NID_setct_CredRevReqTBEX,4,&(lvalues[4192]),0}, + NID_setct_CredRevReqTBEX,4,&(lvalues[4188]),0}, {"setct-CredRevResTBE","setct-CredRevResTBE",NID_setct_CredRevResTBE, - 4,&(lvalues[4196]),0}, + 4,&(lvalues[4192]),0}, {"setct-BatchAdminReqTBE","setct-BatchAdminReqTBE", - NID_setct_BatchAdminReqTBE,4,&(lvalues[4200]),0}, + NID_setct_BatchAdminReqTBE,4,&(lvalues[4196]),0}, {"setct-BatchAdminResTBE","setct-BatchAdminResTBE", - 
NID_setct_BatchAdminResTBE,4,&(lvalues[4204]),0}, + NID_setct_BatchAdminResTBE,4,&(lvalues[4200]),0}, {"setct-RegFormReqTBE","setct-RegFormReqTBE",NID_setct_RegFormReqTBE, - 4,&(lvalues[4208]),0}, + 4,&(lvalues[4204]),0}, {"setct-CertReqTBE","setct-CertReqTBE",NID_setct_CertReqTBE,4, - &(lvalues[4212]),0}, + &(lvalues[4208]),0}, {"setct-CertReqTBEX","setct-CertReqTBEX",NID_setct_CertReqTBEX,4, - &(lvalues[4216]),0}, + &(lvalues[4212]),0}, {"setct-CertResTBE","setct-CertResTBE",NID_setct_CertResTBE,4, - &(lvalues[4220]),0}, + &(lvalues[4216]),0}, {"setct-CRLNotificationTBS","setct-CRLNotificationTBS", - NID_setct_CRLNotificationTBS,4,&(lvalues[4224]),0}, + NID_setct_CRLNotificationTBS,4,&(lvalues[4220]),0}, {"setct-CRLNotificationResTBS","setct-CRLNotificationResTBS", - NID_setct_CRLNotificationResTBS,4,&(lvalues[4228]),0}, + NID_setct_CRLNotificationResTBS,4,&(lvalues[4224]),0}, {"setct-BCIDistributionTBS","setct-BCIDistributionTBS", - NID_setct_BCIDistributionTBS,4,&(lvalues[4232]),0}, + NID_setct_BCIDistributionTBS,4,&(lvalues[4228]),0}, {"setext-genCrypt","generic cryptogram",NID_setext_genCrypt,4, - &(lvalues[4236]),0}, + &(lvalues[4232]),0}, {"setext-miAuth","merchant initiated auth",NID_setext_miAuth,4, - &(lvalues[4240]),0}, + &(lvalues[4236]),0}, {"setext-pinSecure","setext-pinSecure",NID_setext_pinSecure,4, - &(lvalues[4244]),0}, -{"setext-pinAny","setext-pinAny",NID_setext_pinAny,4,&(lvalues[4248]),0}, -{"setext-track2","setext-track2",NID_setext_track2,4,&(lvalues[4252]),0}, + &(lvalues[4240]),0}, +{"setext-pinAny","setext-pinAny",NID_setext_pinAny,4,&(lvalues[4244]),0}, +{"setext-track2","setext-track2",NID_setext_track2,4,&(lvalues[4248]),0}, {"setext-cv","additional verification",NID_setext_cv,4, - &(lvalues[4256]),0}, + &(lvalues[4252]),0}, {"set-policy-root","set-policy-root",NID_set_policy_root,4, - &(lvalues[4260]),0}, + &(lvalues[4256]),0}, {"setCext-hashedRoot","setCext-hashedRoot",NID_setCext_hashedRoot,4, - &(lvalues[4264]),0}, + 
&(lvalues[4260]),0}, {"setCext-certType","setCext-certType",NID_setCext_certType,4, - &(lvalues[4268]),0}, + &(lvalues[4264]),0}, {"setCext-merchData","setCext-merchData",NID_setCext_merchData,4, - &(lvalues[4272]),0}, + &(lvalues[4268]),0}, {"setCext-cCertRequired","setCext-cCertRequired", - NID_setCext_cCertRequired,4,&(lvalues[4276]),0}, + NID_setCext_cCertRequired,4,&(lvalues[4272]),0}, {"setCext-tunneling","setCext-tunneling",NID_setCext_tunneling,4, - &(lvalues[4280]),0}, + &(lvalues[4276]),0}, {"setCext-setExt","setCext-setExt",NID_setCext_setExt,4, - &(lvalues[4284]),0}, + &(lvalues[4280]),0}, {"setCext-setQualf","setCext-setQualf",NID_setCext_setQualf,4, - &(lvalues[4288]),0}, + &(lvalues[4284]),0}, {"setCext-PGWYcapabilities","setCext-PGWYcapabilities", - NID_setCext_PGWYcapabilities,4,&(lvalues[4292]),0}, + NID_setCext_PGWYcapabilities,4,&(lvalues[4288]),0}, {"setCext-TokenIdentifier","setCext-TokenIdentifier", - NID_setCext_TokenIdentifier,4,&(lvalues[4296]),0}, + NID_setCext_TokenIdentifier,4,&(lvalues[4292]),0}, {"setCext-Track2Data","setCext-Track2Data",NID_setCext_Track2Data,4, - &(lvalues[4300]),0}, + &(lvalues[4296]),0}, {"setCext-TokenType","setCext-TokenType",NID_setCext_TokenType,4, - &(lvalues[4304]),0}, + &(lvalues[4300]),0}, {"setCext-IssuerCapabilities","setCext-IssuerCapabilities", - NID_setCext_IssuerCapabilities,4,&(lvalues[4308]),0}, -{"setAttr-Cert","setAttr-Cert",NID_setAttr_Cert,4,&(lvalues[4312]),0}, + NID_setCext_IssuerCapabilities,4,&(lvalues[4304]),0}, +{"setAttr-Cert","setAttr-Cert",NID_setAttr_Cert,4,&(lvalues[4308]),0}, {"setAttr-PGWYcap","payment gateway capabilities",NID_setAttr_PGWYcap, - 4,&(lvalues[4316]),0}, + 4,&(lvalues[4312]),0}, {"setAttr-TokenType","setAttr-TokenType",NID_setAttr_TokenType,4, - &(lvalues[4320]),0}, + &(lvalues[4316]),0}, {"setAttr-IssCap","issuer capabilities",NID_setAttr_IssCap,4, - &(lvalues[4324]),0}, + &(lvalues[4320]),0}, {"set-rootKeyThumb","set-rootKeyThumb",NID_set_rootKeyThumb,5, - 
&(lvalues[4328]),0}, -{"set-addPolicy","set-addPolicy",NID_set_addPolicy,5,&(lvalues[4333]),0}, + &(lvalues[4324]),0}, +{"set-addPolicy","set-addPolicy",NID_set_addPolicy,5,&(lvalues[4329]),0}, {"setAttr-Token-EMV","setAttr-Token-EMV",NID_setAttr_Token_EMV,5, - &(lvalues[4338]),0}, + &(lvalues[4334]),0}, {"setAttr-Token-B0Prime","setAttr-Token-B0Prime", - NID_setAttr_Token_B0Prime,5,&(lvalues[4343]),0}, + NID_setAttr_Token_B0Prime,5,&(lvalues[4339]),0}, {"setAttr-IssCap-CVM","setAttr-IssCap-CVM",NID_setAttr_IssCap_CVM,5, - &(lvalues[4348]),0}, + &(lvalues[4344]),0}, {"setAttr-IssCap-T2","setAttr-IssCap-T2",NID_setAttr_IssCap_T2,5, - &(lvalues[4353]),0}, + &(lvalues[4349]),0}, {"setAttr-IssCap-Sig","setAttr-IssCap-Sig",NID_setAttr_IssCap_Sig,5, - &(lvalues[4358]),0}, + &(lvalues[4354]),0}, {"setAttr-GenCryptgrm","generate cryptogram",NID_setAttr_GenCryptgrm, - 6,&(lvalues[4363]),0}, + 6,&(lvalues[4359]),0}, {"setAttr-T2Enc","encrypted track 2",NID_setAttr_T2Enc,6, - &(lvalues[4369]),0}, + &(lvalues[4365]),0}, {"setAttr-T2cleartxt","cleartext track 2",NID_setAttr_T2cleartxt,6, - &(lvalues[4375]),0}, + &(lvalues[4371]),0}, {"setAttr-TokICCsig","ICC or token signature",NID_setAttr_TokICCsig,6, - &(lvalues[4381]),0}, + &(lvalues[4377]),0}, {"setAttr-SecDevSig","secure device signature",NID_setAttr_SecDevSig, - 6,&(lvalues[4387]),0}, + 6,&(lvalues[4383]),0}, {"set-brand-IATA-ATA","set-brand-IATA-ATA",NID_set_brand_IATA_ATA,4, - &(lvalues[4393]),0}, + &(lvalues[4389]),0}, {"set-brand-Diners","set-brand-Diners",NID_set_brand_Diners,4, - &(lvalues[4397]),0}, + &(lvalues[4393]),0}, {"set-brand-AmericanExpress","set-brand-AmericanExpress", - NID_set_brand_AmericanExpress,4,&(lvalues[4401]),0}, -{"set-brand-JCB","set-brand-JCB",NID_set_brand_JCB,4,&(lvalues[4405]),0}, + NID_set_brand_AmericanExpress,4,&(lvalues[4397]),0}, +{"set-brand-JCB","set-brand-JCB",NID_set_brand_JCB,4,&(lvalues[4401]),0}, {"set-brand-Visa","set-brand-Visa",NID_set_brand_Visa,4, - &(lvalues[4409]),0}, + 
&(lvalues[4405]),0}, {"set-brand-MasterCard","set-brand-MasterCard", - NID_set_brand_MasterCard,4,&(lvalues[4413]),0}, + NID_set_brand_MasterCard,4,&(lvalues[4409]),0}, {"set-brand-Novus","set-brand-Novus",NID_set_brand_Novus,5, - &(lvalues[4417]),0}, -{"DES-CDMF","des-cdmf",NID_des_cdmf,8,&(lvalues[4422]),0}, + &(lvalues[4413]),0}, +{"DES-CDMF","des-cdmf",NID_des_cdmf,8,&(lvalues[4418]),0}, {"rsaOAEPEncryptionSET","rsaOAEPEncryptionSET", - NID_rsaOAEPEncryptionSET,9,&(lvalues[4430]),0}, -{"ITU-T","itu-t",NID_itu_t,1,&(lvalues[4439]),0}, -{"JOINT-ISO-ITU-T","joint-iso-itu-t",NID_joint_iso_itu_t,1, - &(lvalues[4440]),0}, + NID_rsaOAEPEncryptionSET,9,&(lvalues[4426]),0}, +{"ITU-T","itu-t",NID_itu_t,0,NULL,0}, +{"JOINT-ISO-ITU-T","joint-iso-itu-t",NID_joint_iso_itu_t,0,NULL,0}, {"international-organizations","International Organizations", - NID_international_organizations,1,&(lvalues[4441]),0}, + NID_international_organizations,1,&(lvalues[4435]),0}, {"msSmartcardLogin","Microsoft Smartcardlogin",NID_ms_smartcard_login, - 10,&(lvalues[4442]),0}, + 10,&(lvalues[4436]),0}, {"msUPN","Microsoft Universal Principal Name",NID_ms_upn,10, - &(lvalues[4452]),0}, + &(lvalues[4446]),0}, {"AES-128-CFB1","aes-128-cfb1",NID_aes_128_cfb1,0,NULL,0}, {"AES-192-CFB1","aes-192-cfb1",NID_aes_192_cfb1,0,NULL,0}, {"AES-256-CFB1","aes-256-cfb1",NID_aes_256_cfb1,0,NULL,0}, @@ -1979,138 +1972,138 @@ {"DES-CFB8","des-cfb8",NID_des_cfb8,0,NULL,0}, {"DES-EDE3-CFB1","des-ede3-cfb1",NID_des_ede3_cfb1,0,NULL,0}, {"DES-EDE3-CFB8","des-ede3-cfb8",NID_des_ede3_cfb8,0,NULL,0}, -{"street","streetAddress",NID_streetAddress,3,&(lvalues[4462]),0}, -{"postalCode","postalCode",NID_postalCode,3,&(lvalues[4465]),0}, -{"id-ppl","id-ppl",NID_id_ppl,7,&(lvalues[4468]),0}, +{"street","streetAddress",NID_streetAddress,3,&(lvalues[4456]),0}, +{"postalCode","postalCode",NID_postalCode,3,&(lvalues[4459]),0}, +{"id-ppl","id-ppl",NID_id_ppl,7,&(lvalues[4462]),0}, {"proxyCertInfo","Proxy Certificate 
Information",NID_proxyCertInfo,8, - &(lvalues[4475]),0}, + &(lvalues[4469]),0}, {"id-ppl-anyLanguage","Any language",NID_id_ppl_anyLanguage,8, - &(lvalues[4483]),0}, + &(lvalues[4477]),0}, {"id-ppl-inheritAll","Inherit all",NID_id_ppl_inheritAll,8, - &(lvalues[4491]),0}, + &(lvalues[4485]),0}, {"nameConstraints","X509v3 Name Constraints",NID_name_constraints,3, - &(lvalues[4499]),0}, -{"id-ppl-independent","Independent",NID_Independent,8,&(lvalues[4502]),0}, + &(lvalues[4493]),0}, +{"id-ppl-independent","Independent",NID_Independent,8,&(lvalues[4496]),0}, {"RSA-SHA256","sha256WithRSAEncryption",NID_sha256WithRSAEncryption,9, - &(lvalues[4510]),0}, + &(lvalues[4504]),0}, {"RSA-SHA384","sha384WithRSAEncryption",NID_sha384WithRSAEncryption,9, - &(lvalues[4519]),0}, + &(lvalues[4513]),0}, {"RSA-SHA512","sha512WithRSAEncryption",NID_sha512WithRSAEncryption,9, - &(lvalues[4528]),0}, + &(lvalues[4522]),0}, {"RSA-SHA224","sha224WithRSAEncryption",NID_sha224WithRSAEncryption,9, - &(lvalues[4537]),0}, -{"SHA256","sha256",NID_sha256,9,&(lvalues[4546]),0}, -{"SHA384","sha384",NID_sha384,9,&(lvalues[4555]),0}, -{"SHA512","sha512",NID_sha512,9,&(lvalues[4564]),0}, -{"SHA224","sha224",NID_sha224,9,&(lvalues[4573]),0}, + &(lvalues[4531]),0}, +{"SHA256","sha256",NID_sha256,9,&(lvalues[4540]),0}, +{"SHA384","sha384",NID_sha384,9,&(lvalues[4549]),0}, +{"SHA512","sha512",NID_sha512,9,&(lvalues[4558]),0}, +{"SHA224","sha224",NID_sha224,9,&(lvalues[4567]),0}, {"identified-organization","identified-organization", - NID_identified_organization,1,&(lvalues[4582]),0}, -{"certicom-arc","certicom-arc",NID_certicom_arc,3,&(lvalues[4583]),0}, -{"wap","wap",NID_wap,2,&(lvalues[4586]),0}, -{"wap-wsg","wap-wsg",NID_wap_wsg,3,&(lvalues[4588]),0}, + NID_identified_organization,1,&(lvalues[4576]),0}, +{"certicom-arc","certicom-arc",NID_certicom_arc,3,&(lvalues[4577]),0}, +{"wap","wap",NID_wap,2,&(lvalues[4580]),0}, +{"wap-wsg","wap-wsg",NID_wap_wsg,3,&(lvalues[4582]),0}, 
{"id-characteristic-two-basis","id-characteristic-two-basis", - NID_X9_62_id_characteristic_two_basis,8,&(lvalues[4591]),0}, -{"onBasis","onBasis",NID_X9_62_onBasis,9,&(lvalues[4599]),0}, -{"tpBasis","tpBasis",NID_X9_62_tpBasis,9,&(lvalues[4608]),0}, -{"ppBasis","ppBasis",NID_X9_62_ppBasis,9,&(lvalues[4617]),0}, -{"c2pnb163v1","c2pnb163v1",NID_X9_62_c2pnb163v1,8,&(lvalues[4626]),0}, -{"c2pnb163v2","c2pnb163v2",NID_X9_62_c2pnb163v2,8,&(lvalues[4634]),0}, -{"c2pnb163v3","c2pnb163v3",NID_X9_62_c2pnb163v3,8,&(lvalues[4642]),0}, -{"c2pnb176v1","c2pnb176v1",NID_X9_62_c2pnb176v1,8,&(lvalues[4650]),0}, -{"c2tnb191v1","c2tnb191v1",NID_X9_62_c2tnb191v1,8,&(lvalues[4658]),0}, -{"c2tnb191v2","c2tnb191v2",NID_X9_62_c2tnb191v2,8,&(lvalues[4666]),0}, -{"c2tnb191v3","c2tnb191v3",NID_X9_62_c2tnb191v3,8,&(lvalues[4674]),0}, -{"c2onb191v4","c2onb191v4",NID_X9_62_c2onb191v4,8,&(lvalues[4682]),0}, -{"c2onb191v5","c2onb191v5",NID_X9_62_c2onb191v5,8,&(lvalues[4690]),0}, -{"c2pnb208w1","c2pnb208w1",NID_X9_62_c2pnb208w1,8,&(lvalues[4698]),0}, -{"c2tnb239v1","c2tnb239v1",NID_X9_62_c2tnb239v1,8,&(lvalues[4706]),0}, -{"c2tnb239v2","c2tnb239v2",NID_X9_62_c2tnb239v2,8,&(lvalues[4714]),0}, -{"c2tnb239v3","c2tnb239v3",NID_X9_62_c2tnb239v3,8,&(lvalues[4722]),0}, -{"c2onb239v4","c2onb239v4",NID_X9_62_c2onb239v4,8,&(lvalues[4730]),0}, -{"c2onb239v5","c2onb239v5",NID_X9_62_c2onb239v5,8,&(lvalues[4738]),0}, -{"c2pnb272w1","c2pnb272w1",NID_X9_62_c2pnb272w1,8,&(lvalues[4746]),0}, -{"c2pnb304w1","c2pnb304w1",NID_X9_62_c2pnb304w1,8,&(lvalues[4754]),0}, -{"c2tnb359v1","c2tnb359v1",NID_X9_62_c2tnb359v1,8,&(lvalues[4762]),0}, -{"c2pnb368w1","c2pnb368w1",NID_X9_62_c2pnb368w1,8,&(lvalues[4770]),0}, -{"c2tnb431r1","c2tnb431r1",NID_X9_62_c2tnb431r1,8,&(lvalues[4778]),0}, -{"secp112r1","secp112r1",NID_secp112r1,5,&(lvalues[4786]),0}, -{"secp112r2","secp112r2",NID_secp112r2,5,&(lvalues[4791]),0}, -{"secp128r1","secp128r1",NID_secp128r1,5,&(lvalues[4796]),0}, 
-{"secp128r2","secp128r2",NID_secp128r2,5,&(lvalues[4801]),0}, -{"secp160k1","secp160k1",NID_secp160k1,5,&(lvalues[4806]),0}, -{"secp160r1","secp160r1",NID_secp160r1,5,&(lvalues[4811]),0}, -{"secp160r2","secp160r2",NID_secp160r2,5,&(lvalues[4816]),0}, -{"secp192k1","secp192k1",NID_secp192k1,5,&(lvalues[4821]),0}, -{"secp224k1","secp224k1",NID_secp224k1,5,&(lvalues[4826]),0}, -{"secp224r1","secp224r1",NID_secp224r1,5,&(lvalues[4831]),0}, -{"secp256k1","secp256k1",NID_secp256k1,5,&(lvalues[4836]),0}, -{"secp384r1","secp384r1",NID_secp384r1,5,&(lvalues[4841]),0}, -{"secp521r1","secp521r1",NID_secp521r1,5,&(lvalues[4846]),0}, -{"sect113r1","sect113r1",NID_sect113r1,5,&(lvalues[4851]),0}, -{"sect113r2","sect113r2",NID_sect113r2,5,&(lvalues[4856]),0}, -{"sect131r1","sect131r1",NID_sect131r1,5,&(lvalues[4861]),0}, -{"sect131r2","sect131r2",NID_sect131r2,5,&(lvalues[4866]),0}, -{"sect163k1","sect163k1",NID_sect163k1,5,&(lvalues[4871]),0}, -{"sect163r1","sect163r1",NID_sect163r1,5,&(lvalues[4876]),0}, -{"sect163r2","sect163r2",NID_sect163r2,5,&(lvalues[4881]),0}, -{"sect193r1","sect193r1",NID_sect193r1,5,&(lvalues[4886]),0}, -{"sect193r2","sect193r2",NID_sect193r2,5,&(lvalues[4891]),0}, -{"sect233k1","sect233k1",NID_sect233k1,5,&(lvalues[4896]),0}, -{"sect233r1","sect233r1",NID_sect233r1,5,&(lvalues[4901]),0}, -{"sect239k1","sect239k1",NID_sect239k1,5,&(lvalues[4906]),0}, -{"sect283k1","sect283k1",NID_sect283k1,5,&(lvalues[4911]),0}, -{"sect283r1","sect283r1",NID_sect283r1,5,&(lvalues[4916]),0}, -{"sect409k1","sect409k1",NID_sect409k1,5,&(lvalues[4921]),0}, -{"sect409r1","sect409r1",NID_sect409r1,5,&(lvalues[4926]),0}, -{"sect571k1","sect571k1",NID_sect571k1,5,&(lvalues[4931]),0}, -{"sect571r1","sect571r1",NID_sect571r1,5,&(lvalues[4936]),0}, + NID_X9_62_id_characteristic_two_basis,8,&(lvalues[4585]),0}, +{"onBasis","onBasis",NID_X9_62_onBasis,9,&(lvalues[4593]),0}, +{"tpBasis","tpBasis",NID_X9_62_tpBasis,9,&(lvalues[4602]),0}, 
+{"ppBasis","ppBasis",NID_X9_62_ppBasis,9,&(lvalues[4611]),0}, +{"c2pnb163v1","c2pnb163v1",NID_X9_62_c2pnb163v1,8,&(lvalues[4620]),0}, +{"c2pnb163v2","c2pnb163v2",NID_X9_62_c2pnb163v2,8,&(lvalues[4628]),0}, +{"c2pnb163v3","c2pnb163v3",NID_X9_62_c2pnb163v3,8,&(lvalues[4636]),0}, +{"c2pnb176v1","c2pnb176v1",NID_X9_62_c2pnb176v1,8,&(lvalues[4644]),0}, +{"c2tnb191v1","c2tnb191v1",NID_X9_62_c2tnb191v1,8,&(lvalues[4652]),0}, +{"c2tnb191v2","c2tnb191v2",NID_X9_62_c2tnb191v2,8,&(lvalues[4660]),0}, +{"c2tnb191v3","c2tnb191v3",NID_X9_62_c2tnb191v3,8,&(lvalues[4668]),0}, +{"c2onb191v4","c2onb191v4",NID_X9_62_c2onb191v4,8,&(lvalues[4676]),0}, +{"c2onb191v5","c2onb191v5",NID_X9_62_c2onb191v5,8,&(lvalues[4684]),0}, +{"c2pnb208w1","c2pnb208w1",NID_X9_62_c2pnb208w1,8,&(lvalues[4692]),0}, +{"c2tnb239v1","c2tnb239v1",NID_X9_62_c2tnb239v1,8,&(lvalues[4700]),0}, +{"c2tnb239v2","c2tnb239v2",NID_X9_62_c2tnb239v2,8,&(lvalues[4708]),0}, +{"c2tnb239v3","c2tnb239v3",NID_X9_62_c2tnb239v3,8,&(lvalues[4716]),0}, +{"c2onb239v4","c2onb239v4",NID_X9_62_c2onb239v4,8,&(lvalues[4724]),0}, +{"c2onb239v5","c2onb239v5",NID_X9_62_c2onb239v5,8,&(lvalues[4732]),0}, +{"c2pnb272w1","c2pnb272w1",NID_X9_62_c2pnb272w1,8,&(lvalues[4740]),0}, +{"c2pnb304w1","c2pnb304w1",NID_X9_62_c2pnb304w1,8,&(lvalues[4748]),0}, +{"c2tnb359v1","c2tnb359v1",NID_X9_62_c2tnb359v1,8,&(lvalues[4756]),0}, +{"c2pnb368w1","c2pnb368w1",NID_X9_62_c2pnb368w1,8,&(lvalues[4764]),0}, +{"c2tnb431r1","c2tnb431r1",NID_X9_62_c2tnb431r1,8,&(lvalues[4772]),0}, +{"secp112r1","secp112r1",NID_secp112r1,5,&(lvalues[4780]),0}, +{"secp112r2","secp112r2",NID_secp112r2,5,&(lvalues[4785]),0}, +{"secp128r1","secp128r1",NID_secp128r1,5,&(lvalues[4790]),0}, +{"secp128r2","secp128r2",NID_secp128r2,5,&(lvalues[4795]),0}, +{"secp160k1","secp160k1",NID_secp160k1,5,&(lvalues[4800]),0}, +{"secp160r1","secp160r1",NID_secp160r1,5,&(lvalues[4805]),0}, +{"secp160r2","secp160r2",NID_secp160r2,5,&(lvalues[4810]),0}, 
+{"secp192k1","secp192k1",NID_secp192k1,5,&(lvalues[4815]),0}, +{"secp224k1","secp224k1",NID_secp224k1,5,&(lvalues[4820]),0}, +{"secp224r1","secp224r1",NID_secp224r1,5,&(lvalues[4825]),0}, +{"secp256k1","secp256k1",NID_secp256k1,5,&(lvalues[4830]),0}, +{"secp384r1","secp384r1",NID_secp384r1,5,&(lvalues[4835]),0}, +{"secp521r1","secp521r1",NID_secp521r1,5,&(lvalues[4840]),0}, +{"sect113r1","sect113r1",NID_sect113r1,5,&(lvalues[4845]),0}, +{"sect113r2","sect113r2",NID_sect113r2,5,&(lvalues[4850]),0}, +{"sect131r1","sect131r1",NID_sect131r1,5,&(lvalues[4855]),0}, +{"sect131r2","sect131r2",NID_sect131r2,5,&(lvalues[4860]),0}, +{"sect163k1","sect163k1",NID_sect163k1,5,&(lvalues[4865]),0}, +{"sect163r1","sect163r1",NID_sect163r1,5,&(lvalues[4870]),0}, +{"sect163r2","sect163r2",NID_sect163r2,5,&(lvalues[4875]),0}, +{"sect193r1","sect193r1",NID_sect193r1,5,&(lvalues[4880]),0}, +{"sect193r2","sect193r2",NID_sect193r2,5,&(lvalues[4885]),0}, +{"sect233k1","sect233k1",NID_sect233k1,5,&(lvalues[4890]),0}, +{"sect233r1","sect233r1",NID_sect233r1,5,&(lvalues[4895]),0}, +{"sect239k1","sect239k1",NID_sect239k1,5,&(lvalues[4900]),0}, +{"sect283k1","sect283k1",NID_sect283k1,5,&(lvalues[4905]),0}, +{"sect283r1","sect283r1",NID_sect283r1,5,&(lvalues[4910]),0}, +{"sect409k1","sect409k1",NID_sect409k1,5,&(lvalues[4915]),0}, +{"sect409r1","sect409r1",NID_sect409r1,5,&(lvalues[4920]),0}, +{"sect571k1","sect571k1",NID_sect571k1,5,&(lvalues[4925]),0}, +{"sect571r1","sect571r1",NID_sect571r1,5,&(lvalues[4930]),0}, {"wap-wsg-idm-ecid-wtls1","wap-wsg-idm-ecid-wtls1", - NID_wap_wsg_idm_ecid_wtls1,5,&(lvalues[4941]),0}, + NID_wap_wsg_idm_ecid_wtls1,5,&(lvalues[4935]),0}, {"wap-wsg-idm-ecid-wtls3","wap-wsg-idm-ecid-wtls3", - NID_wap_wsg_idm_ecid_wtls3,5,&(lvalues[4946]),0}, + NID_wap_wsg_idm_ecid_wtls3,5,&(lvalues[4940]),0}, {"wap-wsg-idm-ecid-wtls4","wap-wsg-idm-ecid-wtls4", - NID_wap_wsg_idm_ecid_wtls4,5,&(lvalues[4951]),0}, + NID_wap_wsg_idm_ecid_wtls4,5,&(lvalues[4945]),0}, 
{"wap-wsg-idm-ecid-wtls5","wap-wsg-idm-ecid-wtls5", - NID_wap_wsg_idm_ecid_wtls5,5,&(lvalues[4956]),0}, + NID_wap_wsg_idm_ecid_wtls5,5,&(lvalues[4950]),0}, {"wap-wsg-idm-ecid-wtls6","wap-wsg-idm-ecid-wtls6", - NID_wap_wsg_idm_ecid_wtls6,5,&(lvalues[4961]),0}, + NID_wap_wsg_idm_ecid_wtls6,5,&(lvalues[4955]),0}, {"wap-wsg-idm-ecid-wtls7","wap-wsg-idm-ecid-wtls7", - NID_wap_wsg_idm_ecid_wtls7,5,&(lvalues[4966]),0}, + NID_wap_wsg_idm_ecid_wtls7,5,&(lvalues[4960]),0}, {"wap-wsg-idm-ecid-wtls8","wap-wsg-idm-ecid-wtls8", - NID_wap_wsg_idm_ecid_wtls8,5,&(lvalues[4971]),0}, + NID_wap_wsg_idm_ecid_wtls8,5,&(lvalues[4965]),0}, {"wap-wsg-idm-ecid-wtls9","wap-wsg-idm-ecid-wtls9", - NID_wap_wsg_idm_ecid_wtls9,5,&(lvalues[4976]),0}, + NID_wap_wsg_idm_ecid_wtls9,5,&(lvalues[4970]),0}, {"wap-wsg-idm-ecid-wtls10","wap-wsg-idm-ecid-wtls10", - NID_wap_wsg_idm_ecid_wtls10,5,&(lvalues[4981]),0}, + NID_wap_wsg_idm_ecid_wtls10,5,&(lvalues[4975]),0}, {"wap-wsg-idm-ecid-wtls11","wap-wsg-idm-ecid-wtls11", - NID_wap_wsg_idm_ecid_wtls11,5,&(lvalues[4986]),0}, + NID_wap_wsg_idm_ecid_wtls11,5,&(lvalues[4980]),0}, {"wap-wsg-idm-ecid-wtls12","wap-wsg-idm-ecid-wtls12", - NID_wap_wsg_idm_ecid_wtls12,5,&(lvalues[4991]),0}, -{"anyPolicy","X509v3 Any Policy",NID_any_policy,4,&(lvalues[4996]),0}, + NID_wap_wsg_idm_ecid_wtls12,5,&(lvalues[4985]),0}, +{"anyPolicy","X509v3 Any Policy",NID_any_policy,4,&(lvalues[4990]),0}, {"policyMappings","X509v3 Policy Mappings",NID_policy_mappings,3, - &(lvalues[5000]),0}, + &(lvalues[4994]),0}, {"inhibitAnyPolicy","X509v3 Inhibit Any Policy", - NID_inhibit_any_policy,3,&(lvalues[5003]),0}, + NID_inhibit_any_policy,3,&(lvalues[4997]),0}, {"Oakley-EC2N-3","ipsec3",NID_ipsec3,0,NULL,0}, {"Oakley-EC2N-4","ipsec4",NID_ipsec4,0,NULL,0}, {"CAMELLIA-128-CBC","camellia-128-cbc",NID_camellia_128_cbc,11, - &(lvalues[5006]),0}, + &(lvalues[5000]),0}, {"CAMELLIA-192-CBC","camellia-192-cbc",NID_camellia_192_cbc,11, - &(lvalues[5017]),0}, + &(lvalues[5011]),0}, 
{"CAMELLIA-256-CBC","camellia-256-cbc",NID_camellia_256_cbc,11, - &(lvalues[5028]),0}, + &(lvalues[5022]),0}, {"CAMELLIA-128-ECB","camellia-128-ecb",NID_camellia_128_ecb,8, - &(lvalues[5039]),0}, + &(lvalues[5033]),0}, {"CAMELLIA-192-ECB","camellia-192-ecb",NID_camellia_192_ecb,8, - &(lvalues[5047]),0}, + &(lvalues[5041]),0}, {"CAMELLIA-256-ECB","camellia-256-ecb",NID_camellia_256_ecb,8, - &(lvalues[5055]),0}, + &(lvalues[5049]),0}, {"CAMELLIA-128-CFB","camellia-128-cfb",NID_camellia_128_cfb128,8, - &(lvalues[5063]),0}, + &(lvalues[5057]),0}, {"CAMELLIA-192-CFB","camellia-192-cfb",NID_camellia_192_cfb128,8, - &(lvalues[5071]),0}, + &(lvalues[5065]),0}, {"CAMELLIA-256-CFB","camellia-256-cfb",NID_camellia_256_cfb128,8, - &(lvalues[5079]),0}, + &(lvalues[5073]),0}, {"CAMELLIA-128-CFB1","camellia-128-cfb1",NID_camellia_128_cfb1,0,NULL,0}, {"CAMELLIA-192-CFB1","camellia-192-cfb1",NID_camellia_192_cfb1,0,NULL,0}, {"CAMELLIA-256-CFB1","camellia-256-cfb1",NID_camellia_256_cfb1,0,NULL,0}, @@ -2118,284 +2111,284 @@ {"CAMELLIA-192-CFB8","camellia-192-cfb8",NID_camellia_192_cfb8,0,NULL,0}, {"CAMELLIA-256-CFB8","camellia-256-cfb8",NID_camellia_256_cfb8,0,NULL,0}, {"CAMELLIA-128-OFB","camellia-128-ofb",NID_camellia_128_ofb128,8, - &(lvalues[5087]),0}, + &(lvalues[5081]),0}, {"CAMELLIA-192-OFB","camellia-192-ofb",NID_camellia_192_ofb128,8, - &(lvalues[5095]),0}, + &(lvalues[5089]),0}, {"CAMELLIA-256-OFB","camellia-256-ofb",NID_camellia_256_ofb128,8, - &(lvalues[5103]),0}, + &(lvalues[5097]),0}, {"subjectDirectoryAttributes","X509v3 Subject Directory Attributes", - NID_subject_directory_attributes,3,&(lvalues[5111]),0}, + NID_subject_directory_attributes,3,&(lvalues[5105]),0}, {"issuingDistributionPoint","X509v3 Issuing Distrubution Point", - NID_issuing_distribution_point,3,&(lvalues[5114]),0}, + NID_issuing_distribution_point,3,&(lvalues[5108]),0}, {"certificateIssuer","X509v3 Certificate Issuer", - NID_certificate_issuer,3,&(lvalues[5117]),0}, + 
NID_certificate_issuer,3,&(lvalues[5111]),0}, {NULL,NULL,NID_undef,0,NULL,0}, -{"KISA","kisa",NID_kisa,6,&(lvalues[5120]),0}, +{"KISA","kisa",NID_kisa,6,&(lvalues[5114]),0}, {NULL,NULL,NID_undef,0,NULL,0}, {NULL,NULL,NID_undef,0,NULL,0}, -{"SEED-ECB","seed-ecb",NID_seed_ecb,8,&(lvalues[5126]),0}, -{"SEED-CBC","seed-cbc",NID_seed_cbc,8,&(lvalues[5134]),0}, -{"SEED-OFB","seed-ofb",NID_seed_ofb128,8,&(lvalues[5142]),0}, -{"SEED-CFB","seed-cfb",NID_seed_cfb128,8,&(lvalues[5150]),0}, -{"HMAC-MD5","hmac-md5",NID_hmac_md5,8,&(lvalues[5158]),0}, -{"HMAC-SHA1","hmac-sha1",NID_hmac_sha1,8,&(lvalues[5166]),0}, +{"SEED-ECB","seed-ecb",NID_seed_ecb,8,&(lvalues[5120]),0}, +{"SEED-CBC","seed-cbc",NID_seed_cbc,8,&(lvalues[5128]),0}, +{"SEED-OFB","seed-ofb",NID_seed_ofb128,8,&(lvalues[5136]),0}, +{"SEED-CFB","seed-cfb",NID_seed_cfb128,8,&(lvalues[5144]),0}, +{"HMAC-MD5","hmac-md5",NID_hmac_md5,8,&(lvalues[5152]),0}, +{"HMAC-SHA1","hmac-sha1",NID_hmac_sha1,8,&(lvalues[5160]),0}, {"id-PasswordBasedMAC","password based MAC",NID_id_PasswordBasedMAC,9, - &(lvalues[5174]),0}, + &(lvalues[5168]),0}, {"id-DHBasedMac","Diffie-Hellman based MAC",NID_id_DHBasedMac,9, - &(lvalues[5183]),0}, + &(lvalues[5177]),0}, {"id-it-suppLangTags","id-it-suppLangTags",NID_id_it_suppLangTags,8, - &(lvalues[5192]),0}, -{"caRepository","CA Repository",NID_caRepository,8,&(lvalues[5200]),0}, + &(lvalues[5186]),0}, +{"caRepository","CA Repository",NID_caRepository,8,&(lvalues[5194]),0}, {"id-smime-ct-compressedData","id-smime-ct-compressedData", - NID_id_smime_ct_compressedData,11,&(lvalues[5208]),0}, + NID_id_smime_ct_compressedData,11,&(lvalues[5202]),0}, {"id-ct-asciiTextWithCRLF","id-ct-asciiTextWithCRLF", - NID_id_ct_asciiTextWithCRLF,11,&(lvalues[5219]),0}, + NID_id_ct_asciiTextWithCRLF,11,&(lvalues[5213]),0}, {"id-aes128-wrap","id-aes128-wrap",NID_id_aes128_wrap,9, - &(lvalues[5230]),0}, + &(lvalues[5224]),0}, {"id-aes192-wrap","id-aes192-wrap",NID_id_aes192_wrap,9, - &(lvalues[5239]),0}, + 
&(lvalues[5233]),0}, {"id-aes256-wrap","id-aes256-wrap",NID_id_aes256_wrap,9, - &(lvalues[5248]),0}, + &(lvalues[5242]),0}, {"ecdsa-with-Recommended","ecdsa-with-Recommended", - NID_ecdsa_with_Recommended,7,&(lvalues[5257]),0}, + NID_ecdsa_with_Recommended,7,&(lvalues[5251]),0}, {"ecdsa-with-Specified","ecdsa-with-Specified", - NID_ecdsa_with_Specified,7,&(lvalues[5264]),0}, + NID_ecdsa_with_Specified,7,&(lvalues[5258]),0}, {"ecdsa-with-SHA224","ecdsa-with-SHA224",NID_ecdsa_with_SHA224,8, - &(lvalues[5271]),0}, + &(lvalues[5265]),0}, {"ecdsa-with-SHA256","ecdsa-with-SHA256",NID_ecdsa_with_SHA256,8, - &(lvalues[5279]),0}, + &(lvalues[5273]),0}, {"ecdsa-with-SHA384","ecdsa-with-SHA384",NID_ecdsa_with_SHA384,8, - &(lvalues[5287]),0}, + &(lvalues[5281]),0}, {"ecdsa-with-SHA512","ecdsa-with-SHA512",NID_ecdsa_with_SHA512,8, - &(lvalues[5295]),0}, -{"hmacWithMD5","hmacWithMD5",NID_hmacWithMD5,8,&(lvalues[5303]),0}, + &(lvalues[5289]),0}, +{"hmacWithMD5","hmacWithMD5",NID_hmacWithMD5,8,&(lvalues[5297]),0}, {"hmacWithSHA224","hmacWithSHA224",NID_hmacWithSHA224,8, - &(lvalues[5311]),0}, + &(lvalues[5305]),0}, {"hmacWithSHA256","hmacWithSHA256",NID_hmacWithSHA256,8, - &(lvalues[5319]),0}, + &(lvalues[5313]),0}, {"hmacWithSHA384","hmacWithSHA384",NID_hmacWithSHA384,8, - &(lvalues[5327]),0}, + &(lvalues[5321]),0}, {"hmacWithSHA512","hmacWithSHA512",NID_hmacWithSHA512,8, - &(lvalues[5335]),0}, + &(lvalues[5329]),0}, {"dsa_with_SHA224","dsa_with_SHA224",NID_dsa_with_SHA224,9, - &(lvalues[5343]),0}, + &(lvalues[5337]),0}, {"dsa_with_SHA256","dsa_with_SHA256",NID_dsa_with_SHA256,9, - &(lvalues[5352]),0}, -{"whirlpool","whirlpool",NID_whirlpool,6,&(lvalues[5361]),0}, -{"cryptopro","cryptopro",NID_cryptopro,5,&(lvalues[5367]),0}, -{"cryptocom","cryptocom",NID_cryptocom,5,&(lvalues[5372]),0}, + &(lvalues[5346]),0}, +{"whirlpool","whirlpool",NID_whirlpool,6,&(lvalues[5355]),0}, +{"cryptopro","cryptopro",NID_cryptopro,5,&(lvalues[5361]),0}, 
+{"cryptocom","cryptocom",NID_cryptocom,5,&(lvalues[5366]),0}, {"id-GostR3411-94-with-GostR3410-2001", "GOST R 34.11-94 with GOST R 34.10-2001", - NID_id_GostR3411_94_with_GostR3410_2001,6,&(lvalues[5377]),0}, + NID_id_GostR3411_94_with_GostR3410_2001,6,&(lvalues[5371]),0}, {"id-GostR3411-94-with-GostR3410-94", "GOST R 34.11-94 with GOST R 34.10-94", - NID_id_GostR3411_94_with_GostR3410_94,6,&(lvalues[5383]),0}, -{"md_gost94","GOST R 34.11-94",NID_id_GostR3411_94,6,&(lvalues[5389]),0}, + NID_id_GostR3411_94_with_GostR3410_94,6,&(lvalues[5377]),0}, +{"md_gost94","GOST R 34.11-94",NID_id_GostR3411_94,6,&(lvalues[5383]),0}, {"id-HMACGostR3411-94","HMAC GOST 34.11-94",NID_id_HMACGostR3411_94,6, - &(lvalues[5395]),0}, + &(lvalues[5389]),0}, {"gost2001","GOST R 34.10-2001",NID_id_GostR3410_2001,6, - &(lvalues[5401]),0}, -{"gost94","GOST R 34.10-94",NID_id_GostR3410_94,6,&(lvalues[5407]),0}, -{"gost89","GOST 28147-89",NID_id_Gost28147_89,6,&(lvalues[5413]),0}, + &(lvalues[5395]),0}, +{"gost94","GOST R 34.10-94",NID_id_GostR3410_94,6,&(lvalues[5401]),0}, +{"gost89","GOST 28147-89",NID_id_Gost28147_89,6,&(lvalues[5407]),0}, {"gost89-cnt","gost89-cnt",NID_gost89_cnt,0,NULL,0}, {"gost-mac","GOST 28147-89 MAC",NID_id_Gost28147_89_MAC,6, - &(lvalues[5419]),0}, + &(lvalues[5413]),0}, {"prf-gostr3411-94","GOST R 34.11-94 PRF",NID_id_GostR3411_94_prf,6, - &(lvalues[5425]),0}, + &(lvalues[5419]),0}, {"id-GostR3410-2001DH","GOST R 34.10-2001 DH",NID_id_GostR3410_2001DH, - 6,&(lvalues[5431]),0}, + 6,&(lvalues[5425]),0}, {"id-GostR3410-94DH","GOST R 34.10-94 DH",NID_id_GostR3410_94DH,6, - &(lvalues[5437]),0}, + &(lvalues[5431]),0}, {"id-Gost28147-89-CryptoPro-KeyMeshing", "id-Gost28147-89-CryptoPro-KeyMeshing", - NID_id_Gost28147_89_CryptoPro_KeyMeshing,7,&(lvalues[5443]),0}, + NID_id_Gost28147_89_CryptoPro_KeyMeshing,7,&(lvalues[5437]),0}, {"id-Gost28147-89-None-KeyMeshing","id-Gost28147-89-None-KeyMeshing", - NID_id_Gost28147_89_None_KeyMeshing,7,&(lvalues[5450]),0}, + 
NID_id_Gost28147_89_None_KeyMeshing,7,&(lvalues[5444]),0}, {"id-GostR3411-94-TestParamSet","id-GostR3411-94-TestParamSet", - NID_id_GostR3411_94_TestParamSet,7,&(lvalues[5457]),0}, + NID_id_GostR3411_94_TestParamSet,7,&(lvalues[5451]),0}, {"id-GostR3411-94-CryptoProParamSet", "id-GostR3411-94-CryptoProParamSet", - NID_id_GostR3411_94_CryptoProParamSet,7,&(lvalues[5464]),0}, + NID_id_GostR3411_94_CryptoProParamSet,7,&(lvalues[5458]),0}, {"id-Gost28147-89-TestParamSet","id-Gost28147-89-TestParamSet", - NID_id_Gost28147_89_TestParamSet,7,&(lvalues[5471]),0}, + NID_id_Gost28147_89_TestParamSet,7,&(lvalues[5465]),0}, {"id-Gost28147-89-CryptoPro-A-ParamSet", "id-Gost28147-89-CryptoPro-A-ParamSet", - NID_id_Gost28147_89_CryptoPro_A_ParamSet,7,&(lvalues[5478]),0}, + NID_id_Gost28147_89_CryptoPro_A_ParamSet,7,&(lvalues[5472]),0}, {"id-Gost28147-89-CryptoPro-B-ParamSet", "id-Gost28147-89-CryptoPro-B-ParamSet", - NID_id_Gost28147_89_CryptoPro_B_ParamSet,7,&(lvalues[5485]),0}, + NID_id_Gost28147_89_CryptoPro_B_ParamSet,7,&(lvalues[5479]),0}, {"id-Gost28147-89-CryptoPro-C-ParamSet", "id-Gost28147-89-CryptoPro-C-ParamSet", - NID_id_Gost28147_89_CryptoPro_C_ParamSet,7,&(lvalues[5492]),0}, + NID_id_Gost28147_89_CryptoPro_C_ParamSet,7,&(lvalues[5486]),0}, {"id-Gost28147-89-CryptoPro-D-ParamSet", "id-Gost28147-89-CryptoPro-D-ParamSet", - NID_id_Gost28147_89_CryptoPro_D_ParamSet,7,&(lvalues[5499]),0}, + NID_id_Gost28147_89_CryptoPro_D_ParamSet,7,&(lvalues[5493]),0}, {"id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet", "id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet", - NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet,7,&(lvalues[5506]), + NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet,7,&(lvalues[5500]), 0}, {"id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet", "id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet", - NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet,7,&(lvalues[5513]), + NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet,7,&(lvalues[5507]), 0}, 
{"id-Gost28147-89-CryptoPro-RIC-1-ParamSet", "id-Gost28147-89-CryptoPro-RIC-1-ParamSet", - NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet,7,&(lvalues[5520]),0}, + NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet,7,&(lvalues[5514]),0}, {"id-GostR3410-94-TestParamSet","id-GostR3410-94-TestParamSet", - NID_id_GostR3410_94_TestParamSet,7,&(lvalues[5527]),0}, + NID_id_GostR3410_94_TestParamSet,7,&(lvalues[5521]),0}, {"id-GostR3410-94-CryptoPro-A-ParamSet", "id-GostR3410-94-CryptoPro-A-ParamSet", - NID_id_GostR3410_94_CryptoPro_A_ParamSet,7,&(lvalues[5534]),0}, + NID_id_GostR3410_94_CryptoPro_A_ParamSet,7,&(lvalues[5528]),0}, {"id-GostR3410-94-CryptoPro-B-ParamSet", "id-GostR3410-94-CryptoPro-B-ParamSet", - NID_id_GostR3410_94_CryptoPro_B_ParamSet,7,&(lvalues[5541]),0}, + NID_id_GostR3410_94_CryptoPro_B_ParamSet,7,&(lvalues[5535]),0}, {"id-GostR3410-94-CryptoPro-C-ParamSet", "id-GostR3410-94-CryptoPro-C-ParamSet", - NID_id_GostR3410_94_CryptoPro_C_ParamSet,7,&(lvalues[5548]),0}, + NID_id_GostR3410_94_CryptoPro_C_ParamSet,7,&(lvalues[5542]),0}, {"id-GostR3410-94-CryptoPro-D-ParamSet", "id-GostR3410-94-CryptoPro-D-ParamSet", - NID_id_GostR3410_94_CryptoPro_D_ParamSet,7,&(lvalues[5555]),0}, + NID_id_GostR3410_94_CryptoPro_D_ParamSet,7,&(lvalues[5549]),0}, {"id-GostR3410-94-CryptoPro-XchA-ParamSet", "id-GostR3410-94-CryptoPro-XchA-ParamSet", - NID_id_GostR3410_94_CryptoPro_XchA_ParamSet,7,&(lvalues[5562]),0}, + NID_id_GostR3410_94_CryptoPro_XchA_ParamSet,7,&(lvalues[5556]),0}, {"id-GostR3410-94-CryptoPro-XchB-ParamSet", "id-GostR3410-94-CryptoPro-XchB-ParamSet", - NID_id_GostR3410_94_CryptoPro_XchB_ParamSet,7,&(lvalues[5569]),0}, + NID_id_GostR3410_94_CryptoPro_XchB_ParamSet,7,&(lvalues[5563]),0}, {"id-GostR3410-94-CryptoPro-XchC-ParamSet", "id-GostR3410-94-CryptoPro-XchC-ParamSet", - NID_id_GostR3410_94_CryptoPro_XchC_ParamSet,7,&(lvalues[5576]),0}, + NID_id_GostR3410_94_CryptoPro_XchC_ParamSet,7,&(lvalues[5570]),0}, 
{"id-GostR3410-2001-TestParamSet","id-GostR3410-2001-TestParamSet", - NID_id_GostR3410_2001_TestParamSet,7,&(lvalues[5583]),0}, + NID_id_GostR3410_2001_TestParamSet,7,&(lvalues[5577]),0}, {"id-GostR3410-2001-CryptoPro-A-ParamSet", "id-GostR3410-2001-CryptoPro-A-ParamSet", - NID_id_GostR3410_2001_CryptoPro_A_ParamSet,7,&(lvalues[5590]),0}, + NID_id_GostR3410_2001_CryptoPro_A_ParamSet,7,&(lvalues[5584]),0}, {"id-GostR3410-2001-CryptoPro-B-ParamSet", "id-GostR3410-2001-CryptoPro-B-ParamSet", - NID_id_GostR3410_2001_CryptoPro_B_ParamSet,7,&(lvalues[5597]),0}, + NID_id_GostR3410_2001_CryptoPro_B_ParamSet,7,&(lvalues[5591]),0}, {"id-GostR3410-2001-CryptoPro-C-ParamSet", "id-GostR3410-2001-CryptoPro-C-ParamSet", - NID_id_GostR3410_2001_CryptoPro_C_ParamSet,7,&(lvalues[5604]),0}, + NID_id_GostR3410_2001_CryptoPro_C_ParamSet,7,&(lvalues[5598]),0}, {"id-GostR3410-2001-CryptoPro-XchA-ParamSet", "id-GostR3410-2001-CryptoPro-XchA-ParamSet", - NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet,7,&(lvalues[5611]),0}, + NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet,7,&(lvalues[5605]),0}, {"id-GostR3410-2001-CryptoPro-XchB-ParamSet", "id-GostR3410-2001-CryptoPro-XchB-ParamSet", - NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet,7,&(lvalues[5618]),0}, + NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet,7,&(lvalues[5612]),0}, {"id-GostR3410-94-a","id-GostR3410-94-a",NID_id_GostR3410_94_a,7, - &(lvalues[5625]),0}, + &(lvalues[5619]),0}, {"id-GostR3410-94-aBis","id-GostR3410-94-aBis", - NID_id_GostR3410_94_aBis,7,&(lvalues[5632]),0}, + NID_id_GostR3410_94_aBis,7,&(lvalues[5626]),0}, {"id-GostR3410-94-b","id-GostR3410-94-b",NID_id_GostR3410_94_b,7, - &(lvalues[5639]),0}, + &(lvalues[5633]),0}, {"id-GostR3410-94-bBis","id-GostR3410-94-bBis", - NID_id_GostR3410_94_bBis,7,&(lvalues[5646]),0}, + NID_id_GostR3410_94_bBis,7,&(lvalues[5640]),0}, {"id-Gost28147-89-cc","GOST 28147-89 Cryptocom ParamSet", - NID_id_Gost28147_89_cc,8,&(lvalues[5653]),0}, + NID_id_Gost28147_89_cc,8,&(lvalues[5647]),0}, 
{"gost94cc","GOST 34.10-94 Cryptocom",NID_id_GostR3410_94_cc,8, - &(lvalues[5661]),0}, + &(lvalues[5655]),0}, {"gost2001cc","GOST 34.10-2001 Cryptocom",NID_id_GostR3410_2001_cc,8, - &(lvalues[5669]),0}, + &(lvalues[5663]),0}, {"id-GostR3411-94-with-GostR3410-94-cc", "GOST R 34.11-94 with GOST R 34.10-94 Cryptocom", - NID_id_GostR3411_94_with_GostR3410_94_cc,8,&(lvalues[5677]),0}, + NID_id_GostR3411_94_with_GostR3410_94_cc,8,&(lvalues[5671]),0}, {"id-GostR3411-94-with-GostR3410-2001-cc", "GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom", - NID_id_GostR3411_94_with_GostR3410_2001_cc,8,&(lvalues[5685]),0}, + NID_id_GostR3411_94_with_GostR3410_2001_cc,8,&(lvalues[5679]),0}, {"id-GostR3410-2001-ParamSet-cc", "GOST R 3410-2001 Parameter Set Cryptocom", - NID_id_GostR3410_2001_ParamSet_cc,8,&(lvalues[5693]),0}, + NID_id_GostR3410_2001_ParamSet_cc,8,&(lvalues[5687]),0}, {"HMAC","hmac",NID_hmac,0,NULL,0}, {"LocalKeySet","Microsoft Local Key set",NID_LocalKeySet,9, - &(lvalues[5701]),0}, + &(lvalues[5695]),0}, {"freshestCRL","X509v3 Freshest CRL",NID_freshest_crl,3, - &(lvalues[5710]),0}, + &(lvalues[5704]),0}, {"id-on-permanentIdentifier","Permanent Identifier", - NID_id_on_permanentIdentifier,8,&(lvalues[5713]),0}, -{"searchGuide","searchGuide",NID_searchGuide,3,&(lvalues[5721]),0}, + NID_id_on_permanentIdentifier,8,&(lvalues[5707]),0}, +{"searchGuide","searchGuide",NID_searchGuide,3,&(lvalues[5715]),0}, {"businessCategory","businessCategory",NID_businessCategory,3, - &(lvalues[5724]),0}, -{"postalAddress","postalAddress",NID_postalAddress,3,&(lvalues[5727]),0}, -{"postOfficeBox","postOfficeBox",NID_postOfficeBox,3,&(lvalues[5730]),0}, + &(lvalues[5718]),0}, +{"postalAddress","postalAddress",NID_postalAddress,3,&(lvalues[5721]),0}, +{"postOfficeBox","postOfficeBox",NID_postOfficeBox,3,&(lvalues[5724]),0}, {"physicalDeliveryOfficeName","physicalDeliveryOfficeName", - NID_physicalDeliveryOfficeName,3,&(lvalues[5733]),0}, + 
NID_physicalDeliveryOfficeName,3,&(lvalues[5727]),0}, {"telephoneNumber","telephoneNumber",NID_telephoneNumber,3, - &(lvalues[5736]),0}, -{"telexNumber","telexNumber",NID_telexNumber,3,&(lvalues[5739]),0}, + &(lvalues[5730]),0}, +{"telexNumber","telexNumber",NID_telexNumber,3,&(lvalues[5733]),0}, {"teletexTerminalIdentifier","teletexTerminalIdentifier", - NID_teletexTerminalIdentifier,3,&(lvalues[5742]),0}, + NID_teletexTerminalIdentifier,3,&(lvalues[5736]),0}, {"facsimileTelephoneNumber","facsimileTelephoneNumber", - NID_facsimileTelephoneNumber,3,&(lvalues[5745]),0}, -{"x121Address","x121Address",NID_x121Address,3,&(lvalues[5748]),0}, + NID_facsimileTelephoneNumber,3,&(lvalues[5739]),0}, +{"x121Address","x121Address",NID_x121Address,3,&(lvalues[5742]),0}, {"internationaliSDNNumber","internationaliSDNNumber", - NID_internationaliSDNNumber,3,&(lvalues[5751]),0}, + NID_internationaliSDNNumber,3,&(lvalues[5745]),0}, {"registeredAddress","registeredAddress",NID_registeredAddress,3, - &(lvalues[5754]),0}, + &(lvalues[5748]),0}, {"destinationIndicator","destinationIndicator", - NID_destinationIndicator,3,&(lvalues[5757]),0}, + NID_destinationIndicator,3,&(lvalues[5751]),0}, {"preferredDeliveryMethod","preferredDeliveryMethod", - NID_preferredDeliveryMethod,3,&(lvalues[5760]),0}, + NID_preferredDeliveryMethod,3,&(lvalues[5754]),0}, {"presentationAddress","presentationAddress",NID_presentationAddress, - 3,&(lvalues[5763]),0}, + 3,&(lvalues[5757]),0}, {"supportedApplicationContext","supportedApplicationContext", - NID_supportedApplicationContext,3,&(lvalues[5766]),0}, -{"member","member",NID_member,3,&(lvalues[5769]),0}, -{"owner","owner",NID_owner,3,&(lvalues[5772]),0}, -{"roleOccupant","roleOccupant",NID_roleOccupant,3,&(lvalues[5775]),0}, -{"seeAlso","seeAlso",NID_seeAlso,3,&(lvalues[5778]),0}, -{"userPassword","userPassword",NID_userPassword,3,&(lvalues[5781]),0}, + NID_supportedApplicationContext,3,&(lvalues[5760]),0}, 
+{"member","member",NID_member,3,&(lvalues[5763]),0}, +{"owner","owner",NID_owner,3,&(lvalues[5766]),0}, +{"roleOccupant","roleOccupant",NID_roleOccupant,3,&(lvalues[5769]),0}, +{"seeAlso","seeAlso",NID_seeAlso,3,&(lvalues[5772]),0}, +{"userPassword","userPassword",NID_userPassword,3,&(lvalues[5775]),0}, {"userCertificate","userCertificate",NID_userCertificate,3, - &(lvalues[5784]),0}, -{"cACertificate","cACertificate",NID_cACertificate,3,&(lvalues[5787]),0}, + &(lvalues[5778]),0}, +{"cACertificate","cACertificate",NID_cACertificate,3,&(lvalues[5781]),0}, {"authorityRevocationList","authorityRevocationList", - NID_authorityRevocationList,3,&(lvalues[5790]),0}, + NID_authorityRevocationList,3,&(lvalues[5784]),0}, {"certificateRevocationList","certificateRevocationList", - NID_certificateRevocationList,3,&(lvalues[5793]),0}, + NID_certificateRevocationList,3,&(lvalues[5787]),0}, {"crossCertificatePair","crossCertificatePair", - NID_crossCertificatePair,3,&(lvalues[5796]),0}, + NID_crossCertificatePair,3,&(lvalues[5790]),0}, {"enhancedSearchGuide","enhancedSearchGuide",NID_enhancedSearchGuide, - 3,&(lvalues[5799]),0}, + 3,&(lvalues[5793]),0}, {"protocolInformation","protocolInformation",NID_protocolInformation, - 3,&(lvalues[5802]),0}, + 3,&(lvalues[5796]),0}, {"distinguishedName","distinguishedName",NID_distinguishedName,3, - &(lvalues[5805]),0}, -{"uniqueMember","uniqueMember",NID_uniqueMember,3,&(lvalues[5808]),0}, + &(lvalues[5799]),0}, +{"uniqueMember","uniqueMember",NID_uniqueMember,3,&(lvalues[5802]),0}, {"houseIdentifier","houseIdentifier",NID_houseIdentifier,3, - &(lvalues[5811]),0}, + &(lvalues[5805]),0}, {"supportedAlgorithms","supportedAlgorithms",NID_supportedAlgorithms, - 3,&(lvalues[5814]),0}, + 3,&(lvalues[5808]),0}, {"deltaRevocationList","deltaRevocationList",NID_deltaRevocationList, - 3,&(lvalues[5817]),0}, -{"dmdName","dmdName",NID_dmdName,3,&(lvalues[5820]),0}, + 3,&(lvalues[5811]),0}, +{"dmdName","dmdName",NID_dmdName,3,&(lvalues[5814]),0}, 
{"id-alg-PWRI-KEK","id-alg-PWRI-KEK",NID_id_alg_PWRI_KEK,11, - &(lvalues[5823]),0}, + &(lvalues[5817]),0}, {"CMAC","cmac",NID_cmac,0,NULL,0}, -{"id-aes128-GCM","aes-128-gcm",NID_aes_128_gcm,9,&(lvalues[5834]),0}, -{"id-aes128-CCM","aes-128-ccm",NID_aes_128_ccm,9,&(lvalues[5843]),0}, +{"id-aes128-GCM","aes-128-gcm",NID_aes_128_gcm,9,&(lvalues[5828]),0}, +{"id-aes128-CCM","aes-128-ccm",NID_aes_128_ccm,9,&(lvalues[5837]),0}, {"id-aes128-wrap-pad","id-aes128-wrap-pad",NID_id_aes128_wrap_pad,9, - &(lvalues[5852]),0}, -{"id-aes192-GCM","aes-192-gcm",NID_aes_192_gcm,9,&(lvalues[5861]),0}, -{"id-aes192-CCM","aes-192-ccm",NID_aes_192_ccm,9,&(lvalues[5870]),0}, + &(lvalues[5846]),0}, +{"id-aes192-GCM","aes-192-gcm",NID_aes_192_gcm,9,&(lvalues[5855]),0}, +{"id-aes192-CCM","aes-192-ccm",NID_aes_192_ccm,9,&(lvalues[5864]),0}, {"id-aes192-wrap-pad","id-aes192-wrap-pad",NID_id_aes192_wrap_pad,9, - &(lvalues[5879]),0}, -{"id-aes256-GCM","aes-256-gcm",NID_aes_256_gcm,9,&(lvalues[5888]),0}, -{"id-aes256-CCM","aes-256-ccm",NID_aes_256_ccm,9,&(lvalues[5897]),0}, + &(lvalues[5873]),0}, +{"id-aes256-GCM","aes-256-gcm",NID_aes_256_gcm,9,&(lvalues[5882]),0}, +{"id-aes256-CCM","aes-256-ccm",NID_aes_256_ccm,9,&(lvalues[5891]),0}, {"id-aes256-wrap-pad","id-aes256-wrap-pad",NID_id_aes256_wrap_pad,9, - &(lvalues[5906]),0}, + &(lvalues[5900]),0}, {"AES-128-CTR","aes-128-ctr",NID_aes_128_ctr,0,NULL,0}, {"AES-192-CTR","aes-192-ctr",NID_aes_192_ctr,0,NULL,0}, {"AES-256-CTR","aes-256-ctr",NID_aes_256_ctr,0,NULL,0}, {"id-camellia128-wrap","id-camellia128-wrap",NID_id_camellia128_wrap, - 11,&(lvalues[5915]),0}, + 11,&(lvalues[5909]),0}, {"id-camellia192-wrap","id-camellia192-wrap",NID_id_camellia192_wrap, - 11,&(lvalues[5926]),0}, + 11,&(lvalues[5920]),0}, {"id-camellia256-wrap","id-camellia256-wrap",NID_id_camellia256_wrap, - 11,&(lvalues[5937]),0}, + 11,&(lvalues[5931]),0}, {"anyExtendedKeyUsage","Any Extended Key Usage", - NID_anyExtendedKeyUsage,4,&(lvalues[5948]),0}, 
-{"MGF1","mgf1",NID_mgf1,9,&(lvalues[5952]),0}, -{"RSASSA-PSS","rsassaPss",NID_rsassaPss,9,&(lvalues[5961]),0}, + NID_anyExtendedKeyUsage,4,&(lvalues[5942]),0}, +{"MGF1","mgf1",NID_mgf1,9,&(lvalues[5946]),0}, +{"RSASSA-PSS","rsassaPss",NID_rsassaPss,9,&(lvalues[5955]),0}, {"AES-128-XTS","aes-128-xts",NID_aes_128_xts,0,NULL,0}, {"AES-256-XTS","aes-256-xts",NID_aes_256_xts,0,NULL,0}, {"RC4-HMAC-MD5","rc4-hmac-md5",NID_rc4_hmac_md5,0,NULL,0}, @@ -2405,7 +2398,7 @@ NID_aes_192_cbc_hmac_sha1,0,NULL,0}, {"AES-256-CBC-HMAC-SHA1","aes-256-cbc-hmac-sha1", NID_aes_256_cbc_hmac_sha1,0,NULL,0}, -{"RSAES-OAEP","rsaesOaep",NID_rsaesOaep,9,&(lvalues[5970]),0}, +{"RSAES-OAEP","rsaesOaep",NID_rsaesOaep,9,&(lvalues[5964]),0}, }; static const unsigned int sn_objs[NUM_SN]={ @@ -4242,15 +4235,15 @@ static const unsigned int obj_objs[NUM_OBJ]={ 0, /* OBJ_undef 0 */ +181, /* OBJ_iso 1 */ 393, /* OBJ_joint_iso_ccitt OBJ_joint_iso_itu_t */ 404, /* OBJ_ccitt OBJ_itu_t */ 645, /* OBJ_itu_t 0 */ +646, /* OBJ_joint_iso_itu_t 2 */ 434, /* OBJ_data 0 9 */ -181, /* OBJ_iso 1 */ 182, /* OBJ_member_body 1 2 */ 379, /* OBJ_org 1 3 */ 676, /* OBJ_identified_organization 1 3 */ -646, /* OBJ_joint_iso_itu_t 2 */ 11, /* OBJ_X500 2 5 */ 647, /* OBJ_international_organizations 2 23 */ 380, /* OBJ_dod 1 3 6 */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/objects/obj_dat.pl nodejs-0.11.15/deps/openssl/openssl/crypto/objects/obj_dat.pl --- nodejs-0.11.13/deps/openssl/openssl/crypto/objects/obj_dat.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/objects/obj_dat.pl 2015-01-20 21:22:17.000000000 +0000 @@ -115,7 +115,7 @@ $out.="\"$sn\""; $out.=","."\"$ln\""; $out.=",NID_$nid{$i},"; - if (defined($obj{$nid{$i}})) + if (defined($obj{$nid{$i}}) && $objd{$obj{$nid{$i}}} =~ /,/) { $v=$objd{$obj{$nid{$i}}}; $v =~ s/L//g; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/Makefile.save --- 
nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -# -# OpenSSL/ocsp/Makefile -# - -DIR= ocsp -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= ocsp_asn.c ocsp_ext.c ocsp_ht.c ocsp_lib.c ocsp_cl.c \ - ocsp_srv.c ocsp_prn.c ocsp_vfy.c ocsp_err.c - -LIBOBJ= ocsp_asn.o ocsp_ext.o ocsp_ht.o ocsp_lib.o ocsp_cl.o \ - ocsp_srv.o ocsp_prn.o ocsp_vfy.o ocsp_err.o - -SRC= $(LIBSRC) - -EXHEADER= ocsp.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -ocsp_asn.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -ocsp_asn.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ocsp_asn.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ocsp_asn.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ocsp_asn.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ocsp_asn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ocsp_asn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ocsp_asn.o: ../../include/openssl/ocsp.h ../../include/openssl/opensslconf.h -ocsp_asn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_asn.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ocsp_asn.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ocsp_asn.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ocsp_asn.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ocsp_asn.o: ocsp_asn.c -ocsp_cl.o: ../../e_os.h ../../include/openssl/asn1.h -ocsp_cl.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ocsp_cl.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ocsp_cl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ocsp_cl.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ocsp_cl.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ocsp_cl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ocsp_cl.o: ../../include/openssl/objects.h ../../include/openssl/ocsp.h -ocsp_cl.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ocsp_cl.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -ocsp_cl.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -ocsp_cl.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -ocsp_cl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ocsp_cl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ocsp_cl.o: 
../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ocsp_cl.o: ../cryptlib.h ocsp_cl.c -ocsp_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ocsp_err.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ocsp_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ocsp_err.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ocsp_err.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ocsp_err.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ocsp_err.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ocsp_err.o: ../../include/openssl/ocsp.h ../../include/openssl/opensslconf.h -ocsp_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_err.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ocsp_err.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ocsp_err.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ocsp_err.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ocsp_err.o: ocsp_err.c -ocsp_ext.o: ../../e_os.h ../../include/openssl/asn1.h -ocsp_ext.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ocsp_ext.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ocsp_ext.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ocsp_ext.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ocsp_ext.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ocsp_ext.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ocsp_ext.o: ../../include/openssl/objects.h ../../include/openssl/ocsp.h -ocsp_ext.o: ../../include/openssl/opensslconf.h -ocsp_ext.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_ext.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -ocsp_ext.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ocsp_ext.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h 
-ocsp_ext.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ocsp_ext.o: ../../include/openssl/x509v3.h ../cryptlib.h ocsp_ext.c -ocsp_ht.o: ../../e_os.h ../../include/openssl/asn1.h -ocsp_ht.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ocsp_ht.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ocsp_ht.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ocsp_ht.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ocsp_ht.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ocsp_ht.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ocsp_ht.o: ../../include/openssl/objects.h ../../include/openssl/ocsp.h -ocsp_ht.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ocsp_ht.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ocsp_ht.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ocsp_ht.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ocsp_ht.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ocsp_ht.o: ../../include/openssl/x509v3.h ocsp_ht.c -ocsp_lib.o: ../../e_os.h ../../include/openssl/asn1.h -ocsp_lib.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -ocsp_lib.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ocsp_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ocsp_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ocsp_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ocsp_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ocsp_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ocsp_lib.o: ../../include/openssl/ocsp.h ../../include/openssl/opensslconf.h -ocsp_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_lib.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -ocsp_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h 
-ocsp_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ocsp_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ocsp_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ocsp_lib.o: ../../include/openssl/x509v3.h ../cryptlib.h ocsp_lib.c -ocsp_prn.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ocsp_prn.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ocsp_prn.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ocsp_prn.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ocsp_prn.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ocsp_prn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ocsp_prn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ocsp_prn.o: ../../include/openssl/ocsp.h ../../include/openssl/opensslconf.h -ocsp_prn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_prn.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -ocsp_prn.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ocsp_prn.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ocsp_prn.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ocsp_prn.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ocsp_prn.o: ocsp_prn.c -ocsp_srv.o: ../../e_os.h ../../include/openssl/asn1.h -ocsp_srv.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ocsp_srv.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ocsp_srv.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ocsp_srv.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ocsp_srv.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ocsp_srv.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ocsp_srv.o: ../../include/openssl/objects.h ../../include/openssl/ocsp.h -ocsp_srv.o: ../../include/openssl/opensslconf.h -ocsp_srv.o: 
../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_srv.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -ocsp_srv.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -ocsp_srv.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ocsp_srv.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ocsp_srv.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ocsp_srv.o: ../../include/openssl/x509v3.h ../cryptlib.h ocsp_srv.c -ocsp_vfy.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ocsp_vfy.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ocsp_vfy.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ocsp_vfy.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ocsp_vfy.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ocsp_vfy.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ocsp_vfy.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ocsp_vfy.o: ../../include/openssl/ocsp.h ../../include/openssl/opensslconf.h -ocsp_vfy.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ocsp_vfy.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -ocsp_vfy.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ocsp_vfy.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -ocsp_vfy.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ocsp_vfy.o: ocsp_vfy.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/ocsp_ht.c nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/ocsp_ht.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/ocsp_ht.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/ocsp_ht.c 2015-01-20 21:22:17.000000000 +0000 @@ -158,6 +158,8 @@ OCSP_REQ_CTX *rctx; rctx = OPENSSL_malloc(sizeof(OCSP_REQ_CTX)); + if (!rctx) + return NULL; rctx->state = OHS_ERROR; rctx->mem = BIO_new(BIO_s_mem()); rctx->io = io; 
@@ -167,18 +169,21 @@ else rctx->iobuflen = OCSP_MAX_LINE_LEN; rctx->iobuf = OPENSSL_malloc(rctx->iobuflen); - if (!rctx->iobuf) - return 0; + if (!rctx->mem || !rctx->iobuf) + goto err; if (!path) path = "/"; if (BIO_printf(rctx->mem, post_hdr, path) <= 0) - return 0; + goto err; if (req && !OCSP_REQ_CTX_set1_req(rctx, req)) - return 0; + goto err; return rctx; + err: + OCSP_REQ_CTX_free(rctx); + return NULL; } /* Parse the HTTP response. This will look like this: @@ -490,6 +495,9 @@ ctx = OCSP_sendreq_new(b, path, req, -1); + if (!ctx) + return NULL; + do { rv = OCSP_sendreq_nbio(&resp, ctx); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/ocsp_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/ocsp_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/ocsp_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/ocsp_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -222,8 +222,19 @@ if (!*ppath) goto mem_err; + p = host; + if(host[0] == '[') + { + /* ipv6 literal */ + host++; + p = strchr(host, ']'); + if(!p) goto parse_err; + *p = '\0'; + p++; + } + /* Look for optional ':' for port number */ - if ((p = strchr(host, ':'))) + if ((p = strchr(p, ':'))) { *p = 0; port = p + 1; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/ocsp_vfy.c nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/ocsp_vfy.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ocsp/ocsp_vfy.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ocsp/ocsp_vfy.c 2015-01-20 21:22:17.000000000 +0000 @@ -436,8 +436,11 @@ if(!(flags & OCSP_NOINTERN)) { signer = X509_find_by_subject(req->optionalSignature->certs, nm); - *psigner = signer; - return 1; + if (signer) + { + *psigner = signer; + return 1; + } } signer = X509_find_by_subject(certs, nm); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/opensslv.h nodejs-0.11.15/deps/openssl/openssl/crypto/opensslv.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/opensslv.h 
2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/opensslv.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,10 @@ #ifndef HEADER_OPENSSLV_H #define HEADER_OPENSSLV_H +#ifdef __cplusplus +extern "C" { +#endif + /* Numeric release version identifier: * MNNFFPPS: major minor fix patch status * The status nibble has one of the values 0 for development, 1 to e for betas @@ -25,11 +29,11 @@ * (Prior to 0.9.5a beta1, a different scheme was used: MMNNFFRBB for * major minor fix final patch/beta) */ -#define OPENSSL_VERSION_NUMBER 0x1000107fL +#define OPENSSL_VERSION_NUMBER 0x100010afL #ifdef OPENSSL_FIPS -#define OPENSSL_VERSION_TEXT "OpenSSL 1.0.1g-fips 7 Apr 2014" +#define OPENSSL_VERSION_TEXT "OpenSSL 1.0.1j-fips 15 Oct 2014" #else -#define OPENSSL_VERSION_TEXT "OpenSSL 1.0.1g 7 Apr 2014" +#define OPENSSL_VERSION_TEXT "OpenSSL 1.0.1j 15 Oct 2014" #endif #define OPENSSL_VERSION_PTEXT " part of " OPENSSL_VERSION_TEXT @@ -86,4 +90,7 @@ #define SHLIB_VERSION_NUMBER "1.0.0" +#ifdef __cplusplus +} +#endif #endif /* HEADER_OPENSSLV_H */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ossl_typ.h nodejs-0.11.15/deps/openssl/openssl/crypto/ossl_typ.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/ossl_typ.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ossl_typ.h 2015-01-20 21:22:17.000000000 +0000 @@ -55,6 +55,10 @@ #ifndef HEADER_OPENSSL_TYPES_H #define HEADER_OPENSSL_TYPES_H +#ifdef __cplusplus +extern "C" { +#endif + #include <openssl/e_os2.h> #ifdef NO_ASN1_TYPEDEFS @@ -199,4 +203,7 @@ typedef struct ocsp_response_st OCSP_RESPONSE; typedef struct ocsp_responder_id_st OCSP_RESPID; +#ifdef __cplusplus +} +#endif #endif /* def HEADER_OPENSSL_TYPES_H */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pem/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/pem/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/pem/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/openssl/openssl/crypto/pem/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,258 +0,0 @@ -# -# OpenSSL/crypto/pem/Makefile -# - -DIR= pem -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= pem_sign.c pem_seal.c pem_info.c pem_lib.c pem_all.c pem_err.c \ - pem_x509.c pem_xaux.c pem_oth.c pem_pk8.c pem_pkey.c pvkfmt.c - -LIBOBJ= pem_sign.o pem_seal.o pem_info.o pem_lib.o pem_all.o pem_err.o \ - pem_x509.o pem_xaux.o pem_oth.o pem_pk8.o pem_pkey.o pvkfmt.o - -SRC= $(LIBSRC) - -EXHEADER= pem.h pem2.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -pem_all.o: ../../e_os.h ../../include/openssl/asn1.h -pem_all.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_all.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -pem_all.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -pem_all.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_all.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_all.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_all.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_all.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -pem_all.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -pem_all.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -pem_all.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -pem_all.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_all.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_all.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pem_all.c -pem_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -pem_err.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -pem_err.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pem_err.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pem_err.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pem_err.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pem_err.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pem_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_err.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_err.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pem_err.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_err.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_err.o: ../../include/openssl/x509_vfy.h pem_err.c 
-pem_info.o: ../../e_os.h ../../include/openssl/asn1.h -pem_info.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_info.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -pem_info.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pem_info.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pem_info.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pem_info.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pem_info.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pem_info.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_info.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_info.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -pem_info.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pem_info.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pem_info.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pem_info.o: ../cryptlib.h pem_info.c -pem_lib.o: ../../e_os.h ../../include/openssl/asn1.h -pem_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_lib.o: ../../include/openssl/crypto.h ../../include/openssl/des.h -pem_lib.o: ../../include/openssl/des_old.h ../../include/openssl/e_os2.h -pem_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -pem_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pem_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pem_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pem_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_lib.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_lib.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -pem_lib.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -pem_lib.o: 
../../include/openssl/sha.h ../../include/openssl/stack.h -pem_lib.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -pem_lib.o: ../../include/openssl/ui_compat.h ../../include/openssl/x509.h -pem_lib.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -pem_lib.o: pem_lib.c -pem_oth.o: ../../e_os.h ../../include/openssl/asn1.h -pem_oth.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_oth.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_oth.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_oth.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_oth.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_oth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_oth.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -pem_oth.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -pem_oth.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -pem_oth.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -pem_oth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_oth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_oth.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pem_oth.c -pem_pk8.o: ../../e_os.h ../../include/openssl/asn1.h -pem_pk8.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_pk8.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_pk8.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_pk8.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_pk8.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_pk8.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_pk8.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -pem_pk8.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -pem_pk8.o: ../../include/openssl/pem2.h 
../../include/openssl/pkcs12.h -pem_pk8.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -pem_pk8.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pem_pk8.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pem_pk8.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pem_pk8.o: ../cryptlib.h pem_pk8.c -pem_pkey.o: ../../e_os.h ../../include/openssl/asn1.h -pem_pkey.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_pkey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_pkey.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_pkey.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -pem_pkey.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pem_pkey.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pem_pkey.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pem_pkey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_pkey.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_pkey.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -pem_pkey.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -pem_pkey.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_pkey.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_pkey.o: ../../include/openssl/x509_vfy.h ../asn1/asn1_locl.h ../cryptlib.h -pem_pkey.o: pem_pkey.c -pem_seal.o: ../../e_os.h ../../include/openssl/asn1.h -pem_seal.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_seal.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_seal.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_seal.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_seal.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_seal.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_seal.o: 
../../include/openssl/opensslconf.h -pem_seal.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_seal.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_seal.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -pem_seal.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -pem_seal.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_seal.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_seal.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pem_seal.c -pem_sign.o: ../../e_os.h ../../include/openssl/asn1.h -pem_sign.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_sign.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_sign.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_sign.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_sign.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_sign.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_sign.o: ../../include/openssl/opensslconf.h -pem_sign.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_sign.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_sign.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -pem_sign.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pem_sign.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pem_sign.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pem_sign.o: ../cryptlib.h pem_sign.c -pem_x509.o: ../../e_os.h ../../include/openssl/asn1.h -pem_x509.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_x509.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_x509.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_x509.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_x509.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_x509.o: 
../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_x509.o: ../../include/openssl/opensslconf.h -pem_x509.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_x509.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_x509.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pem_x509.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_x509.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_x509.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pem_x509.c -pem_xaux.o: ../../e_os.h ../../include/openssl/asn1.h -pem_xaux.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pem_xaux.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pem_xaux.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pem_xaux.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pem_xaux.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pem_xaux.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pem_xaux.o: ../../include/openssl/opensslconf.h -pem_xaux.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pem_xaux.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pem_xaux.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pem_xaux.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pem_xaux.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pem_xaux.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pem_xaux.c -pvkfmt.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -pvkfmt.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -pvkfmt.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -pvkfmt.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pvkfmt.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pvkfmt.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pvkfmt.o: 
../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pvkfmt.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pvkfmt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pvkfmt.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pvkfmt.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -pvkfmt.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -pvkfmt.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pvkfmt.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pvkfmt.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pvkfmt.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pem/pvkfmt.c nodejs-0.11.15/deps/openssl/openssl/crypto/pem/pvkfmt.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pem/pvkfmt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pem/pvkfmt.c 2015-01-20 21:22:17.000000000 +0000 @@ -759,6 +759,11 @@ /* Copy BLOBHEADER across, decrypt rest */ memcpy(enctmp, p, 8); p += 8; + if (keylen < 8) + { + PEMerr(PEM_F_DO_PVK_BODY, PEM_R_PVK_TOO_SHORT); + return NULL; + } inlen = keylen - 8; q = enctmp + 8; if (!EVP_DecryptInit_ex(&cctx, EVP_rc4(), NULL, keybuf, NULL)) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs12/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs12/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs12/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs12/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,286 +0,0 @@ -# -# OpenSSL/crypto/pkcs12/Makefile -# - -DIR= pkcs12 -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= p12_add.c p12_asn.c p12_attr.c p12_crpt.c p12_crt.c p12_decr.c \ - p12_init.c p12_key.c p12_kiss.c p12_mutl.c\ - p12_utl.c p12_npas.c pk12err.c p12_p8d.c p12_p8e.c -LIBOBJ= p12_add.o p12_asn.o p12_attr.o p12_crpt.o p12_crt.o p12_decr.o \ - p12_init.o p12_key.o p12_kiss.o p12_mutl.o\ - p12_utl.o p12_npas.o pk12err.o p12_p8d.o p12_p8e.o - -SRC= $(LIBSRC) - -EXHEADER= pkcs12.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -test: - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -p12_add.o: ../../e_os.h ../../include/openssl/asn1.h -p12_add.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_add.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_add.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_add.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_add.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_add.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_add.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -p12_add.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs12.h -p12_add.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p12_add.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p12_add.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p12_add.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p12_add.c -p12_asn.o: ../../e_os.h ../../include/openssl/asn1.h -p12_asn.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -p12_asn.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p12_asn.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p12_asn.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p12_asn.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p12_asn.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p12_asn.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p12_asn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_asn.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_asn.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_asn.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_asn.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_asn.o: ../cryptlib.h p12_asn.c -p12_attr.o: ../../e_os.h ../../include/openssl/asn1.h -p12_attr.o: ../../include/openssl/bio.h 
../../include/openssl/buffer.h -p12_attr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_attr.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_attr.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_attr.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_attr.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_attr.o: ../../include/openssl/opensslconf.h -p12_attr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_attr.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_attr.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_attr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_attr.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_attr.o: ../cryptlib.h p12_attr.c -p12_crpt.o: ../../e_os.h ../../include/openssl/asn1.h -p12_crpt.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_crpt.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_crpt.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_crpt.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_crpt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_crpt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_crpt.o: ../../include/openssl/opensslconf.h -p12_crpt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_crpt.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_crpt.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_crpt.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_crpt.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_crpt.o: ../cryptlib.h p12_crpt.c -p12_crt.o: ../../e_os.h ../../include/openssl/asn1.h -p12_crt.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_crt.o: ../../include/openssl/crypto.h 
../../include/openssl/e_os2.h -p12_crt.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_crt.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_crt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_crt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_crt.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -p12_crt.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs12.h -p12_crt.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p12_crt.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p12_crt.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p12_crt.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p12_crt.c -p12_decr.o: ../../e_os.h ../../include/openssl/asn1.h -p12_decr.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_decr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_decr.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_decr.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_decr.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_decr.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_decr.o: ../../include/openssl/opensslconf.h -p12_decr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_decr.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_decr.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_decr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_decr.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_decr.o: ../cryptlib.h p12_decr.c -p12_init.o: ../../e_os.h ../../include/openssl/asn1.h -p12_init.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_init.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_init.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_init.o: 
../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_init.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_init.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_init.o: ../../include/openssl/opensslconf.h -p12_init.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_init.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_init.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_init.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_init.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_init.o: ../cryptlib.h p12_init.c -p12_key.o: ../../e_os.h ../../include/openssl/asn1.h -p12_key.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -p12_key.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p12_key.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p12_key.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p12_key.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p12_key.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p12_key.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p12_key.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_key.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_key.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_key.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_key.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_key.o: ../cryptlib.h p12_key.c -p12_kiss.o: ../../e_os.h ../../include/openssl/asn1.h -p12_kiss.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_kiss.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_kiss.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_kiss.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_kiss.o: 
../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_kiss.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_kiss.o: ../../include/openssl/opensslconf.h -p12_kiss.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_kiss.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_kiss.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_kiss.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_kiss.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_kiss.o: ../cryptlib.h p12_kiss.c -p12_mutl.o: ../../e_os.h ../../include/openssl/asn1.h -p12_mutl.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_mutl.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_mutl.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_mutl.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_mutl.o: ../../include/openssl/evp.h ../../include/openssl/hmac.h -p12_mutl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p12_mutl.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p12_mutl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_mutl.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_mutl.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -p12_mutl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p12_mutl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p12_mutl.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p12_mutl.c -p12_npas.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -p12_npas.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -p12_npas.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -p12_npas.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -p12_npas.o: ../../include/openssl/err.h ../../include/openssl/evp.h -p12_npas.o: 
../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -p12_npas.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -p12_npas.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -p12_npas.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -p12_npas.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -p12_npas.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -p12_npas.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -p12_npas.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -p12_npas.o: p12_npas.c -p12_p8d.o: ../../e_os.h ../../include/openssl/asn1.h -p12_p8d.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_p8d.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_p8d.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_p8d.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_p8d.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_p8d.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_p8d.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -p12_p8d.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs12.h -p12_p8d.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p12_p8d.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p12_p8d.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p12_p8d.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p12_p8d.c -p12_p8e.o: ../../e_os.h ../../include/openssl/asn1.h -p12_p8e.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_p8e.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_p8e.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_p8e.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_p8e.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_p8e.o: ../../include/openssl/obj_mac.h 
../../include/openssl/objects.h -p12_p8e.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -p12_p8e.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs12.h -p12_p8e.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p12_p8e.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p12_p8e.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p12_p8e.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p12_p8e.c -p12_utl.o: ../../e_os.h ../../include/openssl/asn1.h -p12_utl.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -p12_utl.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -p12_utl.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -p12_utl.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -p12_utl.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -p12_utl.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -p12_utl.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -p12_utl.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs12.h -p12_utl.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -p12_utl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -p12_utl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -p12_utl.o: ../../include/openssl/x509_vfy.h ../cryptlib.h p12_utl.c -pk12err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -pk12err.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -pk12err.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pk12err.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pk12err.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pk12err.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pk12err.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pk12err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h 
-pk12err.o: ../../include/openssl/pkcs12.h ../../include/openssl/pkcs7.h -pk12err.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pk12err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pk12err.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pk12err.o: pk12err.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs12/p12_crt.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs12/p12_crt.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs12/p12_crt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs12/p12_crt.c 2015-01-20 21:22:17.000000000 +0000 @@ -96,7 +96,11 @@ nid_cert = NID_pbe_WithSHA1And3_Key_TripleDES_CBC; else #endif +#ifdef OPENSSL_NO_RC2 + nid_cert = NID_pbe_WithSHA1And3_Key_TripleDES_CBC; +#else nid_cert = NID_pbe_WithSHA1And40BitRC2_CBC; +#endif } if (!nid_key) nid_key = NID_pbe_WithSHA1And3_Key_TripleDES_CBC; @@ -286,7 +290,11 @@ free_safes = 0; if (nid_safe == 0) +#ifdef OPENSSL_NO_RC2 + nid_safe = NID_pbe_WithSHA1And3_Key_TripleDES_CBC; +#else nid_safe = NID_pbe_WithSHA1And40BitRC2_CBC; +#endif if (nid_safe == -1) p7 = PKCS12_pack_p7data(bags); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs12/p12_kiss.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs12/p12_kiss.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs12/p12_kiss.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs12/p12_kiss.c 2015-01-20 21:22:17.000000000 +0000 @@ -269,7 +269,7 @@ int len, r; unsigned char *data; len = ASN1_STRING_to_UTF8(&data, fname); - if(len > 0) { + if(len >= 0) { r = X509_alias_set1(x509, data, len); OPENSSL_free(data); if (!r) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/bio_ber.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/bio_ber.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/bio_ber.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/bio_ber.c 
1970-01-01 00:00:00.000000000 +0000 @@ -1,466 +0,0 @@ -/* crypto/evp/bio_ber.c */ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). 
- * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ - -#include <stdio.h> -#include <errno.h> -#include "cryptlib.h" -#include <openssl/buffer.h> -#include <openssl/evp.h> - -static int ber_write(BIO *h,char *buf,int num); -static int ber_read(BIO *h,char *buf,int size); -/*static int ber_puts(BIO *h,char *str); */ -/*static int ber_gets(BIO *h,char *str,int size); */ -static long ber_ctrl(BIO *h,int cmd,long arg1,char *arg2); -static int ber_new(BIO *h); -static int ber_free(BIO *data); -static long ber_callback_ctrl(BIO *h,int cmd,void *(*fp)()); -#define BER_BUF_SIZE (32) - -/* This is used to hold the state of the BER objects being read. 
*/ -typedef struct ber_struct - { - int tag; - int class; - long length; - int inf; - int num_left; - int depth; - } BER_CTX; - -typedef struct bio_ber_struct - { - int tag; - int class; - long length; - int inf; - - /* most of the following are used when doing non-blocking IO */ - /* reading */ - long num_left; /* number of bytes still to read/write in block */ - int depth; /* used with indefinite encoding. */ - int finished; /* No more read data */ - - /* writting */ - char *w_addr; - int w_offset; - int w_left; - - int buf_len; - int buf_off; - unsigned char buf[BER_BUF_SIZE]; - } BIO_BER_CTX; - -static BIO_METHOD methods_ber= - { - BIO_TYPE_CIPHER,"cipher", - ber_write, - ber_read, - NULL, /* ber_puts, */ - NULL, /* ber_gets, */ - ber_ctrl, - ber_new, - ber_free, - ber_callback_ctrl, - }; - -BIO_METHOD *BIO_f_ber(void) - { - return(&methods_ber); - } - -static int ber_new(BIO *bi) - { - BIO_BER_CTX *ctx; - - ctx=(BIO_BER_CTX *)OPENSSL_malloc(sizeof(BIO_BER_CTX)); - if (ctx == NULL) return(0); - - memset((char *)ctx,0,sizeof(BIO_BER_CTX)); - - bi->init=0; - bi->ptr=(char *)ctx; - bi->flags=0; - return(1); - } - -static int ber_free(BIO *a) - { - BIO_BER_CTX *b; - - if (a == NULL) return(0); - b=(BIO_BER_CTX *)a->ptr; - OPENSSL_cleanse(a->ptr,sizeof(BIO_BER_CTX)); - OPENSSL_free(a->ptr); - a->ptr=NULL; - a->init=0; - a->flags=0; - return(1); - } - -int bio_ber_get_header(BIO *bio, BIO_BER_CTX *ctx) - { - char buf[64]; - int i,j,n; - int ret; - unsigned char *p; - unsigned long length - int tag; - int class; - long max; - - BIO_clear_retry_flags(b); - - /* Pack the buffer down if there is a hole at the front */ - if (ctx->buf_off != 0) - { - p=ctx->buf; - j=ctx->buf_off; - n=ctx->buf_len-j; - for (i=0; i<n; i++) - { - p[0]=p[j]; - p++; - } - ctx->buf_len-j; - ctx->buf_off=0; - } - - /* If there is more room, read some more data */ - i=BER_BUF_SIZE-ctx->buf_len; - if (i) - { - i=BIO_read(bio->next_bio,&(ctx->buf[ctx->buf_len]),i); - if (i <= 0) - { - 
BIO_copy_next_retry(b); - return(i); - } - else - ctx->buf_len+=i; - } - - max=ctx->buf_len; - p=ctx->buf; - ret=ASN1_get_object(&p,&length,&tag,&class,max); - - if (ret & 0x80) - { - if ((ctx->buf_len < BER_BUF_SIZE) && - (ERR_GET_REASON(ERR_peek_error()) == ASN1_R_TOO_LONG)) - { - ERR_clear_error(); /* clear the error */ - BIO_set_retry_read(b); - } - return(-1); - } - - /* We have no error, we have a header, so make use of it */ - - if ((ctx->tag >= 0) && (ctx->tag != tag)) - { - BIOerr(BIO_F_BIO_BER_GET_HEADER,BIO_R_TAG_MISMATCH); - sprintf(buf,"tag=%d, got %d",ctx->tag,tag); - ERR_add_error_data(1,buf); - return(-1); - } - if (ret & 0x01) - if (ret & V_ASN1_CONSTRUCTED) - } - -static int ber_read(BIO *b, char *out, int outl) - { - int ret=0,i,n; - BIO_BER_CTX *ctx; - - BIO_clear_retry_flags(b); - - if (out == NULL) return(0); - ctx=(BIO_BER_CTX *)b->ptr; - - if ((ctx == NULL) || (b->next_bio == NULL)) return(0); - - if (ctx->finished) return(0); - -again: - /* First see if we are half way through reading a block */ - if (ctx->num_left > 0) - { - if (ctx->num_left < outl) - n=ctx->num_left; - else - n=outl; - i=BIO_read(b->next_bio,out,n); - if (i <= 0) - { - BIO_copy_next_retry(b); - return(i); - } - ctx->num_left-=i; - outl-=i; - ret+=i; - if (ctx->num_left <= 0) - { - ctx->depth--; - if (ctx->depth <= 0) - ctx->finished=1; - } - if (outl <= 0) - return(ret); - else - goto again; - } - else /* we need to read another BER header */ - { - } - } - -static int ber_write(BIO *b, char *in, int inl) - { - int ret=0,n,i; - BIO_ENC_CTX *ctx; - - ctx=(BIO_ENC_CTX *)b->ptr; - ret=inl; - - BIO_clear_retry_flags(b); - n=ctx->buf_len-ctx->buf_off; - while (n > 0) - { - i=BIO_write(b->next_bio,&(ctx->buf[ctx->buf_off]),n); - if (i <= 0) - { - BIO_copy_next_retry(b); - return(i); - } - ctx->buf_off+=i; - n-=i; - } - /* at this point all pending data has been written */ - - if ((in == NULL) || (inl <= 0)) return(0); - - ctx->buf_off=0; - while (inl > 0) - { - n=(inl > 
ENC_BLOCK_SIZE)?ENC_BLOCK_SIZE:inl; - EVP_CipherUpdate(&(ctx->cipher), - (unsigned char *)ctx->buf,&ctx->buf_len, - (unsigned char *)in,n); - inl-=n; - in+=n; - - ctx->buf_off=0; - n=ctx->buf_len; - while (n > 0) - { - i=BIO_write(b->next_bio,&(ctx->buf[ctx->buf_off]),n); - if (i <= 0) - { - BIO_copy_next_retry(b); - return(i); - } - n-=i; - ctx->buf_off+=i; - } - ctx->buf_len=0; - ctx->buf_off=0; - } - BIO_copy_next_retry(b); - return(ret); - } - -static long ber_ctrl(BIO *b, int cmd, long num, char *ptr) - { - BIO *dbio; - BIO_ENC_CTX *ctx,*dctx; - long ret=1; - int i; - - ctx=(BIO_ENC_CTX *)b->ptr; - - switch (cmd) - { - case BIO_CTRL_RESET: - ctx->ok=1; - ctx->finished=0; - EVP_CipherInit_ex(&(ctx->cipher),NULL,NULL,NULL,NULL, - ctx->cipher.berrypt); - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - break; - case BIO_CTRL_EOF: /* More to read */ - if (ctx->cont <= 0) - ret=1; - else - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - break; - case BIO_CTRL_WPENDING: - ret=ctx->buf_len-ctx->buf_off; - if (ret <= 0) - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - break; - case BIO_CTRL_PENDING: /* More to read in buffer */ - ret=ctx->buf_len-ctx->buf_off; - if (ret <= 0) - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - break; - case BIO_CTRL_FLUSH: - /* do a final write */ -again: - while (ctx->buf_len != ctx->buf_off) - { - i=ber_write(b,NULL,0); - if (i < 0) - { - ret=i; - break; - } - } - - if (!ctx->finished) - { - ctx->finished=1; - ctx->buf_off=0; - ret=EVP_CipherFinal_ex(&(ctx->cipher), - (unsigned char *)ctx->buf, - &(ctx->buf_len)); - ctx->ok=(int)ret; - if (ret <= 0) break; - - /* push out the bytes */ - goto again; - } - - /* Finally flush the underlying BIO */ - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - break; - case BIO_C_GET_CIPHER_STATUS: - ret=(long)ctx->ok; - break; - case BIO_C_DO_STATE_MACHINE: - BIO_clear_retry_flags(b); - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - BIO_copy_next_retry(b); - break; - - case BIO_CTRL_DUP: - dbio=(BIO *)ptr; - dctx=(BIO_ENC_CTX *)dbio->ptr; - 
memcpy(&(dctx->cipher),&(ctx->cipher),sizeof(ctx->cipher)); - dbio->init=1; - break; - default: - ret=BIO_ctrl(b->next_bio,cmd,num,ptr); - break; - } - return(ret); - } - -static long ber_callback_ctrl(BIO *b, int cmd, void *(*fp)()) - { - long ret=1; - - if (b->next_bio == NULL) return(0); - switch (cmd) - { - default: - ret=BIO_callback_ctrl(b->next_bio,cmd,fp); - break; - } - return(ret); - } - -/* -void BIO_set_cipher_ctx(b,c) -BIO *b; -EVP_CIPHER_ctx *c; - { - if (b == NULL) return; - - if ((b->callback != NULL) && - (b->callback(b,BIO_CB_CTRL,(char *)c,BIO_CTRL_SET,e,0L) <= 0)) - return; - - b->init=1; - ctx=(BIO_ENC_CTX *)b->ptr; - memcpy(ctx->cipher,c,sizeof(EVP_CIPHER_CTX)); - - if (b->callback != NULL) - b->callback(b,BIO_CB_CTRL,(char *)c,BIO_CTRL_SET,e,1L); - } -*/ - -void BIO_set_cipher(BIO *b, EVP_CIPHER *c, unsigned char *k, unsigned char *i, - int e) - { - BIO_ENC_CTX *ctx; - - if (b == NULL) return; - - if ((b->callback != NULL) && - (b->callback(b,BIO_CB_CTRL,(char *)c,BIO_CTRL_SET,e,0L) <= 0)) - return; - - b->init=1; - ctx=(BIO_ENC_CTX *)b->ptr; - EVP_CipherInit_ex(&(ctx->cipher),c,NULL,k,i,e); - - if (b->callback != NULL) - b->callback(b,BIO_CB_CTRL,(char *)c,BIO_CTRL_SET,e,1L); - } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/dec.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/dec.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/dec.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/dec.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,248 +0,0 @@ -/* crypto/pkcs7/verify.c */ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. 
The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. 
If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] 
- */ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <openssl/bio.h> -#include <openssl/x509.h> -#include <openssl/pem.h> -#include <openssl/err.h> -#include <openssl/asn1.h> - -int verify_callback(int ok, X509_STORE_CTX *ctx); - -BIO *bio_err=NULL; - -int main(argc,argv) -int argc; -char *argv[]; - { - char *keyfile=NULL; - BIO *in; - EVP_PKEY *pkey; - X509 *x509; - PKCS7 *p7; - PKCS7_SIGNER_INFO *si; - X509_STORE_CTX cert_ctx; - X509_STORE *cert_store=NULL; - BIO *data,*detached=NULL,*p7bio=NULL; - char buf[1024*4]; - unsigned char *pp; - int i,printit=0; - STACK_OF(PKCS7_SIGNER_INFO) *sk; - - OpenSSL_add_all_algorithms(); - bio_err=BIO_new_fp(stderr,BIO_NOCLOSE); - - data=BIO_new(BIO_s_file()); - pp=NULL; - while (argc > 1) - { - argc--; - argv++; - if (strcmp(argv[0],"-p") == 0) - { - printit=1; - } - else if ((strcmp(argv[0],"-k") == 0) && (argc >= 2)) { - keyfile = argv[1]; - argc-=1; - argv+=1; - } else if ((strcmp(argv[0],"-d") == 0) && (argc >= 2)) - { - detached=BIO_new(BIO_s_file()); - if (!BIO_read_filename(detached,argv[1])) - goto err; - argc-=1; - argv+=1; - } - else break; - } - - if (!BIO_read_filename(data,argv[0])) goto err; - - if(!keyfile) { - fprintf(stderr, "No private key file specified\n"); - goto err; - } - - if ((in=BIO_new_file(keyfile,"r")) == NULL) goto err; - if ((x509=PEM_read_bio_X509(in,NULL,NULL,NULL)) == NULL) goto err; - BIO_reset(in); - if ((pkey=PEM_read_bio_PrivateKey(in,NULL,NULL,NULL)) == NULL) - goto err; - BIO_free(in); - - if (pp == NULL) - BIO_set_fp(data,stdin,BIO_NOCLOSE); - - - /* Load the PKCS7 object from a file */ - if ((p7=PEM_read_bio_PKCS7(data,NULL,NULL,NULL)) == NULL) goto err; - - - - /* This stuff is being setup for certificate verification. 
- * When using SSL, it could be replaced with a - * cert_stre=SSL_CTX_get_cert_store(ssl_ctx); */ - cert_store=X509_STORE_new(); - X509_STORE_set_default_paths(cert_store); - X509_STORE_load_locations(cert_store,NULL,"../../certs"); - X509_STORE_set_verify_cb_func(cert_store,verify_callback); - - ERR_clear_error(); - - /* We need to process the data */ - /* We cannot support detached encryption */ - p7bio=PKCS7_dataDecode(p7,pkey,detached,x509); - - if (p7bio == NULL) - { - printf("problems decoding\n"); - goto err; - } - - /* We now have to 'read' from p7bio to calculate digests etc. */ - for (;;) - { - i=BIO_read(p7bio,buf,sizeof(buf)); - /* print it? */ - if (i <= 0) break; - fwrite(buf,1, i, stdout); - } - - /* We can now verify signatures */ - sk=PKCS7_get_signer_info(p7); - if (sk == NULL) - { - fprintf(stderr, "there are no signatures on this data\n"); - } - else - { - /* Ok, first we need to, for each subject entry, - * see if we can verify */ - ERR_clear_error(); - for (i=0; i<sk_PKCS7_SIGNER_INFO_num(sk); i++) - { - si=sk_PKCS7_SIGNER_INFO_value(sk,i); - i=PKCS7_dataVerify(cert_store,&cert_ctx,p7bio,p7,si); - if (i <= 0) - goto err; - else - fprintf(stderr,"Signature verified\n"); - } - } - X509_STORE_free(cert_store); - - exit(0); -err: - ERR_load_crypto_strings(); - ERR_print_errors_fp(stderr); - exit(1); - } - -/* should be X509 * but we can just have them as char *. 
*/ -int verify_callback(int ok, X509_STORE_CTX *ctx) - { - char buf[256]; - X509 *err_cert; - int err,depth; - - err_cert=X509_STORE_CTX_get_current_cert(ctx); - err= X509_STORE_CTX_get_error(ctx); - depth= X509_STORE_CTX_get_error_depth(ctx); - - X509_NAME_oneline(X509_get_subject_name(err_cert),buf,256); - BIO_printf(bio_err,"depth=%d %s\n",depth,buf); - if (!ok) - { - BIO_printf(bio_err,"verify error:num=%d:%s\n",err, - X509_verify_cert_error_string(err)); - if (depth < 6) - { - ok=1; - X509_STORE_CTX_set_error(ctx,X509_V_OK); - } - else - { - ok=0; - X509_STORE_CTX_set_error(ctx,X509_V_ERR_CERT_CHAIN_TOO_LONG); - } - } - switch (ctx->error) - { - case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: - X509_NAME_oneline(X509_get_issuer_name(ctx->current_cert),buf,256); - BIO_printf(bio_err,"issuer= %s\n",buf); - break; - case X509_V_ERR_CERT_NOT_YET_VALID: - case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: - BIO_printf(bio_err,"notBefore="); - ASN1_UTCTIME_print(bio_err,X509_get_notBefore(ctx->current_cert)); - BIO_printf(bio_err,"\n"); - break; - case X509_V_ERR_CERT_HAS_EXPIRED: - case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: - BIO_printf(bio_err,"notAfter="); - ASN1_UTCTIME_print(bio_err,X509_get_notAfter(ctx->current_cert)); - BIO_printf(bio_err,"\n"); - break; - } - BIO_printf(bio_err,"verify return:%d\n",ok); - return(ok); - } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/des.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/des.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/des.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/des.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHmMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0ECAgR+MA0GCSqGSIb3DQEBAQUABEC2vXI1xQDW6lUHM3zQ 
-/9uBEBOO5A3TtkrklAXq7v01gsIC21t52qSk36REXY+slhNZ0OQ349tgkTsoETHFLoEwMIHw -AgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMI -QnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgR9MA0G -CSqGSIb3DQEBAQUABEB8ujxbabxXUYJhopuDm3oDq4JNqX6Io4p3ro+ShqfIndsXTZ1v5a2N -WtLLCWlHn/habjBwZ/DgQgcKASbZ7QxNMIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIA -oAQIbsL5v1wX98KggAQoAaJ4WHm68fXY1WE5OIjfVBIDpO1K+i8dmKhjnAjrjoyZ9Bwc8rDL -lgQg4CXb805h5xl+GfvSwUaHJayte1m2mcOhs3J2YyqbQ+MEIMIiJQccmhO3oDKm36CFvYR8 -5PjpclVcZyX2ngbwPFMnBAgy0clOAE6UKAAAAAAAAAAAAAA= - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/doc nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/doc --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/doc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/doc 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -int PKCS7_set_content_type(PKCS7 *p7, int type); -Call to set the type of PKCS7 object we are working on - -int PKCS7_SIGNER_INFO_set(PKCS7_SIGNER_INFO *p7i, X509 *x509, EVP_PKEY *pkey, - EVP_MD *dgst); -Use this to setup a signer info -There will also be functions to add signed and unsigned attributes. - -int PKCS7_add_signer(PKCS7 *p7, PKCS7_SIGNER_INFO *p7i); -Add a signer info to the content. - -int PKCS7_add_certificae(PKCS7 *p7, X509 *x509); -int PKCS7_add_crl(PKCS7 *p7, X509_CRL *x509); - ----- - -p7=PKCS7_new(); -PKCS7_set_content_type(p7,NID_pkcs7_signed); - -signer=PKCS7_SINGNER_INFO_new(); -PKCS7_SIGNER_INFO_set(signer,x509,pkey,EVP_md5()); -PKCS7_add_signer(py,signer); - -we are now setup. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/enc.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/enc.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/enc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/enc.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -/* crypto/pkcs7/enc.c */ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] 
- */ -#include <stdio.h> -#include <string.h> -#include <openssl/bio.h> -#include <openssl/x509.h> -#include <openssl/pem.h> -#include <openssl/err.h> - -int main(argc,argv) -int argc; -char *argv[]; - { - X509 *x509; - PKCS7 *p7; - BIO *in; - BIO *data,*p7bio; - char buf[1024*4]; - int i; - int nodetach=1; - char *keyfile = NULL; - const EVP_CIPHER *cipher=NULL; - STACK_OF(X509) *recips=NULL; - - OpenSSL_add_all_algorithms(); - - data=BIO_new(BIO_s_file()); - while(argc > 1) - { - if (strcmp(argv[1],"-nd") == 0) - { - nodetach=1; - argv++; argc--; - } - else if ((strcmp(argv[1],"-c") == 0) && (argc >= 2)) { - if(!(cipher = EVP_get_cipherbyname(argv[2]))) { - fprintf(stderr, "Unknown cipher %s\n", argv[2]); - goto err; - } - argc-=2; - argv+=2; - } else if ((strcmp(argv[1],"-k") == 0) && (argc >= 2)) { - keyfile = argv[2]; - argc-=2; - argv+=2; - if (!(in=BIO_new_file(keyfile,"r"))) goto err; - if (!(x509=PEM_read_bio_X509(in,NULL,NULL,NULL))) - goto err; - if(!recips) recips = sk_X509_new_null(); - sk_X509_push(recips, x509); - BIO_free(in); - } else break; - } - - if(!recips) { - fprintf(stderr, "No recipients\n"); - goto err; - } - - if (!BIO_read_filename(data,argv[1])) goto err; - - p7=PKCS7_new(); -#if 0 - BIO_reset(in); - if ((pkey=PEM_read_bio_PrivateKey(in,NULL,NULL)) == NULL) goto err; - BIO_free(in); - PKCS7_set_type(p7,NID_pkcs7_signedAndEnveloped); - - if (PKCS7_add_signature(p7,x509,pkey,EVP_sha1()) == NULL) goto err; - /* we may want to add more */ - PKCS7_add_certificate(p7,x509); -#else - PKCS7_set_type(p7,NID_pkcs7_enveloped); -#endif - if(!cipher) { -#ifndef OPENSSL_NO_DES - cipher = EVP_des_ede3_cbc(); -#else - fprintf(stderr, "No cipher selected\n"); - goto err; -#endif - } - - if (!PKCS7_set_cipher(p7,cipher)) goto err; - for(i = 0; i < sk_X509_num(recips); i++) { - if (!PKCS7_add_recipient(p7,sk_X509_value(recips, i))) goto err; - } - sk_X509_pop_free(recips, X509_free); - - /* Set the content of the signed to 'data' */ - /* 
PKCS7_content_new(p7,NID_pkcs7_data); not used in envelope */ - - /* could be used, but not in this version :-) - if (!nodetach) PKCS7_set_detached(p7,1); - */ - - if ((p7bio=PKCS7_dataInit(p7,NULL)) == NULL) goto err; - - for (;;) - { - i=BIO_read(data,buf,sizeof(buf)); - if (i <= 0) break; - BIO_write(p7bio,buf,i); - } - BIO_flush(p7bio); - - if (!PKCS7_dataFinal(p7,p7bio)) goto err; - BIO_free(p7bio); - - PEM_write_PKCS7(stdout,p7); - PKCS7_free(p7); - - exit(0); -err: - ERR_load_crypto_strings(); - ERR_print_errors_fp(stderr); - exit(1); - } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/es1.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/es1.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/es1.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/es1.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHmMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0ECAgRuMA0GCSqGSIb3DQEBAQUABEDWak0y/5XZJhQJeCLo -KECcHXkTEbjzYkYNHIinbiPmRK4QbNfs9z2mA3z/c2ykQ4eAqFR2jyNrUMN/+I5XEiv6MIHw -AgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMI -QnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgR9MA0G -CSqGSIb3DQEBAQUABEAWg9+KgtCjc77Jdj1Ve4wGgHjVHbbSYEA1ZqKFDoi15vSr9hfpHmC4 -ycZzcRo16JkTfolefiHZzmyjVz94vSN6MIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIA -oAQI7X4Tk4mcbV6ggASBsHl1mCaJ3RhXWlNPCgCRU53d7M5x6TDZRkvwdtdvW96m1lupT03F -XtonkBqk7oMkH7kGfs5/REQOPjx0QE2Ixmgt1W3szum82EZwA7pZNppcraK7W/odw/7bYZO+ -II3HPmRklE2N9qiu1LPaPUsnYogkO6SennyeL5tZ382vBweL/8pnG0qsbT1OBb65v+llnsjT -pa1T/p+fIx/iJJGE6K9fYFokC6gXLQ6ozXRdOu5oBDB8mPCYYvAqKycidM/MrGGUkpEtS4f0 
-lS31PwQi5YTim8Ig3/TOwVpPX32i46FTuEIEIMHkD/OvpfwCCzXUHHJnKnKUAUvIsSY3vGBs -8ezpUDfBBBj9LHDy32hZ2tQilkDefP5VM2LLdrWgamYEgfiyITQvn08Ul5lQOQxbFKBheFq5 -otCCN4MR+w5eq12xQu6y+f9z0159ag2ru87D0lLtUtXXtCELbO1nUkT2sJ0k/iDs9TOXr6Cx -go1XKYho83hlkXYiCteVizdAbgVGNsNRD4wtIdajsorET/LuJECgp11YeL9w1dlDB0HLEZfi -XCsUphH4jGagba3hDeUSibnjSiJlN0ukfuQurBBbI2UkBAujiEAubKPn7C1FZJRSw6CPPX5t -KEpmcqT1JNk6LO8Js6/1sCmmBh1VGCy1+EuTI9J1p7Dagf4nQ8cHitoCRpHuKZlFHnZyv7tw -Rn/KOhHaYP2VzAh40gQIvKMAAWh9oFsEEIMwIoOmLwLH5wf+8QdbDhoECH8HwZt9a12dBAjL -r4j2zlvtfgQIt7nmEM3wz1EECKlc3EIy1irCBBCAKINcermK3A+jI6ISN2RzBFA3dsh/xwMu -l61aWMBBZzEz/SF92k6n35KZhCC0d6fIVC/1WMv0fnCwQ8oEDynSre216VEFiYKBaQLJe5o/ -mTAxC7Ht3goXnuc+i1FItOkLrgRI/wyvTICEn2WsNZiMADnGaee2bqPnUopo+VMGexJEtCPk -l0ZNlDJGquPDkpUwaEtecVZzCNyVPYyyF4J/l8rmGDhDdYUIC8IKBEg/ip/E0BuubBLWVbv+ -HRl4QrnGpyCyeXRXXK603QP3sT1Zbbm1v5pI/loOhVHi724LmtXHSyp5qv9MDcxE1PoX10LY -gBRtlwwESPeCF8bK5jk4xIQMhK5NMHj1Y1KQWTZ9NGITBL4hjRq2qp4Qk5GIpGgOVPopAuCo -TIyPikpqBRNtLSPRSsDs6QPUPzWBh6JgxwRQblnDKKUkxUcnJiD4i9QtGa/ZabMn4KxtNOBL -5JSh1nJkaLXCZY070131WWPAByLcd5TiXq8x84pmzV5NNk4tiMpoXhJNsx8e4rskQQlKd6ME -SCe2eYDHKcKPX3WJbUzhrJSQ92/aWnI2iUY8WQ+kSNyiZ2QUjyuUg9Z66g/0d2STlvPOBHT/ -y5ODP2CwbcWX4QmCbUc9TT66fQRIrRVuwvtOfnUueyGgYhJ3HpAJfVaB/7kap5bj7Fi/azW4 -9JDfd1bC/W9h0Kyk7RO2gxvE0hIHc26mZJHTm9MNP5D328MnM2MdBEjKjQBtgrp+lFIii7MP -nGHFTKUkG4WAIZJCf/CsT+p6/SW0qG71Me/YcSw5STB24j+a+HgMV8RVIeUlkP4z0IWWrSoB -Gh4d/Z0EUMCVHs/HZ/bWgiyhtHpvuVAzidm8D81p1LJ5BQX5/5f/m+q5+fS/npL27dTEbNqs -LSB6ij3MZAi7LwHWpTn9zWnDajCMEj9vlaV7mcKtHK5iBEg85agFi1h3MvicqLtoFe5hVv9T -tG0j6CRkjkixPzivltlrf44KHv14gLM0XJxCGyq7vd3l8QYr3+9at0zNnX/yqTiBnsnE5dUE -SIgrYuz87M2gi/ER9PcDoTtONH3+CkcqVy03q/Sj8cVWD/b1KgEhqnNOfc8Ak9PctyR/ItcR -8Me5XVn1GJKkQJk4O29fxvgNoAQIrIESvUWGshAEQByXiFoFTDUByjTlgjcy77H1lrH+y3P/ -wAInJjJAut9kCNyGJV0PA4kdPB5USWltuO6t8gk4Pd2YBMl09zqUWkAEUCjFrtZ3mapjcGZI -uQTASKR5LSjXoWxTT5gae/+64MerF/oCEeO3ehRTpjnPrsiRDo0rWIQTaj9+Nro8Z2xtWstw -RnfoAHIxV1lEamPwjsceBEi2SD9hiifFeO5ECiVoaE1FdXUXhU+jwYAMx6jHWO9hMkYzS9pM 
-Y3IyWR5ybtOjiQgkUdvRJPUPGf5DVVMPnymGX25aDh5PYpIESPbsM9akCpOOVuscywcUswmU -o7dXvlB48WWCfg/al3BQKAZbn5ZXtWNwpUZkrEdHsrxAVv3rxRcdkT3Z1fzUbIuYkLJN200o -WgRIJvn6RO8KEj7/HOg2sYuuM8nz1kR0TSgwX7/0y/7JfjBa0JIlP7o75sNJscE8oyoIMzuy -Dvn6/U9g3BCDXn83A/s+ke60qn9gBFC6NAeLOlXal1YVWYhMQNOqCyUfAjiXBTawaysQb1Mk -YgeNlF8xuEFcUQWIP+vNG7FJ5JPMaMRL4YEoaQ3sVFhYOERJR1cSb+8xt4QCYtBKQgRIUOmJ -CHW5o1hXJWJiTkZK2qWFcEMzTINSj5EpYFySr8aVBjkRnI7vxegRT/+XZZXoYedQ3UNsnGI3 -DdkWii5VzX0PNF6C60pfBEiVpausYuX7Wjb3Lfm8cBj7GgN69i6Pm2gxtobVcmpo2nS4D714 -ePyhlX9n8kJ6QAcqWMRj22smDPrHVGNTizfzHBh5zNllK9gESJizILOWI327og3ZWp+qUht5 -kNDJCzMK7Z09UAy+h+vq0VTQuEo3FgLzVdqkJujjSL4Nx97lXg51AovrEn3nd4evydwcjKLX -1wRIo72NaeWuUEQ+rt1SlCsOJ7k1ioJSqhrPOfvwcaFcb4beVet1JWiy4yvowTjLDGbUje2s -xjrlVt4BJWI/uA6jbQsrxSe89ADZBAi5YAlR4qszeAQIXD3VSBVKbRUECNTtyvw9vvqXBAhb -IZNn4H4cxgQI+XW7GkfL+ekECCCCg2reMyGDBAh1PYqkg3lw3gQQkNlggEPU+BH8eh7Gm7n7 -7AQIjC5EWbkil5cEEKcpuqwTWww/X89KnQAg8TcECJPomqHvrlZFBBiRSuIiHpmN+PaujXpv -qZV2VhjkB2j09GEECOIdv8AVOJgKBAjlHgIqAD9jZQQIXHbs44+wogcEIGGqTACRJxrhMcMG -X8drNjksIPt+snxTXUBIkTVpZWoABAh6unXPTyIr8QQgBF8xKoX27MWk7iTNmkSNZggZXa2a -DWCGHSYLngbSOHIECD9XmO6VsvTgBAjfqB70CEW4WwQIVIBkbCocznUEEHB/zFXy/sR4OYHe -UfbNPnIEEDWBB/NTCLMGE+o8BfyujcAECFik7GQnnF9VBBAhLXExQeWAofZNc6NtN7qZBCC1 -gVIS3ruTwKltmcrgx3heT3M8ZJhCfWa+6KzchnmKygQQ+1NL5sSzR4m/fdrqxHFyUAQYCT2x -PamQr3wK3h0lyZER+4H0zPM86AhFBBC3CkmvL2vjflMfujnzPBVpBBge9rMbI5+0q9DLrTiT -5F3AIgXLpD8PQWAECHkHVo6RomV3BAgMbi8E271UeAQIqtS8wnI3XngECG3TWmOMb3/iBEha -y+mvCS6I3n3JfL8e1B5P4qX9/czJRaERLuKpGNjLiL4A+zxN0LZ0UHd0qfmJjwOTxAx3iJAC -lGXX4nB9ATYPUT5EU+o1Y4sECN01pP6vWNIdBDAsiE0Ts8/9ltJlqX2B3AoOM4qOt9EaCjXf -lB+aEmrhtjUwuZ6GqS5Ke7P6XnakTk4ECCLIMatNdootAAAAAAAAAAAAAA== ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/example.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/example.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/example.c 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/example.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,329 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <openssl/pkcs7.h> -#include <openssl/asn1_mac.h> -#include <openssl/x509.h> - -int add_signed_time(PKCS7_SIGNER_INFO *si) - { - ASN1_UTCTIME *sign_time; - - /* The last parameter is the amount to add/subtract from the current - * time (in seconds) */ - sign_time=X509_gmtime_adj(NULL,0); - PKCS7_add_signed_attribute(si,NID_pkcs9_signingTime, - V_ASN1_UTCTIME,(char *)sign_time); - return(1); - } - -ASN1_UTCTIME *get_signed_time(PKCS7_SIGNER_INFO *si) - { - ASN1_TYPE *so; - - so=PKCS7_get_signed_attribute(si,NID_pkcs9_signingTime); - if (so->type == V_ASN1_UTCTIME) - return so->value.utctime; - return NULL; - } - -static int signed_string_nid= -1; - -void add_signed_string(PKCS7_SIGNER_INFO *si, char *str) - { - ASN1_OCTET_STRING *os; - - /* To a an object of OID 1.2.3.4.5, which is an octet string */ - if (signed_string_nid == -1) - signed_string_nid= - OBJ_create("1.2.3.4.5","OID_example","Our example OID"); - os=ASN1_OCTET_STRING_new(); - ASN1_OCTET_STRING_set(os,(unsigned char*)str,strlen(str)); - /* When we add, we do not free */ - PKCS7_add_signed_attribute(si,signed_string_nid, - V_ASN1_OCTET_STRING,(char *)os); - } - -int get_signed_string(PKCS7_SIGNER_INFO *si, char *buf, int len) - { - ASN1_TYPE *so; - ASN1_OCTET_STRING *os; - int i; - - if (signed_string_nid == -1) - signed_string_nid= - OBJ_create("1.2.3.4.5","OID_example","Our example OID"); - /* To retrieve */ - so=PKCS7_get_signed_attribute(si,signed_string_nid); - if (so != NULL) - { - if (so->type == V_ASN1_OCTET_STRING) - { - os=so->value.octet_string; - i=os->length; - if ((i+1) > len) - i=len-1; - memcpy(buf,os->data,i); - return(i); - } - } - return(0); - } - -static int signed_seq2string_nid= -1; -/* ########################################### */ -int add_signed_seq2string(PKCS7_SIGNER_INFO *si, char *str1, char 
*str2) - { - /* To add an object of OID 1.9.999, which is a sequence containing - * 2 octet strings */ - unsigned char *p; - ASN1_OCTET_STRING *os1,*os2; - ASN1_STRING *seq; - unsigned char *data; - int i,total; - - if (signed_seq2string_nid == -1) - signed_seq2string_nid= - OBJ_create("1.9.9999","OID_example","Our example OID"); - - os1=ASN1_OCTET_STRING_new(); - os2=ASN1_OCTET_STRING_new(); - ASN1_OCTET_STRING_set(os1,(unsigned char*)str1,strlen(str1)); - ASN1_OCTET_STRING_set(os2,(unsigned char*)str1,strlen(str1)); - i =i2d_ASN1_OCTET_STRING(os1,NULL); - i+=i2d_ASN1_OCTET_STRING(os2,NULL); - total=ASN1_object_size(1,i,V_ASN1_SEQUENCE); - - data=malloc(total); - p=data; - ASN1_put_object(&p,1,i,V_ASN1_SEQUENCE,V_ASN1_UNIVERSAL); - i2d_ASN1_OCTET_STRING(os1,&p); - i2d_ASN1_OCTET_STRING(os2,&p); - - seq=ASN1_STRING_new(); - ASN1_STRING_set(seq,data,total); - free(data); - ASN1_OCTET_STRING_free(os1); - ASN1_OCTET_STRING_free(os2); - - PKCS7_add_signed_attribute(si,signed_seq2string_nid, - V_ASN1_SEQUENCE,(char *)seq); - return(1); - } - -/* For this case, I will malloc the return strings */ -int get_signed_seq2string(PKCS7_SIGNER_INFO *si, char **str1, char **str2) - { - ASN1_TYPE *so; - - if (signed_seq2string_nid == -1) - signed_seq2string_nid= - OBJ_create("1.9.9999","OID_example","Our example OID"); - /* To retrieve */ - so=PKCS7_get_signed_attribute(si,signed_seq2string_nid); - if (so && (so->type == V_ASN1_SEQUENCE)) - { - ASN1_const_CTX c; - ASN1_STRING *s; - long length; - ASN1_OCTET_STRING *os1,*os2; - - s=so->value.sequence; - c.p=ASN1_STRING_data(s); - c.max=c.p+ASN1_STRING_length(s); - if (!asn1_GetSequence(&c,&length)) goto err; - /* Length is the length of the seqence */ - - c.q=c.p; - if ((os1=d2i_ASN1_OCTET_STRING(NULL,&c.p,c.slen)) == NULL) - goto err; - c.slen-=(c.p-c.q); - - c.q=c.p; - if ((os2=d2i_ASN1_OCTET_STRING(NULL,&c.p,c.slen)) == NULL) - goto err; - c.slen-=(c.p-c.q); - - if (!asn1_const_Finish(&c)) goto err; - 
*str1=malloc(os1->length+1); - *str2=malloc(os2->length+1); - memcpy(*str1,os1->data,os1->length); - memcpy(*str2,os2->data,os2->length); - (*str1)[os1->length]='\0'; - (*str2)[os2->length]='\0'; - ASN1_OCTET_STRING_free(os1); - ASN1_OCTET_STRING_free(os2); - return(1); - } -err: - return(0); - } - - -/* ####################################### - * THE OTHER WAY TO DO THINGS - * ####################################### - */ -X509_ATTRIBUTE *create_time(void) - { - ASN1_UTCTIME *sign_time; - X509_ATTRIBUTE *ret; - - /* The last parameter is the amount to add/subtract from the current - * time (in seconds) */ - sign_time=X509_gmtime_adj(NULL,0); - ret=X509_ATTRIBUTE_create(NID_pkcs9_signingTime, - V_ASN1_UTCTIME,(char *)sign_time); - return(ret); - } - -ASN1_UTCTIME *sk_get_time(STACK_OF(X509_ATTRIBUTE) *sk) - { - ASN1_TYPE *so; - PKCS7_SIGNER_INFO si; - - si.auth_attr=sk; - so=PKCS7_get_signed_attribute(&si,NID_pkcs9_signingTime); - if (so->type == V_ASN1_UTCTIME) - return so->value.utctime; - return NULL; - } - -X509_ATTRIBUTE *create_string(char *str) - { - ASN1_OCTET_STRING *os; - X509_ATTRIBUTE *ret; - - /* To a an object of OID 1.2.3.4.5, which is an octet string */ - if (signed_string_nid == -1) - signed_string_nid= - OBJ_create("1.2.3.4.5","OID_example","Our example OID"); - os=ASN1_OCTET_STRING_new(); - ASN1_OCTET_STRING_set(os,(unsigned char*)str,strlen(str)); - /* When we add, we do not free */ - ret=X509_ATTRIBUTE_create(signed_string_nid, - V_ASN1_OCTET_STRING,(char *)os); - return(ret); - } - -int sk_get_string(STACK_OF(X509_ATTRIBUTE) *sk, char *buf, int len) - { - ASN1_TYPE *so; - ASN1_OCTET_STRING *os; - int i; - PKCS7_SIGNER_INFO si; - - si.auth_attr=sk; - - if (signed_string_nid == -1) - signed_string_nid= - OBJ_create("1.2.3.4.5","OID_example","Our example OID"); - /* To retrieve */ - so=PKCS7_get_signed_attribute(&si,signed_string_nid); - if (so != NULL) - { - if (so->type == V_ASN1_OCTET_STRING) - { - os=so->value.octet_string; - i=os->length; - 
if ((i+1) > len) - i=len-1; - memcpy(buf,os->data,i); - return(i); - } - } - return(0); - } - -X509_ATTRIBUTE *add_seq2string(PKCS7_SIGNER_INFO *si, char *str1, char *str2) - { - /* To add an object of OID 1.9.999, which is a sequence containing - * 2 octet strings */ - unsigned char *p; - ASN1_OCTET_STRING *os1,*os2; - ASN1_STRING *seq; - X509_ATTRIBUTE *ret; - unsigned char *data; - int i,total; - - if (signed_seq2string_nid == -1) - signed_seq2string_nid= - OBJ_create("1.9.9999","OID_example","Our example OID"); - - os1=ASN1_OCTET_STRING_new(); - os2=ASN1_OCTET_STRING_new(); - ASN1_OCTET_STRING_set(os1,(unsigned char*)str1,strlen(str1)); - ASN1_OCTET_STRING_set(os2,(unsigned char*)str1,strlen(str1)); - i =i2d_ASN1_OCTET_STRING(os1,NULL); - i+=i2d_ASN1_OCTET_STRING(os2,NULL); - total=ASN1_object_size(1,i,V_ASN1_SEQUENCE); - - data=malloc(total); - p=data; - ASN1_put_object(&p,1,i,V_ASN1_SEQUENCE,V_ASN1_UNIVERSAL); - i2d_ASN1_OCTET_STRING(os1,&p); - i2d_ASN1_OCTET_STRING(os2,&p); - - seq=ASN1_STRING_new(); - ASN1_STRING_set(seq,data,total); - free(data); - ASN1_OCTET_STRING_free(os1); - ASN1_OCTET_STRING_free(os2); - - ret=X509_ATTRIBUTE_create(signed_seq2string_nid, - V_ASN1_SEQUENCE,(char *)seq); - return(ret); - } - -/* For this case, I will malloc the return strings */ -int sk_get_seq2string(STACK_OF(X509_ATTRIBUTE) *sk, char **str1, char **str2) - { - ASN1_TYPE *so; - PKCS7_SIGNER_INFO si; - - if (signed_seq2string_nid == -1) - signed_seq2string_nid= - OBJ_create("1.9.9999","OID_example","Our example OID"); - - si.auth_attr=sk; - /* To retrieve */ - so=PKCS7_get_signed_attribute(&si,signed_seq2string_nid); - if (so->type == V_ASN1_SEQUENCE) - { - ASN1_const_CTX c; - ASN1_STRING *s; - long length; - ASN1_OCTET_STRING *os1,*os2; - - s=so->value.sequence; - c.p=ASN1_STRING_data(s); - c.max=c.p+ASN1_STRING_length(s); - if (!asn1_GetSequence(&c,&length)) goto err; - /* Length is the length of the seqence */ - - c.q=c.p; - if 
((os1=d2i_ASN1_OCTET_STRING(NULL,&c.p,c.slen)) == NULL) - goto err; - c.slen-=(c.p-c.q); - - c.q=c.p; - if ((os2=d2i_ASN1_OCTET_STRING(NULL,&c.p,c.slen)) == NULL) - goto err; - c.slen-=(c.p-c.q); - - if (!asn1_const_Finish(&c)) goto err; - *str1=malloc(os1->length+1); - *str2=malloc(os2->length+1); - memcpy(*str1,os1->data,os1->length); - memcpy(*str2,os2->data,os2->length); - (*str1)[os1->length]='\0'; - (*str2)[os2->length]='\0'; - ASN1_OCTET_STRING_free(os1); - ASN1_OCTET_STRING_free(os2); - return(1); - } -err: - return(0); - } - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/example.h nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/example.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/example.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/example.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -/* ==================================================================== - * Copyright (c) 1999 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. 
For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). 
- * - */ - -int add_signed_time(PKCS7_SIGNER_INFO *si); -ASN1_UTCTIME *get_signed_time(PKCS7_SIGNER_INFO *si); -int get_signed_seq2string(PKCS7_SIGNER_INFO *si, char **str1, char **str2); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/infokey.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/infokey.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/infokey.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/infokey.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIBOgIBAAJBAK3nI4nuDYe3nDJES5WBc90igEstxWC4/h4YY+/ciYki35U8ets9 -mgaoCNYp/e9BCZHtvK2Y+fYokGJv5+cMTQsCAwEAAQJBAIHpvXvqEcOEoDRRHuIG -fkcB4jPHcr9KE9TpxabH6xs9beN6OJnkePXAHwaz5MnUgSnbpOKq+cw8miKjXwe/ -zVECIQDVLwncT2lRmXarEYHzb+q/0uaSvKhWKKt3kJasLNTrAwIhANDUc/ghut29 -p3jJYjurzUKuG774/5eLjPLsxPPIZzNZAiA/10hSq41UnGqHLEUIS9m2/EeEZe7b -bm567dfRU9OnVQIgDo8ROrZXSchEGbaog5J5r/Fle83uO8l93R3GqVxKXZkCIFfk -IPD5PIYQAyyod3hyKKza7ZP4CGY4oOfZetbkSGGG ------END RSA PRIVATE KEY----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/info.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/info.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/info.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/info.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -issuer :/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=DEMONSTRATION AND TESTING/CN=DEMO ZERO VALUE CA -subject:/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=SMIME 003/CN=Information/Email=info@cryptsoft.com -serial :047D - -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1149 (0x47d) - Signature Algorithm: md5withRSAEncryption - Issuer: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=DEMONSTRATION AND TESTING, CN=DEMO ZERO VALUE CA - Validity - Not Before: May 13 05:40:58 1998 GMT - Not After : May 12 05:40:58 2000 GMT - Subject: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty 
Ltd, OU=SMIME 003, CN=Information/Email=info@cryptsoft.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Modulus: - 00:ad:e7:23:89:ee:0d:87:b7:9c:32:44:4b:95:81: - 73:dd:22:80:4b:2d:c5:60:b8:fe:1e:18:63:ef:dc: - 89:89:22:df:95:3c:7a:db:3d:9a:06:a8:08:d6:29: - fd:ef:41:09:91:ed:bc:ad:98:f9:f6:28:90:62:6f: - e7:e7:0c:4d:0b - Exponent: 65537 (0x10001) - X509v3 extensions: - Netscape Comment: - Generated with SSLeay - Signature Algorithm: md5withRSAEncryption - 52:15:ea:88:f4:f0:f9:0b:ef:ce:d5:f8:83:40:61:16:5e:55: - f9:ce:2d:d1:8b:31:5c:03:c6:2d:10:7c:61:d5:5c:0a:42:97: - d1:fd:65:b6:b6:84:a5:39:ec:46:ec:fc:e0:0d:d9:22:da:1b: - 50:74:ad:92:cb:4e:90:e5:fa:7d - ------BEGIN CERTIFICATE----- -MIICTDCCAfagAwIBAgICBH0wDQYJKoZIhvcNAQEEBQAwgZIxCzAJBgNVBAYTAkFV -MRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFuZTEaMBgGA1UE -ChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsTGURFTU9OU1RSQVRJT04gQU5E -IFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBWQUxVRSBDQTAeFw05ODA1MTMw -NTQwNThaFw0wMDA1MTIwNTQwNThaMIGeMQswCQYDVQQGEwJBVTETMBEGA1UECBMK -UXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMRIwEAYDVQQLEwlTTUlNRSAwMDMxFDASBgNVBAMTC0luZm9ybWF0 -aW9uMSEwHwYJKoZIhvcNAQkBFhJpbmZvQGNyeXB0c29mdC5jb20wXDANBgkqhkiG -9w0BAQEFAANLADBIAkEArecjie4Nh7ecMkRLlYFz3SKASy3FYLj+Hhhj79yJiSLf -lTx62z2aBqgI1in970EJke28rZj59iiQYm/n5wxNCwIDAQABoygwJjAkBglghkgB -hvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EA -UhXqiPTw+QvvztX4g0BhFl5V+c4t0YsxXAPGLRB8YdVcCkKX0f1ltraEpTnsRuz8 -4A3ZItobUHStkstOkOX6fQ== ------END CERTIFICATE----- - ------BEGIN RSA PRIVATE KEY----- -MIIBOgIBAAJBAK3nI4nuDYe3nDJES5WBc90igEstxWC4/h4YY+/ciYki35U8ets9 -mgaoCNYp/e9BCZHtvK2Y+fYokGJv5+cMTQsCAwEAAQJBAIHpvXvqEcOEoDRRHuIG -fkcB4jPHcr9KE9TpxabH6xs9beN6OJnkePXAHwaz5MnUgSnbpOKq+cw8miKjXwe/ -zVECIQDVLwncT2lRmXarEYHzb+q/0uaSvKhWKKt3kJasLNTrAwIhANDUc/ghut29 -p3jJYjurzUKuG774/5eLjPLsxPPIZzNZAiA/10hSq41UnGqHLEUIS9m2/EeEZe7b -bm567dfRU9OnVQIgDo8ROrZXSchEGbaog5J5r/Fle83uO8l93R3GqVxKXZkCIFfk 
-IPD5PIYQAyyod3hyKKza7ZP4CGY4oOfZetbkSGGG ------END RSA PRIVATE KEY----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/Makefile nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/Makefile --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -39,20 +39,6 @@ all: lib -testapps: enc dec sign verify - -enc: enc.o lib - $(CC) $(CFLAGS) -o enc enc.o $(PEX_LIBS) $(LIB) $(EX_LIBS) - -dec: dec.o lib - $(CC) $(CFLAGS) -o dec dec.o $(PEX_LIBS) $(LIB) $(EX_LIBS) - -sign: sign.o lib - $(CC) $(CFLAGS) -o sign sign.o $(PEX_LIBS) $(LIB) $(EX_LIBS) - -verify: verify.o example.o lib - $(CC) $(CFLAGS) -o verify verify.o $(PEX_LIBS) example.o $(LIB) $(EX_LIBS) - lib: $(LIBOBJ) $(AR) $(LIB) $(LIBOBJ) $(RANLIB) $(LIB) || echo Never mind. diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,194 +0,0 @@ -# -# OpenSSL/crypto/pkcs7/Makefile -# - -DIR= pkcs7 -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -PEX_LIBS= -EX_LIBS= - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= pk7_asn1.c pk7_lib.c pkcs7err.c pk7_doit.c pk7_smime.c pk7_attr.c \ - pk7_mime.c bio_pk7.c -LIBOBJ= pk7_asn1.o pk7_lib.o pkcs7err.o pk7_doit.o pk7_smime.o pk7_attr.o \ - pk7_mime.o bio_pk7.o - -SRC= $(LIBSRC) - -EXHEADER= pkcs7.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -test: - -all: lib - -testapps: enc dec sign verify - -enc: enc.o lib - $(CC) $(CFLAGS) -o enc enc.o $(PEX_LIBS) $(LIB) $(EX_LIBS) - -dec: dec.o lib - $(CC) $(CFLAGS) -o dec dec.o $(PEX_LIBS) $(LIB) $(EX_LIBS) - -sign: sign.o lib - $(CC) $(CFLAGS) -o sign sign.o $(PEX_LIBS) $(LIB) $(EX_LIBS) - -verify: verify.o example.o lib - $(CC) $(CFLAGS) -o verify verify.o $(PEX_LIBS) example.o $(LIB) $(EX_LIBS) - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff enc dec sign verify - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -bio_pk7.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -bio_pk7.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -bio_pk7.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -bio_pk7.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -bio_pk7.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -bio_pk7.o: ../../include/openssl/symhacks.h bio_pk7.c -pk7_asn1.o: ../../e_os.h ../../include/openssl/asn1.h -pk7_asn1.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -pk7_asn1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -pk7_asn1.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pk7_asn1.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pk7_asn1.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pk7_asn1.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pk7_asn1.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pk7_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pk7_asn1.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pk7_asn1.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pk7_asn1.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pk7_asn1.o: ../../include/openssl/x509_vfy.h ../cryptlib.h pk7_asn1.c -pk7_attr.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -pk7_attr.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pk7_attr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pk7_attr.o: ../../include/openssl/ec.h 
../../include/openssl/ecdh.h -pk7_attr.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pk7_attr.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pk7_attr.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pk7_attr.o: ../../include/openssl/opensslconf.h -pk7_attr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pk7_attr.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -pk7_attr.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pk7_attr.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pk7_attr.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pk7_attr.o: ../../include/openssl/x509_vfy.h pk7_attr.c -pk7_doit.o: ../../e_os.h ../../include/openssl/asn1.h -pk7_doit.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pk7_doit.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pk7_doit.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pk7_doit.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pk7_doit.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pk7_doit.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pk7_doit.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pk7_doit.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pk7_doit.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -pk7_doit.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pk7_doit.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pk7_doit.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pk7_doit.o: ../../include/openssl/x509v3.h ../cryptlib.h pk7_doit.c -pk7_lib.o: ../../e_os.h ../../include/openssl/asn1.h -pk7_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pk7_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pk7_lib.o: ../../include/openssl/ec.h 
../../include/openssl/ecdh.h -pk7_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pk7_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pk7_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pk7_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -pk7_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -pk7_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pk7_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pk7_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pk7_lib.o: ../asn1/asn1_locl.h ../cryptlib.h pk7_lib.c -pk7_mime.o: ../../e_os.h ../../include/openssl/asn1.h -pk7_mime.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pk7_mime.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pk7_mime.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pk7_mime.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -pk7_mime.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -pk7_mime.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -pk7_mime.o: ../../include/openssl/opensslconf.h -pk7_mime.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pk7_mime.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -pk7_mime.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -pk7_mime.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pk7_mime.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -pk7_mime.o: ../cryptlib.h pk7_mime.c -pk7_smime.o: ../../e_os.h ../../include/openssl/asn1.h -pk7_smime.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pk7_smime.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pk7_smime.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pk7_smime.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pk7_smime.o: 
../../include/openssl/err.h ../../include/openssl/evp.h -pk7_smime.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pk7_smime.o: ../../include/openssl/objects.h -pk7_smime.o: ../../include/openssl/opensslconf.h -pk7_smime.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pk7_smime.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pk7_smime.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pk7_smime.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pk7_smime.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pk7_smime.o: ../cryptlib.h pk7_smime.c -pkcs7err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -pkcs7err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pkcs7err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -pkcs7err.o: ../../include/openssl/opensslconf.h -pkcs7err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pkcs7err.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pkcs7err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -pkcs7err.o: pkcs7err.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/a1 nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/a1 --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/a1 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/a1 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -j,H>__DzEL VJ觬E3Yx%_k -3)DLSc8% M \ No newline at end of file diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/a2 nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/a2 --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/a2 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/a2 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -k~@a,NM͹ <O( KP騠K>Uo_Bqrm?٠t?tρId2 \ No newline at end of file Binary files 
/tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/cert.p7c and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/cert.p7c differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/smime.p7m and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/smime.p7m differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/p7/smime.p7s and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/p7/smime.p7s differ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/pk7_doit.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/pk7_doit.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/pk7_doit.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/pk7_doit.c 2015-01-20 21:22:17.000000000 +0000 @@ -440,6 +440,11 @@ { case NID_pkcs7_signed: data_body=PKCS7_get_octet_string(p7->d.sign->contents); + if (!PKCS7_is_detached(p7) && data_body == NULL) + { + PKCS7err(PKCS7_F_PKCS7_DATADECODE,PKCS7_R_INVALID_SIGNED_DATA_TYPE); + goto err; + } md_sk=p7->d.sign->md_algs; break; case NID_pkcs7_signedAndEnveloped: @@ -928,6 +933,7 @@ if (EVP_DigestSignUpdate(&mctx,abuf,alen) <= 0) goto err; OPENSSL_free(abuf); + abuf = NULL; if (EVP_DigestSignFinal(&mctx, NULL, &siglen) <= 0) goto err; abuf = OPENSSL_malloc(siglen); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/pkcs7err.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/pkcs7err.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/pkcs7err.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/pkcs7err.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,6 +1,6 @@ /* crypto/pkcs7/pkcs7err.c */ /* ==================================================================== - * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved. 
+ * Copyright (c) 1999-2014 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -130,6 +130,7 @@ {ERR_REASON(PKCS7_R_ERROR_SETTING_CIPHER),"error setting cipher"}, {ERR_REASON(PKCS7_R_INVALID_MIME_TYPE) ,"invalid mime type"}, {ERR_REASON(PKCS7_R_INVALID_NULL_POINTER),"invalid null pointer"}, +{ERR_REASON(PKCS7_R_INVALID_SIGNED_DATA_TYPE),"invalid signed data type"}, {ERR_REASON(PKCS7_R_MIME_NO_CONTENT_TYPE),"mime no content type"}, {ERR_REASON(PKCS7_R_MIME_PARSE_ERROR) ,"mime parse error"}, {ERR_REASON(PKCS7_R_MIME_SIG_PARSE_ERROR),"mime sig parse error"}, diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/pkcs7.h nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/pkcs7.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/pkcs7.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/pkcs7.h 2015-01-20 21:22:17.000000000 +0000 @@ -233,10 +233,6 @@ (OBJ_obj2nid((a)->type) == NID_pkcs7_signedAndEnveloped) #define PKCS7_type_is_data(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_data) #define PKCS7_type_is_digest(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_digest) -#define PKCS7_type_is_encrypted(a) \ - (OBJ_obj2nid((a)->type) == NID_pkcs7_encrypted) - -#define PKCS7_type_is_digest(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_digest) #define PKCS7_set_detached(p,v) \ PKCS7_ctrl(p,PKCS7_OP_SET_DETACHED_SIGNATURE,v,NULL) @@ -453,6 +449,7 @@ #define PKCS7_R_ERROR_SETTING_CIPHER 121 #define PKCS7_R_INVALID_MIME_TYPE 131 #define PKCS7_R_INVALID_NULL_POINTER 143 +#define PKCS7_R_INVALID_SIGNED_DATA_TYPE 155 #define PKCS7_R_MIME_NO_CONTENT_TYPE 132 #define PKCS7_R_MIME_PARSE_ERROR 133 #define PKCS7_R_MIME_SIG_PARSE_ERROR 134 diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/server.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/server.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/server.pem 
2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/server.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -issuer= /C=AU/ST=Queensland/O=CryptSoft Pty Ltd/CN=Test CA (1024 bit) -subject=/C=AU/ST=Queensland/O=CryptSoft Pty Ltd/CN=Server test cert (512 bit) ------BEGIN CERTIFICATE----- -MIIB6TCCAVICAQAwDQYJKoZIhvcNAQEEBQAwWzELMAkGA1UEBhMCQVUxEzARBgNV -BAgTClF1ZWVuc2xhbmQxGjAYBgNVBAoTEUNyeXB0U29mdCBQdHkgTHRkMRswGQYD -VQQDExJUZXN0IENBICgxMDI0IGJpdCkwHhcNOTcwNjA5MTM1NzQ2WhcNOTgwNjA5 -MTM1NzQ2WjBjMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDEaMBgG -A1UEChMRQ3J5cHRTb2Z0IFB0eSBMdGQxIzAhBgNVBAMTGlNlcnZlciB0ZXN0IGNl -cnQgKDUxMiBiaXQpMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJ+zw4Qnlf8SMVIP -Fe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVDTGiXav6ooKXfX3j/7tdkuD8Ey2// -Kv7+ue0CAwEAATANBgkqhkiG9w0BAQQFAAOBgQB4TMR2CvacKE9wAsu9jyCX8YiW -mgCM+YoP6kt4Zkj2z5IRfm7WrycKsnpnOR+tGeqAjkCeZ6/36o9l91RvPnN1VJ/i -xQv2df0KFeMr00IkDdTNAdIWqFkSsZTAY2QAdgenb7MB1joejquYzO2DQIO7+wpH -irObpESxAZLySCmPPg== ------END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIIBPAIBAAJBAJ+zw4Qnlf8SMVIPFe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVD -TGiXav6ooKXfX3j/7tdkuD8Ey2//Kv7+ue0CAwEAAQJAN6W31vDEP2DjdqhzCDDu -OA4NACqoiFqyblo7yc2tM4h4xMbC3Yx5UKMN9ZkCtX0gzrz6DyF47bdKcWBzNWCj -gQIhANEoojVt7hq+SQ6MCN6FTAysGgQf56Q3TYoJMoWvdiXVAiEAw3e3rc+VJpOz -rHuDo6bgpjUAAXM+v3fcpsfZSNO6V7kCIQCtbVjanpUwvZkMI9by02oUk9taki3b -PzPfAfNPYAbCJQIhAJXNQDWyqwn/lGmR11cqY2y9nZ1+5w3yHGatLrcDnQHxAiEA -vnlEGo8K85u+KwIOimM48ZG8oTk7iFdkqLJR1utT3aU= ------END RSA PRIVATE KEY----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/sign.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/sign.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/sign.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/sign.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,154 +0,0 @@ -/* crypto/pkcs7/sign.c */ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. 
- * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. 
If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] 
- */ -#include <stdio.h> -#include <string.h> -#include <openssl/bio.h> -#include <openssl/x509.h> -#include <openssl/pem.h> -#include <openssl/err.h> - -int main(argc,argv) -int argc; -char *argv[]; - { - X509 *x509; - EVP_PKEY *pkey; - PKCS7 *p7; - PKCS7_SIGNER_INFO *si; - BIO *in; - BIO *data,*p7bio; - char buf[1024*4]; - int i; - int nodetach=0; - -#ifndef OPENSSL_NO_MD2 - EVP_add_digest(EVP_md2()); -#endif -#ifndef OPENSSL_NO_MD5 - EVP_add_digest(EVP_md5()); -#endif -#ifndef OPENSSL_NO_SHA1 - EVP_add_digest(EVP_sha1()); -#endif -#ifndef OPENSSL_NO_MDC2 - EVP_add_digest(EVP_mdc2()); -#endif - - data=BIO_new(BIO_s_file()); -again: - if (argc > 1) - { - if (strcmp(argv[1],"-nd") == 0) - { - nodetach=1; - argv++; argc--; - goto again; - } - if (!BIO_read_filename(data,argv[1])) - goto err; - } - else - BIO_set_fp(data,stdin,BIO_NOCLOSE); - - if ((in=BIO_new_file("server.pem","r")) == NULL) goto err; - if ((x509=PEM_read_bio_X509(in,NULL,NULL,NULL)) == NULL) goto err; - BIO_reset(in); - if ((pkey=PEM_read_bio_PrivateKey(in,NULL,NULL,NULL)) == NULL) goto err; - BIO_free(in); - - p7=PKCS7_new(); - PKCS7_set_type(p7,NID_pkcs7_signed); - - si=PKCS7_add_signature(p7,x509,pkey,EVP_sha1()); - if (si == NULL) goto err; - - /* If you do this then you get signing time automatically added */ - PKCS7_add_signed_attribute(si, NID_pkcs9_contentType, V_ASN1_OBJECT, - OBJ_nid2obj(NID_pkcs7_data)); - - /* we may want to add more */ - PKCS7_add_certificate(p7,x509); - - /* Set the content of the signed to 'data' */ - PKCS7_content_new(p7,NID_pkcs7_data); - - if (!nodetach) - PKCS7_set_detached(p7,1); - - if ((p7bio=PKCS7_dataInit(p7,NULL)) == NULL) goto err; - - for (;;) - { - i=BIO_read(data,buf,sizeof(buf)); - if (i <= 0) break; - BIO_write(p7bio,buf,i); - } - - if (!PKCS7_dataFinal(p7,p7bio)) goto err; - BIO_free(p7bio); - - PEM_write_PKCS7(stdout,p7); - PKCS7_free(p7); - - exit(0); -err: - ERR_load_crypto_strings(); - ERR_print_errors_fp(stderr); - exit(1); - } - diff -Nru 
nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/3des.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/3des.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/3des.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/3des.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHmMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0ECAgR+MA0GCSqGSIb3DQEBAQUABEC2vXI1xQDW6lUHM3zQ -/9uBEBOO5A3TtkrklAXq7v01gsIC21t52qSk36REXY+slhNZ0OQ349tgkTsoETHFLoEwMIHw -AgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMI -QnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgR9MA0G -CSqGSIb3DQEBAQUABEB8ujxbabxXUYJhopuDm3oDq4JNqX6Io4p3ro+ShqfIndsXTZ1v5a2N -WtLLCWlHn/habjBwZ/DgQgcKASbZ7QxNMIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIA -oAQIbsL5v1wX98KggAQoAaJ4WHm68fXY1WE5OIjfVBIDpO1K+i8dmKhjnAjrjoyZ9Bwc8rDL -lgQg4CXb805h5xl+GfvSwUaHJayte1m2mcOhs3J2YyqbQ+MEIMIiJQccmhO3oDKm36CFvYR8 -5PjpclVcZyX2ngbwPFMnBAgy0clOAE6UKAAAAAAAAAAAAAA= ------END PKCS7----- - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/3dess.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/3dess.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/3dess.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/3dess.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ ------BEGIN PKCS7----- -MIIGHgYJKoZIhvcNAQcCoIIGDzCCBgsCAQExCzAJBgUrDgMCGgUAMAsGCSqGSIb3DQEHAaCC -BGswggJTMIIB/aADAgECAgIEfjANBgkqhkiG9w0BAQQFADCBkjELMAkGA1UEBhMCQVUxEzAR -BgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNv -ZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UE 
-AxMSREVNTyBaRVJPIFZBTFVFIENBMB4XDTk4MDUxMzA2MjY1NloXDTAwMDUxMjA2MjY1Nlow -gaUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFu -ZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxEjAQBgNVBAsTCVNNSU1FIDAwMzEZMBcG -A1UEAxMQQW5nZWxhIHZhbiBMZWVudDEjMCEGCSqGSIb3DQEJARYUYW5nZWxhQGNyeXB0c29m -dC5jb20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEAuC3+7dAb2LhuO7gt2cTM8vsNjhG5JfDh -hX1Vl/wVGbKEEj0MA6vWEolvefQlxB+EzwCtR0YZ7eEC/T/4JoCyeQIDAQABoygwJjAkBglg -hkgBhvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EAUnSP -igs6TMFISTjw8cBtJYb98czgAVkVFjKyJQwYMH8FbDnCyx6NocM555nsyDstaw8fKR11Khds -syd3ikkrhDCCAhAwggG6AgEDMA0GCSqGSIb3DQEBBAUAMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0EwHhcNOTgwMzAzMDc0MTMyWhcNMDgwMjI5MDc0MTMyWjCB -kjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5l -MRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBB -TkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENBMFwwDQYJKoZIhvcNAQEB -BQADSwAwSAJBAL+0E2fLej3FSCwe2A2iRnMuC3z12qHIp6Ky1wo2zZcxft7AI+RfkrWrSGtf -mfzBEuPrLdfulncC5Y1pNcM8RTUCAwEAATANBgkqhkiG9w0BAQQFAANBAGSbLMphL6F5pp3s -8o0Xyh86FHFdpVOwYx09ELLkuG17V/P9pgIc0Eo/gDMbN+KT3IdgECf8S//pCRA6RrNjcXIx -ggF7MIIBdwIBATCBmTCBkjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAP -BgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZ -REVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENB -AgIEfjAJBgUrDgMCGgUAoHowGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAbBgkqhkiG9w0B -CQ8xDjAMMAoGCCqGSIb3DQMHMBwGCSqGSIb3DQEJBTEPFw05ODA1MTQwMzM5MzdaMCMGCSqG -SIb3DQEJBDEWBBQstNMnSV26ba8PapQEDhO21yNFrjANBgkqhkiG9w0BAQEFAARAW9Xb9YXv -BfcNkutgFX9Gr8iXhBVsNtGEVrjrpkQwpKa7jHI8SjAlLhk/4RFwDHf+ISB9Np3Z1WDWnLcA -9CWR6g== ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/c.pem 
nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/c.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/c.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/c.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -issuer :/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=DEMONSTRATION AND TESTING/CN=DEMO ZERO VALUE CA -subject:/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=SMIME 003/CN=Information/Email=info@cryptsoft.com -serial :047D - -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1149 (0x47d) - Signature Algorithm: md5withRSAEncryption - Issuer: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=DEMONSTRATION AND TESTING, CN=DEMO ZERO VALUE CA - Validity - Not Before: May 13 05:40:58 1998 GMT - Not After : May 12 05:40:58 2000 GMT - Subject: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=SMIME 003, CN=Information/Email=info@cryptsoft.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Modulus: - 00:ad:e7:23:89:ee:0d:87:b7:9c:32:44:4b:95:81: - 73:dd:22:80:4b:2d:c5:60:b8:fe:1e:18:63:ef:dc: - 89:89:22:df:95:3c:7a:db:3d:9a:06:a8:08:d6:29: - fd:ef:41:09:91:ed:bc:ad:98:f9:f6:28:90:62:6f: - e7:e7:0c:4d:0b - Exponent: 65537 (0x10001) - X509v3 extensions: - Netscape Comment: - Generated with SSLeay - Signature Algorithm: md5withRSAEncryption - 52:15:ea:88:f4:f0:f9:0b:ef:ce:d5:f8:83:40:61:16:5e:55: - f9:ce:2d:d1:8b:31:5c:03:c6:2d:10:7c:61:d5:5c:0a:42:97: - d1:fd:65:b6:b6:84:a5:39:ec:46:ec:fc:e0:0d:d9:22:da:1b: - 50:74:ad:92:cb:4e:90:e5:fa:7d - ------BEGIN CERTIFICATE----- -MIICTDCCAfagAwIBAgICBH0wDQYJKoZIhvcNAQEEBQAwgZIxCzAJBgNVBAYTAkFV -MRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFuZTEaMBgGA1UE -ChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsTGURFTU9OU1RSQVRJT04gQU5E -IFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBWQUxVRSBDQTAeFw05ODA1MTMw -NTQwNThaFw0wMDA1MTIwNTQwNThaMIGeMQswCQYDVQQGEwJBVTETMBEGA1UECBMK -UXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m 
-dCBQdHkgTHRkMRIwEAYDVQQLEwlTTUlNRSAwMDMxFDASBgNVBAMTC0luZm9ybWF0 -aW9uMSEwHwYJKoZIhvcNAQkBFhJpbmZvQGNyeXB0c29mdC5jb20wXDANBgkqhkiG -9w0BAQEFAANLADBIAkEArecjie4Nh7ecMkRLlYFz3SKASy3FYLj+Hhhj79yJiSLf -lTx62z2aBqgI1in970EJke28rZj59iiQYm/n5wxNCwIDAQABoygwJjAkBglghkgB -hvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EA -UhXqiPTw+QvvztX4g0BhFl5V+c4t0YsxXAPGLRB8YdVcCkKX0f1ltraEpTnsRuz8 -4A3ZItobUHStkstOkOX6fQ== ------END CERTIFICATE----- - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/ff nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/ff --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/ff 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/ff 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ ------BEGIN PKCS7----- -MIIGHgYJKoZIhvcNAQcCoIIGDzCCBgsCAQExCzAJBgUrDgMCGgUAMAsGCSqGSIb3DQEHAaCC -BGswggJTMIIB/aADAgECAgIEfjANBgkqhkiG9w0BAQQFADCBkjELMAkGA1UEBhMCQVUxEzAR -BgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNv -ZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UE -AxMSREVNTyBaRVJPIFZBTFVFIENBMB4XDTk4MDUxMzA2MjY1NloXDTAwMDUxMjA2MjY1Nlow -gaUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFu -ZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxEjAQBgNVBAsTCVNNSU1FIDAwMzEZMBcG -A1UEAxMQQW5nZWxhIHZhbiBMZWVudDEjMCEGCSqGSIb3DQEJARYUYW5nZWxhQGNyeXB0c29m -dC5jb20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEAuC3+7dAb2LhuO7gt2cTM8vsNjhG5JfDh -hX1Vl/wVGbKEEj0MA6vWEolvefQlxB+EzwCtR0YZ7eEC/T/4JoCyeQIDAQABoygwJjAkBglg -hkgBhvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EAUnSP -igs6TMFISTjw8cBtJYb98czgAVkVFjKyJQwYMH8FbDnCyx6NocM555nsyDstaw8fKR11Khds -syd3ikkrhDCCAhAwggG6AgEDMA0GCSqGSIb3DQEBBAUAMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0EwHhcNOTgwMzAzMDc0MTMyWhcNMDgwMjI5MDc0MTMyWjCB 
-kjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5l -MRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBB -TkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENBMFwwDQYJKoZIhvcNAQEB -BQADSwAwSAJBAL+0E2fLej3FSCwe2A2iRnMuC3z12qHIp6Ky1wo2zZcxft7AI+RfkrWrSGtf -mfzBEuPrLdfulncC5Y1pNcM8RTUCAwEAATANBgkqhkiG9w0BAQQFAANBAGSbLMphL6F5pp3s -8o0Xyh86FHFdpVOwYx09ELLkuG17V/P9pgIc0Eo/gDMbN+KT3IdgECf8S//pCRA6RrNjcXIx -ggF7MIIBdwIBATCBmTCBkjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAP -BgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZ -REVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENB -AgIEfjAJBgUrDgMCGgUAoHowGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAbBgkqhkiG9w0B -CQ8xDjAMMAoGCCqGSIb3DQMHMBwGCSqGSIb3DQEJBTEPFw05ODA1MTQwMzM5MzdaMCMGCSqG -SIb3DQEJBDEWBBQstNMnSV26ba8PapQEDhO21yNFrjANBgkqhkiG9w0BAQEFAARAW9Xb9YXv -BfcNkutgFX9Gr8iXhBVsNtGEVrjrpkQwpKa7jHI8SjAlLhk/4RFwDHf+ISB9Np3Z1WDWnLcA -9CWR6g== ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-e nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-e --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-e 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-e 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ - -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHCMIHMAgEAMHYwYjERMA8GA1UEBxMISW50ZXJuZXQxFzAV -BgNVBAoTDlZlcmlTaWduLCBJbmMuMTQwMgYDVQQLEytWZXJpU2lnbiBDbGFzcyAxIENBIC0gSW5k -aXZpZHVhbCBTdWJzY3JpYmVyAhBgQJiC3qfbCbjdj5INYLnKMA0GCSqGSIb3DQEBAQUABECMzu8y -wQ/qZbO8cAGMRBF+mPruv3+Dvb9aWNZ2k8njUgqF6mcdhVB2MkGcsG3memRXJBixvMYWVkU3qK4Z -VuKsMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UE -BxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgRuMA0GCSqG -SIb3DQEBAQUABEBcWwYFHJbJGhiztt7lzue3Lc9CH5WAbyR+2BZ3uv+JxZfRs1PuaWPOwRa0Vgs3 
-YwSJoRfxQj2Gk0wFqG1qt6d1MIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIAoAQI8vRlP/Nx -2iSggASCAZhR5srxyspy7DfomRJ9ff8eMCtaNwEoEx7G25PZRonC57hBvGoScLtEPU3Wp9FEbPN7 -oJESeC+AqMTyTLNy8aQsyC5s53E9UkoIvg62ekYZBbXZqXsrxx4PhiiX3NH8GVh42phB0Chjw0nK -HZeRDmxGY3Cmk+J+l0uVKxbNIfJIKOguLBnhqmnKH/PrnzDt591u0ULy2aTLqRm+4/1Yat/QPb6J -eoKGwNPBbS9ogBdrCNCp9ZFg3Xar2AtQHzyTQIfYeH3SRQUpKmRm5U5o9p5emgEdT+ZfJm/J4tSH -OmbgAFsbHQakA4MBZ4J5qfDJhOA2g5lWk1hIeu5Dn/AaLRZd0yz3oY0Ieo/erPWx/bCqtBzYbMe9 -qSFTedKlbc9EGe3opOTdBZVzK8KH3w3zsy5luxKdOUG59YYb5F1IZiWGiDyuo/HuacX+griu5LeD -bEzOtZnko+TZXvWIko30fD79j3T4MRRhWXbgj2HKza+4vJ0mzcC/1+GPsJjAEAA/JgIEDU4w6/DI -/HQHhLAO3G+9xKD7MvmrzkoAAAAAAAAAAAAA - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01 nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01 --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ - -MIAGCSqGSIb3DQEHA6CAMIACAQAxgfMwgfACAQAwgZkwgZIxCzAJBgNVBAYTAkFVMRMwEQYD -VQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFuZTEaMBgGA1UEChMRQ3J5cHRzb2Z0 -IFB0eSBMdGQxIjAgBgNVBAsTGURFTU9OU1RSQVRJT04gQU5EIFRFU1RJTkcxGzAZBgNVBAMT -EkRFTU8gWkVSTyBWQUxVRSBDQQICBG4wDQYJKoZIhvcNAQEBBQAEQKvMaW8xh6oF/X+CJivz -IZV7yHxlp4O3NHQtWG0A8MOZB+CtKlU7/6g5e/a9Du/TOqxRMqtYRp63pa2Q/mM4IYMwgAYJ -KoZIhvcNAQcBMBoGCCqGSIb3DQMCMA4CAgCgBAifz6RvzOPYlKCABIGwxtGA/FLBBRs1wbBP -gDCbSG0yCwjJNsFg89/k6xuXo8c5YTwsw8+XlIVq03navpew6XxxzY090rD2OJ0t6HA6GqrI -pd8WiSh/Atqn0yfLFmkLqgIAPRfzxUxqUocxLpQsLIFp2YNUGE+yps+UZmIjw/WHfdqrcWTm -STSvKuy3UkIJZCkGDBpTvqk4BFaHh4oTXEpgpNY+GKxjf9TDN9GQPqQZR7sgQki4t2g4/Saq -Kl4EMISgluk6swdND0tiHY7v5d6YR29ePCl2/STJ98eJpWkEEC22GNNvOy7ru/Rv2He4MgQg -optd7sk9MMd9xhJppg7CcH/yDx//HrtgpOcWmn6VxpgECFqon4uXkQtIBIH4PaNclFn7/hLx -Pw2VmBGaC0SYF3U1jyN96EBxdjqy8Aa6ByMXYDW5BcfqniD5mYXfw+b81lh1kutxaPaV4YJ9 -ZlRUW752N7VHo/fG0/fukoe5W9a8kIhgLpygllb/GP4oSF4wM6n1/OgRzZj2IWFiobKO4d/t 
-Mnh+C+PoEVAuFZcxQwi9GqvsK5OoIjVwNx0XcVSOl1TTYS9SwC7ugMBCab73JiruC24pL78Y -M+NaIpIQ3On4DokJA2ZHtjBjZIxF4tKA144RvFN6pBd6TVE5XM6KD/Vh9bjSmujtEAfdQ3Te -dvKJsbZuu0stErbvWcRy11I328l557EECAJT7d44OJ3rBBBj6bnnx6dDU2SRqp2CEoQaBAhK -RBuyhNxkygQIOY9/NhwqAJAECOvX0Zd0DqgoBAjobPpMHhVV3gQQWLU2vEoZ51BwzxdzCmxO -wwQI4oKfudaNqoAESKzBNAqv5kGumHOlMKsRfrs7jZCcSaOuEj97pYx08FLEgF23cav39MOQ -NUEM1dNU+EYslL4o3RoSHRjUgPU+2t9c0prS9A/bPARIEOP94PynaTNxwHi3VTK7SzuQmgzA -4n942E9joSiqsQPlsKAb3sPUaLC3SuUxSjNBgfpvD0bmrA/5h+WZoYXvIogFpwjkSmnFBEie -0lh5Ov1aRrvCw5/j3Q/W/4ZtN5U+aeVBJMtA8n0Mxd5kPxHbNVh4oGprZ6wEegV8ht3voyZa -mZ5Cyxc8ffMYnM/JJI6/oEYEUEMyyiS5FnYyvxKzfMtyn2lZ2st9nZGNNgMc9N62r5HgNbdD -FHuRdKKzV+8kQfuMc3mOPpK1t9TFY+QgrxiB5p6S7VooI97YtP3PbfknszCEBEh4PdXYbbaR -3AacN3Q5kYYmWsq3WW6xgrg0mmEGosGvwSQxBBuiXZrxScCa4ivEq05UZwyShePvKduOvnUE -2zDO6IXFLZxhTZAESEm9/FovLgGAiJ7iMGmYvsISLJScwG4n+wrSaQNQXizs9N3ykys54wBN -d/+BQ4F7pncHhDQ2Dyt5MekB8Y8iNOocUTFCu524vQRIaWCXmXP3vU7D21dp0XnAMzRQJ565 -JV3aHRoY7XDa4LePa7PP9ywyafOE5yCW7ndqx3J+2JhTDvSFsW8/q3H3iyeFhykuJVS6BFDK -6CmKbnyyjOfE2iLGJmTFa905V2KrVDCmlEu/xyGMs80yTyZC+ySzM83FMVvLEQmSzcTNUZVp -DfA1kNXbXkPouBXXT6g8r8JCRljaKKABmgRIlMheOJQRUUU4cgvhMreXPayhq5Ao4VMSCkA5 -hYRCBczm4Di/MMohF0SxIsdRY6gY9CPnrBXAsY6h1RbR7Tw0iQZmeXi52DCiBEj0by+SYMAa -9z0CReIzl8JLL6EVIFz8kFxlkGWjr4dnOzhhPOq/mCpp0WxbavDfdhE87MdXJZBnLwoT62QG -955HlAoEQBOGJbcESCgd5XSirZ9Y3AbCfuKOqoMBvEUGn+w/pMaqnGvnr5FZhuBDKrhRXqtx -QsxA//drGUxsrZOuSL/0+fbvo7n2h1Z8Ny86jOvVZAQIAjw2l1Yc5RAESNc9i3I8pKEOVQf/ -UBczJ0NR9aTEF80dRg2lpXwD0ho4N0AvSiVbgxC7cPZHQwIqvq9LHRUs/4n+Vu3SVYU3cAxo -lUTiCGUSlARIF+TD57SI5+RI+MNtnD9rs4E1ml51YoHGWFj3UPriDmY0FKEwIgqtMXMY3fZ9 -Kq8d83bjDzxwbDX7WwR7KbSeJWT42pCz7kM+BEjjPsOnZHuusXT3x2rrsBnYtYsbt98mSFiS -KzTtFmXfkOBbCQdit1P76QnYJ1aXMGs6zP6GypQTadK/zYWvlm38QkVwueaJ0woESKW2pqKA -70h2UMDHOrpepU1lj0YMzmotDHSTU3L909VvUMNg9uqfrQ6mSkb9j5Tl8oF2otOw5EzA1Yda -KPmgsv62RWLYl80wXQRQwG0e/mgG75jp9lOhJdVXqcYbQpS9viwVaVkwH+69mu/bQI4gjoEs -UYX6O71Re2z+cYhcm9UrK+DXuSFBXQOIlAFxKMW4B0apd6fU84FsZLMESOorXE5OE0A2B2ji 
-J8QI0Exk4hUvWrMNJfUZwFyS7E05xV9ORuX1xmsKqkT4tVR5Nqln4vhvAY860VBoloz0CDkd -8seSBEjeMgRI9FvpYuflIeHg9urkwp6N+1f0DrJJhJY9ZQ0HTQhziJmIfvbEjNqCl7hEC28+ -F8I5tuViLgfSwcFFCvnS6WFoN4X6QdFdqMCbBEjdlI1c+IQGA/IuTDMJYCuQ/v+8BG5ZeWVH -icPZmXfRat9eFK1dGKAJef6+Tf9HPuDjSpDyffrifsp7Dc34lmm7GN1+ON3ZMtwEUNm6epb8 -1RKWjoI7jIKUV/M2p/0eeGSqs4b06KF/VR6dBwsJVL5DpnTsp3MV4j/CAOlRdSPZ5++tsKbM -aplk+ceqQtpEFz1MYTtVV4+rlrWaBEA1okJyNZ5/tNOwM7B+XfOZ0xw+uyVi9v4byTZM2Qds -J+d3YGYLAugTGHISLqQEerD8/gGK+/SL06b2gNedXPHtBAiBKX+Mdy3wFQQIqE9gVgvrFNUE -CKKoTFoMGqnPBAjDPgLCklNfrwQI3Ek1vSq68w8ECBodu2FOZJVkBAgzwjfSr2N9WQQQTCoQ -KkAbrS9tnjXn1I3+ZwQIrPx3eINo/YUECIeYWCFskxlYBAiDUdvZXwD3vgQIkEyZbbZWbUUE -CH4+odl1Isk3BBj68fkqJ0fKJRWVLWuW/O3VE4BOPKwFlaIECFseVTdDUho8BAj+cOKvV2WA -hgQgaXr+wwq+ItblG0Qxz8IVUXX6PV2mIdHwz4SCCvnCsaIECJhBYxdfLI/XBCDswamPn9MR -yXi2HVQBineV+GtWVkIoZ2dCLFB9mQRMoAQI0nUR5a5AOJoECA+AunKlAlx8BAi5RtFeF4g1 -FQQIz/ie+16LlQcECOmNuVg5DXjMBAjH2nkfpXZgWwQIVdLuO/+kuHAECO/5rEHmyI9vBBD4 -16BU4Rd3YerDQnHtrwOQBCCkho1XxK5Maz8KLCNi20wvcGt8wsIXlj2h5q9ITBq7IgQQvKVY -4OfJ7bKbItP2dylwQgQYPIGxwkkbRXNraONYvN19G8UdF35rFOuIBAjf0sKz/618ZQQIxObr -xJkRe0sECIC+ssnjEb2NBBBI+XM4OntVWGsRV9Td3sFgBAinGwIroo8O0gQQMGAwgc9PaLaG -gBCiwSTrYQQIVHjfCQgOtygEUIoraFoANfhZgIShpOd/RRxFU4/7xZR5tMdGoYz/g0thR0lM -+Hi88FtFD4mAh/Oat4Ri8B7bv04aokjN2UHz6nPbHHjZ8zIqpbYTCy043GNZBAhOqjyB2JbD -NwQoR23XCYD9x6E20ChHJRXmaHwyMdYXKl5CUxypl7ois+sy2D7jDukS3wQIsTyyPgJi0GsA -AAAAAAAAAAAA - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-01.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHA6CAMIILyAIBADGB8zCB8AIBADCBmTCBkjELMAkGA1UEBhMC -QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5lMRowGAYD 
-VQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBB -TkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENBAgIEbjANBgkq -hkiG9w0BAQEFAARAq8xpbzGHqgX9f4ImK/MhlXvIfGWng7c0dC1YbQDww5kH4K0q -VTv/qDl79r0O79M6rFEyq1hGnrelrZD+YzghgzCCCssGCSqGSIb3DQEHATAaBggq -hkiG9w0DAjAOAgIAoAQIn8+kb8zj2JSAggqgxtGA/FLBBRs1wbBPgDCbSG0yCwjJ -NsFg89/k6xuXo8c5YTwsw8+XlIVq03navpew6XxxzY090rD2OJ0t6HA6GqrIpd8W -iSh/Atqn0yfLFmkLqgIAPRfzxUxqUocxLpQsLIFp2YNUGE+yps+UZmIjw/WHfdqr -cWTmSTSvKuy3UkIJZCkGDBpTvqk4BFaHh4oTXEpgpNY+GKxjf9TDN9GQPqQZR7sg -Qki4t2g4/SaqKl6EoJbpOrMHTQ9LYh2O7+XemEdvXjwpdv0kyffHiaVpBBAtthjT -bzsu67v0b9h3uDKim13uyT0wx33GEmmmDsJwf/IPH/8eu2Ck5xaafpXGmFqon4uX -kQtIPaNclFn7/hLxPw2VmBGaC0SYF3U1jyN96EBxdjqy8Aa6ByMXYDW5BcfqniD5 -mYXfw+b81lh1kutxaPaV4YJ9ZlRUW752N7VHo/fG0/fukoe5W9a8kIhgLpygllb/ -GP4oSF4wM6n1/OgRzZj2IWFiobKO4d/tMnh+C+PoEVAuFZcxQwi9GqvsK5OoIjVw -Nx0XcVSOl1TTYS9SwC7ugMBCab73JiruC24pL78YM+NaIpIQ3On4DokJA2ZHtjBj -ZIxF4tKA144RvFN6pBd6TVE5XM6KD/Vh9bjSmujtEAfdQ3TedvKJsbZuu0stErbv -WcRy11I328l557ECU+3eODid62PpuefHp0NTZJGqnYIShBpKRBuyhNxkyjmPfzYc -KgCQ69fRl3QOqCjobPpMHhVV3li1NrxKGedQcM8XcwpsTsPigp+51o2qgKzBNAqv -5kGumHOlMKsRfrs7jZCcSaOuEj97pYx08FLEgF23cav39MOQNUEM1dNU+EYslL4o -3RoSHRjUgPU+2t9c0prS9A/bPBDj/eD8p2kzccB4t1Uyu0s7kJoMwOJ/eNhPY6Eo -qrED5bCgG97D1Giwt0rlMUozQYH6bw9G5qwP+YflmaGF7yKIBacI5EppxZ7SWHk6 -/VpGu8LDn+PdD9b/hm03lT5p5UEky0DyfQzF3mQ/Eds1WHigamtnrAR6BXyG3e+j -JlqZnkLLFzx98xicz8kkjr+gRkMyyiS5FnYyvxKzfMtyn2lZ2st9nZGNNgMc9N62 -r5HgNbdDFHuRdKKzV+8kQfuMc3mOPpK1t9TFY+QgrxiB5p6S7VooI97YtP3Pbfkn -szCEeD3V2G22kdwGnDd0OZGGJlrKt1lusYK4NJphBqLBr8EkMQQbol2a8UnAmuIr -xKtOVGcMkoXj7ynbjr51BNswzuiFxS2cYU2QSb38Wi8uAYCInuIwaZi+whIslJzA -bif7CtJpA1BeLOz03fKTKznjAE13/4FDgXumdweENDYPK3kx6QHxjyI06hxRMUK7 -nbi9aWCXmXP3vU7D21dp0XnAMzRQJ565JV3aHRoY7XDa4LePa7PP9ywyafOE5yCW -7ndqx3J+2JhTDvSFsW8/q3H3iyeFhykuJVS6yugpim58soznxNoixiZkxWvdOVdi -q1QwppRLv8chjLPNMk8mQvskszPNxTFbyxEJks3EzVGVaQ3wNZDV215D6LgV10+o -PK/CQkZY2iigAZqUyF44lBFRRThyC+Eyt5c9rKGrkCjhUxIKQDmFhEIFzObgOL8w 
-yiEXRLEix1FjqBj0I+esFcCxjqHVFtHtPDSJBmZ5eLnYMKL0by+SYMAa9z0CReIz -l8JLL6EVIFz8kFxlkGWjr4dnOzhhPOq/mCpp0WxbavDfdhE87MdXJZBnLwoT62QG -955HlAoEQBOGJbcoHeV0oq2fWNwGwn7ijqqDAbxFBp/sP6TGqpxr56+RWYbgQyq4 -UV6rcULMQP/3axlMbK2Trki/9Pn276O59odWfDcvOozr1WQCPDaXVhzlENc9i3I8 -pKEOVQf/UBczJ0NR9aTEF80dRg2lpXwD0ho4N0AvSiVbgxC7cPZHQwIqvq9LHRUs -/4n+Vu3SVYU3cAxolUTiCGUSlBfkw+e0iOfkSPjDbZw/a7OBNZpedWKBxlhY91D6 -4g5mNBShMCIKrTFzGN32fSqvHfN24w88cGw1+1sEeym0niVk+NqQs+5DPuM+w6dk -e66xdPfHauuwGdi1ixu33yZIWJIrNO0WZd+Q4FsJB2K3U/vpCdgnVpcwazrM/obK -lBNp0r/Nha+WbfxCRXC55onTCqW2pqKA70h2UMDHOrpepU1lj0YMzmotDHSTU3L9 -09VvUMNg9uqfrQ6mSkb9j5Tl8oF2otOw5EzA1YdaKPmgsv62RWLYl80wXcBtHv5o -Bu+Y6fZToSXVV6nGG0KUvb4sFWlZMB/uvZrv20COII6BLFGF+ju9UXts/nGIXJvV -Kyvg17khQV0DiJQBcSjFuAdGqXen1POBbGSz6itcTk4TQDYHaOInxAjQTGTiFS9a -sw0l9RnAXJLsTTnFX05G5fXGawqqRPi1VHk2qWfi+G8BjzrRUGiWjPQIOR3yx5IE -SN4y9FvpYuflIeHg9urkwp6N+1f0DrJJhJY9ZQ0HTQhziJmIfvbEjNqCl7hEC28+ -F8I5tuViLgfSwcFFCvnS6WFoN4X6QdFdqMCb3ZSNXPiEBgPyLkwzCWArkP7/vARu -WXllR4nD2Zl30WrfXhStXRigCXn+vk3/Rz7g40qQ8n364n7Kew3N+JZpuxjdfjjd -2TLc2bp6lvzVEpaOgjuMgpRX8zan/R54ZKqzhvTooX9VHp0HCwlUvkOmdOyncxXi -P8IA6VF1I9nn762wpsxqmWT5x6pC2kQXPUxhO1VXj6uWtZo1okJyNZ5/tNOwM7B+ -XfOZ0xw+uyVi9v4byTZM2QdsJ+d3YGYLAugTGHISLqQEerD8/gGK+/SL06b2gNed -XPHtgSl/jHct8BWoT2BWC+sU1aKoTFoMGqnPwz4CwpJTX6/cSTW9KrrzDxodu2FO -ZJVkM8I30q9jfVlMKhAqQButL22eNefUjf5nrPx3eINo/YWHmFghbJMZWINR29lf -APe+kEyZbbZWbUV+PqHZdSLJN/rx+SonR8olFZUta5b87dUTgE48rAWVolseVTdD -Uho8/nDir1dlgIZpev7DCr4i1uUbRDHPwhVRdfo9XaYh0fDPhIIK+cKxophBYxdf -LI/X7MGpj5/TEcl4th1UAYp3lfhrVlZCKGdnQixQfZkETKDSdRHlrkA4mg+AunKl -Alx8uUbRXheINRXP+J77XouVB+mNuVg5DXjMx9p5H6V2YFtV0u47/6S4cO/5rEHm -yI9v+NegVOEXd2Hqw0Jx7a8DkKSGjVfErkxrPwosI2LbTC9wa3zCwheWPaHmr0hM -GrsivKVY4OfJ7bKbItP2dylwQjyBscJJG0Vza2jjWLzdfRvFHRd+axTriN/SwrP/ -rXxlxObrxJkRe0uAvrLJ4xG9jUj5czg6e1VYaxFX1N3ewWCnGwIroo8O0jBgMIHP -T2i2hoAQosEk62FUeN8JCA63KIoraFoANfhZgIShpOd/RRxFU4/7xZR5tMdGoYz/ -g0thR0lM+Hi88FtFD4mAh/Oat4Ri8B7bv04aokjN2UHz6nPbHHjZ8zIqpbYTCy04 
-3GNZTqo8gdiWwzdHbdcJgP3HoTbQKEclFeZofDIx1hcqXkJTHKmXuiKz6zLYPuMO -6RLfsTyyPgJi0GsAAAAA ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02 nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02 --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ - -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHCMIHMAgEAMHYwYjERMA8GA1UEBxMISW50ZXJuZXQxFzAV -BgNVBAoTDlZlcmlTaWduLCBJbmMuMTQwMgYDVQQLEytWZXJpU2lnbiBDbGFzcyAxIENBIC0gSW5k -aXZpZHVhbCBTdWJzY3JpYmVyAhBgQJiC3qfbCbjdj5INYLnKMA0GCSqGSIb3DQEBAQUABEACr4tn -kSzvo3aIlHfJLGbfokNCV6FjdDP1vQhL+kdXONqcFCEf9ReETCvaHslIr/Wepc5j2hjZselzgqLn -rM1ZMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UE -BxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgRuMA0GCSqG -SIb3DQEBAQUABEBanBxKOvUoRn3DiFY55lly2TPu2Cv+dI/GLrzW6qvnUMZPWGPGaUlPyWLMZrXJ -xGXZUiRJKTBwDu91fnodUEK9MIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIAoAQImxKZEDWP -EuOggASCBACBi1bX/qc3geqFyfRpX7JyIo/g4CDr62GlwvassAGlIO8zJ5Z/UDIIooeV6QS4D4OW -PymKd0WXhwcJI0yBcJTWEoxND27LM7CWFJpA07AoxVCRHTOPgm794NynLecNUOqVTFyS4CRuLhVG -PAk0nFZG/RE2yMtx4rAkSiVgOexES7wq/xWuoDSSmuTMNQOTbKfkEKqdFLkM/d62gD2wnaph7vKk -PPK82wdZP8rF3nUUC5c4ahbNoa8g+5B3tIF/Jz3ZZK3vGLU0IWO+i7W451dna13MglDDjXOeikNl -XLsQdAVo0nsjfGu+f66besJojPzysNA+IEZl6gNWUetl9lim4SqrxubUExdS2rmXnXXmEuEW/HC7 -dlTAeYq5Clqx5id6slhC2C2oegMww3XH9yxHw6OqzvXY6pVPEScEtBMQLgaKFQT+m2SRtbTVFG7c -QcnUODyVB1IbpQTF1DHeeOX1W/HfpWZym8dzkti6SCyeumHmqO406xDiIMVKtHOqM86nEHuAMZsr -cLy+ey6TEJvR6S4N8QRzng8JJDZDTJXQN6q84aEudsnOrw2KyOVwPpI6ey4qBsHUgQ8kAFy5lsQa -WV45h6exgUwbBcKLgPZGFj+OdD2RKJsTb83/UqbJS5Q/lGXhzBlnaYucyJxEprRxbntmcnOEPFJe -+tRDUwOTd7qlJljdhIJL+uDcooL9Ahgo6Cwep6tduekv2cSEohJeTE8Dvy34YRhMbLvnFNdmnpNy 
-rNZDYVVxxaKoyd2AfB8NPFZh1VdAYfI3R1QAQ2kXEef5NNIfVQfMzD9akJn4RP+Kv32Qaxm4FrnK -xmwRyGJShavIBc2ax+F1r1+NZXuSBHn5vfoRTxOk0ST4dXsw74dnlYUMRaSu4qqUdM9jsXSyeX4Z -gQgkR2bkaYO6ezFgenFIa7QWVw8rXZAEZ5aibCxbnY1VE41PYIvhlLdbFJhH9gY22s+fFAuwnzyA -SRjC40A9aAEItRlaPStWSGiqlLRgNkBBwdpv2l2YPBd2QzHx6ek6XGrvRJuAC+Nh62rtQKwpNH54 -YAOHW55maBFW2SQ3TF+cZ6NbbqhCmHTyyR7mcSYc9sXSVDWEhYKQ1iyU870zhHWVpvglZizZetJC -ZFjYex3b1ngVdcgargOvpPq9urCKKi2mbkqv/EFpzSWGXkKSpfCG/XfMnEOtkNrB8S06vnk2JcJB -OBqJot+uuSH5hOg0vTpxX2DuONJSiWSWyfRE/lTfJJFXwhod7SXclUyXPeSyibcSic2hVAzDmwjD -31js/j2k02PI/agPhr3UQ8cMgcNAiaoCKbNaWfn6BGbCAbTchxzUlo2cSJiLlrX2IDZmfXbXmZCo -m1smWIG+BIIEALiuAxDb6dWLAYyVBoN9hYI4AiPeZAY9MtvQ6AV8o2/EFm6PvYGXy3Hei5830CH0 -PBeX7Kdd6ff1y33TW/l5qSkIL1ULTGR7okFfJePHDmq1dFt6/JOMptiQ8WSu7CsJQvZ9VTFXeYFc -ZqCPPZc1NrPegNK70Zf9QxWIbDAevJ5KLBf1c6j8pU2/6LnvDY6VjaTvYSgr7vTR8eVzH4Rm77W0 -iOHxg5VcODv6cGSVyuvbX8UAGo8Cmb58ERDtBDJBQXVpWKLNAuDJ9GX8n2zNkpjZLbPSkcmuhqGa -BJBE/BaCTkUQWlY9dIbRtEnxIU1mfbPPdx1Ppa8DqGDjSOsQdKcKYNNZtayEw++EIpmpdBNsKphC -fB8UEK2Wkk4ZVW+qyGoi/r0MFsvO1NmSOOZ0o/jy/YHmoeURHhPy97AO3eVTkEAa5CfJEJybmo56 -7CDw/FwoGAUCgsoz7rlxzMudr/IhHIH+APinncxXlHO2ecvHD9i8DaHGA8tVifgsUhqQoZieULut -eF94O5UAxOkv41UZssYTwN4nYrN1QkesZl3BX4ORS4EE30/PQ23ARf3WZptZrCJevGm2ZYzGeh8x -g17mCDfiLO+bff4qP/4mC96Pu4ia6j4to5BwKIJS/+DCuoD8WeSKF4pugXQkMUiHdQnNnVP9Sp2O -/4ly5mO8JzrQC59V2bnTNBqPhpno8kfJvK5TypPSVC+bTzern3rJ6UceB3srcn9zxKx9GdNydJQj -yWjv8ec3n3d1nuQwhz5Q053NBhIjwoGg3Go7LO6i78ZOlpF7dcoAO13NfHLyNjnyHCaiWtVRTct9 -rLf5vN00urSn8YJngHk1eTKK8nHGIcOg6YdYDOD2nE5XwRijKmieG8Xa3eKRzfbL06GrBQENle6J -mC131bp3cRVxpjq+o6RAbGoMm4yICsL4eTarCQrsyHmoPHqr91UHo91avyxU7knWmEhX27ybmsrs -8aeZwPHixL14TeyhruCqRVvkf1Ks7P+z8MPUboGNqQe2WLN8ktCGEr15O8MJR/em86G03Jfo4oaw -/DVUH5RwLT6acedOGuzMh/2r8BcmemhVQ8/cWvV4YJ0tOW4hzyVHC5hQf8sZ3LzxXLH6Ohnrbprh -xvrdbaSdChWZDDP0bCCbxEhkwuBkBeKZrMbwRTP+TPTPYLVTH/CmKLzKh/114tkGkyO3hHS4qExU -V39F2Sj4mylx+hD0+20D9pntpNi7htccGlOm6yNM69at/3+kLgJJyoIlaxLcCUYHNMifDt+T3p/t 
-5U4XmD53uUQ6M8dvj/udqPekNSUfse15yrd9pjOt5PcJuqW28q0sFHf9pHIgz3XZFMe5PD7ppw6r -S+C6Ir4PrYIEggQA7ZDVtiCm+BbtNNB/UJm79/OQ5mp5bTI0kPmDeycaWTa0Ojpum+c/dpG/iJOB -DICj7jHOXSHT7JlGyX6aSFJUltucAnZvwzhPDmdDaIDiKSk85GqgdDWVfGosSCX9Ph/T3WpIxnwf -WSDRtIHkWTjly+pe4yy5K6/XISy/L5Zh/fhiI5fjHjgzmlibs2ru4nVw6hBhUvlSSe2BEs5d9h/y -NH8Wy3qvb2D3jh7hkepFtZJGNTHp8ZUC7Ns2JIpQYObsaxdI65i3mMOu7fRwI+0/4ejsWhP6KCEi -LgwvLg0qM82ma6YB7qHAHboaczRVEffDcJUG4a5uycB0DoZFn+uEaEFyili20hCn4hVfsqUQk2PT -8Mo1tSl5e30xI1YJZrRgiJm9nHRX6fLizngP+ILJLPHZsPvlSVIfY+/v/FR8feKOjaGhyGF51BAx -aM2NIQ4jMP5/X+U5gQybi0E6u7rroDhaHsKmCMgXqszwXWCpedA/sEbeHpiTC59YlPPSlIOMc9vP -Ko/mQCfWy/9icUaIfKQldvkllUxxNkqu6AbIpHVscbAEzSPs5xbQXU8EZNNCDisFnnpY3nQ3eLnl -m89saTJxRb7NWHRMlmPv7qgD7uMIq3vdOGA7i5wT9MeoNIgK1/DsgH30s6RWjJy4YyyLmRTXPzbj -hbQVpEmiMRbEidIvUx2OjKVxVQIcgtLsa2lvHQ4XL1cpLr5GVtOgy0fMg5OCDUUDsvjgjgLQ3P2U -p2nVY5FM6/QpPc5DTLuuR9ekI2/c9Biz09RtcYDUQK2ajdo8h1IyKqHFoB7h48OXxXKKY94DY0TG -x6PonB/epj8orAw4QKmm5M0vXYwBOqRymCTHTqOJGObdLx1euFFyqguzHJOU2gAGZI0z9Lg1yRuF -yhdPZyuniIcmtLNxRZ1duYHErcAyX56qndmLXt7UVkATai/rIMuoJLfAsUnVuTUS5p7tJM754UZT -7lTcXvDJgOUNnBRaIcxC3pxvbrYDJ2iFJ72xkxUP2p74gucqg25XnCVmQuLg6zDDxF6CLuw9isxy -Xg4pkneMN//7fpp8GYl9nyZm2yqYYM+jcw0fcVc64L+X4w/gL3H2UMGgxIHSJp7HIG7VKHtXrNyj -dPXXPVUsMsAAimqOr0Lr2sZWirfuivLaPTqhbkvG5PF7K3gT80AOIcd/6EIHBy2hZ7ukfjHmdP4L -yQOhTQklaKzGHI0mypq0uFLWJOUlZnVrMiLP1xrWkpC8Ro9eo6mfjjQ45z8adC43a47klwTEzvod -3rNEFIGJJUEjAN3mbqie7IxoSJknBBJK0D9lZEQ8lZWlq7vuN8JdqPM6xh155jMVsPwjLK6Tzkj5 -BpRD9Tgm3u6HPQSCBADgkWEN75Mu9TGosXY0xm1k6K6sPv8L949CrLWo4r1I2LA072bTGvQP28Vs -hUA76jgcT1ocC++9PoktIK10YCq5w+FfMAQ04KeCXuAdmiY2iAT4Slea61PMCMta3mVGyLUZCLEm -P+I0UKR5mlO0fGEcjU9j8TmbjZqxNFqloLsU7oSi7Os0EtYHkdAVrExUyOc/ZDie6fBjdLTmLdCm -bE9JNwjlbXypdTZupGgLNhKGDIskUAAMwZYayI6YfSIMkNCeAYTnjOuGZZ1msCXGXsfMBR1sfUIj -9UeGjwD8gq+UVVHX/oeoH/m0eJ5ppqi3+nUlgc9DvpYsC/Fg0G2KuYb9B+VJ+a4GMzQSPREoFtQp -B9dtLkBb7Ha/hpGWTIdqzW0eAo5llyN8FNvl2Fu2IcLaNmWFO69gLjRKQopp0dvFOuwAVI6fvGDj 
-p1WigoNbFZl8N+iiWmzKOjoG2ZLbez1clZCms/JPJrXhEMMOxWpVzkQyN336VWHmGgMcjaKCGSeA -2nnESIGuiCXMrkHlGfabYIsKcHFCo2t13uXyZPf0zSPTkuD0Eh92wqC9pvA3gvrrCUfo9Mn3bs+e -KWKmDlpcs8mDn032oIg+zrQhIduMqXVn3evzeVM3B5MBOGMvg51/SXg7R+MC/463juQQEb9IVe/I -YGnO//oWm9lw/377Af/qH+FnN02obJw1FvesQIs9e5RHNQykKbO+vmVJQl1nd9DZWrHDNO7/80Yz -2hCm7Tws5nSRN2iFlyRaYJHr7ypxkU2rCak2r6ua7XDwu1qU2RT3+qPjT1RuxQ2oTlHyGkKPMZGC -Rc+CSWz5aeeCmHZVwdb3nC8YpfsujMiYqygLeuQ82pjKuR7DIKGmnfcOLdv5F+Ek2Wyy0D98iSgk -+aoQGYLhL9llU13pn21uRsDY5uGcXiIw1IETFlTdgENEv8futZuJsegrp7fmFXyNoNyFNyypeDrM -6ZqR4vKxFjg3tKKeVpkw/W4EAklzMxmNiazGNDBHsnYV3rwPlKa+HeeE2YxnsKwGLCNgRYUXTaJk -461vS160z3dvh/mLfdZ7MYCkmO3bNE3ELUDAw7YQkSuo9ujzdFKte9LC34sjg9fOex3ThAg5Y50n -wYm4zBmGM7yEqL8O6QgnM6tIDFS9XryDaLNzcGhMWqMvhzO6sC/AA2WfLgwS517Cp03IkJQWqG9q -w52+E+GAtpioJfczEhlv9BrhjttdugRSjJrG8SYVYE4zG3Aur5eNBoGaALIOHOtPw8+JovQmIWcF -oaJ/WQuglFrWtew51IK6F8RiHAOBVavZOuZcO7tV+5enVfreOd0rX8ZOy4hYmHhmF1hOrrWOn+Ee -E0SYKonXN01BM9xMBIIBSLCvNAppnGPTUGjwbMJRg1VJ2KMiBWH5oJp8tyfIAxMuWFdtaLYbRSOD -XbOAshPVK8JAY8DQDkzqaCTAkLTfSRAt9yY6SbUpMsRv7xa8nMZNJBJzJT9b/wNjgiOJgaGuJMkV -2g/DX2jfP3PrMM/Sbnz7edORXHj1Pa5XTT8nG5MS0FuZgvevdq3o/gVVAz+ZCKOH3ShMzZvfp01l -SX5gaJTflmU6cdNwtn2yZ6IScF7OrjUeA9iEoSVR9dQcA+4lB3RAG3LMwcnxXY35D7+PMJzHIZdF -cSnq+n03ACY2/E/T31iijRH29rvYHGI+mP/ieYs45iq4fTWo6i1HofeWLdP0fX7xW3XO0/hWYFiw -BxKu66whAbRhaib3XJNvetVs25ToYXyiDpjG+cd5rCMei8sGQwTBj9Zeh0URoeMW1inTP0JvCmMU -rZgAAAAAAAAAAAAA - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-enc-02.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHA6CAMIITQAIBADGCAcIwgcwCAQAwdjBiMREwDwYDVQQHEwhJ -bnRlcm5ldDEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xNDAyBgNVBAsTK1ZlcmlT 
-aWduIENsYXNzIDEgQ0EgLSBJbmRpdmlkdWFsIFN1YnNjcmliZXICEGBAmILep9sJ -uN2Pkg1gucowDQYJKoZIhvcNAQEBBQAEQAKvi2eRLO+jdoiUd8ksZt+iQ0JXoWN0 -M/W9CEv6R1c42pwUIR/1F4RMK9oeyUiv9Z6lzmPaGNmx6XOCoueszVkwgfACAQAw -gZkwgZIxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQH -EwhCcmlzYmFuZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsT -GURFTU9OU1RSQVRJT04gQU5EIFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBW -QUxVRSBDQQICBG4wDQYJKoZIhvcNAQEBBQAEQFqcHEo69ShGfcOIVjnmWXLZM+7Y -K/50j8YuvNbqq+dQxk9YY8ZpSU/JYsxmtcnEZdlSJEkpMHAO73V+eh1QQr0wghFz -BgkqhkiG9w0BBwEwGgYIKoZIhvcNAwIwDgICAKAECJsSmRA1jxLjgIIRSIGLVtf+ -pzeB6oXJ9GlfsnIij+DgIOvrYaXC9qywAaUg7zMnln9QMgiih5XpBLgPg5Y/KYp3 -RZeHBwkjTIFwlNYSjE0PbsszsJYUmkDTsCjFUJEdM4+Cbv3g3Kct5w1Q6pVMXJLg -JG4uFUY8CTScVkb9ETbIy3HisCRKJWA57ERLvCr/Fa6gNJKa5Mw1A5Nsp+QQqp0U -uQz93raAPbCdqmHu8qQ88rzbB1k/ysXedRQLlzhqFs2hryD7kHe0gX8nPdlkre8Y -tTQhY76LtbjnV2drXcyCUMONc56KQ2VcuxB0BWjSeyN8a75/rpt6wmiM/PKw0D4g -RmXqA1ZR62X2WKbhKqvG5tQTF1LauZeddeYS4Rb8cLt2VMB5irkKWrHmJ3qyWELY -Lah6AzDDdcf3LEfDo6rO9djqlU8RJwS0ExAuBooVBP6bZJG1tNUUbtxBydQ4PJUH -UhulBMXUMd545fVb8d+lZnKbx3OS2LpILJ66Yeao7jTrEOIgxUq0c6ozzqcQe4Ax -mytwvL57LpMQm9HpLg3xBHOeDwkkNkNMldA3qrzhoS52yc6vDYrI5XA+kjp7LioG -wdSBDyQAXLmWxBpZXjmHp7GBTBsFwouA9kYWP450PZEomxNvzf9SpslLlD+UZeHM -GWdpi5zInESmtHFue2Zyc4Q8Ul761ENTA5N3uqUmWN2Egkv64Nyigv0CGCjoLB6n -q1256S/ZxISiEl5MTwO/LfhhGExsu+cU12aek3Ks1kNhVXHFoqjJ3YB8Hw08VmHV -V0Bh8jdHVABDaRcR5/k00h9VB8zMP1qQmfhE/4q/fZBrGbgWucrGbBHIYlKFq8gF -zZrH4XWvX41le5IEefm9+hFPE6TRJPh1ezDvh2eVhQxFpK7iqpR0z2OxdLJ5fhmB -CCRHZuRpg7p7MWB6cUhrtBZXDytdkARnlqJsLFudjVUTjU9gi+GUt1sUmEf2Bjba -z58UC7CfPIBJGMLjQD1oAQi1GVo9K1ZIaKqUtGA2QEHB2m/aXZg8F3ZDMfHp6Tpc -au9Em4AL42Hrau1ArCk0fnhgA4dbnmZoEVbZJDdMX5xno1tuqEKYdPLJHuZxJhz2 -xdJUNYSFgpDWLJTzvTOEdZWm+CVmLNl60kJkWNh7HdvWeBV1yBquA6+k+r26sIoq -LaZuSq/8QWnNJYZeQpKl8Ib9d8ycQ62Q2sHxLTq+eTYlwkE4Gomi3665IfmE6DS9 -OnFfYO440lKJZJbJ9ET+VN8kkVfCGh3tJdyVTJc95LKJtxKJzaFUDMObCMPfWOz+ -PaTTY8j9qA+GvdRDxwyBw0CJqgIps1pZ+foEZsIBtNyHHNSWjZxImIuWtfYgNmZ9 
-dteZkKibWyZYgb64rgMQ2+nViwGMlQaDfYWCOAIj3mQGPTLb0OgFfKNvxBZuj72B -l8tx3oufN9Ah9DwXl+ynXen39ct901v5eakpCC9VC0xke6JBXyXjxw5qtXRbevyT -jKbYkPFkruwrCUL2fVUxV3mBXGagjz2XNTaz3oDSu9GX/UMViGwwHryeSiwX9XOo -/KVNv+i57w2OlY2k72EoK+700fHlcx+EZu+1tIjh8YOVXDg7+nBklcrr21/FABqP -Apm+fBEQ7QQyQUF1aViizQLgyfRl/J9szZKY2S2z0pHJroahmgSQRPwWgk5FEFpW -PXSG0bRJ8SFNZn2zz3cdT6WvA6hg40jrEHSnCmDTWbWshMPvhCKZqXQTbCqYQnwf -FBCtlpJOGVVvqshqIv69DBbLztTZkjjmdKP48v2B5qHlER4T8vewDt3lU5BAGuQn -yRCcm5qOeuwg8PxcKBgFAoLKM+65cczLna/yIRyB/gD4p53MV5RztnnLxw/YvA2h -xgPLVYn4LFIakKGYnlC7rXhfeDuVAMTpL+NVGbLGE8DeJ2KzdUJHrGZdwV+DkUuB -BN9Pz0NtwEX91mabWawiXrxptmWMxnofMYNe5gg34izvm33+Kj/+Jgvej7uImuo+ -LaOQcCiCUv/gwrqA/FnkiheKboF0JDFIh3UJzZ1T/Uqdjv+JcuZjvCc60AufVdm5 -0zQaj4aZ6PJHybyuU8qT0lQvm083q596yelHHgd7K3J/c8SsfRnTcnSUI8lo7/Hn -N593dZ7kMIc+UNOdzQYSI8KBoNxqOyzuou/GTpaRe3XKADtdzXxy8jY58hwmolrV -UU3Lfay3+bzdNLq0p/GCZ4B5NXkyivJxxiHDoOmHWAzg9pxOV8EYoyponhvF2t3i -kc32y9OhqwUBDZXuiZgtd9W6d3EVcaY6vqOkQGxqDJuMiArC+Hk2qwkK7Mh5qDx6 -q/dVB6PdWr8sVO5J1phIV9u8m5rK7PGnmcDx4sS9eE3soa7gqkVb5H9SrOz/s/DD -1G6BjakHtlizfJLQhhK9eTvDCUf3pvOhtNyX6OKGsPw1VB+UcC0+mnHnThrszIf9 -q/AXJnpoVUPP3Fr1eGCdLTluIc8lRwuYUH/LGdy88Vyx+joZ626a4cb63W2knQoV -mQwz9Gwgm8RIZMLgZAXimazG8EUz/kz0z2C1Ux/wpii8yof9deLZBpMjt4R0uKhM -VFd/Rdko+JspcfoQ9PttA/aZ7aTYu4bXHBpTpusjTOvWrf9/pC4CScqCJWsS3AlG -BzTInw7fk96f7eVOF5g+d7lEOjPHb4/7naj3pDUlH7Htecq3faYzreT3CbqltvKt -LBR3/aRyIM912RTHuTw+6acOq0vguiK+D62C7ZDVtiCm+BbtNNB/UJm79/OQ5mp5 -bTI0kPmDeycaWTa0Ojpum+c/dpG/iJOBDICj7jHOXSHT7JlGyX6aSFJUltucAnZv -wzhPDmdDaIDiKSk85GqgdDWVfGosSCX9Ph/T3WpIxnwfWSDRtIHkWTjly+pe4yy5 -K6/XISy/L5Zh/fhiI5fjHjgzmlibs2ru4nVw6hBhUvlSSe2BEs5d9h/yNH8Wy3qv -b2D3jh7hkepFtZJGNTHp8ZUC7Ns2JIpQYObsaxdI65i3mMOu7fRwI+0/4ejsWhP6 -KCEiLgwvLg0qM82ma6YB7qHAHboaczRVEffDcJUG4a5uycB0DoZFn+uEaEFyili2 -0hCn4hVfsqUQk2PT8Mo1tSl5e30xI1YJZrRgiJm9nHRX6fLizngP+ILJLPHZsPvl -SVIfY+/v/FR8feKOjaGhyGF51BAxaM2NIQ4jMP5/X+U5gQybi0E6u7rroDhaHsKm -CMgXqszwXWCpedA/sEbeHpiTC59YlPPSlIOMc9vPKo/mQCfWy/9icUaIfKQldvkl 
-lUxxNkqu6AbIpHVscbAEzSPs5xbQXU8EZNNCDisFnnpY3nQ3eLnlm89saTJxRb7N -WHRMlmPv7qgD7uMIq3vdOGA7i5wT9MeoNIgK1/DsgH30s6RWjJy4YyyLmRTXPzbj -hbQVpEmiMRbEidIvUx2OjKVxVQIcgtLsa2lvHQ4XL1cpLr5GVtOgy0fMg5OCDUUD -svjgjgLQ3P2Up2nVY5FM6/QpPc5DTLuuR9ekI2/c9Biz09RtcYDUQK2ajdo8h1Iy -KqHFoB7h48OXxXKKY94DY0TGx6PonB/epj8orAw4QKmm5M0vXYwBOqRymCTHTqOJ -GObdLx1euFFyqguzHJOU2gAGZI0z9Lg1yRuFyhdPZyuniIcmtLNxRZ1duYHErcAy -X56qndmLXt7UVkATai/rIMuoJLfAsUnVuTUS5p7tJM754UZT7lTcXvDJgOUNnBRa -IcxC3pxvbrYDJ2iFJ72xkxUP2p74gucqg25XnCVmQuLg6zDDxF6CLuw9isxyXg4p -kneMN//7fpp8GYl9nyZm2yqYYM+jcw0fcVc64L+X4w/gL3H2UMGgxIHSJp7HIG7V -KHtXrNyjdPXXPVUsMsAAimqOr0Lr2sZWirfuivLaPTqhbkvG5PF7K3gT80AOIcd/ -6EIHBy2hZ7ukfjHmdP4LyQOhTQklaKzGHI0mypq0uFLWJOUlZnVrMiLP1xrWkpC8 -Ro9eo6mfjjQ45z8adC43a47klwTEzvod3rNEFIGJJUEjAN3mbqie7IxoSJknBBJK -0D9lZEQ8lZWlq7vuN8JdqPM6xh155jMVsPwjLK6Tzkj5BpRD9Tgm3u6HPeCRYQ3v -ky71MaixdjTGbWTorqw+/wv3j0KstajivUjYsDTvZtMa9A/bxWyFQDvqOBxPWhwL -770+iS0grXRgKrnD4V8wBDTgp4Je4B2aJjaIBPhKV5rrU8wIy1reZUbItRkIsSY/ -4jRQpHmaU7R8YRyNT2PxOZuNmrE0WqWguxTuhKLs6zQS1geR0BWsTFTI5z9kOJ7p -8GN0tOYt0KZsT0k3COVtfKl1Nm6kaAs2EoYMiyRQAAzBlhrIjph9IgyQ0J4BhOeM -64ZlnWawJcZex8wFHWx9QiP1R4aPAPyCr5RVUdf+h6gf+bR4nmmmqLf6dSWBz0O+ -liwL8WDQbYq5hv0H5Un5rgYzNBI9ESgW1CkH120uQFvsdr+GkZZMh2rNbR4CjmWX -I3wU2+XYW7Yhwto2ZYU7r2AuNEpCimnR28U67ABUjp+8YOOnVaKCg1sVmXw36KJa -bMo6OgbZktt7PVyVkKaz8k8mteEQww7FalXORDI3ffpVYeYaAxyNooIZJ4DaecRI -ga6IJcyuQeUZ9ptgiwpwcUKja3Xe5fJk9/TNI9OS4PQSH3bCoL2m8DeC+usJR+j0 -yfduz54pYqYOWlyzyYOfTfagiD7OtCEh24ypdWfd6/N5UzcHkwE4Yy+DnX9JeDtH -4wL/jreO5BARv0hV78hgac7/+hab2XD/fvsB/+of4Wc3TahsnDUW96xAiz17lEc1 -DKQps76+ZUlCXWd30NlascM07v/zRjPaEKbtPCzmdJE3aIWXJFpgkevvKnGRTasJ -qTavq5rtcPC7WpTZFPf6o+NPVG7FDahOUfIaQo8xkYJFz4JJbPlp54KYdlXB1vec -Lxil+y6MyJirKAt65DzamMq5HsMgoaad9w4t2/kX4STZbLLQP3yJKCT5qhAZguEv -2WVTXemfbW5GwNjm4ZxeIjDUgRMWVN2AQ0S/x+61m4mx6Cunt+YVfI2g3IU3LKl4 -OszpmpHi8rEWODe0op5WmTD9bgQCSXMzGY2JrMY0MEeydhXevA+Upr4d54TZjGew -rAYsI2BFhRdNomTjrW9LXrTPd2+H+Yt91nsxgKSY7ds0TcQtQMDDthCRK6j26PN0 
-Uq170sLfiyOD1857HdOECDljnSfBibjMGYYzvISovw7pCCczq0gMVL1evINos3Nw -aExaoy+HM7qwL8ADZZ8uDBLnXsKnTciQlBaob2rDnb4T4YC2mKgl9zMSGW/0GuGO -2126BFKMmsbxJhVgTjMbcC6vl40GgZoAsg4c60/Dz4mi9CYhZwWhon9ZC6CUWta1 -7DnUgroXxGIcA4FVq9k65lw7u1X7l6dV+t453Stfxk7LiFiYeGYXWE6utY6f4R4T -RJgqidc3TUEz3EywrzQKaZxj01Bo8GzCUYNVSdijIgVh+aCafLcnyAMTLlhXbWi2 -G0Ujg12zgLIT1SvCQGPA0A5M6mgkwJC030kQLfcmOkm1KTLEb+8WvJzGTSQScyU/ -W/8DY4IjiYGhriTJFdoPw19o3z9z6zDP0m58+3nTkVx49T2uV00/JxuTEtBbmYL3 -r3at6P4FVQM/mQijh90oTM2b36dNZUl+YGiU35ZlOnHTcLZ9smeiEnBezq41HgPY -hKElUfXUHAPuJQd0QBtyzMHJ8V2N+Q+/jzCcxyGXRXEp6vp9NwAmNvxP099Yoo0R -9va72BxiPpj/4nmLOOYquH01qOotR6H3li3T9H1+8Vt1ztP4VmBYsAcSruusIQG0 -YWom91yTb3rVbNuU6GF8og6YxvnHeawjHovLBkMEwY/WXodFEaHjFtYp0z9Cbwpj -FK2YAAAAAA== ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-e.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-e.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-e.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-e.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHA6CAMIIDkAIBADGCAcIwgcwCAQAwdjBiMREwDwYDVQQHEwhJ -bnRlcm5ldDEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xNDAyBgNVBAsTK1ZlcmlT -aWduIENsYXNzIDEgQ0EgLSBJbmRpdmlkdWFsIFN1YnNjcmliZXICEGBAmILep9sJ -uN2Pkg1gucowDQYJKoZIhvcNAQEBBQAEQIzO7zLBD+pls7xwAYxEEX6Y+u6/f4O9 -v1pY1naTyeNSCoXqZx2FUHYyQZywbeZ6ZFckGLG8xhZWRTeorhlW4qwwgfACAQAw -gZkwgZIxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQH -EwhCcmlzYmFuZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsT -GURFTU9OU1RSQVRJT04gQU5EIFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBW -QUxVRSBDQQICBG4wDQYJKoZIhvcNAQEBBQAEQFxbBgUclskaGLO23uXO57ctz0If -lYBvJH7YFne6/4nFl9GzU+5pY87BFrRWCzdjBImhF/FCPYaTTAWobWq3p3UwggHD -BgkqhkiG9w0BBwEwGgYIKoZIhvcNAwIwDgICAKAECPL0ZT/zcdokgIIBmFHmyvHK -ynLsN+iZEn19/x4wK1o3ASgTHsbbk9lGicLnuEG8ahJwu0Q9Tdan0URs83ugkRJ4 -L4CoxPJMs3LxpCzILmzncT1SSgi+DrZ6RhkFtdmpeyvHHg+GKJfc0fwZWHjamEHQ 
-KGPDScodl5EObEZjcKaT4n6XS5UrFs0h8kgo6C4sGeGqacof8+ufMO3n3W7RQvLZ -pMupGb7j/Vhq39A9vol6gobA08FtL2iAF2sI0Kn1kWDddqvYC1AfPJNAh9h4fdJF -BSkqZGblTmj2nl6aAR1P5l8mb8ni1Ic6ZuAAWxsdBqQDgwFngnmp8MmE4DaDmVaT -WEh67kOf8BotFl3TLPehjQh6j96s9bH9sKq0HNhsx72pIVN50qVtz0QZ7eik5N0F -lXMrwoffDfOzLmW7Ep05Qbn1hhvkXUhmJYaIPK6j8e5pxf6CuK7kt4NsTM61meSj -5Nle9YiSjfR8Pv2PdPgxFGFZduCPYcrNr7i8nSbNwL/X4Y+wmMAQAD8mAgQNTjDr -8Mj8dAeEsA7cb73EoPsy+avOSgAAAAA= ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ - -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHCMIHMAgEAMHYwYjERMA8GA1UEBxMISW50ZXJuZXQxFzAV -BgNVBAoTDlZlcmlTaWduLCBJbmMuMTQwMgYDVQQLEytWZXJpU2lnbiBDbGFzcyAxIENBIC0gSW5k -aXZpZHVhbCBTdWJzY3JpYmVyAhBgQJiC3qfbCbjdj5INYLnKMA0GCSqGSIb3DQEBAQUABECjscaS -G0U299fqiEAgTqTFQBp8Ai6zzjl557cVb3k6z4QZ7CbqBjSXAjLbh5e7S5Hd/FrFcDnxl1Ka06ha -VHGPMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UE -BxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgRuMA0GCSqG -SIb3DQEBAQUABECsyHXZ1xaiv0UQRvOmVYsaF38AL2XX75wxbCsz5/wOg7g3RP4aicZxaR4sBog0 -f2G1o9om/hu+A0rIYF/L4/GUMIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIAoAQIsozQrnwj -cc2ggASCBAAQz/LPoJe/+iYWeTwSebz6Q9UeKZzQ2UWm7GLtEM3s3c9SCvpmkwIRdEhLjWaBJMyI -DiL7t1I1vMf9inB8LXgAcIEYkpNScjS8ERA9Ebb7ieNKSBg7w7B8ATHFxLSlDADqRgoZrB1Ctfgf -ximp3EgxTgnhtyQhZxXW7kBQyFRwumplrJXOp7albP7IothrOKncw30IJT1fwPxWNMItI9juXF0U -CbWVSjPzGBo4+XNXMvUO6MplOQEz/ywEQ9E8OZAQex1Zw9qq5ppsXB2pMsYV5sLJGikukMYKquiz -3YK+tN6J8ahLcDUs+VGwqvZi17gpBTlbEP+ZmXJpnO63t1yTEB0V5AZcRKWUOhzlCBM5YUagqNoY -cpsmSvOK6bYzkUKOrzWpDCAtGZ/Dvul5dTZZmxs2WpM+iyeHXMxO3huy8K1brPTqt1f1sHhuq1jD 
-1eXedaCjIgUW9qV18vNAQCof/Yb6T/1fxztf/jD7pPLQJ+7LJkKCAEHGcaizpoKqhYcttaEhLq1G -O+Ohqf7yFegMdTJ3wwP324w5ZYSU5fLo2Z34/Edf6EGvXyTIqVfAmEBALd6JGVdN5GlYYTxrL+eO -P80Z4ao4YKoxwEmRp5bmQsQ8B29QhOFKmC6eiG5B96qLMtp7Zmu1grDNxTd6OXShWVwYARD0/B1P -Sy0PAfk9Gb4fAkO9fZJDQYZ7s0mM5iOPEeSR7820TolOb+KfRabLA9d714jsc2jEykKlpP66Bh4j -aCsyqJ0uUQcE8SnzrKAqGwgWiCGQpiTa+HBiP6eRlRGOKQj5Y06vcNx6Ija4cGe6+yCN8HV8tCY0 -okZK98NQCl5t79R/ZB2c3NvBJH+/g3ulU48ikT3tVmDxE3mOZofZyGFEM99P+YCMScLDxTl3hzGy -0YkI8U855P7qOAbcFfh2T5n+LSELwLhbkymEfZT917GWTfmypBWMvJx0WHeDhKwQYPdzbKgWETnc -yeKasaCW+oLdhBwrd6Ws2r4MA8cwiYXDLbwYmCxJA8VF++8kubF2HJOjSyMBS+QT2PSV/0D9UWoi -Vfk7R4OvWBJVvq7nV+lXS0O5igjExxlmx1OaBfg7+Cr/MbK4zVNrKSJn82NnKKt6LC6RaTmvFYay -0sDFxQ7Xo+Th6tDNKmKWJt6Kegfjc+qTWJTKb3kL+UI8vS0zTLy1+M/rZ4ekos/JiS5rYIcAswvg -58kBgp/0rc6upBeWjBaK5O0aLAeBQfLulo1axWX04OSVKmYeoAltyR6UO9ME3acurQyg7Ta24yqO -whi/PrIaEiO7dsWvFtzsshVzBLic02NlAkPkMUzliPYnZHWQglDAVxL5K2qhvK1OFCkQpIgBsBDM -6KYRL/mkBIIEALIl927rIkaN37/BQIcxLcSa05YfC0Hl3mxWESt1A0D4lA37A9S8EbYmDfAYlMc0 -3HhZGdZEtawfpJFyDHzNZceNWBch6nxeNZCY4YFdsbzuGS0RKpwNA9S/czOJ4p9ymBCxuhGepI3U -PKbC8C749Www1/wMdAot1n+K7M/PBGR8hWmaH5SS7U3yMwAB1fq2NDjx4ur+Um+MclSdN01MDXzG -EO+eAo1pdAY8479234l8dB2YVAhZ1ZlJ4KmbqMKJrGJXnQUEYS6/cTDRjsUocsoW7uGg1ci2GiHa -qjlkfpBfie3SdhFW/K8hwAH0HALs56oFN66wUkP/AaJAPfIUNhR6RpHKzZ9zCC42oB2mNawQRMnF -ETBl1s/SwMxLKRp7jAfKs4NZxSY6I9z/2dTpzS3tsHMjxVDuxkolvRNWBILEMeL1CBvip2HhmoUw -/Sz5NDgyzk1aQLV6DQNJ2RZLMZDRCtSwZSBu6lhhSgTJGazP0+NbqXXC5aQTrqrFIcWyDXz+ADle -kszzYM/gSaQTCALTwfDDaU9Ek3xVgW+XBtExtJ3U+0AN3l0j86rUIdIvp6eWdxWQqv9LtpoorKMD -KfUc5PYV09Z1JgsT4X51Zzq+74l5dz7udIM7UNbdTpmRm9PDj3TUbGCvNR9hqOEGTLbkvb1ZR24a -h6uGRl2znB25IpDAGRhNRb9is/pO2tvHwHTDMOjrgvZG/pNvXgSUxz0pRjUjXIcqBe2X2gcQfeal -r8gY76o83WEGL6ODryV9vTQVHt52+izgpYoBZaVlpgqbZl54c+OE0Zxf9RwXwDbcYu5Ku5E0MPL0 -qUjc0y2+Y6E4P5bAWaZGMGT+ORkyVUzcaWmM/+XlO7PER5wrWlCIMZCX1L/nvioY0q0CKqALn7DJ -QU+qenbwrb6uwS7uNZY6V86s0aDYpU7yRyqxC5SbuyNJb02gdxUCgpIscFaMUjMVRml4M4BIjX/b 
-U+HgHoVMUm8SnN9gRcT2izPrgOGVcMTJjfenzoCKoCPo9RjgGMctgB4DvKamErNU7OrilIfuoqzE -PNSeP9SPw/zkDmNvMebM499We9CVnsHUWqF00/ZJWoua77+0f1bLS/tmci1JBvIcMo/4SJvgH+KF -o0gijP9gqAPd5iCOnpnJlHUqRIym42SmyKEDuzdSwXKjAR6j7uXda39JyMJr8gGzEsu0jYRkAmj1 -YdiqwKXUcLMkcj1AKeU/PxTUVw0YKsv/rowrPYww3xQUWqNivrXB7GCHE3BzsYNdHsmziaGIXQbA -+EBHdkuKrM8BcC+fxhF/l/KUxngsD1E75IcUv8zFDF+sk4CBYHqks9S4JYlcubuizqsILbdGzIMN -Z7w34k0XT+sEggQAyzr8MHeIJGsT+AYnZr08PeTbyr01JEoT7lPYT6PzX4F63QKKDl+mB+PwLMzY -CXrxZcUmuay6/MV8w/f5T6vQXdoSw5puWodBYwVReYh1IaEN+jiTapm9YBVmcIsJPO6abHowknSV -OWSvST0AtAX57fFOTckm+facfBK9s9T1lUUgF44Bh5e8f9qKqfOV44nqdCOEyUm0Dao497ieN4Eg -XBLNvOZY9+irMiXjp0lcyFvhrJOczfyCr9EiiaiH1TfSzKGKsf2W84iKn/JH6x2eOo7xjwJ40BQD -c6S1cUNEuqBhP6by0FioOXYOKVyifpxk84Eb+F/4CNdTJTvCPwsiegdfsX/Q53DvKVtXp9Ycam5J -TmKRHXK/bMHF4ONv3p/O/kn/BqRx+fbbP2eMX8Z1F/ltHKfp6B+06HljUwQLBJs9XtCfqH5Zgdz9 -gad5WZF5ykFArmHDgeFlgggvbZ7z9vqnjN/TH68TxJzauYQ5vLHQ6wGXik4/4uq7/TqNmhxlQEM4 -zVkwsn203bUmKLyz+yl1zItDpn5zy1uXfGo99rBdUzdbdE9LmEFPMaFsaHd4a8oDaUroD7FgCbeD -JJVld3ac6F8+3QbExPs48OrgA1kI3/UwXr52ldjiYzTLfAGR9BjqNFTw45FUHuMf8TEM5hcHx56w -95eKAqraDk28o9k+M2UKpcmrdlWoWzdqVVFeWGpM8x9Y9Nt0lf/4VUQgrXjqTkUCQkJyqTeTeGgH -rn3QBk2XAgpxZhaJs3InW0BkAlBmK99cMinUiJeFt5a4p5wPeXrVuh6V9m7Mpl9hzpogg++EZqah -fzzNnDgxOZfW342DX052PdgXo0NnkhCk005LvFt6M2mRn0fLgNVfyUZZoOp8cO5ZWbhXXlrhrgUt -j2zKPK6Q94Zj4kdXHBGpAkrB8ZQ4EGGODE0Dqusm8WPXzB+9236IMHPU7lFbyjBrFNI7O4jg+qRI -Ipi+7tX0FsilqEbmjG+OPwhZXrdqUqyF+rjKQuSRq7lOeDB4c6S2dq4OOny01i5HCbbyc9UvSHRm -hOhGqUlzHyHLo3W7j+26V/MhkDXJ+Tx+qfylv4pbliwTteJJj+CZwzjv29qb6lxYi+38Bw10ERap -m8UCRFBecVN7xXlcIfyeAl666Vi7EBJZv3EdFNrx1nlLwM65nYya7uj6L7IwJWotIUx8E0XH0/cU -xS/dG8bxf9L/8652h5gq3LI+wTNGuEX0DMuz7BGQG+NtgabrZ6SsKGthGa7eULTpz0McWTLRU0y/ -/tkckpm5pDnXSFbIMskwwjECz82UZBSPpigdN/Pjg5d+0yWu7s3VJxw4ENWPPpzZ+j7sOXmdvn9P -O1tQd60EO+3awASCBAAZQvWV3/yJ6FxPttbP+qeURpJoPEZfpN2UYZmd8HqtR0YbaOZ6Rln9nvpd -K9fylXdw9z2xeCbjDWUttJB4VqZxGJM8eCTC1VDVyAOsQ5n7SY55dMkQbU+o4Z/4J5m8+wz50BBI 
-LfruL1eZ6/CF6CdvxVRiJ10sXc0Tn2sVMXqkw7Adp1GYoCI9c6VFSFK74+n+y7LVFQ5HBnbQyKJc -dvdLOXwZOPaFHC5UNXRmOpcwdPqyXUe+xIsOMYbzdlAnI9eGDNeRDktUa/Rh0CbZCxjmJzoZEYOE -ZjsYZlEfp1Kb61t8z4m28hGLEg88T1Ihmxa2HeUWes1RpmgIOP+/2Lb3smj/l/fpSu4gabFgyCAV -H5HdCYMScUv8SVu55+tpeO8ELoHHQUXV4rr084O4budzhgNSOPyLGDl5sfDUXiyusPCxS4JVO/KY -6V2Qrtg/q2wtmXpEkZnGT+Qi3WDzwt4W81alztnYMP17oGLmxX71KV9OEiMZjI4WaaGt+OOINLtR -qefioZ1NI2L1s5M0tybwTsyU9WERM+3pUwXIfJVsbMZRlNaO2OogcHbaR4UWvhOj+3CTG1sThiYQ -MxMnp1Rpqx3nhyzqLO3TRrkYvxnA3cdPBn9EeqpgBMg7X3hCiMV3Fl5cj/WOMhtHYgY7BgeCXo46 -EFVZ4+WroGZ46xGiRDiIblo8bzLd7QCxvukzxy3mUDgsZQ8pds4N28weSUhBk5MAPbfBpRvXUVJx -MhKqXucQU1Md1qSGLbuuIQuz9pAGp1JFUx/vEkCgm74daSoVWCZuB+1ZE4f48clvrBj51xMNf8CP -EFE7vySzVb6X2H1i5X3Z+Y3DdIcWw4Y2FClfcJk4Mwq8Cq2GALGFEge9YSEE9YmyuU6OFeU0ICon -iXAgZ72SM8fBwJPruLFbdsNYKW+oAfmPisXSWMcZmdSbfk0GYv+vKtu3eegSbWw1UsCVtZOh9E5Z -uQ83l59CBqO9sV/SFU3WrrJ0qNWxrmXu9nJn5Qf5iCRoFGYNHYHkIG5FS6N00GEDZxGkxmro2d++ -Adj5LVHc/b1cYWmrux+jEqI8ZK8cyTB0XMbBA/HYbx9NXazr7znP4/Mlv3pZToEcYt+lgLHAArtU -AdhybhbLIwNMq0gr6EwtDklBa3ns4Wx/rJU8H7LGs6gV8uqeaSketv+nz+sQhfctxZ1rx+5qzXfy -FOQVpO23KDQunBi1Bl9k61Di4q9JWcyADBXPHXJzp7mL8Fk7zdvMAEfuED1phdRm6GgDYoYUs4yQ -IrhSjFlWyk7hT8475xk3BIv++obvWSAv/3+pF6A6U2RXDChVmnG0JnPa9wYYtdzBmLfZKBjX+DjD -yEMsuhPsCzuN4R6tBIIBWCVRKmKwdkatmpsQBgDw48u0/Arffl5/DRlS9ee+QffFecUitDdCK+kt -X5L2fGYrL5g6SltncMIeV1ptx4nuSjC/O944q1KYtqvQiPFWJqEXIRMNbbYOC47sjLza0tEFrimN -wxcrWGSzsy5R9beFQ1aHPcMrDWfCoviNRk2qPtxuKIC5Qk2ZuOmJLjCiLwUGEb0/1Mpzv3MqQa7d -mRayXg3DZWJPajxNZv6eS357ElMvwGQmqafb2mlQJwWLsg9m9PG7uqEoyrqSc6MiuY+icLEFib9j -OfRQrx70rTSKUfTr4MtP0aZZAefjCrpVIyTekhFDOk0Nmx057eonlyGgmGpl5/Uo+t1J1Z11Ya/l -bNbfmebRISJeTVW0I8FhseAZMI1GSwp/ludJxSLYOgyRkh+GX134MexNo7O9F1SxLCfWaSG9Fc3s -5ify04ua9/t8SGrYZPm/l3MkAAAAAAAAAAAAAA== - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e.pem 2014-05-02 
00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/msie-s-a-e.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHA6CAMIITUAIBADGCAcIwgcwCAQAwdjBiMREwDwYDVQQHEwhJ -bnRlcm5ldDEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xNDAyBgNVBAsTK1ZlcmlT -aWduIENsYXNzIDEgQ0EgLSBJbmRpdmlkdWFsIFN1YnNjcmliZXICEGBAmILep9sJ -uN2Pkg1gucowDQYJKoZIhvcNAQEBBQAEQKOxxpIbRTb31+qIQCBOpMVAGnwCLrPO -OXnntxVveTrPhBnsJuoGNJcCMtuHl7tLkd38WsVwOfGXUprTqFpUcY8wgfACAQAw -gZkwgZIxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQH -EwhCcmlzYmFuZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsT -GURFTU9OU1RSQVRJT04gQU5EIFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBW -QUxVRSBDQQICBG4wDQYJKoZIhvcNAQEBBQAEQKzIddnXFqK/RRBG86ZVixoXfwAv -ZdfvnDFsKzPn/A6DuDdE/hqJxnFpHiwGiDR/YbWj2ib+G74DSshgX8vj8ZQwghGD -BgkqhkiG9w0BBwEwGgYIKoZIhvcNAwIwDgICAKAECLKM0K58I3HNgIIRWBDP8s+g -l7/6JhZ5PBJ5vPpD1R4pnNDZRabsYu0Qzezdz1IK+maTAhF0SEuNZoEkzIgOIvu3 -UjW8x/2KcHwteABwgRiSk1JyNLwRED0RtvuJ40pIGDvDsHwBMcXEtKUMAOpGChms -HUK1+B/GKancSDFOCeG3JCFnFdbuQFDIVHC6amWslc6ntqVs/sii2Gs4qdzDfQgl -PV/A/FY0wi0j2O5cXRQJtZVKM/MYGjj5c1cy9Q7oymU5ATP/LARD0Tw5kBB7HVnD -2qrmmmxcHakyxhXmwskaKS6Qxgqq6LPdgr603onxqEtwNSz5UbCq9mLXuCkFOVsQ -/5mZcmmc7re3XJMQHRXkBlxEpZQ6HOUIEzlhRqCo2hhymyZK84rptjORQo6vNakM -IC0Zn8O+6Xl1NlmbGzZakz6LJ4dczE7eG7LwrVus9Oq3V/WweG6rWMPV5d51oKMi -BRb2pXXy80BAKh/9hvpP/V/HO1/+MPuk8tAn7ssmQoIAQcZxqLOmgqqFhy21oSEu -rUY746Gp/vIV6Ax1MnfDA/fbjDllhJTl8ujZnfj8R1/oQa9fJMipV8CYQEAt3okZ -V03kaVhhPGsv544/zRnhqjhgqjHASZGnluZCxDwHb1CE4UqYLp6IbkH3qosy2ntm -a7WCsM3FN3o5dKFZXBgBEPT8HU9LLQ8B+T0Zvh8CQ719kkNBhnuzSYzmI48R5JHv -zbROiU5v4p9FpssD13vXiOxzaMTKQqWk/roGHiNoKzKonS5RBwTxKfOsoCobCBaI -IZCmJNr4cGI/p5GVEY4pCPljTq9w3HoiNrhwZ7r7II3wdXy0JjSiRkr3w1AKXm3v -1H9kHZzc28Ekf7+De6VTjyKRPe1WYPETeY5mh9nIYUQz30/5gIxJwsPFOXeHMbLR -iQjxTznk/uo4BtwV+HZPmf4tIQvAuFuTKYR9lP3XsZZN+bKkFYy8nHRYd4OErBBg -93NsqBYROdzJ4pqxoJb6gt2EHCt3pazavgwDxzCJhcMtvBiYLEkDxUX77yS5sXYc 
-k6NLIwFL5BPY9JX/QP1RaiJV+TtHg69YElW+rudX6VdLQ7mKCMTHGWbHU5oF+Dv4 -Kv8xsrjNU2spImfzY2coq3osLpFpOa8VhrLSwMXFDtej5OHq0M0qYpYm3op6B+Nz -6pNYlMpveQv5Qjy9LTNMvLX4z+tnh6Siz8mJLmtghwCzC+DnyQGCn/Stzq6kF5aM -Fork7RosB4FB8u6WjVrFZfTg5JUqZh6gCW3JHpQ70wTdpy6tDKDtNrbjKo7CGL8+ -shoSI7t2xa8W3OyyFXMEuJzTY2UCQ+QxTOWI9idkdZCCUMBXEvkraqG8rU4UKRCk -iAGwEMzophEv+aSyJfdu6yJGjd+/wUCHMS3EmtOWHwtB5d5sVhErdQNA+JQN+wPU -vBG2Jg3wGJTHNNx4WRnWRLWsH6SRcgx8zWXHjVgXIep8XjWQmOGBXbG87hktESqc -DQPUv3MzieKfcpgQsboRnqSN1DymwvAu+PVsMNf8DHQKLdZ/iuzPzwRkfIVpmh+U -ku1N8jMAAdX6tjQ48eLq/lJvjHJUnTdNTA18xhDvngKNaXQGPOO/dt+JfHQdmFQI -WdWZSeCpm6jCiaxiV50FBGEuv3Ew0Y7FKHLKFu7hoNXIthoh2qo5ZH6QX4nt0nYR -VvyvIcAB9BwC7OeqBTeusFJD/wGiQD3yFDYUekaRys2fcwguNqAdpjWsEETJxREw -ZdbP0sDMSykae4wHyrODWcUmOiPc/9nU6c0t7bBzI8VQ7sZKJb0TVgSCxDHi9Qgb -4qdh4ZqFMP0s+TQ4Ms5NWkC1eg0DSdkWSzGQ0QrUsGUgbupYYUoEyRmsz9PjW6l1 -wuWkE66qxSHFsg18/gA5XpLM82DP4EmkEwgC08Hww2lPRJN8VYFvlwbRMbSd1PtA -Dd5dI/Oq1CHSL6enlncVkKr/S7aaKKyjAyn1HOT2FdPWdSYLE+F+dWc6vu+JeXc+ -7nSDO1DW3U6ZkZvTw4901GxgrzUfYajhBky25L29WUduGoerhkZds5wduSKQwBkY -TUW/YrP6Ttrbx8B0wzDo64L2Rv6Tb14ElMc9KUY1I1yHKgXtl9oHEH3mpa/IGO+q -PN1hBi+jg68lfb00FR7edvos4KWKAWWlZaYKm2ZeeHPjhNGcX/UcF8A23GLuSruR -NDDy9KlI3NMtvmOhOD+WwFmmRjBk/jkZMlVM3GlpjP/l5TuzxEecK1pQiDGQl9S/ -574qGNKtAiqgC5+wyUFPqnp28K2+rsEu7jWWOlfOrNGg2KVO8kcqsQuUm7sjSW9N -oHcVAoKSLHBWjFIzFUZpeDOASI1/21Ph4B6FTFJvEpzfYEXE9osz64DhlXDEyY33 -p86AiqAj6PUY4BjHLYAeA7ymphKzVOzq4pSH7qKsxDzUnj/Uj8P85A5jbzHmzOPf -VnvQlZ7B1FqhdNP2SVqLmu+/tH9Wy0v7ZnItSQbyHDKP+Eib4B/ihaNIIoz/YKgD -3eYgjp6ZyZR1KkSMpuNkpsihA7s3UsFyowEeo+7l3Wt/ScjCa/IBsxLLtI2EZAJo -9WHYqsCl1HCzJHI9QCnlPz8U1FcNGCrL/66MKz2MMN8UFFqjYr61wexghxNwc7GD -XR7Js4mhiF0GwPhAR3ZLiqzPAXAvn8YRf5fylMZ4LA9RO+SHFL/MxQxfrJOAgWB6 -pLPUuCWJXLm7os6rCC23RsyDDWe8N+JNF0/ryzr8MHeIJGsT+AYnZr08PeTbyr01 -JEoT7lPYT6PzX4F63QKKDl+mB+PwLMzYCXrxZcUmuay6/MV8w/f5T6vQXdoSw5pu -WodBYwVReYh1IaEN+jiTapm9YBVmcIsJPO6abHowknSVOWSvST0AtAX57fFOTckm -+facfBK9s9T1lUUgF44Bh5e8f9qKqfOV44nqdCOEyUm0Dao497ieN4EgXBLNvOZY 
-9+irMiXjp0lcyFvhrJOczfyCr9EiiaiH1TfSzKGKsf2W84iKn/JH6x2eOo7xjwJ4 -0BQDc6S1cUNEuqBhP6by0FioOXYOKVyifpxk84Eb+F/4CNdTJTvCPwsiegdfsX/Q -53DvKVtXp9Ycam5JTmKRHXK/bMHF4ONv3p/O/kn/BqRx+fbbP2eMX8Z1F/ltHKfp -6B+06HljUwQLBJs9XtCfqH5Zgdz9gad5WZF5ykFArmHDgeFlgggvbZ7z9vqnjN/T -H68TxJzauYQ5vLHQ6wGXik4/4uq7/TqNmhxlQEM4zVkwsn203bUmKLyz+yl1zItD -pn5zy1uXfGo99rBdUzdbdE9LmEFPMaFsaHd4a8oDaUroD7FgCbeDJJVld3ac6F8+ -3QbExPs48OrgA1kI3/UwXr52ldjiYzTLfAGR9BjqNFTw45FUHuMf8TEM5hcHx56w -95eKAqraDk28o9k+M2UKpcmrdlWoWzdqVVFeWGpM8x9Y9Nt0lf/4VUQgrXjqTkUC -QkJyqTeTeGgHrn3QBk2XAgpxZhaJs3InW0BkAlBmK99cMinUiJeFt5a4p5wPeXrV -uh6V9m7Mpl9hzpogg++EZqahfzzNnDgxOZfW342DX052PdgXo0NnkhCk005LvFt6 -M2mRn0fLgNVfyUZZoOp8cO5ZWbhXXlrhrgUtj2zKPK6Q94Zj4kdXHBGpAkrB8ZQ4 -EGGODE0Dqusm8WPXzB+9236IMHPU7lFbyjBrFNI7O4jg+qRIIpi+7tX0FsilqEbm -jG+OPwhZXrdqUqyF+rjKQuSRq7lOeDB4c6S2dq4OOny01i5HCbbyc9UvSHRmhOhG -qUlzHyHLo3W7j+26V/MhkDXJ+Tx+qfylv4pbliwTteJJj+CZwzjv29qb6lxYi+38 -Bw10ERapm8UCRFBecVN7xXlcIfyeAl666Vi7EBJZv3EdFNrx1nlLwM65nYya7uj6 -L7IwJWotIUx8E0XH0/cUxS/dG8bxf9L/8652h5gq3LI+wTNGuEX0DMuz7BGQG+Nt -gabrZ6SsKGthGa7eULTpz0McWTLRU0y//tkckpm5pDnXSFbIMskwwjECz82UZBSP -pigdN/Pjg5d+0yWu7s3VJxw4ENWPPpzZ+j7sOXmdvn9PO1tQd60EO+3awBlC9ZXf -/InoXE+21s/6p5RGkmg8Rl+k3ZRhmZ3weq1HRhto5npGWf2e+l0r1/KVd3D3PbF4 -JuMNZS20kHhWpnEYkzx4JMLVUNXIA6xDmftJjnl0yRBtT6jhn/gnmbz7DPnQEEgt -+u4vV5nr8IXoJ2/FVGInXSxdzROfaxUxeqTDsB2nUZigIj1zpUVIUrvj6f7LstUV -DkcGdtDIolx290s5fBk49oUcLlQ1dGY6lzB0+rJdR77Eiw4xhvN2UCcj14YM15EO -S1Rr9GHQJtkLGOYnOhkRg4RmOxhmUR+nUpvrW3zPibbyEYsSDzxPUiGbFrYd5RZ6 -zVGmaAg4/7/YtveyaP+X9+lK7iBpsWDIIBUfkd0JgxJxS/xJW7nn62l47wQugcdB -RdXiuvTzg7hu53OGA1I4/IsYOXmx8NReLK6w8LFLglU78pjpXZCu2D+rbC2ZekSR -mcZP5CLdYPPC3hbzVqXO2dgw/XugYubFfvUpX04SIxmMjhZpoa3444g0u1Gp5+Kh -nU0jYvWzkzS3JvBOzJT1YREz7elTBch8lWxsxlGU1o7Y6iBwdtpHhRa+E6P7cJMb -WxOGJhAzEyenVGmrHeeHLOos7dNGuRi/GcDdx08Gf0R6qmAEyDtfeEKIxXcWXlyP -9Y4yG0diBjsGB4JejjoQVVnj5augZnjrEaJEOIhuWjxvMt3tALG+6TPHLeZQOCxl -Dyl2zg3bzB5JSEGTkwA9t8GlG9dRUnEyEqpe5xBTUx3WpIYtu64hC7P2kAanUkVT 
-H+8SQKCbvh1pKhVYJm4H7VkTh/jxyW+sGPnXEw1/wI8QUTu/JLNVvpfYfWLlfdn5 -jcN0hxbDhjYUKV9wmTgzCrwKrYYAsYUSB71hIQT1ibK5To4V5TQgKieJcCBnvZIz -x8HAk+u4sVt2w1gpb6gB+Y+KxdJYxxmZ1Jt+TQZi/68q27d56BJtbDVSwJW1k6H0 -Tlm5DzeXn0IGo72xX9IVTdausnSo1bGuZe72cmflB/mIJGgUZg0dgeQgbkVLo3TQ -YQNnEaTGaujZ374B2PktUdz9vVxhaau7H6MSojxkrxzJMHRcxsED8dhvH01drOvv -Oc/j8yW/ellOgRxi36WAscACu1QB2HJuFssjA0yrSCvoTC0OSUFreezhbH+slTwf -ssazqBXy6p5pKR62/6fP6xCF9y3FnWvH7mrNd/IU5BWk7bcoNC6cGLUGX2TrUOLi -r0lZzIAMFc8dcnOnuYvwWTvN28wAR+4QPWmF1GboaANihhSzjJAiuFKMWVbKTuFP -zjvnGTcEi/76hu9ZIC//f6kXoDpTZFcMKFWacbQmc9r3Bhi13MGYt9koGNf4OMPI -Qyy6E+wLO43hHq0lUSpisHZGrZqbEAYA8OPLtPwK335efw0ZUvXnvkH3xXnFIrQ3 -QivpLV+S9nxmKy+YOkpbZ3DCHldabceJ7kowvzveOKtSmLar0IjxViahFyETDW22 -DguO7Iy82tLRBa4pjcMXK1hks7MuUfW3hUNWhz3DKw1nwqL4jUZNqj7cbiiAuUJN -mbjpiS4woi8FBhG9P9TKc79zKkGu3ZkWsl4Nw2ViT2o8TWb+nkt+exJTL8BkJqmn -29ppUCcFi7IPZvTxu7qhKMq6knOjIrmPonCxBYm/Yzn0UK8e9K00ilH06+DLT9Gm -WQHn4wq6VSMk3pIRQzpNDZsdOe3qJ5choJhqZef1KPrdSdWddWGv5WzW35nm0SEi -Xk1VtCPBYbHgGTCNRksKf5bnScUi2DoMkZIfhl9d+DHsTaOzvRdUsSwn1mkhvRXN -7OYn8tOLmvf7fEhq2GT5v5dzJAAAAAA= ------END PKCS7----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/nav-smime nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/nav-smime --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/nav-smime 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/nav-smime 1970-01-01 00:00:00.000000000 +0000 @@ -1,157 +0,0 @@ -From angela@c2.net.au Thu May 14 13:32:27 1998 -X-UIDL: 83c94dd550e54329bf9571b72038b8c8 -Return-Path: angela@c2.net.au -Received: from cryptsoft.com (play.cryptsoft.com [203.56.44.3]) by pandora.cryptsoft.com (8.8.3/8.7.3) with ESMTP id NAA27838 for <tjh@cryptsoft.com>; Thu, 14 May 1998 13:32:26 +1000 (EST) -Message-ID: <355A6779.4B63E64C@cryptsoft.com> -Date: Thu, 14 May 1998 13:39:37 +1000 -From: Angela van Lent <angela@c2.net.au> -X-Mailer: Mozilla 4.03 [en] (Win95; U) -MIME-Version: 1.0 -To: tjh@cryptsoft.com -Subject: 
signed -Content-Type: multipart/signed; protocol="application/x-pkcs7-signature"; micalg=sha1; boundary="------------ms9A58844C95949ECC78A1C54C" -Content-Length: 2604 -Status: OR - -This is a cryptographically signed message in MIME format. - ---------------ms9A58844C95949ECC78A1C54C -Content-Type: text/plain; charset=us-ascii -Content-Transfer-Encoding: 7bit - -signed body - ---------------ms9A58844C95949ECC78A1C54C -Content-Type: application/x-pkcs7-signature; name="smime.p7s" -Content-Transfer-Encoding: base64 -Content-Disposition: attachment; filename="smime.p7s" -Content-Description: S/MIME Cryptographic Signature - -MIIGHgYJKoZIhvcNAQcCoIIGDzCCBgsCAQExCzAJBgUrDgMCGgUAMAsGCSqGSIb3DQEHAaCC -BGswggJTMIIB/aADAgECAgIEfjANBgkqhkiG9w0BAQQFADCBkjELMAkGA1UEBhMCQVUxEzAR -BgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNv -ZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UE -AxMSREVNTyBaRVJPIFZBTFVFIENBMB4XDTk4MDUxMzA2MjY1NloXDTAwMDUxMjA2MjY1Nlow -gaUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFu -ZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxEjAQBgNVBAsTCVNNSU1FIDAwMzEZMBcG -A1UEAxMQQW5nZWxhIHZhbiBMZWVudDEjMCEGCSqGSIb3DQEJARYUYW5nZWxhQGNyeXB0c29m -dC5jb20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEAuC3+7dAb2LhuO7gt2cTM8vsNjhG5JfDh -hX1Vl/wVGbKEEj0MA6vWEolvefQlxB+EzwCtR0YZ7eEC/T/4JoCyeQIDAQABoygwJjAkBglg -hkgBhvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EAUnSP -igs6TMFISTjw8cBtJYb98czgAVkVFjKyJQwYMH8FbDnCyx6NocM555nsyDstaw8fKR11Khds -syd3ikkrhDCCAhAwggG6AgEDMA0GCSqGSIb3DQEBBAUAMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0EwHhcNOTgwMzAzMDc0MTMyWhcNMDgwMjI5MDc0MTMyWjCB -kjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5l -MRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBB 
-TkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENBMFwwDQYJKoZIhvcNAQEB -BQADSwAwSAJBAL+0E2fLej3FSCwe2A2iRnMuC3z12qHIp6Ky1wo2zZcxft7AI+RfkrWrSGtf -mfzBEuPrLdfulncC5Y1pNcM8RTUCAwEAATANBgkqhkiG9w0BAQQFAANBAGSbLMphL6F5pp3s -8o0Xyh86FHFdpVOwYx09ELLkuG17V/P9pgIc0Eo/gDMbN+KT3IdgECf8S//pCRA6RrNjcXIx -ggF7MIIBdwIBATCBmTCBkjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAP -BgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZ -REVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENB -AgIEfjAJBgUrDgMCGgUAoHowGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAbBgkqhkiG9w0B -CQ8xDjAMMAoGCCqGSIb3DQMHMBwGCSqGSIb3DQEJBTEPFw05ODA1MTQwMzM5MzdaMCMGCSqG -SIb3DQEJBDEWBBQstNMnSV26ba8PapQEDhO21yNFrjANBgkqhkiG9w0BAQEFAARAW9Xb9YXv -BfcNkutgFX9Gr8iXhBVsNtGEVrjrpkQwpKa7jHI8SjAlLhk/4RFwDHf+ISB9Np3Z1WDWnLcA -9CWR6g== ---------------ms9A58844C95949ECC78A1C54C-- - - -From angela@c2.net.au Thu May 14 13:33:16 1998 -X-UIDL: 8f076c44ff7c5967fd5b00c4588a8731 -Return-Path: angela@c2.net.au -Received: from cryptsoft.com (play.cryptsoft.com [203.56.44.3]) by pandora.cryptsoft.com (8.8.3/8.7.3) with ESMTP id NAA27847 for <tjh@cryptsoft.com>; Thu, 14 May 1998 13:33:15 +1000 (EST) -Message-ID: <355A67AB.2AF38806@cryptsoft.com> -Date: Thu, 14 May 1998 13:40:27 +1000 -From: Angela van Lent <angela@c2.net.au> -X-Mailer: Mozilla 4.03 [en] (Win95; U) -MIME-Version: 1.0 -To: tjh@cryptsoft.com -Subject: signed -Content-Type: multipart/signed; protocol="application/x-pkcs7-signature"; micalg=sha1; boundary="------------msD7863B84BD61E02C407F2F5E" -Content-Length: 2679 -Status: OR - -This is a cryptographically signed message in MIME format. 
- ---------------msD7863B84BD61E02C407F2F5E -Content-Type: text/plain; charset=us-ascii -Content-Transfer-Encoding: 7bit - -signed body 2 - ---------------msD7863B84BD61E02C407F2F5E -Content-Type: application/x-pkcs7-signature; name="smime.p7s" -Content-Transfer-Encoding: base64 -Content-Disposition: attachment; filename="smime.p7s" -Content-Description: S/MIME Cryptographic Signature - -MIIGVgYJKoZIhvcNAQcCoIIGRzCCBkMCAQExCzAJBgUrDgMCGgUAMAsGCSqGSIb3DQEHAaCC -BGswggJTMIIB/aADAgECAgIEfjANBgkqhkiG9w0BAQQFADCBkjELMAkGA1UEBhMCQVUxEzAR -BgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNv -ZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UE -AxMSREVNTyBaRVJPIFZBTFVFIENBMB4XDTk4MDUxMzA2MjY1NloXDTAwMDUxMjA2MjY1Nlow -gaUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFu -ZTEaMBgGA1UEChMRQ3J5cHRzb2Z0IFB0eSBMdGQxEjAQBgNVBAsTCVNNSU1FIDAwMzEZMBcG -A1UEAxMQQW5nZWxhIHZhbiBMZWVudDEjMCEGCSqGSIb3DQEJARYUYW5nZWxhQGNyeXB0c29m -dC5jb20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEAuC3+7dAb2LhuO7gt2cTM8vsNjhG5JfDh -hX1Vl/wVGbKEEj0MA6vWEolvefQlxB+EzwCtR0YZ7eEC/T/4JoCyeQIDAQABoygwJjAkBglg -hkgBhvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EAUnSP -igs6TMFISTjw8cBtJYb98czgAVkVFjKyJQwYMH8FbDnCyx6NocM555nsyDstaw8fKR11Khds -syd3ikkrhDCCAhAwggG6AgEDMA0GCSqGSIb3DQEBBAUAMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0EwHhcNOTgwMzAzMDc0MTMyWhcNMDgwMjI5MDc0MTMyWjCB -kjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAPBgNVBAcTCEJyaXNiYW5l -MRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZREVNT05TVFJBVElPTiBB -TkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENBMFwwDQYJKoZIhvcNAQEB -BQADSwAwSAJBAL+0E2fLej3FSCwe2A2iRnMuC3z12qHIp6Ky1wo2zZcxft7AI+RfkrWrSGtf -mfzBEuPrLdfulncC5Y1pNcM8RTUCAwEAATANBgkqhkiG9w0BAQQFAANBAGSbLMphL6F5pp3s 
-8o0Xyh86FHFdpVOwYx09ELLkuG17V/P9pgIc0Eo/gDMbN+KT3IdgECf8S//pCRA6RrNjcXIx -ggGzMIIBrwIBATCBmTCBkjELMAkGA1UEBhMCQVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxETAP -BgNVBAcTCEJyaXNiYW5lMRowGAYDVQQKExFDcnlwdHNvZnQgUHR5IEx0ZDEiMCAGA1UECxMZ -REVNT05TVFJBVElPTiBBTkQgVEVTVElORzEbMBkGA1UEAxMSREVNTyBaRVJPIFZBTFVFIENB -AgIEfjAJBgUrDgMCGgUAoIGxMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcN -AQkFMQ8XDTk4MDUxNDAzNDAyN1owIwYJKoZIhvcNAQkEMRYEFOKcV8mNYJnM8rHQajcSEqJN -rwdDMFIGCSqGSIb3DQEJDzFFMEMwCgYIKoZIhvcNAwcwDgYIKoZIhvcNAwICAgCAMAcGBSsO -AwIHMA0GCCqGSIb3DQMCAgFAMA0GCCqGSIb3DQMCAgEoMA0GCSqGSIb3DQEBAQUABEADPE/N -coH+zTFuX5YpolupTKxKK8eEjc48TuADuO8bIHHDE/fEYaWunlwDuTlcFJl1ig0idffPB1qC -Zp8SSVVY ---------------msD7863B84BD61E02C407F2F5E-- - - -From angela@c2.net.au Thu May 14 14:05:32 1998 -X-UIDL: a7d629b4b9acacaee8b39371b860a32a -Return-Path: angela@c2.net.au -Received: from cryptsoft.com (play.cryptsoft.com [203.56.44.3]) by pandora.cryptsoft.com (8.8.3/8.7.3) with ESMTP id OAA28033 for <tjh@cryptsoft.com>; Thu, 14 May 1998 14:05:32 +1000 (EST) -Message-ID: <355A6F3B.AC385981@cryptsoft.com> -Date: Thu, 14 May 1998 14:12:43 +1000 -From: Angela van Lent <angela@c2.net.au> -X-Mailer: Mozilla 4.03 [en] (Win95; U) -MIME-Version: 1.0 -To: tjh@cryptsoft.com -Subject: encrypted -Content-Type: application/x-pkcs7-mime; name="smime.p7m" -Content-Transfer-Encoding: base64 -Content-Disposition: attachment; filename="smime.p7m" -Content-Description: S/MIME Encrypted Message -Content-Length: 905 -Status: OR - -MIAGCSqGSIb3DQEHA6CAMIACAQAxggHmMIHwAgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEG -A1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNUUkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQD -ExJERU1PIFpFUk8gVkFMVUUgQ0ECAgR+MA0GCSqGSIb3DQEBAQUABEA92N29Yk39RUY2tIVd -exGT2MFX3J6H8LB8aDRJjw7843ALgJ5zXpM5+f80QkAWwEN2A6Pl3VxiCeKLi435zXVyMIHw -AgEAMIGZMIGSMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDERMA8GA1UEBxMI 
-QnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29mdCBQdHkgTHRkMSIwIAYDVQQLExlERU1PTlNU -UkFUSU9OIEFORCBURVNUSU5HMRswGQYDVQQDExJERU1PIFpFUk8gVkFMVUUgQ0ECAgRuMA0G -CSqGSIb3DQEBAQUABECR9IfyHtvnjFmZ8B2oUCEs1vxMsG0u1kxKE4RMPFyDqDCEARq7zXMg -nzSUI7Wgv5USSKDqcLRJeW+jvYURv/nJMIAGCSqGSIb3DQEHATAaBggqhkiG9w0DAjAOAgIA -oAQIrLqrij2ZMpeggAQoibtn6reRZWuWk5Iv5IAhgitr8EYE4w4ySQ7EMB6mTlBoFpccUMWX -BwQgQn1UoWCvYAlhDzURdbui64Dc0rS2wtj+kE/InS6y25EEEPe4NUKaF8/UlE+lo3LtILQE -CL3uV8k7m0iqAAAAAAAAAAAAAA== - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/server.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/server.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/server.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/server.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -issuer :/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=DEMONSTRATION AND TESTING/CN=DEMO ZERO VALUE CA -subject:/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=SMIME 003/CN=Information/Email=info@cryptsoft.com -serial :047D - -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1149 (0x47d) - Signature Algorithm: md5withRSAEncryption - Issuer: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=DEMONSTRATION AND TESTING, CN=DEMO ZERO VALUE CA - Validity - Not Before: May 13 05:40:58 1998 GMT - Not After : May 12 05:40:58 2000 GMT - Subject: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=SMIME 003, CN=Information/Email=info@cryptsoft.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Modulus: - 00:ad:e7:23:89:ee:0d:87:b7:9c:32:44:4b:95:81: - 73:dd:22:80:4b:2d:c5:60:b8:fe:1e:18:63:ef:dc: - 89:89:22:df:95:3c:7a:db:3d:9a:06:a8:08:d6:29: - fd:ef:41:09:91:ed:bc:ad:98:f9:f6:28:90:62:6f: - e7:e7:0c:4d:0b - Exponent: 65537 (0x10001) - X509v3 extensions: - Netscape Comment: - Generated with SSLeay - Signature Algorithm: md5withRSAEncryption - 52:15:ea:88:f4:f0:f9:0b:ef:ce:d5:f8:83:40:61:16:5e:55: - 
f9:ce:2d:d1:8b:31:5c:03:c6:2d:10:7c:61:d5:5c:0a:42:97: - d1:fd:65:b6:b6:84:a5:39:ec:46:ec:fc:e0:0d:d9:22:da:1b: - 50:74:ad:92:cb:4e:90:e5:fa:7d - ------BEGIN CERTIFICATE----- -MIICTDCCAfagAwIBAgICBH0wDQYJKoZIhvcNAQEEBQAwgZIxCzAJBgNVBAYTAkFV -MRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFuZTEaMBgGA1UE -ChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsTGURFTU9OU1RSQVRJT04gQU5E -IFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBWQUxVRSBDQTAeFw05ODA1MTMw -NTQwNThaFw0wMDA1MTIwNTQwNThaMIGeMQswCQYDVQQGEwJBVTETMBEGA1UECBMK -UXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMRIwEAYDVQQLEwlTTUlNRSAwMDMxFDASBgNVBAMTC0luZm9ybWF0 -aW9uMSEwHwYJKoZIhvcNAQkBFhJpbmZvQGNyeXB0c29mdC5jb20wXDANBgkqhkiG -9w0BAQEFAANLADBIAkEArecjie4Nh7ecMkRLlYFz3SKASy3FYLj+Hhhj79yJiSLf -lTx62z2aBqgI1in970EJke28rZj59iiQYm/n5wxNCwIDAQABoygwJjAkBglghkgB -hvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EA -UhXqiPTw+QvvztX4g0BhFl5V+c4t0YsxXAPGLRB8YdVcCkKX0f1ltraEpTnsRuz8 -4A3ZItobUHStkstOkOX6fQ== ------END CERTIFICATE----- - ------BEGIN RSA PRIVATE KEY----- -MIIBOgIBAAJBAK3nI4nuDYe3nDJES5WBc90igEstxWC4/h4YY+/ciYki35U8ets9 -mgaoCNYp/e9BCZHtvK2Y+fYokGJv5+cMTQsCAwEAAQJBAIHpvXvqEcOEoDRRHuIG -fkcB4jPHcr9KE9TpxabH6xs9beN6OJnkePXAHwaz5MnUgSnbpOKq+cw8miKjXwe/ -zVECIQDVLwncT2lRmXarEYHzb+q/0uaSvKhWKKt3kJasLNTrAwIhANDUc/ghut29 -p3jJYjurzUKuG774/5eLjPLsxPPIZzNZAiA/10hSq41UnGqHLEUIS9m2/EeEZe7b -bm567dfRU9OnVQIgDo8ROrZXSchEGbaog5J5r/Fle83uO8l93R3GqVxKXZkCIFfk -IPD5PIYQAyyod3hyKKza7ZP4CGY4oOfZetbkSGGG ------END RSA PRIVATE KEY----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/s.pem nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/s.pem --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/t/s.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/t/s.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIBOgIBAAJBAK3nI4nuDYe3nDJES5WBc90igEstxWC4/h4YY+/ciYki35U8ets9 
-mgaoCNYp/e9BCZHtvK2Y+fYokGJv5+cMTQsCAwEAAQJBAIHpvXvqEcOEoDRRHuIG -fkcB4jPHcr9KE9TpxabH6xs9beN6OJnkePXAHwaz5MnUgSnbpOKq+cw8miKjXwe/ -zVECIQDVLwncT2lRmXarEYHzb+q/0uaSvKhWKKt3kJasLNTrAwIhANDUc/ghut29 -p3jJYjurzUKuG774/5eLjPLsxPPIZzNZAiA/10hSq41UnGqHLEUIS9m2/EeEZe7b -bm567dfRU9OnVQIgDo8ROrZXSchEGbaog5J5r/Fle83uO8l93R3GqVxKXZkCIFfk -IPD5PIYQAyyod3hyKKza7ZP4CGY4oOfZetbkSGGG ------END RSA PRIVATE KEY----- -issuer :/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=DEMONSTRATION AND TESTING/CN=DEMO ZERO VALUE CA -subject:/C=AU/SP=Queensland/L=Brisbane/O=Cryptsoft Pty Ltd/OU=SMIME 003/CN=Information/Email=info@cryptsoft.com -serial :047D - -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1149 (0x47d) - Signature Algorithm: md5withRSAEncryption - Issuer: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=DEMONSTRATION AND TESTING, CN=DEMO ZERO VALUE CA - Validity - Not Before: May 13 05:40:58 1998 GMT - Not After : May 12 05:40:58 2000 GMT - Subject: C=AU, SP=Queensland, L=Brisbane, O=Cryptsoft Pty Ltd, OU=SMIME 003, CN=Information/Email=info@cryptsoft.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Modulus: - 00:ad:e7:23:89:ee:0d:87:b7:9c:32:44:4b:95:81: - 73:dd:22:80:4b:2d:c5:60:b8:fe:1e:18:63:ef:dc: - 89:89:22:df:95:3c:7a:db:3d:9a:06:a8:08:d6:29: - fd:ef:41:09:91:ed:bc:ad:98:f9:f6:28:90:62:6f: - e7:e7:0c:4d:0b - Exponent: 65537 (0x10001) - X509v3 extensions: - Netscape Comment: - Generated with SSLeay - Signature Algorithm: md5withRSAEncryption - 52:15:ea:88:f4:f0:f9:0b:ef:ce:d5:f8:83:40:61:16:5e:55: - f9:ce:2d:d1:8b:31:5c:03:c6:2d:10:7c:61:d5:5c:0a:42:97: - d1:fd:65:b6:b6:84:a5:39:ec:46:ec:fc:e0:0d:d9:22:da:1b: - 50:74:ad:92:cb:4e:90:e5:fa:7d - ------BEGIN CERTIFICATE----- -MIICTDCCAfagAwIBAgICBH0wDQYJKoZIhvcNAQEEBQAwgZIxCzAJBgNVBAYTAkFV -MRMwEQYDVQQIEwpRdWVlbnNsYW5kMREwDwYDVQQHEwhCcmlzYmFuZTEaMBgGA1UE -ChMRQ3J5cHRzb2Z0IFB0eSBMdGQxIjAgBgNVBAsTGURFTU9OU1RSQVRJT04gQU5E 
-IFRFU1RJTkcxGzAZBgNVBAMTEkRFTU8gWkVSTyBWQUxVRSBDQTAeFw05ODA1MTMw -NTQwNThaFw0wMDA1MTIwNTQwNThaMIGeMQswCQYDVQQGEwJBVTETMBEGA1UECBMK -UXVlZW5zbGFuZDERMA8GA1UEBxMIQnJpc2JhbmUxGjAYBgNVBAoTEUNyeXB0c29m -dCBQdHkgTHRkMRIwEAYDVQQLEwlTTUlNRSAwMDMxFDASBgNVBAMTC0luZm9ybWF0 -aW9uMSEwHwYJKoZIhvcNAQkBFhJpbmZvQGNyeXB0c29mdC5jb20wXDANBgkqhkiG -9w0BAQEFAANLADBIAkEArecjie4Nh7ecMkRLlYFz3SKASy3FYLj+Hhhj79yJiSLf -lTx62z2aBqgI1in970EJke28rZj59iiQYm/n5wxNCwIDAQABoygwJjAkBglghkgB -hvhCAQ0EFxYVR2VuZXJhdGVkIHdpdGggU1NMZWF5MA0GCSqGSIb3DQEBBAUAA0EA -UhXqiPTw+QvvztX4g0BhFl5V+c4t0YsxXAPGLRB8YdVcCkKX0f1ltraEpTnsRuz8 -4A3ZItobUHStkstOkOX6fQ== ------END CERTIFICATE----- - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/verify.c nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/verify.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/pkcs7/verify.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pkcs7/verify.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,263 +0,0 @@ -/* crypto/pkcs7/verify.c */ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. 
- * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -#include <stdio.h> -#include <string.h> -#include <openssl/bio.h> -#include <openssl/asn1.h> -#include <openssl/x509.h> -#include <openssl/pem.h> -#include <openssl/err.h> -#include "example.h" - -int verify_callback(int ok, X509_STORE_CTX *ctx); - -BIO *bio_err=NULL; -BIO *bio_out=NULL; - -int main(argc,argv) -int argc; -char *argv[]; - { - PKCS7 *p7; - PKCS7_SIGNER_INFO *si; - X509_STORE_CTX cert_ctx; - X509_STORE *cert_store=NULL; - BIO *data,*detached=NULL,*p7bio=NULL; - char buf[1024*4]; - char *pp; - int i,printit=0; - STACK_OF(PKCS7_SIGNER_INFO) *sk; - - bio_err=BIO_new_fp(stderr,BIO_NOCLOSE); - bio_out=BIO_new_fp(stdout,BIO_NOCLOSE); -#ifndef OPENSSL_NO_MD2 - EVP_add_digest(EVP_md2()); -#endif -#ifndef OPENSSL_NO_MD5 - EVP_add_digest(EVP_md5()); -#endif -#ifndef OPENSSL_NO_SHA1 - EVP_add_digest(EVP_sha1()); -#endif -#ifndef OPENSSL_NO_MDC2 - EVP_add_digest(EVP_mdc2()); -#endif - - data=BIO_new(BIO_s_file()); - - pp=NULL; - while (argc > 1) - { - argc--; - argv++; - if (strcmp(argv[0],"-p") == 0) - { - printit=1; - } - else if ((strcmp(argv[0],"-d") == 0) && (argc >= 2)) - { - detached=BIO_new(BIO_s_file()); - if (!BIO_read_filename(detached,argv[1])) - goto err; - argc--; - argv++; - } - else - { - pp=argv[0]; - if (!BIO_read_filename(data,argv[0])) - goto err; - } - } - - if (pp == NULL) - BIO_set_fp(data,stdin,BIO_NOCLOSE); - - - /* Load the PKCS7 object from a file */ - if ((p7=PEM_read_bio_PKCS7(data,NULL,NULL,NULL)) == NULL) goto err; - - /* This stuff is being setup for certificate verification. 
- * When using SSL, it could be replaced with a - * cert_stre=SSL_CTX_get_cert_store(ssl_ctx); */ - cert_store=X509_STORE_new(); - X509_STORE_set_default_paths(cert_store); - X509_STORE_load_locations(cert_store,NULL,"../../certs"); - X509_STORE_set_verify_cb_func(cert_store,verify_callback); - - ERR_clear_error(); - - /* We need to process the data */ - if ((PKCS7_get_detached(p7) || detached)) - { - if (detached == NULL) - { - printf("no data to verify the signature on\n"); - exit(1); - } - else - p7bio=PKCS7_dataInit(p7,detached); - } - else - { - p7bio=PKCS7_dataInit(p7,NULL); - } - - /* We now have to 'read' from p7bio to calculate digests etc. */ - for (;;) - { - i=BIO_read(p7bio,buf,sizeof(buf)); - /* print it? */ - if (i <= 0) break; - } - - /* We can now verify signatures */ - sk=PKCS7_get_signer_info(p7); - if (sk == NULL) - { - printf("there are no signatures on this data\n"); - exit(1); - } - - /* Ok, first we need to, for each subject entry, see if we can verify */ - for (i=0; i<sk_PKCS7_SIGNER_INFO_num(sk); i++) - { - ASN1_UTCTIME *tm; - char *str1,*str2; - int rc; - - si=sk_PKCS7_SIGNER_INFO_value(sk,i); - rc=PKCS7_dataVerify(cert_store,&cert_ctx,p7bio,p7,si); - if (rc <= 0) - goto err; - printf("signer info\n"); - if ((tm=get_signed_time(si)) != NULL) - { - BIO_printf(bio_out,"Signed time:"); - ASN1_UTCTIME_print(bio_out,tm); - ASN1_UTCTIME_free(tm); - BIO_printf(bio_out,"\n"); - } - if (get_signed_seq2string(si,&str1,&str2)) - { - BIO_printf(bio_out,"String 1 is %s\n",str1); - BIO_printf(bio_out,"String 2 is %s\n",str2); - } - - } - - X509_STORE_free(cert_store); - - printf("done\n"); - exit(0); -err: - ERR_load_crypto_strings(); - ERR_print_errors_fp(stderr); - exit(1); - } - -/* should be X509 * but we can just have them as char *. 
*/ -int verify_callback(int ok, X509_STORE_CTX *ctx) - { - char buf[256]; - X509 *err_cert; - int err,depth; - - err_cert=X509_STORE_CTX_get_current_cert(ctx); - err= X509_STORE_CTX_get_error(ctx); - depth= X509_STORE_CTX_get_error_depth(ctx); - - X509_NAME_oneline(X509_get_subject_name(err_cert),buf,256); - BIO_printf(bio_err,"depth=%d %s\n",depth,buf); - if (!ok) - { - BIO_printf(bio_err,"verify error:num=%d:%s\n",err, - X509_verify_cert_error_string(err)); - if (depth < 6) - { - ok=1; - X509_STORE_CTX_set_error(ctx,X509_V_OK); - } - else - { - ok=0; - X509_STORE_CTX_set_error(ctx,X509_V_ERR_CERT_CHAIN_TOO_LONG); - } - } - switch (ctx->error) - { - case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: - X509_NAME_oneline(X509_get_issuer_name(ctx->current_cert),buf,256); - BIO_printf(bio_err,"issuer= %s\n",buf); - break; - case X509_V_ERR_CERT_NOT_YET_VALID: - case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: - BIO_printf(bio_err,"notBefore="); - ASN1_UTCTIME_print(bio_err,X509_get_notBefore(ctx->current_cert)); - BIO_printf(bio_err,"\n"); - break; - case X509_V_ERR_CERT_HAS_EXPIRED: - case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: - BIO_printf(bio_err,"notAfter="); - ASN1_UTCTIME_print(bio_err,X509_get_notAfter(ctx->current_cert)); - BIO_printf(bio_err,"\n"); - break; - } - BIO_printf(bio_err,"verify return:%d\n",ok); - return(ok); - } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pqueue/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/pqueue/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/pqueue/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pqueue/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -# -# OpenSSL/crypto/pqueue/Makefile -# - -DIR= pqueue -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=pqueue.c -LIBOBJ=pqueue.o - -SRC= $(LIBSRC) - -EXHEADER= pqueue.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -pqueue.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h -pqueue.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -pqueue.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -pqueue.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -pqueue.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pqueue.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -pqueue.o: ../../include/openssl/symhacks.h ../cryptlib.h pqueue.c pqueue.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/pqueue/pqueue.h nodejs-0.11.15/deps/openssl/openssl/crypto/pqueue/pqueue.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/pqueue/pqueue.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/pqueue/pqueue.h 2015-01-20 21:22:17.000000000 +0000 @@ -64,6 +64,9 @@ #include <stdlib.h> #include <string.h> +#ifdef __cplusplus +extern "C" { +#endif typedef struct _pqueue *pqueue; typedef struct _pitem @@ -91,4 +94,7 @@ void pqueue_print(pqueue pq); int pqueue_size(pqueue pq); +#ifdef __cplusplus +} +#endif #endif /* ! HEADER_PQUEUE_H */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rand/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/rand/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/rand/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rand/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -# -# OpenSSL/crypto/rand/Makefile -# - -DIR= rand -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= randtest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=md_rand.c randfile.c rand_lib.c rand_err.c rand_egd.c \ - rand_win.c rand_unix.c rand_os2.c rand_nw.c -LIBOBJ=md_rand.o randfile.o rand_lib.o rand_err.o rand_egd.o \ - rand_win.o rand_unix.o rand_os2.o rand_nw.o - -SRC= $(LIBSRC) - -EXHEADER= rand.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -md_rand.o: ../../e_os.h ../../include/openssl/asn1.h -md_rand.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -md_rand.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -md_rand.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -md_rand.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -md_rand.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -md_rand.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -md_rand.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -md_rand.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -md_rand.o: md_rand.c rand_lcl.h -rand_egd.o: ../../include/openssl/buffer.h ../../include/openssl/e_os2.h -rand_egd.o: ../../include/openssl/opensslconf.h -rand_egd.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -rand_egd.o: rand_egd.c -rand_err.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -rand_err.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rand_err.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rand_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rand_err.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -rand_err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rand_err.o: rand_err.c -rand_lib.o: ../../e_os.h ../../include/openssl/asn1.h -rand_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -rand_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rand_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -rand_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -rand_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rand_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rand_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rand_lib.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -rand_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -rand_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rand_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rand_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rand_lib.o: ../cryptlib.h rand_lib.c -rand_nw.o: ../../e_os.h ../../include/openssl/asn1.h -rand_nw.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -rand_nw.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rand_nw.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rand_nw.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rand_nw.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rand_nw.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rand_nw.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -rand_nw.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -rand_nw.o: ../../include/openssl/symhacks.h ../cryptlib.h rand_lcl.h rand_nw.c -rand_os2.o: ../../e_os.h ../../include/openssl/asn1.h -rand_os2.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -rand_os2.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rand_os2.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rand_os2.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rand_os2.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rand_os2.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rand_os2.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -rand_os2.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -rand_os2.o: ../../include/openssl/symhacks.h ../cryptlib.h rand_lcl.h -rand_os2.o: rand_os2.c -rand_unix.o: ../../e_os.h ../../include/openssl/asn1.h -rand_unix.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -rand_unix.o: 
../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rand_unix.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rand_unix.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rand_unix.o: ../../include/openssl/objects.h -rand_unix.o: ../../include/openssl/opensslconf.h -rand_unix.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rand_unix.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -rand_unix.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -rand_unix.o: ../../include/openssl/symhacks.h ../cryptlib.h rand_lcl.h -rand_unix.o: rand_unix.c -rand_win.o: ../../e_os.h ../../include/openssl/asn1.h -rand_win.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -rand_win.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rand_win.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rand_win.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rand_win.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rand_win.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rand_win.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -rand_win.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -rand_win.o: ../../include/openssl/symhacks.h ../cryptlib.h rand_lcl.h -rand_win.o: rand_win.c -randfile.o: ../../e_os.h ../../include/openssl/buffer.h -randfile.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -randfile.o: ../../include/openssl/opensslconf.h -randfile.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -randfile.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -randfile.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -randfile.o: randfile.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rand/md_rand.c nodejs-0.11.15/deps/openssl/openssl/crypto/rand/md_rand.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rand/md_rand.c 
2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rand/md_rand.c 2015-01-20 21:22:17.000000000 +0000 @@ -159,7 +159,6 @@ static void ssleay_rand_cleanup(void); static void ssleay_rand_seed(const void *buf, int num); static void ssleay_rand_add(const void *buf, int num, double add_entropy); -static int ssleay_rand_bytes(unsigned char *buf, int num, int pseudo); static int ssleay_rand_nopseudo_bytes(unsigned char *buf, int num); static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num); static int ssleay_rand_status(void); @@ -334,7 +333,7 @@ ssleay_rand_add(buf, num, (double)num); } -static int ssleay_rand_bytes(unsigned char *buf, int num, int pseudo) +int ssleay_rand_bytes(unsigned char *buf, int num, int pseudo, int lock) { static volatile int stirred_pool = 0; int i,j,k,st_num,st_idx; @@ -383,10 +382,7 @@ * are fed into the hash function and the results are kept in the * global 'md'. */ -#ifdef OPENSSL_FIPS - /* NB: in FIPS mode we are already under a lock */ - if (!FIPS_mode()) -#endif + if (lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND); /* prevent ssleay_rand_bytes() from trying to obtain the lock again */ @@ -466,9 +462,7 @@ /* before unlocking, we must clear 'crypto_lock_rand' */ crypto_lock_rand = 0; -#ifdef OPENSSL_FIPS - if (!FIPS_mode()) -#endif + if (lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND); while (num > 0) @@ -521,15 +515,11 @@ MD_Init(&m); MD_Update(&m,(unsigned char *)&(md_c[0]),sizeof(md_c)); MD_Update(&m,local_md,MD_DIGEST_LENGTH); -#ifdef OPENSSL_FIPS - if (!FIPS_mode()) -#endif + if (lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND); MD_Update(&m,md,MD_DIGEST_LENGTH); MD_Final(&m,md); -#ifdef OPENSSL_FIPS - if (!FIPS_mode()) -#endif + if (lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND); EVP_MD_CTX_cleanup(&m); @@ -548,14 +538,14 @@ static int ssleay_rand_nopseudo_bytes(unsigned char *buf, int num) { - return ssleay_rand_bytes(buf, num, 0); + return ssleay_rand_bytes(buf, num, 0, 1); } /* pseudo-random bytes that are guaranteed to be 
unique but not unpredictable */ static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num) { - return ssleay_rand_bytes(buf, num, 1); + return ssleay_rand_bytes(buf, num, 1, 1); } static int ssleay_rand_status(void) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rand/randfile.c nodejs-0.11.15/deps/openssl/openssl/crypto/rand/randfile.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rand/randfile.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rand/randfile.c 2015-01-20 21:22:17.000000000 +0000 @@ -79,6 +79,7 @@ #endif #ifndef OPENSSL_NO_POSIX_IO # include <sys/stat.h> +# include <fcntl.h> #endif #ifdef _WIN32 diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rand/rand_lcl.h nodejs-0.11.15/deps/openssl/openssl/crypto/rand/rand_lcl.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/rand/rand_lcl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rand/rand_lcl.h 2015-01-20 21:22:17.000000000 +0000 @@ -154,5 +154,6 @@ #define MD(a,b,c) EVP_Digest(a,b,c,NULL,EVP_md2(), NULL) #endif +int ssleay_rand_bytes(unsigned char *buf, int num, int pseudo, int lock); #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rand/rand_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/rand/rand_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rand/rand_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rand/rand_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -68,6 +68,7 @@ #ifdef OPENSSL_FIPS #include <openssl/fips.h> #include <openssl/fips_rand.h> +#include "rand_lcl.h" #endif #ifndef OPENSSL_NO_ENGINE @@ -199,7 +200,7 @@ *pout = OPENSSL_malloc(min_len); if (!*pout) return 0; - if (RAND_SSLeay()->bytes(*pout, min_len) <= 0) + if (ssleay_rand_bytes(*pout, min_len, 0, 0) <= 0) { OPENSSL_free(*pout); *pout = NULL; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rc2/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/rc2/Makefile.save --- 
nodejs-0.11.13/deps/openssl/openssl/crypto/rc2/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rc2/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -# -# OpenSSL/crypto/rc2/Makefile -# - -DIR= rc2 -TOP= ../.. -CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=rc2test.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=rc2_ecb.c rc2_skey.c rc2_cbc.c rc2cfb64.c rc2ofb64.c -LIBOBJ=rc2_ecb.o rc2_skey.o rc2_cbc.o rc2cfb64.o rc2ofb64.o - -SRC= $(LIBSRC) - -EXHEADER= rc2.h -HEADER= rc2_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -rc2_cbc.o: ../../include/openssl/opensslconf.h ../../include/openssl/rc2.h -rc2_cbc.o: rc2_cbc.c rc2_locl.h -rc2_ecb.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -rc2_ecb.o: ../../include/openssl/rc2.h rc2_ecb.c rc2_locl.h -rc2_skey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rc2_skey.o: ../../include/openssl/opensslconf.h -rc2_skey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rc2_skey.o: ../../include/openssl/rc2.h ../../include/openssl/safestack.h -rc2_skey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rc2_skey.o: rc2_locl.h rc2_skey.c -rc2cfb64.o: ../../include/openssl/opensslconf.h ../../include/openssl/rc2.h -rc2cfb64.o: rc2_locl.h rc2cfb64.c -rc2ofb64.o: ../../include/openssl/opensslconf.h ../../include/openssl/rc2.h -rc2ofb64.o: rc2_locl.h rc2ofb64.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rc4/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/rc4/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/rc4/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rc4/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -# -# OpenSSL/crypto/rc4/Makefile -# - -DIR= rc4 -TOP= ../.. -CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -AR= ar r - -RC4_ENC=rc4_enc.o rc4_skey.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=rc4test.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=rc4_skey.c rc4_enc.c rc4_utl.c -LIBOBJ=$(RC4_ENC) rc4_utl.o - -SRC= $(LIBSRC) - -EXHEADER= rc4.h -HEADER= $(EXHEADER) rc4_locl.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -rc4-586.s: asm/rc4-586.pl ../perlasm/x86asm.pl - $(PERL) asm/rc4-586.pl $(PERLASM_SCHEME) $(CFLAGS) > $@ - -rc4-x86_64.s: asm/rc4-x86_64.pl - $(PERL) asm/rc4-x86_64.pl $(PERLASM_SCHEME) > $@ -rc4-md5-x86_64.s: asm/rc4-md5-x86_64.pl - $(PERL) asm/rc4-md5-x86_64.pl $(PERLASM_SCHEME) > $@ - -rc4-ia64.S: asm/rc4-ia64.pl - $(PERL) asm/rc4-ia64.pl $(CFLAGS) > $@ - -rc4-parisc.s: asm/rc4-parisc.pl - $(PERL) asm/rc4-parisc.pl $(PERLASM_SCHEME) $@ - -rc4-ia64.s: rc4-ia64.S - @case `awk '/^#define RC4_INT/{print$$NF}' $(TOP)/include/openssl/opensslconf.h` in \ - int) set -x; $(CC) $(CFLAGS) -DSZ=4 -E rc4-ia64.S > $@ ;; \ - char) set -x; $(CC) $(CFLAGS) -DSZ=1 -E rc4-ia64.S > $@ ;; \ - *) exit 1 ;; \ - esac - -# GNU make "catch all" -rc4-%.s: asm/rc4-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -rc4_enc.o: ../../e_os.h ../../include/openssl/bio.h -rc4_enc.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rc4_enc.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rc4_enc.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rc4_enc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rc4_enc.o: ../../include/openssl/rc4.h ../../include/openssl/safestack.h -rc4_enc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rc4_enc.o: ../cryptlib.h rc4_enc.c rc4_locl.h -rc4_skey.o: ../../e_os.h ../../include/openssl/bio.h -rc4_skey.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rc4_skey.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rc4_skey.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rc4_skey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rc4_skey.o: ../../include/openssl/rc4.h ../../include/openssl/safestack.h -rc4_skey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rc4_skey.o: ../cryptlib.h rc4_locl.h rc4_skey.c -rc4_utl.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rc4_utl.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -rc4_utl.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rc4.h -rc4_utl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rc4_utl.o: ../../include/openssl/symhacks.h rc4_utl.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ripemd/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ripemd/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/ripemd/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ripemd/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -# -# OpenSSL/crypto/ripemd/Makefile -# - -DIR= ripemd -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -RIP_ASM_OBJ= - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=rmdtest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=rmd_dgst.c rmd_one.c -LIBOBJ=rmd_dgst.o rmd_one.o $(RMD160_ASM_OBJ) - -SRC= $(LIBSRC) - -EXHEADER= ripemd.h -HEADER= rmd_locl.h rmdconst.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -rmd-586.s: asm/rmd-586.pl ../perlasm/x86asm.pl - $(PERL) asm/rmd-586.pl $(PERLASM_SCHEME) $(CFLAGS) > $@ - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -rmd_dgst.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rmd_dgst.o: ../../include/openssl/opensslconf.h -rmd_dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rmd_dgst.o: ../../include/openssl/ripemd.h ../../include/openssl/safestack.h -rmd_dgst.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rmd_dgst.o: ../md32_common.h rmd_dgst.c rmd_locl.h rmdconst.h -rmd_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rmd_one.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -rmd_one.o: ../../include/openssl/ossl_typ.h ../../include/openssl/ripemd.h -rmd_one.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rmd_one.o: ../../include/openssl/symhacks.h rmd_one.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/Makefile nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/Makefile --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -212,7 +212,7 @@ rsa_oaep.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h rsa_oaep.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h rsa_oaep.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_oaep.o: ../cryptlib.h rsa_oaep.c +rsa_oaep.o: ../constant_time_locl.h ../cryptlib.h rsa_oaep.c rsa_pk1.o: ../../e_os.h ../../include/openssl/asn1.h rsa_pk1.o: ../../include/openssl/bio.h ../../include/openssl/bn.h rsa_pk1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h @@ -221,7 +221,8 @@ rsa_pk1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h rsa_pk1.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h rsa_pk1.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_pk1.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_pk1.c +rsa_pk1.o: ../../include/openssl/symhacks.h 
../constant_time_locl.h +rsa_pk1.o: ../cryptlib.h rsa_pk1.c rsa_pmeth.o: ../../e_os.h ../../include/openssl/asn1.h rsa_pmeth.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h rsa_pmeth.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,308 +0,0 @@ -# -# OpenSSL/crypto/rsa/Makefile -# - -DIR= rsa -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=rsa_test.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= rsa_eay.c rsa_gen.c rsa_lib.c rsa_sign.c rsa_saos.c rsa_err.c \ - rsa_pk1.c rsa_ssl.c rsa_none.c rsa_oaep.c rsa_chk.c rsa_null.c \ - rsa_pss.c rsa_x931.c rsa_asn1.c rsa_depr.c rsa_ameth.c rsa_prn.c \ - rsa_pmeth.c rsa_crpt.c -LIBOBJ= rsa_eay.o rsa_gen.o rsa_lib.o rsa_sign.o rsa_saos.o rsa_err.o \ - rsa_pk1.o rsa_ssl.o rsa_none.o rsa_oaep.o rsa_chk.o rsa_null.o \ - rsa_pss.o rsa_x931.o rsa_asn1.o rsa_depr.o rsa_ameth.o rsa_prn.o \ - rsa_pmeth.o rsa_crpt.o - -SRC= $(LIBSRC) - -EXHEADER= rsa.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -rsa_ameth.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_ameth.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -rsa_ameth.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -rsa_ameth.o: ../../include/openssl/cms.h ../../include/openssl/crypto.h -rsa_ameth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -rsa_ameth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -rsa_ameth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rsa_ameth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rsa_ameth.o: ../../include/openssl/objects.h -rsa_ameth.o: ../../include/openssl/opensslconf.h -rsa_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_ameth.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -rsa_ameth.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rsa_ameth.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_ameth.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rsa_ameth.o: ../asn1/asn1_locl.h ../cryptlib.h rsa_ameth.c -rsa_asn1.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_asn1.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -rsa_asn1.o: ../../include/openssl/bn.h 
../../include/openssl/buffer.h -rsa_asn1.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rsa_asn1.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -rsa_asn1.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -rsa_asn1.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -rsa_asn1.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -rsa_asn1.o: ../../include/openssl/opensslconf.h -rsa_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_asn1.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -rsa_asn1.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rsa_asn1.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_asn1.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rsa_asn1.o: ../cryptlib.h rsa_asn1.c -rsa_chk.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -rsa_chk.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h -rsa_chk.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_chk.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_chk.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_chk.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -rsa_chk.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_chk.o: rsa_chk.c -rsa_crpt.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_crpt.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_crpt.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_crpt.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -rsa_crpt.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -rsa_crpt.o: ../../include/openssl/engine.h ../../include/openssl/err.h -rsa_crpt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -rsa_crpt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -rsa_crpt.o: 
../../include/openssl/opensslconf.h -rsa_crpt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_crpt.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -rsa_crpt.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -rsa_crpt.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -rsa_crpt.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -rsa_crpt.o: ../../include/openssl/x509_vfy.h ../cryptlib.h rsa_crpt.c -rsa_depr.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_depr.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_depr.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_depr.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_depr.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_depr.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_depr.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -rsa_depr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_depr.o: ../cryptlib.h rsa_depr.c -rsa_eay.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_eay.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_eay.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_eay.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_eay.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_eay.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_eay.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_eay.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_eay.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_eay.c -rsa_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -rsa_err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rsa_err.o: ../../include/openssl/err.h ../../include/openssl/lhash.h -rsa_err.o: ../../include/openssl/opensslconf.h 
../../include/openssl/opensslv.h -rsa_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rsa.h -rsa_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_err.o: ../../include/openssl/symhacks.h rsa_err.c -rsa_gen.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_gen.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_gen.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_gen.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_gen.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_gen.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_gen.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -rsa_gen.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_gen.o: ../cryptlib.h rsa_gen.c -rsa_lib.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_lib.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -rsa_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -rsa_lib.o: ../../include/openssl/engine.h ../../include/openssl/err.h -rsa_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -rsa_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -rsa_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -rsa_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -rsa_lib.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rsa_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rsa_lib.o: ../cryptlib.h rsa_lib.c -rsa_none.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_none.o: ../../include/openssl/bio.h 
../../include/openssl/bn.h -rsa_none.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_none.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_none.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_none.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_none.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_none.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_none.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_none.c -rsa_null.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_null.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_null.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_null.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_null.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_null.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_null.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_null.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_null.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_null.c -rsa_oaep.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_oaep.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_oaep.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_oaep.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_oaep.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -rsa_oaep.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -rsa_oaep.o: ../../include/openssl/opensslconf.h -rsa_oaep.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_oaep.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_oaep.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rsa_oaep.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_oaep.o: ../cryptlib.h rsa_oaep.c 
-rsa_pk1.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_pk1.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_pk1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_pk1.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_pk1.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_pk1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_pk1.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_pk1.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_pk1.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_pk1.c -rsa_pmeth.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_pmeth.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -rsa_pmeth.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -rsa_pmeth.o: ../../include/openssl/cms.h ../../include/openssl/crypto.h -rsa_pmeth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -rsa_pmeth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -rsa_pmeth.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rsa_pmeth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rsa_pmeth.o: ../../include/openssl/objects.h -rsa_pmeth.o: ../../include/openssl/opensslconf.h -rsa_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -rsa_pmeth.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rsa_pmeth.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_pmeth.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rsa_pmeth.o: ../cryptlib.h ../evp/evp_locl.h rsa_locl.h rsa_pmeth.c -rsa_prn.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_prn.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -rsa_prn.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -rsa_prn.o: ../../include/openssl/err.h 
../../include/openssl/evp.h -rsa_prn.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rsa_prn.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rsa_prn.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_prn.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -rsa_prn.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_prn.o: ../cryptlib.h rsa_prn.c -rsa_pss.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_pss.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_pss.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_pss.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_pss.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -rsa_pss.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -rsa_pss.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -rsa_pss.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -rsa_pss.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -rsa_pss.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -rsa_pss.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_pss.c -rsa_saos.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_saos.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_saos.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_saos.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -rsa_saos.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -rsa_saos.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rsa_saos.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rsa_saos.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rsa_saos.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_saos.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -rsa_saos.o: ../../include/openssl/safestack.h 
../../include/openssl/sha.h -rsa_saos.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_saos.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rsa_saos.o: ../cryptlib.h rsa_saos.c -rsa_sign.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_sign.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_sign.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_sign.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -rsa_sign.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -rsa_sign.o: ../../include/openssl/err.h ../../include/openssl/evp.h -rsa_sign.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rsa_sign.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rsa_sign.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_sign.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -rsa_sign.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -rsa_sign.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -rsa_sign.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -rsa_sign.o: ../cryptlib.h rsa_locl.h rsa_sign.c -rsa_ssl.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_ssl.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_ssl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_ssl.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_ssl.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -rsa_ssl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_ssl.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_ssl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_ssl.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_ssl.c -rsa_x931.o: ../../e_os.h ../../include/openssl/asn1.h -rsa_x931.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -rsa_x931.o: 
../../include/openssl/buffer.h ../../include/openssl/crypto.h -rsa_x931.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -rsa_x931.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -rsa_x931.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -rsa_x931.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -rsa_x931.o: ../../include/openssl/rand.h ../../include/openssl/rsa.h -rsa_x931.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -rsa_x931.o: ../../include/openssl/symhacks.h ../cryptlib.h rsa_x931.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_ameth.c nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_ameth.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_ameth.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_ameth.c 2015-01-20 21:22:17.000000000 +0000 @@ -358,7 +358,7 @@ if (i2a_ASN1_INTEGER(bp, pss->saltLength) <= 0) goto err; } - else if (BIO_puts(bp, "0x14 (default)") <= 0) + else if (BIO_puts(bp, "14 (default)") <= 0) goto err; BIO_puts(bp, "\n"); diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_eay.c nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_eay.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_eay.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_eay.c 2015-01-20 21:22:17.000000000 +0000 @@ -459,7 +459,7 @@ if (padding == RSA_X931_PADDING) { BN_sub(f, rsa->n, ret); - if (BN_cmp(ret, f)) + if (BN_cmp(ret, f) > 0) res = f; else res = ret; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_err.c nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_err.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_err.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_err.c 2015-01-20 21:22:17.000000000 +0000 @@ -175,6 +175,7 @@ {ERR_REASON(RSA_R_OPERATION_NOT_ALLOWED_IN_FIPS_MODE),"operation not 
allowed in fips mode"}, {ERR_REASON(RSA_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE),"operation not supported for this keytype"}, {ERR_REASON(RSA_R_PADDING_CHECK_FAILED) ,"padding check failed"}, +{ERR_REASON(RSA_R_PKCS_DECODING_ERROR) ,"pkcs decoding error"}, {ERR_REASON(RSA_R_P_NOT_PRIME) ,"p not prime"}, {ERR_REASON(RSA_R_Q_NOT_PRIME) ,"q not prime"}, {ERR_REASON(RSA_R_RSA_OPERATIONS_NOT_SUPPORTED),"rsa operations not supported"}, diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa.h nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa.h 2015-01-20 21:22:17.000000000 +0000 @@ -559,6 +559,7 @@ #define RSA_R_OPERATION_NOT_ALLOWED_IN_FIPS_MODE 158 #define RSA_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE 148 #define RSA_R_PADDING_CHECK_FAILED 114 +#define RSA_R_PKCS_DECODING_ERROR 159 #define RSA_R_P_NOT_PRIME 128 #define RSA_R_Q_NOT_PRIME 129 #define RSA_R_RSA_OPERATIONS_NOT_SUPPORTED 130 diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_oaep.c nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_oaep.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_oaep.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_oaep.c 2015-01-20 21:22:17.000000000 +0000 @@ -18,6 +18,7 @@ * an equivalent notion. */ +#include "constant_time_locl.h" #if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA1) #include <stdio.h> @@ -95,92 +96,117 @@ const unsigned char *from, int flen, int num, const unsigned char *param, int plen) { - int i, dblen, mlen = -1; - const unsigned char *maskeddb; - int lzero; - unsigned char *db = NULL, seed[SHA_DIGEST_LENGTH], phash[SHA_DIGEST_LENGTH]; - unsigned char *padded_from; - int bad = 0; - - if (--num < 2 * SHA_DIGEST_LENGTH + 1) - /* 'num' is the length of the modulus, i.e. does not depend on the - * particular ciphertext. 
*/ + int i, dblen, mlen = -1, one_index = 0, msg_index; + unsigned int good, found_one_byte; + const unsigned char *maskedseed, *maskeddb; + /* |em| is the encoded message, zero-padded to exactly |num| bytes: + * em = Y || maskedSeed || maskedDB */ + unsigned char *db = NULL, *em = NULL, seed[EVP_MAX_MD_SIZE], + phash[EVP_MAX_MD_SIZE]; + + if (tlen <= 0 || flen <= 0) + return -1; + + /* + * |num| is the length of the modulus; |flen| is the length of the + * encoded message. Therefore, for any |from| that was obtained by + * decrypting a ciphertext, we must have |flen| <= |num|. Similarly, + * num < 2 * SHA_DIGEST_LENGTH + 2 must hold for the modulus + * irrespective of the ciphertext, see PKCS #1 v2.2, section 7.1.2. + * This does not leak any side-channel information. + */ + if (num < flen || num < 2 * SHA_DIGEST_LENGTH + 2) goto decoding_err; - lzero = num - flen; - if (lzero < 0) - { - /* signalling this error immediately after detection might allow - * for side-channel attacks (e.g. timing if 'plen' is huge - * -- cf. James H. Manger, "A Chosen Ciphertext Attack on RSA Optimal - * Asymmetric Encryption Padding (OAEP) [...]", CRYPTO 2001), - * so we use a 'bad' flag */ - bad = 1; - lzero = 0; - flen = num; /* don't overflow the memcpy to padded_from */ - } - - dblen = num - SHA_DIGEST_LENGTH; - db = OPENSSL_malloc(dblen + num); - if (db == NULL) + dblen = num - SHA_DIGEST_LENGTH - 1; + db = OPENSSL_malloc(dblen); + em = OPENSSL_malloc(num); + if (db == NULL || em == NULL) { RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP, ERR_R_MALLOC_FAILURE); - return -1; + goto cleanup; } - /* Always do this zero-padding copy (even when lzero == 0) - * to avoid leaking timing info about the value of lzero. */ - padded_from = db + dblen; - memset(padded_from, 0, lzero); - memcpy(padded_from + lzero, from, flen); + /* + * Always do this zero-padding copy (even when num == flen) to avoid + * leaking that information. 
The copy still leaks some side-channel + * information, but it's impossible to have a fixed memory access + * pattern since we can't read out of the bounds of |from|. + * + * TODO(emilia): Consider porting BN_bn2bin_padded from BoringSSL. + */ + memset(em, 0, num); + memcpy(em + num - flen, from, flen); + + /* + * The first byte must be zero, however we must not leak if this is + * true. See James H. Manger, "A Chosen Ciphertext Attack on RSA + * Optimal Asymmetric Encryption Padding (OAEP) [...]", CRYPTO 2001). + */ + good = constant_time_is_zero(em[0]); - maskeddb = padded_from + SHA_DIGEST_LENGTH; + maskedseed = em + 1; + maskeddb = em + 1 + SHA_DIGEST_LENGTH; if (MGF1(seed, SHA_DIGEST_LENGTH, maskeddb, dblen)) - return -1; + goto cleanup; for (i = 0; i < SHA_DIGEST_LENGTH; i++) - seed[i] ^= padded_from[i]; - + seed[i] ^= maskedseed[i]; + if (MGF1(db, dblen, seed, SHA_DIGEST_LENGTH)) - return -1; + goto cleanup; for (i = 0; i < dblen; i++) db[i] ^= maskeddb[i]; if (!EVP_Digest((void *)param, plen, phash, NULL, EVP_sha1(), NULL)) - return -1; + goto cleanup; - if (CRYPTO_memcmp(db, phash, SHA_DIGEST_LENGTH) != 0 || bad) + good &= constant_time_is_zero(CRYPTO_memcmp(db, phash, SHA_DIGEST_LENGTH)); + + found_one_byte = 0; + for (i = SHA_DIGEST_LENGTH; i < dblen; i++) + { + /* Padding consists of a number of 0-bytes, followed by a 1. */ + unsigned int equals1 = constant_time_eq(db[i], 1); + unsigned int equals0 = constant_time_is_zero(db[i]); + one_index = constant_time_select_int(~found_one_byte & equals1, + i, one_index); + found_one_byte |= equals1; + good &= (found_one_byte | equals0); + } + + good &= found_one_byte; + + /* + * At this point |good| is zero unless the plaintext was valid, + * so plaintext-awareness ensures timing side-channels are no longer a + * concern. 
+ */ + if (!good) goto decoding_err; + + msg_index = one_index + 1; + mlen = dblen - msg_index; + + if (tlen < mlen) + { + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP, RSA_R_DATA_TOO_LARGE); + mlen = -1; + } else { - for (i = SHA_DIGEST_LENGTH; i < dblen; i++) - if (db[i] != 0x00) - break; - if (i == dblen || db[i] != 0x01) - goto decoding_err; - else - { - /* everything looks OK */ - - mlen = dblen - ++i; - if (tlen < mlen) - { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP, RSA_R_DATA_TOO_LARGE); - mlen = -1; - } - else - memcpy(to, db + i, mlen); - } + memcpy(to, db + msg_index, mlen); + goto cleanup; } - OPENSSL_free(db); - return mlen; decoding_err: - /* to avoid chosen ciphertext attacks, the error message should not reveal - * which kind of decoding error happened */ + /* To avoid chosen ciphertext attacks, the error message should not reveal + * which kind of decoding error happened. */ RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP, RSA_R_OAEP_DECODING_ERROR); +cleanup: if (db != NULL) OPENSSL_free(db); - return -1; + if (em != NULL) OPENSSL_free(em); + return mlen; } int PKCS1_MGF1(unsigned char *mask, long len, diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_pk1.c nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_pk1.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_pk1.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_pk1.c 2015-01-20 21:22:17.000000000 +0000 @@ -56,6 +56,8 @@ * [including the GNU Public Licence.] 
*/ +#include "constant_time_locl.h" + #include <stdio.h> #include "cryptlib.h" #include <openssl/bn.h> @@ -181,44 +183,87 @@ int RSA_padding_check_PKCS1_type_2(unsigned char *to, int tlen, const unsigned char *from, int flen, int num) { - int i,j; - const unsigned char *p; - - p=from; - if ((num != (flen+1)) || (*(p++) != 02)) - { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2,RSA_R_BLOCK_TYPE_IS_NOT_02); - return(-1); - } -#ifdef PKCS1_CHECK - return(num-11); -#endif - - /* scan over padding data */ - j=flen-1; /* one for type. */ - for (i=0; i<j; i++) - if (*(p++) == 0) break; - - if (i == j) - { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2,RSA_R_NULL_BEFORE_BLOCK_MISSING); - return(-1); - } - - if (i < 8) - { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2,RSA_R_BAD_PAD_BYTE_COUNT); - return(-1); - } - i++; /* Skip over the '\0' */ - j-=i; - if (j > tlen) - { - RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2,RSA_R_DATA_TOO_LARGE); - return(-1); - } - memcpy(to,p,(unsigned int)j); - - return(j); + int i; + /* |em| is the encoded message, zero-padded to exactly |num| bytes */ + unsigned char *em = NULL; + unsigned int good, found_zero_byte; + int zero_index = 0, msg_index, mlen = -1; + + if (tlen < 0 || flen < 0) + return -1; + + /* PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography + * Standard", section 7.2.2. */ + + if (flen > num) + goto err; + + if (num < 11) + goto err; + + em = OPENSSL_malloc(num); + if (em == NULL) + { + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, ERR_R_MALLOC_FAILURE); + return -1; + } + memset(em, 0, num); + /* + * Always do this zero-padding copy (even when num == flen) to avoid + * leaking that information. The copy still leaks some side-channel + * information, but it's impossible to have a fixed memory access + * pattern since we can't read out of the bounds of |from|. + * + * TODO(emilia): Consider porting BN_bn2bin_padded from BoringSSL. 
+ */ + memcpy(em + num - flen, from, flen); + + good = constant_time_is_zero(em[0]); + good &= constant_time_eq(em[1], 2); + + found_zero_byte = 0; + for (i = 2; i < num; i++) + { + unsigned int equals0 = constant_time_is_zero(em[i]); + zero_index = constant_time_select_int(~found_zero_byte & equals0, i, zero_index); + found_zero_byte |= equals0; + } + + /* + * PS must be at least 8 bytes long, and it starts two bytes into |em|. + * If we never found a 0-byte, then |zero_index| is 0 and the check + * also fails. + */ + good &= constant_time_ge((unsigned int)(zero_index), 2 + 8); + + /* Skip the zero byte. This is incorrect if we never found a zero-byte + * but in this case we also do not copy the message out. */ + msg_index = zero_index + 1; + mlen = num - msg_index; + + /* For good measure, do this check in constant time as well; it could + * leak something if |tlen| was assuming valid padding. */ + good &= constant_time_ge((unsigned int)(tlen), (unsigned int)(mlen)); + + /* + * We can't continue in constant-time because we need to copy the result + * and we cannot fake its length. This unavoidably leaks timing + * information at the API boundary. + * TODO(emilia): this could be addressed at the call site, + * see BoringSSL commit 0aa0767340baf925bda4804882aab0cb974b2d26. 
+ */ + if (!good) + { + mlen = -1; + goto err; + } + + memcpy(to, em + msg_index, mlen); + +err: + if (em != NULL) + OPENSSL_free(em); + if (mlen == -1) + RSAerr(RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2, RSA_R_PKCS_DECODING_ERROR); + return mlen; } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_sign.c nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_sign.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/rsa/rsa_sign.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/rsa/rsa_sign.c 2015-01-20 21:22:17.000000000 +0000 @@ -151,6 +151,25 @@ return(ret); } +/* + * Check DigestInfo structure does not contain extraneous data by reencoding + * using DER and checking encoding against original. + */ +static int rsa_check_digestinfo(X509_SIG *sig, const unsigned char *dinfo, int dinfolen) + { + unsigned char *der = NULL; + int derlen; + int ret = 0; + derlen = i2d_X509_SIG(sig, &der); + if (derlen <= 0) + return 0; + if (derlen == dinfolen && !memcmp(dinfo, der, derlen)) + ret = 1; + OPENSSL_cleanse(der, derlen); + OPENSSL_free(der); + return ret; + } + int int_rsa_verify(int dtype, const unsigned char *m, unsigned int m_len, unsigned char *rm, size_t *prm_len, @@ -228,7 +247,7 @@ if (sig == NULL) goto err; /* Excess data can be used to create forgeries */ - if(p != s+i) + if(p != s+i || !rsa_check_digestinfo(sig, s, i)) { RSAerr(RSA_F_INT_RSA_VERIFY,RSA_R_BAD_SIGNATURE); goto err; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/seed/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/seed/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/seed/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/seed/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -# -# crypto/seed/Makefile -# - -DIR= seed -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=seed.c seed_ecb.c seed_cbc.c seed_cfb.c seed_ofb.c -LIBOBJ=seed.o seed_ecb.o seed_cbc.o seed_cfb.o seed_ofb.o - -SRC= $(LIBSRC) - -EXHEADER= seed.h -HEADER= seed_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -seed.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -seed.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -seed.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -seed.o: ../../include/openssl/seed.h ../../include/openssl/stack.h -seed.o: ../../include/openssl/symhacks.h seed.c seed_locl.h -seed_cbc.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -seed_cbc.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -seed_cbc.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -seed_cbc.o: ../../include/openssl/safestack.h ../../include/openssl/seed.h -seed_cbc.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -seed_cbc.o: seed_cbc.c -seed_cfb.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -seed_cfb.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -seed_cfb.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -seed_cfb.o: ../../include/openssl/safestack.h ../../include/openssl/seed.h -seed_cfb.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -seed_cfb.o: seed_cfb.c -seed_ecb.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -seed_ecb.o: ../../include/openssl/opensslconf.h -seed_ecb.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -seed_ecb.o: ../../include/openssl/safestack.h ../../include/openssl/seed.h -seed_ecb.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -seed_ecb.o: seed_ecb.c -seed_ofb.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -seed_ofb.o: ../../include/openssl/modes.h ../../include/openssl/opensslconf.h -seed_ofb.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -seed_ofb.o: ../../include/openssl/safestack.h ../../include/openssl/seed.h -seed_ofb.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -seed_ofb.o: seed_ofb.c diff -Nru 
nodejs-0.11.13/deps/openssl/openssl/crypto/sha/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/sha/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/sha/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/sha/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -# -# OpenSSL/crypto/sha/Makefile -# - -DIR= sha -TOP= ../.. -CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -SHA1_ASM_OBJ= - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=shatest.c sha1test.c sha256t.c sha512t.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=sha_dgst.c sha1dgst.c sha_one.c sha1_one.c sha256.c sha512.c -LIBOBJ=sha_dgst.o sha1dgst.o sha_one.o sha1_one.o sha256.o sha512.o $(SHA1_ASM_OBJ) - -SRC= $(LIBSRC) - -EXHEADER= sha.h -HEADER= sha_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -sha1-586.s: asm/sha1-586.pl ../perlasm/x86asm.pl - $(PERL) asm/sha1-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -sha256-586.s: asm/sha256-586.pl ../perlasm/x86asm.pl - $(PERL) asm/sha256-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ -sha512-586.s: asm/sha512-586.pl ../perlasm/x86asm.pl - $(PERL) asm/sha512-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ - -sha1-ia64.s: asm/sha1-ia64.pl - (cd asm; $(PERL) sha1-ia64.pl ../$@ $(CFLAGS)) -sha256-ia64.s: asm/sha512-ia64.pl - (cd asm; $(PERL) sha512-ia64.pl ../$@ $(CFLAGS)) -sha512-ia64.s: asm/sha512-ia64.pl - (cd asm; $(PERL) sha512-ia64.pl ../$@ $(CFLAGS)) - -sha256-armv4.S: asm/sha256-armv4.pl - $(PERL) $< $(PERLASM_SCHEME) $@ - -sha1-alpha.s: asm/sha1-alpha.pl - (preproc=/tmp/$$$$.$@; trap "rm $$preproc" INT; \ - $(PERL) asm/sha1-alpha.pl > $$preproc && \ - $(CC) -E $$preproc > $@ && rm $$preproc) - -# Solaris make has to be explicitly told -sha1-x86_64.s: asm/sha1-x86_64.pl; $(PERL) asm/sha1-x86_64.pl $(PERLASM_SCHEME) > $@ -sha256-x86_64.s:asm/sha512-x86_64.pl; $(PERL) asm/sha512-x86_64.pl $(PERLASM_SCHEME) $@ -sha512-x86_64.s:asm/sha512-x86_64.pl; $(PERL) asm/sha512-x86_64.pl $(PERLASM_SCHEME) $@ -sha1-sparcv9.s: asm/sha1-sparcv9.pl; $(PERL) asm/sha1-sparcv9.pl $@ $(CFLAGS) -sha256-sparcv9.s:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAGS) -sha512-sparcv9.s:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAGS) - -sha1-ppc.s: asm/sha1-ppc.pl; $(PERL) asm/sha1-ppc.pl $(PERLASM_SCHEME) $@ -sha256-ppc.s: asm/sha512-ppc.pl; $(PERL) asm/sha512-ppc.pl $(PERLASM_SCHEME) $@ -sha512-ppc.s: asm/sha512-ppc.pl; $(PERL) asm/sha512-ppc.pl $(PERLASM_SCHEME) $@ - -sha1-parisc.s: asm/sha1-parisc.pl; $(PERL) asm/sha1-parisc.pl $(PERLASM_SCHEME) $@ -sha256-parisc.s:asm/sha512-parisc.pl; $(PERL) asm/sha512-parisc.pl $(PERLASM_SCHEME) $@ -sha512-parisc.s:asm/sha512-parisc.pl; $(PERL) asm/sha512-parisc.pl $(PERLASM_SCHEME) $@ - -sha1-mips.S: asm/sha1-mips.pl; $(PERL) 
asm/sha1-mips.pl $(PERLASM_SCHEME) $@ -sha256-mips.S: asm/sha512-mips.pl; $(PERL) asm/sha512-mips.pl $(PERLASM_SCHEME) $@ -sha512-mips.S: asm/sha512-mips.pl; $(PERL) asm/sha512-mips.pl $(PERLASM_SCHEME) $@ - -# GNU make "catch all" -sha1-%.S: asm/sha1-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@ -sha256-%.S: asm/sha512-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@ -sha512-%.S: asm/sha512-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@ - -sha1-armv4-large.o: sha1-armv4-large.S -sha256-armv4.o: sha256-armv4.S -sha512-armv4.o: sha512-armv4.S - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -sha1_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -sha1_one.o: ../../include/openssl/opensslconf.h -sha1_one.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -sha1_one.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -sha1_one.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -sha1_one.o: sha1_one.c -sha1dgst.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -sha1dgst.o: ../../include/openssl/opensslconf.h -sha1dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -sha1dgst.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -sha1dgst.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -sha1dgst.o: ../md32_common.h sha1dgst.c sha_locl.h -sha256.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -sha256.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -sha256.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -sha256.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -sha256.o: ../../include/openssl/symhacks.h ../md32_common.h sha256.c -sha512.o: ../../e_os.h ../../include/openssl/bio.h -sha512.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -sha512.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -sha512.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -sha512.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -sha512.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -sha512.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -sha512.o: ../cryptlib.h sha512.c -sha_dgst.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -sha_dgst.o: ../../include/openssl/opensslconf.h -sha_dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -sha_dgst.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -sha_dgst.o: 
../../include/openssl/stack.h ../../include/openssl/symhacks.h -sha_dgst.o: ../md32_common.h sha_dgst.c sha_locl.h -sha_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -sha_one.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -sha_one.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -sha_one.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -sha_one.o: ../../include/openssl/symhacks.h sha_one.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/srp/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/srp/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/srp/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/srp/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -DIR= srp -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -INSTALL_PREFIX= -OPENSSLDIR= /usr/local/ssl -INSTALLTOP=/usr/local/ssl -MAKE= make -f Makefile.ssl -MAKEDEPPROG= makedepend -MAKEDEPEND= $(TOP)/util/domd $(TOP) -MD $(MAKEDEPPROG) -MAKEFILE= Makefile.ssl -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST=srptest.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=srp_lib.c srp_vfy.c -LIBOBJ=srp_lib.o srp_vfy.o - -SRC= $(LIBSRC) - -EXHEADER= srp.h -HEADER= $(EXHEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -srptest: top srptest.c $(LIB) - $(CC) $(CFLAGS) -Wall -Werror -g -o srptest srptest.c $(LIB) - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -srp_lib.o: ../../e_os.h ../../include/openssl/asn1.h -srp_lib.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -srp_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -srp_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -srp_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -srp_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -srp_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -srp_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -srp_lib.o: ../../include/openssl/sha.h ../../include/openssl/srp.h -srp_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -srp_lib.o: ../cryptlib.h srp_grps.h srp_lcl.h srp_lib.c -srp_vfy.o: ../../e_os.h ../../include/openssl/asn1.h -srp_vfy.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -srp_vfy.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -srp_vfy.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -srp_vfy.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -srp_vfy.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -srp_vfy.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h 
-srp_vfy.o: ../../include/openssl/ossl_typ.h ../../include/openssl/rand.h -srp_vfy.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -srp_vfy.o: ../../include/openssl/srp.h ../../include/openssl/stack.h -srp_vfy.o: ../../include/openssl/symhacks.h ../../include/openssl/txt_db.h -srp_vfy.o: ../cryptlib.h srp_lcl.h srp_vfy.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/srp/srp_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/srp/srp_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/srp/srp_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/srp/srp_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -89,6 +89,9 @@ int longg ; int longN = BN_num_bytes(N); + if (BN_ucmp(g, N) >= 0) + return NULL; + if ((tmp = OPENSSL_malloc(longN)) == NULL) return NULL; BN_bn2bin(N,tmp) ; @@ -121,6 +124,9 @@ if ((A == NULL) ||(B == NULL) || (N == NULL)) return NULL; + if (BN_ucmp(A, N) >= 0 || BN_ucmp(B, N) >= 0) + return NULL; + longN= BN_num_bytes(N); if ((cAB = OPENSSL_malloc(2*longN)) == NULL) diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/srp/srp_vfy.c nodejs-0.11.15/deps/openssl/openssl/crypto/srp/srp_vfy.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/srp/srp_vfy.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/srp/srp_vfy.c 2015-01-20 21:22:17.000000000 +0000 @@ -93,6 +93,9 @@ else a[i] = loc - b64table; ++i; } + /* if nothing valid to process we have a zero length response */ + if (i == 0) + return 0; size = i; i = size - 1; j = size; diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/stack/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/stack/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/stack/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/stack/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -# -# OpenSSL/crypto/stack/Makefile -# - -DIR= stack -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=stack.c -LIBOBJ=stack.o - -SRC= $(LIBSRC) - -EXHEADER= stack.h safestack.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -stack.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -stack.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -stack.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -stack.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -stack.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -stack.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -stack.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -stack.o: ../../include/openssl/symhacks.h ../cryptlib.h stack.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/stack/safestack.h nodejs-0.11.15/deps/openssl/openssl/crypto/stack/safestack.h --- nodejs-0.11.13/deps/openssl/openssl/crypto/stack/safestack.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/stack/safestack.h 2015-01-20 21:22:17.000000000 +0000 @@ -57,6 +57,10 @@ #include <openssl/stack.h> +#ifdef __cplusplus +extern "C" { +#endif + #ifndef CHECKED_PTR_OF #define CHECKED_PTR_OF(type, p) \ ((void*) (1 ? p : (type*)0)) @@ -2660,4 +2664,8 @@ #define lh_SSL_SESSION_free(lh) LHM_lh_free(SSL_SESSION,lh) /* End of util/mkstack.pl block, you may now edit :-) */ + +#ifdef __cplusplus +} +#endif #endif /* !defined HEADER_SAFESTACK_H */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ts/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ts/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/ts/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ts/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,269 +0,0 @@ -# -# SSLeay/crypto/ts/Makefile -# - -DIR= ts -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I../../include -CFLAG = -g -INSTALL_PREFIX= -OPENSSLDIR= /usr/local/ssl -INSTALLTOP=/usr/local/ssl -MAKEDEPPROG= makedepend -MAKEDEPEND= $(TOP)/util/domd $(TOP) -MD $(MAKEDEPPROG) -MAKEFILE= Makefile -AR= ar r - -PEX_LIBS= -EX_LIBS= - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL= Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= ts_err.c ts_req_utils.c ts_req_print.c ts_rsp_utils.c ts_rsp_print.c \ - ts_rsp_sign.c ts_rsp_verify.c ts_verify_ctx.c ts_lib.c ts_conf.c \ - ts_asn1.c -LIBOBJ= ts_err.o ts_req_utils.o ts_req_print.o ts_rsp_utils.o ts_rsp_print.o \ - ts_rsp_sign.o ts_rsp_verify.o ts_verify_ctx.o ts_lib.o ts_conf.o \ - ts_asn1.o - -SRC= $(LIBSRC) - -EXHEADER= ts.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -test: - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff enc dec sign verify - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -ts_asn1.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -ts_asn1.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_asn1.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_asn1.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_asn1.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_asn1.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_asn1.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_asn1.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_asn1.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -ts_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ts_asn1.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -ts_asn1.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ts_asn1.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ts_asn1.o: ../../include/openssl/ts.h ../../include/openssl/x509.h -ts_asn1.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ts_asn1.o: ts_asn1.c -ts_conf.o: ../../e_os.h ../../include/openssl/asn1.h -ts_conf.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_conf.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_conf.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_conf.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_conf.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_conf.o: ../../include/openssl/engine.h ../../include/openssl/err.h -ts_conf.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ts_conf.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ts_conf.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ts_conf.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -ts_conf.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -ts_conf.o: ../../include/openssl/rsa.h 
../../include/openssl/safestack.h -ts_conf.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_conf.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_conf.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_conf.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_conf.c -ts_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -ts_err.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ts_err.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -ts_err.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -ts_err.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ts_err.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ts_err.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ts_err.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ts_err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ts_err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_err.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_err.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_err.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_err.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_err.o: ../../include/openssl/x509v3.h ts_err.c -ts_lib.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -ts_lib.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -ts_lib.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_lib.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_lib.o: ../../include/openssl/objects.h 
../../include/openssl/opensslconf.h -ts_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ts_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/rsa.h -ts_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -ts_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ts_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_lib.o: ../../include/openssl/x509v3.h ../cryptlib.h ts.h ts_lib.c -ts_req_print.o: ../../e_os.h ../../include/openssl/asn1.h -ts_req_print.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -ts_req_print.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ts_req_print.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -ts_req_print.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -ts_req_print.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ts_req_print.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ts_req_print.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ts_req_print.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ts_req_print.o: ../../include/openssl/opensslconf.h -ts_req_print.o: ../../include/openssl/opensslv.h -ts_req_print.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_req_print.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_req_print.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_req_print.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_req_print.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_req_print.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_req_print.c -ts_req_utils.o: ../../e_os.h ../../include/openssl/asn1.h -ts_req_utils.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_req_utils.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_req_utils.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h 
-ts_req_utils.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_req_utils.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_req_utils.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_req_utils.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_req_utils.o: ../../include/openssl/objects.h -ts_req_utils.o: ../../include/openssl/opensslconf.h -ts_req_utils.o: ../../include/openssl/opensslv.h -ts_req_utils.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_req_utils.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_req_utils.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_req_utils.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_req_utils.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_req_utils.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_req_utils.c -ts_rsp_print.o: ../../e_os.h ../../include/openssl/asn1.h -ts_rsp_print.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -ts_rsp_print.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -ts_rsp_print.o: ../../include/openssl/crypto.h ../../include/openssl/dh.h -ts_rsp_print.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -ts_rsp_print.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -ts_rsp_print.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -ts_rsp_print.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -ts_rsp_print.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -ts_rsp_print.o: ../../include/openssl/opensslconf.h -ts_rsp_print.o: ../../include/openssl/opensslv.h -ts_rsp_print.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_rsp_print.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_rsp_print.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_rsp_print.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h 
-ts_rsp_print.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -ts_rsp_print.o: ../cryptlib.h ts.h ts_rsp_print.c -ts_rsp_sign.o: ../../e_os.h ../../include/openssl/asn1.h -ts_rsp_sign.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_rsp_sign.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_rsp_sign.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_rsp_sign.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_rsp_sign.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_rsp_sign.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_rsp_sign.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_rsp_sign.o: ../../include/openssl/objects.h -ts_rsp_sign.o: ../../include/openssl/opensslconf.h -ts_rsp_sign.o: ../../include/openssl/opensslv.h -ts_rsp_sign.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_rsp_sign.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_rsp_sign.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_rsp_sign.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_rsp_sign.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_rsp_sign.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_rsp_sign.c -ts_rsp_utils.o: ../../e_os.h ../../include/openssl/asn1.h -ts_rsp_utils.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_rsp_utils.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_rsp_utils.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_rsp_utils.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_rsp_utils.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_rsp_utils.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_rsp_utils.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_rsp_utils.o: ../../include/openssl/objects.h -ts_rsp_utils.o: 
../../include/openssl/opensslconf.h -ts_rsp_utils.o: ../../include/openssl/opensslv.h -ts_rsp_utils.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_rsp_utils.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_rsp_utils.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_rsp_utils.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_rsp_utils.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_rsp_utils.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_rsp_utils.c -ts_rsp_verify.o: ../../e_os.h ../../include/openssl/asn1.h -ts_rsp_verify.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_rsp_verify.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -ts_rsp_verify.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_rsp_verify.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_rsp_verify.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_rsp_verify.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_rsp_verify.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_rsp_verify.o: ../../include/openssl/objects.h -ts_rsp_verify.o: ../../include/openssl/opensslconf.h -ts_rsp_verify.o: ../../include/openssl/opensslv.h -ts_rsp_verify.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_rsp_verify.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_rsp_verify.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_rsp_verify.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_rsp_verify.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_rsp_verify.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_rsp_verify.c -ts_verify_ctx.o: ../../e_os.h ../../include/openssl/asn1.h -ts_verify_ctx.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -ts_verify_ctx.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h 
-ts_verify_ctx.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -ts_verify_ctx.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -ts_verify_ctx.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -ts_verify_ctx.o: ../../include/openssl/err.h ../../include/openssl/evp.h -ts_verify_ctx.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -ts_verify_ctx.o: ../../include/openssl/objects.h -ts_verify_ctx.o: ../../include/openssl/opensslconf.h -ts_verify_ctx.o: ../../include/openssl/opensslv.h -ts_verify_ctx.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -ts_verify_ctx.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -ts_verify_ctx.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -ts_verify_ctx.o: ../../include/openssl/symhacks.h ../../include/openssl/ts.h -ts_verify_ctx.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -ts_verify_ctx.o: ../../include/openssl/x509v3.h ../cryptlib.h ts_verify_ctx.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c nodejs-0.11.15/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c 2015-01-20 21:22:17.000000000 +0000 @@ -629,6 +629,7 @@ X509_ALGOR_free(*md_alg); OPENSSL_free(*imprint); *imprint_len = 0; + *imprint = NULL; return 0; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/txt_db/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/txt_db/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/txt_db/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/txt_db/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -# -# OpenSSL/crypto/txt_db/Makefile -# - -DIR= txt_db -TOP= ../.. 
-CC= cc -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=txt_db.c -LIBOBJ=txt_db.o - -SRC= $(LIBSRC) - -EXHEADER= txt_db.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by top Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -txt_db.o: ../../e_os.h ../../include/openssl/bio.h -txt_db.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -txt_db.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -txt_db.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -txt_db.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -txt_db.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -txt_db.o: ../../include/openssl/symhacks.h ../../include/openssl/txt_db.h -txt_db.o: ../cryptlib.h txt_db.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ui/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/ui/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/ui/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ui/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -# -# OpenSSL/crypto/ui/Makefile -# - -DIR= ui -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile -#TEST= uitest.c -TEST= -APPS= - -COMPATSRC= ui_compat.c -COMPATOBJ= ui_compat.o - -LIB=$(TOP)/libcrypto.a -LIBSRC= ui_err.c ui_lib.c ui_openssl.c ui_util.c $(COMPATSRC) -LIBOBJ= ui_err.o ui_lib.o ui_openssl.o ui_util.o $(COMPATOBJ) - -SRC= $(LIBSRC) - -EXHEADER= ui.h ui_compat.h -HEADER= $(EXHEADER) ui_locl.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -ui_compat.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -ui_compat.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ui_compat.o: ../../include/openssl/stack.h ../../include/openssl/ui.h -ui_compat.o: ../../include/openssl/ui_compat.h ui_compat.c -ui_err.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h -ui_err.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -ui_err.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -ui_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ui_err.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ui_err.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h ui_err.c -ui_lib.o: ../../e_os.h ../../include/openssl/bio.h -ui_lib.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ui_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -ui_lib.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -ui_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ui_lib.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ui_lib.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -ui_lib.o: ../cryptlib.h ui_lib.c ui_locl.h 
-ui_openssl.o: ../../e_os.h ../../include/openssl/bio.h -ui_openssl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -ui_openssl.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h -ui_openssl.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h -ui_openssl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -ui_openssl.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -ui_openssl.o: ../../include/openssl/symhacks.h ../../include/openssl/ui.h -ui_openssl.o: ../cryptlib.h ui_locl.h ui_openssl.c -ui_util.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -ui_util.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -ui_util.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -ui_util.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -ui_util.o: ../../include/openssl/ui.h ui_locl.h ui_util.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/ui/ui_lib.c nodejs-0.11.15/deps/openssl/openssl/crypto/ui/ui_lib.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/ui/ui_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/ui/ui_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -916,9 +916,9 @@ break; } } + } default: break; } - } return 0; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/whrlpool/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/whrlpool/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/whrlpool/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/whrlpool/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -# -# crypto/whrlpool/Makefile -# - -DIR= whrlpool -TOP= ../.. 
-CC= cc -CPP= $(CC) -E -INCLUDES= -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -WP_ASM_OBJ=wp_block.o - -CFLAGS= $(INCLUDES) $(CFLAG) -ASFLAGS= $(INCLUDES) $(ASFLAG) -AFLAGS= $(ASFLAGS) - -GENERAL=Makefile -TEST=wp_test.c -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC=wp_dgst.c wp_block.c -LIBOBJ=wp_dgst.o $(WP_ASM_OBJ) - -SRC= $(LIBSRC) - -EXHEADER= whrlpool.h -HEADER= wp_locl.h $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -wp-mmx.s: asm/wp-mmx.pl ../perlasm/x86asm.pl - $(PERL) asm/wp-mmx.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ - -wp-x86_64.s: asm/wp-x86_64.pl - $(PERL) asm/wp-x86_64.pl $(PERLASM_SCHEME) > $@ - -$(LIBOBJ): $(LIBSRC) - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -wp_block.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h -wp_block.o: ../../include/openssl/whrlpool.h wp_block.c wp_locl.h -wp_dgst.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -wp_dgst.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -wp_dgst.o: ../../include/openssl/ossl_typ.h ../../include/openssl/safestack.h -wp_dgst.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -wp_dgst.o: ../../include/openssl/whrlpool.h wp_dgst.c wp_locl.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/x509/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/x509/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/x509/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/x509/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,407 +0,0 @@ -# -# OpenSSL/crypto/x509/Makefile -# - -DIR= x509 -TOP= ../.. -CC= cc -INCLUDES= -I.. -I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= x509_def.c x509_d2.c x509_r2x.c x509_cmp.c \ - x509_obj.c x509_req.c x509spki.c x509_vfy.c \ - x509_set.c x509cset.c x509rset.c x509_err.c \ - x509name.c x509_v3.c x509_ext.c x509_att.c \ - x509type.c x509_lu.c x_all.c x509_txt.c \ - x509_trs.c by_file.c by_dir.c x509_vpm.c -LIBOBJ= x509_def.o x509_d2.o x509_r2x.o x509_cmp.o \ - x509_obj.o x509_req.o x509spki.o x509_vfy.o \ - x509_set.o x509cset.o x509rset.o x509_err.o \ - x509name.o x509_v3.o x509_ext.o x509_att.o \ - x509type.o x509_lu.o x_all.o x509_txt.o \ - x509_trs.o by_file.o by_dir.o x509_vpm.o - -SRC= $(LIBSRC) - -EXHEADER= x509.h x509_vfy.h -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -by_dir.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -by_dir.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -by_dir.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -by_dir.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -by_dir.o: ../../include/openssl/err.h ../../include/openssl/evp.h -by_dir.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -by_dir.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -by_dir.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -by_dir.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -by_dir.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -by_dir.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -by_dir.o: ../../include/openssl/x509_vfy.h ../cryptlib.h by_dir.c -by_file.o: ../../e_os.h ../../include/openssl/asn1.h -by_file.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -by_file.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -by_file.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -by_file.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -by_file.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -by_file.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -by_file.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -by_file.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pem.h -by_file.o: ../../include/openssl/pem2.h ../../include/openssl/pkcs7.h -by_file.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -by_file.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -by_file.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -by_file.o: ../cryptlib.h by_file.c -x509_att.o: ../../e_os.h ../../include/openssl/asn1.h -x509_att.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_att.o: 
../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_att.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_att.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_att.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_att.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_att.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_att.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_att.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_att.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_att.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_att.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_att.o: ../cryptlib.h x509_att.c -x509_cmp.o: ../../e_os.h ../../include/openssl/asn1.h -x509_cmp.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_cmp.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_cmp.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_cmp.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_cmp.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_cmp.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_cmp.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_cmp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_cmp.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_cmp.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_cmp.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_cmp.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_cmp.o: ../cryptlib.h x509_cmp.c -x509_d2.o: ../../e_os.h ../../include/openssl/asn1.h -x509_d2.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_d2.o: ../../include/openssl/crypto.h 
../../include/openssl/e_os2.h -x509_d2.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509_d2.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509_d2.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509_d2.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509_d2.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -x509_d2.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -x509_d2.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -x509_d2.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -x509_d2.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -x509_d2.o: ../cryptlib.h x509_d2.c -x509_def.o: ../../e_os.h ../../include/openssl/asn1.h -x509_def.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_def.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509_def.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509_def.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509_def.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509_def.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509_def.o: ../../include/openssl/opensslconf.h -x509_def.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_def.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_def.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_def.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_def.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509_def.c -x509_err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -x509_err.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x509_err.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_err.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_err.o: ../../include/openssl/err.h ../../include/openssl/evp.h 
-x509_err.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_err.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_err.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_err.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_err.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_err.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_err.o: ../../include/openssl/x509_vfy.h x509_err.c -x509_ext.o: ../../e_os.h ../../include/openssl/asn1.h -x509_ext.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_ext.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_ext.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_ext.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_ext.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_ext.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_ext.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_ext.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_ext.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_ext.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_ext.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_ext.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_ext.o: ../cryptlib.h x509_ext.c -x509_lu.o: ../../e_os.h ../../include/openssl/asn1.h -x509_lu.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_lu.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_lu.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_lu.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_lu.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_lu.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_lu.o: 
../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_lu.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_lu.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_lu.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_lu.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_lu.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_lu.o: ../cryptlib.h x509_lu.c -x509_obj.o: ../../e_os.h ../../include/openssl/asn1.h -x509_obj.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_obj.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509_obj.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509_obj.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509_obj.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509_obj.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509_obj.o: ../../include/openssl/opensslconf.h -x509_obj.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_obj.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_obj.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_obj.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_obj.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509_obj.c -x509_r2x.o: ../../e_os.h ../../include/openssl/asn1.h -x509_r2x.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -x509_r2x.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x509_r2x.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_r2x.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_r2x.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_r2x.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_r2x.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_r2x.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -x509_r2x.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_r2x.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_r2x.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_r2x.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509_r2x.c -x509_req.o: ../../e_os.h ../../include/openssl/asn1.h -x509_req.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -x509_req.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -x509_req.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509_req.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509_req.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509_req.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509_req.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509_req.o: ../../include/openssl/opensslconf.h -x509_req.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_req.o: ../../include/openssl/pem.h ../../include/openssl/pem2.h -x509_req.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_req.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_req.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_req.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509_req.c -x509_set.o: ../../e_os.h ../../include/openssl/asn1.h -x509_set.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_set.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509_set.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509_set.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509_set.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509_set.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509_set.o: ../../include/openssl/opensslconf.h -x509_set.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h 
-x509_set.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_set.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_set.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_set.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509_set.c -x509_trs.o: ../../e_os.h ../../include/openssl/asn1.h -x509_trs.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_trs.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_trs.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_trs.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_trs.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_trs.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_trs.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_trs.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_trs.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_trs.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_trs.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_trs.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_trs.o: ../cryptlib.h x509_trs.c -x509_txt.o: ../../e_os.h ../../include/openssl/asn1.h -x509_txt.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_txt.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509_txt.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509_txt.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509_txt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509_txt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509_txt.o: ../../include/openssl/opensslconf.h -x509_txt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_txt.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_txt.o: 
../../include/openssl/sha.h ../../include/openssl/stack.h -x509_txt.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_txt.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509_txt.c -x509_v3.o: ../../e_os.h ../../include/openssl/asn1.h -x509_v3.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_v3.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_v3.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_v3.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_v3.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_v3.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_v3.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_v3.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_v3.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_v3.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_v3.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_v3.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_v3.o: ../cryptlib.h x509_v3.c -x509_vfy.o: ../../e_os.h ../../include/openssl/asn1.h -x509_vfy.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_vfy.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_vfy.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_vfy.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_vfy.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_vfy.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_vfy.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_vfy.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_vfy.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_vfy.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_vfy.o: 
../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_vfy.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_vfy.o: ../cryptlib.h x509_vfy.c -x509_vpm.o: ../../e_os.h ../../include/openssl/asn1.h -x509_vpm.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509_vpm.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -x509_vpm.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -x509_vpm.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -x509_vpm.o: ../../include/openssl/err.h ../../include/openssl/evp.h -x509_vpm.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -x509_vpm.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -x509_vpm.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509_vpm.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509_vpm.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509_vpm.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509_vpm.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -x509_vpm.o: ../cryptlib.h x509_vpm.c -x509cset.o: ../../e_os.h ../../include/openssl/asn1.h -x509cset.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509cset.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509cset.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509cset.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509cset.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509cset.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509cset.o: ../../include/openssl/opensslconf.h -x509cset.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509cset.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509cset.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509cset.o: ../../include/openssl/symhacks.h 
../../include/openssl/x509.h -x509cset.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509cset.c -x509name.o: ../../e_os.h ../../include/openssl/asn1.h -x509name.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509name.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509name.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509name.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509name.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509name.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509name.o: ../../include/openssl/opensslconf.h -x509name.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509name.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509name.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509name.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509name.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509name.c -x509rset.o: ../../e_os.h ../../include/openssl/asn1.h -x509rset.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509rset.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509rset.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509rset.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509rset.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509rset.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509rset.o: ../../include/openssl/opensslconf.h -x509rset.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509rset.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509rset.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509rset.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509rset.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509rset.c -x509spki.o: ../../e_os.h ../../include/openssl/asn1.h -x509spki.o: 
../../include/openssl/bio.h ../../include/openssl/buffer.h -x509spki.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509spki.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509spki.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509spki.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509spki.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509spki.o: ../../include/openssl/opensslconf.h -x509spki.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509spki.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509spki.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509spki.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509spki.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509spki.c -x509type.o: ../../e_os.h ../../include/openssl/asn1.h -x509type.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -x509type.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -x509type.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -x509type.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x509type.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x509type.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x509type.o: ../../include/openssl/opensslconf.h -x509type.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -x509type.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -x509type.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x509type.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x509type.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x509type.c -x_all.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -x_all.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -x_all.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -x_all.o: 
../../include/openssl/ec.h ../../include/openssl/ecdh.h -x_all.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -x_all.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -x_all.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -x_all.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -x_all.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -x_all.o: ../../include/openssl/rsa.h ../../include/openssl/safestack.h -x_all.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -x_all.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -x_all.o: ../../include/openssl/x509_vfy.h ../cryptlib.h x_all.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/x509v3/Makefile.save nodejs-0.11.15/deps/openssl/openssl/crypto/x509v3/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/crypto/x509v3/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/x509v3/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,591 +0,0 @@ -# -# OpenSSL/crypto/x509v3/Makefile -# - -DIR= x509v3 -TOP= ../.. -CC= cc -INCLUDES= -I.. 
-I$(TOP) -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBSRC= v3_bcons.c v3_bitst.c v3_conf.c v3_extku.c v3_ia5.c v3_lib.c \ -v3_prn.c v3_utl.c v3err.c v3_genn.c v3_alt.c v3_skey.c v3_akey.c v3_pku.c \ -v3_int.c v3_enum.c v3_sxnet.c v3_cpols.c v3_crld.c v3_purp.c v3_info.c \ -v3_ocsp.c v3_akeya.c v3_pmaps.c v3_pcons.c v3_ncons.c v3_pcia.c v3_pci.c \ -pcy_cache.c pcy_node.c pcy_data.c pcy_map.c pcy_tree.c pcy_lib.c \ -v3_asid.c v3_addr.c -LIBOBJ= v3_bcons.o v3_bitst.o v3_conf.o v3_extku.o v3_ia5.o v3_lib.o \ -v3_prn.o v3_utl.o v3err.o v3_genn.o v3_alt.o v3_skey.o v3_akey.o v3_pku.o \ -v3_int.o v3_enum.o v3_sxnet.o v3_cpols.o v3_crld.o v3_purp.o v3_info.o \ -v3_ocsp.o v3_akeya.o v3_pmaps.o v3_pcons.o v3_ncons.o v3_pcia.o v3_pci.o \ -pcy_cache.o pcy_node.o pcy_data.o pcy_map.o pcy_tree.o pcy_lib.o \ -v3_asid.o v3_addr.o - -SRC= $(LIBSRC) - -EXHEADER= x509v3.h -HEADER= $(EXHEADER) pcy_int.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all) - -all: lib - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. - @touch lib - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile... 
- $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. - -pcy_cache.o: ../../e_os.h ../../include/openssl/asn1.h -pcy_cache.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pcy_cache.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pcy_cache.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pcy_cache.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pcy_cache.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pcy_cache.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pcy_cache.o: ../../include/openssl/objects.h -pcy_cache.o: ../../include/openssl/opensslconf.h -pcy_cache.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pcy_cache.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pcy_cache.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pcy_cache.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pcy_cache.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pcy_cache.o: ../cryptlib.h pcy_cache.c pcy_int.h -pcy_data.o: ../../e_os.h ../../include/openssl/asn1.h -pcy_data.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pcy_data.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pcy_data.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pcy_data.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pcy_data.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pcy_data.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pcy_data.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pcy_data.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -pcy_data.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pcy_data.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pcy_data.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pcy_data.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pcy_data.o: ../cryptlib.h pcy_data.c pcy_int.h -pcy_lib.o: ../../e_os.h ../../include/openssl/asn1.h -pcy_lib.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pcy_lib.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pcy_lib.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pcy_lib.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pcy_lib.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pcy_lib.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pcy_lib.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pcy_lib.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pcy_lib.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pcy_lib.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pcy_lib.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pcy_lib.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pcy_lib.o: ../cryptlib.h pcy_int.h pcy_lib.c -pcy_map.o: ../../e_os.h ../../include/openssl/asn1.h -pcy_map.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pcy_map.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pcy_map.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pcy_map.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pcy_map.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pcy_map.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pcy_map.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pcy_map.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h 
-pcy_map.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pcy_map.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pcy_map.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pcy_map.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pcy_map.o: ../cryptlib.h pcy_int.h pcy_map.c -pcy_node.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -pcy_node.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -pcy_node.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -pcy_node.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -pcy_node.o: ../../include/openssl/ecdsa.h ../../include/openssl/evp.h -pcy_node.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pcy_node.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pcy_node.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pcy_node.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -pcy_node.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pcy_node.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pcy_node.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pcy_node.o: pcy_int.h pcy_node.c -pcy_tree.o: ../../e_os.h ../../include/openssl/asn1.h -pcy_tree.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -pcy_tree.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -pcy_tree.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -pcy_tree.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -pcy_tree.o: ../../include/openssl/err.h ../../include/openssl/evp.h -pcy_tree.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -pcy_tree.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -pcy_tree.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -pcy_tree.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h 
-pcy_tree.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -pcy_tree.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -pcy_tree.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -pcy_tree.o: ../cryptlib.h pcy_int.h pcy_tree.c -v3_addr.o: ../../e_os.h ../../include/openssl/asn1.h -v3_addr.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_addr.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_addr.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_addr.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_addr.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_addr.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_addr.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_addr.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_addr.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_addr.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_addr.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_addr.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_addr.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_addr.c -v3_akey.o: ../../e_os.h ../../include/openssl/asn1.h -v3_akey.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_akey.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_akey.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_akey.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_akey.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_akey.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_akey.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_akey.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_akey.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_akey.o: 
../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_akey.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_akey.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_akey.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_akey.c -v3_akeya.o: ../../e_os.h ../../include/openssl/asn1.h -v3_akeya.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_akeya.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_akeya.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_akeya.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_akeya.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_akeya.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_akeya.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_akeya.o: ../../include/openssl/opensslconf.h -v3_akeya.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_akeya.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_akeya.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_akeya.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_akeya.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_akeya.o: ../cryptlib.h v3_akeya.c -v3_alt.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_alt.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_alt.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_alt.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_alt.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_alt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_alt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_alt.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_alt.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_alt.o: 
../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_alt.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_alt.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_alt.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_alt.c -v3_asid.o: ../../e_os.h ../../include/openssl/asn1.h -v3_asid.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_asid.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -v3_asid.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_asid.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_asid.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_asid.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_asid.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_asid.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_asid.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_asid.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_asid.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_asid.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_asid.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_asid.o: ../cryptlib.h v3_asid.c -v3_bcons.o: ../../e_os.h ../../include/openssl/asn1.h -v3_bcons.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_bcons.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_bcons.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_bcons.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_bcons.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_bcons.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_bcons.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_bcons.o: ../../include/openssl/opensslconf.h -v3_bcons.o: ../../include/openssl/opensslv.h 
../../include/openssl/ossl_typ.h -v3_bcons.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_bcons.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_bcons.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_bcons.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_bcons.o: ../cryptlib.h v3_bcons.c -v3_bitst.o: ../../e_os.h ../../include/openssl/asn1.h -v3_bitst.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_bitst.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_bitst.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_bitst.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_bitst.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_bitst.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_bitst.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_bitst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_bitst.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_bitst.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_bitst.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_bitst.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_bitst.o: ../cryptlib.h v3_bitst.c -v3_conf.o: ../../e_os.h ../../include/openssl/asn1.h -v3_conf.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_conf.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_conf.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_conf.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_conf.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_conf.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_conf.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_conf.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h 
-v3_conf.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_conf.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_conf.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_conf.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_conf.o: ../cryptlib.h v3_conf.c -v3_cpols.o: ../../e_os.h ../../include/openssl/asn1.h -v3_cpols.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_cpols.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_cpols.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_cpols.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_cpols.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_cpols.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_cpols.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_cpols.o: ../../include/openssl/opensslconf.h -v3_cpols.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_cpols.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_cpols.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_cpols.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_cpols.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_cpols.o: ../cryptlib.h pcy_int.h v3_cpols.c -v3_crld.o: ../../e_os.h ../../include/openssl/asn1.h -v3_crld.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_crld.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_crld.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_crld.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_crld.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_crld.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_crld.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_crld.o: ../../include/openssl/opensslconf.h 
../../include/openssl/opensslv.h -v3_crld.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_crld.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_crld.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_crld.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_crld.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_crld.c -v3_enum.o: ../../e_os.h ../../include/openssl/asn1.h -v3_enum.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_enum.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_enum.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_enum.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_enum.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_enum.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_enum.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_enum.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_enum.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_enum.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_enum.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_enum.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_enum.o: ../cryptlib.h v3_enum.c -v3_extku.o: ../../e_os.h ../../include/openssl/asn1.h -v3_extku.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_extku.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_extku.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_extku.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_extku.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_extku.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_extku.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_extku.o: ../../include/openssl/opensslconf.h -v3_extku.o: 
../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_extku.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_extku.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_extku.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_extku.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_extku.o: ../cryptlib.h v3_extku.c -v3_genn.o: ../../e_os.h ../../include/openssl/asn1.h -v3_genn.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_genn.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_genn.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_genn.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_genn.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_genn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_genn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_genn.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_genn.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_genn.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_genn.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_genn.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_genn.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_genn.c -v3_ia5.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_ia5.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_ia5.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_ia5.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_ia5.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_ia5.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_ia5.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_ia5.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_ia5.o: 
../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_ia5.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_ia5.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_ia5.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_ia5.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_ia5.c -v3_info.o: ../../e_os.h ../../include/openssl/asn1.h -v3_info.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_info.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_info.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_info.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_info.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_info.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_info.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_info.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_info.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_info.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_info.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_info.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_info.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_info.c -v3_int.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_int.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_int.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_int.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_int.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_int.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_int.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_int.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_int.o: ../../include/openssl/ossl_typ.h 
../../include/openssl/pkcs7.h -v3_int.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_int.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_int.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_int.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_int.c -v3_lib.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_lib.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_lib.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_lib.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_lib.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_lib.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_lib.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_lib.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_lib.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_lib.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_lib.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_lib.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_lib.o: ../../include/openssl/x509v3.h ../cryptlib.h ext_dat.h v3_lib.c -v3_ncons.o: ../../e_os.h ../../include/openssl/asn1.h -v3_ncons.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_ncons.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_ncons.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_ncons.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_ncons.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_ncons.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_ncons.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_ncons.o: ../../include/openssl/opensslconf.h -v3_ncons.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_ncons.o: ../../include/openssl/pkcs7.h 
../../include/openssl/safestack.h -v3_ncons.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_ncons.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_ncons.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_ncons.o: ../cryptlib.h v3_ncons.c -v3_ocsp.o: ../../e_os.h ../../include/openssl/asn1.h -v3_ocsp.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_ocsp.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_ocsp.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_ocsp.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_ocsp.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_ocsp.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_ocsp.o: ../../include/openssl/objects.h ../../include/openssl/ocsp.h -v3_ocsp.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_ocsp.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_ocsp.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_ocsp.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_ocsp.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_ocsp.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_ocsp.c -v3_pci.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_pci.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_pci.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_pci.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_pci.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_pci.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_pci.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_pci.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_pci.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_pci.o: ../../include/openssl/safestack.h 
../../include/openssl/sha.h -v3_pci.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_pci.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_pci.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_pci.c -v3_pcia.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -v3_pcia.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_pcia.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_pcia.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_pcia.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_pcia.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_pcia.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_pcia.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_pcia.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_pcia.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_pcia.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_pcia.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_pcia.o: ../../include/openssl/x509v3.h v3_pcia.c -v3_pcons.o: ../../e_os.h ../../include/openssl/asn1.h -v3_pcons.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_pcons.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_pcons.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_pcons.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_pcons.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_pcons.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_pcons.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_pcons.o: ../../include/openssl/opensslconf.h -v3_pcons.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_pcons.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_pcons.o: ../../include/openssl/sha.h 
../../include/openssl/stack.h -v3_pcons.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_pcons.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_pcons.o: ../cryptlib.h v3_pcons.c -v3_pku.o: ../../e_os.h ../../include/openssl/asn1.h -v3_pku.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_pku.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_pku.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_pku.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_pku.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_pku.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_pku.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_pku.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_pku.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_pku.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_pku.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_pku.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_pku.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_pku.c -v3_pmaps.o: ../../e_os.h ../../include/openssl/asn1.h -v3_pmaps.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_pmaps.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_pmaps.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_pmaps.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_pmaps.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_pmaps.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_pmaps.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_pmaps.o: ../../include/openssl/opensslconf.h -v3_pmaps.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_pmaps.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_pmaps.o: 
../../include/openssl/sha.h ../../include/openssl/stack.h -v3_pmaps.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_pmaps.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_pmaps.o: ../cryptlib.h v3_pmaps.c -v3_prn.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_prn.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_prn.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_prn.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_prn.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_prn.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_prn.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_prn.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3_prn.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3_prn.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3_prn.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3_prn.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -v3_prn.o: ../../include/openssl/x509v3.h ../cryptlib.h v3_prn.c -v3_purp.o: ../../e_os.h ../../include/openssl/asn1.h -v3_purp.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_purp.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_purp.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_purp.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_purp.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_purp.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_purp.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_purp.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_purp.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_purp.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_purp.o: 
../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_purp.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_purp.o: ../cryptlib.h v3_purp.c -v3_skey.o: ../../e_os.h ../../include/openssl/asn1.h -v3_skey.o: ../../include/openssl/bio.h ../../include/openssl/buffer.h -v3_skey.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_skey.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_skey.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_skey.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_skey.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_skey.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_skey.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_skey.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_skey.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_skey.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_skey.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_skey.o: ../cryptlib.h v3_skey.c -v3_sxnet.o: ../../e_os.h ../../include/openssl/asn1.h -v3_sxnet.o: ../../include/openssl/asn1t.h ../../include/openssl/bio.h -v3_sxnet.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3_sxnet.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3_sxnet.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3_sxnet.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3_sxnet.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3_sxnet.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3_sxnet.o: ../../include/openssl/opensslconf.h -v3_sxnet.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_sxnet.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_sxnet.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_sxnet.o: 
../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_sxnet.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_sxnet.o: ../cryptlib.h v3_sxnet.c -v3_utl.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3_utl.o: ../../include/openssl/bn.h ../../include/openssl/buffer.h -v3_utl.o: ../../include/openssl/conf.h ../../include/openssl/crypto.h -v3_utl.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -v3_utl.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -v3_utl.o: ../../include/openssl/err.h ../../include/openssl/evp.h -v3_utl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -v3_utl.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -v3_utl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -v3_utl.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -v3_utl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -v3_utl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -v3_utl.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -v3_utl.o: ../cryptlib.h v3_utl.c -v3err.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -v3err.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -v3err.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -v3err.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -v3err.o: ../../include/openssl/ecdsa.h ../../include/openssl/err.h -v3err.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -v3err.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -v3err.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -v3err.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -v3err.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -v3err.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -v3err.o: ../../include/openssl/x509.h 
../../include/openssl/x509_vfy.h -v3err.o: ../../include/openssl/x509v3.h v3err.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/crypto/x509v3/v3_purp.c nodejs-0.11.15/deps/openssl/openssl/crypto/x509v3/v3_purp.c --- nodejs-0.11.13/deps/openssl/openssl/crypto/x509v3/v3_purp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/crypto/x509v3/v3_purp.c 2015-01-20 21:22:17.000000000 +0000 @@ -389,8 +389,8 @@ /* Handle proxy certificates */ if((pci=X509_get_ext_d2i(x, NID_proxyCertInfo, NULL, NULL))) { if (x->ex_flags & EXFLAG_CA - || X509_get_ext_by_NID(x, NID_subject_alt_name, 0) >= 0 - || X509_get_ext_by_NID(x, NID_issuer_alt_name, 0) >= 0) { + || X509_get_ext_by_NID(x, NID_subject_alt_name, -1) >= 0 + || X509_get_ext_by_NID(x, NID_issuer_alt_name, -1) >= 0) { x->ex_flags |= EXFLAG_INVALID; } if (pci->pcPathLengthConstraint) { @@ -670,7 +670,7 @@ return 0; /* Extended Key Usage MUST be critical */ - i_ext = X509_get_ext_by_NID((X509 *) x, NID_ext_key_usage, 0); + i_ext = X509_get_ext_by_NID((X509 *) x, NID_ext_key_usage, -1); if (i_ext >= 0) { X509_EXTENSION *ext = X509_get_ext((X509 *) x, i_ext); diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/eay/base64.c nodejs-0.11.15/deps/openssl/openssl/demos/eay/base64.c --- nodejs-0.11.13/deps/openssl/openssl/demos/eay/base64.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/eay/base64.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -/* This is a simple example of using the base64 BIO to a memory BIO and then - * getting the data. 
- */ -#include <stdio.h> -#include <openssl/bio.h> -#include <openssl/evp.h> - -main() - { - int i; - BIO *mbio,*b64bio,*bio; - char buf[512]; - char *p; - - mbio=BIO_new(BIO_s_mem()); - b64bio=BIO_new(BIO_f_base64()); - - bio=BIO_push(b64bio,mbio); - /* We now have bio pointing at b64->mem, the base64 bio encodes on - * write and decodes on read */ - - for (;;) - { - i=fread(buf,1,512,stdin); - if (i <= 0) break; - BIO_write(bio,buf,i); - } - /* We need to 'flush' things to push out the encoding of the - * last few bytes. There is special encoding if it is not a - * multiple of 3 - */ - BIO_flush(bio); - - printf("We have %d bytes available\n",BIO_pending(mbio)); - - /* We will now get a pointer to the data and the number of elements. */ - /* hmm... this one was not defined by a macro in bio.h, it will be for - * 0.9.1. The other option is too just read from the memory bio. - */ - i=(int)BIO_ctrl(mbio,BIO_CTRL_INFO,0,(char *)&p); - - printf("%d\n",i); - fwrite("---\n",1,4,stdout); - fwrite(p,1,i,stdout); - fwrite("---\n",1,4,stdout); - - /* This call will walk the chain freeing all the BIOs */ - BIO_free_all(bio); - } diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/eay/conn.c nodejs-0.11.15/deps/openssl/openssl/demos/eay/conn.c --- nodejs-0.11.13/deps/openssl/openssl/demos/eay/conn.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/eay/conn.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -/* NOCW */ -/* demos/eay/conn.c */ - -/* A minimal program to connect to a port using the sock4a protocol. - * - * cc -I../../include conn.c -L../.. 
-lcrypto - */ -#include <stdio.h> -#include <stdlib.h> -#include <openssl/err.h> -#include <openssl/bio.h> -/* #include "proxy.h" */ - -extern int errno; - -int main(argc,argv) -int argc; -char *argv[]; - { - PROXY *pxy; - char *host; - char buf[1024*10],*p; - BIO *bio; - int i,len,off,ret=1; - - if (argc <= 1) - host="localhost:4433"; - else - host=argv[1]; - - /* Lets get nice error messages */ - ERR_load_crypto_strings(); - - /* First, configure proxy settings */ - pxy=PROXY_new(); - PROXY_add_server(pxy,PROXY_PROTOCOL_SOCKS,"gromit:1080"); - - bio=BIO_new(BIO_s_socks4a_connect()); - - BIO_set_conn_hostname(bio,host); - BIO_set_proxies(bio,pxy); - BIO_set_socks_userid(bio,"eay"); - BIO_set_nbio(bio,1); - - p="GET / HTTP/1.0\r\n\r\n"; - len=strlen(p); - - off=0; - for (;;) - { - i=BIO_write(bio,&(p[off]),len); - if (i <= 0) - { - if (BIO_should_retry(bio)) - { - fprintf(stderr,"write DELAY\n"); - sleep(1); - continue; - } - else - { - goto err; - } - } - off+=i; - len-=i; - if (len <= 0) break; - } - - for (;;) - { - i=BIO_read(bio,buf,sizeof(buf)); - if (i == 0) break; - if (i < 0) - { - if (BIO_should_retry(bio)) - { - fprintf(stderr,"read DELAY\n"); - sleep(1); - continue; - } - goto err; - } - fwrite(buf,1,i,stdout); - } - - ret=1; - - if (0) - { -err: - if (ERR_peek_error() == 0) /* system call error */ - { - fprintf(stderr,"errno=%d ",errno); - perror("error"); - } - else - ERR_print_errors_fp(stderr); - } - BIO_free_all(bio); - if (pxy != NULL) PROXY_free(pxy); - exit(!ret); - return(ret); - } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/eay/loadrsa.c nodejs-0.11.15/deps/openssl/openssl/demos/eay/loadrsa.c --- nodejs-0.11.13/deps/openssl/openssl/demos/eay/loadrsa.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/eay/loadrsa.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -#include <stdio.h> -#include <openssl/rsa.h> - -/* This is a simple program to generate an RSA private key. 
It then - * saves both the public and private key into a char array, then - * re-reads them. It saves them as DER encoded binary data. - */ - -void callback(stage,count,arg) -int stage,count; -char *arg; - { - FILE *out; - - out=(FILE *)arg; - fprintf(out,"%d",stage); - if (stage == 3) - fprintf(out,"\n"); - fflush(out); - } - -main() - { - RSA *rsa,*pub_rsa,*priv_rsa; - int len; - unsigned char buf[1024],*p; - - rsa=RSA_generate_key(512,RSA_F4,callback,(char *)stdout); - - p=buf; - - /* Save the public key into buffer, we know it will be big enough - * but we should really check how much space we need by calling the - * i2d functions with a NULL second parameter */ - len=i2d_RSAPublicKey(rsa,&p); - len+=i2d_RSAPrivateKey(rsa,&p); - - printf("The public and private key are now both in a char array\n"); - printf("and are taking up %d bytes\n",len); - - RSA_free(rsa); - - p=buf; - pub_rsa=d2i_RSAPublicKey(NULL,&p,(long)len); - len-=(p-buf); - priv_rsa=d2i_RSAPrivateKey(NULL,&p,(long)len); - - if ((pub_rsa == NULL) || (priv_rsa == NULL)) - ERR_print_errors_fp(stderr); - - RSA_free(pub_rsa); - RSA_free(priv_rsa); - } diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/eay/Makefile nodejs-0.11.15/deps/openssl/openssl/demos/eay/Makefile --- nodejs-0.11.13/deps/openssl/openssl/demos/eay/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/eay/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -CC=cc -CFLAGS= -g -I../../include -#LIBS= -L../.. -lcrypto -lssl -LIBS= -L../.. ../../libssl.a ../../libcrypto.a - -# the file conn.c requires a file "proxy.h" which I couldn't find... -#EXAMPLES=base64 conn loadrsa -EXAMPLES=base64 loadrsa - -all: $(EXAMPLES) - -base64: base64.o - $(CC) -o base64 base64.o $(LIBS) -# -# sorry... 
can't find "proxy.h" -#conn: conn.o -# $(CC) -o conn conn.o $(LIBS) - -loadrsa: loadrsa.o - $(CC) -o loadrsa loadrsa.o $(LIBS) - -clean: - rm -f $(EXAMPLES) *.o - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/cert.pem nodejs-0.11.15/deps/openssl/openssl/demos/maurice/cert.pem --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/cert.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/cert.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -issuer :/C=NL/SP=Brabant/L=Eindhoven/O=Gittens Information Systems B.V./OU=Certification Services/CN=ca.gits.nl/Email=mgittens@gits.nl -subject:/C=NL/SP=Brabant/O=Gittens Information Systems B.V./OU=Certification Services/CN=caleb.gits.nl/Email=mgittens@gits.nl -serial :01 - -Certificate: - Data: - Version: 0 (0x0) - Serial Number: 1 (0x1) - Signature Algorithm: md5withRSAEncryption - Issuer: C=NL, SP=Brabant, L=Eindhoven, O=Gittens Information Systems B.V., OU=Certification Services, CN=ca.gits.nl/Email=mgittens@gits.nl - Validity - Not Before: Jan 5 13:21:16 1997 GMT - Not After : Jul 24 13:21:16 1997 GMT - Subject: C=NL, SP=Brabant, O=Gittens Information Systems B.V., OU=Certification Services, CN=caleb.gits.nl/Email=mgittens@gits.nl - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Modulus: - 00:dd:82:a0:fe:a9:8d:6a:02:7e:78:d6:33:75:9b: - 82:01:4b:12:80:ea:6b:9b:83:9e:e3:ae:dc:f3:d0: - 71:7c:4b:ea:03:57:b4:cc:ba:44:5b:b8:4b:49:d3: - f6:39:cc:3d:12:1f:da:58:26:27:bc:bc:ab:a4:6d: - 62:d1:91:5a:47:9f:80:40:c1:b9:fa:e3:1e:ef:52: - 78:46:26:43:65:1d:f2:6b:bf:ff:c0:81:66:14:cd: - 81:32:91:f1:f8:51:7d:0e:17:1f:27:fc:c7:51:fd: - 1c:73:41:e5:66:43:3c:67:a3:09:b9:5e:36:50:50: - b1:e8:42:bd:5c:c6:2b:ec:a9:2c:fe:6a:fe:40:26: - 64:9e:b9:bf:2d:1d:fb:d0:48:5b:82:2a:8e:ab:a4: - d5:7b:5f:26:84:8a:9a:69:5e:c1:71:e2:a9:59:4c: - 2a:76:f7:fd:f4:cf:3f:d3:ce:30:72:62:65:1c:e9: - e9:ee:d2:fc:44:00:1e:e0:80:57:e9:41:b3:f0:44: - 
e5:0f:77:3b:1a:1f:57:5e:94:1d:c3:a5:fa:af:41: - 8c:4c:30:6b:2b:00:84:52:0c:64:0c:a8:5b:17:16: - d1:1e:f8:ea:72:01:47:9a:b9:21:95:f9:71:ed:7c: - d2:93:54:0c:c5:9c:e8:e5:40:28:c5:a0:ca:b1:a9: - 20:f9 - Exponent: 65537 (0x10001) - Signature Algorithm: md5withRSAEncryption - 93:08:f9:e0:d4:c5:ca:95:de:4e:38:3b:28:87:e9:d3:b6:ce: - 4f:69:2e:c9:09:57:2f:fa:e2:50:9f:39:ec:f3:84:e8:3a:8f: - 9b:c3:06:62:90:49:93:6d:23:7a:2b:3d:7b:f9:46:32:18:d3: - 87:44:49:f7:29:2f:f3:58:97:70:c3:45:5b:90:52:1c:df:fb: - a8:a3:a1:29:53:a3:4c:ed:d2:51:d0:44:98:a4:14:6f:76:9d: - 0d:03:76:e5:d3:13:21:ce:a3:4d:2a:77:fe:ad:b3:47:6d:42: - b9:4a:0e:ff:61:f4:ec:62:b2:3b:00:9c:ac:16:a2:ec:19:c8: - c7:3d:d7:7d:97:cd:4d:1a:d2:00:07:4e:40:3d:b9:ba:1e:e2: - fe:81:28:57:b9:ad:2b:74:59:b0:9f:8b:a5:98:d3:75:06:67: - 4a:04:11:b2:ea:1a:8c:e0:d4:be:c8:0c:46:76:7f:5f:5a:7b: - 72:09:dd:b6:d3:6b:97:70:e8:7e:17:74:1c:f7:3a:5f:e3:fa: - c2:f7:95:bd:74:5e:44:4b:9b:bd:27:de:02:7f:87:1f:68:68: - 60:b9:f4:1d:2b:7b:ce:ef:b1:7f:3a:be:b9:66:60:54:6f:0c: - a0:dd:8c:03:a7:f1:9f:f8:0e:8d:bb:c6:ba:77:61:f7:8e:be: - 28:ba:d8:4f - ------BEGIN CERTIFICATE----- -MIIDzzCCArcCAQEwDQYJKoZIhvcNAQEEBQAwgbUxCzAJBgNVBAYTAk5MMRAwDgYD -VQQIEwdCcmFiYW50MRIwEAYDVQQHEwlFaW5kaG92ZW4xKTAnBgNVBAoTIEdpdHRl -bnMgSW5mb3JtYXRpb24gU3lzdGVtcyBCLlYuMR8wHQYDVQQLExZDZXJ0aWZpY2F0 -aW9uIFNlcnZpY2VzMRMwEQYDVQQDEwpjYS5naXRzLm5sMR8wHQYJKoZIhvcNAQkB -FhBtZ2l0dGVuc0BnaXRzLm5sMB4XDTk3MDEwNTEzMjExNloXDTk3MDcyNDEzMjEx -NlowgaQxCzAJBgNVBAYTAk5MMRAwDgYDVQQIEwdCcmFiYW50MSkwJwYDVQQKEyBH -aXR0ZW5zIEluZm9ybWF0aW9uIFN5c3RlbXMgQi5WLjEfMB0GA1UECxMWQ2VydGlm -aWNhdGlvbiBTZXJ2aWNlczEWMBQGA1UEAxMNY2FsZWIuZ2l0cy5ubDEfMB0GCSqG -SIb3DQEJARYQbWdpdHRlbnNAZ2l0cy5ubDCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAN2CoP6pjWoCfnjWM3WbggFLEoDqa5uDnuOu3PPQcXxL6gNXtMy6 -RFu4S0nT9jnMPRIf2lgmJ7y8q6RtYtGRWkefgEDBufrjHu9SeEYmQ2Ud8mu//8CB -ZhTNgTKR8fhRfQ4XHyf8x1H9HHNB5WZDPGejCbleNlBQsehCvVzGK+ypLP5q/kAm -ZJ65vy0d+9BIW4Iqjquk1XtfJoSKmmlewXHiqVlMKnb3/fTPP9POMHJiZRzp6e7S 
-/EQAHuCAV+lBs/BE5Q93OxofV16UHcOl+q9BjEwwaysAhFIMZAyoWxcW0R746nIB -R5q5IZX5ce180pNUDMWc6OVAKMWgyrGpIPkCAwEAATANBgkqhkiG9w0BAQQFAAOC -AQEAkwj54NTFypXeTjg7KIfp07bOT2kuyQlXL/riUJ857POE6DqPm8MGYpBJk20j -eis9e/lGMhjTh0RJ9ykv81iXcMNFW5BSHN/7qKOhKVOjTO3SUdBEmKQUb3adDQN2 -5dMTIc6jTSp3/q2zR21CuUoO/2H07GKyOwCcrBai7BnIxz3XfZfNTRrSAAdOQD25 -uh7i/oEoV7mtK3RZsJ+LpZjTdQZnSgQRsuoajODUvsgMRnZ/X1p7cgndttNrl3Do -fhd0HPc6X+P6wveVvXReREubvSfeAn+HH2hoYLn0HSt7zu+xfzq+uWZgVG8MoN2M -A6fxn/gOjbvGundh946+KLrYTw== ------END CERTIFICATE----- - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example1.c nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example1.c --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example1.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example1.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,198 +0,0 @@ -/* NOCW */ -/* - Please read the README file for condition of use, before - using this software. - - Maurice Gittens <mgittens@gits.nl> January 1997 -*/ - -#include <unistd.h> -#include <stdio.h> -#include <netinet/in.h> -#include <fcntl.h> -#include <strings.h> -#include <stdlib.h> - -#include <openssl/rsa.h> -#include <openssl/evp.h> -#include <openssl/objects.h> -#include <openssl/x509.h> -#include <openssl/err.h> -#include <openssl/pem.h> -#include <openssl/ssl.h> - -#include "loadkeys.h" - -#define PUBFILE "cert.pem" -#define PRIVFILE "privkey.pem" - -#define STDIN 0 -#define STDOUT 1 - -void main_encrypt(void); -void main_decrypt(void); - -static const char *usage = "Usage: example1 [-d]\n"; - -int main(int argc, char *argv[]) -{ - - ERR_load_crypto_strings(); - - if ((argc == 1)) - { - main_encrypt(); - } - else if ((argc == 2) && !strcmp(argv[1],"-d")) - { - main_decrypt(); - } - else - { - printf("%s",usage); - exit(1); - } - - return 0; -} - -void main_encrypt(void) -{ - unsigned int ebuflen; - EVP_CIPHER_CTX ectx; - unsigned char iv[EVP_MAX_IV_LENGTH]; - unsigned char *ekey[1]; - int readlen; - 
int ekeylen, net_ekeylen; - EVP_PKEY *pubKey[1]; - char buf[512]; - char ebuf[512]; - - memset(iv, '\0', sizeof(iv)); - - pubKey[0] = ReadPublicKey(PUBFILE); - - if(!pubKey[0]) - { - fprintf(stderr,"Error: can't load public key"); - exit(1); - } - - ekey[0] = malloc(EVP_PKEY_size(pubKey[0])); - if (!ekey[0]) - { - EVP_PKEY_free(pubKey[0]); - perror("malloc"); - exit(1); - } - - EVP_SealInit(&ectx, - EVP_des_ede3_cbc(), - ekey, - &ekeylen, - iv, - pubKey, - 1); - - net_ekeylen = htonl(ekeylen); - write(STDOUT, (char*)&net_ekeylen, sizeof(net_ekeylen)); - write(STDOUT, ekey[0], ekeylen); - write(STDOUT, iv, sizeof(iv)); - - while(1) - { - readlen = read(STDIN, buf, sizeof(buf)); - - if (readlen <= 0) - { - if (readlen < 0) - perror("read"); - - break; - } - - EVP_SealUpdate(&ectx, ebuf, &ebuflen, buf, readlen); - - write(STDOUT, ebuf, ebuflen); - } - - EVP_SealFinal(&ectx, ebuf, &ebuflen); - - write(STDOUT, ebuf, ebuflen); - - EVP_PKEY_free(pubKey[0]); - free(ekey[0]); -} - -void main_decrypt(void) -{ - char buf[520]; - char ebuf[512]; - unsigned int buflen; - EVP_CIPHER_CTX ectx; - unsigned char iv[EVP_MAX_IV_LENGTH]; - unsigned char *encryptKey; - unsigned int ekeylen; - EVP_PKEY *privateKey; - - memset(iv, '\0', sizeof(iv)); - - privateKey = ReadPrivateKey(PRIVFILE); - if (!privateKey) - { - fprintf(stderr, "Error: can't load private key"); - exit(1); - } - - read(STDIN, &ekeylen, sizeof(ekeylen)); - ekeylen = ntohl(ekeylen); - - if (ekeylen != EVP_PKEY_size(privateKey)) - { - EVP_PKEY_free(privateKey); - fprintf(stderr, "keylength mismatch"); - exit(1); - } - - encryptKey = malloc(sizeof(char) * ekeylen); - if (!encryptKey) - { - EVP_PKEY_free(privateKey); - perror("malloc"); - exit(1); - } - - read(STDIN, encryptKey, ekeylen); - read(STDIN, iv, sizeof(iv)); - EVP_OpenInit(&ectx, - EVP_des_ede3_cbc(), - encryptKey, - ekeylen, - iv, - privateKey); - - while(1) - { - int readlen = read(STDIN, ebuf, sizeof(ebuf)); - - if (readlen <= 0) - { - if (readlen < 0) - 
perror("read"); - - break; - } - - EVP_OpenUpdate(&ectx, buf, &buflen, ebuf, readlen); - write(STDOUT, buf, buflen); - } - - EVP_OpenFinal(&ectx, buf, &buflen); - - write(STDOUT, buf, buflen); - - EVP_PKEY_free(privateKey); - free(encryptKey); -} - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example2.c nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example2.c --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example2.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example2.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,75 +0,0 @@ -/* NOCW */ -/* - Please read the README file for condition of use, before - using this software. - - Maurice Gittens <mgittens@gits.nl> January 1997 -*/ - -#include <stdlib.h> -#include <stdio.h> -#include <strings.h> - -#include <openssl/rsa.h> -#include <openssl/evp.h> -#include <openssl/objects.h> -#include <openssl/x509.h> -#include <openssl/err.h> -#include <openssl/pem.h> -#include <openssl/ssl.h> - -#include "loadkeys.h" - -#define PUBFILE "cert.pem" -#define PRIVFILE "privkey.pem" -#define STDIN 0 -#define STDOUT 1 - -int main() -{ - char *ct = "This the clear text"; - char *buf; - char *buf2; - EVP_PKEY *pubKey; - EVP_PKEY *privKey; - int len; - - ERR_load_crypto_strings(); - - privKey = ReadPrivateKey(PRIVFILE); - if (!privKey) - { - ERR_print_errors_fp (stderr); - exit (1); - } - - pubKey = ReadPublicKey(PUBFILE); - if(!pubKey) - { - EVP_PKEY_free(privKey); - fprintf(stderr,"Error: can't load public key"); - exit(1); - } - - /* No error checking */ - buf = malloc(EVP_PKEY_size(pubKey)); - buf2 = malloc(EVP_PKEY_size(pubKey)); - - len = RSA_public_encrypt(strlen(ct)+1, ct, buf, pubKey->pkey.rsa,RSA_PKCS1_PADDING); - - if (len != EVP_PKEY_size(pubKey)) - { - fprintf(stderr,"Error: ciphertext should match length of key\n"); - exit(1); - } - - RSA_private_decrypt(len, buf, buf2, privKey->pkey.rsa,RSA_PKCS1_PADDING); - - printf("%s\n", buf2); - - 
EVP_PKEY_free(privKey); - EVP_PKEY_free(pubKey); - free(buf); - free(buf2); - return 0; -} diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example3.c nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example3.c --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example3.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example3.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -/* NOCW */ -/* - Please read the README file for condition of use, before - using this software. - - Maurice Gittens <mgittens@gits.nl> January 1997 - -*/ - -#include <stdio.h> -#include <unistd.h> -#include <fcntl.h> -#include <sys/stat.h> -#include <openssl/evp.h> - -#define STDIN 0 -#define STDOUT 1 -#define BUFLEN 512 -#define INIT_VECTOR "12345678" -#define ENCRYPT 1 -#define DECRYPT 0 -#define ALG EVP_des_ede3_cbc() - -static const char *usage = "Usage: example3 [-d] password\n"; - -void do_cipher(char *,int); - -int main(int argc, char *argv[]) -{ - if ((argc == 2)) - { - do_cipher(argv[1],ENCRYPT); - } - else if ((argc == 3) && !strcmp(argv[1],"-d")) - { - do_cipher(argv[2],DECRYPT); - } - else - { - fprintf(stderr,"%s", usage); - exit(1); - } - - return 0; -} - -void do_cipher(char *pw, int operation) -{ - char buf[BUFLEN]; - char ebuf[BUFLEN + 8]; - unsigned int ebuflen; /* rc; */ - unsigned char iv[EVP_MAX_IV_LENGTH], key[EVP_MAX_KEY_LENGTH]; - /* unsigned int ekeylen, net_ekeylen; */ - EVP_CIPHER_CTX ectx; - - memcpy(iv, INIT_VECTOR, sizeof(iv)); - - EVP_BytesToKey(ALG, EVP_md5(), "salu", pw, strlen(pw), 1, key, iv); - - EVP_CIPHER_CTX_init(&ectx); - EVP_CipherInit_ex(&ectx, ALG, NULL, key, iv, operation); - - while(1) - { - int readlen = read(STDIN, buf, sizeof(buf)); - - if (readlen <= 0) - { - if (!readlen) - break; - else - { - perror("read"); - exit(1); - } - } - - EVP_CipherUpdate(&ectx, ebuf, &ebuflen, buf, readlen); - - write(STDOUT, ebuf, ebuflen); - } - - EVP_CipherFinal_ex(&ectx, ebuf, &ebuflen); - 
EVP_CIPHER_CTX_cleanup(&ectx); - - write(STDOUT, ebuf, ebuflen); -} diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example4.c nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example4.c --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/example4.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/example4.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -/* NOCW */ -/* - Please read the README file for condition of use, before - using this software. - - Maurice Gittens <mgittens@gits.nl> January 1997 - -*/ - -#include <stdio.h> -#include <unistd.h> -#include <fcntl.h> -#include <sys/stat.h> -#include <openssl/evp.h> - -#define STDIN 0 -#define STDOUT 1 -#define BUFLEN 512 - -static const char *usage = "Usage: example4 [-d]\n"; - -void do_encode(void); -void do_decode(void); - -int main(int argc, char *argv[]) -{ - if ((argc == 1)) - { - do_encode(); - } - else if ((argc == 2) && !strcmp(argv[1],"-d")) - { - do_decode(); - } - else - { - fprintf(stderr,"%s", usage); - exit(1); - } - - return 0; -} - -void do_encode() -{ - char buf[BUFLEN]; - char ebuf[BUFLEN+24]; - unsigned int ebuflen; - EVP_ENCODE_CTX ectx; - - EVP_EncodeInit(&ectx); - - while(1) - { - int readlen = read(STDIN, buf, sizeof(buf)); - - if (readlen <= 0) - { - if (!readlen) - break; - else - { - perror("read"); - exit(1); - } - } - - EVP_EncodeUpdate(&ectx, ebuf, &ebuflen, buf, readlen); - - write(STDOUT, ebuf, ebuflen); - } - - EVP_EncodeFinal(&ectx, ebuf, &ebuflen); - - write(STDOUT, ebuf, ebuflen); -} - -void do_decode() -{ - char buf[BUFLEN]; - char ebuf[BUFLEN+24]; - unsigned int ebuflen; - EVP_ENCODE_CTX ectx; - - EVP_DecodeInit(&ectx); - - while(1) - { - int readlen = read(STDIN, buf, sizeof(buf)); - int rc; - - if (readlen <= 0) - { - if (!readlen) - break; - else - { - perror("read"); - exit(1); - } - } - - rc = EVP_DecodeUpdate(&ectx, ebuf, &ebuflen, buf, readlen); - if (rc <= 0) - { - if (!rc) - { - write(STDOUT, ebuf, 
ebuflen); - break; - } - - fprintf(stderr, "Error: decoding message\n"); - return; - } - - write(STDOUT, ebuf, ebuflen); - } - - EVP_DecodeFinal(&ectx, ebuf, &ebuflen); - - write(STDOUT, ebuf, ebuflen); -} - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/loadkeys.c nodejs-0.11.15/deps/openssl/openssl/demos/maurice/loadkeys.c --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/loadkeys.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/loadkeys.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -/* NOCW */ -/* - Please read the README file for condition of use, before - using this software. - - Maurice Gittens <mgittens@gits.nl> January 1997 - -*/ - -#include <unistd.h> -#include <stdio.h> -#include <netinet/in.h> -#include <fcntl.h> -#include <strings.h> -#include <stdlib.h> - -#include <openssl/rsa.h> -#include <openssl/evp.h> -#include <openssl/objects.h> -#include <openssl/x509.h> -#include <openssl/err.h> -#include <openssl/pem.h> -#include <openssl/ssl.h> - -EVP_PKEY * ReadPublicKey(const char *certfile) -{ - FILE *fp = fopen (certfile, "r"); - X509 *x509; - EVP_PKEY *pkey; - - if (!fp) - return NULL; - - x509 = PEM_read_X509(fp, NULL, 0, NULL); - - if (x509 == NULL) - { - ERR_print_errors_fp (stderr); - return NULL; - } - - fclose (fp); - - pkey=X509_extract_key(x509); - - X509_free(x509); - - if (pkey == NULL) - ERR_print_errors_fp (stderr); - - return pkey; -} - -EVP_PKEY *ReadPrivateKey(const char *keyfile) -{ - FILE *fp = fopen(keyfile, "r"); - EVP_PKEY *pkey; - - if (!fp) - return NULL; - - pkey = PEM_read_PrivateKey(fp, NULL, 0, NULL); - - fclose (fp); - - if (pkey == NULL) - ERR_print_errors_fp (stderr); - - return pkey; -} - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/loadkeys.h nodejs-0.11.15/deps/openssl/openssl/demos/maurice/loadkeys.h --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/loadkeys.h 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/openssl/openssl/demos/maurice/loadkeys.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -/* NOCW */ -/* - Please read the README file for condition of use, before - using this software. - - Maurice Gittens <mgittens@gits.nl> January 1997 - -*/ - -#ifndef LOADKEYS_H_SEEN -#define LOADKEYS_H_SEEN - -#include <openssl/evp.h> - -EVP_PKEY * ReadPublicKey(const char *certfile); -EVP_PKEY *ReadPrivateKey(const char *keyfile); - -#endif - diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/Makefile nodejs-0.11.15/deps/openssl/openssl/demos/maurice/Makefile --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -CC=cc -CFLAGS= -g -I../../include -Wall -LIBS= -L../.. -lcrypto -EXAMPLES=example1 example2 example3 example4 - -all: $(EXAMPLES) - -example1: example1.o loadkeys.o - $(CC) -o example1 example1.o loadkeys.o $(LIBS) - -example2: example2.o loadkeys.o - $(CC) -o example2 example2.o loadkeys.o $(LIBS) - -example3: example3.o - $(CC) -o example3 example3.o $(LIBS) - -example4: example4.o - $(CC) -o example4 example4.o $(LIBS) - -clean: - rm -f $(EXAMPLES) *.o - -test: all - @echo - @echo Example 1 Demonstrates the sealing and opening APIs - @echo Doing the encrypt side... - ./example1 <README >t.t - @echo Doing the decrypt side... - ./example1 -d <t.t >t.2 - diff t.2 README - rm -f t.t t.2 - @echo example1 is OK - - @echo - @echo Example2 Demonstrates rsa encryption and decryption - @echo and it should just print \"This the clear text\" - ./example2 - - @echo - @echo Example3 Demonstrates the use of symmetric block ciphers - @echo in this case it uses EVP_des_ede3_cbc - @echo i.e. triple DES in Cipher Block Chaining mode - @echo Doing the encrypt side... - ./example3 ThisIsThePassword <README >t.t - @echo Doing the decrypt side... 
- ./example3 -d ThisIsThePassword <t.t >t.2 - diff t.2 README - rm -f t.t t.2 - @echo example3 is OK - - @echo - @echo Example4 Demonstrates base64 encoding and decoding - @echo Doing the encrypt side... - ./example4 <README >t.t - @echo Doing the decrypt side... - ./example4 -d <t.t >t.2 - diff t.2 README - rm -f t.t t.2 - @echo example4 is OK diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/privkey.pem nodejs-0.11.15/deps/openssl/openssl/demos/maurice/privkey.pem --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/privkey.pem 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/privkey.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA3YKg/qmNagJ+eNYzdZuCAUsSgOprm4Oe467c89BxfEvqA1e0 -zLpEW7hLSdP2Ocw9Eh/aWCYnvLyrpG1i0ZFaR5+AQMG5+uMe71J4RiZDZR3ya7// -wIFmFM2BMpHx+FF9DhcfJ/zHUf0cc0HlZkM8Z6MJuV42UFCx6EK9XMYr7Kks/mr+ -QCZknrm/LR370EhbgiqOq6TVe18mhIqaaV7BceKpWUwqdvf99M8/084wcmJlHOnp -7tL8RAAe4IBX6UGz8ETlD3c7Gh9XXpQdw6X6r0GMTDBrKwCEUgxkDKhbFxbRHvjq -cgFHmrkhlflx7XzSk1QMxZzo5UAoxaDKsakg+QIDAQABAoIBAQC0hnh083PnuJ6g -Flob+B+stCUhYWtPc6ZzgphaMD+9ABV4oescipWZdooNYiyikBwZgFIvUvFBtTXh -rLBDgUVlZ81beUb7/EvC2aBh818rsotWW0Sw/ARY4d7wetcL/EWBzUA8E5vR6wlb -uZGelR9OiyYqp2h2bj1/v5yaVnuHxBeBj5clTHtPMXc+/70iUNBDMZ0ruZTdSwll -e0DH8pp/5USYewlrKtRIJT7elC8LFMqEz4OpNvfaR2OEY0FatYYmSvQPNwV8/Eor -XlNzRi9qD0uXbVexaAgQZ3/KZuAzUbOgwJZZXEAOGkZ/J1n08jljPXdU0o7bHhNl -7siHbuEBAoGBAP53IvvJkhnH8Akf6E6sXelZkPKHnwDwfywDAiIhXza9DB1DViRS -bZUB5gzcxmLGalex5+LcwZmsqFO5NXZ8SQeE9p0YT8yJsX4J1w9JzSvsWJBS2vyW -Kbt21oG6JAGrWSGMIfxKpuahtWLf4JpGjftti0qIVQ60GKEPc1/xE2PZAoGBAN7Y -nRPaUaqcIwbnH9kovOKwZ/PWREy1ecr3YXj65VYTnwSJHD0+CJa/DX8eB/G4AoNA -Y2LPbq0Xu3+7SaUsO45VkaZuJmNwheUQ4tmyd/YdnVZ0AHXx1tvpR7QeO0WjnlNK -mR+x00fetrff2Ypahs0wtU0Xf3F8ORgVB8jnxBIhAoGAcwf0PpI+g30Im3dbEsWE -poogpiJ81HXjZ0fs3PTtD9eh9FCOTlkcxHFZR5M980TyqbX4t2tH8WpFpaNh8a/5 -a3bF7PoiiLnuDKXyHC0mnKZ42rU53VkcgGwWSAqXYFHPNwUcD+rHTBbp4kqGQ/eF 
-E5XPk9/RY5YyVAyiAUr/kvECgYBvW1Ua75SxqbZDI8mhbZ79tGMt0NtubZz/1KCL -oOxrGAD1dkJ7Q/1svunSpMIZgvcWeV1wqfFHY72ZNZC2jiTwmkffH9nlBPyTm92Q -JYOWo/PUmMEGLyRL3gWrtxOtV/as7nEYCndmyZ8KwTxmy5fi/z0J2f0gS5AIPbIX -LeGnoQKBgQDapjz9K4HWR5AMxyga4eiLIrmADySP846uz3eZIvTJQZ+6TAamvnno -KbnU21cGq5HBBtxqQvGswLPGW9rZAgykHHJmYBUp0xv4+I4qHfXyD7QNmvq+Vxjj -V2tgIafEpaf2ZsfM7BZeZz8MzeGcDwyrHtIO1FQiYN5Qz9Hq68XmVA== ------END RSA PRIVATE KEY----- diff -Nru nodejs-0.11.13/deps/openssl/openssl/demos/maurice/README nodejs-0.11.15/deps/openssl/openssl/demos/maurice/README --- nodejs-0.11.13/deps/openssl/openssl/demos/maurice/README 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/demos/maurice/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -From Maurice Gittens <mgittens@gits.nl> --- - Example programs, demonstrating some basic SSLeay crypto library - operations, to help you not to make the same mistakes I did. - - The following files are present. - - loadkeys.c Demonstrates the loading and of public and - private keys. - - loadkeys.h The interface for loadkeys.c - - example1.c Demonstrates the sealing and opening API's - - example2.c Demonstrates rsa encryption and decryption - - example3.c Demonstrates the use of symmetric block ciphers - - example4.c Demonstrates base64 and decoding - - Makefile A makefile you probably will have to adjust for - your environment - - README this file - - - The programs were written by Maurice Gittens <mgittens@gits.nl> - with the necesary help from Eric Young <eay@cryptsoft.com> - - You may do as you please with these programs, but please don't - pretend that you wrote them. - - To be complete: If you use these programs you acknowlegde that - you are aware that there is NO warranty of any kind associated - with these programs. I don't even claim that the programs work, - they are provided AS-IS. 
- - January 1997 - - Maurice - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/asn1parse.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/asn1parse.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/asn1parse.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/asn1parse.pod 2015-01-20 21:22:17.000000000 +0000 @@ -15,6 +15,8 @@ [B<-length number>] [B<-i>] [B<-oid filename>] +[B<-dump>] +[B<-dlimit num>] [B<-strparse offset>] [B<-genstr string>] [B<-genconf file>] @@ -64,6 +66,14 @@ a file containing additional OBJECT IDENTIFIERs (OIDs). The format of this file is described in the NOTES section below. +=item B<-dump> + +dump unknown data in hex format. + +=item B<-dlimit num> + +like B<-dump>, but only the first B<num> bytes are output. + =item B<-strparse offset> parse the contents octets of the ASN.1 object starting at B<offset>. This diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/ca.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/ca.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/ca.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/ca.pod 2015-01-20 21:22:17.000000000 +0000 @@ -13,6 +13,8 @@ [B<-name section>] [B<-gencrl>] [B<-revoke file>] +[B<-status serial>] +[B<-updatedb>] [B<-crl_reason reason>] [B<-crl_hold instruction>] [B<-crl_compromise time>] @@ -26,6 +28,7 @@ [B<-md arg>] [B<-policy arg>] [B<-keyfile arg>] +[B<-keyform PEM|DER>] [B<-key arg>] [B<-passin arg>] [B<-cert file>] @@ -83,7 +86,7 @@ a file containing a single Netscape signed public key and challenge and additional field values to be signed by the CA. See the B<SPKAC FORMAT> -section for information on the required format. +section for information on the required input and output format. =item B<-infiles> @@ -94,7 +97,7 @@ the output file to output certificates to. The default is standard output. The certificate details will also be printed out to this -file. 
+file in PEM format (except that B<-spkac> outputs DER format). =item B<-outdir directory> @@ -110,6 +113,11 @@ the private key to sign requests with. +=item B<-keyform PEM|DER> + +the format of the data in the private key file. +The default is PEM. + =item B<-key password> the password used to encrypt the private key. Since on some @@ -267,6 +275,15 @@ a filename containing a certificate to revoke. +=item B<-status serial> + +displays the revocation status of the certificate with the specified +serial number and exits. + +=item B<-updatedb> + +Updates the database index to purge expired certificates. + =item B<-crl_reason reason> revocation reason, where B<reason> is one of: B<unspecified>, B<keyCompromise>, @@ -499,6 +516,10 @@ If you need to include the same component twice then it can be preceded by a number and a '.'. +When processing SPKAC format, the output is DER if the B<-out> +flag is used, but PEM format if sending to stdout or the B<-outdir> +flag is used. + =head1 EXAMPLES Note: these examples assume that the B<ca> directory structure is diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/ciphers.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/ciphers.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/ciphers.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/ciphers.pod 2015-01-20 21:22:17.000000000 +0000 @@ -36,7 +36,7 @@ =item B<-V> -Like B<-V>, but include cipher suite codes in output (hex format). +Like B<-v>, but include cipher suite codes in output (hex format). =item B<-ssl3> @@ -116,8 +116,8 @@ =item B<COMPLEMENTOFDEFAULT> the ciphers included in B<ALL>, but not enabled by default. Currently -this is B<ADH>. Note that this rule does not cover B<eNULL>, which is -not included by B<ALL> (use B<COMPLEMENTOFALL> if necessary). +this is B<ADH> and B<AECDH>. Note that this rule does not cover B<eNULL>, +which is not included by B<ALL> (use B<COMPLEMENTOFALL> if necessary). 
=item B<ALL> @@ -165,21 +165,58 @@ =item B<aNULL> the cipher suites offering no authentication. This is currently the anonymous -DH algorithms. These cipher suites are vulnerable to a "man in the middle" -attack and so their use is normally discouraged. +DH algorithms and anonymous ECDH algorithms. These cipher suites are vulnerable +to a "man in the middle" attack and so their use is normally discouraged. =item B<kRSA>, B<RSA> cipher suites using RSA key exchange. +=item B<kDHr>, B<kDHd>, B<kDH> + +cipher suites using DH key agreement and DH certificates signed by CAs with RSA +and DSS keys or either respectively. Not implemented. + =item B<kEDH> -cipher suites using ephemeral DH key agreement. +cipher suites using ephemeral DH key agreement, including anonymous cipher +suites. -=item B<kDHr>, B<kDHd> +=item B<EDH> -cipher suites using DH key agreement and DH certificates signed by CAs with RSA -and DSS keys respectively. Not implemented. +cipher suites using authenticated ephemeral DH key agreement. + +=item B<ADH> + +anonymous DH cipher suites, note that this does not include anonymous Elliptic +Curve DH (ECDH) cipher suites. + +=item B<DH> + +cipher suites using DH, including anonymous DH, ephemeral DH and fixed DH. + +=item B<kECDHr>, B<kECDHe>, B<kECDH> + +cipher suites using fixed ECDH key agreement signed by CAs with RSA and ECDSA +keys or either respectively. + +=item B<kEECDH> + +cipher suites using ephemeral ECDH key agreement, including anonymous +cipher suites. + +=item B<EECDHE> + +cipher suites using authenticated ephemeral ECDH key agreement. + +=item B<AECDH> + +anonymous Elliptic Curve Diffie Hellman cipher suites. + +=item B<ECDH> + +cipher suites using ECDH key exchange, including anonymous, ephemeral and +fixed ECDH. =item B<aRSA> @@ -194,30 +231,39 @@ cipher suites effectively using DH authentication, i.e. the certificates carry DH keys. Not implemented. +=item B<aECDH> + +cipher suites effectively using ECDH authentication, i.e. 
the certificates +carry ECDH keys. + +=item B<aECDSA>, B<ECDSA> + +cipher suites using ECDSA authentication, i.e. the certificates carry ECDSA +keys. + =item B<kFZA>, B<aFZA>, B<eFZA>, B<FZA> ciphers suites using FORTEZZA key exchange, authentication, encryption or all FORTEZZA algorithms. Not implemented. -=item B<TLSv1>, B<SSLv3>, B<SSLv2> - -TLS v1.0, SSL v3.0 or SSL v2.0 cipher suites respectively. +=item B<TLSv1.2>, B<TLSv1>, B<SSLv3>, B<SSLv2> -=item B<DH> +TLS v1.2, TLS v1.0, SSL v3.0 or SSL v2.0 cipher suites respectively. Note: +there are no ciphersuites specific to TLS v1.1. -cipher suites using DH, including anonymous DH. +=item B<AES128>, B<AES256>, B<AES> -=item B<ADH> +cipher suites using 128 bit AES, 256 bit AES or either 128 or 256 bit AES. -anonymous DH cipher suites. +=item B<AESGCM> -=item B<AES> +AES in Galois Counter Mode (GCM): these ciphersuites are only supported +in TLS v1.2. -cipher suites using AES. +=item B<CAMELLIA128>, B<CAMELLIA256>, B<CAMELLIA> -=item B<CAMELLIA> - -cipher suites using Camellia. +cipher suites using 128 bit CAMELLIA, 256 bit CAMELLIA or either 128 or 256 bit +CAMELLIA. =item B<3DES> @@ -251,6 +297,10 @@ cipher suites using SHA1. +=item B<SHA256>, B<SHA384> + +ciphersuites using SHA256 or SHA384. + =item B<aGOST> cipher suites using GOST R 34.10 (either 2001 or 94) for authenticaction @@ -277,6 +327,9 @@ cipher suites using GOST 28147-89 MAC B<instead of> HMAC. +=item B<PSK> + +cipher suites using pre-shared keys (PSK). =back @@ -423,7 +476,100 @@ TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA EXP1024-DHE-DSS-RC4-SHA TLS_DHE_DSS_WITH_RC4_128_SHA DHE-DSS-RC4-SHA -=head2 SSL v2.0 cipher suites. +=head2 Elliptic curve cipher suites. 
+ + TLS_ECDH_RSA_WITH_NULL_SHA ECDH-RSA-NULL-SHA + TLS_ECDH_RSA_WITH_RC4_128_SHA ECDH-RSA-RC4-SHA + TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA ECDH-RSA-DES-CBC3-SHA + TLS_ECDH_RSA_WITH_AES_128_CBC_SHA ECDH-RSA-AES128-SHA + TLS_ECDH_RSA_WITH_AES_256_CBC_SHA ECDH-RSA-AES256-SHA + + TLS_ECDH_ECDSA_WITH_NULL_SHA ECDH-ECDSA-NULL-SHA + TLS_ECDH_ECDSA_WITH_RC4_128_SHA ECDH-ECDSA-RC4-SHA + TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA ECDH-ECDSA-DES-CBC3-SHA + TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA ECDH-ECDSA-AES128-SHA + TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA ECDH-ECDSA-AES256-SHA + + TLS_ECDHE_RSA_WITH_NULL_SHA ECDHE-RSA-NULL-SHA + TLS_ECDHE_RSA_WITH_RC4_128_SHA ECDHE-RSA-RC4-SHA + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA ECDHE-RSA-DES-CBC3-SHA + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA ECDHE-RSA-AES128-SHA + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA ECDHE-RSA-AES256-SHA + + TLS_ECDHE_ECDSA_WITH_NULL_SHA ECDHE-ECDSA-NULL-SHA + TLS_ECDHE_ECDSA_WITH_RC4_128_SHA ECDHE-ECDSA-RC4-SHA + TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA ECDHE-ECDSA-DES-CBC3-SHA + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA ECDHE-ECDSA-AES128-SHA + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA ECDHE-ECDSA-AES256-SHA + + TLS_ECDH_anon_WITH_NULL_SHA AECDH-NULL-SHA + TLS_ECDH_anon_WITH_RC4_128_SHA AECDH-RC4-SHA + TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA AECDH-DES-CBC3-SHA + TLS_ECDH_anon_WITH_AES_128_CBC_SHA AECDH-AES128-SHA + TLS_ECDH_anon_WITH_AES_256_CBC_SHA AECDH-AES256-SHA + +=head2 TLS v1.2 cipher suites + + TLS_RSA_WITH_NULL_SHA256 NULL-SHA256 + + TLS_RSA_WITH_AES_128_CBC_SHA256 AES128-SHA256 + TLS_RSA_WITH_AES_256_CBC_SHA256 AES256-SHA256 + TLS_RSA_WITH_AES_128_GCM_SHA256 AES128-GCM-SHA256 + TLS_RSA_WITH_AES_256_GCM_SHA384 AES256-GCM-SHA384 + + TLS_DH_RSA_WITH_AES_128_CBC_SHA256 Not implemented. + TLS_DH_RSA_WITH_AES_256_CBC_SHA256 Not implemented. + TLS_DH_RSA_WITH_AES_128_GCM_SHA256 Not implemented. + TLS_DH_RSA_WITH_AES_256_GCM_SHA384 Not implemented. + + TLS_DH_DSS_WITH_AES_128_CBC_SHA256 Not implemented. + TLS_DH_DSS_WITH_AES_256_CBC_SHA256 Not implemented. 
+ TLS_DH_DSS_WITH_AES_128_GCM_SHA256 Not implemented. + TLS_DH_DSS_WITH_AES_256_GCM_SHA384 Not implemented. + + TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 DHE-RSA-AES128-SHA256 + TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 DHE-RSA-AES256-SHA256 + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 DHE-RSA-AES128-GCM-SHA256 + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 DHE-RSA-AES256-GCM-SHA384 + + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 DHE-DSS-AES128-SHA256 + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 DHE-DSS-AES256-SHA256 + TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 DHE-DSS-AES128-GCM-SHA256 + TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 DHE-DSS-AES256-GCM-SHA384 + + TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 ECDH-RSA-AES128-SHA256 + TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 ECDH-RSA-AES256-SHA384 + TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 ECDH-RSA-AES128-GCM-SHA256 + TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 ECDH-RSA-AES256-GCM-SHA384 + + TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 ECDH-ECDSA-AES128-SHA256 + TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 ECDH-ECDSA-AES256-SHA384 + TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 ECDH-ECDSA-AES128-GCM-SHA256 + TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 ECDH-ECDSA-AES256-GCM-SHA384 + + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 ECDHE-RSA-AES128-SHA256 + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 ECDHE-RSA-AES256-SHA384 + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ECDHE-RSA-AES128-GCM-SHA256 + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ECDHE-RSA-AES256-GCM-SHA384 + + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 ECDHE-ECDSA-AES128-SHA256 + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 ECDHE-ECDSA-AES256-SHA384 + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ECDHE-ECDSA-AES128-GCM-SHA256 + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ECDHE-ECDSA-AES256-GCM-SHA384 + + TLS_DH_anon_WITH_AES_128_CBC_SHA256 ADH-AES128-SHA256 + TLS_DH_anon_WITH_AES_256_CBC_SHA256 ADH-AES256-SHA256 + TLS_DH_anon_WITH_AES_128_GCM_SHA256 ADH-AES128-GCM-SHA256 + TLS_DH_anon_WITH_AES_256_GCM_SHA384 ADH-AES256-GCM-SHA384 + +=head2 Pre shared keying (PSK) cipheruites + + TLS_PSK_WITH_RC4_128_SHA 
PSK-RC4-SHA + TLS_PSK_WITH_3DES_EDE_CBC_SHA PSK-3DES-EDE-CBC-SHA + TLS_PSK_WITH_AES_128_CBC_SHA PSK-AES128-CBC-SHA + TLS_PSK_WITH_AES_256_CBC_SHA PSK-AES256-CBC-SHA + +=head2 Deprecated SSL v2.0 cipher suites. SSL_CK_RC4_128_WITH_MD5 RC4-MD5 SSL_CK_RC4_128_EXPORT40_WITH_MD5 EXP-RC4-MD5 @@ -452,6 +598,11 @@ openssl ciphers -v 'ALL:!ADH:@STRENGTH' +Include all ciphers except ones with no encryption (eNULL) or no +authentication (aNULL): + + openssl ciphers -v 'ALL:!aNULL' + Include only 3DES ciphers and then place RSA ciphers last: openssl ciphers -v '3DES:+RSA' diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/cms.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/cms.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/cms.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/cms.pod 2015-01-20 21:22:17.000000000 +0000 @@ -90,6 +90,11 @@ encrypted mail message in MIME format for the input file. The decrypted mail is written to the output file. +=item B<-debug_decrypt> + +this option sets the B<CMS_DEBUG_DECRYPT> flag. This option should be used +with caution: see the notes section below. + =item B<-sign> sign mail using the supplied certificate and private key. Input file is @@ -138,7 +143,7 @@ =item B<-EncryptedData_encrypt> -Encrypt suppled content using supplied symmetric key and algorithm using a CMS +Encrypt content using supplied symmetric key and algorithm using a CMS B<EncrytedData> type and output the content. =item B<-sign_receipt> @@ -446,32 +451,42 @@ since the content is no longer part of the CMS structure the encoding remains DER. +If the B<-decrypt> option is used without a recipient certificate then an +attempt is made to locate the recipient by trying each potential recipient +in turn using the supplied private key. 
To thwart the MMA attack +(Bleichenbacher's attack on PKCS #1 v1.5 RSA padding) all recipients are +tried whether they succeed or not and if no recipients match the message +is "decrypted" using a random key which will typically output garbage. +The B<-debug_decrypt> option can be used to disable the MMA attack protection +and return an error if no recipient can be found: this option should be used +with caution. For a fuller description see L<CMS_decrypt(3)|CMS_decrypt(3)>). + =head1 EXIT CODES =over 4 -=item 0 +=item Z<>0 the operation was completely successfully. -=item 1 +=item Z<>1 an error occurred parsing the command options. -=item 2 +=item Z<>2 one of the input files could not be read. -=item 3 +=item Z<>3 an error occurred creating the CMS file or when reading the MIME message. -=item 4 +=item Z<>4 an error occurred decrypting or verifying the message. -=item 5 +=item Z<>5 the message was verified correctly but an error occurred writing out the signers certificates. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/c_rehash.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/c_rehash.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/c_rehash.pod 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/c_rehash.pod 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ +=pod + +=for comment +Original text by James Westby, contributed under the OpenSSL license. + +=head1 NAME + +c_rehash - Create symbolic links to files named by the hash values + +=head1 SYNOPSIS + +B<c_rehash> +[ I<directory>...] + +=head1 DESCRIPTION + +B<c_rehash> scans directories and calculates a hash value of each C<.pem> +file in the specified directory list and creates symbolic links +for each file, where the name of the link is the hash value. +This utility is useful as many programs that use OpenSSL require +directories to be set up like this in order to find certificates. 
+ +If any directories are named on the command line, then those are +processed in turn. If not, then the B<SSL_CERT_DIR> environment variable +is consulted; this shold be a colon-separated list of directories, +like the Unix B<PATH> variable. +If that is not set then the default directory (installation-specific +but often B</usr/local/ssl/certs>) is processed. + +In order for a directory to be processed, the user must have write +permissions on that directory, otherwise it will be skipped. +The links created are of the form C<HHHHHHHH.D>, where each B<H> +is a hexadecimal character and B<D> is a single decimal digit. +When processing a directory, B<c_rehash> will first remove all links +that have a name in that syntax. If you have links in that format +used for other purposes, they will be removed. +Hashes for CRL's look similar except the letter B<r> appears after +the period, like this: C<HHHHHHHH.rD>. + +Multiple objects may have the same hash; they will be indicated by +incrementing the B<D> value. Duplicates are found by comparing the +full SHA-1 fingerprint. A warning will be displayed if a duplicate +is found. + +A warning will also be displayed if there are B<.pem> files that +cannot be parsed as either a certificate or a CRL. + +The program uses the B<openssl> program to compute the hashes and +fingerprints. If not found in the user's B<PATH>, then set the +B<OPENSSL> environment variable to the full pathname. +Any program can be used, it will be invoked as follows for either +a certificate or CRL: + + $OPENSSL x509 -hash -fingerprint -noout -in FFFFFF + $OPENSSL crl -hash -fingerprint -noout -in FFFFFF + +where B<FFFFFF> is the filename. It must output the hash of the +file on the first line, and the fingerprint on the second, +optionally prefixed with some text and an equals sign. + +=head1 ENVIRONMENT + +=over + +=item B<OPENSSL> + +The path to an executable to use to generate hashes and +fingerprints (see above). 
+ +=item B<SSL_CERT_DIR> + +Colon separated list of directories to operate on. +Ignored if directories are listed on the command line. + +=back + +=head1 SEE ALSO + +L<openssl(1)|openssl(1)>, +L<crl(1)|crl(1)>. +L<x509(1)|x509(1)>. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/crl.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/crl.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/crl.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/crl.pod 2015-01-20 21:22:17.000000000 +0000 @@ -12,6 +12,7 @@ [B<-text>] [B<-in filename>] [B<-out filename>] +[B<-nameopt option>] [B<-noout>] [B<-hash>] [B<-issuer>] @@ -53,6 +54,11 @@ print out the CRL in text form. +=item B<-nameopt option> + +option which determines how the subject or issuer names are displayed. See +the description of B<-nameopt> in L<x509(1)|x509(1)>. + =item B<-noout> don't output the encoded version of the CRL. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/dgst.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/dgst.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/dgst.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/dgst.pod 2015-01-20 21:22:17.000000000 +0000 @@ -2,16 +2,17 @@ =head1 NAME -dgst, md5, md4, md2, sha1, sha, mdc2, ripemd160 - message digests +dgst, sha, sha1, mdc2, ripemd160, sha224, sha256, sha384, sha512, md2, md4, md5, dss1 - message digests =head1 SYNOPSIS B<openssl> B<dgst> -[B<-md5|-md4|-md2|-sha1|-sha|-mdc2|-ripemd160|-dss1>] +[B<-sha|-sha1|-mdc2|-ripemd160|-sha224|-sha256|-sha384|-sha512|-md2|-md4|-md5|-dss1>] [B<-c>] [B<-d>] [B<-hex>] [B<-binary>] +[B<-r>] [B<-out filename>] [B<-sign filename>] [B<-keyform arg>] @@ -20,17 +21,19 @@ [B<-prverify filename>] [B<-signature filename>] [B<-hmac key>] +[B<-non-fips-allow>] +[B<-fips-fingerprint>] [B<file...>] -[B<md5|md4|md2|sha1|sha|mdc2|ripemd160>] -[B<-c>] -[B<-d>] -[B<file...>] +B<openssl> +[I<digest>] +[B<...>] =head1 DESCRIPTION The 
digest functions output the message digest of a supplied file or files -in hexadecimal form. They can also be used for digital signing and verification. +in hexadecimal. The digest functions also generate and verify digital +signatures using message digests. =head1 OPTIONS @@ -48,12 +51,17 @@ =item B<-hex> digest is to be output as a hex dump. This is the default case for a "normal" -digest as opposed to a digital signature. +digest as opposed to a digital signature. See NOTES below for digital +signatures using B<-hex>. =item B<-binary> output the digest or signature in binary form. +=item B<-r> + +output the digest in the "coreutils" format used by programs like B<sha1sum>. + =item B<-out filename> filename to output to, or standard output by default. @@ -64,8 +72,8 @@ =item B<-keyform arg> -Specifies the key format to sign digest with. Only PEM and ENGINE -formats are supported by the B<dgst> command. +Specifies the key format to sign digest with. The DER, PEM, P12, +and ENGINE formats are supported. =item B<-engine id> @@ -117,7 +125,7 @@ =over 8 =item B<key:string> - + Specifies MAC key as alphnumeric string (use if key contain printable characters only). String length must conform to any restrictions of the MAC algorithm for example exactly 32 chars for gost-mac. @@ -138,6 +146,15 @@ The separator is B<;> for MS-Windows, B<,> for OpenVMS, and B<:> for all others. +=item B<-non-fips-allow> + +enable use of non-FIPS algorithms such as MD5 even in FIPS mode. + +=item B<-fips-fingerprint> + +compute HMAC using a specific key +for certain OpenSSL-FIPS operations. + =item B<file...> file or files to digest. 
If no files are specified then standard input is @@ -145,18 +162,41 @@ =back + +=head1 EXAMPLES + +To create a hex-encoded message digest of a file: + openssl dgst -md5 -hex file.txt + +To sign a file using SHA-256 with binary file output: + openssl dgst -sha256 -sign privatekey.pem -out signature.sign file.txt + +To verify a signature: + openssl dgst -sha256 -verify publickey.pem \ + -signature signature.sign \ + file.txt + + =head1 NOTES The digest of choice for all new applications is SHA1. Other digests are however still widely used. -If you wish to sign or verify data using the DSA algorithm then the dss1 -digest must be used. +When signing a file, B<dgst> will automatically determine the algorithm +(RSA, ECC, etc) to use for signing based on the private key's ASN.1 info. +When verifying signatures, it only handles the RSA, DSA, or ECDSA signature +itself, not the related data to identify the signer and algorithm used in +formats such as x.509, CMS, and S/MIME. A source of random numbers is required for certain signing algorithms, in -particular DSA. +particular ECDSA and DSA. The signing and verify options should only be used if a single file is being signed or verified. +Hex signatures cannot be verified using B<openssl>. Instead, use "xxd -r" +or similar program to transform the hex signature into a binary signature +prior to verification. + + =cut diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/dhparam.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/dhparam.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/dhparam.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/dhparam.pod 2015-01-20 21:22:17.000000000 +0000 @@ -12,6 +12,7 @@ [B<-in> I<filename>] [B<-out> I<filename>] [B<-dsaparam>] +[B<-check>] [B<-noout>] [B<-text>] [B<-C>] @@ -64,6 +65,10 @@ parameters, a fresh DH key should be created for each use to avoid small-subgroup attacks that may be possible otherwise. 
+=item B<-check> + +check if the parameters are valid primes and generator. + =item B<-2>, B<-5> The generator to use, either 2 or 5. 2 is the default. If present then the diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/dsa.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/dsa.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/dsa.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/dsa.pod 2015-01-20 21:22:17.000000000 +0000 @@ -13,6 +13,12 @@ [B<-passin arg>] [B<-out filename>] [B<-passout arg>] +[B<-aes128>] +[B<-aes192>] +[B<-aes256>] +[B<-camellia128>] +[B<-camellia192>] +[B<-camellia256>] [B<-des>] [B<-des3>] [B<-idea>] @@ -74,10 +80,10 @@ the output file password source. For more information about the format of B<arg> see the B<PASS PHRASE ARGUMENTS> section in L<openssl(1)|openssl(1)>. -=item B<-des|-des3|-idea> +=item B<-aes128|-aes192|-aes256|-camellia128|-camellia192|-camellia256|-des|-des3|-idea> -These options encrypt the private key with the DES, triple DES, or the -IDEA ciphers respectively before outputting it. A pass phrase is prompted for. +These options encrypt the private key with the specified +cipher before outputting it. A pass phrase is prompted for. If none of these options is specified the key is written in plain text. 
This means that using the B<dsa> utility to read in an encrypted key with no encryption option can be used to remove the pass phrase from a key, or by diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/ecparam.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/ecparam.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/ecparam.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/ecparam.pod 2015-01-20 21:22:17.000000000 +0000 @@ -16,7 +16,7 @@ [B<-C>] [B<-check>] [B<-name arg>] -[B<-list_curve>] +[B<-list_curves>] [B<-conv_form arg>] [B<-param_enc arg>] [B<-no_seed>] diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/enc.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/enc.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/enc.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/enc.pod 2015-01-20 21:22:17.000000000 +0000 @@ -215,6 +215,10 @@ list of ciphers, supported by your versesion of OpenSSL, including ones provided by configured engines. +The B<enc> program does not support authenticated encryption modes +like CCM and GCM. The utility does not store or retrieve the +authentication tag. + base64 Base 64 diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/gendsa.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/gendsa.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/gendsa.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/gendsa.pod 2015-01-20 21:22:17.000000000 +0000 @@ -8,6 +8,12 @@ B<openssl> B<gendsa> [B<-out filename>] +[B<-aes128>] +[B<-aes192>] +[B<-aes256>] +[B<-camellia128>] +[B<-camellia192>] +[B<-camellia256>] [B<-des>] [B<-des3>] [B<-idea>] @@ -24,10 +30,10 @@ =over 4 -=item B<-des|-des3|-idea> +=item B<-aes128|-aes192|-aes256|-camellia128|-camellia192|-camellia256|-des|-des3|-idea> -These options encrypt the private key with the DES, triple DES, or the -IDEA ciphers respectively before outputting it. 
A pass phrase is prompted for.
+These options encrypt the private key with specified
+cipher before outputting it. A pass phrase is prompted for.
 
 If none of these options is specified no encryption is used.
 
 =item B<-rand file(s)>
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/genrsa.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/genrsa.pod
--- nodejs-0.11.13/deps/openssl/openssl/doc/apps/genrsa.pod	2014-05-02 00:49:51.000000000 +0000
+++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/genrsa.pod	2015-01-20 21:22:17.000000000 +0000
@@ -9,6 +9,12 @@
 B<openssl> B<genrsa>
 [B<-out filename>]
 [B<-passout arg>]
+[B<-aes128>]
+[B<-aes192>]
+[B<-aes256>]
+[B<-camellia128>]
+[B<-camellia192>]
+[B<-camellia256>]
 [B<-des>]
 [B<-des3>]
 [B<-idea>]
@@ -36,10 +42,10 @@
 the output file password source. For more information about the format of B<arg>
 see the B<PASS PHRASE ARGUMENTS> section in L<openssl(1)|openssl(1)>.
 
-=item B<-des|-des3|-idea>
+=item B<-aes128|-aes192|-aes256|-camellia128|-camellia192|-camellia256|-des|-des3|-idea>
 
-These options encrypt the private key with the DES, triple DES, or the
-IDEA ciphers respectively before outputting it. If none of these options is
+These options encrypt the private key with specified
+cipher before outputting it. If none of these options is
 specified no encryption is used. If encryption is used a pass phrase is prompted
 for if it is not supplied via the B<-passout> argument.
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/rsa.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/rsa.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/rsa.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/rsa.pod 2015-01-20 21:22:17.000000000 +0000 @@ -15,6 +15,12 @@ [B<-out filename>] [B<-passout arg>] [B<-sgckey>] +[B<-aes128>] +[B<-aes192>] +[B<-aes256>] +[B<-camellia128>] +[B<-camellia192>] +[B<-camellia256>] [B<-des>] [B<-des3>] [B<-idea>] @@ -82,10 +88,10 @@ use the modified NET algorithm used with some versions of Microsoft IIS and SGC keys. -=item B<-des|-des3|-idea> +=item B<-aes128|-aes192|-aes256|-camellia128|-camellia192|-camellia256|-des|-des3|-idea> -These options encrypt the private key with the DES, triple DES, or the -IDEA ciphers respectively before outputting it. A pass phrase is prompted for. +These options encrypt the private key with the specified +cipher before outputting it. A pass phrase is prompted for. If none of these options is specified the key is written in plain text. 
This means that using the B<rsa> utility to read in an encrypted key with no encryption option can be used to remove the pass phrase from a key, or by diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/s_client.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/s_client.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/s_client.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/s_client.pod 2015-01-20 21:22:17.000000000 +0000 @@ -9,6 +9,7 @@ B<openssl> B<s_client> [B<-connect host:port>] +[B<-servername name>] [B<-verify depth>] [B<-verify_return_error>] [B<-cert filename>] @@ -28,6 +29,7 @@ [B<-nbio>] [B<-crlf>] [B<-ign_eof>] +[B<-no_ign_eof>] [B<-quiet>] [B<-ssl2>] [B<-ssl3>] @@ -37,6 +39,7 @@ [B<-no_tls1>] [B<-bugs>] [B<-cipher cipherlist>] +[B<-serverpref>] [B<-starttls protocol>] [B<-engine id>] [B<-tlsextdebug>] @@ -44,6 +47,8 @@ [B<-sess_out filename>] [B<-sess_in filename>] [B<-rand file(s)>] +[B<-status>] +[B<-nextprotoneg protocols>] =head1 DESCRIPTION @@ -60,6 +65,10 @@ This specifies the host and optional port to connect to. If not specified then an attempt is made to connect to the local host on port 4433. +=item B<-servername name> + +Set the TLS SNI (Server Name Indication) extension in the ClientHello message. + =item B<-cert certname> The certificate to use, if one is requested by the server. The default is @@ -172,6 +181,11 @@ inhibit printing of session and certificate information. This implicitly turns on B<-ign_eof> as well. +=item B<-no_ign_eof> + +shut down the connection when end of file is reached in the input. +Can be used to override the implicit B<-ign_eof> after B<-quiet>. + =item B<-psk_identity identity> Use the PSK identity B<identity> when using a PSK cipher suite. @@ -205,6 +219,10 @@ supported cipher in the list sent by the client. See the B<ciphers> command for more information. +=item B<-serverpref> + +use the server's cipher preferences; only used for SSLV2. 
+
 =item B<-starttls protocol>
 
 send the protocol-specific message(s) to switch to TLS for communication.
@@ -243,6 +261,22 @@
 The separator is B<;> for MS-Windows, B<,> for OpenVMS, and B<:> for all
 others.
 
+=item B<-status>
+
+sends a certificate status request to the server (OCSP stapling). The server
+response (if any) is printed out.
+
+=item B<-nextprotoneg protocols>
+
+enable Next Protocol Negotiation TLS extension and provide a list of
+comma-separated protocol names that the client should advertise
+support for. The list should contain most wanted protocols first.
+Protocol names are printable ASCII strings, for example "http/1.1" or
+"spdy/3".
+Empty list of protocols is treated specially and will cause the client to
+advertise support for the TLS extension but disconnect just after
+receiving ServerHello with a list of server supported protocols.
+
 =back
 
 =head1 CONNECTED COMMANDS
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/smime.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/smime.pod
--- nodejs-0.11.13/deps/openssl/openssl/doc/apps/smime.pod	2014-05-02 00:49:51.000000000 +0000
+++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/smime.pod	2015-01-20 21:22:17.000000000 +0000
@@ -159,7 +159,7 @@
 example B<-aes_128_cbc>. See L<B<enc>|enc(1)> for list of ciphers
 supported by your version of OpenSSL.
 
-If not specified 40 bit RC2 is used. Only used with B<-encrypt>.
+If not specified triple DES is used. Only used with B<-encrypt>.
 
 =item B<-nointern>
 
@@ -308,28 +308,28 @@
 
 =over 4
 
-=item 0
+=item Z<>0
 
 the operation was completely successfully.
 
-=item 1
+=item Z<>1
 
 an error occurred parsing the command options.
 
-=item 2
+=item Z<>2
 
 one of the input files could not be read.
 
-=item 3
+=item Z<>3
 
 an error occurred creating the PKCS#7 file or when reading the MIME
 message.
 
-=item 4
+=item Z<>4
 
 an error occurred decrypting or verifying the message.
-=item 5 +=item Z<>5 the message was verified correctly but an error occurred writing out the signers certificates. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/s_server.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/s_server.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/s_server.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/s_server.pod 2015-01-20 21:22:17.000000000 +0000 @@ -35,6 +35,7 @@ [B<-CAfile filename>] [B<-nocert>] [B<-cipher cipherlist>] +[B<-serverpref>] [B<-quiet>] [B<-no_tmp_rsa>] [B<-ssl2>] @@ -44,6 +45,7 @@ [B<-no_ssl3>] [B<-no_tls1>] [B<-no_dhe>] +[B<-no_ecdhe>] [B<-bugs>] [B<-hack>] [B<-www>] @@ -54,6 +56,11 @@ [B<-no_ticket>] [B<-id_prefix arg>] [B<-rand file(s)>] +[B<-status>] +[B<-status_verbose>] +[B<-status_timeout nsec>] +[B<-status_url url>] +[B<-nextprotoneg protocols>] =head1 DESCRIPTION @@ -131,6 +138,11 @@ if this option is set then no DH parameters will be loaded effectively disabling the ephemeral DH cipher suites. +=item B<-no_ecdhe> + +if this option is set then no ECDH parameters will be loaded effectively +disabling the ephemeral ECDH cipher suites. + =item B<-no_tmp_rsa> certain export cipher suites sometimes use a temporary RSA key, this option @@ -144,6 +156,9 @@ client does not have to send one, with the B<-Verify> option the client must supply a certificate or an error occurs. +If the ciphersuite cannot request a client certificate (for example an +anonymous ciphersuite or PSK) this option has no effect. + =item B<-crl_check>, B<-crl_check_all> Check the peer certificate has not been revoked by its CA. @@ -225,6 +240,10 @@ the preference order, the order of the server cipherlist irrelevant. See the B<ciphers> command for more information. +=item B<-serverpref> + +use the server's cipher preferences, rather than the client's preferences. + =item B<-tlsextdebug> print out a hex dump of any TLS extensions received from the server. 
@@ -276,6 +295,33 @@ The separator is B<;> for MS-Windows, B<,> for OpenVMS, and B<:> for all others. +=item B<-status> + +enables certificate status request support (aka OCSP stapling). + +=item B<-status_verbose> + +enables certificate status request support (aka OCSP stapling) and gives +a verbose printout of the OCSP response. + +=item B<-status_timeout nsec> + +sets the timeout for OCSP response to B<nsec> seconds. + +=item B<-status_url url> + +sets a fallback responder URL to use if no responder URL is present in the +server certificate. Without this option an error is returned if the server +certificate does not contain a responder address. + +=item B<-nextprotoneg protocols> + +enable Next Protocol Negotiation TLS extension and provide a +comma-separated list of supported protocol names. +The list should contain most wanted protocols first. +Protocol names are printable ASCII strings, for example "http/1.1" or +"spdy/3". + =back =head1 CONNECTED COMMANDS diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/verify.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/verify.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/verify.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/verify.pod 2015-01-20 21:22:17.000000000 +0000 @@ -25,6 +25,7 @@ [B<-untrusted file>] [B<-help>] [B<-issuer_checks>] +[B<-attime timestamp>] [B<-verbose>] [B<->] [certificates] @@ -47,7 +48,6 @@ create symbolic links to a directory of certificates. =item B<-CAfile file> - A file of trusted certificates. The file should contain multiple certificates in PEM format concatenated together. @@ -80,6 +80,12 @@ anything is wrong; during the normal verification process, several rejections may take place. +=item B<-attime timestamp> + +Perform validation checks using time specified by B<timestamp> and not +current system time. B<timestamp> is the number of seconds since +01.01.1970 (UNIX time). 
+ =item B<-policy arg> Enable policy processing and add B<arg> to the user-initial-policy-set (see @@ -386,7 +392,7 @@ =head1 BUGS -Although the issuer checks are a considerably improvement over the old technique they still +Although the issuer checks are a considerable improvement over the old technique they still suffer from limitations in the underlying X509_LOOKUP API. One consequence of this is that trusted certificates with matching subject name must either appear in a file (as specified by the B<-CAfile> option) or a directory (as specified by B<-CApath>. If they occur in both then only diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/version.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/version.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/version.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/version.pod 2015-01-20 21:22:17.000000000 +0000 @@ -13,6 +13,7 @@ [B<-o>] [B<-f>] [B<-p>] +[B<-d>] =head1 DESCRIPTION @@ -38,7 +39,7 @@ option information: various options set when the library was built. -=item B<-c> +=item B<-f> compilation flags. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/x509.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/x509.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/x509.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/x509.pod 2015-01-20 21:22:17.000000000 +0000 @@ -19,6 +19,7 @@ [B<-hash>] [B<-subject_hash>] [B<-issuer_hash>] +[B<-ocspid>] [B<-subject>] [B<-issuer>] [B<-nameopt option>] @@ -28,6 +29,7 @@ [B<-enddate>] [B<-purpose>] [B<-dates>] +[B<-checkend num>] [B<-modulus>] [B<-pubkey>] [B<-fingerprint>] @@ -42,6 +44,7 @@ [B<-days arg>] [B<-set_serial n>] [B<-signkey filename>] +[B<-passin arg>] [B<-x509toreq>] [B<-req>] [B<-CA filename>] @@ -49,6 +52,7 @@ [B<-CAcreateserial>] [B<-CAserial filename>] [B<-text>] +[B<-certopt option>] [B<-C>] [B<-md2|-md5|-sha1|-mdc2>] [B<-clrext>] @@ -159,6 +163,10 @@ outputs the "hash" of the certificate issuer name. +=item B<-ocspid> + +outputs the OCSP hash values for the subject name and public key. + =item B<-hash> synonym for "-subject_hash" for backward compatibility reasons. @@ -208,6 +216,11 @@ prints out the start and expiry dates of a certificate. +=item B<-checkend arg> + +checks if the certificate expires within the next B<arg> seconds and exits +non-zero if yes it will expire or zero if not. + =item B<-fingerprint> prints out the digest of the DER encoded version of the whole certificate @@ -313,6 +326,11 @@ is created using the supplied private key using the subject name in the request. +=item B<-passin arg> + +the key password source. For more information about the format of B<arg> +see the B<PASS PHRASE ARGUMENTS> section in L<openssl(1)|openssl(1)>. + =item B<-clrext> delete any extensions from a certificate. This option is used when a @@ -468,7 +486,7 @@ Also if this option is off any UTF8Strings will be converted to their character form first. -=item B<no_type> +=item B<ignore_type> this option does not attempt to interpret multibyte characters in any way. 
That is their content octets are merely dumped as though one octet diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/apps/x509v3_config.pod nodejs-0.11.15/deps/openssl/openssl/doc/apps/x509v3_config.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/apps/x509v3_config.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/apps/x509v3_config.pod 2015-01-20 21:22:17.000000000 +0000 @@ -174,7 +174,7 @@ The value of B<dirName> should point to a section containing the distinguished name to use as a set of name value pairs. Multi values AVAs can be formed by -preceeding the name with a B<+> character. +prefacing the name with a B<+> character. otherName can include arbitrary data associated with an OID: the value should be the OID followed by a semicolon and the content in standard @@ -301,7 +301,7 @@ O=Organisation CN=Some Name - + =head2 Certificate Policies. This is a I<raw> extension. All the fields of this extension can be set by @@ -390,7 +390,7 @@ nameConstraints=permitted;email:.somedomain.com nameConstraints=excluded;email:.com -issuingDistributionPoint = idp_section + =head2 OCSP No Check diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/ASN1_generate_nconf.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/ASN1_generate_nconf.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/ASN1_generate_nconf.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/ASN1_generate_nconf.pod 2015-01-20 21:22:17.000000000 +0000 @@ -61,7 +61,7 @@ =item B<INTEGER>, B<INT> Encodes an ASN1 B<INTEGER> type. The B<value> string represents -the value of the integer, it can be preceeded by a minus sign and +the value of the integer, it can be prefaced by a minus sign and is normally interpreted as a decimal value unless the prefix B<0x> is included. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/BIO_f_base64.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/BIO_f_base64.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/BIO_f_base64.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/BIO_f_base64.pod 2015-01-20 21:22:17.000000000 +0000 @@ -46,11 +46,11 @@ b64 = BIO_new(BIO_f_base64()); bio = BIO_new_fp(stdout, BIO_NOCLOSE); - bio = BIO_push(b64, bio); - BIO_write(bio, message, strlen(message)); - BIO_flush(bio); + BIO_push(b64, bio); + BIO_write(b64, message, strlen(message)); + BIO_flush(b64); - BIO_free_all(bio); + BIO_free_all(b64); Read Base64 encoded data from standard input and write the decoded data to standard output: @@ -62,11 +62,12 @@ b64 = BIO_new(BIO_f_base64()); bio = BIO_new_fp(stdin, BIO_NOCLOSE); bio_out = BIO_new_fp(stdout, BIO_NOCLOSE); - bio = BIO_push(b64, bio); - while((inlen = BIO_read(bio, inbuf, 512)) > 0) + BIO_push(b64, bio); + while((inlen = BIO_read(b64, inbuf, 512)) > 0) BIO_write(bio_out, inbuf, inlen); - BIO_free_all(bio); + BIO_flush(bio_out); + BIO_free_all(b64); =head1 BUGS diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/BIO_push.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/BIO_push.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/BIO_push.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/BIO_push.pod 2015-01-20 21:22:17.000000000 +0000 @@ -40,7 +40,7 @@ BIO_push(b64, f); -is made then the new chain will be B<b64-chain>. After making the calls +is made then the new chain will be B<b64-f>. 
After making the calls BIO_push(md2, b64); BIO_push(md1, md2); diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/BIO_s_accept.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/BIO_s_accept.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/BIO_s_accept.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/BIO_s_accept.pod 2015-01-20 21:22:17.000000000 +0000 @@ -59,8 +59,8 @@ BIO_set_accept_port() uses the string B<name> to set the accept port. The port is represented as a string of the form "host:port", where "host" is the interface to use and "port" is the port. -Either or both values can be "*" which is interpreted as meaning -any interface or port respectively. "port" has the same syntax +The host can be can be "*" which is interpreted as meaning +any interface; "port" has the same syntax as the port specified in BIO_set_conn_port() for connect BIOs, that is it can be a numerical port string or a string to lookup using getservbyname() and a string table. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CMS_add1_signer.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CMS_add1_signer.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CMS_add1_signer.pod 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CMS_add1_signer.pod 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,101 @@ +=pod + +=head1 NAME + + CMS_add1_signer, CMS_SignerInfo_sign - add a signer to a CMS_ContentInfo signed data structure. + +=head1 SYNOPSIS + + #include <openssl/cms.h> + + CMS_SignerInfo *CMS_add1_signer(CMS_ContentInfo *cms, X509 *signcert, EVP_PKEY *pkey, const EVP_MD *md, unsigned int flags); + + int CMS_SignerInfo_sign(CMS_SignerInfo *si); + + +=head1 DESCRIPTION + +CMS_add1_signer() adds a signer with certificate B<signcert> and private +key B<pkey> using message digest B<md> to CMS_ContentInfo SignedData +structure B<cms>. 
+ +The CMS_ContentInfo structure should be obtained from an initial call to +CMS_sign() with the flag B<CMS_PARTIAL> set or in the case or re-signing a +valid CMS_ContentInfo SignedData structure. + +If the B<md> parameter is B<NULL> then the default digest for the public +key algorithm will be used. + +Unless the B<CMS_REUSE_DIGEST> flag is set the returned CMS_ContentInfo +structure is not complete and must be finalized either by streaming (if +applicable) or a call to CMS_final(). + +The CMS_SignerInfo_sign() function will explicitly sign a CMS_SignerInfo +structure, its main use is when B<CMS_REUSE_DIGEST> and B<CMS_PARTIAL> flags +are both set. + +=head1 NOTES + +The main purpose of CMS_add1_signer() is to provide finer control +over a CMS signed data structure where the simpler CMS_sign() function defaults +are not appropriate. For example if multiple signers or non default digest +algorithms are needed. New attributes can also be added using the returned +CMS_SignerInfo structure and the CMS attribute utility functions or the +CMS signed receipt request functions. + +Any of the following flags (ored together) can be passed in the B<flags> +parameter. + +If B<CMS_REUSE_DIGEST> is set then an attempt is made to copy the content +digest value from the CMS_ContentInfo structure: to add a signer to an existing +structure. An error occurs if a matching digest value cannot be found to copy. +The returned CMS_ContentInfo structure will be valid and finalized when this +flag is set. + +If B<CMS_PARTIAL> is set in addition to B<CMS_REUSE_DIGEST> then the +CMS_SignerInfo structure will not be finalized so additional attributes +can be added. In this case an explicit call to CMS_SignerInfo_sign() is +needed to finalize it. + +If B<CMS_NOCERTS> is set the signer's certificate will not be included in the +CMS_ContentInfo structure, the signer's certificate must still be supplied in +the B<signcert> parameter though. 
This can reduce the size of the signature if +the signers certificate can be obtained by other means: for example a +previously signed message. + +The SignedData structure includes several CMS signedAttributes including the +signing time, the CMS content type and the supported list of ciphers in an +SMIMECapabilities attribute. If B<CMS_NOATTR> is set then no signedAttributes +will be used. If B<CMS_NOSMIMECAP> is set then just the SMIMECapabilities are +omitted. + +OpenSSL will by default identify signing certificates using issuer name +and serial number. If B<CMS_USE_KEYID> is set it will use the subject key +identifier value instead. An error occurs if the signing certificate does not +have a subject key identifier extension. + +If present the SMIMECapabilities attribute indicates support for the following +algorithms in preference order: 256 bit AES, Gost R3411-94, Gost 28147-89, 192 +bit AES, 128 bit AES, triple DES, 128 bit RC2, 64 bit RC2, DES and 40 bit RC2. +If any of these algorithms is not available then it will not be included: for example the GOST algorithms will not be included if the GOST ENGINE is +not loaded. + +CMS_add1_signer() returns an internal pointer to the CMS_SignerInfo +structure just added, this can be used to set additional attributes +before it is finalized. + +=head1 RETURN VALUES + +CMS_add1_signer() returns an internal pointer to the CMS_SignerInfo +structure just added or NULL if an error occurs. 
+ +=head1 SEE ALSO + +L<ERR_get_error(3)|ERR_get_error(3)>, L<CMS_sign(3)|CMS_sign(3)>, +L<CMS_final(3)|CMS_final(3)>, + +=head1 HISTORY + +CMS_add1_signer() was added to OpenSSL 0.9.8 + +=cut diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CMS_decrypt.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CMS_decrypt.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CMS_decrypt.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CMS_decrypt.pod 2015-01-20 21:22:17.000000000 +0000 @@ -27,7 +27,21 @@ Although the recipients certificate is not needed to decrypt the data it is needed to locate the appropriate (of possible several) recipients in the CMS -structure. If B<cert> is set to NULL all possible recipients are tried. +structure. + +If B<cert> is set to NULL all possible recipients are tried. This case however +is problematic. To thwart the MMA attack (Bleichenbacher's attack on +PKCS #1 v1.5 RSA padding) all recipients are tried whether they succeed or +not. If no recipient succeeds then a random symmetric key is used to decrypt +the content: this will typically output garbage and may (but is not guaranteed +to) ultimately return a padding error only. If CMS_decrypt() just returned an +error when all recipient encrypted keys failed to decrypt an attacker could +use this in a timing attack. If the special flag B<CMS_DEBUG_DECRYPT> is set +then the above behaviour is modified and an error B<is> returned if no +recipient encrypted key can be decrypted B<without> generating a random +content encryption key. Applications should use this flag with +B<extreme caution> especially in automated gateways as it can leave them +open to attack. 
It is possible to determine the correct recipient key by other means (for example looking them up in a database) and setting them in the CMS structure diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CMS_sign_add1_signer.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CMS_sign_add1_signer.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CMS_sign_add1_signer.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CMS_sign_add1_signer.pod 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -=pod - -=head1 NAME - - CMS_sign_add1_signer, CMS_SignerInfo_sign - add a signer to a CMS_ContentInfo signed data structure. - -=head1 SYNOPSIS - - #include <openssl/cms.h> - - CMS_SignerInfo *CMS_sign_add1_signer(CMS_ContentInfo *cms, X509 *signcert, EVP_PKEY *pkey, const EVP_MD *md, unsigned int flags); - - int CMS_SignerInfo_sign(CMS_SignerInfo *si); - - -=head1 DESCRIPTION - -CMS_sign_add1_signer() adds a signer with certificate B<signcert> and private -key B<pkey> using message digest B<md> to CMS_ContentInfo SignedData -structure B<cms>. - -The CMS_ContentInfo structure should be obtained from an initial call to -CMS_sign() with the flag B<CMS_PARTIAL> set or in the case or re-signing a -valid CMS_ContentInfo SignedData structure. - -If the B<md> parameter is B<NULL> then the default digest for the public -key algorithm will be used. - -Unless the B<CMS_REUSE_DIGEST> flag is set the returned CMS_ContentInfo -structure is not complete and must be finalized either by streaming (if -applicable) or a call to CMS_final(). - -The CMS_SignerInfo_sign() function will explicitly sign a CMS_SignerInfo -structure, its main use is when B<CMS_REUSE_DIGEST> and B<CMS_PARTIAL> flags -are both set. - -=head1 NOTES - -The main purpose of CMS_sign_add1_signer() is to provide finer control -over a CMS signed data structure where the simpler CMS_sign() function defaults -are not appropriate. 
For example if multiple signers or non default digest -algorithms are needed. New attributes can also be added using the returned -CMS_SignerInfo structure and the CMS attribute utility functions or the -CMS signed receipt request functions. - -Any of the following flags (ored together) can be passed in the B<flags> -parameter. - -If B<CMS_REUSE_DIGEST> is set then an attempt is made to copy the content -digest value from the CMS_ContentInfo structure: to add a signer to an existing -structure. An error occurs if a matching digest value cannot be found to copy. -The returned CMS_ContentInfo structure will be valid and finalized when this -flag is set. - -If B<CMS_PARTIAL> is set in addition to B<CMS_REUSE_DIGEST> then the -CMS_SignerInfo structure will not be finalized so additional attributes -can be added. In this case an explicit call to CMS_SignerInfo_sign() is -needed to finalize it. - -If B<CMS_NOCERTS> is set the signer's certificate will not be included in the -CMS_ContentInfo structure, the signer's certificate must still be supplied in -the B<signcert> parameter though. This can reduce the size of the signature if -the signers certificate can be obtained by other means: for example a -previously signed message. - -The SignedData structure includes several CMS signedAttributes including the -signing time, the CMS content type and the supported list of ciphers in an -SMIMECapabilities attribute. If B<CMS_NOATTR> is set then no signedAttributes -will be used. If B<CMS_NOSMIMECAP> is set then just the SMIMECapabilities are -omitted. - -OpenSSL will by default identify signing certificates using issuer name -and serial number. If B<CMS_USE_KEYID> is set it will use the subject key -identifier value instead. An error occurs if the signing certificate does not -have a subject key identifier extension. 
- -If present the SMIMECapabilities attribute indicates support for the following -algorithms in preference order: 256 bit AES, Gost R3411-94, Gost 28147-89, 192 -bit AES, 128 bit AES, triple DES, 128 bit RC2, 64 bit RC2, DES and 40 bit RC2. -If any of these algorithms is not available then it will not be included: for example the GOST algorithms will not be included if the GOST ENGINE is -not loaded. - -CMS_sign_add1_signer() returns an internal pointer to the CMS_SignerInfo -structure just added, this can be used to set additional attributes -before it is finalized. - -=head1 RETURN VALUES - -CMS_sign1_add_signers() returns an internal pointer to the CMS_SignerInfo -structure just added or NULL if an error occurs. - -=head1 SEE ALSO - -L<ERR_get_error(3)|ERR_get_error(3)>, L<CMS_sign(3)|CMS_sign(3)>, -L<CMS_final(3)|CMS_final(3)>, - -=head1 HISTORY - -CMS_sign_add1_signer() was added to OpenSSL 0.9.8 - -=cut diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CONF_modules_free.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CONF_modules_free.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CONF_modules_free.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CONF_modules_free.pod 2015-01-20 21:22:17.000000000 +0000 @@ -37,7 +37,7 @@ =head1 SEE ALSO L<conf(5)|conf(5)>, L<OPENSSL_config(3)|OPENSSL_config(3)>, -L<CONF_modules_load_file(3), CONF_modules_load_file(3)> +L<CONF_modules_load_file(3)|CONF_modules_load_file(3)> =head1 HISTORY diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CONF_modules_load_file.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CONF_modules_load_file.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/CONF_modules_load_file.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/CONF_modules_load_file.pod 2015-01-20 21:22:17.000000000 +0000 @@ -51,7 +51,7 @@ =head1 SEE ALSO L<conf(5)|conf(5)>, L<OPENSSL_config(3)|OPENSSL_config(3)>, 
-L<CONF_free(3), CONF_free(3)>, L<err(3),err(3)> +L<CONF_free(3)|CONF_free(3)>, L<err(3)|err(3)> =head1 HISTORY diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/des.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/des.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/des.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/des.pod 2015-01-20 21:22:17.000000000 +0000 @@ -135,9 +135,8 @@ DES_set_odd_parity() sets the parity of the passed I<key> to odd. -DES_is_weak_key() returns 1 is the passed key is a weak key, 0 if it -is ok. The probability that a randomly generated key is weak is -1/2^52, so it is not really worth checking for them. +DES_is_weak_key() returns 1 if the passed key is a weak key, 0 if it +is ok. The following routines mostly operate on an input and output stream of I<DES_cblock>s. @@ -181,7 +180,7 @@ DES_ede3_cbc_encrypt() implements outer triple CBC DES encryption with three keys. This means that each DES operation inside the CBC mode is -really an C<C=E(ks3,D(ks2,E(ks1,M)))>. This mode is used by SSL. +an C<C=E(ks3,D(ks2,E(ks1,M)))>. This mode is used by SSL. The DES_ede2_cbc_encrypt() macro implements two-key Triple-DES by reusing I<ks1> for the final encryption. C<C=E(ks1,D(ks2,E(ks1,M)))>. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/ERR_get_error.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/ERR_get_error.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/ERR_get_error.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/ERR_get_error.pod 2015-01-20 21:22:17.000000000 +0000 @@ -49,10 +49,10 @@ the error occurred in *B<file> and *B<line>, unless these are B<NULL>. ERR_get_error_line_data(), ERR_peek_error_line_data() and -ERR_get_last_error_line_data() store additional data and flags +ERR_peek_last_error_line_data() store additional data and flags associated with the error code in *B<data> and *B<flags>, unless these are B<NULL>. 
*B<data> contains a string -if *B<flags>&B<ERR_TXT_STRING> is true. +if *B<flags>&B<ERR_TXT_STRING> is true. An application B<MUST NOT> free the *B<data> pointer (or any other pointers returned by these functions) with OPENSSL_free() as freeing is handled diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/err.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/err.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/err.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/err.pod 2015-01-20 21:22:17.000000000 +0000 @@ -171,7 +171,6 @@ =head1 SEE ALSO -L<CRYPTO_set_id_callback(3)|CRYPTO_set_id_callback(3)>, L<CRYPTO_set_locking_callback(3)|CRYPTO_set_locking_callback(3)>, L<ERR_get_error(3)|ERR_get_error(3)>, L<ERR_GET_LIB(3)|ERR_GET_LIB(3)>, diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_DigestInit.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_DigestInit.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_DigestInit.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_DigestInit.pod 2015-01-20 21:22:17.000000000 +0000 @@ -26,13 +26,13 @@ int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx); void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx); - int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out,const EVP_MD_CTX *in); + int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out,const EVP_MD_CTX *in); int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type); int EVP_DigestFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s); - int EVP_MD_CTX_copy(EVP_MD_CTX *out,EVP_MD_CTX *in); + int EVP_MD_CTX_copy(EVP_MD_CTX *out,EVP_MD_CTX *in); #define EVP_MAX_MD_SIZE 64 /* SHA512 */ @@ -136,10 +136,10 @@ EVP_md2(), EVP_md5(), EVP_sha(), EVP_sha1(), EVP_sha224(), EVP_sha256(), EVP_sha384(), EVP_sha512(), EVP_mdc2() and EVP_ripemd160() return B<EVP_MD> structures for the MD2, MD5, SHA, SHA1, SHA224, SHA256, SHA384, SHA512, MDC2 -and RIPEMD160 digest algorithms respectively. 
+and RIPEMD160 digest algorithms respectively. EVP_dss() and EVP_dss1() return B<EVP_MD> structures for SHA and SHA1 digest -algorithms but using DSS (DSA) for the signature algorithm. Note: there is +algorithms but using DSS (DSA) for the signature algorithm. Note: there is no need to use these pseudo-digests in OpenSSL 1.0.0 and later, they are however retained for compatibility. @@ -161,9 +161,8 @@ EVP_MD_type(), EVP_MD_pkey_type() and EVP_MD_type() return the NID of the corresponding OBJECT IDENTIFIER or NID_undef if none exists. -EVP_MD_size(), EVP_MD_block_size(), EVP_MD_CTX_size(e), EVP_MD_size(), -EVP_MD_CTX_block_size() and EVP_MD_block_size() return the digest or block -size in bytes. +EVP_MD_size(), EVP_MD_block_size(), EVP_MD_CTX_size() and +EVP_MD_CTX_block_size() return the digest or block size in bytes. EVP_md_null(), EVP_md2(), EVP_md5(), EVP_sha(), EVP_sha1(), EVP_dss(), EVP_dss1(), EVP_mdc2() and EVP_ripemd160() return pointers to the @@ -178,21 +177,21 @@ preference to the low level interfaces. This is because the code then becomes transparent to the digest used and much more flexible. -New applications should use the SHA2 digest algorithms such as SHA256. +New applications should use the SHA2 digest algorithms such as SHA256. The other digest algorithms are still in common use. For most applications the B<impl> parameter to EVP_DigestInit_ex() will be set to NULL to use the default digest implementation. -The functions EVP_DigestInit(), EVP_DigestFinal() and EVP_MD_CTX_copy() are +The functions EVP_DigestInit(), EVP_DigestFinal() and EVP_MD_CTX_copy() are obsolete but are retained to maintain compatibility with existing code. 
New -applications should use EVP_DigestInit_ex(), EVP_DigestFinal_ex() and +applications should use EVP_DigestInit_ex(), EVP_DigestFinal_ex() and EVP_MD_CTX_copy_ex() because they can efficiently reuse a digest context instead of initializing and cleaning it up on each call and allow non default implementations of digests to be specified. In OpenSSL 0.9.7 and later if digest contexts are not cleaned up after use -memory leaks will occur. +memory leaks will occur. Stack allocation of EVP_MD_CTX structures is common, for example: @@ -246,15 +245,19 @@ EVP_MD_CTX_destroy(mdctx); printf("Digest is: "); - for(i = 0; i < md_len; i++) printf("%02x", md_value[i]); + for(i = 0; i < md_len; i++) + printf("%02x", md_value[i]); printf("\n"); + + /* Call this once before exit. */ + EVP_cleanup(); + exit(0); } =head1 SEE ALSO -L<evp(3)|evp(3)>, L<hmac(3)|hmac(3)>, L<md2(3)|md2(3)>, -L<md5(3)|md5(3)>, L<mdc2(3)|mdc2(3)>, L<ripemd(3)|ripemd(3)>, -L<sha(3)|sha(3)>, L<dgst(1)|dgst(1)> +L<dgst(1)|dgst(1)>, +L<evp(3)|evp(3)> =head1 HISTORY @@ -270,7 +273,7 @@ changed to return truely const EVP_MD * in OpenSSL 0.9.7. The link between digests and signing algorithms was fixed in OpenSSL 1.0 and -later, so now EVP_sha1() can be used with RSA and DSA, there is no need to +later, so now EVP_sha1() can be used with RSA and DSA; there is no need to use EVP_dss1() any more. OpenSSL 1.0 and later does not include the MD2 digest algorithm in the diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_DigestVerifyInit.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_DigestVerifyInit.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_DigestVerifyInit.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_DigestVerifyInit.pod 2015-01-20 21:22:17.000000000 +0000 @@ -38,7 +38,7 @@ the operation is not supported by the public key algorithm. 
Unlike other functions the return value 0 from EVP_DigestVerifyFinal() only -indicates that the signature did not not verify successfully (that is tbs did +indicates that the signature did not verify successfully (that is tbs did not match the original data or the signature was of invalid form) it is not an indication of a more serious error. @@ -59,7 +59,7 @@ or the operation will fail. The call to EVP_DigestVerifyFinal() internally finalizes a copy of the digest -context. This means that calls to EVP_VerifyUpdate() and EVP_VerifyFinal() can +context. This means that EVP_VerifyUpdate() and EVP_VerifyFinal() can be called later to digest and verify additional data. Since only a copy of the digest context is ever finalized the context must diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_EncryptInit.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_EncryptInit.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_EncryptInit.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_EncryptInit.pod 2015-01-20 21:22:17.000000000 +0000 @@ -344,7 +344,10 @@ Where possible the B<EVP> interface to symmetric ciphers should be used in preference to the low level interfaces. This is because the code then becomes -transparent to the cipher used and much more flexible. +transparent to the cipher used and much more flexible. Additionally, the +B<EVP> interface will ensure the use of platform specific cryptographic +acceleration such as AES-NI (the low level interfaces do not provide the +guarantee). PKCS padding works by adding B<n> padding bytes of value B<n> to make the total length of the encrypted data a multiple of the block size. 
Padding is always @@ -384,27 +387,7 @@ =head1 EXAMPLES -Get the number of rounds used in RC5: - - int nrounds; - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GET_RC5_ROUNDS, 0, &nrounds); - -Get the RC2 effective key length: - - int key_bits; - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GET_RC2_KEY_BITS, 0, &key_bits); - -Set the number of rounds used in RC5: - - int nrounds; - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_SET_RC5_ROUNDS, nrounds, NULL); - -Set the effective key length used in RC2: - - int key_bits; - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_SET_RC2_KEY_BITS, key_bits, NULL); - -Encrypt a string using blowfish: +Encrypt a string using IDEA: int do_crypt(char *outfile) { @@ -418,8 +401,9 @@ char intext[] = "Some Crypto Text"; EVP_CIPHER_CTX ctx; FILE *out; + EVP_CIPHER_CTX_init(&ctx); - EVP_EncryptInit_ex(&ctx, EVP_bf_cbc(), NULL, key, iv); + EVP_EncryptInit_ex(&ctx, EVP_idea_cbc(), NULL, key, iv); if(!EVP_EncryptUpdate(&ctx, outbuf, &outlen, intext, strlen(intext))) { @@ -448,28 +432,34 @@ } The ciphertext from the above example can be decrypted using the B<openssl> -utility with the command line: +utility with the command line (shown on two lines for clarity): - S<openssl bf -in cipher.bin -K 000102030405060708090A0B0C0D0E0F -iv 0102030405060708 -d> + openssl idea -d <filename + -K 000102030405060708090A0B0C0D0E0F -iv 0102030405060708 -General encryption, decryption function example using FILE I/O and RC2 with an -80 bit key: +General encryption and decryption function example using FILE I/O and AES128 +with a 128-bit key: int do_crypt(FILE *in, FILE *out, int do_encrypt) { /* Allow enough space in output buffer for additional block */ - inbuf[1024], outbuf[1024 + EVP_MAX_BLOCK_LENGTH]; + unsigned char inbuf[1024], outbuf[1024 + EVP_MAX_BLOCK_LENGTH]; int inlen, outlen; + EVP_CIPHER_CTX ctx; /* Bogus key and IV: we'd normally set these from * another source. 
*/ - unsigned char key[] = "0123456789"; - unsigned char iv[] = "12345678"; - /* Don't set key or IV because we will modify the parameters */ + unsigned char key[] = "0123456789abcdeF"; + unsigned char iv[] = "1234567887654321"; + + /* Don't set key or IV right away; we want to check lengths */ EVP_CIPHER_CTX_init(&ctx); - EVP_CipherInit_ex(&ctx, EVP_rc2(), NULL, NULL, NULL, do_encrypt); - EVP_CIPHER_CTX_set_key_length(&ctx, 10); - /* We finished modifying parameters so now we can set key and IV */ + EVP_CipherInit_ex(&ctx, EVP_aes_128_cbc(), NULL, NULL, NULL, + do_encrypt); + OPENSSL_assert(EVP_CIPHER_CTX_key_length(&ctx) == 16); + OPENSSL_assert(EVP_CIPHER_CTX_iv_length(&ctx) == 16); + + /* Now we can set key and IV */ EVP_CipherInit_ex(&ctx, NULL, NULL, key, iv, do_encrypt); for(;;) @@ -508,4 +498,7 @@ EVP_CipherFinal_ex() and EVP_CIPHER_CTX_set_padding() appeared in OpenSSL 0.9.7. +IDEA appeared in OpenSSL 0.9.7 but was often disabled due to +patent concerns; the last patents expired in 2012. + =cut diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_PKEY_set1_RSA.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_PKEY_set1_RSA.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_PKEY_set1_RSA.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_PKEY_set1_RSA.pod 2015-01-20 21:22:17.000000000 +0000 @@ -37,7 +37,7 @@ EVP_PKEY_get1_EC_KEY() return the referenced key in B<pkey> or B<NULL> if the key is not of the correct type. -EVP_PKEY_assign_RSA() EVP_PKEY_assign_DSA(), EVP_PKEY_assign_DH() +EVP_PKEY_assign_RSA(), EVP_PKEY_assign_DSA(), EVP_PKEY_assign_DH() and EVP_PKEY_assign_EC_KEY() also set the referenced key to B<key> however these use the supplied B<key> internally and so B<key> will be freed when the parent B<pkey> is freed. @@ -54,8 +54,8 @@ from or assigned to the B<pkey> using the B<1> functions must be freed as well as B<pkey>. 
-EVP_PKEY_assign_RSA() EVP_PKEY_assign_DSA(), EVP_PKEY_assign_DH() -EVP_PKEY_assign_EC_KEY() are implemented as macros. +EVP_PKEY_assign_RSA(), EVP_PKEY_assign_DSA(), EVP_PKEY_assign_DH() +and EVP_PKEY_assign_EC_KEY() are implemented as macros. =head1 RETURN VALUES @@ -66,7 +66,7 @@ EVP_PKEY_get1_EC_KEY() return the referenced key or B<NULL> if an error occurred. -EVP_PKEY_assign_RSA() EVP_PKEY_assign_DSA(), EVP_PKEY_assign_DH() +EVP_PKEY_assign_RSA(), EVP_PKEY_assign_DSA(), EVP_PKEY_assign_DH() and EVP_PKEY_assign_EC_KEY() return 1 for success and 0 for failure. =head1 SEE ALSO diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_PKEY_sign.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_PKEY_sign.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_PKEY_sign.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_PKEY_sign.pod 2015-01-20 21:22:17.000000000 +0000 @@ -28,9 +28,14 @@ =head1 NOTES +EVP_PKEY_sign() does not hash the data to be signed, and therefore is +normally used to sign digests. For signing arbitrary messages, see the +L<EVP_DigestSignInit(3)|EVP_DigestSignInit(3)> and +L<EVP_SignInit(3)|EVP_SignInit(3)> signing interfaces instead. + After the call to EVP_PKEY_sign_init() algorithm specific control operations can be performed to set any appropriate parameters for the -operation. +operation (see L<EVP_PKEY_CTX_ctrl(3)|EVP_PKEY_CTX_ctrl(3)>). The function EVP_PKEY_sign() can be called more than once on the same context if several operations are performed using the same parameters. @@ -49,13 +54,17 @@ #include <openssl/rsa.h> EVP_PKEY_CTX *ctx; + /* md is a SHA-256 digest in this example. */ unsigned char *md, *sig; - size_t mdlen, siglen; + size_t mdlen = 32, siglen; EVP_PKEY *signing_key; - /* NB: assumes signing_key, md and mdlen are already set up - * and that signing_key is an RSA private key + + /* + * NB: assumes signing_key and md are set up before the next + * step. 
signing_key must be an RSA private key and md must + * point to the SHA-256 digest to be signed. */ - ctx = EVP_PKEY_CTX_new(signing_key); + ctx = EVP_PKEY_CTX_new(signing_key, NULL /* no engine */); if (!ctx) /* Error occurred */ if (EVP_PKEY_sign_init(ctx) <= 0) @@ -83,6 +92,7 @@ =head1 SEE ALSO L<EVP_PKEY_CTX_new(3)|EVP_PKEY_CTX_new(3)>, +L<EVP_PKEY_CTX_ctrl(3)|EVP_PKEY_CTX_ctrl(3)>, L<EVP_PKEY_encrypt(3)|EVP_PKEY_encrypt(3)>, L<EVP_PKEY_decrypt(3)|EVP_PKEY_decrypt(3)>, L<EVP_PKEY_verify(3)|EVP_PKEY_verify(3)>, diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_SignInit.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_SignInit.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/EVP_SignInit.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/EVP_SignInit.pod 2015-01-20 21:22:17.000000000 +0000 @@ -30,9 +30,11 @@ same B<ctx> to include additional data. EVP_SignFinal() signs the data in B<ctx> using the private key B<pkey> and -places the signature in B<sig>. The number of bytes of data written (i.e. the -length of the signature) will be written to the integer at B<s>, at most -EVP_PKEY_size(pkey) bytes will be written. +places the signature in B<sig>. B<sig> must be at least EVP_PKEY_size(pkey) +bytes in size. B<s> is an OUT paramter, and not used as an IN parameter. +The number of bytes of data written (i.e. the length of the signature) +will be written to the integer at B<s>, at most EVP_PKEY_size(pkey) bytes +will be written. EVP_SignInit() initializes a signing context B<ctx> to use the default implementation of digest B<type>. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/OPENSSL_config.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/OPENSSL_config.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/OPENSSL_config.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/OPENSSL_config.pod 2015-01-20 21:22:17.000000000 +0000 @@ -73,7 +73,7 @@ =head1 SEE ALSO L<conf(5)|conf(5)>, L<CONF_load_modules_file(3)|CONF_load_modules_file(3)>, -L<CONF_modules_free(3),CONF_modules_free(3)> +L<CONF_modules_free(3)|CONF_modules_free(3)> =head1 HISTORY diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/pem.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/pem.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/pem.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/pem.pod 2015-01-20 21:22:17.000000000 +0000 @@ -450,9 +450,9 @@ After this is the base64 encoded encrypted data. -The encryption key is determined using EVP_bytestokey(), using B<salt> and an +The encryption key is determined using EVP_BytesToKey(), using B<salt> and an iteration count of 1. The IV used is the value of B<salt> and *not* the IV -returned by EVP_bytestokey(). +returned by EVP_BytesToKey(). =head1 BUGS @@ -474,3 +474,7 @@ if an error occurred. The write routines return 1 for success or 0 for failure. + +=head1 SEE ALSO + +L<EVP_get_cipherbyname(3)|EVP_get_cipherbyname>, L<EVP_BytesToKey(3)|EVP_BytesToKey(3)> diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/RSA_set_method.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/RSA_set_method.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/RSA_set_method.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/RSA_set_method.pod 2015-01-20 21:22:17.000000000 +0000 @@ -125,14 +125,18 @@ /* sign. 
For backward compatibility, this is used only * if (flags & RSA_FLAG_SIGN_VER) */ - int (*rsa_sign)(int type, unsigned char *m, unsigned int m_len, - unsigned char *sigret, unsigned int *siglen, RSA *rsa); - + int (*rsa_sign)(int type, + const unsigned char *m, unsigned int m_length, + unsigned char *sigret, unsigned int *siglen, const RSA *rsa); /* verify. For backward compatibility, this is used only * if (flags & RSA_FLAG_SIGN_VER) */ - int (*rsa_verify)(int type, unsigned char *m, unsigned int m_len, - unsigned char *sigbuf, unsigned int siglen, RSA *rsa); + int (*rsa_verify)(int dtype, + const unsigned char *m, unsigned int m_length, + const unsigned char *sigbuf, unsigned int siglen, + const RSA *rsa); + /* keygen. If NULL builtin RSA key generation will be used */ + int (*rsa_keygen)(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); } RSA_METHOD; diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/RSA_sign.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/RSA_sign.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/RSA_sign.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/RSA_sign.pod 2015-01-20 21:22:17.000000000 +0000 @@ -20,6 +20,10 @@ private key B<rsa> as specified in PKCS #1 v2.0. It stores the signature in B<sigret> and the signature size in B<siglen>. B<sigret> must point to RSA_size(B<rsa>) bytes of memory. +Note that PKCS #1 adds meta-data, placing limits on the size of the +key that can be used. +See L<RSA_private_encrypt(3)|RSA_private_encrypt(3)> for lower-level +operations. B<type> denotes the message digest algorithm that was used to generate B<m>. 
It usually is one of B<NID_sha1>, B<NID_ripemd160> and B<NID_md5>; diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/ui.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/ui.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/ui.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/ui.pod 2015-01-20 21:22:17.000000000 +0000 @@ -119,7 +119,7 @@ UI_add_input_boolean() adds a prompt to the UI that's supposed to be answered in a boolean way, with a single character for yes and a different character for no. A set of characters that can be used to cancel the prompt is given -as well. The prompt itself is really divided in two, one part being the +as well. The prompt itself is divided in two, one part being the descriptive text (given through the I<prompt> argument) and one describing the possible answers (given through the I<action_desc> argument). diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/X509_NAME_ENTRY_get_object.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/X509_NAME_ENTRY_get_object.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/X509_NAME_ENTRY_get_object.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/X509_NAME_ENTRY_get_object.pod 2015-01-20 21:22:17.000000000 +0000 @@ -65,7 +65,7 @@ =head1 SEE ALSO L<ERR_get_error(3)|ERR_get_error(3)>, L<d2i_X509_NAME(3)|d2i_X509_NAME(3)>, -L<OBJ_nid2obj(3),OBJ_nid2obj(3)> +L<OBJ_nid2obj(3)|OBJ_nid2obj(3)> =head1 HISTORY diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/crypto/X509_STORE_CTX_get_ex_new_index.pod nodejs-0.11.15/deps/openssl/openssl/doc/crypto/X509_STORE_CTX_get_ex_new_index.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/crypto/X509_STORE_CTX_get_ex_new_index.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/crypto/X509_STORE_CTX_get_ex_new_index.pod 2015-01-20 21:22:17.000000000 +0000 @@ -15,7 +15,7 @@ int X509_STORE_CTX_set_ex_data(X509_STORE_CTX *d, int idx, 
void *arg); - char *X509_STORE_CTX_get_ex_data(X509_STORE_CTX *d, int idx); + void *X509_STORE_CTX_get_ex_data(X509_STORE_CTX *d, int idx); =head1 DESCRIPTION diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/fingerprints.txt nodejs-0.11.15/deps/openssl/openssl/doc/fingerprints.txt --- nodejs-0.11.13/deps/openssl/openssl/doc/fingerprints.txt 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/fingerprints.txt 2015-01-20 21:22:17.000000000 +0000 @@ -4,12 +4,11 @@ signatures in separate files in the same location you find the distributions themselves. The normal file name is the same as the distribution file, with '.asc' added. For example, the signature for -the distribution of OpenSSL 0.9.7f, openssl-0.9.7f.tar.gz, is found in -the file openssl-0.9.7f.tar.gz.asc. +the distribution of OpenSSL 1.0.1h, openssl-1.0.1h.tar.gz, is found in +the file openssl-1.0.1h.tar.gz.asc. The following is the list of fingerprints for the keys that are -currently in use (have been used since summer 2004) to sign OpenSSL -distributions: +currently in use to sign OpenSSL distributions: pub 1024D/F709453B 2003-10-20 Key fingerprint = C4CA B749 C34F 7F4C C04F DAC9 A7AF 9E78 F709 453B @@ -21,16 +20,19 @@ Key fingerprint = D0 5D 8C 61 6E 27 E6 60 41 EC B1 B8 D5 7E E5 97 uid Dr S N Henson <shenson@drh-consultancy.demon.co.uk> +pub 4096R/FA40E9E2 2005-03-19 + Key fingerprint = 6260 5AA4 334A F9F0 DDE5 D349 D357 7507 FA40 E9E2 +uid Dr Stephen Henson <shenson@opensslfoundation.com> +uid Dr Stephen Henson <shenson@drh-consultancy.co.uk> +uid Dr Stephen N Henson <steve@openssl.org> +sub 4096R/8811F530 2005-03-19 + pub 1024R/49A563D9 1997-02-24 Key fingerprint = 7B 79 19 FA 71 6B 87 25 0E 77 21 E5 52 D9 83 BF uid Mark Cox <mjc@redhat.com> uid Mark Cox <mark@awe.com> uid Mark Cox <mjc@apache.org> -pub 1024R/26BB437D 1997-04-28 - Key fingerprint = 00 C9 21 8E D1 AB 70 37 DD 67 A2 3A 0A 6F 8D A5 -uid Ralf S. 
Engelschall <rse@engelschall.com> - pub 1024R/9C58A66D 1997-04-03 Key fingerprint = 13 D0 B8 9D 37 30 C3 ED AC 9C 24 7D 45 8C 17 67 uid jaenicke@openssl.org @@ -55,3 +57,7 @@ uid Bodo Moeller <Bodo_Moeller@public.uni-hamburg.de> uid Bodo Moeller <3moeller@rzdspc5.informatik.uni-hamburg.de> +pub 2048R/0E604491 2013-04-30 + Key fingerprint = 8657 ABB2 60F0 56B1 E519 0839 D9C4 D26D 0E60 4491 +uid Matt Caswell <frodo@baggins.org> + diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/d2i_SSL_SESSION.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/d2i_SSL_SESSION.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/d2i_SSL_SESSION.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/d2i_SSL_SESSION.pod 2015-01-20 21:22:17.000000000 +0000 @@ -48,6 +48,16 @@ amount of space should be obtained by first calling i2d_SSL_SESSION() with B<pp=NULL>, and obtain the size needed, then allocate the memory and call i2d_SSL_SESSION() again. +Note that this will advance the value contained in B<*pp> so it is necessary +to save a copy of the original allocation. +For example: + int i,j; + char *p, *temp; + i = i2d_SSL_SESSION(sess, NULL); + p = temp = malloc(i); + j = i2d_SSL_SESSION(sess, &temp); + assert(i == j); + assert(p+i == temp); =head1 RETURN VALUES diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_accept.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_accept.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_accept.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_accept.pod 2015-01-20 21:22:17.000000000 +0000 @@ -44,13 +44,13 @@ =over 4 -=item 0 +=item Z<>0 The TLS/SSL handshake was not successful but was shut down controlled and by the specifications of the TLS/SSL protocol. Call SSL_get_error() with the return value B<ret> to find out the reason. -=item 1 +=item Z<>1 The TLS/SSL handshake was successfully completed, a TLS/SSL connection has been established. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CIPHER_get_name.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CIPHER_get_name.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CIPHER_get_name.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CIPHER_get_name.pod 2015-01-20 21:22:17.000000000 +0000 @@ -23,8 +23,12 @@ B<alg_bits> is not NULL, it contains the number of bits processed by the chosen algorithm. If B<cipher> is NULL, 0 is returned. -SSL_CIPHER_get_version() returns the protocol version for B<cipher>, currently -"SSLv2", "SSLv3", or "TLSv1". If B<cipher> is NULL, "(NONE)" is returned. +SSL_CIPHER_get_version() returns string which indicates the SSL/TLS protocol +version that first defined the cipher. +This is currently B<SSLv2> or B<TLSv1/SSLv3>. +In some cases it should possibly return "TLSv1.2" but does not; +use SSL_CIPHER_description() instead. +If B<cipher> is NULL, "(NONE)" is returned. SSL_CIPHER_description() returns a textual description of the cipher used into the buffer B<buf> of length B<len> provided. B<len> must be at least @@ -52,7 +56,8 @@ =item <protocol version> -Protocol version: B<SSLv2>, B<SSLv3>. The TLSv1 ciphers are flagged with SSLv3. +Protocol version: B<SSLv2>, B<SSLv3>, B<TLSv1.2>. The TLSv1.0 ciphers are +flagged with SSLv3. No new ciphers were added by TLSv1.1. 
=item Kx=<key exchange> @@ -91,6 +96,10 @@ RC4-MD5 SSLv3 Kx=RSA Au=RSA Enc=RC4(128) Mac=MD5 EXP-RC4-MD5 SSLv3 Kx=RSA(512) Au=RSA Enc=RC4(40) Mac=MD5 export +A comp[lete list can be retrieved by invoking the following command: + + openssl ciphers -v ALL + =head1 BUGS If SSL_CIPHER_description() is called with B<cipher> being NULL, the diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_clear.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_clear.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_clear.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_clear.pod 2015-01-20 21:22:17.000000000 +0000 @@ -56,12 +56,12 @@ =over 4 -=item 0 +=item Z<>0 The SSL_clear() operation could not be performed. Check the error stack to find out the reason. -=item 1 +=item Z<>1 The SSL_clear() operation was successful. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_COMP_add_compression_method.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_COMP_add_compression_method.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_COMP_add_compression_method.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_COMP_add_compression_method.pod 2015-01-20 21:22:17.000000000 +0000 @@ -53,11 +53,11 @@ =over 4 -=item 0 +=item Z<>0 The operation succeeded. -=item 1 +=item Z<>1 The operation failed. Check the error queue to find out the reason. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_connect.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_connect.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_connect.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_connect.pod 2015-01-20 21:22:17.000000000 +0000 @@ -41,13 +41,13 @@ =over 4 -=item 0 +=item Z<>0 The TLS/SSL handshake was not successful but was shut down controlled and by the specifications of the TLS/SSL protocol. 
Call SSL_get_error() with the return value B<ret> to find out the reason. -=item 1 +=item Z<>1 The TLS/SSL handshake was successfully completed, a TLS/SSL connection has been established. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_add_extra_chain_cert.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_add_extra_chain_cert.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_add_extra_chain_cert.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_add_extra_chain_cert.pod 2015-01-20 21:22:17.000000000 +0000 @@ -24,6 +24,16 @@ certificates in the trusted CA storage, see L<SSL_CTX_load_verify_locations(3)|SSL_CTX_load_verify_locations(3)>. +The B<x509> certificate provided to SSL_CTX_add_extra_chain_cert() will be freed by the library when the B<SSL_CTX> is destroyed. An application B<should not> free the B<x509> object. + +=head1 RESTRICTIONS + +Only one set of extra chain certificates can be specified per SSL_CTX +structure. Different chains for different certificates (for example if both +RSA and DSA certificates are specified by the same server) or different SSL +structures with the same parent SSL_CTX cannot be specified using this +function. + =head1 RETURN VALUES SSL_CTX_add_extra_chain_cert() returns 1 on success. Check out the diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_add_session.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_add_session.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_add_session.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_add_session.pod 2015-01-20 21:22:17.000000000 +0000 @@ -41,7 +41,7 @@ flag then the internal cache will not be populated automatically by new sessions negotiated by the SSL/TLS implementation, even though the internal cache will be searched automatically for session-resume requests (the -latter can be surpressed by SSL_SESS_CACHE_NO_INTERNAL_LOOKUP). 
So the +latter can be suppressed by SSL_SESS_CACHE_NO_INTERNAL_LOOKUP). So the application can use SSL_CTX_add_session() directly to have full control over the sessions that can be resumed if desired. @@ -52,13 +52,13 @@ =over 4 -=item 0 +=item Z<>0 The operation failed. In case of the add operation, it was tried to add the same (identical) session twice. In case of the remove operation, the session was not found in the cache. -=item 1 +=item Z<>1 The operation succeeded. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_load_verify_locations.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_load_verify_locations.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_load_verify_locations.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_load_verify_locations.pod 2015-01-20 21:22:17.000000000 +0000 @@ -100,13 +100,13 @@ =over 4 -=item 0 +=item Z<>0 The operation failed because B<CAfile> and B<CApath> are NULL or the processing at one of the locations specified failed. Check the error stack to find out the reason. -=item 1 +=item Z<>1 The operation succeeded. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_new.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_new.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_new.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_new.pod 2015-01-20 21:22:17.000000000 +0000 @@ -51,22 +51,36 @@ =item SSLv23_method(void), SSLv23_server_method(void), SSLv23_client_method(void) -A TLS/SSL connection established with these methods will understand the SSLv2, -SSLv3, and TLSv1 protocol. A client will send out SSLv2 client hello messages -and will indicate that it also understands SSLv3 and TLSv1. A server will -understand SSLv2, SSLv3, and TLSv1 client hello messages. This is the best -choice when compatibility is a concern. 
+A TLS/SSL connection established with these methods may understand the SSLv2, +SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols. + +If the cipher list does not contain any SSLv2 ciphersuites (the default +cipher list does not) or extensions are required (for example server name) +a client will send out TLSv1 client hello messages including extensions and +will indicate that it also understands TLSv1.1, TLSv1.2 and permits a +fallback to SSLv3. A server will support SSLv3, TLSv1, TLSv1.1 and TLSv1.2 +protocols. This is the best choice when compatibility is a concern. + +If any SSLv2 ciphersuites are included in the cipher list and no extensions +are required then SSLv2 compatible client hellos will be used by clients and +SSLv2 will be accepted by servers. This is B<not> recommended due to the +insecurity of SSLv2 and the limited nature of the SSLv2 client hello +prohibiting the use of extensions. =back The list of protocols available can later be limited using the SSL_OP_NO_SSLv2, -SSL_OP_NO_SSLv3, SSL_OP_NO_TLSv1 options of the B<SSL_CTX_set_options()> or -B<SSL_set_options()> functions. Using these options it is possible to choose -e.g. SSLv23_server_method() and be able to negotiate with all possible -clients, but to only allow newer protocols like SSLv3 or TLSv1. +SSL_OP_NO_SSLv3, SSL_OP_NO_TLSv1, SSL_OP_NO_TLSv1_1 and SSL_OP_NO_TLSv1_2 +options of the SSL_CTX_set_options() or SSL_set_options() functions. +Using these options it is possible to choose e.g. SSLv23_server_method() and +be able to negotiate with all possible clients, but to only allow newer +protocols like TLSv1, TLSv1.1 or TLS v1.2. + +Applications which never want to support SSLv2 (even is the cipher string +is configured to use SSLv2 ciphersuites) can set SSL_OP_NO_SSLv2. SSL_CTX_new() initializes the list of ciphers, the session cache setting, -the callbacks, the keys and certificates, and the options to its default +the callbacks, the keys and certificates and the options to its default values. 
=head1 RETURN VALUES diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_cipher_list.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_cipher_list.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_cipher_list.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_cipher_list.pod 2015-01-20 21:22:17.000000000 +0000 @@ -54,6 +54,10 @@ keys), the "no shared cipher" (SSL_R_NO_SHARED_CIPHER) error is generated and the handshake will fail. +If the cipher list does not contain any SSLv2 cipher suites (this is the +default) then SSLv2 is effectively disabled and neither clients nor servers +will attempt to use SSLv2. + =head1 RETURN VALUES SSL_CTX_set_cipher_list() and SSL_set_cipher_list() return 1 if any cipher diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_CA_list.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_CA_list.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_CA_list.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_CA_list.pod 2015-01-20 21:22:17.000000000 +0000 @@ -35,7 +35,7 @@ =head1 NOTES When a TLS/SSL server requests a client certificate (see -B<SSL_CTX_set_verify_options()>), it sends a list of CAs, for which +B<SSL_CTX_set_verify(3)>), it sends a list of CAs, for which it will accept certificates, to the client. This list must explicitly be set using SSL_CTX_set_client_CA_list() for @@ -66,13 +66,13 @@ =over 4 -=item 0 +=item Z<>0 A failure while manipulating the STACK_OF(X509_NAME) object occurred or the X509_NAME could not be extracted from B<cacert>. Check the error stack to find out the reason. -=item 1 +=item Z<>1 The operation succeeded. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_cert_cb.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_cert_cb.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_cert_cb.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_client_cert_cb.pod 2015-01-20 21:22:17.000000000 +0000 @@ -29,7 +29,7 @@ certificate will be installed into B<ssl>, see the NOTES and BUGS sections. If no certificate should be set, "0" has to be returned and no certificate will be sent. A negative return value will suspend the handshake and the -handshake function will return immediatly. L<SSL_get_error(3)|SSL_get_error(3)> +handshake function will return immediately. L<SSL_get_error(3)|SSL_get_error(3)> will return SSL_ERROR_WANT_X509_LOOKUP to indicate, that the handshake was suspended. The next call to the handshake function will again lead to the call of client_cert_cb(). It is the job of the client_cert_cb() to store information diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_msg_callback.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_msg_callback.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_msg_callback.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_msg_callback.pod 2015-01-20 21:22:17.000000000 +0000 @@ -11,8 +11,8 @@ void SSL_CTX_set_msg_callback(SSL_CTX *ctx, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); void SSL_CTX_set_msg_callback_arg(SSL_CTX *ctx, void *arg); - void SSL_set_msg_callback(SSL_CTX *ctx, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); - void SSL_set_msg_callback_arg(SSL_CTX *ctx, void *arg); + void SSL_set_msg_callback(SSL *ssl, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); + void 
SSL_set_msg_callback_arg(SSL *ssl, void *arg); =head1 DESCRIPTION diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_options.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_options.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_options.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_options.pod 2015-01-20 21:22:17.000000000 +0000 @@ -112,6 +112,12 @@ broken SSL implementations. This option has no effect for connections using other ciphers. +=item SSL_OP_TLSEXT_PADDING + +Adds a padding extension to ensure the ClientHello size is never between +256 and 511 bytes in length. This is needed as a workaround for some +implementations. + =item SSL_OP_ALL All of the above bug workarounds. @@ -250,7 +256,7 @@ =head2 Unpatched client and patched OpenSSL server -The initial connection suceeds but client renegotiation is denied by the +The initial connection succeeds but client renegotiation is denied by the server with a B<no_renegotiation> warning alert if TLS v1.0 is used or a fatal B<handshake_failure> alert in SSL v3.0. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_session_id_context.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_session_id_context.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_session_id_context.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_session_id_context.pod 2015-01-20 21:22:17.000000000 +0000 @@ -64,13 +64,13 @@ =over 4 -=item 0 +=item Z<>0 The length B<sid_ctx_len> of the session id context B<sid_ctx> exceeded the maximum allowed length of B<SSL_MAX_SSL_SESSION_ID_LENGTH>. The error is logged to the error stack. -=item 1 +=item Z<>1 The operation succeeded. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_ssl_version.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_ssl_version.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_ssl_version.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_ssl_version.pod 2015-01-20 21:22:17.000000000 +0000 @@ -42,11 +42,11 @@ =over 4 -=item 0 +=item Z<>0 The new choice failed, check the error stack to find out the reason. -=item 1 +=item Z<>1 The operation succeeded. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tlsext_ticket_key_cb.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tlsext_ticket_key_cb.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tlsext_ticket_key_cb.pod 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tlsext_ticket_key_cb.pod 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,195 @@ +=pod + +=head1 NAME + +SSL_CTX_set_tlsext_ticket_key_cb - set a callback for session ticket processing + +=head1 SYNOPSIS + + #include <openssl/tls1.h> + + long SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX sslctx, + int (*cb)(SSL *s, unsigned char key_name[16], + unsigned char iv[EVP_MAX_IV_LENGTH], + EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc)); + +=head1 DESCRIPTION + +SSL_CTX_set_tlsext_ticket_key_cb() sets a callback fuction I<cb> for handling +session tickets for the ssl context I<sslctx>. Session tickets, defined in +RFC5077 provide an enhanced session resumption capability where the server +implementation is not required to maintain per session state. It only applies +to TLS and there is no SSLv3 implementation. + +The callback is available when the OpenSSL library was built without +I<OPENSSL_NO_TLSEXT> being defined. + +The callback function I<cb> will be called for every client instigated TLS +session when session ticket extension is presented in the TLS hello +message. 
It is the responsibility of this function to create or retrieve the +cryptographic parameters and to maintain their state. + +The OpenSSL library uses your callback function to help implement a common TLS +ticket construction state according to RFC5077 Section 4 such that per session +state is unnecessary and a small set of cryptographic variables needs to be +maintained by the callback function implementation. + +In order to reuse a session, a TLS client must send the a session ticket +extension to the server. The client can only send exactly one session ticket. +The server, through the callback function, either agrees to reuse the session +ticket information or it starts a full TLS handshake to create a new session +ticket. + +Before the callback function is started I<ctx> and I<hctx> have been +initialised with EVP_CIPHER_CTX_init and HMAC_CTX_init respectively. + +For new sessions tickets, when the client doesn't present a session ticket, or +an attempted retreival of the ticket failed, or a renew option was indicated, +the callback function will be called with I<enc> equal to 1. The OpenSSL +library expects that the function will set an arbitary I<name>, initialize +I<iv>, and set the cipher context I<ctx> and the hash context I<hctx>. + +The I<name> is 16 characters long and is used as a key identifier. + +The I<iv> length is the length of the IV of the corresponding cipher. The +maximum IV length is L<EVP_MAX_IV_LENGTH> bytes defined in B<evp.h>. + +The initialization vector I<iv> should be a random value. The cipher context +I<ctx> should use the initialisation vector I<iv>. The cipher context can be +set using L<EVP_EncryptInit_ex>. The hmac context can be set using L<HMAC_Init_ex>. + +When the client presents a session ticket, the callback function with be called +with I<enc> set to 0 indicating that the I<cb> function should retreive a set +of parameters. In this case I<name> and I<iv> have already been parsed out of +the session ticket. 
The OpenSSL library expects that the I<name> will be used +to retrieve the cryptographic parameters and that the cryptographic context +I<ctx> will be set with the retrieved parameters and the initialization vector +I<iv>, using a function like L<EVP_DecryptInit_ex>. The I<hctx> needs to be set +using L<HMAC_Init_ex>. + +If the I<name> is still valid but a renewal of the ticket is required the +callback function should return 2. The library will call the callback again +with an argument of enc equal to 1 to set the new ticket. + +The return value of the I<cb> function is used by OpenSSL to determine what +further processing will occur. The following return values have meaning: + +=over 4 + +=item Z<>2 + +This indicates that the I<ctx> and I<hctx> have been set and the session can +continue on those parameters. Additionally it indicates that the session +ticket is in a renewal period and should be replaced. The OpenSSL library will +call I<cb> again with an enc argument of 1 to set the new ticket (see RFC5077 +3.3 paragraph 2). + +=item Z<>1 + +This indicates that the I<ctx> and I<hctx> have been set and the session can +continue on those parameters. + +=item Z<>0 + +This indicates that it was not possible to set/retrieve a session ticket and +the SSL/TLS session will continue by negotiating a set of cryptographic +parameters or using the alternate SSL/TLS resumption mechanism, session ids. + +If called with enc equal to 0 the library will call the I<cb> again to get +a new set of parameters. + +=item less than 0 + +This indicates an error. + +=back + +=head1 NOTES + +Session resumption shortcuts the TLS handshake so that the client certificate +negotiation doesn't occur. It makes up for this by storing the client certificate +and all other negotiated state information encrypted within the ticket. In a +resumed session the applications will have all this state information available +exactly as if a full negotiation had occurred. 
+ +If an attacker can obtain the key used to encrypt a session ticket, they can +obtain the master secret for any ticket using that key and decrypt any traffic +using that session: even if the ciphersuite supports forward secrecy. As +a result applications may wish to use multiple keys and avoid using long term +keys stored in files. + +Applications can use longer keys to maintain a consistent level of security. +For example if a ciphersuite uses 256 bit ciphers but only a 128 bit ticket key +the overall security is only 128 bits because breaking the ticket key will +enable an attacker to obtain the session keys. + +=head1 EXAMPLES + +Reference Implementation: + SSL_CTX_set_tlsext_ticket_key_cb(SSL,ssl_tlsext_ticket_key_cb); + .... + + static int ssl_tlsext_ticket_key_cb(SSL *s, unsigned char key_name[16], unsigned char *iv, EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc) + { + if (enc) { /* create new session */ + if ( !RAND_bytes(iv, EVP_MAX_IV_LENGTH) ) { + return -1; /* insufficient random */ + } + + key = currentkey(); /* something that you need to implement */ + if ( !key ) { + /* current key doesn't exist or isn't valid */ + key = createkey(); /* something that you need to implement. + * createkey needs to initialise, a name, + * an aes_key, a hmac_key and optionally + * an expire time. 
*/ + if ( !key ) { /* key couldn't be created */ + return 0; + } + } + memcpy(key_name, key->name, 16); + + EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, key->aes_key, iv); + HMAC_Init_ex(&hctx, key->hmac_key, 16, EVP_sha256(), NULL); + + return 1; + + } else { /* retrieve session */ + key = findkey(name); + + if (!key || key->expire < now() ) { + return 0; + } + + HMAC_Init_ex(&hctx, key->hmac_key, 16, EVP_sha256(), NULL); + EVP_DecryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, key->aes_key, iv ); + + if (key->expire < ( now() - RENEW_TIME ) ) { + /* return 2 - this session will get a new ticket even though the current is still valid */ + return 2; + } + return 1; + + } + } + + + +=head1 RETURN VALUES + +returns 0 to indicate the callback function was set. + +=head1 SEE ALSO + +L<ssl(3)|ssl(3)>, L<SSL_set_session(3)|SSL_set_session(3)>, +L<SSL_session_reused(3)|SSL_session_reused(3)>, +L<SSL_CTX_add_session(3)|SSL_CTX_add_session(3)>, +L<SSL_CTX_sess_number(3)|SSL_CTX_sess_number(3)>, +L<SSL_CTX_sess_set_get_cb(3)|SSL_CTX_sess_set_get_cb(3)>, +L<SSL_CTX_set_session_id_context(3)|SSL_CTX_set_session_id_context(3)>, + +=head1 HISTORY + +This function was introduced in OpenSSL 0.9.8h + +=cut diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod 2015-01-20 21:22:17.000000000 +0000 @@ -12,12 +12,10 @@ DH *(*tmp_dh_callback)(SSL *ssl, int is_export, int keylength)); long SSL_CTX_set_tmp_dh(SSL_CTX *ctx, DH *dh); - void SSL_set_tmp_dh_callback(SSL_CTX *ctx, + void SSL_set_tmp_dh_callback(SSL *ctx, DH *(*tmp_dh_callback)(SSL *ssl, int is_export, int keylength)); long SSL_set_tmp_dh(SSL *ssl, DH *dh) - DH *(*tmp_dh_callback)(SSL *ssl, int is_export, int keylength)); - =head1 
DESCRIPTION SSL_CTX_set_tmp_dh_callback() sets the callback function for B<ctx> to be @@ -50,12 +48,13 @@ only used for signing. In order to perform a DH key exchange the server must use a DH group -(DH parameters) and generate a DH key. The server will always generate a new -DH key during the negotiation, when the DH parameters are supplied via -callback and/or when the SSL_OP_SINGLE_DH_USE option of -L<SSL_CTX_set_options(3)|SSL_CTX_set_options(3)> is set. It will -immediately create a DH key, when DH parameters are supplied via -SSL_CTX_set_tmp_dh() and SSL_OP_SINGLE_DH_USE is not set. In this case, +(DH parameters) and generate a DH key. +The server will always generate a new DH key during the negotiation +if either the DH parameters are supplied via callback or the +SSL_OP_SINGLE_DH_USE option of SSL_CTX_set_options(3) is set (or both). +It will immediately create a DH key if DH parameters are supplied via +SSL_CTX_set_tmp_dh() and SSL_OP_SINGLE_DH_USE is not set. +In this case, it may happen that a key is generated on initialization without later being needed, while on the other hand the computer time during the negotiation is being saved. @@ -81,7 +80,7 @@ is mandatory. Application authors may compile in DH parameters. Files dh512.pem, -dh1024.pem, dh2048.pem, and dh4096 in the 'apps' directory of current +dh1024.pem, dh2048.pem, and dh4096.pem in the 'apps' directory of current version of the OpenSSL distribution contain the 'SKIP' DH parameters, which use safe primes and were generated verifiably pseudo-randomly. 
These files can be converted into C code using the B<-C> option of the @@ -141,7 +140,7 @@ dh_tmp = dh_512; break; case 1024: - if (!dh_1024) + if (!dh_1024) dh_1024 = get_dh1024(); dh_tmp = dh_1024; break; diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_verify.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_verify.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_set_verify.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_set_verify.pod 2015-01-20 21:22:17.000000000 +0000 @@ -109,8 +109,8 @@ X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY will be issued. The depth count is "level 0:peer certificate", "level 1: CA certificate", "level 2: higher level CA certificate", and so on. Setting the maximum -depth to 2 allows the levels 0, 1, and 2. The default depth limit is 9, -allowing for the peer certificate and additional 9 CA certificates. +depth to 2 allows the levels 0, 1, and 2. The default depth limit is 100, +allowing for the peer certificate and additional 100 CA certificates. The B<verify_callback> function is used to control the behaviour when the SSL_VERIFY_PEER flag is set. It must be supplied by the application and diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_use_psk_identity_hint.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_use_psk_identity_hint.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_CTX_use_psk_identity_hint.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_CTX_use_psk_identity_hint.pod 2015-01-20 21:22:17.000000000 +0000 @@ -96,7 +96,7 @@ connection will fail with decryption_error before it will be finished completely. -=item 0 +=item Z<>0 PSK identity was not found. An "unknown_psk_identity" alert message will be sent and the connection setup fails. 
diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_do_handshake.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_do_handshake.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_do_handshake.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_do_handshake.pod 2015-01-20 21:22:17.000000000 +0000 @@ -45,13 +45,13 @@ =over 4 -=item 0 +=item Z<>0 The TLS/SSL handshake was not successful but was shut down controlled and by the specifications of the TLS/SSL protocol. Call SSL_get_error() with the return value B<ret> to find out the reason. -=item 1 +=item Z<>1 The TLS/SSL handshake was successfully completed, a TLS/SSL connection has been established. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_get_peer_cert_chain.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_get_peer_cert_chain.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_get_peer_cert_chain.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_get_peer_cert_chain.pod 2015-01-20 21:22:17.000000000 +0000 @@ -8,11 +8,11 @@ #include <openssl/ssl.h> - STACKOF(X509) *SSL_get_peer_cert_chain(const SSL *ssl); + STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl); =head1 DESCRIPTION -SSL_get_peer_cert_chain() returns a pointer to STACKOF(X509) certificates +SSL_get_peer_cert_chain() returns a pointer to STACK_OF(X509) certificates forming the certificate chain of the peer. If called on the client side, the stack also contains the peer's certificate; if called on the server side, the peer's certificate must be obtained separately using @@ -24,7 +24,7 @@ The peer certificate chain is not necessarily available after reusing a session, in which case a NULL pointer is returned. -The reference count of the STACKOF(X509) object is not incremented. +The reference count of the STACK_OF(X509) object is not incremented. If the corresponding session is freed, the pointer must not be used any longer. 
@@ -39,7 +39,7 @@ No certificate was presented by the peer or no connection was established or the certificate chain is no longer available when a session is reused. -=item Pointer to a STACKOF(X509) +=item Pointer to a STACK_OF(X509) The return value points to the certificate chain presented by the peer. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_get_version.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_get_version.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_get_version.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_get_version.pod 2015-01-20 21:22:17.000000000 +0000 @@ -12,12 +12,12 @@ =head1 DESCRIPTION -SSL_get_cipher_version() returns the name of the protocol used for the +SSL_get_version() returns the name of the protocol used for the connection B<ssl>. =head1 RETURN VALUES -The following strings can occur: +The following strings can be returned: =over 4 @@ -31,7 +31,15 @@ =item TLSv1 -The connection uses the TLSv1 protocol. +The connection uses the TLSv1.0 protocol. + +=item TLSv1.1 + +The connection uses the TLSv1.1 protocol. + +=item TLSv1.2 + +The connection uses the TLSv1.2 protocol. =item unknown diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_read.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_read.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_read.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_read.pod 2015-01-20 21:22:17.000000000 +0000 @@ -86,7 +86,7 @@ The read operation was successful; the return value is the number of bytes actually read from the TLS/SSL connection. -=item 0 +=item Z<>0 The read operation was not successful. 
The reason may either be a clean shutdown due to a "close notify" alert sent by the peer (in which case diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_session_reused.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_session_reused.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_session_reused.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_session_reused.pod 2015-01-20 21:22:17.000000000 +0000 @@ -27,11 +27,11 @@ =over 4 -=item 0 +=item Z<>0 A new session was negotiated. -=item 1 +=item Z<>1 A session was reused. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_set_fd.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_set_fd.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_set_fd.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_set_fd.pod 2015-01-20 21:22:17.000000000 +0000 @@ -35,11 +35,11 @@ =over 4 -=item 0 +=item Z<>0 The operation failed. Check the error stack to find out why. -=item 1 +=item Z<>1 The operation succeeded. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_set_session.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_set_session.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_set_session.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_set_session.pod 2015-01-20 21:22:17.000000000 +0000 @@ -37,11 +37,11 @@ =over 4 -=item 0 +=item Z<>0 The operation failed; check the error stack to find out the reason. -=item 1 +=item Z<>1 The operation succeeded. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_shutdown.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_shutdown.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_shutdown.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_shutdown.pod 2015-01-20 21:22:17.000000000 +0000 @@ -92,14 +92,14 @@ =over 4 -=item 0 +=item Z<>0 The shutdown is not yet finished. 
Call SSL_shutdown() for a second time, if a bidirectional shutdown shall be performed. The output of L<SSL_get_error(3)|SSL_get_error(3)> may be misleading, as an erroneous SSL_ERROR_SYSCALL may be flagged even though no error occurred. -=item 1 +=item Z<>1 The shutdown was successfully completed. The "close notify" alert was sent and the peer's "close notify" alert was received. diff -Nru nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_write.pod nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_write.pod --- nodejs-0.11.13/deps/openssl/openssl/doc/ssl/SSL_write.pod 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/doc/ssl/SSL_write.pod 2015-01-20 21:22:17.000000000 +0000 @@ -79,7 +79,7 @@ The write operation was successful, the return value is the number of bytes actually written to the TLS/SSL connection. -=item 0 +=item Z<>0 The write operation was not successful. Probably the underlying connection was closed. Call SSL_get_error() with the return value B<ret> to find out, diff -Nru nodejs-0.11.13/deps/openssl/openssl/engines/ccgost/gost_ameth.c nodejs-0.11.15/deps/openssl/openssl/engines/ccgost/gost_ameth.c --- nodejs-0.11.13/deps/openssl/openssl/engines/ccgost/gost_ameth.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/engines/ccgost/gost_ameth.c 2015-01-20 21:22:17.000000000 +0000 @@ -269,7 +269,7 @@ case ASN1_PKEY_CTRL_CMS_ENVELOPE: if (arg1 == 0) { - X509_ALGOR *alg; + X509_ALGOR *alg = NULL; ASN1_STRING * params = encode_gost_algor_params(pkey); if (!params) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/engines/ccgost/Makefile.save nodejs-0.11.15/deps/openssl/openssl/engines/ccgost/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/engines/ccgost/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/engines/ccgost/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,275 +0,0 @@ -DIR=ccgost -TOP=../.. 
-CC=cc -INCLUDES= -I../../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r -CFLAGS= $(INCLUDES) $(CFLAG) -LIB=$(TOP)/libcrypto.a - -LIBSRC= gost2001.c gost2001_keyx.c gost89.c gost94_keyx.c gost_ameth.c gost_asn1.c gost_crypt.c gost_ctl.c gost_eng.c gosthash.c gost_keywrap.c gost_md.c gost_params.c gost_pmeth.c gost_sign.c - -LIBOBJ= e_gost_err.o gost2001_keyx.o gost2001.o gost89.o gost94_keyx.o gost_ameth.o gost_asn1.o gost_crypt.o gost_ctl.o gost_eng.o gosthash.o gost_keywrap.o gost_md.o gost_params.o gost_pmeth.o gost_sign.o - -SRC=$(LIBSRC) - -LIBNAME=gost - -top: - (cd $(TOP); $(MAKE) DIRS=engines EDIRS=$(DIR) sub_all) - -all: lib - -tags: - ctags $(SRC) - -errors: - $(PERL) ../../util/mkerr.pl -conf gost.ec -nostatic -write $(SRC) - -lib: $(LIBOBJ) - if [ -n "$(SHARED_LIBS)" ]; then \ - $(MAKE) -f $(TOP)/Makefile.shared -e \ - LIBNAME=$(LIBNAME) \ - LIBEXTRAS='$(LIBOBJ)' \ - LIBDEPS='-L$(TOP) -lcrypto' \ - link_o.$(SHLIB_TARGET); \ - else \ - $(AR) $(LIB) $(LIBOBJ); \ - fi - @touch lib - -install: - [ -n "$(INSTALLTOP)" ] # should be set by top Makefile... 
- if [ -n "$(SHARED_LIBS)" ]; then \ - set -e; \ - echo installing $(LIBNAME); \ - pfx=lib; \ - if [ "$(PLATFORM)" != "Cygwin" ]; then \ - case "$(CFLAGS)" in \ - *DSO_BEOS*) sfx=".so";; \ - *DSO_DLFCN*) sfx=`expr "$(SHLIB_EXT)" : '.*\(\.[a-z][a-z]*\)' \| ".so"`;; \ - *DSO_DL*) sfx=".sl";; \ - *DSO_WIN32*) sfx="eay32.dll"; pfx=;; \ - *) sfx=".bad";; \ - esac; \ - cp $${pfx}$(LIBNAME)$$sfx $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$${pfx}$(LIBNAME)$$sfx.new; \ - else \ - sfx=".so"; \ - cp cyg$(LIBNAME).dll $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$${pfx}$(LIBNAME)$$sfx.new; \ - fi; \ - chmod 555 $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$${pfx}$(LIBNAME)$$sfx.new; \ - mv -f $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$${pfx}$(LIBNAME)$$sfx.new $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$${pfx}$(LIBNAME)$$sfx; \ - fi - -links: - -tests: - -depend: - @if [ -z "$(THIS)" ]; then \ - $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; \ - else \ - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC); \ - fi - -files: - - - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff *.so *.sl *.dll - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -gost2001.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost2001.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost2001.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost2001.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost2001.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost2001.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost2001.o: ../../include/openssl/err.h ../../include/openssl/evp.h -gost2001.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -gost2001.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -gost2001.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost2001.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -gost2001.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -gost2001.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -gost2001.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -gost2001.o: e_gost_err.h gost2001.c gost89.h gost_lcl.h gost_params.h -gost2001.o: gosthash.h -gost2001_keyx.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost2001_keyx.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost2001_keyx.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost2001_keyx.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost2001_keyx.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost2001_keyx.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost2001_keyx.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -gost2001_keyx.o: ../../include/openssl/obj_mac.h -gost2001_keyx.o: ../../include/openssl/objects.h -gost2001_keyx.o: ../../include/openssl/opensslconf.h -gost2001_keyx.o: ../../include/openssl/opensslv.h -gost2001_keyx.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -gost2001_keyx.o: ../../include/openssl/rand.h 
../../include/openssl/safestack.h -gost2001_keyx.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost2001_keyx.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost2001_keyx.o: ../../include/openssl/x509_vfy.h e_gost_err.h gost2001_keyx.c -gost2001_keyx.o: gost2001_keyx.h gost89.h gost_keywrap.h gost_lcl.h gosthash.h -gost89.o: gost89.c gost89.h -gost94_keyx.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost94_keyx.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost94_keyx.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost94_keyx.o: ../../include/openssl/dh.h ../../include/openssl/dsa.h -gost94_keyx.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -gost94_keyx.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -gost94_keyx.o: ../../include/openssl/engine.h ../../include/openssl/evp.h -gost94_keyx.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -gost94_keyx.o: ../../include/openssl/objects.h -gost94_keyx.o: ../../include/openssl/opensslconf.h -gost94_keyx.o: ../../include/openssl/opensslv.h -gost94_keyx.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -gost94_keyx.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h -gost94_keyx.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost94_keyx.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost94_keyx.o: ../../include/openssl/x509_vfy.h e_gost_err.h gost89.h -gost94_keyx.o: gost94_keyx.c gost_keywrap.h gost_lcl.h gosthash.h -gost_ameth.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_ameth.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_ameth.o: ../../include/openssl/buffer.h ../../include/openssl/cms.h -gost_ameth.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -gost_ameth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -gost_ameth.o: ../../include/openssl/ecdh.h 
../../include/openssl/ecdsa.h -gost_ameth.o: ../../include/openssl/engine.h ../../include/openssl/err.h -gost_ameth.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -gost_ameth.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -gost_ameth.o: ../../include/openssl/opensslconf.h -gost_ameth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_ameth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -gost_ameth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost_ameth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost_ameth.o: ../../include/openssl/x509_vfy.h e_gost_err.h gost89.h -gost_ameth.o: gost_ameth.c gost_lcl.h gost_params.h gosthash.h -gost_asn1.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_asn1.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_asn1.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost_asn1.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost_asn1.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost_asn1.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost_asn1.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -gost_asn1.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -gost_asn1.o: ../../include/openssl/opensslconf.h -gost_asn1.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_asn1.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -gost_asn1.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost_asn1.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost_asn1.o: ../../include/openssl/x509_vfy.h gost89.h gost_asn1.c gost_lcl.h -gost_asn1.o: gosthash.h -gost_crypt.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_crypt.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_crypt.o: ../../include/openssl/buffer.h 
../../include/openssl/crypto.h -gost_crypt.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost_crypt.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost_crypt.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost_crypt.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -gost_crypt.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -gost_crypt.o: ../../include/openssl/opensslconf.h -gost_crypt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_crypt.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -gost_crypt.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -gost_crypt.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -gost_crypt.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -gost_crypt.o: e_gost_err.h gost89.h gost_crypt.c gost_lcl.h gosthash.h -gost_ctl.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_ctl.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_ctl.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost_ctl.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost_ctl.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost_ctl.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost_ctl.o: ../../include/openssl/err.h ../../include/openssl/evp.h -gost_ctl.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -gost_ctl.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -gost_ctl.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_ctl.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -gost_ctl.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost_ctl.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost_ctl.o: ../../include/openssl/x509_vfy.h gost89.h gost_ctl.c gost_lcl.h -gost_ctl.o: gosthash.h -gost_eng.o: 
../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_eng.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_eng.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost_eng.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost_eng.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost_eng.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost_eng.o: ../../include/openssl/err.h ../../include/openssl/evp.h -gost_eng.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -gost_eng.o: ../../include/openssl/objects.h ../../include/openssl/opensslconf.h -gost_eng.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_eng.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -gost_eng.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost_eng.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost_eng.o: ../../include/openssl/x509_vfy.h e_gost_err.h gost89.h gost_eng.c -gost_eng.o: gost_lcl.h gosthash.h -gost_keywrap.o: gost89.h gost_keywrap.c gost_keywrap.h -gost_md.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_md.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_md.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost_md.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost_md.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost_md.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost_md.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -gost_md.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -gost_md.o: ../../include/openssl/opensslconf.h ../../include/openssl/opensslv.h -gost_md.o: ../../include/openssl/ossl_typ.h ../../include/openssl/pkcs7.h -gost_md.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -gost_md.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -gost_md.o: 
../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -gost_md.o: e_gost_err.h gost89.h gost_lcl.h gost_md.c gosthash.h -gost_params.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h -gost_params.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h -gost_params.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -gost_params.o: ../../include/openssl/opensslconf.h -gost_params.o: ../../include/openssl/opensslv.h -gost_params.o: ../../include/openssl/ossl_typ.h -gost_params.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h -gost_params.o: ../../include/openssl/symhacks.h gost_params.c gost_params.h -gost_pmeth.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_pmeth.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_pmeth.o: ../../include/openssl/buffer.h ../../include/openssl/conf.h -gost_pmeth.o: ../../include/openssl/crypto.h ../../include/openssl/dsa.h -gost_pmeth.o: ../../include/openssl/e_os2.h ../../include/openssl/ec.h -gost_pmeth.o: ../../include/openssl/ecdh.h ../../include/openssl/ecdsa.h -gost_pmeth.o: ../../include/openssl/engine.h ../../include/openssl/evp.h -gost_pmeth.o: ../../include/openssl/lhash.h ../../include/openssl/obj_mac.h -gost_pmeth.o: ../../include/openssl/objects.h -gost_pmeth.o: ../../include/openssl/opensslconf.h -gost_pmeth.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_pmeth.o: ../../include/openssl/pkcs7.h ../../include/openssl/safestack.h -gost_pmeth.o: ../../include/openssl/sha.h ../../include/openssl/stack.h -gost_pmeth.o: ../../include/openssl/symhacks.h ../../include/openssl/x509.h -gost_pmeth.o: ../../include/openssl/x509_vfy.h ../../include/openssl/x509v3.h -gost_pmeth.o: e_gost_err.h gost89.h gost_lcl.h gost_params.h gost_pmeth.c -gost_pmeth.o: gosthash.h -gost_sign.o: ../../include/openssl/asn1.h ../../include/openssl/asn1t.h -gost_sign.o: ../../include/openssl/bio.h ../../include/openssl/bn.h -gost_sign.o: 
../../include/openssl/buffer.h ../../include/openssl/crypto.h -gost_sign.o: ../../include/openssl/dsa.h ../../include/openssl/e_os2.h -gost_sign.o: ../../include/openssl/ec.h ../../include/openssl/ecdh.h -gost_sign.o: ../../include/openssl/ecdsa.h ../../include/openssl/engine.h -gost_sign.o: ../../include/openssl/evp.h ../../include/openssl/lhash.h -gost_sign.o: ../../include/openssl/obj_mac.h ../../include/openssl/objects.h -gost_sign.o: ../../include/openssl/opensslconf.h -gost_sign.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h -gost_sign.o: ../../include/openssl/pkcs7.h ../../include/openssl/rand.h -gost_sign.o: ../../include/openssl/safestack.h ../../include/openssl/sha.h -gost_sign.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h -gost_sign.o: ../../include/openssl/x509.h ../../include/openssl/x509_vfy.h -gost_sign.o: e_gost_err.h gost89.h gost_lcl.h gost_params.h gost_sign.c -gost_sign.o: gosthash.h -gosthash.o: gost89.h gosthash.c gosthash.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/engines/makeengines.com nodejs-0.11.15/deps/openssl/openssl/engines/makeengines.com --- nodejs-0.11.13/deps/openssl/openssl/engines/makeengines.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/engines/makeengines.com 2015-01-20 21:22:17.000000000 +0000 @@ -155,7 +155,7 @@ $ TV_OBJ_NAME = OBJ_DIR + F$PARSE(ENGINE_,,,"NAME","SYNTAX_ONLY") + ".OBJ" $ TV_OBJ = ",''TV_OBJ_NAME'" $ ENDIF -$ ENGINE_4758CCA = "e_4758cca" +$ ENGINE_4758cca = "e_4758cca" $ ENGINE_aep = "e_aep" $ ENGINE_atalla = "e_atalla" $ ENGINE_cswift = "e_cswift" @@ -756,9 +756,12 @@ $ IF F$TYPE(USER_CCDEFS) .NES. "" THEN CCDEFS = CCDEFS + "," + USER_CCDEFS $ CCEXTRAFLAGS = "" $ IF F$TYPE(USER_CCFLAGS) .NES. "" THEN CCEXTRAFLAGS = USER_CCFLAGS -$ CCDISABLEWARNINGS = "" !!! "LONGLONGTYPE,LONGLONGSUFX" -$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. 
"" THEN - - CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," + USER_CCDISABLEWARNINGS +$ CCDISABLEWARNINGS = "" !!! "MAYLOSEDATA3" !!! "LONGLONGTYPE,LONGLONGSUFX" +$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS + USER_CCDISABLEWARNINGS +$ ENDIF $! $! Check To See If We Have A ZLIB Option. $! @@ -922,6 +925,18 @@ $! $ IF COMPILER .EQS. "DECC" $ THEN +$! Not all compiler versions support MAYLOSEDATA3. +$ OPT_TEST = "MAYLOSEDATA3" +$ DEFINE /USER_MODE SYS$ERROR NL: +$ DEFINE /USER_MODE SYS$OUTPUT NL: +$ 'CC' /NOCROSS_REFERENCE /NOLIST /NOOBJECT - + /WARNINGS = DISABLE = ('OPT_TEST', EMPTYFILE) NL: +$ IF ($SEVERITY) +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN - + CCDISABLEWARNINGS = CCDISABLEWARNINGS+ "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS+ OPT_TEST +$ ENDIF $ IF CCDISABLEWARNINGS .NES. "" $ THEN $ CCDISABLEWARNINGS = " /WARNING=(DISABLE=(" + CCDISABLEWARNINGS + "))" diff -Nru nodejs-0.11.13/deps/openssl/openssl/engines/Makefile.save nodejs-0.11.15/deps/openssl/openssl/engines/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/engines/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/engines/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,335 +0,0 @@ -# -# OpenSSL/engines/Makefile -# - -DIR= engines -TOP= .. -CC= cc -INCLUDES= -I../include -CFLAG=-g -MAKEFILE= Makefile -AR= ar r -ENGDIRS= ccgost - -RECURSIVE_MAKE= [ -z "$(ENGDIRS)" ] || for i in $(ENGDIRS) ; do \ - (cd $$i && echo "making $$target in $(DIR)/$$i..." && \ - $(MAKE) -e TOP=../.. 
DIR=$$i $$target ) || exit 1; \ - done; - -PEX_LIBS= -EX_LIBS= - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile engines.com install.com engine_vector.mar -TEST= -APPS= - -LIB=$(TOP)/libcrypto.a -LIBNAMES= 4758cca aep atalla cswift gmp chil nuron sureware ubsec padlock capi - -LIBSRC= e_4758cca.c \ - e_aep.c \ - e_atalla.c \ - e_cswift.c \ - e_gmp.c \ - e_chil.c \ - e_nuron.c \ - e_sureware.c \ - e_ubsec.c \ - e_padlock.c \ - e_capi.c -LIBOBJ= e_4758cca.o \ - e_aep.o \ - e_atalla.o \ - e_cswift.o \ - e_gmp.o \ - e_chil.o \ - e_nuron.o \ - e_sureware.o \ - e_ubsec.o \ - e_padlock.o \ - e_capi.o - -SRC= $(LIBSRC) - -EXHEADER= -HEADER= e_4758cca_err.c e_4758cca_err.h \ - e_aep_err.c e_aep_err.h \ - e_atalla_err.c e_atalla_err.h \ - e_cswift_err.c e_cswift_err.h \ - e_gmp_err.c e_gmp_err.h \ - e_chil_err.c e_chil_err.h \ - e_nuron_err.c e_nuron_err.h \ - e_sureware_err.c e_sureware_err.h \ - e_ubsec_err.c e_ubsec_err.h \ - e_capi_err.c e_capi_err.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ..; $(MAKE) DIRS=$(DIR) all) - -all: lib subdirs - -lib: $(LIBOBJ) - @if [ -n "$(SHARED_LIBS)" ]; then \ - set -e; \ - for l in $(LIBNAMES); do \ - $(MAKE) -f ../Makefile.shared -e \ - LIBNAME=$$l LIBEXTRAS=e_$$l.o \ - LIBDEPS='-L.. -lcrypto $(EX_LIBS)' \ - link_o.$(SHLIB_TARGET); \ - done; \ - else \ - $(AR) $(LIB) $(LIBOBJ); \ - $(RANLIB) $(LIB) || echo Never mind.; \ - fi; \ - touch lib - -subdirs: - echo $(EDIRS) - @target=all; $(RECURSIVE_MAKE) - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - @target=files; $(RECURSIVE_MAKE) - -links: - @target=links; $(RECURSIVE_MAKE) - -# XXXXX This currently only works on systems that use .so as suffix -# for shared libraries as well as for Cygwin which uses the -# dlfcn_name_converter and therefore stores the engines with .so suffix, too. -# XXXXX This was extended to HP-UX dl targets, which use .sl suffix. -# XXXXX This was extended to mingw targets, which use eay32.dll suffix without lib as prefix. 
-install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @if [ -n "$(SHARED_LIBS)" ]; then \ - set -e; \ - $(PERL) $(TOP)/util/mkdir-p.pl $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines; \ - for l in $(LIBNAMES); do \ - ( echo installing $$l; \ - pfx=lib; \ - if [ "$(PLATFORM)" != "Cygwin" ]; then \ - case "$(CFLAGS)" in \ - *DSO_BEOS*) sfx=".so";; \ - *DSO_DLFCN*) sfx=`expr "$(SHLIB_EXT)" : '.*\(\.[a-z][a-z]*\)' \| ".so"`;; \ - *DSO_DL*) sfx=".sl";; \ - *DSO_WIN32*) sfx="eay32.dll"; pfx=;; \ - *) sfx=".bad";; \ - esac; \ - cp $$pfx$$l$$sfx $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$$pfx$$l$$sfx.new; \ - else \ - sfx=".so"; \ - cp cyg$$l.dll $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$$pfx$$l$$sfx.new; \ - fi; \ - chmod 555 $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$$pfx$$l$$sfx.new; \ - mv -f $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$$pfx$$l$$sfx.new $(INSTALL_PREFIX)$(INSTALLTOP)/$(LIBDIR)/engines/$$pfx$$l$$sfx ); \ - done; \ - fi - @target=install; $(RECURSIVE_MAKE) - -tags: - ctags $(SRC) - -errors: - set -e; for l in $(LIBNAMES); do \ - $(PERL) ../util/mkerr.pl -conf e_$$l.ec \ - -nostatic -staticloader -write e_$$l.c; \ - done - (cd ccgost; $(MAKE) PERL=$(PERL) errors) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - @target=lint; $(RECURSIVE_MAKE) - -depend: - @if [ -z "$(THIS)" ]; then \ - $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; \ - fi - @[ -z "$(THIS)" ] || $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC) - @[ -z "$(THIS)" ] || (set -e; target=depend; $(RECURSIVE_MAKE) ) - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - @target=dclean; $(RECURSIVE_MAKE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - @target=clean; $(RECURSIVE_MAKE) - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -e_4758cca.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_4758cca.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_4758cca.o: ../include/openssl/crypto.h ../include/openssl/dso.h -e_4758cca.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_4758cca.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_4758cca.o: ../include/openssl/engine.h ../include/openssl/err.h -e_4758cca.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_4758cca.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_4758cca.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_4758cca.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_4758cca.o: ../include/openssl/rand.h ../include/openssl/rsa.h -e_4758cca.o: ../include/openssl/safestack.h ../include/openssl/sha.h -e_4758cca.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -e_4758cca.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -e_4758cca.o: e_4758cca.c e_4758cca_err.c e_4758cca_err.h -e_4758cca.o: vendor_defns/hw_4758_cca.h -e_aep.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_aep.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_aep.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_aep.o: ../include/openssl/dsa.h ../include/openssl/dso.h -e_aep.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_aep.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_aep.o: ../include/openssl/engine.h ../include/openssl/err.h -e_aep.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_aep.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_aep.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_aep.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_aep.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -e_aep.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_aep.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -e_aep.o: ../include/openssl/x509_vfy.h 
e_aep.c e_aep_err.c e_aep_err.h -e_aep.o: vendor_defns/aep.h -e_atalla.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_atalla.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_atalla.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_atalla.o: ../include/openssl/dsa.h ../include/openssl/dso.h -e_atalla.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_atalla.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_atalla.o: ../include/openssl/engine.h ../include/openssl/err.h -e_atalla.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_atalla.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_atalla.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_atalla.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_atalla.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -e_atalla.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_atalla.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -e_atalla.o: ../include/openssl/x509_vfy.h e_atalla.c e_atalla_err.c -e_atalla.o: e_atalla_err.h vendor_defns/atalla.h -e_capi.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_capi.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_capi.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -e_capi.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -e_capi.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -e_capi.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_capi.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_capi.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_capi.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_capi.o: ../include/openssl/safestack.h ../include/openssl/sha.h -e_capi.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -e_capi.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h e_capi.c -e_chil.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_chil.o: 
../include/openssl/bn.h ../include/openssl/buffer.h -e_chil.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_chil.o: ../include/openssl/dso.h ../include/openssl/e_os2.h -e_chil.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -e_chil.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -e_chil.o: ../include/openssl/err.h ../include/openssl/evp.h -e_chil.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -e_chil.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -e_chil.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -e_chil.o: ../include/openssl/pem.h ../include/openssl/pem2.h -e_chil.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -e_chil.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -e_chil.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_chil.o: ../include/openssl/symhacks.h ../include/openssl/ui.h -e_chil.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h e_chil.c -e_chil.o: e_chil_err.c e_chil_err.h vendor_defns/hwcryptohook.h -e_cswift.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_cswift.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_cswift.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_cswift.o: ../include/openssl/dsa.h ../include/openssl/dso.h -e_cswift.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_cswift.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_cswift.o: ../include/openssl/engine.h ../include/openssl/err.h -e_cswift.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_cswift.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_cswift.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_cswift.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_cswift.o: ../include/openssl/rand.h ../include/openssl/rsa.h -e_cswift.o: ../include/openssl/safestack.h ../include/openssl/sha.h -e_cswift.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -e_cswift.o: 
../include/openssl/x509.h ../include/openssl/x509_vfy.h e_cswift.c -e_cswift.o: e_cswift_err.c e_cswift_err.h vendor_defns/cswift.h -e_gmp.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_gmp.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_gmp.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -e_gmp.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -e_gmp.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -e_gmp.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_gmp.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_gmp.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_gmp.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_gmp.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -e_gmp.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_gmp.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -e_gmp.o: ../include/openssl/x509_vfy.h e_gmp.c -e_nuron.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_nuron.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_nuron.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_nuron.o: ../include/openssl/dsa.h ../include/openssl/dso.h -e_nuron.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_nuron.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_nuron.o: ../include/openssl/engine.h ../include/openssl/err.h -e_nuron.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_nuron.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_nuron.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_nuron.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_nuron.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -e_nuron.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_nuron.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -e_nuron.o: ../include/openssl/x509_vfy.h e_nuron.c e_nuron_err.c e_nuron_err.h -e_padlock.o: ../include/openssl/aes.h 
../include/openssl/asn1.h -e_padlock.o: ../include/openssl/bio.h ../include/openssl/buffer.h -e_padlock.o: ../include/openssl/crypto.h ../include/openssl/dso.h -e_padlock.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_padlock.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_padlock.o: ../include/openssl/engine.h ../include/openssl/err.h -e_padlock.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_padlock.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_padlock.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_padlock.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_padlock.o: ../include/openssl/rand.h ../include/openssl/safestack.h -e_padlock.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_padlock.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -e_padlock.o: ../include/openssl/x509_vfy.h e_padlock.c -e_sureware.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_sureware.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_sureware.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_sureware.o: ../include/openssl/dsa.h ../include/openssl/dso.h -e_sureware.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_sureware.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_sureware.o: ../include/openssl/engine.h ../include/openssl/err.h -e_sureware.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_sureware.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_sureware.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_sureware.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -e_sureware.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -e_sureware.o: ../include/openssl/rand.h ../include/openssl/rsa.h -e_sureware.o: ../include/openssl/safestack.h ../include/openssl/sha.h -e_sureware.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -e_sureware.o: ../include/openssl/x509.h 
../include/openssl/x509_vfy.h -e_sureware.o: e_sureware.c e_sureware_err.c e_sureware_err.h -e_sureware.o: vendor_defns/sureware.h -e_ubsec.o: ../include/openssl/asn1.h ../include/openssl/bio.h -e_ubsec.o: ../include/openssl/bn.h ../include/openssl/buffer.h -e_ubsec.o: ../include/openssl/crypto.h ../include/openssl/dh.h -e_ubsec.o: ../include/openssl/dsa.h ../include/openssl/dso.h -e_ubsec.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -e_ubsec.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -e_ubsec.o: ../include/openssl/engine.h ../include/openssl/err.h -e_ubsec.o: ../include/openssl/evp.h ../include/openssl/lhash.h -e_ubsec.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -e_ubsec.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -e_ubsec.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -e_ubsec.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -e_ubsec.o: ../include/openssl/sha.h ../include/openssl/stack.h -e_ubsec.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -e_ubsec.o: ../include/openssl/x509_vfy.h e_ubsec.c e_ubsec_err.c e_ubsec_err.h -e_ubsec.o: vendor_defns/hw_ubsec.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/e_os.h nodejs-0.11.15/deps/openssl/openssl/e_os.h --- nodejs-0.11.13/deps/openssl/openssl/e_os.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/e_os.h 2015-01-20 21:22:17.000000000 +0000 @@ -373,7 +373,16 @@ # define check_winnt() (1) #else # define check_winnt() (GetVersion() < 0x80000000) -#endif +#endif + +/* + * Visual Studio: inline is available in C++ only, however + * __inline is available for C, see + * http://msdn.microsoft.com/en-us/library/z8y1yy88.aspx + */ +#if defined(_MSC_VER) && !defined(__cplusplus) && !defined(inline) +# define inline __inline +#endif #else /* The non-microsoft world */ @@ -738,4 +747,3 @@ #endif #endif - diff -Nru nodejs-0.11.13/deps/openssl/openssl/FAQ nodejs-0.11.15/deps/openssl/openssl/FAQ --- 
nodejs-0.11.13/deps/openssl/openssl/FAQ 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/FAQ 2015-01-20 21:22:17.000000000 +0000 @@ -113,11 +113,6 @@ documentation is included in each OpenSSL distribution under the docs directory. -For information on parts of libcrypto that are not yet documented, you -might want to read Ariel Glenn's documentation on SSLeay 0.9, OpenSSL's -predecessor, at <URL: http://www.columbia.edu/~ariel/ssleay/>. Much -of this still applies to OpenSSL. - There is some documentation about certificate extensions and PKCS#12 in doc/openssl.txt diff -Nru nodejs-0.11.13/deps/openssl/openssl/Makefile nodejs-0.11.15/deps/openssl/openssl/Makefile --- nodejs-0.11.13/deps/openssl/openssl/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -4,7 +4,7 @@ ## Makefile for OpenSSL ## -VERSION=1.0.1g +VERSION=1.0.1j MAJOR=1 MINOR=0.1 SHLIB_VERSION_NUMBER=1.0.0 @@ -13,7 +13,7 @@ SHLIB_MINOR=0.0 SHLIB_EXT= PLATFORM=dist -OPTIONS= no-ec_nistp_64_gcc_128 no-gmp no-jpake no-krb5 no-md2 no-rc5 no-rfc3779 no-sctp no-shared no-store no-zlib no-zlib-dynamic static-engine +OPTIONS= no-ec_nistp_64_gcc_128 no-gmp no-jpake no-krb5 no-md2 no-rc5 no-rfc3779 no-sctp no-shared no-store no-unit-test no-zlib no-zlib-dynamic static-engine CONFIGURE_ARGS=dist SHLIB_TARGET= @@ -61,7 +61,7 @@ CC= cc CFLAG= -O -DEPFLAG= -DOPENSSL_NO_EC_NISTP_64_GCC_128 -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MD2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SCTP -DOPENSSL_NO_STORE +DEPFLAG= -DOPENSSL_NO_EC_NISTP_64_GCC_128 -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MD2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SCTP -DOPENSSL_NO_STORE -DOPENSSL_NO_UNIT_TEST PEX_LIBS= EX_LIBS= EXE_EXT= diff -Nru nodejs-0.11.13/deps/openssl/openssl/Makefile.bak nodejs-0.11.15/deps/openssl/openssl/Makefile.bak --- nodejs-0.11.13/deps/openssl/openssl/Makefile.bak 2014-05-02 
00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/Makefile.bak 2015-01-20 21:22:17.000000000 +0000 @@ -4,7 +4,7 @@ ## Makefile for OpenSSL ## -VERSION=1.0.1g-dev +VERSION=1.0.1j-dev MAJOR=1 MINOR=0.1 SHLIB_VERSION_NUMBER=1.0.0 @@ -13,7 +13,7 @@ SHLIB_MINOR=0.0 SHLIB_EXT= PLATFORM=gcc -OPTIONS= no-ec_nistp_64_gcc_128 no-gmp no-jpake no-krb5 no-md2 no-rc5 no-rfc3779 no-sctp no-shared no-store no-zlib no-zlib-dynamic static-engine +OPTIONS= no-ec_nistp_64_gcc_128 no-gmp no-jpake no-krb5 no-md2 no-rc5 no-rfc3779 no-sctp no-shared no-store no-unit-test no-zlib no-zlib-dynamic static-engine CONFIGURE_ARGS=gcc SHLIB_TARGET= @@ -61,7 +61,7 @@ CC= gcc CFLAG= -O3 -DEPFLAG= -DOPENSSL_NO_EC_NISTP_64_GCC_128 -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MD2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SCTP -DOPENSSL_NO_STORE +DEPFLAG= -DOPENSSL_NO_EC_NISTP_64_GCC_128 -DOPENSSL_NO_GMP -DOPENSSL_NO_JPAKE -DOPENSSL_NO_MD2 -DOPENSSL_NO_RC5 -DOPENSSL_NO_RFC3779 -DOPENSSL_NO_SCTP -DOPENSSL_NO_STORE -DOPENSSL_NO_UNIT_TEST PEX_LIBS= EX_LIBS= EXE_EXT= diff -Nru nodejs-0.11.13/deps/openssl/openssl/makevms.com nodejs-0.11.15/deps/openssl/openssl/makevms.com --- nodejs-0.11.13/deps/openssl/openssl/makevms.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/makevms.com 2015-01-20 21:22:17.000000000 +0000 @@ -283,6 +283,7 @@ MD4,- MD5,- MDC2,- + NEXTPROTONEG,- OCSP,- PSK,- RC2,- @@ -340,7 +341,12 @@ DH/GOST;- /STATIC_ENGINE;- /KRB5;- - /EC_NISTP_64_GCC_128 + /EC_NISTP_64_GCC_128;- + /GMP;- + /MD2;- + /RC5;- + /RFC3779;- + /SCTP $ CONFIG_ENABLE_RULES := ZLIB_DYNAMIC/ZLIB;- /THREADS $ @@ -706,8 +712,8 @@ $ SDIRS := , - 'ARCHD', - OBJECTS, - - MD2, MD4, MD5, SHA, MDC2, HMAC, RIPEMD, WHRLPOOL, - - DES, AES, RC2, RC4, RC5, IDEA, BF, CAST, CAMELLIA, SEED, MODES, - + MD4, MD5, SHA, MDC2, HMAC, RIPEMD, WHRLPOOL, - + DES, AES, RC2, RC4, IDEA, BF, CAST, CAMELLIA, SEED, MODES, - BN, EC, RSA, DSA, ECDSA, DH, ECDH, DSO, ENGINE, - BUFFER, BIO, STACK, 
LHASH, RAND, ERR, - EVP, ASN1, PEM, X509, X509V3, CONF, TXT_DB, PKCS7, PKCS12, - @@ -819,8 +825,9 @@ $! $! Build The [.xxx.EXE.CRYPTO]*.EXE Test Applications. $! -$ @CRYPTO-LIB APPS 'DEBUGGER' "''COMPILER'" "''TCPIP_TYPE'" - - "''ISSEVEN'" "''BUILDPART'" "''POINTER_SIZE'" "''ZLIB'" +$!!! DISABLED, as these test programs lack any support +$!!!$ @CRYPTO-LIB APPS 'DEBUGGER' "''COMPILER'" "''TCPIP_TYPE'" - +$!!! "''ISSEVEN'" "''BUILDPART'" "''POINTER_SIZE'" "''ZLIB'" $! $! Go Back To The Main Directory. $! diff -Nru nodejs-0.11.13/deps/openssl/openssl/NEWS nodejs-0.11.15/deps/openssl/openssl/NEWS --- nodejs-0.11.13/deps/openssl/openssl/NEWS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/NEWS 2015-01-20 21:22:17.000000000 +0000 @@ -5,6 +5,34 @@ This file gives a brief overview of the major changes between each OpenSSL release. For more details please read the CHANGES file. + Major changes between OpenSSL 1.0.1i and OpenSSL 1.0.1j [15 Oct 2014] + + o Fix for CVE-2014-3513 + o Fix for CVE-2014-3567 + o Mitigation for CVE-2014-3566 (SSL protocol vulnerability) + o Fix for CVE-2014-3568 + + Major changes between OpenSSL 1.0.1h and OpenSSL 1.0.1i [6 Aug 2014] + + o Fix for CVE-2014-3512 + o Fix for CVE-2014-3511 + o Fix for CVE-2014-3510 + o Fix for CVE-2014-3507 + o Fix for CVE-2014-3506 + o Fix for CVE-2014-3505 + o Fix for CVE-2014-3509 + o Fix for CVE-2014-5139 + o Fix for CVE-2014-3508 + + Major changes between OpenSSL 1.0.1g and OpenSSL 1.0.1h [5 Jun 2014] + + o Fix for CVE-2014-0224 + o Fix for CVE-2014-0221 + o Fix for CVE-2014-0198 + o Fix for CVE-2014-0195 + o Fix for CVE-2014-3470 + o Fix for CVE-2010-5298 + Major changes between OpenSSL 1.0.1f and OpenSSL 1.0.1g [7 Apr 2014] o Fix for CVE-2014-0160 diff -Nru nodejs-0.11.13/deps/openssl/openssl/openssl.spec nodejs-0.11.15/deps/openssl/openssl/openssl.spec --- nodejs-0.11.13/deps/openssl/openssl/openssl.spec 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/openssl/openssl/openssl.spec 2015-01-20 21:22:17.000000000 +0000 @@ -7,7 +7,7 @@ Summary: Secure Sockets Layer and cryptography libraries and tools Name: openssl #Version: %{libmaj}.%{libmin}.%{librel} -Version: 1.0.1g +Version: 1.0.1j Source0: ftp://ftp.openssl.org/source/%{name}-%{version}.tar.gz License: OpenSSL Group: System Environment/Libraries diff -Nru nodejs-0.11.13/deps/openssl/openssl/README nodejs-0.11.15/deps/openssl/openssl/README --- nodejs-0.11.13/deps/openssl/openssl/README 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/README 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ - OpenSSL 1.0.1g 7 Apr 2014 + OpenSSL 1.0.1j 15 Oct 2014 Copyright (c) 1998-2011 The OpenSSL Project Copyright (c) 1995-1998 Eric A. Young, Tim J. Hudson diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/d1_both.c nodejs-0.11.15/deps/openssl/openssl/ssl/d1_both.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/d1_both.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/d1_both.c 2015-01-20 21:22:17.000000000 +0000 @@ -587,29 +587,32 @@ return 0; } +/* dtls1_max_handshake_message_len returns the maximum number of bytes + * permitted in a DTLS handshake message for |s|. The minimum is 16KB, but may + * be greater if the maximum certificate list size requires it. 
*/ +static unsigned long dtls1_max_handshake_message_len(const SSL *s) + { + unsigned long max_len = DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH; + if (max_len < (unsigned long)s->max_cert_list) + return s->max_cert_list; + return max_len; + } static int -dtls1_reassemble_fragment(SSL *s, struct hm_header_st* msg_hdr, int *ok) +dtls1_reassemble_fragment(SSL *s, const struct hm_header_st* msg_hdr, int *ok) { hm_fragment *frag = NULL; pitem *item = NULL; int i = -1, is_complete; unsigned char seq64be[8]; - unsigned long frag_len = msg_hdr->frag_len, max_len; + unsigned long frag_len = msg_hdr->frag_len; - if ((msg_hdr->frag_off+frag_len) > msg_hdr->msg_len) + if ((msg_hdr->frag_off+frag_len) > msg_hdr->msg_len || + msg_hdr->msg_len > dtls1_max_handshake_message_len(s)) goto err; - /* Determine maximum allowed message size. Depends on (user set) - * maximum certificate length, but 16k is minimum. - */ - if (DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH < s->max_cert_list) - max_len = s->max_cert_list; - else - max_len = DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH; - - if ((msg_hdr->frag_off+frag_len) > max_len) - goto err; + if (frag_len == 0) + return DTLS1_HM_FRAGMENT_RETRY; /* Try to find item in queue */ memset(seq64be,0,sizeof(seq64be)); @@ -627,10 +630,20 @@ frag->msg_header.frag_off = 0; } else + { frag = (hm_fragment*) item->data; + if (frag->msg_header.msg_len != msg_hdr->msg_len) + { + item = NULL; + frag = NULL; + goto err; + } + } + /* If message is already reassembled, this must be a - * retransmit and can be dropped. + * retransmit and can be dropped. In this case item != NULL and so frag + * does not need to be freed. 
*/ if (frag->reassembly == NULL) { @@ -650,7 +663,9 @@ /* read the body of the fragment (header has already been read */ i = s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE, frag->fragment + msg_hdr->frag_off,frag_len,0); - if (i<=0 || (unsigned long)i!=frag_len) + if ((unsigned long)i!=frag_len) + i=-1; + if (i<=0) goto err; RSMBLY_BITMASK_MARK(frag->reassembly, (long)msg_hdr->frag_off, @@ -667,32 +682,32 @@ if (item == NULL) { - memset(seq64be,0,sizeof(seq64be)); - seq64be[6] = (unsigned char)(msg_hdr->seq>>8); - seq64be[7] = (unsigned char)(msg_hdr->seq); - item = pitem_new(seq64be, frag); if (item == NULL) { - goto err; i = -1; + goto err; } - pqueue_insert(s->d1->buffered_messages, item); + item = pqueue_insert(s->d1->buffered_messages, item); + /* pqueue_insert fails iff a duplicate item is inserted. + * However, |item| cannot be a duplicate. If it were, + * |pqueue_find|, above, would have returned it and control + * would never have reached this branch. */ + OPENSSL_assert(item != NULL); } return DTLS1_HM_FRAGMENT_RETRY; err: - if (frag != NULL) dtls1_hm_fragment_free(frag); - if (item != NULL) OPENSSL_free(item); + if (frag != NULL && item == NULL) dtls1_hm_fragment_free(frag); *ok = 0; return i; } static int -dtls1_process_out_of_seq_message(SSL *s, struct hm_header_st* msg_hdr, int *ok) +dtls1_process_out_of_seq_message(SSL *s, const struct hm_header_st* msg_hdr, int *ok) { int i=-1; hm_fragment *frag = NULL; @@ -712,7 +727,7 @@ /* If we already have an entry and this one is a fragment, * don't discard it and rather try to reassemble it. 
*/ - if (item != NULL && frag_len < msg_hdr->msg_len) + if (item != NULL && frag_len != msg_hdr->msg_len) item = NULL; /* Discard the message if sequence number was already there, is @@ -737,9 +752,12 @@ } else { - if (frag_len && frag_len < msg_hdr->msg_len) + if (frag_len != msg_hdr->msg_len) return dtls1_reassemble_fragment(s, msg_hdr, ok); + if (frag_len > dtls1_max_handshake_message_len(s)) + goto err; + frag = dtls1_hm_fragment_new(frag_len, 0); if ( frag == NULL) goto err; @@ -751,26 +769,31 @@ /* read the body of the fragment (header has already been read */ i = s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE, frag->fragment,frag_len,0); - if (i<=0 || (unsigned long)i!=frag_len) + if ((unsigned long)i!=frag_len) + i = -1; + if (i<=0) goto err; } - memset(seq64be,0,sizeof(seq64be)); - seq64be[6] = (unsigned char)(msg_hdr->seq>>8); - seq64be[7] = (unsigned char)(msg_hdr->seq); - item = pitem_new(seq64be, frag); if ( item == NULL) goto err; - pqueue_insert(s->d1->buffered_messages, item); + item = pqueue_insert(s->d1->buffered_messages, item); + /* pqueue_insert fails iff a duplicate item is inserted. + * However, |item| cannot be a duplicate. If it were, + * |pqueue_find|, above, would have returned it. Then, either + * |frag_len| != |msg_hdr->msg_len| in which case |item| is set + * to NULL and it will have been processed with + * |dtls1_reassemble_fragment|, above, or the record will have + * been discarded. 
*/ + OPENSSL_assert(item != NULL); } return DTLS1_HM_FRAGMENT_RETRY; err: - if ( frag != NULL) dtls1_hm_fragment_free(frag); - if ( item != NULL) OPENSSL_free(item); + if (frag != NULL && item == NULL) dtls1_hm_fragment_free(frag); *ok = 0; return i; } @@ -784,6 +807,7 @@ int i,al; struct hm_header_st msg_hdr; + redo: /* see if we have the required fragment already */ if ((frag_len = dtls1_retrieve_buffered_fragment(s,max,ok)) || *ok) { @@ -842,8 +866,7 @@ s->msg_callback_arg); s->init_num = 0; - return dtls1_get_message_fragment(s, st1, stn, - max, ok); + goto redo; } else /* Incorrectly formated Hello request */ { @@ -1171,6 +1194,8 @@ OPENSSL_assert(s->init_off == 0); frag = dtls1_hm_fragment_new(s->init_num, 0); + if (!frag) + return 0; memcpy(frag->fragment, s->init_buf->data, s->init_num); @@ -1467,6 +1492,9 @@ /* Read type and payload length first */ if (1 + 2 + 16 > s->s3->rrec.length) return 0; /* silently discard */ + if (s->s3->rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) + return 0; /* silently discard per RFC 6520 sec. 
4 */ + hbtype = *p++; n2s(p, payload); if (1 + 2 + payload + 16 > s->s3->rrec.length) diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/d1_clnt.c nodejs-0.11.15/deps/openssl/openssl/ssl/d1_clnt.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/d1_clnt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/d1_clnt.c 2015-01-20 21:22:17.000000000 +0000 @@ -876,12 +876,18 @@ *(p++)=0; /* Add the NULL method */ #ifndef OPENSSL_NO_TLSEXT + /* TLS extensions*/ + if (ssl_prepare_clienthello_tlsext(s) <= 0) + { + SSLerr(SSL_F_DTLS1_CLIENT_HELLO,SSL_R_CLIENTHELLO_TLSEXT); + goto err; + } if ((p = ssl_add_clienthello_tlsext(s, p, buf+SSL3_RT_MAX_PLAIN_LENGTH)) == NULL) { SSLerr(SSL_F_DTLS1_CLIENT_HELLO,ERR_R_INTERNAL_ERROR); goto err; } -#endif +#endif l=(p-d); d=buf; @@ -990,6 +996,13 @@ RSA *rsa; unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH]; + if (s->session->sess_cert == NULL) + { + /* We should always have a server certificate with SSL_kRSA. */ + SSLerr(SSL_F_DTLS1_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR); + goto err; + } + if (s->session->sess_cert->peer_rsa_tmp != NULL) rsa=s->session->sess_cert->peer_rsa_tmp; else @@ -1180,6 +1193,13 @@ { DH *dh_srvr,*dh_clnt; + if (s->session->sess_cert == NULL) + { + ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE); + SSLerr(SSL_F_DTLS1_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE); + goto err; + } + if (s->session->sess_cert->peer_dh_tmp != NULL) dh_srvr=s->session->sess_cert->peer_dh_tmp; else @@ -1239,6 +1259,13 @@ int ecdh_clnt_cert = 0; int field_size = 0; + if (s->session->sess_cert == NULL) + { + ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE); + SSLerr(SSL_F_DTLS1_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE); + goto err; + } + /* Did we send out the client's * ECDH share for use in premaster * computation as part of client certificate? 
@@ -1714,5 +1741,3 @@ /* SSL3_ST_CW_CERT_D */ return(dtls1_do_write(s,SSL3_RT_HANDSHAKE)); } - - diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/d1_lib.c nodejs-0.11.15/deps/openssl/openssl/ssl/d1_lib.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/d1_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/d1_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -176,9 +176,12 @@ while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) { - frag = (hm_fragment *)item->data; - OPENSSL_free(frag->fragment); - OPENSSL_free(frag); + rdata = (DTLS1_RECORD_DATA *) item->data; + if (rdata->rbuf.buf) + { + OPENSSL_free(rdata->rbuf.buf); + } + OPENSSL_free(item->data); pitem_free(item); } } @@ -263,6 +266,16 @@ case DTLS_CTRL_LISTEN: ret = dtls1_listen(s, parg); break; + case SSL_CTRL_CHECK_PROTO_VERSION: + /* For library-internal use; checks that the current protocol + * is the highest enabled version (according to s->ctx->method, + * as version negotiation may have changed s->method). */ +#if DTLS_MAX_VERSION != DTLS1_VERSION +# error Code needs update for DTLS_method() support beyond DTLS1_VERSION. +#endif + /* Just one protocol version is supported so far; + * fail closed if the version is not as expected. 
*/ + return s->version == DTLS_MAX_VERSION; default: ret = ssl3_ctrl(s, cmd, larg, parg); diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/d1_pkt.c nodejs-0.11.15/deps/openssl/openssl/ssl/d1_pkt.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/d1_pkt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/d1_pkt.c 2015-01-20 21:22:17.000000000 +0000 @@ -239,14 +239,6 @@ } #endif - /* insert should not fail, since duplicates are dropped */ - if (pqueue_insert(queue->q, item) == NULL) - { - OPENSSL_free(rdata); - pitem_free(item); - return(0); - } - s->packet = NULL; s->packet_length = 0; memset(&(s->s3->rbuf), 0, sizeof(SSL3_BUFFER)); @@ -259,7 +251,16 @@ pitem_free(item); return(0); } - + + /* insert should not fail, since duplicates are dropped */ + if (pqueue_insert(queue->q, item) == NULL) + { + SSLerr(SSL_F_DTLS1_BUFFER_RECORD, ERR_R_INTERNAL_ERROR); + OPENSSL_free(rdata); + pitem_free(item); + return(0); + } + return(1); } diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/d1_srtp.c nodejs-0.11.15/deps/openssl/openssl/ssl/d1_srtp.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/d1_srtp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/d1_srtp.c 2015-01-20 21:22:17.000000000 +0000 @@ -168,25 +168,6 @@ return 1; } -static int find_profile_by_num(unsigned profile_num, - SRTP_PROTECTION_PROFILE **pptr) - { - SRTP_PROTECTION_PROFILE *p; - - p=srtp_known_profiles; - while(p->name) - { - if(p->id == profile_num) - { - *pptr=p; - return 0; - } - p++; - } - - return 1; - } - static int ssl_ctx_make_profiles(const char *profiles_string,STACK_OF(SRTP_PROTECTION_PROFILE) **out) { STACK_OF(SRTP_PROTECTION_PROFILE) *profiles; @@ -209,11 +190,19 @@ if(!find_profile_by_name(ptr,&p, col ? 
col-ptr : (int)strlen(ptr))) { + if (sk_SRTP_PROTECTION_PROFILE_find(profiles,p) >= 0) + { + SSLerr(SSL_F_SSL_CTX_MAKE_PROFILES,SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); + sk_SRTP_PROTECTION_PROFILE_free(profiles); + return 1; + } + sk_SRTP_PROTECTION_PROFILE_push(profiles,p); } else { SSLerr(SSL_F_SSL_CTX_MAKE_PROFILES,SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE); + sk_SRTP_PROTECTION_PROFILE_free(profiles); return 1; } @@ -305,13 +294,12 @@ int ssl_parse_clienthello_use_srtp_ext(SSL *s, unsigned char *d, int len,int *al) { - SRTP_PROTECTION_PROFILE *cprof,*sprof; - STACK_OF(SRTP_PROTECTION_PROFILE) *clnt=0,*srvr; + SRTP_PROTECTION_PROFILE *sprof; + STACK_OF(SRTP_PROTECTION_PROFILE) *srvr; int ct; int mki_len; - int i,j; - int id; - int ret; + int i, srtp_pref; + unsigned int id; /* Length value + the MKI length */ if(len < 3) @@ -341,22 +329,32 @@ return 1; } + srvr=SSL_get_srtp_profiles(s); + s->srtp_profile = NULL; + /* Search all profiles for a match initially */ + srtp_pref = sk_SRTP_PROTECTION_PROFILE_num(srvr); - clnt=sk_SRTP_PROTECTION_PROFILE_new_null(); - while(ct) { n2s(d,id); ct-=2; len-=2; - if(!find_profile_by_num(id,&cprof)) + /* + * Only look for match in profiles of higher preference than + * current match. + * If no profiles have been have been configured then this + * does nothing. + */ + for (i = 0; i < srtp_pref; i++) { - sk_SRTP_PROTECTION_PROFILE_push(clnt,cprof); - } - else - { - ; /* Ignore */ + sprof = sk_SRTP_PROTECTION_PROFILE_value(srvr, i); + if (sprof->id == id) + { + s->srtp_profile = sprof; + srtp_pref = i; + break; + } } } @@ -371,36 +369,7 @@ return 1; } - srvr=SSL_get_srtp_profiles(s); - - /* Pick our most preferred profile. 
If no profiles have been - configured then the outer loop doesn't run - (sk_SRTP_PROTECTION_PROFILE_num() = -1) - and so we just return without doing anything */ - for(i=0;i<sk_SRTP_PROTECTION_PROFILE_num(srvr);i++) - { - sprof=sk_SRTP_PROTECTION_PROFILE_value(srvr,i); - - for(j=0;j<sk_SRTP_PROTECTION_PROFILE_num(clnt);j++) - { - cprof=sk_SRTP_PROTECTION_PROFILE_value(clnt,j); - - if(cprof->id==sprof->id) - { - s->srtp_profile=sprof; - *al=0; - ret=0; - goto done; - } - } - } - - ret=0; - -done: - if(clnt) sk_SRTP_PROTECTION_PROFILE_free(clnt); - - return ret; + return 0; } int ssl_add_serverhello_use_srtp_ext(SSL *s, unsigned char *p, int *len, int maxlen) diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/d1_srvr.c nodejs-0.11.15/deps/openssl/openssl/ssl/d1_srvr.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/d1_srvr.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/d1_srvr.c 2015-01-20 21:22:17.000000000 +0000 @@ -598,10 +598,11 @@ s->state = SSL3_ST_SR_CLNT_HELLO_C; } else { - /* could be sent for a DH cert, even if we - * have not asked for it :-) */ - ret=ssl3_get_client_certificate(s); - if (ret <= 0) goto end; + if (s->s3->tmp.cert_request) + { + ret=ssl3_get_client_certificate(s); + if (ret <= 0) goto end; + } s->init_num=0; s->state=SSL3_ST_SR_KEY_EXCH_A; } @@ -980,6 +981,11 @@ #endif #ifndef OPENSSL_NO_TLSEXT + if (ssl_prepare_serverhello_tlsext(s) <= 0) + { + SSLerr(SSL_F_DTLS1_SEND_SERVER_HELLO,SSL_R_SERVERHELLO_TLSEXT); + return -1; + } if ((p = ssl_add_serverhello_tlsext(s, p, buf+SSL3_RT_MAX_PLAIN_LENGTH)) == NULL) { SSLerr(SSL_F_DTLS1_SEND_SERVER_HELLO,ERR_R_INTERNAL_ERROR); @@ -1356,6 +1362,7 @@ (unsigned char *)encodedPoint, encodedlen); OPENSSL_free(encodedPoint); + encodedPoint = NULL; p += encodedlen; } #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/dtls1.h nodejs-0.11.15/deps/openssl/openssl/ssl/dtls1.h --- nodejs-0.11.13/deps/openssl/openssl/ssl/dtls1.h 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/openssl/openssl/ssl/dtls1.h 2015-01-20 21:22:17.000000000 +0000 @@ -84,6 +84,8 @@ #endif #define DTLS1_VERSION 0xFEFF +#define DTLS_MAX_VERSION DTLS1_VERSION + #define DTLS1_BAD_VER 0x0100 #if 0 @@ -284,4 +286,3 @@ } #endif #endif - diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/heartbeat_test.c nodejs-0.11.15/deps/openssl/openssl/ssl/heartbeat_test.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/heartbeat_test.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/heartbeat_test.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,466 @@ +/* test/heartbeat_test.c */ +/* + * Unit test for TLS heartbeats. + * + * Acts as a regression test against the Heartbleed bug (CVE-2014-0160). + * + * Author: Mike Bland (mbland@acm.org, http://mike-bland.com/) + * Date: 2014-04-12 + * License: Creative Commons Attribution 4.0 International (CC By 4.0) + * http://creativecommons.org/licenses/by/4.0/deed.en_US + * + * OUTPUT + * ------ + * The program returns zero on success. It will print a message with a count + * of the number of failed tests and return nonzero if any tests fail. + * + * It will print the contents of the request and response buffers for each + * failing test. In a "fixed" version, all the tests should pass and there + * should be no output. + * + * In a "bleeding" version, you'll see: + * + * test_dtls1_heartbleed failed: + * expected payload len: 0 + * received: 1024 + * sent 26 characters + * "HEARTBLEED " + * received 1024 characters + * "HEARTBLEED \xde\xad\xbe\xef..." + * ** test_dtls1_heartbleed failed ** + * + * The contents of the returned buffer in the failing test will depend on the + * contents of memory on your machine. 
+ * + * MORE INFORMATION + * ---------------- + * http://mike-bland.com/2014/04/12/heartbleed.html + * http://mike-bland.com/tags/heartbleed.html + */ + +#define OPENSSL_UNIT_TEST + +#include "../test/testutil.h" + +#include "../ssl/ssl_locl.h" +#include <ctype.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#if !defined(OPENSSL_NO_HEARTBEATS) && !defined(OPENSSL_NO_UNIT_TEST) + +/* As per https://tools.ietf.org/html/rfc6520#section-4 */ +#define MIN_PADDING_SIZE 16 + +/* Maximum number of payload characters to print as test output */ +#define MAX_PRINTABLE_CHARACTERS 1024 + +typedef struct heartbeat_test_fixture + { + SSL_CTX *ctx; + SSL *s; + const char* test_case_name; + int (*process_heartbeat)(SSL* s); + unsigned char* payload; + int sent_payload_len; + int expected_return_value; + int return_payload_offset; + int expected_payload_len; + const char* expected_return_payload; + } HEARTBEAT_TEST_FIXTURE; + +static HEARTBEAT_TEST_FIXTURE set_up(const char* const test_case_name, + const SSL_METHOD* meth) + { + HEARTBEAT_TEST_FIXTURE fixture; + int setup_ok = 1; + memset(&fixture, 0, sizeof(fixture)); + fixture.test_case_name = test_case_name; + + fixture.ctx = SSL_CTX_new(meth); + if (!fixture.ctx) + { + fprintf(stderr, "Failed to allocate SSL_CTX for test: %s\n", + test_case_name); + setup_ok = 0; + goto fail; + } + + fixture.s = SSL_new(fixture.ctx); + if (!fixture.s) + { + fprintf(stderr, "Failed to allocate SSL for test: %s\n", test_case_name); + setup_ok = 0; + goto fail; + } + + if (!ssl_init_wbio_buffer(fixture.s, 1)) + { + fprintf(stderr, "Failed to set up wbio buffer for test: %s\n", + test_case_name); + setup_ok = 0; + goto fail; + } + + if (!ssl3_setup_buffers(fixture.s)) + { + fprintf(stderr, "Failed to setup buffers for test: %s\n", + test_case_name); + setup_ok = 0; + goto fail; + } + + /* Clear the memory for the return buffer, since this isn't automatically + * zeroed in opt mode and will cause spurious test failures that will 
change + * with each execution. + */ + memset(fixture.s->s3->wbuf.buf, 0, fixture.s->s3->wbuf.len); + + fail: + if (!setup_ok) + { + ERR_print_errors_fp(stderr); + exit(EXIT_FAILURE); + } + return fixture; + } + +static HEARTBEAT_TEST_FIXTURE set_up_dtls(const char* const test_case_name) + { + HEARTBEAT_TEST_FIXTURE fixture = set_up(test_case_name, + DTLSv1_server_method()); + fixture.process_heartbeat = dtls1_process_heartbeat; + + /* As per dtls1_get_record(), skipping the following from the beginning of + * the returned heartbeat message: + * type-1 byte; version-2 bytes; sequence number-8 bytes; length-2 bytes + * + * And then skipping the 1-byte type encoded by process_heartbeat for + * a total of 14 bytes, at which point we can grab the length and the + * payload we seek. + */ + fixture.return_payload_offset = 14; + return fixture; + } + +/* Needed by ssl3_write_bytes() */ +static int dummy_handshake(SSL* s) + { + return 1; + } + +static HEARTBEAT_TEST_FIXTURE set_up_tls(const char* const test_case_name) + { + HEARTBEAT_TEST_FIXTURE fixture = set_up(test_case_name, + TLSv1_server_method()); + fixture.process_heartbeat = tls1_process_heartbeat; + fixture.s->handshake_func = dummy_handshake; + + /* As per do_ssl3_write(), skipping the following from the beginning of + * the returned heartbeat message: + * type-1 byte; version-2 bytes; length-2 bytes + * + * And then skipping the 1-byte type encoded by process_heartbeat for + * a total of 6 bytes, at which point we can grab the length and the payload + * we seek. + */ + fixture.return_payload_offset = 6; + return fixture; + } + +static void tear_down(HEARTBEAT_TEST_FIXTURE fixture) + { + ERR_print_errors_fp(stderr); + SSL_free(fixture.s); + SSL_CTX_free(fixture.ctx); + } + +static void print_payload(const char* const prefix, + const unsigned char *payload, const int n) + { + const int end = n < MAX_PRINTABLE_CHARACTERS ? 
n + : MAX_PRINTABLE_CHARACTERS; + int i = 0; + + printf("%s %d character%s", prefix, n, n == 1 ? "" : "s"); + if (end != n) printf(" (first %d shown)", end); + printf("\n \""); + + for (; i != end; ++i) + { + const unsigned char c = payload[i]; + if (isprint(c)) fputc(c, stdout); + else printf("\\x%02x", c); + } + printf("\"\n"); + } + +static int execute_heartbeat(HEARTBEAT_TEST_FIXTURE fixture) + { + int result = 0; + SSL* s = fixture.s; + unsigned char *payload = fixture.payload; + unsigned char sent_buf[MAX_PRINTABLE_CHARACTERS + 1]; + int return_value; + unsigned const char *p; + int actual_payload_len; + + s->s3->rrec.data = payload; + s->s3->rrec.length = strlen((const char*)payload); + *payload++ = TLS1_HB_REQUEST; + s2n(fixture.sent_payload_len, payload); + + /* Make a local copy of the request, since it gets overwritten at some + * point */ + memcpy((char *)sent_buf, (const char*)payload, sizeof(sent_buf)); + + return_value = fixture.process_heartbeat(s); + + if (return_value != fixture.expected_return_value) + { + printf("%s failed: expected return value %d, received %d\n", + fixture.test_case_name, fixture.expected_return_value, + return_value); + result = 1; + } + + /* If there is any byte alignment, it will be stored in wbuf.offset. 
*/ + p = &(s->s3->wbuf.buf[ + fixture.return_payload_offset + s->s3->wbuf.offset]); + actual_payload_len = 0; + n2s(p, actual_payload_len); + + if (actual_payload_len != fixture.expected_payload_len) + { + printf("%s failed:\n expected payload len: %d\n received: %d\n", + fixture.test_case_name, fixture.expected_payload_len, + actual_payload_len); + print_payload("sent", sent_buf, strlen((const char*)sent_buf)); + print_payload("received", p, actual_payload_len); + result = 1; + } + else + { + char* actual_payload = BUF_strndup((const char*)p, actual_payload_len); + if (strcmp(actual_payload, fixture.expected_return_payload) != 0) + { + printf("%s failed:\n expected payload: \"%s\"\n received: \"%s\"\n", + fixture.test_case_name, fixture.expected_return_payload, + actual_payload); + result = 1; + } + OPENSSL_free(actual_payload); + } + + if (result != 0) + { + printf("** %s failed **\n--------\n", fixture.test_case_name); + } + return result; + } + +static int honest_payload_size(unsigned char payload_buf[]) + { + /* Omit three-byte pad at the beginning for type and payload length */ + return strlen((const char*)&payload_buf[3]) - MIN_PADDING_SIZE; + } + +#define SETUP_HEARTBEAT_TEST_FIXTURE(type)\ + SETUP_TEST_FIXTURE(HEARTBEAT_TEST_FIXTURE, set_up_##type) + +#define EXECUTE_HEARTBEAT_TEST()\ + EXECUTE_TEST(execute_heartbeat, tear_down) + +static int test_dtls1_not_bleeding() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " Not bleeding, sixteen spaces of padding" + " "; + const int payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = "Not bleeding, sixteen spaces of padding"; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_not_bleeding_empty_payload() + { + 
int payload_buf_len; + + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Three-byte pad at the beginning for type and payload length, plus a NUL + * at the end */ + unsigned char payload_buf[4 + MIN_PADDING_SIZE]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_heartbleed() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " HEARTBLEED "; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_heartbleed_empty_payload() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Excluding the NUL at the end, one byte short of type + payload length + + * minimum padding */ + unsigned char payload_buf[MIN_PADDING_SIZE + 3]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_heartbleed_excessive_plaintext_length() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Excluding the NUL at the end, one byte in excess of maximum allowed + * heartbeat message length */ + unsigned char payload_buf[SSL3_RT_MAX_PLAIN_LENGTH + 2]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + + 
fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = honest_payload_size(payload_buf); + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_not_bleeding() + { + SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " Not bleeding, sixteen spaces of padding" + " "; + const int payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = "Not bleeding, sixteen spaces of padding"; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_not_bleeding_empty_payload() + { + int payload_buf_len; + + SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Three-byte pad at the beginning for type and payload length, plus a NUL + * at the end */ + unsigned char payload_buf[4 + MIN_PADDING_SIZE]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_heartbleed() + { + SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " HEARTBLEED "; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_heartbleed_empty_payload() + { + 
SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Excluding the NUL at the end, one byte short of type + payload length + + * minimum padding */ + unsigned char payload_buf[MIN_PADDING_SIZE + 3]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +#undef EXECUTE_HEARTBEAT_TEST +#undef SETUP_HEARTBEAT_TEST_FIXTURE + +int main(int argc, char *argv[]) + { + int num_failed; + + SSL_library_init(); + SSL_load_error_strings(); + + num_failed = test_dtls1_not_bleeding() + + test_dtls1_not_bleeding_empty_payload() + + test_dtls1_heartbleed() + + test_dtls1_heartbleed_empty_payload() + + /* The following test causes an assertion failure at + * ssl/d1_pkt.c:dtls1_write_bytes() in versions prior to 1.0.1g: */ + (OPENSSL_VERSION_NUMBER >= 0x1000107fL ? + test_dtls1_heartbleed_excessive_plaintext_length() : 0) + + test_tls1_not_bleeding() + + test_tls1_not_bleeding_empty_payload() + + test_tls1_heartbleed() + + test_tls1_heartbleed_empty_payload() + + 0; + + ERR_print_errors_fp(stderr); + + if (num_failed != 0) + { + printf("%d test%s failed\n", num_failed, num_failed != 1 ? 
"s" : ""); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; + } + +#else /* OPENSSL_NO_HEARTBEATS*/ + +int main(int argc, char *argv[]) + { + return EXIT_SUCCESS; + } +#endif /* OPENSSL_NO_HEARTBEATS */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/Makefile nodejs-0.11.15/deps/openssl/openssl/ssl/Makefile --- nodejs-0.11.13/deps/openssl/openssl/ssl/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -15,7 +15,7 @@ CFLAGS= $(INCLUDES) $(CFLAG) GENERAL=Makefile README ssl-lib.com install.com -TEST=ssltest.c +TEST=ssltest.c heartbeat_test.c APPS= LIB=$(TOP)/libssl.a @@ -30,7 +30,7 @@ ssl_lib.c ssl_err2.c ssl_cert.c ssl_sess.c \ ssl_ciph.c ssl_stat.c ssl_rsa.c \ ssl_asn1.c ssl_txt.c ssl_algs.c \ - bio_ssl.c ssl_err.c kssl.c tls_srp.c t1_reneg.c + bio_ssl.c ssl_err.c kssl.c tls_srp.c t1_reneg.c ssl_utst.c LIBOBJ= \ s2_meth.o s2_srvr.o s2_clnt.o s2_lib.o s2_enc.o s2_pkt.o \ s3_meth.o s3_srvr.o s3_clnt.o s3_lib.o s3_enc.o s3_pkt.o s3_both.o s3_cbc.o \ @@ -41,7 +41,7 @@ ssl_lib.o ssl_err2.o ssl_cert.o ssl_sess.o \ ssl_ciph.o ssl_stat.o ssl_rsa.o \ ssl_asn1.o ssl_txt.o ssl_algs.o \ - bio_ssl.o ssl_err.o kssl.o tls_srp.o t1_reneg.o + bio_ssl.o ssl_err.o kssl.o tls_srp.o t1_reneg.o ssl_utst.o SRC= $(LIBSRC) @@ -547,26 +547,27 @@ s3_both.o: ../include/openssl/stack.h ../include/openssl/symhacks.h s3_both.o: ../include/openssl/tls1.h ../include/openssl/x509.h s3_both.o: ../include/openssl/x509_vfy.h s3_both.c ssl_locl.h -s3_cbc.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_cbc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_cbc.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s3_cbc.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_cbc.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_cbc.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_cbc.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_cbc.o: 
../include/openssl/kssl.h ../include/openssl/lhash.h -s3_cbc.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -s3_cbc.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -s3_cbc.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -s3_cbc.o: ../include/openssl/pem.h ../include/openssl/pem2.h -s3_cbc.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -s3_cbc.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_cbc.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_cbc.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_cbc.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_cbc.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_cbc.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_cbc.o: ../include/openssl/x509_vfy.h s3_cbc.c ssl_locl.h +s3_cbc.o: ../crypto/constant_time_locl.h ../e_os.h ../include/openssl/asn1.h +s3_cbc.o: ../include/openssl/bio.h ../include/openssl/buffer.h +s3_cbc.o: ../include/openssl/comp.h ../include/openssl/crypto.h +s3_cbc.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h +s3_cbc.o: ../include/openssl/e_os2.h ../include/openssl/ec.h +s3_cbc.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h +s3_cbc.o: ../include/openssl/err.h ../include/openssl/evp.h +s3_cbc.o: ../include/openssl/hmac.h ../include/openssl/kssl.h +s3_cbc.o: ../include/openssl/lhash.h ../include/openssl/md5.h +s3_cbc.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h +s3_cbc.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h +s3_cbc.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h +s3_cbc.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h +s3_cbc.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h +s3_cbc.o: ../include/openssl/safestack.h ../include/openssl/sha.h +s3_cbc.o: ../include/openssl/srtp.h ../include/openssl/ssl.h +s3_cbc.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h +s3_cbc.o: ../include/openssl/ssl3.h 
../include/openssl/stack.h +s3_cbc.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h +s3_cbc.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s3_cbc.c +s3_cbc.o: ssl_locl.h s3_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h s3_clnt.o: ../include/openssl/bn.h ../include/openssl/buffer.h s3_clnt.o: ../include/openssl/comp.h ../include/openssl/crypto.h @@ -671,28 +672,29 @@ s3_pkt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h s3_pkt.o: ../include/openssl/tls1.h ../include/openssl/x509.h s3_pkt.o: ../include/openssl/x509_vfy.h s3_pkt.c ssl_locl.h -s3_srvr.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_srvr.o: ../include/openssl/bn.h ../include/openssl/buffer.h -s3_srvr.o: ../include/openssl/comp.h ../include/openssl/crypto.h -s3_srvr.o: ../include/openssl/dh.h ../include/openssl/dsa.h -s3_srvr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_srvr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_srvr.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_srvr.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_srvr.o: ../include/openssl/krb5_asn.h ../include/openssl/kssl.h -s3_srvr.o: ../include/openssl/lhash.h ../include/openssl/md5.h -s3_srvr.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_srvr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_srvr.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_srvr.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_srvr.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s3_srvr.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_srvr.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_srvr.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_srvr.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_srvr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_srvr.o: ../include/openssl/tls1.h ../include/openssl/x509.h 
-s3_srvr.o: ../include/openssl/x509_vfy.h kssl_lcl.h s3_srvr.c ssl_locl.h +s3_srvr.o: ../crypto/constant_time_locl.h ../e_os.h ../include/openssl/asn1.h +s3_srvr.o: ../include/openssl/bio.h ../include/openssl/bn.h +s3_srvr.o: ../include/openssl/buffer.h ../include/openssl/comp.h +s3_srvr.o: ../include/openssl/crypto.h ../include/openssl/dh.h +s3_srvr.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h +s3_srvr.o: ../include/openssl/e_os2.h ../include/openssl/ec.h +s3_srvr.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h +s3_srvr.o: ../include/openssl/err.h ../include/openssl/evp.h +s3_srvr.o: ../include/openssl/hmac.h ../include/openssl/krb5_asn.h +s3_srvr.o: ../include/openssl/kssl.h ../include/openssl/lhash.h +s3_srvr.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h +s3_srvr.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h +s3_srvr.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h +s3_srvr.o: ../include/openssl/pem.h ../include/openssl/pem2.h +s3_srvr.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h +s3_srvr.o: ../include/openssl/rand.h ../include/openssl/rsa.h +s3_srvr.o: ../include/openssl/safestack.h ../include/openssl/sha.h +s3_srvr.o: ../include/openssl/srtp.h ../include/openssl/ssl.h +s3_srvr.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h +s3_srvr.o: ../include/openssl/ssl3.h ../include/openssl/stack.h +s3_srvr.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h +s3_srvr.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h kssl_lcl.h +s3_srvr.o: s3_srvr.c ssl_locl.h ssl_algs.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h ssl_algs.o: ../include/openssl/buffer.h ../include/openssl/comp.h ssl_algs.o: ../include/openssl/crypto.h ../include/openssl/dsa.h @@ -915,6 +917,26 @@ ssl_txt.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h ssl_txt.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h ssl_txt.o: ssl_txt.c +ssl_utst.o: ../e_os.h 
../include/openssl/asn1.h ../include/openssl/bio.h +ssl_utst.o: ../include/openssl/buffer.h ../include/openssl/comp.h +ssl_utst.o: ../include/openssl/crypto.h ../include/openssl/dsa.h +ssl_utst.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h +ssl_utst.o: ../include/openssl/ec.h ../include/openssl/ecdh.h +ssl_utst.o: ../include/openssl/ecdsa.h ../include/openssl/err.h +ssl_utst.o: ../include/openssl/evp.h ../include/openssl/hmac.h +ssl_utst.o: ../include/openssl/kssl.h ../include/openssl/lhash.h +ssl_utst.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h +ssl_utst.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h +ssl_utst.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h +ssl_utst.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h +ssl_utst.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h +ssl_utst.o: ../include/openssl/safestack.h ../include/openssl/sha.h +ssl_utst.o: ../include/openssl/srtp.h ../include/openssl/ssl.h +ssl_utst.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h +ssl_utst.o: ../include/openssl/ssl3.h ../include/openssl/stack.h +ssl_utst.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h +ssl_utst.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h +ssl_utst.o: ssl_utst.c t1_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h t1_clnt.o: ../include/openssl/buffer.h ../include/openssl/comp.h t1_clnt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/Makefile.save nodejs-0.11.15/deps/openssl/openssl/ssl/Makefile.save --- nodejs-0.11.13/deps/openssl/openssl/ssl/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,1061 +0,0 @@ -# -# OpenSSL/ssl/Makefile -# - -DIR= ssl -TOP= .. 
-CC= cc -INCLUDES= -I../crypto -I$(TOP) -I../include $(KRB5_INCLUDES) -CFLAG=-g -MAKEFILE= Makefile -AR= ar r -# KRB5 stuff -KRB5_INCLUDES= - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile README ssl-lib.com install.com -TEST=ssltest.c -APPS= - -LIB=$(TOP)/libssl.a -SHARED_LIB= libssl$(SHLIB_EXT) -LIBSRC= \ - s2_meth.c s2_srvr.c s2_clnt.c s2_lib.c s2_enc.c s2_pkt.c \ - s3_meth.c s3_srvr.c s3_clnt.c s3_lib.c s3_enc.c s3_pkt.c s3_both.c s3_cbc.c \ - s23_meth.c s23_srvr.c s23_clnt.c s23_lib.c s23_pkt.c \ - t1_meth.c t1_srvr.c t1_clnt.c t1_lib.c t1_enc.c \ - d1_meth.c d1_srvr.c d1_clnt.c d1_lib.c d1_pkt.c \ - d1_both.c d1_enc.c d1_srtp.c \ - ssl_lib.c ssl_err2.c ssl_cert.c ssl_sess.c \ - ssl_ciph.c ssl_stat.c ssl_rsa.c \ - ssl_asn1.c ssl_txt.c ssl_algs.c \ - bio_ssl.c ssl_err.c kssl.c tls_srp.c t1_reneg.c -LIBOBJ= \ - s2_meth.o s2_srvr.o s2_clnt.o s2_lib.o s2_enc.o s2_pkt.o \ - s3_meth.o s3_srvr.o s3_clnt.o s3_lib.o s3_enc.o s3_pkt.o s3_both.o s3_cbc.o \ - s23_meth.o s23_srvr.o s23_clnt.o s23_lib.o s23_pkt.o \ - t1_meth.o t1_srvr.o t1_clnt.o t1_lib.o t1_enc.o \ - d1_meth.o d1_srvr.o d1_clnt.o d1_lib.o d1_pkt.o \ - d1_both.o d1_enc.o d1_srtp.o\ - ssl_lib.o ssl_err2.o ssl_cert.o ssl_sess.o \ - ssl_ciph.o ssl_stat.o ssl_rsa.o \ - ssl_asn1.o ssl_txt.o ssl_algs.o \ - bio_ssl.o ssl_err.o kssl.o tls_srp.o t1_reneg.o - -SRC= $(LIBSRC) - -EXHEADER= ssl.h ssl2.h ssl3.h ssl23.h tls1.h dtls1.h kssl.h srtp.h -HEADER= $(EXHEADER) ssl_locl.h kssl_lcl.h - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ..; $(MAKE) DIRS=$(DIR) all) - -all: shared - -lib: $(LIBOBJ) - $(AR) $(LIB) $(LIBOBJ) - $(RANLIB) $(LIB) || echo Never mind. 
- @touch lib - -shared: lib - if [ -n "$(SHARED_LIBS)" ]; then \ - (cd ..; $(MAKE) $(SHARED_LIB)); \ - fi - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - @$(PERL) $(TOP)/util/mklink.pl ../include/openssl $(EXHEADER) - @$(PERL) $(TOP)/util/mklink.pl ../test $(TEST) - @$(PERL) $(TOP)/util/mklink.pl ../apps $(APPS) - -install: - @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile... - @headerlist="$(EXHEADER)"; for i in $$headerlist ; \ - do \ - (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \ - chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \ - done; - -tags: - ctags $(SRC) - -tests: - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @if [ -z "$(THIS)" ]; then \ - $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; \ - else \ - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC); \ - fi - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - -clean: - rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -bio_ssl.o: ../include/openssl/asn1.h ../include/openssl/bio.h -bio_ssl.o: ../include/openssl/buffer.h ../include/openssl/comp.h -bio_ssl.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -bio_ssl.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -bio_ssl.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -bio_ssl.o: ../include/openssl/err.h ../include/openssl/evp.h -bio_ssl.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -bio_ssl.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -bio_ssl.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -bio_ssl.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -bio_ssl.o: ../include/openssl/pem.h ../include/openssl/pem2.h -bio_ssl.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -bio_ssl.o: ../include/openssl/safestack.h ../include/openssl/sha.h -bio_ssl.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -bio_ssl.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -bio_ssl.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -bio_ssl.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -bio_ssl.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h bio_ssl.c -d1_both.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_both.o: ../include/openssl/buffer.h ../include/openssl/comp.h -d1_both.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -d1_both.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_both.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_both.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_both.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_both.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_both.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -d1_both.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -d1_both.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -d1_both.o: ../include/openssl/pem2.h 
../include/openssl/pkcs7.h -d1_both.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -d1_both.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -d1_both.o: ../include/openssl/sha.h ../include/openssl/srtp.h -d1_both.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -d1_both.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -d1_both.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -d1_both.o: ../include/openssl/tls1.h ../include/openssl/x509.h -d1_both.o: ../include/openssl/x509_vfy.h d1_both.c ssl_locl.h -d1_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_clnt.o: ../include/openssl/bn.h ../include/openssl/buffer.h -d1_clnt.o: ../include/openssl/comp.h ../include/openssl/crypto.h -d1_clnt.o: ../include/openssl/dh.h ../include/openssl/dsa.h -d1_clnt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_clnt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_clnt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_clnt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_clnt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_clnt.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -d1_clnt.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -d1_clnt.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -d1_clnt.o: ../include/openssl/pem.h ../include/openssl/pem2.h -d1_clnt.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -d1_clnt.o: ../include/openssl/rand.h ../include/openssl/rsa.h -d1_clnt.o: ../include/openssl/safestack.h ../include/openssl/sha.h -d1_clnt.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -d1_clnt.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -d1_clnt.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -d1_clnt.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -d1_clnt.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h d1_clnt.c -d1_clnt.o: kssl_lcl.h ssl_locl.h -d1_enc.o: ../e_os.h 
../include/openssl/asn1.h ../include/openssl/bio.h -d1_enc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -d1_enc.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -d1_enc.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_enc.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_enc.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_enc.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_enc.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_enc.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -d1_enc.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -d1_enc.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -d1_enc.o: ../include/openssl/pem.h ../include/openssl/pem2.h -d1_enc.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -d1_enc.o: ../include/openssl/rand.h ../include/openssl/rsa.h -d1_enc.o: ../include/openssl/safestack.h ../include/openssl/sha.h -d1_enc.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -d1_enc.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -d1_enc.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -d1_enc.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -d1_enc.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h d1_enc.c -d1_enc.o: ssl_locl.h -d1_lib.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_lib.o: ../include/openssl/buffer.h ../include/openssl/comp.h -d1_lib.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -d1_lib.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_lib.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_lib.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_lib.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_lib.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_lib.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -d1_lib.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -d1_lib.o: 
../include/openssl/ossl_typ.h ../include/openssl/pem.h -d1_lib.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -d1_lib.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -d1_lib.o: ../include/openssl/safestack.h ../include/openssl/sha.h -d1_lib.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -d1_lib.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -d1_lib.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -d1_lib.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -d1_lib.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h d1_lib.c -d1_lib.o: ssl_locl.h -d1_meth.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_meth.o: ../include/openssl/buffer.h ../include/openssl/comp.h -d1_meth.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -d1_meth.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_meth.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_meth.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_meth.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_meth.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_meth.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -d1_meth.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -d1_meth.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -d1_meth.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -d1_meth.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -d1_meth.o: ../include/openssl/safestack.h ../include/openssl/sha.h -d1_meth.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -d1_meth.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -d1_meth.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -d1_meth.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -d1_meth.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h d1_meth.c -d1_meth.o: ssl_locl.h -d1_pkt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_pkt.o: 
../include/openssl/buffer.h ../include/openssl/comp.h -d1_pkt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -d1_pkt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_pkt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_pkt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_pkt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_pkt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_pkt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -d1_pkt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -d1_pkt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -d1_pkt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -d1_pkt.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -d1_pkt.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -d1_pkt.o: ../include/openssl/sha.h ../include/openssl/srtp.h -d1_pkt.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -d1_pkt.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -d1_pkt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -d1_pkt.o: ../include/openssl/tls1.h ../include/openssl/x509.h -d1_pkt.o: ../include/openssl/x509_vfy.h d1_pkt.c ssl_locl.h -d1_srtp.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_srtp.o: ../include/openssl/buffer.h ../include/openssl/comp.h -d1_srtp.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -d1_srtp.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_srtp.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_srtp.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_srtp.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_srtp.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_srtp.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -d1_srtp.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -d1_srtp.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -d1_srtp.o: ../include/openssl/pem2.h 
../include/openssl/pkcs7.h -d1_srtp.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -d1_srtp.o: ../include/openssl/safestack.h ../include/openssl/sha.h -d1_srtp.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -d1_srtp.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -d1_srtp.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -d1_srtp.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -d1_srtp.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h d1_srtp.c -d1_srtp.o: srtp.h ssl_locl.h -d1_srvr.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -d1_srvr.o: ../include/openssl/bn.h ../include/openssl/buffer.h -d1_srvr.o: ../include/openssl/comp.h ../include/openssl/crypto.h -d1_srvr.o: ../include/openssl/dh.h ../include/openssl/dsa.h -d1_srvr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -d1_srvr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -d1_srvr.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -d1_srvr.o: ../include/openssl/evp.h ../include/openssl/hmac.h -d1_srvr.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -d1_srvr.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -d1_srvr.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -d1_srvr.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -d1_srvr.o: ../include/openssl/pem.h ../include/openssl/pem2.h -d1_srvr.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -d1_srvr.o: ../include/openssl/rand.h ../include/openssl/rsa.h -d1_srvr.o: ../include/openssl/safestack.h ../include/openssl/sha.h -d1_srvr.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -d1_srvr.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -d1_srvr.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -d1_srvr.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -d1_srvr.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h d1_srvr.c -d1_srvr.o: ssl_locl.h -kssl.o: ../include/openssl/asn1.h 
../include/openssl/bio.h -kssl.o: ../include/openssl/buffer.h ../include/openssl/comp.h -kssl.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -kssl.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -kssl.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -kssl.o: ../include/openssl/evp.h ../include/openssl/hmac.h -kssl.o: ../include/openssl/krb5_asn.h ../include/openssl/kssl.h -kssl.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -kssl.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -kssl.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -kssl.o: ../include/openssl/pem.h ../include/openssl/pem2.h -kssl.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -kssl.o: ../include/openssl/safestack.h ../include/openssl/sha.h -kssl.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -kssl.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -kssl.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -kssl.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -kssl.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h kssl.c -kssl.o: kssl_lcl.h -s23_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s23_clnt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s23_clnt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s23_clnt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s23_clnt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s23_clnt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s23_clnt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s23_clnt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s23_clnt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s23_clnt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s23_clnt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s23_clnt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s23_clnt.o: ../include/openssl/pqueue.h 
../include/openssl/rand.h -s23_clnt.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s23_clnt.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s23_clnt.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s23_clnt.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s23_clnt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s23_clnt.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s23_clnt.o: ../include/openssl/x509_vfy.h s23_clnt.c ssl_locl.h -s23_lib.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s23_lib.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s23_lib.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s23_lib.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s23_lib.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s23_lib.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s23_lib.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s23_lib.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s23_lib.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s23_lib.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s23_lib.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s23_lib.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s23_lib.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s23_lib.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s23_lib.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s23_lib.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s23_lib.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s23_lib.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s23_lib.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s23_lib.c -s23_lib.o: ssl_locl.h -s23_meth.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s23_meth.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s23_meth.o: ../include/openssl/crypto.h ../include/openssl/dsa.h 
-s23_meth.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s23_meth.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s23_meth.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s23_meth.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s23_meth.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s23_meth.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s23_meth.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s23_meth.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s23_meth.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s23_meth.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s23_meth.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s23_meth.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s23_meth.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s23_meth.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s23_meth.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s23_meth.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s23_meth.c -s23_meth.o: ssl_locl.h -s23_pkt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s23_pkt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s23_pkt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s23_pkt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s23_pkt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s23_pkt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s23_pkt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s23_pkt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s23_pkt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s23_pkt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s23_pkt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s23_pkt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s23_pkt.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s23_pkt.o: 
../include/openssl/safestack.h ../include/openssl/sha.h -s23_pkt.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s23_pkt.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s23_pkt.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s23_pkt.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s23_pkt.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s23_pkt.c -s23_pkt.o: ssl_locl.h -s23_srvr.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s23_srvr.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s23_srvr.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s23_srvr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s23_srvr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s23_srvr.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s23_srvr.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s23_srvr.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s23_srvr.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s23_srvr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s23_srvr.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s23_srvr.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s23_srvr.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s23_srvr.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s23_srvr.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s23_srvr.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s23_srvr.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s23_srvr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s23_srvr.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s23_srvr.o: ../include/openssl/x509_vfy.h s23_srvr.c ssl_locl.h -s2_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s2_clnt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s2_clnt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s2_clnt.o: 
../include/openssl/dtls1.h ../include/openssl/e_os2.h -s2_clnt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s2_clnt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s2_clnt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s2_clnt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s2_clnt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s2_clnt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s2_clnt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s2_clnt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s2_clnt.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s2_clnt.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s2_clnt.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s2_clnt.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s2_clnt.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s2_clnt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s2_clnt.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s2_clnt.o: ../include/openssl/x509_vfy.h s2_clnt.c ssl_locl.h -s2_enc.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s2_enc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s2_enc.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s2_enc.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s2_enc.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s2_enc.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s2_enc.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s2_enc.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s2_enc.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s2_enc.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s2_enc.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s2_enc.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s2_enc.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s2_enc.o: 
../include/openssl/safestack.h ../include/openssl/sha.h -s2_enc.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s2_enc.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s2_enc.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s2_enc.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s2_enc.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s2_enc.c -s2_enc.o: ssl_locl.h -s2_lib.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s2_lib.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s2_lib.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s2_lib.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s2_lib.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s2_lib.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s2_lib.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s2_lib.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s2_lib.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -s2_lib.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -s2_lib.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -s2_lib.o: ../include/openssl/pem.h ../include/openssl/pem2.h -s2_lib.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -s2_lib.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s2_lib.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s2_lib.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s2_lib.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s2_lib.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s2_lib.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s2_lib.o: ../include/openssl/x509_vfy.h s2_lib.c ssl_locl.h -s2_meth.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s2_meth.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s2_meth.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s2_meth.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s2_meth.o: 
../include/openssl/ec.h ../include/openssl/ecdh.h -s2_meth.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s2_meth.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s2_meth.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s2_meth.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s2_meth.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s2_meth.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s2_meth.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s2_meth.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s2_meth.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s2_meth.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s2_meth.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s2_meth.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s2_meth.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s2_meth.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s2_meth.c -s2_meth.o: ssl_locl.h -s2_pkt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s2_pkt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s2_pkt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s2_pkt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s2_pkt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s2_pkt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s2_pkt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s2_pkt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s2_pkt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s2_pkt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s2_pkt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s2_pkt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s2_pkt.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s2_pkt.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s2_pkt.o: ../include/openssl/srtp.h ../include/openssl/ssl.h 
-s2_pkt.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s2_pkt.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s2_pkt.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s2_pkt.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s2_pkt.c -s2_pkt.o: ssl_locl.h -s2_srvr.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s2_srvr.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s2_srvr.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s2_srvr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s2_srvr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s2_srvr.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s2_srvr.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s2_srvr.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s2_srvr.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s2_srvr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s2_srvr.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s2_srvr.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s2_srvr.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s2_srvr.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s2_srvr.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s2_srvr.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s2_srvr.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s2_srvr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s2_srvr.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s2_srvr.o: ../include/openssl/x509_vfy.h s2_srvr.c ssl_locl.h -s3_both.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_both.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_both.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s3_both.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_both.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_both.o: ../include/openssl/ecdsa.h 
../include/openssl/err.h -s3_both.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_both.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s3_both.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_both.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_both.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_both.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_both.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s3_both.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_both.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_both.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_both.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_both.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_both.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_both.o: ../include/openssl/x509_vfy.h s3_both.c ssl_locl.h -s3_cbc.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_cbc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_cbc.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s3_cbc.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_cbc.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_cbc.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_cbc.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_cbc.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s3_cbc.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -s3_cbc.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -s3_cbc.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -s3_cbc.o: ../include/openssl/pem.h ../include/openssl/pem2.h -s3_cbc.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -s3_cbc.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_cbc.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_cbc.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h 
-s3_cbc.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_cbc.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_cbc.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_cbc.o: ../include/openssl/x509_vfy.h s3_cbc.c ssl_locl.h -s3_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_clnt.o: ../include/openssl/bn.h ../include/openssl/buffer.h -s3_clnt.o: ../include/openssl/comp.h ../include/openssl/crypto.h -s3_clnt.o: ../include/openssl/dh.h ../include/openssl/dsa.h -s3_clnt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_clnt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_clnt.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -s3_clnt.o: ../include/openssl/err.h ../include/openssl/evp.h -s3_clnt.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -s3_clnt.o: ../include/openssl/lhash.h ../include/openssl/md5.h -s3_clnt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_clnt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_clnt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_clnt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_clnt.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s3_clnt.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_clnt.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_clnt.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_clnt.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_clnt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_clnt.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_clnt.o: ../include/openssl/x509_vfy.h kssl_lcl.h s3_clnt.c ssl_locl.h -s3_enc.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_enc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_enc.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s3_enc.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_enc.o: 
../include/openssl/ec.h ../include/openssl/ecdh.h -s3_enc.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_enc.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_enc.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s3_enc.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -s3_enc.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -s3_enc.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -s3_enc.o: ../include/openssl/pem.h ../include/openssl/pem2.h -s3_enc.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -s3_enc.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_enc.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_enc.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_enc.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_enc.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_enc.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_enc.o: ../include/openssl/x509_vfy.h s3_enc.c ssl_locl.h -s3_lib.o: ../crypto/ec/ec_lcl.h ../e_os.h ../include/openssl/asn1.h -s3_lib.o: ../include/openssl/bio.h ../include/openssl/bn.h -s3_lib.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_lib.o: ../include/openssl/crypto.h ../include/openssl/dh.h -s3_lib.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h -s3_lib.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -s3_lib.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -s3_lib.o: ../include/openssl/err.h ../include/openssl/evp.h -s3_lib.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -s3_lib.o: ../include/openssl/lhash.h ../include/openssl/md5.h -s3_lib.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_lib.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_lib.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_lib.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_lib.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h 
-s3_lib.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s3_lib.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s3_lib.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s3_lib.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s3_lib.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s3_lib.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h kssl_lcl.h -s3_lib.o: s3_lib.c ssl_locl.h -s3_meth.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_meth.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_meth.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s3_meth.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_meth.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_meth.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_meth.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_meth.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s3_meth.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_meth.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_meth.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_meth.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_meth.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -s3_meth.o: ../include/openssl/safestack.h ../include/openssl/sha.h -s3_meth.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -s3_meth.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -s3_meth.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -s3_meth.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -s3_meth.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h s3_meth.c -s3_meth.o: ssl_locl.h -s3_pkt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_pkt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -s3_pkt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -s3_pkt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h 
-s3_pkt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_pkt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_pkt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_pkt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -s3_pkt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_pkt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_pkt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_pkt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_pkt.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -s3_pkt.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_pkt.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_pkt.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_pkt.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_pkt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_pkt.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_pkt.o: ../include/openssl/x509_vfy.h s3_pkt.c ssl_locl.h -s3_srvr.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -s3_srvr.o: ../include/openssl/bn.h ../include/openssl/buffer.h -s3_srvr.o: ../include/openssl/comp.h ../include/openssl/crypto.h -s3_srvr.o: ../include/openssl/dh.h ../include/openssl/dsa.h -s3_srvr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -s3_srvr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -s3_srvr.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -s3_srvr.o: ../include/openssl/evp.h ../include/openssl/hmac.h -s3_srvr.o: ../include/openssl/krb5_asn.h ../include/openssl/kssl.h -s3_srvr.o: ../include/openssl/lhash.h ../include/openssl/md5.h -s3_srvr.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -s3_srvr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -s3_srvr.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -s3_srvr.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -s3_srvr.o: 
../include/openssl/pqueue.h ../include/openssl/rand.h -s3_srvr.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -s3_srvr.o: ../include/openssl/sha.h ../include/openssl/srtp.h -s3_srvr.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -s3_srvr.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -s3_srvr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -s3_srvr.o: ../include/openssl/tls1.h ../include/openssl/x509.h -s3_srvr.o: ../include/openssl/x509_vfy.h kssl_lcl.h s3_srvr.c ssl_locl.h -ssl_algs.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_algs.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_algs.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -ssl_algs.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_algs.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_algs.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -ssl_algs.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ssl_algs.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ssl_algs.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ssl_algs.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssl_algs.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssl_algs.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssl_algs.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -ssl_algs.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_algs.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_algs.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_algs.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_algs.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_algs.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_algs.c -ssl_algs.o: ssl_locl.h -ssl_asn1.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/asn1_mac.h -ssl_asn1.o: ../include/openssl/bio.h ../include/openssl/buffer.h -ssl_asn1.o: 
../include/openssl/comp.h ../include/openssl/crypto.h -ssl_asn1.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h -ssl_asn1.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ssl_asn1.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ssl_asn1.o: ../include/openssl/err.h ../include/openssl/evp.h -ssl_asn1.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ssl_asn1.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ssl_asn1.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ssl_asn1.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ssl_asn1.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ssl_asn1.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -ssl_asn1.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -ssl_asn1.o: ../include/openssl/sha.h ../include/openssl/srtp.h -ssl_asn1.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -ssl_asn1.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -ssl_asn1.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ssl_asn1.o: ../include/openssl/tls1.h ../include/openssl/x509.h -ssl_asn1.o: ../include/openssl/x509_vfy.h ssl_asn1.c ssl_locl.h -ssl_cert.o: ../crypto/o_dir.h ../e_os.h ../include/openssl/asn1.h -ssl_cert.o: ../include/openssl/bio.h ../include/openssl/bn.h -ssl_cert.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_cert.o: ../include/openssl/conf.h ../include/openssl/crypto.h -ssl_cert.o: ../include/openssl/dh.h ../include/openssl/dsa.h -ssl_cert.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_cert.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_cert.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -ssl_cert.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ssl_cert.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ssl_cert.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ssl_cert.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssl_cert.o: 
../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssl_cert.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssl_cert.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -ssl_cert.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_cert.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_cert.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_cert.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_cert.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_cert.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -ssl_cert.o: ../include/openssl/x509v3.h ssl_cert.c ssl_locl.h -ssl_ciph.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_ciph.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_ciph.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -ssl_ciph.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_ciph.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_ciph.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ssl_ciph.o: ../include/openssl/err.h ../include/openssl/evp.h -ssl_ciph.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ssl_ciph.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ssl_ciph.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ssl_ciph.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ssl_ciph.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ssl_ciph.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -ssl_ciph.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -ssl_ciph.o: ../include/openssl/sha.h ../include/openssl/srtp.h -ssl_ciph.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -ssl_ciph.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -ssl_ciph.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ssl_ciph.o: ../include/openssl/tls1.h ../include/openssl/x509.h -ssl_ciph.o: ../include/openssl/x509_vfy.h ssl_ciph.c ssl_locl.h 
-ssl_err.o: ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_err.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_err.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -ssl_err.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ssl_err.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ssl_err.o: ../include/openssl/err.h ../include/openssl/evp.h -ssl_err.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ssl_err.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ssl_err.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ssl_err.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ssl_err.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ssl_err.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -ssl_err.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_err.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_err.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_err.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_err.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_err.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_err.c -ssl_err2.o: ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_err2.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_err2.o: ../include/openssl/crypto.h ../include/openssl/dtls1.h -ssl_err2.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ssl_err2.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ssl_err2.o: ../include/openssl/err.h ../include/openssl/evp.h -ssl_err2.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ssl_err2.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ssl_err2.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ssl_err2.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ssl_err2.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ssl_err2.o: ../include/openssl/pkcs7.h 
../include/openssl/pqueue.h -ssl_err2.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_err2.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_err2.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_err2.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_err2.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_err2.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_err2.c -ssl_lib.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_lib.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_lib.o: ../include/openssl/conf.h ../include/openssl/crypto.h -ssl_lib.o: ../include/openssl/dh.h ../include/openssl/dsa.h -ssl_lib.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_lib.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_lib.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ssl_lib.o: ../include/openssl/err.h ../include/openssl/evp.h -ssl_lib.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ssl_lib.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ssl_lib.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -ssl_lib.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssl_lib.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssl_lib.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssl_lib.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -ssl_lib.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -ssl_lib.o: ../include/openssl/sha.h ../include/openssl/srtp.h -ssl_lib.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -ssl_lib.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -ssl_lib.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ssl_lib.o: ../include/openssl/tls1.h ../include/openssl/x509.h -ssl_lib.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h kssl_lcl.h -ssl_lib.o: ssl_lib.c ssl_locl.h -ssl_rsa.o: ../e_os.h ../include/openssl/asn1.h 
../include/openssl/bio.h -ssl_rsa.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_rsa.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -ssl_rsa.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_rsa.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_rsa.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -ssl_rsa.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ssl_rsa.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ssl_rsa.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ssl_rsa.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssl_rsa.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssl_rsa.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssl_rsa.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -ssl_rsa.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_rsa.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_rsa.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_rsa.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_rsa.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_rsa.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -ssl_rsa.o: ssl_rsa.c -ssl_sess.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_sess.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_sess.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -ssl_sess.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_sess.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_sess.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ssl_sess.o: ../include/openssl/err.h ../include/openssl/evp.h -ssl_sess.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -ssl_sess.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ssl_sess.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ssl_sess.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h 
-ssl_sess.o: ../include/openssl/pem.h ../include/openssl/pem2.h -ssl_sess.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -ssl_sess.o: ../include/openssl/rand.h ../include/openssl/rsa.h -ssl_sess.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_sess.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_sess.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_sess.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_sess.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_sess.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -ssl_sess.o: ssl_sess.c -ssl_stat.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ssl_stat.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_stat.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -ssl_stat.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_stat.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_stat.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -ssl_stat.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ssl_stat.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ssl_stat.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ssl_stat.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssl_stat.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssl_stat.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssl_stat.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -ssl_stat.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_stat.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_stat.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_stat.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_stat.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_stat.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -ssl_stat.o: ssl_stat.c -ssl_txt.o: ../e_os.h ../include/openssl/asn1.h 
../include/openssl/bio.h -ssl_txt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -ssl_txt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -ssl_txt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -ssl_txt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ssl_txt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -ssl_txt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ssl_txt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ssl_txt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ssl_txt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssl_txt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssl_txt.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssl_txt.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -ssl_txt.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ssl_txt.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssl_txt.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssl_txt.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -ssl_txt.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssl_txt.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -ssl_txt.o: ssl_txt.c -t1_clnt.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -t1_clnt.o: ../include/openssl/buffer.h ../include/openssl/comp.h -t1_clnt.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -t1_clnt.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -t1_clnt.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -t1_clnt.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -t1_clnt.o: ../include/openssl/evp.h ../include/openssl/hmac.h -t1_clnt.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -t1_clnt.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -t1_clnt.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -t1_clnt.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -t1_clnt.o: 
../include/openssl/pem2.h ../include/openssl/pkcs7.h -t1_clnt.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -t1_clnt.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -t1_clnt.o: ../include/openssl/sha.h ../include/openssl/srtp.h -t1_clnt.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -t1_clnt.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -t1_clnt.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -t1_clnt.o: ../include/openssl/tls1.h ../include/openssl/x509.h -t1_clnt.o: ../include/openssl/x509_vfy.h ssl_locl.h t1_clnt.c -t1_enc.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -t1_enc.o: ../include/openssl/buffer.h ../include/openssl/comp.h -t1_enc.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -t1_enc.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -t1_enc.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -t1_enc.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -t1_enc.o: ../include/openssl/evp.h ../include/openssl/hmac.h -t1_enc.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -t1_enc.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -t1_enc.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -t1_enc.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -t1_enc.o: ../include/openssl/pem.h ../include/openssl/pem2.h -t1_enc.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -t1_enc.o: ../include/openssl/rand.h ../include/openssl/rsa.h -t1_enc.o: ../include/openssl/safestack.h ../include/openssl/sha.h -t1_enc.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -t1_enc.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -t1_enc.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -t1_enc.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -t1_enc.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -t1_enc.o: t1_enc.c -t1_lib.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -t1_lib.o: 
../include/openssl/buffer.h ../include/openssl/comp.h -t1_lib.o: ../include/openssl/conf.h ../include/openssl/crypto.h -t1_lib.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h -t1_lib.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -t1_lib.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -t1_lib.o: ../include/openssl/err.h ../include/openssl/evp.h -t1_lib.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -t1_lib.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -t1_lib.o: ../include/openssl/objects.h ../include/openssl/ocsp.h -t1_lib.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -t1_lib.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -t1_lib.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -t1_lib.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -t1_lib.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -t1_lib.o: ../include/openssl/sha.h ../include/openssl/srtp.h -t1_lib.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -t1_lib.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -t1_lib.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -t1_lib.o: ../include/openssl/tls1.h ../include/openssl/x509.h -t1_lib.o: ../include/openssl/x509_vfy.h ../include/openssl/x509v3.h ssl_locl.h -t1_lib.o: t1_lib.c -t1_meth.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -t1_meth.o: ../include/openssl/buffer.h ../include/openssl/comp.h -t1_meth.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -t1_meth.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -t1_meth.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -t1_meth.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -t1_meth.o: ../include/openssl/evp.h ../include/openssl/hmac.h -t1_meth.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -t1_meth.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -t1_meth.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h 
-t1_meth.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -t1_meth.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -t1_meth.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -t1_meth.o: ../include/openssl/safestack.h ../include/openssl/sha.h -t1_meth.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -t1_meth.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -t1_meth.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -t1_meth.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -t1_meth.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -t1_meth.o: t1_meth.c -t1_reneg.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -t1_reneg.o: ../include/openssl/buffer.h ../include/openssl/comp.h -t1_reneg.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -t1_reneg.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -t1_reneg.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -t1_reneg.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -t1_reneg.o: ../include/openssl/evp.h ../include/openssl/hmac.h -t1_reneg.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -t1_reneg.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -t1_reneg.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -t1_reneg.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -t1_reneg.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -t1_reneg.o: ../include/openssl/pqueue.h ../include/openssl/rsa.h -t1_reneg.o: ../include/openssl/safestack.h ../include/openssl/sha.h -t1_reneg.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -t1_reneg.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -t1_reneg.o: ../include/openssl/ssl3.h ../include/openssl/stack.h -t1_reneg.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -t1_reneg.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ssl_locl.h -t1_reneg.o: t1_reneg.c -t1_srvr.o: ../e_os.h ../include/openssl/asn1.h 
../include/openssl/bio.h -t1_srvr.o: ../include/openssl/buffer.h ../include/openssl/comp.h -t1_srvr.o: ../include/openssl/crypto.h ../include/openssl/dsa.h -t1_srvr.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h -t1_srvr.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -t1_srvr.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -t1_srvr.o: ../include/openssl/evp.h ../include/openssl/hmac.h -t1_srvr.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -t1_srvr.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -t1_srvr.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -t1_srvr.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -t1_srvr.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -t1_srvr.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -t1_srvr.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -t1_srvr.o: ../include/openssl/sha.h ../include/openssl/srtp.h -t1_srvr.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -t1_srvr.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -t1_srvr.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -t1_srvr.o: ../include/openssl/tls1.h ../include/openssl/x509.h -t1_srvr.o: ../include/openssl/x509_vfy.h ssl_locl.h t1_srvr.c -tls_srp.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -tls_srp.o: ../include/openssl/bn.h ../include/openssl/buffer.h -tls_srp.o: ../include/openssl/comp.h ../include/openssl/crypto.h -tls_srp.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h -tls_srp.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -tls_srp.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -tls_srp.o: ../include/openssl/err.h ../include/openssl/evp.h -tls_srp.o: ../include/openssl/hmac.h ../include/openssl/kssl.h -tls_srp.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -tls_srp.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -tls_srp.o: ../include/openssl/opensslv.h 
../include/openssl/ossl_typ.h -tls_srp.o: ../include/openssl/pem.h ../include/openssl/pem2.h -tls_srp.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h -tls_srp.o: ../include/openssl/rand.h ../include/openssl/rsa.h -tls_srp.o: ../include/openssl/safestack.h ../include/openssl/sha.h -tls_srp.o: ../include/openssl/srp.h ../include/openssl/srtp.h -tls_srp.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h -tls_srp.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h -tls_srp.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -tls_srp.o: ../include/openssl/tls1.h ../include/openssl/x509.h -tls_srp.o: ../include/openssl/x509_vfy.h ssl_locl.h tls_srp.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s23_clnt.c nodejs-0.11.15/deps/openssl/openssl/ssl/s23_clnt.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s23_clnt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s23_clnt.c 2015-01-20 21:22:17.000000000 +0000 @@ -125,9 +125,11 @@ if (ver == SSL2_VERSION) return(SSLv2_client_method()); #endif +#ifndef OPENSSL_NO_SSL3 if (ver == SSL3_VERSION) return(SSLv3_client_method()); - else if (ver == TLS1_VERSION) +#endif + if (ver == TLS1_VERSION) return(TLSv1_client_method()); else if (ver == TLS1_1_VERSION) return(TLSv1_1_client_method()); @@ -698,6 +700,7 @@ { /* we have sslv3 or tls1 (server hello or alert) */ +#ifndef OPENSSL_NO_SSL3 if ((p[2] == SSL3_VERSION_MINOR) && !(s->options & SSL_OP_NO_SSLv3)) { @@ -712,7 +715,9 @@ s->version=SSL3_VERSION; s->method=SSLv3_client_method(); } - else if ((p[2] == TLS1_VERSION_MINOR) && + else +#endif + if ((p[2] == TLS1_VERSION_MINOR) && !(s->options & SSL_OP_NO_TLSv1)) { s->version=TLS1_VERSION; @@ -736,6 +741,9 @@ goto err; } + /* ensure that TLS_MAX_VERSION is up-to-date */ + OPENSSL_assert(s->version <= TLS_MAX_VERSION); + if (p[0] == SSL3_RT_ALERT && p[5] != SSL3_AL_WARNING) { /* fatal alert */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s23_lib.c 
nodejs-0.11.15/deps/openssl/openssl/ssl/s23_lib.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s23_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s23_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -107,6 +107,13 @@ long l; /* We can write SSLv2 and SSLv3 ciphers */ + /* but no ECC ciphers */ + if (c->algorithm_mkey == SSL_kECDHr || + c->algorithm_mkey == SSL_kECDHe || + c->algorithm_mkey == SSL_kEECDH || + c->algorithm_auth == SSL_aECDH || + c->algorithm_auth == SSL_aECDSA) + return 0; if (p != NULL) { l=c->id; diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s23_srvr.c nodejs-0.11.15/deps/openssl/openssl/ssl/s23_srvr.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s23_srvr.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s23_srvr.c 2015-01-20 21:22:17.000000000 +0000 @@ -127,9 +127,11 @@ if (ver == SSL2_VERSION) return(SSLv2_server_method()); #endif +#ifndef OPENSSL_NO_SSL3 if (ver == SSL3_VERSION) return(SSLv3_server_method()); - else if (ver == TLS1_VERSION) +#endif + if (ver == TLS1_VERSION) return(TLSv1_server_method()); else if (ver == TLS1_1_VERSION) return(TLSv1_1_server_method()); @@ -348,23 +350,19 @@ * Client Hello message, this would be difficult, and we'd have * to read more records to find out. * No known SSL 3.0 client fragments ClientHello like this, - * so we simply assume TLS 1.0 to avoid protocol version downgrade - * attacks. */ + * so we simply reject such connections to avoid + * protocol version downgrade attacks. */ if (p[3] == 0 && p[4] < 6) { -#if 0 SSLerr(SSL_F_SSL23_GET_CLIENT_HELLO,SSL_R_RECORD_TOO_SMALL); goto err; -#else - v[1] = TLS1_VERSION_MINOR; -#endif } /* if major version number > 3 set minor to a value * which will use the highest version 3 we support. * If TLS 2.0 ever appears we will need to revise * this.... 
*/ - else if (p[9] > SSL3_VERSION_MAJOR) + if (p[9] > SSL3_VERSION_MAJOR) v[1]=0xff; else v[1]=p[10]; /* minor version according to client_version */ @@ -425,6 +423,9 @@ } } + /* ensure that TLS_MAX_VERSION is up-to-date */ + OPENSSL_assert(s->version <= TLS_MAX_VERSION); + #ifdef OPENSSL_FIPS if (FIPS_mode() && (s->version < TLS1_VERSION)) { @@ -444,14 +445,34 @@ v[0] = p[3]; /* == SSL3_VERSION_MAJOR */ v[1] = p[4]; + /* An SSLv3/TLSv1 backwards-compatible CLIENT-HELLO in an SSLv2 + * header is sent directly on the wire, not wrapped as a TLS + * record. It's format is: + * Byte Content + * 0-1 msg_length + * 2 msg_type + * 3-4 version + * 5-6 cipher_spec_length + * 7-8 session_id_length + * 9-10 challenge_length + * ... ... + */ n=((p[0]&0x7f)<<8)|p[1]; if (n > (1024*4)) { SSLerr(SSL_F_SSL23_GET_CLIENT_HELLO,SSL_R_RECORD_TOO_LARGE); goto err; } + if (n < 9) + { + SSLerr(SSL_F_SSL23_GET_CLIENT_HELLO,SSL_R_RECORD_LENGTH_MISMATCH); + goto err; + } j=ssl23_read_bytes(s,n+2); + /* We previously read 11 bytes, so if j > 0, we must have + * j == n+2 == s->packet_length. We have at least 11 valid + * packet bytes. 
*/ if (j <= 0) return(j); ssl3_finish_mac(s, s->packet+2, s->packet_length-2); @@ -581,6 +602,12 @@ if ((type == 2) || (type == 3)) { /* we have SSLv3/TLSv1 (type 2: SSL2 style, type 3: SSL3/TLS style) */ + s->method = ssl23_get_server_method(s->version); + if (s->method == NULL) + { + SSLerr(SSL_F_SSL23_GET_CLIENT_HELLO,SSL_R_UNSUPPORTED_PROTOCOL); + goto err; + } if (!ssl_init_wbio_buffer(s,1)) goto err; @@ -608,14 +635,6 @@ s->s3->rbuf.left=0; s->s3->rbuf.offset=0; } - if (s->version == TLS1_2_VERSION) - s->method = TLSv1_2_server_method(); - else if (s->version == TLS1_1_VERSION) - s->method = TLSv1_1_server_method(); - else if (s->version == TLS1_VERSION) - s->method = TLSv1_server_method(); - else - s->method = SSLv3_server_method(); #if 0 /* ssl3_get_client_hello does this */ s->client_version=(v[0]<<8)|v[1]; #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s2_lib.c nodejs-0.11.15/deps/openssl/openssl/ssl/s2_lib.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s2_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s2_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -250,7 +250,7 @@ SSL_SSLV2, SSL_NOT_EXP|SSL_HIGH, 0, - 168, + 112, 168, }, @@ -391,6 +391,8 @@ case SSL_CTRL_GET_SESSION_REUSED: ret=s->hit; break; + case SSL_CTRL_CHECK_PROTO_VERSION: + return ssl3_ctrl(s, SSL_CTRL_CHECK_PROTO_VERSION, larg, parg); default: break; } @@ -437,7 +439,7 @@ if (p != NULL) { l=c->id; - if ((l & 0xff000000) != 0x02000000) return(0); + if ((l & 0xff000000) != 0x02000000 && l != SSL3_CK_FALLBACK_SCSV) return(0); p[0]=((unsigned char)(l>>16L))&0xFF; p[1]=((unsigned char)(l>> 8L))&0xFF; p[2]=((unsigned char)(l ))&0xFF; diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s3_cbc.c nodejs-0.11.15/deps/openssl/openssl/ssl/s3_cbc.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s3_cbc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s3_cbc.c 2015-01-20 21:22:17.000000000 +0000 @@ -53,6 +53,7 @@ * */ +#include 
"../crypto/constant_time_locl.h" #include "ssl_locl.h" #include <openssl/md5.h> @@ -67,37 +68,6 @@ * supported by TLS.) */ #define MAX_HASH_BLOCK_SIZE 128 -/* Some utility functions are needed: - * - * These macros return the given value with the MSB copied to all the other - * bits. They use the fact that arithmetic shift shifts-in the sign bit. - * However, this is not ensured by the C standard so you may need to replace - * them with something else on odd CPUs. */ -#define DUPLICATE_MSB_TO_ALL(x) ( (unsigned)( (int)(x) >> (sizeof(int)*8-1) ) ) -#define DUPLICATE_MSB_TO_ALL_8(x) ((unsigned char)(DUPLICATE_MSB_TO_ALL(x))) - -/* constant_time_lt returns 0xff if a<b and 0x00 otherwise. */ -static unsigned constant_time_lt(unsigned a, unsigned b) - { - a -= b; - return DUPLICATE_MSB_TO_ALL(a); - } - -/* constant_time_ge returns 0xff if a>=b and 0x00 otherwise. */ -static unsigned constant_time_ge(unsigned a, unsigned b) - { - a -= b; - return DUPLICATE_MSB_TO_ALL(~a); - } - -/* constant_time_eq_8 returns 0xff if a==b and 0x00 otherwise. */ -static unsigned char constant_time_eq_8(unsigned a, unsigned b) - { - unsigned c = a ^ b; - c--; - return DUPLICATE_MSB_TO_ALL_8(c); - } - /* ssl3_cbc_remove_padding removes padding from the decrypted, SSLv3, CBC * record in |rec| by updating |rec->length| in constant time. 
* @@ -126,8 +96,8 @@ padding_length = good & (padding_length+1); rec->length -= padding_length; rec->type |= padding_length<<8; /* kludge: pass padding length */ - return (int)((good & 1) | (~good & -1)); -} + return constant_time_select_int(good, 1, -1); + } /* tls1_cbc_remove_padding removes the CBC padding from the decrypted, TLS, CBC * record in |rec| in constant time and returns 1 if the padding is valid and @@ -208,7 +178,7 @@ for (i = 0; i < to_check; i++) { - unsigned char mask = constant_time_ge(padding_length, i); + unsigned char mask = constant_time_ge_8(padding_length, i); unsigned char b = rec->data[rec->length-1-i]; /* The final |padding_length+1| bytes should all have the value * |padding_length|. Therefore the XOR should be zero. */ @@ -216,20 +186,14 @@ } /* If any of the final |padding_length+1| bytes had the wrong value, - * one or more of the lower eight bits of |good| will be cleared. We - * AND the bottom 8 bits together and duplicate the result to all the - * bits. */ - good &= good >> 4; - good &= good >> 2; - good &= good >> 1; - good <<= sizeof(good)*8-1; - good = DUPLICATE_MSB_TO_ALL(good); - + * one or more of the lower eight bits of |good| will be cleared. 
+ */ + good = constant_time_eq(0xff, good & 0xff); padding_length = good & (padding_length+1); rec->length -= padding_length; rec->type |= padding_length<<8; /* kludge: pass padding length */ - return (int)((good & 1) | (~good & -1)); + return constant_time_select_int(good, 1, -1); } /* ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in @@ -296,8 +260,8 @@ memset(rotated_mac, 0, md_size); for (i = scan_start, j = 0; i < orig_len; i++) { - unsigned char mac_started = constant_time_ge(i, mac_start); - unsigned char mac_ended = constant_time_ge(i, mac_end); + unsigned char mac_started = constant_time_ge_8(i, mac_start); + unsigned char mac_ended = constant_time_ge_8(i, mac_end); unsigned char b = rec->data[i]; rotated_mac[j++] |= b & mac_started & ~mac_ended; j &= constant_time_lt(j,md_size); @@ -683,12 +647,12 @@ b = data[k-header_length]; k++; - is_past_c = is_block_a & constant_time_ge(j, c); - is_past_cp1 = is_block_a & constant_time_ge(j, c+1); + is_past_c = is_block_a & constant_time_ge_8(j, c); + is_past_cp1 = is_block_a & constant_time_ge_8(j, c+1); /* If this is the block containing the end of the * application data, and we are at the offset for the * 0x80 value, then overwrite b with 0x80. */ - b = (b&~is_past_c) | (0x80&is_past_c); + b = constant_time_select_8(is_past_c, 0x80, b); /* If this the the block containing the end of the * application data and we're past the 0x80 value then * just write zero. */ @@ -704,7 +668,8 @@ if (j >= md_block_size - md_length_size) { /* If this is index_b, write a length byte. 
*/ - b = (b&~is_block_b) | (is_block_b&length_bytes[j-(md_block_size-md_length_size)]); + b = constant_time_select_8( + is_block_b, length_bytes[j-(md_block_size-md_length_size)], b); } block[j] = b; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s3_clnt.c nodejs-0.11.15/deps/openssl/openssl/ssl/s3_clnt.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s3_clnt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s3_clnt.c 2015-01-20 21:22:17.000000000 +0000 @@ -326,9 +326,9 @@ break; } #endif - /* Check if it is anon DH/ECDH */ + /* Check if it is anon DH/ECDH, SRP auth */ /* or PSK */ - if (!(s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) && + if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aSRP)) && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { ret=ssl3_get_server_certificate(s); @@ -510,6 +510,7 @@ s->method->ssl3_enc->client_finished_label, s->method->ssl3_enc->client_finished_label_len); if (ret <= 0) goto end; + s->s3->flags |= SSL3_FLAGS_CCS_OK; s->state=SSL3_ST_CW_FLUSH; /* clear flags */ @@ -559,6 +560,7 @@ case SSL3_ST_CR_FINISHED_A: case SSL3_ST_CR_FINISHED_B: + s->s3->flags |= SSL3_FLAGS_CCS_OK; ret=ssl3_get_finished(s,SSL3_ST_CR_FINISHED_A, SSL3_ST_CR_FINISHED_B); if (ret <= 0) goto end; @@ -900,6 +902,7 @@ { s->session->cipher = pref_cipher ? 
pref_cipher : ssl_get_cipher_by_char(s, p+j); + s->s3->flags |= SSL3_FLAGS_CCS_OK; } } #endif /* OPENSSL_NO_TLSEXT */ @@ -915,6 +918,7 @@ SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); goto f_err; } + s->s3->flags |= SSL3_FLAGS_CCS_OK; s->hit=1; } else /* a miss or crap from the other end */ @@ -950,6 +954,15 @@ SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_CIPHER_RETURNED); goto f_err; } +#ifndef OPENSSL_NO_SRP + if (((c->algorithm_mkey & SSL_kSRP) || (c->algorithm_auth & SSL_aSRP)) && + !(s->srp_ctx.srp_Mask & SSL_kSRP)) + { + al=SSL_AD_ILLEGAL_PARAMETER; + SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_CIPHER_RETURNED); + goto f_err; + } +#endif /* OPENSSL_NO_SRP */ p+=ssl_put_cipher_by_char(s,NULL,NULL); sk=ssl_get_ciphers_by_id(s); @@ -1264,8 +1277,8 @@ #endif EVP_MD_CTX md_ctx; unsigned char *param,*p; - int al,i,j,param_len,ok; - long n,alg_k,alg_a; + int al,j,ok; + long i,param_len,n,alg_k,alg_a; EVP_PKEY *pkey=NULL; const EVP_MD *md = NULL; #ifndef OPENSSL_NO_RSA @@ -1341,36 +1354,48 @@ s->session->sess_cert=ssl_sess_cert_new(); } + /* Total length of the parameters including the length prefix */ param_len=0; + alg_k=s->s3->tmp.new_cipher->algorithm_mkey; alg_a=s->s3->tmp.new_cipher->algorithm_auth; EVP_MD_CTX_init(&md_ctx); + al=SSL_AD_DECODE_ERROR; + #ifndef OPENSSL_NO_PSK if (alg_k & SSL_kPSK) { char tmp_id_hint[PSK_MAX_IDENTITY_LEN+1]; - al=SSL_AD_HANDSHAKE_FAILURE; + param_len = 2; + if (param_len > n) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } n2s(p,i); - param_len=i+2; + /* Store PSK identity hint for later use, hint is used * in ssl3_send_client_key_exchange. Assume that the * maximum length of a PSK identity hint can be as * long as the maximum length of a PSK identity. 
*/ if (i > PSK_MAX_IDENTITY_LEN) { + al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } - if (param_len > n) + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH); goto f_err; } + param_len += i; + /* If received PSK identity hint contains NULL * characters, the hint is truncated from the first * NULL. p may not be ending with NULL, so create a @@ -1382,6 +1407,7 @@ s->ctx->psk_identity_hint = BUF_strdup(tmp_id_hint); if (s->ctx->psk_identity_hint == NULL) { + al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto f_err; } @@ -1394,14 +1420,22 @@ #ifndef OPENSSL_NO_SRP if (alg_k & SSL_kSRP) { - n2s(p,i); - param_len=i+2; + param_len = 2; if (param_len > n) { - al=SSL_AD_DECODE_ERROR; + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + n2s(p,i); + + if (i > n - param_len) + { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_N_LENGTH); goto f_err; } + param_len += i; + if (!(s->srp_ctx.N=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1409,14 +1443,24 @@ } p+=i; + + if (2 > n - param_len) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + param_len += 2; + n2s(p,i); - param_len+=i+2; - if (param_len > n) + + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_G_LENGTH); goto f_err; } + param_len += i; + if (!(s->srp_ctx.g=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1424,15 +1468,25 @@ } p+=i; + + if (1 > n - param_len) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + param_len += 1; + i = (unsigned int)(p[0]); p++; - param_len+=i+1; - if (param_len > n) + + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_S_LENGTH); goto f_err; } + 
param_len += i; + if (!(s->srp_ctx.s=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1440,14 +1494,23 @@ } p+=i; + if (2 > n - param_len) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + param_len += 2; + n2s(p,i); - param_len+=i+2; - if (param_len > n) + + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_B_LENGTH); goto f_err; } + param_len += i; + if (!(s->srp_ctx.B=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1456,6 +1519,12 @@ p+=i; n-=param_len; + if (!srp_verify_server_param(s, &al)) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_PARAMETERS); + goto f_err; + } + /* We must check if there is a certificate */ #ifndef OPENSSL_NO_RSA if (alg_a & SSL_aRSA) @@ -1479,14 +1548,23 @@ SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE); goto err; } - n2s(p,i); - param_len=i+2; + + param_len = 2; if (param_len > n) { - al=SSL_AD_DECODE_ERROR; + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + n2s(p,i); + + if (i > n - param_len) + { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_MODULUS_LENGTH); goto f_err; } + param_len += i; + if (!(rsa->n=BN_bin2bn(p,i,rsa->n))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1494,14 +1572,23 @@ } p+=i; + if (2 > n - param_len) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + param_len += 2; + n2s(p,i); - param_len+=i+2; - if (param_len > n) + + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_E_LENGTH); goto f_err; } + param_len += i; + if (!(rsa->e=BN_bin2bn(p,i,rsa->e))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1533,14 +1620,23 @@ SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_DH_LIB); goto err; } - n2s(p,i); - param_len=i+2; + + param_len = 2; if (param_len > n) { - al=SSL_AD_DECODE_ERROR; + 
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + n2s(p,i); + + if (i > n - param_len) + { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_P_LENGTH); goto f_err; } + param_len += i; + if (!(dh->p=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1548,14 +1644,23 @@ } p+=i; + if (2 > n - param_len) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + param_len += 2; + n2s(p,i); - param_len+=i+2; - if (param_len > n) + + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_G_LENGTH); goto f_err; } + param_len += i; + if (!(dh->g=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1563,14 +1668,23 @@ } p+=i; + if (2 > n - param_len) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + param_len += 2; + n2s(p,i); - param_len+=i+2; - if (param_len > n) + + if (i > n - param_len) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_PUB_KEY_LENGTH); goto f_err; } + param_len += i; + if (!(dh->pub_key=BN_bin2bn(p,i,NULL))) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB); @@ -1622,12 +1736,19 @@ */ /* XXX: For now we only support named (not generic) curves - * and the ECParameters in this case is just three bytes. + * and the ECParameters in this case is just three bytes. 
We + * also need one byte for the length of the encoded point */ - param_len=3; - if ((param_len > n) || - (*p != NAMED_CURVE_TYPE) || - ((curve_nid = tls1_ec_curve_id2nid(*(p + 2))) == 0)) + param_len=4; + if (param_len > n) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + + if ((*p != NAMED_CURVE_TYPE) || + ((curve_nid = tls1_ec_curve_id2nid(*(p + 2))) == 0)) { al=SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS); @@ -1669,15 +1790,15 @@ encoded_pt_len = *p; /* length of encoded point */ p+=1; - param_len += (1 + encoded_pt_len); - if ((param_len > n) || + + if ((encoded_pt_len > n - param_len) || (EC_POINT_oct2point(group, srvr_ecpoint, p, encoded_pt_len, bn_ctx) == 0)) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_ECPOINT); goto f_err; } + param_len += encoded_pt_len; n-=param_len; p+=encoded_pt_len; @@ -1720,7 +1841,15 @@ { if (TLS1_get_version(s) >= TLS1_2_VERSION) { - int sigalg = tls12_get_sigid(pkey); + int sigalg; + if (2 > n) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } + + sigalg = tls12_get_sigid(pkey); /* Should never happen */ if (sigalg == -1) { @@ -1738,7 +1867,6 @@ if (md == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNKNOWN_DIGEST); - al=SSL_AD_DECODE_ERROR; goto f_err; } #ifdef SSL_DEBUG @@ -1749,15 +1877,21 @@ } else md = EVP_sha1(); - + + if (2 > n) + { + SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, + SSL_R_LENGTH_TOO_SHORT); + goto f_err; + } n2s(p,i); n-=2; j=EVP_PKEY_size(pkey); + /* Check signature length. 
If n is 0 then signature is empty */ if ((i != n) || (n > j) || (n <= 0)) { /* wrong packet length */ - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_SIGNATURE_LENGTH); goto f_err; } @@ -1766,6 +1900,7 @@ if (pkey->type == EVP_PKEY_RSA && TLS1_get_version(s) < TLS1_2_VERSION) { int num; + unsigned int size; j=0; q=md_buf; @@ -1778,9 +1913,9 @@ EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx,param,param_len); - EVP_DigestFinal_ex(&md_ctx,q,(unsigned int *)&i); - q+=i; - j+=i; + EVP_DigestFinal_ex(&md_ctx,q,&size); + q+=size; + j+=size; } i=RSA_verify(NID_md5_sha1, md_buf, j, p, n, pkey->pkey.rsa); @@ -1816,8 +1951,8 @@ } else { - if (!(alg_a & SSL_aNULL) && !(alg_k & SSL_kPSK)) - /* aNULL or kPSK do not need public keys */ + /* aNULL, aSRP or kPSK do not need public keys */ + if (!(alg_a & (SSL_aNULL|SSL_aSRP)) && !(alg_k & SSL_kPSK)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR); goto err; @@ -1825,7 +1960,6 @@ /* still data left over */ if (n != 0) { - al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_EXTRA_DATA_IN_MESSAGE); goto f_err; } @@ -2249,6 +2383,13 @@ RSA *rsa; unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH]; + if (s->session->sess_cert == NULL) + { + /* We should always have a server certificate with SSL_kRSA. */ + SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR); + goto err; + } + if (s->session->sess_cert->peer_rsa_tmp != NULL) rsa=s->session->sess_cert->peer_rsa_tmp; else @@ -2510,6 +2651,13 @@ int ecdh_clnt_cert = 0; int field_size = 0; + if (s->session->sess_cert == NULL) + { + ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE); + SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE); + goto err; + } + /* Did we send out the client's * ECDH share for use in premaster * computation as part of client certificate? 
@@ -2813,7 +2961,11 @@ #ifndef OPENSSL_NO_PSK else if (alg_k & SSL_kPSK) { - char identity[PSK_MAX_IDENTITY_LEN]; + /* The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes + * to return a \0-terminated identity. The last byte + * is for us for simulating strnlen. */ + char identity[PSK_MAX_IDENTITY_LEN + 2]; + size_t identity_len; unsigned char *t = NULL; unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN*2+4]; unsigned int pre_ms_len = 0, psk_len = 0; @@ -2827,8 +2979,9 @@ goto err; } + memset(identity, 0, sizeof(identity)); psk_len = s->psk_client_callback(s, s->ctx->psk_identity_hint, - identity, PSK_MAX_IDENTITY_LEN, + identity, sizeof(identity) - 1, psk_or_pre_ms, sizeof(psk_or_pre_ms)); if (psk_len > PSK_MAX_PSK_LEN) { @@ -2842,7 +2995,14 @@ SSL_R_PSK_IDENTITY_NOT_FOUND); goto psk_err; } - + identity[PSK_MAX_IDENTITY_LEN + 1] = '\0'; + identity_len = strlen(identity); + if (identity_len > PSK_MAX_IDENTITY_LEN) + { + SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, + ERR_R_INTERNAL_ERROR); + goto psk_err; + } /* create PSK pre_master_secret */ pre_ms_len = 2+psk_len+2+psk_len; t = psk_or_pre_ms; @@ -2876,14 +3036,13 @@ s->session->master_key_length = s->method->ssl3_enc->generate_master_secret(s, s->session->master_key, - psk_or_pre_ms, pre_ms_len); - n = strlen(identity); - s2n(n, p); - memcpy(p, identity, n); - n+=2; + psk_or_pre_ms, pre_ms_len); + s2n(identity_len, p); + memcpy(p, identity, identity_len); + n = 2 + identity_len; psk_err = 0; psk_err: - OPENSSL_cleanse(identity, PSK_MAX_IDENTITY_LEN); + OPENSSL_cleanse(identity, sizeof(identity)); OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms)); if (psk_err != 0) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s3_enc.c nodejs-0.11.15/deps/openssl/openssl/ssl/s3_enc.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s3_enc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s3_enc.c 2015-01-20 21:22:17.000000000 +0000 @@ -642,10 +642,18 @@ int ssl3_final_finish_mac(SSL *s, const char 
*sender, int len, unsigned char *p) { - int ret; + int ret, sha1len; ret=ssl3_handshake_mac(s,NID_md5,sender,len,p); + if(ret == 0) + return 0; + p+=ret; - ret+=ssl3_handshake_mac(s,NID_sha1,sender,len,p); + + sha1len=ssl3_handshake_mac(s,NID_sha1,sender,len,p); + if(sha1len == 0) + return 0; + + ret+=sha1len; return(ret); } static int ssl3_handshake_mac(SSL *s, int md_nid, @@ -892,7 +900,7 @@ case SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE: return(SSL3_AD_HANDSHAKE_FAILURE); case SSL_AD_BAD_CERTIFICATE_HASH_VALUE: return(SSL3_AD_HANDSHAKE_FAILURE); case SSL_AD_UNKNOWN_PSK_IDENTITY:return(TLS1_AD_UNKNOWN_PSK_IDENTITY); + case SSL_AD_INAPPROPRIATE_FALLBACK:return(TLS1_AD_INAPPROPRIATE_FALLBACK); default: return(-1); } } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s3_lib.c nodejs-0.11.15/deps/openssl/openssl/ssl/s3_lib.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s3_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s3_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -328,7 +328,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -377,7 +377,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -425,7 +425,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -474,7 +474,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -522,7 +522,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -602,7 +602,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -687,7 +687,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -751,7 +751,7 @@ SSL_SSLV3, SSL_NOT_EXP|SSL_HIGH, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -1685,7 +1685,7 @@ SSL_TLSV1, 
SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2062,7 +2062,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2142,7 +2142,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2222,7 +2222,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2302,7 +2302,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2382,7 +2382,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2426,13 +2426,13 @@ TLS1_TXT_SRP_SHA_WITH_3DES_EDE_CBC_SHA, TLS1_CK_SRP_SHA_WITH_3DES_EDE_CBC_SHA, SSL_kSRP, - SSL_aNULL, + SSL_aSRP, SSL_3DES, SSL_SHA1, SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2448,7 +2448,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2464,7 +2464,7 @@ SSL_TLSV1, SSL_NOT_EXP|SSL_HIGH, SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF, - 168, + 112, 168, }, @@ -2474,7 +2474,7 @@ TLS1_TXT_SRP_SHA_WITH_AES_128_CBC_SHA, TLS1_CK_SRP_SHA_WITH_AES_128_CBC_SHA, SSL_kSRP, - SSL_aNULL, + SSL_aSRP, SSL_AES128, SSL_SHA1, SSL_TLSV1, @@ -2522,7 +2522,7 @@ TLS1_TXT_SRP_SHA_WITH_AES_256_CBC_SHA, TLS1_CK_SRP_SHA_WITH_AES_256_CBC_SHA, SSL_kSRP, - SSL_aNULL, + SSL_aSRP, SSL_AES256, SSL_SHA1, SSL_TLSV1, @@ -3355,6 +3355,33 @@ #endif #endif /* !OPENSSL_NO_TLSEXT */ + + case SSL_CTRL_CHECK_PROTO_VERSION: + /* For library-internal use; checks that the current protocol + * is the highest enabled version (according to s->ctx->method, + * as version negotiation may have changed s->method). */ + if (s->version == s->ctx->method->version) + return 1; + /* Apparently we're using a version-flexible SSL_METHOD + * (not at its highest protocol version). 
*/ + if (s->ctx->method->version == SSLv23_method()->version) + { +#if TLS_MAX_VERSION != TLS1_2_VERSION +# error Code needs update for SSLv23_method() support beyond TLS1_2_VERSION. +#endif + if (!(s->options & SSL_OP_NO_TLSv1_2)) + return s->version == TLS1_2_VERSION; + if (!(s->options & SSL_OP_NO_TLSv1_1)) + return s->version == TLS1_1_VERSION; + if (!(s->options & SSL_OP_NO_TLSv1)) + return s->version == TLS1_VERSION; + if (!(s->options & SSL_OP_NO_SSLv3)) + return s->version == SSL3_VERSION; + if (!(s->options & SSL_OP_NO_SSLv2)) + return s->version == SSL2_VERSION; + } + return 0; /* Unexpected state; fail closed. */ + default: break; } @@ -3714,6 +3741,7 @@ break; #endif #endif + default: return(0); } @@ -3822,10 +3850,15 @@ emask_k = cert->export_mask_k; emask_a = cert->export_mask_a; #ifndef OPENSSL_NO_SRP - mask_k=cert->mask_k | s->srp_ctx.srp_Mask; - emask_k=cert->export_mask_k | s->srp_ctx.srp_Mask; + if (s->srp_ctx.srp_Mask & SSL_kSRP) + { + mask_k |= SSL_kSRP; + emask_k |= SSL_kSRP; + mask_a |= SSL_aSRP; + emask_a |= SSL_aSRP; + } #endif - + #ifdef KSSL_DEBUG /* printf("ssl3_choose_cipher %d alg= %lx\n", i,c->algorithms);*/ #endif /* KSSL_DEBUG */ @@ -4291,4 +4324,3 @@ return SSL_HANDSHAKE_MAC_SHA256 | TLS1_PRF_SHA256; return alg2; } - diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s3_pkt.c nodejs-0.11.15/deps/openssl/openssl/ssl/s3_pkt.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s3_pkt.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s3_pkt.c 2015-01-20 21:22:17.000000000 +0000 @@ -110,6 +110,7 @@ */ #include <stdio.h> +#include <limits.h> #include <errno.h> #define USE_SOCKETS #include "ssl_locl.h" @@ -272,6 +273,12 @@ return(n); } +/* MAX_EMPTY_RECORDS defines the number of consecutive, empty records that will + * be processed per call to ssl3_get_record. Without this limit an attacker + * could send empty records at a faster rate than we can process and cause + * ssl3_get_record to loop forever. 
*/ +#define MAX_EMPTY_RECORDS 32 + /* Call this to get a new input record. * It will return <= 0 if more data is needed, normally due to an error * or non-blocking IO. @@ -292,6 +299,7 @@ short version; unsigned mac_size, orig_len; size_t extra; + unsigned empty_record_count = 0; rr= &(s->s3->rrec); sess=s->session; @@ -522,7 +530,17 @@ s->packet_length=0; /* just read a 0 length packet */ - if (rr->length == 0) goto again; + if (rr->length == 0) + { + empty_record_count++; + if (empty_record_count > MAX_EMPTY_RECORDS) + { + al=SSL_AD_UNEXPECTED_MESSAGE; + SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_RECORD_TOO_SMALL); + goto f_err; + } + goto again; + } #if 0 fprintf(stderr, "Ultimate Record type=%d, Length=%d\n", rr->type, rr->length); @@ -580,10 +598,11 @@ int ssl3_write_bytes(SSL *s, int type, const void *buf_, int len) { const unsigned char *buf=buf_; - unsigned int tot,n,nw; - int i; + unsigned int n,nw; + int i,tot; s->rwstate=SSL_NOTHING; + OPENSSL_assert(s->s3->wnum <= INT_MAX); tot=s->s3->wnum; s->s3->wnum=0; @@ -598,6 +617,22 @@ } } + /* ensure that if we end up with a smaller value of data to write + * out than the the original len from a write which didn't complete + * for non-blocking I/O and also somehow ended up avoiding + * the check for this in ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as + * it must never be possible to end up with (len-tot) as a large + * number that will then promptly send beyond the end of the users + * buffer ... so we trap and report the error in a way the user + * will notice + */ + if (len < tot) + { + SSLerr(SSL_F_SSL3_WRITE_BYTES,SSL_R_BAD_LENGTH); + return(-1); + } + + n=(len-tot); for (;;) { @@ -641,9 +676,6 @@ SSL3_BUFFER *wb=&(s->s3->wbuf); SSL_SESSION *sess; - if (wb->buf == NULL) - if (!ssl3_setup_write_buffer(s)) - return -1; /* first check if there is a SSL3_BUFFER still being written * out. 
This will happen with non blocking IO */ @@ -659,6 +691,10 @@ /* if it went, fall through and send more stuff */ } + if (wb->buf == NULL) + if (!ssl3_setup_write_buffer(s)) + return -1; + if (len == 0 && !create_empty_fragment) return 0; @@ -949,7 +985,7 @@ if (!ssl3_setup_read_buffer(s)) return(-1); - if ((type && (type != SSL3_RT_APPLICATION_DATA) && (type != SSL3_RT_HANDSHAKE) && type) || + if ((type && (type != SSL3_RT_APPLICATION_DATA) && (type != SSL3_RT_HANDSHAKE)) || (peek && (type != SSL3_RT_APPLICATION_DATA))) { SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); @@ -1055,7 +1091,7 @@ { s->rstate=SSL_ST_READ_HEADER; rr->off=0; - if (s->mode & SSL_MODE_RELEASE_BUFFERS) + if (s->mode & SSL_MODE_RELEASE_BUFFERS && s->s3->rbuf.left == 0) ssl3_release_read_buffer(s); } } @@ -1297,6 +1333,15 @@ goto f_err; } + if (!(s->s3->flags & SSL3_FLAGS_CCS_OK)) + { + al=SSL_AD_UNEXPECTED_MESSAGE; + SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_CCS_RECEIVED_EARLY); + goto f_err; + } + + s->s3->flags &= ~SSL3_FLAGS_CCS_OK; + rr->length=0; if (s->msg_callback) @@ -1431,7 +1476,7 @@ if (s->s3->tmp.key_block == NULL) { - if (s->session == NULL) + if (s->session == NULL || s->session->master_key_length == 0) { /* might happen if dtls1_read_bytes() calls this */ SSLerr(SSL_F_SSL3_DO_CHANGE_CIPHER_SPEC,SSL_R_CCS_RECEIVED_EARLY); diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/s3_srvr.c nodejs-0.11.15/deps/openssl/openssl/ssl/s3_srvr.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/s3_srvr.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/s3_srvr.c 2015-01-20 21:22:17.000000000 +0000 @@ -154,6 +154,7 @@ #include <stdio.h> #include "ssl_locl.h" #include "kssl_lcl.h" +#include "../crypto/constant_time_locl.h" #include <openssl/buffer.h> #include <openssl/rand.h> #include <openssl/objects.h> @@ -410,9 +411,8 @@ case SSL3_ST_SW_CERT_B: /* Check if it is anon DH or anon ECDH, */ /* normal PSK or KRB5 or SRP */ - if (!(s->s3->tmp.new_cipher->algorithm_auth & 
SSL_aNULL) - && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK) - && !(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5)) + if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aKRB5|SSL_aSRP)) + && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { ret=ssl3_send_server_certificate(s); if (ret <= 0) goto end; @@ -515,7 +515,9 @@ * (against the specs, but s3_clnt.c accepts this for SSL 3) */ !(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) || /* never request cert in Kerberos ciphersuites */ - (s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) + (s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) || + /* don't request certificate for SRP auth */ + (s->s3->tmp.new_cipher->algorithm_auth & SSL_aSRP) /* With normal PSK Certificates and * Certificate Requests are omitted */ || (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) @@ -673,6 +675,7 @@ case SSL3_ST_SR_CERT_VRFY_A: case SSL3_ST_SR_CERT_VRFY_B: + s->s3->flags |= SSL3_FLAGS_CCS_OK; /* we should decide if we expected this one */ ret=ssl3_get_cert_verify(s); if (ret <= 0) goto end; @@ -700,6 +703,7 @@ case SSL3_ST_SR_FINISHED_A: case SSL3_ST_SR_FINISHED_B: + s->s3->flags |= SSL3_FLAGS_CCS_OK; ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A, SSL3_ST_SR_FINISHED_B); if (ret <= 0) goto end; @@ -770,7 +774,10 @@ s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A; #else if (s->s3->next_proto_neg_seen) + { + s->s3->flags |= SSL3_FLAGS_CCS_OK; s->s3->tmp.next_state=SSL3_ST_SR_NEXT_PROTO_A; + } else s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A; #endif @@ -1841,7 +1848,7 @@ n+=2+nr[i]; } - if (!(s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) + if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aSRP)) && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { if ((pkey=ssl_get_sign_pkey(s,s->s3->tmp.new_cipher,&md)) @@ -2097,6 +2104,11 @@ s->init_num=n+4; s->init_off=0; #ifdef NETSCAPE_HANG_BUG + if (!BUF_MEM_grow_clean(buf, s->init_num + 4)) + { + 
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB); + goto err; + } p=(unsigned char *)s->init_buf->data + s->init_num; /* do the header */ @@ -2156,6 +2168,10 @@ #ifndef OPENSSL_NO_RSA if (alg_k & SSL_kRSA) { + unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH]; + int decrypt_len; + unsigned char decrypt_good, version_good; + /* FIX THIS UP EAY EAY EAY EAY */ if (s->s3->tmp.use_rsa_tmp) { @@ -2203,54 +2219,61 @@ n=i; } - i=RSA_private_decrypt((int)n,p,p,rsa,RSA_PKCS1_PADDING); - - al = -1; - - if (i != SSL_MAX_MASTER_KEY_LENGTH) - { - al=SSL_AD_DECODE_ERROR; - /* SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_RSA_DECRYPT); */ - } - - if ((al == -1) && !((p[0] == (s->client_version>>8)) && (p[1] == (s->client_version & 0xff)))) + /* We must not leak whether a decryption failure occurs because + * of Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see + * RFC 2246, section 7.4.7.1). The code follows that advice of + * the TLS RFC and generates a random premaster secret for the + * case that the decrypt fails. See + * https://tools.ietf.org/html/rfc5246#section-7.4.7.1 */ + + /* should be RAND_bytes, but we cannot work around a failure. */ + if (RAND_pseudo_bytes(rand_premaster_secret, + sizeof(rand_premaster_secret)) <= 0) + goto err; + decrypt_len = RSA_private_decrypt((int)n,p,p,rsa,RSA_PKCS1_PADDING); + ERR_clear_error(); + + /* decrypt_len should be SSL_MAX_MASTER_KEY_LENGTH. + * decrypt_good will be 0xff if so and zero otherwise. */ + decrypt_good = constant_time_eq_int_8(decrypt_len, SSL_MAX_MASTER_KEY_LENGTH); + + /* If the version in the decrypted pre-master secret is correct + * then version_good will be 0xff, otherwise it'll be zero. + * The Klima-Pokorny-Rosa extension of Bleichenbacher's attack + * (http://eprint.iacr.org/2003/052/) exploits the version + * number check as a "bad version oracle". Thus version checks + * are done in constant time and are treated like any other + * decryption error. 
*/ + version_good = constant_time_eq_8(p[0], (unsigned)(s->client_version>>8)); + version_good &= constant_time_eq_8(p[1], (unsigned)(s->client_version&0xff)); + + /* The premaster secret must contain the same version number as + * the ClientHello to detect version rollback attacks + * (strangely, the protocol does not offer such protection for + * DH ciphersuites). However, buggy clients exist that send the + * negotiated protocol version instead if the server does not + * support the requested protocol version. If + * SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients. */ + if (s->options & SSL_OP_TLS_ROLLBACK_BUG) + { + unsigned char workaround_good; + workaround_good = constant_time_eq_8(p[0], (unsigned)(s->version>>8)); + workaround_good &= constant_time_eq_8(p[1], (unsigned)(s->version&0xff)); + version_good |= workaround_good; + } + + /* Both decryption and version must be good for decrypt_good + * to remain non-zero (0xff). */ + decrypt_good &= version_good; + + /* Now copy rand_premaster_secret over p using + * decrypt_good_mask. */ + for (i = 0; i < (int) sizeof(rand_premaster_secret); i++) { - /* The premaster secret must contain the same version number as the - * ClientHello to detect version rollback attacks (strangely, the - * protocol does not offer such protection for DH ciphersuites). - * However, buggy clients exist that send the negotiated protocol - * version instead if the server does not support the requested - * protocol version. - * If SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients. 
*/ - if (!((s->options & SSL_OP_TLS_ROLLBACK_BUG) && - (p[0] == (s->version>>8)) && (p[1] == (s->version & 0xff)))) - { - al=SSL_AD_DECODE_ERROR; - /* SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_PROTOCOL_VERSION_NUMBER); */ - - /* The Klima-Pokorny-Rosa extension of Bleichenbacher's attack - * (http://eprint.iacr.org/2003/052/) exploits the version - * number check as a "bad version oracle" -- an alert would - * reveal that the plaintext corresponding to some ciphertext - * made up by the adversary is properly formatted except - * that the version number is wrong. To avoid such attacks, - * we should treat this just like any other decryption error. */ - } + p[i] = constant_time_select_8(decrypt_good, p[i], + rand_premaster_secret[i]); } - if (al != -1) - { - /* Some decryption failure -- use random value instead as countermeasure - * against Bleichenbacher's attack on PKCS #1 v1.5 RSA padding - * (see RFC 2246, section 7.4.7.1). */ - ERR_clear_error(); - i = SSL_MAX_MASTER_KEY_LENGTH; - p[0] = s->client_version >> 8; - p[1] = s->client_version & 0xff; - if (RAND_pseudo_bytes(p+2, i-2) <= 0) /* should be RAND_bytes, but we cannot work around a failure */ - goto err; - } - s->session->master_key_length= s->method->ssl3_enc->generate_master_secret(s, s->session->master_key, @@ -2785,6 +2808,13 @@ SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_BN_LIB); goto err; } + if (BN_ucmp(s->srp_ctx.A, s->srp_ctx.N) >= 0 + || BN_is_zero(s->srp_ctx.A)) + { + al=SSL_AD_ILLEGAL_PARAMETER; + SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_SRP_PARAMETERS); + goto f_err; + } if (s->session->srp_username != NULL) OPENSSL_free(s->session->srp_username); s->session->srp_username = BUF_strdup(s->srp_ctx.login); @@ -2813,6 +2843,8 @@ unsigned char premaster_secret[32], *start; size_t outlen=32, inlen; unsigned long alg_a; + int Ttag, Tclass; + long Tlen; /* Get our certificate private key*/ alg_a = s->s3->tmp.new_cipher->algorithm_auth; @@ -2834,26 +2866,15 @@ 
ERR_clear_error(); } /* Decrypt session key */ - if ((*p!=( V_ASN1_SEQUENCE| V_ASN1_CONSTRUCTED))) - { - SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DECRYPTION_FAILED); - goto gerr; - } - if (p[1] == 0x81) - { - start = p+3; - inlen = p[2]; - } - else if (p[1] < 0x80) - { - start = p+2; - inlen = p[1]; - } - else + if (ASN1_get_object((const unsigned char **)&p, &Tlen, &Ttag, &Tclass, n) != V_ASN1_CONSTRUCTED || + Ttag != V_ASN1_SEQUENCE || + Tclass != V_ASN1_UNIVERSAL) { SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DECRYPTION_FAILED); goto gerr; } + start = p; + inlen = Tlen; if (EVP_PKEY_decrypt(pkey_ctx,premaster_secret,&outlen,start,inlen) <=0) { @@ -2917,7 +2938,7 @@ SSL3_ST_SR_CERT_VRFY_A, SSL3_ST_SR_CERT_VRFY_B, -1, - 516, /* Enough for 4096 bit RSA key with TLS v1.2 */ + SSL3_RT_MAX_PLAIN_LENGTH, &ok); if (!ok) return((int)n); diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/srtp.h nodejs-0.11.15/deps/openssl/openssl/ssl/srtp.h --- nodejs-0.11.13/deps/openssl/openssl/ssl/srtp.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/srtp.h 2015-01-20 21:22:17.000000000 +0000 @@ -130,6 +130,8 @@ #define SRTP_NULL_SHA1_80 0x0005 #define SRTP_NULL_SHA1_32 0x0006 +#ifndef OPENSSL_NO_SRTP + int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles); int SSL_set_tlsext_use_srtp(SSL *ctx, const char *profiles); SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile(SSL *s); @@ -137,6 +139,8 @@ STACK_OF(SRTP_PROTECTION_PROFILE) *SSL_get_srtp_profiles(SSL *ssl); SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile(SSL *s); +#endif + #ifdef __cplusplus } #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl3.h nodejs-0.11.15/deps/openssl/openssl/ssl/ssl3.h --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl3.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl3.h 2015-01-20 21:22:17.000000000 +0000 @@ -128,9 +128,14 @@ extern "C" { #endif -/* Signalling cipher suite value: from 
draft-ietf-tls-renegotiation-03.txt */ +/* Signalling cipher suite value from RFC 5746 + * (TLS_EMPTY_RENEGOTIATION_INFO_SCSV) */ #define SSL3_CK_SCSV 0x030000FF +/* Signalling cipher suite value from draft-ietf-tls-downgrade-scsv-00 + * (TLS_FALLBACK_SCSV) */ +#define SSL3_CK_FALLBACK_SCSV 0x03005600 + #define SSL3_CK_RSA_NULL_MD5 0x03000001 #define SSL3_CK_RSA_NULL_SHA 0x03000002 #define SSL3_CK_RSA_RC4_40_MD5 0x03000003 @@ -388,6 +393,7 @@ #define TLS1_FLAGS_TLS_PADDING_BUG 0x0008 #define TLS1_FLAGS_SKIP_CERT_VERIFY 0x0010 #define TLS1_FLAGS_KEEP_HANDSHAKE 0x0020 +#define SSL3_FLAGS_CCS_OK 0x0080 /* SSL3_FLAGS_SGC_RESTART_DONE is set when we * restart a handshake because of MS SGC and so prevents us diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_asn1.c nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_asn1.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_asn1.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_asn1.c 2015-01-20 21:22:17.000000000 +0000 @@ -408,6 +408,7 @@ if (os.length != 3) { c.error=SSL_R_CIPHER_CODE_WRONG_LENGTH; + c.line=__LINE__; goto err; } id=0x02000000L| @@ -420,6 +421,7 @@ if (os.length != 2) { c.error=SSL_R_CIPHER_CODE_WRONG_LENGTH; + c.line=__LINE__; goto err; } id=0x03000000L| @@ -429,6 +431,7 @@ else { c.error=SSL_R_UNKNOWN_SSL_VERSION; + c.line=__LINE__; goto err; } @@ -521,6 +524,7 @@ if (os.length > SSL_MAX_SID_CTX_LENGTH) { c.error=SSL_R_BAD_LENGTH; + c.line=__LINE__; goto err; } else diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_ciph.c nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_ciph.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_ciph.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_ciph.c 2015-01-20 21:22:17.000000000 +0000 @@ -270,6 +270,7 @@ {0,SSL_TXT_aGOST94,0,0,SSL_aGOST94,0,0,0,0,0,0,0}, {0,SSL_TXT_aGOST01,0,0,SSL_aGOST01,0,0,0,0,0,0,0}, {0,SSL_TXT_aGOST,0,0,SSL_aGOST94|SSL_aGOST01,0,0,0,0,0,0,0}, + {0,SSL_TXT_aSRP,0, 0,SSL_aSRP, 
0,0,0,0,0,0,0}, /* aliases combining key exchange and server authentication */ {0,SSL_TXT_EDH,0, SSL_kEDH,~SSL_aNULL,0,0,0,0,0,0,0}, @@ -562,7 +563,7 @@ break; } - if ((i < 0) || (i > SSL_ENC_NUM_IDX)) + if ((i < 0) || (i >= SSL_ENC_NUM_IDX)) *enc=NULL; else { @@ -596,7 +597,7 @@ i= -1; break; } - if ((i < 0) || (i > SSL_MD_NUM_IDX)) + if ((i < 0) || (i >= SSL_MD_NUM_IDX)) { *md=NULL; if (mac_pkey_type!=NULL) *mac_pkey_type = NID_undef; @@ -925,7 +926,7 @@ int rule, int strength_bits, CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p) { - CIPHER_ORDER *head, *tail, *curr, *curr2, *last; + CIPHER_ORDER *head, *tail, *curr, *next, *last; const SSL_CIPHER *cp; int reverse = 0; @@ -942,21 +943,25 @@ if (reverse) { - curr = tail; + next = tail; last = head; } else { - curr = head; + next = head; last = tail; } - curr2 = curr; + curr = NULL; for (;;) { - if ((curr == NULL) || (curr == last)) break; - curr = curr2; - curr2 = reverse ? curr->prev : curr->next; + if (curr == last) break; + + curr = next; + + if (curr == NULL) break; + + next = reverse ? 
curr->prev : curr->next; cp = curr->cipher; @@ -1598,6 +1603,9 @@ case SSL_kSRP: kx="SRP"; break; + case SSL_kGOST: + kx="GOST"; + break; default: kx="unknown"; } @@ -1628,6 +1636,15 @@ case SSL_aPSK: au="PSK"; break; + case SSL_aSRP: + au="SRP"; + break; + case SSL_aGOST94: + au="GOST94"; + break; + case SSL_aGOST01: + au="GOST01"; + break; default: au="unknown"; break; @@ -1675,6 +1692,9 @@ case SSL_SEED: enc="SEED(128)"; break; + case SSL_eGOST2814789CNT: + enc="GOST89(256)"; + break; default: enc="unknown"; break; @@ -1697,6 +1717,12 @@ case SSL_AEAD: mac="AEAD"; break; + case SSL_GOST89MAC: + mac="GOST89"; + break; + case SSL_GOST94: + mac="GOST94"; + break; default: mac="unknown"; break; diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_err.c nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_err.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_err.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_err.c 2015-01-20 21:22:17.000000000 +0000 @@ -329,6 +329,7 @@ {ERR_REASON(SSL_R_BAD_SRP_B_LENGTH) ,"bad srp b length"}, {ERR_REASON(SSL_R_BAD_SRP_G_LENGTH) ,"bad srp g length"}, {ERR_REASON(SSL_R_BAD_SRP_N_LENGTH) ,"bad srp n length"}, +{ERR_REASON(SSL_R_BAD_SRP_PARAMETERS) ,"bad srp parameters"}, {ERR_REASON(SSL_R_BAD_SRP_S_LENGTH) ,"bad srp s length"}, {ERR_REASON(SSL_R_BAD_SRTP_MKI_VALUE) ,"bad srtp mki value"}, {ERR_REASON(SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST),"bad srtp protection profile list"}, @@ -382,6 +383,7 @@ {ERR_REASON(SSL_R_HTTPS_PROXY_REQUEST) ,"https proxy request"}, {ERR_REASON(SSL_R_HTTP_REQUEST) ,"http request"}, {ERR_REASON(SSL_R_ILLEGAL_PADDING) ,"illegal padding"}, +{ERR_REASON(SSL_R_INAPPROPRIATE_FALLBACK),"inappropriate fallback"}, {ERR_REASON(SSL_R_INCONSISTENT_COMPRESSION),"inconsistent compression"}, {ERR_REASON(SSL_R_INVALID_CHALLENGE_LENGTH),"invalid challenge length"}, {ERR_REASON(SSL_R_INVALID_COMMAND) ,"invalid command"}, @@ -528,6 +530,7 @@ {ERR_REASON(SSL_R_TLSV1_ALERT_DECRYPTION_FAILED),"tlsv1 
alert decryption failed"}, {ERR_REASON(SSL_R_TLSV1_ALERT_DECRYPT_ERROR),"tlsv1 alert decrypt error"}, {ERR_REASON(SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION),"tlsv1 alert export restriction"}, +{ERR_REASON(SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK),"tlsv1 alert inappropriate fallback"}, {ERR_REASON(SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY),"tlsv1 alert insufficient security"}, {ERR_REASON(SSL_R_TLSV1_ALERT_INTERNAL_ERROR),"tlsv1 alert internal error"}, {ERR_REASON(SSL_R_TLSV1_ALERT_NO_RENEGOTIATION),"tlsv1 alert no renegotiation"}, @@ -541,7 +544,7 @@ {ERR_REASON(SSL_R_TLSV1_UNRECOGNIZED_NAME),"tlsv1 unrecognized name"}, {ERR_REASON(SSL_R_TLSV1_UNSUPPORTED_EXTENSION),"tlsv1 unsupported extension"}, {ERR_REASON(SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER),"tls client cert req with anon cipher"}, -{ERR_REASON(SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT),"peer does not accept heartbearts"}, +{ERR_REASON(SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT),"peer does not accept heartbeats"}, {ERR_REASON(SSL_R_TLS_HEARTBEAT_PENDING) ,"heartbeat request already pending"}, {ERR_REASON(SSL_R_TLS_ILLEGAL_EXPORTER_LABEL),"tls illegal exporter label"}, {ERR_REASON(SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST),"tls invalid ecpointformat list"}, diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl.h nodejs-0.11.15/deps/openssl/openssl/ssl/ssl.h --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl.h 2015-01-20 21:22:17.000000000 +0000 @@ -264,6 +264,7 @@ #define SSL_TXT_aGOST94 "aGOST94" #define SSL_TXT_aGOST01 "aGOST01" #define SSL_TXT_aGOST "aGOST" +#define SSL_TXT_aSRP "aSRP" #define SSL_TXT_DSS "DSS" #define SSL_TXT_DH "DH" @@ -553,7 +554,7 @@ /* Allow initial connection to servers that don't support RI */ #define SSL_OP_LEGACY_SERVER_CONNECT 0x00000004L #define SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG 0x00000008L -#define SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG 0x00000010L +#define SSL_OP_TLSEXT_PADDING 0x00000010L #define 
SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER 0x00000020L #define SSL_OP_SAFARI_ECDHE_ECDSA_BUG 0x00000040L #define SSL_OP_SSLEAY_080_CLIENT_DH_BUG 0x00000080L @@ -562,6 +563,8 @@ /* Hasn't done anything since OpenSSL 0.9.7h, retained for compatibility */ #define SSL_OP_MSIE_SSLV2_RSA_PADDING 0x0 +/* Refers to ancient SSLREF and SSLv2, retained for compatibility */ +#define SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG 0x0 /* Disable SSL 3.0/TLS 1.0 CBC vulnerability workaround that was added * in OpenSSL 0.9.6d. Usually (depending on the application protocol) @@ -650,6 +653,10 @@ */ #define SSL_MODE_SEND_CLIENTHELLO_TIME 0x00000020L #define SSL_MODE_SEND_SERVERHELLO_TIME 0x00000040L +/* Send TLS_FALLBACK_SCSV in the ClientHello. + * To be set by applications that reconnect with a downgraded protocol + * version; see draft-ietf-tls-downgrade-scsv-00 for details. */ +#define SSL_MODE_SEND_FALLBACK_SCSV 0x00000080L /* Note: SSL[_CTX]_set_{options,mode} use |= op on the previous value, * they cannot be used to clear bits. */ @@ -1508,6 +1515,7 @@ #define SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE #define SSL_AD_BAD_CERTIFICATE_HASH_VALUE TLS1_AD_BAD_CERTIFICATE_HASH_VALUE #define SSL_AD_UNKNOWN_PSK_IDENTITY TLS1_AD_UNKNOWN_PSK_IDENTITY /* fatal */ +#define SSL_AD_INAPPROPRIATE_FALLBACK TLS1_AD_INAPPROPRIATE_FALLBACK /* fatal */ #define SSL_ERROR_NONE 0 #define SSL_ERROR_SSL 1 @@ -1618,6 +1626,8 @@ #define SSL_CTRL_GET_EXTRA_CHAIN_CERTS 82 #define SSL_CTRL_CLEAR_EXTRA_CHAIN_CERTS 83 +#define SSL_CTRL_CHECK_PROTO_VERSION 119 + #define DTLSv1_get_timeout(ssl, arg) \ SSL_ctrl(ssl,DTLS_CTRL_GET_TIMEOUT,0, (void *)arg) #define DTLSv1_handle_timeout(ssl) \ @@ -2053,6 +2063,10 @@ void SSL_set_debug(SSL *s, int debug); int SSL_cache_hit(SSL *s); +#ifndef OPENSSL_NO_UNIT_TEST +const struct openssl_ssl_test_functions *SSL_test_functions(void); +#endif + /* BEGIN ERROR CODES */ /* The following lines are auto generated by the script mkerr.pl. 
Any changes * made after this point may be overwritten when the script is next run. @@ -2318,6 +2332,7 @@ #define SSL_R_BAD_SRP_B_LENGTH 348 #define SSL_R_BAD_SRP_G_LENGTH 349 #define SSL_R_BAD_SRP_N_LENGTH 350 +#define SSL_R_BAD_SRP_PARAMETERS 371 #define SSL_R_BAD_SRP_S_LENGTH 351 #define SSL_R_BAD_SRTP_MKI_VALUE 352 #define SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST 353 @@ -2371,6 +2386,7 @@ #define SSL_R_HTTPS_PROXY_REQUEST 155 #define SSL_R_HTTP_REQUEST 156 #define SSL_R_ILLEGAL_PADDING 283 +#define SSL_R_INAPPROPRIATE_FALLBACK 373 #define SSL_R_INCONSISTENT_COMPRESSION 340 #define SSL_R_INVALID_CHALLENGE_LENGTH 158 #define SSL_R_INVALID_COMMAND 280 @@ -2517,6 +2533,7 @@ #define SSL_R_TLSV1_ALERT_DECRYPTION_FAILED 1021 #define SSL_R_TLSV1_ALERT_DECRYPT_ERROR 1051 #define SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION 1060 +#define SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK 1086 #define SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY 1071 #define SSL_R_TLSV1_ALERT_INTERNAL_ERROR 1080 #define SSL_R_TLSV1_ALERT_NO_RENEGOTIATION 1100 diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_lib.c nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_lib.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -1349,6 +1349,10 @@ p=buf; sk=s->session->ciphers; + + if (sk_SSL_CIPHER_num(sk) == 0) + return NULL; + for (i=0; i<sk_SSL_CIPHER_num(sk); i++) { int n; @@ -1383,6 +1387,8 @@ if (sk == NULL) return(0); q=p; + if (put_cb == NULL) + put_cb = s->method->put_cipher_by_char; for (i=0; i<sk_SSL_CIPHER_num(sk); i++) { @@ -1402,24 +1408,41 @@ s->psk_client_callback == NULL) continue; #endif /* OPENSSL_NO_PSK */ - j = put_cb ? 
put_cb(c,p) : ssl_put_cipher_by_char(s,c,p); +#ifndef OPENSSL_NO_SRP + if (((c->algorithm_mkey & SSL_kSRP) || (c->algorithm_auth & SSL_aSRP)) && + !(s->srp_ctx.srp_Mask & SSL_kSRP)) + continue; +#endif /* OPENSSL_NO_SRP */ + j = put_cb(c,p); p+=j; } - /* If p == q, no ciphers and caller indicates an error. Otherwise - * add SCSV if not renegotiating. - */ - if (p != q && !s->renegotiate) + /* If p == q, no ciphers; caller indicates an error. + * Otherwise, add applicable SCSVs. */ + if (p != q) { - static SSL_CIPHER scsv = + if (!s->renegotiate) { - 0, NULL, SSL3_CK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - j = put_cb ? put_cb(&scsv,p) : ssl_put_cipher_by_char(s,&scsv,p); - p+=j; + static SSL_CIPHER scsv = + { + 0, NULL, SSL3_CK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + j = put_cb(&scsv,p); + p+=j; #ifdef OPENSSL_RI_DEBUG - fprintf(stderr, "SCSV sent by client\n"); + fprintf(stderr, "TLS_EMPTY_RENEGOTIATION_INFO_SCSV sent by client\n"); #endif - } + } + + if (s->mode & SSL_MODE_SEND_FALLBACK_SCSV) + { + static SSL_CIPHER scsv = + { + 0, NULL, SSL3_CK_FALLBACK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + j = put_cb(&scsv,p); + p+=j; + } + } return(p-q); } @@ -1430,11 +1453,12 @@ const SSL_CIPHER *c; STACK_OF(SSL_CIPHER) *sk; int i,n; + if (s->s3) s->s3->send_connection_binding = 0; n=ssl_put_cipher_by_char(s,NULL,NULL); - if ((num%n) != 0) + if (n == 0 || (num%n) != 0) { SSLerr(SSL_F_SSL_BYTES_TO_CIPHER_LIST,SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST); return(NULL); @@ -1449,7 +1473,7 @@ for (i=0; i<num; i+=n) { - /* Check for SCSV */ + /* Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV */ if (s->s3 && (n != 3 || !p[0]) && (p[n-2] == ((SSL3_CK_SCSV >> 8) & 0xff)) && (p[n-1] == (SSL3_CK_SCSV & 0xff))) @@ -1469,6 +1493,23 @@ continue; } + /* Check for TLS_FALLBACK_SCSV */ + if ((n != 3 || !p[0]) && + (p[n-2] == ((SSL3_CK_FALLBACK_SCSV >> 8) & 0xff)) && + (p[n-1] == (SSL3_CK_FALLBACK_SCSV & 0xff))) + { + /* The SCSV indicates that the client previously tried a higher version. 
+ * Fail if the current version is an unexpected downgrade. */ + if (!SSL_ctrl(s, SSL_CTRL_CHECK_PROTO_VERSION, 0, NULL)) + { + SSLerr(SSL_F_SSL_BYTES_TO_CIPHER_LIST,SSL_R_INAPPROPRIATE_FALLBACK); + if (s->s3) + ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_INAPPROPRIATE_FALLBACK); + goto err; + } + continue; + } + c=ssl_get_cipher_by_char(s,p); p+=n; if (c != NULL) @@ -2935,15 +2976,26 @@ SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX* ctx) { + CERT *ocert = ssl->cert; if (ssl->ctx == ctx) return ssl->ctx; #ifndef OPENSSL_NO_TLSEXT if (ctx == NULL) ctx = ssl->initial_ctx; #endif - if (ssl->cert != NULL) - ssl_cert_free(ssl->cert); ssl->cert = ssl_cert_dup(ctx->cert); + if (ocert != NULL) + { + int i; + /* Copy negotiated digests from original */ + for (i = 0; i < SSL_PKEY_NUM; i++) + { + CERT_PKEY *cpk = ocert->pkeys + i; + CERT_PKEY *rpk = ssl->cert->pkeys + i; + rpk->digest = cpk->digest; + } + ssl_cert_free(ocert); + } CRYPTO_add(&ctx->references,1,CRYPTO_LOCK_SSL_CTX); if (ssl->ctx != NULL) SSL_CTX_free(ssl->ctx); /* decrement reference count */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl-lib.com nodejs-0.11.15/deps/openssl/openssl/ssl/ssl-lib.com --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl-lib.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl-lib.com 2015-01-20 21:22:17.000000000 +0000 @@ -213,16 +213,16 @@ $! $! Define The Different SSL "library" Files. $! 
-$ LIB_SSL = "s2_meth,s2_srvr,s2_clnt,s2_lib,s2_enc,s2_pkt,"+ - - "s3_meth,s3_srvr,s3_clnt,s3_lib,s3_enc,s3_pkt,s3_both,s3_cbc,"+ - - "s23_meth,s23_srvr,s23_clnt,s23_lib,s23_pkt,"+ - - "t1_meth,t1_srvr,t1_clnt,t1_lib,t1_enc,"+ - - "d1_meth,d1_srvr,d1_clnt,d1_lib,d1_pkt,"+ - +$ LIB_SSL = "s2_meth, s2_srvr, s2_clnt, s2_lib, s2_enc, s2_pkt,"+ - + "s3_meth, s3_srvr, s3_clnt, s3_lib, s3_enc, s3_pkt, s3_both, s3_cbc,"+ - + "s23_meth,s23_srvr,s23_clnt,s23_lib, s23_pkt,"+ - + "t1_meth, t1_srvr, t1_clnt, t1_lib, t1_enc,"+ - + "d1_meth, d1_srvr, d1_clnt, d1_lib, d1_pkt,"+ - "d1_both,d1_enc,d1_srtp,"+ - "ssl_lib,ssl_err2,ssl_cert,ssl_sess,"+ - "ssl_ciph,ssl_stat,ssl_rsa,"+ - "ssl_asn1,ssl_txt,ssl_algs,"+ - - "bio_ssl,ssl_err,kssl,tls_srp,t1_reneg" + "bio_ssl,ssl_err,kssl,tls_srp,t1_reneg,ssl_utst" $! $ COMPILEWITH_CC5 = "" $! @@ -240,7 +240,7 @@ $! $! O.K, Extract The File Name From The File List. $! -$ FILE_NAME = F$ELEMENT(FILE_COUNTER,",",LIB_SSL) +$ FILE_NAME = F$EDIT(F$ELEMENT(FILE_COUNTER,",",LIB_SSL),"COLLAPSE") $! $! Check To See If We Are At The End Of The File List. $! @@ -857,9 +857,12 @@ $ IF F$TYPE(USER_CCDEFS) .NES. "" THEN CCDEFS = CCDEFS + "," + USER_CCDEFS $ CCEXTRAFLAGS = "" $ IF F$TYPE(USER_CCFLAGS) .NES. "" THEN CCEXTRAFLAGS = USER_CCFLAGS -$ CCDISABLEWARNINGS = "MAYLOSEDATA3" !!! "LONGLONGTYPE,LONGLONGSUFX,FOUNDCR" -$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" THEN - - CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," + USER_CCDISABLEWARNINGS +$ CCDISABLEWARNINGS = "" !!! "MAYLOSEDATA3" !!! "LONGLONGTYPE,LONGLONGSUFX,FOUNDCR" +$ IF F$TYPE(USER_CCDISABLEWARNINGS) .NES. "" +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN CCDISABLEWARNINGS = CCDISABLEWARNINGS + "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS + USER_CCDISABLEWARNINGS +$ ENDIF $! $! Check To See If We Have A ZLIB Option. $! @@ -1022,6 +1025,18 @@ $! $ IF COMPILER .EQS. "DECC" $ THEN +$! Not all compiler versions support MAYLOSEDATA3. 
+$ OPT_TEST = "MAYLOSEDATA3" +$ DEFINE /USER_MODE SYS$ERROR NL: +$ DEFINE /USER_MODE SYS$OUTPUT NL: +$ 'CC' /NOCROSS_REFERENCE /NOLIST /NOOBJECT - + /WARNINGS = DISABLE = ('OPT_TEST', EMPTYFILE) NL: +$ IF ($SEVERITY) +$ THEN +$ IF CCDISABLEWARNINGS .NES. "" THEN - + CCDISABLEWARNINGS = CCDISABLEWARNINGS+ "," +$ CCDISABLEWARNINGS = CCDISABLEWARNINGS+ OPT_TEST +$ ENDIF $ IF CCDISABLEWARNINGS .EQS. "" $ THEN $ CC4DISABLEWARNINGS = "DOLLARID" diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_locl.h nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_locl.h --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_locl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_locl.h 2015-01-20 21:22:17.000000000 +0000 @@ -311,6 +311,7 @@ #define SSL_aPSK 0x00000080L /* PSK auth */ #define SSL_aGOST94 0x00000100L /* GOST R 34.10-94 signature auth */ #define SSL_aGOST01 0x00000200L /* GOST R 34.10-2001 signature auth */ +#define SSL_aSRP 0x00000400L /* SRP auth */ /* Bits for algorithm_enc (symmetric encryption) */ @@ -809,6 +810,16 @@ return &func_name##_data; \ } +struct openssl_ssl_test_functions + { + int (*p_ssl_init_wbio_buffer)(SSL *s, int push); + int (*p_ssl3_setup_buffers)(SSL *s); + int (*p_tls1_process_heartbeat)(SSL *s); + int (*p_dtls1_process_heartbeat)(SSL *s); + }; + +#ifndef OPENSSL_UNIT_TEST + void ssl_clear_cipher_ctx(SSL *s); int ssl_clear_bad_session(SSL *s); CERT *ssl_cert_new(void); @@ -1095,8 +1106,8 @@ #endif /* OPENSSL_NO_EC */ #ifndef OPENSSL_NO_TLSEXT -unsigned char *ssl_add_clienthello_tlsext(SSL *s, unsigned char *p, unsigned char *limit); -unsigned char *ssl_add_serverhello_tlsext(SSL *s, unsigned char *p, unsigned char *limit); +unsigned char *ssl_add_clienthello_tlsext(SSL *s, unsigned char *buf, unsigned char *limit); +unsigned char *ssl_add_serverhello_tlsext(SSL *s, unsigned char *buf, unsigned char *limit); int ssl_parse_clienthello_tlsext(SSL *s, unsigned char **data, unsigned char *d, int n, int *al); int 
ssl_parse_serverhello_tlsext(SSL *s, unsigned char **data, unsigned char *d, int n, int *al); int ssl_prepare_clienthello_tlsext(SSL *s); @@ -1174,4 +1185,14 @@ const EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *mac_ctx, const unsigned char *data, size_t data_len, size_t orig_len); +int srp_verify_server_param(SSL *s, int *al); + +#else + +#define ssl_init_wbio_buffer SSL_test_functions()->p_ssl_init_wbio_buffer +#define ssl3_setup_buffers SSL_test_functions()->p_ssl3_setup_buffers +#define tls1_process_heartbeat SSL_test_functions()->p_tls1_process_heartbeat +#define dtls1_process_heartbeat SSL_test_functions()->p_dtls1_process_heartbeat + +#endif #endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_stat.c nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_stat.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_stat.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_stat.c 2015-01-20 21:22:17.000000000 +0000 @@ -212,7 +212,6 @@ case SSL3_ST_SR_CERT_VRFY_B: str="SSLv3 read certificate verify B"; break; #endif -#if !defined(OPENSSL_NO_SSL2) && !defined(OPENSSL_NO_SSL3) /* SSLv2/v3 compatibility states */ /* client */ case SSL23_ST_CW_CLNT_HELLO_A: str="SSLv2/v3 write client hello A"; break; @@ -222,7 +221,6 @@ /* server */ case SSL23_ST_SR_CLNT_HELLO_A: str="SSLv2/v3 read client hello A"; break; case SSL23_ST_SR_CLNT_HELLO_B: str="SSLv2/v3 read client hello B"; break; -#endif /* DTLS */ case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A: str="DTLS1 read hello verify request A"; break; @@ -366,7 +364,6 @@ case SSL3_ST_SR_CERT_VRFY_B: str="3RCV_B"; break; #endif -#if !defined(OPENSSL_NO_SSL2) && !defined(OPENSSL_NO_SSL3) /* SSLv2/v3 compatibility states */ /* client */ case SSL23_ST_CW_CLNT_HELLO_A: str="23WCHA"; break; @@ -376,7 +373,7 @@ /* server */ case SSL23_ST_SR_CLNT_HELLO_A: str="23RCHA"; break; case SSL23_ST_SR_CLNT_HELLO_B: str="23RCHB"; break; -#endif + /* DTLS */ case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A: str="DRCHVA"; break; case 
DTLS1_ST_CR_HELLO_VERIFY_REQUEST_B: str="DRCHVB"; break; diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_utst.c nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_utst.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/ssl_utst.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/ssl_utst.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,73 @@ +/* ssl_utst.c */ +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL + * project. + */ +/* ==================================================================== + * Copyright (c) 2014 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. 
Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * ==================================================================== + * + */ + +#include "ssl_locl.h" + +#ifndef OPENSSL_NO_UNIT_TEST + + +static const struct openssl_ssl_test_functions ssl_test_functions = + { + ssl_init_wbio_buffer, + ssl3_setup_buffers, + tls1_process_heartbeat, + dtls1_process_heartbeat + }; + +const struct openssl_ssl_test_functions *SSL_test_functions(void) + { + return &ssl_test_functions; + } + +#endif diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/t1_enc.c nodejs-0.11.15/deps/openssl/openssl/ssl/t1_enc.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/t1_enc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/t1_enc.c 2015-01-20 21:22:17.000000000 +0000 @@ -1048,14 +1048,10 @@ if (!stream_mac) EVP_MD_CTX_cleanup(&hmac); #ifdef TLS_DEBUG -printf("sec="); -{unsigned int z; for (z=0; z<md_size; z++) printf("%02X ",mac_sec[z]); printf("\n"); } printf("seq="); {int z; for (z=0; z<8; z++) printf("%02X ",seq[z]); printf("\n"); } -printf("buf="); -{int z; for (z=0; z<5; z++) printf("%02X ",buf[z]); printf("\n"); } printf("rec="); -{unsigned int z; for (z=0; z<rec->length; z++) printf("%02X ",buf[z]); printf("\n"); } +{unsigned int z; for (z=0; z<rec->length; z++) printf("%02X ",rec->data[z]); printf("\n"); } #endif if (ssl->version != DTLS1_VERSION && ssl->version != DTLS1_BAD_VER) @@ -1132,7 +1128,7 @@ int rv; #ifdef KSSL_DEBUG - printf ("tls1_export_keying_material(%p,%p,%d,%s,%d,%p,%d)\n", s, out, olen, label, llen, p, plen); + printf ("tls1_export_keying_material(%p,%p,%d,%s,%d,%p,%d)\n", s, out, olen, label, llen, context, contextlen); #endif /* KSSL_DEBUG */ buff = OPENSSL_malloc(olen); @@ -1185,7 +1181,7 @@ if (memcmp(val, TLS_MD_KEY_EXPANSION_CONST, TLS_MD_KEY_EXPANSION_CONST_SIZE) == 0) goto err1; - rv = tls1_PRF(s->s3->tmp.new_cipher->algorithm2, + rv = tls1_PRF(ssl_get_algorithm2(s), val, vallen, NULL, 0, NULL, 0, @@ -1245,6 +1241,7 @@ case SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 
return(TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE); case SSL_AD_BAD_CERTIFICATE_HASH_VALUE: return(TLS1_AD_BAD_CERTIFICATE_HASH_VALUE); case SSL_AD_UNKNOWN_PSK_IDENTITY:return(TLS1_AD_UNKNOWN_PSK_IDENTITY); + case SSL_AD_INAPPROPRIATE_FALLBACK:return(TLS1_AD_INAPPROPRIATE_FALLBACK); #if 0 /* not appropriate for TLS, not used for DTLS */ case DTLS1_AD_MISSING_HANDSHAKE_MESSAGE: return (DTLS1_AD_MISSING_HANDSHAKE_MESSAGE); diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/t1_lib.c nodejs-0.11.15/deps/openssl/openssl/ssl/t1_lib.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/t1_lib.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/t1_lib.c 2015-01-20 21:22:17.000000000 +0000 @@ -352,15 +352,16 @@ return (int)slen; } -unsigned char *ssl_add_clienthello_tlsext(SSL *s, unsigned char *p, unsigned char *limit) +unsigned char *ssl_add_clienthello_tlsext(SSL *s, unsigned char *buf, unsigned char *limit) { int extdatalen=0; - unsigned char *ret = p; + unsigned char *orig = buf; + unsigned char *ret = buf; /* don't add extensions for SSLv3 unless doing secure renegotiation */ if (s->client_version == SSL3_VERSION && !s->s3->send_connection_binding) - return p; + return orig; ret+=2; @@ -409,7 +410,7 @@ return NULL; } - if((limit - p - 4 - el) < 0) return NULL; + if((limit - ret - 4 - el) < 0) return NULL; s2n(TLSEXT_TYPE_renegotiate,ret); s2n(el,ret); @@ -452,8 +453,7 @@ #endif #ifndef OPENSSL_NO_EC - if (s->tlsext_ecpointformatlist != NULL && - s->version != DTLS1_VERSION) + if (s->tlsext_ecpointformatlist != NULL) { /* Add TLS extension ECPointFormats to the ClientHello message */ long lenmax; @@ -472,8 +472,7 @@ memcpy(ret, s->tlsext_ecpointformatlist, s->tlsext_ecpointformatlist_length); ret+=s->tlsext_ecpointformatlist_length; } - if (s->tlsext_ellipticcurvelist != NULL && - s->version != DTLS1_VERSION) + if (s->tlsext_ellipticcurvelist != NULL) { /* Add TLS extension EllipticCurves to the ClientHello message */ long lenmax; @@ -617,6 +616,8 @@ 
#ifndef OPENSSL_NO_HEARTBEATS /* Add Heartbeat extension */ + if ((limit - ret - 4 - 1) < 0) + return NULL; s2n(TLSEXT_TYPE_heartbeat,ret); s2n(1,ret); /* Set mode: @@ -642,13 +643,13 @@ #endif #ifndef OPENSSL_NO_SRTP - if(SSL_get_srtp_profiles(s)) + if(SSL_IS_DTLS(s) && SSL_get_srtp_profiles(s)) { int el; ssl_add_clienthello_use_srtp_ext(s, 0, &el, 0); - if((limit - p - 4 - el) < 0) return NULL; + if((limit - ret - 4 - el) < 0) return NULL; s2n(TLSEXT_TYPE_use_srtp,ret); s2n(el,ret); @@ -661,55 +662,55 @@ ret += el; } #endif - -#ifdef TLSEXT_TYPE_padding /* Add padding to workaround bugs in F5 terminators. * See https://tools.ietf.org/html/draft-agl-tls-padding-03 * * NB: because this code works out the length of all existing * extensions it MUST always appear last. */ - { - int hlen = ret - (unsigned char *)s->init_buf->data; - /* The code in s23_clnt.c to build ClientHello messages includes the - * 5-byte record header in the buffer, while the code in s3_clnt.c does - * not. */ - if (s->state == SSL23_ST_CW_CLNT_HELLO_A) - hlen -= 5; - if (hlen > 0xff && hlen < 0x200) - { - hlen = 0x200 - hlen; - if (hlen >= 4) - hlen -= 4; - else - hlen = 0; + if (s->options & SSL_OP_TLSEXT_PADDING) + { + int hlen = ret - (unsigned char *)s->init_buf->data; + /* The code in s23_clnt.c to build ClientHello messages + * includes the 5-byte record header in the buffer, while + * the code in s3_clnt.c does not. 
+ */ + if (s->state == SSL23_ST_CW_CLNT_HELLO_A) + hlen -= 5; + if (hlen > 0xff && hlen < 0x200) + { + hlen = 0x200 - hlen; + if (hlen >= 4) + hlen -= 4; + else + hlen = 0; - s2n(TLSEXT_TYPE_padding, ret); - s2n(hlen, ret); - memset(ret, 0, hlen); - ret += hlen; + s2n(TLSEXT_TYPE_padding, ret); + s2n(hlen, ret); + memset(ret, 0, hlen); + ret += hlen; + } } - } -#endif - if ((extdatalen = ret-p-2)== 0) - return p; + if ((extdatalen = ret-orig-2)== 0) + return orig; - s2n(extdatalen,p); + s2n(extdatalen, orig); return ret; } -unsigned char *ssl_add_serverhello_tlsext(SSL *s, unsigned char *p, unsigned char *limit) +unsigned char *ssl_add_serverhello_tlsext(SSL *s, unsigned char *buf, unsigned char *limit) { int extdatalen=0; - unsigned char *ret = p; + unsigned char *orig = buf; + unsigned char *ret = buf; #ifndef OPENSSL_NO_NEXTPROTONEG int next_proto_neg_seen; #endif /* don't add extensions for SSLv3, unless doing secure renegotiation */ if (s->version == SSL3_VERSION && !s->s3->send_connection_binding) - return p; + return orig; ret+=2; if (ret>=limit) return NULL; /* this really never occurs, but ... 
*/ @@ -732,7 +733,7 @@ return NULL; } - if((limit - p - 4 - el) < 0) return NULL; + if((limit - ret - 4 - el) < 0) return NULL; s2n(TLSEXT_TYPE_renegotiate,ret); s2n(el,ret); @@ -747,8 +748,7 @@ } #ifndef OPENSSL_NO_EC - if (s->tlsext_ecpointformatlist != NULL && - s->version != DTLS1_VERSION) + if (s->tlsext_ecpointformatlist != NULL) { /* Add TLS extension ECPointFormats to the ServerHello message */ long lenmax; @@ -806,13 +806,13 @@ #endif #ifndef OPENSSL_NO_SRTP - if(s->srtp_profile) + if(SSL_IS_DTLS(s) && s->srtp_profile) { int el; ssl_add_serverhello_use_srtp_ext(s, 0, &el, 0); - if((limit - p - 4 - el) < 0) return NULL; + if((limit - ret - 4 - el) < 0) return NULL; s2n(TLSEXT_TYPE_use_srtp,ret); s2n(el,ret); @@ -845,6 +845,8 @@ /* Add Heartbeat extension if we've received one */ if (s->tlsext_heartbeat & SSL_TLSEXT_HB_ENABLED) { + if ((limit - ret - 4 - 1) < 0) + return NULL; s2n(TLSEXT_TYPE_heartbeat,ret); s2n(1,ret); /* Set mode: @@ -881,10 +883,10 @@ } #endif - if ((extdatalen = ret-p-2)== 0) - return p; + if ((extdatalen = ret-orig-2)== 0) + return orig; - s2n(extdatalen,p); + s2n(extdatalen, orig); return ret; } @@ -1149,8 +1151,7 @@ #endif #ifndef OPENSSL_NO_EC - else if (type == TLSEXT_TYPE_ec_point_formats && - s->version != DTLS1_VERSION) + else if (type == TLSEXT_TYPE_ec_point_formats) { unsigned char *sdata = data; int ecpointformatlist_length = *(sdata++); @@ -1184,8 +1185,7 @@ fprintf(stderr,"\n"); #endif } - else if (type == TLSEXT_TYPE_elliptic_curves && - s->version != DTLS1_VERSION) + else if (type == TLSEXT_TYPE_elliptic_curves) { unsigned char *sdata = data; int ellipticcurvelist_length = (*(sdata++) << 8); @@ -1444,7 +1444,8 @@ /* session ticket processed earlier */ #ifndef OPENSSL_NO_SRTP - else if (type == TLSEXT_TYPE_use_srtp) + else if (SSL_IS_DTLS(s) && SSL_get_srtp_profiles(s) + && type == TLSEXT_TYPE_use_srtp) { if(ssl_parse_clienthello_use_srtp_ext(s, data, size, al)) @@ -1544,8 +1545,7 @@ } #ifndef OPENSSL_NO_EC - else if (type 
== TLSEXT_TYPE_ec_point_formats && - s->version != DTLS1_VERSION) + else if (type == TLSEXT_TYPE_ec_point_formats) { unsigned char *sdata = data; int ecpointformatlist_length = *(sdata++); @@ -1556,15 +1556,18 @@ *al = TLS1_AD_DECODE_ERROR; return 0; } - s->session->tlsext_ecpointformatlist_length = 0; - if (s->session->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->session->tlsext_ecpointformatlist); - if ((s->session->tlsext_ecpointformatlist = OPENSSL_malloc(ecpointformatlist_length)) == NULL) + if (!s->hit) { - *al = TLS1_AD_INTERNAL_ERROR; - return 0; + s->session->tlsext_ecpointformatlist_length = 0; + if (s->session->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->session->tlsext_ecpointformatlist); + if ((s->session->tlsext_ecpointformatlist = OPENSSL_malloc(ecpointformatlist_length)) == NULL) + { + *al = TLS1_AD_INTERNAL_ERROR; + return 0; + } + s->session->tlsext_ecpointformatlist_length = ecpointformatlist_length; + memcpy(s->session->tlsext_ecpointformatlist, sdata, ecpointformatlist_length); } - s->session->tlsext_ecpointformatlist_length = ecpointformatlist_length; - memcpy(s->session->tlsext_ecpointformatlist, sdata, ecpointformatlist_length); #if 0 fprintf(stderr,"ssl_parse_serverhello_tlsext s->session->tlsext_ecpointformatlist "); sdata = s->session->tlsext_ecpointformatlist; @@ -1696,7 +1699,7 @@ } #endif #ifndef OPENSSL_NO_SRTP - else if (type == TLSEXT_TYPE_use_srtp) + else if (SSL_IS_DTLS(s) && type == TLSEXT_TYPE_use_srtp) { if(ssl_parse_serverhello_use_srtp_ext(s, data, size, al)) @@ -2345,7 +2348,10 @@ HMAC_Final(&hctx, tick_hmac, NULL); HMAC_CTX_cleanup(&hctx); if (CRYPTO_memcmp(tick_hmac, etick + eticklen, mlen)) + { + EVP_CIPHER_CTX_cleanup(&ctx); return 2; + } /* Attempt to decrypt session data */ /* Move p after IV to start of encrypted ticket, update length */ p = etick + 16 + EVP_CIPHER_CTX_iv_length(&ctx); @@ -2358,7 +2364,11 @@ } EVP_DecryptUpdate(&ctx, sdec, &slen, p, eticklen); if (EVP_DecryptFinal(&ctx, sdec + slen, &mlen) 
<= 0) + { + EVP_CIPHER_CTX_cleanup(&ctx); + OPENSSL_free(sdec); return 2; + } slen += mlen; EVP_CIPHER_CTX_cleanup(&ctx); p = sdec; diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/tls1.h nodejs-0.11.15/deps/openssl/openssl/ssl/tls1.h --- nodejs-0.11.13/deps/openssl/openssl/ssl/tls1.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/tls1.h 2015-01-20 21:22:17.000000000 +0000 @@ -159,17 +159,19 @@ #define TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES 0 +#define TLS1_VERSION 0x0301 +#define TLS1_1_VERSION 0x0302 #define TLS1_2_VERSION 0x0303 -#define TLS1_2_VERSION_MAJOR 0x03 -#define TLS1_2_VERSION_MINOR 0x03 +#define TLS_MAX_VERSION TLS1_2_VERSION + +#define TLS1_VERSION_MAJOR 0x03 +#define TLS1_VERSION_MINOR 0x01 -#define TLS1_1_VERSION 0x0302 #define TLS1_1_VERSION_MAJOR 0x03 #define TLS1_1_VERSION_MINOR 0x02 -#define TLS1_VERSION 0x0301 -#define TLS1_VERSION_MAJOR 0x03 -#define TLS1_VERSION_MINOR 0x01 +#define TLS1_2_VERSION_MAJOR 0x03 +#define TLS1_2_VERSION_MINOR 0x03 #define TLS1_get_version(s) \ ((s->version >> 8) == TLS1_VERSION_MAJOR ? 
s->version : 0) @@ -187,6 +189,7 @@ #define TLS1_AD_PROTOCOL_VERSION 70 /* fatal */ #define TLS1_AD_INSUFFICIENT_SECURITY 71 /* fatal */ #define TLS1_AD_INTERNAL_ERROR 80 /* fatal */ +#define TLS1_AD_INAPPROPRIATE_FALLBACK 86 /* fatal */ #define TLS1_AD_USER_CANCELLED 90 #define TLS1_AD_NO_RENEGOTIATION 100 /* codes 110-114 are from RFC3546 */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/ssl/tls_srp.c nodejs-0.11.15/deps/openssl/openssl/ssl/tls_srp.c --- nodejs-0.11.13/deps/openssl/openssl/ssl/tls_srp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/ssl/tls_srp.c 2015-01-20 21:22:17.000000000 +0000 @@ -408,16 +408,46 @@ return ret; } -int SRP_Calc_A_param(SSL *s) +int srp_verify_server_param(SSL *s, int *al) { - unsigned char rnd[SSL_MAX_MASTER_KEY_LENGTH]; + SRP_CTX *srp = &s->srp_ctx; + /* Sanity check parameters: we can quickly check B % N == 0 + * by checking B != 0 since B < N + */ + if (BN_ucmp(srp->g, srp->N) >=0 || BN_ucmp(srp->B, srp->N) >= 0 + || BN_is_zero(srp->B)) + { + *al = SSL3_AD_ILLEGAL_PARAMETER; + return 0; + } + + if (BN_num_bits(srp->N) < srp->strength) + { + *al = TLS1_AD_INSUFFICIENT_SECURITY; + return 0; + } + + if (srp->SRP_verify_param_callback) + { + if (srp->SRP_verify_param_callback(s, srp->SRP_cb_arg) <= 0) + { + *al = TLS1_AD_INSUFFICIENT_SECURITY; + return 0; + } + } + else if(!SRP_check_known_gN_param(srp->g, srp->N)) + { + *al = TLS1_AD_INSUFFICIENT_SECURITY; + return 0; + } - if (BN_num_bits(s->srp_ctx.N) < s->srp_ctx.strength) - return -1; + return 1; + } + - if (s->srp_ctx.SRP_verify_param_callback ==NULL && - !SRP_check_known_gN_param(s->srp_ctx.g,s->srp_ctx.N)) - return -1 ; +int SRP_Calc_A_param(SSL *s) + { + unsigned char rnd[SSL_MAX_MASTER_KEY_LENGTH]; RAND_bytes(rnd, sizeof(rnd)); s->srp_ctx.a = BN_bin2bn(rnd, sizeof(rnd), s->srp_ctx.a); @@ -426,10 +456,6 @@ if (!(s->srp_ctx.A = SRP_Calc_A(s->srp_ctx.a,s->srp_ctx.N,s->srp_ctx.g))) return -1; - /* We can have a callback to verify SRP param!! 
*/ - if (s->srp_ctx.SRP_verify_param_callback !=NULL) - return s->srp_ctx.SRP_verify_param_callback(s,s->srp_ctx.SRP_cb_arg); - return 1; } diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/constant_time_test.c nodejs-0.11.15/deps/openssl/openssl/test/constant_time_test.c --- nodejs-0.11.13/deps/openssl/openssl/test/constant_time_test.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/constant_time_test.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,330 @@ +/* crypto/constant_time_test.c */ +/* + * Utilities for constant-time cryptography. + * + * Author: Emilia Kasper (emilia@openssl.org) + * Based on previous work by Bodo Moeller, Emilia Kasper, Adam Langley + * (Google). + * ==================================================================== + * Copyright (c) 2014 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +#include "../crypto/constant_time_locl.h" + +#include <limits.h> +#include <stdio.h> +#include <stdlib.h> + +static const unsigned int CONSTTIME_TRUE = (unsigned)(~0); +static const unsigned int CONSTTIME_FALSE = 0; +static const unsigned char CONSTTIME_TRUE_8 = 0xff; +static const unsigned char CONSTTIME_FALSE_8 = 0; + +static int test_binary_op(unsigned int (*op)(unsigned int a, unsigned int b), + const char* op_name, unsigned int a, unsigned int b, int is_true) + { + unsigned c = op(a, b); + if (is_true && c != CONSTTIME_TRUE) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %du " + "(TRUE), got %du\n", op_name, a, b, CONSTTIME_TRUE, c); + return 1; + } + else if (!is_true && c != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %du " + "(FALSE), got %du\n", op_name, a, b, CONSTTIME_FALSE, + c); + return 1; + } + return 0; + } + +static int test_binary_op_8(unsigned char (*op)(unsigned int a, unsigned int b), + const char* op_name, unsigned int a, unsigned int b, int is_true) + { + unsigned char c = op(a, b); + if (is_true && c != CONSTTIME_TRUE_8) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %u " + "(TRUE), got %u\n", op_name, a, b, CONSTTIME_TRUE_8, c); + return 1; + } + else if (!is_true && c != CONSTTIME_FALSE_8) + { + fprintf(stderr, "Test failed for %s(%du, %du): expected %u " + "(FALSE), got %u\n", op_name, a, b, CONSTTIME_FALSE_8, + c); + return 1; + } + return 0; + } + +static int test_is_zero(unsigned int a) + { + unsigned int c = constant_time_is_zero(a); + if (a == 0 && c != CONSTTIME_TRUE) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %du (TRUE), got %du\n", a, CONSTTIME_TRUE, c); + return 1; + } + else if (a != 0 && c != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %du (FALSE), got %du\n", a, CONSTTIME_FALSE, + c); + return 1; + } + return 0; + } + +static int test_is_zero_8(unsigned int a) + { 
+ unsigned char c = constant_time_is_zero_8(a); + if (a == 0 && c != CONSTTIME_TRUE_8) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %u (TRUE), got %u\n", a, CONSTTIME_TRUE_8, c); + return 1; + } + else if (a != 0 && c != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for constant_time_is_zero(%du): " + "expected %u (FALSE), got %u\n", a, CONSTTIME_FALSE_8, + c); + return 1; + } + return 0; + } + +static int test_select(unsigned int a, unsigned int b) + { + unsigned int selected = constant_time_select(CONSTTIME_TRUE, a, b); + if (selected != a) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %du," + "%du): expected %du(first value), got %du\n", + CONSTTIME_TRUE, a, b, a, selected); + return 1; + } + selected = constant_time_select(CONSTTIME_FALSE, a, b); + if (selected != b) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %du," + "%du): expected %du(second value), got %du\n", + CONSTTIME_FALSE, a, b, b, selected); + return 1; + } + return 0; + } + +static int test_select_8(unsigned char a, unsigned char b) + { + unsigned char selected = constant_time_select_8(CONSTTIME_TRUE_8, a, b); + if (selected != a) + { + fprintf(stderr, "Test failed for constant_time_select(%u, %u," + "%u): expected %u(first value), got %u\n", + CONSTTIME_TRUE, a, b, a, selected); + return 1; + } + selected = constant_time_select_8(CONSTTIME_FALSE_8, a, b); + if (selected != b) + { + fprintf(stderr, "Test failed for constant_time_select(%u, %u," + "%u): expected %u(second value), got %u\n", + CONSTTIME_FALSE, a, b, b, selected); + return 1; + } + return 0; + } + +static int test_select_int(int a, int b) + { + int selected = constant_time_select_int(CONSTTIME_TRUE, a, b); + if (selected != a) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %d," + "%d): expected %d(first value), got %d\n", + CONSTTIME_TRUE, a, b, a, selected); + return 1; + } + selected = constant_time_select_int(CONSTTIME_FALSE, a, b); + 
if (selected != b) + { + fprintf(stderr, "Test failed for constant_time_select(%du, %d," + "%d): expected %d(second value), got %d\n", + CONSTTIME_FALSE, a, b, b, selected); + return 1; + } + return 0; + } + +static int test_eq_int(int a, int b) + { + unsigned int equal = constant_time_eq_int(a, b); + if (a == b && equal != CONSTTIME_TRUE) + { + fprintf(stderr, "Test failed for constant_time_eq_int(%d, %d): " + "expected %du(TRUE), got %du\n", + a, b, CONSTTIME_TRUE, equal); + return 1; + } + else if (a != b && equal != CONSTTIME_FALSE) + { + fprintf(stderr, "Test failed for constant_time_eq_int(%d, %d): " + "expected %du(FALSE), got %du\n", + a, b, CONSTTIME_FALSE, equal); + return 1; + } + return 0; + } + +static int test_eq_int_8(int a, int b) + { + unsigned char equal = constant_time_eq_int_8(a, b); + if (a == b && equal != CONSTTIME_TRUE_8) + { + fprintf(stderr, "Test failed for constant_time_eq_int_8(%d, %d): " + "expected %u(TRUE), got %u\n", + a, b, CONSTTIME_TRUE_8, equal); + return 1; + } + else if (a != b && equal != CONSTTIME_FALSE_8) + { + fprintf(stderr, "Test failed for constant_time_eq_int_8(%d, %d): " + "expected %u(FALSE), got %u\n", + a, b, CONSTTIME_FALSE_8, equal); + return 1; + } + return 0; + } + +static unsigned int test_values[] = {0, 1, 1024, 12345, 32000, UINT_MAX/2-1, + UINT_MAX/2, UINT_MAX/2+1, UINT_MAX-1, + UINT_MAX}; + +static unsigned char test_values_8[] = {0, 1, 2, 20, 32, 127, 128, 129, 255}; + +static int signed_test_values[] = {0, 1, -1, 1024, -1024, 12345, -12345, + 32000, -32000, INT_MAX, INT_MIN, INT_MAX-1, + INT_MIN+1}; + + +int main(int argc, char *argv[]) + { + unsigned int a, b, i, j; + int c, d; + unsigned char e, f; + int num_failed = 0, num_all = 0; + fprintf(stdout, "Testing constant time operations...\n"); + + for (i = 0; i < sizeof(test_values)/sizeof(int); ++i) + { + a = test_values[i]; + num_failed += test_is_zero(a); + num_failed += test_is_zero_8(a); + num_all += 2; + for (j = 0; j < 
sizeof(test_values)/sizeof(int); ++j) + { + b = test_values[j]; + num_failed += test_binary_op(&constant_time_lt, + "constant_time_lt", a, b, a < b); + num_failed += test_binary_op_8(&constant_time_lt_8, + "constant_time_lt_8", a, b, a < b); + num_failed += test_binary_op(&constant_time_lt, + "constant_time_lt_8", b, a, b < a); + num_failed += test_binary_op_8(&constant_time_lt_8, + "constant_time_lt_8", b, a, b < a); + num_failed += test_binary_op(&constant_time_ge, + "constant_time_ge", a, b, a >= b); + num_failed += test_binary_op_8(&constant_time_ge_8, + "constant_time_ge_8", a, b, a >= b); + num_failed += test_binary_op(&constant_time_ge, + "constant_time_ge", b, a, b >= a); + num_failed += test_binary_op_8(&constant_time_ge_8, + "constant_time_ge_8", b, a, b >= a); + num_failed += test_binary_op(&constant_time_eq, + "constant_time_eq", a, b, a == b); + num_failed += test_binary_op_8(&constant_time_eq_8, + "constant_time_eq_8", a, b, a == b); + num_failed += test_binary_op(&constant_time_eq, + "constant_time_eq", b, a, b == a); + num_failed += test_binary_op_8(&constant_time_eq_8, + "constant_time_eq_8", b, a, b == a); + num_failed += test_select(a, b); + num_all += 13; + } + } + + for (i = 0; i < sizeof(signed_test_values)/sizeof(int); ++i) + { + c = signed_test_values[i]; + for (j = 0; j < sizeof(signed_test_values)/sizeof(int); ++j) + { + d = signed_test_values[j]; + num_failed += test_select_int(c, d); + num_failed += test_eq_int(c, d); + num_failed += test_eq_int_8(c, d); + num_all += 3; + } + } + + for (i = 0; i < sizeof(test_values_8); ++i) + { + e = test_values_8[i]; + for (j = 0; j < sizeof(test_values_8); ++j) + { + f = test_values_8[j]; + num_failed += test_select_8(e, f); + num_all += 1; + } + } + + if (!num_failed) + { + fprintf(stdout, "ok (ran %d tests)\n", num_all); + return EXIT_SUCCESS; + } + else + { + fprintf(stdout, "%d of %d tests failed!\n", num_failed, num_all); + return EXIT_FAILURE; + } + } diff -Nru 
nodejs-0.11.13/deps/openssl/openssl/test/ectest.c nodejs-0.11.15/deps/openssl/openssl/test/ectest.c --- nodejs-0.11.13/deps/openssl/openssl/test/ectest.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/ectest.c 2015-01-20 21:22:17.000000000 +0000 @@ -199,6 +199,7 @@ EC_POINT *P = EC_POINT_new(group); EC_POINT *Q = EC_POINT_new(group); BN_CTX *ctx = BN_CTX_new(); + int i; n1 = BN_new(); n2 = BN_new(); order = BN_new(); fprintf(stdout, "verify group order ..."); @@ -212,21 +213,56 @@ if (!EC_POINT_mul(group, Q, order, NULL, NULL, ctx)) ABORT; if (!EC_POINT_is_at_infinity(group, Q)) ABORT; fprintf(stdout, " ok\n"); - fprintf(stdout, "long/negative scalar tests ... "); - if (!BN_one(n1)) ABORT; - /* n1 = 1 - order */ - if (!BN_sub(n1, n1, order)) ABORT; - if(!EC_POINT_mul(group, Q, NULL, P, n1, ctx)) ABORT; - if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; - /* n2 = 1 + order */ - if (!BN_add(n2, order, BN_value_one())) ABORT; - if(!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; - if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; - /* n2 = (1 - order) * (1 + order) */ - if (!BN_mul(n2, n1, n2, ctx)) ABORT; - if(!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; - if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + fprintf(stdout, "long/negative scalar tests "); + for (i = 1; i <= 2; i++) + { + const BIGNUM *scalars[6]; + const EC_POINT *points[6]; + + fprintf(stdout, i == 1 ? + "allowing precomputation ... " : + "without precomputation ... "); + if (!BN_set_word(n1, i)) ABORT; + /* If i == 1, P will be the predefined generator for which + * EC_GROUP_precompute_mult has set up precomputation. 
*/ + if (!EC_POINT_mul(group, P, n1, NULL, NULL, ctx)) ABORT; + + if (!BN_one(n1)) ABORT; + /* n1 = 1 - order */ + if (!BN_sub(n1, n1, order)) ABORT; + if (!EC_POINT_mul(group, Q, NULL, P, n1, ctx)) ABORT; + if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + + /* n2 = 1 + order */ + if (!BN_add(n2, order, BN_value_one())) ABORT; + if (!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; + if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + + /* n2 = (1 - order) * (1 + order) = 1 - order^2 */ + if (!BN_mul(n2, n1, n2, ctx)) ABORT; + if (!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; + if (0 != EC_POINT_cmp(group, Q, P, ctx)) ABORT; + + /* n2 = order^2 - 1 */ + BN_set_negative(n2, 0); + if (!EC_POINT_mul(group, Q, NULL, P, n2, ctx)) ABORT; + /* Add P to verify the result. */ + if (!EC_POINT_add(group, Q, Q, P, ctx)) ABORT; + if (!EC_POINT_is_at_infinity(group, Q)) ABORT; + + /* Exercise EC_POINTs_mul, including corner cases. */ + if (EC_POINT_is_at_infinity(group, P)) ABORT; + scalars[0] = n1; points[0] = Q; /* => infinity */ + scalars[1] = n2; points[1] = P; /* => -P */ + scalars[2] = n1; points[2] = Q; /* => infinity */ + scalars[3] = n2; points[3] = Q; /* => infinity */ + scalars[4] = n1; points[4] = P; /* => P */ + scalars[5] = n2; points[5] = Q; /* => infinity */ + if (!EC_POINTs_mul(group, P, NULL, 6, points, scalars, ctx)) ABORT; + if (!EC_POINT_is_at_infinity(group, P)) ABORT; + } fprintf(stdout, "ok\n"); + EC_POINT_free(P); EC_POINT_free(Q); BN_free(n1); diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/exptest.c nodejs-0.11.15/deps/openssl/openssl/test/exptest.c --- nodejs-0.11.13/deps/openssl/openssl/test/exptest.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/exptest.c 2015-01-20 21:22:17.000000000 +0000 @@ -71,6 +71,43 @@ static const char rnd_seed[] = "string to make the random number generator think it has entropy"; +/* test_exp_mod_zero tests that x**0 mod 1 == 0. It returns zero on success. 
*/ +static int test_exp_mod_zero() { + BIGNUM a, p, m; + BIGNUM r; + BN_CTX *ctx = BN_CTX_new(); + int ret = 1; + + BN_init(&m); + BN_one(&m); + + BN_init(&a); + BN_one(&a); + + BN_init(&p); + BN_zero(&p); + + BN_init(&r); + BN_mod_exp(&r, &a, &p, &m, ctx); + BN_CTX_free(ctx); + + if (BN_is_zero(&r)) + ret = 0; + else + { + printf("1**0 mod 1 = "); + BN_print_fp(stdout, &r); + printf(", should be 0\n"); + } + + BN_free(&r); + BN_free(&a); + BN_free(&p); + BN_free(&m); + + return ret; +} + int main(int argc, char *argv[]) { BN_CTX *ctx; @@ -190,7 +227,13 @@ ERR_remove_thread_state(NULL); CRYPTO_mem_leaks(out); BIO_free(out); - printf(" done\n"); + printf("\n"); + + if (test_exp_mod_zero() != 0) + goto err; + + printf("done\n"); + EXIT(0); err: ERR_load_crypto_strings(); diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/heartbeat_test.c nodejs-0.11.15/deps/openssl/openssl/test/heartbeat_test.c --- nodejs-0.11.13/deps/openssl/openssl/test/heartbeat_test.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/heartbeat_test.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,466 @@ +/* test/heartbeat_test.c */ +/* + * Unit test for TLS heartbeats. + * + * Acts as a regression test against the Heartbleed bug (CVE-2014-0160). + * + * Author: Mike Bland (mbland@acm.org, http://mike-bland.com/) + * Date: 2014-04-12 + * License: Creative Commons Attribution 4.0 International (CC By 4.0) + * http://creativecommons.org/licenses/by/4.0/deed.en_US + * + * OUTPUT + * ------ + * The program returns zero on success. It will print a message with a count + * of the number of failed tests and return nonzero if any tests fail. + * + * It will print the contents of the request and response buffers for each + * failing test. In a "fixed" version, all the tests should pass and there + * should be no output. 
+ * + * In a "bleeding" version, you'll see: + * + * test_dtls1_heartbleed failed: + * expected payload len: 0 + * received: 1024 + * sent 26 characters + * "HEARTBLEED " + * received 1024 characters + * "HEARTBLEED \xde\xad\xbe\xef..." + * ** test_dtls1_heartbleed failed ** + * + * The contents of the returned buffer in the failing test will depend on the + * contents of memory on your machine. + * + * MORE INFORMATION + * ---------------- + * http://mike-bland.com/2014/04/12/heartbleed.html + * http://mike-bland.com/tags/heartbleed.html + */ + +#define OPENSSL_UNIT_TEST + +#include "../test/testutil.h" + +#include "../ssl/ssl_locl.h" +#include <ctype.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#if !defined(OPENSSL_NO_HEARTBEATS) && !defined(OPENSSL_NO_UNIT_TEST) + +/* As per https://tools.ietf.org/html/rfc6520#section-4 */ +#define MIN_PADDING_SIZE 16 + +/* Maximum number of payload characters to print as test output */ +#define MAX_PRINTABLE_CHARACTERS 1024 + +typedef struct heartbeat_test_fixture + { + SSL_CTX *ctx; + SSL *s; + const char* test_case_name; + int (*process_heartbeat)(SSL* s); + unsigned char* payload; + int sent_payload_len; + int expected_return_value; + int return_payload_offset; + int expected_payload_len; + const char* expected_return_payload; + } HEARTBEAT_TEST_FIXTURE; + +static HEARTBEAT_TEST_FIXTURE set_up(const char* const test_case_name, + const SSL_METHOD* meth) + { + HEARTBEAT_TEST_FIXTURE fixture; + int setup_ok = 1; + memset(&fixture, 0, sizeof(fixture)); + fixture.test_case_name = test_case_name; + + fixture.ctx = SSL_CTX_new(meth); + if (!fixture.ctx) + { + fprintf(stderr, "Failed to allocate SSL_CTX for test: %s\n", + test_case_name); + setup_ok = 0; + goto fail; + } + + fixture.s = SSL_new(fixture.ctx); + if (!fixture.s) + { + fprintf(stderr, "Failed to allocate SSL for test: %s\n", test_case_name); + setup_ok = 0; + goto fail; + } + + if (!ssl_init_wbio_buffer(fixture.s, 1)) + { + fprintf(stderr, "Failed 
to set up wbio buffer for test: %s\n", + test_case_name); + setup_ok = 0; + goto fail; + } + + if (!ssl3_setup_buffers(fixture.s)) + { + fprintf(stderr, "Failed to setup buffers for test: %s\n", + test_case_name); + setup_ok = 0; + goto fail; + } + + /* Clear the memory for the return buffer, since this isn't automatically + * zeroed in opt mode and will cause spurious test failures that will change + * with each execution. + */ + memset(fixture.s->s3->wbuf.buf, 0, fixture.s->s3->wbuf.len); + + fail: + if (!setup_ok) + { + ERR_print_errors_fp(stderr); + exit(EXIT_FAILURE); + } + return fixture; + } + +static HEARTBEAT_TEST_FIXTURE set_up_dtls(const char* const test_case_name) + { + HEARTBEAT_TEST_FIXTURE fixture = set_up(test_case_name, + DTLSv1_server_method()); + fixture.process_heartbeat = dtls1_process_heartbeat; + + /* As per dtls1_get_record(), skipping the following from the beginning of + * the returned heartbeat message: + * type-1 byte; version-2 bytes; sequence number-8 bytes; length-2 bytes + * + * And then skipping the 1-byte type encoded by process_heartbeat for + * a total of 14 bytes, at which point we can grab the length and the + * payload we seek. + */ + fixture.return_payload_offset = 14; + return fixture; + } + +/* Needed by ssl3_write_bytes() */ +static int dummy_handshake(SSL* s) + { + return 1; + } + +static HEARTBEAT_TEST_FIXTURE set_up_tls(const char* const test_case_name) + { + HEARTBEAT_TEST_FIXTURE fixture = set_up(test_case_name, + TLSv1_server_method()); + fixture.process_heartbeat = tls1_process_heartbeat; + fixture.s->handshake_func = dummy_handshake; + + /* As per do_ssl3_write(), skipping the following from the beginning of + * the returned heartbeat message: + * type-1 byte; version-2 bytes; length-2 bytes + * + * And then skipping the 1-byte type encoded by process_heartbeat for + * a total of 6 bytes, at which point we can grab the length and the payload + * we seek. 
+ */ + fixture.return_payload_offset = 6; + return fixture; + } + +static void tear_down(HEARTBEAT_TEST_FIXTURE fixture) + { + ERR_print_errors_fp(stderr); + SSL_free(fixture.s); + SSL_CTX_free(fixture.ctx); + } + +static void print_payload(const char* const prefix, + const unsigned char *payload, const int n) + { + const int end = n < MAX_PRINTABLE_CHARACTERS ? n + : MAX_PRINTABLE_CHARACTERS; + int i = 0; + + printf("%s %d character%s", prefix, n, n == 1 ? "" : "s"); + if (end != n) printf(" (first %d shown)", end); + printf("\n \""); + + for (; i != end; ++i) + { + const unsigned char c = payload[i]; + if (isprint(c)) fputc(c, stdout); + else printf("\\x%02x", c); + } + printf("\"\n"); + } + +static int execute_heartbeat(HEARTBEAT_TEST_FIXTURE fixture) + { + int result = 0; + SSL* s = fixture.s; + unsigned char *payload = fixture.payload; + unsigned char sent_buf[MAX_PRINTABLE_CHARACTERS + 1]; + int return_value; + unsigned const char *p; + int actual_payload_len; + + s->s3->rrec.data = payload; + s->s3->rrec.length = strlen((const char*)payload); + *payload++ = TLS1_HB_REQUEST; + s2n(fixture.sent_payload_len, payload); + + /* Make a local copy of the request, since it gets overwritten at some + * point */ + memcpy((char *)sent_buf, (const char*)payload, sizeof(sent_buf)); + + return_value = fixture.process_heartbeat(s); + + if (return_value != fixture.expected_return_value) + { + printf("%s failed: expected return value %d, received %d\n", + fixture.test_case_name, fixture.expected_return_value, + return_value); + result = 1; + } + + /* If there is any byte alignment, it will be stored in wbuf.offset. 
*/ + p = &(s->s3->wbuf.buf[ + fixture.return_payload_offset + s->s3->wbuf.offset]); + actual_payload_len = 0; + n2s(p, actual_payload_len); + + if (actual_payload_len != fixture.expected_payload_len) + { + printf("%s failed:\n expected payload len: %d\n received: %d\n", + fixture.test_case_name, fixture.expected_payload_len, + actual_payload_len); + print_payload("sent", sent_buf, strlen((const char*)sent_buf)); + print_payload("received", p, actual_payload_len); + result = 1; + } + else + { + char* actual_payload = BUF_strndup((const char*)p, actual_payload_len); + if (strcmp(actual_payload, fixture.expected_return_payload) != 0) + { + printf("%s failed:\n expected payload: \"%s\"\n received: \"%s\"\n", + fixture.test_case_name, fixture.expected_return_payload, + actual_payload); + result = 1; + } + OPENSSL_free(actual_payload); + } + + if (result != 0) + { + printf("** %s failed **\n--------\n", fixture.test_case_name); + } + return result; + } + +static int honest_payload_size(unsigned char payload_buf[]) + { + /* Omit three-byte pad at the beginning for type and payload length */ + return strlen((const char*)&payload_buf[3]) - MIN_PADDING_SIZE; + } + +#define SETUP_HEARTBEAT_TEST_FIXTURE(type)\ + SETUP_TEST_FIXTURE(HEARTBEAT_TEST_FIXTURE, set_up_##type) + +#define EXECUTE_HEARTBEAT_TEST()\ + EXECUTE_TEST(execute_heartbeat, tear_down) + +static int test_dtls1_not_bleeding() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " Not bleeding, sixteen spaces of padding" + " "; + const int payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = "Not bleeding, sixteen spaces of padding"; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_not_bleeding_empty_payload() + { + 
int payload_buf_len; + + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Three-byte pad at the beginning for type and payload length, plus a NUL + * at the end */ + unsigned char payload_buf[4 + MIN_PADDING_SIZE]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_heartbleed() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " HEARTBLEED "; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_heartbleed_empty_payload() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Excluding the NUL at the end, one byte short of type + payload length + + * minimum padding */ + unsigned char payload_buf[MIN_PADDING_SIZE + 3]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_dtls1_heartbleed_excessive_plaintext_length() + { + SETUP_HEARTBEAT_TEST_FIXTURE(dtls); + /* Excluding the NUL at the end, one byte in excess of maximum allowed + * heartbeat message length */ + unsigned char payload_buf[SSL3_RT_MAX_PLAIN_LENGTH + 2]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + + 
fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = honest_payload_size(payload_buf); + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_not_bleeding() + { + SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " Not bleeding, sixteen spaces of padding" + " "; + const int payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = "Not bleeding, sixteen spaces of padding"; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_not_bleeding_empty_payload() + { + int payload_buf_len; + + SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Three-byte pad at the beginning for type and payload length, plus a NUL + * at the end */ + unsigned char payload_buf[4 + MIN_PADDING_SIZE]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + payload_buf_len = honest_payload_size(payload_buf); + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = payload_buf_len; + fixture.expected_return_value = 0; + fixture.expected_payload_len = payload_buf_len; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_heartbleed() + { + SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Three-byte pad at the beginning for type and payload length */ + unsigned char payload_buf[] = " HEARTBLEED "; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +static int test_tls1_heartbleed_empty_payload() + { + 
SETUP_HEARTBEAT_TEST_FIXTURE(tls); + /* Excluding the NUL at the end, one byte short of type + payload length + + * minimum padding */ + unsigned char payload_buf[MIN_PADDING_SIZE + 3]; + memset(payload_buf, ' ', sizeof(payload_buf)); + payload_buf[sizeof(payload_buf) - 1] = '\0'; + + fixture.payload = &payload_buf[0]; + fixture.sent_payload_len = MAX_PRINTABLE_CHARACTERS; + fixture.expected_return_value = 0; + fixture.expected_payload_len = 0; + fixture.expected_return_payload = ""; + EXECUTE_HEARTBEAT_TEST(); + } + +#undef EXECUTE_HEARTBEAT_TEST +#undef SETUP_HEARTBEAT_TEST_FIXTURE + +int main(int argc, char *argv[]) + { + int num_failed; + + SSL_library_init(); + SSL_load_error_strings(); + + num_failed = test_dtls1_not_bleeding() + + test_dtls1_not_bleeding_empty_payload() + + test_dtls1_heartbleed() + + test_dtls1_heartbleed_empty_payload() + + /* The following test causes an assertion failure at + * ssl/d1_pkt.c:dtls1_write_bytes() in versions prior to 1.0.1g: */ + (OPENSSL_VERSION_NUMBER >= 0x1000107fL ? + test_dtls1_heartbleed_excessive_plaintext_length() : 0) + + test_tls1_not_bleeding() + + test_tls1_not_bleeding_empty_payload() + + test_tls1_heartbleed() + + test_tls1_heartbleed_empty_payload() + + 0; + + ERR_print_errors_fp(stderr); + + if (num_failed != 0) + { + printf("%d test%s failed\n", num_failed, num_failed != 1 ? 
"s" : ""); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; + } + +#else /* OPENSSL_NO_HEARTBEATS*/ + +int main(int argc, char *argv[]) + { + return EXIT_SUCCESS; + } +#endif /* OPENSSL_NO_HEARTBEATS */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/ideatest.c nodejs-0.11.15/deps/openssl/openssl/test/ideatest.c --- nodejs-0.11.13/deps/openssl/openssl/test/ideatest.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/ideatest.c 2015-01-20 21:22:17.000000000 +0000 @@ -199,10 +199,10 @@ } memcpy(cfb_tmp,cfb_iv,8); n=0; - idea_cfb64_encrypt(cfb_buf1,cfb_buf2,(long)17,&eks, + idea_cfb64_encrypt(cfb_buf1,cfb_buf2,(long)13,&eks, cfb_tmp,&n,IDEA_DECRYPT); - idea_cfb64_encrypt(&(cfb_buf1[17]),&(cfb_buf2[17]), - (long)CFB_TEST_SIZE-17,&dks, + idea_cfb64_encrypt(&(cfb_buf1[13]),&(cfb_buf2[13]), + (long)CFB_TEST_SIZE-13,&eks, cfb_tmp,&n,IDEA_DECRYPT); if (memcmp(plain,cfb_buf2,CFB_TEST_SIZE) != 0) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/Makefile nodejs-0.11.15/deps/openssl/openssl/test/Makefile --- nodejs-0.11.13/deps/openssl/openssl/test/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -63,6 +63,8 @@ JPAKETEST= jpaketest SRPTEST= srptest ASN1TEST= asn1test +HEARTBEATTEST= heartbeat_test +CONSTTIMETEST= constant_time_test TESTS= alltests @@ -74,7 +76,7 @@ $(RANDTEST)$(EXE_EXT) $(DHTEST)$(EXE_EXT) $(ENGINETEST)$(EXE_EXT) \ $(BFTEST)$(EXE_EXT) $(CASTTEST)$(EXE_EXT) $(SSLTEST)$(EXE_EXT) $(EXPTEST)$(EXE_EXT) $(DSATEST)$(EXE_EXT) $(RSATEST)$(EXE_EXT) \ $(EVPTEST)$(EXE_EXT) $(IGETEST)$(EXE_EXT) $(JPAKETEST)$(EXE_EXT) $(SRPTEST)$(EXE_EXT) \ - $(ASN1TEST)$(EXE_EXT) + $(ASN1TEST)$(EXE_EXT) $(HEARTBEATTEST)$(EXE_EXT) $(CONSTTIMETEST)$(EXE_EXT) # $(METHTEST)$(EXE_EXT) @@ -86,7 +88,9 @@ $(MDC2TEST).o $(RMDTEST).o \ $(RANDTEST).o $(DHTEST).o $(ENGINETEST).o $(CASTTEST).o \ $(BFTEST).o $(SSLTEST).o $(DSATEST).o $(EXPTEST).o $(RSATEST).o \ - $(EVPTEST).o 
$(IGETEST).o $(JPAKETEST).o $(ASN1TEST).o + $(EVPTEST).o $(IGETEST).o $(JPAKETEST).o $(ASN1TEST).o \ + $(HEARTBEATTEST).o $(CONSTTIMETEST).o + SRC= $(BNTEST).c $(ECTEST).c $(ECDSATEST).c $(ECDHTEST).c $(IDEATEST).c \ $(MD2TEST).c $(MD4TEST).c $(MD5TEST).c \ $(HMACTEST).c $(WPTEST).c \ @@ -94,7 +98,8 @@ $(DESTEST).c $(SHATEST).c $(SHA1TEST).c $(MDC2TEST).c $(RMDTEST).c \ $(RANDTEST).c $(DHTEST).c $(ENGINETEST).c $(CASTTEST).c \ $(BFTEST).c $(SSLTEST).c $(DSATEST).c $(EXPTEST).c $(RSATEST).c \ - $(EVPTEST).c $(IGETEST).c $(JPAKETEST).c $(SRPTEST).c $(ASN1TEST).c + $(EVPTEST).c $(IGETEST).c $(JPAKETEST).c $(SRPTEST).c $(ASN1TEST).c \ + $(HEARTBEATTEST).c $(CONSTTIMETEST).c EXHEADER= HEADER= $(EXHEADER) @@ -137,7 +142,7 @@ test_enc test_x509 test_rsa test_crl test_sid \ test_gen test_req test_pkcs7 test_verify test_dh test_dsa \ test_ss test_ca test_engine test_evp test_ssl test_tsa test_ige \ - test_jpake test_srp test_cms + test_jpake test_srp test_cms test_heartbeat test_constant_time test_evp: ../util/shlib_wrap.sh ./$(EVPTEST) evptests.txt @@ -227,7 +232,7 @@ @../util/shlib_wrap.sh ./$(BNTEST) >tmp.bntest @echo quit >>tmp.bntest @echo "running bc" - @<tmp.bntest sh -c "`sh ./bctest ignore`" | $(PERL) -e '$$i=0; while (<STDIN>) {if (/^test (.*)/) {print STDERR "\nverify $$1";} elsif (!/^0$$/) {die "\nFailed! bc: $$_";} else {print STDERR "."; $$i++;}} print STDERR "\n$$i tests passed\n"' + @<tmp.bntest sh -c "`sh ./bctest ignore`" | $(PERL) -e '$$i=0; while (<STDIN>) {if (/^test (.*)/) {print STDERR "\nverify $$1";} elsif (!/^0\r?$$/) {die "\nFailed! 
bc: $$_";} else {print STDERR "."; $$i++;}} print STDERR "\n$$i tests passed\n"' @echo 'test a^b%c implementations' ../util/shlib_wrap.sh ./$(EXPTEST) @@ -318,6 +323,13 @@ @echo "Test SRP" ../util/shlib_wrap.sh ./srptest +test_heartbeat: $(HEARTBEATTEST)$(EXE_EXT) + ../util/shlib_wrap.sh ./$(HEARTBEATTEST) + +test_constant_time: $(CONSTTIMETEST)$(EXE_EXT) + @echo "Test constant time utilites" + ../util/shlib_wrap.sh ./$(CONSTTIMETEST) + lint: lint -DLINT $(INCLUDES) $(SRC)>fluff @@ -364,6 +376,13 @@ LIBDEPS="$(PEX_LIBS) $$LIBRARIES $(EX_LIBS)" \ link_app.$${shlib_target} +BUILD_CMD_STATIC=shlib_target=; \ + LIBRARIES="$(DLIBSSL) $(DLIBCRYPTO) $(LIBKRB5)"; \ + $(MAKE) -f $(TOP)/Makefile.shared -e \ + APPNAME=$$target$(EXE_EXT) OBJECTS="$$target.o" \ + LIBDEPS="$(PEX_LIBS) $$LIBRARIES $(EX_LIBS)" \ + link_app.$${shlib_target} + $(RSATEST)$(EXE_EXT): $(RSATEST).o $(DLIBCRYPTO) @target=$(RSATEST); $(BUILD_CMD) @@ -469,6 +488,12 @@ $(SRPTEST)$(EXE_EXT): $(SRPTEST).o $(DLIBCRYPTO) @target=$(SRPTEST); $(BUILD_CMD) +$(HEARTBEATTEST)$(EXE_EXT): $(HEARTBEATTEST).o $(DLIBCRYPTO) + @target=$(HEARTBEATTEST); $(BUILD_CMD_STATIC) + +$(CONSTTIMETEST)$(EXE_EXT): $(CONSTTIMETEST).o + @target=$(CONSTTIMETEST) $(BUILD_CMD) + #$(AESTEST).o: $(AESTEST).c # $(CC) -c $(CFLAGS) -DINTERMEDIATE_VALUE_KAT -DTRACE_KAT_MCT $(AESTEST).c @@ -514,6 +539,9 @@ bntest.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h bntest.c casttest.o: ../e_os.h ../include/openssl/cast.h ../include/openssl/e_os2.h casttest.o: ../include/openssl/opensslconf.h casttest.c +constant_time_test.o: ../crypto/constant_time_locl.h ../e_os.h +constant_time_test.o: ../include/openssl/e_os2.h +constant_time_test.o: ../include/openssl/opensslconf.h constant_time_test.c destest.o: ../include/openssl/des.h ../include/openssl/des_old.h destest.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h destest.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h @@ -604,6 +632,27 @@ exptest.o: 
../include/openssl/ossl_typ.h ../include/openssl/rand.h exptest.o: ../include/openssl/safestack.h ../include/openssl/stack.h exptest.o: ../include/openssl/symhacks.h exptest.c +heartbeat_test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h +heartbeat_test.o: ../include/openssl/buffer.h ../include/openssl/comp.h +heartbeat_test.o: ../include/openssl/crypto.h ../include/openssl/dsa.h +heartbeat_test.o: ../include/openssl/dtls1.h ../include/openssl/e_os2.h +heartbeat_test.o: ../include/openssl/ec.h ../include/openssl/ecdh.h +heartbeat_test.o: ../include/openssl/ecdsa.h ../include/openssl/err.h +heartbeat_test.o: ../include/openssl/evp.h ../include/openssl/hmac.h +heartbeat_test.o: ../include/openssl/kssl.h ../include/openssl/lhash.h +heartbeat_test.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h +heartbeat_test.o: ../include/openssl/opensslconf.h +heartbeat_test.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h +heartbeat_test.o: ../include/openssl/pem.h ../include/openssl/pem2.h +heartbeat_test.o: ../include/openssl/pkcs7.h ../include/openssl/pqueue.h +heartbeat_test.o: ../include/openssl/rsa.h ../include/openssl/safestack.h +heartbeat_test.o: ../include/openssl/sha.h ../include/openssl/srtp.h +heartbeat_test.o: ../include/openssl/ssl.h ../include/openssl/ssl2.h +heartbeat_test.o: ../include/openssl/ssl23.h ../include/openssl/ssl3.h +heartbeat_test.o: ../include/openssl/stack.h ../include/openssl/symhacks.h +heartbeat_test.o: ../include/openssl/tls1.h ../include/openssl/x509.h +heartbeat_test.o: ../include/openssl/x509_vfy.h ../ssl/ssl_locl.h +heartbeat_test.o: ../test/testutil.h heartbeat_test.c hmactest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h hmactest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h hmactest.o: ../include/openssl/evp.h ../include/openssl/hmac.h diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/Makefile.save nodejs-0.11.15/deps/openssl/openssl/test/Makefile.save --- 
nodejs-0.11.13/deps/openssl/openssl/test/Makefile.save 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/Makefile.save 1970-01-01 00:00:00.000000000 +0000 @@ -1,729 +0,0 @@ -# -# test/Makefile -# - -DIR= test -TOP= .. -CC= cc -INCLUDES= -I$(TOP) -I../include $(KRB5_INCLUDES) -CFLAG= -g -MAKEDEPEND= $(TOP)/util/domd $(TOP) -MD $(MAKEDEPPROG) -PERL= perl -# KRB5 stuff -KRB5_INCLUDES= -LIBKRB5= - -PEX_LIBS= -EX_LIBS= #-lnsl -lsocket - -CFLAGS= $(INCLUDES) $(CFLAG) - -GENERAL=Makefile maketests.com \ - tests.com testenc.com tx509.com trsa.com tcrl.com tsid.com treq.com \ - tpkcs7.com tpkcs7d.com tverify.com testgen.com testss.com testssl.com \ - testca.com VMSca-response.1 VMSca-response.2 - -DLIBCRYPTO= ../libcrypto.a -DLIBSSL= ../libssl.a -LIBCRYPTO= -L.. -lcrypto -LIBSSL= -L.. -lssl - -BNTEST= bntest -ECTEST= ectest -ECDSATEST= ecdsatest -ECDHTEST= ecdhtest -EXPTEST= exptest -IDEATEST= ideatest -SHATEST= shatest -SHA1TEST= sha1test -SHA256TEST= sha256t -SHA512TEST= sha512t -MDC2TEST= mdc2test -RMDTEST= rmdtest -MD2TEST= md2test -MD4TEST= md4test -MD5TEST= md5test -HMACTEST= hmactest -WPTEST= wp_test -RC2TEST= rc2test -RC4TEST= rc4test -RC5TEST= rc5test -BFTEST= bftest -CASTTEST= casttest -DESTEST= destest -RANDTEST= randtest -DHTEST= dhtest -DSATEST= dsatest -METHTEST= methtest -SSLTEST= ssltest -RSATEST= rsa_test -ENGINETEST= enginetest -EVPTEST= evp_test -IGETEST= igetest -JPAKETEST= jpaketest -SRPTEST= srptest -ASN1TEST= asn1test - -TESTS= alltests - -EXE= $(BNTEST)$(EXE_EXT) $(ECTEST)$(EXE_EXT) $(ECDSATEST)$(EXE_EXT) $(ECDHTEST)$(EXE_EXT) $(IDEATEST)$(EXE_EXT) \ - $(MD2TEST)$(EXE_EXT) $(MD4TEST)$(EXE_EXT) $(MD5TEST)$(EXE_EXT) $(HMACTEST)$(EXE_EXT) $(WPTEST)$(EXE_EXT) \ - $(RC2TEST)$(EXE_EXT) $(RC4TEST)$(EXE_EXT) $(RC5TEST)$(EXE_EXT) \ - $(DESTEST)$(EXE_EXT) $(SHATEST)$(EXE_EXT) $(SHA1TEST)$(EXE_EXT) $(SHA256TEST)$(EXE_EXT) $(SHA512TEST)$(EXE_EXT) \ - $(MDC2TEST)$(EXE_EXT) $(RMDTEST)$(EXE_EXT) \ - $(RANDTEST)$(EXE_EXT) 
$(DHTEST)$(EXE_EXT) $(ENGINETEST)$(EXE_EXT) \ - $(BFTEST)$(EXE_EXT) $(CASTTEST)$(EXE_EXT) $(SSLTEST)$(EXE_EXT) $(EXPTEST)$(EXE_EXT) $(DSATEST)$(EXE_EXT) $(RSATEST)$(EXE_EXT) \ - $(EVPTEST)$(EXE_EXT) $(IGETEST)$(EXE_EXT) $(JPAKETEST)$(EXE_EXT) $(SRPTEST)$(EXE_EXT) \ - $(ASN1TEST)$(EXE_EXT) - -# $(METHTEST)$(EXE_EXT) - -OBJ= $(BNTEST).o $(ECTEST).o $(ECDSATEST).o $(ECDHTEST).o $(IDEATEST).o \ - $(MD2TEST).o $(MD4TEST).o $(MD5TEST).o \ - $(HMACTEST).o $(WPTEST).o \ - $(RC2TEST).o $(RC4TEST).o $(RC5TEST).o \ - $(DESTEST).o $(SHATEST).o $(SHA1TEST).o $(SHA256TEST).o $(SHA512TEST).o \ - $(MDC2TEST).o $(RMDTEST).o \ - $(RANDTEST).o $(DHTEST).o $(ENGINETEST).o $(CASTTEST).o \ - $(BFTEST).o $(SSLTEST).o $(DSATEST).o $(EXPTEST).o $(RSATEST).o \ - $(EVPTEST).o $(IGETEST).o $(JPAKETEST).o $(ASN1TEST).o -SRC= $(BNTEST).c $(ECTEST).c $(ECDSATEST).c $(ECDHTEST).c $(IDEATEST).c \ - $(MD2TEST).c $(MD4TEST).c $(MD5TEST).c \ - $(HMACTEST).c $(WPTEST).c \ - $(RC2TEST).c $(RC4TEST).c $(RC5TEST).c \ - $(DESTEST).c $(SHATEST).c $(SHA1TEST).c $(MDC2TEST).c $(RMDTEST).c \ - $(RANDTEST).c $(DHTEST).c $(ENGINETEST).c $(CASTTEST).c \ - $(BFTEST).c $(SSLTEST).c $(DSATEST).c $(EXPTEST).c $(RSATEST).c \ - $(EVPTEST).c $(IGETEST).c $(JPAKETEST).c $(SRPTEST).c $(ASN1TEST).c - -EXHEADER= -HEADER= $(EXHEADER) - -ALL= $(GENERAL) $(SRC) $(HEADER) - -top: - (cd ..; $(MAKE) DIRS=$(DIR) TESTS=$(TESTS) all) - -all: exe - -exe: $(EXE) dummytest$(EXE_EXT) - -files: - $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO - -links: - -generate: $(SRC) -$(SRC): - @sh $(TOP)/util/point.sh dummytest.c $@ - -errors: - -install: - -tags: - ctags $(SRC) - -tests: exe apps $(TESTS) - -apps: - @(cd ..; $(MAKE) DIRS=apps all) - -alltests: \ - test_des test_idea test_sha test_md4 test_md5 test_hmac \ - test_md2 test_mdc2 test_wp \ - test_rmd test_rc2 test_rc4 test_rc5 test_bf test_cast test_aes \ - test_rand test_bn test_ec test_ecdsa test_ecdh \ - test_enc test_x509 test_rsa test_crl test_sid \ - test_gen test_req 
test_pkcs7 test_verify test_dh test_dsa \ - test_ss test_ca test_engine test_evp test_ssl test_tsa test_ige \ - test_jpake test_srp test_cms - -test_evp: - ../util/shlib_wrap.sh ./$(EVPTEST) evptests.txt - -test_des: - ../util/shlib_wrap.sh ./$(DESTEST) - -test_idea: - ../util/shlib_wrap.sh ./$(IDEATEST) - -test_sha: - ../util/shlib_wrap.sh ./$(SHATEST) - ../util/shlib_wrap.sh ./$(SHA1TEST) - ../util/shlib_wrap.sh ./$(SHA256TEST) - ../util/shlib_wrap.sh ./$(SHA512TEST) - -test_mdc2: - ../util/shlib_wrap.sh ./$(MDC2TEST) - -test_md5: - ../util/shlib_wrap.sh ./$(MD5TEST) - -test_md4: - ../util/shlib_wrap.sh ./$(MD4TEST) - -test_hmac: - ../util/shlib_wrap.sh ./$(HMACTEST) - -test_wp: - ../util/shlib_wrap.sh ./$(WPTEST) - -test_md2: - ../util/shlib_wrap.sh ./$(MD2TEST) - -test_rmd: - ../util/shlib_wrap.sh ./$(RMDTEST) - -test_bf: - ../util/shlib_wrap.sh ./$(BFTEST) - -test_cast: - ../util/shlib_wrap.sh ./$(CASTTEST) - -test_rc2: - ../util/shlib_wrap.sh ./$(RC2TEST) - -test_rc4: - ../util/shlib_wrap.sh ./$(RC4TEST) - -test_rc5: - ../util/shlib_wrap.sh ./$(RC5TEST) - -test_rand: - ../util/shlib_wrap.sh ./$(RANDTEST) - -test_enc: - @sh ./testenc - -test_x509: - echo test normal x509v1 certificate - sh ./tx509 2>/dev/null - echo test first x509v3 certificate - sh ./tx509 v3-cert1.pem 2>/dev/null - echo test second x509v3 certificate - sh ./tx509 v3-cert2.pem 2>/dev/null - -test_rsa: $(RSATEST)$(EXE_EXT) - @sh ./trsa 2>/dev/null - ../util/shlib_wrap.sh ./$(RSATEST) - -test_crl: - @sh ./tcrl 2>/dev/null - -test_sid: - @sh ./tsid 2>/dev/null - -test_req: - @sh ./treq 2>/dev/null - @sh ./treq testreq2.pem 2>/dev/null - -test_pkcs7: - @sh ./tpkcs7 2>/dev/null - @sh ./tpkcs7d 2>/dev/null - -test_bn: - @echo starting big number library test, could take a while... 
- @../util/shlib_wrap.sh ./$(BNTEST) >tmp.bntest - @echo quit >>tmp.bntest - @echo "running bc" - @<tmp.bntest sh -c "`sh ./bctest ignore`" | $(PERL) -e '$$i=0; while (<STDIN>) {if (/^test (.*)/) {print STDERR "\nverify $$1";} elsif (!/^0$$/) {die "\nFailed! bc: $$_";} else {print STDERR "."; $$i++;}} print STDERR "\n$$i tests passed\n"' - @echo 'test a^b%c implementations' - ../util/shlib_wrap.sh ./$(EXPTEST) - -test_ec: - @echo 'test elliptic curves' - ../util/shlib_wrap.sh ./$(ECTEST) - -test_ecdsa: - @echo 'test ecdsa' - ../util/shlib_wrap.sh ./$(ECDSATEST) - -test_ecdh: - @echo 'test ecdh' - ../util/shlib_wrap.sh ./$(ECDHTEST) - -test_verify: - @echo "The following command should have some OK's and some failures" - @echo "There are definitly a few expired certificates" - ../util/shlib_wrap.sh ../apps/openssl verify -CApath ../certs/demo ../certs/demo/*.pem - -test_dh: - @echo "Generate a set of DH parameters" - ../util/shlib_wrap.sh ./$(DHTEST) - -test_dsa: - @echo "Generate a set of DSA parameters" - ../util/shlib_wrap.sh ./$(DSATEST) - ../util/shlib_wrap.sh ./$(DSATEST) -app2_1 - -test_gen: - @echo "Generate and verify a certificate request" - @sh ./testgen - -test_ss keyU.ss certU.ss certCA.ss certP1.ss keyP1.ss certP2.ss keyP2.ss \ - intP1.ss intP2.ss: testss - @echo "Generate and certify a test certificate" - @sh ./testss - @cat certCA.ss certU.ss > intP1.ss - @cat certCA.ss certU.ss certP1.ss > intP2.ss - -test_engine: - @echo "Manipulate the ENGINE structures" - ../util/shlib_wrap.sh ./$(ENGINETEST) - -test_ssl: keyU.ss certU.ss certCA.ss certP1.ss keyP1.ss certP2.ss keyP2.ss \ - intP1.ss intP2.ss - @echo "test SSL protocol" - @if [ -n "$(FIPSCANLIB)" ]; then \ - sh ./testfipsssl keyU.ss certU.ss certCA.ss; \ - fi - ../util/shlib_wrap.sh ./$(SSLTEST) -test_cipherlist - @sh ./testssl keyU.ss certU.ss certCA.ss - @sh ./testsslproxy keyP1.ss certP1.ss intP1.ss - @sh ./testsslproxy keyP2.ss certP2.ss intP2.ss - -test_ca: - @if ../util/shlib_wrap.sh 
../apps/openssl no-rsa; then \ - echo "skipping CA.sh test -- requires RSA"; \ - else \ - echo "Generate and certify a test certificate via the 'ca' program"; \ - sh ./testca; \ - fi - -test_aes: #$(AESTEST) -# @echo "test Rijndael" -# ../util/shlib_wrap.sh ./$(AESTEST) - -test_tsa: - @if ../util/shlib_wrap.sh ../apps/openssl no-rsa; then \ - echo "skipping testtsa test -- requires RSA"; \ - else \ - sh ./testtsa; \ - fi - -test_ige: $(IGETEST)$(EXE_EXT) - @echo "Test IGE mode" - ../util/shlib_wrap.sh ./$(IGETEST) - -test_jpake: $(JPAKETEST)$(EXE_EXT) - @echo "Test JPAKE" - ../util/shlib_wrap.sh ./$(JPAKETEST) - -test_cms: - @echo "CMS consistency test" - $(PERL) cms-test.pl - -test_srp: $(SRPTEST)$(EXE_EXT) - @echo "Test SRP" - ../util/shlib_wrap.sh ./srptest - -lint: - lint -DLINT $(INCLUDES) $(SRC)>fluff - -depend: - @if [ -z "$(THIS)" ]; then \ - $(MAKE) -f $(TOP)/Makefile reflect THIS=$@; \ - else \ - $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(SRC); \ - fi - -dclean: - $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new - mv -f Makefile.new $(MAKEFILE) - rm -f $(SRC) $(SHA256TEST).c $(SHA512TEST).c evptests.txt newkey.pem testkey.pem \ - testreq.pem - -clean: - rm -f .rnd tmp.bntest tmp.bctest *.o *.obj *.dll lib tags core .pure .nfs* *.old *.bak fluff $(EXE) *.ss *.srl log dummytest - -$(DLIBSSL): - (cd ..; $(MAKE) DIRS=ssl all) - -$(DLIBCRYPTO): - (cd ..; $(MAKE) DIRS=crypto all) - -BUILD_CMD=shlib_target=; if [ -n "$(SHARED_LIBS)" ]; then \ - shlib_target="$(SHLIB_TARGET)"; \ - fi; \ - LIBRARIES="$(LIBSSL) $(LIBCRYPTO) $(LIBKRB5)"; \ - $(MAKE) -f $(TOP)/Makefile.shared -e \ - CC="$${CC}" APPNAME=$$target$(EXE_EXT) OBJECTS="$$target.o" \ - LIBDEPS="$(PEX_LIBS) $$LIBRARIES $(EX_LIBS)" \ - link_app.$${shlib_target} - -FIPS_BUILD_CMD=shlib_target=; if [ -n "$(SHARED_LIBS)" ]; then \ - shlib_target="$(SHLIB_TARGET)"; \ - fi; \ - LIBRARIES="$(LIBSSL) $(LIBCRYPTO) $(LIBKRB5)"; \ - if [ -z "$(SHARED_LIBS)" 
-a -n "$(FIPSCANLIB)" ] ; then \ - FIPSLD_CC="$(CC)"; CC=$(FIPSDIR)/bin/fipsld; export CC FIPSLD_CC; \ - fi; \ - $(MAKE) -f $(TOP)/Makefile.shared -e \ - CC="$${CC}" APPNAME=$$target$(EXE_EXT) OBJECTS="$$target.o" \ - LIBDEPS="$(PEX_LIBS) $$LIBRARIES $(EX_LIBS)" \ - link_app.$${shlib_target} - -$(RSATEST)$(EXE_EXT): $(RSATEST).o $(DLIBCRYPTO) - @target=$(RSATEST); $(BUILD_CMD) - -$(BNTEST)$(EXE_EXT): $(BNTEST).o $(DLIBCRYPTO) - @target=$(BNTEST); $(BUILD_CMD) - -$(ECTEST)$(EXE_EXT): $(ECTEST).o $(DLIBCRYPTO) - @target=$(ECTEST); $(BUILD_CMD) - -$(EXPTEST)$(EXE_EXT): $(EXPTEST).o $(DLIBCRYPTO) - @target=$(EXPTEST); $(BUILD_CMD) - -$(IDEATEST)$(EXE_EXT): $(IDEATEST).o $(DLIBCRYPTO) - @target=$(IDEATEST); $(BUILD_CMD) - -$(MD2TEST)$(EXE_EXT): $(MD2TEST).o $(DLIBCRYPTO) - @target=$(MD2TEST); $(BUILD_CMD) - -$(SHATEST)$(EXE_EXT): $(SHATEST).o $(DLIBCRYPTO) - @target=$(SHATEST); $(BUILD_CMD) - -$(SHA1TEST)$(EXE_EXT): $(SHA1TEST).o $(DLIBCRYPTO) - @target=$(SHA1TEST); $(BUILD_CMD) - -$(SHA256TEST)$(EXE_EXT): $(SHA256TEST).o $(DLIBCRYPTO) - @target=$(SHA256TEST); $(BUILD_CMD) - -$(SHA512TEST)$(EXE_EXT): $(SHA512TEST).o $(DLIBCRYPTO) - @target=$(SHA512TEST); $(BUILD_CMD) - -$(RMDTEST)$(EXE_EXT): $(RMDTEST).o $(DLIBCRYPTO) - @target=$(RMDTEST); $(BUILD_CMD) - -$(MDC2TEST)$(EXE_EXT): $(MDC2TEST).o $(DLIBCRYPTO) - @target=$(MDC2TEST); $(BUILD_CMD) - -$(MD4TEST)$(EXE_EXT): $(MD4TEST).o $(DLIBCRYPTO) - @target=$(MD4TEST); $(BUILD_CMD) - -$(MD5TEST)$(EXE_EXT): $(MD5TEST).o $(DLIBCRYPTO) - @target=$(MD5TEST); $(BUILD_CMD) - -$(HMACTEST)$(EXE_EXT): $(HMACTEST).o $(DLIBCRYPTO) - @target=$(HMACTEST); $(BUILD_CMD) - -$(WPTEST)$(EXE_EXT): $(WPTEST).o $(DLIBCRYPTO) - @target=$(WPTEST); $(BUILD_CMD) - -$(RC2TEST)$(EXE_EXT): $(RC2TEST).o $(DLIBCRYPTO) - @target=$(RC2TEST); $(BUILD_CMD) - -$(BFTEST)$(EXE_EXT): $(BFTEST).o $(DLIBCRYPTO) - @target=$(BFTEST); $(BUILD_CMD) - -$(CASTTEST)$(EXE_EXT): $(CASTTEST).o $(DLIBCRYPTO) - @target=$(CASTTEST); $(BUILD_CMD) - -$(RC4TEST)$(EXE_EXT): 
$(RC4TEST).o $(DLIBCRYPTO) - @target=$(RC4TEST); $(BUILD_CMD) - -$(RC5TEST)$(EXE_EXT): $(RC5TEST).o $(DLIBCRYPTO) - @target=$(RC5TEST); $(BUILD_CMD) - -$(DESTEST)$(EXE_EXT): $(DESTEST).o $(DLIBCRYPTO) - @target=$(DESTEST); $(BUILD_CMD) - -$(RANDTEST)$(EXE_EXT): $(RANDTEST).o $(DLIBCRYPTO) - @target=$(RANDTEST); $(BUILD_CMD) - -$(DHTEST)$(EXE_EXT): $(DHTEST).o $(DLIBCRYPTO) - @target=$(DHTEST); $(BUILD_CMD) - -$(DSATEST)$(EXE_EXT): $(DSATEST).o $(DLIBCRYPTO) - @target=$(DSATEST); $(BUILD_CMD) - -$(METHTEST)$(EXE_EXT): $(METHTEST).o $(DLIBCRYPTO) - @target=$(METHTEST); $(BUILD_CMD) - -$(SSLTEST)$(EXE_EXT): $(SSLTEST).o $(DLIBSSL) $(DLIBCRYPTO) - @target=$(SSLTEST); $(FIPS_BUILD_CMD) - -$(ENGINETEST)$(EXE_EXT): $(ENGINETEST).o $(DLIBCRYPTO) - @target=$(ENGINETEST); $(BUILD_CMD) - -$(EVPTEST)$(EXE_EXT): $(EVPTEST).o $(DLIBCRYPTO) - @target=$(EVPTEST); $(BUILD_CMD) - -$(ECDSATEST)$(EXE_EXT): $(ECDSATEST).o $(DLIBCRYPTO) - @target=$(ECDSATEST); $(BUILD_CMD) - -$(ECDHTEST)$(EXE_EXT): $(ECDHTEST).o $(DLIBCRYPTO) - @target=$(ECDHTEST); $(BUILD_CMD) - -$(IGETEST)$(EXE_EXT): $(IGETEST).o $(DLIBCRYPTO) - @target=$(IGETEST); $(BUILD_CMD) - -$(JPAKETEST)$(EXE_EXT): $(JPAKETEST).o $(DLIBCRYPTO) - @target=$(JPAKETEST); $(BUILD_CMD) - -$(ASN1TEST)$(EXE_EXT): $(ASN1TEST).o $(DLIBCRYPTO) - @target=$(ASN1TEST); $(BUILD_CMD) - -$(SRPTEST)$(EXE_EXT): $(SRPTEST).o $(DLIBCRYPTO) - @target=$(SRPTEST); $(BUILD_CMD) - -#$(AESTEST).o: $(AESTEST).c -# $(CC) -c $(CFLAGS) -DINTERMEDIATE_VALUE_KAT -DTRACE_KAT_MCT $(AESTEST).c - -#$(AESTEST)$(EXE_EXT): $(AESTEST).o $(DLIBCRYPTO) -# if [ "$(SHLIB_TARGET)" = "hpux-shared" -o "$(SHLIB_TARGET)" = "darwin-shared" ] ; then \ -# $(CC) -o $(AESTEST)$(EXE_EXT) $(CFLAGS) $(AESTEST).o $(PEX_LIBS) $(DLIBCRYPTO) $(EX_LIBS) ; \ -# else \ -# $(CC) -o $(AESTEST)$(EXE_EXT) $(CFLAGS) $(AESTEST).o $(PEX_LIBS) $(LIBCRYPTO) $(EX_LIBS) ; \ -# fi - -dummytest$(EXE_EXT): dummytest.o $(DLIBCRYPTO) - @target=dummytest; $(BUILD_CMD) - -# DO NOT DELETE THIS LINE -- make 
depend depends on it. - -asn1test.o: ../include/openssl/asn1.h ../include/openssl/asn1_mac.h -asn1test.o: ../include/openssl/bio.h ../include/openssl/buffer.h -asn1test.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -asn1test.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -asn1test.o: ../include/openssl/ecdsa.h ../include/openssl/evp.h -asn1test.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -asn1test.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -asn1test.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -asn1test.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -asn1test.o: ../include/openssl/sha.h ../include/openssl/stack.h -asn1test.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -asn1test.o: ../include/openssl/x509_vfy.h asn1test.c -bftest.o: ../e_os.h ../include/openssl/blowfish.h ../include/openssl/e_os2.h -bftest.o: ../include/openssl/opensslconf.h bftest.c -bntest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -bntest.o: ../include/openssl/bn.h ../include/openssl/buffer.h -bntest.o: ../include/openssl/crypto.h ../include/openssl/dh.h -bntest.o: ../include/openssl/dsa.h ../include/openssl/e_os2.h -bntest.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -bntest.o: ../include/openssl/ecdsa.h ../include/openssl/err.h -bntest.o: ../include/openssl/evp.h ../include/openssl/lhash.h -bntest.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -bntest.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -bntest.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -bntest.o: ../include/openssl/rand.h ../include/openssl/rsa.h -bntest.o: ../include/openssl/safestack.h ../include/openssl/sha.h -bntest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -bntest.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h bntest.c -casttest.o: ../e_os.h ../include/openssl/cast.h ../include/openssl/e_os2.h -casttest.o: 
../include/openssl/opensslconf.h casttest.c -destest.o: ../include/openssl/des.h ../include/openssl/des_old.h -destest.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h -destest.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -destest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -destest.o: ../include/openssl/ui.h ../include/openssl/ui_compat.h destest.c -dhtest.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/bn.h -dhtest.o: ../include/openssl/crypto.h ../include/openssl/dh.h -dhtest.o: ../include/openssl/e_os2.h ../include/openssl/err.h -dhtest.o: ../include/openssl/lhash.h ../include/openssl/opensslconf.h -dhtest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -dhtest.o: ../include/openssl/rand.h ../include/openssl/safestack.h -dhtest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h dhtest.c -dsatest.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/bn.h -dsatest.o: ../include/openssl/crypto.h ../include/openssl/dh.h -dsatest.o: ../include/openssl/dsa.h ../include/openssl/e_os2.h -dsatest.o: ../include/openssl/err.h ../include/openssl/lhash.h -dsatest.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -dsatest.o: ../include/openssl/ossl_typ.h ../include/openssl/rand.h -dsatest.o: ../include/openssl/safestack.h ../include/openssl/stack.h -dsatest.o: ../include/openssl/symhacks.h dsatest.c -ecdhtest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ecdhtest.o: ../include/openssl/bn.h ../include/openssl/crypto.h -ecdhtest.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ecdhtest.o: ../include/openssl/ecdh.h ../include/openssl/err.h -ecdhtest.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ecdhtest.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ecdhtest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ecdhtest.o: ../include/openssl/rand.h ../include/openssl/safestack.h -ecdhtest.o: 
../include/openssl/sha.h ../include/openssl/stack.h -ecdhtest.o: ../include/openssl/symhacks.h ecdhtest.c -ecdsatest.o: ../include/openssl/asn1.h ../include/openssl/bio.h -ecdsatest.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ecdsatest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -ecdsatest.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ecdsatest.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ecdsatest.o: ../include/openssl/err.h ../include/openssl/evp.h -ecdsatest.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ecdsatest.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ecdsatest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ecdsatest.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -ecdsatest.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ecdsatest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ecdsatest.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -ecdsatest.o: ecdsatest.c -ectest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ectest.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ectest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -ectest.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -ectest.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -ectest.o: ../include/openssl/err.h ../include/openssl/evp.h -ectest.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -ectest.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -ectest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -ectest.o: ../include/openssl/pkcs7.h ../include/openssl/rand.h -ectest.o: ../include/openssl/safestack.h ../include/openssl/sha.h -ectest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -ectest.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h ectest.c -enginetest.o: ../include/openssl/asn1.h ../include/openssl/bio.h -enginetest.o: 
../include/openssl/buffer.h ../include/openssl/crypto.h -enginetest.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -enginetest.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -enginetest.o: ../include/openssl/engine.h ../include/openssl/err.h -enginetest.o: ../include/openssl/evp.h ../include/openssl/lhash.h -enginetest.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -enginetest.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -enginetest.o: ../include/openssl/ossl_typ.h ../include/openssl/pkcs7.h -enginetest.o: ../include/openssl/safestack.h ../include/openssl/sha.h -enginetest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -enginetest.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -enginetest.o: enginetest.c -evp_test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -evp_test.o: ../include/openssl/buffer.h ../include/openssl/conf.h -evp_test.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -evp_test.o: ../include/openssl/ec.h ../include/openssl/ecdh.h -evp_test.o: ../include/openssl/ecdsa.h ../include/openssl/engine.h -evp_test.o: ../include/openssl/err.h ../include/openssl/evp.h -evp_test.o: ../include/openssl/lhash.h ../include/openssl/obj_mac.h -evp_test.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -evp_test.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -evp_test.o: ../include/openssl/pkcs7.h ../include/openssl/safestack.h -evp_test.o: ../include/openssl/sha.h ../include/openssl/stack.h -evp_test.o: ../include/openssl/symhacks.h ../include/openssl/x509.h -evp_test.o: ../include/openssl/x509_vfy.h evp_test.c -exptest.o: ../e_os.h ../include/openssl/bio.h ../include/openssl/bn.h -exptest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -exptest.o: ../include/openssl/err.h ../include/openssl/lhash.h -exptest.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -exptest.o: ../include/openssl/ossl_typ.h 
../include/openssl/rand.h -exptest.o: ../include/openssl/safestack.h ../include/openssl/stack.h -exptest.o: ../include/openssl/symhacks.h exptest.c -hmactest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -hmactest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -hmactest.o: ../include/openssl/evp.h ../include/openssl/hmac.h -hmactest.o: ../include/openssl/md5.h ../include/openssl/obj_mac.h -hmactest.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -hmactest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -hmactest.o: ../include/openssl/safestack.h ../include/openssl/stack.h -hmactest.o: ../include/openssl/symhacks.h hmactest.c -ideatest.o: ../e_os.h ../include/openssl/e_os2.h ../include/openssl/idea.h -ideatest.o: ../include/openssl/opensslconf.h ideatest.c -igetest.o: ../include/openssl/aes.h ../include/openssl/e_os2.h -igetest.o: ../include/openssl/opensslconf.h ../include/openssl/ossl_typ.h -igetest.o: ../include/openssl/rand.h igetest.c -jpaketest.o: ../include/openssl/buffer.h ../include/openssl/crypto.h -jpaketest.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h -jpaketest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -jpaketest.o: ../include/openssl/safestack.h ../include/openssl/stack.h -jpaketest.o: ../include/openssl/symhacks.h jpaketest.c -md2test.o: ../include/openssl/buffer.h ../include/openssl/crypto.h -md2test.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h -md2test.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -md2test.o: ../include/openssl/safestack.h ../include/openssl/stack.h -md2test.o: ../include/openssl/symhacks.h md2test.c -md4test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -md4test.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -md4test.o: ../include/openssl/evp.h ../include/openssl/md4.h -md4test.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -md4test.o: 
../include/openssl/opensslconf.h ../include/openssl/opensslv.h -md4test.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -md4test.o: ../include/openssl/stack.h ../include/openssl/symhacks.h md4test.c -md5test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -md5test.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -md5test.o: ../include/openssl/evp.h ../include/openssl/md5.h -md5test.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -md5test.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -md5test.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -md5test.o: ../include/openssl/stack.h ../include/openssl/symhacks.h md5test.c -mdc2test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -mdc2test.o: ../include/openssl/crypto.h ../include/openssl/des.h -mdc2test.o: ../include/openssl/des_old.h ../include/openssl/e_os2.h -mdc2test.o: ../include/openssl/evp.h ../include/openssl/mdc2.h -mdc2test.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -mdc2test.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -mdc2test.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -mdc2test.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -mdc2test.o: ../include/openssl/ui.h ../include/openssl/ui_compat.h mdc2test.c -randtest.o: ../e_os.h ../include/openssl/e_os2.h -randtest.o: ../include/openssl/opensslconf.h ../include/openssl/ossl_typ.h -randtest.o: ../include/openssl/rand.h randtest.c -rc2test.o: ../e_os.h ../include/openssl/e_os2.h -rc2test.o: ../include/openssl/opensslconf.h ../include/openssl/rc2.h rc2test.c -rc4test.o: ../e_os.h ../include/openssl/e_os2.h -rc4test.o: ../include/openssl/opensslconf.h ../include/openssl/rc4.h -rc4test.o: ../include/openssl/sha.h rc4test.c -rc5test.o: ../include/openssl/buffer.h ../include/openssl/crypto.h -rc5test.o: ../include/openssl/e_os2.h ../include/openssl/opensslconf.h -rc5test.o: 
../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -rc5test.o: ../include/openssl/safestack.h ../include/openssl/stack.h -rc5test.o: ../include/openssl/symhacks.h rc5test.c -rmdtest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -rmdtest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -rmdtest.o: ../include/openssl/evp.h ../include/openssl/obj_mac.h -rmdtest.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -rmdtest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -rmdtest.o: ../include/openssl/ripemd.h ../include/openssl/safestack.h -rmdtest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h rmdtest.c -rsa_test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -rsa_test.o: ../include/openssl/bn.h ../include/openssl/crypto.h -rsa_test.o: ../include/openssl/e_os2.h ../include/openssl/err.h -rsa_test.o: ../include/openssl/lhash.h ../include/openssl/opensslconf.h -rsa_test.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -rsa_test.o: ../include/openssl/rand.h ../include/openssl/rsa.h -rsa_test.o: ../include/openssl/safestack.h ../include/openssl/stack.h -rsa_test.o: ../include/openssl/symhacks.h rsa_test.c -sha1test.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -sha1test.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -sha1test.o: ../include/openssl/evp.h ../include/openssl/obj_mac.h -sha1test.o: ../include/openssl/objects.h ../include/openssl/opensslconf.h -sha1test.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -sha1test.o: ../include/openssl/safestack.h ../include/openssl/sha.h -sha1test.o: ../include/openssl/stack.h ../include/openssl/symhacks.h sha1test.c -shatest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -shatest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -shatest.o: ../include/openssl/evp.h ../include/openssl/obj_mac.h -shatest.o: ../include/openssl/objects.h 
../include/openssl/opensslconf.h -shatest.o: ../include/openssl/opensslv.h ../include/openssl/ossl_typ.h -shatest.o: ../include/openssl/safestack.h ../include/openssl/sha.h -shatest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h shatest.c -srptest.o: ../include/openssl/bio.h ../include/openssl/bn.h -srptest.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -srptest.o: ../include/openssl/err.h ../include/openssl/lhash.h -srptest.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -srptest.o: ../include/openssl/ossl_typ.h ../include/openssl/rand.h -srptest.o: ../include/openssl/safestack.h ../include/openssl/srp.h -srptest.o: ../include/openssl/stack.h ../include/openssl/symhacks.h srptest.c -ssltest.o: ../e_os.h ../include/openssl/asn1.h ../include/openssl/bio.h -ssltest.o: ../include/openssl/bn.h ../include/openssl/buffer.h -ssltest.o: ../include/openssl/comp.h ../include/openssl/conf.h -ssltest.o: ../include/openssl/crypto.h ../include/openssl/dh.h -ssltest.o: ../include/openssl/dsa.h ../include/openssl/dtls1.h -ssltest.o: ../include/openssl/e_os2.h ../include/openssl/ec.h -ssltest.o: ../include/openssl/ecdh.h ../include/openssl/ecdsa.h -ssltest.o: ../include/openssl/engine.h ../include/openssl/err.h -ssltest.o: ../include/openssl/evp.h ../include/openssl/hmac.h -ssltest.o: ../include/openssl/kssl.h ../include/openssl/lhash.h -ssltest.o: ../include/openssl/obj_mac.h ../include/openssl/objects.h -ssltest.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -ssltest.o: ../include/openssl/ossl_typ.h ../include/openssl/pem.h -ssltest.o: ../include/openssl/pem2.h ../include/openssl/pkcs7.h -ssltest.o: ../include/openssl/pqueue.h ../include/openssl/rand.h -ssltest.o: ../include/openssl/rsa.h ../include/openssl/safestack.h -ssltest.o: ../include/openssl/sha.h ../include/openssl/srp.h -ssltest.o: ../include/openssl/srtp.h ../include/openssl/ssl.h -ssltest.o: ../include/openssl/ssl2.h ../include/openssl/ssl23.h -ssltest.o: 
../include/openssl/ssl3.h ../include/openssl/stack.h -ssltest.o: ../include/openssl/symhacks.h ../include/openssl/tls1.h -ssltest.o: ../include/openssl/x509.h ../include/openssl/x509_vfy.h -ssltest.o: ../include/openssl/x509v3.h ssltest.c -wp_test.o: ../include/openssl/crypto.h ../include/openssl/e_os2.h -wp_test.o: ../include/openssl/opensslconf.h ../include/openssl/opensslv.h -wp_test.o: ../include/openssl/ossl_typ.h ../include/openssl/safestack.h -wp_test.o: ../include/openssl/stack.h ../include/openssl/symhacks.h -wp_test.o: ../include/openssl/whrlpool.h wp_test.c diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/maketests.com nodejs-0.11.15/deps/openssl/openssl/test/maketests.com --- nodejs-0.11.13/deps/openssl/openssl/test/maketests.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/maketests.com 2015-01-20 21:22:17.000000000 +0000 @@ -6,6 +6,7 @@ $! byer@mail.all-net.net $! $! Changes by Richard Levitte <richard@levitte.org> +$! Zoltan Arpadffy <arpadffy@polarhome.com> $! $! This command files compiles and creates all the various different $! "test" programs for the different types of encryption for OpenSSL. @@ -147,7 +148,7 @@ "RANDTEST,DHTEST,ENGINETEST,"+ - "BFTEST,CASTTEST,SSLTEST,EXPTEST,DSATEST,RSA_TEST,"+ - "EVP_TEST,IGETEST,JPAKETEST,SRPTEST,"+ - - "ASN1TEST" + "ASN1TEST,HEARTBEAT_TEST,CONSTANT_TIME_TEST" $! Should we add MTTEST,PQ_TEST,LH_TEST,DIVTEST,TABTEST as well? $! $! Additional directory information. @@ -185,6 +186,8 @@ $ T_D_JPAKETEST := [-.crypto.jpake] $ T_D_SRPTEST := [-.crypto.srp] $ T_D_ASN1TEST := [-.test] +$ T_D_HEARTBEAT_TEST := [-.ssl] +$ T_D_CONSTANT_TIME_TEST := [-.crypto] $! $ TCPIP_PROGRAMS = ",," $ IF COMPILER .EQS. 
"VAXC" THEN - diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/tests.com nodejs-0.11.15/deps/openssl/openssl/test/tests.com --- nodejs-0.11.13/deps/openssl/openssl/test/tests.com 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/tests.com 2015-01-20 21:22:17.000000000 +0000 @@ -56,7 +56,7 @@ test_enc,test_x509,test_rsa,test_crl,test_sid,- test_gen,test_req,test_pkcs7,test_verify,test_dh,test_dsa,- test_ss,test_ca,test_engine,test_evp,test_ssl,test_tsa,test_ige,- - test_jpake,test_srp,test_cms + test_jpake,test_srp,test_cms,test_heartbeat,test_constant_time $ endif $ tests = f$edit(tests,"COLLAPSE") $ @@ -95,6 +95,8 @@ $ JPAKETEST := jpaketest $ SRPTEST := srptest $ ASN1TEST := asn1test +$ HEARTBEATTEST := heartbeat_test +$ CONSTTIMETEST := constant_time_test $! $ tests_i = 0 $ loop_tests: @@ -366,10 +368,20 @@ $ write sys$output "Test SRP" $ mcr 'texe_dir''srptest' $ return +$ test_heartbeat: +$ write sys$output "Test HEARTBEAT" +$ mcr 'texe_dir''heartbeattest' +$ return +$ test_constant_time: +$ write sys$output "Test constant time utilities" +$ mcr 'texe_dir''consttimetest' +$ return $ $ $ exit: +$ on error then goto exit2 ! In case openssl.exe didn't build. 
$ mcr 'exe_dir'openssl version -a +$ exit2: $ set default '__save_default' $ deassign sslroot $ exit diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/testssl nodejs-0.11.15/deps/openssl/openssl/test/testssl --- nodejs-0.11.13/deps/openssl/openssl/test/testssl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/testssl 2015-01-20 21:22:17.000000000 +0000 @@ -173,6 +173,12 @@ echo test tls1 with SRP via BIO pair $ssltest -bio_pair -tls1 -cipher SRP -srpuser test -srppass abc123 + + echo test tls1 with SRP auth + $ssltest -tls1 -cipher aSRP -srpuser test -srppass abc123 + + echo test tls1 with SRP auth via BIO pair + $ssltest -bio_pair -tls1 -cipher aSRP -srpuser test -srppass abc123 fi exit 0 diff -Nru nodejs-0.11.13/deps/openssl/openssl/test/testutil.h nodejs-0.11.15/deps/openssl/openssl/test/testutil.h --- nodejs-0.11.13/deps/openssl/openssl/test/testutil.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/test/testutil.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,116 @@ +/* test/testutil.h */ +/* + * Utilities for writing OpenSSL unit tests. + * + * More information: + * http://wiki.openssl.org/index.php/How_To_Write_Unit_Tests_For_OpenSSL + * + * Author: Mike Bland (mbland@acm.org) + * Date: 2014-06-07 + * ==================================================================== + * Copyright (c) 2014 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * licensing@OpenSSL.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + */ + +#ifndef HEADER_TESTUTIL_H +#define HEADER_TESTUTIL_H + +/* SETUP_TEST_FIXTURE and EXECUTE_TEST macros for test case functions. 
+ * + * SETUP_TEST_FIXTURE will call set_up() to create a new TEST_FIXTURE_TYPE + * object called "fixture". It will also allocate the "result" variable used + * by EXECUTE_TEST. set_up() should take a const char* specifying the test + * case name and return a TEST_FIXTURE_TYPE by value. + * + * EXECUTE_TEST will pass fixture to execute_func() by value, call + * tear_down(), and return the result of execute_func(). execute_func() should + * take a TEST_FIXTURE_TYPE by value and return zero on success or one on + * failure. + * + * Unit tests can define their own SETUP_TEST_FIXTURE and EXECUTE_TEST + * variations like so: + * + * #define SETUP_FOOBAR_TEST_FIXTURE()\ + * SETUP_TEST_FIXTURE(FOOBAR_TEST_FIXTURE, set_up_foobar) + * + * #define EXECUTE_FOOBAR_TEST()\ + * EXECUTE_TEST(execute_foobar, tear_down_foobar) + * + * Then test case functions can take the form: + * + * static int test_foobar_feature() + * { + * SETUP_FOOBAR_TEST_FIXTURE(); + * [...set individual members of fixture...] + * EXECUTE_FOOBAR_TEST(); + * } + */ +#define SETUP_TEST_FIXTURE(TEST_FIXTURE_TYPE, set_up)\ + TEST_FIXTURE_TYPE fixture = set_up(TEST_CASE_NAME);\ + int result = 0 + +#define EXECUTE_TEST(execute_func, tear_down)\ + if (execute_func(fixture) != 0) result = 1;\ + tear_down(fixture);\ + return result + +/* TEST_CASE_NAME is defined as the name of the test case function where + * possible; otherwise we get by with the file name and line number. 
+ */ +#if __STDC_VERSION__ < 199901L +#if defined(_MSC_VER) +#define TEST_CASE_NAME __FUNCTION__ +#else +#define testutil_stringify_helper(s) #s +#define testutil_stringify(s) testutil_stringify_helper(s) +#define TEST_CASE_NAME __FILE__ ":" testutil_stringify(__LINE__) +#endif /* _MSC_VER */ +#else +#define TEST_CASE_NAME __func__ +#endif /* __STDC_VERSION__ */ + +#endif /* HEADER_TESTUTIL_H */ diff -Nru nodejs-0.11.13/deps/openssl/openssl/util/mk1mf.pl nodejs-0.11.15/deps/openssl/openssl/util/mk1mf.pl --- nodejs-0.11.13/deps/openssl/openssl/util/mk1mf.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/util/mk1mf.pl 2015-01-20 21:22:17.000000000 +0000 @@ -1145,11 +1145,13 @@ "dll" => \$shlib, "shared" => 0, "no-sctp" => 0, + "no-srtp" => 0, "no-gmp" => 0, "no-rfc3779" => 0, "no-montasm" => 0, "no-shared" => 0, "no-store" => 0, + "no-unit-test" => 0, "no-zlib" => 0, "no-zlib-dynamic" => 0, "fips" => \$fips diff -Nru nodejs-0.11.13/deps/openssl/openssl/util/mkdef.pl nodejs-0.11.15/deps/openssl/openssl/util/mkdef.pl --- nodejs-0.11.13/deps/openssl/openssl/util/mkdef.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/util/mkdef.pl 2015-01-20 21:22:17.000000000 +0000 @@ -116,7 +116,11 @@ # Hide SSL internals "SSL_INTERN", # SCTP - "SCTP"); + "SCTP", + # SRTP + "SRTP", + # Unit testing + "UNIT_TEST"); my $options=""; open(IN,"<Makefile") || die "unable to open Makefile!\n"; @@ -136,7 +140,8 @@ my $no_fp_api; my $no_static_engine=1; my $no_gmp; my $no_deprecated; my $no_rfc3779; my $no_psk; my $no_tlsext; my $no_cms; my $no_capieng; my $no_jpake; my $no_srp; my $no_ssl2; my $no_ec2m; my $no_nistp_gcc; -my $no_nextprotoneg; my $no_sctp; +my $no_nextprotoneg; my $no_sctp; my $no_srtp; +my $no_unit_test; my $fips; @@ -235,6 +240,8 @@ elsif (/^no-jpake$/) { $no_jpake=1; } elsif (/^no-srp$/) { $no_srp=1; } elsif (/^no-sctp$/) { $no_sctp=1; } + elsif (/^no-srtp$/) { $no_srtp=1; } + elsif (/^no-unit-test$/){ 
$no_unit_test=1; } } @@ -1205,6 +1212,8 @@ if ($keyword eq "JPAKE" && $no_jpake) { return 0; } if ($keyword eq "SRP" && $no_srp) { return 0; } if ($keyword eq "SCTP" && $no_sctp) { return 0; } + if ($keyword eq "SRTP" && $no_srtp) { return 0; } + if ($keyword eq "UNIT_TEST" && $no_unit_test) { return 0; } if ($keyword eq "DEPRECATED" && $no_deprecated) { return 0; } # Nothing recognise as true diff -Nru nodejs-0.11.13/deps/openssl/openssl/util/mkerr.pl nodejs-0.11.15/deps/openssl/openssl/util/mkerr.pl --- nodejs-0.11.13/deps/openssl/openssl/util/mkerr.pl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/util/mkerr.pl 2015-01-20 21:22:17.000000000 +0000 @@ -787,7 +787,7 @@ push (@runref, $_) unless exists $urcodes{$_}; } -if($debug && defined(@funref) ) { +if($debug && @funref) { print STDERR "The following function codes were not referenced:\n"; foreach(sort @funref) { @@ -795,7 +795,7 @@ } } -if($debug && defined(@runref) ) { +if($debug && @runref) { print STDERR "The following reason codes were not referenced:\n"; foreach(sort @runref) { diff -Nru nodejs-0.11.13/deps/openssl/openssl/util/ssleay.num nodejs-0.11.15/deps/openssl/openssl/util/ssleay.num --- nodejs-0.11.13/deps/openssl/openssl/util/ssleay.num 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl/util/ssleay.num 2015-01-20 21:22:17.000000000 +0000 @@ -181,6 +181,7 @@ SSL_CTX_set_session_id_context 231 EXIST::FUNCTION: SSL_CTX_set_cert_verify_callback 232 EXIST:!VMS:FUNCTION: SSL_CTX_set_cert_verify_cb 232 EXIST:VMS:FUNCTION: +SSL_test_functions 233 EXIST::FUNCTION:UNIT_TEST SSL_CTX_set_default_passwd_cb_userdata 235 EXIST:!VMS:FUNCTION: SSL_CTX_set_def_passwd_cb_ud 235 EXIST:VMS:FUNCTION: SSL_set_purpose 236 EXIST::FUNCTION: @@ -309,14 +310,14 @@ SSL_SESSION_get_id_len 351 NOEXIST::FUNCTION: kssl_ctx_get0_client_princ 352 EXIST::FUNCTION:KRB5 SSL_export_keying_material 353 EXIST::FUNCTION:TLSEXT -SSL_set_tlsext_use_srtp 354 EXIST::FUNCTION: 
+SSL_set_tlsext_use_srtp 354 EXIST::FUNCTION:SRTP SSL_CTX_set_next_protos_advertised_cb 355 EXIST:!VMS:FUNCTION:NEXTPROTONEG SSL_CTX_set_next_protos_adv_cb 355 EXIST:VMS:FUNCTION:NEXTPROTONEG SSL_get0_next_proto_negotiated 356 EXIST::FUNCTION:NEXTPROTONEG -SSL_get_selected_srtp_profile 357 EXIST::FUNCTION: -SSL_CTX_set_tlsext_use_srtp 358 EXIST::FUNCTION: +SSL_get_selected_srtp_profile 357 EXIST::FUNCTION:SRTP +SSL_CTX_set_tlsext_use_srtp 358 EXIST::FUNCTION:SRTP SSL_select_next_proto 359 EXIST::FUNCTION:NEXTPROTONEG -SSL_get_srtp_profiles 360 EXIST::FUNCTION: +SSL_get_srtp_profiles 360 EXIST::FUNCTION:SRTP SSL_CTX_set_next_proto_select_cb 361 EXIST:!VMS:FUNCTION:NEXTPROTONEG SSL_CTX_set_next_proto_sel_cb 361 EXIST:VMS:FUNCTION:NEXTPROTONEG SSL_SESSION_get_compress_id 362 EXIST::FUNCTION: diff -Nru nodejs-0.11.13/deps/openssl/openssl.gyp nodejs-0.11.15/deps/openssl/openssl.gyp --- nodejs-0.11.13/deps/openssl/openssl.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/openssl/openssl.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -6,6 +6,7 @@ 'variables': { 'is_clang': 0, 'gcc_version': 0, + 'openssl_no_asm%': 0 }, 'targets': [ @@ -651,7 +652,7 @@ ['exclude', 'store/.*$'] ], 'conditions': [ - ['target_arch!="ia32" and target_arch!="x64"', { + ['target_arch!="ia32" and target_arch!="x64" and target_arch!="arm" or openssl_no_asm!=0', { # Disable asm 'defines': [ 'OPENSSL_NO_ASM' @@ -676,27 +677,32 @@ # Enable asm 'defines': [ 'AES_ASM', - 'VPAES_ASM', - 'BF_ASM', - 'BNCO_ASM', - 'BN_ASM', 'CPUID_ASM', - 'DES_ASM', - 'LIB_BN_ASM', - 'MD5_ASM', - 'OPENSSL_BN_ASM', 'OPENSSL_BN_ASM_MONT', 'OPENSSL_CPUID_OBJ', - 'RIP_ASM', - 'RMD160_ASM', 'SHA1_ASM', 'SHA256_ASM', 'SHA512_ASM', 'GHASH_ASM', - 'WHIRLPOOL_ASM', - 'WP_ASM' ], 'conditions': [ + # Extended assembly on non-arm platforms + ['target_arch!="arm"', { + 'defines': [ + 'VPAES_ASM', + 'BN_ASM', + 'BF_ASM', + 'BNCO_ASM', + 'DES_ASM', + 'LIB_BN_ASM', + 'MD5_ASM', + 'OPENSSL_BN_ASM', + 'RIP_ASM', + 'RMD160_ASM', + 
'WHIRLPOOL_ASM', + 'WP_ASM', + ], + }], ['OS!="win" and OS!="mac" and target_arch=="ia32"', { 'sources': [ 'asm/x86-elf-gas/aes/aes-586.s', @@ -821,6 +827,33 @@ 'openssl/crypto/des/fcrypt_b.c' ] }], + ['target_arch=="arm"', { + 'sources': [ + 'asm/arm-elf-gas/aes/aes-armv4.s', + 'asm/arm-elf-gas/bn/armv4-mont.s', + 'asm/arm-elf-gas/bn/armv4-gf2m.s', + 'asm/arm-elf-gas/sha/sha1-armv4-large.s', + 'asm/arm-elf-gas/sha/sha512-armv4.s', + 'asm/arm-elf-gas/sha/sha256-armv4.s', + 'asm/arm-elf-gas/modes/ghash-armv4.s', + # No asm available + 'openssl/crypto/aes/aes_cbc.c', + 'openssl/crypto/bf/bf_enc.c', + 'openssl/crypto/bn/bn_asm.c', + 'openssl/crypto/cast/c_enc.c', + 'openssl/crypto/camellia/camellia.c', + 'openssl/crypto/camellia/cmll_cbc.c', + 'openssl/crypto/camellia/cmll_misc.c', + 'openssl/crypto/des/des_enc.c', + 'openssl/crypto/des/fcrypt_b.c', + 'openssl/crypto/rc4/rc4_enc.c', + 'openssl/crypto/rc4/rc4_skey.c', + 'openssl/crypto/whrlpool/wp_block.c', + # PCAP stuff + 'openssl/crypto/armcap.c', + 'openssl/crypto/armv4cpuid.S', + ] + }], ['OS=="win" and target_arch=="ia32"', { 'sources': [ 'asm/x86-win32-masm/aes/aes-586.asm', @@ -938,9 +971,6 @@ 'HAVE_DLFCN_H' ], }], - ['target_arch=="arm"', { - 'sources': ['openssl/crypto/armcap.c'], - }], ], 'include_dirs': [ '.', diff -Nru nodejs-0.11.13/deps/uv/AUTHORS nodejs-0.11.15/deps/uv/AUTHORS --- nodejs-0.11.13/deps/uv/AUTHORS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/AUTHORS 2015-01-20 21:22:17.000000000 +0000 @@ -86,9 +86,7 @@ Miroslav Bajtoš <miro.bajtos@gmail.com> Sean Silva <chisophugis@gmail.com> Wynn Wilkes <wynnw@movenetworks.com> -Linus Mårtensson <linus.martensson@sonymobile.com> Andrei Sedoi <bsnote@gmail.com> -Navaneeth Kedaram Nambiathan <navaneethkn@gmail.com> Alex Crichton <alex@alexcrichton.com> Brent Cook <brent@boundary.com> Brian Kaisner <bkize1@gmail.com> @@ -110,7 +108,6 @@ Sam Roberts <vieuxtech@gmail.com> River Tarnell <river@loreley.flyingparchment.org.uk> Nathan Sweet 
<nathanjsweet@gmail.com> -Luca Bruno <lucab@debian.org> Trevor Norris <trev.norris@gmail.com> Oguz Bastemur <obastemur@gmail.com> Dylan Cali <calid1984@gmail.com> @@ -139,3 +136,42 @@ 李港平 <chopdown@gmail.com> Chernyshev Viacheslav <astellar@ro.ru> Stephen von Takach <steve@advancedcontrol.com.au> +JD Ballard <jd@pixelandline.com> +Luka Perkov <luka.perkov@sartura.hr> +Ryan Cole <ryan@rycole.com> +HungMingWu <u9089000@gmail.com> +Jay Satiro <raysatiro@yahoo.com> +Leith Bade <leith@leithalweapon.geek.nz> +Peter Atashian <retep998@gmail.com> +Tim Cooper <tim.cooper@layeh.com> +Caleb James DeLisle <cjd@hyperboria.ca> +Jameson Nash <vtjnash@gmail.com> +Graham Lee <ghmlee@ghmlee.com> +Andrew Low <Andrew_Low@ca.ibm.com> +Pavel Platto <hinidu@gmail.com> +Tony Kelman <tony@kelman.net> +John Firebaugh <john.firebaugh@gmail.com> +lilohuang <lilohuang@hotmail.com> +Paul Goldsmith <paul.goldsmith@aplink.net> +Julien Gilli <julien.gilli@joyent.com> +Michael Hudson-Doyle <michael.hudson@linaro.org> +Recep ASLANTAS <m@recp.me> +Rob Adams <readams@readams.net> +Zachary Newman <znewman01@gmail.com> +Robin Hahling <robin.hahling@gw-computing.net> +Jeff Widman <jeff@jeffwidman.com> +cjihrig <cjihrig@gmail.com> +Tomasz Kołodziejski <tkolodziejski@mozilla.com> +Unknown W. 
Brackets <checkins@unknownbrackets.org> +Emmanuel Odeke <odeke@ualberta.ca> +Mikhail Mukovnikov <yndi@me.com> +Thorsten Lorenz <thlorenz@gmx.de> +Yuri D'Elia <yuri.delia@eurac.edu> +Manos Nikolaidis <manos@shadowrobot.com> +Elijah Andrews <elijah@busbud.com> +Michael Ira Krufky <m.krufky@samsung.com> +Helge Deller <deller@gmx.de> +Joey Geralnik <jgeralnik@gmail.com> +Tim Caswell <tim@creationix.com> +Michael Hudson-Doyle <michael.hudson@linaro.org> +Helge Deller <deller@gmx.de> diff -Nru nodejs-0.11.13/deps/uv/ChangeLog nodejs-0.11.15/deps/uv/ChangeLog --- nodejs-0.11.13/deps/uv/ChangeLog 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/ChangeLog 2015-01-20 21:22:17.000000000 +0000 @@ -1,4 +1,405 @@ -2014.05.02, Version 0.11.25 (Unstable) +2014.12.10, Version 1.0.2 (Stable), eec671f0059953505f9a3c9aeb7f9f31466dd7cd + +Changes since version 1.0.1: + +* linux: fix sigmask size arg in epoll_pwait() call (Ben Noordhuis) + +* linux: handle O_NONBLOCK != SOCK_NONBLOCK case (Helge Deller) + +* doc: fix spelling (Joey Geralnik) + +* unix, windows: fix typos in comments (Joey Geralnik) + +* test: canonicalize test runner path (Ben Noordhuis) + +* test: fix compilation warnings (Saúl Ibarra Corretgé) + +* test: skip tty test if detected width and height are 0 (Saúl Ibarra Corretgé) + +* doc: update README with IRC channel (Saúl Ibarra Corretgé) + +* Revert "unix: use cfmakeraw() for setting raw TTY mode" (Ben Noordhuis) + +* doc: document how to get result of uv_fs_mkdtemp (Tim Caswell) + +* unix: add flag for blocking SIGPROF during poll (Ben Noordhuis) + +* unix, windows: add uv_loop_configure() function (Ben Noordhuis) + +* win: keep a reference to AFD_POLL_INFO in cancel poll (Marc Schlaich) + +* test: raise fd limit for OSX select test (Saúl Ibarra Corretgé) + +* unix: remove overzealous assert in uv_read_stop (Saúl Ibarra Corretgé) + +* unix: reset the reading flag when a stream gets EOF (Saúl Ibarra Corretgé) + +* unix: stop reading if an error is 
produced (Saúl Ibarra Corretgé) + +* cleanup: remove all dead assignments (Maciej Małecki) + +* linux: return early if we have no interfaces (Maciej Małecki) + +* cleanup: remove a dead increment (Maciej Małecki) + + +2014.12.10, Version 0.10.30 (Stable), 5a63f5e9546dca482eeebc3054139b21f509f21f + +Changes since version 0.10.29: + +* linux: fix sigmask size arg in epoll_pwait() call (Ben Noordhuis) + +* linux: handle O_NONBLOCK != SOCK_NONBLOCK case (Helge Deller) + +* doc: update project links (Ben Noordhuis) + +* windows: fix compilation of tests (Marc Schlaich) + +* unix: add flag for blocking SIGPROF during poll (Ben Noordhuis) + +* unix, windows: add uv_loop_configure() function (Ben Noordhuis) + +* win: keep a reference to AFD_POLL_INFO in cancel poll (Marc Schlaich) + + +2014.11.27, Version 1.0.1 (Stable), 0a8e81374e861d425b56c45c8599595d848911d2 + +Changes since version 1.0.0: + +* readme: remove Rust from users (Elijah Andrews) + +* doc,build,include: update project links (Ben Noordhuis) + +* doc: fix typo: Strcutures -> Structures (Michael Ira Krufky) + +* unix: fix processing process handles queue (Saúl Ibarra Corretgé) + +* win: replace non-ansi characters in source file (Bert Belder) + + +2014.11.21, Version 1.0.0 (Stable), feb2a9e6947d892f449b2770c4090f7d8c88381b + +Changes since version 1.0.0-rc2: + +* doc: fix git/svn url for gyp repo in README (Emmanuel Odeke) + +* windows: fix fs_read with nbufs > 1 and offset (Unknown W. 
Brackets) + +* win: add missing IP_ADAPTER_UNICAST_ADDRESS_LH definition for MinGW + (huxingyi) + +* doc: mention homebrew in README (Mikhail Mukovnikov) + +* doc: add learnuv workshop to README (Thorsten Lorenz) + +* doc: fix parameter name in uv_fs_access (Saúl Ibarra Corretgé) + +* unix: use cfmakeraw() for setting raw TTY mode (Yuri D'Elia) + +* win: fix uv_thread_self() (Alexis Campailla) + +* build: add x32 support to gyp build (Ben Noordhuis) + +* build: remove dtrace probes (Ben Noordhuis) + +* doc: fix link in misc.rst (Manos Nikolaidis) + +* mailmap: remove duplicated entries (Saúl Ibarra Corretgé) + +* gyp: fix comment regarding version info location (Saúl Ibarra Corretgé) + + +2014.10.21, Version 1.0.0-rc2 (Pre-release) + +Changes since version 1.0.0-rc1: + +* build: add missing fixtures to distribution tarball (Rob Adams) + +* doc: update references to current stable branch (Zachary Newman) + +* fs: fix readdir on empty directory (Fedor Indutny) + +* fs: rename uv_fs_readdir to uv_fs_scandir (Saúl Ibarra Corretgé) + +* doc: document uv_alloc_cb (Saúl Ibarra Corretgé) + +* doc: add migration guide from version 0.10 (Saúl Ibarra Corretgé) + +* build: add DragonFly BSD support in autotools (Robin Hahling) + +* doc: document missing stream related structures (Saúl Ibarra Corretgé) + +* doc: clarify uv_loop_t.data field lifetime (Saúl Ibarra Corretgé) + +* doc: add documentation for missing functions and structures (Saúl Ibarra + Corretgé) + +* doc: fix punctuation and grammar in README (Jeff Widman) + +* windows: return libuv error codes in uv_poll_init() (cjihrig) + +* unix, windows: add uv_fs_access() (cjihrig) + +* windows: fix netmask detection (Alexis Campailla) + +* unix, windows: don't include null byte in uv_cwd size (Saúl Ibarra Corretgé) + +* unix, windows: add uv_thread_equal (Tomasz Kołodziejski) + +* windows: fix fs_write with nbufs > 1 and offset (Unknown W. 
Brackets) + + +2014.10.21, Version 0.10.29 (Stable), 2d728542d3790183417f8f122a110693cd85db14 + +Changes since version 0.10.28: + +* darwin: allocate enough space for select() hack (Fedor Indutny) + +* linux: try epoll_pwait if epoll_wait is missing (Michael Hudson-Doyle) + +* windows: map ERROR_INVALID_DRIVE to UV_ENOENT (Saúl Ibarra Corretgé) + + +2014.09.18, Version 1.0.0-rc1 (Unstable), 0c28bbf7b42882853d1799ab96ff68b07f7f8d49 + +Changes since version 0.11.29: + +* windows: improve timer precision (Alexis Campailla) + +* build, gyp: set xcode flags (Recep ASLANTAS) + +* ignore: include m4 files which are created manually (Recep ASLANTAS) + +* build: add m4 for feature/flag-testing (Recep ASLANTAS) + +* ignore: ignore Xcode project and workspace files (Recep ASLANTAS) + +* unix: fix warnings about dollar symbol usage in identifiers (Recep ASLANTAS) + +* unix: fix warnings when loading functions with dlsym (Recep ASLANTAS) + +* linux: try epoll_pwait if epoll_wait is missing (Michael Hudson-Doyle) + +* test: add test for closing and recreating default loop (Saúl Ibarra Corretgé) + +* windows: properly close the default loop (Saúl Ibarra Corretgé) + +* version: add ability to specify a version suffix (Saúl Ibarra Corretgé) + +* doc: add API documentation (Saúl Ibarra Corretgé) + +* test: don't close connection on write error (Trevor Norris) + +* windows: further simplify the code for timers (Saúl Ibarra Corretgé) + +* gyp: remove UNLIMITED_SELECT from dependent define (Fedor Indutny) + +* darwin: allocate enough space for select() hack (Fedor Indutny) + +* unix, windows: don't allow a NULL callback on timers (Saúl Ibarra Corretgé) + +* windows: simplify code in uv_timer_again (Saúl Ibarra Corretgé) + +* test: use less requests on tcp-write-queue-order (Saúl Ibarra Corretgé) + +* unix: stop child process watcher after last one exits (Saúl Ibarra Corretgé) + +* unix: simplify how process handle queue is managed (Saúl Ibarra Corretgé) + +* windows: remove duplicated 
field (mattn) + +* core: add a reserved field to uv_handle_t and uv_req_t (Saúl Ibarra Corretgé) + +* windows: fix buffer leak after failed udp send (Bert Belder) + +* windows: make sure sockets and handles are reset on close (Saúl Ibarra Corretgé) + +* unix, windows: add uv_fileno (Saúl Ibarra Corretgé) + +* build: use same CFLAGS in autotools build as in gyp (Saúl Ibarra Corretgé) + +* build: remove unneeded define in uv.gyp (Saúl Ibarra Corretgé) + +* test: fix watcher_cross_stop on Windows (Saúl Ibarra Corretgé) + +* unix, windows: move includes for EAI constants (Saúl Ibarra Corretgé) + +* unix: fix exposing EAI_* glibc-isms (Saúl Ibarra Corretgé) + +* unix: fix tcp write after bad connect freezing (Andrius Bentkus) + + +2014.08.20, Version 0.11.29 (Unstable), 35451fed830807095bbae8ef981af004a4b9259e + +Changes since version 0.11.28: + +* windows: make uv_read_stop immediately stop reading (Jameson Nash) + +* windows: fix uv__getaddrinfo_translate_error (Alexis Campailla) + +* netbsd: fix build (Saúl Ibarra Corretgé) + +* unix, windows: add uv_recv_buffer_size and uv_send_buffer_size (Andrius + Bentkus) + +* windows: add support for UNC paths on uv_spawn (Paul Goldsmith) + +* windows: replace use of inet_addr with uv_inet_pton (Saúl Ibarra Corretgé) + +* unix: replace some asserts with returning errors (Andrius Bentkus) + +* windows: use OpenBSD implementation for uv_fs_mkdtemp (Pavel Platto) + +* windows: fix GetNameInfoW error handling (Alexis Campailla) + +* fs: introduce uv_readdir_next() and report types (Fedor Indutny) + +* fs: extend reported types in uv_fs_readdir_next (Saúl Ibarra Corretgé) + +* unix: read on stream even when UV__POLLHUP set. 
(Julien Gilli) + + +2014.08.08, Version 0.11.28 (Unstable), fc9e2a0bc487b299c0cd3b2c9a23aeb554b5d8d1 + +Changes since version 0.11.27: + +* unix, windows: const-ify handle in uv_udp_getsockname (Rasmus Pedersen) + +* windows: use UV_ECANCELED for aborted TCP writes (Saúl Ibarra Corretgé) + +* windows: add more required environment variables (Jameson Nash) + +* windows: sort environment variables before calling CreateProcess (Jameson + Nash) + +* unix, windows: move uv_loop_close out of assert (John Firebaugh) + +* windows: fix buffer overflow on uv__getnameinfo_work() (lilohuang) + +* windows: add uv_backend_timeout (Jameson Nash) + +* test: disable tcp_close_accept on Windows (Saúl Ibarra Corretgé) + +* windows: read the PATH env var of the child (Alex Crichton) + +* include: avoid using C++ 'template' reserved word (Iñaki Baz Castillo) + +* include: fix version number (Saúl Ibarra Corretgé) + + +2014.07.32, Version 0.11.27 (Unstable), ffe24f955032d060968ea0289af365006afed55e + +Changes since version 0.11.26: + +* unix, windows: use the same threadpool implementation (Saúl Ibarra Corretgé) + +* unix: use struct sockaddr_storage for target UDP addr (Saúl Ibarra Corretgé) + +* doc: add documentation to uv_udp_start_recv (Andrius Bentkus) + +* common: use common uv__count_bufs code (Andrius Bentkus) + +* unix, win: add send_queue_size and send_queue_count to uv_udp_t (Andrius + Bentkus) + +* unix, win: add uv_udp_try_send (Andrius Bentkus) + +* unix: return UV_EAGAIN if uv_try_write cannot write any data (Saúl Ibarra + Corretgé) + +* windows: fix compatibility with cygwin pipes (Jameson Nash) + +* windows: count queued bytes even if request completed immediately (Saúl + Ibarra Corretgé) + +* windows: disable CRT debug handler on MinGW32 (Saúl Ibarra Corretgé) + +* windows: map ERROR_INVALID_DRIVE to UV_ENOENT (Saúl Ibarra Corretgé) + +* unix: try to write immediately in uv_udp_send (Saúl Ibarra Corretgé) + +* unix: remove incorrect assert (Saúl Ibarra Corretgé) + +* 
openbsd: avoid requiring privileges for uv_resident_set_memory (Aaron Bieber) + +* unix: guarantee write queue cb execution order in streams (Andrius Bentkus) + +* img: add logo files (Saúl Ibarra Corretgé) + +* aix: improve AIX compatibility (Andrew Low) + +* windows: return bind error immediately when implicitly binding (Saúl Ibarra + Corretgé) + +* windows: don't use atexit for cleaning up the threadpool (Saúl Ibarra + Corretgé) + +* windows: destroy work queue elements when colsing a loop (Saúl Ibarra + Corretgé) + +* unix, windows: add uv_fs_mkdtemp (Pavel Platto) + +* build: handle platforms without multiprocessing.synchronize (Saúl Ibarra + Corretgé) + +* windows: change GENERIC_ALL to GENERIC_WRITE in fs__create_junction (Tony + Kelman) + +* windows: relay TCP bind errors via ipc (Alexis Campailla) + + +2014.07.32, Version 0.10.28 (Stable), 9c14b616f5fb84bfd7d45707bab4bbb85894443e + +Changes since version 0.10.27: + +* windows: fix handling closed socket while poll handle is closing (Saúl Ibarra + Corretgé) + +* unix: return system error on EAI_SYSTEM (Saúl Ibarra Corretgé) + +* unix: fix bogus structure field name (Saúl Ibarra Corretgé) + +* darwin: invoke `mach_timebase_info` only once (Fedor Indutny) + + +2014.06.28, Version 0.11.26 (Unstable), 115281a1058c4034d5c5ccedacb667fe3f6327ea + +Changes since version 0.11.25: + +* windows: add VT100 codes ?25l and ?25h (JD Ballard) + +* windows: add invert ANSI (7 / 27) emulation (JD Ballard) + +* unix: fix handling error on UDP socket creation (Saúl Ibarra Corretgé) + +* unix, windows: getnameinfo implementation (Rasmus Pedersen) + +* heap: fix `heap_remove()` (Fedor Indutny) + +* unix, windows: fix parsing scoped IPv6 addresses (Saúl Ibarra Corretgé) + +* windows: fix handling closed socket while poll handle is closing (Saúl Ibarra + Corretgé) + +* thread: barrier functions (Ben Noordhuis) + +* windows: fix PYTHON environment variable usage (Jay Satiro) + +* unix, windows: return system error on EAI_SYSTEM 
(Saúl Ibarra Corretgé) + +* windows: fix handling closed socket while poll handle is closing (Saúl Ibarra + Corretgé) + +* unix: don't run i/o callbacks after prepare callbacks (Saúl Ibarra Corretgé) + +* windows: add tty unicode support for input (Peter Atashian) + +* header: introduce `uv_loop_size()` (Andrius Bentkus) + +* darwin: invoke `mach_timebase_info` only once (Fedor Indutny) + + +2014.05.02, Version 0.11.25 (Unstable), 2acd544cff7142e06aa3b09ec64b4a33dd9ab996 Changes since version 0.11.24: @@ -33,8 +434,6 @@ * inet: allow scopeid in uv_inet_pton (Fedor Indutny) -* win: always leave crit section in get_proc_title (Fedor Indutny) - 2014.04.07, Version 0.11.23 (Unstable), e54de537efcacd593f36fcaaf8b4cb9e64313275 @@ -77,25 +476,6 @@ * unix: fix setting written size on uv_wd (Saúl Ibarra Corretgé) -2014.04.07, Version 0.10.26 (Stable), d864907611c25ec986c5e77d4d6d6dee88f26926 - -Changes since version 0.10.25: - -* process: don't close stdio fds during spawn (Tonis Tiigi) - -* build, windows: do not fail on Windows SDK Prompt (Marc Schlaich) - -* build, windows: fix x64 configuration issue (Marc Schlaich) - -* win: fix buffer leak on error in pipe.c (Fedor Indutny) - -* kqueue: invalidate fd in uv_fs_event_t (Fedor Indutny) - -* linux: always deregister closing fds from epoll (Geoffry Song) - -* error: add ENXIO for O_NONBLOCK FIFO open() (Fedor Indutny) - - 2014.03.11, Version 0.11.22 (Unstable), cd0c19b1d3c56acf0ade7687006e12e75fbda36d Changes since version 0.11.21: @@ -225,6 +605,34 @@ * linux: fix C99/C++ comment (Fedor Indutny) +2014.05.02, Version 0.10.27 (Stable), 6e24ce23b1e7576059f85a608eca13b766458a01 + +Changes since version 0.10.26: + +* windows: fix console signal handler refcount (Saúl Ibarra Corretgé) + +* win: always leave crit section in get_proc_title (Fedor Indutny) + + +2014.04.07, Version 0.10.26 (Stable), d864907611c25ec986c5e77d4d6d6dee88f26926 + +Changes since version 0.10.25: + +* process: don't close stdio fds during spawn (Tonis 
Tiigi) + +* build, windows: do not fail on Windows SDK Prompt (Marc Schlaich) + +* build, windows: fix x64 configuration issue (Marc Schlaich) + +* win: fix buffer leak on error in pipe.c (Fedor Indutny) + +* kqueue: invalidate fd in uv_fs_event_t (Fedor Indutny) + +* linux: always deregister closing fds from epoll (Geoffry Song) + +* error: add ENXIO for O_NONBLOCK FIFO open() (Fedor Indutny) + + 2014.02.19, Version 0.10.25 (Stable), d778dc588507588b12b9f9d2905078db542ed751 Changes since version 0.10.24: diff -Nru nodejs-0.11.13/deps/uv/common.gypi nodejs-0.11.15/deps/uv/common.gypi --- nodejs-0.11.13/deps/uv/common.gypi 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/common.gypi 2015-01-20 21:22:17.000000000 +0000 @@ -143,6 +143,10 @@ 'cflags': [ '-m32' ], 'ldflags': [ '-m32' ], }], + [ 'target_arch=="x32"', { + 'cflags': [ '-mx32' ], + 'ldflags': [ '-mx32' ], + }], [ 'OS=="linux"', { 'cflags': [ '-ansi' ], }], diff -Nru nodejs-0.11.13/deps/uv/configure.ac nodejs-0.11.15/deps/uv/configure.ac --- nodejs-0.11.13/deps/uv/configure.ac 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/configure.ac 2015-01-20 21:22:17.000000000 +0000 @@ -13,16 +13,18 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. AC_PREREQ(2.57) -AC_INIT([libuv], [0.11.25], [https://github.com/joyent/libuv/issues]) +AC_INIT([libuv], [1.0.2], [https://github.com/libuv/libuv/issues]) AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/as_case.m4]) +m4_include([m4/libuv-check-flags.m4]) AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects] UV_EXTRA_AUTOMAKE_FLAGS) AC_CANONICAL_HOST AC_ENABLE_SHARED AC_ENABLE_STATIC AC_PROG_CC AM_PROG_CC_C_O +CC_CHECK_CFLAGS_APPEND([-Wno-dollar-in-identifier-extension]) # AM_PROG_AR is not available in automake v0.11 but it's essential in v0.12. 
m4_ifdef([AM_PROG_AR], [AM_PROG_AR]) m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) @@ -38,16 +40,16 @@ AC_CHECK_LIB([sendfile], [sendfile]) AC_CHECK_LIB([socket], [socket]) AC_SYS_LARGEFILE -AM_CONDITIONAL([AIX], [AS_CASE([$host_os],[aix*], [true], [false])]) -AM_CONDITIONAL([ANDROID],[AS_CASE([$host_os],[linux-android*],[true], [false])]) -AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])]) -AM_CONDITIONAL([FREEBSD],[AS_CASE([$host_os],[freebsd*], [true], [false])]) -AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])]) -AM_CONDITIONAL([NETBSD], [AS_CASE([$host_os],[netbsd*], [true], [false])]) -AM_CONDITIONAL([OPENBSD],[AS_CASE([$host_os],[openbsd*], [true], [false])]) -AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])]) -AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])]) -PANDORA_ENABLE_DTRACE +AM_CONDITIONAL([AIX], [AS_CASE([$host_os],[aix*], [true], [false])]) +AM_CONDITIONAL([ANDROID], [AS_CASE([$host_os],[linux-android*],[true], [false])]) +AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])]) +AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])]) +AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[freebsd*], [true], [false])]) +AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])]) +AM_CONDITIONAL([NETBSD], [AS_CASE([$host_os],[netbsd*], [true], [false])]) +AM_CONDITIONAL([OPENBSD], [AS_CASE([$host_os],[openbsd*], [true], [false])]) +AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])]) +AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])]) AC_CHECK_PROG(PKG_CONFIG, pkg-config, yes) AM_CONDITIONAL([HAVE_PKG_CONFIG], [test "x$PKG_CONFIG" != "x"]) AS_IF([test "x$PKG_CONFIG" != "x"], [ diff -Nru nodejs-0.11.13/deps/uv/CONTRIBUTING.md nodejs-0.11.15/deps/uv/CONTRIBUTING.md --- nodejs-0.11.13/deps/uv/CONTRIBUTING.md 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/uv/CONTRIBUTING.md 2015-01-20 21:22:17.000000000 +0000 @@ -6,13 +6,13 @@ ### FORK -Fork the project [on GitHub](https://github.com/joyent/libuv) and check out +Fork the project [on GitHub](https://github.com/libuv/libuv) and check out your copy. ``` $ git clone https://github.com/username/libuv.git $ cd libuv -$ git remote add upstream https://github.com/joyent/libuv.git +$ git remote add upstream https://github.com/libuv/libuv.git ``` Now decide if you want your feature or bug fix to go into the master branch @@ -37,10 +37,10 @@ and start hacking: ``` -$ git checkout -b my-feature-branch -t origin/v0.10 +$ git checkout -b my-feature-branch -t origin/v1.x ``` -(Where v0.10 is the latest stable branch as of this writing.) +(Where v1.x is the latest stable branch as of this writing.) ### CODE @@ -131,7 +131,7 @@ ``` $ git fetch upstream -$ git rebase upstream/v0.10 # or upstream/master +$ git rebase upstream/v1.x # or upstream/master ``` @@ -142,11 +142,8 @@ Look at other tests to see how they should be structured (license boilerplate, the way entry points are declared, etc.). -``` -$ make test -``` - -Make sure that there are no test regressions. +Check README.md file to find out how to run the test suite and make sure that +there are no test regressions. ### PUSH @@ -163,15 +160,7 @@ not send out notifications when you add commits. -### CONTRIBUTOR LICENSE AGREEMENT - -The current state of affairs is that, in order to get a patch accepted, you need -to sign Node.js's [contributor license agreement][]. You only need to do that -once. 
- - -[issue tracker]: https://github.com/joyent/libuv/issues +[issue tracker]: https://github.com/libuv/libuv/issues [libuv mailing list]: http://groups.google.com/group/libuv [IRC]: http://webchat.freelibuv.net/?channels=libuv [Google C/C++ style guide]: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml -[contributor license agreement]: http://nodejs.org/cla.html diff -Nru nodejs-0.11.13/deps/uv/docs/make.bat nodejs-0.11.15/deps/uv/docs/make.bat --- nodejs-0.11.13/deps/uv/docs/make.bat 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/make.bat 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,243 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=build +set SRCDIR=src +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% %SRCDIR% +set I18NSPHINXOPTS=%SPHINXOPTS% %SRCDIR% +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^<target^>` where ^<target^> is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. xml to make Docutils-native XML files + echo. 
pseudoxml to make pseudoxml-XML files for display purposes + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + + +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. 
+ goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\libuv.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\libuv.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. 
The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. + goto end +) + +:end diff -Nru nodejs-0.11.13/deps/uv/docs/src/async.rst nodejs-0.11.15/deps/uv/docs/src/async.rst --- nodejs-0.11.13/deps/uv/docs/src/async.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/async.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,56 @@ + +.. _async: + +:c:type:`uv_async_t` --- Async handle +===================================== + +Async handles allow the user to "wakeup" the event loop and get a callback +called from another thread. + + +Data types +---------- + +.. c:type:: uv_async_t + + Async handle type. + +.. 
c:type:: void (*uv_async_cb)(uv_async_t* handle) + + Type definition for callback passed to :c:func:`uv_async_init`. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_async_init(uv_loop_t* loop, uv_async_t* async, uv_async_cb async_cb) + + Initialize the handle. A NULL callback is allowed. + + .. note:: + Unlike other handle initialization functions, it immediately starts the handle. + +.. c:function:: int uv_async_send(uv_async_t* async) + + Wakeup the event loop and call the async handle's callback. + + .. note:: + It's safe to call this function from any thread. The callback will be called on the + loop thread. + + .. warning:: + libuv will coalesce calls to :c:func:`uv_async_send`, that is, not every call to it will + yield an execution of the callback, the only guarantee is that it will be called at least + once. Thus, calling this function may not wakeup the event loop if it was already called + previously within a short period of time. + +.. seealso:: + The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/check.rst nodejs-0.11.15/deps/uv/docs/src/check.rst --- nodejs-0.11.13/deps/uv/docs/src/check.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/check.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ + +.. _check: + +:c:type:`uv_check_t` --- Check handle +===================================== + +Check handles will run the given callback once per loop iteration, right +after polling for i/o. + + +Data types +---------- + +.. c:type:: uv_check_t + + Check handle type. + +.. c:type:: void (*uv_check_cb)(uv_check_t* handle) + + Type definition for callback passed to :c:func:`uv_check_start`. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. 
c:function:: int uv_check_init(uv_loop_t*, uv_check_t* check) + + Initialize the handle. + +.. c:function:: int uv_check_start(uv_check_t* check, uv_check_cb cb) + + Start the handle with the given callback. + +.. c:function:: int uv_check_stop(uv_check_t* check) + + Stop the handle, the callback will no longer be called. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/conf.py nodejs-0.11.15/deps/uv/docs/src/conf.py --- nodejs-0.11.13/deps/uv/docs/src/conf.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/conf.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,348 @@ +# -*- coding: utf-8 -*- +# +# libuv API documentation documentation build configuration file, created by +# sphinx-quickstart on Sun Jul 27 11:47:51 2014. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +import os +import re +import sys + + +def get_libuv_version(): + with open('../../include/uv-version.h') as f: + data = f.read() + try: + m = re.search(r"""^#define UV_VERSION_MAJOR (\d)$""", data, re.MULTILINE) + major = int(m.group(1)) + m = re.search(r"""^#define UV_VERSION_MINOR (\d)$""", data, re.MULTILINE) + minor = int(m.group(1)) + m = re.search(r"""^#define UV_VERSION_PATCH (\d)$""", data, re.MULTILINE) + patch = int(m.group(1)) + m = re.search(r"""^#define UV_VERSION_IS_RELEASE (\d)$""", data, re.MULTILINE) + is_release = int(m.group(1)) + m = re.search(r"""^#define UV_VERSION_SUFFIX \"(\w*)\"$""", data, re.MULTILINE) + suffix = m.group(1) + return '%d.%d.%d%s' % (major, minor, patch, '-%s' % suffix if not is_release else '') + except Exception: + return 'unknown' + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'libuv API documentation' +copyright = u'libuv contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = get_libuv_version() +# The full version, including alpha/beta/rc tags. +release = version + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'nature' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. 
For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +html_title = 'libuv API documentation' + +# A shorter title for the navigation bar. Default is the same as html_title. +html_short_title = 'libuv %s API documentation' % version + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = 'static/logo.png' + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +html_favicon = 'static/favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. 
+#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'libuv' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'libuv.tex', u'libuv API documentation', + u'libuv contributors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. 
+#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'libuv', u'libuv API documentation', + [u'libuv contributors'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'libuv', u'libuv API documentation', + u'libuv contributors', 'libuv', 'Cross-platform asynchronous I/O', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + + +# -- Options for Epub output ---------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = u'libuv API documentation' +epub_author = u'libuv contributors' +epub_publisher = u'libuv contributors' +epub_copyright = u'2014, libuv contributors' + +# The basename for the epub file. It defaults to the project name. +epub_basename = u'libuv' + +# The HTML theme for the epub output. Since the default themes are not optimized +# for small screen space, using the same theme for HTML and epub output is +# usually not wise. This defaults to 'epub', a theme designed to save visual +# space. +#epub_theme = 'epub' + +# The language of the text. It defaults to the language option +# or en if the language is not set. +#epub_language = '' + +# The scheme of the identifier. 
Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be an ISBN number +# or the project homepage. +#epub_identifier = '' + +# A unique identification for the text. +#epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +#epub_cover = () + +# A sequence of (type, uri, title) tuples for the guide element of content.opf. +#epub_guide = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_pre_files = [] + +# HTML files that should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_post_files = [] + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# The depth of the table of contents in toc.ncx. +#epub_tocdepth = 3 + +# Allow duplicate toc entries. +#epub_tocdup = True + +# Choose between 'default' and 'includehidden'. +#epub_tocscope = 'default' + +# Fix unsupported image types using the PIL. +#epub_fix_images = False + +# Scale large images. +#epub_max_image_width = 0 + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#epub_show_urls = 'inline' + +# If false, no index is generated. +#epub_use_index = True diff -Nru nodejs-0.11.13/deps/uv/docs/src/design.rst nodejs-0.11.15/deps/uv/docs/src/design.rst --- nodejs-0.11.13/deps/uv/docs/src/design.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/design.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,137 @@ + +.. _design: + +Design overview +=============== + +libuv is a cross-platform support library which was originally written for NodeJS. It's designed +around the event-driven asynchronous I/O model. 
+ +The library provides much more than a simple abstraction over different I/O polling mechanisms: +'handles' and 'streams' provide a high level abstraction for sockets and other entities; +cross-platform file I/O and threading functionality is also provided, amongst other things. + +Here is a diagram illustrating the different parts that compose libuv and what subsystem they +relate to: + +.. image:: static/architecture.png + :scale: 75% + :align: center + + +Handles and requests +^^^^^^^^^^^^^^^^^^^^ + +libuv provides users with 2 abstractions to work with, in combination with the event loop: +handles and requests. + +Handles represent long-lived objects capable of performing certain operations while active. Some +examples: a prepare handle gets its callback called once every loop iteration when active, and +a TCP server handle gets its connection callback called every time there is a new connection. + +Requests represent (typically) short-lived operations. These operations can be performed over a +handle: write requests are used to write data on a handle; or standalone: getaddrinfo requests +don't need a handle; they run directly on the loop. + + +The I/O loop +^^^^^^^^^^^^ + +The I/O (or event) loop is the central part of libuv. It establishes the context for all I/O +operations, and it's meant to be tied to a single thread. One can run multiple event loops +as long as each runs in a different thread. The libuv event loop (or any other API involving +the loop or handles, for that matter) **is not thread-safe** unless stated otherwise. + +The event loop follows the rather usual single threaded asynchronous I/O approach: all (network) +I/O is performed on non-blocking sockets which are polled using the best mechanism available +on the given platform: epoll on Linux, kqueue on OSX and other BSDs, event ports on SunOS and IOCP +on Windows. 
As part of a loop iteration the loop will block waiting for I/O activity on sockets +which have been added to the poller and callbacks will be fired indicating socket conditions +(readable, writable hangup) so handles can read, write or perform the desired I/O operation. + +In order to better understand how the event loop operates, the following diagram illustrates all +stages of a loop iteration: + +.. image:: static/loop_iteration.png + :scale: 75% + :align: center + + +#. The loop concept of 'now' is updated. The event loop caches the current time at the start of + the event loop tick in order to reduce the number of time-related system calls. + +#. If the loop is *alive* an iteration is started, otherwise the loop will exit immediately. So, + when is a loop considered to be *alive*? If a loop has active and ref'd handles, active + requests or closing handles it's considered to be *alive*. + +#. Due timers are run. All active timers scheduled for a time before the loop's concept of *now* + get their callbacks called. + +#. Pending callbacks are called. All I/O callbacks are called right after polling for I/O, for the + most part. There are cases, however, in which calling such a callback is deferred for the next + loop iteration. If the previous iteration deferred any I/O callback it will be run at this point. + +#. Idle handle callbacks are called. Despite the unfortunate name, idle handles are run on every + loop iteration, if they are active. + +#. Prepare handle callbacks are called. Prepare handles get their callbacks called right before + the loop will block for I/O. + +#. Poll timeout is calculated. Before blocking for I/O the loop calculates for how long it should + block. These are the rules when calculating the timeout: + + * If the loop was run with the ``UV_RUN_NOWAIT`` flag, the timeout is 0. + * If the loop is going to be stopped (:c:func:`uv_stop` was called), the timeout is 0. + * If there are no active handles or requests, the timeout is 0. 
+ * If there are any idle handles active, the timeout is 0. + * If there are any handles pending to be closed, the timeout is 0. + * If none of the above cases was matched, the timeout of the closest timer is taken, or + if there are no active timers, infinity. + +#. The loop blocks for I/O. At this point the loop will block for I/O for the timeout calculated + on the previous step. All I/O related handles that were monitoring a given file descriptor + for a read or write operation get their callbacks called at this point. + +#. Check handle callbacks are called. Check handles get their callbacks called right after the + loop has blocked for I/O. Check handles are essentially the counterpart of prepare handles. + +#. Close callbacks are called. If a handle was closed by calling :c:func:`uv_close` it will + get the close callback called. + +#. Special case in case the loop was run with ``UV_RUN_ONCE``, as it implies forward progress. + It's possible that no I/O callbacks were fired after blocking for I/O, but some time has passed + so there might be timers which are due; those timers get their callbacks called. + +#. Iteration ends. If the loop was run with ``UV_RUN_NOWAIT`` or ``UV_RUN_ONCE`` modes the + iteration is ended and :c:func:`uv_run` will return. If the loop was run with ``UV_RUN_DEFAULT`` + it will continue from the start if it's still *alive*, otherwise it will also end. + + +.. important:: + libuv uses a thread pool to make asynchronous file I/O operations possible, but + network I/O is **always** performed in a single thread, each loop's thread. + +.. note:: + While the polling mechanism is different, libuv makes the execution model consistent + across Unix systems and Windows. + + +File I/O +^^^^^^^^ + +Unlike network I/O, there are no platform-specific file I/O primitives libuv could rely on, +so the current approach is to run blocking file I/O operations in a thread pool. 
+ +For a thorough explanation of the cross-platform file I/O landscape, check out +`this post <http://blog.libtorrent.org/2012/10/asynchronous-disk-io/>`_. + +libuv currently uses a global thread pool on which all loops can queue work. 3 types of +operations are currently run on this pool: + + * Filesystem operations + * DNS functions (getaddrinfo and getnameinfo) + * User specified code via :c:func:`uv_queue_work` + +.. warning:: + See the :ref:`threadpool` section for more details, but keep in mind the thread pool size + is quite limited. diff -Nru nodejs-0.11.13/deps/uv/docs/src/dll.rst nodejs-0.11.15/deps/uv/docs/src/dll.rst --- nodejs-0.11.13/deps/uv/docs/src/dll.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/dll.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ + +.. _dll: + +Shared library handling +======================= + +libuv provides cross platform utilities for loading shared libraries and +retrieving symbols from them, using the following API. + + +Data types +---------- + +.. c:type:: uv_lib_t + + Shared library data type. + + +Public members +^^^^^^^^^^^^^^ + +N/A + + +API +--- + +.. c:function:: int uv_dlopen(const char* filename, uv_lib_t* lib) + + Opens a shared library. The filename is in utf-8. Returns 0 on success and + -1 on error. Call :c:func:`uv_dlerror` to get the error message. + +.. c:function:: void uv_dlclose(uv_lib_t* lib) + + Close the shared library. + +.. c:function:: int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) + + Retrieves a data pointer from a dynamic library. It is legal for a symbol + to map to NULL. Returns 0 on success and -1 if the symbol was not found. + +.. c:function:: const char* uv_dlerror(const uv_lib_t* lib) + + Returns the last uv_dlopen() or uv_dlsym() error message. 
diff -Nru nodejs-0.11.13/deps/uv/docs/src/dns.rst nodejs-0.11.15/deps/uv/docs/src/dns.rst --- nodejs-0.11.13/deps/uv/docs/src/dns.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/dns.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ + +.. _dns: + +DNS utility functions +===================== + +libuv provides asynchronous variants of `getaddrinfo` and `getnameinfo`. + + +Data types +---------- + +.. c:type:: uv_getaddrinfo_t + + `getaddrinfo` request type. + +.. c:type:: void (*uv_getaddrinfo_cb)(uv_getaddrinfo_t* req, int status, struct addrinfo* res) + + Callback which will be called with the getaddrinfo request result once + complete. In case it was cancelled, `status` will have a value of + ``UV_ECANCELED``. + +.. c:type:: uv_getnameinfo_t + + `getnameinfo` request type. + +.. c:type:: void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req, int status, const char* hostname, const char* service) + + Callback which will be called with the getnameinfo request result once + complete. In case it was cancelled, `status` will have a value of + ``UV_ECANCELED``. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: uv_loop_t* uv_getaddrinfo_t.loop + + Loop that started this getaddrinfo request and where completion will be + reported. Readonly. + +.. c:member:: uv_loop_t* uv_getnameinfo_t.loop + + Loop that started this getnameinfo request and where completion will be + reported. Readonly. + +.. seealso:: The :c:type:`uv_req_t` members also apply. + + +API +--- + +.. c:function:: int uv_getaddrinfo(uv_loop_t* loop, uv_getaddrinfo_t* req, uv_getaddrinfo_cb getaddrinfo_cb, const char* node, const char* service, const struct addrinfo* hints) + + Asynchronous ``getaddrinfo(3)``. + + Either node or service may be NULL but not both. + + `hints` is a pointer to a struct addrinfo with additional address type + constraints, or NULL. Consult `man -s 3 getaddrinfo` for more details. + + Returns 0 on success or an error code < 0 on failure. 
If successful, the + callback will get called sometime in the future with the lookup result, + which is either: + + * status == 0, the res argument points to a valid `struct addrinfo`, or + * status < 0, the res argument is NULL. See the UV_EAI_* constants. + + Call :c:func:`uv_freeaddrinfo` to free the addrinfo structure. + +.. c:function:: void uv_freeaddrinfo(struct addrinfo* ai) + + Free the struct addrinfo. Passing NULL is allowed and is a no-op. + +.. c:function:: int uv_getnameinfo(uv_loop_t* loop, uv_getnameinfo_t* req, uv_getnameinfo_cb getnameinfo_cb, const struct sockaddr* addr, int flags) + + Asynchronous ``getnameinfo(3)``. + + Returns 0 on success or an error code < 0 on failure. If successful, the + callback will get called sometime in the future with the lookup result. + Consult `man -s 3 getnameinfo` for more details. + +.. seealso:: The :c:type:`uv_req_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/errors.rst nodejs-0.11.15/deps/uv/docs/src/errors.rst --- nodejs-0.11.13/deps/uv/docs/src/errors.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/errors.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,329 @@ + +.. _errors: + +Error handling +============== + +In libuv errors are negative numbered constants. As a rule of thumb, whenever +there is a status parameter, or an API functions returns an integer, a negative +number will imply an error. + +.. note:: + Implementation detail: on Unix error codes are the negated `errno` (or `-errno`), while on + Windows they are defined by libuv to arbitrary negative numbers. + + +Error constants +--------------- + +.. c:macro:: UV_E2BIG + + argument list too long + +.. c:macro:: UV_EACCES + + permission denied + +.. c:macro:: UV_EADDRINUSE + + address already in use + +.. c:macro:: UV_EADDRNOTAVAIL + + address not available + +.. c:macro:: UV_EAFNOSUPPORT + + address family not supported + +.. c:macro:: UV_EAGAIN + + resource temporarily unavailable + +.. 
c:macro:: UV_EAI_ADDRFAMILY + + address family not supported + +.. c:macro:: UV_EAI_AGAIN + + temporary failure + +.. c:macro:: UV_EAI_BADFLAGS + + bad ai_flags value + +.. c:macro:: UV_EAI_BADHINTS + + invalid value for hints + +.. c:macro:: UV_EAI_CANCELED + + request canceled + +.. c:macro:: UV_EAI_FAIL + + permanent failure + +.. c:macro:: UV_EAI_FAMILY + + ai_family not supported + +.. c:macro:: UV_EAI_MEMORY + + out of memory + +.. c:macro:: UV_EAI_NODATA + + no address + +.. c:macro:: UV_EAI_NONAME + + unknown node or service + +.. c:macro:: UV_EAI_OVERFLOW + + argument buffer overflow + +.. c:macro:: UV_EAI_PROTOCOL + + resolved protocol is unknown + +.. c:macro:: UV_EAI_SERVICE + + service not available for socket type + +.. c:macro:: UV_EAI_SOCKTYPE + + socket type not supported + +.. c:macro:: UV_EALREADY + + connection already in progress + +.. c:macro:: UV_EBADF + + bad file descriptor + +.. c:macro:: UV_EBUSY + + resource busy or locked + +.. c:macro:: UV_ECANCELED + + operation canceled + +.. c:macro:: UV_ECHARSET + + invalid Unicode character + +.. c:macro:: UV_ECONNABORTED + + software caused connection abort + +.. c:macro:: UV_ECONNREFUSED + + connection refused + +.. c:macro:: UV_ECONNRESET + + connection reset by peer + +.. c:macro:: UV_EDESTADDRREQ + + destination address required + +.. c:macro:: UV_EEXIST + + file already exists + +.. c:macro:: UV_EFAULT + + bad address in system call argument + +.. c:macro:: UV_EFBIG + + file too large + +.. c:macro:: UV_EHOSTUNREACH + + host is unreachable + +.. c:macro:: UV_EINTR + + interrupted system call + +.. c:macro:: UV_EINVAL + + invalid argument + +.. c:macro:: UV_EIO + + i/o error + +.. c:macro:: UV_EISCONN + + socket is already connected + +.. c:macro:: UV_EISDIR + + illegal operation on a directory + +.. c:macro:: UV_ELOOP + + too many symbolic links encountered + +.. c:macro:: UV_EMFILE + + too many open files + +.. c:macro:: UV_EMSGSIZE + + message too long + +.. 
c:macro:: UV_ENAMETOOLONG + + name too long + +.. c:macro:: UV_ENETDOWN + + network is down + +.. c:macro:: UV_ENETUNREACH + + network is unreachable + +.. c:macro:: UV_ENFILE + + file table overflow + +.. c:macro:: UV_ENOBUFS + + no buffer space available + +.. c:macro:: UV_ENODEV + + no such device + +.. c:macro:: UV_ENOENT + + no such file or directory + +.. c:macro:: UV_ENOMEM + + not enough memory + +.. c:macro:: UV_ENONET + + machine is not on the network + +.. c:macro:: UV_ENOPROTOOPT + + protocol not available + +.. c:macro:: UV_ENOSPC + + no space left on device + +.. c:macro:: UV_ENOSYS + + function not implemented + +.. c:macro:: UV_ENOTCONN + + socket is not connected + +.. c:macro:: UV_ENOTDIR + + not a directory + +.. c:macro:: UV_ENOTEMPTY + + directory not empty + +.. c:macro:: UV_ENOTSOCK + + socket operation on non-socket + +.. c:macro:: UV_ENOTSUP + + operation not supported on socket + +.. c:macro:: UV_EPERM + + operation not permitted + +.. c:macro:: UV_EPIPE + + broken pipe + +.. c:macro:: UV_EPROTO + + protocol error + +.. c:macro:: UV_EPROTONOSUPPORT + + protocol not supported + +.. c:macro:: UV_EPROTOTYPE + + protocol wrong type for socket + +.. c:macro:: UV_ERANGE + + result too large + +.. c:macro:: UV_EROFS + + read-only file system + +.. c:macro:: UV_ESHUTDOWN + + cannot send after transport endpoint shutdown + +.. c:macro:: UV_ESPIPE + + invalid seek + +.. c:macro:: UV_ESRCH + + no such process + +.. c:macro:: UV_ETIMEDOUT + + connection timed out + +.. c:macro:: UV_ETXTBSY + + text file is busy + +.. c:macro:: UV_EXDEV + + cross-device link not permitted + +.. c:macro:: UV_UNKNOWN + + unknown error + +.. c:macro:: UV_EOF + + end of file + +.. c:macro:: UV_ENXIO + + no such device or address + +.. c:macro:: UV_EMLINK + + too many links + + +API +--- + +.. c:function:: const char* uv_strerror(int err) + + Returns the error message for the given error code. + +.. 
c:function:: const char* uv_err_name(int err) + + Returns the error name for the given error code. diff -Nru nodejs-0.11.13/deps/uv/docs/src/fs_event.rst nodejs-0.11.15/deps/uv/docs/src/fs_event.rst --- nodejs-0.11.13/deps/uv/docs/src/fs_event.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/fs_event.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,102 @@ + +.. _fs_event: + +:c:type:`uv_fs_event_t` --- FS Event handle +=========================================== + +FS Event handles allow the user to monitor a given path for changes, for example, +if the file was renamed or there was a generic change in it. This handle uses +the best backend for the job on each platform. + + +Data types +---------- + +.. c:type:: uv_fs_event_t + + FS Event handle type. + +.. c:type:: void (*uv_fs_event_cb)(uv_fs_event_t* handle, const char* filename, int events, int status) + + Callback passed to :c:func:`uv_fs_event_start` which will be called repeatedly + after the handle is started. If the handle was started with a directory the + `filename` parameter will be a relative path to a file contained in the directory. + The `events` parameter is an ORed mask of :c:type:`uv_fs_event` elements. + +.. c:type:: uv_fs_event + + Event types that :c:type:`uv_fs_event_t` handles monitor. + + :: + + enum uv_fs_event { + UV_RENAME = 1, + UV_CHANGE = 2 + }; + +.. c:type:: uv_fs_event_flags + + Flags that can be passed to :c:func:`uv_fs_event_start` to control its + behavior. + + :: + + enum uv_fs_event_flags { + /* + * By default, if the fs event watcher is given a directory name, we will + * watch for all events in that directory. This flags overrides this behavior + * and makes fs_event report only changes to the directory entry itself. This + * flag does not affect individual files watched. + * This flag is currently not implemented yet on any backend. 
+ */ + UV_FS_EVENT_WATCH_ENTRY = 1, + /* + * By default uv_fs_event will try to use a kernel interface such as inotify + * or kqueue to detect events. This may not work on remote filesystems such + * as NFS mounts. This flag makes fs_event fall back to calling stat() on a + * regular interval. + * This flag is currently not implemented yet on any backend. + */ + UV_FS_EVENT_STAT = 2, + /* + * By default, event watcher, when watching directory, is not registering + * (is ignoring) changes in it's subdirectories. + * This flag will override this behaviour on platforms that support it. + */ + UV_FS_EVENT_RECURSIVE = 4 + }; + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) + + Initialize the handle. + +.. c:function:: int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb, const char* path, unsigned int flags) + + Start the handle with the given callback, which will watch the specified + `path` for changes. `flags` can be an ORed mask of :c:type:`uv_fs_event_flags`. + +.. c:function:: int uv_fs_event_stop(uv_fs_event_t* handle) + + Stop the handle, the callback will no longer be called. + +.. c:function:: int uv_fs_event_getpath(uv_fs_event_t* handle, char* buf, size_t* len) + + Get the path being monitored by the handle. The buffer must be preallocated + by the user. Returns 0 on success or an error code < 0 in case of failure. + On success, `buf` will contain the path and `len` its length. If the buffer + is not big enough UV_ENOBUFS will be returned and len will be set to the + required size. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. 
diff -Nru nodejs-0.11.13/deps/uv/docs/src/fs_poll.rst nodejs-0.11.15/deps/uv/docs/src/fs_poll.rst --- nodejs-0.11.13/deps/uv/docs/src/fs_poll.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/fs_poll.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,69 @@ + +.. _fs_poll: + +:c:type:`uv_fs_poll_t` --- FS Poll handle +========================================= + +FS Poll handles allow the user to monitor a given path for changes. Unlike +:c:type:`uv_fs_event_t`, fs poll handles use `stat` to detect when a file has +changed so they can work on file systems where fs event handles can't. + + +Data types +---------- + +.. c:type:: uv_fs_poll_t + + FS Poll handle type. + +.. c:type:: void (*uv_fs_poll_cb)(uv_fs_poll_t* handle, int status, const uv_stat_t* prev, const uv_stat_t* curr) + + Callback passed to :c:func:`uv_fs_poll_start` which will be called repeatedly + after the handle is started, when any change happens to the monitored path. + + The callback is invoked with `status < 0` if `path` does not exist + or is inaccessible. The watcher is *not* stopped but your callback is + not called again until something changes (e.g. when the file is created + or the error reason changes). + + When `status == 0`, the callback receives pointers to the old and new + :c:type:`uv_stat_t` structs. They are valid for the duration of the + callback only. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) + + Initialize the handle. + +.. c:function:: int uv_fs_poll_start(uv_fs_poll_t* handle, uv_fs_poll_cb poll_cb, const char* path, unsigned int interval) + + Check the file at `path` for changes every `interval` milliseconds. + + .. note:: + For maximum portability, use multi-second intervals. Sub-second intervals will not detect + all changes on many file systems. + +.. 
c:function:: int uv_fs_poll_stop(uv_fs_poll_t* handle) + + Stop the handle, the callback will no longer be called. + +.. c:function:: int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buf, size_t* len) + + Get the path being monitored by the handle. The buffer must be preallocated + by the user. Returns 0 on success or an error code < 0 in case of failure. + On success, `buf` will contain the path and `len` its length. If the buffer + is not big enough UV_ENOBUFS will be returned and len will be set to the + required size. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/fs.rst nodejs-0.11.15/deps/uv/docs/src/fs.rst --- nodejs-0.11.13/deps/uv/docs/src/fs.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/fs.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,278 @@ + +.. _fs: + +Filesystem operations +===================== + +libuv provides a wide variety of cross-platform sync and async filesystem +operations. All functions defined in this document take a callback, which is +allowed to be NULL. If the callback is NULL the request is completed synchronously, +otherwise it will be performed asynchronously. + +All file operations are run on the threadpool, see :ref:`threadpool` for information +on the threadpool size. + + +Data types +---------- + +.. c:type:: uv_fs_t + + Filesystem request type. + +.. c:type:: uv_timespec_t + + Portable equivalent of ``struct timespec``. + + :: + + typedef struct { + long tv_sec; + long tv_nsec; + } uv_timespec_t; + +.. c:type:: uv_stat_t + + Portable equivalent of ``struct stat``. 
+ + :: + + typedef struct { + uint64_t st_dev; + uint64_t st_mode; + uint64_t st_nlink; + uint64_t st_uid; + uint64_t st_gid; + uint64_t st_rdev; + uint64_t st_ino; + uint64_t st_size; + uint64_t st_blksize; + uint64_t st_blocks; + uint64_t st_flags; + uint64_t st_gen; + uv_timespec_t st_atim; + uv_timespec_t st_mtim; + uv_timespec_t st_ctim; + uv_timespec_t st_birthtim; + } uv_stat_t; + +.. c:type:: uv_fs_type + + Filesystem request type. + + :: + + typedef enum { + UV_FS_UNKNOWN = -1, + UV_FS_CUSTOM, + UV_FS_OPEN, + UV_FS_CLOSE, + UV_FS_READ, + UV_FS_WRITE, + UV_FS_SENDFILE, + UV_FS_STAT, + UV_FS_LSTAT, + UV_FS_FSTAT, + UV_FS_FTRUNCATE, + UV_FS_UTIME, + UV_FS_FUTIME, + UV_FS_ACCESS, + UV_FS_CHMOD, + UV_FS_FCHMOD, + UV_FS_FSYNC, + UV_FS_FDATASYNC, + UV_FS_UNLINK, + UV_FS_RMDIR, + UV_FS_MKDIR, + UV_FS_MKDTEMP, + UV_FS_RENAME, + UV_FS_SCANDIR, + UV_FS_LINK, + UV_FS_SYMLINK, + UV_FS_READLINK, + UV_FS_CHOWN, + UV_FS_FCHOWN + } uv_fs_type; + +.. c:type:: uv_dirent_t + + Cross platform (reduced) equivalent of ``struct dirent``. + Used in :c:func:`uv_fs_scandir_next`. + + :: + + typedef enum { + UV_DIRENT_UNKNOWN, + UV_DIRENT_FILE, + UV_DIRENT_DIR, + UV_DIRENT_LINK, + UV_DIRENT_FIFO, + UV_DIRENT_SOCKET, + UV_DIRENT_CHAR, + UV_DIRENT_BLOCK + } uv_dirent_type_t; + + typedef struct uv_dirent_s { + const char* name; + uv_dirent_type_t type; + } uv_dirent_t; + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: uv_loop_t* uv_fs_t.loop + + Loop that started this request and where completion will be reported. + Readonly. + +.. c:member:: uv_fs_type uv_fs_t.fs_type + + FS request type. + +.. c:member:: const char* uv_fs_t.path + + Path affecting the request. + +.. c:member:: ssize_t uv_fs_t.result + + Result of the request. < 0 means error, success otherwise. On requests such + as :c:func:`uv_fs_read` or :c:func:`uv_fs_write` it indicates the amount of + data that was read or written, respectively. + +.. 
c:member:: uv_stat_t uv_fs_t.statbuf + + Stores the result of :c:func:`uv_fs_stat` and other stat requests. + +.. c:member:: void* uv_fs_t.ptr + + Stores the result of :c:func:`uv_fs_readlink` and serves as an alias to + `statbuf`. + +.. seealso:: The :c:type:`uv_req_t` members also apply. + + +API +--- + +.. c:function:: void uv_fs_req_cleanup(uv_fs_t* req) + + Cleanup request. Must be called after a request is finished to deallocate + any memory libuv might have allocated. + +.. c:function:: int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) + + Equivalent to ``close(2)``. + +.. c:function:: int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, int mode, uv_fs_cb cb) + + Equivalent to ``open(2)``. + +.. c:function:: int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb) + + Equivalent to ``preadv(2)``. + +.. c:function:: int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) + + Equivalent to ``unlink(2)``. + +.. c:function:: int uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb) + + Equivalent to ``pwritev(2)``. + +.. c:function:: int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) + + Equivalent to ``mkdir(2)``. + + .. note:: + `mode` is currently not implemented on Windows. + +.. c:function:: int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb) + + Equivalent to ``mkdtemp(3)``. + + .. note:: + The result can be found as a null terminated string at `req->path`. + +.. c:function:: int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) + + Equivalent to ``rmdir(2)``. + +.. c:function:: int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, uv_fs_cb cb) +.. 
c:function:: int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) + + Equivalent to ``scandir(3)``, with a slightly different API. Once the callback + for the request is called, the user can use :c:func:`uv_fs_scandir_next` to + get `ent` populated with the next directory entry data. When there are no + more entries ``UV_EOF`` will be returned. + +.. c:function:: int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) +.. c:function:: int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) +.. c:function:: int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) + + Equivalent to ``(f/l)stat(2)``. + +.. c:function:: int uv_fs_rename(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, uv_fs_cb cb) + + Equivalent to ``rename(2)``. + +.. c:function:: int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) + + Equivalent to ``fsync(2)``. + +.. c:function:: int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) + + Equivalent to ``fdatasync(2)``. + +.. c:function:: int uv_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req, uv_file file, int64_t offset, uv_fs_cb cb) + + Equivalent to ``ftruncate(2)``. + +.. c:function:: int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file out_fd, uv_file in_fd, int64_t in_offset, size_t length, uv_fs_cb cb) + + Limited equivalent to ``sendfile(2)``. + +.. c:function:: int uv_fs_access(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) + + Equivalent to ``access(2)`` on Unix. Windows uses ``GetFileAttributesW()``. + +.. c:function:: int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) +.. c:function:: int uv_fs_fchmod(uv_loop_t* loop, uv_fs_t* req, uv_file file, int mode, uv_fs_cb cb) + + Equivalent to ``(f)chmod(2)``. + +.. c:function:: int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime, double mtime, uv_fs_cb cb) +.. 
c:function:: int uv_fs_futime(uv_loop_t* loop, uv_fs_t* req, uv_file file, double atime, double mtime, uv_fs_cb cb) + + Equivalent to ``(f)utime(s)(2)``. + +.. c:function:: int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, uv_fs_cb cb) + + Equivalent to ``link(2)``. + +.. c:function:: int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, int flags, uv_fs_cb cb) + + Equivalent to ``symlink(2)``. + + .. note:: + On Windows the `flags` parameter can be specified to control how the symlink will + be created: + + * ``UV_FS_SYMLINK_DIR``: indicates that `path` points to a directory. + + * ``UV_FS_SYMLINK_JUNCTION``: request that the symlink is created + using junction points. + +.. c:function:: int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) + + Equivalent to ``readlink(2)``. + +.. c:function:: int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb) +.. c:function:: int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb) + + Equivalent to ``(f)chown(2)``. + + .. note:: + These functions are not implemented on Windows. + +.. seealso:: The :c:type:`uv_req_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/handle.rst nodejs-0.11.15/deps/uv/docs/src/handle.rst --- nodejs-0.11.13/deps/uv/docs/src/handle.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/handle.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,181 @@ + +.. _handle: + +:c:type:`uv_handle_t` --- Base handle +===================================== + +`uv_handle_t` is the base type for all libuv handle types. + +Structures are aligned so that any libuv handle can be cast to `uv_handle_t`. +All API functions defined here work with any handle type. + + +Data types +---------- + +.. c:type:: uv_handle_t + + The base libuv handle type. + +.. 
c:type:: uv_any_handle + + Union of all handle types. + +.. c:type:: void (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) + + Type definition for callback passed to :c:func:`uv_read_start` and + :c:func:`uv_udp_recv_start`. The user must fill the supplied :c:type:`uv_buf_t` + structure with whatever size, as long as it's > 0. A suggested size (65536 at the moment) + is provided, but it doesn't need to be honored. Setting the buffer's length to 0 + will trigger a ``UV_ENOBUFS`` error in the :c:type:`uv_udp_recv_cb` or + :c:type:`uv_read_cb` callback. + +.. c:type:: void (*uv_close_cb)(uv_handle_t* handle) + + Type definition for callback passed to :c:func:`uv_close`. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: uv_loop_t* uv_handle_t.loop + + Pointer to the :c:type:`uv_loop_t` where the handle is running on. Readonly. + +.. c:member:: void* uv_handle_t.data + + Space for user-defined arbitrary data. libuv does not use this field. + + +API +--- + +.. c:function:: int uv_is_active(const uv_handle_t* handle) + + Returns non-zero if the handle is active, zero if it's inactive. What + "active" means depends on the type of handle: + + - A uv_async_t handle is always active and cannot be deactivated, except + by closing it with uv_close(). + + - A uv_pipe_t, uv_tcp_t, uv_udp_t, etc. handle - basically any handle that + deals with i/o - is active when it is doing something that involves i/o, + like reading, writing, connecting, accepting new connections, etc. + + - A uv_check_t, uv_idle_t, uv_timer_t, etc. handle is active when it has + been started with a call to uv_check_start(), uv_idle_start(), etc. + + Rule of thumb: if a handle of type `uv_foo_t` has a `uv_foo_start()` + function, then it's active from the moment that function is called. + Likewise, `uv_foo_stop()` deactivates the handle again. + +.. c:function:: int uv_is_closing(const uv_handle_t* handle) + + Returns non-zero if the handle is closing or closed, zero otherwise. 
+ + .. note:: + This function should only be used between the initialization of the handle and the + arrival of the close callback. + +.. c:function:: void uv_close(uv_handle_t* handle, uv_close_cb close_cb) + + Request handle to be closed. `close_cb` will be called asynchronously after + this call. This MUST be called on each handle before memory is released. + + Handles that wrap file descriptors are closed immediately but + `close_cb` will still be deferred to the next iteration of the event loop. + It gives you a chance to free up any resources associated with the handle. + + In-progress requests, like uv_connect_t or uv_write_t, are cancelled and + have their callbacks called asynchronously with status=UV_ECANCELED. + +.. c:function:: void uv_ref(uv_handle_t* handle) + + Reference the given handle. References are idempotent, that is, if a handle + is already referenced calling this function again will have no effect. + + See :ref:`refcount`. + +.. c:function:: void uv_unref(uv_handle_t* handle) + + Un-reference the given handle. References are idempotent, that is, if a handle + is not referenced calling this function again will have no effect. + + See :ref:`refcount`. + +.. c:function:: int uv_has_ref(const uv_handle_t* handle) + + Returns non-zero if the handle referenced, zero otherwise. + + See :ref:`refcount`. + +.. c:function:: size_t uv_handle_size(uv_handle_type type) + + Returns the size of the given handle type. Useful for FFI binding writers + who don't want to know the structure layout. + + +Miscellaneous API functions +--------------------------- + +The following API functions take a :c:type:`uv_handle_t` argument but they work +just for some handle types. + +.. c:function:: int uv_send_buffer_size(uv_handle_t* handle, int* value) + + Gets or sets the size of the send buffer that the operating + system uses for the socket. 
+ + If `*value` == 0, it will return the current send buffer size, + otherwise it will use `*value` to set the new send buffer size. + + This function works for TCP, pipe and UDP handles on Unix and for TCP and + UDP handles on Windows. + + .. note:: + Linux will set double the size and return double the size of the original set value. + +.. c:function:: int uv_recv_buffer_size(uv_handle_t* handle, int* value) + + Gets or sets the size of the receive buffer that the operating + system uses for the socket. + + If `*value` == 0, it will return the current receive buffer size, + otherwise it will use `*value` to set the new receive buffer size. + + This function works for TCP, pipe and UDP handles on Unix and for TCP and + UDP handles on Windows. + + .. note:: + Linux will set double the size and return double the size of the original set value. + +.. c:function:: int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) + + Gets the platform dependent file descriptor equivalent. + + The following handles are supported: TCP, pipes, TTY, UDP and poll. Passing + any other handle type will fail with `UV_EINVAL`. + + If a handle doesn't have an attached file descriptor yet or the handle + itself has been closed, this function will return `UV_EBADF`. + + .. warning:: + Be very careful when using this function. libuv assumes it's in control of the file + descriptor so any change to it may lead to malfunction. + + +.. _refcount: + +Reference counting +------------------ + +The libuv event loop (if run in the default mode) will run until there are no +active `and` referenced handles left. The user can force the loop to exit early +by unreferencing handles which are active, for example by calling :c:func:`uv_unref` +after calling :c:func:`uv_timer_start`. + +A handle can be referenced or unreferenced, the refcounting scheme doesn't use +a counter, so both operations are idempotent. 
+ +All handles are referenced when active by default, see :c:func:`uv_is_active` +for a more detailed explanation on what being `active` involves. diff -Nru nodejs-0.11.13/deps/uv/docs/src/idle.rst nodejs-0.11.15/deps/uv/docs/src/idle.rst --- nodejs-0.11.13/deps/uv/docs/src/idle.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/idle.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ + +.. _idle: + +:c:type:`uv_idle_t` --- Idle handle +=================================== + +Idle handles will run the given callback once per loop iteration, right +before the :c:type:`uv_prepare_t` handles. + +.. note:: + The notable difference with prepare handles is that when there are active idle handles, + the loop will perform a zero timeout poll instead of blocking for i/o. + +.. warning:: + Despite the name, idle handles will get their callbacks called on every loop iteration, + not when the loop is actually "idle". + + +Data types +---------- + +.. c:type:: uv_idle_t + + Idle handle type. + +.. c:type:: void (*uv_idle_cb)(uv_idle_t* handle) + + Type definition for callback passed to :c:func:`uv_idle_start`. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_idle_init(uv_loop_t*, uv_idle_t* idle) + + Initialize the handle. + +.. c:function:: int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb) + + Start the handle with the given callback. + +.. c:function:: int uv_idle_stop(uv_idle_t* idle) + + Stop the handle, the callback will no longer be called. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. 
diff -Nru nodejs-0.11.13/deps/uv/docs/src/index.rst nodejs-0.11.15/deps/uv/docs/src/index.rst --- nodejs-0.11.13/deps/uv/docs/src/index.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/index.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,94 @@ + +Welcome to the libuv API documentation +====================================== + +Overview +-------- + +libuv is a multi-platform support library with a focus on asynchronous I/O. It +was primarily developed for use by `Node.js`_, but it's also used by `Luvit`_, +`Julia`_, `pyuv`_, and `others`_. + +.. note:: + In case you find errors in this documentation you can help by sending + `pull requests <https://github.com/libuv/libuv>`_! + +.. _Node.js: http://nodejs.org +.. _Luvit: http://luvit.io +.. _Julia: http://julialang.org +.. _pyuv: https://github.com/saghul/pyuv +.. _others: https://github.com/libuv/libuv/wiki/Projects-that-use-libuv + + +Features +-------- + +* Full-featured event loop backed by epoll, kqueue, IOCP, event ports. +* Asynchronous TCP and UDP sockets +* Asynchronous DNS resolution +* Asynchronous file and file system operations +* File system events +* ANSI escape code controlled TTY +* IPC with socket sharing, using Unix domain sockets or named pipes (Windows) +* Child processes +* Thread pool +* Signal handling +* High resolution clock +* Threading and synchronization primitives + + +Downloads +--------- + +libuv can be downloaded from `here <http://dist.libuv.org/dist/>`_. + + +Installation +------------ + +Installation instructions can be found on `the README <https://github.com/libuv/libuv/blob/master/README.md>`_. + + +Upgrading +--------- + +Migration guides for different libuv versions, starting with 1.0. + +.. toctree:: + :maxdepth: 1 + + migration_010_100 + + +Documentation +------------- + +.. 
toctree:: + :maxdepth: 1 + + design + errors + loop + handle + request + timer + prepare + check + idle + async + poll + signal + process + stream + tcp + pipe + tty + udp + fs_event + fs_poll + fs + threadpool + dns + dll + threading + misc diff -Nru nodejs-0.11.13/deps/uv/docs/src/loop.rst nodejs-0.11.15/deps/uv/docs/src/loop.rst --- nodejs-0.11.13/deps/uv/docs/src/loop.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/loop.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,156 @@ + +.. _loop: + +:c:type:`uv_loop_t` --- Event loop +================================== + +The event loop is the central part of libuv's functionality. It takes care +of polling for i/o and scheduling callbacks to be run based on different sources +of events. + + +Data types +---------- + +.. c:type:: uv_loop_t + + Loop data type. + +.. c:type:: uv_run_mode + + Mode used to run the loop with :c:func:`uv_run`. + + :: + + typedef enum { + UV_RUN_DEFAULT = 0, + UV_RUN_ONCE, + UV_RUN_NOWAIT + } uv_run_mode; + +.. c:type:: void (*uv_walk_cb)(uv_handle_t* handle, void* arg) + + Type definition for callback passed to :c:func:`uv_walk`. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: void* uv_loop_t.data + + Space for user-defined arbitrary data. libuv does not use this field. libuv does, however, + initialize it to NULL in :c:func:`uv_loop_init`, and it poisons the value (on debug builds) + on :c:func:`uv_loop_close`. + + +API +--- + +.. c:function:: int uv_loop_init(uv_loop_t* loop) + + Initializes the given `uv_loop_t` structure. + +.. c:function:: int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) + + Set additional loop options. You should normally call this before the + first call to :c:func:`uv_run` unless mentioned otherwise. + + Returns 0 on success or a UV_E* error code on failure. Be prepared to + handle UV_ENOSYS; it means the loop option is not supported by the platform. 
+ + Supported options: + + - UV_LOOP_BLOCK_SIGNAL: Block a signal when polling for new events. The + second argument to :c:func:`uv_loop_configure` is the signal number. + + This operation is currently only implemented for SIGPROF signals, + to suppress unnecessary wakeups when using a sampling profiler. + Requesting other signals will fail with UV_EINVAL. + +.. c:function:: int uv_loop_close(uv_loop_t* loop) + + Closes all internal loop resources. This function must only be called once + the loop has finished its execution or it will return UV_EBUSY. After this + function returns the user shall free the memory allocated for the loop. + +.. c:function:: uv_loop_t* uv_default_loop(void) + + Returns the initialized default loop. It may return NULL in case of + allocation failure. + +.. c:function:: int uv_run(uv_loop_t* loop, uv_run_mode mode) + + This function runs the event loop. It will act differently depending on the + specified mode: + + - UV_RUN_DEFAULT: Runs the event loop until there are no more active and + referenced handles or requests. Always returns zero. + - UV_RUN_ONCE: Poll for i/o once. Note that this function blocks if + there are no pending callbacks. Returns zero when done (no active handles + or requests left), or non-zero if more callbacks are expected (meaning + you should run the event loop again sometime in the future). + - UV_RUN_NOWAIT: Poll for i/o once but don't block if there are no + pending callbacks. Returns zero if done (no active handles + or requests left), or non-zero if more callbacks are expected (meaning + you should run the event loop again sometime in the future). + +.. c:function:: int uv_loop_alive(const uv_loop_t* loop) + + Returns non-zero if there are active handles or request in the loop. + +.. c:function:: void uv_stop(uv_loop_t* loop) + + Stop the event loop, causing :c:func:`uv_run` to end as soon as + possible. This will happen not sooner than the next loop iteration. 
+ If this function was called before blocking for i/o, the loop won't block + for i/o on this iteration. + +.. c:function:: size_t uv_loop_size(void) + + Returns the size of the `uv_loop_t` structure. Useful for FFI binding + writers who don't want to know the structure layout. + +.. c:function:: int uv_backend_fd(const uv_loop_t* loop) + + Get backend file descriptor. Only kqueue, epoll and event ports are + supported. + + This can be used in conjunction with `uv_run(loop, UV_RUN_NOWAIT)` to + poll in one thread and run the event loop's callbacks in another see + test/test-embed.c for an example. + + .. note:: + Embedding a kqueue fd in another kqueue pollset doesn't work on all platforms. It's not + an error to add the fd but it never generates events. + +.. c:function:: int uv_backend_timeout(const uv_loop_t* loop) + + Get the poll timeout. The return value is in milliseconds, or -1 for no + timeout. + +.. c:function:: uint64_t uv_now(const uv_loop_t* loop) + + Return the current timestamp in milliseconds. The timestamp is cached at + the start of the event loop tick, see :c:func:`uv_update_time` for details + and rationale. + + The timestamp increases monotonically from some arbitrary point in time. + Don't make assumptions about the starting point, you will only get + disappointed. + + .. note:: + Use :c:func:`uv_hrtime` if you need sub-millisecond granularity. + +.. c:function:: void uv_update_time(uv_loop_t* loop) + + Update the event loop's concept of "now". Libuv caches the current time + at the start of the event loop tick in order to reduce the number of + time-related system calls. + + You won't normally need to call this function unless you have callbacks + that block the event loop for longer periods of time, where "longer" is + somewhat subjective but probably on the order of a millisecond or more. + +.. 
c:function:: void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) + + Walk the list of handles: `walk_cb` will be executed with the given `arg`. diff -Nru nodejs-0.11.13/deps/uv/docs/src/migration_010_100.rst nodejs-0.11.15/deps/uv/docs/src/migration_010_100.rst --- nodejs-0.11.13/deps/uv/docs/src/migration_010_100.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/migration_010_100.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,244 @@ + +.. _migration_010_100: + +libuv 0.10 -> 1.0.0 migration guide +=================================== + +Some APIs changed quite a bit throughout the 1.0.0 development process. Here +is a migration guide for the most significant changes that happened after 0.10 +was released. + + +Loop initialization and closing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In libuv 0.10 (and previous versions), loops were created with `uv_loop_new`, which +allocated memory for a new loop and initialized it; and destroyed with `uv_loop_delete`, +which destroyed the loop and freed the memory. Starting with 1.0, those are deprecated +and the user is responsible for allocating the memory and then initializing the loop. + +libuv 0.10 + +:: + + uv_loop_t* loop = uv_loop_new(); + ... + uv_loop_delete(loop); + +libuv 1.0 + +:: + + uv_loop_t* loop = malloc(sizeof *loop); + uv_loop_init(loop); + ... + uv_loop_close(loop); + free(loop); + +.. note:: + Error handling was omitted for brevity. Check the documentation for :c:func:`uv_loop_init` + and :c:func:`uv_loop_close`. + + +Error handling +~~~~~~~~~~~~~~ + +Error handling had a major overhaul in libuv 1.0. In general, functions and status parameters +would get 0 for success and -1 for failure on libuv 0.10, and the user had to use `uv_last_error` +to fetch the error code, which was a positive number. + +In 1.0, functions and status parameters contain the actual error code, which is 0 for success, or +a negative number in case of error. + +libuv 0.10 + +:: + + ... 
assume 'server' is a TCP server which is already listening + r = uv_listen((uv_stream_t*) server, 511, NULL); + if (r == -1) { + uv_err_t err = uv_last_error(uv_default_loop()); + /* err.code contains UV_EADDRINUSE */ + } + +libuv 1.0 + +:: + + ... assume 'server' is a TCP server which is already listening + r = uv_listen((uv_stream_t*) server, 511, NULL); + if (r < 0) { + /* r contains UV_EADDRINUSE */ + } + + +Threadpool changes +~~~~~~~~~~~~~~~~~~ + +In libuv 0.10 Unix used a threadpool which defaulted to 4 threads, while Windows used the +`QueueUserWorkItem` API, which uses a Windows internal threadpool, which defaults to 512 +threads per process. + +In 1.0, we unified both implementations, so Windows now uses the same implementation Unix +does. The threadpool size can be set by exporting the ``UV_THREADPOOL_SIZE`` environment +variable. See :c:ref:`threadpool`. + + +Allocation callback API change +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In libuv 0.10 the callback had to return a filled :c:type:`uv_buf_t` by value: + +:: + + uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) { + return uv_buf_init(malloc(size), size); + } + +In libuv 1.0 a pointer to a buffer is passed to the callback, which the user +needs to fill: + +:: + + void alloc_cb(uv_handle_t* handle, size_t size, uv_buf_t* buf) { + buf->base = malloc(size); + buf->len = size; + } + + +Unification of IPv4 / IPv6 APIs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +libuv 1.0 unified the IPv4 and IPv6 APIS. There is no longer a `uv_tcp_bind` and `uv_tcp_bind6` +duality, there is only :c:func:`uv_tcp_bind` now. + +IPv4 functions took ``struct sockaddr_in`` structures by value, and IPv6 functions took +``struct sockaddr_in6``. Now functions take a ``struct sockaddr*`` (note it's a pointer). +It can be stack allocated. + +libuv 0.10 + +:: + + struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", 1234); + ... + uv_tcp_bind(&server, addr) + +libuv 1.0 + +:: + + struct sockaddr_in addr; + uv_ip4_addr("0.0.0.0", 1234, &addr) + ... 
+ uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0); + +The IPv4 and IPv6 struct creating functions (:c:func:`uv_ip4_addr` and :c:func:`uv_ip6_addr`) +have also changed, make sure you check the documentation. + +..note:: + This change applies to all functions that made a distinction between IPv4 and IPv6 + addresses. + + +Streams / UDP data receive callback API change +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The streams and UDP data receive callbacks now get a pointer to a :c:type:`uv_buf_t` buffer, +not a structure by value. + +libuv 0.10 + +:: + + void on_read(uv_stream_t* handle, + ssize_t nread, + uv_buf_t buf) { + ... + } + + void recv_cb(uv_udp_t* handle, + ssize_t nread, + uv_buf_t buf, + struct sockaddr* addr, + unsigned flags) { + ... + } + +libuv 1.0 + +:: + + void on_read(uv_stream_t* handle, + ssize_t nread, + const uv_buf_t* buf) { + ... + } + + void recv_cb(uv_udp_t* handle, + ssize_t nread, + const uv_buf_t* buf, + const struct sockaddr* addr, + unsigned flags) { + ... + } + + +Receiving handles over pipes API change +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In libuv 0.10 (and earlier versions) the `uv_read2_start` function was used to start reading +data on a pipe, which could also result in the reception of handles over it. The callback +for such function looked like this: + +:: + + void on_read(uv_pipe_t* pipe, + ssize_t nread, + uv_buf_t buf, + uv_handle_type pending) { + ... + } + +In libuv 1.0, `uv_read2_start` was removed, and the user needs to check if there are pending +handles using :c:func:`uv_pipe_pending_count` and :c:func:`uv_pipe_pending_type` while in +the read callback: + +:: + + void on_read(uv_stream_t* handle, + ssize_t nread, + const uv_buf_t* buf) { + ... + while (uv_pipe_pending_count((uv_pipe_t*) handle) != 0) { + pending = uv_pipe_pending_type((uv_pipe_t*) handle); + ... + } + ... 
+ } + + +Extracting the file descriptor out of a handle +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +While it wasn't supported by the API, users often accessed the libuv internals in +order to get access to the file descriptor of a TCP handle, for example. + +:: + + fd = handle->io_watcher.fd; + +This is now properly exposed through the :c:func:`uv_fileno` function. + + +uv_fs_readdir rename and API change +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +`uv_fs_readdir` returned a list of strings in the `req->ptr` field upon completion in +libuv 0.10. In 1.0, this function got renamed to :c:func:`uv_fs_scandir`, since it's +actually implemented using ``scandir(3)``. + +In addition, instead of allocating a full list strings, the user is able to get one +result at a time by using the :c:func:`uv_fs_scandir_next` function. This function +does not need to make a roundtrip to the threadpool, because libuv will keep the +list of *dents* returned by ``scandir(3)`` around. diff -Nru nodejs-0.11.13/deps/uv/docs/src/misc.rst nodejs-0.11.15/deps/uv/docs/src/misc.rst --- nodejs-0.11.13/deps/uv/docs/src/misc.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/misc.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,228 @@ + +.. _misc: + +Miscellaneous utilities +====================== + +This section contains miscellaneous functions that don't really belong in any +other section. + + +Data types +---------- + +.. c:type:: uv_buf_t + + Buffer data type. + +.. c:type:: uv_file + + Cross platform representation of a file handle. + +.. c:type:: uv_os_sock_t + + Cross platform representation of a socket handle. + +.. c:type:: uv_os_fd_t + + Abstract representation of a file descriptor. On Unix systems this is a + `typedef` of `int` and on Windows fa `HANDLE`. + +.. c:type:: uv_rusage_t + + Data type for resource usage results. 
+ + :: + + typedef struct { + uv_timeval_t ru_utime; /* user CPU time used */ + uv_timeval_t ru_stime; /* system CPU time used */ + uint64_t ru_maxrss; /* maximum resident set size */ + uint64_t ru_ixrss; /* integral shared memory size */ + uint64_t ru_idrss; /* integral unshared data size */ + uint64_t ru_isrss; /* integral unshared stack size */ + uint64_t ru_minflt; /* page reclaims (soft page faults) */ + uint64_t ru_majflt; /* page faults (hard page faults) */ + uint64_t ru_nswap; /* swaps */ + uint64_t ru_inblock; /* block input operations */ + uint64_t ru_oublock; /* block output operations */ + uint64_t ru_msgsnd; /* IPC messages sent */ + uint64_t ru_msgrcv; /* IPC messages received */ + uint64_t ru_nsignals; /* signals received */ + uint64_t ru_nvcsw; /* voluntary context switches */ + uint64_t ru_nivcsw; /* involuntary context switches */ + } uv_rusage_t; + +.. c:type:: uv_cpu_info_t + + Data type for CPU information. + + :: + + typedef struct uv_cpu_info_s { + char* model; + int speed; + struct uv_cpu_times_s { + uint64_t user; + uint64_t nice; + uint64_t sys; + uint64_t idle; + uint64_t irq; + } cpu_times; + } uv_cpu_info_t; + +.. c:type:: uv_interface_address_t + + Data type for interface addresses. + + :: + + typedef struct uv_interface_address_s { + char* name; + char phys_addr[6]; + int is_internal; + union { + struct sockaddr_in address4; + struct sockaddr_in6 address6; + } address; + union { + struct sockaddr_in netmask4; + struct sockaddr_in6 netmask6; + } netmask; + } uv_interface_address_t; + + +API +--- + +.. c:function:: uv_handle_type uv_guess_handle(uv_file file) + + Used to detect what type of stream should be used with a given file + descriptor. Usually this will be used during initialization to guess the + type of the stdio streams. + + For ``isatty()`` functionality use this function and test for ``UV_TTY``. + +.. c:function:: unsigned int uv_version(void) + + Returns the libuv version packed into a single integer. 
8 bits are used for + each component, with the patch number stored in the 8 least significant + bits. E.g. for libuv 1.2.3 this would return 0x010203. + +.. c:function:: const char* uv_version_string(void) + + Returns the libuv version number as a string. For non-release versions + "-pre" is appended, so the version number could be "1.2.3-pre". + +.. c:function:: uv_buf_t uv_buf_init(char* base, unsigned int len) + + Constructor for :c:type:`uv_buf_t`. + + Due to platform differences the user cannot rely on the ordering of the + `base` and `len` members of the uv_buf_t struct. The user is responsible for + freeing `base` after the uv_buf_t is done. Return struct passed by value. + +.. c:function:: char** uv_setup_args(int argc, char** argv) + + Store the program arguments. Required for getting / setting the process title. + +.. c:function:: int uv_get_process_title(char* buffer, size_t size) + + Gets the title of the current process. + +.. c:function:: int uv_set_process_title(const char* title) + + Sets the current process title. + +.. c:function:: int uv_resident_set_memory(size_t* rss) + + Gets the resident set size (RSS) for the current process. + +.. c:function:: int uv_uptime(double* uptime) + + Gets the current system uptime. + +.. c:function:: int uv_getrusage(uv_rusage_t* rusage) + + Gets the resource usage measures for the current process. + + .. note:: + On Windows not all fields are set, the unsupported fields are filled with zeroes. + +.. c:function:: int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) + + Gets information about the CPUs on the system. The `cpu_infos` array will + have `count` elements and needs to be freed with :c:func:`uv_free_cpu_info`. + +.. c:function:: void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) + + Frees the `cpu_infos` array previously allocated with :c:func:`uv_cpu_info`. + +.. 
c:function:: int uv_interface_addresses(uv_interface_address_t** addresses, int* count) + + Gets address information about the network interfaces on the system. An + array of `count` elements is allocated and returned in `addresses`. It must + be freed by the user, calling :c:func:`uv_free_interface_addresses`. + +.. c:function:: void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) + + Free an array of :c:type:`uv_interface_address_t` which was returned by + :c:func:`uv_interface_addresses`. + +.. c:function:: void uv_loadavg(double avg[3]) + + Gets the load average. See: `<http://en.wikipedia.org/wiki/Load_(computing)>`_ + + .. note:: + Returns [0,0,0] on Windows (i.e., it's not implemented). + +.. c:function:: int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) + + Convert a string containing an IPv4 addresses to a binary structure. + +.. c:function:: int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) + + Convert a string containing an IPv6 addresses to a binary structure. + +.. c:function:: int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) + + Convert a binary structure containing an IPv4 address to a string. + +.. c:function:: int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) + + Convert a binary structure containing an IPv6 address to a string. + +.. c:function:: int uv_inet_ntop(int af, const void* src, char* dst, size_t size) +.. c:function:: int uv_inet_pton(int af, const char* src, void* dst) + + Cross-platform IPv6-capable implementation of the 'standard' ``inet_ntop()`` + and ``inet_pton()`` functions. On success they return 0. In case of error + the target `dst` pointer is unmodified. + +.. c:function:: int uv_exepath(char* buffer, size_t* size) + + Gets the executable path. + +.. c:function:: int uv_cwd(char* buffer, size_t* size) + + Gets the current working directory. + +.. 
c:function:: int uv_chdir(const char* dir) + + Changes the current working directory. + +.. uint64_t uv_get_free_memory(void) +.. c:function:: uint64_t uv_get_total_memory(void) + + Gets memory information (in bytes). + +.. c:function:: uint64_t uv_hrtime(void) + + Returns the current high-resolution real time. This is expressed in + nanoseconds. It is relative to an arbitrary time in the past. It is not + related to the time of day and therefore not subject to clock drift. The + primary use is for measuring performance between intervals. + + .. note:: + Not every platform can support nanosecond resolution; however, this value will always + be in nanoseconds. diff -Nru nodejs-0.11.13/deps/uv/docs/src/pipe.rst nodejs-0.11.15/deps/uv/docs/src/pipe.rst --- nodejs-0.11.13/deps/uv/docs/src/pipe.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/pipe.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,86 @@ + +.. _pipe: + +:c:type:`uv_pipe_t` --- Pipe handle +=================================== + +Pipe handles provide an abstraction over local domain sockets on Unix and named +pipes on Windows. + +:c:type:`uv_pipe_t` is a 'subclass' of :c:type:`uv_stream_t`. + + +Data types +---------- + +.. c:type:: uv_pipe_t + + Pipe handle type. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_stream_t` members also apply. + + +API +--- + +.. c:function:: int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc) + + Initialize a pipe handle. The `ipc` argument is a boolean to indicate if + this pipe will be used for handle passing between processes. + +.. c:function:: int uv_pipe_open(uv_pipe_t*, uv_file file) + + Open an existing file descriptor or HANDLE as a pipe. + + .. note:: + The user is responsible for setting the file descriptor in non-blocking mode. + +.. c:function:: int uv_pipe_bind(uv_pipe_t* handle, const char* name) + + Bind the pipe to a file path (Unix) or a name (Windows). + + .. 
note:: + Paths on Unix get truncated to ``sizeof(sockaddr_un.sun_path)`` bytes, typically between + 92 and 108 bytes. + +.. c:function:: void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, const char* name, uv_connect_cb cb) + + Connect to the Unix domain socket or the named pipe. + + .. note:: + Paths on Unix get truncated to ``sizeof(sockaddr_un.sun_path)`` bytes, typically between + 92 and 108 bytes. + +.. c:function:: int uv_pipe_getsockname(const uv_pipe_t* handle, char* buf, size_t* len) + + Get the name of the Unix domain socket or the named pipe. + + A preallocated buffer must be provided. The len parameter holds the length + of the buffer and it's set to the number of bytes written to the buffer on + output. If the buffer is not big enough ``UV_ENOBUFS`` will be returned and + len will contain the required size. + +.. c:function:: void uv_pipe_pending_instances(uv_pipe_t* handle, int count) + + Set the number of pending pipe instance handles when the pipe server is + waiting for connections. + + .. note:: + This setting applies to Windows only. + +.. c:function:: int uv_pipe_pending_count(uv_pipe_t* handle) +.. c:function:: uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) + + Used to receive handles over IPC pipes. + + First - call :c:func:`uv_pipe_pending_count`, if it's > 0 then initialize + a handle of the given `type`, returned by :c:func:`uv_pipe_pending_type` + and call ``uv_accept(pipe, handle)``. + +.. seealso:: The :c:type:`uv_stream_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/poll.rst nodejs-0.11.15/deps/uv/docs/src/poll.rst --- nodejs-0.11.13/deps/uv/docs/src/poll.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/poll.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,99 @@ + +.. _poll: + +:c:type:`uv_poll_t` --- Poll handle +=================================== + +Poll handles are used to watch file descriptors for readability and +writability, similar to the purpose of poll(2). 
+ +The purpose of poll handles is to enable integrating external libraries that +rely on the event loop to signal it about the socket status changes, like +c-ares or libssh2. Using uv_poll_t for any other purpose is not recommended; +:c:type:`uv_tcp_t`, :c:type:`uv_udp_t`, etc. provide an implementation that is faster and +more scalable than what can be achieved with :c:type:`uv_poll_t`, especially on +Windows. + +It is possible that poll handles occasionally signal that a file descriptor is +readable or writable even when it isn't. The user should therefore always +be prepared to handle EAGAIN or equivalent when it attempts to read from or +write to the fd. + +It is not okay to have multiple active poll handles for the same socket, this +can cause libuv to busyloop or otherwise malfunction. + +The user should not close a file descriptor while it is being polled by an +active poll handle. This can cause the handle to report an error, +but it might also start polling another socket. However the fd can be safely +closed immediately after a call to :c:func:`uv_poll_stop` or :c:func:`uv_close`. + +.. note:: + On windows only sockets can be polled with poll handles. On Unix any file + descriptor that would be accepted by poll(2) can be used. + + +Data types +---------- + +.. c:type:: uv_poll_t + + Poll handle type. + +.. c:type:: void (*uv_poll_cb)(uv_poll_t* handle, int status, int events) + + Type definition for callback passed to :c:func:`uv_poll_start`. + +.. c:type:: uv_poll_event + + Poll event types + + :: + + enum uv_poll_event { + UV_READABLE = 1, + UV_WRITABLE = 2 + }; + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) + + Initialize the handle using a file descriptor. + +.. 
c:function:: int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, uv_os_sock_t socket) + + Initialize the handle using a socket descriptor. On Unix this is identical + to :c:func:`uv_poll_init`. On windows it takes a SOCKET handle. + +.. c:function:: int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) + + Starts polling the file descriptor. `events` is a bitmask consisting made up + of UV_READABLE and UV_WRITABLE. As soon as an event is detected the callback + will be called with `status` set to 0, and the detected events set on the + `events` field. + + If an error happens while polling, `status` will be < 0 and corresponds + with one of the UV_E* error codes (see :ref:`errors`). The user should + not close the socket while the handle is active. If the user does that + anyway, the callback *may* be called reporting an error status, but this + is **not** guaranteed. + + .. note:: + Calling :c:func:`uv_poll_start` on a handle that is already active is fine. Doing so + will update the events mask that is being watched for. + +.. c:function:: int uv_poll_stop(uv_poll_t* poll) + + Stop polling the file descriptor, the callback will no longer be called. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/prepare.rst nodejs-0.11.15/deps/uv/docs/src/prepare.rst --- nodejs-0.11.13/deps/uv/docs/src/prepare.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/prepare.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ + +.. _prepare: + +:c:type:`uv_prepare_t` --- Prepare handle +========================================= + +Prepare handles will run the given callback once per loop iteration, right +before polling for i/o. + + +Data types +---------- + +.. c:type:: uv_prepare_t + + Prepare handle type. + +.. c:type:: void (*uv_prepare_cb)(uv_prepare_t* handle) + + Type definition for callback passed to :c:func:`uv_prepare_start`. 
+ + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_prepare_init(uv_loop_t* loop, uv_prepare_t* prepare) + + Initialize the handle. + +.. c:function:: int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb) + + Start the handle with the given callback. + +.. c:function:: int uv_prepare_stop(uv_prepare_t* prepare) + + Stop the handle, the callback will no longer be called. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/process.rst nodejs-0.11.15/deps/uv/docs/src/process.rst --- nodejs-0.11.13/deps/uv/docs/src/process.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/process.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,225 @@ + +.. _process: + +:c:type:`uv_process_t` --- Process handle +========================================= + +Process handles will spawn a new process and allow the user to control it and +establish communication channels with it using streams. + + +Data types +---------- + +.. c:type:: uv_process_t + + Process handle type. + +.. c:type:: uv_process_options_t + + Options for spawning the process (passed to :c:func:`uv_spawn`. + + :: + + typedef struct uv_process_options_s { + uv_exit_cb exit_cb; + const char* file; + char** args; + char** env; + const char* cwd; + unsigned int flags; + int stdio_count; + uv_stdio_container_t* stdio; + uv_uid_t uid; + uv_gid_t gid; + } uv_process_options_t; + +.. c:type:: void (*uv_exit_cb)(uv_process_t*, int64_t exit_status, int term_signal) + + Type definition for callback passed in :c:type:`uv_process_options_t` which + will indicate the exit status and the signal that caused the process to + terminate, if any. + +.. c:type:: uv_process_flags + + Flags to be set on the flags field of :c:type:`uv_process_options_t`. + + :: + + enum uv_process_flags { + /* + * Set the child process' user id. 
+ */ + UV_PROCESS_SETUID = (1 << 0), + /* + * Set the child process' group id. + */ + UV_PROCESS_SETGID = (1 << 1), + /* + * Do not wrap any arguments in quotes, or perform any other escaping, when + * converting the argument list into a command line string. This option is + * only meaningful on Windows systems. On Unix it is silently ignored. + */ + UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS = (1 << 2), + /* + * Spawn the child process in a detached state - this will make it a process + * group leader, and will effectively enable the child to keep running after + * the parent exits. Note that the child process will still keep the + * parent's event loop alive unless the parent process calls uv_unref() on + * the child's process handle. + */ + UV_PROCESS_DETACHED = (1 << 3), + /* + * Hide the subprocess console window that would normally be created. This + * option is only meaningful on Windows systems. On Unix it is silently + * ignored. + */ + UV_PROCESS_WINDOWS_HIDE = (1 << 4) + }; + +.. c:type:: uv_stdio_container_t + + Container for each stdio handle or fd passed to a child process. + + :: + + typedef struct uv_stdio_container_s { + uv_stdio_flags flags; + union { + uv_stream_t* stream; + int fd; + } data; + } uv_stdio_container_t; + +.. c:type:: uv_stdio_flags + + Flags specifying how a stdio should be transmitted to the child process. + + :: + + typedef enum { + UV_IGNORE = 0x00, + UV_CREATE_PIPE = 0x01, + UV_INHERIT_FD = 0x02, + UV_INHERIT_STREAM = 0x04, + /* + * When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE + * determine the direction of flow, from the child process' perspective. Both + * flags may be specified to create a duplex data stream. + */ + UV_READABLE_PIPE = 0x10, + UV_WRITABLE_PIPE = 0x20 + } uv_stdio_flags; + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: uv_process_t.pid + + The PID of the spawned process. It's set after calling :c:func:`uv_spawn`. + +.. note:: + The :c:type:`uv_handle_t` members also apply. + +.. 
c:member:: uv_process_options_t.exit_cb + + Callback called after the process exits. + +.. c:member:: uv_process_options_t.file + + Path pointing to the program to be executed. + +.. c:member:: uv_process_options_t.args + + Command line arguments. args[0] should be the path to the program. On + Windows this uses `CreateProcess` which concatenates the arguments into a + string this can cause some strange errors. See the + ``UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS`` flag on :c:type:`uv_process_flags`. + +.. c:member:: uv_process_options_t.env + + Environment for the new process. If NULL the parents environment is used. + +.. c:member:: uv_process_options_t.cwd + + Current working directory for the subprocess. + +.. c:member:: uv_process_options_t.flags + + Various flags that control how :c:func:`uv_spawn` behaves. See + :c:type:`uv_process_flags`. + +.. c:member:: uv_process_options_t.stdio_count +.. c:member:: uv_process_options_t.stdio + + The `stdio` field points to an array of :c:type:`uv_stdio_container_t` + structs that describe the file descriptors that will be made available to + the child process. The convention is that stdio[0] points to stdin, + fd 1 is used for stdout, and fd 2 is stderr. + + .. note:: + On Windows file descriptors greater than 2 are available to the child process only if + the child processes uses the MSVCRT runtime. + +.. c:member:: uv_process_options_t.uid +.. c:member:: uv_process_options_t.gid + + Libuv can change the child process' user/group id. This happens only when + the appropriate bits are set in the flags fields. + + .. note:: + This is not supported on Windows, :c:func:`uv_spawn` will fail and set the error + to ``UV_ENOTSUP``. + +.. c:member:: uv_stdio_container_t.flags + + Flags specifying how the stdio container should be passed to the child. See + :c:type:`uv_stdio_flags`. + +.. c:member:: uv_stdio_container_t.data + + Union containing either the stream or fd to be passed on to the child + process. + + +API +--- + +.. 
c:function:: void uv_disable_stdio_inheritance(void) + + Disables inheritance for file descriptors / handles that this process + inherited from its parent. The effect is that child processes spawned by + this process don't accidentally inherit these handles. + + It is recommended to call this function as early in your program as possible, + before the inherited file descriptors can be closed or duplicated. + + .. note:: + This function works on a best-effort basis: there is no guarantee that libuv can discover + all file descriptors that were inherited. In general it does a better job on Windows than + it does on Unix. + +.. c:function:: int uv_spawn(uv_loop_t* loop, uv_process_t* handle, const uv_process_options_t* options) + + Initializes the process handle and starts the process. If the process is + successfully spawned, this function will return 0. Otherwise, the + negative error code corresponding to the reason it couldn't spawn is + returned. + + Possible reasons for failing to spawn would include (but not be limited to) + the file to execute not existing, not having permissions to use the setuid or + setgid specified, or not having enough memory to allocate for the new + process. + +.. c:function:: int uv_process_kill(uv_process_t* handle, int signum) + + Sends the specified signal to the given process handle. Check the documentation + on :c:ref:`signal` for signal support, specially on Windows. + +.. c:function:: int uv_kill(int pid, int signum) + + Sends the specified signal to the given PID. Check the documentation + on :c:ref:`signal` for signal support, specially on Windows. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/request.rst nodejs-0.11.15/deps/uv/docs/src/request.rst --- nodejs-0.11.13/deps/uv/docs/src/request.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/request.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,82 @@ + +.. 
_request: + +:c:type:`uv_req_t` --- Base request +=================================== + +`uv_req_t` is the base type for all libuv request types. + +Structures are aligned so that any libuv request can be cast to `uv_req_t`. +All API functions defined here work with any request type. + + +Data types +---------- + +.. c:type:: uv_req_t + + The base libuv request structure. + +.. c:type:: uv_any_req + + Union of all request types. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: void* uv_req_t.data + + Space for user-defined arbitrary data. libuv does not use this field. + +.. c:member:: uv_req_type uv_req_t.type + + Indicates the type of request. Readonly. + + :: + + typedef enum { + UV_UNKNOWN_REQ = 0, + UV_REQ, + UV_CONNECT, + UV_WRITE, + UV_SHUTDOWN, + UV_UDP_SEND, + UV_FS, + UV_WORK, + UV_GETADDRINFO, + UV_GETNAMEINFO, + UV_REQ_TYPE_PRIVATE, + UV_REQ_TYPE_MAX, + } uv_req_type; + + +API +--- + +.. c:function:: int uv_cancel(uv_req_t* req) + + Cancel a pending request. Fails if the request is executing or has finished + executing. + + Returns 0 on success, or an error code < 0 on failure. + + Only cancellation of :c:type:`uv_fs_t`, :c:type:`uv_getaddrinfo_t`, + :c:type:`uv_getnameinfo_t` and :c:type:`uv_work_t` requests is + currently supported. + + Cancelled requests have their callbacks invoked some time in the future. + It's **not** safe to free the memory associated with the request until the + callback is called. + + Here is how cancellation is reported to the callback: + + * A :c:type:`uv_fs_t` request has its req->result field set to `UV_ECANCELED`. + + * A :c:type:`uv_work_t`, :c:type:`uv_getaddrinfo_t` or :c:type:`uv_getnameinfo_t` + request has its callback invoked with status == `UV_ECANCELED`. + +.. c:function:: size_t uv_req_size(uv_req_type type) + + Returns the size of the given request type. Useful for FFI binding writers + who don't want to know the structure layout. 
diff -Nru nodejs-0.11.13/deps/uv/docs/src/signal.rst nodejs-0.11.15/deps/uv/docs/src/signal.rst --- nodejs-0.11.13/deps/uv/docs/src/signal.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/signal.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,77 @@ + +.. _signal: + +:c:type:`uv_signal_t` --- Signal handle +======================================= + +Signal handles implement Unix style signal handling on a per-event loop basis. + +Reception of some signals is emulated on Windows: + +* SIGINT is normally delivered when the user presses CTRL+C. However, like + on Unix, it is not generated when terminal raw mode is enabled. + +* SIGBREAK is delivered when the user presses CTRL + BREAK. + +* SIGHUP is generated when the user closes the console window. On SIGHUP the + program is given approximately 10 seconds to perform cleanup. After that + Windows will unconditionally terminate it. + +* SIGWINCH is raised whenever libuv detects that the console has been + resized. SIGWINCH is emulated by libuv when the program uses a :c:type:`uv_tty_t` + handle to write to the console. SIGWINCH may not always be delivered in a + timely manner; libuv will only detect size changes when the cursor is + being moved. When a readable :c:type:`uv_tty_t` handle is used in raw mode, + resizing the console buffer will also trigger a SIGWINCH signal. + +Watchers for other signals can be successfully created, but these signals +are never received. These signals are: `SIGILL`, `SIGABRT`, `SIGFPE`, `SIGSEGV`, +`SIGTERM` and `SIGKILL`. + +Calls to raise() or abort() to programmatically raise a signal are +not detected by libuv; these will not trigger a signal watcher. + +.. note:: + On Linux SIGRT0 and SIGRT1 (signals 32 and 33) are used by the NPTL pthreads library to + manage threads. Installing watchers for those signals will lead to unpredictable behavior + and is strongly discouraged. Future versions of libuv may simply reject them. + + +Data types +---------- + +.. 
c:type:: uv_signal_t + + Signal handle type. + +.. c:type:: void (*uv_signal_cb)(uv_signal_t* handle, int signum) + + Type definition for callback passed to :c:func:`uv_signal_start`. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: int uv_signal_t.signum + + Signal being monitored by this handle. Readonly. + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_signal_init(uv_loop_t*, uv_signal_t* signal) + + Initialize the handle. + +.. c:function:: int uv_signal_start(uv_signal_t* signal, uv_signal_cb cb, int signum) + + Start the handle with the given callback, watching for the given signal. + +.. c:function:: int uv_signal_stop(uv_signal_t* signal) + + Stop the handle, the callback will no longer be called. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/architecture.png and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/architecture.png differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Data/st0-311.jpg and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Data/st0-311.jpg differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Data/st1-475.jpg and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Data/st1-475.jpg differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Index.zip and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Index.zip differ diff -Nru nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Metadata/BuildVersionHistory.plist nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Metadata/BuildVersionHistory.plist --- nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Metadata/BuildVersionHistory.plist 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Metadata/BuildVersionHistory.plist 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<array> + <string>Template: White (2014-02-28 09:41)</string> + <string>M6.2.2-1878-1</string> +</array> +</plist> diff -Nru nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Metadata/DocumentIdentifier nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Metadata/DocumentIdentifier --- nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Metadata/DocumentIdentifier 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Metadata/DocumentIdentifier 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +F69E9CD9-EEF1-4223-9DA4-A1EA7FE112BA \ No newline at end of file Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/Metadata/Properties.plist and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/Metadata/Properties.plist differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/preview.jpg and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/preview.jpg differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/preview-micro.jpg and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/preview-micro.jpg differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/diagrams.key/preview-web.jpg and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/diagrams.key/preview-web.jpg differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/favicon.ico and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/favicon.ico differ Binary files 
/tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/logo.png and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/logo.png differ Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/docs/src/static/loop_iteration.png and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/docs/src/static/loop_iteration.png differ diff -Nru nodejs-0.11.13/deps/uv/docs/src/stream.rst nodejs-0.11.15/deps/uv/docs/src/stream.rst --- nodejs-0.11.13/deps/uv/docs/src/stream.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/stream.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,217 @@ + +.. _stream: + +:c:type:`uv_stream_t` --- Stream handle +======================================= + +Stream handles provide an abstraction of a duplex communication channel. +:c:type:`uv_stream_t` is an abstract type, libuv provides 3 stream implementations +in the form of :c:type:`uv_tcp_t`, :c:type:`uv_pipe_t` and :c:type:`uv_tty_t`. + + +Data types +---------- + +.. c:type:: uv_stream_t + + Stream handle type. + +.. c:type:: uv_connect_t + + Connect request type. + +.. c:type:: uv_shutdown_t + + Shutdown request type. + +.. c:type:: uv_write_t + + Write request type. + +.. c:type:: void (*uv_read_cb)(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) + + Callback called when data was read on a stream. + + `nread` is > 0 if there is data available, 0 if libuv is done reading for + now, or < 0 on error. + + The callee is responsible for stopping or closing the stream when an error happens + by calling :c:func:`uv_read_stop` or :c:func:`uv_close`. Trying to read + from the stream again is undefined. + + The callee is responsible for freeing the buffer, libuv does not reuse it. + The buffer may be a null buffer (where buf->base=NULL and buf->len=0) on + error. + +.. c:type:: void (*uv_write_cb)(uv_write_t* req, int status) + + Callback called after data was written on a stream. `status` will be 0 in + case of success, < 0 otherwise. + +.. 
c:type:: void (*uv_connect_cb)(uv_connect_t* req, int status) + + Callback called after a connection started by :c:func:`uv_connect` is done. + `status` will be 0 in case of success, < 0 otherwise. + +.. c:type:: void (*uv_shutdown_cb)(uv_shutdown_t* req, int status) + + Callback called after a shutdown request has been completed. `status` will + be 0 in case of success, < 0 otherwise. + +.. c:type:: void (*uv_connection_cb)(uv_stream_t* server, int status) + + Callback called when a stream server has received an incoming connection. + The user can accept the connection by calling :c:func:`uv_accept`. + `status` will be 0 in case of success, < 0 otherwise. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: size_t uv_stream_t.write_queue_size + + Contains the amount of queued bytes waiting to be sent. Readonly. + +.. c:member:: uv_stream_t* uv_connect_t.handle + + Pointer to the stream where this connection request is running. + +.. c:member:: uv_stream_t* uv_shutdown_t.handle + + Pointer to the stream where this shutdown request is running. + +.. c:member:: uv_stream_t* uv_write_t.handle + + Pointer to the stream where this write request is running. + +.. c:member:: uv_stream_t* uv_write_t.send_handle + + Pointer to the stream being sent using this write request. + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) + + Shutdown the outgoing (write) side of a duplex stream. It waits for pending + write requests to complete. The `handle` should refer to an initialized stream. + `req` should be an uninitialized shutdown request struct. The `cb` is called + after shutdown is complete. + +.. c:function:: int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) + + Start listening for incoming connections. `backlog` indicates the number of + connections the kernel might queue, same as ``listen(2)``. 
When a new + incoming connection is received the :c:type:`uv_connection_cb` callback is + called. + +.. c:function:: int uv_accept(uv_stream_t* server, uv_stream_t* client) + + This call is used in conjunction with :c:func:`uv_listen` to accept incoming + connections. Call this function after receiving a :c:type:`uv_connection_cb` + to accept the connection. Before calling this function the client handle must + be initialized. < 0 return value indicates an error. + + When the :c:type:`uv_connection_cb` callback is called it is guaranteed that + this function will complete successfully the first time. If you attempt to use + it more than once, it may fail. It is suggested to only call this function once + per :c:type:`uv_connection_cb` call. + + .. note:: + `server` and `client` must be handles running on the same loop. + +.. c:function:: int uv_read_start(uv_stream_t*, uv_alloc_cb alloc_cb, uv_read_cb read_cb) + + Read data from an incoming stream. The callback will be made several + times until there is no more data to read or :c:func:`uv_read_stop` is called. + When we've reached EOF `nread` will be set to ``UV_EOF``. + + When `nread` < 0, the `buf` parameter might not point to a valid buffer; + in that case `buf.len` and `buf.base` are both set to 0. + + .. note:: + `nread` might also be 0, which does *not* indicate an error or EOF, it happens when + libuv requested a buffer through the alloc callback but then decided that it didn't + need that buffer. + +.. c:function:: int uv_read_stop(uv_stream_t*) + + Stop reading data from the stream. The :c:type:`uv_read_cb` callback will + no longer be called. + +.. c:function:: int uv_write(uv_write_t* req, uv_stream_t* handle, const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb) + + Write data to stream. Buffers are written in order. 
Example: + + :: + + uv_buf_t a[] = { + { .base = "1", .len = 1 }, + { .base = "2", .len = 1 } + }; + + uv_buf_t b[] = { + { .base = "3", .len = 1 }, + { .base = "4", .len = 1 } + }; + + uv_write_t req1; + uv_write_t req2; + + /* writes "1234" */ + uv_write(&req1, stream, a, 2); + uv_write(&req2, stream, b, 2); + +.. c:function:: int uv_write2(uv_write_t* req, uv_stream_t* handle, const uv_buf_t bufs[], unsigned int nbufs, uv_stream_t* send_handle, uv_write_cb cb) + + Extended write function for sending handles over a pipe. The pipe must be + initialized with `ipc` == 1. + + .. note:: + `send_handle` must be a TCP socket or pipe, which is a server or a connection (listening + or connected state). Bound sockets or pipes will be assumed to be servers. + +.. c:function:: int uv_try_write(uv_stream_t* handle, const uv_buf_t bufs[], unsigned int nbufs) + + Same as :c:func:`uv_write`, but won't queue a write request if it can't be + completed immediately. + + Will return either: + + * > 0: number of bytes written (can be less than the supplied buffer size). + * < 0: negative error code (``UV_EAGAIN`` is returned if no data can be sent + immediately). + +.. c:function:: int uv_is_readable(const uv_stream_t* handle) + + Returns 1 if the stream is readable, 0 otherwise. + +.. c:function:: int uv_is_writable(const uv_stream_t* handle) + + Returns 1 if the stream is writable, 0 otherwise. + +.. c:function:: int uv_stream_set_blocking(uv_stream_t* handle, int blocking) + + Enable or disable blocking mode for a stream. + + When blocking mode is enabled all writes complete synchronously. The + interface remains unchanged otherwise, e.g. completion or failure of the + operation will still be reported through a callback which is made + asynchronously. + + .. warning:: + Relying too much on this API is not recommended. It is likely to change + significantly in the future. + + Currently this only works on Windows and only for + :c:type:`uv_pipe_t` handles. 
+ + Also libuv currently makes no ordering guarantee when the blocking mode + is changed after write requests have already been submitted. Therefore it is + recommended to set the blocking mode immediately after opening or creating + the stream. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/tcp.rst nodejs-0.11.15/deps/uv/docs/src/tcp.rst --- nodejs-0.11.13/deps/uv/docs/src/tcp.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/tcp.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,97 @@ + +.. _tcp: + +:c:type:`uv_tcp_t` --- TCP handle +================================= + +TCP handles are used to represent both TCP streams and servers. + +:c:type:`uv_tcp_t` is a 'subclass' of :c:type:`uv_stream_t`. + + +Data types +---------- + +.. c:type:: uv_tcp_t + + TCP handle type. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_stream_t` members also apply. + + +API +--- + +.. c:function:: int uv_tcp_init(uv_loop_t*, uv_tcp_t* handle) + + Initialize the handle. + +.. c:function:: int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) + + Open an existing file descriptor or SOCKET as a TCP handle. + + .. note:: + The user is responsible for setting the file descriptor in + non-blocking mode. + +.. c:function:: int uv_tcp_nodelay(uv_tcp_t* handle, int enable) + + Enable / disable Nagle's algorithm. + +.. c:function:: int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) + + Enable / disable TCP keep-alive. `delay` is the initial delay in seconds, + ignored when `enable` is zero. + +.. c:function:: int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) + + Enable / disable simultaneous asynchronous accept requests that are + queued by the operating system when listening for new TCP connections. + + This setting is used to tune a TCP server for the desired performance. 
+ Having simultaneous accepts can significantly improve the rate of accepting + connections (which is why it is enabled by default) but may lead to uneven + load distribution in multi-process setups. + +.. c:function:: int uv_tcp_bind(uv_tcp_t* handle, const struct sockaddr* addr, unsigned int flags) + + Bind the handle to an address and port. `addr` should point to an + initialized ``struct sockaddr_in`` or ``struct sockaddr_in6``. + + When the port is already taken, you can expect to see an ``UV_EADDRINUSE`` + error from either :c:func:`uv_tcp_bind`, :c:func:`uv_listen` or + :c:func:`uv_tcp_connect`. That is, a successful call to this function does + not guarantee that the call to :c:func:`uv_listen` or :c:func:`uv_tcp_connect` + will succeed as well. + + `flags` con contain ``UV_TCP_IPV6ONLY``, in which case dual-stack support + is disabled and only IPv6 is used. + +.. c:function:: int uv_tcp_getsockname(const uv_tcp_t* handle, struct sockaddr* name, int* namelen) + + Get the current address to which the handle is bound. `addr` must point to + a valid and big enough chunk of memory, ``struct sockaddr_storage`` is + recommended for IPv4 and IPv6 support. + +.. c:function:: int uv_tcp_getpeername(const uv_tcp_t* handle, struct sockaddr* name, int* namelen) + + Get the address of the peer connected to the handle. `addr` must point to + a valid and big enough chunk of memory, ``struct sockaddr_storage`` is + recommended for IPv4 and IPv6 support. + +.. c:function:: int uv_tcp_connect(uv_connect_t* req, uv_tcp_t* handle, const struct sockaddr* addr, uv_connect_cb cb) + + Establish an IPv4 or IPv6 TCP connection. Provide an initialized TCP handle + and an uninitialized :c:type:`uv_connect_t`. `addr` should point to an + initialized ``struct sockaddr_in`` or ``struct sockaddr_in6``. + + The callback is made when the connection has been established or when a + connection error happened. + +.. seealso:: The :c:type:`uv_stream_t` API functions also apply. 
diff -Nru nodejs-0.11.13/deps/uv/docs/src/threading.rst nodejs-0.11.15/deps/uv/docs/src/threading.rst --- nodejs-0.11.13/deps/uv/docs/src/threading.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/threading.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,157 @@ + +.. _threading: + +Threading and synchronization utilities +======================================= + +libuv provides cross-platform implementations for multiple threading and +synchronization primitives. The API largely follows the pthreads API. + + +Data types +---------- + +.. c:type:: uv_thread_t + + Thread data type. + +.. c:type:: void (*uv_thread_cb)(void* arg) + + Callback that is invoked to initialize thread execution. `arg` is the same + value that was passed to :c:func:`uv_thread_create`. + +.. c:type:: uv_key_t + + Thread-local key data type. + +.. c:type:: uv_once_t + + Once-only initializer data type. + +.. c:type:: uv_mutex_t + + Mutex data type. + +.. c:type:: uv_rwlock_t + + Read-write lock data type. + +.. c:type:: uv_sem_t + + Semaphore data type. + +.. c:type:: uv_cond_t + + Condition data type. + +.. c:type:: uv_barrier_t + + Barrier data type. + + +API +--- + +Threads +^^^^^^^ + +.. c:function:: int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg) +.. c:function:: uv_thread_t uv_thread_self(void) +.. c:function:: int uv_thread_join(uv_thread_t *tid) +.. c:function:: int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) + +Thread-local storage +^^^^^^^^^^^^^^^^^^^^ + +.. note:: + The total thread-local storage size may be limited. That is, it may not be possible to + create many TLS keys. + +.. c:function:: int uv_key_create(uv_key_t* key) +.. c:function:: void uv_key_delete(uv_key_t* key) +.. c:function:: void* uv_key_get(uv_key_t* key) +.. c:function:: void uv_key_set(uv_key_t* key, void* value) + +Once-only initialization +^^^^^^^^^^^^^^^^^^^^^^^^ + +Runs a function once and only once. 
Concurrent calls to :c:func:`uv_once` with the +same guard will block all callers except one (it's unspecified which one). +The guard should be initialized statically with the UV_ONCE_INIT macro. + +.. c:function:: void uv_once(uv_once_t* guard, void (*callback)(void)) + +Mutex locks +^^^^^^^^^^^ + +Functions return 0 on success or an error code < 0 (unless the +return type is void, of course). + +.. c:function:: int uv_mutex_init(uv_mutex_t* handle) +.. c:function:: void uv_mutex_destroy(uv_mutex_t* handle) +.. c:function:: void uv_mutex_lock(uv_mutex_t* handle) +.. c:function:: int uv_mutex_trylock(uv_mutex_t* handle) +.. c:function:: void uv_mutex_unlock(uv_mutex_t* handle) + +Read-write locks +^^^^^^^^^^^^^^^^ + +Functions return 0 on success or an error code < 0 (unless the +return type is void, of course). + +.. c:function:: int uv_rwlock_init(uv_rwlock_t* rwlock) +.. c:function:: void uv_rwlock_destroy(uv_rwlock_t* rwlock) +.. c:function:: void uv_rwlock_rdlock(uv_rwlock_t* rwlock) +.. c:function:: int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) +.. c:function:: void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) +.. c:function:: void uv_rwlock_wrlock(uv_rwlock_t* rwlock) +.. c:function:: int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) +.. c:function:: void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) + +Semaphores +^^^^^^^^^^ + +Functions return 0 on success or an error code < 0 (unless the +return type is void, of course). + +.. c:function:: int uv_sem_init(uv_sem_t* sem, unsigned int value) +.. c:function:: void uv_sem_destroy(uv_sem_t* sem) +.. c:function:: void uv_sem_post(uv_sem_t* sem) +.. c:function:: void uv_sem_wait(uv_sem_t* sem) +.. c:function:: int uv_sem_trywait(uv_sem_t* sem) + +Conditions +^^^^^^^^^^ + +Functions return 0 on success or an error code < 0 (unless the +return type is void, of course). + +.. note:: + Callers should be prepared to deal with spurious wakeups on :c:func:`uv_cond_wait` and + :c:func:`uv_cond_timedwait`. + +.. 
c:function:: int uv_cond_init(uv_cond_t* cond) +.. c:function:: void uv_cond_destroy(uv_cond_t* cond) +.. c:function:: void uv_cond_signal(uv_cond_t* cond) +.. c:function:: void uv_cond_broadcast(uv_cond_t* cond) +.. c:function:: void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) +.. c:function:: int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) + +Barriers +^^^^^^^^ + +Functions return 0 on success or an error code < 0 (unless the +return type is void, of course). + +.. note:: + :c:func:`uv_barrier_wait` returns a value > 0 to an arbitrarily chosen "serializer" thread + to facilitate cleanup, i.e. + + :: + + if (uv_barrier_wait(&barrier) > 0) + uv_barrier_destroy(&barrier); + +.. c:function:: int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) +.. c:function:: void uv_barrier_destroy(uv_barrier_t* barrier) +.. c:function:: int uv_barrier_wait(uv_barrier_t* barrier) diff -Nru nodejs-0.11.13/deps/uv/docs/src/threadpool.rst nodejs-0.11.15/deps/uv/docs/src/threadpool.rst --- nodejs-0.11.13/deps/uv/docs/src/threadpool.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/threadpool.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ + +.. _threadpool: + +Thread pool work scheduling +=========================== + +libuv provides a threadpool which can be used to run user code and get notified +in the loop thread. This thread pool is internally used to run all filesystem +operations, as well as getaddrinfo and getnameinfo requests. + +Its default size is 4, but it can be changed at startup time by setting the +``UV_THREADPOOL_SIZE`` environment variable to any value (the absolute maximum +is 128). + +The threadpool is global and shared across all event loops. + + +Data types +---------- + +.. c:type:: uv_work_t + + Work request type. + +.. c:type:: void (*uv_work_cb)(uv_work_t* req) + + Callback passed to :c:func:`uv_queue_work` which will be run on the thread + pool. + +.. 
c:type:: void (*uv_after_work_cb)(uv_work_t* req, int status) + + Callback passed to :c:func:`uv_queue_work` which will be called on the loop + thread after the work on the threadpool has been completed. If the work + was cancelled using :c:func:`uv_cancel` `status` will be ``UV_ECANCELED``. + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: uv_loop_t* uv_work_t.loop + + Loop that started this request and where completion will be reported. + Readonly. + +.. seealso:: The :c:type:`uv_req_t` members also apply. + + +API +--- + +.. c:function:: int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb, uv_after_work_cb after_work_cb) + + Initializes a work request which will run the given `work_cb` in a thread + from the threadpool. Once `work_cb` is completed, `after_work_cb` will be + called on the loop thread. + + This request can be cancelled with :c:func:`uv_cancel`. + +.. seealso:: The :c:type:`uv_req_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/timer.rst nodejs-0.11.15/deps/uv/docs/src/timer.rst --- nodejs-0.11.13/deps/uv/docs/src/timer.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/timer.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ + +.. _timer: + +:c:type:`uv_timer_t` --- Timer handle +===================================== + +Timer handles are used to schedule callbacks to be called in the future. + + +Data types +---------- + +.. c:type:: uv_timer_t + + Timer handle type. + +.. c:type:: void (*uv_timer_cb)(uv_timer_t* handle) + + Type definition for callback passed to :c:func:`uv_timer_start`. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) + + Initialize the handle. + +.. c:function:: int uv_timer_start(uv_timer_t* handle, uv_timer_cb cb, uint64_t timeout, uint64_t repeat) + + Start the timer. 
`timeout` and `repeat` are in milliseconds. + + If `timeout` is zero, the callback fires on the next event loop iteration. + If `repeat` is non-zero, the callback fires first after `timeout` + milliseconds and then repeatedly after `repeat` milliseconds. + +.. c:function:: int uv_timer_stop(uv_timer_t* handle) + + Stop the timer, the callback will not be called anymore. + +.. c:function:: int uv_timer_again(uv_timer_t* handle) + + Stop the timer, and if it is repeating restart it using the repeat value + as the timeout. If the timer has never been started before it returns + UV_EINVAL. + +.. c:function:: void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) + + Set the repeat value in milliseconds. + + .. note:: + If the repeat value is set from a timer callback it does not immediately take effect. + If the timer was non-repeating before, it will have been stopped. If it was repeating, + then the old repeat value will have been used to schedule the next timeout. + +.. c:function:: uint64_t uv_timer_get_repeat(const uv_timer_t* handle) + + Get the timer repeat value. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/tty.rst nodejs-0.11.15/deps/uv/docs/src/tty.rst --- nodejs-0.11.13/deps/uv/docs/src/tty.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/tty.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,63 @@ + +.. _tty: + +:c:type:`uv_tty_t` --- TTY handle +================================= + +TTY handles represent a stream for the console. + +:c:type:`uv_tty_t` is a 'subclass' of :c:type:`uv_stream_t`. + + +Data types +---------- + +.. c:type:: uv_tty_t + + TTY handle type. + + +Public members +^^^^^^^^^^^^^^ + +N/A + +.. seealso:: The :c:type:`uv_stream_t` members also apply. + + +API +--- + +.. c:function:: int uv_tty_init(uv_loop_t*, uv_tty_t*, uv_file fd, int readable) + + Initialize a new TTY stream with the given file descriptor. 
Usually the + file descriptor will be: + + * 0 = stdin + * 1 = stdout + * 2 = stderr + + `readable`, specifies if you plan on calling :c:func:`uv_read_start` with + this stream. stdin is readable, stdout is not. + + .. note:: + TTY streams which are not readable have blocking writes. + +.. c:function:: int uv_tty_set_mode(uv_tty_t*, int mode) + + Set the TTY mode. 0 for normal, 1 for raw. + +.. c:function:: int uv_tty_reset_mode(void) + + To be called when the program exits. Resets TTY settings to default + values for the next process to take over. + + This function is async signal-safe on Unix platforms but can fail with error + code ``UV_EBUSY`` if you call it when execution is inside + :c:func:`uv_tty_set_mode`. + +.. c:function:: int uv_tty_get_winsize(uv_tty_t*, int* width, int* height) + + Gets the current Window size. On success it returns 0. + +.. seealso:: The :c:type:`uv_stream_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/docs/src/udp.rst nodejs-0.11.15/deps/uv/docs/src/udp.rst --- nodejs-0.11.13/deps/uv/docs/src/udp.rst 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/docs/src/udp.rst 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,280 @@ + +.. _udp: + +:c:type:`uv_udp_t` --- UDP handle +================================= + +UDP handles encapsulate UDP communication for both clients and servers. + + +Data types
---------- + +.. c:type:: uv_udp_t + + UDP handle type. + +.. c:type:: uv_udp_send_t + + UDP send request type. + +.. c:type:: uv_udp_flags + + Flags used in :c:func:`uv_udp_bind` and :c:type:`uv_udp_recv_cb`. + + :: + + enum uv_udp_flags { + /* Disables dual stack mode. */ + UV_UDP_IPV6ONLY = 1, + /* + * Indicates message was truncated because read buffer was too small. The + * remainder was discarded by the OS. Used in uv_udp_recv_cb. + */ + UV_UDP_PARTIAL = 2, + /* + * Indicates if SO_REUSEADDR will be set when binding the handle in + * uv_udp_bind. + * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. 
On other + * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that + * multiple threads or processes can bind to the same address without error + * (provided they all set the flag) but only the last one to bind will receive + * any traffic, in effect "stealing" the port from the previous listener. + */ + UV_UDP_REUSEADDR = 4 + }; + +.. c:type:: void (*uv_udp_send_cb)(uv_udp_send_t* req, int status) + + Type definition for callback passed to :c:func:`uv_udp_send`, which is + called after the data was sent. + +.. c:type:: void (*uv_udp_recv_cb)(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf, const struct sockaddr* addr, unsigned flags) + + Type definition for callback passed to :c:func:`uv_udp_recv_start`, which + is called when the endpoint receives data. + + * `handle`: UDP handle + * `nread`: Number of bytes that have been received. + 0 if there is no more data to read. You may discard or repurpose + the read buffer. Note that 0 may also mean that an empty datagram + was received (in this case `addr` is not NULL). < 0 if a transmission + error was detected. + * `buf`: :c:type:`uv_buf_t` with the received data. + * `addr`: ``struct sockaddr*`` containing the address of the sender. + Can be NULL. Valid for the duration of the callback only. + * `flags`: One or more or'ed UV_UDP_* constants. Right now only + ``UV_UDP_PARTIAL`` is used. + + .. note:: + The receive callback will be called with `nread` == 0 and `addr` == NULL when there is + nothing to read, and with `nread` == 0 and `addr` != NULL when an empty UDP packet is + received. + +.. c:type:: uv_membership + + Membership type for a multicast address. + + :: + + typedef enum { + UV_LEAVE_GROUP = 0, + UV_JOIN_GROUP + } uv_membership; + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: size_t uv_udp_t.send_queue_size + + Number of bytes queued for sending. This field strictly shows how much + information is currently queued. + +.. 
c:member:: size_t uv_udp_t.send_queue_count + + Number of send requests currently in the queue awaiting to be processed. + +.. c:member:: uv_udp_t* uv_udp_send_t.handle + + UDP handle where this send request is taking place. + +.. seealso:: The :c:type:`uv_handle_t` members also apply. + + +API +--- + +.. c:function:: int uv_udp_init(uv_loop_t*, uv_udp_t* handle) + + Initialize a new UDP handle. The actual socket is created lazily. + Returns 0 on success. + +.. c:function:: int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) + + Opens an existing file descriptor or Windows SOCKET as a UDP handle. + + Unix only: + The only requirement of the `sock` argument is that it follows the datagram + contract (works in unconnected mode, supports sendmsg()/recvmsg(), etc). + In other words, other datagram-type sockets like raw sockets or netlink + sockets can also be passed to this function. + +.. c:function:: int uv_udp_bind(uv_udp_t* handle, const struct sockaddr* addr, unsigned int flags) + + Bind the UDP handle to an IP address and port. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param addr: `struct sockaddr_in` or `struct sockaddr_in6` + with the address and port to bind to. + + :param flags: Indicate how the socket will be bound, + ``UV_UDP_IPV6ONLY`` and ``UV_UDP_REUSEADDR`` are supported. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_getsockname(const uv_udp_t* handle, struct sockaddr* name, int* namelen) + + Get the local IP and port of the UDP handle. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init` and bound. + + :param name: Pointer to the structure to be filled with the address data. + In order to support IPv4 and IPv6 `struct sockaddr_storage` should be + used. + + :param namelen: On input it indicates the data of the `name` field. On + output it indicates how much of it was filled. 
+ + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr, const char* interface_addr, uv_membership membership) + + Set membership for a multicast address + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param multicast_addr: Multicast address to set membership for. + + :param interface_addr: Interface address. + + :param membership: Should be ``UV_JOIN_GROUP`` or ``UV_LEAVE_GROUP``. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) + + Set IP multicast loop flag. Makes multicast packets loop back to + local sockets. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param on: 1 for on, 0 for off. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) + + Set the multicast ttl. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param ttl: 1 through 255. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) + + Set the multicast interface to send or receive data on. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param interface_addr: interface address. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_set_broadcast(uv_udp_t* handle, int on) + + Set broadcast on or off. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param on: 1 for on, 0 for off. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_set_ttl(uv_udp_t* handle, int ttl) + + Set the time to live. + + :param handle: UDP handle. 
Should have been initialized with + :c:func:`uv_udp_init`. + + :param ttl: 1 through 255. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr, uv_udp_send_cb send_cb) + + Send data over the UDP socket. If the socket has not previously been bound + with :c:func:`uv_udp_bind` it will be bound to 0.0.0.0 + (the "all interfaces" IPv4 address) and a random port number. + + :param req: UDP request handle. Need not be initialized. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param bufs: List of buffers to send. + + :param nbufs: Number of buffers in `bufs`. + + :param addr: `struct sockaddr_in` or `struct sockaddr_in6` with the + address and port of the remote peer. + + :param send_cb: Callback to invoke when the data has been sent out. + + :returns: 0 on success, or an error code < 0 on failure. + +.. c:function:: int uv_udp_try_send(uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr) + + Same as :c:func:`uv_udp_send`, but won't queue a send request if it can't + be completed immediately. + + :returns: >= 0: number of bytes sent (it matches the given buffer size). + < 0: negative error code (``UV_EAGAIN`` is returned when the message + can't be sent immediately). + +.. c:function:: int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, uv_udp_recv_cb recv_cb) + + Prepare for receiving data. If the socket has not previously been bound + with :c:func:`uv_udp_bind` it is bound to 0.0.0.0 (the "all interfaces" + IPv4 address) and a random port number. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param alloc_cb: Callback to invoke when temporary storage is needed. + + :param recv_cb: Callback to invoke with received data. + + :returns: 0 on success, or an error code < 0 on failure. 
+ +.. c:function:: int uv_udp_recv_stop(uv_udp_t* handle) + + Stop listening for incoming datagrams. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :returns: 0 on success, or an error code < 0 on failure. + +.. seealso:: The :c:type:`uv_handle_t` API functions also apply. diff -Nru nodejs-0.11.13/deps/uv/.gitignore nodejs-0.11.15/deps/uv/.gitignore --- nodejs-0.11.13/deps/uv/.gitignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/.gitignore 2015-01-20 21:22:17.000000000 +0000 @@ -61,3 +61,9 @@ Debug Release ipch + +# sphinx generated files +/docs/build/ + +*.xcodeproj +*.xcworkspace diff -Nru nodejs-0.11.13/deps/uv/gyp_uv.py nodejs-0.11.15/deps/uv/gyp_uv.py --- nodejs-0.11.13/deps/uv/gyp_uv.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/gyp_uv.py 2015-01-20 21:22:17.000000000 +0000 @@ -6,6 +6,13 @@ import subprocess import sys +try: + import multiprocessing.synchronize + gyp_parallel_support = True +except ImportError: + gyp_parallel_support = False + + CC = os.environ.get('CC', 'cc') script_dir = os.path.dirname(__file__) uv_root = os.path.normpath(script_dir) @@ -94,6 +101,11 @@ if not any(a.startswith('-Dcomponent=') for a in args): args.append('-Dcomponent=static_library') + # Some platforms (OpenBSD for example) don't have multiprocessing.synchronize + # so gyp must be run with --no-parallel + if not gyp_parallel_support: + args.append('--no-parallel') + gyp_args = list(args) print gyp_args run_gyp(gyp_args) Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/uv/img/banner.png and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/uv/img/banner.png differ diff -Nru nodejs-0.11.13/deps/uv/img/logos.svg nodejs-0.11.15/deps/uv/img/logos.svg --- nodejs-0.11.13/deps/uv/img/logos.svg 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/img/logos.svg 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,152 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<!-- Created 
with Inkscape (http://www.inkscape.org/) --> + +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + width="792pt" + height="612pt" + id="svg3069" + version="1.1" + inkscape:version="0.48.4 r9939" + sodipodi:docname="New document 3"> + <defs + id="defs3071"> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath70"> + <path + inkscape:connector-curvature="0" + d="M 0,5952.81 0,0 l 8418.9,0 0,5952.81 -8418.9,0 z" + id="path72" /> + </clipPath> + <clipPath + clipPathUnits="userSpaceOnUse" + id="clipPath70-6"> + <path + inkscape:connector-curvature="0" + d="M 0,5952.81 0,0 l 8418.9,0 0,5952.81 -8418.9,0 z" + id="path72-5" /> + </clipPath> + </defs> + <sodipodi:namedview + inkscape:document-units="in" + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1.0" + inkscape:pageopacity="0.0" + inkscape:pageshadow="2" + inkscape:zoom="0.43415836" + inkscape:cx="508.42646" + inkscape:cy="359.27024" + inkscape:current-layer="layer1" + id="namedview3073" + showgrid="false" + inkscape:window-width="1010" + inkscape:window-height="702" + inkscape:window-x="441" + inkscape:window-y="267" + inkscape:window-maximized="0" /> + <metadata + id="metadata3075"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title></dc:title> + </cc:Work> + </rdf:RDF> + </metadata> + <g + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1"> + <g + transform="matrix(0.2288071,0,0,-0.2288071,130.84779,1169.4436)" + id="g66"> + <g + id="g68" + clip-path="url(#clipPath70)"> + <path + inkscape:connector-curvature="0" + d="m 3274.33,2372.2 c -42.27,-8.61 -87.28,-37.29 
-122.12,-43.75 -77.01,-14.25 -122.4,-0.53 -81.76,16.27 59.03,24.45 253.75,30.55 274.1,175.05 20.36,144.5 6.1,209.66 -4.07,246.29 -4.87,17.57 -15.81,43.49 -25.9,65.76 -2.85,15.73 -6.59,29.07 -9.86,38.57 -1.82,4.29 -3.65,9.81 -5.46,16.78 0,0 -2.54,17.81 5.6,35.1 7.2,15.34 31.95,48.62 33.62,80.79 0.03,1.95 0.11,3.9 0.08,5.85 -0.1,2.18 -0.28,4.33 -0.63,6.48 l -0.18,1.9 c -3.17,18.05 -13.53,30.62 -27.6,39.98 -9.06,6.02 -21.4,7.92 -28.17,16.51 5.4,17.01 22.63,23.06 29.82,36.95 4.48,12.34 0.82,25.3 -5.99,37.08 -13.13,14.72 -38.92,27.49 -62.07,36.15 -28.97,10.87 -58.44,19.45 -78.35,35.06 -21.03,11.19 -35.17,32.83 -22.26,61.45 3.35,7.44 4.85,14.93 4.75,22.04 l -0.09,2.09 c -0.03,1.45 -0.23,2.76 -0.41,4.1 -0.15,1.04 -0.32,2.06 -0.55,3.08 l -0.47,1.77 -0.56,2.02 c -5.3,17.86 -33.44,22.88 -43.47,23.78 -22.72,2.04 -37.4,8.2 -49.46,12.01 9.7,-5.04 19.46,-11.78 31.9,-16.79 7.87,-3.21 19.17,-5.12 28.68,-8.29 l 0.09,0.02 c 0,0 17.55,-3.56 22.38,-15.78 4.84,-12.2 2.03,-22.9 0,-29.75 l -0.74,-1.99 c -1.75,-5.35 -3.72,-10.51 -4.13,-15.84 -1.65,-21.04 11.26,-32 18.12,-49.6 12.35,-11.73 29.93,-23.88 54.32,-34.88 21.92,-9.9 48.15,-15.89 64.4,-25.7 9.41,-5.68 26.55,-19.41 26.16,-29.4 -0.3,-7.65 -14.43,-18.09 -20.12,-25.7 -4.07,-5.42 -6.09,-12.62 -9.73,-17.74 -3.17,-5 -7.24,-8.22 -11.92,-8.22 -6.1,0 -20.01,-2.36 -10.86,-3.04 9.16,-0.68 32.91,-11.87 32.91,-11.87 l -0.02,-0.03 c 32.1,-13.28 53.67,-37.88 41.87,-72.96 -9.88,-29.39 -32.61,-53.97 -34.2,-86.3 -1.7,-34.4 14.07,-62.72 16.09,-90 3.41,-45.9 -3.18,-77.51 -12.06,-112.01 -4.17,-16.14 -6.39,-32.04 -10.06,-47.74 -15.68,-66.87 -56.09,-95.84 -110.68,-126.73 -23.67,-13.39 -49.07,-38.07 -74.45,-45.9 -4.09,-1.26 -10.39,0.66 -14.11,0 -23.38,-4.22 -44.42,-15.81 -68.41,-20.19 -22.59,-4.17 -46.27,-2.17 -73.08,-8.96 -13.6,-4.85 -31.03,-12.11 -41.21,-16.43 -15.74,-7.17 -31.16,-14.63 -46.55,-22.15 l -51.11,-29.2 c 0,0 -28.48,-16.49 3.06,-4.28 25.38,9.83 112.62,53.25 76.08,27.75 -25.2,-12.66 -38.87,-30.64 -61.16,-45.98 0,0 -74.99,-37.19 -86.24,-47.57 
-12.37,-11.42 -27.49,-5.1 -37.41,-16.3 -11.65,-13.13 -25.94,-21.01 -36.13,-24.41 -10.17,-3.42 -21.7,-25.78 -37.31,-37.31 -15.61,-11.54 -49.52,-8.15 -79.38,-21.04 -29.85,-12.89 -60.79,-13.06 -60.79,-13.06 -2.96,-0.28 -5.83,-0.46 -8.58,-0.53 -1.18,-0.03 -2.27,-0.02 -3.4,-0.02 -17.46,-0.09 -30.81,3.32 -41.04,7.08 -12.47,4.58 -17.05,2.28 -14.25,0.76 2.8,-1.51 10.92,-9.11 10.92,-9.11 -34.83,0.04 -44.33,23.18 -60.37,40.37 7.29,1.04 28.08,-4.04 17.52,2.08 -20.85,9.97 -61.01,10.69 -61.8,34.67 -0.6,18.81 8.06,14.69 8.06,14.69 14.61,-25.44 59.69,-27.93 92.57,-27.57 61.24,0.74 102.68,34.32 142.82,53.32 13.01,7.82 28.21,18.1 28.21,18.1 l -0.08,-0.3 c 12.67,8.25 25.48,16.25 36.31,26.22 18.9,17.37 31.67,40.04 48.31,59.27 1.93,3.27 5.99,7.26 10.81,11.28 4.69,4.4 9.73,8.5 15.34,12.09 l 0.21,-0.45 2.18,1.46 -0.39,0.82 c 40.39,22.9 78.03,48.25 109.79,79.02 l 0.21,0.36 c 4.56,4.13 28.46,30.81 40,43.76 13.61,18.15 23.42,41.08 29.11,64.18 2.05,8.36 0.75,18.51 4.01,27.56 5.07,14.03 17.81,26.51 26.17,40.39 8.65,14.43 14.37,30.14 20.12,45.9 25.17,69.17 54.28,124.25 108.66,161.6 26.2,18.01 58.02,29.63 76.46,56.95 -13.39,-13.59 -30.82,-23.5 -51.87,-30.09 -59.79,-19.7 -98.68,-70.15 -118.23,-102.2 -15.01,-26.98 -26.61,-57.04 -35.14,-89.93 -2.83,40.54 -12.84,72.15 -26.17,102.84 -11.85,27.29 -22.33,56.15 -44.27,73.46 0,0 -0.84,-0.06 -4.25,1.99 -3.39,2.02 56.57,3.53 56.57,3.53 10.49,-0.38 21.17,-1.89 31.99,-3.29 2.03,0.22 4.51,0.1 7.56,-0.43 32.96,-5.82 67.21,6.45 88.55,21.98 11.08,8.06 30.34,14.52 46.32,18.89 0.26,0.69 1.82,1.47 5.81,2.23 0,0 24.42,7.63 50.37,14.76 17.58,4.81 30.38,8.5 39.27,12.89 5.15,3.28 10.05,7.22 13.85,11.91 7.61,9.39 7.76,20.66 14.1,36.73 6.31,16.03 15.96,28.49 16.1,42.24 0.45,45.08 -42.25,80.98 -88.54,56.95 45.58,11.75 77.9,-10.49 72.44,-53.28 -6.74,-52.87 -86.39,-92.05 -134.83,-112.01 -19.2,-7.91 -41.68,-17.9 -64.38,-20.19 -32.74,-3.32 -62.41,3.67 -90.56,1.84 -36.1,-2.38 -60.11,-21.47 -90.55,-23.89 -26.16,-2.07 -22.61,15.61 -45.31,17.62 -4.7,2.68 -18.98,0 -33.18,0.74 
-39.25,-10.37 -64.5,7.57 -96.58,22.04 -23.14,10.45 -47.82,19.96 -72.44,22.04 -61.81,5.24 -99.39,-3.35 -149.91,2.01 -14.25,1.51 -43.81,3.31 -57.85,8.57 -43.71,16.35 -77.17,10.42 -124.65,11.1 -34.54,0.5 -66.59,2.31 -88.15,-1.47 -5.88,-1.06 -11.84,-2.57 -18.12,-3.67 -46.27,-8.09 -116.76,-12.28 -114.69,-53.26 3.44,2.35 2.48,8.74 8.04,9.18 -0.2,-7.53 -0.39,-15.05 2.01,-20.21 -26.91,0.7 -42.78,19.28 -42.25,40.41 0.9,35.93 58.71,50.95 98.59,60.61 39.98,9.64 81.5,20.94 110.69,25.7 -69.91,-10.27 -143.86,-16.88 -197.22,-42.25 -0.75,2.76 -1.23,5.42 -1.55,7.98 -4.22,19.4 -13.43,49.15 -19.91,78.07 -7.46,33.24 4.06,91.59 4.06,91.59 l 0.29,-0.34 c 4.81,29.66 10.94,42.88 16.88,60.61 12.65,37.82 53.52,49.69 82.13,53.28 -24.12,-17.04 -39.54,-38.17 -49.29,-66.46 -1.2,-10.04 -2.14,-19.46 -2.84,-27.77 21.65,67.41 82.07,92.64 82.07,92.64 l 19.34,20.86 -0.25,-0.95 c 33.36,32.26 77.42,57.26 133.43,58.83 28.22,0.79 62.32,-6.64 62.23,-34.32 -0.04,-15.65 -12.28,-28.93 -28.97,-36.27 4.91,-0.97 9.4,-0.29 13.47,1.39 l 17.34,19.41 c 5.74,14.18 5.23,32.1 -4.44,42.9 25.52,21.82 65.26,-2.65 62.36,-31.6 7.47,6.3 12.03,18.2 6.89,27.64 34.34,20.18 53.56,-28.74 88.11,-17.31 -3.25,9.79 -19.89,7.81 -24.95,15.97 45.17,6.46 84.78,-9.42 130.08,-5.82 2.84,-2.47 5.91,-4.93 9.19,-7.36 -84.73,-9.38 -15.9,-9.33 -15.9,-9.33 10.85,0.99 36.92,2.81 47.06,1.01 16.58,-3 27.29,1.04 41.95,6.77 0.35,-0.64 1.82,-0.58 2.71,1.08 7.6,3 16.35,6.31 27.47,9.31 4.31,0.61 8.64,1.72 12.84,3.09 l 1.81,0.37 0.26,0.29 c 5.8,2.02 11.27,4.44 15.96,6.71 42.69,20.52 78.1,68.84 114.56,94.19 60.42,41.99 117.63,-9.88 159.69,-43.54 l -0.27,-0.39 c 10.55,-7.2 24.46,-16.08 39.61,-25.35 39.58,-17.23 91.79,-32.28 119.67,-59.84 22.31,-22.04 29.9,-50.42 39.49,-79.88 8.96,-27.47 26.34,-51.77 36.12,-82.6 1.88,1.58 2.99,3.96 3.63,6.8 -0.27,2.63 -0.7,5.41 0.08,7.13 l 0.53,-0.48 c -0.11,5.38 -1.3,11.69 -3.14,18.33 -8.29,16.88 -22.78,36.11 -21,51.15 0,0 51.42,-26.57 109.76,-52.96 20.09,-9.09 38.48,-24.08 54.53,-41.21 l 4.69,0.1 c -0.55,-1.2 -0.5,-2.93 
-0.33,-4.76 28.36,-31.78 48.46,-69.63 55.45,-91.51 1.44,-4.49 3.07,-9.04 4.83,-13.57 l 0.01,-0.05 c 2.2,-5.71 4.57,-11.35 7.02,-16.78 l 0.05,0.08 c 1.03,-1.85 1.68,-3.66 2.37,-5.49 8.42,-18.22 16.69,-33.01 18.99,-37.06 7.41,-9.21 13.39,-21.04 19.78,-31.05 2.22,-24.89 3.41,-50.09 3.41,-75.56 0,-199.46 -69.36,-382.69 -185.2,-527 -6.7,-0.78 -15.83,-2.33 -27.9,-4.8 z m -52.92,-82.53 c -40.52,-13.23 -81.83,-38.11 -108.21,-40.61 -79.4,-7.5 -93.63,27.79 -30.19,33.61 23.94,2.2 59.29,4.91 87.52,18.29 41.64,19.78 84.29,38.98 112.76,53.23 -19.47,-22.6 -40.15,-44.11 -61.88,-64.52 z m -865.53,-3.85 c -6.73,-2.26 -6.73,1.07 -6.73,1.07 0,0 -12.2,17.42 -14.09,33.64 -1.91,16.07 2.23,30.38 6.26,32.5 4.07,2.2 4.95,-7.16 8.26,-18.88 3.33,-11.84 19.78,-36.13 19.78,-36.13 2.34,-5 -6.65,-9.95 -13.48,-12.2 z m 42.43,-7.43 c -7.73,-2.57 -7.72,1.26 -7.72,1.26 0,0 -13.94,19.78 -16.06,38.24 -2.14,18.26 2.66,34.52 7.3,36.95 4.67,2.51 5.67,-8.16 9.42,-21.52 3.76,-13.43 22.6,-41.09 22.6,-41.09 2.67,-5.68 -7.71,-11.25 -15.54,-13.84 z m 14.34,51.42 c -2.66,23.12 3.53,43.78 9.47,46.91 5.98,3.19 7.21,-10.35 11.96,-27.27 4.76,-17.04 28.77,-52.08 28.77,-52.08 3.4,-7.18 -9.92,-14.29 -19.94,-17.55 -9.92,-3.24 -9.89,1.54 -9.89,1.54 0,0 -17.72,25.08 -20.37,48.45 z m 344.3,264.59 c 4.16,3.72 8.54,-4.1 16.65,-12.97 8.18,-8.98 36.92,-23.11 36.92,-23.11 4.63,-3.42 -4.59,-11.97 -12.08,-17.13 -7.47,-5.16 -8.66,-2.13 -8.66,-2.13 0,0 -21.16,9.99 -29.17,23.65 -7.97,13.46 -7.84,27.91 -3.66,31.69 z m -68.15,-36.32 c 5.11,4.79 10.57,-5.35 20.77,-16.86 10.31,-11.78 46.52,-30.52 46.52,-30.52 5.9,-4.44 -5.63,-15.55 -14.96,-22.21 -9.36,-6.66 -10.92,-2.72 -10.92,-2.72 0,0 -26.46,13.4 -36.55,31.17 -9.99,17.53 -10,36.37 -4.86,41.14 z m -60.8,-81.74 c -9,18.83 -8.22,38.34 -3.02,43.06 5.25,4.73 10.2,-6.14 19.69,-18.82 9.63,-12.88 44.35,-34.41 44.35,-34.41 5.56,-5 -6.19,-15.86 -15.65,-22.26 -9.41,-6.35 -10.8,-2.21 -10.8,-2.21 0,0 -25.38,15.51 -34.57,34.64 z m -55.66,-46.52 c -7.53,19.87 -5.51,39.81 -0.21,44.23 5.4,4.43 
9.44,-7.08 17.74,-20.66 8.43,-13.86 40.51,-38.43 40.51,-38.43 5.09,-5.51 -6.92,-15.77 -16.52,-21.57 -9.48,-5.72 -10.46,-1.32 -10.46,-1.32 0,0 -23.45,17.66 -31.06,37.75 z m -75.01,-121.98 c -9.98,-3.02 -9.89,1.61 -9.89,1.61 0,0 -16.92,24.38 -18.9,46.94 -1.96,22.39 4.85,42.34 10.86,45.31 6.07,3.02 6.94,-10.05 11.14,-26.43 4.3,-16.56 27.21,-50.59 27.21,-50.59 3.18,-6.96 -10.3,-13.73 -20.42,-16.84 z m 20.3,78.13 c -4.88,22.44 -0.13,43.46 5.88,47.35 6.06,3.95 8.61,-9.17 15.12,-25.14 6.57,-16.04 35.41,-47.57 35.41,-47.57 4.31,-6.67 -9.23,-15.5 -19.66,-20.14 -10.27,-4.55 -10.72,0.28 -10.72,0.28 0,0 -21.1,22.52 -26.03,45.22 z m 271.98,527.71 c 0,0 5.87,-0.99 3.22,-9.29 -2.64,-8.28 -3.15,-33.94 4.88,-45 8.07,-11.09 -5.53,-6.88 -16.15,4.32 -10.63,11.15 -18.15,31.7 -15.86,45.66 0,0 -0.61,5.87 23.91,4.31 z m -37.52,-67.96 c 12.88,-14.37 -6.84,-10.53 -23.34,3.63 -16.48,14.11 -29.99,41.66 -28.75,61.54 0,0 -1.69,8.15 32.94,9.54 0,0 8.42,-0.53 5.81,-12.53 -2.47,-11.98 0.45,-47.93 13.34,-62.18 z m -87.89,-74.93 c 75.07,-4.92 128.89,21.11 167.01,47.73 10.04,7 23.31,24.05 40.27,12.85 -7.23,-14.98 -26.39,-24.85 -42.26,-33.04 -27.81,-14.37 -52.47,-26.94 -84.52,-34.9 54.02,8.26 92.4,30.76 132.81,51.42 4.95,-10.38 10.51,-31.21 14.16,-48.84 l 0.52,0.05 c 0,0 3.67,-9.88 2.58,-19.53 0.15,-4.6 -0.78,-8.56 -4.07,-10.74 -1.11,-1.42 -2.44,-2.73 -4.11,-3.82 -14.75,-9.67 -58.52,-31.04 -93.63,-33.58 -1.16,-0.07 -2.27,-0.22 -3.42,-0.33 l -2.59,-0.73 -0.07,0.48 c -31.84,-3.69 -52.44,-16.81 -11.72,-10.12 40.74,6.72 128,21.57 96.21,9.62 l 0.11,0.02 c -39.69,-15.51 -91.77,-30.69 -142.89,-25.71 -33.03,3.2 -67.2,19.4 -96.58,34.89 -29.34,15.45 -57.18,35.77 -88.54,42.22 -23.87,4.94 -43.59,-1.94 -64.39,-5.5 -13.85,-2.34 -33.15,-7.95 -41.54,0.7 -3.54,4.19 -0.72,8.49 -0.72,8.49 7.94,19.62 31.07,31.19 50.3,36.72 60.36,17.37 118.83,-14.55 177.08,-18.35 z m 17.01,65.76 c 12.62,-18.31 -9.29,-10.98 -26.04,7.72 -16.76,18.69 -28.07,52.35 -23.77,75.03 0,0 -0.73,9.53 38.96,5.89 0,0 9.52,-1.89 4.9,-15.19 -4.73,-13.42 
-6.64,-55.1 5.95,-73.45 z m -79.92,8.72 c 12.44,-20.82 -10.82,-11.69 -27.75,9.78 -16.94,21.37 -27.09,59.1 -20.93,83.8 0,0 -0.14,10.62 42.73,4.68 0,0 10.14,-2.56 4.23,-17.09 -5.93,-14.48 -10.72,-60.33 1.72,-81.17 z m -82.62,11.5 c 12.72,-21.08 -10.85,-12.08 -28.06,9.58 -17.23,21.59 -27.69,59.9 -21.58,85.21 0,0 -0.23,10.74 43.13,5.29 0,0 10.28,-2.5 4.37,-17.35 -5.89,-14.84 -10.51,-61.7 2.14,-82.73 z m -75.72,-23.8 c 15.61,-27.27 -11.73,-15.72 -32.57,12.24 -20.82,27.97 -34.79,77.68 -29.28,110.62 0,0 -0.8,14.07 48.83,7.3 0,0 11.92,-3.09 6.02,-22.58 -5.92,-19.36 -8.62,-80.28 7,-107.58 z m -80.38,20.29 c 15.56,-23.8 -11.82,-14.71 -32.58,9.48 -20.74,24.17 -34.4,68.31 -28.65,98.18 0,0 -0.78,12.63 49.05,8.82 0,0 11.91,-2.29 5.83,-19.98 -6.03,-17.69 -9.15,-72.64 6.35,-96.5 z m -85.14,27.74 c 10.98,-20.46 -10.36,-11.36 -25.48,9.78 -15.12,21.03 -23.44,58.05 -17.05,82.25 0,0 0.14,10.39 39.73,4.3 0,0 9.4,-2.58 3.46,-16.71 -5.88,-14.2 -11.63,-59.14 -0.66,-79.62 z m -64.93,17.69 c 10.31,-16.32 -8.4,-9.23 -22.34,7.51 -13.92,16.76 -22.72,46.32 -18.28,65.79 0,0 -0.36,8.33 33.87,3.93 0,0 8.22,-1.94 3.71,-13.4 -4.32,-11.4 -7.32,-47.51 3.04,-63.83 z m -68.19,-11.78 c 12.6,-18.31 -9.3,-10.98 -26.05,7.74 -16.75,18.69 -28.06,52.34 -23.76,75.03 0,0 -0.74,9.51 38.95,5.89 0,0 9.52,-1.89 4.9,-15.21 -4.74,-13.41 -6.63,-55.07 5.96,-73.45 z m -55.02,4.85 c 12.88,-14.35 -6.83,-10.55 -23.32,3.6 -16.5,14.13 -30.01,41.69 -28.77,61.56 0,0 -1.68,8.15 32.94,9.54 0,0 8.42,-0.53 5.83,-12.55 -2.48,-11.98 0.43,-47.93 13.32,-62.15 z m 755.27,-755.37 c -38.14,-13.18 -105.91,-46.26 -160.35,-73.94 -13.01,-0.6 -26.11,-0.94 -39.29,-0.94 -465.3,0 -842.48,377.19 -842.48,842.49 0,360.48 226.45,668 544.82,788.29 1.61,-1.49 3.31,-2.93 5.11,-4.33 3.61,-2.81 9.58,-4.3 12.67,-8 4.11,-4.94 2.4,-14.97 4.19,-21.16 5.55,-19.11 17.35,-36.65 37,-50.99 2.87,-2.12 8.35,-3.14 10.85,-5.95 5.47,-6.21 3.88,-16.65 8.01,-23.83 4.35,-7.55 11.76,-16.67 20.9,-23.69 5.01,-3.86 12.5,-5.92 17.18,-10.52 3.79,-3.71 4.84,-10.39 7.55,-15.95 
7.34,-15.01 26.73,-32.41 36.04,-48.12 1.23,-2.07 2.19,-4.27 3,-6.51 3.88,-7.99 13.26,-24.92 26.05,-32.75 0,0 1.33,-23.51 24.78,-50.31 -47.5,2.56 -93.32,29.42 -128.56,2.74 -10.17,11.1 -42.79,19.8 -56.75,4.23 -10.31,17.92 -64.99,29.36 -75.62,1.04 -77.73,34.87 -171.54,-14.85 -201.69,-64.76 -5.6,-2.67 -10.83,-5.48 -15.77,-8.37 -21.28,6.4 -50.1,8.67 -81.29,-13.82 -30.74,-22.18 -62.88,-121.66 -57.35,-190.53 0.6,-7.38 14.59,-24.8 19.62,-59.7 5,-34.62 8.81,-45.99 8.81,-45.99 -6.84,-11.93 -15,-17.26 -16.11,-31.22 -2.41,-30.21 23.42,-46.63 47.62,-62.1 8.61,-4.01 18.36,-6.89 28.94,-7.27 6.39,-10.12 31.01,-44.33 71.72,-44.33 1,0 2.03,0.07 3.03,0.13 15.54,-13.07 31.75,-16.58 20.56,-0.47 -0.92,1.33 -1.71,2.82 -2.45,4.35 7.48,-4.43 16.54,-8.77 24.61,-9.56 0.16,-0.01 3.96,-0.97 7.13,-0.97 1.12,0 4.46,0 5.64,2.57 1.17,2.58 -1.21,5.35 -1.99,6.23 -3.26,3.76 -6.93,13.54 -9.69,22.65 2.98,-5.46 6.23,-10.66 9.87,-15.13 19.37,-23.78 46.47,-35.02 31.34,-11.36 -4.86,7.58 -7.63,17.91 -8.98,28.88 7.5,-9.25 20.42,-19.99 41.43,-24.29 17.36,-16.51 38.94,-23.26 26.01,-5.71 -3.3,4.49 -5.46,10.27 -6.75,16.59 10.47,-12.55 28.17,-28.68 49.88,-28.68 l 1.58,0.04 0.44,0.02 c 14.89,-10.07 27.36,-11.75 16.22,2.77 -4.83,6.32 -7.46,15.08 -8.64,24.34 9.84,-20.08 28.82,-47.69 61.49,-56.57 0.24,-0.09 5.87,-1.65 10.52,-1.65 3.63,0 5.91,0.93 7,2.92 1.58,2.85 -0.5,6.17 -1.18,7.26 -2.11,3.38 -4.07,7.59 -5.77,11.74 14.67,-8.6 25.51,-9.33 14.1,4.68 -5.9,7.22 -9.05,17.42 -10.34,27.95 7.97,-21.31 24.22,-51.23 56.3,-70.83 0.24,-0.11 6.01,-2.84 12.42,-2.84 3.09,0 5.84,0.65 8.23,1.85 2.04,1.05 3.38,2.77 3.93,4.98 1.67,6.68 -4.67,16.16 -5.24,16.93 -2.64,5.2 -7.48,16.18 -9.86,25.78 l 0.24,-0.19 c 23.19,-18.71 53.29,-27.37 34.72,-8.72 -11.27,11.35 -15.62,30.79 -16,46.67 6.77,-19.01 23.24,-51.94 57.35,-58.45 0.23,-0.02 5.52,-1.04 9.42,-1.04 2.17,0 4.78,0.25 5.69,2.33 0.4,0.88 0.56,2.3 -0.99,4.04 -2.55,2.86 -6.18,11.52 -8.1,18.52 1.62,-1.24 3.19,-2.55 4.89,-3.63 16.54,-10.85 36.06,-16.54 38.61,-13.02 5.29,-4.34 13.57,-10.03 
24.82,-14.99 1.05,-2.39 2.09,-4.82 2.93,-7.24 0,0 5.25,-14.59 7.27,-22.48 -26.76,6.93 -72.56,20.35 -106.65,12.83 -26.92,-5.92 -97.1,-44.72 -80.12,-71.99 1.13,-1.82 2.58,-3.09 4.24,-4.11 l 1.05,-0.4 c 6.7,-3.57 18.52,-5.67 30.95,-5.57 l 4.52,0.13 c 7.22,0.39 26.15,3.12 32.56,5.2 18.77,6.1 35.53,2.43 35.53,2.43 l -0.24,-0.17 c 13.34,-1.55 27.41,-6.24 41.94,-13.66 15.61,-7.94 36.85,-21.06 42.26,-27.53 9.76,-11.73 -5.19,-39.21 -10.76,-56.13 -0.27,-1 -0.61,-2.04 -0.93,-3.08 l -0.4,-1.4 -0.05,0.03 c -0.79,-2.37 -1.76,-4.86 -2.8,-7.4 -0.95,1.19 -1.79,2.34 -2.43,3.42 -5.34,8.98 -8.21,16.84 -10.97,13.23 -2.76,-3.62 -2.89,-17.96 2.36,-31.49 0.54,-1.4 1.2,-2.73 1.91,-4.06 -6.05,-11.06 -13.38,-22.64 -21.11,-33.75 -6.02,5.7 -12.11,12.11 -15.05,17.23 -6.87,11.76 -10.53,22.09 -14.04,17.35 -3.54,-4.72 -3.64,-23.77 3.09,-41.67 2.2,-5.83 5.55,-11.11 9.07,-15.73 -5.34,-6.74 -10.55,-12.95 -15.3,-18.23 -2.13,-2.34 -4.65,-4.99 -7.32,-7.74 -7.34,7.56 -16.63,17.94 -20.38,25.45 -6.54,13.12 -9.91,24.3 -13.63,19.54 -3.71,-4.8 -4.43,-24.95 1.75,-44.43 2.55,-7.98 6.93,-15.27 11.27,-21.19 -7.96,-7.58 -16.6,-15.59 -25.47,-23.67 -5.38,7.14 -10.69,14.98 -13.12,20.98 -5.83,14.19 -8.6,26.23 -12.54,21.61 -3.88,-4.63 -5.58,-25.57 -0.35,-46.44 1.29,-5.15 3.26,-10.09 5.5,-14.67 -8.79,-7.86 -17.3,-15.38 -25.11,-22.19 -2.29,4.97 -4.2,9.69 -5.26,13.58 -4.58,16.9 -6.3,30.85 -10.81,26.57 -4.51,-4.18 -8.35,-26.68 -5.05,-50.59 0.31,-2.3 0.76,-4.57 1.27,-6.85 l -2.2,-1.89 c -5.11,-4.41 -10.66,-9.72 -16.39,-15.56 -1.43,4.49 -2.54,8.7 -3.12,12.19 -2.91,17.61 -3.35,31.7 -7.98,28.33 -4.6,-3.31 -10.11,-25.03 -9,-49.33 0.17,-3.82 0.63,-7.68 1.27,-11.45 -6.74,-7.6 -13.41,-15.44 -19.69,-23.09 -3.64,9.35 -6.93,19.01 -8.2,26 -3.33,18.42 -4.03,33.23 -8.74,29.6 -4.31,-3.29 -9.05,-23.01 -8.53,-45.96 -4.7,-3.44 -10.79,-19.44 -10.99,-37.57 -0.09,-6.17 1.01,-12.49 2.51,-18.2 -3.05,-4.3 -8.38,-7.28 -12.11,-12.52 -19.24,-26.91 -11.39,-58.98 30.18,-66.11 12.74,-39.25 45.04,-58.31 94.58,-58.76 6.65,-0.06 13.6,2.26 20.12,1.85 
7.36,-0.49 13.42,-3.48 20.13,-3.69 36.11,-1.1 55.04,10.39 82.49,18.39 20.47,5.94 41.6,7.84 54.34,12.84 19.51,7.65 28.78,28.62 44.26,38.56 5.77,3.7 16.64,5.44 26.17,11.01 8.04,4.73 15,13.32 22.13,16.53 6.23,2.82 16.6,2.26 24.14,5.51 12.64,5.48 26.6,17.31 42.27,25.73 32.81,17.55 59.51,30.53 94.57,45.89 16.75,7.34 34.47,24.6 44.27,25.71 11.62,1.34 25.15,-8.36 34.21,-7.34 18.46,-21.95 24.89,-52.62 24.12,-92.06 1.09,-0.11 0.29,-9.78 0.29,-9.78 0,0 6.45,-84.66 -125.77,-130.38 z M 2319.1,3773.92 c 13.83,-3.85 29.29,-11.71 14.56,-35.21 -9.06,9.68 -12.13,22.44 -14.56,35.21 z m 26.38,-30.96 c 4.42,7.74 0.02,11.68 2.36,16.37 39.74,-5.12 43.17,-33.03 26,-72.62 -26.49,8.95 -33.54,26.66 -38.58,44.64 4.25,4.61 7.55,6.9 10.22,11.61 z m 43.39,-7.87 c 36.99,-16.3 42.8,-57.19 17.22,-110.53 -8.74,5.74 -15.55,12.62 -20.46,20.22 -3.89,6.03 -9.35,15.9 -8.18,24.01 0.82,5.58 5.24,13.07 7.5,19.31 6.47,17.93 8.99,34.11 3.92,46.99 z m 177.23,-155.25 c -0.64,1.95 0.67,1.89 2.28,3.33 55.29,-11.22 59.69,-65.17 35.95,-105.83 -11.26,-19.25 -22.27,-29.38 -39.55,-24.01 -14.3,4.46 -16.25,15.95 -23.14,23.85 28.17,41.04 27.62,71.27 24.46,102.66 z m -63.91,40.61 c -0.24,5.01 -1.15,10.58 0.87,15.52 4.77,-0.4 13.12,-1.61 19.95,-5.23 31.05,-16.4 43.9,-75.07 25.85,-109.56 -7.75,-14.78 -15.59,-21.96 -30.09,-17.81 -23.4,6.72 -30.19,33.21 -39.62,44.17 25.51,27.2 24.04,51.08 23.04,72.91 z m -74.76,73.65 1.52,0.74 c 60.84,-21.44 95.74,-61.97 40.45,-136.4 -21.08,10.57 -32.94,24.68 -38.43,40.68 -3.23,9.38 -5.72,10.51 -3.53,22.15 4.69,25.14 6.27,52.87 -0.01,72.83 z m 236.11,-312.56 c -2.27,-1.01 -3.87,-1.76 -3.87,-1.76 -7.39,-3.78 -15.16,-6.03 -22.54,-7.31 -45.74,9.82 -59.56,61.95 -59.56,61.95 42.74,18.79 66.89,72.89 51.68,102.65 0.43,1.11 1.42,0.93 2.42,0.7 19.37,-8.54 39.75,-22.88 49.66,-38.41 18.83,-29.45 24.41,-73.75 8.73,-109.34 -6.98,-1.61 -11.46,-3.21 -17.08,-5.19 -0.95,-0.25 -1.94,-0.54 -2.92,-0.88 l -5.22,-1.92 -0.53,-0.21 -0.82,-0.27 0.05,-0.01 z m 73.78,36.96 c -3.13,-2.66 -6.21,-5.27 -9.06,-7.69 
-2.62,-5.02 -10.66,-11.66 -20.93,-16.01 8.15,24.72 9.13,50.99 9.7,70.45 16.84,-8.75 28.08,-20.77 34.57,-34.43 -2.08,-1.8 -4.21,-3.66 -6.4,-5.55 -2.88,5.37 -6.87,10.23 -12.09,14.35 2.36,-6.86 3.71,-13.9 4.21,-21.12 z m -92.4,328 c 412.7,0 756.05,-296.74 828.41,-688.48 -7.89,5.56 -16.59,10.5 -26.45,14.61 -20.63,92.05 -65.01,150.17 -156.18,183.4 -30.56,11.13 -59.51,24.02 -89.66,37.15 1.74,97.97 -112.11,95.91 -183.06,146.34 -30.86,21.99 -65.84,62.56 -101.14,70.06 -68.27,14.52 -110.27,-33.06 -150.59,-66.3 -9.89,10.8 -24.72,21.35 -46.23,30.94 0,0 -38.48,54.88 -56.43,65.21 -1.19,0.68 -1.98,1.3 -2.5,1.92 -9.92,4.65 -22.34,7.27 -31.12,13.07 -8.67,5.7 -10.31,19.19 -18.99,25.7 -4.25,3.19 -10.84,6.65 -16.6,8.96 -6.96,2.79 -18.88,2.27 -25.88,6.1 -8.63,4.73 -9.32,20.11 -15.57,27.74 -6.89,8.43 -15.75,15.32 -30.08,20.14 -5.81,1.93 -14.59,2.06 -19.84,4.87 -5.86,3.17 -7.29,13.09 -11.83,19.17 -10.55,14.18 -28.36,25.18 -48.68,32.36 -4.37,1.54 -10.25,1.54 -13.45,3.69 -3.77,2.51 -4.21,8.78 -6.49,13.58 70.85,19.35 145.38,29.77 222.36,29.77 z m 836.59,-742.56 c -8.56,14.67 -16.48,28.78 -19.38,41.21 5.81,-3.84 10.91,-8.12 15.58,-12.68 1.44,-9.45 2.67,-18.97 3.8,-28.53 z m -836.59,785.09 c -91.12,0 -179,-13.85 -261.73,-39.43 -0.93,1.64 -1.92,3.24 -3.23,4.52 -7.64,7.44 -16.71,13.1 -35.56,8.27 -6.68,8.65 -15.49,15.6 -29.86,18.55 l -0.45,-0.77 c 1.82,-17.15 4.28,-34.48 17.33,-46.35 l -0.19,-2.08 C 1997.3,3605.11 1759.91,3282.33 1759.91,2904 c 0,-488.79 396.23,-885.02 885.01,-885.02 488.78,0 885.04,396.23 885.04,885.02 0,488.81 -396.26,885.03 -885.04,885.03 z M 2195.1,3323.54 c 0,0 -34.81,-7.93 -44.29,-69.48 0,0 -1.65,-31.07 8.08,-24.19 -8.2,37.31 27.27,54.21 35.41,58.01 11.45,5.09 20.91,3.38 21.63,-6.34 0.78,-10.49 -11.21,-15.13 -14.8,-25.94 12.73,7.18 41.2,24 38.24,45.9 -2.18,16.1 -17.34,30.4 -44.27,22.04 z m 457.68,-37.78 c -59.82,35.02 -214.44,28.94 -250.75,31.75 -35.74,2.72 -53.6,-42.72 -53.6,-42.72 -0.47,-9.62 7.4,1.41 7.4,1.41 23.35,42.08 43.88,34.38 43.88,34.38 73.99,-21.61 
87.17,-130.29 90.25,-155.48 3.06,-25 9.38,-8.72 9.38,-8.72 7.19,62.54 -38.41,138.76 -48,148.55 -9.58,9.73 -0.43,15.23 30.13,6.68 92.03,-25.84 100.65,-173.22 106.2,-191.09 5.54,-17.8 9,-0.68 9,-0.68 -12.45,135.51 -63.96,168.92 -77.69,187.56 -5.7,7.73 40.82,8.9 61.31,-4.43 115.64,-74.71 102.66,-217.79 113.07,-197.37 10.55,20.85 -29.1,150.34 -84.81,194.41 -11.15,8.79 38.66,-6.58 57.02,-23.49 87,-80.41 97.7,-167.53 104.39,-192.7 6.57,-25 9.78,-16.53 9.78,-16.53 1.21,85.15 -66.26,193.01 -126.96,228.47 z m 167.67,59.29 c 12.83,6.96 27.96,52.1 59.04,33.35 23.1,-13.93 34.1,-17.46 34.1,-17.46 0,0 -16.13,-4.53 -23.47,-16.63 -7.34,-12.1 -16.14,-21.41 -41.81,-17.62 -26.95,4.02 -31.88,-0.21 -45.46,-2.42 0,0 4.76,13.79 17.6,20.78 z m -49.59,-36.2 c 19.79,-4.3 38.04,-11.99 56.34,-23.86 20.27,-13.17 42.21,-37.36 72.44,-34.91 9.64,0.77 27.16,10.1 34.21,14.7 8.82,5.75 29.74,19.45 24.15,40.39 -2.48,9.3 -13.54,10.78 -20.13,18.37 -9.13,10.56 -8.78,20.93 -16.09,33.05 15.93,-3.2 25.06,-12.64 42.24,-14.7 -27.07,24.28 -58.26,44.78 -96.58,58.78 2.88,1.63 4.71,8.31 14.09,11.01 8.45,2.44 35.7,-1.72 18.11,1.85 -17.66,4.94 -28.62,-0.09 -40.23,-7.36 -26.68,-16.67 -48.41,-54.95 -76.48,-71.6 -12.18,-7.27 -24.09,-5.6 -34.22,-18.39 6.56,-13.1 13.21,-5.39 22.15,-7.33 z m 209.28,23.88 c -6.66,-3.87 4.19,-7.38 6.03,-9.19 7.67,-7.55 24.8,-21.4 30.18,-33.05 11,-23.83 7.62,-60.3 -12.08,-73.48 -13.27,-8.86 -78.22,-33.08 -106.63,-33.05 -79.93,0.11 -88.04,87.86 -136.84,117.54 0.87,-8.4 7,-25.99 13.98,-35.11 27.44,-35.83 53.66,-109.66 130.81,-97.33 27.26,4.36 78.26,23.19 98.72,35.98 22.22,13.85 38.72,67.03 16.08,96.46 -10.14,13.18 -34.41,29.88 -40.25,31.23" + style="fill:#403c3d;fill-opacity:1;fill-rule:evenodd;stroke:none" + id="path76" /> + </g> + </g> + <path + inkscape:connector-curvature="0" + d="m 376.40655,56.024215 0,23.8525 24.42625,0 0,-23.8525 -24.42625,0 z m 0.09,131.692505 24.24875,0 0,-96.436255 -24.24875,0 0,96.436255 z m 305.7425,-96.436255 -35.3525,96.436255 -18.88,0 -35.5375,-96.436255 
25.3575,0 19.61875,59.602505 19.43625,-59.602505 25.3575,0 z m -97.74375,96.436255 -23.5075,0 0,-8.885 c -6.29375,6.66375 -14.31625,9.995 -24.06125,9.995 -9.505,0 -17.09375,-2.83625 -22.76875,-8.51375 -6.54125,-6.53875 -9.80875,-15.67 -9.80875,-27.39375 l 0,-61.638755 24.2475,0 0,58.306255 c 0,6.04875 1.70625,10.61625 5.12125,13.6975 2.80625,2.5925 6.34375,3.8875 10.61125,3.8875 4.39,0 7.99,-1.295 10.79625,-3.8875 3.4125,-3.08125 5.12125,-7.64875 5.12125,-13.6975 l 0,-58.306255 24.24875,0 0,96.436255 z m -114.8675,-48.31 c 0,-8.51375 -0.67125,-14.6225 -2.0125,-18.32375 -2.44125,-6.17 -7.1375,-9.255 -14.09,-9.255 -6.95625,0 -11.6525,3.085 -14.09125,9.255 -1.34125,3.70125 -2.0125,9.81 -2.0125,18.32375 0,8.515 0.67125,14.62375 2.0125,18.32375 2.43875,6.295 7.135,9.44125 14.09125,9.44125 6.9525,0 11.64875,-3.14625 14.09,-9.44125 1.34125,-3.7 2.0125,-9.80875 2.0125,-18.32375 z m 24.24875,0 c 0,9.8725 -0.43375,17.2775 -1.29625,22.2125 -1.35875,8.02125 -4.1975,14.19125 -8.51375,18.5075 -5.80375,5.805 -13.63625,8.7 -23.50875,8.7 -9.8725,0 -17.8925,-3.3925 -24.06125,-10.18 l 0,9.07 -23.3225,0 0,-131.788755 24.2475,0 0,43.86875 c 5.77875,-6.415 13.53,-9.625 23.24625,-9.625 9.83875,0 17.64875,2.9 23.42875,8.7 4.305,4.318755 7.1325,10.488755 8.48625,18.508755 0.8625,4.93625 1.29375,12.28 1.29375,22.02625 z m -130.04375,48.31 -13.69625,0 c -9.255,0 -16.4125,-2.8975 -21.47125,-8.69875 -4.4425,-5.05875 -6.6625,-11.35125 -6.6625,-18.88 l 0,-104.210005 24.2475,0 0,102.730005 c 0,5.80125 2.8075,8.69875 8.42375,8.69875 l 9.15875,0 0,20.36" + style="fill:#403c3d;fill-opacity:1;fill-rule:nonzero;stroke:none" + id="path74" /> + <g + transform="matrix(0.23169071,0,0,-0.23371708,-280.79355,1099.9435)" + id="g66-4"> + <g + id="g68-1" + clip-path="url(#clipPath70-6)"> + <g + id="g3197" + transform="translate(-350.10267,-350.10267)"> + <path + id="path76-6" + style="fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none" + d="m 3504.81,2908.41 c 0,-483.58 -391.32,-875.6 -874.02,-875.6 
-482.71,0 -874,392.02 -874,875.6 0,483.61 391.29,875.63 874,875.63 482.7,0 874.02,-392.02 874.02,-875.63" + inkscape:connector-curvature="0" /> + <path + id="path78" + style="fill:#403c3d;fill-opacity:1;fill-rule:evenodd;stroke:none" + d="m 3482.08,2908.41 c 0,-471 -381.13,-852.83 -851.29,-852.83 -470.16,0 -851.27,381.83 -851.27,852.83 0,471.02 381.11,852.85 851.27,852.85 470.16,0 851.29,-381.83 851.29,-852.85" + inkscape:connector-curvature="0" /> + <path + id="path80" + style="fill:#adda1a;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 3473.33,3029.95 c -6.42,7.53 -13.81,14.41 -22.76,20.33 3.77,-16.2 15.99,-35.21 27.12,-54.62 -1.18,11.52 -2.73,22.91 -4.36,34.29 z M 3070.25,2284 c -63.63,-5.85 -49.36,-41.32 30.29,-33.77 29.21,2.77 76.64,33.01 121.13,44.56 22.41,21.66 43.74,44.44 63.68,68.43 -27,-14.62 -77.9,-37.33 -127.3,-60.84 -28.31,-13.46 -63.77,-16.17 -87.8,-18.38 z m 409,691.11 c -9.21,11.12 -16.63,28.08 -26.56,40.46 -2.32,4.07 -10.61,18.92 -19.05,37.24 -0.7,1.84 -1.35,3.66 -2.39,5.51 l -0.05,-0.07 c -2.45,5.45 -4.83,11.12 -7.04,16.85 l -0.01,0.06 c -1.76,4.55 -3.39,9.12 -4.84,13.64 -7.01,21.99 -27.18,60.03 -55.62,91.97 -0.17,1.83 -0.23,3.57 0.33,4.78 l -4.7,-0.11 c -16.1,17.22 -34.56,32.28 -54.71,41.42 -58.53,26.53 -110.11,53.23 -110.11,53.23 -1.79,-15.12 12.75,-34.44 21.07,-51.41 1.85,-6.68 3.04,-13.01 3.15,-18.42 l -0.53,0.48 c -0.79,-1.72 -0.36,-4.51 -0.09,-7.16 -0.63,-2.86 -1.75,-5.25 -3.64,-6.83 -9.8,30.98 -27.24,55.39 -36.22,83.01 -9.63,29.6 -17.24,58.12 -39.62,80.27 -27.97,27.69 -80.34,42.83 -120.05,60.13 -15.2,9.33 -29.15,18.25 -39.73,25.48 l 0.27,0.4 c -42.2,33.83 -99.59,85.95 -160.2,43.76 -36.58,-25.48 -72.09,-74.05 -114.92,-94.66 -4.7,-2.29 -10.18,-4.72 -16.01,-6.75 l -0.25,-0.29 -1.82,-0.38 c -4.22,-1.37 -8.56,-2.48 -12.88,-3.09 -11.15,-3.02 -19.94,-6.36 -27.56,-9.36 -0.89,-1.67 -2.36,-1.72 -2.72,-1.08 -14.7,-5.76 -25.44,-9.82 -42.09,-6.82 -10.16,1.82 -36.31,-0.01 -47.19,-1.01 0,0 -69.05,-0.05 15.93,9.37 -3.27,2.46 -6.36,4.92 -9.21,7.4 
-45.43,-3.61 -85.18,12.35 -130.49,5.85 5.09,-8.21 21.77,-6.21 25.04,-16.05 -34.67,-11.48 -53.95,37.68 -88.39,17.4 5.15,-9.48 0.58,-21.45 -6.91,-27.78 2.9,29.09 -36.96,53.69 -62.56,31.76 9.7,-10.86 10.2,-28.86 4.46,-43.12 l -17.4,-19.5 c -4.08,-1.69 -8.59,-2.37 -13.51,-1.4 16.74,7.37 29.01,20.73 29.06,36.46 0.09,27.81 -34.12,35.28 -62.43,34.49 -56.18,-1.59 -100.39,-26.71 -133.84,-59.13 l 0.24,0.95 -19.4,-20.96 c 0,0 -60.6,-25.35 -82.33,-93.09 0.7,8.35 1.64,17.81 2.85,27.9 9.79,28.43 25.25,49.66 49.44,66.79 -28.7,-3.61 -69.69,-15.54 -82.38,-53.55 -5.96,-17.81 -12.11,-31.1 -16.94,-60.9 l -0.29,0.34 c 0,0 -11.55,-58.65 -4.07,-92.05 6.5,-29.06 15.74,-58.97 19.98,-78.46 0.31,-2.57 0.8,-5.25 1.55,-8.02 53.53,25.5 127.71,32.14 197.84,42.47 -29.28,-4.79 -70.93,-16.14 -111.03,-25.84 -40.01,-9.7 -98.01,-24.8 -98.91,-60.92 -0.53,-21.23 15.39,-39.9 42.38,-40.6 -2.4,5.18 -2.21,12.74 -2.01,20.32 -5.57,-0.46 -4.61,-6.86 -8.07,-9.23 -2.07,41.18 68.63,45.39 115.05,53.52 6.31,1.1 12.28,2.63 18.18,3.7 21.63,3.79 53.78,1.97 88.43,1.46 47.63,-0.67 81.2,5.28 125.04,-11.15 14.08,-5.29 43.74,-7.09 58.03,-8.62 50.68,-5.38 88.38,3.26 150.38,-2.01 24.7,-2.09 49.47,-11.64 72.68,-22.15 32.18,-14.54 57.5,-32.58 96.88,-22.16 14.24,-0.73 28.56,1.95 33.28,-0.73 22.77,-2.03 19.21,-19.81 45.45,-17.71 30.54,2.43 54.62,21.61 90.84,24.01 28.24,1.84 57.99,-5.19 90.84,-1.86 22.77,2.31 45.33,12.35 64.59,20.29 48.59,20.07 128.49,59.45 135.25,112.57 5.48,43.01 -26.95,65.35 -72.67,53.55 46.44,24.15 89.27,-11.93 88.81,-57.23 -0.13,-13.82 -9.81,-26.35 -16.14,-42.45 -6.37,-16.16 -6.51,-27.48 -14.14,-36.9 -3.81,-4.74 -8.73,-8.7 -13.9,-11.99 -8.91,-4.41 -21.76,-8.11 -39.39,-12.95 -26.03,-7.17 -50.53,-14.82 -50.53,-14.82 -4,-0.78 -5.56,-1.56 -5.83,-2.26 -16.03,-4.39 -35.35,-10.88 -46.46,-18.98 -21.4,-15.61 -55.77,-27.94 -88.83,-22.1 -3.06,0.55 -5.56,0.66 -7.59,0.44 -10.86,1.42 -21.56,2.92 -32.09,3.3 0,0 -60.14,-1.51 -56.74,-3.53 3.41,-2.07 4.26,-2.02 4.26,-2.02 22.01,-17.39 32.52,-46.38 44.41,-73.8 13.37,-30.85 
23.4,-62.63 26.25,-103.36 8.55,33.04 20.19,63.26 35.25,90.38 19.61,32.19 58.62,82.91 118.6,102.69 21.12,6.63 38.61,16.58 52.03,30.26 -18.49,-27.47 -50.41,-39.13 -76.7,-57.24 -54.55,-37.54 -83.75,-92.88 -109,-162.4 -5.76,-15.84 -11.5,-31.64 -20.18,-46.15 -8.38,-13.93 -21.17,-26.48 -26.25,-40.58 -3.28,-9.09 -1.97,-19.3 -4.03,-27.7 -5.7,-23.21 -15.54,-46.26 -29.19,-64.5 -11.59,-13.02 -35.57,-39.82 -40.13,-43.98 l -0.22,-0.36 c -31.85,-30.92 -69.62,-56.39 -110.13,-79.41 l 0.39,-0.82 -2.18,-1.47 -0.22,0.45 c -5.62,-3.61 -10.68,-7.72 -15.38,-12.15 -4.84,-4.03 -8.91,-8.05 -10.85,-11.33 -16.68,-19.34 -29.5,-42.11 -48.46,-59.56 -10.87,-10.02 -23.71,-18.07 -36.43,-26.37 l 0.09,0.31 c 0,0 -15.25,-10.33 -28.3,-18.19 -40.26,-19.09 -81.84,-52.85 -143.27,-53.58 -32.98,-0.36 -78.22,2.15 -92.87,27.71 0,0 -8.68,4.13 -8.07,-14.77 0.78,-24.11 41.07,-24.82 61.99,-34.85 10.59,-6.14 -10.26,-1.04 -17.57,-2.08 16.08,-17.27 25.62,-40.53 60.55,-40.57 0,0 -8.15,7.62 -10.95,9.16 -2.81,1.52 1.78,3.83 14.29,-0.78 10.26,-3.78 23.65,-7.19 41.17,-7.09 1.13,0 2.23,-0.02 3.41,0 2.76,0.08 5.63,0.26 8.61,0.54 0,0 31.03,0.17 60.97,13.12 29.96,12.96 63.98,9.55 79.64,21.14 15.66,11.58 27.23,34.07 37.43,37.5 10.22,3.42 24.55,11.33 36.24,24.55 9.96,11.24 25.12,4.89 37.52,16.37 11.29,10.44 86.52,47.81 86.52,47.81 22.36,15.41 36.07,33.48 61.35,46.2 36.66,25.62 -50.85,-18.01 -76.31,-27.89 -31.65,-12.26 -3.07,4.31 -3.07,4.31 l 51.26,29.33 c 15.44,7.57 30.91,15.07 46.7,22.26 10.21,4.35 27.69,11.65 41.34,16.53 26.89,6.81 50.65,4.82 73.31,8.99 24.06,4.41 45.17,16.05 68.63,20.3 3.73,0.66 10.05,-1.26 14.14,0 25.47,7.85 50.95,32.67 74.69,46.13 54.76,31.04 95.31,60.15 111.03,127.36 3.69,15.78 5.91,31.75 10.09,47.96 8.91,34.68 15.52,66.46 12.1,112.58 -2.02,27.41 -17.84,55.88 -16.14,90.44 1.6,32.49 24.4,57.21 34.31,86.74 11.84,35.26 -9.8,59.99 -42,73.32 l 0.02,0.03 c 0,0 -23.83,11.25 -33.01,11.93 -9.19,0.69 4.77,3.06 10.89,3.06 4.7,0 8.77,3.24 11.96,8.26 3.64,5.15 5.68,12.38 9.75,17.83 5.72,7.65 19.89,18.14 20.19,25.82 
0.39,10.04 -16.8,23.84 -26.25,29.55 -16.3,9.86 -42.61,15.88 -64.59,25.84 -24.47,11.05 -42.11,23.26 -54.5,35.05 -6.87,17.68 -19.82,28.7 -18.17,49.84 0.41,5.36 2.38,10.54 4.14,15.93 l 0.74,1.99 c 2.04,6.89 4.85,17.63 0,29.9 -4.84,12.28 -22.46,15.85 -22.46,15.85 l -0.07,-0.01 c -9.55,3.18 -20.88,5.11 -28.78,8.33 -12.47,5.03 -22.27,11.8 -32.01,16.88 12.11,-3.84 26.84,-10.03 49.63,-12.08 10.06,-0.89 38.28,-5.95 43.6,-23.9 l 0.56,-2.02 0.47,-1.79 c 0.23,-1.02 0.4,-2.05 0.56,-3.09 0.18,-1.35 0.38,-2.66 0.41,-4.12 l 0.09,-2.09 c 0.1,-7.16 -1.4,-14.68 -4.77,-22.16 -12.95,-28.75 1.23,-50.51 22.34,-61.76 19.97,-15.69 49.52,-24.31 78.59,-35.23 23.22,-8.71 49.09,-21.54 62.26,-36.33 6.84,-11.84 10.51,-24.87 6.01,-37.26 -7.2,-13.97 -24.49,-20.05 -29.91,-37.14 6.8,-8.64 19.17,-10.55 28.26,-16.59 14.11,-9.41 24.5,-22.04 27.69,-40.18 l 0.18,-1.91 c 0.34,-2.16 0.53,-4.32 0.63,-6.51 0.03,-1.96 -0.05,-3.92 -0.08,-5.88 -1.68,-32.34 -26.5,-65.77 -33.73,-81.2 -8.16,-17.37 -5.61,-35.28 -5.61,-35.28 1.81,-7 3.64,-12.54 5.48,-16.85 3.27,-9.56 7.03,-22.96 9.88,-38.76 10.13,-22.39 21.09,-48.44 25.98,-66.09 10.22,-36.81 24.51,-102.3 4.09,-247.53 -20.41,-145.21 -215.75,-151.34 -274.96,-175.91 -40.77,-16.9 4.76,-30.67 82.01,-16.35 34.95,6.5 80.11,35.32 122.51,43.97 17.97,3.67 29.54,5.36 36.09,5.44 114.98,145.4 183.81,329.08 183.81,528.99 0,22.48 -1.13,44.67 -2.83,66.7" + inkscape:connector-curvature="0" /> + <path + id="path82" + style="fill:#f4f4f4;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2527.18,3484.46 c 28.26,41.24 27.7,71.63 24.53,103.17 -0.63,1.96 0.68,1.9 2.29,3.34 55.47,-11.28 59.88,-65.49 36.07,-106.35 -11.3,-19.34 -22.34,-29.53 -39.67,-24.13 -14.35,4.48 -16.31,16.03 -23.22,23.97 z m -22.94,26.32 c -23.48,6.74 -30.28,33.38 -39.75,44.39 25.59,27.33 24.12,51.34 23.12,73.26 -0.25,5.04 -1.16,10.64 0.87,15.61 4.78,-0.4 13.16,-1.62 20.01,-5.26 31.15,-16.48 44.04,-75.44 25.93,-110.1 -7.77,-14.86 -15.64,-22.08 -30.18,-17.9 z m -49.53,55.34 c -21.15,10.63 -33.04,24.8 -38.55,40.89 
-3.24,9.42 -5.73,10.56 -3.54,22.26 4.71,25.26 6.29,53.13 -0.01,73.19 l 1.52,0.74 c 61.04,-21.54 96.06,-62.27 40.58,-137.08 z m 160.36,-21.48 c 0.43,1.12 1.42,0.94 2.43,0.7 19.43,-8.58 39.87,-22.99 49.81,-38.59 18.89,-29.6 24.49,-74.11 8.75,-109.89 -6.99,-1.61 -11.49,-3.22 -17.12,-5.22 -0.96,-0.25 -1.96,-0.53 -2.93,-0.88 l -5.24,-1.92 -0.53,-0.22 -0.83,-0.27 0.06,-0.02 c -2.29,-1 -3.88,-1.76 -3.88,-1.76 -7.42,-3.8 -15.21,-6.06 -22.62,-7.34 -45.89,9.86 -59.74,62.26 -59.74,62.26 42.87,18.87 67.1,73.24 51.84,103.15 z m 88.05,-72.17 c 16.89,-8.8 28.17,-20.87 34.68,-34.6 -2.09,-1.82 -4.23,-3.69 -6.42,-5.58 -2.89,5.4 -6.88,10.28 -12.13,14.42 2.37,-6.9 3.73,-13.96 4.23,-21.22 -3.14,-2.68 -6.23,-5.31 -9.09,-7.73 -2.63,-5.05 -10.69,-11.72 -20.99,-16.09 8.17,24.85 9.15,51.24 9.72,70.8 z m -311.91,160.11 c -8.77,5.76 -15.6,12.68 -20.53,20.31 -3.9,6.06 -9.38,15.98 -8.2,24.13 0.81,5.61 5.25,13.13 7.51,19.41 6.49,18.02 9.03,34.28 3.94,47.21 37.11,-16.36 42.93,-57.47 17.28,-111.06 z m -71.06,107.31 c 4.27,4.64 7.57,6.94 10.26,11.67 4.42,7.78 0.01,11.73 2.36,16.45 39.87,-5.15 43.3,-33.2 26.09,-72.98 -26.57,9 -33.65,26.79 -38.71,44.86 z m -16.21,42.78 c 13.88,-3.86 29.39,-11.76 14.6,-35.37 -9.08,9.72 -12.16,22.55 -14.6,35.37" + inkscape:connector-curvature="0" /> + <path + id="path84" + style="fill:#f6836c;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2529.01,2752.37 c -23.94,4.97 -43.72,-1.93 -64.59,-5.52 -13.88,-2.35 -33.24,-7.99 -41.67,0.71 -3.55,4.21 -0.72,8.52 -0.72,8.52 7.97,19.73 31.16,31.35 50.46,36.91 60.55,17.46 119.21,-14.63 177.64,-18.45 75.3,-4.94 129.29,21.21 167.54,47.98 10.06,7.04 23.37,24.16 40.39,12.92 -7.25,-15.06 -26.47,-24.97 -42.4,-33.22 -27.89,-14.43 -52.63,-27.06 -84.78,-35.07 54.18,8.3 92.69,30.91 133.23,51.69 4.96,-10.44 10.54,-31.37 14.21,-49.1 l 0.51,0.06 c 0,0 3.69,-9.93 2.6,-19.63 0.14,-4.63 -0.79,-8.61 -4.09,-10.8 -1.11,-1.42 -2.45,-2.74 -4.12,-3.84 -14.8,-9.71 -58.71,-31.2 -93.93,-33.75 -1.16,-0.06 -2.28,-0.21 -3.43,-0.33 l -2.59,-0.73 
-0.08,0.49 c -31.94,-3.71 -52.6,-16.9 -11.76,-10.17 40.88,6.75 128.41,21.66 96.52,9.67 l 0.11,0.01 c -39.82,-15.58 -92.06,-30.84 -143.33,-25.84 -33.14,3.22 -67.42,19.51 -96.89,35.07 -29.44,15.53 -57.36,35.95 -88.83,42.42" + inkscape:connector-curvature="0" /> + <path + id="path86" + style="fill:#f4f4f4;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2326.23,2354.71 c 4.08,2.22 4.97,-7.19 8.29,-18.98 3.33,-11.89 19.84,-36.31 19.84,-36.31 2.35,-5.03 -6.67,-9.99 -13.52,-12.26 -6.76,-2.27 -6.76,1.09 -6.76,1.09 0,0 -12.23,17.5 -14.12,33.81 -1.92,16.13 2.23,30.52 6.27,32.65 z m 40.64,1.81 c 4.69,2.53 5.68,-8.19 9.45,-21.63 3.78,-13.49 22.67,-41.28 22.67,-41.28 2.68,-5.7 -7.73,-11.3 -15.59,-13.92 -7.76,-2.56 -7.75,1.26 -7.75,1.26 0,0 -13.98,19.89 -16.1,38.44 -2.15,18.35 2.67,34.69 7.32,37.13 z m 40.42,22 c 6,3.2 7.23,-10.4 12,-27.41 4.77,-17.13 28.86,-52.33 28.86,-52.33 3.41,-7.23 -9.95,-14.37 -20.01,-17.65 -9.95,-3.25 -9.92,1.56 -9.92,1.56 0,0 -17.78,25.2 -20.43,48.68 -2.68,23.23 3.53,44 9.5,47.15 z m 57.45,25.09 c 6.09,3.05 6.97,-10.1 11.18,-26.55 4.31,-16.64 27.29,-50.85 27.29,-50.85 3.2,-6.99 -10.33,-13.79 -20.48,-16.91 -10.02,-3.04 -9.92,1.62 -9.92,1.62 0,0 -16.98,24.48 -18.95,47.17 -1.98,22.49 4.86,42.54 10.88,45.52 z m 44.26,31.78 c 6.07,3.98 8.64,-9.21 15.17,-25.27 6.58,-16.12 35.51,-47.78 35.51,-47.78 4.33,-6.72 -9.26,-15.58 -19.72,-20.26 -10.3,-4.57 -10.76,0.29 -10.76,0.29 0,0 -21.15,22.63 -26.1,45.45 -4.9,22.54 -0.14,43.66 5.9,47.57 z m 48.77,40.94 c 5.41,4.47 9.46,-7.11 17.8,-20.77 8.45,-13.93 40.63,-38.61 40.63,-38.61 5.1,-5.54 -6.94,-15.85 -16.57,-21.68 -9.51,-5.74 -10.49,-1.34 -10.49,-1.34 0,0 -23.53,17.77 -31.17,37.94 -7.55,19.98 -5.51,40.01 -0.2,44.46 z m 185.39,120.94 c 4.18,3.75 8.57,-4.11 16.71,-13.01 8.21,-9.05 37.03,-23.24 37.03,-23.24 4.66,-3.43 -4.6,-12.04 -12.11,-17.22 -7.5,-5.19 -8.69,-2.14 -8.69,-2.14 0,0 -21.23,10.05 -29.26,23.76 -7.99,13.54 -7.86,28.06 -3.68,31.85 z m -83.83,-151.23 c -9.45,-6.38 -10.83,-2.22 -10.83,-2.22 0,0 -25.47,15.59 
-34.69,34.81 -9.02,18.94 -8.25,38.54 -3.03,43.27 5.27,4.75 10.24,-6.17 19.76,-18.9 9.66,-12.94 44.48,-34.59 44.48,-34.59 5.58,-5.02 -6.2,-15.95 -15.69,-22.37 z m 15.48,114.75 c 5.12,4.8 10.6,-5.38 20.83,-16.95 10.35,-11.84 46.67,-30.68 46.67,-30.68 5.91,-4.46 -5.65,-15.61 -15.02,-22.3 -9.37,-6.71 -10.94,-2.75 -10.94,-2.75 0,0 -26.56,13.47 -36.67,31.33 -10.02,17.62 -10.04,36.54 -4.87,41.35 z m 101.12,357.36 c 0,0 5.89,-0.99 3.23,-9.33 -2.65,-8.32 -3.16,-34.13 4.89,-45.23 8.1,-11.15 -5.54,-6.92 -16.19,4.34 -10.67,11.21 -18.21,31.85 -15.91,45.89 0,0 -0.61,5.9 23.98,4.33 z m -37.63,-68.3 c 12.91,-14.43 -6.86,-10.58 -23.41,3.64 -16.55,14.19 -30.1,41.88 -28.85,61.86 0,0 -1.69,8.18 33.05,9.59 0,0 8.44,-0.55 5.83,-12.61 -2.48,-12.03 0.45,-48.16 13.38,-62.48 z m -71.11,-9.21 c 12.65,-18.42 -9.32,-11.03 -26.12,7.75 -16.81,18.79 -28.16,52.61 -23.84,75.41 0,0 -0.74,9.57 39.08,5.91 0,0 9.54,-1.9 4.91,-15.26 -4.74,-13.49 -6.65,-55.37 5.97,-73.81 z m -108.01,18.58 c -16.99,21.48 -27.18,59.39 -21,84.23 0,0 -0.14,10.67 42.87,4.7 0,0 10.17,-2.58 4.24,-17.17 -5.94,-14.56 -10.76,-60.64 1.73,-81.59 12.48,-20.92 -10.85,-11.75 -27.84,9.83 z m -83.19,11.35 c -17.28,21.7 -27.77,60.21 -21.65,85.66 0,0 -0.23,10.79 43.27,5.3 0,0 10.31,-2.5 4.38,-17.43 -5.91,-14.92 -10.54,-62 2.15,-83.15 12.76,-21.18 -10.88,-12.13 -28.15,9.62 z m -80.49,-21.24 c -20.89,28.13 -34.89,78.07 -29.36,111.18 0,0 -0.81,14.15 48.98,7.34 0,0 11.95,-3.12 6.04,-22.7 -5.94,-19.46 -8.65,-80.67 7.02,-108.11 15.66,-27.4 -11.77,-15.8 -32.68,12.29 z m -80.64,17.63 c -20.8,24.29 -34.51,68.64 -28.73,98.66 0,0 -0.78,12.7 49.2,8.87 0,0 11.96,-2.3 5.84,-20.09 -6.04,-17.77 -9.17,-73 6.38,-96.98 15.61,-23.92 -11.86,-14.78 -32.69,9.54 z m -78.28,28.17 c -15.16,21.15 -23.52,58.34 -17.11,82.67 0,0 0.15,10.43 39.86,4.32 0,0 9.42,-2.59 3.47,-16.79 -5.9,-14.27 -11.67,-59.43 -0.66,-80.02 11.02,-20.57 -10.39,-11.43 -25.56,9.82 z m -61.99,15.5 c -13.96,16.84 -22.79,46.55 -18.34,66.13 0,0 -0.35,8.36 33.98,3.94 0,0 8.25,-1.95 3.73,-13.46 
-4.34,-11.46 -7.34,-47.75 3.05,-64.16 10.34,-16.4 -8.43,-9.27 -22.42,7.55 z m -72.12,-11.61 c -16.8,18.79 -28.15,52.6 -23.84,75.42 0,0 -0.74,9.55 39.08,5.91 0,0 9.55,-1.9 4.91,-15.28 -4.74,-13.48 -6.65,-55.36 5.98,-73.82 12.64,-18.41 -9.33,-11.03 -26.13,7.77 z m -52.45,0.72 c -16.56,14.2 -30.11,41.9 -28.86,61.87 0,0 -1.69,8.18 33.03,9.58 0,0 8.46,-0.53 5.86,-12.6 -2.5,-12.04 0.43,-48.17 13.36,-62.48 12.91,-14.42 -6.86,-10.6 -23.39,3.63" + inkscape:connector-curvature="0" /> + <path + id="path88" + style="fill:#f4f4f4;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2806.87,3351.66 c 12.87,7 28.05,52.37 59.22,33.53 23.17,-14 34.21,-17.55 34.21,-17.55 0,0 -16.18,-4.55 -23.54,-16.71 -7.37,-12.16 -16.19,-21.52 -41.94,-17.7 -27.04,4.02 -31.99,-0.22 -45.6,-2.44 0,0 4.76,13.86 17.65,20.87" + inkscape:connector-curvature="0" /> + <path + id="path90" + style="fill:#403c3d;fill-opacity:1;fill-rule:evenodd;stroke:none" + d="m 2731.38,3432.29 c -2.89,5.4 -6.88,10.28 -12.13,14.42 2.37,-6.9 3.73,-13.96 4.23,-21.22 -3.14,-2.68 -6.23,-5.31 -9.09,-7.73 -2.63,-5.05 -10.69,-11.72 -20.99,-16.09 8.17,24.85 9.15,51.24 9.72,70.8 16.89,-8.8 28.17,-20.87 34.68,-34.6 -2.09,-1.82 -4.23,-3.69 -6.42,-5.58 z m -55.32,-35.43 c -6.99,-1.61 -11.49,-3.22 -17.12,-5.22 -0.96,-0.25 -1.96,-0.53 -2.93,-0.88 l -5.24,-1.92 -0.53,-0.22 -0.83,-0.27 0.06,-0.02 c -2.29,-1 -3.88,-1.76 -3.88,-1.76 -7.42,-3.8 -15.21,-6.06 -22.62,-7.34 -45.89,9.86 -59.74,62.26 -59.74,62.26 42.87,18.87 67.1,73.24 51.84,103.15 0.43,1.12 1.42,0.94 2.43,0.7 19.43,-8.58 39.87,-22.99 49.81,-38.59 18.89,-29.6 24.49,-74.11 8.75,-109.89 z m -125.66,63.63 c -14.35,4.48 -16.31,16.03 -23.22,23.97 28.26,41.24 27.7,71.63 24.53,103.17 -0.63,1.96 0.68,1.9 2.29,3.34 55.47,-11.28 59.88,-65.49 36.07,-106.35 -11.3,-19.34 -22.34,-29.53 -39.67,-24.13 z m -46.16,50.29 c -23.48,6.74 -30.28,33.38 -39.75,44.39 25.59,27.33 24.12,51.34 23.12,73.26 -0.25,5.04 -1.16,10.64 0.87,15.61 4.78,-0.4 13.16,-1.62 20.01,-5.26 31.15,-16.48 44.04,-75.44 25.93,-110.1 
-7.77,-14.86 -15.64,-22.08 -30.18,-17.9 z m -49.53,55.34 c -21.15,10.63 -33.04,24.8 -38.55,40.89 -3.24,9.42 -5.73,10.56 -3.54,22.26 4.71,25.26 6.29,53.13 -0.01,73.19 l 1.52,0.74 c 61.04,-21.54 96.06,-62.27 40.58,-137.08 z m -400.52,-594.93 c 0,0 8.46,-0.53 5.86,-12.6 -2.5,-12.04 0.43,-48.17 13.36,-62.48 12.91,-14.42 -6.86,-10.6 -23.39,3.63 -16.56,14.2 -30.11,41.9 -28.86,61.87 0,0 -1.69,8.18 33.03,9.58 z m 24.44,3.25 c 0,0 -0.74,9.55 39.08,5.91 0,0 9.55,-1.9 4.91,-15.28 -4.74,-13.48 -6.65,-55.36 5.98,-73.82 12.64,-18.41 -9.33,-11.03 -26.13,7.77 -16.8,18.79 -28.15,52.6 -23.84,75.42 z m 77.62,2.32 c 0,0 -0.35,8.36 33.98,3.94 0,0 8.25,-1.95 3.73,-13.46 -4.34,-11.46 -7.34,-47.75 3.05,-64.16 10.34,-16.4 -8.43,-9.27 -22.42,7.55 -13.96,16.84 -22.79,46.55 -18.34,66.13 z m 63.22,1.04 c 0,0 0.15,10.43 39.86,4.32 0,0 9.42,-2.59 3.47,-16.79 -5.9,-14.27 -11.67,-59.43 -0.66,-80.02 11.02,-20.57 -10.39,-11.43 -25.56,9.82 -15.16,21.15 -23.52,58.34 -17.11,82.67 z m 66.66,-12.18 c 0,0 -0.78,12.7 49.2,8.87 0,0 11.96,-2.3 5.84,-20.09 -6.04,-17.77 -9.17,-73 6.38,-96.98 15.61,-23.92 -11.86,-14.78 -32.69,9.54 -20.8,24.29 -34.51,68.64 -28.73,98.66 z m 80.01,-5.11 c 0,0 -0.81,14.15 48.98,7.34 0,0 11.95,-3.12 6.04,-22.7 -5.94,-19.46 -8.65,-80.67 7.02,-108.11 15.66,-27.4 -11.77,-15.8 -32.68,12.29 -20.89,28.13 -34.89,78.07 -29.36,111.18 z m 88.2,-4.28 c 0,0 -0.23,10.79 43.27,5.3 0,0 10.31,-2.5 4.38,-17.43 -5.91,-14.92 -10.54,-62 2.15,-83.15 12.76,-21.18 -10.88,-12.13 -28.15,9.62 -17.28,21.7 -27.77,60.21 -21.65,85.66 z m 83.84,-12.78 c 0,0 -0.14,10.67 42.87,4.7 0,0 10.17,-2.58 4.24,-17.17 -5.94,-14.56 -10.76,-60.64 1.73,-81.59 12.48,-20.92 -10.85,-11.75 -27.84,9.83 -16.99,21.48 -27.18,59.39 -21,84.23 z m 79.05,-19.65 c 0,0 -0.74,9.57 39.08,5.91 0,0 9.54,-1.9 4.91,-15.26 -4.74,-13.49 -6.65,-55.37 5.97,-73.81 12.65,-18.42 -9.32,-11.03 -26.12,7.75 -16.81,18.79 -28.16,52.61 -23.84,75.41 z m 101.86,1.14 c 0,0 8.44,-0.55 5.83,-12.61 -2.48,-12.03 0.45,-48.16 13.38,-62.48 12.91,-14.43 -6.86,-10.58 
-23.41,3.64 -16.55,14.19 -30.1,41.88 -28.85,61.86 0,0 -1.69,8.18 33.05,9.59 z m 98.58,-102.42 c 10.06,7.04 23.37,24.16 40.39,12.92 -7.25,-15.06 -26.47,-24.97 -42.4,-33.22 -27.89,-14.43 -52.63,-27.06 -84.78,-35.07 54.18,8.3 92.69,30.91 133.23,51.69 4.96,-10.44 10.54,-31.37 14.21,-49.1 l 0.51,0.06 c 0,0 3.69,-9.93 2.6,-19.63 0.14,-4.63 -0.79,-8.61 -4.09,-10.8 -1.11,-1.42 -2.45,-2.74 -4.12,-3.84 -14.8,-9.71 -58.71,-31.2 -93.93,-33.75 -1.16,-0.06 -2.28,-0.21 -3.43,-0.33 l -2.59,-0.73 -0.08,0.49 c -31.94,-3.71 -52.6,-16.9 -11.76,-10.17 40.88,6.75 128.41,21.66 96.52,9.67 l 0.11,0.01 c -39.82,-15.58 -92.06,-30.84 -143.33,-25.84 -33.14,3.22 -67.42,19.51 -96.89,35.07 -29.44,15.53 -57.36,35.95 -88.83,42.42 -23.94,4.97 -43.72,-1.93 -64.59,-5.52 -13.88,-2.35 -33.24,-7.99 -41.67,0.71 -3.55,4.21 -0.72,8.52 -0.72,8.52 7.97,19.73 31.16,31.35 50.46,36.91 60.55,17.46 119.21,-14.63 177.64,-18.45 75.3,-4.94 129.29,21.21 167.54,47.98 z m -41.74,95.63 c 0,0 5.89,-0.99 3.23,-9.33 -2.65,-8.32 -3.16,-34.13 4.89,-45.23 8.1,-11.15 -5.54,-6.92 -16.19,4.34 -10.67,11.21 -18.21,31.85 -15.91,45.89 0,0 -0.61,5.9 23.98,4.33 z m 8.86,-374.35 c -7.5,-5.19 -8.69,-2.14 -8.69,-2.14 0,0 -21.23,10.05 -29.26,23.76 -7.99,13.54 -7.86,28.06 -3.68,31.85 4.18,3.75 8.57,-4.11 16.71,-13.01 8.21,-9.05 37.03,-23.24 37.03,-23.24 4.66,-3.43 -4.6,-12.04 -12.11,-17.22 z m -57.5,-52.94 c -9.37,-6.71 -10.94,-2.75 -10.94,-2.75 0,0 -26.56,13.47 -36.67,31.33 -10.02,17.62 -10.04,36.54 -4.87,41.35 5.12,4.8 10.6,-5.38 20.83,-16.95 10.35,-11.84 46.67,-30.68 46.67,-30.68 5.91,-4.46 -5.65,-15.61 -15.02,-22.3 z m -67.96,-44.82 c -9.45,-6.38 -10.83,-2.22 -10.83,-2.22 0,0 -25.47,15.59 -34.69,34.81 -9.02,18.94 -8.25,38.54 -3.03,43.27 5.27,4.75 10.24,-6.17 19.76,-18.9 9.66,-12.94 44.48,-34.59 44.48,-34.59 5.58,-5.02 -6.2,-15.95 -15.69,-22.37 z m -59.7,-50.77 c -9.51,-5.74 -10.49,-1.34 -10.49,-1.34 0,0 -23.53,17.77 -31.17,37.94 -7.55,19.98 -5.51,40.01 -0.2,44.46 5.41,4.47 9.46,-7.11 17.8,-20.77 8.45,-13.93 40.63,-38.61 40.63,-38.61 
5.1,-5.54 -6.94,-15.85 -16.57,-21.68 z m -59.67,-53.19 c -10.3,-4.57 -10.76,0.29 -10.76,0.29 0,0 -21.15,22.63 -26.1,45.45 -4.9,22.54 -0.14,43.66 5.9,47.57 6.07,3.98 8.64,-9.21 15.17,-25.27 6.58,-16.12 35.51,-47.78 35.51,-47.78 4.33,-6.72 -9.26,-15.58 -19.72,-20.26 z m -57.23,-32.78 c -10.02,-3.04 -9.92,1.62 -9.92,1.62 0,0 -16.98,24.48 -18.95,47.17 -1.98,22.49 4.86,42.54 10.88,45.52 6.09,3.05 6.97,-10.1 11.18,-26.55 4.31,-16.64 27.29,-50.85 27.29,-50.85 3.2,-6.99 -10.33,-13.79 -20.48,-16.91 z m -54.59,-28.17 c -9.95,-3.25 -9.92,1.56 -9.92,1.56 0,0 -17.78,25.2 -20.43,48.68 -2.68,23.23 3.53,44 9.5,47.15 6,3.2 7.23,-10.4 12,-27.41 4.77,-17.13 28.86,-52.33 28.86,-52.33 3.41,-7.23 -9.95,-14.37 -20.01,-17.65 z m -44.74,-1.44 c -7.76,-2.56 -7.75,1.26 -7.75,1.26 0,0 -13.98,19.89 -16.1,38.44 -2.15,18.35 2.67,34.69 7.32,37.13 4.69,2.53 5.68,-8.19 9.45,-21.63 3.78,-13.49 22.67,-41.28 22.67,-41.28 2.68,-5.7 -7.73,-11.3 -15.59,-13.92 z m -42.56,7.47 c -6.76,-2.27 -6.76,1.09 -6.76,1.09 0,0 -12.23,17.5 -14.12,33.81 -1.92,16.13 2.23,30.52 6.27,32.65 4.08,2.22 4.97,-7.19 8.29,-18.98 3.33,-11.89 19.84,-36.31 19.84,-36.31 2.35,-5.03 -6.67,-9.99 -13.52,-12.26 z m 50.37,1345.42 c -8.77,5.76 -15.6,12.68 -20.53,20.31 -3.9,6.06 -9.38,15.98 -8.2,24.13 0.81,5.61 5.25,13.13 7.51,19.41 6.49,18.02 9.03,34.28 3.94,47.21 37.11,-16.36 42.93,-57.47 17.28,-111.06 z m -71.06,107.31 c 4.27,4.64 7.57,6.94 10.26,11.67 4.42,7.78 0.01,11.73 2.36,16.45 39.87,-5.15 43.3,-33.2 26.09,-72.98 -26.57,9 -33.65,26.79 -38.71,44.86 z m -16.21,42.78 c 13.88,-3.86 29.39,-11.76 14.6,-35.37 -9.08,9.72 -12.16,22.55 -14.6,35.37 z m 1173.75,-787.01 c -11.13,19.41 -23.35,38.42 -27.12,54.62 8.95,-5.92 16.34,-12.8 22.76,-20.33 -1.34,9.41 -2.92,18.74 -4.57,28.06 -9.71,7.73 -20.63,14.52 -33.49,19.89 -20.69,92.5 -65.21,150.91 -156.67,184.3 -30.66,11.19 -59.7,24.15 -89.94,37.34 1.75,98.46 -112.47,96.39 -183.64,147.07 -30.95,22.1 -66.05,62.87 -101.46,70.41 -68.48,14.6 -110.62,-33.23 -151.06,-66.63 -9.93,10.85 -24.8,21.45 
-46.38,31.1 0,0 -38.6,55.15 -56.6,65.52 -1.19,0.69 -2,1.32 -2.52,1.93 -9.94,4.68 -22.41,7.31 -31.21,13.14 -8.7,5.73 -10.35,19.28 -19.05,25.84 -4.26,3.2 -10.87,6.67 -16.65,8.99 -6.98,2.81 -18.94,2.29 -25.97,6.14 -8.65,4.75 -9.34,20.2 -15.61,27.87 -6.91,8.47 -15.8,15.4 -30.18,20.24 -5.83,1.94 -14.63,2.07 -19.9,4.9 -5.88,3.18 -7.32,13.15 -11.87,19.26 -10.58,14.26 -28.45,25.31 -48.83,32.52 -4.39,1.55 -10.28,1.55 -13.49,3.71 -4.03,2.7 -4.19,9.73 -6.94,14.64 -4.25,7.58 -9.92,15.28 -19.06,19.99 -3.91,2.04 -9.92,1.85 -13.75,4.03 -4.5,2.58 -5.71,8.88 -9.5,12.57 -7.66,7.48 -16.76,13.16 -35.67,8.31 -6.7,8.69 -15.55,15.67 -29.96,18.64 l -0.44,-0.77 c 1.82,-17.24 4.29,-34.66 17.38,-46.59 -3.32,-19.81 5.98,-34.36 21.01,-46.08 3.62,-2.83 9.62,-4.32 12.72,-8.05 4.11,-4.96 2.41,-15.04 4.2,-21.26 5.56,-19.2 17.41,-36.84 37.11,-51.25 2.88,-2.12 8.38,-3.14 10.88,-5.97 5.5,-6.25 3.9,-16.74 8.05,-23.95 4.36,-7.59 11.79,-16.75 20.96,-23.81 5.03,-3.88 12.53,-5.95 17.23,-10.57 3.81,-3.73 4.86,-10.44 7.58,-16.02 7.35,-15.1 26.81,-32.59 36.15,-48.37 1.23,-2.08 2.2,-4.3 3.01,-6.54 3.89,-8.03 13.3,-25.05 26.13,-32.91 0,0 1.34,-23.63 24.86,-50.57 -47.65,2.58 -93.62,29.56 -128.96,2.76 -10.2,11.15 -42.93,19.9 -56.94,4.25 -10.33,18.01 -65.18,29.5 -75.85,1.04 -77.98,35.05 -172.08,-14.92 -202.33,-65.08 -5.61,-2.68 -10.86,-5.51 -15.82,-8.41 -21.34,6.43 -50.25,8.71 -81.54,-13.89 -30.84,-22.29 -63.08,-122.26 -57.53,-191.48 0.6,-7.41 14.64,-24.92 19.68,-59.99 5.02,-34.8 8.83,-46.22 8.83,-46.22 -6.86,-11.99 -15.04,-17.35 -16.15,-31.38 -2.42,-30.36 23.49,-46.85 47.76,-62.4 8.65,-4.04 18.42,-6.93 29.04,-7.32 6.41,-10.15 31.11,-44.55 71.94,-44.55 1.01,0 2.04,0.07 3.04,0.13 15.59,-13.13 31.86,-16.66 20.63,-0.47 -0.93,1.33 -1.72,2.83 -2.46,4.38 7.51,-4.47 16.59,-8.82 24.69,-9.61 0.16,-0.02 3.98,-0.98 7.14,-0.98 1.14,0 4.48,0 5.66,2.57 1.18,2.6 -1.2,5.38 -1.99,6.28 -3.27,3.77 -6.95,13.59 -9.72,22.76 3,-5.5 6.25,-10.71 9.9,-15.21 19.43,-23.89 46.63,-35.2 31.44,-11.41 -4.87,7.61 -7.65,17.99 -9.01,29.01 
7.53,-9.28 20.49,-20.09 41.56,-24.4 17.42,-16.59 39.07,-23.38 26.1,-5.74 -3.32,4.51 -5.48,10.33 -6.77,16.68 10.5,-12.62 28.26,-28.83 50.03,-28.83 l 1.59,0.03 0.44,0.02 c 14.94,-10.11 27.45,-11.8 16.27,2.79 -4.84,6.36 -7.48,15.16 -8.67,24.45 9.87,-20.16 28.91,-47.92 61.69,-56.83 0.24,-0.1 5.89,-1.65 10.55,-1.65 3.63,0 5.93,0.93 7.02,2.92 1.59,2.87 -0.5,6.19 -1.19,7.28 -2.11,3.41 -4.08,7.64 -5.78,11.82 14.71,-8.64 25.59,-9.38 14.13,4.7 -5.91,7.26 -9.06,17.5 -10.36,28.08 8,-21.41 24.29,-51.48 56.48,-71.19 0.23,-0.11 6.03,-2.83 12.45,-2.83 3.1,0 5.87,0.64 8.26,1.84 2.05,1.07 3.39,2.8 3.94,5 1.68,6.71 -4.68,16.25 -5.25,17.03 -2.65,5.22 -7.5,16.25 -9.89,25.91 l 0.24,-0.2 c 23.26,-18.8 53.46,-27.51 34.83,-8.75 -11.31,11.4 -15.67,30.94 -16.06,46.89 6.8,-19.1 23.32,-52.18 57.54,-58.74 0.23,-0.02 5.53,-1.04 9.45,-1.04 2.17,0 4.79,0.24 5.71,2.34 0.39,0.88 0.55,2.32 -1,4.06 -2.56,2.89 -6.19,11.58 -8.12,18.61 1.62,-1.24 3.2,-2.56 4.89,-3.65 16.6,-10.91 36.19,-16.61 38.75,-13.08 5.3,-4.36 13.61,-10.09 24.89,-15.06 1.06,-2.42 2.1,-4.84 2.95,-7.27 0,0 5.26,-14.67 7.29,-22.61 -26.85,6.97 -72.8,20.47 -106.99,12.91 -27.01,-5.96 -97.4,-44.95 -80.38,-72.36 1.14,-1.83 2.59,-3.1 4.26,-4.14 l 1.05,-0.4 c 6.73,-3.59 18.58,-5.69 31.05,-5.59 l 4.53,0.13 c 7.25,0.4 26.23,3.13 32.67,5.22 18.82,6.13 35.64,2.45 35.64,2.45 l -0.25,-0.17 c 13.38,-1.57 27.5,-6.27 42.08,-13.73 15.66,-7.98 36.97,-21.16 42.4,-27.67 9.78,-11.78 -5.21,-39.4 -10.8,-56.41 -0.27,-1 -0.61,-2.05 -0.94,-3.09 l -0.39,-1.41 -0.06,0.04 c -0.79,-2.39 -1.76,-4.89 -2.81,-7.45 -0.95,1.2 -1.8,2.36 -2.43,3.44 -5.35,9.03 -8.24,16.93 -11,13.3 -2.77,-3.64 -2.9,-18.05 2.36,-31.66 0.54,-1.4 1.2,-2.73 1.91,-4.06 -6.06,-11.12 -13.42,-22.77 -21.18,-33.92 -6.03,5.72 -12.13,12.17 -15.09,17.31 -6.89,11.83 -10.56,22.21 -14.08,17.43 -3.56,-4.74 -3.65,-23.89 3.1,-41.86 2.2,-5.87 5.57,-11.18 9.09,-15.82 -5.35,-6.76 -10.57,-13.01 -15.34,-18.31 -2.14,-2.37 -4.66,-5.02 -7.34,-7.8 -7.36,7.62 -16.68,18.03 -20.45,25.58 -6.56,13.19 -9.94,24.43 -13.67,19.64 
-3.72,-4.82 -4.45,-25.07 1.76,-44.65 2.55,-8.02 6.95,-15.34 11.3,-21.3 -7.98,-7.6 -16.65,-15.65 -25.55,-23.79 -5.4,7.18 -10.72,15.06 -13.16,21.1 -5.85,14.26 -8.62,26.35 -12.58,21.72 -3.89,-4.67 -5.6,-25.71 -0.35,-46.68 1.29,-5.17 3.26,-10.14 5.51,-14.74 -8.82,-7.89 -17.35,-15.45 -25.18,-22.31 -2.3,5.01 -4.22,9.75 -5.28,13.66 -4.6,16.98 -6.32,31 -10.84,26.71 -4.53,-4.2 -8.38,-26.82 -5.07,-50.86 0.31,-2.3 0.76,-4.59 1.27,-6.88 l -2.21,-1.9 c -5.11,-4.43 -10.68,-9.77 -16.44,-15.64 -1.43,4.52 -2.54,8.74 -3.13,12.25 -2.91,17.7 -3.36,31.85 -7.99,28.47 -4.62,-3.32 -10.15,-25.15 -9.04,-49.57 0.17,-3.84 0.64,-7.72 1.27,-11.52 -6.76,-7.62 -13.45,-15.51 -19.75,-23.18 -3.65,9.38 -6.95,19.1 -8.23,26.11 -3.34,18.53 -4.03,33.4 -8.76,29.75 -4.33,-3.29 -9.08,-23.12 -8.56,-46.19 -4.71,-3.45 -10.82,-19.53 -11.02,-37.76 -0.09,-6.2 1.01,-12.55 2.51,-18.29 -3.05,-4.31 -8.4,-7.29 -12.14,-12.57 -19.3,-27.05 -11.43,-59.27 30.28,-66.44 12.78,-39.46 45.17,-58.61 94.87,-59.05 6.68,-0.07 13.64,2.27 20.18,1.86 7.38,-0.5 13.46,-3.5 20.2,-3.71 36.22,-1.09 55.22,10.43 82.74,18.48 20.55,5.96 41.74,7.89 54.52,12.9 19.57,7.68 28.87,28.77 44.4,38.76 5.78,3.71 16.69,5.46 26.25,11.06 8.07,4.75 15.05,13.39 22.2,16.6 6.24,2.85 16.65,2.28 24.22,5.55 12.67,5.51 26.68,17.4 42.4,25.86 32.91,17.63 59.7,30.68 94.87,46.11 16.8,7.38 34.58,24.72 44.41,25.84 11.65,1.35 25.22,-8.4 34.31,-7.37 18.52,-22.06 24.98,-52.89 24.2,-92.52 1.1,-0.11 0.29,-9.83 0.29,-9.83 0,0 6.47,-85.09 -126.17,-131.03 -41.25,-14.28 -117.07,-51.81 -173.39,-80.72 218.91,6.81 416.91,96.33 564.01,238.52 -44.49,-11.55 -91.92,-41.79 -121.13,-44.56 -79.65,-7.55 -93.92,27.92 -30.29,33.77 24.03,2.21 59.49,4.92 87.8,18.38 49.4,23.51 100.3,46.22 127.3,60.84 4.41,5.31 8.63,10.78 12.92,16.2 -6.55,-0.08 -18.12,-1.77 -36.09,-5.44 -42.4,-8.65 -87.56,-37.47 -122.51,-43.97 -77.25,-14.32 -122.78,-0.55 -82.01,16.35 59.21,24.57 254.55,30.7 274.96,175.91 20.42,145.23 6.13,210.72 -4.09,247.53 -4.89,17.65 -15.85,43.7 -25.98,66.09 -2.85,15.8 -6.61,29.2 -9.88,38.76 
-1.84,4.31 -3.67,9.85 -5.48,16.85 0,0 -2.55,17.91 5.61,35.28 7.23,15.43 32.05,48.86 33.73,81.2 0.03,1.96 0.11,3.92 0.08,5.88 -0.1,2.19 -0.29,4.35 -0.63,6.51 l -0.18,1.91 c -3.19,18.14 -13.58,30.77 -27.69,40.18 -9.09,6.04 -21.46,7.95 -28.26,16.59 5.42,17.09 22.71,23.17 29.91,37.14 4.5,12.39 0.83,25.42 -6.01,37.26 -13.17,14.79 -39.04,27.62 -62.26,36.33 -29.07,10.92 -58.62,19.54 -78.59,35.23 -21.11,11.25 -35.29,33.01 -22.34,61.76 3.37,7.48 4.87,15 4.77,22.16 l -0.09,2.09 c -0.03,1.46 -0.23,2.77 -0.41,4.12 -0.16,1.04 -0.33,2.07 -0.56,3.09 l -0.47,1.79 -0.56,2.02 c -5.32,17.95 -33.54,23.01 -43.6,23.9 -22.79,2.05 -37.52,8.24 -49.63,12.08 9.74,-5.08 19.54,-11.85 32.01,-16.88 7.9,-3.22 19.23,-5.15 28.78,-8.33 l 0.07,0.01 c 0,0 17.62,-3.57 22.46,-15.85 4.85,-12.27 2.04,-23.01 0,-29.9 l -0.74,-1.99 c -1.76,-5.39 -3.73,-10.57 -4.14,-15.93 -1.65,-21.14 11.3,-32.16 18.17,-49.84 12.39,-11.79 30.03,-24 54.5,-35.05 21.98,-9.96 48.29,-15.98 64.59,-25.84 9.45,-5.71 26.64,-19.51 26.25,-29.55 -0.3,-7.68 -14.47,-18.17 -20.19,-25.82 -4.07,-5.45 -6.11,-12.68 -9.75,-17.83 -3.19,-5.02 -7.26,-8.26 -11.96,-8.26 -6.12,0 -20.08,-2.37 -10.89,-3.06 9.18,-0.68 33.01,-11.93 33.01,-11.93 l -0.02,-0.03 c 32.2,-13.33 53.84,-38.06 42,-73.32 -9.91,-29.53 -32.71,-54.25 -34.31,-86.74 -1.7,-34.56 14.12,-63.03 16.14,-90.44 3.42,-46.12 -3.19,-77.9 -12.1,-112.58 -4.18,-16.21 -6.4,-32.18 -10.09,-47.96 -15.72,-67.21 -56.27,-96.32 -111.03,-127.36 -23.74,-13.46 -49.22,-38.28 -74.69,-46.13 -4.09,-1.26 -10.41,0.66 -14.14,0 -23.46,-4.25 -44.57,-15.89 -68.63,-20.3 -22.66,-4.17 -46.42,-2.18 -73.31,-8.99 -13.65,-4.88 -31.13,-12.18 -41.34,-16.53 -15.79,-7.19 -31.26,-14.69 -46.7,-22.26 L 2782.7,2380 c 0,0 -28.58,-16.57 3.07,-4.31 25.46,9.88 112.97,53.51 76.31,27.89 -25.28,-12.72 -38.99,-30.79 -61.35,-46.2 0,0 -75.23,-37.37 -86.52,-47.81 -12.4,-11.48 -27.56,-5.13 -37.52,-16.37 -11.69,-13.22 -26.02,-21.13 -36.24,-24.55 -10.2,-3.43 -21.77,-25.92 -37.43,-37.5 -15.66,-11.59 -49.68,-8.18 -79.64,-21.14 -29.94,-12.95 
-60.97,-13.12 -60.97,-13.12 -2.98,-0.28 -5.85,-0.46 -8.61,-0.54 -1.18,-0.02 -2.28,0 -3.41,0 -17.52,-0.1 -30.91,3.31 -41.17,7.09 -12.51,4.61 -17.1,2.3 -14.29,0.78 2.8,-1.54 10.95,-9.16 10.95,-9.16 -34.93,0.04 -44.47,23.3 -60.55,40.57 7.31,1.04 28.16,-4.06 17.57,2.08 -20.92,10.03 -61.21,10.74 -61.99,34.85 -0.61,18.9 8.07,14.77 8.07,14.77 14.65,-25.56 59.89,-28.07 92.87,-27.71 61.43,0.73 103.01,34.49 143.27,53.58 13.05,7.86 28.3,18.19 28.3,18.19 l -0.09,-0.31 c 12.72,8.3 25.56,16.35 36.43,26.37 18.96,17.45 31.78,40.22 48.46,59.56 1.94,3.28 6.01,7.3 10.85,11.33 4.7,4.43 9.76,8.54 15.38,12.15 l 0.22,-0.45 2.18,1.47 -0.39,0.82 c 40.51,23.02 78.28,48.49 110.13,79.41 l 0.22,0.36 c 4.56,4.16 28.54,30.96 40.13,43.98 13.65,18.24 23.49,41.29 29.19,64.5 2.06,8.4 0.75,18.61 4.03,27.7 5.08,14.1 17.87,26.65 26.25,40.58 8.68,14.51 14.42,30.31 20.18,46.15 25.25,69.52 54.45,124.86 109,162.4 26.29,18.11 58.21,29.77 76.7,57.24 -13.42,-13.68 -30.91,-23.63 -52.03,-30.26 -59.98,-19.78 -98.99,-70.5 -118.6,-102.69 -15.06,-27.12 -26.7,-57.34 -35.25,-90.38 -2.85,40.73 -12.88,72.51 -26.25,103.36 -11.89,27.42 -22.4,56.41 -44.41,73.8 0,0 -0.85,-0.05 -4.26,2.02 -3.4,2.02 56.74,3.53 56.74,3.53 10.53,-0.38 21.23,-1.88 32.09,-3.3 2.03,0.22 4.53,0.11 7.59,-0.44 33.06,-5.84 67.43,6.49 88.83,22.1 11.11,8.1 30.43,14.59 46.46,18.98 0.27,0.7 1.83,1.48 5.83,2.26 0,0 24.5,7.65 50.53,14.82 17.63,4.84 30.48,8.54 39.39,12.95 5.17,3.29 10.09,7.25 13.9,11.99 7.63,9.42 7.77,20.74 14.14,36.9 6.33,16.1 16.01,28.63 16.14,42.45 0.46,45.3 -42.37,81.38 -88.81,57.23 45.72,11.8 78.15,-10.54 72.67,-53.55 -6.76,-53.12 -86.66,-92.5 -135.25,-112.57 -19.26,-7.94 -41.82,-17.98 -64.59,-20.29 -32.85,-3.33 -62.6,3.7 -90.84,1.86 -36.22,-2.4 -60.3,-21.58 -90.84,-24.01 -26.24,-2.1 -22.68,15.68 -45.45,17.71 -4.72,2.68 -19.04,0 -33.28,0.73 -39.38,-10.42 -64.7,7.62 -96.88,22.16 -23.21,10.51 -47.98,20.06 -72.68,22.15 -62,5.27 -99.7,-3.37 -150.38,2.01 -14.29,1.53 -43.95,3.33 -58.03,8.62 -43.84,16.43 -77.41,10.48 -125.04,11.15 -34.65,0.51 
-66.8,2.33 -88.43,-1.46 -5.9,-1.07 -11.87,-2.6 -18.18,-3.7 -46.42,-8.13 -117.12,-12.34 -115.05,-53.52 3.46,2.37 2.5,8.77 8.07,9.23 -0.2,-7.58 -0.39,-15.14 2.01,-20.32 -26.99,0.7 -42.91,19.37 -42.38,40.6 0.9,36.12 58.9,51.22 98.91,60.92 40.1,9.7 81.75,21.05 111.03,25.84 -70.13,-10.33 -144.31,-16.97 -197.84,-42.47 -0.75,2.77 -1.24,5.45 -1.55,8.02 -4.24,19.49 -13.48,49.4 -19.98,78.46 -7.48,33.4 4.07,92.05 4.07,92.05 l 0.29,-0.34 c 4.83,29.8 10.98,43.09 16.94,60.9 12.69,38.01 53.68,49.94 82.38,53.55 -24.19,-17.13 -39.65,-38.36 -49.44,-66.79 -1.21,-10.09 -2.15,-19.55 -2.85,-27.9 21.73,67.74 82.33,93.09 82.33,93.09 l 19.4,20.96 -0.24,-0.95 c 33.45,32.42 77.66,57.54 133.84,59.13 28.31,0.79 62.52,-6.68 62.43,-34.49 -0.05,-15.73 -12.32,-29.09 -29.06,-36.46 4.92,-0.97 9.43,-0.29 13.51,1.4 l 17.4,19.5 c 5.74,14.26 5.24,32.26 -4.46,43.12 25.6,21.93 65.46,-2.67 62.56,-31.76 7.49,6.33 12.06,18.3 6.91,27.78 34.44,20.28 53.72,-28.88 88.39,-17.4 -3.27,9.84 -19.95,7.84 -25.04,16.05 45.31,6.5 85.06,-9.46 130.49,-5.85 2.85,-2.48 5.94,-4.94 9.21,-7.4 -84.98,-9.42 -15.93,-9.37 -15.93,-9.37 10.88,1 37.03,2.83 47.19,1.01 16.65,-3 27.39,1.06 42.09,6.82 0.36,-0.64 1.83,-0.59 2.72,1.08 7.62,3 16.41,6.34 27.56,9.36 4.32,0.61 8.66,1.72 12.88,3.09 l 1.82,0.38 0.25,0.29 c 5.83,2.03 11.31,4.46 16.01,6.75 42.83,20.61 78.34,69.18 114.92,94.66 60.61,42.19 118,-9.93 160.2,-43.76 l -0.27,-0.4 c 10.58,-7.23 24.53,-16.15 39.73,-25.48 39.71,-17.3 92.08,-32.44 120.05,-60.13 22.38,-22.15 29.99,-50.67 39.62,-80.27 8.98,-27.62 26.42,-52.03 36.22,-83.01 1.89,1.58 3.01,3.97 3.64,6.83 -0.27,2.65 -0.7,5.44 0.09,7.16 l 0.53,-0.48 c -0.11,5.41 -1.3,11.74 -3.15,18.42 -8.32,16.97 -22.86,36.29 -21.07,51.41 0,0 51.58,-26.7 110.11,-53.23 20.15,-9.14 38.61,-24.2 54.71,-41.42 l 4.7,0.11 c -0.56,-1.21 -0.5,-2.95 -0.33,-4.78 28.44,-31.94 48.61,-69.98 55.62,-91.97 1.45,-4.52 3.08,-9.09 4.84,-13.64 l 0.01,-0.06 c 2.21,-5.73 4.59,-11.4 7.04,-16.85 l 0.05,0.07 c 1.04,-1.85 1.69,-3.67 2.39,-5.51 8.44,-18.32 16.73,-33.17 
19.05,-37.24 9.93,-12.38 17.35,-29.34 26.56,-40.46 -0.53,6.85 -0.88,13.75 -1.56,20.55 z m -826.19,277.09 c 87.27,-80.81 98.01,-168.37 104.71,-193.66 6.6,-25.12 9.82,-16.62 9.82,-16.62 1.21,85.58 -66.47,193.97 -127.36,229.62 -60,35.19 -215.11,29.07 -251.54,31.9 -35.85,2.74 -53.77,-42.93 -53.77,-42.93 -0.46,-9.67 7.43,1.42 7.43,1.42 23.42,42.28 44.01,34.55 44.01,34.55 74.22,-21.72 87.45,-130.94 90.54,-156.26 3.07,-25.11 9.41,-8.76 9.41,-8.76 7.21,62.85 -38.53,139.45 -48.15,149.3 -9.61,9.76 -0.43,15.3 30.23,6.7 92.32,-25.96 100.96,-174.08 106.52,-192.04 5.57,-17.89 9.03,-0.68 9.03,-0.68 -12.48,136.19 -64.16,169.77 -77.93,188.5 -5.72,7.77 40.96,8.94 61.5,-4.45 116.01,-75.08 102.99,-218.89 113.43,-198.36 10.58,20.95 -29.19,151.09 -85.08,195.37 -11.18,8.84 38.79,-6.6 57.2,-23.6 z m 214.59,112.44 c 23.17,-14 34.21,-17.55 34.21,-17.55 0,0 -16.18,-4.55 -23.54,-16.71 -7.37,-12.16 -16.19,-21.52 -41.94,-17.7 -27.04,4.02 -31.99,-0.22 -45.6,-2.44 0,0 4.76,13.86 17.65,20.87 12.87,7 28.05,52.37 59.22,33.53 z m 2.05,33.45 c 8.47,2.44 35.81,-1.73 18.17,1.85 -17.72,4.96 -28.72,-0.09 -40.36,-7.4 -26.76,-16.75 -48.56,-55.22 -76.72,-71.95 -12.22,-7.31 -24.17,-5.63 -34.33,-18.48 6.58,-13.17 13.26,-5.42 22.22,-7.38 19.86,-4.32 38.16,-12.04 56.52,-23.96 20.33,-13.25 42.35,-37.56 72.67,-35.09 9.67,0.77 27.24,10.15 34.32,14.77 8.84,5.77 29.83,19.54 24.22,40.6 -2.49,9.33 -13.59,10.83 -20.2,18.46 -9.15,10.61 -8.8,21.03 -16.13,33.21 15.98,-3.22 25.13,-12.71 42.37,-14.77 -27.15,24.39 -58.44,45 -96.88,59.07 2.88,1.64 4.72,8.35 14.13,11.07 z M 2223.96,3307.9 c -2.19,16.18 -17.4,30.55 -44.41,22.16 0,0 -34.92,-7.98 -44.43,-69.84 0,0 -1.66,-31.21 8.1,-24.31 -8.22,37.5 27.37,54.49 35.53,58.3 11.48,5.13 20.97,3.4 21.69,-6.37 0.79,-10.54 -11.24,-15.2 -14.84,-26.07 12.77,7.22 41.33,24.13 38.36,46.13 z m 668.31,-133.09 c 27.34,4.37 78.51,23.3 99.04,36.15 22.28,13.93 38.84,67.36 16.13,96.94 -10.17,13.24 -34.52,30.03 -40.38,31.38 -6.68,-3.88 4.2,-7.41 6.05,-9.22 7.7,-7.6 24.88,-21.51 30.28,-33.22 
11.03,-23.95 7.65,-60.6 -12.12,-73.85 -13.32,-8.9 -78.47,-33.25 -106.97,-33.21 -80.17,0.11 -88.31,88.29 -137.27,118.12 0.87,-8.43 7.02,-26.13 14.02,-35.28 27.53,-36 53.83,-110.21 131.22,-97.81" + inkscape:connector-curvature="0" /> + <path + id="path92" + style="fill:#5d802f;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2433.33,2370.61 c 0.44,-4.18 2.32,-8.2 3.64,-12.03 0.27,19.49 1.09,47.95 21.49,59.93 3.2,1.86 6.38,2.84 9.52,2.84 8.01,0 13.14,-5.65 16.36,-12.02 -0.35,15.85 1.93,31.07 12.45,38.27 5.18,3.59 10,5.32 14.61,5.32 13.21,0 18.54,-13.65 21.4,-20.98 0.47,-1.15 0.86,-2.18 1.27,-3.11 5.43,-12.42 19.9,-37.76 26.67,-43.33 2.68,2.13 6.03,4.9 7.08,6.42 -0.37,0.5 -0.99,1.17 -1.78,2.1 -42.06,48.68 -29.05,81.43 -10.7,100.35 1.31,1.51 4.77,4.85 9.87,4.85 6.26,0 11.15,-4.56 14.53,-13.49 5.33,-14.1 26.51,-41.61 30.55,-44.43 l 0.99,-0.49 c 2.07,-1.05 4.66,-2.36 6.14,-2.36 l 1.2,0.74 c 1.32,1.41 2.02,2.65 2.41,3.58 -8.82,10.66 -39.16,50.51 -27.93,82.65 1.66,4.78 6.74,19.34 18.65,19.34 10.99,0 18.37,-13.19 20.89,-18.44 6.44,-8.21 23.48,-28.51 32.54,-33.85 2.24,-1.27 4.78,-1.95 7.56,-1.95 4.48,0 8.36,1.68 10.27,2.81 -10.65,8.04 -31.32,39.91 -24.68,67.59 2.94,12.3 13.27,17.81 22.2,17.81 4.37,0 8.25,-1.27 10.93,-3.61 1.01,-0.87 3.11,-3.14 5.94,-6.27 7.87,-8.61 26.27,-28.86 37.27,-32.77 2.14,-0.75 4.09,-1.15 5.87,-1.15 4.13,0 6.86,2.09 9.5,4.82 -0.73,0.56 -1.66,1.22 -2.86,2.02 -4.97,3.36 -29.76,28.79 -19.55,60.6 3.72,11.63 12.01,12.88 15.4,12.88 7.95,0 15.18,-5.84 18.19,-9.31 5.35,-6.09 26.76,-24.07 32.16,-24.45 4.01,0.71 2.25,2.71 8.31,10.37 5,11.5 -10.48,12.08 -25.14,16.79 -8.01,2.57 -25.83,21.87 -41.9,14.16 -16.06,-7.69 -172.23,-113.68 -103.34,-29.72 28.99,35.32 66.68,50.35 63.89,59.45 -4.26,13.83 -48.2,14.69 -60.75,23.8 -6.53,4.73 -12.16,-5.39 -16.07,-14.7 -39.01,-152.94 -175.78,-233.82 -188.79,-242.48 0,0 -35.95,-22.04 -26.53,-21.68 9.43,0.34 27.02,3.64 30.27,-26.87" + inkscape:connector-curvature="0" /> + <path + id="path94" + 
style="fill:#5d802f;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2747.21,2810.08 c -62.51,-19.99 -119.08,-11.38 -119.08,-11.38 -22.18,0.52 -23.98,27.54 -27.27,49.98 -3.36,23 22.07,-23.5 53.19,-30.48 29.63,-6.74 40.8,-4.57 37.8,18.39 -3.21,24.06 27.8,-3.65 46.52,-7.14 0,0 2.3,-1.48 12,-0.96 9.68,0.53 27.48,-6.91 -3.16,-18.41" + inkscape:connector-curvature="0" /> + </g> + </g> + </g> + </g> +</svg> diff -Nru nodejs-0.11.13/deps/uv/include/uv-aix.h nodejs-0.11.15/deps/uv/include/uv-aix.h --- nodejs-0.11.13/deps/uv/include/uv-aix.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-aix.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef UV_AIX_H +#define UV_AIX_H + +#define UV_PLATFORM_LOOP_FIELDS \ + int fs_fd; \ + +#define UV_PLATFORM_FS_EVENT_FIELDS \ + uv__io_t event_watcher; \ + char *dir_filename; \ + +#endif /* UV_AIX_H */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-bsd.h nodejs-0.11.15/deps/uv/include/uv-bsd.h --- nodejs-0.11.13/deps/uv/include/uv-bsd.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-bsd.h 2015-01-20 21:22:17.000000000 +0000 @@ -31,6 +31,4 @@ #define UV_HAVE_KQUEUE 1 -#define UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS - #endif /* UV_BSD_H */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-darwin.h nodejs-0.11.15/deps/uv/include/uv-darwin.h --- nodejs-0.11.13/deps/uv/include/uv-darwin.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-darwin.h 2015-01-20 21:22:17.000000000 +0000 @@ -58,6 +58,4 @@ #define UV_HAVE_KQUEUE 1 -#define UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS - #endif /* UV_DARWIN_H */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-errno.h nodejs-0.11.15/deps/uv/include/uv-errno.h --- nodejs-0.11.13/deps/uv/include/uv-errno.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-errno.h 2015-01-20 21:22:17.000000000 +0000 @@ -39,7 +39,6 @@ #define UV__EAI_OVERFLOW (-3009) #define UV__EAI_SERVICE (-3010) #define UV__EAI_SOCKTYPE (-3011) -#define UV__EAI_SYSTEM (-3012) /* TODO(bnoordhuis) Return system error. 
*/ #define UV__EAI_BADHINTS (-3013) #define UV__EAI_PROTOCOL (-3014) @@ -58,12 +57,6 @@ # define UV__EACCES (-4092) #endif -#if defined(EADDRINFO) && !defined(_WIN32) -# define UV__EADDRINFO EADDRINFO -#else -# define UV__EADDRINFO (-4091) -#endif - #if defined(EADDRINUSE) && !defined(_WIN32) # define UV__EADDRINUSE (-EADDRINUSE) #else diff -Nru nodejs-0.11.13/deps/uv/include/uv.h nodejs-0.11.15/deps/uv/include/uv.h --- nodejs-0.11.13/deps/uv/include/uv.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv.h 2015-01-20 21:22:17.000000000 +0000 @@ -19,7 +19,7 @@ * IN THE SOFTWARE. */ -/* See https://github.com/joyent/libuv#documentation for documentation. */ +/* See https://github.com/libuv/libuv#documentation for documentation. */ #ifndef UV_H #define UV_H @@ -83,7 +83,6 @@ XX(EAI_PROTOCOL, "resolved protocol is unknown") \ XX(EAI_SERVICE, "service not available for socket type") \ XX(EAI_SOCKTYPE, "socket type not supported") \ - XX(EAI_SYSTEM, "system error") \ XX(EALREADY, "connection already in progress") \ XX(EBADF, "bad file descriptor") \ XX(EBUSY, "resource busy or locked") \ @@ -167,6 +166,7 @@ XX(FS, fs) \ XX(WORK, work) \ XX(GETADDRINFO, getaddrinfo) \ + XX(GETNAMEINFO, getnameinfo) \ typedef enum { #define XX(code, _) UV_ ## code = UV__ ## code, @@ -216,6 +216,7 @@ /* Request types. */ typedef struct uv_req_s uv_req_t; typedef struct uv_getaddrinfo_s uv_getaddrinfo_t; +typedef struct uv_getnameinfo_s uv_getnameinfo_t; typedef struct uv_shutdown_s uv_shutdown_t; typedef struct uv_write_s uv_write_t; typedef struct uv_connect_s uv_connect_t; @@ -226,7 +227,11 @@ /* None of the above. */ typedef struct uv_cpu_info_s uv_cpu_info_t; typedef struct uv_interface_address_s uv_interface_address_t; +typedef struct uv_dirent_s uv_dirent_t; +typedef enum { + UV_LOOP_BLOCK_SIGNAL +} uv_loop_option; typedef enum { UV_RUN_DEFAULT = 0, @@ -235,168 +240,44 @@ } uv_run_mode; -/* - * Returns the libuv version packed into a single integer. 
8 bits are used for - * each component, with the patch number stored in the 8 least significant - * bits. E.g. for libuv 1.2.3 this would return 0x010203. - */ UV_EXTERN unsigned int uv_version(void); - -/* - * Returns the libuv version number as a string. For non-release versions - * "-pre" is appended, so the version number could be "1.2.3-pre". - */ UV_EXTERN const char* uv_version_string(void); - -/* - * All functions besides uv_run() are non-blocking. - * - * All callbacks in libuv are made asynchronously. That is they are never - * made by the function that takes them as a parameter. - */ - -/* - * Returns the default loop. - */ UV_EXTERN uv_loop_t* uv_default_loop(void); - -/* - * Initializes a uv_loop_t structure. - */ UV_EXTERN int uv_loop_init(uv_loop_t* loop); - -/* - * Closes all internal loop resources. This function must only be called once - * the loop has finished it's execution or it will return UV_EBUSY. After this - * function returns the user shall free the memory allocated for the loop. - */ UV_EXTERN int uv_loop_close(uv_loop_t* loop); - /* - * Allocates and initializes a new loop. - * NOTE: This function is DEPRECATED (to be removed after 0.12), users should - * allocate the loop manually and use uv_loop_init instead. + * NOTE: + * This function is DEPRECATED (to be removed after 0.12), users should + * allocate the loop manually and use uv_loop_init instead. */ UV_EXTERN uv_loop_t* uv_loop_new(void); - /* - * Cleans up a loop once it has finished executio and frees its memory. - * NOTE: This function is DEPRECATED (to be removed after 0.12). Users should use - * uv_loop_close and free the memory manually instead. + * NOTE: + * This function is DEPRECATED (to be removed after 0.12). Users should use + * uv_loop_close and free the memory manually instead. */ UV_EXTERN void uv_loop_delete(uv_loop_t*); - -/* - * This function runs the event loop. 
It will act differently depending on the - * specified mode: - * - UV_RUN_DEFAULT: Runs the event loop until the reference count drops to - * zero. Always returns zero. - * - UV_RUN_ONCE: Poll for new events once. Note that this function blocks if - * there are no pending events. Returns zero when done (no active handles - * or requests left), or non-zero if more events are expected (meaning you - * should run the event loop again sometime in the future). - * - UV_RUN_NOWAIT: Poll for new events once but don't block if there are no - * pending events. - */ -UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode); - -/* - * This function checks whether the reference count, the number of active - * handles or requests left in the event loop, is non-zero. - */ +UV_EXTERN size_t uv_loop_size(void); UV_EXTERN int uv_loop_alive(const uv_loop_t* loop); +UV_EXTERN int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...); -/* - * This function will stop the event loop by forcing uv_run to end - * as soon as possible, but not sooner than the next loop iteration. - * If this function was called before blocking for i/o, the loop won't - * block for i/o on this iteration. - */ +UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode); UV_EXTERN void uv_stop(uv_loop_t*); -/* - * Manually modify the event loop's reference count. Useful if the user wants - * to have a handle or timeout that doesn't keep the loop alive. - */ UV_EXTERN void uv_ref(uv_handle_t*); UV_EXTERN void uv_unref(uv_handle_t*); UV_EXTERN int uv_has_ref(const uv_handle_t*); -/* - * Update the event loop's concept of "now". Libuv caches the current time - * at the start of the event loop tick in order to reduce the number of - * time-related system calls. - * - * You won't normally need to call this function unless you have callbacks - * that block the event loop for longer periods of time, where "longer" is - * somewhat subjective but probably on the order of a millisecond or more. 
- */ UV_EXTERN void uv_update_time(uv_loop_t*); - -/* - * Return the current timestamp in milliseconds. The timestamp is cached at - * the start of the event loop tick, see |uv_update_time()| for details and - * rationale. - * - * The timestamp increases monotonically from some arbitrary point in time. - * Don't make assumptions about the starting point, you will only get - * disappointed. - * - * Use uv_hrtime() if you need sub-millisecond granularity. - */ UV_EXTERN uint64_t uv_now(const uv_loop_t*); -/* - * Get backend file descriptor. Only kqueue, epoll and event ports are - * supported. - * - * This can be used in conjunction with `uv_run(loop, UV_RUN_NOWAIT)` to - * poll in one thread and run the event loop's event callbacks in another. - * - * Useful for embedding libuv's event loop in another event loop. - * See test/test-embed.c for an example. - * - * Note that embedding a kqueue fd in another kqueue pollset doesn't work on - * all platforms. It's not an error to add the fd but it never generates - * events. - */ UV_EXTERN int uv_backend_fd(const uv_loop_t*); - -/* - * Get the poll timeout. The return value is in milliseconds, or -1 for no - * timeout. - */ UV_EXTERN int uv_backend_timeout(const uv_loop_t*); - -/* - * Should prepare a buffer that libuv can use to read data into. - * - * `suggested_size` is a hint. Returning a buffer that is smaller is perfectly - * okay as long as `buf.len > 0`. - * - * If you return a buffer with `buf.len == 0`, libuv skips the read and calls - * your read or recv callback with nread=UV_ENOBUFS. - * - * Note that returning a zero-length buffer does not stop the handle, call - * uv_read_stop() or uv_udp_recv_stop() for that. - */ typedef void (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf); - -/* - * `nread` is > 0 if there is data available, 0 if libuv is done reading for - * now, or < 0 on error. - * - * The callee is responsible for closing the stream when an error happens. 
- * Trying to read from the stream again is undefined. - * - * The callee is responsible for freeing the buffer, libuv does not reuse it. - * The buffer may be a null buffer (where buf->base=NULL and buf->len=0) on - * EOF or error. - */ typedef void (*uv_read_cb)(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf); @@ -419,6 +300,10 @@ typedef void (*uv_getaddrinfo_cb)(uv_getaddrinfo_t* req, int status, struct addrinfo* res); +typedef void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req, + int status, + const char* hostname, + const char* service); typedef struct { long tv_sec; @@ -446,12 +331,6 @@ } uv_stat_t; -/* -* This will be called repeatedly after the uv_fs_event_t is initialized. -* If uv_fs_event_t was initialized with a directory the filename parameter -* will be a relative path to a file contained in the directory. -* The events parameter is an ORed mask of enum uv_fs_event elements. -*/ typedef void (*uv_fs_event_cb)(uv_fs_event_t* handle, const char* filename, int events, @@ -471,9 +350,6 @@ } uv_membership; -/* - * Most functions return 0 on success or an error code < 0 on failure. - */ UV_EXTERN const char* uv_strerror(int err); UV_EXTERN const char* uv_err_name(int err); @@ -485,6 +361,7 @@ uv_req_type type; \ /* private */ \ void* active_queue[2]; \ + void* reserved[4]; \ UV_REQ_PRIVATE_FIELDS \ /* Abstract base class of all requests. */ @@ -493,18 +370,10 @@ }; -/* Platform-specific request types */ +/* Platform-specific request types. */ UV_PRIVATE_REQ_TYPES -/* - * uv_shutdown_t is a subclass of uv_req_t - * - * Shutdown the outgoing (write) side of a duplex stream. It waits for - * pending write requests to complete. The handle should refer to a - * initialized stream. req should be an uninitialized shutdown request - * struct. The cb is called after shutdown is complete. 
- */ UV_EXTERN int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb); @@ -526,73 +395,28 @@ /* private */ \ uv_close_cb close_cb; \ void* handle_queue[2]; \ + void* reserved[4]; \ UV_HANDLE_PRIVATE_FIELDS \ -/* The abstract base class of all handles. */ +/* The abstract base class of all handles. */ struct uv_handle_s { UV_HANDLE_FIELDS }; -/* - * Returns size of various handle types, useful for FFI - * bindings to allocate correct memory without copying struct - * definitions - */ UV_EXTERN size_t uv_handle_size(uv_handle_type type); - -/* - * Returns size of request types, useful for dynamic lookup with FFI - */ UV_EXTERN size_t uv_req_size(uv_req_type type); -/* - * Returns non-zero if the handle is active, zero if it's inactive. - * - * What "active" means depends on the type of handle: - * - * - A uv_async_t handle is always active and cannot be deactivated, except - * by closing it with uv_close(). - * - * - A uv_pipe_t, uv_tcp_t, uv_udp_t, etc. handle - basically any handle that - * deals with I/O - is active when it is doing something that involves I/O, - * like reading, writing, connecting, accepting new connections, etc. - * - * - A uv_check_t, uv_idle_t, uv_timer_t, etc. handle is active when it has - * been started with a call to uv_check_start(), uv_idle_start(), etc. - * - * Rule of thumb: if a handle of type uv_foo_t has a uv_foo_start() - * function, then it's active from the moment that function is called. - * Likewise, uv_foo_stop() deactivates the handle again. - * - */ UV_EXTERN int uv_is_active(const uv_handle_t* handle); -/* - * Walk the list of open handles. - */ UV_EXTERN void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg); - -/* - * Request handle to be closed. close_cb will be called asynchronously after - * this call. This MUST be called on each handle before memory is released. 
- * - * Note that handles that wrap file descriptors are closed immediately but - * close_cb will still be deferred to the next iteration of the event loop. - * It gives you a chance to free up any resources associated with the handle. - * - * In-progress requests, like uv_connect_t or uv_write_t, are cancelled and - * have their callbacks called asynchronously with status=UV_ECANCELED. - */ UV_EXTERN void uv_close(uv_handle_t* handle, uv_close_cb close_cb); +UV_EXTERN int uv_send_buffer_size(uv_handle_t* handle, int* value); +UV_EXTERN int uv_recv_buffer_size(uv_handle_t* handle, int* value); + +UV_EXTERN int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd); -/* - * Constructor for uv_buf_t. - * Due to platform differences the user cannot rely on the ordering of the - * base and len members of the uv_buf_t struct. The user is responsible for - * freeing base after the uv_buf_t is done. Return struct passed by value. - */ UV_EXTERN uv_buf_t uv_buf_init(char* base, unsigned int len); @@ -605,7 +429,7 @@ UV_STREAM_PRIVATE_FIELDS /* - * uv_stream_t is a subclass of uv_handle_t + * uv_stream_t is a subclass of uv_handle_t. * * uv_stream is an abstract class. * @@ -617,92 +441,29 @@ }; UV_EXTERN int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb); - -/* - * This call is used in conjunction with uv_listen() to accept incoming - * connections. Call uv_accept after receiving a uv_connection_cb to accept - * the connection. Before calling uv_accept use uv_*_init() must be - * called on the client. Non-zero return value indicates an error. - * - * When the uv_connection_cb is called it is guaranteed that uv_accept will - * complete successfully the first time. If you attempt to use it more than - * once, it may fail. It is suggested to only call uv_accept once per - * uv_connection_cb call. - */ UV_EXTERN int uv_accept(uv_stream_t* server, uv_stream_t* client); -/* - * Read data from an incoming stream. 
The callback will be made several - * times until there is no more data to read or uv_read_stop is called. - * When we've reached EOF nread will be set to UV_EOF. - * - * When nread < 0, the buf parameter might not point to a valid buffer; - * in that case buf.len and buf.base are both set to 0. - * - * Note that nread might also be 0, which does *not* indicate an error or - * eof; it happens when libuv requested a buffer through the alloc callback - * but then decided that it didn't need that buffer. - */ UV_EXTERN int uv_read_start(uv_stream_t*, uv_alloc_cb alloc_cb, uv_read_cb read_cb); - UV_EXTERN int uv_read_stop(uv_stream_t*); - -/* - * Write data to stream. Buffers are written in order. Example: - * - * uv_buf_t a[] = { - * { .base = "1", .len = 1 }, - * { .base = "2", .len = 1 } - * }; - * - * uv_buf_t b[] = { - * { .base = "3", .len = 1 }, - * { .base = "4", .len = 1 } - * }; - * - * uv_write_t req1; - * uv_write_t req2; - * - * // writes "1234" - * uv_write(&req1, stream, a, 2); - * uv_write(&req2, stream, b, 2); - * - */ UV_EXTERN int uv_write(uv_write_t* req, uv_stream_t* handle, const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb); - -/* - * Extended write function for sending handles over a pipe. The pipe must be - * initialized with ipc == 1. - * send_handle must be a TCP socket or pipe, which is a server or a connection - * (listening or connected state). Bound sockets or pipes will be assumed to - * be servers. - */ UV_EXTERN int uv_write2(uv_write_t* req, uv_stream_t* handle, const uv_buf_t bufs[], unsigned int nbufs, uv_stream_t* send_handle, uv_write_cb cb); - -/* - * Same as `uv_write()`, but won't queue write request if it can't be completed - * immediately. 
- * Will return either: - * - >= 0: number of bytes written (can be less than the supplied buffer size) - * - < 0: negative error code - */ UV_EXTERN int uv_try_write(uv_stream_t* handle, const uv_buf_t bufs[], unsigned int nbufs); -/* uv_write_t is a subclass of uv_req_t */ +/* uv_write_t is a subclass of uv_req_t. */ struct uv_write_s { UV_REQ_FIELDS uv_write_cb cb; @@ -712,46 +473,16 @@ }; -/* - * Used to determine whether a stream is readable or writable. - */ UV_EXTERN int uv_is_readable(const uv_stream_t* handle); UV_EXTERN int uv_is_writable(const uv_stream_t* handle); - -/* - * Enable or disable blocking mode for a stream. - * - * When blocking mode is enabled all writes complete synchronously. The - * interface remains unchanged otherwise, e.g. completion or failure of the - * operation will still be reported through a callback which is made - * asychronously. - * - * Relying too much on this API is not recommended. It is likely to change - * significantly in the future. - * - * Currently this only works on Windows and only for uv_pipe_t handles. - * - * Also libuv currently makes no ordering guarantee when the blocking mode - * is changed after write requests have already been submitted. Therefore it is - * recommended to set the blocking mode immediately after opening or creating - * the stream. - */ UV_EXTERN int uv_stream_set_blocking(uv_stream_t* handle, int blocking); - -/* - * Used to determine whether a stream is closing or closed. - * - * N.B. is only valid between the initialization of the handle - * and the arrival of the close callback, and cannot be used - * to validate the handle. - */ UV_EXTERN int uv_is_closing(const uv_handle_t* handle); /* - * uv_tcp_t is a subclass of uv_stream_t + * uv_tcp_t is a subclass of uv_stream_t. * * Represents a TCP stream or TCP server. */ @@ -762,49 +493,18 @@ }; UV_EXTERN int uv_tcp_init(uv_loop_t*, uv_tcp_t* handle); - -/* - * Opens an existing file descriptor or SOCKET as a tcp handle. 
- */ UV_EXTERN int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock); - -/* Enable/disable Nagle's algorithm. */ UV_EXTERN int uv_tcp_nodelay(uv_tcp_t* handle, int enable); - -/* - * Enable/disable TCP keep-alive. - * - * `delay` is the initial delay in seconds, ignored when `enable` is zero. - */ UV_EXTERN int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay); - -/* - * Enable/disable simultaneous asynchronous accept requests that are - * queued by the operating system when listening for new tcp connections. - * This setting is used to tune a tcp server for the desired performance. - * Having simultaneous accepts can significantly improve the rate of - * accepting connections (which is why it is enabled by default) but - * may lead to uneven load distribution in multi-process setups. - */ UV_EXTERN int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable); enum uv_tcp_flags { - /* Used with uv_tcp_bind, when an IPv6 address is used */ + /* Used with uv_tcp_bind, when an IPv6 address is used. */ UV_TCP_IPV6ONLY = 1 }; -/* - * Bind the handle to an address and port. `addr` should point to an - * initialized struct sockaddr_in or struct sockaddr_in6. - * - * When the port is already taken, you can expect to see an UV_EADDRINUSE - * error from either uv_tcp_bind(), uv_listen() or uv_tcp_connect(). - * - * That is, a successful call to uv_tcp_bind() does not guarantee that - * the call to uv_listen() or uv_tcp_connect() will succeed as well. - */ UV_EXTERN int uv_tcp_bind(uv_tcp_t* handle, const struct sockaddr* addr, unsigned int flags); @@ -814,21 +514,12 @@ UV_EXTERN int uv_tcp_getpeername(const uv_tcp_t* handle, struct sockaddr* name, int* namelen); - -/* - * Establish an IPv4 or IPv6 TCP connection. Provide an initialized TCP handle - * and an uninitialized uv_connect_t*. `addr` should point to an initialized - * struct sockaddr_in or struct sockaddr_in6. 
- * - * The callback is made when the connection has been established or when a - * connection error happened. - */ UV_EXTERN int uv_tcp_connect(uv_connect_t* req, uv_tcp_t* handle, const struct sockaddr* addr, uv_connect_cb cb); -/* uv_connect_t is a subclass of uv_req_t */ +/* uv_connect_t is a subclass of uv_req_t. */ struct uv_connect_s { UV_REQ_FIELDS uv_connect_cb cb; @@ -849,9 +540,10 @@ * remainder was discarded by the OS. Used in uv_udp_recv_cb. */ UV_UDP_PARTIAL = 2, - /* Indicates if SO_REUSEADDR will be set when binding the handle. + /* + * Indicates if SO_REUSEADDR will be set when binding the handle. * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other - * UNIX platforms, it sets the SO_REUSEADDR flag. What that means is that + * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that * multiple threads or processes can bind to the same address without error * (provided they all set the flag) but only the last one to bind will receive * any traffic, in effect "stealing" the port from the previous listener. @@ -859,39 +551,30 @@ UV_UDP_REUSEADDR = 4 }; -/* - * Called after uv_udp_send(). status 0 indicates - * success otherwise error. - */ typedef void (*uv_udp_send_cb)(uv_udp_send_t* req, int status); - -/* - * Callback that is invoked when a new UDP datagram is received. - * - * handle UDP handle. - * nread Number of bytes that have been received. - * 0 if there is no more data to read. You may - * discard or repurpose the read buffer. - * < 0 if a transmission error was detected. - * buf uv_buf_t with the received data. - * addr struct sockaddr* containing the address of the sender. - * Can be NULL. Valid for the duration of the callback only. - * flags One or more OR'ed UV_UDP_* constants. - * Right now only UV_UDP_PARTIAL is used. 
- */ typedef void (*uv_udp_recv_cb)(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf, const struct sockaddr* addr, unsigned flags); -/* uv_udp_t is a subclass of uv_handle_t */ +/* uv_udp_t is a subclass of uv_handle_t. */ struct uv_udp_s { UV_HANDLE_FIELDS + /* read-only */ + /* + * Number of bytes queued for sending. This field strictly shows how much + * information is currently queued. + */ + size_t send_queue_size; + /* + * Number of send requests currently in the queue awaiting to be processed. + */ + size_t send_queue_count; UV_UDP_PRIVATE_FIELDS }; -/* uv_udp_send_t is a subclass of uv_req_t */ +/* uv_udp_send_t is a subclass of uv_req_t. */ struct uv_udp_send_s { UV_REQ_FIELDS uv_udp_t* handle; @@ -899,193 +582,43 @@ UV_UDP_SEND_PRIVATE_FIELDS }; -/* - * Initialize a new UDP handle. The actual socket is created lazily. - * Returns 0 on success. - */ UV_EXTERN int uv_udp_init(uv_loop_t*, uv_udp_t* handle); - -/* - * Opens an existing file descriptor or SOCKET as a udp handle. - * - * Unix only: - * The only requirement of the sock argument is that it follows the - * datagram contract (works in unconnected mode, supports sendmsg()/recvmsg(), - * etc.). In other words, other datagram-type sockets like raw sockets or - * netlink sockets can also be passed to this function. - * - * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other - * UNIX platforms, it sets the SO_REUSEADDR flag. What that means is that - * multiple threads or processes can bind to the same address without error - * (provided they all set the flag) but only the last one to bind will receive - * any traffic, in effect "stealing" the port from the previous listener. - * This behavior is something of an anomaly and may be replaced by an explicit - * opt-in mechanism in future versions of libuv. - */ UV_EXTERN int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock); - -/* - * Bind to an IP address and port. - * - * Arguments: - * handle UDP handle. 
Should have been initialized with `uv_udp_init`. - * addr struct sockaddr_in or struct sockaddr_in6 with the address and - * port to bind to. - * flags Indicate how the socket will be bound, UV_UDP_IPV6ONLY and - * UV_UDP_REUSEADDR are supported. - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_bind(uv_udp_t* handle, const struct sockaddr* addr, unsigned int flags); -UV_EXTERN int uv_udp_getsockname(uv_udp_t* handle, +UV_EXTERN int uv_udp_getsockname(const uv_udp_t* handle, struct sockaddr* name, int* namelen); - -/* - * Set membership for a multicast address - * - * Arguments: - * handle UDP handle. Should have been initialized with - * `uv_udp_init`. - * multicast_addr multicast address to set membership for - * interface_addr interface address - * membership Should be UV_JOIN_GROUP or UV_LEAVE_GROUP - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr, const char* interface_addr, uv_membership membership); - -/* - * Set IP multicast loop flag. Makes multicast packets loop back to - * local sockets. - * - * Arguments: - * handle UDP handle. Should have been initialized with - * `uv_udp_init`. - * on 1 for on, 0 for off - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_set_multicast_loop(uv_udp_t* handle, int on); - -/* - * Set the multicast ttl - * - * Arguments: - * handle UDP handle. Should have been initialized with - * `uv_udp_init`. - * ttl 1 through 255 - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl); - - -/* - * Set the multicast interface to send on - * - * Arguments: - * handle UDP handle. Should have been initialized with - * `uv_udp_init`. - * interface_addr interface address - * - * Returns: - * 0 on success, or an error code < 0 on failure. 
- */ UV_EXTERN int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr); - -/* - * Set broadcast on or off - * - * Arguments: - * handle UDP handle. Should have been initialized with - * `uv_udp_init`. - * on 1 for on, 0 for off - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_set_broadcast(uv_udp_t* handle, int on); - -/* - * Set the time to live - * - * Arguments: - * handle UDP handle. Should have been initialized with - * `uv_udp_init`. - * ttl 1 through 255 - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_set_ttl(uv_udp_t* handle, int ttl); - -/* - * Send data. If the socket has not previously been bound with `uv_udp_bind,` - * it is bound to 0.0.0.0 (the "all interfaces" address) and a random - * port number. - * - * Arguments: - * req UDP request handle. Need not be initialized. - * handle UDP handle. Should have been initialized with `uv_udp_init`. - * bufs List of buffers to send. - * nbufs Number of buffers in `bufs`. - * addr struct sockaddr_in or struct sockaddr_in6 with the address and - * port of the remote peer. - * send_cb Callback to invoke when the data has been sent out. - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr, uv_udp_send_cb send_cb); - -/* - * Receive data. If the socket has not previously been bound with `uv_udp_bind` - * it is bound to 0.0.0.0 (the "all interfaces" address) and a random - * port number. - * - * Arguments: - * handle UDP handle. Should have been initialized with `uv_udp_init`. - * alloc_cb Callback to invoke when temporary storage is needed. - * recv_cb Callback to invoke with received data. - * - * Returns: - * 0 on success, or an error code < 0 on failure. 
- */ +UV_EXTERN int uv_udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr); UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, uv_udp_recv_cb recv_cb); - -/* - * Stop listening for incoming datagrams. - * - * Arguments: - * handle UDP handle. Should have been initialized with `uv_udp_init`. - * - * Returns: - * 0 on success, or an error code < 0 on failure. - */ UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle); /* - * uv_tty_t is a subclass of uv_stream_t + * uv_tty_t is a subclass of uv_stream_t. * * Representing a stream for the console. */ @@ -1095,51 +628,18 @@ UV_TTY_PRIVATE_FIELDS }; -/* - * Initialize a new TTY stream with the given file descriptor. Usually the - * file descriptor will be - * 0 = stdin - * 1 = stdout - * 2 = stderr - * The last argument, readable, specifies if you plan on calling - * uv_read_start with this stream. stdin is readable, stdout is not. - * - * TTY streams which are not readable have blocking writes. - */ UV_EXTERN int uv_tty_init(uv_loop_t*, uv_tty_t*, uv_file fd, int readable); - -/* - * Set mode. 0 for normal, 1 for raw. - */ UV_EXTERN int uv_tty_set_mode(uv_tty_t*, int mode); - -/* - * To be called when the program exits. Resets TTY settings to default - * values for the next process to take over. - * - * This function is async signal-safe on UNIX platforms but can fail with error - * code UV_EBUSY if you call it when execution is inside uv_tty_set_mode(). - */ UV_EXTERN int uv_tty_reset_mode(void); - -/* - * Gets the current Window size. On success zero is returned. - */ UV_EXTERN int uv_tty_get_winsize(uv_tty_t*, int* width, int* height); -/* - * Used to detect what type of stream should be used with a given file - * descriptor. Usually this will be used during initialization to guess the - * type of the stdio streams. - * For isatty() functionality use this function and test for UV_TTY. 
- */ UV_EXTERN uv_handle_type uv_guess_handle(uv_file file); /* - * uv_pipe_t is a subclass of uv_stream_t + * uv_pipe_t is a subclass of uv_stream_t. * * Representing a pipe stream or pipe server. On Windows this is a Named - * Pipe. On Unix this is a UNIX domain socket. + * Pipe. On Unix this is a Unix domain socket. */ struct uv_pipe_s { UV_HANDLE_FIELDS @@ -1148,94 +648,21 @@ UV_PIPE_PRIVATE_FIELDS }; -/* - * Initialize a pipe. The last argument is a boolean to indicate if - * this pipe will be used for handle passing between processes. - */ UV_EXTERN int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc); - -/* - * Opens an existing file descriptor or HANDLE as a pipe. - */ UV_EXTERN int uv_pipe_open(uv_pipe_t*, uv_file file); - -/* - * Bind the pipe to a file path (UNIX) or a name (Windows.) - * - * Paths on UNIX get truncated to `sizeof(sockaddr_un.sun_path)` bytes, - * typically between 92 and 108 bytes. - */ UV_EXTERN int uv_pipe_bind(uv_pipe_t* handle, const char* name); - -/* - * Connect to the UNIX domain socket or the named pipe. - * - * Paths on UNIX get truncated to `sizeof(sockaddr_un.sun_path)` bytes, - * typically between 92 and 108 bytes. - */ UV_EXTERN void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, const char* name, uv_connect_cb cb); - -/* - * Get the name of the UNIX domain socket or the named pipe. - * - * A preallocated buffer must be provided. The len parameter holds the - * length of the buffer and it's set to the number of bytes written to the - * buffer on output. If the buffer is not big enough UV_ENOBUFS will be - * returned and len will contain the required size. - */ UV_EXTERN int uv_pipe_getsockname(const uv_pipe_t* handle, char* buf, size_t* len); - -/* - * This setting applies to Windows only. - * Set the number of pending pipe instance handles when the pipe server - * is waiting for connections. 
- */ UV_EXTERN void uv_pipe_pending_instances(uv_pipe_t* handle, int count); - -/* - * Used to receive handles over ipc pipes. - * - * First - call `uv_pipe_pending_count`, if it is > 0 - initialize handle - * using type, returned by `uv_pipe_pending_type` and call - * `uv_accept(pipe, handle)`. - */ UV_EXTERN int uv_pipe_pending_count(uv_pipe_t* handle); UV_EXTERN uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle); -/* - * uv_poll_t is a subclass of uv_handle_t. - * - * The uv_poll watcher is used to watch file descriptors for readability and - * writability, similar to the purpose of poll(2). - * - * The purpose of uv_poll is to enable integrating external libraries that - * rely on the event loop to signal it about the socket status changes, like - * c-ares or libssh2. Using uv_poll_t for any other other purpose is not - * recommended; uv_tcp_t, uv_udp_t, etc. provide an implementation that is - * much faster and more scalable than what can be achieved with uv_poll_t, - * especially on Windows. - * - * It is possible that uv_poll occasionally signals that a file descriptor is - * readable or writable even when it isn't. The user should therefore always - * be prepared to handle EAGAIN or equivalent when it attempts to read from or - * write to the fd. - * - * It is not okay to have multiple active uv_poll watchers for the same socket. - * This can cause libuv to busyloop or otherwise malfunction. - * - * The user should not close a file descriptor while it is being polled by an - * active uv_poll watcher. This can cause the poll watcher to report an error, - * but it might also start polling another socket. However the fd can be safely - * closed immediately after a call to uv_poll_stop() or uv_close(). - * - * On windows only sockets can be polled with uv_poll. On unix any file - * descriptor that would be accepted by poll(2) can be used with uv_poll. 
- */ + struct uv_poll_s { UV_HANDLE_FIELDS uv_poll_cb poll_cb; @@ -1247,118 +674,52 @@ UV_WRITABLE = 2 }; -/* Initialize the poll watcher using a file descriptor. */ UV_EXTERN int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd); - -/* Initialize the poll watcher using a socket descriptor. On unix this is */ -/* identical to uv_poll_init. On windows it takes a SOCKET handle. */ UV_EXTERN int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, uv_os_sock_t socket); - -/* - * Starts polling the file descriptor. `events` is a bitmask consisting made up - * of UV_READABLE and UV_WRITABLE. As soon as an event is detected the callback - * will be called with `status` set to 0, and the detected events set en the - * `events` field. - * - * If an error happens while polling status, `status` < 0 and corresponds - * with one of the UV_E* error codes. The user should not close the socket - * while uv_poll is active. If the user does that anyway, the callback *may* - * be called reporting an error status, but this is not guaranteed. - * - * Calling uv_poll_start on an uv_poll watcher that is already active is fine. - * Doing so will update the events mask that is being watched for. - */ UV_EXTERN int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb); - -/* Stops polling the file descriptor. */ UV_EXTERN int uv_poll_stop(uv_poll_t* handle); -/* - * uv_prepare_t is a subclass of uv_handle_t. - * - * Every active prepare handle gets its callback called exactly once per loop - * iteration, just before the system blocks to wait for completed i/o. - */ struct uv_prepare_s { UV_HANDLE_FIELDS UV_PREPARE_PRIVATE_FIELDS }; UV_EXTERN int uv_prepare_init(uv_loop_t*, uv_prepare_t* prepare); - UV_EXTERN int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb); - UV_EXTERN int uv_prepare_stop(uv_prepare_t* prepare); -/* - * uv_check_t is a subclass of uv_handle_t. 
- * - * Every active check handle gets its callback called exactly once per loop - * iteration, just after the system returns from blocking. - */ struct uv_check_s { UV_HANDLE_FIELDS UV_CHECK_PRIVATE_FIELDS }; UV_EXTERN int uv_check_init(uv_loop_t*, uv_check_t* check); - UV_EXTERN int uv_check_start(uv_check_t* check, uv_check_cb cb); - UV_EXTERN int uv_check_stop(uv_check_t* check); -/* - * uv_idle_t is a subclass of uv_handle_t. - * - * Every active idle handle gets its callback called repeatedly until it is - * stopped. This happens after all other types of callbacks are processed. - * When there are multiple "idle" handles active, their callbacks are called - * in turn. - */ struct uv_idle_s { UV_HANDLE_FIELDS UV_IDLE_PRIVATE_FIELDS }; UV_EXTERN int uv_idle_init(uv_loop_t*, uv_idle_t* idle); - UV_EXTERN int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb); - UV_EXTERN int uv_idle_stop(uv_idle_t* idle); -/* - * uv_async_t is a subclass of uv_handle_t. - * - * uv_async_send wakes up the event loop and calls the async handle's callback. - * There is no guarantee that every uv_async_send call leads to exactly one - * invocation of the callback; the only guarantee is that the callback function - * is called at least once after the call to async_send. Unlike all other - * libuv functions, uv_async_send can be called from another thread. - */ struct uv_async_s { UV_HANDLE_FIELDS UV_ASYNC_PRIVATE_FIELDS }; -/* - * Initialize the uv_async_t handle. A NULL callback is allowed. - * - * Note that uv_async_init(), unlike other libuv functions, immediately - * starts the handle. To stop the handle again, close it with uv_close(). - */ UV_EXTERN int uv_async_init(uv_loop_t*, uv_async_t* async, uv_async_cb async_cb); - -/* - * This can be called from other threads to wake up a libuv thread. - */ UV_EXTERN int uv_async_send(uv_async_t* async); @@ -1373,42 +734,18 @@ }; UV_EXTERN int uv_timer_init(uv_loop_t*, uv_timer_t* handle); - -/* - * Start the timer. 
`timeout` and `repeat` are in milliseconds. - * - * If timeout is zero, the callback fires on the next tick of the event loop. - * - * If repeat is non-zero, the callback fires first after timeout milliseconds - * and then repeatedly after repeat milliseconds. - */ UV_EXTERN int uv_timer_start(uv_timer_t* handle, uv_timer_cb cb, uint64_t timeout, uint64_t repeat); - UV_EXTERN int uv_timer_stop(uv_timer_t* handle); - -/* - * Stop the timer, and if it is repeating restart it using the repeat value - * as the timeout. If the timer has never been started before it returns - * UV_EINVAL. - */ UV_EXTERN int uv_timer_again(uv_timer_t* handle); - -/* - * Set the repeat value in milliseconds. Note that if the repeat value is set - * from a timer callback it does not immediately take effect. If the timer was - * non-repeating before, it will have been stopped. If it was repeating, then - * the old repeat value will have been used to schedule the next timeout. - */ UV_EXTERN void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat); - UV_EXTERN uint64_t uv_timer_get_repeat(const uv_timer_t* handle); /* - * uv_getaddrinfo_t is a subclass of uv_req_t + * uv_getaddrinfo_t is a subclass of uv_req_t. * * Request object for uv_getaddrinfo. */ @@ -1420,45 +757,43 @@ }; -/* - * Asynchronous getaddrinfo(3). - * - * Either node or service may be NULL but not both. - * - * hints is a pointer to a struct addrinfo with additional address type - * constraints, or NULL. Consult `man -s 3 getaddrinfo` for details. - * - * Returns 0 on success or an error code < 0 on failure. - * - * If successful, your callback gets called sometime in the future with the - * lookup result, which is either: - * - * a) err == 0, the res argument points to a valid struct addrinfo, or - * b) err < 0, the res argument is NULL. See the UV_EAI_* constants. - * - * Call uv_freeaddrinfo() to free the addrinfo structure. 
- */ UV_EXTERN int uv_getaddrinfo(uv_loop_t* loop, uv_getaddrinfo_t* req, uv_getaddrinfo_cb getaddrinfo_cb, const char* node, const char* service, const struct addrinfo* hints); +UV_EXTERN void uv_freeaddrinfo(struct addrinfo* ai); + /* - * Free the struct addrinfo. Passing NULL is allowed and is a no-op. - */ -UV_EXTERN void uv_freeaddrinfo(struct addrinfo* ai); +* uv_getnameinfo_t is a subclass of uv_req_t. +* +* Request object for uv_getnameinfo. +*/ +struct uv_getnameinfo_s { + UV_REQ_FIELDS + /* read-only */ + uv_loop_t* loop; + UV_GETNAMEINFO_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags); -/* uv_spawn() options */ +/* uv_spawn() options. */ typedef enum { UV_IGNORE = 0x00, UV_CREATE_PIPE = 0x01, UV_INHERIT_FD = 0x02, UV_INHERIT_STREAM = 0x04, - /* When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE + /* + * When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE * determine the direction of flow, from the child process' perspective. Both * flags may be specified to create a duplex data stream. */ @@ -1539,7 +874,7 @@ /* * Do not wrap any arguments in quotes, or perform any other escaping, when * converting the argument list into a command line string. This option is - * only meaningful on Windows systems. On unix it is silently ignored. + * only meaningful on Windows systems. On Unix it is silently ignored. */ UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS = (1 << 2), /* @@ -1552,14 +887,14 @@ UV_PROCESS_DETACHED = (1 << 3), /* * Hide the subprocess console window that would normally be created. This - * option is only meaningful on Windows systems. On unix it is silently + * option is only meaningful on Windows systems. On Unix it is silently * ignored. */ UV_PROCESS_WINDOWS_HIDE = (1 << 4) }; /* - * uv_process_t is a subclass of uv_handle_t + * uv_process_t is a subclass of uv_handle_t. 
*/ struct uv_process_s { UV_HANDLE_FIELDS @@ -1568,51 +903,15 @@ UV_PROCESS_PRIVATE_FIELDS }; -/* - * Initializes the uv_process_t and starts the process. If the process is - * successfully spawned, then this function will return 0. Otherwise, the - * negative error code corresponding to the reason it couldn't spawn is - * returned. - * - * Possible reasons for failing to spawn would include (but not be limited to) - * the file to execute not existing, not having permissions to use the setuid or - * setgid specified, or not having enough memory to allocate for the new - * process. - */ UV_EXTERN int uv_spawn(uv_loop_t* loop, uv_process_t* handle, const uv_process_options_t* options); - - -/* - * Kills the process with the specified signal. The user must still - * call uv_close on the process. - * - * Emulates some aspects of Unix exit status on Windows, in that while the - * underlying process will be terminated with a status of `1`, - * `uv_process_t.exit_signal` will be set to signum, so the process will appear - * to have been killed by `signum`. - */ UV_EXTERN int uv_process_kill(uv_process_t*, int signum); - - -/* Kills the process with the specified signal. - * - * Emulates some aspects of Unix signals on Windows: - * - SIGTERM, SIGKILL, and SIGINT call TerminateProcess() to unconditionally - * cause the target to exit with status 1. Unlike Unix, this cannot be caught - * or ignored (but see uv_process_kill() and uv_signal_start()). - * - Signal number `0` causes a check for target existence, as in Unix. Return - * value is 0 on existence, UV_ESRCH on non-existence. - * - * Returns 0 on success, or an error code on failure. UV_ESRCH is portably used - * for non-existence of target process, other errors may be system specific. - */ UV_EXTERN int uv_kill(int pid, int signum); /* - * uv_work_t is a subclass of uv_req_t + * uv_work_t is a subclass of uv_req_t. 
*/ struct uv_work_s { UV_REQ_FIELDS @@ -1622,34 +921,11 @@ UV_WORK_PRIVATE_FIELDS }; -/* Queues a work request to execute asynchronously on the thread pool. */ UV_EXTERN int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb, uv_after_work_cb after_work_cb); -/* Cancel a pending request. Fails if the request is executing or has finished - * executing. - * - * Returns 0 on success, or an error code < 0 on failure. - * - * Only cancellation of uv_fs_t, uv_getaddrinfo_t and uv_work_t requests is - * currently supported. - * - * Cancelled requests have their callbacks invoked some time in the future. - * It's _not_ safe to free the memory associated with the request until your - * callback is called. - * - * Here is how cancellation is reported to your callback: - * - * - A uv_fs_t request has its req->result field set to UV_ECANCELED. - * - * - A uv_work_t or uv_getaddrinfo_t request has its callback invoked with - * status == UV_ECANCELED. - * - * This function is currently only implemented on UNIX platforms. On Windows, - * it always returns UV_ENOSYS. - */ UV_EXTERN int uv_cancel(uv_req_t* req); @@ -1679,6 +955,22 @@ } netmask; }; +typedef enum { + UV_DIRENT_UNKNOWN, + UV_DIRENT_FILE, + UV_DIRENT_DIR, + UV_DIRENT_LINK, + UV_DIRENT_FIFO, + UV_DIRENT_SOCKET, + UV_DIRENT_CHAR, + UV_DIRENT_BLOCK +} uv_dirent_type_t; + +struct uv_dirent_s { + const char* name; + uv_dirent_type_t type; +}; + UV_EXTERN char** uv_setup_args(int argc, char** argv); UV_EXTERN int uv_get_process_title(char* buffer, size_t size); UV_EXTERN int uv_set_process_title(const char* title); @@ -1709,41 +1001,16 @@ uint64_t ru_nivcsw; /* involuntary context switches */ } uv_rusage_t; -/* - * Get information about OS resource utilization for the current process. - * Please note that not all uv_rusage_t struct fields will be filled on Windows. - */ UV_EXTERN int uv_getrusage(uv_rusage_t* rusage); -/* - * This allocates cpu_infos array, and sets count. 
The array - * is freed using uv_free_cpu_info(). - */ UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count); UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count); -/* - * This allocates addresses array, and sets count. The array - * is freed using uv_free_interface_addresses(). - */ UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses, - int* count); + int* count); UV_EXTERN void uv_free_interface_addresses(uv_interface_address_t* addresses, - int count); + int count); -/* - * File System Methods. - * - * The uv_fs_* functions execute a blocking system call asynchronously (in a - * thread pool) and call the specified callback in the specified loop after - * completion. If the user gives NULL as the callback the blocking system - * call will be called synchronously. req should be a pointer to an - * uninitialized uv_fs_t object. - * - * uv_fs_req_cleanup() must be called after completion of the uv_fs_ - * function to free any internal memory allocations associated with the - * request. - */ typedef enum { UV_FS_UNKNOWN = -1, @@ -1759,6 +1026,7 @@ UV_FS_FTRUNCATE, UV_FS_UTIME, UV_FS_FUTIME, + UV_FS_ACCESS, UV_FS_CHMOD, UV_FS_FCHMOD, UV_FS_FSYNC, @@ -1766,8 +1034,9 @@ UV_FS_UNLINK, UV_FS_RMDIR, UV_FS_MKDIR, + UV_FS_MKDTEMP, UV_FS_RENAME, - UV_FS_READDIR, + UV_FS_SCANDIR, UV_FS_LINK, UV_FS_SYMLINK, UV_FS_READLINK, @@ -1775,7 +1044,7 @@ UV_FS_FCHOWN } uv_fs_type; -/* uv_fs_t is a subclass of uv_req_t */ +/* uv_fs_t is a subclass of uv_req_t. */ struct uv_fs_s { UV_REQ_FIELDS uv_fs_type fs_type; @@ -1784,98 +1053,163 @@ ssize_t result; void* ptr; const char* path; - uv_stat_t statbuf; /* Stores the result of uv_fs_stat and uv_fs_fstat. */ + uv_stat_t statbuf; /* Stores the result of uv_fs_stat() and uv_fs_fstat(). 
*/ UV_FS_PRIVATE_FIELDS }; UV_EXTERN void uv_fs_req_cleanup(uv_fs_t* req); - -UV_EXTERN int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, - int flags, int mode, uv_fs_cb cb); - -UV_EXTERN int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file file, - const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb); - -UV_EXTERN int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file file, - const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb); - -UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, - int mode, uv_fs_cb cb); - -UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_readdir(uv_loop_t* loop, uv_fs_t* req, - const char* path, int flags, uv_fs_cb cb); - -UV_EXTERN int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_rename(uv_loop_t* loop, uv_fs_t* req, const char* path, - const char* new_path, uv_fs_cb cb); - -UV_EXTERN int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req, uv_file file, - int64_t offset, uv_fs_cb cb); - -UV_EXTERN int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file out_fd, - uv_file in_fd, int64_t in_offset, size_t length, uv_fs_cb cb); - -UV_EXTERN int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, - int mode, uv_fs_cb cb); - -UV_EXTERN int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, - double atime, double mtime, uv_fs_cb cb); - -UV_EXTERN int uv_fs_futime(uv_loop_t* loop, uv_fs_t* 
req, uv_file file, - double atime, double mtime, uv_fs_cb cb); - -UV_EXTERN int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path, - const char* new_path, uv_fs_cb cb); +UV_EXTERN int uv_fs_close(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_open(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_read(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t offset, + uv_fs_cb cb); +UV_EXTERN int uv_fs_unlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_write(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t offset, + uv_fs_cb cb); +UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_mkdtemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb); +UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_scandir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb); +UV_EXTERN int uv_fs_scandir_next(uv_fs_t* req, + uv_dirent_t* ent); +UV_EXTERN int uv_fs_stat(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fstat(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_rename(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fsync(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fdatasync(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_ftruncate(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int64_t offset, + uv_fs_cb cb); +UV_EXTERN int 
uv_fs_sendfile(uv_loop_t* loop, + uv_fs_t* req, + uv_file out_fd, + uv_file in_fd, + int64_t in_offset, + size_t length, + uv_fs_cb cb); +UV_EXTERN int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_chmod(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_utime(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + double atime, + double mtime, + uv_fs_cb cb); +UV_EXTERN int uv_fs_futime(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + double atime, + double mtime, + uv_fs_cb cb); +UV_EXTERN int uv_fs_lstat(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_link(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb); /* - * This flag can be used with uv_fs_symlink on Windows - * to specify whether path argument points to a directory. + * This flag can be used with uv_fs_symlink() on Windows to specify whether + * path argument points to a directory. */ #define UV_FS_SYMLINK_DIR 0x0001 /* - * This flag can be used with uv_fs_symlink on Windows - * to specify whether the symlink is to be created using junction points. + * This flag can be used with uv_fs_symlink() on Windows to specify whether + * the symlink is to be created using junction points. 
*/ #define UV_FS_SYMLINK_JUNCTION 0x0002 -UV_EXTERN int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path, - const char* new_path, int flags, uv_fs_cb cb); - -UV_EXTERN int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path, - uv_fs_cb cb); - -UV_EXTERN int uv_fs_fchmod(uv_loop_t* loop, uv_fs_t* req, uv_file file, - int mode, uv_fs_cb cb); - -UV_EXTERN int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, - uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb); - -UV_EXTERN int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file file, - uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb); +UV_EXTERN int uv_fs_symlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + int flags, + uv_fs_cb cb); +UV_EXTERN int uv_fs_readlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fchmod(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_chown(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fchown(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb); enum uv_fs_event { @@ -1902,77 +1236,14 @@ }; UV_EXTERN int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle); - -/* - * Check the file at `path` for changes every `interval` milliseconds. - * - * Your callback is invoked with `status < 0` if `path` does not exist - * or is inaccessible. The watcher is *not* stopped but your callback is - * not called again until something changes (e.g. when the file is created - * or the error reason changes). - * - * When `status == 0`, your callback receives pointers to the old and new - * `uv_stat_t` structs. They are valid for the duration of the callback - * only! - * - * For maximum portability, use multi-second intervals. Sub-second intervals - * will not detect all changes on many file systems. 
- */ UV_EXTERN int uv_fs_poll_start(uv_fs_poll_t* handle, uv_fs_poll_cb poll_cb, const char* path, unsigned int interval); - UV_EXTERN int uv_fs_poll_stop(uv_fs_poll_t* handle); - -/* - * Get the path being monitored by the handle. The buffer must be preallocated - * by the user. Returns 0 on success or an error code < 0 in case of failure. - * On sucess, `buf` will contain the path and `len` its length. If the buffer - * is not big enough UV_ENOBUFS will be returned and len will be set to the - * required size. - */ UV_EXTERN int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buf, size_t* len); -/* - * UNIX signal handling on a per-event loop basis. The implementation is not - * ultra efficient so don't go creating a million event loops with a million - * signal watchers. - * - * Note to Linux users: SIGRT0 and SIGRT1 (signals 32 and 33) are used by the - * NPTL pthreads library to manage threads. Installing watchers for those - * signals will lead to unpredictable behavior and is strongly discouraged. - * Future versions of libuv may simply reject them. - * - * Reception of some signals is emulated on Windows: - * - * SIGINT is normally delivered when the user presses CTRL+C. However, like - * on Unix, it is not generated when terminal raw mode is enabled. - * - * SIGBREAK is delivered when the user pressed CTRL+BREAK. - * - * SIGHUP is generated when the user closes the console window. On SIGHUP the - * program is given approximately 10 seconds to perform cleanup. After that - * Windows will unconditionally terminate it. - * - * SIGWINCH is raised whenever libuv detects that the console has been - * resized. SIGWINCH is emulated by libuv when the program uses an uv_tty_t - * handle to write to the console. SIGWINCH may not always be delivered in a - * timely manner; libuv will only detect size changes when the cursor is - * being moved. When a readable uv_tty_handle is used in raw mode, resizing - * the console buffer will also trigger a SIGWINCH signal. 
- * - * Watchers for other signals can be successfully created, but these signals - * are never received. These signals are: SIGILL, SIGABRT, SIGFPE, SIGSEGV, - * SIGTERM and SIGKILL. - * - * Note that calls to raise() or abort() to programmatically raise a signal are - * not detected by libuv; these will not trigger a signal watcher. - * - * See uv_process_kill() and uv_kill() for information about support for sending - * signals. - */ struct uv_signal_s { UV_HANDLE_FIELDS uv_signal_cb signal_cb; @@ -1981,24 +1252,16 @@ }; UV_EXTERN int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle); - UV_EXTERN int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum); - UV_EXTERN int uv_signal_stop(uv_signal_t* handle); - -/* - * Gets load average. - * See: http://en.wikipedia.org/wiki/Load_(computing) - * Returns [0,0,0] on Windows. - */ UV_EXTERN void uv_loadavg(double avg[3]); /* - * Flags to be passed to uv_fs_event_start. + * Flags to be passed to uv_fs_event_start(). */ enum uv_fs_event_flags { /* @@ -2029,116 +1292,48 @@ UV_EXTERN int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle); - UV_EXTERN int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb, const char* path, unsigned int flags); - UV_EXTERN int uv_fs_event_stop(uv_fs_event_t* handle); - -/* - * Get the path being monitored by the handle. The buffer must be preallocated - * by the user. Returns 0 on success or an error code < 0 in case of failure. - * On sucess, `buf` will contain the path and `len` its length. If the buffer - * is not big enough UV_ENOBUFS will be returned and len will be set to the - * required size. 
- */ UV_EXTERN int uv_fs_event_getpath(uv_fs_event_t* handle, char* buf, size_t* len); - -/* Utility */ - -/* Convert string ip addresses to binary structures */ UV_EXTERN int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr); UV_EXTERN int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr); -/* Convert binary addresses to strings */ UV_EXTERN int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size); UV_EXTERN int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size); -/* Cross-platform IPv6-capable implementation of the 'standard' inet_ntop */ -/* and inet_pton functions. On success they return 0. If an error */ -/* the target of the `dst` pointer is unmodified. */ UV_EXTERN int uv_inet_ntop(int af, const void* src, char* dst, size_t size); UV_EXTERN int uv_inet_pton(int af, const char* src, void* dst); -/* Gets the executable path */ UV_EXTERN int uv_exepath(char* buffer, size_t* size); -/* Gets the current working directory */ UV_EXTERN int uv_cwd(char* buffer, size_t* size); -/* Changes the current working directory */ UV_EXTERN int uv_chdir(const char* dir); -/* Gets memory info in bytes */ UV_EXTERN uint64_t uv_get_free_memory(void); UV_EXTERN uint64_t uv_get_total_memory(void); -/* - * Returns the current high-resolution real time. This is expressed in - * nanoseconds. It is relative to an arbitrary time in the past. It is not - * related to the time of day and therefore not subject to clock drift. The - * primary use is for measuring performance between intervals. - * - * Note not every platform can support nanosecond resolution; however, this - * value will always be in nanoseconds. - */ UV_EXTERN extern uint64_t uv_hrtime(void); - -/* - * Disables inheritance for file descriptors / handles that this process - * inherited from its parent. The effect is that child processes spawned by - * this process don't accidentally inherit these handles. 
- * - * It is recommended to call this function as early in your program as possible, - * before the inherited file descriptors can be closed or duplicated. - * - * Note that this function works on a best-effort basis: there is no guarantee - * that libuv can discover all file descriptors that were inherited. In general - * it does a better job on Windows than it does on unix. - */ UV_EXTERN void uv_disable_stdio_inheritance(void); -/* - * Opens a shared library. The filename is in utf-8. Returns 0 on success and - * -1 on error. Call `uv_dlerror(uv_lib_t*)` to get the error message. - */ UV_EXTERN int uv_dlopen(const char* filename, uv_lib_t* lib); - -/* - * Close the shared library. - */ UV_EXTERN void uv_dlclose(uv_lib_t* lib); - -/* - * Retrieves a data pointer from a dynamic library. It is legal for a symbol to - * map to NULL. Returns 0 on success and -1 if the symbol was not found. - */ UV_EXTERN int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr); +UV_EXTERN const char* uv_dlerror(const uv_lib_t* lib); -/* - * Returns the last uv_dlopen() or uv_dlsym() error message. - */ -UV_EXTERN const char* uv_dlerror(uv_lib_t* lib); - -/* - * The mutex functions return 0 on success or an error code < 0 - * (unless the return type is void, of course). - */ UV_EXTERN int uv_mutex_init(uv_mutex_t* handle); UV_EXTERN void uv_mutex_destroy(uv_mutex_t* handle); UV_EXTERN void uv_mutex_lock(uv_mutex_t* handle); UV_EXTERN int uv_mutex_trylock(uv_mutex_t* handle); UV_EXTERN void uv_mutex_unlock(uv_mutex_t* handle); -/* - * Same goes for the read/write lock functions. - */ UV_EXTERN int uv_rwlock_init(uv_rwlock_t* rwlock); UV_EXTERN void uv_rwlock_destroy(uv_rwlock_t* rwlock); UV_EXTERN void uv_rwlock_rdlock(uv_rwlock_t* rwlock); @@ -2148,66 +1343,39 @@ UV_EXTERN int uv_rwlock_trywrlock(uv_rwlock_t* rwlock); UV_EXTERN void uv_rwlock_wrunlock(uv_rwlock_t* rwlock); -/* - * Same goes for the semaphore functions. 
- */ UV_EXTERN int uv_sem_init(uv_sem_t* sem, unsigned int value); UV_EXTERN void uv_sem_destroy(uv_sem_t* sem); UV_EXTERN void uv_sem_post(uv_sem_t* sem); UV_EXTERN void uv_sem_wait(uv_sem_t* sem); UV_EXTERN int uv_sem_trywait(uv_sem_t* sem); -/* - * Same goes for the condition variable functions. - */ UV_EXTERN int uv_cond_init(uv_cond_t* cond); UV_EXTERN void uv_cond_destroy(uv_cond_t* cond); UV_EXTERN void uv_cond_signal(uv_cond_t* cond); UV_EXTERN void uv_cond_broadcast(uv_cond_t* cond); -/* Waits on a condition variable without a timeout. - * - * Note: - * 1. callers should be prepared to deal with spurious wakeups. - */ -UV_EXTERN void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex); -/* Waits on a condition variable with a timeout in nano seconds. - * Returns 0 for success or UV_ETIMEDOUT on timeout, It aborts when other - * errors happen. - * - * Note: - * 1. callers should be prepared to deal with spurious wakeups. - * 2. the granularity of timeout on Windows is never less than one millisecond. - * 3. uv_cond_timedwait takes a relative timeout, not an absolute time. - */ -UV_EXTERN int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, - uint64_t timeout); UV_EXTERN int uv_barrier_init(uv_barrier_t* barrier, unsigned int count); UV_EXTERN void uv_barrier_destroy(uv_barrier_t* barrier); -UV_EXTERN void uv_barrier_wait(uv_barrier_t* barrier); +UV_EXTERN int uv_barrier_wait(uv_barrier_t* barrier); + +UV_EXTERN void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex); +UV_EXTERN int uv_cond_timedwait(uv_cond_t* cond, + uv_mutex_t* mutex, + uint64_t timeout); -/* Runs a function once and only once. Concurrent calls to uv_once() with the - * same guard will block all callers except one (it's unspecified which one). - * The guard should be initialized statically with the UV_ONCE_INIT macro. - */ UV_EXTERN void uv_once(uv_once_t* guard, void (*callback)(void)); -/* Thread-local storage. 
These functions largely follow the semantics of - * pthread_key_create(), pthread_key_delete(), pthread_getspecific() and - * pthread_setspecific(). - * - * Note that the total thread-local storage size may be limited. - * That is, it may not be possible to create many TLS keys. - */ UV_EXTERN int uv_key_create(uv_key_t* key); UV_EXTERN void uv_key_delete(uv_key_t* key); UV_EXTERN void* uv_key_get(uv_key_t* key); UV_EXTERN void uv_key_set(uv_key_t* key, void* value); -UV_EXTERN int uv_thread_create(uv_thread_t *tid, - void (*entry)(void *arg), void *arg); -UV_EXTERN unsigned long uv_thread_self(void); +typedef void (*uv_thread_cb)(void* arg); + +UV_EXTERN int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg); +UV_EXTERN uv_thread_t uv_thread_self(void); UV_EXTERN int uv_thread_join(uv_thread_t *tid); +UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2); /* The presence of these unions force similar struct layout. */ #define XX(_, name) uv_ ## name ## _t name; @@ -2224,11 +1392,11 @@ struct uv_loop_s { /* User data - use this for whatever. */ void* data; - /* Loop reference counting */ + /* Loop reference counting. */ unsigned int active_handles; void* handle_queue[2]; void* active_reqs[2]; - /* Internal flag to signal loop stop */ + /* Internal flag to signal loop stop. 
*/ unsigned int stop_flag; UV_LOOP_PRIVATE_FIELDS }; @@ -2246,6 +1414,7 @@ #undef UV_ASYNC_PRIVATE_FIELDS #undef UV_TIMER_PRIVATE_FIELDS #undef UV_GETADDRINFO_PRIVATE_FIELDS +#undef UV_GETNAMEINFO_PRIVATE_FIELDS #undef UV_FS_REQ_PRIVATE_FIELDS #undef UV_WORK_PRIVATE_FIELDS #undef UV_FS_EVENT_PRIVATE_FIELDS diff -Nru nodejs-0.11.13/deps/uv/include/uv-linux.h nodejs-0.11.15/deps/uv/include/uv-linux.h --- nodejs-0.11.13/deps/uv/include/uv-linux.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-linux.h 2015-01-20 21:22:17.000000000 +0000 @@ -31,6 +31,4 @@ void* watchers[2]; \ int wd; \ -#define UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS - #endif /* UV_LINUX_H */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-sunos.h nodejs-0.11.15/deps/uv/include/uv-sunos.h --- nodejs-0.11.13/deps/uv/include/uv-sunos.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-sunos.h 2015-01-20 21:22:17.000000000 +0000 @@ -41,6 +41,4 @@ #endif /* defined(PORT_SOURCE_FILE) */ -#define UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS - #endif /* UV_SUNOS_H */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-threadpool.h nodejs-0.11.15/deps/uv/include/uv-threadpool.h --- nodejs-0.11.13/deps/uv/include/uv-threadpool.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-threadpool.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* + * This file is private to libuv. It provides common functionality to both + * Windows and Unix backends. 
+ */ + +#ifndef UV_THREADPOOL_H_ +#define UV_THREADPOOL_H_ + +struct uv__work { + void (*work)(struct uv__work *w); + void (*done)(struct uv__work *w, int status); + struct uv_loop_s* loop; + void* wq[2]; +}; + +#endif /* UV_THREADPOOL_H_ */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-unix.h nodejs-0.11.15/deps/uv/include/uv-unix.h --- nodejs-0.11.13/deps/uv/include/uv-unix.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-unix.h 2015-01-20 21:22:17.000000000 +0000 @@ -25,6 +25,7 @@ #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> +#include <dirent.h> #include <sys/socket.h> #include <netinet/in.h> @@ -42,8 +43,12 @@ #endif #include <signal.h> +#include "uv-threadpool.h" + #if defined(__linux__) # include "uv-linux.h" +#elif defined(_AIX) +# include "uv-aix.h" #elif defined(__sun) # include "uv-sunos.h" #elif defined(__APPLE__) @@ -55,6 +60,14 @@ # include "uv-bsd.h" #endif +#ifndef NI_MAXHOST +# define NI_MAXHOST 1025 +#endif + +#ifndef NI_MAXSERV +# define NI_MAXSERV 32 +#endif + #ifndef UV_IO_PRIVATE_PLATFORM_FIELDS # define UV_IO_PRIVATE_PLATFORM_FIELDS /* empty */ #endif @@ -88,13 +101,6 @@ int wfd; }; -struct uv__work { - void (*work)(struct uv__work *w); - void (*done)(struct uv__work *w, int status); - struct uv_loop_s* loop; - void* wq[2]; -}; - #ifndef UV_PLATFORM_SEM_T # define UV_PLATFORM_SEM_T sem_t #endif @@ -112,13 +118,14 @@ #endif /* Note: May be cast to struct iovec. See writev(2). 
*/ -typedef struct { +typedef struct uv_buf_t { char* base; size_t len; } uv_buf_t; typedef int uv_file; typedef int uv_os_sock_t; +typedef int uv_os_fd_t; #define UV_ONCE_INIT PTHREAD_ONCE_INIT @@ -150,6 +157,47 @@ typedef gid_t uv_gid_t; typedef uid_t uv_uid_t; +typedef struct dirent uv__dirent_t; + +#if defined(DT_UNKNOWN) +# define HAVE_DIRENT_TYPES +# if defined(DT_REG) +# define UV__DT_FILE DT_REG +# else +# define UV__DT_FILE -1 +# endif +# if defined(DT_DIR) +# define UV__DT_DIR DT_DIR +# else +# define UV__DT_DIR -2 +# endif +# if defined(DT_LNK) +# define UV__DT_LINK DT_LNK +# else +# define UV__DT_LINK -3 +# endif +# if defined(DT_FIFO) +# define UV__DT_FIFO DT_FIFO +# else +# define UV__DT_FIFO -4 +# endif +# if defined(DT_SOCK) +# define UV__DT_SOCKET DT_SOCK +# else +# define UV__DT_SOCKET -5 +# endif +# if defined(DT_CHR) +# define UV__DT_CHAR DT_CHR +# else +# define UV__DT_CHAR -6 +# endif +# if defined(DT_BLK) +# define UV__DT_BLOCK DT_BLK +# else +# define UV__DT_BLOCK -7 +# endif +#endif + /* Platform-specific definitions for uv_dlopen support. 
*/ #define UV_DYNAMIC /* empty */ @@ -171,7 +219,7 @@ uv_async_t wq_async; \ uv_rwlock_t cloexec_lock; \ uv_handle_t* closing_handles; \ - void* process_handles[1][2]; \ + void* process_handles[2]; \ void* prepare_handles[2]; \ void* check_handles[2]; \ void* idle_handles[2]; \ @@ -210,7 +258,7 @@ #define UV_UDP_SEND_PRIVATE_FIELDS \ void* queue[2]; \ - struct sockaddr_in6 addr; \ + struct sockaddr_storage addr; \ unsigned int nbufs; \ uv_buf_t* bufs; \ ssize_t status; \ @@ -281,6 +329,15 @@ struct addrinfo* res; \ int retcode; +#define UV_GETNAMEINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ + uv_getnameinfo_cb getnameinfo_cb; \ + struct sockaddr_storage storage; \ + int flags; \ + char host[NI_MAXHOST]; \ + char service[NI_MAXSERV]; \ + int retcode; + #define UV_PROCESS_PRIVATE_FIELDS \ void* queue[2]; \ int status; \ diff -Nru nodejs-0.11.13/deps/uv/include/uv-version.h nodejs-0.11.15/deps/uv/include/uv-version.h --- nodejs-0.11.13/deps/uv/include/uv-version.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-version.h 2015-01-20 21:22:17.000000000 +0000 @@ -23,16 +23,17 @@ #define UV_VERSION_H /* - * Versions with an even minor version (e.g. 0.6.1 or 1.0.4) are API and ABI - * stable. When the minor version is odd, the API can change between patch - * releases. Make sure you update the -soname directives in configure.ac + * Versions with the same major number are ABI stable. API is allowed to + * evolve between minor releases, but only in a backwards compatible way. + * Make sure you update the -soname directives in configure.ac * and uv.gyp whenever you bump UV_VERSION_MAJOR or UV_VERSION_MINOR (but * not UV_VERSION_PATCH.) 
*/ -#define UV_VERSION_MAJOR 0 -#define UV_VERSION_MINOR 11 -#define UV_VERSION_PATCH 25 +#define UV_VERSION_MAJOR 1 +#define UV_VERSION_MINOR 0 +#define UV_VERSION_PATCH 2 #define UV_VERSION_IS_RELEASE 1 +#define UV_VERSION_SUFFIX "" #endif /* UV_VERSION_H */ diff -Nru nodejs-0.11.13/deps/uv/include/uv-win.h nodejs-0.11.15/deps/uv/include/uv-win.h --- nodejs-0.11.13/deps/uv/include/uv-win.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/include/uv-win.h 2015-01-20 21:22:17.000000000 +0000 @@ -30,6 +30,29 @@ #endif #include <winsock2.h> + +#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) +typedef struct pollfd { + SOCKET fd; + short events; + short revents; +} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD; +#endif + +#ifndef LOCALE_INVARIANT +# define LOCALE_INVARIANT 0x007f +#endif + +#ifndef _malloca +# if defined(_DEBUG) +# define _malloca(size) malloc(size) +# define _freea(ptr) free(ptr) +# else +# define _malloca(size) alloca(size) +# define _freea(ptr) +# endif +#endif + #include <mswsock.h> #include <ws2tcpip.h> #include <windows.h> @@ -45,6 +68,7 @@ #endif #include "tree.h" +#include "uv-threadpool.h" #define MAX_PIPENAME_LEN 256 @@ -205,8 +229,8 @@ } uv_buf_t; typedef int uv_file; - typedef SOCKET uv_os_sock_t; +typedef HANDLE uv_os_fd_t; typedef HANDLE uv_thread_t; @@ -265,6 +289,19 @@ typedef unsigned char uv_uid_t; typedef unsigned char uv_gid_t; +typedef struct uv__dirent_s { + int d_type; + char d_name[1]; +} uv__dirent_t; + +#define UV__DT_DIR UV_DIRENT_DIR +#define UV__DT_FILE UV_DIRENT_FILE +#define UV__DT_LINK UV_DIRENT_LINK +#define UV__DT_FIFO UV_DIRENT_FIFO +#define UV__DT_SOCKET UV_DIRENT_SOCKET +#define UV__DT_CHAR UV_DIRENT_CHAR +#define UV__DT_BLOCK UV_DIRENT_BLOCK + /* Platform-specific definitions for uv_dlopen support. */ #define UV_DYNAMIC FAR WINAPI typedef struct { @@ -279,8 +316,6 @@ HANDLE iocp; \ /* The current time according to the event loop. in msecs. 
*/ \ uint64_t time; \ - /* GetTickCount() result when the event loop time was last updated. */ \ - DWORD last_tick_count; \ /* Tail of a single-linked circular queue of pending reqs. If the queue */ \ /* is empty, tail_ is NULL. If there is only one item, */ \ /* tail_->next_req == tail_ */ \ @@ -307,7 +342,11 @@ /* Counter to keep track of active udp streams */ \ unsigned int active_udp_streams; \ /* Counter to started timer */ \ - uint64_t timer_counter; + uint64_t timer_counter; \ + /* Threadpool */ \ + void* wq[2]; \ + uv_mutex_t wq_mutex; \ + uv_async_t wq_async; #define UV_REQ_TYPE_PRIVATE \ /* TODO: remove the req suffix */ \ @@ -395,7 +434,7 @@ #define UV_TCP_PRIVATE_FIELDS \ SOCKET socket; \ - int bind_error; \ + int delayed_error; \ union { \ struct { uv_tcp_server_fields }; \ struct { uv_tcp_connection_fields }; \ @@ -429,7 +468,8 @@ int queue_len; \ } pending_ipc_info; \ uv_write_t* non_overlapped_writes_tail; \ - void* reserved; + uv_mutex_t readfile_mutex; \ + volatile HANDLE readfile_thread; #define UV_PIPE_PRIVATE_FIELDS \ HANDLE handle; \ @@ -477,7 +517,10 @@ /* Used in fast mode */ \ SOCKET peer_socket; \ AFD_POLL_INFO afd_poll_info_1; \ - AFD_POLL_INFO afd_poll_info_2; \ + union { \ + AFD_POLL_INFO* afd_poll_info_ptr; \ + AFD_POLL_INFO afd_poll_info; \ + } afd_poll_info_2; \ /* Used in fast and slow mode. 
*/ \ uv_req_t poll_req_1; \ uv_req_t poll_req_2; \ @@ -520,6 +563,7 @@ unsigned int flags; #define UV_GETADDRINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ uv_getaddrinfo_cb getaddrinfo_cb; \ void* alloc; \ WCHAR* node; \ @@ -528,6 +572,15 @@ struct addrinfoW* res; \ int retcode; +#define UV_GETNAMEINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ + uv_getnameinfo_cb getnameinfo_cb; \ + struct sockaddr_storage storage; \ + int flags; \ + char host[NI_MAXHOST]; \ + char service[NI_MAXSERV]; \ + int retcode; + #define UV_PROCESS_PRIVATE_FIELDS \ struct uv_process_exit_s { \ UV_REQ_FIELDS \ @@ -539,6 +592,7 @@ volatile char exit_cb_pending; #define UV_FS_PRIVATE_FIELDS \ + struct uv__work work_req; \ int flags; \ DWORD sys_errno_; \ union { \ @@ -564,6 +618,7 @@ }; #define UV_WORK_PRIVATE_FIELDS \ + struct uv__work work_req; #define UV_FS_EVENT_PRIVATE_FIELDS \ struct uv_fs_event_req_s { \ @@ -587,4 +642,15 @@ int uv_utf8_to_utf16(const char* utf8Buffer, WCHAR* utf16Buffer, size_t utf16Size); -#define UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS +#ifndef F_OK +#define F_OK 0 +#endif +#ifndef R_OK +#define R_OK 4 +#endif +#ifndef W_OK +#define W_OK 2 +#endif +#ifndef X_OK +#define X_OK 1 +#endif diff -Nru nodejs-0.11.13/deps/uv/m4/as_case.m4 nodejs-0.11.15/deps/uv/m4/as_case.m4 --- nodejs-0.11.13/deps/uv/m4/as_case.m4 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/m4/as_case.m4 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,21 @@ +# AS_CASE(WORD, [PATTERN1], [IF-MATCHED1]...[DEFAULT]) +# ---------------------------------------------------- +# Expand into +# | case WORD in +# | PATTERN1) IF-MATCHED1 ;; +# | ... 
+# | *) DEFAULT ;; +# | esac +m4_define([_AS_CASE], +[m4_if([$#], 0, [m4_fatal([$0: too few arguments: $#])], + [$#], 1, [ *) $1 ;;], + [$#], 2, [ $1) m4_default([$2], [:]) ;;], + [ $1) m4_default([$2], [:]) ;; +$0(m4_shiftn(2, $@))])dnl +]) +m4_defun([AS_CASE], +[m4_ifval([$2$3], +[case $1 in +_AS_CASE(m4_shift($@)) +esac])]) + diff -Nru nodejs-0.11.13/deps/uv/m4/dtrace.m4 nodejs-0.11.15/deps/uv/m4/dtrace.m4 --- nodejs-0.11.13/deps/uv/m4/dtrace.m4 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/m4/dtrace.m4 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,66 @@ +dnl Copyright (C) 2009 Sun Microsystems +dnl This file is free software; Sun Microsystems +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl --------------------------------------------------------------------------- +dnl Macro: PANDORA_ENABLE_DTRACE +dnl --------------------------------------------------------------------------- +AC_DEFUN([PANDORA_ENABLE_DTRACE],[ + AC_ARG_ENABLE([dtrace], + [AS_HELP_STRING([--disable-dtrace], + [enable DTrace USDT probes. @<:@default=yes@:>@])], + [ac_cv_enable_dtrace="$enableval"], + [ac_cv_enable_dtrace="yes"]) + + AS_IF([test "$ac_cv_enable_dtrace" = "yes"],[ + AC_CHECK_PROGS([DTRACE], [dtrace]) + AS_IF([test "x$ac_cv_prog_DTRACE" = "xdtrace"],[ + + AC_CACHE_CHECK([if dtrace works],[ac_cv_dtrace_works],[ + cat >conftest.d <<_ACEOF +provider Example { + probe increment(int); +}; +_ACEOF + $DTRACE -h -o conftest.h -s conftest.d 2>/dev/zero + AS_IF([test $? 
-eq 0],[ac_cv_dtrace_works=yes], + [ac_cv_dtrace_works=no]) + rm -f conftest.h conftest.d + ]) + AS_IF([test "x$ac_cv_dtrace_works" = "xyes"],[ + AC_DEFINE([HAVE_DTRACE], [1], [Enables DTRACE Support]) + AC_CACHE_CHECK([if dtrace should instrument object files], + [ac_cv_dtrace_needs_objects],[ + dnl DTrace on MacOSX does not use -G option + cat >conftest.d <<_ACEOF +provider Example { + probe increment(int); +}; +_ACEOF + cat > conftest.c <<_ACEOF +#include "conftest.h" +void foo() { + EXAMPLE_INCREMENT(1); +} +_ACEOF + $DTRACE -h -o conftest.h -s conftest.d 2>/dev/zero + $CC -c -o conftest.o conftest.c + $DTRACE -G -o conftest.d.o -s conftest.d conftest.o 2>/dev/zero + AS_IF([test $? -eq 0],[ac_cv_dtrace_needs_objects=yes], + [ac_cv_dtrace_needs_objects=no]) + rm -f conftest.d.o conftest.d conftest.h conftest.o conftest.c + ]) + ]) + AC_SUBST(DTRACEFLAGS) dnl TODO: test for -G on OSX + ac_cv_have_dtrace=yes + ])]) + +AM_CONDITIONAL([HAVE_DTRACE], [test "x$ac_cv_dtrace_works" = "xyes"]) +AM_CONDITIONAL([DTRACE_NEEDS_OBJECTS], + [test "x$ac_cv_dtrace_needs_objects" = "xyes"]) + +]) +dnl --------------------------------------------------------------------------- +dnl End Macro: PANDORA_ENABLE_DTRACE +dnl --------------------------------------------------------------------------- diff -Nru nodejs-0.11.13/deps/uv/m4/.gitignore nodejs-0.11.15/deps/uv/m4/.gitignore --- nodejs-0.11.13/deps/uv/m4/.gitignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/m4/.gitignore 2015-01-20 21:22:17.000000000 +0000 @@ -1,2 +1,4 @@ # Ignore libtoolize-generated files. *.m4 +!as_case.m4 +!libuv-check-flags.m4 diff -Nru nodejs-0.11.13/deps/uv/m4/libuv-check-flags.m4 nodejs-0.11.15/deps/uv/m4/libuv-check-flags.m4 --- nodejs-0.11.13/deps/uv/m4/libuv-check-flags.m4 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/m4/libuv-check-flags.m4 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,319 @@ +dnl Macros to check the presence of generic (non-typed) symbols. 
+dnl Copyright (c) 2006-2008 Diego Pettenà <flameeyes gmail com> +dnl Copyright (c) 2006-2008 xine project +dnl +dnl This program is free software; you can redistribute it and/or modify +dnl it under the terms of the GNU General Public License as published by +dnl the Free Software Foundation; either version 3, or (at your option) +dnl any later version. +dnl +dnl This program is distributed in the hope that it will be useful, +dnl but WITHOUT ANY WARRANTY; without even the implied warranty of +dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +dnl GNU General Public License for more details. +dnl +dnl You should have received a copy of the GNU General Public License +dnl along with this program; if not, write to the Free Software +dnl Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +dnl 02110-1301, USA. +dnl +dnl As a special exception, the copyright owners of the +dnl macro gives unlimited permission to copy, distribute and modify the +dnl configure scripts that are the output of Autoconf when processing the +dnl Macro. You need not follow the terms of the GNU General Public +dnl License when using or distributing such scripts, even though portions +dnl of the text of the Macro appear in them. The GNU General Public +dnl License (GPL) does govern all other use of the material that +dnl constitutes the Autoconf Macro. +dnl +dnl This special exception to the GPL applies to versions of the +dnl Autoconf Macro released by this project. When you make and +dnl distribute a modified version of the Autoconf Macro, you may extend +dnl this special exception to the GPL to apply to your modified version as +dnl well. 
+ +dnl Check if the flag is supported by compiler +dnl CC_CHECK_CFLAGS_SILENT([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) + +AC_DEFUN([CC_CHECK_CFLAGS_SILENT], [ + AC_CACHE_VAL(AS_TR_SH([cc_cv_cflags_$1]), + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $1" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([int a;])], + [eval "AS_TR_SH([cc_cv_cflags_$1])='yes'"], + [eval "AS_TR_SH([cc_cv_cflags_$1])='no'"]) + CFLAGS="$ac_save_CFLAGS" + ]) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes], + [$2], [$3]) +]) + +dnl Check if the flag is supported by compiler (cacheable) +dnl CC_CHECK_CFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) + +AC_DEFUN([CC_CHECK_CFLAGS], [ + AC_CACHE_CHECK([if $CC supports $1 flag], + AS_TR_SH([cc_cv_cflags_$1]), + CC_CHECK_CFLAGS_SILENT([$1]) dnl Don't execute actions here! + ) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes], + [$2], [$3]) +]) + +dnl CC_CHECK_CFLAG_APPEND(FLAG, [action-if-found], [action-if-not-found]) +dnl Check for CFLAG and appends them to CFLAGS if supported +AC_DEFUN([CC_CHECK_CFLAG_APPEND], [ + AC_CACHE_CHECK([if $CC supports $1 flag], + AS_TR_SH([cc_cv_cflags_$1]), + CC_CHECK_CFLAGS_SILENT([$1]) dnl Don't execute actions here! 
+ ) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes], + [CFLAGS="$CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3]) +]) + +dnl CC_CHECK_CFLAGS_APPEND([FLAG1 FLAG2], [action-if-found], [action-if-not]) +AC_DEFUN([CC_CHECK_CFLAGS_APPEND], [ + for flag in $1; do + CC_CHECK_CFLAG_APPEND($flag, [$2], [$3]) + done +]) + +dnl Check if the flag is supported by linker (cacheable) +dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) + +AC_DEFUN([CC_CHECK_LDFLAGS], [ + AC_CACHE_CHECK([if $CC supports $1 flag], + AS_TR_SH([cc_cv_ldflags_$1]), + [ac_save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $1" + AC_LANG_PUSH([C]) + AC_LINK_IFELSE([AC_LANG_SOURCE([int main() { return 1; }])], + [eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"], + [eval "AS_TR_SH([cc_cv_ldflags_$1])="]) + AC_LANG_POP([C]) + LDFLAGS="$ac_save_LDFLAGS" + ]) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes], + [$2], [$3]) +]) + +dnl define the LDFLAGS_NOUNDEFINED variable with the correct value for +dnl the current linker to avoid undefined references in a shared object. +AC_DEFUN([CC_NOUNDEFINED], [ + dnl We check $host for which systems to enable this for. + AC_REQUIRE([AC_CANONICAL_HOST]) + + case $host in + dnl FreeBSD (et al.) does not complete linking for shared objects when pthreads + dnl are requested, as different implementations are present; to avoid problems + dnl use -Wl,-z,defs only for those platform not behaving this way. + *-freebsd* | *-openbsd*) ;; + *) + dnl First of all check for the --no-undefined variant of GNU ld. This allows + dnl for a much more readable commandline, so that people can understand what + dnl it does without going to look for what the heck -z defs does. + for possible_flags in "-Wl,--no-undefined" "-Wl,-z,defs"; do + CC_CHECK_LDFLAGS([$possible_flags], [LDFLAGS_NOUNDEFINED="$possible_flags"]) + break + done + ;; + esac + + AC_SUBST([LDFLAGS_NOUNDEFINED]) +]) + +dnl Check for a -Werror flag or equivalent. 
-Werror is the GCC +dnl and ICC flag that tells the compiler to treat all the warnings +dnl as fatal. We usually need this option to make sure that some +dnl constructs (like attributes) are not simply ignored. +dnl +dnl Other compilers don't support -Werror per se, but they support +dnl an equivalent flag: +dnl - Sun Studio compiler supports -errwarn=%all +AC_DEFUN([CC_CHECK_WERROR], [ + AC_CACHE_CHECK( + [for $CC way to treat warnings as errors], + [cc_cv_werror], + [CC_CHECK_CFLAGS_SILENT([-Werror], [cc_cv_werror=-Werror], + [CC_CHECK_CFLAGS_SILENT([-errwarn=%all], [cc_cv_werror=-errwarn=%all])]) + ]) +]) + +AC_DEFUN([CC_CHECK_ATTRIBUTE], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([if $CC supports __attribute__(( ifelse([$2], , [$1], [$2]) ))], + AS_TR_SH([cc_cv_attribute_$1]), + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + AC_LANG_PUSH([C]) + AC_COMPILE_IFELSE([AC_LANG_SOURCE([$3])], + [eval "AS_TR_SH([cc_cv_attribute_$1])='yes'"], + [eval "AS_TR_SH([cc_cv_attribute_$1])='no'"]) + AC_LANG_POP([C]) + CFLAGS="$ac_save_CFLAGS" + ]) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_attribute_$1])[ = xyes], + [AC_DEFINE( + AS_TR_CPP([SUPPORT_ATTRIBUTE_$1]), 1, + [Define this if the compiler supports __attribute__(( ifelse([$2], , [$1], [$2]) ))] + ) + $4], + [$5]) +]) + +AC_DEFUN([CC_ATTRIBUTE_CONSTRUCTOR], [ + CC_CHECK_ATTRIBUTE( + [constructor],, + [void __attribute__((constructor)) ctor() { int a; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_FORMAT], [ + CC_CHECK_ATTRIBUTE( + [format], [format(printf, n, n)], + [void __attribute__((format(printf, 1, 2))) printflike(const char *fmt, ...) 
{ fmt = (void *)0; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_FORMAT_ARG], [ + CC_CHECK_ATTRIBUTE( + [format_arg], [format_arg(printf)], + [char *__attribute__((format_arg(1))) gettextlike(const char *fmt) { fmt = (void *)0; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_VISIBILITY], [ + CC_CHECK_ATTRIBUTE( + [visibility_$1], [visibility("$1")], + [void __attribute__((visibility("$1"))) $1_function() { }], + [$2], [$3]) +]) + +AC_DEFUN([CC_ATTRIBUTE_NONNULL], [ + CC_CHECK_ATTRIBUTE( + [nonnull], [nonnull()], + [void __attribute__((nonnull())) some_function(void *foo, void *bar) { foo = (void*)0; bar = (void*)0; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_UNUSED], [ + CC_CHECK_ATTRIBUTE( + [unused], , + [void some_function(void *foo, __attribute__((unused)) void *bar);], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_SENTINEL], [ + CC_CHECK_ATTRIBUTE( + [sentinel], , + [void some_function(void *foo, ...) __attribute__((sentinel));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_DEPRECATED], [ + CC_CHECK_ATTRIBUTE( + [deprecated], , + [void some_function(void *foo, ...) 
__attribute__((deprecated));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_ALIAS], [ + CC_CHECK_ATTRIBUTE( + [alias], [weak, alias], + [void other_function(void *foo) { } + void some_function(void *foo) __attribute__((weak, alias("other_function")));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_MALLOC], [ + CC_CHECK_ATTRIBUTE( + [malloc], , + [void * __attribute__((malloc)) my_alloc(int n);], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_PACKED], [ + CC_CHECK_ATTRIBUTE( + [packed], , + [struct astructure { char a; int b; long c; void *d; } __attribute__((packed));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_CONST], [ + CC_CHECK_ATTRIBUTE( + [const], , + [int __attribute__((const)) twopow(int n) { return 1 << n; } ], + [$1], [$2]) +]) + +AC_DEFUN([CC_FLAG_VISIBILITY], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([if $CC supports -fvisibility=hidden], + [cc_cv_flag_visibility], + [cc_flag_visibility_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + CC_CHECK_CFLAGS_SILENT([-fvisibility=hidden], + cc_cv_flag_visibility='yes', + cc_cv_flag_visibility='no') + CFLAGS="$cc_flag_visibility_save_CFLAGS"]) + + AS_IF([test "x$cc_cv_flag_visibility" = "xyes"], + [AC_DEFINE([SUPPORT_FLAG_VISIBILITY], 1, + [Define this if the compiler supports the -fvisibility flag]) + $1], + [$2]) +]) + +AC_DEFUN([CC_FUNC_EXPECT], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([if compiler has __builtin_expect function], + [cc_cv_func_expect], + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + AC_LANG_PUSH([C]) + AC_COMPILE_IFELSE([AC_LANG_SOURCE( + [int some_function() { + int a = 3; + return (int)__builtin_expect(a, 3); + }])], + [cc_cv_func_expect=yes], + [cc_cv_func_expect=no]) + AC_LANG_POP([C]) + CFLAGS="$ac_save_CFLAGS" + ]) + + AS_IF([test "x$cc_cv_func_expect" = "xyes"], + [AC_DEFINE([SUPPORT__BUILTIN_EXPECT], 1, + [Define this if the compiler supports __builtin_expect() function]) + $1], + [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_ALIGNED], [ + 
AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([highest __attribute__ ((aligned ())) supported], + [cc_cv_attribute_aligned], + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + AC_LANG_PUSH([C]) + for cc_attribute_align_try in 64 32 16 8 4 2; do + AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + int main() { + static char c __attribute__ ((aligned($cc_attribute_align_try))) = 0; + return c; + }])], [cc_cv_attribute_aligned=$cc_attribute_align_try; break]) + done + AC_LANG_POP([C]) + CFLAGS="$ac_save_CFLAGS" + ]) + + if test "x$cc_cv_attribute_aligned" != "x"; then + AC_DEFINE_UNQUOTED([ATTRIBUTE_ALIGNED_MAX], [$cc_cv_attribute_aligned], + [Define the highest alignment supported]) + fi +]) \ No newline at end of file diff -Nru nodejs-0.11.13/deps/uv/.mailmap nodejs-0.11.15/deps/uv/.mailmap --- nodejs-0.11.13/deps/uv/.mailmap 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/.mailmap 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,4 @@ +Aaron Bieber <qbit@deftly.net> <deftly@gmail.com> Alan Gutierrez <alan@prettyrobots.com> <alan@blogometer.com> Andrius Bentkus <andrius.bentkus@gmail.com> <toxedvirus@gmail.com> Bert Belder <bertbelder@gmail.com> <info@2bs.nl> @@ -5,6 +6,7 @@ Brandon Philips <brandon.philips@rackspace.com> <brandon@ifup.org> Brian White <mscdex@mscdex.net> Brian White <mscdex@mscdex.net> <mscdex@gmail.com> +Caleb James DeLisle <cjd@hyperboria.ca> <cjd@cjdns.fr> Christoph Iserlohn <christoph.iserlohn@innoq.com> Fedor Indutny <fedor.indutny@gmail.com> <fedor@indutny.com> Frank Denis <github@pureftpd.org> @@ -12,9 +14,14 @@ Justin Venus <justin.venus@gmail.com> <justin.venus@orbitz.com> Keno Fischer <kenof@stanford.edu> <kfischer+github@college.harvard.edu> Keno Fischer <kenof@stanford.edu> <kfischer@college.harvard.edu> +Leonard Hecker <leonard.hecker91@gmail.com> <leonard@hecker.io> Maciej Małecki <maciej.malecki@notimplemented.org> <me@mmalecki.com> Marc Schlaich <marc.schlaich@googlemail.com> <marc.schlaich@gmail.com> Rasmus Christian 
Pedersen <ruysch@outlook.com> +Rasmus Christian Pedersen <ruysch@outlook.com> +Rasmus Christian Pedersen <ruysch@outlook.com> +Rasmus Christian Pedersen <ruysch@outlook.com> +Rasmus Christian Pedersen <zerhacken@yahoo.com> <ruysch@outlook.com> Rasmus Pedersen <ruysch@outlook.com> <zerhacken@yahoo.com> Robert Mustacchi <rm@joyent.com> <rm@fingolfin.org> Ryan Dahl <ryan@joyent.com> <ry@tinyclouds.org> diff -Nru nodejs-0.11.13/deps/uv/Makefile.am nodejs-0.11.15/deps/uv/Makefile.am --- nodejs-0.11.13/deps/uv/Makefile.am 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/Makefile.am 2015-01-20 21:22:17.000000000 +0000 @@ -17,17 +17,18 @@ AM_CPPFLAGS = -I$(top_srcdir)/include \ -I$(top_srcdir)/src -include_HEADERS=include/uv.h include/uv-errno.h include/uv-version.h +include_HEADERS=include/uv.h include/uv-errno.h include/uv-threadpool.h include/uv-version.h CLEANFILES = lib_LTLIBRARIES = libuv.la libuv_la_CFLAGS = @CFLAGS@ -libuv_la_LDFLAGS = -no-undefined -version-info 11:0:0 +libuv_la_LDFLAGS = -no-undefined -version-info 1:0:0 libuv_la_SOURCES = src/fs-poll.c \ src/heap-inl.h \ src/inet.c \ src/queue.h \ + src/threadpool.c \ src/uv-common.c \ src/uv-common.h \ src/version.c @@ -51,6 +52,7 @@ src/win/fs-event.c \ src/win/fs.c \ src/win/getaddrinfo.c \ + src/win/getnameinfo.c \ src/win/handle.c \ src/win/handle-inl.h \ src/win/internal.h \ @@ -66,7 +68,6 @@ src/win/stream-inl.h \ src/win/tcp.c \ src/win/thread.c \ - src/win/threadpool.c \ src/win/timer.c \ src/win/tty.c \ src/win/udp.c \ @@ -80,12 +81,14 @@ include_HEADERS += include/uv-unix.h AM_CPPFLAGS += -I$(top_srcdir)/src/unix +libuv_la_CFLAGS += -g --std=gnu89 -pedantic -Wall -Wextra -Wno-unused-parameter libuv_la_SOURCES += src/unix/async.c \ src/unix/atomic-ops.h \ src/unix/core.c \ src/unix/dl.c \ src/unix/fs.c \ src/unix/getaddrinfo.c \ + src/unix/getnameinfo.c \ src/unix/internal.h \ src/unix/loop-watcher.c \ src/unix/loop.c \ @@ -97,13 +100,15 @@ src/unix/stream.c \ src/unix/tcp.c \ 
src/unix/thread.c \ - src/unix/threadpool.c \ src/unix/timer.c \ src/unix/tty.c \ src/unix/udp.c endif # WINNT +EXTRA_DIST = test/fixtures/empty_file \ + test/fixtures/load_error.node + TESTS = test/run-tests check_PROGRAMS = test/run-tests test_run_tests_CFLAGS = @@ -125,6 +130,7 @@ test/test-condvar.c \ test/test-connection-fail.c \ test/test-cwd-and-chdir.c \ + test/test-default-loop-close.c \ test/test-delayed-accept.c \ test/test-dlerror.c \ test/test-embed.c \ @@ -138,7 +144,9 @@ test/test-get-loadavg.c \ test/test-get-memory.c \ test/test-getaddrinfo.c \ + test/test-getnameinfo.c \ test/test-getsockname.c \ + test/test-handle-fileno.c \ test/test-hrtime.c \ test/test-idle.c \ test/test-ip4-addr.c \ @@ -161,8 +169,10 @@ test/test-pipe-getsockname.c \ test/test-pipe-sendmsg.c \ test/test-pipe-server-close.c \ + test/test-pipe-close-stdout-read-stdin.c \ test/test-platform-output.c \ test/test-poll-close.c \ + test/test-poll-closesocket.c \ test/test-poll.c \ test/test-process-title.c \ test/test-ref.c \ @@ -174,6 +184,7 @@ test/test-shutdown-twice.c \ test/test-signal-multiple-loops.c \ test/test-signal.c \ + test/test-socket-buffer-size.c \ test/test-spawn.c \ test/test-stdio-over-pipes.c \ test/test-tcp-bind-error.c \ @@ -191,8 +202,11 @@ test/test-tcp-shutdown-after-write.c \ test/test-tcp-unexpected-read.c \ test/test-tcp-write-to-half-open-connection.c \ + test/test-tcp-write-after-connect.c \ test/test-tcp-writealot.c \ test/test-tcp-try-write.c \ + test/test-tcp-write-queue-order.c \ + test/test-thread-equal.c \ test/test-thread.c \ test/test-threadpool-cancel.c \ test/test-threadpool.c \ @@ -211,6 +225,9 @@ test/test-udp-open.c \ test/test-udp-options.c \ test/test-udp-send-and-recv.c \ + test/test-udp-send-immediate.c \ + test/test-udp-send-unreachable.c \ + test/test-udp-try-send.c \ test/test-walk-handles.c \ test/test-watcher-cross-stop.c test_run_tests_LDADD = libuv.la @@ -224,7 +241,7 @@ endif if AIX -test_run_tests_CFLAGS += -D_ALL_SOURCE 
-D_XOPEN_SOURCE=500 +test_run_tests_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT endif if SUNOS @@ -233,18 +250,21 @@ if AIX -libuv_la_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 +libuv_la_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT libuv_la_SOURCES += src/unix/aix.c endif if ANDROID -include_HEADERS += include/android-ifaddrs.h -libuv_la_SOURCES += src/unix/android-ifaddrs.c +include_HEADERS += include/android-ifaddrs.h \ + include/pthread-fixes.h +libuv_la_SOURCES += src/unix/android-ifaddrs.c \ + src/unix/pthread-fixes.c endif if DARWIN include_HEADERS += include/uv-darwin.h libuv_la_CFLAGS += -D_DARWIN_USE_64_BIT_INODE=1 +libuv_la_CFLAGS += -D_DARWIN_UNLIMITED_SELECT=1 libuv_la_SOURCES += src/unix/darwin.c \ src/unix/darwin-proctitle.c \ src/unix/fsevents.c \ @@ -252,6 +272,11 @@ src/unix/proctitle.c endif +if DRAGONFLY +include_HEADERS += include/uv-bsd.h +libuv_la_SOURCES += src/unix/kqueue.c src/unix/freebsd.c +endif + if FREEBSD include_HEADERS += include/uv-bsd.h libuv_la_SOURCES += src/unix/freebsd.c src/unix/kqueue.c @@ -282,46 +307,7 @@ libuv_la_SOURCES += src/unix/sunos.c endif -if HAVE_DTRACE -BUILT_SOURCES = include/uv-dtrace.h -CLEANFILES += include/uv-dtrace.h -if FREEBSD -libuv_la_LDFLAGS += -lelf -endif -endif - -if DTRACE_NEEDS_OBJECTS -libuv_la_SOURCES += src/unix/uv-dtrace.d -libuv_la_DEPENDENCIES = src/unix/uv-dtrace.o -libuv_la_LIBADD = uv-dtrace.lo -CLEANFILES += src/unix/uv-dtrace.o src/unix/uv-dtrace.lo -endif - if HAVE_PKG_CONFIG pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = @PACKAGE_NAME@.pc endif - -if HAVE_DTRACE -include/uv-dtrace.h: src/unix/uv-dtrace.d - $(AM_V_GEN)$(DTRACE) $(DTRACEFLAGS) -h -xnolibs -s $< -o $(top_srcdir)/$@ -endif - -if DTRACE_NEEDS_OBJECTS -SUFFIXES = .d - -src/unix/uv-dtrace.o: src/unix/uv-dtrace.d ${libuv_la_OBJECTS} - -# It's ok to specify the output here, because we have 1 .d file, and we process -# every created .o, most projects don't need to include more 
than one .d -.d.o: - $(AM_V_GEN)$(DTRACE) $(DTRACEFLAGS) -G -o $(top_builddir)/uv-dtrace.o -s $< \ - `find ${top_builddir}/src -name "*.o"` - $(AM_V_GEN)printf %s\\n \ - '# ${top_builddir}/uv-dtrace.lo - a libtool object file' \ - '# Generated by libtool (GNU libtool) 2.4' \ - '# libtool wants a .lo not a .o' \ - "pic_object='uv-dtrace.o'" \ - "non_pic_object='uv-dtrace.o'" \ - > ${top_builddir}/uv-dtrace.lo -endif diff -Nru nodejs-0.11.13/deps/uv/Makefile.mingw nodejs-0.11.15/deps/uv/Makefile.mingw --- nodejs-0.11.13/deps/uv/Makefile.mingw 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/Makefile.mingw 2015-01-20 21:22:17.000000000 +0000 @@ -26,6 +26,7 @@ INCLUDES = include/stdint-msvc2008.h \ include/tree.h \ include/uv-errno.h \ + include/uv-threadpool.h \ include/uv-version.h \ include/uv-win.h \ include/uv.h \ @@ -42,6 +43,7 @@ OBJS = src/fs-poll.o \ src/inet.o \ + src/threadpool.o \ src/uv-common.o \ src/version.o \ src/win/async.o \ @@ -51,6 +53,7 @@ src/win/fs-event.o \ src/win/fs.o \ src/win/getaddrinfo.o \ + src/win/getnameinfo.o \ src/win/handle.o \ src/win/loop-watcher.o \ src/win/pipe.o \ @@ -62,7 +65,6 @@ src/win/stream.o \ src/win/tcp.o \ src/win/thread.o \ - src/win/threadpool.o \ src/win/timer.o \ src/win/tty.o \ src/win/udp.o \ diff -Nru nodejs-0.11.13/deps/uv/README.md nodejs-0.11.15/deps/uv/README.md --- nodejs-0.11.13/deps/uv/README.md 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/README.md 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,11 @@ -# libuv +![libuv][libuv_banner] + +## Overview libuv is a multi-platform support library with a focus on asynchronous I/O. It was primarily developed for use by [Node.js](http://nodejs.org), but it's also -used by Mozilla's [Rust language](http://www.rust-lang.org/), -[Luvit](http://luvit.io/), [Julia](http://julialang.org/), -[pyuv](https://crate.io/packages/pyuv/), and [others](https://github.com/joyent/libuv/wiki/Projects-that-use-libuv). 
+used by [Luvit](http://luvit.io/), [Julia](http://julialang.org/), +[pyuv](https://github.com/saghul/pyuv), and [others](https://github.com/libuv/libuv/wiki/Projects-that-use-libuv). ## Feature highlights @@ -32,27 +33,70 @@ * Threading and synchronization primitives +## Versioning + +Starting with version 1.0.0 libuv follows the [semantic versioning](http://semver.org/) +scheme. The API change and backwards compatibility rules are those indicated by +SemVer. libuv will keep a stable ABI across major releases. ## Community * [Mailing list](http://groups.google.com/group/libuv) + * [IRC chatroom (#libuv@irc.freenode.org)](http://webchat.freenode.net?channels=libuv&uio=d4) ## Documentation - * [include/uv.h](https://github.com/joyent/libuv/blob/master/include/uv.h) + * [include/uv.h](https://github.com/libuv/libuv/blob/master/include/uv.h) — API documentation in the form of detailed header comments. + * [An Introduction to libuv](http://nikhilm.github.com/uvbook/) — An + overview of libuv with tutorials. + * [LXJS 2012 talk](http://www.youtube.com/watch?v=nGn60vDSxQ4) - High-level + introductory talk about libuv. + * [Tests and benchmarks](https://github.com/libuv/libuv/tree/master/test) - + API specification and usage examples. + +### Official API documentation + +Located in the docs/ subdirectory. It uses the [Sphinx](http://sphinx-doc.org/) +framework, which makes it possible to build the documentation in multiple +formats. + +Show different supported building options: + + $ make help + +Build documentation as HTML: + + $ make html + +Build documentation as man pages: + + $ make man + +Build documentation as ePub: + + $ make epub + +NOTE: Windows users need to use make.bat instead of plain 'make'. + +Documentation can be browsed online [here](http://docs.libuv.org). + +### Other resources + * [An Introduction to libuv](http://nikhilm.github.com/uvbook/) — An overview of libuv with tutorials. 
* [LXJS 2012 talk](http://www.youtube.com/watch?v=nGn60vDSxQ4) — High-level introductory talk about libuv. - * [Tests and benchmarks](https://github.com/joyent/libuv/tree/master/test) + * [Tests and benchmarks](https://github.com/libuv/libuv/tree/master/test) — API specification and usage examples. * [libuv-dox](https://github.com/thlorenz/libuv-dox) — Documenting types and methods of libuv, mostly by reading uv.h. + * [learnuv](https://github.com/thlorenz/learnuv) + — Learn uv for fun and profit, a self guided workshop to libuv. ## Build Instructions -For GCC there are two methods building: via autotools or via [GYP][]. +For GCC there are two build methods: via autotools or via [GYP][]. GYP is a meta-build system which can generate MSVS, Makefile, and XCode backends. It is best used for integration into other projects. @@ -66,10 +110,9 @@ ### Windows -First, Python 2.6 or 2.7 must be installed as it is required by [GYP][]. - -Also, the directory for the preferred Python executable must be specified -by the `PYTHON` or `Path` environment variables. +First, [Python][] 2.6 or 2.7 must be installed as it is required by [GYP][]. +If python is not in your path, set the environment variable `PYTHON` to its +location. For example: `set PYTHON=C:\Python27\python.exe` To build with Visual Studio, launch a git shell (e.g. Cmd or PowerShell) and run vcbuild.bat which will checkout the GYP code into build/gyp and @@ -78,8 +121,9 @@ To have GYP generate build script for another system, checkout GYP into the project tree manually: - $ mkdir -p build - $ git clone https://git.chromium.org/external/gyp.git build/gyp + $ git clone https://chromium.googlesource.com/external/gyp.git build/gyp + OR + $ svn co http://gyp.googlecode.com/svn/trunk build/gyp ### Unix @@ -88,6 +132,8 @@ $ ./gyp_uv.py -f make $ make -C out +Run `./gyp_uv.py -f make -Dtarget_arch=x32` to build [x32][] binaries. 
+ ### OS X Run: @@ -96,6 +142,10 @@ $ xcodebuild -ARCHS="x86_64" -project uv.xcodeproj \ -configuration Release -target All +Using Homebrew: + + $ brew install --HEAD libuv + Note to OS X users: Make sure that you specify the architecture you wish to build for in the @@ -139,5 +189,7 @@ [node.js]: http://nodejs.org/ [GYP]: http://code.google.com/p/gyp/ +[Python]: https://www.python.org/downloads/ [Visual Studio Express 2010]: http://www.microsoft.com/visualstudio/eng/products/visual-studio-2010-express -[guidelines for contributing]: https://github.com/joyent/libuv/blob/master/CONTRIBUTING.md +[guidelines for contributing]: https://github.com/libuv/libuv/blob/master/CONTRIBUTING.md +[libuv_banner]: https://raw.githubusercontent.com/libuv/libuv/master/img/banner.png diff -Nru nodejs-0.11.13/deps/uv/src/fs-poll.c nodejs-0.11.15/deps/uv/src/fs-poll.c --- nodejs-0.11.13/deps/uv/src/fs-poll.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/fs-poll.c 2015-01-20 21:22:17.000000000 +0000 @@ -60,6 +60,7 @@ struct poll_ctx* ctx; uv_loop_t* loop; size_t len; + int err; if (uv__is_active(handle)) return 0; @@ -78,19 +79,25 @@ ctx->parent_handle = handle; memcpy(ctx->path, path, len + 1); - if (uv_timer_init(loop, &ctx->timer_handle)) - abort(); + err = uv_timer_init(loop, &ctx->timer_handle); + if (err < 0) + goto error; ctx->timer_handle.flags |= UV__HANDLE_INTERNAL; uv__handle_unref(&ctx->timer_handle); - if (uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb)) - abort(); + err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb); + if (err < 0) + goto error; handle->poll_ctx = ctx; uv__handle_start(handle); return 0; + +error: + free(ctx); + return err; } diff -Nru nodejs-0.11.13/deps/uv/src/heap-inl.h nodejs-0.11.15/deps/uv/src/heap-inl.h --- nodejs-0.11.13/deps/uv/src/heap-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/heap-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -227,6 +227,13 @@ break; heap_node_swap(heap, child, 
smallest); } + + /* Walk up the subtree and check that each parent is less than the node + * this is required, because `max` node is not guaranteed to be the + * actual maximum in tree + */ + while (child->parent != NULL && less_than(child, child->parent)) + heap_node_swap(heap, child->parent, child); } HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)) { diff -Nru nodejs-0.11.13/deps/uv/src/inet.c nodejs-0.11.15/deps/uv/src/inet.c --- nodejs-0.11.13/deps/uv/src/inet.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/inet.c 2015-01-20 21:22:17.000000000 +0000 @@ -27,6 +27,9 @@ #include "uv.h" #include "uv-common.h" +#define UV__INET_ADDRSTRLEN 16 +#define UV__INET6_ADDRSTRLEN 46 + static int inet_ntop4(const unsigned char *src, char *dst, size_t size); static int inet_ntop6(const unsigned char *src, char *dst, size_t size); @@ -49,7 +52,7 @@ static int inet_ntop4(const unsigned char *src, char *dst, size_t size) { static const char fmt[] = "%u.%u.%u.%u"; - char tmp[sizeof "255.255.255.255"]; + char tmp[UV__INET_ADDRSTRLEN]; int l; #ifndef _WIN32 @@ -74,7 +77,7 @@ * Keep this in mind if you think this function should have been coded * to use pointer overlays. All the world's not a VAX. 
*/ - char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; + char tmp[UV__INET6_ADDRSTRLEN], *tp; struct { int base, len; } best, cur; unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)]; int i; @@ -156,11 +159,27 @@ int uv_inet_pton(int af, const char* src, void* dst) { + if (src == NULL || dst == NULL) + return UV_EINVAL; + switch (af) { case AF_INET: return (inet_pton4(src, dst)); - case AF_INET6: - return (inet_pton6(src, dst)); + case AF_INET6: { + int len; + char tmp[UV__INET6_ADDRSTRLEN], *s, *p; + s = (char*) src; + p = strchr(src, '%'); + if (p != NULL) { + s = tmp; + len = p - src; + if (len > UV__INET6_ADDRSTRLEN-1) + return UV_EINVAL; + memcpy(s, src, len); + s[len] = '\0'; + } + return inet_pton6(s, dst); + } default: return UV_EAFNOSUPPORT; } @@ -225,7 +244,7 @@ curtok = src; seen_xdigits = 0; val = 0; - while ((ch = *src++) != '\0' && ch != '%') { + while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) @@ -256,19 +275,7 @@ continue; } if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) { - int err; - - /* Scope id present, parse ipv4 addr without it */ - pch = strchr(curtok, '%'); - if (pch != NULL) { - char tmp[sizeof "255.255.255.255"]; - - memcpy(tmp, curtok, pch - curtok); - curtok = tmp; - src = pch; - } - - err = inet_pton4(curtok, tp); + int err = inet_pton4(curtok, tp); if (err == 0) { tp += sizeof(struct in_addr); seen_xdigits = 0; diff -Nru nodejs-0.11.13/deps/uv/src/threadpool.c nodejs-0.11.15/deps/uv/src/threadpool.c --- nodejs-0.11.13/deps/uv/src/threadpool.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/threadpool.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,303 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv-common.h" + +#if !defined(_WIN32) +# include "unix/internal.h" +#else +# include "win/req-inl.h" +/* TODO(saghul): unify internal req functions */ +static void uv__req_init(uv_loop_t* loop, + uv_req_t* req, + uv_req_type type) { + uv_req_init(loop, req); + req->type = type; + uv__req_register(loop, req); +} +# define uv__req_init(loop, req, type) \ + uv__req_init((loop), (uv_req_t*)(req), (type)) +#endif + +#include <stdlib.h> + +#define MAX_THREADPOOL_SIZE 128 + +static uv_once_t once = UV_ONCE_INIT; +static uv_cond_t cond; +static uv_mutex_t mutex; +static unsigned int nthreads; +static uv_thread_t* threads; +static uv_thread_t default_threads[4]; +static QUEUE exit_message; +static QUEUE wq; +static volatile int initialized; + + +static void uv__cancelled(struct uv__work* w) { + abort(); +} + + +/* To avoid deadlock with uv_cancel() it's crucial that the worker + * never holds the global mutex and the loop-local mutex at the same time. + */ +static void worker(void* arg) { + struct uv__work* w; + QUEUE* q; + + (void) arg; + + for (;;) { + uv_mutex_lock(&mutex); + + while (QUEUE_EMPTY(&wq)) + uv_cond_wait(&cond, &mutex); + + q = QUEUE_HEAD(&wq); + + if (q == &exit_message) + uv_cond_signal(&cond); + else { + QUEUE_REMOVE(q); + QUEUE_INIT(q); /* Signal uv_cancel() that the work req is + executing. */ + } + + uv_mutex_unlock(&mutex); + + if (q == &exit_message) + break; + + w = QUEUE_DATA(q, struct uv__work, wq); + w->work(w); + + uv_mutex_lock(&w->loop->wq_mutex); + w->work = NULL; /* Signal uv_cancel() that the work req is done + executing. 
*/ + QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq); + uv_async_send(&w->loop->wq_async); + uv_mutex_unlock(&w->loop->wq_mutex); + } +} + + +static void post(QUEUE* q) { + uv_mutex_lock(&mutex); + QUEUE_INSERT_TAIL(&wq, q); + uv_cond_signal(&cond); + uv_mutex_unlock(&mutex); +} + + +#ifndef _WIN32 +UV_DESTRUCTOR(static void cleanup(void)) { + unsigned int i; + + if (initialized == 0) + return; + + post(&exit_message); + + for (i = 0; i < nthreads; i++) + if (uv_thread_join(threads + i)) + abort(); + + if (threads != default_threads) + free(threads); + + uv_mutex_destroy(&mutex); + uv_cond_destroy(&cond); + + threads = NULL; + nthreads = 0; + initialized = 0; +} +#endif + + +static void init_once(void) { + unsigned int i; + const char* val; + + nthreads = ARRAY_SIZE(default_threads); + val = getenv("UV_THREADPOOL_SIZE"); + if (val != NULL) + nthreads = atoi(val); + if (nthreads == 0) + nthreads = 1; + if (nthreads > MAX_THREADPOOL_SIZE) + nthreads = MAX_THREADPOOL_SIZE; + + threads = default_threads; + if (nthreads > ARRAY_SIZE(default_threads)) { + threads = malloc(nthreads * sizeof(threads[0])); + if (threads == NULL) { + nthreads = ARRAY_SIZE(default_threads); + threads = default_threads; + } + } + + if (uv_cond_init(&cond)) + abort(); + + if (uv_mutex_init(&mutex)) + abort(); + + QUEUE_INIT(&wq); + + for (i = 0; i < nthreads; i++) + if (uv_thread_create(threads + i, worker, NULL)) + abort(); + + initialized = 1; +} + + +void uv__work_submit(uv_loop_t* loop, + struct uv__work* w, + void (*work)(struct uv__work* w), + void (*done)(struct uv__work* w, int status)) { + uv_once(&once, init_once); + w->loop = loop; + w->work = work; + w->done = done; + post(&w->wq); +} + + +static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { + int cancelled; + + uv_mutex_lock(&mutex); + uv_mutex_lock(&w->loop->wq_mutex); + + cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL; + if (cancelled) + QUEUE_REMOVE(&w->wq); + + uv_mutex_unlock(&w->loop->wq_mutex); + 
uv_mutex_unlock(&mutex); + + if (!cancelled) + return UV_EBUSY; + + w->work = uv__cancelled; + uv_mutex_lock(&loop->wq_mutex); + QUEUE_INSERT_TAIL(&loop->wq, &w->wq); + uv_async_send(&loop->wq_async); + uv_mutex_unlock(&loop->wq_mutex); + + return 0; +} + + +void uv__work_done(uv_async_t* handle) { + struct uv__work* w; + uv_loop_t* loop; + QUEUE* q; + QUEUE wq; + int err; + + loop = container_of(handle, uv_loop_t, wq_async); + QUEUE_INIT(&wq); + + uv_mutex_lock(&loop->wq_mutex); + if (!QUEUE_EMPTY(&loop->wq)) { + q = QUEUE_HEAD(&loop->wq); + QUEUE_SPLIT(&loop->wq, q, &wq); + } + uv_mutex_unlock(&loop->wq_mutex); + + while (!QUEUE_EMPTY(&wq)) { + q = QUEUE_HEAD(&wq); + QUEUE_REMOVE(q); + + w = container_of(q, struct uv__work, wq); + err = (w->work == uv__cancelled) ? UV_ECANCELED : 0; + w->done(w, err); + } +} + + +static void uv__queue_work(struct uv__work* w) { + uv_work_t* req = container_of(w, uv_work_t, work_req); + + req->work_cb(req); +} + + +static void uv__queue_done(struct uv__work* w, int err) { + uv_work_t* req; + + req = container_of(w, uv_work_t, work_req); + uv__req_unregister(req->loop, req); + + if (req->after_work_cb == NULL) + return; + + req->after_work_cb(req, err); +} + + +int uv_queue_work(uv_loop_t* loop, + uv_work_t* req, + uv_work_cb work_cb, + uv_after_work_cb after_work_cb) { + if (work_cb == NULL) + return UV_EINVAL; + + uv__req_init(loop, req, UV_WORK); + req->loop = loop; + req->work_cb = work_cb; + req->after_work_cb = after_work_cb; + uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done); + return 0; +} + + +int uv_cancel(uv_req_t* req) { + struct uv__work* wreq; + uv_loop_t* loop; + + switch (req->type) { + case UV_FS: + loop = ((uv_fs_t*) req)->loop; + wreq = &((uv_fs_t*) req)->work_req; + break; + case UV_GETADDRINFO: + loop = ((uv_getaddrinfo_t*) req)->loop; + wreq = &((uv_getaddrinfo_t*) req)->work_req; + break; + case UV_GETNAMEINFO: + loop = ((uv_getnameinfo_t*) req)->loop; + wreq = &((uv_getnameinfo_t*) 
req)->work_req; + break; + case UV_WORK: + loop = ((uv_work_t*) req)->loop; + wreq = &((uv_work_t*) req)->work_req; + break; + default: + return UV_EINVAL; + } + + return uv__work_cancel(loop, req, wreq); +} diff -Nru nodejs-0.11.13/deps/uv/src/unix/aix.c nodejs-0.11.15/deps/uv/src/unix/aix.c --- nodejs-0.11.13/deps/uv/src/unix/aix.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/aix.c 2015-01-20 21:22:17.000000000 +0000 @@ -39,12 +39,235 @@ #include <unistd.h> #include <fcntl.h> #include <utmp.h> +#include <libgen.h> #include <sys/protosw.h> #include <libperfstat.h> #include <sys/proc.h> #include <sys/procfs.h> +#include <sys/poll.h> + +#include <sys/pollset.h> +#include <ctype.h> +#include <sys/ahafs_evProds.h> + +#include <sys/mntctl.h> +#include <sys/vmount.h> +#include <limits.h> +#include <strings.h> +#include <sys/vnode.h> + +#define RDWR_BUF_SIZE 4096 +#define EQ(a,b) (strcmp(a,b) == 0) + +int uv__platform_loop_init(uv_loop_t* loop, int default_loop) { + loop->fs_fd = -1; + + /* Passing maxfd of -1 should mean the limit is determined + * by the user's ulimit or the global limit as per the doc */ + loop->backend_fd = pollset_create(-1); + + if (loop->backend_fd == -1) + return -1; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->fs_fd != -1) { + uv__close(loop->fs_fd); + loop->fs_fd = -1; + } + + if (loop->backend_fd != -1) { + pollset_destroy(loop->backend_fd); + loop->backend_fd = -1; + } +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct pollfd events[1024]; + struct pollfd pqry; + struct pollfd* pe; + struct poll_ctl pc; + QUEUE* q; + uv__io_t* w; + uint64_t base; + uint64_t diff; + int nevents; + int count; + int nfds; + int i; + int rc; + int add_failed; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = 
QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + pc.events = w->pevents; + pc.fd = w->fd; + + add_failed = 0; + if (w->events == 0) { + pc.cmd = PS_ADD; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + if (errno != EINVAL) { + assert(0 && "Failed to add file descriptor (pc.fd) to pollset"); + abort(); + } + /* Check if the fd is already in the pollset */ + pqry.fd = pc.fd; + rc = pollset_query(loop->backend_fd, &pqry); + switch (rc) { + case -1: + assert(0 && "Failed to query pollset for file descriptor"); + abort(); + case 0: + assert(0 && "Pollset does not contain file descriptor"); + abort(); + } + /* If we got here then the pollset already contained the file descriptor even though + * we didn't think it should. This probably shouldn't happen, but we can continue. */ + add_failed = 1; + } + } + if (w->events != 0 || add_failed) { + /* Modify, potentially removing events -- need to delete then add. + * Could maybe mod if we knew for sure no events are removed, but + * content of w->events is handled above as not reliable (falls back) + * so may require a pollset_query() which would have to be pretty cheap + * compared to a PS_DELETE to be worth optimizing. Alternatively, could + * lazily remove events, squelching them in the mean time. */ + pc.cmd = PS_DELETE; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + assert(0 && "Failed to delete file descriptor (pc.fd) from pollset"); + abort(); + } + pc.cmd = PS_ADD; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + assert(0 && "Failed to add file descriptor (pc.fd) to pollset"); + abort(); + } + } + + w->events = w->pevents; + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;;) { + nfds = pollset_poll(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout); + + /* Update loop->time unconditionally. 
It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + if (nfds == -1) { + if (errno != EINTR) { + abort(); + } + + if (timeout == -1) + continue; + + if (timeout == 0) + return; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + + for (i = 0; i < nfds; i++) { + pe = events + i; + pc.cmd = PS_DELETE; + pc.fd = pe->fd; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (pc.fd == -1) + continue; + + assert(pc.fd >= 0); + assert((unsigned) pc.fd < loop->nwatchers); + + w = loop->watchers[pc.fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. + * + * Ignore all errors because we may be racing with another thread + * when the file descriptor is closed. + */ + pollset_ctl(loop->backend_fd, &pc, 1); + continue; + } + + w->cb(loop, w, pe->revents); + nevents++; + } + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. 
*/ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + uint64_t uv__hrtime(uv_clocktype_t type) { uint64_t G = 1000000000; timebasestruct_t t; @@ -58,28 +281,24 @@ * We could use a static buffer for the path manipulations that we need outside * of the function, but this function could be called by multiple consumers and * we don't want to potentially create a race condition in the use of snprintf. + * There is no direct way of getting the exe path in AIX - either through /procfs + * or through some libc APIs. The below approach is to parse the argv[0]'s pattern + * and use it in conjunction with PATH environment variable to craft one. */ int uv_exepath(char* buffer, size_t* size) { ssize_t res; - char pp[64], cwdl[PATH_MAX]; + char cwd[PATH_MAX], cwdl[PATH_MAX]; + char symlink[PATH_MAX], temp_buffer[PATH_MAX]; + char pp[64]; struct psinfo ps; int fd; + char **argv; - if (buffer == NULL) - return (-1); - - if (size == NULL) - return (-1); - - (void) snprintf(pp, sizeof(pp), "/proc/%lu/cwd", (unsigned long) getpid()); - - res = readlink(pp, cwdl, sizeof(cwdl) - 1); - if (res < 0) - return res; + if ((buffer == NULL) || (size == NULL)) + return -EINVAL; - cwdl[res] = '\0'; + snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid()); - (void) snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid()); fd = open(pp, O_RDONLY); if (fd < 0) return fd; @@ -89,9 +308,163 @@ if (res < 0) return res; - (void) snprintf(buffer, *size, "%s%s", cwdl, ps.pr_fname); - *size = strlen(buffer); - return 0; + if (ps.pr_argv == 0) + return -EINVAL; + + argv = (char **) *((char ***) (intptr_t) ps.pr_argv); + + if ((argv == NULL) || (argv[0] == NULL)) + return -EINVAL; + + /* + * Three possibilities for argv[0]: + * i) an absolute path such as: 
/home/user/myprojects/nodejs/node + * ii) a relative path such as: ./node or ./myprojects/nodejs/node + * iii) a bare filename such as "node", after exporting PATH variable + * to its location. + */ + + /* case #1, absolute path. */ + if (argv[0][0] == '/') { + snprintf(symlink, PATH_MAX-1, "%s", argv[0]); + + /* This could or could not be a symlink. */ + res = readlink(symlink, temp_buffer, PATH_MAX-1); + + /* if readlink fails, it is a normal file just copy symlink to the + * output buffer. + */ + if (res < 0) { + assert(*size > strlen(symlink)); + strcpy(buffer, symlink); + + /* If it is a link, the resolved filename is again a relative path, + * make it absolute. + */ + } else { + assert(*size > (strlen(symlink) + 1 + strlen(temp_buffer))); + snprintf(buffer, *size-1, "%s/%s", dirname(symlink), temp_buffer); + } + *size = strlen(buffer); + return 0; + + /* case #2, relative path with usage of '.' */ + } else if (argv[0][0] == '.') { + char *relative = strchr(argv[0], '/'); + if (relative == NULL) + return -EINVAL; + + /* Get the current working directory to resolve the relative path. */ + snprintf(cwd, PATH_MAX-1, "/proc/%lu/cwd", (unsigned long) getpid()); + + /* This is always a symlink, resolve it. */ + res = readlink(cwd, cwdl, sizeof(cwdl) - 1); + if (res < 0) + return -errno; + + snprintf(symlink, PATH_MAX-1, "%s%s", cwdl, relative + 1); + + res = readlink(symlink, temp_buffer, PATH_MAX-1); + if (res < 0) { + assert(*size > strlen(symlink)); + strcpy(buffer, symlink); + } else { + assert(*size > (strlen(symlink) + 1 + strlen(temp_buffer))); + snprintf(buffer, *size-1, "%s/%s", dirname(symlink), temp_buffer); + } + *size = strlen(buffer); + return 0; + + /* case #3, relative path without usage of '.', such as invocations in Node test suite. */ + } else if (strchr(argv[0], '/') != NULL) { + /* Get the current working directory to resolve the relative path. 
*/ + snprintf(cwd, PATH_MAX-1, "/proc/%lu/cwd", (unsigned long) getpid()); + + /* This is always a symlink, resolve it. */ + res = readlink(cwd, cwdl, sizeof(cwdl) - 1); + if (res < 0) + return -errno; + + snprintf(symlink, PATH_MAX-1, "%s%s", cwdl, argv[0]); + + res = readlink(symlink, temp_buffer, PATH_MAX-1); + if (res < 0) { + assert(*size > strlen(symlink)); + strcpy(buffer, symlink); + } else { + assert(*size > (strlen(symlink) + 1 + strlen(temp_buffer))); + snprintf(buffer, *size-1, "%s/%s", dirname(symlink), temp_buffer); + } + *size = strlen(buffer); + return 0; + /* Usage of absolute filename with location exported in PATH */ + } else { + char clonedpath[8192]; /* assume 8k buffer will fit PATH */ + char *token = NULL; + struct stat statstruct; + + /* Get the paths. */ + char *path = getenv("PATH"); + if(sizeof(clonedpath) <= strlen(path)) + return -EINVAL; + + /* Get a local copy. */ + strcpy(clonedpath, path); + + /* Tokenize. */ + token = strtok(clonedpath, ":"); + + /* Get current working directory. (may be required in the loop). */ + snprintf(cwd, PATH_MAX-1, "/proc/%lu/cwd", (unsigned long) getpid()); + res = readlink(cwd, cwdl, sizeof(cwdl) - 1); + if (res < 0) + return -errno; + /* Run through the tokens, append our executable file name with each, + * and see which one succeeds. Exit on first match. */ + while(token != NULL) { + if (token[0] == '.') { + /* Path contains a token relative to current directory. */ + char *relative = strchr(token, '/'); + if (relative != NULL) + /* A path which is not current directory. */ + snprintf(symlink, PATH_MAX-1, "%s%s/%s", cwdl, relative+1, ps.pr_fname); + else + snprintf(symlink, PATH_MAX-1, "%s%s", cwdl, ps.pr_fname); + if (stat(symlink, &statstruct) != -1) { + /* File exists. Resolve if it is a link. 
*/ + res = readlink(symlink, temp_buffer, PATH_MAX-1); + if (res < 0) { + assert(*size > strlen(symlink)); + strcpy(buffer, symlink); + } else { + assert(*size > (strlen(symlink) + 1 + strlen(temp_buffer))); + snprintf(buffer, *size-1, "%s/%s", dirname(symlink), temp_buffer); + } + *size = strlen(buffer); + return 0; + } + + /* Absolute path names. */ + } else { + snprintf(symlink, PATH_MAX-1, "%s/%s", token, ps.pr_fname); + if (stat(symlink, &statstruct) != -1) { + res = readlink(symlink, temp_buffer, PATH_MAX-1); + if (res < 0) { + assert(*size > strlen(symlink)); + strcpy(buffer, symlink); + } else { + assert(*size > (strlen(symlink) + 1 + strlen(temp_buffer))); + snprintf(buffer, *size-1, "%s/%s", dirname(symlink), temp_buffer); + } + *size = strlen(buffer); + return 0; + } + } + token = strtok(NULL, ":"); + } + /* Out of tokens (path entries), and no match found */ + return -EINVAL; + } } @@ -128,8 +501,369 @@ } +static char *uv__rawname(char *cp) { + static char rawbuf[FILENAME_MAX+1]; + char *dp = rindex(cp, '/'); + + if (dp == 0) + return 0; + + *dp = 0; + strcpy(rawbuf, cp); + *dp = '/'; + strcat(rawbuf, "/r"); + strcat(rawbuf, dp+1); + return rawbuf; +} + + +/* + * Determine whether given pathname is a directory + * Returns 0 if the path is a directory, -1 if not + * + * Note: Opportunity here for more detailed error information but + * that requires changing callers of this function as well + */ +static int uv__path_is_a_directory(char* filename) { + struct stat statbuf; + + if (stat(filename, &statbuf) < 0) + return -1; /* failed: not a directory, assume it is a file */ + + if (statbuf.st_type == VDIR) + return 0; + + return -1; +} + + +/* + * Check whether AHAFS is mounted. 
+ * Returns 0 if AHAFS is mounted, or an error code < 0 on failure + */ +static int uv__is_ahafs_mounted(void){ + int rv, i = 2; + struct vmount *p; + int size_multiplier = 10; + size_t siz = sizeof(struct vmount)*size_multiplier; + struct vmount *vmt; + const char *dev = "/aha"; + char *obj, *stub; + + p = malloc(siz); + if (p == NULL) + return -errno; + + /* Retrieve all mounted filesystems */ + rv = mntctl(MCTL_QUERY, siz, (char*)p); + if (rv < 0) + return -errno; + if (rv == 0) { + /* buffer was not large enough, reallocate to correct size */ + siz = *(int*)p; + free(p); + p = malloc(siz); + if (p == NULL) + return -errno; + rv = mntctl(MCTL_QUERY, siz, (char*)p); + if (rv < 0) + return -errno; + } + + /* Look for dev in filesystems mount info */ + for(vmt = p, i = 0; i < rv; i++) { + obj = vmt2dataptr(vmt, VMT_OBJECT); /* device */ + stub = vmt2dataptr(vmt, VMT_STUB); /* mount point */ + + if (EQ(obj, dev) || EQ(uv__rawname(obj), dev) || EQ(stub, dev)) { + free(p); /* Found a match */ + return 0; + } + vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length); + } + + /* /aha is required for monitoring filesystem changes */ + return -1; +} + +/* + * Recursive call to mkdir() to create intermediate folders, if any + * Returns code from mkdir call + */ +static int uv__makedir_p(const char *dir) { + char tmp[256]; + char *p = NULL; + size_t len; + int err; + + snprintf(tmp, sizeof(tmp),"%s",dir); + len = strlen(tmp); + if (tmp[len - 1] == '/') + tmp[len - 1] = 0; + for (p = tmp + 1; *p; p++) { + if (*p == '/') { + *p = 0; + err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + if(err != 0) + return err; + *p = '/'; + } + } + return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); +} + +/* + * Creates necessary subdirectories in the AIX Event Infrastructure + * file system for monitoring the object specified. 
+ * Returns code from mkdir call + */ +static int uv__make_subdirs_p(const char *filename) { + char cmd[2048]; + char *p; + int rc = 0; + + /* Strip off the monitor file name */ + p = strrchr(filename, '/'); + + if (p == NULL) + return 0; + + if (uv__path_is_a_directory((char*)filename) == 0) { + sprintf(cmd, "/aha/fs/modDir.monFactory"); + } else { + sprintf(cmd, "/aha/fs/modFile.monFactory"); + } + + strncat(cmd, filename, (p - filename)); + rc = uv__makedir_p(cmd); + + if (rc == -1 && errno != EEXIST){ + return -errno; + } + + return rc; +} + + +/* + * Checks if /aha is mounted, then proceeds to set up the monitoring + * objects for the specified file. + * Returns 0 on success, or an error code < 0 on failure + */ +static int uv__setup_ahafs(const char* filename, int *fd) { + int rc = 0; + char mon_file_write_string[RDWR_BUF_SIZE]; + char mon_file[PATH_MAX]; + int file_is_directory = 0; /* -1 == NO, 0 == YES */ + + /* Create monitor file name for object */ + file_is_directory = uv__path_is_a_directory((char*)filename); + + if (file_is_directory == 0) + sprintf(mon_file, "/aha/fs/modDir.monFactory"); + else + sprintf(mon_file, "/aha/fs/modFile.monFactory"); + + if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX) + return -ENAMETOOLONG; + + /* Make the necessary subdirectories for the monitor file */ + rc = uv__make_subdirs_p(filename); + if (rc == -1 && errno != EEXIST) + return rc; + + strcat(mon_file, filename); + strcat(mon_file, ".mon"); + + *fd = 0; errno = 0; + + /* Open the monitor file, creating it if necessary */ + *fd = open(mon_file, O_CREAT|O_RDWR); + if (*fd < 0) + return -errno; + + /* Write out the monitoring specifications. 
+ * In this case, we are monitoring for a state change event type + * CHANGED=YES + * We will be waiting in select call, rather than a read: + * WAIT_TYPE=WAIT_IN_SELECT + * We only want minimal information for files: + * INFO_LVL=1 + * For directories, we want more information to track what file + * caused the change + * INFO_LVL=2 + */ + + if (file_is_directory == 0) + sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2"); + else + sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1"); + + rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1); + if (rc < 0) + return -errno; + + return 0; +} + +/* + * Skips a specified number of lines in the buffer passed in. + * Walks the buffer pointed to by p and attempts to skip n lines. + * Returns the total number of lines skipped + */ +static int uv__skip_lines(char **p, int n) { + int lines = 0; + + while(n > 0) { + *p = strchr(*p, '\n'); + if (!p) + return lines; + + (*p)++; + n--; + lines++; + } + return lines; +} + + +/* + * Parse the event occurrence data to figure out what event just occurred + * and take proper action. + * + * The buf is a pointer to the buffer containing the event occurrence data + * Returns 0 on success, -1 if unrecoverable error in parsing + * + */ +static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) { + int evp_rc, i; + char *p; + char filename[PATH_MAX]; /* To be used when handling directories */ + + p = buf; + *events = 0; + + /* Clean the filename buffer*/ + for(i = 0; i < PATH_MAX; i++) { + filename[i] = 0; + } + i = 0; + + /* Check for BUF_WRAP */ + if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) { + assert(0 && "Buffer wrap detected, Some event occurrences lost!"); + return 0; + } + + /* Since we are using the default buffer size (4K), and have specified + * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions. 
Applications + * should check for this keyword if they are using an INFO_LVL of 2 or + * higher, and have a buffer size of <= 4K + */ + + /* Skip to RC_FROM_EVPROD */ + if (uv__skip_lines(&p, 9) != 9) + return -1; + + if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) { + if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */ + if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) { + /* The directory is no longer available for monitoring */ + *events = UV_RENAME; + handle->dir_filename = NULL; + } else { + /* A file was added/removed inside the directory */ + *events = UV_CHANGE; + + /* Get the EVPROD_INFO */ + if (uv__skip_lines(&p, 1) != 1) + return -1; + + /* Scan out the name of the file that triggered the event*/ + if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) { + handle->dir_filename = strdup((const char*)&filename); + } else + return -1; + } + } else { /* Regular File */ + if (evp_rc == AHAFS_MODFILE_RENAME) + *events = UV_RENAME; + else + *events = UV_CHANGE; + } + } + else + return -1; + + return 0; +} + + +/* This is the internal callback */ +static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) { + char result_data[RDWR_BUF_SIZE]; + int bytes, rc = 0; + uv_fs_event_t* handle; + int events = 0; + int i = 0; + char fname[PATH_MAX]; + char *p; + + handle = container_of(event_watch, uv_fs_event_t, event_watcher); + + /* Clean all the buffers*/ + for(i = 0; i < PATH_MAX; i++) { + fname[i] = 0; + } + i = 0; + + /* At this point, we assume that polling has been done on the + * file descriptor, so we can just read the AHAFS event occurrence + * data and parse its results without having to block anything + */ + bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0); + + assert((bytes <= 0) && "uv__ahafs_event - Error reading monitor file"); + + /* Parse the data */ + if(bytes > 0) + rc = uv__parse_data(result_data, &events, handle); + + /* For 
directory changes, the name of the files that triggered the change + * are never absolute pathnames + */ + if (uv__path_is_a_directory(handle->path) == 0) { + p = handle->dir_filename; + while(*p != NULL){ + fname[i]= *p; + i++; + p++; + } + } else { + /* For file changes, figure out whether filename is absolute or not */ + if (handle->path[0] == '/') { + p = strrchr(handle->path, '/'); + p++; + + while(*p != NULL) { + fname[i]= *p; + i++; + p++; + } + } + } + + /* Unrecoverable error */ + if (rc == -1) + return; + else /* Call the actual JavaScript callback function */ + handle->cb(handle, (const char*)&fname, events, 0); +} + + int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { - return -ENOSYS; + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; } @@ -137,17 +871,99 @@ uv_fs_event_cb cb, const char* filename, unsigned int flags) { - return -ENOSYS; + int fd, rc, i = 0, res = 0; + char cwd[PATH_MAX]; + char absolute_path[PATH_MAX]; + char fname[PATH_MAX]; + char *p; + + /* Clean all the buffers*/ + for(i = 0; i < PATH_MAX; i++) { + cwd[i] = 0; + absolute_path[i] = 0; + fname[i] = 0; + } + i = 0; + + /* Figure out whether filename is absolute or not */ + if (filename[0] == '/') { + /* We have absolute pathname, create the relative pathname*/ + sprintf(absolute_path, filename); + p = strrchr(filename, '/'); + p++; + } else { + if (filename[0] == '.' 
&& filename[1] == '/') { + /* We have a relative pathname, compose the absolute pathname */ + sprintf(fname, filename); + snprintf(cwd, PATH_MAX-1, "/proc/%lu/cwd", (unsigned long) getpid()); + res = readlink(cwd, absolute_path, sizeof(absolute_path) - 1); + if (res < 0) + return res; + p = strrchr(absolute_path, '/'); + p++; + p++; + } else { + /* We have a relative pathname, compose the absolute pathname */ + sprintf(fname, filename); + snprintf(cwd, PATH_MAX-1, "/proc/%lu/cwd", (unsigned long) getpid()); + res = readlink(cwd, absolute_path, sizeof(absolute_path) - 1); + if (res < 0) + return res; + p = strrchr(absolute_path, '/'); + p++; + } + /* Copy to filename buffer */ + while(filename[i] != NULL) { + *p = filename[i]; + i++; + p++; + } + } + + if (uv__is_ahafs_mounted() < 0) /* /aha checks failed */ + return UV_ENOSYS; + + /* Setup ahafs */ + rc = uv__setup_ahafs((const char *)absolute_path, &fd); + if (rc != 0) + return rc; + + /* Setup/Initialize all the libuv routines */ + uv__handle_start(handle); + uv__io_init(&handle->event_watcher, uv__ahafs_event, fd); + handle->path = strdup((const char*)&absolute_path); + handle->cb = cb; + + uv__io_start(handle->loop, &handle->event_watcher, UV__POLLIN); + + return 0; } int uv_fs_event_stop(uv_fs_event_t* handle) { - return -ENOSYS; + + if (!uv__is_active(handle)) + return 0; + + uv__io_close(handle->loop, &handle->event_watcher); + uv__handle_stop(handle); + + if (uv__path_is_a_directory(handle->path) == 0) { + free(handle->dir_filename); + handle->dir_filename = NULL; + } + + free(handle->path); + handle->path = NULL; + uv__close(handle->event_watcher.fd); + handle->event_watcher.fd = -1; + + return 0; } void uv__fs_event_close(uv_fs_event_t* handle) { - UNREACHABLE(); + uv_fs_event_stop(handle); } @@ -175,7 +991,7 @@ int err; int fd; - (void) snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid()); + snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid()); fd = open(pp, O_RDONLY); 
if (fd == -1) @@ -397,3 +1213,21 @@ free(addresses); } + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct pollfd* events; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct pollfd*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events == NULL) + return; + + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].fd == fd) + events[i].fd = -1; +} diff -Nru nodejs-0.11.13/deps/uv/src/unix/async.c nodejs-0.11.15/deps/uv/src/unix/async.c --- nodejs-0.11.13/deps/uv/src/unix/async.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/async.c 2015-01-20 21:22:17.000000000 +0000 @@ -231,7 +231,7 @@ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", pipefd[0]); fd = uv__open_cloexec(buf, O_RDWR); - if (fd != -1) { + if (fd >= 0) { uv__close(pipefd[0]); uv__close(pipefd[1]); pipefd[0] = fd; diff -Nru nodejs-0.11.13/deps/uv/src/unix/core.c nodejs-0.11.15/deps/uv/src/unix/core.c --- nodejs-0.11.13/deps/uv/src/unix/core.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/core.c 2015-01-20 21:22:17.000000000 +0000 @@ -70,6 +70,10 @@ # endif #endif +#ifdef _AIX +#include <sys/ioctl.h> +#endif + static void uv__run_pending(uv_loop_t* loop); /* Verify that uv_buf_t is ABI-compatible with struct iovec. 
*/ @@ -159,6 +163,33 @@ uv__make_close_pending(handle); } +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) { + int r; + int fd; + socklen_t len; + + if (handle == NULL || value == NULL) + return -EINVAL; + + if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE) + fd = uv__stream_fd((uv_stream_t*) handle); + else if (handle->type == UV_UDP) + fd = ((uv_udp_t *) handle)->io_watcher.fd; + else + return -ENOTSUP; + + len = sizeof(*value); + + if (*value == 0) + r = getsockopt(fd, SOL_SOCKET, optname, value, &len); + else + r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len); + + if (r < 0) + return -errno; + + return 0; +} void uv__make_close_pending(uv_handle_t* handle) { assert(handle->flags & UV_CLOSING); @@ -279,13 +310,11 @@ uv__update_time(loop); while (r != 0 && loop->stop_flag == 0) { - UV_TICK_START(loop, mode); - uv__update_time(loop); uv__run_timers(loop); + uv__run_pending(loop); uv__run_idle(loop); uv__run_prepare(loop); - uv__run_pending(loop); timeout = 0; if ((mode & UV_RUN_NOWAIT) == 0) @@ -296,7 +325,7 @@ uv__run_closing_handles(loop); if (mode == UV_RUN_ONCE) { - /* UV_RUN_ONCE implies forward progess: at least one callback must have + /* UV_RUN_ONCE implies forward progress: at least one callback must have * been invoked when it returns. uv__io_poll() can return without doing * I/O (meaning: no callbacks) when its timeout expires - which means we * have pending timers that satisfy the forward progress constraint. 
@@ -309,7 +338,6 @@ } r = uv__loop_alive(loop); - UV_TICK_STOP(loop, mode); if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT)) break; @@ -444,7 +472,8 @@ } -#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \ + defined(_AIX) int uv__nonblock(int fd, int set) { int r; @@ -605,7 +634,7 @@ if (getcwd(buffer, *size) == NULL) return -errno; - *size = strlen(buffer) + 1; + *size = strlen(buffer); return 0; } @@ -630,6 +659,36 @@ } +int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { + int fd_out; + + switch (handle->type) { + case UV_TCP: + case UV_NAMED_PIPE: + case UV_TTY: + fd_out = uv__stream_fd((uv_stream_t*) handle); + break; + + case UV_UDP: + fd_out = ((uv_udp_t *) handle)->io_watcher.fd; + break; + + case UV_POLL: + fd_out = ((uv_poll_t *) handle)->io_watcher.fd; + break; + + default: + return -EINVAL; + } + + if (uv__is_closing(handle) || fd_out == -1) + return -EBADF; + + *fd = fd_out; + return 0; +} + + static void uv__run_pending(uv_loop_t* loop) { QUEUE* q; uv__io_t* w; diff -Nru nodejs-0.11.13/deps/uv/src/unix/darwin.c nodejs-0.11.15/deps/uv/src/unix/darwin.c --- nodejs-0.11.13/deps/uv/src/unix/darwin.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/darwin.c 2015-01-20 21:22:17.000000000 +0000 @@ -53,9 +53,11 @@ uint64_t uv__hrtime(uv_clocktype_t type) { - mach_timebase_info_data_t info; + static mach_timebase_info_data_t info; - if (mach_timebase_info(&info) != KERN_SUCCESS) + if ((ACCESS_ONCE(uint32_t, info.numer) == 0 || + ACCESS_ONCE(uint32_t, info.denom) == 0) && + mach_timebase_info(&info) != KERN_SUCCESS) abort(); return mach_absolute_time() * info.numer / info.denom; diff -Nru nodejs-0.11.13/deps/uv/src/unix/darwin-proctitle.c nodejs-0.11.15/deps/uv/src/unix/darwin-proctitle.c --- nodejs-0.11.13/deps/uv/src/unix/darwin-proctitle.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/darwin-proctitle.c 2015-01-20 
21:22:17.000000000 +0000 @@ -36,7 +36,7 @@ int err; /* pthread_setname_np() first appeared in OS X 10.6 and iOS 3.2. */ - dynamic_pthread_setname_np = dlsym(RTLD_DEFAULT, "pthread_setname_np"); + *(void **)(&dynamic_pthread_setname_np) = dlsym(RTLD_DEFAULT, "pthread_setname_np"); if (dynamic_pthread_setname_np == NULL) return -ENOSYS; @@ -94,13 +94,13 @@ if (application_services_handle == NULL || core_foundation_handle == NULL) goto out; - pCFStringCreateWithCString = + *(void **)(&pCFStringCreateWithCString) = dlsym(core_foundation_handle, "CFStringCreateWithCString"); - pCFBundleGetBundleWithIdentifier = + *(void **)(&pCFBundleGetBundleWithIdentifier) = dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier"); - pCFBundleGetDataPointerForName = + *(void **)(&pCFBundleGetDataPointerForName) = dlsym(core_foundation_handle, "CFBundleGetDataPointerForName"); - pCFBundleGetFunctionPointerForName = + *(void **)(&pCFBundleGetFunctionPointerForName) = dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName"); if (pCFStringCreateWithCString == NULL || @@ -118,14 +118,14 @@ if (launch_services_bundle == NULL) goto out; - pLSGetCurrentApplicationASN = + *(void **)(&pLSGetCurrentApplicationASN) = pCFBundleGetFunctionPointerForName(launch_services_bundle, S("_LSGetCurrentApplicationASN")); if (pLSGetCurrentApplicationASN == NULL) goto out; - pLSSetApplicationInformationItem = + *(void **)(&pLSSetApplicationInformationItem) = pCFBundleGetFunctionPointerForName(launch_services_bundle, S("_LSSetApplicationInformationItem")); @@ -138,9 +138,9 @@ if (display_name_key == NULL || *display_name_key == NULL) goto out; - pCFBundleGetInfoDictionary = dlsym(core_foundation_handle, + *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle, "CFBundleGetInfoDictionary"); - pCFBundleGetMainBundle = dlsym(core_foundation_handle, + *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle, "CFBundleGetMainBundle"); if (pCFBundleGetInfoDictionary == NULL 
|| pCFBundleGetMainBundle == NULL) goto out; @@ -152,13 +152,13 @@ if (hi_services_bundle == NULL) goto out; - pSetApplicationIsDaemon = pCFBundleGetFunctionPointerForName( + *(void **)(&pSetApplicationIsDaemon) = pCFBundleGetFunctionPointerForName( hi_services_bundle, S("SetApplicationIsDaemon")); - pLSApplicationCheckIn = pCFBundleGetFunctionPointerForName( + *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName( launch_services_bundle, S("_LSApplicationCheckIn")); - pLSSetApplicationLaunchServicesServerConnectionStatus = + *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) = pCFBundleGetFunctionPointerForName( launch_services_bundle, S("_LSSetApplicationLaunchServicesServerConnectionStatus")); diff -Nru nodejs-0.11.13/deps/uv/src/unix/dl.c nodejs-0.11.15/deps/uv/src/unix/dl.c --- nodejs-0.11.13/deps/uv/src/unix/dl.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/dl.c 2015-01-20 21:22:17.000000000 +0000 @@ -59,7 +59,7 @@ } -const char* uv_dlerror(uv_lib_t* lib) { +const char* uv_dlerror(const uv_lib_t* lib) { return lib->errmsg ? 
lib->errmsg : "no error"; } diff -Nru nodejs-0.11.13/deps/uv/src/unix/freebsd.c nodejs-0.11.15/deps/uv/src/unix/freebsd.c --- nodejs-0.11.13/deps/uv/src/unix/freebsd.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/freebsd.c 2015-01-20 21:22:17.000000000 +0000 @@ -298,7 +298,7 @@ for (i = 0; i < numcpus; i++) { cpu_info = &(*cpu_infos)[i]; - + cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier; cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier; cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier; diff -Nru nodejs-0.11.13/deps/uv/src/unix/fs.c nodejs-0.11.15/deps/uv/src/unix/fs.c --- nodejs-0.11.13/deps/uv/src/unix/fs.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/fs.c 2015-01-20 21:22:17.000000000 +0000 @@ -38,7 +38,6 @@ #include <sys/stat.h> #include <sys/time.h> #include <pthread.h> -#include <dirent.h> #include <unistd.h> #include <fcntl.h> #include <utime.h> @@ -214,9 +213,23 @@ } +static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { + return mkdtemp((char*) req->path) ? 0 : -1; +} + + static ssize_t uv__fs_read(uv_fs_t* req) { ssize_t result; +#if defined(_AIX) + struct stat buf; + if(fstat(req->file, &buf)) + return -1; + if(S_ISDIR(buf.st_mode)) { + errno = EISDIR; + return -1; + } +#endif /* defined(_AIX) */ if (req->off < 0) { if (req->nbufs == 1) result = read(req->file, req->bufs[0].base, req->bufs[0].len); @@ -282,64 +295,47 @@ #if defined(__OpenBSD__) || (defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)) -static int uv__fs_readdir_filter(struct dirent* dent) { +static int uv__fs_scandir_filter(uv__dirent_t* dent) { #else -static int uv__fs_readdir_filter(const struct dirent* dent) { +static int uv__fs_scandir_filter(const uv__dirent_t* dent) { #endif return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0; } -/* This should have been called uv__fs_scandir(). 
*/ -static ssize_t uv__fs_readdir(uv_fs_t* req) { - struct dirent **dents; +static ssize_t uv__fs_scandir(uv_fs_t* req) { + uv__dirent_t **dents; int saved_errno; - size_t off; - size_t len; - char *buf; - int i; int n; dents = NULL; - n = scandir(req->path, &dents, uv__fs_readdir_filter, alphasort); + n = scandir(req->path, &dents, uv__fs_scandir_filter, alphasort); + + /* NOTE: We will use nbufs as an index field */ + req->nbufs = 0; if (n == 0) goto out; /* osx still needs to deallocate some memory */ else if (n == -1) return n; - len = 0; - - for (i = 0; i < n; i++) - len += strlen(dents[i]->d_name) + 1; - - buf = malloc(len); + req->ptr = dents; - if (buf == NULL) { - errno = ENOMEM; - n = -1; - goto out; - } - - off = 0; - - for (i = 0; i < n; i++) { - len = strlen(dents[i]->d_name) + 1; - memcpy(buf + off, dents[i]->d_name, len); - off += len; - } - - req->ptr = buf; + return n; out: saved_errno = errno; if (dents != NULL) { + int i; + for (i = 0; i < n; i++) free(dents[i]); free(dents); } errno = saved_errno; + req->ptr = NULL; + return n; } @@ -683,7 +679,8 @@ dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec; dst->st_flags = src->st_flags; dst->st_gen = src->st_gen; -#elif defined(_BSD_SOURCE) || defined(_SVID_SOURCE) || defined(_XOPEN_SOURCE) +#elif !defined(_AIX) && \ + (defined(_BSD_SOURCE) || defined(_SVID_SOURCE) || defined(_XOPEN_SOURCE)) dst->st_atim.tv_sec = src->st_atim.tv_sec; dst->st_atim.tv_nsec = src->st_atim.tv_nsec; dst->st_mtim.tv_sec = src->st_mtim.tv_sec; @@ -766,6 +763,7 @@ break; switch (req->fs_type) { + X(ACCESS, access(req->path, req->flags)); X(CHMOD, chmod(req->path, req->mode)); X(CHOWN, chown(req->path, req->uid, req->gid)); X(CLOSE, close(req->file)); @@ -779,8 +777,9 @@ X(LSTAT, uv__fs_lstat(req->path, &req->statbuf)); X(LINK, link(req->path, req->new_path)); X(MKDIR, mkdir(req->path, req->mode)); + X(MKDTEMP, uv__fs_mkdtemp(req)); X(READ, uv__fs_read(req)); - X(READDIR, uv__fs_readdir(req)); + X(SCANDIR, 
uv__fs_scandir(req)); X(READLINK, uv__fs_readlink(req)); X(RENAME, rename(req->path, req->new_path)); X(RMDIR, rmdir(req->path)); @@ -855,6 +854,18 @@ } +int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + INIT(ACCESS); + PATH; + req->flags = flags; + POST; +} + + int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, @@ -991,6 +1002,18 @@ } +int uv_fs_mkdtemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb) { + INIT(MKDTEMP); + req->path = strdup(tpl); + if (req->path == NULL) + return -ENOMEM; + POST; +} + + int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, @@ -1029,12 +1052,12 @@ } -int uv_fs_readdir(uv_loop_t* loop, +int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, uv_fs_cb cb) { - INIT(READDIR); + INIT(SCANDIR); PATH; req->flags = flags; POST; @@ -1156,6 +1179,9 @@ req->path = NULL; req->new_path = NULL; + if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL) + uv__fs_scandir_cleanup(req); + if (req->ptr != &req->statbuf) free(req->ptr); req->ptr = NULL; diff -Nru nodejs-0.11.13/deps/uv/src/unix/fsevents.c nodejs-0.11.15/deps/uv/src/unix/fsevents.c --- nodejs-0.11.13/deps/uv/src/unix/fsevents.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/fsevents.c 2015-01-20 21:22:17.000000000 +0000 @@ -525,7 +525,7 @@ err = -ENOENT; #define V(handle, symbol) \ do { \ - p ## symbol = dlsym((handle), #symbol); \ + *(void **)(&p ## symbol) = dlsym((handle), #symbol); \ if (p ## symbol == NULL) \ goto out; \ } \ diff -Nru nodejs-0.11.13/deps/uv/src/unix/getaddrinfo.c nodejs-0.11.15/deps/uv/src/unix/getaddrinfo.c --- nodejs-0.11.13/deps/uv/src/unix/getaddrinfo.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/getaddrinfo.c 2015-01-20 21:22:17.000000000 +0000 @@ -18,6 +18,13 @@ * IN THE SOFTWARE. */ +/* Expose glibc-specific EAI_* error codes. Needs to be defined before we + * include any headers. 
+ */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + #include "uv.h" #include "internal.h" @@ -26,6 +33,66 @@ #include <stdlib.h> #include <string.h> +/* EAI_* constants. */ +#include <netdb.h> + + +int uv__getaddrinfo_translate_error(int sys_err) { + switch (sys_err) { + case 0: return 0; +#if defined(EAI_ADDRFAMILY) + case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY; +#endif +#if defined(EAI_AGAIN) + case EAI_AGAIN: return UV_EAI_AGAIN; +#endif +#if defined(EAI_BADFLAGS) + case EAI_BADFLAGS: return UV_EAI_BADFLAGS; +#endif +#if defined(EAI_BADHINTS) + case EAI_BADHINTS: return UV_EAI_BADHINTS; +#endif +#if defined(EAI_CANCELED) + case EAI_CANCELED: return UV_EAI_CANCELED; +#endif +#if defined(EAI_FAIL) + case EAI_FAIL: return UV_EAI_FAIL; +#endif +#if defined(EAI_FAMILY) + case EAI_FAMILY: return UV_EAI_FAMILY; +#endif +#if defined(EAI_MEMORY) + case EAI_MEMORY: return UV_EAI_MEMORY; +#endif +#if defined(EAI_NODATA) + case EAI_NODATA: return UV_EAI_NODATA; +#endif +#if defined(EAI_NONAME) +# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME + case EAI_NONAME: return UV_EAI_NONAME; +# endif +#endif +#if defined(EAI_OVERFLOW) + case EAI_OVERFLOW: return UV_EAI_OVERFLOW; +#endif +#if defined(EAI_PROTOCOL) + case EAI_PROTOCOL: return UV_EAI_PROTOCOL; +#endif +#if defined(EAI_SERVICE) + case EAI_SERVICE: return UV_EAI_SERVICE; +#endif +#if defined(EAI_SOCKTYPE) + case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE; +#endif +#if defined(EAI_SYSTEM) + case EAI_SYSTEM: return -errno; +#endif + } + assert(!"unknown EAI_* error code"); + abort(); + return 0; /* Pacify compiler. 
*/ +} + static void uv__getaddrinfo_work(struct uv__work* w) { uv_getaddrinfo_t* req; @@ -115,10 +182,8 @@ len += service_len; } - if (hostname) { + if (hostname) req->hostname = memcpy(buf + len, hostname, hostname_len); - len += hostname_len; - } uv__work_submit(loop, &req->work_req, diff -Nru nodejs-0.11.13/deps/uv/src/unix/getnameinfo.c nodejs-0.11.15/deps/uv/src/unix/getnameinfo.c --- nodejs-0.11.13/deps/uv/src/unix/getnameinfo.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/getnameinfo.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,114 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
+*/ + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +#include "uv.h" +#include "internal.h" + + +static void uv__getnameinfo_work(struct uv__work* w) { + uv_getnameinfo_t* req; + int err; + socklen_t salen; + + req = container_of(w, uv_getnameinfo_t, work_req); + + if (req->storage.ss_family == AF_INET) + salen = sizeof(struct sockaddr_in); + else if (req->storage.ss_family == AF_INET6) + salen = sizeof(struct sockaddr_in6); + else + abort(); + + err = getnameinfo((struct sockaddr*) &req->storage, + salen, + req->host, + sizeof(req->host), + req->service, + sizeof(req->service), + req->flags); + req->retcode = uv__getaddrinfo_translate_error(err); +} + +static void uv__getnameinfo_done(struct uv__work* w, int status) { + uv_getnameinfo_t* req; + char* host; + char* service; + + req = container_of(w, uv_getnameinfo_t, work_req); + uv__req_unregister(req->loop, req); + host = service = NULL; + + if (status == -ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } else if (req->retcode == 0) { + host = req->host; + service = req->service; + } + + req->getnameinfo_cb(req, req->retcode, host, service); +} + +/* +* Entry point for getnameinfo +* return 0 if a callback will be made +* return error code if validation fails +*/ +int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags) { + if (req == NULL || getnameinfo_cb == NULL || addr == NULL) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in)); + } else if (addr->sa_family == AF_INET6) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in6)); + } else { + return UV_EINVAL; + } + + uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO); + + req->getnameinfo_cb = getnameinfo_cb; + req->flags = flags; + req->type = UV_GETNAMEINFO; + req->loop = loop; + req->retcode = 0; + + uv__work_submit(loop, + 
&req->work_req, + uv__getnameinfo_work, + uv__getnameinfo_done); + + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/src/unix/internal.h nodejs-0.11.15/deps/uv/src/unix/internal.h --- nodejs-0.11.13/deps/uv/src/unix/internal.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/internal.h 2015-01-20 21:22:17.000000000 +0000 @@ -42,6 +42,12 @@ # include <port.h> #endif /* __sun */ +#if defined(_AIX) +#define reqevents events +#define rtnevents revents +#include <sys/poll.h> +#endif /* _AIX */ + #if defined(__APPLE__) && !TARGET_OS_IPHONE # include <CoreServices/CoreServices.h> #endif @@ -89,7 +95,7 @@ # define UV__POLLHUP UV__EPOLLHUP #endif -#if defined(__sun) +#if defined(__sun) || defined(_AIX) # define UV__POLLIN POLLIN # define UV__POLLOUT POLLOUT # define UV__POLLERR POLLERR @@ -137,7 +143,12 @@ UV_TCP_NODELAY = 0x400, /* Disable Nagle. */ UV_TCP_KEEPALIVE = 0x800, /* Turn on keep-alive. */ UV_TCP_SINGLE_ACCEPT = 0x1000, /* Only accept() when idle. */ - UV_HANDLE_IPV6 = 0x2000 /* Handle is bound to a IPv6 socket. */ + UV_HANDLE_IPV6 = 0x10000 /* Handle is bound to a IPv6 socket. 
*/ +}; + +/* loop flags */ +enum { + UV_LOOP_BLOCK_SIGPROF = 1 }; typedef enum { @@ -210,13 +221,6 @@ void uv__signal_global_once_init(void); void uv__signal_loop_cleanup(uv_loop_t* loop); -/* thread pool */ -void uv__work_submit(uv_loop_t* loop, - struct uv__work *w, - void (*work)(struct uv__work *w), - void (*done)(struct uv__work *w, int status)); -void uv__work_done(uv_async_t* handle); - /* platform specific */ uint64_t uv__hrtime(uv_clocktype_t type); int uv__kqueue_init(uv_loop_t* loop); @@ -307,12 +311,4 @@ return s + 1; } - -#ifdef HAVE_DTRACE -#include "uv-dtrace.h" -#else -#define UV_TICK_START(arg0, arg1) -#define UV_TICK_STOP(arg0, arg1) -#endif - #endif /* UV_UNIX_INTERNAL_H_ */ diff -Nru nodejs-0.11.13/deps/uv/src/unix/kqueue.c nodejs-0.11.15/deps/uv/src/unix/kqueue.c --- nodejs-0.11.13/deps/uv/src/unix/kqueue.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/kqueue.c 2015-01-20 21:22:17.000000000 +0000 @@ -55,9 +55,11 @@ unsigned int nevents; unsigned int revents; QUEUE* q; + uv__io_t* w; + sigset_t* pset; + sigset_t set; uint64_t base; uint64_t diff; - uv__io_t* w; int filter; int fflags; int count; @@ -117,6 +119,13 @@ w->events = w->pevents; } + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + assert(timeout >= -1); base = loop->time; count = 48; /* Benchmarks suggest this gives the best throughput. */ @@ -127,6 +136,9 @@ spec.tv_nsec = (timeout % 1000) * 1000000; } + if (pset != NULL) + pthread_sigmask(SIG_BLOCK, pset, NULL); + nfds = kevent(loop->backend_fd, events, nevents, @@ -134,6 +146,9 @@ ARRAY_SIZE(events), timeout == -1 ? NULL : &spec); + if (pset != NULL) + pthread_sigmask(SIG_UNBLOCK, pset, NULL); + /* Update loop->time unconditionally. It's tempting to skip the update when * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the * operating system didn't reschedule our process while in the syscall. 
@@ -377,7 +392,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { if (!uv__is_active(handle)) - return -EINVAL; + return 0; uv__handle_stop(handle); diff -Nru nodejs-0.11.13/deps/uv/src/unix/linux-core.c nodejs-0.11.15/deps/uv/src/unix/linux-core.c --- nodejs-0.11.13/deps/uv/src/unix/linux-core.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/linux-core.c 2015-01-20 21:22:17.000000000 +0000 @@ -33,6 +33,7 @@ #include <sys/prctl.h> #include <sys/sysinfo.h> #include <unistd.h> +#include <signal.h> #include <fcntl.h> #include <time.h> @@ -141,6 +142,8 @@ struct uv__epoll_event e; QUEUE* q; uv__io_t* w; + sigset_t* pset; + sigset_t set; uint64_t base; uint64_t diff; int nevents; @@ -149,6 +152,7 @@ int fd; int op; int i; + static int no_epoll_wait; if (loop->nfds == 0) { assert(QUEUE_EMPTY(&loop->watcher_queue)); @@ -190,15 +194,34 @@ w->events = w->pevents; } + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + assert(timeout >= -1); base = loop->time; count = 48; /* Benchmarks suggest this gives the best throughput. */ for (;;) { - nfds = uv__epoll_wait(loop->backend_fd, - events, - ARRAY_SIZE(events), - timeout); + if (no_epoll_wait || pset != NULL) { + nfds = uv__epoll_pwait(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout, + pset); + } else { + nfds = uv__epoll_wait(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout); + if (nfds == -1 && errno == ENOSYS) { + no_epoll_wait = 1; + continue; + } + } /* Update loop->time unconditionally. It's tempting to skip the update when * timeout == 0 (i.e. 
non-blocking poll) but there is no guarantee that the @@ -731,6 +754,7 @@ return -errno; *count = 0; + *addresses = NULL; /* Count the number of interfaces */ for (ent = addrs; ent != NULL; ent = ent->ifa_next) { @@ -743,6 +767,9 @@ (*count)++; } + if (*count == 0) + return 0; + *addresses = malloc(*count * sizeof(**addresses)); if (!(*addresses)) return -ENOMEM; diff -Nru nodejs-0.11.13/deps/uv/src/unix/linux-inotify.c nodejs-0.11.15/deps/uv/src/unix/linux-inotify.c --- nodejs-0.11.13/deps/uv/src/unix/linux-inotify.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/linux-inotify.c 2015-01-20 21:22:17.000000000 +0000 @@ -231,7 +231,7 @@ struct watcher_list* w; if (!uv__is_active(handle)) - return -EINVAL; + return 0; w = find_watcher(handle->loop, handle->wd); assert(w != NULL); diff -Nru nodejs-0.11.13/deps/uv/src/unix/linux-syscalls.c nodejs-0.11.15/deps/uv/src/unix/linux-syscalls.c --- nodejs-0.11.13/deps/uv/src/unix/linux-syscalls.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/linux-syscalls.c 2015-01-20 21:22:17.000000000 +0000 @@ -21,6 +21,7 @@ #include "linux-syscalls.h" #include <unistd.h> +#include <signal.h> #include <sys/syscall.h> #include <sys/types.h> #include <errno.h> @@ -328,7 +329,7 @@ nevents, timeout, sigmask, - sizeof(*sigmask)); + _NSIG / 8); #else return errno = ENOSYS, -1; #endif diff -Nru nodejs-0.11.13/deps/uv/src/unix/linux-syscalls.h nodejs-0.11.15/deps/uv/src/unix/linux-syscalls.h --- nodejs-0.11.13/deps/uv/src/unix/linux-syscalls.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/linux-syscalls.h 2015-01-20 21:22:17.000000000 +0000 @@ -44,7 +44,7 @@ #if defined(__alpha__) # define UV__O_NONBLOCK 0x4 #elif defined(__hppa__) -# define UV__O_NONBLOCK 0x10004 +# define UV__O_NONBLOCK O_NONBLOCK #elif defined(__mips__) # define UV__O_NONBLOCK 0x80 #elif defined(__sparc__) @@ -60,7 +60,11 @@ #define UV__IN_NONBLOCK UV__O_NONBLOCK #define UV__SOCK_CLOEXEC UV__O_CLOEXEC 
-#define UV__SOCK_NONBLOCK UV__O_NONBLOCK +#if defined(SOCK_NONBLOCK) +# define UV__SOCK_NONBLOCK SOCK_NONBLOCK +#else +# define UV__SOCK_NONBLOCK UV__O_NONBLOCK +#endif /* epoll flags */ #define UV__EPOLL_CLOEXEC UV__O_CLOEXEC diff -Nru nodejs-0.11.13/deps/uv/src/unix/loop.c nodejs-0.11.15/deps/uv/src/unix/loop.c --- nodejs-0.11.13/deps/uv/src/unix/loop.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/loop.c 2015-01-20 21:22:17.000000000 +0000 @@ -89,15 +89,16 @@ void uv_loop_delete(uv_loop_t* loop) { uv_loop_t* default_loop; + int err; default_loop = default_loop_ptr; - assert(uv_loop_close(loop) == 0); + err = uv_loop_close(loop); + assert(err == 0); if (loop != default_loop) free(loop); } static int uv__loop_init(uv_loop_t* loop, int default_loop) { - unsigned int i; int err; uv__signal_global_once_init(); @@ -136,9 +137,7 @@ uv_signal_init(loop, &loop->child_watcher); uv__handle_unref(&loop->child_watcher); loop->child_watcher.flags |= UV__HANDLE_INTERNAL; - - for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++) - QUEUE_INIT(loop->process_handles + i); + QUEUE_INIT(&loop->process_handles); if (uv_rwlock_init(&loop->cloexec_lock)) abort(); @@ -193,3 +192,15 @@ loop->watchers = NULL; loop->nwatchers = 0; } + + +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) { + if (option != UV_LOOP_BLOCK_SIGNAL) + return UV_ENOSYS; + + if (va_arg(ap, int) != SIGPROF) + return UV_EINVAL; + + loop->flags |= UV_LOOP_BLOCK_SIGPROF; + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/src/unix/netbsd.c nodejs-0.11.15/deps/uv/src/unix/netbsd.c --- nodejs-0.11.13/deps/uv/src/unix/netbsd.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/netbsd.c 2015-01-20 21:22:17.000000000 +0000 @@ -38,6 +38,7 @@ #include <sys/resource.h> #include <sys/types.h> #include <sys/sysctl.h> +#include <uvm/uvm_extern.h> #include <unistd.h> #include <time.h> diff -Nru nodejs-0.11.13/deps/uv/src/unix/openbsd.c 
nodejs-0.11.15/deps/uv/src/unix/openbsd.c --- nodejs-0.11.13/deps/uv/src/unix/openbsd.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/openbsd.c 2015-01-20 21:22:17.000000000 +0000 @@ -180,29 +180,23 @@ int uv_resident_set_memory(size_t* rss) { - kvm_t *kd = NULL; - struct kinfo_proc *kinfo = NULL; - pid_t pid; - int nprocs, max_size = sizeof(struct kinfo_proc); + struct kinfo_proc kinfo; size_t page_size = getpagesize(); + size_t size = sizeof(struct kinfo_proc); + int mib[6]; - pid = getpid(); + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PID; + mib[3] = getpid(); + mib[4] = sizeof(struct kinfo_proc); + mib[5] = 1; - kd = kvm_open(NULL, _PATH_MEM, NULL, O_RDONLY, "kvm_open"); - if (kd == NULL) goto error; - - kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, max_size, &nprocs); - if (kinfo == NULL) goto error; - - *rss = kinfo->p_vm_rssize * page_size; - - kvm_close(kd); + if (sysctl(mib, 6, &kinfo, &size, NULL, 0) < 0) + return -errno; + *rss = kinfo.p_vm_rssize * page_size; return 0; - -error: - if (kd) kvm_close(kd); - return -EPERM; } @@ -267,7 +261,7 @@ } cpu_info = &(*cpu_infos)[i]; - + cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier; cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier; cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier; diff -Nru nodejs-0.11.13/deps/uv/src/unix/pipe.c nodejs-0.11.15/deps/uv/src/unix/pipe.c --- nodejs-0.11.13/deps/uv/src/unix/pipe.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/pipe.c 2015-01-20 21:22:17.000000000 +0000 @@ -44,13 +44,10 @@ struct sockaddr_un saddr; const char* pipe_fname; int sockfd; - int bound; int err; pipe_fname = NULL; sockfd = -1; - bound = 0; - err = -EINVAL; /* Already bound? */ if (uv__stream_fd(handle) >= 0) @@ -83,7 +80,6 @@ err = -EACCES; goto out; } - bound = 1; /* Success. */ handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. 
*/ @@ -91,11 +87,9 @@ return 0; out: - if (bound) { - /* unlink() before uv__close() to avoid races. */ - assert(pipe_fname != NULL); - unlink(pipe_fname); - } + /* unlink() before uv__close() to avoid races. */ + assert(pipe_fname != NULL); + unlink(pipe_fname); uv__close(sockfd); free((void*)pipe_fname); return err; @@ -158,7 +152,6 @@ int r; new_sock = (uv__stream_fd(handle) == -1); - err = -EINVAL; if (new_sock) { err = uv__socket(AF_UNIX, SOCK_STREAM, 0); diff -Nru nodejs-0.11.13/deps/uv/src/unix/process.c nodejs-0.11.15/deps/uv/src/unix/process.c --- nodejs-0.11.13/deps/uv/src/unix/process.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/process.c 2015-01-20 21:22:17.000000000 +0000 @@ -45,77 +45,70 @@ #endif -static QUEUE* uv__process_queue(uv_loop_t* loop, int pid) { - assert(pid > 0); - return loop->process_handles + pid % ARRAY_SIZE(loop->process_handles); -} - - static void uv__chld(uv_signal_t* handle, int signum) { uv_process_t* process; uv_loop_t* loop; int exit_status; int term_signal; - unsigned int i; int status; pid_t pid; QUEUE pending; - QUEUE* h; QUEUE* q; + QUEUE* h; assert(signum == SIGCHLD); QUEUE_INIT(&pending); loop = handle->loop; - for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++) { - h = loop->process_handles + i; - q = QUEUE_HEAD(h); - - while (q != h) { - process = QUEUE_DATA(q, uv_process_t, queue); - q = QUEUE_NEXT(q); - - do - pid = waitpid(process->pid, &status, WNOHANG); - while (pid == -1 && errno == EINTR); - - if (pid == 0) - continue; + h = &loop->process_handles; + q = QUEUE_HEAD(h); + while (q != h) { + process = QUEUE_DATA(q, uv_process_t, queue); + q = QUEUE_NEXT(q); + + do + pid = waitpid(process->pid, &status, WNOHANG); + while (pid == -1 && errno == EINTR); - if (pid == -1) { - if (errno != ECHILD) - abort(); - continue; - } + if (pid == 0) + continue; - process->status = status; - QUEUE_REMOVE(&process->queue); - QUEUE_INSERT_TAIL(&pending, &process->queue); + if (pid == -1) { + if 
(errno != ECHILD) + abort(); + continue; } - while (!QUEUE_EMPTY(&pending)) { - q = QUEUE_HEAD(&pending); - QUEUE_REMOVE(q); - QUEUE_INIT(q); + process->status = status; + QUEUE_REMOVE(&process->queue); + QUEUE_INSERT_TAIL(&pending, &process->queue); + } + + h = &pending; + q = QUEUE_HEAD(h); + while (q != h) { + process = QUEUE_DATA(q, uv_process_t, queue); + q = QUEUE_NEXT(q); + + QUEUE_REMOVE(&process->queue); + QUEUE_INIT(&process->queue); + uv__handle_stop(process); - process = QUEUE_DATA(q, uv_process_t, queue); - uv__handle_stop(process); + if (process->exit_cb == NULL) + continue; - if (process->exit_cb == NULL) - continue; + exit_status = 0; + if (WIFEXITED(process->status)) + exit_status = WEXITSTATUS(process->status); - exit_status = 0; - if (WIFEXITED(process->status)) - exit_status = WEXITSTATUS(process->status); - - term_signal = 0; - if (WIFSIGNALED(process->status)) - term_signal = WTERMSIG(process->status); + term_signal = 0; + if (WIFSIGNALED(process->status)) + term_signal = WTERMSIG(process->status); - process->exit_cb(process, exit_status, term_signal); - } + process->exit_cb(process, exit_status, term_signal); } + assert(QUEUE_EMPTY(&pending)); } @@ -369,7 +362,6 @@ int signal_pipe[2] = { -1, -1 }; int (*pipes)[2]; int stdio_count; - QUEUE* q; ssize_t r; pid_t pid; int err; @@ -483,8 +475,7 @@ /* Only activate this handle if exec() happened successfully */ if (exec_errorno == 0) { - q = uv__process_queue(loop, pid); - QUEUE_INSERT_TAIL(q, &process->queue); + QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue); uv__handle_start(process); } @@ -526,7 +517,8 @@ void uv__process_close(uv_process_t* handle) { - /* TODO stop signal watcher when this is the last handle */ QUEUE_REMOVE(&handle->queue); uv__handle_stop(handle); + if (QUEUE_EMPTY(&handle->loop->process_handles)) + uv_signal_stop(&handle->loop->child_watcher); } diff -Nru nodejs-0.11.13/deps/uv/src/unix/stream.c nodejs-0.11.15/deps/uv/src/unix/stream.c --- 
nodejs-0.11.13/deps/uv/src/unix/stream.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/stream.c 2015-01-20 21:22:17.000000000 +0000 @@ -53,6 +53,10 @@ int fake_fd; int int_fd; int fd; + fd_set* sread; + size_t sread_sz; + fd_set* swrite; + size_t swrite_sz; }; #endif /* defined(__APPLE__) */ @@ -60,21 +64,10 @@ static void uv__write(uv_stream_t* stream); static void uv__read(uv_stream_t* stream); static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events); +static void uv__write_callbacks(uv_stream_t* stream); static size_t uv__write_req_size(uv_write_t* req); -static size_t uv_count_bufs(const uv_buf_t bufs[], unsigned int nbufs) { - unsigned int i; - size_t bytes; - - bytes = 0; - for (i = 0; i < nbufs; i++) - bytes += bufs[i].len; - - return bytes; -} - - void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream, uv_handle_type type) { @@ -138,8 +131,6 @@ uv_stream_t* stream; uv__stream_select_t* s; char buf[1024]; - fd_set sread; - fd_set swrite; int events; int fd; int r; @@ -160,17 +151,17 @@ break; /* Watch fd using select(2) */ - FD_ZERO(&sread); - FD_ZERO(&swrite); + memset(s->sread, 0, s->sread_sz); + memset(s->swrite, 0, s->swrite_sz); if (uv__io_active(&stream->io_watcher, UV__POLLIN)) - FD_SET(fd, &sread); + FD_SET(fd, s->sread); if (uv__io_active(&stream->io_watcher, UV__POLLOUT)) - FD_SET(fd, &swrite); - FD_SET(s->int_fd, &sread); + FD_SET(fd, s->swrite); + FD_SET(s->int_fd, s->sread); /* Wait indefinitely for fd events */ - r = select(max_fd + 1, &sread, &swrite, NULL, NULL); + r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL); if (r == -1) { if (errno == EINTR) continue; @@ -184,7 +175,7 @@ continue; /* Empty socketpair's buffer in case of interruption */ - if (FD_ISSET(s->int_fd, &sread)) + if (FD_ISSET(s->int_fd, s->sread)) while (1) { r = read(s->int_fd, buf, sizeof(buf)); @@ -205,12 +196,12 @@ /* Handle events */ events = 0; - if (FD_ISSET(fd, &sread)) + if (FD_ISSET(fd, s->sread)) events |= 
UV__POLLIN; - if (FD_ISSET(fd, &swrite)) + if (FD_ISSET(fd, s->swrite)) events |= UV__POLLOUT; - assert(events != 0 || FD_ISSET(s->int_fd, &sread)); + assert(events != 0 || FD_ISSET(s->int_fd, s->sread)); if (events != 0) { ACCESS_ONCE(int, s->events) = events; @@ -272,6 +263,9 @@ int ret; int kq; int old_fd; + int max_fd; + size_t sread_sz; + size_t swrite_sz; kq = kqueue(); if (kq == -1) { @@ -295,31 +289,48 @@ return 0; /* At this point we definitely know that this fd won't work with kqueue */ - s = malloc(sizeof(*s)); - if (s == NULL) - return -ENOMEM; + + /* + * Create fds for io watcher and to interrupt the select() loop. + * NOTE: do it ahead of malloc below to allocate enough space for fd_sets + */ + if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) + return -errno; + + max_fd = *fd; + if (fds[1] > max_fd) + max_fd = fds[1]; + + sread_sz = (max_fd + NBBY) / NBBY; + swrite_sz = sread_sz; + + s = malloc(sizeof(*s) + sread_sz + swrite_sz); + if (s == NULL) { + err = -ENOMEM; + goto failed_malloc; + } s->events = 0; s->fd = *fd; + s->sread = (fd_set*) ((char*) s + sizeof(*s)); + s->sread_sz = sread_sz; + s->swrite = (fd_set*) ((char*) s->sread + sread_sz); + s->swrite_sz = swrite_sz; err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb); - if (err) { - free(s); - return err; - } + if (err) + goto failed_async_init; s->async.flags |= UV__HANDLE_INTERNAL; uv__handle_unref(&s->async); - if (uv_sem_init(&s->close_sem, 0)) - goto fatal1; - - if (uv_sem_init(&s->async_sem, 0)) - goto fatal2; - - /* Create fds for io watcher and to interrupt the select() loop. 
*/ - if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) - goto fatal3; + err = uv_sem_init(&s->close_sem, 0); + if (err != 0) + goto failed_close_sem_init; + + err = uv_sem_init(&s->async_sem, 0); + if (err != 0) + goto failed_async_sem_init; s->fake_fd = fds[0]; s->int_fd = fds[1]; @@ -329,26 +340,36 @@ stream->select = s; *fd = s->fake_fd; - if (uv_thread_create(&s->thread, uv__stream_osx_select, stream)) - goto fatal4; + err = uv_thread_create(&s->thread, uv__stream_osx_select, stream); + if (err != 0) + goto failed_thread_create; return 0; -fatal4: +failed_thread_create: s->stream = NULL; stream->select = NULL; *fd = old_fd; - uv__close(s->fake_fd); - uv__close(s->int_fd); - s->fake_fd = -1; - s->int_fd = -1; -fatal3: + uv_sem_destroy(&s->async_sem); -fatal2: + +failed_async_sem_init: uv_sem_destroy(&s->close_sem); -fatal1: + +failed_close_sem_init: + uv__close(fds[0]); + uv__close(fds[1]); uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close); - return -errno; + return err; + +failed_async_init: + free(s); + +failed_malloc: + uv__close(fds[0]); + uv__close(fds[1]); + + return err; } #endif /* defined(__APPLE__) */ @@ -372,52 +393,34 @@ } -void uv__stream_destroy(uv_stream_t* stream) { +void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { uv_write_t* req; QUEUE* q; - - assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT)); - assert(stream->flags & UV_CLOSED); - - if (stream->connect_req) { - uv__req_unregister(stream->loop, stream->connect_req); - stream->connect_req->cb(stream->connect_req, -ECANCELED); - stream->connect_req = NULL; - } - while (!QUEUE_EMPTY(&stream->write_queue)) { q = QUEUE_HEAD(&stream->write_queue); QUEUE_REMOVE(q); req = QUEUE_DATA(q, uv_write_t, queue); - uv__req_unregister(stream->loop, req); + req->error = error; - if (req->bufs != req->bufsml) - free(req->bufs); - req->bufs = NULL; - - if (req->cb != NULL) - req->cb(req, -ECANCELED); + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); } +} 
- while (!QUEUE_EMPTY(&stream->write_completed_queue)) { - q = QUEUE_HEAD(&stream->write_completed_queue); - QUEUE_REMOVE(q); - - req = QUEUE_DATA(q, uv_write_t, queue); - uv__req_unregister(stream->loop, req); - if (req->bufs != NULL) { - stream->write_queue_size -= uv__write_req_size(req); - if (req->bufs != req->bufsml) - free(req->bufs); - req->bufs = NULL; - } +void uv__stream_destroy(uv_stream_t* stream) { + assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT)); + assert(stream->flags & UV_CLOSED); - if (req->cb) - req->cb(req, req->error); + if (stream->connect_req) { + uv__req_unregister(stream->loop, stream->connect_req); + stream->connect_req->cb(stream->connect_req, -ECANCELED); + stream->connect_req = NULL; } + uv__stream_flush_write_queue(stream, -ECANCELED); + uv__write_callbacks(stream); + if (stream->shutdown_req) { /* The ECANCELED error code is a lie, the shutdown(2) syscall is a * fait accompli at this point. Maybe we should revisit this in v0.11. @@ -428,6 +431,8 @@ stream->shutdown_req->cb(stream->shutdown_req, -ECANCELED); stream->shutdown_req = NULL; } + + assert(stream->write_queue_size == 0); } @@ -444,6 +449,7 @@ */ static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) { int err; + int emfile_fd; if (loop->emfile_fd == -1) return -EMFILE; @@ -457,7 +463,10 @@ uv__close(err); } while (err >= 0 || err == -EINTR); - SAVE_ERRNO(loop->emfile_fd = uv__open_cloexec("/", O_RDONLY)); + emfile_fd = uv__open_cloexec("/", O_RDONLY); + if (emfile_fd >= 0) + loop->emfile_fd = emfile_fd; + return err; } @@ -540,7 +549,6 @@ if (server->accepted_fd == -1) return -EAGAIN; - err = 0; switch (client->type) { case UV_NAMED_PIPE: case UV_TCP: @@ -563,7 +571,7 @@ break; default: - assert(0); + return -EINVAL; } done: @@ -599,7 +607,6 @@ int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) { int err; - err = -EINVAL; switch (stream->type) { case UV_TCP: err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb); @@ -610,7 +617,7 @@ 
break; default: - assert(0); + err = -EINVAL; } if (err == 0) @@ -656,8 +663,8 @@ size_t size; assert(req->bufs != NULL); - size = uv_count_bufs(req->bufs + req->write_index, - req->nbufs - req->write_index); + size = uv__count_bufs(req->bufs + req->write_index, + req->nbufs - req->write_index); assert(req->handle->write_queue_size >= size); return size; @@ -899,10 +906,6 @@ } assert(QUEUE_EMPTY(&stream->write_completed_queue)); - - /* Write queue drained. */ - if (QUEUE_EMPTY(&stream->write_queue)) - uv__drain(stream); } @@ -947,6 +950,7 @@ uv__handle_stop(stream); uv__stream_osx_interrupt_select(stream); stream->read_cb(stream, UV_EOF, buf); + stream->flags &= ~UV_STREAM_READING; } @@ -1113,8 +1117,13 @@ } else { /* Error. User should call uv_close(). */ stream->read_cb(stream, -errno, &buf); - assert(!uv__io_active(&stream->io_watcher, UV__POLLIN) && - "stream->read_cb(status=-1) did not call uv_close()"); + if (stream->flags & UV_STREAM_READING) { + stream->flags &= ~UV_STREAM_READING; + uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN); + if (!uv__io_active(&stream->io_watcher, UV__POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + } } return; } else if (nread == 0) { @@ -1193,7 +1202,7 @@ assert(uv__stream_fd(stream) >= 0); /* Ignore POLLHUP here. Even it it's set, there may still be data to read. */ - if (events & (UV__POLLIN | UV__POLLERR)) + if (events & (UV__POLLIN | UV__POLLERR | UV__POLLHUP)) uv__read(stream); if (uv__stream_fd(stream) == -1) @@ -1219,6 +1228,10 @@ if (events & (UV__POLLOUT | UV__POLLERR | UV__POLLHUP)) { uv__write(stream); uv__write_callbacks(stream); + + /* Write queue drained. 
*/ + if (QUEUE_EMPTY(&stream->write_queue)) + uv__drain(stream); } } @@ -1259,10 +1272,21 @@ stream->connect_req = NULL; uv__req_unregister(stream->loop, req); - uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT); + + if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) { + uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT); + } if (req->cb) req->cb(req, error); + + if (uv__stream_fd(stream) == -1) + return; + + if (error < 0) { + uv__stream_flush_write_queue(stream, -ECANCELED); + uv__write_callbacks(stream); + } } @@ -1300,7 +1324,7 @@ /* It's legal for write_queue_size > 0 even when the write_queue is empty; * it means there are error-state requests in the write_completed_queue that * will touch up write_queue_size later, see also uv__write_req_finish(). - * We chould check that write_queue is empty instead but that implies making + * We could check that write_queue is empty instead but that implies making * a write() syscall when we know that the handle is in error mode. */ empty_queue = (stream->write_queue_size == 0); @@ -1323,7 +1347,7 @@ memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); req->nbufs = nbufs; req->write_index = 0; - stream->write_queue_size += uv_count_bufs(bufs, nbufs); + stream->write_queue_size += uv__count_bufs(bufs, nbufs); /* Append the request to write_queue. 
*/ QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue); @@ -1382,7 +1406,7 @@ /* Connecting or already writing some data */ if (stream->connect_req != NULL || stream->write_queue_size != 0) - return 0; + return -EAGAIN; has_pollout = uv__io_active(&stream->io_watcher, UV__POLLOUT); @@ -1391,7 +1415,7 @@ return r; /* Remove not written bytes from write queue size */ - written = uv_count_bufs(bufs, nbufs); + written = uv__count_bufs(bufs, nbufs); if (req.bufs != NULL) req_size = uv__write_req_size(&req); else @@ -1412,7 +1436,10 @@ uv__stream_osx_interrupt_select(stream); } - return (int) written; + if (written == 0) + return -EAGAIN; + else + return written; } @@ -1449,15 +1476,8 @@ int uv_read_stop(uv_stream_t* stream) { - /* Sanity check. We're going to stop the handle unless it's primed for - * writing but that means there should be some kind of write action in - * progress. - */ - assert(!uv__io_active(&stream->io_watcher, UV__POLLOUT) || - !QUEUE_EMPTY(&stream->write_completed_queue) || - !QUEUE_EMPTY(&stream->write_queue) || - stream->shutdown_req != NULL || - stream->connect_req != NULL); + if (!(stream->flags & UV_STREAM_READING)) + return 0; stream->flags &= ~UV_STREAM_READING; uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN); diff -Nru nodejs-0.11.13/deps/uv/src/unix/sunos.c nodejs-0.11.15/deps/uv/src/unix/sunos.c --- nodejs-0.11.13/deps/uv/src/unix/sunos.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/sunos.c 2015-01-20 21:22:17.000000000 +0000 @@ -122,6 +122,8 @@ struct timespec spec; QUEUE* q; uv__io_t* w; + sigset_t* pset; + sigset_t set; uint64_t base; uint64_t diff; unsigned int nfds; @@ -129,6 +131,7 @@ int saved_errno; int nevents; int count; + int err; int fd; if (loop->nfds == 0) { @@ -150,6 +153,13 @@ w->events = w->pevents; } + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + assert(timeout >= -1); base = loop->time; count = 48; 
/* Benchmarks suggest this gives the best throughput. */ @@ -165,11 +175,20 @@ nfds = 1; saved_errno = 0; - if (port_getn(loop->backend_fd, - events, - ARRAY_SIZE(events), - &nfds, - timeout == -1 ? NULL : &spec)) { + + if (pset != NULL) + pthread_sigmask(SIG_BLOCK, pset, NULL); + + err = port_getn(loop->backend_fd, + events, + ARRAY_SIZE(events), + &nfds, + timeout == -1 ? NULL : &spec); + + if (pset != NULL) + pthread_sigmask(SIG_UNBLOCK, pset, NULL); + + if (err) { /* Work around another kernel bug: port_getn() may return events even * on error. */ @@ -431,7 +450,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { if (!uv__is_active(handle)) - return -EINVAL; + return 0; if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) { port_dissociate(handle->loop->fs_fd, diff -Nru nodejs-0.11.13/deps/uv/src/unix/thread.c nodejs-0.11.15/deps/uv/src/unix/thread.c --- nodejs-0.11.13/deps/uv/src/unix/thread.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/thread.c 2015-01-20 21:22:17.000000000 +0000 @@ -31,11 +31,61 @@ #undef NANOSEC #define NANOSEC ((uint64_t) 1e9) + +struct thread_ctx { + void (*entry)(void* arg); + void* arg; +}; + + +static void* uv__thread_start(void *arg) +{ + struct thread_ctx *ctx_p; + struct thread_ctx ctx; + + ctx_p = arg; + ctx = *ctx_p; + free(ctx_p); + ctx.entry(ctx.arg); + + return 0; +} + + +int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { + struct thread_ctx* ctx; + int err; + + ctx = malloc(sizeof(*ctx)); + if (ctx == NULL) + return UV_ENOMEM; + + ctx->entry = entry; + ctx->arg = arg; + + err = pthread_create(tid, NULL, uv__thread_start, ctx); + + if (err) + free(ctx); + + return err ? 
-1 : 0; +} + + +uv_thread_t uv_thread_self(void) { + return pthread_self(); +} + int uv_thread_join(uv_thread_t *tid) { return -pthread_join(*tid, NULL); } +int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { + return pthread_equal(*t1, *t2); +} + + int uv_mutex_init(uv_mutex_t* mutex) { #if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK) return -pthread_mutex_init(mutex, NULL); @@ -399,7 +449,9 @@ } -void uv_barrier_wait(uv_barrier_t* barrier) { +int uv_barrier_wait(uv_barrier_t* barrier) { + int serial_thread; + uv_mutex_lock(&barrier->mutex); if (++barrier->count == barrier->n) { uv_sem_wait(&barrier->turnstile2); @@ -411,7 +463,8 @@ uv_sem_post(&barrier->turnstile1); uv_mutex_lock(&barrier->mutex); - if (--barrier->count == 0) { + serial_thread = (--barrier->count == 0); + if (serial_thread) { uv_sem_wait(&barrier->turnstile1); uv_sem_post(&barrier->turnstile2); } @@ -419,6 +472,7 @@ uv_sem_wait(&barrier->turnstile2); uv_sem_post(&barrier->turnstile2); + return serial_thread; } #else /* !(defined(__APPLE__) && defined(__MACH__)) */ @@ -434,10 +488,11 @@ } -void uv_barrier_wait(uv_barrier_t* barrier) { +int uv_barrier_wait(uv_barrier_t* barrier) { int r = pthread_barrier_wait(barrier); if (r && r != PTHREAD_BARRIER_SERIAL_THREAD) abort(); + return r == PTHREAD_BARRIER_SERIAL_THREAD; } #endif /* defined(__APPLE__) && defined(__MACH__) */ diff -Nru nodejs-0.11.13/deps/uv/src/unix/threadpool.c nodejs-0.11.15/deps/uv/src/unix/threadpool.c --- nodejs-0.11.13/deps/uv/src/unix/threadpool.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/threadpool.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,280 +0,0 @@ -/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#include "internal.h" -#include <stdlib.h> - -#define MAX_THREADPOOL_SIZE 128 - -static uv_once_t once = UV_ONCE_INIT; -static uv_cond_t cond; -static uv_mutex_t mutex; -static unsigned int nthreads; -static uv_thread_t* threads; -static uv_thread_t default_threads[4]; -static QUEUE exit_message; -static QUEUE wq; -static volatile int initialized; - - -static void uv__cancelled(struct uv__work* w) { - abort(); -} - - -/* To avoid deadlock with uv_cancel() it's crucial that the worker - * never holds the global mutex and the loop-local mutex at the same time. 
- */ -static void worker(void* arg) { - struct uv__work* w; - QUEUE* q; - - (void) arg; - - for (;;) { - uv_mutex_lock(&mutex); - - while (QUEUE_EMPTY(&wq)) - uv_cond_wait(&cond, &mutex); - - q = QUEUE_HEAD(&wq); - - if (q == &exit_message) - uv_cond_signal(&cond); - else { - QUEUE_REMOVE(q); - QUEUE_INIT(q); /* Signal uv_cancel() that the work req is - executing. */ - } - - uv_mutex_unlock(&mutex); - - if (q == &exit_message) - break; - - w = QUEUE_DATA(q, struct uv__work, wq); - w->work(w); - - uv_mutex_lock(&w->loop->wq_mutex); - w->work = NULL; /* Signal uv_cancel() that the work req is done - executing. */ - QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq); - uv_async_send(&w->loop->wq_async); - uv_mutex_unlock(&w->loop->wq_mutex); - } -} - - -static void post(QUEUE* q) { - uv_mutex_lock(&mutex); - QUEUE_INSERT_TAIL(&wq, q); - uv_cond_signal(&cond); - uv_mutex_unlock(&mutex); -} - - -static void init_once(void) { - unsigned int i; - const char* val; - - nthreads = ARRAY_SIZE(default_threads); - val = getenv("UV_THREADPOOL_SIZE"); - if (val != NULL) - nthreads = atoi(val); - if (nthreads == 0) - nthreads = 1; - if (nthreads > MAX_THREADPOOL_SIZE) - nthreads = MAX_THREADPOOL_SIZE; - - threads = default_threads; - if (nthreads > ARRAY_SIZE(default_threads)) { - threads = malloc(nthreads * sizeof(threads[0])); - if (threads == NULL) { - nthreads = ARRAY_SIZE(default_threads); - threads = default_threads; - } - } - - if (uv_cond_init(&cond)) - abort(); - - if (uv_mutex_init(&mutex)) - abort(); - - QUEUE_INIT(&wq); - - for (i = 0; i < nthreads; i++) - if (uv_thread_create(threads + i, worker, NULL)) - abort(); - - initialized = 1; -} - - -UV_DESTRUCTOR(static void cleanup(void)) { - unsigned int i; - - if (initialized == 0) - return; - - post(&exit_message); - - for (i = 0; i < nthreads; i++) - if (uv_thread_join(threads + i)) - abort(); - - if (threads != default_threads) - free(threads); - - uv_mutex_destroy(&mutex); - uv_cond_destroy(&cond); - - threads = NULL; - nthreads 
= 0; - initialized = 0; -} - - -void uv__work_submit(uv_loop_t* loop, - struct uv__work* w, - void (*work)(struct uv__work* w), - void (*done)(struct uv__work* w, int status)) { - uv_once(&once, init_once); - w->loop = loop; - w->work = work; - w->done = done; - post(&w->wq); -} - - -static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { - int cancelled; - - uv_mutex_lock(&mutex); - uv_mutex_lock(&w->loop->wq_mutex); - - cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL; - if (cancelled) - QUEUE_REMOVE(&w->wq); - - uv_mutex_unlock(&w->loop->wq_mutex); - uv_mutex_unlock(&mutex); - - if (!cancelled) - return -EBUSY; - - w->work = uv__cancelled; - uv_mutex_lock(&loop->wq_mutex); - QUEUE_INSERT_TAIL(&loop->wq, &w->wq); - uv_async_send(&loop->wq_async); - uv_mutex_unlock(&loop->wq_mutex); - - return 0; -} - - -void uv__work_done(uv_async_t* handle) { - struct uv__work* w; - uv_loop_t* loop; - QUEUE* q; - QUEUE wq; - int err; - - loop = container_of(handle, uv_loop_t, wq_async); - QUEUE_INIT(&wq); - - uv_mutex_lock(&loop->wq_mutex); - if (!QUEUE_EMPTY(&loop->wq)) { - q = QUEUE_HEAD(&loop->wq); - QUEUE_SPLIT(&loop->wq, q, &wq); - } - uv_mutex_unlock(&loop->wq_mutex); - - while (!QUEUE_EMPTY(&wq)) { - q = QUEUE_HEAD(&wq); - QUEUE_REMOVE(q); - - w = container_of(q, struct uv__work, wq); - err = (w->work == uv__cancelled) ? 
-ECANCELED : 0; - w->done(w, err); - } -} - - -static void uv__queue_work(struct uv__work* w) { - uv_work_t* req = container_of(w, uv_work_t, work_req); - - req->work_cb(req); -} - - -static void uv__queue_done(struct uv__work* w, int err) { - uv_work_t* req; - - req = container_of(w, uv_work_t, work_req); - uv__req_unregister(req->loop, req); - - if (req->after_work_cb == NULL) - return; - - req->after_work_cb(req, err); -} - - -int uv_queue_work(uv_loop_t* loop, - uv_work_t* req, - uv_work_cb work_cb, - uv_after_work_cb after_work_cb) { - if (work_cb == NULL) - return -EINVAL; - - uv__req_init(loop, req, UV_WORK); - req->loop = loop; - req->work_cb = work_cb; - req->after_work_cb = after_work_cb; - uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done); - return 0; -} - - -int uv_cancel(uv_req_t* req) { - struct uv__work* wreq; - uv_loop_t* loop; - - switch (req->type) { - case UV_FS: - loop = ((uv_fs_t*) req)->loop; - wreq = &((uv_fs_t*) req)->work_req; - break; - case UV_GETADDRINFO: - loop = ((uv_getaddrinfo_t*) req)->loop; - wreq = &((uv_getaddrinfo_t*) req)->work_req; - break; - case UV_WORK: - loop = ((uv_work_t*) req)->loop; - wreq = &((uv_work_t*) req)->work_req; - break; - default: - return -EINVAL; - } - - return uv__work_cancel(loop, req, wreq); -} diff -Nru nodejs-0.11.13/deps/uv/src/unix/timer.c nodejs-0.11.15/deps/uv/src/unix/timer.c --- nodejs-0.11.13/deps/uv/src/unix/timer.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/timer.c 2015-01-20 21:22:17.000000000 +0000 @@ -65,6 +65,9 @@ uint64_t repeat) { uint64_t clamped_timeout; + if (cb == NULL) + return -EINVAL; + if (uv__is_active(handle)) uv_timer_stop(handle); diff -Nru nodejs-0.11.13/deps/uv/src/unix/udp.c nodejs-0.11.15/deps/uv/src/unix/udp.c --- nodejs-0.11.13/deps/uv/src/unix/udp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/unix/udp.c 2015-01-20 21:22:17.000000000 +0000 @@ -38,10 +38,9 @@ static void 
uv__udp_run_completed(uv_udp_t* handle); -static void uv__udp_run_pending(uv_udp_t* handle); static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents); -static void uv__udp_recvmsg(uv_loop_t* loop, uv__io_t* w, unsigned int revents); -static void uv__udp_sendmsg(uv_loop_t* loop, uv__io_t* w, unsigned int revents); +static void uv__udp_recvmsg(uv_udp_t* handle); +static void uv__udp_sendmsg(uv_udp_t* handle); static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, int domain, unsigned int flags); @@ -65,22 +64,19 @@ assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT)); assert(handle->io_watcher.fd == -1); - uv__udp_run_completed(handle); - while (!QUEUE_EMPTY(&handle->write_queue)) { q = QUEUE_HEAD(&handle->write_queue); QUEUE_REMOVE(q); req = QUEUE_DATA(q, uv_udp_send_t, queue); - uv__req_unregister(handle->loop, req); + req->status = -ECANCELED; + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); + } - if (req->bufs != req->bufsml) - free(req->bufs); - req->bufs = NULL; + uv__udp_run_completed(handle); - if (req->send_cb != NULL) - req->send_cb(req, -ECANCELED); - } + assert(handle->send_queue_size == 0); + assert(handle->send_queue_count == 0); /* Now tear down the handle. */ handle->recv_cb = NULL; @@ -89,50 +85,6 @@ } -static void uv__udp_run_pending(uv_udp_t* handle) { - uv_udp_send_t* req; - QUEUE* q; - struct msghdr h; - ssize_t size; - - while (!QUEUE_EMPTY(&handle->write_queue)) { - q = QUEUE_HEAD(&handle->write_queue); - assert(q != NULL); - - req = QUEUE_DATA(q, uv_udp_send_t, queue); - assert(req != NULL); - - memset(&h, 0, sizeof h); - h.msg_name = &req->addr; - h.msg_namelen = (req->addr.sin6_family == AF_INET6 ? 
- sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)); - h.msg_iov = (struct iovec*) req->bufs; - h.msg_iovlen = req->nbufs; - - do { - size = sendmsg(handle->io_watcher.fd, &h, 0); - } - while (size == -1 && errno == EINTR); - - /* TODO try to write once or twice more in the - * hope that the socket becomes readable again? - */ - if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) - break; - - req->status = (size == -1 ? -errno : size); - - /* Sending a datagram is an atomic operation: either all data - * is written or nothing is (and EMSGSIZE is raised). That is - * why we don't handle partial writes. Just pop the request - * off the write queue and onto the completed queue, done. - */ - QUEUE_REMOVE(&req->queue); - QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); - } -} - - static void uv__udp_run_completed(uv_udp_t* handle) { uv_udp_send_t* req; QUEUE* q; @@ -144,6 +96,9 @@ req = QUEUE_DATA(q, uv_udp_send_t, queue); uv__req_unregister(handle->loop, req); + handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); + handle->send_queue_count--; + if (req->bufs != req->bufsml) free(req->bufs); req->bufs = NULL; @@ -159,33 +114,40 @@ else req->send_cb(req, req->status); } + + if (QUEUE_EMPTY(&handle->write_queue)) { + /* Pending queue and completion queue empty, stop watcher. 
*/ + uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLOUT); + if (!uv__io_active(&handle->io_watcher, UV__POLLIN)) + uv__handle_stop(handle); + } } static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) { + uv_udp_t* handle; + + handle = container_of(w, uv_udp_t, io_watcher); + assert(handle->type == UV_UDP); + if (revents & UV__POLLIN) - uv__udp_recvmsg(loop, w, revents); + uv__udp_recvmsg(handle); - if (revents & UV__POLLOUT) - uv__udp_sendmsg(loop, w, revents); + if (revents & UV__POLLOUT) { + uv__udp_sendmsg(handle); + uv__udp_run_completed(handle); + } } -static void uv__udp_recvmsg(uv_loop_t* loop, - uv__io_t* w, - unsigned int revents) { +static void uv__udp_recvmsg(uv_udp_t* handle) { struct sockaddr_storage peer; struct msghdr h; - uv_udp_t* handle; ssize_t nread; uv_buf_t buf; int flags; int count; - handle = container_of(w, uv_udp_t, io_watcher); - assert(handle->type == UV_UDP); - assert(revents & UV__POLLIN); - assert(handle->recv_cb != NULL); assert(handle->alloc_cb != NULL); @@ -242,34 +204,43 @@ } -static void uv__udp_sendmsg(uv_loop_t* loop, - uv__io_t* w, - unsigned int revents) { - uv_udp_t* handle; +static void uv__udp_sendmsg(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + struct msghdr h; + ssize_t size; - handle = container_of(w, uv_udp_t, io_watcher); - assert(handle->type == UV_UDP); - assert(revents & UV__POLLOUT); + while (!QUEUE_EMPTY(&handle->write_queue)) { + q = QUEUE_HEAD(&handle->write_queue); + assert(q != NULL); - assert(!QUEUE_EMPTY(&handle->write_queue) - || !QUEUE_EMPTY(&handle->write_completed_queue)); + req = QUEUE_DATA(q, uv_udp_send_t, queue); + assert(req != NULL); - /* Write out pending data first. */ - uv__udp_run_pending(handle); + memset(&h, 0, sizeof h); + h.msg_name = &req->addr; + h.msg_namelen = (req->addr.ss_family == AF_INET6 ? 
+ sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)); + h.msg_iov = (struct iovec*) req->bufs; + h.msg_iovlen = req->nbufs; - /* Drain 'request completed' queue. */ - uv__udp_run_completed(handle); + do { + size = sendmsg(handle->io_watcher.fd, &h, 0); + } while (size == -1 && errno == EINTR); - if (!QUEUE_EMPTY(&handle->write_completed_queue)) { - /* Schedule completion callbacks. */ - uv__io_feed(handle->loop, &handle->io_watcher); - } - else if (QUEUE_EMPTY(&handle->write_queue)) { - /* Pending queue and completion queue empty, stop watcher. */ - uv__io_stop(loop, &handle->io_watcher, UV__POLLOUT); + if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) + break; - if (!uv__io_active(&handle->io_watcher, UV__POLLIN)) - uv__handle_stop(handle); + req->status = (size == -1 ? -errno : size); + + /* Sending a datagram is an atomic operation: either all data + * is written or nothing is (and EMSGSIZE is raised). That is + * why we don't handle partial writes. Just pop the request + * off the write queue and onto the completed queue, done. + */ + QUEUE_REMOVE(&req->queue); + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); + uv__io_feed(handle->loop, &handle->io_watcher); } } @@ -307,9 +278,6 @@ int yes; int fd; - err = -EINVAL; - fd = -1; - /* Check for bad flags. 
*/ if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR)) return -EINVAL; @@ -320,9 +288,10 @@ fd = handle->io_watcher.fd; if (fd == -1) { - fd = uv__socket(addr->sa_family, SOCK_DGRAM, 0); - if (fd == -1) - return -errno; + err = uv__socket(addr->sa_family, SOCK_DGRAM, 0); + if (err < 0) + return err; + fd = err; handle->io_watcher.fd = fd; } @@ -368,8 +337,6 @@ unsigned char taddr[sizeof(struct sockaddr_in6)]; socklen_t addrlen; - assert(domain == AF_INET || domain == AF_INET6); - if (handle->io_watcher.fd != -1) return 0; @@ -409,6 +376,7 @@ unsigned int addrlen, uv_udp_send_cb send_cb) { int err; + int empty_queue; assert(nbufs > 0); @@ -416,8 +384,13 @@ if (err) return err; - uv__req_init(handle->loop, req, UV_UDP_SEND); + /* It's legal for send_queue_count > 0 even when the write_queue is empty; + * it means there are error-state requests in the write_completed_queue that + * will touch up send_queue_size/count later. + */ + empty_queue = (handle->send_queue_count == 0); + uv__req_init(handle->loop, req, UV_UDP_SEND); assert(addrlen <= sizeof(req->addr)); memcpy(&req->addr, addr, addrlen); req->send_cb = send_cb; @@ -432,14 +405,60 @@ return -ENOMEM; memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); + handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs); + handle->send_queue_count++; QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue); - uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT); uv__handle_start(handle); + if (empty_queue) + uv__udp_sendmsg(handle); + else + uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT); + return 0; } +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen) { + int err; + struct msghdr h; + ssize_t size; + + assert(nbufs > 0); + + /* already sending a message */ + if (handle->send_queue_count != 0) + return -EAGAIN; + + err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0); + if (err) + return err; + + 
memset(&h, 0, sizeof h); + h.msg_name = (struct sockaddr*) addr; + h.msg_namelen = addrlen; + h.msg_iov = (struct iovec*) bufs; + h.msg_iovlen = nbufs; + + do { + size = sendmsg(handle->io_watcher.fd, &h, 0); + } while (size == -1 && errno == EINTR); + + if (size == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return -EAGAIN; + else + return -errno; + } + + return size; +} + + static int uv__udp_set_membership4(uv_udp_t* handle, const struct sockaddr_in* multicast_addr, const char* interface_addr, @@ -530,6 +549,8 @@ uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP); handle->alloc_cb = NULL; handle->recv_cb = NULL; + handle->send_queue_size = 0; + handle->send_queue_count = 0; uv__io_init(&handle->io_watcher, uv__udp_io, -1); QUEUE_INIT(&handle->write_queue); QUEUE_INIT(&handle->write_completed_queue); @@ -578,7 +599,7 @@ static int uv__setsockopt_maybe_char(uv_udp_t* handle, int option, int val) { -#if defined(__sun) +#if defined(__sun) || defined(_AIX) char arg = val; #else int arg = val; @@ -628,7 +649,6 @@ } int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) { - int err; struct sockaddr_storage addr_st; struct sockaddr_in* addr4; struct sockaddr_in6* addr6; @@ -654,9 +674,6 @@ } if (addr_st.ss_family == AF_INET) { - err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR); - if (err) - return err; if (setsockopt(handle->io_watcher.fd, IPPROTO_IP, IP_MULTICAST_IF, @@ -665,9 +682,6 @@ return -errno; } } else if (addr_st.ss_family == AF_INET6) { - err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR); - if (err) - return err; if (setsockopt(handle->io_watcher.fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, @@ -684,7 +698,9 @@ } -int uv_udp_getsockname(uv_udp_t* handle, struct sockaddr* name, int* namelen) { +int uv_udp_getsockname(const uv_udp_t* handle, + struct sockaddr* name, + int* namelen) { socklen_t socklen; if (handle->io_watcher.fd == -1) diff -Nru nodejs-0.11.13/deps/uv/src/uv-common.c 
nodejs-0.11.15/deps/uv/src/uv-common.c --- nodejs-0.11.13/deps/uv/src/uv-common.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/uv-common.c 2015-01-20 21:22:17.000000000 +0000 @@ -19,31 +19,18 @@ * IN THE SOFTWARE. */ -/* Expose glibc-specific EAI_* error codes. Needs to be defined before we - * include any headers. - */ -#ifndef _GNU_SOURCE -# define _GNU_SOURCE -#endif - #include "uv.h" #include "uv-common.h" #include <stdio.h> #include <assert.h> +#include <stdarg.h> #include <stddef.h> /* NULL */ #include <stdlib.h> /* malloc */ #include <string.h> /* memset */ -#if defined(UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS) && !defined(_WIN32) -# include <net/if.h> /* if_nametoindex */ -#endif - -/* EAI_* constants. */ #if !defined(_WIN32) -# include <sys/types.h> -# include <sys/socket.h> -# include <netdb.h> +# include <net/if.h> /* if_nametoindex */ #endif #define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t); @@ -67,6 +54,11 @@ #undef XX +size_t uv_loop_size(void) { + return sizeof(uv_loop_t); +} + + uv_buf_t uv_buf_init(char* base, unsigned int len) { uv_buf_t buf; buf.base = base; @@ -107,17 +99,14 @@ int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) { -#if defined(UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS) char address_part[40]; size_t address_part_size; const char* zone_index; -#endif memset(addr, 0, sizeof(*addr)); addr->sin6_family = AF_INET6; addr->sin6_port = htons(port); -#if defined(UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS) zone_index = strchr(ip, '%'); if (zone_index != NULL) { address_part_size = zone_index - ip; @@ -136,7 +125,6 @@ addr->sin6_scope_id = if_nametoindex(zone_index); #endif } -#endif return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr); } @@ -232,6 +220,26 @@ } +int uv_udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr) { + unsigned int addrlen; + + if (handle->type != UV_UDP) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) + addrlen = 
sizeof(struct sockaddr_in); + else if (addr->sa_family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return UV_EINVAL; + + return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen); +} + + int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, uv_udp_recv_cb recv_cb) { @@ -250,64 +258,6 @@ } -struct thread_ctx { - void (*entry)(void* arg); - void* arg; -}; - - -#ifdef _WIN32 -static UINT __stdcall uv__thread_start(void* arg) -#else -static void* uv__thread_start(void *arg) -#endif -{ - struct thread_ctx *ctx_p; - struct thread_ctx ctx; - - ctx_p = arg; - ctx = *ctx_p; - free(ctx_p); - ctx.entry(ctx.arg); - - return 0; -} - - -int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { - struct thread_ctx* ctx; - int err; - - ctx = malloc(sizeof(*ctx)); - if (ctx == NULL) - return UV_ENOMEM; - - ctx->entry = entry; - ctx->arg = arg; - -#ifdef _WIN32 - *tid = (HANDLE) _beginthreadex(NULL, 0, uv__thread_start, ctx, 0, NULL); - err = *tid ? 0 : errno; -#else - err = pthread_create(tid, NULL, uv__thread_start, ctx); -#endif - - if (err) - free(ctx); - - return err ? 
-1 : 0; -} - - -unsigned long uv_thread_self(void) { -#ifdef _WIN32 - return (unsigned long) GetCurrentThreadId(); -#else - return (unsigned long) pthread_self(); -#endif -} - - void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) { QUEUE* q; uv_handle_t* h; @@ -389,60 +339,24 @@ } -int uv__getaddrinfo_translate_error(int sys_err) { - switch (sys_err) { - case 0: return 0; -#if defined(EAI_ADDRFAMILY) - case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY; -#endif -#if defined(EAI_AGAIN) - case EAI_AGAIN: return UV_EAI_AGAIN; -#endif -#if defined(EAI_BADFLAGS) - case EAI_BADFLAGS: return UV_EAI_BADFLAGS; -#endif -#if defined(EAI_BADHINTS) - case EAI_BADHINTS: return UV_EAI_BADHINTS; -#endif -#if defined(EAI_CANCELED) - case EAI_CANCELED: return UV_EAI_CANCELED; -#endif -#if defined(EAI_FAIL) - case EAI_FAIL: return UV_EAI_FAIL; -#endif -#if defined(EAI_FAMILY) - case EAI_FAMILY: return UV_EAI_FAMILY; -#endif -#if defined(EAI_MEMORY) - case EAI_MEMORY: return UV_EAI_MEMORY; -#endif -#if defined(EAI_NODATA) - case EAI_NODATA: return UV_EAI_NODATA; -#endif -#if defined(EAI_NONAME) -# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME - case EAI_NONAME: return UV_EAI_NONAME; -# endif -#endif -#if defined(EAI_OVERFLOW) - case EAI_OVERFLOW: return UV_EAI_OVERFLOW; -#endif -#if defined(EAI_PROTOCOL) - case EAI_PROTOCOL: return UV_EAI_PROTOCOL; -#endif -#if defined(EAI_SERVICE) - case EAI_SERVICE: return UV_EAI_SERVICE; -#endif -#if defined(EAI_SOCKTYPE) - case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE; -#endif -#if defined(EAI_SYSTEM) - case EAI_SYSTEM: return UV_EAI_SYSTEM; -#endif - } - assert(!"unknown EAI_* error code"); - abort(); - return 0; /* Pacify compiler. 
*/ + +size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) { + unsigned int i; + size_t bytes; + + bytes = 0; + for (i = 0; i < nbufs; i++) + bytes += (size_t) bufs[i].len; + + return bytes; +} + +int uv_recv_buffer_size(uv_handle_t* handle, int* value) { + return uv__socket_sockopt(handle, SO_RCVBUF, value); +} + +int uv_send_buffer_size(uv_handle_t* handle, int *value) { + return uv__socket_sockopt(handle, SO_SNDBUF, value); } int uv_fs_event_getpath(uv_fs_event_t* handle, char* buf, size_t* len) { @@ -464,3 +378,81 @@ return 0; } + + +void uv__fs_scandir_cleanup(uv_fs_t* req) { + uv__dirent_t** dents; + + dents = req->ptr; + if (req->nbufs > 0 && req->nbufs != (unsigned int) req->result) + req->nbufs--; + for (; req->nbufs < (unsigned int) req->result; req->nbufs++) + free(dents[req->nbufs]); +} + + +int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) { + uv__dirent_t** dents; + uv__dirent_t* dent; + + dents = req->ptr; + + /* Free previous entity */ + if (req->nbufs > 0) + free(dents[req->nbufs - 1]); + + /* End was already reached */ + if (req->nbufs == (unsigned int) req->result) { + free(dents); + req->ptr = NULL; + return UV_EOF; + } + + dent = dents[req->nbufs++]; + + ent->name = dent->d_name; +#ifdef HAVE_DIRENT_TYPES + switch (dent->d_type) { + case UV__DT_DIR: + ent->type = UV_DIRENT_DIR; + break; + case UV__DT_FILE: + ent->type = UV_DIRENT_FILE; + break; + case UV__DT_LINK: + ent->type = UV_DIRENT_LINK; + break; + case UV__DT_FIFO: + ent->type = UV_DIRENT_FIFO; + break; + case UV__DT_SOCKET: + ent->type = UV_DIRENT_SOCKET; + break; + case UV__DT_CHAR: + ent->type = UV_DIRENT_CHAR; + break; + case UV__DT_BLOCK: + ent->type = UV_DIRENT_BLOCK; + break; + default: + ent->type = UV_DIRENT_UNKNOWN; + } +#else + ent->type = UV_DIRENT_UNKNOWN; +#endif + + return 0; +} + + +int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) 
{ + va_list ap; + int err; + + va_start(ap, option); + /* Any platform-agnostic options should be handled here. */ + err = uv__loop_configure(loop, option, ap); + va_end(ap); + + return err; +} diff -Nru nodejs-0.11.13/deps/uv/src/uv-common.h nodejs-0.11.15/deps/uv/src/uv-common.h --- nodejs-0.11.13/deps/uv/src/uv-common.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/uv-common.h 2015-01-20 21:22:17.000000000 +0000 @@ -28,6 +28,7 @@ #define UV_COMMON_H_ #include <assert.h> +#include <stdarg.h> #include <stddef.h> #if defined(_MSC_VER) && _MSC_VER < 1600 @@ -59,6 +60,8 @@ # define UV__HANDLE_CLOSING 0x01 #endif +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap); + int uv__tcp_bind(uv_tcp_t* tcp, const struct sockaddr* addr, unsigned int addrlen, @@ -83,6 +86,12 @@ unsigned int addrlen, uv_udp_send_cb send_cb); +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen); + int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb, uv_udp_recv_cb recv_cb); @@ -92,6 +101,19 @@ int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. 
*/ +void uv__work_submit(uv_loop_t* loop, + struct uv__work *w, + void (*work)(struct uv__work *w), + void (*done)(struct uv__work *w, int status)); + +void uv__work_done(uv_async_t* handle); + +size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs); + +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value); + +void uv__fs_scandir_cleanup(uv_fs_t* req); + #define uv__has_active_reqs(loop) \ (QUEUE_EMPTY(&(loop)->active_reqs) == 0) diff -Nru nodejs-0.11.13/deps/uv/src/version.c nodejs-0.11.15/deps/uv/src/version.c --- nodejs-0.11.13/deps/uv/src/version.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/version.c 2015-01-20 21:22:17.000000000 +0000 @@ -35,7 +35,7 @@ #if UV_VERSION_IS_RELEASE # define UV_VERSION_STRING UV_VERSION_STRING_BASE #else -# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-pre" +# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-" UV_VERSION_SUFFIX #endif diff -Nru nodejs-0.11.13/deps/uv/src/win/core.c nodejs-0.11.15/deps/uv/src/win/core.c --- nodejs-0.11.13/deps/uv/src/win/core.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/core.c 2015-01-20 21:22:17.000000000 +0000 @@ -26,7 +26,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> -#if !defined(__MINGW32__) +#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR) #include <crtdbg.h> #endif @@ -36,37 +36,40 @@ #include "req-inl.h" -/* The only event loop we support right now */ -static uv_loop_t uv_default_loop_; +static uv_loop_t default_loop_struct; +static uv_loop_t* default_loop_ptr; -/* uv_once intialization guards */ +/* uv_once initialization guards */ static uv_once_t uv_init_guard_ = UV_ONCE_INIT; -static uv_once_t uv_default_loop_init_guard_ = UV_ONCE_INIT; -#if defined(_DEBUG) && !defined(__MINGW32__) -/* Our crt debug report handler allows us to temporarily disable asserts */ -/* just for the current thread. 
*/ +#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)) +/* Our crt debug report handler allows us to temporarily disable asserts + * just for the current thread. + */ -__declspec( thread ) int uv__crt_assert_enabled = TRUE; +UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE; static int uv__crt_dbg_report_handler(int report_type, char *message, int *ret_val) { if (uv__crt_assert_enabled || report_type != _CRT_ASSERT) return FALSE; - + if (ret_val) { - /* Set ret_val to 0 to continue with normal execution. */ - /* Set ret_val to 1 to trigger a breakpoint. */ + /* Set ret_val to 0 to continue with normal execution. + * Set ret_val to 1 to trigger a breakpoint. + */ - if(IsDebuggerPresent()) - *ret_val = 1; + if(IsDebuggerPresent()) + *ret_val = 1; else - *ret_val = 0; + *ret_val = 0; } /* Don't call _CrtDbgReport. */ return TRUE; } +#else +UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE; #endif @@ -84,21 +87,24 @@ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); - /* Tell the CRT to not exit the application when an invalid parameter is */ - /* passed. The main issue is that invalid FDs will trigger this behavior. */ + /* Tell the CRT to not exit the application when an invalid parameter is + * passed. The main issue is that invalid FDs will trigger this behavior. + */ #if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800 _set_invalid_parameter_handler(uv__crt_invalid_parameter_handler); #endif - /* We also need to setup our debug report handler because some CRT */ - /* functions (eg _get_osfhandle) raise an assert when called with invalid */ - /* FDs even though they return the proper error code in the release build. */ -#if defined(_DEBUG) && !defined(__MINGW32__) + /* We also need to setup our debug report handler because some CRT + * functions (eg _get_osfhandle) raise an assert when called with invalid + * FDs even though they return the proper error code in the release build. 
+ */ +#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)) _CrtSetReportHook(uv__crt_dbg_report_handler); #endif - /* Fetch winapi function pointers. This must be done first because other */ - /* intialization code might need these function pointers to be loaded. */ + /* Fetch winapi function pointers. This must be done first because other + * initialization code might need these function pointers to be loaded. + */ uv_winapi_init(); /* Initialize winsock */ @@ -127,12 +133,13 @@ if (loop->iocp == NULL) return uv_translate_sys_error(GetLastError()); - /* To prevent uninitialized memory access, loop->time must be intialized */ - /* to zero before calling uv_update_time for the first time. */ + /* To prevent uninitialized memory access, loop->time must be initialized + * to zero before calling uv_update_time for the first time. + */ loop->time = 0; - loop->last_tick_count = 0; uv_update_time(loop); + QUEUE_INIT(&loop->wq); QUEUE_INIT(&loop->handle_queue); QUEUE_INIT(&loop->active_reqs); loop->active_handles = 0; @@ -159,16 +166,16 @@ loop->timer_counter = 0; loop->stop_flag = 0; - return 0; -} + if (uv_mutex_init(&loop->wq_mutex)) + abort(); + if (uv_async_init(loop, &loop->wq_async, uv__work_done)) + abort(); -static void uv_default_loop_init(void) { - /* Initialize libuv itself first */ - uv__once_init(); + uv__handle_unref(&loop->wq_async); + loop->wq_async.flags |= UV__HANDLE_INTERNAL; - /* Initialize the main loop */ - uv_loop_init(&uv_default_loop_); + return 0; } @@ -178,8 +185,39 @@ uv_loop_t* uv_default_loop(void) { - uv_once(&uv_default_loop_init_guard_, uv_default_loop_init); - return &uv_default_loop_; + if (default_loop_ptr != NULL) + return default_loop_ptr; + + if (uv_loop_init(&default_loop_struct)) + return NULL; + + default_loop_ptr = &default_loop_struct; + return default_loop_ptr; +} + + +static void uv__loop_close(uv_loop_t* loop) { + size_t i; + + /* close the async handle without needing an extra loop iteration */ + 
assert(!loop->wq_async.async_sent); + loop->wq_async.close_cb = NULL; + uv__handle_closing(&loop->wq_async); + uv__handle_close(&loop->wq_async); + + for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) { + SOCKET sock = loop->poll_peer_sockets[i]; + if (sock != 0 && sock != INVALID_SOCKET) + closesocket(sock); + } + + uv_mutex_lock(&loop->wq_mutex); + assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); + assert(!uv__has_active_reqs(loop)); + uv_mutex_unlock(&loop->wq_mutex); + uv_mutex_destroy(&loop->wq_mutex); + + CloseHandle(loop->iocp); } @@ -193,15 +231,15 @@ if (!(h->flags & UV__HANDLE_INTERNAL)) return UV_EBUSY; } - if (loop != &uv_default_loop_) { - size_t i; - for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) { - SOCKET sock = loop->poll_peer_sockets[i]; - if (sock != 0 && sock != INVALID_SOCKET) - closesocket(sock); - } - } - /* TODO: cleanup default loop*/ + + uv__loop_close(loop); + +#ifndef NDEBUG + memset(loop, -1, sizeof(*loop)); +#endif + if (loop == default_loop_ptr) + default_loop_ptr = NULL; + return 0; } @@ -224,34 +262,52 @@ void uv_loop_delete(uv_loop_t* loop) { - assert(uv_loop_close(loop) == 0); - if (loop != &uv_default_loop_) + uv_loop_t* default_loop; + int err; + default_loop = default_loop_ptr; + err = uv_loop_close(loop); + assert(err == 0); + if (loop != default_loop) free(loop); } +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) { + return UV_ENOSYS; +} + + int uv_backend_fd(const uv_loop_t* loop) { return -1; } int uv_backend_timeout(const uv_loop_t* loop) { - return 0; + if (loop->stop_flag != 0) + return 0; + + if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop)) + return 0; + + if (loop->pending_reqs_tail) + return 0; + + if (loop->endgame_handles) + return 0; + + if (loop->idle_handles) + return 0; + + return uv__next_timeout(loop); } -static void uv_poll(uv_loop_t* loop, int block) { - DWORD bytes, timeout; +static void uv_poll(uv_loop_t* loop, DWORD 
timeout) { + DWORD bytes; ULONG_PTR key; OVERLAPPED* overlapped; uv_req_t* req; - if (block) { - timeout = uv_get_poll_timeout(loop); - } else { - timeout = 0; - } - GetQueuedCompletionStatus(loop->iocp, &bytes, &key, @@ -262,32 +318,30 @@ /* Package was dequeued */ req = uv_overlapped_to_req(overlapped); uv_insert_pending_req(loop, req); + + /* Some time might have passed waiting for I/O, + * so update the loop time here. + */ + uv_update_time(loop); } else if (GetLastError() != WAIT_TIMEOUT) { /* Serious error */ uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus"); - } else { - /* We're sure that at least `timeout` milliseconds have expired, but */ - /* this may not be reflected yet in the GetTickCount() return value. */ - /* Therefore we ensure it's taken into account here. */ + } else if (timeout > 0) { + /* GetQueuedCompletionStatus can occasionally return a little early. + * Make sure that the desired timeout is reflected in the loop time. + */ uv__time_forward(loop, timeout); } } -static void uv_poll_ex(uv_loop_t* loop, int block) { +static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) { BOOL success; - DWORD timeout; uv_req_t* req; OVERLAPPED_ENTRY overlappeds[128]; ULONG count; ULONG i; - if (block) { - timeout = uv_get_poll_timeout(loop); - } else { - timeout = 0; - } - success = pGetQueuedCompletionStatusEx(loop->iocp, overlappeds, ARRAY_SIZE(overlappeds), @@ -301,13 +355,18 @@ req = uv_overlapped_to_req(overlappeds[i].lpOverlapped); uv_insert_pending_req(loop, req); } + + /* Some time might have passed waiting for I/O, + * so update the loop time here. + */ + uv_update_time(loop); } else if (GetLastError() != WAIT_TIMEOUT) { /* Serious error */ uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx"); } else if (timeout > 0) { - /* We're sure that at least `timeout` milliseconds have expired, but */ - /* this may not be reflected yet in the GetTickCount() return value. */ - /* Therefore we ensure it's taken into account here. 
*/ + /* GetQueuedCompletionStatus can occasionally return a little early. + * Make sure that the desired timeout is reflected in the loop time. + */ uv__time_forward(loop, timeout); } } @@ -326,8 +385,9 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) { + DWORD timeout; int r; - void (*poll)(uv_loop_t* loop, int block); + void (*poll)(uv_loop_t* loop, DWORD timeout); if (pGetQueuedCompletionStatusEx) poll = &uv_poll_ex; @@ -346,19 +406,17 @@ uv_idle_invoke(loop); uv_prepare_invoke(loop); - (*poll)(loop, loop->idle_handles == NULL && - loop->pending_reqs_tail == NULL && - loop->endgame_handles == NULL && - !loop->stop_flag && - (loop->active_handles > 0 || - !QUEUE_EMPTY(&loop->active_reqs)) && - !(mode & UV_RUN_NOWAIT)); + timeout = 0; + if ((mode & UV_RUN_NOWAIT) == 0) + timeout = uv_backend_timeout(loop); + + (*poll)(loop, timeout); uv_check_invoke(loop); uv_process_endgames(loop); if (mode == UV_RUN_ONCE) { - /* UV_RUN_ONCE implies forward progess: at least one callback must have + /* UV_RUN_ONCE implies forward progress: at least one callback must have * been invoked when it returns. uv__io_poll() can return without doing * I/O (meaning: no callbacks) when its timeout expires - which means we * have pending timers that satisfy the forward progress constraint. @@ -366,7 +424,6 @@ * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from * the check. 
*/ - uv_update_time(loop); uv_process_timers(loop); } @@ -383,3 +440,68 @@ return r; } + + +int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { + uv_os_fd_t fd_out; + + switch (handle->type) { + case UV_TCP: + fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket; + break; + + case UV_NAMED_PIPE: + fd_out = ((uv_pipe_t*) handle)->handle; + break; + + case UV_TTY: + fd_out = ((uv_tty_t*) handle)->handle; + break; + + case UV_UDP: + fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket; + break; + + case UV_POLL: + fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket; + break; + + default: + return UV_EINVAL; + } + + if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE) + return UV_EBADF; + + *fd = fd_out; + return 0; +} + + +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) { + int r; + int len; + SOCKET socket; + + if (handle == NULL || value == NULL) + return UV_EINVAL; + + if (handle->type == UV_TCP) + socket = ((uv_tcp_t*) handle)->socket; + else if (handle->type == UV_UDP) + socket = ((uv_udp_t*) handle)->socket; + else + return UV_ENOTSUP; + + len = sizeof(*value); + + if (*value == 0) + r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len); + else + r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len); + + if (r == SOCKET_ERROR) + return uv_translate_sys_error(WSAGetLastError()); + + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/src/win/dl.c nodejs-0.11.15/deps/uv/src/win/dl.c --- nodejs-0.11.13/deps/uv/src/win/dl.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/dl.c 2015-01-20 21:22:17.000000000 +0000 @@ -64,7 +64,7 @@ } -const char* uv_dlerror(uv_lib_t* lib) { +const char* uv_dlerror(const uv_lib_t* lib) { return lib->errmsg ? 
lib->errmsg : "no error"; } diff -Nru nodejs-0.11.13/deps/uv/src/win/error.c nodejs-0.11.15/deps/uv/src/win/error.c --- nodejs-0.11.13/deps/uv/src/win/error.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/error.c 2015-01-20 21:22:17.000000000 +0000 @@ -133,6 +133,7 @@ case ERROR_DIRECTORY: return UV_ENOENT; case ERROR_FILE_NOT_FOUND: return UV_ENOENT; case ERROR_INVALID_NAME: return UV_ENOENT; + case ERROR_INVALID_DRIVE: return UV_ENOENT; case ERROR_INVALID_REPARSE_DATA: return UV_ENOENT; case ERROR_MOD_NOT_FOUND: return UV_ENOENT; case ERROR_PATH_NOT_FOUND: return UV_ENOENT; diff -Nru nodejs-0.11.13/deps/uv/src/win/fs.c nodejs-0.11.15/deps/uv/src/win/fs.c --- nodejs-0.11.13/deps/uv/src/win/fs.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/fs.c 2015-01-20 21:22:17.000000000 +0000 @@ -36,20 +36,20 @@ #include "req-inl.h" #include "handle-inl.h" +#include <wincrypt.h> + #define UV_FS_FREE_PATHS 0x0002 #define UV_FS_FREE_PTR 0x0008 #define UV_FS_CLEANEDUP 0x0010 +static const int uv__fs_dirent_slide = 0x20; + #define QUEUE_FS_TP_JOB(loop, req) \ do { \ - if (!QueueUserWorkItem(&uv_fs_thread_proc, \ - req, \ - WT_EXECUTEDEFAULT)) { \ - return uv_translate_sys_error(GetLastError()); \ - } \ uv__req_register(loop, req); \ + uv__work_submit((loop), &(req)->work_req, uv__fs_work, uv__fs_done); \ } while (0) #define SET_REQ_RESULT(req, result_value) \ @@ -232,11 +232,7 @@ req->result = 0; req->ptr = NULL; req->path = NULL; - - if (cb != NULL) { - req->cb = cb; - memset(&req->overlapped, 0, sizeof(req->overlapped)); - } + req->cb = cb; } @@ -287,7 +283,7 @@ (w_target[4] >= L'a' && w_target[4] <= L'z')) && w_target[5] == L':' && (w_target_len == 6 || w_target[6] == L'\\')) { - /* \??\drive:\ */ + /* \??\<drive>:\ */ w_target += 4; w_target_len -= 4; @@ -296,8 +292,8 @@ (w_target[5] == L'N' || w_target[5] == L'n') && (w_target[6] == L'C' || w_target[6] == L'c') && w_target[7] == L'\\') { - /* \??\UNC\server\share\ - make sure 
the final path looks like */ - /* \\server\share\ */ + /* \??\UNC\<server>\<share>\ - make sure the final path looks like */ + /* \\<server>\<share>\ */ w_target += 6; w_target[0] = L'\\'; w_target_len -= 6; @@ -312,8 +308,8 @@ w_target_len = reparse_data->MountPointReparseBuffer.SubstituteNameLength / sizeof(WCHAR); - /* Only treat junctions that look like \??\drive:\ as symlink. */ - /* Junctions can also be used as mount points, like \??\Volume{guid}, */ + /* Only treat junctions that look like \??\<drive>:\ as symlink. */ + /* Junctions can also be used as mount points, like \??\Volume{<guid>}, */ /* but that's confusing for programs since they wouldn't be able to */ /* actually understand such a path when returned by uv_readlink(). */ /* UNC paths are never valid for junctions so we don't care about them. */ @@ -553,7 +549,7 @@ VERIFY_FD(fd, req); handle = uv__get_osfhandle(fd); - + if (handle == INVALID_HANDLE_VALUE) { SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE); return; @@ -561,11 +557,6 @@ if (offset != -1) { memset(&overlapped, 0, sizeof overlapped); - - offset_.QuadPart = offset; - overlapped.Offset = offset_.LowPart; - overlapped.OffsetHigh = offset_.HighPart; - overlapped_ptr = &overlapped; } else { overlapped_ptr = NULL; @@ -575,6 +566,13 @@ bytes = 0; do { DWORD incremental_bytes; + + if (offset != -1) { + offset_.QuadPart = offset + bytes; + overlapped.Offset = offset_.LowPart; + overlapped.OffsetHigh = offset_.HighPart; + } + result = ReadFile(handle, req->bufs[index].base, req->bufs[index].len, @@ -617,11 +615,6 @@ if (offset != -1) { memset(&overlapped, 0, sizeof overlapped); - - offset_.QuadPart = offset; - overlapped.Offset = offset_.LowPart; - overlapped.OffsetHigh = offset_.HighPart; - overlapped_ptr = &overlapped; } else { overlapped_ptr = NULL; @@ -631,6 +624,13 @@ bytes = 0; do { DWORD incremental_bytes; + + if (offset != -1) { + offset_.QuadPart = offset + bytes; + overlapped.Offset = offset_.LowPart; + overlapped.OffsetHigh = 
offset_.HighPart; + } + result = WriteFile(handle, req->bufs[index].base, req->bufs[index].len, @@ -729,16 +729,75 @@ } -void fs__readdir(uv_fs_t* req) { +/* OpenBSD original: lib/libc/stdio/mktemp.c */ +void fs__mkdtemp(uv_fs_t* req) { + static const WCHAR *tempchars = + L"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + static const size_t num_chars = 62; + static const size_t num_x = 6; + WCHAR *cp, *ep; + unsigned int tries, i; + size_t len; + HCRYPTPROV h_crypt_prov; + uint64_t v; + BOOL released; + + len = wcslen(req->pathw); + ep = req->pathw + len; + if (len < num_x || wcsncmp(ep - num_x, L"XXXXXX", num_x)) { + SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER); + return; + } + + if (!CryptAcquireContext(&h_crypt_prov, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + tries = TMP_MAX; + do { + if (!CryptGenRandom(h_crypt_prov, sizeof(v), (BYTE*) &v)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + break; + } + + cp = ep - num_x; + for (i = 0; i < num_x; i++) { + *cp++ = tempchars[v % num_chars]; + v /= num_chars; + } + + if (_wmkdir(req->pathw) == 0) { + len = strlen(req->path); + wcstombs((char*) req->path + len - num_x, ep - num_x, num_x); + SET_REQ_RESULT(req, 0); + break; + } else if (errno != EEXIST) { + SET_REQ_RESULT(req, -1); + break; + } + } while (--tries); + + released = CryptReleaseContext(h_crypt_prov, 0); + assert(released); + if (tries == 0) { + SET_REQ_RESULT(req, -1); + } +} + + +void fs__scandir(uv_fs_t* req) { WCHAR* pathw = req->pathw; size_t len = wcslen(pathw); - int result, size; - WCHAR* buf = NULL, *ptr, *name; + int result; + WCHAR* name; HANDLE dir; WIN32_FIND_DATAW ent = { 0 }; - size_t buf_char_len = 4096; WCHAR* path2; const WCHAR* fmt; + uv__dirent_t** dents; + int dent_size; if (len == 0) { fmt = L"./*"; @@ -757,7 +816,8 @@ path2 = (WCHAR*)malloc(sizeof(WCHAR) * (len + 4)); if (!path2) { - uv_fatal_error(ERROR_OUTOFMEMORY, "malloc"); + 
SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY); + return; } _snwprintf(path2, len + 3, fmt, pathw); @@ -770,71 +830,81 @@ } result = 0; + dents = NULL; + dent_size = 0; do { - name = ent.cFileName; - - if (name[0] != L'.' || (name[1] && (name[1] != L'.' || name[2]))) { - len = wcslen(name); - - if (!buf) { - buf = (WCHAR*)malloc(buf_char_len * sizeof(WCHAR)); - if (!buf) { - uv_fatal_error(ERROR_OUTOFMEMORY, "malloc"); - } + uv__dirent_t* dent; + int utf8_len; - ptr = buf; - } + name = ent.cFileName; - while ((ptr - buf) + len + 1 > buf_char_len) { - buf_char_len *= 2; - path2 = buf; - buf = (WCHAR*)realloc(buf, buf_char_len * sizeof(WCHAR)); - if (!buf) { - uv_fatal_error(ERROR_OUTOFMEMORY, "realloc"); - } + if (!(name[0] != L'.' || (name[1] && (name[1] != L'.' || name[2])))) + continue; - ptr = buf + (ptr - path2); + /* Grow dents buffer, if needed */ + if (result >= dent_size) { + uv__dirent_t** tmp; + + dent_size += uv__fs_dirent_slide; + tmp = realloc(dents, dent_size * sizeof(*dents)); + if (tmp == NULL) { + SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY); + goto fatal; } - - wcscpy(ptr, name); - ptr += len + 1; - result++; + dents = tmp; } - } while(FindNextFileW(dir, &ent)); - - FindClose(dir); - if (buf) { - /* Convert result to UTF8. 
*/ - size = uv_utf16_to_utf8(buf, buf_char_len, NULL, 0); - if (!size) { + /* Allocate enough space to fit utf8 encoding of file name */ + len = wcslen(name); + utf8_len = uv_utf16_to_utf8(name, len, NULL, 0); + if (!utf8_len) { SET_REQ_WIN32_ERROR(req, GetLastError()); - return; + goto fatal; } - req->ptr = (char*)malloc(size + 1); - if (!req->ptr) { - uv_fatal_error(ERROR_OUTOFMEMORY, "malloc"); + dent = malloc(sizeof(*dent) + utf8_len + 1); + if (dent == NULL) { + SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY); + goto fatal; } - size = uv_utf16_to_utf8(buf, buf_char_len, (char*)req->ptr, size); - if (!size) { - free(buf); - free(req->ptr); - req->ptr = NULL; + /* Copy file name */ + utf8_len = uv_utf16_to_utf8(name, len, dent->d_name, utf8_len); + if (!utf8_len) { + free(dent); SET_REQ_WIN32_ERROR(req, GetLastError()); - return; + goto fatal; } - free(buf); + dent->d_name[utf8_len] = '\0'; - ((char*)req->ptr)[size] = '\0'; + /* Copy file type */ + if ((ent.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) + dent->d_type = UV__DT_DIR; + else if ((ent.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0) + dent->d_type = UV__DT_LINK; + else + dent->d_type = UV__DT_FILE; + + dents[result++] = dent; + } while(FindNextFileW(dir, &ent)); + + FindClose(dir); + + if (dents != NULL) req->flags |= UV_FS_FREE_PTR; - } else { - req->ptr = NULL; - } + /* NOTE: nbufs will be used as index */ + req->nbufs = 0; + req->ptr = dents; SET_REQ_RESULT(req, result); + return; + +fatal: + /* Deallocate dents */ + for (result--; result >= 0; result--) + free(dents[result]); + free(dents); } @@ -877,7 +947,7 @@ * * Currently it's based on whether the 'readonly' attribute is set, which * makes little sense because the semantics are so different: the 'read-only' - * flag is just a way for a user to protect against accidental deleteion, and + * flag is just a way for a user to protect against accidental deletion, and * serves no security purpose. Windows uses ACLs for that. 
* * Also people now use uv_fs_chmod() to take away the writable bit for good @@ -886,7 +956,7 @@ * deleted. * * IOW it's all just a clusterfuck and we should think of something that - * makes slighty more sense. + * makes slightly more sense. * * And uv_fs_chmod should probably just fail on windows or be a total no-op. * There's nothing sensible it can do anyway. @@ -1154,6 +1224,25 @@ } +static void fs__access(uv_fs_t* req) { + DWORD attr = GetFileAttributesW(req->pathw); + + if (attr == INVALID_FILE_ATTRIBUTES) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + if ((req->flags & W_OK) && + ((attr & FILE_ATTRIBUTE_READONLY) || + (attr & FILE_ATTRIBUTE_DIRECTORY))) { + SET_REQ_WIN32_ERROR(req, UV_EPERM); + return; + } + + SET_REQ_RESULT(req, 0); +} + + static void fs__chmod(uv_fs_t* req) { int result = _wchmod(req->pathw, req->mode); SET_REQ_RESULT(req, result); @@ -1401,7 +1490,7 @@ /* Open the directory */ handle = CreateFileW(new_path, - GENERIC_ALL, + GENERIC_WRITE, 0, NULL, OPEN_EXISTING, @@ -1510,11 +1599,10 @@ } -static DWORD WINAPI uv_fs_thread_proc(void* parameter) { - uv_fs_t* req = (uv_fs_t*) parameter; - uv_loop_t* loop = req->loop; +static void uv__fs_work(struct uv__work* w) { + uv_fs_t* req; - assert(req != NULL); + req = container_of(w, uv_fs_t, work_req); assert(req->type == UV_FS); #define XX(uc, lc) case UV_FS_##uc: fs__##lc(req); break; @@ -1530,6 +1618,7 @@ XX(FTRUNCATE, ftruncate) XX(UTIME, utime) XX(FUTIME, futime) + XX(ACCESS, access) XX(CHMOD, chmod) XX(FCHMOD, fchmod) XX(FSYNC, fsync) @@ -1537,8 +1626,9 @@ XX(UNLINK, unlink) XX(RMDIR, rmdir) XX(MKDIR, mkdir) + XX(MKDTEMP, mkdtemp) XX(RENAME, rename) - XX(READDIR, readdir) + XX(SCANDIR, scandir) XX(LINK, link) XX(SYMLINK, symlink) XX(READLINK, readlink) @@ -1547,9 +1637,41 @@ default: assert(!"bad uv_fs_type"); } +} - POST_COMPLETION_FOR_REQ(loop, req); - return 0; + +static void uv__fs_done(struct uv__work* w, int status) { + uv_fs_t* req; + + req = container_of(w, uv_fs_t, 
work_req); + uv__req_unregister(req->loop, req); + + if (status == UV_ECANCELED) { + assert(req->result == 0); + req->result = UV_ECANCELED; + } + + if (req->cb != NULL) + req->cb(req); +} + + +void uv_fs_req_cleanup(uv_fs_t* req) { + if (req->flags & UV_FS_CLEANEDUP) + return; + + if (req->flags & UV_FS_FREE_PATHS) + free(req->pathw); + + if (req->flags & UV_FS_FREE_PTR) + free(req->ptr); + + req->path = NULL; + req->pathw = NULL; + req->new_pathw = NULL; + req->ptr = NULL; + + req->flags |= UV_FS_CLEANEDUP; } @@ -1701,6 +1823,26 @@ } +int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_MKDTEMP, cb); + + err = fs__capture_path(loop, req, tpl, NULL, TRUE); + if (err) + return uv_translate_sys_error(err); + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__mkdtemp(req); + return req->result; + } +} + + int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { int err; @@ -1721,11 +1863,11 @@ } -int uv_fs_readdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, +int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, uv_fs_cb cb) { int err; - uv_fs_req_init(loop, req, UV_FS_READDIR, cb); + uv_fs_req_init(loop, req, UV_FS_SCANDIR, cb); err = fs__capture_path(loop, req, path, NULL, cb != NULL); if (err) { @@ -1738,7 +1880,7 @@ QUEUE_FS_TP_JOB(loop, req); return 0; } else { - fs__readdir(req); + fs__scandir(req); return req->result; } } @@ -1984,6 +2126,31 @@ } +int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_ACCESS, cb); + + err = fs__capture_path(loop, req, path, NULL, cb != NULL); + if (err) + return uv_translate_sys_error(err); + + req->flags = flags; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } + + fs__access(req); + return req->result; +} + + int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* 
path, int mode, uv_fs_cb cb) { int err; @@ -2064,30 +2231,3 @@ return req->result; } } - - -void uv_process_fs_req(uv_loop_t* loop, uv_fs_t* req) { - assert(req->cb); - uv__req_unregister(loop, req); - req->cb(req); -} - - -void uv_fs_req_cleanup(uv_fs_t* req) { - if (req->flags & UV_FS_CLEANEDUP) - return; - - if (req->flags & UV_FS_FREE_PATHS) - free(req->pathw); - - if (req->flags & UV_FS_FREE_PTR) - free(req->ptr); - - req->path = NULL; - req->pathw = NULL; - req->new_pathw = NULL; - req->ptr = NULL; - - req->flags |= UV_FS_CLEANEDUP; -} - diff -Nru nodejs-0.11.13/deps/uv/src/win/fs-event.c nodejs-0.11.15/deps/uv/src/win/fs-event.c --- nodejs-0.11.13/deps/uv/src/win/fs-event.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/fs-event.c 2015-01-20 21:22:17.000000000 +0000 @@ -290,7 +290,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { if (!uv__is_active(handle)) - return UV_EINVAL; + return 0; if (handle->dir_handle != INVALID_HANDLE_VALUE) { CloseHandle(handle->dir_handle); diff -Nru nodejs-0.11.13/deps/uv/src/win/getaddrinfo.c nodejs-0.11.15/deps/uv/src/win/getaddrinfo.c --- nodejs-0.11.13/deps/uv/src/win/getaddrinfo.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/getaddrinfo.c 2015-01-20 21:22:17.000000000 +0000 @@ -26,6 +26,25 @@ #include "internal.h" #include "req-inl.h" +/* EAI_* constants. 
*/ +#include <winsock2.h> + + +int uv__getaddrinfo_translate_error(int sys_err) { + switch (sys_err) { + case 0: return 0; + case WSATRY_AGAIN: return UV_EAI_AGAIN; + case WSAEINVAL: return UV_EAI_BADFLAGS; + case WSANO_RECOVERY: return UV_EAI_FAIL; + case WSAEAFNOSUPPORT: return UV_EAI_FAMILY; + case WSA_NOT_ENOUGH_MEMORY: return UV_EAI_MEMORY; + case WSAHOST_NOT_FOUND: return UV_EAI_NONAME; + case WSATYPE_NOT_FOUND: return UV_EAI_SERVICE; + case WSAESOCKTNOSUPPORT: return UV_EAI_SOCKTYPE; + default: return uv_translate_sys_error(sys_err); + } +} + /* * MinGW is missing this @@ -56,25 +75,13 @@ #define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2) -/* getaddrinfo worker thread implementation */ -static DWORD WINAPI getaddrinfo_thread_proc(void* parameter) { - uv_getaddrinfo_t* req = (uv_getaddrinfo_t*) parameter; - uv_loop_t* loop = req->loop; - int ret; - - assert(req != NULL); - - /* call OS function on this thread */ - ret = GetAddrInfoW(req->node, - req->service, - req->hints, - &req->res); - req->retcode = ret; - - /* post getaddrinfo completed */ - POST_COMPLETION_FOR_REQ(loop, req); +static void uv__getaddrinfo_work(struct uv__work* w) { + uv_getaddrinfo_t* req; + int err; - return 0; + req = container_of(w, uv_getaddrinfo_t, work_req); + err = GetAddrInfoW(req->node, req->service, req->hints, &req->res); + req->retcode = uv__getaddrinfo_translate_error(err); } @@ -87,7 +94,8 @@ * and copy all structs and referenced strings into the one block. * Each size calculation is adjusted to avoid unaligned pointers. 
*/ -void uv_process_getaddrinfo_req(uv_loop_t* loop, uv_getaddrinfo_t* req) { +static void uv__getaddrinfo_done(struct uv__work* w, int status) { + uv_getaddrinfo_t* req; int addrinfo_len = 0; int name_len = 0; size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo)); @@ -95,7 +103,8 @@ struct addrinfo* addrinfo_ptr; char* alloc_ptr = NULL; char* cur_ptr = NULL; - int err = 0; + + req = container_of(w, uv_getaddrinfo_t, work_req); /* release input parameter memory */ if (req->alloc != NULL) { @@ -103,6 +112,16 @@ req->alloc = NULL; } + if (status == UV_ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + if (req->res != NULL) { + FreeAddrInfoW(req->res); + req->res = NULL; + } + goto complete; + } + if (req->retcode == 0) { /* convert addrinfoW to addrinfo */ /* first calculate required length */ @@ -113,8 +132,7 @@ if (addrinfow_ptr->ai_canonname != NULL) { name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname, -1, NULL, 0); if (name_len == 0) { - /* FIXME(bnoordhuis) Retain GetLastError(). 
*/ - err = UV_EAI_SYSTEM; + req->retcode = uv_translate_sys_error(GetLastError()); goto complete; } addrinfo_len += ALIGNED_SIZE(name_len); @@ -179,11 +197,8 @@ } } } else { - err = UV_EAI_MEMORY; + req->retcode = UV_EAI_MEMORY; } - } else { - /* GetAddrInfo failed */ - err = uv__getaddrinfo_translate_error(req->retcode); } /* return memory to system */ @@ -193,10 +208,10 @@ } complete: - uv__req_unregister(loop, req); + uv__req_unregister(req->loop, req); /* finally do callback with converted result */ - req->getaddrinfo_cb(req, err, (struct addrinfo*)alloc_ptr); + req->getaddrinfo_cb(req, req->retcode, (struct addrinfo*)alloc_ptr); } @@ -247,6 +262,7 @@ req->res = NULL; req->type = UV_GETADDRINFO; req->loop = loop; + req->retcode = 0; /* calculate required memory size for all input values */ if (node != NULL) { @@ -280,7 +296,7 @@ req->alloc = (void*)alloc_ptr; /* convert node string to UTF16 into allocated memory and save pointer in */ - /* the reques. */ + /* the request. */ if (node != NULL) { req->node = (WCHAR*)alloc_ptr; if (uv_utf8_to_utf16(node, @@ -324,13 +340,10 @@ req->hints = NULL; } - /* Ask thread to run. Treat this as a long operation */ - if (QueueUserWorkItem(&getaddrinfo_thread_proc, - req, - WT_EXECUTELONGFUNCTION) == 0) { - err = GetLastError(); - goto error; - } + uv__work_submit(loop, + &req->work_req, + uv__getaddrinfo_work, + uv__getaddrinfo_done); uv__req_register(loop, req); diff -Nru nodejs-0.11.13/deps/uv/src/win/getnameinfo.c nodejs-0.11.15/deps/uv/src/win/getnameinfo.c --- nodejs-0.11.13/deps/uv/src/win/getnameinfo.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/getnameinfo.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,145 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
+*/ + +#include <assert.h> +#include <malloc.h> +#include <stdio.h> + +#include "uv.h" +#include "internal.h" +#include "req-inl.h" + +#ifndef GetNameInfo +int WSAAPI GetNameInfoW( + const SOCKADDR *pSockaddr, + socklen_t SockaddrLength, + PWCHAR pNodeBuffer, + DWORD NodeBufferSize, + PWCHAR pServiceBuffer, + DWORD ServiceBufferSize, + INT Flags +); +#endif + +static void uv__getnameinfo_work(struct uv__work* w) { + uv_getnameinfo_t* req; + WCHAR host[NI_MAXHOST]; + WCHAR service[NI_MAXSERV]; + int ret = 0; + + req = container_of(w, uv_getnameinfo_t, work_req); + if (GetNameInfoW((struct sockaddr*)&req->storage, + sizeof(req->storage), + host, + ARRAY_SIZE(host), + service, + ARRAY_SIZE(service), + req->flags)) { + ret = WSAGetLastError(); + } + req->retcode = uv__getaddrinfo_translate_error(ret); + + /* convert results to UTF-8 */ + WideCharToMultiByte(CP_UTF8, + 0, + host, + -1, + req->host, + sizeof(req->host), + NULL, + NULL); + + WideCharToMultiByte(CP_UTF8, + 0, + service, + -1, + req->service, + sizeof(req->service), + NULL, + NULL); +} + + +/* +* Called from uv_run when complete. 
+*/ +static void uv__getnameinfo_done(struct uv__work* w, int status) { + uv_getnameinfo_t* req; + char* host; + char* service; + + req = container_of(w, uv_getnameinfo_t, work_req); + uv__req_unregister(req->loop, req); + host = service = NULL; + + if (status == UV_ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } else if (req->retcode == 0) { + host = req->host; + service = req->service; + } + + req->getnameinfo_cb(req, req->retcode, host, service); +} + + +/* +* Entry point for getnameinfo +* return 0 if a callback will be made +* return error code if validation fails +*/ +int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags) { + if (req == NULL || getnameinfo_cb == NULL || addr == NULL) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in)); + } else if (addr->sa_family == AF_INET6) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in6)); + } else { + return UV_EINVAL; + } + + uv_req_init(loop, (uv_req_t*)req); + uv__req_register(loop, req); + + req->getnameinfo_cb = getnameinfo_cb; + req->flags = flags; + req->type = UV_GETNAMEINFO; + req->loop = loop; + req->retcode = 0; + + uv__work_submit(loop, + &req->work_req, + uv__getnameinfo_work, + uv__getnameinfo_done); + + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/src/win/handle-inl.h nodejs-0.11.15/deps/uv/src/win/handle-inl.h --- nodejs-0.11.13/deps/uv/src/win/handle-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/handle-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -168,7 +168,7 @@ /* But it also correctly checks the FD and returns INVALID_HANDLE_VALUE */ /* for invalid FDs in release builds (or if you let the assert continue). */ /* So this wrapper function disables asserts when calling _get_osfhandle. 
*/ - + HANDLE handle; UV_BEGIN_DISABLE_CRT_ASSERT(); handle = (HANDLE) _get_osfhandle(fd); diff -Nru nodejs-0.11.13/deps/uv/src/win/internal.h nodejs-0.11.15/deps/uv/src/win/internal.h --- nodejs-0.11.13/deps/uv/src/win/internal.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/internal.h 2015-01-20 21:22:17.000000000 +0000 @@ -31,19 +31,22 @@ #ifdef _MSC_VER # define INLINE __inline +# define UV_THREAD_LOCAL __declspec( thread ) #else # define INLINE inline +# define UV_THREAD_LOCAL __thread #endif #ifdef _DEBUG -extern __declspec( thread ) int uv__crt_assert_enabled; + +extern UV_THREAD_LOCAL int uv__crt_assert_enabled; #define UV_BEGIN_DISABLE_CRT_ASSERT() \ { \ int uv__saved_crt_assert_enabled = uv__crt_assert_enabled; \ uv__crt_assert_enabled = FALSE; - + #define UV_END_DISABLE_CRT_ASSERT() \ uv__crt_assert_enabled = uv__saved_crt_assert_enabled; \ @@ -62,7 +65,6 @@ /* Used by all handles. */ #define UV_HANDLE_CLOSED 0x00000002 #define UV_HANDLE_ENDGAME_QUEUED 0x00000004 -#define UV_HANDLE_ACTIVE 0x00000010 /* uv-common.h: #define UV__HANDLE_CLOSING 0x00000001 */ /* uv-common.h: #define UV__HANDLE_ACTIVE 0x00000040 */ @@ -72,7 +74,6 @@ /* Used by streams and UDP handles. */ #define UV_HANDLE_READING 0x00000100 #define UV_HANDLE_BOUND 0x00000200 -#define UV_HANDLE_BIND_ERROR 0x00000400 #define UV_HANDLE_LISTENING 0x00000800 #define UV_HANDLE_CONNECTION 0x00001000 #define UV_HANDLE_CONNECTED 0x00002000 @@ -98,6 +99,7 @@ /* Only used by uv_pipe_t handles. */ #define UV_HANDLE_NON_OVERLAPPED_PIPE 0x01000000 #define UV_HANDLE_PIPESERVER 0x02000000 +#define UV_HANDLE_PIPE_READ_CANCELABLE 0x04000000 /* Only used by uv_tty_t handles. 
*/ #define UV_HANDLE_TTY_READABLE 0x01000000 @@ -122,6 +124,12 @@ /* * TCP */ + +typedef struct { + WSAPROTOCOL_INFOW socket_info; + int delayed_error; +} uv__ipc_socket_info_ex; + int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb); int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client); int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb, @@ -140,7 +148,7 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp); void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle); -int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info, +int uv_tcp_import(uv_tcp_t* tcp, uv__ipc_socket_info_ex* socket_info_ex, int tcp_connection); int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid, @@ -173,6 +181,9 @@ int uv_pipe_write2(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle, const uv_buf_t bufs[], unsigned int nbufs, uv_stream_t* send_handle, uv_write_cb cb); +void uv__pipe_pause_read(uv_pipe_t* handle); +void uv__pipe_unpause_read(uv_pipe_t* handle); +void uv__pipe_stop_read(uv_pipe_t* handle); void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle, uv_req_t* req); @@ -231,7 +242,7 @@ */ void uv_timer_endgame(uv_loop_t* loop, uv_timer_t* handle); -DWORD uv_get_poll_timeout(uv_loop_t* loop); +DWORD uv__next_timeout(const uv_loop_t* loop); void uv__time_forward(uv_loop_t* loop, uint64_t msecs); void uv_process_timers(uv_loop_t* loop); @@ -286,22 +297,9 @@ /* - * Getaddrinfo - */ -void uv_process_getaddrinfo_req(uv_loop_t* loop, uv_getaddrinfo_t* req); - - -/* * FS */ void uv_fs_init(); -void uv_process_fs_req(uv_loop_t* loop, uv_fs_t* req); - - -/* - * Threadpool - */ -void uv_process_work_req(uv_loop_t* loop, uv_work_t* req); /* @@ -324,6 +322,7 @@ */ void uv__util_init(); +uint64_t uv__hrtime(double scale); int uv_parent_pid(); __declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall); diff -Nru nodejs-0.11.13/deps/uv/src/win/loop-watcher.c nodejs-0.11.15/deps/uv/src/win/loop-watcher.c --- 
nodejs-0.11.13/deps/uv/src/win/loop-watcher.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/loop-watcher.c 2015-01-20 21:22:17.000000000 +0000 @@ -49,7 +49,7 @@ \ assert(handle->type == UV_##NAME); \ \ - if (handle->flags & UV_HANDLE_ACTIVE) \ + if (uv__is_active(handle)) \ return 0; \ \ if (cb == NULL) \ @@ -67,7 +67,6 @@ loop->name##_handles = handle; \ \ handle->name##_cb = cb; \ - handle->flags |= UV_HANDLE_ACTIVE; \ uv__handle_start(handle); \ \ return 0; \ @@ -79,7 +78,7 @@ \ assert(handle->type == UV_##NAME); \ \ - if (!(handle->flags & UV_HANDLE_ACTIVE)) \ + if (!uv__is_active(handle)) \ return 0; \ \ /* Update loop head if needed */ \ @@ -99,7 +98,6 @@ handle->name##_next->name##_prev = handle->name##_prev; \ } \ \ - handle->flags &= ~UV_HANDLE_ACTIVE; \ uv__handle_stop(handle); \ \ return 0; \ diff -Nru nodejs-0.11.13/deps/uv/src/win/pipe.c nodejs-0.11.15/deps/uv/src/win/pipe.c --- nodejs-0.11.13/deps/uv/src/win/pipe.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/pipe.c 2015-01-20 21:22:17.000000000 +0000 @@ -35,10 +35,10 @@ struct uv__ipc_queue_item_s { /* - * NOTE: It is important for socket_info to be the first field, + * NOTE: It is important for socket_info_ex to be the first field, * because we will we assigning it to the pending_ipc_info.socket_info */ - WSAPROTOCOL_INFOW socket_info; + uv__ipc_socket_info_ex socket_info_ex; QUEUE member; int tcp_connection; }; @@ -73,7 +73,7 @@ /* IPC frame, which contains an imported TCP socket stream. 
*/ typedef struct { uv_ipc_frame_header_t header; - WSAPROTOCOL_INFOW socket_info; + uv__ipc_socket_info_ex socket_info_ex; } uv_ipc_frame_uv_stream; static void eof_timer_init(uv_pipe_t* pipe); @@ -101,6 +101,7 @@ handle->pending_ipc_info.queue_len = 0; handle->ipc = ipc; handle->non_overlapped_writes_tail = NULL; + handle->readfile_thread = NULL; uv_req_init(loop, (uv_req_t*) &handle->ipc_header_write_req); @@ -112,10 +113,16 @@ uv_connection_init((uv_stream_t*) handle); handle->read_req.data = handle; handle->eof_timer = NULL; + assert(!(handle->flags & UV_HANDLE_PIPESERVER)); + if (pCancelSynchronousIo && + handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) { + uv_mutex_init(&handle->readfile_mutex); + handle->flags |= UV_HANDLE_PIPE_READ_CANCELABLE; + } } -static HANDLE open_named_pipe(WCHAR* name, DWORD* duplex_flags) { +static HANDLE open_named_pipe(const WCHAR* name, DWORD* duplex_flags) { HANDLE pipeHandle; /* @@ -230,7 +237,7 @@ NTSTATUS nt_status; IO_STATUS_BLOCK io_status; FILE_MODE_INFORMATION mode_info; - DWORD mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT; + DWORD mode = PIPE_READMODE_BYTE | PIPE_WAIT; DWORD current_mode = 0; DWORD err = 0; @@ -246,11 +253,9 @@ if (!GetNamedPipeHandleState(pipeHandle, ¤t_mode, NULL, NULL, NULL, NULL, 0)) { return -1; - } else if (current_mode != mode) { + } else if (current_mode & PIPE_NOWAIT) { SetLastError(ERROR_ACCESS_DENIED); return -1; - } else { - duplex_flags &= ~UV_HANDLE_WRITABLE; } } else { /* If this returns ERROR_INVALID_PARAMETER we probably opened @@ -323,6 +328,11 @@ FILE_PIPE_LOCAL_INFORMATION pipe_info; uv__ipc_queue_item_t* item; + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + handle->flags &= ~UV_HANDLE_PIPE_READ_CANCELABLE; + uv_mutex_destroy(&handle->readfile_mutex); + } + if ((handle->flags & UV_HANDLE_CONNECTION) && handle->shutdown_req != NULL && handle->write_reqs_pending == 0) { @@ -410,7 +420,7 @@ socket = WSASocketW(FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, 
- &item->socket_info, + &item->socket_info_ex.socket_info, 0, WSA_FLAG_OVERLAPPED); free(item); @@ -660,12 +670,49 @@ } +void uv__pipe_pause_read(uv_pipe_t* handle) { + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + /* Pause the ReadFile task briefly, to work + around the Windows kernel bug that causes + any access to a NamedPipe to deadlock if + any process has called ReadFile */ + HANDLE h; + uv_mutex_lock(&handle->readfile_mutex); + h = handle->readfile_thread; + while (h) { + /* spinlock: we expect this to finish quickly, + or we are probably about to deadlock anyways + (in the kernel), so it doesn't matter */ + pCancelSynchronousIo(h); + SwitchToThread(); /* yield thread control briefly */ + h = handle->readfile_thread; + } + } +} + + +void uv__pipe_unpause_read(uv_pipe_t* handle) { + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + uv_mutex_unlock(&handle->readfile_mutex); + } +} + + +void uv__pipe_stop_read(uv_pipe_t* handle) { + handle->flags &= ~UV_HANDLE_READING; + uv__pipe_pause_read((uv_pipe_t*)handle); + uv__pipe_unpause_read((uv_pipe_t*)handle); +} + + /* Cleans up uv_pipe_t (server or connection) and all resources associated */ /* with it. 
*/ void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) { int i; HANDLE pipeHandle; + uv__pipe_stop_read(handle); + if (handle->name) { free(handle->name); handle->name = NULL; @@ -691,6 +738,7 @@ CloseHandle(handle->handle); handle->handle = INVALID_HANDLE_VALUE; } + } @@ -789,7 +837,7 @@ item = QUEUE_DATA(q, uv__ipc_queue_item_t, member); err = uv_tcp_import((uv_tcp_t*)client, - &item->socket_info, + &item->socket_info_ex, item->tcp_connection); if (err != 0) return err; @@ -869,19 +917,61 @@ uv_read_t* req = (uv_read_t*) parameter; uv_pipe_t* handle = (uv_pipe_t*) req->data; uv_loop_t* loop = handle->loop; + HANDLE hThread = NULL; + DWORD err; + uv_mutex_t *m = &handle->readfile_mutex; assert(req != NULL); assert(req->type == UV_READ); assert(handle->type == UV_NAMED_PIPE); + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + uv_mutex_lock(m); /* mutex controls *setting* of readfile_thread */ + if (DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), + GetCurrentProcess(), &hThread, + 0, TRUE, DUPLICATE_SAME_ACCESS)) { + handle->readfile_thread = hThread; + } else { + hThread = NULL; + } + uv_mutex_unlock(m); + } +restart_readfile: result = ReadFile(handle->handle, &uv_zero_, 0, &bytes, NULL); + if (!result) { + err = GetLastError(); + if (err == ERROR_OPERATION_ABORTED && + handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + if (handle->flags & UV_HANDLE_READING) { + /* just a brief break to do something else */ + handle->readfile_thread = NULL; + /* resume after it is finished */ + uv_mutex_lock(m); + handle->readfile_thread = hThread; + uv_mutex_unlock(m); + goto restart_readfile; + } else { + result = 1; /* successfully stopped reading */ + } + } + } + if (hThread) { + assert(hThread == handle->readfile_thread); + /* mutex does not control clearing readfile_thread */ + handle->readfile_thread = NULL; + uv_mutex_lock(m); + /* only when we hold the mutex lock is it safe to + open or close the handle */ + CloseHandle(hThread); + uv_mutex_unlock(m); 
+ } if (!result) { - SET_REQ_ERROR(req, GetLastError()); + SET_REQ_ERROR(req, err); } POST_COMPLETION_FOR_REQ(loop, req); @@ -1134,10 +1224,13 @@ tcp_send_handle = (uv_tcp_t*)send_handle; err = uv_tcp_duplicate_socket(tcp_send_handle, handle->ipc_pid, - &ipc_frame.socket_info); + &ipc_frame.socket_info_ex.socket_info); if (err) { return err; } + + ipc_frame.socket_info_ex.delayed_error = tcp_send_handle->delayed_error; + ipc_frame.header.flags |= UV_IPC_TCP_SERVER; if (tcp_send_handle->flags & UV_HANDLE_CONNECTION) { @@ -1254,7 +1347,7 @@ } /* Request queued by the kernel. */ - req->queued_bytes = uv_count_bufs(bufs, nbufs); + req->queued_bytes = uv__count_bufs(bufs, nbufs); handle->write_queue_size += req->queued_bytes; } else if (handle->flags & UV_HANDLE_BLOCKING_WRITES) { /* Using overlapped IO, but wait for completion before returning */ @@ -1311,7 +1404,7 @@ req->queued_bytes = 0; } else { /* Request queued by the kernel. */ - req->queued_bytes = uv_count_bufs(bufs, nbufs); + req->queued_bytes = uv__count_bufs(bufs, nbufs); handle->write_queue_size += req->queued_bytes; } @@ -1397,7 +1490,7 @@ void uv__pipe_insert_pending_socket(uv_pipe_t* handle, - WSAPROTOCOL_INFOW* info, + uv__ipc_socket_info_ex* info, int tcp_connection) { uv__ipc_queue_item_t* item; @@ -1405,7 +1498,7 @@ if (item == NULL) uv_fatal_error(ERROR_OUTOFMEMORY, "malloc"); - memcpy(&item->socket_info, info, sizeof(item->socket_info)); + memcpy(&item->socket_info_ex, info, sizeof(item->socket_info_ex)); item->tcp_connection = tcp_connection; QUEUE_INSERT_TAIL(&handle->pending_ipc_info.queue, &item->member); handle->pending_ipc_info.queue_len++; @@ -1471,11 +1564,11 @@ if (ipc_frame.header.flags & UV_IPC_TCP_SERVER) { assert(avail - sizeof(ipc_frame.header) >= - sizeof(ipc_frame.socket_info)); + sizeof(ipc_frame.socket_info_ex)); /* Read the TCP socket info. 
*/ if (!ReadFile(handle->handle, - &ipc_frame.socket_info, + &ipc_frame.socket_info_ex, sizeof(ipc_frame) - sizeof(ipc_frame.header), &bytes, NULL)) { @@ -1489,7 +1582,7 @@ /* Store the pending socket info. */ uv__pipe_insert_pending_socket( handle, - &ipc_frame.socket_info, + &ipc_frame.socket_info_ex, ipc_frame.header.flags & UV_IPC_TCP_CONNECTION); } @@ -1772,7 +1865,34 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) { HANDLE os_handle = uv__get_osfhandle(file); - DWORD duplex_flags = UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; + NTSTATUS nt_status; + IO_STATUS_BLOCK io_status; + FILE_ACCESS_INFORMATION access; + DWORD duplex_flags = 0; + + /* Determine what kind of permissions we have on this handle. + * Cygwin opens the pipe in message mode, but we can support it, + * just query the access flags and set the stream flags accordingly. + */ + nt_status = pNtQueryInformationFile(os_handle, + &io_status, + &access, + sizeof(access), + FileAccessInformation); + if (nt_status != STATUS_SUCCESS) + return UV_EINVAL; + + if (pipe->ipc) { + if (!(access.AccessFlags & FILE_WRITE_DATA) || + !(access.AccessFlags & FILE_READ_DATA)) { + return UV_EINVAL; + } + } + + if (access.AccessFlags & FILE_WRITE_DATA) + duplex_flags |= UV_HANDLE_WRITABLE; + if (access.AccessFlags & FILE_READ_DATA) + duplex_flags |= UV_HANDLE_READABLE; if (os_handle == INVALID_HANDLE_VALUE || uv_set_pipe_handle(pipe->loop, pipe, os_handle, duplex_flags) == -1) { @@ -1808,6 +1928,8 @@ return UV_EINVAL; } + uv__pipe_pause_read((uv_pipe_t*)handle); /* cast away const warning */ + nt_status = pNtQueryInformationFile(handle->handle, &io_status, &tmp_name_info, @@ -1818,7 +1940,8 @@ name_info = malloc(name_size); if (!name_info) { *len = 0; - return UV_ENOMEM; + err = UV_ENOMEM; + goto cleanup; } nt_status = pNtQueryInformationFile(handle->handle, @@ -1890,10 +2013,14 @@ buf[addrlen++] = '\0'; *len = addrlen; - return 0; + err = 0; + goto cleanup; error: free(name_info); + +cleanup: + 
uv__pipe_unpause_read((uv_pipe_t*)handle); /* cast away const warning */ return err; } diff -Nru nodejs-0.11.13/deps/uv/src/win/poll.c nodejs-0.11.15/deps/uv/src/win/poll.c --- nodejs-0.11.13/deps/uv/src/win/poll.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/poll.c 2015-01-20 21:22:17.000000000 +0000 @@ -79,7 +79,7 @@ handle->mask_events_2 = handle->events; } else if (handle->submitted_events_2 == 0) { req = &handle->poll_req_2; - afd_poll_info = &handle->afd_poll_info_2; + afd_poll_info = &handle->afd_poll_info_2.afd_poll_info_ptr[0]; handle->submitted_events_2 = handle->events; handle->mask_events_1 = handle->events; handle->mask_events_2 = 0; @@ -119,18 +119,19 @@ static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) { - AFD_POLL_INFO afd_poll_info; - int result; + AFD_POLL_INFO* afd_poll_info; + DWORD result; - afd_poll_info.Exclusive = TRUE; - afd_poll_info.NumberOfHandles = 1; - afd_poll_info.Timeout.QuadPart = INT64_MAX; - afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket; - afd_poll_info.Handles[0].Status = 0; - afd_poll_info.Handles[0].Events = AFD_POLL_ALL; + afd_poll_info = &handle->afd_poll_info_2.afd_poll_info_ptr[1]; + afd_poll_info->Exclusive = TRUE; + afd_poll_info->NumberOfHandles = 1; + afd_poll_info->Timeout.QuadPart = INT64_MAX; + afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket; + afd_poll_info->Handles[0].Status = 0; + afd_poll_info->Handles[0].Events = AFD_POLL_ALL; result = uv_msafd_poll(handle->socket, - &afd_poll_info, + afd_poll_info, uv__get_overlapped_dummy()); if (result == SOCKET_ERROR) { @@ -154,7 +155,7 @@ handle->submitted_events_1 = 0; mask_events = handle->mask_events_1; } else if (req == &handle->poll_req_2) { - afd_poll_info = &handle->afd_poll_info_2; + afd_poll_info = &handle->afd_poll_info_2.afd_poll_info_ptr[0]; handle->submitted_events_2 = 0; mask_events = handle->mask_events_2; } else { @@ -187,7 +188,8 @@ if (afd_poll_info->Handles[0].Events & 
AFD_POLL_LOCAL_CLOSE) { /* Stop polling. */ handle->events = 0; - uv__handle_stop(handle); + if (uv__is_active(handle)) + uv__handle_stop(handle); } if (events != 0) { @@ -529,7 +531,7 @@ SO_PROTOCOL_INFOW, (char*) &protocol_info, &len) != 0) { - return WSAGetLastError(); + return uv_translate_sys_error(WSAGetLastError()); } /* Get the peer socket that is needed to enable fast poll. If the returned */ @@ -545,7 +547,7 @@ handle->flags |= UV_HANDLE_POLL_SLOW; } - /* Intialize 2 poll reqs. */ + /* Initialize 2 poll reqs. */ handle->submitted_events_1 = 0; uv_req_init(loop, (uv_req_t*) &(handle->poll_req_1)); handle->poll_req_1.type = UV_POLL_REQ; @@ -556,6 +558,11 @@ handle->poll_req_2.type = UV_POLL_REQ; handle->poll_req_2.data = handle; + handle->afd_poll_info_2.afd_poll_info_ptr = malloc(sizeof(*handle->afd_poll_info_2.afd_poll_info_ptr) * 2); + if (handle->afd_poll_info_2.afd_poll_info_ptr == NULL) { + return UV_ENOMEM; + } + return 0; } @@ -617,5 +624,9 @@ assert(handle->submitted_events_1 == 0); assert(handle->submitted_events_2 == 0); + if (handle->afd_poll_info_2.afd_poll_info_ptr) { + free(handle->afd_poll_info_2.afd_poll_info_ptr); + handle->afd_poll_info_2.afd_poll_info_ptr = NULL; + } uv__handle_close(handle); } diff -Nru nodejs-0.11.13/deps/uv/src/win/process.c nodejs-0.11.15/deps/uv/src/win/process.c --- nodejs-0.11.13/deps/uv/src/win/process.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/process.c 2015-01-20 21:22:17.000000000 +0000 @@ -25,6 +25,8 @@ #include <stdlib.h> #include <signal.h> #include <limits.h> +#include <malloc.h> +#include <wchar.h> #include "uv.h" #include "internal.h" @@ -36,14 +38,27 @@ typedef struct env_var { - const char* narrow; - const WCHAR* wide; - size_t len; /* including null or '=' */ - DWORD value_len; - int supplied; + const WCHAR* const wide; + const WCHAR* const wide_eq; + const size_t len; /* including null or '=' */ } env_var_t; -#define E_V(str) { str "=", L##str, sizeof(str), 0, 0 } 
+#define E_V(str) { L##str, L##str L"=", sizeof(str) } + +static const env_var_t required_vars[] = { /* keep me sorted */ + E_V("HOMEDRIVE"), + E_V("HOMEPATH"), + E_V("LOGONSERVER"), + E_V("PATH"), + E_V("SYSTEMDRIVE"), + E_V("SYSTEMROOT"), + E_V("TEMP"), + E_V("USERDOMAIN"), + E_V("USERNAME"), + E_V("USERPROFILE"), + E_V("WINDIR"), +}; +static size_t n_required_vars = ARRAY_SIZE(required_vars); static HANDLE uv_global_job_handle_; @@ -156,8 +171,10 @@ size_t cwd_len) { WCHAR *result, *result_pos; DWORD attrs; - - if (dir_len >= 1 && (dir[0] == L'/' || dir[0] == L'\\')) { + if (dir_len > 2 && dir[0] == L'\\' && dir[1] == L'\\') { + /* It's a UNC path so ignore cwd */ + cwd_len = 0; + } else if (dir_len >= 1 && (dir[0] == L'/' || dir[0] == L'\\')) { /* It's a full path without drive letter, use cwd's drive letter only */ cwd_len = 2; } else if (dir_len >= 2 && dir[1] == L':' && @@ -316,7 +333,11 @@ * file that is not readable/executable; if the spawn fails it will not * continue searching. * - * TODO: correctly interpret UNC paths + * UNC path support: we are dealing with UNC paths in both the path and the + * filename. This is a deviation from what cmd.exe does (it does not let you + * start a program by specifying an UNC path on the command line) but this is + * really a pointless restriction. + * */ static WCHAR* search_path(const WCHAR *file, WCHAR *cwd, @@ -430,11 +451,10 @@ int quote_hit; WCHAR* start; - /* - * Check if the string must be quoted; - * if unnecessary, don't do it, it may only confuse older programs. - */ if (len == 0) { + /* Need double quotation for empty argument */ + *(target++) = L'"'; + *(target++) = L'"'; return target; } @@ -588,25 +608,56 @@ } -/* - * If we learn that people are passing in huge environment blocks - * then we should probably qsort() the array and then bsearch() - * to see if it contains this variable. 
But there are ownership - * issues associated with that solution; this is the caller's - * char**, and modifying it is rude. - */ -static void check_required_vars_contains_var(env_var_t* required, int count, - const char* var) { - int i; - for (i = 0; i < count; ++i) { - if (_strnicmp(required[i].narrow, var, required[i].len) == 0) { - required[i].supplied = 1; - return; +int env_strncmp(const wchar_t* a, int na, const wchar_t* b) { + wchar_t* a_eq; + wchar_t* b_eq; + wchar_t* A; + wchar_t* B; + int nb; + int r; + + if (na < 0) { + a_eq = wcschr(a, L'='); + assert(a_eq); + na = (int)(long)(a_eq - a); + } else { + na--; + } + b_eq = wcschr(b, L'='); + assert(b_eq); + nb = b_eq - b; + + A = alloca((na+1) * sizeof(wchar_t)); + B = alloca((nb+1) * sizeof(wchar_t)); + + r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, a, na, A, na); + assert(r==na); + A[na] = L'\0'; + r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, b, nb, B, nb); + assert(r==nb); + B[nb] = L'\0'; + + while (1) { + wchar_t AA = *A++; + wchar_t BB = *B++; + if (AA < BB) { + return -1; + } else if (AA > BB) { + return 1; + } else if (!AA && !BB) { + return 0; } } } +static int qsort_wcscmp(const void *a, const void *b) { + wchar_t* astr = *(wchar_t* const*)a; + wchar_t* bstr = *(wchar_t* const*)b; + return env_strncmp(astr, -1, bstr); +} + + /* * The way windows takes environment variables is different than what C does; * Windows wants a contiguous block of null-terminated strings, terminated @@ -617,95 +668,169 @@ * TEMP. SYSTEMDRIVE is probably also important. We therefore ensure that * these get defined if the input environment block does not contain any * values for them. 
+ * + * Also add variables known to Cygwin to be required for correct + * subprocess operation in many cases: + * https://github.com/Alexpux/Cygwin/blob/b266b04fbbd3a595f02ea149e4306d3ab9b1fe3d/winsup/cygwin/environ.cc#L955 + * */ int make_program_env(char* env_block[], WCHAR** dst_ptr) { WCHAR* dst; WCHAR* ptr; char** env; - size_t env_len = 1; /* room for closing null */ + size_t env_len = 0; int len; size_t i; DWORD var_size; + size_t env_block_count = 1; /* 1 for null-terminator */ + WCHAR* dst_copy; + WCHAR** ptr_copy; + WCHAR** env_copy; + DWORD* required_vars_value_len = alloca(n_required_vars * sizeof(DWORD*)); - env_var_t required_vars[] = { - E_V("SYSTEMROOT"), - E_V("SYSTEMDRIVE"), - E_V("TEMP"), - }; - + /* first pass: determine size in UTF-16 */ for (env = env_block; *env; env++) { int len; - check_required_vars_contains_var(required_vars, - ARRAY_SIZE(required_vars), - *env); - - len = MultiByteToWideChar(CP_UTF8, - 0, - *env, - -1, - NULL, - 0); - if (len <= 0) { - return GetLastError(); + if (strchr(*env, '=')) { + len = MultiByteToWideChar(CP_UTF8, + 0, + *env, + -1, + NULL, + 0); + if (len <= 0) { + return GetLastError(); + } + env_len += len; + env_block_count++; } + } + + /* second pass: copy to UTF-16 environment block */ + dst_copy = _malloca(env_len * sizeof(WCHAR)); + if (!dst_copy) { + return ERROR_OUTOFMEMORY; + } + env_copy = alloca(env_block_count * sizeof(WCHAR*)); - env_len += len; + ptr = dst_copy; + ptr_copy = env_copy; + for (env = env_block; *env; env++) { + if (strchr(*env, '=')) { + len = MultiByteToWideChar(CP_UTF8, + 0, + *env, + -1, + ptr, + (int) (env_len - (ptr - dst_copy))); + if (len <= 0) { + DWORD err = GetLastError(); + _freea(dst_copy); + return err; + } + *ptr_copy++ = ptr; + ptr += len; + } } + *ptr_copy = NULL; + assert(env_len == ptr - dst_copy); - for (i = 0; i < ARRAY_SIZE(required_vars); ++i) { - if (!required_vars[i].supplied) { - env_len += required_vars[i].len; + /* sort our (UTF-16) copy */ + qsort(env_copy, 
env_block_count-1, sizeof(wchar_t*), qsort_wcscmp); + + /* third pass: check for required variables */ + for (ptr_copy = env_copy, i = 0; i < n_required_vars; ) { + int cmp; + if (!*ptr_copy) { + cmp = -1; + } else { + cmp = env_strncmp(required_vars[i].wide_eq, + required_vars[i].len, + *ptr_copy); + } + if (cmp < 0) { + /* missing required var */ var_size = GetEnvironmentVariableW(required_vars[i].wide, NULL, 0); - if (var_size == 0) { - return GetLastError(); + required_vars_value_len[i] = var_size; + if (var_size != 0) { + env_len += required_vars[i].len; + env_len += var_size; } - required_vars[i].value_len = var_size; - env_len += var_size; + i++; + } else { + ptr_copy++; + if (cmp == 0) + i++; } } - dst = malloc(env_len * sizeof(WCHAR)); + /* final pass: copy, in sort order, and inserting required variables */ + dst = malloc((1+env_len) * sizeof(WCHAR)); if (!dst) { + _freea(dst_copy); return ERROR_OUTOFMEMORY; } - ptr = dst; - - for (env = env_block; *env; env++, ptr += len) { - len = MultiByteToWideChar(CP_UTF8, - 0, - *env, - -1, - ptr, - (int) (env_len - (ptr - dst))); - if (len <= 0) { - free(dst); - return GetLastError(); - } - } - - for (i = 0; i < ARRAY_SIZE(required_vars); ++i) { - if (!required_vars[i].supplied) { - wcscpy(ptr, required_vars[i].wide); - ptr += required_vars[i].len - 1; - *ptr++ = L'='; - var_size = GetEnvironmentVariableW(required_vars[i].wide, - ptr, - required_vars[i].value_len); - if (var_size == 0) { - uv_fatal_error(GetLastError(), "GetEnvironmentVariableW"); + for (ptr = dst, ptr_copy = env_copy, i = 0; + *ptr_copy || i < n_required_vars; + ptr += len) { + int cmp; + if (i >= n_required_vars) { + cmp = 1; + } else if (!*ptr_copy) { + cmp = -1; + } else { + cmp = env_strncmp(required_vars[i].wide_eq, + required_vars[i].len, + *ptr_copy); + } + if (cmp < 0) { + /* missing required var */ + len = required_vars_value_len[i]; + if (len) { + wcscpy(ptr, required_vars[i].wide_eq); + ptr += required_vars[i].len; + var_size = 
GetEnvironmentVariableW(required_vars[i].wide, + ptr, + (int) (env_len - (ptr - dst))); + if (var_size != len-1) { /* race condition? */ + uv_fatal_error(GetLastError(), "GetEnvironmentVariableW"); + } } - ptr += required_vars[i].value_len; + i++; + } else { + /* copy var from env_block */ + len = wcslen(*ptr_copy) + 1; + wmemcpy(ptr, *ptr_copy, len); + ptr_copy++; + if (cmp == 0) + i++; } } /* Terminate with an extra NULL. */ + assert(env_len == (ptr - dst)); *ptr = L'\0'; + _freea(dst_copy); *dst_ptr = dst; return 0; } +/* + * Attempt to find the value of the PATH environment variable in the child's + * preprocessed environment. + * + * If found, a pointer into `env` is returned. If not found, NULL is returned. + */ +static WCHAR* find_path(WCHAR *env) { + for (; env != NULL && *env != 0; env += wcslen(env) + 1) { + if (wcsncmp(env, L"PATH=", 5) == 0) + return &env[5]; + } + + return NULL; +} /* * Called on Windows thread-pool thread to indicate that @@ -803,7 +928,7 @@ const uv_process_options_t* options) { int i; int err = 0; - WCHAR* path = NULL; + WCHAR* path = NULL, *alloc_path = NULL; BOOL result; WCHAR* application_path = NULL, *application = NULL, *arguments = NULL, *env = NULL, *cwd = NULL; @@ -877,7 +1002,8 @@ } /* Get PATH environment variable. */ - { + path = find_path(env); + if (path == NULL) { DWORD path_len, r; path_len = GetEnvironmentVariableW(L"PATH", NULL, 0); @@ -886,11 +1012,12 @@ goto done; } - path = (WCHAR*) malloc(path_len * sizeof(WCHAR)); - if (path == NULL) { + alloc_path = (WCHAR*) malloc(path_len * sizeof(WCHAR)); + if (alloc_path == NULL) { err = ERROR_OUTOFMEMORY; goto done; } + path = alloc_path; r = GetEnvironmentVariableW(L"PATH", path, path_len); if (r == 0 || r >= path_len) { @@ -936,7 +1063,7 @@ if (options->flags & UV_PROCESS_DETACHED) { /* Note that we're not setting the CREATE_BREAKAWAY_FROM_JOB flag. 
That - * means that libuv might not let you create a fully deamonized process + * means that libuv might not let you create a fully daemonized process * when run under job control. However the type of job control that libuv * itself creates doesn't trickle down to subprocesses so they can still * daemonize. @@ -965,7 +1092,7 @@ /* Spawn succeeded */ /* Beyond this point, failure is reported asynchronously. */ - + process->process_handle = info.hProcess; process->pid = info.dwProcessId; @@ -1011,10 +1138,10 @@ CloseHandle(info.hThread); - assert(!err); - + assert(!err); + /* Make the handle active. It will remain active until the exit callback */ - /* iis made or the handle is closed, whichever happens first. */ + /* is made or the handle is closed, whichever happens first. */ uv__handle_start(process); /* Cleanup, whether we succeeded or failed. */ @@ -1024,7 +1151,7 @@ free(arguments); free(cwd); free(env); - free(path); + free(alloc_path); if (process->child_stdio_buffer != NULL) { /* Clean up child stdio handles. */ @@ -1050,7 +1177,7 @@ return 0; /* If the process already exited before TerminateProcess was called, */ - /* TerminateProcess will fail with ERROR_ACESS_DENIED. */ + /* TerminateProcess will fail with ERROR_ACCESS_DENIED. 
*/ err = GetLastError(); if (err == ERROR_ACCESS_DENIED && GetExitCodeProcess(process_handle, &status) && diff -Nru nodejs-0.11.13/deps/uv/src/win/req-inl.h nodejs-0.11.15/deps/uv/src/win/req-inl.h --- nodejs-0.11.13/deps/uv/src/win/req-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/req-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -195,22 +195,10 @@ uv_process_poll_req(loop, (uv_poll_t*) req->data, req); break; - case UV_GETADDRINFO: - uv_process_getaddrinfo_req(loop, (uv_getaddrinfo_t*) req); - break; - case UV_PROCESS_EXIT: uv_process_proc_exit(loop, (uv_process_t*) req->data); break; - case UV_FS: - uv_process_fs_req(loop, (uv_fs_t*) req); - break; - - case UV_WORK: - uv_process_work_req(loop, (uv_work_t*) req); - break; - case UV_FS_EVENT_REQ: uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data); break; diff -Nru nodejs-0.11.13/deps/uv/src/win/stream.c nodejs-0.11.15/deps/uv/src/win/stream.c --- nodejs-0.11.13/deps/uv/src/win/stream.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/stream.c 2015-01-20 21:22:17.000000000 +0000 @@ -106,7 +106,11 @@ if (handle->type == UV_TTY) { err = uv_tty_read_stop((uv_tty_t*) handle); } else { - handle->flags &= ~UV_HANDLE_READING; + if (handle->type == UV_NAMED_PIPE) { + uv__pipe_stop_read((uv_pipe_t*) handle); + } else { + handle->flags &= ~UV_HANDLE_READING; + } DECREASE_ACTIVE_COUNT(handle->loop, handle); } diff -Nru nodejs-0.11.13/deps/uv/src/win/stream-inl.h nodejs-0.11.15/deps/uv/src/win/stream-inl.h --- nodejs-0.11.13/deps/uv/src/win/stream-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/stream-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -53,15 +53,4 @@ } -INLINE static size_t uv_count_bufs(const uv_buf_t bufs[], unsigned int nbufs) { - unsigned int i; - size_t bytes; - - bytes = 0; - for (i = 0; i < nbufs; i++) - bytes += (size_t) bufs[i].len; - - return bytes; -} - #endif /* UV_WIN_STREAM_INL_H_ */ diff -Nru 
nodejs-0.11.13/deps/uv/src/win/tcp.c nodejs-0.11.15/deps/uv/src/win/tcp.c --- nodejs-0.11.13/deps/uv/src/win/tcp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/tcp.c 2015-01-20 21:22:17.000000000 +0000 @@ -156,6 +156,7 @@ handle->func_acceptex = NULL; handle->func_connectex = NULL; handle->processed_accepts = 0; + handle->delayed_error = 0; return 0; } @@ -195,6 +196,7 @@ if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) { closesocket(handle->socket); + handle->socket = INVALID_SOCKET; handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED; } @@ -235,6 +237,17 @@ } +/* Unlike on Unix, here we don't set SO_REUSEADDR, because it doesn't just + * allow binding to addresses that are in use by sockets in TIME_WAIT, it + * effectively allows 'stealing' a port which is in use by another application. + * + * SO_EXCLUSIVEADDRUSE is also not good here because it does check all sockets, + * regardless of state, so we'd get an error even if the port is in use by a + * socket in TIME_WAIT state. + * + * See issue #1360. + * + */ static int uv_tcp_try_bind(uv_tcp_t* handle, const struct sockaddr* addr, unsigned int addrlen, @@ -244,7 +257,7 @@ if (handle->socket == INVALID_SOCKET) { SOCKET sock; - + /* Cannot set IPv6-only mode on non-IPv6 socket. 
*/ if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6) return ERROR_INVALID_PARAMETER; @@ -291,8 +304,7 @@ err = WSAGetLastError(); if (err == WSAEADDRINUSE) { /* Some errors are not to be reported until connect() or listen() */ - handle->bind_error = err; - handle->flags |= UV_HANDLE_BIND_ERROR; + handle->delayed_error = err; } else { return err; } @@ -517,8 +529,8 @@ return WSAEISCONN; } - if (handle->flags & UV_HANDLE_BIND_ERROR) { - return handle->bind_error; + if (handle->delayed_error) { + return handle->delayed_error; } if (!(handle->flags & UV_HANDLE_BOUND)) { @@ -528,6 +540,8 @@ 0); if (err) return err; + if (handle->delayed_error) + return handle->delayed_error; } if (!handle->func_acceptex) { @@ -576,7 +590,7 @@ } /* Initialize other unused requests too, because uv_tcp_endgame */ - /* doesn't know how how many requests were intialized, so it will */ + /* doesn't know how how many requests were initialized, so it will */ /* try to clean up {uv_simultaneous_server_accepts} requests. 
*/ for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) { req = &handle->accept_reqs[i]; @@ -699,8 +713,8 @@ DWORD bytes; int err; - if (handle->flags & UV_HANDLE_BIND_ERROR) { - return handle->bind_error; + if (handle->delayed_error) { + return handle->delayed_error; } if (!(handle->flags & UV_HANDLE_BOUND)) { @@ -714,6 +728,8 @@ err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0); if (err) return err; + if (handle->delayed_error) + return handle->delayed_error; } if (!handle->func_connectex) { @@ -762,8 +778,8 @@ return UV_EINVAL; } - if (handle->flags & UV_HANDLE_BIND_ERROR) { - return uv_translate_sys_error(handle->bind_error); + if (handle->delayed_error) { + return uv_translate_sys_error(handle->delayed_error); } result = getsockname(handle->socket, name, namelen); @@ -784,8 +800,8 @@ return UV_EINVAL; } - if (handle->flags & UV_HANDLE_BIND_ERROR) { - return uv_translate_sys_error(handle->bind_error); + if (handle->delayed_error) { + return uv_translate_sys_error(handle->delayed_error); } result = getpeername(handle->socket, name, namelen); @@ -810,7 +826,6 @@ req->type = UV_WRITE; req->handle = (uv_stream_t*) handle; req->cb = cb; - memset(&req->overlapped, 0, sizeof(req->overlapped)); /* Prepare the overlapped structure. */ memset(&(req->overlapped), 0, sizeof(req->overlapped)); @@ -840,7 +855,7 @@ uv_insert_pending_req(loop, (uv_req_t*) req); } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { /* Request queued by the kernel. 
*/ - req->queued_bytes = uv_count_bufs(bufs, nbufs); + req->queued_bytes = uv__count_bufs(bufs, nbufs); handle->reqs_pending++; handle->write_reqs_pending++; REGISTER_HANDLE_REQ(loop, handle, req); @@ -1010,8 +1025,12 @@ } if (req->cb) { - err = GET_REQ_SOCK_ERROR(req); - req->cb(req, uv_translate_sys_error(err)); + err = uv_translate_sys_error(GET_REQ_SOCK_ERROR(req)); + if (err == UV_ECONNABORTED) { + /* use UV_ECANCELED for consistency with Unix */ + err = UV_ECANCELED; + } + req->cb(req, err); } handle->write_reqs_pending--; @@ -1103,14 +1122,13 @@ } -int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info, +int uv_tcp_import(uv_tcp_t* tcp, uv__ipc_socket_info_ex* socket_info_ex, int tcp_connection) { int err; - SOCKET socket = WSASocketW(FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, - socket_protocol_info, + &socket_info_ex->socket_info, 0, WSA_FLAG_OVERLAPPED); @@ -1127,7 +1145,7 @@ err = uv_tcp_set_socket(tcp->loop, tcp, socket, - socket_protocol_info->iAddressFamily, + socket_info_ex->socket_info.iAddressFamily, 1); if (err) { closesocket(socket); @@ -1142,6 +1160,8 @@ tcp->flags |= UV_HANDLE_BOUND; tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET; + tcp->delayed_error = socket_info_ex->delayed_error; + tcp->loop->active_tcp_streams++; return 0; } @@ -1202,13 +1222,10 @@ return ERROR_INVALID_PARAMETER; } - /* Report any deferred bind errors now. */ - if (handle->flags & UV_HANDLE_BIND_ERROR) { - return handle->bind_error; - } - - if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) { - return WSAGetLastError(); + if (!(handle->delayed_error)) { + if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) { + handle->delayed_error = WSAGetLastError(); + } } } } @@ -1325,7 +1342,7 @@ if (uv_tcp_try_cancel_io(tcp) != 0) { /* When cancellation is not possible, there is another option: we can */ /* close the incoming sockets, which will also cancel the accept */ - /* operations. 
However this is not cool because we might inadvertedly */ + /* operations. However this is not cool because we might inadvertently */ /* close a socket that just accepted a new connection, which will */ /* cause the connection to be aborted. */ unsigned int i; @@ -1352,6 +1369,7 @@ if (close_socket) { closesocket(tcp->socket); + tcp->socket = INVALID_SOCKET; tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED; } diff -Nru nodejs-0.11.13/deps/uv/src/win/thread.c nodejs-0.11.15/deps/uv/src/win/thread.c --- nodejs-0.11.13/deps/uv/src/win/thread.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/thread.c 2015-01-20 21:22:17.000000000 +0000 @@ -100,7 +100,7 @@ } else { /* We lost the race. Destroy the event we created and wait for the */ - /* existing one todv become signaled. */ + /* existing one to become signaled. */ CloseHandle(created_event); result = WaitForSingleObject(existing_event, INFINITE); assert(result == WAIT_OBJECT_0); @@ -117,6 +117,68 @@ uv__once_inner(guard, callback); } +static UV_THREAD_LOCAL uv_thread_t uv__current_thread = NULL; + +struct thread_ctx { + void (*entry)(void* arg); + void* arg; + uv_thread_t self; +}; + + +static UINT __stdcall uv__thread_start(void* arg) +{ + struct thread_ctx *ctx_p; + struct thread_ctx ctx; + + ctx_p = arg; + ctx = *ctx_p; + free(ctx_p); + + uv__current_thread = ctx.self; + ctx.entry(ctx.arg); + + return 0; +} + + +int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { + struct thread_ctx* ctx; + int err; + HANDLE thread; + + ctx = malloc(sizeof(*ctx)); + if (ctx == NULL) + return UV_ENOMEM; + + ctx->entry = entry; + ctx->arg = arg; + + /* Create the thread in suspended state so we have a chance to pass + * its own creation handle to it */ + thread = (HANDLE) _beginthreadex(NULL, + 0, + uv__thread_start, + ctx, + CREATE_SUSPENDED, + NULL); + if (thread == NULL) { + err = errno; + free(ctx); + } else { + err = 0; + *tid = thread; + ctx->self = thread; + ResumeThread(thread); + } + 
+ return err; +} + + +uv_thread_t uv_thread_self(void) { + return uv__current_thread; +} int uv_thread_join(uv_thread_t *tid) { if (WaitForSingleObject(*tid, INFINITE)) @@ -129,6 +191,11 @@ } +int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { + return *t1 == *t2; +} + + int uv_mutex_init(uv_mutex_t* mutex) { InitializeCriticalSection(mutex); return 0; @@ -660,7 +727,9 @@ } -void uv_barrier_wait(uv_barrier_t* barrier) { +int uv_barrier_wait(uv_barrier_t* barrier) { + int serial_thread; + uv_mutex_lock(&barrier->mutex); if (++barrier->count == barrier->n) { uv_sem_wait(&barrier->turnstile2); @@ -672,7 +741,8 @@ uv_sem_post(&barrier->turnstile1); uv_mutex_lock(&barrier->mutex); - if (--barrier->count == 0) { + serial_thread = (--barrier->count == 0); + if (serial_thread) { uv_sem_wait(&barrier->turnstile1); uv_sem_post(&barrier->turnstile2); } @@ -680,6 +750,7 @@ uv_sem_wait(&barrier->turnstile2); uv_sem_post(&barrier->turnstile2); + return serial_thread; } diff -Nru nodejs-0.11.13/deps/uv/src/win/threadpool.c nodejs-0.11.15/deps/uv/src/win/threadpool.c --- nodejs-0.11.13/deps/uv/src/win/threadpool.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/threadpool.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#include <assert.h> - -#include "uv.h" -#include "internal.h" -#include "req-inl.h" - - -static void uv_work_req_init(uv_loop_t* loop, uv_work_t* req, - uv_work_cb work_cb, uv_after_work_cb after_work_cb) { - uv_req_init(loop, (uv_req_t*) req); - req->type = UV_WORK; - req->loop = loop; - req->work_cb = work_cb; - req->after_work_cb = after_work_cb; - memset(&req->overlapped, 0, sizeof(req->overlapped)); -} - - -static DWORD WINAPI uv_work_thread_proc(void* parameter) { - uv_work_t* req = (uv_work_t*)parameter; - uv_loop_t* loop = req->loop; - - assert(req != NULL); - assert(req->type == UV_WORK); - assert(req->work_cb); - - req->work_cb(req); - - POST_COMPLETION_FOR_REQ(loop, req); - - return 0; -} - - -int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb, - uv_after_work_cb after_work_cb) { - if (work_cb == NULL) - return UV_EINVAL; - - uv_work_req_init(loop, req, work_cb, after_work_cb); - - if (!QueueUserWorkItem(&uv_work_thread_proc, req, WT_EXECUTELONGFUNCTION)) { - return uv_translate_sys_error(GetLastError()); - } - - uv__req_register(loop, req); - return 0; -} - - -int uv_cancel(uv_req_t* req) { - return UV_ENOSYS; -} - - -void uv_process_work_req(uv_loop_t* loop, uv_work_t* req) { - uv__req_unregister(loop, req); - if(req->after_work_cb) - req->after_work_cb(req, 0); -} diff -Nru nodejs-0.11.13/deps/uv/src/win/timer.c nodejs-0.11.15/deps/uv/src/win/timer.c --- nodejs-0.11.13/deps/uv/src/win/timer.c 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/uv/src/win/timer.c 2015-01-20 21:22:17.000000000 +0000 @@ -28,37 +28,17 @@ #include "handle-inl.h" -void uv_update_time(uv_loop_t* loop) { - DWORD ticks; - ULARGE_INTEGER time; - - ticks = GetTickCount(); +/* The number of milliseconds in one second. */ +#define UV__MILLISEC 1000 - time.QuadPart = loop->time; - /* GetTickCount() can conceivably wrap around, so when the current tick */ - /* count is lower than the last tick count, we'll assume it has wrapped. */ - /* uv_poll must make sure that the timer can never overflow more than */ - /* once between two subsequent uv_update_time calls. */ - time.LowPart = ticks; - if (ticks < loop->last_tick_count) - time.HighPart++; - - /* Remember the last tick count. */ - loop->last_tick_count = ticks; - - /* The GetTickCount() resolution isn't too good. Sometimes it'll happen */ - /* that GetQueuedCompletionStatus() or GetQueuedCompletionStatusEx() has */ - /* waited for a couple of ms but this is not reflected in the GetTickCount */ - /* result yet. Therefore whenever GetQueuedCompletionStatus times out */ - /* we'll add the number of ms that it has waited to the current loop time. */ - /* When that happened the loop time might be a little ms farther than what */ - /* we've just computed, and we shouldn't update the loop time. 
*/ - if (loop->time < time.QuadPart) - loop->time = time.QuadPart; +void uv_update_time(uv_loop_t* loop) { + uint64_t new_time = uv__hrtime(UV__MILLISEC); + if (new_time > loop->time) { + loop->time = new_time; + } } - void uv__time_forward(uv_loop_t* loop, uint64_t msecs) { loop->time += msecs; } @@ -117,14 +97,15 @@ uv_loop_t* loop = handle->loop; uv_timer_t* old; - if (handle->flags & UV_HANDLE_ACTIVE) { - RB_REMOVE(uv_timer_tree_s, &loop->timers, handle); - } + if (timer_cb == NULL) + return UV_EINVAL; + + if (uv__is_active(handle)) + uv_timer_stop(handle); handle->timer_cb = timer_cb; handle->due = get_clamped_due_time(loop->time, timeout); handle->repeat = repeat; - handle->flags |= UV_HANDLE_ACTIVE; uv__handle_start(handle); /* start_id is the second index to be compared in uv__timer_cmp() */ @@ -140,12 +121,10 @@ int uv_timer_stop(uv_timer_t* handle) { uv_loop_t* loop = handle->loop; - if (!(handle->flags & UV_HANDLE_ACTIVE)) + if (!uv__is_active(handle)) return 0; RB_REMOVE(uv_timer_tree_s, &loop->timers, handle); - - handle->flags &= ~UV_HANDLE_ACTIVE; uv__handle_stop(handle); return 0; @@ -153,28 +132,14 @@ int uv_timer_again(uv_timer_t* handle) { - uv_loop_t* loop = handle->loop; - /* If timer_cb is NULL that means that the timer was never started. 
*/ if (!handle->timer_cb) { return UV_EINVAL; } - if (handle->flags & UV_HANDLE_ACTIVE) { - RB_REMOVE(uv_timer_tree_s, &loop->timers, handle); - handle->flags &= ~UV_HANDLE_ACTIVE; - uv__handle_stop(handle); - } - if (handle->repeat) { - handle->due = get_clamped_due_time(loop->time, handle->repeat); - - if (RB_INSERT(uv_timer_tree_s, &loop->timers, handle) != NULL) { - uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT"); - } - - handle->flags |= UV_HANDLE_ACTIVE; - uv__handle_start(handle); + uv_timer_stop(handle); + uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat); } return 0; @@ -193,25 +158,20 @@ } -DWORD uv_get_poll_timeout(uv_loop_t* loop) { +DWORD uv__next_timeout(const uv_loop_t* loop) { uv_timer_t* timer; int64_t delta; - /* Check if there are any running timers */ - timer = RB_MIN(uv_timer_tree_s, &loop->timers); + /* Check if there are any running timers + * Need to cast away const first, since RB_MIN doesn't know what we are + * going to do with this return value, it can't be marked const + */ + timer = RB_MIN(uv_timer_tree_s, &((uv_loop_t*)loop)->timers); if (timer) { - uv_update_time(loop); - delta = timer->due - loop->time; - if (delta >= UINT_MAX >> 1) { - /* A timeout value of UINT_MAX means infinite, so that's no good. But */ - /* more importantly, there's always the risk that GetTickCount wraps. */ - /* uv_update_time can detect this, but we must make sure that the */ - /* tick counter never overflows twice between two subsequent */ - /* uv_update_time calls. We do this by never sleeping more than half */ - /* the time it takes to wrap the counter - which is huge overkill, */ - /* but hey, it's not so bad to wake up every 25 days. */ - return UINT_MAX >> 1; + if (delta >= UINT_MAX - 1) { + /* A timeout value of UINT_MAX means infinite, so that's no good. 
*/ + return UINT_MAX - 1; } else if (delta < 0) { /* Negative timeout values are not allowed */ return 0; @@ -232,23 +192,9 @@ for (timer = RB_MIN(uv_timer_tree_s, &loop->timers); timer != NULL && timer->due <= loop->time; timer = RB_MIN(uv_timer_tree_s, &loop->timers)) { - RB_REMOVE(uv_timer_tree_s, &loop->timers, timer); - - if (timer->repeat != 0) { - /* If it is a repeating timer, reschedule with repeat timeout. */ - timer->due = get_clamped_due_time(timer->due, timer->repeat); - if (timer->due < loop->time) { - timer->due = loop->time; - } - if (RB_INSERT(uv_timer_tree_s, &loop->timers, timer) != NULL) { - uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT"); - } - } else { - /* If non-repeating, mark the timer as inactive. */ - timer->flags &= ~UV_HANDLE_ACTIVE; - uv__handle_stop(timer); - } + uv_timer_stop(timer); + uv_timer_again(timer); timer->timer_cb((uv_timer_t*) timer); } } diff -Nru nodejs-0.11.13/deps/uv/src/win/tty.c nodejs-0.11.15/deps/uv/src/win/tty.c --- nodejs-0.11.13/deps/uv/src/win/tty.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/tty.c 2015-01-20 21:22:17.000000000 +0000 @@ -30,6 +30,10 @@ # include <stdint.h> #endif +#ifndef COMMON_LVB_REVERSE_VIDEO +# define COMMON_LVB_REVERSE_VIDEO 0x4000 +#endif + #include "uv.h" #include "internal.h" #include "handle-inl.h" @@ -48,6 +52,8 @@ #define ANSI_IN_STRING 0x40 #define ANSI_BACKSLASH_SEEN 0x80 +#define MAX_INPUT_BUFFER_LENGTH 8192 + static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info); @@ -303,6 +309,8 @@ uv_tty_t* handle; uv_req_t* req; DWORD bytes, read_bytes; + WCHAR utf16[MAX_INPUT_BUFFER_LENGTH / 3]; + DWORD chars, read_chars; assert(data); @@ -314,18 +322,29 @@ assert(handle->read_line_buffer.len > 0); /* ReadConsole can't handle big buffers. 
*/ - if (handle->read_line_buffer.len < 8192) { + if (handle->read_line_buffer.len < MAX_INPUT_BUFFER_LENGTH) { bytes = handle->read_line_buffer.len; } else { - bytes = 8192; + bytes = MAX_INPUT_BUFFER_LENGTH; } - /* Todo: Unicode */ - if (ReadConsoleA(handle->read_line_handle, - (void*) handle->read_line_buffer.base, - bytes, - &read_bytes, + /* At last, unicode! */ + /* One utf-16 codeunit never takes more than 3 utf-8 codeunits to encode */ + chars = bytes / 3; + + if (ReadConsoleW(handle->read_line_handle, + (void*) utf16, + chars, + &read_chars, NULL)) { + read_bytes = WideCharToMultiByte(CP_UTF8, + 0, + utf16, + read_chars, + handle->read_line_buffer.base, + bytes, + NULL, + NULL); SET_REQ_SUCCESS(req); req->overlapped.InternalHigh = read_bytes; } else { @@ -1117,6 +1136,14 @@ return 0; } +#define FLIP_FGBG \ + do { \ + WORD fg = info.wAttributes & 0xF; \ + WORD bg = info.wAttributes & 0xF0; \ + info.wAttributes &= 0xFF00; \ + info.wAttributes |= fg << 4; \ + info.wAttributes |= bg >> 4; \ + } while (0) static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) { unsigned short argc = handle->ansi_csi_argc; @@ -1126,6 +1153,7 @@ char fg_color = -1, bg_color = -1; char fg_bright = -1, bg_bright = -1; + char inverse = -1; if (argc == 0) { /* Reset mode */ @@ -1133,6 +1161,7 @@ bg_color = 0; fg_bright = 0; bg_bright = 0; + inverse = 0; } for (i = 0; i < argc; i++) { @@ -1144,6 +1173,7 @@ bg_color = 0; fg_bright = 0; bg_bright = 0; + inverse = 0; } else if (arg == 1) { /* Foreground bright on */ @@ -1158,6 +1188,10 @@ /* Background bright on */ bg_bright = 1; + } else if (arg == 7) { + /* Inverse: on */ + inverse = 1; + } else if (arg == 21 || arg == 22) { /* Foreground bright off */ fg_bright = 0; @@ -1166,6 +1200,10 @@ /* Background bright off */ bg_bright = 0; + } else if (arg == 27) { + /* Inverse: off */ + inverse = 0; + } else if (arg >= 30 && arg <= 37) { /* Set foreground color */ fg_color = arg - 30; @@ -1198,7 +1236,7 @@ } if (fg_color == -1 && 
bg_color == -1 && fg_bright == -1 && - bg_bright == -1) { + bg_bright == -1 && inverse == -1) { /* Nothing changed */ return 0; } @@ -1208,6 +1246,10 @@ return -1; } + if ((info.wAttributes & COMMON_LVB_REVERSE_VIDEO) > 0) { + FLIP_FGBG; + } + if (fg_color != -1) { info.wAttributes &= ~(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE); if (fg_color & 1) info.wAttributes |= FOREGROUND_RED; @@ -1238,6 +1280,18 @@ } } + if (inverse != -1) { + if (inverse) { + info.wAttributes |= COMMON_LVB_REVERSE_VIDEO; + } else { + info.wAttributes &= ~COMMON_LVB_REVERSE_VIDEO; + } + } + + if ((info.wAttributes & COMMON_LVB_REVERSE_VIDEO) > 0) { + FLIP_FGBG; + } + if (!SetConsoleTextAttribute(handle->handle, info.wAttributes)) { *error = GetLastError(); return -1; @@ -1316,6 +1370,25 @@ return 0; } +static int uv_tty_set_cursor_visibility(uv_tty_t* handle, + BOOL visible, + DWORD* error) { + CONSOLE_CURSOR_INFO cursor_info; + + if (!GetConsoleCursorInfo(handle->handle, &cursor_info)) { + *error = GetLastError(); + return -1; + } + + cursor_info.bVisible = visible; + + if (!SetConsoleCursorInfo(handle->handle, &cursor_info)) { + *error = GetLastError(); + return -1; + } + + return 0; +} static int uv_tty_write_bufs(uv_tty_t* handle, const uv_buf_t bufs[], @@ -1527,6 +1600,13 @@ continue; } + } else if (utf8_codepoint == '?' && !(ansi_parser_state & ANSI_IN_ARG) && + handle->ansi_csi_argc == 0) { + /* Ignores '?' if it is the first character after CSI[ */ + /* This is an extension character from the VT100 codeset */ + /* that is supported and used by most ANSI terminals today. 
*/ + continue; + } else if (utf8_codepoint >= '@' && utf8_codepoint <= '~' && (handle->ansi_csi_argc > 0 || utf8_codepoint != '[')) { int x, y, d; @@ -1629,6 +1709,24 @@ FLUSH_TEXT(); uv_tty_restore_state(handle, 0, error); break; + + case 'l': + /* Hide the cursor */ + if (handle->ansi_csi_argc == 1 && + handle->ansi_csi_argv[0] == 25) { + FLUSH_TEXT(); + uv_tty_set_cursor_visibility(handle, 0, error); + } + break; + + case 'h': + /* Show the cursor */ + if (handle->ansi_csi_argc == 1 && + handle->ansi_csi_argv[0] == 25) { + FLUSH_TEXT(); + uv_tty_set_cursor_visibility(handle, 1, error); + } + break; } /* Sequence ended - go back to normal state. */ @@ -1805,6 +1903,7 @@ if (handle->flags & UV_HANDLE_READING) uv_tty_read_stop(handle); + handle->handle = INVALID_HANDLE_VALUE; handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE); uv__handle_closing(handle); diff -Nru nodejs-0.11.13/deps/uv/src/win/udp.c nodejs-0.11.15/deps/uv/src/win/udp.c --- nodejs-0.11.13/deps/uv/src/win/udp.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/udp.c 2015-01-20 21:22:17.000000000 +0000 @@ -37,8 +37,9 @@ /* A zero-size buffer for use by uv_udp_read */ static char uv_zero_[] = ""; -int uv_udp_getsockname(uv_udp_t* handle, struct sockaddr* name, - int* namelen) { +int uv_udp_getsockname(const uv_udp_t* handle, + struct sockaddr* name, + int* namelen) { int result; if (!(handle->flags & UV_HANDLE_BOUND)) { @@ -82,7 +83,7 @@ } if (pSetFileCompletionNotificationModes) { - /* All know windowses that support SetFileCompletionNotificationModes */ + /* All known Windows that support SetFileCompletionNotificationModes */ /* have a bug that makes it impossible to use this function in */ /* conjunction with datagram sockets. 
We can work around that but only */ /* if the user is using the default UDP driver (AFD) and has no other */ @@ -129,6 +130,8 @@ handle->activecnt = 0; handle->func_wsarecv = WSARecv; handle->func_wsarecvfrom = WSARecvFrom; + handle->send_queue_size = 0; + handle->send_queue_count = 0; uv_req_init(loop, (uv_req_t*) &(handle->recv_req)); handle->recv_req.type = UV_UDP_RECV; @@ -141,6 +144,7 @@ void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) { uv_udp_recv_stop(handle); closesocket(handle->socket); + handle->socket = INVALID_SOCKET; uv__handle_closing(handle); @@ -396,12 +400,16 @@ /* Request completed immediately. */ req->queued_bytes = 0; handle->reqs_pending++; + handle->send_queue_size += req->queued_bytes; + handle->send_queue_count++; REGISTER_HANDLE_REQ(loop, handle, req); uv_insert_pending_req(loop, (uv_req_t*)req); } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { /* Request queued by the kernel. */ - req->queued_bytes = uv_count_bufs(bufs, nbufs); + req->queued_bytes = uv__count_bufs(bufs, nbufs); handle->reqs_pending++; + handle->send_queue_size += req->queued_bytes; + handle->send_queue_count++; REGISTER_HANDLE_REQ(loop, handle, req); } else { /* Send failed due to an error. */ @@ -498,9 +506,13 @@ } else if (err == WSAEWOULDBLOCK) { /* Kernel buffer empty */ handle->recv_cb(handle, 0, &buf, NULL, 0); - } else if (err != WSAECONNRESET && err != WSAENETRESET) { - /* Serious error. WSAECONNRESET/WSANETRESET is ignored because this */ - /* just indicates that a previous sendto operation failed. */ + } else if (err == WSAECONNRESET || err == WSAENETRESET) { + /* WSAECONNRESET/WSANETRESET is ignored because this just indicates + * that a previous sendto operation failed. + */ + handle->recv_cb(handle, 0, &buf, NULL, 0); + } else { + /* Any other error that we want to report back to the user. 
*/ uv_udp_recv_stop(handle); handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0); } @@ -524,6 +536,11 @@ assert(handle->type == UV_UDP); + assert(handle->send_queue_size >= req->queued_bytes); + assert(handle->send_queue_count >= 1); + handle->send_queue_size -= req->queued_bytes; + handle->send_queue_count--; + UNREGISTER_HANDLE_REQ(loop, handle, req); if (req->cb) { @@ -560,7 +577,9 @@ memset(&mreq, 0, sizeof mreq); if (interface_addr) { - mreq.imr_interface.s_addr = inet_addr(interface_addr); + err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); + if (err) + return err; } else { mreq.imr_interface.s_addr = htonl(INADDR_ANY); } @@ -662,7 +681,6 @@ int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) { - int err; struct sockaddr_storage addr_st; struct sockaddr_in* addr4; struct sockaddr_in6* addr6; @@ -687,13 +705,10 @@ return UV_EINVAL; } + if (!(handle->flags & UV_HANDLE_BOUND)) + return UV_EBADF; + if (addr_st.ss_family == AF_INET) { - err = uv_udp_maybe_bind(handle, - (const struct sockaddr*) &uv_addr_ip4_any_, - sizeof(uv_addr_ip4_any_), - UV_UDP_REUSEADDR); - if (err) - return uv_translate_sys_error(err); if (setsockopt(handle->socket, IPPROTO_IP, IP_MULTICAST_IF, @@ -702,12 +717,6 @@ return uv_translate_sys_error(WSAGetLastError()); } } else if (addr_st.ss_family == AF_INET6) { - err = uv_udp_maybe_bind(handle, - (const struct sockaddr*) &uv_addr_ip6_any_, - sizeof(uv_addr_ip6_any_), - UV_UDP_REUSEADDR); - if (err) - return uv_translate_sys_error(err); if (setsockopt(handle->socket, IPPROTO_IPV6, IPV6_MULTICAST_IF, @@ -726,15 +735,9 @@ int uv_udp_set_broadcast(uv_udp_t* handle, int value) { BOOL optval = (BOOL) value; - int err; - /* If the socket is unbound, bind to inaddr_any. 
*/ - err = uv_udp_maybe_bind(handle, - (const struct sockaddr*) &uv_addr_ip4_any_, - sizeof(uv_addr_ip4_any_), - 0); - if (err) - return uv_translate_sys_error(err); + if (!(handle->flags & UV_HANDLE_BOUND)) + return UV_EBADF; if (setsockopt(handle->socket, SOL_SOCKET, @@ -774,19 +777,13 @@ #define SOCKOPT_SETTER(name, option4, option6, validate) \ int uv_udp_set_##name(uv_udp_t* handle, int value) { \ DWORD optval = (DWORD) value; \ - int err; \ \ if (!(validate(value))) { \ return UV_EINVAL; \ } \ \ - /* If the socket is unbound, bind to inaddr_any. */ \ - err = uv_udp_maybe_bind(handle, \ - (const struct sockaddr*) &uv_addr_ip4_any_, \ - sizeof(uv_addr_ip4_any_), \ - 0); \ - if (err) \ - return uv_translate_sys_error(err); \ + if (!(handle->flags & UV_HANDLE_BOUND)) \ + return UV_EBADF; \ \ if (!(handle->flags & UV_HANDLE_IPV6)) { \ /* Set IPv4 socket option */ \ @@ -882,3 +879,12 @@ return 0; } + + +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen) { + return UV_ENOSYS; +} diff -Nru nodejs-0.11.13/deps/uv/src/win/util.c nodejs-0.11.15/deps/uv/src/win/util.c --- nodejs-0.11.13/deps/uv/src/win/util.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/util.c 2015-01-20 21:22:17.000000000 +0000 @@ -44,7 +44,7 @@ * of the console title is that it is smaller than 64K. However in practice * it is much smaller, and there is no way to figure out what the exact length * of the title is or can be, at least not on XP. To make it even more - * annoying, GetConsoleTitle failes when the buffer to be read into is bigger + * annoying, GetConsoleTitle fails when the buffer to be read into is bigger * than the actual maximum length. So we make a conservative guess here; * just don't put the novel you're writing in the title, unless the plot * survives truncation. @@ -52,20 +52,19 @@ #define MAX_TITLE_LENGTH 8192 /* The number of nanoseconds in one second. 
*/ -#undef NANOSEC -#define NANOSEC 1000000000 +#define UV__NANOSEC 1000000000 /* Cached copy of the process title, plus a mutex guarding it. */ static char *process_title; static CRITICAL_SECTION process_title_lock; -/* Frequency (ticks per nanosecond) of the high-resolution clock. */ -static double hrtime_frequency_ = 0; +/* Interval (in seconds) of the high-resolution clock. */ +static double hrtime_interval_ = 0; /* - * One-time intialization code for functionality defined in util.c. + * One-time initialization code for functionality defined in util.c. */ void uv__util_init() { LARGE_INTEGER perf_frequency; @@ -73,11 +72,14 @@ /* Initialize process title access mutex. */ InitializeCriticalSection(&process_title_lock); - /* Retrieve high-resolution timer frequency. */ - if (QueryPerformanceFrequency(&perf_frequency)) - hrtime_frequency_ = (double) perf_frequency.QuadPart / (double) NANOSEC; - else - hrtime_frequency_= 0; + /* Retrieve high-resolution timer frequency + * and precompute its reciprocal. 
+ */ + if (QueryPerformanceFrequency(&perf_frequency)) { + hrtime_interval_ = 1.0 / perf_frequency.QuadPart; + } else { + hrtime_interval_= 0; + } } @@ -204,7 +206,7 @@ if (r == 0) { return uv_translate_sys_error(GetLastError()); } else if (r > (int) *size) { - *size = r; + *size = r -1; return UV_ENOBUFS; } @@ -221,7 +223,7 @@ return uv_translate_sys_error(GetLastError()); } - *size = r; + *size = r - 1; return 0; } @@ -314,8 +316,7 @@ MEMORYSTATUSEX memory_status; memory_status.dwLength = sizeof(memory_status); - if(!GlobalMemoryStatusEx(&memory_status)) - { + if (!GlobalMemoryStatusEx(&memory_status)) { return -1; } @@ -327,8 +328,7 @@ MEMORYSTATUSEX memory_status; memory_status.dwLength = sizeof(memory_status); - if(!GlobalMemoryStatusEx(&memory_status)) - { + if (!GlobalMemoryStatusEx(&memory_status)) { return -1; } @@ -388,7 +388,7 @@ if (!length) { err = GetLastError(); goto done; - }; + } /* If the title must be truncated insert a \0 terminator there */ if (length > MAX_TITLE_LENGTH) { @@ -465,26 +465,27 @@ uint64_t uv_hrtime(void) { - LARGE_INTEGER counter; - uv__once_init(); + return uv__hrtime(UV__NANOSEC); +} - /* If the performance frequency is zero, there's no support. */ - if (hrtime_frequency_ == 0) { - /* uv__set_sys_error(loop, ERROR_NOT_SUPPORTED); */ +uint64_t uv__hrtime(double scale) { + LARGE_INTEGER counter; + + /* If the performance interval is zero, there's no support. */ + if (hrtime_interval_ == 0) { return 0; } if (!QueryPerformanceCounter(&counter)) { - /* uv__set_sys_error(loop, GetLastError()); */ return 0; } /* Because we have no guarantee about the order of magnitude of the - * performance counter frequency, integer math could cause this computation + * performance counter interval, integer math could cause this computation * to overflow. Therefore we resort to floating point math. 
*/ - return (uint64_t) ((double) counter.QuadPart / hrtime_frequency_); + return (uint64_t) ((double) counter.QuadPart * hrtime_interval_ * scale); } @@ -777,11 +778,76 @@ } +static int is_windows_version_or_greater(DWORD os_major, + DWORD os_minor, + WORD service_pack_major, + WORD service_pack_minor) { + OSVERSIONINFOEX osvi; + DWORDLONG condition_mask = 0; + int op = VER_GREATER_EQUAL; + + /* Initialize the OSVERSIONINFOEX structure. */ + ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); + osvi.dwMajorVersion = os_major; + osvi.dwMinorVersion = os_minor; + osvi.wServicePackMajor = service_pack_major; + osvi.wServicePackMinor = service_pack_minor; + + /* Initialize the condition mask. */ + VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, op); + VER_SET_CONDITION(condition_mask, VER_MINORVERSION, op); + VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, op); + VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, op); + + /* Perform the test. 
*/ + return (int) VerifyVersionInfo( + &osvi, + VER_MAJORVERSION | VER_MINORVERSION | + VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR, + condition_mask); +} + + +static int address_prefix_match(int family, + struct sockaddr* address, + struct sockaddr* prefix_address, + int prefix_len) { + uint8_t* address_data; + uint8_t* prefix_address_data; + int i; + + assert(address->sa_family == family); + assert(prefix_address->sa_family == family); + + if (family == AF_INET6) { + address_data = (uint8_t*) &(((struct sockaddr_in6 *) address)->sin6_addr); + prefix_address_data = + (uint8_t*) &(((struct sockaddr_in6 *) prefix_address)->sin6_addr); + } else { + address_data = (uint8_t*) &(((struct sockaddr_in *) address)->sin_addr); + prefix_address_data = + (uint8_t*) &(((struct sockaddr_in *) prefix_address)->sin_addr); + } + + for (i = 0; i < prefix_len >> 3; i++) { + if (address_data[i] != prefix_address_data[i]) + return 0; + } + + if (prefix_len % 8) + return prefix_address_data[i] == + (address_data[i] & (0xff << (8 - prefix_len % 8))); + + return 1; +} + + int uv_interface_addresses(uv_interface_address_t** addresses_ptr, int* count_ptr) { IP_ADAPTER_ADDRESSES* win_address_buf; ULONG win_address_buf_size; - IP_ADAPTER_ADDRESSES* win_address; + IP_ADAPTER_ADDRESSES* adapter; uv_interface_address_t* uv_address_buf; char* name_buf; @@ -790,6 +856,23 @@ int count; + int is_vista_or_greater; + ULONG flags; + + is_vista_or_greater = is_windows_version_or_greater(6, 0, 0, 0); + if (is_vista_or_greater) { + flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | + GAA_FLAG_SKIP_DNS_SERVER; + } else { + /* We need at least XP SP1. */ + if (!is_windows_version_or_greater(5, 1, 1, 0)) + return UV_ENOTSUP; + + flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | + GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_PREFIX; + } + + /* Fetch the size of the adapters reported by windows, and then get the */ /* list itself. 
*/ win_address_buf_size = 0; @@ -802,7 +885,7 @@ /* ERROR_BUFFER_OVERFLOW, and the required buffer size will be stored in */ /* win_address_buf_size. */ r = GetAdaptersAddresses(AF_UNSPEC, - GAA_FLAG_INCLUDE_PREFIX, + flags, NULL, win_address_buf, &win_address_buf_size); @@ -861,25 +944,23 @@ count = 0; uv_address_buf_size = 0; - for (win_address = win_address_buf; - win_address != NULL; - win_address = win_address->Next) { - /* Use IP_ADAPTER_UNICAST_ADDRESS_XP to retain backwards compatibility */ - /* with Windows XP */ - IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address; + for (adapter = win_address_buf; + adapter != NULL; + adapter = adapter->Next) { + IP_ADAPTER_UNICAST_ADDRESS* unicast_address; int name_size; /* Interfaces that are not 'up' should not be reported. Also skip */ /* interfaces that have no associated unicast address, as to avoid */ /* allocating space for the name for this interface. */ - if (win_address->OperStatus != IfOperStatusUp || - win_address->FirstUnicastAddress == NULL) + if (adapter->OperStatus != IfOperStatusUp || + adapter->FirstUnicastAddress == NULL) continue; /* Compute the size of the interface name. */ name_size = WideCharToMultiByte(CP_UTF8, 0, - win_address->FriendlyName, + adapter->FriendlyName, -1, NULL, 0, @@ -893,8 +974,8 @@ /* Count the number of addresses associated with this interface, and */ /* compute the size. */ - for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*) - win_address->FirstUnicastAddress; + for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS*) + adapter->FirstUnicastAddress; unicast_address != NULL; unicast_address = unicast_address->Next) { count++; @@ -915,16 +996,15 @@ name_buf = (char*) (uv_address_buf + count); /* Fill out the output buffer. 
*/ - for (win_address = win_address_buf; - win_address != NULL; - win_address = win_address->Next) { - IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address; - IP_ADAPTER_PREFIX* prefix; + for (adapter = win_address_buf; + adapter != NULL; + adapter = adapter->Next) { + IP_ADAPTER_UNICAST_ADDRESS* unicast_address; int name_size; size_t max_name_size; - if (win_address->OperStatus != IfOperStatusUp || - win_address->FirstUnicastAddress == NULL) + if (adapter->OperStatus != IfOperStatusUp || + adapter->FirstUnicastAddress == NULL) continue; /* Convert the interface name to UTF8. */ @@ -933,7 +1013,7 @@ max_name_size = INT_MAX; name_size = WideCharToMultiByte(CP_UTF8, 0, - win_address->FriendlyName, + adapter->FriendlyName, -1, name_buf, (int) max_name_size, @@ -945,47 +1025,78 @@ return uv_translate_sys_error(GetLastError()); } - prefix = win_address->FirstPrefix; - /* Add an uv_interface_address_t element for every unicast address. */ - /* Walk the prefix list in tandem with the address list. */ - for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*) - win_address->FirstUnicastAddress; - unicast_address != NULL && prefix != NULL; - unicast_address = unicast_address->Next, prefix = prefix->Next) { + for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS*) + adapter->FirstUnicastAddress; + unicast_address != NULL; + unicast_address = unicast_address->Next) { struct sockaddr* sa; ULONG prefix_len; sa = unicast_address->Address.lpSockaddr; - prefix_len = prefix->PrefixLength; + + /* XP has no OnLinkPrefixLength field. */ + if (is_vista_or_greater) { + prefix_len = + ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength; + } else { + /* Prior to Windows Vista the FirstPrefix pointed to the list with + * single prefix for each IP address assigned to the adapter. + * Order of FirstPrefix does not match order of FirstUnicastAddress, + * so we need to find corresponding prefix. 
+ */ + IP_ADAPTER_PREFIX* prefix; + prefix_len = 0; + + for (prefix = adapter->FirstPrefix; prefix; prefix = prefix->Next) { + /* We want the longest matching prefix. */ + if (prefix->Address.lpSockaddr->sa_family != sa->sa_family || + prefix->PrefixLength <= prefix_len) + continue; + + if (address_prefix_match(sa->sa_family, sa, + prefix->Address.lpSockaddr, prefix->PrefixLength)) { + prefix_len = prefix->PrefixLength; + } + } + + /* If there is no matching prefix information, return a single-host + * subnet mask (e.g. 255.255.255.255 for IPv4). + */ + if (!prefix_len) + prefix_len = (sa->sa_family == AF_INET6) ? 128 : 32; + } memset(uv_address, 0, sizeof *uv_address); uv_address->name = name_buf; - if (win_address->PhysicalAddressLength == sizeof(uv_address->phys_addr)) { + if (adapter->PhysicalAddressLength == sizeof(uv_address->phys_addr)) { memcpy(uv_address->phys_addr, - win_address->PhysicalAddress, + adapter->PhysicalAddress, sizeof(uv_address->phys_addr)); } uv_address->is_internal = - (win_address->IfType == IF_TYPE_SOFTWARE_LOOPBACK); + (adapter->IfType == IF_TYPE_SOFTWARE_LOOPBACK); if (sa->sa_family == AF_INET6) { uv_address->address.address6 = *((struct sockaddr_in6 *) sa); uv_address->netmask.netmask6.sin6_family = AF_INET6; memset(uv_address->netmask.netmask6.sin6_addr.s6_addr, 0xff, prefix_len >> 3); - uv_address->netmask.netmask6.sin6_addr.s6_addr[prefix_len >> 3] = - 0xff << (8 - prefix_len % 8); + /* This check ensures that we don't write past the size of the data. */ + if (prefix_len % 8) { + uv_address->netmask.netmask6.sin6_addr.s6_addr[prefix_len >> 3] = + 0xff << (8 - prefix_len % 8); + } } else { uv_address->address.address4 = *((struct sockaddr_in *) sa); uv_address->netmask.netmask4.sin_family = AF_INET; - uv_address->netmask.netmask4.sin_addr.s_addr = - htonl(0xffffffff << (32 - prefix_len)); + uv_address->netmask.netmask4.sin_addr.s_addr = (prefix_len > 0) ? 
+ htonl(0xffffffff << (32 - prefix_len)) : 0; } uv_address++; diff -Nru nodejs-0.11.13/deps/uv/src/win/winapi.c nodejs-0.11.15/deps/uv/src/win/winapi.c --- nodejs-0.11.13/deps/uv/src/win/winapi.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/winapi.c 2015-01-20 21:22:17.000000000 +0000 @@ -51,6 +51,7 @@ sSleepConditionVariableSRW pSleepConditionVariableSRW; sWakeAllConditionVariable pWakeAllConditionVariable; sWakeConditionVariable pWakeConditionVariable; +sCancelSynchronousIo pCancelSynchronousIo; void uv_winapi_init() { @@ -156,4 +157,7 @@ pWakeConditionVariable = (sWakeConditionVariable) GetProcAddress(kernel32_module, "WakeConditionVariable"); + + pCancelSynchronousIo = (sCancelSynchronousIo) + GetProcAddress(kernel32_module, "CancelSynchronousIo"); } diff -Nru nodejs-0.11.13/deps/uv/src/win/winapi.h nodejs-0.11.15/deps/uv/src/win/winapi.h --- nodejs-0.11.13/deps/uv/src/win/winapi.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/winapi.h 2015-01-20 21:22:17.000000000 +0000 @@ -4617,6 +4617,8 @@ typedef VOID (WINAPI* sWakeConditionVariable) (PCONDITION_VARIABLE ConditionVariable); +typedef BOOL (WINAPI* sCancelSynchronousIo) + (HANDLE hThread); /* Ntdll function pointers */ extern sRtlNtStatusToDosError pRtlNtStatusToDosError; @@ -4644,5 +4646,6 @@ extern sSleepConditionVariableSRW pSleepConditionVariableSRW; extern sWakeAllConditionVariable pWakeAllConditionVariable; extern sWakeConditionVariable pWakeConditionVariable; +extern sCancelSynchronousIo pCancelSynchronousIo; #endif /* UV_WIN_WINAPI_H_ */ diff -Nru nodejs-0.11.13/deps/uv/src/win/winsock.h nodejs-0.11.15/deps/uv/src/win/winsock.h --- nodejs-0.11.13/deps/uv/src/win/winsock.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/src/win/winsock.h 2015-01-20 21:22:17.000000000 +0000 @@ -166,6 +166,25 @@ ULONG LeaseLifetime; } IP_ADAPTER_UNICAST_ADDRESS_XP,*PIP_ADAPTER_UNICAST_ADDRESS_XP; +typedef struct _IP_ADAPTER_UNICAST_ADDRESS_LH { + union { + 
ULONGLONG Alignment; + struct { + ULONG Length; + DWORD Flags; + }; + }; + struct _IP_ADAPTER_UNICAST_ADDRESS_LH *Next; + SOCKET_ADDRESS Address; + IP_PREFIX_ORIGIN PrefixOrigin; + IP_SUFFIX_ORIGIN SuffixOrigin; + IP_DAD_STATE DadState; + ULONG ValidLifetime; + ULONG PreferredLifetime; + ULONG LeaseLifetime; + UINT8 OnLinkPrefixLength; +} IP_ADAPTER_UNICAST_ADDRESS_LH,*PIP_ADAPTER_UNICAST_ADDRESS_LH; + #endif #endif /* UV_WIN_WINSOCK_H_ */ diff -Nru nodejs-0.11.13/deps/uv/test/echo-server.c nodejs-0.11.15/deps/uv/test/echo-server.c --- nodejs-0.11.13/deps/uv/test/echo-server.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/echo-server.c 2015-01-20 21:22:17.000000000 +0000 @@ -51,20 +51,21 @@ /* Free the read/write buffer and the request */ wr = (write_req_t*) req; free(wr->buf.base); + free(wr); - if (status == 0) { - free(wr); + if (status == 0) return; - } fprintf(stderr, "uv_write error: %s - %s\n", uv_err_name(status), uv_strerror(status)); +} - if (!uv_is_closing((uv_handle_t*) req->handle)) - uv_close((uv_handle_t*) req->handle, on_close); - free(wr); + +static void after_shutdown(uv_shutdown_t* req, int status) { + uv_close((uv_handle_t*) req->handle, on_close); + free(req); } @@ -73,16 +74,15 @@ const uv_buf_t* buf) { int i; write_req_t *wr; + uv_shutdown_t* sreq; if (nread < 0) { /* Error or EOF */ ASSERT(nread == UV_EOF); - if (buf->base) { - free(buf->base); - } - - uv_close((uv_handle_t*) handle, on_close); + free(buf->base); + sreq = malloc(sizeof* sreq); + ASSERT(0 == uv_shutdown(sreq, handle, after_shutdown)); return; } diff -Nru nodejs-0.11.13/deps/uv/test/run-benchmarks.c nodejs-0.11.15/deps/uv/test/run-benchmarks.c --- nodejs-0.11.13/deps/uv/test/run-benchmarks.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/run-benchmarks.c 2015-01-20 21:22:17.000000000 +0000 @@ -33,7 +33,8 @@ int main(int argc, char **argv) { - platform_init(argc, argv); + if (platform_init(argc, argv)) + return EXIT_FAILURE; switch 
(argc) { case 1: return run_tests(1); @@ -41,8 +42,10 @@ case 3: return run_test_part(argv[1], argv[2]); default: LOGF("Too many arguments.\n"); - return 1; + return EXIT_FAILURE; } + + return EXIT_SUCCESS; } diff -Nru nodejs-0.11.13/deps/uv/test/runner.c nodejs-0.11.15/deps/uv/test/runner.c --- nodejs-0.11.13/deps/uv/test/runner.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/runner.c 2015-01-20 21:22:17.000000000 +0000 @@ -26,7 +26,7 @@ #include "task.h" #include "uv.h" -char executable_path[PATHMAX] = { '\0' }; +char executable_path[sizeof(executable_path)]; int tap_output = 0; diff -Nru nodejs-0.11.13/deps/uv/test/runner.h nodejs-0.11.15/deps/uv/test/runner.h --- nodejs-0.11.13/deps/uv/test/runner.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/runner.h 2015-01-20 21:22:17.000000000 +0000 @@ -22,6 +22,7 @@ #ifndef RUNNER_H_ #define RUNNER_H_ +#include <limits.h> /* PATH_MAX */ #include <stdio.h> /* FILE */ @@ -83,8 +84,11 @@ #define TEST_HELPER HELPER_ENTRY #define BENCHMARK_HELPER HELPER_ENTRY -#define PATHMAX 1024 -extern char executable_path[PATHMAX]; +#ifdef PATH_MAX +extern char executable_path[PATH_MAX]; +#else +extern char executable_path[4096]; +#endif /* * Include platform-dependent definitions @@ -130,7 +134,7 @@ */ /* Do platform-specific initialization. */ -void platform_init(int argc, char** argv); +int platform_init(int argc, char** argv); /* Invoke "argv[0] test-name [test-part]". Store process info in *p. */ /* Make sure that all stdio output of the processes is buffered up. 
*/ diff -Nru nodejs-0.11.13/deps/uv/test/runner-unix.c nodejs-0.11.15/deps/uv/test/runner-unix.c --- nodejs-0.11.13/deps/uv/test/runner-unix.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/runner-unix.c 2015-01-20 21:22:17.000000000 +0000 @@ -22,10 +22,11 @@ #include "runner-unix.h" #include "runner.h" +#include <limits.h> #include <stdint.h> /* uintptr_t */ #include <errno.h> -#include <unistd.h> /* usleep */ +#include <unistd.h> /* readlink, usleep */ #include <string.h> /* strdup */ #include <stdio.h> #include <stdlib.h> @@ -40,7 +41,7 @@ /* Do platform-specific initialization. */ -void platform_init(int argc, char **argv) { +int platform_init(int argc, char **argv) { const char* tap; tap = getenv("UV_TAP_OUTPUT"); @@ -49,8 +50,14 @@ /* Disable stdio output buffering. */ setvbuf(stdout, NULL, _IONBF, 0); setvbuf(stderr, NULL, _IONBF, 0); - strncpy(executable_path, argv[0], sizeof(executable_path) - 1); signal(SIGPIPE, SIG_IGN); + + if (realpath(argv[0], executable_path) == NULL) { + perror("realpath"); + return -1; + } + + return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/runner-win.c nodejs-0.11.15/deps/uv/test/runner-win.c --- nodejs-0.11.13/deps/uv/test/runner-win.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/runner-win.c 2015-01-20 21:22:17.000000000 +0000 @@ -43,7 +43,7 @@ /* Do platform-specific initialization. 
*/ -void platform_init(int argc, char **argv) { +int platform_init(int argc, char **argv) { const char* tap; tap = getenv("UV_TAP_OUTPUT"); @@ -66,6 +66,8 @@ setvbuf(stderr, NULL, _IONBF, 0); strcpy(executable_path, argv[0]); + + return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/run-tests.c nodejs-0.11.15/deps/uv/test/run-tests.c --- nodejs-0.11.13/deps/uv/test/run-tests.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/run-tests.c 2015-01-20 21:22:17.000000000 +0000 @@ -39,13 +39,15 @@ int ipc_helper(int listen_after_write); int ipc_helper_tcp_connection(void); int ipc_send_recv_helper(void); +int ipc_helper_bind_twice(void); int stdio_over_pipes_helper(void); static int maybe_run_test(int argc, char **argv); int main(int argc, char **argv) { - platform_init(argc, argv); + if (platform_init(argc, argv)) + return EXIT_FAILURE; argv = uv_setup_args(argc, argv); @@ -55,8 +57,10 @@ case 3: return run_test_part(argv[1], argv[2]); default: LOGF("Too many arguments.\n"); - return 1; + return EXIT_FAILURE; } + + return EXIT_SUCCESS; } @@ -82,6 +86,10 @@ return ipc_helper_tcp_connection(); } + if (strcmp(argv[1], "ipc_helper_bind_twice") == 0) { + return ipc_helper_bind_twice(); + } + if (strcmp(argv[1], "stdio_over_pipes_helper") == 0) { return stdio_over_pipes_helper(); } diff -Nru nodejs-0.11.13/deps/uv/test/test-barrier.c nodejs-0.11.15/deps/uv/test/test-barrier.c --- nodejs-0.11.13/deps/uv/test/test-barrier.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-barrier.c 2015-01-20 21:22:17.000000000 +0000 @@ -29,6 +29,8 @@ uv_barrier_t barrier; int delay; volatile int posted; + int main_barrier_wait_rval; + int worker_barrier_wait_rval; } worker_config; @@ -38,7 +40,7 @@ if (c->delay) uv_sleep(c->delay); - uv_barrier_wait(&c->barrier); + c->worker_barrier_wait_rval = uv_barrier_wait(&c->barrier); } @@ -52,11 +54,13 @@ ASSERT(0 == uv_thread_create(&thread, worker, &wc)); uv_sleep(100); - uv_barrier_wait(&wc.barrier); + 
wc.main_barrier_wait_rval = uv_barrier_wait(&wc.barrier); ASSERT(0 == uv_thread_join(&thread)); uv_barrier_destroy(&wc.barrier); + ASSERT(1 == (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); + return 0; } @@ -71,11 +75,13 @@ ASSERT(0 == uv_barrier_init(&wc.barrier, 2)); ASSERT(0 == uv_thread_create(&thread, worker, &wc)); - uv_barrier_wait(&wc.barrier); + wc.main_barrier_wait_rval = uv_barrier_wait(&wc.barrier); ASSERT(0 == uv_thread_join(&thread)); uv_barrier_destroy(&wc.barrier); + ASSERT(1 == (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); + return 0; } @@ -89,10 +95,12 @@ ASSERT(0 == uv_barrier_init(&wc.barrier, 2)); ASSERT(0 == uv_thread_create(&thread, worker, &wc)); - uv_barrier_wait(&wc.barrier); + wc.main_barrier_wait_rval = uv_barrier_wait(&wc.barrier); ASSERT(0 == uv_thread_join(&thread)); uv_barrier_destroy(&wc.barrier); + ASSERT(1 == (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); + return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/test-default-loop-close.c nodejs-0.11.15/deps/uv/test/test-default-loop-close.c --- nodejs-0.11.13/deps/uv/test/test-default-loop-close.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-default-loop-close.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "task.h" + + +static int timer_cb_called; + + +static void timer_cb(uv_timer_t* timer) { + timer_cb_called++; + uv_close((uv_handle_t*) timer, NULL); +} + + +TEST_IMPL(default_loop_close) { + uv_loop_t* loop; + uv_timer_t timer_handle; + + loop = uv_default_loop(); + ASSERT(loop != NULL); + + ASSERT(0 == uv_timer_init(loop, &timer_handle)); + ASSERT(0 == uv_timer_start(&timer_handle, timer_cb, 1, 0)); + ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); + ASSERT(1 == timer_cb_called); + ASSERT(0 == uv_loop_close(loop)); + + loop = uv_default_loop(); + ASSERT(loop != NULL); + + ASSERT(0 == uv_timer_init(loop, &timer_handle)); + ASSERT(0 == uv_timer_start(&timer_handle, timer_cb, 1, 0)); + ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); + ASSERT(2 == timer_cb_called); + ASSERT(0 == uv_loop_close(loop)); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-fs.c nodejs-0.11.15/deps/uv/test/test-fs.c --- nodejs-0.11.13/deps/uv/test/test-fs.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-fs.c 2015-01-20 21:22:17.000000000 +0000 @@ -36,11 +36,15 @@ # include <io.h> # define unlink _unlink # define rmdir _rmdir -# define stat _stati64 # define open _open # define write _write -# define lseek _lseek # define close _close +# ifndef stat +# define stat _stati64 +# endif +# ifndef lseek +# define lseek _lseek +# endif #endif #define TOO_LONG_NAME_LENGTH 65536 @@ -61,8 +65,9 @@ static int write_cb_count; static int 
unlink_cb_count; static int mkdir_cb_count; +static int mkdtemp_cb_count; static int rmdir_cb_count; -static int readdir_cb_count; +static int scandir_cb_count; static int stat_cb_count; static int rename_cb_count; static int fsync_cb_count; @@ -70,6 +75,7 @@ static int ftruncate_cb_count; static int sendfile_cb_count; static int fstat_cb_count; +static int access_cb_count; static int chmod_cb_count; static int fchmod_cb_count; static int chown_cb_count; @@ -89,8 +95,10 @@ static uv_fs_t unlink_req; static uv_fs_t close_req; static uv_fs_t mkdir_req; +static uv_fs_t mkdtemp_req1; +static uv_fs_t mkdtemp_req2; static uv_fs_t rmdir_req; -static uv_fs_t readdir_req; +static uv_fs_t scandir_req; static uv_fs_t stat_req; static uv_fs_t rename_req; static uv_fs_t fsync_req; @@ -101,7 +109,9 @@ static uv_fs_t futime_req; static char buf[32]; +static char buf2[32]; static char test_buf[] = "test-buffer\n"; +static char test_buf2[] = "second-buffer\n"; static uv_buf_t iov; static void check_permission(const char* filename, unsigned int mode) { @@ -157,6 +167,14 @@ uv_fs_req_cleanup(req); } + +static void access_cb(uv_fs_t* req) { + ASSERT(req->fs_type == UV_FS_ACCESS); + access_cb_count++; + uv_fs_req_cleanup(req); +} + + static void fchmod_cb(uv_fs_t* req) { ASSERT(req->fs_type == UV_FS_FCHMOD); ASSERT(req->result == 0); @@ -372,6 +390,32 @@ } +static void check_mkdtemp_result(uv_fs_t* req) { + int r; + + ASSERT(req->fs_type == UV_FS_MKDTEMP); + ASSERT(req->result == 0); + ASSERT(req->path); + ASSERT(strlen(req->path) == 15); + ASSERT(memcmp(req->path, "test_dir_", 9) == 0); + ASSERT(memcmp(req->path + 9, "XXXXXX", 6) != 0); + check_permission(req->path, 0700); + + /* Check if req->path is actually a directory */ + r = uv_fs_stat(uv_default_loop(), &stat_req, req->path, NULL); + ASSERT(r == 0); + ASSERT(((uv_stat_t*)stat_req.ptr)->st_mode & S_IFDIR); + uv_fs_req_cleanup(&stat_req); +} + + +static void mkdtemp_cb(uv_fs_t* req) { + ASSERT(req == &mkdtemp_req1); + 
check_mkdtemp_result(req); + mkdtemp_cb_count++; +} + + static void rmdir_cb(uv_fs_t* req) { ASSERT(req == &rmdir_req); ASSERT(req->fs_type == UV_FS_RMDIR); @@ -383,14 +427,18 @@ } -static void readdir_cb(uv_fs_t* req) { - ASSERT(req == &readdir_req); - ASSERT(req->fs_type == UV_FS_READDIR); +static void scandir_cb(uv_fs_t* req) { + uv_dirent_t dent; + ASSERT(req == &scandir_req); + ASSERT(req->fs_type == UV_FS_SCANDIR); ASSERT(req->result == 2); ASSERT(req->ptr); - ASSERT(memcmp(req->ptr, "file1\0file2\0", 12) == 0 - || memcmp(req->ptr, "file2\0file1\0", 12) == 0); - readdir_cb_count++; + + while (UV_EOF != uv_fs_scandir_next(req, &dent)) { + ASSERT(strcmp(dent.name, "file1") == 0 || strcmp(dent.name, "file2") == 0); + ASSERT(dent.type == UV_DIRENT_FILE || dent.type == UV_DIRENT_UNKNOWN); + } + scandir_cb_count++; ASSERT(req->path); ASSERT(memcmp(req->path, "test_dir\0", 9) == 0); uv_fs_req_cleanup(req); @@ -398,23 +446,26 @@ } -static void empty_readdir_cb(uv_fs_t* req) { - ASSERT(req == &readdir_req); - ASSERT(req->fs_type == UV_FS_READDIR); +static void empty_scandir_cb(uv_fs_t* req) { + uv_dirent_t dent; + + ASSERT(req == &scandir_req); + ASSERT(req->fs_type == UV_FS_SCANDIR); ASSERT(req->result == 0); ASSERT(req->ptr == NULL); + ASSERT(UV_EOF == uv_fs_scandir_next(req, &dent)); uv_fs_req_cleanup(req); - readdir_cb_count++; + scandir_cb_count++; } -static void file_readdir_cb(uv_fs_t* req) { - ASSERT(req == &readdir_req); - ASSERT(req->fs_type == UV_FS_READDIR); +static void file_scandir_cb(uv_fs_t* req) { + ASSERT(req == &scandir_req); + ASSERT(req->fs_type == UV_FS_SCANDIR); ASSERT(req->result == UV_ENOTDIR); ASSERT(req->ptr == NULL); uv_fs_req_cleanup(req); - readdir_cb_count++; + scandir_cb_count++; } @@ -769,6 +820,7 @@ TEST_IMPL(fs_async_dir) { int r; + uv_dirent_t dent; /* Setup */ unlink("test_dir/file1"); @@ -800,21 +852,23 @@ ASSERT(r == 0); uv_fs_req_cleanup(&close_req); - r = uv_fs_readdir(loop, &readdir_req, "test_dir", 0, readdir_cb); + r = 
uv_fs_scandir(loop, &scandir_req, "test_dir", 0, scandir_cb); ASSERT(r == 0); uv_run(loop, UV_RUN_DEFAULT); - ASSERT(readdir_cb_count == 1); + ASSERT(scandir_cb_count == 1); - /* sync uv_fs_readdir */ - r = uv_fs_readdir(loop, &readdir_req, "test_dir", 0, NULL); + /* sync uv_fs_scandir */ + r = uv_fs_scandir(loop, &scandir_req, "test_dir", 0, NULL); ASSERT(r == 2); - ASSERT(readdir_req.result == 2); - ASSERT(readdir_req.ptr); - ASSERT(memcmp(readdir_req.ptr, "file1\0file2\0", 12) == 0 - || memcmp(readdir_req.ptr, "file2\0file1\0", 12) == 0); - uv_fs_req_cleanup(&readdir_req); - ASSERT(!readdir_req.ptr); + ASSERT(scandir_req.result == 2); + ASSERT(scandir_req.ptr); + while (UV_EOF != uv_fs_scandir_next(&scandir_req, &dent)) { + ASSERT(strcmp(dent.name, "file1") == 0 || strcmp(dent.name, "file2") == 0); + ASSERT(dent.type == UV_DIRENT_FILE || dent.type == UV_DIRENT_UNKNOWN); + } + uv_fs_req_cleanup(&scandir_req); + ASSERT(!scandir_req.ptr); r = uv_fs_stat(loop, &stat_req, "test_dir", stat_cb); ASSERT(r == 0); @@ -923,6 +977,37 @@ } +TEST_IMPL(fs_mkdtemp) { + int r; + const char* path_template = "test_dir_XXXXXX"; + + loop = uv_default_loop(); + + r = uv_fs_mkdtemp(loop, &mkdtemp_req1, path_template, mkdtemp_cb); + ASSERT(r == 0); + + uv_run(loop, UV_RUN_DEFAULT); + ASSERT(mkdtemp_cb_count == 1); + + /* sync mkdtemp */ + r = uv_fs_mkdtemp(loop, &mkdtemp_req2, path_template, NULL); + ASSERT(r == 0); + check_mkdtemp_result(&mkdtemp_req2); + + /* mkdtemp return different values on subsequent calls */ + ASSERT(strcmp(mkdtemp_req1.path, mkdtemp_req2.path) != 0); + + /* Cleanup */ + rmdir(mkdtemp_req1.path); + rmdir(mkdtemp_req2.path); + uv_fs_req_cleanup(&mkdtemp_req1); + uv_fs_req_cleanup(&mkdtemp_req2); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + + TEST_IMPL(fs_fstat) { int r; uv_fs_t req; @@ -981,6 +1066,13 @@ ASSERT(s->st_birthtim.tv_nsec == t.st_birthtimespec.tv_nsec); ASSERT(s->st_flags == t.st_flags); ASSERT(s->st_gen == t.st_gen); +#elif defined(_AIX) + 
ASSERT(s->st_atim.tv_sec == t.st_atime); + ASSERT(s->st_atim.tv_nsec == 0); + ASSERT(s->st_mtim.tv_sec == t.st_mtime); + ASSERT(s->st_mtim.tv_nsec == 0); + ASSERT(s->st_ctim.tv_sec == t.st_ctime); + ASSERT(s->st_ctim.tv_nsec == 0); #elif defined(__sun) || \ defined(_BSD_SOURCE) || \ defined(_SVID_SOURCE) || \ @@ -1038,6 +1130,70 @@ } +TEST_IMPL(fs_access) { + int r; + uv_fs_t req; + uv_file file; + + /* Setup. */ + unlink("test_file"); + + loop = uv_default_loop(); + + /* File should not exist */ + r = uv_fs_access(loop, &req, "test_file", F_OK, NULL); + ASSERT(r < 0); + ASSERT(req.result < 0); + uv_fs_req_cleanup(&req); + + /* File should not exist */ + r = uv_fs_access(loop, &req, "test_file", F_OK, access_cb); + ASSERT(r == 0); + uv_run(loop, UV_RUN_DEFAULT); + ASSERT(access_cb_count == 1); + access_cb_count = 0; /* reset for the next test */ + + /* Create file */ + r = uv_fs_open(loop, &req, "test_file", O_RDWR | O_CREAT, + S_IWUSR | S_IRUSR, NULL); + ASSERT(r >= 0); + ASSERT(req.result >= 0); + file = req.result; + uv_fs_req_cleanup(&req); + + /* File should exist */ + r = uv_fs_access(loop, &req, "test_file", F_OK, NULL); + ASSERT(r == 0); + ASSERT(req.result == 0); + uv_fs_req_cleanup(&req); + + /* File should exist */ + r = uv_fs_access(loop, &req, "test_file", F_OK, access_cb); + ASSERT(r == 0); + uv_run(loop, UV_RUN_DEFAULT); + ASSERT(access_cb_count == 1); + access_cb_count = 0; /* reset for the next test */ + + /* Close file */ + r = uv_fs_close(loop, &req, file, NULL); + ASSERT(r == 0); + ASSERT(req.result == 0); + uv_fs_req_cleanup(&req); + + /* + * Run the loop just to check we don't have make any extraneous uv_ref() + * calls. This should drop out immediately. + */ + uv_run(loop, UV_RUN_DEFAULT); + + /* Cleanup. 
*/ + unlink("test_file"); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + + TEST_IMPL(fs_chmod) { int r; uv_fs_t req; @@ -1450,6 +1606,7 @@ uv_fs_t req; int r; char* test_dir; + uv_dirent_t dent; /* set-up */ unlink("test_dir/file1"); @@ -1522,32 +1679,36 @@ ASSERT(r == 0); uv_fs_req_cleanup(&close_req); - r = uv_fs_readdir(loop, &readdir_req, "test_dir_symlink", 0, NULL); + r = uv_fs_scandir(loop, &scandir_req, "test_dir_symlink", 0, NULL); ASSERT(r == 2); - ASSERT(readdir_req.result == 2); - ASSERT(readdir_req.ptr); - ASSERT(memcmp(readdir_req.ptr, "file1\0file2\0", 12) == 0 - || memcmp(readdir_req.ptr, "file2\0file1\0", 12) == 0); - uv_fs_req_cleanup(&readdir_req); - ASSERT(!readdir_req.ptr); + ASSERT(scandir_req.result == 2); + ASSERT(scandir_req.ptr); + while (UV_EOF != uv_fs_scandir_next(&scandir_req, &dent)) { + ASSERT(strcmp(dent.name, "file1") == 0 || strcmp(dent.name, "file2") == 0); + ASSERT(dent.type == UV_DIRENT_FILE || dent.type == UV_DIRENT_UNKNOWN); + } + uv_fs_req_cleanup(&scandir_req); + ASSERT(!scandir_req.ptr); /* unlink will remove the directory symlink */ r = uv_fs_unlink(loop, &req, "test_dir_symlink", NULL); ASSERT(r == 0); uv_fs_req_cleanup(&req); - r = uv_fs_readdir(loop, &readdir_req, "test_dir_symlink", 0, NULL); + r = uv_fs_scandir(loop, &scandir_req, "test_dir_symlink", 0, NULL); ASSERT(r == UV_ENOENT); - uv_fs_req_cleanup(&readdir_req); + uv_fs_req_cleanup(&scandir_req); - r = uv_fs_readdir(loop, &readdir_req, "test_dir", 0, NULL); + r = uv_fs_scandir(loop, &scandir_req, "test_dir", 0, NULL); ASSERT(r == 2); - ASSERT(readdir_req.result == 2); - ASSERT(readdir_req.ptr); - ASSERT(memcmp(readdir_req.ptr, "file1\0file2\0", 12) == 0 - || memcmp(readdir_req.ptr, "file2\0file1\0", 12) == 0); - uv_fs_req_cleanup(&readdir_req); - ASSERT(!readdir_req.ptr); + ASSERT(scandir_req.result == 2); + ASSERT(scandir_req.ptr); + while (UV_EOF != uv_fs_scandir_next(&scandir_req, &dent)) { + ASSERT(strcmp(dent.name, "file1") == 0 || strcmp(dent.name, "file2") 
== 0); + ASSERT(dent.type == UV_DIRENT_FILE || dent.type == UV_DIRENT_UNKNOWN); + } + uv_fs_req_cleanup(&scandir_req); + ASSERT(!scandir_req.ptr); /* clean-up */ unlink("test_dir/file1"); @@ -1719,9 +1880,10 @@ } -TEST_IMPL(fs_readdir_empty_dir) { +TEST_IMPL(fs_scandir_empty_dir) { const char* path; uv_fs_t req; + uv_dirent_t dent; int r; path = "./empty_dir/"; @@ -1730,18 +1892,22 @@ uv_fs_mkdir(loop, &req, path, 0777, NULL); uv_fs_req_cleanup(&req); - r = uv_fs_readdir(loop, &req, path, 0, NULL); + /* Fill the req to ensure that required fields are cleaned up */ + memset(&req, 0xdb, sizeof(req)); + + r = uv_fs_scandir(loop, &req, path, 0, NULL); ASSERT(r == 0); ASSERT(req.result == 0); ASSERT(req.ptr == NULL); + ASSERT(UV_EOF == uv_fs_scandir_next(&req, &dent)); uv_fs_req_cleanup(&req); - r = uv_fs_readdir(loop, &readdir_req, path, 0, empty_readdir_cb); + r = uv_fs_scandir(loop, &scandir_req, path, 0, empty_scandir_cb); ASSERT(r == 0); - ASSERT(readdir_cb_count == 0); + ASSERT(scandir_cb_count == 0); uv_run(loop, UV_RUN_DEFAULT); - ASSERT(readdir_cb_count == 1); + ASSERT(scandir_cb_count == 1); uv_fs_rmdir(loop, &req, path, NULL); uv_fs_req_cleanup(&req); @@ -1751,23 +1917,23 @@ } -TEST_IMPL(fs_readdir_file) { +TEST_IMPL(fs_scandir_file) { const char* path; int r; path = "test/fixtures/empty_file"; loop = uv_default_loop(); - r = uv_fs_readdir(loop, &readdir_req, path, 0, NULL); + r = uv_fs_scandir(loop, &scandir_req, path, 0, NULL); ASSERT(r == UV_ENOTDIR); - uv_fs_req_cleanup(&readdir_req); + uv_fs_req_cleanup(&scandir_req); - r = uv_fs_readdir(loop, &readdir_req, path, 0, file_readdir_cb); + r = uv_fs_scandir(loop, &scandir_req, path, 0, file_scandir_cb); ASSERT(r == 0); - ASSERT(readdir_cb_count == 0); + ASSERT(scandir_cb_count == 0); uv_run(loop, UV_RUN_DEFAULT); - ASSERT(readdir_cb_count == 1); + ASSERT(scandir_cb_count == 1); MAKE_VALGRIND_HAPPY(); return 0; @@ -1983,6 +2149,70 @@ uv_fs_req_cleanup(&read_req); iov = uv_buf_init(buf, sizeof(buf)); + r = 
uv_fs_read(loop, &read_req, open_req1.result, &iov, 1, + read_req.result, NULL); + ASSERT(r == 0); + ASSERT(read_req.result == 0); + uv_fs_req_cleanup(&read_req); + + r = uv_fs_close(loop, &close_req, open_req1.result, NULL); + ASSERT(r == 0); + ASSERT(close_req.result == 0); + uv_fs_req_cleanup(&close_req); + + /* Cleanup */ + unlink("test_file"); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + + +TEST_IMPL(fs_write_multiple_bufs) { + uv_buf_t iovs[2]; + int r; + + /* Setup. */ + unlink("test_file"); + + loop = uv_default_loop(); + + r = uv_fs_open(loop, &open_req1, "test_file", O_WRONLY | O_CREAT, + S_IWUSR | S_IRUSR, NULL); + ASSERT(r >= 0); + ASSERT(open_req1.result >= 0); + uv_fs_req_cleanup(&open_req1); + + iovs[0] = uv_buf_init(test_buf, sizeof(test_buf)); + iovs[1] = uv_buf_init(test_buf2, sizeof(test_buf2)); + r = uv_fs_write(loop, &write_req, open_req1.result, iovs, 2, 0, NULL); + ASSERT(r >= 0); + ASSERT(write_req.result >= 0); + uv_fs_req_cleanup(&write_req); + + r = uv_fs_close(loop, &close_req, open_req1.result, NULL); + ASSERT(r == 0); + ASSERT(close_req.result == 0); + uv_fs_req_cleanup(&close_req); + + r = uv_fs_open(loop, &open_req1, "test_file", O_RDONLY, 0, NULL); + ASSERT(r >= 0); + ASSERT(open_req1.result >= 0); + uv_fs_req_cleanup(&open_req1); + + memset(buf, 0, sizeof(buf)); + memset(buf2, 0, sizeof(buf2)); + /* Read the strings back to separate buffers. 
*/ + iovs[0] = uv_buf_init(buf, sizeof(test_buf)); + iovs[1] = uv_buf_init(buf2, sizeof(test_buf2)); + r = uv_fs_read(loop, &read_req, open_req1.result, iovs, 2, 0, NULL); + ASSERT(r >= 0); + ASSERT(read_req.result >= 0); + ASSERT(strcmp(buf, test_buf) == 0); + ASSERT(strcmp(buf2, test_buf2) == 0); + uv_fs_req_cleanup(&read_req); + + iov = uv_buf_init(buf, sizeof(buf)); r = uv_fs_read(loop, &read_req, open_req1.result, &iov, 1, read_req.result, NULL); ASSERT(r == 0); diff -Nru nodejs-0.11.13/deps/uv/test/test-getnameinfo.c nodejs-0.11.15/deps/uv/test/test-getnameinfo.c --- nodejs-0.11.13/deps/uv/test/test-getnameinfo.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-getnameinfo.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
+*/ + +#include "uv.h" +#include "task.h" +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + + +static const char* address_ip4 = "127.0.0.1"; +static const char* address_ip6 = "::1"; +static const int port = 80; + +static struct sockaddr_in addr4; +static struct sockaddr_in6 addr6; +static uv_getnameinfo_t req; + +static void getnameinfo_req(uv_getnameinfo_t* handle, + int status, + const char* hostname, + const char* service) { + ASSERT(handle != NULL); + ASSERT(status == 0); + ASSERT(hostname != NULL); + ASSERT(service != NULL); +} + +TEST_IMPL(getnameinfo_basic_ip4) { + int r; + + r = uv_ip4_addr(address_ip4, port, &addr4); + ASSERT(r == 0); + + r = uv_getnameinfo(uv_default_loop(), + &req, + &getnameinfo_req, + (const struct sockaddr*)&addr4, + 0); + ASSERT(r == 0); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +TEST_IMPL(getnameinfo_basic_ip6) { + int r; + + r = uv_ip6_addr(address_ip6, port, &addr6); + ASSERT(r == 0); + + r = uv_getnameinfo(uv_default_loop(), + &req, + &getnameinfo_req, + (const struct sockaddr*)&addr6, + 0); + ASSERT(r == 0); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-getsockname.c nodejs-0.11.15/deps/uv/test/test-getsockname.c --- nodejs-0.11.13/deps/uv/test/test-getsockname.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-getsockname.c 2015-01-20 21:22:17.000000000 +0000 @@ -353,6 +353,9 @@ ASSERT(getsocknamecount == 2); + ASSERT(udp.send_queue_size == 0); + ASSERT(udpServer.send_queue_size == 0); + MAKE_VALGRIND_HAPPY(); return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/test-handle-fileno.c nodejs-0.11.15/deps/uv/test/test-handle-fileno.c --- nodejs-0.11.13/deps/uv/test/test-handle-fileno.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-handle-fileno.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,120 @@ +/* Copyright Joyent, Inc. 
and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "task.h" + + +static int get_tty_fd(void) { + /* Make sure we have an FD that refers to a tty */ +#ifdef _WIN32 + HANDLE handle; + handle = CreateFileA("conout$", + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL); + if (handle == INVALID_HANDLE_VALUE) + return -1; + return _open_osfhandle((intptr_t) handle, 0); +#else /* unix */ + return open("/dev/tty", O_RDONLY, 0); +#endif +} + + +TEST_IMPL(handle_fileno) { + int r; + int tty_fd; + struct sockaddr_in addr; + uv_os_fd_t fd; + uv_tcp_t tcp; + uv_udp_t udp; + uv_pipe_t pipe; + uv_tty_t tty; + uv_idle_t idle; + uv_loop_t* loop; + + loop = uv_default_loop(); + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_idle_init(loop, &idle); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &idle, &fd); + ASSERT(r == UV_EINVAL); + uv_close((uv_handle_t*) &idle, NULL); + + r = uv_tcp_init(loop, &tcp); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &tcp, &fd); + ASSERT(r == UV_EBADF); + r = uv_tcp_bind(&tcp, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &tcp, &fd); + ASSERT(r == 0); + uv_close((uv_handle_t*) &tcp, NULL); + r = uv_fileno((uv_handle_t*) &tcp, &fd); + ASSERT(r == UV_EBADF); + + r = uv_udp_init(loop, &udp); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &udp, &fd); + ASSERT(r == UV_EBADF); + r = uv_udp_bind(&udp, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &udp, &fd); + ASSERT(r == 0); + uv_close((uv_handle_t*) &udp, NULL); + r = uv_fileno((uv_handle_t*) &udp, &fd); + ASSERT(r == UV_EBADF); + + r = uv_pipe_init(loop, &pipe, 0); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &pipe, &fd); + ASSERT(r == UV_EBADF); + r = uv_pipe_bind(&pipe, TEST_PIPENAME); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &pipe, &fd); + ASSERT(r == 0); + uv_close((uv_handle_t*) &pipe, NULL); + r = uv_fileno((uv_handle_t*) 
&pipe, &fd); + ASSERT(r == UV_EBADF); + + tty_fd = get_tty_fd(); + if (tty_fd < 0) { + LOGF("Cannot open a TTY fd"); + } else { + r = uv_tty_init(loop, &tty, tty_fd, 0); + ASSERT(r == 0); + r = uv_fileno((uv_handle_t*) &tty, &fd); + ASSERT(r == 0); + uv_close((uv_handle_t*) &tty, NULL); + r = uv_fileno((uv_handle_t*) &tty, &fd); + ASSERT(r == UV_EBADF); + } + + uv_run(loop, UV_RUN_DEFAULT); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-ip6-addr.c nodejs-0.11.15/deps/uv/test/test-ip6-addr.c --- nodejs-0.11.13/deps/uv/test/test-ip6-addr.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-ip6-addr.c 2015-01-20 21:22:17.000000000 +0000 @@ -32,7 +32,6 @@ TEST_IMPL(ip6_addr_link_local) { -#ifdef UV_PLATFORM_HAS_IP6_LINK_LOCAL_ADDRESS char string_address[INET6_ADDRSTRLEN]; uv_interface_address_t* addresses; uv_interface_address_t* address; @@ -93,9 +92,6 @@ MAKE_VALGRIND_HAPPY(); return 0; -#else - RETURN_SKIP("Qualified link-local addresses are not supported."); -#endif } @@ -107,6 +103,7 @@ X("fe80::2acf:daff:fedd:342a") \ X("fe80:0:0:0:2acf:daff:fedd:342a") \ X("fe80:0:0:0:2acf:daff:1.2.3.4") \ + X("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255") \ #define BAD_ADDR_LIST(X) \ X(":::1") \ @@ -114,6 +111,7 @@ X("fe80:0:0:0:2acf:daff:fedd:342a:5678") \ X("fe80:0:0:0:2acf:daff:abcd:1.2.3.4") \ X("fe80:0:0:2acf:daff:1.2.3.4.5") \ + X("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255.255") \ #define TEST_GOOD(ADDR) \ ASSERT(0 == uv_inet_pton(AF_INET6, ADDR, &addr)); \ diff -Nru nodejs-0.11.13/deps/uv/test/test-ipc.c nodejs-0.11.15/deps/uv/test/test-ipc.c --- nodejs-0.11.13/deps/uv/test/test-ipc.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-ipc.c 2015-01-20 21:22:17.000000000 +0000 @@ -27,6 +27,7 @@ static uv_pipe_t channel; static uv_tcp_t tcp_server; +static uv_tcp_t tcp_server2; static uv_tcp_t tcp_connection; static int exit_cb_called; @@ -38,8 +39,6 @@ static int remote_conn_accepted; 
static int tcp_server_listening; static uv_write_t write_req; -static uv_pipe_t channel; -static uv_tcp_t tcp_server; static uv_write_t conn_notify_req; static int close_cb_called; static int connection_accepted; @@ -205,6 +204,71 @@ free(buf->base); } +#ifdef _WIN32 +static void on_read_listen_after_bound_twice(uv_stream_t* handle, + ssize_t nread, + const uv_buf_t* buf) { + int r; + uv_pipe_t* pipe; + uv_handle_type pending; + + pipe = (uv_pipe_t*) handle; + + if (nread == 0) { + /* Everything OK, but nothing read. */ + free(buf->base); + return; + } + + if (nread < 0) { + if (nread == UV_EOF) { + free(buf->base); + return; + } + + printf("error recving on channel: %s\n", uv_strerror(nread)); + abort(); + } + + fprintf(stderr, "got %d bytes\n", (int)nread); + + ASSERT(uv_pipe_pending_count(pipe) > 0); + pending = uv_pipe_pending_type(pipe); + ASSERT(nread > 0 && buf->base && pending != UV_UNKNOWN_HANDLE); + read_cb_called++; + + if (read_cb_called == 1) { + /* Accept the first TCP server, and start listening on it. */ + ASSERT(pending == UV_TCP); + r = uv_tcp_init(uv_default_loop(), &tcp_server); + ASSERT(r == 0); + + r = uv_accept((uv_stream_t*)pipe, (uv_stream_t*)&tcp_server); + ASSERT(r == 0); + + r = uv_listen((uv_stream_t*)&tcp_server, 12, on_connection); + ASSERT(r == 0); + } else if (read_cb_called == 2) { + /* Accept the second TCP server, and start listening on it. 
*/ + ASSERT(pending == UV_TCP); + r = uv_tcp_init(uv_default_loop(), &tcp_server2); + ASSERT(r == 0); + + r = uv_accept((uv_stream_t*)pipe, (uv_stream_t*)&tcp_server2); + ASSERT(r == 0); + + r = uv_listen((uv_stream_t*)&tcp_server2, 12, on_connection); + ASSERT(r == UV_EADDRINUSE); + + uv_close((uv_handle_t*)&tcp_server, NULL); + uv_close((uv_handle_t*)&tcp_server2, NULL); + ASSERT(0 == uv_pipe_pending_count(pipe)); + uv_close((uv_handle_t*)&channel, NULL); + } + + free(buf->base); +} +#endif void spawn_helper(uv_pipe_t* channel, uv_process_t* process, @@ -424,6 +488,13 @@ MAKE_VALGRIND_HAPPY(); return 0; } + +TEST_IMPL(ipc_listen_after_bind_twice) { + int r = run_ipc_test("ipc_helper_bind_twice", on_read_listen_after_bound_twice); + ASSERT(read_cb_called == 2); + ASSERT(exit_cb_called == 1); + return r; +} #endif @@ -608,7 +679,7 @@ int ipc_helper_tcp_connection(void) { /* - * This is launched from test-ipc.c. stdin is a duplex channel that we + * This is launched from test-ipc.c. stdin is a duplex channel * over which a handle will be transmitted. */ @@ -657,3 +728,51 @@ MAKE_VALGRIND_HAPPY(); return 0; } + +int ipc_helper_bind_twice(void) { + /* + * This is launched from test-ipc.c. stdin is a duplex channel + * over which two handles will be transmitted. 
+ */ + struct sockaddr_in addr; + uv_write_t write_req; + uv_write_t write_req2; + int r; + uv_buf_t buf; + + ASSERT(0 == uv_ip4_addr("0.0.0.0", TEST_PORT, &addr)); + + r = uv_pipe_init(uv_default_loop(), &channel, 1); + ASSERT(r == 0); + + uv_pipe_open(&channel, 0); + + ASSERT(1 == uv_is_readable((uv_stream_t*) &channel)); + ASSERT(1 == uv_is_writable((uv_stream_t*) &channel)); + ASSERT(0 == uv_is_closing((uv_handle_t*) &channel)); + + buf = uv_buf_init("hello\n", 6); + + r = uv_tcp_init(uv_default_loop(), &tcp_server); + ASSERT(r == 0); + r = uv_tcp_init(uv_default_loop(), &tcp_server2); + ASSERT(r == 0); + + r = uv_tcp_bind(&tcp_server, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + r = uv_tcp_bind(&tcp_server2, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + + r = uv_write2(&write_req, (uv_stream_t*)&channel, &buf, 1, + (uv_stream_t*)&tcp_server, NULL); + ASSERT(r == 0); + r = uv_write2(&write_req2, (uv_stream_t*)&channel, &buf, 1, + (uv_stream_t*)&tcp_server2, NULL); + ASSERT(r == 0); + + r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); + ASSERT(r == 0); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-list.h nodejs-0.11.15/deps/uv/test/test-list.h --- nodejs-0.11.13/deps/uv/test/test-list.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-list.h 2015-01-20 21:22:17.000000000 +0000 @@ -28,6 +28,8 @@ TEST_DECLARE (loop_close) TEST_DECLARE (loop_stop) TEST_DECLARE (loop_update_time) +TEST_DECLARE (loop_backend_timeout) +TEST_DECLARE (default_loop_close) TEST_DECLARE (barrier_1) TEST_DECLARE (barrier_2) TEST_DECLARE (barrier_3) @@ -54,8 +56,12 @@ TEST_DECLARE (pipe_ping_pong) TEST_DECLARE (delayed_accept) TEST_DECLARE (multiple_listen) +#ifndef _WIN32 +TEST_DECLARE (tcp_write_after_connect) +#endif TEST_DECLARE (tcp_writealot) TEST_DECLARE (tcp_try_write) +TEST_DECLARE (tcp_write_queue_order) TEST_DECLARE (tcp_open) TEST_DECLARE (tcp_connect_error_after_write) TEST_DECLARE 
(tcp_shutdown_after_write) @@ -71,7 +77,9 @@ TEST_DECLARE (tcp_connect_timeout) TEST_DECLARE (tcp_close_while_connecting) TEST_DECLARE (tcp_close) +#ifndef _WIN32 TEST_DECLARE (tcp_close_accept) +#endif TEST_DECLARE (tcp_flags) TEST_DECLARE (tcp_write_to_half_open_connection) TEST_DECLARE (tcp_unexpected_read) @@ -84,6 +92,8 @@ TEST_DECLARE (udp_bind) TEST_DECLARE (udp_bind_reuseaddr) TEST_DECLARE (udp_send_and_recv) +TEST_DECLARE (udp_send_immediate) +TEST_DECLARE (udp_send_unreachable) TEST_DECLARE (udp_multicast_join) TEST_DECLARE (udp_multicast_join6) TEST_DECLARE (udp_multicast_ttl) @@ -93,7 +103,9 @@ TEST_DECLARE (udp_dual_stack) TEST_DECLARE (udp_ipv6_only) TEST_DECLARE (udp_options) +TEST_DECLARE (udp_no_autobind) TEST_DECLARE (udp_open) +TEST_DECLARE (udp_try_send) TEST_DECLARE (pipe_bind_error_addrinuse) TEST_DECLARE (pipe_bind_error_addrnotavail) TEST_DECLARE (pipe_bind_error_inval) @@ -102,6 +114,7 @@ TEST_DECLARE (pipe_connect_to_file) TEST_DECLARE (pipe_getsockname) TEST_DECLARE (pipe_getsockname_abstract) +TEST_DECLARE (pipe_getsockname_blocking) TEST_DECLARE (pipe_sendmsg) TEST_DECLARE (pipe_server_close) TEST_DECLARE (connection_fail) @@ -121,6 +134,7 @@ TEST_DECLARE (timer_huge_repeat) TEST_DECLARE (timer_run_once) TEST_DECLARE (timer_from_check) +TEST_DECLARE (timer_null_callback) TEST_DECLARE (idle_starvation) TEST_DECLARE (loop_handles) TEST_DECLARE (get_loadavg) @@ -148,6 +162,9 @@ TEST_DECLARE (pipe_ref2) TEST_DECLARE (pipe_ref3) TEST_DECLARE (pipe_ref4) +#ifndef _WIN32 +TEST_DECLARE (pipe_close_stdout_read_stdin) +#endif TEST_DECLARE (process_ref) TEST_DECLARE (has_ref) TEST_DECLARE (active) @@ -158,14 +175,18 @@ TEST_DECLARE (process_title) TEST_DECLARE (cwd_and_chdir) TEST_DECLARE (get_memory) +TEST_DECLARE (handle_fileno) TEST_DECLARE (hrtime) TEST_DECLARE (getaddrinfo_fail) TEST_DECLARE (getaddrinfo_basic) TEST_DECLARE (getaddrinfo_concurrent) +TEST_DECLARE (getnameinfo_basic_ip4) +TEST_DECLARE (getnameinfo_basic_ip6) TEST_DECLARE 
(getsockname_tcp) TEST_DECLARE (getsockname_udp) TEST_DECLARE (fail_always) TEST_DECLARE (pass_always) +TEST_DECLARE (socket_buffer_size) TEST_DECLARE (spawn_fails) TEST_DECLARE (spawn_exit_code) TEST_DECLARE (spawn_stdout) @@ -183,6 +204,7 @@ TEST_DECLARE (spawn_stdout_and_stderr_to_file) TEST_DECLARE (spawn_auto_unref) TEST_DECLARE (spawn_closed_process_io) +TEST_DECLARE (spawn_reads_child_path) TEST_DECLARE (fs_poll) TEST_DECLARE (fs_poll_getpath) TEST_DECLARE (kill) @@ -194,7 +216,9 @@ TEST_DECLARE (fs_file_write_null_buffer) TEST_DECLARE (fs_async_dir) TEST_DECLARE (fs_async_sendfile) +TEST_DECLARE (fs_mkdtemp) TEST_DECLARE (fs_fstat) +TEST_DECLARE (fs_access) TEST_DECLARE (fs_chmod) TEST_DECLARE (fs_chown) TEST_DECLARE (fs_link) @@ -218,14 +242,16 @@ TEST_DECLARE (fs_event_start_and_close) TEST_DECLARE (fs_event_error_reporting) TEST_DECLARE (fs_event_getpath) -TEST_DECLARE (fs_readdir_empty_dir) -TEST_DECLARE (fs_readdir_file) +TEST_DECLARE (fs_scandir_empty_dir) +TEST_DECLARE (fs_scandir_file) TEST_DECLARE (fs_open_dir) TEST_DECLARE (fs_rename_to_existing_file) +TEST_DECLARE (fs_write_multiple_bufs) TEST_DECLARE (threadpool_queue_work_simple) TEST_DECLARE (threadpool_queue_work_einval) TEST_DECLARE (threadpool_multiple_event_loops) TEST_DECLARE (threadpool_cancel_getaddrinfo) +TEST_DECLARE (threadpool_cancel_getnameinfo) TEST_DECLARE (threadpool_cancel_work) TEST_DECLARE (threadpool_cancel_fs) TEST_DECLARE (threadpool_cancel_single) @@ -233,13 +259,17 @@ TEST_DECLARE (thread_mutex) TEST_DECLARE (thread_rwlock) TEST_DECLARE (thread_create) +TEST_DECLARE (thread_equal) TEST_DECLARE (dlerror) TEST_DECLARE (poll_duplex) TEST_DECLARE (poll_unidirectional) TEST_DECLARE (poll_close) + TEST_DECLARE (ip4_addr) TEST_DECLARE (ip6_addr_link_local) + #ifdef _WIN32 +TEST_DECLARE (poll_closesocket) TEST_DECLARE (spawn_detect_pipe_name_collisions_on_windows) TEST_DECLARE (argument_escaping) TEST_DECLARE (environment_creation) @@ -247,6 +277,7 @@ TEST_DECLARE 
(listen_no_simultaneous_accepts) TEST_DECLARE (fs_stat_root) TEST_DECLARE (spawn_with_an_odd_path) +TEST_DECLARE (ipc_listen_after_bind_twice) #else TEST_DECLARE (emfile) TEST_DECLARE (close_fd) @@ -259,6 +290,7 @@ #endif #ifdef __APPLE__ TEST_DECLARE (osx_select) +TEST_DECLARE (osx_select_many_fds) #endif HELPER_DECLARE (tcp4_echo_server) HELPER_DECLARE (tcp6_echo_server) @@ -279,6 +311,8 @@ TEST_ENTRY (loop_close) TEST_ENTRY (loop_stop) TEST_ENTRY (loop_update_time) + TEST_ENTRY (loop_backend_timeout) + TEST_ENTRY (default_loop_close) TEST_ENTRY (barrier_1) TEST_ENTRY (barrier_2) TEST_ENTRY (barrier_3) @@ -295,6 +329,9 @@ TEST_ENTRY (pipe_connect_to_file) TEST_ENTRY (pipe_server_close) +#ifndef _WIN32 + TEST_ENTRY (pipe_close_stdout_read_stdin) +#endif TEST_ENTRY (tty) TEST_ENTRY (stdio_over_pipes) TEST_ENTRY (ip6_pton) @@ -318,11 +355,17 @@ TEST_ENTRY (delayed_accept) TEST_ENTRY (multiple_listen) +#ifndef _WIN32 + TEST_ENTRY (tcp_write_after_connect) +#endif + TEST_ENTRY (tcp_writealot) TEST_HELPER (tcp_writealot, tcp4_echo_server) TEST_ENTRY (tcp_try_write) + TEST_ENTRY (tcp_write_queue_order) + TEST_ENTRY (tcp_open) TEST_HELPER (tcp_open, tcp4_echo_server) @@ -342,7 +385,9 @@ TEST_ENTRY (tcp_connect_timeout) TEST_ENTRY (tcp_close_while_connecting) TEST_ENTRY (tcp_close) +#ifndef _WIN32 TEST_ENTRY (tcp_close_accept) +#endif TEST_ENTRY (tcp_flags) TEST_ENTRY (tcp_write_to_half_open_connection) TEST_ENTRY (tcp_unexpected_read) @@ -359,15 +404,19 @@ TEST_ENTRY (udp_bind) TEST_ENTRY (udp_bind_reuseaddr) TEST_ENTRY (udp_send_and_recv) + TEST_ENTRY (udp_send_immediate) + TEST_ENTRY (udp_send_unreachable) TEST_ENTRY (udp_dgram_too_big) TEST_ENTRY (udp_dual_stack) TEST_ENTRY (udp_ipv6_only) TEST_ENTRY (udp_options) + TEST_ENTRY (udp_no_autobind) TEST_ENTRY (udp_multicast_interface) TEST_ENTRY (udp_multicast_interface6) TEST_ENTRY (udp_multicast_join) TEST_ENTRY (udp_multicast_join6) TEST_ENTRY (udp_multicast_ttl) + TEST_ENTRY (udp_try_send) TEST_ENTRY (udp_open) 
TEST_HELPER (udp_open, udp4_echo_server) @@ -378,6 +427,7 @@ TEST_ENTRY (pipe_listen_without_bind) TEST_ENTRY (pipe_getsockname) TEST_ENTRY (pipe_getsockname_abstract) + TEST_ENTRY (pipe_getsockname_blocking) TEST_ENTRY (pipe_sendmsg) TEST_ENTRY (connection_fail) @@ -408,6 +458,7 @@ TEST_ENTRY (timer_huge_repeat) TEST_ENTRY (timer_run_once) TEST_ENTRY (timer_from_check) + TEST_ENTRY (timer_null_callback) TEST_ENTRY (idle_starvation) @@ -463,6 +514,8 @@ TEST_ENTRY (get_loadavg) + TEST_ENTRY (handle_fileno) + TEST_ENTRY (hrtime) TEST_ENTRY_CUSTOM (getaddrinfo_fail, 0, 0, 10000) @@ -470,6 +523,9 @@ TEST_ENTRY (getaddrinfo_basic) TEST_ENTRY (getaddrinfo_concurrent) + TEST_ENTRY (getnameinfo_basic_ip4) + TEST_ENTRY (getnameinfo_basic_ip6) + TEST_ENTRY (getsockname_tcp) TEST_ENTRY (getsockname_udp) @@ -477,6 +533,8 @@ TEST_ENTRY (poll_unidirectional) TEST_ENTRY (poll_close) + TEST_ENTRY (socket_buffer_size) + TEST_ENTRY (spawn_fails) TEST_ENTRY (spawn_exit_code) TEST_ENTRY (spawn_stdout) @@ -494,11 +552,13 @@ TEST_ENTRY (spawn_stdout_and_stderr_to_file) TEST_ENTRY (spawn_auto_unref) TEST_ENTRY (spawn_closed_process_io) + TEST_ENTRY (spawn_reads_child_path) TEST_ENTRY (fs_poll) TEST_ENTRY (fs_poll_getpath) TEST_ENTRY (kill) #ifdef _WIN32 + TEST_ENTRY (poll_closesocket) TEST_ENTRY (spawn_detect_pipe_name_collisions_on_windows) TEST_ENTRY (argument_escaping) TEST_ENTRY (environment_creation) @@ -506,6 +566,7 @@ TEST_ENTRY (listen_no_simultaneous_accepts) TEST_ENTRY (fs_stat_root) TEST_ENTRY (spawn_with_an_odd_path) + TEST_ENTRY (ipc_listen_after_bind_twice) #else TEST_ENTRY (emfile) TEST_ENTRY (close_fd) @@ -519,6 +580,7 @@ #ifdef __APPLE__ TEST_ENTRY (osx_select) + TEST_ENTRY (osx_select_many_fds) #endif TEST_ENTRY (fs_file_noent) @@ -529,7 +591,9 @@ TEST_ENTRY (fs_file_write_null_buffer) TEST_ENTRY (fs_async_dir) TEST_ENTRY (fs_async_sendfile) + TEST_ENTRY (fs_mkdtemp) TEST_ENTRY (fs_fstat) + TEST_ENTRY (fs_access) TEST_ENTRY (fs_chmod) TEST_ENTRY (fs_chown) TEST_ENTRY 
(fs_utime) @@ -552,14 +616,16 @@ TEST_ENTRY (fs_event_start_and_close) TEST_ENTRY (fs_event_error_reporting) TEST_ENTRY (fs_event_getpath) - TEST_ENTRY (fs_readdir_empty_dir) - TEST_ENTRY (fs_readdir_file) + TEST_ENTRY (fs_scandir_empty_dir) + TEST_ENTRY (fs_scandir_file) TEST_ENTRY (fs_open_dir) TEST_ENTRY (fs_rename_to_existing_file) + TEST_ENTRY (fs_write_multiple_bufs) TEST_ENTRY (threadpool_queue_work_simple) TEST_ENTRY (threadpool_queue_work_einval) TEST_ENTRY (threadpool_multiple_event_loops) TEST_ENTRY (threadpool_cancel_getaddrinfo) + TEST_ENTRY (threadpool_cancel_getnameinfo) TEST_ENTRY (threadpool_cancel_work) TEST_ENTRY (threadpool_cancel_fs) TEST_ENTRY (threadpool_cancel_single) @@ -567,6 +633,7 @@ TEST_ENTRY (thread_mutex) TEST_ENTRY (thread_rwlock) TEST_ENTRY (thread_create) + TEST_ENTRY (thread_equal) TEST_ENTRY (dlerror) TEST_ENTRY (ip4_addr) TEST_ENTRY (ip6_addr_link_local) diff -Nru nodejs-0.11.13/deps/uv/test/test-loop-time.c nodejs-0.11.15/deps/uv/test/test-loop-time.c --- nodejs-0.11.13/deps/uv/test/test-loop-time.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-loop-time.c 2015-01-20 21:22:17.000000000 +0000 @@ -30,5 +30,34 @@ while (uv_now(uv_default_loop()) - start < 1000) ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_NOWAIT)); + MAKE_VALGRIND_HAPPY(); + return 0; +} + +static void cb(uv_timer_t* timer) { + uv_close((uv_handle_t*)timer, NULL); +} + +TEST_IMPL(loop_backend_timeout) { + uv_loop_t *loop = uv_default_loop(); + uv_timer_t timer; + int r; + + r = uv_timer_init(loop, &timer); + ASSERT(r == 0); + + ASSERT(!uv_loop_alive(loop)); + ASSERT(uv_backend_timeout(loop) == 0); + + r = uv_timer_start(&timer, cb, 1000, 0); /* 1 sec */ + ASSERT(r == 0); + ASSERT(uv_backend_timeout(loop) > 100); /* 0.1 sec */ + ASSERT(uv_backend_timeout(loop) <= 1000); /* 1 sec */ + + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT(r == 0); + ASSERT(uv_backend_timeout(loop) == 0); + + MAKE_VALGRIND_HAPPY(); return 0; } diff -Nru 
nodejs-0.11.13/deps/uv/test/test-osx-select.c nodejs-0.11.15/deps/uv/test/test-osx-select.c --- nodejs-0.11.13/deps/uv/test/test-osx-select.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-osx-select.c 2015-01-20 21:22:17.000000000 +0000 @@ -79,4 +79,54 @@ return 0; } + +TEST_IMPL(osx_select_many_fds) { + int r; + int fd; + size_t i; + size_t len; + const char* str; + struct sockaddr_in addr; + uv_tty_t tty; + uv_tcp_t tcps[1500]; + + TEST_FILE_LIMIT(ARRAY_SIZE(tcps) + 2); + + r = uv_ip4_addr("127.0.0.1", 0, &addr); + ASSERT(r == 0); + + for (i = 0; i < ARRAY_SIZE(tcps); i++) { + r = uv_tcp_init(uv_default_loop(), &tcps[i]); + ASSERT(r == 0); + r = uv_tcp_bind(&tcps[i], (const struct sockaddr *) &addr, 0); + ASSERT(r == 0); + uv_unref((uv_handle_t*) &tcps[i]); + } + + fd = open("/dev/tty", O_RDONLY); + ASSERT(fd >= 0); + + r = uv_tty_init(uv_default_loop(), &tty, fd, 1); + ASSERT(r == 0); + + r = uv_read_start((uv_stream_t*) &tty, alloc_cb, read_cb); + ASSERT(r == 0); + + /* Emulate user-input */ + str = "got some input\n" + "with a couple of lines\n" + "feel pretty happy\n"; + for (i = 0, len = strlen(str); i < len; i++) { + r = ioctl(fd, TIOCSTI, str + i); + ASSERT(r == 0); + } + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT(read_count == 3); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + #endif /* __APPLE__ */ diff -Nru nodejs-0.11.13/deps/uv/test/test-pipe-close-stdout-read-stdin.c nodejs-0.11.15/deps/uv/test/test-pipe-close-stdout-read-stdin.c --- nodejs-0.11.13/deps/uv/test/test-pipe-close-stdout-read-stdin.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-pipe-close-stdout-read-stdin.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,105 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _WIN32 + +#include <stdlib.h> +#include <unistd.h> +#include <sys/wait.h> +#include <sys/types.h> + +#include "uv.h" +#include "task.h" + +void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t* buf) +{ + static char buffer[1024]; + + buf->base = buffer; + buf->len = sizeof(buffer); +} + +void read_stdin(uv_stream_t *stream, ssize_t nread, const uv_buf_t* buf) +{ + if (nread < 0) { + uv_close((uv_handle_t*)stream, NULL); + return; + } +} + +/* + * This test is a reproduction of joyent/libuv#1419 . + */ +TEST_IMPL(pipe_close_stdout_read_stdin) { + int r = -1; + int pid; + int fd[2]; + int status; + + r = pipe(fd); + ASSERT(r == 0); + + if ((pid = fork()) == 0) { + /* + * Make the read side of the pipe our stdin. + * The write side will be closed by the parent process. 
+ */ + close(fd[1]); + close(0); + r = dup(fd[0]); + ASSERT(r != -1); + + /* Create a stream that reads from the pipe. */ + uv_pipe_t stdin_pipe; + + r = uv_pipe_init(uv_default_loop(), (uv_pipe_t *)&stdin_pipe, 0); + ASSERT(r == 0); + + r = uv_pipe_open((uv_pipe_t *)&stdin_pipe, 0); + ASSERT(r == 0); + + r = uv_read_start((uv_stream_t *)&stdin_pipe, alloc_buffer, read_stdin); + ASSERT(r == 0); + + /* + * Because the other end of the pipe was closed, there should + * be no event left to process after one run of the event loop. + * Otherwise, it means that events were not processed correctly. + */ + ASSERT(uv_run(uv_default_loop(), UV_RUN_NOWAIT) == 0); + } else { + /* + * Close both ends of the pipe so that the child + * get a POLLHUP event when it tries to read from + * the other end. + */ + close(fd[1]); + close(fd[0]); + + waitpid(pid, &status, 0); + ASSERT(WIFEXITED(status) && WEXITSTATUS(status) == 0); + } + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +#endif /* ifndef _WIN32 */ diff -Nru nodejs-0.11.13/deps/uv/test/test-pipe-getsockname.c nodejs-0.11.15/deps/uv/test/test-pipe-getsockname.c --- nodejs-0.11.13/deps/uv/test/test-pipe-getsockname.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-pipe-getsockname.c 2015-01-20 21:22:17.000000000 +0000 @@ -32,6 +32,8 @@ #ifndef _WIN32 # include <unistd.h> /* close */ +#else +# include <fcntl.h> #endif @@ -120,3 +122,59 @@ #endif } +TEST_IMPL(pipe_getsockname_blocking) { +#ifdef _WIN32 + uv_pipe_t reader; + HANDLE readh, writeh; + int readfd; + char buf1[1024], buf2[1024]; + size_t len1, len2; + int r; + + r = CreatePipe(&readh, &writeh, NULL, 65536); + ASSERT(r != 0); + + r = uv_pipe_init(uv_default_loop(), &reader, 0); + ASSERT(r == 0); + readfd = _open_osfhandle((intptr_t)readh, _O_RDONLY); + ASSERT(r != -1); + r = uv_pipe_open(&reader, readfd); + ASSERT(r == 0); + r = uv_read_start((uv_stream_t*)&reader, NULL, NULL); + ASSERT(r == 0); + Sleep(100); + r = uv_read_stop((uv_stream_t*)&reader); 
+ ASSERT(r == 0); + + len1 = sizeof buf1; + r = uv_pipe_getsockname(&reader, buf1, &len1); + ASSERT(r == 0); + + r = uv_read_start((uv_stream_t*)&reader, NULL, NULL); + ASSERT(r == 0); + Sleep(100); + + len2 = sizeof buf2; + r = uv_pipe_getsockname(&reader, buf2, &len2); + ASSERT(r == 0); + + r = uv_read_stop((uv_stream_t*)&reader); + ASSERT(r == 0); + + ASSERT(len1 == len2); + ASSERT(memcmp(buf1, buf2, len1) == 0); + + close_cb_called = 0; + uv_close((uv_handle_t*)&reader, close_cb); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT(close_cb_called == 1); + + _close(readfd); + CloseHandle(writeh); +#endif + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-poll-closesocket.c nodejs-0.11.15/deps/uv/test/test-poll-closesocket.c --- nodejs-0.11.13/deps/uv/test/test-poll-closesocket.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-poll-closesocket.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,88 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef _WIN32 + +#include <errno.h> + +#include "uv.h" +#include "task.h" + +uv_os_sock_t sock; +uv_poll_t handle; + +static int close_cb_called = 0; + + +static void close_cb(uv_handle_t* h) { + close_cb_called++; +} + + +static void poll_cb(uv_poll_t* h, int status, int events) { + int r; + + ASSERT(status == 0); + ASSERT(h == &handle); + + r = uv_poll_start(&handle, UV_READABLE, poll_cb); + ASSERT(r == 0); + + closesocket(sock); + uv_close((uv_handle_t*) &handle, close_cb); + +} + + +TEST_IMPL(poll_closesocket) { + struct WSAData wsa_data; + int r; + unsigned long on; + struct sockaddr_in addr; + + r = WSAStartup(MAKEWORD(2, 2), &wsa_data); + ASSERT(r == 0); + + sock = socket(AF_INET, SOCK_STREAM, 0); + ASSERT(sock != INVALID_SOCKET); + on = 1; + r = ioctlsocket(sock, FIONBIO, &on); + ASSERT(r == 0); + + addr = uv_ip4_addr("127.0.0.1", TEST_PORT); + + r = connect(sock, (const struct sockaddr*) &addr, sizeof addr); + ASSERT(r != 0); + ASSERT(WSAGetLastError() == WSAEWOULDBLOCK); + + r = uv_poll_init_socket(uv_default_loop(), &handle, sock); + ASSERT(r == 0); + r = uv_poll_start(&handle, UV_WRITABLE, poll_cb); + ASSERT(r == 0); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT(close_cb_called == 1); + + MAKE_VALGRIND_HAPPY(); + return 0; +} +#endif diff -Nru nodejs-0.11.13/deps/uv/test/test-socket-buffer-size.c nodejs-0.11.15/deps/uv/test/test-socket-buffer-size.c --- nodejs-0.11.13/deps/uv/test/test-socket-buffer-size.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-socket-buffer-size.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,77 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "task.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +static uv_udp_t udp; +static uv_tcp_t tcp; +static int close_cb_called; + + +static void close_cb(uv_handle_t* handle) { + close_cb_called++; +} + + +static void check_buffer_size(uv_handle_t* handle) { + int value; + + value = 0; + ASSERT(0 == uv_recv_buffer_size(handle, &value)); + ASSERT(value > 0); + + value = 10000; + ASSERT(0 == uv_recv_buffer_size(handle, &value)); + + value = 0; + ASSERT(0 == uv_recv_buffer_size(handle, &value)); + /* linux sets double the value */ + ASSERT(value == 10000 || value == 20000); +} + + +TEST_IMPL(socket_buffer_size) { + struct sockaddr_in addr; + + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + ASSERT(0 == uv_tcp_init(uv_default_loop(), &tcp)); + ASSERT(0 == uv_tcp_bind(&tcp, (struct sockaddr*) &addr, 0)); + check_buffer_size((uv_handle_t*) &tcp); + uv_close((uv_handle_t*) &tcp, close_cb); + + ASSERT(0 == uv_udp_init(uv_default_loop(), &udp)); + ASSERT(0 == uv_udp_bind(&udp, (struct sockaddr*) &addr, 0)); + check_buffer_size((uv_handle_t*) &udp); + uv_close((uv_handle_t*) &udp, close_cb); + + ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + + ASSERT(close_cb_called == 2); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-spawn.c nodejs-0.11.15/deps/uv/test/test-spawn.c --- nodejs-0.11.13/deps/uv/test/test-spawn.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-spawn.c 2015-01-20 21:22:17.000000000 +0000 @@ -31,6 +31,7 @@ # include <basetyps.h> # endif # include <shellapi.h> +# include <wchar.h> #else # include <unistd.h> #endif @@ -808,6 +809,7 @@ TEST_IMPL(argument_escaping) { const WCHAR* test_str[] = { + L"", L"HelloWorld", L"Hello World", L"Hello\"World", @@ -896,45 +898,110 @@ "SYSTEM=ROOT", /* substring of a supplied var name */ "SYSTEMROOTED=OMG", /* supplied var name is a substring */ "TEMP=C:\\Temp", + "INVALID", "BAZ=QUX", + 
"B_Z=QUX", + "B\xe2\x82\xacZ=QUX", + "B\xf0\x90\x80\x82Z=QUX", + "B\xef\xbd\xa1Z=QUX", + "B\xf0\xa3\x91\x96Z=QUX", + "BAZ", /* repeat, invalid variable */ NULL }; - - WCHAR expected[512]; - WCHAR* ptr = expected; + WCHAR* wenvironment[] = { + L"BAZ=QUX", + L"B_Z=QUX", + L"B\x20acZ=QUX", + L"B\xd800\xdc02Z=QUX", + L"B\xd84d\xdc56Z=QUX", + L"B\xff61Z=QUX", + L"FOO=BAR", + L"SYSTEM=ROOT", /* substring of a supplied var name */ + L"SYSTEMROOTED=OMG", /* supplied var name is a substring */ + L"TEMP=C:\\Temp", + }; + WCHAR* from_env[] = { + /* list should be kept in sync with list + * in process.c, minus variables in wenvironment */ + L"HOMEDRIVE", + L"HOMEPATH", + L"LOGONSERVER", + L"PATH", + L"USERDOMAIN", + L"USERNAME", + L"USERPROFILE", + L"SYSTEMDRIVE", + L"SYSTEMROOT", + L"WINDIR", + /* test for behavior in the absence of a + * required-environment variable: */ + L"ZTHIS_ENV_VARIABLE_DOES_NOT_EXIST", + }; + int found_in_loc_env[ARRAY_SIZE(wenvironment)] = {0}; + int found_in_usr_env[ARRAY_SIZE(from_env)] = {0}; + WCHAR *expected[ARRAY_SIZE(from_env)]; int result; WCHAR* str; + WCHAR* prev; WCHAR* env; - for (i = 0; i < sizeof(environment) / sizeof(environment[0]) - 1; i++) { - ptr += uv_utf8_to_utf16(environment[i], - ptr, - expected + sizeof(expected) - ptr); + for (i = 0; i < ARRAY_SIZE(from_env); i++) { + /* copy expected additions to environment locally */ + size_t len = GetEnvironmentVariableW(from_env[i], NULL, 0); + if (len == 0) { + found_in_usr_env[i] = 1; + str = malloc(1 * sizeof(WCHAR)); + *str = 0; + expected[i] = str; + } else { + size_t name_len = wcslen(from_env[i]); + str = malloc((name_len+1+len) * sizeof(WCHAR)); + wmemcpy(str, from_env[i], name_len); + expected[i] = str; + str += name_len; + *str++ = L'='; + GetEnvironmentVariableW(from_env[i], str, len); + } } - memcpy(ptr, L"SYSTEMROOT=", sizeof(L"SYSTEMROOT=")); - ptr += sizeof(L"SYSTEMROOT=")/sizeof(WCHAR) - 1; - ptr += GetEnvironmentVariableW(L"SYSTEMROOT", - ptr, - expected + 
sizeof(expected) - ptr); - ++ptr; - - memcpy(ptr, L"SYSTEMDRIVE=", sizeof(L"SYSTEMDRIVE=")); - ptr += sizeof(L"SYSTEMDRIVE=")/sizeof(WCHAR) - 1; - ptr += GetEnvironmentVariableW(L"SYSTEMDRIVE", - ptr, - expected + sizeof(expected) - ptr); - ++ptr; - *ptr = '\0'; - result = make_program_env(environment, &env); ASSERT(result == 0); - for (str = env; *str; str += wcslen(str) + 1) { - wprintf(L"%s\n", str); + for (str = env, prev = NULL; *str; prev = str, str += wcslen(str) + 1) { + int found = 0; +#if 0 + _cputws(str); + putchar('\n'); +#endif + for (i = 0; i < ARRAY_SIZE(wenvironment) && !found; i++) { + if (!wcscmp(str, wenvironment[i])) { + ASSERT(!found_in_loc_env[i]); + found_in_loc_env[i] = 1; + found = 1; + } + } + for (i = 0; i < ARRAY_SIZE(expected) && !found; i++) { + if (!wcscmp(str, expected[i])) { + ASSERT(!found_in_usr_env[i]); + found_in_usr_env[i] = 1; + found = 1; + } + } + if (prev) { /* verify sort order -- requires Vista */ +#if _WIN32_WINNT >= 0x0600 + ASSERT(CompareStringOrdinal(prev, -1, str, -1, TRUE) == 1); +#endif + } + ASSERT(found); /* verify that we expected this variable */ } - ASSERT(wcscmp(expected, env) == 0); + /* verify that we found all expected variables */ + for (i = 0; i < ARRAY_SIZE(wenvironment); i++) { + ASSERT(found_in_loc_env[i]); + } + for (i = 0; i < ARRAY_SIZE(expected); i++) { + ASSERT(found_in_usr_env[i]); + } return 0; } @@ -1224,3 +1291,40 @@ return 0; } #endif /* !_WIN32 */ + +TEST_IMPL(spawn_reads_child_path) { + int r; + int len; + char file[64]; + char path[1024]; + char *env[2] = {path, NULL}; + + /* Set up the process, but make sure that the file to run is relative and */ + /* requires a lookup into PATH */ + init_process_options("spawn_helper1", exit_cb); + + /* Set up the PATH env variable */ + for (len = strlen(exepath); + exepath[len - 1] != '/' && exepath[len - 1] != '\\'; + len--); + strcpy(file, exepath + len); + exepath[len] = 0; + strcpy(path, "PATH="); + strcpy(path + 5, exepath); + + options.file = 
file; + options.args[0] = file; + options.env = env; + + r = uv_spawn(uv_default_loop(), &process, &options); + ASSERT(r == 0); + + r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); + ASSERT(r == 0); + + ASSERT(exit_cb_called == 1); + ASSERT(close_cb_called == 1); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-tcp-close-accept.c nodejs-0.11.15/deps/uv/test/test-tcp-close-accept.c --- nodejs-0.11.13/deps/uv/test/test-tcp-close-accept.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-tcp-close-accept.c 2015-01-20 21:22:17.000000000 +0000 @@ -19,6 +19,9 @@ * IN THE SOFTWARE. */ +/* this test is Unix only */ +#ifndef _WIN32 + #include "uv.h" #include "task.h" @@ -181,3 +184,5 @@ MAKE_VALGRIND_HAPPY(); return 0; } + +#endif /* !_WIN32 */ diff -Nru nodejs-0.11.13/deps/uv/test/test-tcp-try-write.c nodejs-0.11.15/deps/uv/test/test-tcp-try-write.c --- nodejs-0.11.13/deps/uv/test/test-tcp-try-write.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-tcp-try-write.c 2015-01-20 21:22:17.000000000 +0000 @@ -54,21 +54,19 @@ static void connect_cb(uv_connect_t* req, int status) { - static char zeroes[1024]; int r; uv_buf_t buf; ASSERT(status == 0); connect_cb_called++; do { - buf = uv_buf_init(zeroes, sizeof(zeroes)); + buf = uv_buf_init("PING", 4); r = uv_try_write((uv_stream_t*) &client, &buf, 1); - ASSERT(r >= 0); - bytes_written += r; - - /* Partial write */ - if (r != (int) sizeof(zeroes)) + ASSERT(r > 0 || r == UV_EAGAIN); + if (r > 0) { + bytes_written += r; break; + } } while (1); uv_close((uv_handle_t*) &client, close_cb); } diff -Nru nodejs-0.11.13/deps/uv/test/test-tcp-write-after-connect.c nodejs-0.11.15/deps/uv/test/test-tcp-write-after-connect.c --- nodejs-0.11.13/deps/uv/test/test-tcp-write-after-connect.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-tcp-write-after-connect.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +/* Copyright Joyent, Inc. 
and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _WIN32 + +#include "uv.h" +#include "task.h" + +uv_loop_t loop; +uv_tcp_t tcp_client; +uv_connect_t connection_request; +uv_write_t write_request; +uv_buf_t buf = { "HELLO", 4 }; + + +static void write_cb(uv_write_t *req, int status) { + ASSERT(status == UV_ECANCELED); + uv_close((uv_handle_t*) req->handle, NULL); +} + + +static void connect_cb(uv_connect_t *req, int status) { + ASSERT(status == UV_ECONNREFUSED); +} + + +TEST_IMPL(tcp_write_after_connect) { + struct sockaddr_in sa; + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &sa)); + ASSERT(0 == uv_loop_init(&loop)); + ASSERT(0 == uv_tcp_init(&loop, &tcp_client)); + + ASSERT(0 == uv_tcp_connect(&connection_request, + &tcp_client, + (const struct sockaddr *) + &sa, + connect_cb)); + + ASSERT(0 == uv_write(&write_request, + (uv_stream_t *)&tcp_client, + &buf, 1, + write_cb)); + + uv_run(&loop, UV_RUN_DEFAULT); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +#endif diff -Nru nodejs-0.11.13/deps/uv/test/test-tcp-write-queue-order.c nodejs-0.11.15/deps/uv/test/test-tcp-write-queue-order.c --- nodejs-0.11.13/deps/uv/test/test-tcp-write-queue-order.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-tcp-write-queue-order.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,137 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "uv.h" +#include "task.h" + +#define REQ_COUNT 10000 + +static uv_timer_t timer; +static uv_tcp_t server; +static uv_tcp_t client; +static uv_tcp_t incoming; +static int connect_cb_called; +static int close_cb_called; +static int connection_cb_called; +static int write_callbacks; +static int write_cancelled_callbacks; +static int write_error_callbacks; + +static uv_write_t write_requests[REQ_COUNT]; + + +static void close_cb(uv_handle_t* handle) { + close_cb_called++; +} + +void timer_cb(uv_timer_t* handle) { + uv_close((uv_handle_t*) &client, close_cb); + uv_close((uv_handle_t*) &server, close_cb); + uv_close((uv_handle_t*) &incoming, close_cb); +} + +void write_cb(uv_write_t* req, int status) { + if (status == 0) + write_callbacks++; + else if (status == UV_ECANCELED) + write_cancelled_callbacks++; + else + write_error_callbacks++; +} + +static void connect_cb(uv_connect_t* req, int status) { + static char base[1024]; + int r; + int i; + uv_buf_t buf; + + ASSERT(status == 0); + connect_cb_called++; + + buf = uv_buf_init(base, sizeof(base)); + + for (i = 0; i < REQ_COUNT; i++) { + r = uv_write(&write_requests[i], + req->handle, + &buf, + 1, + write_cb); + ASSERT(r == 0); + } +} + + +static void connection_cb(uv_stream_t* tcp, int status) { + ASSERT(status == 0); + + ASSERT(0 == uv_tcp_init(tcp->loop, &incoming)); + ASSERT(0 == uv_accept(tcp, (uv_stream_t*) &incoming)); + + connection_cb_called++; +} + 
+ +static void start_server(void) { + struct sockaddr_in addr; + + ASSERT(0 == uv_ip4_addr("0.0.0.0", TEST_PORT, &addr)); + + ASSERT(0 == uv_tcp_init(uv_default_loop(), &server)); + ASSERT(0 == uv_tcp_bind(&server, (struct sockaddr*) &addr, 0)); + ASSERT(0 == uv_listen((uv_stream_t*) &server, 128, connection_cb)); +} + + +TEST_IMPL(tcp_write_queue_order) { + uv_connect_t connect_req; + struct sockaddr_in addr; + + start_server(); + + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + ASSERT(0 == uv_tcp_init(uv_default_loop(), &client)); + ASSERT(0 == uv_tcp_connect(&connect_req, + &client, + (struct sockaddr*) &addr, + connect_cb)); + + ASSERT(0 == uv_timer_init(uv_default_loop(), &timer)); + ASSERT(0 == uv_timer_start(&timer, timer_cb, 100, 0)); + + ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + + ASSERT(connect_cb_called == 1); + ASSERT(connection_cb_called == 1); + ASSERT(write_callbacks > 0); + ASSERT(write_cancelled_callbacks > 0); + ASSERT(write_callbacks + + write_error_callbacks + + write_cancelled_callbacks == REQ_COUNT); + ASSERT(close_cb_called == 3); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-thread-equal.c nodejs-0.11.15/deps/uv/test/test-thread-equal.c --- nodejs-0.11.13/deps/uv/test/test-thread-equal.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-thread-equal.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "task.h" + +uv_thread_t main_thread_id; +uv_thread_t subthreads[2]; + +static void check_thread(void* arg) { + uv_thread_t *thread_id = arg; + uv_thread_t self_id = uv_thread_self(); + ASSERT(uv_thread_equal(&main_thread_id, &self_id) == 0); + *thread_id = uv_thread_self(); +} + +TEST_IMPL(thread_equal) { + uv_thread_t threads[2]; + main_thread_id = uv_thread_self(); + ASSERT(0 != uv_thread_equal(&main_thread_id, &main_thread_id)); + ASSERT(0 == uv_thread_create(threads + 0, check_thread, subthreads + 0)); + ASSERT(0 == uv_thread_create(threads + 1, check_thread, subthreads + 1)); + ASSERT(0 == uv_thread_join(threads + 0)); + ASSERT(0 == uv_thread_join(threads + 1)); + ASSERT(0 == uv_thread_equal(subthreads + 0, subthreads + 1)); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-threadpool-cancel.c nodejs-0.11.15/deps/uv/test/test-threadpool-cancel.c --- nodejs-0.11.13/deps/uv/test/test-threadpool-cancel.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-threadpool-cancel.c 2015-01-20 21:22:17.000000000 +0000 @@ -46,7 +46,6 @@ static unsigned done_cb_called; static unsigned done2_cb_called; static unsigned timer_cb_called; -static unsigned getaddrinfo_cb_called; static void work_cb(uv_work_t* req) { @@ -125,7 +124,16 @@ ASSERT(status == UV_EAI_CANCELED); ASSERT(res == NULL); uv_freeaddrinfo(res); /* Should not crash. 
*/ - getaddrinfo_cb_called++; +} + + +static void getnameinfo_cb(uv_getnameinfo_t* handle, + int status, + const char* hostname, + const char* service) { + ASSERT(status == UV_EAI_CANCELED); + ASSERT(hostname == NULL); + ASSERT(service == NULL); } @@ -202,6 +210,44 @@ } +TEST_IMPL(threadpool_cancel_getnameinfo) { + uv_getnameinfo_t reqs[4]; + struct sockaddr_in addr4; + struct cancel_info ci; + uv_loop_t* loop; + int r; + + r = uv_ip4_addr("127.0.0.1", 80, &addr4); + ASSERT(r == 0); + + INIT_CANCEL_INFO(&ci, reqs); + loop = uv_default_loop(); + saturate_threadpool(); + + r = uv_getnameinfo(loop, reqs + 0, getnameinfo_cb, (const struct sockaddr*)&addr4, 0); + ASSERT(r == 0); + + r = uv_getnameinfo(loop, reqs + 1, getnameinfo_cb, (const struct sockaddr*)&addr4, 0); + ASSERT(r == 0); + + r = uv_getnameinfo(loop, reqs + 2, getnameinfo_cb, (const struct sockaddr*)&addr4, 0); + ASSERT(r == 0); + + r = uv_getnameinfo(loop, reqs + 3, getnameinfo_cb, (const struct sockaddr*)&addr4, 0); + ASSERT(r == 0); + + ASSERT(0 == uv_timer_init(loop, &ci.timer_handle)); + ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0)); + ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); + ASSERT(1 == timer_cb_called); + + cleanup_threadpool(); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + + TEST_IMPL(threadpool_cancel_work) { struct cancel_info ci; uv_work_t reqs[16]; @@ -255,7 +301,7 @@ ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb)); ASSERT(0 == uv_fs_open(loop, reqs + n++, "/", 0, 0, fs_cb)); ASSERT(0 == uv_fs_read(loop, reqs + n++, 0, NULL, 0, 0, fs_cb)); - ASSERT(0 == uv_fs_readdir(loop, reqs + n++, "/", 0, fs_cb)); + ASSERT(0 == uv_fs_scandir(loop, reqs + n++, "/", 0, fs_cb)); ASSERT(0 == uv_fs_readlink(loop, reqs + n++, "/", fs_cb)); ASSERT(0 == uv_fs_rename(loop, reqs + n++, "/", "/", fs_cb)); ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb)); diff -Nru nodejs-0.11.13/deps/uv/test/test-timer.c nodejs-0.11.15/deps/uv/test/test-timer.c --- 
nodejs-0.11.13/deps/uv/test/test-timer.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-timer.c 2015-01-20 21:22:17.000000000 +0000 @@ -290,3 +290,14 @@ MAKE_VALGRIND_HAPPY(); return 0; } + + +TEST_IMPL(timer_null_callback) { + uv_timer_t handle; + + ASSERT(0 == uv_timer_init(uv_default_loop(), &handle)); + ASSERT(UV_EINVAL == uv_timer_start(&handle, NULL, 100, 100)); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-tty.c nodejs-0.11.15/deps/uv/test/test-tty.c --- nodejs-0.11.13/deps/uv/test/test-tty.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-tty.c 2015-01-20 21:22:17.000000000 +0000 @@ -96,6 +96,13 @@ printf("width=%d height=%d\n", width, height); + if (width == 0 && height == 0) { + /* Some environments such as containers or Jenkins behave like this + * sometimes */ + MAKE_VALGRIND_HAPPY(); + return TEST_SKIP; + } + /* * Is it a safe assumption that most people have terminals larger than * 10x10? 
diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-ipv6.c nodejs-0.11.15/deps/uv/test/test-udp-ipv6.c --- nodejs-0.11.13/deps/uv/test/test-udp-ipv6.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-ipv6.c 2015-01-20 21:22:17.000000000 +0000 @@ -147,12 +147,19 @@ TEST_IMPL(udp_dual_stack) { +#if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) + RETURN_SKIP("dual stack not enabled by default in this OS."); +#else do_test(ipv6_recv_ok, 0); ASSERT(recv_cb_called == 1); ASSERT(send_cb_called == 1); return 0; +#endif } diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-multicast-interface6.c nodejs-0.11.15/deps/uv/test/test-udp-multicast-interface6.c --- nodejs-0.11.13/deps/uv/test/test-udp-multicast-interface6.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-multicast-interface6.c 2015-01-20 21:22:17.000000000 +0000 @@ -69,7 +69,7 @@ r = uv_udp_bind(&server, (const struct sockaddr*)&baddr, 0); ASSERT(r == 0); -#if defined(__APPLE__) +#if defined(__APPLE__) || defined(__FreeBSD__) r = uv_udp_set_multicast_interface(&server, "::1%lo0"); #else r = uv_udp_set_multicast_interface(&server, NULL); diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-multicast-interface.c nodejs-0.11.15/deps/uv/test/test-udp-multicast-interface.c --- nodejs-0.11.13/deps/uv/test/test-udp-multicast-interface.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-multicast-interface.c 2015-01-20 21:22:17.000000000 +0000 @@ -91,6 +91,9 @@ ASSERT(sv_send_cb_called == 1); ASSERT(close_cb_called == 1); + ASSERT(client.send_queue_size == 0); + ASSERT(server.send_queue_size == 0); + MAKE_VALGRIND_HAPPY(); return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-open.c nodejs-0.11.15/deps/uv/test/test-udp-open.c --- nodejs-0.11.13/deps/uv/test/test-udp-open.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-open.c 2015-01-20 21:22:17.000000000 +0000 
@@ -159,6 +159,8 @@ ASSERT(send_cb_called == 1); ASSERT(close_cb_called == 1); + ASSERT(client.send_queue_size == 0); + MAKE_VALGRIND_HAPPY(); return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-options.c nodejs-0.11.15/deps/uv/test/test-udp-options.c --- nodejs-0.11.13/deps/uv/test/test-udp-options.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-options.c 2015-01-20 21:22:17.000000000 +0000 @@ -86,3 +86,25 @@ MAKE_VALGRIND_HAPPY(); return 0; } + + +TEST_IMPL(udp_no_autobind) { + uv_loop_t* loop; + uv_udp_t h; + + loop = uv_default_loop(); + + ASSERT(0 == uv_udp_init(loop, &h)); + ASSERT(UV_EBADF == uv_udp_set_multicast_ttl(&h, 32)); + ASSERT(UV_EBADF == uv_udp_set_broadcast(&h, 1)); + ASSERT(UV_EBADF == uv_udp_set_ttl(&h, 1)); + ASSERT(UV_EBADF == uv_udp_set_multicast_loop(&h, 1)); + ASSERT(UV_EBADF == uv_udp_set_multicast_interface(&h, "0.0.0.0")); + + uv_close((uv_handle_t*) &h, NULL); + + ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-send-and-recv.c nodejs-0.11.15/deps/uv/test/test-udp-send-and-recv.c --- nodejs-0.11.13/deps/uv/test/test-udp-send-and-recv.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-send-and-recv.c 2015-01-20 21:22:17.000000000 +0000 @@ -206,6 +206,9 @@ ASSERT(sv_recv_cb_called == 1); ASSERT(close_cb_called == 2); + ASSERT(client.send_queue_size == 0); + ASSERT(server.send_queue_size == 0); + MAKE_VALGRIND_HAPPY(); return 0; } diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-send-immediate.c nodejs-0.11.15/deps/uv/test/test-udp-send-immediate.c --- nodejs-0.11.13/deps/uv/test/test-udp-send-immediate.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-send-immediate.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,148 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "task.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define CHECK_HANDLE(handle) \ + ASSERT((uv_udp_t*)(handle) == &server || (uv_udp_t*)(handle) == &client) + +static uv_udp_t server; +static uv_udp_t client; + +static int cl_send_cb_called; +static int sv_recv_cb_called; +static int close_cb_called; + + +static void alloc_cb(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf) { + static char slab[65536]; + CHECK_HANDLE(handle); + ASSERT(suggested_size <= sizeof(slab)); + buf->base = slab; + buf->len = sizeof(slab); +} + + +static void close_cb(uv_handle_t* handle) { + CHECK_HANDLE(handle); + ASSERT(1 == uv_is_closing(handle)); + close_cb_called++; +} + + +static void cl_send_cb(uv_udp_send_t* req, int status) { + ASSERT(req != NULL); + ASSERT(status == 0); + CHECK_HANDLE(req->handle); + + cl_send_cb_called++; +} + + +static void sv_recv_cb(uv_udp_t* handle, + ssize_t nread, + const uv_buf_t* rcvbuf, + const struct sockaddr* addr, + unsigned flags) { + if (nread < 0) { + ASSERT(0 && "unexpected error"); + } + + if (nread == 0) { + /* Returning unused buffer */ + /* Don't count towards sv_recv_cb_called */ + ASSERT(addr == NULL); + return; + } + + CHECK_HANDLE(handle); + ASSERT(flags == 0); + + ASSERT(addr != NULL); + ASSERT(nread == 4); + ASSERT(memcmp("PING", rcvbuf->base, nread) == 0 || + memcmp("PANG", rcvbuf->base, nread) == 0); + + if (++sv_recv_cb_called == 2) { + uv_close((uv_handle_t*) &server, close_cb); + uv_close((uv_handle_t*) &client, close_cb); + } +} + + +TEST_IMPL(udp_send_immediate) { + struct sockaddr_in addr; + uv_udp_send_t req1, req2; + uv_buf_t buf; + int r; + + ASSERT(0 == uv_ip4_addr("0.0.0.0", TEST_PORT, &addr)); + + r = uv_udp_init(uv_default_loop(), &server); + ASSERT(r == 0); + + r = uv_udp_bind(&server, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + + r = uv_udp_recv_start(&server, alloc_cb, sv_recv_cb); + ASSERT(r == 0); + + ASSERT(0 == 
uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_udp_init(uv_default_loop(), &client); + ASSERT(r == 0); + + /* client sends "PING", then "PANG" */ + buf = uv_buf_init("PING", 4); + + r = uv_udp_send(&req1, + &client, + &buf, + 1, + (const struct sockaddr*) &addr, + cl_send_cb); + ASSERT(r == 0); + + buf = uv_buf_init("PANG", 4); + + r = uv_udp_send(&req2, + &client, + &buf, + 1, + (const struct sockaddr*) &addr, + cl_send_cb); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT(cl_send_cb_called == 2); + ASSERT(sv_recv_cb_called == 2); + ASSERT(close_cb_called == 2); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-send-unreachable.c nodejs-0.11.15/deps/uv/test/test-udp-send-unreachable.c --- nodejs-0.11.13/deps/uv/test/test-udp-send-unreachable.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-send-unreachable.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,150 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "task.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define CHECK_HANDLE(handle) \ + ASSERT((uv_udp_t*)(handle) == &client) + +static uv_udp_t client; +static uv_timer_t timer; + +static int send_cb_called; +static int recv_cb_called; +static int close_cb_called; +static int alloc_cb_called; +static int timer_cb_called; + + +static void alloc_cb(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf) { + static char slab[65536]; + CHECK_HANDLE(handle); + ASSERT(suggested_size <= sizeof(slab)); + buf->base = slab; + buf->len = sizeof(slab); + alloc_cb_called++; +} + + +static void close_cb(uv_handle_t* handle) { + ASSERT(1 == uv_is_closing(handle)); + close_cb_called++; +} + + +static void send_cb(uv_udp_send_t* req, int status) { + ASSERT(req != NULL); + ASSERT(status == 0); + CHECK_HANDLE(req->handle); + send_cb_called++; +} + + +static void recv_cb(uv_udp_t* handle, + ssize_t nread, + const uv_buf_t* rcvbuf, + const struct sockaddr* addr, + unsigned flags) { + CHECK_HANDLE(handle); + recv_cb_called++; + + if (nread < 0) { + ASSERT(0 && "unexpected error"); + } else if (nread == 0) { + /* Returning unused buffer */ + ASSERT(addr == NULL); + } else { + ASSERT(addr != NULL); + } +} + + +static void timer_cb(uv_timer_t* h) { + ASSERT(h == &timer); + timer_cb_called++; + uv_close((uv_handle_t*) &client, close_cb); + uv_close((uv_handle_t*) h, close_cb); +} + + +TEST_IMPL(udp_send_unreachable) { + struct sockaddr_in addr; + struct sockaddr_in addr2; + uv_udp_send_t req1, req2; + uv_buf_t buf; + int r; + + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT_2, &addr2)); + + r = 
uv_timer_init( uv_default_loop(), &timer ); + ASSERT(r == 0); + + r = uv_timer_start( &timer, timer_cb, 1000, 0 ); + ASSERT(r == 0); + + r = uv_udp_init(uv_default_loop(), &client); + ASSERT(r == 0); + + r = uv_udp_bind(&client, (const struct sockaddr*) &addr2, 0); + ASSERT(r == 0); + + r = uv_udp_recv_start(&client, alloc_cb, recv_cb); + ASSERT(r == 0); + + /* client sends "PING", then "PANG" */ + buf = uv_buf_init("PING", 4); + + r = uv_udp_send(&req1, + &client, + &buf, + 1, + (const struct sockaddr*) &addr, + send_cb); + ASSERT(r == 0); + + buf = uv_buf_init("PANG", 4); + + r = uv_udp_send(&req2, + &client, + &buf, + 1, + (const struct sockaddr*) &addr, + send_cb); + ASSERT(r == 0); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT(send_cb_called == 2); + ASSERT(recv_cb_called == alloc_cb_called); + ASSERT(timer_cb_called == 1); + ASSERT(close_cb_called == 2); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff -Nru nodejs-0.11.13/deps/uv/test/test-udp-try-send.c nodejs-0.11.15/deps/uv/test/test-udp-try-send.c --- nodejs-0.11.13/deps/uv/test/test-udp-try-send.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-udp-try-send.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,133 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "task.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#ifdef _WIN32 + +TEST_IMPL(udp_try_send) { + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +#else /* !_WIN32 */ + +#define CHECK_HANDLE(handle) \ + ASSERT((uv_udp_t*)(handle) == &server || (uv_udp_t*)(handle) == &client) + +static uv_udp_t server; +static uv_udp_t client; + +static int sv_recv_cb_called; + +static int close_cb_called; + + +static void alloc_cb(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf) { + static char slab[65536]; + CHECK_HANDLE(handle); + ASSERT(suggested_size <= sizeof(slab)); + buf->base = slab; + buf->len = sizeof(slab); +} + + +static void close_cb(uv_handle_t* handle) { + CHECK_HANDLE(handle); + ASSERT(uv_is_closing(handle)); + close_cb_called++; +} + + +static void sv_recv_cb(uv_udp_t* handle, + ssize_t nread, + const uv_buf_t* rcvbuf, + const struct sockaddr* addr, + unsigned flags) { + ASSERT(nread > 0); + + if (nread == 0) { + ASSERT(addr == NULL); + return; + } + + ASSERT(nread == 4); + ASSERT(addr != NULL); + + ASSERT(memcmp("EXIT", rcvbuf->base, nread) == 0); + uv_close((uv_handle_t*) handle, close_cb); + uv_close((uv_handle_t*) &client, close_cb); + + sv_recv_cb_called++; +} + + +TEST_IMPL(udp_try_send) { + struct sockaddr_in addr; + static char buffer[64 * 1024]; + uv_buf_t buf; + int r; + + ASSERT(0 == uv_ip4_addr("0.0.0.0", TEST_PORT, &addr)); + + r = uv_udp_init(uv_default_loop(), &server); + ASSERT(r == 0); + + r = 
uv_udp_bind(&server, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + + r = uv_udp_recv_start(&server, alloc_cb, sv_recv_cb); + ASSERT(r == 0); + + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_udp_init(uv_default_loop(), &client); + ASSERT(r == 0); + + buf = uv_buf_init(buffer, sizeof(buffer)); + r = uv_udp_try_send(&client, &buf, 1, (const struct sockaddr*) &addr); + ASSERT(r == UV_EMSGSIZE); + + buf = uv_buf_init("EXIT", 4); + r = uv_udp_try_send(&client, &buf, 1, (const struct sockaddr*) &addr); + ASSERT(r == 4); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT(close_cb_called == 2); + ASSERT(sv_recv_cb_called == 1); + + ASSERT(client.send_queue_size == 0); + ASSERT(server.send_queue_size == 0); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +#endif /* !_WIN32 */ diff -Nru nodejs-0.11.13/deps/uv/test/test-watcher-cross-stop.c nodejs-0.11.15/deps/uv/test/test-watcher-cross-stop.c --- nodejs-0.11.13/deps/uv/test/test-watcher-cross-stop.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/test/test-watcher-cross-stop.c 2015-01-20 21:22:17.000000000 +0000 @@ -91,11 +91,11 @@ for (i = 0; i < ARRAY_SIZE(sockets); i++) uv_close((uv_handle_t*) &sockets[i], close_cb); - ASSERT(0 < recv_cb_called && recv_cb_called <= ARRAY_SIZE(sockets)); - ASSERT(ARRAY_SIZE(sockets) == send_cb_called); + ASSERT(recv_cb_called > 0); uv_run(loop, UV_RUN_DEFAULT); + ASSERT(ARRAY_SIZE(sockets) == send_cb_called); ASSERT(ARRAY_SIZE(sockets) == close_cb_called); MAKE_VALGRIND_HAPPY(); diff -Nru nodejs-0.11.13/deps/uv/uv.gyp nodejs-0.11.15/deps/uv/uv.gyp --- nodejs-0.11.13/deps/uv/uv.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/uv.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -1,14 +1,4 @@ { - 'variables': { - 'uv_use_dtrace%': 'false', - # uv_parent_path is the relative path to libuv in the parent project - # this is only relevant when dtrace is enabled and libuv is a child project - # as it's necessary to correctly locate the 
object files for post - # processing. - # XXX gyp is quite sensitive about paths with double / they don't normalize - 'uv_parent_path': '/', - }, - 'target_defaults': { 'conditions': [ ['OS != "win"', { @@ -26,6 +16,30 @@ ], }], ], + 'xcode_settings': { + 'conditions': [ + [ 'clang==1', { + 'WARNING_CFLAGS': [ + '-Wall', + '-Wextra', + '-Wno-unused-parameter', + '-Wno-dollar-in-identifier-extension' + ]}, { + 'WARNING_CFLAGS': [ + '-Wall', + '-Wextra', + '-Wno-unused-parameter' + ]} + ] + ], + 'OTHER_LDFLAGS': [ + ], + 'OTHER_CFLAGS': [ + '-g', + '--std=gnu89', + '-pedantic' + ], + } }, 'targets': [ @@ -53,19 +67,18 @@ }], ], }, - 'defines': [ - 'HAVE_CONFIG_H' - ], 'sources': [ 'common.gypi', 'include/uv.h', 'include/tree.h', 'include/uv-errno.h', + 'include/uv-threadpool.h', 'include/uv-version.h', 'src/fs-poll.c', 'src/heap-inl.h', 'src/inet.c', 'src/queue.h', + 'src/threadpool.c', 'src/uv-common.c', 'src/uv-common.h', 'src/version.c' @@ -86,6 +99,7 @@ 'src/win/fs.c', 'src/win/fs-event.c', 'src/win/getaddrinfo.c', + 'src/win/getnameinfo.c', 'src/win/handle.c', 'src/win/handle-inl.h', 'src/win/internal.h', @@ -102,7 +116,6 @@ 'src/win/stream-inl.h', 'src/win/tcp.c', 'src/win/tty.c', - 'src/win/threadpool.c', 'src/win/timer.c', 'src/win/udp.c', 'src/win/util.c', @@ -113,11 +126,11 @@ ], 'link_settings': { 'libraries': [ - '-ladvapi32.lib', - '-liphlpapi.lib', - '-lpsapi.lib', - '-lshell32.lib', - '-lws2_32.lib' + '-ladvapi32', + '-liphlpapi', + '-lpsapi', + '-lshell32', + '-lws2_32' ], }, }, { # Not Windows i.e. 
POSIX @@ -135,12 +148,14 @@ 'include/uv-sunos.h', 'include/uv-darwin.h', 'include/uv-bsd.h', + 'include/uv-aix.h', 'src/unix/async.c', 'src/unix/atomic-ops.h', 'src/unix/core.c', 'src/unix/dl.c', 'src/unix/fs.c', 'src/unix/getaddrinfo.c', + 'src/unix/getnameinfo.c', 'src/unix/internal.h', 'src/unix/loop.c', 'src/unix/loop-watcher.c', @@ -152,7 +167,6 @@ 'src/unix/stream.c', 'src/unix/tcp.c', 'src/unix/thread.c', - 'src/unix/threadpool.c', 'src/unix/timer.c', 'src/unix/tty.c', 'src/unix/udp.c', @@ -175,8 +189,8 @@ ['uv_library=="shared_library" and OS!="mac"', { 'link_settings': { # Must correspond with UV_VERSION_MAJOR and UV_VERSION_MINOR - # in src/version.c - 'libraries': [ '-Wl,-soname,libuv.so.0.11' ], + # in include/uv-version.h + 'libraries': [ '-Wl,-soname,libuv.so.1.0' ], }, }], ], @@ -192,6 +206,7 @@ ], 'defines': [ '_DARWIN_USE_64_BIT_INODE=1', + '_DARWIN_UNLIMITED_SELECT=1', ] }], [ 'OS!="mac"', { @@ -243,6 +258,7 @@ 'defines': [ '_ALL_SOURCE', '_XOPEN_SOURCE=500', + '_LINUX_SOURCE_COMPAT', ], 'link_settings': { 'libraries': [ @@ -270,20 +286,6 @@ ['uv_library=="shared_library"', { 'defines': [ 'BUILDING_UV_SHARED=1' ] }], - # FIXME(bnoordhuis or tjfontaine) Unify this, it's extremely ugly. 
- ['uv_use_dtrace=="true"', { - 'defines': [ 'HAVE_DTRACE=1' ], - 'dependencies': [ 'uv_dtrace_header' ], - 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ], - 'conditions': [ - [ 'OS not in "mac linux"', { - 'sources': [ 'src/unix/dtrace.c' ], - }], - [ 'OS=="linux"', { - 'sources': [ '<(SHARED_INTERMEDIATE_DIR)/dtrace.o' ] - }], - ], - }], ] }, @@ -308,6 +310,7 @@ 'test/test-close-order.c', 'test/test-connection-fail.c', 'test/test-cwd-and-chdir.c', + 'test/test-default-loop-close.c', 'test/test-delayed-accept.c', 'test/test-error.c', 'test/test-embed.c', @@ -318,7 +321,9 @@ 'test/test-get-currentexe.c', 'test/test-get-memory.c', 'test/test-getaddrinfo.c', + 'test/test-getnameinfo.c', 'test/test-getsockname.c', + 'test/test-handle-fileno.c', 'test/test-hrtime.c', 'test/test-idle.c', 'test/test-ip6-addr.c', @@ -341,9 +346,11 @@ 'test/test-pipe-getsockname.c', 'test/test-pipe-sendmsg.c', 'test/test-pipe-server-close.c', + 'test/test-pipe-close-stdout-read-stdin.c', 'test/test-platform-output.c', 'test/test-poll.c', 'test/test-poll-close.c', + 'test/test-poll-closesocket.c', 'test/test-process-title.c', 'test/test-ref.c', 'test/test-run-nowait.c', @@ -354,6 +361,7 @@ 'test/test-shutdown-twice.c', 'test/test-signal.c', 'test/test-signal-multiple-loops.c', + 'test/test-socket-buffer-size.c', 'test/test-spawn.c', 'test/test-fs-poll.c', 'test/test-stdio-over-pipes.c', @@ -370,12 +378,15 @@ 'test/test-tcp-connect6-error.c', 'test/test-tcp-open.c', 'test/test-tcp-write-to-half-open-connection.c', + 'test/test-tcp-write-after-connect.c', 'test/test-tcp-writealot.c', 'test/test-tcp-try-write.c', 'test/test-tcp-unexpected-read.c', 'test/test-tcp-read-stop.c', + 'test/test-tcp-write-queue-order.c', 'test/test-threadpool.c', 'test/test-threadpool-cancel.c', + 'test/test-thread-equal.c', 'test/test-mutexes.c', 'test/test-thread.c', 'test/test-barrier.c', @@ -390,6 +401,8 @@ 'test/test-udp-open.c', 'test/test-udp-options.c', 'test/test-udp-send-and-recv.c', + 
'test/test-udp-send-immediate.c', + 'test/test-udp-send-unreachable.c', 'test/test-udp-multicast-join.c', 'test/test-udp-multicast-join6.c', 'test/test-dlerror.c', @@ -398,6 +411,7 @@ 'test/test-ip6-addr.c', 'test/test-udp-multicast-interface.c', 'test/test-udp-multicast-interface6.c', + 'test/test-udp-try-send.c', ], 'conditions': [ [ 'OS=="win"', { @@ -405,7 +419,7 @@ 'test/runner-win.c', 'test/runner-win.h' ], - 'libraries': [ 'ws2_32.lib' ] + 'libraries': [ '-lws2_32' ] }, { # POSIX 'defines': [ '_GNU_SOURCE' ], 'sources': [ @@ -469,7 +483,7 @@ 'test/runner-win.c', 'test/runner-win.h', ], - 'libraries': [ 'ws2_32.lib' ] + 'libraries': [ '-lws2_32' ] }, { # POSIX 'defines': [ '_GNU_SOURCE' ], 'sources': [ @@ -484,60 +498,5 @@ }, }, }, - - { - 'target_name': 'uv_dtrace_header', - 'type': 'none', - 'conditions': [ - [ 'uv_use_dtrace=="true"', { - 'actions': [ - { - 'action_name': 'uv_dtrace_header', - 'inputs': [ 'src/unix/uv-dtrace.d' ], - 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/uv-dtrace.h' ], - 'action': [ 'dtrace', '-h', '-xnolibs', '-s', '<@(_inputs)', - '-o', '<@(_outputs)' ], - }, - ], - }], - ], - }, - - # FIXME(bnoordhuis or tjfontaine) Unify this, it's extremely ugly. 
- { - 'target_name': 'uv_dtrace_provider', - 'type': 'none', - 'conditions': [ - [ 'uv_use_dtrace=="true" and OS not in "mac linux"', { - 'actions': [ - { - 'action_name': 'uv_dtrace_o', - 'inputs': [ - 'src/unix/uv-dtrace.d', - '<(PRODUCT_DIR)/obj.target/libuv<(uv_parent_path)src/unix/core.o', - ], - 'outputs': [ - '<(PRODUCT_DIR)/obj.target/libuv<(uv_parent_path)src/unix/dtrace.o', - ], - 'action': [ 'dtrace', '-G', '-xnolibs', '-s', '<@(_inputs)', - '-o', '<@(_outputs)' ] - } - ] - }], - [ 'uv_use_dtrace=="true" and OS=="linux"', { - 'actions': [ - { - 'action_name': 'uv_dtrace_o', - 'inputs': [ 'src/unix/uv-dtrace.d' ], - 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/dtrace.o' ], - 'action': [ - 'dtrace', '-C', '-G', '-s', '<@(_inputs)', '-o', '<@(_outputs)' - ], - } - ] - }], - ] - }, - ] } diff -Nru nodejs-0.11.13/deps/uv/vcbuild.bat nodejs-0.11.15/deps/uv/vcbuild.bat --- nodejs-0.11.13/deps/uv/vcbuild.bat 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/uv/vcbuild.bat 2015-01-20 21:22:17.000000000 +0000 @@ -51,8 +51,8 @@ set GYP_MSVS_VERSION=2013 goto select-target -@rem Look for Visual Studio 2012 :vc-set-2012 +@rem Look for Visual Studio 2012 if not defined VS110COMNTOOLS goto vc-set-2010 if not exist "%VS110COMNTOOLS%\..\..\vc\vcvarsall.bat" goto vc-set-2010 call "%VS110COMNTOOLS%\..\..\vc\vcvarsall.bat" %vs_toolset% @@ -101,8 +101,8 @@ exit /b 1 :have_gyp -if not defined PYTHON set PYTHON="python" -%PYTHON% gyp_uv.py -Dtarget_arch=%target_arch% -Duv_library=%library% +if not defined PYTHON set PYTHON=python +"%PYTHON%" gyp_uv.py -Dtarget_arch=%target_arch% -Duv_library=%library% if errorlevel 1 goto create-msvs-files-failed if not exist uv.sln goto create-msvs-files-failed echo Project files generated. 
diff -Nru nodejs-0.11.13/deps/v8/AUTHORS nodejs-0.11.15/deps/v8/AUTHORS --- nodejs-0.11.13/deps/v8/AUTHORS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/AUTHORS 2015-01-20 21:22:17.000000000 +0000 @@ -13,6 +13,7 @@ NVIDIA Corporation BlackBerry Limited Opera Software ASA +Intel Corporation Akinori MUSHA <knu@FreeBSD.org> Alexander Botero-Lowry <alexbl@FreeBSD.org> @@ -24,6 +25,7 @@ Bert Belder <bertbelder@gmail.com> Burcu Dogan <burcujdogan@gmail.com> Craig Schlenter <craig.schlenter@gmail.com> +Chunyang Dai <chunyang.dai@intel.com> Daniel Andersson <kodandersson@gmail.com> Daniel James <dnljms@gmail.com> Derek J Conrod <dconrod@codeaurora.org> @@ -64,6 +66,7 @@ Tobias Burnus <burnus@net-b.de> Vincent Belliard <vincent.belliard@arm.com> Vlad Burlik <vladbph@gmail.com> +Weiliang Lin<weiliang.lin@intel.com> Xi Qian <xi.qian@intel.com> Yuqiang Xian <yuqiang.xian@intel.com> Zaheer Ahmad <zahmad@codeaurora.org> diff -Nru nodejs-0.11.13/deps/v8/benchmarks/v8.json nodejs-0.11.15/deps/v8/benchmarks/v8.json --- nodejs-0.11.13/deps/v8/benchmarks/v8.json 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/benchmarks/v8.json 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "path": ["."], + "main": "run.js", + "run_count": 2, + "results_regexp": "^%s: (.+)$", + "benchmarks": [ + {"name": "Richards"}, + {"name": "DeltaBlue"}, + {"name": "Crypto"}, + {"name": "RayTrace"}, + {"name": "EarleyBoyer"}, + {"name": "RegExp"}, + {"name": "Splay"}, + {"name": "NavierStokes"} + ] +} diff -Nru nodejs-0.11.13/deps/v8/build/all.gyp nodejs-0.11.15/deps/v8/build/all.gyp --- nodejs-0.11.13/deps/v8/build/all.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/all.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -10,7 +10,9 @@ 'dependencies': [ '../samples/samples.gyp:*', '../src/d8.gyp:d8', + '../test/base-unittests/base-unittests.gyp:*', '../test/cctest/cctest.gyp:*', + '../test/compiler-unittests/compiler-unittests.gyp:*', ], 'conditions': [ 
['component!="shared_library"', { diff -Nru nodejs-0.11.13/deps/v8/build/android.gypi nodejs-0.11.15/deps/v8/build/android.gypi --- nodejs-0.11.13/deps/v8/build/android.gypi 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/android.gypi 2015-01-20 21:22:17.000000000 +0000 @@ -35,9 +35,6 @@ 'variables': { 'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)', 'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)', - # This is set when building the Android WebView inside the Android build - # system, using the 'android' gyp backend. - 'android_webview_build%': 0, }, 'conditions': [ ['android_ndk_root==""', { @@ -51,7 +48,7 @@ 'android_stlport_libs': '<(android_stlport)/libs', }, { 'variables': { - 'android_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)', + 'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)', 'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/', }, 'android_include': '<(android_sysroot)/usr/include', @@ -64,9 +61,6 @@ # link the NDK one? 'use_system_stlport%': '<(android_webview_build)', 'android_stlport_library': 'stlport_static', - # Copy it out one scope. - 'android_webview_build%': '<(android_webview_build)', - 'OS': 'android', }, # variables 'target_defaults': { 'defines': [ @@ -81,7 +75,12 @@ }, # Release }, # configurations 'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter', - '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions', ], + '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions', + # Note: Using -std=c++0x will define __STRICT_ANSI__, which in + # turn will leave out some template stuff for 'long long'. 
What + # we want is -std=c++11, but this is not supported by GCC 4.6 or + # Xcode 4.2 + '-std=gnu++0x' ], 'target_conditions': [ ['_toolset=="target"', { 'cflags!': [ @@ -179,7 +178,7 @@ '-L<(android_stlport_libs)/mips', ], }], - ['target_arch=="ia32"', { + ['target_arch=="ia32" or target_arch=="x87"', { 'ldflags': [ '-L<(android_stlport_libs)/x86', ], @@ -196,7 +195,7 @@ }], ], }], - ['target_arch=="ia32"', { + ['target_arch=="ia32" or target_arch=="x87"', { # The x86 toolchain currently has problems with stack-protector. 'cflags!': [ '-fstack-protector', @@ -215,6 +214,15 @@ '-fno-stack-protector', ], }], + ['target_arch=="arm64" or target_arch=="x64"', { + # TODO(ulan): Enable PIE for other architectures (crbug.com/373219). + 'cflags': [ + '-fPIE', + ], + 'ldflags': [ + '-pie', + ], + }], ], 'target_conditions': [ ['_type=="executable"', { @@ -257,15 +265,8 @@ }], # _toolset=="target" # Settings for building host targets using the system toolchain. ['_toolset=="host"', { - 'conditions': [ - ['target_arch=="x64"', { - 'cflags': [ '-m64', '-pthread' ], - 'ldflags': [ '-m64', '-pthread' ], - }, { - 'cflags': [ '-m32', '-pthread' ], - 'ldflags': [ '-m32', '-pthread' ], - }], - ], + 'cflags': [ '-pthread' ], + 'ldflags': [ '-pthread' ], 'ldflags!': [ '-Wl,-z,noexecstack', '-Wl,--gc-sections', diff -Nru nodejs-0.11.13/deps/v8/build/detect_v8_host_arch.py nodejs-0.11.15/deps/v8/build/detect_v8_host_arch.py --- nodejs-0.11.13/deps/v8/build/detect_v8_host_arch.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/detect_v8_host_arch.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. 
+# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Outputs host CPU architecture in format recognized by gyp.""" + +import platform +import re +import sys + + +def main(): + print DoMain([]) + return 0 + +def DoMain(_): + """Hook to be called from gyp without starting a separate python + interpreter.""" + host_arch = platform.machine() + + # Convert machine type to format recognized by gyp. 
+ if re.match(r'i.86', host_arch) or host_arch == 'i86pc': + host_arch = 'ia32' + elif host_arch in ['x86_64', 'amd64']: + host_arch = 'x64' + elif host_arch.startswith('arm'): + host_arch = 'arm' + elif host_arch == 'aarch64': + host_arch = 'arm64' + elif host_arch == 'mips64': + host_arch = 'mips64el' + elif host_arch.startswith('mips'): + host_arch = 'mipsel' + + # platform.machine is based on running kernel. It's possible to use 64-bit + # kernel with 32-bit userland, e.g. to give linker slightly more memory. + # Distinguish between different userland bitness by querying + # the python binary. + if host_arch == 'x64' and platform.architecture()[0] == '32bit': + host_arch = 'ia32' + + return host_arch + +if __name__ == '__main__': + sys.exit(main()) diff -Nru nodejs-0.11.13/deps/v8/build/features.gypi nodejs-0.11.15/deps/v8/build/features.gypi --- nodejs-0.11.13/deps/v8/build/features.gypi 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/features.gypi 2015-01-20 21:22:17.000000000 +0000 @@ -31,8 +31,6 @@ 'variables': { 'v8_compress_startup_data%': 'off', - 'v8_enable_debugger_support%': 1, - 'v8_enable_disassembler%': 0, 'v8_enable_gdbjit%': 0, @@ -43,6 +41,8 @@ 'v8_use_snapshot%': 'true', + 'v8_enable_verify_predictable%': 0, + # With post mortem support enabled, metadata is embedded into libv8 that # describes various parameters of the VM for use by debuggers. See # tools/gen-postmortem-metadata.py for details. @@ -59,14 +59,12 @@ # Enable compiler warnings when using V8_DEPRECATED apis. 'v8_deprecation_warnings%': 0, - # Use the v8 provided v8::Platform implementation. - 'v8_use_default_platform%': 1, + # Use external files for startup data blobs: + # the JS builtins sources and the start snapshot. 
+ 'v8_use_external_startup_data%': 0, }, 'target_defaults': { 'conditions': [ - ['v8_enable_debugger_support==1', { - 'defines': ['ENABLE_DEBUGGER_SUPPORT',], - }], ['v8_enable_disassembler==1', { 'defines': ['ENABLE_DISASSEMBLER',], }], @@ -79,6 +77,9 @@ ['v8_enable_verify_heap==1', { 'defines': ['VERIFY_HEAP',], }], + ['v8_enable_verify_predictable==1', { + 'defines': ['VERIFY_PREDICTABLE',], + }], ['v8_interpreted_regexp==1', { 'defines': ['V8_INTERPRETED_REGEXP',], }], @@ -88,17 +89,16 @@ ['v8_enable_i18n_support==1', { 'defines': ['V8_I18N_SUPPORT',], }], - ['v8_use_default_platform==1', { - 'defines': ['V8_USE_DEFAULT_PLATFORM',], - }], ['v8_compress_startup_data=="bz2"', { - 'defines': [ - 'COMPRESS_STARTUP_DATA_BZ2', - ], + 'defines': ['COMPRESS_STARTUP_DATA_BZ2',], + }], + ['v8_use_external_startup_data==1', { + 'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',], }], ], # conditions 'configurations': { - 'Debug': { + 'DebugBaseCommon': { + 'abstract': 1, 'variables': { 'v8_enable_extra_checks%': 1, 'v8_enable_handle_zapping%': 1, diff -Nru nodejs-0.11.13/deps/v8/build/get_landmines.py nodejs-0.11.15/deps/v8/build/get_landmines.py --- nodejs-0.11.13/deps/v8/build/get_landmines.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/get_landmines.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,26 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +This file emits the list of reasons why a particular build needs to be clobbered +(or a list of 'landmines'). +""" + +import sys + + +def main(): + """ + ALL LANDMINES ARE EMITTED FROM HERE. + """ + print 'Need to clobber after ICU52 roll.' + print 'Landmines test.' + print 'Activating MSVS 2013.' + print 'Revert activation of MSVS 2013.' 
+ return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru nodejs-0.11.13/deps/v8/build/gyp_v8 nodejs-0.11.15/deps/v8/build/gyp_v8 --- nodejs-0.11.13/deps/v8/build/gyp_v8 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/gyp_v8 2015-01-20 21:22:17.000000000 +0000 @@ -34,6 +34,7 @@ import os import platform import shlex +import subprocess import sys script_dir = os.path.dirname(os.path.realpath(__file__)) @@ -107,6 +108,14 @@ def run_gyp(args): rc = gyp.main(args) + + # Check for landmines (reasons to clobber the build). This must be run here, + # rather than a separate runhooks step so that any environment modifications + # from above are picked up. + print 'Running build/landmines.py...' + subprocess.check_call( + [sys.executable, os.path.join(script_dir, 'landmines.py')]) + if rc != 0: print 'Error running GYP' sys.exit(rc) @@ -158,7 +167,8 @@ # Generate for the architectures supported on the given platform. gyp_args = list(args) - if platform.system() == 'Linux': + gyp_generators = os.environ.get('GYP_GENERATORS') + if platform.system() == 'Linux' and gyp_generators != 'ninja': # Work around for crbug.com/331475. for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')): os.unlink(f) diff -Nru nodejs-0.11.13/deps/v8/build/landmines.py nodejs-0.11.15/deps/v8/build/landmines.py --- nodejs-0.11.13/deps/v8/build/landmines.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/landmines.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,139 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +This script runs every build as a hook. If it detects that the build should +be clobbered, it will touch the file <build_dir>/.landmine_triggered. The +various build scripts will then check for the presence of this file and clobber +accordingly. 
The script will also emit the reasons for the clobber to stdout. + +A landmine is tripped when a builder checks out a different revision, and the +diff between the new landmines and the old ones is non-null. At this point, the +build is clobbered. +""" + +import difflib +import logging +import optparse +import os +import sys +import subprocess +import time + +import landmine_utils + + +SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + +def get_target_build_dir(build_tool, target): + """ + Returns output directory absolute path dependent on build and targets. + Examples: + r'c:\b\build\slave\win\build\src\out\Release' + '/mnt/data/b/build/slave/linux/build/src/out/Debug' + '/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos' + + Keep this function in sync with tools/build/scripts/slave/compile.py + """ + ret = None + if build_tool == 'xcode': + ret = os.path.join(SRC_DIR, 'xcodebuild', target) + elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios. + ret = os.path.join(SRC_DIR, 'out', target) + elif build_tool in ['msvs', 'vs', 'ib']: + ret = os.path.join(SRC_DIR, 'build', target) + else: + raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool) + return os.path.abspath(ret) + + +def set_up_landmines(target, new_landmines): + """Does the work of setting, planting, and triggering landmines.""" + out_dir = get_target_build_dir(landmine_utils.builder(), target) + + landmines_path = os.path.join(out_dir, '.landmines') + if not os.path.exists(out_dir): + return + + if not os.path.exists(landmines_path): + print "Landmines tracker didn't exists." + + # FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult + # to know if we are right after a clobber or if it is first-time landmines + # deployment. Also, a landmine-triggered clobber right after a clobber is + # not possible. 
Different clobber methods for msvs, xcode and make all + # have different blacklists of files that are not deleted. + if os.path.exists(landmines_path): + triggered = os.path.join(out_dir, '.landmines_triggered') + with open(landmines_path, 'r') as f: + old_landmines = f.readlines() + if old_landmines != new_landmines: + old_date = time.ctime(os.stat(landmines_path).st_ctime) + diff = difflib.unified_diff(old_landmines, new_landmines, + fromfile='old_landmines', tofile='new_landmines', + fromfiledate=old_date, tofiledate=time.ctime(), n=0) + + with open(triggered, 'w') as f: + f.writelines(diff) + print "Setting landmine: %s" % triggered + elif os.path.exists(triggered): + # Remove false triggered landmines. + os.remove(triggered) + print "Removing landmine: %s" % triggered + with open(landmines_path, 'w') as f: + f.writelines(new_landmines) + + +def process_options(): + """Returns a list of landmine emitting scripts.""" + parser = optparse.OptionParser() + parser.add_option( + '-s', '--landmine-scripts', action='append', + default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')], + help='Path to the script which emits landmines to stdout. The target ' + 'is passed to this script via option -t. Note that an extra ' + 'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.') + parser.add_option('-v', '--verbose', action='store_true', + default=('LANDMINES_VERBOSE' in os.environ), + help=('Emit some extra debugging information (default off). 
This option ' + 'is also enabled by the presence of a LANDMINES_VERBOSE environment ' + 'variable.')) + + options, args = parser.parse_args() + + if args: + parser.error('Unknown arguments %s' % args) + + logging.basicConfig( + level=logging.DEBUG if options.verbose else logging.ERROR) + + extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT') + if extra_script: + return options.landmine_scripts + [extra_script] + else: + return options.landmine_scripts + + +def main(): + landmine_scripts = process_options() + + if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'): + return 0 + + landmines = [] + for s in landmine_scripts: + proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE) + output, _ = proc.communicate() + landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()]) + + for target in ('Debug', 'Release'): + set_up_landmines(target, landmines) + + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru nodejs-0.11.13/deps/v8/build/landmine_utils.py nodejs-0.11.15/deps/v8/build/landmine_utils.py --- nodejs-0.11.13/deps/v8/build/landmine_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/landmine_utils.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,114 @@ +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ + +import functools +import logging +import os +import shlex +import sys + + +def memoize(default=None): + """This decorator caches the return value of a parameterless pure function""" + def memoizer(func): + val = [] + @functools.wraps(func) + def inner(): + if not val: + ret = func() + val.append(ret if ret is not None else default) + if logging.getLogger().isEnabledFor(logging.INFO): + print '%s -> %r' % (func.__name__, val[0]) + return val[0] + return inner + return memoizer + + +@memoize() +def IsWindows(): + return sys.platform in ['win32', 'cygwin'] + + +@memoize() +def IsLinux(): + return sys.platform.startswith(('linux', 'freebsd')) + + +@memoize() +def IsMac(): + return sys.platform == 'darwin' + + +@memoize() +def gyp_defines(): + """Parses and returns GYP_DEFINES env var as a dictionary.""" + return dict(arg.split('=', 1) + for arg in shlex.split(os.environ.get('GYP_DEFINES', ''))) + +@memoize() +def gyp_msvs_version(): + return os.environ.get('GYP_MSVS_VERSION', '') + +@memoize() +def distributor(): + """ + Returns a string which is the distributed build engine in use (if any). + Possible values: 'goma', 'ib', '' + """ + if 'goma' in gyp_defines(): + return 'goma' + elif IsWindows(): + if 'CHROME_HEADLESS' in os.environ: + return 'ib' # use (win and !goma and headless) as approximation of ib + + +@memoize() +def platform(): + """ + Returns a string representing the platform this build is targetted for. + Possible values: 'win', 'mac', 'linux', 'ios', 'android' + """ + if 'OS' in gyp_defines(): + if 'android' in gyp_defines()['OS']: + return 'android' + else: + return gyp_defines()['OS'] + elif IsWindows(): + return 'win' + elif IsLinux(): + return 'linux' + else: + return 'mac' + + +@memoize() +def builder(): + """ + Returns a string representing the build engine (not compiler) to use. 
+ Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons' + """ + if 'GYP_GENERATORS' in os.environ: + # for simplicity, only support the first explicit generator + generator = os.environ['GYP_GENERATORS'].split(',')[0] + if generator.endswith('-android'): + return generator.split('-')[0] + elif generator.endswith('-ninja'): + return 'ninja' + else: + return generator + else: + if platform() == 'android': + # Good enough for now? Do any android bots use make? + return 'make' + elif platform() == 'ios': + return 'xcode' + elif IsWindows(): + return 'msvs' + elif IsLinux(): + return 'make' + elif IsMac(): + return 'xcode' + else: + assert False, 'Don\'t know what builder we\'re using!' diff -Nru nodejs-0.11.13/deps/v8/build/standalone.gypi nodejs-0.11.15/deps/v8/build/standalone.gypi --- nodejs-0.11.13/deps/v8/build/standalone.gypi 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/standalone.gypi 2015-01-20 21:22:17.000000000 +0000 @@ -33,8 +33,8 @@ 'includes': ['toolchain.gypi'], 'variables': { 'component%': 'static_library', - 'clang%': 0, 'asan%': 0, + 'tsan%': 0, 'visibility%': 'hidden', 'v8_enable_backtrace%': 0, 'v8_enable_i18n_support%': 1, @@ -51,13 +51,7 @@ # Anything else gets passed through, which probably won't work # very well; such hosts should pass an explicit target_arch # to gyp. 
- 'host_arch%': - '<!(uname -m | sed -e "s/i.86/ia32/;\ - s/x86_64/x64/;\ - s/amd64/x64/;\ - s/aarch64/arm64/;\ - s/arm.*/arm/;\ - s/mips.*/mipsel/")', + 'host_arch%': '<!pymod_do_main(detect_v8_host_arch)', }, { # OS!="linux" and OS!="freebsd" and OS!="openbsd" and # OS!="netbsd" and OS!="mac" @@ -104,6 +98,7 @@ ['(v8_target_arch=="arm" and host_arch!="arm") or \ (v8_target_arch=="arm64" and host_arch!="arm64") or \ (v8_target_arch=="mipsel" and host_arch!="mipsel") or \ + (v8_target_arch=="mips64el" and host_arch!="mips64el") or \ (v8_target_arch=="x64" and host_arch!="x64") or \ (OS=="android" or OS=="qnx")', { 'want_separate_host_toolset': 1, @@ -115,16 +110,20 @@ }, { 'os_posix%': 1, }], - ['(v8_target_arch=="ia32" or v8_target_arch=="x64") and \ + ['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \ (OS=="linux" or OS=="mac")', { 'v8_enable_gdbjit%': 1, }, { 'v8_enable_gdbjit%': 0, }], + ['OS=="mac"', { + 'clang%': 1, + }, { + 'clang%': 0, + }], ], # Default ARM variable settings. 'arm_version%': 'default', - 'arm_neon%': 0, 'arm_fpu%': 'vfpv3', 'arm_float_abi%': 'default', 'arm_thumb': 'default', @@ -135,9 +134,15 @@ }, 'default_configuration': 'Debug', 'configurations': { - 'Debug': { + 'DebugBaseCommon': { 'cflags': [ '-g', '-O0' ], }, + 'Optdebug': { + 'inherit_from': [ 'DebugBaseCommon', 'DebugBase2' ], + }, + 'Debug': { + # Xcode insists on this empty entry. + }, 'Release': { # Xcode insists on this empty entry. 
}, @@ -186,17 +191,36 @@ ], }, }], + ['tsan==1', { + 'target_defaults': { + 'cflags+': [ + '-fno-omit-frame-pointer', + '-gline-tables-only', + '-fsanitize=thread', + '-fPIC', + '-Wno-c++11-extensions', + ], + 'cflags!': [ + '-fomit-frame-pointer', + ], + 'ldflags': [ + '-fsanitize=thread', + '-pie', + ], + 'defines': [ + 'THREAD_SANITIZER', + ], + }, + }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ or OS=="netbsd"', { 'target_defaults': { 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', - '-pthread', '-fno-exceptions', '-pedantic' ], - 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ], + '-Wno-long-long', '-pthread', '-fno-exceptions', + '-pedantic' ], + 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ], 'ldflags': [ '-pthread', ], 'conditions': [ - [ 'OS=="linux"', { - 'cflags': [ '-ansi' ], - }], [ 'visibility=="hidden" and v8_enable_backtrace==0', { 'cflags': [ '-fvisibility=hidden' ], }], @@ -212,7 +236,7 @@ 'target_defaults': { 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', '-fno-exceptions' ], - 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ], + 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ], 'conditions': [ [ 'visibility=="hidden"', { 'cflags': [ '-fvisibility=hidden' ], @@ -310,7 +334,7 @@ 'target_defaults': { 'xcode_settings': { 'ALWAYS_SEARCH_USER_PATHS': 'NO', - 'GCC_C_LANGUAGE_STANDARD': 'ansi', # -ansi + 'GCC_C_LANGUAGE_STANDARD': 'c99', # -std=c99 'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks 'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic # (Equivalent to -fPIC) @@ -321,7 +345,6 @@ 'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES', 'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden 'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics - 'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof 'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES', # -Wnon-virtual-dtor # MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min 'MACOSX_DEPLOYMENT_TARGET': 
'<(mac_deployment_target)', @@ -347,7 +370,7 @@ ['clang==1', { 'xcode_settings': { 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0', - 'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++11', # -std=gnu++11 + 'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x }, }], ], diff -Nru nodejs-0.11.13/deps/v8/build/toolchain.gypi nodejs-0.11.15/deps/v8/build/toolchain.gypi --- nodejs-0.11.13/deps/v8/build/toolchain.gypi 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/build/toolchain.gypi 2015-01-20 21:22:17.000000000 +0000 @@ -31,7 +31,7 @@ 'variables': { 'msvs_use_common_release': 0, 'gcc_version%': 'unknown', - 'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command. + 'clang%': 0, 'v8_target_arch%': '<(target_arch)', # Native Client builds currently use the V8 ARM JIT and # arm/simulator-arm.cc to defer the significant effort required @@ -47,7 +47,7 @@ # these registers in the snapshot and use CPU feature probing when running # on the target. 'v8_can_use_vfp32dregs%': 'false', - 'arm_test%': 'off', + 'arm_test_noprobe%': 'off', # Similar to vfp but on MIPS. 'v8_can_use_fpu_instructions%': 'true', @@ -56,7 +56,7 @@ 'v8_use_mips_abi_hardfloat%': 'true', # Default arch variant for MIPS. - 'mips_arch_variant%': 'mips32r2', + 'mips_arch_variant%': 'r2', 'v8_enable_backtrace%': 0, @@ -82,35 +82,85 @@ # Allow to suppress the array bounds warning (default is no suppression). 'wno_array_bounds%': '', + + 'variables': { + # This is set when building the Android WebView inside the Android build + # system, using the 'android' gyp backend. + 'android_webview_build%': 0, + }, + # Copy it out one scope. 
+ 'android_webview_build%': '<(android_webview_build)', }, + 'conditions': [ + ['host_arch=="ia32" or host_arch=="x64" or clang==1', { + 'variables': { + 'host_cxx_is_biarch%': 1, + }, + }, { + 'variables': { + 'host_cxx_is_biarch%': 0, + }, + }], + ['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \ + clang==1', { + 'variables': { + 'target_cxx_is_biarch%': 1, + }, + }, { + 'variables': { + 'target_cxx_is_biarch%': 0, + }, + }], + ], 'target_defaults': { 'conditions': [ ['v8_target_arch=="arm"', { 'defines': [ 'V8_TARGET_ARCH_ARM', ], + 'conditions': [ + [ 'arm_version==7 or arm_version=="default"', { + 'defines': [ + 'CAN_USE_ARMV7_INSTRUCTIONS', + ], + }], + [ 'arm_fpu=="vfpv3-d16" or arm_fpu=="default"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + ], + }], + [ 'arm_fpu=="vfpv3"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_VFP32DREGS', + ], + }], + [ 'arm_fpu=="neon"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_VFP32DREGS', + 'CAN_USE_NEON', + ], + }], + [ 'arm_test_noprobe=="on"', { + 'defines': [ + 'ARM_TEST_NO_FEATURE_PROBE', + ], + }], + ], 'target_conditions': [ ['_toolset=="host"', { - 'variables': { - 'armcompiler': '<!($(echo ${CXX_host:-$(which g++)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")', - }, 'conditions': [ - ['armcompiler=="yes"', { + ['v8_target_arch==host_arch and android_webview_build==0', { + # Host built with an Arm CXX compiler. 
'conditions': [ [ 'arm_version==7', { 'cflags': ['-march=armv7-a',], }], [ 'arm_version==7 or arm_version=="default"', { 'conditions': [ - [ 'arm_neon==1', { - 'cflags': ['-mfpu=neon',], - }, - { - 'conditions': [ - [ 'arm_fpu!="default"', { - 'cflags': ['-mfpu=<(arm_fpu)',], - }], - ], + [ 'arm_fpu!="default"', { + 'cflags': ['-mfpu=<(arm_fpu)',], }], ], }], @@ -123,44 +173,11 @@ [ 'arm_thumb==0', { 'cflags': ['-marm',], }], - [ 'arm_test=="on"', { - 'defines': [ - 'ARM_TEST', - ], - }], ], }, { - # armcompiler=="no" + # 'v8_target_arch!=host_arch' + # Host not built with an Arm CXX compiler (simulator build). 'conditions': [ - [ 'arm_version==7 or arm_version=="default"', { - 'defines': [ - 'CAN_USE_ARMV7_INSTRUCTIONS=1', - ], - 'conditions': [ - [ 'arm_fpu=="default"', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - ], - }], - [ 'arm_fpu=="vfpv3-d16"', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - ], - }], - [ 'arm_fpu=="vfpv3"', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - 'CAN_USE_VFP32DREGS', - ], - }], - [ 'arm_fpu=="neon" or arm_neon==1', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - 'CAN_USE_VFP32DREGS', - ], - }], - ], - }], [ 'arm_float_abi=="hard"', { 'defines': [ 'USE_EABI_HARDFLOAT=1', @@ -172,33 +189,21 @@ ], }], ], - 'defines': [ - 'ARM_TEST', - ], }], ], }], # _toolset=="host" ['_toolset=="target"', { - 'variables': { - 'armcompiler': '<!($(echo ${CXX_target:-<(CXX)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")', - }, 'conditions': [ - ['armcompiler=="yes"', { + ['v8_target_arch==target_arch and android_webview_build==0', { + # Target built with an Arm CXX compiler. 
'conditions': [ [ 'arm_version==7', { 'cflags': ['-march=armv7-a',], }], [ 'arm_version==7 or arm_version=="default"', { 'conditions': [ - [ 'arm_neon==1', { - 'cflags': ['-mfpu=neon',], - }, - { - 'conditions': [ - [ 'arm_fpu!="default"', { - 'cflags': ['-mfpu=<(arm_fpu)',], - }], - ], + [ 'arm_fpu!="default"', { + 'cflags': ['-mfpu=<(arm_fpu)',], }], ], }], @@ -211,44 +216,11 @@ [ 'arm_thumb==0', { 'cflags': ['-marm',], }], - [ 'arm_test=="on"', { - 'defines': [ - 'ARM_TEST', - ], - }], ], }, { - # armcompiler=="no" + # 'v8_target_arch!=target_arch' + # Target not built with an Arm CXX compiler (simulator build). 'conditions': [ - [ 'arm_version==7 or arm_version=="default"', { - 'defines': [ - 'CAN_USE_ARMV7_INSTRUCTIONS=1', - ], - 'conditions': [ - [ 'arm_fpu=="default"', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - ], - }], - [ 'arm_fpu=="vfpv3-d16"', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - ], - }], - [ 'arm_fpu=="vfpv3"', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - 'CAN_USE_VFP32DREGS', - ], - }], - [ 'arm_fpu=="neon" or arm_neon==1', { - 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - 'CAN_USE_VFP32DREGS', - ], - }], - ], - }], [ 'arm_float_abi=="hard"', { 'defines': [ 'USE_EABI_HARDFLOAT=1', @@ -260,9 +232,6 @@ ], }], ], - 'defines': [ - 'ARM_TEST', - ], }], ], }], # _toolset=="target" @@ -278,15 +247,68 @@ 'V8_TARGET_ARCH_IA32', ], }], # v8_target_arch=="ia32" + ['v8_target_arch=="x87"', { + 'defines': [ + 'V8_TARGET_ARCH_X87', + ], + 'cflags': ['-march=i586'], + }], # v8_target_arch=="x87" + ['v8_target_arch=="mips"', { + 'defines': [ + 'V8_TARGET_ARCH_MIPS', + ], + 'conditions': [ + ['v8_target_arch==target_arch and android_webview_build==0', { + # Target built with a Mips CXX compiler. 
+ 'target_conditions': [ + ['_toolset=="target"', { + 'cflags': ['-EB'], + 'ldflags': ['-EB'], + 'conditions': [ + [ 'v8_use_mips_abi_hardfloat=="true"', { + 'cflags': ['-mhard-float'], + 'ldflags': ['-mhard-float'], + }, { + 'cflags': ['-msoft-float'], + 'ldflags': ['-msoft-float'], + }], + ['mips_arch_variant=="r2"', { + 'cflags': ['-mips32r2', '-Wa,-mips32r2'], + }], + ['mips_arch_variant=="r1"', { + 'cflags': ['-mips32', '-Wa,-mips32'], + }], + ], + }], + ], + }], + [ 'v8_can_use_fpu_instructions=="true"', { + 'defines': [ + 'CAN_USE_FPU_INSTRUCTIONS', + ], + }], + [ 'v8_use_mips_abi_hardfloat=="true"', { + 'defines': [ + '__mips_hard_float=1', + 'CAN_USE_FPU_INSTRUCTIONS', + ], + }, { + 'defines': [ + '__mips_soft_float=1' + ], + }], + ['mips_arch_variant=="r2"', { + 'defines': ['_MIPS_ARCH_MIPS32R2',], + }], + ], + }], # v8_target_arch=="mips" ['v8_target_arch=="mipsel"', { 'defines': [ 'V8_TARGET_ARCH_MIPS', ], - 'variables': { - 'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")', - }, 'conditions': [ - ['mipscompiler=="yes"', { + ['v8_target_arch==target_arch and android_webview_build==0', { + # Target built with a Mips CXX compiler. 
'target_conditions': [ ['_toolset=="target"', { 'cflags': ['-EL'], @@ -299,10 +321,10 @@ 'cflags': ['-msoft-float'], 'ldflags': ['-msoft-float'], }], - ['mips_arch_variant=="mips32r2"', { + ['mips_arch_variant=="r2"', { 'cflags': ['-mips32r2', '-Wa,-mips32r2'], }], - ['mips_arch_variant=="mips32r1"', { + ['mips_arch_variant=="r1"', { 'cflags': ['-mips32', '-Wa,-mips32'], }], ['mips_arch_variant=="loongson"', { @@ -327,7 +349,7 @@ '__mips_soft_float=1' ], }], - ['mips_arch_variant=="mips32r2"', { + ['mips_arch_variant=="r2"', { 'defines': ['_MIPS_ARCH_MIPS32R2',], }], ['mips_arch_variant=="loongson"', { @@ -335,6 +357,68 @@ }], ], }], # v8_target_arch=="mipsel" + ['v8_target_arch=="mips64el"', { + 'defines': [ + 'V8_TARGET_ARCH_MIPS64', + ], + 'conditions': [ + ['v8_target_arch==target_arch and android_webview_build==0', { + # Target built with a Mips CXX compiler. + 'target_conditions': [ + ['_toolset=="target"', { + 'cflags': ['-EL'], + 'ldflags': ['-EL'], + 'conditions': [ + [ 'v8_use_mips_abi_hardfloat=="true"', { + 'cflags': ['-mhard-float'], + 'ldflags': ['-mhard-float'], + }, { + 'cflags': ['-msoft-float'], + 'ldflags': ['-msoft-float'], + }], + ['mips_arch_variant=="r6"', { + 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'], + 'ldflags': [ + '-mips64r6', '-mabi=64', + '-Wl,--dynamic-linker=$(LDSO_PATH)', + '-Wl,--rpath=$(LD_R_PATH)', + ], + }], + ['mips_arch_variant=="r2"', { + 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'], + 'ldflags': [ + '-mips64r2', '-mabi=64', + '-Wl,--dynamic-linker=$(LDSO_PATH)', + '-Wl,--rpath=$(LD_R_PATH)', + ], + }], + ], + }], + ], + }], + [ 'v8_can_use_fpu_instructions=="true"', { + 'defines': [ + 'CAN_USE_FPU_INSTRUCTIONS', + ], + }], + [ 'v8_use_mips_abi_hardfloat=="true"', { + 'defines': [ + '__mips_hard_float=1', + 'CAN_USE_FPU_INSTRUCTIONS', + ], + }, { + 'defines': [ + '__mips_soft_float=1' + ], + }], + ['mips_arch_variant=="r6"', { + 'defines': ['_MIPS_ARCH_MIPS64R6',], + }], + ['mips_arch_variant=="r2"', { + 
'defines': ['_MIPS_ARCH_MIPS64R2',], + }], + ], + }], # v8_target_arch=="mips64el" ['v8_target_arch=="x64"', { 'defines': [ 'V8_TARGET_ARCH_X64', @@ -349,16 +433,42 @@ }, 'msvs_configuration_platform': 'x64', }], # v8_target_arch=="x64" + ['v8_target_arch=="x32"', { + 'defines': [ + # x32 port shares the source code with x64 port. + 'V8_TARGET_ARCH_X64', + 'V8_TARGET_ARCH_32_BIT', + ], + 'cflags': [ + '-mx32', + # Inhibit warning if long long type is used. + '-Wno-long-long', + ], + 'ldflags': [ + '-mx32', + ], + }], # v8_target_arch=="x32" ['OS=="win"', { 'defines': [ 'WIN32', ], + # 4351: VS 2005 and later are warning us that they've fixed a bug + # present in VS 2003 and earlier. + 'msvs_disabled_warnings': [4351], 'msvs_configuration_attributes': { 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)', 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', 'CharacterSet': '1', }, }], + ['OS=="win" and v8_target_arch=="ia32"', { + 'msvs_settings': { + 'VCCLCompilerTool': { + # Ensure no surprising artifacts from 80bit double math with x86. + 'AdditionalOptions': ['/arch:SSE2'], + }, + }, + }], ['OS=="win" and v8_enable_prof==1', { 'msvs_settings': { 'VCLinkerTool': { @@ -366,44 +476,28 @@ }, }, }], - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ - or OS=="netbsd" or OS=="qnx"', { - 'conditions': [ - [ 'v8_no_strict_aliasing==1', { - 'cflags': [ '-fno-strict-aliasing' ], - }], - ], # conditions - }], - ['OS=="solaris"', { - 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. - }], - ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ + ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \ (v8_target_arch=="arm" or v8_target_arch=="ia32" or \ + v8_target_arch=="x87" or v8_target_arch=="mips" or \ v8_target_arch=="mipsel")', { - # Check whether the host compiler and target compiler support the - # '-m32' option and set it if so. 
'target_conditions': [ ['_toolset=="host"', { - 'variables': { - 'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)', - }, - 'cflags': [ '<(m32flag)' ], - 'ldflags': [ '<(m32flag)' ], + 'conditions': [ + ['host_cxx_is_biarch==1', { + 'cflags': [ '-m32' ], + 'ldflags': [ '-m32' ] + }], + ], 'xcode_settings': { 'ARCHS': [ 'i386' ], }, }], ['_toolset=="target"', { - 'variables': { - 'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)', - 'clang%': 0, - }, 'conditions': [ - ['((OS!="android" and OS!="qnx") or clang==1) and \ - nacl_target_arch!="nacl_x64"', { - 'cflags': [ '<(m32flag)' ], - 'ldflags': [ '<(m32flag)' ], + ['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', { + 'cflags': [ '-m32' ], + 'ldflags': [ '-m32' ], }], ], 'xcode_settings': { @@ -414,28 +508,35 @@ }], ['(OS=="linux" or OS=="android") and \ (v8_target_arch=="x64" or v8_target_arch=="arm64")', { - # Check whether the host compiler and target compiler support the - # '-m64' option and set it if so. 
'target_conditions': [ ['_toolset=="host"', { - 'variables': { - 'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)', - }, - 'cflags': [ '<(m64flag)' ], - 'ldflags': [ '<(m64flag)' ], - }], - ['_toolset=="target"', { - 'variables': { - 'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)', - }, 'conditions': [ - ['((OS!="android" and OS!="qnx") or clang==1)', { - 'cflags': [ '<(m64flag)' ], - 'ldflags': [ '<(m64flag)' ], - }], - ], - }] - ], + ['host_cxx_is_biarch==1', { + 'cflags': [ '-m64' ], + 'ldflags': [ '-m64' ] + }], + ], + }], + ['_toolset=="target"', { + 'conditions': [ + ['target_cxx_is_biarch==1', { + 'cflags': [ '-m64' ], + 'ldflags': [ '-m64' ], + }], + ] + }], + ], + }], + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ + or OS=="netbsd" or OS=="qnx"', { + 'conditions': [ + [ 'v8_no_strict_aliasing==1', { + 'cflags': [ '-fno-strict-aliasing' ], + }], + ], # conditions + }], + ['OS=="solaris"', { + 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. }], ['OS=="freebsd" or OS=="openbsd"', { 'cflags': [ '-I/usr/local/include' ], @@ -445,138 +546,154 @@ }], ], # conditions 'configurations': { - 'Debug': { - 'defines': [ - 'ENABLE_DISASSEMBLER', - 'V8_ENABLE_CHECKS', - 'OBJECT_PRINT', - 'VERIFY_HEAP', - 'DEBUG' - ], + # Abstract configuration for v8_optimized_debug == 0. 
+ 'DebugBase0': { + 'abstract': 1, 'msvs_settings': { 'VCCLCompilerTool': { + 'Optimization': '0', 'conditions': [ - ['v8_optimized_debug==0', { - 'Optimization': '0', - 'conditions': [ - ['component=="shared_library"', { - 'RuntimeLibrary': '3', # /MDd - }, { - 'RuntimeLibrary': '1', # /MTd - }], - ], - }], - ['v8_optimized_debug==1', { - 'Optimization': '1', - 'InlineFunctionExpansion': '2', - 'EnableIntrinsicFunctions': 'true', - 'FavorSizeOrSpeed': '0', - 'StringPooling': 'true', - 'BasicRuntimeChecks': '0', - 'conditions': [ - ['component=="shared_library"', { - 'RuntimeLibrary': '3', # /MDd - }, { - 'RuntimeLibrary': '1', # /MTd - }], - ], - }], - ['v8_optimized_debug==2', { - 'Optimization': '2', - 'InlineFunctionExpansion': '2', - 'EnableIntrinsicFunctions': 'true', - 'FavorSizeOrSpeed': '0', - 'StringPooling': 'true', - 'BasicRuntimeChecks': '0', - 'conditions': [ - ['component=="shared_library"', { - 'RuntimeLibrary': '3', #/MDd - }, { - 'RuntimeLibrary': '1', #/MTd - }], - ['v8_target_arch=="x64"', { - # TODO(2207): remove this option once the bug is fixed. - 'WholeProgramOptimization': 'true', - }], - ], + ['component=="shared_library"', { + 'RuntimeLibrary': '3', # /MDd + }, { + 'RuntimeLibrary': '1', # /MTd }], ], }, 'VCLinkerTool': { + 'LinkIncremental': '2', + }, + }, + 'conditions': [ + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \ + OS=="qnx"', { + 'cflags!': [ + '-O0', + '-O3', + '-O2', + '-O1', + '-Os', + ], + 'cflags': [ + '-fdata-sections', + '-ffunction-sections', + ], + }], + ['OS=="mac"', { + 'xcode_settings': { + 'GCC_OPTIMIZATION_LEVEL': '0', # -O0 + }, + }], + ], + }, # DebugBase0 + # Abstract configuration for v8_optimized_debug == 1. 
+ 'DebugBase1': { + 'abstract': 1, + 'msvs_settings': { + 'VCCLCompilerTool': { + 'Optimization': '1', + 'InlineFunctionExpansion': '2', + 'EnableIntrinsicFunctions': 'true', + 'FavorSizeOrSpeed': '0', + 'StringPooling': 'true', + 'BasicRuntimeChecks': '0', 'conditions': [ - ['v8_optimized_debug==0', { - 'LinkIncremental': '2', - }], - ['v8_optimized_debug==1', { - 'LinkIncremental': '2', - }], - ['v8_optimized_debug==2', { - 'LinkIncremental': '1', - 'OptimizeReferences': '2', - 'EnableCOMDATFolding': '2', + ['component=="shared_library"', { + 'RuntimeLibrary': '3', # /MDd + }, { + 'RuntimeLibrary': '1', # /MTd }], ], }, + 'VCLinkerTool': { + 'LinkIncremental': '2', + }, }, 'conditions': [ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \ OS=="qnx"', { - 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', - '-Wnon-virtual-dtor', '-Woverloaded-virtual', - '<(wno_array_bounds)', - ], - 'conditions': [ - ['v8_optimized_debug==0', { - 'cflags!': [ - '-O0', - '-O3', - '-O2', - '-O1', - '-Os', - ], + 'cflags!': [ + '-O0', + '-O3', # TODO(2807) should be -O1. + '-O2', + '-Os', + ], + 'cflags': [ + '-fdata-sections', + '-ffunction-sections', + '-O1', # TODO(2807) should be -O3. + ], + 'conditions': [ + ['gcc_version==44 and clang==0', { 'cflags': [ - '-fdata-sections', - '-ffunction-sections', + # Avoid crashes with gcc 4.4 in the v8 test suite. + '-fno-tree-vrp', ], }], - ['v8_optimized_debug==1', { - 'cflags!': [ - '-O0', - '-O3', # TODO(2807) should be -O1. - '-O2', - '-Os', - ], - 'cflags': [ - '-fdata-sections', - '-ffunction-sections', - '-O1', # TODO(2807) should be -O3. - ], + ], + }], + ['OS=="mac"', { + 'xcode_settings': { + 'GCC_OPTIMIZATION_LEVEL': '3', # -O3 + 'GCC_STRICT_ALIASING': 'YES', + }, + }], + ], + }, # DebugBase1 + # Abstract configuration for v8_optimized_debug == 2. 
+ 'DebugBase2': { + 'abstract': 1, + 'msvs_settings': { + 'VCCLCompilerTool': { + 'Optimization': '2', + 'InlineFunctionExpansion': '2', + 'EnableIntrinsicFunctions': 'true', + 'FavorSizeOrSpeed': '0', + 'StringPooling': 'true', + 'BasicRuntimeChecks': '0', + 'conditions': [ + ['component=="shared_library"', { + 'RuntimeLibrary': '3', #/MDd + }, { + 'RuntimeLibrary': '1', #/MTd }], - ['v8_optimized_debug==2', { - 'cflags!': [ - '-O0', - '-O1', - '-Os', - ], - 'cflags': [ - '-fdata-sections', - '-ffunction-sections', - ], - 'defines': [ - 'OPTIMIZED_DEBUG' - ], - 'conditions': [ - # TODO(crbug.com/272548): Avoid -O3 in NaCl - ['nacl_target_arch=="none"', { - 'cflags': ['-O3'], - 'cflags!': ['-O2'], - }, { - 'cflags': ['-O2'], - 'cflags!': ['-O3'], - }], - ], + ['v8_target_arch=="x64"', { + # TODO(2207): remove this option once the bug is fixed. + 'WholeProgramOptimization': 'true', + }], + ], + }, + 'VCLinkerTool': { + 'LinkIncremental': '1', + 'OptimizeReferences': '2', + 'EnableCOMDATFolding': '2', + }, + }, + 'conditions': [ + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \ + OS=="qnx"', { + 'cflags!': [ + '-O0', + '-O1', + '-Os', + ], + 'cflags': [ + '-fdata-sections', + '-ffunction-sections', + ], + 'defines': [ + 'OPTIMIZED_DEBUG' + ], + 'conditions': [ + # TODO(crbug.com/272548): Avoid -O3 in NaCl + ['nacl_target_arch=="none"', { + 'cflags': ['-O3'], + 'cflags!': ['-O2'], + }, { + 'cflags': ['-O2'], + 'cflags!': ['-O3'], }], - ['v8_optimized_debug!=0 and gcc_version==44 and clang==0', { + ['gcc_version==44 and clang==0', { 'cflags': [ # Avoid crashes with gcc 4.4 in the v8 test suite. '-fno-tree-vrp', @@ -584,6 +701,29 @@ }], ], }], + ['OS=="mac"', { + 'xcode_settings': { + 'GCC_OPTIMIZATION_LEVEL': '3', # -O3 + 'GCC_STRICT_ALIASING': 'YES', + }, + }], + ], + }, # DebugBase2 + # Common settings for the Debug configuration. 
+ 'DebugBaseCommon': { + 'abstract': 1, + 'defines': [ + 'ENABLE_DISASSEMBLER', + 'V8_ENABLE_CHECKS', + 'OBJECT_PRINT', + 'VERIFY_HEAP', + 'DEBUG' + ], + 'conditions': [ + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \ + OS=="qnx"', { + 'cflags': [ '-Woverloaded-virtual', '<(wno_array_bounds)', ], + }], ['OS=="linux" and v8_enable_backtrace==1', { # Support for backtrace_symbols. 'ldflags': [ '-rdynamic' ], @@ -602,17 +742,19 @@ }], ], }], - ['OS=="mac"', { - 'xcode_settings': { - 'conditions': [ - ['v8_optimized_debug==0', { - 'GCC_OPTIMIZATION_LEVEL': '0', # -O0 - }, { - 'GCC_OPTIMIZATION_LEVEL': '3', # -O3 - 'GCC_STRICT_ALIASING': 'YES', - }], - ], - }, + ], + }, # DebugBaseCommon + 'Debug': { + 'inherit_from': ['DebugBaseCommon'], + 'conditions': [ + ['v8_optimized_debug==0', { + 'inherit_from': ['DebugBase0'], + }], + ['v8_optimized_debug==1', { + 'inherit_from': ['DebugBase1'], + }], + ['v8_optimized_debug==2', { + 'inherit_from': ['DebugBase2'], }], ], }, # Debug diff -Nru nodejs-0.11.13/deps/v8/BUILD.gn nodejs-0.11.15/deps/v8/BUILD.gn --- nodejs-0.11.13/deps/v8/BUILD.gn 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/BUILD.gn 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1281 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# TODO(jochen): These will need to be user-settable to support standalone V8 +# builds. 
+v8_compress_startup_data = "off" +v8_deprecation_warnings = false +v8_enable_disassembler = false +v8_enable_gdbjit = false +v8_enable_handle_zapping = true +v8_enable_i18n_support = true +v8_enable_verify_heap = false +v8_interpreted_regexp = false +v8_object_print = false +v8_postmortem_support = false +v8_use_snapshot = true +v8_use_external_startup_data = false +v8_enable_extra_checks = is_debug +v8_target_arch = cpu_arch +v8_random_seed = "314159265" + + +############################################################################### +# Configurations +# +config("internal_config") { + visibility = ":*" # Only targets in this file can depend on this. + + include_dirs = [ "." ] + + if (component_mode == "shared_library") { + defines = [ + "V8_SHARED", + "BUILDING_V8_SHARED", + ] + } +} + +config("internal_config_base") { + visibility = ":*" # Only targets in this file can depend on this. + + include_dirs = [ "." ] +} + +# This config should only be applied to code using V8 and not any V8 code +# itself. +config("external_config") { + if (is_component_build) { + defines = [ + "V8_SHARED", + "USING_V8_SHARED", + ] + } + include_dirs = [ "include" ] +} + +config("features") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + defines = [] + + if (v8_enable_disassembler == true) { + defines += [ + "ENABLE_DISASSEMBLER", + ] + } + if (v8_enable_gdbjit == true) { + defines += [ + "ENABLE_GDB_JIT_INTERFACE", + ] + } + if (v8_object_print == true) { + defines += [ + "OBJECT_PRINT", + ] + } + if (v8_enable_verify_heap == true) { + defines += [ + "VERIFY_HEAP", + ] + } + if (v8_interpreted_regexp == true) { + defines += [ + "V8_INTERPRETED_REGEXP", + ] + } + if (v8_deprecation_warnings == true) { + defines += [ + "V8_DEPRECATION_WARNINGS", + ] + } + if (v8_enable_i18n_support == true) { + defines += [ + "V8_I18N_SUPPORT", + ] + } + if (v8_compress_startup_data == "bz2") { + defines += [ + "COMPRESS_STARTUP_DATA_BZ2", + ] + } + if (v8_enable_extra_checks == true) { + defines += [ + "ENABLE_EXTRA_CHECKS", + ] + } + if (v8_enable_handle_zapping == true) { + defines += [ + "ENABLE_HANDLE_ZAPPING", + ] + } + if (v8_use_external_startup_data == true) { + defines += [ + "V8_USE_EXTERNAL_STARTUP_DATA", + ] + } +} + +config("toolchain") { + visibility = ":*" # Only targets in this file can depend on this. + + defines = [] + cflags = [] + + # TODO(jochen): Add support for arm, mips, mipsel. + + if (v8_target_arch == "arm64") { + defines += [ + "V8_TARGET_ARCH_ARM64", + ] + } + if (v8_target_arch == "x86") { + defines += [ + "V8_TARGET_ARCH_IA32", + ] + } + if (v8_target_arch == "x64") { + defines += [ + "V8_TARGET_ARCH_X64", + ] + } + if (is_win) { + defines += [ + "WIN32", + ] + # TODO(jochen): Support v8_enable_prof. + } + + # TODO(jochen): Add support for compiling with simulators. + + if (is_debug) { + # TODO(jochen): Add support for different debug optimization levels. + defines += [ + "ENABLE_DISASSEMBLER", + "V8_ENABLE_CHECKS", + "OBJECT_PRINT", + "VERIFY_HEAP", + "DEBUG", + "OPTIMIZED_DEBUG", + ] + } +} + +############################################################################### +# Actions +# + +action("js2c") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + script = "tools/js2c.py" + + # The script depends on this other script, this rule causes a rebuild if it + # changes. + source_prereqs = [ "tools/jsmin.py" ] + + sources = [ + "src/runtime.js", + "src/v8natives.js", + "src/symbol.js", + "src/array.js", + "src/string.js", + "src/uri.js", + "third_party/fdlibm/fdlibm.js", + "src/math.js", + "src/messages.js", + "src/apinatives.js", + "src/debug-debugger.js", + "src/mirror-debugger.js", + "src/liveedit-debugger.js", + "src/date.js", + "src/json.js", + "src/regexp.js", + "src/arraybuffer.js", + "src/typedarray.js", + "src/collection.js", + "src/collection-iterator.js", + "src/weak_collection.js", + "src/promise.js", + "src/object-observe.js", + "src/macros.py", + "src/array-iterator.js", + "src/string-iterator.js", + ] + + outputs = [ + "$target_gen_dir/libraries.cc" + ] + + if (v8_enable_i18n_support) { + sources += [ "src/i18n.js" ] + } + + args = [ + rebase_path("$target_gen_dir/libraries.cc", root_build_dir), + "CORE", + v8_compress_startup_data + ] + rebase_path(sources, root_build_dir) + + if (v8_use_external_startup_data) { + outputs += [ "$target_gen_dir/libraries.bin" ] + args += [ + "--startup_blob", + rebase_path("$target_gen_dir/libraries.bin", root_build_dir) + ] + } +} + +action("js2c_experimental") { + visibility = ":*" # Only targets in this file can depend on this. + + script = "tools/js2c.py" + + # The script depends on this other script, this rule causes a rebuild if it + # changes. 
+ source_prereqs = [ "tools/jsmin.py" ] + + sources = [ + "src/macros.py", + "src/proxy.js", + "src/generator.js", + "src/harmony-string.js", + "src/harmony-array.js", + ] + + outputs = [ + "$target_gen_dir/experimental-libraries.cc" + ] + + args = [ + rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir), + "EXPERIMENTAL", + v8_compress_startup_data + ] + rebase_path(sources, root_build_dir) + + if (v8_use_external_startup_data) { + outputs += [ "$target_gen_dir/libraries_experimental.bin" ] + args += [ + "--startup_blob", + rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir) + ] + } +} + +if (v8_use_external_startup_data) { + action("natives_blob") { + visibility = ":*" # Only targets in this file can depend on this. + + deps = [ + ":js2c", + ":js2c_experimental" + ] + + sources = [ + "$target_gen_dir/libraries.bin", + "$target_gen_dir/libraries_experimental.bin" + ] + + outputs = [ + "$root_gen_dir/natives_blob.bin" + ] + + script = "tools/concatenate-files.py" + + args = rebase_path(sources + outputs, root_build_dir) + } +} + +action("postmortem-metadata") { + visibility = ":*" # Only targets in this file can depend on this. + + script = "tools/gen-postmortem-metadata.py" + + sources = [ + "src/objects.h", + "src/objects-inl.h", + ] + + outputs = [ + "$target_gen_dir/debug-support.cc" + ] + + args = + rebase_path(outputs, root_build_dir) + + rebase_path(sources, root_build_dir) +} + +action("run_mksnapshot") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + deps = [ ":mksnapshot($host_toolchain)" ] + + script = "tools/run.py" + + outputs = [ + "$target_gen_dir/snapshot.cc" + ] + + args = [ + "./" + rebase_path(get_label_info(":mksnapshot($host_toolchain)", + "root_out_dir") + "/mksnapshot", + root_build_dir), + "--log-snapshot-positions", + "--logfile", rebase_path("$target_gen_dir/snapshot.log", root_build_dir), + rebase_path("$target_gen_dir/snapshot.cc", root_build_dir) + ] + + if (v8_random_seed != "0") { + args += [ "--random-seed", v8_random_seed ] + } + + if (v8_use_external_startup_data) { + outputs += [ "$root_gen_dir/snapshot_blob.bin" ] + args += [ + "--startup_blob", + rebase_path("$root_gen_dir/snapshot_blob.bin", root_build_dir) + ] + } +} + + +############################################################################### +# Source Sets (aka static libraries) +# + +source_set("v8_nosnapshot") { + visibility = ":*" # Only targets in this file can depend on this. + + deps = [ + ":js2c", + ":js2c_experimental", + ":v8_base", + ] + + sources = [ + "$target_gen_dir/libraries.cc", + "$target_gen_dir/experimental-libraries.cc", + "src/snapshot-empty.cc", + "src/snapshot-common.cc", + ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config", ":features", ":toolchain" ] +} + +source_set("v8_snapshot") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + deps = [ + ":js2c", + ":js2c_experimental", + ":run_mksnapshot", + ":v8_base", + ] + + sources = [ + "$target_gen_dir/libraries.cc", + "$target_gen_dir/experimental-libraries.cc", + "$target_gen_dir/snapshot.cc", + "src/snapshot-common.cc", + ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config", ":features", ":toolchain" ] +} + +if (v8_use_external_startup_data) { + source_set("v8_external_snapshot") { + visibility = ":*" # Only targets in this file can depend on this. + + deps = [ + ":js2c", + ":js2c_experimental", + ":run_mksnapshot", + ":v8_base", + ":natives_blob", + ] + + sources = [ + "src/natives-external.cc", + "src/snapshot-external.cc", + ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config", ":features", ":toolchain" ] + } +} + +source_set("v8_base") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + sources = [ + "src/accessors.cc", + "src/accessors.h", + "src/allocation.cc", + "src/allocation.h", + "src/allocation-site-scopes.cc", + "src/allocation-site-scopes.h", + "src/allocation-tracker.cc", + "src/allocation-tracker.h", + "src/api.cc", + "src/api.h", + "src/arguments.cc", + "src/arguments.h", + "src/assembler.cc", + "src/assembler.h", + "src/assert-scope.h", + "src/assert-scope.cc", + "src/ast-value-factory.cc", + "src/ast-value-factory.h", + "src/ast.cc", + "src/ast.h", + "src/bignum-dtoa.cc", + "src/bignum-dtoa.h", + "src/bignum.cc", + "src/bignum.h", + "src/bootstrapper.cc", + "src/bootstrapper.h", + "src/builtins.cc", + "src/builtins.h", + "src/bytecodes-irregexp.h", + "src/cached-powers.cc", + "src/cached-powers.h", + "src/char-predicates-inl.h", + "src/char-predicates.h", + "src/checks.cc", + "src/checks.h", + "src/circular-queue-inl.h", + "src/circular-queue.h", + "src/code-stubs.cc", + "src/code-stubs.h", + "src/code-stubs-hydrogen.cc", + "src/code.h", + "src/codegen.cc", + "src/codegen.h", + "src/compilation-cache.cc", + "src/compilation-cache.h", + "src/compiler/ast-graph-builder.cc", + "src/compiler/ast-graph-builder.h", + "src/compiler/code-generator-impl.h", + "src/compiler/code-generator.cc", + "src/compiler/code-generator.h", + "src/compiler/common-node-cache.h", + "src/compiler/common-operator.h", + "src/compiler/control-builders.cc", + "src/compiler/control-builders.h", + "src/compiler/frame.h", + "src/compiler/gap-resolver.cc", + "src/compiler/gap-resolver.h", + "src/compiler/generic-algorithm-inl.h", + "src/compiler/generic-algorithm.h", + "src/compiler/generic-graph.h", + "src/compiler/generic-node-inl.h", + "src/compiler/generic-node.h", + "src/compiler/graph-builder.cc", + "src/compiler/graph-builder.h", + "src/compiler/graph-inl.h", + "src/compiler/graph-reducer.cc", + "src/compiler/graph-reducer.h", + "src/compiler/graph-replay.cc", + "src/compiler/graph-replay.h", + "src/compiler/graph-visualizer.cc", + 
"src/compiler/graph-visualizer.h", + "src/compiler/graph.cc", + "src/compiler/graph.h", + "src/compiler/instruction-codes.h", + "src/compiler/instruction-selector-impl.h", + "src/compiler/instruction-selector.cc", + "src/compiler/instruction-selector.h", + "src/compiler/instruction.cc", + "src/compiler/instruction.h", + "src/compiler/js-context-specialization.cc", + "src/compiler/js-context-specialization.h", + "src/compiler/js-generic-lowering.cc", + "src/compiler/js-generic-lowering.h", + "src/compiler/js-graph.cc", + "src/compiler/js-graph.h", + "src/compiler/js-operator.h", + "src/compiler/js-typed-lowering.cc", + "src/compiler/js-typed-lowering.h", + "src/compiler/linkage-impl.h", + "src/compiler/linkage.cc", + "src/compiler/linkage.h", + "src/compiler/lowering-builder.cc", + "src/compiler/lowering-builder.h", + "src/compiler/machine-node-factory.h", + "src/compiler/machine-operator-reducer.cc", + "src/compiler/machine-operator-reducer.h", + "src/compiler/machine-operator.h", + "src/compiler/node-aux-data-inl.h", + "src/compiler/node-aux-data.h", + "src/compiler/node-cache.cc", + "src/compiler/node-cache.h", + "src/compiler/node-matchers.h", + "src/compiler/node-properties-inl.h", + "src/compiler/node-properties.h", + "src/compiler/node.cc", + "src/compiler/node.h", + "src/compiler/opcodes.h", + "src/compiler/operator-properties-inl.h", + "src/compiler/operator-properties.h", + "src/compiler/operator.h", + "src/compiler/phi-reducer.h", + "src/compiler/pipeline.cc", + "src/compiler/pipeline.h", + "src/compiler/raw-machine-assembler.cc", + "src/compiler/raw-machine-assembler.h", + "src/compiler/register-allocator.cc", + "src/compiler/register-allocator.h", + "src/compiler/representation-change.h", + "src/compiler/schedule.cc", + "src/compiler/schedule.h", + "src/compiler/scheduler.cc", + "src/compiler/scheduler.h", + "src/compiler/simplified-lowering.cc", + "src/compiler/simplified-lowering.h", + "src/compiler/simplified-node-factory.h", + 
"src/compiler/simplified-operator.h", + "src/compiler/source-position.cc", + "src/compiler/source-position.h", + "src/compiler/structured-machine-assembler.cc", + "src/compiler/structured-machine-assembler.h", + "src/compiler/typer.cc", + "src/compiler/typer.h", + "src/compiler/verifier.cc", + "src/compiler/verifier.h", + "src/compiler.cc", + "src/compiler.h", + "src/contexts.cc", + "src/contexts.h", + "src/conversions-inl.h", + "src/conversions.cc", + "src/conversions.h", + "src/counters.cc", + "src/counters.h", + "src/cpu-profiler-inl.h", + "src/cpu-profiler.cc", + "src/cpu-profiler.h", + "src/data-flow.cc", + "src/data-flow.h", + "src/date.cc", + "src/date.h", + "src/dateparser-inl.h", + "src/dateparser.cc", + "src/dateparser.h", + "src/debug.cc", + "src/debug.h", + "src/deoptimizer.cc", + "src/deoptimizer.h", + "src/disasm.h", + "src/disassembler.cc", + "src/disassembler.h", + "src/diy-fp.cc", + "src/diy-fp.h", + "src/double.h", + "src/dtoa.cc", + "src/dtoa.h", + "src/effects.h", + "src/elements-kind.cc", + "src/elements-kind.h", + "src/elements.cc", + "src/elements.h", + "src/execution.cc", + "src/execution.h", + "src/extensions/externalize-string-extension.cc", + "src/extensions/externalize-string-extension.h", + "src/extensions/free-buffer-extension.cc", + "src/extensions/free-buffer-extension.h", + "src/extensions/gc-extension.cc", + "src/extensions/gc-extension.h", + "src/extensions/statistics-extension.cc", + "src/extensions/statistics-extension.h", + "src/extensions/trigger-failure-extension.cc", + "src/extensions/trigger-failure-extension.h", + "src/factory.cc", + "src/factory.h", + "src/fast-dtoa.cc", + "src/fast-dtoa.h", + "src/feedback-slots.h", + "src/field-index.cc", + "src/field-index.h", + "src/field-index-inl.h", + "src/fixed-dtoa.cc", + "src/fixed-dtoa.h", + "src/flag-definitions.h", + "src/flags.cc", + "src/flags.h", + "src/frames-inl.h", + "src/frames.cc", + "src/frames.h", + "src/full-codegen.cc", + "src/full-codegen.h", + 
"src/func-name-inferrer.cc", + "src/func-name-inferrer.h", + "src/gdb-jit.cc", + "src/gdb-jit.h", + "src/global-handles.cc", + "src/global-handles.h", + "src/globals.h", + "src/handles-inl.h", + "src/handles.cc", + "src/handles.h", + "src/hashmap.h", + "src/heap-profiler.cc", + "src/heap-profiler.h", + "src/heap-snapshot-generator-inl.h", + "src/heap-snapshot-generator.cc", + "src/heap-snapshot-generator.h", + "src/heap/gc-tracer.cc", + "src/heap/gc-tracer.h", + "src/heap/heap-inl.h", + "src/heap/heap.cc", + "src/heap/heap.h", + "src/heap/incremental-marking.cc", + "src/heap/incremental-marking.h", + "src/heap/mark-compact-inl.h", + "src/heap/mark-compact.cc", + "src/heap/mark-compact.h", + "src/heap/objects-visiting-inl.h", + "src/heap/objects-visiting.cc", + "src/heap/objects-visiting.h", + "src/heap/spaces-inl.h", + "src/heap/spaces.cc", + "src/heap/spaces.h", + "src/heap/store-buffer-inl.h", + "src/heap/store-buffer.cc", + "src/heap/store-buffer.h", + "src/heap/sweeper-thread.h", + "src/heap/sweeper-thread.cc", + "src/hydrogen-alias-analysis.h", + "src/hydrogen-bce.cc", + "src/hydrogen-bce.h", + "src/hydrogen-bch.cc", + "src/hydrogen-bch.h", + "src/hydrogen-canonicalize.cc", + "src/hydrogen-canonicalize.h", + "src/hydrogen-check-elimination.cc", + "src/hydrogen-check-elimination.h", + "src/hydrogen-dce.cc", + "src/hydrogen-dce.h", + "src/hydrogen-dehoist.cc", + "src/hydrogen-dehoist.h", + "src/hydrogen-environment-liveness.cc", + "src/hydrogen-environment-liveness.h", + "src/hydrogen-escape-analysis.cc", + "src/hydrogen-escape-analysis.h", + "src/hydrogen-flow-engine.h", + "src/hydrogen-instructions.cc", + "src/hydrogen-instructions.h", + "src/hydrogen.cc", + "src/hydrogen.h", + "src/hydrogen-gvn.cc", + "src/hydrogen-gvn.h", + "src/hydrogen-infer-representation.cc", + "src/hydrogen-infer-representation.h", + "src/hydrogen-infer-types.cc", + "src/hydrogen-infer-types.h", + "src/hydrogen-load-elimination.cc", + "src/hydrogen-load-elimination.h", + 
"src/hydrogen-mark-deoptimize.cc", + "src/hydrogen-mark-deoptimize.h", + "src/hydrogen-mark-unreachable.cc", + "src/hydrogen-mark-unreachable.h", + "src/hydrogen-osr.cc", + "src/hydrogen-osr.h", + "src/hydrogen-range-analysis.cc", + "src/hydrogen-range-analysis.h", + "src/hydrogen-redundant-phi.cc", + "src/hydrogen-redundant-phi.h", + "src/hydrogen-removable-simulates.cc", + "src/hydrogen-removable-simulates.h", + "src/hydrogen-representation-changes.cc", + "src/hydrogen-representation-changes.h", + "src/hydrogen-sce.cc", + "src/hydrogen-sce.h", + "src/hydrogen-store-elimination.cc", + "src/hydrogen-store-elimination.h", + "src/hydrogen-types.cc", + "src/hydrogen-types.h", + "src/hydrogen-uint32-analysis.cc", + "src/hydrogen-uint32-analysis.h", + "src/i18n.cc", + "src/i18n.h", + "src/icu_util.cc", + "src/icu_util.h", + "src/ic-inl.h", + "src/ic.cc", + "src/ic.h", + "src/interface.cc", + "src/interface.h", + "src/interpreter-irregexp.cc", + "src/interpreter-irregexp.h", + "src/isolate.cc", + "src/isolate.h", + "src/json-parser.h", + "src/json-stringifier.h", + "src/jsregexp-inl.h", + "src/jsregexp.cc", + "src/jsregexp.h", + "src/list-inl.h", + "src/list.h", + "src/lithium-allocator-inl.h", + "src/lithium-allocator.cc", + "src/lithium-allocator.h", + "src/lithium-codegen.cc", + "src/lithium-codegen.h", + "src/lithium.cc", + "src/lithium.h", + "src/liveedit.cc", + "src/liveedit.h", + "src/log-inl.h", + "src/log-utils.cc", + "src/log-utils.h", + "src/log.cc", + "src/log.h", + "src/lookup-inl.h", + "src/lookup.cc", + "src/lookup.h", + "src/macro-assembler.h", + "src/messages.cc", + "src/messages.h", + "src/msan.h", + "src/natives.h", + "src/objects-debug.cc", + "src/objects-inl.h", + "src/objects-printer.cc", + "src/objects.cc", + "src/objects.h", + "src/optimizing-compiler-thread.cc", + "src/optimizing-compiler-thread.h", + "src/ostreams.cc", + "src/ostreams.h", + "src/parser.cc", + "src/parser.h", + "src/perf-jit.cc", + "src/perf-jit.h", + 
"src/preparse-data-format.h", + "src/preparse-data.cc", + "src/preparse-data.h", + "src/preparser.cc", + "src/preparser.h", + "src/prettyprinter.cc", + "src/prettyprinter.h", + "src/profile-generator-inl.h", + "src/profile-generator.cc", + "src/profile-generator.h", + "src/property-details.h", + "src/property.cc", + "src/property.h", + "src/prototype.h", + "src/regexp-macro-assembler-irregexp-inl.h", + "src/regexp-macro-assembler-irregexp.cc", + "src/regexp-macro-assembler-irregexp.h", + "src/regexp-macro-assembler-tracer.cc", + "src/regexp-macro-assembler-tracer.h", + "src/regexp-macro-assembler.cc", + "src/regexp-macro-assembler.h", + "src/regexp-stack.cc", + "src/regexp-stack.h", + "src/rewriter.cc", + "src/rewriter.h", + "src/runtime-profiler.cc", + "src/runtime-profiler.h", + "src/runtime.cc", + "src/runtime.h", + "src/safepoint-table.cc", + "src/safepoint-table.h", + "src/sampler.cc", + "src/sampler.h", + "src/scanner-character-streams.cc", + "src/scanner-character-streams.h", + "src/scanner.cc", + "src/scanner.h", + "src/scopeinfo.cc", + "src/scopeinfo.h", + "src/scopes.cc", + "src/scopes.h", + "src/serialize.cc", + "src/serialize.h", + "src/small-pointer-list.h", + "src/smart-pointers.h", + "src/snapshot-source-sink.cc", + "src/snapshot-source-sink.h", + "src/snapshot.h", + "src/string-search.cc", + "src/string-search.h", + "src/string-stream.cc", + "src/string-stream.h", + "src/strtod.cc", + "src/strtod.h", + "src/stub-cache.cc", + "src/stub-cache.h", + "src/token.cc", + "src/token.h", + "src/transitions-inl.h", + "src/transitions.cc", + "src/transitions.h", + "src/type-info.cc", + "src/type-info.h", + "src/types-inl.h", + "src/types.cc", + "src/types.h", + "src/typing.cc", + "src/typing.h", + "src/unbound-queue-inl.h", + "src/unbound-queue.h", + "src/unicode-inl.h", + "src/unicode.cc", + "src/unicode.h", + "src/unique.h", + "src/uri.h", + "src/utils-inl.h", + "src/utils.cc", + "src/utils.h", + "src/v8.cc", + "src/v8.h", + "src/v8memory.h", + 
"src/v8threads.cc", + "src/v8threads.h", + "src/variables.cc", + "src/variables.h", + "src/version.cc", + "src/version.h", + "src/vm-state-inl.h", + "src/vm-state.h", + "src/zone-inl.h", + "src/zone.cc", + "src/zone.h", + "third_party/fdlibm/fdlibm.cc", + "third_party/fdlibm/fdlibm.h", + ] + + if (v8_target_arch == "x86") { + sources += [ + "src/ia32/assembler-ia32-inl.h", + "src/ia32/assembler-ia32.cc", + "src/ia32/assembler-ia32.h", + "src/ia32/builtins-ia32.cc", + "src/ia32/code-stubs-ia32.cc", + "src/ia32/code-stubs-ia32.h", + "src/ia32/codegen-ia32.cc", + "src/ia32/codegen-ia32.h", + "src/ia32/cpu-ia32.cc", + "src/ia32/debug-ia32.cc", + "src/ia32/deoptimizer-ia32.cc", + "src/ia32/disasm-ia32.cc", + "src/ia32/frames-ia32.cc", + "src/ia32/frames-ia32.h", + "src/ia32/full-codegen-ia32.cc", + "src/ia32/ic-ia32.cc", + "src/ia32/lithium-codegen-ia32.cc", + "src/ia32/lithium-codegen-ia32.h", + "src/ia32/lithium-gap-resolver-ia32.cc", + "src/ia32/lithium-gap-resolver-ia32.h", + "src/ia32/lithium-ia32.cc", + "src/ia32/lithium-ia32.h", + "src/ia32/macro-assembler-ia32.cc", + "src/ia32/macro-assembler-ia32.h", + "src/ia32/regexp-macro-assembler-ia32.cc", + "src/ia32/regexp-macro-assembler-ia32.h", + "src/ia32/stub-cache-ia32.cc", + "src/compiler/ia32/code-generator-ia32.cc", + "src/compiler/ia32/instruction-codes-ia32.h", + "src/compiler/ia32/instruction-selector-ia32.cc", + "src/compiler/ia32/linkage-ia32.cc", + ] + } else if (v8_target_arch == "x64") { + sources += [ + "src/x64/assembler-x64-inl.h", + "src/x64/assembler-x64.cc", + "src/x64/assembler-x64.h", + "src/x64/builtins-x64.cc", + "src/x64/code-stubs-x64.cc", + "src/x64/code-stubs-x64.h", + "src/x64/codegen-x64.cc", + "src/x64/codegen-x64.h", + "src/x64/cpu-x64.cc", + "src/x64/debug-x64.cc", + "src/x64/deoptimizer-x64.cc", + "src/x64/disasm-x64.cc", + "src/x64/frames-x64.cc", + "src/x64/frames-x64.h", + "src/x64/full-codegen-x64.cc", + "src/x64/ic-x64.cc", + "src/x64/lithium-codegen-x64.cc", + 
"src/x64/lithium-codegen-x64.h", + "src/x64/lithium-gap-resolver-x64.cc", + "src/x64/lithium-gap-resolver-x64.h", + "src/x64/lithium-x64.cc", + "src/x64/lithium-x64.h", + "src/x64/macro-assembler-x64.cc", + "src/x64/macro-assembler-x64.h", + "src/x64/regexp-macro-assembler-x64.cc", + "src/x64/regexp-macro-assembler-x64.h", + "src/x64/stub-cache-x64.cc", + "src/compiler/x64/code-generator-x64.cc", + "src/compiler/x64/instruction-codes-x64.h", + "src/compiler/x64/instruction-selector-x64.cc", + "src/compiler/x64/linkage-x64.cc", + ] + } else if (v8_target_arch == "arm") { + sources += [ + "src/arm/assembler-arm-inl.h", + "src/arm/assembler-arm.cc", + "src/arm/assembler-arm.h", + "src/arm/builtins-arm.cc", + "src/arm/code-stubs-arm.cc", + "src/arm/code-stubs-arm.h", + "src/arm/codegen-arm.cc", + "src/arm/codegen-arm.h", + "src/arm/constants-arm.h", + "src/arm/constants-arm.cc", + "src/arm/cpu-arm.cc", + "src/arm/debug-arm.cc", + "src/arm/deoptimizer-arm.cc", + "src/arm/disasm-arm.cc", + "src/arm/frames-arm.cc", + "src/arm/frames-arm.h", + "src/arm/full-codegen-arm.cc", + "src/arm/ic-arm.cc", + "src/arm/lithium-arm.cc", + "src/arm/lithium-arm.h", + "src/arm/lithium-codegen-arm.cc", + "src/arm/lithium-codegen-arm.h", + "src/arm/lithium-gap-resolver-arm.cc", + "src/arm/lithium-gap-resolver-arm.h", + "src/arm/macro-assembler-arm.cc", + "src/arm/macro-assembler-arm.h", + "src/arm/regexp-macro-assembler-arm.cc", + "src/arm/regexp-macro-assembler-arm.h", + "src/arm/simulator-arm.cc", + "src/arm/stub-cache-arm.cc", + "src/compiler/arm/code-generator-arm.cc", + "src/compiler/arm/instruction-codes-arm.h", + "src/compiler/arm/instruction-selector-arm.cc", + "src/compiler/arm/linkage-arm.cc", + ] + } else if (v8_target_arch == "arm64") { + sources += [ + "src/arm64/assembler-arm64.cc", + "src/arm64/assembler-arm64.h", + "src/arm64/assembler-arm64-inl.h", + "src/arm64/builtins-arm64.cc", + "src/arm64/codegen-arm64.cc", + "src/arm64/codegen-arm64.h", + 
"src/arm64/code-stubs-arm64.cc", + "src/arm64/code-stubs-arm64.h", + "src/arm64/constants-arm64.h", + "src/arm64/cpu-arm64.cc", + "src/arm64/debug-arm64.cc", + "src/arm64/decoder-arm64.cc", + "src/arm64/decoder-arm64.h", + "src/arm64/decoder-arm64-inl.h", + "src/arm64/deoptimizer-arm64.cc", + "src/arm64/disasm-arm64.cc", + "src/arm64/disasm-arm64.h", + "src/arm64/frames-arm64.cc", + "src/arm64/frames-arm64.h", + "src/arm64/full-codegen-arm64.cc", + "src/arm64/ic-arm64.cc", + "src/arm64/instructions-arm64.cc", + "src/arm64/instructions-arm64.h", + "src/arm64/instrument-arm64.cc", + "src/arm64/instrument-arm64.h", + "src/arm64/lithium-arm64.cc", + "src/arm64/lithium-arm64.h", + "src/arm64/lithium-codegen-arm64.cc", + "src/arm64/lithium-codegen-arm64.h", + "src/arm64/lithium-gap-resolver-arm64.cc", + "src/arm64/lithium-gap-resolver-arm64.h", + "src/arm64/macro-assembler-arm64.cc", + "src/arm64/macro-assembler-arm64.h", + "src/arm64/macro-assembler-arm64-inl.h", + "src/arm64/regexp-macro-assembler-arm64.cc", + "src/arm64/regexp-macro-assembler-arm64.h", + "src/arm64/simulator-arm64.cc", + "src/arm64/simulator-arm64.h", + "src/arm64/stub-cache-arm64.cc", + "src/arm64/utils-arm64.cc", + "src/arm64/utils-arm64.h", + "src/compiler/arm64/code-generator-arm64.cc", + "src/compiler/arm64/instruction-codes-arm64.h", + "src/compiler/arm64/instruction-selector-arm64.cc", + "src/compiler/arm64/linkage-arm64.cc", + ] + } else if (v8_target_arch == "mipsel") { + sources += [ + "src/mips/assembler-mips.cc", + "src/mips/assembler-mips.h", + "src/mips/assembler-mips-inl.h", + "src/mips/builtins-mips.cc", + "src/mips/codegen-mips.cc", + "src/mips/codegen-mips.h", + "src/mips/code-stubs-mips.cc", + "src/mips/code-stubs-mips.h", + "src/mips/constants-mips.cc", + "src/mips/constants-mips.h", + "src/mips/cpu-mips.cc", + "src/mips/debug-mips.cc", + "src/mips/deoptimizer-mips.cc", + "src/mips/disasm-mips.cc", + "src/mips/frames-mips.cc", + "src/mips/frames-mips.h", + 
"src/mips/full-codegen-mips.cc", + "src/mips/ic-mips.cc", + "src/mips/lithium-codegen-mips.cc", + "src/mips/lithium-codegen-mips.h", + "src/mips/lithium-gap-resolver-mips.cc", + "src/mips/lithium-gap-resolver-mips.h", + "src/mips/lithium-mips.cc", + "src/mips/lithium-mips.h", + "src/mips/macro-assembler-mips.cc", + "src/mips/macro-assembler-mips.h", + "src/mips/regexp-macro-assembler-mips.cc", + "src/mips/regexp-macro-assembler-mips.h", + "src/mips/simulator-mips.cc", + "src/mips/stub-cache-mips.cc", + ] + } + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config", ":features", ":toolchain" ] + + defines = [] + deps = [ ":v8_libbase" ] + + if (is_linux) { + if (v8_compress_startup_data == "bz2") { + libs += [ "bz2" ] + } + } + + if (v8_enable_i18n_support) { + deps += [ "//third_party/icu" ] + if (is_win) { + deps += [ "//third_party/icu:icudata" ] + } + # TODO(jochen): Add support for icu_use_data_file_flag + defines += [ "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE" ] + } else { + sources -= [ + "src/i18n.cc", + "src/i18n.h", + ] + } + + if (v8_postmortem_support) { + sources += [ "$target_gen_dir/debug-support.cc" ] + deps += [ ":postmortem-metadata" ] + } +} + +source_set("v8_libbase") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + sources = [ + "src/base/atomicops.h", + "src/base/atomicops_internals_arm64_gcc.h", + "src/base/atomicops_internals_arm_gcc.h", + "src/base/atomicops_internals_atomicword_compat.h", + "src/base/atomicops_internals_mac.h", + "src/base/atomicops_internals_mips_gcc.h", + "src/base/atomicops_internals_tsan.h", + "src/base/atomicops_internals_x86_gcc.cc", + "src/base/atomicops_internals_x86_gcc.h", + "src/base/atomicops_internals_x86_msvc.h", + "src/base/build_config.h", + "src/base/cpu.cc", + "src/base/cpu.h", + "src/base/lazy-instance.h", + "src/base/logging.cc", + "src/base/logging.h", + "src/base/macros.h", + "src/base/once.cc", + "src/base/once.h", + "src/base/platform/elapsed-timer.h", + "src/base/platform/time.cc", + "src/base/platform/time.h", + "src/base/platform/condition-variable.cc", + "src/base/platform/condition-variable.h", + "src/base/platform/mutex.cc", + "src/base/platform/mutex.h", + "src/base/platform/platform.h", + "src/base/platform/semaphore.cc", + "src/base/platform/semaphore.h", + "src/base/safe_conversions.h", + "src/base/safe_conversions_impl.h", + "src/base/safe_math.h", + "src/base/safe_math_impl.h", + "src/base/utils/random-number-generator.cc", + "src/base/utils/random-number-generator.h", + ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config_base", ":features", ":toolchain" ] + + defines = [] + + if (is_posix) { + sources += [ + "src/base/platform/platform-posix.cc" + ] + } + + if (is_linux) { + sources += [ + "src/base/platform/platform-linux.cc" + ] + + libs = [ "rt" ] + } else if (is_android) { + defines += [ "CAN_USE_VFP_INSTRUCTIONS" ] + + if (build_os == "mac") { + if (current_toolchain == host_toolchain) { + sources += [ "src/base/platform/platform-macos.cc" ] + } else { + sources += [ "src/base/platform/platform-linux.cc" ] + } + } else { + sources += [ "src/base/platform/platform-linux.cc" ] + if (current_toolchain == 
host_toolchain) { + defines += [ "V8_LIBRT_NOT_AVAILABLE" ] + } + } + } else if (is_mac) { + sources += [ "src/base/platform/platform-macos.cc" ] + } else if (is_win) { + # TODO(jochen): Add support for cygwin. + sources += [ + "src/base/platform/platform-win32.cc", + "src/base/win32-headers.h", + "src/base/win32-math.cc", + "src/base/win32-math.h", + ] + + defines += [ "_CRT_RAND_S" ] # for rand_s() + + libs = [ "winmm.lib", "ws2_32.lib" ] + } + + # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris. +} + +source_set("v8_libplatform") { + sources = [ + "include/libplatform/libplatform.h", + "src/libplatform/default-platform.cc", + "src/libplatform/default-platform.h", + "src/libplatform/task-queue.cc", + "src/libplatform/task-queue.h", + "src/libplatform/worker-thread.cc", + "src/libplatform/worker-thread.h", + ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config_base", ":features", ":toolchain" ] + + deps = [ + ":v8_libbase", + ] +} + +############################################################################### +# Executables +# + +if (current_toolchain == host_toolchain) { + executable("mksnapshot") { + visibility = ":*" # Only targets in this file can depend on this. 
+ + sources = [ + "src/mksnapshot.cc", + ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config", ":features", ":toolchain" ] + + deps = [ + ":v8_base", + ":v8_libplatform", + ":v8_nosnapshot", + ] + + if (v8_compress_startup_data == "bz2") { + libs = [ "bz2" ] + } + } +} + +############################################################################### +# Public targets +# + +if (component_mode == "shared_library") { + +component("v8") { + sources = [ + "src/v8dll-main.cc", + ] + + if (v8_use_external_startup_data) { + deps = [ + ":v8_base", + ":v8_external_snapshot", + ] + } else if (v8_use_snapshot) { + deps = [ + ":v8_base", + ":v8_snapshot", + ] + } else { + deps = [ + ":v8_base", + ":v8_nosnapshot", + ] + } + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] + configs += [ ":internal_config", ":features", ":toolchain" ] + + direct_dependent_configs = [ ":external_config" ] + + if (is_android && current_toolchain != host_toolchain) { + libs += [ "log" ] + } +} + +} else { + +group("v8") { + if (v8_use_external_startup_data) { + deps = [ + ":v8_base", + ":v8_external_snapshot", + ] + } else if (v8_use_snapshot) { + deps = [ + ":v8_base", + ":v8_snapshot", + ] + } else { + deps = [ + ":v8_base", + ":v8_nosnapshot", + ] + } + + direct_dependent_configs = [ ":external_config" ] +} + +} diff -Nru nodejs-0.11.13/deps/v8/ChangeLog nodejs-0.11.15/deps/v8/ChangeLog --- nodejs-0.11.13/deps/v8/ChangeLog 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/ChangeLog 2015-01-20 21:22:17.000000000 +0000 @@ -1,3 +1,1031 @@ +2014-08-13: Version 3.28.73 + + Performance and stability improvements on all platforms. + + +2014-08-12: Version 3.28.71 + + ToNumber(Symbol) should throw TypeError (issue 3499). + + Performance and stability improvements on all platforms. 
+ + +2014-08-11: Version 3.28.69 + + Performance and stability improvements on all platforms. + + +2014-08-09: Version 3.28.65 + + Performance and stability improvements on all platforms. + + +2014-08-08: Version 3.28.64 + + ES6: Implement WeakMap and WeakSet constructor logic (issue 3399). + + Enable ES6 unscopables (issue 3401). + + Turn on harmony_unscopables for es_staging (issue 3401). + + Remove proxies from --harmony switch for M38, because problems. + + Reland "Add initial support for compiler unit tests using GTest/GMock." + (issue 3489). + + Enable ES6 iteration by default (issue 2214). + + Performance and stability improvements on all platforms. + + +2014-08-07: Version 3.28.62 + + Only escape U+0022 in argument values of `String.prototype` HTML methods + (issue 2217). + + Update webkit test for expected own properties. + + This implements unscopables (issue 3401). + + Add `CheckObjectCoercible` for the `String.prototype` HTML methods + (issue 2218). + + Add initial support for compiler unit tests using GTest/GMock (issue + 3489). + + Trigger exception debug events on Promise reject (Chromium issue + 393913). + + Refactor unit tests for the base library to use GTest (issue 3489). + + Performance and stability improvements on all platforms. + + +2014-08-06: Version 3.28.60 + + Enable ES6 Map and Set by default (issue 1622). + + Performance and stability improvements on all platforms. + + +2014-08-06: Version 3.28.59 + + Removed GetConstructor from the API. Instead either get the + "constructor" property stored in the prototype, or keep a side-table. + + Enable ES6 Symbols by default (issue 2158). + + Performance and stability improvements on all platforms. + + +2014-08-05: Version 3.28.57 + + Add dependencies on gtest and gmock. + + Performance and stability improvements on all platforms. + + +2014-08-04: Version 3.28.54 + + Performance and stability improvements on all platforms. 
+ + +2014-08-01: Version 3.28.53 + + Performance and stability improvements on all platforms. + + +2014-07-31: Version 3.28.52 + + Performance and stability improvements on all platforms. + + +2014-07-31: Version 3.28.51 + + Drop deprecated memory related notification API (Chromium issue 397026). + + Performance and stability improvements on all platforms. + + +2014-07-31: Version 3.28.50 + + Use emergency memory in the case of out of memory during evacuation + (Chromium issue 395314). + + Performance and stability improvements on all platforms. + + +2014-07-30: Version 3.28.48 + + Fix Object.freeze with field type tracking. Keep the descriptor properly + intact while update the field type (issue 3458). + + Performance and stability improvements on all platforms. + + +2014-07-29: Version 3.28.45 + + Performance and stability improvements on all platforms. + + +2014-07-28: Version 3.28.43 + + Performance and stability improvements on all platforms. + + +2014-07-25: Version 3.28.38 + + Fix issue with setters and their holders in accessors.cc (Chromium issue + 3462). + + Introduce more debug events for promises (issue 3093). + + Move gc notifications from V8 to Isolate and make idle hint mandatory + (Chromium issue 397026). + + The accessors should get the value from the holder and not from this + (issue 3461). + + Performance and stability improvements on all platforms. + + +2014-07-24: Version 3.28.35 + + Rebaseline/update the intl tests with ICU 52 (issue 3454). + + Expose the content of Sets and WeakSets through SetMirror (issue 3093). + + Performance and stability improvements on all platforms. + + +2014-07-23: Version 3.28.32 + + Update ICU to 5.2 (matching chromium) (issue 3452). + + Performance and stability improvements on all platforms. + + +2014-07-22: Version 3.28.31 + + Remove harmony-typeof. + + Implement String.prototype.codePointAt and String.fromCodePoint (issue + 2840). + + Performance and stability improvements on all platforms. 
+ + +2014-07-21: Version 3.28.30 + + Performance and stability improvements on all platforms. + + +2014-07-21: Version 3.28.29 + + Performance and stability improvements on all platforms. + + +2014-07-18: Version 3.28.28 + + Performance and stability improvements on all platforms. + + +2014-07-17: Version 3.28.26 + + Ship ES6 Math functions (issue 2938). + + Make ToPrimitive throw on symbol wrappers (issue 3442). + + Performance and stability improvements on all platforms. + + +2014-07-16: Version 3.28.25 + + Performance and stability improvements on all platforms. + + +2014-07-16: Version 3.28.24 + + Removed some copy-n-paste from StackFrame::Foo API entries (issue 3436). + + Performance and stability improvements on all platforms. + + +2014-07-15: Version 3.28.23 + + Fix error message about read-only symbol properties (issue 3441). + + Include symbol properties in Object.{create,defineProperties} (issue + 3440). + + Performance and stability improvements on all platforms. + + +2014-07-14: Version 3.28.22 + + Performance and stability improvements on all platforms. + + +2014-07-11: Version 3.28.21 + + Make `let` usable as an identifier in ES6 sloppy mode (issue 2198). + + Support ES6 Map and Set in heap profiler (issue 3368). + + Performance and stability improvements on all platforms. + + +2014-07-10: Version 3.28.20 + + Remove deprecate counter/histogram methods. + + Fixed printing of external references (Chromium issue 392068). + + Fix several issues with ES6 redeclaration checks (issue 3426). + + Performance and stability improvements on all platforms. + + +2014-07-09: Version 3.28.19 + + Performance and stability improvements on all platforms. + + +2014-07-09: Version 3.28.18 + + Reland "Postpone termination exceptions in debug scope." (issue 3408). + + Performance and stability improvements on all platforms. + + +2014-07-08: Version 3.28.17 + + MIPS: Fix computed properties on object literals with a double as + propertyname (Chromium issue 390732). 
+ + Performance and stability improvements on all platforms. + + +2014-07-08: Version 3.28.16 + + Fix computed properties on object literals with a double as propertyname + (Chromium issue 390732). + + Avoid brittle use of .bind in Promise.all (issue 3420). + + Performance and stability improvements on all platforms. + + +2014-07-07: Version 3.28.15 + + Remove a bunch of Isolate::UncheckedCurrent calls. + + Performance and stability improvements on all platforms. + + +2014-07-07: Version 3.28.14 + + Use the HeapObjectIterator to scan-on-scavenge map pages (Chromium issue + 390732). + + Introduce debug events for Microtask queue (Chromium issue 272416). + + Split out libplatform into a separate libary. + + Add clang-format to presubmit checks. + + Stack traces exposed to Javascript should omit extensions (issue 311). + + Remove deprecated v8::Context::HasOutOfMemoryException. + + Postpone termination exceptions in debug scope (issue 3408). + + Performance and stability improvements on all platforms. + + +2014-07-04: Version 3.28.13 + + Rollback to r22134. + + +2014-07-04: Version 3.28.12 + + Use the HeapObjectIterator to scan-on-scavenge map pages (Chromium issue + 390732). + + Introduce debug events for Microtask queue (Chromium issue 272416). + + Performance and stability improvements on all platforms. + + +2014-07-03: Version 3.28.11 + + Split out libplatform into a separate libary. + + Performance and stability improvements on all platforms. + + +2014-07-03: Version 3.28.10 + + Add clang-format to presubmit checks. + + Stack traces exposed to Javascript should omit extensions (issue 311). + + Remove deprecated v8::Context::HasOutOfMemoryException. + + Postpone termination exceptions in debug scope (issue 3408). + + Performance and stability improvements on all platforms. + + +2014-07-02: Version 3.28.9 + + Make freeze & friends ignore private properties (issue 3419). + + Introduce a builddeps make target (issue 3418). 
+ + Performance and stability improvements on all platforms. + + +2014-07-01: Version 3.28.8 + + Remove static initializer from isolate. + + ES6: Add missing Set.prototype.keys function (issue 3411). + + Introduce debug events for promises (issue 3093). + + Performance and stability improvements on all platforms. + + +2014-06-30: Version 3.28.7 + + Performance and stability improvements on all platforms. + + +2014-06-30: Version 3.28.6 + + Unbreak "os" stuff in shared d8 builds (issue 3407). + + Performance and stability improvements on all platforms. + + +2014-06-26: Version 3.28.4 + + Compile optimized code with active debugger but no break points + (Chromium issue 386492). + + Optimize Map/Set.prototype.forEach. + + Collect garbage with kReduceMemoryFootprintMask in IdleNotification + (Chromium issue 350720). + + Performance and stability improvements on all platforms. + + +2014-06-26: Version 3.28.3 + + Grow heap slower if GC freed many global handles (Chromium issue + 263503). + + Performance and stability improvements on all platforms. + + +2014-06-25: Version 3.28.2 + + Remove bogus assertions in HCompareObjectEqAndBranch (Chromium issue + 387636). + + Do not eagerly update allow_osr_at_loop_nesting_level (Chromium issue + 387599). + + Set host_arch to ia32 on machines with a 32bit userland but a 64bit + kernel (Chromium issue 368384). + + Map/Set: Implement constructor parameter handling (issue 3398). + + Performance and stability improvements on all platforms. + + +2014-06-24: Version 3.28.1 + + Support LiveEdit on Arm64 (Chromium issue 368580). + + Run JS micro tasks in the appropriate context (Chromium issue 385349). + + Add a use counter API. + + Set host_arch to ia32 on machines with a 32bit userland but a 64bit + kernel. + + Performance and stability improvements on all platforms. + + +2014-06-23: Version 3.28.0 + + MIPS: Support LiveEdit (Chromium issue 368580). + + Array.concat: properly go to dictionary mode when required (Chromium + issue 387031). 
+ + Support LiveEdit on ARM (Chromium issue 368580). + + Performance and stability improvements on all platforms. + + +2014-06-18: Version 3.27.34 + + Reduce number of writes to DependentCode array when inserting dependent + IC (Chromium issue 305878). + + Performance and stability improvements on all platforms. + + +2014-06-17: Version 3.27.33 + + Do GC if CodeRange fails to allocate a block (Chromium issue 305878). + + Throw syntax error when a getter/setter has the wrong number of params + (issue 3371). + + Performance and stability improvements on all platforms. + + +2014-06-17: Version 3.27.32 + + Performance and stability improvements on all platforms. + + +2014-06-16: Version 3.27.31 + + Version fix. + + +2014-06-16: Version 3.27.30 + + Fix representation of Phis for mutable-heapnumber-in-object-literal + properties (issue 3392). + + Performance and stability improvements on all platforms. + + +2014-06-16: Version 3.27.29 + + Emulate MLS on pre-ARMv6T2. Cleaned up thumbee vs. thumb2 confusion. + + X87: Fixed flooring division by a power of 2, once again.. (issue 3259). + + Fixed undefined behavior in RNG (Chromium issue 377790). + + Performance and stability improvements on all platforms. + + +2014-06-13: Version 3.27.28 + + Add v8::Promise::Then (Chromium issue 371288). + + Performance and stability improvements on all platforms. + + +2014-06-12: Version 3.27.27 + + Fix detection of VFP3D16 on Galaxy Tab 10.1 (issue 3387). + + Performance and stability improvements on all platforms. + + +2014-06-12: Version 3.27.26 + + MIPS: Fixed flooring division by a power of 2, once again.. (issue + 3259). + + Fixed flooring division by a power of 2, once again.. (issue 3259). + + Fix unsigned comparisons (issue 3380). + + Performance and stability improvements on all platforms. + + +2014-06-11: Version 3.27.25 + + Performance and stability improvements on all platforms. 
+ + +2014-06-11: Version 3.27.24 + + Fix invalid attributes when generalizing because of incompatible map + change (Chromium issue 382143). + + Fix missing smi check in inlined indexOf/lastIndexOf (Chromium issue + 382513). + + Performance and stability improvements on all platforms. + + +2014-06-06: Version 3.27.23 + + Performance and stability improvements on all platforms. + + +2014-06-06: Version 3.27.22 + + Performance and stability improvements on all platforms. + + +2014-06-06: Version 3.27.21 + + Turn on harmony_collections for es_staging (issue 1622). + + Do not make heap iterable eagerly (Chromium issue 379740). + + Performance and stability improvements on all platforms. + + +2014-06-05: Version 3.27.20 + + Fix invalid loop condition for Array.lastIndexOf() (Chromium issue + 380512). + + Add API support for passing a C++ function as a microtask callback. + + Performance and stability improvements on all platforms. + + +2014-06-04: Version 3.27.19 + + Split Put into Put and Remove. + + ES6: Add support for values/keys/entries for Map and Set (issue 1793). + + Performance and stability improvements on all platforms. + + +2014-06-03: Version 3.27.18 + + Remove PROHIBITS_OVERWRITING as it is subsumed by non-configurable + properties. + + Performance and stability improvements on all platforms. + + +2014-06-02: Version 3.27.17 + + BuildNumberToString: Check for undefined keys in the cache (Chromium + issue 368114). + + HRor and HSar can deoptimize (issue 3359). + + Simplify, speed-up correct-context ObjectObserve calls. + + Performance and stability improvements on all platforms. + + +2014-05-29: Version 3.27.16 + + Allow microtasks to throw exceptions and handle them gracefully + (Chromium issue 371566). + + Performance and stability improvements on all platforms. + + +2014-05-28: Version 3.27.15 + + Performance and stability improvements on all platforms. + + +2014-05-27: Version 3.27.14 + + Reland "Customized support for feedback on calls to Array." 
and follow- + up fixes (Chromium issues 377198, 377290). + + Performance and stability improvements on all platforms. + + +2014-05-26: Version 3.27.13 + + Performance and stability improvements on all platforms. + + +2014-05-26: Version 3.27.12 + + Check for cached transition to ExternalArray elements kind (issue 3337). + + Support ES6 weak collections in heap profiler (Chromium issue 376196). + + Performance and stability improvements on all platforms. + + +2014-05-23: Version 3.27.11 + + Add support for ES6 Symbol in heap profiler (Chromium issue 376194). + + Performance and stability improvements on all platforms. + + +2014-05-22: Version 3.27.10 + + Implement Mirror object for Symbols (issue 3290). + + Allow debugger to step into Map and Set forEach callbacks (issue 3341). + + Fix ArrayShift hydrogen support (Chromium issue 374838). + + Use SameValueZero for Map and Set (issue 1622). + + Array Iterator next should check for own property. + + Performance and stability improvements on all platforms. + + +2014-05-21: Version 3.27.9 + + Disable ArrayShift hydrogen support (Chromium issue 374838). + + ES6 Map/Set iterators/forEach improvements (issue 1793). + + Performance and stability improvements on all platforms. + + +2014-05-20: Version 3.27.8 + + Move microtask queueing logic from JavaScript to C++. + + Partial revert of "Next bunch of fixes for check elimination" (Chromium + issue 372173). + + Performance and stability improvements on all platforms. + + +2014-05-19: Version 3.27.7 + + Performance and stability improvements on all platforms. + + +2014-05-19: Version 3.27.6 + + Performance and stability improvements on all platforms. + + +2014-05-16: Version 3.27.5 + + Performance and stability improvements on all platforms. + + +2014-05-15: Version 3.27.4 + + Drop thenable coercion cache (Chromium issue 372788). + + Skip write barriers when updating the weak hash table (Chromium issue + 359401). + + Performance and stability improvements on all platforms. 
+ + +2014-05-14: Version 3.27.3 + + Performance and stability improvements on all platforms. + + +2014-05-13: Version 3.27.2 + + Harden %SetIsObserved with RUNTIME_ASSERTs (Chromium issue 371782). + + Drop unused static microtask API. + + Introduce an api to query the microtask autorun state of an isolate. + + Performance and stability improvements on all platforms. + + +2014-05-12: Version 3.27.1 + + Object.observe: avoid accessing acceptList properties more than once + (issue 3315). + + Array Iterator prototype should not have a constructor (issue 3293). + + Fix typos in unit test for Array.prototype.fill(). + + Shorten autogenerated error message for functions only (issue 3019, + Chromium issue 331971). + + Reland "Removed default Isolate." (Chromium issue 359977). + + Performance and stability improvements on all platforms. + + +2014-05-09: Version 3.27.0 + + Unbreak samples and tools. + + Performance and stability improvements on all platforms. + + +2014-05-08: Version 3.26.33 + + Removed default Isolate (Chromium issue 359977). + + Performance and stability improvements on all platforms. + + +2014-05-07: Version 3.26.32 + + Performance and stability improvements on all platforms. + + +2014-05-06: Version 3.26.31 + + Add a basic gn file for V8. + + Performance and stability improvements on all platforms. + + +2014-05-05: Version 3.26.30 + + Introduce a microtask suppression scope and move microtask methods to + isolate (Chromium issue 369503). + + Re-enable Object.observe and add enforcement for security invariants. + + Move cache line size calculation directly into CPU::FlushICache + (Chromium issue 359977). + + Generation of our home-grown memmove doesn't depend on serializer state + anymore (Chromium issue 359977). + + Fix |RunMicrotasks()| leaking reference to the last context being run + on. + + Object.defineProperty shouldn't be a hint that we're constructing a + dictionary (Chromium issue 362870). 
+ + Performance and stability improvements on all platforms. + + +2014-05-01: Version 3.26.29 + + Added a Isolate* parameter to Serializer::enabled() (Chromium issue + 359977). + + ES6: Add support for Array.prototype.fill() (issue 3273). + + Performance and stability improvements on all platforms. + + +2014-04-29: Version 3.26.28 + + PromiseThen should ignore non-function parameters (Chromium issue + 347455). + + Performance and stability improvements on all platforms. + + +2014-04-29: Version 3.26.27 + + Error stack getter should not overwrite itself with a data property + (issue 3294). + + Performance and stability improvements on all platforms. + + +2014-04-28: Version 3.26.26 + + Expose promise value through promise mirror (issue 3093). + + Simplified CPU/CpuFeatures a bit (Chromium issue 359977). + + Performance and stability improvements on all platforms. + + +2014-04-28: Version 3.26.25 + + Add timestamps to CPU profile samples (Chromium issue 363976). + + Expose promise status through promise mirror (issue 3093). + + Remove static CallCompletedCallback handlers. + + Added an Isolate* field to NoTrackDoubleFieldsForSerializerScope, + PlatformFeatureScope and BinaryOpIC::State (Chromium issue 359977). + + Trigger debug event on not yet caught exception in promises (issue + 3093). + + Unbreak vtunejit=on (issue 3288). + + Performance and stability improvements on all platforms. + + +2014-04-25: Version 3.26.24 + + MIPS: CodeStubs contain their corresponding Isolate* now. (part 2) + (Chromium issue 359977). + + MIPS: CodeStubs contain their corresponding Isolate* now. (part 1) + (Chromium issue 359977). + + CodeStubs contain their corresponding Isolate* now. (part 2) (Chromium + issue 359977). + + Make DescriptorArray::IsMoreGeneralThan() and DescriptorArray::Merge() + compatible again (Chromium issue 365172). + + CodeStubs contain their corresponding Isolate* now. (part 1) (Chromium + issue 359977). + + Performance and stability improvements on all platforms. 
+ + +2014-04-24: Version 3.26.23 + + Performance and stability improvements on all platforms. + + +2014-04-23: Version 3.26.22 + + Disable field type tracking by default (Chromium issue 365172). + + Performance and stability improvements on all platforms. + + +2014-04-23: Version 3.26.21 + + Context-allocate all parameters in generators (issue 3280). + + Simplify v8/Isolate teardown (Chromium issue 359977). + + Performance and stability improvements on all platforms. + + +2014-04-21: Version 3.26.20 + + ES6: Add support for Map/Set forEach (Chromium issues 1793, 2323). + + Performance and stability improvements on all platforms. + + +2014-04-18: Version 3.26.19 + + ES6: Add support for Map/Set forEach (Chromium issues 1793, 2323). + + Performance and stability improvements on all platforms. + + +2014-04-17: Version 3.26.18 + + Removed Isolate::EnterDefaultIsolate (Chromium issue 359977). + + Performance and stability improvements on all platforms. + + +2014-04-16: Version 3.26.17 + + Clear invalid field maps in PropertyAccessInfo (Chromium issue 363956). + + ES6: Add support for Map/Set forEach (Chromium issues 1793, 2323). + + Performance and stability improvements on all platforms. + + +2014-04-16: Version 3.26.16 + + Removed EnterIsolateIfNeeded and a soon-to-be-useless assertion + (Chromium issue 359977). + + Removed GetDefaultIsolate{Debugger,ForLocking,StackGuard} (Chromium + issue 359977). + + Performance and stability improvements on all platforms. + + +2014-04-15: Version 3.26.15 + + Fix result of LCodeGen::DoWrapReceiver for strict functions and builtins + (Chromium issue 362128). + + Performance and stability improvements on all platforms. + + +2014-04-15: Version 3.26.14 + + Performance and stability improvements on all platforms. + + +2014-04-14: Version 3.26.13 + + Make maps in monomorphic IC stubs weak (issue 2073). + + x64: Make sure that the upper half of a 64bit register contains 0 for + int32 values (Chromium issue 360611). 
+ + Performance and stability improvements on all platforms. + + +2014-04-11: Version 3.26.12 + + Do not use ranges after range analysis (Chromium issue 361608). + + Performance and stability improvements on all platforms. + + +2014-04-10: Version 3.26.11 + + Performance and stability improvements on all platforms. + + +2014-04-10: Version 3.26.10 + + Allow the embedder to pass the virtual memory limit to v8. + + Performance and stability improvements on all platforms. + + +2014-04-09: Version 3.26.9 + + Fix invalid local property lookup for transitions (Chromium issue + 361025). + + MIPS: Fixed flooring division by -1 (issue 3259). + + Fixed flooring division by -1 on ARM (issue 3259). + + Make `String.prototype.contains` throw when passing a regular expression + (issue 3261). + + Performance and stability improvements on all platforms. + + +2014-04-08: Version 3.26.8 + + Yet another regression test for range analysis (issue 3204). + + Performance and stability improvements on all platforms. + + +2014-04-07: Version 3.26.7 + + Performance and stability improvements on all platforms. + + +2014-04-04: Version 3.26.6 + + Performance and stability improvements on all platforms. + + +2014-04-03: Version 3.26.5 + + Performance and stability improvements on all platforms. + + +2014-04-03: Version 3.26.4 + + Make stray 'return' an early error. + + Show references from weak containers as weak in heap snapshots (Chromium + issue 356590). + + Make invalid LHSs that are calls late errors (Chromium issue 358346). + + Performance and stability improvements on all platforms. + + +2014-04-02: Version 3.26.3 + + Support typed arrays in IsMoreGeneralElementsKindTransition (Chromium + issue 357054). + + Remove debugger_auto_break flag. + + Store i18n meta data in hidden symbols instead of js accessible + properties (Chromium issue 354967). + + Performance and stability improvements on all platforms. 
+ + +2014-04-01: Version 3.26.2 + + Performance and stability improvements on all platforms. + + +2014-04-01: Version 3.26.1 + + Fix Type::Intersect to skip uninhabited bitsets (Chromium issue 357330). + + Fix PrepareKeyedOperand on arm (Chromium issue 358057). + + Performance and stability improvements on all platforms. + + +2014-03-31: Version 3.26.0 + + Deprecate Start/StopCpuProfiling methods (issue 3213). + + Don't crash if we get a timezone change notification on an uninitialized + isolate (Chromium issue 357362). + + Performance and stability improvements on all platforms. + + 2014-03-28: Version 3.25.30 NativeContext::map_cache reference should be strong in heap snapshots diff -Nru nodejs-0.11.13/deps/v8/codereview.settings nodejs-0.11.15/deps/v8/codereview.settings --- nodejs-0.11.13/deps/v8/codereview.settings 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/codereview.settings 2015-01-20 21:22:17.000000000 +0000 @@ -5,3 +5,4 @@ TRY_ON_UPLOAD: False TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8 TRYSERVER_ROOT: v8 +PROJECT: v8 diff -Nru nodejs-0.11.13/deps/v8/DEPS nodejs-0.11.15/deps/v8/DEPS --- nodejs-0.11.13/deps/v8/DEPS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/DEPS 2015-01-20 21:22:17.000000000 +0000 @@ -2,26 +2,89 @@ # directory and assume that the root of the checkout is in ./v8/, so # all paths in here must match this assumption. +vars = { + "chromium_trunk": "https://src.chromium.org/svn/trunk", + + "buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b", +} + deps = { # Remember to keep the revision in sync with the Makefile. 
"v8/build/gyp": "http://gyp.googlecode.com/svn/trunk@1831", "v8/third_party/icu": - "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@258359", + Var("chromium_trunk") + "/deps/third_party/icu52@277999", + + "v8/buildtools": + "https://chromium.googlesource.com/chromium/buildtools.git@" + + Var("buildtools_revision"), + + "v8/testing/gtest": + "http://googletest.googlecode.com/svn/trunk@692", + + "v8/testing/gmock": + "http://googlemock.googlecode.com/svn/trunk@485", } deps_os = { "win": { "v8/third_party/cygwin": - "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844", + Var("chromium_trunk") + "/deps/third_party/cygwin@66844", "v8/third_party/python_26": - "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111", + Var("chromium_trunk") + "/tools/third_party/python_26@89111", } } +include_rules = [ + # Everybody can use some things. + "+include", + "+unicode", + "+third_party/fdlibm", +] + +# checkdeps.py shouldn't check for includes in these directories: +skip_child_includes = [ + "build", + "third_party", +] + hooks = [ + # Pull clang-format binaries using checked-in hashes. + { + "name": "clang_format_win", + "pattern": ".", + "action": [ "download_from_google_storage", + "--no_resume", + "--platform=win32", + "--no_auth", + "--bucket", "chromium-clang-format", + "-s", "v8/buildtools/win/clang-format.exe.sha1", + ], + }, + { + "name": "clang_format_mac", + "pattern": ".", + "action": [ "download_from_google_storage", + "--no_resume", + "--platform=darwin", + "--no_auth", + "--bucket", "chromium-clang-format", + "-s", "v8/buildtools/mac/clang-format.sha1", + ], + }, + { + "name": "clang_format_linux", + "pattern": ".", + "action": [ "download_from_google_storage", + "--no_resume", + "--platform=linux*", + "--no_auth", + "--bucket", "chromium-clang-format", + "-s", "v8/buildtools/linux64/clang-format.sha1", + ], + }, { # A change to a .gyp, .gypi, or to GYP itself should run the generator. 
"pattern": ".", diff -Nru nodejs-0.11.13/deps/v8/.DEPS.git nodejs-0.11.15/deps/v8/.DEPS.git --- nodejs-0.11.13/deps/v8/.DEPS.git 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/.DEPS.git 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,108 @@ +# DO NOT EDIT EXCEPT FOR LOCAL TESTING. +# THIS IS A GENERATED FILE. +# ALL MANUAL CHANGES WILL BE OVERWRITTEN. +# SEE http://code.google.com/p/chromium/wiki/UsingGit +# FOR HOW TO ROLL DEPS +vars = { + 'webkit_url': + 'https://chromium.googlesource.com/chromium/blink.git', + 'git_url': + 'https://chromium.googlesource.com' +} + +deps = { + 'v8/build/gyp': + Var('git_url') + '/external/gyp.git@a3e2a5caf24a1e0a45401e09ad131210bf16b852', + 'v8/buildtools': + Var('git_url') + '/chromium/buildtools.git@fb782d4369d5ae04f17a2fceef7de5a63e50f07b', + 'v8/testing/gmock': + Var('git_url') + '/external/googlemock.git@896ba0e03f520fb9b6ed582bde2bd00847e3c3f2', + 'v8/testing/gtest': + Var('git_url') + '/external/googletest.git@4650552ff637bb44ecf7784060091cbed3252211', + 'v8/third_party/icu': + Var('git_url') + '/chromium/deps/icu52.git@26d8859357ac0bfb86b939bf21c087b8eae22494', +} + +deps_os = { + 'win': + { + 'v8/third_party/cygwin': + Var('git_url') + '/chromium/deps/cygwin.git@06a117a90c15174436bfa20ceebbfdf43b7eb820', + 'v8/third_party/python_26': + Var('git_url') + '/chromium/deps/python_26.git@67d19f904470effe3122d27101cc5a8195abd157', + }, +} + +include_rules = [ + '+include', + '+unicode', + '+third_party/fdlibm' +] + +skip_child_includes = [ + 'build', + 'third_party' +] + +hooks = [ + { + 'action': + [ + 'download_from_google_storage', + '--no_resume', + '--platform=win32', + '--no_auth', + '--bucket', + 'chromium-clang-format', + '-s', + 'v8/buildtools/win/clang-format.exe.sha1' +], + 'pattern': + '.', + 'name': + 'clang_format_win' +}, + { + 'action': + [ + 'download_from_google_storage', + '--no_resume', + '--platform=darwin', + '--no_auth', + '--bucket', + 'chromium-clang-format', + '-s', + 
'v8/buildtools/mac/clang-format.sha1' +], + 'pattern': + '.', + 'name': + 'clang_format_mac' +}, + { + 'action': + [ + 'download_from_google_storage', + '--no_resume', + '--platform=linux*', + '--no_auth', + '--bucket', + 'chromium-clang-format', + '-s', + 'v8/buildtools/linux64/clang-format.sha1' +], + 'pattern': + '.', + 'name': + 'clang_format_linux' +}, + { + 'action': + [ + 'python', + 'v8/build/gyp_v8' +], + 'pattern': + '.' +} +] diff -Nru nodejs-0.11.13/deps/v8/.gitignore nodejs-0.11.15/deps/v8/.gitignore --- nodejs-0.11.13/deps/v8/.gitignore 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/.gitignore 2015-01-20 21:22:17.000000000 +0000 @@ -21,11 +21,18 @@ #*# *~ .cpplint-cache +.cproject .d8_history +.gclient_entries +.project +.pydevproject +.settings .*.sw? bsuite d8 d8_g +gccauses +gcsuspects shell shell_g /_* @@ -33,6 +40,7 @@ /build/gyp /build/ipch/ /build/Release +/buildtools /hydrogen.cfg /obj /out @@ -45,10 +53,18 @@ /test/benchmarks/sunspider /test/mozilla/CHECKED_OUT_VERSION /test/mozilla/data +/test/mozilla/data.old /test/mozilla/downloaded_* +/test/promises-aplus/promises-tests +/test/promises-aplus/promises-tests.tar.gz +/test/promises-aplus/sinon /test/test262/data +/test/test262/data.old /test/test262/tc39-test262-* -/third_party +/testing/gmock +/testing/gtest +/third_party/icu +/third_party/llvm /tools/jsfunfuzz /tools/jsfunfuzz.zip /tools/oom_dump/oom_dump @@ -63,3 +79,4 @@ GRTAGS GSYMS GPATH +gtags.files diff -Nru nodejs-0.11.13/deps/v8/include/libplatform/libplatform.h nodejs-0.11.15/deps/v8/include/libplatform/libplatform.h --- nodejs-0.11.13/deps/v8/include/libplatform/libplatform.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/libplatform/libplatform.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,38 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_ +#define V8_LIBPLATFORM_LIBPLATFORM_H_ + +#include "include/v8-platform.h" + +namespace v8 { +namespace platform { + +/** + * Returns a new instance of the default v8::Platform implementation. + * + * The caller will take ownership of the returned pointer. |thread_pool_size| + * is the number of worker threads to allocate for background jobs. If a value + * of zero is passed, a suitable default based on the current number of + * processors online will be chosen. + */ +v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0); + + +/** + * Pumps the message loop for the given isolate. + * + * The caller has to make sure that this is called from the right thread. + * Returns true if a task was executed, and false otherwise. This call does + * not block if no task is pending. The |platform| has to be created using + * |CreateDefaultPlatform|. + */ +bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate); + + +} // namespace platform +} // namespace v8 + +#endif // V8_LIBPLATFORM_LIBPLATFORM_H_ diff -Nru nodejs-0.11.13/deps/v8/include/v8config.h nodejs-0.11.15/deps/v8/include/v8config.h --- nodejs-0.11.13/deps/v8/include/v8config.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8config.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8CONFIG_H_ #define V8CONFIG_H_ diff -Nru nodejs-0.11.13/deps/v8/include/v8-debug.h nodejs-0.11.15/deps/v8/include/v8-debug.h --- nodejs-0.11.13/deps/v8/include/v8-debug.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8-debug.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_V8_DEBUG_H_ #define V8_V8_DEBUG_H_ @@ -42,8 +19,10 @@ NewFunction = 3, BeforeCompile = 4, AfterCompile = 5, - ScriptCollected = 6, - BreakForCommand = 7 + CompileError = 6, + PromiseEvent = 7, + AsyncTaskEvent = 8, + BreakForCommand = 9 }; @@ -160,7 +139,7 @@ * A EventCallback2 does not take possession of the event data, * and must not rely on the data persisting after the handler returns. */ - typedef void (*EventCallback2)(const EventDetails& event_details); + typedef void (*EventCallback)(const EventDetails& event_details); /** * Debug message callback function. @@ -170,74 +149,38 @@ * A MessageHandler2 does not take possession of the message data, * and must not rely on the data persisting after the handler returns. */ - typedef void (*MessageHandler2)(const Message& message); - - /** - * Debug host dispatch callback function. 
- */ - typedef void (*HostDispatchHandler)(); + typedef void (*MessageHandler)(const Message& message); /** * Callback function for the host to ensure debug messages are processed. */ typedef void (*DebugMessageDispatchHandler)(); - static bool SetDebugEventListener2(EventCallback2 that, - Handle<Value> data = Handle<Value>()); - - // Set a JavaScript debug event listener. - static bool SetDebugEventListener(v8::Handle<v8::Object> that, + static bool SetDebugEventListener(EventCallback that, Handle<Value> data = Handle<Value>()); // Schedule a debugger break to happen when JavaScript code is run - // in the given isolate. If no isolate is provided the default - // isolate is used. - static void DebugBreak(Isolate* isolate = NULL); + // in the given isolate. + static void DebugBreak(Isolate* isolate); // Remove scheduled debugger break in given isolate if it has not - // happened yet. If no isolate is provided the default isolate is - // used. - static void CancelDebugBreak(Isolate* isolate = NULL); + // happened yet. + static void CancelDebugBreak(Isolate* isolate); // Break execution of JavaScript in the given isolate (this method // can be invoked from a non-VM thread) for further client command // execution on a VM thread. Client data is then passed in // EventDetails to EventCallback2 at the moment when the VM actually - // stops. If no isolate is provided the default isolate is used. - static void DebugBreakForCommand(ClientData* data = NULL, - Isolate* isolate = NULL); + // stops. + static void DebugBreakForCommand(Isolate* isolate, ClientData* data); // Message based interface. The message protocol is JSON. - static void SetMessageHandler2(MessageHandler2 handler); + static void SetMessageHandler(MessageHandler handler); - // If no isolate is provided the default isolate is - // used. 
- // TODO(dcarney): remove - static void SendCommand(const uint16_t* command, int length, - ClientData* client_data = NULL, - Isolate* isolate = NULL); static void SendCommand(Isolate* isolate, const uint16_t* command, int length, ClientData* client_data = NULL); - // Dispatch interface. - static void SetHostDispatchHandler(HostDispatchHandler handler, - int period = 100); - - /** - * Register a callback function to be called when a debug message has been - * received and is ready to be processed. For the debug messages to be - * processed V8 needs to be entered, and in certain embedding scenarios this - * callback can be used to make sure V8 is entered for the debug message to - * be processed. Note that debug messages will only be processed if there is - * a V8 break. This can happen automatically by using the option - * --debugger-auto-break. - * \param provide_locker requires that V8 acquires v8::Locker for you before - * calling handler - */ - static void SetDebugMessageDispatchHandler( - DebugMessageDispatchHandler handler, bool provide_locker = false); - /** * Run a JavaScript function in the debugger. * \param fun the function to call @@ -264,22 +207,6 @@ */ static Local<Value> GetMirror(v8::Handle<v8::Value> obj); - /** - * Enable the V8 builtin debug agent. The debugger agent will listen on the - * supplied TCP/IP port for remote debugger connection. - * \param name the name of the embedding application - * \param port the TCP/IP port to listen on - * \param wait_for_connection whether V8 should pause on a first statement - * allowing remote debugger to connect before anything interesting happened - */ - static bool EnableAgent(const char* name, int port, - bool wait_for_connection = false); - - /** - * Disable the V8 builtin debug agent. The TCP/IP connection will be closed. - */ - static void DisableAgent(); - /** * Makes V8 process all pending debug messages. * @@ -290,7 +217,7 @@ * * Generally when message arrives V8 may be in one of 3 states: * 1. 
V8 is running script; V8 will automatically interrupt and process all - * pending messages (however auto_break flag should be enabled); + * pending messages; * 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated * to reading and processing debug messages; * 3. V8 is not running at all or has called some long-working C++ function; @@ -298,10 +225,6 @@ * until V8 gets control again; however, embedding application may improve * this by manually calling this method. * - * It makes sense to call this method whenever a new debug message arrived and - * V8 is not already running. Method v8::Debug::SetDebugMessageDispatchHandler - * should help with the former condition. - * * Technically this method in many senses is equivalent to executing empty * script: * 1. It does nothing except for processing all pending debug messages. @@ -331,7 +254,7 @@ * (default Isolate if not provided). V8 will abort if LiveEdit is * unexpectedly used. LiveEdit is enabled by default. */ - static void SetLiveEditEnabled(bool enable, Isolate* isolate = NULL); + static void SetLiveEditEnabled(Isolate* isolate, bool enable); }; diff -Nru nodejs-0.11.13/deps/v8/include/v8.h nodejs-0.11.15/deps/v8/include/v8.h --- nodejs-0.11.13/deps/v8/include/v8.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. /** \mainpage V8 API Reference Guide * @@ -129,6 +106,7 @@ class M = NonCopyablePersistentTraits<T> > class Persistent; template<class T> class UniquePersistent; template<class K, class V, class T> class PersistentValueMap; +template<class V, class T> class PersistentValueVector; template<class T, class P> class WeakCallbackObject; class FunctionTemplate; class ObjectTemplate; @@ -315,15 +293,6 @@ return New(isolate, that.val_); } -#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR - - private: -#endif - /** - * Creates a new handle for the specified value. - */ - V8_INLINE explicit Handle(T* val) : val_(val) {} - private: friend class Utils; template<class F, class M> friend class Persistent; @@ -342,6 +311,11 @@ friend class Object; friend class Private; + /** + * Creates a new handle for the specified value. 
+ */ + V8_INLINE explicit Handle(T* val) : val_(val) {} + V8_INLINE static Handle<T> New(Isolate* isolate, T* that); T* val_; @@ -395,12 +369,6 @@ V8_INLINE static Local<T> New(Isolate* isolate, const PersistentBase<T>& that); -#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR - - private: -#endif - template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { } - private: friend class Utils; template<class F> friend class Eternal; @@ -417,7 +385,9 @@ friend class HandleScope; friend class EscapableHandleScope; template<class F1, class F2, class F3> friend class PersistentValueMap; + template<class F1, class F2> friend class PersistentValueVector; + template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { } V8_INLINE static Local<T> New(Isolate* isolate, T* that); }; @@ -522,6 +492,13 @@ return !operator==(that); } + /** + * Install a finalization callback on this object. + * NOTE: There is no guarantee as to *when* or even *if* the callback is + * invoked. The invocation is performed solely on a best effort basis. + * As always, GC-based finalization should *not* be relied upon for any + * critical form of resource management! + */ template<typename P> V8_INLINE void SetWeak( P* parameter, @@ -586,6 +563,7 @@ template<class F> friend class PersistentBase; template<class F> friend class ReturnValue; template<class F1, class F2, class F3> friend class PersistentValueMap; + template<class F1, class F2> friend class PersistentValueVector; friend class Object; explicit V8_INLINE PersistentBase(T* val) : val_(val) {} @@ -719,15 +697,6 @@ // This will be removed. 
V8_INLINE T* ClearAndLeak(); - // TODO(dcarney): remove -#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR - - private: -#endif - template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { } - - V8_INLINE T* operator*() const { return this->val_; } - private: friend class Isolate; friend class Utils; @@ -736,6 +705,8 @@ template<class F1, class F2> friend class Persistent; template<class F> friend class ReturnValue; + template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { } + V8_INLINE T* operator*() const { return this->val_; } template<class S, class M2> V8_INLINE void Copy(const Persistent<S, M2>& that); }; @@ -804,7 +775,7 @@ /** * Pass allows returning uniques from functions, etc. */ - V8_INLINE UniquePersistent Pass() { return UniquePersistent(RValue(this)); } + UniquePersistent Pass() { return UniquePersistent(RValue(this)); } private: UniquePersistent(UniquePersistent&); @@ -924,6 +895,13 @@ }; +// Convenience wrapper. +template <class T> +inline Maybe<T> maybe(T t) { + return Maybe<T>(t); +} + + // --- Special objects --- @@ -937,53 +915,6 @@ /** - * Pre-compilation data that can be associated with a script. This - * data can be calculated for a script in advance of actually - * compiling it, and can be stored between compilations. When script - * data is given to the compile method compilation will be faster. - */ -class V8_EXPORT ScriptData { // NOLINT - public: - virtual ~ScriptData() { } - - /** - * Pre-compiles the specified script (context-independent). - * - * NOTE: Pre-compilation using this method cannot happen on another thread - * without using Lockers. - * - * \param source Script source code. - */ - static ScriptData* PreCompile(Handle<String> source); - - /** - * Load previous pre-compilation data. - * - * \param data Pointer to data returned by a call to Data() of a previous - * ScriptData. Ownership is not transferred. - * \param length Length of data. 
- */ - static ScriptData* New(const char* data, int length); - - /** - * Returns the length of Data(). - */ - virtual int Length() = 0; - - /** - * Returns a serialized representation of this ScriptData that can later be - * passed to New(). NOTE: Serialized data is platform-dependent. - */ - virtual const char* Data() = 0; - - /** - * Returns true if the source code could not be parsed. - */ - virtual bool HasError() = 0; -}; - - -/** * The origin, within a file, of a script. */ class ScriptOrigin { @@ -992,20 +923,24 @@ Handle<Value> resource_name, Handle<Integer> resource_line_offset = Handle<Integer>(), Handle<Integer> resource_column_offset = Handle<Integer>(), - Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>()) + Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>(), + Handle<Integer> script_id = Handle<Integer>()) : resource_name_(resource_name), resource_line_offset_(resource_line_offset), resource_column_offset_(resource_column_offset), - resource_is_shared_cross_origin_(resource_is_shared_cross_origin) { } + resource_is_shared_cross_origin_(resource_is_shared_cross_origin), + script_id_(script_id) { } V8_INLINE Handle<Value> ResourceName() const; V8_INLINE Handle<Integer> ResourceLineOffset() const; V8_INLINE Handle<Integer> ResourceColumnOffset() const; V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const; + V8_INLINE Handle<Integer> ScriptID() const; private: Handle<Value> resource_name_; Handle<Integer> resource_line_offset_; Handle<Integer> resource_column_offset_; Handle<Boolean> resource_is_shared_cross_origin_; + Handle<Integer> script_id_; }; @@ -1023,6 +958,15 @@ Handle<Value> GetScriptName(); /** + * Data read from magic sourceURL comments. + */ + Handle<Value> GetSourceURL(); + /** + * Data read from magic sourceMappingURL comments. + */ + Handle<Value> GetSourceMappingURL(); + + /** * Returns zero based line number of the code_pos location in the script. 
* -1 will be returned if no information available. */ @@ -1040,12 +984,9 @@ public: /** * A shorthand for ScriptCompiler::Compile(). - * The ScriptData parameter will be deprecated; use ScriptCompiler::Compile if - * you want to pass it. */ static Local<Script> Compile(Handle<String> source, - ScriptOrigin* origin = NULL, - ScriptData* script_data = NULL); + ScriptOrigin* origin = NULL); // To be decprecated, use the Compile above. static Local<Script> Compile(Handle<String> source, @@ -1063,24 +1004,9 @@ */ Local<UnboundScript> GetUnboundScript(); - // To be deprecated; use GetUnboundScript()->GetId(); - int GetId() { - return GetUnboundScript()->GetId(); - } - - // Use GetUnboundScript()->GetId(); V8_DEPRECATED("Use GetUnboundScript()->GetId()", - Handle<Value> GetScriptName()) { - return GetUnboundScript()->GetScriptName(); - } - - /** - * Returns zero based line number of the code_pos location in the script. - * -1 will be returned if no information available. - */ - V8_DEPRECATED("Use GetUnboundScript()->GetLineNumber()", - int GetLineNumber(int code_pos)) { - return GetUnboundScript()->GetLineNumber(code_pos); + int GetId()) { + return GetUnboundScript()->GetId(); } }; @@ -1118,15 +1044,14 @@ int length; BufferPolicy buffer_policy; - private: - // Prevent copying. Not implemented. - CachedData(const CachedData&); - CachedData& operator=(const CachedData&); + private: + // Prevent copying. Not implemented. + CachedData(const CachedData&); + CachedData& operator=(const CachedData&); }; /** - * Source code which can be then compiled to a UnboundScript or - * BoundScript. + * Source code which can be then compiled to a UnboundScript or Script. */ class Source { public: @@ -1144,7 +1069,7 @@ private: friend class ScriptCompiler; - // Prevent copying. Not implemented. + // Prevent copying. Not implemented. 
Source(const Source&); Source& operator=(const Source&); @@ -1156,19 +1081,31 @@ Handle<Integer> resource_column_offset; Handle<Boolean> resource_is_shared_cross_origin; - // Cached data from previous compilation (if any), or generated during - // compilation (if the generate_cached_data flag is passed to - // ScriptCompiler). + // Cached data from previous compilation (if a kConsume*Cache flag is + // set), or hold newly generated cache data (kProduce*Cache flags) are + // set when calling a compile method. CachedData* cached_data; }; enum CompileOptions { - kNoCompileOptions, - kProduceDataToCache = 1 << 0 + kNoCompileOptions = 0, + kProduceParserCache, + kConsumeParserCache, + kProduceCodeCache, + kConsumeCodeCache, + + // Support the previous API for a transition period. + kProduceDataToCache }; /** * Compiles the specified script (context-independent). + * Cached data as part of the source object can be optionally produced to be + * consumed later to speed up compilation of identical source scripts. + * + * Note that when producing cached data, the source must point to NULL for + * cached data. When consuming cached data, the cached data must have been + * produced by the same version of V8. * * \param source Script source code. * \return Compiled script object (context independent; for running it must be @@ -1204,16 +1141,16 @@ Local<String> GetSourceLine() const; /** - * Returns the resource name for the script from where the function causing - * the error originates. + * Returns the origin for the script from where the function causing the + * error originates. */ - Handle<Value> GetScriptResourceName() const; + ScriptOrigin GetScriptOrigin() const; /** - * Returns the resource data for the script from where the function causing + * Returns the resource name for the script from where the function causing * the error originates. */ - Handle<Value> GetScriptData() const; + Handle<Value> GetScriptResourceName() const; /** * Exception stack trace. 
By default stack traces are not captured for @@ -1286,6 +1223,7 @@ kIsConstructor = 1 << 5, kScriptNameOrSourceURL = 1 << 6, kScriptId = 1 << 7, + kExposeFramesAcrossSecurityOrigins = 1 << 8, kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName, kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL }; @@ -2156,11 +2094,7 @@ * accessors have an explicit access control parameter which specifies * the kind of cross-context access that should be allowed. * - * Additionally, for security, accessors can prohibit overwriting by - * accessors defined in JavaScript. For objects that have such - * accessors either locally or in their prototype chain it is not - * possible to overwrite the accessor by using __defineGetter__ or - * __defineSetter__ from JavaScript code. + * TODO(dcarney): Remove PROHIBITS_OVERWRITING as it is now unused. */ enum AccessControl { DEFAULT = 0, @@ -2175,13 +2109,11 @@ */ class V8_EXPORT Object : public Value { public: - bool Set(Handle<Value> key, - Handle<Value> value, - PropertyAttribute attribs = None); + bool Set(Handle<Value> key, Handle<Value> value); bool Set(uint32_t index, Handle<Value> value); - // Sets a local property on this object bypassing interceptors and + // Sets an own property on this object bypassing interceptors and // overriding accessors or read-only properties. // // Note that if the object has an interceptor the property will be set @@ -2204,6 +2136,11 @@ */ PropertyAttribute GetPropertyAttributes(Handle<Value> key); + /** + * Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3. + */ + Local<Value> GetOwnPropertyDescriptor(Local<String> key); + bool Has(Handle<Value> key); bool Delete(Handle<Value> key); @@ -2289,12 +2226,6 @@ Local<String> ObjectProtoToString(); /** - * Returns the function invoked as a constructor for this object. - * May be the null value. 
- */ - Local<Value> GetConstructor(); - - /** * Returns the name of the function invoked as a constructor for this object. */ Local<String> GetConstructorName(); @@ -2514,6 +2445,10 @@ // Convenience getter for Isolate V8_INLINE Isolate* GetIsolate(); + // Pointer setter: Uncompilable to prevent inadvertent misuse. + template <typename S> + V8_INLINE void Set(S* whatever); + private: template<class F> friend class ReturnValue; template<class F> friend class FunctionCallbackInfo; @@ -2714,6 +2649,7 @@ */ Local<Promise> Chain(Handle<Function> handler); Local<Promise> Catch(Handle<Function> handler); + Local<Promise> Then(Handle<Function> handler); V8_INLINE static Promise* Cast(Value* obj); @@ -3941,14 +3877,17 @@ * * \param physical_memory The total amount of physical memory on the current * device, in bytes. + * \param virtual_memory_limit The amount of virtual memory on the current + * device, in bytes, or zero, if there is no limit. * \param number_of_processors The number of CPUs available on the current * device. 
*/ void ConfigureDefaults(uint64_t physical_memory, + uint64_t virtual_memory_limit, uint32_t number_of_processors); - int max_young_space_size() const { return max_young_space_size_; } - void set_max_young_space_size(int value) { max_young_space_size_ = value; } + int max_semi_space_size() const { return max_semi_space_size_; } + void set_max_semi_space_size(int value) { max_semi_space_size_ = value; } int max_old_space_size() const { return max_old_space_size_; } void set_max_old_space_size(int value) { max_old_space_size_ = value; } int max_executable_size() const { return max_executable_size_; } @@ -3961,13 +3900,18 @@ void set_max_available_threads(int value) { max_available_threads_ = value; } + size_t code_range_size() const { return code_range_size_; } + void set_code_range_size(size_t value) { + code_range_size_ = value; + } private: - int max_young_space_size_; + int max_semi_space_size_; int max_old_space_size_; int max_executable_size_; uint32_t* stack_limit_; int max_available_threads_; + size_t code_range_size_; }; @@ -4042,6 +3986,9 @@ // --- Leave Script Callback --- typedef void (*CallCompletedCallback)(); +// --- Microtask Callback --- +typedef void (*MicrotaskCallback)(void* data); + // --- Failed Access Check Callback --- typedef void (*FailedAccessCheckCallback)(Local<Object> target, AccessType type, @@ -4147,7 +4094,7 @@ /** * Assert that no Javascript code is invoked. */ - class DisallowJavascriptExecutionScope { + class V8_EXPORT DisallowJavascriptExecutionScope { public: enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE }; @@ -4168,7 +4115,7 @@ /** * Introduce exception to DisallowJavascriptExecutionScope. */ - class AllowJavascriptExecutionScope { + class V8_EXPORT AllowJavascriptExecutionScope { public: explicit AllowJavascriptExecutionScope(Isolate* isolate); ~AllowJavascriptExecutionScope(); @@ -4184,6 +4131,24 @@ }; /** + * Do not run microtasks while this scope is active, even if microtasks are + * automatically executed otherwise. 
+ */ + class V8_EXPORT SuppressMicrotaskExecutionScope { + public: + explicit SuppressMicrotaskExecutionScope(Isolate* isolate); + ~SuppressMicrotaskExecutionScope(); + + private: + internal::Isolate* isolate_; + + // Prevent copying of Scope objects. + SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&); + SuppressMicrotaskExecutionScope& operator=( + const SuppressMicrotaskExecutionScope&); + }; + + /** * Types of garbage collections that can be requested via * RequestGarbageCollectionForTesting. */ @@ -4193,6 +4158,20 @@ }; /** + * Features reported via the SetUseCounterCallback callback. Do not chang + * assigned numbers of existing items; add new features to the end of this + * list. + */ + enum UseCounterFeature { + kUseAsm = 0, + kUseCounterFeatureCount // This enum value must be last. + }; + + typedef void (*UseCounterCallback)(Isolate* isolate, + UseCounterFeature feature); + + + /** * Creates a new isolate. Does not change the currently entered * isolate. * @@ -4208,6 +4187,17 @@ static Isolate* GetCurrent(); /** + * Custom callback used by embedders to help V8 determine if it should abort + * when it throws and no internal handler can catch the exception. + * If FLAG_abort_on_uncaught_exception is true, then V8 will abort if either: + * - no custom callback is set. + * - the custom callback set returns true. + * Otherwise it won't abort. + */ + typedef bool (*abort_on_uncaught_exception_t)(); + void SetAbortOnUncaughtException(abort_on_uncaught_exception_t callback); + + /** * Methods below this point require holding a lock (using Locker) in * a multi-threaded environment. */ @@ -4270,7 +4260,8 @@ * kept alive by JavaScript objects. * \returns the adjusted value. */ - int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes); + V8_INLINE int64_t + AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes); /** * Returns heap profiler for this isolate. 
Will return NULL until the isolate @@ -4418,6 +4409,96 @@ */ void SetEventLogger(LogEventCallback that); + /** + * Adds a callback to notify the host application when a script finished + * running. If a script re-enters the runtime during executing, the + * CallCompletedCallback is only invoked when the outer-most script + * execution ends. Executing scripts inside the callback do not trigger + * further callbacks. + */ + void AddCallCompletedCallback(CallCompletedCallback callback); + + /** + * Removes callback that was installed by AddCallCompletedCallback. + */ + void RemoveCallCompletedCallback(CallCompletedCallback callback); + + /** + * Experimental: Runs the Microtask Work Queue until empty + * Any exceptions thrown by microtask callbacks are swallowed. + */ + void RunMicrotasks(); + + /** + * Experimental: Enqueues the callback to the Microtask Work Queue + */ + void EnqueueMicrotask(Handle<Function> microtask); + + /** + * Experimental: Enqueues the callback to the Microtask Work Queue + */ + void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL); + + /** + * Experimental: Controls whether the Microtask Work Queue is automatically + * run when the script call depth decrements to zero. + */ + void SetAutorunMicrotasks(bool autorun); + + /** + * Experimental: Returns whether the Microtask Work Queue is automatically + * run when the script call depth decrements to zero. + */ + bool WillAutorunMicrotasks() const; + + /** + * Sets a callback for counting the number of times a feature of V8 is used. + */ + void SetUseCounterCallback(UseCounterCallback callback); + + /** + * Enables the host application to provide a mechanism for recording + * statistics counters. + */ + void SetCounterFunction(CounterLookupCallback); + + /** + * Enables the host application to provide a mechanism for recording + * histograms. The CreateHistogram function returns a + * histogram which will later be passed to the AddHistogramSample + * function. 
+ */ + void SetCreateHistogramFunction(CreateHistogramCallback); + void SetAddHistogramSampleFunction(AddHistogramSampleCallback); + + /** + * Optional notification that the embedder is idle. + * V8 uses the notification to reduce memory footprint. + * This call can be used repeatedly if the embedder remains idle. + * Returns true if the embedder should stop calling IdleNotification + * until real work has been done. This indicates that V8 has done + * as much cleanup as it will be able to do. + * + * The idle_time_in_ms argument specifies the time V8 has to do reduce + * the memory footprint. There is no guarantee that the actual work will be + * done within the time limit. + */ + bool IdleNotification(int idle_time_in_ms); + + /** + * Optional notification that the system is running low on memory. + * V8 uses these notifications to attempt to free memory. + */ + void LowMemoryNotification(); + + /** + * Optional notification that a context has been disposed. V8 uses + * these notifications to guide the GC heuristic. Returns the number + * of context disposals - including this one - since the last time + * V8 had a chance to clean up. + */ + int ContextDisposedNotification(); + private: template<class K, class V, class Traits> friend class PersistentValueMap; @@ -4431,6 +4512,7 @@ void SetObjectGroupId(internal::Object** object, UniqueId id); void SetReferenceFromGroup(UniqueId id, internal::Object** object); void SetReference(internal::Object** parent, internal::Object** child); + void CollectAllGarbage(const char* gc_reason); }; class V8_EXPORT StartupData { @@ -4541,7 +4623,7 @@ // Size of the instructions. size_t code_len; // Script info for CODE_ADDED event. - Handle<Script> script; + Handle<UnboundScript> script; // User-defined data for *_LINE_INFO_* event. It's used to hold the source // code line information which is returned from the // CODE_START_LINE_INFO_RECORDING event. 
And it's passed to subsequent @@ -4670,6 +4752,24 @@ static void SetDecompressedStartupData(StartupData* decompressed_data); /** + * Hand startup data to V8, in case the embedder has chosen to build + * V8 with external startup data. + * + * Note: + * - By default the startup data is linked into the V8 library, in which + * case this function is not meaningful. + * - If this needs to be called, it needs to be called before V8 + * tries to make use of its built-ins. + * - To avoid unnecessary copies of data, V8 will point directly into the + * given data blob, so pretty please keep it around until V8 exit. + * - Compression of the startup blob might be useful, but needs to + * handled entirely on the embedders' side. + * - The call will abort if the data is invalid. + */ + static void SetNativesDataBlob(StartupData* startup_blob); + static void SetSnapshotDataBlob(StartupData* startup_blob); + + /** * Adds a message listener. * * The same message listener can be added more than once and in that @@ -4710,21 +4810,6 @@ /** Get the version string. */ static const char* GetVersion(); - /** - * Enables the host application to provide a mechanism for recording - * statistics counters. - */ - static void SetCounterFunction(CounterLookupCallback); - - /** - * Enables the host application to provide a mechanism for recording - * histograms. The CreateHistogram function returns a - * histogram which will later be passed to the AddHistogramSample - * function. - */ - static void SetCreateHistogramFunction(CreateHistogramCallback); - static void SetAddHistogramSampleFunction(AddHistogramSampleCallback); - /** Callback function for reporting failed access checks.*/ static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback); @@ -4780,36 +4865,6 @@ static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); /** - * Adds a callback to notify the host application when a script finished - * running. 
If a script re-enters the runtime during executing, the - * CallCompletedCallback is only invoked when the outer-most script - * execution ends. Executing scripts inside the callback do not trigger - * further callbacks. - */ - static void AddCallCompletedCallback(CallCompletedCallback callback); - - /** - * Removes callback that was installed by AddCallCompletedCallback. - */ - static void RemoveCallCompletedCallback(CallCompletedCallback callback); - - /** - * Experimental: Runs the Microtask Work Queue until empty - */ - static void RunMicrotasks(Isolate* isolate); - - /** - * Experimental: Enqueues the callback to the Microtask Work Queue - */ - static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask); - - /** - * Experimental: Controls whether the Microtask Work Queue is automatically - * run when the script call depth decrements to zero. - */ - static void SetAutorunMicrotasks(Isolate *source, bool autorun); - - /** * Initializes from snapshot if possible. Otherwise, attempts to * initialize from scratch. This function is called implicitly if * you use the API without calling it first. @@ -4870,15 +4925,14 @@ /** * Forcefully terminate the current thread of JavaScript execution - * in the given isolate. If no isolate is provided, the default - * isolate is used. + * in the given isolate. * * This method can be used by any thread even if that thread has not * acquired the V8 lock with a Locker object. * * \param isolate The isolate in which to terminate the current JS execution. */ - static void TerminateExecution(Isolate* isolate = NULL); + static void TerminateExecution(Isolate* isolate); /** * Is V8 terminating JavaScript execution. @@ -4945,34 +4999,6 @@ Isolate* isolate, PersistentHandleVisitor* visitor); /** - * Optional notification that the embedder is idle. - * V8 uses the notification to reduce memory footprint. - * This call can be used repeatedly if the embedder remains idle. 
- * Returns true if the embedder should stop calling IdleNotification - * until real work has been done. This indicates that V8 has done - * as much cleanup as it will be able to do. - * - * The hint argument specifies the amount of work to be done in the function - * on scale from 1 to 1000. There is no guarantee that the actual work will - * match the hint. - */ - static bool IdleNotification(int hint = 1000); - - /** - * Optional notification that the system is running low on memory. - * V8 uses these notifications to attempt to free memory. - */ - static void LowMemoryNotification(); - - /** - * Optional notification that a context has been disposed. V8 uses - * these notifications to guide the GC heuristic. Returns the number - * of context disposals - including this one - since the last time - * V8 had a chance to clean up. - */ - static int ContextDisposedNotification(); - - /** * Initialize the ICU library bundled with V8. The embedder should only * invoke this method when using the bundled ICU. Returns true on success. * @@ -5099,7 +5125,8 @@ /** * Clears any exceptions that may have been caught by this try/catch block. - * After this method has been called, HasCaught() will return false. + * After this method has been called, HasCaught() will return false. Cancels + * the scheduled exception if it is caught and ReThrow() is not called before. * * It is not necessary to clear a try/catch block before using it again; if * another exception is thrown the previously caught exception will just be @@ -5125,7 +5152,25 @@ */ void SetCaptureMessage(bool value); + /** + * There are cases when the raw address of C++ TryCatch object cannot be + * used for comparisons with addresses into the JS stack. The cases are: + * 1) ARM, ARM64 and MIPS simulators which have separate JS stack. + * 2) Address sanitizer allocates local C++ object in the heap when + * UseAfterReturn mode is enabled. 
+ * This method returns address that can be used for comparisons with + * addresses into the JS stack. When neither simulator nor ASAN's + * UseAfterReturn is enabled, then the address returned will be the address + * of the C++ try catch handler itself. + */ + static void* JSStackComparableAddress(v8::TryCatch* handler) { + if (handler == NULL) return NULL; + return handler->js_stack_comparable_address_; + } + private: + void ResetInternal(); + // Make it hard to create heap-allocated TryCatch blocks. TryCatch(const TryCatch&); void operator=(const TryCatch&); @@ -5133,10 +5178,11 @@ void operator delete(void*, size_t); v8::internal::Isolate* isolate_; - void* next_; + v8::TryCatch* next_; void* exception_; void* message_obj_; void* message_script_; + void* js_stack_comparable_address_; int message_start_pos_; int message_end_pos_; bool is_verbose_ : 1; @@ -5246,9 +5292,6 @@ */ void Exit(); - /** Returns true if the context has experienced an out of memory situation. */ - bool HasOutOfMemoryException() { return false; } - /** Returns an isolate associated with a current context. */ v8::Isolate* GetIsolate(); @@ -5468,6 +5511,7 @@ const int kApiPointerSize = sizeof(void*); // NOLINT const int kApiIntSize = sizeof(int); // NOLINT +const int kApiInt64Size = sizeof(int64_t); // NOLINT // Tag information for HeapObject. const int kHeapObjectTag = 1; @@ -5493,7 +5537,7 @@ template <> struct SmiTagging<4> { static const int kSmiShiftSize = 0; static const int kSmiValueSize = 31; - V8_INLINE static int SmiToInt(internal::Object* value) { + V8_INLINE static int SmiToInt(const internal::Object* value) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Throw away top 32 bits and shift down (requires >> to be sign extending). 
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits; @@ -5521,7 +5565,7 @@ template <> struct SmiTagging<8> { static const int kSmiShiftSize = 31; static const int kSmiValueSize = 32; - V8_INLINE static int SmiToInt(internal::Object* value) { + V8_INLINE static int SmiToInt(const internal::Object* value) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Shift down and throw away top 32 bits. return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits); @@ -5551,7 +5595,8 @@ // These values match non-compiler-dependent values defined within // the implementation of v8. static const int kHeapObjectMapOffset = 0; - static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize; + static const int kMapInstanceTypeAndBitFieldOffset = + 1 * kApiPointerSize + kApiIntSize; static const int kStringResourceOffset = 3 * kApiPointerSize; static const int kOddballKindOffset = 3 * kApiPointerSize; @@ -5559,19 +5604,29 @@ static const int kJSObjectHeaderSize = 3 * kApiPointerSize; static const int kFixedArrayHeaderSize = 2 * kApiPointerSize; static const int kContextHeaderSize = 2 * kApiPointerSize; - static const int kContextEmbedderDataIndex = 65; + static const int kContextEmbedderDataIndex = 95; static const int kFullStringRepresentationMask = 0x07; static const int kStringEncodingMask = 0x4; static const int kExternalTwoByteRepresentationTag = 0x02; static const int kExternalAsciiRepresentationTag = 0x06; static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize; - static const int kIsolateRootsOffset = 5 * kApiPointerSize; + static const int kAmountOfExternalAllocatedMemoryOffset = + 4 * kApiPointerSize; + static const int kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset = + kAmountOfExternalAllocatedMemoryOffset + kApiInt64Size; + static const int kIsolateRootsOffset = + kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset + kApiInt64Size + + kApiPointerSize; static const int kUndefinedValueRootIndex = 5; static 
const int kNullValueRootIndex = 7; static const int kTrueValueRootIndex = 8; static const int kFalseValueRootIndex = 9; - static const int kEmptyStringRootIndex = 154; + static const int kEmptyStringRootIndex = 164; + + // The external allocation limit should be below 256 MB on all architectures + // to avoid that resource-constrained embedders run low on memory. + static const int kExternalAllocationLimit = 192 * 1024 * 1024; static const int kNodeClassIdOffset = 1 * kApiPointerSize; static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3; @@ -5582,10 +5637,10 @@ static const int kNodeIsIndependentShift = 4; static const int kNodeIsPartiallyDependentShift = 5; - static const int kJSObjectType = 0xbb; + static const int kJSObjectType = 0xbc; static const int kFirstNonstringType = 0x80; static const int kOddballType = 0x83; - static const int kForeignType = 0x87; + static const int kForeignType = 0x88; static const int kUndefinedOddballKind = 5; static const int kNullOddballKind = 3; @@ -5599,12 +5654,12 @@ #endif } - V8_INLINE static bool HasHeapObjectTag(internal::Object* value) { + V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) { return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag); } - V8_INLINE static int SmiValue(internal::Object* value) { + V8_INLINE static int SmiValue(const internal::Object* value) { return PlatformSmiTagging::SmiToInt(value); } @@ -5616,13 +5671,15 @@ return PlatformSmiTagging::IsValidSmi(value); } - V8_INLINE static int GetInstanceType(internal::Object* obj) { + V8_INLINE static int GetInstanceType(const internal::Object* obj) { typedef internal::Object O; O* map = ReadField<O*>(obj, kHeapObjectMapOffset); - return ReadField<uint8_t>(map, kMapInstanceTypeOffset); + // Map::InstanceType is defined so that it will always be loaded into + // the LS 8 bits of one 16-bit word, regardless of endianess. 
+ return ReadField<uint16_t>(map, kMapInstanceTypeAndBitFieldOffset) & 0xff; } - V8_INLINE static int GetOddballKind(internal::Object* obj) { + V8_INLINE static int GetOddballKind(const internal::Object* obj) { typedef internal::Object O; return SmiValue(ReadField<O*>(obj, kOddballKindOffset)); } @@ -5655,18 +5712,19 @@ *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value); } - V8_INLINE static void SetEmbedderData(v8::Isolate *isolate, + V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot, - void *data) { + void* data) { uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) + kIsolateEmbedderDataOffset + slot * kApiPointerSize; *reinterpret_cast<void**>(addr) = data; } - V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate, uint32_t slot) { - uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + + V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate, + uint32_t slot) { + const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) + kIsolateEmbedderDataOffset + slot * kApiPointerSize; - return *reinterpret_cast<void**>(addr); + return *reinterpret_cast<void* const*>(addr); } V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, @@ -5675,16 +5733,18 @@ return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize); } - template <typename T> V8_INLINE static T ReadField(Object* ptr, int offset) { - uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag; - return *reinterpret_cast<T*>(addr); + template <typename T> + V8_INLINE static T ReadField(const internal::Object* ptr, int offset) { + const uint8_t* addr = + reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag; + return *reinterpret_cast<const T*>(addr); } template <typename T> - V8_INLINE static T ReadEmbedderData(Context* context, int index) { + V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) { typedef internal::Object O; typedef internal::Internals I; - O* ctx = 
*reinterpret_cast<O**>(context); + O* ctx = *reinterpret_cast<O* const*>(context); int embedder_data_offset = I::kContextHeaderSize + (internal::kApiPointerSize * I::kContextEmbedderDataIndex); O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset); @@ -5692,14 +5752,6 @@ I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index); return I::ReadField<T>(embedder_data, value_offset); } - - V8_INLINE static bool CanCastToHeapObject(void* o) { return false; } - V8_INLINE static bool CanCastToHeapObject(Context* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(String* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(Object* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(Message* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(StackTrace* o) { return true; } - V8_INLINE static bool CanCastToHeapObject(StackFrame* o) { return true; } }; } // namespace internal @@ -6007,6 +6059,13 @@ } template<typename T> +template<typename S> +void ReturnValue<T>::Set(S* whatever) { + // Uncompilable to prevent inadvertent misuse. + TYPE_CHECK(S*, Primitive); +} + +template<typename T> internal::Object* ReturnValue<T>::GetDefaultValue() { // Default value is always the pointer below value_ on the stack. 
return value_[-1]; @@ -6095,11 +6154,17 @@ return resource_column_offset_; } + Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const { return resource_is_shared_cross_origin_; } +Handle<Integer> ScriptOrigin::ScriptID() const { + return script_id_; +} + + ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin, CachedData* data) : source_string(string), @@ -6191,7 +6256,7 @@ String::ExternalStringResource* String::GetExternalStringResource() const { typedef internal::Object O; typedef internal::Internals I; - O* obj = *reinterpret_cast<O**>(const_cast<String*>(this)); + O* obj = *reinterpret_cast<O* const*>(this); String::ExternalStringResource* result; if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) { void* value = I::ReadField<void*>(obj, I::kStringResourceOffset); @@ -6210,7 +6275,7 @@ String::Encoding* encoding_out) const { typedef internal::Object O; typedef internal::Internals I; - O* obj = *reinterpret_cast<O**>(const_cast<String*>(this)); + O* obj = *reinterpret_cast<O* const*>(this); int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask; *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask); ExternalStringResourceBase* resource = NULL; @@ -6237,7 +6302,7 @@ bool Value::QuickIsUndefined() const { typedef internal::Object O; typedef internal::Internals I; - O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this)); + O* obj = *reinterpret_cast<O* const*>(this); if (!I::HasHeapObjectTag(obj)) return false; if (I::GetInstanceType(obj) != I::kOddballType) return false; return (I::GetOddballKind(obj) == I::kUndefinedOddballKind); @@ -6255,7 +6320,7 @@ bool Value::QuickIsNull() const { typedef internal::Object O; typedef internal::Internals I; - O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this)); + O* obj = *reinterpret_cast<O* const*>(this); if (!I::HasHeapObjectTag(obj)) return false; if (I::GetInstanceType(obj) != I::kOddballType) return false; return (I::GetOddballKind(obj) == 
I::kNullOddballKind); @@ -6273,7 +6338,7 @@ bool Value::QuickIsString() const { typedef internal::Object O; typedef internal::Internals I; - O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this)); + O* obj = *reinterpret_cast<O* const*>(this); if (!I::HasHeapObjectTag(obj)) return false; return (I::GetInstanceType(obj) < I::kFirstNonstringType); } @@ -6592,6 +6657,28 @@ } +int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( + int64_t change_in_bytes) { + typedef internal::Internals I; + int64_t* amount_of_external_allocated_memory = + reinterpret_cast<int64_t*>(reinterpret_cast<uint8_t*>(this) + + I::kAmountOfExternalAllocatedMemoryOffset); + int64_t* amount_of_external_allocated_memory_at_last_global_gc = + reinterpret_cast<int64_t*>( + reinterpret_cast<uint8_t*>(this) + + I::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset); + int64_t amount = *amount_of_external_allocated_memory + change_in_bytes; + if (change_in_bytes > 0 && + amount - *amount_of_external_allocated_memory_at_last_global_gc > + I::kExternalAllocationLimit) { + CollectAllGarbage("external memory allocation limit reached."); + } else { + *amount_of_external_allocated_memory = amount; + } + return *amount_of_external_allocated_memory; +} + + template<typename T> void Isolate::SetObjectGroupId(const Persistent<T>& object, UniqueId id) { diff -Nru nodejs-0.11.13/deps/v8/include/v8-platform.h nodejs-0.11.15/deps/v8/include/v8-platform.h --- nodejs-0.11.13/deps/v8/include/v8-platform.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8-platform.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_V8_PLATFORM_H_ #define V8_V8_PLATFORM_H_ -#include "v8.h" - namespace v8 { +class Isolate; + /** * A Task represents a unit of work. */ @@ -60,6 +37,8 @@ kLongRunningTask }; + virtual ~Platform() {} + /** * Schedules a task to be invoked on a background thread. |expected_runtime| * indicates that the task will run a long time. The Platform implementation @@ -76,9 +55,6 @@ * scheduling. The definition of "foreground" is opaque to V8. 
*/ virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0; - - protected: - virtual ~Platform() {} }; } // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/include/v8-profiler.h nodejs-0.11.15/deps/v8/include/v8-profiler.h --- nodejs-0.11.13/deps/v8/include/v8-profiler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8-profiler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_V8_PROFILER_H_ #define V8_V8_PROFILER_H_ @@ -106,27 +83,35 @@ const CpuProfileNode* GetTopDownRoot() const; /** - * Returns number of samples recorded. The samples are not recorded unless - * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true. - */ + * Returns number of samples recorded. The samples are not recorded unless + * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true. + */ int GetSamplesCount() const; /** - * Returns profile node corresponding to the top frame the sample at - * the given index. - */ + * Returns profile node corresponding to the top frame the sample at + * the given index. + */ const CpuProfileNode* GetSample(int index) const; /** - * Returns time when the profile recording started (in microseconds - * since the Epoch). - */ + * Returns the timestamp of the sample. The timestamp is the number of + * microseconds since some unspecified starting point. + * The point is equal to the starting point used by GetStartTime. + */ + int64_t GetSampleTimestamp(int index) const; + + /** + * Returns time when the profile recording was started (in microseconds) + * since some unspecified starting point. + */ int64_t GetStartTime() const; /** - * Returns time when the profile recording was stopped (in microseconds - * since the Epoch). - */ + * Returns time when the profile recording was stopped (in microseconds) + * since some unspecified starting point. + * The point is equal to the starting point used by GetStartTime. + */ int64_t GetEndTime() const; /** @@ -164,7 +149,9 @@ void StartProfiling(Handle<String> title, bool record_samples = false); /** Deprecated. Use StartProfiling instead. 
*/ - void StartCpuProfiling(Handle<String> title, bool record_samples = false); + V8_DEPRECATED("Use StartProfiling", + void StartCpuProfiling(Handle<String> title, + bool record_samples = false)); /** * Stops collecting CPU profile with a given title and returns it. @@ -173,7 +160,8 @@ CpuProfile* StopProfiling(Handle<String> title); /** Deprecated. Use StopProfiling instead. */ - const CpuProfile* StopCpuProfiling(Handle<String> title); + V8_DEPRECATED("Use StopProfiling", + const CpuProfile* StopCpuProfiling(Handle<String> title)); /** * Tells the profiler whether the embedder is idle. @@ -231,19 +219,20 @@ class V8_EXPORT HeapGraphNode { public: enum Type { - kHidden = 0, // Hidden node, may be filtered when shown to user. - kArray = 1, // An array of elements. - kString = 2, // A string. - kObject = 3, // A JS object (except for arrays and strings). - kCode = 4, // Compiled code. - kClosure = 5, // Function closure. - kRegExp = 6, // RegExp. - kHeapNumber = 7, // Number stored in the heap. - kNative = 8, // Native object (not from V8 heap). - kSynthetic = 9, // Synthetic object, usualy used for grouping - // snapshot items together. - kConsString = 10, // Concatenated string. A pair of pointers to strings. - kSlicedString = 11 // Sliced string. A fragment of another string. + kHidden = 0, // Hidden node, may be filtered when shown to user. + kArray = 1, // An array of elements. + kString = 2, // A string. + kObject = 3, // A JS object (except for arrays and strings). + kCode = 4, // Compiled code. + kClosure = 5, // Function closure. + kRegExp = 6, // RegExp. + kHeapNumber = 7, // Number stored in the heap. + kNative = 8, // Native object (not from V8 heap). + kSynthetic = 9, // Synthetic object, usualy used for grouping + // snapshot items together. + kConsString = 10, // Concatenated string. A pair of pointers to strings. + kSlicedString = 11, // Sliced string. A fragment of another string. + kSymbol = 12 // A Symbol (ES6). 
}; /** Returns node type (see HeapGraphNode::Type). */ @@ -304,7 +293,7 @@ */ virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) { return kAbort; - }; + } }; diff -Nru nodejs-0.11.13/deps/v8/include/v8stdint.h nodejs-0.11.15/deps/v8/include/v8stdint.h --- nodejs-0.11.13/deps/v8/include/v8stdint.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8stdint.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Load definitions of standard types. diff -Nru nodejs-0.11.13/deps/v8/include/v8-testing.h nodejs-0.11.15/deps/v8/include/v8-testing.h --- nodejs-0.11.13/deps/v8/include/v8-testing.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8-testing.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_V8_TEST_H_ #define V8_V8_TEST_H_ diff -Nru nodejs-0.11.13/deps/v8/include/v8-util.h nodejs-0.11.15/deps/v8/include/v8-util.h --- nodejs-0.11.13/deps/v8/include/v8-util.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/include/v8-util.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,13 @@ // Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_UTIL_H_ #define V8_UTIL_H_ #include "v8.h" #include <map> +#include <vector> /** * Support for Persistent containers. @@ -42,6 +20,10 @@ typedef uintptr_t PersistentContainerValue; static const uintptr_t kPersistentContainerNotFound = 0; +enum PersistentContainerCallbackType { + kNotWeak, + kWeak +}; /** @@ -92,38 +74,34 @@ /** * A default trait implementation for PersistentValueMap, which inherits * a std:map backing map from StdMapTraits and holds non-weak persistent - * objects. + * objects and has no special Dispose handling. * - * Users have to implement their own dispose trait. + * You should not derive from this class, since MapType depends on the + * surrounding class, and hence a subclass cannot simply inherit the methods. 
*/ template<typename K, typename V> -class StrongMapTraits : public StdMapTraits<K, V> { +class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> { public: // Weak callback & friends: - static const bool kIsWeak = false; - typedef typename StdMapTraits<K, V>::Impl Impl; + static const PersistentContainerCallbackType kCallbackType = kNotWeak; + typedef PersistentValueMap<K, V, DefaultPersistentValueMapTraits<K, V> > + MapType; typedef void WeakCallbackDataType; + static WeakCallbackDataType* WeakCallbackParameter( - Impl* impl, const K& key, Local<V> value); - static Impl* ImplFromWeakCallbackData( - const WeakCallbackData<V, WeakCallbackDataType>& data); + MapType* map, const K& key, Local<V> value) { + return NULL; + } + static MapType* MapFromWeakCallbackData( + const WeakCallbackData<V, WeakCallbackDataType>& data) { + return NULL; + } static K KeyFromWeakCallbackData( - const WeakCallbackData<V, WeakCallbackDataType>& data); - static void DisposeCallbackData(WeakCallbackDataType* data); -}; - - -/** - * A default trait implementation for PersistentValueMap, with a std::map - * backing map, non-weak persistents as values, and no special dispose - * handling. Can be used as-is. 
- */ -template<typename K, typename V> -class DefaultPersistentValueMapTraits : public StrongMapTraits<K, V> { - public: - typedef typename StrongMapTraits<K, V>::Impl Impl; - static void Dispose(Isolate* isolate, UniquePersistent<V> value, - Impl* impl, K key) { } + const WeakCallbackData<V, WeakCallbackDataType>& data) { + return K(); + } + static void DisposeCallbackData(WeakCallbackDataType* data) { } + static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) { } }; @@ -140,55 +118,49 @@ template<typename K, typename V, typename Traits> class PersistentValueMap { public: - V8_INLINE explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {} + explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {} - V8_INLINE ~PersistentValueMap() { Clear(); } + ~PersistentValueMap() { Clear(); } - V8_INLINE Isolate* GetIsolate() { return isolate_; } + Isolate* GetIsolate() { return isolate_; } /** * Return size of the map. */ - V8_INLINE size_t Size() { return Traits::Size(&impl_); } + size_t Size() { return Traits::Size(&impl_); } /** * Return whether the map holds weak persistents. */ - V8_INLINE bool IsWeak() { return Traits::kIsWeak; } + bool IsWeak() { return Traits::kCallbackType != kNotWeak; } /** * Get value stored in map. */ - V8_INLINE Local<V> Get(const K& key) { + Local<V> Get(const K& key) { return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, key))); } /** * Check whether a value is contained in the map. */ - V8_INLINE bool Contains(const K& key) { - return Traits::Get(&impl_, key) != 0; + bool Contains(const K& key) { + return Traits::Get(&impl_, key) != kPersistentContainerNotFound; } /** * Get value stored in map and set it in returnValue. * Return true if a value was found. 
*/ - V8_INLINE bool SetReturnValue(const K& key, - ReturnValue<Value>& returnValue) { - PersistentContainerValue value = Traits::Get(&impl_, key); - bool hasValue = value != 0; - if (hasValue) { - returnValue.SetInternal( - *reinterpret_cast<internal::Object**>(FromVal(value))); - } - return hasValue; + bool SetReturnValue(const K& key, + ReturnValue<Value> returnValue) { + return SetReturnValueFromVal(&returnValue, Traits::Get(&impl_, key)); } /** * Call Isolate::SetReference with the given parent and the map value. */ - V8_INLINE void SetReference(const K& key, + void SetReference(const K& key, const Persistent<Object>& parent) { GetIsolate()->SetReference( reinterpret_cast<internal::Object**>(parent.val_), @@ -215,7 +187,7 @@ /** * Return value for key and remove it from the map. */ - V8_INLINE UniquePersistent<V> Remove(const K& key) { + UniquePersistent<V> Remove(const K& key) { return Release(Traits::Remove(&impl_, key)).Pass(); } @@ -231,12 +203,76 @@ typename Traits::Impl impl; Traits::Swap(impl_, impl); for (It i = Traits::Begin(&impl); i != Traits::End(&impl); ++i) { - Traits::Dispose(isolate_, Release(Traits::Value(i)).Pass(), &impl, - Traits::Key(i)); + Traits::Dispose(isolate_, Release(Traits::Value(i)).Pass(), + Traits::Key(i)); } } } + /** + * Helper class for GetReference/SetWithReference. Do not use outside + * that context. 
+ */ + class PersistentValueReference { + public: + PersistentValueReference() : value_(kPersistentContainerNotFound) { } + PersistentValueReference(const PersistentValueReference& other) + : value_(other.value_) { } + + Local<V> NewLocal(Isolate* isolate) const { + return Local<V>::New(isolate, FromVal(value_)); + } + bool IsEmpty() const { + return value_ == kPersistentContainerNotFound; + } + template<typename T> + bool SetReturnValue(ReturnValue<T> returnValue) { + return SetReturnValueFromVal(&returnValue, value_); + } + void Reset() { + value_ = kPersistentContainerNotFound; + } + void operator=(const PersistentValueReference& other) { + value_ = other.value_; + } + + private: + friend class PersistentValueMap; + + explicit PersistentValueReference(PersistentContainerValue value) + : value_(value) { } + + void operator=(PersistentContainerValue value) { + value_ = value; + } + + PersistentContainerValue value_; + }; + + /** + * Get a reference to a map value. This enables fast, repeated access + * to a value stored in the map while the map remains unchanged. + * + * Careful: This is potentially unsafe, so please use with care. + * The value will become invalid if the value for this key changes + * in the underlying map, as a result of Set or Remove for the same + * key; as a result of the weak callback for the same key; or as a + * result of calling Clear() or destruction of the map. + */ + PersistentValueReference GetReference(const K& key) { + return PersistentValueReference(Traits::Get(&impl_, key)); + } + + /** + * Put a value into the map and update the reference. + * Restrictions of GetReference apply here as well. + */ + UniquePersistent<V> Set(const K& key, UniquePersistent<V> value, + PersistentValueReference* reference) { + *reference = Leak(&value); + return SetUnique(key, &value); + } + private: PersistentValueMap(PersistentValueMap&); void operator=(PersistentValueMap&); @@ -246,10 +282,10 @@ * by the Traits class. 
*/ UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) { - if (Traits::kIsWeak) { + if (Traits::kCallbackType != kNotWeak) { Local<V> value(Local<V>::New(isolate_, *persistent)); persistent->template SetWeak<typename Traits::WeakCallbackDataType>( - Traits::WeakCallbackParameter(&impl_, key, value), WeakCallback); + Traits::WeakCallbackParameter(this, key, value), WeakCallback); } PersistentContainerValue old_value = Traits::Set(&impl_, key, ClearAndLeak(persistent)); @@ -258,34 +294,51 @@ static void WeakCallback( const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) { - if (Traits::kIsWeak) { - typename Traits::Impl* impl = Traits::ImplFromWeakCallbackData(data); + if (Traits::kCallbackType != kNotWeak) { + PersistentValueMap<K, V, Traits>* persistentValueMap = + Traits::MapFromWeakCallbackData(data); K key = Traits::KeyFromWeakCallbackData(data); - PersistentContainerValue value = Traits::Remove(impl, key); - Traits::Dispose(data.GetIsolate(), Release(value).Pass(), impl, key); + Traits::Dispose(data.GetIsolate(), + persistentValueMap->Remove(key).Pass(), key); + Traits::DisposeCallbackData(data.GetParameter()); } } - V8_INLINE static V* FromVal(PersistentContainerValue v) { + static V* FromVal(PersistentContainerValue v) { return reinterpret_cast<V*>(v); } - V8_INLINE static PersistentContainerValue ClearAndLeak( + static bool SetReturnValueFromVal( + ReturnValue<Value>* returnValue, PersistentContainerValue value) { + bool hasValue = value != kPersistentContainerNotFound; + if (hasValue) { + returnValue->SetInternal( + *reinterpret_cast<internal::Object**>(FromVal(value))); + } + return hasValue; + } + + static PersistentContainerValue ClearAndLeak( UniquePersistent<V>* persistent) { V* v = persistent->val_; persistent->val_ = 0; return reinterpret_cast<PersistentContainerValue>(v); } + static PersistentContainerValue Leak( + UniquePersistent<V>* persistent) { + return 
reinterpret_cast<PersistentContainerValue>(persistent->val_); + } + /** * Return a container value as UniquePersistent and make sure the weak * callback is properly disposed of. All remove functionality should go * through this. */ - V8_INLINE static UniquePersistent<V> Release(PersistentContainerValue v) { + static UniquePersistent<V> Release(PersistentContainerValue v) { UniquePersistent<V> p; p.val_ = FromVal(v); - if (Traits::kIsWeak && !p.IsEmpty()) { + if (Traits::kCallbackType != kNotWeak && p.IsWeak()) { Traits::DisposeCallbackData( p.template ClearWeak<typename Traits::WeakCallbackDataType>()); } @@ -313,42 +366,121 @@ }; +class DefaultPersistentValueVectorTraits { + public: + typedef std::vector<PersistentContainerValue> Impl; + + static void Append(Impl* impl, PersistentContainerValue value) { + impl->push_back(value); + } + static bool IsEmpty(const Impl* impl) { + return impl->empty(); + } + static size_t Size(const Impl* impl) { + return impl->size(); + } + static PersistentContainerValue Get(const Impl* impl, size_t i) { + return (i < impl->size()) ? impl->at(i) : kPersistentContainerNotFound; + } + static void ReserveCapacity(Impl* impl, size_t capacity) { + impl->reserve(capacity); + } + static void Clear(Impl* impl) { + impl->clear(); + } +}; + + /** - * Empty default implementations for StrongTraits methods. - * - * These should not be necessary, since they're only used in code that - * is surrounded by if(Traits::kIsWeak), which for StrongMapTraits is - * compile-time false. Most compilers can live without them; however - * the compiler we use from 64-bit Win differs. + * A vector wrapper that safely stores UniquePersistent values. + * C++11 embedders don't need this class, as they can use UniquePersistent + * directly in std containers. * - * TODO(vogelheim): Remove these once they're no longer necessary. + * This class relies on a backing vector implementation, whose type and methods + * are described by the Traits class. 
The backing map will handle values of type + * PersistentContainerValue, with all conversion into and out of V8 + * handles being transparently handled by this class. */ -template<typename K, typename V> -typename StrongMapTraits<K, V>::WeakCallbackDataType* - StrongMapTraits<K, V>::WeakCallbackParameter( - Impl* impl, const K& key, Local<V> value) { - return NULL; -} +template<typename V, typename Traits = DefaultPersistentValueVectorTraits> +class PersistentValueVector { + public: + explicit PersistentValueVector(Isolate* isolate) : isolate_(isolate) { } + ~PersistentValueVector() { + Clear(); + } -template<typename K, typename V> -typename StrongMapTraits<K, V>::Impl* - StrongMapTraits<K, V>::ImplFromWeakCallbackData( - const WeakCallbackData<V, WeakCallbackDataType>& data) { - return NULL; -} + /** + * Append a value to the vector. + */ + void Append(Local<V> value) { + UniquePersistent<V> persistent(isolate_, value); + Traits::Append(&impl_, ClearAndLeak(&persistent)); + } + /** + * Append a persistent's value to the vector. + */ + void Append(UniquePersistent<V> persistent) { + Traits::Append(&impl_, ClearAndLeak(&persistent)); + } -template<typename K, typename V> -K StrongMapTraits<K, V>::KeyFromWeakCallbackData( - const WeakCallbackData<V, WeakCallbackDataType>& data) { - return K(); -} + /** + * Are there any values in the vector? + */ + bool IsEmpty() const { + return Traits::IsEmpty(&impl_); + } + /** + * How many elements are in the vector? + */ + size_t Size() const { + return Traits::Size(&impl_); + } -template<typename K, typename V> -void StrongMapTraits<K, V>::DisposeCallbackData(WeakCallbackDataType* data) { -} + /** + * Retrieve the i-th value in the vector. + */ + Local<V> Get(size_t index) const { + return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, index))); + } + + /** + * Remove all elements from the vector. 
+ */ + void Clear() { + size_t length = Traits::Size(&impl_); + for (size_t i = 0; i < length; i++) { + UniquePersistent<V> p; + p.val_ = FromVal(Traits::Get(&impl_, i)); + } + Traits::Clear(&impl_); + } + + /** + * Reserve capacity in the vector. + * (Efficiency gains depend on the backing implementation.) + */ + void ReserveCapacity(size_t capacity) { + Traits::ReserveCapacity(&impl_, capacity); + } + + private: + static PersistentContainerValue ClearAndLeak( + UniquePersistent<V>* persistent) { + V* v = persistent->val_; + persistent->val_ = 0; + return reinterpret_cast<PersistentContainerValue>(v); + } + + static V* FromVal(PersistentContainerValue v) { + return reinterpret_cast<V*>(v); + } + + Isolate* isolate_; + typename Traits::Impl impl_; +}; } // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/Makefile nodejs-0.11.15/deps/v8/Makefile --- nodejs-0.11.13/deps/v8/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -70,6 +70,10 @@ else GYPFLAGS += -Dv8_enable_backtrace=1 endif +# verifypredictable=on +ifeq ($(verifypredictable), on) + GYPFLAGS += -Dv8_enable_verify_predictable=1 +endif # snapshot=off ifeq ($(snapshot), off) GYPFLAGS += -Dv8_use_snapshot='false' @@ -96,10 +100,6 @@ ifeq ($(optdebug), on) GYPFLAGS += -Dv8_optimized_debug=2 endif -# debuggersupport=off -ifeq ($(debuggersupport), off) - GYPFLAGS += -Dv8_enable_debugger_support=0 -endif # unalignedaccess=on ifeq ($(unalignedaccess), on) GYPFLAGS += -Dv8_can_use_unaligned_accesses=true @@ -140,9 +140,9 @@ # asan=/path/to/clang++ ifneq ($(strip $(asan)),) GYPFLAGS += -Dasan=1 - export CXX="$(asan)" - export CXX_host="$(asan)" - export LINK="$(asan)" + export CXX=$(asan) + export CXX_host=$(asan) + export LINK=$(asan) export ASAN_SYMBOLIZER_PATH="$(dir $(asan))llvm-symbolizer" endif @@ -160,11 +160,6 @@ endif endif endif -# vfp2=off. Deprecated, use armfpu= -# vfp3=off. 
Deprecated, use armfpu= -ifeq ($(vfp3), off) - GYPFLAGS += -Darm_fpu=vfp -endif # hardfp=on/off. Deprecated, use armfloatabi ifeq ($(hardfp),on) GYPFLAGS += -Darm_float_abi=hard @@ -173,16 +168,10 @@ GYPFLAGS += -Darm_float_abi=softfp endif endif -# armneon=on/off -ifeq ($(armneon), on) - GYPFLAGS += -Darm_neon=1 -endif # fpu: armfpu=xxx # xxx: vfp, vfpv3-d16, vfpv3, neon. ifeq ($(armfpu),) -ifneq ($(vfp3), off) GYPFLAGS += -Darm_fpu=default -endif else GYPFLAGS += -Darm_fpu=$(armfpu) endif @@ -202,19 +191,19 @@ GYPFLAGS += -Darm_thumb=1 endif endif -# armtest=on +# arm_test_noprobe=on # With this flag set, by default v8 will only use features implied # by the compiler (no probe). This is done by modifying the default -# values of enable_armv7, enable_vfp2, enable_vfp3 and enable_32dregs. +# values of enable_armv7, enable_vfp3, enable_32dregs and enable_neon. # Modifying these flags when launching v8 will enable the probing for # the specified values. -# When using the simulator, this flag is implied. -ifeq ($(armtest), on) - GYPFLAGS += -Darm_test=on +ifeq ($(arm_test_noprobe), on) + GYPFLAGS += -Darm_test_noprobe=on endif # ----------------- available targets: -------------------- -# - "dependencies": pulls in external dependencies (currently: GYP) +# - "builddeps": pulls in external dependencies for building +# - "dependencies": pulls in all external dependencies # - "grokdump": rebuilds heap constants lists used by grokdump # - any arch listed in ARCHES (see below) # - any mode listed in MODES @@ -232,11 +221,11 @@ # Architectures and modes to be compiled. Consider these to be internal # variables, don't override them (use the targets instead). 
-ARCHES = ia32 x64 arm arm64 mipsel +ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 DEFAULT_ARCHES = ia32 x64 arm MODES = release debug optdebug DEFAULT_MODES = release debug -ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel +ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87 NACL_ARCHES = nacl_ia32 nacl_x64 # List of files that trigger Makefile regeneration: @@ -262,7 +251,7 @@ # File where previously used GYPFLAGS are stored. ENVFILE = $(OUTDIR)/environment -.PHONY: all check clean dependencies $(ENVFILE).new native \ +.PHONY: all check clean builddeps dependencies $(ENVFILE).new native \ qc quickcheck $(QUICKCHECKS) \ $(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \ $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \ @@ -281,10 +270,6 @@ $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \ builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)" -mips mips.release mips.debug: - @echo "V8 does not support big-endian MIPS builds at the moment," \ - "please use little-endian builds (mipsel)." - # Compile targets. MODES and ARCHES are convenience targets. .SECONDEXPANSION: $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES)) @@ -409,23 +394,27 @@ rm -rf $(OUTDIR)/native find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete -clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean +clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean gtags.clean # GYP file generation targets. 
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS)) $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE) - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \ - PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \ + $(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \ + cut -f 2 -d " " | cut -f 1 -d "-" )) + $(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH))) + $(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@)))) + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \ GYP_GENERATORS=make \ build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ -Ibuild/standalone.gypi --depth=. \ - -Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \ + -Dv8_target_arch=$(V8_TARGET_ARCH) \ + $(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \ + -Dtarget_arch=$(V8_TARGET_ARCH),) \ $(if $(findstring optdebug,$@),-Dv8_optimized_debug=2,) \ -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE) - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \ - PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \ + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \ GYP_GENERATORS=make \ build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS) @@ -467,11 +456,38 @@ @cat $(DUMP_FILE).tmpl > $(DUMP_FILE) @$(OUTDIR)/ia32.release/d8 --dump-heap-constants >> $(DUMP_FILE) -# Dependencies. +# Support for the GNU GLOBAL Source Code Tag System. +gtags.files: $(GYPFILES) $(ENVFILE) + @find include src test -name '*.h' -o -name '*.cc' -o -name '*.c' > $@ + +# We need to manually set the stack limit here, to work around bugs in +# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems. 
+GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null) + @bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<' + +gtags.clean: + rm -f gtags.files GPATH GRTAGS GSYMS GTAGS + +# Dependencies. "builddeps" are dependencies required solely for building, +# "dependencies" includes also dependencies required for development. # Remember to keep these in sync with the DEPS file. -dependencies: +builddeps: svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \ --revision 1831 - svn checkout --force \ - https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \ - third_party/icu --revision 258359 + if svn info third_party/icu 2>&1 | grep -q icu46 ; then \ + svn switch --force \ + https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \ + third_party/icu --revision 277999 ; \ + else \ + svn checkout --force \ + https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \ + third_party/icu --revision 277999 ; \ + fi + svn checkout --force http://googletest.googlecode.com/svn/trunk \ + testing/gtest --revision 692 + svn checkout --force http://googlemock.googlecode.com/svn/trunk \ + testing/gmock --revision 485 + +dependencies: builddeps + # The spec is a copy of the hooks in v8's DEPS file. 
+ gclient sync -r fb782d4369d5ae04f17a2fceef7de5a63e50f07b --spec="solutions = [{u'managed': False, u'name': u'buildtools', u'url': u'https://chromium.googlesource.com/chromium/buildtools.git', u'custom_deps': {}, u'custom_hooks': [{u'name': u'clang_format_win',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=win32',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/win/clang-format.exe.sha1']},{u'name': u'clang_format_mac',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=darwin',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/mac/clang-format.sha1']},{u'name': u'clang_format_linux',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=linux*',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/linux64/clang-format.sha1']}],u'deps_file': u'.DEPS.git', u'safesync_url': u''}]" diff -Nru nodejs-0.11.13/deps/v8/Makefile.android nodejs-0.11.15/deps/v8/Makefile.android --- nodejs-0.11.13/deps/v8/Makefile.android 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/Makefile.android 2015-01-20 21:22:17.000000000 +0000 @@ -26,7 +26,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# Those definitions should be consistent with the main Makefile -ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel +ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87 MODES = release debug # Generates all combinations of ANDROID ARCHES and MODES, @@ -47,20 +47,20 @@ endif ifeq ($(ARCH), android_arm) - DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm + DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm android_target_platform=14 DEFINES += arm_neon=0 arm_version=7 TOOLCHAIN_ARCH = arm-linux-androideabi TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH) - TOOLCHAIN_VER = 4.6 + TOOLCHAIN_VER = 4.8 else ifeq ($(ARCH), android_arm64) - DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 + DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=L TOOLCHAIN_ARCH = aarch64-linux-android TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH) - TOOLCHAIN_VER = 4.8 + TOOLCHAIN_VER = 4.9 else ifeq ($(ARCH), android_mipsel) - DEFINES = target_arch=mipsel v8_target_arch=mipsel + DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14 DEFINES += android_target_arch=mips mips_arch_variant=mips32r2 TOOLCHAIN_ARCH = mipsel-linux-android TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH) @@ -68,12 +68,19 @@ else ifeq ($(ARCH), android_ia32) - DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 + DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14 TOOLCHAIN_ARCH = x86 TOOLCHAIN_PREFIX = i686-linux-android TOOLCHAIN_VER = 4.6 else - $(error Target architecture "${ARCH}" is not supported) + ifeq ($(ARCH), android_x87) + DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14 + TOOLCHAIN_ARCH = x86 + TOOLCHAIN_PREFIX = i686-linux-android + TOOLCHAIN_VER = 4.6 + else + $(error Target architecture "${ARCH}" is not supported) + endif endif endif endif @@ 
-91,6 +98,7 @@ # For mksnapshot host generation. DEFINES += host_os=${HOST_OS} +DEFINES += OS=android .SECONDEXPANSION: $(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@ @@ -112,7 +120,7 @@ GYP_DEFINES="${DEFINES}" \ CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \ CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \ - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \ + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \ build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \ -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \ -S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS} diff -Nru nodejs-0.11.13/deps/v8/Makefile.nacl nodejs-0.11.15/deps/v8/Makefile.nacl --- nodejs-0.11.13/deps/v8/Makefile.nacl 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/Makefile.nacl 2015-01-20 21:22:17.000000000 +0000 @@ -77,6 +77,9 @@ # ICU doesn't support NaCl. GYPENV += v8_enable_i18n_support=0 +# Disable strict aliasing - v8 code often relies on undefined behavior of C++. +GYPENV += v8_no_strict_aliasing=1 + NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS)) .SECONDEXPANSION: # For some reason the $$(basename $$@) expansion didn't work here... @@ -94,7 +97,7 @@ GYP_DEFINES="${GYPENV}" \ CC=${NACL_CC} \ CXX=${NACL_CXX} \ - PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \ + PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \ build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \ -Ibuild/standalone.gypi --depth=. 
\ -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \ diff -Nru nodejs-0.11.13/deps/v8/OWNERS nodejs-0.11.15/deps/v8/OWNERS --- nodejs-0.11.13/deps/v8/OWNERS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/OWNERS 2015-01-20 21:22:17.000000000 +0000 @@ -11,10 +11,12 @@ marja@chromium.org mstarzinger@chromium.org mvstanton@chromium.org +rmcilroy@chromium.org rossberg@chromium.org svenpanne@chromium.org titzer@chromium.org ulan@chromium.org vegorov@chromium.org verwaest@chromium.org +vogelheim@chromium.org yangguo@chromium.org diff -Nru nodejs-0.11.13/deps/v8/PRESUBMIT.py nodejs-0.11.15/deps/v8/PRESUBMIT.py --- nodejs-0.11.13/deps/v8/PRESUBMIT.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/PRESUBMIT.py 2015-01-20 21:22:17.000000000 +0000 @@ -31,6 +31,9 @@ for more details about the presubmit API built into gcl. """ +import sys + + def _V8PresubmitChecks(input_api, output_api): """Runs the V8 presubmit checks.""" import sys @@ -38,6 +41,8 @@ input_api.PresubmitLocalPath(), 'tools')) from presubmit import CppLintProcessor from presubmit import SourceProcessor + from presubmit import CheckGeneratedRuntimeTests + from presubmit import CheckExternalReferenceRegistration results = [] if not CppLintProcessor().Run(input_api.PresubmitLocalPath()): @@ -46,6 +51,65 @@ results.append(output_api.PresubmitError( "Copyright header, trailing whitespaces and two empty lines " \ "between declarations check failed")) + if not CheckGeneratedRuntimeTests(input_api.PresubmitLocalPath()): + results.append(output_api.PresubmitError( + "Generated runtime tests check failed")) + if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()): + results.append(output_api.PresubmitError( + "External references registration check failed")) + return results + + +def _CheckUnwantedDependencies(input_api, output_api): + """Runs checkdeps on #include statements added in this + change. Breaking - rules is an error, breaking ! rules is a + warning. 
+ """ + # We need to wait until we have an input_api object and use this + # roundabout construct to import checkdeps because this file is + # eval-ed and thus doesn't have __file__. + original_sys_path = sys.path + try: + sys.path = sys.path + [input_api.os_path.join( + input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')] + import checkdeps + from cpp_checker import CppChecker + from rules import Rule + finally: + # Restore sys.path to what it was before. + sys.path = original_sys_path + + added_includes = [] + for f in input_api.AffectedFiles(): + if not CppChecker.IsCppFile(f.LocalPath()): + continue + + changed_lines = [line for line_num, line in f.ChangedContents()] + added_includes.append([f.LocalPath(), changed_lines]) + + deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath()) + + error_descriptions = [] + warning_descriptions = [] + for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes( + added_includes): + description_with_path = '%s\n %s' % (path, rule_description) + if rule_type == Rule.DISALLOW: + error_descriptions.append(description_with_path) + else: + warning_descriptions.append(description_with_path) + + results = [] + if error_descriptions: + results.append(output_api.PresubmitError( + 'You added one or more #includes that violate checkdeps rules.', + error_descriptions)) + if warning_descriptions: + results.append(output_api.PresubmitPromptOrNotify( + 'You added one or more #includes of files that are temporarily\n' + 'allowed but being removed. Can you avoid introducing the\n' + '#include? 
See relevant DEPS file(s) for details and contacts.', + warning_descriptions)) return results @@ -54,7 +118,10 @@ results = [] results.extend(input_api.canned_checks.CheckOwners( input_api, output_api, source_file_filter=None)) + results.extend(input_api.canned_checks.CheckPatchFormatted( + input_api, output_api)) results.extend(_V8PresubmitChecks(input_api, output_api)) + results.extend(_CheckUnwantedDependencies(input_api, output_api)) return results @@ -103,7 +170,16 @@ def GetPreferredTryMasters(project, change): return { 'tryserver.v8': { + 'v8_linux_rel': set(['defaulttests']), + 'v8_linux_dbg': set(['defaulttests']), + 'v8_linux_nosnap_rel': set(['defaulttests']), + 'v8_linux_nosnap_dbg': set(['defaulttests']), + 'v8_linux64_rel': set(['defaulttests']), + 'v8_linux_arm_dbg': set(['defaulttests']), + 'v8_linux_arm64_rel': set(['defaulttests']), + 'v8_linux_layout_dbg': set(['defaulttests']), 'v8_mac_rel': set(['defaulttests']), 'v8_win_rel': set(['defaulttests']), + 'v8_win64_rel': set(['defaulttests']), }, } diff -Nru nodejs-0.11.13/deps/v8/samples/lineprocessor.cc nodejs-0.11.15/deps/v8/samples/lineprocessor.cc --- nodejs-0.11.13/deps/v8/samples/lineprocessor.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/samples/lineprocessor.cc 2015-01-20 21:22:17.000000000 +0000 @@ -25,16 +25,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#include <v8.h> +#include <include/v8.h> -#ifdef ENABLE_DEBUGGER_SUPPORT -#include <v8-debug.h> -#endif // ENABLE_DEBUGGER_SUPPORT +#include <include/libplatform/libplatform.h> +#include <include/v8-debug.h> #include <fcntl.h> -#include <string.h> #include <stdio.h> #include <stdlib.h> +#include <string.h> /** * This sample program should demonstrate certain aspects of debugging @@ -71,25 +70,6 @@ var res = line + " | " + line; print(res); } - - * - * When run with "-p" argument, the program starts V8 Debugger Agent and - * allows remote debugger to attach and debug JavaScript code. - * - * Interesting aspects: - * 1. Wait for remote debugger to attach - * Normally the program compiles custom script and immediately runs it. - * If programmer needs to debug script from the very beginning, he should - * run this sample program with "--wait-for-connection" command line parameter. - * This way V8 will suspend on the first statement and wait for - * debugger to attach. - * - * 2. Unresponsive V8 - * V8 Debugger Agent holds a connection with remote debugger, but it does - * respond only when V8 is running some script. In particular, when this program - * is waiting for input, all requests from debugger get deferred until V8 - * is called again. See how "--callback" command-line parameter in this sample - * fixes this issue. */ enum MainCycleType { @@ -109,47 +89,18 @@ bool report_exceptions); -#ifdef ENABLE_DEBUGGER_SUPPORT v8::Persistent<v8::Context> debug_message_context; -void DispatchDebugMessages() { - // We are in some random thread. We should already have v8::Locker acquired - // (we requested this when registered this callback). We was called - // because new debug messages arrived; they may have already been processed, - // but we shouldn't worry about this. - // - // All we have to do is to set context and call ProcessDebugMessages. - // - // We should decide which V8 context to use here. 
This is important for - // "evaluate" command, because it must be executed some context. - // In our sample we have only one context, so there is nothing really to - // think about. - v8::Isolate* isolate = v8::Isolate::GetCurrent(); - v8::HandleScope handle_scope(isolate); - v8::Local<v8::Context> context = - v8::Local<v8::Context>::New(isolate, debug_message_context); - v8::Context::Scope scope(context); - - v8::Debug::ProcessDebugMessages(); -} -#endif // ENABLE_DEBUGGER_SUPPORT - - int RunMain(int argc, char* argv[]) { v8::V8::SetFlagsFromCommandLine(&argc, argv, true); - v8::Isolate* isolate = v8::Isolate::GetCurrent(); + v8::Isolate* isolate = v8::Isolate::New(); + v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Handle<v8::String> script_source; v8::Handle<v8::Value> script_name; int script_param_counter = 0; -#ifdef ENABLE_DEBUGGER_SUPPORT - int port_number = -1; - bool wait_for_connection = false; - bool support_callback = false; -#endif // ENABLE_DEBUGGER_SUPPORT - MainCycleType cycle_type = CycleInCpp; for (int i = 1; i < argc; i++) { @@ -162,15 +113,6 @@ cycle_type = CycleInCpp; } else if (strcmp(str, "--main-cycle-in-js") == 0) { cycle_type = CycleInJs; -#ifdef ENABLE_DEBUGGER_SUPPORT - } else if (strcmp(str, "--callback") == 0) { - support_callback = true; - } else if (strcmp(str, "--wait-for-connection") == 0) { - wait_for_connection = true; - } else if (strcmp(str, "-p") == 0 && i + 1 < argc) { - port_number = atoi(argv[i + 1]); // NOLINT - i++; -#endif // ENABLE_DEBUGGER_SUPPORT } else if (strncmp(str, "--", 2) == 0) { printf("Warning: unknown flag %s.\nTry --help for options\n", str); } else if (strcmp(str, "-e") == 0 && i + 1 < argc) { @@ -218,20 +160,8 @@ // Enter the newly created execution environment. 
v8::Context::Scope context_scope(context); -#ifdef ENABLE_DEBUGGER_SUPPORT debug_message_context.Reset(isolate, context); - v8::Locker locker(isolate); - - if (support_callback) { - v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true); - } - - if (port_number != -1) { - v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection); - } -#endif // ENABLE_DEBUGGER_SUPPORT - bool report_exceptions = true; v8::Handle<v8::Script> script; @@ -275,9 +205,6 @@ v8::Local<v8::Context> context, bool report_exceptions) { v8::Isolate* isolate = context->GetIsolate(); -#ifdef ENABLE_DEBUGGER_SUPPORT - v8::Locker lock(isolate); -#endif // ENABLE_DEBUGGER_SUPPORT v8::Handle<v8::String> fun_name = v8::String::NewFromUtf8(isolate, "ProcessLine"); @@ -328,8 +255,12 @@ int main(int argc, char* argv[]) { v8::V8::InitializeICU(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); int result = RunMain(argc, argv); v8::V8::Dispose(); + v8::V8::ShutdownPlatform(); + delete platform; return result; } @@ -374,7 +305,7 @@ printf("%s\n", exception_string); } else { // Print (filename):(line number): (message). 
- v8::String::Utf8Value filename(message->GetScriptResourceName()); + v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName()); const char* filename_string = ToCString(filename); int linenum = message->GetLineNumber(); printf("%s:%i: %s\n", filename_string, linenum, exception_string); @@ -435,9 +366,6 @@ char* res; { -#ifdef ENABLE_DEBUGGER_SUPPORT - v8::Unlocker unlocker(v8::Isolate::GetCurrent()); -#endif // ENABLE_DEBUGGER_SUPPORT res = fgets(buffer, kBufferSize, stdin); } v8::Isolate* isolate = v8::Isolate::GetCurrent(); diff -Nru nodejs-0.11.13/deps/v8/samples/process.cc nodejs-0.11.15/deps/v8/samples/process.cc --- nodejs-0.11.13/deps/v8/samples/process.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/samples/process.cc 2015-01-20 21:22:17.000000000 +0000 @@ -25,10 +25,12 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <v8.h> +#include <include/v8.h> + +#include <include/libplatform/libplatform.h> -#include <string> #include <map> +#include <string> #ifdef COMPRESS_STARTUP_DATA_BZ2 #error Using compressed startup data is not supported for this sample @@ -574,7 +576,7 @@ void ParseOptions(int argc, char* argv[], - map<string, string>& options, + map<string, string>* options, string* file) { for (int i = 1; i < argc; i++) { string arg = argv[i]; @@ -584,7 +586,7 @@ } else { string key = arg.substr(0, index); string value = arg.substr(index+1); - options[key] = value; + (*options)[key] = value; } } } @@ -644,14 +646,17 @@ int main(int argc, char* argv[]) { v8::V8::InitializeICU(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); map<string, string> options; string file; - ParseOptions(argc, argv, options, &file); + ParseOptions(argc, argv, &options, &file); if (file.empty()) { fprintf(stderr, "No script was specified.\n"); return 1; } - Isolate* isolate = 
Isolate::GetCurrent(); + Isolate* isolate = Isolate::New(); + Isolate::Scope isolate_scope(isolate); HandleScope scope(isolate); Handle<String> source = ReadFile(isolate, file); if (source.IsEmpty()) { diff -Nru nodejs-0.11.13/deps/v8/samples/samples.gyp nodejs-0.11.15/deps/v8/samples/samples.gyp --- nodejs-0.11.13/deps/v8/samples/samples.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/samples/samples.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -35,9 +35,10 @@ 'type': 'executable', 'dependencies': [ '../tools/gyp/v8.gyp:v8', + '../tools/gyp/v8.gyp:v8_libplatform', ], 'include_dirs': [ - '../include', + '..', ], 'conditions': [ ['v8_enable_i18n_support==1', { diff -Nru nodejs-0.11.13/deps/v8/samples/shell.cc nodejs-0.11.15/deps/v8/samples/shell.cc --- nodejs-0.11.13/deps/v8/samples/shell.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/samples/shell.cc 2015-01-20 21:22:17.000000000 +0000 @@ -25,12 +25,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <v8.h> +#include <include/v8.h> + +#include <include/libplatform/libplatform.h> + #include <assert.h> #include <fcntl.h> -#include <string.h> #include <stdio.h> #include <stdlib.h> +#include <string.h> #ifdef COMPRESS_STARTUP_DATA_BZ2 #error Using compressed startup data is not supported for this sample @@ -65,25 +68,42 @@ static bool run_shell; +class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator { + public: + virtual void* Allocate(size_t length) { + void* data = AllocateUninitialized(length); + return data == NULL ? 
data : memset(data, 0, length); + } + virtual void* AllocateUninitialized(size_t length) { return malloc(length); } + virtual void Free(void* data, size_t) { free(data); } +}; + + int main(int argc, char* argv[]) { v8::V8::InitializeICU(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); v8::V8::SetFlagsFromCommandLine(&argc, argv, true); - v8::Isolate* isolate = v8::Isolate::GetCurrent(); + ShellArrayBufferAllocator array_buffer_allocator; + v8::V8::SetArrayBufferAllocator(&array_buffer_allocator); + v8::Isolate* isolate = v8::Isolate::New(); run_shell = (argc == 1); int result; { + v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Handle<v8::Context> context = CreateShellContext(isolate); if (context.IsEmpty()) { fprintf(stderr, "Error creating context\n"); return 1; } - context->Enter(); + v8::Context::Scope context_scope(context); result = RunMain(isolate, argc, argv); if (run_shell) RunShell(context); - context->Exit(); } v8::V8::Dispose(); + v8::V8::ShutdownPlatform(); + delete platform; return result; } @@ -345,7 +365,7 @@ fprintf(stderr, "%s\n", exception_string); } else { // Print (filename):(line number): (message). - v8::String::Utf8Value filename(message->GetScriptResourceName()); + v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName()); const char* filename_string = ToCString(filename); int linenum = message->GetLineNumber(); fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string); diff -Nru nodejs-0.11.13/deps/v8/src/accessors.cc nodejs-0.11.15/deps/v8/src/accessors.cc --- nodejs-0.11.13/deps/v8/src/accessors.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/accessors.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,89 +1,77 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" -#include "accessors.h" - -#include "compiler.h" -#include "contexts.h" -#include "deoptimizer.h" -#include "execution.h" -#include "factory.h" -#include "frames-inl.h" -#include "isolate.h" -#include "list-inl.h" -#include "property-details.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-namespace v8 { -namespace internal { +#include "src/v8.h" +#include "src/accessors.h" +#include "src/api.h" +#include "src/compiler.h" +#include "src/contexts.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/factory.h" +#include "src/frames-inl.h" +#include "src/isolate.h" +#include "src/list-inl.h" +#include "src/property-details.h" +#include "src/prototype.h" -template <class C> -static C* FindInstanceOf(Isolate* isolate, Object* obj) { - for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) { - if (Is<C>(cur)) return C::cast(cur); - } - return NULL; -} +namespace v8 { +namespace internal { -// Entry point that never should be called. -MaybeObject* Accessors::IllegalSetter(Isolate* isolate, - JSObject*, - Object*, - void*) { - UNREACHABLE(); - return NULL; +Handle<AccessorInfo> Accessors::MakeAccessor( + Isolate* isolate, + Handle<String> name, + AccessorGetterCallback getter, + AccessorSetterCallback setter, + PropertyAttributes attributes) { + Factory* factory = isolate->factory(); + Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo(); + info->set_property_attributes(attributes); + info->set_all_can_read(false); + info->set_all_can_write(false); + info->set_name(*name); + Handle<Object> get = v8::FromCData(isolate, getter); + Handle<Object> set = v8::FromCData(isolate, setter); + info->set_getter(*get); + info->set_setter(*set); + return info; } -Object* Accessors::IllegalGetAccessor(Isolate* isolate, - Object* object, - void*) { - UNREACHABLE(); - return object; +Handle<ExecutableAccessorInfo> Accessors::CloneAccessor( + Isolate* isolate, + Handle<ExecutableAccessorInfo> accessor) { + Factory* factory = isolate->factory(); + Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo(); + info->set_name(accessor->name()); + info->set_flag(accessor->flag()); + info->set_expected_receiver_type(accessor->expected_receiver_type()); + info->set_getter(accessor->getter()); + 
info->set_setter(accessor->setter()); + info->set_data(accessor->data()); + return info; } -MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate, - JSObject*, - Object* value, - void*) { - // According to ECMA-262, section 8.6.2.2, page 28, setting - // read-only properties must be silently ignored. - return value; +template <class C> +static C* FindInstanceOf(Isolate* isolate, Object* obj) { + for (PrototypeIterator iter(isolate, obj, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (Is<C>(iter.GetCurrent())) return C::cast(iter.GetCurrent()); + } + return NULL; } -static V8_INLINE bool CheckForName(Handle<String> name, - String* property_name, +static V8_INLINE bool CheckForName(Handle<Name> name, + Handle<String> property_name, int offset, int* object_offset) { - if (name->Equals(property_name)) { + if (Name::Equals(name, property_name)) { *object_offset = offset; return true; } @@ -95,40 +83,40 @@ // If true, *object_offset contains offset of object field. 
template <class T> bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type, - Handle<String> name, + Handle<Name> name, int* object_offset) { Isolate* isolate = name->GetIsolate(); if (type->Is(T::String())) { - return CheckForName(name, isolate->heap()->length_string(), + return CheckForName(name, isolate->factory()->length_string(), String::kLengthOffset, object_offset); } if (!type->IsClass()) return false; - Handle<Map> map = type->AsClass(); + Handle<Map> map = type->AsClass()->Map(); switch (map->instance_type()) { case JS_ARRAY_TYPE: return - CheckForName(name, isolate->heap()->length_string(), + CheckForName(name, isolate->factory()->length_string(), JSArray::kLengthOffset, object_offset); case JS_TYPED_ARRAY_TYPE: return - CheckForName(name, isolate->heap()->length_string(), + CheckForName(name, isolate->factory()->length_string(), JSTypedArray::kLengthOffset, object_offset) || - CheckForName(name, isolate->heap()->byte_length_string(), + CheckForName(name, isolate->factory()->byte_length_string(), JSTypedArray::kByteLengthOffset, object_offset) || - CheckForName(name, isolate->heap()->byte_offset_string(), + CheckForName(name, isolate->factory()->byte_offset_string(), JSTypedArray::kByteOffsetOffset, object_offset); case JS_ARRAY_BUFFER_TYPE: return - CheckForName(name, isolate->heap()->byte_length_string(), + CheckForName(name, isolate->factory()->byte_length_string(), JSArrayBuffer::kByteLengthOffset, object_offset); case JS_DATA_VIEW_TYPE: return - CheckForName(name, isolate->heap()->byte_length_string(), + CheckForName(name, isolate->factory()->byte_length_string(), JSDataView::kByteLengthOffset, object_offset) || - CheckForName(name, isolate->heap()->byte_offset_string(), + CheckForName(name, isolate->factory()->byte_offset_string(), JSDataView::kByteOffsetOffset, object_offset); default: return false; @@ -138,39 +126,48 @@ template bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type, - Handle<String> name, + Handle<Name> name, int* 
object_offset); template bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type, - Handle<String> name, + Handle<Name> name, int* object_offset); +bool SetPropertyOnInstanceIfInherited( + Isolate* isolate, const v8::PropertyCallbackInfo<void>& info, + v8::Local<v8::String> name, Handle<Object> value) { + Handle<Object> holder = Utils::OpenHandle(*info.Holder()); + Handle<Object> receiver = Utils::OpenHandle(*info.This()); + if (*holder == *receiver) return false; + if (receiver->IsJSObject()) { + Handle<JSObject> object = Handle<JSObject>::cast(receiver); + // This behaves sloppy since we lost the actual strict-mode. + // TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data + // properties. + if (!object->map()->is_extensible()) return true; + JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name), + value, NONE).Check(); + } + return true; +} + + // // Accessors::ArrayLength // -MaybeObject* Accessors::ArrayGetLength(Isolate* isolate, - Object* object, - void*) { - // Traverse the prototype chain until we reach an array. - JSArray* holder = FindInstanceOf<JSArray>(isolate, object); - return holder == NULL ? Smi::FromInt(0) : holder->length(); -} - - // The helper function will 'flatten' Number objects. 
Handle<Object> Accessors::FlattenNumber(Isolate* isolate, Handle<Object> value) { if (value->IsNumber() || !value->IsJSValue()) return value; Handle<JSValue> wrapper = Handle<JSValue>::cast(value); - ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()-> + DCHECK(wrapper->GetIsolate()->native_context()->number_function()-> has_initial_map()); - if (wrapper->map() == - isolate->context()->native_context()->number_function()->initial_map()) { + if (wrapper->map() == isolate->number_function()->initial_map()) { return handle(wrapper->value(), isolate); } @@ -178,174 +175,294 @@ } -MaybeObject* Accessors::ArraySetLength(Isolate* isolate, - JSObject* object_raw, - Object* value_raw, - void*) { - HandleScope scope(isolate); - Handle<JSObject> object(object_raw, isolate); - Handle<Object> value(value_raw, isolate); - - // This means one of the object's prototypes is a JSArray and the - // object does not have a 'length' property. Calling SetProperty - // causes an infinite loop. 
- if (!object->IsJSArray()) { - Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object, - isolate->factory()->length_string(), value, NONE); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; +void Accessors::ArrayLengthGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder())); + Object* result = holder->length(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate))); +} + + +void Accessors::ArrayLengthSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> val, + const v8::PropertyCallbackInfo<void>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<JSObject> object = Utils::OpenHandle(*info.This()); + Handle<Object> value = Utils::OpenHandle(*val); + if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) { + return; } value = FlattenNumber(isolate, value); Handle<JSArray> array_handle = Handle<JSArray>::cast(object); - - bool has_exception; - Handle<Object> uint32_v = - Execution::ToUint32(isolate, value, &has_exception); - if (has_exception) return Failure::Exception(); - Handle<Object> number_v = - Execution::ToNumber(isolate, value, &has_exception); - if (has_exception) return Failure::Exception(); + MaybeHandle<Object> maybe; + Handle<Object> uint32_v; + maybe = Execution::ToUint32(isolate, value); + if (!maybe.ToHandle(&uint32_v)) { + isolate->OptionalRescheduleException(false); + return; + } + Handle<Object> number_v; + maybe = Execution::ToNumber(isolate, value); + if (!maybe.ToHandle(&number_v)) { + isolate->OptionalRescheduleException(false); + return; + } if (uint32_v->Number() == number_v->Number()) { - Handle<Object> result = JSArray::SetElementsLength(array_handle, 
uint32_v); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; + maybe = JSArray::SetElementsLength(array_handle, uint32_v); + maybe.Check(); + return; } - return isolate->Throw( + + isolate->ScheduleThrow( *isolate->factory()->NewRangeError("invalid_array_length", HandleVector<Object>(NULL, 0))); } -const AccessorDescriptor Accessors::ArrayLength = { - ArrayGetLength, - ArraySetLength, - 0 -}; +Handle<AccessorInfo> Accessors::ArrayLengthInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->length_string(), + &ArrayLengthGetter, + &ArrayLengthSetter, + attributes); +} + // // Accessors::StringLength // +void Accessors::StringLengthGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + + // We have a slight impedance mismatch between the external API and the way we + // use callbacks internally: Externally, callbacks can only be used with + // v8::Object, but internally we have callbacks on entities which are higher + // in the hierarchy, in this case for String values. -MaybeObject* Accessors::StringGetLength(Isolate* isolate, - Object* object, - void*) { - Object* value = object; - if (object->IsJSValue()) value = JSValue::cast(object)->value(); - if (value->IsString()) return Smi::FromInt(String::cast(value)->length()); - // If object is not a string we return 0 to be compatible with WebKit. - // Note: Firefox returns the length of ToString(object). - return Smi::FromInt(0); + Object* value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This())); + if (!value->IsString()) { + // Not a string value. That means that we either got a String wrapper or + // a Value with a String wrapper in its prototype chain. 
+ value = JSValue::cast(*Utils::OpenHandle(*info.Holder()))->value(); + } + Object* result = Smi::FromInt(String::cast(value)->length()); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate))); } -const AccessorDescriptor Accessors::StringLength = { - StringGetLength, - IllegalSetter, - 0 -}; +void Accessors::StringLengthSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::StringLengthInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->length_string(), + &StringLengthGetter, + &StringLengthSetter, + attributes); +} // -// Accessors::ScriptSource +// Accessors::ScriptColumnOffset // -MaybeObject* Accessors::ScriptGetSource(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return Script::cast(script)->source(); +void Accessors::ScriptColumnOffsetGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* res = Script::cast(JSValue::cast(object)->value())->column_offset(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate))); } -const AccessorDescriptor Accessors::ScriptSource = { - ScriptGetSource, - IllegalSetter, - 0 -}; +void Accessors::ScriptColumnOffsetSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("column_offset"))); + return MakeAccessor(isolate, + name, + 
&ScriptColumnOffsetGetter, + &ScriptColumnOffsetSetter, + attributes); +} // -// Accessors::ScriptName +// Accessors::ScriptId // -MaybeObject* Accessors::ScriptGetName(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return Script::cast(script)->name(); +void Accessors::ScriptIdGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* id = Script::cast(JSValue::cast(object)->value())->id(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate))); } -const AccessorDescriptor Accessors::ScriptName = { - ScriptGetName, - IllegalSetter, - 0 -}; +void Accessors::ScriptIdSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptIdInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("id"))); + return MakeAccessor(isolate, + name, + &ScriptIdGetter, + &ScriptIdSetter, + attributes); +} // -// Accessors::ScriptId +// Accessors::ScriptName // -MaybeObject* Accessors::ScriptGetId(Isolate* isolate, Object* object, void*) { - Object* script = JSValue::cast(object)->value(); - return Script::cast(script)->id(); +void Accessors::ScriptNameGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* source = Script::cast(JSValue::cast(object)->value())->name(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, 
isolate))); } -const AccessorDescriptor Accessors::ScriptId = { - ScriptGetId, - IllegalSetter, - 0 -}; +void Accessors::ScriptNameSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptNameInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->name_string(), + &ScriptNameGetter, + &ScriptNameSetter, + attributes); +} // -// Accessors::ScriptLineOffset +// Accessors::ScriptSource // -MaybeObject* Accessors::ScriptGetLineOffset(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return Script::cast(script)->line_offset(); +void Accessors::ScriptSourceGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* source = Script::cast(JSValue::cast(object)->value())->source(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate))); } -const AccessorDescriptor Accessors::ScriptLineOffset = { - ScriptGetLineOffset, - IllegalSetter, - 0 -}; +void Accessors::ScriptSourceSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptSourceInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->source_string(), + &ScriptSourceGetter, + &ScriptSourceSetter, + attributes); +} // -// Accessors::ScriptColumnOffset +// Accessors::ScriptLineOffset // -MaybeObject* Accessors::ScriptGetColumnOffset(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return 
Script::cast(script)->column_offset(); +void Accessors::ScriptLineOffsetGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* res = Script::cast(JSValue::cast(object)->value())->line_offset(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate))); } -const AccessorDescriptor Accessors::ScriptColumnOffset = { - ScriptGetColumnOffset, - IllegalSetter, - 0 -}; +void Accessors::ScriptLineOffsetSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("line_offset"))); + return MakeAccessor(isolate, + name, + &ScriptLineOffsetGetter, + &ScriptLineOffsetSetter, + attributes); +} // @@ -353,19 +470,36 @@ // -MaybeObject* Accessors::ScriptGetType(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return Script::cast(script)->type(); +void Accessors::ScriptTypeGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* res = Script::cast(JSValue::cast(object)->value())->type(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate))); } -const AccessorDescriptor Accessors::ScriptType = { - ScriptGetType, - IllegalSetter, - 0 -}; +void Accessors::ScriptTypeSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const 
v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptTypeInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("type"))); + return MakeAccessor(isolate, + name, + &ScriptTypeGetter, + &ScriptTypeSetter, + attributes); +} // @@ -373,19 +507,37 @@ // -MaybeObject* Accessors::ScriptGetCompilationType(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return Smi::FromInt(Script::cast(script)->compilation_type()); +void Accessors::ScriptCompilationTypeGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* res = Smi::FromInt( + Script::cast(JSValue::cast(object)->value())->compilation_type()); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate))); } -const AccessorDescriptor Accessors::ScriptCompilationType = { - ScriptGetCompilationType, - IllegalSetter, - 0 -}; +void Accessors::ScriptCompilationTypeSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("compilation_type"))); + return MakeAccessor(isolate, + name, + &ScriptCompilationTypeGetter, + &ScriptCompilationTypeSetter, + attributes); +} // @@ -393,29 +545,115 @@ // -MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate, - Object* object, - void*) { - JSValue* wrapper = JSValue::cast(object); - HandleScope scope(isolate); - Handle<Script> script(Script::cast(wrapper->value()), 
isolate); - InitScriptLineEnds(script); - ASSERT(script->line_ends()->IsFixedArray()); +void Accessors::ScriptLineEndsGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<Object> object = Utils::OpenHandle(*info.This()); + Handle<Script> script( + Script::cast(Handle<JSValue>::cast(object)->value()), isolate); + Script::InitLineEnds(script); + DCHECK(script->line_ends()->IsFixedArray()); Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends())); // We do not want anyone to modify this array from JS. - ASSERT(*line_ends == isolate->heap()->empty_fixed_array() || + DCHECK(*line_ends == isolate->heap()->empty_fixed_array() || line_ends->map() == isolate->heap()->fixed_cow_array_map()); Handle<JSArray> js_array = isolate->factory()->NewJSArrayWithElements(line_ends); - return *js_array; + info.GetReturnValue().Set(Utils::ToLocal(js_array)); } -const AccessorDescriptor Accessors::ScriptLineEnds = { - ScriptGetLineEnds, - IllegalSetter, - 0 -}; +void Accessors::ScriptLineEndsSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptLineEndsInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("line_ends"))); + return MakeAccessor(isolate, + name, + &ScriptLineEndsGetter, + &ScriptLineEndsSetter, + attributes); +} + + +// +// Accessors::ScriptSourceUrl +// + + +void Accessors::ScriptSourceUrlGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* url = 
Script::cast(JSValue::cast(object)->value())->source_url(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate))); +} + + +void Accessors::ScriptSourceUrlSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->source_url_string(), + &ScriptSourceUrlGetter, + &ScriptSourceUrlSetter, + attributes); +} + + +// +// Accessors::ScriptSourceMappingUrl +// + + +void Accessors::ScriptSourceMappingUrlGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* url = + Script::cast(JSValue::cast(object)->value())->source_mapping_url(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate))); +} + + +void Accessors::ScriptSourceMappingUrlSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->source_mapping_url_string(), + &ScriptSourceMappingUrlGetter, + &ScriptSourceMappingUrlSetter, + attributes); +} // @@ -423,19 +661,36 @@ // -MaybeObject* Accessors::ScriptGetContextData(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - return Script::cast(script)->context_data(); +void Accessors::ScriptContextDataGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + 
DisallowHeapAllocation no_allocation; + HandleScope scope(isolate); + Object* object = *Utils::OpenHandle(*info.This()); + Object* res = Script::cast(JSValue::cast(object)->value())->context_data(); + info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate))); } -const AccessorDescriptor Accessors::ScriptContextData = { - ScriptGetContextData, - IllegalSetter, - 0 -}; +void Accessors::ScriptContextDataSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptContextDataInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("context_data"))); + return MakeAccessor(isolate, + name, + &ScriptContextDataGetter, + &ScriptContextDataSetter, + attributes); +} // @@ -443,28 +698,46 @@ // -MaybeObject* Accessors::ScriptGetEvalFromScript(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - if (!Script::cast(script)->eval_from_shared()->IsUndefined()) { +void Accessors::ScriptEvalFromScriptGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<Object> object = Utils::OpenHandle(*info.This()); + Handle<Script> script( + Script::cast(Handle<JSValue>::cast(object)->value()), isolate); + Handle<Object> result = isolate->factory()->undefined_value(); + if (!script->eval_from_shared()->IsUndefined()) { Handle<SharedFunctionInfo> eval_from_shared( - SharedFunctionInfo::cast(Script::cast(script)->eval_from_shared())); - + SharedFunctionInfo::cast(script->eval_from_shared())); if (eval_from_shared->script()->IsScript()) { Handle<Script> eval_from_script(Script::cast(eval_from_shared->script())); - return *GetScriptWrapper(eval_from_script); + result = 
Script::GetWrapper(eval_from_script); } } - return isolate->heap()->undefined_value(); + + info.GetReturnValue().Set(Utils::ToLocal(result)); } -const AccessorDescriptor Accessors::ScriptEvalFromScript = { - ScriptGetEvalFromScript, - IllegalSetter, - 0 -}; +void Accessors::ScriptEvalFromScriptSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("eval_from_script"))); + return MakeAccessor(isolate, + name, + &ScriptEvalFromScriptGetter, + &ScriptEvalFromScriptSetter, + attributes); +} // @@ -472,32 +745,45 @@ // -MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Isolate* isolate, - Object* object, - void*) { - Script* raw_script = Script::cast(JSValue::cast(object)->value()); +void Accessors::ScriptEvalFromScriptPositionGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); HandleScope scope(isolate); - Handle<Script> script(raw_script); - - // If this is not a script compiled through eval there is no eval position. 
- if (script->compilation_type() != Script::COMPILATION_TYPE_EVAL) { - return script->GetHeap()->undefined_value(); + Handle<Object> object = Utils::OpenHandle(*info.This()); + Handle<Script> script( + Script::cast(Handle<JSValue>::cast(object)->value()), isolate); + Handle<Object> result = isolate->factory()->undefined_value(); + if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) { + Handle<Code> code(SharedFunctionInfo::cast( + script->eval_from_shared())->code()); + result = Handle<Object>( + Smi::FromInt(code->SourcePosition(code->instruction_start() + + script->eval_from_instructions_offset()->value())), + isolate); } + info.GetReturnValue().Set(Utils::ToLocal(result)); +} + - // Get the function from where eval was called and find the source position - // from the instruction offset. - Handle<Code> code(SharedFunctionInfo::cast( - script->eval_from_shared())->code()); - return Smi::FromInt(code->SourcePosition(code->instruction_start() + - script->eval_from_instructions_offset()->value())); +void Accessors::ScriptEvalFromScriptPositionSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); } -const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = { - ScriptGetEvalFromScriptPosition, - IllegalSetter, - 0 -}; +Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("eval_from_script_position"))); + return MakeAccessor(isolate, + name, + &ScriptEvalFromScriptPositionGetter, + &ScriptEvalFromScriptPositionSetter, + attributes); +} // @@ -505,100 +791,66 @@ // -MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Isolate* isolate, - Object* object, - void*) { - Object* script = JSValue::cast(object)->value(); - Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast( - 
Script::cast(script)->eval_from_shared())); - - +void Accessors::ScriptEvalFromFunctionNameGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<Object> object = Utils::OpenHandle(*info.This()); + Handle<Script> script( + Script::cast(Handle<JSValue>::cast(object)->value()), isolate); + Handle<Object> result; + Handle<SharedFunctionInfo> shared( + SharedFunctionInfo::cast(script->eval_from_shared())); // Find the name of the function calling eval. if (!shared->name()->IsUndefined()) { - return shared->name(); + result = Handle<Object>(shared->name(), isolate); } else { - return shared->inferred_name(); + result = Handle<Object>(shared->inferred_name(), isolate); } + info.GetReturnValue().Set(Utils::ToLocal(result)); } -const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = { - ScriptGetEvalFromFunctionName, - IllegalSetter, - 0 -}; - - -// -// Accessors::FunctionPrototype -// - - -Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) { - CALL_HEAP_FUNCTION(function->GetIsolate(), - Accessors::FunctionGetPrototype(function->GetIsolate(), - *function, - NULL), - Object); +void Accessors::ScriptEvalFromFunctionNameSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> value, + const v8::PropertyCallbackInfo<void>& info) { + UNREACHABLE(); } -Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function, - Handle<Object> prototype) { - ASSERT(function->should_have_prototype()); - CALL_HEAP_FUNCTION(function->GetIsolate(), - Accessors::FunctionSetPrototype(function->GetIsolate(), - *function, - *prototype, - NULL), - Object); +Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo( + Isolate* isolate, PropertyAttributes attributes) { + Handle<String> name(isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("eval_from_function_name"))); + return 
MakeAccessor(isolate, + name, + &ScriptEvalFromFunctionNameGetter, + &ScriptEvalFromFunctionNameSetter, + attributes); } -MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate, - Object* object, - void*) { - JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object); - if (function_raw == NULL) return isolate->heap()->undefined_value(); - while (!function_raw->should_have_prototype()) { - function_raw = FindInstanceOf<JSFunction>(isolate, - function_raw->GetPrototype()); - // There has to be one because we hit the getter. - ASSERT(function_raw != NULL); - } - - if (!function_raw->has_prototype()) { - HandleScope scope(isolate); - Handle<JSFunction> function(function_raw); +// +// Accessors::FunctionPrototype +// + +static Handle<Object> GetFunctionPrototype(Isolate* isolate, + Handle<JSFunction> function) { + if (!function->has_prototype()) { Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function); JSFunction::SetPrototype(function, proto); - function_raw = *function; } - return function_raw->prototype(); + return Handle<Object>(function->prototype(), isolate); } -MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate, - JSObject* object_raw, - Object* value_raw, - void*) { - JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object_raw); - if (function_raw == NULL) return isolate->heap()->undefined_value(); - - HandleScope scope(isolate); - Handle<JSFunction> function(function_raw, isolate); - Handle<JSObject> object(object_raw, isolate); - Handle<Object> value(value_raw, isolate); - if (!function->should_have_prototype()) { - // Since we hit this accessor, object will have no prototype property. 
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object, - isolate->factory()->prototype_string(), value, NONE); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; - } - +static Handle<Object> SetFunctionPrototype(Isolate* isolate, + Handle<JSFunction> function, + Handle<Object> value) { Handle<Object> old_value; - bool is_observed = *function == *object && function->map()->is_observed(); + bool is_observed = function->map()->is_observed(); if (is_observed) { if (function->has_prototype()) old_value = handle(function->prototype(), isolate); @@ -607,22 +859,66 @@ } JSFunction::SetPrototype(function, value); - ASSERT(function->prototype() == *value); + DCHECK(function->prototype() == *value); if (is_observed && !old_value->SameValue(*value)) { JSObject::EnqueueChangeRecord( function, "update", isolate->factory()->prototype_string(), old_value); } - return *function; + return function; } -const AccessorDescriptor Accessors::FunctionPrototype = { - FunctionGetPrototype, - FunctionSetPrototype, - 0 -}; +Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) { + return GetFunctionPrototype(function->GetIsolate(), function); +} + + +Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function, + Handle<Object> prototype) { + DCHECK(function->should_have_prototype()); + Isolate* isolate = function->GetIsolate(); + return SetFunctionPrototype(isolate, function, prototype); +} + + +void Accessors::FunctionPrototypeGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<JSFunction> function = + Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder())); + Handle<Object> result = GetFunctionPrototype(isolate, function); + info.GetReturnValue().Set(Utils::ToLocal(result)); +} + + +void Accessors::FunctionPrototypeSetter( + v8::Local<v8::String> name, + 
v8::Local<v8::Value> val, + const v8::PropertyCallbackInfo<void>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<Object> value = Utils::OpenHandle(*val); + if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) { + return; + } + Handle<JSFunction> object = + Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder())); + SetFunctionPrototype(isolate, object, value); +} + + +Handle<AccessorInfo> Accessors::FunctionPrototypeInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->prototype_string(), + &FunctionPrototypeGetter, + &FunctionPrototypeSetter, + attributes); +} // @@ -630,31 +926,49 @@ // -MaybeObject* Accessors::FunctionGetLength(Isolate* isolate, - Object* object, - void*) { - JSFunction* function = FindInstanceOf<JSFunction>(isolate, object); - if (function == NULL) return Smi::FromInt(0); - // Check if already compiled. - if (function->shared()->is_compiled()) { - return Smi::FromInt(function->shared()->length()); - } - // If the function isn't compiled yet, the length is not computed correctly - // yet. Compile it now and return the right length. +void Accessors::FunctionLengthGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); HandleScope scope(isolate); - Handle<JSFunction> function_handle(function); - if (Compiler::EnsureCompiled(function_handle, KEEP_EXCEPTION)) { - return Smi::FromInt(function_handle->shared()->length()); + Handle<JSFunction> function = + Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder())); + + int length = 0; + if (function->shared()->is_compiled()) { + length = function->shared()->length(); + } else { + // If the function isn't compiled yet, the length is not computed + // correctly yet. Compile it now and return the right length. 
+ if (Compiler::EnsureCompiled(function, KEEP_EXCEPTION)) { + length = function->shared()->length(); + } + if (isolate->has_pending_exception()) { + isolate->OptionalRescheduleException(false); + } } - return Failure::Exception(); + Handle<Object> result(Smi::FromInt(length), isolate); + info.GetReturnValue().Set(Utils::ToLocal(result)); } -const AccessorDescriptor Accessors::FunctionLength = { - FunctionGetLength, - ReadOnlySetAccessor, - 0 -}; +void Accessors::FunctionLengthSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> val, + const v8::PropertyCallbackInfo<void>& info) { + // Function length is non writable, non configurable. + UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::FunctionLengthInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->length_string(), + &FunctionLengthGetter, + &FunctionLengthSetter, + attributes); +} // @@ -662,21 +976,35 @@ // -MaybeObject* Accessors::FunctionGetName(Isolate* isolate, - Object* object, - void*) { - JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object); - return holder == NULL - ? isolate->heap()->undefined_value() - : holder->shared()->name(); +void Accessors::FunctionNameGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<JSFunction> function = + Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder())); + Handle<Object> result(function->shared()->name(), isolate); + info.GetReturnValue().Set(Utils::ToLocal(result)); } -const AccessorDescriptor Accessors::FunctionName = { - FunctionGetName, - ReadOnlySetAccessor, - 0 -}; +void Accessors::FunctionNameSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> val, + const v8::PropertyCallbackInfo<void>& info) { + // Function name is non writable, non configurable. 
+ UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::FunctionNameInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->name_string(), + &FunctionNameGetter, + &FunctionNameSetter, + attributes); +} // @@ -684,16 +1012,7 @@ // -Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) { - CALL_HEAP_FUNCTION(function->GetIsolate(), - Accessors::FunctionGetArguments(function->GetIsolate(), - *function, - NULL), - Object); -} - - -static MaybeObject* ConstructArgumentsObjectForInlinedFunction( +static Handle<Object> ArgumentsForInlinedFunction( JavaScriptFrame* frame, Handle<JSFunction> inlined_function, int inlined_frame_index) { @@ -717,81 +1036,112 @@ arguments->set_elements(*array); // Return the freshly allocated arguments object. - return *arguments; + return arguments; } -MaybeObject* Accessors::FunctionGetArguments(Isolate* isolate, - Object* object, - void*) { - HandleScope scope(isolate); - JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object); - if (holder == NULL) return isolate->heap()->undefined_value(); - Handle<JSFunction> function(holder, isolate); +static int FindFunctionInFrame(JavaScriptFrame* frame, + Handle<JSFunction> function) { + DisallowHeapAllocation no_allocation; + List<JSFunction*> functions(2); + frame->GetFunctions(&functions); + for (int i = functions.length() - 1; i >= 0; i--) { + if (functions[i] == *function) return i; + } + return -1; +} + + +Handle<Object> GetFunctionArguments(Isolate* isolate, + Handle<JSFunction> function) { + if (function->shared()->native()) return isolate->factory()->null_value(); - if (function->shared()->native()) return isolate->heap()->null_value(); // Find the top invocation of the function by traversing frames. 
- List<JSFunction*> functions(2); for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) { JavaScriptFrame* frame = it.frame(); - frame->GetFunctions(&functions); - for (int i = functions.length() - 1; i >= 0; i--) { - // Skip all frames that aren't invocations of the given function. - if (functions[i] != *function) continue; - - if (i > 0) { - // The function in question was inlined. Inlined functions have the - // correct number of arguments and no allocated arguments object, so - // we can construct a fresh one by interpreting the function's - // deoptimization input data. - return ConstructArgumentsObjectForInlinedFunction(frame, function, i); - } + int function_index = FindFunctionInFrame(frame, function); + if (function_index < 0) continue; - if (!frame->is_optimized()) { - // If there is an arguments variable in the stack, we return that. - Handle<ScopeInfo> scope_info(function->shared()->scope_info()); - int index = scope_info->StackSlotIndex( - isolate->heap()->arguments_string()); - if (index >= 0) { - Handle<Object> arguments(frame->GetExpression(index), isolate); - if (!arguments->IsArgumentsMarker()) return *arguments; - } + if (function_index > 0) { + // The function in question was inlined. Inlined functions have the + // correct number of arguments and no allocated arguments object, so + // we can construct a fresh one by interpreting the function's + // deoptimization input data. + return ArgumentsForInlinedFunction(frame, function, function_index); + } + + if (!frame->is_optimized()) { + // If there is an arguments variable in the stack, we return that. 
+ Handle<ScopeInfo> scope_info(function->shared()->scope_info()); + int index = scope_info->StackSlotIndex( + isolate->heap()->arguments_string()); + if (index >= 0) { + Handle<Object> arguments(frame->GetExpression(index), isolate); + if (!arguments->IsArgumentsMarker()) return arguments; } + } - // If there is no arguments variable in the stack or we have an - // optimized frame, we find the frame that holds the actual arguments - // passed to the function. - it.AdvanceToArgumentsFrame(); - frame = it.frame(); - - // Get the number of arguments and construct an arguments object - // mirror for the right frame. - const int length = frame->ComputeParametersCount(); - Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject( - function, length); - Handle<FixedArray> array = isolate->factory()->NewFixedArray(length); - - // Copy the parameters to the arguments object. - ASSERT(array->length() == length); - for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i)); - arguments->set_elements(*array); + // If there is no arguments variable in the stack or we have an + // optimized frame, we find the frame that holds the actual arguments + // passed to the function. + it.AdvanceToArgumentsFrame(); + frame = it.frame(); + + // Get the number of arguments and construct an arguments object + // mirror for the right frame. + const int length = frame->ComputeParametersCount(); + Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject( + function, length); + Handle<FixedArray> array = isolate->factory()->NewFixedArray(length); + + // Copy the parameters to the arguments object. + DCHECK(array->length() == length); + for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i)); + arguments->set_elements(*array); - // Return the freshly allocated arguments object. - return *arguments; - } - functions.Rewind(0); + // Return the freshly allocated arguments object. 
+ return arguments; } // No frame corresponding to the given function found. Return null. - return isolate->heap()->null_value(); + return isolate->factory()->null_value(); } -const AccessorDescriptor Accessors::FunctionArguments = { - FunctionGetArguments, - ReadOnlySetAccessor, - 0 -}; +Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) { + return GetFunctionArguments(function->GetIsolate(), function); +} + + +void Accessors::FunctionArgumentsGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<JSFunction> function = + Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder())); + Handle<Object> result = GetFunctionArguments(isolate, function); + info.GetReturnValue().Set(Utils::ToLocal(result)); +} + + +void Accessors::FunctionArgumentsSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> val, + const v8::PropertyCallbackInfo<void>& info) { + // Function arguments is non writable, non configurable. 
+ UNREACHABLE(); +} + + +Handle<AccessorInfo> Accessors::FunctionArgumentsInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->arguments_string(), + &FunctionArgumentsGetter, + &FunctionArgumentsSetter, + attributes); +} // @@ -799,22 +1149,33 @@ // +static inline bool AllowAccessToFunction(Context* current_context, + JSFunction* function) { + return current_context->HasSameSecurityTokenAs(function->context()); +} + + class FrameFunctionIterator { public: FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise) - : frame_iterator_(isolate), + : isolate_(isolate), + frame_iterator_(isolate), functions_(2), index_(0) { GetFunctions(); } JSFunction* next() { - if (functions_.length() == 0) return NULL; - JSFunction* next_function = functions_[index_]; - index_--; - if (index_ < 0) { - GetFunctions(); + while (true) { + if (functions_.length() == 0) return NULL; + JSFunction* next_function = functions_[index_]; + index_--; + if (index_ < 0) { + GetFunctions(); + } + // Skip functions from other origins. + if (!AllowAccessToFunction(isolate_->context(), next_function)) continue; + return next_function; } - return next_function; } // Iterate through functions until the first occurence of 'function'. 
@@ -835,39 +1196,34 @@ if (frame_iterator_.done()) return; JavaScriptFrame* frame = frame_iterator_.frame(); frame->GetFunctions(&functions_); - ASSERT(functions_.length() > 0); + DCHECK(functions_.length() > 0); frame_iterator_.Advance(); index_ = functions_.length() - 1; } + Isolate* isolate_; JavaScriptFrameIterator frame_iterator_; List<JSFunction*> functions_; int index_; }; -MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate, - Object* object, - void*) { - HandleScope scope(isolate); +MaybeHandle<JSFunction> FindCaller(Isolate* isolate, + Handle<JSFunction> function) { DisallowHeapAllocation no_allocation; - JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object); - if (holder == NULL) return isolate->heap()->undefined_value(); - if (holder->shared()->native()) return isolate->heap()->null_value(); - Handle<JSFunction> function(holder, isolate); - FrameFunctionIterator it(isolate, no_allocation); - + if (function->shared()->native()) { + return MaybeHandle<JSFunction>(); + } // Find the function from the frames. if (!it.Find(*function)) { // No frame corresponding to the given function found. Return null. - return isolate->heap()->null_value(); + return MaybeHandle<JSFunction>(); } - // Find previously called non-toplevel function. JSFunction* caller; do { caller = it.next(); - if (caller == NULL) return isolate->heap()->null_value(); + if (caller == NULL) return MaybeHandle<JSFunction>(); } while (caller->shared()->is_toplevel()); // If caller is a built-in function and caller's caller is also built-in, @@ -884,24 +1240,59 @@ // allows us to make bound functions use the strict function map // and its associated throwing caller and arguments. if (caller->shared()->bound()) { - return isolate->heap()->null_value(); + return MaybeHandle<JSFunction>(); } // Censor if the caller is not a sloppy mode function. 
// Change from ES5, which used to throw, see: // https://bugs.ecmascript.org/show_bug.cgi?id=310 if (caller->shared()->strict_mode() == STRICT) { - return isolate->heap()->null_value(); + return MaybeHandle<JSFunction>(); + } + // Don't return caller from another security context. + if (!AllowAccessToFunction(isolate->context(), caller)) { + return MaybeHandle<JSFunction>(); + } + return Handle<JSFunction>(caller); +} + + +void Accessors::FunctionCallerGetter( + v8::Local<v8::String> name, + const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<JSFunction> function = + Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder())); + Handle<Object> result; + MaybeHandle<JSFunction> maybe_caller; + maybe_caller = FindCaller(isolate, function); + Handle<JSFunction> caller; + if (maybe_caller.ToHandle(&caller)) { + result = caller; + } else { + result = isolate->factory()->null_value(); } + info.GetReturnValue().Set(Utils::ToLocal(result)); +} + - return caller; +void Accessors::FunctionCallerSetter( + v8::Local<v8::String> name, + v8::Local<v8::Value> val, + const v8::PropertyCallbackInfo<void>& info) { + // Function caller is non writable, non configurable. 
+ UNREACHABLE(); } -const AccessorDescriptor Accessors::FunctionCaller = { - FunctionGetCaller, - ReadOnlySetAccessor, - 0 -}; +Handle<AccessorInfo> Accessors::FunctionCallerInfo( + Isolate* isolate, PropertyAttributes attributes) { + return MakeAccessor(isolate, + isolate->factory()->caller_string(), + &FunctionCallerGetter, + &FunctionCallerSetter, + attributes); +} // @@ -913,7 +1304,7 @@ const v8::PropertyCallbackInfo<v8::Value>& info) { JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder())); Context* context = Context::cast(instance->context()); - ASSERT(context->IsModuleContext()); + DCHECK(context->IsModuleContext()); int slot = info.Data()->Int32Value(); Object* value = context->get(slot); Isolate* isolate = instance->GetIsolate(); @@ -934,7 +1325,7 @@ const v8::PropertyCallbackInfo<v8::Value>& info) { JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder())); Context* context = Context::cast(instance->context()); - ASSERT(context->IsModuleContext()); + DCHECK(context->IsModuleContext()); int slot = info.Data()->Int32Value(); Object* old_value = context->get(slot); if (old_value->IsTheHole()) { diff -Nru nodejs-0.11.13/deps/v8/src/accessors.h nodejs-0.11.15/deps/v8/src/accessors.h --- nodejs-0.11.13/deps/v8/src/accessors.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/accessors.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,77 +1,66 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ACCESSORS_H_ #define V8_ACCESSORS_H_ -#include "allocation.h" -#include "v8globals.h" +#include "src/allocation.h" +#include "src/globals.h" namespace v8 { namespace internal { // The list of accessor descriptors. This is a second-order macro // taking a macro to be applied to all accessor descriptor names. 
-#define ACCESSOR_DESCRIPTOR_LIST(V) \ - V(FunctionPrototype) \ - V(FunctionLength) \ - V(FunctionName) \ +#define ACCESSOR_INFO_LIST(V) \ + V(ArrayLength) \ V(FunctionArguments) \ V(FunctionCaller) \ - V(ArrayLength) \ - V(StringLength) \ - V(ScriptSource) \ - V(ScriptName) \ - V(ScriptId) \ - V(ScriptLineOffset) \ + V(FunctionName) \ + V(FunctionLength) \ + V(FunctionPrototype) \ V(ScriptColumnOffset) \ - V(ScriptType) \ V(ScriptCompilationType) \ - V(ScriptLineEnds) \ V(ScriptContextData) \ V(ScriptEvalFromScript) \ V(ScriptEvalFromScriptPosition) \ - V(ScriptEvalFromFunctionName) + V(ScriptEvalFromFunctionName) \ + V(ScriptId) \ + V(ScriptLineEnds) \ + V(ScriptLineOffset) \ + V(ScriptName) \ + V(ScriptSource) \ + V(ScriptType) \ + V(ScriptSourceUrl) \ + V(ScriptSourceMappingUrl) \ + V(StringLength) // Accessors contains all predefined proxy accessors. class Accessors : public AllStatic { public: // Accessor descriptors. -#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ - static const AccessorDescriptor name; - ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) -#undef ACCESSOR_DESCRIPTOR_DECLARATION +#define ACCESSOR_INFO_DECLARATION(name) \ + static void name##Getter( \ + v8::Local<v8::String> name, \ + const v8::PropertyCallbackInfo<v8::Value>& info); \ + static void name##Setter( \ + v8::Local<v8::String> name, \ + v8::Local<v8::Value> value, \ + const v8::PropertyCallbackInfo<void>& info); \ + static Handle<AccessorInfo> name##Info( \ + Isolate* isolate, \ + PropertyAttributes attributes); + ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION) +#undef ACCESSOR_INFO_DECLARATION enum DescriptorId { -#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ - k##name, - ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) -#undef ACCESSOR_DESCRIPTOR_DECLARATION +#define ACCESSOR_INFO_DECLARATION(name) \ + k##name##Getter, \ + k##name##Setter, + ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION) +#undef ACCESSOR_INFO_DECLARATION descriptorCount }; @@ -89,75 +78,24 @@ // 
If true, *object_offset contains offset of object field. template <class T> static bool IsJSObjectFieldAccessor(typename T::TypeHandle type, - Handle<String> name, + Handle<Name> name, int* object_offset); + static Handle<AccessorInfo> MakeAccessor( + Isolate* isolate, + Handle<String> name, + AccessorGetterCallback getter, + AccessorSetterCallback setter, + PropertyAttributes attributes); + + static Handle<ExecutableAccessorInfo> CloneAccessor( + Isolate* isolate, + Handle<ExecutableAccessorInfo> accessor); - private: - // Accessor functions only used through the descriptor. - static MaybeObject* FunctionSetPrototype(Isolate* isolate, - JSObject* object, - Object*, - void*); - static MaybeObject* FunctionGetPrototype(Isolate* isolate, - Object* object, - void*); - static MaybeObject* FunctionGetLength(Isolate* isolate, - Object* object, - void*); - static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*); - static MaybeObject* FunctionGetArguments(Isolate* isolate, - Object* object, - void*); - static MaybeObject* FunctionGetCaller(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ArraySetLength(Isolate* isolate, - JSObject* object, - Object*, - void*); - static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*); - static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*); - static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*); - static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*); - static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*); - static MaybeObject* ScriptGetLineOffset(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ScriptGetColumnOffset(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*); - static MaybeObject* ScriptGetCompilationType(Isolate* isolate, - Object* object, - void*); - static MaybeObject* 
ScriptGetLineEnds(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ScriptGetContextData(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate, - Object* object, - void*); - static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate, - Object* object, - void*); + private: // Helper functions. static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value); - static MaybeObject* IllegalSetter(Isolate* isolate, - JSObject*, - Object*, - void*); - static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*); - static MaybeObject* ReadOnlySetAccessor(Isolate* isolate, - JSObject*, - Object* value, - void*); }; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/allocation.cc nodejs-0.11.15/deps/v8/src/allocation.cc --- nodejs-0.11.13/deps/v8/src/allocation.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/allocation.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "allocation.h" +#include "src/allocation.h" #include <stdlib.h> // For free, malloc. 
-#include "checks.h" -#include "platform.h" -#include "utils.h" +#include "src/base/logging.h" +#include "src/base/platform/platform.h" +#include "src/utils.h" #if V8_LIBC_BIONIC #include <malloc.h> // NOLINT @@ -89,7 +66,7 @@ char* StrDup(const char* str) { int length = StrLength(str); char* result = NewArray<char>(length + 1); - OS::MemCopy(result, str, length); + MemCopy(result, str, length); result[length] = '\0'; return result; } @@ -99,14 +76,14 @@ int length = StrLength(str); if (n < length) length = n; char* result = NewArray<char>(length + 1); - OS::MemCopy(result, str, length); + MemCopy(result, str, length); result[length] = '\0'; return result; } void* AlignedAlloc(size_t size, size_t alignment) { - ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT + DCHECK(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT void* ptr; #if V8_OS_WIN ptr = _aligned_malloc(size, alignment); diff -Nru nodejs-0.11.13/deps/v8/src/allocation.h nodejs-0.11.15/deps/v8/src/allocation.h --- nodejs-0.11.13/deps/v8/src/allocation.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/allocation.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ALLOCATION_H_ #define V8_ALLOCATION_H_ -#include "globals.h" +#include "src/globals.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/allocation-site-scopes.cc nodejs-0.11.15/deps/v8/src/allocation-site-scopes.cc --- nodejs-0.11.13/deps/v8/src/allocation-site-scopes.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/allocation-site-scopes.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "allocation-site-scopes.h" +#include "src/allocation-site-scopes.h" namespace v8 { namespace internal { @@ -43,7 +20,7 @@ static_cast<void*>(*scope_site)); } } else { - ASSERT(!current().is_null()); + DCHECK(!current().is_null()); scope_site = isolate()->factory()->NewAllocationSite(); if (FLAG_trace_creation_allocation_sites) { PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n", @@ -54,7 +31,7 @@ current()->set_nested_site(*scope_site); update_current_site(*scope_site); } - ASSERT(!scope_site.is_null()); + DCHECK(!scope_site.is_null()); return scope_site; } @@ -62,7 +39,7 @@ void AllocationSiteCreationContext::ExitScope( Handle<AllocationSite> scope_site, Handle<JSObject> object) { - if (!object.is_null() && !object->IsFailure()) { + if (!object.is_null()) { bool top_level = !scope_site.is_null() && top().is_identical_to(scope_site); diff -Nru nodejs-0.11.13/deps/v8/src/allocation-site-scopes.h nodejs-0.11.15/deps/v8/src/allocation-site-scopes.h --- nodejs-0.11.13/deps/v8/src/allocation-site-scopes.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/allocation-site-scopes.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ALLOCATION_SITE_SCOPES_H_ #define V8_ALLOCATION_SITE_SCOPES_H_ -#include "ast.h" -#include "handles.h" -#include "objects.h" -#include "zone.h" +#include "src/ast.h" +#include "src/handles.h" +#include "src/objects.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -43,7 +20,7 @@ public: explicit AllocationSiteContext(Isolate* isolate) { isolate_ = isolate; - }; + } Handle<AllocationSite> top() { return top_; } Handle<AllocationSite> current() { return current_; } @@ -98,7 +75,7 @@ // Advance current site Object* nested_site = current()->nested_site(); // Something is wrong if we advance to the end of the list here. - ASSERT(nested_site->IsAllocationSite()); + DCHECK(nested_site->IsAllocationSite()); update_current_site(AllocationSite::cast(nested_site)); } return Handle<AllocationSite>(*current(), isolate()); @@ -108,7 +85,7 @@ Handle<JSObject> object) { // This assert ensures that we are pointing at the right sub-object in a // recursive walk of a nested literal. 
- ASSERT(object.is_null() || *object == scope_site->transition_info()); + DCHECK(object.is_null() || *object == scope_site->transition_info()); } bool ShouldCreateMemento(Handle<JSObject> object); diff -Nru nodejs-0.11.13/deps/v8/src/allocation-tracker.cc nodejs-0.11.15/deps/v8/src/allocation-tracker.cc --- nodejs-0.11.13/deps/v8/src/allocation-tracker.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/allocation-tracker.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,12 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "allocation-tracker.h" - -#include "heap-snapshot-generator.h" -#include "frames-inl.h" +#include "src/allocation-tracker.h" +#include "src/frames-inl.h" +#include "src/heap-snapshot-generator.h" namespace v8 { namespace internal { @@ -78,15 +54,15 @@ void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) { - OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' '); + base::OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' '); if (tracker != NULL) { AllocationTracker::FunctionInfo* info = tracker->function_info_list()[function_info_index_]; - OS::Print("%s #%u", info->name, id_); + base::OS::Print("%s #%u", info->name, id_); } else { - OS::Print("%u #%u", function_info_index_, id_); + base::OS::Print("%u #%u", function_info_index_, id_); } - OS::Print("\n"); + base::OS::Print("\n"); indent += 2; for (int i = 0; i < children_.length(); i++) { children_[i]->Print(indent, tracker); @@ -117,8 +93,8 @@ void AllocationTraceTree::Print(AllocationTracker* tracker) { - OS::Print("[AllocationTraceTree:]\n"); - OS::Print("Total size | Allocation count | Function id | id\n"); + base::OS::Print("[AllocationTraceTree:]\n"); + base::OS::Print("Total size | Allocation count | Function id | id\n"); root()->Print(0, tracker); } @@ -211,11 +187,6 @@ } 
-static bool AddressesMatch(void* key1, void* key2) { - return key1 == key2; -} - - void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) { delete *info; } @@ -225,7 +196,7 @@ HeapObjectsMap* ids, StringsStorage* names) : ids_(ids), names_(names), - id_to_function_info_index_(AddressesMatch), + id_to_function_info_index_(HashMap::PointersMatch), info_index_for_other_state_(0) { FunctionInfo* info = new FunctionInfo(); info->name = "(root)"; @@ -257,8 +228,8 @@ // Mark the new block as FreeSpace to make sure the heap is iterable // while we are capturing stack trace. FreeListNode::FromAddress(addr)->set_size(heap, size); - ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size); - ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr))); + DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size); + DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr))); Isolate* isolate = heap->isolate(); int length = 0; @@ -354,8 +325,8 @@ void AllocationTracker::UnresolvedLocation::Resolve() { if (script_.is_null()) return; HandleScope scope(script_->GetIsolate()); - info_->line = GetScriptLineNumber(script_, start_position_); - info_->column = GetScriptColumnNumber(script_, start_position_); + info_->line = Script::GetLineNumber(script_, start_position_); + info_->column = Script::GetColumnNumber(script_, start_position_); } diff -Nru nodejs-0.11.13/deps/v8/src/allocation-tracker.h nodejs-0.11.15/deps/v8/src/allocation-tracker.h --- nodejs-0.11.13/deps/v8/src/allocation-tracker.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/allocation-tracker.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ALLOCATION_TRACKER_H_ #define V8_ALLOCATION_TRACKER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/api.cc nodejs-0.11.15/deps/v8/src/api.cc --- nodejs-0.11.13/deps/v8/src/api.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/api.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,77 +1,57 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "api.h" +#include "src/api.h" #include <string.h> // For memcpy, strlen. +#ifdef V8_USE_ADDRESS_SANITIZER +#include <sanitizer/asan_interface.h> +#endif // V8_USE_ADDRESS_SANITIZER #include <cmath> // For isnan. 
-#include "../include/v8-debug.h" -#include "../include/v8-profiler.h" -#include "../include/v8-testing.h" -#include "assert-scope.h" -#include "bootstrapper.h" -#include "code-stubs.h" -#include "compiler.h" -#include "conversions-inl.h" -#include "counters.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "execution.h" -#include "global-handles.h" -#include "heap-profiler.h" -#include "heap-snapshot-generator-inl.h" -#include "icu_util.h" -#include "json-parser.h" -#include "messages.h" -#ifdef COMPRESS_STARTUP_DATA_BZ2 -#include "natives.h" -#endif -#include "parser.h" -#include "platform.h" -#include "platform/time.h" -#include "profile-generator-inl.h" -#include "property-details.h" -#include "property.h" -#include "runtime.h" -#include "runtime-profiler.h" -#include "scanner-character-streams.h" -#include "snapshot.h" -#include "unicode-inl.h" -#include "utils/random-number-generator.h" -#include "v8threads.h" -#include "version.h" -#include "vm-state-inl.h" +#include "include/v8-debug.h" +#include "include/v8-profiler.h" +#include "include/v8-testing.h" +#include "src/assert-scope.h" +#include "src/base/platform/platform.h" +#include "src/base/platform/time.h" +#include "src/base/utils/random-number-generator.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/compiler.h" +#include "src/conversions-inl.h" +#include "src/counters.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/global-handles.h" +#include "src/heap-profiler.h" +#include "src/heap-snapshot-generator-inl.h" +#include "src/icu_util.h" +#include "src/json-parser.h" +#include "src/messages.h" +#include "src/natives.h" +#include "src/parser.h" +#include "src/profile-generator-inl.h" +#include "src/property.h" +#include "src/property-details.h" +#include "src/prototype.h" +#include "src/runtime.h" +#include "src/runtime-profiler.h" +#include 
"src/scanner-character-streams.h" +#include "src/simulator.h" +#include "src/snapshot.h" +#include "src/unicode-inl.h" +#include "src/v8threads.h" +#include "src/version.h" +#include "src/vm-state-inl.h" #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr)) #define ENTER_V8(isolate) \ - ASSERT((isolate)->IsInitialized()); \ + DCHECK((isolate)->IsInitialized()); \ i::VMState<i::OTHER> __state__((isolate)) namespace v8 { @@ -85,7 +65,7 @@ #define EXCEPTION_PREAMBLE(isolate) \ (isolate)->handle_scope_implementer()->IncrementCallDepth(); \ - ASSERT(!(isolate)->external_caught_exception()); \ + DCHECK(!(isolate)->external_caught_exception()); \ bool has_pending_exception = false @@ -106,7 +86,7 @@ #define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value) \ EXCEPTION_BAILOUT_CHECK_GENERIC( \ - isolate, value, i::V8::FireCallCompletedCallback(isolate);) + isolate, value, isolate->FireCallCompletedCallback();) #define EXCEPTION_BAILOUT_CHECK(isolate, value) \ @@ -195,9 +175,9 @@ i::Isolate* isolate = i::Isolate::Current(); FatalErrorCallback callback = isolate->exception_behavior(); if (callback == NULL) { - i::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", - location, message); - i::OS::Abort(); + base::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", location, + message); + base::OS::Abort(); } else { callback(location, message); } @@ -244,24 +224,6 @@ } -// Some initializing API functions are called early and may be -// called on a thread different from static initializer thread. -// If Isolate API is used, Isolate::Enter() will initialize TLS so -// Isolate::Current() works. If it's a legacy case, then the thread -// may not have TLS initialized yet. However, in initializing APIs it -// may be too early to call EnsureInitialized() - some pre-init -// parameters still have to be configured. 
-static inline i::Isolate* EnterIsolateIfNeeded() { - i::Isolate* isolate = i::Isolate::UncheckedCurrent(); - if (isolate != NULL) - return isolate; - - i::Isolate::EnterDefaultIsolate(); - isolate = i::Isolate::Current(); - return isolate; -} - - StartupDataDecompressor::StartupDataDecompressor() : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) { for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) { @@ -293,7 +255,7 @@ compressed_data[i].compressed_size); if (result != 0) return result; } else { - ASSERT_EQ(0, compressed_data[i].raw_size); + DCHECK_EQ(0, compressed_data[i].raw_size); } compressed_data[i].data = decompressed; } @@ -363,24 +325,24 @@ void V8::SetDecompressedStartupData(StartupData* decompressed_data) { #ifdef COMPRESS_STARTUP_DATA_BZ2 - ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size); + DCHECK_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size); i::Snapshot::set_raw_data( reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data)); - ASSERT_EQ(i::Snapshot::context_raw_size(), + DCHECK_EQ(i::Snapshot::context_raw_size(), decompressed_data[kSnapshotContext].raw_size); i::Snapshot::set_context_raw_data( reinterpret_cast<const i::byte*>( decompressed_data[kSnapshotContext].data)); - ASSERT_EQ(i::Natives::GetRawScriptsSize(), + DCHECK_EQ(i::Natives::GetRawScriptsSize(), decompressed_data[kLibraries].raw_size); i::Vector<const char> libraries_source( decompressed_data[kLibraries].data, decompressed_data[kLibraries].raw_size); i::Natives::SetRawScriptsSource(libraries_source); - ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(), + DCHECK_EQ(i::ExperimentalNatives::GetRawScriptsSize(), decompressed_data[kExperimentalLibraries].raw_size); i::Vector<const char> exp_libraries_source( decompressed_data[kExperimentalLibraries].data, @@ -390,15 +352,33 @@ } +void V8::SetNativesDataBlob(StartupData* natives_blob) { +#ifdef V8_USE_EXTERNAL_STARTUP_DATA + 
i::SetNativesFromFile(natives_blob); +#else + CHECK(false); +#endif +} + + +void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) { +#ifdef V8_USE_EXTERNAL_STARTUP_DATA + i::SetSnapshotFromFile(snapshot_blob); +#else + CHECK(false); +#endif +} + + void V8::SetFatalErrorHandler(FatalErrorCallback that) { - i::Isolate* isolate = EnterIsolateIfNeeded(); + i::Isolate* isolate = i::Isolate::Current(); isolate->set_exception_behavior(that); } void V8::SetAllowCodeGenerationFromStringsCallback( AllowCodeGenerationFromStringsCallback callback) { - i::Isolate* isolate = EnterIsolateIfNeeded(); + i::Isolate* isolate = i::Isolate::Current(); isolate->set_allow_code_gen_callback(callback); } @@ -460,15 +440,16 @@ ResourceConstraints::ResourceConstraints() - : max_young_space_size_(0), + : max_semi_space_size_(0), max_old_space_size_(0), max_executable_size_(0), stack_limit_(NULL), - max_available_threads_(0) { } + max_available_threads_(0), + code_range_size_(0) { } void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, + uint64_t virtual_memory_limit, uint32_t number_of_processors) { - const int lump_of_memory = (i::kPointerSize / 4) * i::MB; #if V8_OS_ANDROID // Android has higher physical memory requirements before raising the maximum // heap size limits since it has no swap space. @@ -481,42 +462,51 @@ const uint64_t high_limit = 1ul * i::GB; #endif - // The young_space_size should be a power of 2 and old_generation_size should - // be a multiple of Page::kPageSize. 
if (physical_memory <= low_limit) { - set_max_young_space_size(2 * lump_of_memory); - set_max_old_space_size(128 * lump_of_memory); - set_max_executable_size(96 * lump_of_memory); + set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice); + set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice); + set_max_executable_size(i::Heap::kMaxExecutableSizeLowMemoryDevice); } else if (physical_memory <= medium_limit) { - set_max_young_space_size(8 * lump_of_memory); - set_max_old_space_size(256 * lump_of_memory); - set_max_executable_size(192 * lump_of_memory); + set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice); + set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice); + set_max_executable_size(i::Heap::kMaxExecutableSizeMediumMemoryDevice); } else if (physical_memory <= high_limit) { - set_max_young_space_size(16 * lump_of_memory); - set_max_old_space_size(512 * lump_of_memory); - set_max_executable_size(256 * lump_of_memory); - } else { - set_max_young_space_size(16 * lump_of_memory); - set_max_old_space_size(700 * lump_of_memory); - set_max_executable_size(256 * lump_of_memory); + set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice); + set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice); + set_max_executable_size(i::Heap::kMaxExecutableSizeHighMemoryDevice); + } else { + set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice); + set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice); + set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice); } set_max_available_threads(i::Max(i::Min(number_of_processors, 4u), 1u)); + + if (virtual_memory_limit > 0 && i::kRequiresCodeRange) { + // Reserve no more than 1/8 of the memory for the code range, but at most + // kMaximalCodeRangeSize. 
+ set_code_range_size( + i::Min(i::kMaximalCodeRangeSize / i::MB, + static_cast<size_t>((virtual_memory_limit >> 3) / i::MB))); + } } bool SetResourceConstraints(Isolate* v8_isolate, ResourceConstraints* constraints) { i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate); - int young_space_size = constraints->max_young_space_size(); - int old_gen_size = constraints->max_old_space_size(); + int semi_space_size = constraints->max_semi_space_size(); + int old_space_size = constraints->max_old_space_size(); int max_executable_size = constraints->max_executable_size(); - if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) { + size_t code_range_size = constraints->code_range_size(); + if (semi_space_size != 0 || old_space_size != 0 || + max_executable_size != 0 || code_range_size != 0) { // After initialization it's too late to change Heap constraints. - ASSERT(!isolate->IsInitialized()); - bool result = isolate->heap()->ConfigureHeap(young_space_size / 2, - old_gen_size, - max_executable_size); + DCHECK(!isolate->IsInitialized()); + bool result = isolate->heap()->ConfigureHeap(semi_space_size, + old_space_size, + max_executable_size, + code_range_size); if (!result) return false; } if (constraints->stack_limit() != NULL) { @@ -533,7 +523,7 @@ LOG_API(isolate, "Persistent::New"); i::Handle<i::Object> result = isolate->global_handles()->Create(*obj); #ifdef DEBUG - (*obj)->Verify(); + (*obj)->ObjectVerify(); #endif // DEBUG return result.location(); } @@ -542,7 +532,7 @@ i::Object** V8::CopyPersistent(i::Object** obj) { i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj); #ifdef DEBUG - (*obj)->Verify(); + (*obj)->ObjectVerify(); #endif // DEBUG return result.location(); } @@ -621,7 +611,7 @@ i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object, i::Object* value) { - ASSERT(heap_object->IsHeapObject()); + DCHECK(heap_object->IsHeapObject()); return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value); } 
@@ -703,7 +693,7 @@ return i::Handle<i::FixedArray>(); } int new_size = i::Max(index, data->length() << 1) + 1; - data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size); + data = i::FixedArray::CopySize(data, new_size); env->set_embedder_data(*data); return data; } @@ -724,7 +714,7 @@ if (data.is_null()) return; i::Handle<i::Object> val = Utils::OpenHandle(*value); data->set(index, *val); - ASSERT_EQ(*Utils::OpenHandle(*value), + DCHECK_EQ(*Utils::OpenHandle(*value), *Utils::OpenHandle(*GetEmbedderData(index))); } @@ -741,7 +731,7 @@ const char* location = "v8::Context::SetAlignedPointerInEmbedderData()"; i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location); data->set(index, EncodeAlignedAsSmi(value, location)); - ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index)); + DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index)); } @@ -778,8 +768,8 @@ i::Object* NeanderArray::get(int offset) { - ASSERT(0 <= offset); - ASSERT(offset < length()); + DCHECK(0 <= offset); + DCHECK(offset < length()); return obj_.get(offset + 1); } @@ -860,10 +850,12 @@ v8::Local<FunctionTemplate> setter, v8::PropertyAttribute attribute, v8::AccessControl access_control) { + // TODO(verwaest): Remove |access_control|. 
+ DCHECK_EQ(v8::DEFAULT, access_control); i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ENTER_V8(isolate); - ASSERT(!name.IsEmpty()); - ASSERT(!getter.IsEmpty() || !setter.IsEmpty()); + DCHECK(!name.IsEmpty()); + DCHECK(!getter.IsEmpty() || !setter.IsEmpty()); i::HandleScope scope(isolate); const int kSize = 5; v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate); @@ -871,8 +863,7 @@ name, getter, setter, - v8::Integer::New(v8_isolate, attribute), - v8::Integer::New(v8_isolate, access_control)}; + v8::Integer::New(v8_isolate, attribute)}; TemplateSet(isolate, this, kSize, data); } @@ -1172,7 +1163,6 @@ obj->set_name(*Utils::OpenHandle(*name)); if (settings & ALL_CAN_READ) obj->set_all_can_read(true); if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true); - if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true); obj->set_property_attributes(static_cast<PropertyAttributes>(attributes)); if (!signature.IsEmpty()) { obj->set_expected_receiver_type(*Utils::OpenHandle(*signature)); @@ -1221,14 +1211,14 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (!Utils::ApiCheck(this != NULL, + i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this, true); + if (!Utils::ApiCheck(!handle.is_null(), "v8::FunctionTemplate::InstanceTemplate()", "Reading from empty handle")) { return Local<ObjectTemplate>(); } + i::Isolate* isolate = handle->GetIsolate(); ENTER_V8(isolate); - i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this); if (handle->instance_template()->IsUndefined()) { Local<ObjectTemplate> templ = ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle)); @@ -1567,45 +1557,6 @@ } -// --- S c r i p t D a t a --- - - -ScriptData* ScriptData::PreCompile(v8::Handle<String> source) { - i::Handle<i::String> str = Utils::OpenHandle(*source); - i::Isolate* isolate = str->GetIsolate(); - if 
(str->IsExternalTwoByteString()) { - i::ExternalTwoByteStringUtf16CharacterStream stream( - i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length()); - return i::PreParserApi::PreParse(isolate, &stream); - } else { - i::GenericStringUtf16CharacterStream stream(str, 0, str->length()); - return i::PreParserApi::PreParse(isolate, &stream); - } -} - - -ScriptData* ScriptData::New(const char* data, int length) { - // Return an empty ScriptData if the length is obviously invalid. - if (length % sizeof(unsigned) != 0) { - return new i::ScriptDataImpl(); - } - - // Copy the data to ensure it is properly aligned. - int deserialized_data_length = length / sizeof(unsigned); - // If aligned, don't create a copy of the data. - if (reinterpret_cast<intptr_t>(data) % sizeof(unsigned) == 0) { - return new i::ScriptDataImpl(data, length); - } - // Copy the data to align it. - unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length); - i::CopyBytes(reinterpret_cast<char*>(deserialized_data), - data, static_cast<size_t>(length)); - - return new i::ScriptDataImpl( - i::Vector<unsigned>(deserialized_data, deserialized_data_length)); -} - - // --- S c r i p t s --- @@ -1653,14 +1604,14 @@ int UnboundScript::GetLineNumber(int code_pos) { - i::Handle<i::HeapObject> obj = - i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Handle<i::SharedFunctionInfo> obj = + i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this)); i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::UnboundScript::GetLineNumber()", return -1); LOG_API(isolate, "UnboundScript::GetLineNumber"); - if (obj->IsScript()) { - i::Handle<i::Script> script(i::Script::cast(*obj)); - return i::GetScriptLineNumber(script, code_pos); + if (obj->script()->IsScript()) { + i::Handle<i::Script> script(i::Script::cast(obj->script())); + return i::Script::GetLineNumber(script, code_pos); } else { return -1; } @@ -1668,14 +1619,14 @@ Handle<Value> UnboundScript::GetScriptName() { 
- i::Handle<i::HeapObject> obj = - i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Handle<i::SharedFunctionInfo> obj = + i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this)); i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::UnboundScript::GetName()", return Handle<String>()); LOG_API(isolate, "UnboundScript::GetName"); - if (obj->IsScript()) { - i::Object* name = i::Script::cast(*obj)->name(); + if (obj->script()->IsScript()) { + i::Object* name = i::Script::cast(obj->script())->name(); return Utils::ToLocal(i::Handle<i::Object>(name, isolate)); } else { return Handle<String>(); @@ -1683,33 +1634,57 @@ } +Handle<Value> UnboundScript::GetSourceURL() { + i::Handle<i::SharedFunctionInfo> obj = + i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); + ON_BAILOUT(isolate, "v8::UnboundScript::GetSourceURL()", + return Handle<String>()); + LOG_API(isolate, "UnboundScript::GetSourceURL"); + if (obj->script()->IsScript()) { + i::Object* url = i::Script::cast(obj->script())->source_url(); + return Utils::ToLocal(i::Handle<i::Object>(url, isolate)); + } else { + return Handle<String>(); + } +} + + +Handle<Value> UnboundScript::GetSourceMappingURL() { + i::Handle<i::SharedFunctionInfo> obj = + i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); + ON_BAILOUT(isolate, "v8::UnboundScript::GetSourceMappingURL()", + return Handle<String>()); + LOG_API(isolate, "UnboundScript::GetSourceMappingURL"); + if (obj->script()->IsScript()) { + i::Object* url = i::Script::cast(obj->script())->source_mapping_url(); + return Utils::ToLocal(i::Handle<i::Object>(url, isolate)); + } else { + return Handle<String>(); + } +} + + Local<Value> Script::Run() { + i::Handle<i::Object> obj = Utils::OpenHandle(this, true); // If execution is terminating, Compile(..)->Run() requires this // check. 
- if (this == NULL) return Local<Value>(); - i::Handle<i::HeapObject> obj = - i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); - i::Isolate* isolate = obj->GetIsolate(); + if (obj.is_null()) return Local<Value>(); + i::Isolate* isolate = i::Handle<i::HeapObject>::cast(obj)->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>()); LOG_API(isolate, "Script::Run"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); - i::Object* raw_result = NULL; - { - i::HandleScope scope(isolate); - i::Handle<i::JSFunction> fun = - i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate); - EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> receiver( - isolate->context()->global_proxy(), isolate); - i::Handle<i::Object> result = i::Execution::Call( - isolate, fun, receiver, 0, NULL, &has_pending_exception); - EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>()); - raw_result = *result; - } - i::Handle<i::Object> result(raw_result, isolate); - return Utils::ToLocal(result); + i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate); + i::HandleScope scope(isolate); + i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj); + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::Object> receiver(isolate->global_proxy(), isolate); + i::Handle<i::Object> result; + has_pending_exception = !i::Execution::Call( + isolate, fun, receiver, 0, NULL).ToHandle(&result); + EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>()); + return Utils::ToLocal(scope.CloseAndEscape(result)); } @@ -1724,42 +1699,28 @@ Isolate* v8_isolate, Source* source, CompileOptions options) { - i::ScriptDataImpl* script_data_impl = NULL; - i::CachedDataMode cached_data_mode = i::NO_CACHED_DATA; - if (options & kProduceDataToCache) { - cached_data_mode = i::PRODUCE_CACHED_DATA; - ASSERT(source->cached_data == NULL); - if (source->cached_data) { - // Asked to produce cached data even though there is some 
already -> not - // good. In release mode, try to do the right thing: Just regenerate the - // data. - delete source->cached_data; - source->cached_data = NULL; - } - } else if (source->cached_data) { - // FIXME(marja): Make compiler use CachedData directly. Aligning needs to be - // taken care of. - script_data_impl = static_cast<i::ScriptDataImpl*>(ScriptData::New( - reinterpret_cast<const char*>(source->cached_data->data), - source->cached_data->length)); - // We assert that the pre-data is sane, even though we can actually - // handle it if it turns out not to be in release mode. - ASSERT(script_data_impl->SanityCheck()); - if (script_data_impl->SanityCheck()) { - cached_data_mode = i::CONSUME_CACHED_DATA; - } else { - // If the pre-data isn't sane we simply ignore it. - delete script_data_impl; - script_data_impl = NULL; - delete source->cached_data; - source->cached_data = NULL; - } - } - - i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string)); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate); ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()", return Local<UnboundScript>()); + + // Support the old API for a transition period: + // - kProduceToCache -> kProduceParserCache + // - kNoCompileOptions + cached_data != NULL -> kConsumeParserCache + if (options == kProduceDataToCache) { + options = kProduceParserCache; + } else if (options == kNoCompileOptions && source->cached_data) { + options = kConsumeParserCache; + } + + i::ScriptData* script_data = NULL; + if (options == kConsumeParserCache || options == kConsumeCodeCache) { + DCHECK(source->cached_data); + // ScriptData takes care of pointer-aligning the data. 
+ script_data = new i::ScriptData(source->cached_data->data, + source->cached_data->length); + } + + i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string)); LOG_API(isolate, "ScriptCompiler::CompileUnbound"); ENTER_V8(isolate); i::SharedFunctionInfo* raw_result = NULL; @@ -1784,29 +1745,30 @@ source->resource_is_shared_cross_origin == v8::True(v8_isolate); } EXCEPTION_PREAMBLE(isolate); - i::Handle<i::SharedFunctionInfo> result = - i::Compiler::CompileScript(str, - name_obj, - line_offset, - column_offset, - is_shared_cross_origin, - isolate->global_context(), - NULL, - &script_data_impl, - cached_data_mode, - i::NOT_NATIVES_CODE); + i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript( + str, name_obj, line_offset, column_offset, is_shared_cross_origin, + isolate->global_context(), NULL, &script_data, options, + i::NOT_NATIVES_CODE); has_pending_exception = result.is_null(); + if (has_pending_exception && script_data != NULL) { + // This case won't happen during normal operation; we have compiled + // successfully and produced cached data, and but the second compilation + // of the same source code fails. + delete script_data; + script_data = NULL; + } EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>()); raw_result = *result; - if ((options & kProduceDataToCache) && script_data_impl != NULL) { - // script_data_impl now contains the data that was generated. source will + + if ((options == kProduceParserCache || options == kProduceCodeCache) && + script_data != NULL) { + // script_data now contains the data that was generated. source will // take the ownership. 
source->cached_data = new CachedData( - reinterpret_cast<const uint8_t*>(script_data_impl->Data()), - script_data_impl->Length(), CachedData::BufferOwned); - script_data_impl->owns_store_ = false; + script_data->data(), script_data->length(), CachedData::BufferOwned); + script_data->ReleaseDataOwnership(); } - delete script_data_impl; + delete script_data; } i::Handle<i::SharedFunctionInfo> result(raw_result, isolate); return ToApiHandle<UnboundScript>(result); @@ -1818,34 +1780,25 @@ Source* source, CompileOptions options) { i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate); - ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", - return Local<Script>()); + ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>()); LOG_API(isolate, "ScriptCompiler::CompiletBound()"); ENTER_V8(isolate); - Local<UnboundScript> generic = - CompileUnbound(v8_isolate, source, options); + Local<UnboundScript> generic = CompileUnbound(v8_isolate, source, options); if (generic.IsEmpty()) return Local<Script>(); return generic->BindToCurrentContext(); } Local<Script> Script::Compile(v8::Handle<String> source, - v8::ScriptOrigin* origin, - ScriptData* script_data) { + v8::ScriptOrigin* origin) { i::Handle<i::String> str = Utils::OpenHandle(*source); - ScriptCompiler::CachedData* cached_data = NULL; - if (script_data) { - cached_data = new ScriptCompiler::CachedData( - reinterpret_cast<const uint8_t*>(script_data->Data()), - script_data->Length()); - } if (origin) { - ScriptCompiler::Source script_source(source, *origin, cached_data); + ScriptCompiler::Source script_source(source, *origin); return ScriptCompiler::Compile( reinterpret_cast<v8::Isolate*>(str->GetIsolate()), &script_source); } - ScriptCompiler::Source script_source(source, cached_data); + ScriptCompiler::Source script_source(source); return ScriptCompiler::Compile( reinterpret_cast<v8::Isolate*>(str->GetIsolate()), &script_source); @@ -1864,19 +1817,23 @@ v8::TryCatch::TryCatch() : 
isolate_(i::Isolate::Current()), - next_(isolate_->try_catch_handler_address()), + next_(isolate_->try_catch_handler()), is_verbose_(false), can_continue_(true), capture_message_(true), rethrow_(false), has_terminated_(false) { - Reset(); + ResetInternal(); + // Special handling for simulators which have a separate JS stack. + js_stack_comparable_address_ = + reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch( + GetCurrentStackPosition())); isolate_->RegisterTryCatchHandler(this); } v8::TryCatch::~TryCatch() { - ASSERT(isolate_ == i::Isolate::Current()); + DCHECK(isolate_ == i::Isolate::Current()); if (rethrow_) { v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_); v8::HandleScope scope(isolate); @@ -1890,10 +1847,18 @@ isolate_->RestorePendingMessageFromTryCatch(this); } isolate_->UnregisterTryCatchHandler(this); + v8::internal::SimulatorStack::UnregisterCTryCatch(); reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc); - ASSERT(!isolate_->thread_local_top()->rethrowing_message_); + DCHECK(!isolate_->thread_local_top()->rethrowing_message_); } else { + if (HasCaught() && isolate_->has_scheduled_exception()) { + // If an exception was caught but is still scheduled because no API call + // promoted it, then it is canceled to prevent it from being propagated. + // Note that this will not cancel termination exceptions. + isolate_->CancelScheduledExceptionFromTryCatch(this); + } isolate_->UnregisterTryCatchHandler(this); + v8::internal::SimulatorStack::UnregisterCTryCatch(); } } @@ -1921,7 +1886,7 @@ v8::Local<Value> v8::TryCatch::Exception() const { - ASSERT(isolate_ == i::Isolate::Current()); + DCHECK(isolate_ == i::Isolate::Current()); if (HasCaught()) { // Check for out of memory exception. 
i::Object* exception = reinterpret_cast<i::Object*>(exception_); @@ -1933,16 +1898,22 @@ v8::Local<Value> v8::TryCatch::StackTrace() const { - ASSERT(isolate_ == i::Isolate::Current()); + DCHECK(isolate_ == i::Isolate::Current()); if (HasCaught()) { i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_); if (!raw_obj->IsJSObject()) return v8::Local<Value>(); i::HandleScope scope(isolate_); i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_); i::Handle<i::String> name = isolate_->factory()->stack_string(); - if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>(); - i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name); - if (value.is_null()) return v8::Local<Value>(); + EXCEPTION_PREAMBLE(isolate_); + Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name); + has_pending_exception = !maybe.has_value; + EXCEPTION_BAILOUT_CHECK(isolate_, v8::Local<Value>()); + if (!maybe.value) return v8::Local<Value>(); + i::Handle<i::Object> value; + if (!i::Object::GetProperty(obj, name).ToHandle(&value)) { + return v8::Local<Value>(); + } return v8::Utils::ToLocal(scope.CloseAndEscape(value)); } else { return v8::Local<Value>(); @@ -1951,9 +1922,9 @@ v8::Local<v8::Message> v8::TryCatch::Message() const { - ASSERT(isolate_ == i::Isolate::Current()); + DCHECK(isolate_ == i::Isolate::Current()); i::Object* message = reinterpret_cast<i::Object*>(message_obj_); - ASSERT(message->IsJSMessageObject() || message->IsTheHole()); + DCHECK(message->IsJSMessageObject() || message->IsTheHole()); if (HasCaught() && !message->IsTheHole()) { return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_)); } else { @@ -1963,7 +1934,18 @@ void v8::TryCatch::Reset() { - ASSERT(isolate_ == i::Isolate::Current()); + DCHECK(isolate_ == i::Isolate::Current()); + if (!rethrow_ && HasCaught() && isolate_->has_scheduled_exception()) { + // If an exception was caught but is still scheduled because no API call + // promoted it, then it is canceled to 
prevent it from being propagated. + // Note that this will not cancel termination exceptions. + isolate_->CancelScheduledExceptionFromTryCatch(this); + } + ResetInternal(); +} + + +void v8::TryCatch::ResetInternal() { i::Object* the_hole = isolate_->heap()->the_hole_value(); exception_ = the_hole; message_obj_ = the_hole; @@ -1998,19 +1980,30 @@ } -v8::Handle<Value> Message::GetScriptResourceName() const { +ScriptOrigin Message::GetScriptOrigin() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this)); - // Return this.script.name. - i::Handle<i::JSValue> script = - i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(), - isolate)); - i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(), - isolate); - return scope.Escape(Utils::ToLocal(resource_name)); + i::Handle<i::Object> script_wraper = + i::Handle<i::Object>(message->script(), isolate); + i::Handle<i::JSValue> script_value = + i::Handle<i::JSValue>::cast(script_wraper); + i::Handle<i::Script> script(i::Script::cast(script_value->value())); + i::Handle<i::Object> scriptName(i::Script::GetNameOrSourceURL(script)); + v8::Isolate* v8_isolate = + reinterpret_cast<v8::Isolate*>(script->GetIsolate()); + v8::ScriptOrigin origin( + Utils::ToLocal(scriptName), + v8::Integer::New(v8_isolate, script->line_offset()->value()), + v8::Integer::New(v8_isolate, script->column_offset()->value()), + Handle<Boolean>(), + v8::Integer::New(v8_isolate, script->id()->value())); + return origin; +} + + +v8::Handle<Value> Message::GetScriptResourceName() const { + return GetScriptOrigin().ResourceName(); } @@ -2028,33 +2021,28 @@ } -static i::Handle<i::Object> CallV8HeapFunction(const char* name, - i::Handle<i::Object> recv, - int argc, - i::Handle<i::Object> argv[], - bool* has_pending_exception) { - 
i::Isolate* isolate = i::Isolate::Current(); - i::Handle<i::String> fmt_str = - isolate->factory()->InternalizeUtf8String(name); - i::Object* object_fun = - isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str); - i::Handle<i::JSFunction> fun = - i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun)); - i::Handle<i::Object> value = i::Execution::Call( - isolate, fun, recv, argc, argv, has_pending_exception); - return value; +MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction( + const char* name, + i::Handle<i::Object> recv, + int argc, + i::Handle<i::Object> argv[]) { + i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::Object> object_fun = + i::Object::GetProperty( + isolate, isolate->js_builtins_object(), name).ToHandleChecked(); + i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(object_fun); + return i::Execution::Call(isolate, fun, recv, argc, argv); } -static i::Handle<i::Object> CallV8HeapFunction(const char* name, - i::Handle<i::Object> data, - bool* has_pending_exception) { +MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction( + const char* name, + i::Handle<i::Object> data) { i::Handle<i::Object> argv[] = { data }; return CallV8HeapFunction(name, i::Isolate::Current()->js_builtins_object(), ARRAY_SIZE(argv), - argv, - has_pending_exception); + argv); } @@ -2065,9 +2053,9 @@ i::HandleScope scope(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber", - Utils::OpenHandle(this), - &has_pending_exception); + i::Handle<i::Object> result; + has_pending_exception = !CallV8HeapFunction( + "GetLineNumber", Utils::OpenHandle(this)).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, 0); return static_cast<int>(result->Number()); } @@ -2095,14 +2083,14 @@ int Message::GetStartColumn() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + ON_BAILOUT(isolate, "v8::Message::GetStartColumn()", return kNoColumnInfo); 
ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> start_col_obj = CallV8HeapFunction( - "GetPositionInLine", - data_obj, - &has_pending_exception); + i::Handle<i::Object> start_col_obj; + has_pending_exception = !CallV8HeapFunction( + "GetPositionInLine", data_obj).ToHandle(&start_col_obj); EXCEPTION_BAILOUT_CHECK(isolate, 0); return static_cast<int>(start_col_obj->Number()); } @@ -2110,14 +2098,14 @@ int Message::GetEndColumn() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + ON_BAILOUT(isolate, "v8::Message::GetEndColumn()", return kNoColumnInfo); ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> start_col_obj = CallV8HeapFunction( - "GetPositionInLine", - data_obj, - &has_pending_exception); + i::Handle<i::Object> start_col_obj; + has_pending_exception = !CallV8HeapFunction( + "GetPositionInLine", data_obj).ToHandle(&start_col_obj); EXCEPTION_BAILOUT_CHECK(isolate, 0); i::Handle<i::JSMessageObject> message = i::Handle<i::JSMessageObject>::cast(data_obj); @@ -2146,9 +2134,9 @@ ENTER_V8(isolate); EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine", - Utils::OpenHandle(this), - &has_pending_exception); + i::Handle<i::Object> result; + has_pending_exception = !CallV8HeapFunction( + "GetSourceLine", Utils::OpenHandle(this)).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>()); if (result->IsString()) { return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result))); @@ -2173,7 +2161,7 @@ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSArray> self = Utils::OpenHandle(this); i::Handle<i::Object> obj = - i::Object::GetElementNoExceptionThrown(isolate, self, index); 
+ i::Object::GetElement(isolate, self, index).ToHandleChecked(); i::Handle<i::JSObject> jsobj = i::Handle<i::JSObject>::cast(obj); return scope.Escape(Utils::StackFrameToLocal(jsobj)); } @@ -2199,6 +2187,9 @@ StackTraceOptions options) { i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); ENTER_V8(i_isolate); + // TODO(dcarney): remove when ScriptDebugServer is fixed. + options = static_cast<StackTraceOptions>( + static_cast<int>(options) | kExposeFramesAcrossSecurityOrigins); i::Handle<i::JSArray> stackTrace = i_isolate->CaptureCurrentStackTrace(frame_limit, options); return Utils::StackTraceToLocal(stackTrace); @@ -2207,121 +2198,95 @@ // --- S t a c k F r a m e --- -int StackFrame::GetLineNumber() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +static int getIntProperty(const StackFrame* f, const char* propertyName, + int defaultValue) { + i::Isolate* isolate = Utils::OpenHandle(f)->GetIsolate(); ENTER_V8(isolate); i::HandleScope scope(isolate); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> line = GetProperty(self, "lineNumber"); - if (!line->IsSmi()) { - return Message::kNoLineNumberInfo; - } - return i::Smi::cast(*line)->value(); + i::Handle<i::JSObject> self = Utils::OpenHandle(f); + i::Handle<i::Object> obj = + i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked(); + return obj->IsSmi() ? 
i::Smi::cast(*obj)->value() : defaultValue; +} + + +int StackFrame::GetLineNumber() const { + return getIntProperty(this, "lineNumber", Message::kNoLineNumberInfo); } int StackFrame::GetColumn() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - i::HandleScope scope(isolate); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> column = GetProperty(self, "column"); - if (!column->IsSmi()) { - return Message::kNoColumnInfo; - } - return i::Smi::cast(*column)->value(); + return getIntProperty(this, "column", Message::kNoColumnInfo); } int StackFrame::GetScriptId() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - i::HandleScope scope(isolate); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> scriptId = GetProperty(self, "scriptId"); - if (!scriptId->IsSmi()) { - return Message::kNoScriptIdInfo; - } - return i::Smi::cast(*scriptId)->value(); + return getIntProperty(this, "scriptId", Message::kNoScriptIdInfo); } -Local<String> StackFrame::GetScriptName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +static Local<String> getStringProperty(const StackFrame* f, + const char* propertyName) { + i::Isolate* isolate = Utils::OpenHandle(f)->GetIsolate(); ENTER_V8(isolate); EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> name = GetProperty(self, "scriptName"); - if (!name->IsString()) { - return Local<String>(); - } - return scope.Escape(Local<String>::Cast(Utils::ToLocal(name))); + i::Handle<i::JSObject> self = Utils::OpenHandle(f); + i::Handle<i::Object> obj = + i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked(); + return obj->IsString() + ? 
scope.Escape(Local<String>::Cast(Utils::ToLocal(obj))) + : Local<String>(); +} + + +Local<String> StackFrame::GetScriptName() const { + return getStringProperty(this, "scriptName"); } Local<String> StackFrame::GetScriptNameOrSourceURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL"); - if (!name->IsString()) { - return Local<String>(); - } - return scope.Escape(Local<String>::Cast(Utils::ToLocal(name))); + return getStringProperty(this, "scriptNameOrSourceURL"); } Local<String> StackFrame::GetFunctionName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> name = GetProperty(self, "functionName"); - if (!name->IsString()) { - return Local<String>(); - } - return scope.Escape(Local<String>::Cast(Utils::ToLocal(name))); + return getStringProperty(this, "functionName"); } -bool StackFrame::IsEval() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +static bool getBoolProperty(const StackFrame* f, const char* propertyName) { + i::Isolate* isolate = Utils::OpenHandle(f)->GetIsolate(); ENTER_V8(isolate); i::HandleScope scope(isolate); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> is_eval = GetProperty(self, "isEval"); - return is_eval->IsTrue(); + i::Handle<i::JSObject> self = Utils::OpenHandle(f); + i::Handle<i::Object> obj = + i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked(); + return obj->IsTrue(); } +bool StackFrame::IsEval() const { return getBoolProperty(this, "isEval"); } + bool StackFrame::IsConstructor() const { - i::Isolate* isolate = 
Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - i::HandleScope scope(isolate); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor"); - return is_constructor->IsTrue(); + return getBoolProperty(this, "isConstructor"); } // --- J S O N --- Local<Value> JSON::Parse(Local<String> json_string) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::String> string = Utils::OpenHandle(*json_string); + i::Isolate* isolate = string->GetIsolate(); EnsureInitializedForIsolate(isolate, "v8::JSON::Parse"); ENTER_V8(isolate); i::HandleScope scope(isolate); - i::Handle<i::String> source = i::Handle<i::String>( - FlattenGetString(Utils::OpenHandle(*json_string))); + i::Handle<i::String> source = i::String::Flatten(string); EXCEPTION_PREAMBLE(isolate); + i::MaybeHandle<i::Object> maybe_result = + source->IsSeqOneByteString() ? i::JsonParser<true>::Parse(source) + : i::JsonParser<false>::Parse(source); i::Handle<i::Object> result; - if (source->IsSeqOneByteString()) { - result = i::JsonParser<true>::Parse(source); - } else { - result = i::JsonParser<false>::Parse(source); - } - has_pending_exception = result.is_null(); + has_pending_exception = !maybe_result.ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>()); return Utils::ToLocal( i::Handle<i::Object>::cast(scope.CloseAndEscape(result))); @@ -2332,14 +2297,14 @@ bool Value::FullIsUndefined() const { bool result = Utils::OpenHandle(this)->IsUndefined(); - ASSERT_EQ(result, QuickIsUndefined()); + DCHECK_EQ(result, QuickIsUndefined()); return result; } bool Value::FullIsNull() const { bool result = Utils::OpenHandle(this)->IsNull(); - ASSERT_EQ(result, QuickIsNull()); + DCHECK_EQ(result, QuickIsNull()); return result; } @@ -2361,7 +2326,7 @@ bool Value::FullIsString() const { bool result = Utils::OpenHandle(this)->IsString(); - ASSERT_EQ(result, QuickIsString()); + DCHECK_EQ(result, QuickIsString()); return result; } 
@@ -2453,60 +2418,55 @@ bool Value::IsDate() const { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(this); + if (!obj->IsHeapObject()) return false; + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); return obj->HasSpecificClassOf(isolate->heap()->Date_string()); } bool Value::IsStringObject() const { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(this); + if (!obj->IsHeapObject()) return false; + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); return obj->HasSpecificClassOf(isolate->heap()->String_string()); } bool Value::IsSymbolObject() const { - // TODO(svenpanne): these and other test functions should be written such - // that they do not use Isolate::Current(). - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(this); + if (!obj->IsHeapObject()) return false; + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); return obj->HasSpecificClassOf(isolate->heap()->Symbol_string()); } bool Value::IsNumberObject() const { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(this); + if (!obj->IsHeapObject()) return false; + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); return obj->HasSpecificClassOf(isolate->heap()->Number_string()); } -static i::Object* LookupBuiltin(i::Isolate* isolate, - const char* builtin_name) { - i::Handle<i::String> string = - isolate->factory()->InternalizeUtf8String(builtin_name); - i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object(); - return builtins->GetPropertyNoExceptionThrown(*string); -} - - static bool CheckConstructor(i::Isolate* isolate, i::Handle<i::JSObject> obj, const char* class_name) { - i::Object* constr = obj->map()->constructor(); + i::Handle<i::Object> constr(obj->map()->constructor(), isolate); if (!constr->IsJSFunction()) return false; - i::JSFunction* func = 
i::JSFunction::cast(constr); - return func->shared()->native() && - constr == LookupBuiltin(isolate, class_name); + i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(constr); + return func->shared()->native() && constr.is_identical_to( + i::Object::GetProperty(isolate, + isolate->js_builtins_object(), + class_name).ToHandleChecked()); } bool Value::IsNativeError() const { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(this); if (obj->IsJSObject()) { i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj)); + i::Isolate* isolate = js_obj->GetIsolate(); return CheckConstructor(isolate, js_obj, "$Error") || CheckConstructor(isolate, js_obj, "$EvalError") || CheckConstructor(isolate, js_obj, "$RangeError") || @@ -2521,8 +2481,9 @@ bool Value::IsBooleanObject() const { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(this); + if (!obj->IsHeapObject()) return false; + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); return obj->HasSpecificClassOf(isolate->heap()->Boolean_string()); } @@ -2543,7 +2504,8 @@ LOG_API(isolate, "ToString"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - str = i::Execution::ToString(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToString( + isolate, obj).ToHandle(&str); EXCEPTION_BAILOUT_CHECK(isolate, Local<String>()); } return ToApiHandle<String>(str); @@ -2560,7 +2522,8 @@ LOG_API(isolate, "ToDetailString"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - str = i::Execution::ToDetailString(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToDetailString( + isolate, obj).ToHandle(&str); EXCEPTION_BAILOUT_CHECK(isolate, Local<String>()); } return ToApiHandle<String>(str); @@ -2577,7 +2540,8 @@ LOG_API(isolate, "ToObject"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - val = i::Execution::ToObject(isolate, obj, &has_pending_exception); + has_pending_exception = 
!i::Execution::ToObject( + isolate, obj).ToHandle(&val); EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>()); } return ToApiHandle<Object>(val); @@ -2605,11 +2569,12 @@ if (obj->IsNumber()) { num = obj; } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "ToNumber"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - num = i::Execution::ToNumber(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToNumber( + isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>()); } return ToApiHandle<Number>(num); @@ -2622,11 +2587,12 @@ if (obj->IsSmi()) { num = obj; } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "ToInteger"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - num = i::Execution::ToInteger(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToInteger( + isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>()); } return ToApiHandle<Integer>(num); @@ -2769,45 +2735,55 @@ void v8::Date::CheckCast(v8::Value* that) { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(that); - Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()), + i::Isolate* isolate = NULL; + if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate(); + Utils::ApiCheck(isolate != NULL && + obj->HasSpecificClassOf(isolate->heap()->Date_string()), "v8::Date::Cast()", "Could not convert to date"); } void v8::StringObject::CheckCast(v8::Value* that) { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(that); - Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()), + i::Isolate* isolate = NULL; + if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate(); + 
Utils::ApiCheck(isolate != NULL && + obj->HasSpecificClassOf(isolate->heap()->String_string()), "v8::StringObject::Cast()", "Could not convert to StringObject"); } void v8::SymbolObject::CheckCast(v8::Value* that) { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(that); - Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()), + i::Isolate* isolate = NULL; + if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate(); + Utils::ApiCheck(isolate != NULL && + obj->HasSpecificClassOf(isolate->heap()->Symbol_string()), "v8::SymbolObject::Cast()", "Could not convert to SymbolObject"); } void v8::NumberObject::CheckCast(v8::Value* that) { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(that); - Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()), + i::Isolate* isolate = NULL; + if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate(); + Utils::ApiCheck(isolate != NULL && + obj->HasSpecificClassOf(isolate->heap()->Number_string()), "v8::NumberObject::Cast()", "Could not convert to NumberObject"); } void v8::BooleanObject::CheckCast(v8::Value* that) { - i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::Object> obj = Utils::OpenHandle(that); - Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()), + i::Isolate* isolate = NULL; + if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate(); + Utils::ApiCheck(isolate != NULL && + obj->HasSpecificClassOf(isolate->heap()->Boolean_string()), "v8::BooleanObject::Cast()", "Could not convert to BooleanObject"); } @@ -2832,12 +2808,13 @@ if (obj->IsNumber()) { num = obj; } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "NumberValue"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - num = i::Execution::ToNumber(isolate, obj, 
&has_pending_exception); - EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value()); + has_pending_exception = !i::Execution::ToNumber( + isolate, obj).ToHandle(&num); + EXCEPTION_BAILOUT_CHECK(isolate, base::OS::nan_value()); } return num->Number(); } @@ -2849,11 +2826,12 @@ if (obj->IsNumber()) { num = obj; } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "IntegerValue"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - num = i::Execution::ToInteger(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToInteger( + isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, 0); } if (num->IsSmi()) { @@ -2870,11 +2848,11 @@ if (obj->IsSmi()) { num = obj; } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "ToInt32"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - num = i::Execution::ToInt32(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>()); } return ToApiHandle<Int32>(num); @@ -2887,11 +2865,12 @@ if (obj->IsSmi()) { num = obj; } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "ToUInt32"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - num = i::Execution::ToUint32(isolate, obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToUint32( + isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>()); } return ToApiHandle<Uint32>(num); @@ -2904,12 +2883,13 @@ if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj); return Local<Uint32>(); } - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "ToArrayIndex"); ENTER_V8(isolate); 
EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> string_obj = - i::Execution::ToString(isolate, obj, &has_pending_exception); + i::Handle<i::Object> string_obj; + has_pending_exception = !i::Execution::ToString( + isolate, obj).ToHandle(&string_obj); EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>()); i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj); uint32_t index; @@ -2931,12 +2911,12 @@ if (obj->IsSmi()) { return i::Smi::cast(*obj)->value(); } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "Int32Value (slow)"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> num = - i::Execution::ToInt32(isolate, obj, &has_pending_exception); + i::Handle<i::Object> num; + has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, 0); if (num->IsSmi()) { return i::Smi::cast(*num)->value(); @@ -2949,14 +2929,14 @@ bool Value::Equals(Handle<Value> that) const { i::Isolate* isolate = i::Isolate::Current(); - if (!Utils::ApiCheck(this != NULL && !that.IsEmpty(), + i::Handle<i::Object> obj = Utils::OpenHandle(this, true); + if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(), "v8::Value::Equals()", "Reading from empty handle")) { return false; } LOG_API(isolate, "Equals"); ENTER_V8(isolate); - i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::Object> other = Utils::OpenHandle(*that); // If both obj and other are JSObjects, we'd better compare by identity // immediately when going into JS builtin. 
The reason is Invoke @@ -2966,9 +2946,9 @@ } i::Handle<i::Object> args[] = { other }; EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = - CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args, - &has_pending_exception); + i::Handle<i::Object> result; + has_pending_exception = !CallV8HeapFunction( + "EQUALS", obj, ARRAY_SIZE(args), args).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, false); return *result == i::Smi::FromInt(i::EQUAL); } @@ -2976,13 +2956,13 @@ bool Value::StrictEquals(Handle<Value> that) const { i::Isolate* isolate = i::Isolate::Current(); - if (!Utils::ApiCheck(this != NULL && !that.IsEmpty(), + i::Handle<i::Object> obj = Utils::OpenHandle(this, true); + if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(), "v8::Value::StrictEquals()", "Reading from empty handle")) { return false; } LOG_API(isolate, "StrictEquals"); - i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::Object> other = Utils::OpenHandle(*that); // Must check HeapNumber first, since NaN !== NaN. 
if (obj->IsHeapNumber()) { @@ -2997,7 +2977,8 @@ return other->IsNumber() && obj->Number() == other->Number(); } else if (obj->IsString()) { return other->IsString() && - i::String::cast(*obj)->Equals(i::String::cast(*other)); + i::String::Equals(i::Handle<i::String>::cast(obj), + i::Handle<i::String>::cast(other)); } else if (obj->IsUndefined() || obj->IsUndetectableObject()) { return other->IsUndefined() || other->IsUndetectableObject(); } else { @@ -3007,14 +2988,12 @@ bool Value::SameValue(Handle<Value> that) const { - i::Isolate* isolate = i::Isolate::Current(); - if (!Utils::ApiCheck(this != NULL && !that.IsEmpty(), + i::Handle<i::Object> obj = Utils::OpenHandle(this, true); + if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(), "v8::Value::SameValue()", "Reading from empty handle")) { return false; } - LOG_API(isolate, "SameValue"); - i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::Object> other = Utils::OpenHandle(*that); return obj->SameValue(*other); } @@ -3025,12 +3004,13 @@ if (obj->IsSmi()) { return i::Smi::cast(*obj)->value(); } else { - i::Isolate* isolate = i::Isolate::Current(); + i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); LOG_API(isolate, "Uint32Value"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> num = - i::Execution::ToUint32(isolate, obj, &has_pending_exception); + i::Handle<i::Object> num; + has_pending_exception = !i::Execution::ToUint32( + isolate, obj).ToHandle(&num); EXCEPTION_BAILOUT_CHECK(isolate, 0); if (num->IsSmi()) { return i::Smi::cast(*num)->value(); @@ -3041,8 +3021,7 @@ } -bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value, - v8::PropertyAttribute attribs) { +bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::Set()", return false); ENTER_V8(isolate); @@ -3051,14 +3030,9 @@ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); 
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::Runtime::SetObjectProperty( - isolate, - self, - key_obj, - value_obj, - static_cast<PropertyAttributes>(attribs), - i::SLOPPY); - has_pending_exception = obj.is_null(); + has_pending_exception = + i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj, + i::SLOPPY).is_null(); EXCEPTION_BAILOUT_CHECK(isolate, false); return true; } @@ -3072,13 +3046,8 @@ i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::JSObject::SetElement( - self, - index, - value_obj, - NONE, - i::SLOPPY); - has_pending_exception = obj.is_null(); + has_pending_exception = i::JSObject::SetElement( + self, index, value_obj, NONE, i::SLOPPY).is_null(); EXCEPTION_BAILOUT_CHECK(isolate, false); return true; } @@ -3095,20 +3064,19 @@ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::ForceSetProperty( + has_pending_exception = i::Runtime::DefineObjectProperty( self, key_obj, value_obj, - static_cast<PropertyAttributes>(attribs)); - has_pending_exception = obj.is_null(); + static_cast<PropertyAttributes>(attribs)).is_null(); EXCEPTION_BAILOUT_CHECK(isolate, false); return true; } bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) { - return Set(v8::Handle<Value>(reinterpret_cast<Value*>(*key)), - value, DontEnum); + return ForceSet(v8::Handle<Value>(reinterpret_cast<Value*>(*key)), + value, DontEnum); } @@ -3129,8 +3097,9 @@ } EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj); - has_pending_exception = obj.is_null(); + i::Handle<i::Object> obj; + has_pending_exception = !i::Runtime::DeleteObjectProperty( + isolate, self, key_obj, 
i::JSReceiver::FORCE_DELETION).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(isolate, false); return obj->IsTrue(); } @@ -3143,8 +3112,9 @@ i::Handle<i::Object> self = Utils::OpenHandle(this); i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = i::GetProperty(isolate, self, key_obj); - has_pending_exception = result.is_null(); + i::Handle<i::Object> result; + has_pending_exception = + !i::Runtime::GetObjectProperty(isolate, self, key_obj).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); return Utils::ToLocal(result); } @@ -3156,8 +3126,9 @@ ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = i::Object::GetElement(isolate, self, index); - has_pending_exception = result.is_null(); + i::Handle<i::Object> result; + has_pending_exception = + !i::Object::GetElement(isolate, self, index).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); return Utils::ToLocal(result); } @@ -3170,7 +3141,7 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()", + ON_BAILOUT(isolate, "v8::Object::GetPropertyAttributes()", return static_cast<PropertyAttribute>(NONE)); ENTER_V8(isolate); i::HandleScope scope(isolate); @@ -3178,25 +3149,48 @@ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); if (!key_obj->IsName()) { EXCEPTION_PREAMBLE(isolate); - key_obj = i::Execution::ToString(isolate, key_obj, &has_pending_exception); + has_pending_exception = !i::Execution::ToString( + isolate, key_obj).ToHandle(&key_obj); EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE)); } i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj); - PropertyAttributes result = - i::JSReceiver::GetPropertyAttribute(self, key_name); - if (result == ABSENT) return 
static_cast<PropertyAttribute>(NONE); - return static_cast<PropertyAttribute>(result); + EXCEPTION_PREAMBLE(isolate); + Maybe<PropertyAttributes> result = + i::JSReceiver::GetPropertyAttributes(self, key_name); + has_pending_exception = !result.has_value; + EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE)); + if (result.value == ABSENT) return static_cast<PropertyAttribute>(NONE); + return static_cast<PropertyAttribute>(result.value); +} + + +Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<String> key) { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyDescriptor()", + return Local<Value>()); + ENTER_V8(isolate); + i::Handle<i::JSObject> obj = Utils::OpenHandle(this); + i::Handle<i::Name> key_name = Utils::OpenHandle(*key); + i::Handle<i::Object> args[] = { obj, key_name }; + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::Object> result; + has_pending_exception = !CallV8HeapFunction( + "ObjectGetOwnPropertyDescriptor", + isolate->factory()->undefined_value(), + ARRAY_SIZE(args), + args).ToHandle(&result); + EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); + return Utils::ToLocal(result); } Local<Value> v8::Object::GetPrototype() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ON_BAILOUT(isolate, "v8::Object::GetPrototype()", - return Local<v8::Value>()); + ON_BAILOUT(isolate, "v8::Object::GetPrototype()", return Local<v8::Value>()); ENTER_V8(isolate); i::Handle<i::Object> self = Utils::OpenHandle(this); - i::Handle<i::Object> result(self->GetPrototype(isolate), isolate); - return Utils::ToLocal(result); + i::PrototypeIterator iter(isolate, self); + return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter)); } @@ -3210,7 +3204,8 @@ // to propagate outside. 
TryCatch try_catch; EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = i::JSObject::SetPrototype(self, value_obj); + i::MaybeHandle<i::Object> result = + i::JSObject::SetPrototype(self, value_obj, false); has_pending_exception = result.is_null(); EXCEPTION_BAILOUT_CHECK(isolate, false); return true; @@ -3224,14 +3219,17 @@ "v8::Object::FindInstanceInPrototypeChain()", return Local<v8::Object>()); ENTER_V8(isolate); - i::JSObject* object = *Utils::OpenHandle(this); + i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this), + i::PrototypeIterator::START_AT_RECEIVER); i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl); - while (!tmpl_info->IsTemplateFor(object)) { - i::Object* prototype = object->GetPrototype(); - if (!prototype->IsJSObject()) return Local<Object>(); - object = i::JSObject::cast(prototype); + while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) { + iter.Advance(); + if (iter.IsAtEnd()) { + return Local<Object>(); + } } - return Utils::ToLocal(i::Handle<i::JSObject>(object)); + return Utils::ToLocal( + i::handle(i::JSObject::cast(iter.GetCurrent()), isolate)); } @@ -3242,10 +3240,11 @@ ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - bool threw = false; - i::Handle<i::FixedArray> value = - i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw); - if (threw) return Local<v8::Array>(); + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::FixedArray> value; + has_pending_exception = !i::JSReceiver::GetKeys( + self, i::JSReceiver::INCLUDE_PROTOS).ToHandle(&value); + EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Array>()); // Because we use caching to speed up enumeration it is important // to never change the result of the basic enumeration function so // we clone the result. 
@@ -3263,10 +3262,11 @@ ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - bool threw = false; - i::Handle<i::FixedArray> value = - i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw); - if (threw) return Local<v8::Array>(); + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::FixedArray> value; + has_pending_exception = !i::JSReceiver::GetKeys( + self, i::JSReceiver::OWN_ONLY).ToHandle(&value); + EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Array>()); // Because we use caching to speed up enumeration it is important // to never change the result of the basic enumeration function so // we clone the result. @@ -3312,7 +3312,7 @@ // Write prefix. char* ptr = buf.start(); - i::OS::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize); + i::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize); ptr += prefix_len; // Write real content. @@ -3320,7 +3320,7 @@ ptr += str_len; // Write postfix. - i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize); + i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize); // Copy the buffer into a heap-allocated string and return it. 
Local<String> result = v8::String::NewFromUtf8( @@ -3331,17 +3331,6 @@ } -Local<Value> v8::Object::GetConstructor() { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ON_BAILOUT(isolate, "v8::Object::GetConstructor()", - return Local<v8::Function>()); - ENTER_V8(isolate); - i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> constructor(self->GetConstructor(), isolate); - return Utils::ToLocal(constructor); -} - - Local<String> v8::Object::GetConstructorName() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::GetConstructorName()", @@ -3361,8 +3350,9 @@ i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::DeleteProperty(self, key_obj); - has_pending_exception = obj.is_null(); + i::Handle<i::Object> obj; + has_pending_exception = !i::Runtime::DeleteObjectProperty( + isolate, self, key_obj, i::JSReceiver::NORMAL_DELETION).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(isolate, false); return obj->IsTrue(); } @@ -3380,14 +3370,17 @@ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this); i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::HasProperty(self, key_obj); - has_pending_exception = obj.is_null(); + i::Handle<i::Object> obj; + has_pending_exception = !i::Runtime::HasObjectProperty( + isolate, self, key_obj).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(isolate, false); return obj->IsTrue(); } bool v8::Object::HasPrivate(v8::Handle<Private> key) { + // TODO(rossberg): this should use HasOwnProperty, but we'd need to + // generalise that to a (noy yet existant) Name argument first. 
return Has(v8::Handle<Value>(reinterpret_cast<Value*>(*key))); } @@ -3399,7 +3392,13 @@ ENTER_V8(isolate); HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - return i::JSReceiver::DeleteElement(self, index)->IsTrue(); + + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::Object> obj; + has_pending_exception = + !i::JSReceiver::DeleteElement(self, index).ToHandle(&obj); + EXCEPTION_BAILOUT_CHECK(isolate, false); + return obj->IsTrue(); } @@ -3407,7 +3406,11 @@ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - return i::JSReceiver::HasElement(self, index); + EXCEPTION_PREAMBLE(isolate); + Maybe<bool> maybe = i::JSReceiver::HasElement(self, index); + has_pending_exception = !maybe.has_value; + EXCEPTION_BAILOUT_CHECK(isolate, false); + return maybe.value; } @@ -3428,10 +3431,13 @@ name, getter, setter, data, settings, attributes, signature); if (info.is_null()) return false; bool fast = Utils::OpenHandle(obj)->HasFastProperties(); - i::Handle<i::Object> result = - i::JSObject::SetAccessor(Utils::OpenHandle(obj), info); - if (result.is_null() || result->IsUndefined()) return false; - if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0); + i::Handle<i::Object> result; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, result, + i::JSObject::SetAccessor(Utils::OpenHandle(obj), info), + false); + if (result->IsUndefined()) return false; + if (fast) i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0); return true; } @@ -3462,6 +3468,8 @@ Handle<Function> setter, PropertyAttribute attribute, AccessControl settings) { + // TODO(verwaest): Remove |settings|. 
+ DCHECK_EQ(v8::DEFAULT, settings); i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return); ENTER_V8(isolate); @@ -3473,8 +3481,7 @@ v8::Utils::OpenHandle(*name), getter_i, setter_i, - static_cast<PropertyAttributes>(attribute), - settings); + static_cast<PropertyAttributes>(attribute)); } @@ -3482,8 +3489,12 @@ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()", return false); - return i::JSReceiver::HasLocalProperty( - Utils::OpenHandle(this), Utils::OpenHandle(*key)); + EXCEPTION_PREAMBLE(isolate); + Maybe<bool> maybe = i::JSReceiver::HasOwnProperty(Utils::OpenHandle(this), + Utils::OpenHandle(*key)); + has_pending_exception = !maybe.has_value; + EXCEPTION_BAILOUT_CHECK(isolate, false); + return maybe.value; } @@ -3491,8 +3502,12 @@ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()", return false); - return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this), - Utils::OpenHandle(*key)); + EXCEPTION_PREAMBLE(isolate); + Maybe<bool> maybe = i::JSObject::HasRealNamedProperty( + Utils::OpenHandle(this), Utils::OpenHandle(*key)); + has_pending_exception = !maybe.has_value; + EXCEPTION_BAILOUT_CHECK(isolate, false); + return maybe.value; } @@ -3500,7 +3515,12 @@ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()", return false); - return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index); + EXCEPTION_PREAMBLE(isolate); + Maybe<bool> maybe = + i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index); + has_pending_exception = !maybe.has_value; + EXCEPTION_BAILOUT_CHECK(isolate, false); + return maybe.value; } @@ -3510,8 +3530,12 @@ "v8::Object::HasRealNamedCallbackProperty()", return false); ENTER_V8(isolate); - return 
i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this), - Utils::OpenHandle(*key)); + EXCEPTION_PREAMBLE(isolate); + Maybe<bool> maybe = i::JSObject::HasRealNamedCallbackProperty( + Utils::OpenHandle(this), Utils::OpenHandle(*key)); + has_pending_exception = !maybe.has_value; + EXCEPTION_BAILOUT_CHECK(isolate, false); + return maybe.value; } @@ -3543,11 +3567,11 @@ // If the property being looked up is a callback, it can throw // an exception. EXCEPTION_PREAMBLE(isolate); - PropertyAttributes ignored; - i::Handle<i::Object> result = - i::Object::GetProperty(receiver, receiver, lookup, name, - &ignored); - has_pending_exception = result.is_null(); + i::LookupIterator it( + receiver, name, i::Handle<i::JSReceiver>(lookup->holder(), isolate), + i::LookupIterator::SKIP_INTERCEPTOR); + i::Handle<i::Object> result; + has_pending_exception = !i::Object::GetProperty(&it).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); return Utils::ToLocal(result); @@ -3564,7 +3588,7 @@ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this); i::Handle<i::String> key_obj = Utils::OpenHandle(*key); i::LookupResult lookup(isolate); - self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup); + self_obj->LookupRealNamedPropertyInPrototypes(key_obj, &lookup); return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup); } @@ -3577,7 +3601,7 @@ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this); i::Handle<i::String> key_obj = Utils::OpenHandle(*key); i::LookupResult lookup(isolate); - self_obj->LookupRealNamedProperty(*key_obj, &lookup); + self_obj->LookupRealNamedProperty(key_obj, &lookup); return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup); } @@ -3596,10 +3620,9 @@ // as optimized code does not always handle access checks. 
i::Deoptimizer::DeoptimizeGlobalObject(*obj); - i::Handle<i::Map> new_map = - isolate->factory()->CopyMap(i::Handle<i::Map>(obj->map())); + i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map())); new_map->set_is_access_check_needed(true); - obj->set_map(*new_map); + i::JSObject::MigrateToMap(obj, new_map); } @@ -3614,35 +3637,20 @@ ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::JSObject> result = i::JSObject::Copy(self); + i::Handle<i::JSObject> result = isolate->factory()->CopyJSObject(self); has_pending_exception = result.is_null(); EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>()); return Utils::ToLocal(result); } -static i::Context* GetCreationContext(i::JSObject* object) { - i::Object* constructor = object->map()->constructor(); - i::JSFunction* function; - if (!constructor->IsJSFunction()) { - // Functions have null as a constructor, - // but any JSFunction knows its context immediately. - ASSERT(object->IsJSFunction()); - function = i::JSFunction::cast(object); - } else { - function = i::JSFunction::cast(constructor); - } - return function->context()->native_context(); -} - - Local<v8::Context> v8::Object::CreationContext() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::CreationContext()", return Local<v8::Context>()); ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Context* context = GetCreationContext(*self); + i::Context* context = self->GetCreationContext(); return Utils::ToLocal(i::Handle<i::Context>(context)); } @@ -3653,8 +3661,7 @@ ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - return i::Handle<i::Smi>::cast( - i::JSReceiver::GetOrCreateIdentityHash(self))->value(); + return i::JSReceiver::GetOrCreateIdentityHash(self)->value(); } @@ -3685,7 +3692,7 @@ i::Handle<i::String> key_obj = Utils::OpenHandle(*key); 
i::Handle<i::String> key_string = isolate->factory()->InternalizeString(key_obj); - i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate); + i::Handle<i::Object> result(self->GetHiddenProperty(key_string), isolate); if (result->IsTheHole()) return v8::Local<v8::Value>(); return Utils::ToLocal(result); } @@ -3735,8 +3742,7 @@ object, GetElementsKindFromExternalArrayType(array_type)); - object->set_map(*external_array_map); - object->set_elements(*array); + i::JSObject::SetMapAndElements(object, external_array_map, array); } } // namespace @@ -3889,8 +3895,7 @@ return Local<v8::Value>()); LOG_API(isolate, "Object::CallAsFunction"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); + i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> obj = Utils::OpenHandle(this); i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); @@ -3901,15 +3906,17 @@ fun = i::Handle<i::JSFunction>::cast(obj); } else { EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> delegate = i::Execution::TryGetFunctionDelegate( - isolate, obj, &has_pending_exception); + i::Handle<i::Object> delegate; + has_pending_exception = !i::Execution::TryGetFunctionDelegate( + isolate, obj).ToHandle(&delegate); EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); fun = i::Handle<i::JSFunction>::cast(delegate); recv_obj = obj; } EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> returned = i::Execution::Call( - isolate, fun, recv_obj, argc, args, &has_pending_exception, true); + i::Handle<i::Object> returned; + has_pending_exception = !i::Execution::Call( + isolate, fun, recv_obj, argc, args, true).ToHandle(&returned); EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>()); return Utils::ToLocal(scope.CloseAndEscape(returned)); } @@ -3922,8 +3929,7 @@ return Local<v8::Object>()); LOG_API(isolate, "Object::CallAsConstructor"); ENTER_V8(isolate); - 
i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); + i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); @@ -3931,23 +3937,26 @@ if (obj->IsJSFunction()) { i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> returned = - i::Execution::New(fun, argc, args, &has_pending_exception); + i::Handle<i::Object> returned; + has_pending_exception = !i::Execution::New( + fun, argc, args).ToHandle(&returned); EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>()); return Utils::ToLocal(scope.CloseAndEscape( i::Handle<i::JSObject>::cast(returned))); } EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> delegate = i::Execution::TryGetConstructorDelegate( - isolate, obj, &has_pending_exception); + i::Handle<i::Object> delegate; + has_pending_exception = !i::Execution::TryGetConstructorDelegate( + isolate, obj).ToHandle(&delegate); EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>()); if (!delegate->IsUndefined()) { i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> returned = i::Execution::Call( - isolate, fun, obj, argc, args, &has_pending_exception); + i::Handle<i::Object> returned; + has_pending_exception = !i::Execution::Call( + isolate, fun, obj, argc, args).ToHandle(&returned); EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>()); - ASSERT(!delegate->IsUndefined()); + DCHECK(!delegate->IsUndefined()); return Utils::ToLocal(scope.CloseAndEscape(returned)); } return Local<v8::Object>(); @@ -3979,15 +3988,15 @@ return Local<v8::Object>()); LOG_API(isolate, "Function::NewInstance"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); + 
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate); EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSFunction> function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> returned = - i::Execution::New(function, argc, args, &has_pending_exception); + i::Handle<i::Object> returned; + has_pending_exception = !i::Execution::New( + function, argc, args).ToHandle(&returned); EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>()); return scope.Escape(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned))); } @@ -3999,23 +4008,18 @@ ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>()); LOG_API(isolate, "Function::Call"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); - i::Object* raw_result = NULL; - { - i::HandleScope scope(isolate); - i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); - i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); - STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); - i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); - EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> returned = i::Execution::Call( - isolate, fun, recv_obj, argc, args, &has_pending_exception, true); - EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>()); - raw_result = *returned; - } - i::Handle<i::Object> result(raw_result, isolate); - return Utils::ToLocal(result); + i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate); + i::HandleScope scope(isolate); + i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); + i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); + STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); + i::Handle<i::Object>* args = 
reinterpret_cast<i::Handle<i::Object>*>(argv); + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::Object> returned; + has_pending_exception = !i::Execution::Call( + isolate, fun, recv_obj, argc, args, true).ToHandle(&returned); + EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>()); + return Utils::ToLocal(scope.CloseAndEscape(returned)); } @@ -4053,7 +4057,7 @@ isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("displayName")); i::LookupResult lookup(isolate); - func->LookupRealNamedProperty(*property_name, &lookup); + func->LookupRealNamedProperty(property_name, &lookup); if (lookup.IsFound()) { i::Object* value = lookup.GetLazyValue(); if (value && value->IsString()) { @@ -4069,7 +4073,7 @@ i::Handle<i::JSFunction> func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { i::Handle<i::Script> script(i::Script::cast(func->shared()->script())); - i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script); + i::Handle<i::Object> scriptName = i::Script::GetNameOrSourceURL(script); v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(func->GetIsolate()); v8::ScriptOrigin origin( Utils::ToLocal(scriptName), @@ -4088,7 +4092,7 @@ i::Handle<i::JSFunction> func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { i::Handle<i::Script> script(i::Script::cast(func->shared()->script())); - return i::GetScriptLineNumber(script, func->shared()->start_position()); + return i::Script::GetLineNumber(script, func->shared()->start_position()); } return kLineOffsetNotFound; } @@ -4098,7 +4102,7 @@ i::Handle<i::JSFunction> func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { i::Handle<i::Script> script(i::Script::cast(func->shared()->script())); - return i::GetScriptColumnNumber(script, func->shared()->start_position()); + return i::Script::GetColumnNumber(script, func->shared()->start_position()); } return kLineOffsetNotFound; } @@ -4286,9 +4290,7 @@ class Visitor { public: - inline explicit 
Visitor() - : utf8_length_(0), - state_(kInitialState) {} + Visitor() : utf8_length_(0), state_(kInitialState) {} void VisitOneByteString(const uint8_t* chars, int length) { int utf8_length = 0; @@ -4341,7 +4343,7 @@ uint8_t leaf_state) { bool edge_surrogate = StartsWithSurrogate(leaf_state); if (!(*state & kLeftmostEdgeIsCalculated)) { - ASSERT(!(*state & kLeftmostEdgeIsSurrogate)); + DCHECK(!(*state & kLeftmostEdgeIsSurrogate)); *state |= kLeftmostEdgeIsCalculated | (edge_surrogate ? kLeftmostEdgeIsSurrogate : 0); } else if (EndsWithSurrogate(*state) && edge_surrogate) { @@ -4359,7 +4361,7 @@ uint8_t leaf_state) { bool edge_surrogate = EndsWithSurrogate(leaf_state); if (!(*state & kRightmostEdgeIsCalculated)) { - ASSERT(!(*state & kRightmostEdgeIsSurrogate)); + DCHECK(!(*state & kRightmostEdgeIsSurrogate)); *state |= (kRightmostEdgeIsCalculated | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0)); } else if (edge_surrogate && StartsWithSurrogate(*state)) { @@ -4375,7 +4377,7 @@ static inline void MergeTerminal(int* length, uint8_t state, uint8_t* state_out) { - ASSERT((state & kLeftmostEdgeIsCalculated) && + DCHECK((state & kLeftmostEdgeIsCalculated) && (state & kRightmostEdgeIsCalculated)); if (EndsWithSurrogate(state) && StartsWithSurrogate(state)) { *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; @@ -4487,7 +4489,7 @@ char* const buffer, bool replace_invalid_utf8) { using namespace unibrow; - ASSERT(remaining > 0); + DCHECK(remaining > 0); // We can't use a local buffer here because Encode needs to modify // previous characters in the stream. We know, however, that // exactly one character will be advanced. @@ -4496,7 +4498,7 @@ character, last_character, replace_invalid_utf8); - ASSERT(written == 1); + DCHECK(written == 1); return written; } // Use a scratch buffer to check the required characters. 
@@ -4528,7 +4530,7 @@ template<typename Char> void Visit(const Char* chars, const int length) { using namespace unibrow; - ASSERT(!early_termination_); + DCHECK(!early_termination_); if (length == 0) return; // Copy state to stack. char* buffer = buffer_; @@ -4557,7 +4559,7 @@ for (; i < fast_length; i++) { buffer += Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++)); - ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_); + DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_); } } else { for (; i < fast_length; i++) { @@ -4567,7 +4569,7 @@ last_character, replace_invalid_utf8_); last_character = character; - ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_); + DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_); } } // Array is fully written. Exit. @@ -4579,10 +4581,10 @@ return; } } - ASSERT(!skip_capacity_check_); + DCHECK(!skip_capacity_check_); // Slow loop. Must check capacity on each iteration. int remaining_capacity = capacity_ - static_cast<int>(buffer - start_); - ASSERT(remaining_capacity >= 0); + DCHECK(remaining_capacity >= 0); for (; i < length && remaining_capacity > 0; i++) { uint16_t character = *chars++; // remaining_capacity is <= 3 bytes at this point, so we do not write out @@ -4678,7 +4680,7 @@ ENTER_V8(isolate); i::Handle<i::String> str = Utils::OpenHandle(this); if (options & HINT_MANY_WRITES_EXPECTED) { - FlattenString(str); // Flatten the string for efficiency. + str = i::String::Flatten(str); // Flatten the string for efficiency. } const int string_length = str->length(); bool write_null = !(options & NO_NULL_TERMINATION); @@ -4713,7 +4715,7 @@ } } // Recursive slow path can potentially be unreasonable slow. Flatten. 
- str = FlattenGetString(str); + str = i::String::Flatten(str); Utf8WriterVisitor writer(buffer, capacity, false, replace_invalid_utf8); i::String::VisitFlat(&writer, *str); return writer.CompleteWrite(write_null, nchars_ref); @@ -4729,13 +4731,13 @@ i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate(); LOG_API(isolate, "String::Write"); ENTER_V8(isolate); - ASSERT(start >= 0 && length >= -1); + DCHECK(start >= 0 && length >= -1); i::Handle<i::String> str = Utils::OpenHandle(string); isolate->string_tracker()->RecordWrite(str); if (options & String::HINT_MANY_WRITES_EXPECTED) { // Flatten the string for efficiency. This applies whether we are // using StringCharacterStream or Get(i) to access the characters. - FlattenString(str); + str = i::String::Flatten(str); } int end = start + length; if ((length == -1) || (length > str->length() - start) ) @@ -4914,7 +4916,7 @@ if (!InternalFieldOK(obj, index, location)) return; i::Handle<i::Object> val = Utils::OpenHandle(*value); obj->SetInternalField(index, *val); - ASSERT_EQ(value, GetInternalField(index)); + DCHECK_EQ(value, GetInternalField(index)); } @@ -4931,7 +4933,7 @@ const char* location = "v8::Object::SetAlignedPointerInInternalField()"; if (!InternalFieldOK(obj, index, location)) return; obj->SetInternalField(index, EncodeAlignedAsSmi(value, location)); - ASSERT_EQ(value, GetAlignedPointerFromInternalField(index)); + DCHECK_EQ(value, GetAlignedPointerFromInternalField(index)); } @@ -4947,20 +4949,12 @@ void v8::V8::InitializePlatform(Platform* platform) { -#ifdef V8_USE_DEFAULT_PLATFORM - FATAL("Can't override v8::Platform when using default implementation"); -#else i::V8::InitializePlatform(platform); -#endif } void v8::V8::ShutdownPlatform() { -#ifdef V8_USE_DEFAULT_PLATFORM - FATAL("Can't override v8::Platform when using default implementation"); -#else i::V8::ShutdownPlatform(); -#endif } @@ -4974,7 +4968,7 @@ void v8::V8::SetEntropySource(EntropySource entropy_source) { - 
i::RandomNumberGenerator::SetEntropySource(entropy_source); + base::RandomNumberGenerator::SetEntropySource(entropy_source); } @@ -4986,8 +4980,8 @@ bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate, FunctionEntryHook entry_hook) { - ASSERT(ext_isolate != NULL); - ASSERT(entry_hook != NULL); + DCHECK(ext_isolate != NULL); + DCHECK(entry_hook != NULL); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate); @@ -5026,12 +5020,6 @@ bool v8::V8::Dispose() { - i::Isolate* isolate = i::Isolate::Current(); - if (!Utils::ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(), - "v8::V8::Dispose()", - "Use v8::Isolate::Dispose() for non-default isolate.")) { - return false; - } i::V8::TearDown(); return true; } @@ -5079,7 +5067,7 @@ void v8::V8::VisitHandlesForPartialDependence( Isolate* exported_isolate, PersistentHandleVisitor* visitor) { i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate); - ASSERT(isolate == i::Isolate::Current()); + DCHECK(isolate == i::Isolate::Current()); i::DisallowHeapAllocation no_allocation; VisitorAdapter visitor_adapter(visitor); @@ -5088,30 +5076,6 @@ } -bool v8::V8::IdleNotification(int hint) { - // Returning true tells the caller that it need not - // continue to call IdleNotification. 
- i::Isolate* isolate = i::Isolate::Current(); - if (isolate == NULL || !isolate->IsInitialized()) return true; - if (!i::FLAG_use_idle_notification) return true; - return isolate->heap()->IdleNotification(hint); -} - - -void v8::V8::LowMemoryNotification() { - i::Isolate* isolate = i::Isolate::Current(); - if (isolate == NULL || !isolate->IsInitialized()) return; - isolate->heap()->CollectAllAvailableGarbage("low memory notification"); -} - - -int v8::V8::ContextDisposedNotification() { - i::Isolate* isolate = i::Isolate::Current(); - if (!isolate->IsInitialized()) return 0; - return isolate->heap()->NotifyContextDisposed(); -} - - bool v8::V8::InitializeICU(const char* icu_data_file) { return i::InitializeICU(icu_data_file); } @@ -5126,7 +5090,7 @@ i::Isolate* isolate, v8::ExtensionConfiguration* extensions, v8::Handle<ObjectTemplate> global_template, - v8::Handle<Value> global_object) { + v8::Handle<Value> maybe_global_proxy) { i::Handle<i::Context> env; // Enter V8 via an ENTER_V8 scope. @@ -5164,16 +5128,19 @@ } } + i::Handle<i::Object> proxy = Utils::OpenHandle(*maybe_global_proxy, true); + i::MaybeHandle<i::JSGlobalProxy> maybe_proxy; + if (!proxy.is_null()) { + maybe_proxy = i::Handle<i::JSGlobalProxy>::cast(proxy); + } // Create the environment. env = isolate->bootstrapper()->CreateEnvironment( - Utils::OpenHandle(*global_object, true), - proxy_template, - extensions); + maybe_proxy, proxy_template, extensions); // Restore the access check info on the global template. 
if (!global_template.IsEmpty()) { - ASSERT(!global_constructor.is_null()); - ASSERT(!proxy_constructor.is_null()); + DCHECK(!global_constructor.is_null()); + DCHECK(!proxy_constructor.is_null()); global_constructor->set_access_check_info( proxy_constructor->access_check_info()); global_constructor->set_needs_access_check( @@ -5288,9 +5255,9 @@ LOG_API(isolate, "ObjectTemplate::NewInstance"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = - i::Execution::InstantiateObject(Utils::OpenHandle(this), - &has_pending_exception); + i::Handle<i::Object> obj; + has_pending_exception = !i::Execution::InstantiateObject( + Utils::OpenHandle(this)).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>()); return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj)); } @@ -5303,9 +5270,9 @@ LOG_API(isolate, "FunctionTemplate::GetFunction"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = - i::Execution::InstantiateFunction(Utils::OpenHandle(this), - &has_pending_exception); + i::Handle<i::Object> obj; + has_pending_exception = !i::Execution::InstantiateFunction( + Utils::OpenHandle(this)).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>()); return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj)); } @@ -5356,19 +5323,21 @@ } -inline i::Handle<i::String> NewString(i::Factory* factory, - String::NewStringType type, - i::Vector<const char> string) { - if (type ==String::kInternalizedString) { +MUST_USE_RESULT +inline i::MaybeHandle<i::String> NewString(i::Factory* factory, + String::NewStringType type, + i::Vector<const char> string) { + if (type == String::kInternalizedString) { return factory->InternalizeUtf8String(string); } return factory->NewStringFromUtf8(string); } -inline i::Handle<i::String> NewString(i::Factory* factory, - String::NewStringType type, - i::Vector<const uint8_t> string) { +MUST_USE_RESULT +inline i::MaybeHandle<i::String> NewString(i::Factory* factory, + 
String::NewStringType type, + i::Vector<const uint8_t> string) { if (type == String::kInternalizedString) { return factory->InternalizeOneByteString(string); } @@ -5376,9 +5345,10 @@ } -inline i::Handle<i::String> NewString(i::Factory* factory, - String::NewStringType type, - i::Vector<const uint16_t> string) { +MUST_USE_RESULT +inline i::MaybeHandle<i::String> NewString(i::Factory* factory, + String::NewStringType type, + i::Vector<const uint16_t> string) { if (type == String::kInternalizedString) { return factory->InternalizeTwoByteString(string); } @@ -5401,10 +5371,11 @@ } ENTER_V8(isolate); if (length == -1) length = StringLength(data); - i::Handle<i::String> result = NewString( - isolate->factory(), type, i::Vector<const Char>(data, length)); // We do not expect this to fail. Change this if it does. - CHECK(!result.is_null()); + i::Handle<i::String> result = NewString( + isolate->factory(), + type, + i::Vector<const Char>(data, length)).ToHandleChecked(); if (type == String::kUndetectableString) { result->MarkAsUndetectable(); } @@ -5460,10 +5431,9 @@ LOG_API(isolate, "String::New(char)"); ENTER_V8(isolate); i::Handle<i::String> right_string = Utils::OpenHandle(*right); - i::Handle<i::String> result = isolate->factory()->NewConsString(left_string, - right_string); // We do not expect this to fail. Change this if it does. - CHECK(!result.is_null()); + i::Handle<i::String> result = isolate->factory()->NewConsString( + left_string, right_string).ToHandleChecked(); return Utils::ToLocal(result); } @@ -5471,22 +5441,18 @@ static i::Handle<i::String> NewExternalStringHandle( i::Isolate* isolate, v8::String::ExternalStringResource* resource) { - i::Handle<i::String> result = - isolate->factory()->NewExternalStringFromTwoByte(resource); // We do not expect this to fail. Change this if it does. 
- CHECK(!result.is_null()); - return result; + return isolate->factory()->NewExternalStringFromTwoByte( + resource).ToHandleChecked(); } static i::Handle<i::String> NewExternalAsciiStringHandle( i::Isolate* isolate, v8::String::ExternalAsciiStringResource* resource) { - i::Handle<i::String> result = - isolate->factory()->NewExternalStringFromAscii(resource); // We do not expect this to fail. Change this if it does. - CHECK(!result.is_null()); - return result; + return isolate->factory()->NewExternalStringFromAscii( + resource).ToHandleChecked(); } @@ -5507,7 +5473,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { i::Handle<i::String> obj = Utils::OpenHandle(this); i::Isolate* isolate = obj->GetIsolate(); - if (i::StringShape(*obj).IsExternalTwoByte()) { + if (i::StringShape(*obj).IsExternal()) { return false; // Already an external string. } ENTER_V8(isolate); @@ -5520,8 +5486,10 @@ CHECK(resource && resource->data()); bool result = obj->MakeExternal(resource); + // Assert that if CanMakeExternal(), then externalizing actually succeeds. + DCHECK(!CanMakeExternal() || result); if (result) { - ASSERT(obj->IsExternalString()); + DCHECK(obj->IsExternalString()); isolate->heap()->external_string_table()->AddString(*obj); } return result; @@ -5547,7 +5515,7 @@ v8::String::ExternalAsciiStringResource* resource) { i::Handle<i::String> obj = Utils::OpenHandle(this); i::Isolate* isolate = obj->GetIsolate(); - if (i::StringShape(*obj).IsExternalTwoByte()) { + if (i::StringShape(*obj).IsExternal()) { return false; // Already an external string. } ENTER_V8(isolate); @@ -5560,8 +5528,10 @@ CHECK(resource && resource->data()); bool result = obj->MakeExternal(resource); + // Assert that if CanMakeExternal(), then externalizing actually succeeds. 
+ DCHECK(!CanMakeExternal() || result); if (result) { - ASSERT(obj->IsExternalString()); + DCHECK(obj->IsExternalString()); isolate->heap()->external_string_table()->AddString(*obj); } return result; @@ -5573,11 +5543,6 @@ i::Handle<i::String> obj = Utils::OpenHandle(this); i::Isolate* isolate = obj->GetIsolate(); - // TODO(yangguo): Externalizing sliced/cons strings allocates. - // This rule can be removed when all code that can - // trigger an access check is handlified and therefore GC safe. - if (isolate->heap()->old_pointer_space()->Contains(*obj)) return false; - if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false; int size = obj->Size(); // Byte size of the original string. if (size < i::ExternalString::kShortSize) return false; @@ -5603,16 +5568,17 @@ LOG_API(i_isolate, "NumberObject::New"); ENTER_V8(i_isolate); i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value); - i::Handle<i::Object> obj = i_isolate->factory()->ToObject(number); + i::Handle<i::Object> obj = + i::Object::ToObject(i_isolate, number).ToHandleChecked(); return Utils::ToLocal(obj); } double v8::NumberObject::ValueOf() const { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "NumberObject::NumberValue"); i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj); + i::Isolate* isolate = jsvalue->GetIsolate(); + LOG_API(isolate, "NumberObject::NumberValue"); return jsvalue->value()->Number(); } @@ -5626,36 +5592,38 @@ ? 
isolate->heap()->true_value() : isolate->heap()->false_value(), isolate); - i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean); + i::Handle<i::Object> obj = + i::Object::ToObject(isolate, boolean).ToHandleChecked(); return Utils::ToLocal(obj); } bool v8::BooleanObject::ValueOf() const { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "BooleanObject::BooleanValue"); i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj); + i::Isolate* isolate = jsvalue->GetIsolate(); + LOG_API(isolate, "BooleanObject::BooleanValue"); return jsvalue->value()->IsTrue(); } Local<v8::Value> v8::StringObject::New(Handle<String> value) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::String> string = Utils::OpenHandle(*value); + i::Isolate* isolate = string->GetIsolate(); EnsureInitializedForIsolate(isolate, "v8::StringObject::New()"); LOG_API(isolate, "StringObject::New"); ENTER_V8(isolate); i::Handle<i::Object> obj = - isolate->factory()->ToObject(Utils::OpenHandle(*value)); + i::Object::ToObject(isolate, string).ToHandleChecked(); return Utils::ToLocal(obj); } Local<v8::String> v8::StringObject::ValueOf() const { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "StringObject::StringValue"); i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj); + i::Isolate* isolate = jsvalue->GetIsolate(); + LOG_API(isolate, "StringObject::StringValue"); return Utils::ToLocal( i::Handle<i::String>(i::String::cast(jsvalue->value()))); } @@ -5666,17 +5634,17 @@ EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()"); LOG_API(i_isolate, "SymbolObject::New"); ENTER_V8(i_isolate); - i::Handle<i::Object> obj = - i_isolate->factory()->ToObject(Utils::OpenHandle(*value)); + i::Handle<i::Object> obj = i::Object::ToObject( + i_isolate, Utils::OpenHandle(*value)).ToHandleChecked(); return Utils::ToLocal(obj); } 
Local<v8::Symbol> v8::SymbolObject::ValueOf() const { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "SymbolObject::SymbolValue"); i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj); + i::Isolate* isolate = jsvalue->GetIsolate(); + LOG_API(isolate, "SymbolObject::SymbolValue"); return Utils::ToLocal( i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value()))); } @@ -5688,28 +5656,30 @@ LOG_API(i_isolate, "Date::New"); if (std::isnan(time)) { // Introduce only canonical NaN value into the VM, to avoid signaling NaNs. - time = i::OS::nan_value(); + time = base::OS::nan_value(); } ENTER_V8(i_isolate); EXCEPTION_PREAMBLE(i_isolate); - i::Handle<i::Object> obj = - i::Execution::NewDate(i_isolate, time, &has_pending_exception); + i::Handle<i::Object> obj; + has_pending_exception = !i::Execution::NewDate( + i_isolate, time).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::Value>()); return Utils::ToLocal(obj); } double v8::Date::ValueOf() const { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "Date::NumberValue"); i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj); + i::Isolate* isolate = jsdate->GetIsolate(); + LOG_API(isolate, "Date::NumberValue"); return jsdate->value()->Number(); } void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) { i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + if (!i_isolate->IsInitialized()) return; ON_BAILOUT(i_isolate, "v8::Date::DateTimeConfigurationChangeNotification()", return); LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification"); @@ -5724,7 +5694,7 @@ i::Handle<i::FixedArray> date_cache_version = i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton( i::EternalHandles::DATE_CACHE_VERSION)); - ASSERT_EQ(1, date_cache_version->length()); + DCHECK_EQ(1, date_cache_version->length()); 
CHECK(date_cache_version->get(0)->IsSmi()); date_cache_version->set( 0, @@ -5739,7 +5709,7 @@ if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g'; if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm'; if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i'; - ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf))); + DCHECK(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf))); return isolate->factory()->InternalizeOneByteString( i::Vector<const uint8_t>(flags_buf, num_flags)); } @@ -5752,10 +5722,10 @@ LOG_API(isolate, "RegExp::New"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::JSRegExp> obj = i::Execution::NewJSRegExp( + i::Handle<i::JSRegExp> obj; + has_pending_exception = !i::Execution::NewJSRegExp( Utils::OpenHandle(*pattern), - RegExpFlagsToString(flags), - &has_pending_exception); + RegExpFlagsToString(flags)).ToHandle(&obj); EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::RegExp>()); return Utils::ToLocal(i::Handle<i::JSRegExp>::cast(obj)); } @@ -5823,7 +5793,8 @@ i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon)); EXCEPTION_PREAMBLE(isolate); ENTER_V8(isolate); - i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle); + i::Handle<i::JSObject> result = + isolate->factory()->CopyJSObject(paragon_handle); has_pending_exception = result.is_null(); EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>()); return Utils::ToLocal(result); @@ -5839,14 +5810,13 @@ ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> argv[] = { obj }; - i::Handle<i::Object> b = i::Execution::Call( + i::Handle<i::Object> b; + has_pending_exception = !i::Execution::Call( isolate, - handle( - isolate->context()->global_object()->native_context()->is_promise()), + isolate->is_promise(), isolate->factory()->undefined_value(), ARRAY_SIZE(argv), argv, - &has_pending_exception, - false); + false).ToHandle(&b); EXCEPTION_BAILOUT_CHECK(isolate, false); return b->BooleanValue(); } @@ -5857,14 +5827,13 
@@ LOG_API(isolate, "Promise::Resolver::New"); ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = i::Execution::Call( + i::Handle<i::Object> result; + has_pending_exception = !i::Execution::Call( isolate, - handle(isolate->context()->global_object()->native_context()-> - promise_create()), + isolate->promise_create(), isolate->factory()->undefined_value(), 0, NULL, - &has_pending_exception, - false); + false).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise::Resolver>()); return Local<Promise::Resolver>::Cast(Utils::ToLocal(result)); } @@ -5883,14 +5852,12 @@ ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) }; - i::Execution::Call( + has_pending_exception = i::Execution::Call( isolate, - handle(isolate->context()->global_object()->native_context()-> - promise_resolve()), + isolate->promise_resolve(), isolate->factory()->undefined_value(), ARRAY_SIZE(argv), argv, - &has_pending_exception, - false); + false).is_null(); EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;); } @@ -5902,14 +5869,12 @@ ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) }; - i::Execution::Call( + has_pending_exception = i::Execution::Call( isolate, - handle(isolate->context()->global_object()->native_context()-> - promise_reject()), + isolate->promise_reject(), isolate->factory()->undefined_value(), ARRAY_SIZE(argv), argv, - &has_pending_exception, - false); + false).is_null(); EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;); } @@ -5921,14 +5886,13 @@ ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) }; - i::Handle<i::Object> result = i::Execution::Call( + i::Handle<i::Object> result; + has_pending_exception = !i::Execution::Call( isolate, - handle(isolate->context()->global_object()->native_context()-> - promise_chain()), + isolate->promise_chain(), promise, 
ARRAY_SIZE(argv), argv, - &has_pending_exception, - false); + false).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>()); return Local<Promise>::Cast(Utils::ToLocal(result)); } @@ -5941,14 +5905,32 @@ ENTER_V8(isolate); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) }; - i::Handle<i::Object> result = i::Execution::Call( + i::Handle<i::Object> result; + has_pending_exception = !i::Execution::Call( isolate, - handle(isolate->context()->global_object()->native_context()-> - promise_catch()), + isolate->promise_catch(), promise, ARRAY_SIZE(argv), argv, - &has_pending_exception, - false); + false).ToHandle(&result); + EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>()); + return Local<Promise>::Cast(Utils::ToLocal(result)); +} + + +Local<Promise> Promise::Then(Handle<Function> handler) { + i::Handle<i::JSObject> promise = Utils::OpenHandle(this); + i::Isolate* isolate = promise->GetIsolate(); + LOG_API(isolate, "Promise::Then"); + ENTER_V8(isolate); + EXCEPTION_PREAMBLE(isolate); + i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) }; + i::Handle<i::Object> result; + has_pending_exception = !i::Execution::Call( + isolate, + isolate->promise_then(), + promise, + ARRAY_SIZE(argv), argv, + false).ToHandle(&result); EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>()); return Local<Promise>::Cast(Utils::ToLocal(result)); } @@ -6021,10 +6003,10 @@ i::Handle<i::JSArrayBuffer> buffer; if (obj->IsJSDataView()) { i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj)); - ASSERT(data_view->buffer()->IsJSArrayBuffer()); + DCHECK(data_view->buffer()->IsJSArrayBuffer()); buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer())); } else { - ASSERT(obj->IsJSTypedArray()); + DCHECK(obj->IsJSTypedArray()); buffer = i::JSTypedArray::cast(*obj)->GetBuffer(); } return Utils::ToLocal(buffer); @@ -6055,7 +6037,7 @@ i::Handle<i::JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length) { - ASSERT(byte_offset + 
byte_length <= + DCHECK(byte_offset + byte_length <= static_cast<size_t>(buffer->byte_length()->Number())); obj->set_buffer(*buffer); @@ -6082,9 +6064,10 @@ isolate->factory()->NewJSTypedArray(array_type); i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); - ASSERT(byte_offset % sizeof(ElementType) == 0); + DCHECK(byte_offset % sizeof(ElementType) == 0); CHECK(length <= (std::numeric_limits<size_t>::max() / sizeof(ElementType))); + CHECK(length <= static_cast<size_t>(i::Smi::kMaxValue)); size_t byte_length = length * sizeof(ElementType); SetupArrayBufferView( isolate, obj, buffer, byte_offset, byte_length); @@ -6099,7 +6082,7 @@ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset); i::Handle<i::Map> map = i::JSObject::GetElementsTransitionMap(obj, elements_kind); - obj->set_map_and_elements(*map, *elements); + i::JSObject::SetMapAndElements(obj, map, elements); return obj; } @@ -6107,12 +6090,17 @@ #define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \ Local<Type##Array> Type##Array::New(Handle<ArrayBuffer> array_buffer, \ size_t byte_offset, size_t length) { \ - i::Isolate* isolate = i::Isolate::Current(); \ + i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \ EnsureInitializedForIsolate(isolate, \ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \ LOG_API(isolate, \ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \ ENTER_V8(isolate); \ + if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \ + "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)", \ + "length exceeds max allowed value")) { \ + return Local<Type##Array>(); \ + } \ i::Handle<i::JSTypedArray> obj = \ NewTypedArray<ctype, v8::kExternal##Type##Array, \ i::EXTERNAL_##TYPE##_ELEMENTS>( \ @@ -6126,13 +6114,13 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer, size_t byte_offset, size_t byte_length) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::JSArrayBuffer> 
buffer = Utils::OpenHandle(*array_buffer); + i::Isolate* isolate = buffer->GetIsolate(); EnsureInitializedForIsolate( isolate, "v8::DataView::New(void*, size_t, size_t)"); LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)"); ENTER_V8(isolate); i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView(); - i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); SetupArrayBufferView( isolate, obj, buffer, byte_offset, byte_length); return Utils::ToLocal(obj); @@ -6156,13 +6144,15 @@ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry(); i::Handle<i::String> part = i_isolate->factory()->for_string(); i::Handle<i::JSObject> symbols = - i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part)); - i::Handle<i::Object> symbol = i::JSObject::GetProperty(symbols, i_name); + i::Handle<i::JSObject>::cast( + i::Object::GetPropertyOrElement(registry, part).ToHandleChecked()); + i::Handle<i::Object> symbol = + i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked(); if (!symbol->IsSymbol()) { - ASSERT(symbol->IsUndefined()); + DCHECK(symbol->IsUndefined()); symbol = i_isolate->factory()->NewSymbol(); i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name); - i::JSObject::SetProperty(symbols, i_name, symbol, NONE, i::STRICT); + i::JSObject::SetProperty(symbols, i_name, symbol, i::STRICT).Assert(); } return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol)); } @@ -6174,13 +6164,15 @@ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry(); i::Handle<i::String> part = i_isolate->factory()->for_api_string(); i::Handle<i::JSObject> symbols = - i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part)); - i::Handle<i::Object> symbol = i::JSObject::GetProperty(symbols, i_name); + i::Handle<i::JSObject>::cast( + i::Object::GetPropertyOrElement(registry, part).ToHandleChecked()); + i::Handle<i::Object> symbol = + i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked(); if (!symbol->IsSymbol()) 
{ - ASSERT(symbol->IsUndefined()); + DCHECK(symbol->IsUndefined()); symbol = i_isolate->factory()->NewSymbol(); i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name); - i::JSObject::SetProperty(symbols, i_name, symbol, NONE, i::STRICT); + i::JSObject::SetProperty(symbols, i_name, symbol, i::STRICT).Assert(); } return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol)); } @@ -6204,13 +6196,15 @@ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry(); i::Handle<i::String> part = i_isolate->factory()->private_api_string(); i::Handle<i::JSObject> privates = - i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part)); - i::Handle<i::Object> symbol = i::JSObject::GetProperty(privates, i_name); + i::Handle<i::JSObject>::cast( + i::Object::GetPropertyOrElement(registry, part).ToHandleChecked()); + i::Handle<i::Object> symbol = + i::Object::GetPropertyOrElement(privates, i_name).ToHandleChecked(); if (!symbol->IsSymbol()) { - ASSERT(symbol->IsUndefined()); + DCHECK(symbol->IsUndefined()); symbol = i_isolate->factory()->NewPrivateSymbol(); i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name); - i::JSObject::SetProperty(privates, i_name, symbol, NONE, i::STRICT); + i::JSObject::SetProperty(privates, i_name, symbol, i::STRICT).Assert(); } Local<Symbol> result = Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol)); return v8::Handle<Private>(reinterpret_cast<Private*>(*result)); @@ -6219,10 +6213,10 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) { i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - ASSERT(internal_isolate->IsInitialized()); + DCHECK(internal_isolate->IsInitialized()); if (std::isnan(value)) { // Introduce only canonical NaN value into the VM, to avoid signaling NaNs. 
- value = i::OS::nan_value(); + value = base::OS::nan_value(); } ENTER_V8(internal_isolate); i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value); @@ -6232,7 +6226,7 @@ Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) { i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - ASSERT(internal_isolate->IsInitialized()); + DCHECK(internal_isolate->IsInitialized()); if (i::Smi::IsValid(value)) { return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value), internal_isolate)); @@ -6245,7 +6239,7 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) { i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - ASSERT(internal_isolate->IsInitialized()); + DCHECK(internal_isolate->IsInitialized()); bool fits_into_int32_t = (value & (1 << 31)) == 0; if (fits_into_int32_t) { return Integer::New(isolate, static_cast<int32_t>(value)); @@ -6302,26 +6296,6 @@ } -void V8::SetCounterFunction(CounterLookupCallback callback) { - i::Isolate* isolate = EnterIsolateIfNeeded(); - isolate->stats_table()->SetCounterFunction(callback); -} - - -void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) { - i::Isolate* isolate = EnterIsolateIfNeeded(); - isolate->stats_table()->SetCreateHistogramFunction(callback); - isolate->InitializeLoggingAndCounters(); - isolate->counters()->ResetHistograms(); -} - - -void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) { - i::Isolate* isolate = EnterIsolateIfNeeded(); - isolate->stats_table()-> - SetAddHistogramSampleFunction(callback); -} - void V8::SetFailedAccessCheckCallbackFunction( FailedAccessCheckCallback callback) { i::Isolate* isolate = i::Isolate::Current(); @@ -6329,10 +6303,9 @@ } -int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( - int64_t change_in_bytes) { - i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap(); - return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes); 
+void Isolate::CollectAllGarbage(const char* gc_reason) { + reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage( + i::Heap::kNoGCFlags, gc_reason); } @@ -6360,7 +6333,7 @@ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); i::Context* context = isolate->context(); if (context == NULL) return Local<Context>(); - i::Context* native_context = context->global_object()->native_context(); + i::Context* native_context = context->native_context(); if (native_context == NULL) return Local<Context>(); return Utils::ToLocal(i::Handle<i::Context>(native_context)); } @@ -6498,43 +6471,9 @@ } -void V8::AddCallCompletedCallback(CallCompletedCallback callback) { - if (callback == NULL) return; - i::V8::AddCallCompletedCallback(callback); -} - - -void V8::RunMicrotasks(Isolate* isolate) { - i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); - i::HandleScope scope(i_isolate); - i::V8::RunMicrotasks(i_isolate); -} - - -void V8::EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask) { - i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); - ENTER_V8(i_isolate); - i::Execution::EnqueueMicrotask(i_isolate, Utils::OpenHandle(*microtask)); -} - - -void V8::SetAutorunMicrotasks(Isolate* isolate, bool autorun) { - reinterpret_cast<i::Isolate*>(isolate)->set_autorun_microtasks(autorun); -} - - -void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) { - i::V8::RemoveCallCompletedCallback(callback); -} - - void V8::TerminateExecution(Isolate* isolate) { - // If no isolate is supplied, use the default isolate. 
- if (isolate != NULL) { - reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->TerminateExecution(); - } else { - i::Isolate::GetDefaultIsolateStackGuard()->TerminateExecution(); - } + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + i_isolate->stack_guard()->RequestTerminateExecution(); } @@ -6547,18 +6486,24 @@ void V8::CancelTerminateExecution(Isolate* isolate) { i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); - i_isolate->stack_guard()->CancelTerminateExecution(); + i_isolate->stack_guard()->ClearTerminateExecution(); + i_isolate->CancelTerminateExecution(); } void Isolate::RequestInterrupt(InterruptCallback callback, void* data) { - reinterpret_cast<i::Isolate*>(this)->stack_guard()->RequestInterrupt( - callback, data); + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this); + i_isolate->set_api_interrupt_callback(callback); + i_isolate->set_api_interrupt_callback_data(data); + i_isolate->stack_guard()->RequestApiInterrupt(); } void Isolate::ClearInterrupt() { - reinterpret_cast<i::Isolate*>(this)->stack_guard()->ClearInterrupt(); + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this); + i_isolate->stack_guard()->ClearApiInterrupt(); + i_isolate->set_api_interrupt_callback(NULL); + i_isolate->set_api_interrupt_callback_data(NULL); } @@ -6569,7 +6514,7 @@ i::NEW_SPACE, "Isolate::RequestGarbageCollection", kGCCallbackFlagForced); } else { - ASSERT_EQ(kFullGarbageCollection, type); + DCHECK_EQ(kFullGarbageCollection, type); reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage( i::Heap::kAbortIncrementalMarkingMask, "Isolate::RequestGarbageCollection", kGCCallbackFlagForced); @@ -6578,7 +6523,7 @@ Isolate* Isolate::GetCurrent() { - i::Isolate* isolate = i::Isolate::UncheckedCurrent(); + i::Isolate* isolate = i::Isolate::Current(); return reinterpret_cast<Isolate*>(isolate); } @@ -6621,7 +6566,7 @@ internal_ = reinterpret_cast<void*>( new i::DisallowJavascriptExecution(i_isolate)); } else { - 
ASSERT_EQ(THROW_ON_FAILURE, on_failure); + DCHECK_EQ(THROW_ON_FAILURE, on_failure); internal_ = reinterpret_cast<void*>( new i::ThrowOnJavascriptExecution(i_isolate)); } @@ -6653,6 +6598,18 @@ } +Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope( + Isolate* isolate) + : isolate_(reinterpret_cast<i::Isolate*>(isolate)) { + isolate_->handle_scope_implementer()->IncrementCallDepth(); +} + + +Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() { + isolate_->handle_scope_implementer()->DecrementCallDepth(); +} + + void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); if (!isolate->IsInitialized()) { @@ -6674,10 +6631,112 @@ void Isolate::SetEventLogger(LogEventCallback that) { + // Do not overwrite the event logger if we want to log explicitly. + if (i::FLAG_log_timer_events) return; i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); isolate->set_event_logger(that); } + +void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) { + if (callback == NULL) return; + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + isolate->AddCallCompletedCallback(callback); +} + + +void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + isolate->RemoveCallCompletedCallback(callback); +} + + +void Isolate::RunMicrotasks() { + reinterpret_cast<i::Isolate*>(this)->RunMicrotasks(); +} + + +void Isolate::EnqueueMicrotask(Handle<Function> microtask) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + isolate->EnqueueMicrotask(Utils::OpenHandle(*microtask)); +} + + +void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + i::HandleScope scope(isolate); + i::Handle<i::CallHandlerInfo> callback_info = + i::Handle<i::CallHandlerInfo>::cast( + 
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE)); + SET_FIELD_WRAPPED(callback_info, set_callback, microtask); + SET_FIELD_WRAPPED(callback_info, set_data, data); + isolate->EnqueueMicrotask(callback_info); +} + + +void Isolate::SetAutorunMicrotasks(bool autorun) { + reinterpret_cast<i::Isolate*>(this)->set_autorun_microtasks(autorun); +} + + +bool Isolate::WillAutorunMicrotasks() const { + return reinterpret_cast<const i::Isolate*>(this)->autorun_microtasks(); +} + + +void Isolate::SetUseCounterCallback(UseCounterCallback callback) { + reinterpret_cast<i::Isolate*>(this)->SetUseCounterCallback(callback); +} + + +void Isolate::SetCounterFunction(CounterLookupCallback callback) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + isolate->stats_table()->SetCounterFunction(callback); + isolate->InitializeLoggingAndCounters(); + isolate->counters()->ResetCounters(); +} + + +void Isolate::SetCreateHistogramFunction(CreateHistogramCallback callback) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + isolate->stats_table()->SetCreateHistogramFunction(callback); + isolate->InitializeLoggingAndCounters(); + isolate->counters()->ResetHistograms(); +} + + +void Isolate::SetAddHistogramSampleFunction( + AddHistogramSampleCallback callback) { + reinterpret_cast<i::Isolate*>(this) + ->stats_table() + ->SetAddHistogramSampleFunction(callback); +} + + +bool v8::Isolate::IdleNotification(int idle_time_in_ms) { + // Returning true tells the caller that it need not + // continue to call IdleNotification. 
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + if (!i::FLAG_use_idle_notification) return true; + return isolate->heap()->IdleNotification(idle_time_in_ms); +} + + +void v8::Isolate::LowMemoryNotification() { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + { + i::HistogramTimerScope idle_notification_scope( + isolate->counters()->gc_low_memory_notification()); + isolate->heap()->CollectAllAvailableGarbage("low memory notification"); + } +} + +int v8::Isolate::ContextDisposedNotification() { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + return isolate->heap()->NotifyContextDisposed(); +} + + String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) : str_(NULL), length_(0) { i::Isolate* isolate = i::Isolate::Current(); @@ -6807,72 +6866,44 @@ // --- D e b u g S u p p o r t --- -#ifdef ENABLE_DEBUGGER_SUPPORT - -bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) { +bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) { i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()"); - ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false); + EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()"); + ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false); ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::Object> foreign = isolate->factory()->undefined_value(); if (that != NULL) { foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that)); } - isolate->debugger()->SetEventListener(foreign, - Utils::OpenHandle(*data, true)); - return true; -} - - -bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that, - Handle<Value> data) { - i::Isolate* isolate = i::Isolate::Current(); - ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false); - ENTER_V8(isolate); - isolate->debugger()->SetEventListener(Utils::OpenHandle(*that), - 
Utils::OpenHandle(*data, true)); + isolate->debug()->SetEventListener(foreign, + Utils::OpenHandle(*data, true)); return true; } void Debug::DebugBreak(Isolate* isolate) { - // If no isolate is supplied, use the default isolate. - if (isolate != NULL) { - reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->DebugBreak(); - } else { - i::Isolate::GetDefaultIsolateStackGuard()->DebugBreak(); - } + reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak(); } void Debug::CancelDebugBreak(Isolate* isolate) { - // If no isolate is supplied, use the default isolate. - if (isolate != NULL) { - i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - internal_isolate->stack_guard()->Continue(i::DEBUGBREAK); - } else { - i::Isolate::GetDefaultIsolateStackGuard()->Continue(i::DEBUGBREAK); - } + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); + internal_isolate->stack_guard()->ClearDebugBreak(); } -void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) { - // If no isolate is supplied, use the default isolate. 
- if (isolate != NULL) { - i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - internal_isolate->debugger()->EnqueueDebugCommand(data); - } else { - i::Isolate::GetDefaultIsolateDebugger()->EnqueueDebugCommand(data); - } +void Debug::DebugBreakForCommand(Isolate* isolate, ClientData* data) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); + internal_isolate->debug()->EnqueueDebugCommand(data); } -void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) { +void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler"); ENTER_V8(isolate); - isolate->debugger()->SetMessageHandler(handler); + isolate->debug()->SetMessageHandler(handler); } @@ -6881,64 +6912,28 @@ int length, ClientData* client_data) { i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - internal_isolate->debugger()->ProcessCommand( + internal_isolate->debug()->EnqueueCommandMessage( i::Vector<const uint16_t>(command, length), client_data); } -void Debug::SendCommand(const uint16_t* command, int length, - ClientData* client_data, - Isolate* isolate) { - // If no isolate is supplied, use the default isolate. 
- if (isolate != NULL) { - i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - internal_isolate->debugger()->ProcessCommand( - i::Vector<const uint16_t>(command, length), client_data); - } else { - i::Isolate::GetDefaultIsolateDebugger()->ProcessCommand( - i::Vector<const uint16_t>(command, length), client_data); - } -} - - -void Debug::SetHostDispatchHandler(HostDispatchHandler handler, - int period) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler"); - ENTER_V8(isolate); - isolate->debugger()->SetHostDispatchHandler( - handler, i::TimeDelta::FromMilliseconds(period)); -} - - -void Debug::SetDebugMessageDispatchHandler( - DebugMessageDispatchHandler handler, bool provide_locker) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, - "v8::Debug::SetDebugMessageDispatchHandler"); - ENTER_V8(isolate); - isolate->debugger()->SetDebugMessageDispatchHandler( - handler, provide_locker); -} - - Local<Value> Debug::Call(v8::Handle<v8::Function> fun, v8::Handle<v8::Value> data) { i::Isolate* isolate = i::Isolate::Current(); if (!isolate->IsInitialized()) return Local<Value>(); ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>()); ENTER_V8(isolate); - i::Handle<i::Object> result; + i::MaybeHandle<i::Object> maybe_result; EXCEPTION_PREAMBLE(isolate); if (data.IsEmpty()) { - result = isolate->debugger()->Call(Utils::OpenHandle(*fun), - isolate->factory()->undefined_value(), - &has_pending_exception); - } else { - result = isolate->debugger()->Call(Utils::OpenHandle(*fun), - Utils::OpenHandle(*data), - &has_pending_exception); + maybe_result = isolate->debug()->Call( + Utils::OpenHandle(*fun), isolate->factory()->undefined_value()); + } else { + maybe_result = isolate->debug()->Call( + Utils::OpenHandle(*fun), Utils::OpenHandle(*data)); } + i::Handle<i::Object> result; + has_pending_exception = !maybe_result.ToHandle(&result); 
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); return Utils::ToLocal(result); } @@ -6951,36 +6946,30 @@ ENTER_V8(isolate); v8::EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Debug* isolate_debug = isolate->debug(); - isolate_debug->Load(); - i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object()); - i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("MakeMirror")); - i::Handle<i::Object> fun_obj = i::GetProperty(isolate, debug, name); - i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj); - v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun); - const int kArgc = 1; - v8::Handle<v8::Value> argv[kArgc] = { obj }; EXCEPTION_PREAMBLE(isolate); - v8::Local<v8::Value> result = - v8_fun->Call(Utils::ToLocal(debug), kArgc, argv); + has_pending_exception = !isolate_debug->Load(); + v8::Local<v8::Value> result; + if (!has_pending_exception) { + i::Handle<i::JSObject> debug( + isolate_debug->debug_context()->global_object()); + i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("MakeMirror")); + i::Handle<i::Object> fun_obj = + i::Object::GetProperty(debug, name).ToHandleChecked(); + i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj); + v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun); + const int kArgc = 1; + v8::Handle<v8::Value> argv[kArgc] = { obj }; + result = v8_fun->Call(Utils::ToLocal(debug), kArgc, argv); + has_pending_exception = result.IsEmpty(); + } EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); return scope.Escape(result); } -bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) { - return i::Isolate::Current()->debugger()->StartAgent(name, port, - wait_for_connection); -} - - -void Debug::DisableAgent() { - return i::Isolate::Current()->debugger()->StopAgent(); -} - - void Debug::ProcessDebugMessages() { - 
i::Execution::ProcessDebugMessages(i::Isolate::Current(), true); + i::Isolate::Current()->debug()->ProcessDebugMessages(true); } @@ -6988,39 +6977,29 @@ i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()"); ENTER_V8(isolate); - return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext()); + return Utils::ToLocal(i::Isolate::Current()->debug()->GetDebugContext()); } -void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) { - // If no isolate is supplied, use the default isolate. - i::Debugger* debugger; - if (isolate != NULL) { - i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - debugger = internal_isolate->debugger(); - } else { - debugger = i::Isolate::GetDefaultIsolateDebugger(); - } - debugger->set_live_edit_enabled(enable); +void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); + internal_isolate->debug()->set_live_edit_enabled(enable); } -#endif // ENABLE_DEBUGGER_SUPPORT - - Handle<String> CpuProfileNode::GetFunctionName() const { i::Isolate* isolate = i::Isolate::Current(); const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this); const i::CodeEntry* entry = node->entry(); + i::Handle<i::String> name = + isolate->factory()->InternalizeUtf8String(entry->name()); if (!entry->has_name_prefix()) { - return ToApiHandle<String>( - isolate->factory()->InternalizeUtf8String(entry->name())); + return ToApiHandle<String>(name); } else { + // We do not expect this to fail. Change this if it does. i::Handle<i::String> cons = isolate->factory()->NewConsString( isolate->factory()->InternalizeUtf8String(entry->name_prefix()), - isolate->factory()->InternalizeUtf8String(entry->name())); - // We do not expect this to fail. Change this if it does. 
- CHECK(!cons.is_null()); + name).ToHandleChecked(); return ToApiHandle<String>(cons); } } @@ -7088,7 +7067,7 @@ void CpuProfile::Delete() { i::Isolate* isolate = i::Isolate::Current(); i::CpuProfiler* profiler = isolate->cpu_profiler(); - ASSERT(profiler != NULL); + DCHECK(profiler != NULL); profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this)); } @@ -7113,15 +7092,22 @@ } +int64_t CpuProfile::GetSampleTimestamp(int index) const { + const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); + return (profile->sample_timestamp(index) - base::TimeTicks()) + .InMicroseconds(); +} + + int64_t CpuProfile::GetStartTime() const { const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); - return (profile->start_time() - i::Time::UnixEpoch()).InMicroseconds(); + return (profile->start_time() - base::TimeTicks()).InMicroseconds(); } int64_t CpuProfile::GetEndTime() const { const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); - return (profile->end_time() - i::Time::UnixEpoch()).InMicroseconds(); + return (profile->end_time() - base::TimeTicks()).InMicroseconds(); } @@ -7131,9 +7117,9 @@ void CpuProfiler::SetSamplingInterval(int us) { - ASSERT(us >= 0); + DCHECK(us >= 0); return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval( - i::TimeDelta::FromMicroseconds(us)); + base::TimeDelta::FromMicroseconds(us)); } @@ -7163,7 +7149,7 @@ void CpuProfiler::SetIdle(bool is_idle) { i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate(); i::StateTag state = isolate->current_vm_state(); - ASSERT(state == i::EXTERNAL || state == i::IDLE); + DCHECK(state == i::EXTERNAL || state == i::IDLE); if (isolate->js_entry_sp() != NULL) return; if (is_idle) { isolate->set_current_vm_state(i::IDLE); @@ -7492,7 +7478,7 @@ char* HandleScopeImplementer::ArchiveThread(char* storage) { HandleScopeData* current = isolate_->handle_scope_data(); handle_scope_data_ = *current; - OS::MemCopy(storage, this, 
sizeof(*this)); + MemCopy(storage, this, sizeof(*this)); ResetAfterArchive(); current->Initialize(); @@ -7507,7 +7493,7 @@ char* HandleScopeImplementer::RestoreThread(char* storage) { - OS::MemCopy(this, storage, sizeof(*this)); + MemCopy(this, storage, sizeof(*this)); *isolate_->handle_scope_data() = handle_scope_data_; return storage + ArchiveSpacePerThread(); } @@ -7524,7 +7510,7 @@ (last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) && (last_handle_before_deferred_block_ >= block)) { v->VisitPointers(block, last_handle_before_deferred_block_); - ASSERT(!found_block_before_deferred); + DCHECK(!found_block_before_deferred); #ifdef DEBUG found_block_before_deferred = true; #endif @@ -7533,7 +7519,7 @@ } } - ASSERT(last_handle_before_deferred_block_ == NULL || + DCHECK(last_handle_before_deferred_block_ == NULL || found_block_before_deferred); // Iterate over live handles in the last block (if any). @@ -7573,7 +7559,7 @@ Object** block_start = blocks_.last(); Object** block_limit = &block_start[kHandleBlockSize]; // We should not need to check for SealHandleScope here. Assert this. - ASSERT(prev_limit == block_limit || + DCHECK(prev_limit == block_limit || !(block_start <= prev_limit && prev_limit <= block_limit)); if (prev_limit == block_limit) break; deferred->blocks_.Add(blocks_.last()); @@ -7584,17 +7570,17 @@ // HandleScope stack since BeginDeferredScope was called, but in // reverse order. 
- ASSERT(prev_limit == NULL || !blocks_.is_empty()); + DCHECK(prev_limit == NULL || !blocks_.is_empty()); - ASSERT(!blocks_.is_empty() && prev_limit != NULL); - ASSERT(last_handle_before_deferred_block_ != NULL); + DCHECK(!blocks_.is_empty() && prev_limit != NULL); + DCHECK(last_handle_before_deferred_block_ != NULL); last_handle_before_deferred_block_ = NULL; return deferred; } void HandleScopeImplementer::BeginDeferredScope() { - ASSERT(last_handle_before_deferred_block_ == NULL); + DCHECK(last_handle_before_deferred_block_ == NULL); last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next; } @@ -7612,9 +7598,9 @@ void DeferredHandles::Iterate(ObjectVisitor* v) { - ASSERT(!blocks_.is_empty()); + DCHECK(!blocks_.is_empty()); - ASSERT((first_block_limit_ >= blocks_.first()) && + DCHECK((first_block_limit_ >= blocks_.first()) && (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize])); v->VisitPointers(blocks_.first(), first_block_limit_); diff -Nru nodejs-0.11.13/deps/v8/src/api.h nodejs-0.11.15/deps/v8/src/api.h --- nodejs-0.11.13/deps/v8/src/api.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/api.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_API_H_ #define V8_API_H_ -#include "v8.h" +#include "src/v8.h" -#include "../include/v8-testing.h" -#include "contexts.h" -#include "factory.h" -#include "isolate.h" -#include "list-inl.h" +#include "include/v8-testing.h" +#include "src/contexts.h" +#include "src/factory.h" +#include "src/isolate.h" +#include "src/list-inl.h" namespace v8 { @@ -104,13 +81,13 @@ v8::internal::Object* NeanderObject::get(int offset) { - ASSERT(value()->HasFastObjectElements()); + DCHECK(value()->HasFastObjectElements()); return v8::internal::FixedArray::cast(value()->elements())->get(offset); } void NeanderObject::set(int offset, v8::internal::Object* value) { - ASSERT(value_->HasFastObjectElements()); + DCHECK(value_->HasFastObjectElements()); v8::internal::FixedArray::cast(value_->elements())->set(offset, value); } @@ -186,9 +163,9 @@ V(Script, JSFunction) \ V(UnboundScript, SharedFunctionInfo) \ V(Function, JSFunction) \ - V(Message, JSObject) \ + V(Message, JSMessageObject) \ V(Context, Context) \ - V(External, Foreign) \ + V(External, Object) \ V(StackTrace, JSArray) \ 
V(StackFrame, JSObject) \ V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor) @@ -287,7 +264,7 @@ template<class From, class To> static inline Local<To> Convert(v8::internal::Handle<From> obj) { - ASSERT(obj.is_null() || !obj->IsTheHole()); + DCHECK(obj.is_null() || !obj->IsTheHole()); return Local<To>(reinterpret_cast<To*>(obj.location())); } @@ -348,7 +325,7 @@ #define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \ Local<v8::Type##Array> Utils::ToLocal##Type##Array( \ v8::internal::Handle<v8::internal::JSTypedArray> obj) { \ - ASSERT(obj->type() == kExternal##Type##Array); \ + DCHECK(obj->type() == kExternal##Type##Array); \ return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \ } @@ -393,8 +370,7 @@ const v8::From* that, bool allow_empty_handle) { \ EXTRA_CHECK(allow_empty_handle || that != NULL); \ EXTRA_CHECK(that == NULL || \ - !(*reinterpret_cast<v8::internal::To**>( \ - const_cast<v8::From*>(that)))->IsFailure()); \ + (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \ return v8::internal::Handle<v8::internal::To>( \ reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \ } @@ -558,7 +534,7 @@ Isolate* isolate() const { return isolate_; } void ReturnBlock(Object** block) { - ASSERT(block != NULL); + DCHECK(block != NULL); if (spare_ != NULL) DeleteArray(spare_); spare_ = block; } @@ -574,9 +550,9 @@ } void Free() { - ASSERT(blocks_.length() == 0); - ASSERT(entered_contexts_.length() == 0); - ASSERT(saved_contexts_.length() == 0); + DCHECK(blocks_.length() == 0); + DCHECK(entered_contexts_.length() == 0); + DCHECK(saved_contexts_.length() == 0); blocks_.Free(); entered_contexts_.Free(); saved_contexts_.Free(); @@ -584,7 +560,7 @@ DeleteArray(spare_); spare_ = NULL; } - ASSERT(call_depth_ == 0); + DCHECK(call_depth_ == 0); } void BeginDeferredScope(); @@ -687,7 +663,7 @@ } spare_ = block_start; } - ASSERT((blocks_.is_empty() && prev_limit == NULL) || + DCHECK((blocks_.is_empty() && prev_limit 
== NULL) || (!blocks_.is_empty() && prev_limit != NULL)); } diff -Nru nodejs-0.11.13/deps/v8/src/apinatives.js nodejs-0.11.15/deps/v8/src/apinatives.js --- nodejs-0.11.13/deps/v8/src/apinatives.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/apinatives.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,8 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // This file contains infrastructure used by the API. See // v8natives.js for an explanation of these files are processed and @@ -51,10 +30,16 @@ var Constructor = %GetTemplateField(data, kApiConstructorOffset); // Note: Do not directly use a function template as a condition, our // internal ToBoolean doesn't handle that! - var result = typeof Constructor === 'undefined' ? - {} : new (Instantiate(Constructor))(); - ConfigureTemplateInstance(result, data); - result = %ToFastProperties(result); + var result; + if (typeof Constructor === 'undefined') { + result = {}; + ConfigureTemplateInstance(result, data); + } else { + // ConfigureTemplateInstance is implicitly called before calling the API + // constructor in HandleApiCall. + result = new (Instantiate(Constructor))(); + result = %ToFastProperties(result); + } return result; default: throw 'Unknown API tag <' + tag + '>'; @@ -71,31 +56,25 @@ (serialNumber in cache) && (cache[serialNumber] != kUninitialized); if (!isFunctionCached) { try { - var fun = %CreateApiFunction(data); - if (name) %FunctionSetName(fun, name); var flags = %GetTemplateField(data, kApiFlagOffset); - var doNotCache = flags & (1 << kDoNotCacheBit); - if (!doNotCache) cache[serialNumber] = fun; - if (flags & (1 << kRemovePrototypeBit)) { - %FunctionRemovePrototype(fun); - } else { - var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset); - // Note: Do not directly use an object template as a condition, our - // internal ToBoolean doesn't handle that! - fun.prototype = typeof prototype === 'undefined' ? 
- {} : Instantiate(prototype); - if (flags & (1 << kReadOnlyPrototypeBit)) { - %FunctionSetReadOnlyPrototype(fun); - } - %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM); + var prototype; + if (!(flags & (1 << kRemovePrototypeBit))) { + var template = %GetTemplateField(data, kApiPrototypeTemplateOffset); + prototype = typeof template === 'undefined' + ? {} : Instantiate(template); + var parent = %GetTemplateField(data, kApiParentTemplateOffset); // Note: Do not directly use a function template as a condition, our // internal ToBoolean doesn't handle that! - if (!(typeof parent === 'undefined')) { + if (typeof parent !== 'undefined') { var parent_fun = Instantiate(parent); - %SetPrototype(fun.prototype, parent_fun.prototype); + %InternalSetPrototype(prototype, parent_fun.prototype); } } + var fun = %CreateApiFunction(data, prototype); + if (name) %FunctionSetName(fun, name); + var doNotCache = flags & (1 << kDoNotCacheBit); + if (!doNotCache) cache[serialNumber] = fun; ConfigureTemplateInstance(fun, data); if (doNotCache) return fun; } catch (e) { @@ -120,15 +99,15 @@ var prop_data = properties[i + 2]; var attributes = properties[i + 3]; var value = Instantiate(prop_data, name); - %SetProperty(obj, name, value, attributes); - } else if (length == 5) { + %AddPropertyForTemplate(obj, name, value, attributes); + } else if (length == 4 || length == 5) { + // TODO(verwaest): The 5th value used to be access_control. Remove once + // the bindings are updated. 
var name = properties[i + 1]; var getter = properties[i + 2]; var setter = properties[i + 3]; var attribute = properties[i + 4]; - var access_control = properties[i + 5]; - %SetAccessorProperty( - obj, name, getter, setter, attribute, access_control); + %DefineApiAccessorProperty(obj, name, getter, setter, attribute); } else { throw "Bad properties array"; } diff -Nru nodejs-0.11.13/deps/v8/src/arguments.cc nodejs-0.11.15/deps/v8/src/arguments.cc --- nodejs-0.11.13/deps/v8/src/arguments.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arguments.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" -#include "arguments.h" +#include "src/v8.h" -#include "vm-state-inl.h" +#include "src/arguments.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arguments.h nodejs-0.11.15/deps/v8/src/arguments.h --- nodejs-0.11.13/deps/v8/src/arguments.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arguments.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARGUMENTS_H_ #define V8_ARGUMENTS_H_ -#include "allocation.h" +#include "src/allocation.h" +#include "src/isolate.h" namespace v8 { namespace internal { @@ -44,6 +22,9 @@ // Object* Runtime_function(Arguments args) { // ... use args[i] here ... // } +// +// Note that length_ (whose value is in the integer range) is defined +// as intptr_t to provide endian-neutrality on 64-bit archs. class Arguments BASE_EMBEDDED { public: @@ -51,7 +32,7 @@ : length_(length), arguments_(arguments) { } Object*& operator[] (int index) { - ASSERT(0 <= index && index < length_); + DCHECK(0 <= index && index < length_); return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) - index * kPointerSize)); } @@ -73,12 +54,12 @@ } // Get the total number of arguments including the receiver. 
- int length() const { return length_; } + int length() const { return static_cast<int>(length_); } Object** arguments() { return arguments_; } private: - int length_; + intptr_t length_; Object** arguments_; }; @@ -195,8 +176,8 @@ values[T::kReturnValueDefaultValueIndex] = isolate->heap()->the_hole_value(); values[T::kReturnValueIndex] = isolate->heap()->the_hole_value(); - ASSERT(values[T::kHolderIndex]->IsHeapObject()); - ASSERT(values[T::kIsolateIndex]->IsSmi()); + DCHECK(values[T::kHolderIndex]->IsHeapObject()); + DCHECK(values[T::kIsolateIndex]->IsSmi()); } /* @@ -267,9 +248,9 @@ values[T::kReturnValueDefaultValueIndex] = isolate->heap()->the_hole_value(); values[T::kReturnValueIndex] = isolate->heap()->the_hole_value(); - ASSERT(values[T::kCalleeIndex]->IsJSFunction()); - ASSERT(values[T::kHolderIndex]->IsHeapObject()); - ASSERT(values[T::kIsolateIndex]->IsSmi()); + DCHECK(values[T::kCalleeIndex]->IsJSFunction()); + DCHECK(values[T::kHolderIndex]->IsHeapObject()); + DCHECK(values[T::kIsolateIndex]->IsSmi()); } /* @@ -299,18 +280,23 @@ #endif -#define DECLARE_RUNTIME_FUNCTION(Type, Name) \ -Type Name(int args_length, Object** args_object, Isolate* isolate) +#define DECLARE_RUNTIME_FUNCTION(Name) \ +Object* Name(int args_length, Object** args_object, Isolate* isolate) -#define RUNTIME_FUNCTION(Type, Name) \ -static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \ -Type Name(int args_length, Object** args_object, Isolate* isolate) { \ - CLOBBER_DOUBLE_REGISTERS(); \ - Arguments args(args_length, args_object); \ - return __RT_impl_##Name(args, isolate); \ -} \ +#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \ +static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \ +Type Name(int args_length, Object** args_object, Isolate* isolate) { \ + CLOBBER_DOUBLE_REGISTERS(); \ + Arguments args(args_length, args_object); \ + return __RT_impl_##Name(args, isolate); \ +} \ static Type __RT_impl_##Name(Arguments args, Isolate* isolate) + 
+#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name) +#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \ + RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name) + #define RUNTIME_ARGUMENTS(isolate, args) \ args.length(), args.arguments(), isolate diff -Nru nodejs-0.11.13/deps/v8/src/arm/assembler-arm.cc nodejs-0.11.15/deps/v8/src/arm/assembler-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/assembler-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/assembler-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -34,32 +34,18 @@ // modified significantly by Google Inc. // Copyright 2012 the V8 project authors. All rights reserved. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "arm/assembler-arm-inl.h" -#include "macro-assembler.h" -#include "serialize.h" +#include "src/arm/assembler-arm-inl.h" +#include "src/base/cpu.h" +#include "src/macro-assembler.h" +#include "src/serialize.h" namespace v8 { namespace internal { -#ifdef DEBUG -bool CpuFeatures::initialized_ = false; -#endif -unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; -unsigned CpuFeatures::cross_compile_ = 0; -unsigned CpuFeatures::cache_line_size_ = 64; - - -ExternalReference ExternalReference::cpu_features() { - ASSERT(CpuFeatures::initialized_); - return ExternalReference(&CpuFeatures::supported_); -} - - // Get the CPU features enabled by the build. 
For cross compilation the // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS // can be defined to enable ARMv7 and VFPv3 instructions when building the @@ -67,19 +53,16 @@ static unsigned CpuFeaturesImpliedByCompiler() { unsigned answer = 0; #ifdef CAN_USE_ARMV7_INSTRUCTIONS - if (FLAG_enable_armv7) { - answer |= 1u << ARMv7; - } + if (FLAG_enable_armv7) answer |= 1u << ARMv7; #endif // CAN_USE_ARMV7_INSTRUCTIONS #ifdef CAN_USE_VFP3_INSTRUCTIONS - if (FLAG_enable_vfp3) { - answer |= 1u << VFP3 | 1u << ARMv7; - } + if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7; #endif // CAN_USE_VFP3_INSTRUCTIONS #ifdef CAN_USE_VFP32DREGS - if (FLAG_enable_32dregs) { - answer |= 1u << VFP32DREGS; - } + if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS; +#endif // CAN_USE_VFP32DREGS +#ifdef CAN_USE_NEON + if (FLAG_enable_neon) answer |= 1u << NEON; #endif // CAN_USE_VFP32DREGS if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) { answer |= 1u << UNALIGNED_ACCESSES; @@ -89,178 +72,112 @@ } -const char* DwVfpRegister::AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < NumAllocatableRegisters()); - ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() == - kNumReservedRegisters - 1); - if (index >= kDoubleRegZero.code()) - index += kNumReservedRegisters; - - return VFPRegisters::Name(index, true); -} - +void CpuFeatures::ProbeImpl(bool cross_compile) { + supported_ |= CpuFeaturesImpliedByCompiler(); + cache_line_size_ = 64; -void CpuFeatures::Probe() { - uint64_t standard_features = static_cast<unsigned>( - OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler(); - ASSERT(supported_ == 0 || supported_ == standard_features); -#ifdef DEBUG - initialized_ = true; -#endif - - // Get the features implied by the OS and the compiler settings. This is the - // minimal set of features which is also alowed for generated code in the - // snapshot. 
- supported_ |= standard_features; - - if (Serializer::enabled()) { - // No probing for features if we might serialize (generate snapshot). - printf(" "); - PrintFeatures(); - return; - } + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; #ifndef __arm__ - // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is - // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. - if (FLAG_enable_vfp3) { - supported_ |= - static_cast<uint64_t>(1) << VFP3 | - static_cast<uint64_t>(1) << ARMv7; - } - if (FLAG_enable_neon) { - supported_ |= 1u << NEON; - } - // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled + // For the simulator build, use whatever the flags specify. if (FLAG_enable_armv7) { - supported_ |= static_cast<uint64_t>(1) << ARMv7; - } - - if (FLAG_enable_sudiv) { - supported_ |= static_cast<uint64_t>(1) << SUDIV; - } - - if (FLAG_enable_movw_movt) { - supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS; - } - - if (FLAG_enable_32dregs) { - supported_ |= static_cast<uint64_t>(1) << VFP32DREGS; - } - - if (FLAG_enable_unaligned_accesses) { - supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES; + supported_ |= 1u << ARMv7; + if (FLAG_enable_vfp3) supported_ |= 1u << VFP3; + if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS; + if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV; + if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS; } + if (FLAG_enable_mls) supported_ |= 1u << MLS; + if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES; #else // __arm__ - // Probe for additional features not already known to be available. - CPU cpu; - if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) { + // Probe for additional features at runtime. 
+ base::CPU cpu; + if (FLAG_enable_vfp3 && cpu.has_vfp3()) { // This implementation also sets the VFP flags if runtime // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI // 0406B, page A1-6. - found_by_runtime_probing_only_ |= - static_cast<uint64_t>(1) << VFP3 | - static_cast<uint64_t>(1) << ARMv7; - } - - if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) { - found_by_runtime_probing_only_ |= 1u << NEON; - } - - if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) { - found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7; - } - - if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) { - found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV; - } - - if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses - && cpu.architecture() >= 7) { - found_by_runtime_probing_only_ |= - static_cast<uint64_t>(1) << UNALIGNED_ACCESSES; + supported_ |= 1u << VFP3 | 1u << ARMv7; } - // Use movw/movt for QUALCOMM ARMv7 cores. - if (cpu.implementer() == CPU::QUALCOMM && - cpu.architecture() >= 7 && - FLAG_enable_movw_movt) { - found_by_runtime_probing_only_ |= - static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS; + if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON; + if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV; + if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS; + + if (cpu.architecture() >= 7) { + if (FLAG_enable_armv7) supported_ |= 1u << ARMv7; + if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES; + // Use movw/movt for QUALCOMM ARMv7 cores. + if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) { + supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + } } // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines. 
- if (cpu.implementer() == CPU::ARM && - (cpu.part() == CPU::ARM_CORTEX_A5 || - cpu.part() == CPU::ARM_CORTEX_A9)) { + if (cpu.implementer() == base::CPU::ARM && + (cpu.part() == base::CPU::ARM_CORTEX_A5 || + cpu.part() == base::CPU::ARM_CORTEX_A9)) { cache_line_size_ = 32; } - if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) { - found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS; - } - - supported_ |= found_by_runtime_probing_only_; + if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS; #endif - // Assert that VFP3 implies ARMv7. - ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7)); + DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7)); } void CpuFeatures::PrintTarget() { const char* arm_arch = NULL; - const char* arm_test = ""; + const char* arm_target_type = ""; + const char* arm_no_probe = ""; const char* arm_fpu = ""; const char* arm_thumb = ""; const char* arm_float_abi = NULL; +#if !defined __arm__ + arm_target_type = " simulator"; +#endif + +#if defined ARM_TEST_NO_FEATURE_PROBE + arm_no_probe = " noprobe"; +#endif + #if defined CAN_USE_ARMV7_INSTRUCTIONS arm_arch = "arm v7"; #else arm_arch = "arm v6"; #endif -#ifdef __arm__ - -# ifdef ARM_TEST - arm_test = " test"; -# endif -# if defined __ARM_NEON__ +#if defined CAN_USE_NEON arm_fpu = " neon"; -# elif defined CAN_USE_VFP3_INSTRUCTIONS - arm_fpu = " vfp3"; -# else - arm_fpu = " vfp2"; -# endif -# if (defined __thumb__) || (defined __thumb2__) - arm_thumb = " thumb"; -# endif - arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp"; - -#else // __arm__ - - arm_test = " simulator"; -# if defined CAN_USE_VFP3_INSTRUCTIONS +#elif defined CAN_USE_VFP3_INSTRUCTIONS # if defined CAN_USE_VFP32DREGS arm_fpu = " vfp3"; # else arm_fpu = " vfp3-d16"; # endif -# else +#else arm_fpu = " vfp2"; -# endif -# if USE_EABI_HARDFLOAT == 1 +#endif + +#ifdef __arm__ + arm_float_abi = base::OS::ArmUsingHardFloat() ? 
"hard" : "softfp"; +#elif USE_EABI_HARDFLOAT arm_float_abi = "hard"; -# else +#else arm_float_abi = "softfp"; -# endif +#endif -#endif // __arm__ +#if defined __arm__ && (defined __thumb__) || (defined __thumb2__) + arm_thumb = " thumb"; +#endif - printf("target%s %s%s%s %s\n", - arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi); + printf("target%s%s %s%s%s %s\n", + arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb, + arm_float_abi); } @@ -276,7 +193,7 @@ CpuFeatures::IsSupported(UNALIGNED_ACCESSES), CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS)); #ifdef __arm__ - bool eabi_hardfloat = OS::ArmUsingHardFloat(); + bool eabi_hardfloat = base::OS::ArmUsingHardFloat(); #elif USE_EABI_HARDFLOAT bool eabi_hardfloat = true; #else @@ -287,6 +204,18 @@ // ----------------------------------------------------------------------------- +// Implementation of DwVfpRegister + +const char* DwVfpRegister::AllocationIndexToString(int index) { + DCHECK(index >= 0 && index < NumAllocatableRegisters()); + DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() == + kNumReservedRegisters - 1); + if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters; + return VFPRegisters::Name(index, true); +} + + +// ----------------------------------------------------------------------------- // Implementation of RelocInfo const int RelocInfo::kApplyMask = 0; @@ -302,11 +231,7 @@ bool RelocInfo::IsInConstantPool() { - if (FLAG_enable_ool_constant_pool) { - return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)); - } else { - return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)); - } + return Assembler::is_constant_pool_load(pc_); } @@ -319,7 +244,7 @@ } // Indicate that code has changed. - CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); + CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); } @@ -341,7 +266,7 @@ // Verify all Objects referred by code are NOT in new space. 
Object* obj = *handle; if (obj->IsHeapObject()) { - ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); + DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); imm32_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } else { @@ -353,7 +278,7 @@ Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { - ASSERT(is_uint5(shift_imm)); + DCHECK(is_uint5(shift_imm)); rm_ = rm; rs_ = no_reg; @@ -366,7 +291,7 @@ shift_op = LSL; } else if (shift_op == RRX) { // encoded as ROR with shift_imm == 0 - ASSERT(shift_imm == 0); + DCHECK(shift_imm == 0); shift_op_ = ROR; shift_imm_ = 0; } @@ -374,7 +299,7 @@ Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { - ASSERT(shift_op != RRX); + DCHECK(shift_op != RRX); rm_ = rm; rs_ = no_reg; shift_op_ = shift_op; @@ -401,7 +326,7 @@ MemOperand::MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm, AddrMode am) { - ASSERT(is_uint5(shift_imm)); + DCHECK(is_uint5(shift_imm)); rn_ = rn; rm_ = rm; shift_op_ = shift_op; @@ -411,7 +336,7 @@ NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) { - ASSERT((am == Offset) || (am == PostIndex)); + DCHECK((am == Offset) || (am == PostIndex)); rn_ = rn; rm_ = (am == Offset) ? pc : sp; SetAlignment(align); @@ -473,10 +398,6 @@ // ----------------------------------------------------------------------------- // Specific instructions, constants, and masks. -// add(sp, sp, 4) instruction (aka Pop()) -const Instr kPopInstruction = - al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 | - kRegister_sp_Code * B12; // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) // register r is not encoded. const Instr kPushRegPattern = @@ -485,14 +406,15 @@ // register r is not encoded. 
const Instr kPopRegPattern = al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16; -// mov lr, pc -const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12; // ldr rd, [pc, #offset] -const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16; -const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16; +const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16; +const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16; // ldr rd, [pp, #offset] -const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16; -const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16; +const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16; +const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16; +// ldr rd, [pp, rn] +const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16; +const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16; // vldr dd, [pc, #offset] const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8; const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8; @@ -510,8 +432,8 @@ const Instr kMovMvnFlip = B22; const Instr kMovLeaveCCMask = 0xdff * B16; const Instr kMovLeaveCCPattern = 0x1a0 * B16; -const Instr kMovwMask = 0xff * B20; const Instr kMovwPattern = 0x30 * B20; +const Instr kMovtPattern = 0x34 * B20; const Instr kMovwLeaveCCFlip = 0x5 * B21; const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; const Instr kCmpCmnPattern = 0x15 * B20; @@ -529,8 +451,6 @@ const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | kRegister_fp_Code * B16; const Instr kLdrStrInstrTypeMask = 0xffff0000; -const Instr kLdrStrInstrArgumentMask = 0x0000ffff; -const Instr kLdrStrOffsetMask = 0x00000fff; Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) @@ -548,13 +468,12 @@ first_const_pool_64_use_ = -1; last_bound_pos_ = 0; constant_pool_available_ = !FLAG_enable_ool_constant_pool; - constant_pool_full_ = false; ClearRecordedAstId(); } Assembler::~Assembler() 
{ - ASSERT(const_pool_blocked_nesting_ == 0); + DCHECK(const_pool_blocked_nesting_ == 0); } @@ -562,8 +481,8 @@ if (!FLAG_enable_ool_constant_pool) { // Emit constant pool if necessary. CheckConstPool(true, false); - ASSERT(num_pending_32_bit_reloc_info_ == 0); - ASSERT(num_pending_64_bit_reloc_info_ == 0); + DCHECK(num_pending_32_bit_reloc_info_ == 0); + DCHECK(num_pending_64_bit_reloc_info_ == 0); } // Set up code descriptor. desc->buffer = buffer_; @@ -575,7 +494,7 @@ void Assembler::Align(int m) { - ASSERT(m >= 4 && IsPowerOf2(m)); + DCHECK(m >= 4 && IsPowerOf2(m)); while ((pc_offset() & (m - 1)) != 0) { nop(); } @@ -599,7 +518,7 @@ int Assembler::GetBranchOffset(Instr instr) { - ASSERT(IsBranch(instr)); + DCHECK(IsBranch(instr)); // Take the jump offset in the lower 24 bits, sign extend it and multiply it // with 4 to get the offset in bytes. return ((instr & kImm24Mask) << 8) >> 6; @@ -617,7 +536,7 @@ int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { - ASSERT(IsLdrRegisterImmediate(instr)); + DCHECK(IsLdrRegisterImmediate(instr)); bool positive = (instr & B23) == B23; int offset = instr & kOff12Mask; // Zero extended offset. return positive ? offset : -offset; @@ -625,7 +544,7 @@ int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) { - ASSERT(IsVldrDRegisterImmediate(instr)); + DCHECK(IsVldrDRegisterImmediate(instr)); bool positive = (instr & B23) == B23; int offset = instr & kOff8Mask; // Zero extended offset. offset <<= 2; @@ -634,10 +553,10 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { - ASSERT(IsLdrRegisterImmediate(instr)); + DCHECK(IsLdrRegisterImmediate(instr)); bool positive = offset >= 0; if (!positive) offset = -offset; - ASSERT(is_uint12(offset)); + DCHECK(is_uint12(offset)); // Set bit indicating whether the offset should be added. instr = (instr & ~B23) | (positive ? B23 : 0); // Set the actual offset. 
@@ -646,11 +565,11 @@ Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) { - ASSERT(IsVldrDRegisterImmediate(instr)); - ASSERT((offset & ~3) == offset); // Must be 64-bit aligned. + DCHECK(IsVldrDRegisterImmediate(instr)); + DCHECK((offset & ~3) == offset); // Must be 64-bit aligned. bool positive = offset >= 0; if (!positive) offset = -offset; - ASSERT(is_uint10(offset)); + DCHECK(is_uint10(offset)); // Set bit indicating whether the offset should be added. instr = (instr & ~B23) | (positive ? B23 : 0); // Set the actual offset. Its bottom 2 bits are zero. @@ -664,10 +583,10 @@ Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) { - ASSERT(IsStrRegisterImmediate(instr)); + DCHECK(IsStrRegisterImmediate(instr)); bool positive = offset >= 0; if (!positive) offset = -offset; - ASSERT(is_uint12(offset)); + DCHECK(is_uint12(offset)); // Set bit indicating whether the offset should be added. instr = (instr & ~B23) | (positive ? B23 : 0); // Set the actual offset. @@ -681,9 +600,9 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) { - ASSERT(IsAddRegisterImmediate(instr)); - ASSERT(offset >= 0); - ASSERT(is_uint12(offset)); + DCHECK(IsAddRegisterImmediate(instr)); + DCHECK(offset >= 0); + DCHECK(is_uint12(offset)); // Set the offset. return (instr & ~kOff12Mask) | offset; } @@ -710,6 +629,24 @@ } +Instr Assembler::GetConsantPoolLoadPattern() { + if (FLAG_enable_ool_constant_pool) { + return kLdrPpImmedPattern; + } else { + return kLdrPCImmedPattern; + } +} + + +Instr Assembler::GetConsantPoolLoadMask() { + if (FLAG_enable_ool_constant_pool) { + return kLdrPpImmedMask; + } else { + return kLdrPCImmedMask; + } +} + + bool Assembler::IsPush(Instr instr) { return ((instr & ~kRdMask) == kPushRegPattern); } @@ -743,17 +680,27 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) { // Check the instruction is indeed a // ldr<cond> <Rd>, [pc +/- offset_12]. 
- return (instr & kLdrPCMask) == kLdrPCPattern; + return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern; } bool Assembler::IsLdrPpImmediateOffset(Instr instr) { // Check the instruction is indeed a // ldr<cond> <Rd>, [pp +/- offset_12]. - return (instr & kLdrPpMask) == kLdrPpPattern; + return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern; } +bool Assembler::IsLdrPpRegOffset(Instr instr) { + // Check the instruction is indeed a + // ldr<cond> <Rd>, [pp, +/- <Rm>]. + return (instr & kLdrPpRegMask) == kLdrPpRegPattern; +} + + +Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; } + + bool Assembler::IsVldrDPcImmediateOffset(Instr instr) { // Check the instruction is indeed a // vldr<cond> <Dd>, [pc +/- offset_10]. @@ -768,6 +715,20 @@ } +bool Assembler::IsBlxReg(Instr instr) { + // Check the instruction is indeed a + // blxcc <Rm> + return (instr & kBlxRegMask) == kBlxRegPattern; +} + + +bool Assembler::IsBlxIp(Instr instr) { + // Check the instruction is indeed a + // blx ip + return instr == kBlxIp; +} + + bool Assembler::IsTstImmediate(Instr instr) { return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | TST | S); @@ -787,13 +748,13 @@ Register Assembler::GetCmpImmediateRegister(Instr instr) { - ASSERT(IsCmpImmediate(instr)); + DCHECK(IsCmpImmediate(instr)); return GetRn(instr); } int Assembler::GetCmpImmediateRawImmediate(Instr instr) { - ASSERT(IsCmpImmediate(instr)); + DCHECK(IsCmpImmediate(instr)); return instr & kOff12Mask; } @@ -816,13 +777,13 @@ // same position. -int Assembler::target_at(int pos) { +int Assembler::target_at(int pos) { Instr instr = instr_at(pos); if (is_uint24(instr)) { // Emitted link to a label, not part of a branch. 
return instr; } - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 + DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 int imm26 = ((instr & kImm24Mask) << 8) >> 6; if ((Instruction::ConditionField(instr) == kSpecialCondition) && ((instr & B24) != 0)) { @@ -836,7 +797,7 @@ void Assembler::target_at_put(int pos, int target_pos) { Instr instr = instr_at(pos); if (is_uint24(instr)) { - ASSERT(target_pos == pos || target_pos >= 0); + DCHECK(target_pos == pos || target_pos >= 0); // Emitted link to a label, not part of a branch. // Load the position of the label relative to the generated code object // pointer in a register. @@ -853,9 +814,9 @@ // We extract the destination register from the emitted nop instruction. Register dst = Register::from_code( Instruction::RmValue(instr_at(pos + kInstrSize))); - ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code())); + DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code())); uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag); - ASSERT(is_uint24(target24)); + DCHECK(is_uint24(target24)); if (is_uint8(target24)) { // If the target fits in a byte then only patch with a mov // instruction. 
@@ -904,17 +865,17 @@ return; } int imm26 = target_pos - (pos + kPcLoadDelta); - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 + DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 if (Instruction::ConditionField(instr) == kSpecialCondition) { // blx uses bit 24 to encode bit 2 of imm26 - ASSERT((imm26 & 1) == 0); + DCHECK((imm26 & 1) == 0); instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24; } else { - ASSERT((imm26 & 3) == 0); + DCHECK((imm26 & 3) == 0); instr &= ~kImm24Mask; } int imm24 = imm26 >> 2; - ASSERT(is_int24(imm24)); + DCHECK(is_int24(imm24)); instr_at_put(pos, instr | (imm24 & kImm24Mask)); } @@ -933,7 +894,7 @@ if ((instr & ~kImm24Mask) == 0) { PrintF("value\n"); } else { - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx + DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx Condition cond = Instruction::ConditionField(instr); const char* b; const char* c; @@ -978,7 +939,7 @@ void Assembler::bind_to(Label* L, int pos) { - ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position + DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position while (L->is_linked()) { int fixup_pos = L->pos(); next(L); // call next before overwriting link with target at fixup_pos @@ -994,20 +955,20 @@ void Assembler::bind(Label* L) { - ASSERT(!L->is_bound()); // label can only be bound once + DCHECK(!L->is_bound()); // label can only be bound once bind_to(L, pc_offset()); } void Assembler::next(Label* L) { - ASSERT(L->is_linked()); + DCHECK(L->is_linked()); int link = target_at(L->pos()); if (link == L->pos()) { // Branch target points to the same instuction. This is the end of the link // chain. 
L->Unuse(); } else { - ASSERT(link >= 0); + DCHECK(link >= 0); L->link_to(link); } } @@ -1041,7 +1002,7 @@ if (CpuFeatures::IsSupported(ARMv7)) { if (imm32 < 0x10000) { *instr ^= kMovwLeaveCCFlip; - *instr |= EncodeMovwImmediate(imm32); + *instr |= Assembler::EncodeMovwImmediate(imm32); *rotate_imm = *immed_8 = 0; // Not used for movw. return true; } @@ -1079,13 +1040,8 @@ // encoded. bool Operand::must_output_reloc_info(const Assembler* assembler) const { if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif // def DEBUG if (assembler != NULL && assembler->predictable_code_size()) return true; - return Serializer::enabled(); + return assembler->serializer_enabled(); } else if (RelocInfo::IsNone(rmode_)) { return false; } @@ -1095,10 +1051,10 @@ static bool use_mov_immediate_load(const Operand& x, const Assembler* assembler) { - if (assembler != NULL && !assembler->can_use_constant_pool()) { + if (assembler != NULL && !assembler->is_constant_pool_available()) { // If there is no constant pool available, we must use an mov immediate. // TODO(rmcilroy): enable ARMv6 support. - ASSERT(CpuFeatures::IsSupported(ARMv7)); + DCHECK(CpuFeatures::IsSupported(ARMv7)); return true; } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && (assembler == NULL || !assembler->predictable_code_size())) { @@ -1114,28 +1070,35 @@ } -bool Operand::is_single_instruction(const Assembler* assembler, - Instr instr) const { - if (rm_.is_valid()) return true; +int Operand::instructions_required(const Assembler* assembler, + Instr instr) const { + if (rm_.is_valid()) return 1; uint32_t dummy1, dummy2; if (must_output_reloc_info(assembler) || !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { // The immediate operand cannot be encoded as a shifter operand, or use of - // constant pool is required. 
For a mov instruction not setting the - // condition code additional instruction conventions can be used. - if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - return !use_mov_immediate_load(*this, assembler); + // constant pool is required. First account for the instructions required + // for the constant pool or immediate load + int instructions; + if (use_mov_immediate_load(*this, assembler)) { + instructions = 2; // A movw, movt immediate load. + } else if (assembler != NULL && assembler->use_extended_constant_pool()) { + instructions = 3; // An extended constant pool load. } else { - // If this is not a mov or mvn instruction there will always an additional - // instructions - either mov or ldr. The mov might actually be two - // instructions mov or movw followed by movt so including the actual - // instruction two or three instructions will be generated. - return false; + instructions = 1; // A small constant pool load. } + + if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set + // For a mov or mvn instruction which doesn't set the condition + // code, the constant pool or immediate load is enough, otherwise we need + // to account for the actual instruction being requested. + instructions += 1; + } + return instructions; } else { // No use of constant pool and the immediate operand can be encoded as a // shifter operand. - return true; + return 1; } } @@ -1151,21 +1114,32 @@ if (use_mov_immediate_load(x, this)) { Register target = rd.code() == pc.code() ? ip : rd; // TODO(rmcilroy): add ARMv6 support for immediate loads. - ASSERT(CpuFeatures::IsSupported(ARMv7)); - if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) { + DCHECK(CpuFeatures::IsSupported(ARMv7)); + if (!FLAG_enable_ool_constant_pool && + x.must_output_reloc_info(this)) { // Make sure the movw/movt doesn't get separated. 
BlockConstPoolFor(2); } - emit(cond | 0x30*B20 | target.code()*B12 | - EncodeMovwImmediate(x.imm32_ & 0xffff)); + movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond); movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond); if (target.code() != rd.code()) { mov(rd, target, LeaveCC, cond); } } else { - ASSERT(can_use_constant_pool()); - ConstantPoolAddEntry(rinfo); - ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); + DCHECK(is_constant_pool_available()); + ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); + if (section == ConstantPoolArray::EXTENDED_SECTION) { + DCHECK(FLAG_enable_ool_constant_pool); + Register target = rd.code() == pc.code() ? ip : rd; + // Emit instructions to load constant pool offset. + movw(target, 0, cond); + movt(target, 0, cond); + // Load from constant pool at offset. + ldr(rd, MemOperand(pp, target), cond); + } else { + DCHECK(section == ConstantPoolArray::SMALL_SECTION); + ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); + } } } @@ -1175,7 +1149,7 @@ Register rd, const Operand& x) { CheckBuffer(); - ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0); + DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0); if (!x.rm_.is_valid()) { // Immediate. uint32_t rotate_imm; @@ -1202,7 +1176,7 @@ instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); } else { // Register shift. - ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); + DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); } emit(instr | rn.code()*B16 | rd.code()*B12); @@ -1214,7 +1188,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { - ASSERT((instr & ~(kCondMask | B | L)) == B26); + DCHECK((instr & ~(kCondMask | B | L)) == B26); int am = x.am_; if (!x.rm_.is_valid()) { // Immediate offset. 
@@ -1226,28 +1200,28 @@ if (!is_uint12(offset_12)) { // Immediate offset cannot be encoded, load it first to register ip // rn (and rd in a load) should never be ip, or will be trashed. - ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); + DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); return; } - ASSERT(offset_12 >= 0); // no masking needed + DCHECK(offset_12 >= 0); // no masking needed instr |= offset_12; } else { // Register offset (shift_imm_ and shift_op_ are 0) or scaled // register offset the constructors make sure than both shift_imm_ // and shift_op_ are initialized. - ASSERT(!x.rm_.is(pc)); + DCHECK(!x.rm_.is(pc)); instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); } - ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback + DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); } void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { - ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); - ASSERT(x.rn_.is_valid()); + DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); + DCHECK(x.rn_.is_valid()); int am = x.am_; if (!x.rm_.is_valid()) { // Immediate offset. @@ -1259,60 +1233,60 @@ if (!is_uint8(offset_8)) { // Immediate offset cannot be encoded, load it first to register ip // rn (and rd in a load) should never be ip, or will be trashed. 
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); + DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); return; } - ASSERT(offset_8 >= 0); // no masking needed + DCHECK(offset_8 >= 0); // no masking needed instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); } else if (x.shift_imm_ != 0) { // Scaled register offset not supported, load index first // rn (and rd in a load) should never be ip, or will be trashed. - ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); + DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, Instruction::ConditionField(instr)); addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); return; } else { // Register offset. - ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback + DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback instr |= x.rm_.code(); } - ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback + DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); } void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { - ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27); - ASSERT(rl != 0); - ASSERT(!rn.is(pc)); + DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27); + DCHECK(rl != 0); + DCHECK(!rn.is(pc)); emit(instr | rn.code()*B16 | rl); } void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { // Unindexed addressing is not encoded by this function. 
- ASSERT_EQ((B27 | B26), + DCHECK_EQ((B27 | B26), (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L))); - ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); + DCHECK(x.rn_.is_valid() && !x.rm_.is_valid()); int am = x.am_; int offset_8 = x.offset_; - ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset + DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset offset_8 >>= 2; if (offset_8 < 0) { offset_8 = -offset_8; am ^= U; } - ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte - ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback + DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte + DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback // Post-indexed addressing requires W == 1; different than in addrmod2/3. if ((am & P) == 0) am |= W; - ASSERT(offset_8 >= 0); // no masking needed + DCHECK(offset_8 >= 0); // no masking needed emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8); } @@ -1341,9 +1315,9 @@ // Branch instructions. 
void Assembler::b(int branch_offset, Condition cond) { - ASSERT((branch_offset & 3) == 0); + DCHECK((branch_offset & 3) == 0); int imm24 = branch_offset >> 2; - ASSERT(is_int24(imm24)); + DCHECK(is_int24(imm24)); emit(cond | B27 | B25 | (imm24 & kImm24Mask)); if (cond == al) { @@ -1355,33 +1329,33 @@ void Assembler::bl(int branch_offset, Condition cond) { positions_recorder()->WriteRecordedPositions(); - ASSERT((branch_offset & 3) == 0); + DCHECK((branch_offset & 3) == 0); int imm24 = branch_offset >> 2; - ASSERT(is_int24(imm24)); + DCHECK(is_int24(imm24)); emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask)); } void Assembler::blx(int branch_offset) { // v5 and above positions_recorder()->WriteRecordedPositions(); - ASSERT((branch_offset & 1) == 0); + DCHECK((branch_offset & 1) == 0); int h = ((branch_offset & 2) >> 1)*B24; int imm24 = branch_offset >> 2; - ASSERT(is_int24(imm24)); + DCHECK(is_int24(imm24)); emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask)); } void Assembler::blx(Register target, Condition cond) { // v5 and above positions_recorder()->WriteRecordedPositions(); - ASSERT(!target.is(pc)); + DCHECK(!target.is(pc)); emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code()); } void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t positions_recorder()->WriteRecordedPositions(); - ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged + DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code()); } @@ -1453,7 +1427,7 @@ void Assembler::cmp_raw_immediate( Register src, int raw_immediate, Condition cond) { - ASSERT(is_uint12(raw_immediate)); + DCHECK(is_uint12(raw_immediate)); emit(cond | I | CMP | S | src.code() << 16 | raw_immediate); } @@ -1476,7 +1450,7 @@ // Don't allow nop instructions in the form mov rn, rn to be generated using // the mov instruction. 
They must be generated using nop(int/NopMarkerTypes) // or MarkCode(int/NopMarkerTypes) pseudo instructions. - ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); + DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); addrmod1(cond | MOV | s, r0, dst, src); } @@ -1509,7 +1483,7 @@ // // When the label gets bound: target_at extracts the link and target_at_put // patches the instructions. - ASSERT(is_uint24(link)); + DCHECK(is_uint24(link)); BlockConstPoolScope block_const_pool(this); emit(link); nop(dst.code()); @@ -1521,15 +1495,13 @@ void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { - ASSERT(immediate < 0x10000); - // May use movw if supported, but on unsupported platforms will try to use - // equivalent rotated immed_8 value and other tricks before falling back to a - // constant pool load. - mov(reg, Operand(immediate), LeaveCC, cond); + DCHECK(CpuFeatures::IsSupported(ARMv7)); + emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); } void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { + DCHECK(CpuFeatures::IsSupported(ARMv7)); emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); } @@ -1548,7 +1520,7 @@ // Multiply instructions. 
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, SBit s, Condition cond) { - ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); + DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1556,7 +1528,8 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA, Condition cond) { - ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); + DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); + DCHECK(IsEnabled(MLS)); emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1564,16 +1537,25 @@ void Assembler::sdiv(Register dst, Register src1, Register src2, Condition cond) { - ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(IsEnabled(SUDIV)); + DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(IsEnabled(SUDIV)); emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 | src2.code()*B8 | B4 | src1.code()); } +void Assembler::udiv(Register dst, Register src1, Register src2, + Condition cond) { + DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(IsEnabled(SUDIV)); + emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 | + src2.code() * B8 | B4 | src1.code()); +} + + void Assembler::mul(Register dst, Register src1, Register src2, SBit s, Condition cond) { - ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); // dst goes in bits 16-19 for this instruction! 
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1585,8 +1567,8 @@ Register src2, SBit s, Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); + DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(!dstL.is(dstH)); emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1598,8 +1580,8 @@ Register src2, SBit s, Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); + DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(!dstL.is(dstH)); emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1611,8 +1593,8 @@ Register src2, SBit s, Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); + DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(!dstL.is(dstH)); emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1624,8 +1606,8 @@ Register src2, SBit s, Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); + DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); + DCHECK(!dstL.is(dstH)); emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -1634,7 +1616,7 @@ // Miscellaneous arithmetic instructions. void Assembler::clz(Register dst, Register src, Condition cond) { // v5 and above. - ASSERT(!dst.is(pc) && !src.is(pc)); + DCHECK(!dst.is(pc) && !src.is(pc)); emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | 15*B8 | CLZ | src.code()); } @@ -1648,11 +1630,11 @@ const Operand& src, Condition cond) { // v6 and above. 
- ASSERT(CpuFeatures::IsSupported(ARMv7)); - ASSERT(!dst.is(pc) && !src.rm_.is(pc)); - ASSERT((satpos >= 0) && (satpos <= 31)); - ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); - ASSERT(src.rs_.is(no_reg)); + DCHECK(CpuFeatures::IsSupported(ARMv7)); + DCHECK(!dst.is(pc) && !src.rm_.is(pc)); + DCHECK((satpos >= 0) && (satpos <= 31)); + DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); + DCHECK(src.rs_.is(no_reg)); int sh = 0; if (src.shift_op_ == ASR) { @@ -1676,10 +1658,10 @@ int width, Condition cond) { // v7 and above. - ASSERT(CpuFeatures::IsSupported(ARMv7)); - ASSERT(!dst.is(pc) && !src.is(pc)); - ASSERT((lsb >= 0) && (lsb <= 31)); - ASSERT((width >= 1) && (width <= (32 - lsb))); + DCHECK(CpuFeatures::IsSupported(ARMv7)); + DCHECK(!dst.is(pc) && !src.is(pc)); + DCHECK((lsb >= 0) && (lsb <= 31)); + DCHECK((width >= 1) && (width <= (32 - lsb))); emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 | lsb*B7 | B6 | B4 | src.code()); } @@ -1696,10 +1678,10 @@ int width, Condition cond) { // v7 and above. - ASSERT(CpuFeatures::IsSupported(ARMv7)); - ASSERT(!dst.is(pc) && !src.is(pc)); - ASSERT((lsb >= 0) && (lsb <= 31)); - ASSERT((width >= 1) && (width <= (32 - lsb))); + DCHECK(CpuFeatures::IsSupported(ARMv7)); + DCHECK(!dst.is(pc) && !src.is(pc)); + DCHECK((lsb >= 0) && (lsb <= 31)); + DCHECK((width >= 1) && (width <= (32 - lsb))); emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 | lsb*B7 | B6 | B4 | src.code()); } @@ -1711,10 +1693,10 @@ // bfc dst, #lsb, #width void Assembler::bfc(Register dst, int lsb, int width, Condition cond) { // v7 and above. 
- ASSERT(CpuFeatures::IsSupported(ARMv7)); - ASSERT(!dst.is(pc)); - ASSERT((lsb >= 0) && (lsb <= 31)); - ASSERT((width >= 1) && (width <= (32 - lsb))); + DCHECK(CpuFeatures::IsSupported(ARMv7)); + DCHECK(!dst.is(pc)); + DCHECK((lsb >= 0) && (lsb <= 31)); + DCHECK((width >= 1) && (width <= (32 - lsb))); int msb = lsb + width - 1; emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf); } @@ -1730,10 +1712,10 @@ int width, Condition cond) { // v7 and above. - ASSERT(CpuFeatures::IsSupported(ARMv7)); - ASSERT(!dst.is(pc) && !src.is(pc)); - ASSERT((lsb >= 0) && (lsb <= 31)); - ASSERT((width >= 1) && (width <= (32 - lsb))); + DCHECK(CpuFeatures::IsSupported(ARMv7)); + DCHECK(!dst.is(pc) && !src.is(pc)); + DCHECK((lsb >= 0) && (lsb <= 31)); + DCHECK((width >= 1) && (width <= (32 - lsb))); int msb = lsb + width - 1; emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | src.code()); @@ -1747,13 +1729,13 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.125. // cond(31-28) | 01101000(27-20) | Rn(19-16) | // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0) - ASSERT(!dst.is(pc)); - ASSERT(!src1.is(pc)); - ASSERT(!src2.rm().is(pc)); - ASSERT(!src2.rm().is(no_reg)); - ASSERT(src2.rs().is(no_reg)); - ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31)); - ASSERT(src2.shift_op() == LSL); + DCHECK(!dst.is(pc)); + DCHECK(!src1.is(pc)); + DCHECK(!src2.rm().is(pc)); + DCHECK(!src2.rm().is(no_reg)); + DCHECK(src2.rs().is(no_reg)); + DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31)); + DCHECK(src2.shift_op() == LSL); emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 | src2.shift_imm_*B7 | B4 | src2.rm().code()); } @@ -1766,13 +1748,13 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.125. 
// cond(31-28) | 01101000(27-20) | Rn(19-16) | // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0) - ASSERT(!dst.is(pc)); - ASSERT(!src1.is(pc)); - ASSERT(!src2.rm().is(pc)); - ASSERT(!src2.rm().is(no_reg)); - ASSERT(src2.rs().is(no_reg)); - ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32)); - ASSERT(src2.shift_op() == ASR); + DCHECK(!dst.is(pc)); + DCHECK(!src1.is(pc)); + DCHECK(!src2.rm().is(pc)); + DCHECK(!src2.rm().is(no_reg)); + DCHECK(src2.rs().is(no_reg)); + DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32)); + DCHECK(src2.shift_op() == ASR); int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_; emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 | asr*B7 | B6 | B4 | src2.rm().code()); @@ -1785,16 +1767,16 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.274. // cond(31-28) | 01101110(27-20) | 1111(19-16) | // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0) - ASSERT(!dst.is(pc)); - ASSERT(!src.rm().is(pc)); - ASSERT(!src.rm().is(no_reg)); - ASSERT(src.rs().is(no_reg)); - ASSERT((src.shift_imm_ == 0) || + DCHECK(!dst.is(pc)); + DCHECK(!src.rm().is(pc)); + DCHECK(!src.rm().is(no_reg)); + DCHECK(src.rs().is(no_reg)); + DCHECK((src.shift_imm_ == 0) || (src.shift_imm_ == 8) || (src.shift_imm_ == 16) || (src.shift_imm_ == 24)); // Operand maps ROR #0 to LSL #0. - ASSERT((src.shift_op() == ROR) || + DCHECK((src.shift_op() == ROR) || ((src.shift_op() == LSL) && (src.shift_imm_ == 0))); emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 | ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code()); @@ -1808,17 +1790,17 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.271. 
// cond(31-28) | 01101110(27-20) | Rn(19-16) | // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0) - ASSERT(!dst.is(pc)); - ASSERT(!src1.is(pc)); - ASSERT(!src2.rm().is(pc)); - ASSERT(!src2.rm().is(no_reg)); - ASSERT(src2.rs().is(no_reg)); - ASSERT((src2.shift_imm_ == 0) || + DCHECK(!dst.is(pc)); + DCHECK(!src1.is(pc)); + DCHECK(!src2.rm().is(pc)); + DCHECK(!src2.rm().is(no_reg)); + DCHECK(src2.rs().is(no_reg)); + DCHECK((src2.shift_imm_ == 0) || (src2.shift_imm_ == 8) || (src2.shift_imm_ == 16) || (src2.shift_imm_ == 24)); // Operand maps ROR #0 to LSL #0. - ASSERT((src2.shift_op() == ROR) || + DCHECK((src2.shift_op() == ROR) || ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0))); emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 | ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code()); @@ -1831,16 +1813,16 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.275. // cond(31-28) | 01101100(27-20) | 1111(19-16) | // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0) - ASSERT(!dst.is(pc)); - ASSERT(!src.rm().is(pc)); - ASSERT(!src.rm().is(no_reg)); - ASSERT(src.rs().is(no_reg)); - ASSERT((src.shift_imm_ == 0) || + DCHECK(!dst.is(pc)); + DCHECK(!src.rm().is(pc)); + DCHECK(!src.rm().is(no_reg)); + DCHECK(src.rs().is(no_reg)); + DCHECK((src.shift_imm_ == 0) || (src.shift_imm_ == 8) || (src.shift_imm_ == 16) || (src.shift_imm_ == 24)); // Operand maps ROR #0 to LSL #0. - ASSERT((src.shift_op() == ROR) || + DCHECK((src.shift_op() == ROR) || ((src.shift_op() == LSL) && (src.shift_imm_ == 0))); emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 | ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code()); @@ -1849,14 +1831,14 @@ // Status register access instructions. 
void Assembler::mrs(Register dst, SRegister s, Condition cond) { - ASSERT(!dst.is(pc)); + DCHECK(!dst.is(pc)); emit(cond | B24 | s | 15*B16 | dst.code()*B12); } void Assembler::msr(SRegisterFieldMask fields, const Operand& src, Condition cond) { - ASSERT(fields >= B16 && fields < B20); // at least one field set + DCHECK(fields >= B16 && fields < B20); // at least one field set Instr instr; if (!src.rm_.is_valid()) { // Immediate. @@ -1871,7 +1853,7 @@ } instr = I | rotate_imm*B8 | immed_8; } else { - ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed + DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed instr = src.rm_.code(); } emit(cond | instr | B24 | B21 | fields | 15*B12); @@ -1924,22 +1906,22 @@ void Assembler::ldrd(Register dst1, Register dst2, const MemOperand& src, Condition cond) { - ASSERT(IsEnabled(ARMv7)); - ASSERT(src.rm().is(no_reg)); - ASSERT(!dst1.is(lr)); // r14. - ASSERT_EQ(0, dst1.code() % 2); - ASSERT_EQ(dst1.code() + 1, dst2.code()); + DCHECK(IsEnabled(ARMv7)); + DCHECK(src.rm().is(no_reg)); + DCHECK(!dst1.is(lr)); // r14. + DCHECK_EQ(0, dst1.code() % 2); + DCHECK_EQ(dst1.code() + 1, dst2.code()); addrmod3(cond | B7 | B6 | B4, dst1, src); } void Assembler::strd(Register src1, Register src2, const MemOperand& dst, Condition cond) { - ASSERT(dst.rm().is(no_reg)); - ASSERT(!src1.is(lr)); // r14. - ASSERT_EQ(0, src1.code() % 2); - ASSERT_EQ(src1.code() + 1, src2.code()); - ASSERT(IsEnabled(ARMv7)); + DCHECK(dst.rm().is(no_reg)); + DCHECK(!src1.is(lr)); // r14. + DCHECK_EQ(0, src1.code() % 2); + DCHECK_EQ(src1.code() + 1, src2.code()); + DCHECK(IsEnabled(ARMv7)); addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); } @@ -1949,15 +1931,15 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.128. 
// 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) | // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) | - ASSERT(address.rm().is(no_reg)); - ASSERT(address.am() == Offset); + DCHECK(address.rm().is(no_reg)); + DCHECK(address.am() == Offset); int U = B23; int offset = address.offset(); if (offset < 0) { offset = -offset; U = 0; } - ASSERT(offset < 4096); + DCHECK(offset < 4096); emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 | 0xf*B12 | offset); } @@ -1969,7 +1951,7 @@ RegList dst, Condition cond) { // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. - ASSERT(base.is(sp) || (dst & sp.bit()) == 0); + DCHECK(base.is(sp) || (dst & sp.bit()) == 0); addrmod4(cond | B27 | am | L, base, dst); @@ -1998,7 +1980,7 @@ // enabling/disabling and a counter feature. See simulator-arm.h . void Assembler::stop(const char* msg, Condition cond, int32_t code) { #ifndef __arm__ - ASSERT(code >= kDefaultStopCode); + DCHECK(code >= kDefaultStopCode); { // The Simulator will handle the stop instruction and get the message // address. It expects to find the address just after the svc instruction. 
@@ -2024,13 +2006,13 @@ void Assembler::bkpt(uint32_t imm16) { // v5 and above - ASSERT(is_uint16(imm16)); + DCHECK(is_uint16(imm16)); emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf)); } void Assembler::svc(uint32_t imm24, Condition cond) { - ASSERT(is_uint24(imm24)); + DCHECK(is_uint24(imm24)); emit(cond | 15*B24 | imm24); } @@ -2043,7 +2025,7 @@ CRegister crm, int opcode_2, Condition cond) { - ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); + DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2)); emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 | crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code()); } @@ -2066,7 +2048,7 @@ CRegister crm, int opcode_2, Condition cond) { - ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); + DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2)); emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 | rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); } @@ -2089,7 +2071,7 @@ CRegister crm, int opcode_2, Condition cond) { - ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); + DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2)); emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 | rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); } @@ -2121,7 +2103,7 @@ LFlag l, Condition cond) { // Unindexed addressing. - ASSERT(is_uint8(option)); + DCHECK(is_uint8(option)); emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | coproc*B8 | (option & 255)); } @@ -2162,14 +2144,14 @@ int vd, d; dst.split_code(&vd, &d); - ASSERT(offset >= 0); + DCHECK(offset >= 0); if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 | 0xB*B8 | ((offset / 4) & 255)); } else { // Larger offsets must be handled by computing the correct address // in the ip register. 
- ASSERT(!base.is(ip)); + DCHECK(!base.is(ip)); if (u == 1) { add(ip, base, Operand(offset)); } else { @@ -2183,9 +2165,14 @@ void Assembler::vldr(const DwVfpRegister dst, const MemOperand& operand, const Condition cond) { - ASSERT(!operand.rm().is_valid()); - ASSERT(operand.am_ == Offset); - vldr(dst, operand.rn(), operand.offset(), cond); + DCHECK(operand.am_ == Offset); + if (operand.rm().is_valid()) { + add(ip, operand.rn(), + Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); + vldr(dst, ip, 0, cond); + } else { + vldr(dst, operand.rn(), operand.offset(), cond); + } } @@ -2204,7 +2191,7 @@ } int sd, d; dst.split_code(&sd, &d); - ASSERT(offset >= 0); + DCHECK(offset >= 0); if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | @@ -2212,7 +2199,7 @@ } else { // Larger offsets must be handled by computing the correct address // in the ip register. - ASSERT(!base.is(ip)); + DCHECK(!base.is(ip)); if (u == 1) { add(ip, base, Operand(offset)); } else { @@ -2226,9 +2213,14 @@ void Assembler::vldr(const SwVfpRegister dst, const MemOperand& operand, const Condition cond) { - ASSERT(!operand.rm().is_valid()); - ASSERT(operand.am_ == Offset); - vldr(dst, operand.rn(), operand.offset(), cond); + DCHECK(operand.am_ == Offset); + if (operand.rm().is_valid()) { + add(ip, operand.rn(), + Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); + vldr(dst, ip, 0, cond); + } else { + vldr(dst, operand.rn(), operand.offset(), cond); + } } @@ -2245,7 +2237,7 @@ offset = -offset; u = 0; } - ASSERT(offset >= 0); + DCHECK(offset >= 0); int vd, d; src.split_code(&vd, &d); @@ -2255,7 +2247,7 @@ } else { // Larger offsets must be handled by computing the correct address // in the ip register. 
- ASSERT(!base.is(ip)); + DCHECK(!base.is(ip)); if (u == 1) { add(ip, base, Operand(offset)); } else { @@ -2269,9 +2261,14 @@ void Assembler::vstr(const DwVfpRegister src, const MemOperand& operand, const Condition cond) { - ASSERT(!operand.rm().is_valid()); - ASSERT(operand.am_ == Offset); - vstr(src, operand.rn(), operand.offset(), cond); + DCHECK(operand.am_ == Offset); + if (operand.rm().is_valid()) { + add(ip, operand.rn(), + Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); + vstr(src, ip, 0, cond); + } else { + vstr(src, operand.rn(), operand.offset(), cond); + } } @@ -2290,14 +2287,14 @@ } int sd, d; src.split_code(&sd, &d); - ASSERT(offset >= 0); + DCHECK(offset >= 0); if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); } else { // Larger offsets must be handled by computing the correct address // in the ip register. - ASSERT(!base.is(ip)); + DCHECK(!base.is(ip)); if (u == 1) { add(ip, base, Operand(offset)); } else { @@ -2311,9 +2308,14 @@ void Assembler::vstr(const SwVfpRegister src, const MemOperand& operand, const Condition cond) { - ASSERT(!operand.rm().is_valid()); - ASSERT(operand.am_ == Offset); - vstr(src, operand.rn(), operand.offset(), cond); + DCHECK(operand.am_ == Offset); + if (operand.rm().is_valid()) { + add(ip, operand.rn(), + Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); + vstr(src, ip, 0, cond); + } else { + vstr(src, operand.rn(), operand.offset(), cond); + } } @@ -2325,14 +2327,14 @@ // Instruction details available in ARM DDI 0406C.b, A8-922. 
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); + DCHECK_LE(first.code(), last.code()); + DCHECK(am == ia || am == ia_w || am == db_w); + DCHECK(!base.is(pc)); int sd, d; first.split_code(&sd, &d); int count = last.code() - first.code() + 1; - ASSERT(count <= 16); + DCHECK(count <= 16); emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 | 0xB*B8 | count*2); } @@ -2346,14 +2348,14 @@ // Instruction details available in ARM DDI 0406C.b, A8-1080. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); + DCHECK_LE(first.code(), last.code()); + DCHECK(am == ia || am == ia_w || am == db_w); + DCHECK(!base.is(pc)); int sd, d; first.split_code(&sd, &d); int count = last.code() - first.code() + 1; - ASSERT(count <= 16); + DCHECK(count <= 16); emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 | 0xB*B8 | count*2); } @@ -2366,9 +2368,9 @@ // Instruction details available in ARM DDI 0406A, A8-626. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1010(11-8) | (count/2) - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); + DCHECK_LE(first.code(), last.code()); + DCHECK(am == ia || am == ia_w || am == db_w); + DCHECK(!base.is(pc)); int sd, d; first.split_code(&sd, &d); @@ -2386,9 +2388,9 @@ // Instruction details available in ARM DDI 0406A, A8-784. 
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count/2) - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); + DCHECK_LE(first.code(), last.code()); + DCHECK(am == ia || am == ia_w || am == db_w); + DCHECK(!base.is(pc)); int sd, d; first.split_code(&sd, &d); @@ -2400,7 +2402,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { uint64_t i; - OS::MemCopy(&i, &d, 8); + memcpy(&i, &d, 8); *lo = i & 0xffffffff; *hi = i >> 32; @@ -2410,7 +2412,7 @@ // Only works for little endian floating point formats. // We don't support VFP on the mixed endian floating point platform. static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { - ASSERT(CpuFeatures::IsSupported(VFP3)); + DCHECK(CpuFeatures::IsSupported(VFP3)); // VMOV can accept an immediate of the form: // @@ -2472,7 +2474,7 @@ int vd, d; dst.split_code(&vd, &d); emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc); - } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) { + } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) { // TODO(jfb) Temporarily turned off until we have constant blinding or // some equivalent mitigation: an attacker can otherwise control // generated data which also happens to be executable, a Very Bad @@ -2489,8 +2491,18 @@ // that's tricky because vldr has a limited reach. Furthermore // it breaks load locality. RelocInfo rinfo(pc_, imm); - ConstantPoolAddEntry(rinfo); - vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); + ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); + if (section == ConstantPoolArray::EXTENDED_SECTION) { + DCHECK(FLAG_enable_ool_constant_pool); + // Emit instructions to load constant pool offset. + movw(ip, 0); + movt(ip, 0); + // Load from constant pool at offset. 
+ vldr(dst, MemOperand(pp, ip)); + } else { + DCHECK(section == ConstantPoolArray::SMALL_SECTION); + vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); + } } else { // Synthesise the double from ARM immediates. uint32_t lo, hi; @@ -2564,7 +2576,7 @@ // Instruction details available in ARM DDI 0406C.b, A8-940. // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) | // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0) - ASSERT(index.index == 0 || index.index == 1); + DCHECK(index.index == 0 || index.index == 1); int vd, d; dst.split_code(&vd, &d); emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 | @@ -2580,7 +2592,7 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.342. // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) | // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0) - ASSERT(index.index == 0 || index.index == 1); + DCHECK(index.index == 0 || index.index == 1); int vn, n; src.split_code(&vn, &n); emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 | @@ -2596,7 +2608,7 @@ // Instruction details available in ARM DDI 0406C.b, A8-948. // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(!src1.is(pc) && !src2.is(pc)); + DCHECK(!src1.is(pc) && !src2.is(pc)); int vm, m; dst.split_code(&vm, &m); emit(cond | 0xC*B24 | B22 | src2.code()*B16 | @@ -2612,7 +2624,7 @@ // Instruction details available in ARM DDI 0406C.b, A8-948. // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(!dst1.is(pc) && !dst2.is(pc)); + DCHECK(!dst1.is(pc) && !dst2.is(pc)); int vm, m; src.split_code(&vm, &m); emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 | @@ -2627,7 +2639,7 @@ // Instruction details available in ARM DDI 0406A, A8-642. 
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(!src.is(pc)); + DCHECK(!src.is(pc)); int sn, n; dst.split_code(&sn, &n); emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4); @@ -2641,7 +2653,7 @@ // Instruction details available in ARM DDI 0406A, A8-642. // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(!dst.is(pc)); + DCHECK(!dst.is(pc)); int sn, n; src.split_code(&sn, &n); emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4); @@ -2702,7 +2714,7 @@ int reg_code, int* vm, int* m) { - ASSERT((reg_code >= 0) && (reg_code <= 31)); + DCHECK((reg_code >= 0) && (reg_code <= 31)); if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) { // 32 bit type. *m = reg_code & 0x1; @@ -2722,7 +2734,7 @@ const int src_code, VFPConversionMode mode, const Condition cond) { - ASSERT(src_type != dst_type); + DCHECK(src_type != dst_type); int D, Vd, M, Vm; SplitRegCode(src_type, src_code, &Vm, &M); SplitRegCode(dst_type, dst_code, &Vd, &D); @@ -2732,7 +2744,7 @@ // Instruction details available in ARM DDI 0406B, A8.6.295. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) | // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type)); + DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type)); int sz, opc2, op; @@ -2741,7 +2753,7 @@ sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; op = mode; } else { - ASSERT(IsIntegerVFPType(src_type)); + DCHECK(IsIntegerVFPType(src_type)); opc2 = 0x0; sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0; op = IsSignedVFPType(src_type) ? 0x1 : 0x0; @@ -2823,12 +2835,13 @@ // Instruction details available in ARM DDI 0406C.b, A8-874. 
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) | // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0) - ASSERT(fraction_bits > 0 && fraction_bits <= 32); - ASSERT(CpuFeatures::IsSupported(VFP3)); + DCHECK(fraction_bits > 0 && fraction_bits <= 32); + DCHECK(CpuFeatures::IsSupported(VFP3)); int vd, d; dst.split_code(&vd, &d); - int i = ((32 - fraction_bits) >> 4) & 1; - int imm4 = (32 - fraction_bits) & 0xf; + int imm5 = 32 - fraction_bits; + int i = imm5 & 1; + int imm4 = (imm5 >> 1) & 0xf; emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 | vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4); } @@ -3004,7 +3017,7 @@ // Instruction details available in ARM DDI 0406C.b, A8-864. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0) - ASSERT(src2 == 0.0); + DCHECK(src2 == 0.0); int vd, d; src1.split_code(&vd, &d); emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6); @@ -3052,7 +3065,7 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.320. // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) | // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) - ASSERT(CpuFeatures::IsSupported(NEON)); + DCHECK(CpuFeatures::IsSupported(NEON)); int vd, d; dst.base().split_code(&vd, &d); emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 | @@ -3066,7 +3079,7 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.404. // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) | // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) - ASSERT(CpuFeatures::IsSupported(NEON)); + DCHECK(CpuFeatures::IsSupported(NEON)); int vd, d; src.base().split_code(&vd, &d); emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 | @@ -3078,7 +3091,7 @@ // Instruction details available in ARM DDI 0406C.b, A8.8.346. 
// 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) | // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0) - ASSERT(CpuFeatures::IsSupported(NEON)); + DCHECK(CpuFeatures::IsSupported(NEON)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -3095,7 +3108,7 @@ // MOV Rx, Rx as NOP and it performs better even in newer CPUs. // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode // a type. - ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. + DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. emit(al | 13*B21 | type*B12 | type); } @@ -3104,7 +3117,7 @@ instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions ((kNumRegisters-1)*B12) | // mask out register EncodeMovwImmediate(0xFFFF)); // mask out immediate value - return instr == 0x34*B20; + return instr == kMovtPattern; } @@ -3112,17 +3125,36 @@ instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions ((kNumRegisters-1)*B12) | // mask out destination EncodeMovwImmediate(0xFFFF)); // mask out immediate value - return instr == 0x30*B20; + return instr == kMovwPattern; +} + + +Instr Assembler::GetMovTPattern() { return kMovtPattern; } + + +Instr Assembler::GetMovWPattern() { return kMovwPattern; } + + +Instr Assembler::EncodeMovwImmediate(uint32_t immediate) { + DCHECK(immediate < 0x10000); + return ((immediate & 0xf000) << 4) | (immediate & 0xfff); +} + + +Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) { + instruction &= ~EncodeMovwImmediate(0xffff); + return instruction | EncodeMovwImmediate(immediate); } bool Assembler::IsNop(Instr instr, int type) { - ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. + DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. // Check for mov rx, rx where x = type. 
return instr == (al | 13*B21 | type*B12 | type); } +// static bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { uint32_t dummy1; uint32_t dummy2; @@ -3161,9 +3193,7 @@ void Assembler::RecordConstPool(int size) { // We only need this for debugger support, to correctly compute offsets in the // code. -#ifdef ENABLE_DEBUGGER_SUPPORT RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); -#endif } @@ -3172,9 +3202,7 @@ // Compute new buffer size. CodeDesc desc; // the new buffer - if (buffer_size_ < 4*KB) { - desc.buffer_size = 4*KB; - } else if (buffer_size_ < 1*MB) { + if (buffer_size_ < 1 * MB) { desc.buffer_size = 2*buffer_size_; } else { desc.buffer_size = buffer_size_ + 1*MB; @@ -3190,9 +3218,9 @@ // Copy the data. int pc_delta = desc.buffer - buffer_; int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); - OS::MemMove(desc.buffer, buffer_, desc.instr_size); - OS::MemMove(reloc_info_writer.pos() + rc_delta, - reloc_info_writer.pos(), desc.reloc_size); + MemMove(desc.buffer, buffer_, desc.instr_size); + MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), + desc.reloc_size); // Switch buffers. DeleteArray(buffer_); @@ -3209,7 +3237,7 @@ // Relocate pending relocation entries. for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) { RelocInfo& rinfo = pending_32_bit_reloc_info_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && + DCHECK(rinfo.rmode() != RelocInfo::COMMENT && rinfo.rmode() != RelocInfo::POSITION); if (rinfo.rmode() != RelocInfo::JS_RETURN) { rinfo.set_pc(rinfo.pc() + pc_delta); @@ -3217,7 +3245,7 @@ } for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) { RelocInfo& rinfo = pending_64_bit_reloc_info_[i]; - ASSERT(rinfo.rmode() == RelocInfo::NONE64); + DCHECK(rinfo.rmode() == RelocInfo::NONE64); rinfo.set_pc(rinfo.pc() + pc_delta); } constant_pool_builder_.Relocate(pc_delta); @@ -3228,8 +3256,8 @@ // No relocation info should be pending while using db. 
db is used // to write pure data with no pointers and the constant pool should // be emitted before using db. - ASSERT(num_pending_32_bit_reloc_info_ == 0); - ASSERT(num_pending_64_bit_reloc_info_ == 0); + DCHECK(num_pending_32_bit_reloc_info_ == 0); + DCHECK(num_pending_64_bit_reloc_info_ == 0); CheckBuffer(); *reinterpret_cast<uint8_t*>(pc_) = data; pc_ += sizeof(uint8_t); @@ -3240,8 +3268,8 @@ // No relocation info should be pending while using dd. dd is used // to write pure data with no pointers and the constant pool should // be emitted before using dd. - ASSERT(num_pending_32_bit_reloc_info_ == 0); - ASSERT(num_pending_64_bit_reloc_info_ == 0); + DCHECK(num_pending_32_bit_reloc_info_ == 0); + DCHECK(num_pending_64_bit_reloc_info_ == 0); CheckBuffer(); *reinterpret_cast<uint32_t*>(pc_) = data; pc_ += sizeof(uint32_t); @@ -3265,17 +3293,11 @@ void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { if (!RelocInfo::IsNone(rinfo.rmode())) { // Don't record external references unless the heap will be serialized. 
- if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - if (!Serializer::enabled() && !emit_debug_code()) { - return; - } + if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; } - ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here + DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) { RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(), @@ -3290,18 +3312,19 @@ } -void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) { +ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry( + const RelocInfo& rinfo) { if (FLAG_enable_ool_constant_pool) { - constant_pool_builder_.AddEntry(this, rinfo); + return constant_pool_builder_.AddEntry(this, rinfo); } else { if (rinfo.rmode() == RelocInfo::NONE64) { - ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo); + DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo); if (num_pending_64_bit_reloc_info_ == 0) { first_const_pool_64_use_ = pc_offset(); } pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo; } else { - ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo); + DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo); if (num_pending_32_bit_reloc_info_ == 0) { first_const_pool_32_use_ = pc_offset(); } @@ -3310,6 +3333,7 @@ // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. BlockConstPoolFor(1); + return ConstantPoolArray::SMALL_SECTION; } } @@ -3317,8 +3341,8 @@ void Assembler::BlockConstPoolFor(int instructions) { if (FLAG_enable_ool_constant_pool) { // Should be a no-op if using an out-of-line constant pool. 
- ASSERT(num_pending_32_bit_reloc_info_ == 0); - ASSERT(num_pending_64_bit_reloc_info_ == 0); + DCHECK(num_pending_32_bit_reloc_info_ == 0); + DCHECK(num_pending_64_bit_reloc_info_ == 0); return; } @@ -3327,10 +3351,10 @@ // Max pool start (if we need a jump and an alignment). #ifdef DEBUG int start = pc_limit + kInstrSize + 2 * kPointerSize; - ASSERT((num_pending_32_bit_reloc_info_ == 0) || + DCHECK((num_pending_32_bit_reloc_info_ == 0) || (start - first_const_pool_32_use_ + num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool)); - ASSERT((num_pending_64_bit_reloc_info_ == 0) || + DCHECK((num_pending_64_bit_reloc_info_ == 0) || (start - first_const_pool_64_use_ < kMaxDistToFPPool)); #endif no_const_pool_before_ = pc_limit; @@ -3345,8 +3369,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { if (FLAG_enable_ool_constant_pool) { // Should be a no-op if using an out-of-line constant pool. - ASSERT(num_pending_32_bit_reloc_info_ == 0); - ASSERT(num_pending_64_bit_reloc_info_ == 0); + DCHECK(num_pending_32_bit_reloc_info_ == 0); + DCHECK(num_pending_64_bit_reloc_info_ == 0); return; } @@ -3355,7 +3379,7 @@ // BlockConstPoolScope. if (is_const_pool_blocked()) { // Something is wrong if emission is forced and blocked at the same time. - ASSERT(!force_emit); + DCHECK(!force_emit); return; } @@ -3394,7 +3418,7 @@ // * the instruction doesn't require a jump after itself to jump over the // constant pool, and we're getting close to running out of range. if (!force_emit) { - ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0)); + DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0)); bool need_emit = false; if (has_fp_values) { int dist64 = pc_offset() + @@ -3444,15 +3468,15 @@ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) { RelocInfo& rinfo = pending_64_bit_reloc_info_[i]; - ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment. 
+ DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment. Instr instr = instr_at(rinfo.pc()); // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0. - ASSERT((IsVldrDPcImmediateOffset(instr) && + DCHECK((IsVldrDPcImmediateOffset(instr) && GetVldrDRegisterImmediateOffset(instr) == 0)); int delta = pc_ - rinfo.pc() - kPcLoadDelta; - ASSERT(is_uint10(delta)); + DCHECK(is_uint10(delta)); bool found = false; uint64_t value = rinfo.raw_data64(); @@ -3460,9 +3484,9 @@ RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j]; if (value == rinfo2.raw_data64()) { found = true; - ASSERT(rinfo2.rmode() == RelocInfo::NONE64); + DCHECK(rinfo2.rmode() == RelocInfo::NONE64); Instr instr2 = instr_at(rinfo2.pc()); - ASSERT(IsVldrDPcImmediateOffset(instr2)); + DCHECK(IsVldrDPcImmediateOffset(instr2)); delta = GetVldrDRegisterImmediateOffset(instr2); delta += rinfo2.pc() - rinfo.pc(); break; @@ -3481,7 +3505,7 @@ // Emit 32-bit constant pool entries. for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) { RelocInfo& rinfo = pending_32_bit_reloc_info_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && + DCHECK(rinfo.rmode() != RelocInfo::COMMENT && rinfo.rmode() != RelocInfo::POSITION && rinfo.rmode() != RelocInfo::STATEMENT_POSITION && rinfo.rmode() != RelocInfo::CONST_POOL && @@ -3490,19 +3514,19 @@ Instr instr = instr_at(rinfo.pc()); // 64-bit loads shouldn't get here. 
- ASSERT(!IsVldrDPcImmediateOffset(instr)); + DCHECK(!IsVldrDPcImmediateOffset(instr)); if (IsLdrPcImmediateOffset(instr) && GetLdrRegisterImmediateOffset(instr) == 0) { int delta = pc_ - rinfo.pc() - kPcLoadDelta; - ASSERT(is_uint12(delta)); + DCHECK(is_uint12(delta)); // 0 is the smallest delta: // ldr rd, [pc, #0] // constant pool marker // data bool found = false; - if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) { + if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) { for (int j = 0; j < i; j++) { RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j]; @@ -3525,7 +3549,7 @@ emit(rinfo.data()); } } else { - ASSERT(IsMovW(instr)); + DCHECK(IsMovW(instr)); } } @@ -3547,25 +3571,21 @@ } -MaybeObject* Assembler::AllocateConstantPool(Heap* heap) { - ASSERT(FLAG_enable_ool_constant_pool); - return constant_pool_builder_.Allocate(heap); +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { + if (!FLAG_enable_ool_constant_pool) { + return isolate->factory()->empty_constant_pool_array(); + } + return constant_pool_builder_.New(isolate); } void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { - ASSERT(FLAG_enable_ool_constant_pool); constant_pool_builder_.Populate(this, constant_pool); } ConstantPoolBuilder::ConstantPoolBuilder() - : entries_(), - merged_indexes_(), - count_of_64bit_(0), - count_of_code_ptr_(0), - count_of_heap_ptr_(0), - count_of_32bit_(0) { } + : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {} bool ConstantPoolBuilder::IsEmpty() { @@ -3573,166 +3593,175 @@ } -bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) { - return rmode == RelocInfo::NONE64; -} - - -bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) { - return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64; -} - - -bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) { - return RelocInfo::IsCodeTarget(rmode); -} - - -bool 
ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) { - return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode); +ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType( + RelocInfo::Mode rmode) { + if (rmode == RelocInfo::NONE64) { + return ConstantPoolArray::INT64; + } else if (!RelocInfo::IsGCRelocMode(rmode)) { + return ConstantPoolArray::INT32; + } else if (RelocInfo::IsCodeTarget(rmode)) { + return ConstantPoolArray::CODE_PTR; + } else { + DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode)); + return ConstantPoolArray::HEAP_PTR; + } } -void ConstantPoolBuilder::AddEntry(Assembler* assm, - const RelocInfo& rinfo) { +ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry( + Assembler* assm, const RelocInfo& rinfo) { RelocInfo::Mode rmode = rinfo.rmode(); - ASSERT(rmode != RelocInfo::COMMENT && + DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION && rmode != RelocInfo::STATEMENT_POSITION && rmode != RelocInfo::CONST_POOL); - // Try to merge entries which won't be patched. int merged_index = -1; + ConstantPoolArray::LayoutSection entry_section = current_section_; if (RelocInfo::IsNone(rmode) || - (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) { + (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) { size_t i; - std::vector<RelocInfo>::const_iterator it; + std::vector<ConstantPoolEntry>::const_iterator it; for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) { - if (RelocInfo::IsEqual(rinfo, *it)) { + if (RelocInfo::IsEqual(rinfo, it->rinfo_)) { + // Merge with found entry. merged_index = i; + entry_section = entries_[i].section_; break; } } } - - entries_.push_back(rinfo); - merged_indexes_.push_back(merged_index); + DCHECK(entry_section <= current_section_); + entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index)); if (merged_index == -1) { // Not merged, so update the appropriate count. 
- if (Is64BitEntry(rmode)) { - count_of_64bit_++; - } else if (Is32BitEntry(rmode)) { - count_of_32bit_++; - } else if (IsCodePtrEntry(rmode)) { - count_of_code_ptr_++; - } else { - ASSERT(IsHeapPtrEntry(rmode)); - count_of_heap_ptr_++; - } + number_of_entries_[entry_section].increment(GetConstantPoolType(rmode)); } - // Check if we still have room for another entry given Arm's ldr and vldr - // immediate offset range. - if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_, - count_of_code_ptr_, - count_of_heap_ptr_, - count_of_32bit_))) && - is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) { - assm->set_constant_pool_full(); + // Check if we still have room for another entry in the small section + // given Arm's ldr and vldr immediate offset range. + if (current_section_ == ConstantPoolArray::SMALL_SECTION && + !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) && + is_uint10(ConstantPoolArray::MaxInt64Offset( + small_entries()->count_of(ConstantPoolArray::INT64))))) { + current_section_ = ConstantPoolArray::EXTENDED_SECTION; } + return entry_section; } void ConstantPoolBuilder::Relocate(int pc_delta) { - for (std::vector<RelocInfo>::iterator rinfo = entries_.begin(); - rinfo != entries_.end(); rinfo++) { - ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN); - rinfo->set_pc(rinfo->pc() + pc_delta); + for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); + entry != entries_.end(); entry++) { + DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); + entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); } } -MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) { +Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) { if (IsEmpty()) { - return heap->empty_constant_pool_array(); - } else { - return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_, - count_of_heap_ptr_, count_of_32bit_); + return isolate->factory()->empty_constant_pool_array(); + } else if (extended_entries()->is_empty()) { 
+ return isolate->factory()->NewConstantPoolArray(*small_entries()); + } else { + DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION); + return isolate->factory()->NewExtendedConstantPoolArray( + *small_entries(), *extended_entries()); } } void ConstantPoolBuilder::Populate(Assembler* assm, ConstantPoolArray* constant_pool) { - ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_); - ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_); - ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_); - ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_); - ASSERT(entries_.size() == merged_indexes_.size()); - - int index_64bit = 0; - int index_code_ptr = count_of_64bit_; - int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_; - int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_; - - size_t i; - std::vector<RelocInfo>::const_iterator rinfo; - for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) { - RelocInfo::Mode rmode = rinfo->rmode(); + DCHECK_EQ(extended_entries()->is_empty(), + !constant_pool->is_extended_layout()); + DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries( + constant_pool, ConstantPoolArray::SMALL_SECTION))); + if (constant_pool->is_extended_layout()) { + DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries( + constant_pool, ConstantPoolArray::EXTENDED_SECTION))); + } + + // Set up initial offsets. + int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS] + [ConstantPoolArray::NUMBER_OF_TYPES]; + for (int section = 0; section <= constant_pool->final_section(); section++) { + int section_start = (section == ConstantPoolArray::EXTENDED_SECTION) + ? 
small_entries()->total_count() + : 0; + for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) { + ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i); + if (number_of_entries_[section].count_of(type) != 0) { + offsets[section][type] = constant_pool->OffsetOfElementAt( + number_of_entries_[section].base_of(type) + section_start); + } + } + } + + for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); + entry != entries_.end(); entry++) { + RelocInfo rinfo = entry->rinfo_; + RelocInfo::Mode rmode = entry->rinfo_.rmode(); + ConstantPoolArray::Type type = GetConstantPoolType(rmode); // Update constant pool if necessary and get the entry's offset. int offset; - if (merged_indexes_[i] == -1) { - if (Is64BitEntry(rmode)) { - offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag; - constant_pool->set(index_64bit++, rinfo->data64()); - } else if (Is32BitEntry(rmode)) { - offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag; - constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data())); - } else if (IsCodePtrEntry(rmode)) { - offset = constant_pool->OffsetOfElementAt(index_code_ptr) - - kHeapObjectTag; - constant_pool->set(index_code_ptr++, - reinterpret_cast<Object *>(rinfo->data())); + if (entry->merged_index_ == -1) { + offset = offsets[entry->section_][type]; + offsets[entry->section_][type] += ConstantPoolArray::entry_size(type); + if (type == ConstantPoolArray::INT64) { + constant_pool->set_at_offset(offset, rinfo.data64()); + } else if (type == ConstantPoolArray::INT32) { + constant_pool->set_at_offset(offset, + static_cast<int32_t>(rinfo.data())); + } else if (type == ConstantPoolArray::CODE_PTR) { + constant_pool->set_at_offset(offset, + reinterpret_cast<Address>(rinfo.data())); } else { - ASSERT(IsHeapPtrEntry(rmode)); - offset = constant_pool->OffsetOfElementAt(index_heap_ptr) - - kHeapObjectTag; - constant_pool->set(index_heap_ptr++, - reinterpret_cast<Object 
*>(rinfo->data())); + DCHECK(type == ConstantPoolArray::HEAP_PTR); + constant_pool->set_at_offset(offset, + reinterpret_cast<Object*>(rinfo.data())); } - merged_indexes_[i] = offset; // Stash offset for merged entries. + offset -= kHeapObjectTag; + entry->merged_index_ = offset; // Stash offset for merged entries. } else { - size_t merged_index = static_cast<size_t>(merged_indexes_[i]); - ASSERT(merged_index < merged_indexes_.size() && merged_index < i); - offset = merged_indexes_[merged_index]; + DCHECK(entry->merged_index_ < (entry - entries_.begin())); + offset = entries_[entry->merged_index_].merged_index_; } // Patch vldr/ldr instruction with correct offset. - Instr instr = assm->instr_at(rinfo->pc()); - if (Is64BitEntry(rmode)) { + Instr instr = assm->instr_at(rinfo.pc()); + if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { + // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]. + Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); + DCHECK((Assembler::IsMovW(instr) && + Instruction::ImmedMovwMovtValue(instr) == 0)); + DCHECK((Assembler::IsMovT(next_instr) && + Instruction::ImmedMovwMovtValue(next_instr) == 0)); + assm->instr_at_put(rinfo.pc(), + Assembler::PatchMovwImmediate(instr, offset & 0xffff)); + assm->instr_at_put( + rinfo.pc() + Assembler::kInstrSize, + Assembler::PatchMovwImmediate(next_instr, offset >> 16)); + } else if (type == ConstantPoolArray::INT64) { // Instruction to patch must be 'vldr rd, [pp, #0]'. - ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) && + DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) && Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); - ASSERT(is_uint10(offset)); - assm->instr_at_put(rinfo->pc(), - Assembler::SetVldrDRegisterImmediateOffset(instr, offset)); + DCHECK(is_uint10(offset)); + assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset( + instr, offset)); } else { // Instruction to patch must be 'ldr rd, [pp, #0]'. 
- ASSERT((Assembler::IsLdrPpImmediateOffset(instr) && + DCHECK((Assembler::IsLdrPpImmediateOffset(instr) && Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); - ASSERT(is_uint12(offset)); - assm->instr_at_put(rinfo->pc(), - Assembler::SetLdrRegisterImmediateOffset(instr, offset)); + DCHECK(is_uint12(offset)); + assm->instr_at_put( + rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); } } - - ASSERT((index_64bit == count_of_64bit_) && - (index_code_ptr == (index_64bit + count_of_code_ptr_)) && - (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && - (index_32bit == (index_heap_ptr + count_of_32bit_))); } diff -Nru nodejs-0.11.13/deps/v8/src/arm/assembler-arm.h nodejs-0.11.15/deps/v8/src/arm/assembler-arm.h --- nodejs-0.11.13/deps/v8/src/arm/assembler-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/assembler-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -43,80 +43,13 @@ #include <stdio.h> #include <vector> -#include "assembler.h" -#include "constants-arm.h" -#include "serialize.h" +#include "src/arm/constants-arm.h" +#include "src/assembler.h" +#include "src/serialize.h" namespace v8 { namespace internal { -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a CpuFeatureScope before use. -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Display target use when compiling. - static void PrintTarget(); - - // Display features. - static void PrintFeatures(); - - // Check whether a feature is supported by the target CPU. 
- static bool IsSupported(CpuFeature f) { - ASSERT(initialized_); - return Check(f, supported_); - } - - static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { - ASSERT(initialized_); - return Check(f, found_by_runtime_probing_only_); - } - - static bool IsSafeForSnapshot(CpuFeature f) { - return Check(f, cross_compile_) || - (IsSupported(f) && - (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); - } - - static unsigned cache_line_size() { return cache_line_size_; } - - static bool VerifyCrossCompiling() { - return cross_compile_ == 0; - } - - static bool VerifyCrossCompiling(CpuFeature f) { - unsigned mask = flag2set(f); - return cross_compile_ == 0 || - (cross_compile_ & mask) == mask; - } - - private: - static bool Check(CpuFeature f, unsigned set) { - return (set & flag2set(f)) != 0; - } - - static unsigned flag2set(CpuFeature f) { - return 1u << f; - } - -#ifdef DEBUG - static bool initialized_; -#endif - static unsigned supported_; - static unsigned found_by_runtime_probing_only_; - static unsigned cache_line_size_; - - static unsigned cross_compile_; - - friend class ExternalReference; - friend class PlatformFeatureScope; - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -}; - - // CPU Registers. 
// // 1) We would prefer to use an enum, but enum values are assignment- @@ -167,17 +100,17 @@ inline static int NumAllocatableRegisters(); static int ToAllocationIndex(Register reg) { - ASSERT(reg.code() < kMaxNumAllocatableRegisters); + DCHECK(reg.code() < kMaxNumAllocatableRegisters); return reg.code(); } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index); } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "r0", "r1", @@ -203,17 +136,17 @@ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } bool is(Register reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } void set_code(int code) { code_ = code; - ASSERT(is_valid()); + DCHECK(is_valid()); } // Unfortunately we can't make this private in a struct. 
@@ -249,15 +182,15 @@ bool is_valid() const { return 0 <= code_ && code_ < 32; } bool is(SwVfpRegister reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } void split_code(int* vm, int* m) const { - ASSERT(is_valid()); + DCHECK(is_valid()); *m = code_ & 0x1; *vm = code_ >> 1; } @@ -299,15 +232,15 @@ } bool is(DwVfpRegister reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } void split_code(int* vm, int* m) const { - ASSERT(is_valid()); + DCHECK(is_valid()); *m = (code_ & 0x10) >> 4; *vm = code_ & 0x0F; } @@ -338,21 +271,21 @@ bool is(DwVfpRegister reg) const { return code_ == reg.code_; } bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } SwVfpRegister low() const { SwVfpRegister reg; reg.code_ = code_ * 2; - ASSERT(reg.is_valid()); + DCHECK(reg.is_valid()); return reg; } SwVfpRegister high() const { SwVfpRegister reg; reg.code_ = (code_ * 2) + 1; - ASSERT(reg.is_valid()); + DCHECK(reg.is_valid()); return reg; } @@ -374,11 +307,11 @@ } bool is(QwNeonRegister reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } void split_code(int* vm, int* m) const { - ASSERT(is_valid()); + DCHECK(is_valid()); int encoded_code = code_ << 1; *m = (encoded_code & 0x10) >> 4; *vm = encoded_code & 0x0F; @@ -492,11 +425,11 @@ bool is_valid() const { return 0 <= code_ && code_ < 16; } bool is(CRegister creg) const { return code_ == creg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } @@ -585,16 +518,22 @@ // 
Return true if this is a register operand. INLINE(bool is_reg() const); - // Return true if this operand fits in one instruction so that no - // 2-instruction solution with a load into the ip register is necessary. If + // Return the number of actual instructions required to implement the given + // instruction for this particular operand. This can be a single instruction, + // if no load into the ip register is necessary, or anything between 2 and 4 + // instructions when we need to load from the constant pool (depending upon + // whether the constant pool entry is in the small or extended section). If // the instruction this operand is used for is a MOV or MVN instruction the // actual instruction to use is required for this calculation. For other // instructions instr is ignored. - bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const; + // + // The value returned is only valid as long as no entries are added to the + // constant pool between this call and the actual instruction being emitted. + int instructions_required(const Assembler* assembler, Instr instr = 0) const; bool must_output_reloc_info(const Assembler* assembler) const; inline int32_t immediate() const { - ASSERT(!rm_.is_valid()); + DCHECK(!rm_.is_valid()); return imm32_; } @@ -642,12 +581,12 @@ } void set_offset(int32_t offset) { - ASSERT(rm_.is(no_reg)); + DCHECK(rm_.is(no_reg)); offset_ = offset; } uint32_t offset() const { - ASSERT(rm_.is(no_reg)); + DCHECK(rm_.is(no_reg)); return offset_; } @@ -710,57 +649,48 @@ // Class used to build a constant pool. 
class ConstantPoolBuilder BASE_EMBEDDED { public: - explicit ConstantPoolBuilder(); - void AddEntry(Assembler* assm, const RelocInfo& rinfo); + ConstantPoolBuilder(); + ConstantPoolArray::LayoutSection AddEntry(Assembler* assm, + const RelocInfo& rinfo); void Relocate(int pc_delta); bool IsEmpty(); - MaybeObject* Allocate(Heap* heap); + Handle<ConstantPoolArray> New(Isolate* isolate); void Populate(Assembler* assm, ConstantPoolArray* constant_pool); - inline int count_of_64bit() const { return count_of_64bit_; } - inline int count_of_code_ptr() const { return count_of_code_ptr_; } - inline int count_of_heap_ptr() const { return count_of_heap_ptr_; } - inline int count_of_32bit() const { return count_of_32bit_; } + inline ConstantPoolArray::LayoutSection current_section() const { + return current_section_; + } + + inline ConstantPoolArray::NumberOfEntries* number_of_entries( + ConstantPoolArray::LayoutSection section) { + return &number_of_entries_[section]; + } + + inline ConstantPoolArray::NumberOfEntries* small_entries() { + return number_of_entries(ConstantPoolArray::SMALL_SECTION); + } + + inline ConstantPoolArray::NumberOfEntries* extended_entries() { + return number_of_entries(ConstantPoolArray::EXTENDED_SECTION); + } private: - bool Is64BitEntry(RelocInfo::Mode rmode); - bool Is32BitEntry(RelocInfo::Mode rmode); - bool IsCodePtrEntry(RelocInfo::Mode rmode); - bool IsHeapPtrEntry(RelocInfo::Mode rmode); - - std::vector<RelocInfo> entries_; - std::vector<int> merged_indexes_; - int count_of_64bit_; - int count_of_code_ptr_; - int count_of_heap_ptr_; - int count_of_32bit_; -}; + struct ConstantPoolEntry { + ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section, + int merged_index) + : rinfo_(rinfo), section_(section), merged_index_(merged_index) {} + + RelocInfo rinfo_; + ConstantPoolArray::LayoutSection section_; + int merged_index_; + }; + ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode); -extern const Instr kMovLrPc; 
-extern const Instr kLdrPCMask; -extern const Instr kLdrPCPattern; -extern const Instr kLdrPpMask; -extern const Instr kLdrPpPattern; -extern const Instr kBlxRegMask; -extern const Instr kBlxRegPattern; -extern const Instr kBlxIp; - -extern const Instr kMovMvnMask; -extern const Instr kMovMvnPattern; -extern const Instr kMovMvnFlip; - -extern const Instr kMovLeaveCCMask; -extern const Instr kMovLeaveCCPattern; -extern const Instr kMovwMask; -extern const Instr kMovwPattern; -extern const Instr kMovwLeaveCCFlip; - -extern const Instr kCmpCmnMask; -extern const Instr kCmpCmnPattern; -extern const Instr kCmpCmnFlip; -extern const Instr kAddSubFlip; -extern const Instr kAndBicFlip; + std::vector<ConstantPoolEntry> entries_; + ConstantPoolArray::LayoutSection current_section_; + ConstantPoolArray::NumberOfEntries number_of_entries_[2]; +}; struct VmovIndex { unsigned char index; @@ -813,13 +743,13 @@ // Manages the jump elimination optimization if the second parameter is true. int branch_offset(Label* L, bool jump_elimination_allowed); - // Return the address in the constant pool of the code target address used by - // the branch/call instruction at pc, or the object in a mov. - INLINE(static Address target_pointer_address_at(Address pc)); + // Returns true if the given pc address is the start of a constant pool load + // instruction sequence. + INLINE(static bool is_constant_pool_load(Address pc)); // Return the address in the constant pool of the code target address used by // the branch/call instruction at pc, or the object in a mov. - INLINE(static Address target_constant_pool_address_at( + INLINE(static Address constant_pool_entry_address( Address pc, ConstantPoolArray* constant_pool)); // Read/Modify the code target address in the branch/call instruction at pc. 
@@ -827,16 +757,20 @@ ConstantPoolArray* constant_pool)); INLINE(static void set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target)); + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); INLINE(static Address target_address_at(Address pc, Code* code)) { ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; return target_address_at(pc, constant_pool); } INLINE(static void set_target_address_at(Address pc, Code* code, - Address target)) { + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)) { ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; - set_target_address_at(pc, constant_pool, target); + set_target_address_at(pc, constant_pool, target, icache_flush_mode); } // Return the code target address at a call site from the return address @@ -847,6 +781,9 @@ // in the instruction stream that the call will return from. INLINE(static Address return_address_from_call_start(Address pc)); + // Return the code target address of the patch debug break slot + INLINE(static Address break_address_from_return_address(Address pc)); + // This sets the branch destination (which is in the constant pool on ARM). // This is for calls and branches within generated code. inline static void deserialization_set_special_target_at( @@ -978,10 +915,8 @@ void mov_label_offset(Register dst, Label* label); // ARMv7 instructions for loading a 32 bit immediate in two instructions. - // This may actually emit a different mov instruction, but on an ARMv7 it - // is guaranteed to only emit one instruction. + // The constant for movw and movt should be in the range 0-0xffff. void movw(Register reg, uint32_t immediate, Condition cond = al); - // The constant for movt should be in the range 0-0xffff. 
void movt(Register reg, uint32_t immediate, Condition cond = al); void bic(Register dst, Register src1, const Operand& src2, @@ -990,6 +925,35 @@ void mvn(Register dst, const Operand& src, SBit s = LeaveCC, Condition cond = al); + // Shift instructions + + void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, + Condition cond = al) { + if (src2.is_reg()) { + mov(dst, Operand(src1, ASR, src2.rm()), s, cond); + } else { + mov(dst, Operand(src1, ASR, src2.immediate()), s, cond); + } + } + + void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, + Condition cond = al) { + if (src2.is_reg()) { + mov(dst, Operand(src1, LSL, src2.rm()), s, cond); + } else { + mov(dst, Operand(src1, LSL, src2.immediate()), s, cond); + } + } + + void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, + Condition cond = al) { + if (src2.is_reg()) { + mov(dst, Operand(src1, LSR, src2.rm()), s, cond); + } else { + mov(dst, Operand(src1, LSR, src2.immediate()), s, cond); + } + } + // Multiply instructions void mla(Register dst, Register src1, Register src2, Register srcA, @@ -1001,6 +965,8 @@ void sdiv(Register dst, Register src1, Register src2, Condition cond = al); + void udiv(Register dst, Register src1, Register src2, Condition cond = al); + void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC, Condition cond = al); @@ -1358,7 +1324,7 @@ } // Check whether an immediate fits an addressing mode 1 instruction. - bool ImmediateFitsAddrMode1Instruction(int32_t imm32); + static bool ImmediateFitsAddrMode1Instruction(int32_t imm32); // Check whether an immediate fits an addressing mode 2 instruction. bool ImmediateFitsAddrMode2Instruction(int32_t imm32); @@ -1390,12 +1356,12 @@ // Record the AST id of the CallIC being compiled, so that it can be placed // in the relocation information. 
void SetRecordedAstId(TypeFeedbackId ast_id) { - ASSERT(recorded_ast_id_.IsNone()); + DCHECK(recorded_ast_id_.IsNone()); recorded_ast_id_ = ast_id; } TypeFeedbackId RecordedAstId() { - ASSERT(!recorded_ast_id_.IsNone()); + DCHECK(!recorded_ast_id_.IsNone()); return recorded_ast_id_; } @@ -1413,9 +1379,9 @@ // function, compiled with and without debugger support (see for example // Debug::PrepareForBreakPoints()). // Compiling functions with debugger support generates additional code - // (Debug::GenerateSlot()). This may affect the emission of the constant - // pools and cause the version of the code with debugger support to have - // constant pools generated in different places. + // (DebugCodegen::GenerateSlot()). This may affect the emission of the + // constant pools and cause the version of the code with debugger support to + // have constant pools generated in different places. // Recording the position and size of emitted constant pools allows to // correctly compute the offset mappings between the different versions of a // function in all situations. 
@@ -1450,6 +1416,10 @@ static int GetBranchOffset(Instr instr); static bool IsLdrRegisterImmediate(Instr instr); static bool IsVldrDRegisterImmediate(Instr instr); + static Instr GetConsantPoolLoadPattern(); + static Instr GetConsantPoolLoadMask(); + static bool IsLdrPpRegOffset(Instr instr); + static Instr GetLdrPpRegOffsetPattern(); static bool IsLdrPpImmediateOffset(Instr instr); static bool IsVldrDPpImmediateOffset(Instr instr); static int GetLdrRegisterImmediateOffset(Instr instr); @@ -1471,6 +1441,8 @@ static bool IsLdrRegFpNegOffset(Instr instr); static bool IsLdrPcImmediateOffset(Instr instr); static bool IsVldrDPcImmediateOffset(Instr instr); + static bool IsBlxReg(Instr instr); + static bool IsBlxIp(Instr instr); static bool IsTstImmediate(Instr instr); static bool IsCmpRegister(Instr instr); static bool IsCmpImmediate(Instr instr); @@ -1478,7 +1450,11 @@ static int GetCmpImmediateRawImmediate(Instr instr); static bool IsNop(Instr instr, int type = NON_MARKING_NOP); static bool IsMovT(Instr instr); + static Instr GetMovTPattern(); static bool IsMovW(Instr instr); + static Instr GetMovWPattern(); + static Instr EncodeMovwImmediate(uint32_t immediate); + static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate); // Constants in pools are accessed via pc relative addressing, which can // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point @@ -1498,19 +1474,19 @@ void CheckConstPool(bool force_emit, bool require_jump); // Allocate a constant pool of the correct size for the generated code. - MaybeObject* AllocateConstantPool(Heap* heap); + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); // Generate the constant pool for the generated code. 
void PopulateConstantPool(ConstantPoolArray* constant_pool); - bool can_use_constant_pool() const { - return is_constant_pool_available() && !constant_pool_full_; - } + bool is_constant_pool_available() const { return constant_pool_available_; } - void set_constant_pool_full() { - constant_pool_full_ = true; + bool use_extended_constant_pool() const { + return constant_pool_builder_.current_section() == + ConstantPoolArray::EXTENDED_SECTION; } + protected: // Relocation for a type-recording IC has the AST id added to it. This // member variable is a way to pass the information from the call site to @@ -1544,10 +1520,10 @@ // Max pool start (if we need a jump and an alignment). int start = pc_offset() + kInstrSize + 2 * kPointerSize; // Check the constant pool hasn't been blocked for too long. - ASSERT((num_pending_32_bit_reloc_info_ == 0) || + DCHECK((num_pending_32_bit_reloc_info_ == 0) || (start + num_pending_64_bit_reloc_info_ * kDoubleSize < (first_const_pool_32_use_ + kMaxDistToIntPool))); - ASSERT((num_pending_64_bit_reloc_info_ == 0) || + DCHECK((num_pending_64_bit_reloc_info_ == 0) || (start < (first_const_pool_64_use_ + kMaxDistToFPPool))); #endif // Two cases: @@ -1564,10 +1540,6 @@ (pc_offset() < no_const_pool_before_); } - bool is_constant_pool_available() const { - return constant_pool_available_; - } - void set_constant_pool_available(bool available) { constant_pool_available_ = available; } @@ -1637,9 +1609,6 @@ // Indicates whether the constant pool can be accessed, which is only possible // if the pp register points to the current code object's constant pool. bool constant_pool_available_; - // Indicates whether the constant pool is too full to accept new entries due - // to the ldr instruction's limitted immediate offset range. 
- bool constant_pool_full_; // Code emission inline void CheckBuffer(); @@ -1671,7 +1640,7 @@ // Record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); void RecordRelocInfo(const RelocInfo& rinfo); - void ConstantPoolAddEntry(const RelocInfo& rinfo); + ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo); friend class RelocInfo; friend class CodePatcher; diff -Nru nodejs-0.11.13/deps/v8/src/arm/assembler-arm-inl.h nodejs-0.11.15/deps/v8/src/arm/assembler-arm-inl.h --- nodejs-0.11.13/deps/v8/src/arm/assembler-arm-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/assembler-arm-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -37,16 +37,19 @@ #ifndef V8_ARM_ASSEMBLER_ARM_INL_H_ #define V8_ARM_ASSEMBLER_ARM_INL_H_ -#include "arm/assembler-arm.h" +#include "src/arm/assembler-arm.h" -#include "cpu.h" -#include "debug.h" +#include "src/assembler.h" +#include "src/debug.h" namespace v8 { namespace internal { +bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); } + + int Register::NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } @@ -68,8 +71,8 @@ int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { - ASSERT(!reg.is(kDoubleRegZero)); - ASSERT(!reg.is(kScratchDoubleReg)); + DCHECK(!reg.is(kDoubleRegZero)); + DCHECK(!reg.is(kScratchDoubleReg)); if (reg.code() > kDoubleRegZero.code()) { return reg.code() - kNumReservedRegisters; } @@ -78,8 +81,8 @@ DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < NumAllocatableRegisters()); - ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() == + DCHECK(index >= 0 && index < NumAllocatableRegisters()); + DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() == kNumReservedRegisters - 1); if (index >= kDoubleRegZero.code()) { return from_code(index + kNumReservedRegisters); @@ -88,7 +91,7 @@ } -void RelocInfo::apply(intptr_t delta) { +void RelocInfo::apply(intptr_t 
delta, ICacheFlushMode icache_flush_mode) { if (RelocInfo::IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. int32_t* p = reinterpret_cast<int32_t*>(pc_); @@ -100,13 +103,13 @@ Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); if (FLAG_enable_ool_constant_pool || @@ -115,22 +118,15 @@ // serializerer and expects the address to reside within the code object. return reinterpret_cast<Address>(pc_); } else { - ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_))); - return Assembler::target_pointer_address_at(pc_); + DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_))); + return constant_pool_entry_address(); } } Address RelocInfo::constant_pool_entry_address() { - ASSERT(IsInConstantPool()); - if (FLAG_enable_ool_constant_pool) { - ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_))); - return Assembler::target_constant_pool_address_at(pc_, - host_->constant_pool()); - } else { - ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_))); - return Assembler::target_pointer_address_at(pc_); - } + DCHECK(IsInConstantPool()); + return Assembler::constant_pool_entry_address(pc_, host_->constant_pool()); } @@ -139,10 +135,13 @@ } -void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); - Assembler::set_target_address_at(pc_, host_, target); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode 
icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && + host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( host(), this, HeapObject::cast(target_code)); @@ -151,24 +150,26 @@ Object* RelocInfo::target_object() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)); } Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Handle<Object>(reinterpret_cast<Object**>( Assembler::target_address_at(pc_, host_))); } -void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - ASSERT(!target->IsConsString()); +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, host_, - reinterpret_cast<Address>(target)); - if (mode == UPDATE_WRITE_BARRIER && + reinterpret_cast<Address>(target), + icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { host()->GetHeap()->incremental_marking()->RecordWrite( @@ -178,42 +179,46 @@ Address RelocInfo::target_reference() { - ASSERT(rmode_ == EXTERNAL_REFERENCE); + DCHECK(rmode_ == EXTERNAL_REFERENCE); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_runtime_entry(Assembler* origin) { - ASSERT(IsRuntimeEntry(rmode_)); + DCHECK(IsRuntimeEntry(rmode_)); return 
target_address(); } void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode mode) { - ASSERT(IsRuntimeEntry(rmode_)); - if (target_address() != target) set_target_address(target, mode); + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) + set_target_address(target, write_barrier_mode, icache_flush_mode); } Handle<Cell> RelocInfo::target_cell_handle() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); Address address = Memory::Address_at(pc_); return Handle<Cell>(reinterpret_cast<Cell**>(address)); } Cell* RelocInfo::target_cell() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); return Cell::FromValueAddress(Memory::Address_at(pc_)); } -void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { - ASSERT(rmode_ == RelocInfo::CELL); +void RelocInfo::set_target_cell(Cell* cell, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CELL); Address address = cell->address() + Cell::kValueOffset; Memory::Address_at(pc_) = address; - if (mode == UPDATE_WRITE_BARRIER && host() != NULL) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) { // TODO(1550) We are passing NULL as a slot because cell can never be on // evacuation candidate. 
host()->GetHeap()->incremental_marking()->RecordWrite( @@ -222,7 +227,7 @@ } -static const int kNoCodeAgeSequenceLength = 3; +static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize; Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { @@ -232,17 +237,18 @@ Code* RelocInfo::code_age_stub() { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); return Code::GetCodeFromTargetAddress( - Memory::Address_at(pc_ + Assembler::kInstrSize * - (kNoCodeAgeSequenceLength - 1))); + Memory::Address_at(pc_ + + (kNoCodeAgeSequenceLength - Assembler::kInstrSize))); } -void RelocInfo::set_code_age_stub(Code* stub) { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - Memory::Address_at(pc_ + Assembler::kInstrSize * - (kNoCodeAgeSequenceLength - 1)) = +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Memory::Address_at(pc_ + + (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) = stub->instruction_start(); } @@ -250,14 +256,14 @@ Address RelocInfo::call_address() { // The 2 instructions offset assumes patched debug break slot or return // sequence. 
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize); } void RelocInfo::set_call_address(Address target) { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; if (host() != NULL) { @@ -279,14 +285,14 @@ Object** RelocInfo::call_object_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize); } void RelocInfo::WipeOut() { - ASSERT(IsEmbeddedObject(rmode_) || + DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsExternalReference(rmode_)); @@ -300,8 +306,8 @@ // A patched return sequence is: // ldr ip, [pc, #0] // blx ip - return ((current_instr & kLdrPCMask) == kLdrPCPattern) - && ((next_instr & kBlxRegMask) == kBlxRegPattern); + return Assembler::IsLdrPcImmediateOffset(current_instr) && + Assembler::IsBlxReg(next_instr); } @@ -323,14 +329,12 @@ visitor->VisitExternalReference(this); } else if (RelocInfo::IsCodeAgeSequence(mode)) { visitor->VisitCodeAgeSequence(this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence())) && isolate->debug()->has_break_points()) { visitor->VisitDebugTarget(this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } @@ -350,14 +354,12 @@ StaticVisitor::VisitExternalReference(this); } else if (RelocInfo::IsCodeAgeSequence(mode)) { 
StaticVisitor::VisitCodeAgeSequence(heap, this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } @@ -418,42 +420,6 @@ } -Address Assembler::target_pointer_address_at(Address pc) { - Instr instr = Memory::int32_at(pc); - return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta; -} - - -Address Assembler::target_constant_pool_address_at( - Address pc, ConstantPoolArray* constant_pool) { - ASSERT(constant_pool != NULL); - ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc))); - Instr instr = Memory::int32_at(pc); - return reinterpret_cast<Address>(constant_pool) + - GetLdrRegisterImmediateOffset(instr); -} - - -Address Assembler::target_address_at(Address pc, - ConstantPoolArray* constant_pool) { - if (IsMovW(Memory::int32_at(pc))) { - ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); - Instruction* instr = Instruction::At(pc); - Instruction* next_instr = Instruction::At(pc + kInstrSize); - return reinterpret_cast<Address>( - (next_instr->ImmedMovwMovtValue() << 16) | - instr->ImmedMovwMovtValue()); - } else if (FLAG_enable_ool_constant_pool) { - ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc))); - return Memory::Address_at( - target_constant_pool_address_at(pc, constant_pool)); - } else { - ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc))); - return Memory::Address_at(target_pointer_address_at(pc)); - } -} - - Address Assembler::target_address_from_return_address(Address pc) { // Returns the address of the call target from the return address that will // be returned to after a call. @@ -462,8 +428,15 @@ // movt ip, #... 
@ call address high 16 // blx ip // @ return address - // Or pre-V7 or cases that need frequent patching: - // ldr ip, [pc, #...] @ call address + // Or pre-V7 or cases that need frequent patching, the address is in the + // constant pool. It could be a small constant pool load: + // ldr ip, [pc / pp, #...] @ call address + // blx ip + // @ return address + // Or an extended constant pool load: + // movw ip, #... + // movt ip, #... + // ldr ip, [pc, ip] @ call address // blx ip // @ return address Address candidate = pc - 2 * Assembler::kInstrSize; @@ -471,22 +444,40 @@ if (IsLdrPcImmediateOffset(candidate_instr) | IsLdrPpImmediateOffset(candidate_instr)) { return candidate; + } else if (IsLdrPpRegOffset(candidate_instr)) { + candidate = pc - 4 * Assembler::kInstrSize; + DCHECK(IsMovW(Memory::int32_at(candidate)) && + IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize))); + return candidate; + } else { + candidate = pc - 3 * Assembler::kInstrSize; + DCHECK(IsMovW(Memory::int32_at(candidate)) && + IsMovT(Memory::int32_at(candidate + kInstrSize))); + return candidate; } - candidate = pc - 3 * Assembler::kInstrSize; - ASSERT(IsMovW(Memory::int32_at(candidate)) && - IsMovT(Memory::int32_at(candidate + kInstrSize))); - return candidate; +} + + +Address Assembler::break_address_from_return_address(Address pc) { + return pc - Assembler::kPatchDebugBreakSlotReturnOffset; } Address Assembler::return_address_from_call_start(Address pc) { if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) | IsLdrPpImmediateOffset(Memory::int32_at(pc))) { + // Load from constant pool, small section. return pc + kInstrSize * 2; } else { - ASSERT(IsMovW(Memory::int32_at(pc))); - ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); - return pc + kInstrSize * 3; + DCHECK(IsMovW(Memory::int32_at(pc))); + DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize))); + if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) { + // Load from constant pool, extended section. 
+ return pc + kInstrSize * 4; + } else { + // A movw / movt load immediate. + return pc + kInstrSize * 3; + } } } @@ -501,45 +492,88 @@ } -static Instr EncodeMovwImmediate(uint32_t immediate) { - ASSERT(immediate < 0x10000); - return ((immediate & 0xf000) << 4) | (immediate & 0xfff); +bool Assembler::is_constant_pool_load(Address pc) { + return !Assembler::IsMovW(Memory::int32_at(pc)) || + (FLAG_enable_ool_constant_pool && + Assembler::IsLdrPpRegOffset( + Memory::int32_at(pc + 2 * Assembler::kInstrSize))); +} + + +Address Assembler::constant_pool_entry_address( + Address pc, ConstantPoolArray* constant_pool) { + if (FLAG_enable_ool_constant_pool) { + DCHECK(constant_pool != NULL); + int cp_offset; + if (IsMovW(Memory::int32_at(pc))) { + DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) && + IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))); + // This is an extended constant pool lookup. + Instruction* movw_instr = Instruction::At(pc); + Instruction* movt_instr = Instruction::At(pc + kInstrSize); + cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) | + movw_instr->ImmedMovwMovtValue(); + } else { + // This is a small constant pool lookup. + DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc))); + cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc)); + } + return reinterpret_cast<Address>(constant_pool) + cp_offset; + } else { + DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc))); + Instr instr = Memory::int32_at(pc); + return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta; + } +} + + +Address Assembler::target_address_at(Address pc, + ConstantPoolArray* constant_pool) { + if (is_constant_pool_load(pc)) { + // This is a constant pool lookup. Return the value in the constant pool. + return Memory::Address_at(constant_pool_entry_address(pc, constant_pool)); + } else { + // This is an movw_movt immediate load. Return the immediate. 
+ DCHECK(IsMovW(Memory::int32_at(pc)) && + IsMovT(Memory::int32_at(pc + kInstrSize))); + Instruction* movw_instr = Instruction::At(pc); + Instruction* movt_instr = Instruction::At(pc + kInstrSize); + return reinterpret_cast<Address>( + (movt_instr->ImmedMovwMovtValue() << 16) | + movw_instr->ImmedMovwMovtValue()); + } } void Assembler::set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target) { - if (IsMovW(Memory::int32_at(pc))) { - ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); - uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc); - uint32_t immediate = reinterpret_cast<uint32_t>(target); - uint32_t intermediate = instr_ptr[0]; - intermediate &= ~EncodeMovwImmediate(0xFFFF); - intermediate |= EncodeMovwImmediate(immediate & 0xFFFF); - instr_ptr[0] = intermediate; - intermediate = instr_ptr[1]; - intermediate &= ~EncodeMovwImmediate(0xFFFF); - intermediate |= EncodeMovwImmediate(immediate >> 16); - instr_ptr[1] = intermediate; - ASSERT(IsMovW(Memory::int32_at(pc))); - ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); - CPU::FlushICache(pc, 2 * kInstrSize); - } else if (FLAG_enable_ool_constant_pool) { - ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc))); - Memory::Address_at( - target_constant_pool_address_at(pc, constant_pool)) = target; - } else { - ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc))); - Memory::Address_at(target_pointer_address_at(pc)) = target; + Address target, + ICacheFlushMode icache_flush_mode) { + if (is_constant_pool_load(pc)) { + // This is a constant pool lookup. Update the entry in the constant pool. 
+ Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target; // Intuitively, we would think it is necessary to always flush the // instruction cache after patching a target address in the code as follows: - // CPU::FlushICache(pc, sizeof(target)); + // CpuFeatures::FlushICache(pc, sizeof(target)); // However, on ARM, no instruction is actually patched in the case // of embedded constants of the form: - // ldr ip, [pc, #...] + // ldr ip, [pp, #...] // since the instruction accessing this address in the constant pool remains // unchanged. + } else { + // This is an movw_movt immediate load. Patch the immediate embedded in the + // instructions. + DCHECK(IsMovW(Memory::int32_at(pc))); + DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize))); + uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc); + uint32_t immediate = reinterpret_cast<uint32_t>(target); + instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF); + instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16); + DCHECK(IsMovW(Memory::int32_at(pc))); + DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize))); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc, 2 * kInstrSize); + } } } diff -Nru nodejs-0.11.13/deps/v8/src/arm/builtins-arm.cc nodejs-0.11.15/deps/v8/src/arm/builtins-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/builtins-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/builtins-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "codegen.h" -#include "debug.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -63,7 +40,7 @@ num_extra_args = 1; __ push(r1); } else { - ASSERT(extra_args == NO_EXTRA_ARGUMENTS); + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); } // JumpToExternalReference expects r0 to contain the number of arguments @@ -326,7 +303,7 @@ __ cmp(sp, Operand(ip)); __ b(hs, &ok); - CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode); + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); GenerateTailCallToReturnedCode(masm); __ bind(&ok); @@ -336,7 +313,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function, - bool count_constructions, bool create_memento) { // ----------- S t a t e ------------- // -- r0 : number of arguments @@ -346,14 +322,8 @@ // -- sp[...]: constructor arguments // ----------------------------------- - // Should never count constructions for api objects. - ASSERT(!is_api_function || !count_constructions); - // Should never create mementos for api functions. - ASSERT(!is_api_function || !create_memento); - - // Should never create mementos before slack tracking is finished. 
- ASSERT(!count_constructions || !create_memento); + DCHECK(!is_api_function || !create_memento); Isolate* isolate = masm->isolate(); @@ -376,14 +346,12 @@ Label rt_call, allocated; if (FLAG_inline_new) { Label undo_allocation; -#ifdef ENABLE_DEBUGGER_SUPPORT ExternalReference debug_step_in_fp = ExternalReference::debug_step_in_fp_address(isolate); __ mov(r2, Operand(debug_step_in_fp)); __ ldr(r2, MemOperand(r2)); __ tst(r2, r2); __ b(ne, &rt_call); -#endif // Load the initial map and verify that it is in fact a map. // r1: constructor function @@ -400,22 +368,24 @@ __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); __ b(eq, &rt_call); - if (count_constructions) { + if (!is_api_function) { Label allocate; + MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset); + // Check if slack tracking is enabled. + __ ldr(r4, bit_field3); + __ DecodeField<Map::ConstructionCount>(r3, r4); + __ cmp(r3, Operand(JSFunction::kNoSlackTracking)); + __ b(eq, &allocate); // Decrease generous allocation count. - __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); - __ ldrb(r4, constructor_count); - __ sub(r4, r4, Operand(1), SetCC); - __ strb(r4, constructor_count); + __ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift)); + __ str(r4, bit_field3); + __ cmp(r3, Operand(JSFunction::kFinishSlackTracking)); __ b(ne, &allocate); __ push(r1); __ Push(r2, r1); // r1 = constructor - // The call will replace the stub, so the countdown is only done once. 
- __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1); + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); __ pop(r2); __ pop(r1); @@ -441,11 +411,11 @@ // r4: JSObject (not tagged) __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); __ mov(r5, r4); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset); __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset); __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); // Fill all the in-object properties with the appropriate filler. @@ -454,10 +424,19 @@ // r3: object size (in words, including memento if create_memento) // r4: JSObject (not tagged) // r5: First in-object property of JSObject (not tagged) - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize); + __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); + + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. + __ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset)); + __ DecodeField<Map::ConstructionCount>(ip); + __ cmp(ip, Operand(JSFunction::kNoSlackTracking)); + __ b(eq, &no_inobject_slack_tracking); - if (count_constructions) { - __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); + // Allocate object with a slack. __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, kBitsPerByte); @@ -471,25 +450,26 @@ __ InitializeFieldsWithFiller(r5, r0, r6); // To allow for truncation. __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex); - __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. 
- __ InitializeFieldsWithFiller(r5, r0, r6); - } else if (create_memento) { - __ sub(r6, r3, Operand(AllocationMemento::kSize / kPointerSize)); - __ add(r0, r4, Operand(r6, LSL, kPointerSizeLog2)); // End of object. - __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); + // Fill the remaining fields with one pointer filler map. + + __ bind(&no_inobject_slack_tracking); + } + + if (create_memento) { + __ sub(ip, r3, Operand(AllocationMemento::kSize / kPointerSize)); + __ add(r0, r4, Operand(ip, LSL, kPointerSizeLog2)); // End of object. __ InitializeFieldsWithFiller(r5, r0, r6); // Fill in memento fields. // r5: points to the allocated but uninitialized memento. __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex); - ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); + DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); // Load the AllocationSite __ ldr(r6, MemOperand(sp, 2 * kPointerSize)); - ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); + DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); } else { - __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. __ InitializeFieldsWithFiller(r5, r0, r6); } @@ -542,9 +522,9 @@ // r5: FixedArray (not tagged) __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); __ mov(r2, r5); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset); __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset); __ SmiTag(r0, r3); __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); @@ -555,7 +535,7 @@ // r4: JSObject // r5: FixedArray (not tagged) __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. 
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize); { Label loop, entry; __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); __ b(&entry); @@ -598,9 +578,9 @@ __ push(r1); // argument for Runtime_NewObject if (create_memento) { - __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2); + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); } else { - __ CallRuntime(Runtime::kHiddenNewObject, 1); + __ CallRuntime(Runtime::kNewObject, 1); } __ mov(r4, r0); @@ -680,7 +660,7 @@ } // Store offset of return address for deoptimizer. - if (!is_api_function && !count_constructions) { + if (!is_api_function) { masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); } @@ -732,18 +712,13 @@ } -void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, true, false); -} - - void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new); + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); } void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, true, false, false); + Generate_JSConstructStubHelper(masm, true, false); } @@ -807,7 +782,7 @@ if (is_construct) { // No type feedback cell is available __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); __ CallStub(&stub); } else { ParameterCount actual(r0); @@ -834,7 +809,7 @@ void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized); + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); GenerateTailCallToReturnedCode(masm); } @@ -848,7 +823,7 @@ // Whether to compile in a background thread. 
__ Push(masm->isolate()->factory()->ToBoolean(concurrent)); - __ CallRuntime(Runtime::kHiddenCompileOptimized, 2); + __ CallRuntime(Runtime::kCompileOptimized, 2); // Restore receiver. __ pop(r1); } @@ -923,7 +898,7 @@ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); // Jump to point after the code-age stub. - __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize)); + __ add(r0, r0, Operand(kNoCodeAgeSequenceLength)); __ mov(pc, r0); } @@ -943,7 +918,7 @@ // registers. __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved); // Pass the function and deoptimization type to the runtime system. - __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved); } @@ -969,7 +944,7 @@ // Pass the function and deoptimization type to the runtime system. __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type)))); __ push(r0); - __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); } // Get the full codegen state from the stack and untag it -> r6. @@ -1060,7 +1035,7 @@ __ b(hs, &ok); { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kHiddenStackGuard, 0); + __ CallRuntime(Runtime::kStackGuard, 0); } __ Jump(masm->isolate()->builtins()->OnStackReplacement(), RelocInfo::CODE_TARGET); @@ -1096,7 +1071,7 @@ // r1: function Label shift_arguments; __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION - { Label convert_to_object, use_global_receiver, patch_receiver; + { Label convert_to_object, use_global_proxy, patch_receiver; // Change context eagerly in case we need the global receiver. 
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); @@ -1121,10 +1096,10 @@ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex); __ cmp(r2, r3); - __ b(eq, &use_global_receiver); + __ b(eq, &use_global_proxy); __ LoadRoot(r3, Heap::kNullValueRootIndex); __ cmp(r2, r3); - __ b(eq, &use_global_receiver); + __ b(eq, &use_global_proxy); STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE); @@ -1153,9 +1128,9 @@ __ mov(r4, Operand::Zero()); __ jmp(&patch_receiver); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); __ bind(&patch_receiver); __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2)); @@ -1284,7 +1259,7 @@ // Out of stack space. __ ldr(r1, MemOperand(fp, kFunctionOffset)); __ Push(r1, r0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); // End of stack check. // Push current limit and index. @@ -1309,7 +1284,7 @@ // Compute the receiver. // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; + Label call_to_object, use_global_proxy; __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); @@ -1323,10 +1298,10 @@ __ JumpIfSmi(r0, &call_to_object); __ LoadRoot(r1, Heap::kNullValueRootIndex); __ cmp(r0, r1); - __ b(eq, &use_global_receiver); + __ b(eq, &use_global_proxy); __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); __ cmp(r0, r1); - __ b(eq, &use_global_receiver); + __ b(eq, &use_global_proxy); // Check if the receiver is already a JavaScript object. 
// r0: receiver @@ -1341,9 +1316,9 @@ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); __ b(&push_receiver); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalProxyOffset)); // Push the receiver. // r0: receiver @@ -1407,6 +1382,26 @@ } +static void ArgumentAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- r0 : actual number of arguments + // -- r1 : function (passed through to callee) + // -- r2 : expected number of arguments + // ----------------------------------- + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + __ LoadRoot(r5, Heap::kRealStackLimitRootIndex); + // Make r5 the space we have left. The stack might already be overflowed + // here which will cause r5 to become negative. + __ sub(r5, sp, r5); + // Check if the arguments will overflow the stack. + __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2)); + __ b(le, stack_overflow); // Signed comparison. 
+} + + static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ SmiTag(r0); __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); @@ -1446,6 +1441,8 @@ // -- r2 : expected number of arguments // ----------------------------------- + Label stack_overflow; + ArgumentAdaptorStackCheck(masm, &stack_overflow); Label invoke, dont_adapt_arguments; Label enough, too_few; @@ -1545,6 +1542,14 @@ // ------------------------------------------- __ bind(&dont_adapt_arguments); __ Jump(r3); + + __ bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bkpt(0); + } } diff -Nru nodejs-0.11.13/deps/v8/src/arm/codegen-arm.cc nodejs-0.11.15/deps/v8/src/arm/codegen-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/codegen-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/codegen-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "codegen.h" -#include "macro-assembler.h" -#include "simulator-arm.h" +#include "src/arm/simulator-arm.h" +#include "src/codegen.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -52,7 +29,8 @@ UnaryMathFunction CreateExpFunction() { if (!FLAG_fast_math) return &std::exp; size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::exp; ExternalReference::InitializeMathExpData(); @@ -87,10 +65,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); #if !defined(USE_SIMULATOR) return FUNCTION_CAST<UnaryMathFunction>(buffer); @@ -101,16 +79,14 @@ } #if 
defined(V8_HOST_ARCH_ARM) -OS::MemCopyUint8Function CreateMemCopyUint8Function( - OS::MemCopyUint8Function stub) { +MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else - if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { - return stub; - } + if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub; size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return stub; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); @@ -249,26 +225,25 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); - return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); + return FUNCTION_CAST<MemCopyUint8Function>(buffer); #endif } // Convert 8 to 16. The number of character to copy must be at least 8. 
-OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( - OS::MemCopyUint16Uint8Function stub) { +MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( + MemCopyUint16Uint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else - if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { - return stub; - } + if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub; size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return stub; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); @@ -339,10 +314,10 @@ CodeDesc desc; masm.GetCode(&desc); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); - return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer); + return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer); #endif } #endif @@ -352,7 +327,8 @@ return &std::sqrt; #else size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::sqrt; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); @@ -364,10 +340,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<UnaryMathFunction>(buffer); #endif } @@ -380,14 +356,14 @@ void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); + DCHECK(!masm->has_frame()); 
masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); + DCHECK(masm->has_frame()); masm->set_has_frame(false); } @@ -398,26 +374,28 @@ #define __ ACCESS_MASM(masm) void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm, AllocationSiteMode mode, + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, Label* allocation_memento_found) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : target map, scratch for subsequent call - // -- r4 : scratch (elements) - // ----------------------------------- + Register scratch_elements = r4; + DCHECK(!AreAliased(receiver, key, value, target_map, + scratch_elements)); + if (mode == TRACK_ALLOCATION_SITE) { - ASSERT(allocation_memento_found != NULL); - __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found); + DCHECK(allocation_memento_found != NULL); + __ JumpIfJSArrayHasAllocationMemento( + receiver, scratch_elements, allocation_memento_found); } // Set transitioned map. 
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ RecordWriteField(r2, + __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - r3, + target_map, r9, kLRHasNotBeenSaved, kDontSaveFPRegs, @@ -427,87 +405,103 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : target map, scratch for subsequent call - // -- r4 : scratch (elements) - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Register lr contains the return address. Label loop, entry, convert_hole, gc_required, only_change_map, done; + Register elements = r4; + Register length = r5; + Register array = r6; + Register array_end = array; + + // target_map parameter can be clobbered. + Register scratch1 = target_map; + Register scratch2 = r9; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, length, array, scratch2)); if (mode == TRACK_ALLOCATION_SITE) { - __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail); + __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); } // Check for empty arrays, which only require a map transition and no changes // to the backing store. 
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); - __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); __ b(eq, &only_change_map); __ push(lr); - __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // r5: number of elements (smi-tagged) + __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); + // length: number of elements (smi-tagged) // Allocate new FixedDoubleArray. // Use lr as a temporary register. - __ mov(lr, Operand(r5, LSL, 2)); + __ mov(lr, Operand(length, LSL, 2)); __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize)); - __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT); - // r6: destination FixedDoubleArray, not tagged as heap object. - __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); + __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT); + // array: destination FixedDoubleArray, not tagged as heap object. + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); // r4: source FixedArray. // Set destination FixedDoubleArray's length and map. - __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex); - __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset)); + __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex); + __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); // Update receiver's map. - __ str(r9, MemOperand(r6, HeapObject::kMapOffset)); + __ str(scratch2, MemOperand(array, HeapObject::kMapOffset)); - __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ RecordWriteField(r2, + __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - r3, - r9, + target_map, + scratch2, kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Replace receiver's backing store with newly created FixedDoubleArray. 
- __ add(r3, r6, Operand(kHeapObjectTag)); - __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset)); - __ RecordWriteField(r2, + __ add(scratch1, array, Operand(kHeapObjectTag)); + __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ RecordWriteField(receiver, JSObject::kElementsOffset, - r3, - r9, + scratch1, + scratch2, kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Prepare for conversion loop. - __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize)); - __ add(r6, r9, Operand(r5, LSL, 2)); - __ mov(r4, Operand(kHoleNanLower32)); - __ mov(r5, Operand(kHoleNanUpper32)); - // r3: begin of source FixedArray element fields, not tagged - // r4: kHoleNanLower32 - // r5: kHoleNanUpper32 - // r6: end of destination FixedDoubleArray, not tagged - // r9: begin of FixedDoubleArray element fields, not tagged + __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize)); + __ add(array_end, scratch2, Operand(length, LSL, 2)); + + // Repurpose registers no longer in use. 
+ Register hole_lower = elements; + Register hole_upper = length; + + __ mov(hole_lower, Operand(kHoleNanLower32)); + __ mov(hole_upper, Operand(kHoleNanUpper32)); + // scratch1: begin of source FixedArray element fields, not tagged + // hole_lower: kHoleNanLower32 + // hole_upper: kHoleNanUpper32 + // array_end: end of destination FixedDoubleArray, not tagged + // scratch2: begin of FixedDoubleArray element fields, not tagged __ b(&entry); __ bind(&only_change_map); - __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ RecordWriteField(r2, + __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - r3, - r9, + target_map, + scratch2, kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, @@ -521,15 +515,15 @@ // Convert and copy elements. __ bind(&loop); - __ ldr(lr, MemOperand(r3, 4, PostIndex)); + __ ldr(lr, MemOperand(scratch1, 4, PostIndex)); // lr: current element __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole); // Normal smi, convert to double and store. __ vmov(s0, lr); __ vcvt_f64_s32(d0, s0); - __ vstr(d0, r9, 0); - __ add(r9, r9, Operand(8)); + __ vstr(d0, scratch2, 0); + __ add(scratch2, scratch2, Operand(8)); __ b(&entry); // Hole found, store the-hole NaN. 
@@ -541,10 +535,10 @@ __ CompareRoot(lr, Heap::kTheHoleValueRootIndex); __ Assert(eq, kObjectFoundInSmiOnlyArray); } - __ Strd(r4, r5, MemOperand(r9, 8, PostIndex)); + __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex)); __ bind(&entry); - __ cmp(r9, r6); + __ cmp(scratch2, array_end); __ b(lt, &loop); __ pop(lr); @@ -553,80 +547,104 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : target map, scratch for subsequent call - // -- r4 : scratch (elements) - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Register lr contains the return address. Label entry, loop, convert_hole, gc_required, only_change_map; + Register elements = r4; + Register array = r6; + Register length = r5; + Register scratch = r9; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, array, length, scratch)); if (mode == TRACK_ALLOCATION_SITE) { - __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail); + __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); } // Check for empty arrays, which only require a map transition and no changes // to the backing store. 
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); - __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); __ b(eq, &only_change_map); __ push(lr); - __ Push(r3, r2, r1, r0); - __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // r4: source FixedDoubleArray - // r5: number of elements (smi-tagged) + __ Push(target_map, receiver, key, value); + __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); + // elements: source FixedDoubleArray + // length: number of elements (smi-tagged) // Allocate new FixedArray. - __ mov(r0, Operand(FixedDoubleArray::kHeaderSize)); - __ add(r0, r0, Operand(r5, LSL, 1)); - __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS); - // r6: destination FixedArray, not tagged as heap object + // Re-use value and target_map registers, as they have been saved on the + // stack. + Register array_size = value; + Register allocate_scratch = target_map; + __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize)); + __ add(array_size, array_size, Operand(length, LSL, 1)); + __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required, + NO_ALLOCATION_FLAGS); + // array: destination FixedArray, not tagged as heap object // Set destination FixedDoubleArray's length and map. - __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex); - __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset)); - __ str(r9, MemOperand(r6, HeapObject::kMapOffset)); + __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); + __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); + __ str(scratch, MemOperand(array, HeapObject::kMapOffset)); // Prepare for conversion loop. 
- __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); - __ add(r3, r6, Operand(FixedArray::kHeaderSize)); - __ add(r6, r6, Operand(kHeapObjectTag)); - __ add(r5, r3, Operand(r5, LSL, 1)); - __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex); - // Using offsetted addresses in r4 to fully take advantage of post-indexing. - // r3: begin of destination FixedArray element fields, not tagged - // r4: begin of source FixedDoubleArray element fields, not tagged, +4 - // r5: end of destination FixedArray, not tagged - // r6: destination FixedArray - // r9: heap number map + Register src_elements = elements; + Register dst_elements = target_map; + Register dst_end = length; + Register heap_number_map = scratch; + __ add(src_elements, elements, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); + __ add(dst_elements, array, Operand(FixedArray::kHeaderSize)); + __ add(array, array, Operand(kHeapObjectTag)); + __ add(dst_end, dst_elements, Operand(length, LSL, 1)); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + // Using offsetted addresses in src_elements to fully take advantage of + // post-indexing. + // dst_elements: begin of destination FixedArray element fields, not tagged + // src_elements: begin of source FixedDoubleArray element fields, + // not tagged, +4 + // dst_end: end of destination FixedArray, not tagged + // array: destination FixedArray + // heap_number_map: heap number map __ b(&entry); // Call into runtime if GC is required. 
__ bind(&gc_required); - __ Pop(r3, r2, r1, r0); + __ Pop(target_map, receiver, key, value); __ pop(lr); __ b(fail); __ bind(&loop); - __ ldr(r1, MemOperand(r4, 8, PostIndex)); - // r1: current element's upper 32 bit - // r4: address of next element's upper 32 bit - __ cmp(r1, Operand(kHoleNanUpper32)); + Register upper_bits = key; + __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex)); + // upper_bits: current element's upper 32 bit + // src_elements: address of next element's upper 32 bit + __ cmp(upper_bits, Operand(kHoleNanUpper32)); __ b(eq, &convert_hole); // Non-hole double, copy value into a heap number. - __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required); - // r2: new heap number - __ ldr(r0, MemOperand(r4, 12, NegOffset)); - __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset)); - __ mov(r0, r3); - __ str(r2, MemOperand(r3, 4, PostIndex)); - __ RecordWrite(r6, - r0, - r2, + Register heap_number = receiver; + Register scratch2 = value; + __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map, + &gc_required); + // heap_number: new heap number + __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset)); + __ Strd(scratch2, upper_bits, + FieldMemOperand(heap_number, HeapNumber::kValueOffset)); + __ mov(scratch2, dst_elements); + __ str(heap_number, MemOperand(dst_elements, 4, PostIndex)); + __ RecordWrite(array, + scratch2, + heap_number, kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, @@ -635,20 +653,20 @@ // Replace the-hole NaN with the-hole pointer. __ bind(&convert_hole); - __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); - __ str(r0, MemOperand(r3, 4, PostIndex)); + __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); + __ str(scratch2, MemOperand(dst_elements, 4, PostIndex)); __ bind(&entry); - __ cmp(r3, r5); + __ cmp(dst_elements, dst_end); __ b(lt, &loop); - __ Pop(r3, r2, r1, r0); + __ Pop(target_map, receiver, key, value); // Replace receiver's backing store with newly created and filled FixedArray. 
- __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset)); - __ RecordWriteField(r2, + __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ RecordWriteField(receiver, JSObject::kElementsOffset, - r6, - r9, + array, + scratch, kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, @@ -657,11 +675,11 @@ __ bind(&only_change_map); // Update receiver's map. - __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ RecordWriteField(r2, + __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - r3, - r9, + target_map, + scratch, kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, @@ -736,7 +754,7 @@ __ Assert(eq, kExternalStringExpectedButNotFound); } // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ tst(result, Operand(kShortExternalStringMask)); __ b(ne, call_runtime); __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); @@ -769,16 +787,17 @@ Register temp1, Register temp2, Register temp3) { - ASSERT(!input.is(result)); - ASSERT(!input.is(double_scratch1)); - ASSERT(!input.is(double_scratch2)); - ASSERT(!result.is(double_scratch1)); - ASSERT(!result.is(double_scratch2)); - ASSERT(!double_scratch1.is(double_scratch2)); - ASSERT(!temp1.is(temp2)); - ASSERT(!temp1.is(temp3)); - ASSERT(!temp2.is(temp3)); - ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!input.is(result)); + DCHECK(!input.is(double_scratch1)); + DCHECK(!input.is(double_scratch2)); + DCHECK(!result.is(double_scratch1)); + DCHECK(!result.is(double_scratch2)); + DCHECK(!double_scratch1.is(double_scratch2)); + DCHECK(!temp1.is(temp2)); + DCHECK(!temp1.is(temp3)); + DCHECK(!temp2.is(temp3)); + DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!masm->serializer_enabled()); // External references not serializable. 
Label zero, infinity, done; @@ -809,7 +828,7 @@ __ vmul(result, result, double_scratch2); __ vsub(result, result, double_scratch1); // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1. - ASSERT(*reinterpret_cast<double*> + DCHECK(*reinterpret_cast<double*> (ExternalReference::math_exp_constants(8).address()) == 1); __ vmov(double_scratch2, 1); __ vadd(result, result, double_scratch2); @@ -849,47 +868,46 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008; #endif -static byte* GetNoCodeAgeSequence(uint32_t* length) { - // The sequence of instructions that is patched out for aging code is the - // following boilerplate stack-building prologue that is found in FUNCTIONS - static bool initialized = false; - static uint32_t sequence[kNoCodeAgeSequenceLength]; - byte* byte_sequence = reinterpret_cast<byte*>(sequence); - *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize; - if (!initialized) { - // Since patcher is a large object, allocate it dynamically when needed, - // to avoid overloading the stack in stress conditions. - SmartPointer<CodePatcher> - patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength)); - PredictableCodeSizeScope scope(patcher->masm(), *length); - patcher->masm()->PushFixedFrame(r1); - patcher->masm()->nop(ip.code()); - patcher->masm()->add( - fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - initialized = true; - } - return byte_sequence; +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + // Since patcher is a large object, allocate it dynamically when needed, + // to avoid overloading the stack in stress conditions. + // DONT_FLUSH is used because the CodeAgingHelper is initialized early in + // the process, before ARM simulator ICache is setup. 
+ SmartPointer<CodePatcher> patcher( + new CodePatcher(young_sequence_.start(), + young_sequence_.length() / Assembler::kInstrSize, + CodePatcher::DONT_FLUSH)); + PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); + patcher->masm()->PushFixedFrame(r1); + patcher->masm()->nop(ip.code()); + patcher->masm()->add( + fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } -bool Code::IsYoungSequence(byte* sequence) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); - bool result = !memcmp(sequence, young_sequence, young_length); - ASSERT(result || - Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction); +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; +} +#endif + + +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); return result; } -void Code::GetCodeAgeAndParity(byte* sequence, Age* age, +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity) { - if (IsYoungSequence(sequence)) { + if (IsYoungSequence(isolate, sequence)) { *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { Address target_address = Memory::Address_at( - sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1)); + sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize)); Code* stub = GetCodeFromTargetAddress(target_address); GetCodeAgeAndParity(stub, age, parity); } @@ -900,11 +918,10 @@ byte* sequence, Code::Age age, MarkingParity parity) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { - CopyBytes(sequence, young_sequence, young_length); - CPU::FlushICache(sequence, 
young_length); + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); + CpuFeatures::FlushICache(sequence, young_length); } else { Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); diff -Nru nodejs-0.11.13/deps/v8/src/arm/codegen-arm.h nodejs-0.11.15/deps/v8/src/arm/codegen-arm.h --- nodejs-0.11.13/deps/v8/src/arm/codegen-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/codegen-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_CODEGEN_ARM_H_ #define V8_ARM_CODEGEN_ARM_H_ -#include "ast.h" -#include "ic-inl.h" +#include "src/ast.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arm/code-stubs-arm.cc nodejs-0.11.15/deps/v8/src/arm/code-stubs-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/code-stubs-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/code-stubs-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,486 +1,352 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "bootstrapper.h" -#include "code-stubs.h" -#include "regexp-macro-assembler.h" -#include "stub-cache.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/regexp-macro-assembler.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { void FastNewClosureStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r2 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry; + Register registers[] = { cp, r2 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); } void FastNewContextStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1 }; - descriptor->register_param_count_ = 1; - 
descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, r1 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void ToNumberStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, r0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void NumberToStringStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; + Register registers[] = { cp, r0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); } void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r3, r2, r1 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId( - Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; + Register registers[] = { cp, r3, r2, r1 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Smi(), + Representation::Tagged() }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); } void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - 
static Register registers[] = { r3, r2, r1, r0 }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry; + Register registers[] = { cp, r3, r2, r1, r0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); } void CreateAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r2, r3 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, r2, r3 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallFunctionStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1, r0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + // r1 function the function to call + Register registers[] = {cp, r1}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallConstructStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1, r0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + // r0 : number of arguments + // r1 : the function to call + // r2 : feedback vector + // r3 : (only if r2 is not the megamorphic symbol) slot in feedback + // vector (Smi) + // TODO(turbofan): 
So far we don't gather type feedback and hence skip the + // slot parameter, but ArrayConstructStub needs the vector to be undefined. + Register registers[] = {cp, r0, r1, r2}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void RegExpConstructResultStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r2, r1, r0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry; -} - - -void LoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedLoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void StringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0, r2 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1, r0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r2, 
r1, r0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); + Register registers[] = { cp, r2, r1, r0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); } void TransitionElementsKindStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0, r1 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; + Register registers[] = { cp, r0, r1 }; Address entry = Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(entry)); } void CompareNilICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(CompareNilIC_Miss); + Register registers[] = { cp, r0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); } +const Register InterfaceDescriptor::ContextRegister() { return cp; } + + static void InitializeArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state + // cp -- context // r0 -- number of arguments // r1 -- function // r2 -- allocation site with elements kind - static Register 
registers_variable_args[] = { r1, r2, r0 }; - static Register registers_no_args[] = { r1, r2 }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_no_args; + Register registers[] = { cp, r1, r2 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = r0; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { cp, r1, r2, r0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry; } static void InitializeInternalArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state + // cp -- context // r0 -- number of arguments // r1 -- constructor function - static Register registers_variable_args[] = { r1, r0 }; - static Register registers_no_args[] = { r1 }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kInternalArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - 
descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers_no_args; + Register registers[] = { cp, r1 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = r0; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { cp, r1, r0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry; } void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void 
ToBooleanStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ToBooleanIC_Miss); + Register registers[] = { cp, r0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); } void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); -} - - -void StoreGlobalStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1, r2, r0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(StoreIC_MissFromStubFailure); -} - - -void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r0, r3, r1, r2 }; - descriptor->register_param_count_ = 4; - 
descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void BinaryOpICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1, r0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + Register registers[] = { cp, r1, r0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); } void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r2, r1, r0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite); + Register registers[] = { cp, r2, r1, r0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); } void StringAddStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { r1, r0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry; + Register registers[] = { cp, r1, r0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); } void CallDescriptors::InitializeForIsolate(Isolate* isolate) { - static PlatformCallInterfaceDescriptor default_descriptor 
= - PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); + static PlatformInterfaceDescriptor default_descriptor = + PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); - static PlatformCallInterfaceDescriptor noInlineDescriptor = - PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS); + static PlatformInterfaceDescriptor noInlineDescriptor = + PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS); { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ArgumentAdaptorCall); - static Register registers[] = { r1, // JSFunction - cp, // context - r0, // actual number of arguments - r2, // expected number of arguments + Register registers[] = { cp, // context + r1, // JSFunction + r0, // actual number of arguments + r2, // expected number of arguments }; - static Representation representations[] = { - Representation::Tagged(), // JSFunction + Representation representations[] = { Representation::Tagged(), // context + Representation::Tagged(), // JSFunction Representation::Integer32(), // actual number of arguments Representation::Integer32(), // expected number of arguments }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &default_descriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &default_descriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::KeyedCall); - static Register registers[] = { cp, // context - r2, // key + Register registers[] = { cp, // context + r2, // key }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // key }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = 
&noInlineDescriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &noInlineDescriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::NamedCall); - static Register registers[] = { cp, // context - r2, // name + Register registers[] = { cp, // context + r2, // name }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // name }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &noInlineDescriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &noInlineDescriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::CallHandler); - static Register registers[] = { cp, // context - r0, // receiver + Register registers[] = { cp, // context + r0, // receiver }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // receiver }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &default_descriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &default_descriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ApiFunctionCall); - static Register registers[] = { r0, // callee - r4, // call_data - r2, // holder - r1, // api_function_address - cp, // context + Register registers[] = { cp, // context + r0, // callee + r4, // call_data + r2, // holder + r1, // api_function_address }; - static Representation representations[] = { + Representation representations[] = { + Representation::Tagged(), // context Representation::Tagged(), // callee 
Representation::Tagged(), // call_data Representation::Tagged(), // holder Representation::External(), // api_function_address - Representation::Tagged(), // context }; - descriptor->register_param_count_ = 5; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &default_descriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &default_descriptor); } } @@ -504,22 +370,22 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. - Isolate* isolate = masm->isolate(); - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); - CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); - int param_count = descriptor->register_param_count_; + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); { // Call the runtime system in a fresh internal frame. FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - ASSERT(descriptor->register_param_count_ == 0 || - r0.is(descriptor->register_params_[param_count - 1])); + DCHECK(param_count == 0 || + r0.is(descriptor->GetEnvironmentParameterRegister( + param_count - 1))); // Push arguments for (int i = 0; i < param_count; ++i) { - __ push(descriptor->register_params_[i]); + __ push(descriptor->GetEnvironmentParameterRegister(i)); } ExternalReference miss = descriptor->miss_handler(); - __ CallExternalReference(miss, descriptor->register_param_count_); + __ CallExternalReference(miss, param_count); } __ Ret(); @@ -533,11 +399,13 @@ // stub so you don't have to set up the frame. 
class ConvertToDoubleStub : public PlatformCodeStub { public: - ConvertToDoubleStub(Register result_reg_1, + ConvertToDoubleStub(Isolate* isolate, + Register result_reg_1, Register result_reg_2, Register source_reg, Register scratch_reg) - : result1_(result_reg_1), + : PlatformCodeStub(isolate), + result1_(result_reg_1), result2_(result_reg_2), source_(source_reg), zeros_(scratch_reg) { } @@ -552,8 +420,8 @@ class ModeBits: public BitField<OverwriteMode, 0, 2> {}; class OpBits: public BitField<Token::Value, 2, 14> {}; - Major MajorKey() { return ConvertToDouble; } - int MinorKey() { + Major MajorKey() const { return ConvertToDouble; } + int MinorKey() const { // Encode the parameters in a unique 16 bit value. return result1_.code() + (result2_.code() << 4) + @@ -623,7 +491,7 @@ Label out_of_range, only_low, negate, done; Register input_reg = source(); Register result_reg = destination(); - ASSERT(is_truncating()); + DCHECK(is_truncating()); int double_offset = offset(); // Account for saved regs if input is sp. @@ -726,10 +594,10 @@ void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - WriteInt32ToHeapNumberStub stub1(r1, r0, r2); - WriteInt32ToHeapNumberStub stub2(r2, r0, r3); - stub1.GetCode(isolate); - stub2.GetCode(isolate); + WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2); + WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3); + stub1.GetCode(); + stub2.GetCode(); } @@ -755,7 +623,7 @@ // but it just ends up combining harmlessly with the last digit of the // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get // the most significant 1 to hit the last bit of the 12 bit sign and exponent. 
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); + DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); __ str(scratch_, FieldMemOperand(the_heap_number_, @@ -886,7 +754,7 @@ Label* lhs_not_nan, Label* slow, bool strict) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || + DCHECK((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); Label rhs_is_smi; @@ -948,7 +816,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, Register rhs) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || + DCHECK((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); // If either operand is a JS object or an oddball value, then they are @@ -994,7 +862,7 @@ Label* both_loaded_as_doubles, Label* not_heap_numbers, Label* slow) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || + DCHECK((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); @@ -1017,7 +885,7 @@ Register rhs, Label* possible_strings, Label* not_both_strings) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || + DCHECK((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); // r2 is object type of rhs. @@ -1107,7 +975,7 @@ // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); __ and_(r2, lhs, Operand(rhs)); __ JumpIfNotSmi(r2, ¬_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: @@ -1124,7 +992,6 @@ __ bind(&both_loaded_as_doubles); // The arguments have been converted to doubles and stored in d6 and d7, if // VFP3 is supported, or in r0, r1, r2, and r3. - Isolate* isolate = masm->isolate(); __ bind(&lhs_not_nan); Label no_nan; // ARMv7 VFP3 instructions to implement double precision comparison. 
@@ -1187,7 +1054,8 @@ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); - __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); + __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2, + r3); if (cc == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, @@ -1219,7 +1087,7 @@ if (cc == lt || cc == le) { ncr = GREATER; } else { - ASSERT(cc == gt || cc == ge); // remaining cases + DCHECK(cc == gt || cc == ge); // remaining cases ncr = LESS; } __ mov(r0, Operand(Smi::FromInt(ncr))); @@ -1251,9 +1119,9 @@ AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); - __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ mov(r0, Operand(ExternalReference::isolate_address(isolate()))); __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), + ExternalReference::store_buffer_overflow_function(isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { __ RestoreFPRegs(sp, scratch); @@ -1373,7 +1241,7 @@ __ PrepareCallCFunction(0, 2, scratch); __ MovToFloatParameters(double_base, double_exponent); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), + ExternalReference::power_double_double_function(isolate()), 0, 2); } __ pop(lr); @@ -1424,11 +1292,11 @@ __ vcvt_f64_s32(double_exponent, single_scratch); // Returning or bailing out. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); if (exponent_type_ == ON_STACK) { // The arguments are still on the stack. __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); // The stub is called from non-optimized code, which expects the result // as heap number in exponent. 
@@ -1437,7 +1305,7 @@ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); - ASSERT(heapnumber.is(r0)); + DCHECK(heapnumber.is(r0)); __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); __ Ret(2); } else { @@ -1447,7 +1315,7 @@ __ PrepareCallCFunction(0, 2, scratch); __ MovToFloatParameters(double_base, double_exponent); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), + ExternalReference::power_double_double_function(isolate()), 0, 2); } __ pop(lr); @@ -1479,61 +1347,57 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) { SaveFPRegsMode mode = kSaveFPRegs; - CEntryStub save_doubles(1, mode); - StoreBufferOverflowStub stub(mode); + CEntryStub save_doubles(isolate, 1, mode); + StoreBufferOverflowStub stub(isolate, mode); // These stubs might already be in the snapshot, detect that and don't // regenerate, which would lead to code stub initialization state being messed // up. 
Code* save_doubles_code; - if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { - save_doubles_code = *save_doubles.GetCode(isolate); + if (!save_doubles.FindCodeInCache(&save_doubles_code)) { + save_doubles_code = *save_doubles.GetCode(); } Code* store_buffer_overflow_code; - if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) { - store_buffer_overflow_code = *stub.GetCode(isolate); + if (!stub.FindCodeInCache(&store_buffer_overflow_code)) { + store_buffer_overflow_code = *stub.GetCode(); } isolate->set_fp_stubs_generated(true); } void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode(isolate); + CEntryStub stub(isolate, 1, kDontSaveFPRegs); + stub.GetCode(); } -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - bool do_gc, - bool always_allocate) { - // r0: result parameter for PerformGC, if any - // r4: number of arguments including receiver (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - // r6: pointer to the first argument (C callee-saved) - Isolate* isolate = masm->isolate(); +void CEntryStub::Generate(MacroAssembler* masm) { + // Called from JavaScript; parameters are on stack as if calling JS function. + // r0: number of arguments including receiver + // r1: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) - if (do_gc) { - // Passing r0. 
- __ PrepareCallCFunction(2, 0, r1); - __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate()))); - __ CallCFunction(ExternalReference::perform_gc_function(isolate), - 2, 0); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(isolate); - if (always_allocate) { - __ mov(r0, Operand(scope_depth)); - __ ldr(r1, MemOperand(r0)); - __ add(r1, r1, Operand(1)); - __ str(r1, MemOperand(r0)); - } + ProfileEntryHookStub::MaybeCallEntryHook(masm); - // Call C built-in. - // r0 = argc, r1 = argv - __ mov(r0, Operand(r4)); - __ mov(r1, Operand(r6)); + __ mov(r5, Operand(r1)); + + // Compute the argv pointer in a callee-saved register. + __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); + __ sub(r1, r1, Operand(kPointerSize)); + + // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(save_doubles_); + + // Store a copy of argc in callee-saved registers for later. + __ mov(r4, Operand(r0)); + + // r0, r4: number of arguments including receiver (C callee-saved) + // r1: pointer to the first argument (C callee-saved) + // r5: pointer to builtin function (C callee-saved) + + // Result returned in r0 or r0+r1 by default. #if V8_HOST_ARCH_ARM int frame_alignment = MacroAssembler::ActivationFrameAlignment(); @@ -1541,7 +1405,7 @@ if (FLAG_debug_code) { if (frame_alignment > kPointerSize) { Label alignment_as_expected; - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); __ tst(sp, Operand(frame_alignment_mask)); __ b(eq, &alignment_as_expected); // Don't use Check here, as it will call Runtime_Abort re-entering here. @@ -1551,7 +1415,9 @@ } #endif - __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); + // Call C built-in. 
+ // r0 = argc, r1 = argv + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); // To let the GC traverse the return address of the exit frames, we need to // know where the return address is. The CEntryStub is unmovable, so @@ -1570,132 +1436,67 @@ __ VFPEnsureFPSCRState(r2); - if (always_allocate) { - // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 - // though (contain the result). - __ mov(r2, Operand(scope_depth)); - __ ldr(r3, MemOperand(r2)); - __ sub(r3, r3, Operand(1)); - __ str(r3, MemOperand(r2)); - } - - // check for failure result - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - // Lower 2 bits of r2 are 0 iff r0 has failure tag. - __ add(r2, r0, Operand(1)); - __ tst(r2, Operand(kFailureTagMask)); - __ b(eq, &failure_returned); + // Runtime functions should not return 'the hole'. Allowing it to escape may + // lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); + __ b(ne, &okay); + __ stop("The hole escaped"); + __ bind(&okay); + } + + // Check result for exception sentinel. + Label exception_returned; + __ CompareRoot(r0, Heap::kExceptionRootIndex); + __ b(eq, &exception_returned); + + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + Label okay; + __ mov(r2, Operand(pending_exception_address)); + __ ldr(r2, MemOperand(r2)); + __ CompareRoot(r2, Heap::kTheHoleValueRootIndex); + // Cannot use check here as it attempts to generate call into runtime. + __ b(eq, &okay); + __ stop("Unexpected pending exception"); + __ bind(&okay); + } // Exit C frame and return. // r0:r1: result // sp: stack pointer // fp: frame pointer - // Callee-saved register r4 still holds argc. + // Callee-saved register r4 still holds argc. 
__ LeaveExitFrame(save_doubles_, r4, true); __ mov(pc, lr); - // check if we should retry or throw exception - Label retry; - __ bind(&failure_returned); - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ b(eq, &retry); + // Handling of exception. + __ bind(&exception_returned); // Retrieve the pending exception. - __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); - __ ldr(r0, MemOperand(ip)); + __ mov(r2, Operand(pending_exception_address)); + __ ldr(r0, MemOperand(r2)); // Clear the pending exception. __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); - __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); - __ str(r3, MemOperand(ip)); + __ str(r3, MemOperand(r2)); // Special handling of termination exceptions which are uncatchable // by javascript code. - __ LoadRoot(r3, Heap::kTerminationExceptionRootIndex); - __ cmp(r0, r3); - __ b(eq, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // Called from JavaScript; parameters are on stack as if calling JS function - // r0: number of arguments including receiver - // r1: pointer to builtin function - // fp: frame pointer (restored after C call) - // sp: stack pointer (restored as callee's sp after C call) - // cp: current context (C callee-saved) - - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - // Result returned in r0 or r0+r1 by default. - - // NOTE: Invocations of builtins may return failure objects - // instead of a proper result. The builtin entry handles - // this by performing a garbage collection and retrying the - // builtin once. - - // Compute the argv pointer in a callee-saved register. 
- __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); - __ sub(r6, r6, Operand(kPointerSize)); - - // Enter the exit frame that transitions from JavaScript to C++. - FrameAndConstantPoolScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(save_doubles_); - - // Set up argc and the builtin function in callee-saved registers. - __ mov(r4, Operand(r0)); - __ mov(r5, Operand(r1)); - - // r4: number of arguments (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - // r6: pointer to first argument (C callee-saved) - - Label throw_normal_exception; Label throw_termination_exception; + __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex); + __ b(eq, &throw_termination_exception); - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - true); - - { FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(0, r0); - __ CallCFunction( - ExternalReference::out_of_memory_function(masm->isolate()), 0, 0); - } + // Handle normal exception. + __ Throw(r0); __ bind(&throw_termination_exception); __ ThrowUncatchable(r0); - - __ bind(&throw_normal_exception); - __ Throw(r0); } @@ -1738,15 +1539,14 @@ // r2: receiver // r3: argc // r4: argv - Isolate* isolate = masm->isolate(); int marker = is_construct ? 
StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; if (FLAG_enable_ool_constant_pool) { - __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array())); + __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array())); } __ mov(r7, Operand(Smi::FromInt(marker))); __ mov(r6, Operand(Smi::FromInt(marker))); __ mov(r5, - Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); + Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); __ ldr(r5, MemOperand(r5)); __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used. __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | @@ -1758,7 +1558,7 @@ // If this is the outermost JS call, set js_entry_sp value. Label non_outermost_js; - ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); __ mov(r5, Operand(ExternalReference(js_entry_sp))); __ ldr(r6, MemOperand(r5)); __ cmp(r6, Operand::Zero()); @@ -1788,10 +1588,10 @@ // fp will be invalid because the PushTryHandler below sets it to 0 to // signal the existence of the JSEntry frame. __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); } __ str(r0, MemOperand(ip)); - __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); + __ LoadRoot(r0, Heap::kExceptionRootIndex); __ b(&exit); // Invoke: Link this frame into the handler chain. There's only one @@ -1805,9 +1605,9 @@ // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(r5, Operand(isolate->factory()->the_hole_value())); + __ mov(r5, Operand(isolate()->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); __ str(r5, MemOperand(ip)); // Invoke the function by calling through JS entry trampoline builtin. 
@@ -1822,10 +1622,10 @@ // r4: argv if (is_construct) { ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, - isolate); + isolate()); __ mov(ip, Operand(construct_entry)); } else { - ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); + ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); __ mov(ip, Operand(entry)); } __ ldr(ip, MemOperand(ip)); // deref address @@ -1851,7 +1651,7 @@ // Restore the top frame descriptors from the stack. __ pop(r3); __ mov(ip, - Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); + Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); __ str(r3, MemOperand(ip)); // Reset the stack to the callee saved registers. @@ -1877,24 +1677,19 @@ // * function: r1 or at sp. // // An inlined call site may have been generated before calling this stub. -// In this case the offset to the inline site to patch is passed in r5. +// In this case the offset to the inline sites to patch are passed in r5 and r6. // (See LCodeGen::DoInstanceOfKnownGlobal) void InstanceofStub::Generate(MacroAssembler* masm) { // Call site inlining and patching implies arguments in registers. - ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); - // ReturnTrueFalse is only implemented for inlined call sites. - ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); + DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); // Fixed register usage throughout the stub: const Register object = r0; // Object (lhs). Register map = r3; // Map of the object. const Register function = r1; // Function (rhs). const Register prototype = r4; // Prototype of the function. 
- const Register inline_site = r9; const Register scratch = r2; - const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; - Label slow, loop, is_instance, is_not_instance, not_js_object; if (!HasArgsInRegisters()) { @@ -1908,7 +1703,7 @@ // If there is a call site cache don't look in the global cache, but do the // real lookup and update the call site cache. - if (!HasCallSiteInlineCheck()) { + if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) { Label miss; __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ b(ne, &miss); @@ -1933,17 +1728,17 @@ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); } else { - ASSERT(HasArgsInRegisters()); + DCHECK(HasArgsInRegisters()); // Patch the (relocated) inlined map check. - // The offset was stored in r5 + // The map_load_offset was stored in r5 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). - const Register offset = r5; - __ sub(inline_site, lr, offset); + const Register map_load_offset = r5; + __ sub(r9, lr, map_load_offset); // Get the map location in r5 and patch it. - __ GetRelocatedValueLocation(inline_site, offset); - __ ldr(offset, MemOperand(offset)); - __ str(map, FieldMemOperand(offset, Cell::kValueOffset)); + __ GetRelocatedValueLocation(r9, map_load_offset, scratch); + __ ldr(map_load_offset, MemOperand(map_load_offset)); + __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset)); } // Register mapping: r3 is object map and r4 is function prototype. 
@@ -1964,17 +1759,24 @@ __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); __ jmp(&loop); + Factory* factory = isolate()->factory(); __ bind(&is_instance); if (!HasCallSiteInlineCheck()) { __ mov(r0, Operand(Smi::FromInt(0))); __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ Move(r0, factory->true_value()); + } } else { // Patch the call site to return true. __ LoadRoot(r0, Heap::kTrueValueRootIndex); - __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // The bool_load_offset was stored in r6 + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). + const Register bool_load_offset = r6; + __ sub(r9, lr, bool_load_offset); // Get the boolean result location in scratch and patch it. - __ GetRelocatedValueLocation(inline_site, scratch); + __ GetRelocatedValueLocation(r9, scratch, scratch2); __ str(r0, MemOperand(scratch)); if (!ReturnTrueFalseObject()) { @@ -1987,12 +1789,19 @@ if (!HasCallSiteInlineCheck()) { __ mov(r0, Operand(Smi::FromInt(1))); __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ Move(r0, factory->false_value()); + } } else { // Patch the call site to return false. __ LoadRoot(r0, Heap::kFalseValueRootIndex); - __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // The bool_load_offset was stored in r6 + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). + const Register bool_load_offset = r6; + __ sub(r9, lr, bool_load_offset); + ; // Get the boolean result location in scratch and patch it. - __ GetRelocatedValueLocation(inline_site, scratch); + __ GetRelocatedValueLocation(r9, scratch, scratch2); __ str(r0, MemOperand(scratch)); if (!ReturnTrueFalseObject()) { @@ -2010,21 +1819,33 @@ __ b(ne, &slow); // Null is not instance of anything. 
- __ cmp(scratch, Operand(masm->isolate()->factory()->null_value())); + __ cmp(scratch, Operand(isolate()->factory()->null_value())); __ b(ne, &object_not_null); - __ mov(r0, Operand(Smi::FromInt(1))); + if (ReturnTrueFalseObject()) { + __ Move(r0, factory->false_value()); + } else { + __ mov(r0, Operand(Smi::FromInt(1))); + } __ Ret(HasArgsInRegisters() ? 0 : 2); __ bind(&object_not_null); // Smi values are not instances of anything. __ JumpIfNotSmi(object, &object_not_null_or_smi); - __ mov(r0, Operand(Smi::FromInt(1))); + if (ReturnTrueFalseObject()) { + __ Move(r0, factory->false_value()); + } else { + __ mov(r0, Operand(Smi::FromInt(1))); + } __ Ret(HasArgsInRegisters() ? 0 : 2); __ bind(&object_not_null_or_smi); // String values are not instances of anything. __ IsObjectJSStringType(object, scratch, &slow); - __ mov(r0, Operand(Smi::FromInt(1))); + if (ReturnTrueFalseObject()) { + __ Move(r0, factory->false_value()); + } else { + __ mov(r0, Operand(Smi::FromInt(1))); + } __ Ret(HasArgsInRegisters() ? 0 : 2); // Slow-case. Tail call builtin. 
@@ -2050,31 +1871,13 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { Label miss; - Register receiver; - if (kind() == Code::KEYED_LOAD_IC) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string())); - __ b(ne, &miss); - receiver = r1; - } else { - ASSERT(kind() == Code::LOAD_IC); - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - receiver = r0; - } + Register receiver = LoadIC::ReceiverRegister(); - StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss); + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, + r4, &miss); __ bind(&miss); - StubCompiler::TailCallBuiltin( - masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); } @@ -2155,7 +1958,7 @@ __ str(r3, MemOperand(sp, 1 * kPointerSize)); __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -2219,12 +2022,12 @@ __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT); // r0 = address of new object(s) (tagged) - // r2 = argument count (tagged) + // r2 = argument count (smi-tagged) // Get the arguments boilerplate from the current native context into r4. 
const int kNormalOffset = - Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX); + Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); const int kAliasedOffset = - Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); + Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX); __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); @@ -2234,22 +2037,23 @@ // r0 = address of new object (tagged) // r1 = mapped parameter count (tagged) - // r2 = argument count (tagged) - // r4 = address of boilerplate object (tagged) - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ ldr(r3, FieldMemOperand(r4, i)); - __ str(r3, FieldMemOperand(r0, i)); - } + // r2 = argument count (smi-tagged) + // r4 = address of arguments map (tagged) + __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset)); + __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); + __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); + __ AssertNotSmi(r3); const int kCalleeOffset = JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize; __ str(r3, FieldMemOperand(r0, kCalleeOffset)); // Use the length (smi tagged) and set that as an in-object property too. + __ AssertSmi(r2); STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); const int kLengthOffset = JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize; @@ -2359,7 +2163,7 @@ // r2 = argument count (tagged) __ bind(&runtime); __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. 
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -2403,15 +2207,18 @@ // Get the arguments boilerplate from the current native context. __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); - __ ldr(r4, MemOperand(r4, Context::SlotOffset( - Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX))); + __ ldr(r4, MemOperand( + r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX))); - // Copy the JS object part. - __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize); + __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset)); + __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); + __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); // Get the length (smi tagged) and set that as an in-object property too. STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); + __ AssertSmi(r1); __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize)); @@ -2453,7 +2260,7 @@ // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); } @@ -2462,7 +2269,7 @@ // time or if regexp entry in generated code is turned off runtime switch or // at compilation. #ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. @@ -2487,11 +2294,10 @@ Register last_match_info_elements = no_reg; // will be r6; // Ensure that a RegExp stack is allocated. 
- Isolate* isolate = masm->isolate(); ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(isolate); + ExternalReference::address_of_regexp_stack_memory_address(isolate()); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(isolate); + ExternalReference::address_of_regexp_stack_memory_size(isolate()); __ mov(r0, Operand(address_of_regexp_stack_memory_size)); __ ldr(r0, MemOperand(r0, 0)); __ cmp(r0, Operand::Zero()); @@ -2595,8 +2401,8 @@ STATIC_ASSERT(kSeqStringTag == 0); __ tst(r0, Operand(kStringRepresentationMask)); // The underlying external string is never a short external string. - STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); - STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); __ b(ne, &external_string); // Go to (7). // (5) Sequential string. Load regexp code according to encoding. @@ -2633,7 +2439,7 @@ // subject: Subject string // regexp_data: RegExp data (FixedArray) // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); + __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2); // Isolates: note we add an additional parameter here (isolate pointer). const int kRegExpExecuteArguments = 9; @@ -2644,7 +2450,7 @@ // Arguments are before that on the stack or in registers. // Argument 9 (sp[20]): Pass current isolate address. - __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); + __ mov(r0, Operand(ExternalReference::isolate_address(isolate()))); __ str(r0, MemOperand(sp, 5 * kPointerSize)); // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. 
@@ -2666,7 +2472,8 @@ // Argument 5 (sp[4]): static offsets vector buffer. __ mov(r0, - Operand(ExternalReference::address_of_static_offsets_vector(isolate))); + Operand(ExternalReference::address_of_static_offsets_vector( + isolate()))); __ str(r0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data and @@ -2697,7 +2504,7 @@ // Locate the code entry and call it. __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm, r6); __ LeaveExitFrame(false, no_reg, true); @@ -2724,9 +2531,9 @@ // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r1, Operand(isolate->factory()->the_hole_value())); + __ mov(r1, Operand(isolate()->factory()->the_hole_value())); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); __ ldr(r0, MemOperand(r2, 0)); __ cmp(r0, r1); __ b(eq, &runtime); @@ -2746,7 +2553,7 @@ __ bind(&failure); // For failure and exception return null. - __ mov(r0, Operand(masm->isolate()->factory()->null_value())); + __ mov(r0, Operand(isolate()->factory()->null_value())); __ add(sp, sp, Operand(4 * kPointerSize)); __ Ret(); @@ -2808,7 +2615,7 @@ // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(isolate); + ExternalReference::address_of_static_offsets_vector(isolate()); __ mov(r2, Operand(address_of_static_offsets_vector)); // r1: number of capture registers @@ -2837,7 +2644,7 @@ // Do the runtime call to execute the regexp. 
__ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); // Deferred code for string handling. // (6) Not a long external string? If yes, go to (8). @@ -2890,9 +2697,9 @@ // r3 : slot in feedback vector (Smi) Label initialize, done, miss, megamorphic, not_array_function; - ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), + DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), masm->isolate()->heap()->megamorphic_symbol()); - ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), + DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), masm->isolate()->heap()->uninitialized_symbol()); // Load the cache state into r4. @@ -2953,7 +2760,7 @@ __ SmiTag(r0); __ Push(r3, r2, r1, r0); - CreateAllocationSiteStub create_stub; + CreateAllocationSiteStub create_stub(masm->isolate()); __ CallStub(&create_stub); __ Pop(r3, r2, r1, r0); @@ -2977,14 +2784,67 @@ } -void CallFunctionStub::Generate(MacroAssembler* masm) { +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + // Do not transform the receiver for strict mode functions. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, cont); + + // Do not transform the receiver for native (Compilerhints already in r3). + __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ b(ne, cont); +} + + +static void EmitSlowCase(MacroAssembler* masm, + int argc, + Label* non_function) { + // Check for function proxy. 
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); + __ b(ne, non_function); + __ push(r1); // put proxy as additional argument + __ mov(r0, Operand(argc + 1, RelocInfo::NONE32)); + __ mov(r2, Operand::Zero()); + __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ bind(non_function); + __ str(r1, MemOperand(sp, argc * kPointerSize)); + __ mov(r0, Operand(argc)); // Set up the number of arguments. + __ mov(r2, Operand::Zero()); + __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { + // Wrap the receiver and patch it back onto the stack. + { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL); + __ Push(r1, r3); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ pop(r1); + } + __ str(r0, MemOperand(sp, argc * kPointerSize)); + __ jmp(cont); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { // r1 : the function to call - // r2 : feedback vector - // r3 : (only if r2 is not the megamorphic symbol) slot in feedback - // vector (Smi) Label slow, non_function, wrap, cont; - if (NeedsChecks()) { + if (needs_checks) { // Check that the function is really a JavaScript function. // r1: pushed function (to be verified) __ JumpIfSmi(r1, &non_function); @@ -2992,38 +2852,21 @@ // Goto slow case if we do not have a function. __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); __ b(ne, &slow); - - if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); - // Type information was updated. 
Because we may call Array, which - // expects either undefined or an AllocationSite in ebx we need - // to set ebx to undefined. - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - } } // Fast-case: Invoke the function now. // r1: pushed function - ParameterCount actual(argc_); + ParameterCount actual(argc); - if (CallAsMethod()) { - if (NeedsChecks()) { - // Do not transform the receiver for strict mode functions. - __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ b(ne, &cont); - - // Do not transform the receiver for native (Compilerhints already in r3). - __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &cont); + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); } // Compute the receiver in sloppy mode. - __ ldr(r3, MemOperand(sp, argc_ * kPointerSize)); + __ ldr(r3, MemOperand(sp, argc * kPointerSize)); - if (NeedsChecks()) { + if (needs_checks) { __ JumpIfSmi(r3, &wrap); __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); __ b(lt, &wrap); @@ -3033,59 +2876,27 @@ __ bind(&cont); } + __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper()); - if (NeedsChecks()) { + if (needs_checks) { // Slow-case: Non-function called. __ bind(&slow); - if (RecordCallTarget()) { - // If there is a call target cache, mark it megamorphic in the - // non-function case. MegamorphicSentinel is an immortal immovable - // object (megamorphic symbol) so no write barrier is needed. - ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), - masm->isolate()->heap()->megamorphic_symbol()); - __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3)); - __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex); - __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize)); - } - // Check for function proxy. 
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); - __ b(ne, &non_function); - __ push(r1); // put proxy as additional argument - __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32)); - __ mov(r2, Operand::Zero()); - __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY); - { - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); - __ Jump(adaptor, RelocInfo::CODE_TARGET); - } - - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). - __ bind(&non_function); - __ str(r1, MemOperand(sp, argc_ * kPointerSize)); - __ mov(r0, Operand(argc_)); // Set up the number of arguments. - __ mov(r2, Operand::Zero()); - __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + EmitSlowCase(masm, argc, &non_function); } - if (CallAsMethod()) { + if (call_as_method) { __ bind(&wrap); - // Wrap the receiver and patch it back onto the stack. 
- { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL); - __ Push(r1, r3); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ pop(r1); - } - __ str(r0, MemOperand(sp, argc_ * kPointerSize)); - __ jmp(&cont); + EmitWrapCase(masm, argc, &cont); } } +void CallFunctionStub::Generate(MacroAssembler* masm) { + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + void CallConstructStub::Generate(MacroAssembler* masm) { // r0 : number of arguments // r1 : the function to call @@ -3150,6 +2961,153 @@ } +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ldr(vector, FieldMemOperand(vector, + JSFunction::kSharedFunctionInfoOffset)); + __ ldr(vector, FieldMemOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // r1 - function + // r3 - slot id + Label miss; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, r2); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4); + __ cmp(r1, r4); + __ b(ne, &miss); + + __ mov(r0, Operand(arg_count())); + __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); + __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); + + // Verify that r4 contains an AllocationSite + __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); + __ b(ne, &miss); + + __ mov(r2, r4); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + // Unreachable. 
+ __ stop("Unexpected code address"); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // r1 - function + // r3 - slot id (Smi) + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, r2); + + // The checks. First, does r1 match the recorded monomorphic target? + __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); + __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); + __ cmp(r1, r4); + __ b(ne, &extra_checks_or_miss); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + // Compute the receiver in sloppy mode. + __ ldr(r3, MemOperand(sp, argc * kPointerSize)); + + __ JumpIfSmi(r3, &wrap); + __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); + __ b(lt, &wrap); + + __ bind(&cont); + } + + __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(masm, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex); + __ b(eq, &slow_start); + __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex); + __ b(eq, &miss); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. + __ AssertNotSmi(r4); + __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); + __ b(ne, &miss); + __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); + __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex); + __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize)); + __ jmp(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. 
+ __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + // Check that the function is really a JavaScript function. + // r1: pushed function (to be verified) + __ JumpIfSmi(r1, &non_function); + + // Goto slow case if we do not have a function. + __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); + __ b(ne, &slow); + __ jmp(&have_js_function); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ Push(r4, r1, r2, r3); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to edi and exit the internal frame. + __ mov(r1, r0); + } +} + + // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -3208,9 +3166,9 @@ if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kHiddenNumberToSmi, 1); + __ CallRuntime(Runtime::kNumberToSmi, 1); } // Save the conversion result before the pop instructions below // have a chance to overwrite it. @@ -3232,7 +3190,7 @@ call_helper.BeforeCall(masm); __ SmiTag(index_); __ Push(object_, index_); - __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); __ Move(result_, r0); call_helper.AfterCall(masm); __ jmp(&exit_); @@ -3248,7 +3206,7 @@ // Fast case of Heap::LookupSingleCharacterStringFromCode. 
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); + DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); __ tst(code_, Operand(kSmiTagMask | ((~String::kMaxOneByteCharCode) << kSmiTagSize))); @@ -3287,142 +3245,37 @@ }; -void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - int flags) { - bool ascii = (flags & COPY_ASCII) != 0; - bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; - - if (dest_always_aligned && FLAG_debug_code) { - // Check that destination is actually word aligned if the flag says - // that it is. +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding) { + if (FLAG_debug_code) { + // Check that destination is word aligned. __ tst(dest, Operand(kPointerAlignmentMask)); __ Check(eq, kDestinationOfCopyNotAligned); } - const int kReadAlignment = 4; - const int kReadAlignmentMask = kReadAlignment - 1; - // Ensure that reading an entire aligned word containing the last character - // of a string will not read outside the allocated area (because we pad up - // to kObjectAlignment). - STATIC_ASSERT(kObjectAlignment >= kReadAlignment); // Assumes word reads and writes are little endian. // Nothing to do for zero characters. Label done; - if (!ascii) { + if (encoding == String::TWO_BYTE_ENCODING) { __ add(count, count, Operand(count), SetCC); - } else { - __ cmp(count, Operand::Zero()); } - __ b(eq, &done); - // Assume that you cannot read (or write) unaligned. - Label byte_loop; - // Must copy at least eight bytes, otherwise just do it one byte at a time. - __ cmp(count, Operand(8)); - __ add(count, dest, Operand(count)); - Register limit = count; // Read until src equals this. 
- __ b(lt, &byte_loop); - - if (!dest_always_aligned) { - // Align dest by byte copying. Copies between zero and three bytes. - __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); - Label dest_aligned; - __ b(eq, &dest_aligned); - __ cmp(scratch4, Operand(2)); - __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); - __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); - __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); - __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); - __ bind(&dest_aligned); - } - - Label simple_loop; - - __ sub(scratch4, dest, Operand(src)); - __ and_(scratch4, scratch4, Operand(0x03), SetCC); - __ b(eq, &simple_loop); - // Shift register is number of bits in a source word that - // must be combined with bits in the next source word in order - // to create a destination word. - - // Complex loop for src/dst that are not aligned the same way. - { - Label loop; - __ mov(scratch4, Operand(scratch4, LSL, 3)); - Register left_shift = scratch4; - __ and_(src, src, Operand(~3)); // Round down to load previous word. - __ ldr(scratch1, MemOperand(src, 4, PostIndex)); - // Store the "shift" most significant bits of scratch in the least - // signficant bits (i.e., shift down by (32-shift)). - __ rsb(scratch2, left_shift, Operand(32)); - Register right_shift = scratch2; - __ mov(scratch1, Operand(scratch1, LSR, right_shift)); - - __ bind(&loop); - __ ldr(scratch3, MemOperand(src, 4, PostIndex)); - __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); - __ str(scratch1, MemOperand(dest, 4, PostIndex)); - __ mov(scratch1, Operand(scratch3, LSR, right_shift)); - // Loop if four or more bytes left to copy. 
- __ sub(scratch3, limit, Operand(dest)); - __ sub(scratch3, scratch3, Operand(4), SetCC); - __ b(ge, &loop); - } - // There is now between zero and three bytes left to copy (negative that - // number is in scratch3), and between one and three bytes already read into - // scratch1 (eight times that number in scratch4). We may have read past - // the end of the string, but because objects are aligned, we have not read - // past the end of the object. - // Find the minimum of remaining characters to move and preloaded characters - // and write those as bytes. - __ add(scratch3, scratch3, Operand(4), SetCC); - __ b(eq, &done); - __ cmp(scratch4, Operand(scratch3, LSL, 3), ne); - // Move minimum of bytes read and bytes left to copy to scratch4. - __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt); - // Between one and three (value in scratch3) characters already read into - // scratch ready to write. - __ cmp(scratch3, Operand(2)); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); - __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); - __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); - // Copy any remaining bytes. - __ b(&byte_loop); - - // Simple loop. - // Copy words from src to dst, until less than four bytes left. - // Both src and dest are word aligned. - __ bind(&simple_loop); - { - Label loop; - __ bind(&loop); - __ ldr(scratch1, MemOperand(src, 4, PostIndex)); - __ sub(scratch3, limit, Operand(dest)); - __ str(scratch1, MemOperand(dest, 4, PostIndex)); - // Compare to 8, not 4, because we do the substraction before increasing - // dest. - __ cmp(scratch3, Operand(8)); - __ b(ge, &loop); - } + Register limit = count; // Read until dest equals this. + __ add(limit, dest, Operand(count)); - // Copy bytes from src to dst until dst hits limit. 
- __ bind(&byte_loop); + Label loop_entry, loop; + // Copy bytes from src to dest until dest hits limit. + __ b(&loop_entry); + __ bind(&loop); + __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt); + __ strb(scratch, MemOperand(dest, 1, PostIndex)); + __ bind(&loop_entry); __ cmp(dest, Operand(limit)); - __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt); - __ b(ge, &done); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ b(&byte_loop); + __ b(lt, &loop); __ bind(&done); } @@ -3512,10 +3365,8 @@ // Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); - // Do a JumpIfSmi, but fold its jump into the subsequent string test. - __ SmiTst(r0); - Condition is_string = masm->IsObjectStringType(r0, r1, ne); - ASSERT(is_string == eq); + __ JumpIfSmi(r0, &runtime); + Condition is_string = masm->IsObjectStringType(r0, r1); __ b(NegateCondition(is_string), &runtime); Label single_char; @@ -3619,7 +3470,7 @@ // Handle external string. // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ tst(r1, Operand(kShortExternalStringTag)); __ b(ne, &runtime); __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset)); @@ -3650,8 +3501,8 @@ // r2: result string length // r5: first character of substring to copy STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9, - COPY_ASCII | DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharacters( + masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING); __ jmp(&return_r0); // Allocate and copy the resulting two-byte string. @@ -3669,18 +3520,18 @@ // r2: result length. // r5: first character of substring to copy. 
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong( - masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharacters( + masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING); __ bind(&return_r0); - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); __ Drop(3); __ Ret(); // Just jump to runtime to create the sub string. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubString, 3, 1); __ bind(&single_char); // r0: original string @@ -3762,7 +3613,7 @@ // Compare lengths - strings up to min-length are equal. __ bind(&compare_lengths); - ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); + DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); // Use length_delta as result if it's zero. __ mov(r0, Operand(length_delta), SetCC); __ bind(&result_not_equal); @@ -3808,7 +3659,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { Label runtime; - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); // Stack frame on entry. // sp[0]: right string @@ -3838,212 +3689,7 @@ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); -} - - -void ArrayPushStub::Generate(MacroAssembler* masm) { - Register receiver = r0; - Register scratch = r1; - - int argc = arguments_count(); - - if (argc == 0) { - // Nothing to do, just return the length. 
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Drop(argc + 1); - __ Ret(); - return; - } - - Isolate* isolate = masm->isolate(); - - if (argc != 1) { - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - Label call_builtin, attempt_to_grow_elements, with_write_barrier; - - Register elements = r6; - Register end_elements = r5; - // Get the elements array of the object. - __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check that the elements are in fast mode and writable. - __ CheckMap(elements, - scratch, - Heap::kFixedArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); - } - - // Get the array's length into scratch and calculate new length. - __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ add(scratch, scratch, Operand(Smi::FromInt(argc))); - - // Get the elements' length. - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Check if we could survive without allocation. - __ cmp(scratch, r4); - - const int kEndElementsOffset = - FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - __ b(gt, &attempt_to_grow_elements); - - // Check if value is a smi. - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ JumpIfNotSmi(r4, &with_write_barrier); - - // Store the value. - // We may need a register containing the address end_elements below, so - // write back the value in end_elements. - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch)); - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - } else { - // Check if we could survive without allocation. 
- __ cmp(scratch, r4); - __ b(gt, &call_builtin); - - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0, - &call_builtin, argc * kDoubleSize); - } - - // Save new length. - __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Drop(argc + 1); - __ mov(r0, scratch); - __ Ret(); - - if (IsFastDoubleElementsKind(elements_kind())) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ bind(&with_write_barrier); - - if (IsFastSmiElementsKind(elements_kind())) { - if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); - - __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r9, ip); - __ b(eq, &call_builtin); - - ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) - ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset)); - __ ldr(r3, ContextOperand(r3, Context::JS_ARRAY_MAPS_INDEX)); - const int header_size = FixedArrayBase::kHeaderSize; - // Verify that the object can be transitioned in place. - const int origin_offset = header_size + elements_kind() * kPointerSize; - __ ldr(r2, FieldMemOperand(receiver, origin_offset)); - __ ldr(ip, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ cmp(r2, ip); - __ b(ne, &call_builtin); - - const int target_offset = header_size + target_kind * kPointerSize; - __ ldr(r3, FieldMemOperand(r3, target_offset)); - __ mov(r2, receiver); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - masm, DONT_TRACK_ALLOCATION_SITE, NULL); - } - - // Save new length. - __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Store the value. 
- // We may need a register containing the address end_elements below, so write - // back the value in end_elements. - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch)); - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - - __ RecordWrite(elements, - end_elements, - r4, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ Drop(argc + 1); - __ mov(r0, scratch); - __ Ret(); - - __ bind(&attempt_to_grow_elements); - // scratch: array's length + 1. - - if (!FLAG_inline_new) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case the - // new element is non-Smi. For now, delegate to the builtin. - if (IsFastSmiElementsKind(elements_kind())) { - __ JumpIfNotSmi(r2, &call_builtin); - } - - // We could be lucky and the elements array could be at the top of new-space. - // In this case we can just grow it in place by moving the allocation pointer - // up. - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); - - const int kAllocationDelta = 4; - ASSERT(kAllocationDelta >= argc); - // Load top and check if it is the end of elements. 
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch)); - __ add(end_elements, end_elements, Operand(kEndElementsOffset)); - __ mov(r4, Operand(new_space_allocation_top)); - __ ldr(r3, MemOperand(r4)); - __ cmp(end_elements, r3); - __ b(ne, &call_builtin); - - __ mov(r9, Operand(new_space_allocation_limit)); - __ ldr(r9, MemOperand(r9)); - __ add(r3, r3, Operand(kAllocationDelta * kPointerSize)); - __ cmp(r3, r9); - __ b(hi, &call_builtin); - - // We fit and could grow elements. - // Update new_space_allocation_top. - __ str(r3, MemOperand(r4)); - // Push the argument. - __ str(r2, MemOperand(end_elements)); - // Fill the rest with holes. - __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); - for (int i = 1; i < kAllocationDelta; i++) { - __ str(r3, MemOperand(end_elements, i * kPointerSize)); - } - - // Update elements' and array's sizes. - __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta))); - __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Elements are in new space, so write barrier is not required. - __ Drop(argc + 1); - __ mov(r0, scratch); - __ Ret(); - - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } @@ -4053,12 +3699,11 @@ // -- r0 : right // -- lr : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); // Load r2 with the allocation site. We stick an undefined dummy value here // and replace it with the real allocation site later when we instantiate this // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ Move(r2, handle(isolate->heap()->undefined_value())); + __ Move(r2, handle(isolate()->heap()->undefined_value())); // Make sure that we actually patched the allocation site. 
if (FLAG_debug_code) { @@ -4074,13 +3719,13 @@ // Tail call into the stub that handles binary operations with allocation // sites. - BinaryOpWithAllocationSiteStub stub(state_); + BinaryOpWithAllocationSiteStub stub(isolate(), state_); __ TailCallStub(&stub); } void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMI); + DCHECK(state_ == CompareIC::SMI); Label miss; __ orr(r2, r1, r0); __ JumpIfNotSmi(r2, &miss); @@ -4101,7 +3746,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::NUMBER); + DCHECK(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; @@ -4152,9 +3797,9 @@ __ bind(&unordered); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, CompareIC::GENERIC); - __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { @@ -4178,7 +3823,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::INTERNALIZED_STRING); + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); Label miss; // Registers containing left and right operands respectively. @@ -4204,7 +3849,7 @@ __ cmp(left, right); // Make sure r0 is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(r0)); + DCHECK(right.is(r0)); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); @@ -4216,8 +3861,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::UNIQUE_NAME); - ASSERT(GetCondition() == eq); + DCHECK(state_ == CompareIC::UNIQUE_NAME); + DCHECK(GetCondition() == eq); Label miss; // Registers containing left and right operands respectively. 
@@ -4243,7 +3888,7 @@ __ cmp(left, right); // Make sure r0 is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(r0)); + DCHECK(right.is(r0)); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); @@ -4255,7 +3900,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + DCHECK(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -4295,13 +3940,13 @@ // because we already know they are not identical. We know they are both // strings. if (equality) { - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); STATIC_ASSERT(kInternalizedTag == 0); __ orr(tmp3, tmp1, Operand(tmp2)); __ tst(tmp3, Operand(kIsNotInternalizedMask)); // Make sure r0 is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(r0)); + DCHECK(right.is(r0)); __ Ret(eq); } @@ -4325,7 +3970,7 @@ if (equality) { __ TailCallRuntime(Runtime::kStringEquals, 2, 1); } else { - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } __ bind(&miss); @@ -4334,7 +3979,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + DCHECK(state_ == CompareIC::OBJECT); Label miss; __ and_(r2, r1, Operand(r0)); __ JumpIfSmi(r2, &miss); @@ -4344,7 +3989,7 @@ __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); __ b(ne, &miss); - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); __ sub(r0, r0, Operand(r1)); __ Ret(); @@ -4377,7 +4022,7 @@ { // Call the runtime system in a fresh internal frame. 
ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); __ Push(r1, r0); @@ -4409,7 +4054,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { intptr_t code = - reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); + reinterpret_cast<intptr_t>(GetCode().location()); __ Move(ip, target); __ mov(lr, Operand(code, RelocInfo::CODE_TARGET)); __ blx(lr); // Call the stub. @@ -4423,7 +4068,7 @@ Register properties, Handle<Name> name, Register scratch0) { - ASSERT(name->IsUniqueName()); + DCHECK(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -4440,17 +4085,17 @@ Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ add(index, index, Operand(index, LSL, 1)); // index *= 3. Register entity_name = scratch0; // Having undefined at this place means the name is not contained. 
- ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); Register tmp = properties; __ add(tmp, properties, Operand(index, LSL, 1)); __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); - ASSERT(!tmp.is(entity_name)); + DCHECK(!tmp.is(entity_name)); __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); __ cmp(entity_name, tmp); __ b(eq, done); @@ -4485,7 +4130,7 @@ __ stm(db_w, sp, spill_mask); __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ mov(r1, Operand(Handle<Name>(name))); - NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); __ CallStub(&stub); __ cmp(r0, Operand::Zero()); __ ldm(ia_w, sp, spill_mask); @@ -4506,10 +4151,10 @@ Register name, Register scratch1, Register scratch2) { - ASSERT(!elements.is(scratch1)); - ASSERT(!elements.is(scratch2)); - ASSERT(!name.is(scratch1)); - ASSERT(!name.is(scratch2)); + DCHECK(!elements.is(scratch1)); + DCHECK(!elements.is(scratch2)); + DCHECK(!name.is(scratch1)); + DCHECK(!name.is(scratch2)); __ AssertName(name); @@ -4528,7 +4173,7 @@ // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(NameDictionary::GetProbeOffset(i) < + DCHECK(NameDictionary::GetProbeOffset(i) < 1 << (32 - Name::kHashFieldOffset)); __ add(scratch2, scratch2, Operand( NameDictionary::GetProbeOffset(i) << Name::kHashShift)); @@ -4536,7 +4181,7 @@ __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); // Scale the index by multiplying by the element size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); // scratch2 = scratch2 * 3. 
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); @@ -4554,14 +4199,14 @@ __ stm(db_w, sp, spill_mask); if (name.is(r0)) { - ASSERT(!elements.is(r1)); + DCHECK(!elements.is(r1)); __ Move(r1, name); __ Move(r0, elements); } else { __ Move(r0, elements); __ Move(r1, name); } - NameDictionaryLookupStub stub(POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); __ CallStub(&stub); __ cmp(r0, Operand::Zero()); __ mov(scratch2, Operand(r2)); @@ -4610,7 +4255,7 @@ // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(NameDictionary::GetProbeOffset(i) < + DCHECK(NameDictionary::GetProbeOffset(i) < 1 << (32 - Name::kHashFieldOffset)); __ add(index, hash, Operand( NameDictionary::GetProbeOffset(i) << Name::kHashShift)); @@ -4620,10 +4265,10 @@ __ and_(index, mask, Operand(index, LSR, Name::kHashShift)); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ add(index, index, Operand(index, LSL, 1)); // index *= 3. - ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); __ add(index, dictionary, Operand(index, LSL, 2)); __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); @@ -4665,16 +4310,11 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); + StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); + stub1.GetCode(); // Hydrogen code stubs need stub2 at snapshot time. 
- StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); -} - - -bool CodeStub::CanUseFPRegisters() { - return true; // VFP2 is a base requirement for V8 + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); } @@ -4716,8 +4356,8 @@ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. // Will be checked in IncrementalMarking::ActivateGeneratedStub. - ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); - ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); + DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); + DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); PatchBranchIntoNop(masm, 0); PatchBranchIntoNop(masm, Assembler::kInstrSize); } @@ -4769,17 +4409,16 @@ __ PrepareCallCFunction(argument_count, regs_.scratch0()); Register address = r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); - ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(r0)); + DCHECK(!address.is(regs_.object())); + DCHECK(!address.is(r0)); __ Move(address, regs_.address()); __ Move(r0, regs_.object()); __ Move(r1, address); - __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), + ExternalReference::incremental_marking_record_write_function(isolate()), argument_count); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); } @@ -4934,8 +4573,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(1, fp_registers_ ? 
kSaveFPRegs : kDontSaveFPRegs); - __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1, kSaveFPRegs); + __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; __ ldr(r1, MemOperand(fp, parameter_count_offset)); @@ -4951,8 +4590,9 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { - PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); - ProfileEntryHookStub stub; + ProfileEntryHookStub stub(masm->isolate()); + int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize; + PredictableCodeSizeScope predictable(masm, code_size); __ push(lr); __ CallStub(&stub); __ pop(lr); @@ -4976,7 +4616,7 @@ // We also save lr, so the count here is one higher than the mask indicates. const int32_t kNumSavedRegs = 7; - ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved); + DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved); // Save all caller-save registers as this may be called from anywhere. __ stm(db_w, sp, kSavedRegs | lr.bit()); @@ -4992,24 +4632,24 @@ int frame_alignment = masm->ActivationFrameAlignment(); if (frame_alignment > kPointerSize) { __ mov(r5, sp); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); __ and_(sp, sp, Operand(-frame_alignment)); } #if V8_HOST_ARCH_ARM int32_t entry_hook = - reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook()); + reinterpret_cast<int32_t>(isolate()->function_entry_hook()); __ mov(ip, Operand(entry_hook)); #else // Under the simulator we need to indirect the entry hook through a // trampoline function at a known address. 
// It additionally takes an isolate as a third parameter - __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); __ mov(ip, Operand(ExternalReference(&dispatcher, ExternalReference::BUILTIN_CALL, - masm->isolate()))); + isolate()))); #endif __ Call(ip); @@ -5027,7 +4667,7 @@ static void CreateArrayDispatch(MacroAssembler* masm, AllocationSiteOverrideMode mode) { if (mode == DISABLE_ALLOCATION_SITES) { - T stub(GetInitialFastElementsKind(), mode); + T stub(masm->isolate(), GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { int last_index = GetSequenceIndexFromFastElementsKind( @@ -5035,7 +4675,7 @@ for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(r3, Operand(kind)); - T stub(kind); + T stub(masm->isolate(), kind); __ TailCallStub(&stub, eq); } @@ -5056,12 +4696,12 @@ // sp[0] - last argument Label normal_sequence; if (mode == DONT_OVERRIDE) { - ASSERT(FAST_SMI_ELEMENTS == 0); - ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - ASSERT(FAST_ELEMENTS == 2); - ASSERT(FAST_HOLEY_ELEMENTS == 3); - ASSERT(FAST_DOUBLE_ELEMENTS == 4); - ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + DCHECK(FAST_SMI_ELEMENTS == 0); + DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); + DCHECK(FAST_ELEMENTS == 2); + DCHECK(FAST_HOLEY_ELEMENTS == 3); + DCHECK(FAST_DOUBLE_ELEMENTS == 4); + DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); // is the low bit set? If so, we are holey and that is good. 
__ tst(r3, Operand(1)); @@ -5077,12 +4717,14 @@ ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); - ArraySingleArgumentConstructorStub stub_holey(holey_initial, + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); __ bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(initial, + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -5110,7 +4752,7 @@ for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(r3, Operand(kind)); - ArraySingleArgumentConstructorStub stub(kind); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); __ TailCallStub(&stub, eq); } @@ -5128,11 +4770,11 @@ TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); - stub.GetCode(isolate); + T stub(isolate, kind); + stub.GetCode(); if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { - T stub1(kind, DISABLE_ALLOCATION_SITES); - stub1.GetCode(isolate); + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); } } } @@ -5153,12 +4795,12 @@ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things - InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); - stubh1.GetCode(isolate); - InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); - stubh2.GetCode(isolate); - InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); - stubh3.GetCode(isolate); + InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub 
stubh3(isolate, kinds[i]); + stubh3.GetCode(); } } @@ -5236,10 +4878,10 @@ MacroAssembler* masm, ElementsKind kind) { __ cmp(r0, Operand(1)); - InternalArrayNoArgumentConstructorStub stub0(kind); + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); __ TailCallStub(&stub0, lo); - InternalArrayNArgumentsConstructorStub stubN(kind); + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); __ TailCallStub(&stubN, hi); if (IsFastPackedElementsKind(kind)) { @@ -5249,11 +4891,11 @@ __ cmp(r3, Operand::Zero()); InternalArraySingleArgumentConstructorStub - stub1_holey(GetHoleyElementsKind(kind)); + stub1_holey(isolate(), GetHoleyElementsKind(kind)); __ TailCallStub(&stub1_holey, ne); } - InternalArraySingleArgumentConstructorStub stub1(kind); + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); __ TailCallStub(&stub1); } @@ -5285,7 +4927,7 @@ // but the following bit field extraction takes care of that anyway. __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. - __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount); + __ DecodeField<Map::ElementsKindBits>(r3); if (FLAG_debug_code) { Label done; @@ -5342,8 +4984,6 @@ STATIC_ASSERT(FCA::kHolderIndex == 0); STATIC_ASSERT(FCA::kArgsLength == 7); - Isolate* isolate = masm->isolate(); - // context save __ push(context); // load context from callee @@ -5365,7 +5005,7 @@ __ push(scratch); // isolate __ mov(scratch, - Operand(ExternalReference::isolate_address(isolate))); + Operand(ExternalReference::isolate_address(isolate()))); __ push(scratch); // holder __ push(holder); @@ -5377,10 +5017,10 @@ // it's not controlled by GC. 
const int kApiStackSpace = 4; - FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL); + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - ASSERT(!api_function_address.is(r0) && !scratch.is(r0)); + DCHECK(!api_function_address.is(r0) && !scratch.is(r0)); // r0 = FunctionCallbackInfo& // Arguments is after the return address. __ add(r0, sp, Operand(1 * kPointerSize)); @@ -5397,11 +5037,8 @@ __ str(ip, MemOperand(r0, 3 * kPointerSize)); const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); - ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); AllowExternalCallThatCantCauseGC scope(masm); MemOperand context_restore_operand( @@ -5437,7 +5074,7 @@ __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA const int kApiStackSpace = 1; - FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL); + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // Create PropertyAccessorInfo instance on the stack above the exit frame with @@ -5447,12 +5084,8 @@ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); - ExternalReference::Type thunk_type = - ExternalReference::PROFILING_GETTER_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); __ CallApiFunctionAndReturn(api_function_address, thunk_ref, kStackUnwindSpace, diff -Nru nodejs-0.11.13/deps/v8/src/arm/code-stubs-arm.h 
nodejs-0.11.15/deps/v8/src/arm/code-stubs-arm.h --- nodejs-0.11.13/deps/v8/src/arm/code-stubs-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/code-stubs-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM_CODE_STUBS_ARM_H_ #define V8_ARM_CODE_STUBS_ARM_H_ -#include "ic-inl.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { @@ -39,8 +16,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub { public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) {} + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) {} void Generate(MacroAssembler* masm); @@ -50,8 +27,8 @@ private: SaveFPRegsMode save_doubles_; - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } }; @@ -61,15 +38,12 @@ // is allowed to spend extra time setting up conditions to make copying // faster. Copying of overlapping regions is not supported. // Dest register ends at the position after the last character written. - static void GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - int flags); + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding); // Generate string hash. 
@@ -91,11 +65,11 @@ class SubStringStub: public PlatformCodeStub { public: - SubStringStub() {} + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -104,7 +78,7 @@ class StringCompareStub: public PlatformCodeStub { public: - StringCompareStub() { } + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } // Compares two flat ASCII strings and returns result in r0. static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, @@ -125,8 +99,8 @@ Register scratch3); private: - virtual Major MajorKey() { return StringCompare; } - virtual int MinorKey() { return 0; } + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } virtual void Generate(MacroAssembler* masm); static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, @@ -144,10 +118,12 @@ // so you don't have to set up the frame. class WriteInt32ToHeapNumberStub : public PlatformCodeStub { public: - WriteInt32ToHeapNumberStub(Register the_int, + WriteInt32ToHeapNumberStub(Isolate* isolate, + Register the_int, Register the_heap_number, Register scratch) - : the_int_(the_int), + : PlatformCodeStub(isolate), + the_int_(the_int), the_heap_number_(the_heap_number), scratch_(scratch) { } @@ -163,8 +139,8 @@ class HeapNumberRegisterBits: public BitField<int, 4, 4> {}; class ScratchRegisterBits: public BitField<int, 8, 4> {}; - Major MajorKey() { return WriteInt32ToHeapNumber; } - int MinorKey() { + Major MajorKey() const { return WriteInt32ToHeapNumber; } + int MinorKey() const { // Encode the parameters in a unique 16 bit value. 
return IntRegisterBits::encode(the_int_.code()) | HeapNumberRegisterBits::encode(the_heap_number_.code()) @@ -177,12 +153,14 @@ class RecordWriteStub: public PlatformCodeStub { public: - RecordWriteStub(Register object, + RecordWriteStub(Isolate* isolate, + Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) - : object_(object), + : PlatformCodeStub(isolate), + object_(object), value_(value), address_(address), remembered_set_action_(remembered_set_action), @@ -202,12 +180,12 @@ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); - ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); + DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos))); } static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); - ASSERT(Assembler::IsBranch(masm->instr_at(pos))); + DCHECK(Assembler::IsBranch(masm->instr_at(pos))); } static Mode GetMode(Code* stub) { @@ -219,13 +197,13 @@ return INCREMENTAL; } - ASSERT(Assembler::IsTstImmediate(first_instruction)); + DCHECK(Assembler::IsTstImmediate(first_instruction)); if (Assembler::IsBranch(second_instruction)) { return INCREMENTAL_COMPACTION; } - ASSERT(Assembler::IsTstImmediate(second_instruction)); + DCHECK(Assembler::IsTstImmediate(second_instruction)); return STORE_BUFFER_ONLY; } @@ -236,22 +214,23 @@ stub->instruction_size()); switch (mode) { case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || + DCHECK(GetMode(stub) == INCREMENTAL || GetMode(stub) == INCREMENTAL_COMPACTION); PatchBranchIntoNop(&masm, 0); PatchBranchIntoNop(&masm, Assembler::kInstrSize); break; case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); PatchNopIntoBranch(&masm, 0); break; case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == 
STORE_BUFFER_ONLY); PatchNopIntoBranch(&masm, Assembler::kInstrSize); break; } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize); + DCHECK(GetMode(stub) == mode); + CpuFeatures::FlushICache(stub->instruction_start(), + 2 * Assembler::kInstrSize); } private: @@ -266,12 +245,12 @@ : object_(object), address_(address), scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); + DCHECK(!AreAliased(scratch0, object, address, no_reg)); scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_); } void Save(MacroAssembler* masm) { - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); // We don't have to save scratch0_ because it was given to us as // a scratch register. masm->push(scratch1_); @@ -326,9 +305,9 @@ Mode mode); void InformIncrementalMarker(MacroAssembler* masm); - Major MajorKey() { return RecordWrite; } + Major MajorKey() const { return RecordWrite; } - int MinorKey() { + int MinorKey() const { return ObjectBits::encode(object_.code()) | ValueBits::encode(value_.code()) | AddressBits::encode(address_.code()) | @@ -363,13 +342,13 @@ // moved by GC class DirectCEntryStub: public PlatformCodeStub { public: - DirectCEntryStub() {} + explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} void Generate(MacroAssembler* masm); void GenerateCall(MacroAssembler* masm, Register target); private: - Major MajorKey() { return DirectCEntry; } - int MinorKey() { return 0; } + Major MajorKey() const { return DirectCEntry; } + int MinorKey() const { return 0; } bool NeedsImmovableCode() { return true; } }; @@ -379,7 +358,8 @@ public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) + : PlatformCodeStub(isolate), mode_(mode) { } void Generate(MacroAssembler* 
masm); @@ -413,11 +393,9 @@ NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return NameDictionaryLookup; } + Major MajorKey() const { return NameDictionaryLookup; } - int MinorKey() { - return LookupModeBits::encode(mode_); - } + int MinorKey() const { return LookupModeBits::encode(mode_); } class LookupModeBits: public BitField<LookupMode, 0, 1> {}; @@ -425,8 +403,9 @@ }; -struct PlatformCallInterfaceDescriptor { - explicit PlatformCallInterfaceDescriptor( +class PlatformInterfaceDescriptor { + public: + explicit PlatformInterfaceDescriptor( TargetAddressStorageMode storage_mode) : storage_mode_(storage_mode) { } diff -Nru nodejs-0.11.13/deps/v8/src/arm/constants-arm.cc nodejs-0.11.15/deps/v8/src/arm/constants-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/constants-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/constants-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "constants-arm.h" +#include "src/arm/constants-arm.h" namespace v8 { @@ -51,7 +28,7 @@ uint64_t imm = high16 << 48; double d; - OS::MemCopy(&d, &imm, 8); + memcpy(&d, &imm, 8); return d; } @@ -104,7 +81,7 @@ const char* VFPRegisters::Name(int reg, bool is_double) { - ASSERT((0 <= reg) && (reg < kNumVFPRegisters)); + DCHECK((0 <= reg) && (reg < kNumVFPRegisters)); return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)]; } diff -Nru nodejs-0.11.13/deps/v8/src/arm/constants-arm.h nodejs-0.11.15/deps/v8/src/arm/constants-arm.h --- nodejs-0.11.13/deps/v8/src/arm/constants-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/constants-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM_CONSTANTS_ARM_H_ #define V8_ARM_CONSTANTS_ARM_H_ @@ -42,11 +19,11 @@ const int kConstantPoolMarker = 0xe7f000f0; const int kConstantPoolLengthMaxMask = 0xffff; inline int EncodeConstantPoolLength(int length) { - ASSERT((length & kConstantPoolLengthMaxMask) == length); + DCHECK((length & kConstantPoolLengthMaxMask) == length); return ((length & 0xfff0) << 4) | (length & 0xf); } inline int DecodeConstantPoolLength(int instr) { - ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker); + DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker); return ((instr >> 4) & 0xfff0) | (instr & 0xf); } @@ -107,13 +84,13 @@ inline Condition NegateCondition(Condition cond) { - ASSERT(cond != al); + DCHECK(cond != al); return static_cast<Condition>(cond ^ ne); } -// Corresponds to transposing the operands of a comparison. -inline Condition ReverseCondition(Condition cond) { +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cond) { switch (cond) { case lo: return hi; @@ -133,7 +110,7 @@ return ge; default: return cond; - }; + } } @@ -429,64 +406,6 @@ // ----------------------------------------------------------------------------- -// Specific instructions, constants, and masks. -// These constants are declared in assembler-arm.cc, as they use named registers -// and other constants. - - -// add(sp, sp, 4) instruction (aka Pop()) -extern const Instr kPopInstruction; - -// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) -// register r is not encoded. -extern const Instr kPushRegPattern; - -// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r)) -// register r is not encoded. 
-extern const Instr kPopRegPattern; - -// mov lr, pc -extern const Instr kMovLrPc; -// ldr rd, [pc, #offset] -extern const Instr kLdrPCMask; -extern const Instr kLdrPCPattern; -// vldr dd, [pc, #offset] -extern const Instr kVldrDPCMask; -extern const Instr kVldrDPCPattern; -// blxcc rm -extern const Instr kBlxRegMask; - -extern const Instr kBlxRegPattern; - -extern const Instr kMovMvnMask; -extern const Instr kMovMvnPattern; -extern const Instr kMovMvnFlip; -extern const Instr kMovLeaveCCMask; -extern const Instr kMovLeaveCCPattern; -extern const Instr kMovwMask; -extern const Instr kMovwPattern; -extern const Instr kMovwLeaveCCFlip; -extern const Instr kCmpCmnMask; -extern const Instr kCmpCmnPattern; -extern const Instr kCmpCmnFlip; -extern const Instr kAddSubFlip; -extern const Instr kAndBicFlip; - -// A mask for the Rd register for push, pop, ldr, str instructions. -extern const Instr kLdrRegFpOffsetPattern; - -extern const Instr kStrRegFpOffsetPattern; - -extern const Instr kLdrRegFpNegOffsetPattern; - -extern const Instr kStrRegFpNegOffsetPattern; - -extern const Instr kLdrStrInstrTypeMask; -extern const Instr kLdrStrInstrArgumentMask; -extern const Instr kLdrStrOffsetMask; - - -// ----------------------------------------------------------------------------- // Instruction abstraction. 
// The class Instruction enables access to individual fields defined in the ARM @@ -649,6 +568,7 @@ inline int Immed4Value() const { return Bits(19, 16); } inline int ImmedMovwMovtValue() const { return Immed4Value() << 12 | Offset12Value(); } + DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue); // Fields used in Load/Store instructions inline int PUValue() const { return Bits(24, 23); } diff -Nru nodejs-0.11.13/deps/v8/src/arm/cpu-arm.cc nodejs-0.11.15/deps/v8/src/arm/cpu-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/cpu-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/cpu-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2006-2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // CPU specific code for arm independent of OS goes here. #ifdef __arm__ @@ -35,32 +12,20 @@ #endif #endif -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "cpu.h" -#include "macro-assembler.h" -#include "simulator.h" // for cache flushing. +#include "src/assembler.h" +#include "src/macro-assembler.h" +#include "src/simulator.h" // for cache flushing. namespace v8 { namespace internal { -void CPU::SetUp() { - CpuFeatures::Probe(); -} - - -bool CPU::SupportsCrankshaft() { - return CpuFeatures::IsSupported(VFP3); -} - -void CPU::FlushICache(void* start, size_t size) { - // Nothing to do flushing no instructions. - if (size == 0) { - return; - } +void CpuFeatures::FlushICache(void* start, size_t size) { + if (size == 0) return; #if defined(USE_SIMULATOR) // Not generating ARM instructions for C-code. This means that we are @@ -69,47 +34,31 @@ // None of this code ends up in the snapshot so there are no issues // around whether or not to generate the code when building snapshots. Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size); + #elif V8_OS_QNX msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE); + #else - // Ideally, we would call - // syscall(__ARM_NR_cacheflush, start, - // reinterpret_cast<intptr_t>(start) + size, 0); - // however, syscall(int, ...) 
is not supported on all platforms, especially - // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly. - - register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start); - register uint32_t end asm("a2") = - reinterpret_cast<uint32_t>(start) + size; - register uint32_t flg asm("a3") = 0; - #if defined (__arm__) && !defined(__thumb__) - // __arm__ may be defined in thumb mode. - register uint32_t scno asm("r7") = __ARM_NR_cacheflush; - asm volatile( - "svc 0x0" - : "=r" (beg) - : "0" (beg), "r" (end), "r" (flg), "r" (scno)); - #else - // r7 is reserved by the EABI in thumb mode. - asm volatile( - "@ Enter ARM Mode \n\t" - "adr r3, 1f \n\t" - "bx r3 \n\t" - ".ALIGN 4 \n\t" - ".ARM \n" - "1: push {r7} \n\t" - "mov r7, %4 \n\t" - "svc 0x0 \n\t" - "pop {r7} \n\t" - "@ Enter THUMB Mode\n\t" - "adr r3, 2f+1 \n\t" - "bx r3 \n\t" - ".THUMB \n" - "2: \n\t" - : "=r" (beg) - : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush) - : "r3"); - #endif + register uint32_t beg asm("r0") = reinterpret_cast<uint32_t>(start); + register uint32_t end asm("r1") = beg + size; + register uint32_t flg asm("r2") = 0; + + asm volatile( + // This assembly works for both ARM and Thumb targets. + + // Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for + // Thumb targets. + " push {r7}\n" + // r0 = beg + // r1 = end + // r2 = flags (0) + " ldr r7, =%c[scno]\n" // r7 = syscall number + " svc 0\n" + + " pop {r7}\n" + : + : "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush) + : "memory"); #endif } diff -Nru nodejs-0.11.13/deps/v8/src/arm/debug-arm.cc nodejs-0.11.15/deps/v8/src/arm/debug-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/debug-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/debug-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "codegen.h" -#include "debug.h" +#include "src/codegen.h" +#include "src/debug.h" namespace v8 { namespace internal { -#ifdef ENABLE_DEBUGGER_SUPPORT bool BreakLocationIterator::IsDebugBreakAtReturn() { return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -51,12 +27,12 @@ // ldr ip, [pc, #0] // blx ip // <debug break return code entry point address> - // bktp 0 + // bkpt 0 CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions); patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0)); patcher.masm()->blx(v8::internal::ip); patcher.Emit( - debug_info_->GetIsolate()->debug()->debug_break_return()->entry()); + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry()); patcher.masm()->bkpt(0); } @@ -71,20 +47,20 @@ // A debug break in the frame exit code is identified by the JS frame exit code // having been patched with a call instruction. bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); return rinfo->IsPatchedReturnSequence(); } bool BreakLocationIterator::IsDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Check whether the debug break slot instructions have been patched. 
return rinfo()->IsPatchedDebugBreakSlotSequence(); } void BreakLocationIterator::SetDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Patch the code changing the debug break slot code from // mov r2, r2 // mov r2, r2 @@ -97,18 +73,16 @@ patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0)); patcher.masm()->blx(v8::internal::ip); patcher.Emit( - debug_info_->GetIsolate()->debug()->debug_break_slot()->entry()); + debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry()); } void BreakLocationIterator::ClearDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotInstructions); } -const bool Debug::FramePaddingLayout::kIsSupported = false; - #define __ ACCESS_MASM(masm) @@ -119,12 +93,20 @@ { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + // Load padding words on stack. + __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue))); + for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) { + __ push(ip); + } + __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize))); + __ push(ip); + // Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values // are stored as a smi causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); + DCHECK((object_regs & ~kJSCallerSaved) == 0); + DCHECK((non_object_regs & ~kJSCallerSaved) == 0); + DCHECK((object_regs & non_object_regs) == 0); if ((object_regs | non_object_regs) != 0) { for (int i = 0; i < kNumJSCallerSaved; i++) { int r = JSCallerSavedCode(i); @@ -146,7 +128,7 @@ __ mov(r0, Operand::Zero()); // no arguments __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); - CEntryStub ceb(1); + CEntryStub ceb(masm->isolate(), 1); __ CallStub(&ceb); // Restore the register values from the expression stack. @@ -165,6 +147,9 @@ } } + // Don't bother removing padding bytes pushed on the stack + // as the frame is going to be restored right away. + // Leave the internal frame. } @@ -172,61 +157,58 @@ // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. ExternalReference after_break_target = - ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate()); + ExternalReference::debug_after_break_target_address(masm->isolate()); __ mov(ip, Operand(after_break_target)); __ ldr(ip, MemOperand(ip)); __ Jump(ip); } -void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC load (from ic-arm.cc). +void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // -- [sp] : receiver + // -- r1 : function + // -- r3 : slot in feedback array (smi) // ----------------------------------- - // Registers r0 and r2 contain objects that need to be pushed on the - // expression stack of the fake JS frame. 
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0); + Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0); +} + + +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC load (from ic-arm.cc). + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0); } -void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { // Calling convention for IC store (from ic-arm.cc). - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - // Registers r0, r1, and r2 contain objects that need to be pushed on the - // expression stack of the fake JS frame. - Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0); + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0); } -void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0); +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { + // Calling convention for keyed IC load (from ic-arm.cc). 
+ GenerateLoadICDebugBreak(masm); } -void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0); +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC keyed store call (from ic-arm.cc). + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0); } -void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { // Register state for CompareNil IC // ----------- S t a t e ------------- // -- r0 : value @@ -235,16 +217,7 @@ } -void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC call (from ic-arm.cc) - // ----------- S t a t e ------------- - // -- r2 : name - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, r2.bit(), 0); -} - - -void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { // In places other than IC call sites it is expected that r0 is TOS which // is an object - this is not generally the case so this should be used with // care. @@ -252,7 +225,7 @@ } -void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { // Register state for CallFunctionStub (from code-stubs-arm.cc). // ----------- S t a t e ------------- // -- r1 : function @@ -261,18 +234,7 @@ } -void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) { - // Register state for CallFunctionStub (from code-stubs-arm.cc). 
- // ----------- S t a t e ------------- - // -- r1 : function - // -- r2 : feedback array - // -- r3 : slot in feedback array - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0); -} - - -void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-arm.cc) // ----------- S t a t e ------------- // -- r0 : number of arguments (not smi) @@ -282,7 +244,8 @@ } -void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-arm.cc) // ----------- S t a t e ------------- // -- r0 : number of arguments (not smi) @@ -294,7 +257,7 @@ } -void Debug::GenerateSlot(MacroAssembler* masm) { +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { // Generate enough nop's to make space for a call instruction. Avoid emitting // the constant pool in the debug break slot code. Assembler::BlockConstPoolScope block_const_pool(masm); @@ -304,34 +267,57 @@ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) { __ nop(MacroAssembler::DEBUG_BREAK_NOP); } - ASSERT_EQ(Assembler::kDebugBreakSlotInstructions, + DCHECK_EQ(Assembler::kDebugBreakSlotInstructions, masm->InstructionsGeneratedSince(&check_codesize)); } -void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
Generate_DebugBreakCallHelper(masm, 0, 0); } -void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { - masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm); +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + __ Ret(); } -void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { - masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm); -} - -const bool Debug::kFrameDropperSupported = false; +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + ExternalReference restarter_frame_function_slot = + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); + __ mov(ip, Operand(restarter_frame_function_slot)); + __ mov(r1, Operand::Zero()); + __ str(r1, MemOperand(ip, 0)); + + // Load the function pointer off of our current stack frame. + __ ldr(r1, MemOperand(fp, + StandardFrameConstants::kConstantPoolOffset - kPointerSize)); + + // Pop return address, frame and constant pool pointer (if + // FLAG_enable_ool_constant_pool). + __ LeaveFrame(StackFrame::INTERNAL); + + { ConstantPoolUnavailableScope constant_pool_unavailable(masm); + // Load context from the function. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + + // Get function code. + __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset)); + __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); -#undef __ + // Re-run JSFunction, r1 is function, cp is context. 
+ __ Jump(ip); + } +} +const bool LiveEdit::kFrameDropperSupported = true; -#endif // ENABLE_DEBUGGER_SUPPORT +#undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm/deoptimizer-arm.cc nodejs-0.11.15/deps/v8/src/arm/deoptimizer-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/deoptimizer-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/deoptimizer-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "safepoint-table.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" namespace v8 { namespace internal { -const int Deoptimizer::table_entry_size_ = 12; +const int Deoptimizer::table_entry_size_ = 8; int Deoptimizer::patch_size() { @@ -54,7 +31,7 @@ // Fail hard and early if we enter this code object again. byte* pointer = code->FindCodeAgeSequence(); if (pointer != NULL) { - pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize; + pointer += kNoCodeAgeSequenceLength; } else { pointer = code->instruction_start(); } @@ -72,9 +49,6 @@ DeoptimizationInputData* deopt_data = DeoptimizationInputData::cast(code->deoptimization_data()); - SharedFunctionInfo* shared = - SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); - shared->EvictFromOptimizedCodeMap(code, "deoptimized code"); #ifdef DEBUG Address prev_call_address = NULL; #endif @@ -87,16 +61,17 @@ // We need calls to have a predictable size in the unoptimized code, but // this is optimized code, so we don't have to have a predictable size. 
int call_size_in_bytes = - MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry, + MacroAssembler::CallSizeNotPredictableCodeSize(isolate, + deopt_entry, RelocInfo::NONE32); int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; - ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); - ASSERT(call_size_in_bytes <= patch_size()); + DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); + DCHECK(call_size_in_bytes <= patch_size()); CodePatcher patcher(call_address, call_size_in_words); patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); - ASSERT(prev_call_address == NULL || + DCHECK(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); - ASSERT(call_address + patch_size() <= code->instruction_end()); + DCHECK(call_address + patch_size() <= code->instruction_end()); #ifdef DEBUG prev_call_address = call_address; #endif @@ -127,7 +102,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { - ApiFunction function(descriptor->deoptimization_handler_); + ApiFunction function(descriptor->deoptimization_handler()); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); int params = descriptor->GetHandlerParameterCount(); @@ -150,11 +125,6 @@ } -Code* Deoptimizer::NotifyStubFailureBuiltin() { - return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles); -} - - #define __ masm()-> // This code tries to be close to ia32 code so that any changes can be @@ -172,8 +142,8 @@ kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; // Save all allocatable VFP registers before messing with them. - ASSERT(kDoubleRegZero.code() == 14); - ASSERT(kScratchDoubleReg.code() == 15); + DCHECK(kDoubleRegZero.code() == 14); + DCHECK(kScratchDoubleReg.code() == 15); // Check CPU flags for number of registers, setting the Z condition flag. 
__ CheckFor32DRegs(ip); @@ -224,7 +194,7 @@ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); // Copy core registers into FrameDescription::registers_[kNumRegisters]. - ASSERT(Register::kNumRegisters == kNumberOfRegisters); + DCHECK(Register::kNumRegisters == kNumberOfRegisters); for (int i = 0; i < kNumberOfRegisters; i++) { int offset = (i * kPointerSize) + FrameDescription::registers_offset(); __ ldr(r2, MemOperand(sp, i * kPointerSize)); @@ -355,11 +325,11 @@ int start = masm()->pc_offset(); USE(start); __ mov(ip, Operand(i)); - __ push(ip); __ b(&done); - ASSERT(masm()->pc_offset() - start == table_entry_size_); + DCHECK(masm()->pc_offset() - start == table_entry_size_); } __ bind(&done); + __ push(ip); } @@ -374,7 +344,7 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { - ASSERT(FLAG_enable_ool_constant_pool); + DCHECK(FLAG_enable_ool_constant_pool); SetFrameSlot(offset, value); } diff -Nru nodejs-0.11.13/deps/v8/src/arm/disasm-arm.cc nodejs-0.11.15/deps/v8/src/arm/disasm-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/disasm-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/disasm-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // A Disassembler object is used to disassemble a block of code instruction by // instruction. 
The default implementation of the NameConverter object can be @@ -47,18 +24,18 @@ #include <assert.h> -#include <stdio.h> #include <stdarg.h> +#include <stdio.h> #include <string.h> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "constants-arm.h" -#include "disasm.h" -#include "macro-assembler.h" -#include "platform.h" +#include "src/arm/constants-arm.h" +#include "src/base/platform/platform.h" +#include "src/disasm.h" +#include "src/macro-assembler.h" namespace v8 { @@ -230,15 +207,15 @@ } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) { shift_amount = 32; } - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - ", %s #%d", - shift_names[shift_index], - shift_amount); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + ", %s #%d", + shift_names[shift_index], + shift_amount); } else { // by register int rs = instr->RsValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - ", %s ", shift_names[shift_index]); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + ", %s ", shift_names[shift_index]); PrintRegister(rs); } } @@ -250,8 +227,7 @@ int rotate = instr->RotateValue() * 2; int immed8 = instr->Immed8Value(); int imm = (immed8 >> rotate) | (immed8 << (32 - rotate)); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "#%d", imm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm); } @@ -259,10 +235,10 @@ void Decoder::PrintShiftSat(Instruction* instr) { int shift = instr->Bits(11, 7); if (shift > 0) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - ", %s #%d", - shift_names[instr->Bit(6) * 2], - instr->Bits(11, 7)); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + ", %s #%d", + shift_names[instr->Bit(6) * 2], + instr->Bits(11, 7)); } } @@ -306,14 +282,14 @@ return; default: if (svc >= kStopCode) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%d - 0x%x", - svc & kStopCodeMask, - 
svc & kStopCodeMask); - } else { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%d", - svc); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%d - 0x%x", + svc & kStopCodeMask, + svc & kStopCodeMask); + } else { + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%d", + svc); } return; } @@ -323,7 +299,7 @@ // Handle all register based formatting in this function to reduce the // complexity of FormatOption. int Decoder::FormatRegister(Instruction* instr, const char* format) { - ASSERT(format[0] == 'r'); + DCHECK(format[0] == 'r'); if (format[1] == 'n') { // 'rn: Rn register int reg = instr->RnValue(); PrintRegister(reg); @@ -346,7 +322,7 @@ return 2; } else if (format[1] == 'l') { // 'rlist: register list for load and store multiple instructions - ASSERT(STRING_STARTS_WITH(format, "rlist")); + DCHECK(STRING_STARTS_WITH(format, "rlist")); int rlist = instr->RlistValue(); int reg = 0; Print("{"); @@ -372,7 +348,7 @@ // Handle all VFP register based formatting in this function to reduce the // complexity of FormatOption. int Decoder::FormatVFPRegister(Instruction* instr, const char* format) { - ASSERT((format[0] == 'S') || (format[0] == 'D')); + DCHECK((format[0] == 'S') || (format[0] == 'D')); VFPRegPrecision precision = format[0] == 'D' ? 
kDoublePrecision : kSinglePrecision; @@ -422,35 +398,35 @@ void Decoder::FormatNeonList(int Vd, int type) { if (type == nlt_1) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "{d%d}", Vd); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "{d%d}", Vd); } else if (type == nlt_2) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "{d%d, d%d}", Vd, Vd + 1); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "{d%d, d%d}", Vd, Vd + 1); } else if (type == nlt_3) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2); } else if (type == nlt_4) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3); } } void Decoder::FormatNeonMemory(int Rn, int align, int Rm) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "[r%d", Rn); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "[r%d", Rn); if (align != 0) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - ":%d", (1 << align) << 6); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + ":%d", (1 << align) << 6); } if (Rm == 15) { Print("]"); } else if (Rm == 13) { Print("]!"); } else { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "], r%d", Rm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "], r%d", Rm); } } @@ -460,8 +436,7 @@ int imm = instr->ImmedMovwMovtValue(); int rd = instr->RdValue(); PrintRegister(rd); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - ", #%d", imm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm); } @@ -487,14 +462,13 @@ return 1; } case 'c': { // 'cond: conditional 
execution - ASSERT(STRING_STARTS_WITH(format, "cond")); + DCHECK(STRING_STARTS_WITH(format, "cond")); PrintCondition(instr); return 4; } case 'd': { // 'd: vmov double immediate. double d = instr->DoubleImmedVmov(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "#%g", d); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d); return 1; } case 'f': { // 'f: bitfield instructions - v7 and above. @@ -504,11 +478,11 @@ // BFC/BFI: // Bits 20-16 represent most-significant bit. Covert to width. width -= lsbit; - ASSERT(width > 0); + DCHECK(width > 0); } - ASSERT((width + lsbit) <= 32); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "#%d, #%d", lsbit, width); + DCHECK((width + lsbit) <= 32); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "#%d, #%d", lsbit, width); return 1; } case 'h': { // 'h: halfword operation for extra loads and stores @@ -524,13 +498,13 @@ int width = (format[3] - '0') * 10 + (format[4] - '0'); int lsb = (format[6] - '0') * 10 + (format[7] - '0'); - ASSERT((width >= 1) && (width <= 32)); - ASSERT((lsb >= 0) && (lsb <= 31)); - ASSERT((width + lsb) <= 32); - - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%d", - instr->Bits(width + lsb - 1, lsb)); + DCHECK((width >= 1) && (width <= 32)); + DCHECK((lsb >= 0) && (lsb <= 31)); + DCHECK((width + lsb) <= 32); + + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%d", + instr->Bits(width + lsb - 1, lsb)); return 8; } case 'l': { // 'l: branch and link @@ -546,7 +520,7 @@ return 2; } if (format[1] == 'e') { // 'memop: load/store instructions. 
- ASSERT(STRING_STARTS_WITH(format, "memop")); + DCHECK(STRING_STARTS_WITH(format, "memop")); if (instr->HasL()) { Print("ldr"); } else { @@ -564,38 +538,37 @@ return 5; } // 'msg: for simulator break instructions - ASSERT(STRING_STARTS_WITH(format, "msg")); + DCHECK(STRING_STARTS_WITH(format, "msg")); byte* str = reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%s", converter_.NameInCode(str)); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%s", converter_.NameInCode(str)); return 3; } case 'o': { if ((format[3] == '1') && (format[4] == '2')) { // 'off12: 12-bit offset for load and store instructions - ASSERT(STRING_STARTS_WITH(format, "off12")); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%d", instr->Offset12Value()); + DCHECK(STRING_STARTS_WITH(format, "off12")); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%d", instr->Offset12Value()); return 5; } else if (format[3] == '0') { // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0. 
- ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19")); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%d", - (instr->Bits(19, 8) << 4) + - instr->Bits(3, 0)); + DCHECK(STRING_STARTS_WITH(format, "off0to3and8to19")); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%d", + (instr->Bits(19, 8) << 4) + + instr->Bits(3, 0)); return 15; } // 'off8: 8-bit offset for extra load and store instructions - ASSERT(STRING_STARTS_WITH(format, "off8")); + DCHECK(STRING_STARTS_WITH(format, "off8")); int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%d", offs8); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8); return 4; } case 'p': { // 'pu: P and U bits for load and store instructions - ASSERT(STRING_STARTS_WITH(format, "pu")); + DCHECK(STRING_STARTS_WITH(format, "pu")); PrintPU(instr); return 2; } @@ -605,29 +578,29 @@ case 's': { if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat. if (format[6] == 'o') { // 'shift_op - ASSERT(STRING_STARTS_WITH(format, "shift_op")); + DCHECK(STRING_STARTS_WITH(format, "shift_op")); if (instr->TypeValue() == 0) { PrintShiftRm(instr); } else { - ASSERT(instr->TypeValue() == 1); + DCHECK(instr->TypeValue() == 1); PrintShiftImm(instr); } return 8; } else if (format[6] == 's') { // 'shift_sat. 
- ASSERT(STRING_STARTS_WITH(format, "shift_sat")); + DCHECK(STRING_STARTS_WITH(format, "shift_sat")); PrintShiftSat(instr); return 9; } else { // 'shift_rm - ASSERT(STRING_STARTS_WITH(format, "shift_rm")); + DCHECK(STRING_STARTS_WITH(format, "shift_rm")); PrintShiftRm(instr); return 8; } } else if (format[1] == 'v') { // 'svc - ASSERT(STRING_STARTS_WITH(format, "svc")); + DCHECK(STRING_STARTS_WITH(format, "svc")); PrintSoftwareInterrupt(instr->SvcValue()); return 3; } else if (format[1] == 'i') { // 'sign: signed extra loads and stores - ASSERT(STRING_STARTS_WITH(format, "sign")); + DCHECK(STRING_STARTS_WITH(format, "sign")); if (instr->HasSign()) { Print("s"); } @@ -640,13 +613,13 @@ return 1; } case 't': { // 'target: target of branch instructions - ASSERT(STRING_STARTS_WITH(format, "target")); + DCHECK(STRING_STARTS_WITH(format, "target")); int off = (instr->SImmed24Value() << 2) + 8; - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%+d -> %s", - off, - converter_.NameOfAddress( - reinterpret_cast<byte*>(instr) + off)); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%+d -> %s", + off, + converter_.NameOfAddress( + reinterpret_cast<byte*>(instr) + off)); return 6; } case 'u': { // 'u: signed or unsigned multiplies @@ -1124,13 +1097,16 @@ } case db_x: { if (FLAG_enable_sudiv) { - if (!instr->HasW()) { - if (instr->Bits(5, 4) == 0x1) { - if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) { + if (instr->Bits(5, 4) == 0x1) { + if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) { + if (instr->Bit(21) == 0x1) { + // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs + Format(instr, "udiv'cond'b 'rn, 'rm, 'rs"); + } else { // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs"); - break; } + break; } } } @@ -1207,14 +1183,14 @@ Format(instr, "stop'cond 'svc"); // Also print the stop message. Its address is encoded // in the following 4 bytes. 
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "\n %p %08x stop message: %s", - reinterpret_cast<int32_t*>(instr - + Instruction::kInstrSize), - *reinterpret_cast<char**>(instr - + Instruction::kInstrSize), - *reinterpret_cast<char**>(instr - + Instruction::kInstrSize)); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "\n %p %08x stop message: %s", + reinterpret_cast<void*>(instr + + Instruction::kInstrSize), + *reinterpret_cast<uint32_t*>(instr + + Instruction::kInstrSize), + *reinterpret_cast<char**>(instr + + Instruction::kInstrSize)); // We have decoded 2 * Instruction::kInstrSize bytes. return 2 * Instruction::kInstrSize; } else { @@ -1272,10 +1248,10 @@ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) && (instr->Bit(8) == 1)) { // vcvt.f64.s32 Dd, Dd, #<fbits> - int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0)); + int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5)); Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd"); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - ", #%d", fraction_bits); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + ", #%d", fraction_bits); } else if (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)) { DecodeVCVTBetweenFloatingPointAndInteger(instr); @@ -1570,8 +1546,8 @@ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1); int Vm = (instr->Bit(5) << 4) | instr->VmValue(); int imm3 = instr->Bits(21, 19); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm); } else { Unknown(instr); } @@ -1584,8 +1560,8 @@ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1); int Vm = (instr->Bit(5) << 4) | instr->VmValue(); int imm3 = instr->Bits(21, 19); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm); + 
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm); } else { Unknown(instr); } @@ -1599,8 +1575,8 @@ int size = instr->Bits(7, 6); int align = instr->Bits(5, 4); int Rm = instr->VmValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "vst1.%d ", (1 << size) << 3); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "vst1.%d ", (1 << size) << 3); FormatNeonList(Vd, type); Print(", "); FormatNeonMemory(Rn, align, Rm); @@ -1612,8 +1588,8 @@ int size = instr->Bits(7, 6); int align = instr->Bits(5, 4); int Rm = instr->VmValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "vld1.%d ", (1 << size) << 3); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "vld1.%d ", (1 << size) << 3); FormatNeonList(Vd, type); Print(", "); FormatNeonMemory(Rn, align, Rm); @@ -1627,14 +1603,14 @@ int Rn = instr->Bits(19, 16); int offset = instr->Bits(11, 0); if (offset == 0) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "pld [r%d]", Rn); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "pld [r%d]", Rn); } else if (instr->Bit(23) == 0) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "pld [r%d, #-%d]", Rn, offset); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "pld [r%d, #-%d]", Rn, offset); } else { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "pld [r%d, #+%d]", Rn, offset); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "pld [r%d, #+%d]", Rn, offset); } } else { Unknown(instr); @@ -1668,26 +1644,26 @@ int Decoder::InstructionDecode(byte* instr_ptr) { Instruction* instr = Instruction::At(instr_ptr); // Print raw instruction bytes. 
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%08x ", - instr->InstructionBits()); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%08x ", + instr->InstructionBits()); if (instr->ConditionField() == kSpecialCondition) { DecodeSpecialCondition(instr); return Instruction::kInstrSize; } int instruction_bits = *(reinterpret_cast<int*>(instr_ptr)); if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "constant pool begin (length %d)", - DecodeConstantPoolLength(instruction_bits)); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "constant pool begin (length %d)", + DecodeConstantPoolLength(instruction_bits)); return Instruction::kInstrSize; } else if (instruction_bits == kCodeAgeJumpInstruction) { // The code age prologue has a constant immediatly following the jump // instruction. Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize); DecodeType2(instr); - OS::SNPrintF(out_buffer_ + out_buffer_pos_, - " (0x%08x)", target->InstructionBits()); + SNPrintF(out_buffer_ + out_buffer_pos_, + " (0x%08x)", target->InstructionBits()); return 2 * Instruction::kInstrSize; } switch (instr->TypeValue()) { @@ -1739,7 +1715,7 @@ const char* NameConverter::NameOfAddress(byte* addr) const { - v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr); + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); return tmp_buffer_.start(); } diff -Nru nodejs-0.11.13/deps/v8/src/arm/frames-arm.cc nodejs-0.11.15/deps/v8/src/arm/frames-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/frames-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/frames-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,18 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "assembler.h" -#include "assembler-arm.h" -#include "assembler-arm-inl.h" -#include "frames.h" -#include "macro-assembler.h" -#include "macro-assembler-arm.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/macro-assembler.h" + +#include "src/arm/assembler-arm-inl.h" +#include "src/arm/assembler-arm.h" +#include "src/arm/macro-assembler-arm.h" namespace v8 { namespace internal { @@ -43,7 +21,7 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; } Register JavaScriptFrame::context_register() { return cp; } Register JavaScriptFrame::constant_pool_pointer_register() { - ASSERT(FLAG_enable_ool_constant_pool); + DCHECK(FLAG_enable_ool_constant_pool); return pp; } @@ -51,13 +29,13 @@ Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } Register StubFailureTrampolineFrame::context_register() { return cp; } Register StubFailureTrampolineFrame::constant_pool_pointer_register() { - ASSERT(FLAG_enable_ool_constant_pool); + DCHECK(FLAG_enable_ool_constant_pool); return pp; } Object*& ExitFrame::constant_pool_slot() const { - ASSERT(FLAG_enable_ool_constant_pool); + DCHECK(FLAG_enable_ool_constant_pool); const int offset = ExitFrameConstants::kConstantPoolOffset; return Memory::Object_at(fp() + offset); } diff -Nru nodejs-0.11.13/deps/v8/src/arm/frames-arm.h nodejs-0.11.15/deps/v8/src/arm/frames-arm.h --- nodejs-0.11.13/deps/v8/src/arm/frames-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/frames-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_FRAMES_ARM_H_ #define V8_ARM_FRAMES_ARM_H_ @@ -52,8 +29,6 @@ const int kNumJSCallerSaved = 4; -typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved]; - // Return the code of the n-th caller-saved register available to JavaScript // e.g. 
JSCallerSavedReg(0) returns r0.code() == 0 int JSCallerSavedCode(int n); diff -Nru nodejs-0.11.13/deps/v8/src/arm/full-codegen-arm.cc nodejs-0.11.15/deps/v8/src/arm/full-codegen-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/full-codegen-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/full-codegen-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,23 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "code-stubs.h" -#include "codegen.h" -#include "compiler.h" -#include "debug.h" -#include "full-codegen.h" -#include "isolate-inl.h" -#include "parser.h" -#include "scopes.h" -#include "stub-cache.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" -#include "arm/code-stubs-arm.h" -#include "arm/macro-assembler-arm.h" +#include "src/arm/code-stubs-arm.h" +#include "src/arm/macro-assembler-arm.h" namespace v8 { namespace internal { @@ -63,13 +40,13 @@ } ~JumpPatchSite() { - ASSERT(patch_site_.is_bound() == info_emitted_); + DCHECK(patch_site_.is_bound() == info_emitted_); } // When initially emitting this ensure that a jump is always generated to skip // the inlined smi code. void EmitJumpIfNotSmi(Register reg, Label* target) { - ASSERT(!patch_site_.is_bound() && !info_emitted_); + DCHECK(!patch_site_.is_bound() && !info_emitted_); Assembler::BlockConstPoolScope block_const_pool(masm_); __ bind(&patch_site_); __ cmp(reg, Operand(reg)); @@ -79,7 +56,7 @@ // When initially emitting this ensure that a jump is never generated to skip // the inlined smi code. 
void EmitJumpIfSmi(Register reg, Label* target) { - ASSERT(!patch_site_.is_bound() && !info_emitted_); + DCHECK(!patch_site_.is_bound() && !info_emitted_); Assembler::BlockConstPoolScope block_const_pool(masm_); __ bind(&patch_site_); __ cmp(reg, Operand(reg)); @@ -111,25 +88,6 @@ }; -static void EmitStackCheck(MacroAssembler* masm_, - Register stack_limit_scratch, - int pointers = 0, - Register scratch = sp) { - Isolate* isolate = masm_->isolate(); - Label ok; - ASSERT(scratch.is(sp) == (pointers == 0)); - if (pointers != 0) { - __ sub(scratch, sp, Operand(pointers * kPointerSize)); - } - __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex); - __ cmp(scratch, Operand(stack_limit_scratch)); - __ b(hs, &ok); - PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); - __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET); - __ bind(&ok); -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right. 
The actual // argument count matches the formal parameter count expected by the @@ -150,8 +108,6 @@ handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); - InitializeFeedbackVector(); - profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); @@ -177,7 +133,7 @@ __ b(ne, &ok); __ ldr(r2, GlobalObjectOperand()); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); __ str(r2, MemOperand(sp, receiver_offset)); @@ -190,16 +146,22 @@ FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - __ Prologue(BUILD_FUNCTION_FRAME); + __ Prologue(info->IsCodePreAgingActive()); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = info->scope()->num_stack_slots(); // Generators allocate locals, if any, in context slots. - ASSERT(!info->function()->is_generator() || locals_count == 0); + DCHECK(!info->function()->is_generator() || locals_count == 0); if (locals_count > 0) { if (locals_count >= 128) { - EmitStackCheck(masm_, r2, locals_count, r9); + Label ok; + __ sub(r9, sp, Operand(locals_count * kPointerSize)); + __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + __ cmp(r9, Operand(r2)); + __ b(hs, &ok); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&ok); } __ LoadRoot(r9, Heap::kUndefinedValueRootIndex); int kMaxPushes = FLAG_optimize_for_size ? 4 : 32; @@ -231,16 +193,19 @@ if (heap_slots > 0) { // Argument to NewContext is the function, which is still in r1. 
Comment cmnt(masm_, "[ Allocate context"); + bool need_write_barrier = true; if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { __ push(r1); __ Push(info->scope()->GetScopeInfo()); - __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2); + __ CallRuntime(Runtime::kNewGlobalContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ push(r1); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } function_in_register = false; // Context is returned in r0. It replaces the context passed to us. @@ -261,8 +226,15 @@ __ str(r0, target); // Update the write barrier. - __ RecordWriteContextSlot( - cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, r0, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } } @@ -297,7 +269,7 @@ } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(isolate(), type); __ CallStub(&stub); SetVar(arguments, r0, r1, r2); @@ -320,9 +292,9 @@ // constant. 
if (scope()->is_function_scope() && scope()->function() != NULL) { VariableDeclaration* function = scope()->function(); - ASSERT(function->proxy()->var()->mode() == CONST || + DCHECK(function->proxy()->var()->mode() == CONST || function->proxy()->var()->mode() == CONST_LEGACY); - ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); @@ -330,13 +302,21 @@ { Comment cmnt(masm_, "[ Stack check"); PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); - EmitStackCheck(masm_, ip); + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + Handle<Code> stack_check = isolate()->builtins()->StackCheck(); + PredictableCodeSizeScope predictable(masm_, + masm_->CallSize(stack_check, RelocInfo::CODE_TARGET)); + __ Call(stack_check, RelocInfo::CODE_TARGET); + __ bind(&ok); } { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); } } @@ -366,13 +346,27 @@ } +static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize; + + void FullCodeGenerator::EmitProfilingCounterReset() { + Assembler::BlockConstPoolScope block_const_pool(masm_); + PredictableCodeSizeScope predictable_code_size_scope( + masm_, kProfileCounterResetSequenceLength); + Label start; + __ bind(&start); int reset_value = FLAG_interrupt_budget; - if (isolate()->IsDebuggerActive()) { + if (info_->is_debug()) { // Detect debug break requests as soon as possible. reset_value = FLAG_interrupt_budget >> 4; } __ mov(r2, Operand(profiling_counter_)); + // The mov instruction above can be either 1, 2 or 3 instructions depending + // upon whether it is an extended constant pool - insert nop to compensate. 
+ DCHECK(masm_->InstructionsGeneratedSince(&start) <= 3); + while (masm_->InstructionsGeneratedSince(&start) != 3) { + __ nop(); + } __ mov(r3, Operand(Smi::FromInt(reset_value))); __ str(r3, FieldMemOperand(r2, Cell::kValueOffset)); } @@ -385,7 +379,7 @@ Assembler::BlockConstPoolScope block_const_pool(masm_); Label ok; - ASSERT(back_edge_target->is_bound()); + DCHECK(back_edge_target->is_bound()); int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier)); @@ -462,7 +456,7 @@ #ifdef DEBUG // Check that the size of the code used for returning is large enough // for the debugger's requirements. - ASSERT(Assembler::kJSReturnSequenceInstructions <= + DCHECK(Assembler::kJSReturnSequenceInstructions <= masm_->InstructionsGeneratedSince(&check_exit_codesize)); #endif } @@ -470,25 +464,25 @@ void FullCodeGenerator::EffectContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); } void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); } void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); __ push(result_register()); } void FullCodeGenerator::TestContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); // For simplicity we always test the accumulator register. 
codegen()->GetVar(result_register(), var); codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); @@ -553,7 +547,7 @@ true, true_label_, false_label_); - ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { if (false_label_ != fall_through_) __ b(false_label_); } else if (lit->IsTrue() || lit->IsJSObject()) { @@ -580,7 +574,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); } @@ -588,7 +582,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); __ Move(result_register(), reg); } @@ -596,7 +590,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); if (count > 1) __ Drop(count - 1); __ str(reg, MemOperand(sp, 0)); } @@ -604,7 +598,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); // For simplicity we always test the accumulator register. 
__ Drop(count); __ Move(result_register(), reg); @@ -615,7 +609,7 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == materialize_false); + DCHECK(materialize_true == materialize_false); __ bind(materialize_true); } @@ -649,8 +643,8 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == true_label_); - ASSERT(materialize_false == false_label_); + DCHECK(materialize_true == true_label_); + DCHECK(materialize_false == false_label_); } @@ -713,7 +707,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) { - ASSERT(var->IsStackAllocated()); + DCHECK(var->IsStackAllocated()); // Offset is negative because higher indexes are at lower addresses. int offset = -var->index() * kPointerSize; // Adjust by a (parameter or local) base offset. @@ -727,7 +721,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); if (var->IsContextSlot()) { int context_chain_length = scope()->ContextChainLength(var->scope()); __ LoadContext(scratch, context_chain_length); @@ -749,10 +743,10 @@ Register src, Register scratch0, Register scratch1) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); - ASSERT(!scratch0.is(src)); - ASSERT(!scratch0.is(scratch1)); - ASSERT(!scratch1.is(src)); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!scratch0.is(src)); + DCHECK(!scratch0.is(scratch1)); + DCHECK(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ str(src, location); @@ -792,7 +786,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { // The variable in the declaration always resides in the current function // context. 
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); @@ -846,7 +840,7 @@ Comment cmnt(masm_, "[ VariableDeclaration"); __ mov(r2, Operand(variable->name())); // Declaration nodes are always introduced in one of four modes. - ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY : NONE; __ mov(r1, Operand(Smi::FromInt(attr))); @@ -861,7 +855,7 @@ __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value. __ Push(cp, r2, r1, r0); } - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -876,7 +870,7 @@ case Variable::UNALLOCATED: { globals_->Add(variable->name(), zone()); Handle<SharedFunctionInfo> function = - Compiler::BuildFunctionInfo(declaration->fun(), script()); + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); // Check for stack-overflow exception. if (function.is_null()) return SetStackOverflow(); globals_->Add(function, zone()); @@ -917,7 +911,7 @@ __ Push(cp, r2, r1); // Push initial value for function declaration. 
VisitForStackValue(declaration->fun()); - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -926,8 +920,8 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { Variable* variable = declaration->proxy()->var(); - ASSERT(variable->location() == Variable::CONTEXT); - ASSERT(variable->interface()->IsFrozen()); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); Comment cmnt(masm_, "[ ModuleDeclaration"); EmitDebugCheckDeclarationContext(variable); @@ -989,7 +983,7 @@ __ mov(r1, Operand(pairs)); __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags()))); __ Push(cp, r1, r0); - __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3); + __ CallRuntime(Runtime::kDeclareGlobals, 3); // Return value is ignored. } @@ -997,7 +991,7 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { // Call the runtime to declare the modules. __ Push(descriptions); - __ CallRuntime(Runtime::kHiddenDeclareModules, 1); + __ CallRuntime(Runtime::kDeclareModules, 1); // Return value is ignored. } @@ -1187,12 +1181,8 @@ Label non_proxy; __ bind(&fixed_array); - Handle<Object> feedback = Handle<Object>( - Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), - isolate()); - StoreFeedbackVectorSlot(slot, feedback); __ Move(r1, FeedbackVector()); - __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); + __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot))); __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check @@ -1286,25 +1276,8 @@ Iteration loop_statement(this, stmt); increment_loop_depth(); - // var iterator = iterable[@@iterator]() - VisitForAccumulatorValue(stmt->assign_iterator()); - - // As with for-in, skip the loop if the iterator is null or undefined. 
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); - __ b(eq, loop_statement.break_label()); - __ CompareRoot(r0, Heap::kNullValueRootIndex); - __ b(eq, loop_statement.break_label()); - - // Convert the iterator to a JS object. - Label convert, done_convert; - __ JumpIfSmi(r0, &convert); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &done_convert); - __ bind(&convert); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ bind(&done_convert); - __ push(r0); + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); // Loop entry. __ bind(loop_statement.continue_label()); @@ -1351,7 +1324,9 @@ !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode(), info->is_generator()); + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); __ mov(r2, Operand(info)); __ CallStub(&stub); } else { @@ -1359,7 +1334,7 @@ __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex); __ Push(cp, r0, r1); - __ CallRuntime(Runtime::kHiddenNewClosure, 3); + __ CallRuntime(Runtime::kNewClosure, 3); } context()->Plug(r0); } @@ -1371,7 +1346,7 @@ } -void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { Register current = cp; @@ -1419,8 +1394,13 @@ __ bind(&fast); } - __ ldr(r0, GlobalObjectOperand()); - __ mov(r2, Operand(var->name())); + __ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), Operand(proxy->var()->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? 
NOT_CONTEXTUAL : CONTEXTUAL; @@ -1430,7 +1410,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, Label* slow) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); Register context = cp; Register next = r3; Register temp = r4; @@ -1460,7 +1440,7 @@ } -void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofState typeof_state, Label* slow, Label* done) { @@ -1469,8 +1449,9 @@ // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. + Variable* var = proxy->var(); if (var->mode() == DYNAMIC_GLOBAL) { - EmitLoadGlobalCheckExtensions(var, typeof_state, slow); + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); __ jmp(done); } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); @@ -1484,7 +1465,7 @@ __ b(ne, done); __ mov(r0, Operand(var->name())); __ push(r0); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } } __ jmp(done); @@ -1502,10 +1483,12 @@ switch (var->location()) { case Variable::UNALLOCATED: { Comment cmnt(masm_, "[ Global variable"); - // Use inline caching. Variable name is passed in r2 and the global - // object (receiver) in r0. - __ ldr(r0, GlobalObjectOperand()); - __ mov(r2, Operand(var->name())); + __ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), Operand(var->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } CallLoadIC(CONTEXTUAL); context()->Plug(r0); break; @@ -1522,7 +1505,7 @@ // always looked up dynamically, i.e. in that case // var->location() == LOOKUP. // always holds. - ASSERT(var->scope() != NULL); + DCHECK(var->scope() != NULL); // Check if the binding really needs an initialization check. 
The check // can be skipped in the following situation: we have a LET or CONST @@ -1545,8 +1528,8 @@ skip_init_check = false; } else { // Check that we always have valid source position. - ASSERT(var->initializer_position() != RelocInfo::kNoPosition); - ASSERT(proxy->position() != RelocInfo::kNoPosition); + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); skip_init_check = var->mode() != CONST_LEGACY && var->initializer_position() < proxy->position(); } @@ -1562,11 +1545,11 @@ __ b(ne, &done); __ mov(r0, Operand(var->name())); __ push(r0); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); __ bind(&done); } else { // Uninitalized const bindings outside of harmony mode are unholed. - ASSERT(var->mode() == CONST_LEGACY); + DCHECK(var->mode() == CONST_LEGACY); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); } context()->Plug(r0); @@ -1582,11 +1565,11 @@ Label done, slow; // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ mov(r1, Operand(var->name())); __ Push(cp, r1); // Context and name. 
- __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ bind(&done); context()->Plug(r0); } @@ -1619,7 +1602,7 @@ __ mov(r2, Operand(expr->pattern())); __ mov(r1, Operand(expr->flags())); __ Push(r4, r3, r2, r1); - __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); __ mov(r5, r0); __ bind(&materialized); @@ -1631,7 +1614,7 @@ __ bind(&runtime_allocate); __ mov(r0, Operand(Smi::FromInt(size))); __ Push(r5, r0); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ pop(r5); __ bind(&allocated); @@ -1671,13 +1654,13 @@ : ObjectLiteral::kNoFlags; __ mov(r0, Operand(Smi::FromInt(flags))); int properties_count = constant_properties->length() / 2; - if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() || - flags != ObjectLiteral::kFastElements || + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ Push(r3, r2, r1, r0); - __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { - FastCloneShallowObjectStub stub(properties_count); + FastCloneShallowObjectStub stub(isolate(), properties_count); __ CallStub(&stub); } @@ -1705,14 +1688,15 @@ case ObjectLiteral::Property::CONSTANT: UNREACHABLE(); case ObjectLiteral::Property::MATERIALIZED_LITERAL: - ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value())); + DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value())); // Fall through. 
case ObjectLiteral::Property::COMPUTED: if (key->value()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); - __ mov(r2, Operand(key->value())); - __ ldr(r1, MemOperand(sp)); + DCHECK(StoreIC::ValueRegister().is(r0)); + __ mov(StoreIC::NameRegister(), Operand(key->value())); + __ ldr(StoreIC::ReceiverRegister(), MemOperand(sp)); CallStoreIC(key->LiteralFeedbackId()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { @@ -1726,7 +1710,7 @@ VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes __ push(r0); __ CallRuntime(Runtime::kSetProperty, 4); } else { @@ -1766,11 +1750,11 @@ EmitAccessor(it->second->setter); __ mov(r0, Operand(Smi::FromInt(NONE))); __ push(r0); - __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); } if (expr->has_function()) { - ASSERT(result_saved); + DCHECK(result_saved); __ ldr(r0, MemOperand(sp)); __ push(r0); __ CallRuntime(Runtime::kToFastProperties, 1); @@ -1795,7 +1779,7 @@ ZoneList<Expression*>* subexprs = expr->values(); int length = subexprs->length(); Handle<FixedArray> constant_elements = expr->constant_elements(); - ASSERT_EQ(2, constant_elements->length()); + DCHECK_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind); @@ -1813,31 +1797,12 @@ __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r1, Operand(constant_elements)); - if (has_fast_elements && constant_elements_values->map() == - isolate()->heap()->fixed_cow_array_map()) { - FastCloneShallowArrayStub stub( - 
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, - allocation_site_mode, - length); - __ CallStub(&stub); - __ IncrementCounter( - isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2); - } else if (expr->depth() > 1 || Serializer::enabled() || - length > FastCloneShallowArrayStub::kMaximumClonedLength) { + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { __ mov(r0, Operand(Smi::FromInt(flags))); __ Push(r3, r2, r1, r0); - __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); } else { - ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || - FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = - FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - - if (has_fast_elements) { - mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; - } - - FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); __ CallStub(&stub); } @@ -1869,7 +1834,7 @@ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); } else { __ mov(r3, Operand(Smi::FromInt(i))); - StoreArrayLiteralElementStub stub; + StoreArrayLiteralElementStub stub(isolate()); __ CallStub(&stub); } @@ -1886,7 +1851,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { - ASSERT(expr->target()->IsValidLeftHandSide()); + DCHECK(expr->target()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ Assignment"); @@ -1908,9 +1873,9 @@ break; case NAMED_PROPERTY: if (expr->is_compound()) { - // We need the receiver both on the stack and in the accumulator. - VisitForAccumulatorValue(property->obj()); - __ push(result_register()); + // We need the receiver both on the stack and in the register. 
+ VisitForStackValue(property->obj()); + __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); } else { VisitForStackValue(property->obj()); } @@ -1918,9 +1883,9 @@ case KEYED_PROPERTY: if (expr->is_compound()) { VisitForStackValue(property->obj()); - VisitForAccumulatorValue(property->key()); - __ ldr(r1, MemOperand(sp, 0)); - __ push(r0); + VisitForStackValue(property->key()); + __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); + __ ldr(LoadIC::NameRegister(), MemOperand(sp, 0)); } else { VisitForStackValue(property->obj()); VisitForStackValue(property->key()); @@ -2016,7 +1981,7 @@ __ bind(&suspend); VisitForAccumulatorValue(expr->generator_object()); - ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); + DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); __ mov(r1, Operand(Smi::FromInt(continuation.pos()))); __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset)); __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset)); @@ -2027,7 +1992,7 @@ __ cmp(sp, r1); __ b(eq, &post_runtime); __ push(r0); // generator object - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&post_runtime); __ pop(result_register()); @@ -2059,6 +2024,9 @@ Label l_catch, l_try, l_suspend, l_continuation, l_resume; Label l_next, l_call, l_loop; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + // Initial send value is undefined. 
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex); __ b(&l_next); @@ -2066,9 +2034,9 @@ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } __ bind(&l_catch); handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); - __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw" - __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter - __ Push(r2, r3, r0); // "throw", iter, except + __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw" + __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter + __ Push(load_name, r3, r0); // "throw", iter, except __ jmp(&l_call); // try { received = %yield result } @@ -2086,14 +2054,14 @@ const int generator_object_depth = kPointerSize + handler_size; __ ldr(r0, MemOperand(sp, generator_object_depth)); __ push(r0); // g - ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); + DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); __ mov(r1, Operand(Smi::FromInt(l_continuation.pos()))); __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset)); __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset)); __ mov(r1, cp); __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2, kLRHasBeenSaved, kDontSaveFPRegs); - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ pop(r0); // result EmitReturnSequence(); @@ -2102,19 +2070,24 @@ // receiver = iter; f = 'next'; arg = received; __ bind(&l_next); - __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next" - __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter - __ Push(r2, r3, r0); // "next", iter, received + + __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next" + __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter + __ Push(load_name, r3, r0); // "next", iter, received // result = receiver[f](arg); __ bind(&l_call); - __ ldr(r1, MemOperand(sp, 
kPointerSize)); - __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); + __ ldr(load_receiver, MemOperand(sp, kPointerSize)); + __ ldr(load_name, MemOperand(sp, 2 * kPointerSize)); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot()))); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallIC(ic, TypeFeedbackId::None()); __ mov(r1, r0); __ str(r1, MemOperand(sp, 2 * kPointerSize)); - CallFunctionStub stub(1, CALL_AS_METHOD); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); __ CallStub(&stub); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2122,19 +2095,29 @@ // if (!result.done) goto l_try; __ bind(&l_loop); - __ push(r0); // save result - __ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done" - CallLoadIC(NOT_CONTEXTUAL); // result.done in r0 + __ Move(load_receiver, r0); + + __ push(load_receiver); // save result + __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->DoneFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // r0=result.done Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); CallIC(bool_ic); __ cmp(r0, Operand(0)); __ b(eq, &l_try); // result.value - __ pop(r0); // result - __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value" - CallLoadIC(NOT_CONTEXTUAL); // result.value in r0 - context()->DropAndPlug(2, r0); // drop iter and g + __ pop(load_receiver); // result + __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->ValueFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // r0=result.value + context()->DropAndPlug(2, r0); // drop iter and g break; } } @@ -2145,7 +2128,7 @@ Expression *value, JSGeneratorObject::ResumeMode resume_mode) { // The value stays in r0, and is ultimately read by the resumed generator, as - // if 
CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it // is read to throw the value when the resumed generator is already closed. // r1 will hold the generator object until the activation has been resumed. VisitForStackValue(generator); @@ -2236,10 +2219,10 @@ __ push(r2); __ b(&push_operand_holes); __ bind(&call_resume); - ASSERT(!result_register().is(r1)); + DCHECK(!result_register().is(r1)); __ Push(r1, result_register()); __ Push(Smi::FromInt(resume_mode)); - __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); // Not reached: the runtime call returns elsewhere. __ stop("not-reached"); @@ -2254,14 +2237,14 @@ } else { // Throw the provided value. __ push(r0); - __ CallRuntime(Runtime::kHiddenThrow, 1); + __ CallRuntime(Runtime::kThrow, 1); } __ jmp(&done); // Throw error if we attempt to operate on a running generator. __ bind(&wrong_state); __ push(r1); - __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); __ bind(&done); context()->Plug(result_register()); @@ -2272,14 +2255,14 @@ Label gc_required; Label allocated; - Handle<Map> map(isolate()->native_context()->generator_result_map()); + Handle<Map> map(isolate()->native_context()->iterator_result_map()); __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT); __ jmp(&allocated); __ bind(&gc_required); __ Push(Smi::FromInt(map->instance_size())); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ ldr(context_register(), MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2288,7 +2271,7 @@ __ pop(r2); __ mov(r3, Operand(isolate()->factory()->ToBoolean(done))); __ mov(r4, Operand(isolate()->factory()->empty_fixed_array())); - ASSERT_EQ(map->instance_size(), 5 * kPointerSize); + 
DCHECK_EQ(map->instance_size(), 5 * kPointerSize); __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); @@ -2307,17 +2290,27 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - __ mov(r2, Operand(key->value())); - // Call load IC. It has arguments receiver and property name r0 and r2. - CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + __ mov(LoadIC::NameRegister(), Operand(key->value())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } } void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); - // Call keyed load IC. It has arguments key and receiver in r0 and r1. Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - CallIC(ic, prop->PropertyFeedbackId()); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallIC(ic); + } else { + CallIC(ic, prop->PropertyFeedbackId()); + } } @@ -2343,8 +2336,8 @@ patch_site.EmitJumpIfSmi(scratch1, &smi_case); __ bind(&stub_call); - BinaryOpICStub stub(op, mode); - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + BinaryOpICStub stub(isolate(), op, mode); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done); @@ -2419,16 +2412,16 @@ Token::Value op, OverwriteMode mode) { __ pop(r1); - BinaryOpICStub stub(op, mode); + BinaryOpICStub stub(isolate(), op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. 
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(r0); } void FullCodeGenerator::EmitAssignment(Expression* expr) { - ASSERT(expr->IsValidLeftHandSide()); + DCHECK(expr->IsValidReferenceExpression()); // Left-hand side can only be a property, a global or a (parameter or local) // slot. @@ -2451,9 +2444,10 @@ case NAMED_PROPERTY: { __ push(r0); // Preserve value. VisitForAccumulatorValue(prop->obj()); - __ mov(r1, r0); - __ pop(r0); // Restore value. - __ mov(r2, Operand(prop->key()->AsLiteral()->value())); + __ Move(StoreIC::ReceiverRegister(), r0); + __ pop(StoreIC::ValueRegister()); // Restore value. + __ mov(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); CallStoreIC(); break; } @@ -2461,8 +2455,8 @@ __ push(r0); // Preserve value. VisitForStackValue(prop->obj()); VisitForAccumulatorValue(prop->key()); - __ mov(r1, r0); - __ Pop(r0, r2); // r0 = restored value. + __ Move(KeyedStoreIC::NameRegister(), r0); + __ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -2487,33 +2481,23 @@ } -void FullCodeGenerator::EmitCallStoreContextSlot( - Handle<String> name, StrictMode strict_mode) { - __ push(r0); // Value. - __ mov(r1, Operand(name)); - __ mov(r0, Operand(Smi::FromInt(strict_mode))); - __ Push(cp, r1, r0); // Context, name, strict mode. - __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4); -} - - void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { if (var->IsUnallocated()) { // Global var, const, or let. 
- __ mov(r2, Operand(var->name())); - __ ldr(r1, GlobalObjectOperand()); + __ mov(StoreIC::NameRegister(), Operand(var->name())); + __ ldr(StoreIC::ReceiverRegister(), GlobalObjectOperand()); CallStoreIC(); } else if (op == Token::INIT_CONST_LEGACY) { // Const initializers need a write barrier. - ASSERT(!var->IsParameter()); // No const parameters. + DCHECK(!var->IsParameter()); // No const parameters. if (var->IsLookupSlot()) { __ push(r0); __ mov(r0, Operand(var->name())); __ Push(cp, r0); // Context and name. - __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3); + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); Label skip; MemOperand location = VarOperand(var, r1); __ ldr(r2, location); @@ -2525,30 +2509,32 @@ } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. - if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); - } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); - Label assign; - MemOperand location = VarOperand(var, r1); - __ ldr(r3, location); - __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); - __ b(ne, &assign); - __ mov(r3, Operand(var->name())); - __ push(r3); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); - // Perform the assignment. - __ bind(&assign); - EmitStoreToStackLocalOrContextSlot(var, location); - } + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, r1); + __ ldr(r3, location); + __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); + __ b(ne, &assign); + __ mov(r3, Operand(var->name())); + __ push(r3); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + // Perform the assignment. 
+ __ bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); } else if (!var->is_const_mode() || op == Token::INIT_CONST) { - // Assignment to var or initializing assignment to let/const - // in harmony mode. if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); + // Assignment to var. + __ push(r0); // Value. + __ mov(r1, Operand(var->name())); + __ mov(r0, Operand(Smi::FromInt(strict_mode()))); + __ Push(cp, r1, r0); // Context, name, strict mode. + __ CallRuntime(Runtime::kStoreLookupSlot, 4); } else { - ASSERT((var->IsStackAllocated() || var->IsContextSlot())); + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK((var->IsStackAllocated() || var->IsContextSlot())); MemOperand location = VarOperand(var, r1); if (generate_debug_code_ && op == Token::INIT_LET) { // Check for an uninitialized let binding. @@ -2566,14 +2552,13 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { // Assignment to a property, using a named store IC. Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); - ASSERT(prop->key()->AsLiteral() != NULL); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); // Record source code position before IC call. SetSourcePosition(expr->position()); - __ mov(r2, Operand(prop->key()->AsLiteral()->value())); - __ pop(r1); - + __ mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value())); + __ pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->AssignmentFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); @@ -2586,7 +2571,8 @@ // Record source code position before IC call. SetSourcePosition(expr->position()); - __ Pop(r2, r1); // r1 = key. + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(r0)); Handle<Code> ic = strict_mode() == SLOPPY ? 
isolate()->builtins()->KeyedStoreIC_Initialize() @@ -2604,13 +2590,15 @@ if (key->IsPropertyName()) { VisitForAccumulatorValue(expr->obj()); + __ Move(LoadIC::ReceiverRegister(), r0); EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(r0); } else { VisitForStackValue(expr->obj()); VisitForAccumulatorValue(expr->key()); - __ pop(r1); + __ Move(LoadIC::NameRegister(), r0); + __ pop(LoadIC::ReceiverRegister()); EmitKeyedPropertyLoad(expr); context()->Plug(r0); } @@ -2628,14 +2616,15 @@ // Code common for calls using the IC. -void FullCodeGenerator::EmitCallWithIC(Call* expr) { +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); - CallFunctionFlags flags; + CallIC::CallType call_type = callee->IsVariableProxy() + ? CallIC::FUNCTION + : CallIC::METHOD; + // Get the target function. - if (callee->IsVariableProxy()) { + if (call_type == CallIC::FUNCTION) { { StackValueContext context(this); EmitVariableLoad(callee->AsVariableProxy()); PrepareForBailout(callee, NO_REGISTERS); @@ -2643,55 +2632,34 @@ // Push undefined as receiver. This is patched in the method prologue if it // is a sloppy mode method. __ Push(isolate()->factory()->undefined_value()); - flags = NO_CALL_FUNCTION_FLAGS; } else { // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ ldr(r0, MemOperand(sp, 0)); + DCHECK(callee->IsProperty()); + __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); // Push the target function under the receiver. __ ldr(ip, MemOperand(sp, 0)); __ push(ip); __ str(r0, MemOperand(sp, kPointerSize)); - flags = CALL_AS_METHOD; - } - - // Load the arguments. 
- { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } } - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, flags); - __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - - // Restore context register. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, r0); + EmitCall(expr, call_type); } // Code common for calls using the IC. -void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, - Expression* key) { +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { // Load the key. VisitForAccumulatorValue(key); Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ ldr(r1, MemOperand(sp, 0)); + DCHECK(callee->IsProperty()); + __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); + __ Move(LoadIC::NameRegister(), r0); EmitKeyedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2700,28 +2668,12 @@ __ push(ip); __ str(r0, MemOperand(sp, kPointerSize)); - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, CALL_AS_METHOD); - __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - // Restore context register. 
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, r0); + EmitCall(expr, CallIC::METHOD); } -void FullCodeGenerator::EmitCallWithStub(Call* expr) { - // Code common for calls using the call stub. +void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); { PreservePositionScope scope(masm()->positions_recorder()); @@ -2729,19 +2681,17 @@ VisitForStackValue(args->at(i)); } } - // Record source position for debugger. - SetSourcePosition(expr->position()); - Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); - __ Move(r2, FeedbackVector()); + // Record source position of the IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); - - // Record call targets in unoptimized code. - CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); + RecordJSReturnSite(expr); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2769,7 +2719,7 @@ // Do the runtime call. __ Push(r4, r3, r2, r1); - __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); } @@ -2816,7 +2766,7 @@ // Record source position for debugger. 
SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); RecordJSReturnSite(expr); @@ -2824,7 +2774,7 @@ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, r0); } else if (call_type == Call::GLOBAL_CALL) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else if (call_type == Call::LOOKUP_SLOT_CALL) { // Call to a lookup slot (dynamically introduced variable). @@ -2834,16 +2784,16 @@ { PreservePositionScope scope(masm()->positions_recorder()); // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); } __ bind(&slow); // Call the runtime to find the function to call (returned in r0) // and the object holding it (returned in edx). - ASSERT(!context_register().is(r2)); + DCHECK(!context_register().is(r2)); __ mov(r2, Operand(proxy->name())); __ Push(context_register(), r2); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ Push(r0, r1); // Function, receiver. // If fast case code has been generated, emit code to push the @@ -2864,19 +2814,19 @@ // The receiver is either the global receiver or an object found // by LoadContextSlot. 
- EmitCallWithStub(expr); + EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(property->obj()); } if (property->key()->IsPropertyName()) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else { - EmitKeyedCallWithIC(expr, property->key()); + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { - ASSERT(call_type == Call::OTHER_CALL); + DCHECK(call_type == Call::OTHER_CALL); // Call to an arbitrary expression not handled specially above. { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(callee); @@ -2884,12 +2834,12 @@ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); __ push(r1); // Emit function call. - EmitCallWithStub(expr); + EmitCall(expr); } #ifdef DEBUG // RecordJSReturnSite should have been called. - ASSERT(expr->return_is_recorded_); + DCHECK(expr->return_is_recorded_); #endif } @@ -2921,21 +2871,17 @@ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); // Record call targets in unoptimized code. 
- Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); if (FLAG_pretenuring_call_new) { - StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(), - isolate()->factory()->NewAllocationSite()); - ASSERT(expr->AllocationSiteFeedbackSlot() == + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == expr->CallNewFeedbackSlot() + 1); } __ Move(r2, FeedbackVector()); __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot()))); - CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(r0); } @@ -2943,7 +2889,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2964,7 +2910,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2985,7 +2931,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3018,7 +2964,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3040,7 +2986,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + 
DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3065,7 +3011,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3109,7 +3055,7 @@ __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); // Calculate the end of the descriptor array. __ mov(r2, r4); - __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3)); + __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2)); // Loop through all the keys in the descriptor array. If one of these is the // string "valueOf" the result is false. @@ -3153,7 +3099,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3175,7 +3121,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3201,7 +3147,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3223,7 +3169,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3245,7 +3191,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label materialize_true, materialize_false; Label* if_true = NULL; @@ -3274,7 +3220,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { ZoneList<Expression*>* args = 
expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); // Load the two objects into registers and perform the comparison. VisitForStackValue(args->at(0)); @@ -3298,21 +3244,21 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); // ArgumentsAccessStub expects the key in edx and the formal // parameter count in r0. VisitForAccumulatorValue(args->at(0)); __ mov(r1, r0); __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); __ CallStub(&stub); context()->Plug(r0); } void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); // Get the number of formal parameters. __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); @@ -3332,7 +3278,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); Label done, null, function, non_function_constructor; VisitForAccumulatorValue(args->at(0)); @@ -3391,33 +3337,11 @@ } -void FullCodeGenerator::EmitLog(CallRuntime* expr) { - // Conditionally generate a log call. - // Args: - // 0 (literal string): The type of logging (corresponds to the flags). - // This is used to determine whether or not to generate the log call. - // 1 (string): Format string. Access the string at argument index 2 - // with '%2s' (see Logger::LogRuntime for all the formats). - // 2 (array): Arguments to the format string. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 3); - if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) { - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - __ CallRuntime(Runtime::kHiddenLog, 2); - } - - // Finally, we're expected to leave a value on the top of the stack. - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - context()->Plug(r0); -} - - void FullCodeGenerator::EmitSubString(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - SubStringStub stub; + SubStringStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3428,9 +3352,9 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - RegExpExecStub stub; + RegExpExecStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 4); + DCHECK(args->length() == 4); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3442,7 +3366,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); // Load the object. Label done; @@ -3459,8 +3383,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); - ASSERT_NE(NULL, args->at(1)->AsLiteral()); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -3498,7 +3422,7 @@ } __ bind(¬_date_object); - __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0); + __ CallRuntime(Runtime::kThrowNotDateError, 0); __ bind(&done); context()->Plug(r0); } @@ -3506,7 +3430,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = r0; Register index = r1; @@ -3539,7 +3463,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = r0; Register index = r1; @@ -3575,10 +3499,10 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - MathPowStub stub(MathPowStub::ON_STACK); + MathPowStub stub(isolate(), MathPowStub::ON_STACK); __ CallStub(&stub); context()->Plug(r0); } @@ -3586,7 +3510,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); // Load the object. VisitForAccumulatorValue(args->at(1)); // Load the value. __ pop(r1); // r0 = value. r1 = object. @@ -3614,11 +3538,11 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 1); + DCHECK_EQ(args->length(), 1); // Load the argument into r0 and call the stub. 
VisitForAccumulatorValue(args->at(0)); - NumberToStringStub stub; + NumberToStringStub stub(isolate()); __ CallStub(&stub); context()->Plug(r0); } @@ -3626,7 +3550,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); Label done; @@ -3644,7 +3568,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3689,7 +3613,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3736,12 +3660,12 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); __ pop(r1); - StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED); + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); __ CallStub(&stub); context()->Plug(r0); } @@ -3749,39 +3673,19 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - StringCompareStub stub; + StringCompareStub stub(isolate()); __ CallStub(&stub); context()->Plug(r0); } -void FullCodeGenerator::EmitMathLog(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); - context()->Plug(r0); -} - - -void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. - ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); - context()->Plug(r0); -} - - void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() >= 2); + DCHECK(args->length() >= 2); int arg_count = args->length() - 2; // 2 ~ receiver and function. for (int i = 0; i < arg_count + 1; i++) { @@ -3812,9 +3716,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { - RegExpConstructResultStub stub; + RegExpConstructResultStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(2)); @@ -3827,8 +3731,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); - ASSERT_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_EQ(2, args->length()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle<FixedArray> jsfunction_result_caches( @@ -3867,7 +3771,7 @@ __ bind(¬_found); // Call runtime to perform the lookup. 
__ Push(cache, key); - __ CallRuntime(Runtime::kHiddenGetFromCache, 2); + __ CallRuntime(Runtime::kGetFromCache, 2); __ bind(&done); context()->Plug(r0); @@ -3896,7 +3800,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); __ AssertString(r0); @@ -3913,7 +3817,7 @@ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop, one_char_separator_loop_entry, long_separator_loop; ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(0)); @@ -4071,7 +3975,7 @@ __ CopyBytes(string, result_pos, string_length, scratch); __ cmp(element, elements_end); __ b(lt, &empty_separator_loop); // End while (element < elements_end). - ASSERT(result.is(r0)); + DCHECK(result.is(r0)); __ b(&done); // One-character separator case @@ -4103,7 +4007,7 @@ __ CopyBytes(string, result_pos, string_length, scratch); __ cmp(element, elements_end); __ b(lt, &one_char_separator_loop); // End while (element < elements_end). - ASSERT(result.is(r0)); + DCHECK(result.is(r0)); __ b(&done); // Long separator case (separator is more than one character). Entry is at the @@ -4133,7 +4037,7 @@ __ CopyBytes(string, result_pos, string_length, scratch); __ cmp(element, elements_end); __ b(lt, &long_separator_loop); // End while (element < elements_end). 
- ASSERT(result.is(r0)); + DCHECK(result.is(r0)); __ b(&done); __ bind(&bailout); @@ -4143,6 +4047,17 @@ } +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ mov(ip, Operand(debug_is_active)); + __ ldrb(r0, MemOperand(ip)); + __ SmiTag(r0); + context()->Plug(r0); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->function() != NULL && expr->function()->intrinsic_type == Runtime::INLINE) { @@ -4157,13 +4072,20 @@ if (expr->is_jsruntime()) { // Push the builtins object as the receiver. - __ ldr(r0, GlobalObjectOperand()); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset)); - __ push(r0); + Register receiver = LoadIC::ReceiverRegister(); + __ ldr(receiver, GlobalObjectOperand()); + __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset)); + __ push(receiver); // Load the function from the receiver. - __ mov(r2, Operand(expr->name())); - CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + __ mov(LoadIC::NameRegister(), Operand(expr->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } // Push the target function under the receiver. __ ldr(ip, MemOperand(sp, 0)); @@ -4178,7 +4100,7 @@ // Record source position of the IC call. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); @@ -4217,7 +4139,7 @@ Variable* var = proxy->var(); // Delete of an unqualified identifier is disallowed in strict mode // but "delete this" is allowed. 
- ASSERT(strict_mode() == SLOPPY || var->is_this()); + DCHECK(strict_mode() == SLOPPY || var->is_this()); if (var->IsUnallocated()) { __ ldr(r2, GlobalObjectOperand()); __ mov(r1, Operand(var->name())); @@ -4232,10 +4154,10 @@ } else { // Non-global variable. Call the runtime to try to delete from the // context where the variable was introduced. - ASSERT(!context_register().is(r2)); + DCHECK(!context_register().is(r2)); __ mov(r2, Operand(var->name())); __ Push(context_register(), r2); - __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); context()->Plug(r0); } } else { @@ -4273,7 +4195,7 @@ // for control and plugging the control flow into the context, // because we need to prepare a pair of extra administrative AST ids // for the optimizing compiler. - ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue()); + DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); Label materialize_true, materialize_false, done; VisitForControl(expr->expression(), &materialize_false, @@ -4310,7 +4232,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { - ASSERT(expr->expression()->IsValidLeftHandSide()); + DCHECK(expr->expression()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ CountOperation"); SetSourcePosition(expr->position()); @@ -4329,7 +4251,7 @@ // Evaluate expression and get value. if (assign_type == VARIABLE) { - ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { @@ -4339,15 +4261,15 @@ __ push(ip); } if (assign_type == NAMED_PROPERTY) { - // Put the object both on the stack and in the accumulator. - VisitForAccumulatorValue(prop->obj()); - __ push(r0); + // Put the object both on the stack and in the register. 
+ VisitForStackValue(prop->obj()); + __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); EmitNamedPropertyLoad(prop); } else { VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ ldr(r1, MemOperand(sp, 0)); - __ push(r0); + VisitForStackValue(prop->key()); + __ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); + __ ldr(LoadIC::NameRegister(), MemOperand(sp, 0)); EmitKeyedPropertyLoad(prop); } } @@ -4396,7 +4318,7 @@ __ jmp(&stub_call); __ bind(&slow); } - ToNumberStub convert_stub; + ToNumberStub convert_stub(isolate()); __ CallStub(&convert_stub); // Save result for postfix expressions. @@ -4427,8 +4349,8 @@ // Record position before stub call. SetSourcePosition(expr->position()); - BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); - CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); + BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4455,8 +4377,9 @@ } break; case NAMED_PROPERTY: { - __ mov(r2, Operand(prop->key()->AsLiteral()->value())); - __ pop(r1); + __ mov(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); + __ pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->CountStoreFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4469,7 +4392,7 @@ break; } case KEYED_PROPERTY: { - __ Pop(r2, r1); // r1 = key. r2 = receiver. + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? 
isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -4489,13 +4412,17 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { - ASSERT(!context()->IsEffect()); - ASSERT(!context()->IsTest()); + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); VariableProxy* proxy = expr->AsVariableProxy(); if (proxy != NULL && proxy->var()->IsUnallocated()) { Comment cmnt(masm_, "[ Global variable"); - __ ldr(r0, GlobalObjectOperand()); - __ mov(r2, Operand(proxy->name())); + __ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), Operand(proxy->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } // Use a regular load, not a contextual load, to avoid a reference // error. CallLoadIC(NOT_CONTEXTUAL); @@ -4507,12 +4434,12 @@ // Generate code for loading from variables potentially shadowed // by eval-introduced variables. 
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ mov(r0, Operand(proxy->name())); __ Push(cp, r0); - __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); PrepareForBailout(expr, TOS_REG); __ bind(&done); @@ -4539,13 +4466,14 @@ } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { __ JumpIfSmi(r0, if_true); __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_string())) { + } else if (String::Equals(check, factory->string_string())) { __ JumpIfSmi(r0, if_false); // Check for undetectable objects => false. 
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE); @@ -4553,20 +4481,16 @@ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); __ tst(r1, Operand(1 << Map::kIsUndetectable)); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->symbol_string())) { + } else if (String::Equals(check, factory->symbol_string())) { __ JumpIfSmi(r0, if_false); __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_string())) { + } else if (String::Equals(check, factory->boolean_string())) { __ CompareRoot(r0, Heap::kTrueValueRootIndex); __ b(eq, if_true); __ CompareRoot(r0, Heap::kFalseValueRootIndex); Split(eq, if_true, if_false, fall_through); - } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_string())) { - __ CompareRoot(r0, Heap::kNullValueRootIndex); - Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_string())) { + } else if (String::Equals(check, factory->undefined_string())) { __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(eq, if_true); __ JumpIfSmi(r0, if_false); @@ -4576,19 +4500,17 @@ __ tst(r1, Operand(1 << Map::kIsUndetectable)); Split(ne, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_string())) { + } else if (String::Equals(check, factory->function_string())) { __ JumpIfSmi(r0, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); __ b(eq, if_true); __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_string())) { + } else if (String::Equals(check, factory->object_string())) { __ JumpIfSmi(r0, if_false); - if (!FLAG_harmony_typeof) { - __ CompareRoot(r0, Heap::kNullValueRootIndex); - __ b(eq, if_true); - } + __ CompareRoot(r0, Heap::kNullValueRootIndex); + __ 
b(eq, if_true); // Check for JS objects => true. __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); __ b(lt, if_false); @@ -4636,7 +4558,7 @@ case Token::INSTANCEOF: { VisitForStackValue(expr->right()); - InstanceofStub stub(InstanceofStub::kNoFlags); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); __ CallStub(&stub); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); // The stub returns 0 for true. @@ -4724,7 +4646,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { - ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); + DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); __ str(value, MemOperand(fp, frame_offset)); } @@ -4749,7 +4671,7 @@ // code. Fetch it from the context. __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX)); } else { - ASSERT(declaration_scope->is_function_scope()); + DCHECK(declaration_scope->is_function_scope()); __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); } __ push(ip); @@ -4760,7 +4682,7 @@ // Non-local control flow support. void FullCodeGenerator::EnterFinallyBlock() { - ASSERT(!result_register().is(r1)); + DCHECK(!result_register().is(r1)); // Store result register while executing finally block. __ push(result_register()); // Cook return address in link register to stack (smi encoded Code* delta) @@ -4780,7 +4702,8 @@ ExternalReference has_pending_message = ExternalReference::address_of_has_pending_message(isolate()); __ mov(ip, Operand(has_pending_message)); - __ ldr(r1, MemOperand(ip)); + STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof) + __ ldrb(r1, MemOperand(ip)); __ SmiTag(r1); __ push(r1); @@ -4793,7 +4716,7 @@ void FullCodeGenerator::ExitFinallyBlock() { - ASSERT(!result_register().is(r1)); + DCHECK(!result_register().is(r1)); // Restore pending message from stack. 
__ pop(r1); ExternalReference pending_message_script = @@ -4806,7 +4729,8 @@ ExternalReference has_pending_message = ExternalReference::address_of_has_pending_message(isolate()); __ mov(ip, Operand(has_pending_message)); - __ str(r1, MemOperand(ip)); + STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof) + __ strb(r1, MemOperand(ip)); __ pop(r1); ExternalReference pending_message_obj = @@ -4858,12 +4782,20 @@ static Address GetInterruptImmediateLoadAddress(Address pc) { Address load_address = pc - 2 * Assembler::kInstrSize; if (!FLAG_enable_ool_constant_pool) { - ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address))); + DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address))); + } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) { + // This is an extended constant pool lookup. + load_address -= 2 * Assembler::kInstrSize; + DCHECK(Assembler::IsMovW(Memory::int32_at(load_address))); + DCHECK(Assembler::IsMovT( + Memory::int32_at(load_address + Assembler::kInstrSize))); } else if (Assembler::IsMovT(Memory::int32_at(load_address))) { + // This is a movw_movt immediate load. load_address -= Assembler::kInstrSize; - ASSERT(Assembler::IsMovW(Memory::int32_at(load_address))); + DCHECK(Assembler::IsMovW(Memory::int32_at(load_address))); } else { - ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address))); + // This is a small constant pool lookup. 
+ DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address))); } return load_address; } @@ -4873,9 +4805,8 @@ Address pc, BackEdgeState target_state, Code* replacement_code) { - static const int kInstrSize = Assembler::kInstrSize; Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc); - Address branch_address = pc_immediate_load_address - kInstrSize; + Address branch_address = pc_immediate_load_address - Assembler::kInstrSize; CodePatcher patcher(branch_address, 1); switch (target_state) { case INTERRUPT: @@ -4883,14 +4814,19 @@ // <decrement profiling counter> // bpl ok // ; load interrupt stub address into ip - either of: - // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low> - // | movt ip, <immed high> + // ; <small cp load> | <extended cp load> | <immediate load> + // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm + // | movt ip, #imm> | movw ip, #imm + // | ldr ip, [pp, ip] // blx ip + // <reset profiling counter> // ok-label - // Calculate branch offet to the ok-label - this is the difference between - // the branch address and |pc| (which points at <blx ip>) plus one instr. 
- int branch_offset = pc + kInstrSize - branch_address; + // Calculate branch offset to the ok-label - this is the difference + // between the branch address and |pc| (which points at <blx ip>) plus + // kProfileCounterResetSequence instructions + int branch_offset = pc - Instruction::kPCReadOffset - branch_address + + kProfileCounterResetSequenceLength; patcher.masm()->b(branch_offset, pl); break; } @@ -4899,9 +4835,12 @@ // <decrement profiling counter> // mov r0, r0 (NOP) // ; load on-stack replacement address into ip - either of: - // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low> - // | movt ip, <immed high> + // ; <small cp load> | <extended cp load> | <immediate load> + // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm + // | movt ip, #imm> | movw ip, #imm + // | ldr ip, [pp, ip] // blx ip + // <reset profiling counter> // ok-label patcher.masm()->nop(); break; @@ -4920,28 +4859,27 @@ Isolate* isolate, Code* unoptimized_code, Address pc) { - static const int kInstrSize = Assembler::kInstrSize; - ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp); + DCHECK(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize))); Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc); - Address branch_address = pc_immediate_load_address - kInstrSize; + Address branch_address = pc_immediate_load_address - Assembler::kInstrSize; Address interrupt_address = Assembler::target_address_at( pc_immediate_load_address, unoptimized_code); if (Assembler::IsBranch(Assembler::instr_at(branch_address))) { - ASSERT(interrupt_address == + DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry()); return INTERRUPT; } - ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address))); + DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address))); if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) { return ON_STACK_REPLACEMENT; } - ASSERT(interrupt_address == + DCHECK(interrupt_address == 
isolate->builtins()->OsrAfterStackCheck()->entry()); return OSR_AFTER_STACK_CHECK; } diff -Nru nodejs-0.11.13/deps/v8/src/arm/ic-arm.cc nodejs-0.11.15/deps/v8/src/arm/ic-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/ic-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/ic-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "assembler-arm.h" -#include "code-stubs.h" -#include "codegen.h" -#include "disasm.h" -#include "ic-inl.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/arm/assembler-arm.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/disasm.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -62,48 +39,6 @@ } -// Generated code falls through if the receiver is a regular non-global -// JS object with slow properties and no interceptors. -static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register elements, - Register t0, - Register t1, - Label* miss) { - // Register usage: - // receiver: holds the receiver on entry and is unchanged. - // elements: holds the property dictionary on fall through. - // Scratch registers: - // t0: used to holds the receiver map. - // t1: used to holds the receiver instance type, receiver bit mask and - // elements map. - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the receiver is a valid JS object. - __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE); - __ b(lt, miss); - - // If this assert fails, we have to check upper bound too. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); - - GenerateGlobalInstanceTypeCheck(masm, t1, miss); - - // Check that the global object does not require access checks. 
- __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset)); - __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) | - (1 << Map::kHasNamedInterceptor))); - __ b(ne, miss); - - __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHashTableMapRootIndex); - __ cmp(t1, ip); - __ b(ne, miss); -} - - // Helper function used from LoadIC GenerateNormal. // // elements: Property dictionary. It is not clobbered if a jump to the miss @@ -234,7 +169,7 @@ // In the case that the object is a value-wrapper object, // we enter the runtime system to make sure that indexing into string // objects work as intended. - ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); + DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); __ cmp(scratch, Operand(JS_OBJECT_TYPE)); __ b(lt, slow); @@ -334,16 +269,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // ----------------------------------- + // The return address is in lr. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(r1)); + DCHECK(name.is(r2)); // Probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, r0, r2, r3, r4, r5, r6); + masm, flags, receiver, name, r3, r4, r5, r6); // Cache miss: Jump to runtime. 
GenerateMiss(masm); @@ -351,37 +287,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // ----------------------------------- - Label miss; + Register dictionary = r0; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); - GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss); + Label slow; - // r1: elements - GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4); + __ ldr(dictionary, + FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4); __ Ret(); - // Cache miss: Jump to runtime. - __ bind(&miss); - GenerateMiss(masm); + // Dictionary load failed, go slow (but don't miss). + __ bind(&slow); + GenerateRuntimeGetProperty(masm); } +// A register that isn't one of the parameters to the load ic. +static const Register LoadIC_TempRegister() { return r3; } + + void LoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // ----------------------------------- + // The return address is in lr. Isolate* isolate = masm->isolate(); __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4); - __ mov(r3, r0); - __ Push(r3, r2); + __ mov(LoadIC_TempRegister(), ReceiverRegister()); + __ Push(LoadIC_TempRegister(), NameRegister()); // Perform tail call to the entry. ExternalReference ref = @@ -391,14 +325,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // ----------------------------------- + // The return address is in lr. 
- __ mov(r3, r0); - __ Push(r3, r2); + __ mov(LoadIC_TempRegister(), ReceiverRegister()); + __ Push(LoadIC_TempRegister(), NameRegister()); __ TailCallRuntime(Runtime::kGetProperty, 2, 1); } @@ -490,25 +420,26 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + // The return address is in lr. + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(r1)); + DCHECK(key.is(r2)); + Label slow, notin; MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, ¬in, &slow); + GenerateMappedArgumentsLookup( + masm, receiver, key, r0, r3, r4, ¬in, &slow); __ ldr(r0, mapped_location); __ Ret(); __ bind(¬in); - // The unmapped lookup expects that the parameter map is in r2. + // The unmapped lookup expects that the parameter map is in r0. MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow); - __ ldr(r2, unmapped_location); + GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow); + __ ldr(r0, unmapped_location); __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); - __ cmp(r2, r3); + __ cmp(r0, r3); __ b(eq, &slow); - __ mov(r0, r2); __ Ret(); __ bind(&slow); GenerateMiss(masm); @@ -516,27 +447,28 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register value = ValueRegister(); + DCHECK(receiver.is(r1)); + DCHECK(key.is(r2)); + DCHECK(value.is(r0)); + Label slow, notin; - MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, ¬in, &slow); - __ str(r0, mapped_location); + MemOperand mapped_location = 
GenerateMappedArgumentsLookup( + masm, receiver, key, r3, r4, r5, ¬in, &slow); + __ str(value, mapped_location); __ add(r6, r3, r5); - __ mov(r9, r0); + __ mov(r9, value); __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in r3. MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); - __ str(r0, unmapped_location); + GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow); + __ str(value, unmapped_location); __ add(r6, r3, r4); - __ mov(r9, r0); + __ mov(r9, value); __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(&slow); @@ -545,16 +477,12 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + // The return address is in lr. Isolate* isolate = masm->isolate(); __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4); - __ Push(r1, r0); + __ Push(ReceiverRegister(), NameRegister()); // Perform tail call to the entry. 
ExternalReference ref = @@ -564,30 +492,51 @@ } +// IC register specifications +const Register LoadIC::ReceiverRegister() { return r1; } +const Register LoadIC::NameRegister() { return r2; } + + +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return r0; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return r3; +} + + +const Register StoreIC::ReceiverRegister() { return r1; } +const Register StoreIC::NameRegister() { return r2; } +const Register StoreIC::ValueRegister() { return r0; } + + +const Register KeyedStoreIC::MapRegister() { + return r3; +} + + void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + // The return address is in lr. - __ Push(r1, r0); + __ Push(ReceiverRegister(), NameRegister()); __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); } void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + // The return address is in lr. Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; - Register key = r0; - Register receiver = r1; + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(key.is(r2)); + DCHECK(receiver.is(r1)); Isolate* isolate = masm->isolate(); @@ -598,14 +547,14 @@ // where a numeric string is converted to a smi. GenerateKeyedLoadReceiverCheck( - masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow); + masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. 
- __ CheckFastElements(r2, r3, &check_number_dictionary); + __ CheckFastElements(r0, r3, &check_number_dictionary); GenerateFastArrayLoad( - masm, receiver, key, r4, r3, r2, r0, NULL, &slow); - __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3); + masm, receiver, key, r0, r3, r4, r0, NULL, &slow); + __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3); __ Ret(); __ bind(&check_number_dictionary); @@ -613,31 +562,30 @@ __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset)); // Check whether the elements is a number dictionary. - // r0: key // r3: elements map // r4: elements __ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ cmp(r3, ip); __ b(ne, &slow); - __ SmiUntag(r2, r0); - __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5); + __ SmiUntag(r0, key); + __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5); __ Ret(); - // Slow case, key and receiver still in r0 and r1. + // Slow case, key and receiver still in r2 and r1. __ bind(&slow); __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), - 1, r2, r3); + 1, r4, r3); GenerateRuntimeGetProperty(masm); __ bind(&check_name); - GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow); + GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow); GenerateKeyedLoadReceiverCheck( - masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow); + masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow); // If the receiver is a fast-case object, check the keyed lookup // cache. Otherwise probe the dictionary. - __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset)); + __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ cmp(r4, ip); @@ -645,9 +593,9 @@ // Load the map of the receiver, compute the keyed lookup cache hash // based on 32 bits of the map pointer and the name hash. 
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift)); - __ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset)); + __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift)); + __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset)); __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift)); int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; __ And(r3, r3, Operand(mask)); @@ -667,26 +615,24 @@ Label try_next_entry; // Load map and move r4 to next entry. __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); - __ cmp(r2, r5); + __ cmp(r0, r5); __ b(ne, &try_next_entry); __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name - __ cmp(r0, r5); + __ cmp(key, r5); __ b(eq, &hit_on_nth_entry[i]); __ bind(&try_next_entry); } // Last entry: Load map and move r4 to name. __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); - __ cmp(r2, r5); + __ cmp(r0, r5); __ b(ne, &slow); __ ldr(r5, MemOperand(r4)); - __ cmp(r0, r5); + __ cmp(key, r5); __ b(ne, &slow); // Get field offset. - // r0 : key - // r1 : receiver - // r2 : receiver's map + // r0 : receiver's map // r3 : lookup cache index ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(isolate); @@ -699,7 +645,7 @@ __ add(r3, r3, Operand(i)); } __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); - __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset)); + __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset)); __ sub(r5, r5, r6, SetCC); __ b(ge, &property_array_property); if (i != 0) { @@ -709,36 +655,34 @@ // Load in-object property. __ bind(&load_in_object_property); - __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset)); __ add(r6, r6, r5); // Index from start of object. - __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag. 
- __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2)); + __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. + __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2)); __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, r2, r3); + 1, r4, r3); __ Ret(); // Load property array property. __ bind(&property_array_property); - __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset)); - __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2)); + __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2)); __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, r2, r3); + 1, r4, r3); __ Ret(); // Do a quick inline probe of the receiver's dictionary, if it // exists. __ bind(&probe_dictionary); - // r1: receiver - // r0: key // r3: elements - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); - GenerateGlobalInstanceTypeCheck(masm, r2, &slow); + __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + GenerateGlobalInstanceTypeCheck(masm, r0, &slow); // Load the property to r0. - GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4); + GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4); __ IncrementCounter( - isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3); + isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3); __ Ret(); __ bind(&index_name); @@ -749,17 +693,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key (index) - // -- r1 : receiver - // ----------------------------------- + // Return address is in lr. 
Label miss; - Register receiver = r1; - Register index = r0; + Register receiver = ReceiverRegister(); + Register index = NameRegister(); Register scratch = r3; Register result = r0; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); StringCharAtGenerator char_at_generator(receiver, index, @@ -781,39 +722,41 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + // Return address is in lr. Label slow; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch1 = r3; + Register scratch2 = r4; + DCHECK(!scratch1.is(receiver) && !scratch1.is(key)); + DCHECK(!scratch2.is(receiver) && !scratch2.is(key)); + // Check that the receiver isn't a smi. - __ JumpIfSmi(r1, &slow); + __ JumpIfSmi(receiver, &slow); // Check that the key is an array index, that is Uint32. - __ NonNegativeSmiTst(r0); + __ NonNegativeSmiTst(key); __ b(ne, &slow); // Get the map of the receiver. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that it has indexed interceptor and access checks // are not enabled for this object. - __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); - __ and_(r3, r3, Operand(kSlowCaseBitFieldMask)); - __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor)); + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); + __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask)); + __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor)); __ b(ne, &slow); // Everything is fine, call runtime. - __ Push(r1, r0); // Receiver, key. + __ Push(receiver, key); // Receiver, key. // Perform tail call to the entry. 
__ TailCallExternalReference( - ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor), + ExternalReference(IC_Utility(kLoadElementWithInterceptor), masm->isolate()), - 2, - 1); + 2, 1); __ bind(&slow); GenerateMiss(masm); @@ -821,15 +764,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(r2, r1, r0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -838,15 +774,8 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r2 : key - // -- r1 : receiver - // -- lr : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(r1, r2, r0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. @@ -857,15 +786,8 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(r2, r1, r0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. 
@@ -877,21 +799,13 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(r2, r1, r0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); - __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode. - __ Push(r1, r0); + __ Push(r0); - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } @@ -1021,10 +935,10 @@ receiver_map, r4, slow); - ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, receiver_map, mode, slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -1035,10 +949,9 @@ receiver_map, r4, slow); - ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, - slow); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, receiver_map, mode, slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1051,9 +964,9 @@ receiver_map, r4, slow); - ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); + ElementsTransitionGenerator::GenerateDoubleToObject( + 
masm, receiver, key, value, receiver_map, mode, slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -1072,9 +985,12 @@ Label array, extra, check_if_double_array; // Register usage. - Register value = r0; - Register key = r1; - Register receiver = r2; + Register value = ValueRegister(); + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(receiver.is(r1)); + DCHECK(key.is(r2)); + DCHECK(value.is(r0)); Register receiver_map = r3; Register elements_map = r6; Register elements = r9; // Elements array of the receiver. @@ -1160,18 +1076,18 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(r1)); + DCHECK(name.is(r2)); + DCHECK(ValueRegister().is(r0)); // Get the receiver from the stack and probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, r1, r2, r3, r4, r5, r6); + masm, flags, receiver, name, r3, r4, r5, r6); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1179,14 +1095,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - - __ Push(r1, r2, r0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // Perform tail call to the entry. 
ExternalReference ref = @@ -1196,17 +1105,18 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- Label miss; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + Register dictionary = r3; + DCHECK(receiver.is(r1)); + DCHECK(name.is(r2)); + DCHECK(value.is(r0)); - GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss); + __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5); + GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5); @@ -1220,21 +1130,13 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - - __ Push(r1, r2, r0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); - __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes __ mov(r0, Operand(Smi::FromInt(strict_mode))); - __ Push(r1, r0); + __ Push(r0); // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } @@ -1316,20 +1218,20 @@ CodePatcher patcher(patch_address, 2); Register reg = Assembler::GetRn(instr_at_patch); if (check == ENABLE_INLINED_SMI_CHECK) { - ASSERT(Assembler::IsCmpRegister(instr_at_patch)); - ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), + DCHECK(Assembler::IsCmpRegister(instr_at_patch)); + DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(), Assembler::GetRm(instr_at_patch).code()); patcher.masm()->tst(reg, Operand(kSmiTagMask)); } else { - ASSERT(check == DISABLE_INLINED_SMI_CHECK); - ASSERT(Assembler::IsTstImmediate(instr_at_patch)); + DCHECK(check == DISABLE_INLINED_SMI_CHECK); + DCHECK(Assembler::IsTstImmediate(instr_at_patch)); patcher.masm()->cmp(reg, reg); } - ASSERT(Assembler::IsBranch(branch_instr)); + DCHECK(Assembler::IsBranch(branch_instr)); if (Assembler::GetCondition(branch_instr) == eq) { patcher.EmitCondition(ne); } else { - ASSERT(Assembler::GetCondition(branch_instr) == ne); + DCHECK(Assembler::GetCondition(branch_instr) == ne); patcher.EmitCondition(eq); } } diff -Nru nodejs-0.11.13/deps/v8/src/arm/lithium-arm.cc nodejs-0.11.15/deps/v8/src/arm/lithium-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/lithium-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/lithium-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "lithium-allocator-inl.h" -#include "arm/lithium-arm.h" -#include "arm/lithium-codegen-arm.h" -#include "hydrogen-osr.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/arm/lithium-codegen-arm.h" +#include "src/hydrogen-osr.h" +#include "src/lithium-inl.h" namespace v8 { namespace internal { @@ -48,17 +24,17 @@ // outputs because all registers are blocked by the calling convention. // Inputs operands must use a fixed register or use-at-start policy or // a non-register policy. 
- ASSERT(Output() == NULL || + DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || !LUnallocated::cast(Output())->HasRegisterPolicy()); for (UseIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() || + DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); } for (TempIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); } } #endif @@ -340,8 +316,9 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); - hydrogen()->access().PrintTo(stream); - stream->Add(" <- "); + OStringStream os; + os << hydrogen()->access() << " <- "; + stream->Add(os.c_str()); value()->PrintTo(stream); } @@ -360,7 +337,7 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", additional_index()); + stream->Add(" + %d]", base_offset()); } else { stream->Add("]"); } @@ -372,13 +349,13 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", additional_index()); + stream->Add(" + %d] <-", base_offset()); } else { stream->Add("] <- "); } if (value() == NULL) { - ASSERT(hydrogen()->IsConstantHoleStore() && + DCHECK(hydrogen()->IsConstantHoleStore() && hydrogen()->value()->representation().IsDouble()); stream->Add("<the hole(nan)>"); } else { @@ -414,14 +391,14 @@ if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { - ASSERT(kind == GENERAL_REGISTERS); + DCHECK(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } LPlatformChunk* LChunkBuilder::Build() { - ASSERT(is_unused()); + DCHECK(is_unused()); chunk_ = new(zone()) LPlatformChunk(info(), graph()); LPhase phase("L_Building chunk", chunk_); status_ = BUILDING; @@ 
-623,6 +600,8 @@ !hinstr->HasObservableSideEffects(); if (needs_environment && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. + instr->environment()->set_has_been_used(); } return instr; @@ -630,7 +609,7 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); + DCHECK(!instr->HasPointerMap()); instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -649,16 +628,29 @@ } +LUnallocated* LChunkBuilder::TempDoubleRegister() { + LUnallocated* operand = + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); + vreg = 0; + } + operand->set_virtual_register(vreg); + return operand; +} + + LOperand* LChunkBuilder::FixedTemp(Register reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } @@ -687,8 +679,8 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); HValue* right_value = instr->right(); @@ -729,9 +721,9 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr) { - ASSERT(instr->representation().IsDouble()); - 
ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); if (op == Token::MOD) { LOperand* left = UseFixedDouble(instr->left(), d0); LOperand* right = UseFixedDouble(instr->right(), d1); @@ -750,8 +742,8 @@ HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); - ASSERT(left->representation().IsTagged()); - ASSERT(right->representation().IsTagged()); + DCHECK(left->representation().IsTagged()); + DCHECK(right->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left_operand = UseFixed(left, r1); LOperand* right_operand = UseFixed(right, r0); @@ -762,7 +754,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - ASSERT(is_building()); + DCHECK(is_building()); current_block_ = block; next_block_ = next_block; if (block->IsStartBlock()) { @@ -771,13 +763,13 @@ } else if (block->predecessors()->length() == 1) { // We have a single predecessor => copy environment and outgoing // argument count from the predecessor. - ASSERT(block->phis()->length() == 0); + DCHECK(block->phis()->length() == 0); HBasicBlock* pred = block->predecessors()->at(0); HEnvironment* last_environment = pred->last_environment(); - ASSERT(last_environment != NULL); + DCHECK(last_environment != NULL); // Only copy the environment, if it is later used again. 
if (pred->end()->SecondSuccessor() == NULL) { - ASSERT(pred->end()->FirstSuccessor() == block); + DCHECK(pred->end()->FirstSuccessor() == block); } else { if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || pred->end()->SecondSuccessor()->block_id() > block->block_id()) { @@ -785,7 +777,7 @@ } } block->UpdateEnvironment(last_environment); - ASSERT(pred->argument_count() >= 0); + DCHECK(pred->argument_count() >= 0); argument_count_ = pred->argument_count(); } else { // We are at a state join => process phis. @@ -837,7 +829,7 @@ if (current->OperandCount() == 0) { instr = DefineAsRegister(new(zone()) LDummy()); } else { - ASSERT(!current->OperandAt(0)->IsControlInstruction()); + DCHECK(!current->OperandAt(0)->IsControlInstruction()); instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(current->OperandAt(0)))); } @@ -849,75 +841,90 @@ chunk_->AddInstruction(dummy, current_block_); } } else { - instr = current->CompileToLithium(this); + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } } argument_count_ += current->argument_delta(); - ASSERT(argument_count_ >= 0); + DCHECK(argument_count_ >= 0); if (instr != NULL) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(current); + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
+ instr->set_hydrogen_value(hydrogen_val); #if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. - // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - ASSERT(fixed == 0 || used_at_start == 0); + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. 
In this case, The register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; } + DCHECK(fixed == 0 || used_at_start == 0); + } #endif - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + chunk_->AddInstruction(instr, current_block_); - if (instr->IsCall()) { - HValue* hydrogen_value_for_lazy_bailout = current; - LInstruction* instruction_needing_environment = NULL; - if (current->HasObservableSideEffects()) { - HSimulate* sim = HSimulate::cast(current->next()); - instruction_needing_environment = instr; - sim->ReplayEnvironment(current_block_->last_environment()); - hydrogen_value_for_lazy_bailout = sim; - } - LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); - bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); - 
chunk_->AddInstruction(bailout, current_block_); - if (instruction_needing_environment != NULL) { - // Store the lazy deopt environment with the instruction if needed. - // Right now it is only used for LInstanceOfKnownGlobal. - instruction_needing_environment-> - SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); - } + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. + instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); } } - current_instruction_ = old_current; } @@ -927,22 +934,21 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - HValue* value = instr->value(); - LBranch* result = new(zone()) LBranch(UseRegister(value)); - // Tagged values that are not known smis or booleans require a - // deoptimization environment. If the instruction is generic no - // environment is needed since all cases are handled. 
- Representation rep = value->representation(); + Representation r = value->representation(); HType type = value->type(); ToBooleanStub::Types expected = instr->expected_input_types(); - if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() && - !expected.IsGeneric()) { - return AssignEnvironment(result); + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || + type.IsJSArray() || type.IsHeapNumber() || type.IsString(); + LInstruction* branch = new(zone()) LBranch(UseRegister(value)); + if (!easy_case && + ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) || + !expected.IsGeneric())) { + branch = AssignEnvironment(branch); } - return result; + return branch; } @@ -952,10 +958,7 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LCmpMapAndBranch(value, temp); @@ -1016,9 +1019,13 @@ } -LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - LOperand* argument = Use(instr->argument()); - return new(zone()) LPushArgument(argument); +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + for (int i = 0; i < argc; ++i) { + LOperand* argument = Use(instr->argument(i)); + AddInstruction(new(zone()) LPushArgument(argument), instr); + } + return NULL; } @@ -1075,7 +1082,7 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor( HCallWithDescriptor* instr) { - const CallInterfaceDescriptor* descriptor = instr->descriptor(); + const InterfaceDescriptor* descriptor = instr->descriptor(); LOperand* target = UseRegisterOrConstantAtStart(instr->target()); ZoneList<LOperand*> 
ops(instr->OperandCount(), zone()); @@ -1102,14 +1109,24 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { switch (instr->op()) { - case kMathFloor: return DoMathFloor(instr); - case kMathRound: return DoMathRound(instr); - case kMathAbs: return DoMathAbs(instr); - case kMathLog: return DoMathLog(instr); - case kMathExp: return DoMathExp(instr); - case kMathSqrt: return DoMathSqrt(instr); - case kMathPowHalf: return DoMathPowHalf(instr); - case kMathClz32: return DoMathClz32(instr); + case kMathFloor: + return DoMathFloor(instr); + case kMathRound: + return DoMathRound(instr); + case kMathFround: + return DoMathFround(instr); + case kMathAbs: + return DoMathAbs(instr); + case kMathLog: + return DoMathLog(instr); + case kMathExp: + return DoMathExp(instr); + case kMathSqrt: + return DoMathSqrt(instr); + case kMathPowHalf: + return DoMathPowHalf(instr); + case kMathClz32: + return DoMathClz32(instr); default: UNREACHABLE(); return NULL; @@ -1126,26 +1143,36 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { LOperand* input = UseRegister(instr->value()); - LOperand* temp = FixedTemp(d3); + LOperand* temp = TempDoubleRegister(); LMathRound* result = new(zone()) LMathRound(input, temp); return AssignEnvironment(DefineAsRegister(result)); } +LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathFround* result = new (zone()) LMathFround(input); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { Representation r = instr->value()->representation(); LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) ? 
NULL : UseFixed(instr->context(), cp); LOperand* input = UseRegister(instr->value()); - LMathAbs* result = new(zone()) LMathAbs(context, input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + LInstruction* result = + DefineAsRegister(new(zone()) LMathAbs(context, input)); + if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); + if (!r.IsDouble()) result = AssignEnvironment(result); + return result; } LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseFixedDouble(instr->value(), d0); return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr); } @@ -1159,12 +1186,12 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. 
+ LOperand* double_temp = TempDoubleRegister(); LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); return DefineAsRegister(result); } @@ -1236,9 +1263,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); @@ -1250,9 +1277,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( @@ -1268,9 +1295,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* 
dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( @@ -1284,15 +1311,26 @@ } -LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); +LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4); - LDivI* div = new(zone()) LDivI(dividend, divisor, temp); - return AssignEnvironment(DefineAsRegister(div)); + LOperand* temp = + CpuFeatures::IsSupported(SUDIV) ? 
NULL : TempDoubleRegister(); + LInstruction* result = + DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + (instr->CheckFlag(HValue::kCanOverflow) && + (!CpuFeatures::IsSupported(SUDIV) || + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) || + (!instr->IsMathFloorOfDiv() && + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + result = AssignEnvironment(result); + } + return result; } @@ -1327,9 +1365,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp = @@ -1346,26 +1384,40 @@ } +LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = + CpuFeatures::IsSupported(SUDIV) ? 
NULL : TempDoubleRegister(); + LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp); + return AssignEnvironment(DefineAsRegister(div)); +} + + LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { if (instr->RightIsPowerOf2()) { return DoFlooringDivByPowerOf2I(instr); } else if (instr->right()->IsConstant()) { return DoFlooringDivByConstI(instr); } else { - return DoDivI(instr); + return DoFlooringDivI(instr); } } LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegisterAtStart(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( dividend, divisor)); - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { result = AssignEnvironment(result); } return result; @@ -1373,9 +1425,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) 
LModByConstI( @@ -1388,13 +1440,15 @@ LInstruction* LChunkBuilder::DoModI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); - LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d10); - LOperand* temp2 = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d11); + LOperand* temp = + CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister(); + LOperand* temp2 = + CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister(); LInstruction* result = DefineAsRegister(new(zone()) LModI( dividend, divisor, temp, temp2)); if (instr->CheckFlag(HValue::kCanBeDivByZero) || @@ -1424,8 +1478,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); HValue* left = instr->BetterLeftOperand(); HValue* right = instr->BetterRightOperand(); LOperand* left_op; @@ -1464,8 +1518,8 @@ return DefineAsRegister(mul); } else if (instr->representation().IsDouble()) { - if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() || - instr->uses().value()->IsSub())) { + if (instr->HasOneUse() && (instr->uses().value()->IsAdd() || + instr->uses().value()->IsSub())) { HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value()); if (use->IsAdd() && instr == 
use->left()) { @@ -1494,8 +1548,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); if (instr->left()->IsConstant()) { // If lhs is constant, do reverse subtraction instead. @@ -1511,7 +1565,7 @@ } return result; } else if (instr->representation().IsDouble()) { - if (instr->right()->IsMul()) { + if (instr->right()->IsMul() && instr->right()->HasOneUse()) { return DoMultiplySub(instr->left(), HMul::cast(instr->right())); } @@ -1523,9 +1577,9 @@ LInstruction* LChunkBuilder::DoRSub(HSub* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); // Note: The lhs of the subtraction becomes the rhs of the // reverse-subtraction. 
@@ -1562,8 +1616,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); LAddI* add = new(zone()) LAddI(left, right); @@ -1573,21 +1627,21 @@ } return result; } else if (instr->representation().IsExternal()) { - ASSERT(instr->left()->representation().IsExternal()); - ASSERT(instr->right()->representation().IsInteger32()); - ASSERT(!instr->CheckFlag(HValue::kCanOverflow)); + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LAddI* add = new(zone()) LAddI(left, right); LInstruction* result = DefineAsRegister(add); return result; } else if (instr->representation().IsDouble()) { - if (instr->left()->IsMul()) { + if (instr->left()->IsMul() && instr->left()->HasOneUse()) { return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); } - if (instr->right()->IsMul()) { - ASSERT(!instr->left()->IsMul()); + if (instr->right()->IsMul() && instr->right()->HasOneUse()) { + DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse()); return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); } @@ -1602,14 +1656,14 @@ LOperand* left = NULL; LOperand* right = NULL; if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + 
DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); left = UseRegisterAtStart(instr->BetterLeftOperand()); right = UseOrConstantAtStart(instr->BetterRightOperand()); } else { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); } @@ -1618,11 +1672,11 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { - ASSERT(instr->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); // We call a C function for double power. It can't trigger a GC. // We need to use fixed result register for the call. Representation exponent_type = instr->right()->representation(); - ASSERT(instr->left()->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); LOperand* left = UseFixedDouble(instr->left(), d0); LOperand* right = exponent_type.IsDouble() ? 
UseFixedDouble(instr->right(), d1) : @@ -1635,8 +1689,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), r1); LOperand* right = UseFixed(instr->right(), r0); @@ -1649,15 +1703,15 @@ HCompareNumericAndBranch* instr) { Representation r = instr->representation(); if (r.IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(r)); - ASSERT(instr->right()->representation().Equals(r)); + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); LOperand* left = UseRegisterOrConstantAtStart(instr->left()); LOperand* right = UseRegisterOrConstantAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); } else { - ASSERT(r.IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); @@ -1667,8 +1721,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1684,8 +1736,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( HCompareMinusZeroAndBranch* instr) { - LInstruction* goto_instr = 
CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* value = UseRegister(instr->value()); LOperand* scratch = TempRegister(); return new(zone()) LCompareMinusZeroAndBranch(value, scratch); @@ -1693,7 +1743,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LIsObjectAndBranch(value, temp); @@ -1701,7 +1751,7 @@ LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LIsStringAndBranch(value, temp); @@ -1709,14 +1759,14 @@ LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsSmiAndBranch(Use(instr->value())); } LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( HIsUndetectableAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LIsUndetectableAndBranch(value, TempRegister()); } @@ -1724,8 +1774,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch( HStringCompareAndBranch* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), r1); LOperand* right = UseFixed(instr->right(), r0); @@ 
-1737,7 +1787,7 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( HHasInstanceTypeAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LHasInstanceTypeAndBranch(value); } @@ -1745,7 +1795,7 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex( HGetCachedArrayIndex* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); @@ -1754,7 +1804,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( HHasCachedArrayIndexAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LHasCachedArrayIndexAndBranch( UseRegisterAtStart(instr->value())); } @@ -1762,7 +1812,7 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegister(instr->value()); return new(zone()) LClassOfTestAndBranch(value, TempRegister()); } @@ -1801,9 +1851,16 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - LOperand* value = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = UseRegister(instr->length()); - return AssignEnvironment(new(zone()) LBoundsCheck(value, length)); + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? 
UseRegisterOrConstantAtStart(instr->length()) + : UseRegisterAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } + return result; } @@ -1837,87 +1894,81 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + HValue* val = instr->value(); if (from.IsSmi()) { if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LDummyUse(value)); } from = Representation::Tagged(); } if (from.IsTagged()) { if (to.IsDouble()) { - LOperand* value = UseRegister(instr->value()); - LNumberUntagD* res = new(zone()) LNumberUntagD(value); - return AssignEnvironment(DefineAsRegister(res)); + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); if (val->type().IsSmi()) { return DefineSameAsFirst(new(zone()) LDummyUse(value)); } return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); } else { - ASSERT(to.IsInteger32()); - LOperand* value = NULL; - LInstruction* res = NULL; - HValue* val = instr->value(); + DCHECK(to.IsInteger32()); if (val->type().IsSmi() || val->representation().IsSmi()) { - value = UseRegisterAtStart(val); - res = DefineAsRegister(new(zone()) LSmiUntag(value, false)); + LOperand* value = UseRegisterAtStart(val); + return DefineAsRegister(new(zone()) LSmiUntag(value, false)); } else { - value = UseRegister(val); + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); - LOperand* temp2 = FixedTemp(d11); - res = DefineSameAsFirst(new(zone()) LTaggedToI(value, - temp1, - temp2)); - res = AssignEnvironment(res); + 
LOperand* temp2 = TempDoubleRegister(); + LInstruction* result = + DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } - return res; } } else if (from.IsDouble()) { if (to.IsTagged()) { info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - - // Make sure that the temp and result_temp registers are - // different. LUnallocated* result_temp = TempRegister(); LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); - Define(result, result_temp); - return AssignPointerMap(result); + return AssignPointerMap(Define(result, result_temp)); } else if (to.IsSmi()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return AssignEnvironment( DefineAsRegister(new(zone()) LDoubleToSmi(value))); } else { - ASSERT(to.IsInteger32()); - LOperand* value = UseRegister(instr->value()); - LDoubleToI* res = new(zone()) LDoubleToI(value); - return AssignEnvironment(DefineAsRegister(res)); + DCHECK(to.IsInteger32()); + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); + if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); + return result; } } else if (from.IsInteger32()) { info()->MarkAsDeferredCalling(); if (to.IsTagged()) { - HValue* val = instr->value(); - LOperand* value = UseRegisterAtStart(val); if (!instr->CheckFlag(HValue::kCanOverflow)) { + LOperand* value = UseRegisterAtStart(val); return DefineAsRegister(new(zone()) LSmiTag(value)); } else if (val->CheckFlag(HInstruction::kUint32)) { + LOperand* value = UseRegisterAtStart(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); - return 
AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } else { + LOperand* value = UseRegisterAtStart(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); if (instr->CheckFlag(HValue::kCanOverflow)) { @@ -1925,13 +1976,11 @@ } return result; } else { - ASSERT(to.IsDouble()); - if (instr->value()->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister( - new(zone()) LUint32ToDouble(UseRegister(instr->value()))); + DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { + return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); } else { - return DefineAsRegister( - new(zone()) LInteger32ToDouble(Use(instr->value()))); + return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); } } } @@ -1942,7 +1991,11 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckNonSmi(value)); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = AssignEnvironment(result); + } + return result; } @@ -1966,15 +2019,12 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - LOperand* value = NULL; - if (!instr->CanOmitMapChecks()) { - value = UseRegisterAtStart(instr->value()); - if (instr->has_migration_target()) info()->MarkAsDeferredCalling(); - } - LCheckMaps* result = new(zone()) LCheckMaps(value); - if (!instr->CanOmitMapChecks()) { - AssignEnvironment(result); - if (instr->has_migration_target()) return 
AssignPointerMap(result); + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); } return result; } @@ -1989,10 +2039,11 @@ } else if (input_rep.IsInteger32()) { return DefineAsRegister(new(zone()) LClampIToUint8(reg)); } else { - ASSERT(input_rep.IsSmiOrTagged()); + DCHECK(input_rep.IsSmiOrTagged()); // Register allocator doesn't (yet) support allocation of double // temps. Reserve d1 explicitly. - LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11)); + LClampTToUint8* result = + new(zone()) LClampTToUint8(reg, TempDoubleRegister()); return AssignEnvironment(DefineAsRegister(result)); } } @@ -2000,7 +2051,7 @@ LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { HValue* value = instr->value(); - ASSERT(value->representation().IsDouble()); + DCHECK(value->representation().IsDouble()); return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); } @@ -2051,9 +2102,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* global_object = UseFixed(instr->global_object(), r0); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LLoadGlobalGeneric* result = - new(zone()) LLoadGlobalGeneric(context, global_object); + new(zone()) LLoadGlobalGeneric(context, global_object, vector); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -2072,7 +2128,10 @@ LOperand* context = UseRegisterAtStart(instr->value()); LInstruction* result = DefineAsRegister(new(zone()) LLoadContextSlot(context)); - return instr->RequiresHoleCheck() ? 
AssignEnvironment(result) : result; + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; } @@ -2087,7 +2146,10 @@ value = UseRegister(instr->value()); } LInstruction* result = new(zone()) LStoreContextSlot(context, value); - return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result; + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; } @@ -2099,9 +2161,14 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), r0); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LInstruction* result = - DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0); + DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0); return MarkAsCall(result, instr); } @@ -2119,54 +2186,63 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - ASSERT(instr->key()->representation().IsSmiOrInteger32()); + DCHECK(instr->key()->representation().IsSmiOrInteger32()); ElementsKind elements_kind = instr->elements_kind(); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyed* result = NULL; + LInstruction* result = NULL; if (!instr->is_typed_elements()) { LOperand* obj = NULL; if (instr->representation().IsDouble()) { obj = UseRegister(instr->elements()); } else { - ASSERT(instr->representation().IsSmiOrTagged()); + DCHECK(instr->representation().IsSmiOrTagged()); obj = UseRegisterAtStart(instr->elements()); } - result = new(zone()) LLoadKeyed(obj, key); + result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key)); } else { - ASSERT( + DCHECK( (instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || + 
!IsDoubleOrFloatElementsKind(elements_kind)) || (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); + IsDoubleOrFloatElementsKind(elements_kind))); LOperand* backing_store = UseRegister(instr->elements()); - result = new(zone()) LLoadKeyed(backing_store, key); + result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key)); } - DefineAsRegister(result); - // An unsigned int array load might overflow and cause a deopt, make sure it - // has an environment. - bool can_deoptimize = instr->RequiresHoleCheck() || - elements_kind == EXTERNAL_UINT32_ELEMENTS || - elements_kind == UINT32_ELEMENTS; - return can_deoptimize ? AssignEnvironment(result) : result; + if ((instr->is_external() || instr->is_fixed_typed_array()) ? + // see LCodeGen::DoLoadKeyedExternalArray + ((elements_kind == EXTERNAL_UINT32_ELEMENTS || + elements_kind == UINT32_ELEMENTS) && + !instr->CheckFlag(HInstruction::kUint32)) : + // see LCodeGen::DoLoadKeyedFixedDoubleArray and + // LCodeGen::DoLoadKeyedFixedArray + instr->RequiresHoleCheck()) { + result = AssignEnvironment(result); + } + return result; } LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), r1); - LOperand* key = UseFixed(instr->key(), r0); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LInstruction* result = - DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0); + DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector), + r0); return MarkAsCall(result, instr); } LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { if (!instr->is_typed_elements()) { - ASSERT(instr->elements()->representation().IsTagged()); + 
DCHECK(instr->elements()->representation().IsTagged()); bool needs_write_barrier = instr->NeedsWriteBarrier(); LOperand* object = NULL; LOperand* key = NULL; @@ -2177,7 +2253,7 @@ val = UseRegister(instr->value()); key = UseRegisterOrConstantAtStart(instr->key()); } else { - ASSERT(instr->value()->representation().IsSmiOrTagged()); + DCHECK(instr->value()->representation().IsSmiOrTagged()); if (needs_write_barrier) { object = UseTempRegister(instr->elements()); val = UseTempRegister(instr->value()); @@ -2192,12 +2268,12 @@ return new(zone()) LStoreKeyed(object, key, val); } - ASSERT( + DCHECK( (instr->value()->representation().IsInteger32() && !IsDoubleOrFloatElementsKind(instr->elements_kind())) || (instr->value()->representation().IsDouble() && IsDoubleOrFloatElementsKind(instr->elements_kind()))); - ASSERT((instr->is_fixed_typed_array() && + DCHECK((instr->is_fixed_typed_array() && instr->elements()->representation().IsTagged()) || (instr->is_external() && instr->elements()->representation().IsExternal())); @@ -2210,13 +2286,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* obj = UseFixed(instr->object(), r2); - LOperand* key = UseFixed(instr->key(), r1); - LOperand* val = UseFixed(instr->value(), r0); - - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsTagged()); + LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return MarkAsCall( new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr); @@ -2225,17 +2301,18 @@ 
LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - LOperand* object = UseRegister(instr->object()); if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { + LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); return result; } else { + LOperand* object = UseFixed(instr->object(), r0); LOperand* context = UseFixed(instr->context(), cp); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, context, NULL); - return AssignPointerMap(result); + return MarkAsCall(result, instr); } } @@ -2279,20 +2356,14 @@ // We need a temporary register for write barrier of the map field. LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp); - if (instr->field_representation().IsHeapObject()) { - if (!instr->value()->type().IsHeapObject()) { - return AssignEnvironment(result); - } - } - return result; + return new(zone()) LStoreNamedField(obj, val, temp); } LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* obj = UseFixed(instr->object(), r1); - LOperand* val = UseFixed(instr->value(), r0); + LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister()); LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val); return MarkAsCall(result, instr); @@ -2315,7 +2386,7 @@ LOperand* context = UseAny(instr->context()); LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(context, string, index); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } @@ -2331,9 +2402,7 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { 
info()->MarkAsDeferredCalling(); LOperand* context = UseAny(instr->context()); - LOperand* size = instr->size()->IsConstant() - ? UseConstant(instr->size()) - : UseTempRegister(instr->size()); + LOperand* size = UseRegisterOrConstant(instr->size()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2); @@ -2356,7 +2425,7 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - ASSERT(argument_count_ == 0); + DCHECK(argument_count_ == 0); allocator_->MarkAsOsrEntry(); current_block_->last_environment()->set_ast_id(instr->ast_id()); return AssignEnvironment(new(zone()) LOsrEntry); @@ -2369,11 +2438,11 @@ int spill_index = chunk()->GetParameterStackSlot(instr->index()); return DefineAsSpilled(result, spill_index); } else { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = - info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + info()->code_stub()->GetInterfaceDescriptor(); int index = static_cast<int>(instr->index()); - Register reg = descriptor->GetParameterRegister(index); + Register reg = descriptor->GetEnvironmentParameterRegister(index); return DefineFixed(result, reg); } } @@ -2444,9 +2513,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value())); } @@ -2468,7 +2534,7 @@ LOperand* context = UseFixed(instr->context(), cp); return MarkAsCall(new(zone()) LStackCheck(context), instr); } else { - ASSERT(instr->is_backwards_branch()); + DCHECK(instr->is_backwards_branch()); LOperand* context = UseAny(instr->context()); return AssignEnvironment( AssignPointerMap(new(zone()) LStackCheck(context))); @@ -2478,6 +2544,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = 
current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), instr->arguments_count(), @@ -2503,7 +2570,7 @@ if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - ASSERT(instr->argument_delta() == -argument_count); + DCHECK(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> @@ -2537,8 +2604,26 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { LOperand* object = UseRegister(instr->object()); - LOperand* index = UseRegister(instr->index()); - return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index)); + LOperand* index = UseTempRegister(instr->index()); + LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); +} + + +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, cp), instr); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm/lithium-arm.h nodejs-0.11.15/deps/v8/src/arm/lithium-arm.h --- nodejs-0.11.13/deps/v8/src/arm/lithium-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/lithium-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM_LITHIUM_ARM_H_ #define V8_ARM_LITHIUM_ARM_H_ -#include "hydrogen.h" -#include "lithium-allocator.h" -#include "lithium.h" -#include "safepoint-table.h" -#include "utils.h" +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -40,147 +17,151 @@ // Forward declarations. class LCodeGen; -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallJSFunction) \ - V(CallWithDescriptor) \ - V(CallFunction) \ - V(CallNew) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CallStub) \ - V(CheckInstanceType) \ - V(CheckNonSmi) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareMinusZeroAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(ConstructDouble) \ - V(Context) \ - V(DateField) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleBits) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(FunctionLiteral) \ - V(GetCachedArrayIndex) \ - V(Goto) \ - V(HasCachedArrayIndexAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstanceOf) \ - V(InstanceOfKnownGlobal) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsConstructCallAndBranch) \ - V(IsObjectAndBranch) \ - V(IsStringAndBranch) \ - 
V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ - V(LoadGlobalGeneric) \ - V(LoadKeyed) \ - V(LoadKeyedGeneric) \ - V(LoadNamedField) \ - V(LoadNamedGeneric) \ - V(MapEnumLength) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathExp) \ - V(MathFloor) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MultiplyAddD) \ - V(MultiplySubD) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(PushArgument) \ - V(RegExpLiteral) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreGlobalCell) \ - V(StoreKeyed) \ - V(StoreKeyedGeneric) \ - V(StoreNamedField) \ - V(StoreNamedGeneric) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(RSubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(ToFastProperties) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddI) \ + V(Allocate) \ + V(AllocateBlockContext) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallJSFunction) \ + V(CallWithDescriptor) \ + V(CallFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CheckInstanceType) \ + V(CheckNonSmi) \ + V(CheckMaps) \ + V(CheckMapValue) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8) \ + V(ClassOfTestAndBranch) \ + 
V(CompareMinusZeroAndBranch) \ + V(CompareNumericAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpHoleAndBranch) \ + V(CmpMapAndBranch) \ + V(CmpT) \ + V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleBits) \ + V(DoubleToI) \ + V(DoubleToSmi) \ + V(Drop) \ + V(Dummy) \ + V(DummyUse) \ + V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsStringAndBranch) \ + V(IsSmiAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadRoot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyed) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(MapEnumLength) \ + V(MathAbs) \ + V(MathClz32) \ + V(MathExp) \ + V(MathFloor) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulI) \ + V(MultiplyAddD) \ + V(MultiplySubD) \ + V(NumberTagD) \ + V(NumberTagI) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PushArgument) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyed) \ + V(StoreKeyedGeneric) \ 
+ V(StoreNamedField) \ + V(StoreNamedGeneric) \ + V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(RSubI) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ V(WrapReceiver) @@ -193,7 +174,7 @@ return mnemonic; \ } \ static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ + DCHECK(instr->Is##type()); \ return reinterpret_cast<L##type*>(instr); \ } @@ -242,6 +223,9 @@ virtual bool IsControl() const { return false; } + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + void set_environment(LEnvironment* env) { environment_ = env; } LEnvironment* environment() const { return environment_; } bool HasEnvironment() const { return environment_ != NULL; } @@ -261,7 +245,9 @@ // Interface to the register allocator and iterators. bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall(); + } // Interface to the register allocator and iterators. bool IsMarkedAsCall() const { return IsCall(); } @@ -278,11 +264,12 @@ void VerifyCall(); #endif + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + private: // Iterator support. 
friend class InputIterator; - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; friend class TempIterator; virtual int TempCount() = 0; @@ -347,7 +334,7 @@ virtual bool IsGap() const V8_OVERRIDE { return true; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; static LGap* cast(LInstruction* instr) { - ASSERT(instr->IsGap()); + DCHECK(instr->IsGap()); return reinterpret_cast<LGap*>(instr); } @@ -427,7 +414,7 @@ class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { public: - explicit LDummy() { } + LDummy() {} DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") }; @@ -443,6 +430,7 @@ class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: + virtual bool IsControl() const V8_OVERRIDE { return true; } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") DECLARE_HYDROGEN_ACCESSOR(Deoptimize) }; @@ -713,14 +701,14 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LDivI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; + LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; temps_[0] = temp; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") @@ -767,6 +755,23 @@ }; +class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + class LMulI V8_FINAL : public LTemplateInstruction<1, 
2, 0> { public: LMulI(LOperand* left, LOperand* right) { @@ -875,6 +880,16 @@ }; +class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFround(LOperand* value) { inputs_[0] = value; } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") +}; + + class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LMathAbs(LOperand* context, LOperand* value) { @@ -1563,7 +1578,7 @@ return parameter_count()->IsConstantOperand(); } LConstantOperand* constant_parameter_count() { - ASSERT(has_constant_parameter_count()); + DCHECK(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } LOperand* parameter_count() { return inputs_[2]; } @@ -1585,15 +1600,17 @@ }; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadNamedGeneric(LOperand* context, LOperand* object) { + LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) { inputs_[0] = context; inputs_[1] = object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) @@ -1650,23 +1667,27 @@ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } }; -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { public: - LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) { + LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key, + LOperand* vector) { 
inputs_[0] = context; inputs_[1] = object; inputs_[2] = key; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) }; @@ -1677,15 +1698,18 @@ }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + LLoadGlobalGeneric(LOperand* context, LOperand* global_object, + LOperand* vector) { inputs_[0] = context; inputs_[1] = global_object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) @@ -1771,15 +1795,15 @@ }; -class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> { +class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> { public: LStoreCodeEntry(LOperand* function, LOperand* code_object) { inputs_[0] = function; - temps_[0] = code_object; + inputs_[1] = code_object; } LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return temps_[0]; } + LOperand* code_object() { return inputs_[1]; } virtual void PrintDataTo(StringStream* stream); @@ -1850,18 +1874,18 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { public: - LCallWithDescriptor(const CallInterfaceDescriptor* descriptor, - ZoneList<LOperand*>& operands, + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, Zone* zone) : descriptor_(descriptor), - inputs_(descriptor->environment_length() + 1, zone) { - ASSERT(descriptor->environment_length() + 
1 == operands.length()); + inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); inputs_.AddAll(operands, zone); } LOperand* target() const { return inputs_[0]; } - const CallInterfaceDescriptor* descriptor() { return descriptor_; } + const InterfaceDescriptor* descriptor() { return descriptor_; } private: DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") @@ -1871,7 +1895,7 @@ int arity() const { return hydrogen()->argument_count() - 1; } - const CallInterfaceDescriptor* descriptor_; + const InterfaceDescriptor* descriptor_; ZoneList<LOperand*> inputs_; // Iterator support. @@ -1968,7 +1992,7 @@ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { return save_doubles() == kDontSaveFPRegs; } @@ -2164,7 +2188,6 @@ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> transition() const { return hydrogen()->transition_map(); } Representation representation() const { return hydrogen()->field_representation(); } @@ -2226,7 +2249,7 @@ } return hydrogen()->NeedsCanonicalization(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } }; @@ -2379,7 +2402,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMaps(LOperand* value) { + explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; } @@ -2672,6 +2695,35 @@ }; +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: 
public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + class LChunkBuilder; class LPlatformChunk V8_FINAL : public LChunk { public: @@ -2696,11 +2748,11 @@ next_block_(NULL), allocator_(allocator) { } + Isolate* isolate() const { return graph_->isolate(); } + // Build the sequence for the graph. LPlatformChunk* Build(); - LInstruction* CheckElideControlInstruction(HControlInstruction* instr); - // Declare methods that deal with the individual node types. #define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2714,6 +2766,7 @@ LInstruction* DoMathFloor(HUnaryMathOperation* instr); LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathFround(HUnaryMathOperation* instr); LInstruction* DoMathAbs(HUnaryMathOperation* instr); LInstruction* DoMathLog(HUnaryMathOperation* instr); LInstruction* DoMathExp(HUnaryMathOperation* instr); @@ -2722,12 +2775,13 @@ LInstruction* DoMathClz32(HUnaryMathOperation* instr); LInstruction* DoDivByPowerOf2I(HDiv* instr); LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HBinaryOperation* instr); + LInstruction* DoDivI(HDiv* instr); LInstruction* DoModByPowerOf2I(HMod* instr); LInstruction* DoModByConstI(HMod* instr); LInstruction* DoModI(HMod* instr); LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); private: enum Status { @@ -2793,6 +2847,7 @@ // Temporary operand that must be in a register. 
MUST_USE_RESULT LUnallocated* TempRegister(); + MUST_USE_RESULT LUnallocated* TempDoubleRegister(); MUST_USE_RESULT LOperand* FixedTemp(Register reg); MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); @@ -2822,6 +2877,7 @@ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* current); void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); diff -Nru nodejs-0.11.13/deps/v8/src/arm/lithium-codegen-arm.cc nodejs-0.11.15/deps/v8/src/arm/lithium-codegen-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/lithium-codegen-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/lithium-codegen-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "arm/lithium-codegen-arm.h" -#include "arm/lithium-gap-resolver-arm.h" -#include "code-stubs.h" -#include "stub-cache.h" -#include "hydrogen-osr.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/arm/lithium-codegen-arm.h" +#include "src/arm/lithium-gap-resolver-arm.h" +#include "src/code-stubs.h" +#include "src/hydrogen-osr.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -64,7 +41,7 @@ bool LCodeGen::GenerateCode() { LPhase phase("Z_Code generation", chunk()); - ASSERT(is_unused()); + DCHECK(is_unused()); status_ = GENERATING; // Open a frame scope to indicate that there is a frame on the stack. 
The @@ -81,24 +58,17 @@ void LCodeGen::FinishCode(Handle<Code> code) { - ASSERT(is_done()); + DCHECK(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); PopulateDeoptimizationData(code); - info()->CommitDependencies(code); -} - - -void LCodeGen::Abort(BailoutReason reason) { - info()->set_bailout_reason(reason); - status_ = ABORTED; } void LCodeGen::SaveCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -113,8 +83,8 @@ void LCodeGen::RestoreCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Restore clobbered callee double registers"); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -129,7 +99,7 @@ bool LCodeGen::GeneratePrologue() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (info()->IsOptimizing()) { ProfileEntryHookStub::MaybeCallEntryHook(masm_); @@ -160,7 +130,7 @@ __ b(ne, &ok); __ ldr(r2, GlobalObjectOperand()); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); __ str(r2, MemOperand(sp, receiver_offset)); @@ -170,7 +140,11 @@ info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - __ Prologue(info()->IsStub() ? 
BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } frame_is_built_ = true; info_->AddNoFrameRange(0, masm_->pc_offset()); } @@ -205,13 +179,16 @@ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is in r1. if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ push(r1); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } RecordSafepoint(Safepoint::kNoLazyDeopt); // Context is returned in both r0 and cp. It replaces the context @@ -231,13 +208,20 @@ MemOperand target = ContextOperand(cp, var->index()); __ str(r0, target); // Update the write barrier. This clobbers r3 and r0. - __ RecordWriteContextSlot( - cp, - target.offset(), - r0, - r3, - GetLinkRegisterState(), - kSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, + target.offset(), + r0, + r3, + GetLinkRegisterState(), + kSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, r0, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } Comment(";;; End allocate local context"); @@ -263,7 +247,7 @@ // Adjust the frame size, subsuming the unoptimized frame into the // optimized frame. 
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); - ASSERT(slots >= 0); + DCHECK(slots >= 0); __ sub(sp, sp, Operand(slots * kPointerSize)); } @@ -279,7 +263,7 @@ bool LCodeGen::GenerateDeferredCode() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; @@ -297,8 +281,8 @@ __ bind(code->entry()); if (NeedsDeferredFrame()) { Comment(";;; Build frame"); - ASSERT(!frame_is_built_); - ASSERT(info()->IsStub()); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); frame_is_built_ = true; __ PushFixedFrame(); __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); @@ -309,7 +293,7 @@ code->Generate(); if (NeedsDeferredFrame()) { Comment(";;; Destroy frame"); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); __ pop(ip); __ PopFixedFrame(); frame_is_built_ = false; @@ -340,48 +324,79 @@ } if (deopt_jump_table_.length() > 0) { + Label needs_frame, call_deopt_entry; + Comment(";;; -------------------- Jump table --------------------"); - } - Label table_start; - __ bind(&table_start); - Label needs_frame; - for (int i = 0; i < deopt_jump_table_.length(); i++) { - __ bind(&deopt_jump_table_[i].label); - Address entry = deopt_jump_table_[i].address; - Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; - int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); - if (id == Deoptimizer::kNotDeoptimizationEntry) { - Comment(";;; jump table entry %d.", i); - } else { + Address base = deopt_jump_table_[0].address; + + Register entry_offset = scratch0(); + + int length = deopt_jump_table_.length(); + for (int i = 0; i < length; i++) { + __ bind(&deopt_jump_table_[i].label); + + Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; + DCHECK(type == deopt_jump_table_[0].bailout_type); + Address entry = deopt_jump_table_[i].address; + int id = 
Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK(id != Deoptimizer::kNotDeoptimizationEntry); Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); - } - if (deopt_jump_table_[i].needs_frame) { - ASSERT(!info()->saves_caller_doubles()); - __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); - if (needs_frame.is_bound()) { - __ b(&needs_frame); + + // Second-level deopt table entries are contiguous and small, so instead + // of loading the full, absolute address of each one, load an immediate + // offset which will be added to the base address later. + __ mov(entry_offset, Operand(entry - base)); + + if (deopt_jump_table_[i].needs_frame) { + DCHECK(!info()->saves_caller_doubles()); + if (needs_frame.is_bound()) { + __ b(&needs_frame); + } else { + __ bind(&needs_frame); + Comment(";;; call deopt with frame"); + __ PushFixedFrame(); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + DCHECK(info()->IsStub()); + __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); + __ push(ip); + __ add(fp, sp, + Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + __ bind(&call_deopt_entry); + // Add the base address to the offset previously loaded in + // entry_offset. + __ add(entry_offset, entry_offset, + Operand(ExternalReference::ForDeoptEntry(base))); + __ blx(entry_offset); + } + + masm()->CheckConstPool(false, false); } else { - __ bind(&needs_frame); - __ PushFixedFrame(); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. 
- ASSERT(info()->IsStub()); - __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); - __ push(scratch0()); - __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - __ mov(lr, Operand(pc), LeaveCC, al); - __ mov(pc, ip); + // The last entry can fall through into `call_deopt_entry`, avoiding a + // branch. + bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); + + if (need_branch) __ b(&call_deopt_entry); + + masm()->CheckConstPool(false, !need_branch); } - } else { + } + + if (!call_deopt_entry.is_bound()) { + Comment(";;; call deopt"); + __ bind(&call_deopt_entry); + if (info()->saves_caller_doubles()) { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); RestoreCallerDoubles(); } - __ mov(lr, Operand(pc), LeaveCC, al); - __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); + + // Add the base address to the offset previously loaded in entry_offset. + __ add(entry_offset, entry_offset, + Operand(ExternalReference::ForDeoptEntry(base))); + __ blx(entry_offset); } - masm()->CheckConstPool(false, false); } // Force constant pool emission at the end of the deopt jump table to make @@ -396,7 +411,7 @@ bool LCodeGen::GenerateSafepointTable() { - ASSERT(is_done()); + DCHECK(is_done()); safepoints_.Emit(masm(), GetStackSlotCount()); return !is_aborted(); } @@ -413,7 +428,7 @@ Register LCodeGen::ToRegister(LOperand* op) const { - ASSERT(op->IsRegister()); + DCHECK(op->IsRegister()); return ToRegister(op->index()); } @@ -427,12 +442,12 @@ Handle<Object> literal = constant->handle(isolate()); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); + DCHECK(literal->IsNumber()); __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); } else if (r.IsDouble()) { Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); } else { - ASSERT(r.IsSmiOrTagged()); + DCHECK(r.IsSmiOrTagged()); __ Move(scratch, literal); } return scratch; @@ -446,7 +461,7 @@ 
DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - ASSERT(op->IsDoubleRegister()); + DCHECK(op->IsDoubleRegister()); return ToDoubleRegister(op->index()); } @@ -462,7 +477,7 @@ Handle<Object> literal = constant->handle(isolate()); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); + DCHECK(literal->IsNumber()); __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); __ vmov(flt_scratch, ip); __ vcvt_f64_s32(dbl_scratch, flt_scratch); @@ -486,7 +501,7 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); return constant->handle(isolate()); } @@ -511,7 +526,7 @@ HConstant* constant = chunk_->LookupConstant(op); int32_t value = constant->Integer32Value(); if (r.IsInteger32()) return value; - ASSERT(r.IsSmiOrTagged()); + DCHECK(r.IsSmiOrTagged()); return reinterpret_cast<int32_t>(Smi::FromInt(value)); } @@ -524,7 +539,7 @@ double LCodeGen::ToDouble(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasDoubleValue()); + DCHECK(constant->HasDoubleValue()); return constant->DoubleValue(); } @@ -535,15 +550,15 @@ HConstant* constant = chunk()->LookupConstant(const_op); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsSmi()) { - ASSERT(constant->HasSmiValue()); + DCHECK(constant->HasSmiValue()); return Operand(Smi::FromInt(constant->Integer32Value())); } else if (r.IsInteger32()) { - ASSERT(constant->HasInteger32Value()); + DCHECK(constant->HasInteger32Value()); return Operand(constant->Integer32Value()); } else if (r.IsDouble()) { Abort(kToOperandUnsupportedDoubleImmediate); } - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); return Operand(constant->handle(isolate())); } else if (op->IsRegister()) { return 
Operand(ToRegister(op)); @@ -558,15 +573,15 @@ static int ArgumentsOffsetWithoutFrame(int index) { - ASSERT(index < 0); + DCHECK(index < 0); return -(index + 1) * kPointerSize; } MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - ASSERT(!op->IsRegister()); - ASSERT(!op->IsDoubleRegister()); - ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); + DCHECK(!op->IsRegister()); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return MemOperand(fp, StackSlotOffset(op->index())); } else { @@ -578,7 +593,7 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { - ASSERT(op->IsDoubleStackSlot()); + DCHECK(op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); } else { @@ -614,13 +629,13 @@ translation->BeginConstructStubFrame(closure_id, translation_size); break; case JS_GETTER: - ASSERT(translation_size == 1); - ASSERT(height == 0); + DCHECK(translation_size == 1); + DCHECK(height == 0); translation->BeginGetterStubFrame(closure_id); break; case JS_SETTER: - ASSERT(translation_size == 2); - ASSERT(height == 0); + DCHECK(translation_size == 2); + DCHECK(height == 0); translation->BeginSetterStubFrame(closure_id); break; case STUB: @@ -714,6 +729,16 @@ } +int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) { + int size = masm()->CallSize(code, mode); + if (code->kind() == Code::BINARY_OP_IC || + code->kind() == Code::COMPARE_IC) { + size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric. + } + return size; +} + + void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr, @@ -727,7 +752,7 @@ LInstruction* instr, SafepointMode safepoint_mode, TargetAddressStorageMode storage_mode) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); // Block literal pool emission to ensure nop indicating no inlined smi code // is in the correct position. 
Assembler::BlockConstPoolScope block_const_pool(masm()); @@ -747,7 +772,7 @@ int num_arguments, LInstruction* instr, SaveFPRegsMode save_doubles) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); __ CallRuntime(function, num_arguments, save_doubles); @@ -783,6 +808,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); if (!environment->HasBeenRegistered()) { // Physical stack frame layout: // -x ............. -4 0 ..................................... y @@ -821,9 +847,9 @@ LEnvironment* environment, Deoptimizer::BailoutType bailout_type) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - ASSERT(environment->HasBeenRegistered()); + DCHECK(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - ASSERT(info()->IsOptimizing() || info()->IsStub()); + DCHECK(info()->IsOptimizing() || info()->IsStub()); Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { @@ -846,7 +872,7 @@ __ mov(scratch, Operand(count)); __ ldr(r1, MemOperand(scratch)); __ sub(r1, r1, Operand(1), SetCC); - __ movw(r1, FLAG_deopt_every_n_times, eq); + __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq); __ str(r1, MemOperand(scratch)); __ pop(r1); @@ -870,7 +896,7 @@ __ stop("trap_on_deopt", condition); } - ASSERT(info()->IsStub() || frame_is_built_); + DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. 
if (condition == al && frame_is_built_ && @@ -906,7 +932,7 @@ int length = deoptimizations_.length(); if (length == 0) return; Handle<DeoptimizationInputData> data = - factory()->NewDeoptimizationInputData(length, TENURED); + DeoptimizationInputData::New(isolate(), length, 0, TENURED); Handle<ByteArray> translations = translations_.CreateByteArray(isolate()->factory()); @@ -957,7 +983,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { - ASSERT(deoptimization_literals_.length() == 0); + DCHECK(deoptimization_literals_.length() == 0); const ZoneList<Handle<JSFunction> >* inlined_closures = chunk()->inlined_closures(); @@ -977,7 +1003,7 @@ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); } else { - ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kLazyDeopt); } @@ -989,7 +1015,7 @@ Safepoint::Kind kind, int arguments, Safepoint::DeoptMode deopt_mode) { - ASSERT(expected_safepoint_kind_ == kind); + DCHECK(expected_safepoint_kind_ == kind); const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), @@ -1029,15 +1055,6 @@ } -void LCodeGen::RecordSafepointWithRegistersAndDoubles( - LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint( - pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode); -} - - void LCodeGen::RecordAndWritePosition(int position) { if (position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); @@ -1091,22 +1108,22 @@ void LCodeGen::DoCallStub(LCallStub* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + 
DCHECK(ToRegister(instr->result()).is(r0)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpExec: { - RegExpExecStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + RegExpExecStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { - SubStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { - StringCompareStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } default: @@ -1123,7 +1140,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister(instr->result()))); + DCHECK(dividend.is(ToRegister(instr->result()))); // Theoretically, a variation of the branch-free code for integer division by // a power of 2 (calculating the remainder via an additional multiplication @@ -1157,7 +1174,7 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(!dividend.is(result)); + DCHECK(!dividend.is(result)); if (divisor == 0) { DeoptimizeIf(al, instr->environment()); @@ -1220,7 +1237,7 @@ // mls r3, r3, r2, r1 __ sdiv(result_reg, left_reg, right_reg); - __ mls(result_reg, result_reg, right_reg, left_reg); + __ Mls(result_reg, result_reg, right_reg, left_reg); // If we care about -0, test if the dividend is <0 and the result is 0. 
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { @@ -1237,15 +1254,15 @@ Register right_reg = ToRegister(instr->right()); Register result_reg = ToRegister(instr->result()); Register scratch = scratch0(); - ASSERT(!scratch.is(left_reg)); - ASSERT(!scratch.is(right_reg)); - ASSERT(!scratch.is(result_reg)); + DCHECK(!scratch.is(left_reg)); + DCHECK(!scratch.is(right_reg)); + DCHECK(!scratch.is(result_reg)); DwVfpRegister dividend = ToDoubleRegister(instr->temp()); DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); - ASSERT(!divisor.is(dividend)); + DCHECK(!divisor.is(dividend)); LowDwVfpRegister quotient = double_scratch0(); - ASSERT(!quotient.is(dividend)); - ASSERT(!quotient.is(divisor)); + DCHECK(!quotient.is(dividend)); + DCHECK(!quotient.is(divisor)); Label done; // Check for x % 0, we have to deopt in this case because we can't return a @@ -1293,8 +1310,8 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); - ASSERT(!result.is(dividend)); + DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); // Check for (0 / -x) that will produce negative zero. HDiv* hdiv = instr->hydrogen(); @@ -1337,7 +1354,7 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(!dividend.is(result)); + DCHECK(!dividend.is(result)); if (divisor == 0) { DeoptimizeIf(al, instr->environment()); @@ -1363,15 +1380,16 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 
void LCodeGen::DoDivI(LDivI* instr) { HBinaryOperation* hdiv = instr->hydrogen(); - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); Register result = ToRegister(instr->result()); // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand::Zero()); + __ cmp(divisor, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); } @@ -1380,10 +1398,10 @@ Label positive; if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { // Do the test only if it hadn't be done above. - __ cmp(right, Operand::Zero()); + __ cmp(divisor, Operand::Zero()); } __ b(pl, &positive); - __ cmp(left, Operand::Zero()); + __ cmp(dividend, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); __ bind(&positive); } @@ -1394,39 +1412,30 @@ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { // We don't need to check for overflow when truncating with sdiv // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. - __ cmp(left, Operand(kMinInt)); - __ cmp(right, Operand(-1), eq); + __ cmp(dividend, Operand(kMinInt)); + __ cmp(divisor, Operand(-1), eq); DeoptimizeIf(eq, instr->environment()); } if (CpuFeatures::IsSupported(SUDIV)) { CpuFeatureScope scope(masm(), SUDIV); - __ sdiv(result, left, right); + __ sdiv(result, dividend, divisor); } else { DoubleRegister vleft = ToDoubleRegister(instr->temp()); DoubleRegister vright = double_scratch0(); - __ vmov(double_scratch0().low(), left); + __ vmov(double_scratch0().low(), dividend); __ vcvt_f64_s32(vleft, double_scratch0().low()); - __ vmov(double_scratch0().low(), right); + __ vmov(double_scratch0().low(), divisor); __ vcvt_f64_s32(vright, double_scratch0().low()); __ vdiv(vleft, vleft, vright); // vleft now contains the result. 
__ vcvt_s32_f64(double_scratch0().low(), vleft); __ vmov(result, double_scratch0().low()); } - if (hdiv->IsMathFloorOfDiv()) { - Label done; - Register remainder = scratch0(); - __ mls(remainder, result, right, left); - __ cmp(remainder, Operand::Zero()); - __ b(eq, &done); - __ eor(remainder, remainder, Operand(right)); - __ add(result, result, Operand(remainder, ASR, 31)); - __ bind(&done); - } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { // Compute remainder and deopt if it's not zero. Register remainder = scratch0(); - __ mls(remainder, result, right, left); + __ Mls(remainder, result, divisor, dividend); __ cmp(remainder, Operand::Zero()); DeoptimizeIf(ne, instr->environment()); } @@ -1439,7 +1448,7 @@ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); // This is computed in-place. - ASSERT(addend.is(ToDoubleRegister(instr->result()))); + DCHECK(addend.is(ToDoubleRegister(instr->result()))); __ vmla(addend, multiplier, multiplicand); } @@ -1451,7 +1460,7 @@ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); // This is computed in-place. - ASSERT(minuend.is(ToDoubleRegister(instr->result()))); + DCHECK(minuend.is(ToDoubleRegister(instr->result()))); __ vmls(minuend, multiplier, multiplicand); } @@ -1462,9 +1471,14 @@ Register result = ToRegister(instr->result()); int32_t divisor = instr->divisor(); + // If the divisor is 1, return the dividend. + if (divisor == 1) { + __ Move(result, dividend); + return; + } + // If the divisor is positive, things are easy: There can be no deopts and we // can simply do an arithmetic right shift. 
- if (divisor == 1) return; int32_t shift = WhichPowerOf2Abs(divisor); if (divisor > 1) { __ mov(result, Operand(dividend, ASR, shift)); @@ -1476,19 +1490,23 @@ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(eq, instr->environment()); } - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - // Note that we could emit branch-free code, but that would need one more - // register. - if (divisor == -1) { + + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { DeoptimizeIf(vs, instr->environment()); - __ mov(result, Operand(dividend, ASR, shift)); - } else { - __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); - __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc); } - } else { - __ mov(result, Operand(dividend, ASR, shift)); + return; } + + // If the negation could not overflow, simply shifting is OK. + if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ mov(result, Operand(result, ASR, shift)); + return; + } + + __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); + __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); } @@ -1496,7 +1514,7 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(!dividend.is(result)); + DCHECK(!dividend.is(result)); if (divisor == 0) { DeoptimizeIf(al, instr->environment()); @@ -1522,7 +1540,7 @@ // In the general case we may need to adjust before and after the truncating // division to get a flooring division. Register temp = ToRegister(instr->temp()); - ASSERT(!temp.is(dividend) && !temp.is(result)); + DCHECK(!temp.is(dividend) && !temp.is(result)); Label needs_adjustment, done; __ cmp(dividend, Operand::Zero()); __ b(divisor > 0 ? lt : gt, &needs_adjustment); @@ -1538,6 +1556,69 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register left = ToRegister(instr->dividend()); + Register right = ToRegister(instr->divisor()); + Register result = ToRegister(instr->result()); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + __ cmp(right, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + } + + // Check for (0 / -x) that will produce negative zero. + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label positive; + if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { + // Do the test only if it hadn't be done above. + __ cmp(right, Operand::Zero()); + } + __ b(pl, &positive); + __ cmp(left, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + __ bind(&positive); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow) && + (!CpuFeatures::IsSupported(SUDIV) || + !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + // We don't need to check for overflow when truncating with sdiv + // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. + __ cmp(left, Operand(kMinInt)); + __ cmp(right, Operand(-1), eq); + DeoptimizeIf(eq, instr->environment()); + } + + if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatureScope scope(masm(), SUDIV); + __ sdiv(result, left, right); + } else { + DoubleRegister vleft = ToDoubleRegister(instr->temp()); + DoubleRegister vright = double_scratch0(); + __ vmov(double_scratch0().low(), left); + __ vcvt_f64_s32(vleft, double_scratch0().low()); + __ vmov(double_scratch0().low(), right); + __ vcvt_f64_s32(vright, double_scratch0().low()); + __ vdiv(vleft, vleft, vright); // vleft now contains the result. 
+ __ vcvt_s32_f64(double_scratch0().low(), vleft); + __ vmov(result, double_scratch0().low()); + } + + Label done; + Register remainder = scratch0(); + __ Mls(remainder, result, right, left); + __ cmp(remainder, Operand::Zero()); + __ b(eq, &done); + __ eor(remainder, remainder, Operand(right)); + __ add(result, result, Operand(remainder, ASR, 31)); + __ bind(&done); +} + + void LCodeGen::DoMulI(LMulI* instr) { Register result = ToRegister(instr->result()); // Note that result may alias left. @@ -1609,7 +1690,7 @@ } } else { - ASSERT(right_op->IsRegister()); + DCHECK(right_op->IsRegister()); Register right = ToRegister(right_op); if (overflow) { @@ -1648,7 +1729,7 @@ void LCodeGen::DoBitI(LBitI* instr) { LOperand* left_op = instr->left(); LOperand* right_op = instr->right(); - ASSERT(left_op->IsRegister()); + DCHECK(left_op->IsRegister()); Register left = ToRegister(left_op); Register result = ToRegister(instr->result()); Operand right(no_reg); @@ -1656,7 +1737,7 @@ if (right_op->IsStackSlot()) { right = Operand(EmitLoadRegister(right_op, ip)); } else { - ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); + DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); right = ToOperand(right_op); } @@ -1780,7 +1861,7 @@ Register right_reg = EmitLoadRegister(right, ip); __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); + DCHECK(right->IsRegister() || right->IsConstantOperand()); __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); } @@ -1801,7 +1882,7 @@ Register right_reg = EmitLoadRegister(right, ip); __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); + DCHECK(right->IsRegister() || right->IsConstantOperand()); __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); } @@ -1822,7 +1903,7 @@ void 
LCodeGen::DoConstantD(LConstantD* instr) { - ASSERT(instr->result()->IsDoubleRegister()); + DCHECK(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); double v = instr->value(); __ Vmov(result, v, scratch0()); @@ -1835,9 +1916,9 @@ void LCodeGen::DoConstantT(LConstantT* instr) { - Handle<Object> value = instr->value(isolate()); + Handle<Object> object = instr->value(isolate()); AllowDeferredHandleDereference smi_check; - __ Move(ToRegister(instr->result()), value); + __ Move(ToRegister(instr->result()), object); } @@ -1854,10 +1935,10 @@ Register scratch = ToRegister(instr->temp()); Smi* index = instr->index(); Label runtime, done; - ASSERT(object.is(result)); - ASSERT(object.is(r0)); - ASSERT(!scratch.is(scratch0())); - ASSERT(!scratch.is(object)); + DCHECK(object.is(result)); + DCHECK(object.is(r0)); + DCHECK(!scratch.is(scratch0())); + DCHECK(!scratch.is(object)); __ SmiTst(object); DeoptimizeIf(eq, instr->environment()); @@ -1899,8 +1980,8 @@ return FieldMemOperand(string, SeqString::kHeaderSize + offset); } Register scratch = scratch0(); - ASSERT(!scratch.is(string)); - ASSERT(!scratch.is(ToRegister(index))); + DCHECK(!scratch.is(string)); + DCHECK(!scratch.is(ToRegister(index))); if (encoding == String::ONE_BYTE_ENCODING) { __ add(scratch, string, Operand(ToRegister(index))); } else { @@ -1974,7 +2055,7 @@ Register right_reg = EmitLoadRegister(right, ip); __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); + DCHECK(right->IsRegister() || right->IsConstantOperand()); __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); } @@ -1999,7 +2080,7 @@ __ Move(result_reg, left_reg, condition); __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); } else { - ASSERT(instr->hydrogen()->representation().IsDouble()); + DCHECK(instr->hydrogen()->representation().IsDouble()); DwVfpRegister left_reg = 
ToDoubleRegister(left); DwVfpRegister right_reg = ToDoubleRegister(right); DwVfpRegister result_reg = ToDoubleRegister(instr->result()); @@ -2086,16 +2167,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).is(r1)); - ASSERT(ToRegister(instr->right()).is(r0)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(r1)); + DCHECK(ToRegister(instr->right()).is(r0)); + DCHECK(ToRegister(instr->result()).is(r0)); - BinaryOpICStub stub(instr->op(), NO_OVERWRITE); + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); // Block literal pool emission to ensure nop indicating no inlined smi code // is in the correct position. Assembler::BlockConstPoolScope block_const_pool(masm()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -2134,34 +2215,34 @@ void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32() || r.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); Register reg = ToRegister(instr->value()); __ cmp(reg, Operand::Zero()); EmitBranch(instr, ne); } else if (r.IsDouble()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); DwVfpRegister reg = ToDoubleRegister(instr->value()); // Test the double value. Zero and NaN are false. __ VFPCompareAndSetFlags(reg, 0.0); __ cmp(r0, r0, vs); // If NaN, set the Z flag. 
(NaN -> false) EmitBranch(instr, ne); } else { - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ CompareRoot(reg, Heap::kTrueValueRootIndex); EmitBranch(instr, eq); } else if (type.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ cmp(reg, Operand::Zero()); EmitBranch(instr, ne); } else if (type.IsJSArray()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitBranch(instr, al); } else if (type.IsHeapNumber()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); DwVfpRegister dbl_scratch = double_scratch0(); __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); // Test the double value. Zero and NaN are false. @@ -2169,7 +2250,7 @@ __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) EmitBranch(instr, ne); } else if (type.IsString()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); __ cmp(ip, Operand::Zero()); EmitBranch(instr, ne); @@ -2314,7 +2395,10 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - Condition cond = TokenToCondition(instr->op(), false); + bool is_unsigned = + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cond = TokenToCondition(instr->op(), is_unsigned); if (left->IsConstantOperand() && right->IsConstantOperand()) { // We can statically evaluate the comparison. @@ -2346,8 +2430,8 @@ } else { __ cmp(ToRegister(right), Operand(value)); } - // We transposed the operands. Reverse the condition. - cond = ReverseCondition(cond); + // We commuted the operands, so commute the condition. 
+ cond = CommuteCondition(cond); } else { __ cmp(ToRegister(left), ToRegister(right)); } @@ -2388,7 +2472,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { Representation rep = instr->hydrogen()->value()->representation(); - ASSERT(!rep.IsInteger32()); + DCHECK(!rep.IsInteger32()); Register scratch = ToRegister(instr->temp()); if (rep.IsDouble()) { @@ -2470,7 +2554,7 @@ Register temp1 = ToRegister(instr->temp()); SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; Condition true_cond = EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); @@ -2490,7 +2574,7 @@ Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); @@ -2521,7 +2605,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2539,7 +2623,7 @@ InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; - ASSERT(from == to || to == LAST_TYPE); + DCHECK(from == to || to == LAST_TYPE); return from; } @@ -2559,7 +2643,7 @@ Register scratch = scratch0(); Register input = ToRegister(instr->value()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } @@ -2599,9 +2683,9 @@ Register input, Register temp, Register temp2) { - ASSERT(!input.is(temp)); - ASSERT(!input.is(temp2)); - ASSERT(!temp.is(temp2)); + DCHECK(!input.is(temp)); + 
DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); __ JumpIfSmi(input, is_false); @@ -2682,12 +2766,12 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. - ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. + DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. - InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ cmp(r0, Operand::Zero()); __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); @@ -2702,13 +2786,17 @@ LInstanceOfKnownGlobal* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { - codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); + codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, + &load_bool_); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } Label* map_check() { return &map_check_; } + Label* load_bool() { return &load_bool_; } + private: LInstanceOfKnownGlobal* instr_; Label map_check_; + Label load_bool_; }; DeferredInstanceOfKnownGlobal* deferred; @@ -2736,12 +2824,12 @@ // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. - PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); __ mov(ip, Operand(Handle<Object>(cell))); __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); __ cmp(map, Operand(ip)); __ b(ne, &cache_miss); + __ bind(deferred->load_bool()); // Label for calculating code patching. 
// We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch // with true or false. @@ -2775,7 +2863,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check) { + Label* map_check, + Label* bool_load) { InstanceofStub::Flags flags = InstanceofStub::kNoFlags; flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kArgsInRegisters); @@ -2783,29 +2872,43 @@ flags | InstanceofStub::kCallSiteInlineCheck); flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kReturnTrueFalseObject); - InstanceofStub stub(flags); + InstanceofStub stub(isolate(), flags); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); LoadContextFromDeferred(instr->context()); __ Move(InstanceofStub::right(), instr->function()); - static const int kAdditionalDelta = 4; + + int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); + int additional_delta = (call_size / Assembler::kInstrSize) + 4; // Make sure that code size is predicable, since we use specific constants // offsets in the code to find embedded values.. - PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); - int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; - Label before_push_delta; - __ bind(&before_push_delta); - __ BlockConstPoolFor(kAdditionalDelta); - // r5 is used to communicate the offset to the location of the map check. - __ mov(r5, Operand(delta * kPointerSize)); - // The mov above can generate one or two instructions. The delta was computed - // for two instructions, so we need to pad here in case of one instruction. 
- if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) { - ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta)); - __ nop(); + PredictableCodeSizeScope predictable( + masm_, (additional_delta + 1) * Assembler::kInstrSize); + // Make sure we don't emit any additional entries in the constant pool before + // the call to ensure that the CallCodeSize() calculated the correct number of + // instructions for the constant pool load. + { + ConstantPoolUnavailableScope constant_pool_unavailable(masm_); + int map_check_delta = + masm_->InstructionsGeneratedSince(map_check) + additional_delta; + int bool_load_delta = + masm_->InstructionsGeneratedSince(bool_load) + additional_delta; + Label before_push_delta; + __ bind(&before_push_delta); + __ BlockConstPoolFor(additional_delta); + // r5 is used to communicate the offset to the location of the map check. + __ mov(r5, Operand(map_check_delta * kPointerSize)); + // r6 is used to communicate the offset to the location of the bool load. + __ mov(r6, Operand(bool_load_delta * kPointerSize)); + // The mov above can generate one or two instructions. The delta was + // computed for two instructions, so we need to pad here in case of one + // instruction. 
+ while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) { + __ nop(); + } } - CallCodeGeneric(stub.GetCode(isolate()), + CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -2818,7 +2921,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2887,11 +2990,20 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->global_object()).is(r0)); - ASSERT(ToRegister(instr->result()).is(r0)); - - __ mov(r2, Operand(instr->name())); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(r0)); + + __ mov(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Move(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(r0)); + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -2961,7 +3073,7 @@ __ str(value, target); if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; __ RecordWriteContextSlot(context, target.offset(), @@ -3006,12 +3118,21 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(r0)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(r0)); // Name is always in r2. - __ mov(r2, Operand(instr->name())); + __ mov(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Move(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(r0)); + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } @@ -3022,17 +3143,6 @@ Register function = ToRegister(instr->function()); Register result = ToRegister(instr->result()); - // Check that the function really is a function. Load map into the - // result register. - __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); - DeoptimizeIf(ne, instr->environment()); - - // Make sure that the function has an instance prototype. - Label non_instance; - __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); - __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); - __ b(ne, &non_instance); - // Get the prototype or initial map from the function. __ ldr(result, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); @@ -3049,12 +3159,6 @@ // Get the prototype from the initial map. 
__ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); - __ jmp(&done); - - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. - __ bind(&non_instance); - __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); // All done. __ bind(&done); @@ -3120,17 +3224,13 @@ int element_size_shift = ElementsKindToShiftSize(elements_kind); int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) - ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; - + int base_offset = instr->base_offset(); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS || elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - int base_offset = - (instr->additional_index() << element_size_shift) + additional_offset; + int base_offset = instr->base_offset(); DwVfpRegister result = ToDoubleRegister(instr->result()); Operand operand = key_is_constant ? Operand(constant_key << element_size_shift) @@ -3140,15 +3240,14 @@ elements_kind == FLOAT32_ELEMENTS) { __ vldr(double_scratch0().low(), scratch0(), base_offset); __ vcvt_f64_f32(result, double_scratch0().low()); - } else { // loading doubles, not floats. + } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS __ vldr(result, scratch0(), base_offset); } } else { Register result = ToRegister(instr->result()); MemOperand mem_operand = PrepareKeyedOperand( key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - instr->additional_index(), additional_offset); + element_size_shift, shift_size, base_offset); switch (elements_kind) { case EXTERNAL_INT8_ELEMENTS: case INT8_ELEMENTS: @@ -3208,15 +3307,13 @@ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int base_offset = - FixedDoubleArray::kHeaderSize - kHeapObjectTag + - (instr->additional_index() << element_size_shift); + int base_offset = instr->base_offset(); if (key_is_constant) { int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { Abort(kArrayIndexConstantValueTooBig); } - base_offset += constant_key << element_size_shift; + base_offset += constant_key * kDoubleSize; } __ add(scratch, elements, Operand(base_offset)); @@ -3242,12 +3339,11 @@ Register result = ToRegister(instr->result()); Register scratch = scratch0(); Register store_base = scratch; - int offset = 0; + int offset = instr->base_offset(); if (instr->key()->IsConstantOperand()) { LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); + offset += ToInteger32(const_operand) * kPointerSize; store_base = elements; } else { Register key = ToRegister(instr->key()); @@ -3260,9 +3356,8 @@ } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); } - __ ldr(result, FieldMemOperand(store_base, offset)); + __ ldr(result, MemOperand(store_base, offset)); // Check for the hole value. 
if (instr->hydrogen()->RequiresHoleCheck()) { @@ -3295,52 +3390,45 @@ int constant_key, int element_size, int shift_size, - int additional_index, - int additional_offset) { - int base_offset = (additional_index << element_size) + additional_offset; + int base_offset) { if (key_is_constant) { - return MemOperand(base, - base_offset + (constant_key << element_size)); + return MemOperand(base, (constant_key << element_size) + base_offset); } - if (additional_offset != 0) { - __ mov(scratch0(), Operand(base_offset)); - if (shift_size >= 0) { - __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size)); - } else { - ASSERT_EQ(-1, shift_size); - __ add(scratch0(), scratch0(), Operand(key, LSR, 1)); - } - return MemOperand(base, scratch0()); - } - - if (additional_index != 0) { - additional_index *= 1 << (element_size - shift_size); - __ add(scratch0(), key, Operand(additional_index)); - } - - if (additional_index == 0) { + if (base_offset == 0) { if (shift_size >= 0) { return MemOperand(base, key, LSL, shift_size); } else { - ASSERT_EQ(-1, shift_size); + DCHECK_EQ(-1, shift_size); return MemOperand(base, key, LSR, 1); } } if (shift_size >= 0) { - return MemOperand(base, scratch0(), LSL, shift_size); + __ add(scratch0(), base, Operand(key, LSL, shift_size)); + return MemOperand(scratch0(), base_offset); } else { - ASSERT_EQ(-1, shift_size); - return MemOperand(base, scratch0(), LSR, 1); + DCHECK_EQ(-1, shift_size); + __ add(scratch0(), base, Operand(key, ASR, 1)); + return MemOperand(scratch0(), base_offset); } } void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(r1)); - ASSERT(ToRegister(instr->key()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + 
DCHECK(vector.is(LoadIC::VectorRegister())); + __ Move(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(r0)); + __ mov(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); @@ -3436,8 +3524,7 @@ __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); __ ldr(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); - __ ldr(result, - FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset)); + __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); if (result.is(receiver)) { __ bind(&result_in_receiver); @@ -3457,9 +3544,9 @@ Register length = ToRegister(instr->length()); Register elements = ToRegister(instr->elements()); Register scratch = scratch0(); - ASSERT(receiver.is(r0)); // Used for parameter count. - ASSERT(function.is(r1)); // Required by InvokeFunction. - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(receiver.is(r0)); // Used for parameter count. + DCHECK(function.is(r1)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).is(r0)); // Copy the arguments to this function possibly from the // adaptor frame below it. @@ -3487,7 +3574,7 @@ __ b(ne, &loop); __ bind(&invoke); - ASSERT(instr->HasPointerMap()); + DCHECK(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); @@ -3527,19 +3614,19 @@ __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { // If there is no frame, the context must be in cp. 
- ASSERT(result.is(cp)); + DCHECK(result.is(cp)); } } void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); __ push(cp); // The context is the first argument. __ Move(scratch0(), instr->hydrogen()->pairs()); __ push(scratch0()); __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); __ push(scratch0()); - CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); } @@ -3585,8 +3672,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - ASSERT(instr->context() != NULL); - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(instr->context() != NULL); + DCHECK(ToRegister(instr->context()).is(cp)); Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3611,7 +3698,7 @@ // Input is negative. Reverse its sign. // Preserve the value of all registers. { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // Registers were saved at the safepoint, so we can use // many scratch registers. @@ -3630,7 +3717,7 @@ // Slow case: Call the runtime system to do the number allocation. __ bind(&slow); - CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr, + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, instr->context()); // Set the pointer to the new heap number in tmp. 
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); @@ -3761,6 +3848,15 @@ } +void LCodeGen::DoMathFround(LMathFround* instr) { + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); + DwVfpRegister output_reg = ToDoubleRegister(instr->result()); + LowDwVfpRegister scratch = double_scratch0(); + __ vcvt_f32_f64(scratch.low(), input_reg); + __ vcvt_f64_f32(output_reg, scratch.low()); +} + + void LCodeGen::DoMathSqrt(LMathSqrt* instr) { DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); @@ -3793,15 +3889,15 @@ Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. - ASSERT(!instr->right()->IsDoubleRegister() || + DCHECK(!instr->right()->IsDoubleRegister() || ToDoubleRegister(instr->right()).is(d1)); - ASSERT(!instr->right()->IsRegister() || + DCHECK(!instr->right()->IsRegister() || ToRegister(instr->right()).is(r2)); - ASSERT(ToDoubleRegister(instr->left()).is(d0)); - ASSERT(ToDoubleRegister(instr->result()).is(d2)); + DCHECK(ToDoubleRegister(instr->left()).is(d0)); + DCHECK(ToDoubleRegister(instr->result()).is(d2)); if (exponent_type.IsSmi()) { - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; @@ -3811,14 +3907,14 @@ __ cmp(r6, Operand(ip)); DeoptimizeIf(ne, instr->environment()); __ bind(&no_deopt); - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { - MathPowStub stub(MathPowStub::INTEGER); + MathPowStub stub(isolate(), MathPowStub::INTEGER); __ CallStub(&stub); } else { - ASSERT(exponent_type.IsDouble()); - MathPowStub stub(MathPowStub::DOUBLE); + DCHECK(exponent_type.IsDouble()); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); 
__ CallStub(&stub); } } @@ -3855,9 +3951,9 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->function()).is(r1)); - ASSERT(instr->HasPointerMap()); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->function()).is(r1)); + DCHECK(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { @@ -3876,7 +3972,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->result()).is(r0)); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -3885,15 +3981,21 @@ LConstantOperand* target = LConstantOperand::cast(instr->target()); Handle<Code> code = Handle<Code>::cast(ToHandle(target)); generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); - PlatformCallInterfaceDescriptor* call_descriptor = + PlatformInterfaceDescriptor* call_descriptor = instr->descriptor()->platform_specific_descriptor(); __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al, call_descriptor->storage_mode()); } else { - ASSERT(instr->target()->IsRegister()); + DCHECK(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(target)); - __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Make sure we don't emit any additional entries in the constant pool + // before the call to ensure that the CallCodeSize() calculated the correct + // number of instructions for the constant pool load. 
+ { + ConstantPoolUnavailableScope constant_pool_unavailable(masm_); + __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); + } __ Call(target); } generator.AfterCall(); @@ -3901,8 +4003,8 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { - ASSERT(ToRegister(instr->function()).is(r1)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->function()).is(r1)); + DCHECK(ToRegister(instr->result()).is(r0)); if (instr->hydrogen()->pass_argument_count()) { __ mov(r0, Operand(instr->arity())); @@ -3920,33 +4022,33 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->function()).is(r1)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->function()).is(r1)); + DCHECK(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->constructor()).is(r1)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(r1)); + DCHECK(ToRegister(instr->result()).is(r0)); __ mov(r0, Operand(instr->arity())); // No cell in r2 for construct type feedback in optimized code __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - 
ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->constructor()).is(r1)); - ASSERT(ToRegister(instr->result()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(r1)); + DCHECK(ToRegister(instr->result()).is(r0)); __ mov(r0, Operand(instr->arity())); __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); @@ -3957,8 +4059,8 @@ : DONT_OVERRIDE; if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { @@ -3970,18 +4072,20 @@ __ b(eq, &packed_case); ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ jmp(&done); __ bind(&packed_case); } - ArraySingleArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ bind(&done); } else { - ArrayNArgumentsConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } } @@ -4028,46 +4132,33 @@ return; } - Handle<Map> transition = instr->transition(); - SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() - ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - ASSERT(!(representation.IsSmi() && - instr->value()->IsConstantOperand() && - !IsSmi(LConstantOperand::cast(instr->value())))); - if (representation.IsHeapObject()) { - Register value = ToRegister(instr->value()); - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ SmiTst(value); - DeoptimizeIf(eq, instr->environment()); + __ AssertNotSmi(object); - // We know that value is a smi now, so we can omit the check below. - check_needed = OMIT_SMI_CHECK; - } - } else if (representation.IsDouble()) { - ASSERT(transition.is_null()); - ASSERT(access.IsInobject()); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsSmi(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); DwVfpRegister value = ToDoubleRegister(instr->value()); __ vstr(value, FieldMemOperand(object, offset)); return; } - if (!transition.is_null()) { + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); __ mov(scratch, Operand(transition)); __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); if (instr->hydrogen()->NeedsWriteBarrierForMap()) { Register temp = ToRegister(instr->temp()); // Update the write barrier for the map field. 
- __ RecordWriteField(object, - HeapObject::kMapOffset, - scratch, - temp, - GetLinkRegisterState(), - kSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteForMap(object, + scratch, + temp, + GetLinkRegisterState(), + kSaveFPRegs); } } @@ -4085,7 +4176,8 @@ GetLinkRegisterState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); } } else { __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -4101,56 +4193,47 @@ GetLinkRegisterState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); } } } void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(r1)); - ASSERT(ToRegister(instr->value()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); - // Name is always in r2. - __ mov(r2, Operand(instr->name())); + __ mov(StoreIC::NameRegister(), Operand(instr->name())); Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } -void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) { - if (FLAG_debug_code && check->hydrogen()->skip_check()) { +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? 
hi : hs; + if (instr->index()->IsConstantOperand()) { + Operand index = ToOperand(instr->index()); + Register length = ToRegister(instr->length()); + __ cmp(length, index); + cc = CommuteCondition(cc); + } else { + Register index = ToRegister(instr->index()); + Operand length = ToOperand(instr->length()); + __ cmp(index, length); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { Label done; - __ b(NegateCondition(condition), &done); + __ b(NegateCondition(cc), &done); __ stop("eliminated bounds check failed"); __ bind(&done); } else { - DeoptimizeIf(condition, check->environment()); + DeoptimizeIf(cc, instr->environment()); } } -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - if (instr->hydrogen()->skip_check()) return; - - if (instr->index()->IsConstantOperand()) { - int constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->hydrogen()->length()->representation().IsSmi()) { - __ mov(ip, Operand(Smi::FromInt(constant_index))); - } else { - __ mov(ip, Operand(constant_index)); - } - __ cmp(ip, ToRegister(instr->length())); - } else { - __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); - } - Condition condition = instr->hydrogen()->allow_equality() ? hi : hs; - ApplyCheckIf(condition, instr); -} - - void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; @@ -4168,16 +4251,12 @@ int element_size_shift = ElementsKindToShiftSize(elements_kind); int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) - ? 
FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; + int base_offset = instr->base_offset(); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS || elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - int base_offset = - (instr->additional_index() << element_size_shift) + additional_offset; Register address = scratch0(); DwVfpRegister value(ToDoubleRegister(instr->value())); if (key_is_constant) { @@ -4202,7 +4281,7 @@ MemOperand mem_operand = PrepareKeyedOperand( key, external_pointer, key_is_constant, constant_key, element_size_shift, shift_size, - instr->additional_index(), additional_offset); + base_offset); switch (elements_kind) { case EXTERNAL_UINT8_CLAMPED_ELEMENTS: case EXTERNAL_INT8_ELEMENTS: @@ -4249,6 +4328,7 @@ Register scratch = scratch0(); DwVfpRegister double_scratch = double_scratch0(); bool key_is_constant = instr->key()->IsConstantOperand(); + int base_offset = instr->base_offset(); // Calculate the effective address of the slot in the array to store the // double value. @@ -4259,13 +4339,11 @@ Abort(kArrayIndexConstantValueTooBig); } __ add(scratch, elements, - Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand((constant_key << element_size_shift) + base_offset)); } else { int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? 
(element_size_shift - kSmiTagSize) : element_size_shift; - __ add(scratch, elements, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ add(scratch, elements, Operand(base_offset)); __ add(scratch, scratch, Operand(ToRegister(instr->key()), LSL, shift_size)); } @@ -4278,10 +4356,9 @@ __ Assert(ne, kDefaultNaNModeNotSet); } __ VFPCanonicalizeNaN(double_scratch, value); - __ vstr(double_scratch, scratch, - instr->additional_index() << element_size_shift); + __ vstr(double_scratch, scratch, 0); } else { - __ vstr(value, scratch, instr->additional_index() << element_size_shift); + __ vstr(value, scratch, 0); } } @@ -4293,14 +4370,13 @@ : no_reg; Register scratch = scratch0(); Register store_base = scratch; - int offset = 0; + int offset = instr->base_offset(); // Do the store. if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); + offset += ToInteger32(const_operand) * kPointerSize; store_base = elements; } else { // Even though the HLoadKeyed instruction forces the input @@ -4312,23 +4388,23 @@ } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); } - __ str(value, FieldMemOperand(store_base, offset)); + __ str(value, MemOperand(store_base, offset)); if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. 
- __ add(key, store_base, Operand(offset - kHeapObjectTag)); + __ add(key, store_base, Operand(offset)); __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); } } @@ -4346,10 +4422,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(r2)); - ASSERT(ToRegister(instr->key()).is(r1)); - ASSERT(ToRegister(instr->value()).is(r0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); Handle<Code> ic = instr->strict_mode() == STRICT ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() @@ -4377,19 +4453,21 @@ __ mov(new_map_reg, Operand(to_map)); __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. 
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, - scratch, GetLinkRegisterState(), kDontSaveFPRegs); - } else { - ASSERT(ToRegister(instr->context()).is(cp)); - PushSafepointRegistersScope scope( - this, Safepoint::kWithRegistersAndDoubles); - __ Move(r0, object_reg); + __ RecordWriteForMap(object_reg, + new_map_reg, + scratch, + GetLinkRegisterState(), + kDontSaveFPRegs); + } else { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(object_reg.is(r0)); + PushSafepointRegistersScope scope(this); __ Move(r1, to_map); bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; - TransitionElementsKindStub stub(from_kind, to_kind, is_js_array); + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); __ CallStub(&stub); - RecordSafepointWithRegistersAndDoubles( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); } __ bind(¬_applicable); } @@ -4406,12 +4484,13 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).is(r1)); - ASSERT(ToRegister(instr->right()).is(r0)); - StringAddStub stub(instr->hydrogen()->flags(), + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(r1)); + DCHECK(ToRegister(instr->right()).is(r0)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -4450,7 +4529,7 @@ // contained in the register pointer map. __ mov(result, Operand::Zero()); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ push(string); // Push the index as a smi. This is safe because of the checks in // DoStringCharCodeAt above. 
@@ -4463,7 +4542,7 @@ __ SmiTag(index); __ push(index); } - CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr, + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, instr->context()); __ AssertSmi(r0); __ SmiUntag(r0); @@ -4487,10 +4566,10 @@ DeferredStringCharFromCode* deferred = new(zone()) DeferredStringCharFromCode(this, instr); - ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); Register result = ToRegister(instr->result()); - ASSERT(!char_code.is(result)); + DCHECK(!char_code.is(result)); __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); __ b(hi, deferred->entry()); @@ -4513,7 +4592,7 @@ // contained in the register pointer map. __ mov(result, Operand::Zero()); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ SmiTag(char_code); __ push(char_code); CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); @@ -4523,9 +4602,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister() || input->IsStackSlot()); + DCHECK(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); - ASSERT(output->IsDoubleRegister()); + DCHECK(output->IsDoubleRegister()); SwVfpRegister single_scratch = double_scratch0().low(); if (input->IsStackSlot()) { Register scratch = scratch0(); @@ -4646,15 +4725,15 @@ __ mov(dst, Operand::Zero()); // Preserve the value of all registers. - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. 
// The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ sub(r0, r0, Operand(kHeapObjectTag)); @@ -4711,14 +4790,14 @@ Register reg = ToRegister(instr->result()); __ mov(reg, Operand::Zero()); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ sub(r0, r0, Operand(kHeapObjectTag)); @@ -4767,7 +4846,7 @@ NumberUntagDMode mode) { Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); - ASSERT(!result_reg.is(double_scratch0())); + DCHECK(!result_reg.is(double_scratch0())); Label convert, load_smi, done; if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. 
@@ -4804,7 +4883,7 @@ } } else { __ SmiUntag(scratch, input_reg); - ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); } // Smi to double register conversion __ bind(&load_smi); @@ -4822,8 +4901,8 @@ LowDwVfpRegister double_scratch = double_scratch0(); DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); + DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); + DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); Label done; @@ -4903,8 +4982,8 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister()); - ASSERT(input->Equals(instr->result())); + DCHECK(input->IsRegister()); + DCHECK(input->Equals(instr->result())); Register input_reg = ToRegister(input); @@ -4926,9 +5005,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); LOperand* result = instr->result(); - ASSERT(result->IsDoubleRegister()); + DCHECK(result->IsDoubleRegister()); Register input_reg = ToRegister(input); DwVfpRegister result_reg = ToDoubleRegister(result); @@ -5005,7 +5084,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { LOperand* input = instr->value(); __ SmiTst(ToRegister(input)); DeoptimizeIf(eq, instr->environment()); @@ -5044,7 +5123,7 @@ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); if (IsPowerOf2(mask)) { - ASSERT(tag == 0 || IsPowerOf2(tag)); + DCHECK(tag == 0 || IsPowerOf2(tag)); __ tst(scratch, Operand(mask)); DeoptimizeIf(tag == 0 ? 
ne : eq, instr->environment()); } else { @@ -5075,7 +5154,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ push(object); __ mov(cp, Operand::Zero()); __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); @@ -5106,32 +5185,39 @@ Register object_; }; - if (instr->hydrogen()->CanOmitMapChecks()) return; + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } + Register map_reg = scratch0(); LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); Register reg = ToRegister(input); __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg); __ bind(deferred->check_maps()); } - UniqueSet<Map> map_set = instr->hydrogen()->map_set(); + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); Label success; - for (int i = 0; i < map_set.size() - 1; i++) { - Handle<Map> map = map_set.at(i).handle(); + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); __ CompareMap(map_reg, map, &success); __ b(eq, &success); } - Handle<Map> map = map_set.at(map_set.size() - 1).handle(); + Handle<Map> map = maps->at(maps->size() - 1).handle(); __ CompareMap(map_reg, map, &success); - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { __ b(ne, deferred->entry()); } else { DeoptimizeIf(ne, instr->environment()); @@ -5237,11 +5323,11 @@ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); } if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - 
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); } @@ -5254,33 +5340,25 @@ } } else { Register size = ToRegister(instr->size()); - __ Allocate(size, - result, - scratch, - scratch2, - deferred->entry(), - flags); + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); } __ bind(deferred->exit()); if (instr->hydrogen()->MustPrefillWithFiller()) { + STATIC_ASSERT(kHeapObjectTag == 1); if (instr->size()->IsConstantOperand()) { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ mov(scratch, Operand(size)); + __ mov(scratch, Operand(size - kHeapObjectTag)); } else { - scratch = ToRegister(instr->size()); + __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); } - __ sub(scratch, scratch, Operand(kPointerSize)); - __ sub(result, result, Operand(kHeapObjectTag)); + __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); Label loop; __ bind(&loop); - __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); + __ sub(scratch, scratch, Operand(kPointerSize), SetCC); __ str(scratch2, MemOperand(result, scratch)); - __ sub(scratch, scratch, Operand(kPointerSize)); - __ cmp(scratch, Operand(0)); __ b(ge, &loop); - __ add(result, result, Operand(kHeapObjectTag)); } } @@ -5293,25 +5371,31 @@ // contained in the register pointer map. 
__ mov(result, Operand(Smi::FromInt(0))); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); if (instr->size()->IsRegister()) { Register size = ToRegister(instr->size()); - ASSERT(!size.is(result)); + DCHECK(!size.is(result)); __ SmiTag(size); __ push(size); } else { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ Push(Smi::FromInt(size)); + if (size >= 0 && size <= Smi::kMaxValue) { + __ Push(Smi::FromInt(size)); + } else { + // We should never get here at runtime => abort + __ stop("invalid allocation size"); + return; + } } int flags = AllocateDoubleAlignFlag::encode( instr->hydrogen()->MustAllocateDoubleAligned()); if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); } else { flags = AllocateTargetSpace::update(flags, NEW_SPACE); @@ -5319,20 +5403,20 @@ __ Push(Smi::FromInt(flags)); CallRuntimeFromDeferred( - Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context()); + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); __ StoreToSafepointRegisterSlot(r0, result); } void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - ASSERT(ToRegister(instr->value()).is(r0)); + DCHECK(ToRegister(instr->value()).is(r0)); __ push(r0); CallRuntime(Runtime::kToFastProperties, 1, instr); } void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Label materialized; // 
Registers will be used as follows: // r6 = literals array. @@ -5353,7 +5437,7 @@ __ mov(r4, Operand(instr->hydrogen()->pattern())); __ mov(r3, Operand(instr->hydrogen()->flags())); __ Push(r6, r5, r4, r3); - CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); __ mov(r1, r0); __ bind(&materialized); @@ -5366,7 +5450,7 @@ __ bind(&runtime_allocate); __ mov(r0, Operand(Smi::FromInt(size))); __ Push(r1, r0); - CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); __ pop(r1); __ bind(&allocated); @@ -5376,21 +5460,22 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && instr->hydrogen()->has_no_literals()) { - FastNewClosureStub stub(instr->hydrogen()->strict_mode(), + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), instr->hydrogen()->is_generator()); __ mov(r2, Operand(instr->hydrogen()->shared_info())); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ mov(r2, Operand(instr->hydrogen()->shared_info())); __ mov(r1, Operand(pretenure ? 
factory()->true_value() : factory()->false_value())); __ Push(cp, r2, r1); - CallRuntime(Runtime::kHiddenNewClosure, 3, instr); + CallRuntime(Runtime::kNewClosure, 3, instr); } } @@ -5421,13 +5506,14 @@ Handle<String> type_name) { Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); - if (type_name->Equals(heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(type_name, factory->number_string())) { __ JumpIfSmi(input, true_label); __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); final_branch_condition = eq; - } else if (type_name->Equals(heap()->string_string())) { + } else if (String::Equals(type_name, factory->string_string())) { __ JumpIfSmi(input, false_label); __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); __ b(ge, false_label); @@ -5435,22 +5521,18 @@ __ tst(scratch, Operand(1 << Map::kIsUndetectable)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->symbol_string())) { + } else if (String::Equals(type_name, factory->symbol_string())) { __ JumpIfSmi(input, false_label); __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); final_branch_condition = eq; - } else if (type_name->Equals(heap()->boolean_string())) { + } else if (String::Equals(type_name, factory->boolean_string())) { __ CompareRoot(input, Heap::kTrueValueRootIndex); __ b(eq, true_label); __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = eq; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - final_branch_condition = eq; - - } else if (type_name->Equals(heap()->undefined_string())) { + } else if (String::Equals(type_name, factory->undefined_string())) { __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ b(eq, true_label); __ JumpIfSmi(input, false_label); @@ -5460,7 +5542,7 @@ __ tst(scratch, Operand(1 << 
Map::kIsUndetectable)); final_branch_condition = ne; - } else if (type_name->Equals(heap()->function_string())) { + } else if (String::Equals(type_name, factory->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); Register type_reg = scratch; __ JumpIfSmi(input, false_label); @@ -5469,13 +5551,11 @@ __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->object_string())) { + } else if (String::Equals(type_name, factory->object_string())) { Register map = scratch; __ JumpIfSmi(input, false_label); - if (!FLAG_harmony_typeof) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ b(eq, true_label); - } + __ CompareRoot(input, Heap::kNullValueRootIndex); + __ b(eq, true_label); __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, @@ -5503,7 +5583,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { - ASSERT(!temp1.is(temp2)); + DCHECK(!temp1.is(temp2)); // Get the frame pointer for the calling frame. __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); @@ -5527,7 +5607,7 @@ // Block literal pool emission for duration of padding. 
Assembler::BlockConstPoolScope block_const_pool(masm()); int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - ASSERT_EQ(0, padding_size % Assembler::kInstrSize); + DCHECK_EQ(0, padding_size % Assembler::kInstrSize); while (padding_size > 0) { __ nop(); padding_size -= Assembler::kInstrSize; @@ -5540,7 +5620,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { last_lazy_deopt_pc_ = masm()->pc_offset(); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); @@ -5573,12 +5653,12 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); RecordSafepointWithLazyDeopt( instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); } @@ -5597,7 +5677,7 @@ LStackCheck* instr_; }; - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); // There is no LLazyBailout instruction for stack-checks. We have to // prepare for lazy deoptimization explicitly here. 
@@ -5607,15 +5687,15 @@ __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(hs, &done); - PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); - ASSERT(instr->context()->IsRegister()); - ASSERT(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); + Handle<Code> stack_check = isolate()->builtins()->StackCheck(); + PredictableCodeSizeScope predictable(masm(), + CallCodeSize(stack_check, RelocInfo::CODE_TARGET)); + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(cp)); + CallCode(stack_check, RelocInfo::CODE_TARGET, instr); __ bind(&done); } else { - ASSERT(instr->hydrogen()->is_backwards_branch()); + DCHECK(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping. DeferredStackCheck* deferred_stack_check = new(zone()) DeferredStackCheck(this, instr); @@ -5641,7 +5721,7 @@ // If the environment were already registered, we would have no way of // backpatching it with the spill slot operands. 
- ASSERT(!environment->HasBeenRegistered()); + DCHECK(!environment->HasBeenRegistered()); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); GenerateOsrPrologue(); @@ -5716,13 +5796,61 @@ } +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ Push(object); + __ Push(index); + __ mov(cp, Operand::Zero()); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(r0, result); +} + + void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + result_(result), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register result_; + Register object_; + Register index_; + }; + Register object = ToRegister(instr->object()); Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, result, object, index); + Label out_of_object, done; + + __ tst(index, Operand(Smi::FromInt(1))); + __ b(ne, deferred->entry()); + __ mov(index, Operand(index, ASR, 1)); + __ cmp(index, Operand::Zero()); __ b(lt, &out_of_object); @@ -5738,10 +5866,26 @@ __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); + __ 
bind(deferred->exit()); __ bind(&done); } +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ Push(scope_info); + __ push(ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + #undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm/lithium-codegen-arm.h nodejs-0.11.15/deps/v8/src/arm/lithium-codegen-arm.h --- nodejs-0.11.13/deps/v8/src/arm/lithium-codegen-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/lithium-codegen-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_ #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ -#include "arm/lithium-arm.h" +#include "src/arm/lithium-arm.h" -#include "arm/lithium-gap-resolver-arm.h" -#include "deoptimizer.h" -#include "lithium-codegen.h" -#include "safepoint-table.h" -#include "scopes.h" -#include "v8utils.h" +#include "src/arm/lithium-gap-resolver-arm.h" +#include "src/deoptimizer.h" +#include "src/lithium-codegen.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -139,8 +116,12 @@ void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check); + Label* map_check, Label* bool_load); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -152,8 +133,7 @@ int constant_key, int element_size, int shift_size, - int additional_index, - int additional_offset); + int base_offset); // Emit frame translation commands for an environment. 
void WriteTranslation(LEnvironment* environment, Translation* translation); @@ -182,8 +162,6 @@ int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - void Abort(BailoutReason reason); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } void SaveCallerDoubles(); @@ -205,6 +183,8 @@ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS }; + int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode); + void CallCode( Handle<Code> code, RelocInfo::Mode mode, @@ -258,7 +238,6 @@ LEnvironment* environment, Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition condition, LEnvironment* environment); - void ApplyCheckIf(Condition condition, LBoundsCheck* check); void AddToTranslation(LEnvironment* environment, Translation* translation, @@ -291,9 +270,6 @@ void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, Safepoint::DeoptMode mode); - void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); void RecordAndWritePosition(int position) V8_OVERRIDE; @@ -377,38 +353,17 @@ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { public: - PushSafepointRegistersScope(LCodeGen* codegen, - Safepoint::Kind kind) + explicit PushSafepointRegistersScope(LCodeGen* codegen) : codegen_(codegen) { - ASSERT(codegen_->info()->is_calling()); - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = kind; - - switch (codegen_->expected_safepoint_kind_) { - case Safepoint::kWithRegisters: - codegen_->masm_->PushSafepointRegisters(); - break; - case Safepoint::kWithRegistersAndDoubles: - codegen_->masm_->PushSafepointRegistersAndDoubles(); - break; - default: - UNREACHABLE(); - } + DCHECK(codegen_->info()->is_calling()); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; + codegen_->masm_->PushSafepointRegisters(); } 
~PushSafepointRegistersScope() { - Safepoint::Kind kind = codegen_->expected_safepoint_kind_; - ASSERT((kind & Safepoint::kWithRegisters) != 0); - switch (kind) { - case Safepoint::kWithRegisters: - codegen_->masm_->PopSafepointRegisters(); - break; - case Safepoint::kWithRegistersAndDoubles: - codegen_->masm_->PopSafepointRegistersAndDoubles(); - break; - default: - UNREACHABLE(); - } + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + codegen_->masm_->PopSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kSimple; } diff -Nru nodejs-0.11.13/deps/v8/src/arm/lithium-gap-resolver-arm.cc nodejs-0.11.15/deps/v8/src/arm/lithium-gap-resolver-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/lithium-gap-resolver-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/lithium-gap-resolver-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,35 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "arm/lithium-gap-resolver-arm.h" -#include "arm/lithium-codegen-arm.h" +#include "src/arm/lithium-codegen-arm.h" +#include "src/arm/lithium-gap-resolver-arm.h" namespace v8 { namespace internal { -static const Register kSavedValueRegister = { 9 }; +// We use the root register to spill a value while breaking a cycle in parallel +// moves. We don't need access to roots while resolving the move list and using +// the root register has two advantages: +// - It is not in crankshaft allocatable registers list, so it can't interfere +// with any of the moves we are resolving. +// - We don't need to push it on the stack, as we can reload it with its value +// once we have resolved a cycle. +#define kSavedValueRegister kRootRegister + LGapResolver::LGapResolver(LCodeGen* owner) : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false), - saved_destination_(NULL) { } + saved_destination_(NULL), need_to_restore_root_(false) { } + + +#define __ ACCESS_MASM(cgen_->masm()) void LGapResolver::Resolve(LParallelMove* parallel_move) { - ASSERT(moves_.is_empty()); + DCHECK(moves_.is_empty()); // Build up a worklist of moves. BuildInitialMoveList(parallel_move); @@ -62,11 +50,17 @@ // Perform the moves with constant sources. 
for (int i = 0; i < moves_.length(); ++i) { if (!moves_[i].IsEliminated()) { - ASSERT(moves_[i].source()->IsConstantOperand()); + DCHECK(moves_[i].source()->IsConstantOperand()); EmitMove(i); } } + if (need_to_restore_root_) { + DCHECK(kSavedValueRegister.is(kRootRegister)); + __ InitializeRootRegister(); + need_to_restore_root_ = false; + } + moves_.Rewind(0); } @@ -100,13 +94,13 @@ // An additional complication is that moves to MemOperands with large // offsets (more than 1K or 4K) require us to spill this spilled value to // the stack, to free up the register. - ASSERT(!moves_[index].IsPending()); - ASSERT(!moves_[index].IsRedundant()); + DCHECK(!moves_[index].IsPending()); + DCHECK(!moves_[index].IsRedundant()); // Clear this move's destination to indicate a pending move. The actual // destination is saved in a stack allocated local. Multiple moves can // be pending because this function is recursive. - ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. LOperand* destination = moves_[index].destination(); moves_[index].set_destination(NULL); @@ -133,7 +127,7 @@ // a scratch register to break it. LMoveOperands other_move = moves_[root_index_]; if (other_move.Blocks(destination)) { - ASSERT(other_move.IsPending()); + DCHECK(other_move.IsPending()); BreakCycle(index); return; } @@ -144,31 +138,32 @@ void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS // No operand should be the destination for more than one move. 
for (int i = 0; i < moves_.length(); ++i) { LOperand* destination = moves_[i].destination(); for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); } } #endif } -#define __ ACCESS_MASM(cgen_->masm()) void LGapResolver::BreakCycle(int index) { - // We save in a register the value that should end up in the source of - // moves_[root_index]. After performing all moves in the tree rooted - // in that move, we save the value to that source. - ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); - ASSERT(!in_cycle_); + // We save in a register the source of that move and we remember its + // destination. Then we mark this move as resolved so the cycle is + // broken and we can perform the other moves. + DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); + DCHECK(!in_cycle_); in_cycle_ = true; LOperand* source = moves_[index].source(); saved_destination_ = moves_[index].destination(); if (source->IsRegister()) { + need_to_restore_root_ = true; __ mov(kSavedValueRegister, cgen_->ToRegister(source)); } else if (source->IsStackSlot()) { + need_to_restore_root_ = true; __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); @@ -183,10 +178,9 @@ void LGapResolver::RestoreValue() { - ASSERT(in_cycle_); - ASSERT(saved_destination_ != NULL); + DCHECK(in_cycle_); + DCHECK(saved_destination_ != NULL); - // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. 
if (saved_destination_->IsRegister()) { __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); } else if (saved_destination_->IsStackSlot()) { @@ -216,7 +210,7 @@ if (destination->IsRegister()) { __ mov(cgen_->ToRegister(destination), source_register); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); __ str(source_register, cgen_->ToMemOperand(destination)); } } else if (source->IsStackSlot()) { @@ -224,22 +218,17 @@ if (destination->IsRegister()) { __ ldr(cgen_->ToRegister(destination), source_operand); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - if (!destination_operand.OffsetIsUint12Encodable()) { - // ip is overwritten while saving the value to the destination. - // Therefore we can't use ip. It is OK if the read from the source - // destroys ip, since that happens before the value is read. - __ vldr(kScratchDoubleReg.low(), source_operand); - __ vstr(kScratchDoubleReg.low(), destination_operand); - } else { - __ ldr(ip, source_operand); - __ str(ip, destination_operand); - } + if (!destination_operand.OffsetIsUint12Encodable()) { + // ip is overwritten while saving the value to the destination. + // Therefore we can't use ip. It is OK if the read from the source + // destroys ip, since that happens before the value is read. + __ vldr(kScratchDoubleReg.low(), source_operand); + __ vstr(kScratchDoubleReg.low(), destination_operand); } else { - __ ldr(kSavedValueRegister, source_operand); - __ str(kSavedValueRegister, destination_operand); + __ ldr(ip, source_operand); + __ str(ip, destination_operand); } } @@ -259,16 +248,16 @@ double v = cgen_->ToDouble(constant_source); __ Vmov(result, v, ip); } else { - ASSERT(destination->IsStackSlot()); - ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. 
+ DCHECK(destination->IsStackSlot()); + DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. + need_to_restore_root_ = true; Representation r = cgen_->IsSmi(constant_source) ? Representation::Smi() : Representation::Integer32(); if (cgen_->IsInteger32(constant_source)) { __ mov(kSavedValueRegister, Operand(cgen_->ToRepresentation(constant_source, r))); } else { - __ Move(kSavedValueRegister, - cgen_->ToHandle(constant_source)); + __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source)); } __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); } @@ -278,7 +267,7 @@ if (destination->IsDoubleRegister()) { __ vmov(cgen_->ToDoubleRegister(destination), source_register); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); __ vstr(source_register, cgen_->ToMemOperand(destination)); } @@ -287,19 +276,14 @@ if (destination->IsDoubleRegister()) { __ vldr(cgen_->ToDoubleRegister(destination), source_operand); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { - // kSavedDoubleValueRegister was used to break the cycle, - // but kSavedValueRegister is free. - MemOperand source_high_operand = - cgen_->ToHighMemOperand(source); - MemOperand destination_high_operand = - cgen_->ToHighMemOperand(destination); - __ ldr(kSavedValueRegister, source_operand); - __ str(kSavedValueRegister, destination_operand); - __ ldr(kSavedValueRegister, source_high_operand); - __ str(kSavedValueRegister, destination_high_operand); + // kScratchDoubleReg was used to break the cycle. 
+ __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg); + __ vldr(kScratchDoubleReg, source_operand); + __ vstr(kScratchDoubleReg, destination_operand); + __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg); } else { __ vldr(kScratchDoubleReg, source_operand); __ vstr(kScratchDoubleReg, destination_operand); diff -Nru nodejs-0.11.13/deps/v8/src/arm/lithium-gap-resolver-arm.h nodejs-0.11.15/deps/v8/src/arm/lithium-gap-resolver-arm.h --- nodejs-0.11.13/deps/v8/src/arm/lithium-gap-resolver-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/lithium-gap-resolver-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ #define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ -#include "v8.h" +#include "src/v8.h" -#include "lithium.h" +#include "src/lithium.h" namespace v8 { namespace internal { @@ -76,6 +53,10 @@ int root_index_; bool in_cycle_; LOperand* saved_destination_; + + // We use the root register as a scratch in a few places. When that happens, + // this flag is set to indicate that it needs to be restored. + bool need_to_restore_root_; }; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm/macro-assembler-arm.cc nodejs-0.11.15/deps/v8/src/arm/macro-assembler-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/macro-assembler-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/macro-assembler-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <limits.h> // For LONG_MIN, LONG_MAX. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "bootstrapper.h" -#include "codegen.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "isolate-inl.h" -#include "runtime.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" namespace v8 { namespace internal { @@ -59,21 +36,21 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); mov(pc, Operand(target, rmode), LeaveCC, cond); } void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { - ASSERT(!RelocInfo::IsCodeTarget(rmode)); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast<intptr_t>(target), rmode, cond); } void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); // 'code' is always generated ARM code, never THUMB code AllowDeferredHandleDereference embedding_raw_address; Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); @@ -91,31 +68,33 @@ Label start; bind(&start); blx(target, cond); - ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); + DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); } int MacroAssembler::CallSize( Address target, RelocInfo::Mode rmode, Condition cond) { - int size = 2 * kInstrSize; Instr mov_instr = cond | MOV | LeaveCC; - intptr_t immediate = reinterpret_cast<intptr_t>(target); - if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) { - size += kInstrSize; - } - return size; + Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); + return kInstrSize + + mov_operand.instructions_required(this, mov_instr) * kInstrSize; } -int MacroAssembler::CallSizeNotPredictableCodeSize( - Address target, 
RelocInfo::Mode rmode, Condition cond) { - int size = 2 * kInstrSize; +int MacroAssembler::CallStubSize( + CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { + return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); +} + + +int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate, + Address target, + RelocInfo::Mode rmode, + Condition cond) { Instr mov_instr = cond | MOV | LeaveCC; - intptr_t immediate = reinterpret_cast<intptr_t>(target); - if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) { - size += kInstrSize; - } - return size; + Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); + return kInstrSize + + mov_operand.instructions_required(NULL, mov_instr) * kInstrSize; } @@ -159,7 +138,7 @@ mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); blx(ip, cond); - ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); + DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); if (mode == NEVER_INLINE_TARGET_ADDRESS) { set_predictable_code_size(old_predictable_code_size); } @@ -182,7 +161,7 @@ TargetAddressStorageMode mode) { Label start; bind(&start); - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { SetRecordedAstId(ast_id); rmode = RelocInfo::CODE_TARGET_WITH_ID; @@ -243,7 +222,7 @@ if (value->IsSmi()) { mov(dst, Operand(value)); } else { - ASSERT(value->IsHeapObject()); + DCHECK(value->IsHeapObject()); if (isolate()->heap()->InNewSpace(*value)) { Handle<Cell> cell = isolate()->factory()->NewCell(value); mov(dst, Operand(cell)); @@ -269,13 +248,26 @@ } +void MacroAssembler::Mls(Register dst, Register src1, Register src2, + Register srcA, Condition cond) { + if (CpuFeatures::IsSupported(MLS)) { + CpuFeatureScope scope(this, MLS); + mls(dst, src1, src2, srcA, cond); + } else { + DCHECK(!srcA.is(ip)); + mul(ip, src1, src2, LeaveCC, cond); + sub(dst, srcA, ip, LeaveCC, cond); + } +} + + 
void MacroAssembler::And(Register dst, Register src1, const Operand& src2, Condition cond) { if (!src2.is_reg() && !src2.must_output_reloc_info(this) && src2.immediate() == 0) { mov(dst, Operand::Zero(), LeaveCC, cond); - } else if (!src2.is_single_instruction(this) && + } else if (!(src2.instructions_required(this) == 1) && !src2.must_output_reloc_info(this) && CpuFeatures::IsSupported(ARMv7) && IsPowerOf2(src2.immediate() + 1)) { @@ -289,7 +281,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, Condition cond) { - ASSERT(lsb < 32); + DCHECK(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); and_(dst, src1, Operand(mask), LeaveCC, cond); @@ -304,7 +296,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, Condition cond) { - ASSERT(lsb < 32); + DCHECK(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); and_(dst, src1, Operand(mask), LeaveCC, cond); @@ -328,10 +320,10 @@ int lsb, int width, Condition cond) { - ASSERT(0 <= lsb && lsb < 32); - ASSERT(0 <= width && width < 32); - ASSERT(lsb + width < 32); - ASSERT(!scratch.is(dst)); + DCHECK(0 <= lsb && lsb < 32); + DCHECK(0 <= width && width < 32); + DCHECK(lsb + width < 32); + DCHECK(!scratch.is(dst)); if (width == 0) return; if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); @@ -347,7 +339,7 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, Condition cond) { - ASSERT(lsb < 32); + DCHECK(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); bic(dst, src, Operand(mask)); @@ -361,13 +353,13 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, Condition cond) { if (!CpuFeatures::IsSupported(ARMv7) || 
predictable_code_size()) { - ASSERT(!dst.is(pc) && !src.rm().is(pc)); - ASSERT((satpos >= 0) && (satpos <= 31)); + DCHECK(!dst.is(pc) && !src.rm().is(pc)); + DCHECK((satpos >= 0) && (satpos <= 31)); // These asserts are required to ensure compatibility with the ARMv7 // implementation. - ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL)); - ASSERT(src.rs().is(no_reg)); + DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL)); + DCHECK(src.rs().is(no_reg)); Label done; int satval = (1 << satpos) - 1; @@ -392,7 +384,7 @@ void MacroAssembler::Load(Register dst, const MemOperand& src, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8()) { ldrsb(dst, src); } else if (r.IsUInteger8()) { @@ -410,12 +402,17 @@ void MacroAssembler::Store(Register src, const MemOperand& dst, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { strb(src, dst); } else if (r.IsInteger16() || r.IsUInteger16()) { strh(src, dst); } else { + if (r.IsHeapObject()) { + AssertNotSmi(src); + } else if (r.IsSmi()) { + AssertSmi(src); + } str(src, dst); } } @@ -448,7 +445,7 @@ Register scratch, Condition cond, Label* branch) { - ASSERT(cond == eq || cond == ne); + DCHECK(cond == eq || cond == ne); and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); b(cond, branch); @@ -463,7 +460,8 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; @@ -475,7 +473,7 @@ // Although the object register is tagged, the offset is relative to the start // of the object, so so offset must be a multiple of kPointerSize. 
- ASSERT(IsAligned(offset, kPointerSize)); + DCHECK(IsAligned(offset, kPointerSize)); add(dst, object, Operand(offset - kHeapObjectTag)); if (emit_debug_code()) { @@ -492,7 +490,8 @@ lr_status, save_fp, remembered_set_action, - OMIT_SMI_CHECK); + OMIT_SMI_CHECK, + pointers_to_here_check_for_value); bind(&done); @@ -505,26 +504,99 @@ } +// Will clobber 4 registers: object, map, dst, ip. The +// register 'object' contains a heap object pointer. +void MacroAssembler::RecordWriteForMap(Register object, + Register map, + Register dst, + LinkRegisterStatus lr_status, + SaveFPRegsMode fp_mode) { + if (emit_debug_code()) { + ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset)); + cmp(dst, Operand(isolate()->factory()->meta_map())); + Check(eq, kWrongAddressOrValuePassedToRecordWrite); + } + + if (!FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset)); + cmp(ip, map); + Check(eq, kWrongAddressOrValuePassedToRecordWrite); + } + + Label done; + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. This optimization + // relies on the fact that maps can never be in new space. + CheckPageFlag(map, + map, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + + add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + tst(dst, Operand((1 << kPointerSizeLog2) - 1)); + b(eq, &ok); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + // Record the actual write. + if (lr_status == kLRHasNotBeenSaved) { + push(lr); + } + RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, + fp_mode); + CallStub(&stub); + if (lr_status == kLRHasNotBeenSaved) { + pop(lr); + } + + bind(&done); + + // Count number of write barriers in generated code. 
+ isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + mov(dst, Operand(BitCast<int32_t>(kZapValue + 12))); + mov(map, Operand(BitCast<int32_t>(kZapValue + 16))); + } +} + + // Will clobber 4 registers: object, address, scratch, ip. The // register 'object' contains a heap object pointer. The heap object // tag is shifted away. -void MacroAssembler::RecordWrite(Register object, - Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!object.is(value)); +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + LinkRegisterStatus lr_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!object.is(value)); if (emit_debug_code()) { ldr(ip, MemOperand(address)); cmp(ip, value); Check(eq, kWrongAddressOrValuePassedToRecordWrite); } - // Count number of write barriers in generated code. - isolate()->counters()->write_barriers_static()->Increment(); - // TODO(mstarzinger): Dynamic counter missing. + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } // First, check if a write barrier is even needed. The tests below // catch stores of smis and stores into the young generation. @@ -534,11 +606,13 @@ JumpIfSmi(value, &done); } - CheckPageFlag(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - eq, - &done); + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. 
+ MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + } CheckPageFlag(object, value, // Used as scratch. MemoryChunk::kPointersFromHereAreInterestingMask, @@ -549,7 +623,8 @@ if (lr_status == kLRHasNotBeenSaved) { push(lr); } - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); CallStub(&stub); if (lr_status == kLRHasNotBeenSaved) { pop(lr); @@ -557,6 +632,11 @@ bind(&done); + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, + value); + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { @@ -593,12 +673,12 @@ if (and_then == kFallThroughAtEnd) { b(eq, &done); } else { - ASSERT(and_then == kReturnAtEnd); + DCHECK(and_then == kReturnAtEnd); Ret(eq); } push(lr); StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(fp_mode); + StoreBufferOverflowStub(isolate(), fp_mode); CallStub(&store_buffer_overflow); pop(lr); bind(&done); @@ -609,7 +689,7 @@ void MacroAssembler::PushFixedFrame(Register marker_reg) { - ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code()); + DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code()); stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() | (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | @@ -619,7 +699,7 @@ void MacroAssembler::PopFixedFrame(Register marker_reg) { - ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code()); + DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code()); ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() | (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | @@ -631,11 +711,11 @@ // Push and pop all registers that can hold pointers. 
void MacroAssembler::PushSafepointRegisters() { // Safepoints expect a block of contiguous register values starting with r0: - ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); + DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); // Safepoints expect a block of kNumSafepointRegisters values on the // stack, so adjust the stack for unsaved registers. const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; - ASSERT(num_unsaved >= 0); + DCHECK(num_unsaved >= 0); sub(sp, sp, Operand(num_unsaved * kPointerSize)); stm(db_w, sp, kSafepointSavedRegisters); } @@ -648,39 +728,6 @@ } -void MacroAssembler::PushSafepointRegistersAndDoubles() { - // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); - PushSafepointRegisters(); - // Only save allocatable registers. - ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14)); - ASSERT(DwVfpRegister::NumReservedRegisters() == 2); - if (CpuFeatures::IsSupported(VFP32DREGS)) { - vstm(db_w, sp, d16, d31); - } - vstm(db_w, sp, d0, d13); -} - - -void MacroAssembler::PopSafepointRegistersAndDoubles() { - // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); - // Only save allocatable registers. 
- ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14)); - ASSERT(DwVfpRegister::NumReservedRegisters() == 2); - vldm(ia_w, sp, d0, d13); - if (CpuFeatures::IsSupported(VFP32DREGS)) { - vldm(ia_w, sp, d16, d31); - } - PopSafepointRegisters(); -} - -void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, - Register dst) { - str(src, SafepointRegistersAndDoublesSlot(dst)); -} - - void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { str(src, SafepointRegisterSlot(dst)); } @@ -694,7 +741,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { // The registers are pushed starting with the highest encoding, // which means that lowest encodings are closest to the stack pointer. - ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); + DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters); return reg_code; } @@ -706,7 +753,7 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); + DCHECK(!serializer_enabled()); // General purpose registers are pushed last on the stack. int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; @@ -716,12 +763,12 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2, const MemOperand& src, Condition cond) { - ASSERT(src.rm().is(no_reg)); - ASSERT(!dst1.is(lr)); // r14. + DCHECK(src.rm().is(no_reg)); + DCHECK(!dst1.is(lr)); // r14. // V8 does not use this addressing mode, so the fallback code // below doesn't support it yet. - ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); + DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex)); // Generate two ldr instructions if ldrd is not available. if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && @@ -740,7 +787,7 @@ ldr(dst2, src2, cond); } } else { // PostIndex or NegPostIndex. 
- ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex)); + DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex)); if (dst1.is(src.rn())) { ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); ldr(dst1, src, cond); @@ -757,12 +804,12 @@ void MacroAssembler::Strd(Register src1, Register src2, const MemOperand& dst, Condition cond) { - ASSERT(dst.rm().is(no_reg)); - ASSERT(!src1.is(lr)); // r14. + DCHECK(dst.rm().is(no_reg)); + DCHECK(!src1.is(lr)); // r14. // V8 does not use this addressing mode, so the fallback code // below doesn't support it yet. - ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); + DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); // Generate two str instructions if strd is not available. if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && @@ -776,7 +823,7 @@ str(src1, dst, cond); str(src2, dst2, cond); } else { // PostIndex or NegPostIndex. - ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); + DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); dst2.set_offset(dst2.offset() - 4); str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); str(src2, dst2, cond); @@ -789,6 +836,14 @@ // If needed, restore wanted bits of FPSCR. Label fpscr_done; vmrs(scratch); + if (emit_debug_code()) { + Label rounding_mode_correct; + tst(scratch, Operand(kVFPRoundingModeMask)); + b(eq, &rounding_mode_correct); + // Don't call Assert here, since Runtime_Abort could re-enter here. 
+ stop("Default rounding mode not set"); + bind(&rounding_mode_correct); + } tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); b(ne, &fpscr_done); orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); @@ -898,24 +953,30 @@ if (FLAG_enable_ool_constant_pool) { int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - Instruction::kPCReadOffset; - ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset)); + DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset)); ldr(pp, MemOperand(pc, constant_pool_offset)); } } -void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { - if (frame_mode == BUILD_STUB_FRAME) { - PushFixedFrame(); - Push(Smi::FromInt(StackFrame::STUB)); - // Adjust FP to point to saved FP. - add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - } else { - PredictableCodeSizeScope predictible_code_size_scope( - this, kNoCodeAgeSequenceLength * Assembler::kInstrSize); +void MacroAssembler::StubPrologue() { + PushFixedFrame(); + Push(Smi::FromInt(StackFrame::STUB)); + // Adjust FP to point to saved FP. + add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + if (FLAG_enable_ool_constant_pool) { + LoadConstantPoolPointerRegister(); + set_constant_pool_available(true); + } +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + { PredictableCodeSizeScope predictible_code_size_scope( + this, kNoCodeAgeSequenceLength); // The following three instructions must remain together and unmodified // for code aging to work properly. - if (isolate()->IsCodePreAgingActive()) { + if (code_pre_aging) { // Pre-age the code. Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); add(r0, pc, Operand(-8)); @@ -976,9 +1037,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Set up the frame structure on the stack. 
- ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); - ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); - ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); + DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); + DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); + DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); Push(lr, fp); mov(fp, Operand(sp)); // Set up new frame pointer. // Reserve room for saved entry sp and code object. @@ -989,7 +1050,6 @@ } if (FLAG_enable_ool_constant_pool) { str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); - LoadConstantPoolPointerRegister(); } mov(ip, Operand(CodeObject())); str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); @@ -1015,7 +1075,7 @@ const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); if (frame_alignment > 0) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); and_(sp, sp, Operand(-frame_alignment)); } @@ -1046,7 +1106,7 @@ // environment. // Note: This will break if we ever start generating snapshots on one ARM // platform for another ARM platform with a different alignment. - return OS::ActivationFrameAlignment(); + return base::OS::ActivationFrameAlignment(); #else // V8_HOST_ARCH_ARM // If we are using the simulator then we should always align to the expected // alignment. As the simulator is used to generate snapshots we do not know @@ -1134,12 +1194,12 @@ // The code below is made a lot easier because the calling code already sets // up actual and expected registers according to the contract if values are // passed in registers. 
- ASSERT(actual.is_immediate() || actual.reg().is(r0)); - ASSERT(expected.is_immediate() || expected.reg().is(r2)); - ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); + DCHECK(actual.is_immediate() || actual.reg().is(r0)); + DCHECK(expected.is_immediate() || expected.reg().is(r2)); + DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); if (expected.is_immediate()) { - ASSERT(actual.is_immediate()); + DCHECK(actual.is_immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; } else { @@ -1196,7 +1256,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); Label done; bool definitely_mismatches = false; @@ -1209,7 +1269,7 @@ Call(code); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(code); } @@ -1225,10 +1285,10 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in r1. - ASSERT(fun.is(r1)); + DCHECK(fun.is(r1)); Register expected_reg = r2; Register code_reg = r3; @@ -1253,10 +1313,10 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in r1. - ASSERT(function.is(r1)); + DCHECK(function.is(r1)); // Get the function and setup the context. 
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); @@ -1302,7 +1362,7 @@ void MacroAssembler::IsObjectJSStringType(Register object, Register scratch, Label* fail) { - ASSERT(kNotStringTag != 0); + DCHECK(kNotStringTag != 0); ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); @@ -1321,15 +1381,13 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { mov(r0, Operand::Zero()); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); - CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); + CEntryStub ces(isolate(), 1); + DCHECK(AllowThisStubCall(&ces)); + Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } -#endif void MacroAssembler::PushTryHandler(StackHandler::Kind kind, @@ -1475,9 +1533,9 @@ Label* miss) { Label same_contexts; - ASSERT(!holder_reg.is(scratch)); - ASSERT(!holder_reg.is(ip)); - ASSERT(!scratch.is(ip)); + DCHECK(!holder_reg.is(scratch)); + DCHECK(!holder_reg.is(ip)); + DCHECK(!scratch.is(ip)); // Load current lexical context from the stack frame. ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -1547,7 +1605,7 @@ // Compute the hash code from the untagged key. This must be kept in sync with -// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in // code-stub-hydrogen.cc void MacroAssembler::GetNumberHash(Register t0, Register scratch) { // First of all we assign the hash seed to scratch. @@ -1625,7 +1683,7 @@ and_(t2, t2, Operand(t1)); // Scale the index by multiplying by the element size. - ASSERT(SeededNumberDictionary::kEntrySize == 3); + DCHECK(SeededNumberDictionary::kEntrySize == 3); add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 // Check if the key is identical to the name. 
@@ -1661,7 +1719,7 @@ Register scratch2, Label* gc_required, AllocationFlags flags) { - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1673,17 +1731,17 @@ return; } - ASSERT(!result.is(scratch1)); - ASSERT(!result.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!scratch1.is(ip)); - ASSERT(!scratch2.is(ip)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!scratch1.is(ip)); + DCHECK(!scratch2.is(ip)); // Make object size into bytes. if ((flags & SIZE_IN_WORDS) != 0) { object_size *= kPointerSize; } - ASSERT_EQ(0, object_size & kObjectAlignmentMask); + DCHECK_EQ(0, object_size & kObjectAlignmentMask); // Check relative positions of allocation top and limit addresses. // The values must be adjacent in memory to allow the use of LDM. @@ -1698,8 +1756,8 @@ reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); - ASSERT(result.code() < ip.code()); + DCHECK((limit - top) == kPointerSize); + DCHECK(result.code() < ip.code()); // Set up allocation top address register. Register topaddr = scratch1; @@ -1726,7 +1784,7 @@ if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); Label aligned; @@ -1743,7 +1801,7 @@ // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. 
We must preserve the ip register at this // point, so we cannot just use add(). - ASSERT(object_size > 0); + DCHECK(object_size > 0); Register source = result; Condition cond = al; int shift = 0; @@ -1755,7 +1813,7 @@ object_size -= bits; shift += 8; Operand bits_operand(bits); - ASSERT(bits_operand.is_single_instruction(this)); + DCHECK(bits_operand.instructions_required(this) == 1); add(scratch2, source, bits_operand, SetCC, cond); source = scratch2; cond = cc; @@ -1792,13 +1850,13 @@ // Assert that the register arguments are different and that none of // them are ip. ip is used explicitly in the code generated below. - ASSERT(!result.is(scratch1)); - ASSERT(!result.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!object_size.is(ip)); - ASSERT(!result.is(ip)); - ASSERT(!scratch1.is(ip)); - ASSERT(!scratch2.is(ip)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!object_size.is(ip)); + DCHECK(!result.is(ip)); + DCHECK(!scratch1.is(ip)); + DCHECK(!scratch2.is(ip)); // Check relative positions of allocation top and limit addresses. // The values must be adjacent in memory to allow the use of LDM. @@ -1812,8 +1870,8 @@ reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); - ASSERT(result.code() < ip.code()); + DCHECK((limit - top) == kPointerSize); + DCHECK(result.code() < ip.code()); // Set up allocation top address. Register topaddr = scratch1; @@ -1840,8 +1898,8 @@ if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. 
- ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); Label aligned; b(eq, &aligned); @@ -1908,7 +1966,7 @@ Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. add(scratch1, scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); @@ -1939,8 +1997,8 @@ Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - ASSERT(kCharSize == 1); + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK(kCharSize == 1); add(scratch1, length, Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); @@ -1983,34 +2041,12 @@ Register scratch1, Register scratch2, Label* gc_required) { - Label allocate_new_space, install_map; - AllocationFlags flags = TAG_OBJECT; - - ExternalReference high_promotion_mode = ExternalReference:: - new_space_high_promotion_mode_active_address(isolate()); - mov(scratch1, Operand(high_promotion_mode)); - ldr(scratch1, MemOperand(scratch1, 0)); - cmp(scratch1, Operand::Zero()); - b(eq, &allocate_new_space); - Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); - - jmp(&install_map); - - bind(&allocate_new_space); - Allocate(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - flags); - - 
bind(&install_map); + TAG_OBJECT); InitializeNewString(result, length, @@ -2093,7 +2129,7 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) { - ASSERT(!obj.is(ip)); + DCHECK(!obj.is(ip)); LoadRoot(ip, index); cmp(obj, ip); } @@ -2248,14 +2284,15 @@ Register scratch, Label* miss, bool miss_on_bound_function) { - // Check that the receiver isn't a smi. - JumpIfSmi(function, miss); + Label non_instance; + if (miss_on_bound_function) { + // Check that the receiver isn't a smi. + JumpIfSmi(function, miss); - // Check that the function really is a function. Load map into result reg. - CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); - b(ne, miss); + // Check that the function really is a function. Load map into result reg. + CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); + b(ne, miss); - if (miss_on_bound_function) { ldr(scratch, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); ldr(scratch, @@ -2263,13 +2300,12 @@ tst(scratch, Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); b(ne, miss); - } - // Make sure that the function has an instance prototype. - Label non_instance; - ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); - tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); - b(ne, &non_instance); + // Make sure that the function has an instance prototype. + ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); + tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); + b(ne, &non_instance); + } // Get the prototype or initial map from the function. ldr(result, @@ -2289,12 +2325,15 @@ // Get the prototype from the initial map. ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); - jmp(&done); - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. 
- bind(&non_instance); - ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); + if (miss_on_bound_function) { + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); + } // All done. bind(&done); @@ -2304,13 +2343,13 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond); + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); } void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond); + Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } @@ -2335,14 +2374,11 @@ ExternalReference::handle_scope_level_address(isolate()), next_address); - ASSERT(function_address.is(r1) || function_address.is(r2)); + DCHECK(function_address.is(r1) || function_address.is(r2)); Label profiler_disabled; Label end_profiler_check; - bool* is_profiling_flag = - isolate()->cpu_profiler()->is_profiling_address(); - STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); - mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag))); + mov(r9, Operand(ExternalReference::is_profiling_address(isolate()))); ldrb(r9, MemOperand(r9, 0)); cmp(r9, Operand(0)); b(eq, &profiler_disabled); @@ -2375,7 +2411,7 @@ // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). // DirectCEntry stub itself is generated early and never moves. 
- DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(this, r3); if (FLAG_log_timer_events) { @@ -2432,7 +2468,7 @@ { FrameScope frame(this, StackFrame::INTERNAL); CallExternalReference( - ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()), + ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0); } jmp(&exception_handled); @@ -2455,25 +2491,14 @@ } -void MacroAssembler::IllegalOperation(int num_arguments) { - if (num_arguments > 0) { - add(sp, sp, Operand(num_arguments * kPointerSize)); - } - LoadRoot(r0, Heap::kUndefinedValueRootIndex); -} - - void MacroAssembler::IndexFromHash(Register hash, Register index) { // If the hash field contains an array index pick it out. The assert checks // that the constants for the maximum number of digits for an array index // cached in the hash field and the number of bits reserved for it does not // conflict. - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. 
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); - SmiTag(index, hash); + DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); } @@ -2491,7 +2516,7 @@ void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, LowDwVfpRegister double_scratch) { - ASSERT(!double_input.is(double_scratch)); + DCHECK(!double_input.is(double_scratch)); vcvt_s32_f64(double_scratch.low(), double_input); vcvt_f64_s32(double_scratch, double_scratch.low()); VFPCompareAndSetFlags(double_input, double_scratch); @@ -2501,7 +2526,7 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result, DwVfpRegister double_input, LowDwVfpRegister double_scratch) { - ASSERT(!double_input.is(double_scratch)); + DCHECK(!double_input.is(double_scratch)); vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); vcvt_f64_s32(double_scratch, double_scratch.low()); @@ -2515,8 +2540,8 @@ LowDwVfpRegister double_scratch, Label* done, Label* exact) { - ASSERT(!result.is(input_high)); - ASSERT(!double_input.is(double_scratch)); + DCHECK(!result.is(input_high)); + DCHECK(!double_input.is(double_scratch)); Label negative, exception; VmovHigh(input_high, double_input); @@ -2580,7 +2605,7 @@ sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. vstr(double_input, MemOperand(sp, 0)); - DoubleToIStub stub(sp, result, 0, true, true); + DoubleToIStub stub(isolate(), sp, result, 0, true, true); CallStub(&stub); add(sp, sp, Operand(kDoubleSize)); @@ -2594,7 +2619,7 @@ Register object) { Label done; LowDwVfpRegister double_scratch = kScratchDoubleReg; - ASSERT(!result.is(object)); + DCHECK(!result.is(object)); vldr(double_scratch, MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); @@ -2602,7 +2627,8 @@ // If we fell through then inline version didn't succeed - call stub instead. 
push(lr); - DoubleToIStub stub(object, + DoubleToIStub stub(isolate(), + object, result, HeapNumber::kValueOffset - kHeapObjectTag, true, @@ -2620,7 +2646,7 @@ Register scratch1, Label* not_number) { Label done; - ASSERT(!result.is(object)); + DCHECK(!result.is(object)); UntagAndJumpIfSmi(result, object, &done); JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); @@ -2657,10 +2683,7 @@ // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. - if (f->nargs >= 0 && f->nargs != num_arguments) { - IllegalOperation(num_arguments); - return; - } + CHECK(f->nargs < 0 || f->nargs == num_arguments); // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we @@ -2668,7 +2691,7 @@ // smarter. mov(r0, Operand(num_arguments)); mov(r1, Operand(ExternalReference(f, isolate()))); - CEntryStub stub(1, save_doubles); + CEntryStub stub(isolate(), 1, save_doubles); CallStub(&stub); } @@ -2678,7 +2701,7 @@ mov(r0, Operand(num_arguments)); mov(r1, Operand(ext)); - CEntryStub stub(1); + CEntryStub stub(isolate(), 1); CallStub(&stub); } @@ -2707,11 +2730,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { #if defined(__thumb__) // Thumb mode builtin. - ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); + DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); #endif mov(r1, Operand(builtin)); - CEntryStub stub(1); - Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); + CEntryStub stub(isolate(), 1); + Jump(stub.GetCode(), RelocInfo::CODE_TARGET); } @@ -2719,7 +2742,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a builtin without a valid frame. 
- ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); GetBuiltinEntry(r2, id); if (flag == CALL_FUNCTION) { @@ -2727,7 +2750,7 @@ Call(r2); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(r2); } } @@ -2746,7 +2769,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(r1)); + DCHECK(!target.is(r1)); GetBuiltinFunction(r1, id); // Load the code entry point from the builtins object. ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); @@ -2765,7 +2788,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { mov(scratch2, Operand(ExternalReference(counter))); ldr(scratch1, MemOperand(scratch2)); @@ -2777,7 +2800,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { mov(scratch2, Operand(ExternalReference(counter))); ldr(scratch1, MemOperand(scratch2)); @@ -2795,7 +2818,7 @@ void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { - ASSERT(!elements.is(ip)); + DCHECK(!elements.is(ip)); Label ok; push(elements); ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); @@ -2859,7 +2882,7 @@ // of the Abort macro constant. 
static const int kExpectedAbortInstructions = 7; int abort_instructions = InstructionsGeneratedSince(&abort_start); - ASSERT(abort_instructions <= kExpectedAbortInstructions); + DCHECK(abort_instructions <= kExpectedAbortInstructions); while (abort_instructions++ < kExpectedAbortInstructions) { nop(); } @@ -3216,14 +3239,19 @@ Register scratch2, Register heap_number_map, Label* gc_required, - TaggingMode tagging_mode) { + TaggingMode tagging_mode, + MutableMode mode) { // Allocate an object in the heap for the heap number and tag it as a heap // object. Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); + Heap::RootListIndex map_index = mode == MUTABLE + ? Heap::kMutableHeapNumberMapRootIndex + : Heap::kHeapNumberMapRootIndex; + AssertIsRoot(heap_number_map, map_index); + // Store heap number map in the allocated object. - AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); if (tagging_mode == TAG_RESULT) { str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); } else { @@ -3461,7 +3489,7 @@ // and the original value of sp. 
mov(scratch, sp); sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); and_(sp, sp, Operand(-frame_alignment)); str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { @@ -3477,7 +3505,7 @@ void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { - ASSERT(src.is(d0)); + DCHECK(src.is(d0)); if (!use_eabi_hardfloat()) { vmov(r0, r1, src); } @@ -3492,8 +3520,8 @@ void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2) { - ASSERT(src1.is(d0)); - ASSERT(src2.is(d1)); + DCHECK(src1.is(d0)); + DCHECK(src2.is(d1)); if (!use_eabi_hardfloat()) { vmov(r0, r1, src1); vmov(r2, r3, src2); @@ -3531,16 +3559,16 @@ void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { - ASSERT(has_frame()); + DCHECK(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. #if V8_HOST_ARCH_ARM if (emit_debug_code()) { - int frame_alignment = OS::ActivationFrameAlignment(); + int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); Label alignment_as_expected; tst(sp, Operand(frame_alignment_mask)); b(eq, &alignment_as_expected); @@ -3567,25 +3595,65 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, - Register result) { - const uint32_t kLdrOffsetMask = (1 << 12) - 1; + Register result, + Register scratch) { + Label small_constant_pool_load, load_result; ldr(result, MemOperand(ldr_location)); + + if (FLAG_enable_ool_constant_pool) { + // Check if this is an extended constant pool load. 
+ and_(scratch, result, Operand(GetConsantPoolLoadMask())); + teq(scratch, Operand(GetConsantPoolLoadPattern())); + b(eq, &small_constant_pool_load); + if (emit_debug_code()) { + // Check that the instruction sequence is: + // movw reg, #offset_low + // movt reg, #offset_high + // ldr reg, [pp, reg] + Instr patterns[] = {GetMovWPattern(), GetMovTPattern(), + GetLdrPpRegOffsetPattern()}; + for (int i = 0; i < 3; i++) { + ldr(result, MemOperand(ldr_location, i * kInstrSize)); + and_(result, result, Operand(patterns[i])); + cmp(result, Operand(patterns[i])); + Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); + } + // Result was clobbered. Restore it. + ldr(result, MemOperand(ldr_location)); + } + + // Get the offset into the constant pool. First extract movw immediate into + // result. + and_(scratch, result, Operand(0xfff)); + mov(ip, Operand(result, LSR, 4)); + and_(ip, ip, Operand(0xf000)); + orr(result, scratch, Operand(ip)); + // Then extract movt immediate and or into result. + ldr(scratch, MemOperand(ldr_location, kInstrSize)); + and_(ip, scratch, Operand(0xf0000)); + orr(result, result, Operand(ip, LSL, 12)); + and_(scratch, scratch, Operand(0xfff)); + orr(result, result, Operand(scratch, LSL, 16)); + + b(&load_result); + } + + bind(&small_constant_pool_load); if (emit_debug_code()) { // Check that the instruction is a ldr reg, [<pc or pp> + offset] . - if (FLAG_enable_ool_constant_pool) { - and_(result, result, Operand(kLdrPpPattern)); - cmp(result, Operand(kLdrPpPattern)); - Check(eq, kTheInstructionToPatchShouldBeALoadFromPp); - } else { - and_(result, result, Operand(kLdrPCPattern)); - cmp(result, Operand(kLdrPCPattern)); - Check(eq, kTheInstructionToPatchShouldBeALoadFromPc); - } + and_(result, result, Operand(GetConsantPoolLoadPattern())); + cmp(result, Operand(GetConsantPoolLoadPattern())); + Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); // Result was clobbered. Restore it. 
ldr(result, MemOperand(ldr_location)); } - // Get the address of the constant. + + // Get the offset into the constant pool. + const uint32_t kLdrOffsetMask = (1 << 12) - 1; and_(result, result, Operand(kLdrOffsetMask)); + + bind(&load_result); + // Get the address of the constant. if (FLAG_enable_ool_constant_pool) { add(result, pp, Operand(result)); } else { @@ -3614,7 +3682,7 @@ if (map->CanBeDeprecated()) { mov(scratch, Operand(map)); ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); - tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask))); + tst(scratch, Operand(Map::Deprecated::kMask)); b(ne, if_deprecated); } } @@ -3625,7 +3693,7 @@ Register scratch1, Label* on_black) { HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); } @@ -3635,7 +3703,7 @@ Label* has_color, int first_bit, int second_bit) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); GetMarkBits(object, bitmap_scratch, mask_scratch); @@ -3668,8 +3736,8 @@ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); b(eq, &is_data_object); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. 
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); @@ -3682,7 +3750,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; @@ -3699,14 +3767,14 @@ Register mask_scratch, Register load_scratch, Label* value_is_white_and_not_data) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); Label done; @@ -3739,8 +3807,8 @@ b(eq, &is_data_object); // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. Register instance_type = load_scratch; @@ -3752,8 +3820,8 @@ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). // External strings are the only ones with the kExternalStringTag bit // set. 
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); tst(instance_type, Operand(kExternalStringTag)); mov(length, Operand(ExternalString::kSize), LeaveCC, ne); b(ne, &is_data_object); @@ -3762,8 +3830,8 @@ // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. - ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); - ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); + DCHECK(kSmiTag == 0 && kSmiTagSize == 1); ldr(ip, FieldMemOperand(value, String::kLengthOffset)); tst(instance_type, Operand(kStringEncodingMask)); mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); @@ -3794,83 +3862,20 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch) { - Label above_zero; Label done; - Label in_bounds; - - VFPCompareAndSetFlags(input_reg, 0.0); - b(gt, &above_zero); - - // Double value is less than zero, NaN or Inf, return 0. - mov(result_reg, Operand::Zero()); - b(al, &done); - // Double value is >= 255, return 255. - bind(&above_zero); + // Handle inputs >= 255 (including +infinity). Vmov(double_scratch, 255.0, result_reg); - VFPCompareAndSetFlags(input_reg, double_scratch); - b(le, &in_bounds); mov(result_reg, Operand(255)); - b(al, &done); + VFPCompareAndSetFlags(input_reg, double_scratch); + b(ge, &done); - // In 0-255 range, round and truncate. - bind(&in_bounds); - // Save FPSCR. - vmrs(ip); - // Set rounding mode to round to the nearest integer by clearing bits[23:22]. 
- bic(result_reg, ip, Operand(kVFPRoundingModeMask)); - vmsr(result_reg); - vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding); + // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest + // rounding mode will provide the correct result. + vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); vmov(result_reg, double_scratch.low()); - // Restore FPSCR. - vmsr(ip); - bind(&done); -} - -void MacroAssembler::Throw(BailoutReason reason) { - Label throw_start; - bind(&throw_start); -#ifdef DEBUG - const char* msg = GetBailoutReason(reason); - if (msg != NULL) { - RecordComment("Throw message: "); - RecordComment(msg); - } -#endif - - mov(r0, Operand(Smi::FromInt(reason))); - push(r0); - // Disable stub call restrictions to always allow calls to throw. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } else { - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } - // will not return here - if (is_const_pool_blocked()) { - // If the calling code cares throw the exact number of - // instructions generated, we insert padding here to keep the size - // of the ThrowMessage macro constant. 
- static const int kExpectedThrowMessageInstructions = 10; - int throw_instructions = InstructionsGeneratedSince(&throw_start); - ASSERT(throw_instructions <= kExpectedThrowMessageInstructions); - while (throw_instructions++ < kExpectedThrowMessageInstructions) { - nop(); - } - } -} - - -void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) { - Label L; - b(NegateCondition(cc), &L); - Throw(reason); - // will not return here - bind(&L); + bind(&done); } @@ -3889,7 +3894,8 @@ void MacroAssembler::EnumLength(Register dst, Register map) { STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); - and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask))); + and_(dst, dst, Operand(Map::EnumLengthBits::kMask)); + SmiTag(dst); } @@ -3988,7 +3994,7 @@ Register scratch0, Register scratch1, Label* found) { - ASSERT(!scratch1.is(scratch0)); + DCHECK(!scratch1.is(scratch0)); Factory* factory = isolate()->factory(); Register current = scratch0; Label loop_again; @@ -4000,7 +4006,7 @@ bind(&loop_again); ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); - Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount); + DecodeField<Map::ElementsKindBits>(scratch1); cmp(scratch1, Operand(DICTIONARY_ELEMENTS)); b(eq, found); ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); @@ -4015,9 +4021,12 @@ Register reg3, Register reg4, Register reg5, - Register reg6) { + Register reg6, + Register reg7, + Register reg8) { int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + - reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid(); RegList regs = 0; if (reg1.is_valid()) regs |= reg1.bit(); @@ -4026,6 +4035,8 @@ if (reg4.is_valid()) regs |= reg4.bit(); if (reg5.is_valid()) regs |= reg5.bit(); if 
(reg6.is_valid()) regs |= reg6.bit(); + if (reg7.is_valid()) regs |= reg7.bit(); + if (reg8.is_valid()) regs |= reg8.bit(); int n_of_non_aliasing_regs = NumRegs(regs); return n_of_valid_regs != n_of_non_aliasing_regs; @@ -4043,19 +4054,19 @@ // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } CodePatcher::~CodePatcher() { // Indicate that code has changed. if (flush_cache_ == FLUSH) { - CPU::FlushICache(address_, size_); + CpuFeatures::FlushICache(address_, size_); } // Check that the code was patched as expected. - ASSERT(masm_.pc_ == address_ + size_); - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.pc_ == address_ + size_); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } @@ -4079,9 +4090,9 @@ void MacroAssembler::TruncatingDiv(Register result, Register dividend, int32_t divisor) { - ASSERT(!dividend.is(result)); - ASSERT(!dividend.is(ip)); - ASSERT(!result.is(ip)); + DCHECK(!dividend.is(result)); + DCHECK(!dividend.is(ip)); + DCHECK(!result.is(ip)); MultiplierAndShift ms(divisor); mov(ip, Operand(ms.multiplier())); smull(ip, result, dividend, ip); diff -Nru nodejs-0.11.13/deps/v8/src/arm/macro-assembler-arm.h nodejs-0.11.15/deps/v8/src/arm/macro-assembler-arm.h --- nodejs-0.11.13/deps/v8/src/arm/macro-assembler-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/macro-assembler-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ -#include "assembler.h" -#include "frames.h" -#include "v8globals.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/globals.h" namespace v8 { namespace internal { @@ -60,6 +37,10 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; @@ -77,7 +58,9 @@ Register reg3 = no_reg, Register reg4 = no_reg, Register reg5 = no_reg, - Register reg6 = no_reg); + Register reg6 = no_reg, + Register reg7 = no_reg, + Register reg8 = no_reg); #endif @@ -95,16 +78,25 @@ // macro assembler. MacroAssembler(Isolate* isolate, void* buffer, int size); + + // Returns the size of a call in instructions. Note, the value returned is + // only valid as long as no entries are added to the constant pool between + // checking the call size and emitting the actual call. + static int CallSize(Register target, Condition cond = al); + int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); + int CallStubSize(CodeStub* stub, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + Condition cond = al); + static int CallSizeNotPredictableCodeSize(Isolate* isolate, + Address target, + RelocInfo::Mode rmode, + Condition cond = al); + // Jump, Call, and Ret pseudo instructions implementing inter-working. 
void Jump(Register target, Condition cond = al); void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); - static int CallSize(Register target, Condition cond = al); void Call(Register target, Condition cond = al); - int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); - static int CallSizeNotPredictableCodeSize(Address target, - RelocInfo::Mode rmode, - Condition cond = al); void Call(Address target, RelocInfo::Mode rmode, Condition cond = al, TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); @@ -132,7 +124,8 @@ Register scratch = no_reg, Condition cond = al); - + void Mls(Register dst, Register src1, Register src2, Register srcA, + Condition cond = al); void And(Register dst, Register src1, const Operand& src2, Condition cond = al); void Ubfx(Register dst, Register src, int lsb, int width, @@ -159,6 +152,9 @@ // Register move. May do nothing if the registers are identical. void Move(Register dst, Handle<Object> value); void Move(Register dst, Register src, Condition cond = al); + void Move(Register dst, const Operand& src, Condition cond = al) { + if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond); + } void Move(DwVfpRegister dst, DwVfpRegister src); void Load(Register dst, const MemOperand& src, Representation r); @@ -263,7 +259,9 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // As above, but the offset has the tag presubtracted. For use with // MemOperand(reg, off). 
@@ -275,7 +273,9 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { RecordWriteField(context, offset + kHeapObjectTag, value, @@ -283,9 +283,17 @@ lr_status, save_fp, remembered_set_action, - smi_check); + smi_check, + pointers_to_here_check_for_value); } + void RecordWriteForMap( + Register object, + Register map, + Register dst, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp); + // For a given |object| notify the garbage collector that the slot |address| // has been written. |value| is the object being stored. The value and // address registers are clobbered by the operation. @@ -296,7 +304,9 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // Push a handle. void Push(Handle<Object> handle); @@ -304,7 +314,7 @@ // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Condition cond = al) { - ASSERT(!src1.is(src2)); + DCHECK(!src1.is(src2)); if (src1.code() > src2.code()) { stm(db_w, sp, src1.bit() | src2.bit(), cond); } else { @@ -315,9 +325,9 @@ // Push three registers. Pushes leftmost register first (to highest address). 
void Push(Register src1, Register src2, Register src3, Condition cond = al) { - ASSERT(!src1.is(src2)); - ASSERT(!src2.is(src3)); - ASSERT(!src1.is(src3)); + DCHECK(!src1.is(src2)); + DCHECK(!src2.is(src3)); + DCHECK(!src1.is(src3)); if (src1.code() > src2.code()) { if (src2.code() > src3.code()) { stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); @@ -337,12 +347,12 @@ Register src3, Register src4, Condition cond = al) { - ASSERT(!src1.is(src2)); - ASSERT(!src2.is(src3)); - ASSERT(!src1.is(src3)); - ASSERT(!src1.is(src4)); - ASSERT(!src2.is(src4)); - ASSERT(!src3.is(src4)); + DCHECK(!src1.is(src2)); + DCHECK(!src2.is(src3)); + DCHECK(!src1.is(src3)); + DCHECK(!src1.is(src4)); + DCHECK(!src2.is(src4)); + DCHECK(!src3.is(src4)); if (src1.code() > src2.code()) { if (src2.code() > src3.code()) { if (src3.code() > src4.code()) { @@ -366,7 +376,7 @@ // Pop two registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Condition cond = al) { - ASSERT(!src1.is(src2)); + DCHECK(!src1.is(src2)); if (src1.code() > src2.code()) { ldm(ia_w, sp, src1.bit() | src2.bit(), cond); } else { @@ -377,9 +387,9 @@ // Pop three registers. Pops rightmost register first (from lower address). 
void Pop(Register src1, Register src2, Register src3, Condition cond = al) { - ASSERT(!src1.is(src2)); - ASSERT(!src2.is(src3)); - ASSERT(!src1.is(src3)); + DCHECK(!src1.is(src2)); + DCHECK(!src2.is(src3)); + DCHECK(!src1.is(src3)); if (src1.code() > src2.code()) { if (src2.code() > src3.code()) { ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); @@ -399,12 +409,12 @@ Register src3, Register src4, Condition cond = al) { - ASSERT(!src1.is(src2)); - ASSERT(!src2.is(src3)); - ASSERT(!src1.is(src3)); - ASSERT(!src1.is(src4)); - ASSERT(!src2.is(src4)); - ASSERT(!src3.is(src4)); + DCHECK(!src1.is(src2)); + DCHECK(!src2.is(src3)); + DCHECK(!src1.is(src3)); + DCHECK(!src1.is(src4)); + DCHECK(!src2.is(src4)); + DCHECK(!src3.is(src4)); if (src1.code() > src2.code()) { if (src2.code() > src3.code()) { if (src3.code() > src4.code()) { @@ -436,12 +446,9 @@ // RegList constant kSafepointSavedRegisters. void PushSafepointRegisters(); void PopSafepointRegisters(); - void PushSafepointRegistersAndDoubles(); - void PopSafepointRegistersAndDoubles(); // Store value in register src in the safepoint stack slot for // register dst. void StoreToSafepointRegisterSlot(Register src, Register dst); - void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); // Load the value of the src register from its safepoint stack slot // into register dst. void LoadFromSafepointRegisterSlot(Register dst, Register src); @@ -538,7 +545,8 @@ Label* not_int32); // Generates function and stub prologue code. - void Prologue(PrologueFrameMode frame_mode); + void StubPrologue(); + void Prologue(bool code_pre_aging); // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. 
@@ -627,12 +635,10 @@ Register scratch, Label* fail); -#ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support void DebugBreak(); -#endif // --------------------------------------------------------------------------- // Exception handling @@ -651,12 +657,6 @@ // handler chain. void ThrowUncatchable(Register value); - // Throw a message string as an exception. - void Throw(BailoutReason reason); - - // Throw a message string as an exception if a condition is not true. - void ThrowIf(Condition cc, BailoutReason reason); - // --------------------------------------------------------------------------- // Inline caching support @@ -687,7 +687,7 @@ // These instructions are generated to mark special location in the code, // like some special IC code. static inline bool IsMarkedCode(Instr instr, int type) { - ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); + DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); return IsNop(instr, type); } @@ -707,7 +707,7 @@ (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER) ? src_reg : -1; - ASSERT((type == -1) || + DCHECK((type == -1) || ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER))); return type; } @@ -785,7 +785,8 @@ Register scratch2, Register heap_number_map, Label* gc_required, - TaggingMode tagging_mode = TAG_RESULT); + TaggingMode tagging_mode = TAG_RESULT, + MutableMode mode = IMMUTABLE); void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, @@ -946,15 +947,11 @@ ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); tst(type, Operand(kIsNotStringMask), cond); - ASSERT_EQ(0, kStringTag); + DCHECK_EQ(0, kStringTag); return eq; } - // Generates code for reporting that an illegal operation has - // occurred. - void IllegalOperation(int num_arguments); - // Picks out an array index from the hash field. 
// Register use: // hash - holds the index's hash. Clobbered. @@ -1147,7 +1144,7 @@ void GetBuiltinFunction(Register target, Builtins::JavaScript id); Handle<Object> CodeObject() { - ASSERT(!code_object_.is_null()); + DCHECK(!code_object_.is_null()); return code_object_; } @@ -1191,7 +1188,7 @@ // EABI variant for double arguments in use. bool use_eabi_hardfloat() { #ifdef __arm__ - return OS::ArmUsingHardFloat(); + return base::OS::ArmUsingHardFloat(); #elif USE_EABI_HARDFLOAT return true; #else @@ -1364,8 +1361,8 @@ // Get the location of a relocated constant (its address in the constant pool) // from its load site. - void GetRelocatedValueLocation(Register ldr_location, - Register result); + void GetRelocatedValueLocation(Register ldr_location, Register result, + Register scratch); void ClampUint8(Register output_reg, Register input_reg); @@ -1380,11 +1377,35 @@ void NumberOfOwnDescriptors(Register dst, Register map); template<typename Field> + void DecodeField(Register dst, Register src) { + Ubfx(dst, src, Field::kShift, Field::kSize); + } + + template<typename Field> void DecodeField(Register reg) { + DecodeField<Field>(reg, reg); + } + + template<typename Field> + void DecodeFieldToSmi(Register dst, Register src) { static const int shift = Field::kShift; - static const int mask = (Field::kMask >> shift) << kSmiTagSize; - mov(reg, Operand(reg, LSR, shift)); - and_(reg, reg, Operand(mask)); + static const int mask = Field::kMask >> shift << kSmiTagSize; + STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); + STATIC_ASSERT(kSmiTag == 0); + if (shift < kSmiTagSize) { + mov(dst, Operand(src, LSL, kSmiTagSize - shift)); + and_(dst, dst, Operand(mask)); + } else if (shift > kSmiTagSize) { + mov(dst, Operand(src, LSR, shift - kSmiTagSize)); + and_(dst, dst, Operand(mask)); + } else { + and_(dst, src, Operand(mask)); + } + } + + template<typename Field> + void DecodeFieldToSmi(Register reg) { + DecodeField<Field>(reg, reg); } // Activation support. 
@@ -1524,11 +1545,12 @@ type_(type), old_has_frame_(masm->has_frame()), old_constant_pool_available_(masm->is_constant_pool_available()) { + // We only want to enable constant pool access for non-manual frame scopes + // to ensure the constant pool pointer is valid throughout the scope. + DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); masm->set_has_frame(true); masm->set_constant_pool_available(true); - if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) { - masm->EnterFrame(type, !old_constant_pool_available_); - } + masm->EnterFrame(type, !old_constant_pool_available_); } ~FrameAndConstantPoolScope() { @@ -1543,7 +1565,7 @@ // scope, the MacroAssembler is still marked as being in a frame scope, and // the code will be generated again when it goes out of scope. void GenerateLeaveFrame() { - ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); + DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); masm_->LeaveFrame(type_); } diff -Nru nodejs-0.11.13/deps/v8/src/arm/regexp-macro-assembler-arm.cc nodejs-0.11.15/deps/v8/src/arm/regexp-macro-assembler-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/regexp-macro-assembler-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/regexp-macro-assembler-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,20 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "cpu-profiler.h" -#include "unicode.h" -#include "log.h" -#include "code-stubs.h" -#include "regexp-stack.h" -#include "macro-assembler.h" -#include "regexp-macro-assembler.h" -#include "arm/regexp-macro-assembler-arm.h" +#include "src/code-stubs.h" +#include "src/cpu-profiler.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/unicode.h" + +#include "src/arm/regexp-macro-assembler-arm.h" namespace v8 { namespace internal { @@ -132,7 +110,7 @@ success_label_(), backtrack_label_(), exit_label_() { - ASSERT_EQ(0, registers_to_save % 2); + DCHECK_EQ(0, registers_to_save % 2); __ jmp(&entry_label_); // We'll write the entry code later. __ bind(&start_label_); // And then continue from here. 
} @@ -165,8 +143,8 @@ void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) { - ASSERT(reg >= 0); - ASSERT(reg < num_registers_); + DCHECK(reg >= 0); + DCHECK(reg < num_registers_); if (by != 0) { __ ldr(r0, register_location(reg)); __ add(r0, r0, Operand(by)); @@ -309,7 +287,7 @@ // Compute new value of character position after the matched part. __ sub(current_input_offset(), r2, end_of_input_address()); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); int argument_count = 4; __ PrepareCallCFunction(argument_count, r2); @@ -380,7 +358,7 @@ __ ldrb(r3, MemOperand(r0, char_size(), PostIndex)); __ ldrb(r4, MemOperand(r2, char_size(), PostIndex)); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); __ ldrh(r3, MemOperand(r0, char_size(), PostIndex)); __ ldrh(r4, MemOperand(r2, char_size(), PostIndex)); } @@ -433,7 +411,7 @@ uc16 minus, uc16 mask, Label* on_not_equal) { - ASSERT(minus < String::kMaxUtf16CodeUnit); + DCHECK(minus < String::kMaxUtf16CodeUnit); __ sub(r0, current_character(), Operand(minus)); __ and_(r0, r0, Operand(mask)); __ cmp(r0, Operand(c)); @@ -732,7 +710,7 @@ __ add(r1, r1, Operand(r2)); // r1 is length of string in characters. - ASSERT_EQ(0, num_saved_registers_ % 2); + DCHECK_EQ(0, num_saved_registers_ % 2); // Always an even number of capture registers. This allows us to // unroll the loop once to add an operation between a load of a register // and the following use of that register. @@ -917,8 +895,8 @@ Label* on_end_of_input, bool check_bounds, int characters) { - ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. - ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works) + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset < (1<<30)); // Be sane! 
(And ensure negation works) if (check_bounds) { CheckPosition(cp_offset + characters - 1, on_end_of_input); } @@ -983,7 +961,7 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) { - ASSERT(register_index >= num_saved_registers_); // Reserved for positions! + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! __ mov(r0, Operand(to)); __ str(r0, register_location(register_index)); } @@ -1007,7 +985,7 @@ void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) { - ASSERT(reg_from <= reg_to); + DCHECK(reg_from <= reg_to); __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); for (int reg = reg_from; reg <= reg_to; reg++) { __ str(r0, register_location(reg)); @@ -1033,8 +1011,8 @@ __ mov(r1, Operand(masm_->CodeObject())); // We need to make room for the return address on the stack. - int stack_alignment = OS::ActivationFrameAlignment(); - ASSERT(IsAligned(stack_alignment, kPointerSize)); + int stack_alignment = base::OS::ActivationFrameAlignment(); + DCHECK(IsAligned(stack_alignment, kPointerSize)); __ sub(sp, sp, Operand(stack_alignment)); // r0 will point to the return address, placed by DirectCEntry. @@ -1043,13 +1021,13 @@ ExternalReference stack_guard_check = ExternalReference::re_check_stack_guard_state(isolate()); __ mov(ip, Operand(stack_guard_check)); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm_, ip); // Drop the return address from the stack. __ add(sp, sp, Operand(stack_alignment)); - ASSERT(stack_alignment != 0); + DCHECK(stack_alignment != 0); __ ldr(sp, MemOperand(sp, 0)); __ mov(code_pointer(), Operand(masm_->CodeObject())); @@ -1067,7 +1045,8 @@ Code* re_code, Address re_frame) { Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); - if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { isolate->StackOverflow(); return EXCEPTION; } @@ -1090,11 +1069,11 @@ // Current string. 
bool is_ascii = subject->IsOneByteRepresentationUnderneath(); - ASSERT(re_code->instruction_start() <= *return_address); - ASSERT(*return_address <= + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= re_code->instruction_start() + re_code->instruction_size()); - MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate); + Object* result = isolate->stack_guard()->HandleInterrupts(); if (*code_handle != re_code) { // Return address no longer valid int delta = code_handle->address() - re_code->address(); @@ -1130,7 +1109,7 @@ // be a sequential or external string with the same content. // Update the start and end pointers in the stack frame to the current // location (whether it has actually moved or not). - ASSERT(StringShape(*subject_tmp).IsSequential() || + DCHECK(StringShape(*subject_tmp).IsSequential() || StringShape(*subject_tmp).IsExternal()); // The original start address of the characters to match. @@ -1162,7 +1141,7 @@ MemOperand RegExpMacroAssemblerARM::register_location(int register_index) { - ASSERT(register_index < (1<<30)); + DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1215,14 +1194,14 @@ void RegExpMacroAssemblerARM::Push(Register source) { - ASSERT(!source.is(backtrack_stackpointer())); + DCHECK(!source.is(backtrack_stackpointer())); __ str(source, MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex)); } void RegExpMacroAssemblerARM::Pop(Register target) { - ASSERT(!target.is(backtrack_stackpointer())); + DCHECK(!target.is(backtrack_stackpointer())); __ ldr(target, MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex)); } @@ -1267,7 +1246,7 @@ // If unaligned load/stores are not supported then this function must only // be used to load a single character at a time. 
if (!CanReadUnaligned()) { - ASSERT(characters == 1); + DCHECK(characters == 1); } if (mode_ == ASCII) { @@ -1276,15 +1255,15 @@ } else if (characters == 2) { __ ldrh(current_character(), MemOperand(end_of_input_address(), offset)); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ ldrb(current_character(), MemOperand(end_of_input_address(), offset)); } } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); if (characters == 2) { __ ldr(current_character(), MemOperand(end_of_input_address(), offset)); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ ldrh(current_character(), MemOperand(end_of_input_address(), offset)); } } diff -Nru nodejs-0.11.13/deps/v8/src/arm/regexp-macro-assembler-arm.h nodejs-0.11.15/deps/v8/src/arm/regexp-macro-assembler-arm.h --- nodejs-0.11.13/deps/v8/src/arm/regexp-macro-assembler-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/regexp-macro-assembler-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_ -#include "arm/assembler-arm.h" -#include "arm/assembler-arm-inl.h" -#include "macro-assembler.h" +#include "src/arm/assembler-arm.h" +#include "src/arm/assembler-arm-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arm/simulator-arm.cc nodejs-0.11.15/deps/v8/src/arm/simulator-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/simulator-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/simulator-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,20 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <stdarg.h> #include <stdlib.h> #include <cmath> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "disasm.h" -#include "assembler.h" -#include "codegen.h" -#include "arm/constants-arm.h" -#include "arm/simulator-arm.h" +#include "src/arm/constants-arm.h" +#include "src/arm/simulator-arm.h" +#include "src/assembler.h" +#include "src/codegen.h" +#include "src/disasm.h" #if defined(USE_SIMULATOR) @@ -110,7 +87,7 @@ char** msg_address = reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize); char* msg = *msg_address; - ASSERT(msg != NULL); + DCHECK(msg != NULL); // Update this stop description. 
if (isWatchedStop(code) && !watched_stops_[code].desc) { @@ -365,17 +342,18 @@ || (strcmp(cmd, "printobject") == 0)) { if (argc == 2) { int32_t value; + OFStream os(stdout); if (GetValue(arg1, &value)) { Object* obj = reinterpret_cast<Object*>(value); - PrintF("%s: \n", arg1); + os << arg1 << ": \n"; #ifdef DEBUG - obj->PrintLn(); + obj->Print(os); + os << "\n"; #else - obj->ShortPrint(); - PrintF("\n"); + os << Brief(obj) << "\n"; #endif } else { - PrintF("%s unrecognized\n", arg1); + os << arg1 << " unrecognized\n"; } } else { PrintF("printobject <value>\n"); @@ -474,7 +452,7 @@ } } else if (strcmp(cmd, "gdb") == 0) { PrintF("relinquishing control to gdb\n"); - v8::internal::OS::DebugBreak(); + v8::base::OS::DebugBreak(); PrintF("regaining control from gdb\n"); } else if (strcmp(cmd, "break") == 0) { if (argc == 2) { @@ -630,8 +608,8 @@ static bool ICacheMatch(void* one, void* two) { - ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0); - ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0); + DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0); + DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0); return one == two; } @@ -668,7 +646,7 @@ FlushOnePage(i_cache, start, bytes_to_flush); start += bytes_to_flush; size -= bytes_to_flush; - ASSERT_EQ(0, start & CachePage::kPageMask); + DCHECK_EQ(0, start & CachePage::kPageMask); offset = 0; } if (size != 0) { @@ -693,10 +671,10 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start, int size) { - ASSERT(size <= CachePage::kPageSize); - ASSERT(AllOnOnePage(start, size - 1)); - ASSERT((start & CachePage::kLineMask) == 0); - ASSERT((size & CachePage::kLineMask) == 0); + DCHECK(size <= CachePage::kPageSize); + DCHECK(AllOnOnePage(start, size - 1)); + DCHECK((start & CachePage::kLineMask) == 0); + DCHECK((size & CachePage::kLineMask) == 0); void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask)); int offset = (start 
& CachePage::kPageMask); CachePage* cache_page = GetCachePage(i_cache, page); @@ -717,12 +695,12 @@ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); if (cache_hit) { // Check that the data in memory matches the contents of the I-cache. - CHECK(memcmp(reinterpret_cast<void*>(instr), - cache_page->CachedData(offset), - Instruction::kInstrSize) == 0); + CHECK_EQ(0, + memcmp(reinterpret_cast<void*>(instr), + cache_page->CachedData(offset), Instruction::kInstrSize)); } else { // Cache miss. Load memory into the cache. - OS::MemCopy(cached_line, line, CachePage::kLineLength); + memcpy(cached_line, line, CachePage::kLineLength); *cache_valid_byte = CachePage::LINE_VALID; } } @@ -773,8 +751,8 @@ z_flag_FPSCR_ = false; c_flag_FPSCR_ = false; v_flag_FPSCR_ = false; - FPSCR_rounding_mode_ = RZ; - FPSCR_default_NaN_mode_ = true; + FPSCR_rounding_mode_ = RN; + FPSCR_default_NaN_mode_ = false; inv_op_vfp_flag_ = false; div_zero_vfp_flag_ = false; @@ -836,7 +814,7 @@ Redirection* current = isolate->simulator_redirection(); for (; current != NULL; current = current->next_) { if (current->external_function_ == external_function) { - ASSERT_EQ(current->type(), type); + DCHECK_EQ(current->type(), type); return current; } } @@ -875,7 +853,7 @@ Simulator* Simulator::current(Isolate* isolate) { v8::internal::Isolate::PerIsolateThreadData* isolate_data = isolate->FindOrAllocatePerThreadDataForThisThread(); - ASSERT(isolate_data != NULL); + DCHECK(isolate_data != NULL); Simulator* sim = isolate_data->simulator(); if (sim == NULL) { @@ -890,7 +868,7 @@ // Sets the register in the architecture state. It will also deal with updating // Simulator internal state for special registers such as PC. void Simulator::set_register(int reg, int32_t value) { - ASSERT((reg >= 0) && (reg < num_registers)); + DCHECK((reg >= 0) && (reg < num_registers)); if (reg == pc) { pc_modified_ = true; } @@ -901,7 +879,7 @@ // Get the register from the architecture state. 
This function does handle // the special case of accessing the PC register. int32_t Simulator::get_register(int reg) const { - ASSERT((reg >= 0) && (reg < num_registers)); + DCHECK((reg >= 0) && (reg < num_registers)); // Stupid code added to avoid bug in GCC. // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949 if (reg >= num_registers) return 0; @@ -911,75 +889,75 @@ double Simulator::get_double_from_register_pair(int reg) { - ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0)); + DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0)); double dm_val = 0.0; // Read the bits from the unsigned integer register_[] array // into the double precision floating point value and return it. char buffer[2 * sizeof(vfp_registers_[0])]; - OS::MemCopy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); - OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0])); + memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); + memcpy(&dm_val, buffer, 2 * sizeof(registers_[0])); return(dm_val); } void Simulator::set_register_pair_from_double(int reg, double* value) { - ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0)); + DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0)); memcpy(registers_ + reg, value, sizeof(*value)); } void Simulator::set_dw_register(int dreg, const int* dbl) { - ASSERT((dreg >= 0) && (dreg < num_d_registers)); + DCHECK((dreg >= 0) && (dreg < num_d_registers)); registers_[dreg] = dbl[0]; registers_[dreg + 1] = dbl[1]; } void Simulator::get_d_register(int dreg, uint64_t* value) { - ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); + DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value)); } void Simulator::set_d_register(int dreg, const uint64_t* value) { - ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); + DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value)); 
} void Simulator::get_d_register(int dreg, uint32_t* value) { - ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); + DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2); } void Simulator::set_d_register(int dreg, const uint32_t* value) { - ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); + DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters())); memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2); } void Simulator::get_q_register(int qreg, uint64_t* value) { - ASSERT((qreg >= 0) && (qreg < num_q_registers)); + DCHECK((qreg >= 0) && (qreg < num_q_registers)); memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2); } void Simulator::set_q_register(int qreg, const uint64_t* value) { - ASSERT((qreg >= 0) && (qreg < num_q_registers)); + DCHECK((qreg >= 0) && (qreg < num_q_registers)); memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2); } void Simulator::get_q_register(int qreg, uint32_t* value) { - ASSERT((qreg >= 0) && (qreg < num_q_registers)); + DCHECK((qreg >= 0) && (qreg < num_q_registers)); memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4); } void Simulator::set_q_register(int qreg, const uint32_t* value) { - ASSERT((qreg >= 0) && (qreg < num_q_registers)); + DCHECK((qreg >= 0) && (qreg < num_q_registers)); memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4); } @@ -1004,41 +982,41 @@ // Getting from and setting into VFP registers. 
void Simulator::set_s_register(int sreg, unsigned int value) { - ASSERT((sreg >= 0) && (sreg < num_s_registers)); + DCHECK((sreg >= 0) && (sreg < num_s_registers)); vfp_registers_[sreg] = value; } unsigned int Simulator::get_s_register(int sreg) const { - ASSERT((sreg >= 0) && (sreg < num_s_registers)); + DCHECK((sreg >= 0) && (sreg < num_s_registers)); return vfp_registers_[sreg]; } template<class InputType, int register_size> void Simulator::SetVFPRegister(int reg_index, const InputType& value) { - ASSERT(reg_index >= 0); - if (register_size == 1) ASSERT(reg_index < num_s_registers); - if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters()); + DCHECK(reg_index >= 0); + if (register_size == 1) DCHECK(reg_index < num_s_registers); + if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters()); char buffer[register_size * sizeof(vfp_registers_[0])]; - OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0])); - OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer, - register_size * sizeof(vfp_registers_[0])); + memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0])); + memcpy(&vfp_registers_[reg_index * register_size], buffer, + register_size * sizeof(vfp_registers_[0])); } template<class ReturnType, int register_size> ReturnType Simulator::GetFromVFPRegister(int reg_index) { - ASSERT(reg_index >= 0); - if (register_size == 1) ASSERT(reg_index < num_s_registers); - if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters()); + DCHECK(reg_index >= 0); + if (register_size == 1) DCHECK(reg_index < num_s_registers); + if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters()); ReturnType value = 0; char buffer[register_size * sizeof(vfp_registers_[0])]; - OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index], - register_size * sizeof(vfp_registers_[0])); - OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0])); + memcpy(buffer, 
&vfp_registers_[register_size * reg_index], + register_size * sizeof(vfp_registers_[0])); + memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0])); return value; } @@ -1067,14 +1045,14 @@ void Simulator::SetFpResult(const double& result) { if (use_eabi_hardfloat()) { char buffer[2 * sizeof(vfp_registers_[0])]; - OS::MemCopy(buffer, &result, sizeof(buffer)); + memcpy(buffer, &result, sizeof(buffer)); // Copy result to d0. - OS::MemCopy(vfp_registers_, buffer, sizeof(buffer)); + memcpy(vfp_registers_, buffer, sizeof(buffer)); } else { char buffer[2 * sizeof(registers_[0])]; - OS::MemCopy(buffer, &result, sizeof(buffer)); + memcpy(buffer, &result, sizeof(buffer)); // Copy result to r0 and r1. - OS::MemCopy(registers_, buffer, sizeof(buffer)); + memcpy(registers_, buffer, sizeof(buffer)); } } @@ -1452,7 +1430,7 @@ *carry_out = (result & 1) == 1; result >>= 1; } else { - ASSERT(shift_amount >= 32); + DCHECK(shift_amount >= 32); if (result < 0) { *carry_out = true; result = 0xffffffff; @@ -1475,7 +1453,7 @@ *carry_out = (result & 1) == 1; result = 0; } else { - ASSERT(shift_amount > 32); + DCHECK(shift_amount > 32); *carry_out = false; result = 0; } @@ -1597,7 +1575,7 @@ intptr_t* address = reinterpret_cast<intptr_t*>(start_address); // Catch null pointers a little earlier. 
- ASSERT(start_address > 8191 || start_address < 0); + DCHECK(start_address > 8191 || start_address < 0); int reg = 0; while (rlist != 0) { if ((rlist & 1) != 0) { @@ -1611,7 +1589,7 @@ reg++; rlist >>= 1; } - ASSERT(end_address == ((intptr_t)address) - 4); + DCHECK(end_address == ((intptr_t)address) - 4); if (instr->HasW()) { set_register(instr->RnValue(), rn_val); } @@ -1658,19 +1636,19 @@ ReadW(reinterpret_cast<int32_t>(address + 1), instr) }; double d; - OS::MemCopy(&d, data, 8); + memcpy(&d, data, 8); set_d_register_from_double(reg, d); } else { int32_t data[2]; double d = get_double_from_d_register(reg); - OS::MemCopy(data, &d, 8); + memcpy(data, &d, 8); WriteW(reinterpret_cast<int32_t>(address), data[0], instr); WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr); } address += 2; } } - ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address); + DCHECK(reinterpret_cast<intptr_t>(address) - operand_size == end_address); if (instr->HasW()) { set_register(instr->RnValue(), rn_val); } @@ -1875,7 +1853,7 @@ target(arg0, arg1, Redirection::ReverseRedirection(arg2)); } else { // builtin call. - ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL); + DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL); SimulatorRuntimeCall target = reinterpret_cast<SimulatorRuntimeCall>(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { @@ -1951,13 +1929,13 @@ bool Simulator::isWatchedStop(uint32_t code) { - ASSERT(code <= kMaxStopCode); + DCHECK(code <= kMaxStopCode); return code < kNumOfWatchedStops; } bool Simulator::isEnabledStop(uint32_t code) { - ASSERT(code <= kMaxStopCode); + DCHECK(code <= kMaxStopCode); // Unwatched stops are always enabled. 
return !isWatchedStop(code) || !(watched_stops_[code].count & kStopDisabledBit); @@ -1965,7 +1943,7 @@ void Simulator::EnableStop(uint32_t code) { - ASSERT(isWatchedStop(code)); + DCHECK(isWatchedStop(code)); if (!isEnabledStop(code)) { watched_stops_[code].count &= ~kStopDisabledBit; } @@ -1973,7 +1951,7 @@ void Simulator::DisableStop(uint32_t code) { - ASSERT(isWatchedStop(code)); + DCHECK(isWatchedStop(code)); if (isEnabledStop(code)) { watched_stops_[code].count |= kStopDisabledBit; } @@ -1981,8 +1959,8 @@ void Simulator::IncreaseStopCounter(uint32_t code) { - ASSERT(code <= kMaxStopCode); - ASSERT(isWatchedStop(code)); + DCHECK(code <= kMaxStopCode); + DCHECK(isWatchedStop(code)); if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) { PrintF("Stop counter for code %i has overflowed.\n" "Enabling this code and reseting the counter to 0.\n", code); @@ -1996,7 +1974,7 @@ // Print a stop status. void Simulator::PrintStopInfo(uint32_t code) { - ASSERT(code <= kMaxStopCode); + DCHECK(code <= kMaxStopCode); if (!isWatchedStop(code)) { PrintF("Stop not watched."); } else { @@ -2114,7 +2092,7 @@ switch (instr->PUField()) { case da_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm"); - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); addr = rn_val; rn_val -= rm_val; set_register(rn, rn_val); @@ -2122,7 +2100,7 @@ } case ia_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm"); - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); addr = rn_val; rn_val += rm_val; set_register(rn, rn_val); @@ -2157,7 +2135,7 @@ switch (instr->PUField()) { case da_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8"); - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); addr = rn_val; rn_val -= imm_val; set_register(rn, rn_val); @@ -2165,7 +2143,7 @@ } case ia_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8"); - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); addr = rn_val; rn_val += imm_val; set_register(rn, rn_val); @@ -2197,7 +2175,7 @@ } 
} if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) { - ASSERT((rd % 2) == 0); + DCHECK((rd % 2) == 0); if (instr->HasH()) { // The strd instruction. int32_t value1 = get_register(rd); @@ -2228,8 +2206,8 @@ } } else { // signed byte loads - ASSERT(instr->HasSign()); - ASSERT(instr->HasL()); + DCHECK(instr->HasSign()); + DCHECK(instr->HasL()); int8_t val = ReadB(addr); set_register(rd, val); } @@ -2293,7 +2271,7 @@ if (type == 0) { shifter_operand = GetShiftRm(instr, &shifter_carry_out); } else { - ASSERT(instr->TypeValue() == 1); + DCHECK(instr->TypeValue() == 1); shifter_operand = GetImm(instr, &shifter_carry_out); } int32_t alu_out; @@ -2516,7 +2494,7 @@ switch (instr->PUField()) { case da_x: { // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12"); - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); addr = rn_val; rn_val -= im_val; set_register(rn, rn_val); @@ -2524,7 +2502,7 @@ } case ia_x: { // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12"); - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); addr = rn_val; rn_val += im_val; set_register(rn, rn_val); @@ -2580,7 +2558,7 @@ int32_t addr = 0; switch (instr->PUField()) { case da_x: { - ASSERT(!instr->HasW()); + DCHECK(!instr->HasW()); Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm"); UNIMPLEMENTED(); break; @@ -2733,28 +2711,30 @@ } case db_x: { if (FLAG_enable_sudiv) { - if (!instr->HasW()) { - if (instr->Bits(5, 4) == 0x1) { - if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) { - // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs - // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs); - int rm = instr->RmValue(); - int32_t rm_val = get_register(rm); - int rs = instr->RsValue(); - int32_t rs_val = get_register(rs); - int32_t ret_val = 0; - ASSERT(rs_val != 0); - if ((rm_val == kMinInt) && (rs_val == -1)) { - ret_val = kMinInt; - } else { - ret_val = rm_val / rs_val; - } - set_register(rn, ret_val); - return; - } - } - } - } + if (instr->Bits(5, 4) == 0x1) { + if ((instr->Bit(22) 
== 0x0) && (instr->Bit(20) == 0x1)) { + // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs + // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs); + int rm = instr->RmValue(); + int32_t rm_val = get_register(rm); + int rs = instr->RsValue(); + int32_t rs_val = get_register(rs); + int32_t ret_val = 0; + DCHECK(rs_val != 0); + // udiv + if (instr->Bit(21) == 0x1) { + ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) / + static_cast<uint32_t>(rs_val)); + } else if ((rm_val == kMinInt) && (rs_val == -1)) { + ret_val = kMinInt; + } else { + ret_val = rm_val / rs_val; + } + set_register(rn, ret_val); + return; + } + } + } // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w"); addr = rn_val - shifter_operand; if (instr->HasW()) { @@ -2794,7 +2774,7 @@ uint32_t rd_val = static_cast<uint32_t>(get_register(instr->RdValue())); uint32_t bitcount = msbit - lsbit + 1; - uint32_t mask = (1 << bitcount) - 1; + uint32_t mask = 0xffffffffu >> (32 - bitcount); rd_val &= ~(mask << lsbit); if (instr->RmValue() != 15) { // bfi - bitfield insert. @@ -2841,7 +2821,7 @@ void Simulator::DecodeType4(Instruction* instr) { - ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode + DCHECK(instr->Bit(22) == 0); // only allowed to be set in privileged mode if (instr->HasL()) { // Format(instr, "ldm'cond'pu 'rn'w, 'rlist"); HandleRList(instr, true); @@ -2895,8 +2875,8 @@ // vmrs // Dd = vsqrt(Dm) void Simulator::DecodeTypeVFP(Instruction* instr) { - ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) ); - ASSERT(instr->Bits(11, 9) == 0x5); + DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) ); + DCHECK(instr->Bits(11, 9) == 0x5); // Obtain double precision register codes. 
int vm = instr->VFPMRegValue(kDoublePrecision); @@ -2936,7 +2916,7 @@ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) && (instr->Bit(8) == 1)) { // vcvt.f64.s32 Dd, Dd, #<fbits> - int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0)); + int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5)); int fixed_value = get_sinteger_from_s_register(vd * 2); double divide = 1 << fraction_bits; set_d_register_from_double(vd, fixed_value / divide); @@ -3043,9 +3023,9 @@ int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4); double dd_value = get_double_from_d_register(vd); int32_t data[2]; - OS::MemCopy(data, &dd_value, 8); + memcpy(data, &dd_value, 8); data[instr->Bit(21)] = get_register(instr->RtValue()); - OS::MemCopy(&dd_value, data, 8); + memcpy(&dd_value, data, 8); set_d_register_from_double(vd, dd_value); } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1) && @@ -3054,7 +3034,7 @@ int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4); double dn_value = get_double_from_d_register(vn); int32_t data[2]; - OS::MemCopy(data, &dn_value, 8); + memcpy(data, &dn_value, 8); set_register(instr->RtValue(), data[instr->Bit(21)]); } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x0) && @@ -3111,7 +3091,7 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters( Instruction* instr) { - ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) && + DCHECK((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)); int t = instr->RtValue(); @@ -3129,8 +3109,8 @@ void Simulator::DecodeVCMP(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); - ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) && + DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); + DCHECK(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) && (instr->Opc3Value() & 0x1)); // Comparison. 
@@ -3167,8 +3147,8 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); - ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)); + DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); + DCHECK((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)); VFPRegPrecision dst_precision = kDoublePrecision; VFPRegPrecision src_precision = kSinglePrecision; @@ -3192,7 +3172,7 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode, double val, bool unsigned_) { - ASSERT((mode == RN) || (mode == RM) || (mode == RZ)); + DCHECK((mode == RN) || (mode == RM) || (mode == RZ)); double max_uint = static_cast<double>(0xffffffffu); double max_int = static_cast<double>(kMaxInt); double min_int = static_cast<double>(kMinInt); @@ -3245,9 +3225,9 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) && + DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) && (instr->Bits(27, 23) == 0x1D)); - ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) || + DCHECK(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) || (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1))); // Conversion between floating-point and integer. @@ -3271,7 +3251,7 @@ // mode or the default Round to Zero mode. VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_ : RZ; - ASSERT((mode == RM) || (mode == RZ) || (mode == RN)); + DCHECK((mode == RM) || (mode == RZ) || (mode == RN)); bool unsigned_integer = (instr->Bit(16) == 0); bool double_precision = (src_precision == kDoublePrecision); @@ -3355,7 +3335,7 @@ // Ddst = MEM(Rbase + 4*offset). // MEM(Rbase + 4*offset) = Dsrc. 
void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { - ASSERT((instr->TypeValue() == 6)); + DCHECK((instr->TypeValue() == 6)); if (instr->CoprocessorValue() == 0xA) { switch (instr->OpcodeValue()) { @@ -3405,13 +3385,13 @@ if (instr->HasL()) { int32_t data[2]; double d = get_double_from_d_register(vm); - OS::MemCopy(data, &d, 8); + memcpy(data, &d, 8); set_register(rt, data[0]); set_register(rn, data[1]); } else { int32_t data[] = { get_register(rt), get_register(rn) }; double d; - OS::MemCopy(&d, data, 8); + memcpy(&d, data, 8); set_d_register_from_double(vm, d); } } @@ -3434,13 +3414,13 @@ ReadW(address + 4, instr) }; double val; - OS::MemCopy(&val, data, 8); + memcpy(&val, data, 8); set_d_register_from_double(vd, val); } else { // Store double to memory: vstr. int32_t data[2]; double val = get_double_from_d_register(vd); - OS::MemCopy(data, &val, 8); + memcpy(data, &val, 8); WriteW(address, data[0], instr); WriteW(address + 4, data[1], instr); } @@ -3776,7 +3756,7 @@ // Set up arguments // First four arguments passed in registers. - ASSERT(argument_count >= 4); + DCHECK(argument_count >= 4); set_register(r0, va_arg(parameters, int32_t)); set_register(r1, va_arg(parameters, int32_t)); set_register(r2, va_arg(parameters, int32_t)); @@ -3786,8 +3766,8 @@ int original_stack = get_register(sp); // Compute position of stack on entry to generated code. int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); - if (OS::ActivationFrameAlignment() != 0) { - entry_stack &= -OS::ActivationFrameAlignment(); + if (base::OS::ActivationFrameAlignment() != 0) { + entry_stack &= -base::OS::ActivationFrameAlignment(); } // Store remaining arguments on stack, from low to high memory. 
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); diff -Nru nodejs-0.11.13/deps/v8/src/arm/simulator-arm.h nodejs-0.11.15/deps/v8/src/arm/simulator-arm.h --- nodejs-0.11.13/deps/v8/src/arm/simulator-arm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/simulator-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Declares a Simulator for ARM instructions if we are not generating a native @@ -36,7 +13,7 @@ #ifndef V8_ARM_SIMULATOR_ARM_H_ #define V8_ARM_SIMULATOR_ARM_H_ -#include "allocation.h" +#include "src/allocation.h" #if !defined(USE_SIMULATOR) // Running without a simulator on a native arm platform. @@ -60,9 +37,6 @@ (FUNCTION_CAST<arm_regexp_matcher>(entry)( \ p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - reinterpret_cast<TryCatch*>(try_catch_address) - // The stack limit beyond which we will throw stack overflow errors in // generated code. Because generated code on arm uses the C stack, we // just use the C stack limit. @@ -86,9 +60,9 @@ #else // !defined(USE_SIMULATOR) // Running with a simulator. -#include "constants-arm.h" -#include "hashmap.h" -#include "assembler.h" +#include "src/arm/constants-arm.h" +#include "src/assembler.h" +#include "src/hashmap.h" namespace v8 { namespace internal { @@ -288,7 +262,7 @@ inline int GetCarry() { return c_flag_ ? 1 : 0; - }; + } // Support for VFP. void Compute_FPSCR_Flags(double val1, double val2); @@ -459,10 +433,6 @@ Simulator::current(Isolate::Current())->Call( \ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - try_catch_address == NULL ? \ - NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)) - // The simulator has its own stack. Thus it has a different stack limit from // the C-based native code. Setting the c_limit to indicate a very small diff -Nru nodejs-0.11.13/deps/v8/src/arm/stub-cache-arm.cc nodejs-0.11.15/deps/v8/src/arm/stub-cache-arm.cc --- nodejs-0.11.13/deps/v8/src/arm/stub-cache-arm.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm/stub-cache-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM -#include "ic-inl.h" -#include "codegen.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -59,12 +36,12 @@ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); // Check the relative positions of the address fields. - ASSERT(value_off_addr > key_off_addr); - ASSERT((value_off_addr - key_off_addr) % 4 == 0); - ASSERT((value_off_addr - key_off_addr) < (256 * 4)); - ASSERT(map_off_addr > key_off_addr); - ASSERT((map_off_addr - key_off_addr) % 4 == 0); - ASSERT((map_off_addr - key_off_addr) < (256 * 4)); + DCHECK(value_off_addr > key_off_addr); + DCHECK((value_off_addr - key_off_addr) % 4 == 0); + DCHECK((value_off_addr - key_off_addr) < (256 * 4)); + DCHECK(map_off_addr > key_off_addr); + DCHECK((map_off_addr - key_off_addr) % 4 == 0); + DCHECK((map_off_addr - key_off_addr) < (256 * 4)); Label miss; Register base_addr = scratch; @@ -100,7 +77,7 @@ // It's a nice optimization if this constant is encodable in the bic insn. 
uint32_t mask = Code::kFlagsNotUsedInLookup; - ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); + DCHECK(__ ImmediateFitsAddrMode1Instruction(mask)); __ bic(flags_reg, flags_reg, Operand(mask)); __ cmp(flags_reg, Operand(flags)); __ b(ne, &miss); @@ -121,14 +98,11 @@ } -void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, - Label* miss_label, - Register receiver, - Handle<Name> name, - Register scratch0, - Register scratch1) { - ASSERT(name->IsUniqueName()); - ASSERT(!receiver.is(scratch0)); +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(name->IsUniqueName()); + DCHECK(!receiver.is(scratch0)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); @@ -189,27 +163,27 @@ // Make sure that code is valid. The multiplying code relies on the // entry size being 12. - ASSERT(sizeof(Entry) == 12); + DCHECK(sizeof(Entry) == 12); // Make sure the flags does not name a specific type. - ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); // Make sure that there are no register conflicts. - ASSERT(!scratch.is(receiver)); - ASSERT(!scratch.is(name)); - ASSERT(!extra.is(receiver)); - ASSERT(!extra.is(name)); - ASSERT(!extra.is(scratch)); - ASSERT(!extra2.is(receiver)); - ASSERT(!extra2.is(name)); - ASSERT(!extra2.is(scratch)); - ASSERT(!extra2.is(extra)); + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); + DCHECK(!extra2.is(receiver)); + DCHECK(!extra2.is(name)); + DCHECK(!extra2.is(scratch)); + DCHECK(!extra2.is(extra)); // Check scratch, extra and extra2 registers are valid. 
- ASSERT(!scratch.is(no_reg)); - ASSERT(!extra.is(no_reg)); - ASSERT(!extra2.is(no_reg)); - ASSERT(!extra3.is(no_reg)); + DCHECK(!scratch.is(no_reg)); + DCHECK(!extra.is(no_reg)); + DCHECK(!extra2.is(no_reg)); + DCHECK(!extra3.is(no_reg)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, @@ -225,10 +199,10 @@ uint32_t mask = kPrimaryTableSize - 1; // We shift out the last two bits because they are not part of the hash and // they are always 01 for maps. - __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize)); + __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); // Mask down the eor argument to the minimum to keep the immediate // ARM-encodable. - __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); + __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); // Prefer and_ to ubfx here because ubfx takes 2 cycles. __ and_(scratch, scratch, Operand(mask)); @@ -245,9 +219,9 @@ extra3); // Primary miss: Compute hash for secondary probe. - __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); + __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); uint32_t mask2 = kSecondaryTableSize - 1; - __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); + __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); __ and_(scratch, scratch, Operand(mask2)); // Probe the secondary table. @@ -270,30 +244,8 @@ } -void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype) { - // Load the global or builtins object from the current context. - __ ldr(prototype, - MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - // Load the native context from the global or builtins object. - __ ldr(prototype, - FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); - // Load the function from the native context. 
- __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index))); - // Load the initial map. The global functions all have initial maps. - __ ldr(prototype, - FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); - // Load the prototype from the initial map. - __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); -} - - -void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( - MacroAssembler* masm, - int index, - Register prototype, - Label* miss) { +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); // Get the global function with the given index. Handle<JSFunction> function( @@ -316,46 +268,9 @@ } -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - bool inobject, - int index, - Representation representation) { - ASSERT(!representation.IsDouble()); - int offset = index * kPointerSize; - if (!inobject) { - // Calculate the offset into the properties array. - offset = offset + FixedArray::kHeaderSize; - __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - src = dst; - } - __ ldr(dst, FieldMemOperand(src, offset)); -} - - -void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, - Register receiver, - Register scratch, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss_label); - - // Check that the object is a JS array. - __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); - __ b(ne, miss_label); - - // Load length directly from the JS array. 
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Ret(); -} - - -void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, - Register receiver, - Register scratch1, - Register scratch2, - Label* miss_label) { +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register scratch1, + Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); __ mov(r0, scratch1); __ Ret(); @@ -365,13 +280,11 @@ // Generate code to check that a global property cell is empty. Create // the property cell at compilation time if no cell exists for the // property. -void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, - Handle<JSGlobalObject> global, - Handle<Name> name, - Register scratch, - Label* miss) { +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); - ASSERT(cell->value()->IsTheHole()); + DCHECK(cell->value()->IsTheHole()); __ mov(scratch, Operand(cell)); __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -380,18 +293,120 @@ } -void StoreStubCompiler::GenerateNegativeHolderLookup( - MacroAssembler* masm, - Handle<JSObject> holder, - Register holder_reg, - Handle<Name> name, - Label* miss) { - if (holder->IsJSGlobalObject()) { - GenerateCheckPropertyCell( - masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); - } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { - GenerateDictionaryNegativeLookup( - masm, miss, holder_reg, name, scratch1(), scratch2()); +static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, + Register holder, Register name, + Handle<JSObject> holder_obj) { + 
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); + __ push(name); + Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); + Register scratch = name; + __ mov(scratch, Operand(interceptor)); + __ push(scratch); + __ push(receiver); + __ push(holder); +} + + +static void CompileCallLoadPropertyWithInterceptor( + MacroAssembler* masm, Register receiver, Register holder, Register name, + Handle<JSObject> holder_obj, IC::UtilityId id) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); +} + + +// Generate call to api function. +void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch_in, + bool is_store, int argc, Register* values) { + DCHECK(!receiver.is(scratch_in)); + __ push(receiver); + // Write the arguments to stack frame. + for (int i = 0; i < argc; i++) { + Register arg = values[argc - 1 - i]; + DCHECK(!receiver.is(arg)); + DCHECK(!scratch_in.is(arg)); + __ push(arg); + } + DCHECK(optimization.is_simple_api_call()); + + // Abi for CallApiFunctionStub. + Register callee = r0; + Register call_data = r4; + Register holder = r2; + Register api_function_address = r1; + + // Put holder in place. 
+ CallOptimization::HolderLookup holder_lookup; + Handle<JSObject> api_holder = + optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); + switch (holder_lookup) { + case CallOptimization::kHolderIsReceiver: + __ Move(holder, receiver); + break; + case CallOptimization::kHolderFound: + __ Move(holder, api_holder); + break; + case CallOptimization::kHolderNotFound: + UNREACHABLE(); + break; + } + + Isolate* isolate = masm->isolate(); + Handle<JSFunction> function = optimization.constant_function(); + Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); + Handle<Object> call_data_obj(api_call_info->data(), isolate); + + // Put callee in place. + __ Move(callee, function); + + bool call_data_undefined = false; + // Put call_data in place. + if (isolate->heap()->InNewSpace(*call_data_obj)) { + __ Move(call_data, api_call_info); + __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); + } else if (call_data_obj->IsUndefined()) { + call_data_undefined = true; + __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); + } else { + __ Move(call_data, call_data_obj); + } + + // Put api_function_address in place. + Address function_address = v8::ToCData<Address>(api_call_info->callback()); + ApiFunction fun(function_address); + ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; + ExternalReference ref = ExternalReference(&fun, type, masm->isolate()); + __ mov(api_function_address, Operand(ref)); + + // Jump to stub. 
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); + __ TailCallStub(&stub); +} + + +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ Jump(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ mov(this->name(), Operand(name)); } } @@ -400,19 +415,10 @@ // When leaving generated code after success, the receiver_reg and name_reg // may be clobbered. Upon branch to miss_label, the receiver and name // registers have their original values. -void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss_label, - Label* slow) { +void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register scratch3, Label* miss_label, Label* slow) { // r0 : value Label exit; @@ -420,10 +426,10 @@ DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); Representation representation = details.representation(); - ASSERT(!representation.IsNone()); + DCHECK(!representation.IsNone()); if (details.type() == CONSTANT) { - Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); + Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); __ Move(scratch1, constant); __ cmp(value_reg, scratch1); __ b(ne, miss_label); @@ -431,10 +437,27 @@ __ JumpIfNotSmi(value_reg, miss_label); } else if (representation.IsHeapObject()) { __ JumpIfSmi(value_reg, 
miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + if (!it.Done()) { + __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + while (true) { + __ CompareMap(scratch1, it.Current(), &do_store); + it.Advance(); + if (it.Done()) { + __ b(ne, miss_label); + break; + } + __ b(eq, &do_store); + } + __ bind(&do_store); + } } else if (representation.IsDouble()) { Label do_store, heap_number; - __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow); + __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex); + __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow, + TAG_RESULT, MUTABLE); __ JumpIfNotSmi(value_reg, &heap_number); __ SmiUntag(scratch1, value_reg); @@ -451,13 +474,12 @@ __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); } - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + // Stub never generated for objects that require access checks. + DCHECK(!transition->is_access_check_needed()); // Perform map transition for the receiver if necessary. if (details.type() == FIELD && - object->map()->unused_property_fields() == 0) { + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { // The properties must be extended before we can store the value. // We jump to a runtime call that extends the properties array. 
__ push(receiver_reg); @@ -465,9 +487,8 @@ __ Push(r2, r0); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), - masm->isolate()), - 3, - 1); + isolate()), + 3, 1); return; } @@ -486,7 +507,7 @@ OMIT_SMI_CHECK); if (details.type() == CONSTANT) { - ASSERT(value_reg.is(r0)); + DCHECK(value_reg.is(r0)); __ Ret(); return; } @@ -497,14 +518,14 @@ // Adjust for the number of properties stored in the object. Even in the // face of a transition we can use the old map here because the size of the // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); + index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. SmiCheck smi_check = representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); + int offset = transition->instance_size() + (index * kPointerSize); if (representation.IsDouble()) { __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); } else { @@ -554,281 +575,46 @@ } // Return the value (register r0). - ASSERT(value_reg.is(r0)); - __ bind(&exit); - __ Ret(); -} - - -// Generate StoreField code, value is passed in r0 register. -// When leaving generated code after success, the receiver_reg and name_reg -// may be clobbered. Upon branch to miss_label, the receiver and name -// registers have their original values. -void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { - // r0 : value - Label exit; - - // Stub never generated for non-global objects that require access - // checks. 
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - int index = lookup->GetFieldIndex().field_index(); - - // Adjust for the number of properties stored in the object. Even in the - // face of a transition we can use the old map here because the size of the - // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); - - Representation representation = lookup->representation(); - ASSERT(!representation.IsNone()); - if (representation.IsSmi()) { - __ JumpIfNotSmi(value_reg, miss_label); - } else if (representation.IsHeapObject()) { - __ JumpIfSmi(value_reg, miss_label); - } else if (representation.IsDouble()) { - // Load the double storage. - if (index < 0) { - int offset = object->map()->instance_size() + (index * kPointerSize); - __ ldr(scratch1, FieldMemOperand(receiver_reg, offset)); - } else { - __ ldr(scratch1, - FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - int offset = index * kPointerSize + FixedArray::kHeaderSize; - __ ldr(scratch1, FieldMemOperand(scratch1, offset)); - } - - // Store the value into the storage. - Label do_store, heap_number; - __ JumpIfNotSmi(value_reg, &heap_number); - __ SmiUntag(scratch2, value_reg); - __ vmov(s0, scratch2); - __ vcvt_f64_s32(d0, s0); - __ jmp(&do_store); - - __ bind(&heap_number); - __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, - miss_label, DONT_DO_SMI_CHECK); - __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); - - __ bind(&do_store); - __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); - // Return the value (register r0). - ASSERT(value_reg.is(r0)); - __ Ret(); - return; - } - - // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; - if (index < 0) { - // Set the property straight into the object. 
- int offset = object->map()->instance_size() + (index * kPointerSize); - __ str(value_reg, FieldMemOperand(receiver_reg, offset)); - - if (!representation.IsSmi()) { - // Skip updating write barrier if storing a smi. - __ JumpIfSmi(value_reg, &exit); - - // Update the write barrier for the array address. - // Pass the now unused name_reg as a scratch register. - __ mov(name_reg, value_reg); - __ RecordWriteField(receiver_reg, - offset, - name_reg, - scratch1, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } else { - // Write to the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - // Get the properties array - __ ldr(scratch1, - FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ str(value_reg, FieldMemOperand(scratch1, offset)); - - if (!representation.IsSmi()) { - // Skip updating write barrier if storing a smi. - __ JumpIfSmi(value_reg, &exit); - - // Update the write barrier for the array address. - // Ok to clobber receiver_reg and name_reg, since we return. - __ mov(name_reg, value_reg); - __ RecordWriteField(scratch1, - offset, - name_reg, - receiver_reg, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } - - // Return the value (register r0). 
- ASSERT(value_reg.is(r0)); + DCHECK(value_reg.is(r0)); __ bind(&exit); __ Ret(); } -void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { - if (!label->is_unused()) { - __ bind(label); - __ mov(this->name(), Operand(name)); - } -} - - -static void PushInterceptorArguments(MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj) { - STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); - STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); - STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); - STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); - STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); - __ push(name); - Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); - ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); - Register scratch = name; - __ mov(scratch, Operand(interceptor)); - __ push(scratch); - __ push(receiver); - __ push(holder); -} - - -static void CompileCallLoadPropertyWithInterceptor( - MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj, - IC::UtilityId id) { - PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallExternalReference( - ExternalReference(IC_Utility(id), masm->isolate()), - StubCache::kInterceptorArgsLength); -} - - -// Generate call to api function. -void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - Handle<Map> receiver_map, - Register receiver, - Register scratch_in, - bool is_store, - int argc, - Register* values) { - ASSERT(!receiver.is(scratch_in)); - __ push(receiver); - // Write the arguments to stack frame. - for (int i = 0; i < argc; i++) { - Register arg = values[argc-1-i]; - ASSERT(!receiver.is(arg)); - ASSERT(!scratch_in.is(arg)); - __ push(arg); - } - ASSERT(optimization.is_simple_api_call()); - - // Abi for CallApiFunctionStub. 
- Register callee = r0; - Register call_data = r4; - Register holder = r2; - Register api_function_address = r1; - - // Put holder in place. - CallOptimization::HolderLookup holder_lookup; - Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( - receiver_map, - &holder_lookup); - switch (holder_lookup) { - case CallOptimization::kHolderIsReceiver: - __ Move(holder, receiver); - break; - case CallOptimization::kHolderFound: - __ Move(holder, api_holder); - break; - case CallOptimization::kHolderNotFound: - UNREACHABLE(); +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + __ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + while (true) { + __ CompareMap(scratch1(), it.Current(), &do_store); + it.Advance(); + if (it.Done()) { + __ b(ne, miss_label); break; + } + __ b(eq, &do_store); } + __ bind(&do_store); - Isolate* isolate = masm->isolate(); - Handle<JSFunction> function = optimization.constant_function(); - Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data_obj(api_call_info->data(), isolate); - - // Put callee in place. - __ Move(callee, function); - - bool call_data_undefined = false; - // Put call_data in place. - if (isolate->heap()->InNewSpace(*call_data_obj)) { - __ Move(call_data, api_call_info); - __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); - } else if (call_data_obj->IsUndefined()) { - call_data_undefined = true; - __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); - } else { - __ Move(call_data, call_data_obj); - } - - // Put api_function_address in place. 
- Address function_address = v8::ToCData<Address>(api_call_info->callback()); - ApiFunction fun(function_address); - ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; - ExternalReference ref = ExternalReference(&fun, - type, - masm->isolate()); - __ mov(api_function_address, Operand(ref)); - - // Jump to stub. - CallApiFunctionStub stub(is_store, call_data_undefined, argc); - __ TailCallStub(&stub); + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); } -void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { - __ Jump(code, RelocInfo::CODE_TARGET); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - - -Register StubCompiler::CheckPrototypes(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Register holder_reg, - Register scratch1, - Register scratch2, - Handle<Name> name, - Label* miss, - PrototypeCheckType check) { - Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); // Make sure there's no overlap between holder and object registers. - ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && !scratch2.is(scratch1)); // Keep track of the current object in register reg. 
@@ -836,10 +622,12 @@ int depth = 0; Handle<JSObject> current = Handle<JSObject>::null(); - if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant()); + if (type()->IsConstant()) { + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); + } Handle<JSObject> prototype = Handle<JSObject>::null(); Handle<Map> current_map = receiver_map; - Handle<Map> holder_map(holder->map()); + Handle<Map> holder_map(holder()->map()); // Traverse the prototype chain and check the maps in the prototype chain for // fast and global objects or do negative lookup for normal objects. while (!current_map.is_identical_to(holder_map)) { @@ -847,19 +635,19 @@ // Only global objects and objects that do not require access // checks are allowed in stubs. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); prototype = handle(JSObject::cast(current_map->prototype())); if (current_map->is_dictionary_map() && - !current_map->IsJSGlobalObjectMap() && - !current_map->IsJSGlobalProxyMap()) { + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. if (!name->IsUniqueName()) { - ASSERT(name->IsString()); + DCHECK(name->IsString()); name = factory()->InternalizeString(Handle<String>::cast(name)); } - ASSERT(current.is_null() || - current->property_dictionary()->FindEntry(*name) == + DCHECK(current.is_null() || + current->property_dictionary()->FindEntry(name) == NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, @@ -880,6 +668,9 @@ // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). 
if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch2, miss); } else if (current_map->IsJSGlobalObjectMap()) { @@ -890,12 +681,15 @@ reg = holder_reg; // From now on the object will be in holder_reg. - if (heap()->InNewSpace(*prototype)) { - // The prototype is in new space; we cannot store a reference to it - // in the code. Load it from the map. + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. + // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = + heap()->InNewSpace(*prototype) || depth == 1; + if (load_prototype_from_map) { __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); } else { - // The prototype is in old space; load it directly. __ mov(reg, Operand(prototype)); } } @@ -914,7 +708,7 @@ } // Perform security check for access to the global object. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, miss); @@ -925,7 +719,7 @@ } -void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ b(&success); @@ -936,92 +730,26 @@ } -void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ b(&success); - GenerateRestoreName(masm(), miss, name); + GenerateRestoreName(miss, name); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } } -Register LoadStubCompiler::CallbackHandlerFrontend( - Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - 
Handle<Object> callback) { - Label miss; - - Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); - - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - ASSERT(!reg.is(scratch2())); - ASSERT(!reg.is(scratch3())); - ASSERT(!reg.is(scratch4())); - - // Load the properties dictionary. - Register dictionary = scratch4(); - __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - NameDictionaryLookupStub::GeneratePositiveLookup(masm(), - &miss, - &probe_done, - dictionary, - this->name(), - scratch2(), - scratch3()); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // pointer into the dictionary. Check that the value is the callback. - Register pointer = scratch3(); - const int kElementsStartOffset = NameDictionary::kHeaderSize + - NameDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); - __ cmp(scratch2(), Operand(callback)); - __ b(ne, &miss); - } - - HandlerFrontendFooter(name, &miss); - return reg; -} - - -void LoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - if (!reg.is(receiver())) __ mov(receiver(), reg); - if (kind() == Code::LOAD_IC) { - LoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } else { - KeyedLoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } -} - - -void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. 
__ Move(r0, value); __ Ret(); } -void LoadStubCompiler::GenerateLoadCallback( - Register reg, - Handle<ExecutableAccessorInfo> callback) { +void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { // Build AccessorInfo::args_ list on the stack and push property name below // the exit frame to make GC aware of them and store pointers to them. STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); @@ -1031,9 +759,9 @@ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); - ASSERT(!scratch2().is(reg)); - ASSERT(!scratch3().is(reg)); - ASSERT(!scratch4().is(reg)); + DCHECK(!scratch2().is(reg)); + DCHECK(!scratch3().is(reg)); + DCHECK(!scratch4().is(reg)); __ push(receiver()); if (heap()->InNewSpace(callback->data())) { __ Move(scratch3(), callback); @@ -1061,19 +789,16 @@ ExternalReference ref = ExternalReference(&fun, type, isolate()); __ mov(getter_address_reg, Operand(ref)); - CallApiGetterStub stub; + CallApiGetterStub stub(isolate()); __ TailCallStub(&stub); } -void LoadStubCompiler::GenerateLoadInterceptor( - Register holder_reg, - Handle<Object> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Handle<Name> name) { - ASSERT(interceptor_holder->HasNamedInterceptor()); - ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added @@ -1084,10 +809,12 @@ compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsExecutableAccessorInfo()) 
{ - ExecutableAccessorInfo* callback = - ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); - compile_followup_inline = callback->getter() != NULL && - callback->IsCompatibleReceiver(*object); + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); } } @@ -1095,13 +822,13 @@ // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. - bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); + bool must_perfrom_prototype_check = *holder() != lookup->holder(); bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); @@ -1118,7 +845,7 @@ // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor( - masm(), receiver(), holder_reg, this->name(), interceptor_holder, + masm(), receiver(), holder_reg, this->name(), holder(), IC::kLoadPropertyWithInterceptorOnly); // Check if interceptor provided a value for property. If it's @@ -1139,44 +866,26 @@ // Leave the internal frame. } - GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); + GenerateLoadPostInterceptor(holder_reg, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. 
// Check that the maps haven't changed. - PushInterceptorArguments(masm(), receiver(), holder_reg, - this->name(), interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor), isolate()); - __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); } } -void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { - Label success; - // Check that the object is a boolean. - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(object, ip); - __ b(eq, &success); - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(object, ip); - __ b(ne, miss); - __ bind(&success); -} - - -Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, Handle<ExecutableAccessorInfo> callback) { - Register holder_reg = HandlerFrontend( - IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); - - // Stub never generated for non-global objects that require access checks. 
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); + Register holder_reg = Frontend(receiver(), name); __ push(receiver()); // receiver __ push(holder_reg); @@ -1199,10 +908,8 @@ #define __ ACCESS_MASM(masm) -void StoreStubCompiler::GenerateStoreViaSetter( - MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, Handle<JSFunction> setter) { // ----------- S t a t e ------------- // -- lr : return address @@ -1218,8 +925,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. __ ldr(receiver, - FieldMemOperand( - receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ Push(receiver, value()); ParameterCount actual(1); @@ -1246,14 +952,13 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> object, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( Handle<Name> name) { __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); __ TailCallExternalReference(store_ic_property, 3, 1); // Return the generated code. @@ -1261,62 +966,35 @@ } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name) { - NonexistentHandlerFrontend(type, last, name); - - // Return undefined if maps of the full prototype chain are still the - // same and no global property with this name contains a value. - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - __ Ret(); - - // Return the generated code. 
- return GetCode(kind(), Code::FAST, name); -} - - -Register* LoadStubCompiler::registers() { +Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4. - static Register registers[] = { r0, r2, r3, r1, r4, r5 }; + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, r3, r0, r4, r5 }; return registers; } -Register* KeyedLoadStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3, scratch4. - static Register registers[] = { r1, r0, r2, r3, r4, r5 }; - return registers; -} - - -Register StoreStubCompiler::value() { - return r0; -} - - -Register* StoreStubCompiler::registers() { +Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { r1, r2, r3, r4, r5 }; + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + DCHECK(r3.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, r3, r4, r5 }; return registers; } -Register* KeyedStoreStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { r2, r1, r3, r4, r5 }; - return registers; -} +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } #undef __ #define __ ACCESS_MASM(masm) -void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, - Handle<JSFunction> getter) { +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { // ----------- S t a t e ------------- // -- r0 : receiver // -- r2 : name @@ -1330,8 +1008,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. 
__ ldr(receiver, - FieldMemOperand( - receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ push(receiver); ParameterCount actual(0); @@ -1355,57 +1032,61 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadGlobal( - Handle<HeapType> type, - Handle<GlobalObject> global, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete) { +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { Label miss; - HandlerFrontendHeader(type, receiver(), global, name, &miss); + FrontendHeader(receiver(), name, &miss); // Get the value from the cell. - __ mov(r3, Operand(cell)); - __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset)); + Register result = StoreIC::ValueRegister(); + __ mov(result, Operand(cell)); + __ ldr(result, FieldMemOperand(result, Cell::kValueOffset)); // Check for deleted property if property can actually be deleted. - if (!is_dont_delete) { + if (is_configurable) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r4, ip); + __ cmp(result, ip); __ b(eq, &miss); } Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); - __ mov(r0, r4); __ Ret(); - HandlerFrontendFooter(name, &miss); + FrontendFooter(name, &miss); // Return the generated code. 
return GetCode(kind(), Code::NORMAL, name); } -Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( - TypeHandleList* types, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; if (check == PROPERTY && (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - __ cmp(this->name(), Operand(name)); - __ b(ne, &miss); + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ cmp(this->name(), Operand(name)); + __ b(ne, &miss); + } } Label number_case; Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; __ JumpIfSmi(receiver(), smi_target); + // Polymorphic keyed stores may use the map register Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); int receiver_count = types->length(); int number_of_handled_maps = 0; @@ -1418,13 +1099,13 @@ __ mov(ip, Operand(map)); __ cmp(map_reg, ip); if (type->Is(HeapType::Number())) { - ASSERT(!number_case.is_unused()); + DCHECK(!number_case.is_unused()); __ bind(&number_case); } __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); } } - ASSERT(number_of_handled_maps != 0); + DCHECK(number_of_handled_maps != 0); __ bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); @@ -1432,24 +1113,12 @@ // Return the generated code. InlineCacheState state = number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC; - return GetICCode(kind(), type, name, state); + return GetCode(kind(), type, name, state); } -void StoreStubCompiler::GenerateStoreArrayLength() { - // Prepare tail call to StoreIC_ArrayLength. 
- __ Push(receiver(), value()); - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), - masm()->isolate()); - __ TailCallExternalReference(ref, 2, 1); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { Label miss; __ JumpIfSmi(receiver(), &miss); @@ -1474,8 +1143,7 @@ TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetICCode( - kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } @@ -1483,21 +1151,19 @@ #define __ ACCESS_MASM(masm) -void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( +void ElementHandlerCompiler::GenerateLoadDictionaryElement( MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + // The return address is in lr. 
Label slow, miss; - Register key = r0; - Register receiver = r1; + Register key = LoadIC::NameRegister(); + Register receiver = LoadIC::ReceiverRegister(); + DCHECK(receiver.is(r1)); + DCHECK(key.is(r2)); - __ UntagAndJumpIfNotSmi(r2, key, &miss); + __ UntagAndJumpIfNotSmi(r6, key, &miss); __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); + __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5); __ Ret(); __ bind(&slow); @@ -1505,21 +1171,11 @@ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, r2, r3); - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); // Miss case, call the runtime. __ bind(&miss); - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/assembler-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/assembler-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/assembler-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/assembler-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -26,49 +26,64 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 #define ARM64_DEFINE_REG_STATICS -#include "arm64/assembler-arm64-inl.h" +#include "src/arm64/assembler-arm64-inl.h" +#include "src/base/cpu.h" namespace v8 { namespace internal { // ----------------------------------------------------------------------------- -// CpuFeatures utilities (for V8 compatibility). +// CpuFeatures implementation. 
-ExternalReference ExternalReference::cpu_features() { - return ExternalReference(&CpuFeatures::supported_); +void CpuFeatures::ProbeImpl(bool cross_compile) { + if (cross_compile) { + // Always align csp in cross compiled code - this is safe and ensures that + // csp will always be aligned if it is enabled by probing at runtime. + if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP; + } else { + base::CPU cpu; + if (FLAG_enable_always_align_csp && + (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) { + supported_ |= 1u << ALWAYS_ALIGN_CSP; + } + } } +void CpuFeatures::PrintTarget() { } +void CpuFeatures::PrintFeatures() { } + + // ----------------------------------------------------------------------------- // CPURegList utilities. CPURegister CPURegList::PopLowestIndex() { - ASSERT(IsValid()); + DCHECK(IsValid()); if (IsEmpty()) { return NoCPUReg; } int index = CountTrailingZeros(list_, kRegListSizeInBits); - ASSERT((1 << index) & list_); + DCHECK((1 << index) & list_); Remove(index); return CPURegister::Create(index, size_, type_); } CPURegister CPURegList::PopHighestIndex() { - ASSERT(IsValid()); + DCHECK(IsValid()); if (IsEmpty()) { return NoCPUReg; } int index = CountLeadingZeros(list_, kRegListSizeInBits); index = kRegListSizeInBits - 1 - index; - ASSERT((1 << index) & list_); + DCHECK((1 << index) & list_); Remove(index); return CPURegister::Create(index, size_, type_); } @@ -80,8 +95,8 @@ } else if (type() == CPURegister::kFPRegister) { Remove(GetCalleeSavedFP(RegisterSizeInBits())); } else { - ASSERT(type() == CPURegister::kNoRegister); - ASSERT(IsEmpty()); + DCHECK(type() == CPURegister::kNoRegister); + DCHECK(IsEmpty()); // The list must already be empty, so do nothing. } } @@ -176,7 +191,7 @@ } // Indicate that code has changed. 
- CPU::FlushICache(pc_, instruction_count * kInstructionSize); + CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize); } @@ -212,7 +227,7 @@ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; - for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) { + for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) { if (regs[i].IsRegister()) { number_of_valid_regs++; unique_regs |= regs[i].Bit(); @@ -220,7 +235,7 @@ number_of_valid_fpregs++; unique_fpregs |= regs[i].Bit(); } else { - ASSERT(!regs[i].IsValid()); + DCHECK(!regs[i].IsValid()); } } @@ -229,8 +244,8 @@ int number_of_unique_fpregs = CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte); - ASSERT(number_of_valid_regs >= number_of_unique_regs); - ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs); + DCHECK(number_of_valid_regs >= number_of_unique_regs); + DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs); return (number_of_valid_regs != number_of_unique_regs) || (number_of_valid_fpregs != number_of_unique_fpregs); @@ -241,7 +256,7 @@ const CPURegister& reg3, const CPURegister& reg4, const CPURegister& reg5, const CPURegister& reg6, const CPURegister& reg7, const CPURegister& reg8) { - ASSERT(reg1.IsValid()); + DCHECK(reg1.IsValid()); bool match = true; match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1); match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); @@ -254,41 +269,285 @@ } -void Operand::initialize_handle(Handle<Object> handle) { +void Immediate::InitializeHandle(Handle<Object> handle) { AllowDeferredHandleDereference using_raw_address; // Verify all Objects referred by code are NOT in new space. 
Object* obj = *handle; if (obj->IsHeapObject()) { - ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); - immediate_ = reinterpret_cast<intptr_t>(handle.location()); + DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); + value_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } else { STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t)); - immediate_ = reinterpret_cast<intptr_t>(obj); + value_ = reinterpret_cast<intptr_t>(obj); rmode_ = RelocInfo::NONE64; } } -bool Operand::NeedsRelocation() const { - if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); +bool Operand::NeedsRelocation(const Assembler* assembler) const { + RelocInfo::Mode rmode = immediate_.rmode(); + + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { + return assembler->serializer_enabled(); + } + + return !RelocInfo::IsNone(rmode); +} + + +// Constant Pool. +void ConstPool::RecordEntry(intptr_t data, + RelocInfo::Mode mode) { + DCHECK(mode != RelocInfo::COMMENT && + mode != RelocInfo::POSITION && + mode != RelocInfo::STATEMENT_POSITION && + mode != RelocInfo::CONST_POOL && + mode != RelocInfo::VENEER_POOL && + mode != RelocInfo::CODE_AGE_SEQUENCE); + + uint64_t raw_data = static_cast<uint64_t>(data); + int offset = assm_->pc_offset(); + if (IsEmpty()) { + first_use_ = offset; + } + + std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset); + if (CanBeShared(mode)) { + shared_entries_.insert(entry); + if (shared_entries_.count(entry.first) == 1) { + shared_entries_count++; } + } else { + unique_entries_.push_back(entry); + } + + if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) { + // Request constant pool emission after the next instruction. 
+ assm_->SetNextConstPoolCheckIn(1); + } +} + + +int ConstPool::DistanceToFirstUse() { + DCHECK(first_use_ >= 0); + return assm_->pc_offset() - first_use_; +} + + +int ConstPool::MaxPcOffset() { + // There are no pending entries in the pool so we can never get out of + // range. + if (IsEmpty()) return kMaxInt; + + // Entries are not necessarily emitted in the order they are added so in the + // worst case the first constant pool use will be accessing the last entry. + return first_use_ + kMaxLoadLiteralRange - WorstCaseSize(); +} + + +int ConstPool::WorstCaseSize() { + if (IsEmpty()) return 0; + + // Max size prologue: + // b over + // ldr xzr, #pool_size + // blr xzr + // nop + // All entries are 64-bit for now. + return 4 * kInstructionSize + EntryCount() * kPointerSize; +} + + +int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) { + if (IsEmpty()) return 0; + + // Prologue is: + // b over ;; if require_jump + // ldr xzr, #pool_size + // blr xzr + // nop ;; if not 64-bit aligned + int prologue_size = require_jump ? kInstructionSize : 0; + prologue_size += 2 * kInstructionSize; + prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ? + 0 : kInstructionSize; + + // All entries are 64-bit for now. + return prologue_size + EntryCount() * kPointerSize; +} + + +void ConstPool::Emit(bool require_jump) { + DCHECK(!assm_->is_const_pool_blocked()); + // Prevent recursive pool emission and protect from veneer pools. + Assembler::BlockPoolsScope block_pools(assm_); + + int size = SizeIfEmittedAtCurrentPc(require_jump); + Label size_check; + assm_->bind(&size_check); + + assm_->RecordConstPool(size); + // Emit the constant pool. It is preceded by an optional branch if + // require_jump and a header which will: + // 1) Encode the size of the constant pool, for use by the disassembler. + // 2) Terminate the program, to try to prevent execution from accidentally + // flowing into the constant pool. + // 3) align the pool entries to 64-bit. 
+ // The header is therefore made of up to three arm64 instructions: + // ldr xzr, #<size of the constant pool in 32-bit words> + // blr xzr + // nop + // + // If executed, the header will likely segfault and lr will point to the + // instruction following the offending blr. + // TODO(all): Make the alignment part less fragile. Currently code is + // allocated as a byte array so there are no guarantees the alignment will + // be preserved on compaction. Currently it works as allocation seems to be + // 64-bit aligned. + + // Emit branch if required + Label after_pool; + if (require_jump) { + assm_->b(&after_pool); + } + + // Emit the header. + assm_->RecordComment("[ Constant Pool"); + EmitMarker(); + EmitGuard(); + assm_->Align(8); + + // Emit constant pool entries. + // TODO(all): currently each relocated constant is 64 bits, consider adding + // support for 32-bit entries. + EmitEntries(); + assm_->RecordComment("]"); + + if (after_pool.is_linked()) { + assm_->bind(&after_pool); + } + + DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) == + static_cast<unsigned>(size)); +} + + +void ConstPool::Clear() { + shared_entries_.clear(); + shared_entries_count = 0; + unique_entries_.clear(); + first_use_ = -1; +} + + +bool ConstPool::CanBeShared(RelocInfo::Mode mode) { + // Constant pool currently does not support 32-bit entries. + DCHECK(mode != RelocInfo::NONE32); + + return RelocInfo::IsNone(mode) || + (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL)); +} + + +void ConstPool::EmitMarker() { + // A constant pool size is expressed in number of 32-bits words. + // Currently all entries are 64-bit. + // + 1 is for the crash guard. + // + 0/1 for alignment. + int word_count = EntryCount() * 2 + 1 + + (IsAligned(assm_->pc_offset(), 8) ? 
0 : 1); + assm_->Emit(LDR_x_lit | + Assembler::ImmLLiteral(word_count) | + Assembler::Rt(xzr)); +} + + +MemOperand::PairResult MemOperand::AreConsistentForPair( + const MemOperand& operandA, + const MemOperand& operandB, + int access_size_log2) { + DCHECK(access_size_log2 >= 0); + DCHECK(access_size_log2 <= 3); + // Step one: check that they share the same base, that the mode is Offset + // and that the offset is a multiple of access size. + if (!operandA.base().Is(operandB.base()) || + (operandA.addrmode() != Offset) || + (operandB.addrmode() != Offset) || + ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) { + return kNotPair; + } + // Step two: check that the offsets are contiguous and that the range + // is OK for ldp/stp. + if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) && + is_int7(operandA.offset() >> access_size_log2)) { + return kPairAB; + } + if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) && + is_int7(operandB.offset() >> access_size_log2)) { + return kPairBA; + } + return kNotPair; +} + + +void ConstPool::EmitGuard() { +#ifdef DEBUG + Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc()); + DCHECK(instr->preceding()->IsLdrLiteralX() && + instr->preceding()->Rt() == xzr.code()); #endif - return Serializer::enabled(); + assm_->EmitPoolGuard(); +} + + +void ConstPool::EmitEntries() { + DCHECK(IsAligned(assm_->pc_offset(), 8)); + + typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator; + SharedEntriesIterator value_it; + // Iterate through the keys (constant pool values). + for (value_it = shared_entries_.begin(); + value_it != shared_entries_.end(); + value_it = shared_entries_.upper_bound(value_it->first)) { + std::pair<SharedEntriesIterator, SharedEntriesIterator> range; + uint64_t data = value_it->first; + range = shared_entries_.equal_range(data); + SharedEntriesIterator offset_it; + // Iterate through the offsets of a given key. 
+ for (offset_it = range.first; offset_it != range.second; offset_it++) { + Instruction* instr = assm_->InstructionAt(offset_it->second); + + // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. + DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); + instr->SetImmPCOffsetTarget(assm_->pc()); + } + assm_->dc64(data); } + shared_entries_.clear(); + shared_entries_count = 0; - return !RelocInfo::IsNone(rmode_); + // Emit unique entries. + std::vector<std::pair<uint64_t, int> >::const_iterator unique_it; + for (unique_it = unique_entries_.begin(); + unique_it != unique_entries_.end(); + unique_it++) { + Instruction* instr = assm_->InstructionAt(unique_it->second); + + // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. + DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); + instr->SetImmPCOffsetTarget(assm_->pc()); + assm_->dc64(unique_it->first); + } + unique_entries_.clear(); + first_use_ = -1; } // Assembler - Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) : AssemblerBase(isolate, buffer, buffer_size), + constpool_(this), recorded_ast_id_(TypeFeedbackId::None()), unresolved_branches_(), positions_recorder_(this) { @@ -299,28 +558,27 @@ Assembler::~Assembler() { - ASSERT(num_pending_reloc_info_ == 0); - ASSERT(const_pool_blocked_nesting_ == 0); - ASSERT(veneer_pool_blocked_nesting_ == 0); + DCHECK(constpool_.IsEmpty()); + DCHECK(const_pool_blocked_nesting_ == 0); + DCHECK(veneer_pool_blocked_nesting_ == 0); } void Assembler::Reset() { #ifdef DEBUG - ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); - ASSERT(const_pool_blocked_nesting_ == 0); - ASSERT(veneer_pool_blocked_nesting_ == 0); - ASSERT(unresolved_branches_.empty()); + DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); + DCHECK(const_pool_blocked_nesting_ == 0); + DCHECK(veneer_pool_blocked_nesting_ == 0); + DCHECK(unresolved_branches_.empty()); memset(buffer_, 0, pc_ - buffer_); #endif pc_ = buffer_; 
reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), reinterpret_cast<byte*>(pc_)); - num_pending_reloc_info_ = 0; + constpool_.Clear(); next_constant_pool_check_ = 0; next_veneer_pool_check_ = kMaxInt; no_const_pool_before_ = 0; - first_const_pool_use_ = -1; ClearRecordedAstId(); } @@ -328,7 +586,7 @@ void Assembler::GetCode(CodeDesc* desc) { // Emit constant pool if necessary. CheckConstPool(true, false); - ASSERT(num_pending_reloc_info_ == 0); + DCHECK(constpool_.IsEmpty()); // Set up code descriptor. if (desc) { @@ -343,7 +601,7 @@ void Assembler::Align(int m) { - ASSERT(m >= 4 && IsPowerOf2(m)); + DCHECK(m >= 4 && IsPowerOf2(m)); while ((pc_offset() & (m - 1)) != 0) { nop(); } @@ -371,7 +629,7 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch, Label* label, Instruction* label_veneer) { - ASSERT(label->is_linked()); + DCHECK(label->is_linked()); CheckLabelLinkChain(label); @@ -387,7 +645,7 @@ link = next_link; } - ASSERT(branch == link); + DCHECK(branch == link); next_link = branch->ImmPCOffsetTarget(); if (branch == prev_link) { @@ -453,8 +711,10 @@ // that are linked to this label will be updated to point to the newly-bound // label. - ASSERT(!label->is_near_linked()); - ASSERT(!label->is_bound()); + DCHECK(!label->is_near_linked()); + DCHECK(!label->is_bound()); + + DeleteUnresolvedBranchInfoForLabel(label); // If the label is linked, the link chain looks something like this: // @@ -475,11 +735,11 @@ CheckLabelLinkChain(label); - ASSERT(linkoffset >= 0); - ASSERT(linkoffset < pc_offset()); - ASSERT((linkoffset > prevlinkoffset) || + DCHECK(linkoffset >= 0); + DCHECK(linkoffset < pc_offset()); + DCHECK((linkoffset > prevlinkoffset) || (linkoffset - prevlinkoffset == kStartOfLabelLinkChain)); - ASSERT(prevlinkoffset >= 0); + DCHECK(prevlinkoffset >= 0); // Update the link to point to the label. 
link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); @@ -495,15 +755,13 @@ } label->bind_to(pc_offset()); - ASSERT(label->is_bound()); - ASSERT(!label->is_linked()); - - DeleteUnresolvedBranchInfoForLabel(label); + DCHECK(label->is_bound()); + DCHECK(!label->is_linked()); } int Assembler::LinkAndGetByteOffsetTo(Label* label) { - ASSERT(sizeof(*pc_) == 1); + DCHECK(sizeof(*pc_) == 1); CheckLabelLinkChain(label); int offset; @@ -518,7 +776,7 @@ // Note that offset can be zero for self-referential instructions. (This // could be useful for ADR, for example.) offset = label->pos() - pc_offset(); - ASSERT(offset <= 0); + DCHECK(offset <= 0); } else { if (label->is_linked()) { // The label is linked, so the referring instruction should be added onto @@ -527,7 +785,7 @@ // In this case, label->pos() returns the offset of the last linked // instruction from the start of the buffer. offset = label->pos() - pc_offset(); - ASSERT(offset != kStartOfLabelLinkChain); + DCHECK(offset != kStartOfLabelLinkChain); // Note that the offset here needs to be PC-relative only so that the // first instruction in a buffer can link to an unbound label. Otherwise, // the offset would be 0 for this case, and 0 is reserved for @@ -545,21 +803,50 @@ } +void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) { + DCHECK(label->is_linked()); + CheckLabelLinkChain(label); + + int link_offset = label->pos(); + int link_pcoffset; + bool end_of_chain = false; + + while (!end_of_chain) { + Instruction * link = InstructionAt(link_offset); + link_pcoffset = link->ImmPCOffset(); + + // ADR instructions are not handled by veneers. 
+ if (link->IsImmBranch()) { + int max_reachable_pc = InstructionOffset(link) + + Instruction::ImmBranchRange(link->BranchType()); + typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it; + std::pair<unresolved_info_it, unresolved_info_it> range; + range = unresolved_branches_.equal_range(max_reachable_pc); + unresolved_info_it it; + for (it = range.first; it != range.second; ++it) { + if (it->second.pc_offset_ == link_offset) { + unresolved_branches_.erase(it); + break; + } + } + } + + end_of_chain = (link_pcoffset == 0); + link_offset = link_offset + link_pcoffset; + } +} + + void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { if (unresolved_branches_.empty()) { - ASSERT(next_veneer_pool_check_ == kMaxInt); + DCHECK(next_veneer_pool_check_ == kMaxInt); return; } - // Branches to this label will be resolved when the label is bound below. - std::multimap<int, FarBranchInfo>::iterator it_tmp, it; - it = unresolved_branches_.begin(); - while (it != unresolved_branches_.end()) { - it_tmp = it++; - if (it_tmp->second.label_ == label) { - CHECK(it_tmp->first >= pc_offset()); - unresolved_branches_.erase(it_tmp); - } + if (label->is_linked()) { + // Branches to this label will be resolved when the label is bound, normally + // just after all the associated info has been deleted. + DeleteUnresolvedBranchInfoForLabelTraverse(label); } if (unresolved_branches_.empty()) { next_veneer_pool_check_ = kMaxInt; @@ -582,8 +869,7 @@ void Assembler::EndBlockConstPool() { if (--const_pool_blocked_nesting_ == 0) { // Check the constant pool hasn't been blocked for too long. - ASSERT((num_pending_reloc_info_ == 0) || - (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool))); + DCHECK(pc_offset() < constpool_.MaxPcOffset()); // Two cases: // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is // still blocked @@ -608,7 +894,7 @@ // It is still worth asserting the marker is complete. 
// 4: blr xzr - ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() && + DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() && instr->following()->Rn() == xzr.code())); return result; @@ -642,13 +928,6 @@ } -void Assembler::ConstantPoolMarker(uint32_t size) { - ASSERT(is_const_pool_blocked()); - // + 1 is for the crash guard. - Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr)); -} - - void Assembler::EmitPoolGuard() { // We must generate only one instruction as this is used in scopes that // control the size of the code generated. @@ -656,18 +935,6 @@ } -void Assembler::ConstantPoolGuard() { -#ifdef DEBUG - // Currently this is only used after a constant pool marker. - ASSERT(is_const_pool_blocked()); - Instruction* instr = reinterpret_cast<Instruction*>(pc_); - ASSERT(instr->preceding()->IsLdrLiteralX() && - instr->preceding()->Rt() == xzr.code()); -#endif - EmitPoolGuard(); -} - - void Assembler::StartBlockVeneerPool() { ++veneer_pool_blocked_nesting_; } @@ -676,7 +943,7 @@ void Assembler::EndBlockVeneerPool() { if (--veneer_pool_blocked_nesting_ == 0) { // Check the veneer pool hasn't been blocked for too long. - ASSERT(unresolved_branches_.empty() || + DCHECK(unresolved_branches_.empty() || (pc_offset() < unresolved_branches_first_limit())); } } @@ -684,24 +951,24 @@ void Assembler::br(const Register& xn) { positions_recorder()->WriteRecordedPositions(); - ASSERT(xn.Is64Bits()); + DCHECK(xn.Is64Bits()); Emit(BR | Rn(xn)); } void Assembler::blr(const Register& xn) { positions_recorder()->WriteRecordedPositions(); - ASSERT(xn.Is64Bits()); + DCHECK(xn.Is64Bits()); // The pattern 'blr xzr' is used as a guard to detect when execution falls // through the constant pool. It should not be emitted. 
- ASSERT(!xn.Is(xzr)); + DCHECK(!xn.Is(xzr)); Emit(BLR | Rn(xn)); } void Assembler::ret(const Register& xn) { positions_recorder()->WriteRecordedPositions(); - ASSERT(xn.Is64Bits()); + DCHECK(xn.Is64Bits()); Emit(RET | Rn(xn)); } @@ -772,7 +1039,7 @@ unsigned bit_pos, int imm14) { positions_recorder()->WriteRecordedPositions(); - ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); + DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); } @@ -789,7 +1056,7 @@ unsigned bit_pos, int imm14) { positions_recorder()->WriteRecordedPositions(); - ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); + DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); } @@ -803,7 +1070,7 @@ void Assembler::adr(const Register& rd, int imm21) { - ASSERT(rd.Is64Bits()); + DCHECK(rd.Is64Bits()); Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd)); } @@ -972,8 +1239,8 @@ void Assembler::lslv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); } @@ -981,8 +1248,8 @@ void Assembler::lsrv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); } @@ -990,8 +1257,8 @@ void Assembler::asrv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == 
rm.SizeInBits()); Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); } @@ -999,8 +1266,8 @@ void Assembler::rorv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); } @@ -1010,7 +1277,7 @@ const Register& rn, unsigned immr, unsigned imms) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); Emit(SF(rd) | BFM | N | ImmR(immr, rd.SizeInBits()) | @@ -1023,7 +1290,7 @@ const Register& rn, unsigned immr, unsigned imms) { - ASSERT(rd.Is64Bits() || rn.Is32Bits()); + DCHECK(rd.Is64Bits() || rn.Is32Bits()); Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); Emit(SF(rd) | SBFM | N | ImmR(immr, rd.SizeInBits()) | @@ -1036,7 +1303,7 @@ const Register& rn, unsigned immr, unsigned imms) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); Emit(SF(rd) | UBFM | N | ImmR(immr, rd.SizeInBits()) | @@ -1049,8 +1316,8 @@ const Register& rn, const Register& rm, unsigned lsb) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd)); @@ -1090,34 +1357,34 @@ void Assembler::cset(const Register &rd, Condition cond) { - ASSERT((cond != al) && (cond != nv)); + DCHECK((cond != al) && (cond != nv)); Register zr = AppropriateZeroRegFor(rd); - csinc(rd, zr, zr, InvertCondition(cond)); + csinc(rd, zr, zr, NegateCondition(cond)); } void Assembler::csetm(const Register &rd, Condition cond) { - ASSERT((cond != al) && (cond != 
nv)); + DCHECK((cond != al) && (cond != nv)); Register zr = AppropriateZeroRegFor(rd); - csinv(rd, zr, zr, InvertCondition(cond)); + csinv(rd, zr, zr, NegateCondition(cond)); } void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) { - ASSERT((cond != al) && (cond != nv)); - csinc(rd, rn, rn, InvertCondition(cond)); + DCHECK((cond != al) && (cond != nv)); + csinc(rd, rn, rn, NegateCondition(cond)); } void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) { - ASSERT((cond != al) && (cond != nv)); - csinv(rd, rn, rn, InvertCondition(cond)); + DCHECK((cond != al) && (cond != nv)); + csinv(rd, rn, rn, NegateCondition(cond)); } void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) { - ASSERT((cond != al) && (cond != nv)); - csneg(rd, rn, rn, InvertCondition(cond)); + DCHECK((cond != al) && (cond != nv)); + csneg(rd, rn, rn, NegateCondition(cond)); } @@ -1126,8 +1393,8 @@ const Register& rm, Condition cond, ConditionalSelectOp op) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); } @@ -1160,7 +1427,7 @@ void Assembler::mul(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(AreSameSizeAndType(rd, rn, rm)); + DCHECK(AreSameSizeAndType(rd, rn, rm)); Register zr = AppropriateZeroRegFor(rn); DataProcessing3Source(rd, rn, rm, zr, MADD); } @@ -1170,7 +1437,7 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(AreSameSizeAndType(rd, rn, rm, ra)); + DCHECK(AreSameSizeAndType(rd, rn, rm, ra)); DataProcessing3Source(rd, rn, rm, ra, MADD); } @@ -1178,7 +1445,7 @@ void Assembler::mneg(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(AreSameSizeAndType(rd, rn, rm)); + DCHECK(AreSameSizeAndType(rd, rn, rm)); Register zr = AppropriateZeroRegFor(rn); 
DataProcessing3Source(rd, rn, rm, zr, MSUB); } @@ -1188,7 +1455,7 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(AreSameSizeAndType(rd, rn, rm, ra)); + DCHECK(AreSameSizeAndType(rd, rn, rm, ra)); DataProcessing3Source(rd, rn, rm, ra, MSUB); } @@ -1197,8 +1464,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(rd.Is64Bits() && ra.Is64Bits()); - ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DCHECK(rd.Is64Bits() && ra.Is64Bits()); + DCHECK(rn.Is32Bits() && rm.Is32Bits()); DataProcessing3Source(rd, rn, rm, ra, SMADDL_x); } @@ -1207,8 +1474,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(rd.Is64Bits() && ra.Is64Bits()); - ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DCHECK(rd.Is64Bits() && ra.Is64Bits()); + DCHECK(rn.Is32Bits() && rm.Is32Bits()); DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x); } @@ -1217,8 +1484,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(rd.Is64Bits() && ra.Is64Bits()); - ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DCHECK(rd.Is64Bits() && ra.Is64Bits()); + DCHECK(rn.Is32Bits() && rm.Is32Bits()); DataProcessing3Source(rd, rn, rm, ra, UMADDL_x); } @@ -1227,8 +1494,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(rd.Is64Bits() && ra.Is64Bits()); - ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DCHECK(rd.Is64Bits() && ra.Is64Bits()); + DCHECK(rn.Is32Bits() && rm.Is32Bits()); DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x); } @@ -1236,8 +1503,8 @@ void Assembler::smull(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.Is64Bits()); - ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DCHECK(rd.Is64Bits()); + DCHECK(rn.Is32Bits() && rm.Is32Bits()); DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x); } @@ -1245,7 +1512,7 @@ void Assembler::smulh(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(AreSameSizeAndType(rd, rn, rm)); + DCHECK(AreSameSizeAndType(rd, rn, rm)); DataProcessing3Source(rd, rn, rm, 
xzr, SMULH_x); } @@ -1253,8 +1520,8 @@ void Assembler::sdiv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); } @@ -1262,8 +1529,8 @@ void Assembler::udiv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == rm.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rm.SizeInBits()); Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); } @@ -1282,7 +1549,7 @@ void Assembler::rev32(const Register& rd, const Register& rn) { - ASSERT(rd.Is64Bits()); + DCHECK(rd.Is64Bits()); DataProcessing1Source(rd, rn, REV); } @@ -1322,7 +1589,7 @@ void Assembler::ldpsw(const Register& rt, const Register& rt2, const MemOperand& src) { - ASSERT(rt.Is64Bits()); + DCHECK(rt.Is64Bits()); LoadStorePair(rt, rt2, src, LDPSW_x); } @@ -1332,8 +1599,8 @@ const MemOperand& addr, LoadStorePairOp op) { // 'rt' and 'rt2' can only be aliased for stores. - ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); - ASSERT(AreSameSizeAndType(rt, rt2)); + DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); + DCHECK(AreSameSizeAndType(rt, rt2)); Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(addr.offset(), CalcLSPairDataSize(op)); @@ -1343,13 +1610,13 @@ addrmodeop = LoadStorePairOffsetFixed; } else { // Pre-index and post-index modes. 
- ASSERT(!rt.Is(addr.base())); - ASSERT(!rt2.Is(addr.base())); - ASSERT(addr.offset() != 0); + DCHECK(!rt.Is(addr.base())); + DCHECK(!rt2.Is(addr.base())); + DCHECK(addr.offset() != 0); if (addr.IsPreIndex()) { addrmodeop = LoadStorePairPreIndexFixed; } else { - ASSERT(addr.IsPostIndex()); + DCHECK(addr.IsPostIndex()); addrmodeop = LoadStorePairPostIndexFixed; } } @@ -1377,9 +1644,9 @@ const CPURegister& rt2, const MemOperand& addr, LoadStorePairNonTemporalOp op) { - ASSERT(!rt.Is(rt2)); - ASSERT(AreSameSizeAndType(rt, rt2)); - ASSERT(addr.IsImmediateOffset()); + DCHECK(!rt.Is(rt2)); + DCHECK(AreSameSizeAndType(rt, rt2)); + DCHECK(addr.IsImmediateOffset()); LSDataSize size = CalcLSPairDataSize( static_cast<LoadStorePairOp>(op & LoadStorePairMask)); @@ -1430,32 +1697,28 @@ void Assembler::ldrsw(const Register& rt, const MemOperand& src) { - ASSERT(rt.Is64Bits()); + DCHECK(rt.Is64Bits()); LoadStore(rt, src, LDRSW_x); } -void Assembler::ldr(const Register& rt, uint64_t imm) { - // TODO(all): Constant pool may be garbage collected. Hence we cannot store - // arbitrary values in them. Manually move it for now. Fix - // MacroAssembler::Fmov when this is implemented. - UNIMPLEMENTED(); +void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) { + // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a + // constant pool. It should not be emitted. + DCHECK(!rt.IsZero()); + Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt)); } -void Assembler::ldr(const FPRegister& ft, double imm) { - // TODO(all): Constant pool may be garbage collected. Hence we cannot store - // arbitrary values in them. Manually move it for now. Fix - // MacroAssembler::Fmov when this is implemented. - UNIMPLEMENTED(); -} - +void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { + // Currently we only support 64-bit literals. + DCHECK(rt.Is64Bits()); -void Assembler::ldr(const FPRegister& ft, float imm) { - // TODO(all): Constant pool may be garbage collected. 
Hence we cannot store - // arbitrary values in them. Manually move it for now. Fix - // MacroAssembler::Fmov when this is implemented. - UNIMPLEMENTED(); + RecordRelocInfo(imm.rmode(), imm.value()); + BlockConstPoolFor(1); + // The load will be patched when the constpool is emitted, patching code + // expect a load literal with offset 0. + ldr_pcrel(rt, 0); } @@ -1477,13 +1740,13 @@ void Assembler::mrs(const Register& rt, SystemRegister sysreg) { - ASSERT(rt.Is64Bits()); + DCHECK(rt.Is64Bits()); Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); } void Assembler::msr(SystemRegister sysreg, const Register& rt) { - ASSERT(rt.Is64Bits()); + DCHECK(rt.Is64Bits()); Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); } @@ -1509,35 +1772,35 @@ void Assembler::fmov(FPRegister fd, double imm) { - ASSERT(fd.Is64Bits()); - ASSERT(IsImmFP64(imm)); + DCHECK(fd.Is64Bits()); + DCHECK(IsImmFP64(imm)); Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm)); } void Assembler::fmov(FPRegister fd, float imm) { - ASSERT(fd.Is32Bits()); - ASSERT(IsImmFP32(imm)); + DCHECK(fd.Is32Bits()); + DCHECK(IsImmFP32(imm)); Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm)); } void Assembler::fmov(Register rd, FPRegister fn) { - ASSERT(rd.SizeInBits() == fn.SizeInBits()); + DCHECK(rd.SizeInBits() == fn.SizeInBits()); FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; Emit(op | Rd(rd) | Rn(fn)); } void Assembler::fmov(FPRegister fd, Register rn) { - ASSERT(fd.SizeInBits() == rn.SizeInBits()); + DCHECK(fd.SizeInBits() == rn.SizeInBits()); FPIntegerConvertOp op = fd.Is32Bits() ? 
FMOV_sw : FMOV_dx; Emit(op | Rd(fd) | Rn(rn)); } void Assembler::fmov(FPRegister fd, FPRegister fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn)); } @@ -1632,49 +1895,56 @@ void Assembler::fabs(const FPRegister& fd, const FPRegister& fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); FPDataProcessing1Source(fd, fn, FABS); } void Assembler::fneg(const FPRegister& fd, const FPRegister& fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); FPDataProcessing1Source(fd, fn, FNEG); } void Assembler::fsqrt(const FPRegister& fd, const FPRegister& fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); FPDataProcessing1Source(fd, fn, FSQRT); } void Assembler::frinta(const FPRegister& fd, const FPRegister& fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); FPDataProcessing1Source(fd, fn, FRINTA); } +void Assembler::frintm(const FPRegister& fd, + const FPRegister& fn) { + DCHECK(fd.SizeInBits() == fn.SizeInBits()); + FPDataProcessing1Source(fd, fn, FRINTM); +} + + void Assembler::frintn(const FPRegister& fd, const FPRegister& fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); FPDataProcessing1Source(fd, fn, FRINTN); } void Assembler::frintz(const FPRegister& fd, const FPRegister& fn) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); FPDataProcessing1Source(fd, fn, FRINTZ); } void Assembler::fcmp(const FPRegister& fn, const FPRegister& fm) { - ASSERT(fn.SizeInBits() == fm.SizeInBits()); + DCHECK(fn.SizeInBits() == fm.SizeInBits()); Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn)); } @@ -1685,7 +1955,7 @@ // Although the fcmp instruction can strictly only take an immediate value of // +0.0, we don't need to check for -0.0 because 
the sign of 0.0 doesn't // affect the result of the comparison. - ASSERT(value == 0.0); + DCHECK(value == 0.0); Emit(FPType(fn) | FCMP_zero | Rn(fn)); } @@ -1694,7 +1964,7 @@ const FPRegister& fm, StatusFlags nzcv, Condition cond) { - ASSERT(fn.SizeInBits() == fm.SizeInBits()); + DCHECK(fn.SizeInBits() == fm.SizeInBits()); Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); } @@ -1703,8 +1973,8 @@ const FPRegister& fn, const FPRegister& fm, Condition cond) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); - ASSERT(fd.SizeInBits() == fm.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fm.SizeInBits()); Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); } @@ -1720,11 +1990,11 @@ const FPRegister& fn) { if (fd.Is64Bits()) { // Convert float to double. - ASSERT(fn.Is32Bits()); + DCHECK(fn.Is32Bits()); FPDataProcessing1Source(fd, fn, FCVT_ds); } else { // Convert double to float. - ASSERT(fn.Is64Bits()); + DCHECK(fn.Is64Bits()); FPDataProcessing1Source(fd, fn, FCVT_sd); } } @@ -1799,7 +2069,7 @@ // negated bit. // If b is 1, then B is 0. Instr Assembler::ImmFP32(float imm) { - ASSERT(IsImmFP32(imm)); + DCHECK(IsImmFP32(imm)); // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 uint32_t bits = float_to_rawbits(imm); // bit7: a000.0000 @@ -1814,7 +2084,7 @@ Instr Assembler::ImmFP64(double imm) { - ASSERT(IsImmFP64(imm)); + DCHECK(IsImmFP64(imm)); // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 // 0000.0000.0000.0000.0000.0000.0000.0000 uint64_t bits = double_to_rawbits(imm); @@ -1834,10 +2104,19 @@ uint64_t imm, int shift, MoveWideImmediateOp mov_op) { + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are zero (a positive 32-bit number) or top + // 33 bits are one (a negative 32-bit number, sign extended to 64 bits). 
+ DCHECK(((imm >> kWRegSizeInBits) == 0) || + ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff)); + imm &= kWRegMask; + } + if (shift >= 0) { // Explicit shift specified. - ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); - ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); + DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); + DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16)); shift /= 16; } else { // Calculate a new immediate and shift combination to encode the immediate @@ -1849,17 +2128,17 @@ imm >>= 16; shift = 1; } else if ((imm & ~(0xffffUL << 32)) == 0) { - ASSERT(rd.Is64Bits()); + DCHECK(rd.Is64Bits()); imm >>= 32; shift = 2; } else if ((imm & ~(0xffffUL << 48)) == 0) { - ASSERT(rd.Is64Bits()); + DCHECK(rd.Is64Bits()); imm >>= 48; shift = 3; } } - ASSERT(is_uint16(imm)); + DCHECK(is_uint16(imm)); Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift)); @@ -1871,17 +2150,17 @@ const Operand& operand, FlagsUpdate S, AddSubOp op) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(!operand.NeedsRelocation()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(!operand.NeedsRelocation(this)); if (operand.IsImmediate()) { - int64_t immediate = operand.immediate(); - ASSERT(IsImmAddSub(immediate)); + int64_t immediate = operand.ImmediateValue(); + DCHECK(IsImmAddSub(immediate)); Instr dest_reg = (S == SetFlags) ? 
Rd(rd) : RdSP(rd); Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | ImmAddSub(immediate) | dest_reg | RnSP(rn)); } else if (operand.IsShiftedRegister()) { - ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); - ASSERT(operand.shift() != ROR); + DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); + DCHECK(operand.shift() != ROR); // For instructions of the form: // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] @@ -1891,14 +2170,14 @@ // or their 64-bit register equivalents, convert the operand from shifted to // extended register mode, and emit an add/sub extended instruction. if (rn.IsSP() || rd.IsSP()) { - ASSERT(!(rd.IsSP() && (S == SetFlags))); + DCHECK(!(rd.IsSP() && (S == SetFlags))); DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S, AddSubExtendedFixed | op); } else { DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); } } else { - ASSERT(operand.IsExtendedRegister()); + DCHECK(operand.IsExtendedRegister()); DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); } } @@ -1909,22 +2188,22 @@ const Operand& operand, FlagsUpdate S, AddSubWithCarryOp op) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(rd.SizeInBits() == operand.reg().SizeInBits()); - ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); - ASSERT(!operand.NeedsRelocation()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == operand.reg().SizeInBits()); + DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); + DCHECK(!operand.NeedsRelocation(this)); Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); } void Assembler::hlt(int code) { - ASSERT(is_uint16(code)); + DCHECK(is_uint16(code)); Emit(HLT | ImmException(code)); } void Assembler::brk(int code) { - ASSERT(is_uint16(code)); + DCHECK(is_uint16(code)); Emit(BRK | ImmException(code)); } @@ -1933,10 +2212,7 @@ #ifdef USE_SIMULATOR // Don't generate simulator specific code if we are building a snapshot, which // might 
be run on real hardware. - if (!Serializer::enabled()) { -#ifdef DEBUG - Serializer::TooLateToEnableNow(); -#endif + if (!serializer_enabled()) { // The arguments to the debug marker need to be contiguous in memory, so // make sure we don't try to emit pools. BlockPoolsScope scope(this); @@ -1947,11 +2223,11 @@ // Refer to instructions-arm64.h for a description of the marker and its // arguments. hlt(kImmExceptionIsDebug); - ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); + DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); dc32(code); - ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); + DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); dc32(params); - ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset); + DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset); EmitStringData(message); hlt(kImmExceptionIsUnreachable); @@ -1970,15 +2246,15 @@ const Register& rn, const Operand& operand, LogicalOp op) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); - ASSERT(!operand.NeedsRelocation()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(!operand.NeedsRelocation(this)); if (operand.IsImmediate()) { - int64_t immediate = operand.immediate(); + int64_t immediate = operand.ImmediateValue(); unsigned reg_size = rd.SizeInBits(); - ASSERT(immediate != 0); - ASSERT(immediate != -1); - ASSERT(rd.Is64Bits() || is_uint32(immediate)); + DCHECK(immediate != 0); + DCHECK(immediate != -1); + DCHECK(rd.Is64Bits() || is_uint32(immediate)); // If the operation is NOT, invert the operation and immediate. 
if ((op & NOT) == NOT) { @@ -1995,8 +2271,8 @@ UNREACHABLE(); } } else { - ASSERT(operand.IsShiftedRegister()); - ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); + DCHECK(operand.IsShiftedRegister()); + DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed); DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); } @@ -2023,13 +2299,13 @@ Condition cond, ConditionalCompareOp op) { Instr ccmpop; - ASSERT(!operand.NeedsRelocation()); + DCHECK(!operand.NeedsRelocation(this)); if (operand.IsImmediate()) { - int64_t immediate = operand.immediate(); - ASSERT(IsImmConditionalCompare(immediate)); + int64_t immediate = operand.ImmediateValue(); + DCHECK(IsImmConditionalCompare(immediate)); ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate); } else { - ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); + DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); } Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); @@ -2039,7 +2315,7 @@ void Assembler::DataProcessing1Source(const Register& rd, const Register& rn, DataProcessing1SourceOp op) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); Emit(SF(rn) | op | Rn(rn) | Rd(rd)); } @@ -2055,8 +2331,8 @@ const FPRegister& fn, const FPRegister& fm, FPDataProcessing2SourceOp op) { - ASSERT(fd.SizeInBits() == fn.SizeInBits()); - ASSERT(fd.SizeInBits() == fm.SizeInBits()); + DCHECK(fd.SizeInBits() == fn.SizeInBits()); + DCHECK(fd.SizeInBits() == fm.SizeInBits()); Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd)); } @@ -2066,7 +2342,7 @@ const FPRegister& fm, const FPRegister& fa, FPDataProcessing3SourceOp op) { - ASSERT(AreSameSizeAndType(fd, fn, fm, fa)); + DCHECK(AreSameSizeAndType(fd, fn, fm, fa)); Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa)); } @@ -2098,7 +2374,7 @@ const Register& 
rn, Extend extend, unsigned left_shift) { - ASSERT(rd.SizeInBits() >= rn.SizeInBits()); + DCHECK(rd.SizeInBits() >= rn.SizeInBits()); unsigned reg_size = rd.SizeInBits(); // Use the correct size of register. Register rn_ = Register::Create(rn.code(), rd.SizeInBits()); @@ -2117,7 +2393,7 @@ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; case UXTX: case SXTX: { - ASSERT(rn.SizeInBits() == kXRegSizeInBits); + DCHECK(rn.SizeInBits() == kXRegSizeInBits); // Nothing to extend. Just shift. lsl(rd, rn_, left_shift); break; @@ -2136,9 +2412,9 @@ const Operand& operand, FlagsUpdate S, Instr op) { - ASSERT(operand.IsShiftedRegister()); - ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount()))); - ASSERT(!operand.NeedsRelocation()); + DCHECK(operand.IsShiftedRegister()); + DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount()))); + DCHECK(!operand.NeedsRelocation(this)); Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); @@ -2150,7 +2426,7 @@ const Operand& operand, FlagsUpdate S, Instr op) { - ASSERT(!operand.NeedsRelocation()); + DCHECK(!operand.NeedsRelocation(this)); Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) | @@ -2194,18 +2470,18 @@ // Shifts are encoded in one bit, indicating a left shift by the memory // access size. - ASSERT((shift_amount == 0) || + DCHECK((shift_amount == 0) || (shift_amount == static_cast<unsigned>(CalcLSDataSize(op)))); Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) | ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0)); } else { // Pre-index and post-index modes. 
- ASSERT(!rt.Is(addr.base())); + DCHECK(!rt.Is(addr.base())); if (IsImmLSUnscaled(offset)) { if (addr.IsPreIndex()) { Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); } else { - ASSERT(addr.IsPostIndex()); + DCHECK(addr.IsPostIndex()); Emit(LoadStorePostIndexFixed | memop | ImmLS(offset)); } } else { @@ -2227,25 +2503,9 @@ } -void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) { - ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0); - // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a - // constant pool. It should not be emitted. - ASSERT(!rt.Is(xzr)); - Emit(LDR_x_lit | - ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) | - Rt(rt)); -} - - -void Assembler::LoadRelocatedValue(const CPURegister& rt, - const Operand& operand, - LoadLiteralOp op) { - int64_t imm = operand.immediate(); - ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits())); - RecordRelocInfo(operand.rmode(), imm); - BlockConstPoolFor(1); - Emit(op | ImmLLiteral(0) | Rt(rt)); +bool Assembler::IsImmLSPair(ptrdiff_t offset, LSDataSize size) { + bool offset_is_size_multiple = (((offset >> size) << size) == offset); + return offset_is_size_multiple && is_int7(offset >> size); } @@ -2261,94 +2521,200 @@ unsigned* n, unsigned* imm_s, unsigned* imm_r) { - ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); - ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); + DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); + DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); + + bool negate = false; // Logical immediates are encoded using parameters n, imm_s and imm_r using // the following table: // - // N imms immr size S R - // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) - // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) - // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) - // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) - // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) - // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // N imms immr size 
S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) // (s bits must not be all set) // - // A pattern is constructed of size bits, where the least significant S+1 - // bits are set. The pattern is rotated right by R, and repeated across a - // 32 or 64-bit value, depending on destination register width. + // A pattern is constructed of size bits, where the least significant S+1 bits + // are set. The pattern is rotated right by R, and repeated across a 32 or + // 64-bit value, depending on destination register width. // - // To test if an arbitary immediate can be encoded using this scheme, an - // iterative algorithm is used. + // Put another way: the basic format of a logical immediate is a single + // contiguous stretch of 1 bits, repeated across the whole word at intervals + // given by a power of 2. To identify them quickly, we first locate the + // lowest stretch of 1 bits, then the next 1 bit above that; that combination + // is different for every logical immediate, so it gives us all the + // information we need to identify the only logical immediate that our input + // could be, and then we simply check if that's the value we actually have. // - // TODO(mcapewel) This code does not consider using X/W register overlap to - // support 64-bit immediates where the top 32-bits are zero, and the bottom - // 32-bits are an encodable logical immediate. - - // 1. If the value has all set or all clear bits, it can't be encoded. - if ((value == 0) || (value == 0xffffffffffffffffUL) || - ((width == kWRegSizeInBits) && (value == 0xffffffff))) { - return false; + // (The rotation parameter does give the possibility of the stretch of 1 bits + // going 'round the end' of the word. 
To deal with that, we observe that in + // any situation where that happens the bitwise NOT of the value is also a + // valid logical immediate. So we simply invert the input whenever its low bit + // is set, and then we know that the rotated case can't arise.) + + if (value & 1) { + // If the low bit is 1, negate the value, and set a flag to remember that we + // did (so that we can adjust the return values appropriately). + negate = true; + value = ~value; + } + + if (width == kWRegSizeInBits) { + // To handle 32-bit logical immediates, the very easiest thing is to repeat + // the input value twice to make a 64-bit word. The correct encoding of that + // as a logical immediate will also be the correct encoding of the 32-bit + // value. + + // The most-significant 32 bits may not be zero (ie. negate is true) so + // shift the value left before duplicating it. + value <<= kWRegSizeInBits; + value |= value >> kWRegSizeInBits; } - unsigned lead_zero = CountLeadingZeros(value, width); - unsigned lead_one = CountLeadingZeros(~value, width); - unsigned trail_zero = CountTrailingZeros(value, width); - unsigned trail_one = CountTrailingZeros(~value, width); - unsigned set_bits = CountSetBits(value, width); - - // The fixed bits in the immediate s field. - // If width == 64 (X reg), start at 0xFFFFFF80. - // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit - // widths won't be executed. - int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64; - int imm_s_mask = 0x3F; - - for (;;) { - // 2. If the value is two bits wide, it can be encoded. - if (width == 2) { - *n = 0; - *imm_s = 0x3C; - *imm_r = (value & 3) - 1; - return true; - } - - *n = (width == 64) ? 1 : 0; - *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask); - if ((lead_zero + set_bits) == width) { - *imm_r = 0; + // The basic analysis idea: imagine our input word looks like this. 
+ // + // 0011111000111110001111100011111000111110001111100011111000111110 + // c b a + // |<--d-->| + // + // We find the lowest set bit (as an actual power-of-2 value, not its index) + // and call it a. Then we add a to our original number, which wipes out the + // bottommost stretch of set bits and replaces it with a 1 carried into the + // next zero bit. Then we look for the new lowest set bit, which is in + // position b, and subtract it, so now our number is just like the original + // but with the lowest stretch of set bits completely gone. Now we find the + // lowest set bit again, which is position c in the diagram above. Then we'll + // measure the distance d between bit positions a and c (using CLZ), and that + // tells us that the only valid logical immediate that could possibly be equal + // to this number is the one in which a stretch of bits running from a to just + // below b is replicated every d bits. + uint64_t a = LargestPowerOf2Divisor(value); + uint64_t value_plus_a = value + a; + uint64_t b = LargestPowerOf2Divisor(value_plus_a); + uint64_t value_plus_a_minus_b = value_plus_a - b; + uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b); + + int d, clz_a, out_n; + uint64_t mask; + + if (c != 0) { + // The general case, in which there is more than one stretch of set bits. + // Compute the repeat distance d, and set up a bitmask covering the basic + // unit of repetition (i.e. a word with the bottom d bits set). Also, in all + // of these cases the N bit of the output will be zero. + clz_a = CountLeadingZeros(a, kXRegSizeInBits); + int clz_c = CountLeadingZeros(c, kXRegSizeInBits); + d = clz_a - clz_c; + mask = ((V8_UINT64_C(1) << d) - 1); + out_n = 0; + } else { + // Handle degenerate cases. + // + // If any of those 'find lowest set bit' operations didn't find a set bit at + // all, then the word will have been zero thereafter, so in particular the + // last lowest_set_bit operation will have returned zero. 
So we can test for + // all the special case conditions in one go by seeing if c is zero. + if (a == 0) { + // The input was zero (or all 1 bits, which will come to here too after we + // inverted it at the start of the function), for which we just return + // false. + return false; } else { - *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one; + // Otherwise, if c was zero but a was not, then there's just one stretch + // of set bits in our word, meaning that we have the trivial case of + // d == 64 and only one 'repetition'. Set up all the same variables as in + // the general case above, and set the N bit in the output. + clz_a = CountLeadingZeros(a, kXRegSizeInBits); + d = 64; + mask = ~V8_UINT64_C(0); + out_n = 1; } + } - // 3. If the sum of leading zeros, trailing zeros and set bits is equal to - // the bit width of the value, it can be encoded. - if (lead_zero + trail_zero + set_bits == width) { - return true; - } + // If the repeat period d is not a power of two, it can't be encoded. + if (!IS_POWER_OF_TWO(d)) { + return false; + } - // 4. If the sum of leading ones, trailing ones and unset bits in the - // value is equal to the bit width of the value, it can be encoded. - if (lead_one + trail_one + (width - set_bits) == width) { - return true; - } + if (((b - a) & ~mask) != 0) { + // If the bit stretch (b - a) does not fit within the mask derived from the + // repeat period, then fail. + return false; + } - // 5. If the most-significant half of the bitwise value is equal to the - // least-significant half, return to step 2 using the least-significant - // half of the value. - uint64_t mask = (1UL << (width >> 1)) - 1; - if ((value & mask) == ((value >> (width >> 1)) & mask)) { - width >>= 1; - set_bits >>= 1; - imm_s_fixed >>= 1; - continue; - } + // The only possible option is b - a repeated every d bits. 
Now we're going to + // actually construct the valid logical immediate derived from that + // specification, and see if it equals our original input. + // + // To repeat a value every d bits, we multiply it by a number of the form + // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can + // be derived using a table lookup on CLZ(d). + static const uint64_t multipliers[] = { + 0x0000000000000001UL, + 0x0000000100000001UL, + 0x0001000100010001UL, + 0x0101010101010101UL, + 0x1111111111111111UL, + 0x5555555555555555UL, + }; + int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; + // Ensure that the index to the multipliers array is within bounds. + DCHECK((multiplier_idx >= 0) && + (static_cast<size_t>(multiplier_idx) < ARRAY_SIZE(multipliers))); + uint64_t multiplier = multipliers[multiplier_idx]; + uint64_t candidate = (b - a) * multiplier; - // 6. Otherwise, the value can't be encoded. + if (value != candidate) { + // The candidate pattern doesn't match our input value, so fail. return false; } + + // We have a match! This is a valid logical immediate, so now we have to + // construct the bits and pieces of the instruction encoding that generates + // it. + + // Count the set bits in our basic stretch. The special case of clz(0) == -1 + // makes the answer come out right for stretches that reach the very top of + // the word (e.g. numbers like 0xffffc00000000000). + int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits); + int s = clz_a - clz_b; + + // Decide how many bits to rotate right by, to put the low bit of that basic + // stretch in position a. + int r; + if (negate) { + // If we inverted the input right at the start of this function, here's + // where we compensate: the number of set bits becomes the number of clear + // bits, and the rotation count is based on position b rather than position + // a (since b is the location of the 'lowest' 1 bit after inversion). 
+ s = d - s; + r = (clz_b + 1) & (d - 1); + } else { + r = (clz_a + 1) & (d - 1); + } + + // Now we're done, except for having to encode the S output in such a way that + // it gives both the number of set bits and the length of the repeated + // segment. The s field is encoded like this: + // + // imms size S + // ssssss 64 UInt(ssssss) + // 0sssss 32 UInt(sssss) + // 10ssss 16 UInt(ssss) + // 110sss 8 UInt(sss) + // 1110ss 4 UInt(ss) + // 11110s 2 UInt(s) + // + // So we 'or' (-d << 1) with our computed s to form imms. + *n = out_n; + *imm_s = ((-d << 1) | (s - 1)) & 0x3f; + *imm_r = r; + + return true; } @@ -2411,9 +2777,7 @@ // Compute new buffer size. CodeDesc desc; // the new buffer - if (buffer_size_ < 4 * KB) { - desc.buffer_size = 4 * KB; - } else if (buffer_size_ < 1 * MB) { + if (buffer_size_ < 1 * MB) { desc.buffer_size = 2 * buffer_size_; } else { desc.buffer_size = buffer_size_ + 1 * MB; @@ -2448,15 +2812,7 @@ // buffer nor pc absolute pointing inside the code buffer, so there is no need // to relocate any emitted relocation entries. - // Relocate pending relocation entries. - for (int i = 0; i < num_pending_reloc_info_; i++) { - RelocInfo& rinfo = pending_reloc_info_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && - rinfo.rmode() != RelocInfo::POSITION); - if (rinfo.rmode() != RelocInfo::JS_RETURN) { - rinfo.set_pc(rinfo.pc() + pc_delta); - } - } + // Pending relocation entries are also relative, no need to relocate. } @@ -2468,7 +2824,7 @@ (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL)) { // Adjust code for new modes. - ASSERT(RelocInfo::IsDebugBreakSlot(rmode) + DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsJSReturn(rmode) || RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode) @@ -2476,11 +2832,7 @@ || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. 
} else { - ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); - if (num_pending_reloc_info_ == 0) { - first_const_pool_use_ = pc_offset(); - } - pending_reloc_info_[num_pending_reloc_info_++] = rinfo; + constpool_.RecordEntry(data, rmode); // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. BlockConstPoolFor(1); @@ -2488,17 +2840,11 @@ if (!RelocInfo::IsNone(rmode)) { // Don't record external references unless the heap will be serialized. - if (rmode == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - if (!Serializer::enabled() && !emit_debug_code()) { - return; - } + if (rmode == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; } - ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here + DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { RelocInfo reloc_info_with_ast_id( reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL); @@ -2514,11 +2860,9 @@ void Assembler::BlockConstPoolFor(int instructions) { int pc_limit = pc_offset() + instructions * kInstructionSize; if (no_const_pool_before_ < pc_limit) { - // If there are some pending entries, the constant pool cannot be blocked - // further than first_const_pool_use_ + kMaxDistToConstPool - ASSERT((num_pending_reloc_info_ == 0) || - (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool))); no_const_pool_before_ = pc_limit; + // Make sure the pool won't be blocked for too long. + DCHECK(pc_limit < constpool_.MaxPcOffset()); } if (next_constant_pool_check_ < no_const_pool_before_) { @@ -2533,111 +2877,53 @@ // BlockConstPoolScope. if (is_const_pool_blocked()) { // Something is wrong if emission is forced and blocked at the same time. 
- ASSERT(!force_emit); + DCHECK(!force_emit); return; } // There is nothing to do if there are no pending constant pool entries. - if (num_pending_reloc_info_ == 0) { + if (constpool_.IsEmpty()) { // Calculate the offset of the next check. - next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; + SetNextConstPoolCheckIn(kCheckConstPoolInterval); return; } // We emit a constant pool when: // * requested to do so by parameter force_emit (e.g. after each function). // * the distance to the first instruction accessing the constant pool is - // kAvgDistToConstPool or more. - // * no jump is required and the distance to the first instruction accessing - // the constant pool is at least kMaxDistToPConstool / 2. - ASSERT(first_const_pool_use_ >= 0); - int dist = pc_offset() - first_const_pool_use_; - if (!force_emit && dist < kAvgDistToConstPool && - (require_jump || (dist < (kMaxDistToConstPool / 2)))) { + // kApproxMaxDistToConstPool or more. + // * the number of entries in the pool is kApproxMaxPoolEntryCount or more. + int dist = constpool_.DistanceToFirstUse(); + int count = constpool_.EntryCount(); + if (!force_emit && + (dist < kApproxMaxDistToConstPool) && + (count < kApproxMaxPoolEntryCount)) { return; } - int jump_instr = require_jump ? kInstructionSize : 0; - int size_pool_marker = kInstructionSize; - int size_pool_guard = kInstructionSize; - int pool_size = jump_instr + size_pool_marker + size_pool_guard + - num_pending_reloc_info_ * kPointerSize; - int needed_space = pool_size + kGap; // Emit veneers for branches that would go out of range during emission of the // constant pool. 
- CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size); - - Label size_check; - bind(&size_check); + int worst_case_size = constpool_.WorstCaseSize(); + CheckVeneerPool(false, require_jump, + kVeneerDistanceMargin + worst_case_size); // Check that the code buffer is large enough before emitting the constant - // pool (include the jump over the pool, the constant pool marker, the - // constant pool guard, and the gap to the relocation information). + // pool (this includes the gap to the relocation information). + int needed_space = worst_case_size + kGap + 1 * kInstructionSize; while (buffer_space() <= needed_space) { GrowBuffer(); } - { - // Block recursive calls to CheckConstPool and protect from veneer pools. - BlockPoolsScope block_pools(this); - RecordComment("[ Constant Pool"); - RecordConstPool(pool_size); - - // Emit jump over constant pool if necessary. - Label after_pool; - if (require_jump) { - b(&after_pool); - } - - // Emit a constant pool header. The header has two goals: - // 1) Encode the size of the constant pool, for use by the disassembler. - // 2) Terminate the program, to try to prevent execution from accidentally - // flowing into the constant pool. - // The header is therefore made of two arm64 instructions: - // ldr xzr, #<size of the constant pool in 32-bit words> - // blr xzr - // If executed the code will likely segfault and lr will point to the - // beginning of the constant pool. - // TODO(all): currently each relocated constant is 64 bits, consider adding - // support for 32-bit entries. - ConstantPoolMarker(2 * num_pending_reloc_info_); - ConstantPoolGuard(); - - // Emit constant pool entries. 
- for (int i = 0; i < num_pending_reloc_info_; i++) { - RelocInfo& rinfo = pending_reloc_info_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && - rinfo.rmode() != RelocInfo::POSITION && - rinfo.rmode() != RelocInfo::STATEMENT_POSITION && - rinfo.rmode() != RelocInfo::CONST_POOL && - rinfo.rmode() != RelocInfo::VENEER_POOL); - - Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc()); - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. - ASSERT(instr->IsLdrLiteral() && - instr->ImmLLiteral() == 0); - - instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); - dc64(rinfo.data()); - } - - num_pending_reloc_info_ = 0; - first_const_pool_use_ = -1; - - RecordComment("]"); - - if (after_pool.is_linked()) { - bind(&after_pool); - } - } + Label size_check; + bind(&size_check); + constpool_.Emit(require_jump); + DCHECK(SizeOfCodeGeneratedSince(&size_check) <= + static_cast<unsigned>(worst_case_size)); // Since a constant pool was just emitted, move the check offset forward by // the standard interval. - next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; - - ASSERT(SizeOfCodeGeneratedSince(&size_check) == - static_cast<unsigned>(pool_size)); + SetNextConstPoolCheckIn(kCheckConstPoolInterval); } @@ -2650,12 +2936,10 @@ void Assembler::RecordVeneerPool(int location_offset, int size) { -#ifdef ENABLE_DEBUGGER_SUPPORT RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), NULL); reloc_info_writer.Write(&rinfo); -#endif } @@ -2699,7 +2983,7 @@ branch->SetImmPCOffsetTarget(veneer); b(label); #ifdef DEBUG - ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <= + DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <= static_cast<uint64_t>(kMaxVeneerCodeSize)); veneer_size_check.Unuse(); #endif @@ -2732,17 +3016,17 @@ int margin) { // There is nothing to do if there are no pending veneer pool entries. 
if (unresolved_branches_.empty()) { - ASSERT(next_veneer_pool_check_ == kMaxInt); + DCHECK(next_veneer_pool_check_ == kMaxInt); return; } - ASSERT(pc_offset() < unresolved_branches_first_limit()); + DCHECK(pc_offset() < unresolved_branches_first_limit()); // Some short sequence of instruction mustn't be broken up by veneer pool // emission, such sequences are protected by calls to BlockVeneerPoolFor and // BlockVeneerPoolScope. if (is_veneer_pool_blocked()) { - ASSERT(!force_emit); + DCHECK(!force_emit); return; } @@ -2789,22 +3073,54 @@ void Assembler::RecordConstPool(int size) { // We only need this for debugger support, to correctly compute offsets in the // code. -#ifdef ENABLE_DEBUGGER_SUPPORT RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); -#endif } -MaybeObject* Assembler::AllocateConstantPool(Heap* heap) { +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { // No out-of-line constant pool support. - UNREACHABLE(); - return NULL; + DCHECK(!FLAG_enable_ool_constant_pool); + return isolate->factory()->empty_constant_pool_array(); } void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { // No out-of-line constant pool support. - UNREACHABLE(); + DCHECK(!FLAG_enable_ool_constant_pool); + return; +} + + +void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) { + // The code at the current instruction should be: + // adr rd, 0 + // nop (adr_far) + // nop (adr_far) + // movz scratch, 0 + + // Verify the expected code. 
+ Instruction* expected_adr = InstructionAt(0); + CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0)); + int rd_code = expected_adr->Rd(); + for (int i = 0; i < kAdrFarPatchableNNops; ++i) { + CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP)); + } + Instruction* expected_movz = + InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize); + CHECK(expected_movz->IsMovz() && + (expected_movz->ImmMoveWide() == 0) && + (expected_movz->ShiftMoveWide() == 0)); + int scratch_code = expected_movz->Rd(); + + // Patch to load the correct address. + Register rd = Register::XRegFromCode(rd_code); + Register scratch = Register::XRegFromCode(scratch_code); + // Addresses are only 48 bits. + adr(rd, target_offset & 0xFFFF); + movz(scratch, (target_offset >> 16) & 0xFFFF, 16); + movk(scratch, (target_offset >> 32) & 0xFFFF, 32); + DCHECK((target_offset >> 48) == 0); + add(rd, rd, scratch); } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/assembler-arm64.h nodejs-0.11.15/deps/v8/src/arm64/assembler-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/assembler-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/assembler-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_ASSEMBLER_ARM64_H_ #define V8_ARM64_ASSEMBLER_ARM64_H_ #include <list> #include <map> +#include <vector> -#include "globals.h" -#include "utils.h" -#include "assembler.h" -#include "serialize.h" -#include "arm64/instructions-arm64.h" -#include "arm64/cpu-arm64.h" +#include "src/arm64/instructions-arm64.h" +#include "src/assembler.h" +#include "src/globals.h" +#include "src/serialize.h" +#include "src/utils.h" namespace v8 { @@ -89,6 +66,7 @@ bool IsValidFPRegister() const; bool IsNone() const; bool Is(const CPURegister& other) const; + bool Aliases(const CPURegister& other) const; bool IsZero() const; bool IsSP() const; @@ -128,18 +106,18 @@ reg_code = r.reg_code; reg_size = r.reg_size; reg_type = r.reg_type; - ASSERT(IsValidOrNone()); + DCHECK(IsValidOrNone()); } Register(const Register& r) { // NOLINT(runtime/explicit) reg_code = r.reg_code; reg_size = r.reg_size; reg_type = r.reg_type; - ASSERT(IsValidOrNone()); + DCHECK(IsValidOrNone()); } bool IsValid() const { - ASSERT(IsRegister() || IsNone()); + DCHECK(IsRegister() || IsNone()); return IsValidRegister(); } @@ -191,7 +169,7 @@ } static Register FromAllocationIndex(unsigned index) { - ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters())); + DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters())); // cp is the last allocatable register. 
if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) { return from_code(kAllocatableContext); @@ -204,8 +182,8 @@ } static const char* AllocationIndexToString(int index) { - ASSERT((index >= 0) && (index < NumAllocatableRegisters())); - ASSERT((kAllocatableLowRangeBegin == 0) && + DCHECK((index >= 0) && (index < NumAllocatableRegisters())); + DCHECK((kAllocatableLowRangeBegin == 0) && (kAllocatableLowRangeEnd == 15) && (kAllocatableHighRangeBegin == 18) && (kAllocatableHighRangeEnd == 24) && @@ -221,7 +199,7 @@ } static int ToAllocationIndex(Register reg) { - ASSERT(reg.IsAllocatable()); + DCHECK(reg.IsAllocatable()); unsigned code = reg.code(); if (code == kAllocatableContext) { return NumAllocatableRegisters() - 1; @@ -257,18 +235,18 @@ reg_code = r.reg_code; reg_size = r.reg_size; reg_type = r.reg_type; - ASSERT(IsValidOrNone()); + DCHECK(IsValidOrNone()); } FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit) reg_code = r.reg_code; reg_size = r.reg_size; reg_type = r.reg_type; - ASSERT(IsValidOrNone()); + DCHECK(IsValidOrNone()); } bool IsValid() const { - ASSERT(IsFPRegister() || IsNone()); + DCHECK(IsFPRegister() || IsNone()); return IsValidFPRegister(); } @@ -285,9 +263,9 @@ static const unsigned kAllocatableLowRangeBegin = 0; static const unsigned kAllocatableLowRangeEnd = 14; static const unsigned kAllocatableHighRangeBegin = 16; - static const unsigned kAllocatableHighRangeEnd = 29; + static const unsigned kAllocatableHighRangeEnd = 28; - static const RegList kAllocatableFPRegisters = 0x3fff7fff; + static const RegList kAllocatableFPRegisters = 0x1fff7fff; // Gap between low and high ranges. static const int kAllocatableRangeGapSize = @@ -304,7 +282,7 @@ } static FPRegister FromAllocationIndex(unsigned int index) { - ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters())); + DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters())); return (index <= kAllocatableLowRangeEnd) ? 
from_code(index) @@ -312,22 +290,22 @@ } static const char* AllocationIndexToString(int index) { - ASSERT((index >= 0) && (index < NumAllocatableRegisters())); - ASSERT((kAllocatableLowRangeBegin == 0) && + DCHECK((index >= 0) && (index < NumAllocatableRegisters())); + DCHECK((kAllocatableLowRangeBegin == 0) && (kAllocatableLowRangeEnd == 14) && (kAllocatableHighRangeBegin == 16) && - (kAllocatableHighRangeEnd == 29)); + (kAllocatableHighRangeEnd == 28)); const char* const names[] = { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", - "d24", "d25", "d26", "d27", "d28", "d29" + "d24", "d25", "d26", "d27", "d28" }; return names[index]; } static int ToAllocationIndex(FPRegister reg) { - ASSERT(reg.IsAllocatable()); + DCHECK(reg.IsAllocatable()); unsigned code = reg.code(); return (code <= kAllocatableLowRangeEnd) @@ -420,9 +398,11 @@ // Keeps the 0 double value. ALIAS_REGISTER(FPRegister, fp_zero, d15); // Crankshaft double scratch register. -ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30); -// MacroAssembler double scratch register. -ALIAS_REGISTER(FPRegister, fp_scratch, d31); +ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29); +// MacroAssembler double scratch registers. 
+ALIAS_REGISTER(FPRegister, fp_scratch, d30); +ALIAS_REGISTER(FPRegister, fp_scratch1, d30); +ALIAS_REGISTER(FPRegister, fp_scratch2, d31); #undef ALIAS_REGISTER @@ -471,40 +451,40 @@ CPURegister reg4 = NoCPUReg) : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()), size_(reg1.SizeInBits()), type_(reg1.type()) { - ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); - ASSERT(IsValid()); + DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4)); + DCHECK(IsValid()); } CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) : list_(list), size_(size), type_(type) { - ASSERT(IsValid()); + DCHECK(IsValid()); } CPURegList(CPURegister::RegisterType type, unsigned size, unsigned first_reg, unsigned last_reg) : size_(size), type_(type) { - ASSERT(((type == CPURegister::kRegister) && + DCHECK(((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) || ((type == CPURegister::kFPRegister) && (last_reg < kNumberOfFPRegisters))); - ASSERT(last_reg >= first_reg); + DCHECK(last_reg >= first_reg); list_ = (1UL << (last_reg + 1)) - 1; list_ &= ~((1UL << first_reg) - 1); - ASSERT(IsValid()); + DCHECK(IsValid()); } CPURegister::RegisterType type() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return type_; } RegList list() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return list_; } inline void set_list(RegList new_list) { - ASSERT(IsValid()); + DCHECK(IsValid()); list_ = new_list; } @@ -514,8 +494,8 @@ void Combine(const CPURegList& other); // Remove every register in the other CPURegList from this one. Registers that - // do not exist in this list are ignored. The type and size of the registers - // in the 'other' list must match those in this list. + // do not exist in this list are ignored. The type of the registers in the + // 'other' list must match those in this list. void Remove(const CPURegList& other); // Variants of Combine and Remove which take CPURegisters. 
@@ -549,7 +529,7 @@ static CPURegList GetSafepointSavedRegisters(); bool IsEmpty() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return list_ == 0; } @@ -557,7 +537,7 @@ const CPURegister& other2 = NoCPUReg, const CPURegister& other3 = NoCPUReg, const CPURegister& other4 = NoCPUReg) const { - ASSERT(IsValid()); + DCHECK(IsValid()); RegList list = 0; if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit(); if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit(); @@ -567,21 +547,26 @@ } int Count() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return CountSetBits(list_, kRegListSizeInBits); } unsigned RegisterSizeInBits() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return size_; } unsigned RegisterSizeInBytes() const { int size_in_bits = RegisterSizeInBits(); - ASSERT((size_in_bits % kBitsPerByte) == 0); + DCHECK((size_in_bits % kBitsPerByte) == 0); return size_in_bits / kBitsPerByte; } + unsigned TotalSizeInBytes() const { + DCHECK(IsValid()); + return RegisterSizeInBytes() * Count(); + } + private: RegList list_; unsigned size_; @@ -614,6 +599,31 @@ #define kCallerSaved CPURegList::GetCallerSaved() #define kCallerSavedFP CPURegList::GetCallerSavedFP() +// ----------------------------------------------------------------------------- +// Immediates. +class Immediate { + public: + template<typename T> + inline explicit Immediate(Handle<T> handle); + + // This is allowed to be an implicit constructor because Immediate is + // a wrapper class that doesn't normally perform any type conversion. 
+ template<typename T> + inline Immediate(T value); // NOLINT(runtime/explicit) + + template<typename T> + inline Immediate(T value, RelocInfo::Mode rmode); + + int64_t value() const { return value_; } + RelocInfo::Mode rmode() const { return rmode_; } + + private: + void InitializeHandle(Handle<Object> value); + + int64_t value_; + RelocInfo::Mode rmode_; +}; + // ----------------------------------------------------------------------------- // Operands. @@ -649,8 +659,8 @@ inline Operand(T t); // NOLINT(runtime/explicit) // Implicit constructor for int types. - template<typename int_t> - inline Operand(int_t t, RelocInfo::Mode rmode); + template<typename T> + inline Operand(T t, RelocInfo::Mode rmode); inline bool IsImmediate() const; inline bool IsShiftedRegister() const; @@ -661,35 +671,33 @@ // which helps in the encoding of instructions that use the stack pointer. inline Operand ToExtendedRegister() const; - inline int64_t immediate() const; + inline Immediate immediate() const; + inline int64_t ImmediateValue() const; inline Register reg() const; inline Shift shift() const; inline Extend extend() const; inline unsigned shift_amount() const; // Relocation information. - RelocInfo::Mode rmode() const { return rmode_; } - void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; } - bool NeedsRelocation() const; + bool NeedsRelocation(const Assembler* assembler) const; // Helpers inline static Operand UntagSmi(Register smi); inline static Operand UntagSmiAndScale(Register smi, int scale); private: - void initialize_handle(Handle<Object> value); - int64_t immediate_; + Immediate immediate_; Register reg_; Shift shift_; Extend extend_; unsigned shift_amount_; - RelocInfo::Mode rmode_; }; // MemOperand represents a memory operand in a load or store instruction. class MemOperand { public: + inline MemOperand(); inline explicit MemOperand(Register base, ptrdiff_t offset = 0, AddrMode addrmode = Offset); @@ -721,6 +729,16 @@ // handle indexed modes. 
inline Operand OffsetAsOperand() const; + enum PairResult { + kNotPair, // Can't use a pair instruction. + kPairAB, // Can use a pair instruction (operandA has lower address). + kPairBA // Can use a pair instruction (operandB has lower address). + }; + // Check if two MemOperand are consistent for stp/ldp use. + static PairResult AreConsistentForPair(const MemOperand& operandA, + const MemOperand& operandB, + int access_size_log2 = kXRegSizeLog2); + private: Register base_; Register regoffset_; @@ -732,6 +750,55 @@ }; +class ConstPool { + public: + explicit ConstPool(Assembler* assm) + : assm_(assm), + first_use_(-1), + shared_entries_count(0) {} + void RecordEntry(intptr_t data, RelocInfo::Mode mode); + int EntryCount() const { + return shared_entries_count + unique_entries_.size(); + } + bool IsEmpty() const { + return shared_entries_.empty() && unique_entries_.empty(); + } + // Distance in bytes between the current pc and the first instruction + // using the pool. If there are no pending entries return kMaxInt. + int DistanceToFirstUse(); + // Offset after which instructions using the pool will be out of range. + int MaxPcOffset(); + // Maximum size the constant pool can be with current entries. It always + // includes alignment padding and branch over. + int WorstCaseSize(); + // Size in bytes of the literal pool *if* it is emitted at the current + // pc. The size will include the branch over the pool if it was requested. + int SizeIfEmittedAtCurrentPc(bool require_jump); + // Emit the literal pool at the current pc with a branch over the pool if + // requested. + void Emit(bool require_jump); + // Discard any pending pool entries. + void Clear(); + + private: + bool CanBeShared(RelocInfo::Mode mode); + void EmitMarker(); + void EmitGuard(); + void EmitEntries(); + + Assembler* assm_; + // Keep track of the first instruction requiring a constant pool entry + // since the previous constant pool was emitted. 
+ int first_use_; + // values, pc offset(s) of entries which can be shared. + std::multimap<uint64_t, int> shared_entries_; + // Number of distinct literal in shared entries. + int shared_entries_count; + // values, pc offset of entries which cannot be shared. + std::vector<std::pair<uint64_t, int> > unique_entries_; +}; + + // ----------------------------------------------------------------------------- // Assembler. @@ -755,14 +822,14 @@ virtual ~Assembler(); virtual void AbortedCodeGeneration() { - num_pending_reloc_info_ = 0; + constpool_.Clear(); } // System functions --------------------------------------------------------- // Start generating code from the beginning of the buffer, discarding any code // and data that has already been emitted into the buffer. // - // In order to avoid any accidental transfer of state, Reset ASSERTs that the + // In order to avoid any accidental transfer of state, Reset DCHECKs that the // constant pool is not blocked. void Reset(); @@ -802,11 +869,15 @@ ConstantPoolArray* constant_pool); inline static void set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target); + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED); static inline Address target_address_at(Address pc, Code* code); static inline void set_target_address_at(Address pc, Code* code, - Address target); + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED); // Return the code target address at a call site from the return address of // that call in the instruction stream. @@ -816,6 +887,9 @@ // instruction stream that call will return from. inline static Address return_address_from_call_start(Address pc); + // Return the code target address of the patch debug break slot + inline static Address break_address_from_return_address(Address pc); + // This sets the branch destination (which is in the constant pool on ARM). // This is for calls and branches within generated code. 
inline static void deserialization_set_special_target_at( @@ -842,15 +916,15 @@ // Size of the generated code in bytes uint64_t SizeOfGeneratedCode() const { - ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_))); + DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_))); return pc_ - buffer_; } // Return the code size generated from label to the current position. uint64_t SizeOfCodeGeneratedSince(const Label* label) { - ASSERT(label->is_bound()); - ASSERT(pc_offset() >= label->pos()); - ASSERT(pc_offset() < buffer_size_); + DCHECK(label->is_bound()); + DCHECK(pc_offset() >= label->pos()); + DCHECK(pc_offset() < buffer_size_); return pc_offset() - label->pos(); } @@ -860,8 +934,8 @@ // TODO(jbramley): Work out what sign to use for these things and if possible, // change things to be consistent. void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) { - ASSERT(size >= 0); - ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label)); + DCHECK(size >= 0); + DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label)); } // Return the number of instructions generated from label to the @@ -879,7 +953,8 @@ static const int kPatchDebugBreakSlotAddressOffset = 0; // Number of instructions necessary to be able to later patch it to a call. - // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot(). + // See DebugCodegen::GenerateSlot() and + // BreakLocationIterator::SetDebugBreakAtSlot(). static const int kDebugBreakSlotInstructions = 4; static const int kDebugBreakSlotLength = kDebugBreakSlotInstructions * kInstructionSize; @@ -899,9 +974,7 @@ static bool IsConstantPoolAt(Instruction* instr); static int ConstantPoolSizeAt(Instruction* instr); // See Assembler::CheckConstPool for more info. - void ConstantPoolMarker(uint32_t size); void EmitPoolGuard(); - void ConstantPoolGuard(); // Prevent veneer pool emission until EndBlockVeneerPool is called. 
// Call to this function can be nested but must be followed by an equal @@ -945,9 +1018,9 @@ // function, compiled with and without debugger support (see for example // Debug::PrepareForBreakPoints()). // Compiling functions with debugger support generates additional code - // (Debug::GenerateSlot()). This may affect the emission of the pools and - // cause the version of the code with debugger support to have pools generated - // in different places. + // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools + // and cause the version of the code with debugger support to have pools + // generated in different places. // Recording the position and size of emitted pools allows to correctly // compute the offset mappings between the different versions of a function in // all situations. @@ -1144,8 +1217,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(width >= 1); - ASSERT(lsb + width <= rn.SizeInBits()); + DCHECK(width >= 1); + DCHECK(lsb + width <= rn.SizeInBits()); bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); } @@ -1154,15 +1227,15 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(width >= 1); - ASSERT(lsb + width <= rn.SizeInBits()); + DCHECK(width >= 1); + DCHECK(lsb + width <= rn.SizeInBits()); bfm(rd, rn, lsb, lsb + width - 1); } // Sbfm aliases. // Arithmetic shift right. 
void asr(const Register& rd, const Register& rn, unsigned shift) { - ASSERT(shift < rd.SizeInBits()); + DCHECK(shift < rd.SizeInBits()); sbfm(rd, rn, shift, rd.SizeInBits() - 1); } @@ -1171,8 +1244,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(width >= 1); - ASSERT(lsb + width <= rn.SizeInBits()); + DCHECK(width >= 1); + DCHECK(lsb + width <= rn.SizeInBits()); sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); } @@ -1181,8 +1254,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(width >= 1); - ASSERT(lsb + width <= rn.SizeInBits()); + DCHECK(width >= 1); + DCHECK(lsb + width <= rn.SizeInBits()); sbfm(rd, rn, lsb, lsb + width - 1); } @@ -1205,13 +1278,13 @@ // Logical shift left. void lsl(const Register& rd, const Register& rn, unsigned shift) { unsigned reg_size = rd.SizeInBits(); - ASSERT(shift < reg_size); + DCHECK(shift < reg_size); ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1); } // Logical shift right. void lsr(const Register& rd, const Register& rn, unsigned shift) { - ASSERT(shift < rd.SizeInBits()); + DCHECK(shift < rd.SizeInBits()); ubfm(rd, rn, shift, rd.SizeInBits() - 1); } @@ -1220,8 +1293,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(width >= 1); - ASSERT(lsb + width <= rn.SizeInBits()); + DCHECK(width >= 1); + DCHECK(lsb + width <= rn.SizeInBits()); ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); } @@ -1230,8 +1303,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(width >= 1); - ASSERT(lsb + width <= rn.SizeInBits()); + DCHECK(width >= 1); + DCHECK(lsb + width <= rn.SizeInBits()); ubfm(rd, rn, lsb, lsb + width - 1); } @@ -1378,9 +1451,6 @@ // Memory instructions. - // Load literal from pc + offset_from_pc. - void LoadLiteral(const CPURegister& rt, int offset_from_pc); - // Load integer or FP register. 
void ldr(const CPURegister& rt, const MemOperand& src); @@ -1427,12 +1497,11 @@ void stnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& dst); - // Load literal to register. - void ldr(const Register& rt, uint64_t imm); + // Load literal to register from a pc relative address. + void ldr_pcrel(const CPURegister& rt, int imm19); - // Load literal to FP register. - void ldr(const FPRegister& ft, double imm); - void ldr(const FPRegister& ft, float imm); + // Load literal to register. + void ldr(const CPURegister& rt, const Immediate& imm); // Move instructions. The default shift of -1 indicates that the move // instruction will calculate an appropriate 16-bit immediate and left shift @@ -1499,12 +1568,13 @@ enum NopMarkerTypes { DEBUG_BREAK_NOP, INTERRUPT_CODE_NOP, + ADR_FAR_NOP, FIRST_NOP_MARKER = DEBUG_BREAK_NOP, - LAST_NOP_MARKER = INTERRUPT_CODE_NOP + LAST_NOP_MARKER = ADR_FAR_NOP }; void nop(NopMarkerTypes n) { - ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER)); + DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER)); mov(Register::XRegFromCode(n), Register::XRegFromCode(n)); } @@ -1582,6 +1652,9 @@ // FP round to integer (nearest with ties to away). void frinta(const FPRegister& fd, const FPRegister& fn); + // FP round to integer (toward minus infinity). + void frintm(const FPRegister& fd, const FPRegister& fn); + // FP round to integer (nearest with ties to even). void frintn(const FPRegister& fd, const FPRegister& fn); @@ -1662,7 +1735,7 @@ // subsequent instructions. void EmitStringData(const char * string) { size_t len = strlen(string) + 1; - ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap)); + DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap)); EmitData(string, len); // Pad with NULL characters until pc_ is aligned. 
const char pad[] = {'\0', '\0', '\0', '\0'}; @@ -1682,52 +1755,58 @@ // Code generation helpers -------------------------------------------------- - unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; } + bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); } + + Instruction* pc() const { return Instruction::Cast(pc_); } Instruction* InstructionAt(int offset) const { return reinterpret_cast<Instruction*>(buffer_ + offset); } + ptrdiff_t InstructionOffset(Instruction* instr) const { + return reinterpret_cast<byte*>(instr) - buffer_; + } + // Register encoding. static Instr Rd(CPURegister rd) { - ASSERT(rd.code() != kSPRegInternalCode); + DCHECK(rd.code() != kSPRegInternalCode); return rd.code() << Rd_offset; } static Instr Rn(CPURegister rn) { - ASSERT(rn.code() != kSPRegInternalCode); + DCHECK(rn.code() != kSPRegInternalCode); return rn.code() << Rn_offset; } static Instr Rm(CPURegister rm) { - ASSERT(rm.code() != kSPRegInternalCode); + DCHECK(rm.code() != kSPRegInternalCode); return rm.code() << Rm_offset; } static Instr Ra(CPURegister ra) { - ASSERT(ra.code() != kSPRegInternalCode); + DCHECK(ra.code() != kSPRegInternalCode); return ra.code() << Ra_offset; } static Instr Rt(CPURegister rt) { - ASSERT(rt.code() != kSPRegInternalCode); + DCHECK(rt.code() != kSPRegInternalCode); return rt.code() << Rt_offset; } static Instr Rt2(CPURegister rt2) { - ASSERT(rt2.code() != kSPRegInternalCode); + DCHECK(rt2.code() != kSPRegInternalCode); return rt2.code() << Rt2_offset; } // These encoding functions allow the stack pointer to be encoded, and // disallow the zero register. 
static Instr RdSP(Register rd) { - ASSERT(!rd.IsZero()); + DCHECK(!rd.IsZero()); return (rd.code() & kRegCodeMask) << Rd_offset; } static Instr RnSP(Register rn) { - ASSERT(!rn.IsZero()); + DCHECK(!rn.IsZero()); return (rn.code() & kRegCodeMask) << Rn_offset; } @@ -1761,6 +1840,13 @@ inline static Instr ImmCondCmp(unsigned imm); inline static Instr Nzcv(StatusFlags nzcv); + static bool IsImmAddSub(int64_t immediate); + static bool IsImmLogical(uint64_t value, + unsigned width, + unsigned* n, + unsigned* imm_s, + unsigned* imm_r); + // MemOperand offset encoding. inline static Instr ImmLSUnsigned(int imm12); inline static Instr ImmLS(int imm9); @@ -1805,7 +1891,7 @@ void CheckConstPool(bool force_emit, bool require_jump); // Allocate a constant pool of the correct size for the generated code. - MaybeObject* AllocateConstantPool(Heap* heap); + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); // Generate the constant pool for the generated code. void PopulateConstantPool(ConstantPoolArray* constant_pool); @@ -1835,7 +1921,6 @@ void CheckVeneerPool(bool force_emit, bool require_jump, int margin = kVeneerDistanceMargin); - class BlockPoolsScope { public: explicit BlockPoolsScope(Assembler* assem) : assem_(assem) { @@ -1851,10 +1936,6 @@ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope); }; - // Available for constrained code generation scopes. Prefer - // MacroAssembler::Mov() when possible. 
- inline void LoadRelocated(const CPURegister& rt, const Operand& operand); - protected: inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const; @@ -1864,6 +1945,10 @@ static bool IsImmLSUnscaled(ptrdiff_t offset); static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size); + void LoadStorePair(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& addr, LoadStorePairOp op); + static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size); + void Logical(const Register& rd, const Register& rn, const Operand& operand, @@ -1874,11 +1959,6 @@ unsigned imm_s, unsigned imm_r, LogicalOp op); - static bool IsImmLogical(uint64_t value, - unsigned width, - unsigned* n, - unsigned* imm_s, - unsigned* imm_r); void ConditionalCompare(const Register& rn, const Operand& operand, @@ -1909,7 +1989,6 @@ const Operand& operand, FlagsUpdate S, AddSubOp op); - static bool IsImmAddSub(int64_t immediate); static bool IsImmFP32(float imm); static bool IsImmFP64(double imm); @@ -1927,6 +2006,7 @@ const CPURegister& rt, const CPURegister& rt2); static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor( const CPURegister& rt, const CPURegister& rt2); + static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt); // Remove the specified branch from the unbound label link chain. // If available, a veneer for this label can be used for other branches in the @@ -1951,19 +2031,10 @@ const Operand& operand, FlagsUpdate S, Instr op); - void LoadStorePair(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& addr, - LoadStorePairOp op); void LoadStorePairNonTemporal(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairNonTemporalOp op); - // Register the relocation information for the operand and load its value - // into rt. 
- void LoadRelocatedValue(const CPURegister& rt, - const Operand& operand, - LoadLiteralOp op); void ConditionalSelect(const Register& rd, const Register& rn, const Register& rm, @@ -2010,11 +2081,16 @@ // instructions. void BlockConstPoolFor(int instructions); + // Set how far from current pc the next constant pool check will be. + void SetNextConstPoolCheckIn(int instructions) { + next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize; + } + // Emit the instruction at pc_. void Emit(Instr instruction) { STATIC_ASSERT(sizeof(*pc_) == 1); STATIC_ASSERT(sizeof(instruction) == kInstructionSize); - ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_)); + DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_)); memcpy(pc_, &instruction, sizeof(instruction)); pc_ += sizeof(instruction); @@ -2023,8 +2099,8 @@ // Emit data inline in the instruction stream. void EmitData(void const * data, unsigned size) { - ASSERT(sizeof(*pc_) == 1); - ASSERT((pc_ + size) <= (buffer_ + buffer_size_)); + DCHECK(sizeof(*pc_) == 1); + DCHECK((pc_ + size) <= (buffer_ + buffer_size_)); // TODO(all): Somehow register we have some data here. Then we can // disassemble it correctly. @@ -2034,18 +2110,20 @@ } void GrowBuffer(); + void CheckBufferSpace(); void CheckBuffer(); // Pc offset of the next constant pool check. int next_constant_pool_check_; // Constant pool generation - // Pools are emitted in the instruction stream, preferably after unconditional - // jumps or after returns from functions (in dead code locations). - // If a long code sequence does not contain unconditional jumps, it is - // necessary to emit the constant pool before the pool gets too far from the - // location it is accessed from. In this case, we emit a jump over the emitted - // constant pool. + // Pools are emitted in the instruction stream. 
They are emitted when: + // * the distance to the first use is above a pre-defined distance or + // * the numbers of entries in the pool is above a pre-defined size or + // * code generation is finished + // If a pool needs to be emitted before code generation is finished a branch + // over the emitted pool will be inserted. + // Constants in the pool may be addresses of functions that gets relocated; // if so, a relocation info entry is associated to the constant pool entry. @@ -2053,34 +2131,22 @@ // expensive. By default we only check again once a number of instructions // has been generated. That also means that the sizing of the buffers is not // an exact science, and that we rely on some slop to not overrun buffers. - static const int kCheckConstPoolIntervalInst = 128; - static const int kCheckConstPoolInterval = - kCheckConstPoolIntervalInst * kInstructionSize; - - // Constants in pools are accessed via pc relative addressing, which can - // reach +/-4KB thereby defining a maximum distance between the instruction - // and the accessed constant. - static const int kMaxDistToConstPool = 4 * KB; - static const int kMaxNumPendingRelocInfo = - kMaxDistToConstPool / kInstructionSize; - - - // Average distance beetween a constant pool and the first instruction - // accessing the constant pool. Longer distance should result in less I-cache - // pollution. - // In practice the distance will be smaller since constant pool emission is - // forced after function return and sometimes after unconditional branches. - static const int kAvgDistToConstPool = - kMaxDistToConstPool - kCheckConstPoolInterval; + static const int kCheckConstPoolInterval = 128; + + // Distance to first use after a which a pool will be emitted. Pool entries + // are accessed with pc relative load therefore this cannot be more than + // 1 * MB. Since constant pool emission checks are interval based this value + // is an approximation. 
+ static const int kApproxMaxDistToConstPool = 64 * KB; + + // Number of pool entries after which a pool will be emitted. Since constant + // pool emission checks are interval based this value is an approximation. + static const int kApproxMaxPoolEntryCount = 512; // Emission of the constant pool may be blocked in some code sequences. int const_pool_blocked_nesting_; // Block emission if this is not zero. int no_const_pool_before_; // Block emission before this pc offset. - // Keep track of the first instruction requiring a constant pool entry - // since the previous constant pool was emitted. - int first_const_pool_use_; - // Emission of the veneer pools may be blocked in some code sequences. int veneer_pool_blocked_nesting_; // Block emission if this is not zero. @@ -2096,10 +2162,8 @@ // If every instruction in a long sequence is accessing the pool, we need one // pending relocation entry per instruction. - // the buffer of pending relocation info - RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo]; - // number of pending reloc info entries in the buffer - int num_pending_reloc_info_; + // The pending constant pool. + ConstPool constpool_; // Relocation for a type-recording IC has the AST id added to it. This // member variable is a way to pass the information from the call site to @@ -2113,7 +2177,7 @@ // Record the AST id of the CallIC being compiled, so that it can be placed // in the relocation information. 
void SetRecordedAstId(TypeFeedbackId ast_id) { - ASSERT(recorded_ast_id_.IsNone()); + DCHECK(recorded_ast_id_.IsNone()); recorded_ast_id_ = ast_id; } @@ -2161,7 +2225,7 @@ static const int kVeneerDistanceCheckMargin = kVeneerNoProtectionFactor * kVeneerDistanceMargin; int unresolved_branches_first_limit() const { - ASSERT(!unresolved_branches_.empty()); + DCHECK(!unresolved_branches_.empty()); return unresolved_branches_.begin()->first; } // This is similar to next_constant_pool_check_ and helps reduce the overhead @@ -2176,11 +2240,17 @@ // not later attempt (likely unsuccessfully) to patch it to branch directly to // the label. void DeleteUnresolvedBranchInfoForLabel(Label* label); + // This function deletes the information related to the label by traversing + // the label chain, and for each PC-relative instruction in the chain checking + // if pending unresolved information exists. Its complexity is proportional to + // the length of the label chain. + void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label); private: PositionsRecorder positions_recorder_; friend class PositionsRecorder; friend class EnsureSpace; + friend class ConstPool; }; class PatchingAssembler : public Assembler { @@ -2208,23 +2278,28 @@ ~PatchingAssembler() { // Const pool should still be blocked. - ASSERT(is_const_pool_blocked()); + DCHECK(is_const_pool_blocked()); EndBlockPools(); // Verify we have generated the number of instruction we expected. - ASSERT((pc_offset() + kGap) == buffer_size_); + DCHECK((pc_offset() + kGap) == buffer_size_); // Verify no relocation information has been emitted. - ASSERT(num_pending_reloc_info() == 0); + DCHECK(IsConstPoolEmpty()); // Flush the Instruction cache. size_t length = buffer_size_ - kGap; - CPU::FlushICache(buffer_, length); + CpuFeatures::FlushICache(buffer_, length); } + + // See definition of PatchAdrFar() for details. 
+ static const int kAdrFarPatchableNNops = 2; + static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; + void PatchAdrFar(ptrdiff_t target_offset); }; class EnsureSpace BASE_EMBEDDED { public: explicit EnsureSpace(Assembler* assembler) { - assembler->CheckBuffer(); + assembler->CheckBufferSpace(); } }; diff -Nru nodejs-0.11.13/deps/v8/src/arm64/assembler-arm64-inl.h nodejs-0.11.15/deps/v8/src/arm64/assembler-arm64-inl.h --- nodejs-0.11.13/deps/v8/src/arm64/assembler-arm64-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/assembler-arm64-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,51 +1,34 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_ #define V8_ARM64_ASSEMBLER_ARM64_INL_H_ -#include "arm64/assembler-arm64.h" -#include "cpu.h" -#include "debug.h" +#include "src/arm64/assembler-arm64.h" +#include "src/assembler.h" +#include "src/debug.h" namespace v8 { namespace internal { -void RelocInfo::apply(intptr_t delta) { +bool CpuFeatures::SupportsCrankshaft() { return true; } + + +void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { UNIMPLEMENTED(); } -void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); - Assembler::set_target_address_at(pc_, host_, target); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && + IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( host(), this, HeapObject::cast(target_code)); @@ -54,54 +37,54 @@ inline unsigned CPURegister::code() const { - 
ASSERT(IsValid()); + DCHECK(IsValid()); return reg_code; } inline CPURegister::RegisterType CPURegister::type() const { - ASSERT(IsValidOrNone()); + DCHECK(IsValidOrNone()); return reg_type; } inline RegList CPURegister::Bit() const { - ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte)); + DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte)); return IsValid() ? 1UL << reg_code : 0; } inline unsigned CPURegister::SizeInBits() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return reg_size; } inline int CPURegister::SizeInBytes() const { - ASSERT(IsValid()); - ASSERT(SizeInBits() % 8 == 0); + DCHECK(IsValid()); + DCHECK(SizeInBits() % 8 == 0); return reg_size / 8; } inline bool CPURegister::Is32Bits() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return reg_size == 32; } inline bool CPURegister::Is64Bits() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return reg_size == 64; } inline bool CPURegister::IsValid() const { if (IsValidRegister() || IsValidFPRegister()) { - ASSERT(!IsNone()); + DCHECK(!IsNone()); return true; } else { - ASSERT(IsNone()); + DCHECK(IsNone()); return false; } } @@ -123,17 +106,22 @@ inline bool CPURegister::IsNone() const { // kNoRegister types should always have size 0 and code 0. 
- ASSERT((reg_type != kNoRegister) || (reg_code == 0)); - ASSERT((reg_type != kNoRegister) || (reg_size == 0)); + DCHECK((reg_type != kNoRegister) || (reg_code == 0)); + DCHECK((reg_type != kNoRegister) || (reg_size == 0)); return reg_type == kNoRegister; } inline bool CPURegister::Is(const CPURegister& other) const { - ASSERT(IsValidOrNone() && other.IsValidOrNone()); - return (reg_code == other.reg_code) && (reg_size == other.reg_size) && - (reg_type == other.reg_type); + DCHECK(IsValidOrNone() && other.IsValidOrNone()); + return Aliases(other) && (reg_size == other.reg_size); +} + + +inline bool CPURegister::Aliases(const CPURegister& other) const { + DCHECK(IsValidOrNone() && other.IsValidOrNone()); + return (reg_code == other.reg_code) && (reg_type == other.reg_type); } @@ -158,27 +146,27 @@ inline bool CPURegister::IsZero() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return IsRegister() && (reg_code == kZeroRegCode); } inline bool CPURegister::IsSP() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return IsRegister() && (reg_code == kSPRegInternalCode); } inline void CPURegList::Combine(const CPURegList& other) { - ASSERT(IsValid()); - ASSERT(other.type() == type_); - ASSERT(other.RegisterSizeInBits() == size_); + DCHECK(IsValid()); + DCHECK(other.type() == type_); + DCHECK(other.RegisterSizeInBits() == size_); list_ |= other.list(); } inline void CPURegList::Remove(const CPURegList& other) { - ASSERT(IsValid()); + DCHECK(IsValid()); if (other.type() == type_) { list_ &= ~other.list(); } @@ -186,8 +174,8 @@ inline void CPURegList::Combine(const CPURegister& other) { - ASSERT(other.type() == type_); - ASSERT(other.SizeInBits() == size_); + DCHECK(other.type() == type_); + DCHECK(other.SizeInBits() == size_); Combine(other.code()); } @@ -204,92 +192,92 @@ inline void CPURegList::Combine(int code) { - ASSERT(IsValid()); - ASSERT(CPURegister::Create(code, size_, type_).IsValid()); + DCHECK(IsValid()); + DCHECK(CPURegister::Create(code, size_, 
type_).IsValid()); list_ |= (1UL << code); } inline void CPURegList::Remove(int code) { - ASSERT(IsValid()); - ASSERT(CPURegister::Create(code, size_, type_).IsValid()); + DCHECK(IsValid()); + DCHECK(CPURegister::Create(code, size_, type_).IsValid()); list_ &= ~(1UL << code); } inline Register Register::XRegFromCode(unsigned code) { - // This function returns the zero register when code = 31. The stack pointer - // can not be returned. - ASSERT(code < kNumberOfRegisters); - return Register::Create(code, kXRegSizeInBits); + if (code == kSPRegInternalCode) { + return csp; + } else { + DCHECK(code < kNumberOfRegisters); + return Register::Create(code, kXRegSizeInBits); + } } inline Register Register::WRegFromCode(unsigned code) { - ASSERT(code < kNumberOfRegisters); - return Register::Create(code, kWRegSizeInBits); + if (code == kSPRegInternalCode) { + return wcsp; + } else { + DCHECK(code < kNumberOfRegisters); + return Register::Create(code, kWRegSizeInBits); + } } inline FPRegister FPRegister::SRegFromCode(unsigned code) { - ASSERT(code < kNumberOfFPRegisters); + DCHECK(code < kNumberOfFPRegisters); return FPRegister::Create(code, kSRegSizeInBits); } inline FPRegister FPRegister::DRegFromCode(unsigned code) { - ASSERT(code < kNumberOfFPRegisters); + DCHECK(code < kNumberOfFPRegisters); return FPRegister::Create(code, kDRegSizeInBits); } inline Register CPURegister::W() const { - ASSERT(IsValidRegister()); + DCHECK(IsValidRegister()); return Register::WRegFromCode(reg_code); } inline Register CPURegister::X() const { - ASSERT(IsValidRegister()); + DCHECK(IsValidRegister()); return Register::XRegFromCode(reg_code); } inline FPRegister CPURegister::S() const { - ASSERT(IsValidFPRegister()); + DCHECK(IsValidFPRegister()); return FPRegister::SRegFromCode(reg_code); } inline FPRegister CPURegister::D() const { - ASSERT(IsValidFPRegister()); + DCHECK(IsValidFPRegister()); return FPRegister::DRegFromCode(reg_code); } -// Operand. 
-template<typename T> -Operand::Operand(Handle<T> value) : reg_(NoReg) { - initialize_handle(value); -} - - +// Immediate. // Default initializer is for int types -template<typename int_t> -struct OperandInitializer { +template<typename T> +struct ImmediateInitializer { static const bool kIsIntType = true; - static inline RelocInfo::Mode rmode_for(int_t) { - return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32; + static inline RelocInfo::Mode rmode_for(T) { + return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32; } - static inline int64_t immediate_for(int_t t) { - STATIC_ASSERT(sizeof(int_t) <= 8); + static inline int64_t immediate_for(T t) { + STATIC_ASSERT(sizeof(T) <= 8); return t; } }; template<> -struct OperandInitializer<Smi*> { +struct ImmediateInitializer<Smi*> { static const bool kIsIntType = false; static inline RelocInfo::Mode rmode_for(Smi* t) { return RelocInfo::NONE64; @@ -301,7 +289,7 @@ template<> -struct OperandInitializer<ExternalReference> { +struct ImmediateInitializer<ExternalReference> { static const bool kIsIntType = false; static inline RelocInfo::Mode rmode_for(ExternalReference t) { return RelocInfo::EXTERNAL_REFERENCE; @@ -313,45 +301,64 @@ template<typename T> -Operand::Operand(T t) - : immediate_(OperandInitializer<T>::immediate_for(t)), - reg_(NoReg), - rmode_(OperandInitializer<T>::rmode_for(t)) {} +Immediate::Immediate(Handle<T> value) { + InitializeHandle(value); +} template<typename T> -Operand::Operand(T t, RelocInfo::Mode rmode) - : immediate_(OperandInitializer<T>::immediate_for(t)), - reg_(NoReg), +Immediate::Immediate(T t) + : value_(ImmediateInitializer<T>::immediate_for(t)), + rmode_(ImmediateInitializer<T>::rmode_for(t)) {} + + +template<typename T> +Immediate::Immediate(T t, RelocInfo::Mode rmode) + : value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) { - STATIC_ASSERT(OperandInitializer<T>::kIsIntType); + STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType); } +// Operand. 
+template<typename T> +Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {} + + +template<typename T> +Operand::Operand(T t) : immediate_(t), reg_(NoReg) {} + + +template<typename T> +Operand::Operand(T t, RelocInfo::Mode rmode) + : immediate_(t, rmode), + reg_(NoReg) {} + + Operand::Operand(Register reg, Shift shift, unsigned shift_amount) - : reg_(reg), + : immediate_(0), + reg_(reg), shift_(shift), extend_(NO_EXTEND), - shift_amount_(shift_amount), - rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) { - ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits)); - ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits)); - ASSERT(!reg.IsSP()); + shift_amount_(shift_amount) { + DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits)); + DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits)); + DCHECK(!reg.IsSP()); } Operand::Operand(Register reg, Extend extend, unsigned shift_amount) - : reg_(reg), + : immediate_(0), + reg_(reg), shift_(NO_SHIFT), extend_(extend), - shift_amount_(shift_amount), - rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) { - ASSERT(reg.IsValid()); - ASSERT(shift_amount <= 4); - ASSERT(!reg.IsSP()); + shift_amount_(shift_amount) { + DCHECK(reg.IsValid()); + DCHECK(shift_amount <= 4); + DCHECK(!reg.IsSP()); // Extend modes SXTX and UXTX require a 64-bit register. - ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX))); + DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX))); } @@ -372,7 +379,7 @@ bool Operand::IsZero() const { if (IsImmediate()) { - return immediate() == 0; + return ImmediateValue() == 0; } else { return reg().IsZero(); } @@ -380,51 +387,61 @@ Operand Operand::ToExtendedRegister() const { - ASSERT(IsShiftedRegister()); - ASSERT((shift_ == LSL) && (shift_amount_ <= 4)); + DCHECK(IsShiftedRegister()); + DCHECK((shift_ == LSL) && (shift_amount_ <= 4)); return Operand(reg_, reg_.Is64Bits() ? 
UXTX : UXTW, shift_amount_); } -int64_t Operand::immediate() const { - ASSERT(IsImmediate()); +Immediate Operand::immediate() const { + DCHECK(IsImmediate()); return immediate_; } +int64_t Operand::ImmediateValue() const { + DCHECK(IsImmediate()); + return immediate_.value(); +} + + Register Operand::reg() const { - ASSERT(IsShiftedRegister() || IsExtendedRegister()); + DCHECK(IsShiftedRegister() || IsExtendedRegister()); return reg_; } Shift Operand::shift() const { - ASSERT(IsShiftedRegister()); + DCHECK(IsShiftedRegister()); return shift_; } Extend Operand::extend() const { - ASSERT(IsExtendedRegister()); + DCHECK(IsExtendedRegister()); return extend_; } unsigned Operand::shift_amount() const { - ASSERT(IsShiftedRegister() || IsExtendedRegister()); + DCHECK(IsShiftedRegister() || IsExtendedRegister()); return shift_amount_; } Operand Operand::UntagSmi(Register smi) { - ASSERT(smi.Is64Bits()); + STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift + + kSmiValueSize)); + DCHECK(smi.Is64Bits()); return Operand(smi, ASR, kSmiShift); } Operand Operand::UntagSmiAndScale(Register smi, int scale) { - ASSERT(smi.Is64Bits()); - ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize))); + STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift + + kSmiValueSize)); + DCHECK(smi.Is64Bits()); + DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize))); if (scale > kSmiShift) { return Operand(smi, LSL, scale - kSmiShift); } else if (scale < kSmiShift) { @@ -434,10 +451,16 @@ } +MemOperand::MemOperand() + : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset), + shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) { +} + + MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode) : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode), shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) { - ASSERT(base.Is64Bits() && !base.IsZero()); + DCHECK(base.Is64Bits() && !base.IsZero()); } @@ -447,12 +470,12 @@ unsigned 
shift_amount) : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset), shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) { - ASSERT(base.Is64Bits() && !base.IsZero()); - ASSERT(!regoffset.IsSP()); - ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); + DCHECK(base.Is64Bits() && !base.IsZero()); + DCHECK(!regoffset.IsSP()); + DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); // SXTX extend mode requires a 64-bit offset register. - ASSERT(regoffset.Is64Bits() || (extend != SXTX)); + DCHECK(regoffset.Is64Bits() || (extend != SXTX)); } @@ -462,22 +485,22 @@ unsigned shift_amount) : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset), shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) { - ASSERT(base.Is64Bits() && !base.IsZero()); - ASSERT(regoffset.Is64Bits() && !regoffset.IsSP()); - ASSERT(shift == LSL); + DCHECK(base.Is64Bits() && !base.IsZero()); + DCHECK(regoffset.Is64Bits() && !regoffset.IsSP()); + DCHECK(shift == LSL); } MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode) : base_(base), addrmode_(addrmode) { - ASSERT(base.Is64Bits() && !base.IsZero()); + DCHECK(base.Is64Bits() && !base.IsZero()); if (offset.IsImmediate()) { - offset_ = offset.immediate(); + offset_ = offset.ImmediateValue(); regoffset_ = NoReg; } else if (offset.IsShiftedRegister()) { - ASSERT(addrmode == Offset); + DCHECK(addrmode == Offset); regoffset_ = offset.reg(); shift_= offset.shift(); @@ -487,11 +510,11 @@ offset_ = 0; // These assertions match those in the shifted-register constructor. 
- ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP()); - ASSERT(shift_ == LSL); + DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP()); + DCHECK(shift_ == LSL); } else { - ASSERT(offset.IsExtendedRegister()); - ASSERT(addrmode == Offset); + DCHECK(offset.IsExtendedRegister()); + DCHECK(addrmode == Offset); regoffset_ = offset.reg(); extend_ = offset.extend(); @@ -501,9 +524,9 @@ offset_ = 0; // These assertions match those in the extended-register constructor. - ASSERT(!regoffset_.IsSP()); - ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); - ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX))); + DCHECK(!regoffset_.IsSP()); + DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); + DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX))); } } @@ -530,7 +553,7 @@ if (IsImmediateOffset()) { return offset(); } else { - ASSERT(IsRegisterOffset()); + DCHECK(IsRegisterOffset()); if (extend() == NO_EXTEND) { return Operand(regoffset(), shift(), shift_amount()); } else { @@ -552,7 +575,7 @@ Address Assembler::target_pointer_address_at(Address pc) { Instruction* instr = reinterpret_cast<Instruction*>(pc); - ASSERT(instr->IsLdrLiteralX()); + DCHECK(instr->IsLdrLiteralX()); return reinterpret_cast<Address>(instr->ImmPCOffsetTarget()); } @@ -579,11 +602,16 @@ Address candidate = pc - 2 * kInstructionSize; Instruction* instr = reinterpret_cast<Instruction*>(candidate); USE(instr); - ASSERT(instr->IsLdrLiteralX()); + DCHECK(instr->IsLdrLiteralX()); return candidate; } +Address Assembler::break_address_from_return_address(Address pc) { + return pc - Assembler::kPatchDebugBreakSlotReturnOffset; +} + + Address Assembler::return_address_from_call_start(Address pc) { // The call, generated by MacroAssembler::Call, is one of two possible // sequences: @@ -607,14 +635,14 @@ Instruction* instr = reinterpret_cast<Instruction*>(pc); if (instr->IsMovz()) { // Verify the instruction sequence. 
- ASSERT(instr->following(1)->IsMovk()); - ASSERT(instr->following(2)->IsMovk()); - ASSERT(instr->following(3)->IsBranchAndLinkToRegister()); + DCHECK(instr->following(1)->IsMovk()); + DCHECK(instr->following(2)->IsMovk()); + DCHECK(instr->following(3)->IsBranchAndLinkToRegister()); return pc + Assembler::kCallSizeWithoutRelocation; } else { // Verify the instruction sequence. - ASSERT(instr->IsLdrLiteralX()); - ASSERT(instr->following(1)->IsBranchAndLinkToRegister()); + DCHECK(instr->IsLdrLiteralX()); + DCHECK(instr->following(1)->IsBranchAndLinkToRegister()); return pc + Assembler::kCallSizeWithRelocation; } } @@ -628,11 +656,12 @@ void Assembler::set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target) { + Address target, + ICacheFlushMode icache_flush_mode) { Memory::Address_at(target_pointer_address_at(pc)) = target; // Intuitively, we would think it is necessary to always flush the // instruction cache after patching a target address in the code as follows: - // CPU::FlushICache(pc, sizeof(target)); + // CpuFeatures::FlushICache(pc, sizeof(target)); // However, on ARM, an instruction is actually patched in the case of // embedded constants of the form: // ldr ip, [pc, #...] @@ -643,9 +672,10 @@ void Assembler::set_target_address_at(Address pc, Code* code, - Address target) { + Address target, + ICacheFlushMode icache_flush_mode) { ConstantPoolArray* constant_pool = code ? 
code->constant_pool() : NULL; - set_target_address_at(pc, constant_pool, target); + set_target_address_at(pc, constant_pool, target, icache_flush_mode); } @@ -655,13 +685,13 @@ Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); return Assembler::target_pointer_address_at(pc_); @@ -669,30 +699,32 @@ Address RelocInfo::constant_pool_entry_address() { - ASSERT(IsInConstantPool()); + DCHECK(IsInConstantPool()); return Assembler::target_pointer_address_at(pc_); } Object* RelocInfo::target_object() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)); } Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Handle<Object>(reinterpret_cast<Object**>( Assembler::target_address_at(pc_, host_))); } -void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - ASSERT(!target->IsConsString()); +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, host_, - reinterpret_cast<Address>(target)); - if (mode == UPDATE_WRITE_BARRIER && + reinterpret_cast<Address>(target), + icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { 
host()->GetHeap()->incremental_marking()->RecordWrite( @@ -702,21 +734,24 @@ Address RelocInfo::target_reference() { - ASSERT(rmode_ == EXTERNAL_REFERENCE); + DCHECK(rmode_ == EXTERNAL_REFERENCE); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_runtime_entry(Assembler* origin) { - ASSERT(IsRuntimeEntry(rmode_)); + DCHECK(IsRuntimeEntry(rmode_)); return target_address(); } void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode mode) { - ASSERT(IsRuntimeEntry(rmode_)); - if (target_address() != target) set_target_address(target, mode); + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) { + set_target_address(target, write_barrier_mode, icache_flush_mode); + } } @@ -728,17 +763,19 @@ Cell* RelocInfo::target_cell() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); return Cell::FromValueAddress(Memory::Address_at(pc_)); } -void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { +void RelocInfo::set_target_cell(Cell* cell, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { UNIMPLEMENTED(); } -static const int kCodeAgeSequenceSize = 5 * kInstructionSize; +static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize; static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize; @@ -749,17 +786,17 @@ Code* RelocInfo::code_age_stub() { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - ASSERT(!Code::IsYoungSequence(pc_)); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); // Read the stub entry point from the code age sequence. 
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset; return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address)); } -void RelocInfo::set_code_age_stub(Code* stub) { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - ASSERT(!Code::IsYoungSequence(pc_)); +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_)); // Overwrite the stub entry point in the code age sequence. This is loaded as // a literal so there is no need to call FlushICache here. Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset; @@ -768,7 +805,7 @@ Address RelocInfo::call_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); // For the above sequences the Relocinfo points to the load literal loading // the call address. 
@@ -777,7 +814,7 @@ void RelocInfo::set_call_address(Address target) { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Assembler::set_target_address_at(pc_, host_, target); if (host() != NULL) { @@ -789,7 +826,7 @@ void RelocInfo::WipeOut() { - ASSERT(IsEmbeddedObject(rmode_) || + DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsExternalReference(rmode_)); @@ -825,14 +862,12 @@ visitor->VisitCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence())) && isolate->debug()->has_break_points()) { visitor->VisitDebugTarget(this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } @@ -850,14 +885,12 @@ StaticVisitor::VisitCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } @@ -865,11 +898,11 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) { - ASSERT(rt.IsValid()); + DCHECK(rt.IsValid()); if (rt.IsRegister()) { return rt.Is64Bits() ? LDR_x : LDR_w; } else { - ASSERT(rt.IsFPRegister()); + DCHECK(rt.IsFPRegister()); return rt.Is64Bits() ? 
LDR_d : LDR_s; } } @@ -877,23 +910,23 @@ LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt, const CPURegister& rt2) { - ASSERT(AreSameSizeAndType(rt, rt2)); + DCHECK(AreSameSizeAndType(rt, rt2)); USE(rt2); if (rt.IsRegister()) { return rt.Is64Bits() ? LDP_x : LDP_w; } else { - ASSERT(rt.IsFPRegister()); + DCHECK(rt.IsFPRegister()); return rt.Is64Bits() ? LDP_d : LDP_s; } } LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) { - ASSERT(rt.IsValid()); + DCHECK(rt.IsValid()); if (rt.IsRegister()) { return rt.Is64Bits() ? STR_x : STR_w; } else { - ASSERT(rt.IsFPRegister()); + DCHECK(rt.IsFPRegister()); return rt.Is64Bits() ? STR_d : STR_s; } } @@ -901,12 +934,12 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt, const CPURegister& rt2) { - ASSERT(AreSameSizeAndType(rt, rt2)); + DCHECK(AreSameSizeAndType(rt, rt2)); USE(rt2); if (rt.IsRegister()) { return rt.Is64Bits() ? STP_x : STP_w; } else { - ASSERT(rt.IsFPRegister()); + DCHECK(rt.IsFPRegister()); return rt.Is64Bits() ? STP_d : STP_s; } } @@ -914,12 +947,12 @@ LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor( const CPURegister& rt, const CPURegister& rt2) { - ASSERT(AreSameSizeAndType(rt, rt2)); + DCHECK(AreSameSizeAndType(rt, rt2)); USE(rt2); if (rt.IsRegister()) { return rt.Is64Bits() ? LDNP_x : LDNP_w; } else { - ASSERT(rt.IsFPRegister()); + DCHECK(rt.IsFPRegister()); return rt.Is64Bits() ? LDNP_d : LDNP_s; } } @@ -927,21 +960,31 @@ LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor( const CPURegister& rt, const CPURegister& rt2) { - ASSERT(AreSameSizeAndType(rt, rt2)); + DCHECK(AreSameSizeAndType(rt, rt2)); USE(rt2); if (rt.IsRegister()) { return rt.Is64Bits() ? STNP_x : STNP_w; } else { - ASSERT(rt.IsFPRegister()); + DCHECK(rt.IsFPRegister()); return rt.Is64Bits() ? STNP_d : STNP_s; } } +LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) { + if (rt.IsRegister()) { + return rt.Is64Bits() ? 
LDR_x_lit : LDR_w_lit; + } else { + DCHECK(rt.IsFPRegister()); + return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit; + } +} + + int Assembler::LinkAndGetInstructionOffsetTo(Label* label) { - ASSERT(kStartOfLabelLinkChain == 0); + DCHECK(kStartOfLabelLinkChain == 0); int offset = LinkAndGetByteOffsetTo(label); - ASSERT(IsAligned(offset, kInstructionSize)); + DCHECK(IsAligned(offset, kInstructionSize)); return offset >> kInstructionSizeLog2; } @@ -996,7 +1039,7 @@ Instr Assembler::ImmTestBranchBit(unsigned bit_pos) { - ASSERT(is_uint6(bit_pos)); + DCHECK(is_uint6(bit_pos)); // Subtract five from the shift offset, as we need bit 5 from bit_pos. unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5); unsigned b40 = bit_pos << ImmTestBranchBit40_offset; @@ -1012,7 +1055,7 @@ Instr Assembler::ImmAddSub(int64_t imm) { - ASSERT(IsImmAddSub(imm)); + DCHECK(IsImmAddSub(imm)); if (is_uint12(imm)) { // No shift required. return imm << ImmAddSub_offset; } else { @@ -1022,7 +1065,7 @@ Instr Assembler::ImmS(unsigned imms, unsigned reg_size) { - ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) || + DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) || ((reg_size == kWRegSizeInBits) && is_uint5(imms))); USE(reg_size); return imms << ImmS_offset; @@ -1030,26 +1073,26 @@ Instr Assembler::ImmR(unsigned immr, unsigned reg_size) { - ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) || + DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) || ((reg_size == kWRegSizeInBits) && is_uint5(immr))); USE(reg_size); - ASSERT(is_uint6(immr)); + DCHECK(is_uint6(immr)); return immr << ImmR_offset; } Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) { - ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); - ASSERT(is_uint6(imms)); - ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3)); + DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); + DCHECK(is_uint6(imms)); + DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 
3)); USE(reg_size); return imms << ImmSetBits_offset; } Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) { - ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); - ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) || + DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); + DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) || ((reg_size == kWRegSizeInBits) && is_uint5(immr))); USE(reg_size); return immr << ImmRotate_offset; @@ -1063,21 +1106,21 @@ Instr Assembler::BitN(unsigned bitn, unsigned reg_size) { - ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); - ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0)); + DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); + DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0)); USE(reg_size); return bitn << BitN_offset; } Instr Assembler::ShiftDP(Shift shift) { - ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR); + DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR); return shift << ShiftDP_offset; } Instr Assembler::ImmDPShift(unsigned amount) { - ASSERT(is_uint6(amount)); + DCHECK(is_uint6(amount)); return amount << ImmDPShift_offset; } @@ -1088,13 +1131,13 @@ Instr Assembler::ImmExtendShift(unsigned left_shift) { - ASSERT(left_shift <= 4); + DCHECK(left_shift <= 4); return left_shift << ImmExtendShift_offset; } Instr Assembler::ImmCondCmp(unsigned imm) { - ASSERT(is_uint5(imm)); + DCHECK(is_uint5(imm)); return imm << ImmCondCmp_offset; } @@ -1105,75 +1148,75 @@ Instr Assembler::ImmLSUnsigned(int imm12) { - ASSERT(is_uint12(imm12)); + DCHECK(is_uint12(imm12)); return imm12 << ImmLSUnsigned_offset; } Instr Assembler::ImmLS(int imm9) { - ASSERT(is_int9(imm9)); + DCHECK(is_int9(imm9)); return truncate_to_int9(imm9) << ImmLS_offset; } Instr Assembler::ImmLSPair(int imm7, LSDataSize size) { - ASSERT(((imm7 >> size) << size) == imm7); + DCHECK(((imm7 >> size) << size) == imm7); int 
scaled_imm7 = imm7 >> size; - ASSERT(is_int7(scaled_imm7)); + DCHECK(is_int7(scaled_imm7)); return truncate_to_int7(scaled_imm7) << ImmLSPair_offset; } Instr Assembler::ImmShiftLS(unsigned shift_amount) { - ASSERT(is_uint1(shift_amount)); + DCHECK(is_uint1(shift_amount)); return shift_amount << ImmShiftLS_offset; } Instr Assembler::ImmException(int imm16) { - ASSERT(is_uint16(imm16)); + DCHECK(is_uint16(imm16)); return imm16 << ImmException_offset; } Instr Assembler::ImmSystemRegister(int imm15) { - ASSERT(is_uint15(imm15)); + DCHECK(is_uint15(imm15)); return imm15 << ImmSystemRegister_offset; } Instr Assembler::ImmHint(int imm7) { - ASSERT(is_uint7(imm7)); + DCHECK(is_uint7(imm7)); return imm7 << ImmHint_offset; } Instr Assembler::ImmBarrierDomain(int imm2) { - ASSERT(is_uint2(imm2)); + DCHECK(is_uint2(imm2)); return imm2 << ImmBarrierDomain_offset; } Instr Assembler::ImmBarrierType(int imm2) { - ASSERT(is_uint2(imm2)); + DCHECK(is_uint2(imm2)); return imm2 << ImmBarrierType_offset; } LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) { - ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8)); + DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8)); return static_cast<LSDataSize>(op >> SizeLS_offset); } Instr Assembler::ImmMoveWide(uint64_t imm) { - ASSERT(is_uint16(imm)); + DCHECK(is_uint16(imm)); return imm << ImmMoveWide_offset; } Instr Assembler::ShiftMoveWide(int64_t shift) { - ASSERT(is_uint2(shift)); + DCHECK(is_uint2(shift)); return shift << ShiftMoveWide_offset; } @@ -1184,7 +1227,7 @@ Instr Assembler::FPScale(unsigned scale) { - ASSERT(is_uint6(scale)); + DCHECK(is_uint6(scale)); return scale << FPScale_offset; } @@ -1194,16 +1237,16 @@ } -void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) { - LoadRelocatedValue(rt, operand, LDR_x_lit); +inline void Assembler::CheckBufferSpace() { + DCHECK(pc_ < (buffer_ + buffer_size_)); + if (buffer_space() < kGap) { + GrowBuffer(); + } } inline void 
Assembler::CheckBuffer() { - ASSERT(pc_ < (buffer_ + buffer_size_)); - if (buffer_space() < kGap) { - GrowBuffer(); - } + CheckBufferSpace(); if (pc_offset() >= next_veneer_pool_check_) { CheckVeneerPool(false, true); } @@ -1214,7 +1257,7 @@ TypeFeedbackId Assembler::RecordedAstId() { - ASSERT(!recorded_ast_id_.IsNone()); + DCHECK(!recorded_ast_id_.IsNone()); return recorded_ast_id_; } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/builtins-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/builtins-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/builtins-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/builtins-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "codegen.h" -#include "debug.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -89,7 +66,7 @@ num_extra_args = 1; __ Push(x1); } else { - ASSERT(extra_args == NO_EXTRA_ARGUMENTS); + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); } // JumpToExternalReference expects x0 to contain the number of arguments @@ -317,7 +294,7 @@ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex); __ B(hs, &ok); - CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode); + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); GenerateTailCallToReturnedCode(masm); __ Bind(&ok); @@ -327,7 +304,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function, - bool count_constructions, bool create_memento) { // ----------- S t a t e ------------- // -- x0 : number of arguments @@ -338,12 +314,8 @@ // ----------------------------------- ASM_LOCATION("Builtins::Generate_JSConstructStubHelper"); - // Should never count constructions for api objects. 
- ASSERT(!is_api_function || !count_constructions); // Should never create mementos for api functions. - ASSERT(!is_api_function || !create_memento); - // Should never create mementos before slack tracking is finished. - ASSERT(!count_constructions || !create_memento); + DCHECK(!is_api_function || !create_memento); Isolate* isolate = masm->isolate(); @@ -370,13 +342,11 @@ Label rt_call, allocated; if (FLAG_inline_new) { Label undo_allocation; -#if ENABLE_DEBUGGER_SUPPORT ExternalReference debug_step_in_fp = ExternalReference::debug_step_in_fp_address(isolate); __ Mov(x2, Operand(debug_step_in_fp)); __ Ldr(x2, MemOperand(x2)); __ Cbnz(x2, &rt_call); -#endif // Load the initial map and verify that it is in fact a map. Register init_map = x2; __ Ldr(init_map, @@ -391,24 +361,28 @@ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE); __ B(eq, &rt_call); - if (count_constructions) { + Register constructon_count = x14; + if (!is_api_function) { Label allocate; + MemOperand bit_field3 = + FieldMemOperand(init_map, Map::kBitField3Offset); + // Check if slack tracking is enabled. + __ Ldr(x4, bit_field3); + __ DecodeField<Map::ConstructionCount>(constructon_count, x4); + __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking)); + __ B(eq, &allocate); // Decrease generous allocation count. - __ Ldr(x3, FieldMemOperand(constructor, - JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset); - __ Ldrb(x4, constructor_count); - __ Subs(x4, x4, 1); - __ Strb(x4, constructor_count); + __ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift)); + __ Str(x4, bit_field3); + __ Cmp(constructon_count, Operand(JSFunction::kFinishSlackTracking)); __ B(ne, &allocate); // Push the constructor and map to the stack, and the constructor again // as argument to the runtime call. __ Push(constructor, init_map, constructor); - // The call will replace the stub, so the countdown is only done once. 
- __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1); + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); __ Pop(init_map, constructor); + __ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking)); __ Bind(&allocate); } @@ -438,8 +412,8 @@ __ Add(first_prop, new_obj, JSObject::kHeaderSize); // Fill all of the in-object properties with the appropriate filler. - Register undef = x7; - __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); + Register filler = x7; + __ LoadRoot(filler, Heap::kUndefinedValueRootIndex); // Obtain number of pre-allocated property fields and in-object // properties. @@ -457,48 +431,50 @@ Register prop_fields = x6; __ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize); - if (count_constructions) { + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. + __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking)); + __ B(eq, &no_inobject_slack_tracking); + constructon_count = NoReg; + // Fill the pre-allocated fields with undef. - __ FillFields(first_prop, prealloc_fields, undef); + __ FillFields(first_prop, prealloc_fields, filler); - // Register first_non_prealloc is the offset of the first field after + // Update first_prop register to be the offset of the first field after // pre-allocated fields. - Register first_non_prealloc = x12; - __ Add(first_non_prealloc, first_prop, + __ Add(first_prop, first_prop, Operand(prealloc_fields, LSL, kPointerSizeLog2)); - first_prop = NoReg; - if (FLAG_debug_code) { - Register obj_end = x5; + Register obj_end = x14; __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2)); - __ Cmp(first_non_prealloc, obj_end); + __ Cmp(first_prop, obj_end); __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields); } // Fill the remaining fields with one pointer filler map. 
- Register one_pointer_filler = x5; - Register non_prealloc_fields = x6; - __ LoadRoot(one_pointer_filler, Heap::kOnePointerFillerMapRootIndex); - __ Sub(non_prealloc_fields, prop_fields, prealloc_fields); - __ FillFields(first_non_prealloc, non_prealloc_fields, - one_pointer_filler); - prop_fields = NoReg; - } else if (create_memento) { + __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex); + __ Sub(prop_fields, prop_fields, prealloc_fields); + + __ bind(&no_inobject_slack_tracking); + } + if (create_memento) { // Fill the pre-allocated fields with undef. - __ FillFields(first_prop, prop_fields, undef); + __ FillFields(first_prop, prop_fields, filler); __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2)); __ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex); - ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); + DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex)); // Load the AllocationSite __ Peek(x14, 2 * kXRegSize); - ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); + DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex)); first_prop = NoReg; } else { // Fill all of the property fields with undef. - __ FillFields(first_prop, prop_fields, undef); + __ FillFields(first_prop, prop_fields, filler); first_prop = NoReg; prop_fields = NoReg; } @@ -541,7 +517,7 @@ // Initialize the fields to undefined. Register elements = x10; __ Add(elements, new_array, FixedArray::kHeaderSize); - __ FillFields(elements, element_count, undef); + __ FillFields(elements, element_count, filler); // Store the initialized FixedArray into the properties field of the // JSObject. @@ -566,7 +542,7 @@ __ Peek(x4, 2 * kXRegSize); __ Push(x4); __ Push(constructor); // Argument for Runtime_NewObject. 
- __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2); + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); __ Mov(x4, x0); // If we ended up using the runtime, and we want a memento, then the // runtime call made it for us, and we shouldn't do create count @@ -574,7 +550,7 @@ __ jmp(&count_incremented); } else { __ Push(constructor); // Argument for Runtime_NewObject. - __ CallRuntime(Runtime::kHiddenNewObject, 1); + __ CallRuntime(Runtime::kNewObject, 1); __ Mov(x4, x0); } @@ -649,7 +625,7 @@ } // Store offset of return address for deoptimizer. - if (!is_api_function && !count_constructions) { + if (!is_api_function) { masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); } @@ -700,18 +676,13 @@ } -void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, true, false); -} - - void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new); + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); } void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, true, false, false); + Generate_JSConstructStubHelper(masm, true, false); } @@ -785,7 +756,7 @@ // No type feedback cell is available. __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); __ CallStub(&stub); } else { ParameterCount actual(x0); @@ -811,7 +782,7 @@ void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized); + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); GenerateTailCallToReturnedCode(masm); } @@ -821,11 +792,11 @@ Register function = x1; // Preserve function. At the same time, push arguments for - // kHiddenCompileOptimized. 
+ // kCompileOptimized. __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent)); __ Push(function, function, x10); - __ CallRuntime(Runtime::kHiddenCompileOptimized, 2); + __ CallRuntime(Runtime::kCompileOptimized, 2); // Restore receiver. __ Pop(function); @@ -912,7 +883,7 @@ } // Jump to point after the code-age stub. - __ Add(x0, x0, kCodeAgeSequenceSize); + __ Add(x0, x0, kNoCodeAgeSequenceLength); __ Br(x0); } @@ -935,7 +906,7 @@ // preserve the registers with parameters. __ PushXRegList(kSafepointSavedRegisters); // Pass the function and deoptimization type to the runtime system. - __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); __ PopXRegList(kSafepointSavedRegisters); } @@ -965,7 +936,7 @@ // Pass the deoptimization type to the runtime system. __ Mov(x0, Smi::FromInt(static_cast<int>(type))); __ Push(x0); - __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); } // Get the full codegen state from the stack and untag it. @@ -1050,7 +1021,7 @@ __ B(hs, &ok); { FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kHiddenStackGuard, 0); + __ CallRuntime(Runtime::kStackGuard, 0); } __ Jump(masm->isolate()->builtins()->OnStackReplacement(), RelocInfo::CODE_TARGET); @@ -1094,7 +1065,7 @@ // 3a. Patch the first argument if necessary when calling a function. Label shift_arguments; __ Mov(call_type, static_cast<int>(call_type_JS_func)); - { Label convert_to_object, use_global_receiver, patch_receiver; + { Label convert_to_object, use_global_proxy, patch_receiver; // Change context eagerly in case we need the global receiver. 
__ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); @@ -1118,8 +1089,8 @@ __ JumpIfSmi(receiver, &convert_to_object); __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, - &use_global_receiver); - __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver); + &use_global_proxy); + __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy); STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); __ JumpIfObjectType(receiver, scratch1, scratch2, @@ -1147,10 +1118,10 @@ __ Mov(call_type, static_cast<int>(call_type_JS_func)); __ B(&patch_receiver); - __ Bind(&use_global_receiver); + __ Bind(&use_global_proxy); __ Ldr(receiver, GlobalObjectMemOperand()); __ Ldr(receiver, - FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, GlobalObject::kGlobalProxyOffset)); __ Bind(&patch_receiver); @@ -1275,12 +1246,12 @@ // TODO(jbramley): Check that the stack usage here is safe. __ Sub(x10, jssp, x10); // Check if the arguments will overflow the stack. - __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2)); + __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2)); __ B(gt, &enough_stack_space); // There is not enough stack space, so use a builtin to throw an appropriate // error. __ Push(function, argc); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); // We should never return from the APPLY_OVERFLOW builtin. if (__ emit_debug_code()) { __ Unreachable(); @@ -1307,7 +1278,7 @@ // Compute and push the receiver. // Do not transform the receiver for strict mode functions. - Label convert_receiver_to_object, use_global_receiver; + Label convert_receiver_to_object, use_global_proxy; __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset)); __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver); // Do not transform the receiver for native functions. 
@@ -1315,9 +1286,9 @@ // Compute the receiver in sloppy mode. __ JumpIfSmi(receiver, &convert_receiver_to_object); - __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver); + __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy); __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, - &use_global_receiver); + &use_global_proxy); // Check if the receiver is already a JavaScript object. STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); @@ -1331,9 +1302,9 @@ __ Mov(receiver, x0); __ B(&push_receiver); - __ Bind(&use_global_receiver); + __ Bind(&use_global_proxy); __ Ldr(x10, GlobalObjectMemOperand()); - __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); + __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset)); // Push the receiver __ Bind(&push_receiver); @@ -1400,6 +1371,27 @@ } +static void ArgumentAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- x0 : actual number of arguments + // -- x1 : function (passed through to callee) + // -- x2 : expected number of arguments + // ----------------------------------- + // Check the stack for overflow. + // We are not trying to catch interruptions (e.g. debug break and + // preemption) here, so the "real stack limit" is checked. + Label enough_stack_space; + __ LoadRoot(x10, Heap::kRealStackLimitRootIndex); + // Make x10 the space we have left. The stack might already be overflowed + // here which will cause x10 to become negative. + __ Sub(x10, jssp, x10); + // Check if the arguments will overflow the stack. 
+ __ Cmp(x10, Operand(x2, LSL, kPointerSizeLog2)); + __ B(le, stack_overflow); +} + + static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ SmiTag(x10, x0); __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); @@ -1433,6 +1425,9 @@ // -- x2 : expected number of arguments // ----------------------------------- + Label stack_overflow; + ArgumentAdaptorStackCheck(masm, &stack_overflow); + Register argc_actual = x0; // Excluding the receiver. Register argc_expected = x2; // Excluding the receiver. Register function = x1; @@ -1552,6 +1547,14 @@ // Call the entry point without adapting the arguments. __ Bind(&dont_adapt_arguments); __ Jump(code_entry); + + __ Bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ Unreachable(); + } } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/codegen-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/codegen-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/codegen-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/codegen-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "codegen.h" -#include "macro-assembler.h" -#include "simulator-arm64.h" +#include "src/arm64/simulator-arm64.h" +#include "src/codegen.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -58,7 +35,8 @@ // an AAPCS64-compliant exp() function. This will be faster than the C // library's exp() function, but probably less accurate. 
size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::exp; ExternalReference::InitializeMathExpData(); @@ -84,10 +62,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); #if !defined(USE_SIMULATOR) return FUNCTION_CAST<UnaryMathFunction>(buffer); @@ -108,14 +86,14 @@ void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); + DCHECK(!masm->has_frame()); masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); + DCHECK(masm->has_frame()); masm->set_has_frame(false); } @@ -124,26 +102,28 @@ // Code generators void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm, AllocationSiteMode mode, + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, Label* allocation_memento_found) { - // ----------- S t a t e ------------- - // -- x2 : receiver - // -- x3 : target map - // ----------------------------------- - Register receiver = x2; - Register map = x3; + ASM_LOCATION( + "ElementsTransitionGenerator::GenerateMapChangeElementsTransition"); + DCHECK(!AreAliased(receiver, key, value, target_map)); if (mode == TRACK_ALLOCATION_SITE) { - ASSERT(allocation_memento_found != NULL); + DCHECK(allocation_memento_found != NULL); __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, allocation_memento_found); } // Set transitioned map. 
- __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ RecordWriteField(receiver, HeapObject::kMapOffset, - map, + target_map, x10, kLRHasNotBeenSaved, kDontSaveFPRegs, @@ -153,19 +133,25 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble"); - // ----------- S t a t e ------------- - // -- lr : return address - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // -- x3 : target map, scratch for subsequent call - // ----------------------------------- - Register receiver = x2; - Register target_map = x3; - Label gc_required, only_change_map; + Register elements = x4; + Register length = x5; + Register array_size = x6; + Register array = x7; + + Register scratch = x6; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, length, array_size, array)); if (mode == TRACK_ALLOCATION_SITE) { __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail); @@ -173,32 +159,28 @@ // Check for empty arrays, which only require a map transition and no changes // to the backing store. - Register elements = x4; __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map); __ Push(lr); - Register length = x5; __ Ldrsw(length, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset)); // Allocate new FixedDoubleArray. 
- Register array_size = x6; - Register array = x7; __ Lsl(array_size, length, kDoubleSizeLog2); __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize); __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT); // Register array is non-tagged heap object. // Set the destination FixedDoubleArray's length and map. - Register map_root = x6; + Register map_root = array_size; __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex); __ SmiTag(x11, length); __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset)); __ Str(map_root, MemOperand(array, HeapObject::kMapOffset)); __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6, + __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch, kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -206,7 +188,7 @@ __ Add(x10, array, kHeapObjectTag); __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, - x6, kLRHasBeenSaved, kDontSaveFPRegs, + scratch, kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Prepare for conversion loop. 
@@ -225,7 +207,7 @@ __ Bind(&only_change_map); __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6, + __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch, kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ B(&done); @@ -257,20 +239,22 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject"); - // ----------- S t a t e ------------- - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // -- lr : return address - // -- x3 : target map, scratch for subsequent call - // -- x4 : scratch (elements) - // ----------------------------------- - Register value = x0; - Register key = x1; - Register receiver = x2; - Register target_map = x3; + Register elements = x4; + Register array_size = x6; + Register array = x7; + Register length = x5; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, array_size, array, length)); if (mode == TRACK_ALLOCATION_SITE) { __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail); @@ -279,7 +263,7 @@ // Check for empty arrays, which only require a map transition and no changes // to the backing store. Label only_change_map; - Register elements = x4; + __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map); @@ -287,20 +271,16 @@ // TODO(all): These registers may not need to be pushed. Examine // RecordWriteStub and check whether it's needed. 
__ Push(target_map, receiver, key, value); - Register length = x5; __ Ldrsw(length, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset)); - // Allocate new FixedArray. - Register array_size = x6; - Register array = x7; Label gc_required; __ Mov(array_size, FixedDoubleArray::kHeaderSize); __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2)); __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS); // Set destination FixedDoubleArray's length and map. - Register map_root = x6; + Register map_root = array_size; __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex); __ SmiTag(x11, length); __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset)); @@ -338,9 +318,11 @@ __ B(eq, &convert_hole); // Non-hole double, copy value into a heap number. - Register heap_num = x5; - __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map); - __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset)); + Register heap_num = length; + Register scratch = array_size; + Register scratch2 = elements; + __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2, + x13, heap_num_map); __ Mov(x13, dst_elements); __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex)); __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs, @@ -373,14 +355,41 @@ } -bool Code::IsYoungSequence(byte* sequence) { - return MacroAssembler::IsYoungSequence(sequence); +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + // The sequence of instructions that is patched out for aging code is the + // following boilerplate stack-building prologue that is found both in + // FUNCTION and OPTIMIZED_FUNCTION code: + PatchingAssembler patcher(young_sequence_.start(), + young_sequence_.length() / kInstructionSize); + // The young sequence is the frame setup code for FUNCTION code types. It is + // generated by FullCodeGenerator::Generate. 
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher); + +#ifdef DEBUG + const int length = kCodeAgeStubEntryOffset / kInstructionSize; + DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset); + PatchingAssembler patcher_old(old_sequence_.start(), length); + MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL); +#endif +} + + +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0; +} +#endif + + +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + return MacroAssembler::IsYoungSequence(isolate, sequence); } -void Code::GetCodeAgeAndParity(byte* sequence, Age* age, +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity) { - if (IsYoungSequence(sequence)) { + if (IsYoungSequence(isolate, sequence)) { *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { @@ -395,7 +404,8 @@ byte* sequence, Code::Age age, MarkingParity parity) { - PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize); + PatchingAssembler patcher(sequence, + kNoCodeAgeSequenceLength / kInstructionSize); if (age == kNoAgeCodeAge) { MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher); } else { @@ -410,7 +420,7 @@ Register index, Register result, Label* call_runtime) { - ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits()); + DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits()); // Fetch the instance type of the receiver into result register. __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset)); __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); @@ -468,7 +478,7 @@ __ Assert(eq, kExternalStringExpectedButNotFound); } // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); // TestAndBranchIfAnySet can emit Tbnz. 
Do not use it because call_runtime // can be bound far away in deferred code. __ Tst(result, kShortExternalStringMask); @@ -506,10 +516,11 @@ // instead of fmul and fsub. Doing this changes the result, but since this is // an estimation anyway, does it matter? - ASSERT(!AreAliased(input, result, + DCHECK(!AreAliased(input, result, double_temp1, double_temp2, temp1, temp2, temp3)); - ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!masm->serializer_enabled()); // External references not serializable. Label done; DoubleRegister double_temp3 = result; @@ -529,7 +540,7 @@ Label result_is_finite_non_zero; // Assert that we can load offset 0 (the small input threshold) and offset 1 // (the large input threshold) with a single ldp. - ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() - + DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() - ExpConstant(constants, 0).offset())); __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0)); @@ -559,7 +570,7 @@ __ Bind(&result_is_finite_non_zero); // Assert that we can load offset 3 and offset 4 with a single ldp. - ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() - + DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() - ExpConstant(constants, 3).offset())); __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3)); __ Fmadd(double_temp1, double_temp1, input, double_temp3); @@ -567,7 +578,7 @@ __ Fsub(double_temp1, double_temp1, double_temp3); // Assert that we can load offset 5 and offset 6 with a single ldp. - ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() - + DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() - ExpConstant(constants, 5).offset())); __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5)); // TODO(jbramley): Consider using Fnmsub here. 
diff -Nru nodejs-0.11.13/deps/v8/src/arm64/codegen-arm64.h nodejs-0.11.15/deps/v8/src/arm64/codegen-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/codegen-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/codegen-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_CODEGEN_ARM64_H_ #define V8_ARM64_CODEGEN_ARM64_H_ -#include "ast.h" -#include "ic-inl.h" +#include "src/ast.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arm64/code-stubs-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/code-stubs-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/code-stubs-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/code-stubs-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,530 +1,389 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "bootstrapper.h" -#include "code-stubs.h" -#include "regexp-macro-assembler.h" -#include "stub-cache.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/regexp-macro-assembler.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { - void FastNewClosureStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x2: function info - static Register registers[] = { x2 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry; + Register registers[] = { cp, x2 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); } void FastNewContextStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x1: function - static Register registers[] = { x1 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] 
= { cp, x1 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void ToNumberStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x0: value - static Register registers[] = { x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, x0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void NumberToStringStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x0: value - static Register registers[] = { x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; + Register registers[] = { cp, x0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); } void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x3: array literals array // x2: array literal index // x1: constant elements - static Register registers[] = { x3, x2, x1 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId( - Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; + Register registers[] = { cp, x3, x2, x1 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Smi(), + Representation::Tagged() }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + 
Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); } void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x3: object literals array // x2: object literal index // x1: constant properties // x0: object literal flags - static Register registers[] = { x3, x2, x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry; + Register registers[] = { cp, x3, x2, x1, x0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); } void CreateAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x2: feedback vector // x3: call feedback slot - static Register registers[] = { x2, x3 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, x2, x3 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallFunctionStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - // x1: receiver - // x0: key - static Register registers[] = { x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + // x1 function the function to call + Register registers[] = {cp, x1}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void 
KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallConstructStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - // x1: receiver - // x0: key - static Register registers[] = { x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + // x0 : number of arguments + // x1 : the function to call + // x2 : feedback vector + // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol) + // TODO(turbofan): So far we don't gather type feedback and hence skip the + // slot parameter, but ArrayConstructStub needs the vector to be undefined. + Register registers[] = {cp, x0, x1, x2}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void RegExpConstructResultStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x2: length // x1: index (of last match) // x0: string - static Register registers[] = { x2, x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry; -} - - -void LoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - // x0: receiver - static Register registers[] = { x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedLoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - // x1: receiver - static Register registers[] = { x1 }; - descriptor->register_param_count_ = sizeof(registers) / 
sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void StringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { x0, x2 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { x1, x0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - // x2: receiver - // x1: key - // x0: value - static Register registers[] = { x2, x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); + Register registers[] = { cp, x2, x1, x0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); } void TransitionElementsKindStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x0: value (js_array) // x1: to_map - static Register registers[] = { x0, x1 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; + Register registers[] = { cp, x0, x1 }; Address entry = Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(entry)); } void 
CompareNilICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x0: value to compare - static Register registers[] = { x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(CompareNilIC_Miss); + Register registers[] = { cp, x0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); } +const Register InterfaceDescriptor::ContextRegister() { return cp; } + + static void InitializeArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { + // cp: context // x1: function // x2: allocation site with elements kind // x0: number of arguments to the constructor function - static Register registers_variable_args[] = { x1, x2, x0 }; - static Register registers_no_args[] = { x1, x2 }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = - sizeof(registers_no_args) / sizeof(registers_no_args[0]); - descriptor->register_params_ = registers_no_args; + Register registers[] = { cp, x1, x2 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = x0; - descriptor->register_param_count_ = - sizeof(registers_variable_args) / sizeof(registers_variable_args[0]); - 
descriptor->register_params_ = registers_variable_args; + Register registers[] = { cp, x1, x2, x0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry; } void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1); } static void InitializeInternalArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { + // cp: context // x1: constructor function // x0: number of arguments to the constructor function - static Register registers_variable_args[] = { x1, x0 }; - static Register registers_no_args[] = { x1 }; + Address deopt_handler = Runtime::FunctionForId( + 
Runtime::kInternalArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = - sizeof(registers_no_args) / sizeof(registers_no_args[0]); - descriptor->register_params_ = registers_no_args; + Register registers[] = { cp, x1 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = x0; - descriptor->register_param_count_ = - sizeof(registers_variable_args) / sizeof(registers_variable_args[0]); - descriptor->register_params_ = registers_variable_args; + Register registers[] = { cp, x1, x0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry; } void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void 
InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void ToBooleanStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x0: value - static Register registers[] = { x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss); + Register registers[] = { cp, x0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); -} - - -void StoreGlobalStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - // x1: receiver - // x2: key (unused) - // x0: value - static Register registers[] = { x1, x2, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(StoreIC_MissFromStubFailure); -} - - -void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - // x0: value - // x3: target map - // x1: key - // x2: receiver - static Register registers[] = { x0, x3, x1, x2 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); } void BinaryOpICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* 
descriptor) { + // cp: context // x1: left operand // x0: right operand - static Register registers[] = { x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + Register registers[] = { cp, x1, x0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); } void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x2: allocation site // x1: left operand // x0: right operand - static Register registers[] = { x2, x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite); + Register registers[] = { cp, x2, x1, x0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); } void StringAddStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + // cp: context // x1: left operand // x0: right operand - static Register registers[] = { x1, x0 }; - descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry; + Register registers[] = { cp, x1, x0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); } void CallDescriptors::InitializeForIsolate(Isolate* isolate) { - static PlatformCallInterfaceDescriptor default_descriptor 
= - PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); + static PlatformInterfaceDescriptor default_descriptor = + PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); - static PlatformCallInterfaceDescriptor noInlineDescriptor = - PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS); + static PlatformInterfaceDescriptor noInlineDescriptor = + PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS); { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ArgumentAdaptorCall); - static Register registers[] = { x1, // JSFunction - cp, // context - x0, // actual number of arguments - x2, // expected number of arguments + Register registers[] = { cp, // context + x1, // JSFunction + x0, // actual number of arguments + x2, // expected number of arguments }; - static Representation representations[] = { - Representation::Tagged(), // JSFunction + Representation representations[] = { Representation::Tagged(), // context + Representation::Tagged(), // JSFunction Representation::Integer32(), // actual number of arguments Representation::Integer32(), // expected number of arguments }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &default_descriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &default_descriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::KeyedCall); - static Register registers[] = { cp, // context - x2, // key + Register registers[] = { cp, // context + x2, // key }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // key }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = 
&noInlineDescriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &noInlineDescriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::NamedCall); - static Register registers[] = { cp, // context - x2, // name + Register registers[] = { cp, // context + x2, // name }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // name }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &noInlineDescriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &noInlineDescriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::CallHandler); - static Register registers[] = { cp, // context - x0, // receiver + Register registers[] = { cp, // context + x0, // receiver }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // receiver }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &default_descriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &default_descriptor); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ApiFunctionCall); - static Register registers[] = { x0, // callee - x4, // call_data - x2, // holder - x1, // api_function_address - cp, // context + Register registers[] = { cp, // context + x0, // callee + x4, // call_data + x2, // holder + x1, // api_function_address }; - static Representation representations[] = { + Representation representations[] = { + Representation::Tagged(), // context Representation::Tagged(), // callee 
Representation::Tagged(), // call_data Representation::Tagged(), // holder Representation::External(), // api_function_address - Representation::Tagged(), // context }; - descriptor->register_param_count_ = 5; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; - descriptor->platform_specific_descriptor_ = &default_descriptor; + descriptor->Initialize(ARRAY_SIZE(registers), registers, + representations, &default_descriptor); } } @@ -534,26 +393,25 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. - Isolate* isolate = masm->isolate(); - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); - CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); - int param_count = descriptor->register_param_count_; + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); { // Call the runtime system in a fresh internal frame. 
FrameScope scope(masm, StackFrame::INTERNAL); - ASSERT((descriptor->register_param_count_ == 0) || - x0.Is(descriptor->register_params_[param_count - 1])); + DCHECK((param_count == 0) || + x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1))); // Push arguments MacroAssembler::PushPopQueue queue(masm); for (int i = 0; i < param_count; ++i) { - queue.Queue(descriptor->register_params_[i]); + queue.Queue(descriptor->GetEnvironmentParameterRegister(i)); } queue.PushQueued(); ExternalReference miss = descriptor->miss_handler(); - __ CallExternalReference(miss, descriptor->register_param_count_); + __ CallExternalReference(miss, param_count); } __ Ret(); @@ -564,10 +422,10 @@ Label done; Register input = source(); Register result = destination(); - ASSERT(is_truncating()); + DCHECK(is_truncating()); - ASSERT(result.Is64Bits()); - ASSERT(jssp.Is(masm->StackPointer())); + DCHECK(result.Is64Bits()); + DCHECK(jssp.Is(masm->StackPointer())); int double_offset = offset(); @@ -647,7 +505,7 @@ FPRegister double_scratch, Label* slow, Condition cond) { - ASSERT(!AreAliased(left, right, scratch)); + DCHECK(!AreAliased(left, right, scratch)); Label not_identical, return_equal, heap_number; Register result = x0; @@ -702,7 +560,7 @@ // it is handled in the parser (see Parser::ParseBinaryExpression). We are // only concerned with cases ge, le and eq here. if ((cond != lt) && (cond != gt)) { - ASSERT((cond == ge) || (cond == le) || (cond == eq)); + DCHECK((cond == ge) || (cond == le) || (cond == eq)); __ Bind(&heap_number); // Left and right are identical pointers to a heap number object. Return // non-equal if the heap number is a NaN, and equal otherwise. Comparing @@ -735,7 +593,7 @@ Register left_type, Register right_type, Register scratch) { - ASSERT(!AreAliased(left, right, left_type, right_type, scratch)); + DCHECK(!AreAliased(left, right, left_type, right_type, scratch)); if (masm->emit_debug_code()) { // We assume that the arguments are not identical. 
@@ -753,7 +611,7 @@ __ B(lt, &right_non_object); // Return non-zero - x0 already contains a non-zero pointer. - ASSERT(left.is(x0) || right.is(x0)); + DCHECK(left.is(x0) || right.is(x0)); Label return_not_equal; __ Bind(&return_not_equal); __ Ret(); @@ -791,9 +649,9 @@ Register scratch, Label* slow, bool strict) { - ASSERT(!AreAliased(left, right, scratch)); - ASSERT(!AreAliased(left_d, right_d)); - ASSERT((left.is(x0) && right.is(x1)) || + DCHECK(!AreAliased(left, right, scratch)); + DCHECK(!AreAliased(left_d, right_d)); + DCHECK((left.is(x0) && right.is(x1)) || (right.is(x0) && left.is(x1))); Register result = x0; @@ -866,7 +724,7 @@ Register right_type, Label* possible_strings, Label* not_both_strings) { - ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type)); + DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type)); Register result = x0; Label object_test; @@ -986,7 +844,7 @@ // Left and/or right is a NaN. Load the result register with whatever makes // the comparison fail, since comparisons with NaN always fail (except ne, // which is filtered out at a higher level.) 
- ASSERT(cond != ne); + DCHECK(cond != ne); if ((cond == lt) || (cond == le)) { __ Mov(result, GREATER); } else { @@ -1049,8 +907,7 @@ __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14, x15, &slow); - Isolate* isolate = masm->isolate(); - __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10, + __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10, x11); if (cond == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs, @@ -1078,7 +935,7 @@ if ((cond == lt) || (cond == le)) { ncr = GREATER; } else { - ASSERT((cond == gt) || (cond == ge)); // remaining cases + DCHECK((cond == gt) || (cond == ge)); // remaining cases ncr = LESS; } __ Mov(x10, Smi::FromInt(ncr)); @@ -1095,30 +952,29 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { - // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9, - // ip0 and ip1 are corrupted by the call into C. CPURegList saved_regs = kCallerSaved; - saved_regs.Remove(ip0); - saved_regs.Remove(ip1); - saved_regs.Remove(x8); - saved_regs.Remove(x9); + CPURegList saved_fp_regs = kCallerSavedFP; // We don't allow a GC during a store buffer overflow so there is no need to // store the registers in any particular way, but we do have to store and // restore them. + + // We don't care if MacroAssembler scratch registers are corrupted. 
+ saved_regs.Remove(*(masm->TmpList())); + saved_fp_regs.Remove(*(masm->FPTmpList())); + __ PushCPURegList(saved_regs); if (save_doubles_ == kSaveFPRegs) { - __ PushCPURegList(kCallerSavedFP); + __ PushCPURegList(saved_fp_regs); } AllowExternalCallThatCantCauseGC scope(masm); - __ Mov(x0, ExternalReference::isolate_address(masm->isolate())); + __ Mov(x0, ExternalReference::isolate_address(isolate())); __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), - 1, 0); + ExternalReference::store_buffer_overflow_function(isolate()), 1, 0); if (save_doubles_ == kSaveFPRegs) { - __ PopCPURegList(kCallerSavedFP); + __ PopCPURegList(saved_fp_regs); } __ PopCPURegList(saved_regs); __ Ret(); @@ -1127,10 +983,10 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); - StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); + StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); + stub1.GetCode(); + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); } @@ -1143,11 +999,7 @@ // Restore lr with the value it had before the call to this stub (the value // which must be pushed). __ Mov(lr, saved_lr); - if (save_doubles_ == kSaveFPRegs) { - __ PushSafepointRegistersAndDoubles(); - } else { - __ PushSafepointRegisters(); - } + __ PushSafepointRegisters(); __ Ret(return_address); } @@ -1158,11 +1010,7 @@ Register return_address = temps.AcquireX(); // Preserve the return address (lr will be clobbered by the pop). __ Mov(return_address, lr); - if (save_doubles_ == kSaveFPRegs) { - __ PopSafepointRegistersAndDoubles(); - } else { - __ PopSafepointRegisters(); - } + __ PopSafepointRegisters(); __ Ret(return_address); } @@ -1230,8 +1078,8 @@ if (exponent_type_ != INTEGER) { // Detect integer exponents stored as doubles and handle those in the // integer fast-path. 
- __ TryConvertDoubleToInt64(exponent_integer, exponent_double, - scratch0_double, &exponent_is_integer); + __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double, + scratch0_double, &exponent_is_integer); if (exponent_type_ == ON_STACK) { FPRegister half_double = d3; @@ -1314,7 +1162,7 @@ AllowExternalCallThatCantCauseGC scope(masm); __ Mov(saved_lr, lr); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), + ExternalReference::power_double_double_function(isolate()), 0, 2); __ Mov(lr, saved_lr); __ B(&done); @@ -1389,16 +1237,15 @@ __ Bind(&call_runtime); // Put the arguments back on the stack. __ Push(base_tagged, exponent_tagged); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); // Return. __ Bind(&done); - __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1); - __ Str(result_double, - FieldMemOperand(result_tagged, HeapNumber::kValueOffset)); - ASSERT(result_tagged.is(x0)); + __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1, + result_double); + DCHECK(result_tagged.is(x0)); __ IncrementCounter( - masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); + isolate()->counters()->math_pow(), 1, scratch0, scratch1); __ Ret(); } else { AllowExternalCallThatCantCauseGC scope(masm); @@ -1406,12 +1253,12 @@ __ Fmov(base_double, base_double_copy); __ Scvtf(exponent_double, exponent_integer); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), + ExternalReference::power_double_double_function(isolate()), 0, 2); __ Mov(lr, saved_lr); __ Bind(&done); __ IncrementCounter( - masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); + isolate()->counters()->math_pow(), 1, scratch0, scratch1); __ Ret(); } } @@ -1435,18 +1282,14 @@ void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { - StoreRegistersStateStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); - 
StoreRegistersStateStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); + StoreRegistersStateStub stub(isolate); + stub.GetCode(); } void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { - RestoreRegistersStateStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); - RestoreRegistersStateStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); + RestoreRegistersStateStub stub(isolate); + stub.GetCode(); } @@ -1471,22 +1314,85 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode(isolate); - CEntryStub stub_fp(1, kSaveFPRegs); - stub_fp.GetCode(isolate); + CEntryStub stub(isolate, 1, kDontSaveFPRegs); + stub.GetCode(); + CEntryStub stub_fp(isolate, 1, kSaveFPRegs); + stub_fp.GetCode(); } -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal, - Label* throw_termination, - bool do_gc, - bool always_allocate) { - // x0 : Result parameter for PerformGC, if do_gc is true. +void CEntryStub::Generate(MacroAssembler* masm) { + // The Abort mechanism relies on CallRuntime, which in turn relies on + // CEntryStub, so until this stub has been generated, we have to use a + // fall-back Abort mechanism. + // + // Note that this stub must be generated before any use of Abort. + MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); + + ASM_LOCATION("CEntryStub::Generate entry"); + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Register parameters: + // x0: argc (including receiver, untagged) + // x1: target + // + // The stack on entry holds the arguments and the receiver, with the receiver + // at the highest address: + // + // jssp]argc-1]: receiver + // jssp[argc-2]: arg[argc-2] + // ... ... + // jssp[1]: arg[1] + // jssp[0]: arg[0] + // + // The arguments are in reverse order, so that arg[argc-2] is actually the + // first argument to the target function and arg[0] is the last. 
+ DCHECK(jssp.Is(__ StackPointer())); + const Register& argc_input = x0; + const Register& target_input = x1; + + // Calculate argv, argc and the target address, and store them in + // callee-saved registers so we can retry the call without having to reload + // these arguments. + // TODO(jbramley): If the first call attempt succeeds in the common case (as + // it should), then we might be better off putting these parameters directly + // into their argument registers, rather than using callee-saved registers and + // preserving them on the stack. + const Register& argv = x21; + const Register& argc = x22; + const Register& target = x23; + + // Derive argv from the stack pointer so that it points to the first argument + // (arg[argc-2]), or just below the receiver in case there are no arguments. + // - Adjust for the arg[] array. + Register temp_argv = x11; + __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); + // - Adjust for the receiver. + __ Sub(temp_argv, temp_argv, 1 * kPointerSize); + + // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved + // registers. + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(save_doubles_, x10, 3); + DCHECK(csp.Is(__ StackPointer())); + + // Poke callee-saved registers into reserved space. + __ Poke(argv, 1 * kPointerSize); + __ Poke(argc, 2 * kPointerSize); + __ Poke(target, 3 * kPointerSize); + + // We normally only keep tagged values in callee-saved registers, as they + // could be pushed onto the stack by called stubs and functions, and on the + // stack they can confuse the GC. However, we're only calling C functions + // which can push arbitrary data onto the stack anyway, and so the GC won't + // examine that part of the stack. 
+ __ Mov(argc, argc_input); + __ Mov(target, target_input); + __ Mov(argv, temp_argv); + // x21 : argv // x22 : argc - // x23 : target + // x23 : call target // // The stack (on entry) holds the arguments and the receiver, with the // receiver at the highest address: @@ -1516,44 +1422,19 @@ // // After an unsuccessful call, the exit frame and suchlike are left // untouched, and the stub either throws an exception by jumping to one of - // the provided throw_ labels, or it falls through. The failure details are - // passed through in x0. - ASSERT(csp.Is(__ StackPointer())); + // the exception_returned label. - Isolate* isolate = masm->isolate(); - - const Register& argv = x21; - const Register& argc = x22; - const Register& target = x23; - - if (do_gc) { - // Call Runtime::PerformGC, passing x0 (the result parameter for - // PerformGC) and x1 (the isolate). - __ Mov(x1, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction( - ExternalReference::perform_gc_function(isolate), 2, 0); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(isolate); - if (always_allocate) { - __ Mov(x10, Operand(scope_depth)); - __ Ldr(x11, MemOperand(x10)); - __ Add(x11, x11, 1); - __ Str(x11, MemOperand(x10)); - } + DCHECK(csp.Is(__ StackPointer())); // Prepare AAPCS64 arguments to pass to the builtin. __ Mov(x0, argc); __ Mov(x1, argv); - __ Mov(x2, ExternalReference::isolate_address(isolate)); + __ Mov(x2, ExternalReference::isolate_address(isolate())); - // Store the return address on the stack, in the space previously allocated - // by EnterExitFrame. The return address is queried by - // ExitFrame::GetStateForFramePointer. Label return_location; __ Adr(x12, &return_location); __ Poke(x12, 0); + if (__ emit_debug_code()) { // Verify that the slot below fp[kSPOffset]-8 points to the return location // (currently in x12). @@ -1568,27 +1449,17 @@ // Call the builtin. 
__ Blr(target); __ Bind(&return_location); - const Register& result = x0; - - if (always_allocate) { - __ Mov(x10, Operand(scope_depth)); - __ Ldr(x11, MemOperand(x10)); - __ Sub(x11, x11, 1); - __ Str(x11, MemOperand(x10)); - } // x0 result The return code from the call. // x21 argv // x22 argc // x23 target - // - // If all of the result bits matching kFailureTagMask are '1', the result is - // a failure. Otherwise, it's an ordinary tagged object and the call was a - // success. - Label failure; - __ And(x10, result, kFailureTagMask); - __ Cmp(x10, kFailureTagMask); - __ B(&failure, eq); + const Register& result = x0; + + // Check result for exception sentinel. + Label exception_returned; + __ CompareRoot(result, Heap::kExceptionRootIndex); + __ B(eq, &exception_returned); // The call succeeded, so unwind the stack and return. @@ -1600,44 +1471,33 @@ __ Peek(target, 3 * kPointerSize); __ LeaveExitFrame(save_doubles_, x10, true); - ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); // Pop or drop the remaining stack slots and return from the stub. // jssp[24]: Arguments array (of size argc), including receiver. // jssp[16]: Preserved x23 (used for target). // jssp[8]: Preserved x22 (used for argc). // jssp[0]: Preserved x21 (used for argv). __ Drop(x11); + __ AssertFPCRState(); __ Ret(); // The stack pointer is still csp if we aren't returning, and the frame // hasn't changed (except for the return address). __ SetStackPointer(csp); - __ Bind(&failure); - // The call failed, so check if we need to throw an exception, and fall - // through (to retry) otherwise. - - Label retry; - // x0 result The return code from the call, including the failure - // code and details. - // x21 argv - // x22 argc - // x23 target - // Refer to the Failure class for details of the bit layout. - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ Tst(result, kFailureTypeTagMask << kFailureTagSize); - __ B(eq, &retry); // RETRY_AFTER_GC + // Handling of exception. 
+ __ Bind(&exception_returned); // Retrieve the pending exception. + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); const Register& exception = result; const Register& exception_address = x11; - __ Mov(exception_address, - Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + __ Mov(exception_address, Operand(pending_exception_address)); __ Ldr(exception, MemOperand(exception_address)); // Clear the pending exception. - __ Mov(x10, Operand(isolate->factory()->the_hole_value())); + __ Mov(x10, Operand(isolate()->factory()->the_hole_value())); __ Str(x10, MemOperand(exception_address)); // x0 exception The exception descriptor. @@ -1647,118 +1507,9 @@ // Special handling of termination exceptions, which are uncatchable by // JavaScript code. - __ Cmp(exception, Operand(isolate->factory()->termination_exception())); - __ B(eq, throw_termination); - - // Handle normal exception. - __ B(throw_normal); - - __ Bind(&retry); - // The result (x0) is passed through as the next PerformGC parameter. -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // The Abort mechanism relies on CallRuntime, which in turn relies on - // CEntryStub, so until this stub has been generated, we have to use a - // fall-back Abort mechanism. - // - // Note that this stub must be generated before any use of Abort. - MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); - - ASM_LOCATION("CEntryStub::Generate entry"); - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - // Register parameters: - // x0: argc (including receiver, untagged) - // x1: target - // - // The stack on entry holds the arguments and the receiver, with the receiver - // at the highest address: - // - // jssp]argc-1]: receiver - // jssp[argc-2]: arg[argc-2] - // ... ... 
- // jssp[1]: arg[1] - // jssp[0]: arg[0] - // - // The arguments are in reverse order, so that arg[argc-2] is actually the - // first argument to the target function and arg[0] is the last. - ASSERT(jssp.Is(__ StackPointer())); - const Register& argc_input = x0; - const Register& target_input = x1; - - // Calculate argv, argc and the target address, and store them in - // callee-saved registers so we can retry the call without having to reload - // these arguments. - // TODO(jbramley): If the first call attempt succeeds in the common case (as - // it should), then we might be better off putting these parameters directly - // into their argument registers, rather than using callee-saved registers and - // preserving them on the stack. - const Register& argv = x21; - const Register& argc = x22; - const Register& target = x23; - - // Derive argv from the stack pointer so that it points to the first argument - // (arg[argc-2]), or just below the receiver in case there are no arguments. - // - Adjust for the arg[] array. - Register temp_argv = x11; - __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); - // - Adjust for the receiver. - __ Sub(temp_argv, temp_argv, 1 * kPointerSize); - - // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved - // registers. - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(save_doubles_, x10, 3); - ASSERT(csp.Is(__ StackPointer())); - - // Poke callee-saved registers into reserved space. - __ Poke(argv, 1 * kPointerSize); - __ Poke(argc, 2 * kPointerSize); - __ Poke(target, 3 * kPointerSize); - - // We normally only keep tagged values in callee-saved registers, as they - // could be pushed onto the stack by called stubs and functions, and on the - // stack they can confuse the GC. However, we're only calling C functions - // which can push arbitrary data onto the stack anyway, and so the GC won't - // examine that part of the stack. 
- __ Mov(argc, argc_input); - __ Mov(target, target_input); - __ Mov(argv, temp_argv); - - Label throw_normal; - Label throw_termination; - - // Call the runtime function. - GenerateCore(masm, - &throw_normal, - &throw_termination, - false, - false); - - // If successful, the previous GenerateCore will have returned to the - // calling code. Otherwise, we fall through into the following. - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal, - &throw_termination, - true, - false); - - // Do full GC and retry runtime call one final time. - __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError())); - GenerateCore(masm, - &throw_normal, - &throw_termination, - true, - true); - - { FrameScope scope(masm, StackFrame::MANUAL); - __ CallCFunction( - ExternalReference::out_of_memory_function(masm->isolate()), 0); - } + Label throw_termination_exception; + __ Cmp(exception, Operand(isolate()->factory()->termination_exception())); + __ B(eq, &throw_termination_exception); // We didn't execute a return case, so the stack frame hasn't been updated // (except for the return address slot). However, we don't need to initialize @@ -1766,24 +1517,18 @@ // unwinds the stack. __ SetStackPointer(jssp); - // Throw exceptions. - // If we throw an exception, we can end up re-entering CEntryStub before we - // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values - // here. - - __ Bind(&throw_termination); - ASM_LOCATION("Throw termination"); + ASM_LOCATION("Throw normal"); __ Mov(argv, 0); __ Mov(argc, 0); __ Mov(target, 0); - __ ThrowUncatchable(x0, x10, x11, x12, x13); + __ Throw(x0, x10, x11, x12, x13); - __ Bind(&throw_normal); - ASM_LOCATION("Throw normal"); + __ Bind(&throw_termination_exception); + ASM_LOCATION("Throw termination"); __ Mov(argv, 0); __ Mov(argc, 0); __ Mov(target, 0); - __ Throw(x0, x10, x11, x12, x13); + __ ThrowUncatchable(x0, x10, x11, x12, x13); } @@ -1798,7 +1543,7 @@ // Output: // x0: result. 
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { - ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); Register code_entry = x0; // Enable instruction instrumentation. This only works on the simulator, and @@ -1817,20 +1562,22 @@ __ Mov(jssp, csp); __ SetStackPointer(jssp); + // Configure the FPCR. We don't restore it, so this is technically not allowed + // according to AAPCS64. However, we only set default-NaN mode and this will + // be harmless for most C code. Also, it works for ARM. + __ ConfigureFPCR(); + ProfileEntryHookStub::MaybeCallEntryHook(masm); // Set up the reserved register for 0.0. __ Fmov(fp_zero, 0.0); // Build an entry frame (see layout below). - Isolate* isolate = masm->isolate(); - - // Build an entry frame. int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. __ Mov(x13, bad_frame_pointer); __ Mov(x12, Smi::FromInt(marker)); - __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate)); + __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate())); __ Ldr(x10, MemOperand(x11)); __ Push(x13, xzr, x12, x10); @@ -1840,7 +1587,7 @@ // Push the JS entry frame marker. Also set js_entry_sp if this is the // outermost JS call. Label non_outermost_js, done; - ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); __ Mov(x10, ExternalReference(js_entry_sp)); __ Ldr(x11, MemOperand(x10)); __ Cbnz(x11, &non_outermost_js); @@ -1850,7 +1597,7 @@ __ B(&done); __ Bind(&non_outermost_js); // We spare one instruction by pushing xzr since the marker is 0. 
- ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); + DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); __ Push(xzr); __ Bind(&done); @@ -1880,10 +1627,10 @@ // fp will be invalid because the PushTryHandler below sets it to 0 to // signal the existence of the JSEntry frame. __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); } __ Str(code_entry, MemOperand(x10)); - __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception()))); + __ LoadRoot(x0, Heap::kExceptionRootIndex); __ B(&exit); // Invoke: Link this frame into the handler chain. There's only one @@ -1896,9 +1643,9 @@ // saved values before returning a failure to C. // Clear any pending exceptions. - __ Mov(x10, Operand(isolate->factory()->the_hole_value())); + __ Mov(x10, Operand(isolate()->factory()->the_hole_value())); __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); __ Str(x10, MemOperand(x11)); // Invoke the function by calling through the JS entry trampoline builtin. @@ -1913,7 +1660,7 @@ // x4: argv. ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline : Builtins::kJSEntryTrampoline, - isolate); + isolate()); __ Mov(x10, entry); // Call the JSEntryTrampoline. @@ -1946,13 +1693,13 @@ // Restore the top frame descriptors from the stack. __ Pop(x10); - __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate)); + __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate())); __ Str(x10, MemOperand(x11)); // Reset the stack to the callee saved registers. __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes); // Restore the callee-saved registers and return. 
- ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); __ Mov(csp, jssp); __ SetStackPointer(csp); __ PopCalleeSavedRegisters(); @@ -1964,33 +1711,14 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { Label miss; - Register receiver; - if (kind() == Code::KEYED_LOAD_IC) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- x1 : receiver - // -- x0 : key - // ----------------------------------- - Register key = x0; - receiver = x1; - __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string())); - __ B(ne, &miss); - } else { - ASSERT(kind() == Code::LOAD_IC); - // ----------- S t a t e ------------- - // -- lr : return address - // -- x2 : name - // -- x0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - receiver = x0; - } + Register receiver = LoadIC::ReceiverRegister(); - StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss); + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, + x11, &miss); __ Bind(&miss); - StubCompiler::TailCallBuiltin(masm, - BaseLoadStoreStubCompiler::MissBuiltin(kind())); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); } @@ -2038,7 +1766,7 @@ // If there is a call site cache, don't look in the global cache, but do the // real lookup and update the call site cache. - if (!HasCallSiteInlineCheck()) { + if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) { Label miss; __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss); __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss); @@ -2070,6 +1798,7 @@ } Label return_true, return_result; + Register smi_value = scratch1; { // Loop through the prototype chain looking for the function prototype. Register chain_map = x1; @@ -2080,6 +1809,10 @@ __ LoadRoot(null_value, Heap::kNullValueRootIndex); // Speculatively set a result. 
__ Mov(result, res_false); + if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) { + // Value to store in the cache cannot be an object. + __ Mov(smi_value, Smi::FromInt(1)); + } __ Bind(&loop); @@ -2102,14 +1835,19 @@ // We cannot fall through to here. __ Bind(&return_true); __ Mov(result, res_true); + if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) { + // Value to store in the cache cannot be an object. + __ Mov(smi_value, Smi::FromInt(0)); + } __ Bind(&return_result); if (HasCallSiteInlineCheck()) { - ASSERT(ReturnTrueFalseObject()); + DCHECK(ReturnTrueFalseObject()); __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult); __ GetRelocatedValueLocation(map_check_site, scratch2); __ Str(result, MemOperand(scratch2)); } else { - __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); + Register cached_value = ReturnTrueFalseObject() ? smi_value : result; + __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex); } __ Ret(); @@ -2131,7 +1869,7 @@ __ Mov(result, res_false); // Null is not instance of anything. - __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value())); + __ Cmp(object_type, Operand(isolate()->factory()->null_value())); __ B(ne, &object_not_null); __ Ret(); @@ -2236,9 +1974,8 @@ Register caller_fp = x10; __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); // Load and untag the context. - STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4); - __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset + - (kSmiShift / kBitsPerByte))); + __ Ldr(w11, UntagSmiMemOperand(caller_fp, + StandardFrameConstants::kContextOffset)); __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR); __ B(ne, &runtime); @@ -2251,7 +1988,7 @@ __ Poke(x10, 1 * kXRegSize); __ Bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -2351,41 +2088,42 @@ // Get the arguments boilerplate from the current (global) context. 
- // x0 alloc_obj pointer to allocated objects (param map, backing - // store, arguments) - // x1 mapped_params number of mapped parameters, min(params, args) - // x2 arg_count number of function arguments - // x3 arg_count_smi number of function arguments (smi) - // x4 function function pointer - // x7 param_count number of function parameters - // x11 args_offset offset to args (or aliased args) boilerplate (uninit) - // x14 recv_arg pointer to receiver arguments + // x0 alloc_obj pointer to allocated objects (param map, backing + // store, arguments) + // x1 mapped_params number of mapped parameters, min(params, args) + // x2 arg_count number of function arguments + // x3 arg_count_smi number of function arguments (smi) + // x4 function function pointer + // x7 param_count number of function parameters + // x11 sloppy_args_map offset to args (or aliased args) map (uninit) + // x14 recv_arg pointer to receiver arguments Register global_object = x10; Register global_ctx = x10; - Register args_offset = x11; - Register aliased_args_offset = x10; + Register sloppy_args_map = x11; + Register aliased_args_map = x10; __ Ldr(global_object, GlobalObjectMemOperand()); __ Ldr(global_ctx, FieldMemOperand(global_object, GlobalObject::kNativeContextOffset)); - __ Ldr(args_offset, - ContextMemOperand(global_ctx, - Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)); - __ Ldr(aliased_args_offset, - ContextMemOperand(global_ctx, - Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)); + __ Ldr(sloppy_args_map, + ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX)); + __ Ldr(aliased_args_map, + ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX)); __ Cmp(mapped_params, 0); - __ CmovX(args_offset, aliased_args_offset, ne); + __ CmovX(sloppy_args_map, aliased_args_map, ne); // Copy the JS object part. 
- __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13), - JSObject::kHeaderSize / kPointerSize); + __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset)); + __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex); + __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset)); + __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); const int kCalleeOffset = JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize; + __ AssertNotSmi(function); __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset)); // Use the length and set that as an in-object property. @@ -2523,7 +2261,7 @@ // Do the runtime call to allocate the arguments object. __ Bind(&runtime); __ Push(function, recv_arg, arg_count_smi); - __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -2586,25 +2324,24 @@ // Get the arguments boilerplate from the current (native) context. Register global_object = x10; Register global_ctx = x10; - Register args_offset = x4; + Register strict_args_map = x4; __ Ldr(global_object, GlobalObjectMemOperand()); __ Ldr(global_ctx, FieldMemOperand(global_object, GlobalObject::kNativeContextOffset)); - __ Ldr(args_offset, - ContextMemOperand(global_ctx, - Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)); + __ Ldr(strict_args_map, + ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX)); // x0 alloc_obj pointer to allocated objects: parameter array and // arguments object // x1 param_count_smi number of parameters passed to function (smi) // x2 params pointer to parameters // x3 function function pointer - // x4 args_offset offset to arguments boilerplate + // x4 strict_args_map offset to arguments map // x13 param_count number of parameters passed to function - - // Copy the JS object part. 
- __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7), - JSObject::kHeaderSize / kPointerSize); + __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset)); + __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex); + __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset)); + __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); // Set the smi-tagged length as an in-object property. STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); @@ -2656,13 +2393,13 @@ // Do the runtime call to allocate the arguments object. __ Bind(&runtime); __ Push(function, params, param_count_smi); - __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); } void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. @@ -2732,17 +2469,16 @@ const int kJSRegExpOffset = 7 * kPointerSize; // Ensure that a RegExp stack is allocated. - Isolate* isolate = masm->isolate(); ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(isolate); + ExternalReference::address_of_regexp_stack_memory_address(isolate()); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(isolate); + ExternalReference::address_of_regexp_stack_memory_size(isolate()); __ Mov(x10, address_of_regexp_stack_memory_size); __ Ldr(x10, MemOperand(x10)); __ Cbz(x10, &runtime); // Check that the first argument is a JSRegExp object. 
- ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); __ Peek(jsregexp_object, kJSRegExpOffset); __ JumpIfSmi(jsregexp_object, &runtime); __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime); @@ -2779,7 +2515,7 @@ // Initialize offset for possibly sliced string. __ Mov(sliced_string_offset, 0); - ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); __ Peek(subject, kSubjectOffset); __ JumpIfSmi(subject, &runtime); @@ -2851,8 +2587,8 @@ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSeqStringTag == 0); // The underlying external string is never a short external string. - STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); - STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); __ TestAndBranchIfAnySet(string_type.X(), kStringRepresentationMask, &external_string); // Go to (7). @@ -2862,7 +2598,7 @@ // Check that the third argument is a positive smi less than the subject // string length. A negative value will be greater (unsigned comparison). - ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); __ Peek(x10, kPreviousIndexOffset); __ JumpIfNotSmi(x10, &runtime); __ Cmp(jsstring_length, x10); @@ -2880,7 +2616,7 @@ // Find the code object based on the assumptions above. // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset // of kPointerSize to reach the latter. - ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, + DCHECK_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, JSRegExp::kDataUC16CodeOffset); __ Mov(x10, kPointerSize); // We will need the encoding later: ASCII = 0x04 @@ -2898,13 +2634,13 @@ __ JumpIfSmi(code_object, &runtime); // All checks done. Now push arguments for native regexp code. 
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, + __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, x10, x11); // Isolates: note we add an additional parameter here (isolate pointer). __ EnterExitFrame(false, x10, 1); - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); // We have 9 arguments to pass to the regexp code, therefore we have to pass // one on the stack and the rest as registers. @@ -2914,7 +2650,7 @@ // csp[0]: Space for the return address placed by DirectCEntryStub. // csp[8]: Argument 9, the current isolate address. - __ Mov(x10, ExternalReference::isolate_address(isolate)); + __ Mov(x10, ExternalReference::isolate_address(isolate())); __ Poke(x10, kPointerSize); Register length = w11; @@ -2963,7 +2699,7 @@ __ Add(x3, x2, Operand(w10, UXTW)); // Argument 5 (x4): static offsets vector buffer. - __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate)); + __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate())); // Argument 6 (x5): Set the number of capture registers to zero to force // global regexps to behave as non-global. This stub is not used for global @@ -2982,7 +2718,7 @@ // Locate the code entry and call it. __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm, code_object); __ LeaveExitFrame(false, x10, true); @@ -3008,7 +2744,7 @@ __ Add(number_of_capture_registers, x10, 2); // Check that the fourth object is a JSArray object. - ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); __ Peek(x10, kLastMatchInfoOffset); __ JumpIfSmi(x10, &runtime); __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime); @@ -3068,7 +2804,7 @@ // Get the static offsets vector filled by the native regexp code // and fill the last match info. 
ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(isolate); + ExternalReference::address_of_static_offsets_vector(isolate()); __ Mov(offsets_vector_index, address_of_static_offsets_vector); Label next_capture, done; @@ -3087,8 +2823,8 @@ // Store the smi values in the last match info. __ SmiTag(x10, current_offset); // Clearing the 32 bottom bits gives us a Smi. - STATIC_ASSERT(kSmiShift == 32); - __ And(x11, current_offset, ~kWRegMask); + STATIC_ASSERT(kSmiTag == 0); + __ Bic(x11, current_offset, kSmiShiftMask); __ Stp(x10, x11, MemOperand(last_match_offsets, kXRegSize * 2, PostIndex)); @@ -3107,10 +2843,10 @@ // A stack overflow (on the backtrack stack) may have occured // in the RegExp code but no exception has been created yet. // If there is no pending exception, handle that in the runtime system. - __ Mov(x10, Operand(isolate->factory()->the_hole_value())); + __ Mov(x10, Operand(isolate()->factory()->the_hole_value())); __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); __ Ldr(exception_value, MemOperand(x11)); __ Cmp(x10, exception_value); __ B(eq, &runtime); @@ -3129,7 +2865,7 @@ __ ThrowUncatchable(exception_value, x10, x11, x12, x13); __ Bind(&failure); - __ Mov(x0, Operand(masm->isolate()->factory()->null_value())); + __ Mov(x0, Operand(isolate()->factory()->null_value())); __ PopCPURegList(used_callee_saved_registers); // Drop the 4 arguments of the stub from the stack. __ Drop(4); @@ -3137,7 +2873,7 @@ __ Bind(&runtime); __ PopCPURegList(used_callee_saved_registers); - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); // Deferred code for string handling. // (6) Not a long external string? If yes, go to (8). 
@@ -3190,7 +2926,7 @@ Register scratch1, Register scratch2) { ASM_LOCATION("GenerateRecordCallTarget"); - ASSERT(!AreAliased(scratch1, scratch2, + DCHECK(!AreAliased(scratch1, scratch2, argc, function, feedback_vector, index)); // Cache the called function in a feedback vector slot. Cache states are // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. @@ -3200,9 +2936,9 @@ // index : slot in feedback vector (smi) Label initialize, done, miss, megamorphic, not_array_function; - ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), + DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), masm->isolate()->heap()->megamorphic_symbol()); - ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), + DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), masm->isolate()->heap()->uninitialized_symbol()); // Load the cache state. @@ -3259,7 +2995,7 @@ // slot. { FrameScope scope(masm, StackFrame::INTERNAL); - CreateAllocationSiteStub create_stub; + CreateAllocationSiteStub create_stub(masm->isolate()); // Arguments register must be smi-tagged to call out. __ SmiTag(argc); @@ -3267,7 +3003,7 @@ // CreateAllocationSiteStub expect the feedback vector in x2 and the slot // index in x3. - ASSERT(feedback_vector.Is(x2) && index.Is(x3)); + DCHECK(feedback_vector.Is(x2) && index.Is(x3)); __ CallStub(&create_stub); __ Pop(index, feedback_vector, function, argc); @@ -3294,55 +3030,91 @@ } -void CallFunctionStub::Generate(MacroAssembler* masm) { - ASM_LOCATION("CallFunctionStub::Generate"); +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + // Do not transform the receiver for strict mode functions. 
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset)); + __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont); + + // Do not transform the receiver for native (Compilerhints already in x3). + __ Tbnz(w4, SharedFunctionInfo::kNative, cont); +} + + +static void EmitSlowCase(MacroAssembler* masm, + int argc, + Register function, + Register type, + Label* non_function) { + // Check for function proxy. + // x10 : function type. + __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function); + __ Push(function); // put proxy as additional argument + __ Mov(x0, argc + 1); + __ Mov(x2, 0); + __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ Bind(non_function); + __ Poke(function, argc * kXRegSize); + __ Mov(x0, argc); // Set up the number of arguments. + __ Mov(x2, 0); + __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { + // Wrap the receiver and patch it back onto the stack. 
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ Push(x1, x3); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ Pop(x1); + } + __ Poke(x0, argc * kPointerSize); + __ B(cont); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { // x1 function the function to call - // x2 : feedback vector - // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol) Register function = x1; - Register cache_cell = x2; - Register slot = x3; Register type = x4; Label slow, non_function, wrap, cont; // TODO(jbramley): This function has a lot of unnamed registers. Name them, // and tidy things up a bit. - if (NeedsChecks()) { + if (needs_checks) { // Check that the function is really a JavaScript function. __ JumpIfSmi(function, &non_function); // Goto slow case if we do not have a function. __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow); - - if (RecordCallTarget()) { - GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5); - // Type information was updated. Because we may call Array, which - // expects either undefined or an AllocationSite in ebx we need - // to set ebx to undefined. - __ LoadRoot(cache_cell, Heap::kUndefinedValueRootIndex); - } } // Fast-case: Invoke the function now. // x1 function pushed function - ParameterCount actual(argc_); - - if (CallAsMethod()) { - if (NeedsChecks()) { - // Do not transform the receiver for strict mode functions. - __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); - __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset)); - __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont); + ParameterCount actual(argc); - // Do not transform the receiver for native (Compilerhints already in x3). 
- __ Tbnz(w4, SharedFunctionInfo::kNative, &cont); + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); } // Compute the receiver in sloppy mode. - __ Peek(x3, argc_ * kPointerSize); + __ Peek(x3, argc * kPointerSize); - if (NeedsChecks()) { + if (needs_checks) { __ JumpIfSmi(x3, &wrap); __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt); } else { @@ -3351,63 +3123,30 @@ __ Bind(&cont); } + __ InvokeFunction(function, actual, JUMP_FUNCTION, NullCallWrapper()); - - if (NeedsChecks()) { + if (needs_checks) { // Slow-case: Non-function called. __ Bind(&slow); - if (RecordCallTarget()) { - // If there is a call target cache, mark it megamorphic in the - // non-function case. MegamorphicSentinel is an immortal immovable object - // (megamorphic symbol) so no write barrier is needed. - ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), - masm->isolate()->heap()->megamorphic_symbol()); - __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot, - kPointerSizeLog2)); - __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex); - __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); - } - // Check for function proxy. - // x10 : function type. - __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function); - __ Push(function); // put proxy as additional argument - __ Mov(x0, argc_ + 1); - __ Mov(x2, 0); - __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY); - { - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); - __ Jump(adaptor, RelocInfo::CODE_TARGET); - } - - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). - __ Bind(&non_function); - __ Poke(function, argc_ * kXRegSize); - __ Mov(x0, argc_); // Set up the number of arguments. 
- __ Mov(x2, 0); - __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + EmitSlowCase(masm, argc, function, type, &non_function); } - if (CallAsMethod()) { + if (call_as_method) { __ Bind(&wrap); - // Wrap the receiver and patch it back onto the stack. - { FrameScope frame_scope(masm, StackFrame::INTERNAL); - __ Push(x1, x3); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ Pop(x1); - } - __ Poke(x0, argc_ * kPointerSize); - __ B(&cont); + EmitWrapCase(masm, argc, &cont); } } +void CallFunctionStub::Generate(MacroAssembler* masm) { + ASM_LOCATION("CallFunctionStub::Generate"); + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + void CallConstructStub::Generate(MacroAssembler* masm) { ASM_LOCATION("CallConstructStub::Generate"); // x0 : number of arguments @@ -3473,11 +3212,173 @@ __ Bind(&do_call); // Set expected number of arguments to zero (not changing x0). 
__ Mov(x2, 0); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); } +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ Ldr(vector, FieldMemOperand(vector, + JSFunction::kSharedFunctionInfoOffset)); + __ Ldr(vector, FieldMemOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // x1 - function + // x3 - slot id + Label miss; + Register function = x1; + Register feedback_vector = x2; + Register index = x3; + Register scratch = x4; + + EmitLoadTypeFeedbackVector(masm, feedback_vector); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch); + __ Cmp(function, scratch); + __ B(ne, &miss); + + __ Mov(x0, Operand(arg_count())); + + __ Add(scratch, feedback_vector, + Operand::UntagSmiAndScale(index, kPointerSizeLog2)); + __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + + // Verify that scratch contains an AllocationSite + Register map = x5; + __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset)); + __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss); + + Register allocation_site = feedback_vector; + __ Mov(allocation_site, scratch); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after a miss. 
+ CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + __ Unreachable(); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + ASM_LOCATION("CallICStub"); + + // x1 - function + // x3 - slot id (Smi) + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + Register function = x1; + Register feedback_vector = x2; + Register index = x3; + Register type = x4; + + EmitLoadTypeFeedbackVector(masm, feedback_vector); + + // The checks. First, does x1 match the recorded monomorphic target? + __ Add(x4, feedback_vector, + Operand::UntagSmiAndScale(index, kPointerSizeLog2)); + __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize)); + + __ Cmp(x4, function); + __ B(ne, &extra_checks_or_miss); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + + // Compute the receiver in sloppy mode. + __ Peek(x3, argc * kPointerSize); + + __ JumpIfSmi(x3, &wrap); + __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt); + + __ Bind(&cont); + } + + __ InvokeFunction(function, + actual, + JUMP_FUNCTION, + NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(masm, argc, function, type, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ JumpIfRoot(x4, Heap::kMegamorphicSymbolRootIndex, &slow_start); + __ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. 
+ __ AssertNotSmi(x4); + __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss); + __ Add(x4, feedback_vector, + Operand::UntagSmiAndScale(index, kPointerSizeLog2)); + __ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex); + __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize)); + __ B(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + + // Check that the function is really a JavaScript function. + __ JumpIfSmi(function, &non_function); + + // Goto slow case if we do not have a function. + __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow); + __ B(&have_js_function); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + ASM_LOCATION("CallICStub[Miss]"); + + // Get the receiver of the function from the stack; 1 ~ return address. + __ Peek(x4, (state_.arg_count() + 1) * kPointerSize); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ Push(x4, x1, x2, x3); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to edi and exit the internal frame. + __ Mov(x1, x0); + } +} + + void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { // If the receiver is a smi trigger the non-string case. __ JumpIfSmi(object_, receiver_not_string_); @@ -3528,9 +3429,9 @@ if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); // NumberToSmi discards numbers that are not exact integers. 
- __ CallRuntime(Runtime::kHiddenNumberToSmi, 1); + __ CallRuntime(Runtime::kNumberToSmi, 1); } // Save the conversion result before the pop instructions below // have a chance to overwrite it. @@ -3553,7 +3454,7 @@ call_helper.BeforeCall(masm); __ SmiTag(index_); __ Push(object_, index_); - __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); __ Mov(result_, x0); call_helper.AfterCall(masm); __ B(&exit_); @@ -3569,8 +3470,7 @@ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); // At this point code register contains smi tagged ASCII char code. - STATIC_ASSERT(kSmiShift > kPointerSizeLog2); - __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2)); + __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2)); __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_); __ Bind(&exit_); @@ -3596,7 +3496,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { // Inputs are in x0 (lhs) and x1 (rhs). - ASSERT(state_ == CompareIC::SMI); + DCHECK(state_ == CompareIC::SMI); ASM_LOCATION("ICCompareStub[Smis]"); Label miss; // Bail out (to 'miss') unless both x0 and x1 are smis. 
@@ -3618,7 +3518,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::NUMBER); + DCHECK(state_ == CompareIC::NUMBER); ASM_LOCATION("ICCompareStub[HeapNumbers]"); Label unordered, maybe_undefined1, maybe_undefined2; @@ -3663,9 +3563,9 @@ __ Ret(); __ Bind(&unordered); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, CompareIC::GENERIC); - __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ Bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { @@ -3686,7 +3586,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::INTERNALIZED_STRING); + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); ASM_LOCATION("ICCompareStub[InternalizedStrings]"); Label miss; @@ -3724,9 +3624,9 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::UNIQUE_NAME); + DCHECK(state_ == CompareIC::UNIQUE_NAME); ASM_LOCATION("ICCompareStub[UniqueNames]"); - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); Label miss; Register result = x0; @@ -3763,7 +3663,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + DCHECK(state_ == CompareIC::STRING); ASM_LOCATION("ICCompareStub[Strings]"); Label miss; @@ -3804,7 +3704,7 @@ // because we already know they are not identical. We know they are both // strings. 
if (equality) { - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); STATIC_ASSERT(kInternalizedTag == 0); Label not_internalized_strings; __ Orr(x12, lhs_type, rhs_type); @@ -3835,7 +3735,7 @@ if (equality) { __ TailCallRuntime(Runtime::kStringEquals, 2, 1); } else { - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } __ Bind(&miss); @@ -3844,7 +3744,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + DCHECK(state_ == CompareIC::OBJECT); ASM_LOCATION("ICCompareStub[Objects]"); Label miss; @@ -3858,7 +3758,7 @@ __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss); __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss); - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); __ Sub(result, rhs, lhs); __ Ret(); @@ -3905,7 +3805,7 @@ Register stub_entry = x11; { ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); FrameScope scope(masm, StackFrame::INTERNAL); Register op = x10; @@ -3934,12 +3834,12 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, Register hash, Register character) { - ASSERT(!AreAliased(hash, character)); + DCHECK(!AreAliased(hash, character)); // hash = character + (character << 10); __ LoadRoot(hash, Heap::kHashSeedRootIndex); // Untag smi seed and add the character. - __ Add(hash, character, Operand(hash, LSR, kSmiShift)); + __ Add(hash, character, Operand::UntagSmi(hash)); // Compute hashes modulo 2^32 using a 32-bit W register. 
Register hash_w = hash.W(); @@ -3954,7 +3854,7 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, Register hash, Register character) { - ASSERT(!AreAliased(hash, character)); + DCHECK(!AreAliased(hash, character)); // hash += character; __ Add(hash, hash, character); @@ -3975,7 +3875,7 @@ // Compute hashes modulo 2^32 using a 32-bit W register. Register hash_w = hash.W(); Register scratch_w = scratch.W(); - ASSERT(!AreAliased(hash_w, scratch_w)); + DCHECK(!AreAliased(hash_w, scratch_w)); // hash += hash << 3; __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3)); @@ -4219,13 +4119,13 @@ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong); __ Bind(&return_x0); - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1, x3, x4); __ Drop(3); __ Ret(); __ Bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubString, 3, 1); __ bind(&single_char); // x1: result_length @@ -4249,7 +4149,7 @@ Register scratch1, Register scratch2, Register scratch3) { - ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3)); Register result = x0; Register left_length = scratch1; Register right_length = scratch2; @@ -4292,7 +4192,7 @@ Register scratch2, Register scratch3, Register scratch4) { - ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); + DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); Label result_not_equal, compare_lengths; // Find minimum length and length difference. @@ -4313,7 +4213,7 @@ // Compare lengths - strings up to min-length are equal. __ Bind(&compare_lengths); - ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); + DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); // Use length_delta as result if it's zero. 
Register result = x0; @@ -4338,7 +4238,7 @@ Register scratch1, Register scratch2, Label* chars_not_equal) { - ASSERT(!AreAliased(left, right, length, scratch1, scratch2)); + DCHECK(!AreAliased(left, right, length, scratch1, scratch2)); // Change index to run from -length to -1 by adding length to string // start. This means that loop ends when index reaches zero, which @@ -4366,7 +4266,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { Label runtime; - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); // Stack frame on entry. // sp[0]: right string @@ -4402,216 +4302,7 @@ // Call the runtime. // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer. - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); -} - - -void ArrayPushStub::Generate(MacroAssembler* masm) { - Register receiver = x0; - - int argc = arguments_count(); - - if (argc == 0) { - // Nothing to do, just return the length. - __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Drop(argc + 1); - __ Ret(); - return; - } - - Isolate* isolate = masm->isolate(); - - if (argc != 1) { - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - Label call_builtin, attempt_to_grow_elements, with_write_barrier; - - Register elements_length = x8; - Register length = x7; - Register elements = x6; - Register end_elements = x5; - Register value = x4; - // Get the elements array of the object. - __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check that the elements are in fast mode and writable. - __ CheckMap(elements, - x10, - Heap::kFixedArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); - } - - // Get the array's length and calculate new length. 
- __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ Add(length, length, Smi::FromInt(argc)); - - // Check if we could survive without allocation. - __ Ldr(elements_length, - FieldMemOperand(elements, FixedArray::kLengthOffset)); - __ Cmp(length, elements_length); - - const int kEndElementsOffset = - FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - __ B(gt, &attempt_to_grow_elements); - - // Check if value is a smi. - __ Peek(value, (argc - 1) * kPointerSize); - __ JumpIfNotSmi(value, &with_write_barrier); - - // Store the value. - // We may need a register containing the address end_elements below, - // so write back the value in end_elements. - __ Add(end_elements, elements, - Operand::UntagSmiAndScale(length, kPointerSizeLog2)); - __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - } else { - __ B(gt, &call_builtin); - - __ Peek(value, (argc - 1) * kPointerSize); - __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1, - &call_builtin, argc * kDoubleSize); - } - - // Save new length. - __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Return length. - __ Drop(argc + 1); - __ Mov(x0, length); - __ Ret(); - - if (IsFastDoubleElementsKind(elements_kind())) { - __ Bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ Bind(&with_write_barrier); - - if (IsFastSmiElementsKind(elements_kind())) { - if (FLAG_trace_elements_transitions) { - __ B(&call_builtin); - } - - __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset)); - __ JumpIfHeapNumber(x10, &call_builtin); - - ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) - ? 
FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - __ Ldr(x10, GlobalObjectMemOperand()); - __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset)); - __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX)); - const int header_size = FixedArrayBase::kHeaderSize; - // Verify that the object can be transitioned in place. - const int origin_offset = header_size + elements_kind() * kPointerSize; - __ ldr(x11, FieldMemOperand(receiver, origin_offset)); - __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset)); - __ cmp(x11, x12); - __ B(ne, &call_builtin); - - const int target_offset = header_size + target_kind * kPointerSize; - __ Ldr(x10, FieldMemOperand(x10, target_offset)); - __ Mov(x11, receiver); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - masm, DONT_TRACK_ALLOCATION_SITE, NULL); - } - - // Save new length. - __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Store the value. - // We may need a register containing the address end_elements below, - // so write back the value in end_elements. - __ Add(end_elements, elements, - Operand::UntagSmiAndScale(length, kPointerSizeLog2)); - __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - - __ RecordWrite(elements, - end_elements, - value, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ Drop(argc + 1); - __ Mov(x0, length); - __ Ret(); - - __ Bind(&attempt_to_grow_elements); - - if (!FLAG_inline_new) { - __ B(&call_builtin); - } - - Register argument = x2; - __ Peek(argument, (argc - 1) * kPointerSize); - // Growing elements that are SMI-only requires special handling in case - // the new element is non-Smi. For now, delegate to the builtin. - if (IsFastSmiElementsKind(elements_kind())) { - __ JumpIfNotSmi(argument, &call_builtin); - } - - // We could be lucky and the elements array could be at the top of new-space. 
- // In this case we can just grow it in place by moving the allocation pointer - // up. - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); - - const int kAllocationDelta = 4; - ASSERT(kAllocationDelta >= argc); - Register allocation_top_addr = x5; - Register allocation_top = x9; - // Load top and check if it is the end of elements. - __ Add(end_elements, elements, - Operand::UntagSmiAndScale(length, kPointerSizeLog2)); - __ Add(end_elements, end_elements, kEndElementsOffset); - __ Mov(allocation_top_addr, new_space_allocation_top); - __ Ldr(allocation_top, MemOperand(allocation_top_addr)); - __ Cmp(end_elements, allocation_top); - __ B(ne, &call_builtin); - - __ Mov(x10, new_space_allocation_limit); - __ Ldr(x10, MemOperand(x10)); - __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize); - __ Cmp(allocation_top, x10); - __ B(hi, &call_builtin); - - // We fit and could grow elements. - // Update new_space_allocation_top. - __ Str(allocation_top, MemOperand(allocation_top_addr)); - // Push the argument. - __ Str(argument, MemOperand(end_elements)); - // Fill the rest with holes. - __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); - ASSERT(kAllocationDelta == 4); - __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize)); - __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize)); - - // Update elements' and array's sizes. - __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta)); - __ Str(elements_length, - FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Elements are in new space, so write barrier is not required. 
- __ Drop(argc + 1); - __ Mov(x0, length); - __ Ret(); - - __ Bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } @@ -4621,12 +4312,11 @@ // -- x0 : right // -- lr : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); // Load x2 with the allocation site. We stick an undefined dummy value here // and replace it with the real allocation site later when we instantiate this // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ LoadObject(x2, handle(isolate->heap()->undefined_value())); + __ LoadObject(x2, handle(isolate()->heap()->undefined_value())); // Make sure that we actually patched the allocation site. if (FLAG_debug_code) { @@ -4638,17 +4328,11 @@ // Tail call into the stub that handles binary operations with allocation // sites. - BinaryOpWithAllocationSiteStub stub(state_); + BinaryOpWithAllocationSiteStub stub(isolate(), state_); __ TailCallStub(&stub); } -bool CodeStub::CanUseFPRegisters() { - // FP registers always available on ARM64. - return true; -} - - void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { // We need some extra registers for this stub, they have been allocated // but we need to save them before using them. @@ -4694,17 +4378,17 @@ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); Register address = x0.Is(regs_.address()) ? 
regs_.scratch0() : regs_.address(); - ASSERT(!address.Is(regs_.object())); - ASSERT(!address.Is(x0)); + DCHECK(!address.Is(regs_.object())); + DCHECK(!address.Is(x0)); __ Mov(address, regs_.address()); __ Mov(x0, regs_.object()); __ Mov(x1, address); - __ Mov(x2, ExternalReference::isolate_address(masm->isolate())); + __ Mov(x2, ExternalReference::isolate_address(isolate())); AllowExternalCallThatCantCauseGC scope(masm); ExternalReference function = ExternalReference::incremental_marking_record_write_function( - masm->isolate()); + isolate()); __ CallCFunction(function, 3, 0); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); @@ -4857,7 +4541,7 @@ __ JumpIfSmi(value, &smi_element); // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS. - __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift), + __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift), &fast_elements); // Store into the array literal requires an elements transition. Call into @@ -4890,15 +4574,15 @@ __ Bind(&double_elements); __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1, + __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, &slow_elements); __ Ret(); } void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); - __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1, kSaveFPRegs); + __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; __ Ldr(x1, MemOperand(fp, parameter_count_offset)); @@ -4912,22 +4596,31 @@ } -// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by -// a "Push lr" instruction, followed by a call. 
-static const unsigned int kProfileEntryHookCallSize = - Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); +static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) { + // The entry hook is a "BumpSystemStackPointer" instruction (sub), + // followed by a "Push lr" instruction, followed by a call. + unsigned int size = + Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); + if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { + // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in + // "BumpSystemStackPointer". + size += kInstructionSize; + } + return size; +} void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { - ProfileEntryHookStub stub; + ProfileEntryHookStub stub(masm->isolate()); Assembler::BlockConstPoolScope no_const_pools(masm); + DontEmitDebugCodeScope no_debug_code(masm); Label entry_hook_call_start; __ Bind(&entry_hook_call_start); __ Push(lr); __ CallStub(&stub); - ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) == - kProfileEntryHookCallSize); + DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) == + GetProfileEntryHookCallSize(masm)); __ Pop(lr); } @@ -4941,15 +4634,15 @@ // from anywhere. // TODO(jbramley): What about FP registers? __ PushCPURegList(kCallerSaved); - ASSERT(kCallerSaved.IncludesAliasOf(lr)); + DCHECK(kCallerSaved.IncludesAliasOf(lr)); const int kNumSavedRegs = kCallerSaved.Count(); // Compute the function's address as the first argument. 
- __ Sub(x0, lr, kProfileEntryHookCallSize); + __ Sub(x0, lr, GetProfileEntryHookCallSize(masm)); #if V8_HOST_ARCH_ARM64 uintptr_t entry_hook = - reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook()); + reinterpret_cast<uintptr_t>(isolate()->function_entry_hook()); __ Mov(x10, entry_hook); #else // Under the simulator we need to indirect the entry hook through a trampoline @@ -4957,9 +4650,9 @@ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); __ Mov(x10, Operand(ExternalReference(&dispatcher, ExternalReference::BUILTIN_CALL, - masm->isolate()))); + isolate()))); // It additionally takes an isolate as a third parameter - __ Mov(x2, ExternalReference::isolate_address(masm->isolate())); + __ Mov(x2, ExternalReference::isolate_address(isolate())); #endif // The caller's return address is above the saved temporaries. @@ -4992,6 +4685,7 @@ __ Blr(x10); // Return to calling code. __ Peek(lr, 0); + __ AssertFPCRState(); __ Ret(); __ SetStackPointer(old_stack_pointer); @@ -5001,10 +4695,10 @@ Register target) { // Make sure the caller configured the stack pointer (see comment in // DirectCEntryStub::Generate). - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); intptr_t code = - reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); + reinterpret_cast<intptr_t>(GetCode().location()); __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET)); __ Mov(x10, target); // Branch to the stub. @@ -5026,7 +4720,7 @@ Register name, Register scratch1, Register scratch2) { - ASSERT(!AreAliased(elements, name, scratch1, scratch2)); + DCHECK(!AreAliased(elements, name, scratch1, scratch2)); // Assert that name contains a string. __ AssertName(name); @@ -5043,7 +4737,7 @@ // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. 
- ASSERT(NameDictionary::GetProbeOffset(i) < + DCHECK(NameDictionary::GetProbeOffset(i) < 1 << (32 - Name::kHashFieldOffset)); __ Add(scratch2, scratch2, Operand( NameDictionary::GetProbeOffset(i) << Name::kHashShift)); @@ -5051,7 +4745,7 @@ __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); // Scale the index by multiplying by the element size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); // Check if the key is identical to the name. @@ -5074,7 +4768,7 @@ __ PushCPURegList(spill_list); if (name.is(x0)) { - ASSERT(!elements.is(x1)); + DCHECK(!elements.is(x1)); __ Mov(x1, name); __ Mov(x0, elements); } else { @@ -5083,7 +4777,7 @@ } Label not_found; - NameDictionaryLookupStub stub(POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); __ CallStub(&stub); __ Cbz(x0, ¬_found); __ Mov(scratch2, x2); // Move entry index into scratch2. @@ -5103,8 +4797,8 @@ Register properties, Handle<Name> name, Register scratch0) { - ASSERT(!AreAliased(receiver, properties, scratch0)); - ASSERT(name->IsUniqueName()); + DCHECK(!AreAliased(receiver, properties, scratch0)); + DCHECK(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -5120,7 +4814,7 @@ __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i)); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. 
Register entity_name = scratch0; @@ -5154,7 +4848,7 @@ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ Mov(x1, Operand(name)); - NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); __ CallStub(&stub); // Move stub return value to scratch0. Note that scratch0 is not included in // spill_list and won't be clobbered by PopCPURegList. @@ -5201,7 +4895,7 @@ // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(NameDictionary::GetProbeOffset(i) < + DCHECK(NameDictionary::GetProbeOffset(i) < 1 << (32 - Name::kHashFieldOffset)); __ Add(index, hash, NameDictionary::GetProbeOffset(i) << Name::kHashShift); @@ -5211,7 +4905,7 @@ __ And(index, mask, Operand(index, LSR, Name::kHashShift)); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2)); @@ -5257,7 +4951,7 @@ AllocationSiteOverrideMode mode) { ASM_LOCATION("CreateArrayDispatch"); if (mode == DISABLE_ALLOCATION_SITES) { - T stub(GetInitialFastElementsKind(), mode); + T stub(masm->isolate(), GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -5270,7 +4964,7 @@ // TODO(jbramley): Is this the best way to handle this? Can we make the // tail calls conditional, rather than hopping over each one? 
__ CompareAndBranch(kind, candidate_kind, ne, &next); - T stub(candidate_kind); + T stub(masm->isolate(), candidate_kind); __ TailCallStub(&stub); __ Bind(&next); } @@ -5320,12 +5014,14 @@ ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); - ArraySingleArgumentConstructorStub stub_holey(holey_initial, + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); __ Bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(initial, + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -5357,7 +5053,7 @@ Label next; ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); __ CompareAndBranch(kind, candidate_kind, ne, &next); - ArraySingleArgumentConstructorStub stub(candidate_kind); + ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind); __ TailCallStub(&stub); __ Bind(&next); } @@ -5376,11 +5072,11 @@ TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); - stub.GetCode(isolate); + T stub(isolate, kind); + stub.GetCode(); if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { - T stub1(kind, DISABLE_ALLOCATION_SITES); - stub1.GetCode(isolate); + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); } } } @@ -5401,12 +5097,12 @@ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things - InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); - stubh1.GetCode(isolate); - InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); - stubh2.GetCode(isolate); - InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); - stubh3.GetCode(isolate); + InternalArrayNoArgumentConstructorStub 
stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]); + stubh3.GetCode(); } } @@ -5509,22 +5205,22 @@ __ Cbz(x10, &packed_case); InternalArraySingleArgumentConstructorStub - stub1_holey(GetHoleyElementsKind(kind)); + stub1_holey(isolate(), GetHoleyElementsKind(kind)); __ TailCallStub(&stub1_holey); __ Bind(&packed_case); } - InternalArraySingleArgumentConstructorStub stub1(kind); + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); __ TailCallStub(&stub1); __ Bind(&zero_case); // No arguments. - InternalArrayNoArgumentConstructorStub stub0(kind); + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); __ TailCallStub(&stub0); __ Bind(&n_case); // N arguments. - InternalArrayNArgumentsConstructorStub stubN(kind); + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); __ TailCallStub(&stubN); } @@ -5536,8 +5232,6 @@ // -- sp[0] : return address // -- sp[4] : last argument // ----------------------------------- - Handle<Object> undefined_sentinel( - masm->isolate()->heap()->undefined_value(), masm->isolate()); Register constructor = x1; @@ -5616,8 +5310,6 @@ STATIC_ASSERT(FCA::kHolderIndex == 0); STATIC_ASSERT(FCA::kArgsLength == 7); - Isolate* isolate = masm->isolate(); - // FunctionCallbackArguments: context, callee and call data. __ Push(context, callee, call_data); @@ -5628,7 +5320,7 @@ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); } Register isolate_reg = x5; - __ Mov(isolate_reg, ExternalReference::isolate_address(isolate)); + __ Mov(isolate_reg, ExternalReference::isolate_address(isolate())); // FunctionCallbackArguments: // return value, return value default, isolate, holder. 
@@ -5649,7 +5341,7 @@ FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); - ASSERT(!AreAliased(x0, api_function_address)); + DCHECK(!AreAliased(x0, api_function_address)); // x0 = FunctionCallbackInfo& // Arguments is after the return address. __ Add(x0, masm->StackPointer(), 1 * kPointerSize); @@ -5662,11 +5354,8 @@ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize)); const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); - ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); AllowExternalCallThatCantCauseGC scope(masm); MemOperand context_restore_operand( @@ -5719,12 +5408,8 @@ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); - ExternalReference::Type thunk_type = - ExternalReference::PROFILING_GETTER_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); const int spill_offset = 1 + kApiStackSpace; __ CallApiFunctionAndReturn(api_function_address, diff -Nru nodejs-0.11.13/deps/v8/src/arm64/code-stubs-arm64.h nodejs-0.11.15/deps/v8/src/arm64/code-stubs-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/code-stubs-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/code-stubs-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_ #define V8_ARM64_CODE_STUBS_ARM64_H_ -#include "ic-inl.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { @@ -39,8 +16,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub { public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) { } void Generate(MacroAssembler* masm); @@ -50,8 +27,8 @@ private: SaveFPRegsMode save_doubles_; - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } }; @@ -79,15 +56,14 @@ class StoreRegistersStateStub: public PlatformCodeStub { public: - explicit StoreRegistersStateStub(SaveFPRegsMode with_fp) - : save_doubles_(with_fp) {} + explicit StoreRegistersStateStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} static Register to_be_pushed_lr() { return ip0; } static void GenerateAheadOfTime(Isolate* isolate); private: - Major MajorKey() { return StoreRegistersState; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } - SaveFPRegsMode save_doubles_; + Major MajorKey() const { return StoreRegistersState; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -95,14 +71,13 @@ class RestoreRegistersStateStub: public PlatformCodeStub { public: - explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp) - : save_doubles_(with_fp) {} + explicit RestoreRegistersStateStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} static void GenerateAheadOfTime(Isolate* isolate); private: - Major MajorKey() { return RestoreRegistersState; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 
1 : 0; } - SaveFPRegsMode save_doubles_; + Major MajorKey() const { return RestoreRegistersState; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -113,12 +88,14 @@ // Stub to record the write of 'value' at 'address' in 'object'. // Typically 'address' = 'object' + <some offset>. // See MacroAssembler::RecordWriteField() for example. - RecordWriteStub(Register object, + RecordWriteStub(Isolate* isolate, + Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) - : object_(object), + : PlatformCodeStub(isolate), + object_(object), value_(value), address_(address), remembered_set_action_(remembered_set_action), @@ -143,17 +120,17 @@ Instruction* instr2 = instr1->following(); if (instr1->IsUncondBranchImm()) { - ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code())); + DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code())); return INCREMENTAL; } - ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code())); + DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code())); if (instr2->IsUncondBranchImm()) { return INCREMENTAL_COMPACTION; } - ASSERT(instr2->IsPCRelAddressing()); + DCHECK(instr2->IsPCRelAddressing()); return STORE_BUFFER_ONLY; } @@ -172,31 +149,31 @@ Instruction* instr1 = patcher.InstructionAt(0); Instruction* instr2 = patcher.InstructionAt(kInstructionSize); // Instructions must be either 'adr' or 'b'. - ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm()); - ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm()); + DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm()); + DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm()); // Retrieve the offsets to the labels. 
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset(); int32_t offset_to_incremental_compacting = instr2->ImmPCOffset(); switch (mode) { case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || + DCHECK(GetMode(stub) == INCREMENTAL || GetMode(stub) == INCREMENTAL_COMPACTION); patcher.adr(xzr, offset_to_incremental_noncompacting); patcher.adr(xzr, offset_to_incremental_compacting); break; case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2); patcher.adr(xzr, offset_to_incremental_compacting); break; case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); patcher.adr(xzr, offset_to_incremental_noncompacting); patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2); break; } - ASSERT(GetMode(stub) == mode); + DCHECK(GetMode(stub) == mode); } private: @@ -210,8 +187,14 @@ : object_(object), address_(address), scratch0_(scratch), - saved_regs_(kCallerSaved) { - ASSERT(!AreAliased(scratch, object, address)); + saved_regs_(kCallerSaved), + saved_fp_regs_(kCallerSavedFP) { + DCHECK(!AreAliased(scratch, object, address)); + + // The SaveCallerSaveRegisters method needs to save caller-saved + // registers, but we don't bother saving MacroAssembler scratch registers. + saved_regs_.Remove(MacroAssembler::DefaultTmpList()); + saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList()); // We would like to require more scratch registers for this stub, // but the number of registers comes down to the ones used in @@ -223,12 +206,6 @@ scratch1_ = Register(pool_available.PopLowestIndex()); scratch2_ = Register(pool_available.PopLowestIndex()); - // SaveCallerRegisters method needs to save caller saved register, however - // we don't bother saving ip0 and ip1 because they are used as scratch - // registers by the MacroAssembler. 
- saved_regs_.Remove(ip0); - saved_regs_.Remove(ip1); - // The scratch registers will be restored by other means so we don't need // to save them with the other caller saved registers. saved_regs_.Remove(scratch0_); @@ -253,7 +230,7 @@ // register will need to be preserved. Can we improve this? masm->PushCPURegList(saved_regs_); if (mode == kSaveFPRegs) { - masm->PushCPURegList(kCallerSavedFP); + masm->PushCPURegList(saved_fp_regs_); } } @@ -261,7 +238,7 @@ // TODO(all): This can be very expensive, and it is likely that not every // register will need to be preserved. Can we improve this? if (mode == kSaveFPRegs) { - masm->PopCPURegList(kCallerSavedFP); + masm->PopCPURegList(saved_fp_regs_); } masm->PopCPURegList(saved_regs_); } @@ -279,6 +256,7 @@ Register scratch1_; Register scratch2_; CPURegList saved_regs_; + CPURegList saved_fp_regs_; // TODO(all): We should consider moving this somewhere else. static CPURegList GetValidRegistersForAllocation() { @@ -296,10 +274,7 @@ CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25); // We also remove MacroAssembler's scratch registers. 
- list.Remove(ip0); - list.Remove(ip1); - list.Remove(x8); - list.Remove(x9); + list.Remove(MacroAssembler::DefaultTmpList()); return list; } @@ -326,9 +301,9 @@ Mode mode); void InformIncrementalMarker(MacroAssembler* masm); - Major MajorKey() { return RecordWrite; } + Major MajorKey() const { return RecordWrite; } - int MinorKey() { + int MinorKey() const { return MinorKeyFor(object_, value_, address_, remembered_set_action_, save_fp_regs_mode_); } @@ -338,9 +313,9 @@ Register address, RememberedSetAction action, SaveFPRegsMode fp_mode) { - ASSERT(object.Is64Bits()); - ASSERT(value.Is64Bits()); - ASSERT(address.Is64Bits()); + DCHECK(object.Is64Bits()); + DCHECK(value.Is64Bits()); + DCHECK(address.Is64Bits()); return ObjectBits::encode(object.code()) | ValueBits::encode(value.code()) | AddressBits::encode(address.code()) | @@ -372,13 +347,13 @@ // the exit frame before doing the call with GenerateCall. class DirectCEntryStub: public PlatformCodeStub { public: - DirectCEntryStub() {} + explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} void Generate(MacroAssembler* masm); void GenerateCall(MacroAssembler* masm, Register target); private: - Major MajorKey() { return DirectCEntry; } - int MinorKey() { return 0; } + Major MajorKey() const { return DirectCEntry; } + int MinorKey() const { return 0; } bool NeedsImmovableCode() { return true; } }; @@ -388,7 +363,8 @@ public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) + : PlatformCodeStub(isolate), mode_(mode) { } void Generate(MacroAssembler* masm); @@ -422,11 +398,9 @@ NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return NameDictionaryLookup; } + Major MajorKey() const { return NameDictionaryLookup; } - int MinorKey() { - return LookupModeBits::encode(mode_); - } + int MinorKey() const { return 
LookupModeBits::encode(mode_); } class LookupModeBits: public BitField<LookupMode, 0, 1> {}; @@ -436,11 +410,11 @@ class SubStringStub: public PlatformCodeStub { public: - SubStringStub() {} + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -448,7 +422,7 @@ class StringCompareStub: public PlatformCodeStub { public: - StringCompareStub() { } + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } // Compares two flat ASCII strings and returns result in x0. static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, @@ -469,8 +443,8 @@ Register scratch3); private: - virtual Major MajorKey() { return StringCompare; } - virtual int MinorKey() { return 0; } + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } virtual void Generate(MacroAssembler* masm); static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, @@ -483,8 +457,9 @@ }; -struct PlatformCallInterfaceDescriptor { - explicit PlatformCallInterfaceDescriptor( +class PlatformInterfaceDescriptor { + public: + explicit PlatformInterfaceDescriptor( TargetAddressStorageMode storage_mode) : storage_mode_(storage_mode) { } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/constants-arm64.h nodejs-0.11.15/deps/v8/src/arm64/constants-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/constants-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/constants-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_CONSTANTS_ARM64_H_ #define V8_ARM64_CONSTANTS_ARM64_H_ @@ -38,7 +15,9 @@ // Get the standard printf format macros for C99 stdint types. 
+#ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS +#endif #include <inttypes.h> @@ -48,8 +27,7 @@ const unsigned kInstructionSize = 4; const unsigned kInstructionSizeLog2 = 2; -const unsigned kLiteralEntrySize = 4; -const unsigned kLiteralEntrySizeLog2 = 2; +const unsigned kLoadLiteralScaleLog2 = 2; const unsigned kMaxLoadLiteralRange = 1 * MB; const unsigned kNumberOfRegisters = 32; @@ -112,6 +90,8 @@ const unsigned kJSSPCode = 28; const unsigned kSPRegInternalCode = 63; const unsigned kRegCodeMask = 0x1f; +const unsigned kShiftAmountWRegMask = 0x1f; +const unsigned kShiftAmountXRegMask = 0x3f; // Standard machine types defined by AAPCS64. const unsigned kByteSize = 8; const unsigned kByteSizeInBytes = kByteSize >> 3; @@ -130,6 +110,7 @@ // AArch64 floating-point specifics. These match IEEE-754. const unsigned kDoubleMantissaBits = 52; const unsigned kDoubleExponentBits = 11; +const unsigned kDoubleExponentBias = 1023; const unsigned kFloatMantissaBits = 23; const unsigned kFloatExponentBits = 8; @@ -262,8 +243,8 @@ enum Condition { eq = 0, ne = 1, - hs = 2, - lo = 3, + hs = 2, cs = hs, + lo = 3, cc = lo, mi = 4, pl = 5, vs = 6, @@ -278,15 +259,15 @@ nv = 15 // Behaves as always/al. }; -inline Condition InvertCondition(Condition cond) { +inline Condition NegateCondition(Condition cond) { // Conditions al and nv behave identically, as "always true". They can't be // inverted, because there is no never condition. - ASSERT((cond != al) && (cond != nv)); + DCHECK((cond != al) && (cond != nv)); return static_cast<Condition>(cond ^ 1); } -// Corresponds to transposing the operands of a comparison. -inline Condition ReverseConditionForCmp(Condition cond) { +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cond) { switch (cond) { case lo: return hi; @@ -313,7 +294,7 @@ // 'mi' for instance). 
UNREACHABLE(); return nv; - }; + } } enum FlagsUpdate { @@ -419,7 +400,7 @@ // // The enumerations can be used like this: // -// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed); +// DCHECK(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed); // switch(instr->Mask(PCRelAddressingMask)) { // case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break; // case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break; diff -Nru nodejs-0.11.13/deps/v8/src/arm64/cpu-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/cpu-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/cpu-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/cpu-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,68 +1,47 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // CPU specific code for arm independent of OS goes here. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "arm64/cpu-arm64.h" -#include "arm64/utils-arm64.h" +#include "src/arm64/utils-arm64.h" +#include "src/assembler.h" namespace v8 { namespace internal { -#ifdef DEBUG -bool CpuFeatures::initialized_ = false; +class CacheLineSizes { + public: + CacheLineSizes() { +#ifdef USE_SIMULATOR + cache_type_register_ = 0; +#else + // Copy the content of the cache type register to a core register. + __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT + : [ctr] "=r" (cache_type_register_)); #endif -unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; -unsigned CpuFeatures::cross_compile_ = 0; - -// Initialise to smallest possible cache size. -unsigned CpuFeatures::dcache_line_size_ = 1; -unsigned CpuFeatures::icache_line_size_ = 1; - + } -void CPU::SetUp() { - CpuFeatures::Probe(); -} + uint32_t icache_line_size() const { return ExtractCacheLineSize(0); } + uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); } + private: + uint32_t ExtractCacheLineSize(int cache_line_size_shift) const { + // The cache type register holds the size of cache lines in words as a + // power of two. 
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf); + } -bool CPU::SupportsCrankshaft() { - return true; -} + uint32_t cache_type_register_; +}; -void CPU::FlushICache(void* address, size_t length) { - if (length == 0) { - return; - } +void CpuFeatures::FlushICache(void* address, size_t length) { + if (length == 0) return; #ifdef USE_SIMULATOR // TODO(all): consider doing some cache simulation to ensure every address @@ -76,11 +55,12 @@ uintptr_t start = reinterpret_cast<uintptr_t>(address); // Sizes will be used to generate a mask big enough to cover a pointer. - uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size()); - uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size()); + CacheLineSizes sizes; + uintptr_t dsize = sizes.dcache_line_size(); + uintptr_t isize = sizes.icache_line_size(); // Cache line sizes are always a power of 2. - ASSERT(CountSetBits(dsize, 64) == 1); - ASSERT(CountSetBits(isize, 64) == 1); + DCHECK(CountSetBits(dsize, 64) == 1); + DCHECK(CountSetBits(isize, 64) == 1); uintptr_t dstart = start & ~(dsize - 1); uintptr_t istart = start & ~(isize - 1); uintptr_t end = start + length; @@ -138,62 +118,6 @@ #endif } - -void CpuFeatures::Probe() { - // Compute I and D cache line size. The cache type register holds - // information about the caches. - uint32_t cache_type_register = GetCacheType(); - - static const int kDCacheLineSizeShift = 16; - static const int kICacheLineSizeShift = 0; - static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift; - static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift; - - // The cache type register holds the size of the I and D caches as a power of - // two. 
- uint32_t dcache_line_size_power_of_two = - (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift; - uint32_t icache_line_size_power_of_two = - (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift; - - dcache_line_size_ = 1 << dcache_line_size_power_of_two; - icache_line_size_ = 1 << icache_line_size_power_of_two; - - // AArch64 has no configuration options, no further probing is required. - supported_ = 0; - -#ifdef DEBUG - initialized_ = true; -#endif -} - - -unsigned CpuFeatures::dcache_line_size() { - ASSERT(initialized_); - return dcache_line_size_; -} - - -unsigned CpuFeatures::icache_line_size() { - ASSERT(initialized_); - return icache_line_size_; -} - - -uint32_t CpuFeatures::GetCacheType() { -#ifdef USE_SIMULATOR - // This will lead to a cache with 1 byte long lines, which is fine since the - // simulator will not need this information. - return 0; -#else - uint32_t cache_type_register; - // Copy the content of the cache type register to a core register. - __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT - : [ctr] "=r" (cache_type_register)); - return cache_type_register; -#endif -} - } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM64 diff -Nru nodejs-0.11.13/deps/v8/src/arm64/cpu-arm64.h nodejs-0.11.15/deps/v8/src/arm64/cpu-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/cpu-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/cpu-arm64.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_ARM64_CPU_ARM64_H_ -#define V8_ARM64_CPU_ARM64_H_ - -#include <stdio.h> -#include "serialize.h" -#include "cpu.h" - -namespace v8 { -namespace internal { - - -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a CpuFeatureScope before use. -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Check whether a feature is supported by the target CPU. - static bool IsSupported(CpuFeature f) { - ASSERT(initialized_); - // There are no optional features for ARM64. 
- return false; - }; - - static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { - ASSERT(initialized_); - // There are no optional features for ARM64. - return false; - } - - static bool IsSafeForSnapshot(CpuFeature f) { - return (IsSupported(f) && - (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); - } - - // I and D cache line size in bytes. - static unsigned dcache_line_size(); - static unsigned icache_line_size(); - - static unsigned supported_; - - static bool VerifyCrossCompiling() { - // There are no optional features for ARM64. - ASSERT(cross_compile_ == 0); - return true; - } - - static bool VerifyCrossCompiling(CpuFeature f) { - // There are no optional features for ARM64. - USE(f); - ASSERT(cross_compile_ == 0); - return true; - } - - private: - // Return the content of the cache type register. - static uint32_t GetCacheType(); - - // I and D cache line size in bytes. - static unsigned icache_line_size_; - static unsigned dcache_line_size_; - -#ifdef DEBUG - static bool initialized_; -#endif - - // This isn't used (and is always 0), but it is required by V8. - static unsigned found_by_runtime_probing_only_; - - static unsigned cross_compile_; - - friend class PlatformFeatureScope; - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -}; - -} } // namespace v8::internal - -#endif // V8_ARM64_CPU_ARM64_H_ diff -Nru nodejs-0.11.13/deps/v8/src/arm64/debug-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/debug-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/debug-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/debug-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "codegen.h" -#include "debug.h" +#include "src/codegen.h" +#include "src/debug.h" namespace v8 { namespace internal { @@ -38,8 +15,6 @@ #define __ ACCESS_MASM(masm) - -#ifdef ENABLE_DEBUGGER_SUPPORT bool BreakLocationIterator::IsDebugBreakAtReturn() { return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -67,11 +42,11 @@ STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5); PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5); byte* entry = - debug_info_->GetIsolate()->debug()->debug_break_return()->entry(); + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(); // The first instruction of a patched return sequence must be a load literal // loading the address of the debug break return code. - patcher.LoadLiteral(ip0, 3 * kInstructionSize); + patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2); // TODO(all): check the following is correct. // The debug break return code will push a frame and call statically compiled // code. By using blr, even though control will not return after the branch, @@ -92,21 +67,21 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); return rinfo->IsPatchedReturnSequence(); } bool BreakLocationIterator::IsDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Check whether the debug break slot instructions have been patched. 
return rinfo()->IsPatchedDebugBreakSlotSequence(); } void BreakLocationIterator::SetDebugBreakAtSlot() { - // Patch the code emitted by Debug::GenerateSlots, changing the debug break - // slot code from + // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug + // break slot code from // mov x0, x0 @ nop DEBUG_BREAK_NOP // mov x0, x0 @ nop DEBUG_BREAK_NOP // mov x0, x0 @ nop DEBUG_BREAK_NOP @@ -126,11 +101,11 @@ STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4); PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4); byte* entry = - debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(); + debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(); // The first instruction of a patched debug break slot must be a load literal // loading the address of the debug break slot code. - patcher.LoadLiteral(ip0, 2 * kInstructionSize); + patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2); // TODO(all): check the following is correct. // The debug break slot code will push a frame and call statically compiled // code. By using blr, event hough control will not return after the branch, @@ -143,12 +118,11 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotInstructions); } -const bool Debug::FramePaddingLayout::kIsSupported = false; static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, @@ -157,6 +131,12 @@ { FrameScope scope(masm, StackFrame::INTERNAL); + // Load padding words on stack. 
+ __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue)); + __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize); + __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize)); + __ Push(scratch); + // Any live values (object_regs and non_object_regs) in caller-saved // registers (or lr) need to be stored on the stack so that their values are // safely preserved for a call into C code. @@ -170,12 +150,12 @@ // collector doesn't try to interpret them as pointers. // // TODO(jbramley): Why can't this handle callee-saved registers? - ASSERT((~kCallerSaved.list() & object_regs) == 0); - ASSERT((~kCallerSaved.list() & non_object_regs) == 0); - ASSERT((object_regs & non_object_regs) == 0); - ASSERT((scratch.Bit() & object_regs) == 0); - ASSERT((scratch.Bit() & non_object_regs) == 0); - ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0); + DCHECK((~kCallerSaved.list() & object_regs) == 0); + DCHECK((~kCallerSaved.list() & non_object_regs) == 0); + DCHECK((object_regs & non_object_regs) == 0); + DCHECK((scratch.Bit() & object_regs) == 0); + DCHECK((scratch.Bit() & non_object_regs) == 0); + DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0); STATIC_ASSERT(kSmiValueSize == 32); CPURegList non_object_list = @@ -183,15 +163,16 @@ while (!non_object_list.IsEmpty()) { // Store each non-object register as two SMIs. Register reg = Register(non_object_list.PopLowestIndex()); - __ Push(reg); - __ Poke(wzr, 0); - __ Push(reg.W(), wzr); + __ Lsr(scratch, reg, 32); + __ SmiTagAndPush(scratch, reg); + // Stack: // jssp[12]: reg[63:32] // jssp[8]: 0x00000000 (SMI tag & padding) // jssp[4]: reg[31:0] // jssp[0]: 0x00000000 (SMI tag & padding) - STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32)); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits); } if (object_regs != 0) { @@ -204,7 +185,7 @@ __ Mov(x0, 0); // No arguments. 
__ Mov(x1, ExternalReference::debug_break(masm->isolate())); - CEntryStub stub(1); + CEntryStub stub(masm->isolate(), 1); __ CallStub(&stub); // Restore the register values from the expression stack. @@ -226,86 +207,77 @@ __ Bfxil(reg, scratch, 32, 32); } + // Don't bother removing padding bytes pushed on the stack + // as the frame is going to be restored right away. + // Leave the internal frame. } // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. - ExternalReference after_break_target(Debug_Address::AfterBreakTarget(), - masm->isolate()); + ExternalReference after_break_target = + ExternalReference::debug_after_break_target_address(masm->isolate()); __ Mov(scratch, after_break_target); __ Ldr(scratch, MemOperand(scratch)); __ Br(scratch); } -void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC load (from ic-arm.cc). +void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub // ----------- S t a t e ------------- - // -- x2 : name - // -- lr : return address - // -- x0 : receiver - // -- [sp] : receiver + // -- x1 : function + // -- x3 : slot in feedback array // ----------------------------------- - // Registers x0 and x2 contain objects that need to be pushed on the - // expression stack of the fake JS frame. - Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10); + Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10); } -void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC store (from ic-arm.cc). 
- // ----------- S t a t e ------------- - // -- x0 : value - // -- x1 : receiver - // -- x2 : name - // -- lr : return address - // ----------------------------------- - // Registers x0, x1, and x2 contain objects that need to be pushed on the - // expression stack of the fake JS frame. - Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10); +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC load (from ic-arm.cc). + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10); } -void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10); +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC store (from ic-arm64.cc). + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10); } -void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // -- lr : return address - Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10); +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { + // Calling convention for keyed IC load (from ic-arm.cc). 
+ GenerateLoadICDebugBreak(masm); } -void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { - // Register state for CompareNil IC - // ----------- S t a t e ------------- - // -- r0 : value - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10); +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC keyed store call (from ic-arm64.cc). + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10); } -void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC call (from ic-arm.cc) +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { + // Register state for CompareNil IC // ----------- S t a t e ------------- - // -- x2 : name + // -- r0 : value // ----------------------------------- - Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10); + Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10); } -void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { // In places other than IC call sites it is expected that r0 is TOS which // is an object - this is not generally the case so this should be used with // care. @@ -313,7 +285,7 @@ } -void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { // Register state for CallFunctionStub (from code-stubs-arm64.cc). // ----------- S t a t e ------------- // -- x1 : function @@ -322,18 +294,7 @@ } -void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) { - // Register state for CallFunctionStub (from code-stubs-arm64.cc). 
- // ----------- S t a t e ------------- - // -- x1 : function - // -- x2 : feedback array - // -- x3 : slot in feedback array - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10); -} - - -void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-arm64.cc). // ----------- S t a t e ------------- // -- x0 : number of arguments (not smi) @@ -343,7 +304,8 @@ } -void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-arm64.cc). // ----------- S t a t e ------------- // -- x0 : number of arguments (not smi) @@ -356,7 +318,7 @@ } -void Debug::GenerateSlot(MacroAssembler* masm) { +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { // Generate enough nop's to make space for a call instruction. Avoid emitting // the constant pool in the debug break slot code. InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions); @@ -368,25 +330,48 @@ } -void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
Generate_DebugBreakCallHelper(masm, 0, 0, x10); } -void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { - masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64); +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + __ Ret(); } -void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { - masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64); +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + ExternalReference restarter_frame_function_slot = + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + + __ Mov(scratch, restarter_frame_function_slot); + __ Str(xzr, MemOperand(scratch)); + + // We do not know our frame height, but set sp based on fp. + __ Sub(masm->StackPointer(), fp, kPointerSize); + __ AssertStackConsistency(); + + __ Pop(x1, fp, lr); // Function, Frame, Return address. + + // Load context from the function. + __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset)); + + // Get function code. + __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset)); + __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag); + + // Re-run JSFunction, x1 is function, cp is context. + __ Br(scratch); } -const bool Debug::kFrameDropperSupported = false; -#endif // ENABLE_DEBUGGER_SUPPORT +const bool LiveEdit::kFrameDropperSupported = true; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm64/decoder-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/decoder-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/decoder-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/decoder-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "globals.h" -#include "utils.h" -#include "arm64/decoder-arm64.h" +#include "src/arm64/decoder-arm64.h" +#include "src/globals.h" +#include "src/utils.h" namespace v8 { @@ -62,7 +39,7 @@ } // We reached the end of the list. The last element must be // registered_visitor. 
- ASSERT(*it == registered_visitor); + DCHECK(*it == registered_visitor); visitors_.insert(it, new_visitor); } @@ -80,7 +57,7 @@ } // We reached the end of the list. The last element must be // registered_visitor. - ASSERT(*it == registered_visitor); + DCHECK(*it == registered_visitor); visitors_.push_back(new_visitor); } @@ -93,7 +70,7 @@ #define DEFINE_VISITOR_CALLERS(A) \ void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \ if (!(instr->Mask(A##FMask) == A##Fixed)) { \ - ASSERT(instr->Mask(A##FMask) == A##Fixed); \ + DCHECK(instr->Mask(A##FMask) == A##Fixed); \ } \ std::list<DecoderVisitor*>::iterator it; \ for (it = visitors_.begin(); it != visitors_.end(); it++) { \ diff -Nru nodejs-0.11.13/deps/v8/src/arm64/decoder-arm64.h nodejs-0.11.15/deps/v8/src/arm64/decoder-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/decoder-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/decoder-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_DECODER_ARM64_H_ #define V8_ARM64_DECODER_ARM64_H_ #include <list> -#include "globals.h" -#include "arm64/instructions-arm64.h" +#include "src/arm64/instructions-arm64.h" +#include "src/globals.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arm64/decoder-arm64-inl.h nodejs-0.11.15/deps/v8/src/arm64/decoder-arm64-inl.h --- nodejs-0.11.13/deps/v8/src/arm64/decoder-arm64-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/decoder-arm64-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_DECODER_ARM64_INL_H_ #define V8_ARM64_DECODER_ARM64_INL_H_ -#include "arm64/decoder-arm64.h" -#include "globals.h" -#include "utils.h" +#include "src/arm64/decoder-arm64.h" +#include "src/globals.h" +#include "src/utils.h" namespace v8 { @@ -119,17 +96,17 @@ template<typename V> void Decoder<V>::DecodePCRelAddressing(Instruction* instr) { - ASSERT(instr->Bits(27, 24) == 0x0); + DCHECK(instr->Bits(27, 24) == 0x0); // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level // decode. 
- ASSERT(instr->Bit(28) == 0x1); + DCHECK(instr->Bit(28) == 0x1); V::VisitPCRelAddressing(instr); } template<typename V> void Decoder<V>::DecodeBranchSystemException(Instruction* instr) { - ASSERT((instr->Bits(27, 24) == 0x4) || + DCHECK((instr->Bits(27, 24) == 0x4) || (instr->Bits(27, 24) == 0x5) || (instr->Bits(27, 24) == 0x6) || (instr->Bits(27, 24) == 0x7) ); @@ -231,7 +208,7 @@ template<typename V> void Decoder<V>::DecodeLoadStore(Instruction* instr) { - ASSERT((instr->Bits(27, 24) == 0x8) || + DCHECK((instr->Bits(27, 24) == 0x8) || (instr->Bits(27, 24) == 0x9) || (instr->Bits(27, 24) == 0xC) || (instr->Bits(27, 24) == 0xD) ); @@ -351,7 +328,7 @@ template<typename V> void Decoder<V>::DecodeLogical(Instruction* instr) { - ASSERT(instr->Bits(27, 24) == 0x2); + DCHECK(instr->Bits(27, 24) == 0x2); if (instr->Mask(0x80400000) == 0x00400000) { V::VisitUnallocated(instr); @@ -371,7 +348,7 @@ template<typename V> void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) { - ASSERT(instr->Bits(27, 24) == 0x3); + DCHECK(instr->Bits(27, 24) == 0x3); if ((instr->Mask(0x80400000) == 0x80000000) || (instr->Mask(0x80400000) == 0x00400000) || @@ -397,7 +374,7 @@ template<typename V> void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) { - ASSERT(instr->Bits(27, 24) == 0x1); + DCHECK(instr->Bits(27, 24) == 0x1); if (instr->Bit(23) == 1) { V::VisitUnallocated(instr); } else { @@ -408,7 +385,7 @@ template<typename V> void Decoder<V>::DecodeDataProcessing(Instruction* instr) { - ASSERT((instr->Bits(27, 24) == 0xA) || + DCHECK((instr->Bits(27, 24) == 0xA) || (instr->Bits(27, 24) == 0xB) ); if (instr->Bit(24) == 0) { @@ -524,7 +501,7 @@ template<typename V> void Decoder<V>::DecodeFP(Instruction* instr) { - ASSERT((instr->Bits(27, 24) == 0xE) || + DCHECK((instr->Bits(27, 24) == 0xE) || (instr->Bits(27, 24) == 0xF) ); if (instr->Bit(28) == 0) { @@ -637,7 +614,7 @@ } } else { // Bit 30 == 1 has been handled earlier. 
- ASSERT(instr->Bit(30) == 0); + DCHECK(instr->Bit(30) == 0); if (instr->Mask(0xA0800000) != 0) { V::VisitUnallocated(instr); } else { @@ -653,7 +630,7 @@ template<typename V> void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) { // TODO(all): Implement Advanced SIMD load/store instruction decode. - ASSERT(instr->Bits(29, 25) == 0x6); + DCHECK(instr->Bits(29, 25) == 0x6); V::VisitUnimplemented(instr); } @@ -661,7 +638,7 @@ template<typename V> void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) { // TODO(all): Implement Advanced SIMD data processing instruction decode. - ASSERT(instr->Bits(27, 25) == 0x7); + DCHECK(instr->Bits(27, 25) == 0x7); V::VisitUnimplemented(instr); } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/delayed-masm-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/delayed-masm-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/delayed-masm-arm64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/delayed-masm-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,198 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_ARM64 + +#include "src/arm64/delayed-masm-arm64.h" +#include "src/arm64/lithium-codegen-arm64.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm_) + + +void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) { + DCHECK(src->IsStackSlot()); + DCHECK(dst->IsStackSlot()); + MemOperand src_operand = cgen_->ToMemOperand(src); + MemOperand dst_operand = cgen_->ToMemOperand(dst); + if (pending_ == kStackSlotMove) { + DCHECK(pending_pc_ == masm_->pc_offset()); + UseScratchRegisterScope scope(masm_); + DoubleRegister temp1 = scope.AcquireD(); + DoubleRegister temp2 = scope.AcquireD(); + switch (MemOperand::AreConsistentForPair(pending_address_src_, + src_operand)) { + case MemOperand::kNotPair: + __ Ldr(temp1, pending_address_src_); + __ Ldr(temp2, src_operand); + break; + case MemOperand::kPairAB: + __ Ldp(temp1, temp2, pending_address_src_); + break; + case MemOperand::kPairBA: + __ Ldp(temp2, temp1, src_operand); + break; + } + switch (MemOperand::AreConsistentForPair(pending_address_dst_, + dst_operand)) { + case MemOperand::kNotPair: + __ Str(temp1, pending_address_dst_); + __ Str(temp2, dst_operand); + break; + case MemOperand::kPairAB: + __ Stp(temp1, temp2, pending_address_dst_); + break; + case MemOperand::kPairBA: + __ Stp(temp2, temp1, dst_operand); + break; + } + ResetPending(); + return; + } + + EmitPending(); + pending_ = kStackSlotMove; + pending_address_src_ = src_operand; + pending_address_dst_ = dst_operand; +#ifdef DEBUG + pending_pc_ = masm_->pc_offset(); +#endif +} + + +void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) { + DCHECK(!scratch_register_acquired_); + if ((pending_ == kStoreConstant) && (value == pending_value_)) { + MemOperand::PairResult result = + MemOperand::AreConsistentForPair(pending_address_dst_, operand); + if (result != MemOperand::kNotPair) { + const MemOperand& dst = + (result == MemOperand::kPairAB) ? 
+ pending_address_dst_ : + operand; + DCHECK(pending_pc_ == masm_->pc_offset()); + if (pending_value_ == 0) { + __ Stp(xzr, xzr, dst); + } else { + SetSavedValue(pending_value_); + __ Stp(ScratchRegister(), ScratchRegister(), dst); + } + ResetPending(); + return; + } + } + + EmitPending(); + pending_ = kStoreConstant; + pending_address_dst_ = operand; + pending_value_ = value; +#ifdef DEBUG + pending_pc_ = masm_->pc_offset(); +#endif +} + + +void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) { + if ((pending_ == kLoad) && + pending_register_.IsSameSizeAndType(rd)) { + switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) { + case MemOperand::kNotPair: + break; + case MemOperand::kPairAB: + DCHECK(pending_pc_ == masm_->pc_offset()); + DCHECK(!IsScratchRegister(pending_register_) || + scratch_register_acquired_); + DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_); + __ Ldp(pending_register_, rd, pending_address_src_); + ResetPending(); + return; + case MemOperand::kPairBA: + DCHECK(pending_pc_ == masm_->pc_offset()); + DCHECK(!IsScratchRegister(pending_register_) || + scratch_register_acquired_); + DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_); + __ Ldp(rd, pending_register_, operand); + ResetPending(); + return; + } + } + + EmitPending(); + pending_ = kLoad; + pending_register_ = rd; + pending_address_src_ = operand; +#ifdef DEBUG + pending_pc_ = masm_->pc_offset(); +#endif +} + + +void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) { + if ((pending_ == kStore) && + pending_register_.IsSameSizeAndType(rd)) { + switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) { + case MemOperand::kNotPair: + break; + case MemOperand::kPairAB: + DCHECK(pending_pc_ == masm_->pc_offset()); + __ Stp(pending_register_, rd, pending_address_dst_); + ResetPending(); + return; + case MemOperand::kPairBA: + DCHECK(pending_pc_ == masm_->pc_offset()); + __ Stp(rd, pending_register_, 
operand); + ResetPending(); + return; + } + } + + EmitPending(); + pending_ = kStore; + pending_register_ = rd; + pending_address_dst_ = operand; +#ifdef DEBUG + pending_pc_ = masm_->pc_offset(); +#endif +} + + +void DelayedMasm::EmitPending() { + DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset())); + switch (pending_) { + case kNone: + return; + case kStoreConstant: + if (pending_value_ == 0) { + __ Str(xzr, pending_address_dst_); + } else { + SetSavedValue(pending_value_); + __ Str(ScratchRegister(), pending_address_dst_); + } + break; + case kLoad: + DCHECK(!IsScratchRegister(pending_register_) || + scratch_register_acquired_); + __ Ldr(pending_register_, pending_address_src_); + break; + case kStore: + __ Str(pending_register_, pending_address_dst_); + break; + case kStackSlotMove: { + UseScratchRegisterScope scope(masm_); + DoubleRegister temp = scope.AcquireD(); + __ Ldr(temp, pending_address_src_); + __ Str(temp, pending_address_dst_); + break; + } + } + ResetPending(); +} + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM64 diff -Nru nodejs-0.11.13/deps/v8/src/arm64/delayed-masm-arm64.h nodejs-0.11.15/deps/v8/src/arm64/delayed-masm-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/delayed-masm-arm64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/delayed-masm-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,164 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_ARM64_DELAYED_MASM_ARM64_H_ +#define V8_ARM64_DELAYED_MASM_ARM64_H_ + +#include "src/lithium.h" + +namespace v8 { +namespace internal { + +class LCodeGen; + +// This class delays the generation of some instructions. This way, we have a +// chance to merge two instructions in one (with load/store pair). +// Each instruction must either: +// - merge with the pending instruction and generate just one instruction. 
+// - emit the pending instruction and then generate the instruction (or set the +// pending instruction). +class DelayedMasm BASE_EMBEDDED { + public: + DelayedMasm(LCodeGen* owner, + MacroAssembler* masm, + const Register& scratch_register) + : cgen_(owner), masm_(masm), scratch_register_(scratch_register), + scratch_register_used_(false), pending_(kNone), saved_value_(0) { +#ifdef DEBUG + pending_register_ = no_reg; + pending_value_ = 0; + pending_pc_ = 0; + scratch_register_acquired_ = false; +#endif + } + ~DelayedMasm() { + DCHECK(!scratch_register_acquired_); + DCHECK(!scratch_register_used_); + DCHECK(!pending()); + } + inline void EndDelayedUse(); + + const Register& ScratchRegister() { + scratch_register_used_ = true; + return scratch_register_; + } + bool IsScratchRegister(const CPURegister& reg) { + return reg.Is(scratch_register_); + } + bool scratch_register_used() const { return scratch_register_used_; } + void reset_scratch_register_used() { scratch_register_used_ = false; } + // Acquire/Release scratch register for use outside this class. + void AcquireScratchRegister() { + EmitPending(); + ResetSavedValue(); +#ifdef DEBUG + DCHECK(!scratch_register_acquired_); + scratch_register_acquired_ = true; +#endif + } + void ReleaseScratchRegister() { +#ifdef DEBUG + DCHECK(scratch_register_acquired_); + scratch_register_acquired_ = false; +#endif + } + bool pending() { return pending_ != kNone; } + + // Extra layer over the macro-assembler instructions (which emits the + // potential pending instruction). + inline void Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg); + inline void Fmov(FPRegister fd, FPRegister fn); + inline void Fmov(FPRegister fd, double imm); + inline void LoadObject(Register result, Handle<Object> object); + // Instructions which try to merge which the pending instructions. 
+ void StackSlotMove(LOperand* src, LOperand* dst); + // StoreConstant can only be used if the scratch register is not acquired. + void StoreConstant(uint64_t value, const MemOperand& operand); + void Load(const CPURegister& rd, const MemOperand& operand); + void Store(const CPURegister& rd, const MemOperand& operand); + // Emit the potential pending instruction. + void EmitPending(); + // Reset the pending state. + void ResetPending() { + pending_ = kNone; +#ifdef DEBUG + pending_register_ = no_reg; + MemOperand tmp; + pending_address_src_ = tmp; + pending_address_dst_ = tmp; + pending_value_ = 0; + pending_pc_ = 0; +#endif + } + void InitializeRootRegister() { + masm_->InitializeRootRegister(); + } + + private: + // Set the saved value and load the ScratchRegister with it. + void SetSavedValue(uint64_t saved_value) { + DCHECK(saved_value != 0); + if (saved_value_ != saved_value) { + masm_->Mov(ScratchRegister(), saved_value); + saved_value_ = saved_value; + } + } + // Reset the saved value (i.e. the value of ScratchRegister is no longer + // known). + void ResetSavedValue() { + saved_value_ = 0; + } + + LCodeGen* cgen_; + MacroAssembler* masm_; + + // Register used to store a constant. + Register scratch_register_; + bool scratch_register_used_; + + // Sometimes we store or load two values in two contiguous stack slots. + // In this case, we try to use the ldp/stp instructions to reduce code size. + // To be able to do that, instead of generating directly the instructions, + // we register with the following fields that an instruction needs to be + // generated. Then with the next instruction, if the instruction is + // consistent with the pending one for stp/ldp we generate ldp/stp. Else, + // if they are not consistent, we generate the pending instruction and we + // register the new instruction (which becomes pending). + + // Enumeration of instructions which can be pending. 
+ enum Pending { + kNone, + kStoreConstant, + kLoad, kStore, + kStackSlotMove + }; + // The pending instruction. + Pending pending_; + // For kLoad, kStore: register which must be loaded/stored. + CPURegister pending_register_; + // For kLoad, kStackSlotMove: address of the load. + MemOperand pending_address_src_; + // For kStoreConstant, kStore, kStackSlotMove: address of the store. + MemOperand pending_address_dst_; + // For kStoreConstant: value to be stored. + uint64_t pending_value_; + // Value held into the ScratchRegister if the saved_value_ is not 0. + // For 0, we use xzr. + uint64_t saved_value_; +#ifdef DEBUG + // Address where the pending instruction must be generated. It's only used to + // check that nothing else has been generated since we set the pending + // instruction. + int pending_pc_; + // If true, the scratch register has been acquired outside this class. The + // scratch register can no longer be used for constants. + bool scratch_register_acquired_; +#endif +}; + +} } // namespace v8::internal + +#endif // V8_ARM64_DELAYED_MASM_ARM64_H_ diff -Nru nodejs-0.11.13/deps/v8/src/arm64/delayed-masm-arm64-inl.h nodejs-0.11.15/deps/v8/src/arm64/delayed-masm-arm64-inl.h --- nodejs-0.11.13/deps/v8/src/arm64/delayed-masm-arm64-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/delayed-masm-arm64-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,55 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_ARM64_DELAYED_MASM_ARM64_INL_H_ +#define V8_ARM64_DELAYED_MASM_ARM64_INL_H_ + +#include "src/arm64/delayed-masm-arm64.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm_) + + +void DelayedMasm::EndDelayedUse() { + EmitPending(); + DCHECK(!scratch_register_acquired_); + ResetSavedValue(); +} + + +void DelayedMasm::Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode) { + EmitPending(); + DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_); + __ Mov(rd, operand, discard_mode); +} + + +void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) { + EmitPending(); + __ Fmov(fd, fn); +} + + +void DelayedMasm::Fmov(FPRegister fd, double imm) { + EmitPending(); + __ Fmov(fd, imm); +} + + +void DelayedMasm::LoadObject(Register result, Handle<Object> object) { + EmitPending(); + DCHECK(!IsScratchRegister(result) || scratch_register_acquired_); + __ LoadObject(result, object); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_ARM64_DELAYED_MASM_ARM64_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/arm64/deoptimizer-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/deoptimizer-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/deoptimizer-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/deoptimizer-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "safepoint-table.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" namespace v8 { @@ -55,9 +32,6 @@ DeoptimizationInputData* deopt_data = DeoptimizationInputData::cast(code->deoptimization_data()); - SharedFunctionInfo* shared = - SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); - shared->EvictFromOptimizedCodeMap(code, "deoptimized code"); Address code_start_address = code->instruction_start(); #ifdef DEBUG Address prev_call_address = NULL; @@ -71,13 +45,13 @@ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); PatchingAssembler patcher(call_address, patch_size() / kInstructionSize); - patcher.LoadLiteral(ip0, 2 * kInstructionSize); + patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2); patcher.blr(ip0); patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry)); - ASSERT((prev_call_address == NULL) || + DCHECK((prev_call_address == NULL) || (call_address >= prev_call_address + patch_size())); - ASSERT(call_address + patch_size() <= code->instruction_end()); + DCHECK(call_address + patch_size() <= code->instruction_end()); #ifdef DEBUG prev_call_address = call_address; #endif @@ -116,7 +90,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { - ApiFunction function(descriptor->deoptimization_handler_); + ApiFunction function(descriptor->deoptimization_handler()); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); int params = descriptor->GetHandlerParameterCount(); @@ -133,10 +107,6 @@ } -Code* Deoptimizer::NotifyStubFailureBuiltin() { - return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles); -} - #define __ masm()-> @@ -202,7 +172,6 @@ // Copy core registers into the input frame. 
CPURegList copy_to_input = saved_registers; for (int i = 0; i < saved_registers.Count(); i++) { - // TODO(all): Look for opportunities to optimize this by using ldp/stp. __ Peek(x2, i * kPointerSize); CPURegister current_reg = copy_to_input.PopLowestIndex(); int offset = (current_reg.code() * kPointerSize) + @@ -212,7 +181,6 @@ // Copy FP registers to the input frame. for (int i = 0; i < saved_fp_registers.Count(); i++) { - // TODO(all): Look for opportunities to optimize this by using ldp/stp. int dst_offset = FrameDescription::double_registers_offset() + (i * kDoubleSize); int src_offset = kFPRegistersOffset + (i * kDoubleSize); @@ -282,7 +250,7 @@ __ B(lt, &outer_push_loop); __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset())); - ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) && + DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) && !saved_fp_registers.IncludesAliasOf(fp_zero) && !saved_fp_registers.IncludesAliasOf(fp_scratch)); int src_offset = FrameDescription::double_registers_offset(); @@ -309,7 +277,7 @@ // Note that lr is not in the list of saved_registers and will be restored // later. We can use it to hold the address of last output frame while // reloading the other registers. - ASSERT(!saved_registers.IncludesAliasOf(lr)); + DCHECK(!saved_registers.IncludesAliasOf(lr)); Register last_output_frame = lr; __ Mov(last_output_frame, current_frame); @@ -352,14 +320,14 @@ // The number of entry will never exceed kMaxNumberOfEntries. // As long as kMaxNumberOfEntries is a valid 16 bits immediate you can use // a movz instruction to load the entry id. 
- ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries)); + DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries)); for (int i = 0; i < count(); i++) { int start = masm()->pc_offset(); USE(start); __ movz(entry_id, i); __ b(&done); - ASSERT(masm()->pc_offset() - start == table_entry_size_); + DCHECK(masm()->pc_offset() - start == table_entry_size_); } } __ Bind(&done); diff -Nru nodejs-0.11.13/deps/v8/src/arm64/disasm-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/disasm-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/disasm-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/disasm-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,21 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <assert.h> -#include <stdio.h> #include <stdarg.h> +#include <stdio.h> #include <string.h> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "disasm.h" -#include "arm64/decoder-arm64-inl.h" -#include "arm64/disasm-arm64.h" -#include "macro-assembler.h" -#include "platform.h" +#include "src/arm64/decoder-arm64-inl.h" +#include "src/arm64/disasm-arm64.h" +#include "src/base/platform/platform.h" +#include "src/disasm.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -281,7 +258,7 @@ bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) { - ASSERT((reg_size == kXRegSizeInBits) || + DCHECK((reg_size == kXRegSizeInBits) || ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff))); // Test for movz: 16-bits set at positions 0, 16, 32 or 48. 
@@ -1199,7 +1176,7 @@ } } } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { - ASSERT(instr->Mask(SystemHintMask) == HINT); + DCHECK(instr->Mask(SystemHintMask) == HINT); switch (instr->ImmHint()) { case NOP: { mnemonic = "nop"; @@ -1269,7 +1246,7 @@ const char* format) { // TODO(mcapewel) don't think I can use the instr address here - there needs // to be a base address too - ASSERT(mnemonic != NULL); + DCHECK(mnemonic != NULL); ResetOutput(); Substitute(instr, mnemonic); if (format != NULL) { @@ -1387,7 +1364,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr, const char* format) { - ASSERT(format[0] == 'I'); + DCHECK(format[0] == 'I'); switch (format[1]) { case 'M': { // IMoveImm or IMoveLSL. @@ -1395,7 +1372,7 @@ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide()); AppendToOutput("#0x%" PRIx64, imm); } else { - ASSERT(format[5] == 'L'); + DCHECK(format[5] == 'L'); AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide()); if (instr->ShiftMoveWide() > 0) { AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide()); @@ -1407,7 +1384,7 @@ switch (format[2]) { case 'L': { // ILLiteral - Immediate Load Literal. AppendToOutput("pc%+" PRId64, - instr->ImmLLiteral() << kLiteralEntrySizeLog2); + instr->ImmLLiteral() << kLoadLiteralScaleLog2); return 9; } case 'S': { // ILS - Immediate Load/Store. @@ -1440,7 +1417,7 @@ return 6; } case 'A': { // IAddSub. 
- ASSERT(instr->ShiftAddSub() <= 1); + DCHECK(instr->ShiftAddSub() <= 1); int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub()); AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm); return 7; @@ -1497,7 +1474,7 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr, const char* format) { - ASSERT((format[0] == 'I') && (format[1] == 'B')); + DCHECK((format[0] == 'I') && (format[1] == 'B')); unsigned r = instr->ImmR(); unsigned s = instr->ImmS(); @@ -1511,13 +1488,13 @@ AppendToOutput("#%d", s + 1); return 5; } else { - ASSERT(format[3] == '-'); + DCHECK(format[3] == '-'); AppendToOutput("#%d", s - r + 1); return 7; } } case 'Z': { // IBZ-r. - ASSERT((format[3] == '-') && (format[4] == 'r')); + DCHECK((format[3] == '-') && (format[4] == 'r')); unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits; AppendToOutput("#%d", reg_size - r); @@ -1533,7 +1510,7 @@ int Disassembler::SubstituteLiteralField(Instruction* instr, const char* format) { - ASSERT(strncmp(format, "LValue", 6) == 0); + DCHECK(strncmp(format, "LValue", 6) == 0); USE(format); switch (instr->Mask(LoadLiteralMask)) { @@ -1549,12 +1526,12 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) { - ASSERT(format[0] == 'H'); - ASSERT(instr->ShiftDP() <= 0x3); + DCHECK(format[0] == 'H'); + DCHECK(instr->ShiftDP() <= 0x3); switch (format[1]) { case 'D': { // HDP. - ASSERT(instr->ShiftDP() != ROR); + DCHECK(instr->ShiftDP() != ROR); } // Fall through. case 'L': { // HLo. 
if (instr->ImmDPShift() != 0) { @@ -1573,7 +1550,7 @@ int Disassembler::SubstituteConditionField(Instruction* instr, const char* format) { - ASSERT(format[0] == 'C'); + DCHECK(format[0] == 'C'); const char* condition_code[] = { "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", "hi", "ls", "ge", "lt", @@ -1582,7 +1559,7 @@ switch (format[1]) { case 'B': cond = instr->ConditionBranch(); break; case 'I': { - cond = InvertCondition(static_cast<Condition>(instr->Condition())); + cond = NegateCondition(static_cast<Condition>(instr->Condition())); break; } default: cond = instr->Condition(); @@ -1595,12 +1572,12 @@ int Disassembler::SubstitutePCRelAddressField(Instruction* instr, const char* format) { USE(format); - ASSERT(strncmp(format, "AddrPCRel", 9) == 0); + DCHECK(strncmp(format, "AddrPCRel", 9) == 0); int offset = instr->ImmPCRel(); // Only ADR (AddrPCRelByte) is supported. - ASSERT(strcmp(format, "AddrPCRelByte") == 0); + DCHECK(strcmp(format, "AddrPCRelByte") == 0); char sign = '+'; if (offset < 0) { @@ -1615,7 +1592,7 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr, const char* format) { - ASSERT(strncmp(format, "BImm", 4) == 0); + DCHECK(strncmp(format, "BImm", 4) == 0); int64_t offset = 0; switch (format[5]) { @@ -1632,10 +1609,9 @@ offset <<= kInstructionSizeLog2; char sign = '+'; if (offset < 0) { - offset = -offset; sign = '-'; } - AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, + AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, Abs(offset), instr->InstructionAtOffset(offset), Instruction::NO_CHECK); return 8; } @@ -1643,8 +1619,8 @@ int Disassembler::SubstituteExtendField(Instruction* instr, const char* format) { - ASSERT(strncmp(format, "Ext", 3) == 0); - ASSERT(instr->ExtendMode() <= 7); + DCHECK(strncmp(format, "Ext", 3) == 0); + DCHECK(instr->ExtendMode() <= 7); USE(format); const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx", @@ -1670,7 +1646,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr, 
const char* format) { - ASSERT(strncmp(format, "Offsetreg", 9) == 0); + DCHECK(strncmp(format, "Offsetreg", 9) == 0); const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl", "undefined", "undefined", "sxtw", "sxtx" }; USE(format); @@ -1699,7 +1675,7 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr, const char* format) { - ASSERT(format[0] == 'P'); + DCHECK(format[0] == 'P'); USE(format); int prefetch_mode = instr->PrefetchMode(); @@ -1714,7 +1690,7 @@ int Disassembler::SubstituteBarrierField(Instruction* instr, const char* format) { - ASSERT(format[0] == 'M'); + DCHECK(format[0] == 'M'); USE(format); static const char* options[4][4] = { @@ -1758,7 +1734,7 @@ const char* NameConverter::NameOfAddress(byte* addr) const { - v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr); + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); return tmp_buffer_.start(); } @@ -1776,7 +1752,7 @@ if (ureg == v8::internal::kZeroRegCode) { return "xzr"; } - v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg); + v8::internal::SNPrintF(tmp_buffer_, "x%u", ureg); return tmp_buffer_.start(); } @@ -1810,7 +1786,7 @@ ~BufferDisassembler() { } virtual void ProcessOutput(v8::internal::Instruction* instr) { - v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput()); + v8::internal::SNPrintF(out_buffer_, "%s", GetOutput()); } private: @@ -1821,7 +1797,7 @@ : converter_(converter) {} -Disassembler::~Disassembler() {} +Disassembler::~Disassembler() { USE(converter_); } int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer, diff -Nru nodejs-0.11.13/deps/v8/src/arm64/disasm-arm64.h nodejs-0.11.15/deps/v8/src/arm64/disasm-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/disasm-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/disasm-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_DISASM_ARM64_H #define V8_ARM64_DISASM_ARM64_H -#include "v8.h" +#include "src/v8.h" -#include "globals.h" -#include "utils.h" -#include "instructions-arm64.h" -#include "decoder-arm64.h" +#include "src/arm64/decoder-arm64.h" +#include "src/arm64/instructions-arm64.h" +#include "src/globals.h" +#include "src/utils.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arm64/frames-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/frames-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/frames-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/frames-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "assembler.h" -#include "assembler-arm64.h" -#include "assembler-arm64-inl.h" -#include "frames.h" +#include "src/arm64/assembler-arm64-inl.h" +#include "src/arm64/assembler-arm64.h" +#include "src/assembler.h" +#include "src/frames.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/arm64/frames-arm64.h nodejs-0.11.15/deps/v8/src/arm64/frames-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/frames-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/frames-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "arm64/constants-arm64.h" -#include "arm64/assembler-arm64.h" +#include "src/arm64/assembler-arm64.h" +#include "src/arm64/constants-arm64.h" #ifndef V8_ARM64_FRAMES_ARM64_H_ #define V8_ARM64_FRAMES_ARM64_H_ @@ -38,7 +15,6 @@ // Registers x0-x17 are caller-saved. const int kNumJSCallerSaved = 18; const RegList kJSCallerSaved = 0x3ffff; -typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved]; // Number of registers for which space is reserved in safepoints. Must be a // multiple of eight. diff -Nru nodejs-0.11.13/deps/v8/src/arm64/full-codegen-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/full-codegen-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/full-codegen-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/full-codegen-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,23 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "code-stubs.h" -#include "codegen.h" -#include "compiler.h" -#include "debug.h" -#include "full-codegen.h" -#include "isolate-inl.h" -#include "parser.h" -#include "scopes.h" -#include "stub-cache.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" -#include "arm64/code-stubs-arm64.h" -#include "arm64/macro-assembler-arm64.h" +#include "src/arm64/code-stubs-arm64.h" +#include "src/arm64/macro-assembler-arm64.h" namespace v8 { namespace internal { @@ -57,18 +34,18 @@ ~JumpPatchSite() { if (patch_site_.is_bound()) { - ASSERT(info_emitted_); + DCHECK(info_emitted_); } else { - ASSERT(reg_.IsNone()); + DCHECK(reg_.IsNone()); } } void EmitJumpIfNotSmi(Register reg, Label* target) { // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc. InstructionAccurateScope scope(masm_, 1); - ASSERT(!info_emitted_); - ASSERT(reg.Is64Bits()); - ASSERT(!reg.Is(csp)); + DCHECK(!info_emitted_); + DCHECK(reg.Is64Bits()); + DCHECK(!reg.Is(csp)); reg_ = reg; __ bind(&patch_site_); __ tbz(xzr, 0, target); // Always taken before patched. @@ -77,9 +54,9 @@ void EmitJumpIfSmi(Register reg, Label* target) { // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc. InstructionAccurateScope scope(masm_, 1); - ASSERT(!info_emitted_); - ASSERT(reg.Is64Bits()); - ASSERT(!reg.Is(csp)); + DCHECK(!info_emitted_); + DCHECK(reg.Is64Bits()); + DCHECK(!reg.Is(csp)); reg_ = reg; __ bind(&patch_site_); __ tbnz(xzr, 0, target); // Never taken before patched. 
@@ -110,25 +87,6 @@ }; -static void EmitStackCheck(MacroAssembler* masm_, - int pointers = 0, - Register scratch = jssp) { - Isolate* isolate = masm_->isolate(); - Label ok; - ASSERT(jssp.Is(__ StackPointer())); - ASSERT(scratch.Is(jssp) == (pointers == 0)); - if (pointers != 0) { - __ Sub(scratch, jssp, pointers * kPointerSize); - } - __ CompareRoot(scratch, Heap::kStackLimitRootIndex); - __ B(hs, &ok); - PredictableCodeSizeScope predictable(masm_, - Assembler::kCallSizeWithRelocation); - __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET); - __ Bind(&ok); -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right. The actual // argument count matches the formal parameter count expected by the @@ -148,8 +106,6 @@ handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); - InitializeFeedbackVector(); - profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); @@ -174,7 +130,7 @@ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); __ Ldr(x10, GlobalObjectMemOperand()); - __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); + __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset)); __ Poke(x10, receiver_offset); __ Bind(&ok); @@ -191,18 +147,24 @@ // Push(lr, fp, cp, x1); // Add(fp, jssp, 2 * kPointerSize); info->set_prologue_offset(masm_->pc_offset()); - __ Prologue(BUILD_FUNCTION_FRAME); + __ Prologue(info->IsCodePreAgingActive()); info->AddNoFrameRange(0, masm_->pc_offset()); // Reserve space on the stack for locals. { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = info->scope()->num_stack_slots(); // Generators allocate locals, if any, in context slots. 
- ASSERT(!info->function()->is_generator() || locals_count == 0); + DCHECK(!info->function()->is_generator() || locals_count == 0); if (locals_count > 0) { if (locals_count >= 128) { - EmitStackCheck(masm_, locals_count, x10); + Label ok; + DCHECK(jssp.Is(__ StackPointer())); + __ Sub(x10, jssp, locals_count * kPointerSize); + __ CompareRoot(x10, Heap::kRealStackLimitRootIndex); + __ B(hs, &ok); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ Bind(&ok); } __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); if (FLAG_optimize_for_size) { @@ -232,16 +194,19 @@ if (heap_slots > 0) { // Argument to NewContext is the function, which is still in x1. Comment cmnt(masm_, "[ Allocate context"); + bool need_write_barrier = true; if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { __ Mov(x10, Operand(info->scope()->GetScopeInfo())); __ Push(x1, x10); - __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2); + __ CallRuntime(Runtime::kNewGlobalContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ Push(x1); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } function_in_register_x1 = false; // Context is returned in x0. It replaces the context passed to us. @@ -262,8 +227,15 @@ __ Str(x10, target); // Update the write barrier. 
- __ RecordWriteContextSlot( - cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } } @@ -297,7 +269,7 @@ } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(isolate(), type); __ CallStub(&stub); SetVar(arguments, x0, x1, x2); @@ -319,9 +291,9 @@ { Comment cmnt(masm_, "[ Declarations"); if (scope()->is_function_scope() && scope()->function() != NULL) { VariableDeclaration* function = scope()->function(); - ASSERT(function->proxy()->var()->mode() == CONST || + DCHECK(function->proxy()->var()->mode() == CONST || function->proxy()->var()->mode() == CONST_LEGACY); - ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); @@ -330,13 +302,20 @@ { Comment cmnt(masm_, "[ Stack check"); PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); - EmitStackCheck(masm_); + Label ok; + DCHECK(jssp.Is(__ StackPointer())); + __ CompareRoot(jssp, Heap::kStackLimitRootIndex); + __ B(hs, &ok); + PredictableCodeSizeScope predictable(masm_, + Assembler::kCallSizeWithRelocation); + __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET); + __ Bind(&ok); } { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); } // Always emit a 'return undefined' in case control fell off the end of @@ -368,7 +347,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() { int reset_value = FLAG_interrupt_budget; - if (isolate()->IsDebuggerActive()) 
{ + if (info_->is_debug()) { // Detect debug break requests as soon as possible. reset_value = FLAG_interrupt_budget >> 4; } @@ -380,14 +359,19 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, Label* back_edge_target) { - ASSERT(jssp.Is(__ StackPointer())); + DCHECK(jssp.Is(__ StackPointer())); Comment cmnt(masm_, "[ Back edge bookkeeping"); // Block literal pools whilst emitting back edge code. Assembler::BlockPoolsScope block_const_pool(masm_); Label ok; - ASSERT(back_edge_target->is_bound()); - int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + DCHECK(back_edge_target->is_bound()); + // We want to do a round rather than a floor of distance/kCodeSizeMultiplier + // to reduce the absolute error due to the integer division. To do that, + // we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to + // the result). + int distance = + masm_->SizeOfCodeGeneratedSince(back_edge_target) + kCodeSizeMultiplier / 2; int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier)); EmitProfilingCounterDecrement(weight); @@ -423,14 +407,14 @@ // Runtime::TraceExit returns its parameter in x0. __ Push(result_register()); __ CallRuntime(Runtime::kTraceExit, 1); - ASSERT(x0.Is(result_register())); + DCHECK(x0.Is(result_register())); } // Pretend that the exit is a backwards jump to the entry. int weight = 1; if (info_->ShouldSelfOptimize()) { weight = FLAG_interrupt_budget / FLAG_self_opt_count; } else { - int distance = masm_->pc_offset(); + int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2; weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier)); } @@ -457,7 +441,7 @@ // of the generated code must be consistent. const Register& current_sp = __ StackPointer(); // Nothing ensures 16 bytes alignment here. 
- ASSERT(!current_sp.Is(csp)); + DCHECK(!current_sp.Is(csp)); __ mov(current_sp, fp); int no_frame_start = masm_->pc_offset(); __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex)); @@ -465,7 +449,7 @@ // TODO(all): This implementation is overkill as it supports 2**31+1 // arguments, consider how to improve it without creating a security // hole. - __ LoadLiteral(ip0, 3 * kInstructionSize); + __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2); __ add(current_sp, current_sp, ip0); __ ret(); __ dc64(kXRegSize * (info_->scope()->num_parameters() + 1)); @@ -476,25 +460,25 @@ void FullCodeGenerator::EffectContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); } void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); } void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); __ Push(result_register()); } void FullCodeGenerator::TestContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); // For simplicity we always test the accumulator register. codegen()->GetVar(result_register(), var); codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); @@ -558,7 +542,7 @@ true, true_label_, false_label_); - ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. 
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { if (false_label_ != fall_through_) __ B(false_label_); } else if (lit->IsTrue() || lit->IsJSObject()) { @@ -585,7 +569,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); } @@ -593,7 +577,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); __ Move(result_register(), reg); } @@ -601,7 +585,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); if (count > 1) __ Drop(count - 1); __ Poke(reg, 0); } @@ -609,7 +593,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); // For simplicity we always test the accumulator register. __ Drop(count); __ Mov(result_register(), reg); @@ -620,7 +604,7 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == materialize_false); + DCHECK(materialize_true == materialize_false); __ Bind(materialize_true); } @@ -654,8 +638,8 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == true_label_); - ASSERT(materialize_false == false_label_); + DCHECK(materialize_true == true_label_); + DCHECK(materialize_false == false_label_); } @@ -716,8 +700,8 @@ if (if_false == fall_through) { __ B(cond, if_true); } else if (if_true == fall_through) { - ASSERT(if_false != fall_through); - __ B(InvertCondition(cond), if_false); + DCHECK(if_false != fall_through); + __ B(NegateCondition(cond), if_false); } else { __ B(cond, if_true); __ B(if_false); @@ -739,7 +723,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { - ASSERT(var->IsContextSlot() || 
var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); if (var->IsContextSlot()) { int context_chain_length = scope()->ContextChainLength(var->scope()); __ LoadContext(scratch, context_chain_length); @@ -761,8 +745,8 @@ Register src, Register scratch0, Register scratch1) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); - ASSERT(!AreAliased(src, scratch0, scratch1)); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!AreAliased(src, scratch0, scratch1)); MemOperand location = VarOperand(var, scratch0); __ Str(src, location); @@ -805,7 +789,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { // The variable in the declaration always resides in the current function // context. - ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset)); @@ -860,7 +844,7 @@ Comment cmnt(masm_, "[ VariableDeclaration"); __ Mov(x2, Operand(variable->name())); // Declaration nodes are always introduced in one of four modes. - ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY : NONE; __ Mov(x1, Smi::FromInt(attr)); @@ -875,7 +859,7 @@ // Pushing 0 (xzr) indicates no initial value. __ Push(cp, x2, x1, xzr); } - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -890,7 +874,7 @@ case Variable::UNALLOCATED: { globals_->Add(variable->name(), zone()); Handle<SharedFunctionInfo> function = - Compiler::BuildFunctionInfo(declaration->fun(), script()); + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); // Check for stack overflow exception. 
if (function.is_null()) return SetStackOverflow(); globals_->Add(function, zone()); @@ -931,7 +915,7 @@ __ Push(cp, x2, x1); // Push initial value for function declaration. VisitForStackValue(declaration->fun()); - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -940,8 +924,8 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { Variable* variable = declaration->proxy()->var(); - ASSERT(variable->location() == Variable::CONTEXT); - ASSERT(variable->interface()->IsFrozen()); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); Comment cmnt(masm_, "[ ModuleDeclaration"); EmitDebugCheckDeclarationContext(variable); @@ -1006,7 +990,7 @@ __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags())); } __ Push(cp, x11, flags); - __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3); + __ CallRuntime(Runtime::kDeclareGlobals, 3); // Return value is ignored. } @@ -1014,7 +998,7 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { // Call the runtime to declare the modules. __ Push(descriptions); - __ CallRuntime(Runtime::kHiddenDeclareModules, 1); + __ CallRuntime(Runtime::kDeclareModules, 1); // Return value is ignored. } @@ -1181,11 +1165,9 @@ FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset)); // Set up the four remaining stack slots. - __ Push(x0); // Map. - __ Mov(x0, Smi::FromInt(0)); - // Push enumeration cache, enumeration cache length (as smi) and zero. __ SmiTag(x1); - __ Push(x2, x1, x0); + // Map, enumeration cache, enum cache length, zero (both last as smis). + __ Push(x0, x2, x1, xzr); __ B(&loop); __ Bind(&no_descriptors); @@ -1195,12 +1177,8 @@ // We got a fixed array in register x0. Iterate through that. 
__ Bind(&fixed_array); - Handle<Object> feedback = Handle<Object>( - Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), - isolate()); - StoreFeedbackVectorSlot(slot, feedback); __ LoadObject(x1, FeedbackVector()); - __ Mov(x10, Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)); + __ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot))); __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check. @@ -1208,11 +1186,11 @@ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); // TODO(all): similar check was done already. Can we avoid it here? __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE); - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); __ CzeroX(x1, le); // Zero indicates proxy. - __ Push(x1, x0); // Smi and array - __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset)); - __ Push(x1, xzr); // Fixed array length (as smi) and initial index. + __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset)); + // Smi and array, fixed array length (as smi) and initial index. + __ Push(x1, x0, x2, xzr); // Generate code for doing the condition check. PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); @@ -1293,26 +1271,8 @@ Iteration loop_statement(this, stmt); increment_loop_depth(); - // var iterator = iterable[@@iterator]() - VisitForAccumulatorValue(stmt->assign_iterator()); - - // As with for-in, skip the loop if the iterator is null or undefined. - Register iterator = x0; - __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex, - loop_statement.break_label()); - __ JumpIfRoot(iterator, Heap::kNullValueRootIndex, - loop_statement.break_label()); - - // Convert the iterator to a JS object. 
- Label convert, done_convert; - __ JumpIfSmi(iterator, &convert); - __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE); - __ B(ge, &done_convert); - __ Bind(&convert); - __ Push(iterator); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ Bind(&done_convert); - __ Push(iterator); + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); // Loop entry. __ Bind(loop_statement.continue_label()); @@ -1359,7 +1319,9 @@ !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode(), info->is_generator()); + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); __ Mov(x2, Operand(info)); __ CallStub(&stub); } else { @@ -1367,7 +1329,7 @@ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex); __ Push(cp, x11, x10); - __ CallRuntime(Runtime::kHiddenNewClosure, 3); + __ CallRuntime(Runtime::kNewClosure, 3); } context()->Plug(x0); } @@ -1379,7 +1341,7 @@ } -void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { Register current = cp; @@ -1422,8 +1384,13 @@ __ Bind(&fast); } - __ Ldr(x0, GlobalObjectMemOperand()); - __ Mov(x2, Operand(var->name())); + __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand()); + __ Mov(LoadIC::NameRegister(), Operand(proxy->var()->name())); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(proxy->VariableFeedbackSlot())); + } + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? 
NOT_CONTEXTUAL : CONTEXTUAL; CallLoadIC(mode); @@ -1432,7 +1399,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, Label* slow) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); Register context = cp; Register next = x10; Register temp = x11; @@ -1460,7 +1427,7 @@ } -void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofState typeof_state, Label* slow, Label* done) { @@ -1469,8 +1436,9 @@ // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. + Variable* var = proxy->var(); if (var->mode() == DYNAMIC_GLOBAL) { - EmitLoadGlobalCheckExtensions(var, typeof_state, slow); + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); __ B(done); } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); @@ -1483,7 +1451,7 @@ } else { // LET || CONST __ Mov(x0, Operand(var->name())); __ Push(x0); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } } __ B(done); @@ -1501,10 +1469,12 @@ switch (var->location()) { case Variable::UNALLOCATED: { Comment cmnt(masm_, "Global variable"); - // Use inline caching. Variable name is passed in x2 and the global - // object (receiver) in x0. - __ Ldr(x0, GlobalObjectMemOperand()); - __ Mov(x2, Operand(var->name())); + __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand()); + __ Mov(LoadIC::NameRegister(), Operand(var->name())); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(proxy->VariableFeedbackSlot())); + } CallLoadIC(CONTEXTUAL); context()->Plug(x0); break; @@ -1522,7 +1492,7 @@ // always looked up dynamically, i.e. in that case // var->location() == LOOKUP. // always holds. 
- ASSERT(var->scope() != NULL); + DCHECK(var->scope() != NULL); // Check if the binding really needs an initialization check. The check // can be skipped in the following situation: we have a LET or CONST @@ -1545,8 +1515,8 @@ skip_init_check = false; } else { // Check that we always have valid source position. - ASSERT(var->initializer_position() != RelocInfo::kNoPosition); - ASSERT(proxy->position() != RelocInfo::kNoPosition); + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); skip_init_check = var->mode() != CONST_LEGACY && var->initializer_position() < proxy->position(); } @@ -1561,11 +1531,11 @@ // binding in harmony mode. __ Mov(x0, Operand(var->name())); __ Push(x0); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); __ Bind(&done); } else { // Uninitalized const bindings outside of harmony mode are unholed. - ASSERT(var->mode() == CONST_LEGACY); + DCHECK(var->mode() == CONST_LEGACY); __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); __ Bind(&done); } @@ -1581,12 +1551,12 @@ Label done, slow; // Generate code for loading from variables potentially shadowed by // eval-introduced variables. - EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); __ Bind(&slow); Comment cmnt(masm_, "Lookup variable"); __ Mov(x1, Operand(var->name())); __ Push(cp, x1); // Context and name. 
- __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ Bind(&done); context()->Plug(x0); break; @@ -1618,7 +1588,7 @@ __ Mov(x2, Operand(expr->pattern())); __ Mov(x1, Operand(expr->flags())); __ Push(x4, x3, x2, x1); - __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); __ Mov(x5, x0); __ Bind(&materialized); @@ -1630,7 +1600,7 @@ __ Bind(&runtime_allocate); __ Mov(x10, Smi::FromInt(size)); __ Push(x5, x10); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ Pop(x5); __ Bind(&allocated); @@ -1672,13 +1642,13 @@ int properties_count = constant_properties->length() / 2; const int max_cloned_properties = FastCloneShallowObjectStub::kMaximumClonedProperties; - if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() || - flags != ObjectLiteral::kFastElements || + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements || properties_count > max_cloned_properties) { __ Push(x3, x2, x1, x0); - __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { - FastCloneShallowObjectStub stub(properties_count); + FastCloneShallowObjectStub stub(isolate(), properties_count); __ CallStub(&stub); } @@ -1706,14 +1676,15 @@ case ObjectLiteral::Property::CONSTANT: UNREACHABLE(); case ObjectLiteral::Property::MATERIALIZED_LITERAL: - ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value())); + DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value())); // Fall through. 
case ObjectLiteral::Property::COMPUTED: if (key->value()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); - __ Mov(x2, Operand(key->value())); - __ Peek(x1, 0); + DCHECK(StoreIC::ValueRegister().is(x0)); + __ Mov(StoreIC::NameRegister(), Operand(key->value())); + __ Peek(StoreIC::ReceiverRegister(), 0); CallStoreIC(key->LiteralFeedbackId()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { @@ -1727,7 +1698,7 @@ __ Push(x0); VisitForStackValue(key); VisitForStackValue(value); - __ Mov(x0, Smi::FromInt(NONE)); // PropertyAttributes + __ Mov(x0, Smi::FromInt(SLOPPY)); // Strict mode __ Push(x0); __ CallRuntime(Runtime::kSetProperty, 4); } else { @@ -1767,11 +1738,11 @@ EmitAccessor(it->second->setter); __ Mov(x10, Smi::FromInt(NONE)); __ Push(x10); - __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); } if (expr->has_function()) { - ASSERT(result_saved); + DCHECK(result_saved); __ Peek(x0, 0); __ Push(x0); __ CallRuntime(Runtime::kToFastProperties, 1); @@ -1795,7 +1766,7 @@ ZoneList<Expression*>* subexprs = expr->values(); int length = subexprs->length(); Handle<FixedArray> constant_elements = expr->constant_elements(); - ASSERT_EQ(2, constant_elements->length()); + DCHECK_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind); @@ -1813,31 +1784,12 @@ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset)); __ Mov(x2, Smi::FromInt(expr->literal_index())); __ Mov(x1, Operand(constant_elements)); - if (has_fast_elements && constant_elements_values->map() == - isolate()->heap()->fixed_cow_array_map()) { - FastCloneShallowArrayStub stub( - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, - allocation_site_mode, - length); - __ CallStub(&stub); - __ IncrementCounter( - 
isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11); - } else if ((expr->depth() > 1) || Serializer::enabled() || - length > FastCloneShallowArrayStub::kMaximumClonedLength) { + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { __ Mov(x0, Smi::FromInt(flags)); __ Push(x3, x2, x1, x0); - __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); } else { - ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || - FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = - FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - - if (has_fast_elements) { - mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; - } - - FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); __ CallStub(&stub); } @@ -1852,8 +1804,8 @@ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; if (!result_saved) { - __ Push(x0); - __ Push(Smi::FromInt(expr->literal_index())); + __ Mov(x1, Smi::FromInt(expr->literal_index())); + __ Push(x0, x1); result_saved = true; } VisitForAccumulatorValue(subexpr); @@ -1869,7 +1821,7 @@ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); } else { __ Mov(x3, Smi::FromInt(i)); - StoreArrayLiteralElementStub stub; + StoreArrayLiteralElementStub stub(isolate()); __ CallStub(&stub); } @@ -1886,7 +1838,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { - ASSERT(expr->target()->IsValidLeftHandSide()); + DCHECK(expr->target()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ Assignment"); @@ -1908,9 +1860,9 @@ break; case NAMED_PROPERTY: if (expr->is_compound()) { - // We need the receiver both on the stack and in the accumulator. - VisitForAccumulatorValue(property->obj()); - __ Push(result_register()); + // We need the receiver both on the stack and in the register. 
+ VisitForStackValue(property->obj()); + __ Peek(LoadIC::ReceiverRegister(), 0); } else { VisitForStackValue(property->obj()); } @@ -1918,9 +1870,9 @@ case KEYED_PROPERTY: if (expr->is_compound()) { VisitForStackValue(property->obj()); - VisitForAccumulatorValue(property->key()); - __ Peek(x1, 0); - __ Push(x0); + VisitForStackValue(property->key()); + __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize); + __ Peek(LoadIC::NameRegister(), 0); } else { VisitForStackValue(property->obj()); VisitForStackValue(property->key()); @@ -1997,9 +1949,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - __ Mov(x2, Operand(key->value())); - // Call load IC. It has arguments receiver and property name x0 and x2. - CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + __ Mov(LoadIC::NameRegister(), Operand(key->value())); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(prop->PropertyFeedbackSlot())); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } } @@ -2007,7 +1964,13 @@ SetSourcePosition(prop->position()); // Call keyed load IC. It has arguments key and receiver in r0 and r1. 
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - CallIC(ic, prop->PropertyFeedbackId()); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(prop->PropertyFeedbackSlot())); + CallIC(ic); + } else { + CallIC(ic, prop->PropertyFeedbackId()); + } } @@ -2030,10 +1993,10 @@ patch_site.EmitJumpIfSmi(x10, &both_smis); __ Bind(&stub_call); - BinaryOpICStub stub(op, mode); + BinaryOpICStub stub(isolate(), op, mode); { Assembler::BlockPoolsScope scope(masm_); - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); } __ B(&done); @@ -2078,11 +2041,12 @@ break; case Token::MUL: { Label not_minus_zero, done; + STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2)); + STATIC_ASSERT(kSmiTag == 0); __ Smulh(x10, left, right); __ Cbnz(x10, ¬_minus_zero); __ Eor(x11, left, right); __ Tbnz(x11, kXSignBit, &stub_call); - STATIC_ASSERT(kSmiTag == 0); __ Mov(result, x10); __ B(&done); __ Bind(¬_minus_zero); @@ -2115,11 +2079,11 @@ Token::Value op, OverwriteMode mode) { __ Pop(x1); - BinaryOpICStub stub(op, mode); + BinaryOpICStub stub(isolate(), op, mode); JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code. { Assembler::BlockPoolsScope scope(masm_); - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); } context()->Plug(x0); @@ -2127,7 +2091,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) { - ASSERT(expr->IsValidLeftHandSide()); + DCHECK(expr->IsValidReferenceExpression()); // Left-hand side can only be a property, a global or a (parameter or local) // slot. @@ -2152,9 +2116,10 @@ VisitForAccumulatorValue(prop->obj()); // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid // this copy. - __ Mov(x1, x0); - __ Pop(x0); // Restore value. 
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value())); + __ Mov(StoreIC::ReceiverRegister(), x0); + __ Pop(StoreIC::ValueRegister()); // Restore value. + __ Mov(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); CallStoreIC(); break; } @@ -2162,8 +2127,8 @@ __ Push(x0); // Preserve value. VisitForStackValue(prop->obj()); VisitForAccumulatorValue(prop->key()); - __ Mov(x1, x0); - __ Pop(x2, x0); + __ Mov(KeyedStoreIC::NameRegister(), x0); + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::ValueRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -2188,38 +2153,24 @@ } -void FullCodeGenerator::EmitCallStoreContextSlot( - Handle<String> name, StrictMode strict_mode) { - __ Mov(x11, Operand(name)); - __ Mov(x10, Smi::FromInt(strict_mode)); - // jssp[0] : mode. - // jssp[8] : name. - // jssp[16] : context. - // jssp[24] : value. - __ Push(x0, cp, x11, x10); - __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4); -} - - void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment"); if (var->IsUnallocated()) { // Global var, const, or let. - __ Mov(x2, Operand(var->name())); - __ Ldr(x1, GlobalObjectMemOperand()); + __ Mov(StoreIC::NameRegister(), Operand(var->name())); + __ Ldr(StoreIC::ReceiverRegister(), GlobalObjectMemOperand()); CallStoreIC(); } else if (op == Token::INIT_CONST_LEGACY) { // Const initializers need a write barrier. - ASSERT(!var->IsParameter()); // No const parameters. + DCHECK(!var->IsParameter()); // No const parameters. if (var->IsLookupSlot()) { - __ Push(x0); - __ Mov(x0, Operand(var->name())); - __ Push(cp, x0); // Context and name. 
- __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3); + __ Mov(x1, Operand(var->name())); + __ Push(x0, cp, x1); + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); } else { - ASSERT(var->IsStackLocal() || var->IsContextSlot()); + DCHECK(var->IsStackLocal() || var->IsContextSlot()); Label skip; MemOperand location = VarOperand(var, x1); __ Ldr(x10, location); @@ -2230,29 +2181,34 @@ } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. - if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); - } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); - Label assign; - MemOperand location = VarOperand(var, x1); - __ Ldr(x10, location); - __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign); - __ Mov(x10, Operand(var->name())); - __ Push(x10); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); - // Perform the assignment. - __ Bind(&assign); - EmitStoreToStackLocalOrContextSlot(var, location); - } + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, x1); + __ Ldr(x10, location); + __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign); + __ Mov(x10, Operand(var->name())); + __ Push(x10); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + // Perform the assignment. + __ Bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); } else if (!var->is_const_mode() || op == Token::INIT_CONST) { - // Assignment to var or initializing assignment to let/const - // in harmony mode. if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); + // Assignment to var. + __ Mov(x11, Operand(var->name())); + __ Mov(x10, Smi::FromInt(strict_mode())); + // jssp[0] : mode. + // jssp[8] : name. + // jssp[16] : context. + // jssp[24] : value. 
+ __ Push(x0, cp, x11, x10); + __ CallRuntime(Runtime::kStoreLookupSlot, 4); } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); MemOperand location = VarOperand(var, x1); if (FLAG_debug_code && op == Token::INIT_LET) { __ Ldr(x10, location); @@ -2270,14 +2226,13 @@ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment"); // Assignment to a property, using a named store IC. Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); - ASSERT(prop->key()->AsLiteral() != NULL); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); // Record source code position before IC call. SetSourcePosition(expr->position()); - __ Mov(x2, Operand(prop->key()->AsLiteral()->value())); - __ Pop(x1); - + __ Mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value())); + __ Pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->AssignmentFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); @@ -2292,7 +2247,8 @@ // Record source code position before IC call. SetSourcePosition(expr->position()); // TODO(all): Could we pass this in registers rather than on the stack? - __ Pop(x1, x2); // Key and object holding the property. + __ Pop(KeyedStoreIC::NameRegister(), KeyedStoreIC::ReceiverRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(x0)); Handle<Code> ic = strict_mode() == SLOPPY ? 
isolate()->builtins()->KeyedStoreIC_Initialize() @@ -2310,13 +2266,15 @@ if (key->IsPropertyName()) { VisitForAccumulatorValue(expr->obj()); + __ Move(LoadIC::ReceiverRegister(), x0); EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(x0); } else { VisitForStackValue(expr->obj()); VisitForAccumulatorValue(expr->key()); - __ Pop(x1); + __ Move(LoadIC::NameRegister(), x0); + __ Pop(LoadIC::ReceiverRegister()); EmitKeyedPropertyLoad(expr); context()->Plug(x0); } @@ -2333,16 +2291,15 @@ // Code common for calls using the IC. -void FullCodeGenerator::EmitCallWithIC(Call* expr) { - ASM_LOCATION("EmitCallWithIC"); - +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); - CallFunctionFlags flags; + CallIC::CallType call_type = callee->IsVariableProxy() + ? CallIC::FUNCTION + : CallIC::METHOD; + // Get the target function. - if (callee->IsVariableProxy()) { + if (call_type == CallIC::FUNCTION) { { StackValueContext context(this); EmitVariableLoad(callee->AsVariableProxy()); PrepareForBailout(callee, NO_REGISTERS); @@ -2350,54 +2307,33 @@ // Push undefined as receiver. This is patched in the method prologue if it // is a sloppy mode method. __ Push(isolate()->factory()->undefined_value()); - flags = NO_CALL_FUNCTION_FLAGS; } else { // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ Peek(x0, 0); + DCHECK(callee->IsProperty()); + __ Peek(LoadIC::ReceiverRegister(), 0); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); // Push the target function under the receiver. __ Pop(x10); __ Push(x0, x10); - flags = CALL_AS_METHOD; - } - - // Load the arguments. 
- { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } } - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, flags); - __ Peek(x1, (arg_count + 1) * kPointerSize); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - - // Restore context register. - __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, x0); + EmitCall(expr, call_type); } // Code common for calls using the IC. -void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, - Expression* key) { +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { // Load the key. VisitForAccumulatorValue(key); Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ Peek(x1, 0); + DCHECK(callee->IsProperty()); + __ Peek(LoadIC::ReceiverRegister(), 0); + __ Move(LoadIC::NameRegister(), x0); EmitKeyedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2405,28 +2341,12 @@ __ Pop(x10); __ Push(x0, x10); - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, CALL_AS_METHOD); - __ Peek(x1, (arg_count + 1) * kPointerSize); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - // Restore context register. - __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, x0); + EmitCall(expr, CallIC::METHOD); } -void FullCodeGenerator::EmitCallWithStub(Call* expr) { - // Code common for calls using the call stub. 
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); { PreservePositionScope scope(masm()->positions_recorder()); @@ -2434,19 +2354,17 @@ VisitForStackValue(args->at(i)); } } - // Record source position for debugger. + // Record source position of the IC call. SetSourcePosition(expr->position()); - Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); - __ LoadObject(x2, FeedbackVector()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); __ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot())); - - // Record call targets in unoptimized code. - CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); __ Peek(x1, (arg_count + 1) * kXRegSize); - __ CallStub(&stub); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); + RecordJSReturnSite(expr); // Restore context register. __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2468,19 +2386,16 @@ int receiver_offset = 2 + info_->scope()->num_parameters(); __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize)); - // Push. - __ Push(x10, x11); - // Prepare to push the language mode. - __ Mov(x10, Smi::FromInt(strict_mode())); + __ Mov(x12, Smi::FromInt(strict_mode())); // Prepare to push the start position of the scope the calls resides in. - __ Mov(x11, Smi::FromInt(scope()->start_position())); + __ Mov(x13, Smi::FromInt(scope()->start_position())); // Push. - __ Push(x10, x11); + __ Push(x10, x11, x12, x13); // Do the runtime call. - __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); } @@ -2529,7 +2444,7 @@ SetSourcePosition(expr->position()); // Call the evaluated function. 
- CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ Peek(x1, (arg_count + 1) * kXRegSize); __ CallStub(&stub); RecordJSReturnSite(expr); @@ -2538,7 +2453,7 @@ context()->DropAndPlug(1, x0); } else if (call_type == Call::GLOBAL_CALL) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else if (call_type == Call::LOOKUP_SLOT_CALL) { // Call to a lookup slot (dynamically introduced variable). @@ -2548,16 +2463,15 @@ { PreservePositionScope scope(masm()->positions_recorder()); // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); } __ Bind(&slow); // Call the runtime to find the function to call (returned in x0) // and the object holding it (returned in x1). - __ Push(context_register()); __ Mov(x10, Operand(proxy->name())); - __ Push(x10); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ Push(context_register(), x10); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ Push(x0, x1); // Receiver, function. // If fast case code has been generated, emit code to push the @@ -2568,30 +2482,29 @@ __ B(&call); __ Bind(&done); // Push function. - __ Push(x0); // The receiver is implicitly the global receiver. Indicate this // by passing the undefined to the call function stub. __ LoadRoot(x1, Heap::kUndefinedValueRootIndex); - __ Push(x1); + __ Push(x0, x1); __ Bind(&call); } // The receiver is either the global receiver or an object found // by LoadContextSlot. 
- EmitCallWithStub(expr); + EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(property->obj()); } if (property->key()->IsPropertyName()) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else { - EmitKeyedCallWithIC(expr, property->key()); + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { - ASSERT(call_type == Call::OTHER_CALL); + DCHECK(call_type == Call::OTHER_CALL); // Call to an arbitrary expression not handled specially above. { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(callee); @@ -2599,12 +2512,12 @@ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex); __ Push(x1); // Emit function call. - EmitCallWithStub(expr); + EmitCall(expr); } #ifdef DEBUG // RecordJSReturnSite should have been called. - ASSERT(expr->return_is_recorded_); + DCHECK(expr->return_is_recorded_); #endif } @@ -2636,21 +2549,17 @@ __ Peek(x1, arg_count * kXRegSize); // Record call targets in unoptimized code. 
- Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); if (FLAG_pretenuring_call_new) { - StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(), - isolate()->factory()->NewAllocationSite()); - ASSERT(expr->AllocationSiteFeedbackSlot() == + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == expr->CallNewFeedbackSlot() + 1); } __ LoadObject(x2, FeedbackVector()); __ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot())); - CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(x0); } @@ -2658,7 +2567,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2678,7 +2587,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2689,9 +2598,10 @@ context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); + uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true, - if_false, fall_through); + __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through); context()->Plug(if_true, if_false); } @@ -2699,7 +2609,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + 
DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2729,7 +2639,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2752,7 +2662,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject"); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2777,7 +2687,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); Label materialize_true, materialize_false, skip_lookup; @@ -2878,7 +2788,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2900,7 +2810,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2927,7 +2837,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2949,7 +2859,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2971,7 +2881,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + 
DCHECK(expr->arguments()->length() == 0); Label materialize_true, materialize_false; Label* if_true = NULL; @@ -3003,7 +2913,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); // Load the two objects into registers and perform the comparison. VisitForStackValue(args->at(0)); @@ -3027,20 +2937,20 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); // ArgumentsAccessStub expects the key in x1. VisitForAccumulatorValue(args->at(0)); __ Mov(x1, x0); __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters())); - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); __ CallStub(&stub); context()->Plug(x0); } void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label exit; // Get the number of formal parameters. __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters())); @@ -3063,7 +2973,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { ASM_LOCATION("FullCodeGenerator::EmitClassOf"); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); Label done, null, function, non_function_constructor; VisitForAccumulatorValue(args->at(0)); @@ -3124,33 +3034,11 @@ } -void FullCodeGenerator::EmitLog(CallRuntime* expr) { - // Conditionally generate a log call. - // Args: - // 0 (literal string): The type of logging (corresponds to the flags). - // This is used to determine whether or not to generate the log call. - // 1 (string): Format string. Access the string at argument index 2 - // with '%2s' (see Logger::LogRuntime for all the formats). - // 2 (array): Arguments to the format string. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 3); - if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) { - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - __ CallRuntime(Runtime::kHiddenLog, 2); - } - - // Finally, we're expected to leave a value on the top of the stack. - __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); - context()->Plug(x0); -} - - void FullCodeGenerator::EmitSubString(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - SubStringStub stub; + SubStringStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3161,9 +3049,9 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - RegExpExecStub stub; + RegExpExecStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 4); + DCHECK(args->length() == 4); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3176,7 +3064,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { ASM_LOCATION("FullCodeGenerator::EmitValueOf"); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); // Load the object. Label done; @@ -3193,8 +3081,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); - ASSERT_NE(NULL, args->at(1)->AsLiteral()); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -3231,7 +3119,7 @@ } __ Bind(¬_date_object); - __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0); + __ CallRuntime(Runtime::kThrowNotDateError, 0); __ Bind(&done); context()->Plug(x0); } @@ -3239,7 +3127,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = x0; Register index = x1; @@ -3269,7 +3157,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = x0; Register index = x1; @@ -3300,10 +3188,10 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the MathPow stub. ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - MathPowStub stub(MathPowStub::ON_STACK); + MathPowStub stub(isolate(), MathPowStub::ON_STACK); __ CallStub(&stub); context()->Plug(x0); } @@ -3311,7 +3199,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); // Load the object. VisitForAccumulatorValue(args->at(1)); // Load the value. __ Pop(x1); @@ -3340,12 +3228,12 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 1); + DCHECK_EQ(args->length(), 1); // Load the argument into x0 and call the stub. 
VisitForAccumulatorValue(args->at(0)); - NumberToStringStub stub; + NumberToStringStub stub(isolate()); __ CallStub(&stub); context()->Plug(x0); } @@ -3353,7 +3241,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3375,7 +3263,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3420,7 +3308,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3467,13 +3355,13 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { ASM_LOCATION("FullCodeGenerator::EmitStringAdd"); ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); __ Pop(x1); - StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED); + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); __ CallStub(&stub); context()->Plug(x0); @@ -3482,40 +3370,20 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - StringCompareStub stub; + StringCompareStub stub(isolate()); __ CallStub(&stub); context()->Plug(x0); } -void FullCodeGenerator::EmitMathLog(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); - context()->Plug(x0); -} - - -void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. - ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); - context()->Plug(x0); -} - - void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { ASM_LOCATION("FullCodeGenerator::EmitCallFunction"); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() >= 2); + DCHECK(args->length() >= 2); int arg_count = args->length() - 2; // 2 ~ receiver and function. for (int i = 0; i < arg_count + 1; i++) { @@ -3545,9 +3413,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { - RegExpConstructResultStub stub; + RegExpConstructResultStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(2)); @@ -3559,8 +3427,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); - ASSERT_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_EQ(2, args->length()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle<FixedArray> jsfunction_result_caches( @@ -3598,7 +3466,7 @@ // Call runtime to perform the lookup. 
__ Push(cache, key); - __ CallRuntime(Runtime::kHiddenGetFromCache, 2); + __ CallRuntime(Runtime::kGetFromCache, 2); __ Bind(&done); context()->Plug(x0); @@ -3627,7 +3495,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); __ AssertString(x0); @@ -3643,7 +3511,7 @@ ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin"); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(0)); @@ -3855,6 +3723,17 @@ } +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ Mov(x10, debug_is_active); + __ Ldrb(x0, MemOperand(x10)); + __ SmiTag(x0); + context()->Plug(x0); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->function() != NULL && expr->function()->intrinsic_type == Runtime::INLINE) { @@ -3870,13 +3749,20 @@ if (expr->is_jsruntime()) { // Push the builtins object as the receiver. __ Ldr(x10, GlobalObjectMemOperand()); - __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset)); - __ Push(x0); + __ Ldr(LoadIC::ReceiverRegister(), + FieldMemOperand(x10, GlobalObject::kBuiltinsOffset)); + __ Push(LoadIC::ReceiverRegister()); // Load the function from the receiver. Handle<String> name = expr->name(); - __ Mov(x2, Operand(name)); - CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + __ Mov(LoadIC::NameRegister(), Operand(name)); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(expr->CallRuntimeFeedbackSlot())); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } // Push the target function under the receiver. 
__ Pop(x10); @@ -3889,7 +3775,7 @@ // Record source position of the IC call. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ Peek(x1, (arg_count + 1) * kPointerSize); __ CallStub(&stub); @@ -3928,7 +3814,7 @@ Variable* var = proxy->var(); // Delete of an unqualified identifier is disallowed in strict mode // but "delete this" is allowed. - ASSERT(strict_mode() == SLOPPY || var->is_this()); + DCHECK(strict_mode() == SLOPPY || var->is_this()); if (var->IsUnallocated()) { __ Ldr(x12, GlobalObjectMemOperand()); __ Mov(x11, Operand(var->name())); @@ -3945,7 +3831,7 @@ // context where the variable was introduced. __ Mov(x2, Operand(var->name())); __ Push(context_register(), x2); - __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); context()->Plug(x0); } } else { @@ -3978,7 +3864,7 @@ test->fall_through()); context()->Plug(test->true_label(), test->false_label()); } else { - ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue()); + DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); // TODO(jbramley): This could be much more efficient using (for // example) the CSEL instruction. Label materialize_true, materialize_false, done; @@ -4021,7 +3907,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { - ASSERT(expr->expression()->IsValidLeftHandSide()); + DCHECK(expr->expression()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ CountOperation"); SetSourcePosition(expr->position()); @@ -4040,7 +3926,7 @@ // Evaluate expression and get value. 
if (assign_type == VARIABLE) { - ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { @@ -4049,16 +3935,16 @@ __ Push(xzr); } if (assign_type == NAMED_PROPERTY) { - // Put the object both on the stack and in the accumulator. - VisitForAccumulatorValue(prop->obj()); - __ Push(x0); + // Put the object both on the stack and in the register. + VisitForStackValue(prop->obj()); + __ Peek(LoadIC::ReceiverRegister(), 0); EmitNamedPropertyLoad(prop); } else { // KEYED_PROPERTY VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ Peek(x1, 0); - __ Push(x0); + VisitForStackValue(prop->key()); + __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize); + __ Peek(LoadIC::NameRegister(), 0); EmitKeyedPropertyLoad(prop); } } @@ -4107,7 +3993,7 @@ __ B(&stub_call); __ Bind(&slow); } - ToNumberStub convert_stub; + ToNumberStub convert_stub(isolate()); __ CallStub(&convert_stub); // Save result for postfix expressions. @@ -4139,8 +4025,8 @@ { Assembler::BlockPoolsScope scope(masm_); - BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); - CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); + BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); } __ Bind(&done); @@ -4168,8 +4054,9 @@ } break; case NAMED_PROPERTY: { - __ Mov(x2, Operand(prop->key()->AsLiteral()->value())); - __ Pop(x1); + __ Mov(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); + __ Pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->CountStoreFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4182,8 +4069,8 @@ break; } case KEYED_PROPERTY: { - __ Pop(x1); // Key. - __ Pop(x2); // Receiver. 
+ __ Pop(KeyedStoreIC::NameRegister()); + __ Pop(KeyedStoreIC::ReceiverRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -4203,13 +4090,17 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { - ASSERT(!context()->IsEffect()); - ASSERT(!context()->IsTest()); + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); VariableProxy* proxy = expr->AsVariableProxy(); if (proxy != NULL && proxy->var()->IsUnallocated()) { Comment cmnt(masm_, "Global variable"); - __ Ldr(x0, GlobalObjectMemOperand()); - __ Mov(x2, Operand(proxy->name())); + __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand()); + __ Mov(LoadIC::NameRegister(), Operand(proxy->name())); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(proxy->VariableFeedbackSlot())); + } // Use a regular load, not a contextual load, to avoid a reference // error. CallLoadIC(NOT_CONTEXTUAL); @@ -4220,12 +4111,12 @@ // Generate code for loading from variables potentially shadowed // by eval-introduced variables. 
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); __ Bind(&slow); __ Mov(x0, Operand(proxy->name())); __ Push(cp, x0); - __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); PrepareForBailout(expr, TOS_REG); __ Bind(&done); @@ -4254,13 +4145,14 @@ } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string"); __ JumpIfSmi(x0, if_true); __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset)); __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_string())) { + } else if (String::Equals(check, factory->string_string())) { ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string"); __ JumpIfSmi(x0, if_false); // Check for undetectable objects => false. 
@@ -4268,22 +4160,17 @@ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset)); __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->symbol_string())) { + } else if (String::Equals(check, factory->symbol_string())) { ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string"); __ JumpIfSmi(x0, if_false); __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_string())) { + } else if (String::Equals(check, factory->boolean_string())) { ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string"); __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true); __ CompareRoot(x0, Heap::kFalseValueRootIndex); Split(eq, if_true, if_false, fall_through); - } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_string())) { - ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string"); - __ CompareRoot(x0, Heap::kNullValueRootIndex); - Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_string())) { + } else if (String::Equals(check, factory->undefined_string())) { ASM_LOCATION( "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string"); __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true); @@ -4293,7 +4180,7 @@ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset)); __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true, fall_through); - } else if (check->Equals(isolate()->heap()->function_string())) { + } else if (String::Equals(check, factory->function_string())) { ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string"); __ JumpIfSmi(x0, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); @@ -4301,12 +4188,10 @@ __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false, fall_through); - } else if 
(check->Equals(isolate()->heap()->object_string())) { + } else if (String::Equals(check, factory->object_string())) { ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string"); __ JumpIfSmi(x0, if_false); - if (!FLAG_harmony_typeof) { - __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true); - } + __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true); // Check for JS objects => true. Register map = x10; __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, @@ -4360,7 +4245,7 @@ case Token::INSTANCEOF: { VisitForStackValue(expr->right()); - InstanceofStub stub(InstanceofStub::kNoFlags); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); __ CallStub(&stub); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); // The stub returns 0 for true. @@ -4465,7 +4350,7 @@ __ Bind(&suspend); VisitForAccumulatorValue(expr->generator_object()); - ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos())); + DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos())); __ Mov(x1, Smi::FromInt(continuation.pos())); __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset)); __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset)); @@ -4476,7 +4361,7 @@ __ Cmp(__ StackPointer(), x1); __ B(eq, &post_runtime); __ Push(x0); // generator object - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ Bind(&post_runtime); __ Pop(result_register()); @@ -4508,6 +4393,9 @@ Label l_catch, l_try, l_suspend, l_continuation, l_resume; Label l_next, l_call, l_loop; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + // Initial send value is undefined. 
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex); __ B(&l_next); @@ -4515,9 +4403,9 @@ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } __ Bind(&l_catch); handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); - __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw" - __ Peek(x3, 1 * kPointerSize); // iter - __ Push(x2, x3, x0); // "throw", iter, except + __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw" + __ Peek(x3, 1 * kPointerSize); // iter + __ Push(load_name, x3, x0); // "throw", iter, except __ B(&l_call); // try { received = %yield result } @@ -4540,14 +4428,14 @@ const int generator_object_depth = kPointerSize + handler_size; __ Peek(x0, generator_object_depth); __ Push(x0); // g - ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos())); + DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos())); __ Mov(x1, Smi::FromInt(l_continuation.pos())); __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset)); __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset)); __ Mov(x1, cp); __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2, kLRHasBeenSaved, kDontSaveFPRegs); - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ Pop(x0); // result EmitReturnSequence(); @@ -4556,19 +4444,24 @@ // receiver = iter; f = 'next'; arg = received; __ Bind(&l_next); - __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next" - __ Peek(x3, 1 * kPointerSize); // iter - __ Push(x2, x3, x0); // "next", iter, received + + __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next" + __ Peek(x3, 1 * kPointerSize); // iter + __ Push(load_name, x3, x0); // "next", iter, received // result = receiver[f](arg); __ Bind(&l_call); - __ Peek(x1, 1 * kPointerSize); - __ Peek(x0, 2 * kPointerSize); + __ Peek(load_receiver, 1 * kPointerSize); + 
__ Peek(load_name, 2 * kPointerSize); + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(expr->KeyedLoadFeedbackSlot())); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallIC(ic, TypeFeedbackId::None()); __ Mov(x1, x0); __ Poke(x1, 2 * kPointerSize); - CallFunctionStub stub(1, CALL_AS_METHOD); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); __ CallStub(&stub); __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -4576,19 +4469,29 @@ // if (!result.done) goto l_try; __ Bind(&l_loop); - __ Push(x0); // save result - __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done" - CallLoadIC(NOT_CONTEXTUAL); // result.done in x0 + __ Move(load_receiver, x0); + + __ Push(load_receiver); // save result + __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(expr->DoneFeedbackSlot())); + } + CallLoadIC(NOT_CONTEXTUAL); // x0=result.done // The ToBooleanStub argument (result.done) is in x0. Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); CallIC(bool_ic); __ Cbz(x0, &l_try); // result.value - __ Pop(x0); // result - __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value" - CallLoadIC(NOT_CONTEXTUAL); // result.value in x0 - context()->DropAndPlug(2, x0); // drop iter and g + __ Pop(load_receiver); // result + __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" + if (FLAG_vector_ics) { + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(expr->ValueFeedbackSlot())); + } + CallLoadIC(NOT_CONTEXTUAL); // x0=result.value + context()->DropAndPlug(2, x0); // drop iter and g break; } } @@ -4606,7 +4509,7 @@ Register function = x4; // The value stays in x0, and is ultimately read by the resumed generator, as - // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. 
Or it // is read to throw the value when the resumed generator is already closed. r1 // will hold the generator object until the activation has been resumed. VisitForStackValue(generator); @@ -4688,7 +4591,7 @@ __ Mov(x10, Smi::FromInt(resume_mode)); __ Push(generator_object, result_register(), x10); - __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); // Not reached: the runtime call returns elsewhere. __ Unreachable(); @@ -4703,14 +4606,14 @@ } else { // Throw the provided value. __ Push(value_reg); - __ CallRuntime(Runtime::kHiddenThrow, 1); + __ CallRuntime(Runtime::kThrow, 1); } __ B(&done); // Throw error if we attempt to operate on a running generator. __ Bind(&wrong_state); __ Push(generator_object); - __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); __ Bind(&done); context()->Plug(result_register()); @@ -4721,7 +4624,7 @@ Label gc_required; Label allocated; - Handle<Map> map(isolate()->native_context()->generator_result_map()); + Handle<Map> map(isolate()->native_context()->iterator_result_map()); // Allocate and populate an object with this form: { value: VAL, done: DONE } @@ -4731,7 +4634,7 @@ __ Bind(&gc_required); __ Push(Smi::FromInt(map->instance_size())); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ Ldr(context_register(), MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -4740,22 +4643,23 @@ Register result_value = x2; Register boolean_done = x3; Register empty_fixed_array = x4; + Register untagged_result = x5; __ Mov(map_reg, Operand(map)); __ Pop(result_value); __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done))); __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array())); - ASSERT_EQ(map->instance_size(), 5 * kPointerSize); - // TODO(jbramley): Use Stp if possible. 
- __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset)); - __ Str(empty_fixed_array, - FieldMemOperand(result, JSObject::kPropertiesOffset)); - __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset)); - __ Str(result_value, - FieldMemOperand(result, - JSGeneratorObject::kResultValuePropertyOffset)); - __ Str(boolean_done, - FieldMemOperand(result, - JSGeneratorObject::kResultDonePropertyOffset)); + DCHECK_EQ(map->instance_size(), 5 * kPointerSize); + STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize == + JSObject::kElementsOffset); + STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize == + JSGeneratorObject::kResultDonePropertyOffset); + __ ObjectUntag(untagged_result, result); + __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset)); + __ Stp(empty_fixed_array, empty_fixed_array, + MemOperand(untagged_result, JSObject::kPropertiesOffset)); + __ Stp(result_value, boolean_done, + MemOperand(untagged_result, + JSGeneratorObject::kResultValuePropertyOffset)); // Only the value field needs a write barrier, as the other values are in the // root set. @@ -4784,7 +4688,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { - ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset); + DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset); __ Str(value, MemOperand(fp, frame_offset)); } @@ -4802,7 +4706,7 @@ // as their closure, not the anonymous closure containing the global // code. Pass a smi sentinel and let the runtime look up the empty // function. 
- ASSERT(kSmiTag == 0); + DCHECK(kSmiTag == 0); __ Push(xzr); } else if (declaration_scope->is_eval_scope()) { // Contexts created by a call to eval have the same closure as the @@ -4811,7 +4715,7 @@ __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX)); __ Push(x10); } else { - ASSERT(declaration_scope->is_function_scope()); + DCHECK(declaration_scope->is_function_scope()); __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ Push(x10); } @@ -4820,7 +4724,7 @@ void FullCodeGenerator::EnterFinallyBlock() { ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock"); - ASSERT(!result_register().is(x10)); + DCHECK(!result_register().is(x10)); // Preserve the result register while executing finally block. // Also cook the return address in lr to the stack (smi encoded Code* delta). __ Sub(x10, lr, Operand(masm_->CodeObject())); @@ -4835,8 +4739,9 @@ ExternalReference has_pending_message = ExternalReference::address_of_has_pending_message(isolate()); + STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof) __ Mov(x11, has_pending_message); - __ Ldr(x11, MemOperand(x11)); + __ Ldrb(x11, MemOperand(x11)); __ SmiTag(x11); __ Push(x10, x11); @@ -4851,7 +4756,7 @@ void FullCodeGenerator::ExitFinallyBlock() { ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock"); - ASSERT(!result_register().is(x10)); + DCHECK(!result_register().is(x10)); // Restore pending message from stack. 
__ Pop(x10, x11, x12); @@ -4864,7 +4769,8 @@ ExternalReference has_pending_message = ExternalReference::address_of_has_pending_message(isolate()); __ Mov(x13, has_pending_message); - __ Str(x11, MemOperand(x13)); + STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof) + __ Strb(x11, MemOperand(x13)); ExternalReference pending_message_obj = ExternalReference::address_of_pending_message_obj(isolate()); @@ -4892,7 +4798,7 @@ Address branch_address = pc - 3 * kInstructionSize; PatchingAssembler patcher(branch_address, 1); - ASSERT(Instruction::Cast(branch_address) + DCHECK(Instruction::Cast(branch_address) ->IsNop(Assembler::INTERRUPT_CODE_NOP) || (Instruction::Cast(branch_address)->IsCondBranchImm() && Instruction::Cast(branch_address)->ImmPCOffset() == @@ -4923,7 +4829,7 @@ Instruction* load = Instruction::Cast(pc)->preceding(2); Address interrupt_address_pointer = reinterpret_cast<Address>(load) + load->ImmPCOffset(); - ASSERT((Memory::uint64_at(interrupt_address_pointer) == + DCHECK((Memory::uint64_at(interrupt_address_pointer) == reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate() ->builtins() ->OnStackReplacement() diff -Nru nodejs-0.11.13/deps/v8/src/arm64/ic-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/ic-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/ic-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/ic-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "arm64/assembler-arm64.h" -#include "code-stubs.h" -#include "codegen.h" -#include "disasm.h" -#include "ic-inl.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/arm64/assembler-arm64.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/disasm.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -57,51 +34,6 @@ } -// Generated code falls through if the receiver is a regular non-global -// JS object with slow properties and no interceptors. 
-// -// "receiver" holds the receiver on entry and is unchanged. -// "elements" holds the property dictionary on fall through. -static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register elements, - Register scratch0, - Register scratch1, - Label* miss) { - ASSERT(!AreAliased(receiver, elements, scratch0, scratch1)); - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the receiver is a valid JS object. - // Let t be the object instance type, we want: - // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE. - // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only - // check the lower bound. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); - - __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE, - miss, lt); - - // scratch0 now contains the map of the receiver and scratch1 the object type. - Register map = scratch0; - Register type = scratch1; - - // Check if the receiver is a global JS object. - GenerateGlobalInstanceTypeCheck(masm, type, miss); - - // Check that the object does not require access checks. - __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset)); - __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss); - __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss); - - // Check that the properties dictionary is valid. - __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss); -} - - // Helper function used from LoadIC GenerateNormal. // // elements: Property dictionary. 
It is not clobbered if a jump to the miss @@ -120,8 +52,8 @@ Register result, Register scratch1, Register scratch2) { - ASSERT(!AreAliased(elements, name, scratch1, scratch2)); - ASSERT(!AreAliased(result, scratch1, scratch2)); + DCHECK(!AreAliased(elements, name, scratch1, scratch2)); + DCHECK(!AreAliased(result, scratch1, scratch2)); Label done; @@ -167,7 +99,7 @@ Register value, Register scratch1, Register scratch2) { - ASSERT(!AreAliased(elements, name, value, scratch1, scratch2)); + DCHECK(!AreAliased(elements, name, value, scratch1, scratch2)); Label done; @@ -215,7 +147,7 @@ Register scratch, int interceptor_bit, Label* slow) { - ASSERT(!AreAliased(map_scratch, scratch)); + DCHECK(!AreAliased(map_scratch, scratch)); // Check that the object isn't a smi. __ JumpIfSmi(receiver, slow); @@ -264,7 +196,7 @@ Register result, Label* not_fast_array, Label* slow) { - ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2)); + DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2)); // Check for fast array. __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -313,7 +245,7 @@ Register hash_scratch, Label* index_string, Label* not_unique) { - ASSERT(!AreAliased(key, map_scratch, hash_scratch)); + DCHECK(!AreAliased(key, map_scratch, hash_scratch)); // Is the key a name? Label unique; @@ -352,7 +284,7 @@ Register scratch2, Label* unmapped_case, Label* slow_case) { - ASSERT(!AreAliased(object, key, map, scratch1, scratch2)); + DCHECK(!AreAliased(object, key, map, scratch1, scratch2)); Heap* heap = masm->isolate()->heap(); @@ -407,7 +339,7 @@ Register parameter_map, Register scratch, Label* slow_case) { - ASSERT(!AreAliased(key, parameter_map, scratch)); + DCHECK(!AreAliased(key, parameter_map, scratch)); // Element is in arguments backing store, which is referenced by the // second element of the parameter_map. 
@@ -430,16 +362,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x2 : name - // -- lr : return address - // -- x0 : receiver - // ----------------------------------- + // The return address is in lr. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(x1)); + DCHECK(name.is(x2)); // Probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, x0, x2, x3, x4, x5, x6); + masm, flags, receiver, name, x3, x4, x5, x6); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -447,38 +380,31 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x2 : name - // -- lr : return address - // -- x0 : receiver - // ----------------------------------- - Label miss; - - GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss); + Register dictionary = x0; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); + Label slow; - // x1 now holds the property dictionary. - GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4); + __ Ldr(dictionary, + FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4); __ Ret(); - // Cache miss: Jump to runtime. - __ Bind(&miss); - GenerateMiss(masm); + // Dictionary load failed, go slow (but don't miss). + __ Bind(&slow); + GenerateRuntimeGetProperty(masm); } void LoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x2 : name - // -- lr : return address - // -- x0 : receiver - // ----------------------------------- + // The return address is in lr. 
Isolate* isolate = masm->isolate(); ASM_LOCATION("LoadIC::GenerateMiss"); __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4); // Perform tail call to the entry. - __ Push(x0, x2); + __ Push(ReceiverRegister(), NameRegister()); ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); __ TailCallExternalReference(ref, 2, 1); @@ -486,29 +412,23 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- x2 : name - // -- lr : return address - // -- x0 : receiver - // ----------------------------------- - - __ Push(x0, x2); + // The return address is in lr. + __ Push(ReceiverRegister(), NameRegister()); __ TailCallRuntime(Runtime::kGetProperty, 2, 1); } void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - // ----------------------------------- + // The return address is in lr. Register result = x0; - Register key = x0; - Register receiver = x1; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(x1)); + DCHECK(key.is(x2)); + Label miss, unmapped; - Register map_scratch = x2; + Register map_scratch = x0; MemOperand mapped_location = GenerateMappedArgumentsLookup( masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss); __ Ldr(result, mapped_location); @@ -518,10 +438,8 @@ // Parameter map is left in map_scratch when a jump on unmapped is done. MemOperand unmapped_location = GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss); - __ Ldr(x2, unmapped_location); - __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss); - // Move the result in x0. x0 must be preserved on miss. 
- __ Mov(result, x2); + __ Ldr(result, unmapped_location); + __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss); __ Ret(); __ Bind(&miss); @@ -531,18 +449,14 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments"); - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // ----------------------------------- - Label slow, notin; + Register value = ValueRegister(); + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(receiver.is(x1)); + DCHECK(key.is(x2)); + DCHECK(value.is(x0)); - Register value = x0; - Register key = x1; - Register receiver = x2; Register map = x3; // These registers are used by GenerateMappedArgumentsLookup to build a @@ -582,16 +496,12 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - // ----------------------------------- + // The return address is in lr. Isolate* isolate = masm->isolate(); __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11); - __ Push(x1, x0); + __ Push(ReceiverRegister(), NameRegister()); // Perform tail call to the entry. 
ExternalReference ref = @@ -601,16 +511,35 @@ } -void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - // ----------------------------------- - Register key = x0; - Register receiver = x1; +// IC register specifications +const Register LoadIC::ReceiverRegister() { return x1; } +const Register LoadIC::NameRegister() { return x2; } - __ Push(receiver, key); +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return x0; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return x3; +} + + +const Register StoreIC::ReceiverRegister() { return x1; } +const Register StoreIC::NameRegister() { return x2; } +const Register StoreIC::ValueRegister() { return x0; } + + +const Register KeyedStoreIC::MapRegister() { + return x3; +} + + +void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // The return address is in lr. + __ Push(ReceiverRegister(), NameRegister()); __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); } @@ -624,7 +553,7 @@ Register scratch4, Register scratch5, Label *slow) { - ASSERT(!AreAliased( + DCHECK(!AreAliased( key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5)); Isolate* isolate = masm->isolate(); @@ -665,7 +594,7 @@ Register scratch4, Register scratch5, Label *slow) { - ASSERT(!AreAliased( + DCHECK(!AreAliased( key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5)); Isolate* isolate = masm->isolate(); @@ -779,32 +708,30 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - // ----------------------------------- + // The return address is in lr. 
Label slow, check_name, index_smi, index_name; - Register key = x0; - Register receiver = x1; + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(key.is(x2)); + DCHECK(receiver.is(x1)); __ JumpIfNotSmi(key, &check_name); __ Bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. - GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow); + GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow); - // Slow case, key and receiver still in x0 and x1. + // Slow case. __ Bind(&slow); __ IncrementCounter( - masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3); + masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3); GenerateRuntimeGetProperty(masm); __ Bind(&check_name); - GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow); + GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow); - GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow); + GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow); __ Bind(&index_name); __ IndexFromHash(x3, key); @@ -814,17 +741,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key (index) - // -- x1 : receiver - // ----------------------------------- + // Return address is in lr. 
Label miss; - Register index = x0; - Register receiver = x1; + Register receiver = ReceiverRegister(); + Register index = NameRegister(); Register result = x0; Register scratch = x3; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); StringCharAtGenerator char_at_generator(receiver, index, @@ -846,14 +770,14 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - // ----------------------------------- + // Return address is in lr. Label slow; - Register key = x0; - Register receiver = x1; + + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch1 = x3; + Register scratch2 = x4; + DCHECK(!AreAliased(scratch1, scratch2, receiver, key)); // Check that the receiver isn't a smi. __ JumpIfSmi(receiver, &slow); @@ -862,24 +786,23 @@ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow); // Get the map of the receiver. - Register map = x2; + Register map = scratch1; __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that it has indexed interceptor and access checks // are not enabled for this object. - __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset)); - ASSERT(kSlowCaseBitFieldMask == + __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset)); + DCHECK(kSlowCaseBitFieldMask == ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor))); - __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow); - __ Tbz(x3, Map::kHasIndexedInterceptor, &slow); + __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow); + __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow); // Everything is fine, call runtime. 
__ Push(receiver, key); __ TailCallExternalReference( - ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor), + ExternalReference(IC_Utility(kLoadElementWithInterceptor), masm->isolate()), - 2, - 1); + 2, 1); __ Bind(&slow); GenerateMiss(masm); @@ -888,15 +811,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { ASM_LOCATION("KeyedStoreIC::GenerateMiss"); - // ---------- S t a t e -------------- - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // -- lr : return address - // ----------------------------------- // Push receiver, key and value for runtime call. - __ Push(x2, x1, x0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -906,15 +823,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { ASM_LOCATION("KeyedStoreIC::GenerateSlow"); - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // ----------------------------------- // Push receiver, key and value for runtime call. - __ Push(x2, x1, x0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. @@ -927,22 +838,15 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty"); - // ---------- S t a t e -------------- - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // -- lr : return address - // ----------------------------------- // Push receiver, key and value for runtime call. - __ Push(x2, x1, x0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); - // Push PropertyAttributes(NONE) and strict_mode for runtime call. - STATIC_ASSERT(NONE == 0); + // Push strict_mode for runtime call. 
__ Mov(x10, Smi::FromInt(strict_mode)); - __ Push(xzr, x10); + __ Push(x10); - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } @@ -959,7 +863,7 @@ Register receiver_map, Register elements_map, Register elements) { - ASSERT(!AreAliased( + DCHECK(!AreAliased( value, key, receiver, receiver_map, elements_map, elements, x10, x11)); Label transition_smi_elements; @@ -1044,7 +948,6 @@ elements, x10, d0, - d1, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. @@ -1067,10 +970,10 @@ x10, x11, slow); - ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3. AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, receiver_map, mode, slow); __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ B(&fast_double_without_map_check); @@ -1082,10 +985,11 @@ x10, x11, slow); - ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3. + mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, - slow); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, receiver_map, mode, slow); + __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ B(&finish_store); @@ -1099,9 +1003,9 @@ x10, x11, slow); - ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3. 
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); + ElementsTransitionGenerator::GenerateDoubleToObject( + masm, receiver, key, value, receiver_map, mode, slow); __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ B(&finish_store); } @@ -1110,12 +1014,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode) { ASM_LOCATION("KeyedStoreIC::GenerateGeneric"); - // ---------- S t a t e -------------- - // -- x0 : value - // -- x1 : key - // -- x2 : receiver - // -- lr : return address - // ----------------------------------- Label slow; Label array; Label fast_object; @@ -1124,9 +1022,13 @@ Label fast_double_grow; Label fast_double; - Register value = x0; - Register key = x1; - Register receiver = x2; + Register value = ValueRegister(); + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(receiver.is(x1)); + DCHECK(key.is(x2)); + DCHECK(value.is(x0)); + Register receiver_map = x3; Register elements = x4; Register elements_map = x5; @@ -1211,17 +1113,15 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x0 : value - // -- x1 : receiver - // -- x2 : name - // -- lr : return address - // ----------------------------------- + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6)); // Probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, x1, x2, x3, x4, x5, x6); + masm, flags, receiver, name, x3, x4, x5, x6); // Cache miss: Jump to runtime. 
GenerateMiss(masm); @@ -1229,14 +1129,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x0 : value - // -- x1 : receiver - // -- x2 : name - // -- lr : return address - // ----------------------------------- - - __ Push(x1, x2, x0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // Tail call to the entry. ExternalReference ref = @@ -1246,20 +1139,14 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x0 : value - // -- x1 : receiver - // -- x2 : name - // -- lr : return address - // ----------------------------------- Label miss; - Register value = x0; - Register receiver = x1; - Register name = x2; + Register value = ValueRegister(); + Register receiver = ReceiverRegister(); + Register name = NameRegister(); Register dictionary = x3; + DCHECK(!AreAliased(value, receiver, name, x3, x4, x5)); - GenerateNameDictionaryReceiverCheck( - masm, receiver, dictionary, x4, x5, &miss); + __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5); Counters* counters = masm->isolate()->counters(); @@ -1276,21 +1163,14 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty"); - // ----------- S t a t e ------------- - // -- x0 : value - // -- x1 : receiver - // -- x2 : name - // -- lr : return address - // ----------------------------------- - __ Push(x1, x2, x0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); - __ Mov(x11, Smi::FromInt(NONE)); // PropertyAttributes __ Mov(x10, Smi::FromInt(strict_mode)); - __ Push(x11, x10); + __ Push(x10); // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } @@ -1303,7 +1183,7 @@ // ----------------------------------- // Push receiver, name and value for runtime call. - __ Push(x1, x2, x0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. @@ -1373,9 +1253,9 @@ // tb(!n)z test_reg, #0, <target> Instruction* to_patch = info.SmiCheck(); PatchingAssembler patcher(to_patch, 1); - ASSERT(to_patch->IsTestBranch()); - ASSERT(to_patch->ImmTestBranchBit5() == 0); - ASSERT(to_patch->ImmTestBranchBit40() == 0); + DCHECK(to_patch->IsTestBranch()); + DCHECK(to_patch->ImmTestBranchBit5() == 0); + DCHECK(to_patch->ImmTestBranchBit40() == 0); STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagMask == 1); @@ -1383,11 +1263,11 @@ int branch_imm = to_patch->ImmTestBranch(); Register smi_reg; if (check == ENABLE_INLINED_SMI_CHECK) { - ASSERT(to_patch->Rt() == xzr.code()); + DCHECK(to_patch->Rt() == xzr.code()); smi_reg = info.SmiRegister(); } else { - ASSERT(check == DISABLE_INLINED_SMI_CHECK); - ASSERT(to_patch->Rt() != xzr.code()); + DCHECK(check == DISABLE_INLINED_SMI_CHECK); + DCHECK(to_patch->Rt() != xzr.code()); smi_reg = xzr; } @@ -1395,7 +1275,7 @@ // This is JumpIfNotSmi(smi_reg, branch_imm). patcher.tbnz(smi_reg, 0, branch_imm); } else { - ASSERT(to_patch->Mask(TestBranchMask) == TBNZ); + DCHECK(to_patch->Mask(TestBranchMask) == TBNZ); // This is JumpIfSmi(smi_reg, branch_imm). 
patcher.tbz(smi_reg, 0, branch_imm); } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/instructions-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/instructions-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/instructions-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/instructions-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 #define ARM64_DEFINE_FP_STATICS -#include "arm64/instructions-arm64.h" -#include "arm64/assembler-arm64-inl.h" +#include "src/arm64/assembler-arm64-inl.h" +#include "src/arm64/instructions-arm64.h" namespace v8 { namespace internal { @@ -90,7 +67,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate, unsigned int width) { - ASSERT(width <= 64); + DCHECK(width <= 64); rotate &= 63; return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) | (value >> rotate); @@ -100,9 +77,9 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value, unsigned width) { - ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) || + DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) || (width == 32)); - ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); + DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)); uint64_t result = value & ((1UL << width) - 1UL); for (unsigned i = width; i < reg_size; i *= 2) { result |= (result << i); @@ -216,7 +193,7 @@ offset = ImmBranch() << kInstructionSizeLog2; } else { // Load literal (offset from PC). - ASSERT(IsLdrLiteral()); + DCHECK(IsLdrLiteral()); // The offset is always shifted by 2 bits, even for loads to 64-bits // registers. offset = ImmLLiteral() << kInstructionSizeLog2; @@ -254,16 +231,23 @@ void Instruction::SetPCRelImmTarget(Instruction* target) { // ADRP is not supported, so 'this' must point to an ADR instruction. 
- ASSERT(Mask(PCRelAddressingMask) == ADR); + DCHECK(IsAdr()); - Instr imm = Assembler::ImmPCRelAddress(DistanceTo(target)); - - SetInstructionBits(Mask(~ImmPCRel_mask) | imm); + ptrdiff_t target_offset = DistanceTo(target); + Instr imm; + if (Instruction::IsValidPCRelOffset(target_offset)) { + imm = Assembler::ImmPCRelAddress(target_offset); + SetInstructionBits(Mask(~ImmPCRel_mask) | imm); + } else { + PatchingAssembler patcher(this, + PatchingAssembler::kAdrFarPatchableNInstrs); + patcher.PatchAdrFar(target_offset); + } } void Instruction::SetBranchImmTarget(Instruction* target) { - ASSERT(IsAligned(DistanceTo(target), kInstructionSize)); + DCHECK(IsAligned(DistanceTo(target), kInstructionSize)); Instr branch_imm = 0; uint32_t imm_mask = 0; ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2; @@ -295,8 +279,8 @@ void Instruction::SetImmLLiteral(Instruction* source) { - ASSERT(IsAligned(DistanceTo(source), kInstructionSize)); - ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2; + DCHECK(IsAligned(DistanceTo(source), kInstructionSize)); + ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2; Instr imm = Assembler::ImmLLiteral(offset); Instr mask = ImmLLiteral_mask; @@ -320,7 +304,7 @@ // xzr and Register are not defined in that header. Consider adding // instructions-arm64-inl.h to work around this. uint64_t InstructionSequence::InlineData() const { - ASSERT(IsInlineData()); + DCHECK(IsInlineData()); uint64_t payload = ImmMoveWide(); // TODO(all): If we extend ::InlineData() to support bigger data, we need // to update this method too. diff -Nru nodejs-0.11.13/deps/v8/src/arm64/instructions-arm64.h nodejs-0.11.15/deps/v8/src/arm64/instructions-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/instructions-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/instructions-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_ #define V8_ARM64_INSTRUCTIONS_ARM64_H_ -#include "globals.h" -#include "utils.h" -#include "arm64/constants-arm64.h" -#include "arm64/utils-arm64.h" +#include "src/arm64/constants-arm64.h" +#include "src/arm64/utils-arm64.h" +#include "src/globals.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -160,9 +137,10 @@ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), // formed from ImmPCRelLo and ImmPCRelHi. int ImmPCRel() const { + DCHECK(IsPCRelAddressing()); int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); int const width = ImmPCRelLo_width + ImmPCRelHi_width; - return signed_bitextract_32(width-1, 0, offset); + return signed_bitextract_32(width - 1, 0, offset); } uint64_t ImmLogical(); @@ -191,6 +169,10 @@ return Mask(TestBranchFMask) == TestBranchFixed; } + bool IsImmBranch() const { + return BranchType() != UnknownBranchType; + } + bool IsLdrLiteral() const { return Mask(LoadLiteralFMask) == LoadLiteralFixed; } @@ -203,6 +185,10 @@ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; } + bool IsAdr() const { + return Mask(PCRelAddressingMask) == ADR; + } + bool IsLogicalImmediate() const { return Mask(LogicalImmediateFMask) == LogicalImmediateFixed; } @@ -211,6 +197,10 @@ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed; } + bool IsAddSubShifted() const { + return Mask(AddSubShiftedFMask) == AddSubShiftedFixed; + } + bool IsAddSubExtended() const { return Mask(AddSubExtendedFMask) == AddSubExtendedFixed; } @@ -363,7 +353,7 @@ void SetImmLLiteral(Instruction* source); uint8_t* LiteralAddress() { - int offset = ImmLLiteral() << kLiteralEntrySizeLog2; + int offset = ImmLLiteral() << kLoadLiteralScaleLog2; return reinterpret_cast<uint8_t*>(this) + offset; } @@ -374,7 +364,7 @@ CheckAlignment check = CHECK_ALIGNMENT) { Address addr = reinterpret_cast<Address>(this) + offset; // The FUZZ_disasm test relies on no check being done. 
- ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize)); + DCHECK(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize)); return Cast(addr); } @@ -387,6 +377,10 @@ } + static const int ImmPCRelRangeBitwidth = 21; + static bool IsValidPCRelOffset(int offset) { + return is_int21(offset); + } void SetPCRelImmTarget(Instruction* target); void SetBranchImmTarget(Instruction* target); }; @@ -422,24 +416,38 @@ // A pseudo 'printf' instruction. The arguments will be passed to the platform // printf method. const Instr kImmExceptionIsPrintf = 0xdeb1; -// Parameters are stored in ARM64 registers as if the printf pseudo-instruction -// was a call to the real printf method: -// -// x0: The format string, then either of: +// Most parameters are stored in ARM64 registers as if the printf +// pseudo-instruction was a call to the real printf method: +// x0: The format string. // x1-x7: Optional arguments. // d0-d7: Optional arguments. // -// Floating-point and integer arguments are passed in separate sets of -// registers in AAPCS64 (even for varargs functions), so it is not possible to -// determine the type of location of each arguments without some information -// about the values that were passed in. This information could be retrieved -// from the printf format string, but the format string is not trivial to -// parse so we encode the relevant information with the HLT instruction. -// - Type -// Either kRegister or kFPRegister, but stored as a uint32_t because there's -// no way to guarantee the size of the CPURegister::RegisterType enum. -const unsigned kPrintfTypeOffset = 1 * kInstructionSize; -const unsigned kPrintfLength = 2 * kInstructionSize; +// Also, the argument layout is described inline in the instructions: +// - arg_count: The number of arguments. +// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields. 
+// +// Floating-point and integer arguments are passed in separate sets of registers +// in AAPCS64 (even for varargs functions), so it is not possible to determine +// the type of each argument without some information about the values that were +// passed in. This information could be retrieved from the printf format string, +// but the format string is not trivial to parse so we encode the relevant +// information with the HLT instruction. +const unsigned kPrintfArgCountOffset = 1 * kInstructionSize; +const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize; +const unsigned kPrintfLength = 3 * kInstructionSize; + +const unsigned kPrintfMaxArgCount = 4; + +// The argument pattern is a set of two-bit-fields, each with one of the +// following values: +enum PrintfArgPattern { + kPrintfArgW = 1, + kPrintfArgX = 2, + // There is no kPrintfArgS because floats are always converted to doubles in C + // varargs calls. + kPrintfArgD = 3 +}; +static const unsigned kPrintfArgPatternBits = 2; // A pseudo 'debug' instruction. const Instr kImmExceptionIsDebug = 0xdeb0; diff -Nru nodejs-0.11.13/deps/v8/src/arm64/instrument-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/instrument-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/instrument-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/instrument-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "arm64/instrument-arm64.h" +#include "src/arm64/instrument-arm64.h" namespace v8 { namespace internal { Counter::Counter(const char* name, CounterType type) : count_(0), enabled_(false), type_(type) { - ASSERT(name != NULL); + DCHECK(name != NULL); strncpy(name_, name, kCounterNameMaxLength); } @@ -130,8 +107,7 @@ } } - static const int num_counters = - sizeof(kCounterList) / sizeof(CounterDescriptor); + static const int num_counters = ARRAY_SIZE(kCounterList); // Dump an instrumentation description comment at the top of the file. fprintf(output_stream_, "# counters=%d\n", num_counters); @@ -167,7 +143,7 @@ // Increment the instruction counter, and dump all counters if a sample period // has elapsed. 
static Counter* counter = GetCounter("Instruction"); - ASSERT(counter->type() == Cumulative); + DCHECK(counter->type() == Cumulative); counter->Increment(); if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) { diff -Nru nodejs-0.11.13/deps/v8/src/arm64/instrument-arm64.h nodejs-0.11.15/deps/v8/src/arm64/instrument-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/instrument-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/instrument-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_INSTRUMENT_ARM64_H_ #define V8_ARM64_INSTRUMENT_ARM64_H_ -#include "globals.h" -#include "utils.h" -#include "arm64/decoder-arm64.h" -#include "arm64/constants-arm64.h" +#include "src/globals.h" +#include "src/utils.h" + +#include "src/arm64/constants-arm64.h" +#include "src/arm64/decoder-arm64.h" namespace v8 { namespace internal { @@ -54,7 +32,7 @@ class Counter { public: - Counter(const char* name, CounterType type = Gauge); + explicit Counter(const char* name, CounterType type = Gauge); void Increment(); void Enable(); diff -Nru nodejs-0.11.13/deps/v8/src/arm64/lithium-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/lithium-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/lithium-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/lithium-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,16 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "lithium-allocator-inl.h" -#include "arm64/lithium-arm64.h" -#include "arm64/lithium-codegen-arm64.h" -#include "hydrogen-osr.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/arm64/lithium-codegen-arm64.h" +#include "src/hydrogen-osr.h" +#include "src/lithium-inl.h" namespace v8 { namespace internal { - #define DEFINE_COMPILE(type) \ void L##type::CompileToNative(LCodeGen* generator) { \ generator->Do##type(this); \ @@ -49,17 +24,17 @@ // outputs because all registers are blocked by the calling convention. // Inputs operands must use a fixed register or use-at-start policy or // a non-register policy. 
- ASSERT(Output() == NULL || + DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || !LUnallocated::cast(Output())->HasRegisterPolicy()); for (UseIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() || + DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); } for (TempIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); } } #endif @@ -307,7 +282,9 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); - hydrogen()->access().PrintTo(stream); + OStringStream os; + os << hydrogen()->access(); + stream->Add(os.c_str()); stream->Add(" <- "); value()->PrintTo(stream); } @@ -515,6 +492,8 @@ !hinstr->HasObservableSideEffects(); if (needs_environment && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. 
+ instr->environment()->set_has_been_used(); } return instr; @@ -522,7 +501,7 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); + DCHECK(!instr->HasPointerMap()); instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -541,6 +520,19 @@ } +LUnallocated* LChunkBuilder::TempDoubleRegister() { + LUnallocated* operand = + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); + vreg = 0; + } + operand->set_virtual_register(vreg); + return operand; +} + + int LPlatformChunk::GetNextSpillIndex() { return spill_slot_count_++; } @@ -551,21 +543,28 @@ if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { - ASSERT(kind == GENERAL_REGISTERS); + DCHECK(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } +LOperand* LChunkBuilder::FixedTemp(Register reg) { + LUnallocated* operand = ToUnallocated(reg); + DCHECK(operand->HasFixedPolicy()); + return operand; +} + + LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } LPlatformChunk* LChunkBuilder::Build() { - ASSERT(is_unused()); + DCHECK(is_unused()); chunk_ = new(zone()) LPlatformChunk(info_, graph_); LPhase phase("L_Building chunk", chunk_); status_ = BUILDING; @@ -591,7 +590,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) { - ASSERT(is_building()); + DCHECK(is_building()); current_block_ = block; if (block->IsStartBlock()) { @@ -600,14 +599,14 @@ } else if (block->predecessors()->length() == 1) { // We have a single predecessor => copy environment and outgoing // argument count from the predecessor. 
- ASSERT(block->phis()->length() == 0); + DCHECK(block->phis()->length() == 0); HBasicBlock* pred = block->predecessors()->at(0); HEnvironment* last_environment = pred->last_environment(); - ASSERT(last_environment != NULL); + DCHECK(last_environment != NULL); // Only copy the environment, if it is later used again. if (pred->end()->SecondSuccessor() == NULL) { - ASSERT(pred->end()->FirstSuccessor() == block); + DCHECK(pred->end()->FirstSuccessor() == block); } else { if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) || (pred->end()->SecondSuccessor()->block_id() > block->block_id())) { @@ -615,7 +614,7 @@ } } block->UpdateEnvironment(last_environment); - ASSERT(pred->argument_count() >= 0); + DCHECK(pred->argument_count() >= 0); argument_count_ = pred->argument_count(); } else { // We are at a state join => process phis. @@ -668,7 +667,7 @@ if (current->OperandCount() == 0) { instr = DefineAsRegister(new(zone()) LDummy()); } else { - ASSERT(!current->OperandAt(0)->IsControlInstruction()); + DCHECK(!current->OperandAt(0)->IsControlInstruction()); instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(current->OperandAt(0)))); } @@ -680,75 +679,90 @@ chunk_->AddInstruction(dummy, current_block_); } } else { - instr = current->CompileToLithium(this); + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } } argument_count_ += current->argument_delta(); - ASSERT(argument_count_ >= 0); + DCHECK(argument_count_ >= 0); if (instr != NULL) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
- instr->set_hydrogen_value(current); + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. + instr->set_hydrogen_value(hydrogen_val); #if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, the register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. - // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - ASSERT(fixed == 0 || used_at_start == 0); + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. 
If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. In this case, the register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; } + DCHECK(fixed == 0 || used_at_start == 0); + } #endif - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + chunk_->AddInstruction(instr, current_block_); - if (instr->IsCall()) { - HValue* hydrogen_value_for_lazy_bailout = current; - LInstruction* instruction_needing_environment = NULL; - if (current->HasObservableSideEffects()) { - HSimulate* sim = HSimulate::cast(current->next()); - instruction_needing_environment = instr; - 
sim->ReplayEnvironment(current_block_->last_environment()); - hydrogen_value_for_lazy_bailout = sim; - } - LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); - bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); - chunk_->AddInstruction(bailout, current_block_); - if (instruction_needing_environment != NULL) { - // Store the lazy deopt environment with the instruction if needed. - // Right now it is only used for LInstanceOfKnownGlobal. - instruction_needing_environment-> - SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); - } + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. 
+ instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); } } - current_instruction_ = old_current; } @@ -772,9 +786,9 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); if (op == Token::MOD) { LOperand* left = UseFixedDouble(instr->left(), d0); @@ -792,7 +806,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, HBinaryOperation* instr) { - ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) || + DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) || (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) || (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) || (op == Token::BIT_OR) || (op == Token::BIT_AND) || @@ -802,9 +816,9 @@ // TODO(jbramley): Once we've implemented smi support for all arithmetic // operations, these assertions should check IsTagged(). 
- ASSERT(instr->representation().IsSmiOrTagged()); - ASSERT(left->representation().IsSmiOrTagged()); - ASSERT(right->representation().IsSmiOrTagged()); + DCHECK(instr->representation().IsSmiOrTagged()); + DCHECK(left->representation().IsSmiOrTagged()); + DCHECK(right->representation().IsSmiOrTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left_operand = UseFixed(left, x1); @@ -844,8 +858,14 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + + LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr); + if (shifted_operation != NULL) { + return shifted_operation; + } + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseRegisterOrConstantAtStart(instr->BetterRightOperand()); @@ -857,16 +877,16 @@ } return result; } else if (instr->representation().IsExternal()) { - ASSERT(instr->left()->representation().IsExternal()); - ASSERT(instr->right()->representation().IsInteger32()); - ASSERT(!instr->CheckFlag(HValue::kCanOverflow)); + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterOrConstantAtStart(instr->right()); return DefineAsRegister(new(zone()) LAddE(left, right)); } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::ADD, instr); } else { - ASSERT(instr->representation().IsTagged()); + DCHECK(instr->representation().IsTagged()); return DoArithmeticT(Token::ADD, instr); } } @@ -922,9 +942,14 @@ LInstruction* 
LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); + + LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr); + if (shifted_operation != NULL) { + return shifted_operation; + } LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = @@ -947,16 +972,20 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - LOperand* value = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = UseRegister(instr->length()); - return AssignEnvironment(new(zone()) LBoundsCheck(value, length)); + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? UseRegisterOrConstantAtStart(instr->length()) + : UseRegisterAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } + return result; } LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - HValue* value = instr->value(); Representation r = value->representation(); HType type = value->type(); @@ -965,7 +994,7 @@ // These representations have simple checks that cannot deoptimize. 
return new(zone()) LBranch(UseRegister(value), NULL, NULL); } else { - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() || type.IsHeapNumber()) { // These types have simple checks that cannot deoptimize. @@ -985,7 +1014,7 @@ if (expected.IsGeneric() || expected.IsEmpty()) { // The generic case cannot deoptimize because it already supports every // possible input type. - ASSERT(needs_temps); + DCHECK(needs_temps); return new(zone()) LBranch(UseRegister(value), temp1, temp2); } else { return AssignEnvironment( @@ -1007,7 +1036,7 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor( HCallWithDescriptor* instr) { - const CallInterfaceDescriptor* descriptor = instr->descriptor(); + const InterfaceDescriptor* descriptor = instr->descriptor(); LOperand* target = UseRegisterOrConstantAtStart(instr->target()); ZoneList<LOperand*> ops(instr->OperandCount(), zone()); @@ -1074,63 +1103,59 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); - + HValue* val = instr->value(); if (from.IsSmi()) { if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LDummyUse(value)); } from = Representation::Tagged(); } - if (from.IsTagged()) { if (to.IsDouble()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); LOperand* temp = TempRegister(); - LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp); - return AssignEnvironment(DefineAsRegister(res)); + LInstruction* result = + DefineAsRegister(new(zone()) LNumberUntagD(value, temp)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } else if (to.IsSmi()) { - LOperand* value = UseRegister(instr->value()); - if (instr->value()->type().IsSmi()) { + LOperand* value = UseRegister(val); + if (val->type().IsSmi()) { return 
DefineSameAsFirst(new(zone()) LDummyUse(value)); } return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); } else { - ASSERT(to.IsInteger32()); - LInstruction* res = NULL; - - if (instr->value()->type().IsSmi() || - instr->value()->representation().IsSmi()) { - LOperand* value = UseRegisterAtStart(instr->value()); - res = DefineAsRegister(new(zone()) LSmiUntag(value, false)); + DCHECK(to.IsInteger32()); + if (val->type().IsSmi() || val->representation().IsSmi()) { + LOperand* value = UseRegisterAtStart(val); + return DefineAsRegister(new(zone()) LSmiUntag(value, false)); } else { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); - LOperand* temp2 = instr->CanTruncateToInt32() ? NULL : FixedTemp(d24); - res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2)); - res = AssignEnvironment(res); + LOperand* temp2 = instr->CanTruncateToInt32() + ? NULL : TempDoubleRegister(); + LInstruction* result = + DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } - - return res; } } else if (from.IsDouble()) { if (to.IsTagged()) { info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); return AssignPointerMap(DefineAsRegister(result)); } else { - ASSERT(to.IsSmi() || to.IsInteger32()); - LOperand* value = UseRegister(instr->value()); - + DCHECK(to.IsSmi() || to.IsInteger32()); if (instr->CanTruncateToInt32()) { - LTruncateDoubleToIntOrSmi* result = - new(zone()) LTruncateDoubleToIntOrSmi(value); - return DefineAsRegister(result); + LOperand* value = UseRegister(val); + return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value)); } else { + LOperand* value = 
UseRegister(val); LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value); return AssignEnvironment(DefineAsRegister(result)); } @@ -1138,37 +1163,35 @@ } else if (from.IsInteger32()) { info()->MarkAsDeferredCalling(); if (to.IsTagged()) { - if (instr->value()->CheckFlag(HInstruction::kUint32)) { - LOperand* value = UseRegister(instr->value()); - LNumberTagU* result = new(zone()) LNumberTagU(value, - TempRegister(), - TempRegister()); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + if (val->CheckFlag(HInstruction::kUint32)) { + LOperand* value = UseRegister(val); + LNumberTagU* result = + new(zone()) LNumberTagU(value, TempRegister(), TempRegister()); + return AssignPointerMap(DefineAsRegister(result)); } else { STATIC_ASSERT((kMinInt == Smi::kMinValue) && (kMaxInt == Smi::kMaxValue)); - LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* value = UseRegisterAtStart(val); return DefineAsRegister(new(zone()) LSmiTag(value)); } } else if (to.IsSmi()) { - LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* value = UseRegisterAtStart(val); LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); - if (instr->value()->CheckFlag(HInstruction::kUint32)) { + if (val->CheckFlag(HInstruction::kUint32)) { result = AssignEnvironment(result); } return result; } else { - ASSERT(to.IsDouble()); - if (instr->value()->CheckFlag(HInstruction::kUint32)) { + DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { return DefineAsRegister( - new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value()))); + new(zone()) LUint32ToDouble(UseRegisterAtStart(val))); } else { return DefineAsRegister( - new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value()))); + new(zone()) LInteger32ToDouble(UseRegisterAtStart(val))); } } } - UNREACHABLE(); return NULL; } @@ -1189,27 +1212,25 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - if (instr->CanOmitMapChecks()) { - // LCheckMaps does 
nothing in this case. - return new(zone()) LCheckMaps(NULL); - } else { - LOperand* value = UseRegisterAtStart(instr->value()); - LOperand* temp = TempRegister(); - - if (instr->has_migration_target()) { - info()->MarkAsDeferredCalling(); - LInstruction* result = new(zone()) LCheckMaps(value, temp); - return AssignPointerMap(AssignEnvironment(result)); - } else { - return AssignEnvironment(new(zone()) LCheckMaps(value, temp)); - } + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* temp = TempRegister(); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value, temp)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); } + return result; } LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckNonSmi(value)); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = AssignEnvironment(result); + } + return result; } @@ -1228,18 +1249,18 @@ } else if (input_rep.IsInteger32()) { return DefineAsRegister(new(zone()) LClampIToUint8(reg)); } else { - ASSERT(input_rep.IsSmiOrTagged()); + DCHECK(input_rep.IsSmiOrTagged()); return AssignEnvironment( DefineAsRegister(new(zone()) LClampTToUint8(reg, TempRegister(), - FixedTemp(d24)))); + TempDoubleRegister()))); } } LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LClassOfTestAndBranch(value, TempRegister(), @@ -1250,34 +1271,31 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch( HCompareNumericAndBranch* instr) { Representation r = instr->representation(); - if 
(r.IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(r)); - ASSERT(instr->right()->representation().Equals(r)); + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); LOperand* left = UseRegisterOrConstantAtStart(instr->left()); LOperand* right = UseRegisterOrConstantAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); } else { - ASSERT(r.IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); - // TODO(all): In fact the only case that we can handle more efficiently is - // when one of the operand is the constant 0. Currently the MacroAssembler - // will be able to cope with any constant by loading it into an internal - // scratch register. This means that if the constant is used more that once, - // it will be loaded multiple times. Unfortunatly crankshaft already - // duplicates constant loads, but we should modify the code below once this - // issue has been addressed in crankshaft. 
- LOperand* left = UseRegisterOrConstantAtStart(instr->left()); - LOperand* right = UseRegisterOrConstantAtStart(instr->right()); + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + if (instr->left()->IsConstant() && instr->right()->IsConstant()) { + LOperand* left = UseConstant(instr->left()); + LOperand* right = UseConstant(instr->right()); + return new(zone()) LCompareNumericAndBranch(left, right); + } + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); } } LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), x1); LOperand* right = UseFixed(instr->right(), x0); @@ -1300,9 +1318,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1310,10 +1325,7 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LCmpMapAndBranch(value, temp); @@ -1374,9 +1386,9 @@ LInstruction* 
LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( @@ -1392,9 +1404,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) @@ -1411,15 +1423,19 @@ LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) ? 
NULL : TempRegister(); - LDivI* div = new(zone()) LDivI(dividend, divisor, temp); - return AssignEnvironment(DefineAsRegister(div)); + LInstruction* result = + DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); + if (!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + result = AssignEnvironment(result); + } + return result; } @@ -1447,6 +1463,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), instr->arguments_count(), @@ -1489,7 +1506,7 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex( HGetCachedArrayIndex* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); } @@ -1502,7 +1519,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( HHasCachedArrayIndexAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LHasCachedArrayIndexAndBranch( UseRegisterAtStart(instr->value()), TempRegister()); } @@ -1510,7 +1527,7 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( HHasInstanceTypeAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister()); } @@ -1561,8 +1578,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( HCompareMinusZeroAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* value = UseRegister(instr->value()); LOperand* scratch = 
TempRegister(); return new(zone()) LCompareMinusZeroAndBranch(value, scratch); @@ -1570,7 +1585,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); @@ -1579,7 +1594,7 @@ LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LIsStringAndBranch(value, temp); @@ -1587,14 +1602,14 @@ LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value())); } LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( HIsUndetectableAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LIsUndetectableAndBranch(value, TempRegister()); } @@ -1607,7 +1622,7 @@ if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - ASSERT(instr->argument_delta() == -argument_count); + DCHECK(instr->argument_delta() == -argument_count); } HEnvironment* outer = @@ -1622,7 +1637,10 @@ LOperand* context = UseRegisterAtStart(instr->value()); LInstruction* result = DefineAsRegister(new(zone()) LLoadContextSlot(context)); - return instr->RequiresHoleCheck() ? 
AssignEnvironment(result) : result; + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; } @@ -1645,18 +1663,24 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* global_object = UseFixed(instr->global_object(), x0); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadGlobalGeneric* result = - new(zone()) LLoadGlobalGeneric(context, global_object); + new(zone()) LLoadGlobalGeneric(context, global_object, vector); return MarkAsCall(DefineFixed(result, x0), instr); } LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - ASSERT(instr->key()->representation().IsSmiOrInteger32()); + DCHECK(instr->key()->representation().IsSmiOrInteger32()); ElementsKind elements_kind = instr->elements_kind(); LOperand* elements = UseRegister(instr->elements()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* key = UseRegisterOrConstant(instr->key()); if (!instr->is_typed_elements()) { if (instr->representation().IsDouble()) { @@ -1671,7 +1695,7 @@ ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } else { - ASSERT(instr->representation().IsSmiOrTagged() || + DCHECK(instr->representation().IsSmiOrTagged() || instr->representation().IsInteger32()); LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister(); LLoadKeyedFixed* result = @@ -1681,34 +1705,36 @@ : DefineAsRegister(result); } } else { - ASSERT((instr->representation().IsInteger32() && + DCHECK((instr->representation().IsInteger32() && !IsDoubleOrFloatElementsKind(instr->elements_kind())) || (instr->representation().IsDouble() && IsDoubleOrFloatElementsKind(instr->elements_kind()))); LOperand* temp = instr->key()->IsConstant() ? 
NULL : TempRegister(); - LLoadKeyedExternal* result = - new(zone()) LLoadKeyedExternal(elements, key, temp); - // An unsigned int array load might overflow and cause a deopt. Make sure it - // has an environment. - if (instr->RequiresHoleCheck() || - elements_kind == EXTERNAL_UINT32_ELEMENTS || - elements_kind == UINT32_ELEMENTS) { - return AssignEnvironment(DefineAsRegister(result)); - } else { - return DefineAsRegister(result); + LInstruction* result = DefineAsRegister( + new(zone()) LLoadKeyedExternal(elements, key, temp)); + if ((elements_kind == EXTERNAL_UINT32_ELEMENTS || + elements_kind == UINT32_ELEMENTS) && + !instr->CheckFlag(HInstruction::kUint32)) { + result = AssignEnvironment(result); } + return result; } } LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), x1); - LOperand* key = UseFixed(instr->key(), x0); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LInstruction* result = - DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0); + DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector), + x0); return MarkAsCall(result, instr); } @@ -1721,9 +1747,14 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), x0); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LInstruction* result = - DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0); + DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0); return MarkAsCall(result, instr); 
} @@ -1740,9 +1771,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegisterAtStart(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I( @@ -1756,9 +1787,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp = @@ -1800,14 +1831,14 @@ LOperand* left = NULL; LOperand* right = NULL; if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); left = UseRegisterAtStart(instr->BetterLeftOperand()); right = UseRegisterOrConstantAtStart(instr->BetterRightOperand()); } else { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - 
ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); } @@ -1816,14 +1847,15 @@ LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegisterAtStart(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( dividend, divisor)); - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { result = AssignEnvironment(result); } return result; @@ -1831,9 +1863,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp = TempRegister(); @@ -1847,9 +1879,9 @@ LInstruction* LChunkBuilder::DoModI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - 
ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor)); @@ -1880,18 +1912,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); - bool needs_environment = can_overflow || bailout_on_minus_zero; HValue* least_const = instr->BetterLeftOperand(); HValue* most_const = instr->BetterRightOperand(); - LOperand* left; - // LMulConstI can handle a subset of constants: // With support for overflow detection: // -1, 0, 1, 2 @@ -1911,26 +1940,27 @@ IsPowerOf2(constant_abs - 1))))) { LConstantOperand* right = UseConstant(most_const); bool need_register = IsPowerOf2(constant_abs) && !small_constant; - left = need_register ? UseRegister(least_const) - : UseRegisterAtStart(least_const); - LMulConstIS* mul = new(zone()) LMulConstIS(left, right); - if (needs_environment) AssignEnvironment(mul); - return DefineAsRegister(mul); + LOperand* left = need_register ? 
UseRegister(least_const) + : UseRegisterAtStart(least_const); + LInstruction* result = + DefineAsRegister(new(zone()) LMulConstIS(left, right)); + if ((bailout_on_minus_zero && constant <= 0) || can_overflow) { + result = AssignEnvironment(result); + } + return result; } } - left = UseRegisterAtStart(least_const); // LMulI/S can handle all cases, but it requires that a register is // allocated for the second operand. - LInstruction* result; - if (instr->representation().IsSmi()) { - LOperand* right = UseRegisterAtStart(most_const); - result = DefineAsRegister(new(zone()) LMulS(left, right)); - } else { - LOperand* right = UseRegisterAtStart(most_const); - result = DefineAsRegister(new(zone()) LMulI(left, right)); + LOperand* left = UseRegisterAtStart(least_const); + LOperand* right = UseRegisterAtStart(most_const); + LInstruction* result = instr->representation().IsSmi() + ? DefineAsRegister(new(zone()) LMulS(left, right)) + : DefineAsRegister(new(zone()) LMulI(left, right)); + if ((bailout_on_minus_zero && least_const != most_const) || can_overflow) { + result = AssignEnvironment(result); } - if (needs_environment) AssignEnvironment(result); return result; } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::MUL, instr); @@ -1941,7 +1971,7 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - ASSERT(argument_count_ == 0); + DCHECK(argument_count_ == 0); allocator_->MarkAsOsrEntry(); current_block_->last_environment()->set_ast_id(instr->ast_id()); return AssignEnvironment(new(zone()) LOsrEntry); @@ -1954,22 +1984,22 @@ int spill_index = chunk_->GetParameterStackSlot(instr->index()); return DefineAsSpilled(result, spill_index); } else { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = - info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + info()->code_stub()->GetInterfaceDescriptor(); int index = static_cast<int>(instr->index()); - Register reg = 
descriptor->GetParameterRegister(index); + Register reg = descriptor->GetEnvironmentParameterRegister(index); return DefineFixed(result, reg); } } LInstruction* LChunkBuilder::DoPower(HPower* instr) { - ASSERT(instr->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); // We call a C function for double power. It can't trigger a GC. // We need to use fixed result register for the call. Representation exponent_type = instr->right()->representation(); - ASSERT(instr->left()->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); LOperand* left = UseFixedDouble(instr->left(), d0); LOperand* right = exponent_type.IsInteger32() ? UseFixed(instr->right(), x12) @@ -1983,9 +2013,21 @@ } -LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - LOperand* argument = UseRegister(instr->argument()); - return new(zone()) LPushArgument(argument); +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + AddInstruction(new(zone()) LPreparePushArguments(argc), instr); + + LPushArguments* push_args = new(zone()) LPushArguments(zone()); + + for (int i = 0; i < argc; ++i) { + if (push_args->ShouldSplitPush()) { + AddInstruction(push_args, instr); + push_args = new(zone()) LPushArguments(zone()); + } + push_args->AddArgument(UseRegister(instr->argument(i))); + } + + return push_args; } @@ -1998,16 +2040,15 @@ LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { HValue* value = instr->value(); - ASSERT(value->representation().IsDouble()); + DCHECK(value->representation().IsDouble()); return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); } LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) { - LOperand* lo = UseRegister(instr->lo()); + LOperand* lo = UseRegisterAndClobber(instr->lo()); LOperand* hi = UseRegister(instr->hi()); - LOperand* temp = TempRegister(); - return DefineAsRegister(new(zone()) LConstructDouble(hi, lo, 
temp)); + return DefineAsRegister(new(zone()) LConstructDouble(hi, lo)); } @@ -2045,16 +2086,131 @@ } +HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val, + HValue** left) { + if (!val->representation().IsInteger32()) return NULL; + if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL; + + HBinaryOperation* hinstr = HBinaryOperation::cast(val); + HValue* hleft = hinstr->left(); + HValue* hright = hinstr->right(); + DCHECK(hleft->representation().Equals(hinstr->representation())); + DCHECK(hright->representation().Equals(hinstr->representation())); + + if ((hright->IsConstant() && + LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) || + (hinstr->IsCommutative() && hleft->IsConstant() && + LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) { + // The constant operand will likely fit in the immediate field. We are + // better off with + // lsl x8, x9, #imm + // add x0, x8, #imm2 + // than with + // mov x16, #imm2 + // add x0, x16, x9 LSL #imm + return NULL; + } + + HBitwiseBinaryOperation* shift = NULL; + // TODO(aleram): We will miss situations where a shift operation is used by + // different instructions both as a left and right operands. + if (hright->IsBitwiseBinaryShift() && + HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) { + shift = HBitwiseBinaryOperation::cast(hright); + if (left != NULL) { + *left = hleft; + } + } else if (hinstr->IsCommutative() && + hleft->IsBitwiseBinaryShift() && + HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) { + shift = HBitwiseBinaryOperation::cast(hleft); + if (left != NULL) { + *left = hright; + } + } else { + return NULL; + } + + if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) { + // Shifts right by zero can deoptimize. 
+ return NULL; + } + + return shift; +} + + +bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) { + if (!shift->representation().IsInteger32()) { + return false; + } + for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) { + if (shift != CanTransformToShiftedOp(it.value())) { + return false; + } + } + return true; +} + + +LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand( + HBinaryOperation* instr) { + HValue* left; + HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left); + + if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) { + return DoShiftedBinaryOp(instr, left, shift); + } + return NULL; +} + + +LInstruction* LChunkBuilder::DoShiftedBinaryOp( + HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) { + DCHECK(hshift->IsBitwiseBinaryShift()); + DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0)); + + LTemplateResultInstruction<1>* res; + LOperand* left = UseRegisterAtStart(hleft); + LOperand* right = UseRegisterAtStart(hshift->left()); + LOperand* shift_amount = UseConstant(hshift->right()); + Shift shift_op; + switch (hshift->opcode()) { + case HValue::kShl: shift_op = LSL; break; + case HValue::kShr: shift_op = LSR; break; + case HValue::kSar: shift_op = ASR; break; + default: UNREACHABLE(); shift_op = NO_SHIFT; + } + + if (hinstr->IsBitwise()) { + res = new(zone()) LBitI(left, right, shift_op, shift_amount); + } else if (hinstr->IsAdd()) { + res = new(zone()) LAddI(left, right, shift_op, shift_amount); + } else { + DCHECK(hinstr->IsSub()); + res = new(zone()) LSubI(left, right, shift_op, shift_amount); + } + if (hinstr->CheckFlag(HValue::kCanOverflow)) { + AssignEnvironment(res); + } + return DefineAsRegister(res); +} + + LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { if (instr->representation().IsTagged()) { return DoArithmeticT(op, instr); } - ASSERT(instr->representation().IsInteger32() || + 
DCHECK(instr->representation().IsInteger32() || instr->representation().IsSmi()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + + if (ShiftCanBeOptimizedAway(instr)) { + return NULL; + } LOperand* left = instr->representation().IsSmi() ? UseRegister(instr->left()) @@ -2066,8 +2222,7 @@ int constant_value = 0; if (right_value->IsConstant()) { right = UseConstant(right_value); - HConstant* constant = HConstant::cast(right_value); - constant_value = constant->Integer32Value() & 0x1f; + constant_value = JSShiftAmountFromHConstant(right_value); } else { right = UseRegisterAtStart(right_value); if (op == Token::ROR) { @@ -2090,7 +2245,7 @@ if (instr->representation().IsInteger32()) { result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); } else { - ASSERT(instr->representation().IsSmi()); + DCHECK(instr->representation().IsSmi()); result = DefineAsRegister( new(zone()) LShiftS(op, left, right, temp, does_deopt)); } @@ -2130,7 +2285,7 @@ LOperand* context = UseFixed(instr->context(), cp); return MarkAsCall(new(zone()) LStackCheck(context), instr); } else { - ASSERT(instr->is_backwards_branch()); + DCHECK(instr->is_backwards_branch()); LOperand* context = UseAny(instr->context()); return AssignEnvironment( AssignPointerMap(new(zone()) LStackCheck(context))); @@ -2160,7 +2315,10 @@ value = UseRegister(instr->value()); } LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp); - return instr->RequiresHoleCheck() ? 
AssignEnvironment(result) : result; + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; } @@ -2177,10 +2335,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + LOperand* key = UseRegisterOrConstant(instr->key()); LOperand* temp = NULL; LOperand* elements = NULL; LOperand* val = NULL; - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); if (!instr->is_typed_elements() && instr->value()->representation().IsTagged() && @@ -2196,23 +2354,23 @@ } if (instr->is_typed_elements()) { - ASSERT((instr->value()->representation().IsInteger32() && + DCHECK((instr->value()->representation().IsInteger32() && !IsDoubleOrFloatElementsKind(instr->elements_kind())) || (instr->value()->representation().IsDouble() && IsDoubleOrFloatElementsKind(instr->elements_kind()))); - ASSERT((instr->is_fixed_typed_array() && + DCHECK((instr->is_fixed_typed_array() && instr->elements()->representation().IsTagged()) || (instr->is_external() && instr->elements()->representation().IsExternal())); return new(zone()) LStoreKeyedExternal(elements, key, val, temp); } else if (instr->value()->representation().IsDouble()) { - ASSERT(instr->elements()->representation().IsTagged()); + DCHECK(instr->elements()->representation().IsTagged()); return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp); } else { - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsSmiOrTagged() || + DCHECK(instr->elements()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsSmiOrTagged() || instr->value()->representation().IsInteger32()); return new(zone()) LStoreKeyedFixed(elements, key, val, temp); } @@ -2221,13 +2379,14 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), x2); - LOperand* key = UseFixed(instr->key(), x1); - 
LOperand* value = UseFixed(instr->value(), x0); - - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsTagged()); + LOperand* object = UseFixed(instr->object(), + KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return MarkAsCall( new(zone()) LStoreKeyedGeneric(context, object, key, value), instr); @@ -2259,20 +2418,15 @@ temp0 = TempRegister(); } - LStoreNamedField* result = - new(zone()) LStoreNamedField(object, value, temp0, temp1); - if (instr->field_representation().IsHeapObject() && - !instr->value()->type().IsHeapObject()) { - return AssignEnvironment(result); - } - return result; + return new(zone()) LStoreNamedField(object, value, temp0, temp1); } LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), x1); - LOperand* value = UseFixed(instr->value(), x0); + LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister()); + LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value); return MarkAsCall(result, instr); } @@ -2294,7 +2448,7 @@ LOperand* context = UseAny(instr->context()); LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(context, string, index); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } @@ -2309,8 +2463,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch( HStringCompareAndBranch* instr) { - 
ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), x1); LOperand* right = UseFixed(instr->right(), x0); @@ -2322,8 +2476,14 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + + LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr); + if (shifted_operation != NULL) { + return shifted_operation; + } + LOperand *left; if (instr->left()->IsConstant() && (HConstant::cast(instr->left())->Integer32Value() == 0)) { @@ -2365,17 +2525,18 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - LOperand* object = UseRegister(instr->object()); if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { + LOperand* object = UseRegister(instr->object()); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, NULL, TempRegister(), TempRegister()); return result; } else { + LOperand* object = UseFixed(instr->object(), x0); LOperand* context = UseFixed(instr->context(), cp); LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, context, TempRegister()); - return AssignPointerMap(result); + new(zone()) LTransitionElementsKind(object, context, NULL, NULL); + return MarkAsCall(result, instr); } } @@ -2404,9 +2565,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if 
(goto_instr != NULL) return goto_instr; - // We only need temp registers in some cases, but we can't dereference the // instr->type_literal() handle to test that here. LOperand* temp1 = TempRegister(); @@ -2429,29 +2587,21 @@ LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LOperand* temp3 = TempRegister(); - LMathAbsTagged* result = - new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + LInstruction* result = DefineAsRegister( + new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3)); + return AssignEnvironment(AssignPointerMap(result)); } else { LOperand* input = UseRegisterAtStart(instr->value()); - LMathAbs* result = new(zone()) LMathAbs(input); - if (r.IsDouble()) { - // The Double case can never fail so it doesn't need an environment. - return DefineAsRegister(result); - } else { - ASSERT(r.IsInteger32() || r.IsSmi()); - // The Integer32 and Smi cases need an environment because they can - // deoptimize on minimum representable number. - return AssignEnvironment(DefineAsRegister(result)); - } + LInstruction* result = DefineAsRegister(new(zone()) LMathAbs(input)); + if (!r.IsDouble()) result = AssignEnvironment(result); + return result; } } case kMathExp: { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegister(instr->value()); - // TODO(all): Implement TempFPRegister. - LOperand* double_temp1 = FixedTemp(d24); // This was chosen arbitrarily. 
+ LOperand* double_temp1 = TempDoubleRegister(); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LOperand* temp3 = TempRegister(); @@ -2460,47 +2610,58 @@ return DefineAsRegister(result); } case kMathFloor: { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->value()->representation().IsDouble()); - // TODO(jbramley): ARM64 can easily handle a double argument with frintm, - // but we're never asked for it here. At the moment, we fall back to the - // runtime if the result doesn't fit, like the other architectures. + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegisterAtStart(instr->value()); - LMathFloor* result = new(zone()) LMathFloor(input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + if (instr->representation().IsInteger32()) { + LMathFloorI* result = new(zone()) LMathFloorI(input); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + } else { + DCHECK(instr->representation().IsDouble()); + LMathFloorD* result = new(zone()) LMathFloorD(input); + return DefineAsRegister(result); + } } case kMathLog: { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseFixedDouble(instr->value(), d0); LMathLog* result = new(zone()) LMathLog(input); return MarkAsCall(DefineFixedDouble(result, d0), instr); } case kMathPowHalf: { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegister(instr->value()); return DefineAsRegister(new(zone()) LMathPowHalf(input)); } case kMathRound: { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->value()->representation().IsDouble()); - // TODO(jbramley): As with kMathFloor, we can 
probably handle double - // results fairly easily, but we are never asked for them. + DCHECK(instr->value()->representation().IsDouble()); + LOperand* input = UseRegister(instr->value()); + if (instr->representation().IsInteger32()) { + LOperand* temp = TempDoubleRegister(); + LMathRoundI* result = new(zone()) LMathRoundI(input, temp); + return AssignEnvironment(DefineAsRegister(result)); + } else { + DCHECK(instr->representation().IsDouble()); + LMathRoundD* result = new(zone()) LMathRoundD(input); + return DefineAsRegister(result); + } + } + case kMathFround: { + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegister(instr->value()); - LOperand* temp = FixedTemp(d24); // Choosen arbitrarily. - LMathRound* result = new(zone()) LMathRound(input, temp); - return AssignEnvironment(DefineAsRegister(result)); + LMathFround* result = new (zone()) LMathFround(input); + return DefineAsRegister(result); } case kMathSqrt: { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LMathSqrt(input)); } case kMathClz32: { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->value()->representation().IsInteger32()); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->value()->representation().IsInteger32()); LOperand* input = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LMathClz32(input)); } @@ -2560,8 +2721,10 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { LOperand* object = UseRegisterAtStart(instr->object()); - LOperand* index = UseRegister(instr->index()); - return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index)); + LOperand* index = UseRegisterAndClobber(instr->index()); + LLoadFieldByIndex* load = new(zone()) 
LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); } @@ -2573,4 +2736,20 @@ } +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, cp), instr); +} + + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm64/lithium-arm64.h nodejs-0.11.15/deps/v8/src/arm64/lithium-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/lithium-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/lithium-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_LITHIUM_ARM64_H_ #define V8_ARM64_LITHIUM_ARM64_H_ -#include "hydrogen.h" -#include "lithium-allocator.h" -#include "lithium.h" -#include "safepoint-table.h" -#include "utils.h" +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -40,157 +17,163 @@ // Forward declarations. 
class LCodeGen; -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddE) \ - V(AddI) \ - V(AddS) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BitS) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallFunction) \ - V(CallJSFunction) \ - V(CallNew) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CallStub) \ - V(CallWithDescriptor) \ - V(CheckInstanceType) \ - V(CheckMapValue) \ - V(CheckMaps) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CmpHoleAndBranchD) \ - V(CmpHoleAndBranchT) \ - V(CmpMapAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpT) \ - V(CompareMinusZeroAndBranch) \ - V(CompareNumericAndBranch) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(ConstructDouble) \ - V(Context) \ - V(DateField) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleBits) \ - V(DoubleToIntOrSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(FlooringDivI) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(FunctionLiteral) \ - V(GetCachedArrayIndex) \ - V(Goto) \ - V(HasCachedArrayIndexAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstanceOf) \ - V(InstanceOfKnownGlobal) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsConstructCallAndBranch) \ - V(IsObjectAndBranch) \ - V(IsSmiAndBranch) \ - V(IsStringAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ - V(LoadGlobalGeneric) \ - V(LoadKeyedExternal) \ - V(LoadKeyedFixed) \ - V(LoadKeyedFixedDouble) \ - V(LoadKeyedGeneric) \ - V(LoadNamedField) \ - V(LoadNamedGeneric) \ - 
V(LoadRoot) \ - V(MapEnumLength) \ - V(MathAbs) \ - V(MathAbsTagged) \ - V(MathClz32) \ - V(MathExp) \ - V(MathFloor) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulConstIS) \ - V(MulI) \ - V(MulS) \ - V(NumberTagD) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(PushArgument) \ - V(RegExpLiteral) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(ShiftS) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreGlobalCell) \ - V(StoreKeyedExternal) \ - V(StoreKeyedFixed) \ - V(StoreKeyedFixedDouble) \ - V(StoreKeyedGeneric) \ - V(StoreNamedField) \ - V(StoreNamedGeneric) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(SubS) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(ToFastProperties) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(TruncateDoubleToIntOrSmi) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddE) \ + V(AddI) \ + V(AddS) \ + V(Allocate) \ + V(AllocateBlockContext) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BitS) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallFunction) \ + V(CallJSFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CallWithDescriptor) \ + V(CheckInstanceType) \ + V(CheckMapValue) \ + V(CheckMaps) \ + V(CheckNonSmi) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8) \ + V(ClassOfTestAndBranch) \ + V(CmpHoleAndBranchD) \ + V(CmpHoleAndBranchT) \ + V(CmpMapAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpT) \ + V(CompareMinusZeroAndBranch) \ + V(CompareNumericAndBranch) \ + 
V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleBits) \ + V(DoubleToIntOrSmi) \ + V(Drop) \ + V(Dummy) \ + V(DummyUse) \ + V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsSmiAndBranch) \ + V(IsStringAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyedExternal) \ + V(LoadKeyedFixed) \ + V(LoadKeyedFixedDouble) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(LoadRoot) \ + V(MapEnumLength) \ + V(MathAbs) \ + V(MathAbsTagged) \ + V(MathClz32) \ + V(MathExp) \ + V(MathFloorD) \ + V(MathFloorI) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRoundD) \ + V(MathRoundI) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulConstIS) \ + V(MulI) \ + V(MulS) \ + V(NumberTagD) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PreparePushArguments) \ + V(PushArguments) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(ShiftS) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyedExternal) \ + V(StoreKeyedFixed) \ + 
V(StoreKeyedFixedDouble) \ + V(StoreKeyedGeneric) \ + V(StoreNamedField) \ + V(StoreNamedGeneric) \ + V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(SubS) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(TruncateDoubleToIntOrSmi) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ V(WrapReceiver) @@ -203,7 +186,7 @@ return mnemonic; \ } \ static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ + DCHECK(instr->Is##type()); \ return reinterpret_cast<L##type*>(instr); \ } @@ -251,6 +234,9 @@ virtual bool IsControl() const { return false; } + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + void set_environment(LEnvironment* env) { environment_ = env; } LEnvironment* environment() const { return environment_; } bool HasEnvironment() const { return environment_ != NULL; } @@ -270,7 +256,9 @@ // Interface to the register allocator and iterators. 
bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall(); + } bool IsMarkedAsCall() const { return IsCall(); } virtual bool HasResult() const = 0; @@ -403,7 +391,7 @@ virtual bool IsGap() const V8_OVERRIDE { return true; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; static LGap* cast(LInstruction* instr) { - ASSERT(instr->IsGap()); + DCHECK(instr->IsGap()); return reinterpret_cast<LGap*>(instr); } @@ -464,7 +452,7 @@ class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { public: - explicit LDummy() { } + LDummy() {} DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") }; @@ -584,7 +572,14 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - LAddI(LOperand* left, LOperand* right) { + LAddI(LOperand* left, LOperand* right) + : shift_(NO_SHIFT), shift_amount_(0) { + inputs_[0] = left; + inputs_[1] = right; + } + + LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount) + : shift_(shift), shift_amount_(shift_amount) { inputs_[0] = left; inputs_[1] = right; } @@ -592,8 +587,15 @@ LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } + Shift shift() const { return shift_; } + LOperand* shift_amount() const { return shift_amount_; } + DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") DECLARE_HYDROGEN_ACCESSOR(Add) + + protected: + Shift shift_; + LOperand* shift_amount_; }; @@ -753,7 +755,14 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - LBitI(LOperand* left, LOperand* right) { + LBitI(LOperand* left, LOperand* right) + : shift_(NO_SHIFT), shift_amount_(0) { + inputs_[0] = left; + inputs_[1] = right; + } + + LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount) + : shift_(shift), shift_amount_(shift_amount) { inputs_[0] = left; inputs_[1] = right; } @@ 
-761,10 +770,17 @@ LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } + Shift shift() const { return shift_; } + LOperand* shift_amount() const { return shift_amount_; } + Token::Value op() const { return hydrogen()->op(); } DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") DECLARE_HYDROGEN_ACCESSOR(Bitwise) + + protected: + Shift shift_; + LOperand* shift_amount_; }; @@ -887,7 +903,7 @@ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { return save_doubles() == kDontSaveFPRegs; } @@ -927,7 +943,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> { public: - explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) { + explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) { inputs_[0] = value; temps_[0] = temp; } @@ -1031,17 +1047,15 @@ }; -class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 1> { +class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - LConstructDouble(LOperand* hi, LOperand* lo, LOperand* temp) { + LConstructDouble(LOperand* hi, LOperand* lo) { inputs_[0] = hi; inputs_[1] = lo; - temps_[0] = temp; } LOperand* hi() { return inputs_[0]; } LOperand* lo() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double") }; @@ -1279,6 +1293,7 @@ class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: + virtual bool IsControl() const V8_OVERRIDE { return true; } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") DECLARE_HYDROGEN_ACCESSOR(Deoptimize) }; @@ -1324,14 +1339,14 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LDivI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = right; + LDivI(LOperand* dividend, LOperand* 
divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; temps_[0] = temp; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") @@ -1508,18 +1523,18 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { public: - LCallWithDescriptor(const CallInterfaceDescriptor* descriptor, - ZoneList<LOperand*>& operands, + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, Zone* zone) : descriptor_(descriptor), - inputs_(descriptor->environment_length() + 1, zone) { - ASSERT(descriptor->environment_length() + 1 == operands.length()); + inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); inputs_.AddAll(operands, zone); } LOperand* target() const { return inputs_[0]; } - const CallInterfaceDescriptor* descriptor() { return descriptor_; } + const InterfaceDescriptor* descriptor() { return descriptor_; } private: DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") @@ -1529,7 +1544,7 @@ int arity() const { return hydrogen()->argument_count() - 1; } - const CallInterfaceDescriptor* descriptor_; + const InterfaceDescriptor* descriptor_; ZoneList<LOperand*> inputs_; // Iterator support. 
@@ -1709,15 +1724,18 @@ }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + LLoadGlobalGeneric(LOperand* context, LOperand* global_object, + LOperand* vector) { inputs_[0] = context; inputs_[1] = global_object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) @@ -1749,15 +1767,15 @@ bool is_typed_elements() const { return is_external() || is_fixed_typed_array(); } - uint32_t additional_index() const { - return this->hydrogen()->index_offset(); + uint32_t base_offset() const { + return this->hydrogen()->base_offset(); } void PrintDataTo(StringStream* stream) V8_OVERRIDE { this->elements()->PrintTo(stream); stream->Add("["); this->key()->PrintTo(stream); - if (this->hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", this->additional_index()); + if (this->base_offset() != 0) { + stream->Add(" + %d]", this->base_offset()); } else { stream->Add("]"); } @@ -1806,31 +1824,37 @@ }; -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { public: - LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) { + LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key, + LOperand* vector) { inputs_[0] = context; inputs_[1] = object; inputs_[2] = key; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) 
}; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadNamedGeneric(LOperand* context, LOperand* object) { + LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) { inputs_[0] = context; inputs_[1] = object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) @@ -1930,10 +1954,19 @@ }; -class LMathFloor V8_FINAL : public LUnaryMathOperation<0> { +// Math.floor with a double result. +class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> { public: - explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { } - DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") + explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { } + DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d") +}; + + +// Math.floor with an integer result. +class LMathFloorI V8_FINAL : public LUnaryMathOperation<0> { + public: + explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { } + DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i") }; @@ -2029,16 +2062,36 @@ }; -class LMathRound V8_FINAL : public LUnaryMathOperation<1> { +// Math.round with an integer result. +class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> { + public: + explicit LMathRoundD(LOperand* value) + : LUnaryMathOperation<0>(value) { + } + + DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d") +}; + + +// Math.round with an integer result. 
+class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> { public: - LMathRound(LOperand* value, LOperand* temp1) + LMathRoundI(LOperand* value, LOperand* temp1) : LUnaryMathOperation<1>(value) { temps_[0] = temp1; } LOperand* temp1() { return temps_[0]; } - DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") + DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i") +}; + + +class LMathFround V8_FINAL : public LUnaryMathOperation<0> { + public: + explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {} + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") }; @@ -2220,15 +2273,50 @@ }; -class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> { +class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: - explicit LPushArgument(LOperand* value) { - inputs_[0] = value; + explicit LPreparePushArguments(int argc) : argc_(argc) {} + + inline int argc() const { return argc_; } + + DECLARE_CONCRETE_INSTRUCTION(PreparePushArguments, "prepare-push-arguments") + + protected: + int argc_; +}; + + +class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> { + public: + explicit LPushArguments(Zone* zone, + int capacity = kRecommendedMaxPushedArgs) + : zone_(zone), inputs_(capacity, zone) {} + + LOperand* argument(int i) { return inputs_[i]; } + int ArgumentCount() const { return inputs_.length(); } + + void AddArgument(LOperand* arg) { inputs_.Add(arg, zone_); } + + DECLARE_CONCRETE_INSTRUCTION(PushArguments, "push-arguments") + + // It is better to limit the number of arguments pushed simultaneously to + // avoid pressure on the register allocator. + static const int kRecommendedMaxPushedArgs = 4; + bool ShouldSplitPush() const { + return inputs_.length() >= kRecommendedMaxPushedArgs; } - LOperand* value() { return inputs_[0]; } + protected: + Zone* zone_; + ZoneList<LOperand*> inputs_; + + private: + // Iterator support. 
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); } + virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; } - DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") + virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; } + virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; } }; @@ -2260,7 +2348,7 @@ return parameter_count()->IsConstantOperand(); } LConstantOperand* constant_parameter_count() { - ASSERT(has_constant_parameter_count()); + DCHECK(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } @@ -2384,22 +2472,26 @@ } bool NeedsCanonicalization() { + if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() || + hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) { + return false; + } return this->hydrogen()->NeedsCanonicalization(); } - uint32_t additional_index() const { return this->hydrogen()->index_offset(); } + uint32_t base_offset() const { return this->hydrogen()->base_offset(); } void PrintDataTo(StringStream* stream) V8_OVERRIDE { this->elements()->PrintTo(stream); stream->Add("["); this->key()->PrintTo(stream); - if (this->hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", this->additional_index()); + if (this->base_offset() != 0) { + stream->Add(" + %d] <-", this->base_offset()); } else { stream->Add("] <- "); } if (this->value() == NULL) { - ASSERT(hydrogen()->IsConstantHoleStore() && + DCHECK(hydrogen()->IsConstantHoleStore() && hydrogen()->value()->representation().IsDouble()); stream->Add("<the hole(nan)>"); } else { @@ -2417,7 +2509,7 @@ LOperand* temp) : LStoreKeyed<1>(elements, key, value) { temps_[0] = temp; - }; + } LOperand* temp() { return temps_[0]; } @@ -2431,7 +2523,7 @@ LOperand* temp) : LStoreKeyed<1>(elements, key, value) { temps_[0] = temp; - }; + } LOperand* temp() { return temps_[0]; } @@ -2445,7 +2537,7 @@ LOperand* temp) : LStoreKeyed<1>(elements, key, value) { temps_[0] = temp; - }; + } LOperand* 
temp() { return temps_[0]; } @@ -2500,7 +2592,6 @@ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> transition() const { return hydrogen()->transition_map(); } Representation representation() const { return hydrogen()->field_representation(); } @@ -2725,7 +2816,14 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - LSubI(LOperand* left, LOperand* right) { + LSubI(LOperand* left, LOperand* right) + : shift_(NO_SHIFT), shift_amount_(0) { + inputs_[0] = left; + inputs_[1] = right; + } + + LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount) + : shift_(shift), shift_amount_(shift_amount) { inputs_[0] = left; inputs_[1] = right; } @@ -2733,8 +2831,15 @@ LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } + Shift shift() const { return shift_; } + LOperand* shift_amount() const { return shift_amount_; } + DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") DECLARE_HYDROGEN_ACCESSOR(Sub) + + protected: + Shift shift_; + LOperand* shift_amount_; }; @@ -2778,7 +2883,7 @@ LTransitionElementsKind(LOperand* object, LOperand* context, LOperand* temp1, - LOperand* temp2 = NULL) { + LOperand* temp2) { inputs_[0] = object; inputs_[1] = context; temps_[0] = temp1; @@ -2915,6 +3020,35 @@ }; +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, 
"allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LWrapReceiver(LOperand* receiver, LOperand* function) { @@ -2956,8 +3090,6 @@ // Build the sequence for the graph. LPlatformChunk* Build(); - LInstruction* CheckElideControlInstruction(HControlInstruction* instr); - // Declare methods that deal with the individual node types. #define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -3042,6 +3174,11 @@ // Temporary operand that must be in a register. MUST_USE_RESULT LUnallocated* TempRegister(); + // Temporary operand that must be in a double register. + MUST_USE_RESULT LUnallocated* TempDoubleRegister(); + + MUST_USE_RESULT LOperand* FixedTemp(Register reg); + // Temporary operand that must be in a fixed double register. MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); @@ -3073,8 +3210,42 @@ LInstruction* AssignEnvironment(LInstruction* instr); void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* current); void DoBasicBlock(HBasicBlock* block); + int JSShiftAmountFromHConstant(HValue* constant) { + return HConstant::cast(constant)->Integer32Value() & 0x1f; + } + bool LikelyFitsImmField(HInstruction* instr, int imm) { + if (instr->IsAdd() || instr->IsSub()) { + return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm); + } else { + DCHECK(instr->IsBitwise()); + unsigned unused_n, unused_imm_s, unused_imm_r; + return Assembler::IsImmLogical(imm, kWRegSizeInBits, + &unused_n, &unused_imm_s, &unused_imm_r); + } + } + + // Indicates if a sequence of the form + // lsl x8, x9, #imm + // add x0, x1, x8 + // can be replaced with: + // add x0, x1, x9 LSL #imm + // If this is not possible, the function returns NULL. Otherwise it returns a + // pointer to the shift instruction that would be optimized away. 
+ HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val, + HValue** left = NULL); + // Checks if all uses of the shift operation can optimize it away. + bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift); + // Attempts to merge the binary operation and an eventual previous shift + // operation into a single operation. Returns the merged instruction on + // success, and NULL otherwise. + LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op); + LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr, + HValue* left, + HBitwiseBinaryOperation* shift); + LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); LInstruction* DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr); diff -Nru nodejs-0.11.13/deps/v8/src/arm64/lithium-codegen-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/lithium-codegen-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/lithium-codegen-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/lithium-codegen-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "arm64/lithium-codegen-arm64.h" -#include "arm64/lithium-gap-resolver-arm64.h" -#include "code-stubs.h" -#include "stub-cache.h" -#include "hydrogen-osr.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/arm64/lithium-codegen-arm64.h" +#include "src/arm64/lithium-gap-resolver-arm64.h" +#include "src/code-stubs.h" +#include "src/hydrogen-osr.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -79,7 +56,7 @@ virtual void EmitInverted(Label* label) const { if (cond_ != al) { - __ B(InvertCondition(cond_), label); + __ B(NegateCondition(cond_), label); } } @@ -109,7 +86,7 @@ } virtual void EmitInverted(Label* label) const { - __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label); + __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label); } private: @@ -159,7 +136,7 @@ break; default: __ Tst(value_, mask_); - __ B(InvertCondition(cond_), label); + __ B(NegateCondition(cond_), label); } } @@ -261,13 +238,13 @@ translation->BeginConstructStubFrame(closure_id, translation_size); break; case JS_GETTER: - ASSERT(translation_size == 1); - ASSERT(height == 0); + DCHECK(translation_size == 1); + DCHECK(height == 0); translation->BeginGetterStubFrame(closure_id); break; case JS_SETTER: - ASSERT(translation_size == 2); - ASSERT(height == 0); + DCHECK(translation_size == 2); + DCHECK(height == 0); translation->BeginSetterStubFrame(closure_id); break; case STUB: @@ -376,6 +353,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); if (!environment->HasBeenRegistered()) { int frame_count = 0; int jsframe_count = 0; @@ -408,7 +386,7 @@ RelocInfo::Mode mode, LInstruction* instr, SafepointMode safepoint_mode) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); Assembler::BlockPoolsScope scope(masm_); __ Call(code, mode); @@ -424,36 +402,38 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->function()).Is(x1)); - ASSERT(ToRegister(instr->result()).Is(x0)); + DCHECK(ToRegister(instr->context()).is(cp)); + 
DCHECK(ToRegister(instr->function()).Is(x1)); + DCHECK(ToRegister(instr->result()).Is(x0)); int arity = instr->arity(); - CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + after_push_argument_ = false; } void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(instr->IsMarkedAsCall()); - ASSERT(ToRegister(instr->constructor()).is(x1)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(instr->IsMarkedAsCall()); + DCHECK(ToRegister(instr->constructor()).is(x1)); __ Mov(x0, instr->arity()); // No cell in x2 for construct type feedback in optimized code. __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + after_push_argument_ = false; - ASSERT(ToRegister(instr->result()).is(x0)); + DCHECK(ToRegister(instr->result()).is(x0)); } void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - ASSERT(instr->IsMarkedAsCall()); - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->constructor()).is(x1)); + DCHECK(instr->IsMarkedAsCall()); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(x1)); __ Mov(x0, Operand(instr->arity())); __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); @@ -465,8 +445,8 @@ : DONT_OVERRIDE; if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } else if 
(instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { @@ -477,21 +457,24 @@ __ Cbz(x10, &packed_case); ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ B(&done); __ Bind(&packed_case); } - ArraySingleArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ Bind(&done); } else { - ArrayNArgumentsConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } + after_push_argument_ = false; - ASSERT(ToRegister(instr->result()).is(x0)); + DCHECK(ToRegister(instr->result()).is(x0)); } @@ -499,7 +482,7 @@ int num_arguments, LInstruction* instr, SaveFPRegsMode save_doubles) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); __ CallRuntime(function, num_arguments, save_doubles); @@ -511,7 +494,7 @@ if (context->IsRegister()) { __ Mov(cp, ToRegister(context)); } else if (context->IsStackSlot()) { - __ Ldr(cp, ToMemOperand(context)); + __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer)); } else if (context->IsConstantOperand()) { HConstant* constant = chunk_->LookupConstant(LConstantOperand::cast(context)); @@ -546,7 +529,7 @@ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); } else { - ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(safepoint_mode == 
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kLazyDeopt); } @@ -557,7 +540,7 @@ Safepoint::Kind kind, int arguments, Safepoint::DeoptMode deopt_mode) { - ASSERT(expected_safepoint_kind_ == kind); + DCHECK(expected_safepoint_kind_ == kind); const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint( @@ -597,16 +580,9 @@ } -void LCodeGen::RecordSafepointWithRegistersAndDoubles( - LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) { - RecordSafepoint( - pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode); -} - - bool LCodeGen::GenerateCode() { LPhase phase("Z_Code generation", chunk()); - ASSERT(is_unused()); + DCHECK(is_unused()); status_ = GENERATING; // Open a frame scope to indicate that there is a frame on the stack. The @@ -623,8 +599,8 @@ void LCodeGen::SaveCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Save clobbered callee double registers"); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator iterator(doubles); @@ -641,8 +617,8 @@ void LCodeGen::RestoreCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Restore clobbered callee double registers"); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator iterator(doubles); @@ -659,7 +635,7 @@ bool LCodeGen::GeneratePrologue() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (info()->IsOptimizing()) { ProfileEntryHookStub::MaybeCallEntryHook(masm_); @@ -678,17 +654,21 @@ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); __ Ldr(x10, GlobalObjectMemOperand()); - __ Ldr(x10, FieldMemOperand(x10, 
GlobalObject::kGlobalReceiverOffset)); + __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset)); __ Poke(x10, receiver_offset); __ Bind(&ok); } } - ASSERT(__ StackPointer().Is(jssp)); + DCHECK(__ StackPointer().Is(jssp)); info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } frame_is_built_ = true; info_->AddNoFrameRange(0, masm_->pc_offset()); } @@ -707,13 +687,16 @@ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is in x1. if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ Push(x1); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } RecordSafepoint(Safepoint::kNoLazyDeopt); // Context is returned in x0. It replaces the context passed to us. It's @@ -736,8 +719,15 @@ MemOperand target = ContextMemOperand(cp, var->index()); __ Str(value, target); // Update the write barrier. This clobbers value and scratch. 
- __ RecordWriteContextSlot(cp, target.offset(), value, scratch, - GetLinkRegisterState(), kSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot(cp, target.offset(), value, scratch, + GetLinkRegisterState(), kSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } Comment(";;; End allocate local context"); @@ -764,7 +754,7 @@ // Adjust the frame size, subsuming the unoptimized frame into the // optimized frame. int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); - ASSERT(slots >= 0); + DCHECK(slots >= 0); __ Claim(slots); } @@ -780,7 +770,7 @@ bool LCodeGen::GenerateDeferredCode() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) { LDeferredCode* code = deferred_[i]; @@ -800,8 +790,8 @@ if (NeedsDeferredFrame()) { Comment(";;; Build frame"); - ASSERT(!frame_is_built_); - ASSERT(info()->IsStub()); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); frame_is_built_ = true; __ Push(lr, fp, cp); __ Mov(fp, Smi::FromInt(StackFrame::STUB)); @@ -815,7 +805,7 @@ if (NeedsDeferredFrame()) { Comment(";;; Destroy frame"); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); __ Pop(xzr, cp, fp, lr); frame_is_built_ = false; } @@ -835,51 +825,82 @@ bool LCodeGen::GenerateDeoptJumpTable() { + Label needs_frame, restore_caller_doubles, call_deopt_entry; + if (deopt_jump_table_.length() > 0) { Comment(";;; -------------------- Jump table --------------------"); - } - Label table_start; - __ bind(&table_start); - Label needs_frame; - for (int i = 0; i < deopt_jump_table_.length(); i++) { - __ Bind(&deopt_jump_table_[i]->label); - Address entry = deopt_jump_table_[i]->address; - Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type; - int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); - if (id == 
Deoptimizer::kNotDeoptimizationEntry) { - Comment(";;; jump table entry %d.", i); - } else { - Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); - } - if (deopt_jump_table_[i]->needs_frame) { - ASSERT(!info()->saves_caller_doubles()); + Address base = deopt_jump_table_[0]->address; - UseScratchRegisterScope temps(masm()); - Register stub_deopt_entry = temps.AcquireX(); - Register stub_marker = temps.AcquireX(); + UseScratchRegisterScope temps(masm()); + Register entry_offset = temps.AcquireX(); - __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry)); - if (needs_frame.is_bound()) { - __ B(&needs_frame); + int length = deopt_jump_table_.length(); + for (int i = 0; i < length; i++) { + __ Bind(&deopt_jump_table_[i]->label); + + Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type; + Address entry = deopt_jump_table_[i]->address; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); } else { - __ Bind(&needs_frame); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - ASSERT(info()->IsStub()); - __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB)); - __ Push(lr, fp, cp, stub_marker); - __ Add(fp, __ StackPointer(), 2 * kPointerSize); - __ Call(stub_deopt_entry); + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); } - } else { - if (info()->saves_caller_doubles()) { - ASSERT(info()->IsStub()); - RestoreCallerDoubles(); + + // Second-level deopt table entries are contiguous and small, so instead + // of loading the full, absolute address of each one, load the base + // address and add an immediate offset. + __ Mov(entry_offset, entry - base); + + // The last entry can fall through into `call_deopt_entry`, avoiding a + // branch. 
+ bool last_entry = (i + 1) == length; + + if (deopt_jump_table_[i]->needs_frame) { + DCHECK(!info()->saves_caller_doubles()); + if (!needs_frame.is_bound()) { + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + DCHECK(info()->IsStub()); + + UseScratchRegisterScope temps(masm()); + Register stub_marker = temps.AcquireX(); + __ Bind(&needs_frame); + __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB)); + __ Push(lr, fp, cp, stub_marker); + __ Add(fp, __ StackPointer(), 2 * kPointerSize); + if (!last_entry) __ B(&call_deopt_entry); + } else { + // Reuse the existing needs_frame code. + __ B(&needs_frame); + } + } else if (info()->saves_caller_doubles()) { + DCHECK(info()->IsStub()); + if (!restore_caller_doubles.is_bound()) { + __ Bind(&restore_caller_doubles); + RestoreCallerDoubles(); + if (!last_entry) __ B(&call_deopt_entry); + } else { + // Reuse the existing restore_caller_doubles code. + __ B(&restore_caller_doubles); + } + } else { + // There is nothing special to do, so just continue to the second-level + // table. + if (!last_entry) __ B(&call_deopt_entry); } - __ Call(entry, RelocInfo::RUNTIME_ENTRY); + + masm()->CheckConstPool(false, last_entry); } - masm()->CheckConstPool(false, false); + + // Generate common code for calling the second-level deopt table. + Register deopt_entry = temps.AcquireX(); + __ Bind(&call_deopt_entry); + __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base), + RelocInfo::RUNTIME_ENTRY)); + __ Add(deopt_entry, deopt_entry, entry_offset); + __ Call(deopt_entry); } // Force constant pool emission at the end of the deopt jump table to make @@ -894,7 +915,7 @@ bool LCodeGen::GenerateSafepointTable() { - ASSERT(is_done()); + DCHECK(is_done()); // We do not know how much data will be emitted for the safepoint table, so // force emission of the veneer pool. 
masm()->CheckVeneerPool(true, true); @@ -904,18 +925,11 @@ void LCodeGen::FinishCode(Handle<Code> code) { - ASSERT(is_done()); + DCHECK(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); PopulateDeoptimizationData(code); - info()->CommitDependencies(code); -} - - -void LCodeGen::Abort(BailoutReason reason) { - info()->set_bailout_reason(reason); - status_ = ABORTED; } @@ -924,7 +938,7 @@ if (length == 0) return; Handle<DeoptimizationInputData> data = - factory()->NewDeoptimizationInputData(length, TENURED); + DeoptimizationInputData::New(isolate(), length, 0, TENURED); Handle<ByteArray> translations = translations_.CreateByteArray(isolate()->factory()); @@ -966,7 +980,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { - ASSERT(deoptimization_literals_.length() == 0); + DCHECK(deoptimization_literals_.length() == 0); const ZoneList<Handle<JSFunction> >* inlined_closures = chunk()->inlined_closures(); @@ -991,8 +1005,8 @@ bailout_type = *override_bailout_type; } - ASSERT(environment->HasBeenRegistered()); - ASSERT(info()->IsOptimizing() || info()->IsStub()); + DCHECK(environment->HasBeenRegistered()); + DCHECK(info()->IsOptimizing() || info()->IsStub()); int id = environment->deoptimization_index(); Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); @@ -1014,7 +1028,7 @@ __ Mov(w1, FLAG_deopt_every_n_times); __ Str(w1, MemOperand(x0)); __ Pop(x2, x1, x0); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); __ Call(entry, RelocInfo::RUNTIME_ENTRY); __ Unreachable(); @@ -1031,7 +1045,7 @@ __ Bind(&dont_trap); } - ASSERT(info()->IsStub() || frame_is_built_); + DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to build frame, or restore caller doubles. 
if (branch_type == always && frame_is_built_ && !info()->saves_caller_doubles()) { @@ -1138,7 +1152,7 @@ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - ASSERT((padding_size % kInstructionSize) == 0); + DCHECK((padding_size % kInstructionSize) == 0); InstructionAccurateScope instruction_accurate( masm(), padding_size / kInstructionSize); @@ -1154,16 +1168,16 @@ Register LCodeGen::ToRegister(LOperand* op) const { // TODO(all): support zero register results, as ToRegister32. - ASSERT((op != NULL) && op->IsRegister()); + DCHECK((op != NULL) && op->IsRegister()); return Register::FromAllocationIndex(op->index()); } Register LCodeGen::ToRegister32(LOperand* op) const { - ASSERT(op != NULL); + DCHECK(op != NULL); if (op->IsConstantOperand()) { // If this is a constant operand, the result must be the zero register. - ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0); + DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0); return wzr; } else { return ToRegister(op).W(); @@ -1178,27 +1192,27 @@ DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - ASSERT((op != NULL) && op->IsDoubleRegister()); + DCHECK((op != NULL) && op->IsDoubleRegister()); return DoubleRegister::FromAllocationIndex(op->index()); } Operand LCodeGen::ToOperand(LOperand* op) { - ASSERT(op != NULL); + DCHECK(op != NULL); if (op->IsConstantOperand()) { LConstantOperand* const_op = LConstantOperand::cast(op); HConstant* constant = chunk()->LookupConstant(const_op); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsSmi()) { - ASSERT(constant->HasSmiValue()); + DCHECK(constant->HasSmiValue()); return Operand(Smi::FromInt(constant->Integer32Value())); } else if (r.IsInteger32()) { - ASSERT(constant->HasInteger32Value()); + DCHECK(constant->HasInteger32Value()); return Operand(constant->Integer32Value()); } else if (r.IsDouble()) { Abort(kToOperandUnsupportedDoubleImmediate); } - 
ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); return Operand(constant->handle(isolate())); } else if (op->IsRegister()) { return Operand(ToRegister(op)); @@ -1223,7 +1237,7 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) { - ASSERT(op != NULL); + DCHECK(op != NULL); if (op->IsRegister()) { return Operand(ToRegister32(op)); } else if (op->IsConstantOperand()) { @@ -1231,10 +1245,10 @@ HConstant* constant = chunk()->LookupConstant(const_op); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsInteger32()) { - ASSERT(constant->HasInteger32Value()); - return Operand(signedness == SIGNED_INT32 - ? constant->Integer32Value() - : static_cast<uint32_t>(constant->Integer32Value())); + DCHECK(constant->HasInteger32Value()); + return (signedness == SIGNED_INT32) + ? Operand(constant->Integer32Value()) + : Operand(static_cast<uint32_t>(constant->Integer32Value())); } else { // Other constants not implemented. Abort(kToOperand32UnsupportedImmediate); @@ -1247,18 +1261,43 @@ static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) { - ASSERT(index < 0); + DCHECK(index < 0); return -(index + 1) * kPointerSize; } -MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - ASSERT(op != NULL); - ASSERT(!op->IsRegister()); - ASSERT(!op->IsDoubleRegister()); - ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); +MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const { + DCHECK(op != NULL); + DCHECK(!op->IsRegister()); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { - return MemOperand(fp, StackSlotOffset(op->index())); + int fp_offset = StackSlotOffset(op->index()); + if (op->index() >= 0) { + // Loads and stores have a bigger reach in positive offset than negative. + // When the load or the store can't be done in one instruction via fp + // (too big negative offset), we try to access via jssp (positive offset). 
+ // We can reference a stack slot from jssp only if jssp references the end + // of the stack slots. It's not the case when: + // - stack_mode != kCanUseStackPointer: this is the case when a deferred + // code saved the registers. + // - after_push_argument_: arguments has been pushed for a call. + // - inlined_arguments_: inlined arguments have been pushed once. All the + // remainder of the function cannot trust jssp any longer. + // - saves_caller_doubles: some double registers have been pushed, jssp + // references the end of the double registers and not the end of the + // stack slots. + // Also, if the offset from fp is small enough to make a load/store in + // one instruction, we use a fp access. + if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ && + !inlined_arguments_ && !is_int9(fp_offset) && + !info()->saves_caller_doubles()) { + int jssp_offset = + (GetStackSlotCount() - op->index() - 1) * kPointerSize; + return MemOperand(masm()->StackPointer(), jssp_offset); + } + } + return MemOperand(fp, fp_offset); } else { // Retrieve parameter without eager stack-frame relative to the // stack-pointer. @@ -1270,11 +1309,26 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); return constant->handle(isolate()); } +template<class LI> +Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info, + IntegerSignedness signedness) { + if (shift_info->shift() == NO_SHIFT) { + return (signedness == SIGNED_INT32) ? 
ToOperand32I(right) + : ToOperand32U(right); + } else { + return Operand( + ToRegister32(right), + shift_info->shift(), + JSShiftAmountFromLConstant(shift_info->shift_amount())); + } +} + + bool LCodeGen::IsSmi(LConstantOperand* op) const { return chunk_->LookupLiteralRepresentation(op).IsSmi(); } @@ -1293,7 +1347,7 @@ double LCodeGen::ToDouble(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasDoubleValue()); + DCHECK(constant->HasDoubleValue()); return constant->DoubleValue(); } @@ -1353,7 +1407,7 @@ template<class InstrType> void LCodeGen::EmitBranch(InstrType instr, Condition condition) { - ASSERT((condition != al) && (condition != nv)); + DCHECK((condition != al) && (condition != nv)); BranchOnCondition branch(this, condition); EmitBranchGeneric(instr, branch); } @@ -1364,7 +1418,7 @@ Condition condition, const Register& lhs, const Operand& rhs) { - ASSERT((condition != al) && (condition != nv)); + DCHECK((condition != al) && (condition != nv)); CompareAndBranch branch(this, condition, lhs, rhs); EmitBranchGeneric(instr, branch); } @@ -1375,7 +1429,7 @@ Condition condition, const Register& value, uint64_t mask) { - ASSERT((condition != al) && (condition != nv)); + DCHECK((condition != al) && (condition != nv)); TestAndBranch branch(this, condition, value, mask); EmitBranchGeneric(instr, branch); } @@ -1462,7 +1516,7 @@ ? 
ToInteger32(LConstantOperand::cast(instr->right())) : Operand(ToRegister32(instr->right()), SXTW); - ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); + DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); __ Add(result, left, right); } @@ -1471,7 +1525,8 @@ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); Register result = ToRegister32(instr->result()); Register left = ToRegister32(instr->left()); - Operand right = ToOperand32I(instr->right()); + Operand right = ToShiftedRightOperand32I(instr->right(), instr); + if (can_overflow) { __ Adds(result, left, right); DeoptimizeIf(vs, instr->environment()); @@ -1519,11 +1574,11 @@ } if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); } @@ -1558,7 +1613,7 @@ __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map())); __ FillFields(untagged_result, filler_count, filler); } else { - ASSERT(instr->temp3() == NULL); + DCHECK(instr->temp3() == NULL); } } @@ -1569,7 +1624,7 @@ // contained in the register pointer map. __ Mov(ToRegister(instr->result()), Smi::FromInt(0)); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // We're in a SafepointRegistersScope so we can use any scratch registers. 
Register size = x0; if (instr->size()->IsConstantOperand()) { @@ -1580,11 +1635,11 @@ int flags = AllocateDoubleAlignFlag::encode( instr->hydrogen()->MustAllocateDoubleAligned()); if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); } else { flags = AllocateTargetSpace::update(flags, NEW_SPACE); @@ -1593,7 +1648,7 @@ __ Push(size, x10); CallRuntimeFromDeferred( - Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context()); + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result())); } @@ -1605,10 +1660,10 @@ Register elements = ToRegister(instr->elements()); Register scratch = x5; - ASSERT(receiver.Is(x0)); // Used for parameter count. - ASSERT(function.Is(x1)); // Required by InvokeFunction. - ASSERT(ToRegister(instr->result()).Is(x0)); - ASSERT(instr->IsMarkedAsCall()); + DCHECK(receiver.Is(x0)); // Used for parameter count. + DCHECK(function.Is(x1)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).Is(x0)); + DCHECK(instr->IsMarkedAsCall()); // Copy the arguments to this function possibly from the // adaptor frame below it. 
@@ -1637,7 +1692,7 @@ __ B(ne, &loop); __ Bind(&invoke); - ASSERT(instr->HasPointerMap()); + DCHECK(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); // The number of arguments is stored in argc (receiver) which is x0, as @@ -1648,6 +1703,10 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { + // We push some arguments and they will be pop in an other block. We can't + // trust that jssp references the end of the stack slots until the end of + // the function. + inlined_arguments_ = true; Register result = ToRegister(instr->result()); if (instr->hydrogen()->from_inlined()) { @@ -1659,10 +1718,10 @@ // LAccessArgumentsAt implementation take that into account. // In the inlined case we need to subtract the size of 2 words to jssp to // get a pointer which will work well with LAccessArgumentsAt. - ASSERT(masm()->StackPointer().Is(jssp)); + DCHECK(masm()->StackPointer().Is(jssp)); __ Sub(result, jssp, 2 * kPointerSize); } else { - ASSERT(instr->temp() != NULL); + DCHECK(instr->temp() != NULL); Register previous_fp = ToRegister(instr->temp()); __ Ldr(previous_fp, @@ -1716,12 +1775,12 @@ // precision), it should be possible. However, we would need support for // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't // support that yet. 
- ASSERT(left.Is(d0)); - ASSERT(right.Is(d1)); + DCHECK(left.Is(d0)); + DCHECK(right.Is(d1)); __ CallCFunction( ExternalReference::mod_two_doubles_operation(isolate()), 0, 2); - ASSERT(result.Is(d0)); + DCHECK(result.Is(d0)); break; } default: @@ -1732,20 +1791,20 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).is(x1)); - ASSERT(ToRegister(instr->right()).is(x0)); - ASSERT(ToRegister(instr->result()).is(x0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(x1)); + DCHECK(ToRegister(instr->right()).is(x0)); + DCHECK(ToRegister(instr->result()).is(x0)); - BinaryOpICStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoBitI(LBitI* instr) { Register result = ToRegister32(instr->result()); Register left = ToRegister32(instr->left()); - Operand right = ToOperand32U(instr->right()); + Operand right = ToShiftedRightOperand32U(instr->right(), instr); switch (instr->op()) { case Token::BIT_AND: __ And(result, left, right); break; @@ -1774,36 +1833,25 @@ } -void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { - if (FLAG_debug_code && check->hydrogen()->skip_check()) { - __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed); - } else { - DeoptimizeIf(cc, check->environment()); - } -} - - void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) { - if (instr->hydrogen()->skip_check()) return; - - ASSERT(instr->hydrogen()->length()->representation().IsInteger32()); - Register length = ToRegister32(instr->length()); - + Condition cond = instr->hydrogen()->allow_equality() ? 
hi : hs; + DCHECK(instr->hydrogen()->index()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->length()->representation().IsInteger32()); if (instr->index()->IsConstantOperand()) { - int constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); - - if (instr->hydrogen()->length()->representation().IsSmi()) { - __ Cmp(length, Smi::FromInt(constant_index)); - } else { - __ Cmp(length, constant_index); - } + Operand index = ToOperand32I(instr->index()); + Register length = ToRegister32(instr->length()); + __ Cmp(length, index); + cond = CommuteCondition(cond); } else { - ASSERT(instr->hydrogen()->index()->representation().IsInteger32()); - __ Cmp(length, ToRegister32(instr->index())); + Register index = ToRegister32(instr->index()); + Operand length = ToOperand32I(instr->length()); + __ Cmp(index, length); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { + __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); + } else { + DeoptimizeIf(cond, instr->environment()); } - Condition condition = instr->hydrogen()->allow_equality() ? lo : ls; - ApplyCheckIf(condition, instr); } @@ -1813,10 +1861,10 @@ Label* false_label = instr->FalseLabel(chunk_); if (r.IsInteger32()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0); } else if (r.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); STATIC_ASSERT(kSmiTag == 0); EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0); } else if (r.IsDouble()) { @@ -1824,28 +1872,28 @@ // Test the double value. Zero and NaN are false. 
EmitBranchIfNonZeroNumber(instr, value, double_scratch()); } else { - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); Register value = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ CompareRoot(value, Heap::kTrueValueRootIndex); EmitBranch(instr, eq); } else if (type.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0)); } else if (type.IsJSArray()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitGoto(instr->TrueDestination(chunk())); } else if (type.IsHeapNumber()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset)); // Test the double value. Zero and NaN are false. EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch()); } else if (type.IsString()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); Register temp = ToRegister(instr->temp1()); __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset)); EmitCompareAndBranch(instr, ne, temp, 0); @@ -1876,7 +1924,7 @@ if (expected.Contains(ToBooleanStub::SMI)) { // Smis: 0 -> false, all other -> true. - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); __ Cbz(value, false_label); __ JumpIfSmi(value, true_label); } else if (expected.NeedsMap()) { @@ -1888,7 +1936,7 @@ Register scratch = NoReg; if (expected.NeedsMap()) { - ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); + DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); map = ToRegister(instr->temp1()); scratch = ToRegister(instr->temp2()); @@ -1960,7 +2008,7 @@ dont_adapt_arguments || formal_parameter_count == arity; // The function interface relies on the following register assignments. 
- ASSERT(function_reg.Is(x1) || function_reg.IsNone()); + DCHECK(function_reg.Is(x1) || function_reg.IsNone()); Register arity_reg = x0; LPointerMap* pointers = instr->pointer_map(); @@ -2005,8 +2053,8 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - ASSERT(instr->IsMarkedAsCall()); - ASSERT(ToRegister(instr->result()).Is(x0)); + DCHECK(instr->IsMarkedAsCall()); + DCHECK(ToRegister(instr->result()).Is(x0)); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -2020,19 +2068,20 @@ // this understanding is correct. __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None()); } else { - ASSERT(instr->target()->IsRegister()); + DCHECK(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(target)); __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); __ Call(target); } generator.AfterCall(); + after_push_argument_ = false; } void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { - ASSERT(instr->IsMarkedAsCall()); - ASSERT(ToRegister(instr->function()).is(x1)); + DCHECK(instr->IsMarkedAsCall()); + DCHECK(ToRegister(instr->function()).is(x1)); if (instr->hydrogen()->pass_argument_count()) { __ Mov(x0, Operand(instr->arity())); @@ -2046,36 +2095,39 @@ __ Call(x10); RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + after_push_argument_ = false; } void LCodeGen::DoCallRuntime(LCallRuntime* instr) { CallRuntime(instr->function(), instr->arity(), instr); + after_push_argument_ = false; } void LCodeGen::DoCallStub(LCallStub* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->result()).is(x0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->result()).is(x0)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpExec: { - RegExpExecStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + RegExpExecStub stub(isolate()); + 
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { - SubStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { - StringCompareStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } default: UNREACHABLE(); } + after_push_argument_ = false; } @@ -2087,7 +2139,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { Register temp = ToRegister(instr->temp()); { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ Push(object); __ Mov(cp, 0); __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); @@ -2117,9 +2169,11 @@ Register object_; }; - if (instr->hydrogen()->CanOmitMapChecks()) { - ASSERT(instr->value() == NULL); - ASSERT(instr->temp() == NULL); + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } return; } @@ -2129,24 +2183,26 @@ __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { deferred = new(zone()) DeferredCheckMaps(this, instr, object); __ Bind(deferred->check_maps()); } - UniqueSet<Map> map_set = instr->hydrogen()->map_set(); + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); Label success; - for (int i = 0; i < map_set.size(); i++) { - Handle<Map> map = map_set.at(i).handle(); + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); __ CompareMap(map_reg, map); __ B(eq, &success); } + Handle<Map> map = 
maps->at(maps->size() - 1).handle(); + __ CompareMap(map_reg, map); // We didn't match a map. - if (instr->hydrogen()->has_migration_target()) { - __ B(deferred->entry()); + if (instr->hydrogen()->HasMigrationTarget()) { + __ B(ne, deferred->entry()); } else { - Deoptimize(instr->environment()); + DeoptimizeIf(ne, instr->environment()); } __ Bind(&success); @@ -2154,7 +2210,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment()); } } @@ -2162,7 +2218,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) { Register value = ToRegister(instr->value()); - ASSERT(!instr->result() || ToRegister(instr->result()).Is(value)); + DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); DeoptimizeIfNotSmi(value, instr->environment()); } @@ -2197,7 +2253,7 @@ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); if (IsPowerOf2(mask)) { - ASSERT((tag == 0) || (tag == mask)); + DCHECK((tag == 0) || (tag == mask)); if (tag == 0) { DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment()); } else { @@ -2272,7 +2328,7 @@ Register result_reg = ToRegister(instr->result()); if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { __ Fmov(result_reg, value_reg); - __ Mov(result_reg, Operand(result_reg, LSR, 32)); + __ Lsr(result_reg, result_reg, 32); } else { __ Fmov(result_reg.W(), value_reg.S()); } @@ -2282,12 +2338,12 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) { Register hi_reg = ToRegister(instr->hi()); Register lo_reg = ToRegister(instr->lo()); - Register temp = ToRegister(instr->temp()); DoubleRegister result_reg = ToDoubleRegister(instr->result()); - __ And(temp, lo_reg, Operand(0xffffffff)); - __ Orr(temp, temp, Operand(hi_reg, LSL, 32)); - __ Fmov(result_reg, temp); + // Insert the least significant 32 bits of hi_reg into the most significant + // 32 bits of lo_reg, and move 
to a floating point register. + __ Bfi(lo_reg, hi_reg, 32, 32); + __ Fmov(result_reg, lo_reg); } @@ -2353,7 +2409,7 @@ void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) { - ASSERT(instr->hydrogen()->representation().IsDouble()); + DCHECK(instr->hydrogen()->representation().IsDouble()); FPRegister object = ToDoubleRegister(instr->object()); Register temp = ToRegister(instr->temp()); @@ -2369,7 +2425,7 @@ void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) { - ASSERT(instr->hydrogen()->representation().IsTagged()); + DCHECK(instr->hydrogen()->representation().IsTagged()); Register object = ToRegister(instr->object()); EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex); @@ -2387,7 +2443,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { Representation rep = instr->hydrogen()->value()->representation(); - ASSERT(!rep.IsInteger32()); + DCHECK(!rep.IsInteger32()); Register scratch = ToRegister(instr->temp()); if (rep.IsDouble()) { @@ -2397,8 +2453,8 @@ Register value = ToRegister(instr->value()); __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex, instr->FalseLabel(chunk()), DO_SMI_CHECK); - __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset)); - __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk())); + __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset)); + __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk())); } EmitGoto(instr->FalseDestination(chunk())); } @@ -2407,7 +2463,10 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - Condition cond = TokenToCondition(instr->op(), false); + bool is_unsigned = + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cond = TokenToCondition(instr->op(), is_unsigned); if (left->IsConstantOperand() && right->IsConstantOperand()) { // We 
can statically evaluate the comparison. @@ -2418,17 +2477,7 @@ EmitGoto(next_block); } else { if (instr->is_double()) { - if (right->IsConstantOperand()) { - __ Fcmp(ToDoubleRegister(left), - ToDouble(LConstantOperand::cast(right))); - } else if (left->IsConstantOperand()) { - // Transpose the operands and reverse the condition. - __ Fcmp(ToDoubleRegister(right), - ToDouble(LConstantOperand::cast(left))); - cond = ReverseConditionForCmp(cond); - } else { - __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right)); - } + __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right)); // If a NaN is involved, i.e. the result is unordered (V set), // jump to false block label. @@ -2442,14 +2491,14 @@ ToRegister32(left), ToOperand32I(right)); } else { - // Transpose the operands and reverse the condition. + // Commute the operands and the condition. EmitCompareAndBranch(instr, - ReverseConditionForCmp(cond), + CommuteCondition(cond), ToRegister32(right), ToOperand32I(left)); } } else { - ASSERT(instr->hydrogen_value()->representation().IsSmi()); + DCHECK(instr->hydrogen_value()->representation().IsSmi()); if (right->IsConstantOperand()) { int32_t value = ToInteger32(LConstantOperand::cast(right)); EmitCompareAndBranch(instr, @@ -2457,10 +2506,10 @@ ToRegister(left), Operand(Smi::FromInt(value))); } else if (left->IsConstantOperand()) { - // Transpose the operands and reverse the condition. + // Commute the operands and the condition. 
int32_t value = ToInteger32(LConstantOperand::cast(left)); EmitCompareAndBranch(instr, - ReverseConditionForCmp(cond), + CommuteCondition(cond), ToRegister(right), Operand(Smi::FromInt(value))); } else { @@ -2483,12 +2532,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Condition cond = TokenToCondition(op, false); - ASSERT(ToRegister(instr->left()).Is(x1)); - ASSERT(ToRegister(instr->right()).Is(x0)); + DCHECK(ToRegister(instr->left()).Is(x1)); + DCHECK(ToRegister(instr->right()).Is(x0)); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); // Signal that we don't inline smi code before this stub. @@ -2496,7 +2545,7 @@ // Return true or false depending on CompareIC result. // This instruction is marked as call. We can clobber any register. - ASSERT(instr->IsMarkedAsCall()); + DCHECK(instr->IsMarkedAsCall()); __ LoadTrueFalseRoots(x1, x2); __ Cmp(x0, 0); __ Csel(ToRegister(instr->result()), x1, x2, cond); @@ -2504,9 +2553,17 @@ void LCodeGen::DoConstantD(LConstantD* instr) { - ASSERT(instr->result()->IsDoubleRegister()); + DCHECK(instr->result()->IsDoubleRegister()); DoubleRegister result = ToDoubleRegister(instr->result()); - __ Fmov(result, instr->value()); + if (instr->value() == 0) { + if (copysign(1.0, instr->value()) == 1.0) { + __ Fmov(result, fp_zero); + } else { + __ Fneg(result, fp_zero); + } + } else { + __ Fmov(result, instr->value()); + } } @@ -2516,7 +2573,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) { - ASSERT(is_int32(instr->value())); + DCHECK(is_int32(instr->value())); // Cast the value here to ensure that the value isn't sign extended by the // implicit Operand constructor. 
__ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value())); @@ -2529,9 +2586,9 @@ void LCodeGen::DoConstantT(LConstantT* instr) { - Handle<Object> value = instr->value(isolate()); + Handle<Object> object = instr->value(isolate()); AllowDeferredHandleDereference smi_check; - __ LoadObject(ToRegister(instr->result()), value); + __ LoadObject(ToRegister(instr->result()), object); } @@ -2542,7 +2599,7 @@ __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { // If there is no frame, the context must be in cp. - ASSERT(result.is(cp)); + DCHECK(result.is(cp)); } } @@ -2567,7 +2624,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { last_lazy_deopt_pc_ = masm()->pc_offset(); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); @@ -2580,19 +2637,15 @@ Register temp1 = x10; Register temp2 = x11; Smi* index = instr->index(); - Label runtime, done, deopt, obj_ok; + Label runtime, done; - ASSERT(object.is(result) && object.Is(x0)); - ASSERT(instr->IsMarkedAsCall()); + DCHECK(object.is(result) && object.Is(x0)); + DCHECK(instr->IsMarkedAsCall()); - __ JumpIfSmi(object, &deopt); + DeoptimizeIfSmi(object, instr->environment()); __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); - __ B(eq, &obj_ok); - - __ Bind(&deopt); - Deoptimize(instr->environment()); + DeoptimizeIf(ne, instr->environment()); - __ Bind(&obj_ok); if (index->value() == 0) { __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); } else { @@ -2636,19 +2689,20 @@ Register dividend = ToRegister32(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister32(instr->result()); - ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); - ASSERT(!result.is(dividend)); + DCHECK(divisor == kMinInt || 
IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); // Check for (0 / -x) that will produce negative zero. HDiv* hdiv = instr->hydrogen(); if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ Cmp(dividend, 0); - DeoptimizeIf(eq, instr->environment()); + DeoptimizeIfZero(dividend, instr->environment()); } // Check for (kMinInt / -1). if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { - __ Cmp(dividend, kMinInt); - DeoptimizeIf(eq, instr->environment()); + // Test dividend for kMinInt by subtracting one (cmp) and checking for + // overflow. + __ Cmp(dividend, 1); + DeoptimizeIf(vs, instr->environment()); } // Deoptimize if remainder will not be 0. if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && @@ -2680,7 +2734,7 @@ Register dividend = ToRegister32(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister32(instr->result()); - ASSERT(!AreAliased(dividend, result)); + DCHECK(!AreAliased(dividend, result)); if (divisor == 0) { Deoptimize(instr->environment()); @@ -2698,7 +2752,7 @@ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { Register temp = ToRegister32(instr->temp()); - ASSERT(!AreAliased(dividend, result, temp)); + DCHECK(!AreAliased(dividend, result, temp)); __ Sxtw(dividend.X(), dividend); __ Mov(temp, divisor); __ Smsubl(temp.X(), result, temp, dividend.X()); @@ -2707,10 +2761,11 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 
void LCodeGen::DoDivI(LDivI* instr) { HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister32(instr->left()); - Register divisor = ToRegister32(instr->right()); + Register dividend = ToRegister32(instr->dividend()); + Register divisor = ToRegister32(instr->divisor()); Register result = ToRegister32(instr->result()); // Issue the division first, and then check for any deopt cases whilst the @@ -2718,14 +2773,13 @@ __ Sdiv(result, dividend, divisor); if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - ASSERT_EQ(NULL, instr->temp()); + DCHECK_EQ(NULL, instr->temp()); return; } - Label deopt; // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - __ Cbz(divisor, &deopt); + DeoptimizeIfZero(divisor, instr->environment()); } // Check for (0 / -x) as that will produce negative zero. @@ -2737,7 +2791,7 @@ // If the divisor >= 0 (pl, the opposite of mi) set the flags to // condition ne, so we don't deopt, ie. positive divisor doesn't deopt. __ Ccmp(dividend, 0, NoFlag, mi); - __ B(eq, &deopt); + DeoptimizeIf(eq, instr->environment()); } // Check for (kMinInt / -1). @@ -2749,19 +2803,13 @@ // -1. If overflow is clear, set the flags for condition ne, as the // dividend isn't -1, and thus we shouldn't deopt. __ Ccmp(divisor, -1, NoFlag, vs); - __ B(eq, &deopt); + DeoptimizeIf(eq, instr->environment()); } // Compute remainder and deopt if it's not zero. 
Register remainder = ToRegister32(instr->temp()); __ Msub(remainder, result, divisor, dividend); - __ Cbnz(remainder, &deopt); - - Label div_ok; - __ B(&div_ok); - __ Bind(&deopt); - Deoptimize(instr->environment()); - __ Bind(&div_ok); + DeoptimizeIfNotZero(remainder, instr->environment()); } @@ -2773,7 +2821,7 @@ DeoptimizeIfMinusZero(input, instr->environment()); } - __ TryConvertDoubleToInt32(result, input, double_scratch()); + __ TryRepresentDoubleAsInt32(result, input, double_scratch()); DeoptimizeIf(ne, instr->environment()); if (instr->tag_result()) { @@ -2798,24 +2846,25 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); // FunctionLiteral instruction is marked as call, we can trash any register. - ASSERT(instr->IsMarkedAsCall()); + DCHECK(instr->IsMarkedAsCall()); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && instr->hydrogen()->has_no_literals()) { - FastNewClosureStub stub(instr->hydrogen()->strict_mode(), + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), instr->hydrogen()->is_generator()); __ Mov(x2, Operand(instr->hydrogen()->shared_info())); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ Mov(x2, Operand(instr->hydrogen()->shared_info())); __ Mov(x1, Operand(pretenure ? 
factory()->true_value() : factory()->false_value())); __ Push(cp, x2, x1); - CallRuntime(Runtime::kHiddenNewClosure, 3, instr); + CallRuntime(Runtime::kNewClosure, 3, instr); } } @@ -2845,22 +2894,21 @@ Register object = ToRegister(instr->object()); Register null_value = x5; - ASSERT(instr->IsMarkedAsCall()); - ASSERT(object.Is(x0)); - - Label deopt; + DCHECK(instr->IsMarkedAsCall()); + DCHECK(object.Is(x0)); - __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt); + DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, + instr->environment()); __ LoadRoot(null_value, Heap::kNullValueRootIndex); __ Cmp(object, null_value); - __ B(eq, &deopt); + DeoptimizeIf(eq, instr->environment()); - __ JumpIfSmi(object, &deopt); + DeoptimizeIfSmi(object, instr->environment()); STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); - __ B(le, &deopt); + DeoptimizeIf(le, instr->environment()); Label use_cache, call_runtime; __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); @@ -2868,16 +2916,13 @@ __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); __ B(&use_cache); - __ Bind(&deopt); - Deoptimize(instr->environment()); - // Get the set of properties to enumerate. __ Bind(&call_runtime); __ Push(object); CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); - __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt); + DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment()); __ Bind(&use_cache); } @@ -2890,7 +2935,7 @@ __ AssertString(input); // Assert that we can use a W register load to get the hash. 
- ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits); + DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits); __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); __ IndexFromHash(result, result); } @@ -2915,7 +2960,7 @@ Register temp = ToRegister32(instr->temp()); // Assert that the cache status bits fit in a W register. - ASSERT(is_uint32(String::kContainsCachedArrayIndexMask)); + DCHECK(is_uint32(String::kContainsCachedArrayIndexMask)); __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset)); __ Tst(temp, String::kContainsCachedArrayIndexMask); EmitBranch(instr, eq); @@ -2939,7 +2984,7 @@ InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; - ASSERT((from == to) || (to == LAST_TYPE)); + DCHECK((from == to) || (to == LAST_TYPE)); return from; } @@ -2960,7 +3005,7 @@ Register input = ToRegister(instr->value()); Register scratch = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); @@ -2980,13 +3025,13 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); // Assert that the arguments are in the registers expected by InstanceofStub. 
- ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left())); - ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right())); + DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left())); + DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right())); - InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); // InstanceofStub returns a result in x0: // 0 => not an instance @@ -3022,10 +3067,10 @@ Register map = x5; // This instruction is marked as call. We can clobber any register. - ASSERT(instr->IsMarkedAsCall()); + DCHECK(instr->IsMarkedAsCall()); // We must take into account that object is in x11. - ASSERT(object.Is(x11)); + DCHECK(object.Is(x11)); Register scratch = x10; // A Smi is not instance of anything. @@ -3043,15 +3088,15 @@ __ bind(&map_check); // Will be patched with the cached map. Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); - __ LoadRelocated(scratch, Operand(Handle<Object>(cell))); + __ ldr(scratch, Immediate(Handle<Object>(cell))); __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset)); __ cmp(map, scratch); __ b(&cache_miss, ne); // The address of this instruction is computed relative to the map check // above, so check the size of the code generated. - ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4); + DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4); // Will be patched with the cached result. - __ LoadRelocated(result, Operand(factory()->the_hole_value())); + __ ldr(result, Immediate(factory()->the_hole_value())); } __ B(&done); @@ -3084,7 +3129,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { Register result = ToRegister(instr->result()); - ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0. 
+ DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0. InstanceofStub::Flags flags = InstanceofStub::kNoFlags; flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kArgsInRegisters); @@ -3093,15 +3138,15 @@ flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kCallSiteInlineCheck); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); LoadContextFromDeferred(instr->context()); // Prepare InstanceofStub arguments. - ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left())); + DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left())); __ LoadObject(InstanceofStub::right(), instr->function()); - InstanceofStub stub(flags); - CallCodeGeneric(stub.GetCode(isolate()), + InstanceofStub stub(isolate(), flags); + CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -3126,10 +3171,10 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); // The function is required to be in x1. - ASSERT(ToRegister(instr->function()).is(x1)); - ASSERT(instr->HasPointerMap()); + DCHECK(ToRegister(instr->function()).is(x1)); + DCHECK(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { @@ -3144,6 +3189,7 @@ instr, x1); } + after_push_argument_ = false; } @@ -3213,7 +3259,7 @@ Register scratch = ToRegister(instr->temp()); SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; Condition true_cond = EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed); @@ -3233,7 +3279,7 @@ Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); @@ -3285,23 +3331,14 @@ Register function = ToRegister(instr->function()); Register result = ToRegister(instr->result()); Register temp = ToRegister(instr->temp()); - Label deopt; - - // Check that the function really is a function. Leaves map in the result - // register. - __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt); - - // Make sure that the function has an instance prototype. - Label non_instance; - __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset)); - __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance); // Get the prototype or initial map from the function. __ Ldr(result, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); // Check that the function has a prototype or an initial map. - __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt); + DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, + instr->environment()); // If the function does not have an initial map, we're done. Label done; @@ -3310,17 +3347,6 @@ // Get the prototype from the initial map. __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); - __ B(&done); - - // Non-instance prototype: fetch prototype from constructor field in initial - // map. - __ Bind(&non_instance); - __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); - __ B(&done); - - // Deoptimize case. - __ Bind(&deopt); - Deoptimize(instr->environment()); // All done. 
__ Bind(&done); @@ -3339,10 +3365,19 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->global_object()).Is(x0)); - ASSERT(ToRegister(instr->result()).Is(x0)); - __ Mov(x2, Operand(instr->name())); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).Is(x0)); + __ Mov(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(x0)); + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(instr->hydrogen()->slot())); + } ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -3357,53 +3392,26 @@ bool key_is_constant, int constant_key, ElementsKind elements_kind, - int additional_index) { + int base_offset) { int element_size_shift = ElementsKindToShiftSize(elements_kind); - int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) - ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; if (key_is_constant) { - int base_offset = ((constant_key + additional_index) << element_size_shift); - return MemOperand(base, base_offset + additional_offset); + int key_offset = constant_key << element_size_shift; + return MemOperand(base, key_offset + base_offset); } - if (additional_index == 0) { - if (key_is_smi) { - // Key is smi: untag, and scale by element size. - __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); - return MemOperand(scratch, additional_offset); - } else { - // Key is not smi, and element size is not byte: scale by element size. 
- if (additional_offset == 0) { - return MemOperand(base, key, SXTW, element_size_shift); - } else { - __ Add(scratch, base, Operand(key, SXTW, element_size_shift)); - return MemOperand(scratch, additional_offset); - } - } - } else { - // TODO(all): Try to combine these cases a bit more intelligently. - if (additional_offset == 0) { - if (key_is_smi) { - __ SmiUntag(scratch, key); - __ Add(scratch.W(), scratch.W(), additional_index); - } else { - __ Add(scratch.W(), key.W(), additional_index); - } - return MemOperand(base, scratch, LSL, element_size_shift); - } else { - if (key_is_smi) { - __ Add(scratch, base, - Operand::UntagSmiAndScale(key, element_size_shift)); - } else { - __ Add(scratch, base, Operand(key, SXTW, element_size_shift)); - } - return MemOperand( - scratch, - (additional_index << element_size_shift) + additional_offset); - } + if (key_is_smi) { + __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); + return MemOperand(scratch, base_offset); } + + if (base_offset == 0) { + return MemOperand(base, key, SXTW, element_size_shift); + } + + DCHECK(!AreAliased(scratch, key)); + __ Add(scratch, base, base_offset); + return MemOperand(scratch, key, SXTW, element_size_shift); } @@ -3417,7 +3425,7 @@ Register key = no_reg; int constant_key = 0; if (key_is_constant) { - ASSERT(instr->temp() == NULL); + DCHECK(instr->temp() == NULL); constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xf0000000) { Abort(kArrayIndexConstantValueTooBig); @@ -3431,7 +3439,7 @@ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, key_is_constant, constant_key, elements_kind, - instr->additional_index()); + instr->base_offset()); if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) || (elements_kind == FLOAT32_ELEMENTS)) { @@ -3496,11 +3504,15 @@ } -void LCodeGen::CalcKeyedArrayBaseRegister(Register base, - Register elements, - Register key, - bool key_is_tagged, - ElementsKind elements_kind) { +MemOperand 
LCodeGen::PrepareKeyedArrayOperand(Register base, + Register elements, + Register key, + bool key_is_tagged, + ElementsKind elements_kind, + Representation representation, + int base_offset) { + STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); + STATIC_ASSERT(kSmiTag == 0); int element_size_shift = ElementsKindToShiftSize(elements_kind); // Even though the HLoad/StoreKeyed instructions force the input @@ -3509,11 +3521,26 @@ // can be tagged, so that case must be handled here, too. if (key_is_tagged) { __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift)); + if (representation.IsInteger32()) { + DCHECK(elements_kind == FAST_SMI_ELEMENTS); + // Read or write only the smi payload in the case of fast smi arrays. + return UntagSmiMemOperand(base, base_offset); + } else { + return MemOperand(base, base_offset); + } } else { // Sign extend key because it could be a 32-bit negative value or contain // garbage in the top 32-bits. The address computation happens in 64-bit. - ASSERT((element_size_shift >= 0) && (element_size_shift <= 4)); - __ Add(base, elements, Operand(key, SXTW, element_size_shift)); + DCHECK((element_size_shift >= 0) && (element_size_shift <= 4)); + if (representation.IsInteger32()) { + DCHECK(elements_kind == FAST_SMI_ELEMENTS); + // Read or write only the smi payload in the case of fast smi arrays. 
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift)); + return UntagSmiMemOperand(base, base_offset); + } else { + __ Add(base, elements, base_offset); + return MemOperand(base, key, SXTW, element_size_shift); + } } } @@ -3521,38 +3548,38 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) { Register elements = ToRegister(instr->elements()); DoubleRegister result = ToDoubleRegister(instr->result()); - Register load_base; - int offset = 0; + MemOperand mem_op; if (instr->key()->IsConstantOperand()) { - ASSERT(instr->hydrogen()->RequiresHoleCheck() || + DCHECK(instr->hydrogen()->RequiresHoleCheck() || (instr->temp() == NULL)); int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xf0000000) { Abort(kArrayIndexConstantValueTooBig); } - offset = FixedDoubleArray::OffsetOfElementAt(constant_key + - instr->additional_index()); - load_base = elements; + int offset = instr->base_offset() + constant_key * kDoubleSize; + mem_op = MemOperand(elements, offset); } else { - load_base = ToRegister(instr->temp()); + Register load_base = ToRegister(instr->temp()); Register key = ToRegister(instr->key()); bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind()); - offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); + mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, + instr->hydrogen()->elements_kind(), + instr->hydrogen()->representation(), + instr->base_offset()); } - __ Ldr(result, FieldMemOperand(load_base, offset)); + + __ Ldr(result, mem_op); if (instr->hydrogen()->RequiresHoleCheck()) { Register scratch = ToRegister(instr->temp()); - - // TODO(all): Is it faster to reload this value to an integer register, or - // move from fp to integer? 
- __ Fmov(scratch, result); - __ Cmp(scratch, kHoleNanInt64); - DeoptimizeIf(eq, instr->environment()); + // Detect the hole NaN by adding one to the integer representation of the + // result, and checking for overflow. + STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff); + __ Ldr(scratch, mem_op); + __ Cmn(scratch, 1); + DeoptimizeIf(vs, instr->environment()); } } @@ -3560,35 +3587,34 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { Register elements = ToRegister(instr->elements()); Register result = ToRegister(instr->result()); - Register load_base; - int offset = 0; + MemOperand mem_op; + Representation representation = instr->hydrogen()->representation(); if (instr->key()->IsConstantOperand()) { - ASSERT(instr->temp() == NULL); + DCHECK(instr->temp() == NULL); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - load_base = elements; + int offset = instr->base_offset() + + ToInteger32(const_operand) * kPointerSize; + if (representation.IsInteger32()) { + DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); + STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); + STATIC_ASSERT(kSmiTag == 0); + mem_op = UntagSmiMemOperand(elements, offset); + } else { + mem_op = MemOperand(elements, offset); + } } else { - load_base = ToRegister(instr->temp()); + Register load_base = ToRegister(instr->temp()); Register key = ToRegister(instr->key()); bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind()); - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - Representation representation = instr->hydrogen()->representation(); - if (representation.IsInteger32() && - instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) { - STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 
32 && kSmiTag == 0); - __ Load(result, UntagSmiFieldMemOperand(load_base, offset), - Representation::Integer32()); - } else { - __ Load(result, FieldMemOperand(load_base, offset), - representation); + mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, + instr->hydrogen()->elements_kind(), + representation, instr->base_offset()); } + __ Load(result, mem_op, representation); + if (instr->hydrogen()->RequiresHoleCheck()) { if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { DeoptimizeIfNotSmi(result, instr->environment()); @@ -3601,14 +3627,23 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).Is(x1)); - ASSERT(ToRegister(instr->key()).Is(x0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(x0)); + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(instr->hydrogen()->slot())); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr); - ASSERT(ToRegister(instr->result()).Is(x0)); + DCHECK(ToRegister(instr->result()).Is(x0)); } @@ -3642,7 +3677,8 @@ if (access.representation().IsSmi() && instr->hydrogen()->representation().IsInteger32()) { // Read int value directly from upper half of the smi. 
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); + STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); + STATIC_ASSERT(kSmiTag == 0); __ Load(result, UntagSmiFieldMemOperand(source, offset), Representation::Integer32()); } else { @@ -3652,15 +3688,24 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - // LoadIC expects x2 to hold the name, and x0 to hold the receiver. - ASSERT(ToRegister(instr->object()).is(x0)); - __ Mov(x2, Operand(instr->name())); + DCHECK(ToRegister(instr->context()).is(cp)); + // LoadIC expects name and receiver in registers. + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + __ Mov(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(x0)); + __ Mov(LoadIC::SlotRegister(), + Smi::FromInt(instr->hydrogen()->slot())); + } Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); CallCode(ic, RelocInfo::CODE_TARGET, instr); - ASSERT(ToRegister(instr->result()).is(x0)); + DCHECK(ToRegister(instr->result()).is(x0)); } @@ -3688,10 +3733,8 @@ : ToRegister32(instr->value()); Register result = r.IsSmi() ? ToRegister(instr->result()) : ToRegister32(instr->result()); - Label done; - __ Abs(result, input, NULL, &done); - Deoptimize(instr->environment()); - __ Bind(&done); + __ Abs(result, input); + DeoptimizeIf(vs, instr->environment()); } } @@ -3708,8 +3751,8 @@ // - The (smi) input -0x80000000, produces +0x80000000, which does not fit // a smi. In this case, the inline code sets the result and jumps directly // to the allocation_entry label. 
- ASSERT(instr->context() != NULL); - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(instr->context() != NULL); + DCHECK(ToRegister(instr->context()).is(cp)); Register input = ToRegister(instr->value()); Register temp1 = ToRegister(instr->temp1()); Register temp2 = ToRegister(instr->temp2()); @@ -3755,8 +3798,8 @@ __ Bind(&result_ok); } - { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr, + { PushSafepointRegistersScope scope(this); + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, instr->context()); __ StoreToSafepointRegisterSlot(x0, result); } @@ -3783,12 +3826,12 @@ // TODO(jbramley): The early-exit mechanism would skip the new frame handling // in GenerateDeferredCode. Tidy this up. - ASSERT(!NeedsDeferredFrame()); + DCHECK(!NeedsDeferredFrame()); DeferredMathAbsTagged* deferred = new(zone()) DeferredMathAbsTagged(this, instr); - ASSERT(instr->hydrogen()->value()->representation().IsTagged() || + DCHECK(instr->hydrogen()->value()->representation().IsTagged() || instr->hydrogen()->value()->representation().IsSmi()); Register input = ToRegister(instr->value()); Register result_bits = ToRegister(instr->temp3()); @@ -3832,9 +3875,15 @@ } -void LCodeGen::DoMathFloor(LMathFloor* instr) { - // TODO(jbramley): If we could provide a double result, we could use frintm - // and produce a valid double result in a single instruction. +void LCodeGen::DoMathFloorD(LMathFloorD* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + + __ Frintm(result, input); +} + + +void LCodeGen::DoMathFloorI(LMathFloorI* instr) { DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); @@ -3858,9 +3907,14 @@ Register result = ToRegister32(instr->result()); int32_t divisor = instr->divisor(); + // If the divisor is 1, return the dividend. 
+ if (divisor == 1) { + __ Mov(result, dividend, kDiscardForSameWReg); + return; + } + // If the divisor is positive, things are easy: There can be no deopts and we // can simply do an arithmetic right shift. - if (divisor == 1) return; int32_t shift = WhichPowerOf2Abs(divisor); if (divisor > 1) { __ Mov(result, Operand(dividend, ASR, shift)); @@ -3868,25 +3922,27 @@ } // If the divisor is negative, we have to negate and handle edge cases. - Label not_kmin_int, done; __ Negs(result, dividend); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(eq, instr->environment()); } - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - // Note that we could emit branch-free code, but that would need one more - // register. - if (divisor == -1) { + + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { DeoptimizeIf(vs, instr->environment()); - } else { - __ B(vc, ¬_kmin_int); - __ Mov(result, kMinInt / divisor); - __ B(&done); } + return; } - __ bind(¬_kmin_int); - __ Mov(result, Operand(dividend, ASR, shift)); - __ bind(&done); + + // If the negation could not overflow, simply shifting is OK. + if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ Mov(result, Operand(dividend, ASR, shift)); + return; + } + + __ Asr(result, result, shift); + __ Csel(result, result, kMinInt / divisor, vc); } @@ -3894,7 +3950,7 @@ Register dividend = ToRegister32(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister32(instr->result()); - ASSERT(!AreAliased(dividend, result)); + DCHECK(!AreAliased(dividend, result)); if (divisor == 0) { Deoptimize(instr->environment()); @@ -3904,8 +3960,7 @@ // Check for (0 / -x) that will produce negative zero. 
HMathFloorOfDiv* hdiv = instr->hydrogen(); if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { - __ Cmp(dividend, 0); - DeoptimizeIf(eq, instr->environment()); + DeoptimizeIfZero(dividend, instr->environment()); } // Easy case: We need no dynamic check for the dividend and the flooring @@ -3920,22 +3975,23 @@ // In the general case we may need to adjust before and after the truncating // division to get a flooring division. Register temp = ToRegister32(instr->temp()); - ASSERT(!AreAliased(temp, dividend, result)); + DCHECK(!AreAliased(temp, dividend, result)); Label needs_adjustment, done; __ Cmp(dividend, 0); __ B(divisor > 0 ? lt : gt, &needs_adjustment); __ TruncatingDiv(result, dividend, Abs(divisor)); if (divisor < 0) __ Neg(result, result); __ B(&done); - __ bind(&needs_adjustment); + __ Bind(&needs_adjustment); __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); __ TruncatingDiv(result, temp, Abs(divisor)); if (divisor < 0) __ Neg(result, result); __ Sub(result, result, Operand(1)); - __ bind(&done); + __ Bind(&done); } +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { Register dividend = ToRegister32(instr->dividend()); Register divisor = ToRegister32(instr->divisor()); @@ -3982,11 +4038,11 @@ void LCodeGen::DoMathLog(LMathLog* instr) { - ASSERT(instr->IsMarkedAsCall()); - ASSERT(ToDoubleRegister(instr->value()).is(d0)); + DCHECK(instr->IsMarkedAsCall()); + DCHECK(ToDoubleRegister(instr->value()).is(d0)); __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0, 1); - ASSERT(ToDoubleRegister(instr->result()).Is(d0)); + DCHECK(ToDoubleRegister(instr->result()).Is(d0)); } @@ -4025,16 +4081,16 @@ Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
- ASSERT(!instr->right()->IsDoubleRegister() || + DCHECK(!instr->right()->IsDoubleRegister() || ToDoubleRegister(instr->right()).is(d1)); - ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() || + DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() || ToRegister(instr->right()).is(x11)); - ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12)); - ASSERT(ToDoubleRegister(instr->left()).is(d0)); - ASSERT(ToDoubleRegister(instr->result()).is(d0)); + DCHECK(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12)); + DCHECK(ToDoubleRegister(instr->left()).is(d0)); + DCHECK(ToDoubleRegister(instr->result()).is(d0)); if (exponent_type.IsSmi()) { - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; @@ -4043,29 +4099,58 @@ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, instr->environment()); __ Bind(&no_deopt); - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub // supports large integer exponents. Register exponent = ToRegister(instr->right()); __ Sxtw(exponent, exponent); - MathPowStub stub(MathPowStub::INTEGER); + MathPowStub stub(isolate(), MathPowStub::INTEGER); __ CallStub(&stub); } else { - ASSERT(exponent_type.IsDouble()); - MathPowStub stub(MathPowStub::DOUBLE); + DCHECK(exponent_type.IsDouble()); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStub(&stub); } } -void LCodeGen::DoMathRound(LMathRound* instr) { - // TODO(jbramley): We could provide a double result here using frint. 
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + DoubleRegister scratch_d = double_scratch(); + + DCHECK(!AreAliased(input, result, scratch_d)); + + Label done; + + __ Frinta(result, input); + __ Fcmp(input, 0.0); + __ Fccmp(result, input, ZFlag, lt); + // The result is correct if the input was in [-0, +infinity], or was a + // negative integral value. + __ B(eq, &done); + + // Here the input is negative, non integral, with an exponent lower than 52. + // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff) + // case. So we can safely add 0.5. + __ Fmov(scratch_d, 0.5); + __ Fadd(result, input, scratch_d); + __ Frintm(result, result); + // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative. + __ Fabs(result, result); + __ Fneg(result, result); + + __ Bind(&done); +} + + +void LCodeGen::DoMathRoundI(LMathRoundI* instr) { DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister temp1 = ToDoubleRegister(instr->temp1()); + DoubleRegister temp = ToDoubleRegister(instr->temp1()); + DoubleRegister dot_five = double_scratch(); Register result = ToRegister(instr->result()); - Label try_rounding; Label done; // Math.round() rounds to the nearest integer, with ties going towards @@ -4076,46 +4161,53 @@ // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a // result of -0.0. - DoubleRegister dot_five = double_scratch(); + // Add 0.5 and round towards -infinity. __ Fmov(dot_five, 0.5); - __ Fabs(temp1, input); - __ Fcmp(temp1, dot_five); - // If input is in [-0.5, -0], the result is -0. - // If input is in [+0, +0.5[, the result is +0. - // If the input is +0.5, the result is 1. - __ B(hi, &try_rounding); // hi so NaN will also branch. 
+ __ Fadd(temp, input, dot_five); + __ Fcvtms(result, temp); + + // The result is correct if: + // result is not 0, as the input could be NaN or [-0.5, -0.0]. + // result is not 1, as 0.499...94 will wrongly map to 1. + // result fits in 32 bits. + __ Cmp(result, Operand(result.W(), SXTW)); + __ Ccmp(result, 1, ZFlag, eq); + __ B(hi, &done); + + // At this point, we have to handle possible inputs of NaN or numbers in the + // range [-0.5, 1.5[, or numbers larger than 32 bits. + // Deoptimize if the result > 1, as it must be larger than 32 bits. + __ Cmp(result, 1); + DeoptimizeIf(hi, instr->environment()); + + // Deoptimize for negative inputs, which at this point are only numbers in + // the range [-0.5, -0.0] if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ Fmov(result, input); - DeoptimizeIfNegative(result, instr->environment()); // [-0.5, -0.0]. + DeoptimizeIfNegative(result, instr->environment()); } - __ Fcmp(input, dot_five); - __ Mov(result, 1); // +0.5. - // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on - // flag kBailoutOnMinusZero, will return 0 (xzr). - __ Csel(result, result, xzr, eq); - __ B(&done); - __ Bind(&try_rounding); - // Since we're providing a 32-bit result, we can implement ties-to-infinity by - // adding 0.5 to the input, then taking the floor of the result. This does not - // work for very large positive doubles because adding 0.5 would cause an - // intermediate rounding stage, so a different approach will be necessary if a - // double result is needed. - __ Fadd(temp1, input, dot_five); - __ Fcvtms(result, temp1); - - // Deopt if - // * the input was NaN - // * the result is not representable using a 32-bit integer. - __ Fcmp(input, 0.0); - __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc); - DeoptimizeIf(ne, instr->environment()); + // Deoptimize if the input was NaN. 
+ __ Fcmp(input, dot_five); + DeoptimizeIf(vs, instr->environment()); + // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ + // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, + // else 0; we avoid dealing with 0.499...94 directly. + __ Cset(result, ge); __ Bind(&done); } +void LCodeGen::DoMathFround(LMathFround* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + __ Fcvt(result.S(), input); + __ Fcvt(result, result.S()); +} + + void LCodeGen::DoMathSqrt(LMathSqrt* instr) { DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); @@ -4140,7 +4232,7 @@ __ Cmp(left, right); __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); } else { - ASSERT(instr->hydrogen()->representation().IsDouble()); + DCHECK(instr->hydrogen()->representation().IsDouble()); DoubleRegister result = ToDoubleRegister(instr->result()); DoubleRegister left = ToDoubleRegister(instr->left()); DoubleRegister right = ToDoubleRegister(instr->right()); @@ -4148,7 +4240,7 @@ if (op == HMathMinMax::kMathMax) { __ Fmax(result, left, right); } else { - ASSERT(op == HMathMinMax::kMathMin); + DCHECK(op == HMathMinMax::kMathMin); __ Fmin(result, left, right); } } @@ -4158,7 +4250,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { Register dividend = ToRegister32(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister32(instr->result()))); + DCHECK(dividend.is(ToRegister32(instr->result()))); // Theoretically, a variation of the branch-free code for integer division by // a power of 2 (calculating the remainder via an additional multiplication @@ -4170,8 +4262,7 @@ int32_t mask = divisor < 0 ? 
-(divisor + 1) : (divisor - 1); Label dividend_is_not_negative, done; if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { - __ Cmp(dividend, 0); - __ B(pl, ÷nd_is_not_negative); + __ Tbz(dividend, kWSignBit, ÷nd_is_not_negative); // Note that this is correct even for kMinInt operands. __ Neg(dividend, dividend); __ And(dividend, dividend, mask); @@ -4193,7 +4284,7 @@ int32_t divisor = instr->divisor(); Register result = ToRegister32(instr->result()); Register temp = ToRegister32(instr->temp()); - ASSERT(!AreAliased(dividend, result, temp)); + DCHECK(!AreAliased(dividend, result, temp)); if (divisor == 0) { Deoptimize(instr->environment()); @@ -4221,39 +4312,30 @@ Register divisor = ToRegister32(instr->right()); Register result = ToRegister32(instr->result()); - Label deopt, done; + Label done; // modulo = dividend - quotient * divisor __ Sdiv(result, dividend, divisor); if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - // Combine the deoptimization sites. - Label ok; - __ Cbnz(divisor, &ok); - __ Bind(&deopt); - Deoptimize(instr->environment()); - __ Bind(&ok); + DeoptimizeIfZero(divisor, instr->environment()); } __ Msub(result, result, divisor, dividend); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ Cbnz(result, &done); - if (deopt.is_bound()) { // TODO(all) This is a hack, remove this... - __ Tbnz(dividend, kWSignBit, &deopt); - } else { - DeoptimizeIfNegative(dividend, instr->environment()); - } + DeoptimizeIfNegative(dividend, instr->environment()); } __ Bind(&done); } void LCodeGen::DoMulConstIS(LMulConstIS* instr) { - ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32()); + DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); bool is_smi = instr->hydrogen()->representation().IsSmi(); Register result = is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); Register left = is_smi ? 
ToRegister(instr->left()) : ToRegister32(instr->left()) ; int32_t right = ToInteger32(instr->right()); - ASSERT((right > -kMaxInt) || (right < kMaxInt)); + DCHECK((right > -kMaxInt) || (right < kMaxInt)); bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); bool bailout_on_minus_zero = @@ -4307,7 +4389,7 @@ if (can_overflow) { Register scratch = result; - ASSERT(!AreAliased(scratch, left)); + DCHECK(!AreAliased(scratch, left)); __ Cls(scratch, left); __ Cmp(scratch, right_log2); DeoptimizeIf(lt, instr->environment()); @@ -4332,7 +4414,7 @@ // For the following cases, we could perform a conservative overflow check // with CLS as above. However the few cycles saved are likely not worth // the risk of deoptimizing more often than required. - ASSERT(!can_overflow); + DCHECK(!can_overflow); if (right >= 0) { if (IsPowerOf2(right - 1)) { @@ -4430,7 +4512,7 @@ __ SmiUntag(result, left); __ Mul(result, result, right); } else { - ASSERT(!left.Is(result)); + DCHECK(!left.Is(result)); // Registers result and right alias, left is distinct, or all registers // are distinct: untag right into result, and then multiply by left, // giving a tagged result. @@ -4448,14 +4530,14 @@ Register result = ToRegister(instr->result()); __ Mov(result, 0); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // NumberTagU and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. 
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(x0, result); @@ -4513,15 +4595,15 @@ __ Mov(dst, 0); { // Preserve the value of all registers. - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // NumberTagU and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(x0, dst); @@ -4610,7 +4692,7 @@ } } else { - ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); // Fall through to load_smi. } @@ -4630,7 +4712,7 @@ // If the environment were already registered, we would have no way of // backpatching it with the spill slot operands. 
- ASSERT(!environment->HasBeenRegistered()); + DCHECK(!environment->HasBeenRegistered()); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); GenerateOsrPrologue(); @@ -4642,13 +4724,27 @@ } -void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->value(); - if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { - Abort(kDoPushArgumentNotImplementedForDoubleType); - } else { - __ Push(ToRegister(argument)); +void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) { + __ PushPreamble(instr->argc(), kPointerSize); +} + + +void LCodeGen::DoPushArguments(LPushArguments* instr) { + MacroAssembler::PushPopQueue args(masm()); + + for (int i = 0; i < instr->ArgumentCount(); ++i) { + LOperand* arg = instr->argument(i); + if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) { + Abort(kDoPushArgumentNotImplementedForDoubleType); + return; + } + args.Queue(ToRegister(arg)); } + + // The preamble was done by LPreparePushArguments. 
+ args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE); + + after_push_argument_ = true; } @@ -4703,13 +4799,13 @@ return FieldMemOperand(string, SeqString::kHeaderSize + offset); } + __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag); if (encoding == String::ONE_BYTE_ENCODING) { - __ Add(temp, string, Operand(ToRegister32(index), SXTW)); + return MemOperand(temp, ToRegister32(index), SXTW); } else { STATIC_ASSERT(kUC16Size == 2); - __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1)); + return MemOperand(temp, ToRegister32(index), SXTW, 1); } - return FieldMemOperand(temp, SeqString::kHeaderSize); } @@ -4755,7 +4851,7 @@ Register temp = ToRegister(instr->temp()); if (FLAG_debug_code) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Register index = ToRegister(instr->index()); static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; @@ -4825,8 +4921,8 @@ default: UNREACHABLE(); } } else { - ASSERT(right_op->IsConstantOperand()); - int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f; + DCHECK(right_op->IsConstantOperand()); + int shift_count = JSShiftAmountFromLConstant(right_op); if (shift_count == 0) { if ((instr->op() == Token::SHR) && instr->can_deopt()) { DeoptimizeIfNegative(left, instr->environment()); @@ -4851,7 +4947,7 @@ Register result = ToRegister(instr->result()); // Only ROR by register needs a temp. 
- ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) || + DCHECK(((instr->op() == Token::ROR) && right_op->IsRegister()) || (instr->temp() == NULL)); if (right_op->IsRegister()) { @@ -4888,8 +4984,8 @@ default: UNREACHABLE(); } } else { - ASSERT(right_op->IsConstantOperand()); - int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f; + DCHECK(right_op->IsConstantOperand()); + int shift_count = JSShiftAmountFromLConstant(right_op); if (shift_count == 0) { if ((instr->op() == Token::SHR) && instr->can_deopt()) { DeoptimizeIfNegative(left, instr->environment()); @@ -4926,10 +5022,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Register scratch1 = x5; Register scratch2 = x6; - ASSERT(instr->IsMarkedAsCall()); + DCHECK(instr->IsMarkedAsCall()); ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals"); // TODO(all): if Mov could handle object in new space then it could be used @@ -4937,17 +5033,17 @@ __ LoadHeapObject(scratch1, instr->hydrogen()->pairs()); __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags())); __ Push(cp, scratch1, scratch2); // The context is the first argument. 
- CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); } void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); RecordSafepointWithLazyDeopt( instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); } @@ -4964,7 +5060,7 @@ LStackCheck* instr_; }; - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); // There is no LLazyBailout instruction for stack-checks. We have to // prepare for lazy deoptimization explicitly here. @@ -4976,14 +5072,14 @@ PredictableCodeSizeScope predictable(masm_, Assembler::kCallSizeWithRelocation); - ASSERT(instr->context()->IsRegister()); - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(cp)); CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, instr); __ Bind(&done); } else { - ASSERT(instr->hydrogen()->is_backwards_branch()); + DCHECK(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping. DeferredStackCheck* deferred_stack_check = new(zone()) DeferredStackCheck(this, instr); @@ -5031,7 +5127,7 @@ __ Str(value, target); if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; __ RecordWriteContextSlot(context, target.offset(), @@ -5080,7 +5176,7 @@ bool key_is_constant = instr->key()->IsConstantOperand(); int constant_key = 0; if (key_is_constant) { - ASSERT(instr->temp() == NULL); + DCHECK(instr->temp() == NULL); constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xf0000000) { Abort(kArrayIndexConstantValueTooBig); @@ -5094,7 +5190,7 @@ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, key_is_constant, constant_key, elements_kind, - instr->additional_index()); + instr->base_offset()); if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) || (elements_kind == FLOAT32_ELEMENTS)) { @@ -5152,34 +5248,30 @@ void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) { Register elements = ToRegister(instr->elements()); DoubleRegister value = ToDoubleRegister(instr->value()); - Register store_base = no_reg; - int offset = 0; + MemOperand mem_op; if (instr->key()->IsConstantOperand()) { int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xf0000000) { Abort(kArrayIndexConstantValueTooBig); } - offset = FixedDoubleArray::OffsetOfElementAt(constant_key + - instr->additional_index()); - store_base = elements; + int offset = instr->base_offset() + constant_key * kDoubleSize; + mem_op = MemOperand(elements, offset); } else { - store_base = ToRegister(instr->temp()); + Register store_base = ToRegister(instr->temp()); Register key = ToRegister(instr->key()); bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind()); - offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); + mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged, + instr->hydrogen()->elements_kind(), + instr->hydrogen()->representation(), + instr->base_offset()); } if 
(instr->NeedsCanonicalization()) { - DoubleRegister dbl_scratch = double_scratch(); - __ Fmov(dbl_scratch, - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - __ Fmaxnm(dbl_scratch, dbl_scratch, value); - __ Str(dbl_scratch, FieldMemOperand(store_base, offset)); + __ CanonicalizeNaN(double_scratch(), value); + __ Str(double_scratch(), mem_op); } else { - __ Str(value, FieldMemOperand(store_base, offset)); + __ Str(value, mem_op); } } @@ -5190,57 +5282,61 @@ Register scratch = no_reg; Register store_base = no_reg; Register key = no_reg; - int offset = 0; + MemOperand mem_op; if (!instr->key()->IsConstantOperand() || instr->hydrogen()->NeedsWriteBarrier()) { scratch = ToRegister(instr->temp()); } + Representation representation = instr->hydrogen()->value()->representation(); if (instr->key()->IsConstantOperand()) { LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); + int offset = instr->base_offset() + + ToInteger32(const_operand) * kPointerSize; store_base = elements; + if (representation.IsInteger32()) { + DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); + DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); + STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); + STATIC_ASSERT(kSmiTag == 0); + mem_op = UntagSmiMemOperand(store_base, offset); + } else { + mem_op = MemOperand(store_base, offset); + } } else { store_base = scratch; key = ToRegister(instr->key()); bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); - CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, - instr->hydrogen()->elements_kind()); - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - Representation representation = instr->hydrogen()->value()->representation(); - if (representation.IsInteger32()) { - ASSERT(instr->hydrogen()->store_mode() == 
STORE_TO_INITIALIZED_ENTRY); - ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); - STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); - __ Store(value, UntagSmiFieldMemOperand(store_base, offset), - Representation::Integer32()); - } else { - __ Store(value, FieldMemOperand(store_base, offset), representation); + + mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged, + instr->hydrogen()->elements_kind(), + representation, instr->base_offset()); } + __ Store(value, mem_op, representation); + if (instr->hydrogen()->NeedsWriteBarrier()) { - ASSERT(representation.IsTagged()); + DCHECK(representation.IsTagged()); // This assignment may cause element_addr to alias store_base. Register element_addr = scratch; SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. - __ Add(element_addr, store_base, offset - kHeapObjectTag); + __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand()); __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(), - kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed); + kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); } } void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).Is(x2)); - ASSERT(ToRegister(instr->key()).Is(x1)); - ASSERT(ToRegister(instr->value()).Is(x0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); Handle<Code> ic = instr->strict_mode() == STRICT ? 
isolate()->builtins()->KeyedStoreIC_Initialize_Strict() @@ -5254,19 +5350,22 @@ Register object = ToRegister(instr->object()); HObjectAccess access = instr->hydrogen()->access(); - Handle<Map> transition = instr->transition(); int offset = access.offset(); if (access.IsExternalMemory()) { - ASSERT(transition.is_null()); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); Register value = ToRegister(instr->value()); __ Store(value, MemOperand(object, offset), representation); return; - } else if (representation.IsDouble()) { - ASSERT(transition.is_null()); - ASSERT(access.IsInobject()); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + } + + __ AssertNotSmi(object); + + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); FPRegister value = ToDoubleRegister(instr->value()); __ Str(value, FieldMemOperand(object, offset)); return; @@ -5274,35 +5373,24 @@ Register value = ToRegister(instr->value()); - SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - ASSERT(!(representation.IsSmi() && - instr->value()->IsConstantOperand() && - !IsInteger32Constant(LConstantOperand::cast(instr->value())))); - if (representation.IsHeapObject() && - !instr->hydrogen()->value()->type().IsHeapObject()) { - DeoptimizeIfSmi(value, instr->environment()); - - // We know that value is a smi now, so we can omit the check below. - check_needed = OMIT_SMI_CHECK; - } - - if (!transition.is_null()) { + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsInteger32Constant(LConstantOperand::cast(instr->value()))); + + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); // Store the new map value. 
Register new_map_value = ToRegister(instr->temp0()); __ Mov(new_map_value, Operand(transition)); __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset)); if (instr->hydrogen()->NeedsWriteBarrierForMap()) { // Update the write barrier for the map field. - __ RecordWriteField(object, - HeapObject::kMapOffset, - new_map_value, - ToRegister(instr->temp1()), - GetLinkRegisterState(), - kSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteForMap(object, + new_map_value, + ToRegister(instr->temp1()), + GetLinkRegisterState(), + kSaveFPRegs); } } @@ -5318,7 +5406,7 @@ if (representation.IsSmi() && instr->hydrogen()->value()->representation().IsInteger32()) { - ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); + DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); #ifdef DEBUG Register temp0 = ToRegister(instr->temp0()); __ Ldr(temp0, FieldMemOperand(destination, offset)); @@ -5326,11 +5414,12 @@ // If destination aliased temp0, restore it to the address calculated // earlier. 
if (destination.Is(temp0)) { - ASSERT(!access.IsInobject()); + DCHECK(!access.IsInobject()); __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); } #endif - STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); + STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); + STATIC_ASSERT(kSmiTag == 0); __ Store(value, UntagSmiFieldMemOperand(destination, offset), Representation::Integer32()); } else { @@ -5344,30 +5433,31 @@ GetLinkRegisterState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); } } void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->value()).is(x0)); - ASSERT(ToRegister(instr->object()).is(x1)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); - // Name must be in x2. - __ Mov(x2, Operand(instr->name())); + __ Mov(StoreIC::NameRegister(), Operand(instr->name())); Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); CallCode(ic, RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoStringAdd(LStringAdd* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).Is(x1)); - ASSERT(ToRegister(instr->right()).Is(x0)); - StringAddStub stub(instr->hydrogen()->flags(), + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).Is(x1)); + DCHECK(ToRegister(instr->right()).Is(x0)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -5403,15 +5493,14 @@ // contained in the register pointer map. 
__ Mov(result, 0); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ Push(string); // Push the index as a smi. This is safe because of the checks in // DoStringCharCodeAt above. Register index = ToRegister(instr->index()); - __ SmiTag(index); - __ Push(index); + __ SmiTagAndPush(index); - CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr, + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, instr->context()); __ AssertSmi(x0); __ SmiUntag(x0); @@ -5433,15 +5522,15 @@ DeferredStringCharFromCode* deferred = new(zone()) DeferredStringCharFromCode(this, instr); - ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister32(instr->char_code()); Register result = ToRegister(instr->result()); __ Cmp(char_code, String::kMaxOneByteCharCode); __ B(hi, deferred->entry()); __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2)); - __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); + __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag); + __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2)); __ CompareRoot(result, Heap::kUndefinedValueRootIndex); __ B(eq, deferred->entry()); __ Bind(deferred->exit()); @@ -5457,16 +5546,15 @@ // contained in the register pointer map. 
__ Mov(result, 0); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - __ SmiTag(char_code); - __ Push(char_code); + PushSafepointRegistersScope scope(this); + __ SmiTagAndPush(char_code); CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); __ StoreToSafepointRegisterSlot(x0, result); } void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -5483,7 +5571,8 @@ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); Register result = ToRegister32(instr->result()); Register left = ToRegister32(instr->left()); - Operand right = ToOperand32I(instr->right()); + Operand right = ToShiftedRightOperand32I(instr->right(), instr); + if (can_overflow) { __ Subs(result, left, right); DeoptimizeIf(vs, instr->environment()); @@ -5557,7 +5646,7 @@ // A heap number: load value and convert to int32 using non-truncating // function. If the result is out of range, branch to deoptimize. 
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); - __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2); + __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); DeoptimizeIf(ne, instr->environment()); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { @@ -5608,15 +5697,15 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - ASSERT(ToRegister(instr->value()).Is(x0)); - ASSERT(ToRegister(instr->result()).Is(x0)); + DCHECK(ToRegister(instr->value()).Is(x0)); + DCHECK(ToRegister(instr->result()).Is(x0)); __ Push(x0); CallRuntime(Runtime::kToFastProperties, 1, instr); } void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Label materialized; // Registers will be used as follows: // x7 = literals array. @@ -5635,7 +5724,7 @@ __ Mov(x11, Operand(instr->hydrogen()->pattern())); __ Mov(x10, Operand(instr->hydrogen()->flags())); __ Push(x7, x12, x11, x10); - CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); __ Mov(x1, x0); __ Bind(&materialized); @@ -5648,7 +5737,7 @@ __ Bind(&runtime_allocate); __ Mov(x0, Smi::FromInt(size)); __ Push(x1, x0); - CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); __ Pop(x1); __ Bind(&allocated); @@ -5659,7 +5748,6 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object = ToRegister(instr->object()); - Register temp1 = ToRegister(instr->temp1()); Handle<Map> from_map = instr->original_map(); Handle<Map> to_map = instr->transitioned_map(); @@ -5667,26 +5755,33 @@ ElementsKind to_kind = instr->to_kind(); Label not_applicable; - __ CheckMap(object, temp1, from_map, ¬_applicable, DONT_DO_SMI_CHECK); if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + Register temp1 = ToRegister(instr->temp1()); Register 
new_map = ToRegister(instr->temp2()); + __ CheckMap(object, temp1, from_map, ¬_applicable, DONT_DO_SMI_CHECK); __ Mov(new_map, Operand(to_map)); __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset)); // Write barrier. - __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1, - GetLinkRegisterState(), kDontSaveFPRegs); + __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(), + kDontSaveFPRegs); } else { - ASSERT(ToRegister(instr->context()).is(cp)); - PushSafepointRegistersScope scope( - this, Safepoint::kWithRegistersAndDoubles); - __ Mov(x0, object); + { + UseScratchRegisterScope temps(masm()); + // Use the temp register only in a restricted scope - the codegen checks + // that we do not use any register across a call. + __ CheckMap(object, temps.AcquireX(), from_map, ¬_applicable, + DONT_DO_SMI_CHECK); + } + DCHECK(object.is(x0)); + DCHECK(ToRegister(instr->context()).is(cp)); + PushSafepointRegistersScope scope(this); __ Mov(x1, Operand(to_map)); bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; - TransitionElementsKindStub stub(from_kind, to_kind, is_js_array); + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); __ CallStub(&stub); - RecordSafepointWithRegistersAndDoubles( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); } __ Bind(¬_applicable); } @@ -5698,8 +5793,8 @@ Register temp2 = ToRegister(instr->temp2()); Label no_memento_found; - __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found); - Deoptimize(instr->environment()); + __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); + DeoptimizeIf(eq, instr->environment()); __ Bind(&no_memento_found); } @@ -5727,8 +5822,9 @@ Label* false_label = instr->FalseLabel(chunk_); Register value = ToRegister(instr->value()); - if (type_name->Equals(heap()->number_string())) { - ASSERT(instr->temp1() != NULL); + 
Factory* factory = isolate()->factory(); + if (String::Equals(type_name, factory->number_string())) { + DCHECK(instr->temp1() != NULL); Register map = ToRegister(instr->temp1()); __ JumpIfSmi(value, true_label); @@ -5736,8 +5832,8 @@ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); EmitBranch(instr, eq); - } else if (type_name->Equals(heap()->string_string())) { - ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); + } else if (String::Equals(type_name, factory->string_string())) { + DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); Register map = ToRegister(instr->temp1()); Register scratch = ToRegister(instr->temp2()); @@ -5747,8 +5843,8 @@ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable); - } else if (type_name->Equals(heap()->symbol_string())) { - ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); + } else if (String::Equals(type_name, factory->symbol_string())) { + DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); Register map = ToRegister(instr->temp1()); Register scratch = ToRegister(instr->temp2()); @@ -5756,17 +5852,13 @@ __ CompareObjectType(value, map, scratch, SYMBOL_TYPE); EmitBranch(instr, eq); - } else if (type_name->Equals(heap()->boolean_string())) { + } else if (String::Equals(type_name, factory->boolean_string())) { __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label); __ CompareRoot(value, Heap::kFalseValueRootIndex); EmitBranch(instr, eq); - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { - __ CompareRoot(value, Heap::kNullValueRootIndex); - EmitBranch(instr, eq); - - } else if (type_name->Equals(heap()->undefined_string())) { - ASSERT(instr->temp1() != NULL); + } else if (String::Equals(type_name, factory->undefined_string())) { + DCHECK(instr->temp1() != NULL); Register scratch = ToRegister(instr->temp1()); __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label); @@ -5776,9 
+5868,9 @@ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable); - } else if (type_name->Equals(heap()->function_string())) { + } else if (String::Equals(type_name, factory->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - ASSERT(instr->temp1() != NULL); + DCHECK(instr->temp1() != NULL); Register type = ToRegister(instr->temp1()); __ JumpIfSmi(value, false_label); @@ -5786,21 +5878,19 @@ // HeapObject's type has been loaded into type register by JumpIfObjectType. EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE); - } else if (type_name->Equals(heap()->object_string())) { - ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); + } else if (String::Equals(type_name, factory->object_string())) { + DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); Register map = ToRegister(instr->temp1()); Register scratch = ToRegister(instr->temp2()); __ JumpIfSmi(value, false_label); - if (!FLAG_harmony_typeof) { - __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label); - } + __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label); __ JumpIfObjectType(value, map, scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt); __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); __ B(gt, false_label); // Check for undetectable objects => false. - __ Ldrb(scratch, FieldMemOperand(value, Map::kBitFieldOffset)); + __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable); } else { @@ -5832,7 +5922,7 @@ // If the receiver is null or undefined, we have to pass the global object as // a receiver to normal functions. Values have to be passed unchanged to // builtins and strict-mode functions. 
- Label global_object, done, deopt; + Label global_object, done, copy_receiver; if (!instr->hydrogen()->known_function()) { __ Ldr(result, FieldMemOperand(function, @@ -5843,10 +5933,10 @@ FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset)); // Do not transform the receiver to object for strict mode functions. - __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done); + __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, ©_receiver); // Do not transform the receiver to object for builtins. - __ Tbnz(result, SharedFunctionInfo::kNative, &done); + __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); } // Normal function. Replace undefined or null with global receiver. @@ -5854,32 +5944,78 @@ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); // Deoptimize if the receiver is not a JS object. - __ JumpIfSmi(receiver, &deopt); + DeoptimizeIfSmi(receiver, instr->environment()); __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); - __ Mov(result, receiver); - __ B(ge, &done); - // Otherwise, fall through to deopt. 
- - __ Bind(&deopt); + __ B(ge, ©_receiver); Deoptimize(instr->environment()); __ Bind(&global_object); __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); - __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset)); + __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); + __ B(&done); + __ Bind(©_receiver); + __ Mov(result, receiver); __ Bind(&done); } +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ Push(object); + __ Push(index); + __ Mov(cp, 0); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(x0, result); +} + + void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + result_(result), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register result_; + Register object_; + Register index_; + }; Register object = ToRegister(instr->object()); Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); __ AssertSmi(index); + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, result, object, index); + Label out_of_object, done; + + __ TestAndBranchIfAnySet( + index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry()); + 
__ Mov(index, Operand(index, ASR, 1)); + __ Cmp(index, Smi::FromInt(0)); __ B(lt, &out_of_object); @@ -5895,7 +6031,25 @@ __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize - kPointerSize)); + __ Bind(deferred->exit()); __ Bind(&done); } + +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ Push(scope_info); + __ Push(ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm64/lithium-codegen-arm64.h nodejs-0.11.15/deps/v8/src/arm64/lithium-codegen-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/lithium-codegen-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/lithium-codegen-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_ #define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_ -#include "arm64/lithium-arm64.h" +#include "src/arm64/lithium-arm64.h" -#include "arm64/lithium-gap-resolver-arm64.h" -#include "deoptimizer.h" -#include "lithium-codegen.h" -#include "safepoint-table.h" -#include "scopes.h" -#include "v8utils.h" +#include "src/arm64/lithium-gap-resolver-arm64.h" +#include "src/deoptimizer.h" +#include "src/lithium-codegen.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -60,10 +37,16 @@ frame_is_built_(false), safepoints_(info->zone()), resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple) { + expected_safepoint_kind_(Safepoint::kSimple), + after_push_argument_(false), + inlined_arguments_(false) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } + ~LCodeGen() { + DCHECK(!after_push_argument_ || inlined_arguments_); + } + // Simple accessors. Scope* scope() const { return scope_; } @@ -98,6 +81,7 @@ // information on it. 
void FinishCode(Handle<Code> code); + enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; // Support for converting LOperands to assembler types. // LOperand must be a register. Register ToRegister(LOperand* op) const; @@ -105,9 +89,30 @@ Operand ToOperand(LOperand* op); Operand ToOperand32I(LOperand* op); Operand ToOperand32U(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; + enum StackMode { kMustUseFramePointer, kCanUseStackPointer }; + MemOperand ToMemOperand(LOperand* op, + StackMode stack_mode = kCanUseStackPointer) const; Handle<Object> ToHandle(LConstantOperand* op) const; + template<class LI> + Operand ToShiftedRightOperand32I(LOperand* right, + LI* shift_info) { + return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32); + } + template<class LI> + Operand ToShiftedRightOperand32U(LOperand* right, + LI* shift_info) { + return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32); + } + template<class LI> + Operand ToShiftedRightOperand32(LOperand* right, + LI* shift_info, + IntegerSignedness signedness); + + int JSShiftAmountFromLConstant(LOperand* constant) { + return ToInteger32(LConstantOperand::cast(constant)) & 0x1f; + } + // TODO(jbramley): Examine these helpers and check that they make sense. // IsInteger32Constant returns true for smi constants, for example. 
bool IsInteger32Constant(LConstantOperand* op) const; @@ -137,7 +142,6 @@ Label* exit, Label* allocation_entry); - enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; void DoDeferredNumberTagU(LInstruction* instr, LOperand* value, LOperand* temp1, @@ -149,6 +153,10 @@ void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index); Operand ToOperand32(LOperand* op, IntegerSignedness signedness); @@ -224,7 +232,7 @@ Deoptimizer::BailoutType* override_bailout_type = NULL); void Deoptimize(LEnvironment* environment, Deoptimizer::BailoutType* override_bailout_type = NULL); - void DeoptimizeIf(Condition cc, LEnvironment* environment); + void DeoptimizeIf(Condition cond, LEnvironment* environment); void DeoptimizeIfZero(Register rt, LEnvironment* environment); void DeoptimizeIfNotZero(Register rt, LEnvironment* environment); void DeoptimizeIfNegative(Register rt, LEnvironment* environment); @@ -239,7 +247,6 @@ void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment); void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment); void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment); - void ApplyCheckIf(Condition cc, LBoundsCheck* check); MemOperand PrepareKeyedExternalArrayOperand(Register key, Register base, @@ -248,20 +255,20 @@ bool key_is_constant, int constant_key, ElementsKind elements_kind, - int additional_index); - void CalcKeyedArrayBaseRegister(Register base, - Register elements, - Register key, - bool key_is_tagged, - ElementsKind elements_kind); + int base_offset); + MemOperand PrepareKeyedArrayOperand(Register base, + Register elements, + Register key, + bool key_is_tagged, + ElementsKind elements_kind, + Representation representation, + int base_offset); void 
RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - void Abort(BailoutReason reason); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } // Emit frame translation commands for an environment. @@ -341,9 +348,6 @@ void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, Safepoint::DeoptMode mode); - void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); void RecordSafepointWithLazyDeopt(LInstruction* instr, SafepointMode safepoint_mode); @@ -368,16 +372,24 @@ Safepoint::Kind expected_safepoint_kind_; + // This flag is true when we are after a push (but before a call). + // In this situation, jssp no longer references the end of the stack slots so, + // we can only reference a stack slot via fp. + bool after_push_argument_; + // If we have inlined arguments, we are no longer able to use jssp because + // jssp is modified and we never know if we are in a block after or before + // the pop of the arguments (which restores jssp). 
+ bool inlined_arguments_; + int old_position_; class PushSafepointRegistersScope BASE_EMBEDDED { public: - PushSafepointRegistersScope(LCodeGen* codegen, - Safepoint::Kind kind) + explicit PushSafepointRegistersScope(LCodeGen* codegen) : codegen_(codegen) { - ASSERT(codegen_->info()->is_calling()); - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = kind; + DCHECK(codegen_->info()->is_calling()); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; UseScratchRegisterScope temps(codegen_->masm_); // Preserve the value of lr which must be saved on the stack (the call to @@ -385,39 +397,14 @@ Register to_be_pushed_lr = temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr()); codegen_->masm_->Mov(to_be_pushed_lr, lr); - switch (codegen_->expected_safepoint_kind_) { - case Safepoint::kWithRegisters: { - StoreRegistersStateStub stub(kDontSaveFPRegs); - codegen_->masm_->CallStub(&stub); - break; - } - case Safepoint::kWithRegistersAndDoubles: { - StoreRegistersStateStub stub(kSaveFPRegs); - codegen_->masm_->CallStub(&stub); - break; - } - default: - UNREACHABLE(); - } + StoreRegistersStateStub stub(codegen_->isolate()); + codegen_->masm_->CallStub(&stub); } ~PushSafepointRegistersScope() { - Safepoint::Kind kind = codegen_->expected_safepoint_kind_; - ASSERT((kind & Safepoint::kWithRegisters) != 0); - switch (kind) { - case Safepoint::kWithRegisters: { - RestoreRegistersStateStub stub(kDontSaveFPRegs); - codegen_->masm_->CallStub(&stub); - break; - } - case Safepoint::kWithRegistersAndDoubles: { - RestoreRegistersStateStub stub(kSaveFPRegs); - codegen_->masm_->CallStub(&stub); - break; - } - default: - UNREACHABLE(); - } + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + RestoreRegistersStateStub stub(codegen_->isolate()); + codegen_->masm_->CallStub(&stub); codegen_->expected_safepoint_kind_ = 
Safepoint::kSimple; } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,62 +1,39 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "arm64/lithium-gap-resolver-arm64.h" -#include "arm64/lithium-codegen-arm64.h" +#include "src/arm64/delayed-masm-arm64-inl.h" +#include "src/arm64/lithium-codegen-arm64.h" +#include "src/arm64/lithium-gap-resolver-arm64.h" namespace v8 { namespace internal { -// We use the root register to spill a value while breaking a cycle in parallel -// moves. We don't need access to roots while resolving the move list and using -// the root register has two advantages: -// - It is not in crankshaft allocatable registers list, so it can't interfere -// with any of the moves we are resolving. -// - We don't need to push it on the stack, as we can reload it with its value -// once we have resolved a cycle. -#define kSavedValue root - -// We use the MacroAssembler floating-point scratch register to break a cycle -// involving double values as the MacroAssembler will not need it for the -// operations performed by the gap resolver. -#define kSavedDoubleValue fp_scratch +#define __ ACCESS_MASM((&masm_)) -LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false), - saved_destination_(NULL), need_to_restore_root_(false) { } +void DelayedGapMasm::EndDelayedUse() { + DelayedMasm::EndDelayedUse(); + if (scratch_register_used()) { + DCHECK(ScratchRegister().Is(root)); + DCHECK(!pending()); + InitializeRootRegister(); + reset_scratch_register_used(); + } +} -#define __ ACCESS_MASM(cgen_->masm()) +LGapResolver::LGapResolver(LCodeGen* owner) + : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()), + root_index_(0), in_cycle_(false), saved_destination_(NULL) { +} + void LGapResolver::Resolve(LParallelMove* parallel_move) { - ASSERT(moves_.is_empty()); + DCHECK(moves_.is_empty()); + DCHECK(!masm_.pending()); // Build up a worklist of moves. 
BuildInitialMoveList(parallel_move); @@ -79,16 +56,12 @@ LMoveOperands move = moves_[i]; if (!move.IsEliminated()) { - ASSERT(move.source()->IsConstantOperand()); + DCHECK(move.source()->IsConstantOperand()); EmitMove(i); } } - if (need_to_restore_root_) { - ASSERT(kSavedValue.Is(root)); - __ InitializeRootRegister(); - need_to_restore_root_ = false; - } + __ EndDelayedUse(); moves_.Rewind(0); } @@ -115,13 +88,13 @@ // cycles in the move graph. LMoveOperands& current_move = moves_[index]; - ASSERT(!current_move.IsPending()); - ASSERT(!current_move.IsRedundant()); + DCHECK(!current_move.IsPending()); + DCHECK(!current_move.IsRedundant()); // Clear this move's destination to indicate a pending move. The actual // destination is saved in a stack allocated local. Multiple moves can // be pending because this function is recursive. - ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated. + DCHECK(current_move.source() != NULL); // Otherwise it will look eliminated. LOperand* destination = current_move.destination(); current_move.set_destination(NULL); @@ -148,7 +121,7 @@ // a scratch register to break it. LMoveOperands other_move = moves_[root_index_]; if (other_move.Blocks(destination)) { - ASSERT(other_move.IsPending()); + DCHECK(other_move.IsPending()); BreakCycle(index); return; } @@ -159,12 +132,12 @@ void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS // No operand should be the destination for more than one move. 
for (int i = 0; i < moves_.length(); ++i) { LOperand* destination = moves_[i].destination(); for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); } } #endif @@ -172,13 +145,8 @@ void LGapResolver::BreakCycle(int index) { - ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); - ASSERT(!in_cycle_); - - // We use registers which are not allocatable by crankshaft to break the cycle - // to be sure they don't interfere with the moves we are resolving. - ASSERT(!kSavedValue.IsAllocatable()); - ASSERT(!kSavedDoubleValue.IsAllocatable()); + DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); + DCHECK(!in_cycle_); // We save in a register the source of that move and we remember its // destination. Then we mark this move as resolved so the cycle is @@ -188,19 +156,15 @@ saved_destination_ = moves_[index].destination(); if (source->IsRegister()) { - need_to_restore_root_ = true; - __ Mov(kSavedValue, cgen_->ToRegister(source)); + AcquireSavedValueRegister(); + __ Mov(SavedValueRegister(), cgen_->ToRegister(source)); } else if (source->IsStackSlot()) { - need_to_restore_root_ = true; - __ Ldr(kSavedValue, cgen_->ToMemOperand(source)); + AcquireSavedValueRegister(); + __ Load(SavedValueRegister(), cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { - ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue)); - cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue); - __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source)); + __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { - ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue)); - cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue); - __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source)); + __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source)); } 
else { UNREACHABLE(); } @@ -213,19 +177,20 @@ void LGapResolver::RestoreValue() { - ASSERT(in_cycle_); - ASSERT(saved_destination_ != NULL); + DCHECK(in_cycle_); + DCHECK(saved_destination_ != NULL); if (saved_destination_->IsRegister()) { - __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue); + __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister()); + ReleaseSavedValueRegister(); } else if (saved_destination_->IsStackSlot()) { - __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_)); + __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_)); + ReleaseSavedValueRegister(); } else if (saved_destination_->IsDoubleRegister()) { - __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue); - cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue); + __ Fmov(cgen_->ToDoubleRegister(saved_destination_), + SavedFPValueRegister()); } else if (saved_destination_->IsDoubleStackSlot()) { - __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_)); - cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue); + __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); } @@ -247,16 +212,16 @@ if (destination->IsRegister()) { __ Mov(cgen_->ToRegister(destination), source_register); } else { - ASSERT(destination->IsStackSlot()); - __ Str(source_register, cgen_->ToMemOperand(destination)); + DCHECK(destination->IsStackSlot()); + __ Store(source_register, cgen_->ToMemOperand(destination)); } } else if (source->IsStackSlot()) { MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsRegister()) { - __ Ldr(cgen_->ToRegister(destination), source_operand); + __ Load(cgen_->ToRegister(destination), source_operand); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); EmitStackSlotMove(index); } @@ -275,17 +240,30 @@ DoubleRegister result = cgen_->ToDoubleRegister(destination); __ Fmov(result, cgen_->ToDouble(constant_source)); } else { - 
ASSERT(destination->IsStackSlot()); - ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. - need_to_restore_root_ = true; + DCHECK(destination->IsStackSlot()); + DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. if (cgen_->IsSmi(constant_source)) { - __ Mov(kSavedValue, cgen_->ToSmi(constant_source)); + Smi* smi = cgen_->ToSmi(constant_source); + __ StoreConstant(reinterpret_cast<intptr_t>(smi), + cgen_->ToMemOperand(destination)); } else if (cgen_->IsInteger32Constant(constant_source)) { - __ Mov(kSavedValue, cgen_->ToInteger32(constant_source)); + __ StoreConstant(cgen_->ToInteger32(constant_source), + cgen_->ToMemOperand(destination)); } else { - __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source)); + Handle<Object> handle = cgen_->ToHandle(constant_source); + AllowDeferredHandleDereference smi_object_check; + if (handle->IsSmi()) { + Object* obj = *handle; + DCHECK(!obj->IsHeapObject()); + __ StoreConstant(reinterpret_cast<intptr_t>(obj), + cgen_->ToMemOperand(destination)); + } else { + AcquireSavedValueRegister(); + __ LoadObject(SavedValueRegister(), handle); + __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination)); + ReleaseSavedValueRegister(); + } } - __ Str(kSavedValue, cgen_->ToMemOperand(destination)); } } else if (source->IsDoubleRegister()) { @@ -293,16 +271,16 @@ if (destination->IsDoubleRegister()) { __ Fmov(cgen_->ToDoubleRegister(destination), src); } else { - ASSERT(destination->IsDoubleStackSlot()); - __ Str(src, cgen_->ToMemOperand(destination)); + DCHECK(destination->IsDoubleStackSlot()); + __ Store(src, cgen_->ToMemOperand(destination)); } } else if (source->IsDoubleStackSlot()) { MemOperand src = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { - __ Ldr(cgen_->ToDoubleRegister(destination), src); + __ Load(cgen_->ToDoubleRegister(destination), src); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); 
EmitStackSlotMove(index); } @@ -314,21 +292,4 @@ moves_[index].Eliminate(); } - -void LGapResolver::EmitStackSlotMove(int index) { - // We need a temp register to perform a stack slot to stack slot move, and - // the register must not be involved in breaking cycles. - - // Use the Crankshaft double scratch register as the temporary. - DoubleRegister temp = crankshaft_fp_scratch; - - LOperand* src = moves_[index].source(); - LOperand* dst = moves_[index].destination(); - - ASSERT(src->IsStackSlot()); - ASSERT(dst->IsStackSlot()); - __ Ldr(temp, cgen_->ToMemOperand(src)); - __ Str(temp, cgen_->ToMemOperand(dst)); -} - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm64/lithium-gap-resolver-arm64.h nodejs-0.11.15/deps/v8/src/arm64/lithium-gap-resolver-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/lithium-gap-resolver-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/lithium-gap-resolver-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_ #define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_ -#include "v8.h" +#include "src/v8.h" -#include "lithium.h" +#include "src/arm64/delayed-masm-arm64.h" +#include "src/lithium.h" namespace v8 { namespace internal { @@ -38,6 +16,21 @@ class LCodeGen; class LGapResolver; +class DelayedGapMasm : public DelayedMasm { + public: + DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm) + : DelayedMasm(owner, masm, root) { + // We use the root register as an extra scratch register. + // The root register has two advantages: + // - It is not in crankshaft allocatable registers list, so it can't + // interfere with the allocatable registers. + // - We don't need to push it on the stack, as we can reload it with its + // value once we have finish. + } + void EndDelayedUse(); +}; + + class LGapResolver BASE_EMBEDDED { public: explicit LGapResolver(LCodeGen* owner); @@ -66,12 +59,32 @@ void EmitMove(int index); // Emit a move from one stack slot to another. 
- void EmitStackSlotMove(int index); + void EmitStackSlotMove(int index) { + masm_.StackSlotMove(moves_[index].source(), moves_[index].destination()); + } // Verify the move list before performing moves. void Verify(); + // Registers used to solve cycles. + const Register& SavedValueRegister() { + DCHECK(!masm_.ScratchRegister().IsAllocatable()); + return masm_.ScratchRegister(); + } + // The scratch register is used to break cycles and to store constant. + // These two methods switch from one mode to the other. + void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); } + void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); } + const FPRegister& SavedFPValueRegister() { + // We use the Crankshaft floating-point scratch register to break a cycle + // involving double values as the MacroAssembler will not need it for the + // operations performed by the gap resolver. + DCHECK(!crankshaft_fp_scratch.IsAllocatable()); + return crankshaft_fp_scratch; + } + LCodeGen* cgen_; + DelayedGapMasm masm_; // List of moves not yet resolved. ZoneList<LMoveOperands> moves_; @@ -79,10 +92,6 @@ int root_index_; bool in_cycle_; LOperand* saved_destination_; - - // We use the root register as a scratch in a few places. When that happens, - // this flag is set to indicate that it needs to be restored. - bool need_to_restore_root_; }; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm64/macro-assembler-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/macro-assembler-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/macro-assembler-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/macro-assembler-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "bootstrapper.h" -#include "codegen.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "isolate-inl.h" -#include "runtime.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" namespace v8 { namespace internal { @@ -53,7 +30,9 @@ #endif has_frame_(false), use_real_aborts_(true), - sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) { + sp_(jssp), + tmp_list_(DefaultTmpList()), + fptmp_list_(DefaultFPTmpList()) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), isolate()); @@ -61,31 +40,47 @@ } +CPURegList MacroAssembler::DefaultTmpList() { + return CPURegList(ip0, ip1); +} + + +CPURegList MacroAssembler::DefaultFPTmpList() { + return CPURegList(fp_scratch1, fp_scratch2); +} + + void MacroAssembler::LogicalMacro(const Register& rd, const Register& rn, const Operand& operand, LogicalOp op) { UseScratchRegisterScope temps(this); - if (operand.NeedsRelocation()) { + if (operand.NeedsRelocation(this)) { Register temp = temps.AcquireX(); - LoadRelocated(temp, operand); + Ldr(temp, operand.immediate()); Logical(rd, rn, temp, op); } else if (operand.IsImmediate()) { - int64_t immediate = operand.immediate(); + int64_t immediate = operand.ImmediateValue(); unsigned reg_size = rd.SizeInBits(); - ASSERT(rd.Is64Bits() || is_uint32(immediate)); // If the operation is NOT, invert the operation and immediate. if ((op & NOT) == NOT) { op = static_cast<LogicalOp>(op & ~NOT); immediate = ~immediate; - if (rd.Is32Bits()) { - immediate &= kWRegMask; - } } + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are consistent. 
+ DCHECK(((immediate >> kWRegSizeInBits) == 0) || + ((immediate >> kWRegSizeInBits) == -1)); + immediate &= kWRegMask; + } + + DCHECK(rd.Is64Bits() || is_uint32(immediate)); + // Special cases for all set or all clear immediates. if (immediate == 0) { switch (op) { @@ -129,23 +124,24 @@ } else { // Immediate can't be encoded: synthesize using move immediate. Register temp = temps.AcquireSameSizeAs(rn); - Mov(temp, immediate); + Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate); if (rd.Is(csp)) { // If rd is the stack pointer we cannot use it as the destination // register so we use the temp register as an intermediate again. - Logical(temp, rn, temp, op); + Logical(temp, rn, imm_operand, op); Mov(csp, temp); + AssertStackConsistency(); } else { - Logical(rd, rn, temp, op); + Logical(rd, rn, imm_operand, op); } } } else if (operand.IsExtendedRegister()) { - ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); + DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits()); // Add/sub extended supports shift <= 4. We want to support exactly the // same modes here. - ASSERT(operand.shift_amount() <= 4); - ASSERT(operand.reg().Is64Bits() || + DCHECK(operand.shift_amount() <= 4); + DCHECK(operand.reg().Is64Bits() || ((operand.extend() != UXTX) && (operand.extend() != SXTX))); Register temp = temps.AcquireSameSizeAs(rn); EmitExtendShift(temp, operand.reg(), operand.extend(), @@ -154,16 +150,16 @@ } else { // The operand can be encoded in the instruction. - ASSERT(operand.IsShiftedRegister()); + DCHECK(operand.IsShiftedRegister()); Logical(rd, rn, operand, op); } } void MacroAssembler::Mov(const Register& rd, uint64_t imm) { - ASSERT(allow_macro_instructions_); - ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); + DCHECK(!rd.IsZero()); // TODO(all) extend to support more immediates. 
// @@ -182,20 +178,11 @@ // applying move-keep operations to move-zero and move-inverted initial // values. - unsigned reg_size = rd.SizeInBits(); - unsigned n, imm_s, imm_r; - if (IsImmMovz(imm, reg_size) && !rd.IsSP()) { - // Immediate can be represented in a move zero instruction. Movz can't - // write to the stack pointer. - movz(rd, imm); - } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) { - // Immediate can be represented in a move inverted instruction. Movn can't - // write to the stack pointer. - movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask)); - } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { - // Immediate can be represented in a logical orr instruction. - LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR); - } else { + // Try to move the immediate in one instruction, and if that fails, switch to + // using multiple instructions. + if (!TryOneInstrMoveImmediate(rd, imm)) { + unsigned reg_size = rd.SizeInBits(); + // Generic immediate case. Imm will be represented by // [imm3, imm2, imm1, imm0], where each imm is 16 bits. // A move-zero or move-inverted is generated for the first non-zero or @@ -218,7 +205,7 @@ // Iterate through the halfwords. Use movn/movz for the first non-ignored // halfword, and movk for subsequent halfwords. - ASSERT((reg_size % 16) == 0); + DCHECK((reg_size % 16) == 0); bool first_mov_done = false; for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; @@ -236,12 +223,13 @@ } } } - ASSERT(first_mov_done); + DCHECK(first_mov_done); // Move the temporary if the original destination register was the stack // pointer. 
if (rd.IsSP()) { mov(rd, temp); + AssertStackConsistency(); } } } @@ -250,20 +238,20 @@ void MacroAssembler::Mov(const Register& rd, const Operand& operand, DiscardMoveMode discard_mode) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); // Provide a swap register for instructions that need to write into the // system stack pointer (and can't do this inherently). UseScratchRegisterScope temps(this); Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; - if (operand.NeedsRelocation()) { - LoadRelocated(dst, operand); + if (operand.NeedsRelocation(this)) { + Ldr(dst, operand.immediate()); } else if (operand.IsImmediate()) { // Call the macro assembler for generic immediates. - Mov(dst, operand.immediate()); + Mov(dst, operand.ImmediateValue()); } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { // Emit a shift instruction if moving a shifted register. This operation @@ -297,22 +285,22 @@ // Copy the result to the system stack pointer. if (!dst.Is(rd)) { - ASSERT(rd.IsSP()); + DCHECK(rd.IsSP()); Assembler::mov(rd, dst); } } void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); - if (operand.NeedsRelocation()) { - LoadRelocated(rd, operand); + if (operand.NeedsRelocation(this)) { + Ldr(rd, operand.immediate()); mvn(rd, rd); } else if (operand.IsImmediate()) { // Call the macro assembler for generic immediates. - Mov(rd, ~operand.immediate()); + Mov(rd, ~operand.ImmediateValue()); } else if (operand.IsExtendedRegister()) { // Emit two instructions for the extend case. 
This differs from Mov, as @@ -328,7 +316,7 @@ unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { - ASSERT((reg_size % 8) == 0); + DCHECK((reg_size % 8) == 0); int count = 0; for (unsigned i = 0; i < (reg_size / 16); i++) { if ((imm & 0xffff) == 0) { @@ -343,7 +331,7 @@ // The movz instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { - ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); + DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); } @@ -360,15 +348,16 @@ StatusFlags nzcv, Condition cond, ConditionalCompareOp op) { - ASSERT((cond != al) && (cond != nv)); - if (operand.NeedsRelocation()) { + DCHECK((cond != al) && (cond != nv)); + if (operand.NeedsRelocation(this)) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); - LoadRelocated(temp, operand); + Ldr(temp, operand.immediate()); ConditionalCompareMacro(rn, temp, nzcv, cond, op); } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || - (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { + (operand.IsImmediate() && + IsImmConditionalCompare(operand.ImmediateValue()))) { // The immediate can be encoded in the instruction, or the operand is an // unshifted register: call the assembler. ConditionalCompare(rn, operand, nzcv, cond, op); @@ -388,13 +377,13 @@ const Register& rn, const Operand& operand, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); if (operand.IsImmediate()) { // Immediate argument. Handle special cases of 0, 1 and -1 using zero // register. 
- int64_t imm = operand.immediate(); + int64_t imm = operand.ImmediateValue(); Register zr = AppropriateZeroRegFor(rn); if (imm == 0) { csel(rd, rn, zr, cond); @@ -405,7 +394,7 @@ } else { UseScratchRegisterScope temps(this); Register temp = temps.AcquireSameSizeAs(rn); - Mov(temp, operand.immediate()); + Mov(temp, imm); csel(rd, rn, temp, cond); } } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { @@ -421,29 +410,96 @@ } +bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, + int64_t imm) { + unsigned n, imm_s, imm_r; + int reg_size = dst.SizeInBits(); + if (IsImmMovz(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move zero instruction. Movz can't write + // to the stack pointer. + movz(dst, imm); + return true; + } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move not instruction. Movn can't write + // to the stack pointer. + movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask)); + return true; + } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be represented in a logical orr instruction. + LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR); + return true; + } + return false; +} + + +Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, + int64_t imm) { + int reg_size = dst.SizeInBits(); + + // Encode the immediate in a single move instruction, if possible. + if (TryOneInstrMoveImmediate(dst, imm)) { + // The move was successful; nothing to do here. + } else { + // Pre-shift the immediate to the least-significant bits of the register. + int shift_low = CountTrailingZeros(imm, reg_size); + int64_t imm_low = imm >> shift_low; + + // Pre-shift the immediate to the most-significant bits of the register. We + // insert set bits in the least-significant bits, as this creates a + // different immediate that may be encodable using movn or orr-immediate. 
+ // If this new immediate is encodable, the set bits will be eliminated by + // the post shift on the following instruction. + int shift_high = CountLeadingZeros(imm, reg_size); + int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1); + + if (TryOneInstrMoveImmediate(dst, imm_low)) { + // The new immediate has been moved into the destination's low bits: + // return a new leftward-shifting operand. + return Operand(dst, LSL, shift_low); + } else if (TryOneInstrMoveImmediate(dst, imm_high)) { + // The new immediate has been moved into the destination's high bits: + // return a new rightward-shifting operand. + return Operand(dst, LSR, shift_high); + } else { + // Use the generic move operation to set up the immediate. + Mov(dst, imm); + } + } + return Operand(dst); +} + + void MacroAssembler::AddSubMacro(const Register& rd, const Register& rn, const Operand& operand, FlagsUpdate S, AddSubOp op) { if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && - !operand.NeedsRelocation() && (S == LeaveFlags)) { + !operand.NeedsRelocation(this) && (S == LeaveFlags)) { // The instruction would be a nop. Avoid generating useless code. 
return; } - if (operand.NeedsRelocation()) { + if (operand.NeedsRelocation(this)) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); - LoadRelocated(temp, operand); + Ldr(temp, operand.immediate()); AddSubMacro(rd, rn, temp, S, op); - } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || - (rn.IsZero() && !operand.IsShiftedRegister()) || + } else if ((operand.IsImmediate() && + !IsImmAddSub(operand.ImmediateValue())) || + (rn.IsZero() && !operand.IsShiftedRegister()) || (operand.IsShiftedRegister() && (operand.shift() == ROR))) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireSameSizeAs(rn); - Mov(temp, operand); - AddSub(rd, rn, temp, S, op); + if (operand.IsImmediate()) { + Operand imm_operand = + MoveImmediateForShiftedOp(temp, operand.ImmediateValue()); + AddSub(rd, rn, imm_operand, S, op); + } else { + Mov(temp, operand); + AddSub(rd, rn, temp, S, op); + } } else { AddSub(rd, rn, operand, S, op); } @@ -455,12 +511,12 @@ const Operand& operand, FlagsUpdate S, AddSubWithCarryOp op) { - ASSERT(rd.SizeInBits() == rn.SizeInBits()); + DCHECK(rd.SizeInBits() == rn.SizeInBits()); UseScratchRegisterScope temps(this); - if (operand.NeedsRelocation()) { + if (operand.NeedsRelocation(this)) { Register temp = temps.AcquireX(); - LoadRelocated(temp, operand); + Ldr(temp, operand.immediate()); AddSubWithCarryMacro(rd, rn, temp, S, op); } else if (operand.IsImmediate() || @@ -472,9 +528,9 @@ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { // Add/sub with carry (shifted register). - ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); - ASSERT(operand.shift() != ROR); - ASSERT(is_uintn(operand.shift_amount(), + DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); + DCHECK(operand.shift() != ROR); + DCHECK(is_uintn(operand.shift_amount(), rd.SizeInBits() == kXRegSizeInBits ? 
kXRegSizeInBitsLog2 : kWRegSizeInBitsLog2)); Register temp = temps.AcquireSameSizeAs(rn); @@ -483,11 +539,11 @@ } else if (operand.IsExtendedRegister()) { // Add/sub with carry (extended register). - ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); + DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits()); // Add/sub extended supports a shift <= 4. We want to support exactly the // same modes. - ASSERT(operand.shift_amount() <= 4); - ASSERT(operand.reg().Is64Bits() || + DCHECK(operand.shift_amount() <= 4); + DCHECK(operand.reg().Is64Bits() || ((operand.extend() != UXTX) && (operand.extend() != SXTX))); Register temp = temps.AcquireSameSizeAs(rn); EmitExtendShift(temp, operand.reg(), operand.extend(), @@ -532,11 +588,44 @@ } } +void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + // TODO(all): Should we support register offset for load-store-pair? + DCHECK(!addr.IsRegisterOffset()); + + int64_t offset = addr.offset(); + LSDataSize size = CalcLSPairDataSize(op); + + // Check if the offset fits in the immediate field of the appropriate + // instruction. If not, emit two instructions to perform the operation. + if (IsImmLSPair(offset, size)) { + // Encodable in one load/store pair instruction. 
+ LoadStorePair(rt, rt2, addr, op); + } else { + Register base = addr.base(); + if (addr.IsImmediateOffset()) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(base); + Add(temp, base, offset); + LoadStorePair(rt, rt2, MemOperand(temp), op); + } else if (addr.IsPostIndex()) { + LoadStorePair(rt, rt2, MemOperand(base), op); + Add(base, base, offset); + } else { + DCHECK(addr.IsPreIndex()); + Add(base, base, offset); + LoadStorePair(rt, rt2, MemOperand(base), op); + } + } +} + void MacroAssembler::Load(const Register& rt, const MemOperand& addr, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8()) { Ldrsb(rt, addr); @@ -549,7 +638,7 @@ } else if (r.IsInteger32()) { Ldr(rt.W(), addr); } else { - ASSERT(rt.Is64Bits()); + DCHECK(rt.Is64Bits()); Ldr(rt, addr); } } @@ -558,7 +647,7 @@ void MacroAssembler::Store(const Register& rt, const MemOperand& addr, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { Strb(rt, addr); @@ -567,7 +656,12 @@ } else if (r.IsInteger32()) { Str(rt.W(), addr); } else { - ASSERT(rt.Is64Bits()); + DCHECK(rt.Is64Bits()); + if (r.IsHeapObject()) { + AssertNotSmi(rt); + } else if (r.IsSmi()) { + AssertSmi(rt); + } Str(rt, addr); } } @@ -599,8 +693,43 @@ } +void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + + if (hint == kAdrNear) { + adr(rd, label); + return; + } + + DCHECK(hint == kAdrFar); + if (label->is_bound()) { + int label_offset = label->pos() - pc_offset(); + if (Instruction::IsValidPCRelOffset(label_offset)) { + adr(rd, label); + } else { + DCHECK(label_offset <= 0); + int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1)); + adr(rd, min_adr_offset); + Add(rd, rd, label_offset - min_adr_offset); + } + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + + 
InstructionAccurateScope scope( + this, PatchingAssembler::kAdrFarPatchableNInstrs); + adr(rd, label); + for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) { + nop(ADR_FAR_NOP); + } + movz(scratch, 0); + } +} + + void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { - ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && + DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && (bit == -1 || type >= kBranchTypeFirstUsingBit)); if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { B(static_cast<Condition>(type), label); @@ -620,15 +749,15 @@ void MacroAssembler::B(Label* label, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK((cond != al) && (cond != nv)); Label done; bool need_extra_instructions = NeedExtraInstructionsOrRegisterBranch(label, CondBranchType); if (need_extra_instructions) { - b(&done, InvertCondition(cond)); + b(&done, NegateCondition(cond)); B(label); } else { b(label, cond); @@ -638,7 +767,7 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Label done; bool need_extra_instructions = @@ -655,7 +784,7 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Label done; bool need_extra_instructions = @@ -672,7 +801,7 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Label done; bool need_extra_instructions = @@ -689,7 +818,7 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Label done; bool need_extra_instructions = @@ -711,8 +840,8 @@ void MacroAssembler::Abs(const Register& rd, const Register& rm, Label* 
is_not_representable, Label* is_representable) { - ASSERT(allow_macro_instructions_); - ASSERT(AreSameSizeAndType(rd, rm)); + DCHECK(allow_macro_instructions_); + DCHECK(AreSameSizeAndType(rd, rm)); Cmp(rm, 1); Cneg(rd, rm, lt); @@ -736,12 +865,12 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { - ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + DCHECK(AreSameSizeAndType(src0, src1, src2, src3)); int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); int size = src0.SizeInBytes(); - PrepareForPush(count, size); + PushPreamble(count, size); PushHelper(count, size, src0, src1, src2, src3); } @@ -750,12 +879,12 @@ const CPURegister& src2, const CPURegister& src3, const CPURegister& src4, const CPURegister& src5, const CPURegister& src6, const CPURegister& src7) { - ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7)); + DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7)); int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid(); int size = src0.SizeInBytes(); - PrepareForPush(count, size); + PushPreamble(count, size); PushHelper(4, size, src0, src1, src2, src3); PushHelper(count - 4, size, src4, src5, src6, src7); } @@ -765,29 +894,36 @@ const CPURegister& dst2, const CPURegister& dst3) { // It is not valid to pop into the same register more than once in one // instruction, not even into the zero register. 
- ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); - ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); - ASSERT(dst0.IsValid()); + DCHECK(!AreAliased(dst0, dst1, dst2, dst3)); + DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + DCHECK(dst0.IsValid()); int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); int size = dst0.SizeInBytes(); - PrepareForPop(count, size); PopHelper(count, size, dst0, dst1, dst2, dst3); + PopPostamble(count, size); +} - if (!csp.Is(StackPointer()) && emit_debug_code()) { - // It is safe to leave csp where it is when unwinding the JavaScript stack, - // but if we keep it matching StackPointer, the simulator can detect memory - // accesses in the now-free part of the stack. - Mov(csp, StackPointer()); - } + +void MacroAssembler::Push(const Register& src0, const FPRegister& src1) { + int size = src0.SizeInBytes() + src1.SizeInBytes(); + + PushPreamble(size); + // Reserve room for src0 and push src1. + str(src1, MemOperand(StackPointer(), -size, PreIndex)); + // Fill the gap with src0. 
+ str(src0, MemOperand(StackPointer(), src1.SizeInBytes())); } -void MacroAssembler::PushPopQueue::PushQueued() { +void MacroAssembler::PushPopQueue::PushQueued( + PreambleDirective preamble_directive) { if (queued_.empty()) return; - masm_->PrepareForPush(size_); + if (preamble_directive == WITH_PREAMBLE) { + masm_->PushPreamble(size_); + } int count = queued_.size(); int index = 0; @@ -812,8 +948,6 @@ void MacroAssembler::PushPopQueue::PopQueued() { if (queued_.empty()) return; - masm_->PrepareForPop(size_); - int count = queued_.size(); int index = 0; while (index < count) { @@ -830,6 +964,7 @@ batch[0], batch[1], batch[2], batch[3]); } + masm_->PopPostamble(size_); queued_.clear(); } @@ -837,7 +972,7 @@ void MacroAssembler::PushCPURegList(CPURegList registers) { int size = registers.RegisterSizeInBytes(); - PrepareForPush(registers.Count(), size); + PushPreamble(registers.Count(), size); // Push up to four registers at a time because if the current stack pointer is // csp and reg_size is 32, registers must be pushed in blocks of four in order // to maintain the 16-byte alignment for csp. @@ -856,7 +991,6 @@ void MacroAssembler::PopCPURegList(CPURegList registers) { int size = registers.RegisterSizeInBytes(); - PrepareForPop(registers.Count(), size); // Pop up to four registers at a time because if the current stack pointer is // csp and reg_size is 32, registers must be pushed in blocks of four in // order to maintain the 16-byte alignment for csp. @@ -869,20 +1003,14 @@ int count = count_before - registers.Count(); PopHelper(count, size, dst0, dst1, dst2, dst3); } - - if (!csp.Is(StackPointer()) && emit_debug_code()) { - // It is safe to leave csp where it is when unwinding the JavaScript stack, - // but if we keep it matching StackPointer, the simulator can detect memory - // accesses in the now-free part of the stack. 
- Mov(csp, StackPointer()); - } + PopPostamble(registers.Count(), size); } void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { int size = src.SizeInBytes(); - PrepareForPush(count, size); + PushPreamble(count, size); if (FLAG_optimize_for_size && count > 8) { UseScratchRegisterScope temps(this); @@ -913,12 +1041,12 @@ PushHelper(1, size, src, NoReg, NoReg, NoReg); count -= 1; } - ASSERT(count == 0); + DCHECK(count == 0); } void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { - PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); + PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); UseScratchRegisterScope temps(this); Register temp = temps.AcquireSameSizeAs(count); @@ -971,22 +1099,22 @@ // Ensure that we don't unintentially modify scratch or debug registers. InstructionAccurateScope scope(this); - ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); - ASSERT(size == src0.SizeInBytes()); + DCHECK(AreSameSizeAndType(src0, src1, src2, src3)); + DCHECK(size == src0.SizeInBytes()); // When pushing multiple registers, the store order is chosen such that // Push(a, b) is equivalent to Push(a) followed by Push(b). switch (count) { case 1: - ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); + DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone()); str(src0, MemOperand(StackPointer(), -1 * size, PreIndex)); break; case 2: - ASSERT(src2.IsNone() && src3.IsNone()); + DCHECK(src2.IsNone() && src3.IsNone()); stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex)); break; case 3: - ASSERT(src3.IsNone()); + DCHECK(src3.IsNone()); stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex)); str(src0, MemOperand(StackPointer(), 2 * size)); break; @@ -1011,22 +1139,22 @@ // Ensure that we don't unintentially modify scratch or debug registers. 
InstructionAccurateScope scope(this); - ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); - ASSERT(size == dst0.SizeInBytes()); + DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + DCHECK(size == dst0.SizeInBytes()); // When popping multiple registers, the load order is chosen such that // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). switch (count) { case 1: - ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); + DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex)); break; case 2: - ASSERT(dst2.IsNone() && dst3.IsNone()); + DCHECK(dst2.IsNone() && dst3.IsNone()); ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex)); break; case 3: - ASSERT(dst3.IsNone()); + DCHECK(dst3.IsNone()); ldr(dst2, MemOperand(StackPointer(), 2 * size)); ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex)); break; @@ -1044,15 +1172,13 @@ } -void MacroAssembler::PrepareForPush(Operand total_size) { - // TODO(jbramley): This assertion generates too much code in some debug tests. - // AssertStackConsistency(); +void MacroAssembler::PushPreamble(Operand total_size) { if (csp.Is(StackPointer())) { // If the current stack pointer is csp, then it must be aligned to 16 bytes // on entry and the total size of the specified registers must also be a // multiple of 16 bytes. if (total_size.IsImmediate()) { - ASSERT((total_size.immediate() % 16) == 0); + DCHECK((total_size.ImmediateValue() % 16) == 0); } // Don't check access size for non-immediate sizes. It's difficult to do @@ -1066,25 +1192,29 @@ } -void MacroAssembler::PrepareForPop(Operand total_size) { - AssertStackConsistency(); +void MacroAssembler::PopPostamble(Operand total_size) { if (csp.Is(StackPointer())) { // If the current stack pointer is csp, then it must be aligned to 16 bytes // on entry and the total size of the specified registers must also be a // multiple of 16 bytes. 
if (total_size.IsImmediate()) { - ASSERT((total_size.immediate() % 16) == 0); + DCHECK((total_size.ImmediateValue() % 16) == 0); } // Don't check access size for non-immediate sizes. It's difficult to do // well, and it will be caught by hardware (or the simulator) anyway. + } else if (emit_debug_code()) { + // It is safe to leave csp where it is when unwinding the JavaScript stack, + // but if we keep it matching StackPointer, the simulator can detect memory + // accesses in the now-free part of the stack. + SyncSystemStackPointer(); } } void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { if (offset.IsImmediate()) { - ASSERT(offset.immediate() >= 0); + DCHECK(offset.ImmediateValue() >= 0); } else if (emit_debug_code()) { Cmp(xzr, offset); Check(le, kStackAccessBelowStackPointer); @@ -1096,7 +1226,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { if (offset.IsImmediate()) { - ASSERT(offset.immediate() >= 0); + DCHECK(offset.ImmediateValue() >= 0); } else if (emit_debug_code()) { Cmp(xzr, offset); Check(le, kStackAccessBelowStackPointer); @@ -1109,8 +1239,8 @@ void MacroAssembler::PokePair(const CPURegister& src1, const CPURegister& src2, int offset) { - ASSERT(AreSameSizeAndType(src1, src2)); - ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); + DCHECK(AreSameSizeAndType(src1, src2)); + DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); Stp(src1, src2, MemOperand(StackPointer(), offset)); } @@ -1118,8 +1248,8 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset) { - ASSERT(AreSameSizeAndType(dst1, dst2)); - ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0)); + DCHECK(AreSameSizeAndType(dst1, dst2)); + DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0)); Ldp(dst1, dst2, MemOperand(StackPointer(), offset)); } @@ -1130,7 +1260,7 @@ // This method must not be called unless the current stack pointer is the // system stack pointer 
(csp). - ASSERT(csp.Is(StackPointer())); + DCHECK(csp.Is(StackPointer())); MemOperand tos(csp, -2 * kXRegSize, PreIndex); @@ -1154,7 +1284,7 @@ // This method must not be called unless the current stack pointer is the // system stack pointer (csp). - ASSERT(csp.Is(StackPointer())); + DCHECK(csp.Is(StackPointer())); MemOperand tos(csp, 2 * kXRegSize, PostIndex); @@ -1173,20 +1303,27 @@ void MacroAssembler::AssertStackConsistency() { - if (emit_debug_code()) { - if (csp.Is(StackPointer())) { - // We can't check the alignment of csp without using a scratch register - // (or clobbering the flags), but the processor (or simulator) will abort - // if it is not properly aligned during a load. + // Avoid emitting code when !use_real_abort() since non-real aborts cause too + // much code to be generated. + if (emit_debug_code() && use_real_aborts()) { + if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { + // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We + // can't check the alignment of csp without using a scratch register (or + // clobbering the flags), but the processor (or simulator) will abort if + // it is not properly aligned during a load. ldr(xzr, MemOperand(csp, 0)); - } else if (FLAG_enable_slow_asserts) { + } + if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) { Label ok; // Check that csp <= StackPointer(), preserving all registers and NZCV. sub(StackPointer(), csp, StackPointer()); cbz(StackPointer(), &ok); // Ok if csp == StackPointer(). tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer(). - Abort(kTheCurrentStackPointerIsBelowCsp); + // Avoid generating AssertStackConsistency checks for the Push in Abort. + { DontEmitDebugCodeScope dont_emit_debug_code_scope(this); + Abort(kTheCurrentStackPointerIsBelowCsp); + } bind(&ok); // Restore StackPointer(). 
@@ -1196,7 +1333,65 @@ } -void MacroAssembler::LoadRoot(Register destination, +void MacroAssembler::AssertFPCRState(Register fpcr) { + if (emit_debug_code()) { + Label unexpected_mode, done; + UseScratchRegisterScope temps(this); + if (fpcr.IsNone()) { + fpcr = temps.AcquireX(); + Mrs(fpcr, FPCR); + } + + // Settings overridden by ConfiugreFPCR(): + // - Assert that default-NaN mode is set. + Tbz(fpcr, DN_offset, &unexpected_mode); + + // Settings left to their default values: + // - Assert that flush-to-zero is not set. + Tbnz(fpcr, FZ_offset, &unexpected_mode); + // - Assert that the rounding mode is nearest-with-ties-to-even. + STATIC_ASSERT(FPTieEven == 0); + Tst(fpcr, RMode_mask); + B(eq, &done); + + Bind(&unexpected_mode); + Abort(kUnexpectedFPCRMode); + + Bind(&done); + } +} + + +void MacroAssembler::ConfigureFPCR() { + UseScratchRegisterScope temps(this); + Register fpcr = temps.AcquireX(); + Mrs(fpcr, FPCR); + + // If necessary, enable default-NaN mode. The default values of the other FPCR + // options should be suitable, and AssertFPCRState will verify that. + Label no_write_required; + Tbnz(fpcr, DN_offset, &no_write_required); + + Orr(fpcr, fpcr, DN_mask); + Msr(FPCR, fpcr); + + Bind(&no_write_required); + AssertFPCRState(fpcr); +} + + +void MacroAssembler::CanonicalizeNaN(const FPRegister& dst, + const FPRegister& src) { + AssertFPCRState(); + + // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except + // for NaNs, which become the default NaN. We use fsub rather than fadd + // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. + Fsub(dst, src, fp_zero); +} + + +void MacroAssembler::LoadRoot(CPURegister destination, Heap::RootListIndex index) { // TODO(jbramley): Most root values are constants, and can be synthesized // without a load. Refer to the ARM back end for details. 
@@ -1245,15 +1440,14 @@ void MacroAssembler::EnumLengthUntagged(Register dst, Register map) { STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); - Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset)); + Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset)); And(dst, dst, Map::EnumLengthBits::kMask); } void MacroAssembler::EnumLengthSmi(Register dst, Register map) { - STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); - Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); - And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask)); + EnumLengthUntagged(dst, map); + SmiTag(dst, dst); } @@ -1264,7 +1458,7 @@ Register scratch2, Register scratch3, Label* call_runtime) { - ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2, + DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2, scratch3)); Register empty_fixed_array_value = scratch0; @@ -1346,7 +1540,7 @@ Register scratch1, Register scratch2) { // Handler expects argument in x0. - ASSERT(exception.Is(x0)); + DCHECK(exception.Is(x0)); // Compute the handler entry address and jump to it. The handler table is // a fixed array of (smi-tagged) code offsets. @@ -1364,7 +1558,7 @@ void MacroAssembler::InNewSpace(Register object, Condition cond, Label* branch) { - ASSERT(cond == eq || cond == ne); + DCHECK(cond == eq || cond == ne); UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); And(temp, object, ExternalReference::new_space_mask(isolate())); @@ -1387,10 +1581,10 @@ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); // The handler expects the exception in x0. - ASSERT(value.Is(x0)); + DCHECK(value.Is(x0)); // Drop the stack pointer to the top of the top handler. 
- ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); Ldr(jssp, MemOperand(scratch1)); @@ -1429,10 +1623,10 @@ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); // The handler expects the exception in x0. - ASSERT(value.Is(x0)); + DCHECK(value.Is(x0)); // Drop the stack pointer to the top of the top stack handler. - ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); Ldr(jssp, MemOperand(scratch1)); @@ -1462,50 +1656,8 @@ } -void MacroAssembler::Throw(BailoutReason reason) { - Label throw_start; - Bind(&throw_start); -#ifdef DEBUG - const char* msg = GetBailoutReason(reason); - RecordComment("Throw message: "); - RecordComment((msg != NULL) ? msg : "UNKNOWN"); -#endif - - Mov(x0, Smi::FromInt(reason)); - Push(x0); - - // Disable stub call restrictions to always allow calls to throw. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } else { - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } - // ThrowMessage should not return here. - Unreachable(); -} - - -void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) { - Label ok; - B(InvertCondition(cc), &ok); - Throw(reason); - Bind(&ok); -} - - -void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) { - Label ok; - JumpIfNotSmi(value, &ok); - Throw(reason); - Bind(&ok); -} - - void MacroAssembler::SmiAbs(const Register& smi, Label* slow) { - ASSERT(smi.Is64Bits()); + DCHECK(smi.Is64Bits()); Abs(smi, smi, slow); } @@ -1571,13 +1723,13 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } void MacroAssembler::TailCallStub(CodeStub* stub) { - Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); + Jump(stub->GetCode(), RelocInfo::CODE_TARGET); } @@ -1589,20 +1741,13 @@ // Check that the number of arguments matches what the function expects. // If f->nargs is -1, the function can accept a variable number of arguments. - if (f->nargs >= 0 && f->nargs != num_arguments) { - // Illegal operation: drop the stack arguments and return undefined. - if (num_arguments > 0) { - Drop(num_arguments); - } - LoadRoot(x0, Heap::kUndefinedValueRootIndex); - return; - } + CHECK(f->nargs < 0 || f->nargs == num_arguments); // Place the necessary arguments. Mov(x0, num_arguments); Mov(x1, ExternalReference(f, isolate())); - CEntryStub stub(1, save_doubles); + CEntryStub stub(isolate(), 1, save_doubles); CallStub(&stub); } @@ -1630,13 +1775,11 @@ ExternalReference::handle_scope_level_address(isolate()), next_address); - ASSERT(function_address.is(x1) || function_address.is(x2)); + DCHECK(function_address.is(x1) || function_address.is(x2)); Label profiler_disabled; Label end_profiler_check; - bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address(); - STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); - Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag)); + Mov(x10, ExternalReference::is_profiling_address(isolate())); Ldrb(w10, MemOperand(x10)); Cbz(w10, &profiler_disabled); Mov(x3, thunk_ref); @@ -1680,7 +1823,7 @@ // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). // DirectCEntry stub itself is generated early and never moves. 
- DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(this, x3); if (FLAG_log_timer_events) { @@ -1741,7 +1884,7 @@ FrameScope frame(this, StackFrame::INTERNAL); CallExternalReference( ExternalReference( - Runtime::kHiddenPromoteScheduledException, isolate()), 0); + Runtime::kPromoteScheduledException, isolate()), 0); } B(&exception_handled); @@ -1764,15 +1907,15 @@ Mov(x0, num_arguments); Mov(x1, ext); - CEntryStub stub(1); + CEntryStub stub(isolate(), 1); CallStub(&stub); } void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { Mov(x1, builtin); - CEntryStub stub(1); - Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); + CEntryStub stub(isolate(), 1); + Jump(stub.GetCode(), RelocInfo::CODE_TARGET); } @@ -1790,7 +1933,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Register function, Builtins::JavaScript id) { - ASSERT(!AreAliased(target, function)); + DCHECK(!AreAliased(target, function)); GetBuiltinFunction(function, id); // Load the code entry point from the builtins object. Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); @@ -1802,7 +1945,7 @@ const CallWrapper& call_wrapper) { ASM_LOCATION("MacroAssembler::InvokeBuiltin"); // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Get the builtin entry in x2 and setup the function object in x1. GetBuiltinEntry(x2, x1, id); @@ -1811,7 +1954,7 @@ Call(x2); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(x2); } } @@ -1843,7 +1986,7 @@ Heap::RootListIndex map_index, Register scratch1, Register scratch2) { - ASSERT(!AreAliased(string, length, scratch1, scratch2)); + DCHECK(!AreAliased(string, length, scratch1, scratch2)); LoadRoot(scratch2, map_index); SmiTag(scratch1, length); Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); @@ -1860,7 +2003,7 @@ // environment. 
// Note: This will break if we ever start generating snapshots on one ARM // platform for another ARM platform with a different alignment. - return OS::ActivationFrameAlignment(); + return base::OS::ActivationFrameAlignment(); #else // V8_HOST_ARCH_ARM64 // If we are using the simulator then we should always align to the expected // alignment. As the simulator is used to generate snapshots we do not know @@ -1890,10 +2033,10 @@ void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, int num_of_double_args) { - ASSERT(has_frame()); + DCHECK(has_frame()); // We can pass 8 integer arguments in registers. If we need to pass more than // that, we'll need to implement support for passing them on the stack. - ASSERT(num_of_reg_args <= 8); + DCHECK(num_of_reg_args <= 8); // If we're passing doubles, we're limited to the following prototypes // (defined by ExternalReference::Type): @@ -1902,8 +2045,8 @@ // BUILTIN_FP_CALL: double f(double) // BUILTIN_FP_INT_CALL: double f(double, int) if (num_of_double_args > 0) { - ASSERT(num_of_reg_args <= 1); - ASSERT((num_of_double_args + num_of_reg_args) <= 2); + DCHECK(num_of_reg_args <= 1); + DCHECK((num_of_double_args + num_of_reg_args) <= 2); } @@ -1915,12 +2058,12 @@ int sp_alignment = ActivationFrameAlignment(); // The ABI mandates at least 16-byte alignment. - ASSERT(sp_alignment >= 16); - ASSERT(IsPowerOf2(sp_alignment)); + DCHECK(sp_alignment >= 16); + DCHECK(IsPowerOf2(sp_alignment)); // The current stack pointer is a callee saved register, and is preserved // across the call. - ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); + DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); // Align and synchronize the system stack pointer with jssp. Bic(csp, old_stack_pointer, sp_alignment - 1); @@ -1938,7 +2081,7 @@ // where we only pushed one W register on top of an aligned jssp. 
UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); - ASSERT(ActivationFrameAlignment() == 16); + DCHECK(ActivationFrameAlignment() == 16); Sub(temp, csp, old_stack_pointer); // We want temp <= 0 && temp >= -12. Cmp(temp, 0); @@ -1964,13 +2107,13 @@ void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { - ASSERT(!RelocInfo::IsCodeTarget(rmode)); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast<intptr_t>(target), rmode); } void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); AllowDeferredHandleDereference embedding_raw_address; Jump(reinterpret_cast<intptr_t>(code.location()), rmode); } @@ -2019,7 +2162,7 @@ positions_recorder()->WriteRecordedPositions(); // Addresses always have 64 bits, so we shouldn't encounter NONE32. - ASSERT(rmode != RelocInfo::NONE32); + DCHECK(rmode != RelocInfo::NONE32); UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); @@ -2028,12 +2171,12 @@ // Addresses are 48 bits so we never need to load the upper 16 bits. uint64_t imm = reinterpret_cast<uint64_t>(target); // If we don't use ARM tagged addresses, the 16 higher bits must be 0. - ASSERT(((imm >> 48) & 0xffff) == 0); + DCHECK(((imm >> 48) & 0xffff) == 0); movz(temp, (imm >> 0) & 0xffff, 0); movk(temp, (imm >> 16) & 0xffff, 16); movk(temp, (imm >> 32) & 0xffff, 32); } else { - LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode)); + Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode)); } Blr(temp); #ifdef DEBUG @@ -2081,7 +2224,7 @@ USE(target); // Addresses always have 64 bits, so we shouldn't encounter NONE32. - ASSERT(rmode != RelocInfo::NONE32); + DCHECK(rmode != RelocInfo::NONE32); if (rmode == RelocInfo::NONE64) { return kCallSizeWithoutRelocation; @@ -2098,7 +2241,7 @@ USE(ast_id); // Addresses always have 64 bits, so we shouldn't encounter NONE32. 
- ASSERT(rmode != RelocInfo::NONE32); + DCHECK(rmode != RelocInfo::NONE32); if (rmode == RelocInfo::NONE64) { return kCallSizeWithoutRelocation; @@ -2115,7 +2258,7 @@ Register heap_number_map, Label* on_heap_number, Label* on_not_heap_number) { - ASSERT(on_heap_number || on_not_heap_number); + DCHECK(on_heap_number || on_not_heap_number); AssertNotSmi(object); UseScratchRegisterScope temps(this); @@ -2129,7 +2272,7 @@ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); } - ASSERT(!AreAliased(temp, heap_number_map)); + DCHECK(!AreAliased(temp, heap_number_map)); Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); Cmp(temp, heap_number_map); @@ -2169,7 +2312,7 @@ Register scratch2, Register scratch3, Label* not_found) { - ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3)); // Use of registers. Register result is used as a temporary. Register number_string_cache = result; @@ -2237,11 +2380,11 @@ } -void MacroAssembler::TryConvertDoubleToInt(Register as_int, - FPRegister value, - FPRegister scratch_d, - Label* on_successful_conversion, - Label* on_failed_conversion) { +void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, + FPRegister value, + FPRegister scratch_d, + Label* on_successful_conversion, + Label* on_failed_conversion) { // Convert to an int and back again, then compare with the original value. Fcvtzs(as_int, value); Scvtf(scratch_d, as_int); @@ -2273,6 +2416,16 @@ } +void MacroAssembler::JumpIfMinusZero(Register input, + Label* on_negative_zero) { + DCHECK(input.Is64Bits()); + // Floating point value is in an integer register. Detect -0.0 by subtracting + // 1 (cmp), which will cause overflow. + Cmp(input, 1); + B(vs, on_negative_zero); +} + + void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { // Clamp the value to [0..255]. 
Cmp(input.W(), Operand(input.W(), UXTB)); @@ -2318,9 +2471,9 @@ Register scratch5) { // Untag src and dst into scratch registers. // Copy src->dst in a tight loop. - ASSERT(!AreAliased(dst, src, + DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4, scratch5)); - ASSERT(count >= 2); + DCHECK(count >= 2); const Register& remaining = scratch3; Mov(remaining, count / 2); @@ -2357,7 +2510,7 @@ Register scratch4) { // Untag src and dst into scratch registers. // Copy src->dst in an unrolled loop. - ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); + DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); const Register& dst_untagged = scratch1; const Register& src_untagged = scratch2; @@ -2386,7 +2539,7 @@ Register scratch3) { // Untag src and dst into scratch registers. // Copy src->dst in an unrolled loop. - ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3)); const Register& dst_untagged = scratch1; const Register& src_untagged = scratch2; @@ -2415,10 +2568,10 @@ // // In both cases, fields are copied in pairs if possible, and left-overs are // handled separately. - ASSERT(!AreAliased(dst, src)); - ASSERT(!temps.IncludesAliasOf(dst)); - ASSERT(!temps.IncludesAliasOf(src)); - ASSERT(!temps.IncludesAliasOf(xzr)); + DCHECK(!AreAliased(dst, src)); + DCHECK(!temps.IncludesAliasOf(dst)); + DCHECK(!temps.IncludesAliasOf(src)); + DCHECK(!temps.IncludesAliasOf(xzr)); if (emit_debug_code()) { Cmp(dst, src); @@ -2462,8 +2615,8 @@ UseScratchRegisterScope temps(this); Register tmp1 = temps.AcquireX(); Register tmp2 = temps.AcquireX(); - ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2)); - ASSERT(!AreAliased(src, dst, csp)); + DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2)); + DCHECK(!AreAliased(src, dst, csp)); if (emit_debug_code()) { // Check copy length. 
@@ -2512,7 +2665,7 @@ void MacroAssembler::FillFields(Register dst, Register field_count, Register filler) { - ASSERT(!dst.Is(csp)); + DCHECK(!dst.Is(csp)); UseScratchRegisterScope temps(this); Register field_ptr = temps.AcquireX(); Register counter = temps.AcquireX(); @@ -2557,7 +2710,7 @@ if (smi_check == DO_SMI_CHECK) { JumpIfEitherSmi(first, second, failure); } else if (emit_debug_code()) { - ASSERT(smi_check == DONT_DO_SMI_CHECK); + DCHECK(smi_check == DONT_DO_SMI_CHECK); Label not_smi; JumpIfEitherSmi(first, second, NULL, ¬_smi); @@ -2588,8 +2741,8 @@ Register scratch1, Register scratch2, Label* failure) { - ASSERT(!AreAliased(scratch1, second)); - ASSERT(!AreAliased(scratch1, scratch2)); + DCHECK(!AreAliased(scratch1, second)); + DCHECK(!AreAliased(scratch1, scratch2)); static const int kFlatAsciiStringMask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; @@ -2620,7 +2773,7 @@ Register scratch1, Register scratch2, Label* failure) { - ASSERT(!AreAliased(first, second, scratch1, scratch2)); + DCHECK(!AreAliased(first, second, scratch1, scratch2)); const int kFlatAsciiStringMask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatAsciiStringTag = @@ -2668,12 +2821,12 @@ // The code below is made a lot easier because the calling code already sets // up actual and expected registers according to the contract if values are // passed in registers. 
- ASSERT(actual.is_immediate() || actual.reg().is(x0)); - ASSERT(expected.is_immediate() || expected.reg().is(x2)); - ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3)); + DCHECK(actual.is_immediate() || actual.reg().is(x0)); + DCHECK(expected.is_immediate() || expected.reg().is(x2)); + DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3)); if (expected.is_immediate()) { - ASSERT(actual.is_immediate()); + DCHECK(actual.is_immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; @@ -2736,7 +2889,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); Label done; @@ -2753,7 +2906,7 @@ Call(code); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(code); } } @@ -2769,11 +2922,11 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in x1. // (See FullCodeGenerator::Generate().) - ASSERT(function.is(x1)); + DCHECK(function.is(x1)); Register expected_reg = x2; Register code_reg = x3; @@ -2801,11 +2954,11 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in x1. // (See FullCodeGenerator::Generate().) 
- ASSERT(function.Is(x1)); + DCHECK(function.Is(x1)); Register code_reg = x3; @@ -2860,17 +3013,17 @@ void MacroAssembler::TruncateDoubleToI(Register result, DoubleRegister double_input) { Label done; - ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); // Try to convert the double to an int64. If successful, the bottom 32 bits // contain our truncated int32 result. TryConvertDoubleToInt64(result, double_input, &done); // If we fell through then inline version didn't succeed - call stub instead. - Push(lr); - Push(double_input); // Put input on stack. + Push(lr, double_input); - DoubleToIStub stub(jssp, + DoubleToIStub stub(isolate(), + jssp, result, 0, true, // is_truncating @@ -2887,8 +3040,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { Label done; - ASSERT(!result.is(object)); - ASSERT(jssp.Is(StackPointer())); + DCHECK(!result.is(object)); + DCHECK(jssp.Is(StackPointer())); Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); @@ -2898,7 +3051,8 @@ // If we fell through then inline version didn't succeed - call stub instead. Push(lr); - DoubleToIStub stub(object, + DoubleToIStub stub(isolate(), + object, result, HeapNumber::kValueOffset - kHeapObjectTag, true, // is_truncating @@ -2910,29 +3064,30 @@ } -void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { - if (frame_mode == BUILD_STUB_FRAME) { - ASSERT(StackPointer().Is(jssp)); - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - __ Mov(temp, Smi::FromInt(StackFrame::STUB)); - // Compiled stubs don't age, and so they don't need the predictable code - // ageing sequence. 
- __ Push(lr, fp, cp, temp); - __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); - } else { - if (isolate()->IsCodePreAgingActive()) { - Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); - __ EmitCodeAgeSequence(stub); - } else { - __ EmitFrameSetupForCodeAgePatching(); - } +void MacroAssembler::StubPrologue() { + DCHECK(StackPointer().Is(jssp)); + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + __ Mov(temp, Smi::FromInt(StackFrame::STUB)); + // Compiled stubs don't age, and so they don't need the predictable code + // ageing sequence. + __ Push(lr, fp, cp, temp); + __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + if (code_pre_aging) { + Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); + __ EmitCodeAgeSequence(stub); + } else { + __ EmitFrameSetupForCodeAgePatching(); } } void MacroAssembler::EnterFrame(StackFrame::Type type) { - ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); UseScratchRegisterScope temps(this); Register type_reg = temps.AcquireX(); Register code_reg = temps.AcquireX(); @@ -2953,7 +3108,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { - ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); // Drop the execution stack down to the frame pointer and restore // the caller frame pointer and return address. Mov(jssp, fp); @@ -2971,7 +3126,7 @@ // Read the registers from the stack without popping them. The stack pointer // will be reset as part of the unwinding process. 
CPURegList saved_fp_regs = kCallerSavedFP; - ASSERT(saved_fp_regs.Count() % 2 == 0); + DCHECK(saved_fp_regs.Count() % 2 == 0); int offset = ExitFrameConstants::kLastExitFrameField; while (!saved_fp_regs.IsEmpty()) { @@ -2986,7 +3141,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, int extra_space) { - ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); // Set up the new stack frame. Mov(scratch, Operand(CodeObject())); @@ -3032,7 +3187,7 @@ // Align and synchronize the system stack pointer with jssp. AlignAndSetCSPForFrame(); - ASSERT(csp.Is(StackPointer())); + DCHECK(csp.Is(StackPointer())); // fp[8]: CallerPC (lr) // fp -> fp[0]: CallerFP (old fp) @@ -3056,7 +3211,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles, const Register& scratch, bool restore_context) { - ASSERT(csp.Is(StackPointer())); + DCHECK(csp.Is(StackPointer())); if (restore_doubles) { ExitFrameRestoreFPRegs(); @@ -3103,7 +3258,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value != 0); + DCHECK(value != 0); if (FLAG_native_code_counters && counter->Enabled()) { Mov(scratch2, ExternalReference(counter)); Ldr(scratch1, MemOperand(scratch2)); @@ -3135,20 +3290,18 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { Mov(x0, 0); Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate())); - CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); + CEntryStub ces(isolate(), 1); + DCHECK(AllowThisStubCall(&ces)); + Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } -#endif void MacroAssembler::PushTryHandler(StackHandler::Kind kind, int handler_index) { - ASSERT(jssp.Is(StackPointer())); + DCHECK(jssp.Is(StackPointer())); // Adjust this code if the asserts don't hold. 
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); @@ -3170,7 +3323,7 @@ // Push the frame pointer, context, state, and code object. if (kind == StackHandler::JS_ENTRY) { - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); Push(xzr, xzr, x11, x10); } else { Push(fp, cp, x11, x10); @@ -3200,7 +3353,7 @@ Register scratch2, Label* gc_required, AllocationFlags flags) { - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -3216,14 +3369,14 @@ UseScratchRegisterScope temps(this); Register scratch3 = temps.AcquireX(); - ASSERT(!AreAliased(result, scratch1, scratch2, scratch3)); - ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); + DCHECK(!AreAliased(result, scratch1, scratch2, scratch3)); + DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); // Make object size into bytes. if ((flags & SIZE_IN_WORDS) != 0) { object_size *= kPointerSize; } - ASSERT(0 == (object_size & kObjectAlignmentMask)); + DCHECK(0 == (object_size & kObjectAlignmentMask)); // Check relative positions of allocation top and limit addresses. // The values must be adjacent in memory to allow the use of LDP. @@ -3233,7 +3386,7 @@ AllocationUtils::GetAllocationLimitReference(isolate(), flags); intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); + DCHECK((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register top_address = scratch1; @@ -3260,14 +3413,13 @@ // Calculate new top and bail out if new space is exhausted. 
Adds(scratch3, result, object_size); - B(vs, gc_required); - Cmp(scratch3, allocation_limit); + Ccmp(scratch3, allocation_limit, CFlag, cc); B(hi, gc_required); Str(scratch3, MemOperand(top_address)); // Tag the object if requested. if ((flags & TAG_OBJECT) != 0) { - Orr(result, result, kHeapObjectTag); + ObjectTag(result, result); } } @@ -3293,8 +3445,8 @@ UseScratchRegisterScope temps(this); Register scratch3 = temps.AcquireX(); - ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); - ASSERT(object_size.Is64Bits() && result.Is64Bits() && + DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); + DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); // Check relative positions of allocation top and limit addresses. @@ -3305,7 +3457,7 @@ AllocationUtils::GetAllocationLimitReference(isolate(), flags); intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); + DCHECK((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register top_address = scratch1; @@ -3342,14 +3494,13 @@ Check(eq, kUnalignedAllocationInNewSpace); } - B(vs, gc_required); - Cmp(scratch3, allocation_limit); + Ccmp(scratch3, allocation_limit, CFlag, cc); B(hi, gc_required); Str(scratch3, MemOperand(top_address)); // Tag the object if requested. if ((flags & TAG_OBJECT) != 0) { - Orr(result, result, kHeapObjectTag); + ObjectTag(result, result); } } @@ -3380,7 +3531,7 @@ Register scratch2, Register scratch3, Label* gc_required) { - ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3)); // Calculate the number of bytes needed for the characters in the string while // observing object alignment. 
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); @@ -3411,7 +3562,7 @@ Register scratch2, Register scratch3, Label* gc_required) { - ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3)); // Calculate the number of bytes needed for the characters in the string while // observing object alignment. STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); @@ -3457,33 +3608,12 @@ Register scratch1, Register scratch2, Label* gc_required) { - Label allocate_new_space, install_map; - AllocationFlags flags = TAG_OBJECT; - - ExternalReference high_promotion_mode = ExternalReference:: - new_space_high_promotion_mode_active_address(isolate()); - Mov(scratch1, high_promotion_mode); - Ldr(scratch1, MemOperand(scratch1)); - Cbz(scratch1, &allocate_new_space); - - Allocate(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); - - B(&install_map); - - Bind(&allocate_new_space); Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - flags); - - Bind(&install_map); + TAG_OBJECT); InitializeNewString(result, length, @@ -3498,7 +3628,7 @@ Register scratch1, Register scratch2, Label* gc_required) { - ASSERT(!AreAliased(result, length, scratch1, scratch2)); + DCHECK(!AreAliased(result, length, scratch1, scratch2)); Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); @@ -3515,7 +3645,7 @@ Register scratch1, Register scratch2, Label* gc_required) { - ASSERT(!AreAliased(result, length, scratch1, scratch2)); + DCHECK(!AreAliased(result, length, scratch1, scratch2)); Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); @@ -3533,32 +3663,55 @@ Label* gc_required, Register scratch1, Register scratch2, - Register heap_number_map) { + CPURegister value, + CPURegister heap_number_map, + MutableMode mode) { + 
DCHECK(!value.IsValid() || value.Is64Bits()); + UseScratchRegisterScope temps(this); + // Allocate an object in the heap for the heap number and tag it as a heap // object. Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, - TAG_OBJECT); + NO_ALLOCATION_FLAGS); - // Store heap number map in the allocated object. - if (heap_number_map.Is(NoReg)) { - heap_number_map = scratch1; - LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + Heap::RootListIndex map_index = mode == MUTABLE + ? Heap::kMutableHeapNumberMapRootIndex + : Heap::kHeapNumberMapRootIndex; + + // Prepare the heap number map. + if (!heap_number_map.IsValid()) { + // If we have a valid value register, use the same type of register to store + // the map so we can use STP to store both in one instruction. + if (value.IsValid() && value.IsFPRegister()) { + heap_number_map = temps.AcquireD(); + } else { + heap_number_map = scratch1; + } + LoadRoot(heap_number_map, map_index); + } + if (emit_debug_code()) { + Register map; + if (heap_number_map.IsFPRegister()) { + map = scratch1; + Fmov(map, DoubleRegister(heap_number_map)); + } else { + map = Register(heap_number_map); + } + AssertRegisterIsRoot(map, map_index); } - AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); -} - -void MacroAssembler::AllocateHeapNumberWithValue(Register result, - DoubleRegister value, - Label* gc_required, - Register scratch1, - Register scratch2, - Register heap_number_map) { - // TODO(all): Check if it would be more efficient to use STP to store both - // the map and the value. - AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map); - Str(value, FieldMemOperand(result, HeapNumber::kValueOffset)); + // Store the heap number map and the value in the allocated object. 
+ if (value.IsSameSizeAndType(heap_number_map)) { + STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize == + HeapNumber::kValueOffset); + Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset)); + } else { + Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); + if (value.IsValid()) { + Str(value, MemOperand(result, HeapNumber::kValueOffset)); + } + } + ObjectTag(result, result); } @@ -3685,7 +3838,7 @@ // Load the map's "bit field 2". __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. - __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); + DecodeField<Map::ElementsKindBits>(result); } @@ -3694,15 +3847,16 @@ Register scratch, Label* miss, BoundFunctionAction action) { - ASSERT(!AreAliased(function, result, scratch)); + DCHECK(!AreAliased(function, result, scratch)); - // Check that the receiver isn't a smi. - JumpIfSmi(function, miss); + Label non_instance; + if (action == kMissOnBoundFunction) { + // Check that the receiver isn't a smi. + JumpIfSmi(function, miss); - // Check that the function really is a function. Load map into result reg. - JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss); + // Check that the function really is a function. Load map into result reg. + JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss); - if (action == kMissOnBoundFunction) { Register scratch_w = scratch.W(); Ldr(scratch, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); @@ -3711,12 +3865,11 @@ Ldr(scratch_w, FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss); - } - // Make sure that the function has an instance prototype. - Label non_instance; - Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); - Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance); + // Make sure that the function has an instance prototype. 
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); + Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance); + } // Get the prototype or initial map from the function. Ldr(result, @@ -3733,12 +3886,15 @@ // Get the prototype from the initial map. Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); - B(&done); - // Non-instance prototype: fetch prototype from constructor field in initial - // map. - Bind(&non_instance); - Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); + if (action == kMissOnBoundFunction) { + B(&done); + + // Non-instance prototype: fetch prototype from constructor field in initial + // map. + Bind(&non_instance); + Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); + } // All done. Bind(&done); @@ -3749,7 +3905,7 @@ Heap::RootListIndex index) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); - ASSERT(!AreAliased(obj, temp)); + DCHECK(!AreAliased(obj, temp)); LoadRoot(temp, index); Cmp(obj, temp); } @@ -3784,7 +3940,7 @@ } else if (if_false == fall_through) { CompareAndBranch(lhs, rhs, cond, if_true); } else if (if_true == fall_through) { - CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false); + CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false); } else { CompareAndBranch(lhs, rhs, cond, if_true); B(if_false); @@ -3848,10 +4004,9 @@ Register elements_reg, Register scratch1, FPRegister fpscratch1, - FPRegister fpscratch2, Label* fail, int elements_offset) { - ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); + DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); Label store_num; // Speculatively convert the smi to a double - all smis can be exactly @@ -3866,12 +4021,9 @@ fail, DONT_DO_SMI_CHECK); Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); - Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - // Check for NaN by comparing the number to itself: NaN comparison will - // report 
unordered, indicated by the overflow flag being set. - Fcmp(fpscratch1, fpscratch1); - Fcsel(fpscratch1, fpscratch2, fpscratch1, vs); + // Canonicalize NaNs. + CanonicalizeNaN(fpscratch1); // Store the result. Bind(&store_num); @@ -3893,13 +4045,10 @@ // that the constants for the maximum number of digits for an array index // cached in the hash field and the number of bits reserved for it does not // conflict. - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. - STATIC_ASSERT(kSmiTag == 0); - Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); - SmiTag(index, hash); + DecodeField<String::ArrayIndexValueBits>(index, hash); + SmiTag(index, index); } @@ -3909,7 +4058,7 @@ SeqStringSetCharCheckIndexType index_type, Register scratch, uint32_t encoding_mask) { - ASSERT(!AreAliased(string, index, scratch)); + DCHECK(!AreAliased(string, index, scratch)); if (index_type == kIndexIsSmi) { AssertSmi(index); @@ -3930,7 +4079,7 @@ Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); Check(lt, kIndexIsTooLarge); - ASSERT_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); Cmp(index, 0); Check(ge, kIndexIsNegative); } @@ -3940,7 +4089,7 @@ Register scratch1, Register scratch2, Label* miss) { - ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); + DCHECK(!AreAliased(holder_reg, scratch1, scratch2)); Label same_contexts; // Load current lexical context from the stack frame. @@ -4002,10 +4151,10 @@ // Compute the hash code from the untagged key. 
This must be kept in sync with -// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in // code-stub-hydrogen.cc void MacroAssembler::GetNumberHash(Register key, Register scratch) { - ASSERT(!AreAliased(key, scratch)); + DCHECK(!AreAliased(key, scratch)); // Xor original key with a seed. LoadRoot(scratch, Heap::kHashSeedRootIndex); @@ -4044,7 +4193,7 @@ Register scratch1, Register scratch2, Register scratch3) { - ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3)); Label done; @@ -4068,7 +4217,7 @@ And(scratch2, scratch2, scratch1); // Scale the index by multiplying by the element size. - ASSERT(SeededNumberDictionary::kEntrySize == 3); + DCHECK(SeededNumberDictionary::kEntrySize == 3); Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); // Check if the key is identical to the name. @@ -4103,7 +4252,7 @@ Register scratch1, SaveFPRegsMode fp_mode, RememberedSetFinalAction and_then) { - ASSERT(!AreAliased(object, address, scratch1)); + DCHECK(!AreAliased(object, address, scratch1)); Label done, store_buffer_overflow; if (emit_debug_code()) { Label ok; @@ -4123,12 +4272,12 @@ Str(scratch1, MemOperand(scratch2)); // Call stub on end of buffer. // Check for end of buffer. 
- ASSERT(StoreBuffer::kStoreBufferOverflowBit == + DCHECK(StoreBuffer::kStoreBufferOverflowBit == (1 << (14 + kPointerSizeLog2))); if (and_then == kFallThroughAtEnd) { Tbz(scratch1, (14 + kPointerSizeLog2), &done); } else { - ASSERT(and_then == kReturnAtEnd); + DCHECK(and_then == kReturnAtEnd); Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); Ret(); } @@ -4136,7 +4285,7 @@ Bind(&store_buffer_overflow); Push(lr); StoreBufferOverflowStub store_buffer_overflow_stub = - StoreBufferOverflowStub(fp_mode); + StoreBufferOverflowStub(isolate(), fp_mode); CallStub(&store_buffer_overflow_stub); Pop(lr); @@ -4158,7 +4307,7 @@ // Safepoints expect a block of kNumSafepointRegisters values on the stack, so // adjust the stack for unsaved registers. const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; - ASSERT(num_unsaved >= 0); + DCHECK(num_unsaved >= 0); Claim(num_unsaved); PushXRegList(kSafepointSavedRegisters); } @@ -4180,7 +4329,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { // Make sure the safepoint registers list is what we expect. - ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); + DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); // Safepoint registers are stored contiguously on the stack, but not all the // registers are saved. The following registers are excluded: @@ -4237,7 +4386,8 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; @@ -4249,7 +4399,7 @@ // Although the object register is tagged, the offset is relative to the start // of the object, so offset must be a multiple of kPointerSize. 
- ASSERT(IsAligned(offset, kPointerSize)); + DCHECK(IsAligned(offset, kPointerSize)); Add(scratch, object, offset - kHeapObjectTag); if (emit_debug_code()) { @@ -4266,7 +4416,8 @@ lr_status, save_fp, remembered_set_action, - OMIT_SMI_CHECK); + OMIT_SMI_CHECK, + pointers_to_here_check_for_value); Bind(&done); @@ -4279,20 +4430,94 @@ } +// Will clobber: object, map, dst. +// If lr_status is kLRHasBeenSaved, lr will also be clobbered. +void MacroAssembler::RecordWriteForMap(Register object, + Register map, + Register dst, + LinkRegisterStatus lr_status, + SaveFPRegsMode fp_mode) { + ASM_LOCATION("MacroAssembler::RecordWrite"); + DCHECK(!AreAliased(object, map)); + + if (emit_debug_code()) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + + CompareMap(map, temp, isolate()->factory()->meta_map()); + Check(eq, kWrongAddressOrValuePassedToRecordWrite); + } + + if (!FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + + Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); + Cmp(temp, map); + Check(eq, kWrongAddressOrValuePassedToRecordWrite); + } + + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. + Label done; + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. This optimization + // relies on the fact that maps can never be in new space. + CheckPageFlagClear(map, + map, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + &done); + + // Record the actual write. 
+ if (lr_status == kLRHasNotBeenSaved) { + Push(lr); + } + Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag); + RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, + fp_mode); + CallStub(&stub); + if (lr_status == kLRHasNotBeenSaved) { + Pop(lr); + } + + Bind(&done); + + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map, + dst); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12))); + Mov(map, Operand(BitCast<int64_t>(kZapValue + 16))); + } +} + + // Will clobber: object, address, value. // If lr_status is kLRHasBeenSaved, lr will also be clobbered. // // The register 'object' contains a heap object pointer. The heap object tag is // shifted away. -void MacroAssembler::RecordWrite(Register object, - Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + LinkRegisterStatus lr_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { ASM_LOCATION("MacroAssembler::RecordWrite"); - ASSERT(!AreAliased(object, value)); + DCHECK(!AreAliased(object, value)); if (emit_debug_code()) { UseScratchRegisterScope temps(this); @@ -4303,23 +4528,21 @@ Check(eq, kWrongAddressOrValuePassedToRecordWrite); } - // Count number of write barriers in generated code. - isolate()->counters()->write_barriers_static()->Increment(); - // TODO(mstarzinger): Dynamic counter missing. - // First, check if a write barrier is even needed. 
The tests below // catch stores of smis and stores into the young generation. Label done; if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); JumpIfSmi(value, &done); } - CheckPageFlagClear(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - &done); + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlagClear(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + &done); + } CheckPageFlagClear(object, value, // Used as scratch. MemoryChunk::kPointersFromHereAreInterestingMask, @@ -4329,7 +4552,8 @@ if (lr_status == kLRHasNotBeenSaved) { Push(lr); } - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); CallStub(&stub); if (lr_status == kLRHasNotBeenSaved) { Pop(lr); @@ -4337,6 +4561,11 @@ Bind(&done); + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address, + value); + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { @@ -4350,7 +4579,7 @@ if (emit_debug_code()) { // The bit sequence is backward. The first character in the string // represents the least significant bit. 
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); Label color_is_valid; Tbnz(reg, 0, &color_is_valid); @@ -4364,8 +4593,8 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register shift_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg)); - ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); + DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg)); + DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); // addr_reg is divided into fields: // |63 page base 20|19 high 8|7 shift 3|2 0| // 'high' gives the index of the cell holding color bits for the object. @@ -4389,7 +4618,7 @@ int first_bit, int second_bit) { // See mark-compact.h for color definitions. - ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch)); + DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch)); GetMarkBits(object, bitmap_scratch, shift_scratch); Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); @@ -4400,14 +4629,14 @@ // These bit sequences are backwards. The first character in the string // represents the least significant bit. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); // Check for the color. if (first_bit == 0) { // Checking for white. - ASSERT(second_bit == 0); + DCHECK(second_bit == 0); // We only need to test the first bit. 
Tbz(bitmap_scratch, 0, has_color); } else { @@ -4431,7 +4660,7 @@ Label* if_deprecated) { if (map->CanBeDeprecated()) { Mov(scratch, Operand(map)); - Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset)); + Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated); } } @@ -4441,7 +4670,7 @@ Register scratch0, Register scratch1, Label* on_black) { - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. } @@ -4451,7 +4680,7 @@ Register scratch0, Register scratch1, Label* found) { - ASSERT(!AreAliased(object, scratch0, scratch1)); + DCHECK(!AreAliased(object, scratch0, scratch1)); Factory* factory = isolate()->factory(); Register current = scratch0; Label loop_again; @@ -4463,7 +4692,7 @@ Bind(&loop_again); Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); - Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount); + DecodeField<Map::ElementsKindBits>(scratch1); CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found); Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again); @@ -4472,7 +4701,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, Register result) { - ASSERT(!result.Is(ldr_location)); + DCHECK(!result.Is(ldr_location)); const uint32_t kLdrLitOffset_lsb = 5; const uint32_t kLdrLitOffset_width = 19; Ldr(result, MemOperand(ldr_location)); @@ -4495,14 +4724,14 @@ Register load_scratch, Register length_scratch, Label* value_is_white_and_not_data) { - ASSERT(!AreAliased( + DCHECK(!AreAliased( value, bitmap_scratch, shift_scratch, load_scratch, length_scratch)); // These bit sequences are backwards. 
The first character in the string // represents the least significant bit. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); GetMarkBits(value, bitmap_scratch, shift_scratch); Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); @@ -4526,8 +4755,8 @@ JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object); // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. Register instance_type = load_scratch; @@ -4541,8 +4770,8 @@ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). // External strings are the only ones with the kExternalStringTag bit // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); Mov(length_scratch, ExternalString::kSize); TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object); @@ -4550,7 +4779,7 @@ // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. 
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); + DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); Ldrsw(length_scratch, UntagSmiFieldMemOperand(value, String::kLengthOffset)); Tst(instance_type, kStringEncodingMask); @@ -4674,8 +4903,7 @@ // We need some scratch registers for the MacroAssembler, so make sure we have // some. This is safe here because Abort never returns. RegList old_tmp_list = TmpList()->list(); - TmpList()->Combine(ip0); - TmpList()->Combine(ip1); + TmpList()->Combine(MacroAssembler::DefaultTmpList()); if (use_real_aborts()) { // Avoid infinite recursion; Push contains some assertions that use Abort. @@ -4777,110 +5005,97 @@ const CPURegister& arg3) { // We cannot handle a caller-saved stack pointer. It doesn't make much sense // in most cases anyway, so this restriction shouldn't be too serious. - ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); + DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer())); + + // The provided arguments, and their proper procedure-call standard registers. + CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; + CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg}; + + int arg_count = kPrintfMaxArgCount; + + // The PCS varargs registers for printf. Note that x0 is used for the printf + // format string. + static const CPURegList kPCSVarargs = + CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count); + static const CPURegList kPCSVarargsFP = + CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1); - // Make sure that the macro assembler doesn't try to use any of our arguments - // as scratch registers. - ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3)); - ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3)); - - // We cannot print the stack pointer because it is typically used to preserve - // caller-saved registers (using other Printf variants which depend on this - // helper). 
- ASSERT(!AreAliased(arg0, StackPointer())); - ASSERT(!AreAliased(arg1, StackPointer())); - ASSERT(!AreAliased(arg2, StackPointer())); - ASSERT(!AreAliased(arg3, StackPointer())); - - static const int kMaxArgCount = 4; - // Assume that we have the maximum number of arguments until we know - // otherwise. - int arg_count = kMaxArgCount; - - // The provided arguments. - CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; - - // The PCS registers where the arguments need to end up. - CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg}; - - // Promote FP arguments to doubles, and integer arguments to X registers. - // Note that FP and integer arguments cannot be mixed, but we'll check - // AreSameSizeAndType once we've processed these promotions. - for (int i = 0; i < kMaxArgCount; i++) { + // We can use caller-saved registers as scratch values, except for the + // arguments and the PCS registers where they might need to go. + CPURegList tmp_list = kCallerSaved; + tmp_list.Remove(x0); // Used to pass the format string. + tmp_list.Remove(kPCSVarargs); + tmp_list.Remove(arg0, arg1, arg2, arg3); + + CPURegList fp_tmp_list = kCallerSavedFP; + fp_tmp_list.Remove(kPCSVarargsFP); + fp_tmp_list.Remove(arg0, arg1, arg2, arg3); + + // Override the MacroAssembler's scratch register list. The lists will be + // reset automatically at the end of the UseScratchRegisterScope. + UseScratchRegisterScope temps(this); + TmpList()->set_list(tmp_list.list()); + FPTmpList()->set_list(fp_tmp_list.list()); + + // Copies of the printf vararg registers that we can pop from. + CPURegList pcs_varargs = kPCSVarargs; + CPURegList pcs_varargs_fp = kPCSVarargsFP; + + // Place the arguments. There are lots of clever tricks and optimizations we + // could use here, but Printf is a debug tool so instead we just try to keep + // it simple: Move each input that isn't already in the right place to a + // scratch register, then move everything back. 
+ for (unsigned i = 0; i < kPrintfMaxArgCount; i++) { + // Work out the proper PCS register for this argument. if (args[i].IsRegister()) { - // Note that we use x1 onwards, because x0 will hold the format string. - pcs[i] = Register::XRegFromCode(i + 1); - // For simplicity, we handle all integer arguments as X registers. An X - // register argument takes the same space as a W register argument in the - // PCS anyway. The only limitation is that we must explicitly clear the - // top word for W register arguments as the callee will expect it to be - // clear. - if (!args[i].Is64Bits()) { - const Register& as_x = args[i].X(); - And(as_x, as_x, 0x00000000ffffffff); - args[i] = as_x; - } + pcs[i] = pcs_varargs.PopLowestIndex().X(); + // We might only need a W register here. We need to know the size of the + // argument so we can properly encode it for the simulator call. + if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); } else if (args[i].IsFPRegister()) { - pcs[i] = FPRegister::DRegFromCode(i); - // C and C++ varargs functions (such as printf) implicitly promote float - // arguments to doubles. - if (!args[i].Is64Bits()) { - FPRegister s(args[i]); - const FPRegister& as_d = args[i].D(); - Fcvt(as_d, s); - args[i] = as_d; - } + // In C, floats are always cast to doubles for varargs calls. + pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); } else { - // This is the first empty (NoCPUReg) argument, so use it to set the - // argument count and bail out. + DCHECK(args[i].IsNone()); arg_count = i; break; } - } - ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount)); - // Check that every remaining argument is NoCPUReg. - for (int i = arg_count; i < kMaxArgCount; i++) { - ASSERT(args[i].IsNone()); - } - ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1], - args[2], args[3], - pcs[0], pcs[1], - pcs[2], pcs[3])); - // Move the arguments into the appropriate PCS registers. 
- // - // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is - // surprisingly complicated. - // - // * For even numbers of registers, we push the arguments and then pop them - // into their final registers. This maintains 16-byte stack alignment in - // case csp is the stack pointer, since we're only handling X or D - // registers at this point. - // - // * For odd numbers of registers, we push and pop all but one register in - // the same way, but the left-over register is moved directly, since we - // can always safely move one register without clobbering any source. - if (arg_count >= 4) { - Push(args[3], args[2], args[1], args[0]); - } else if (arg_count >= 2) { - Push(args[1], args[0]); - } + // If the argument is already in the right place, leave it where it is. + if (args[i].Aliases(pcs[i])) continue; - if ((arg_count % 2) != 0) { - // Move the left-over register directly. - const CPURegister& leftover_arg = args[arg_count - 1]; - const CPURegister& leftover_pcs = pcs[arg_count - 1]; - if (leftover_arg.IsRegister()) { - Mov(Register(leftover_pcs), Register(leftover_arg)); - } else { - Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg)); + // Otherwise, if the argument is in a PCS argument register, allocate an + // appropriate scratch register and then move it out of the way. + if (kPCSVarargs.IncludesAliasOf(args[i]) || + kPCSVarargsFP.IncludesAliasOf(args[i])) { + if (args[i].IsRegister()) { + Register old_arg = Register(args[i]); + Register new_arg = temps.AcquireSameSizeAs(old_arg); + Mov(new_arg, old_arg); + args[i] = new_arg; + } else { + FPRegister old_arg = FPRegister(args[i]); + FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); + Fmov(new_arg, old_arg); + args[i] = new_arg; + } } } - if (arg_count >= 4) { - Pop(pcs[0], pcs[1], pcs[2], pcs[3]); - } else if (arg_count >= 2) { - Pop(pcs[0], pcs[1]); + // Do a second pass to move values into their final positions and perform any + // conversions that may be required. 
+ for (int i = 0; i < arg_count; i++) { + DCHECK(pcs[i].type() == args[i].type()); + if (pcs[i].IsRegister()) { + Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); + } else { + DCHECK(pcs[i].IsFPRegister()); + if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) { + Fmov(FPRegister(pcs[i]), FPRegister(args[i])); + } else { + Fcvt(FPRegister(pcs[i]), FPRegister(args[i])); + } + } } // Load the format string into x0, as per the procedure-call standard. @@ -4908,18 +5123,33 @@ Bic(csp, StackPointer(), 0xf); } - CallPrintf(pcs[0].type()); + CallPrintf(arg_count, pcs); } -void MacroAssembler::CallPrintf(CPURegister::RegisterType type) { +void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) { // A call to printf needs special handling for the simulator, since the system // printf function will use a different instruction set and the procedure-call // standard will not be compatible. #ifdef USE_SIMULATOR { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize); hlt(kImmExceptionIsPrintf); - dc32(type); + dc32(arg_count); // kPrintfArgCountOffset + + // Determine the argument pattern. + uint32_t arg_pattern_list = 0; + for (int i = 0; i < arg_count; i++) { + uint32_t arg_pattern; + if (args[i].IsRegister()) { + arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX; + } else { + DCHECK(args[i].Is64Bits()); + arg_pattern = kPrintfArgD; + } + DCHECK(arg_pattern < (1 << kPrintfArgPatternBits)); + arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i)); + } + dc32(arg_pattern_list); // kPrintfArgPatternListOffset } #else Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); @@ -4928,10 +5158,18 @@ void MacroAssembler::Printf(const char * format, - const CPURegister& arg0, - const CPURegister& arg1, - const CPURegister& arg2, - const CPURegister& arg3) { + CPURegister arg0, + CPURegister arg1, + CPURegister arg2, + CPURegister arg3) { + // We can only print sp if it is the current stack pointer. 
+ if (!csp.Is(StackPointer())) { + DCHECK(!csp.Aliases(arg0)); + DCHECK(!csp.Aliases(arg1)); + DCHECK(!csp.Aliases(arg2)); + DCHECK(!csp.Aliases(arg3)); + } + // Printf is expected to preserve all registers, so make sure that none are // available as scratch registers until we've preserved them. RegList old_tmp_list = TmpList()->list(); @@ -4953,19 +5191,41 @@ TmpList()->set_list(tmp_list.list()); FPTmpList()->set_list(fp_tmp_list.list()); - // Preserve NZCV. { UseScratchRegisterScope temps(this); - Register tmp = temps.AcquireX(); - Mrs(tmp, NZCV); - Push(tmp, xzr); - } + // If any of the arguments are the current stack pointer, allocate a new + // register for them, and adjust the value to compensate for pushing the + // caller-saved registers. + bool arg0_sp = StackPointer().Aliases(arg0); + bool arg1_sp = StackPointer().Aliases(arg1); + bool arg2_sp = StackPointer().Aliases(arg2); + bool arg3_sp = StackPointer().Aliases(arg3); + if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) { + // Allocate a register to hold the original stack pointer value, to pass + // to PrintfNoPreserve as an argument. + Register arg_sp = temps.AcquireX(); + Add(arg_sp, StackPointer(), + kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes()); + if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits()); + if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits()); + if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits()); + if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits()); + } + + // Preserve NZCV. + { UseScratchRegisterScope temps(this); + Register tmp = temps.AcquireX(); + Mrs(tmp, NZCV); + Push(tmp, xzr); + } - PrintfNoPreserve(format, arg0, arg1, arg2, arg3); + PrintfNoPreserve(format, arg0, arg1, arg2, arg3); - { UseScratchRegisterScope temps(this); - Register tmp = temps.AcquireX(); - Pop(xzr, tmp); - Msr(NZCV, tmp); + // Restore NZCV. 
+ { UseScratchRegisterScope temps(this); + Register tmp = temps.AcquireX(); + Pop(xzr, tmp); + Msr(NZCV, tmp); + } } PopCPURegList(kCallerSavedFP); @@ -4980,16 +5240,18 @@ // TODO(jbramley): Other architectures use the internal memcpy to copy the // sequence. If this is a performance bottleneck, we should consider caching // the sequence and copying it in the same way. - InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); - ASSERT(jssp.Is(StackPointer())); + InstructionAccurateScope scope(this, + kNoCodeAgeSequenceLength / kInstructionSize); + DCHECK(jssp.Is(StackPointer())); EmitFrameSetupForCodeAgePatching(this); } void MacroAssembler::EmitCodeAgeSequence(Code* stub) { - InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); - ASSERT(jssp.Is(StackPointer())); + InstructionAccurateScope scope(this, + kNoCodeAgeSequenceLength / kInstructionSize); + DCHECK(jssp.Is(StackPointer())); EmitCodeAgeSequence(this, stub); } @@ -5012,7 +5274,7 @@ __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize)); __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); - __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); + __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength); } @@ -5027,7 +5289,7 @@ // // A branch (br) is used rather than a call (blr) because this code replaces // the frame setup code that would normally preserve lr. 
- __ LoadLiteral(ip0, kCodeAgeStubEntryOffset); + __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2); __ adr(x0, &start); __ br(ip0); // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up @@ -5035,53 +5297,24 @@ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset); if (stub) { __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start())); - __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); + __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength); } } -bool MacroAssembler::IsYoungSequence(byte* sequence) { - // Generate a young sequence to compare with. - const int length = kCodeAgeSequenceSize / kInstructionSize; - static bool initialized = false; - static byte young[kCodeAgeSequenceSize]; - if (!initialized) { - PatchingAssembler patcher(young, length); - // The young sequence is the frame setup code for FUNCTION code types. It is - // generated by FullCodeGenerator::Generate. - MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher); - initialized = true; - } - - bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0); - ASSERT(is_young || IsCodeAgeSequence(sequence)); +bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool is_young = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(is_young || + isolate->code_aging_helper()->IsOld(sequence)); return is_young; } -#ifdef DEBUG -bool MacroAssembler::IsCodeAgeSequence(byte* sequence) { - // The old sequence varies depending on the code age. However, the code up - // until kCodeAgeStubEntryOffset does not change, so we can check that part to - // get a reasonable level of verification. 
- const int length = kCodeAgeStubEntryOffset / kInstructionSize; - static bool initialized = false; - static byte old[kCodeAgeStubEntryOffset]; - if (!initialized) { - PatchingAssembler patcher(old, length); - MacroAssembler::EmitCodeAgeSequence(&patcher, NULL); - initialized = true; - } - return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; -} -#endif - - void MacroAssembler::TruncatingDiv(Register result, Register dividend, int32_t divisor) { - ASSERT(!AreAliased(result, dividend)); - ASSERT(result.Is32Bits() && dividend.Is32Bits()); + DCHECK(!AreAliased(result, dividend)); + DCHECK(result.Is32Bits() && dividend.Is32Bits()); MultiplierAndShift ms(divisor); Mov(result, ms.multiplier()); Smull(result.X(), dividend, result); @@ -5118,14 +5351,14 @@ CPURegList* available) { CHECK(!available->IsEmpty()); CPURegister result = available->PopLowestIndex(); - ASSERT(!AreAliased(result, xzr, csp)); + DCHECK(!AreAliased(result, xzr, csp)); return result; } CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available, const CPURegister& reg) { - ASSERT(available->IncludesAliasOf(reg)); + DCHECK(available->IncludesAliasOf(reg)); available->Remove(reg); return reg; } @@ -5138,8 +5371,8 @@ const Label* smi_check) { Assembler::BlockPoolsScope scope(masm); if (reg.IsValid()) { - ASSERT(smi_check->is_bound()); - ASSERT(reg.Is64Bits()); + DCHECK(smi_check->is_bound()); + DCHECK(reg.Is64Bits()); // Encode the register (x0-x30) in the lowest 5 bits, then the offset to // 'check' in the other bits. The possible offset is limited in that we @@ -5148,7 +5381,7 @@ uint32_t delta = __ InstructionsGeneratedSince(smi_check); __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta)); } else { - ASSERT(!smi_check->is_bound()); + DCHECK(!smi_check->is_bound()); // An offset of 0 indicates that there is no patch site. 
__ InlineData(0); @@ -5159,17 +5392,17 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info) : reg_(NoReg), smi_check_(NULL) { InstructionSequence* inline_data = InstructionSequence::At(info); - ASSERT(inline_data->IsInlineData()); + DCHECK(inline_data->IsInlineData()); if (inline_data->IsInlineData()) { uint64_t payload = inline_data->InlineData(); // We use BitField to decode the payload, and BitField can only handle // 32-bit values. - ASSERT(is_uint32(payload)); + DCHECK(is_uint32(payload)); if (payload != 0) { int reg_code = RegisterBits::decode(payload); reg_ = Register::XRegFromCode(reg_code); uint64_t smi_check_delta = DeltaBits::decode(payload); - ASSERT(smi_check_delta != 0); + DCHECK(smi_check_delta != 0); smi_check_ = inline_data->preceding(smi_check_delta); } } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/macro-assembler-arm64.h nodejs-0.11.15/deps/v8/src/arm64/macro-assembler-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/macro-assembler-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/macro-assembler-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,33 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_ #define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_ #include <vector> -#include "v8globals.h" -#include "globals.h" +#include "src/globals.h" + +#include "src/arm64/assembler-arm64-inl.h" + +// Simulator specific helpers. +#if USE_SIMULATOR + // TODO(all): If possible automatically prepend an indicator like + // UNIMPLEMENTED or LOCATION. + #define ASM_UNIMPLEMENTED(message) \ + __ Debug(message, __LINE__, NO_PARAM) + #define ASM_UNIMPLEMENTED_BREAK(message) \ + __ Debug(message, __LINE__, \ + FLAG_ignore_asm_unimplemented_break ? 
NO_PARAM : BREAK) + #define ASM_LOCATION(message) \ + __ Debug("LOCATION: " message, __LINE__, NO_PARAM) +#else + #define ASM_UNIMPLEMENTED(message) + #define ASM_UNIMPLEMENTED_BREAK(message) + #define ASM_LOCATION(message) +#endif -#include "arm64/assembler-arm64-inl.h" namespace v8 { namespace internal { @@ -49,6 +43,11 @@ V(Str, CPURegister&, rt, StoreOpFor(rt)) \ V(Ldrsw, Register&, rt, LDRSW_x) +#define LSPAIR_MACRO_LIST(V) \ + V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \ + V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ + V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) + // ---------------------------------------------------------------------------- // Static helper functions @@ -105,7 +104,7 @@ inline BranchType InvertBranchType(BranchType type) { if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { return static_cast<BranchType>( - InvertCondition(static_cast<Condition>(type))); + NegateCondition(static_cast<Condition>(type))); } else { return static_cast<BranchType>(type ^ 1); } @@ -113,6 +112,10 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; enum TargetAddressStorageMode { CAN_INLINE_TARGET_ADDRESS, @@ -222,6 +225,18 @@ static bool IsImmMovz(uint64_t imm, unsigned reg_size); static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); + // Try to move an immediate into the destination register in a single + // instruction. Returns true for success, and updates the contents of dst. + // Returns false, otherwise. + bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + + // Move an immediate into register dst, and return an Operand object for use + // with a subsequent instruction that accepts a shift. 
The value moved into + // dst is not necessarily equal to imm; it may have had a shifting operation + // applied to it that will be subsequently undone by the shift applied in the + // Operand. + Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm); + // Conditional macros. inline void Ccmp(const Register& rn, const Operand& operand, @@ -251,12 +266,28 @@ const MemOperand& addr, LoadStoreOp op); +#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr); + LSPAIR_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& addr, LoadStorePairOp op); + // V8-specific load/store helpers. void Load(const Register& rt, const MemOperand& addr, Representation r); void Store(const Register& rt, const MemOperand& addr, Representation r); + enum AdrHint { + // The target must be within the immediate range of adr. + kAdrNear, + // The target may be outside of the immediate range of adr. Additional + // instructions may be emitted. + kAdrFar + }; + void Adr(const Register& rd, Label* label, AdrHint = kAdrNear); + // Remaining instructions are simple pass-through calls to the assembler. - inline void Adr(const Register& rd, Label* label); inline void Asr(const Register& rd, const Register& rn, unsigned shift); inline void Asr(const Register& rd, const Register& rn, const Register& rm); @@ -366,7 +397,7 @@ // Provide a template to allow other types to be converted automatically. 
template<typename T> void Fmov(FPRegister fd, T imm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Fmov(fd, static_cast<double>(imm)); } inline void Fmov(Register rd, FPRegister fn); @@ -387,6 +418,7 @@ const FPRegister& fm, const FPRegister& fa); inline void Frinta(const FPRegister& fd, const FPRegister& fn); + inline void Frintm(const FPRegister& fd, const FPRegister& fn); inline void Frintn(const FPRegister& fd, const FPRegister& fn); inline void Frintz(const FPRegister& fd, const FPRegister& fn); inline void Fsqrt(const FPRegister& fd, const FPRegister& fn); @@ -399,19 +431,10 @@ inline void Ldnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& src); - inline void Ldp(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& src); - inline void Ldpsw(const Register& rt, - const Register& rt2, - const MemOperand& src); - // Provide both double and float interfaces for FP immediate loads, rather - // than relying on implicit C++ casts. This allows signalling NaNs to be - // preserved when the immediate matches the format of fd. Most systems convert - // signalling NaNs to quiet NaNs when converting between float and double. - inline void Ldr(const FPRegister& ft, double imm); - inline void Ldr(const FPRegister& ft, float imm); - inline void Ldr(const Register& rt, uint64_t imm); + // Load a literal from the inline constant pool. + inline void Ldr(const CPURegister& rt, const Immediate& imm); + // Helper function for double immediate. 
+ inline void Ldr(const CPURegister& rt, double imm); inline void Lsl(const Register& rd, const Register& rn, unsigned shift); inline void Lsl(const Register& rd, const Register& rn, const Register& rm); inline void Lsr(const Register& rd, const Register& rn, unsigned shift); @@ -467,9 +490,6 @@ inline void Stnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& dst); - inline void Stp(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& dst); inline void Sxtb(const Register& rd, const Register& rn); inline void Sxth(const Register& rd, const Register& rn); inline void Sxtw(const Register& rd, const Register& rn); @@ -502,7 +522,8 @@ // Pseudo-instructions ------------------------------------------------------ // Compute rd = abs(rm). - // This function clobbers the condition flags. + // This function clobbers the condition flags. On output the overflow flag is + // set iff the negation overflowed. // // If rm is the minimum representable value, the result is not representable. // Handlers for each case can be specified using the relevant labels. @@ -544,6 +565,7 @@ const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg); void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg, const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg); + void Push(const Register& src0, const FPRegister& src1); // Alternative forms of Push and Pop, taking a RegList or CPURegList that // specifies the registers that are to be pushed or popped. 
Higher-numbered @@ -619,7 +641,7 @@ explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { } ~PushPopQueue() { - ASSERT(queued_.empty()); + DCHECK(queued_.empty()); } void Queue(const CPURegister& rt) { @@ -627,7 +649,11 @@ queued_.push_back(rt); } - void PushQueued(); + enum PreambleDirective { + WITH_PREAMBLE, + SKIP_PREAMBLE + }; + void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE); void PopQueued(); private: @@ -731,9 +757,11 @@ // it can be evidence of a potential bug because the ABI forbids accesses // below csp. // - // If emit_debug_code() is false, this emits no code. + // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is + // enabled, then csp will be dereferenced to cause the processor + // (or simulator) to abort if it is not properly aligned. // - // If StackPointer() is the system stack pointer, this emits no code. + // If emit_debug_code() is false, this emits no code. void AssertStackConsistency(); // Preserve the callee-saved registers (as defined by AAPCS64). @@ -765,7 +793,7 @@ // Set the current stack pointer, but don't generate any code. inline void SetStackPointer(const Register& stack_pointer) { - ASSERT(!TmpList()->IncludesAliasOf(stack_pointer)); + DCHECK(!TmpList()->IncludesAliasOf(stack_pointer)); sp_ = stack_pointer; } @@ -779,8 +807,8 @@ inline void AlignAndSetCSPForFrame() { int sp_alignment = ActivationFrameAlignment(); // AAPCS64 mandates at least 16-byte alignment. - ASSERT(sp_alignment >= 16); - ASSERT(IsPowerOf2(sp_alignment)); + DCHECK(sp_alignment >= 16); + DCHECK(IsPowerOf2(sp_alignment)); Bic(csp, StackPointer(), sp_alignment - 1); SetStackPointer(csp); } @@ -791,18 +819,35 @@ // // This is necessary when pushing or otherwise adding things to the stack, to // satisfy the AAPCS64 constraint that the memory below the system stack - // pointer is not accessed. + // pointer is not accessed. 
The amount pushed will be increased as necessary + // to ensure csp remains aligned to 16 bytes. // // This method asserts that StackPointer() is not csp, since the call does // not make sense in that context. inline void BumpSystemStackPointer(const Operand& space); + // Re-synchronizes the system stack pointer (csp) with the current stack + // pointer (according to StackPointer()). This function will ensure the + // new value of the system stack pointer is remains aligned to 16 bytes, and + // is lower than or equal to the value of the current stack pointer. + // + // This method asserts that StackPointer() is not csp, since the call does + // not make sense in that context. + inline void SyncSystemStackPointer(); + // Helpers ------------------------------------------------------------------ // Root register. inline void InitializeRootRegister(); + void AssertFPCRState(Register fpcr = NoReg); + void ConfigureFPCR(); + void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src); + void CanonicalizeNaN(const FPRegister& reg) { + CanonicalizeNaN(reg, reg); + } + // Load an object from the root table. - void LoadRoot(Register destination, + void LoadRoot(CPURegister destination, Heap::RootListIndex index); // Store an object to the root table. 
void StoreRoot(Register source, @@ -818,7 +863,7 @@ if (object->IsHeapObject()) { LoadHeapObject(result, Handle<HeapObject>::cast(object)); } else { - ASSERT(object->IsSmi()); + DCHECK(object->IsSmi()); Mov(result, Operand(object)); } } @@ -836,10 +881,15 @@ void NumberOfOwnDescriptors(Register dst, Register map); template<typename Field> - void DecodeField(Register reg) { - static const uint64_t shift = Field::kShift + kSmiShift; + void DecodeField(Register dst, Register src) { + static const uint64_t shift = Field::kShift; static const uint64_t setbits = CountSetBits(Field::kMask, 32); - Ubfx(reg, reg, shift, setbits); + Ubfx(dst, src, shift, setbits); + } + + template<typename Field> + void DecodeField(Register reg) { + DecodeField<Field>(reg, reg); } // ---- SMI and Number Utilities ---- @@ -855,6 +905,10 @@ Register src, UntagMode mode = kNotSpeculativeUntag); + // Tag and push in one step. + inline void SmiTagAndPush(Register src); + inline void SmiTagAndPush(Register src1, Register src2); + // Compute the absolute value of 'smi' and leave the result in 'smi' // register. If 'smi' is the most negative SMI, the absolute value cannot // be represented as a SMI and a jump to 'slow' is done. @@ -883,6 +937,9 @@ void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi); void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi); + inline void ObjectTag(Register tagged_obj, Register obj); + inline void ObjectUntag(Register untagged_obj, Register obj); + // Abort execution if argument is not a name, enabled via --debug-code. void AssertName(Register object); @@ -910,6 +967,10 @@ // Jump to label if the input double register contains -0.0. void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero); + // Jump to label if the input integer register contains the double precision + // floating point representation of -0.0. 
+ void JumpIfMinusZero(Register input, Label* on_negative_zero); + // Generate code to do a lookup in the number string cache. If the number in // the register object is found in the cache the generated code falls through // with the result in the result register. The object and the result register @@ -932,34 +993,34 @@ DoubleRegister input, DoubleRegister dbl_scratch); - // Try to convert a double to a signed 32-bit int. + // Try to represent a double as a signed 32-bit int. // This succeeds if the result compares equal to the input, so inputs of -0.0 - // are converted to 0 and handled as a success. + // are represented as 0 and handled as a success. // - // On output the Z flag is set if the conversion was successful. - void TryConvertDoubleToInt32(Register as_int, - FPRegister value, - FPRegister scratch_d, - Label* on_successful_conversion = NULL, - Label* on_failed_conversion = NULL) { - ASSERT(as_int.Is32Bits()); - TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion, - on_failed_conversion); + // On output the Z flag is set if the operation was successful. + void TryRepresentDoubleAsInt32(Register as_int, + FPRegister value, + FPRegister scratch_d, + Label* on_successful_conversion = NULL, + Label* on_failed_conversion = NULL) { + DCHECK(as_int.Is32Bits()); + TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion, + on_failed_conversion); } - // Try to convert a double to a signed 64-bit int. + // Try to represent a double as a signed 64-bit int. // This succeeds if the result compares equal to the input, so inputs of -0.0 - // are converted to 0 and handled as a success. + // are represented as 0 and handled as a success. // - // On output the Z flag is set if the conversion was successful. 
- void TryConvertDoubleToInt64(Register as_int, - FPRegister value, - FPRegister scratch_d, - Label* on_successful_conversion = NULL, - Label* on_failed_conversion = NULL) { - ASSERT(as_int.Is64Bits()); - TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion, - on_failed_conversion); + // On output the Z flag is set if the operation was successful. + void TryRepresentDoubleAsInt64(Register as_int, + FPRegister value, + FPRegister scratch_d, + Label* on_successful_conversion = NULL, + Label* on_failed_conversion = NULL) { + DCHECK(as_int.Is64Bits()); + TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion, + on_failed_conversion); } // ---- Object Utilities ---- @@ -1051,15 +1112,6 @@ Register scratch3, Register scratch4); - // Throw a message string as an exception. - void Throw(BailoutReason reason); - - // Throw a message string as an exception if a condition is not true. - void ThrowIf(Condition cc, BailoutReason reason); - - // Throw a message string as an exception if the value is a smi. 
- void ThrowIfSmi(const Register& value, BailoutReason reason); - void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None()); void TailCallStub(CodeStub* stub); @@ -1265,12 +1317,11 @@ MacroAssembler* masm_; }; -#ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support void DebugBreak(); -#endif + // --------------------------------------------------------------------------- // Exception handling @@ -1354,13 +1405,9 @@ Label* gc_required, Register scratch1, Register scratch2, - Register heap_number_map = NoReg); - void AllocateHeapNumberWithValue(Register result, - DoubleRegister value, - Label* gc_required, - Register scratch1, - Register scratch2, - Register heap_number_map = NoReg); + CPURegister value = NoFPReg, + CPURegister heap_number_map = NoReg, + MutableMode mode = IMMUTABLE); // --------------------------------------------------------------------------- // Support functions. @@ -1549,7 +1596,6 @@ Register elements_reg, Register scratch1, FPRegister fpscratch1, - FPRegister fpscratch2, Label* fail, int elements_offset = 0); @@ -1646,7 +1692,8 @@ void ExitFrameRestoreFPRegs(); // Generates function and stub prologue code. - void Prologue(PrologueFrameMode frame_mode); + void StubPrologue(); + void Prologue(bool code_pre_aging); // Enter exit frame. Exit frames are used when calling C code from generated // (JavaScript) code. @@ -1781,7 +1828,9 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // As above, but the offset has the tag presubtracted. For use with // MemOperand(reg, off). 
@@ -1793,7 +1842,9 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { RecordWriteField(context, offset + kHeapObjectTag, value, @@ -1801,9 +1852,17 @@ lr_status, save_fp, remembered_set_action, - smi_check); + smi_check, + pointers_to_here_check_for_value); } + void RecordWriteForMap( + Register object, + Register map, + Register dst, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp); + // For a given |object| notify the garbage collector that the slot |address| // has been written. |value| is the object being stored. The value and // address registers are clobbered by the operation. @@ -1814,7 +1873,9 @@ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // Checks the color of an object. If the object is already grey or black // then we just fall through, since it is already live. If it is white and @@ -1919,18 +1980,22 @@ CPURegList* TmpList() { return &tmp_list_; } CPURegList* FPTmpList() { return &fptmp_list_; } + static CPURegList DefaultTmpList(); + static CPURegList DefaultFPTmpList(); + // Like printf, but print at run-time from generated code. // // The caller must ensure that arguments for floating-point placeholders // (such as %e, %f or %g) are FPRegisters, and that arguments for integer // placeholders are Registers. // - // A maximum of four arguments may be given to any single Printf call. The - // arguments must be of the same type, but they do not need to have the same - // size. 
+ // At the moment it is only possible to print the value of csp if it is the + // current stack pointer. Otherwise, the MacroAssembler will automatically + // update csp on every push (using BumpSystemStackPointer), so determining its + // value is difficult. // - // The following registers cannot be printed: - // StackPointer(), csp. + // Format placeholders that refer to more than one argument, or to a specific + // argument, are not supported. This includes formats like "%1$d" or "%.*d". // // This function automatically preserves caller-saved registers so that // calling code can use Printf at any point without having to worry about @@ -1938,19 +2003,11 @@ // a problem, preserve the important registers manually and then call // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are // implicitly preserved. - // - // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be - // preserved, and can be printed. This allows Printf to be used during debug - // code. - // - // This function assumes (and asserts) that the current stack pointer is - // callee-saved, not caller-saved. This is most likely the case anyway, as a - // caller-saved stack pointer doesn't make a lot of sense. void Printf(const char * format, - const CPURegister& arg0 = NoCPUReg, - const CPURegister& arg1 = NoCPUReg, - const CPURegister& arg2 = NoCPUReg, - const CPURegister& arg3 = NoCPUReg); + CPURegister arg0 = NoCPUReg, + CPURegister arg1 = NoCPUReg, + CPURegister arg2 = NoCPUReg, + CPURegister arg3 = NoCPUReg); // Like Printf, but don't preserve any caller-saved registers, not even 'lr'. // @@ -1998,18 +2055,21 @@ // Return true if the sequence is a young sequence geneated by // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the // sequence is a code age sequence (emitted by EmitCodeAgeSequence). 
- static bool IsYoungSequence(byte* sequence); - -#ifdef DEBUG - // Return true if the sequence is a code age sequence generated by - // EmitCodeAgeSequence. - static bool IsCodeAgeSequence(byte* sequence); -#endif + static bool IsYoungSequence(Isolate* isolate, byte* sequence); // Jumps to found label if a prototype map has dictionary elements. void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label* found); + // Perform necessary maintenance operations before a push or after a pop. + // + // Note that size is specified in bytes. + void PushPreamble(Operand total_size); + void PopPostamble(Operand total_size); + + void PushPreamble(int count, int size) { PushPreamble(count * size); } + void PopPostamble(int count, int size) { PopPostamble(count * size); } + private: // Helpers for CopyFields. // These each implement CopyFields in a different way. @@ -2037,22 +2097,15 @@ const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3); - // Perform necessary maintenance operations before a push or pop. - // - // Note that size is specified in bytes. - void PrepareForPush(Operand total_size); - void PrepareForPop(Operand total_size); - - void PrepareForPush(int count, int size) { PrepareForPush(count * size); } - void PrepareForPop(int count, int size) { PrepareForPop(count * size); } - // Call Printf. On a native build, a simple call will be generated, but if the // simulator is being used then a suitable pseudo-instruction is used. The // arguments and stack (csp) must be prepared by the caller as for a normal // AAPCS64 call to 'printf'. // - // The 'type' argument specifies the type of the optional arguments. - void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister); + // The 'args' argument should point to an array of variable arguments in their + // proper PCS registers (and in calling order). The argument registers can + // have mixed types. 
The format string (x0) should not be included. + void CallPrintf(int arg_count = 0, const CPURegister * args = NULL); // Helper for throwing exceptions. Compute a handler address and jump to // it. See the implementation for register usage. @@ -2067,7 +2120,7 @@ Condition cond, // eq for new space, ne otherwise. Label* branch); - // Try to convert a double to an int so that integer fast-paths may be + // Try to represent a double as an int so that integer fast-paths may be // used. Not every valid integer value is guaranteed to be caught. // It supports both 32-bit and 64-bit integers depending whether 'as_int' // is a W or X register. @@ -2075,12 +2128,12 @@ // This does not distinguish between +0 and -0, so if this distinction is // important it must be checked separately. // - // On output the Z flag is set if the conversion was successful. - void TryConvertDoubleToInt(Register as_int, - FPRegister value, - FPRegister scratch_d, - Label* on_successful_conversion = NULL, - Label* on_failed_conversion = NULL); + // On output the Z flag is set if the operation was successful. + void TryRepresentDoubleAsInt(Register as_int, + FPRegister value, + FPRegister scratch_d, + Label* on_successful_conversion = NULL, + Label* on_failed_conversion = NULL); bool generating_stub_; #if DEBUG @@ -2148,7 +2201,7 @@ // emitted is what you specified when creating the scope. 
class InstructionAccurateScope BASE_EMBEDDED { public: - InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) + explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) : masm_(masm) #ifdef DEBUG , @@ -2173,7 +2226,7 @@ masm_->EndBlockPools(); #ifdef DEBUG if (start_.is_bound()) { - ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_); + DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_); } masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); #endif @@ -2203,8 +2256,8 @@ availablefp_(masm->FPTmpList()), old_available_(available_->list()), old_availablefp_(availablefp_->list()) { - ASSERT(available_->type() == CPURegister::kRegister); - ASSERT(availablefp_->type() == CPURegister::kFPRegister); + DCHECK(available_->type() == CPURegister::kRegister); + DCHECK(availablefp_->type() == CPURegister::kFPRegister); } ~UseScratchRegisterScope(); diff -Nru nodejs-0.11.13/deps/v8/src/arm64/macro-assembler-arm64-inl.h nodejs-0.11.15/deps/v8/src/arm64/macro-assembler-arm64-inl.h --- nodejs-0.11.13/deps/v8/src/arm64/macro-assembler-arm64-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/macro-assembler-arm64-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,18 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ #include <ctype.h> -#include "v8globals.h" -#include "globals.h" +#include "src/globals.h" -#include "arm64/assembler-arm64.h" -#include "arm64/assembler-arm64-inl.h" -#include "arm64/macro-assembler-arm64.h" -#include "arm64/instrument-arm64.h" +#include "src/arm64/assembler-arm64-inl.h" +#include "src/arm64/assembler-arm64.h" +#include "src/arm64/instrument-arm64.h" +#include "src/arm64/macro-assembler-arm64.h" namespace v8 { @@ -61,7 +37,7 @@ Handle<Object> MacroAssembler::CodeObject() { - ASSERT(!code_object_.is_null()); + DCHECK(!code_object_.is_null()); return code_object_; } @@ -69,8 +45,8 @@ void MacroAssembler::And(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, AND); } @@ -78,15 +54,15 @@ void MacroAssembler::Ands(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ANDS); } void MacroAssembler::Tst(const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS); } @@ -94,8 +70,8 @@ void MacroAssembler::Bic(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, BIC); } @@ -103,8 +79,8 @@ void MacroAssembler::Bics(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, BICS); } @@ -112,8 +88,8 @@ void 
MacroAssembler::Orr(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORR); } @@ -121,8 +97,8 @@ void MacroAssembler::Orn(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORN); } @@ -130,8 +106,8 @@ void MacroAssembler::Eor(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EOR); } @@ -139,8 +115,8 @@ void MacroAssembler::Eon(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EON); } @@ -149,9 +125,9 @@ const Operand& operand, StatusFlags nzcv, Condition cond) { - ASSERT(allow_macro_instructions_); - if (operand.IsImmediate() && (operand.immediate() < 0)) { - ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN); + DCHECK(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { + ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN); } else { ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP); } @@ -162,9 +138,9 @@ const Operand& operand, StatusFlags nzcv, Condition cond) { - ASSERT(allow_macro_instructions_); - if (operand.IsImmediate() && (operand.immediate() < 0)) { - ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP); + DCHECK(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { + ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP); } else { 
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN); } @@ -174,9 +150,10 @@ void MacroAssembler::Add(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - if (operand.IsImmediate() && (operand.immediate() < 0)) { - AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB); + DCHECK(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && + IsImmAddSub(-operand.ImmediateValue())) { + AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB); } else { AddSubMacro(rd, rn, operand, LeaveFlags, ADD); } @@ -185,9 +162,10 @@ void MacroAssembler::Adds(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - if (operand.IsImmediate() && (operand.immediate() < 0)) { - AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB); + DCHECK(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && + IsImmAddSub(-operand.ImmediateValue())) { + AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB); } else { AddSubMacro(rd, rn, operand, SetFlags, ADD); } @@ -197,9 +175,10 @@ void MacroAssembler::Sub(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - if (operand.IsImmediate() && (operand.immediate() < 0)) { - AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD); + DCHECK(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && + IsImmAddSub(-operand.ImmediateValue())) { + AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD); } else { AddSubMacro(rd, rn, operand, LeaveFlags, SUB); } @@ -209,9 +188,10 @@ void MacroAssembler::Subs(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - if (operand.IsImmediate() && (operand.immediate() < 0)) { - AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD); + DCHECK(allow_macro_instructions_); + if 
(operand.IsImmediate() && (operand.ImmediateValue() < 0) && + IsImmAddSub(-operand.ImmediateValue())) { + AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD); } else { AddSubMacro(rd, rn, operand, SetFlags, SUB); } @@ -219,23 +199,23 @@ void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Adds(AppropriateZeroRegFor(rn), rn, operand); } void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Subs(AppropriateZeroRegFor(rn), rn, operand); } void MacroAssembler::Neg(const Register& rd, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); if (operand.IsImmediate()) { - Mov(rd, -operand.immediate()); + Mov(rd, -operand.ImmediateValue()); } else { Sub(rd, AppropriateZeroRegFor(rd), operand); } @@ -244,7 +224,7 @@ void MacroAssembler::Negs(const Register& rd, const Operand& operand) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); Subs(rd, AppropriateZeroRegFor(rd), operand); } @@ -252,8 +232,8 @@ void MacroAssembler::Adc(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC); } @@ -261,8 +241,8 @@ void MacroAssembler::Adcs(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC); } @@ -270,8 +250,8 @@ void MacroAssembler::Sbc(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + 
DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC); } @@ -279,16 +259,16 @@ void MacroAssembler::Sbcs(const Register& rd, const Register& rn, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC); } void MacroAssembler::Ngc(const Register& rd, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); Register zr = AppropriateZeroRegFor(rd); Sbc(rd, zr, operand); } @@ -296,41 +276,44 @@ void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); Register zr = AppropriateZeroRegFor(rd); Sbcs(rd, zr, operand); } void MacroAssembler::Mvn(const Register& rd, uint64_t imm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); Mov(rd, ~imm); } #define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ - ASSERT(allow_macro_instructions_); \ + DCHECK(allow_macro_instructions_); \ LoadStoreMacro(REG, addr, OP); \ } LS_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION -void MacroAssembler::Adr(const Register& rd, Label* label) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - adr(rd, label); -} +#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ + const MemOperand& addr) { \ + DCHECK(allow_macro_instructions_); \ + LoadStorePairMacro(REG, REG2, addr, OP); \ + } +LSPAIR_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION void MacroAssembler::Asr(const Register& rd, const Register& rn, unsigned shift) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + 
DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); asr(rd, rn, shift); } @@ -338,8 +321,8 @@ void MacroAssembler::Asr(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); asrv(rd, rn, rm); } @@ -351,7 +334,7 @@ void MacroAssembler::B(Condition cond, Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); B(label, cond); } @@ -360,8 +343,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); bfi(rd, rn, lsb, width); } @@ -370,40 +353,40 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); bfxil(rd, rn, lsb, width); } void MacroAssembler::Bind(Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); bind(label); } void MacroAssembler::Bl(Label* label) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); bl(label); } void MacroAssembler::Blr(const Register& xn) { - ASSERT(allow_macro_instructions_); - ASSERT(!xn.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!xn.IsZero()); blr(xn); } void MacroAssembler::Br(const Register& xn) { - ASSERT(allow_macro_instructions_); - ASSERT(!xn.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!xn.IsZero()); br(xn); } void MacroAssembler::Brk(int code) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); brk(code); } @@ -411,9 +394,9 @@ void MacroAssembler::Cinc(const Register& rd, const Register& rn, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); cinc(rd, 
rn, cond); } @@ -421,23 +404,23 @@ void MacroAssembler::Cinv(const Register& rd, const Register& rn, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); cinv(rd, rn, cond); } void MacroAssembler::Cls(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); cls(rd, rn); } void MacroAssembler::Clz(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); clz(rd, rn); } @@ -445,9 +428,9 @@ void MacroAssembler::Cneg(const Register& rd, const Register& rn, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); cneg(rd, rn, cond); } @@ -456,9 +439,9 @@ // due to the truncation side-effect when used on W registers. 
void MacroAssembler::CzeroX(const Register& rd, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsSP() && rd.Is64Bits()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsSP() && rd.Is64Bits()); + DCHECK((cond != al) && (cond != nv)); csel(rd, xzr, rd, cond); } @@ -468,10 +451,10 @@ void MacroAssembler::CmovX(const Register& rd, const Register& rn, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsSP()); - ASSERT(rd.Is64Bits() && rn.Is64Bits()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsSP()); + DCHECK(rd.Is64Bits() && rn.Is64Bits()); + DCHECK((cond != al) && (cond != nv)); if (!rd.is(rn)) { csel(rd, rn, rd, cond); } @@ -479,17 +462,17 @@ void MacroAssembler::Cset(const Register& rd, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); cset(rd, cond); } void MacroAssembler::Csetm(const Register& rd, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); csetm(rd, cond); } @@ -498,9 +481,9 @@ const Register& rn, const Register& rm, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); csinc(rd, rn, rm, cond); } @@ -509,9 +492,9 @@ const Register& rn, const Register& rm, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); csinv(rd, rn, rm, cond); } @@ -520,27 +503,27 @@ const 
Register& rn, const Register& rm, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); + DCHECK((cond != al) && (cond != nv)); csneg(rd, rn, rm, cond); } void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); dmb(domain, type); } void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); dsb(domain, type); } void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); debug(message, code, params); } @@ -549,14 +532,14 @@ const Register& rn, const Register& rm, unsigned lsb) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); extr(rd, rn, rm, lsb); } void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fabs(fd, fn); } @@ -564,7 +547,7 @@ void MacroAssembler::Fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fadd(fd, fn, fm); } @@ -573,20 +556,20 @@ const FPRegister& fm, StatusFlags nzcv, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK((cond != al) && (cond != nv)); fccmp(fn, fm, nzcv, cond); } void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fcmp(fn, fm); } void MacroAssembler::Fcmp(const FPRegister& fn, double value) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); if (value != 0.0) { UseScratchRegisterScope 
temps(this); FPRegister tmp = temps.AcquireSameSizeAs(fn); @@ -602,68 +585,68 @@ const FPRegister& fn, const FPRegister& fm, Condition cond) { - ASSERT(allow_macro_instructions_); - ASSERT((cond != al) && (cond != nv)); + DCHECK(allow_macro_instructions_); + DCHECK((cond != al) && (cond != nv)); fcsel(fd, fn, fm, cond); } void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fcvt(fd, fn); } void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtas(rd, fn); } void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtau(rd, fn); } void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtms(rd, fn); } void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtmu(rd, fn); } void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtns(rd, fn); } void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtnu(rd, fn); } void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtzs(rd, fn); } void 
MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fcvtzu(rd, fn); } @@ -671,7 +654,7 @@ void MacroAssembler::Fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fdiv(fd, fn, fm); } @@ -680,7 +663,7 @@ const FPRegister& fn, const FPRegister& fm, const FPRegister& fa) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmadd(fd, fn, fm, fa); } @@ -688,7 +671,7 @@ void MacroAssembler::Fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmax(fd, fn, fm); } @@ -696,7 +679,7 @@ void MacroAssembler::Fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmaxnm(fd, fn, fm); } @@ -704,7 +687,7 @@ void MacroAssembler::Fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmin(fd, fn, fm); } @@ -712,13 +695,13 @@ void MacroAssembler::Fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fminnm(fd, fn, fm); } void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); // Only emit an instruction if fd and fn are different, and they are both D // registers. fmov(s0, s0) is not a no-op because it clears the top word of // d0. 
Technically, fmov(d0, d0) is not a no-op either because it clears the @@ -730,41 +713,37 @@ void MacroAssembler::Fmov(FPRegister fd, Register rn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmov(fd, rn); } void MacroAssembler::Fmov(FPRegister fd, double imm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); if (fd.Is32Bits()) { Fmov(fd, static_cast<float>(imm)); return; } - ASSERT(fd.Is64Bits()); + DCHECK(fd.Is64Bits()); if (IsImmFP64(imm)) { fmov(fd, imm); } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) { fmov(fd, xzr); } else { - UseScratchRegisterScope temps(this); - Register tmp = temps.AcquireX(); - // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm). - Mov(tmp, double_to_rawbits(imm)); - Fmov(fd, tmp); + Ldr(fd, imm); } } void MacroAssembler::Fmov(FPRegister fd, float imm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); if (fd.Is64Bits()) { Fmov(fd, static_cast<double>(imm)); return; } - ASSERT(fd.Is32Bits()); + DCHECK(fd.Is32Bits()); if (IsImmFP32(imm)) { fmov(fd, imm); } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) { @@ -780,8 +759,8 @@ void MacroAssembler::Fmov(Register rd, FPRegister fn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); fmov(rd, fn); } @@ -790,7 +769,7 @@ const FPRegister& fn, const FPRegister& fm, const FPRegister& fa) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmsub(fd, fn, fm, fa); } @@ -798,13 +777,13 @@ void MacroAssembler::Fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fmul(fd, fn, fm); } void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fneg(fd, fn); } @@ -813,7 +792,7 @@ const FPRegister& fn, const FPRegister& fm, 
const FPRegister& fa) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fnmadd(fd, fn, fm, fa); } @@ -822,31 +801,37 @@ const FPRegister& fn, const FPRegister& fm, const FPRegister& fa) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fnmsub(fd, fn, fm, fa); } void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); frinta(fd, fn); } +void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) { + DCHECK(allow_macro_instructions_); + frintm(fd, fn); +} + + void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); frintn(fd, fn); } void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); frintz(fd, fn); } void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fsqrt(fd, fn); } @@ -854,25 +839,25 @@ void MacroAssembler::Fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); fsub(fd, fn, fm); } void MacroAssembler::Hint(SystemHint code) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); hint(code); } void MacroAssembler::Hlt(int code) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); hlt(code); } void MacroAssembler::Isb() { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); isb(); } @@ -880,49 +865,30 @@ void MacroAssembler::Ldnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& src) { - ASSERT(allow_macro_instructions_); - ASSERT(!AreAliased(rt, rt2)); + DCHECK(allow_macro_instructions_); + DCHECK(!AreAliased(rt, rt2)); ldnp(rt, rt2, src); } -void 
MacroAssembler::Ldp(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& src) { - ASSERT(allow_macro_instructions_); - ASSERT(!AreAliased(rt, rt2)); - ldp(rt, rt2, src); -} - - -void MacroAssembler::Ldpsw(const Register& rt, - const Register& rt2, - const MemOperand& src) { - ASSERT(allow_macro_instructions_); - ASSERT(!rt.IsZero()); - ASSERT(!rt2.IsZero()); - ldpsw(rt, rt2, src); -} - - -void MacroAssembler::Ldr(const FPRegister& ft, double imm) { - ASSERT(allow_macro_instructions_); - ldr(ft, imm); +void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) { + DCHECK(allow_macro_instructions_); + ldr(rt, imm); } -void MacroAssembler::Ldr(const Register& rt, uint64_t imm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rt.IsZero()); - ldr(rt, imm); +void MacroAssembler::Ldr(const CPURegister& rt, double imm) { + DCHECK(allow_macro_instructions_); + DCHECK(rt.Is64Bits()); + ldr(rt, Immediate(double_to_rawbits(imm))); } void MacroAssembler::Lsl(const Register& rd, const Register& rn, unsigned shift) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); lsl(rd, rn, shift); } @@ -930,8 +896,8 @@ void MacroAssembler::Lsl(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); lslv(rd, rn, rm); } @@ -939,8 +905,8 @@ void MacroAssembler::Lsr(const Register& rd, const Register& rn, unsigned shift) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); lsr(rd, rn, shift); } @@ -948,8 +914,8 @@ void MacroAssembler::Lsr(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); lsrv(rd, rn, rm); } @@ -958,8 +924,8 @@ const Register& rn, const Register& rm, const 
Register& ra) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); madd(rd, rn, rm, ra); } @@ -967,15 +933,15 @@ void MacroAssembler::Mneg(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); mneg(rd, rn, rm); } void MacroAssembler::Mov(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); // Emit a register move only if the registers are distinct, or if they are // not X registers. Note that mov(w0, w0) is not a no-op because it clears // the top word of x0. @@ -986,22 +952,21 @@ void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); movk(rd, imm, shift); } void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) { - ASSERT(allow_macro_instructions_); - ASSERT(!rt.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rt.IsZero()); mrs(rt, sysreg); } void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) { - ASSERT(allow_macro_instructions_); - ASSERT(!rt.IsZero()); + DCHECK(allow_macro_instructions_); msr(sysreg, rt); } @@ -1010,8 +975,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); msub(rd, rn, rm, ra); } @@ -1019,44 +984,44 @@ void MacroAssembler::Mul(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); mul(rd, rn, rm); } void MacroAssembler::Rbit(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); 
- ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); rbit(rd, rn); } void MacroAssembler::Ret(const Register& xn) { - ASSERT(allow_macro_instructions_); - ASSERT(!xn.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!xn.IsZero()); ret(xn); CheckVeneerPool(false, false); } void MacroAssembler::Rev(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); rev(rd, rn); } void MacroAssembler::Rev16(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); rev16(rd, rn); } void MacroAssembler::Rev32(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); rev32(rd, rn); } @@ -1064,8 +1029,8 @@ void MacroAssembler::Ror(const Register& rd, const Register& rs, unsigned shift) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); ror(rd, rs, shift); } @@ -1073,8 +1038,8 @@ void MacroAssembler::Ror(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); rorv(rd, rn, rm); } @@ -1083,8 +1048,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); sbfiz(rd, rn, lsb, width); } @@ -1093,8 +1058,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); sbfx(rd, rn, lsb, width); } @@ -1102,7 +1067,7 @@ void MacroAssembler::Scvtf(const FPRegister& fd, const Register& rn, unsigned fbits) { - 
ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); scvtf(fd, rn, fbits); } @@ -1110,8 +1075,8 @@ void MacroAssembler::Sdiv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); sdiv(rd, rn, rm); } @@ -1120,8 +1085,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); smaddl(rd, rn, rm, ra); } @@ -1130,8 +1095,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); smsubl(rd, rn, rm, ra); } @@ -1139,8 +1104,8 @@ void MacroAssembler::Smull(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); smull(rd, rn, rm); } @@ -1148,8 +1113,8 @@ void MacroAssembler::Smulh(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); smulh(rd, rn, rm); } @@ -1157,36 +1122,28 @@ void MacroAssembler::Stnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& dst) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); stnp(rt, rt2, dst); } -void MacroAssembler::Stp(const CPURegister& rt, - const CPURegister& rt2, - const MemOperand& dst) { - ASSERT(allow_macro_instructions_); - stp(rt, rt2, dst); -} - - void MacroAssembler::Sxtb(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); sxtb(rd, rn); } void MacroAssembler::Sxth(const Register& rd, const Register& rn) { - 
ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); sxth(rd, rn); } void MacroAssembler::Sxtw(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); sxtw(rd, rn); } @@ -1195,8 +1152,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); ubfiz(rd, rn, lsb, width); } @@ -1205,8 +1162,8 @@ const Register& rn, unsigned lsb, unsigned width) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); ubfx(rd, rn, lsb, width); } @@ -1214,7 +1171,7 @@ void MacroAssembler::Ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits) { - ASSERT(allow_macro_instructions_); + DCHECK(allow_macro_instructions_); ucvtf(fd, rn, fbits); } @@ -1222,8 +1179,8 @@ void MacroAssembler::Udiv(const Register& rd, const Register& rn, const Register& rm) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); udiv(rd, rn, rm); } @@ -1232,8 +1189,8 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); umaddl(rd, rn, rm, ra); } @@ -1242,58 +1199,87 @@ const Register& rn, const Register& rm, const Register& ra) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); umsubl(rd, rn, rm, ra); } void MacroAssembler::Uxtb(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); uxtb(rd, rn); } void MacroAssembler::Uxth(const Register& rd, const Register& rn) { - 
ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); uxth(rd, rn); } void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { - ASSERT(allow_macro_instructions_); - ASSERT(!rd.IsZero()); + DCHECK(allow_macro_instructions_); + DCHECK(!rd.IsZero()); uxtw(rd, rn); } void MacroAssembler::BumpSystemStackPointer(const Operand& space) { - ASSERT(!csp.Is(sp_)); - // TODO(jbramley): Several callers rely on this not using scratch registers, - // so we use the assembler directly here. However, this means that large - // immediate values of 'space' cannot be handled cleanly. (Only 24-bits - // immediates or values of 'space' that can be encoded in one instruction are - // accepted.) Once we implement our flexible scratch register idea, we could - // greatly simplify this function. - InstructionAccurateScope scope(this); - if ((space.IsImmediate()) && !is_uint12(space.immediate())) { - // The subtract instruction supports a 12-bit immediate, shifted left by - // zero or 12 bits. So, in two instructions, we can subtract any immediate - // between zero and (1 << 24) - 1. - int64_t imm = space.immediate(); - ASSERT(is_uint24(imm)); - - int64_t imm_top_12_bits = imm >> 12; - sub(csp, StackPointer(), imm_top_12_bits << 12); - imm -= imm_top_12_bits << 12; - if (imm > 0) { - sub(csp, csp, imm); + DCHECK(!csp.Is(sp_)); + if (!TmpList()->IsEmpty()) { + if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Sub(temp, StackPointer(), space); + Bic(csp, temp, 0xf); + } else { + Sub(csp, StackPointer(), space); } } else { - sub(csp, StackPointer(), space); + // TODO(jbramley): Several callers rely on this not using scratch + // registers, so we use the assembler directly here. However, this means + // that large immediate values of 'space' cannot be handled cleanly. 
(Only + // 24-bits immediates or values of 'space' that can be encoded in one + // instruction are accepted.) Once we implement our flexible scratch + // register idea, we could greatly simplify this function. + InstructionAccurateScope scope(this); + DCHECK(space.IsImmediate()); + // Align to 16 bytes. + uint64_t imm = RoundUp(space.ImmediateValue(), 0x10); + DCHECK(is_uint24(imm)); + + Register source = StackPointer(); + if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { + bic(csp, source, 0xf); + source = csp; + } + if (!is_uint12(imm)) { + int64_t imm_top_12_bits = imm >> 12; + sub(csp, source, imm_top_12_bits << 12); + source = csp; + imm -= imm_top_12_bits << 12; + } + if (imm > 0) { + sub(csp, source, imm); + } } + AssertStackConsistency(); +} + + +void MacroAssembler::SyncSystemStackPointer() { + DCHECK(emit_debug_code()); + DCHECK(!csp.Is(sp_)); + { InstructionAccurateScope scope(this); + if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { + bic(csp, StackPointer(), 0xf); + } else { + mov(csp, StackPointer()); + } + } + AssertStackConsistency(); } @@ -1305,7 +1291,9 @@ void MacroAssembler::SmiTag(Register dst, Register src) { - ASSERT(dst.Is64Bits() && src.Is64Bits()); + STATIC_ASSERT(kXRegSizeInBits == + static_cast<unsigned>(kSmiShift + kSmiValueSize)); + DCHECK(dst.Is64Bits() && src.Is64Bits()); Lsl(dst, src, kSmiShift); } @@ -1314,7 +1302,9 @@ void MacroAssembler::SmiUntag(Register dst, Register src) { - ASSERT(dst.Is64Bits() && src.Is64Bits()); + STATIC_ASSERT(kXRegSizeInBits == + static_cast<unsigned>(kSmiShift + kSmiValueSize)); + DCHECK(dst.Is64Bits() && src.Is64Bits()); if (FLAG_enable_slow_asserts) { AssertSmi(src); } @@ -1328,7 +1318,7 @@ void MacroAssembler::SmiUntagToDouble(FPRegister dst, Register src, UntagMode mode) { - ASSERT(dst.Is64Bits() && src.Is64Bits()); + DCHECK(dst.Is64Bits() && src.Is64Bits()); if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) { AssertSmi(src); } @@ -1339,7 +1329,7 @@ void 
MacroAssembler::SmiUntagToFloat(FPRegister dst, Register src, UntagMode mode) { - ASSERT(dst.Is32Bits() && src.Is64Bits()); + DCHECK(dst.Is32Bits() && src.Is64Bits()); if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) { AssertSmi(src); } @@ -1347,6 +1337,22 @@ } +void MacroAssembler::SmiTagAndPush(Register src) { + STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) && + (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) && + (kSmiTag == 0)); + Push(src.W(), wzr); +} + + +void MacroAssembler::SmiTagAndPush(Register src1, Register src2) { + STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) && + (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) && + (kSmiTag == 0)); + Push(src1.W(), wzr, src2.W(), wzr); +} + + void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, Label* not_smi_label) { @@ -1358,7 +1364,7 @@ B(not_smi_label); } } else { - ASSERT(not_smi_label); + DCHECK(not_smi_label); Tbnz(value, 0, not_smi_label); } } @@ -1409,6 +1415,30 @@ } +void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) { + STATIC_ASSERT(kHeapObjectTag == 1); + if (emit_debug_code()) { + Label ok; + Tbz(obj, 0, &ok); + Abort(kObjectTagged); + Bind(&ok); + } + Orr(tagged_obj, obj, kHeapObjectTag); +} + + +void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) { + STATIC_ASSERT(kHeapObjectTag == 1); + if (emit_debug_code()) { + Label ok; + Tbnz(obj, 0, &ok); + Abort(kObjectNotTagged); + Bind(&ok); + } + Bic(untagged_obj, obj, kHeapObjectTag); +} + + void MacroAssembler::IsObjectNameType(Register object, Register type, Label* fail) { @@ -1451,7 +1481,7 @@ Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset)); STATIC_ASSERT(kStringTag == 0); - ASSERT((string != NULL) || (not_string != NULL)); + DCHECK((string != NULL) || (not_string != NULL)); if (string == NULL) { TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string); } else if (not_string == NULL) { @@ -1479,7 +1509,7 @@ } 
if (csp.Is(StackPointer())) { - ASSERT(size % 16 == 0); + DCHECK(size % 16 == 0); } else { BumpSystemStackPointer(size); } @@ -1489,11 +1519,8 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) { - ASSERT(IsPowerOf2(unit_size)); - - if (unit_size == 0) { - return; - } + if (unit_size == 0) return; + DCHECK(IsPowerOf2(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); const Operand size(count, LSL, shift); @@ -1511,7 +1538,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) { - ASSERT(IsPowerOf2(unit_size)); + DCHECK(unit_size == 0 || IsPowerOf2(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; const Operand size(count_smi, (shift >= 0) ? (LSL) : (LSR), @@ -1539,22 +1566,19 @@ Add(StackPointer(), StackPointer(), size); if (csp.Is(StackPointer())) { - ASSERT(size % 16 == 0); + DCHECK(size % 16 == 0); } else if (emit_debug_code()) { // It is safe to leave csp where it is when unwinding the JavaScript stack, // but if we keep it matching StackPointer, the simulator can detect memory // accesses in the now-free part of the stack. - Mov(csp, StackPointer()); + SyncSystemStackPointer(); } } void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { - ASSERT(IsPowerOf2(unit_size)); - - if (unit_size == 0) { - return; - } + if (unit_size == 0) return; + DCHECK(IsPowerOf2(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); const Operand size(count, LSL, shift); @@ -1569,13 +1593,13 @@ // It is safe to leave csp where it is when unwinding the JavaScript stack, // but if we keep it matching StackPointer, the simulator can detect memory // accesses in the now-free part of the stack. 
- Mov(csp, StackPointer()); + SyncSystemStackPointer(); } } void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { - ASSERT(IsPowerOf2(unit_size)); + DCHECK(unit_size == 0 || IsPowerOf2(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; const Operand size(count_smi, (shift >= 0) ? (LSL) : (LSR), @@ -1591,7 +1615,7 @@ // It is safe to leave csp where it is when unwinding the JavaScript stack, // but if we keep it matching StackPointer, the simulator can detect memory // accesses in the now-free part of the stack. - Mov(csp, StackPointer()); + SyncSystemStackPointer(); } } @@ -1600,7 +1624,7 @@ const Operand& rhs, Condition cond, Label* label) { - if (rhs.IsImmediate() && (rhs.immediate() == 0) && + if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) && ((cond == eq) || (cond == ne))) { if (cond == eq) { Cbz(lhs, label); @@ -1618,7 +1642,7 @@ const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); - ASSERT(CountSetBits(bit_pattern, bits) > 0); + DCHECK(CountSetBits(bit_pattern, bits) > 0); if (CountSetBits(bit_pattern, bits) == 1) { Tbnz(reg, MaskToBit(bit_pattern), label); } else { @@ -1632,7 +1656,7 @@ const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); - ASSERT(CountSetBits(bit_pattern, bits) > 0); + DCHECK(CountSetBits(bit_pattern, bits) > 0); if (CountSetBits(bit_pattern, bits) == 1) { Tbz(reg, MaskToBit(bit_pattern), label); } else { @@ -1643,7 +1667,7 @@ void MacroAssembler::InlineData(uint64_t data) { - ASSERT(is_uint16(data)); + DCHECK(is_uint16(data)); InstructionAccurateScope scope(this, 1); movz(xzr, data); } @@ -1662,11 +1686,11 @@ void MacroAssembler::AnnotateInstrumentation(const char* marker_name) { - ASSERT(strlen(marker_name) == 2); + DCHECK(strlen(marker_name) == 2); // We allow only printable characters in the marker names. Unprintable // characters are reserved for controlling features of the instrumentation. 
- ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); + DCHECK(isprint(marker_name[0]) && isprint(marker_name[1])); InstructionAccurateScope scope(this, 1); movn(xzr, (marker_name[1] << 8) | marker_name[0]); diff -Nru nodejs-0.11.13/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,20 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "cpu-profiler.h" -#include "unicode.h" -#include "log.h" -#include "code-stubs.h" -#include "regexp-stack.h" -#include "macro-assembler.h" -#include "regexp-macro-assembler.h" -#include "arm64/regexp-macro-assembler-arm64.h" +#include "src/code-stubs.h" +#include "src/cpu-profiler.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/unicode.h" + +#include "src/arm64/regexp-macro-assembler-arm64.h" namespace v8 { namespace internal { @@ -148,7 +126,7 @@ backtrack_label_(), exit_label_() { __ SetStackPointer(csp); - ASSERT_EQ(0, registers_to_save % 2); + DCHECK_EQ(0, registers_to_save % 2); // We can cache at most 16 W registers in x0-x7. 
STATIC_ASSERT(kNumCachedRegisters <= 16); STATIC_ASSERT((kNumCachedRegisters % 2) == 0); @@ -183,7 +161,7 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) { - ASSERT((reg >= 0) && (reg < num_registers_)); + DCHECK((reg >= 0) && (reg < num_registers_)); if (by != 0) { Register to_advance; RegisterState register_state = GetRegisterState(reg); @@ -284,7 +262,7 @@ for (int i = 0; i < str.length(); i++) { if (mode_ == ASCII) { __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex)); - ASSERT(str[i] <= String::kMaxOneByteCharCode); + DCHECK(str[i] <= String::kMaxOneByteCharCode); } else { __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex)); } @@ -311,10 +289,10 @@ // Save the capture length in a callee-saved register so it will // be preserved if we call a C helper. Register capture_length = w19; - ASSERT(kCalleeSaved.IncludesAliasOf(capture_length)); + DCHECK(kCalleeSaved.IncludesAliasOf(capture_length)); // Find length of back-referenced capture. - ASSERT((start_reg % 2) == 0); + DCHECK((start_reg % 2) == 0); if (start_reg < kNumCachedRegisters) { __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg)); __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits); @@ -387,12 +365,12 @@ __ Check(le, kOffsetOutOfRange); } } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); int argument_count = 4; // The cached registers need to be retained. CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7); - ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters); + DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters); __ PushCPURegList(cached_registers); // Put arguments into arguments registers. @@ -419,11 +397,14 @@ } // Check if function returned non-zero for success or zero for failure. - CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match); + // x0 is one of the registers used as a cache so it must be tested before + // the cache is restored. 
+ __ Cmp(x0, 0); + __ PopCPURegList(cached_registers); + BranchOrBacktrack(eq, on_no_match); + // On success, increment position by length of capture. __ Add(current_input_offset(), current_input_offset(), capture_length); - // Reset the cached registers. - __ PopCPURegList(cached_registers); } __ Bind(&fallthrough); @@ -440,7 +421,7 @@ Register capture_length = w15; // Find length of back-referenced capture. - ASSERT((start_reg % 2) == 0); + DCHECK((start_reg % 2) == 0); if (start_reg < kNumCachedRegisters) { __ Mov(x10, GetCachedRegister(start_reg)); __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits); @@ -470,7 +451,7 @@ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex)); __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex)); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex)); __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex)); } @@ -518,7 +499,7 @@ uc16 minus, uc16 mask, Label* on_not_equal) { - ASSERT(minus < String::kMaxUtf16CodeUnit); + DCHECK(minus < String::kMaxUtf16CodeUnit); __ Sub(w10, current_character(), minus); __ And(w10, w10, mask); CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal); @@ -700,10 +681,10 @@ CPURegList argument_registers(x0, x5, x6, x7); CPURegList registers_to_retain = kCalleeSaved; - ASSERT(kCalleeSaved.Count() == 11); + DCHECK(kCalleeSaved.Count() == 11); registers_to_retain.Combine(lr); - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ PushCPURegList(registers_to_retain); __ PushCPURegList(argument_registers); @@ -727,7 +708,7 @@ // Make sure the stack alignment will be respected. 
int alignment = masm_->ActivationFrameAlignment(); - ASSERT_EQ(alignment % 16, 0); + DCHECK_EQ(alignment % 16, 0); int align_mask = (alignment / kWRegSize) - 1; num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask; @@ -880,7 +861,7 @@ Register base = x10; // There are always an even number of capture registers. A couple of // registers determine one match with two offsets. - ASSERT_EQ(0, num_registers_left_on_stack % 2); + DCHECK_EQ(0, num_registers_left_on_stack % 2); __ Add(base, frame_pointer(), kFirstCaptureOnStack); // We can unroll the loop here, we should not unroll for less than 2 @@ -997,8 +978,9 @@ __ Bind(&return_w0); // Set stack pointer back to first register to retain - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ Mov(csp, fp); + __ AssertStackConsistency(); // Restore registers. __ PopCPURegList(registers_to_retain); @@ -1009,7 +991,7 @@ // Registers x0 to x7 are used to store the first captures, they need to be // retained over calls to C++ code. CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7); - ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters); + DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters); if (check_preempt_label_.is_linked()) { __ Bind(&check_preempt_label_); @@ -1102,9 +1084,9 @@ int characters) { // TODO(pielan): Make sure long strings are caught before this, and not // just asserted in debug mode. - ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. // Be sane! 
(And ensure that an int32_t can be used to index the string) - ASSERT(cp_offset < (1<<30)); + DCHECK(cp_offset < (1<<30)); if (check_bounds) { CheckPosition(cp_offset + characters - 1, on_end_of_input); } @@ -1128,7 +1110,7 @@ int target = label->pos(); __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag); } else { - __ Adr(x10, label); + __ Adr(x10, label, MacroAssembler::kAdrFar); __ Sub(x10, x10, code_pointer()); if (masm_->emit_debug_code()) { __ Cmp(x10, kWRegMask); @@ -1197,7 +1179,7 @@ void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) { - ASSERT(register_index >= num_saved_registers_); // Reserved for positions! + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! Register set_to = wzr; if (to != 0) { set_to = w10; @@ -1225,7 +1207,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) { - ASSERT(reg_from <= reg_to); + DCHECK(reg_from <= reg_to); int num_registers = reg_to - reg_from + 1; // If the first capture register is cached in a hardware register but not @@ -1238,7 +1220,7 @@ // Clear cached registers in pairs as far as possible. while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) { - ASSERT(GetRegisterState(reg_from) == CACHED_LSW); + DCHECK(GetRegisterState(reg_from) == CACHED_LSW); __ Mov(GetCachedRegister(reg_from), twice_non_position_value()); reg_from += 2; num_registers -= 2; @@ -1252,7 +1234,7 @@ if (num_registers > 0) { // If there are some remaining registers, they are stored on the stack. - ASSERT(reg_from >= kNumCachedRegisters); + DCHECK(reg_from >= kNumCachedRegisters); // Move down the indexes of the registers on stack to get the correct offset // in memory. 
@@ -1311,7 +1293,8 @@ const byte** input_start, const byte** input_end) { Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); - if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { isolate->StackOverflow(); return EXCEPTION; } @@ -1334,11 +1317,11 @@ // Current string. bool is_ascii = subject->IsOneByteRepresentationUnderneath(); - ASSERT(re_code->instruction_start() <= *return_address); - ASSERT(*return_address <= + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= re_code->instruction_start() + re_code->instruction_size()); - MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate); + Object* result = isolate->stack_guard()->HandleInterrupts(); if (*code_handle != re_code) { // Return address no longer valid int delta = code_handle->address() - re_code->address(); @@ -1374,7 +1357,7 @@ // be a sequential or external string with the same content. // Update the start and end pointers in the stack frame to the current // location (whether it has actually moved or not). - ASSERT(StringShape(*subject_tmp).IsSequential() || + DCHECK(StringShape(*subject_tmp).IsSequential() || StringShape(*subject_tmp).IsExternal()); // The original start address of the characters to match. @@ -1427,11 +1410,11 @@ // moved. Allocate extra space for 2 arguments passed by pointers. // AAPCS64 requires the stack to be 16 byte aligned. int alignment = masm_->ActivationFrameAlignment(); - ASSERT_EQ(alignment % 16, 0); + DCHECK_EQ(alignment % 16, 0); int align_mask = (alignment / kXRegSize) - 1; int xreg_to_claim = (3 + align_mask) & ~align_mask; - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ Claim(xreg_to_claim); // CheckStackGuardState needs the end and start addresses of the input string. 
@@ -1454,14 +1437,14 @@ ExternalReference check_stack_guard_state = ExternalReference::re_check_stack_guard_state(isolate()); __ Mov(scratch, check_stack_guard_state); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm_, scratch); // The input string may have been moved in memory, we need to reload it. __ Peek(input_start(), kPointerSize); __ Peek(input_end(), 2 * kPointerSize); - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ Drop(xreg_to_claim); // Reload the Code pointer. @@ -1481,12 +1464,7 @@ if (to == NULL) { to = &backtrack_label_; } - // TODO(ulan): do direct jump when jump distance is known and fits in imm19. - Condition inverted_condition = InvertCondition(condition); - Label no_branch; - __ B(inverted_condition, &no_branch); - __ B(to); - __ Bind(&no_branch); + __ B(condition, to); } void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg, @@ -1497,15 +1475,11 @@ if (to == NULL) { to = &backtrack_label_; } - // TODO(ulan): do direct jump when jump distance is known and fits in imm19. 
- Label no_branch; if (condition == eq) { - __ Cbnz(reg, &no_branch); + __ Cbz(reg, to); } else { - __ Cbz(reg, &no_branch); + __ Cbnz(reg, to); } - __ B(to); - __ Bind(&no_branch); } else { __ Cmp(reg, immediate); BranchOrBacktrack(condition, to); @@ -1519,7 +1493,7 @@ ExternalReference::address_of_stack_limit(isolate()); __ Mov(x10, stack_limit); __ Ldr(x10, MemOperand(x10)); - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ Cmp(csp, x10); CallIf(&check_preempt_label_, ls); } @@ -1536,8 +1510,8 @@ void RegExpMacroAssemblerARM64::Push(Register source) { - ASSERT(source.Is32Bits()); - ASSERT(!source.is(backtrack_stackpointer())); + DCHECK(source.Is32Bits()); + DCHECK(!source.is(backtrack_stackpointer())); __ Str(source, MemOperand(backtrack_stackpointer(), -static_cast<int>(kWRegSize), @@ -1546,23 +1520,23 @@ void RegExpMacroAssemblerARM64::Pop(Register target) { - ASSERT(target.Is32Bits()); - ASSERT(!target.is(backtrack_stackpointer())); + DCHECK(target.Is32Bits()); + DCHECK(!target.is(backtrack_stackpointer())); __ Ldr(target, MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex)); } Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) { - ASSERT(register_index < kNumCachedRegisters); + DCHECK(register_index < kNumCachedRegisters); return Register::Create(register_index / 2, kXRegSizeInBits); } Register RegExpMacroAssemblerARM64::GetRegister(int register_index, Register maybe_result) { - ASSERT(maybe_result.Is32Bits()); - ASSERT(register_index >= 0); + DCHECK(maybe_result.Is32Bits()); + DCHECK(register_index >= 0); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1585,15 +1559,15 @@ UNREACHABLE(); break; } - ASSERT(result.Is32Bits()); + DCHECK(result.Is32Bits()); return result; } void RegExpMacroAssemblerARM64::StoreRegister(int register_index, Register source) { - ASSERT(source.Is32Bits()); - ASSERT(register_index >= 0); + DCHECK(source.Is32Bits()); + DCHECK(register_index >= 
0); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1623,29 +1597,29 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) { Label skip_call; - if (condition != al) __ B(&skip_call, InvertCondition(condition)); + if (condition != al) __ B(&skip_call, NegateCondition(condition)); __ Bl(to); __ Bind(&skip_call); } void RegExpMacroAssemblerARM64::RestoreLinkRegister() { - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ Pop(lr, xzr); __ Add(lr, lr, Operand(masm_->CodeObject())); } void RegExpMacroAssemblerARM64::SaveLinkRegister() { - ASSERT(csp.Is(__ StackPointer())); + DCHECK(csp.Is(__ StackPointer())); __ Sub(lr, lr, Operand(masm_->CodeObject())); __ Push(xzr, lr); } MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) { - ASSERT(register_index < (1<<30)); - ASSERT(register_index >= kNumCachedRegisters); + DCHECK(register_index < (1<<30)); + DCHECK(register_index >= kNumCachedRegisters); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1656,10 +1630,10 @@ MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index, Register scratch) { - ASSERT(register_index < (1<<30)); - ASSERT(register_index < num_saved_registers_); - ASSERT(register_index >= kNumCachedRegisters); - ASSERT_EQ(register_index % 2, 0); + DCHECK(register_index < (1<<30)); + DCHECK(register_index < num_saved_registers_); + DCHECK(register_index >= kNumCachedRegisters); + DCHECK_EQ(register_index % 2, 0); register_index -= kNumCachedRegisters; int offset = kFirstCaptureOnStack - register_index * kWRegSize; // capture_location is used with Stp instructions to load/store 2 registers. @@ -1685,7 +1659,7 @@ // disable it. // TODO(pielan): See whether or not we should disable unaligned accesses. 
if (!CanReadUnaligned()) { - ASSERT(characters == 1); + DCHECK(characters == 1); } if (cp_offset != 0) { @@ -1707,15 +1681,15 @@ } else if (characters == 2) { __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW)); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW)); } } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); if (characters == 2) { __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW)); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW)); } } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/regexp-macro-assembler-arm64.h nodejs-0.11.15/deps/v8/src/arm64/regexp-macro-assembler-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/regexp-macro-assembler-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/regexp-macro-assembler-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_ #define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_ -#include "arm64/assembler-arm64.h" -#include "arm64/assembler-arm64-inl.h" -#include "macro-assembler.h" +#include "src/macro-assembler.h" + +#include "src/arm64/assembler-arm64.h" +#include "src/arm64/assembler-arm64-inl.h" namespace v8 { namespace internal { @@ -253,7 +231,7 @@ }; RegisterState GetRegisterState(int register_index) { - ASSERT(register_index >= 0); + DCHECK(register_index >= 0); if (register_index >= kNumCachedRegisters) { return STACKED; } else { diff -Nru nodejs-0.11.13/deps/v8/src/arm64/simulator-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/simulator-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/simulator-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/simulator-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,20 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <stdlib.h> #include <cmath> #include <cstdarg> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "disasm.h" -#include "assembler.h" -#include "arm64/decoder-arm64-inl.h" -#include "arm64/simulator-arm64.h" -#include "macro-assembler.h" +#include "src/arm64/decoder-arm64-inl.h" +#include "src/arm64/simulator-arm64.h" +#include "src/assembler.h" +#include "src/disasm.h" +#include "src/macro-assembler.h" +#include "src/ostreams.h" namespace v8 { namespace internal { @@ -80,11 +58,11 @@ // This is basically the same as PrintF, with a guard for FLAG_trace_sim. -void PRINTF_CHECKING TraceSim(const char* format, ...) { +void Simulator::TraceSim(const char* format, ...) { if (FLAG_trace_sim) { va_list arguments; va_start(arguments, format); - OS::VPrint(format, arguments); + base::OS::VFPrint(stream_, format, arguments); va_end(arguments); } } @@ -95,11 +73,11 @@ void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) { int width = msb - lsb + 1; - ASSERT(is_uintn(bits, width) || is_intn(bits, width)); + DCHECK(is_uintn(bits, width) || is_intn(bits, width)); bits <<= lsb; uint32_t mask = ((1 << width) - 1) << lsb; - ASSERT((mask & write_ignore_mask_) == 0); + DCHECK((mask & write_ignore_mask_) == 0); value_ = (value_ & ~mask) | (bits & mask); } @@ -129,7 +107,7 @@ Simulator* Simulator::current(Isolate* isolate) { Isolate::PerIsolateThreadData* isolate_data = isolate->FindOrAllocatePerThreadDataForThisThread(); - ASSERT(isolate_data != NULL); + DCHECK(isolate_data != NULL); Simulator* sim = isolate_data->simulator(); if (sim == NULL) { @@ -157,7 +135,7 @@ } else if (arg.IsD() && (index_d < 8)) { set_dreg_bits(index_d++, arg.bits()); } else { - ASSERT(arg.IsD() || arg.IsX()); + DCHECK(arg.IsD() || arg.IsX()); stack_args.push_back(arg.bits()); } } @@ -166,8 +144,8 @@ uintptr_t original_stack = sp(); uintptr_t entry_stack = original_stack - stack_args.size() * sizeof(stack_args[0]); - if (OS::ActivationFrameAlignment() != 0) { - 
entry_stack &= -OS::ActivationFrameAlignment(); + if (base::OS::ActivationFrameAlignment() != 0) { + entry_stack &= -base::OS::ActivationFrameAlignment(); } char * stack = reinterpret_cast<char*>(entry_stack); std::vector<int64_t>::const_iterator it; @@ -176,7 +154,7 @@ stack += sizeof(*it); } - ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack); + DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack); set_sp(entry_stack); // Call the generated code. @@ -278,7 +256,7 @@ CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code())); } for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) { - ASSERT(saved_fpregisters[i] == + DCHECK(saved_fpregisters[i] == dreg_bits(fpregister_list.PopLowestIndex().code())); } @@ -311,7 +289,7 @@ set_xreg(code, value | code); } } else { - ASSERT(list->type() == CPURegister::kFPRegister); + DCHECK(list->type() == CPURegister::kFPRegister); while (!list->IsEmpty()) { unsigned code = list->PopLowestIndex().code(); set_dreg_bits(code, value | code); @@ -333,7 +311,7 @@ // Extending the stack by 2 * 64 bits is required for stack alignment purposes. 
uintptr_t Simulator::PushAddress(uintptr_t address) { - ASSERT(sizeof(uintptr_t) < 2 * kXRegSize); + DCHECK(sizeof(uintptr_t) < 2 * kXRegSize); intptr_t new_sp = sp() - 2 * kXRegSize; uintptr_t* alignment_slot = reinterpret_cast<uintptr_t*>(new_sp + kXRegSize); @@ -349,7 +327,7 @@ intptr_t current_sp = sp(); uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); uintptr_t address = *stack_slot; - ASSERT(sizeof(uintptr_t) < 2 * kXRegSize); + DCHECK(sizeof(uintptr_t) < 2 * kXRegSize); set_sp(current_sp + 2 * kXRegSize); return address; } @@ -392,7 +370,7 @@ last_debugger_input_(NULL), log_parameters_(NO_PARAM), isolate_(NULL) { - Init(NULL); + Init(stdout); CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats); } @@ -503,7 +481,7 @@ Redirection* current = isolate->simulator_redirection(); for (; current != NULL; current = current->next_) { if (current->external_function_ == external_function) { - ASSERT_EQ(current->type(), type); + DCHECK_EQ(current->type(), type); return current; } } @@ -593,7 +571,7 @@ break; case ExternalReference::BUILTIN_CALL: { - // MaybeObject* f(v8::internal::Arguments). + // Object* f(v8::internal::Arguments). TraceSim("Type: BUILTIN_CALL\n"); SimulatorRuntimeCall target = reinterpret_cast<SimulatorRuntimeCall>(external); @@ -787,7 +765,7 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { - ASSERT(code < kNumberOfRegisters); + DCHECK(code < kNumberOfRegisters); // If the code represents the stack pointer, index the name after zr. if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { code = kZeroRegCode + 1; @@ -797,7 +775,7 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { - ASSERT(code < kNumberOfRegisters); + DCHECK(code < kNumberOfRegisters); // If the code represents the stack pointer, index the name after zr. 
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { code = kZeroRegCode + 1; @@ -807,19 +785,19 @@ const char* Simulator::SRegNameForCode(unsigned code) { - ASSERT(code < kNumberOfFPRegisters); + DCHECK(code < kNumberOfFPRegisters); return sreg_names[code]; } const char* Simulator::DRegNameForCode(unsigned code) { - ASSERT(code < kNumberOfFPRegisters); + DCHECK(code < kNumberOfFPRegisters); return dreg_names[code]; } const char* Simulator::VRegNameForCode(unsigned code) { - ASSERT(code < kNumberOfFPRegisters); + DCHECK(code < kNumberOfFPRegisters); return vreg_names[code]; } @@ -846,49 +824,30 @@ // Helpers --------------------------------------------------------------------- -int64_t Simulator::AddWithCarry(unsigned reg_size, - bool set_flags, - int64_t src1, - int64_t src2, - int64_t carry_in) { - ASSERT((carry_in == 0) || (carry_in == 1)); - ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); - - uint64_t u1, u2; - int64_t result; - int64_t signed_sum = src1 + src2 + carry_in; +template <typename T> +T Simulator::AddWithCarry(bool set_flags, + T src1, + T src2, + T carry_in) { + typedef typename make_unsigned<T>::type unsignedT; + DCHECK((carry_in == 0) || (carry_in == 1)); + + T signed_sum = src1 + src2 + carry_in; + T result = signed_sum; bool N, Z, C, V; - if (reg_size == kWRegSizeInBits) { - u1 = static_cast<uint64_t>(src1) & kWRegMask; - u2 = static_cast<uint64_t>(src2) & kWRegMask; - - result = signed_sum & kWRegMask; - // Compute the C flag by comparing the sum to the max unsigned integer. - C = ((kWMaxUInt - u1) < (u2 + carry_in)) || - ((kWMaxUInt - u1 - carry_in) < u2); - // Overflow iff the sign bit is the same for the two inputs and different - // for the result. 
- int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits); - int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits); - int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits); - V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0); - - } else { - u1 = static_cast<uint64_t>(src1); - u2 = static_cast<uint64_t>(src2); - - result = signed_sum; - // Compute the C flag by comparing the sum to the max unsigned integer. - C = ((kXMaxUInt - u1) < (u2 + carry_in)) || - ((kXMaxUInt - u1 - carry_in) < u2); - // Overflow iff the sign bit is the same for the two inputs and different - // for the result. - V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0); - } + // Compute the C flag + unsignedT u1 = static_cast<unsignedT>(src1); + unsignedT u2 = static_cast<unsignedT>(src2); + unsignedT urest = std::numeric_limits<unsignedT>::max() - u1; + C = (u2 > urest) || (carry_in && (((u2 + 1) > urest) || (u2 > (urest - 1)))); + + // Overflow iff the sign bit is the same for the two inputs and different + // for the result. + V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0); - N = CalcNFlag(result, reg_size); + N = CalcNFlag(result); Z = CalcZFlag(result); if (set_flags) { @@ -901,33 +860,42 @@ } -int64_t Simulator::ShiftOperand(unsigned reg_size, - int64_t value, - Shift shift_type, - unsigned amount) { +template<typename T> +void Simulator::AddSubWithCarry(Instruction* instr) { + T op2 = reg<T>(instr->Rm()); + T new_val; + + if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) { + op2 = ~op2; + } + + new_val = AddWithCarry<T>(instr->FlagsUpdate(), + reg<T>(instr->Rn()), + op2, + nzcv().C()); + + set_reg<T>(instr->Rd(), new_val); +} + +template <typename T> +T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) { + typedef typename make_unsigned<T>::type unsignedT; + if (amount == 0) { return value; } - int64_t mask = reg_size == kXRegSizeInBits ? 
kXRegMask : kWRegMask; + switch (shift_type) { case LSL: - return (value << amount) & mask; + return value << amount; case LSR: - return static_cast<uint64_t>(value) >> amount; - case ASR: { - // Shift used to restore the sign. - unsigned s_shift = kXRegSizeInBits - reg_size; - // Value with its sign restored. - int64_t s_value = (value << s_shift) >> s_shift; - return (s_value >> amount) & mask; - } - case ROR: { - if (reg_size == kWRegSizeInBits) { - value &= kWRegMask; - } - return (static_cast<uint64_t>(value) >> amount) | - ((value & ((1L << amount) - 1L)) << (reg_size - amount)); - } + return static_cast<unsignedT>(value) >> amount; + case ASR: + return value >> amount; + case ROR: + return (static_cast<unsignedT>(value) >> amount) | + ((value & ((1L << amount) - 1L)) << + (sizeof(unsignedT) * 8 - amount)); default: UNIMPLEMENTED(); return 0; @@ -935,10 +903,12 @@ } -int64_t Simulator::ExtendValue(unsigned reg_size, - int64_t value, - Extend extend_type, - unsigned left_shift) { +template <typename T> +T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) { + const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8; + const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8; + const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8; + switch (extend_type) { case UXTB: value &= kByteMask; @@ -950,13 +920,13 @@ value &= kWordMask; break; case SXTB: - value = (value << 56) >> 56; + value = (value << kSignExtendBShift) >> kSignExtendBShift; break; case SXTH: - value = (value << 48) >> 48; + value = (value << kSignExtendHShift) >> kSignExtendHShift; break; case SXTW: - value = (value << 32) >> 32; + value = (value << kSignExtendWShift) >> kSignExtendWShift; break; case UXTX: case SXTX: @@ -964,8 +934,21 @@ default: UNREACHABLE(); } - int64_t mask = (reg_size == kXRegSizeInBits) ? 
kXRegMask : kWRegMask; - return (value << left_shift) & mask; + return value << left_shift; +} + + +template <typename T> +void Simulator::Extract(Instruction* instr) { + unsigned lsb = instr->ImmS(); + T op2 = reg<T>(instr->Rm()); + T result = op2; + + if (lsb) { + T op1 = reg<T>(instr->Rn()); + result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb)); + } + set_reg<T>(instr->Rd(), result); } @@ -1001,7 +984,8 @@ void Simulator::SetBreakpoint(Instruction* location) { for (unsigned i = 0; i < breakpoints_.size(); i++) { if (breakpoints_.at(i).location == location) { - PrintF("Existing breakpoint at %p was %s\n", + PrintF(stream_, + "Existing breakpoint at %p was %s\n", reinterpret_cast<void*>(location), breakpoints_.at(i).enabled ? "disabled" : "enabled"); breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled; @@ -1010,14 +994,15 @@ } Breakpoint new_breakpoint = {location, true}; breakpoints_.push_back(new_breakpoint); - PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location)); + PrintF(stream_, + "Set a breakpoint at %p\n", reinterpret_cast<void*>(location)); } void Simulator::ListBreakpoints() { - PrintF("Breakpoints:\n"); + PrintF(stream_, "Breakpoints:\n"); for (unsigned i = 0; i < breakpoints_.size(); i++) { - PrintF("%p : %s\n", + PrintF(stream_, "%p : %s\n", reinterpret_cast<void*>(breakpoints_.at(i).location), breakpoints_.at(i).enabled ? 
"enabled" : "disabled"); } @@ -1035,7 +1020,7 @@ } } if (hit_a_breakpoint) { - PrintF("Hit and disabled a breakpoint at %p.\n", + PrintF(stream_, "Hit and disabled a breakpoint at %p.\n", reinterpret_cast<void*>(pc_)); Debug(); } @@ -1080,7 +1065,7 @@ "0b10 (Round towards Minus Infinity)", "0b11 (Round towards Zero)" }; - ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0]))); + DCHECK(fpcr().RMode() < ARRAY_SIZE(rmode)); fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", clr_flag_name, clr_flag_value, @@ -1220,7 +1205,7 @@ void Simulator::VisitConditionalBranch(Instruction* instr) { - ASSERT(instr->Mask(ConditionalBranchMask) == B_cond); + DCHECK(instr->Mask(ConditionalBranchMask) == B_cond); if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) { set_pc(instr->ImmPCOffsetTarget()); } @@ -1277,110 +1262,110 @@ } -void Simulator::AddSubHelper(Instruction* instr, int64_t op2) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; +template<typename T> +void Simulator::AddSubHelper(Instruction* instr, T op2) { bool set_flags = instr->FlagsUpdate(); - int64_t new_val = 0; + T new_val = 0; Instr operation = instr->Mask(AddSubOpMask); switch (operation) { case ADD: case ADDS: { - new_val = AddWithCarry(reg_size, - set_flags, - reg(reg_size, instr->Rn(), instr->RnMode()), - op2); + new_val = AddWithCarry<T>(set_flags, + reg<T>(instr->Rn(), instr->RnMode()), + op2); break; } case SUB: case SUBS: { - new_val = AddWithCarry(reg_size, - set_flags, - reg(reg_size, instr->Rn(), instr->RnMode()), - ~op2, - 1); + new_val = AddWithCarry<T>(set_flags, + reg<T>(instr->Rn(), instr->RnMode()), + ~op2, + 1); break; } default: UNREACHABLE(); } - set_reg(reg_size, instr->Rd(), new_val, instr->RdMode()); + set_reg<T>(instr->Rd(), new_val, instr->RdMode()); } void Simulator::VisitAddSubShifted(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? 
kXRegSizeInBits - : kWRegSizeInBits; - int64_t op2 = ShiftOperand(reg_size, - reg(reg_size, instr->Rm()), - static_cast<Shift>(instr->ShiftDP()), - instr->ImmDPShift()); - AddSubHelper(instr, op2); + Shift shift_type = static_cast<Shift>(instr->ShiftDP()); + unsigned shift_amount = instr->ImmDPShift(); + + if (instr->SixtyFourBits()) { + int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount); + AddSubHelper(instr, op2); + } else { + int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount); + AddSubHelper(instr, op2); + } } void Simulator::VisitAddSubImmediate(Instruction* instr) { int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0); - AddSubHelper(instr, op2); + if (instr->SixtyFourBits()) { + AddSubHelper<int64_t>(instr, op2); + } else { + AddSubHelper<int32_t>(instr, op2); + } } void Simulator::VisitAddSubExtended(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; - int64_t op2 = ExtendValue(reg_size, - reg(reg_size, instr->Rm()), - static_cast<Extend>(instr->ExtendMode()), - instr->ImmExtendShift()); - AddSubHelper(instr, op2); + Extend ext = static_cast<Extend>(instr->ExtendMode()); + unsigned left_shift = instr->ImmExtendShift(); + if (instr->SixtyFourBits()) { + int64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift); + AddSubHelper(instr, op2); + } else { + int32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift); + AddSubHelper(instr, op2); + } } void Simulator::VisitAddSubWithCarry(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? 
kXRegSizeInBits - : kWRegSizeInBits; - int64_t op2 = reg(reg_size, instr->Rm()); - int64_t new_val; - - if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) { - op2 = ~op2; + if (instr->SixtyFourBits()) { + AddSubWithCarry<int64_t>(instr); + } else { + AddSubWithCarry<int32_t>(instr); } - - new_val = AddWithCarry(reg_size, - instr->FlagsUpdate(), - reg(reg_size, instr->Rn()), - op2, - nzcv().C()); - - set_reg(reg_size, instr->Rd(), new_val); } void Simulator::VisitLogicalShifted(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; Shift shift_type = static_cast<Shift>(instr->ShiftDP()); unsigned shift_amount = instr->ImmDPShift(); - int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type, - shift_amount); - if (instr->Mask(NOT) == NOT) { - op2 = ~op2; + + if (instr->SixtyFourBits()) { + int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount); + op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2; + LogicalHelper<int64_t>(instr, op2); + } else { + int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount); + op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2; + LogicalHelper<int32_t>(instr, op2); } - LogicalHelper(instr, op2); } void Simulator::VisitLogicalImmediate(Instruction* instr) { - LogicalHelper(instr, instr->ImmLogical()); + if (instr->SixtyFourBits()) { + LogicalHelper<int64_t>(instr, instr->ImmLogical()); + } else { + LogicalHelper<int32_t>(instr, instr->ImmLogical()); + } } -void Simulator::LogicalHelper(Instruction* instr, int64_t op2) { - unsigned reg_size = instr->SixtyFourBits() ? 
kXRegSizeInBits - : kWRegSizeInBits; - int64_t op1 = reg(reg_size, instr->Rn()); - int64_t result = 0; +template<typename T> +void Simulator::LogicalHelper(Instruction* instr, T op2) { + T op1 = reg<T>(instr->Rn()); + T result = 0; bool update_flags = false; // Switch on the logical operation, stripping out the NOT bit, as it has a @@ -1395,41 +1380,46 @@ } if (update_flags) { - nzcv().SetN(CalcNFlag(result, reg_size)); + nzcv().SetN(CalcNFlag(result)); nzcv().SetZ(CalcZFlag(result)); nzcv().SetC(0); nzcv().SetV(0); } - set_reg(reg_size, instr->Rd(), result, instr->RdMode()); + set_reg<T>(instr->Rd(), result, instr->RdMode()); } void Simulator::VisitConditionalCompareRegister(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; - ConditionalCompareHelper(instr, reg(reg_size, instr->Rm())); + if (instr->SixtyFourBits()) { + ConditionalCompareHelper(instr, xreg(instr->Rm())); + } else { + ConditionalCompareHelper(instr, wreg(instr->Rm())); + } } void Simulator::VisitConditionalCompareImmediate(Instruction* instr) { - ConditionalCompareHelper(instr, instr->ImmCondCmp()); + if (instr->SixtyFourBits()) { + ConditionalCompareHelper<int64_t>(instr, instr->ImmCondCmp()); + } else { + ConditionalCompareHelper<int32_t>(instr, instr->ImmCondCmp()); + } } -void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; - int64_t op1 = reg(reg_size, instr->Rn()); +template<typename T> +void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) { + T op1 = reg<T>(instr->Rn()); if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { // If the condition passes, set the status flags to the result of comparing // the operands. 
if (instr->Mask(ConditionalCompareMask) == CCMP) { - AddWithCarry(reg_size, true, op1, ~op2, 1); + AddWithCarry<T>(true, op1, ~op2, 1); } else { - ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); - AddWithCarry(reg_size, true, op1, op2, 0); + DCHECK(instr->Mask(ConditionalCompareMask) == CCMN); + AddWithCarry<T>(true, op1, op2, 0); } } else { // If the condition fails, set the status flags to the nzcv immediate. @@ -1461,11 +1451,10 @@ void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) { Extend ext = static_cast<Extend>(instr->ExtendMode()); - ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); + DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS(); - int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext, - shift_amount); + int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount); LoadStoreHelper(instr, offset, Offset); } @@ -1505,28 +1494,23 @@ case STR_w: case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break; case LDRSB_w: { - set_wreg(srcdst, - ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB)); + set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB)); break; } case LDRSB_x: { - set_xreg(srcdst, - ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB)); + set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB)); break; } case LDRSH_w: { - set_wreg(srcdst, - ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH)); + set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH)); break; } case LDRSH_x: { - set_xreg(srcdst, - ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH)); + set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH)); break; } case LDRSW_x: { - set_xreg(srcdst, - ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW)); + set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW)); break; } case LDR_s: set_sreg(srcdst, 
MemoryReadFP32(address)); break; @@ -1602,7 +1586,7 @@ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); // 'rt' and 'rt2' can only be aliased for stores. - ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2)); + DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2)); switch (op) { case LDP_w: { @@ -1626,8 +1610,8 @@ break; } case LDPSW_x: { - set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW)); - set_xreg(rt2, ExtendValue(kXRegSizeInBits, + set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW)); + set_xreg(rt2, ExtendValue<int64_t>( MemoryRead32(address + kWRegSize), SXTW)); break; } @@ -1710,7 +1694,7 @@ int64_t offset, AddrMode addrmode) { if ((addrmode == PreIndex) || (addrmode == PostIndex)) { - ASSERT(offset != 0); + DCHECK(offset != 0); uint64_t address = xreg(addr_reg, Reg31IsStackPointer); set_reg(addr_reg, address + offset, Reg31IsStackPointer); } @@ -1730,8 +1714,8 @@ uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) { - ASSERT(address != NULL); - ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); + DCHECK(address != NULL); + DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); uint64_t read = 0; memcpy(&read, address, num_bytes); return read; @@ -1771,8 +1755,8 @@ void Simulator::MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes) { - ASSERT(address != NULL); - ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); + DCHECK(address != NULL); + DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); LogWrite(address, value, num_bytes); memcpy(address, &value, num_bytes); @@ -1806,7 +1790,7 @@ bool is_64_bits = instr->SixtyFourBits() == 1; // Shift is limited for W operations. - ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2)); + DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2)); // Get the shifted immediate. 
int64_t shift = instr->ShiftMoveWide() * 16; @@ -1843,25 +1827,26 @@ void Simulator::VisitConditionalSelect(Instruction* instr) { - uint64_t new_val = xreg(instr->Rn()); - if (ConditionFailed(static_cast<Condition>(instr->Condition()))) { - new_val = xreg(instr->Rm()); + uint64_t new_val = xreg(instr->Rm()); switch (instr->Mask(ConditionalSelectMask)) { - case CSEL_w: - case CSEL_x: break; - case CSINC_w: - case CSINC_x: new_val++; break; - case CSINV_w: - case CSINV_x: new_val = ~new_val; break; - case CSNEG_w: - case CSNEG_x: new_val = -new_val; break; + case CSEL_w: set_wreg(instr->Rd(), new_val); break; + case CSEL_x: set_xreg(instr->Rd(), new_val); break; + case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break; + case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break; + case CSINV_w: set_wreg(instr->Rd(), ~new_val); break; + case CSINV_x: set_xreg(instr->Rd(), ~new_val); break; + case CSNEG_w: set_wreg(instr->Rd(), -new_val); break; + case CSNEG_x: set_xreg(instr->Rd(), -new_val); break; default: UNIMPLEMENTED(); } + } else { + if (instr->SixtyFourBits()) { + set_xreg(instr->Rd(), xreg(instr->Rn())); + } else { + set_wreg(instr->Rd(), wreg(instr->Rn())); + } } - unsigned reg_size = instr->SixtyFourBits() ? 
kXRegSizeInBits - : kWRegSizeInBits; - set_reg(reg_size, instr->Rd(), new_val); } @@ -1895,7 +1880,7 @@ uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) { - ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits)); + DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits)); uint64_t result = 0; for (unsigned i = 0; i < num_bits; i++) { result = (result << 1) | (value & 1); @@ -1919,7 +1904,7 @@ // permute_table[Reverse16] is used by REV16_x, REV16_w // permute_table[Reverse32] is used by REV32_x, REV_w // permute_table[Reverse64] is used by REV_x - ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2)); + DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2)); static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1}, {4, 5, 6, 7, 0, 1, 2, 3}, {0, 1, 2, 3, 4, 5, 6, 7} }; @@ -1932,28 +1917,17 @@ } -void Simulator::VisitDataProcessing2Source(Instruction* instr) { +template <typename T> +void Simulator::DataProcessing2Source(Instruction* instr) { Shift shift_op = NO_SHIFT; - int64_t result = 0; + T result = 0; switch (instr->Mask(DataProcessing2SourceMask)) { - case SDIV_w: { - int32_t rn = wreg(instr->Rn()); - int32_t rm = wreg(instr->Rm()); - if ((rn == kWMinInt) && (rm == -1)) { - result = kWMinInt; - } else if (rm == 0) { - // Division by zero can be trapped, but not on A-class processors. - result = 0; - } else { - result = rn / rm; - } - break; - } + case SDIV_w: case SDIV_x: { - int64_t rn = xreg(instr->Rn()); - int64_t rm = xreg(instr->Rm()); - if ((rn == kXMinInt) && (rm == -1)) { - result = kXMinInt; + T rn = reg<T>(instr->Rn()); + T rm = reg<T>(instr->Rm()); + if ((rn == std::numeric_limits<T>::min()) && (rm == -1)) { + result = std::numeric_limits<T>::min(); } else if (rm == 0) { // Division by zero can be trapped, but not on A-class processors. 
result = 0; @@ -1962,20 +1936,11 @@ } break; } - case UDIV_w: { - uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn())); - uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm())); - if (rm == 0) { - // Division by zero can be trapped, but not on A-class processors. - result = 0; - } else { - result = rn / rm; - } - break; - } + case UDIV_w: case UDIV_x: { - uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn())); - uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm())); + typedef typename make_unsigned<T>::type unsignedT; + unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn())); + unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm())); if (rm == 0) { // Division by zero can be trapped, but not on A-class processors. result = 0; @@ -1995,17 +1960,27 @@ default: UNIMPLEMENTED(); } - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; if (shift_op != NO_SHIFT) { // Shift distance encoded in the least-significant five/six bits of the // register. - int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f; - unsigned shift = wreg(instr->Rm()) & mask; - result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op, - shift); + unsigned shift = wreg(instr->Rm()); + if (sizeof(T) == kWRegSize) { + shift &= kShiftAmountWRegMask; + } else { + shift &= kShiftAmountXRegMask; + } + result = ShiftOperand(reg<T>(instr->Rn()), shift_op, shift); + } + set_reg<T>(instr->Rd(), result); +} + + +void Simulator::VisitDataProcessing2Source(Instruction* instr) { + if (instr->SixtyFourBits()) { + DataProcessing2Source<int64_t>(instr); + } else { + DataProcessing2Source<int32_t>(instr); } - set_reg(reg_size, instr->Rd(), result); } @@ -2032,9 +2007,6 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; - int64_t result = 0; // Extract and sign- or zero-extend 32-bit arguments for widening operations. 
uint64_t rn_u32 = reg<uint32_t>(instr->Rn()); @@ -2055,26 +2027,31 @@ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break; case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break; case SMULH_x: - ASSERT(instr->Ra() == kZeroRegCode); + DCHECK(instr->Ra() == kZeroRegCode); result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm())); break; default: UNIMPLEMENTED(); } - set_reg(reg_size, instr->Rd(), result); + + if (instr->SixtyFourBits()) { + set_xreg(instr->Rd(), result); + } else { + set_wreg(instr->Rd(), result); + } } -void Simulator::VisitBitfield(Instruction* instr) { - unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits - : kWRegSizeInBits; - int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask; - int64_t R = instr->ImmR(); - int64_t S = instr->ImmS(); - int64_t diff = S - R; - int64_t mask; +template <typename T> +void Simulator::BitfieldHelper(Instruction* instr) { + typedef typename make_unsigned<T>::type unsignedT; + T reg_size = sizeof(T) * 8; + T R = instr->ImmR(); + T S = instr->ImmS(); + T diff = S - R; + T mask; if (diff >= 0) { - mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1 - : reg_mask; + mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1 + : static_cast<T>(-1); } else { mask = ((1L << (S + 1)) - 1); mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R)); @@ -2103,29 +2080,37 @@ UNIMPLEMENTED(); } - int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd()); - int64_t src = reg(reg_size, instr->Rn()); + T dst = inzero ? 0 : reg<T>(instr->Rd()); + T src = reg<T>(instr->Rn()); // Rotate source bitfield into place. - int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R)); + T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R)); // Determine the sign extension. - int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1); - int64_t signbits = extend && ((src >> S) & 1) ? 
topbits : 0; + T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1; + T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0) + << (diff + 1); // Merge sign extension, dest/zero and bitfield. result = signbits | (result & mask) | (dst & ~mask); - set_reg(reg_size, instr->Rd(), result); + set_reg<T>(instr->Rd(), result); +} + + +void Simulator::VisitBitfield(Instruction* instr) { + if (instr->SixtyFourBits()) { + BitfieldHelper<int64_t>(instr); + } else { + BitfieldHelper<int32_t>(instr); + } } void Simulator::VisitExtract(Instruction* instr) { - unsigned lsb = instr->ImmS(); - unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits - : kWRegSizeInBits; - set_reg(reg_size, - instr->Rd(), - (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) | - (reg(reg_size, instr->Rn()) << (reg_size - lsb))); + if (instr->SixtyFourBits()) { + Extract<uint64_t>(instr); + } else { + Extract<uint32_t>(instr); + } } @@ -2389,6 +2374,10 @@ case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break; case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break; case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break; + case FRINTM_s: + set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break; + case FRINTM_d: + set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break; case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break; case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break; case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break; @@ -2418,10 +2407,10 @@ template <class T, int ebits, int mbits> static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa, FPRounding round_mode) { - ASSERT((sign == 0) || (sign == 1)); + DCHECK((sign == 0) || (sign == 1)); // Only the FPTieEven rounding mode is implemented. - ASSERT(round_mode == FPTieEven); + DCHECK(round_mode == FPTieEven); USE(round_mode); // Rounding can promote subnormals to normals, and normals to infinities. 
For @@ -2655,17 +2644,27 @@ double error = value - int_result; switch (round_mode) { case FPTieAway: { - // If the error is greater than 0.5, or is equal to 0.5 and the integer - // result is positive, round up. - if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { + // Take care of correctly handling the range ]-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 < value) && (value < 0.0)) { + int_result = -0.0; + + } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is positive, round up. int_result++; } break; } case FPTieEven: { + // Take care of correctly handling the range [-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 <= value) && (value < 0.0)) { + int_result = -0.0; + // If the error is greater than 0.5, or is equal to 0.5 and the integer // result is odd, round up. - if ((error > 0.5) || + } else if ((error > 0.5) || ((error == 0.5) && (fmod(int_result, 2) != 0))) { int_result++; } @@ -2728,7 +2727,7 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) { // Only the FPTieEven rounding mode is implemented. - ASSERT(round_mode == FPTieEven); + DCHECK(round_mode == FPTieEven); USE(round_mode); switch (std::fpclassify(value)) { @@ -2857,7 +2856,7 @@ template <typename T> T Simulator::FPAdd(T op1, T op2) { // NaNs should be handled elsewhere. - ASSERT(!std::isnan(op1) && !std::isnan(op2)); + DCHECK(!std::isnan(op1) && !std::isnan(op2)); if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) { // inf + -inf returns the default NaN. @@ -2872,7 +2871,7 @@ template <typename T> T Simulator::FPDiv(T op1, T op2) { // NaNs should be handled elsewhere. - ASSERT(!std::isnan(op1) && !std::isnan(op2)); + DCHECK(!std::isnan(op1) && !std::isnan(op2)); if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { // inf / inf and 0.0 / 0.0 return the default NaN. 
@@ -2887,7 +2886,7 @@ template <typename T> T Simulator::FPMax(T a, T b) { // NaNs should be handled elsewhere. - ASSERT(!std::isnan(a) && !std::isnan(b)); + DCHECK(!std::isnan(a) && !std::isnan(b)); if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { @@ -2914,7 +2913,7 @@ template <typename T> T Simulator::FPMin(T a, T b) { // NaNs should be handled elsewhere. - ASSERT(!isnan(a) && !isnan(b)); + DCHECK(!std::isnan(a) && !std::isnan(b)); if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { @@ -2942,7 +2941,7 @@ template <typename T> T Simulator::FPMul(T op1, T op2) { // NaNs should be handled elsewhere. - ASSERT(!std::isnan(op1) && !std::isnan(op2)); + DCHECK(!std::isnan(op1) && !std::isnan(op2)); if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { // inf * 0.0 returns the default NaN. @@ -2987,7 +2986,7 @@ } result = FusedMultiplyAdd(op1, op2, a); - ASSERT(!std::isnan(result)); + DCHECK(!std::isnan(result)); // Work around broken fma implementations for rounded zero results: If a is // 0.0, the sign of the result is the sign of op1 * op2 before rounding. @@ -3014,7 +3013,7 @@ template <typename T> T Simulator::FPSub(T op1, T op2) { // NaNs should be handled elsewhere. - ASSERT(!std::isnan(op1) && !std::isnan(op2)); + DCHECK(!std::isnan(op1) && !std::isnan(op2)); if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) { // inf - inf returns the default NaN. @@ -3028,7 +3027,7 @@ template <typename T> T Simulator::FPProcessNaN(T op) { - ASSERT(std::isnan(op)); + DCHECK(std::isnan(op)); return fpcr().DN() ? 
FPDefaultNaN<T>() : ToQuietNaN(op); } @@ -3040,10 +3039,10 @@ } else if (IsSignallingNaN(op2)) { return FPProcessNaN(op2); } else if (std::isnan(op1)) { - ASSERT(IsQuietNaN(op1)); + DCHECK(IsQuietNaN(op1)); return FPProcessNaN(op1); } else if (std::isnan(op2)) { - ASSERT(IsQuietNaN(op2)); + DCHECK(IsQuietNaN(op2)); return FPProcessNaN(op2); } else { return 0.0; @@ -3060,13 +3059,13 @@ } else if (IsSignallingNaN(op3)) { return FPProcessNaN(op3); } else if (std::isnan(op1)) { - ASSERT(IsQuietNaN(op1)); + DCHECK(IsQuietNaN(op1)); return FPProcessNaN(op1); } else if (std::isnan(op2)) { - ASSERT(IsQuietNaN(op2)); + DCHECK(IsQuietNaN(op2)); return FPProcessNaN(op2); } else if (std::isnan(op3)) { - ASSERT(IsQuietNaN(op3)); + DCHECK(IsQuietNaN(op3)); return FPProcessNaN(op3); } else { return 0.0; @@ -3122,7 +3121,7 @@ } } } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { - ASSERT(instr->Mask(SystemHintMask) == HINT); + DCHECK(instr->Mask(SystemHintMask) == HINT); switch (instr->ImmHint()) { case NOP: break; default: UNIMPLEMENTED(); @@ -3165,13 +3164,13 @@ bool Simulator::PrintValue(const char* desc) { if (strcmp(desc, "csp") == 0) { - ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); - PrintF("%s csp:%s 0x%016" PRIx64 "%s\n", + DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); + PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n", clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal); return true; } else if (strcmp(desc, "wcsp") == 0) { - ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); - PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n", + DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); + PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal); return true; } @@ -3181,7 +3180,7 @@ if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false; if (desc[0] == 'v') { - PrintF("%s %s:%s 0x%016" PRIx64 "%s 
(%s%s:%s %g%s %s:%s %g%s)\n", + PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n", clr_fpreg_name, VRegNameForCode(i), clr_fpreg_value, double_to_rawbits(dreg(i)), clr_normal, @@ -3192,25 +3191,25 @@ clr_normal); return true; } else if (desc[0] == 'd') { - PrintF("%s %s:%s %g%s\n", + PrintF(stream_, "%s %s:%s %g%s\n", clr_fpreg_name, DRegNameForCode(i), clr_fpreg_value, dreg(i), clr_normal); return true; } else if (desc[0] == 's') { - PrintF("%s %s:%s %g%s\n", + PrintF(stream_, "%s %s:%s %g%s\n", clr_fpreg_name, SRegNameForCode(i), clr_fpreg_value, sreg(i), clr_normal); return true; } else if (desc[0] == 'w') { - PrintF("%s %s:%s 0x%08" PRIx32 "%s\n", + PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n", clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal); return true; } else { // X register names have a wide variety of starting characters, but anything // else will be an X register. - PrintF("%s %s:%s 0x%016" PRIx64 "%s\n", + PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n", clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal); return true; } @@ -3346,17 +3345,18 @@ (strcmp(cmd, "po") == 0)) { if (argc == 2) { int64_t value; + OFStream os(stdout); if (GetValue(arg1, &value)) { Object* obj = reinterpret_cast<Object*>(value); - PrintF("%s: \n", arg1); + os << arg1 << ": \n"; #ifdef DEBUG - obj->PrintLn(); + obj->Print(os); + os << "\n"; #else - obj->ShortPrint(); - PrintF("\n"); + os << Brief(obj) << "\n"; #endif } else { - PrintF("%s unrecognized\n", arg1); + os << arg1 << " unrecognized\n"; } } else { PrintF("printobject <value>\n" @@ -3446,7 +3446,7 @@ // gdb ------------------------------------------------------------------- } else if (strcmp(cmd, "gdb") == 0) { PrintF("Relinquishing control to gdb.\n"); - OS::DebugBreak(); + base::OS::DebugBreak(); PrintF("Regaining control from gdb.\n"); // sysregs --------------------------------------------------------------- @@ -3529,14 +3529,16 @@ // terms of speed. 
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) { if (message != NULL) { - PrintF("%sDebugger hit %d: %s%s%s\n", + PrintF(stream_, + "%sDebugger hit %d: %s%s%s\n", clr_debug_number, code, clr_debug_message, message, clr_normal); } else { - PrintF("%sDebugger hit %d.%s\n", + PrintF(stream_, + "%sDebugger hit %d.%s\n", clr_debug_number, code, clr_normal); @@ -3559,7 +3561,7 @@ break; default: // We don't support a one-shot LOG_DISASM. - ASSERT((parameters & LOG_DISASM) == 0); + DCHECK((parameters & LOG_DISASM) == 0); // Don't print information that is already being traced. parameters &= ~log_parameters(); // Print the requested information. @@ -3573,8 +3575,8 @@ size_t size = kDebugMessageOffset + strlen(message) + 1; pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize)); // - Verify that the unreachable marker is present. - ASSERT(pc_->Mask(ExceptionMask) == HLT); - ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable); + DCHECK(pc_->Mask(ExceptionMask) == HLT); + DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable); // - Skip past the unreachable marker. set_pc(pc_->following()); @@ -3584,43 +3586,7 @@ } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) { DoRuntimeCall(instr); } else if (instr->ImmException() == kImmExceptionIsPrintf) { - // Read the argument encoded inline in the instruction stream. - uint32_t type; - memcpy(&type, - pc_->InstructionAtOffset(kPrintfTypeOffset), - sizeof(type)); - - const char* format = reg<const char*>(0); - - // Pass all of the relevant PCS registers onto printf. It doesn't - // matter if we pass too many as the extra ones won't be read. 
- int result; - fputs(clr_printf, stream_); - if (type == CPURegister::kRegister) { - result = fprintf(stream_, format, - xreg(1), xreg(2), xreg(3), xreg(4), - xreg(5), xreg(6), xreg(7)); - } else if (type == CPURegister::kFPRegister) { - result = fprintf(stream_, format, - dreg(0), dreg(1), dreg(2), dreg(3), - dreg(4), dreg(5), dreg(6), dreg(7)); - } else { - ASSERT(type == CPURegister::kNoRegister); - result = fprintf(stream_, "%s", format); - } - fputs(clr_normal, stream_); - -#ifdef DEBUG - CorruptAllCallerSavedCPURegisters(); -#endif - - set_xreg(0, result); - - // The printf parameters are inlined in the code, so skip them. - set_pc(pc_->InstructionAtOffset(kPrintfLength)); - - // Set LR as if we'd just called a native printf function. - set_lr(pc()); + DoPrintf(instr); } else if (instr->ImmException() == kImmExceptionIsUnreachable) { fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n", @@ -3628,7 +3594,7 @@ abort(); } else { - OS::DebugBreak(); + base::OS::DebugBreak(); } break; } @@ -3638,6 +3604,133 @@ } } + +void Simulator::DoPrintf(Instruction* instr) { + DCHECK((instr->Mask(ExceptionMask) == HLT) && + (instr->ImmException() == kImmExceptionIsPrintf)); + + // Read the arguments encoded inline in the instruction stream. + uint32_t arg_count; + uint32_t arg_pattern_list; + STATIC_ASSERT(sizeof(*instr) == 1); + memcpy(&arg_count, + instr + kPrintfArgCountOffset, + sizeof(arg_count)); + memcpy(&arg_pattern_list, + instr + kPrintfArgPatternListOffset, + sizeof(arg_pattern_list)); + + DCHECK(arg_count <= kPrintfMaxArgCount); + DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); + + // We need to call the host printf function with a set of arguments defined by + // arg_pattern_list. Because we don't know the types and sizes of the + // arguments, this is very difficult to do in a robust and portable way. To + // work around the problem, we pick apart the format string, and print one + // format placeholder at a time. 
+ + // Allocate space for the format string. We take a copy, so we can modify it. + // Leave enough space for one extra character per expected argument (plus the + // '\0' termination). + const char * format_base = reg<const char *>(0); + DCHECK(format_base != NULL); + size_t length = strlen(format_base) + 1; + char * const format = new char[length + arg_count]; + + // A list of chunks, each with exactly one format placeholder. + const char * chunks[kPrintfMaxArgCount]; + + // Copy the format string and search for format placeholders. + uint32_t placeholder_count = 0; + char * format_scratch = format; + for (size_t i = 0; i < length; i++) { + if (format_base[i] != '%') { + *format_scratch++ = format_base[i]; + } else { + if (format_base[i + 1] == '%') { + // Ignore explicit "%%" sequences. + *format_scratch++ = format_base[i]; + + if (placeholder_count == 0) { + // The first chunk is passed to printf using "%s", so we need to + // unescape "%%" sequences in this chunk. (Just skip the next '%'.) + i++; + } else { + // Otherwise, pass through "%%" unchanged. + *format_scratch++ = format_base[++i]; + } + } else { + CHECK(placeholder_count < arg_count); + // Insert '\0' before placeholders, and store their locations. + *format_scratch++ = '\0'; + chunks[placeholder_count++] = format_scratch; + *format_scratch++ = format_base[i]; + } + } + } + DCHECK(format_scratch <= (format + length + arg_count)); + CHECK(placeholder_count == arg_count); + + // Finally, call printf with each chunk, passing the appropriate register + // argument. Normally, printf returns the number of bytes transmitted, so we + // can emulate a single printf call by adding the result from each chunk. If + // any call returns a negative (error) value, though, just return that value. + + fprintf(stream_, "%s", clr_printf); + + // Because '\0' is inserted before each placeholder, the first string in + // 'format' contains no format placeholders and should be printed literally. 
+ int result = fprintf(stream_, "%s", format); + int pcs_r = 1; // Start at x1. x0 holds the format string. + int pcs_f = 0; // Start at d0. + if (result >= 0) { + for (uint32_t i = 0; i < placeholder_count; i++) { + int part_result = -1; + + uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits); + arg_pattern &= (1 << kPrintfArgPatternBits) - 1; + switch (arg_pattern) { + case kPrintfArgW: + part_result = fprintf(stream_, chunks[i], wreg(pcs_r++)); + break; + case kPrintfArgX: + part_result = fprintf(stream_, chunks[i], xreg(pcs_r++)); + break; + case kPrintfArgD: + part_result = fprintf(stream_, chunks[i], dreg(pcs_f++)); + break; + default: UNREACHABLE(); + } + + if (part_result < 0) { + // Handle error values. + result = part_result; + break; + } + + result += part_result; + } + } + + fprintf(stream_, "%s", clr_normal); + +#ifdef DEBUG + CorruptAllCallerSavedCPURegisters(); +#endif + + // Printf returns its result in x0 (just like the C library's printf). + set_xreg(0, result); + + // The printf parameters are inlined in the code, so skip them. + set_pc(instr->InstructionAtOffset(kPrintfLength)); + + // Set LR as if we'd just called a native printf function. + set_lr(pc()); + + delete[] format; +} + + #endif // USE_SIMULATOR } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/arm64/simulator-arm64.h nodejs-0.11.15/deps/v8/src/arm64/simulator-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/simulator-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/simulator-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ARM64_SIMULATOR_ARM64_H_ #define V8_ARM64_SIMULATOR_ARM64_H_ @@ -31,16 +8,16 @@ #include <stdarg.h> #include <vector> -#include "v8.h" +#include "src/v8.h" -#include "globals.h" -#include "utils.h" -#include "allocation.h" -#include "assembler.h" -#include "arm64/assembler-arm64.h" -#include "arm64/decoder-arm64.h" -#include "arm64/disasm-arm64.h" -#include "arm64/instrument-arm64.h" +#include "src/allocation.h" +#include "src/arm64/assembler-arm64.h" +#include "src/arm64/decoder-arm64.h" +#include "src/arm64/disasm-arm64.h" +#include "src/arm64/instrument-arm64.h" +#include "src/assembler.h" +#include "src/globals.h" +#include "src/utils.h" #define REGISTER_CODE_LIST(R) \ R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ @@ -77,9 +54,6 @@ (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - reinterpret_cast<TryCatch*>(try_catch_address) - // Running without a simulator there is nothing to do. class SimulatorStack : public v8::internal::AllStatic { public: @@ -159,35 +133,28 @@ // Represent a register (r0-r31, v0-v31). -template<int kSizeInBytes> class SimRegisterBase { public: template<typename T> - void Set(T new_value, unsigned size = sizeof(T)) { - ASSERT(size <= kSizeInBytes); - ASSERT(size <= sizeof(new_value)); - // All AArch64 registers are zero-extending; Writing a W register clears the - // top bits of the corresponding X register. - memset(value_, 0, kSizeInBytes); - memcpy(value_, &new_value, size); + void Set(T new_value) { + value_ = 0; + memcpy(&value_, &new_value, sizeof(T)); } - // Copy 'size' bytes of the register to the result, and zero-extend to fill - // the result. 
template<typename T> - T Get(unsigned size = sizeof(T)) const { - ASSERT(size <= kSizeInBytes); + T Get() const { T result; - memset(&result, 0, sizeof(result)); - memcpy(&result, value_, size); + memcpy(&result, &value_, sizeof(T)); return result; } protected: - uint8_t value_[kSizeInBytes]; + int64_t value_; }; -typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31 -typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31 + + +typedef SimRegisterBase SimRegister; // r0-r31 +typedef SimRegisterBase SimFPRegister; // v0-v31 class Simulator : public DecoderVisitor { @@ -244,13 +211,14 @@ public: template<typename T> explicit CallArgument(T argument) { - ASSERT(sizeof(argument) <= sizeof(bits_)); + bits_ = 0; + DCHECK(sizeof(argument) <= sizeof(bits_)); memcpy(&bits_, &argument, sizeof(argument)); type_ = X_ARG; } explicit CallArgument(double argument) { - ASSERT(sizeof(argument) == sizeof(bits_)); + DCHECK(sizeof(argument) == sizeof(bits_)); memcpy(&bits_, &argument, sizeof(argument)); type_ = D_ARG; } @@ -261,10 +229,10 @@ UNIMPLEMENTED(); // Make the D register a NaN to try to trap errors if the callee expects a // double. If it expects a float, the callee should ignore the top word. - ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_)); + DCHECK(sizeof(kFP64SignallingNaN) == sizeof(bits_)); memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN)); // Write the float payload to the S register. - ASSERT(sizeof(argument) <= sizeof(bits_)); + DCHECK(sizeof(argument) <= sizeof(bits_)); memcpy(&bits_, &argument, sizeof(argument)); type_ = D_ARG; } @@ -322,7 +290,7 @@ // Simulation helpers. 
template <typename T> void set_pc(T new_pc) { - ASSERT(sizeof(T) == sizeof(pc_)); + DCHECK(sizeof(T) == sizeof(pc_)); memcpy(&pc_, &new_pc, sizeof(T)); pc_modified_ = true; } @@ -341,7 +309,7 @@ } void ExecuteInstruction() { - ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize)); + DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize)); CheckBreakNext(); Decode(pc_); LogProcessorState(); @@ -354,98 +322,65 @@ VISITOR_LIST(DECLARE) #undef DECLARE - // Register accessors. + bool IsZeroRegister(unsigned code, Reg31Mode r31mode) const { + return ((code == 31) && (r31mode == Reg31IsZeroRegister)); + } + // Register accessors. // Return 'size' bits of the value of an integer register, as the specified // type. The value is zero-extended to fill the result. // - // The only supported values of 'size' are kXRegSizeInBits and - // kWRegSizeInBits. - template<typename T> - T reg(unsigned size, unsigned code, - Reg31Mode r31mode = Reg31IsZeroRegister) const { - unsigned size_in_bytes = size / 8; - ASSERT(size_in_bytes <= sizeof(T)); - ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits)); - ASSERT(code < kNumberOfRegisters); - - if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { - T result; - memset(&result, 0, sizeof(result)); - return result; - } - return registers_[code].Get<T>(size_in_bytes); - } - - // Like reg(), but infer the access size from the template type. template<typename T> T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { - return reg<T>(sizeof(T) * 8, code, r31mode); + DCHECK(code < kNumberOfRegisters); + if (IsZeroRegister(code, r31mode)) { + return 0; + } + return registers_[code].Get<T>(); } // Common specialized accessors for the reg() template. 
- int32_t wreg(unsigned code, - Reg31Mode r31mode = Reg31IsZeroRegister) const { + int32_t wreg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { return reg<int32_t>(code, r31mode); } - int64_t xreg(unsigned code, - Reg31Mode r31mode = Reg31IsZeroRegister) const { + int64_t xreg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { return reg<int64_t>(code, r31mode); } - int64_t reg(unsigned size, unsigned code, - Reg31Mode r31mode = Reg31IsZeroRegister) const { - return reg<int64_t>(size, code, r31mode); - } - // Write 'size' bits of 'value' into an integer register. The value is // zero-extended. This behaviour matches AArch64 register writes. - // - // The only supported values of 'size' are kXRegSizeInBits and - // kWRegSizeInBits. - template<typename T> - void set_reg(unsigned size, unsigned code, T value, - Reg31Mode r31mode = Reg31IsZeroRegister) { - unsigned size_in_bytes = size / 8; - ASSERT(size_in_bytes <= sizeof(T)); - ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits)); - ASSERT(code < kNumberOfRegisters); - - if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { - return; - } - return registers_[code].Set(value, size_in_bytes); - } // Like set_reg(), but infer the access size from the template type. template<typename T> void set_reg(unsigned code, T value, Reg31Mode r31mode = Reg31IsZeroRegister) { - set_reg(sizeof(value) * 8, code, value, r31mode); + DCHECK(code < kNumberOfRegisters); + if (!IsZeroRegister(code, r31mode)) + registers_[code].Set(value); } // Common specialized accessors for the set_reg() template. void set_wreg(unsigned code, int32_t value, Reg31Mode r31mode = Reg31IsZeroRegister) { - set_reg(kWRegSizeInBits, code, value, r31mode); + set_reg(code, value, r31mode); } void set_xreg(unsigned code, int64_t value, Reg31Mode r31mode = Reg31IsZeroRegister) { - set_reg(kXRegSizeInBits, code, value, r31mode); + set_reg(code, value, r31mode); } // Commonly-used special cases. 
template<typename T> void set_lr(T value) { - ASSERT(sizeof(T) == kPointerSize); + DCHECK(sizeof(T) == kPointerSize); set_reg(kLinkRegCode, value); } template<typename T> void set_sp(T value) { - ASSERT(sizeof(T) == kPointerSize); + DCHECK(sizeof(T) == kPointerSize); set_reg(31, value, Reg31IsStackPointer); } @@ -458,24 +393,10 @@ Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); } - // Return 'size' bits of the value of a floating-point register, as the - // specified type. The value is zero-extended to fill the result. - // - // The only supported values of 'size' are kDRegSizeInBits and - // kSRegSizeInBits. - template<typename T> - T fpreg(unsigned size, unsigned code) const { - unsigned size_in_bytes = size / 8; - ASSERT(size_in_bytes <= sizeof(T)); - ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits)); - ASSERT(code < kNumberOfFPRegisters); - return fpregisters_[code].Get<T>(size_in_bytes); - } - - // Like fpreg(), but infer the access size from the template type. template<typename T> T fpreg(unsigned code) const { - return fpreg<T>(sizeof(T) * 8, code); + DCHECK(code < kNumberOfRegisters); + return fpregisters_[code].Get<T>(); } // Common specialized accessors for the fpreg() template. @@ -509,9 +430,9 @@ // This behaviour matches AArch64 register writes. template<typename T> void set_fpreg(unsigned code, T value) { - ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize)); - ASSERT(code < kNumberOfFPRegisters); - fpregisters_[code].Set(value, sizeof(value)); + DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize)); + DCHECK(code < kNumberOfFPRegisters); + fpregisters_[code].Set(value); } // Common specialized accessors for the set_fpreg() template. 
@@ -651,14 +572,19 @@ return !ConditionPassed(cond); } - void AddSubHelper(Instruction* instr, int64_t op2); - int64_t AddWithCarry(unsigned reg_size, - bool set_flags, - int64_t src1, - int64_t src2, - int64_t carry_in = 0); - void LogicalHelper(Instruction* instr, int64_t op2); - void ConditionalCompareHelper(Instruction* instr, int64_t op2); + template<typename T> + void AddSubHelper(Instruction* instr, T op2); + template<typename T> + T AddWithCarry(bool set_flags, + T src1, + T src2, + T carry_in = 0); + template<typename T> + void AddSubWithCarry(Instruction* instr); + template<typename T> + void LogicalHelper(Instruction* instr, T op2); + template<typename T> + void ConditionalCompareHelper(Instruction* instr, T op2); void LoadStoreHelper(Instruction* instr, int64_t offset, AddrMode addrmode); @@ -685,18 +611,21 @@ void MemoryWrite64(uint8_t* address, uint64_t value); void MemoryWriteFP64(uint8_t* address, double value); - int64_t ShiftOperand(unsigned reg_size, - int64_t value, - Shift shift_type, - unsigned amount); - int64_t Rotate(unsigned reg_width, - int64_t value, + + template <typename T> + T ShiftOperand(T value, Shift shift_type, unsigned amount); - int64_t ExtendValue(unsigned reg_width, - int64_t value, - Extend extend_type, - unsigned left_shift = 0); + template <typename T> + T ExtendValue(T value, + Extend extend_type, + unsigned left_shift = 0); + template <typename T> + void Extract(Instruction* instr); + template <typename T> + void DataProcessing2Source(Instruction* instr); + template <typename T> + void BitfieldHelper(Instruction* instr); uint64_t ReverseBits(uint64_t value, unsigned num_bits); uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode); @@ -780,11 +709,15 @@ void CorruptAllCallerSavedCPURegisters(); #endif + // Pseudo Printf instruction + void DoPrintf(Instruction* instr); + // Processor state --------------------------------------- // Output stream. 
FILE* stream_; PrintDisassembler* print_disasm_; + void PRINTF_METHOD_CHECKING TraceSim(const char* format, ...); // Instrumentation. Instrument* instrument_; @@ -811,15 +744,16 @@ // functions, or to save and restore it when entering and leaving generated // code. void AssertSupportedFPCR() { - ASSERT(fpcr().FZ() == 0); // No flush-to-zero support. - ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only. + DCHECK(fpcr().FZ() == 0); // No flush-to-zero support. + DCHECK(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only. // The simulator does not support half-precision operations so fpcr().AHP() // is irrelevant, and is not checked here. } - static int CalcNFlag(uint64_t result, unsigned reg_size) { - return (result >> (reg_size - 1)) & 1; + template <typename T> + static int CalcNFlag(T result) { + return (result >> (sizeof(T) * 8 - 1)) & 1; } static int CalcZFlag(uint64_t result) { @@ -876,10 +810,6 @@ entry, \ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - try_catch_address == NULL ? \ - NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)) - // The simulator has its own stack. Thus it has a different stack limit from // the C-based native code. diff -Nru nodejs-0.11.13/deps/v8/src/arm64/stub-cache-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/stub-cache-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/stub-cache-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/stub-cache-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_ARM64 -#include "ic-inl.h" -#include "codegen.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -40,14 +17,11 @@ #define __ ACCESS_MASM(masm) -void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, - Label* miss_label, - Register receiver, - Handle<Name> name, - Register scratch0, - Register scratch1) { - ASSERT(!AreAliased(receiver, scratch0, scratch1)); - ASSERT(name->IsUniqueName()); +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(!AreAliased(receiver, scratch0, scratch1)); + DCHECK(name->IsUniqueName()); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); @@ -119,7 +93,7 @@ Label miss; - ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3)); + DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3)); // Multiply by 3 because there are 3 fields per entry. __ Add(scratch3, offset, Operand(offset, LSL, 1)); @@ -177,15 +151,15 @@ Label miss; // Make sure the flags does not name a specific type. - ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); // Make sure that there are no register conflicts. - ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); + DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); // Make sure extra and extra2 registers are valid. 
- ASSERT(!extra.is(no_reg)); - ASSERT(!extra2.is(no_reg)); - ASSERT(!extra3.is(no_reg)); + DCHECK(!extra.is(no_reg)); + DCHECK(!extra2.is(no_reg)); + DCHECK(!extra3.is(no_reg)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, @@ -200,7 +174,7 @@ __ Add(scratch, scratch, extra); __ Eor(scratch, scratch, flags); // We shift out the last two bits because they are not part of the hash. - __ Ubfx(scratch, scratch, kHeapObjectTagSize, + __ Ubfx(scratch, scratch, kCacheIndexShift, CountTrailingZeros(kPrimaryTableSize, 64)); // Probe the primary table. @@ -208,8 +182,8 @@ scratch, extra, extra2, extra3); // Primary miss: Compute hash for secondary table. - __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); - __ Add(scratch, scratch, flags >> kHeapObjectTagSize); + __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); + __ Add(scratch, scratch, flags >> kCacheIndexShift); __ And(scratch, scratch, kSecondaryTableSize - 1); // Probe the secondary table. @@ -224,29 +198,8 @@ } -void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype) { - // Load the global or builtins object from the current context. - __ Ldr(prototype, GlobalObjectMemOperand()); - // Load the native context from the global or builtins object. - __ Ldr(prototype, - FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); - // Load the function from the native context. - __ Ldr(prototype, ContextMemOperand(prototype, index)); - // Load the initial map. The global functions all have initial maps. - __ Ldr(prototype, - FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); - // Load the prototype from the initial map. 
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); -} - - -void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( - MacroAssembler* masm, - int index, - Register prototype, - Label* miss) { +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); // Get the global function with the given index. Handle<JSFunction> function( @@ -267,50 +220,9 @@ } -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - bool inobject, - int index, - Representation representation) { - ASSERT(!representation.IsDouble()); - USE(representation); - if (inobject) { - int offset = index * kPointerSize; - __ Ldr(dst, FieldMemOperand(src, offset)); - } else { - // Calculate the offset into the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - __ Ldr(dst, FieldMemOperand(dst, offset)); - } -} - - -void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, - Register receiver, - Register scratch, - Label* miss_label) { - ASSERT(!AreAliased(receiver, scratch)); - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss_label); - - // Check that the object is a JS array. - __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, - miss_label); - - // Load length directly from the JS array. 
- __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Ret(); -} - - -void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, - Register receiver, - Register scratch1, - Register scratch2, - Label* miss_label) { +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register scratch1, + Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); // TryGetFunctionPrototype can't put the result directly in x0 because the // 3 inputs registers can't alias and we call this function from @@ -324,31 +236,134 @@ // Generate code to check that a global property cell is empty. Create // the property cell at compilation time if no cell exists for the // property. -void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, - Handle<JSGlobalObject> global, - Handle<Name> name, - Register scratch, - Label* miss) { +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); - ASSERT(cell->value()->IsTheHole()); + DCHECK(cell->value()->IsTheHole()); __ Mov(scratch, Operand(cell)); __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss); } -void StoreStubCompiler::GenerateNegativeHolderLookup( - MacroAssembler* masm, - Handle<JSObject> holder, - Register holder_reg, - Handle<Name> name, - Label* miss) { - if (holder->IsJSGlobalObject()) { - GenerateCheckPropertyCell( - masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); - } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { - GenerateDictionaryNegativeLookup( - masm, miss, holder_reg, name, scratch1(), scratch2()); +static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, + Register holder, 
Register name, + Handle<JSObject> holder_obj) { + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); + + __ Push(name); + Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); + Register scratch = name; + __ Mov(scratch, Operand(interceptor)); + __ Push(scratch, receiver, holder); +} + + +static void CompileCallLoadPropertyWithInterceptor( + MacroAssembler* masm, Register receiver, Register holder, Register name, + Handle<JSObject> holder_obj, IC::UtilityId id) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); +} + + +// Generate call to api function. +void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch, + bool is_store, int argc, Register* values) { + DCHECK(!AreAliased(receiver, scratch)); + + MacroAssembler::PushPopQueue queue(masm); + queue.Queue(receiver); + // Write the arguments to the stack frame. + for (int i = 0; i < argc; i++) { + Register arg = values[argc - 1 - i]; + DCHECK(!AreAliased(receiver, scratch, arg)); + queue.Queue(arg); + } + queue.PushQueued(); + + DCHECK(optimization.is_simple_api_call()); + + // Abi for CallApiFunctionStub. + Register callee = x0; + Register call_data = x4; + Register holder = x2; + Register api_function_address = x1; + + // Put holder in place. 
+ CallOptimization::HolderLookup holder_lookup; + Handle<JSObject> api_holder = + optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); + switch (holder_lookup) { + case CallOptimization::kHolderIsReceiver: + __ Mov(holder, receiver); + break; + case CallOptimization::kHolderFound: + __ LoadObject(holder, api_holder); + break; + case CallOptimization::kHolderNotFound: + UNREACHABLE(); + break; + } + + Isolate* isolate = masm->isolate(); + Handle<JSFunction> function = optimization.constant_function(); + Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); + Handle<Object> call_data_obj(api_call_info->data(), isolate); + + // Put callee in place. + __ LoadObject(callee, function); + + bool call_data_undefined = false; + // Put call_data in place. + if (isolate->heap()->InNewSpace(*call_data_obj)) { + __ LoadObject(call_data, api_call_info); + __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); + } else if (call_data_obj->IsUndefined()) { + call_data_undefined = true; + __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); + } else { + __ LoadObject(call_data, call_data_obj); + } + + // Put api_function_address in place. + Address function_address = v8::ToCData<Address>(api_call_info->callback()); + ApiFunction fun(function_address); + ExternalReference ref = ExternalReference( + &fun, ExternalReference::DIRECT_API_CALL, masm->isolate()); + __ Mov(api_function_address, ref); + + // Jump to stub. 
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); + __ TailCallStub(&stub); +} + + +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ Jump(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ Bind(label); + __ Mov(this->name(), Operand(name)); } } @@ -357,22 +372,13 @@ // When leaving generated code after success, the receiver_reg and storage_reg // may be clobbered. Upon branch to miss_label, the receiver and name registers // have their original values. -void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss_label, - Label* slow) { +void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register scratch3, Label* miss_label, Label* slow) { Label exit; - ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg, + DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2, scratch3)); // We don't need scratch3. 
@@ -382,10 +388,10 @@ DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); Representation representation = details.representation(); - ASSERT(!representation.IsNone()); + DCHECK(!representation.IsNone()); if (details.type() == CONSTANT) { - Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); + Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); __ LoadObject(scratch1, constant); __ Cmp(value_reg, scratch1); __ B(ne, miss_label); @@ -393,14 +399,28 @@ __ JumpIfNotSmi(value_reg, miss_label); } else if (representation.IsHeapObject()) { __ JumpIfSmi(value_reg, miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + if (!it.Done()) { + __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + while (true) { + __ CompareMap(scratch1, it.Current()); + it.Advance(); + if (it.Done()) { + __ B(ne, miss_label); + break; + } + __ B(eq, &do_store); + } + __ Bind(&do_store); + } } else if (representation.IsDouble()) { - UseScratchRegisterScope temps(masm); + UseScratchRegisterScope temps(masm()); DoubleRegister temp_double = temps.AcquireD(); __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag); - Label do_store, heap_number; - __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2); - + Label do_store; __ JumpIfSmi(value_reg, &do_store); __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, @@ -408,24 +428,24 @@ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); __ Bind(&do_store); - __ Str(temp_double, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); + __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double, + NoReg, MUTABLE); } - // Stub never generated for non-global objects that require access checks. 
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + // Stub never generated for objects that require access checks. + DCHECK(!transition->is_access_check_needed()); // Perform map transition for the receiver if necessary. - if ((details.type() == FIELD) && - (object->map()->unused_property_fields() == 0)) { + if (details.type() == FIELD && + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { // The properties must be extended before we can store the value. // We jump to a runtime call that extends the properties array. __ Mov(scratch1, Operand(transition)); __ Push(receiver_reg, scratch1, value_reg); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), - masm->isolate()), - 3, - 1); + isolate()), + 3, 1); return; } @@ -444,7 +464,7 @@ OMIT_SMI_CHECK); if (details.type() == CONSTANT) { - ASSERT(value_reg.is(x0)); + DCHECK(value_reg.is(x0)); __ Ret(); return; } @@ -455,7 +475,7 @@ // Adjust for the number of properties stored in the object. Even in the // face of a transition we can use the old map here because the size of the // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); + index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. SmiCheck smi_check = representation.IsTagged() @@ -463,7 +483,7 @@ Register prop_reg = representation.IsDouble() ? storage_reg : value_reg; if (index < 0) { // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); + int offset = transition->instance_size() + (index * kPointerSize); __ Str(prop_reg, FieldMemOperand(receiver_reg, offset)); if (!representation.IsSmi()) { @@ -506,295 +526,57 @@ __ Bind(&exit); // Return the value (register x0). - ASSERT(value_reg.is(x0)); - __ Ret(); -} - - -// Generate StoreField code, value is passed in x0 register. 
-// When leaving generated code after success, the receiver_reg and name_reg may -// be clobbered. Upon branch to miss_label, the receiver and name registers have -// their original values. -void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { - // x0 : value - Label exit; - - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - int index = lookup->GetFieldIndex().field_index(); - - // Adjust for the number of properties stored in the object. Even in the - // face of a transition we can use the old map here because the size of the - // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); - - Representation representation = lookup->representation(); - ASSERT(!representation.IsNone()); - if (representation.IsSmi()) { - __ JumpIfNotSmi(value_reg, miss_label); - } else if (representation.IsHeapObject()) { - __ JumpIfSmi(value_reg, miss_label); - } else if (representation.IsDouble()) { - UseScratchRegisterScope temps(masm); - DoubleRegister temp_double = temps.AcquireD(); - - __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag); - - // Load the double storage. - if (index < 0) { - int offset = (index * kPointerSize) + object->map()->instance_size(); - __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset)); - } else { - int offset = (index * kPointerSize) + FixedArray::kHeaderSize; - __ Ldr(scratch1, - FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ Ldr(scratch1, FieldMemOperand(scratch1, offset)); - } - - // Store the value into the storage. 
- Label do_store, heap_number; - - __ JumpIfSmi(value_reg, &do_store); - - __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, - miss_label, DONT_DO_SMI_CHECK); - __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); - - __ Bind(&do_store); - __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); - - // Return the value (register x0). - ASSERT(value_reg.is(x0)); - __ Ret(); - return; - } - - // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; - if (index < 0) { - // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); - __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); - - if (!representation.IsSmi()) { - // Skip updating write barrier if storing a smi. - __ JumpIfSmi(value_reg, &exit); - - // Update the write barrier for the array address. - // Pass the now unused name_reg as a scratch register. - __ Mov(name_reg, value_reg); - __ RecordWriteField(receiver_reg, - offset, - name_reg, - scratch1, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } else { - // Write to the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - // Get the properties array - __ Ldr(scratch1, - FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ Str(value_reg, FieldMemOperand(scratch1, offset)); - - if (!representation.IsSmi()) { - // Skip updating write barrier if storing a smi. - __ JumpIfSmi(value_reg, &exit); - - // Update the write barrier for the array address. - // Ok to clobber receiver_reg and name_reg, since we return. - __ Mov(name_reg, value_reg); - __ RecordWriteField(scratch1, - offset, - name_reg, - receiver_reg, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } - - __ Bind(&exit); - // Return the value (register x0). 
- ASSERT(value_reg.is(x0)); + DCHECK(value_reg.is(x0)); __ Ret(); } -void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { - if (!label->is_unused()) { - __ Bind(label); - __ Mov(this->name(), Operand(name)); - } -} - - -static void PushInterceptorArguments(MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj) { - STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); - STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); - STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); - STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); - STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); - - __ Push(name); - Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); - ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); - Register scratch = name; - __ Mov(scratch, Operand(interceptor)); - __ Push(scratch, receiver, holder); -} - - -static void CompileCallLoadPropertyWithInterceptor( - MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj, - IC::UtilityId id) { - PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - - __ CallExternalReference( - ExternalReference(IC_Utility(id), masm->isolate()), - StubCache::kInterceptorArgsLength); -} - - -// Generate call to api function. -void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - Handle<Map> receiver_map, - Register receiver, - Register scratch, - bool is_store, - int argc, - Register* values) { - ASSERT(!AreAliased(receiver, scratch)); - - MacroAssembler::PushPopQueue queue(masm); - queue.Queue(receiver); - // Write the arguments to the stack frame. 
- for (int i = 0; i < argc; i++) { - Register arg = values[argc-1-i]; - ASSERT(!AreAliased(receiver, scratch, arg)); - queue.Queue(arg); - } - queue.PushQueued(); - - ASSERT(optimization.is_simple_api_call()); - - // Abi for CallApiFunctionStub. - Register callee = x0; - Register call_data = x4; - Register holder = x2; - Register api_function_address = x1; - - // Put holder in place. - CallOptimization::HolderLookup holder_lookup; - Handle<JSObject> api_holder = - optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); - switch (holder_lookup) { - case CallOptimization::kHolderIsReceiver: - __ Mov(holder, receiver); - break; - case CallOptimization::kHolderFound: - __ LoadObject(holder, api_holder); - break; - case CallOptimization::kHolderNotFound: - UNREACHABLE(); +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + while (true) { + __ CompareMap(scratch1(), it.Current()); + it.Advance(); + if (it.Done()) { + __ B(ne, miss_label); break; + } + __ B(eq, &do_store); } + __ Bind(&do_store); - Isolate* isolate = masm->isolate(); - Handle<JSFunction> function = optimization.constant_function(); - Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data_obj(api_call_info->data(), isolate); - - // Put callee in place. - __ LoadObject(callee, function); - - bool call_data_undefined = false; - // Put call_data in place. 
- if (isolate->heap()->InNewSpace(*call_data_obj)) { - __ LoadObject(call_data, api_call_info); - __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); - } else if (call_data_obj->IsUndefined()) { - call_data_undefined = true; - __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); - } else { - __ LoadObject(call_data, call_data_obj); - } - - // Put api_function_address in place. - Address function_address = v8::ToCData<Address>(api_call_info->callback()); - ApiFunction fun(function_address); - ExternalReference ref = ExternalReference(&fun, - ExternalReference::DIRECT_API_CALL, - masm->isolate()); - __ Mov(api_function_address, ref); - - // Jump to stub. - CallApiFunctionStub stub(is_store, call_data_undefined, argc); - __ TailCallStub(&stub); -} - - -void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { - __ Jump(code, RelocInfo::CODE_TARGET); + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); } -#undef __ -#define __ ACCESS_MASM(masm()) - - -Register StubCompiler::CheckPrototypes(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Register holder_reg, - Register scratch1, - Register scratch2, - Handle<Name> name, - Label* miss, - PrototypeCheckType check) { - Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); // object_reg and holder_reg registers can alias. - ASSERT(!AreAliased(object_reg, scratch1, scratch2)); - ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); + DCHECK(!AreAliased(object_reg, scratch1, scratch2)); + DCHECK(!AreAliased(holder_reg, scratch1, scratch2)); // Keep track of the current object in register reg. 
Register reg = object_reg; int depth = 0; Handle<JSObject> current = Handle<JSObject>::null(); - if (type->IsConstant()) { - current = Handle<JSObject>::cast(type->AsConstant()); + if (type()->IsConstant()) { + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); } Handle<JSObject> prototype = Handle<JSObject>::null(); Handle<Map> current_map = receiver_map; - Handle<Map> holder_map(holder->map()); + Handle<Map> holder_map(holder()->map()); // Traverse the prototype chain and check the maps in the prototype chain for // fast and global objects or do negative lookup for normal objects. while (!current_map.is_identical_to(holder_map)) { @@ -802,19 +584,19 @@ // Only global objects and objects that do not require access // checks are allowed in stubs. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); prototype = handle(JSObject::cast(current_map->prototype())); if (current_map->is_dictionary_map() && - !current_map->IsJSGlobalObjectMap() && - !current_map->IsJSGlobalProxyMap()) { + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. if (!name->IsUniqueName()) { - ASSERT(name->IsString()); + DCHECK(name->IsString()); name = factory()->InternalizeString(Handle<String>::cast(name)); } - ASSERT(current.is_null() || - (current->property_dictionary()->FindEntry(*name) == + DCHECK(current.is_null() || + (current->property_dictionary()->FindEntry(name) == NameDictionary::kNotFound)); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, @@ -824,13 +606,14 @@ reg = holder_reg; // From now on the object will be in holder_reg. 
__ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); } else { - bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) || - heap()->InNewSpace(*prototype); - Register map_reg = NoReg; - if (need_map) { - map_reg = scratch1; - __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); - } + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. + // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = + heap()->InNewSpace(*prototype) || depth == 1; + Register map_reg = scratch1; + __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); if (depth != 1 || check == CHECK_ALL_MAPS) { __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK); @@ -839,6 +622,9 @@ // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). if (current_map->IsJSGlobalProxyMap()) { UseScratchRegisterScope temps(masm()); __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss); @@ -850,12 +636,9 @@ reg = holder_reg; // From now on the object will be in holder_reg. - if (heap()->InNewSpace(*prototype)) { - // The prototype is in new space; we cannot store a reference to it - // in the code. Load it from the map. + if (load_prototype_from_map) { __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); } else { - // The prototype is in old space; load it directly. __ Mov(reg, Operand(prototype)); } } @@ -875,7 +658,7 @@ } // Perform security check for access to the global object. 
- ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss); @@ -886,7 +669,7 @@ } -void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ B(&success); @@ -899,12 +682,12 @@ } -void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ B(&success); - GenerateRestoreName(masm(), miss, name); + GenerateRestoreName(miss, name); TailCallBuiltin(masm(), MissBuiltin(kind())); __ Bind(&success); @@ -912,82 +695,16 @@ } -Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> callback) { - Label miss; - - Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); - // HandlerFrontendHeader can return its result into scratch1() so do not - // use it. - Register scratch2 = this->scratch2(); - Register scratch3 = this->scratch3(); - Register dictionary = this->scratch4(); - ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary)); - - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - // Load the properties dictionary. - __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - NameDictionaryLookupStub::GeneratePositiveLookup(masm(), - &miss, - &probe_done, - dictionary, - this->name(), - scratch2, - scratch3); - __ Bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // pointer into the dictionary. Check that the value is the callback. 
- Register pointer = scratch3; - const int kElementsStartOffset = NameDictionary::kHeaderSize + - NameDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset)); - __ Cmp(scratch2, Operand(callback)); - __ B(ne, &miss); - } - - HandlerFrontendFooter(name, &miss); - return reg; -} - - -void LoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - __ Mov(receiver(), reg); - if (kind() == Code::LOAD_IC) { - LoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } else { - KeyedLoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } -} - - -void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. 
__ LoadObject(x0, value); __ Ret(); } -void LoadStubCompiler::GenerateLoadCallback( - Register reg, - Handle<ExecutableAccessorInfo> callback) { - ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg)); +void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { + DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg)); // Build ExecutableAccessorInfo::args_ list on the stack and push property // name below the exit frame to make GC aware of them and store pointers to @@ -1034,21 +751,18 @@ ExternalReference ref = ExternalReference(&fun, type, isolate()); __ Mov(getter_address_reg, ref); - CallApiGetterStub stub; + CallApiGetterStub stub(isolate()); __ TailCallStub(&stub); } -void LoadStubCompiler::GenerateLoadInterceptor( - Register holder_reg, - Handle<Object> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Handle<Name> name) { - ASSERT(!AreAliased(receiver(), this->name(), +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(), scratch3())); - ASSERT(interceptor_holder->HasNamedInterceptor()); - ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added later. 
@@ -1058,10 +772,12 @@ compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { - ExecutableAccessorInfo* callback = - ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); - compile_followup_inline = callback->getter() != NULL && - callback->IsCompatibleReceiver(*object); + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); } } @@ -1069,13 +785,13 @@ // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. - bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); + bool must_perfrom_prototype_check = *holder() != lookup->holder(); bool must_preserve_receiver_reg = !receiver().Is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); @@ -1092,7 +808,7 @@ // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor( - masm(), receiver(), holder_reg, this->name(), interceptor_holder, + masm(), receiver(), holder_reg, this->name(), holder(), IC::kLoadPropertyWithInterceptorOnly); // Check if interceptor provided a value for property. If it's @@ -1112,49 +828,34 @@ } // Leave the internal frame. 
} - GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); + GenerateLoadPostInterceptor(holder_reg, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. - PushInterceptorArguments( - masm(), receiver(), holder_reg, this->name(), interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor), isolate()); - __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); } } -void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { - UseScratchRegisterScope temps(masm()); - // Check that the object is a boolean. - Register true_root = temps.AcquireX(); - Register false_root = temps.AcquireX(); - ASSERT(!AreAliased(object, true_root, false_root)); - __ LoadTrueFalseRoots(true_root, false_root); - __ Cmp(object, true_root); - __ Ccmp(object, false_root, ZFlag, ne); - __ B(ne, miss); -} - - -Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, Handle<ExecutableAccessorInfo> callback) { - ASM_LOCATION("StoreStubCompiler::CompileStoreCallback"); - Register holder_reg = HandlerFrontend( - IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); + ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback"); + Register holder_reg = Frontend(receiver(), name); // Stub never generated for non-global objects that require access checks. 
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); + DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded()); // receiver() and holder_reg can alias. - ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value())); - ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value())); + DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value())); + DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value())); __ Mov(scratch1(), Operand(callback)); __ Mov(scratch2(), Operand(name)); __ Push(receiver(), holder_reg, scratch1(), scratch2(), value()); @@ -1173,10 +874,8 @@ #define __ ACCESS_MASM(masm) -void StoreStubCompiler::GenerateStoreViaSetter( - MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, Handle<JSFunction> setter) { // ----------- S t a t e ------------- // -- lr : return address @@ -1194,8 +893,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. __ Ldr(receiver, - FieldMemOperand( - receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ Push(receiver, value()); ParameterCount actual(1); @@ -1222,18 +920,17 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> object, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( Handle<Name> name) { Label miss; - ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor"); + ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor"); __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. 
- ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); __ TailCallExternalReference(store_ic_property, 3, 1); // Return the generated code. @@ -1241,67 +938,41 @@ } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name) { - NonexistentHandlerFrontend(type, last, name); - - // Return undefined if maps of the full prototype chain are still the - // same and no global property with this name contains a value. - __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); - __ Ret(); - - // Return the generated code. - return GetCode(kind(), Code::FAST, name); -} - - // TODO(all): The so-called scratch registers are significant in some cases. For -// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for -// KeyedStoreCompiler::transition_map(). We should verify which registers are -// actually scratch registers, and which are important. For now, we use the same -// assignments as ARM to remain on the safe side. +// example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3) is +// actually +// used for KeyedStoreCompiler::transition_map(). We should verify which +// registers are actually scratch registers, and which are important. For now, +// we use the same assignments as ARM to remain on the safe side. -Register* LoadStubCompiler::registers() { +Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4. - static Register registers[] = { x0, x2, x3, x1, x4, x5 }; - return registers; -} - - -Register* KeyedLoadStubCompiler::registers() { - // receiver, name/key, scratch1, scratch2, scratch3, scratch4. 
- static Register registers[] = { x1, x0, x2, x3, x4, x5 }; + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, x3, x0, x4, x5 }; return registers; } -Register StoreStubCompiler::value() { - return x0; -} - - -Register* StoreStubCompiler::registers() { +Register* PropertyAccessCompiler::store_calling_convention() { // receiver, value, scratch1, scratch2, scratch3. - static Register registers[] = { x1, x2, x3, x4, x5 }; + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + DCHECK(x3.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, x3, x4, x5 }; return registers; } -Register* KeyedStoreStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { x2, x1, x3, x4, x5 }; - return registers; -} +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } #undef __ #define __ ACCESS_MASM(masm) -void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, - Handle<JSFunction> getter) { +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1310,8 +981,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. 
__ Ldr(receiver, - FieldMemOperand( - receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ Push(receiver); ParameterCount actual(0); @@ -1335,54 +1005,58 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadGlobal( - Handle<HeapType> type, - Handle<GlobalObject> global, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete) { +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { Label miss; - HandlerFrontendHeader(type, receiver(), global, name, &miss); + FrontendHeader(receiver(), name, &miss); // Get the value from the cell. - __ Mov(x3, Operand(cell)); - __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset)); + Register result = StoreIC::ValueRegister(); + __ Mov(result, Operand(cell)); + __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); // Check for deleted property if property can actually be deleted. - if (!is_dont_delete) { - __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss); + if (is_configurable) { + __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss); } Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3); - __ Mov(x0, x4); __ Ret(); - HandlerFrontendFooter(name, &miss); + FrontendFooter(name, &miss); // Return the generated code. 
return GetCode(kind(), Code::NORMAL, name); } -Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( - TypeHandleList* types, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; if (check == PROPERTY && (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - __ CompareAndBranch(this->name(), Operand(name), ne, &miss); + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ CompareAndBranch(this->name(), Operand(name), ne, &miss); + } } Label number_case; Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; __ JumpIfSmi(receiver(), smi_target); + // Polymorphic keyed stores may use the map register Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); int receiver_count = types->length(); int number_of_handled_maps = 0; @@ -1395,14 +1069,14 @@ __ Cmp(map_reg, Operand(map)); __ B(ne, &try_next); if (type->Is(HeapType::Number())) { - ASSERT(!number_case.is_unused()); + DCHECK(!number_case.is_unused()); __ Bind(&number_case); } __ Jump(handlers->at(current), RelocInfo::CODE_TARGET); __ Bind(&try_next); } } - ASSERT(number_of_handled_maps != 0); + DCHECK(number_of_handled_maps != 0); __ Bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); @@ -1410,28 +1084,16 @@ // Return the generated code. InlineCacheState state = (number_of_handled_maps > 1) ? 
POLYMORPHIC : MONOMORPHIC; - return GetICCode(kind(), type, name, state); + return GetCode(kind(), type, name, state); } -void StoreStubCompiler::GenerateStoreArrayLength() { - // Prepare tail call to StoreIC_ArrayLength. - __ Push(receiver(), value()); - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), - masm()->isolate()); - __ TailCallExternalReference(ref, 2, 1); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { Label miss; - ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic"); + ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic"); __ JumpIfSmi(receiver(), &miss); @@ -1454,35 +1116,32 @@ __ Bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); - return GetICCode( - kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } #undef __ #define __ ACCESS_MASM(masm) -void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( +void ElementHandlerCompiler::GenerateLoadDictionaryElement( MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- x0 : key - // -- x1 : receiver - // ----------------------------------- + // The return address is in lr. 
Label slow, miss; Register result = x0; - Register key = x0; - Register receiver = x1; + Register key = LoadIC::NameRegister(); + Register receiver = LoadIC::ReceiverRegister(); + DCHECK(receiver.is(x1)); + DCHECK(key.is(x2)); __ JumpIfNotSmi(key, &miss); __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6); + __ LoadFromNumberDictionary(&slow, x4, key, result, x7, x3, x5, x6); __ Ret(); __ Bind(&slow); __ IncrementCounter( - masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3); + masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3); TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); // Miss case, call the runtime. diff -Nru nodejs-0.11.13/deps/v8/src/arm64/utils-arm64.cc nodejs-0.11.15/deps/v8/src/arm64/utils-arm64.cc --- nodejs-0.11.13/deps/v8/src/arm64/utils-arm64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/utils-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,10 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #if V8_TARGET_ARCH_ARM64 -#include "arm64/utils-arm64.h" +#include "src/arm64/utils-arm64.h" namespace v8 { @@ -38,7 +15,7 @@ int CountLeadingZeros(uint64_t value, int width) { // TODO(jbramley): Optimize this for ARM64 hosts. - ASSERT((width == 32) || (width == 64)); + DCHECK((width == 32) || (width == 64)); int count = 0; uint64_t bit_test = 1UL << (width - 1); while ((count < width) && ((bit_test & value) == 0)) { @@ -51,7 +28,7 @@ int CountLeadingSignBits(int64_t value, int width) { // TODO(jbramley): Optimize this for ARM64 hosts. - ASSERT((width == 32) || (width == 64)); + DCHECK((width == 32) || (width == 64)); if (value >= 0) { return CountLeadingZeros(value, width) - 1; } else { @@ -62,7 +39,7 @@ int CountTrailingZeros(uint64_t value, int width) { // TODO(jbramley): Optimize this for ARM64 hosts. 
- ASSERT((width == 32) || (width == 64)); + DCHECK((width == 32) || (width == 64)); int count = 0; while ((count < width) && (((value >> count) & 1) == 0)) { count++; @@ -74,7 +51,7 @@ int CountSetBits(uint64_t value, int width) { // TODO(jbramley): Would it be useful to allow other widths? The // implementation already supports them. - ASSERT((width == 32) || (width == 64)); + DCHECK((width == 32) || (width == 64)); // Mask out unused bits to ensure that they are not counted. value &= (0xffffffffffffffffUL >> (64-width)); @@ -101,8 +78,13 @@ } +uint64_t LargestPowerOf2Divisor(uint64_t value) { + return value & -value; +} + + int MaskToBit(uint64_t mask) { - ASSERT(CountSetBits(mask, 64) == 1); + DCHECK(CountSetBits(mask, 64) == 1); return CountTrailingZeros(mask, 64); } diff -Nru nodejs-0.11.13/deps/v8/src/arm64/utils-arm64.h nodejs-0.11.15/deps/v8/src/arm64/utils-arm64.h --- nodejs-0.11.13/deps/v8/src/arm64/utils-arm64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arm64/utils-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM64_UTILS_ARM64_H_ #define V8_ARM64_UTILS_ARM64_H_ #include <cmath> -#include "v8.h" -#include "arm64/constants-arm64.h" +#include "src/v8.h" + +#include "src/arm64/constants-arm64.h" #define REGISTER_CODE_LIST(R) \ R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ @@ -79,6 +57,7 @@ int CountLeadingSignBits(int64_t value, int width); int CountTrailingZeros(uint64_t value, int width); int CountSetBits(uint64_t value, int width); +uint64_t LargestPowerOf2Divisor(uint64_t value); int MaskToBit(uint64_t mask); @@ -109,13 +88,13 @@ // Convert the NaN in 'num' to a quiet NaN. 
inline double ToQuietNaN(double num) { - ASSERT(isnan(num)); + DCHECK(std::isnan(num)); return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask); } inline float ToQuietNaN(float num) { - ASSERT(isnan(num)); + DCHECK(std::isnan(num)); return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask); } diff -Nru nodejs-0.11.13/deps/v8/src/arraybuffer.js nodejs-0.11.15/deps/v8/src/arraybuffer.js --- nodejs-0.11.13/deps/v8/src/arraybuffer.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/arraybuffer.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. "use strict"; @@ -40,12 +17,12 @@ } } -function ArrayBufferGetByteLength() { +function ArrayBufferGetByteLen() { if (!IS_ARRAYBUFFER(this)) { throw MakeTypeError('incompatible_method_receiver', ['ArrayBuffer.prototype.byteLength', this]); } - return %ArrayBufferGetByteLength(this); + return %_ArrayBufferGetByteLength(this); } // ES6 Draft 15.13.5.5.3 @@ -60,7 +37,7 @@ end = TO_INTEGER(end); } var first; - var byte_length = %ArrayBufferGetByteLength(this); + var byte_length = %_ArrayBufferGetByteLength(this); if (relativeStart < 0) { first = MathMax(byte_length + relativeStart, 0); } else { @@ -85,7 +62,7 @@ return result; } -function ArrayBufferIsView(obj) { +function ArrayBufferIsViewJS(obj) { return %ArrayBufferIsView(obj); } @@ -97,12 +74,13 @@ %FunctionSetPrototype($ArrayBuffer, new $Object()); // Set up the constructor property on the ArrayBuffer prototype object. 
- %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM); + %AddNamedProperty( + $ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM); - InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength); + InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen); InstallFunctions($ArrayBuffer, DONT_ENUM, $Array( - "isView", ArrayBufferIsView + "isView", ArrayBufferIsViewJS )); InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array( diff -Nru nodejs-0.11.13/deps/v8/src/array-iterator.js nodejs-0.11.15/deps/v8/src/array-iterator.js --- nodejs-0.11.13/deps/v8/src/array-iterator.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/array-iterator.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,28 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// 'AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 'use strict'; + // This file relies on the fact that the following declaration has been made // in runtime.js: // var $Array = global.Array; -var ARRAY_ITERATOR_KIND_KEYS = 1; -var ARRAY_ITERATOR_KIND_VALUES = 2; -var ARRAY_ITERATOR_KIND_ENTRIES = 3; -// The spec draft also has "sparse" but it is never used. var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object"); var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next"); var arrayIterationKindSymbol = GLOBAL_PRIVATE("ArrayIterator#kind"); + function ArrayIterator() {} + +// TODO(wingo): Update section numbers when ES6 has stabilized. The +// section numbers below are already out of date as of the May 2014 +// draft. 
+ + // 15.4.5.1 CreateArrayIterator Abstract Operation function CreateArrayIterator(array, kind) { var object = ToObject(array); @@ -52,20 +33,33 @@ return iterator; } + // 15.19.4.3.4 CreateItrResultObject function CreateIteratorResultObject(value, done) { return {value: value, done: done}; } + +// 22.1.5.2.2 %ArrayIteratorPrototype%[@@iterator] +function ArrayIteratorIterator() { + return this; +} + + // 15.4.5.2.2 ArrayIterator.prototype.next( ) function ArrayIteratorNext() { var iterator = ToObject(this); - var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol); - if (!array) { + + if (!HAS_PRIVATE(iterator, arrayIteratorObjectSymbol)) { throw MakeTypeError('incompatible_method_receiver', ['Array Iterator.prototype.next']); } + var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol); + if (IS_UNDEFINED(array)) { + return CreateIteratorResultObject(UNDEFINED, true); + } + var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol); var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol); var length = TO_UINT32(array.length); @@ -73,46 +67,55 @@ // "sparse" is never used. 
if (index >= length) { - SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, INFINITY); + SET_PRIVATE(iterator, arrayIteratorObjectSymbol, UNDEFINED); return CreateIteratorResultObject(UNDEFINED, true); } SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1); - if (itemKind == ARRAY_ITERATOR_KIND_VALUES) + if (itemKind == ITERATOR_KIND_VALUES) { return CreateIteratorResultObject(array[index], false); + } - if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES) + if (itemKind == ITERATOR_KIND_ENTRIES) { return CreateIteratorResultObject([index, array[index]], false); + } return CreateIteratorResultObject(index, false); } + function ArrayEntries() { - return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_ENTRIES); + return CreateArrayIterator(this, ITERATOR_KIND_ENTRIES); } + function ArrayValues() { - return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_VALUES); + return CreateArrayIterator(this, ITERATOR_KIND_VALUES); } + function ArrayKeys() { - return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_KEYS); + return CreateArrayIterator(this, ITERATOR_KIND_KEYS); } + function SetUpArrayIterator() { %CheckIsBootstrapping(); + %FunctionSetPrototype(ArrayIterator, new $Object()); %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator'); - %FunctionSetReadOnlyPrototype(ArrayIterator); InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array( 'next', ArrayIteratorNext )); + %FunctionSetName(ArrayIteratorIterator, '[Symbol.iterator]'); + %AddNamedProperty(ArrayIterator.prototype, symbolIterator, + ArrayIteratorIterator, DONT_ENUM); } - SetUpArrayIterator(); + function ExtendArrayPrototype() { %CheckIsBootstrapping(); @@ -121,6 +124,34 @@ 'values', ArrayValues, 'keys', ArrayKeys )); -} + %AddNamedProperty($Array.prototype, symbolIterator, ArrayValues, DONT_ENUM); +} ExtendArrayPrototype(); + + +function ExtendTypedArrayPrototypes() { + %CheckIsBootstrapping(); + +macro TYPED_ARRAYS(FUNCTION) + FUNCTION(Uint8Array) + FUNCTION(Int8Array) + FUNCTION(Uint16Array) + 
FUNCTION(Int16Array) + FUNCTION(Uint32Array) + FUNCTION(Int32Array) + FUNCTION(Float32Array) + FUNCTION(Float64Array) + FUNCTION(Uint8ClampedArray) +endmacro + +macro EXTEND_TYPED_ARRAY(NAME) + %AddNamedProperty($NAME.prototype, 'entries', ArrayEntries, DONT_ENUM); + %AddNamedProperty($NAME.prototype, 'values', ArrayValues, DONT_ENUM); + %AddNamedProperty($NAME.prototype, 'keys', ArrayKeys, DONT_ENUM); + %AddNamedProperty($NAME.prototype, symbolIterator, ArrayValues, DONT_ENUM); +endmacro + + TYPED_ARRAYS(EXTEND_TYPED_ARRAY) +} +ExtendTypedArrayPrototypes(); diff -Nru nodejs-0.11.13/deps/v8/src/array.js nodejs-0.11.15/deps/v8/src/array.js --- nodejs-0.11.13/deps/v8/src/array.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/array.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,8 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // This file relies on the fact that the following declarations have been made // in runtime.js: @@ -66,7 +45,7 @@ } -function SparseJoinWithSeparator(array, len, convert, separator) { +function SparseJoinWithSeparatorJS(array, len, convert, separator) { var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len)); var totalLength = 0; var elements = new InternalArray(keys.length * 2); @@ -107,11 +86,20 @@ } -function UseSparseVariant(object, length, is_array) { - return is_array && - length > 1000 && - (!%_IsSmi(length) || - %EstimateNumberOfElements(object) < (length >> 2)); +function UseSparseVariant(array, length, is_array, touched) { + // Only use the sparse variant on arrays that are likely to be sparse and the + // number of elements touched in the operation is relatively small compared to + // the overall size of the array. + if (!is_array || length < 1000 || %IsObserved(array)) { + return false; + } + if (!%_IsSmi(length)) { + return true; + } + var elements_threshold = length >> 2; // No more than 75% holes + var estimated_elements = %EstimateNumberOfElements(array); + return (estimated_elements < elements_threshold) && + (touched > estimated_elements * 4); } @@ -128,11 +116,12 @@ // Attempt to convert the elements. 
try { - if (UseSparseVariant(array, length, is_array)) { + if (UseSparseVariant(array, length, is_array, length)) { + %NormalizeElements(array); if (separator.length == 0) { return SparseJoin(array, length, convert); } else { - return SparseJoinWithSeparator(array, length, convert, separator); + return SparseJoinWithSeparatorJS(array, length, convert, separator); } } @@ -292,7 +281,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) { for (var i = 0; i < del_count; i++) { var index = start_i + i; - // The spec could also be interpreted such that %HasLocalProperty + // The spec could also be interpreted such that %HasOwnProperty // would be the appropriate test. We follow KJS in consulting the // prototype. var current = array[index]; @@ -312,7 +301,7 @@ var from_index = i + del_count - 1; var to_index = i + num_additional_args - 1; // The spec could also be interpreted such that - // %HasLocalProperty would be the appropriate test. We follow + // %HasOwnProperty would be the appropriate test. We follow // KJS in consulting the prototype. var current = array[from_index]; if (!IS_UNDEFINED(current) || from_index in array) { @@ -326,7 +315,7 @@ var from_index = i + del_count; var to_index = i + num_additional_args; // The spec could also be interpreted such that - // %HasLocalProperty would be the appropriate test. We follow + // %HasOwnProperty would be the appropriate test. We follow // KJS in consulting the prototype. 
var current = array[from_index]; if (!IS_UNDEFINED(current) || from_index in array) { @@ -378,17 +367,18 @@ function ArrayJoin(separator) { CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join"); - var length = TO_UINT32(this.length); + var array = TO_OBJECT_INLINE(this); + var length = TO_UINT32(array.length); if (IS_UNDEFINED(separator)) { separator = ','; } else if (!IS_STRING(separator)) { separator = NonStringToString(separator); } - var result = %_FastAsciiArrayJoin(this, separator); + var result = %_FastAsciiArrayJoin(array, separator); if (!IS_UNDEFINED(result)) return result; - return Join(this, length, separator, ConvertToString); + return Join(array, length, separator, ConvertToString); } @@ -413,24 +403,20 @@ function ArrayPop() { CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop"); - var n = TO_UINT32(this.length); + var array = TO_OBJECT_INLINE(this); + var n = TO_UINT32(array.length); if (n == 0) { - this.length = n; + array.length = n; return; } - if ($Object.isSealed(this)) { - throw MakeTypeError("array_functions_change_sealed", - ["Array.prototype.pop"]); - } - - if (%IsObserved(this)) - return ObservedArrayPop.call(this, n); + if (%IsObserved(array)) + return ObservedArrayPop.call(array, n); n--; - var value = this[n]; - Delete(this, ToName(n), true); - this.length = n; + var value = array[n]; + Delete(array, ToName(n), true); + array.length = n; return value; } @@ -444,13 +430,14 @@ for (var i = 0; i < m; i++) { this[i+n] = %_Arguments(i); } - this.length = n + m; + var new_length = n + m; + this.length = new_length; } finally { EndPerformSplice(this); EnqueueSpliceRecord(this, n, [], m); } - return this.length; + return new_length; } // Appends the arguments to the end of the array and returns the new @@ -458,28 +445,27 @@ function ArrayPush() { CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push"); - var n = TO_UINT32(this.length); - var m = %_ArgumentsLength(); - if (m > 0 && $Object.isSealed(this)) { - throw 
MakeTypeError("array_functions_change_sealed", - ["Array.prototype.push"]); - } - if (%IsObserved(this)) return ObservedArrayPush.apply(this, arguments); + var array = TO_OBJECT_INLINE(this); + var n = TO_UINT32(array.length); + var m = %_ArgumentsLength(); + for (var i = 0; i < m; i++) { - this[i+n] = %_Arguments(i); + array[i+n] = %_Arguments(i); } - this.length = n + m; - return this.length; + + var new_length = n + m; + array.length = new_length; + return new_length; } // Returns an array containing the array elements of the object followed // by the array elements of each argument in order. See ECMA-262, // section 15.4.4.7. -function ArrayConcat(arg1) { // length == 1 +function ArrayConcatJS(arg1) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat"); var array = ToObject(this); @@ -541,33 +527,36 @@ function ArrayReverse() { CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse"); - var j = TO_UINT32(this.length) - 1; + var array = TO_OBJECT_INLINE(this); + var len = TO_UINT32(array.length); - if (UseSparseVariant(this, j, IS_ARRAY(this))) { - SparseReverse(this, j+1); - return this; + if (UseSparseVariant(array, len, IS_ARRAY(array), len)) { + %NormalizeElements(array); + SparseReverse(array, len); + return array; } + var j = len - 1; for (var i = 0; i < j; i++, j--) { - var current_i = this[i]; - if (!IS_UNDEFINED(current_i) || i in this) { - var current_j = this[j]; - if (!IS_UNDEFINED(current_j) || j in this) { - this[i] = current_j; - this[j] = current_i; + var current_i = array[i]; + if (!IS_UNDEFINED(current_i) || i in array) { + var current_j = array[j]; + if (!IS_UNDEFINED(current_j) || j in array) { + array[i] = current_j; + array[j] = current_i; } else { - this[j] = current_i; - delete this[i]; + array[j] = current_i; + delete array[i]; } } else { - var current_j = this[j]; - if (!IS_UNDEFINED(current_j) || j in this) { - this[i] = current_j; - delete this[j]; + var current_j = array[j]; + if (!IS_UNDEFINED(current_j) || j in 
array) { + array[i] = current_j; + delete array[j]; } } } - return this; + return array; } @@ -589,30 +578,31 @@ function ArrayShift() { CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift"); - var len = TO_UINT32(this.length); + var array = TO_OBJECT_INLINE(this); + var len = TO_UINT32(array.length); if (len === 0) { - this.length = 0; + array.length = 0; return; } - if ($Object.isSealed(this)) { + if (ObjectIsSealed(array)) { throw MakeTypeError("array_functions_change_sealed", ["Array.prototype.shift"]); } - if (%IsObserved(this)) - return ObservedArrayShift.call(this, len); + if (%IsObserved(array)) + return ObservedArrayShift.call(array, len); - var first = this[0]; + var first = array[0]; - if (IS_ARRAY(this)) { - SmartMove(this, 0, 1, len, 0); + if (IS_ARRAY(array)) { + SmartMove(array, 0, 1, len, 0); } else { - SimpleMove(this, 0, 1, len, 0); + SimpleMove(array, 0, 1, len, 0); } - this.length = len - 1; + array.length = len - 1; return first; } @@ -627,61 +617,48 @@ for (var i = 0; i < num_arguments; i++) { this[i] = %_Arguments(i); } - this.length = len + num_arguments; + var new_length = len + num_arguments; + this.length = new_length; } finally { EndPerformSplice(this); EnqueueSpliceRecord(this, 0, [], num_arguments); } - return len + num_arguments; + return new_length; } function ArrayUnshift(arg1) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift"); - var len = TO_UINT32(this.length); - var num_arguments = %_ArgumentsLength(); - var is_sealed = $Object.isSealed(this); - - if (num_arguments > 0 && is_sealed) { - throw MakeTypeError("array_functions_change_sealed", - ["Array.prototype.unshift"]); - } - if (%IsObserved(this)) return ObservedArrayUnshift.apply(this, arguments); - if (IS_ARRAY(this) && !is_sealed) { - SmartMove(this, 0, 0, len, num_arguments); - } else { - if (num_arguments == 0 && $Object.isFrozen(this)) { - // In the zero argument case, values from the prototype come into the - // object. 
This can't be allowed on frozen arrays. - for (var i = 0; i < len; i++) { - if (!this.hasOwnProperty(i) && !IS_UNDEFINED(this[i])) { - throw MakeTypeError("array_functions_on_frozen", - ["Array.prototype.shift"]); - } - } - } + var array = TO_OBJECT_INLINE(this); + var len = TO_UINT32(array.length); + var num_arguments = %_ArgumentsLength(); + var is_sealed = ObjectIsSealed(array); - SimpleMove(this, 0, 0, len, num_arguments); + if (IS_ARRAY(array) && !is_sealed && len > 0) { + SmartMove(array, 0, 0, len, num_arguments); + } else { + SimpleMove(array, 0, 0, len, num_arguments); } for (var i = 0; i < num_arguments; i++) { - this[i] = %_Arguments(i); + array[i] = %_Arguments(i); } - this.length = len + num_arguments; - - return this.length; + var new_length = len + num_arguments; + array.length = new_length; + return new_length; } function ArraySlice(start, end) { CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice"); - var len = TO_UINT32(this.length); + var array = TO_OBJECT_INLINE(this); + var len = TO_UINT32(array.length); var start_i = TO_INTEGER(start); var end_i = len; @@ -705,13 +682,12 @@ if (end_i < start_i) return result; - if (IS_ARRAY(this) && - !%IsObserved(this) && - (end_i > 1000) && - (%EstimateNumberOfElements(this) < end_i)) { - SmartSlice(this, start_i, end_i - start_i, len, result); + if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) { + %NormalizeElements(array); + %NormalizeElements(result); + SmartSlice(array, start_i, end_i - start_i, len, result); } else { - SimpleSlice(this, start_i, end_i - start_i, len, result); + SimpleSlice(array, start_i, end_i - start_i, len, result); } result.length = end_i - start_i; @@ -799,7 +775,8 @@ return ObservedArraySplice.apply(this, arguments); var num_arguments = %_ArgumentsLength(); - var len = TO_UINT32(this.length); + var array = TO_OBJECT_INLINE(this); + var len = TO_UINT32(array.length); var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len); var del_count = 
ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i); @@ -807,32 +784,28 @@ deleted_elements.length = del_count; var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0; - if (del_count != num_elements_to_add && $Object.isSealed(this)) { + if (del_count != num_elements_to_add && ObjectIsSealed(array)) { throw MakeTypeError("array_functions_change_sealed", ["Array.prototype.splice"]); - } else if (del_count > 0 && $Object.isFrozen(this)) { + } else if (del_count > 0 && ObjectIsFrozen(array)) { throw MakeTypeError("array_functions_on_frozen", ["Array.prototype.splice"]); } - var use_simple_splice = true; - if (IS_ARRAY(this) && - num_elements_to_add !== del_count) { - // If we are only deleting/moving a few things near the end of the - // array then the simple version is going to be faster, because it - // doesn't touch most of the array. - var estimated_non_hole_elements = %EstimateNumberOfElements(this); - if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) { - use_simple_splice = false; - } - } - - if (use_simple_splice) { - SimpleSlice(this, start_i, del_count, len, deleted_elements); - SimpleMove(this, start_i, del_count, len, num_elements_to_add); + var changed_elements = del_count; + if (num_elements_to_add != del_count) { + // If the slice needs to do a actually move elements after the insertion + // point, then include those in the estimate of changed elements. 
+ changed_elements += len - start_i - del_count; + } + if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) { + %NormalizeElements(array); + %NormalizeElements(deleted_elements); + SmartSlice(array, start_i, del_count, len, deleted_elements); + SmartMove(array, start_i, del_count, len, num_elements_to_add); } else { - SmartSlice(this, start_i, del_count, len, deleted_elements); - SmartMove(this, start_i, del_count, len, num_elements_to_add); + SimpleSlice(array, start_i, del_count, len, deleted_elements); + SimpleMove(array, start_i, del_count, len, num_elements_to_add); } // Insert the arguments into the resulting array in @@ -841,9 +814,9 @@ var arguments_index = 2; var arguments_length = %_ArgumentsLength(); while (arguments_index < arguments_length) { - this[i++] = %_Arguments(arguments_index++); + array[i++] = %_Arguments(arguments_index++); } - this.length = len - del_count + num_elements_to_add; + array.length = len - del_count + num_elements_to_add; // Return the deleted elements. return deleted_elements; @@ -1107,7 +1080,7 @@ // For compatibility with JSC, we also sort elements inherited from // the prototype chain on non-Array objects. // We do this by copying them to this object and sorting only - // local elements. This is not very efficient, but sorting with + // own elements. This is not very efficient, but sorting with // inherited elements happens very, very rarely, if at all. // The specification allows "implementation dependent" behavior // if an element on the prototype chain has an element that @@ -1160,28 +1133,16 @@ var result = new $Array(); var accumulator = new InternalArray(); var accumulator_length = 0; - if (%DebugCallbackSupportsStepping(f)) { - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. 
- %DebugPrepareStepInIfStepping(f); - if (%_CallFunction(receiver, element, i, array, f)) { - accumulator[accumulator_length++] = element; - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(f); + if (%_CallFunction(receiver, element, i, array, f)) { + accumulator[accumulator_length++] = element; } } - } else { - // This is a duplicate of the previous loop sans debug stepping. - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - if (%_CallFunction(receiver, element, i, array, f)) { - accumulator[accumulator_length++] = element; - } - } - } - // End of duplicate. } %MoveArrayContents(accumulator, result); return result; @@ -1205,24 +1166,14 @@ receiver = ToObject(receiver); } - if (%DebugCallbackSupportsStepping(f)) { - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. - %DebugPrepareStepInIfStepping(f); - %_CallFunction(receiver, element, i, array, f); - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(f); + %_CallFunction(receiver, element, i, array, f); } - } else { - // This is a duplicate of the previous loop sans debug stepping. - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - %_CallFunction(receiver, element, i, array, f); - } - } - // End of duplicate. } } @@ -1246,24 +1197,14 @@ receiver = ToObject(receiver); } - if (%DebugCallbackSupportsStepping(f)) { - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. 
- %DebugPrepareStepInIfStepping(f); - if (%_CallFunction(receiver, element, i, array, f)) return true; - } - } - } else { - // This is a duplicate of the previous loop sans debug stepping. - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - if (%_CallFunction(receiver, element, i, array, f)) return true; - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(f); + if (%_CallFunction(receiver, element, i, array, f)) return true; } - // End of duplicate. } return false; } @@ -1286,24 +1227,14 @@ receiver = ToObject(receiver); } - if (%DebugCallbackSupportsStepping(f)) { - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. - %DebugPrepareStepInIfStepping(f); - if (!%_CallFunction(receiver, element, i, array, f)) return false; - } - } - } else { - // This is a duplicate of the previous loop sans debug stepping. - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - if (!%_CallFunction(receiver, element, i, array, f)) return false; - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(f); + if (!%_CallFunction(receiver, element, i, array, f)) return false; } - // End of duplicate. } return true; } @@ -1327,24 +1258,14 @@ var result = new $Array(); var accumulator = new InternalArray(length); - if (%DebugCallbackSupportsStepping(f)) { - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. 
- %DebugPrepareStepInIfStepping(f); - accumulator[i] = %_CallFunction(receiver, element, i, array, f); - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(f); + accumulator[i] = %_CallFunction(receiver, element, i, array, f); } - } else { - // This is a duplicate of the previous loop sans debug stepping. - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - accumulator[i] = %_CallFunction(receiver, element, i, array, f); - } - } - // End of duplicate. } %MoveArrayContents(accumulator, result); return result; @@ -1369,7 +1290,8 @@ } var min = index; var max = length; - if (UseSparseVariant(this, length, IS_ARRAY(this))) { + if (UseSparseVariant(this, length, IS_ARRAY(this), max - min)) { + %NormalizeElements(this); var indices = %GetArrayKeys(this, length); if (IS_NUMBER(indices)) { // It's an interval. @@ -1424,7 +1346,8 @@ } var min = 0; var max = index; - if (UseSparseVariant(this, length, IS_ARRAY(this))) { + if (UseSparseVariant(this, length, IS_ARRAY(this), index)) { + %NormalizeElements(this); var indices = %GetArrayKeys(this, index + 1); if (IS_NUMBER(indices)) { // It's an interval. @@ -1484,27 +1407,14 @@ } var receiver = %GetDefaultReceiver(callback); - - if (%DebugCallbackSupportsStepping(callback)) { - for (; i < length; i++) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. - %DebugPrepareStepInIfStepping(callback); - current = - %_CallFunction(receiver, current, element, i, array, callback); - } - } - } else { - // This is a duplicate of the previous loop sans debug stepping. 
- for (; i < length; i++) { - if (i in array) { - var element = array[i]; - current = - %_CallFunction(receiver, current, element, i, array, callback); - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback); + for (; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(callback); + current = %_CallFunction(receiver, current, element, i, array, callback); } - // End of duplicate. } return current; } @@ -1534,27 +1444,14 @@ } var receiver = %GetDefaultReceiver(callback); - - if (%DebugCallbackSupportsStepping(callback)) { - for (; i >= 0; i--) { - if (i in array) { - var element = array[i]; - // Prepare break slots for debugger step in. - %DebugPrepareStepInIfStepping(callback); - current = - %_CallFunction(receiver, current, element, i, array, callback); - } - } - } else { - // This is a duplicate of the previous loop sans debug stepping. - for (; i >= 0; i--) { - if (i in array) { - var element = array[i]; - current = - %_CallFunction(receiver, current, element, i, array, callback); - } + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback); + for (; i >= 0; i--) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + if (stepping) %DebugPrepareStepInIfStepping(callback); + current = %_CallFunction(receiver, current, element, i, array, callback); } - // End of duplicate. } return current; } @@ -1572,14 +1469,28 @@ // Set up non-enumerable constructor property on the Array.prototype // object. - %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM); + %AddNamedProperty($Array.prototype, "constructor", $Array, DONT_ENUM); + + // Set up unscopable properties on the Array.prototype object. 
+ var unscopables = { + __proto__: null, + copyWithin: true, + entries: true, + fill: true, + find: true, + findIndex: true, + keys: true, + values: true, + }; + %AddNamedProperty($Array.prototype, symbolUnscopables, unscopables, + DONT_ENUM | READ_ONLY); // Set up non-enumerable functions on the Array object. InstallFunctions($Array, DONT_ENUM, $Array( "isArray", ArrayIsArray )); - var specialFunctions = %SpecialArrayFunctions({}); + var specialFunctions = %SpecialArrayFunctions(); var getFunction = function(name, jsBuiltin, len) { var f = jsBuiltin; @@ -1602,7 +1513,7 @@ "join", getFunction("join", ArrayJoin), "pop", getFunction("pop", ArrayPop), "push", getFunction("push", ArrayPush, 1), - "concat", getFunction("concat", ArrayConcat, 1), + "concat", getFunction("concat", ArrayConcatJS, 1), "reverse", getFunction("reverse", ArrayReverse), "shift", getFunction("shift", ArrayShift), "unshift", getFunction("unshift", ArrayUnshift, 1), @@ -1626,7 +1537,7 @@ // exposed to user code. // Adding only the functions that are actually used. SetUpLockedPrototype(InternalArray, $Array(), $Array( - "concat", getFunction("concat", ArrayConcat), + "concat", getFunction("concat", ArrayConcatJS), "indexOf", getFunction("indexOf", ArrayIndexOf), "join", getFunction("join", ArrayJoin), "pop", getFunction("pop", ArrayPop), diff -Nru nodejs-0.11.13/deps/v8/src/assembler.cc nodejs-0.11.15/deps/v8/src/assembler.cc --- nodejs-0.11.13/deps/v8/src/assembler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/assembler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -32,39 +32,43 @@ // modified significantly by Google Inc. // Copyright 2012 the V8 project authors. All rights reserved. 
-#include "assembler.h" +#include "src/assembler.h" #include <cmath> -#include "api.h" -#include "builtins.h" -#include "counters.h" -#include "cpu.h" -#include "debug.h" -#include "deoptimizer.h" -#include "execution.h" -#include "ic.h" -#include "isolate-inl.h" -#include "jsregexp.h" -#include "lazy-instance.h" -#include "platform.h" -#include "regexp-macro-assembler.h" -#include "regexp-stack.h" -#include "runtime.h" -#include "serialize.h" -#include "store-buffer-inl.h" -#include "stub-cache.h" -#include "token.h" +#include "src/api.h" +#include "src/base/cpu.h" +#include "src/base/lazy-instance.h" +#include "src/base/platform/platform.h" +#include "src/builtins.h" +#include "src/counters.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/ic.h" +#include "src/isolate-inl.h" +#include "src/jsregexp.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/runtime.h" +#include "src/serialize.h" +#include "src/stub-cache.h" +#include "src/token.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/assembler-ia32-inl.h" +#include "src/ia32/assembler-ia32-inl.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/assembler-x64-inl.h" +#include "src/x64/assembler-x64-inl.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/assembler-arm64-inl.h" +#include "src/arm64/assembler-arm64-inl.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/assembler-arm-inl.h" +#include "src/arm/assembler-arm-inl.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/assembler-mips-inl.h" +#include "src/mips/assembler-mips-inl.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/assembler-mips64-inl.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/assembler-x87-inl.h" // NOLINT #else #error "Unknown architecture." #endif @@ -72,15 +76,19 @@ // Include native regexp-macro-assembler. 
#ifndef V8_INTERPRETED_REGEXP #if V8_TARGET_ARCH_IA32 -#include "ia32/regexp-macro-assembler-ia32.h" +#include "src/ia32/regexp-macro-assembler-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/regexp-macro-assembler-x64.h" +#include "src/x64/regexp-macro-assembler-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/regexp-macro-assembler-arm64.h" +#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/regexp-macro-assembler-arm.h" +#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/regexp-macro-assembler-mips.h" +#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT #else // Unknown architecture. #error "Unknown architecture." #endif // Target architecture. @@ -93,16 +101,13 @@ // Common double constants. 
struct DoubleConstant BASE_EMBEDDED { - double min_int; - double one_half; - double minus_one_half; - double minus_zero; - double zero; - double uint8_max_value; - double negative_infinity; - double canonical_non_hole_nan; - double the_hole_nan; - double uint32_bias; +double min_int; +double one_half; +double minus_one_half; +double negative_infinity; +double canonical_non_hole_nan; +double the_hole_nan; +double uint32_bias; }; static DoubleConstant double_constants; @@ -110,7 +115,7 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; static bool math_exp_data_initialized = false; -static Mutex* math_exp_data_mutex = NULL; +static base::Mutex* math_exp_data_mutex = NULL; static double* math_exp_constants_array = NULL; static double* math_exp_log_table_array = NULL; @@ -122,26 +127,16 @@ jit_cookie_(0), enabled_cpu_features_(0), emit_debug_code_(FLAG_debug_code), - predictable_code_size_(false) { + predictable_code_size_(false), + // We may use the assembler without an isolate. + serializer_enabled_(isolate && isolate->serializer_enabled()) { if (FLAG_mask_constants_with_cookie && isolate != NULL) { jit_cookie_ = isolate->random_number_generator()->NextInt(); } - if (buffer == NULL) { - // Do our own buffer management. - if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - if (isolate->assembler_spare_buffer() != NULL) { - buffer = isolate->assembler_spare_buffer(); - isolate->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) buffer = NewArray<byte>(buffer_size); - own_buffer_ = true; - } else { - // Use externally provided buffer instead. 
- ASSERT(buffer_size > 0); - own_buffer_ = false; - } + own_buffer_ = buffer == NULL; + if (buffer_size == 0) buffer_size = kMinimalBufferSize; + DCHECK(buffer_size > 0); + if (own_buffer_) buffer = NewArray<byte>(buffer_size); buffer_ = static_cast<byte*>(buffer); buffer_size_ = buffer_size; @@ -150,15 +145,7 @@ AssemblerBase::~AssemblerBase() { - if (own_buffer_) { - if (isolate() != NULL && - isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } + if (own_buffer_) DeleteArray(buffer_); } @@ -190,7 +177,7 @@ #ifdef DEBUG CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) : assembler_(assembler) { - ASSERT(CpuFeatures::IsSafeForSnapshot(f)); + DCHECK(CpuFeatures::IsSupported(f)); old_enabled_ = assembler_->enabled_cpu_features(); uint64_t mask = static_cast<uint64_t>(1) << f; // TODO(svenpanne) This special case below doesn't belong here! @@ -210,22 +197,9 @@ #endif -// ----------------------------------------------------------------------------- -// Implementation of PlatformFeatureScope - -PlatformFeatureScope::PlatformFeatureScope(CpuFeature f) - : old_cross_compile_(CpuFeatures::cross_compile_) { - // CpuFeatures is a global singleton, therefore this is only safe in - // single threaded code. 
- ASSERT(Serializer::enabled()); - uint64_t mask = static_cast<uint64_t>(1) << f; - CpuFeatures::cross_compile_ |= mask; -} - - -PlatformFeatureScope::~PlatformFeatureScope() { - CpuFeatures::cross_compile_ = old_cross_compile_; -} +bool CpuFeatures::initialized_ = false; +unsigned CpuFeatures::supported_ = 0; +unsigned CpuFeatures::cache_line_size_ = 0; // ----------------------------------------------------------------------------- @@ -360,7 +334,7 @@ if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta; WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag); uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits; - ASSERT(pc_jump > 0); + DCHECK(pc_jump > 0); // Write kChunkBits size chunks of the pc_jump. for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) { byte b = pc_jump & kChunkMask; @@ -434,9 +408,9 @@ #ifdef DEBUG byte* begin_pos = pos_; #endif - ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); - ASSERT(rinfo->pc() - last_pc_ >= 0); - ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM + DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); + DCHECK(rinfo->pc() - last_pc_ >= 0); + DCHECK(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM <= kMaxStandardNonCompactModes); // Use unsigned delta-encoding for pc. uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_); @@ -447,10 +421,10 @@ WriteTaggedPC(pc_delta, kEmbeddedObjectTag); } else if (rmode == RelocInfo::CODE_TARGET) { WriteTaggedPC(pc_delta, kCodeTargetTag); - ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize); + DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize); } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { // Use signed delta-encoding for id. - ASSERT(static_cast<int>(rinfo->data()) == rinfo->data()); + DCHECK(static_cast<int>(rinfo->data()) == rinfo->data()); int id_delta = static_cast<int>(rinfo->data()) - last_id_; // Check if delta is small enough to fit in a tagged byte. 
if (is_intn(id_delta, kSmallDataBits)) { @@ -464,7 +438,7 @@ last_id_ = static_cast<int>(rinfo->data()); } else if (RelocInfo::IsPosition(rmode)) { // Use signed delta-encoding for position. - ASSERT(static_cast<int>(rinfo->data()) == rinfo->data()); + DCHECK(static_cast<int>(rinfo->data()) == rinfo->data()); int pos_delta = static_cast<int>(rinfo->data()) - last_position_; int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag : kStatementPositionTag; @@ -482,23 +456,23 @@ // Comments are normally not generated, so we use the costly encoding. WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag); WriteExtraTaggedData(rinfo->data(), kCommentTag); - ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize); + DCHECK(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize); } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) { WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag); WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()), RelocInfo::IsConstPool(rmode) ? kConstPoolTag : kVeneerPoolTag); } else { - ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM); + DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM); int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM; // For all other modes we simply use the mode as the extra tag. // None of these modes need a data component. - ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag); + DCHECK(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag); WriteExtraTaggedPC(pc_delta, saved_mode); } last_pc_ = rinfo->pc(); #ifdef DEBUG - ASSERT(begin_pos - pos_ <= kMaxSize); + DCHECK(begin_pos - pos_ <= kMaxSize); #endif } @@ -604,7 +578,7 @@ static inline RelocInfo::Mode GetPositionModeFromTag(int tag) { - ASSERT(tag == kNonstatementPositionTag || + DCHECK(tag == kNonstatementPositionTag || tag == kStatementPositionTag); return (tag == kNonstatementPositionTag) ? 
RelocInfo::POSITION : @@ -613,7 +587,7 @@ void RelocIterator::next() { - ASSERT(!done()); + DCHECK(!done()); // Basically, do the opposite of RelocInfoWriter::Write. // Reading of data is as far as possible avoided for unwanted modes, // but we must always update the pc. @@ -639,7 +613,7 @@ } else { // Compact encoding is never used for comments, // so it must be a position. - ASSERT(locatable_tag == kNonstatementPositionTag || + DCHECK(locatable_tag == kNonstatementPositionTag || locatable_tag == kStatementPositionTag); if (mode_mask_ & RelocInfo::kPositionMask) { ReadTaggedPosition(); @@ -647,7 +621,7 @@ } } } else { - ASSERT(tag == kDefaultTag); + DCHECK(tag == kDefaultTag); int extra_tag = GetExtraTag(); if (extra_tag == kPCJumpExtraTag) { if (GetTopTag() == kVariableLengthPCJumpTopTag) { @@ -664,7 +638,7 @@ } Advance(kIntSize); } else if (locatable_tag != kCommentTag) { - ASSERT(locatable_tag == kNonstatementPositionTag || + DCHECK(locatable_tag == kNonstatementPositionTag || locatable_tag == kStatementPositionTag); if (mode_mask_ & RelocInfo::kPositionMask) { AdvanceReadPosition(); @@ -673,7 +647,7 @@ Advance(kIntSize); } } else { - ASSERT(locatable_tag == kCommentTag); + DCHECK(locatable_tag == kCommentTag); if (SetMode(RelocInfo::COMMENT)) { AdvanceReadData(); return; @@ -682,7 +656,7 @@ } } else if (extra_tag == kPoolExtraTag) { int pool_type = GetTopTag(); - ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag); + DCHECK(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag); RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ? RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL; if (SetMode(rmode)) { @@ -722,7 +696,10 @@ last_id_ = 0; last_position_ = 0; byte* sequence = code->FindCodeAgeSequence(); - if (sequence != NULL && !Code::IsYoungSequence(sequence)) { + // We get the isolate from the map, because at serialization time + // the code pointer has been cloned and isn't really in heap space. 
+ Isolate* isolate = code->map()->GetIsolate(); + if (sequence != NULL && !Code::IsYoungSequence(isolate, sequence)) { code_age_sequence_ = sequence; } else { code_age_sequence_ = NULL; @@ -779,9 +756,6 @@ case RelocInfo::CONSTRUCT_CALL: return "code target (js construct call)"; case RelocInfo::DEBUG_BREAK: -#ifndef ENABLE_DEBUGGER_SUPPORT - UNREACHABLE(); -#endif return "debug break"; case RelocInfo::CODE_TARGET: return "code target"; @@ -808,9 +782,6 @@ case RelocInfo::VENEER_POOL: return "veneer pool"; case RelocInfo::DEBUG_BREAK_SLOT: -#ifndef ENABLE_DEBUGGER_SUPPORT - UNREACHABLE(); -#endif return "debug break slot"; case RelocInfo::CODE_AGE_SEQUENCE: return "code_age_sequence"; @@ -822,45 +793,42 @@ } -void RelocInfo::Print(Isolate* isolate, FILE* out) { - PrintF(out, "%p %s", pc_, RelocModeName(rmode_)); +void RelocInfo::Print(Isolate* isolate, OStream& os) { // NOLINT + os << pc_ << " " << RelocModeName(rmode_); if (IsComment(rmode_)) { - PrintF(out, " (%s)", reinterpret_cast<char*>(data_)); + os << " (" << reinterpret_cast<char*>(data_) << ")"; } else if (rmode_ == EMBEDDED_OBJECT) { - PrintF(out, " ("); - target_object()->ShortPrint(out); - PrintF(out, ")"); + os << " (" << Brief(target_object()) << ")"; } else if (rmode_ == EXTERNAL_REFERENCE) { ExternalReferenceEncoder ref_encoder(isolate); - PrintF(out, " (%s) (%p)", - ref_encoder.NameOfAddress(target_reference()), - target_reference()); + os << " (" << ref_encoder.NameOfAddress(target_reference()) << ") (" + << target_reference() << ")"; } else if (IsCodeTarget(rmode_)) { Code* code = Code::GetCodeFromTargetAddress(target_address()); - PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()), - target_address()); + os << " (" << Code::Kind2String(code->kind()) << ") (" << target_address() + << ")"; if (rmode_ == CODE_TARGET_WITH_ID) { - PrintF(out, " (id=%d)", static_cast<int>(data_)); + os << " (id=" << static_cast<int>(data_) << ")"; } } else if (IsPosition(rmode_)) { - PrintF(out, " (%" 
V8_PTR_PREFIX "d)", data()); + os << " (" << data() << ")"; } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != NULL) { // Depotimization bailouts are stored as runtime entries. int id = Deoptimizer::GetDeoptimizationId( isolate, target_address(), Deoptimizer::EAGER); if (id != Deoptimizer::kNotDeoptimizationEntry) { - PrintF(out, " (deoptimization bailout %d)", id); + os << " (deoptimization bailout " << id << ")"; } } - PrintF(out, "\n"); + os << "\n"; } #endif // ENABLE_DISASSEMBLER #ifdef VERIFY_HEAP -void RelocInfo::Verify() { +void RelocInfo::Verify(Isolate* isolate) { switch (rmode_) { case EMBEDDED_OBJECT: Object::VerifyPointer(target_object()); @@ -869,10 +837,6 @@ Object::VerifyPointer(target_cell()); break; case DEBUG_BREAK: -#ifndef ENABLE_DEBUGGER_SUPPORT - UNREACHABLE(); - break; -#endif case CONSTRUCT_CALL: case CODE_TARGET_WITH_ID: case CODE_TARGET: { @@ -881,7 +845,7 @@ CHECK(addr != NULL); // Check that we can find the right code object. Code* code = Code::GetCodeFromTargetAddress(addr); - Object* found = code->GetIsolate()->FindCodeObject(addr); + Object* found = isolate->FindCodeObject(addr); CHECK(found->IsCode()); CHECK(code->address() == HeapObject::cast(found)->address()); break; @@ -903,7 +867,7 @@ UNREACHABLE(); break; case CODE_AGE_SEQUENCE: - ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode()); + DCHECK(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode()); break; } } @@ -917,16 +881,13 @@ double_constants.min_int = kMinInt; double_constants.one_half = 0.5; double_constants.minus_one_half = -0.5; - double_constants.minus_zero = -0.0; - double_constants.uint8_max_value = 255; - double_constants.zero = 0.0; - double_constants.canonical_non_hole_nan = OS::nan_value(); + double_constants.canonical_non_hole_nan = base::OS::nan_value(); double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64); double_constants.negative_infinity = -V8_INFINITY; double_constants.uint32_bias = 
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1; - math_exp_data_mutex = new Mutex(); + math_exp_data_mutex = new base::Mutex(); } @@ -934,7 +895,7 @@ // Early return? if (math_exp_data_initialized) return; - LockGuard<Mutex> lock_guard(math_exp_data_mutex); + base::LockGuard<base::Mutex> lock_guard(math_exp_data_mutex); if (!math_exp_data_initialized) { // If this is changed, generated code must be adapted too. const int kTableSizeBits = 11; @@ -1014,11 +975,6 @@ Isolate* isolate) : address_(Redirect(isolate, ic_utility.address())) {} -#ifdef ENABLE_DEBUGGER_SUPPORT -ExternalReference::ExternalReference(const Debug_Address& debug_address, - Isolate* isolate) - : address_(debug_address.address(isolate)) {} -#endif ExternalReference::ExternalReference(StatsCounter* counter) : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {} @@ -1049,19 +1005,8 @@ ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) { - return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache))); -} - - -ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) { - return - ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC))); -} - - -ExternalReference ExternalReference::out_of_memory_function(Isolate* isolate) { - return - ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::OutOfMemory))); + return ExternalReference( + Redirect(isolate, FUNCTION_ADDR(CpuFeatures::FlushICache))); } @@ -1193,13 +1138,6 @@ } -ExternalReference ExternalReference::heap_always_allocate_scope_depth( - Isolate* isolate) { - Heap* heap = isolate->heap(); - return ExternalReference(heap->always_allocate_scope_depth_address()); -} - - ExternalReference ExternalReference::new_space_allocation_limit_address( Isolate* isolate) { return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress()); @@ -1234,13 +1172,6 @@ } -ExternalReference ExternalReference:: - 
new_space_high_promotion_mode_active_address(Isolate* isolate) { - return ExternalReference( - isolate->heap()->NewSpaceHighPromotionModeActiveAddress()); -} - - ExternalReference ExternalReference::handle_scope_level_address( Isolate* isolate) { return ExternalReference(HandleScope::current_level_address(isolate)); @@ -1299,23 +1230,6 @@ } -ExternalReference ExternalReference::address_of_minus_zero() { - return ExternalReference( - reinterpret_cast<void*>(&double_constants.minus_zero)); -} - - -ExternalReference ExternalReference::address_of_zero() { - return ExternalReference(reinterpret_cast<void*>(&double_constants.zero)); -} - - -ExternalReference ExternalReference::address_of_uint8_max_value() { - return ExternalReference( - reinterpret_cast<void*>(&double_constants.uint8_max_value)); -} - - ExternalReference ExternalReference::address_of_negative_infinity() { return ExternalReference( reinterpret_cast<void*>(&double_constants.negative_infinity)); @@ -1340,6 +1254,30 @@ } +ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) { + return ExternalReference(isolate->cpu_profiler()->is_profiling_address()); +} + + +ExternalReference ExternalReference::invoke_function_callback( + Isolate* isolate) { + Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); + ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; + ApiFunction thunk_fun(thunk_address); + return ExternalReference(&thunk_fun, thunk_type, isolate); +} + + +ExternalReference ExternalReference::invoke_accessor_getter_callback( + Isolate* isolate) { + Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); + ExternalReference::Type thunk_type = + ExternalReference::PROFILING_GETTER_CALL; + ApiFunction thunk_fun(thunk_address); + return ExternalReference(&thunk_fun, thunk_type, isolate); +} + + #ifndef V8_INTERPRETED_REGEXP ExternalReference ExternalReference::re_check_stack_guard_state( @@ -1355,6 +1293,10 @@ function = 
FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState); #elif V8_TARGET_ARCH_MIPS function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState); +#elif V8_TARGET_ARCH_MIPS64 + function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState); +#elif V8_TARGET_ARCH_X87 + function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState); #else UNREACHABLE(); #endif @@ -1410,14 +1352,14 @@ ExternalReference ExternalReference::math_exp_constants(int constant_index) { - ASSERT(math_exp_data_initialized); + DCHECK(math_exp_data_initialized); return ExternalReference( reinterpret_cast<void*>(math_exp_constants_array + constant_index)); } ExternalReference ExternalReference::math_exp_log_table() { - ASSERT(math_exp_data_initialized); + DCHECK(math_exp_data_initialized); return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array)); } @@ -1433,6 +1375,32 @@ } +ExternalReference ExternalReference::cpu_features() { + DCHECK(CpuFeatures::initialized_); + return ExternalReference(&CpuFeatures::supported_); +} + + +ExternalReference ExternalReference::debug_is_active_address( + Isolate* isolate) { + return ExternalReference(isolate->debug()->is_active_address()); +} + + +ExternalReference ExternalReference::debug_after_break_target_address( + Isolate* isolate) { + return ExternalReference(isolate->debug()->after_break_target_address()); +} + + +ExternalReference + ExternalReference::debug_restarter_frame_function_pointer_address( + Isolate* isolate) { + return ExternalReference( + isolate->debug()->restarter_frame_function_pointer_address()); +} + + double power_helper(double x, double y) { int y_int = static_cast<int>(y); if (y == y_int) { @@ -1491,7 +1459,7 @@ // The checks for special cases can be dropped in ia32 because it has already // been done in generated code before bailing out here. 
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) { - return OS::nan_value(); + return base::OS::nan_value(); } return std::pow(x, y); } @@ -1514,7 +1482,7 @@ bool EvalComparison(Token::Value op, double op1, double op2) { - ASSERT(Token::IsCompareOp(op)); + DCHECK(Token::IsCompareOp(op)); switch (op) { case Token::EQ: case Token::EQ_STRICT: return (op1 == op2); @@ -1538,7 +1506,6 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT ExternalReference ExternalReference::debug_break(Isolate* isolate) { return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break))); } @@ -1548,18 +1515,12 @@ Isolate* isolate) { return ExternalReference(isolate->debug()->step_in_fp_addr()); } -#endif void PositionsRecorder::RecordPosition(int pos) { - ASSERT(pos != RelocInfo::kNoPosition); - ASSERT(pos >= 0); + DCHECK(pos != RelocInfo::kNoPosition); + DCHECK(pos >= 0); state_.current_position = pos; -#ifdef ENABLE_GDB_JIT_INTERFACE - if (gdbjit_lineinfo_ != NULL) { - gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false); - } -#endif LOG_CODE_EVENT(assembler_->isolate(), CodeLinePosInfoAddPositionEvent(jit_handler_data_, assembler_->pc_offset(), @@ -1568,14 +1529,9 @@ void PositionsRecorder::RecordStatementPosition(int pos) { - ASSERT(pos != RelocInfo::kNoPosition); - ASSERT(pos >= 0); + DCHECK(pos != RelocInfo::kNoPosition); + DCHECK(pos >= 0); state_.current_statement_position = pos; -#ifdef ENABLE_GDB_JIT_INTERFACE - if (gdbjit_lineinfo_ != NULL) { - gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true); - } -#endif LOG_CODE_EVENT(assembler_->isolate(), CodeLinePosInfoAddStatementPositionEvent( jit_handler_data_, @@ -1613,7 +1569,7 @@ MultiplierAndShift::MultiplierAndShift(int32_t d) { - ASSERT(d <= -2 || 2 <= d); + DCHECK(d <= -2 || 2 <= d); const uint32_t two31 = 0x80000000; uint32_t ad = Abs(d); uint32_t t = two31 + (uint32_t(d) >> 31); diff -Nru nodejs-0.11.13/deps/v8/src/assembler.h nodejs-0.11.15/deps/v8/src/assembler.h --- 
nodejs-0.11.13/deps/v8/src/assembler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/assembler.h 2015-01-20 21:22:17.000000000 +0000 @@ -35,14 +35,14 @@ #ifndef V8_ASSEMBLER_H_ #define V8_ASSEMBLER_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "builtins.h" -#include "gdb-jit.h" -#include "isolate.h" -#include "runtime.h" -#include "token.h" +#include "src/allocation.h" +#include "src/builtins.h" +#include "src/gdb-jit.h" +#include "src/isolate.h" +#include "src/runtime.h" +#include "src/token.h" namespace v8 { @@ -65,6 +65,9 @@ bool emit_debug_code() const { return emit_debug_code_; } void set_emit_debug_code(bool value) { emit_debug_code_ = value; } + bool serializer_enabled() const { return serializer_enabled_; } + void enable_serializer() { serializer_enabled_ = true; } + bool predictable_code_size() const { return predictable_code_size_; } void set_predictable_code_size(bool value) { predictable_code_size_ = value; } @@ -104,6 +107,23 @@ uint64_t enabled_cpu_features_; bool emit_debug_code_; bool predictable_code_size_; + bool serializer_enabled_; +}; + + +// Avoids emitting debug code during the lifetime of this scope object. +class DontEmitDebugCodeScope BASE_EMBEDDED { + public: + explicit DontEmitDebugCodeScope(AssemblerBase* assembler) + : assembler_(assembler), old_value_(assembler->emit_debug_code()) { + assembler_->set_emit_debug_code(false); + } + ~DontEmitDebugCodeScope() { + assembler_->set_emit_debug_code(old_value_); + } + private: + AssemblerBase* assembler_; + bool old_value_; }; @@ -138,15 +158,55 @@ }; -// Enable a unsupported feature within a scope for cross-compiling for a -// different CPU. -class PlatformFeatureScope BASE_EMBEDDED { +// CpuFeatures keeps track of which features are supported by the target CPU. +// Supported features must be enabled by a CpuFeatureScope before use. 
+// Example: +// if (assembler->IsSupported(SSE3)) { +// CpuFeatureScope fscope(assembler, SSE3); +// // Generate code containing SSE3 instructions. +// } else { +// // Generate alternative code. +// } +class CpuFeatures : public AllStatic { public: - explicit PlatformFeatureScope(CpuFeature f); - ~PlatformFeatureScope(); + static void Probe(bool cross_compile) { + STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt); + if (initialized_) return; + initialized_ = true; + ProbeImpl(cross_compile); + } + + static unsigned SupportedFeatures() { + Probe(false); + return supported_; + } + + static bool IsSupported(CpuFeature f) { + return (supported_ & (1u << f)) != 0; + } + + static inline bool SupportsCrankshaft(); + + static inline unsigned cache_line_size() { + DCHECK(cache_line_size_ != 0); + return cache_line_size_; + } + + static void PrintTarget(); + static void PrintFeatures(); + + // Flush instruction cache. + static void FlushICache(void* start, size_t size); private: - uint64_t old_cross_compile_; + // Platform-dependent implementation. + static void ProbeImpl(bool cross_compile); + + static unsigned supported_; + static unsigned cache_line_size_; + static bool initialized_; + friend class ExternalReference; + DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -168,8 +228,8 @@ } INLINE(~Label()) { - ASSERT(!is_linked()); - ASSERT(!is_near_linked()); + DCHECK(!is_linked()); + DCHECK(!is_near_linked()); } INLINE(void Unuse()) { pos_ = 0; } @@ -199,15 +259,15 @@ void bind_to(int pos) { pos_ = -pos - 1; - ASSERT(is_bound()); + DCHECK(is_bound()); } void link_to(int pos, Distance distance = kFar) { if (distance == kNear) { near_link_pos_ = pos + 1; - ASSERT(is_near_linked()); + DCHECK(is_near_linked()); } else { pos_ = pos + 1; - ASSERT(is_linked()); + DCHECK(is_linked()); } } @@ -225,6 +285,12 @@ enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs }; +// Specifies whether to perform icache flush operations on RelocInfo updates. 
+// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an +// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be +// skipped (only use this if you will flush the icache manually before it is +// executed). +enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH }; // ----------------------------------------------------------------------------- // Relocation information @@ -237,7 +303,7 @@ // describe a property of the datum. Such rmodes are useful for GC // and nice disassembly output. -class RelocInfo BASE_EMBEDDED { +class RelocInfo { public: // The constant kNoPosition is used with the collecting of source positions // in the relocation information. Two types of source positions are collected @@ -309,7 +375,6 @@ LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE }; - RelocInfo() {} RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) @@ -324,7 +389,7 @@ mode <= LAST_REAL_RELOC_MODE; } static inline bool IsPseudoRelocMode(Mode mode) { - ASSERT(!IsRealRelocMode(mode)); + DCHECK(!IsRealRelocMode(mode)); return mode >= FIRST_PSEUDO_RELOC_MODE && mode <= LAST_PSEUDO_RELOC_MODE; } @@ -401,7 +466,9 @@ void set_host(Code* host) { host_ = host; } // Apply a relocation by delta bytes - INLINE(void apply(intptr_t delta)); + INLINE(void apply(intptr_t delta, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); // Is the pointer this relocation info refers to coded like a plain pointer // or is it strange in some way (e.g. 
relative or patched into a series of @@ -417,22 +484,35 @@ // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) INLINE(Address target_address()); INLINE(void set_target_address(Address target, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); + WriteBarrierMode write_barrier_mode = + UPDATE_WRITE_BARRIER, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); INLINE(Object* target_object()); INLINE(Handle<Object> target_object_handle(Assembler* origin)); INLINE(void set_target_object(Object* target, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); + WriteBarrierMode write_barrier_mode = + UPDATE_WRITE_BARRIER, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); INLINE(Address target_runtime_entry(Assembler* origin)); INLINE(void set_target_runtime_entry(Address target, - WriteBarrierMode mode = - UPDATE_WRITE_BARRIER)); + WriteBarrierMode write_barrier_mode = + UPDATE_WRITE_BARRIER, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); INLINE(Cell* target_cell()); INLINE(Handle<Cell> target_cell_handle()); INLINE(void set_target_cell(Cell* cell, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); + WriteBarrierMode write_barrier_mode = + UPDATE_WRITE_BARRIER, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); INLINE(Handle<Object> code_age_stub_handle(Assembler* origin)); INLINE(Code* code_age_stub()); - INLINE(void set_code_age_stub(Code* stub)); + INLINE(void set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)); // Returns the address of the constant pool entry where the target address // is held. This should only be called if IsInConstantPool returns true. 
@@ -500,10 +580,10 @@ #ifdef ENABLE_DISASSEMBLER // Printing static const char* RelocModeName(Mode rmode); - void Print(Isolate* isolate, FILE* out); + void Print(Isolate* isolate, OStream& os); // NOLINT #endif // ENABLE_DISASSEMBLER #ifdef VERIFY_HEAP - void Verify(); + void Verify(Isolate* isolate); #endif static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1; @@ -606,7 +686,7 @@ // Return pointer valid until next next(). RelocInfo* rinfo() { - ASSERT(!done()); + DCHECK(!done()); return &rinfo_; } @@ -653,9 +733,7 @@ //---------------------------------------------------------------------------- class IC_Utility; class SCTableReference; -#ifdef ENABLE_DEBUGGER_SUPPORT class Debug_Address; -#endif // An ExternalReference represents a C++ address used in the generated @@ -668,7 +746,7 @@ // Used in the simulator to support different native api calls. enum Type { // Builtin call. - // MaybeObject* f(v8::internal::Arguments). + // Object* f(v8::internal::Arguments). BUILTIN_CALL, // default // Builtin that takes float arguments and returns an int. 
@@ -725,10 +803,6 @@ ExternalReference(const IC_Utility& ic_utility, Isolate* isolate); -#ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference(const Debug_Address& debug_address, Isolate* isolate); -#endif - explicit ExternalReference(StatsCounter* counter); ExternalReference(Isolate::AddressId id, Isolate* isolate); @@ -747,8 +821,6 @@ static ExternalReference store_buffer_overflow_function( Isolate* isolate); static ExternalReference flush_icache_function(Isolate* isolate); - static ExternalReference perform_gc_function(Isolate* isolate); - static ExternalReference out_of_memory_function(Isolate* isolate); static ExternalReference delete_handle_scope_extensions(Isolate* isolate); static ExternalReference get_date_field_function(Isolate* isolate); @@ -794,8 +866,6 @@ // Static variable Heap::NewSpaceStart() static ExternalReference new_space_start(Isolate* isolate); static ExternalReference new_space_mask(Isolate* isolate); - static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate); - static ExternalReference new_space_mark_bits(Isolate* isolate); // Write barrier. 
static ExternalReference store_buffer_top(Isolate* isolate); @@ -811,8 +881,6 @@ Isolate* isolate); static ExternalReference old_data_space_allocation_limit_address( Isolate* isolate); - static ExternalReference new_space_high_promotion_mode_active_address( - Isolate* isolate); static ExternalReference mod_two_doubles_operation(Isolate* isolate); static ExternalReference power_double_double_function(Isolate* isolate); @@ -831,9 +899,6 @@ static ExternalReference address_of_min_int(); static ExternalReference address_of_one_half(); static ExternalReference address_of_minus_one_half(); - static ExternalReference address_of_minus_zero(); - static ExternalReference address_of_zero(); - static ExternalReference address_of_uint8_max_value(); static ExternalReference address_of_negative_infinity(); static ExternalReference address_of_canonical_non_hole_nan(); static ExternalReference address_of_the_hole_nan(); @@ -850,15 +915,22 @@ static ExternalReference cpu_features(); + static ExternalReference debug_is_active_address(Isolate* isolate); + static ExternalReference debug_after_break_target_address(Isolate* isolate); + static ExternalReference debug_restarter_frame_function_pointer_address( + Isolate* isolate); + + static ExternalReference is_profiling_address(Isolate* isolate); + static ExternalReference invoke_function_callback(Isolate* isolate); + static ExternalReference invoke_accessor_getter_callback(Isolate* isolate); + Address address() const { return reinterpret_cast<Address>(address_); } -#ifdef ENABLE_DEBUGGER_SUPPORT // Function Debug::Break() static ExternalReference debug_break(Isolate* isolate); // Used to check if single stepping is enabled in generated code. static ExternalReference debug_step_in_fp_address(Isolate* isolate); -#endif #ifndef V8_INTERPRETED_REGEXP // C functions called from RegExp generated code. @@ -882,7 +954,7 @@ static void set_redirector(Isolate* isolate, ExternalReferenceRedirector* redirector) { // We can't stack them. 
- ASSERT(isolate->external_reference_redirector() == NULL); + DCHECK(isolate->external_reference_redirector() == NULL); isolate->set_external_reference_redirector( reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector)); } @@ -902,17 +974,6 @@ : address_(address) {} static void* Redirect(Isolate* isolate, - void* address, - Type type = ExternalReference::BUILTIN_CALL) { - ExternalReferenceRedirector* redirector = - reinterpret_cast<ExternalReferenceRedirector*>( - isolate->external_reference_redirector()); - if (redirector == NULL) return address; - void* answer = (*redirector)(address, type); - return answer; - } - - static void* Redirect(Isolate* isolate, Address address_arg, Type type = ExternalReference::BUILTIN_CALL) { ExternalReferenceRedirector* redirector = @@ -950,29 +1011,9 @@ public: explicit PositionsRecorder(Assembler* assembler) : assembler_(assembler) { -#ifdef ENABLE_GDB_JIT_INTERFACE - gdbjit_lineinfo_ = NULL; -#endif jit_handler_data_ = NULL; } -#ifdef ENABLE_GDB_JIT_INTERFACE - ~PositionsRecorder() { - delete gdbjit_lineinfo_; - } - - void StartGDBJITLineInfoRecording() { - if (FLAG_gdbjit) { - gdbjit_lineinfo_ = new GDBJITLineInfo(); - } - } - - GDBJITLineInfo* DetachGDBJITLineInfo() { - GDBJITLineInfo* lineinfo = gdbjit_lineinfo_; - gdbjit_lineinfo_ = NULL; // To prevent deallocation in destructor. 
- return lineinfo; - } -#endif void AttachJITHandlerData(void* user_data) { jit_handler_data_ = user_data; } @@ -1000,9 +1041,6 @@ private: Assembler* assembler_; PositionState state_; -#ifdef ENABLE_GDB_JIT_INTERFACE - GDBJITLineInfo* gdbjit_lineinfo_; -#endif // Currently jit_handler_data_ is used to store JITHandler-specific data // over the lifetime of a PositionsRecorder diff -Nru nodejs-0.11.13/deps/v8/src/assert-scope.cc nodejs-0.11.15/deps/v8/src/assert-scope.cc --- nodejs-0.11.13/deps/v8/src/assert-scope.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/assert-scope.cc 2015-01-20 21:22:17.000000000 +0000 @@ -3,8 +3,8 @@ // found in the LICENSE file. -#include "assert-scope.h" -#include "v8.h" +#include "src/assert-scope.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/assert-scope.h nodejs-0.11.15/deps/v8/src/assert-scope.h --- nodejs-0.11.13/deps/v8/src/assert-scope.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/assert-scope.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ASSERT_SCOPE_H_ #define V8_ASSERT_SCOPE_H_ -#include "allocation.h" -#include "platform.h" -#include "utils.h" +#include "src/allocation.h" +#include "src/base/platform/platform.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -50,7 +27,9 @@ enum PerIsolateAssertType { JAVASCRIPT_EXECUTION_ASSERT, JAVASCRIPT_EXECUTION_THROWS, - ALLOCATION_FAILURE_ASSERT + ALLOCATION_FAILURE_ASSERT, + DEOPTIMIZATION_ASSERT, + COMPILATION_ASSERT }; @@ -95,7 +74,7 @@ ~PerThreadAssertScopeBase() { if (!data_->decrement_level()) return; for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) { - ASSERT(data_->get(static_cast<PerThreadAssertType>(i))); + DCHECK(data_->get(static_cast<PerThreadAssertType>(i))); } delete data_; SetThreadLocalData(NULL); @@ -103,16 +82,16 @@ static PerThreadAssertData* GetAssertData() { return reinterpret_cast<PerThreadAssertData*>( - Thread::GetThreadLocal(thread_local_key)); + base::Thread::GetThreadLocal(thread_local_key)); } - static Thread::LocalStorageKey thread_local_key; + static base::Thread::LocalStorageKey thread_local_key; 
PerThreadAssertData* data_; friend class Isolate; private: static void SetThreadLocalData(PerThreadAssertData* data) { - Thread::SetThreadLocal(thread_local_key, data); + base::Thread::SetThreadLocal(thread_local_key, data); } }; @@ -268,6 +247,21 @@ typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true> AllowAllocationFailure; +// Scope to document where we do not expect deoptimization. +typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false> + DisallowDeoptimization; + +// Scope to introduce an exception to DisallowDeoptimization. +typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, true> + AllowDeoptimization; + +// Scope to document where we do not expect deoptimization. +typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false> + DisallowCompilation; + +// Scope to introduce an exception to DisallowDeoptimization. +typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true> + AllowCompilation; } } // namespace v8::internal #endif // V8_ASSERT_SCOPE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/ast.cc nodejs-0.11.15/deps/v8/src/ast.cc --- nodejs-0.11.13/deps/v8/src/ast.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ast.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,21 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "ast.h" +#include "src/ast.h" #include <cmath> // For isfinite. -#include "builtins.h" -#include "code-stubs.h" -#include "contexts.h" -#include "conversions.h" -#include "hashmap.h" -#include "parser.h" -#include "property-details.h" -#include "property.h" -#include "scopes.h" -#include "string-stream.h" -#include "type-info.h" +#include "src/builtins.h" +#include "src/code-stubs.h" +#include "src/contexts.h" +#include "src/conversions.h" +#include "src/hashmap.h" +#include "src/parser.h" +#include "src/property.h" +#include "src/property-details.h" +#include "src/scopes.h" +#include "src/string-stream.h" +#include "src/type-info.h" namespace v8 { namespace internal { @@ -56,46 +33,46 @@ // Implementation of other node functionality. 
-bool Expression::IsSmiLiteral() { - return AsLiteral() != NULL && AsLiteral()->value()->IsSmi(); +bool Expression::IsSmiLiteral() const { + return IsLiteral() && AsLiteral()->value()->IsSmi(); } -bool Expression::IsStringLiteral() { - return AsLiteral() != NULL && AsLiteral()->value()->IsString(); +bool Expression::IsStringLiteral() const { + return IsLiteral() && AsLiteral()->value()->IsString(); } -bool Expression::IsNullLiteral() { - return AsLiteral() != NULL && AsLiteral()->value()->IsNull(); +bool Expression::IsNullLiteral() const { + return IsLiteral() && AsLiteral()->value()->IsNull(); } -bool Expression::IsUndefinedLiteral(Isolate* isolate) { - VariableProxy* var_proxy = AsVariableProxy(); +bool Expression::IsUndefinedLiteral(Isolate* isolate) const { + const VariableProxy* var_proxy = AsVariableProxy(); if (var_proxy == NULL) return false; Variable* var = var_proxy->var(); // The global identifier "undefined" is immutable. Everything // else could be reassigned. return var != NULL && var->location() == Variable::UNALLOCATED && - var_proxy->name()->Equals(isolate->heap()->undefined_string()); + var_proxy->raw_name()->IsOneByteEqualTo("undefined"); } VariableProxy::VariableProxy(Zone* zone, Variable* var, int position) : Expression(zone, position), - name_(var->name()), + name_(var->raw_name()), var_(NULL), // Will be set by the call to BindTo. is_this_(var->is_this()), - is_trivial_(false), - is_lvalue_(false), - interface_(var->interface()) { + is_assigned_(false), + interface_(var->interface()), + variable_feedback_slot_(kInvalidFeedbackSlot) { BindTo(var); } VariableProxy::VariableProxy(Zone* zone, - Handle<String> name, + const AstRawString* name, bool is_this, Interface* interface, int position) @@ -103,26 +80,24 @@ name_(name), var_(NULL), is_this_(is_this), - is_trivial_(false), - is_lvalue_(false), - interface_(interface) { - // Names must be canonicalized for fast equality checks. 
- ASSERT(name->IsInternalizedString()); + is_assigned_(false), + interface_(interface), + variable_feedback_slot_(kInvalidFeedbackSlot) { } void VariableProxy::BindTo(Variable* var) { - ASSERT(var_ == NULL); // must be bound only once - ASSERT(var != NULL); // must bind - ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface())); - ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name())); + DCHECK(var_ == NULL); // must be bound only once + DCHECK(var != NULL); // must bind + DCHECK(!FLAG_harmony_modules || interface_->IsUnified(var->interface())); + DCHECK((is_this() && var->is_this()) || name_ == var->raw_name()); // Ideally CONST-ness should match. However, this is very hard to achieve // because we don't know the exact semantics of conflicting (const and // non-const) multiple variable declarations, const vars introduced via // eval() etc. Const-ness and variable declarations are a complete mess // in JS. Sigh... var_ = var; - var->set_is_used(true); + var->set_is_used(); } @@ -202,18 +177,17 @@ } -ObjectLiteralProperty::ObjectLiteralProperty( - Zone* zone, Literal* key, Expression* value) { +ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone, + AstValueFactory* ast_value_factory, + Literal* key, Expression* value) { emit_store_ = true; key_ = key; value_ = value; - Object* k = *key->value(); - if (k->IsInternalizedString() && - zone->isolate()->heap()->proto_string()->Equals(String::cast(k))) { + if (key->raw_value()->EqualsString(ast_value_factory->proto_string())) { kind_ = PROTOTYPE; } else if (value_->AsMaterializedLiteral() != NULL) { kind_ = MATERIALIZED_LITERAL; - } else if (value_->AsLiteral() != NULL) { + } else if (value_->IsLiteral()) { kind_ = CONSTANT; } else { kind_ = COMPUTED; @@ -378,9 +352,9 @@ } else if (boilerplate_value->IsUninitialized()) { is_simple = false; JSObject::SetOwnElement( - array, i, handle(Smi::FromInt(0), isolate), SLOPPY); + array, i, handle(Smi::FromInt(0), isolate), SLOPPY).Assert(); 
} else { - JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY); + JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY).Assert(); } } @@ -411,7 +385,7 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression, Isolate* isolate) { - if (expression->AsLiteral() != NULL) { + if (expression->IsLiteral()) { return expression->AsLiteral()->value(); } if (CompileTimeValue::IsCompileTimeValue(expression)) { @@ -428,8 +402,8 @@ if (IsObjectLiteral()) { return AsObjectLiteral()->BuildConstantProperties(isolate); } - ASSERT(IsRegExpLiteral()); - ASSERT(depth() >= 1); // Depth should be initialized. + DCHECK(IsRegExpLiteral()); + DCHECK(depth() >= 1); // Depth should be initialized. } @@ -463,7 +437,7 @@ } -bool BinaryOperation::ResultOverwriteAllowed() { +bool BinaryOperation::ResultOverwriteAllowed() const { switch (op_) { case Token::COMMA: case Token::OR: @@ -520,7 +494,7 @@ UnaryOperation* maybe_unary = expr->AsUnaryOperation(); return maybe_unary != NULL && maybe_unary->op() == Token::VOID && - maybe_unary->expression()->AsLiteral() != NULL; + maybe_unary->expression()->IsLiteral(); } @@ -592,14 +566,9 @@ } -int Call::ComputeFeedbackSlotCount(Isolate* isolate) { +bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const { CallType call_type = GetCallType(isolate); - if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) { - // Call only uses a slot in some cases. - return 1; - } - - return 0; + return (call_type != POSSIBLY_EVAL_CALL); } @@ -624,7 +593,7 @@ LookupResult* lookup) { target_ = Handle<JSFunction>::null(); cell_ = Handle<Cell>::null(); - ASSERT(lookup->IsFound() && + DCHECK(lookup->IsFound() && lookup->type() == NORMAL && lookup->holder() == *global); cell_ = Handle<Cell>(global->GetPropertyCell(lookup)); @@ -832,51 +801,44 @@ // output formats are alike. 
class RegExpUnparser V8_FINAL : public RegExpVisitor { public: - explicit RegExpUnparser(Zone* zone); + RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {} void VisitCharacterRange(CharacterRange that); - SmartArrayPointer<const char> ToString() { return stream_.ToCString(); } #define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \ void* data) V8_OVERRIDE; FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE) #undef MAKE_CASE private: - StringStream* stream() { return &stream_; } - HeapStringAllocator alloc_; - StringStream stream_; + OStream& os_; Zone* zone_; }; -RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) { -} - - void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) { - stream()->Add("(|"); + os_ << "(|"; for (int i = 0; i < that->alternatives()->length(); i++) { - stream()->Add(" "); + os_ << " "; that->alternatives()->at(i)->Accept(this, data); } - stream()->Add(")"); + os_ << ")"; return NULL; } void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) { - stream()->Add("(:"); + os_ << "(:"; for (int i = 0; i < that->nodes()->length(); i++) { - stream()->Add(" "); + os_ << " "; that->nodes()->at(i)->Accept(this, data); } - stream()->Add(")"); + os_ << ")"; return NULL; } void RegExpUnparser::VisitCharacterRange(CharacterRange that) { - stream()->Add("%k", that.from()); + os_ << AsUC16(that.from()); if (!that.IsSingleton()) { - stream()->Add("-%k", that.to()); + os_ << "-" << AsUC16(that.to()); } } @@ -884,14 +846,13 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that, void* data) { - if (that->is_negated()) - stream()->Add("^"); - stream()->Add("["); + if (that->is_negated()) os_ << "^"; + os_ << "["; for (int i = 0; i < that->ranges(zone_)->length(); i++) { - if (i > 0) stream()->Add(" "); + if (i > 0) os_ << " "; VisitCharacterRange(that->ranges(zone_)->at(i)); } - stream()->Add("]"); + os_ << "]"; return NULL; } @@ -899,22 +860,22 @@ void* 
RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) { switch (that->assertion_type()) { case RegExpAssertion::START_OF_INPUT: - stream()->Add("@^i"); + os_ << "@^i"; break; case RegExpAssertion::END_OF_INPUT: - stream()->Add("@$i"); + os_ << "@$i"; break; case RegExpAssertion::START_OF_LINE: - stream()->Add("@^l"); + os_ << "@^l"; break; case RegExpAssertion::END_OF_LINE: - stream()->Add("@$l"); + os_ << "@$l"; break; case RegExpAssertion::BOUNDARY: - stream()->Add("@b"); + os_ << "@b"; break; case RegExpAssertion::NON_BOUNDARY: - stream()->Add("@B"); + os_ << "@B"; break; } return NULL; @@ -922,12 +883,12 @@ void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) { - stream()->Add("'"); + os_ << "'"; Vector<const uc16> chardata = that->data(); for (int i = 0; i < chardata.length(); i++) { - stream()->Add("%k", chardata[i]); + os_ << AsUC16(chardata[i]); } - stream()->Add("'"); + os_ << "'"; return NULL; } @@ -936,71 +897,70 @@ if (that->elements()->length() == 1) { that->elements()->at(0).tree()->Accept(this, data); } else { - stream()->Add("(!"); + os_ << "(!"; for (int i = 0; i < that->elements()->length(); i++) { - stream()->Add(" "); + os_ << " "; that->elements()->at(i).tree()->Accept(this, data); } - stream()->Add(")"); + os_ << ")"; } return NULL; } void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) { - stream()->Add("(# %i ", that->min()); + os_ << "(# " << that->min() << " "; if (that->max() == RegExpTree::kInfinity) { - stream()->Add("- "); + os_ << "- "; } else { - stream()->Add("%i ", that->max()); + os_ << that->max() << " "; } - stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n "); + os_ << (that->is_greedy() ? "g " : that->is_possessive() ? 
"p " : "n "); that->body()->Accept(this, data); - stream()->Add(")"); + os_ << ")"; return NULL; } void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) { - stream()->Add("(^ "); + os_ << "(^ "; that->body()->Accept(this, data); - stream()->Add(")"); + os_ << ")"; return NULL; } void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) { - stream()->Add("(-> "); - stream()->Add(that->is_positive() ? "+ " : "- "); + os_ << "(-> " << (that->is_positive() ? "+ " : "- "); that->body()->Accept(this, data); - stream()->Add(")"); + os_ << ")"; return NULL; } void* RegExpUnparser::VisitBackReference(RegExpBackReference* that, void* data) { - stream()->Add("(<- %i)", that->index()); + os_ << "(<- " << that->index() << ")"; return NULL; } void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) { - stream()->Put('%'); + os_ << '%'; return NULL; } -SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) { - RegExpUnparser unparser(zone); +OStream& RegExpTree::Print(OStream& os, Zone* zone) { // NOLINT + RegExpUnparser unparser(os, zone); Accept(&unparser, NULL); - return unparser.ToString(); + return os; } RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives) : alternatives_(alternatives) { - ASSERT(alternatives->length() > 1); + DCHECK(alternatives->length() > 1); RegExpTree* first_alternative = alternatives->at(0); min_match_ = first_alternative->min_match(); max_match_ = first_alternative->max_match(); @@ -1022,7 +982,7 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes) : nodes_(nodes) { - ASSERT(nodes->length() > 1); + DCHECK(nodes->length() > 1); min_match_ = 0; max_match_ = 0; for (int i = 0; i < nodes->length(); i++) { @@ -1061,10 +1021,16 @@ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ increase_node_count(); \ set_dont_optimize_reason(k##NodeType); \ - add_flag(kDontInline); \ add_flag(kDontSelfOptimize); \ } -#define DONT_SELFOPTIMIZE_NODE(NodeType) \ +#define 
DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \ + void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ + increase_node_count(); \ + add_slot_node(node); \ + set_dont_optimize_reason(k##NodeType); \ + add_flag(kDontSelfOptimize); \ + } +#define DONT_SELFOPTIMIZE_NODE(NodeType) \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ increase_node_count(); \ add_flag(kDontSelfOptimize); \ @@ -1079,7 +1045,6 @@ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ increase_node_count(); \ set_dont_optimize_reason(k##NodeType); \ - add_flag(kDontInline); \ add_flag(kDontSelfOptimize); \ add_flag(kDontCache); \ } @@ -1103,19 +1068,21 @@ REGULAR_NODE(FunctionLiteral) REGULAR_NODE(Assignment) REGULAR_NODE(Throw) -REGULAR_NODE(Property) REGULAR_NODE(UnaryOperation) REGULAR_NODE(CountOperation) REGULAR_NODE(BinaryOperation) REGULAR_NODE(CompareOperation) REGULAR_NODE(ThisFunction) + REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call) REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew) +REGULAR_NODE_WITH_FEEDBACK_SLOTS(Property) // In theory, for VariableProxy we'd have to add: -// if (node->var()->IsLookupSlot()) add_flag(kDontInline); +// if (node->var()->IsLookupSlot()) +// set_dont_optimize_reason(kReferenceToAVariableWhichRequiresDynamicLookup); // But node->var() is usually not bound yet at VariableProxy creation time, and // LOOKUP variables only result from constructs that cannot be inlined anyway. -REGULAR_NODE(VariableProxy) +REGULAR_NODE_WITH_FEEDBACK_SLOTS(VariableProxy) // We currently do not optimize any modules. 
DONT_OPTIMIZE_NODE(ModuleDeclaration) @@ -1125,36 +1092,30 @@ DONT_OPTIMIZE_NODE(ModulePath) DONT_OPTIMIZE_NODE(ModuleUrl) DONT_OPTIMIZE_NODE(ModuleStatement) -DONT_OPTIMIZE_NODE(Yield) DONT_OPTIMIZE_NODE(WithStatement) DONT_OPTIMIZE_NODE(TryCatchStatement) DONT_OPTIMIZE_NODE(TryFinallyStatement) DONT_OPTIMIZE_NODE(DebuggerStatement) DONT_OPTIMIZE_NODE(NativeFunctionLiteral) +DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield) + DONT_SELFOPTIMIZE_NODE(DoWhileStatement) DONT_SELFOPTIMIZE_NODE(WhileStatement) DONT_SELFOPTIMIZE_NODE(ForStatement) -DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement) DONT_SELFOPTIMIZE_NODE(ForOfStatement) +DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement) + DONT_CACHE_NODE(ModuleLiteral) void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { increase_node_count(); + add_slot_node(node); if (node->is_jsruntime()) { - // Don't try to inline JS runtime calls because we don't (currently) even - // optimize them. - add_flag(kDontInline); - } else if (node->function()->intrinsic_type == Runtime::INLINE && - (node->name()->IsOneByteEqualTo( - STATIC_ASCII_VECTOR("_ArgumentsLength")) || - node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) { - // Don't inline the %_ArgumentsLength or %_Arguments because their - // implementation will not work. There is no stack frame to get them - // from. - add_flag(kDontInline); + // Don't try to optimize JS runtime calls because we bailout on them. + set_dont_optimize_reason(kCallToAJavaScriptRuntimeFunction); } } @@ -1165,19 +1126,19 @@ Handle<String> Literal::ToString() { - if (value_->IsString()) return Handle<String>::cast(value_); - ASSERT(value_->IsNumber()); + if (value_->IsString()) return value_->AsString()->string(); + DCHECK(value_->IsNumber()); char arr[100]; Vector<char> buffer(arr, ARRAY_SIZE(arr)); const char* str; - if (value_->IsSmi()) { + if (value()->IsSmi()) { // Optimization only, the heap number case would subsume this. 
- OS::SNPrintF(buffer, "%d", Smi::cast(*value_)->value()); + SNPrintF(buffer, "%d", Smi::cast(*value())->value()); str = arr; } else { - str = DoubleToCString(value_->Number(), buffer); + str = DoubleToCString(value()->Number(), buffer); } - return isolate_->factory()->NewStringFromAscii(CStrVector(str)); + return isolate_->factory()->NewStringFromAsciiChecked(str); } diff -Nru nodejs-0.11.13/deps/v8/src/ast.h nodejs-0.11.15/deps/v8/src/ast.h --- nodejs-0.11.13/deps/v8/src/ast.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ast.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,50 +1,28 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_AST_H_ #define V8_AST_H_ -#include "v8.h" +#include "src/v8.h" -#include "assembler.h" -#include "factory.h" -#include "feedback-slots.h" -#include "isolate.h" -#include "jsregexp.h" -#include "list-inl.h" -#include "runtime.h" -#include "small-pointer-list.h" -#include "smart-pointers.h" -#include "token.h" -#include "types.h" -#include "utils.h" -#include "variables.h" -#include "interface.h" -#include "zone-inl.h" +#include "src/assembler.h" +#include "src/ast-value-factory.h" +#include "src/factory.h" +#include "src/feedback-slots.h" +#include "src/interface.h" +#include "src/isolate.h" +#include "src/jsregexp.h" +#include "src/list-inl.h" +#include "src/runtime.h" +#include "src/small-pointer-list.h" +#include "src/smart-pointers.h" +#include "src/token.h" +#include "src/types.h" +#include "src/utils.h" +#include "src/variables.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { @@ -134,6 +112,7 @@ class Expression; class IterationStatement; class MaterializedLiteral; +class OStream; class Statement; class TargetCollector; class TypeFeedbackOracle; @@ -171,7 +150,6 @@ enum AstPropertiesFlag { - kDontInline, kDontSelfOptimize, kDontSoftInline, kDontCache @@ -182,15 +160,21 @@ public: class Flags : public EnumSet<AstPropertiesFlag, int> {}; - AstProperties() : node_count_(0) {} +AstProperties() : 
node_count_(0), feedback_slots_(0) {} Flags* flags() { return &flags_; } int node_count() { return node_count_; } void add_node_count(int count) { node_count_ += count; } + int feedback_slots() const { return feedback_slots_; } + void increase_feedback_slots(int count) { + feedback_slots_ += count; + } + private: Flags flags_; int node_count_; + int feedback_slots_; }; @@ -215,9 +199,14 @@ int position() const { return position_; } // Type testing & conversion functions overridden by concrete subclasses. -#define DECLARE_NODE_FUNCTIONS(type) \ - bool Is##type() { return node_type() == AstNode::k##type; } \ - type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; } +#define DECLARE_NODE_FUNCTIONS(type) \ + bool Is##type() const { return node_type() == AstNode::k##type; } \ + type* As##type() { \ + return Is##type() ? reinterpret_cast<type*>(this) : NULL; \ + } \ + const type* As##type() const { \ + return Is##type() ? reinterpret_cast<const type*>(this) : NULL; \ + } AST_NODE_LIST(DECLARE_NODE_FUNCTIONS) #undef DECLARE_NODE_FUNCTIONS @@ -276,8 +265,7 @@ int length() const { return list_.length(); } void AddMapIfMissing(Handle<Map> map, Zone* zone) { - map = Map::CurrentMapForDeprecated(map); - if (map.is_null()) return; + if (!Map::TryUpdate(map).ToHandle(&map)) return; for (int i = 0; i < length(); ++i) { if (at(i).is_identical_to(map)) return; } @@ -325,37 +313,42 @@ kTest }; - virtual bool IsValidLeftHandSide() { return false; } + virtual bool IsValidReferenceExpression() const { return false; } // Helpers for ToBoolean conversion. - virtual bool ToBooleanIsTrue() { return false; } - virtual bool ToBooleanIsFalse() { return false; } + virtual bool ToBooleanIsTrue() const { return false; } + virtual bool ToBooleanIsFalse() const { return false; } // Symbols that cannot be parsed as array indices are considered property // names. 
We do not treat symbols that can be array indexes as property // names because [] for string objects is handled only by keyed ICs. - virtual bool IsPropertyName() { return false; } + virtual bool IsPropertyName() const { return false; } // True iff the result can be safely overwritten (to avoid allocation). // False for operations that can return one of their operands. - virtual bool ResultOverwriteAllowed() { return false; } + virtual bool ResultOverwriteAllowed() const { return false; } // True iff the expression is a literal represented as a smi. - bool IsSmiLiteral(); + bool IsSmiLiteral() const; // True iff the expression is a string literal. - bool IsStringLiteral(); + bool IsStringLiteral() const; // True iff the expression is the null literal. - bool IsNullLiteral(); + bool IsNullLiteral() const; // True if we can prove that the expression is the undefined literal. - bool IsUndefinedLiteral(Isolate* isolate); + bool IsUndefinedLiteral(Isolate* isolate) const; // Expression type bounds - Bounds bounds() { return bounds_; } + Bounds bounds() const { return bounds_; } void set_bounds(Bounds bounds) { bounds_ = bounds; } + // Whether the expression is parenthesized + unsigned parenthesization_level() const { return parenthesization_level_; } + bool is_parenthesized() const { return parenthesization_level_ > 0; } + void increase_parenthesization_level() { ++parenthesization_level_; } + // Type feedback information for assignments and properties. 
virtual bool IsMonomorphic() { UNREACHABLE(); @@ -380,14 +373,19 @@ protected: Expression(Zone* zone, int pos) : AstNode(pos), + zone_(zone), bounds_(Bounds::Unbounded(zone)), + parenthesization_level_(0), id_(GetNextId(zone)), test_id_(GetNextId(zone)) {} void set_to_boolean_types(byte types) { to_boolean_types_ = types; } + Zone* zone_; + private: Bounds bounds_; byte to_boolean_types_; + unsigned parenthesization_level_; const BailoutId id_; const TypeFeedbackId test_id_; @@ -403,7 +401,7 @@ // The labels associated with this statement. May be NULL; // if it is != NULL, guaranteed to contain at least one entry. - ZoneStringList* labels() const { return labels_; } + ZoneList<const AstRawString*>* labels() const { return labels_; } // Type testing & conversion. virtual BreakableStatement* AsBreakableStatement() V8_FINAL V8_OVERRIDE { @@ -423,19 +421,19 @@ protected: BreakableStatement( - Zone* zone, ZoneStringList* labels, + Zone* zone, ZoneList<const AstRawString*>* labels, BreakableType breakable_type, int position) : Statement(zone, position), labels_(labels), breakable_type_(breakable_type), entry_id_(GetNextId(zone)), exit_id_(GetNextId(zone)) { - ASSERT(labels == NULL || labels->length() > 0); + DCHECK(labels == NULL || labels->length() > 0); } private: - ZoneStringList* labels_; + ZoneList<const AstRawString*>* labels_; BreakableType breakable_type_; Label break_target_; const BailoutId entry_id_; @@ -454,6 +452,8 @@ ZoneList<Statement*>* statements() { return &statements_; } bool is_initializer_block() const { return is_initializer_block_; } + BailoutId DeclsId() const { return decls_id_; } + virtual bool IsJump() const V8_OVERRIDE { return !statements_.is_empty() && statements_.last()->IsJump() && labels() == NULL; // Good enough as an approximation... 
@@ -464,19 +464,21 @@ protected: Block(Zone* zone, - ZoneStringList* labels, + ZoneList<const AstRawString*>* labels, int capacity, bool is_initializer_block, int pos) : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos), statements_(capacity, zone), is_initializer_block_(is_initializer_block), + decls_id_(GetNextId(zone)), scope_(NULL) { } private: ZoneList<Statement*> statements_; bool is_initializer_block_; + const BailoutId decls_id_; Scope* scope_; }; @@ -499,7 +501,7 @@ proxy_(proxy), mode_(mode), scope_(scope) { - ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); } private: @@ -550,8 +552,8 @@ : Declaration(zone, proxy, mode, scope, pos), fun_(fun) { // At the moment there are no "const functions" in JavaScript... - ASSERT(mode == VAR || mode == LET); - ASSERT(fun != NULL); + DCHECK(mode == VAR || mode == LET); + DCHECK(fun != NULL); } private: @@ -671,18 +673,15 @@ DECLARE_NODE_TYPE(ModulePath) Module* module() const { return module_; } - Handle<String> name() const { return name_; } + Handle<String> name() const { return name_->string(); } protected: - ModulePath(Zone* zone, Module* module, Handle<String> name, int pos) - : Module(zone, pos), - module_(module), - name_(name) { - } + ModulePath(Zone* zone, Module* module, const AstRawString* name, int pos) + : Module(zone, pos), module_(module), name_(name) {} private: Module* module_; - Handle<String> name_; + const AstRawString* name_; }; @@ -739,7 +738,7 @@ Label* continue_target() { return &continue_target_; } protected: - IterationStatement(Zone* zone, ZoneStringList* labels, int pos) + IterationStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos), body_(NULL), osr_entry_id_(GetNextId(zone)) { @@ -773,7 +772,7 @@ BailoutId BackEdgeId() const { return back_edge_id_; } protected: - DoWhileStatement(Zone* zone, ZoneStringList* labels, int pos) + DoWhileStatement(Zone* zone, ZoneList<const 
AstRawString*>* labels, int pos) : IterationStatement(zone, labels, pos), cond_(NULL), continue_id_(GetNextId(zone)), @@ -810,7 +809,7 @@ BailoutId BodyId() const { return body_id_; } protected: - WhileStatement(Zone* zone, ZoneStringList* labels, int pos) + WhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) : IterationStatement(zone, labels, pos), cond_(NULL), may_have_function_literal_(true), @@ -861,7 +860,7 @@ void set_loop_variable(Variable* var) { loop_variable_ = var; } protected: - ForStatement(Zone* zone, ZoneStringList* labels, int pos) + ForStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) : IterationStatement(zone, labels, pos), init_(NULL), cond_(NULL), @@ -903,11 +902,8 @@ Expression* subject() const { return subject_; } protected: - ForEachStatement(Zone* zone, ZoneStringList* labels, int pos) - : IterationStatement(zone, labels, pos), - each_(NULL), - subject_(NULL) { - } + ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) + : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {} private: Expression* each_; @@ -925,12 +921,11 @@ } // Type feedback information. 
- virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; } - virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; } + virtual int ComputeFeedbackSlotCount() { return 1; } virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; } int ForInFeedbackSlot() { - ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot); + DCHECK(for_in_feedback_slot_ != kInvalidFeedbackSlot); return for_in_feedback_slot_; } @@ -944,7 +939,7 @@ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; } protected: - ForInStatement(Zone* zone, ZoneStringList* labels, int pos) + ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) : ForEachStatement(zone, labels, pos), for_in_type_(SLOW_FOR_IN), for_in_feedback_slot_(kInvalidFeedbackSlot), @@ -981,7 +976,7 @@ return subject(); } - // var iterator = iterable; + // var iterator = subject[Symbol.iterator](); Expression* assign_iterator() const { return assign_iterator_; } @@ -1007,7 +1002,7 @@ BailoutId BackEdgeId() const { return back_edge_id_; } protected: - ForOfStatement(Zone* zone, ZoneStringList* labels, int pos) + ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) : ForEachStatement(zone, labels, pos), assign_iterator_(NULL), next_result_(NULL), @@ -1167,7 +1162,7 @@ ZoneList<CaseClause*>* cases() const { return cases_; } protected: - SwitchStatement(Zone* zone, ZoneStringList* labels, int pos) + SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos) : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos), tag_(NULL), cases_(NULL) { } @@ -1346,41 +1341,29 @@ public: DECLARE_NODE_TYPE(Literal) - virtual bool IsPropertyName() V8_OVERRIDE { - if (value_->IsInternalizedString()) { - uint32_t ignored; - return !String::cast(*value_)->AsArrayIndex(&ignored); - } - return false; + virtual bool IsPropertyName() const V8_OVERRIDE { + return value_->IsPropertyName(); } Handle<String> AsPropertyName() { - 
ASSERT(IsPropertyName()); - return Handle<String>::cast(value_); + DCHECK(IsPropertyName()); + return Handle<String>::cast(value()); } - virtual bool ToBooleanIsTrue() V8_OVERRIDE { - return value_->BooleanValue(); - } - virtual bool ToBooleanIsFalse() V8_OVERRIDE { - return !value_->BooleanValue(); + const AstRawString* AsRawPropertyName() { + DCHECK(IsPropertyName()); + return value_->AsString(); } - // Identity testers. - bool IsNull() const { - ASSERT(!value_.is_null()); - return value_->IsNull(); - } - bool IsTrue() const { - ASSERT(!value_.is_null()); - return value_->IsTrue(); + virtual bool ToBooleanIsTrue() const V8_OVERRIDE { + return value()->BooleanValue(); } - bool IsFalse() const { - ASSERT(!value_.is_null()); - return value_->IsFalse(); + virtual bool ToBooleanIsFalse() const V8_OVERRIDE { + return !value()->BooleanValue(); } - Handle<Object> value() const { return value_; } + Handle<Object> value() const { return value_->value(); } + const AstValue* raw_value() const { return value_; } // Support for using Literal as a HashMap key. NOTE: Currently, this works // only for string and number literals! @@ -1389,13 +1372,13 @@ static bool Match(void* literal1, void* literal2) { Handle<String> s1 = static_cast<Literal*>(literal1)->ToString(); Handle<String> s2 = static_cast<Literal*>(literal2)->ToString(); - return s1->Equals(*s2); + return String::Equals(s1, s2); } TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); } protected: - Literal(Zone* zone, Handle<Object> value, int position) + Literal(Zone* zone, const AstValue* value, int position) : Expression(zone, position), value_(value), isolate_(zone->isolate()) { } @@ -1403,7 +1386,7 @@ private: Handle<String> ToString(); - Handle<Object> value_; + const AstValue* value_; // TODO(dcarney): remove. this is only needed for Match and Hash. Isolate* isolate_; }; @@ -1418,7 +1401,7 @@ int depth() const { // only callable after initialization. 
- ASSERT(depth_ >= 1); + DCHECK(depth_ >= 1); return depth_; } @@ -1438,7 +1421,7 @@ friend class CompileTimeValue; void set_depth(int depth) { - ASSERT(depth >= 1); + DCHECK(depth >= 1); depth_ = depth; } @@ -1474,7 +1457,8 @@ PROTOTYPE // Property is __proto__. }; - ObjectLiteralProperty(Zone* zone, Literal* key, Expression* value); + ObjectLiteralProperty(Zone* zone, AstValueFactory* ast_value_factory, + Literal* key, Expression* value); Literal* key() { return key_; } Expression* value() { return value_; } @@ -1532,6 +1516,13 @@ // marked expressions, no store code is emitted. void CalculateEmitStore(Zone* zone); + // Assemble bitfield of flags for the CreateObjectLiteral helper. + int ComputeFlags() const { + int flags = fast_elements() ? kFastElements : kNoFlags; + flags |= has_function() ? kHasFunction : kNoFlags; + return flags; + } + enum Flags { kNoFlags = 0, kFastElements = 1, @@ -1573,13 +1564,13 @@ public: DECLARE_NODE_TYPE(RegExpLiteral) - Handle<String> pattern() const { return pattern_; } - Handle<String> flags() const { return flags_; } + Handle<String> pattern() const { return pattern_->string(); } + Handle<String> flags() const { return flags_->string(); } protected: RegExpLiteral(Zone* zone, - Handle<String> pattern, - Handle<String> flags, + const AstRawString* pattern, + const AstRawString* flags, int literal_index, int pos) : MaterializedLiteral(zone, literal_index, pos), @@ -1589,8 +1580,8 @@ } private: - Handle<String> pattern_; - Handle<String> flags_; + const AstRawString* pattern_; + const AstRawString* flags_; }; @@ -1611,6 +1602,13 @@ // Populate the constant elements fixed array. void BuildConstantElements(Isolate* isolate); + // Assemble bitfield of flags for the CreateArrayLiteral helper. + int ComputeFlags() const { + int flags = depth() == 1 ? 
kShallowElements : kNoFlags; + flags |= ArrayLiteral::kDisableMementos; + return flags; + } + enum Flags { kNoFlags = 0, kShallowElements = 1, @@ -1633,61 +1631,58 @@ }; -class VariableProxy V8_FINAL : public Expression { +class VariableProxy V8_FINAL : public Expression, public FeedbackSlotInterface { public: DECLARE_NODE_TYPE(VariableProxy) - virtual bool IsValidLeftHandSide() V8_OVERRIDE { - return var_ == NULL ? true : var_->IsValidLeftHandSide(); + virtual bool IsValidReferenceExpression() const V8_OVERRIDE { + return var_ == NULL ? true : var_->IsValidReference(); } - bool IsVariable(Handle<String> n) { - return !is_this() && name().is_identical_to(n); - } - - bool IsArguments() { return var_ != NULL && var_->is_arguments(); } - - bool IsLValue() { - return is_lvalue_; - } + bool IsArguments() const { return var_ != NULL && var_->is_arguments(); } - Handle<String> name() const { return name_; } + Handle<String> name() const { return name_->string(); } + const AstRawString* raw_name() const { return name_; } Variable* var() const { return var_; } bool is_this() const { return is_this_; } Interface* interface() const { return interface_; } - - void MarkAsTrivial() { is_trivial_ = true; } - void MarkAsLValue() { is_lvalue_ = true; } + bool is_assigned() const { return is_assigned_; } + void set_is_assigned() { is_assigned_ = true; } // Bind this proxy to the variable var. Interfaces must match. void BindTo(Variable* var); + virtual int ComputeFeedbackSlotCount() { return FLAG_vector_ics ? 
1 : 0; } + virtual void SetFirstFeedbackSlot(int slot) { + variable_feedback_slot_ = slot; + } + + int VariableFeedbackSlot() { return variable_feedback_slot_; } + protected: VariableProxy(Zone* zone, Variable* var, int position); VariableProxy(Zone* zone, - Handle<String> name, + const AstRawString* name, bool is_this, Interface* interface, int position); - Handle<String> name_; + const AstRawString* name_; Variable* var_; // resolved variable, or NULL bool is_this_; - bool is_trivial_; - // True if this variable proxy is being used in an assignment - // or with a increment/decrement operator. - bool is_lvalue_; + bool is_assigned_; Interface* interface_; + int variable_feedback_slot_; }; -class Property V8_FINAL : public Expression { +class Property V8_FINAL : public Expression, public FeedbackSlotInterface { public: DECLARE_NODE_TYPE(Property) - virtual bool IsValidLeftHandSide() V8_OVERRIDE { return true; } + virtual bool IsValidReferenceExpression() const V8_OVERRIDE { return true; } Expression* obj() const { return obj_; } Expression* key() const { return key_; } @@ -1695,7 +1690,6 @@ BailoutId LoadId() const { return load_id_; } bool IsStringAccess() const { return is_string_access_; } - bool IsFunctionPrototype() const { return is_function_prototype_; } // Type feedback information. virtual bool IsMonomorphic() V8_OVERRIDE { @@ -1713,36 +1707,39 @@ } void set_is_uninitialized(bool b) { is_uninitialized_ = b; } void set_is_string_access(bool b) { is_string_access_ = b; } - void set_is_function_prototype(bool b) { is_function_prototype_ = b; } void mark_for_call() { is_for_call_ = true; } bool IsForCall() { return is_for_call_; } TypeFeedbackId PropertyFeedbackId() { return reuse(id()); } + virtual int ComputeFeedbackSlotCount() { return FLAG_vector_ics ? 
1 : 0; } + virtual void SetFirstFeedbackSlot(int slot) { + property_feedback_slot_ = slot; + } + + int PropertyFeedbackSlot() const { return property_feedback_slot_; } + protected: - Property(Zone* zone, - Expression* obj, - Expression* key, - int pos) + Property(Zone* zone, Expression* obj, Expression* key, int pos) : Expression(zone, pos), obj_(obj), key_(key), load_id_(GetNextId(zone)), + property_feedback_slot_(kInvalidFeedbackSlot), is_for_call_(false), is_uninitialized_(false), - is_string_access_(false), - is_function_prototype_(false) { } + is_string_access_(false) {} private: Expression* obj_; Expression* key_; const BailoutId load_id_; + int property_feedback_slot_; SmallMapList receiver_types_; bool is_for_call_ : 1; bool is_uninitialized_ : 1; bool is_string_access_ : 1; - bool is_function_prototype_ : 1; }; @@ -1754,8 +1751,7 @@ ZoneList<Expression*>* arguments() const { return arguments_; } // Type feedback information. - virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; } - virtual int ComputeFeedbackSlotCount(Isolate* isolate); + virtual int ComputeFeedbackSlotCount() { return 1; } virtual void SetFirstFeedbackSlot(int slot) { call_feedback_slot_ = slot; } @@ -1779,11 +1775,25 @@ return !target_.is_null(); } + bool global_call() const { + VariableProxy* proxy = expression_->AsVariableProxy(); + return proxy != NULL && proxy->var()->IsUnallocated(); + } + + bool known_global_function() const { + return global_call() && !target_.is_null(); + } + Handle<JSFunction> target() { return target_; } Handle<Cell> cell() { return cell_; } + Handle<AllocationSite> allocation_site() { return allocation_site_; } + void set_target(Handle<JSFunction> target) { target_ = target; } + void set_allocation_site(Handle<AllocationSite> site) { + allocation_site_ = site; + } bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup); BailoutId ReturnId() const { return return_id_; } @@ -1798,6 +1808,7 @@ // Helpers to determine how to 
handle the call. CallType GetCallType(Isolate* isolate) const; + bool IsUsingCallFeedbackSlot(Isolate* isolate) const; #ifdef DEBUG // Used to assert that the FullCodeGenerator records the return site. @@ -1825,6 +1836,7 @@ Handle<JSFunction> target_; Handle<Cell> cell_; + Handle<AllocationSite> allocation_site_; int call_feedback_slot_; const BailoutId return_id_; @@ -1839,8 +1851,7 @@ ZoneList<Expression*>* arguments() const { return arguments_; } // Type feedback information. - virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; } - virtual int ComputeFeedbackSlotCount(Isolate* isolate) { + virtual int ComputeFeedbackSlotCount() { return FLAG_pretenuring_call_new ? 2 : 1; } virtual void SetFirstFeedbackSlot(int slot) { @@ -1848,12 +1859,12 @@ } int CallNewFeedbackSlot() { - ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot); + DCHECK(callnew_feedback_slot_ != kInvalidFeedbackSlot); return callnew_feedback_slot_; } int AllocationSiteFeedbackSlot() { - ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot); - ASSERT(FLAG_pretenuring_call_new); + DCHECK(callnew_feedback_slot_ != kInvalidFeedbackSlot); + DCHECK(FLAG_pretenuring_call_new); return callnew_feedback_slot_ + 1; } @@ -1900,32 +1911,48 @@ // language construct. Instead it is used to call a C or JS function // with a set of arguments. This is used from the builtins that are // implemented in JavaScript (see "v8natives.js"). -class CallRuntime V8_FINAL : public Expression { +class CallRuntime V8_FINAL : public Expression, public FeedbackSlotInterface { public: DECLARE_NODE_TYPE(CallRuntime) - Handle<String> name() const { return name_; } + Handle<String> name() const { return raw_name_->string(); } + const AstRawString* raw_name() const { return raw_name_; } const Runtime::Function* function() const { return function_; } ZoneList<Expression*>* arguments() const { return arguments_; } bool is_jsruntime() const { return function_ == NULL; } + // Type feedback information. 
+ virtual int ComputeFeedbackSlotCount() { + return (FLAG_vector_ics && is_jsruntime()) ? 1 : 0; + } + virtual void SetFirstFeedbackSlot(int slot) { + callruntime_feedback_slot_ = slot; + } + + int CallRuntimeFeedbackSlot() { + DCHECK(!is_jsruntime() || + callruntime_feedback_slot_ != kInvalidFeedbackSlot); + return callruntime_feedback_slot_; + } + TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); } protected: CallRuntime(Zone* zone, - Handle<String> name, + const AstRawString* name, const Runtime::Function* function, ZoneList<Expression*>* arguments, int pos) : Expression(zone, pos), - name_(name), + raw_name_(name), function_(function), arguments_(arguments) { } private: - Handle<String> name_; + const AstRawString* raw_name_; const Runtime::Function* function_; ZoneList<Expression*>* arguments_; + int callruntime_feedback_slot_; }; @@ -1952,7 +1979,7 @@ expression_(expression), materialize_true_id_(GetNextId(zone)), materialize_false_id_(GetNextId(zone)) { - ASSERT(Token::IsUnaryOp(op)); + DCHECK(Token::IsUnaryOp(op)); } private: @@ -1970,7 +1997,7 @@ public: DECLARE_NODE_TYPE(BinaryOperation) - virtual bool ResultOverwriteAllowed(); + virtual bool ResultOverwriteAllowed() const V8_OVERRIDE; Token::Value op() const { return op_; } Expression* left() const { return left_; } @@ -2000,7 +2027,7 @@ left_(left), right_(right), right_id_(GetNextId(zone)) { - ASSERT(Token::IsBinaryOp(op)); + DCHECK(Token::IsBinaryOp(op)); } private: @@ -2108,7 +2135,7 @@ left_(left), right_(right), combined_type_(Type::None(zone)) { - ASSERT(Token::IsCompareOp(op)); + DCHECK(Token::IsCompareOp(op)); } private: @@ -2198,7 +2225,7 @@ template<class Visitor> void Init(Zone* zone, AstNodeFactory<Visitor>* factory) { - ASSERT(Token::IsAssignmentOp(op_)); + DCHECK(Token::IsAssignmentOp(op_)); if (is_compound()) { binary_operation_ = factory->NewBinaryOperation( binary_op(), target_, value_, position() + 1); @@ -2219,7 +2246,7 @@ }; -class Yield V8_FINAL : public Expression 
{ +class Yield V8_FINAL : public Expression, public FeedbackSlotInterface { public: DECLARE_NODE_TYPE(Yield) @@ -2238,14 +2265,37 @@ // locates the catch handler in the handler table, and is equivalent to // TryCatchStatement::index(). int index() const { - ASSERT(yield_kind() == DELEGATING); + DCHECK(yield_kind() == DELEGATING); return index_; } void set_index(int index) { - ASSERT(yield_kind() == DELEGATING); + DCHECK(yield_kind() == DELEGATING); index_ = index; } + // Type feedback information. + virtual int ComputeFeedbackSlotCount() { + return (FLAG_vector_ics && yield_kind() == DELEGATING) ? 3 : 0; + } + virtual void SetFirstFeedbackSlot(int slot) { + yield_first_feedback_slot_ = slot; + } + + int KeyedLoadFeedbackSlot() { + DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot); + return yield_first_feedback_slot_; + } + + int DoneFeedbackSlot() { + DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot); + return yield_first_feedback_slot_ + 1; + } + + int ValueFeedbackSlot() { + DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot); + return yield_first_feedback_slot_ + 2; + } + protected: Yield(Zone* zone, Expression* generator_object, @@ -2256,13 +2306,15 @@ generator_object_(generator_object), expression_(expression), yield_kind_(yield_kind), - index_(-1) { } + index_(-1), + yield_first_feedback_slot_(kInvalidFeedbackSlot) { } private: Expression* generator_object_; Expression* expression_; Kind yield_kind_; int index_; + int yield_first_feedback_slot_; }; @@ -2304,14 +2356,22 @@ kNotParenthesized }; - enum IsGeneratorFlag { - kIsGenerator, - kNotGenerator + enum KindFlag { + kNormalFunction, + kArrowFunction, + kGeneratorFunction + }; + + enum ArityRestriction { + NORMAL_ARITY, + GETTER_ARITY, + SETTER_ARITY }; DECLARE_NODE_TYPE(FunctionLiteral) - Handle<String> name() const { return name_; } + Handle<String> name() const { return raw_name_->string(); } + const AstRawString* raw_name() const { return raw_name_; } Scope* scope() const { 
return scope_; } ZoneList<Statement*>* body() const { return body_; } void set_function_token_position(int pos) { function_token_position_ = pos; } @@ -2334,13 +2394,37 @@ void InitializeSharedInfo(Handle<Code> code); Handle<String> debug_name() const { - if (name_->length() > 0) return name_; + if (raw_name_ != NULL && !raw_name_->IsEmpty()) { + return raw_name_->string(); + } return inferred_name(); } - Handle<String> inferred_name() const { return inferred_name_; } + Handle<String> inferred_name() const { + if (!inferred_name_.is_null()) { + DCHECK(raw_inferred_name_ == NULL); + return inferred_name_; + } + if (raw_inferred_name_ != NULL) { + return raw_inferred_name_->string(); + } + UNREACHABLE(); + return Handle<String>(); + } + + // Only one of {set_inferred_name, set_raw_inferred_name} should be called. void set_inferred_name(Handle<String> inferred_name) { + DCHECK(!inferred_name.is_null()); inferred_name_ = inferred_name; + DCHECK(raw_inferred_name_== NULL || raw_inferred_name_->IsEmpty()); + raw_inferred_name_ = NULL; + } + + void set_raw_inferred_name(const AstString* raw_inferred_name) { + DCHECK(raw_inferred_name != NULL); + raw_inferred_name_ = raw_inferred_name; + DCHECK(inferred_name_.is_null()); + inferred_name_ = Handle<String>(); } // shared_info may be null if it's not cached in full code. 
@@ -2367,23 +2451,16 @@ bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized); } - bool is_generator() { - return IsGenerator::decode(bitfield_) == kIsGenerator; - } + bool is_generator() { return IsGenerator::decode(bitfield_); } + bool is_arrow() { return IsArrow::decode(bitfield_); } int ast_node_count() { return ast_properties_.node_count(); } AstProperties::Flags* flags() { return ast_properties_.flags(); } void set_ast_properties(AstProperties* ast_properties) { ast_properties_ = *ast_properties; } - void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) { - slot_processor_ = *slot_processor; - } - void ProcessFeedbackSlots(Isolate* isolate) { - slot_processor_.ProcessFeedbackSlots(isolate); - } int slot_count() { - return slot_processor_.slot_count(); + return ast_properties_.feedback_slots(); } bool dont_optimize() { return dont_optimize_reason_ != kNoReason; } BailoutReason dont_optimize_reason() { return dont_optimize_reason_; } @@ -2392,49 +2469,45 @@ } protected: - FunctionLiteral(Zone* zone, - Handle<String> name, - Scope* scope, - ZoneList<Statement*>* body, - int materialized_literal_count, - int expected_property_count, - int handler_count, - int parameter_count, - FunctionType function_type, + FunctionLiteral(Zone* zone, const AstRawString* name, + AstValueFactory* ast_value_factory, Scope* scope, + ZoneList<Statement*>* body, int materialized_literal_count, + int expected_property_count, int handler_count, + int parameter_count, FunctionType function_type, ParameterFlag has_duplicate_parameters, IsFunctionFlag is_function, - IsParenthesizedFlag is_parenthesized, - IsGeneratorFlag is_generator, + IsParenthesizedFlag is_parenthesized, KindFlag kind, int position) : Expression(zone, position), - name_(name), + raw_name_(name), scope_(scope), body_(body), - inferred_name_(zone->isolate()->factory()->empty_string()), + raw_inferred_name_(ast_value_factory->empty_string()), dont_optimize_reason_(kNoReason), 
materialized_literal_count_(materialized_literal_count), expected_property_count_(expected_property_count), handler_count_(handler_count), parameter_count_(parameter_count), function_token_position_(RelocInfo::kNoPosition) { - bitfield_ = - IsExpression::encode(function_type != DECLARATION) | - IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) | - Pretenure::encode(false) | - HasDuplicateParameters::encode(has_duplicate_parameters) | - IsFunction::encode(is_function) | - IsParenthesized::encode(is_parenthesized) | - IsGenerator::encode(is_generator); + bitfield_ = IsExpression::encode(function_type != DECLARATION) | + IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) | + Pretenure::encode(false) | + HasDuplicateParameters::encode(has_duplicate_parameters) | + IsFunction::encode(is_function) | + IsParenthesized::encode(is_parenthesized) | + IsGenerator::encode(kind == kGeneratorFunction) | + IsArrow::encode(kind == kArrowFunction); } private: + const AstRawString* raw_name_; Handle<String> name_; Handle<SharedFunctionInfo> shared_info_; Scope* scope_; ZoneList<Statement*>* body_; + const AstString* raw_inferred_name_; Handle<String> inferred_name_; AstProperties ast_properties_; - DeferredFeedbackSlotProcessor slot_processor_; BailoutReason dont_optimize_reason_; int materialized_literal_count_; @@ -2450,7 +2523,8 @@ class HasDuplicateParameters: public BitField<ParameterFlag, 3, 1> {}; class IsFunction: public BitField<IsFunctionFlag, 4, 1> {}; class IsParenthesized: public BitField<IsParenthesizedFlag, 5, 1> {}; - class IsGenerator: public BitField<IsGeneratorFlag, 6, 1> {}; + class IsGenerator : public BitField<bool, 6, 1> {}; + class IsArrow : public BitField<bool, 7, 1> {}; }; @@ -2458,16 +2532,16 @@ public: DECLARE_NODE_TYPE(NativeFunctionLiteral) - Handle<String> name() const { return name_; } + Handle<String> name() const { return name_->string(); } v8::Extension* extension() const { return extension_; } protected: - NativeFunctionLiteral( 
- Zone* zone, Handle<String> name, v8::Extension* extension, int pos) + NativeFunctionLiteral(Zone* zone, const AstRawString* name, + v8::Extension* extension, int pos) : Expression(zone, pos), name_(name), extension_(extension) {} private: - Handle<String> name_; + const AstRawString* name_; v8::Extension* extension_; }; @@ -2513,7 +2587,7 @@ // expression. virtual Interval CaptureRegisters() { return Interval::Empty(); } virtual void AppendToText(RegExpText* text, Zone* zone); - SmartArrayPointer<const char> ToString(Zone* zone); + OStream& Print(OStream& os, Zone* zone); // NOLINT #define MAKE_ASTYPE(Name) \ virtual RegExp##Name* As##Name(); \ virtual bool Is##Name(); @@ -2909,13 +2983,10 @@ class AstConstructionVisitor BASE_EMBEDDED { public: - explicit AstConstructionVisitor(Zone* zone) - : dont_optimize_reason_(kNoReason), - zone_(zone) { } + AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { } AstProperties* ast_properties() { return &properties_; } BailoutReason dont_optimize_reason() { return dont_optimize_reason_; } - DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; } private: template<class> friend class AstNodeFactory; @@ -2933,20 +3004,20 @@ } void add_slot_node(FeedbackSlotInterface* slot_node) { - slot_processor_.add_slot_node(zone_, slot_node); + int count = slot_node->ComputeFeedbackSlotCount(); + if (count > 0) { + slot_node->SetFirstFeedbackSlot(properties_.feedback_slots()); + properties_.increase_feedback_slots(count); + } } AstProperties properties_; - DeferredFeedbackSlotProcessor slot_processor_; BailoutReason dont_optimize_reason_; - Zone* zone_; }; class AstNullVisitor BASE_EMBEDDED { public: - explicit AstNullVisitor(Zone* zone) {} - // Node visitors. 
#define DEF_VISIT(type) \ void Visit##type(type* node) {} @@ -2962,9 +3033,8 @@ template<class Visitor> class AstNodeFactory V8_FINAL BASE_EMBEDDED { public: - explicit AstNodeFactory(Zone* zone) - : zone_(zone), - visitor_(zone) { } + explicit AstNodeFactory(Zone* zone, AstValueFactory* ast_value_factory) + : zone_(zone), ast_value_factory_(ast_value_factory) {} Visitor* visitor() { return &visitor_; } @@ -3028,8 +3098,8 @@ VISIT_AND_RETURN(ModuleVariable, module) } - ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) { - ModulePath* module = new(zone_) ModulePath(zone_, origin, name, pos); + ModulePath* NewModulePath(Module* origin, const AstRawString* name, int pos) { + ModulePath* module = new (zone_) ModulePath(zone_, origin, name, pos); VISIT_AND_RETURN(ModulePath, module) } @@ -3038,7 +3108,7 @@ VISIT_AND_RETURN(ModuleUrl, module) } - Block* NewBlock(ZoneStringList* labels, + Block* NewBlock(ZoneList<const AstRawString*>* labels, int capacity, bool is_initializer_block, int pos) { @@ -3048,7 +3118,7 @@ } #define STATEMENT_WITH_LABELS(NodeType) \ - NodeType* New##NodeType(ZoneStringList* labels, int pos) { \ + NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \ NodeType* stmt = new(zone_) NodeType(zone_, labels, pos); \ VISIT_AND_RETURN(NodeType, stmt); \ } @@ -3059,7 +3129,7 @@ #undef STATEMENT_WITH_LABELS ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode, - ZoneStringList* labels, + ZoneList<const AstRawString*>* labels, int pos) { switch (visit_mode) { case ForEachStatement::ENUMERATE: { @@ -3156,14 +3226,60 @@ VISIT_AND_RETURN(CaseClause, clause) } - Literal* NewLiteral(Handle<Object> handle, int pos) { - Literal* lit = new(zone_) Literal(zone_, handle, pos); + Literal* NewStringLiteral(const AstRawString* string, int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewString(string), pos); + VISIT_AND_RETURN(Literal, lit) + } + + // A JavaScript symbol 
(ECMA-262 edition 6). + Literal* NewSymbolLiteral(const char* name, int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewSymbol(name), pos); VISIT_AND_RETURN(Literal, lit) } Literal* NewNumberLiteral(double number, int pos) { - return NewLiteral( - zone_->isolate()->factory()->NewNumber(number, TENURED), pos); + Literal* lit = new (zone_) + Literal(zone_, ast_value_factory_->NewNumber(number), pos); + VISIT_AND_RETURN(Literal, lit) + } + + Literal* NewSmiLiteral(int number, int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewSmi(number), pos); + VISIT_AND_RETURN(Literal, lit) + } + + Literal* NewBooleanLiteral(bool b, int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewBoolean(b), pos); + VISIT_AND_RETURN(Literal, lit) + } + + Literal* NewStringListLiteral(ZoneList<const AstRawString*>* strings, + int pos) { + Literal* lit = new (zone_) + Literal(zone_, ast_value_factory_->NewStringList(strings), pos); + VISIT_AND_RETURN(Literal, lit) + } + + Literal* NewNullLiteral(int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos); + VISIT_AND_RETURN(Literal, lit) + } + + Literal* NewUndefinedLiteral(int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewUndefined(), pos); + VISIT_AND_RETURN(Literal, lit) + } + + Literal* NewTheHoleLiteral(int pos) { + Literal* lit = + new (zone_) Literal(zone_, ast_value_factory_->NewTheHole(), pos); + VISIT_AND_RETURN(Literal, lit) } ObjectLiteral* NewObjectLiteral( @@ -3180,7 +3296,8 @@ ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key, Expression* value) { - return new(zone_) ObjectLiteral::Property(zone_, key, value); + return new (zone_) + ObjectLiteral::Property(zone_, ast_value_factory_, key, value); } ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter, @@ -3188,12 +3305,12 @@ int pos) { ObjectLiteral::Property* prop = new(zone_) ObjectLiteral::Property(zone_, 
is_getter, value); - prop->set_key(NewLiteral(value->name(), pos)); + prop->set_key(NewStringLiteral(value->raw_name(), pos)); return prop; // Not an AST node, will not be visited. } - RegExpLiteral* NewRegExpLiteral(Handle<String> pattern, - Handle<String> flags, + RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, + const AstRawString* flags, int literal_index, int pos) { RegExpLiteral* lit = @@ -3215,7 +3332,7 @@ VISIT_AND_RETURN(VariableProxy, proxy) } - VariableProxy* NewVariableProxy(Handle<String> name, + VariableProxy* NewVariableProxy(const AstRawString* name, bool is_this, Interface* interface = Interface::NewValue(), int position = RelocInfo::kNoPosition) { @@ -3243,7 +3360,7 @@ VISIT_AND_RETURN(CallNew, call) } - CallRuntime* NewCallRuntime(Handle<String> name, + CallRuntime* NewCallRuntime(const AstRawString* name, const Runtime::Function* function, ZoneList<Expression*>* arguments, int pos) { @@ -3310,6 +3427,7 @@ Expression* expression, Yield::Kind yield_kind, int pos) { + if (!expression) expression = NewUndefinedLiteral(pos); Yield* yield = new(zone_) Yield( zone_, generator_object, expression, yield_kind, pos); VISIT_AND_RETURN(Yield, yield) @@ -3321,24 +3439,19 @@ } FunctionLiteral* NewFunctionLiteral( - Handle<String> name, - Scope* scope, - ZoneList<Statement*>* body, - int materialized_literal_count, - int expected_property_count, - int handler_count, - int parameter_count, + const AstRawString* name, AstValueFactory* ast_value_factory, + Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count, + int expected_property_count, int handler_count, int parameter_count, FunctionLiteral::ParameterFlag has_duplicate_parameters, FunctionLiteral::FunctionType function_type, FunctionLiteral::IsFunctionFlag is_function, FunctionLiteral::IsParenthesizedFlag is_parenthesized, - FunctionLiteral::IsGeneratorFlag is_generator, - int position) { - FunctionLiteral* lit = new(zone_) FunctionLiteral( - zone_, name, scope, body, - 
materialized_literal_count, expected_property_count, handler_count, - parameter_count, function_type, has_duplicate_parameters, is_function, - is_parenthesized, is_generator, position); + FunctionLiteral::KindFlag kind, int position) { + FunctionLiteral* lit = new (zone_) FunctionLiteral( + zone_, name, ast_value_factory, scope, body, materialized_literal_count, + expected_property_count, handler_count, parameter_count, function_type, + has_duplicate_parameters, is_function, is_parenthesized, kind, + position); // Top-level literal doesn't count for the AST's properties. if (is_function == FunctionLiteral::kIsFunction) { visitor_.VisitFunctionLiteral(lit); @@ -3347,7 +3460,8 @@ } NativeFunctionLiteral* NewNativeFunctionLiteral( - Handle<String> name, v8::Extension* extension, int pos) { + const AstRawString* name, v8::Extension* extension, + int pos) { NativeFunctionLiteral* lit = new(zone_) NativeFunctionLiteral(zone_, name, extension, pos); VISIT_AND_RETURN(NativeFunctionLiteral, lit) @@ -3363,6 +3477,7 @@ private: Zone* zone_; Visitor visitor_; + AstValueFactory* ast_value_factory_; }; diff -Nru nodejs-0.11.13/deps/v8/src/ast-value-factory.cc nodejs-0.11.15/deps/v8/src/ast-value-factory.cc --- nodejs-0.11.13/deps/v8/src/ast-value-factory.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ast-value-factory.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,409 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "src/ast-value-factory.h" + +#include "src/api.h" +#include "src/objects.h" + +namespace v8 { +namespace internal { + +namespace { + +// For using StringToArrayIndex. 
+class OneByteStringStream { + public: + explicit OneByteStringStream(Vector<const byte> lb) : + literal_bytes_(lb), pos_(0) {} + + bool HasMore() { return pos_ < literal_bytes_.length(); } + uint16_t GetNext() { return literal_bytes_[pos_++]; } + + private: + Vector<const byte> literal_bytes_; + int pos_; +}; + +} + +class AstRawStringInternalizationKey : public HashTableKey { + public: + explicit AstRawStringInternalizationKey(const AstRawString* string) + : string_(string) {} + + virtual bool IsMatch(Object* other) V8_OVERRIDE { + if (string_->is_one_byte_) + return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_); + return String::cast(other)->IsTwoByteEqualTo( + Vector<const uint16_t>::cast(string_->literal_bytes_)); + } + + virtual uint32_t Hash() V8_OVERRIDE { + return string_->hash() >> Name::kHashShift; + } + + virtual uint32_t HashForObject(Object* key) V8_OVERRIDE { + return String::cast(key)->Hash(); + } + + virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { + if (string_->is_one_byte_) + return isolate->factory()->NewOneByteInternalizedString( + string_->literal_bytes_, string_->hash()); + return isolate->factory()->NewTwoByteInternalizedString( + Vector<const uint16_t>::cast(string_->literal_bytes_), string_->hash()); + } + + private: + const AstRawString* string_; +}; + + +void AstRawString::Internalize(Isolate* isolate) { + if (!string_.is_null()) return; + if (literal_bytes_.length() == 0) { + string_ = isolate->factory()->empty_string(); + } else { + AstRawStringInternalizationKey key(this); + string_ = StringTable::LookupKey(isolate, &key); + } +} + + +bool AstRawString::AsArrayIndex(uint32_t* index) const { + if (!string_.is_null()) + return string_->AsArrayIndex(index); + if (!is_one_byte_ || literal_bytes_.length() == 0 || + literal_bytes_.length() > String::kMaxArrayIndexSize) + return false; + OneByteStringStream stream(literal_bytes_); + return StringToArrayIndex(&stream, index); +} + + +bool 
AstRawString::IsOneByteEqualTo(const char* data) const { + int length = static_cast<int>(strlen(data)); + if (is_one_byte_ && literal_bytes_.length() == length) { + const char* token = reinterpret_cast<const char*>(literal_bytes_.start()); + return !strncmp(token, data, length); + } + return false; +} + + +bool AstRawString::Compare(void* a, void* b) { + AstRawString* string1 = reinterpret_cast<AstRawString*>(a); + AstRawString* string2 = reinterpret_cast<AstRawString*>(b); + if (string1->is_one_byte_ != string2->is_one_byte_) return false; + if (string1->hash_ != string2->hash_) return false; + int length = string1->literal_bytes_.length(); + if (string2->literal_bytes_.length() != length) return false; + return memcmp(string1->literal_bytes_.start(), + string2->literal_bytes_.start(), length) == 0; +} + + +void AstConsString::Internalize(Isolate* isolate) { + // AstRawStrings are internalized before AstConsStrings so left and right are + // already internalized. + string_ = isolate->factory() + ->NewConsString(left_->string(), right_->string()) + .ToHandleChecked(); +} + + +bool AstValue::IsPropertyName() const { + if (type_ == STRING) { + uint32_t index; + return !string_->AsArrayIndex(&index); + } + return false; +} + + +bool AstValue::BooleanValue() const { + switch (type_) { + case STRING: + DCHECK(string_ != NULL); + return !string_->IsEmpty(); + case SYMBOL: + UNREACHABLE(); + break; + case NUMBER: + return DoubleToBoolean(number_); + case SMI: + return smi_ != 0; + case STRING_ARRAY: + UNREACHABLE(); + break; + case BOOLEAN: + return bool_; + case NULL_TYPE: + return false; + case THE_HOLE: + UNREACHABLE(); + break; + case UNDEFINED: + return false; + } + UNREACHABLE(); + return false; +} + + +void AstValue::Internalize(Isolate* isolate) { + switch (type_) { + case STRING: + DCHECK(string_ != NULL); + // Strings are already internalized. 
+ DCHECK(!string_->string().is_null()); + break; + case SYMBOL: + value_ = Object::GetProperty( + isolate, handle(isolate->native_context()->builtins()), + symbol_name_).ToHandleChecked(); + break; + case NUMBER: + value_ = isolate->factory()->NewNumber(number_, TENURED); + break; + case SMI: + value_ = handle(Smi::FromInt(smi_), isolate); + break; + case BOOLEAN: + if (bool_) { + value_ = isolate->factory()->true_value(); + } else { + value_ = isolate->factory()->false_value(); + } + break; + case STRING_ARRAY: { + DCHECK(strings_ != NULL); + Factory* factory = isolate->factory(); + int len = strings_->length(); + Handle<FixedArray> elements = factory->NewFixedArray(len, TENURED); + for (int i = 0; i < len; i++) { + const AstRawString* string = (*strings_)[i]; + Handle<Object> element = string->string(); + // Strings are already internalized. + DCHECK(!element.is_null()); + elements->set(i, *element); + } + value_ = + factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED); + break; + } + case NULL_TYPE: + value_ = isolate->factory()->null_value(); + break; + case THE_HOLE: + value_ = isolate->factory()->the_hole_value(); + break; + case UNDEFINED: + value_ = isolate->factory()->undefined_value(); + break; + } +} + + +const AstRawString* AstValueFactory::GetOneByteString( + Vector<const uint8_t> literal) { + uint32_t hash = StringHasher::HashSequentialString<uint8_t>( + literal.start(), literal.length(), hash_seed_); + return GetString(hash, true, literal); +} + + +const AstRawString* AstValueFactory::GetTwoByteString( + Vector<const uint16_t> literal) { + uint32_t hash = StringHasher::HashSequentialString<uint16_t>( + literal.start(), literal.length(), hash_seed_); + return GetString(hash, false, Vector<const byte>::cast(literal)); +} + + +const AstRawString* AstValueFactory::GetString(Handle<String> literal) { + DisallowHeapAllocation no_gc; + String::FlatContent content = literal->GetFlatContent(); + if (content.IsAscii()) { + return 
GetOneByteString(content.ToOneByteVector()); + } + DCHECK(content.IsTwoByte()); + return GetTwoByteString(content.ToUC16Vector()); +} + + +const AstConsString* AstValueFactory::NewConsString( + const AstString* left, const AstString* right) { + // This Vector will be valid as long as the Collector is alive (meaning that + // the AstRawString will not be moved). + AstConsString* new_string = new (zone_) AstConsString(left, right); + strings_.Add(new_string); + if (isolate_) { + new_string->Internalize(isolate_); + } + return new_string; +} + + +void AstValueFactory::Internalize(Isolate* isolate) { + if (isolate_) { + // Everything is already internalized. + return; + } + // Strings need to be internalized before values, because values refer to + // strings. + for (int i = 0; i < strings_.length(); ++i) { + strings_[i]->Internalize(isolate); + } + for (int i = 0; i < values_.length(); ++i) { + values_[i]->Internalize(isolate); + } + isolate_ = isolate; +} + + +const AstValue* AstValueFactory::NewString(const AstRawString* string) { + AstValue* value = new (zone_) AstValue(string); + DCHECK(string != NULL); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewSymbol(const char* name) { + AstValue* value = new (zone_) AstValue(name); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewNumber(double number) { + AstValue* value = new (zone_) AstValue(number); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewSmi(int number) { + AstValue* value = + new (zone_) AstValue(AstValue::SMI, number); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewBoolean(bool b) { + AstValue* value = new (zone_) AstValue(b); + if (isolate_) { + 
value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewStringList( + ZoneList<const AstRawString*>* strings) { + AstValue* value = new (zone_) AstValue(strings); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewNull() { + AstValue* value = new (zone_) AstValue(AstValue::NULL_TYPE); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewUndefined() { + AstValue* value = new (zone_) AstValue(AstValue::UNDEFINED); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstValue* AstValueFactory::NewTheHole() { + AstValue* value = new (zone_) AstValue(AstValue::THE_HOLE); + if (isolate_) { + value->Internalize(isolate_); + } + values_.Add(value); + return value; +} + + +const AstRawString* AstValueFactory::GetString( + uint32_t hash, bool is_one_byte, Vector<const byte> literal_bytes) { + // literal_bytes here points to whatever the user passed, and this is OK + // because we use vector_compare (which checks the contents) to compare + // against the AstRawStrings which are in the string_table_. We should not + // return this AstRawString. + AstRawString key(is_one_byte, literal_bytes, hash); + HashMap::Entry* entry = string_table_.Lookup(&key, hash, true); + if (entry->value == NULL) { + // Copy literal contents for later comparison. 
+ int length = literal_bytes.length(); + byte* new_literal_bytes = zone_->NewArray<byte>(length); + memcpy(new_literal_bytes, literal_bytes.start(), length); + AstRawString* new_string = new (zone_) AstRawString( + is_one_byte, Vector<const byte>(new_literal_bytes, length), hash); + entry->key = new_string; + strings_.Add(new_string); + if (isolate_) { + new_string->Internalize(isolate_); + } + entry->value = reinterpret_cast<void*>(1); + } + return reinterpret_cast<AstRawString*>(entry->key); +} + + +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/ast-value-factory.h nodejs-0.11.15/deps/v8/src/ast-value-factory.h --- nodejs-0.11.13/deps/v8/src/ast-value-factory.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ast-value-factory.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,344 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_AST_VALUE_FACTORY_H_ +#define V8_AST_VALUE_FACTORY_H_ + +#include "src/api.h" +#include "src/hashmap.h" +#include "src/utils.h" + +// AstString, AstValue and AstValueFactory are for storing strings and values +// independent of the V8 heap and internalizing them later. During parsing, +// AstStrings and AstValues are created and stored outside the heap, in +// AstValueFactory. After parsing, the strings and values are internalized +// (moved into the V8 heap). +namespace v8 { +namespace internal { + +class AstString : public ZoneObject { + public: + virtual ~AstString() {} + + virtual int length() const = 0; + bool IsEmpty() const { return length() == 0; } + + // Puts the string into the V8 heap. + virtual void Internalize(Isolate* isolate) = 0; + + // This function can be called after internalizing. + V8_INLINE Handle<String> string() const { + DCHECK(!string_.is_null()); + return string_; + } + + protected: + // This is null until the string is internalized. + Handle<String> string_; +}; + + +class AstRawString : public AstString { + public: + virtual int length() const V8_OVERRIDE { + if (is_one_byte_) + return literal_bytes_.length(); + return literal_bytes_.length() / 2; + } + + virtual void Internalize(Isolate* isolate) V8_OVERRIDE; + + bool AsArrayIndex(uint32_t* index) const; + + // The string is not null-terminated, use length() to find out the length. 
+ const unsigned char* raw_data() const { + return literal_bytes_.start(); + } + bool is_one_byte() const { return is_one_byte_; } + bool IsOneByteEqualTo(const char* data) const; + uint16_t FirstCharacter() const { + if (is_one_byte_) + return literal_bytes_[0]; + const uint16_t* c = + reinterpret_cast<const uint16_t*>(literal_bytes_.start()); + return *c; + } + + // For storing AstRawStrings in a hash map. + uint32_t hash() const { + return hash_; + } + static bool Compare(void* a, void* b); + + private: + friend class AstValueFactory; + friend class AstRawStringInternalizationKey; + + AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes, + uint32_t hash) + : is_one_byte_(is_one_byte), literal_bytes_(literal_bytes), hash_(hash) {} + + AstRawString() + : is_one_byte_(true), + hash_(0) {} + + bool is_one_byte_; + + // Points to memory owned by Zone. + Vector<const byte> literal_bytes_; + uint32_t hash_; +}; + + +class AstConsString : public AstString { + public: + AstConsString(const AstString* left, const AstString* right) + : left_(left), + right_(right) {} + + virtual int length() const V8_OVERRIDE { + return left_->length() + right_->length(); + } + + virtual void Internalize(Isolate* isolate) V8_OVERRIDE; + + private: + friend class AstValueFactory; + + const AstString* left_; + const AstString* right_; +}; + + +// AstValue is either a string, a number, a string array, a boolean, or a +// special value (null, undefined, the hole). 
+class AstValue : public ZoneObject { + public: + bool IsString() const { + return type_ == STRING; + } + + bool IsNumber() const { + return type_ == NUMBER || type_ == SMI; + } + + const AstRawString* AsString() const { + if (type_ == STRING) + return string_; + UNREACHABLE(); + return 0; + } + + double AsNumber() const { + if (type_ == NUMBER) + return number_; + if (type_ == SMI) + return smi_; + UNREACHABLE(); + return 0; + } + + bool EqualsString(const AstRawString* string) const { + return type_ == STRING && string_ == string; + } + + bool IsPropertyName() const; + + bool BooleanValue() const; + + void Internalize(Isolate* isolate); + + // Can be called after Internalize has been called. + V8_INLINE Handle<Object> value() const { + if (type_ == STRING) { + return string_->string(); + } + DCHECK(!value_.is_null()); + return value_; + } + + private: + friend class AstValueFactory; + + enum Type { + STRING, + SYMBOL, + NUMBER, + SMI, + BOOLEAN, + STRING_ARRAY, + NULL_TYPE, + UNDEFINED, + THE_HOLE + }; + + explicit AstValue(const AstRawString* s) : type_(STRING) { string_ = s; } + + explicit AstValue(const char* name) : type_(SYMBOL) { symbol_name_ = name; } + + explicit AstValue(double n) : type_(NUMBER) { number_ = n; } + + AstValue(Type t, int i) : type_(t) { + DCHECK(type_ == SMI); + smi_ = i; + } + + explicit AstValue(bool b) : type_(BOOLEAN) { bool_ = b; } + + explicit AstValue(ZoneList<const AstRawString*>* s) : type_(STRING_ARRAY) { + strings_ = s; + } + + explicit AstValue(Type t) : type_(t) { + DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE); + } + + Type type_; + + // Uninternalized value. + union { + const AstRawString* string_; + double number_; + int smi_; + bool bool_; + ZoneList<const AstRawString*>* strings_; + const char* symbol_name_; + }; + + // Internalized value (empty before internalized). + Handle<Object> value_; +}; + + +// For generating string constants. 
+#define STRING_CONSTANTS(F) \ + F(anonymous_function, "(anonymous function)") \ + F(arguments, "arguments") \ + F(done, "done") \ + F(dot, ".") \ + F(dot_for, ".for") \ + F(dot_generator, ".generator") \ + F(dot_generator_object, ".generator_object") \ + F(dot_iterator, ".iterator") \ + F(dot_module, ".module") \ + F(dot_result, ".result") \ + F(empty, "") \ + F(eval, "eval") \ + F(initialize_const_global, "initializeConstGlobal") \ + F(initialize_var_global, "initializeVarGlobal") \ + F(make_reference_error, "MakeReferenceError") \ + F(make_syntax_error, "MakeSyntaxError") \ + F(make_type_error, "MakeTypeError") \ + F(module, "module") \ + F(native, "native") \ + F(next, "next") \ + F(proto, "__proto__") \ + F(prototype, "prototype") \ + F(this, "this") \ + F(use_asm, "use asm") \ + F(use_strict, "use strict") \ + F(value, "value") + + +class AstValueFactory { + public: + AstValueFactory(Zone* zone, uint32_t hash_seed) + : string_table_(AstRawString::Compare), + zone_(zone), + isolate_(NULL), + hash_seed_(hash_seed) { +#define F(name, str) \ + name##_string_ = NULL; + STRING_CONSTANTS(F) +#undef F + } + + const AstRawString* GetOneByteString(Vector<const uint8_t> literal); + const AstRawString* GetOneByteString(const char* string) { + return GetOneByteString(Vector<const uint8_t>( + reinterpret_cast<const uint8_t*>(string), StrLength(string))); + } + const AstRawString* GetTwoByteString(Vector<const uint16_t> literal); + const AstRawString* GetString(Handle<String> literal); + const AstConsString* NewConsString(const AstString* left, + const AstString* right); + + void Internalize(Isolate* isolate); + bool IsInternalized() { + return isolate_ != NULL; + } + +#define F(name, str) \ + const AstRawString* name##_string() { \ + if (name##_string_ == NULL) { \ + const char* data = str; \ + name##_string_ = GetOneByteString( \ + Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \ + static_cast<int>(strlen(data)))); \ + } \ + return name##_string_; \ + } + 
STRING_CONSTANTS(F) +#undef F + + const AstValue* NewString(const AstRawString* string); + // A JavaScript symbol (ECMA-262 edition 6). + const AstValue* NewSymbol(const char* name); + const AstValue* NewNumber(double number); + const AstValue* NewSmi(int number); + const AstValue* NewBoolean(bool b); + const AstValue* NewStringList(ZoneList<const AstRawString*>* strings); + const AstValue* NewNull(); + const AstValue* NewUndefined(); + const AstValue* NewTheHole(); + + private: + const AstRawString* GetString(uint32_t hash, bool is_one_byte, + Vector<const byte> literal_bytes); + + // All strings are copied here, one after another (no NULLs inbetween). + HashMap string_table_; + // For keeping track of all AstValues and AstRawStrings we've created (so that + // they can be internalized later). + List<AstValue*> values_; + List<AstString*> strings_; + Zone* zone_; + Isolate* isolate_; + + uint32_t hash_seed_; + +#define F(name, str) \ + const AstRawString* name##_string_; + STRING_CONSTANTS(F) +#undef F +}; + +} } // namespace v8::internal + +#undef STRING_CONSTANTS + +#endif // V8_AST_VALUE_FACTORY_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops.h nodejs-0.11.15/deps/v8/src/atomicops.h --- nodejs-0.11.13/deps/v8/src/atomicops.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The routines exported by this module are subtle. If you use them, even if -// you get the code right, it will depend on careful reasoning about atomicity -// and memory ordering; it will be less readable, and harder to maintain. If -// you plan to use these routines, you should have a good reason, such as solid -// evidence that performance would otherwise suffer, or there being no -// alternative. You should assume only properties explicitly guaranteed by the -// specifications in this file. You are almost certainly _not_ writing code -// just for the x86; if you assume x86 semantics, x86 hardware bugs and -// implementations on other archtectures will cause your code to break. If you -// do not know what you are doing, avoid these routines, and use a Mutex. -// -// It is incorrect to make direct assignments to/from an atomic variable. -// You should use one of the Load or Store routines. 
The NoBarrier -// versions are provided when no barriers are needed: -// NoBarrier_Store() -// NoBarrier_Load() -// Although there are currently no compiler enforcement, you are encouraged -// to use these. -// - -#ifndef V8_ATOMICOPS_H_ -#define V8_ATOMICOPS_H_ - -#include "../include/v8.h" -#include "globals.h" - -#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT) -// windows.h #defines this (only on x64). This causes problems because the -// public API also uses MemoryBarrier at the public name for this fence. So, on -// X64, undef it, and call its documented -// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) -// implementation directly. -#undef MemoryBarrier -#endif - -namespace v8 { -namespace internal { - -typedef int32_t Atomic32; -#ifdef V8_HOST_ARCH_64_BIT -// We need to be able to go between Atomic64 and AtomicWord implicitly. This -// means Atomic64 and AtomicWord should be the same type on 64-bit. -#if defined(__ILP32__) -typedef int64_t Atomic64; -#else -typedef intptr_t Atomic64; -#endif -#endif - -// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or -// Atomic64 routines below, depending on your architecture. -typedef intptr_t AtomicWord; - -// Atomically execute: -// result = *ptr; -// if (*ptr == old_value) -// *ptr = new_value; -// return result; -// -// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". -// Always return the old value of "*ptr" -// -// This routine implies no memory barriers. -Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value); - -// Atomically store new_value into *ptr, returning the previous value held in -// *ptr. This routine implies no memory barriers. -Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); - -// Atomically increment *ptr by "increment". Returns the new value of -// *ptr with the increment applied. This routine implies no memory barriers. 
-Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment); - -Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment); - -// These following lower-level operations are typically useful only to people -// implementing higher-level synchronization operations like spinlocks, -// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or -// a store with appropriate memory-ordering instructions. "Acquire" operations -// ensure that no later memory access can be reordered ahead of the operation. -// "Release" operations ensure that no previous memory access can be reordered -// after the operation. "Barrier" operations have both "Acquire" and "Release" -// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory -// access. -Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value); -Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value); - -void MemoryBarrier(); -void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value); -void Acquire_Store(volatile Atomic32* ptr, Atomic32 value); -void Release_Store(volatile Atomic32* ptr, Atomic32 value); - -Atomic32 NoBarrier_Load(volatile const Atomic32* ptr); -Atomic32 Acquire_Load(volatile const Atomic32* ptr); -Atomic32 Release_Load(volatile const Atomic32* ptr); - -// 64-bit atomic operations (only available on 64-bit processors). 
-#ifdef V8_HOST_ARCH_64_BIT -Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value); -Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); -Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); -Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); - -Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value); -Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value); -void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value); -void Acquire_Store(volatile Atomic64* ptr, Atomic64 value); -void Release_Store(volatile Atomic64* ptr, Atomic64 value); -Atomic64 NoBarrier_Load(volatile const Atomic64* ptr); -Atomic64 Acquire_Load(volatile const Atomic64* ptr); -Atomic64 Release_Load(volatile const Atomic64* ptr); -#endif // V8_HOST_ARCH_64_BIT - -} } // namespace v8::internal - -// Include our platform specific implementation. -#if defined(THREAD_SANITIZER) -#include "atomicops_internals_tsan.h" -#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) -#include "atomicops_internals_x86_msvc.h" -#elif defined(__APPLE__) -#include "atomicops_internals_mac.h" -#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64 -#include "atomicops_internals_arm64_gcc.h" -#elif defined(__GNUC__) && V8_HOST_ARCH_ARM -#include "atomicops_internals_arm_gcc.h" -#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) -#include "atomicops_internals_x86_gcc.h" -#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS -#include "atomicops_internals_mips_gcc.h" -#else -#error "Atomic operations are not supported on your platform" -#endif - -// On some platforms we need additional declarations to make -// AtomicWord compatible with our other Atomic* types. 
-#if defined(__APPLE__) || defined(__OpenBSD__) -#include "atomicops_internals_atomicword_compat.h" -#endif - -#endif // V8_ATOMICOPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_arm64_gcc.h nodejs-0.11.15/deps/v8/src/atomicops_internals_arm64_gcc.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_arm64_gcc.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_arm64_gcc.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,372 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ -#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ - -namespace v8 { -namespace internal { - -inline void MemoryBarrier() { - __asm__ __volatile__ ( // NOLINT - "dmb ish \n\t" // Data memory barrier. - ::: "memory" - ); // NOLINT -} - - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. - "cmp %w[prev], %w[old_value] \n\t" - "bne 1f \n\t" - "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. - "cbnz %w[temp], 0b \n\t" // Retry if it did not work. - "1: \n\t" - "clrex \n\t" // In case we didn't swap. - : [prev]"=&r" (prev), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [old_value]"r" (old_value), - [new_value]"r" (new_value) - : "memory", "cc" - ); // NOLINT - - return prev; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - Atomic32 result; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %w[result], %[ptr] \n\t" // Load the previous value. - "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. - "cbnz %w[temp], 0b \n\t" // Retry if it did not work. 
- : [result]"=&r" (result), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [new_value]"r" (new_value) - : "memory" - ); // NOLINT - - return result; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 result; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %w[result], %[ptr] \n\t" // Load the previous value. - "add %w[result], %w[result], %w[increment]\n\t" - "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result. - "cbnz %w[temp], 0b \n\t" // Retry on failure. - : [result]"=&r" (result), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [increment]"r" (increment) - : "memory" - ); // NOLINT - - return result; -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - MemoryBarrier(); - Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment); - MemoryBarrier(); - - return result; -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. - "cmp %w[prev], %w[old_value] \n\t" - "bne 1f \n\t" - "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. - "cbnz %w[temp], 0b \n\t" // Retry if it did not work. - "dmb ish \n\t" // Data memory barrier. - "1: \n\t" - // If the compare failed the 'dmb' is unnecessary, but we still need a - // 'clrex'. - "clrex \n\t" - : [prev]"=&r" (prev), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [old_value]"r" (old_value), - [new_value]"r" (new_value) - : "memory", "cc" - ); // NOLINT - - return prev; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev; - int32_t temp; - - MemoryBarrier(); - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. 
- "cmp %w[prev], %w[old_value] \n\t" - "bne 1f \n\t" - "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. - "cbnz %w[temp], 0b \n\t" // Retry if it did not work. - "1: \n\t" - // If the compare failed the we still need a 'clrex'. - "clrex \n\t" - : [prev]"=&r" (prev), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [old_value]"r" (old_value), - [new_value]"r" (new_value) - : "memory", "cc" - ); // NOLINT - - return prev; -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -// 64-bit versions of the operations. -// See the 32-bit versions for comments. 
- -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %[prev], %[ptr] \n\t" - "cmp %[prev], %[old_value] \n\t" - "bne 1f \n\t" - "stxr %w[temp], %[new_value], %[ptr] \n\t" - "cbnz %w[temp], 0b \n\t" - "1: \n\t" - "clrex \n\t" - : [prev]"=&r" (prev), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [old_value]"r" (old_value), - [new_value]"r" (new_value) - : "memory", "cc" - ); // NOLINT - - return prev; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - Atomic64 result; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %[result], %[ptr] \n\t" - "stxr %w[temp], %[new_value], %[ptr] \n\t" - "cbnz %w[temp], 0b \n\t" - : [result]"=&r" (result), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [new_value]"r" (new_value) - : "memory" - ); // NOLINT - - return result; -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - Atomic64 result; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %[result], %[ptr] \n\t" - "add %[result], %[result], %[increment] \n\t" - "stxr %w[temp], %[result], %[ptr] \n\t" - "cbnz %w[temp], 0b \n\t" - : [result]"=&r" (result), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [increment]"r" (increment) - : "memory" - ); // NOLINT - - return result; -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - MemoryBarrier(); - Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment); - MemoryBarrier(); - - return result; -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev; - int32_t temp; - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %[prev], %[ptr] \n\t" - "cmp %[prev], %[old_value] \n\t" - "bne 1f \n\t" - "stxr %w[temp], %[new_value], %[ptr] \n\t" - 
"cbnz %w[temp], 0b \n\t" - "dmb ish \n\t" - "1: \n\t" - "clrex \n\t" - : [prev]"=&r" (prev), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [old_value]"r" (old_value), - [new_value]"r" (new_value) - : "memory", "cc" - ); // NOLINT - - return prev; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev; - int32_t temp; - - MemoryBarrier(); - - __asm__ __volatile__ ( // NOLINT - "0: \n\t" - "ldxr %[prev], %[ptr] \n\t" - "cmp %[prev], %[old_value] \n\t" - "bne 1f \n\t" - "stxr %w[temp], %[new_value], %[ptr] \n\t" - "cbnz %w[temp], 0b \n\t" - "1: \n\t" - "clrex \n\t" - : [prev]"=&r" (prev), - [temp]"=&r" (temp), - [ptr]"+Q" (*ptr) - : [old_value]"r" (old_value), - [new_value]"r" (new_value) - : "memory", "cc" - ); // NOLINT - - return prev; -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - Atomic64 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_arm_gcc.h nodejs-0.11.15/deps/v8/src/atomicops_internals_arm_gcc.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_arm_gcc.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_arm_gcc.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,316 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. -// -// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. 
- -#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ -#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ - -#if defined(__QNXNTO__) -#include <sys/cpuinline.h> -#endif - -namespace v8 { -namespace internal { - -// Memory barriers on ARM are funky, but the kernel is here to help: -// -// * ARMv5 didn't support SMP, there is no memory barrier instruction at -// all on this architecture, or when targeting its machine code. -// -// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by -// writing a random value to a very specific coprocessor register. -// -// * On ARMv7, the "dmb" instruction is used to perform a full memory -// barrier (though writing to the co-processor will still work). -// However, on single core devices (e.g. Nexus One, or Nexus S), -// this instruction will take up to 200 ns, which is huge, even though -// it's completely un-needed on these devices. -// -// * There is no easy way to determine at runtime if the device is -// single or multi-core. However, the kernel provides a useful helper -// function at a fixed memory address (0xffff0fa0), which will always -// perform a memory barrier in the most efficient way. I.e. on single -// core devices, this is an empty function that exits immediately. -// On multi-core devices, it implements a full memory barrier. -// -// * This source could be compiled to ARMv5 machine code that runs on a -// multi-core ARMv6 or ARMv7 device. In this case, memory barriers -// are needed for correct execution. Always call the kernel helper, even -// when targeting ARMv5TE. -// - -inline void MemoryBarrier() { -#if defined(__linux__) || defined(__ANDROID__) - // Note: This is a function call, which is also an implicit compiler barrier. - typedef void (*KernelMemoryBarrierFunc)(); - ((KernelMemoryBarrierFunc)0xffff0fa0)(); -#elif defined(__QNXNTO__) - __cpu_membarrier(); -#else -#error MemoryBarrier() is not implemented on this platform. 
-#endif -} - -// An ARM toolchain would only define one of these depending on which -// variant of the target architecture is being used. This tests against -// any known ARMv6 or ARMv7 variant, where it is possible to directly -// use ldrex/strex instructions to implement fast atomic operations. -#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ - defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \ - defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ - defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ - defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__) - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - int reloop; - do { - // The following is equivalent to: - // - // prev_value = LDREX(ptr) - // reloop = 0 - // if (prev_value != old_value) - // reloop = STREX(ptr, new_value) - __asm__ __volatile__(" ldrex %0, [%3]\n" - " mov %1, #0\n" - " cmp %0, %4\n" -#ifdef __thumb2__ - " it eq\n" -#endif - " strexeq %1, %5, [%3]\n" - : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr) - : "r"(ptr), "r"(old_value), "r"(new_value) - : "cc", "memory"); - } while (reloop != 0); - return prev_value; -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - MemoryBarrier(); - return result; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - MemoryBarrier(); - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 value; - int reloop; - do { - // Equivalent to: - // - // value = LDREX(ptr) - // value += increment - // reloop = STREX(ptr, value) - // - __asm__ __volatile__(" ldrex %0, [%3]\n" - " add %0, %0, %4\n" - " strex %1, %0, [%3]\n" - : "=&r"(value), 
"=&r"(reloop), "+m"(*ptr) - : "r"(ptr), "r"(increment) - : "cc", "memory"); - } while (reloop); - return value; -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - // TODO(digit): Investigate if it's possible to implement this with - // a single MemoryBarrier() operation between the LDREX and STREX. - // See http://crbug.com/246514 - MemoryBarrier(); - Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment); - MemoryBarrier(); - return result; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - Atomic32 old_value; - int reloop; - do { - // old_value = LDREX(ptr) - // reloop = STREX(ptr, new_value) - __asm__ __volatile__(" ldrex %0, [%3]\n" - " strex %1, %4, [%3]\n" - : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr) - : "r"(ptr), "r"(new_value) - : "cc", "memory"); - } while (reloop != 0); - return old_value; -} - -// This tests against any known ARMv5 variant. -#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \ - defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__) - -// The kernel also provides a helper function to perform an atomic -// compare-and-swap operation at the hard-wired address 0xffff0fc0. -// On ARMv5, this is implemented by a special code path that the kernel -// detects and treats specially when thread pre-emption happens. -// On ARMv6 and higher, it uses LDREX/STREX instructions instead. -// -// Note that this always perform a full memory barrier, there is no -// need to add calls MemoryBarrier() before or after it. It also -// returns 0 on success, and 1 on exit. -// -// Available and reliable since Linux 2.6.24. Both Android and ChromeOS -// use newer kernel revisions, so this should not be a concern. 
-namespace { - -inline int LinuxKernelCmpxchg(Atomic32 old_value, - Atomic32 new_value, - volatile Atomic32* ptr) { - typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*); - return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr); -} - -} // namespace - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - for (;;) { - prev_value = *ptr; - if (prev_value != old_value) - return prev_value; - if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) - return old_value; - } -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - Atomic32 old_value; - do { - old_value = *ptr; - } while (LinuxKernelCmpxchg(old_value, new_value, ptr)); - return old_value; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return Barrier_AtomicIncrement(ptr, increment); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - for (;;) { - // Atomic exchange the old value with an incremented one. - Atomic32 old_value = *ptr; - Atomic32 new_value = old_value + increment; - if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) { - // The exchange took place as expected. - return new_value; - } - // Otherwise, *ptr changed mid-loop and we need to retry. - } -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - for (;;) { - prev_value = *ptr; - if (prev_value != old_value) { - // Always ensure acquire semantics. 
- MemoryBarrier(); - return prev_value; - } - if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) - return old_value; - } -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - // This could be implemented as: - // MemoryBarrier(); - // return NoBarrier_CompareAndSwap(); - // - // But would use 3 barriers per succesful CAS. To save performance, - // use Acquire_CompareAndSwap(). Its implementation guarantees that: - // - A succesful swap uses only 2 barriers (in the kernel helper). - // - An early return due to (prev_value != old_value) performs - // a memory barrier with no store, which is equivalent to the - // generic implementation above. - return Acquire_CompareAndSwap(ptr, old_value, new_value); -} - -#else -# error "Your CPU's ARM architecture is not supported yet" -#endif - -// NOTE: Atomicity of the following load and store operations is only -// guaranteed in case of 32-bit alignement of |ptr| values. - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_atomicword_compat.h nodejs-0.11.15/deps/v8/src/atomicops_internals_atomicword_compat.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_atomicword_compat.h 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/atomicops_internals_atomicword_compat.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ -#define V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ - -// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32, -// which in turn means int. 
On some LP32 platforms, intptr_t is an int, but -// on others, it's a long. When AtomicWord and Atomic32 are based on different -// fundamental types, their pointers are incompatible. -// -// This file defines function overloads to allow both AtomicWord and Atomic32 -// data to be used with this interface. -// -// On LP64 platforms, AtomicWord and Atomic64 are both always long, -// so this problem doesn't occur. - -#if !defined(V8_HOST_ARCH_64_BIT) - -namespace v8 { -namespace internal { - -inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, - AtomicWord old_value, - AtomicWord new_value) { - return NoBarrier_CompareAndSwap( - reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); -} - -inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, - AtomicWord new_value) { - return NoBarrier_AtomicExchange( - reinterpret_cast<volatile Atomic32*>(ptr), new_value); -} - -inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, - AtomicWord increment) { - return NoBarrier_AtomicIncrement( - reinterpret_cast<volatile Atomic32*>(ptr), increment); -} - -inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, - AtomicWord increment) { - return Barrier_AtomicIncrement( - reinterpret_cast<volatile Atomic32*>(ptr), increment); -} - -inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, - AtomicWord old_value, - AtomicWord new_value) { - return v8::internal::Acquire_CompareAndSwap( - reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); -} - -inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, - AtomicWord old_value, - AtomicWord new_value) { - return v8::internal::Release_CompareAndSwap( - reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); -} - -inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { - NoBarrier_Store( - reinterpret_cast<volatile Atomic32*>(ptr), value); -} - -inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord 
value) { - return v8::internal::Acquire_Store( - reinterpret_cast<volatile Atomic32*>(ptr), value); -} - -inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { - return v8::internal::Release_Store( - reinterpret_cast<volatile Atomic32*>(ptr), value); -} - -inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { - return NoBarrier_Load( - reinterpret_cast<volatile const Atomic32*>(ptr)); -} - -inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { - return v8::internal::Acquire_Load( - reinterpret_cast<volatile const Atomic32*>(ptr)); -} - -inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { - return v8::internal::Release_Load( - reinterpret_cast<volatile const Atomic32*>(ptr)); -} - -} } // namespace v8::internal - -#endif // !defined(V8_HOST_ARCH_64_BIT) - -#endif // V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_mac.h nodejs-0.11.15/deps/v8/src/atomicops_internals_mac.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_mac.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_mac.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,219 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_ -#define V8_ATOMICOPS_INTERNALS_MAC_H_ - -#include <libkern/OSAtomic.h> - -namespace v8 { -namespace internal { - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - do { - if (OSAtomicCompareAndSwap32(old_value, new_value, - const_cast<Atomic32*>(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - Atomic32 old_value; - do { - old_value = *ptr; - } while (!OSAtomicCompareAndSwap32(old_value, new_value, - const_cast<Atomic32*>(ptr))); - return old_value; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); -} - -inline void MemoryBarrier() { - 
OSMemoryBarrier(); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev_value; - do { - if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, - const_cast<Atomic32*>(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return Acquire_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -#ifdef __LP64__ - -// 64-bit implementation on 64-bit platform - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev_value; - do { - if (OSAtomicCompareAndSwap64(old_value, new_value, - reinterpret_cast<volatile int64_t*>(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - Atomic64 old_value; - do { - old_value = *ptr; - } while (!OSAtomicCompareAndSwap64(old_value, new_value, - reinterpret_cast<volatile int64_t*>(ptr))); - return old_value; -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return OSAtomicAdd64(increment, reinterpret_cast<volatile 
int64_t*>(ptr)); -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return OSAtomicAdd64Barrier(increment, - reinterpret_cast<volatile int64_t*>(ptr)); -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev_value; - do { - if (OSAtomicCompareAndSwap64Barrier( - old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) { - return old_value; - } - prev_value = *ptr; - } while (prev_value == old_value); - return prev_value; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - // The lib kern interface does not distinguish between - // Acquire and Release memory barriers; they are equivalent. - return Acquire_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - Atomic64 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - -#endif // defined(__LP64__) - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_MAC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_mips_gcc.h nodejs-0.11.15/deps/v8/src/atomicops_internals_mips_gcc.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_mips_gcc.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_mips_gcc.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ -#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ - -namespace v8 { -namespace internal { - -// Atomically execute: -// result = *ptr; -// if (*ptr == old_value) -// *ptr = new_value; -// return result; -// -// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". 
-// Always return the old value of "*ptr" -// -// This routine implies no memory barriers. -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev, tmp; - __asm__ __volatile__(".set push\n" - ".set noreorder\n" - "1:\n" - "ll %0, %5\n" // prev = *ptr - "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 - "move %2, %4\n" // tmp = new_value - "sc %2, %1\n" // *ptr = tmp (with atomic check) - "beqz %2, 1b\n" // start again on atomic error - "nop\n" // delay slot nop - "2:\n" - ".set pop\n" - : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) - : "Ir" (old_value), "r" (new_value), "m" (*ptr) - : "memory"); - return prev; -} - -// Atomically store new_value into *ptr, returning the previous value held in -// *ptr. This routine implies no memory barriers. -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - Atomic32 temp, old; - __asm__ __volatile__(".set push\n" - ".set noreorder\n" - "1:\n" - "ll %1, %2\n" // old = *ptr - "move %0, %3\n" // temp = new_value - "sc %0, %2\n" // *ptr = temp (with atomic check) - "beqz %0, 1b\n" // start again on atomic error - "nop\n" // delay slot nop - ".set pop\n" - : "=&r" (temp), "=&r" (old), "=m" (*ptr) - : "r" (new_value), "m" (*ptr) - : "memory"); - - return old; -} - -// Atomically increment *ptr by "increment". Returns the new value of -// *ptr with the increment applied. This routine implies no memory barriers. 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 temp, temp2; - - __asm__ __volatile__(".set push\n" - ".set noreorder\n" - "1:\n" - "ll %0, %2\n" // temp = *ptr - "addu %1, %0, %3\n" // temp2 = temp + increment - "sc %1, %2\n" // *ptr = temp2 (with atomic check) - "beqz %1, 1b\n" // start again on atomic error - "addu %1, %0, %3\n" // temp2 = temp + increment - ".set pop\n" - : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) - : "Ir" (increment), "m" (*ptr) - : "memory"); - // temp2 now holds the final value. - return temp2; -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - MemoryBarrier(); - Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); - MemoryBarrier(); - return res; -} - -// "Acquire" operations -// ensure that no later memory access can be reordered ahead of the operation. -// "Release" operations ensure that no previous memory access can be reordered -// after the operation. "Barrier" operations have both "Acquire" and "Release" -// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory -// access. 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - MemoryBarrier(); - return res; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - MemoryBarrier(); - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void MemoryBarrier() { - __asm__ __volatile__("sync" : : : "memory"); -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - MemoryBarrier(); - *ptr = value; -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - MemoryBarrier(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_tsan.h nodejs-0.11.15/deps/v8/src/atomicops_internals_tsan.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_tsan.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_tsan.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,394 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// This file is an internal atomic implementation for compiler-based -// ThreadSanitizer. Use base/atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_ -#define V8_ATOMICOPS_INTERNALS_TSAN_H_ - -namespace v8 { -namespace internal { - -#ifndef TSAN_INTERFACE_ATOMIC_H -#define TSAN_INTERFACE_ATOMIC_H - -// This struct is not part of the public API of this module; clients may not -// use it. (However, it's exported via BASE_EXPORT because clients implicitly -// do use it at link time by inlining these functions.) -// Features of this x86. Values may not be correct before main() is run, -// but are set conservatively. 
-struct AtomicOps_x86CPUFeatureStruct { - bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence - // after acquire compare-and-swap. - bool has_sse2; // Processor has SSE2. -}; -extern struct AtomicOps_x86CPUFeatureStruct - AtomicOps_Internalx86CPUFeatures; - -#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") - -extern "C" { -typedef char __tsan_atomic8; -typedef short __tsan_atomic16; // NOLINT -typedef int __tsan_atomic32; -typedef long __tsan_atomic64; // NOLINT - -#if defined(__SIZEOF_INT128__) \ - || (__clang_major__ * 100 + __clang_minor__ >= 302) -typedef __int128 __tsan_atomic128; -#define __TSAN_HAS_INT128 1 -#else -typedef char __tsan_atomic128; -#define __TSAN_HAS_INT128 0 -#endif - -typedef enum { - __tsan_memory_order_relaxed, - __tsan_memory_order_consume, - __tsan_memory_order_acquire, - __tsan_memory_order_release, - __tsan_memory_order_acq_rel, - __tsan_memory_order_seq_cst, -} __tsan_memory_order; - -__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a, - __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a, - __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a, - __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a, - __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a, - __tsan_memory_order mo); - -void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v, - __tsan_memory_order mo); -void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v, - __tsan_memory_order mo); -void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v, - __tsan_memory_order mo); -void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v, - __tsan_memory_order mo); -void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v, - __tsan_memory_order mo); - 
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a, - __tsan_atomic128 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a, - __tsan_atomic128 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a, - __tsan_atomic128 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order 
mo); -__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a, - __tsan_atomic128 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a, - __tsan_atomic128 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); -__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a, - __tsan_atomic128 v, __tsan_memory_order mo); - -int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a, - __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a, - __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a, - __tsan_atomic32* c, __tsan_atomic32 v, 
__tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a, - __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a, - __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); - -int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a, - __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a, - __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a, - __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a, - __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); -int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a, - __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo, - __tsan_memory_order fail_mo); - -__tsan_atomic8 __tsan_atomic8_compare_exchange_val( - volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); -__tsan_atomic16 __tsan_atomic16_compare_exchange_val( - volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); -__tsan_atomic32 __tsan_atomic32_compare_exchange_val( - volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); -__tsan_atomic64 __tsan_atomic64_compare_exchange_val( - volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); -__tsan_atomic128 
__tsan_atomic128_compare_exchange_val( - volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); - -void __tsan_atomic_thread_fence(__tsan_memory_order mo); -void __tsan_atomic_signal_fence(__tsan_memory_order mo); -} // extern "C" - -#endif // #ifndef TSAN_INTERFACE_ATOMIC_H - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 cmp = old_value; - __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_relaxed, __tsan_memory_order_relaxed); - return cmp; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __tsan_atomic32_exchange(ptr, new_value, - __tsan_memory_order_relaxed); -} - -inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __tsan_atomic32_exchange(ptr, new_value, - __tsan_memory_order_acquire); -} - -inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __tsan_atomic32_exchange(ptr, new_value, - __tsan_memory_order_release); -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + __tsan_atomic32_fetch_add(ptr, increment, - __tsan_memory_order_relaxed); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + __tsan_atomic32_fetch_add(ptr, increment, - __tsan_memory_order_acq_rel); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 cmp = old_value; - __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_acquire, __tsan_memory_order_acquire); - return cmp; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 cmp = old_value; - 
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_release, __tsan_memory_order_relaxed); - return cmp; -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); - return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 cmp = old_value; - __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_relaxed, __tsan_memory_order_relaxed); - return cmp; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed); -} - -inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire); -} - -inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release); -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return increment + 
__tsan_atomic64_fetch_add(ptr, increment, - __tsan_memory_order_relaxed); -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return increment + __tsan_atomic64_fetch_add(ptr, increment, - __tsan_memory_order_acq_rel); -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - __tsan_atomic64_store(ptr, value, __tsan_memory_order_release); -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire); -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); - return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 cmp = old_value; - __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_acquire, __tsan_memory_order_acquire); - return cmp; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 cmp = old_value; - __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_release, __tsan_memory_order_relaxed); - return cmp; -} - -inline void MemoryBarrier() { - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); -} - -} // namespace internal -} // namespace v8 - -#undef ATOMICOPS_COMPILER_BARRIER - -#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_ diff -Nru 
nodejs-0.11.13/deps/v8/src/atomicops_internals_x86_gcc.cc nodejs-0.11.15/deps/v8/src/atomicops_internals_x86_gcc.cc --- nodejs-0.11.13/deps/v8/src/atomicops_internals_x86_gcc.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_x86_gcc.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This module gets enough CPU information to optimize the -// atomicops module on x86. 
- -#include <string.h> - -#include "atomicops.h" -#include "platform.h" - -// This file only makes sense with atomicops_internals_x86_gcc.h -- it -// depends on structs that are defined in that file. If atomicops.h -// doesn't sub-include that file, then we aren't needed, and shouldn't -// try to do anything. -#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ - -// Inline cpuid instruction. In PIC compilations, %ebx contains the address -// of the global offset table. To avoid breaking such executables, this code -// must preserve that register's value across cpuid instructions. -#if defined(__i386__) -#define cpuid(a, b, c, d, inp) \ - asm("mov %%ebx, %%edi\n" \ - "cpuid\n" \ - "xchg %%edi, %%ebx\n" \ - : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) -#elif defined(__x86_64__) -#define cpuid(a, b, c, d, inp) \ - asm("mov %%rbx, %%rdi\n" \ - "cpuid\n" \ - "xchg %%rdi, %%rbx\n" \ - : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) -#endif - -#if defined(cpuid) // initialize the struct only on x86 - -namespace v8 { -namespace internal { - -// Set the flags so that code will run correctly and conservatively, so even -// if we haven't been initialized yet, we're probably single threaded, and our -// default values should hopefully be pretty safe. -struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { - false, // bug can't exist before process spawns multiple threads - false, // no SSE2 -}; - -} } // namespace v8::internal - -namespace { - -// Initialize the AtomicOps_Internalx86CPUFeatures struct. 
-void AtomicOps_Internalx86CPUFeaturesInit() { - using v8::internal::AtomicOps_Internalx86CPUFeatures; - - uint32_t eax; - uint32_t ebx; - uint32_t ecx; - uint32_t edx; - - // Get vendor string (issue CPUID with eax = 0) - cpuid(eax, ebx, ecx, edx, 0); - char vendor[13]; - v8::internal::OS::MemCopy(vendor, &ebx, 4); - v8::internal::OS::MemCopy(vendor + 4, &edx, 4); - v8::internal::OS::MemCopy(vendor + 8, &ecx, 4); - vendor[12] = 0; - - // get feature flags in ecx/edx, and family/model in eax - cpuid(eax, ebx, ecx, edx, 1); - - int family = (eax >> 8) & 0xf; // family and model fields - int model = (eax >> 4) & 0xf; - if (family == 0xf) { // use extended family and model fields - family += (eax >> 20) & 0xff; - model += ((eax >> 16) & 0xf) << 4; - } - - // Opteron Rev E has a bug in which on very rare occasions a locked - // instruction doesn't act as a read-acquire barrier if followed by a - // non-locked read-modify-write instruction. Rev F has this bug in - // pre-release versions, but not in versions released to customers, - // so we test only for Rev E, which is family 15, model 32..63 inclusive. 
- if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD - family == 15 && - 32 <= model && model <= 63) { - AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true; - } else { - AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false; - } - - // edx bit 26 is SSE2 which we use to tell use whether we can use mfence - AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); -} - -class AtomicOpsx86Initializer { - public: - AtomicOpsx86Initializer() { - AtomicOps_Internalx86CPUFeaturesInit(); - } -}; - - -// A global to get use initialized on startup via static initialization :/ -AtomicOpsx86Initializer g_initer; - -} // namespace - -#endif // if x86 - -#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_x86_gcc.h nodejs-0.11.15/deps/v8/src/atomicops_internals_x86_gcc.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_x86_gcc.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_x86_gcc.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,287 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ -#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_ - -namespace v8 { -namespace internal { - -// This struct is not part of the public API of this module; clients may not -// use it. -// Features of this x86. Values may not be correct before main() is run, -// but are set conservatively. -struct AtomicOps_x86CPUFeatureStruct { - bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence - // after acquire compare-and-swap. - bool has_sse2; // Processor has SSE2. -}; -extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; - -#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") - -// 32-bit low-level operations on any platform. 
- -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 prev; - __asm__ __volatile__("lock; cmpxchgl %1,%2" - : "=a" (prev) - : "q" (new_value), "m" (*ptr), "0" (old_value) - : "memory"); - return prev; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg. - : "=r" (new_value) - : "m" (*ptr), "0" (new_value) - : "memory"); - return new_value; // Now it's the previous value. -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 temp = increment; - __asm__ __volatile__("lock; xaddl %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now holds the old value of *ptr - return temp + increment; -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - Atomic32 temp = increment; - __asm__ __volatile__("lock; xaddl %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now holds the old value of *ptr - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return temp + increment; -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return x; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -#if defined(__x86_64__) - -// 64-bit implementations of memory barrier can be simpler, because it -// "mfence" is guaranteed to exist. 
-inline void MemoryBarrier() { - __asm__ __volatile__("mfence" : : : "memory"); -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - -#else - -inline void MemoryBarrier() { - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { - __asm__ __volatile__("mfence" : : : "memory"); - } else { // mfence is faster but not present on PIII - Atomic32 x = 0; - NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII - } -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { - *ptr = value; - __asm__ __volatile__("mfence" : : : "memory"); - } else { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier on PIII - } -} -#endif - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - ATOMICOPS_COMPILER_BARRIER(); - *ptr = value; // An x86 store acts as a release barrier. - // See comments in Atomic64 version of Release_Store(), below. -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; // An x86 load acts as a acquire barrier. - // See comments in Atomic64 version of Release_Store(), below. - ATOMICOPS_COMPILER_BARRIER(); - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT) - -// 64-bit low-level operations on 64-bit platform. - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 prev; - __asm__ __volatile__("lock; cmpxchgq %1,%2" - : "=a" (prev) - : "q" (new_value), "m" (*ptr), "0" (old_value) - : "memory"); - return prev; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg. 
- : "=r" (new_value) - : "m" (*ptr), "0" (new_value) - : "memory"); - return new_value; // Now it's the previous value. -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - Atomic64 temp = increment; - __asm__ __volatile__("lock; xaddq %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now contains the previous value of *ptr - return temp + increment; -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - Atomic64 temp = increment; - __asm__ __volatile__("lock; xaddq %0,%1" - : "+r" (temp), "+m" (*ptr) - : : "memory"); - // temp now contains the previous value of *ptr - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return temp + increment; -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - ATOMICOPS_COMPILER_BARRIER(); - - *ptr = value; // An x86 store acts as a release barrier - // for current AMD/Intel chips as of Jan 2008. - // See also Acquire_Load(), below. - - // When new chips come out, check: - // IA-32 Intel Architecture Software Developer's Manual, Volume 3: - // System Programming Guide, Chatper 7: Multiple-processor management, - // Section 7.2, Memory Ordering. - // Last seen at: - // http://developer.intel.com/design/pentium4/manuals/index_new.htm - // - // x86 stores/loads fail to act as barriers for a few instructions (clflush - // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are - // not generated by the compiler, and are rare. Users of these instructions - // need to know about cache behaviour in any case since all of these involve - // either flushing cache lines or non-temporal cache hints. 
-} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - Atomic64 value = *ptr; // An x86 load acts as a acquire barrier, - // for current AMD/Intel chips as of Jan 2008. - // See also Release_Store(), above. - ATOMICOPS_COMPILER_BARRIER(); - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { - __asm__ __volatile__("lfence" : : : "memory"); - } - return x; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -#endif // defined(__x86_64__) - -} } // namespace v8::internal - -#undef ATOMICOPS_COMPILER_BARRIER - -#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/atomicops_internals_x86_msvc.h nodejs-0.11.15/deps/v8/src/atomicops_internals_x86_msvc.h --- nodejs-0.11.13/deps/v8/src/atomicops_internals_x86_msvc.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/atomicops_internals_x86_msvc.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,217 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is an internal atomic implementation, use atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ -#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ - -#include "checks.h" -#include "win32-headers.h" - -#if defined(V8_HOST_ARCH_64_BIT) -// windows.h #defines this (only on x64). This causes problems because the -// public API also uses MemoryBarrier at the public name for this fence. So, on -// X64, undef it, and call its documented -// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) -// implementation directly. 
-#undef MemoryBarrier -#endif - -namespace v8 { -namespace internal { - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - LONG result = InterlockedCompareExchange( - reinterpret_cast<volatile LONG*>(ptr), - static_cast<LONG>(new_value), - static_cast<LONG>(old_value)); - return static_cast<Atomic32>(result); -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - LONG result = InterlockedExchange( - reinterpret_cast<volatile LONG*>(ptr), - static_cast<LONG>(new_value)); - return static_cast<Atomic32>(result); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return InterlockedExchangeAdd( - reinterpret_cast<volatile LONG*>(ptr), - static_cast<LONG>(increment)) + increment; -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return Barrier_AtomicIncrement(ptr, increment); -} - -#if !(defined(_MSC_VER) && _MSC_VER >= 1400) -#error "We require at least vs2005 for MemoryBarrier" -#endif -inline void MemoryBarrier() { -#if defined(V8_HOST_ARCH_64_BIT) - // See #undef and note at the top of this file. 
- __faststorefence(); -#else - // We use MemoryBarrier from WinNT.h - ::MemoryBarrier(); -#endif -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier in this implementation -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; // works w/o barrier for current Intel chips as of June 2005 - // See comments in Atomic64 version of Release_Store() below. -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return *ptr; -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - Atomic32 value = *ptr; - return value; -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - -#if defined(_WIN64) - -// 64-bit low-level operations on 64-bit platform. 
- -STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID)); - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - PVOID result = InterlockedCompareExchangePointer( - reinterpret_cast<volatile PVOID*>(ptr), - reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); - return reinterpret_cast<Atomic64>(result); -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - PVOID result = InterlockedExchangePointer( - reinterpret_cast<volatile PVOID*>(ptr), - reinterpret_cast<PVOID>(new_value)); - return reinterpret_cast<Atomic64>(result); -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return InterlockedExchangeAdd64( - reinterpret_cast<volatile LONGLONG*>(ptr), - static_cast<LONGLONG>(increment)) + increment; -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return Barrier_AtomicIncrement(ptr, increment); -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier in this implementation -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; // works w/o barrier for current Intel chips as of June 2005 - - // When new chips come out, check: - // IA-32 Intel Architecture Software Developer's Manual, Volume 3: - // System Programming Guide, Chatper 7: Multiple-processor management, - // Section 7.2, Memory Ordering. 
- // Last seen at: - // http://developer.intel.com/design/pentium4/manuals/index_new.htm -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return *ptr; -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - Atomic64 value = *ptr; - return value; -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -} - - -#endif // defined(_WIN64) - -} } // namespace v8::internal - -#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops.h nodejs-0.11.15/deps/v8/src/base/atomicops.h --- nodejs-0.11.13/deps/v8/src/base/atomicops.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,163 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// The routines exported by this module are subtle. If you use them, even if +// you get the code right, it will depend on careful reasoning about atomicity +// and memory ordering; it will be less readable, and harder to maintain. If +// you plan to use these routines, you should have a good reason, such as solid +// evidence that performance would otherwise suffer, or there being no +// alternative. You should assume only properties explicitly guaranteed by the +// specifications in this file. You are almost certainly _not_ writing code +// just for the x86; if you assume x86 semantics, x86 hardware bugs and +// implementations on other archtectures will cause your code to break. 
If you +// do not know what you are doing, avoid these routines, and use a Mutex. +// +// It is incorrect to make direct assignments to/from an atomic variable. +// You should use one of the Load or Store routines. The NoBarrier +// versions are provided when no barriers are needed: +// NoBarrier_Store() +// NoBarrier_Load() +// Although there are currently no compiler enforcement, you are encouraged +// to use these. +// + +#ifndef V8_BASE_ATOMICOPS_H_ +#define V8_BASE_ATOMICOPS_H_ + +#include "include/v8stdint.h" +#include "src/base/build_config.h" + +#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT) +// windows.h #defines this (only on x64). This causes problems because the +// public API also uses MemoryBarrier at the public name for this fence. So, on +// X64, undef it, and call its documented +// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) +// implementation directly. +#undef MemoryBarrier +#endif + +namespace v8 { +namespace base { + +typedef char Atomic8; +typedef int32_t Atomic32; +#ifdef V8_HOST_ARCH_64_BIT +// We need to be able to go between Atomic64 and AtomicWord implicitly. This +// means Atomic64 and AtomicWord should be the same type on 64-bit. +#if defined(__ILP32__) +typedef int64_t Atomic64; +#else +typedef intptr_t Atomic64; +#endif +#endif + +// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or +// Atomic64 routines below, depending on your architecture. +typedef intptr_t AtomicWord; + +// Atomically execute: +// result = *ptr; +// if (*ptr == old_value) +// *ptr = new_value; +// return result; +// +// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". +// Always return the old value of "*ptr" +// +// This routine implies no memory barriers. +Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value); + +// Atomically store new_value into *ptr, returning the previous value held in +// *ptr. 
This routine implies no memory barriers. +Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); + +// Atomically increment *ptr by "increment". Returns the new value of +// *ptr with the increment applied. This routine implies no memory barriers. +Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment); + +Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment); + +// These following lower-level operations are typically useful only to people +// implementing higher-level synchronization operations like spinlocks, +// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or +// a store with appropriate memory-ordering instructions. "Acquire" operations +// ensure that no later memory access can be reordered ahead of the operation. +// "Release" operations ensure that no previous memory access can be reordered +// after the operation. "Barrier" operations have both "Acquire" and "Release" +// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory +// access. +Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value); +Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value); + +void MemoryBarrier(); +void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value); +void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value); +void Acquire_Store(volatile Atomic32* ptr, Atomic32 value); +void Release_Store(volatile Atomic32* ptr, Atomic32 value); + +Atomic8 NoBarrier_Load(volatile const Atomic8* ptr); +Atomic32 NoBarrier_Load(volatile const Atomic32* ptr); +Atomic32 Acquire_Load(volatile const Atomic32* ptr); +Atomic32 Release_Load(volatile const Atomic32* ptr); + +// 64-bit atomic operations (only available on 64-bit processors). 
+#ifdef V8_HOST_ARCH_64_BIT +Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value); +Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); +Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); +Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); + +Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value); +Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value); +void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value); +void Acquire_Store(volatile Atomic64* ptr, Atomic64 value); +void Release_Store(volatile Atomic64* ptr, Atomic64 value); +Atomic64 NoBarrier_Load(volatile const Atomic64* ptr); +Atomic64 Acquire_Load(volatile const Atomic64* ptr); +Atomic64 Release_Load(volatile const Atomic64* ptr); +#endif // V8_HOST_ARCH_64_BIT + +} } // namespace v8::base + +// Include our platform specific implementation. 
+#if defined(THREAD_SANITIZER) +#include "src/base/atomicops_internals_tsan.h" +#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) +#include "src/base/atomicops_internals_x86_msvc.h" +#elif defined(__APPLE__) +#include "src/base/atomicops_internals_mac.h" +#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64 +#include "src/base/atomicops_internals_arm64_gcc.h" +#elif defined(__GNUC__) && V8_HOST_ARCH_ARM +#include "src/base/atomicops_internals_arm_gcc.h" +#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) +#include "src/base/atomicops_internals_x86_gcc.h" +#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS +#include "src/base/atomicops_internals_mips_gcc.h" +#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64 +#include "src/base/atomicops_internals_mips64_gcc.h" +#else +#error "Atomic operations are not supported on your platform" +#endif + +// On some platforms we need additional declarations to make +// AtomicWord compatible with our other Atomic* types. +#if defined(__APPLE__) || defined(__OpenBSD__) +#include "src/base/atomicops_internals_atomicword_compat.h" +#endif + +#endif // V8_BASE_ATOMICOPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_arm64_gcc.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_arm64_gcc.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_arm64_gcc.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_arm64_gcc.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,316 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. 
+ +#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ + +namespace v8 { +namespace base { + +inline void MemoryBarrier() { + __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT +} + +// NoBarrier versions of the operation include "memory" in the clobber list. +// This is not required for direct usage of the NoBarrier versions of the +// operations. However this is required for correctness when they are used as +// part of the Acquire or Release versions, to ensure that nothing from outside +// the call is reordered between the operation and the memory barrier. This does +// not change the code generated, so has no or minimal impact on the +// NoBarrier operations. + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. + "cmp %w[prev], %w[old_value] \n\t" + "bne 1f \n\t" + "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. + "cbnz %w[temp], 0b \n\t" // Retry if it did not work. + "1: \n\t" + : [prev]"=&r" (prev), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [old_value]"IJr" (old_value), + [new_value]"r" (new_value) + : "cc", "memory" + ); // NOLINT + + return prev; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[result], %[ptr] \n\t" // Load the previous value. + "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. + "cbnz %w[temp], 0b \n\t" // Retry if it did not work. 
+ : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [new_value]"r" (new_value) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[result], %[ptr] \n\t" // Load the previous value. + "add %w[result], %w[result], %w[increment]\n\t" + "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result. + "cbnz %w[temp], 0b \n\t" // Retry on failure. + : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [increment]"IJr" (increment) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 result; + + MemoryBarrier(); + result = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + + return result; +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + + return prev; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + + MemoryBarrier(); + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + + return prev; +} + +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + __asm__ __volatile__ ( // NOLINT + "stlr %w[value], %[ptr] \n\t" + : [ptr]"=Q" (*ptr) + : [value]"r" (value) + : "memory" + ); // NOLINT +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return *ptr; +} + +inline 
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value; + + __asm__ __volatile__ ( // NOLINT + "ldar %w[value], %[ptr] \n\t" + : [value]"=r" (value) + : [ptr]"Q" (*ptr) + : "memory" + ); // NOLINT + + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +// 64-bit versions of the operations. +// See the 32-bit versions for comments. + +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %[prev], %[ptr] \n\t" + "cmp %[prev], %[old_value] \n\t" + "bne 1f \n\t" + "stxr %w[temp], %[new_value], %[ptr] \n\t" + "cbnz %w[temp], 0b \n\t" + "1: \n\t" + : [prev]"=&r" (prev), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [old_value]"IJr" (old_value), + [new_value]"r" (new_value) + : "cc", "memory" + ); // NOLINT + + return prev; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + Atomic64 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %[result], %[ptr] \n\t" + "stxr %w[temp], %[new_value], %[ptr] \n\t" + "cbnz %w[temp], 0b \n\t" + : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [new_value]"r" (new_value) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %[result], %[ptr] \n\t" + "add %[result], %[result], %[increment] \n\t" + "stxr %w[temp], %[result], %[ptr] \n\t" + "cbnz %w[temp], 0b \n\t" + : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [increment]"IJr" (increment) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic64 
Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 result; + + MemoryBarrier(); + result = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + + return result; +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev; + + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + + return prev; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev; + + MemoryBarrier(); + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + + return prev; +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + __asm__ __volatile__ ( // NOLINT + "stlr %x[value], %[ptr] \n\t" + : [ptr]"=Q" (*ptr) + : [value]"r" (value) + : "memory" + ); // NOLINT +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value; + + __asm__ __volatile__ ( // NOLINT + "ldar %x[value], %[ptr] \n\t" + : [value]"=r" (value) + : [ptr]"Q" (*ptr) + : "memory" + ); // NOLINT + + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +} } // namespace v8::base + +#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_arm_gcc.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_arm_gcc.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_arm_gcc.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_arm_gcc.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,301 @@ +// Copyright 2010 the V8 project authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. +// +// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ + +#if defined(__QNXNTO__) +#include <sys/cpuinline.h> +#endif + +namespace v8 { +namespace base { + +// Memory barriers on ARM are funky, but the kernel is here to help: +// +// * ARMv5 didn't support SMP, there is no memory barrier instruction at +// all on this architecture, or when targeting its machine code. +// +// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by +// writing a random value to a very specific coprocessor register. +// +// * On ARMv7, the "dmb" instruction is used to perform a full memory +// barrier (though writing to the co-processor will still work). +// However, on single core devices (e.g. Nexus One, or Nexus S), +// this instruction will take up to 200 ns, which is huge, even though +// it's completely un-needed on these devices. +// +// * There is no easy way to determine at runtime if the device is +// single or multi-core. However, the kernel provides a useful helper +// function at a fixed memory address (0xffff0fa0), which will always +// perform a memory barrier in the most efficient way. I.e. on single +// core devices, this is an empty function that exits immediately. +// On multi-core devices, it implements a full memory barrier. +// +// * This source could be compiled to ARMv5 machine code that runs on a +// multi-core ARMv6 or ARMv7 device. In this case, memory barriers +// are needed for correct execution. Always call the kernel helper, even +// when targeting ARMv5TE. 
+// + +inline void MemoryBarrier() { +#if defined(__linux__) || defined(__ANDROID__) + // Note: This is a function call, which is also an implicit compiler barrier. + typedef void (*KernelMemoryBarrierFunc)(); + ((KernelMemoryBarrierFunc)0xffff0fa0)(); +#elif defined(__QNXNTO__) + __cpu_membarrier(); +#else +#error MemoryBarrier() is not implemented on this platform. +#endif +} + +// An ARM toolchain would only define one of these depending on which +// variant of the target architecture is being used. This tests against +// any known ARMv6 or ARMv7 variant, where it is possible to directly +// use ldrex/strex instructions to implement fast atomic operations. +#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ + defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \ + defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ + defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ + defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__) + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev_value; + int reloop; + do { + // The following is equivalent to: + // + // prev_value = LDREX(ptr) + // reloop = 0 + // if (prev_value != old_value) + // reloop = STREX(ptr, new_value) + __asm__ __volatile__(" ldrex %0, [%3]\n" + " mov %1, #0\n" + " cmp %0, %4\n" +#ifdef __thumb2__ + " it eq\n" +#endif + " strexeq %1, %5, [%3]\n" + : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr) + : "r"(ptr), "r"(old_value), "r"(new_value) + : "cc", "memory"); + } while (reloop != 0); + return prev_value; +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + return result; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, 
new_value); +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 value; + int reloop; + do { + // Equivalent to: + // + // value = LDREX(ptr) + // value += increment + // reloop = STREX(ptr, value) + // + __asm__ __volatile__(" ldrex %0, [%3]\n" + " add %0, %0, %4\n" + " strex %1, %0, [%3]\n" + : "=&r"(value), "=&r"(reloop), "+m"(*ptr) + : "r"(ptr), "r"(increment) + : "cc", "memory"); + } while (reloop); + return value; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + // TODO(digit): Investigate if it's possible to implement this with + // a single MemoryBarrier() operation between the LDREX and STREX. + // See http://crbug.com/246514 + MemoryBarrier(); + Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + return result; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 old_value; + int reloop; + do { + // old_value = LDREX(ptr) + // reloop = STREX(ptr, new_value) + __asm__ __volatile__(" ldrex %0, [%3]\n" + " strex %1, %4, [%3]\n" + : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr) + : "r"(ptr), "r"(new_value) + : "cc", "memory"); + } while (reloop != 0); + return old_value; +} + +// This tests against any known ARMv5 variant. +#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \ + defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__) + +// The kernel also provides a helper function to perform an atomic +// compare-and-swap operation at the hard-wired address 0xffff0fc0. +// On ARMv5, this is implemented by a special code path that the kernel +// detects and treats specially when thread pre-emption happens. +// On ARMv6 and higher, it uses LDREX/STREX instructions instead. +// +// Note that this always perform a full memory barrier, there is no +// need to add calls MemoryBarrier() before or after it. It also +// returns 0 on success, and 1 on exit. 
+// +// Available and reliable since Linux 2.6.24. Both Android and ChromeOS +// use newer kernel revisions, so this should not be a concern. +namespace { + +inline int LinuxKernelCmpxchg(Atomic32 old_value, + Atomic32 new_value, + volatile Atomic32* ptr) { + typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*); + return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr); +} + +} // namespace + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev_value; + for (;;) { + prev_value = *ptr; + if (prev_value != old_value) + return prev_value; + if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) + return old_value; + } +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 old_value; + do { + old_value = *ptr; + } while (LinuxKernelCmpxchg(old_value, new_value, ptr)); + return old_value; +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return Barrier_AtomicIncrement(ptr, increment); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + for (;;) { + // Atomic exchange the old value with an incremented one. + Atomic32 old_value = *ptr; + Atomic32 new_value = old_value + increment; + if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) { + // The exchange took place as expected. + return new_value; + } + // Otherwise, *ptr changed mid-loop and we need to retry. + } +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev_value; + for (;;) { + prev_value = *ptr; + if (prev_value != old_value) { + // Always ensure acquire semantics. 
+ MemoryBarrier(); + return prev_value; + } + if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) + return old_value; + } +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + // This could be implemented as: + // MemoryBarrier(); + // return NoBarrier_CompareAndSwap(); + // + // But would use 3 barriers per succesful CAS. To save performance, + // use Acquire_CompareAndSwap(). Its implementation guarantees that: + // - A succesful swap uses only 2 barriers (in the kernel helper). + // - An early return due to (prev_value != old_value) performs + // a memory barrier with no store, which is equivalent to the + // generic implementation above. + return Acquire_CompareAndSwap(ptr, old_value, new_value); +} + +#else +# error "Your CPU's ARM architecture is not supported yet" +#endif + +// NOTE: Atomicity of the following load and store operations is only +// guaranteed in case of 32-bit alignement of |ptr| values. + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +// Byte accessors. 
+ +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; } + +} } // namespace v8::base + +#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_atomicword_compat.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_atomicword_compat.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_atomicword_compat.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_atomicword_compat.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,99 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ + +// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32, +// which in turn means int. On some LP32 platforms, intptr_t is an int, but +// on others, it's a long. When AtomicWord and Atomic32 are based on different +// fundamental types, their pointers are incompatible. +// +// This file defines function overloads to allow both AtomicWord and Atomic32 +// data to be used with this interface. +// +// On LP64 platforms, AtomicWord and Atomic64 are both always long, +// so this problem doesn't occur. 
+ +#if !defined(V8_HOST_ARCH_64_BIT) + +namespace v8 { +namespace base { + +inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return NoBarrier_CompareAndSwap( + reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); +} + +inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, + AtomicWord new_value) { + return NoBarrier_AtomicExchange( + reinterpret_cast<volatile Atomic32*>(ptr), new_value); +} + +inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, + AtomicWord increment) { + return NoBarrier_AtomicIncrement( + reinterpret_cast<volatile Atomic32*>(ptr), increment); +} + +inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, + AtomicWord increment) { + return Barrier_AtomicIncrement( + reinterpret_cast<volatile Atomic32*>(ptr), increment); +} + +inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return v8::base::Acquire_CompareAndSwap( + reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); +} + +inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return v8::base::Release_CompareAndSwap( + reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); +} + +inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { + NoBarrier_Store( + reinterpret_cast<volatile Atomic32*>(ptr), value); +} + +inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { + return v8::base::Acquire_Store( + reinterpret_cast<volatile Atomic32*>(ptr), value); +} + +inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { + return v8::base::Release_Store( + reinterpret_cast<volatile Atomic32*>(ptr), value); +} + +inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { + return NoBarrier_Load( + reinterpret_cast<volatile const Atomic32*>(ptr)); +} + +inline 
AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { + return v8::base::Acquire_Load( + reinterpret_cast<volatile const Atomic32*>(ptr)); +} + +inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { + return v8::base::Release_Load( + reinterpret_cast<volatile const Atomic32*>(ptr)); +} + +} } // namespace v8::base + +#endif // !defined(V8_HOST_ARCH_64_BIT) + +#endif // V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_mac.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_mac.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_mac.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_mac.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,204 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. 
+ +#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ + +#include <libkern/OSAtomic.h> + +namespace v8 { +namespace base { + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev_value; + do { + if (OSAtomicCompareAndSwap32(old_value, new_value, + const_cast<Atomic32*>(ptr))) { + return old_value; + } + prev_value = *ptr; + } while (prev_value == old_value); + return prev_value; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 old_value; + do { + old_value = *ptr; + } while (!OSAtomicCompareAndSwap32(old_value, new_value, + const_cast<Atomic32*>(ptr))); + return old_value; +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); +} + +inline void MemoryBarrier() { + OSMemoryBarrier(); +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev_value; + do { + if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, + const_cast<Atomic32*>(ptr))) { + return old_value; + } + prev_value = *ptr; + } while (prev_value == old_value); + return prev_value; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return Acquire_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 
value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return *ptr; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +#ifdef __LP64__ + +// 64-bit implementation on 64-bit platform + +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev_value; + do { + if (OSAtomicCompareAndSwap64(old_value, new_value, + reinterpret_cast<volatile int64_t*>(ptr))) { + return old_value; + } + prev_value = *ptr; + } while (prev_value == old_value); + return prev_value; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + Atomic64 old_value; + do { + old_value = *ptr; + } while (!OSAtomicCompareAndSwap64(old_value, new_value, + reinterpret_cast<volatile int64_t*>(ptr))); + return old_value; +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr)); +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return OSAtomicAdd64Barrier(increment, + reinterpret_cast<volatile int64_t*>(ptr)); +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev_value; + do { + if (OSAtomicCompareAndSwap64Barrier( + old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) { + return old_value; + } + prev_value = *ptr; + } while (prev_value == old_value); + return prev_value; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + // The lib kern interface does not distinguish 
between + // Acquire and Release memory barriers; they are equivalent. + return Acquire_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +#endif // defined(__LP64__) + +} } // namespace v8::base + +#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_mips64_gcc.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_mips64_gcc.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_mips64_gcc.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_mips64_gcc.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,307 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is an internal atomic implementation, use atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ + +namespace v8 { +namespace base { + +// Atomically execute: +// result = *ptr; +// if (*ptr == old_value) +// *ptr = new_value; +// return result; +// +// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". +// Always return the old value of "*ptr" +// +// This routine implies no memory barriers. 
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev, tmp; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "ll %0, %5\n" // prev = *ptr + "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 + "move %2, %4\n" // tmp = new_value + "sc %2, %1\n" // *ptr = tmp (with atomic check) + "beqz %2, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + "2:\n" + ".set pop\n" + : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) + : "Ir" (old_value), "r" (new_value), "m" (*ptr) + : "memory"); + return prev; +} + +// Atomically store new_value into *ptr, returning the previous value held in +// *ptr. This routine implies no memory barriers. +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 temp, old; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "ll %1, %2\n" // old = *ptr + "move %0, %3\n" // temp = new_value + "sc %0, %2\n" // *ptr = temp (with atomic check) + "beqz %0, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + ".set pop\n" + : "=&r" (temp), "=&r" (old), "=m" (*ptr) + : "r" (new_value), "m" (*ptr) + : "memory"); + + return old; +} + +// Atomically increment *ptr by "increment". Returns the new value of +// *ptr with the increment applied. This routine implies no memory barriers. +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 temp, temp2; + + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "ll %0, %2\n" // temp = *ptr + "addu %1, %0, %3\n" // temp2 = temp + increment + "sc %1, %2\n" // *ptr = temp2 (with atomic check) + "beqz %1, 1b\n" // start again on atomic error + "addu %1, %0, %3\n" // temp2 = temp + increment + ".set pop\n" + : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) + : "Ir" (increment), "m" (*ptr) + : "memory"); + // temp2 now holds the final value. 
+ return temp2; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + MemoryBarrier(); + Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + return res; +} + +// "Acquire" operations +// ensure that no later memory access can be reordered ahead of the operation. +// "Release" operations ensure that no previous memory access can be reordered +// after the operation. "Barrier" operations have both "Acquire" and "Release" +// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory +// access. +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + return res; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void MemoryBarrier() { + __asm__ __volatile__("sync" : : : "memory"); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return *ptr; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + + +// 64-bit versions of the atomic ops. 
+ +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev, tmp; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "lld %0, %5\n" // prev = *ptr + "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 + "move %2, %4\n" // tmp = new_value + "scd %2, %1\n" // *ptr = tmp (with atomic check) + "beqz %2, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + "2:\n" + ".set pop\n" + : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) + : "Ir" (old_value), "r" (new_value), "m" (*ptr) + : "memory"); + return prev; +} + +// Atomically store new_value into *ptr, returning the previous value held in +// *ptr. This routine implies no memory barriers. +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + Atomic64 temp, old; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "lld %1, %2\n" // old = *ptr + "move %0, %3\n" // temp = new_value + "scd %0, %2\n" // *ptr = temp (with atomic check) + "beqz %0, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + ".set pop\n" + : "=&r" (temp), "=&r" (old), "=m" (*ptr) + : "r" (new_value), "m" (*ptr) + : "memory"); + + return old; +} + +// Atomically increment *ptr by "increment". Returns the new value of +// *ptr with the increment applied. This routine implies no memory barriers. +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 temp, temp2; + + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "lld %0, %2\n" // temp = *ptr + "daddu %1, %0, %3\n" // temp2 = temp + increment + "scd %1, %2\n" // *ptr = temp2 (with atomic check) + "beqz %1, 1b\n" // start again on atomic error + "daddu %1, %0, %3\n" // temp2 = temp + increment + ".set pop\n" + : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) + : "Ir" (increment), "m" (*ptr) + : "memory"); + // temp2 now holds the final value. 
+ return temp2; +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + MemoryBarrier(); + Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + return res; +} + +// "Acquire" operations +// ensure that no later memory access can be reordered ahead of the operation. +// "Release" operations ensure that no previous memory access can be reordered +// after the operation. "Barrier" operations have both "Acquire" and "Release" +// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory +// access. +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + return res; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +} } // namespace v8::base + +#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_mips_gcc.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_mips_gcc.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_mips_gcc.h 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/base/atomicops_internals_mips_gcc.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,159 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ + +namespace v8 { +namespace base { + +// Atomically execute: +// result = *ptr; +// if (*ptr == old_value) +// *ptr = new_value; +// return result; +// +// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". +// Always return the old value of "*ptr" +// +// This routine implies no memory barriers. +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev, tmp; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "ll %0, %5\n" // prev = *ptr + "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 + "move %2, %4\n" // tmp = new_value + "sc %2, %1\n" // *ptr = tmp (with atomic check) + "beqz %2, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + "2:\n" + ".set pop\n" + : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) + : "Ir" (old_value), "r" (new_value), "m" (*ptr) + : "memory"); + return prev; +} + +// Atomically store new_value into *ptr, returning the previous value held in +// *ptr. This routine implies no memory barriers. 
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 temp, old; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "ll %1, %2\n" // old = *ptr + "move %0, %3\n" // temp = new_value + "sc %0, %2\n" // *ptr = temp (with atomic check) + "beqz %0, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + ".set pop\n" + : "=&r" (temp), "=&r" (old), "=m" (*ptr) + : "r" (new_value), "m" (*ptr) + : "memory"); + + return old; +} + +// Atomically increment *ptr by "increment". Returns the new value of +// *ptr with the increment applied. This routine implies no memory barriers. +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 temp, temp2; + + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "ll %0, %2\n" // temp = *ptr + "addu %1, %0, %3\n" // temp2 = temp + increment + "sc %1, %2\n" // *ptr = temp2 (with atomic check) + "beqz %1, 1b\n" // start again on atomic error + "addu %1, %0, %3\n" // temp2 = temp + increment + ".set pop\n" + : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) + : "Ir" (increment), "m" (*ptr) + : "memory"); + // temp2 now holds the final value. + return temp2; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + MemoryBarrier(); + Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + return res; +} + +// "Acquire" operations +// ensure that no later memory access can be reordered ahead of the operation. +// "Release" operations ensure that no previous memory access can be reordered +// after the operation. "Barrier" operations have both "Acquire" and "Release" +// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory +// access. 
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + return res; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void MemoryBarrier() { + __asm__ __volatile__("sync" : : : "memory"); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return *ptr; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +} } // namespace v8::base + +#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_tsan.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_tsan.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_tsan.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_tsan.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,363 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + +// This file is an internal atomic implementation for compiler-based +// ThreadSanitizer. 
Use base/atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_ + +namespace v8 { +namespace base { + +#ifndef TSAN_INTERFACE_ATOMIC_H +#define TSAN_INTERFACE_ATOMIC_H + + +extern "C" { +typedef char __tsan_atomic8; +typedef short __tsan_atomic16; // NOLINT +typedef int __tsan_atomic32; +typedef long __tsan_atomic64; // NOLINT + +#if defined(__SIZEOF_INT128__) \ + || (__clang_major__ * 100 + __clang_minor__ >= 302) +typedef __int128 __tsan_atomic128; +#define __TSAN_HAS_INT128 1 +#else +typedef char __tsan_atomic128; +#define __TSAN_HAS_INT128 0 +#endif + +typedef enum { + __tsan_memory_order_relaxed, + __tsan_memory_order_consume, + __tsan_memory_order_acquire, + __tsan_memory_order_release, + __tsan_memory_order_acq_rel, + __tsan_memory_order_seq_cst, +} __tsan_memory_order; + +__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a, + __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a, + __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a, + __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a, + __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a, + __tsan_memory_order mo); + +void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v, + __tsan_memory_order mo); +void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v, + __tsan_memory_order mo); +void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v, + __tsan_memory_order mo); +void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v, + __tsan_memory_order mo); +void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v, + __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 
__tsan_atomic16_exchange(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a, + __tsan_atomic128 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a, + __tsan_atomic128 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a, + __tsan_atomic128 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); 
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a, + __tsan_atomic128 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a, + __tsan_atomic128 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); +__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a, + __tsan_atomic128 v, __tsan_memory_order mo); + +int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a, + __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a, + __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a, + __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic64_compare_exchange_weak(volatile 
__tsan_atomic64* a, + __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a, + __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); + +int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a, + __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a, + __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a, + __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a, + __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a, + __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); + +__tsan_atomic8 __tsan_atomic8_compare_exchange_val( + volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo); +__tsan_atomic16 __tsan_atomic16_compare_exchange_val( + volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo); +__tsan_atomic32 __tsan_atomic32_compare_exchange_val( + volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo); +__tsan_atomic64 __tsan_atomic64_compare_exchange_val( + volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo); +__tsan_atomic128 __tsan_atomic128_compare_exchange_val( + volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v, + 
__tsan_memory_order mo, __tsan_memory_order fail_mo); + +void __tsan_atomic_thread_fence(__tsan_memory_order mo); +void __tsan_atomic_signal_fence(__tsan_memory_order mo); +} // extern "C" + +#endif // #ifndef TSAN_INTERFACE_ATOMIC_H + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_relaxed, __tsan_memory_order_relaxed); + return cmp; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_relaxed); +} + +inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_acquire); +} + +inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_release); +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + __tsan_atomic32_fetch_add(ptr, increment, + __tsan_memory_order_relaxed); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + __tsan_atomic32_fetch_add(ptr, increment, + __tsan_memory_order_acq_rel); +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_acquire, __tsan_memory_order_acquire); + return cmp; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_release, __tsan_memory_order_relaxed); + return cmp; +} + +inline 
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); + return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_relaxed, __tsan_memory_order_relaxed); + return cmp; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire); +} + +inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release); 
+} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + __tsan_atomic64_fetch_add(ptr, increment, + __tsan_memory_order_relaxed); +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + __tsan_atomic64_fetch_add(ptr, increment, + __tsan_memory_order_acq_rel); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_release); +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire); +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); + return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_acquire, __tsan_memory_order_acquire); + return cmp; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_release, __tsan_memory_order_relaxed); + return cmp; +} + +inline void MemoryBarrier() { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +} // namespace 
base +} // namespace v8 + +#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_x86_gcc.cc nodejs-0.11.15/deps/v8/src/base/atomicops_internals_x86_gcc.cc --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_x86_gcc.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_x86_gcc.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,115 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This module gets enough CPU information to optimize the +// atomicops module on x86. + +#include <string.h> + +#include "src/base/atomicops.h" + +// This file only makes sense with atomicops_internals_x86_gcc.h -- it +// depends on structs that are defined in that file. If atomicops.h +// doesn't sub-include that file, then we aren't needed, and shouldn't +// try to do anything. +#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ + +// Inline cpuid instruction. In PIC compilations, %ebx contains the address +// of the global offset table. To avoid breaking such executables, this code +// must preserve that register's value across cpuid instructions. +#if defined(__i386__) +#define cpuid(a, b, c, d, inp) \ + asm("mov %%ebx, %%edi\n" \ + "cpuid\n" \ + "xchg %%edi, %%ebx\n" \ + : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) +#elif defined(__x86_64__) +#define cpuid(a, b, c, d, inp) \ + asm("mov %%rbx, %%rdi\n" \ + "cpuid\n" \ + "xchg %%rdi, %%rbx\n" \ + : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) +#endif + +#if defined(cpuid) // initialize the struct only on x86 + +namespace v8 { +namespace base { + +// Set the flags so that code will run correctly and conservatively, so even +// if we haven't been initialized yet, we're probably single threaded, and our +// default values should hopefully be pretty safe. 
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { + false, // bug can't exist before process spawns multiple threads +#if !defined(__SSE2__) + false, // no SSE2 +#endif +}; + +} } // namespace v8::base + +namespace { + +// Initialize the AtomicOps_Internalx86CPUFeatures struct. +void AtomicOps_Internalx86CPUFeaturesInit() { + using v8::base::AtomicOps_Internalx86CPUFeatures; + + uint32_t eax = 0; + uint32_t ebx = 0; + uint32_t ecx = 0; + uint32_t edx = 0; + + // Get vendor string (issue CPUID with eax = 0) + cpuid(eax, ebx, ecx, edx, 0); + char vendor[13]; + memcpy(vendor, &ebx, 4); + memcpy(vendor + 4, &edx, 4); + memcpy(vendor + 8, &ecx, 4); + vendor[12] = 0; + + // get feature flags in ecx/edx, and family/model in eax + cpuid(eax, ebx, ecx, edx, 1); + + int family = (eax >> 8) & 0xf; // family and model fields + int model = (eax >> 4) & 0xf; + if (family == 0xf) { // use extended family and model fields + family += (eax >> 20) & 0xff; + model += ((eax >> 16) & 0xf) << 4; + } + + // Opteron Rev E has a bug in which on very rare occasions a locked + // instruction doesn't act as a read-acquire barrier if followed by a + // non-locked read-modify-write instruction. Rev F has this bug in + // pre-release versions, but not in versions released to customers, + // so we test only for Rev E, which is family 15, model 32..63 inclusive. 
+ if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD + family == 15 && + 32 <= model && model <= 63) { + AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true; + } else { + AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false; + } + +#if !defined(__SSE2__) + // edx bit 26 is SSE2 which we use to tell use whether we can use mfence + AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); +#endif +} + +class AtomicOpsx86Initializer { + public: + AtomicOpsx86Initializer() { + AtomicOps_Internalx86CPUFeaturesInit(); + } +}; + + +// A global to get use initialized on startup via static initialization :/ +AtomicOpsx86Initializer g_initer; + +} // namespace + +#endif // if x86 + +#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_x86_gcc.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_x86_gcc.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_x86_gcc.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_x86_gcc.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,274 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ + +namespace v8 { +namespace base { + +// This struct is not part of the public API of this module; clients may not +// use it. +// Features of this x86. Values may not be correct before main() is run, +// but are set conservatively. +struct AtomicOps_x86CPUFeatureStruct { + bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence + // after acquire compare-and-swap. +#if !defined(__SSE2__) + bool has_sse2; // Processor has SSE2. 
+#endif +}; +extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; + +#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") + +// 32-bit low-level operations on any platform. + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + __asm__ __volatile__("lock; cmpxchgl %1,%2" + : "=a" (prev) + : "q" (new_value), "m" (*ptr), "0" (old_value) + : "memory"); + return prev; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg. + : "=r" (new_value) + : "m" (*ptr), "0" (new_value) + : "memory"); + return new_value; // Now it's the previous value. +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 temp = increment; + __asm__ __volatile__("lock; xaddl %0,%1" + : "+r" (temp), "+m" (*ptr) + : : "memory"); + // temp now holds the old value of *ptr + return temp + increment; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 temp = increment; + __asm__ __volatile__("lock; xaddl %0,%1" + : "+r" (temp), "+m" (*ptr) + : : "memory"); + // temp now holds the old value of *ptr + if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { + __asm__ __volatile__("lfence" : : : "memory"); + } + return temp + increment; +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { + __asm__ __volatile__("lfence" : : : "memory"); + } + return x; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile 
Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +#if defined(__x86_64__) || defined(__SSE2__) + +// 64-bit implementations of memory barrier can be simpler, because it +// "mfence" is guaranteed to exist. +inline void MemoryBarrier() { + __asm__ __volatile__("mfence" : : : "memory"); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +#else + +inline void MemoryBarrier() { + if (AtomicOps_Internalx86CPUFeatures.has_sse2) { + __asm__ __volatile__("mfence" : : : "memory"); + } else { // mfence is faster but not present on PIII + Atomic32 x = 0; + NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII + } +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + if (AtomicOps_Internalx86CPUFeatures.has_sse2) { + *ptr = value; + __asm__ __volatile__("mfence" : : : "memory"); + } else { + NoBarrier_AtomicExchange(ptr, value); + // acts as a barrier on PIII + } +} +#endif + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + ATOMICOPS_COMPILER_BARRIER(); + *ptr = value; // An x86 store acts as a release barrier. + // See comments in Atomic64 version of Release_Store(), below. +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return *ptr; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; // An x86 load acts as a acquire barrier. + // See comments in Atomic64 version of Release_Store(), below. + ATOMICOPS_COMPILER_BARRIER(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT) + +// 64-bit low-level operations on 64-bit platform. 
+ +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev; + __asm__ __volatile__("lock; cmpxchgq %1,%2" + : "=a" (prev) + : "q" (new_value), "m" (*ptr), "0" (old_value) + : "memory"); + return prev; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg. + : "=r" (new_value) + : "m" (*ptr), "0" (new_value) + : "memory"); + return new_value; // Now it's the previous value. +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 temp = increment; + __asm__ __volatile__("lock; xaddq %0,%1" + : "+r" (temp), "+m" (*ptr) + : : "memory"); + // temp now contains the previous value of *ptr + return temp + increment; +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 temp = increment; + __asm__ __volatile__("lock; xaddq %0,%1" + : "+r" (temp), "+m" (*ptr) + : : "memory"); + // temp now contains the previous value of *ptr + if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { + __asm__ __volatile__("lfence" : : : "memory"); + } + return temp + increment; +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + ATOMICOPS_COMPILER_BARRIER(); + + *ptr = value; // An x86 store acts as a release barrier + // for current AMD/Intel chips as of Jan 2008. + // See also Acquire_Load(), below. + + // When new chips come out, check: + // IA-32 Intel Architecture Software Developer's Manual, Volume 3: + // System Programming Guide, Chatper 7: Multiple-processor management, + // Section 7.2, Memory Ordering. 
+ // Last seen at: + // http://developer.intel.com/design/pentium4/manuals/index_new.htm + // + // x86 stores/loads fail to act as barriers for a few instructions (clflush + // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are + // not generated by the compiler, and are rare. Users of these instructions + // need to know about cache behaviour in any case since all of these involve + // either flushing cache lines or non-temporal cache hints. +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value = *ptr; // An x86 load acts as a acquire barrier, + // for current AMD/Intel chips as of Jan 2008. + // See also Release_Store(), above. + ATOMICOPS_COMPILER_BARRIER(); + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { + __asm__ __volatile__("lfence" : : : "memory"); + } + return x; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +#endif // defined(__x86_64__) + +} } // namespace v8::base + +#undef ATOMICOPS_COMPILER_BARRIER + +#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/atomicops_internals_x86_msvc.h nodejs-0.11.15/deps/v8/src/base/atomicops_internals_x86_msvc.h --- nodejs-0.11.13/deps/v8/src/base/atomicops_internals_x86_msvc.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/atomicops_internals_x86_msvc.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,202 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ + +#include "src/base/macros.h" +#include "src/base/win32-headers.h" + +#if defined(V8_HOST_ARCH_64_BIT) +// windows.h #defines this (only on x64). This causes problems because the +// public API also uses MemoryBarrier at the public name for this fence. So, on +// X64, undef it, and call its documented +// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) +// implementation directly. +#undef MemoryBarrier +#endif + +namespace v8 { +namespace base { + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + LONG result = InterlockedCompareExchange( + reinterpret_cast<volatile LONG*>(ptr), + static_cast<LONG>(new_value), + static_cast<LONG>(old_value)); + return static_cast<Atomic32>(result); +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + LONG result = InterlockedExchange( + reinterpret_cast<volatile LONG*>(ptr), + static_cast<LONG>(new_value)); + return static_cast<Atomic32>(result); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return InterlockedExchangeAdd( + reinterpret_cast<volatile LONG*>(ptr), + static_cast<LONG>(increment)) + increment; +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return Barrier_AtomicIncrement(ptr, increment); +} + +#if !(defined(_MSC_VER) && _MSC_VER >= 1400) +#error "We require at least vs2005 for MemoryBarrier" +#endif +inline void MemoryBarrier() { +#if defined(V8_HOST_ARCH_64_BIT) + // See #undef and note at the top of this file. 
+ __faststorefence(); +#else + // We use MemoryBarrier from WinNT.h + ::MemoryBarrier(); +#endif +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { + *ptr = value; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + NoBarrier_AtomicExchange(ptr, value); + // acts as a barrier in this implementation +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; // works w/o barrier for current Intel chips as of June 2005 + // See comments in Atomic64 version of Release_Store() below. +} + +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { + return *ptr; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +#if defined(_WIN64) + +// 64-bit low-level operations on 64-bit platform. 
+ +STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID)); + +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + PVOID result = InterlockedCompareExchangePointer( + reinterpret_cast<volatile PVOID*>(ptr), + reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); + return reinterpret_cast<Atomic64>(result); +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + PVOID result = InterlockedExchangePointer( + reinterpret_cast<volatile PVOID*>(ptr), + reinterpret_cast<PVOID>(new_value)); + return reinterpret_cast<Atomic64>(result); +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return InterlockedExchangeAdd64( + reinterpret_cast<volatile LONGLONG*>(ptr), + static_cast<LONGLONG>(increment)) + increment; +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return Barrier_AtomicIncrement(ptr, increment); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + NoBarrier_AtomicExchange(ptr, value); + // acts as a barrier in this implementation +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; // works w/o barrier for current Intel chips as of June 2005 + + // When new chips come out, check: + // IA-32 Intel Architecture Software Developer's Manual, Volume 3: + // System Programming Guide, Chatper 7: Multiple-processor management, + // Section 7.2, Memory Ordering. 
+ // Last seen at: + // http://developer.intel.com/design/pentium4/manuals/index_new.htm +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value = *ptr; + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + + +#endif // defined(_WIN64) + +} } // namespace v8::base + +#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/build_config.h nodejs-0.11.15/deps/v8/src/base/build_config.h --- nodejs-0.11.13/deps/v8/src/base/build_config.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/build_config.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,169 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_BUILD_CONFIG_H_ +#define V8_BASE_BUILD_CONFIG_H_ + +#include "include/v8config.h" + +// Processor architecture detection. For more info on what's defined, see: +// http://msdn.microsoft.com/en-us/library/b0084kay.aspx +// http://www.agner.org/optimize/calling_conventions.pdf +// or with gcc, run: "echo | gcc -E -dM -" +#if defined(_M_X64) || defined(__x86_64__) +#if defined(__native_client__) +// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8 +// generates ARM machine code, together with a portable ARM simulator +// compiled for the host architecture in question. 
+// +// Since Native Client is ILP-32 on all architectures we use +// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86. +#define V8_HOST_ARCH_IA32 1 +#define V8_HOST_ARCH_32_BIT 1 +#define V8_HOST_CAN_READ_UNALIGNED 1 +#else +#define V8_HOST_ARCH_X64 1 +#if defined(__x86_64__) && !defined(__LP64__) +#define V8_HOST_ARCH_32_BIT 1 +#else +#define V8_HOST_ARCH_64_BIT 1 +#endif +#define V8_HOST_CAN_READ_UNALIGNED 1 +#endif // __native_client__ +#elif defined(_M_IX86) || defined(__i386__) +#define V8_HOST_ARCH_IA32 1 +#define V8_HOST_ARCH_32_BIT 1 +#define V8_HOST_CAN_READ_UNALIGNED 1 +#elif defined(__AARCH64EL__) +#define V8_HOST_ARCH_ARM64 1 +#define V8_HOST_ARCH_64_BIT 1 +#define V8_HOST_CAN_READ_UNALIGNED 1 +#elif defined(__ARMEL__) +#define V8_HOST_ARCH_ARM 1 +#define V8_HOST_ARCH_32_BIT 1 +#elif defined(__mips64) +#define V8_HOST_ARCH_MIPS64 1 +#define V8_HOST_ARCH_64_BIT 1 +#elif defined(__MIPSEB__) || defined(__MIPSEL__) +#define V8_HOST_ARCH_MIPS 1 +#define V8_HOST_ARCH_32_BIT 1 +#else +#error "Host architecture was not detected as supported by v8" +#endif + +#if defined(__ARM_ARCH_7A__) || \ + defined(__ARM_ARCH_7R__) || \ + defined(__ARM_ARCH_7__) +# define CAN_USE_ARMV7_INSTRUCTIONS 1 +# ifndef CAN_USE_VFP3_INSTRUCTIONS +# define CAN_USE_VFP3_INSTRUCTIONS +# endif +#endif + + +// Target architecture detection. This may be set externally. If not, detect +// in the same way as the host architecture, that is, target the native +// environment as presented by the compiler. 
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \ + !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \ + !V8_TARGET_ARCH_MIPS64 +#if defined(_M_X64) || defined(__x86_64__) +#define V8_TARGET_ARCH_X64 1 +#elif defined(_M_IX86) || defined(__i386__) +#define V8_TARGET_ARCH_IA32 1 +#elif defined(__AARCH64EL__) +#define V8_TARGET_ARCH_ARM64 1 +#elif defined(__ARMEL__) +#define V8_TARGET_ARCH_ARM 1 +#elif defined(__mips64) +#define V8_TARGET_ARCH_MIPS64 1 +#elif defined(__MIPSEB__) || defined(__MIPSEL__) +#define V8_TARGET_ARCH_MIPS 1 +#else +#error Target architecture was not detected as supported by v8 +#endif +#endif + +// Determine architecture pointer size. +#if V8_TARGET_ARCH_IA32 +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_X64 +#if !V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_64_BIT +#if defined(__x86_64__) && !defined(__LP64__) +#define V8_TARGET_ARCH_32_BIT 1 +#else +#define V8_TARGET_ARCH_64_BIT 1 +#endif +#endif +#elif V8_TARGET_ARCH_ARM +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_ARM64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_MIPS +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_MIPS64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_X87 +#define V8_TARGET_ARCH_32_BIT 1 +#else +#error Unknown target architecture pointer size +#endif + +// Check for supported combinations of host and target architectures. 
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32 +#error Target architecture ia32 is only supported on ia32 host +#endif +#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT && \ + !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_64_BIT)) +#error Target architecture x64 is only supported on x64 host +#endif +#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT && \ + !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_32_BIT)) +#error Target architecture x32 is only supported on x64 host with x32 support +#endif +#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM)) +#error Target architecture arm is only supported on arm and ia32 host +#endif +#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64)) +#error Target architecture arm64 is only supported on arm64 and x64 host +#endif +#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS)) +#error Target architecture mips is only supported on mips and ia32 host +#endif +#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) +#error Target architecture mips64 is only supported on mips64 and x64 host +#endif + +// Determine architecture endianness. +#if V8_TARGET_ARCH_IA32 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_X64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_ARM +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_ARM64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_MIPS +#if defined(__MIPSEB__) +#define V8_TARGET_BIG_ENDIAN 1 +#else +#define V8_TARGET_LITTLE_ENDIAN 1 +#endif +#elif V8_TARGET_ARCH_MIPS64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_X87 +#define V8_TARGET_LITTLE_ENDIAN 1 +#else +#error Unknown target architecture endianness +#endif + +#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__) +#define USING_BSD_ABI +#endif + +// Number of bits to represent the page size for paged spaces. The value of 20 +// gives 1Mb bytes per page. 
+const int kPageSizeBits = 20; + +#endif // V8_BASE_BUILD_CONFIG_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/cpu.cc nodejs-0.11.15/deps/v8/src/base/cpu.cc --- nodejs-0.11.13/deps/v8/src/base/cpu.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/cpu.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,499 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/cpu.h" + +#if V8_LIBC_MSVCRT +#include <intrin.h> // __cpuid() +#endif +#if V8_OS_POSIX +#include <unistd.h> // sysconf() +#endif +#if V8_OS_QNX +#include <sys/syspage.h> // cpuinfo +#endif + +#include <ctype.h> +#include <limits.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <algorithm> + +#include "src/base/logging.h" +#if V8_OS_WIN +#include "src/base/win32-headers.h" // NOLINT +#endif + +namespace v8 { +namespace base { + +#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 + +// Define __cpuid() for non-MSVC libraries. +#if !V8_LIBC_MSVCRT + +static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { +#if defined(__i386__) && defined(__pic__) + // Make sure to preserve ebx, which contains the pointer + // to the GOT in case we're generating PIC. + __asm__ volatile ( + "mov %%ebx, %%edi\n\t" + "cpuid\n\t" + "xchg %%edi, %%ebx\n\t" + : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(info_type) + ); +#else + __asm__ volatile ( + "cpuid \n\t" + : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(info_type) + ); +#endif // defined(__i386__) && defined(__pic__) +} + +#endif // !V8_LIBC_MSVCRT + +#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \ + || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + +#if V8_OS_LINUX + +#if V8_HOST_ARCH_ARM + +// See <uapi/asm/hwcap.h> kernel header. 
+/* + * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP + */ +#define HWCAP_SWP (1 << 0) +#define HWCAP_HALF (1 << 1) +#define HWCAP_THUMB (1 << 2) +#define HWCAP_26BIT (1 << 3) /* Play it safe */ +#define HWCAP_FAST_MULT (1 << 4) +#define HWCAP_FPA (1 << 5) +#define HWCAP_VFP (1 << 6) +#define HWCAP_EDSP (1 << 7) +#define HWCAP_JAVA (1 << 8) +#define HWCAP_IWMMXT (1 << 9) +#define HWCAP_CRUNCH (1 << 10) +#define HWCAP_THUMBEE (1 << 11) +#define HWCAP_NEON (1 << 12) +#define HWCAP_VFPv3 (1 << 13) +#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ +#define HWCAP_TLS (1 << 15) +#define HWCAP_VFPv4 (1 << 16) +#define HWCAP_IDIVA (1 << 17) +#define HWCAP_IDIVT (1 << 18) +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ +#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) +#define HWCAP_LPAE (1 << 20) + +#define AT_HWCAP 16 + +// Read the ELF HWCAP flags by parsing /proc/self/auxv. +static uint32_t ReadELFHWCaps() { + uint32_t result = 0; + FILE* fp = fopen("/proc/self/auxv", "r"); + if (fp != NULL) { + struct { uint32_t tag; uint32_t value; } entry; + for (;;) { + size_t n = fread(&entry, sizeof(entry), 1, fp); + if (n == 0 || (entry.tag == 0 && entry.value == 0)) { + break; + } + if (entry.tag == AT_HWCAP) { + result = entry.value; + break; + } + } + fclose(fp); + } + return result; +} + +#endif // V8_HOST_ARCH_ARM + +// Extract the information exposed by the kernel via /proc/cpuinfo. +class CPUInfo V8_FINAL { + public: + CPUInfo() : datalen_(0) { + // Get the size of the cpuinfo file by reading it until the end. This is + // required because files under /proc do not always return a valid size + // when using fseek(0, SEEK_END) + ftell(). Nor can the be mmap()-ed. 
+ static const char PATHNAME[] = "/proc/cpuinfo"; + FILE* fp = fopen(PATHNAME, "r"); + if (fp != NULL) { + for (;;) { + char buffer[256]; + size_t n = fread(buffer, 1, sizeof(buffer), fp); + if (n == 0) { + break; + } + datalen_ += n; + } + fclose(fp); + } + + // Read the contents of the cpuinfo file. + data_ = new char[datalen_ + 1]; + fp = fopen(PATHNAME, "r"); + if (fp != NULL) { + for (size_t offset = 0; offset < datalen_; ) { + size_t n = fread(data_ + offset, 1, datalen_ - offset, fp); + if (n == 0) { + break; + } + offset += n; + } + fclose(fp); + } + + // Zero-terminate the data. + data_[datalen_] = '\0'; + } + + ~CPUInfo() { + delete[] data_; + } + + // Extract the content of a the first occurence of a given field in + // the content of the cpuinfo file and return it as a heap-allocated + // string that must be freed by the caller using delete[]. + // Return NULL if not found. + char* ExtractField(const char* field) const { + DCHECK(field != NULL); + + // Look for first field occurence, and ensure it starts the line. + size_t fieldlen = strlen(field); + char* p = data_; + for (;;) { + p = strstr(p, field); + if (p == NULL) { + return NULL; + } + if (p == data_ || p[-1] == '\n') { + break; + } + p += fieldlen; + } + + // Skip to the first colon followed by a space. + p = strchr(p + fieldlen, ':'); + if (p == NULL || !isspace(p[1])) { + return NULL; + } + p += 2; + + // Find the end of the line. + char* q = strchr(p, '\n'); + if (q == NULL) { + q = data_ + datalen_; + } + + // Copy the line into a heap-allocated buffer. + size_t len = q - p; + char* result = new char[len + 1]; + if (result != NULL) { + memcpy(result, p, len); + result[len] = '\0'; + } + return result; + } + + private: + char* data_; + size_t datalen_; +}; + +#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + +// Checks that a space-separated list of items contains one given 'item'. 
+static bool HasListItem(const char* list, const char* item) { + ssize_t item_len = strlen(item); + const char* p = list; + if (p != NULL) { + while (*p != '\0') { + // Skip whitespace. + while (isspace(*p)) ++p; + + // Find end of current list item. + const char* q = p; + while (*q != '\0' && !isspace(*q)) ++q; + + if (item_len == q - p && memcmp(p, item, item_len) == 0) { + return true; + } + + // Skip to next item. + p = q; + } + } + return false; +} + +#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + +#endif // V8_OS_LINUX + +#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 + +CPU::CPU() : stepping_(0), + model_(0), + ext_model_(0), + family_(0), + ext_family_(0), + type_(0), + implementer_(0), + architecture_(0), + part_(0), + has_fpu_(false), + has_cmov_(false), + has_sahf_(false), + has_mmx_(false), + has_sse_(false), + has_sse2_(false), + has_sse3_(false), + has_ssse3_(false), + has_sse41_(false), + has_sse42_(false), + has_idiva_(false), + has_neon_(false), + has_thumb2_(false), + has_vfp_(false), + has_vfp3_(false), + has_vfp3_d32_(false) { + memcpy(vendor_, "Unknown", 8); +#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 + int cpu_info[4]; + + // __cpuid with an InfoType argument of 0 returns the number of + // valid Ids in CPUInfo[0] and the CPU identification string in + // the other three array elements. The CPU identification string is + // not in linear order. The code below arranges the information + // in a human readable form. The human readable order is CPUInfo[1] | + // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped + // before using memcpy to copy these three array elements to cpu_string. + __cpuid(cpu_info, 0); + unsigned num_ids = cpu_info[0]; + std::swap(cpu_info[2], cpu_info[3]); + memcpy(vendor_, cpu_info + 1, 12); + vendor_[12] = '\0'; + + // Interpret CPU feature information. 
+ if (num_ids > 0) { + __cpuid(cpu_info, 1); + stepping_ = cpu_info[0] & 0xf; + model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0); + family_ = (cpu_info[0] >> 8) & 0xf; + type_ = (cpu_info[0] >> 12) & 0x3; + ext_model_ = (cpu_info[0] >> 16) & 0xf; + ext_family_ = (cpu_info[0] >> 20) & 0xff; + has_fpu_ = (cpu_info[3] & 0x00000001) != 0; + has_cmov_ = (cpu_info[3] & 0x00008000) != 0; + has_mmx_ = (cpu_info[3] & 0x00800000) != 0; + has_sse_ = (cpu_info[3] & 0x02000000) != 0; + has_sse2_ = (cpu_info[3] & 0x04000000) != 0; + has_sse3_ = (cpu_info[2] & 0x00000001) != 0; + has_ssse3_ = (cpu_info[2] & 0x00000200) != 0; + has_sse41_ = (cpu_info[2] & 0x00080000) != 0; + has_sse42_ = (cpu_info[2] & 0x00100000) != 0; + } + +#if V8_HOST_ARCH_IA32 + // SAHF is always available in compat/legacy mode, + has_sahf_ = true; +#else + // Query extended IDs. + __cpuid(cpu_info, 0x80000000); + unsigned num_ext_ids = cpu_info[0]; + + // Interpret extended CPU feature information. + if (num_ext_ids > 0x80000000) { + __cpuid(cpu_info, 0x80000001); + // SAHF must be probed in long mode. + has_sahf_ = (cpu_info[2] & 0x00000001) != 0; + } +#endif + +#elif V8_HOST_ARCH_ARM + +#if V8_OS_LINUX + + CPUInfo cpu_info; + + // Extract implementor from the "CPU implementer" field. + char* implementer = cpu_info.ExtractField("CPU implementer"); + if (implementer != NULL) { + char* end ; + implementer_ = strtol(implementer, &end, 0); + if (end == implementer) { + implementer_ = 0; + } + delete[] implementer; + } + + // Extract part number from the "CPU part" field. + char* part = cpu_info.ExtractField("CPU part"); + if (part != NULL) { + char* end ; + part_ = strtol(part, &end, 0); + if (end == part) { + part_ = 0; + } + delete[] part; + } + + // Extract architecture from the "CPU Architecture" field. + // The list is well-known, unlike the the output of + // the 'Processor' field which can vary greatly. 
+ // See the definition of the 'proc_arch' array in + // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in + // same file. + char* architecture = cpu_info.ExtractField("CPU architecture"); + if (architecture != NULL) { + char* end; + architecture_ = strtol(architecture, &end, 10); + if (end == architecture) { + architecture_ = 0; + } + delete[] architecture; + + // Unfortunately, it seems that certain ARMv6-based CPUs + // report an incorrect architecture number of 7! + // + // See http://code.google.com/p/android/issues/detail?id=10812 + // + // We try to correct this by looking at the 'elf_format' + // field reported by the 'Processor' field, which is of the + // form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for + // an ARMv6-one. For example, the Raspberry Pi is one popular + // ARMv6 device that reports architecture 7. + if (architecture_ == 7) { + char* processor = cpu_info.ExtractField("Processor"); + if (HasListItem(processor, "(v6l)")) { + architecture_ = 6; + } + delete[] processor; + } + } + + // Try to extract the list of CPU features from ELF hwcaps. + uint32_t hwcaps = ReadELFHWCaps(); + if (hwcaps != 0) { + has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0; + has_neon_ = (hwcaps & HWCAP_NEON) != 0; + has_vfp_ = (hwcaps & HWCAP_VFP) != 0; + has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0; + has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 || + (hwcaps & HWCAP_VFPD32) != 0)); + } else { + // Try to fallback to "Features" CPUInfo field. + char* features = cpu_info.ExtractField("Features"); + has_idiva_ = HasListItem(features, "idiva"); + has_neon_ = HasListItem(features, "neon"); + has_thumb2_ = HasListItem(features, "thumb2"); + has_vfp_ = HasListItem(features, "vfp"); + if (HasListItem(features, "vfpv3d16")) { + has_vfp3_ = true; + } else if (HasListItem(features, "vfpv3")) { + has_vfp3_ = true; + has_vfp3_d32_ = true; + } + delete[] features; + } + + // Some old kernels will report vfp not vfpv3. 
Here we make an attempt + // to detect vfpv3 by checking for vfp *and* neon, since neon is only + // available on architectures with vfpv3. Checking neon on its own is + // not enough as it is possible to have neon without vfp. + if (has_vfp_ && has_neon_) { + has_vfp3_ = true; + } + + // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. + if (architecture_ < 7 && has_vfp3_) { + architecture_ = 7; + } + + // ARMv7 implies Thumb2. + if (architecture_ >= 7) { + has_thumb2_ = true; + } + + // The earliest architecture with Thumb2 is ARMv6T2. + if (has_thumb2_ && architecture_ < 6) { + architecture_ = 6; + } + + // We don't support any FPUs other than VFP. + has_fpu_ = has_vfp_; + +#elif V8_OS_QNX + + uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags; + if (cpu_flags & ARM_CPU_FLAG_V7) { + architecture_ = 7; + has_thumb2_ = true; + } else if (cpu_flags & ARM_CPU_FLAG_V6) { + architecture_ = 6; + // QNX doesn't say if Thumb2 is available. + // Assume false for the architectures older than ARMv7. + } + DCHECK(architecture_ >= 6); + has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0; + has_vfp_ = has_fpu_; + if (cpu_flags & ARM_CPU_FLAG_NEON) { + has_neon_ = true; + has_vfp3_ = has_vfp_; +#ifdef ARM_CPU_FLAG_VFP_D32 + has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0; +#endif + } + has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0; + +#endif // V8_OS_LINUX + +#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + + // Simple detection of FPU at runtime for Linux. + // It is based on /proc/cpuinfo, which reveals hardware configuration + // to user-space applications. According to MIPS (early 2010), no similar + // facility is universally available on the MIPS architectures, + // so it's up to individual OSes to provide such. + CPUInfo cpu_info; + char* cpu_model = cpu_info.ExtractField("cpu model"); + has_fpu_ = HasListItem(cpu_model, "FPU"); + delete[] cpu_model; + +#elif V8_HOST_ARCH_ARM64 + + CPUInfo cpu_info; + + // Extract implementor from the "CPU implementer" field. 
+ char* implementer = cpu_info.ExtractField("CPU implementer"); + if (implementer != NULL) { + char* end ; + implementer_ = strtol(implementer, &end, 0); + if (end == implementer) { + implementer_ = 0; + } + delete[] implementer; + } + + // Extract part number from the "CPU part" field. + char* part = cpu_info.ExtractField("CPU part"); + if (part != NULL) { + char* end ; + part_ = strtol(part, &end, 0); + if (end == part) { + part_ = 0; + } + delete[] part; + } + +#endif +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/cpu.h nodejs-0.11.15/deps/v8/src/base/cpu.h --- nodejs-0.11.13/deps/v8/src/base/cpu.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/cpu.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,111 @@ +// Copyright 2006-2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This module contains the architecture-specific code. This make the rest of +// the code less dependent on differences between different processor +// architecture. +// The classes have the same definition for all architectures. The +// implementation for a particular architecture is put in cpu_<arch>.cc. +// The build system then uses the implementation for the target architecture. +// + +#ifndef V8_BASE_CPU_H_ +#define V8_BASE_CPU_H_ + +#include "src/base/macros.h" + +namespace v8 { +namespace base { + +// ---------------------------------------------------------------------------- +// CPU +// +// Query information about the processor. +// +// This class also has static methods for the architecture specific functions. +// Add methods here to cope with differences between the supported +// architectures. For each architecture the file cpu_<arch>.cc contains the +// implementation of these static functions. 
+ +class CPU V8_FINAL { + public: + CPU(); + + // x86 CPUID information + const char* vendor() const { return vendor_; } + int stepping() const { return stepping_; } + int model() const { return model_; } + int ext_model() const { return ext_model_; } + int family() const { return family_; } + int ext_family() const { return ext_family_; } + int type() const { return type_; } + + // arm implementer/part information + int implementer() const { return implementer_; } + static const int ARM = 0x41; + static const int NVIDIA = 0x4e; + static const int QUALCOMM = 0x51; + int architecture() const { return architecture_; } + int part() const { return part_; } + static const int ARM_CORTEX_A5 = 0xc05; + static const int ARM_CORTEX_A7 = 0xc07; + static const int ARM_CORTEX_A8 = 0xc08; + static const int ARM_CORTEX_A9 = 0xc09; + static const int ARM_CORTEX_A12 = 0xc0c; + static const int ARM_CORTEX_A15 = 0xc0f; + + // General features + bool has_fpu() const { return has_fpu_; } + + // x86 features + bool has_cmov() const { return has_cmov_; } + bool has_sahf() const { return has_sahf_; } + bool has_mmx() const { return has_mmx_; } + bool has_sse() const { return has_sse_; } + bool has_sse2() const { return has_sse2_; } + bool has_sse3() const { return has_sse3_; } + bool has_ssse3() const { return has_ssse3_; } + bool has_sse41() const { return has_sse41_; } + bool has_sse42() const { return has_sse42_; } + + // arm features + bool has_idiva() const { return has_idiva_; } + bool has_neon() const { return has_neon_; } + bool has_thumb2() const { return has_thumb2_; } + bool has_vfp() const { return has_vfp_; } + bool has_vfp3() const { return has_vfp3_; } + bool has_vfp3_d32() const { return has_vfp3_d32_; } + + private: + char vendor_[13]; + int stepping_; + int model_; + int ext_model_; + int family_; + int ext_family_; + int type_; + int implementer_; + int architecture_; + int part_; + bool has_fpu_; + bool has_cmov_; + bool has_sahf_; + bool has_mmx_; + bool has_sse_; + 
bool has_sse2_; + bool has_sse3_; + bool has_ssse3_; + bool has_sse41_; + bool has_sse42_; + bool has_idiva_; + bool has_neon_; + bool has_thumb2_; + bool has_vfp_; + bool has_vfp3_; + bool has_vfp3_d32_; +}; + +} } // namespace v8::base + +#endif // V8_BASE_CPU_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/DEPS nodejs-0.11.15/deps/v8/src/base/DEPS --- nodejs-0.11.13/deps/v8/src/base/DEPS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/DEPS 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,7 @@ +include_rules = [ + "-include", + "+include/v8config.h", + "+include/v8stdint.h", + "-src", + "+src/base", +] diff -Nru nodejs-0.11.13/deps/v8/src/base/lazy-instance.h nodejs-0.11.15/deps/v8/src/base/lazy-instance.h --- nodejs-0.11.13/deps/v8/src/base/lazy-instance.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/lazy-instance.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,237 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// The LazyInstance<Type, Traits> class manages a single instance of Type, +// which will be lazily created on the first time it's accessed. This class is +// useful for places you would normally use a function-level static, but you +// need to have guaranteed thread-safety. The Type constructor will only ever +// be called once, even if two threads are racing to create the object. Get() +// and Pointer() will always return the same, completely initialized instance. +// +// LazyInstance is completely thread safe, assuming that you create it safely. +// The class was designed to be POD initialized, so it shouldn't require a +// static constructor. It really only makes sense to declare a LazyInstance as +// a global variable using the LAZY_INSTANCE_INITIALIZER initializer. +// +// LazyInstance is similar to Singleton, except it does not have the singleton +// property. 
You can have multiple LazyInstance's of the same type, and each +// will manage a unique instance. It also preallocates the space for Type, as +// to avoid allocating the Type instance on the heap. This may help with the +// performance of creating the instance, and reducing heap fragmentation. This +// requires that Type be a complete type so we can determine the size. See +// notes for advanced users below for more explanations. +// +// Example usage: +// static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER; +// void SomeMethod() { +// my_instance.Get().SomeMethod(); // MyClass::SomeMethod() +// +// MyClass* ptr = my_instance.Pointer(); +// ptr->DoDoDo(); // MyClass::DoDoDo +// } +// +// Additionally you can override the way your instance is constructed by +// providing your own trait: +// Example usage: +// struct MyCreateTrait { +// static void Construct(MyClass* allocated_ptr) { +// new (allocated_ptr) MyClass(/* extra parameters... */); +// } +// }; +// static LazyInstance<MyClass, MyCreateTrait>::type my_instance = +// LAZY_INSTANCE_INITIALIZER; +// +// WARNINGS: +// - This implementation of LazyInstance IS THREAD-SAFE by default. See +// SingleThreadInitOnceTrait if you don't care about thread safety. +// - Lazy initialization comes with a cost. Make sure that you don't use it on +// critical path. Consider adding your initialization code to a function +// which is explicitly called once. +// +// Notes for advanced users: +// LazyInstance can actually be used in two different ways: +// +// - "Static mode" which is the default mode since it is the most efficient +// (no extra heap allocation). In this mode, the instance is statically +// allocated (stored in the global data section at compile time). +// The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER) +// must be used to initialize static lazy instances. +// +// - "Dynamic mode". 
In this mode, the instance is dynamically allocated and +// constructed (using new) by default. This mode is useful if you have to +// deal with some code already allocating the instance for you (e.g. +// OS::Mutex() which returns a new private OS-dependent subclass of Mutex). +// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize +// dynamic lazy instances. + +#ifndef V8_BASE_LAZY_INSTANCE_H_ +#define V8_BASE_LAZY_INSTANCE_H_ + +#include "src/base/macros.h" +#include "src/base/once.h" + +namespace v8 { +namespace base { + +#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } } +#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 } + +// Default to static mode. +#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER + + +template <typename T> +struct LeakyInstanceTrait { + static void Destroy(T* /* instance */) {} +}; + + +// Traits that define how an instance is allocated and accessed. + + +template <typename T> +struct StaticallyAllocatedInstanceTrait { + // 16-byte alignment fallback to be on the safe side here. + struct V8_ALIGNAS(T, 16) StorageType { + char x[sizeof(T)]; + }; + + STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T)); + + static T* MutableInstance(StorageType* storage) { + return reinterpret_cast<T*>(storage); + } + + template <typename ConstructTrait> + static void InitStorageUsingTrait(StorageType* storage) { + ConstructTrait::Construct(MutableInstance(storage)); + } +}; + + +template <typename T> +struct DynamicallyAllocatedInstanceTrait { + typedef T* StorageType; + + static T* MutableInstance(StorageType* storage) { + return *storage; + } + + template <typename CreateTrait> + static void InitStorageUsingTrait(StorageType* storage) { + *storage = CreateTrait::Create(); + } +}; + + +template <typename T> +struct DefaultConstructTrait { + // Constructs the provided object which was already allocated. 
+ static void Construct(T* allocated_ptr) { + new(allocated_ptr) T(); + } +}; + + +template <typename T> +struct DefaultCreateTrait { + static T* Create() { + return new T(); + } +}; + + +struct ThreadSafeInitOnceTrait { + template <typename Function, typename Storage> + static void Init(OnceType* once, Function function, Storage storage) { + CallOnce(once, function, storage); + } +}; + + +// Initialization trait for users who don't care about thread-safety. +struct SingleThreadInitOnceTrait { + template <typename Function, typename Storage> + static void Init(OnceType* once, Function function, Storage storage) { + if (*once == ONCE_STATE_UNINITIALIZED) { + function(storage); + *once = ONCE_STATE_DONE; + } + } +}; + + +// TODO(pliard): Handle instances destruction (using global destructors). +template <typename T, typename AllocationTrait, typename CreateTrait, + typename InitOnceTrait, typename DestroyTrait /* not used yet. */> +struct LazyInstanceImpl { + public: + typedef typename AllocationTrait::StorageType StorageType; + + private: + static void InitInstance(StorageType* storage) { + AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage); + } + + void Init() const { + InitOnceTrait::Init( + &once_, + // Casts to void* are needed here to avoid breaking strict aliasing + // rules. + reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT + reinterpret_cast<void*>(&storage_)); + } + + public: + T* Pointer() { + Init(); + return AllocationTrait::MutableInstance(&storage_); + } + + const T& Get() const { + Init(); + return *AllocationTrait::MutableInstance(&storage_); + } + + mutable OnceType once_; + // Note that the previous field, OnceType, is an AtomicWord which guarantees + // 4-byte alignment of the storage field below. If compiling with GCC (>4.2), + // the LAZY_ALIGN macro above will guarantee correctness for any alignment. 
+ mutable StorageType storage_; +}; + + +template <typename T, + typename CreateTrait = DefaultConstructTrait<T>, + typename InitOnceTrait = ThreadSafeInitOnceTrait, + typename DestroyTrait = LeakyInstanceTrait<T> > +struct LazyStaticInstance { + typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>, + CreateTrait, InitOnceTrait, DestroyTrait> type; +}; + + +template <typename T, + typename CreateTrait = DefaultConstructTrait<T>, + typename InitOnceTrait = ThreadSafeInitOnceTrait, + typename DestroyTrait = LeakyInstanceTrait<T> > +struct LazyInstance { + // A LazyInstance is a LazyStaticInstance. + typedef typename LazyStaticInstance<T, CreateTrait, InitOnceTrait, + DestroyTrait>::type type; +}; + + +template <typename T, + typename CreateTrait = DefaultCreateTrait<T>, + typename InitOnceTrait = ThreadSafeInitOnceTrait, + typename DestroyTrait = LeakyInstanceTrait<T> > +struct LazyDynamicInstance { + typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>, + CreateTrait, InitOnceTrait, DestroyTrait> type; +}; + +} } // namespace v8::base + +#endif // V8_BASE_LAZY_INSTANCE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/logging.cc nodejs-0.11.15/deps/v8/src/base/logging.cc --- nodejs-0.11.13/deps/v8/src/base/logging.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/logging.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,88 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/logging.h" + +#if V8_LIBC_GLIBC || V8_OS_BSD +# include <cxxabi.h> +# include <execinfo.h> +#elif V8_OS_QNX +# include <backtrace.h> +#endif // V8_LIBC_GLIBC || V8_OS_BSD +#include <stdio.h> +#include <stdlib.h> + +#include "src/base/platform/platform.h" + +namespace v8 { +namespace base { + +// Attempts to dump a backtrace (if supported). 
+void DumpBacktrace() { +#if V8_LIBC_GLIBC || V8_OS_BSD + void* trace[100]; + int size = backtrace(trace, ARRAY_SIZE(trace)); + char** symbols = backtrace_symbols(trace, size); + OS::PrintError("\n==== C stack trace ===============================\n\n"); + if (size == 0) { + OS::PrintError("(empty)\n"); + } else if (symbols == NULL) { + OS::PrintError("(no symbols)\n"); + } else { + for (int i = 1; i < size; ++i) { + OS::PrintError("%2d: ", i); + char mangled[201]; + if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT + int status; + size_t length; + char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); + OS::PrintError("%s\n", demangled != NULL ? demangled : mangled); + free(demangled); + } else { + OS::PrintError("??\n"); + } + } + } + free(symbols); +#elif V8_OS_QNX + char out[1024]; + bt_accessor_t acc; + bt_memmap_t memmap; + bt_init_accessor(&acc, BT_SELF); + bt_load_memmap(&acc, &memmap); + bt_sprn_memmap(&memmap, out, sizeof(out)); + OS::PrintError(out); + bt_addr_t trace[100]; + int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace)); + OS::PrintError("\n==== C stack trace ===============================\n\n"); + if (size == 0) { + OS::PrintError("(empty)\n"); + } else { + bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"), + out, sizeof(out), NULL); + OS::PrintError(out); + } + bt_unload_memmap(&memmap); + bt_release_accessor(&acc); +#endif // V8_LIBC_GLIBC || V8_OS_BSD +} + +} } // namespace v8::base + + +// Contains protection against recursive calls (faults while handling faults). +extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) 
{ + fflush(stdout); + fflush(stderr); + v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, + line); + va_list arguments; + va_start(arguments, format); + v8::base::OS::VPrintError(format, arguments); + va_end(arguments); + v8::base::OS::PrintError("\n#\n"); + v8::base::DumpBacktrace(); + fflush(stderr); + v8::base::OS::Abort(); +} diff -Nru nodejs-0.11.13/deps/v8/src/base/logging.h nodejs-0.11.15/deps/v8/src/base/logging.h --- nodejs-0.11.13/deps/v8/src/base/logging.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/logging.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,223 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_LOGGING_H_ +#define V8_BASE_LOGGING_H_ + +#include <string.h> + +#include "include/v8stdint.h" +#include "src/base/build_config.h" + +extern "C" void V8_Fatal(const char* file, int line, const char* format, ...); + + +// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during +// development, but they should not be relied on in the final product. +#ifdef DEBUG +#define FATAL(msg) \ + V8_Fatal(__FILE__, __LINE__, "%s", (msg)) +#define UNIMPLEMENTED() \ + V8_Fatal(__FILE__, __LINE__, "unimplemented code") +#define UNREACHABLE() \ + V8_Fatal(__FILE__, __LINE__, "unreachable code") +#else +#define FATAL(msg) \ + V8_Fatal("", 0, "%s", (msg)) +#define UNIMPLEMENTED() \ + V8_Fatal("", 0, "unimplemented code") +#define UNREACHABLE() ((void) 0) +#endif + + +// The CHECK macro checks that the given condition is true; if not, it +// prints a message to stderr and aborts. +#define CHECK(condition) do { \ + if (!(condition)) { \ + V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \ + } \ + } while (0) + + +// Helper function used by the CHECK_EQ function when given int +// arguments. Should not be called directly. 
+inline void CheckEqualsHelper(const char* file, int line, + const char* expected_source, int expected, + const char* value_source, int value) { + if (expected != value) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i", + expected_source, value_source, expected, value); + } +} + + +// Helper function used by the CHECK_EQ function when given int64_t +// arguments. Should not be called directly. +inline void CheckEqualsHelper(const char* file, int line, + const char* expected_source, + int64_t expected, + const char* value_source, + int64_t value) { + if (expected != value) { + // Print int64_t values in hex, as two int32s, + // to avoid platform-dependencies. + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n#" + " Expected: 0x%08x%08x\n# Found: 0x%08x%08x", + expected_source, value_source, + static_cast<uint32_t>(expected >> 32), + static_cast<uint32_t>(expected), + static_cast<uint32_t>(value >> 32), + static_cast<uint32_t>(value)); + } +} + + +// Helper function used by the CHECK_NE function when given int +// arguments. Should not be called directly. +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* unexpected_source, + int unexpected, + const char* value_source, + int value) { + if (unexpected == value) { + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i", + unexpected_source, value_source, value); + } +} + + +// Helper function used by the CHECK function when given string +// arguments. Should not be called directly. 
+inline void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + const char* expected, + const char* value_source, + const char* value) { + if ((expected == NULL && value != NULL) || + (expected != NULL && value == NULL) || + (expected != NULL && value != NULL && strcmp(expected, value) != 0)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s", + expected_source, value_source, expected, value); + } +} + + +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* expected_source, + const char* expected, + const char* value_source, + const char* value) { + if (expected == value || + (expected != NULL && value != NULL && strcmp(expected, value) == 0)) { + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s", + expected_source, value_source, value); + } +} + + +// Helper function used by the CHECK function when given pointer +// arguments. Should not be called directly. +inline void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + const void* expected, + const char* value_source, + const void* value) { + if (expected != value) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p", + expected_source, value_source, + expected, value); + } +} + + +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* expected_source, + const void* expected, + const char* value_source, + const void* value) { + if (expected == value) { + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p", + expected_source, value_source, value); + } +} + + +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* expected_source, + int64_t expected, + const char* value_source, + int64_t value) { + if (expected == value) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", + expected_source, value_source, expected, value); + } +} + + +#define CHECK_EQ(expected, value) 
CheckEqualsHelper(__FILE__, __LINE__, \ + #expected, expected, #value, value) + + +#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \ + #unexpected, unexpected, #value, value) + + +#define CHECK_GT(a, b) CHECK((a) > (b)) +#define CHECK_GE(a, b) CHECK((a) >= (b)) +#define CHECK_LT(a, b) CHECK((a) < (b)) +#define CHECK_LE(a, b) CHECK((a) <= (b)) + + +namespace v8 { +namespace base { + +// Exposed for making debugging easier (to see where your function is being +// called, just add a call to DumpBacktrace). +void DumpBacktrace(); + +} } // namespace v8::base + + +// The DCHECK macro is equivalent to CHECK except that it only +// generates code in debug builds. +#ifdef DEBUG +#define DCHECK_RESULT(expr) CHECK(expr) +#define DCHECK(condition) CHECK(condition) +#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2) +#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2) +#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2) +#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2) +#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2) +#else +#define DCHECK_RESULT(expr) (expr) +#define DCHECK(condition) ((void) 0) +#define DCHECK_EQ(v1, v2) ((void) 0) +#define DCHECK_NE(v1, v2) ((void) 0) +#define DCHECK_GE(v1, v2) ((void) 0) +#define DCHECK_LT(v1, v2) ((void) 0) +#define DCHECK_LE(v1, v2) ((void) 0) +#endif + +#define DCHECK_NOT_NULL(p) DCHECK_NE(NULL, p) + +// "Extra checks" are lightweight checks that are enabled in some release +// builds. +#ifdef ENABLE_EXTRA_CHECKS +#define EXTRA_CHECK(condition) CHECK(condition) +#else +#define EXTRA_CHECK(condition) ((void) 0) +#endif + +#endif // V8_BASE_LOGGING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/macros.h nodejs-0.11.15/deps/v8/src/base/macros.h --- nodejs-0.11.13/deps/v8/src/base/macros.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/macros.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,259 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_MACROS_H_ +#define V8_BASE_MACROS_H_ + +#include "include/v8stdint.h" +#include "src/base/build_config.h" +#include "src/base/logging.h" + + +// The expression OFFSET_OF(type, field) computes the byte-offset +// of the specified field relative to the containing type. This +// corresponds to 'offsetof' (in stddef.h), except that it doesn't +// use 0 or NULL, which causes a problem with the compiler warnings +// we have enabled (which is also why 'offsetof' doesn't seem to work). +// Here we simply use the non-zero value 4, which seems to work. +#define OFFSET_OF(type, field) \ + (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4) + + +// The expression ARRAY_SIZE(a) is a compile-time constant of type +// size_t which represents the number of elements of the given +// array. You should only use ARRAY_SIZE on statically allocated +// arrays. +#define ARRAY_SIZE(a) \ + ((sizeof(a) / sizeof(*(a))) / \ + static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) + + +// A macro to disallow the evil copy constructor and operator= functions +// This should be used in the private: declarations for a class +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) V8_DELETE; \ + void operator=(const TypeName&) V8_DELETE + + +// A macro to disallow all the implicit constructors, namely the +// default constructor, copy constructor and operator= functions. +// +// This should be used in the private: declarations for a class +// that wants to prevent anyone from instantiating it. This is +// especially useful for classes containing only static methods. +#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName() V8_DELETE; \ + DISALLOW_COPY_AND_ASSIGN(TypeName) + + +// Newly written code should use V8_INLINE and V8_NOINLINE directly. 
+#define INLINE(declarator) V8_INLINE declarator +#define NO_INLINE(declarator) V8_NOINLINE declarator + + +// Newly written code should use V8_WARN_UNUSED_RESULT. +#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT + + +// Define V8_USE_ADDRESS_SANITIZER macros. +#if defined(__has_feature) +#if __has_feature(address_sanitizer) +#define V8_USE_ADDRESS_SANITIZER 1 +#endif +#endif + +// Define DISABLE_ASAN macros. +#ifdef V8_USE_ADDRESS_SANITIZER +#define DISABLE_ASAN __attribute__((no_sanitize_address)) +#else +#define DISABLE_ASAN +#endif + + +#if V8_CC_GNU +#define V8_IMMEDIATE_CRASH() __builtin_trap() +#else +#define V8_IMMEDIATE_CRASH() ((void(*)())0)() +#endif + + +// Use C++11 static_assert if possible, which gives error +// messages that are easier to understand on first sight. +#if V8_HAS_CXX11_STATIC_ASSERT +#define STATIC_ASSERT(test) static_assert(test, #test) +#else +// This is inspired by the static assertion facility in boost. This +// is pretty magical. If it causes you trouble on a platform you may +// find a fix in the boost code. +template <bool> class StaticAssertion; +template <> class StaticAssertion<true> { }; +// This macro joins two tokens. If one of the tokens is a macro the +// helper call causes it to be resolved before joining. +#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b) +#define SEMI_STATIC_JOIN_HELPER(a, b) a##b +// Causes an error during compilation of the condition is not +// statically known to be true. It is formulated as a typedef so that +// it can be used wherever a typedef can be used. Beware that this +// actually causes each use to introduce a new defined type with a +// name depending on the source line. 
+template <int> class StaticAssertionHelper { }; +#define STATIC_ASSERT(test) \ + typedef \ + StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \ + SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED + +#endif + + +// The USE(x) template is used to silence C++ compiler warnings +// issued for (yet) unused variables (typically parameters). +template <typename T> +inline void USE(T) { } + + +#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) + + +// Returns true iff x is a power of 2. Cannot be used with the maximally +// negative value of the type T (the -1 overflows). +template <typename T> +inline bool IsPowerOf2(T x) { + return IS_POWER_OF_TWO(x); +} + + +// Define our own macros for writing 64-bit constants. This is less fragile +// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it +// works on compilers that don't have it (like MSVC). +#if V8_CC_MSVC +# define V8_UINT64_C(x) (x ## UI64) +# define V8_INT64_C(x) (x ## I64) +# if V8_HOST_ARCH_64_BIT +# define V8_INTPTR_C(x) (x ## I64) +# define V8_PTR_PREFIX "ll" +# else +# define V8_INTPTR_C(x) (x) +# define V8_PTR_PREFIX "" +# endif // V8_HOST_ARCH_64_BIT +#elif V8_CC_MINGW64 +# define V8_UINT64_C(x) (x ## ULL) +# define V8_INT64_C(x) (x ## LL) +# define V8_INTPTR_C(x) (x ## LL) +# define V8_PTR_PREFIX "I64" +#elif V8_HOST_ARCH_64_BIT +# if V8_OS_MACOSX +# define V8_UINT64_C(x) (x ## ULL) +# define V8_INT64_C(x) (x ## LL) +# else +# define V8_UINT64_C(x) (x ## UL) +# define V8_INT64_C(x) (x ## L) +# endif +# define V8_INTPTR_C(x) (x ## L) +# define V8_PTR_PREFIX "l" +#else +# define V8_UINT64_C(x) (x ## ULL) +# define V8_INT64_C(x) (x ## LL) +# define V8_INTPTR_C(x) (x) +# define V8_PTR_PREFIX "" +#endif + +#define V8PRIxPTR V8_PTR_PREFIX "x" +#define V8PRIdPTR V8_PTR_PREFIX "d" +#define V8PRIuPTR V8_PTR_PREFIX "u" + +// Fix for Mac OS X defining uintptr_t as "unsigned long": +#if V8_OS_MACOSX +#undef V8PRIxPTR +#define V8PRIxPTR "lx" 
+#endif + +// The following macro works on both 32 and 64-bit platforms. +// Usage: instead of writing 0x1234567890123456 +// write V8_2PART_UINT64_C(0x12345678,90123456); +#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u)) + + +// Compute the 0-relative offset of some absolute value x of type T. +// This allows conversion of Addresses and integral types into +// 0-relative int offsets. +template <typename T> +inline intptr_t OffsetFrom(T x) { + return x - static_cast<T>(0); +} + + +// Compute the absolute value of type T for some 0-relative offset x. +// This allows conversion of 0-relative int offsets into Addresses and +// integral types. +template <typename T> +inline T AddressFrom(intptr_t x) { + return static_cast<T>(static_cast<T>(0) + x); +} + + +// Return the largest multiple of m which is <= x. +template <typename T> +inline T RoundDown(T x, intptr_t m) { + DCHECK(IsPowerOf2(m)); + return AddressFrom<T>(OffsetFrom(x) & -m); +} + + +// Return the smallest multiple of m which is >= x. +template <typename T> +inline T RoundUp(T x, intptr_t m) { + return RoundDown<T>(static_cast<T>(x + m - 1), m); +} + + +// Increment a pointer until it has the specified alignment. +// This works like RoundUp, but it works correctly on pointer types where +// sizeof(*pointer) might not be 1. +template<class T> +T AlignUp(T pointer, size_t alignment) { + DCHECK(sizeof(pointer) == sizeof(uintptr_t)); + uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer); + return reinterpret_cast<T>(RoundUp(pointer_raw, alignment)); +} + + +template <typename T, typename U> +inline bool IsAligned(T value, U alignment) { + return (value & (alignment - 1)) == 0; +} + + +// Returns the smallest power of two which is >= x. If you pass in a +// number that is already a power of two, it is returned as is. +// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr., +// figure 3-3, page 48, where the function is called clp2. 
+inline uint32_t RoundUpToPowerOf2(uint32_t x) { + DCHECK(x <= 0x80000000u); + x = x - 1; + x = x | (x >> 1); + x = x | (x >> 2); + x = x | (x >> 4); + x = x | (x >> 8); + x = x | (x >> 16); + return x + 1; +} + + +inline uint32_t RoundDownToPowerOf2(uint32_t x) { + uint32_t rounded_up = RoundUpToPowerOf2(x); + if (rounded_up > x) return rounded_up >> 1; + return rounded_up; +} + + +// Returns current value of top of the stack. Works correctly with ASAN. +DISABLE_ASAN +inline uintptr_t GetCurrentStackPosition() { + // Takes the address of the limit variable in order to find out where + // the top of stack is right now. + uintptr_t limit = reinterpret_cast<uintptr_t>(&limit); + return limit; +} + +#endif // V8_BASE_MACROS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/once.cc nodejs-0.11.15/deps/v8/src/base/once.cc --- nodejs-0.11.13/deps/v8/src/base/once.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/once.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/once.h" + +#ifdef _WIN32 +#include <windows.h> +#else +#include <sched.h> +#endif + +#include "src/base/atomicops.h" + +namespace v8 { +namespace base { + +void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) { + AtomicWord state = Acquire_Load(once); + // Fast path. The provided function was already executed. + if (state == ONCE_STATE_DONE) { + return; + } + + // The function execution did not complete yet. The once object can be in one + // of the two following states: + // - UNINITIALIZED: We are the first thread calling this function. + // - EXECUTING_FUNCTION: Another thread is already executing the function. + // + // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION + // atomically. 
+ state = Acquire_CompareAndSwap( + once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION); + if (state == ONCE_STATE_UNINITIALIZED) { + // We are the first thread to call this function, so we have to call the + // function. + init_func(arg); + Release_Store(once, ONCE_STATE_DONE); + } else { + // Another thread has already started executing the function. We need to + // wait until it completes the initialization. + while (state == ONCE_STATE_EXECUTING_FUNCTION) { +#ifdef _WIN32 + ::Sleep(0); +#else + sched_yield(); +#endif + state = Acquire_Load(once); + } + } +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/once.h nodejs-0.11.15/deps/v8/src/base/once.h --- nodejs-0.11.13/deps/v8/src/base/once.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/once.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,100 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// emulates google3/base/once.h +// +// This header is intended to be included only by v8's internal code. Users +// should not use this directly. +// +// This is basically a portable version of pthread_once(). +// +// This header declares: +// * A type called OnceType. +// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type +// OnceType. +// * A function CallOnce(OnceType* once, void (*init_func)()). +// This function, when invoked multiple times given the same OnceType object, +// will invoke init_func on the first call only, and will make sure none of +// the calls return before that first call to init_func has finished. +// +// Additionally, the following features are supported: +// * A macro V8_ONCE_INIT which is expanded into the expression used to +// initialize a OnceType. This is only useful when clients embed a OnceType +// into a structure of their own and want to initialize it statically. 
+// * The user can provide a parameter which CallOnce() forwards to the +// user-provided function when it is called. Usage example: +// CallOnce(&my_once, &MyFunctionExpectingIntArgument, 10); +// * This implementation guarantees that OnceType is a POD (i.e. no static +// initializer generated). +// +// This implements a way to perform lazy initialization. It's more efficient +// than using mutexes as no lock is needed if initialization has already +// happened. +// +// Example usage: +// void Init(); +// V8_DECLARE_ONCE(once_init); +// +// // Calls Init() exactly once. +// void InitOnce() { +// CallOnce(&once_init, &Init); +// } +// +// Note that if CallOnce() is called before main() has begun, it must +// only be called by the thread that will eventually call main() -- that is, +// the thread that performs dynamic initialization. In general this is a safe +// assumption since people don't usually construct threads before main() starts, +// but it is technically not guaranteed. Unfortunately, Win32 provides no way +// whatsoever to statically-initialize its synchronization primitives, so our +// only choice is to assume that dynamic initialization is single-threaded. 
+ +#ifndef V8_BASE_ONCE_H_ +#define V8_BASE_ONCE_H_ + +#include "src/base/atomicops.h" + +namespace v8 { +namespace base { + +typedef AtomicWord OnceType; + +#define V8_ONCE_INIT 0 + +#define V8_DECLARE_ONCE(NAME) ::v8::base::OnceType NAME + +enum { + ONCE_STATE_UNINITIALIZED = 0, + ONCE_STATE_EXECUTING_FUNCTION = 1, + ONCE_STATE_DONE = 2 +}; + +typedef void (*NoArgFunction)(); +typedef void (*PointerArgFunction)(void* arg); + +template <typename T> +struct OneArgFunction { + typedef void (*type)(T); +}; + +void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg); + +inline void CallOnce(OnceType* once, NoArgFunction init_func) { + if (Acquire_Load(once) != ONCE_STATE_DONE) { + CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL); + } +} + + +template <typename Arg> +inline void CallOnce(OnceType* once, + typename OneArgFunction<Arg*>::type init_func, Arg* arg) { + if (Acquire_Load(once) != ONCE_STATE_DONE) { + CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), + static_cast<void*>(arg)); + } +} + +} } // namespace v8::base + +#endif // V8_BASE_ONCE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/condition-variable.cc nodejs-0.11.15/deps/v8/src/base/platform/condition-variable.cc --- nodejs-0.11.13/deps/v8/src/base/platform/condition-variable.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/condition-variable.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,322 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/platform/condition-variable.h" + +#include <errno.h> +#include <time.h> + +#include "src/base/platform/time.h" + +namespace v8 { +namespace base { + +#if V8_OS_POSIX + +ConditionVariable::ConditionVariable() { + // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary + // hack to support cross-compiling Chrome for Android in AOSP. Remove + // this once AOSP is fixed. +#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \ + (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE + // On Free/Net/OpenBSD and Linux with glibc we can change the time + // source for pthread_cond_timedwait() to use the monotonic clock. + pthread_condattr_t attr; + int result = pthread_condattr_init(&attr); + DCHECK_EQ(0, result); + result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); + DCHECK_EQ(0, result); + result = pthread_cond_init(&native_handle_, &attr); + DCHECK_EQ(0, result); + result = pthread_condattr_destroy(&attr); +#else + int result = pthread_cond_init(&native_handle_, NULL); +#endif + DCHECK_EQ(0, result); + USE(result); +} + + +ConditionVariable::~ConditionVariable() { + int result = pthread_cond_destroy(&native_handle_); + DCHECK_EQ(0, result); + USE(result); +} + + +void ConditionVariable::NotifyOne() { + int result = pthread_cond_signal(&native_handle_); + DCHECK_EQ(0, result); + USE(result); +} + + +void ConditionVariable::NotifyAll() { + int result = pthread_cond_broadcast(&native_handle_); + DCHECK_EQ(0, result); + USE(result); +} + + +void ConditionVariable::Wait(Mutex* mutex) { + mutex->AssertHeldAndUnmark(); + int result = pthread_cond_wait(&native_handle_, &mutex->native_handle()); + DCHECK_EQ(0, result); + USE(result); + mutex->AssertUnheldAndMark(); +} + + +bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { + struct timespec ts; + int result; + mutex->AssertHeldAndUnmark(); +#if V8_OS_MACOSX + // Mac OS X provides pthread_cond_timedwait_relative_np(), which does + // not depend on the real 
time clock, which is what you really WANT here! + ts = rel_time.ToTimespec(); + DCHECK_GE(ts.tv_sec, 0); + DCHECK_GE(ts.tv_nsec, 0); + result = pthread_cond_timedwait_relative_np( + &native_handle_, &mutex->native_handle(), &ts); +#else + // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary + // hack to support cross-compiling Chrome for Android in AOSP. Remove + // this once AOSP is fixed. +#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \ + (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE + // On Free/Net/OpenBSD and Linux with glibc we can change the time + // source for pthread_cond_timedwait() to use the monotonic clock. + result = clock_gettime(CLOCK_MONOTONIC, &ts); + DCHECK_EQ(0, result); + Time now = Time::FromTimespec(ts); +#else + // The timeout argument to pthread_cond_timedwait() is in absolute time. + Time now = Time::NowFromSystemTime(); +#endif + Time end_time = now + rel_time; + DCHECK_GE(end_time, now); + ts = end_time.ToTimespec(); + result = pthread_cond_timedwait( + &native_handle_, &mutex->native_handle(), &ts); +#endif // V8_OS_MACOSX + mutex->AssertUnheldAndMark(); + if (result == ETIMEDOUT) { + return false; + } + DCHECK_EQ(0, result); + return true; +} + +#elif V8_OS_WIN + +struct ConditionVariable::Event { + Event() : handle_(::CreateEventA(NULL, true, false, NULL)) { + DCHECK(handle_ != NULL); + } + + ~Event() { + BOOL ok = ::CloseHandle(handle_); + DCHECK(ok); + USE(ok); + } + + bool WaitFor(DWORD timeout_ms) { + DWORD result = ::WaitForSingleObject(handle_, timeout_ms); + if (result == WAIT_OBJECT_0) { + return true; + } + DCHECK(result == WAIT_TIMEOUT); + return false; + } + + HANDLE handle_; + Event* next_; + HANDLE thread_; + volatile bool notified_; +}; + + +ConditionVariable::NativeHandle::~NativeHandle() { + DCHECK(waitlist_ == NULL); + + while (freelist_ != NULL) { + Event* event = freelist_; + freelist_ = event->next_; + delete event; + } +} + + +ConditionVariable::Event* 
ConditionVariable::NativeHandle::Pre() { + LockGuard<Mutex> lock_guard(&mutex_); + + // Grab an event from the free list or create a new one. + Event* event = freelist_; + if (event != NULL) { + freelist_ = event->next_; + } else { + event = new Event; + } + event->thread_ = GetCurrentThread(); + event->notified_ = false; + +#ifdef DEBUG + // The event must not be on the wait list. + for (Event* we = waitlist_; we != NULL; we = we->next_) { + DCHECK_NE(event, we); + } +#endif + + // Prepend the event to the wait list. + event->next_ = waitlist_; + waitlist_ = event; + + return event; +} + + +void ConditionVariable::NativeHandle::Post(Event* event, bool result) { + LockGuard<Mutex> lock_guard(&mutex_); + + // Remove the event from the wait list. + for (Event** wep = &waitlist_;; wep = &(*wep)->next_) { + DCHECK_NE(NULL, *wep); + if (*wep == event) { + *wep = event->next_; + break; + } + } + +#ifdef DEBUG + // The event must not be on the free list. + for (Event* fe = freelist_; fe != NULL; fe = fe->next_) { + DCHECK_NE(event, fe); + } +#endif + + // Reset the event. + BOOL ok = ::ResetEvent(event->handle_); + DCHECK(ok); + USE(ok); + + // Insert the event into the free list. + event->next_ = freelist_; + freelist_ = event; + + // Forward signals delivered after the timeout to the next waiting event. + if (!result && event->notified_ && waitlist_ != NULL) { + ok = ::SetEvent(waitlist_->handle_); + DCHECK(ok); + USE(ok); + waitlist_->notified_ = true; + } +} + + +ConditionVariable::ConditionVariable() {} + + +ConditionVariable::~ConditionVariable() {} + + +void ConditionVariable::NotifyOne() { + // Notify the thread with the highest priority in the waitlist + // that was not already signalled. 
+  LockGuard<Mutex> lock_guard(native_handle_.mutex());
+  Event* highest_event = NULL;
+  int highest_priority = std::numeric_limits<int>::min();
+  for (Event* event = native_handle().waitlist();
+       event != NULL;
+       event = event->next_) {
+    if (event->notified_) {
+      continue;
+    }
+    int priority = GetThreadPriority(event->thread_);
+    DCHECK_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+    if (priority >= highest_priority) {
+      highest_priority = priority;
+      highest_event = event;
+    }
+  }
+  if (highest_event != NULL) {
+    DCHECK(!highest_event->notified_);
+    ::SetEvent(highest_event->handle_);
+    highest_event->notified_ = true;
+  }
+}
+
+
+void ConditionVariable::NotifyAll() {
+  // Notify all threads on the waitlist.
+  LockGuard<Mutex> lock_guard(native_handle_.mutex());
+  for (Event* event = native_handle().waitlist();
+       event != NULL;
+       event = event->next_) {
+    if (!event->notified_) {
+      ::SetEvent(event->handle_);
+      event->notified_ = true;
+    }
+  }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+  // Create and setup the wait event.
+  Event* event = native_handle_.Pre();
+
+  // Release the user mutex.
+  mutex->Unlock();
+
+  // Wait on the wait event.
+  while (!event->WaitFor(INFINITE))
+    ;
+
+  // Reacquire the user mutex.
+  mutex->Lock();
+
+  // Release the wait event (we must have been notified).
+  DCHECK(event->notified_);
+  native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+  // Create and setup the wait event.
+  Event* event = native_handle_.Pre();
+
+  // Release the user mutex.
+  mutex->Unlock();
+
+  // Wait on the wait event.
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  bool result = false;
+  while (true) {
+    int64_t msec = (end - now).InMilliseconds();
+    if (msec >= static_cast<int64_t>(INFINITE)) {
+      result = event->WaitFor(INFINITE - 1);
+      if (result) {
+        break;
+      }
+      now = TimeTicks::Now();
+    } else {
+      result = event->WaitFor((msec < 0) ?
0 : static_cast<DWORD>(msec)); + break; + } + } + + // Reaquire the user mutex. + mutex->Lock(); + + // Release the wait event. + DCHECK(!result || event->notified_); + native_handle_.Post(event, result); + + return result; +} + +#endif // V8_OS_POSIX + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/condition-variable.h nodejs-0.11.15/deps/v8/src/base/platform/condition-variable.h --- nodejs-0.11.13/deps/v8/src/base/platform/condition-variable.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/condition-variable.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,118 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_ +#define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_ + +#include "src/base/lazy-instance.h" +#include "src/base/platform/mutex.h" + +namespace v8 { +namespace base { + +// Forward declarations. +class ConditionVariableEvent; +class TimeDelta; + +// ----------------------------------------------------------------------------- +// ConditionVariable +// +// This class is a synchronization primitive that can be used to block a thread, +// or multiple threads at the same time, until: +// - a notification is received from another thread, +// - a timeout expires, or +// - a spurious wakeup occurs +// Any thread that intends to wait on a ConditionVariable has to acquire a lock +// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release +// the mutex and suspend the execution of the calling thread. When the condition +// variable is notified, the thread is awakened, and the mutex is reacquired. + +class ConditionVariable V8_FINAL { + public: + ConditionVariable(); + ~ConditionVariable(); + + // If any threads are waiting on this condition variable, calling + // |NotifyOne()| unblocks one of the waiting threads. 
+ void NotifyOne(); + + // Unblocks all threads currently waiting for this condition variable. + void NotifyAll(); + + // |Wait()| causes the calling thread to block until the condition variable is + // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks + // the current executing thread, and adds it to the list of threads waiting on + // this condition variable. The thread will be unblocked when |NotifyAll()| or + // |NotifyOne()| is executed. It may also be unblocked spuriously. When + // unblocked, regardless of the reason, the lock on the mutex is reacquired + // and |Wait()| exits. + void Wait(Mutex* mutex); + + // Atomically releases the mutex, blocks the current executing thread, and + // adds it to the list of threads waiting on this condition variable. The + // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed, + // or when the relative timeout |rel_time| expires. It may also be unblocked + // spuriously. When unblocked, regardless of the reason, the lock on the mutex + // is reacquired and |WaitFor()| exits. Returns true if the condition variable + // was notified prior to the timeout. + bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; + + // The implementation-defined native handle type. 
+#if V8_OS_POSIX + typedef pthread_cond_t NativeHandle; +#elif V8_OS_WIN + struct Event; + class NativeHandle V8_FINAL { + public: + NativeHandle() : waitlist_(NULL), freelist_(NULL) {} + ~NativeHandle(); + + Event* Pre() V8_WARN_UNUSED_RESULT; + void Post(Event* event, bool result); + + Mutex* mutex() { return &mutex_; } + Event* waitlist() { return waitlist_; } + + private: + Event* waitlist_; + Event* freelist_; + Mutex mutex_; + + DISALLOW_COPY_AND_ASSIGN(NativeHandle); + }; +#endif + + NativeHandle& native_handle() { + return native_handle_; + } + const NativeHandle& native_handle() const { + return native_handle_; + } + + private: + NativeHandle native_handle_; + + DISALLOW_COPY_AND_ASSIGN(ConditionVariable); +}; + + +// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is +// called). +// Usage: +// static LazyConditionVariable my_condvar = +// LAZY_CONDITION_VARIABLE_INITIALIZER; +// +// void my_function() { +// LockGuard<Mutex> lock_guard(&my_mutex); +// my_condvar.Pointer()->Wait(&my_mutex); +// } +typedef LazyStaticInstance< + ConditionVariable, DefaultConstructTrait<ConditionVariable>, + ThreadSafeInitOnceTrait>::type LazyConditionVariable; + +#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER + +} } // namespace v8::base + +#endif // V8_BASE_PLATFORM_CONDITION_VARIABLE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/elapsed-timer.h nodejs-0.11.15/deps/v8/src/base/platform/elapsed-timer.h --- nodejs-0.11.13/deps/v8/src/base/platform/elapsed-timer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/elapsed-timer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,97 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_BASE_PLATFORM_ELAPSED_TIMER_H_ +#define V8_BASE_PLATFORM_ELAPSED_TIMER_H_ + +#include "src/base/logging.h" +#include "src/base/platform/time.h" + +namespace v8 { +namespace base { + +class ElapsedTimer V8_FINAL { + public: +#ifdef DEBUG + ElapsedTimer() : started_(false) {} +#endif + + // Starts this timer. Once started a timer can be checked with + // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|. + // This method must not be called on an already started timer. + void Start() { + DCHECK(!IsStarted()); + start_ticks_ = Now(); +#ifdef DEBUG + started_ = true; +#endif + DCHECK(IsStarted()); + } + + // Stops this timer. Must not be called on a timer that was not + // started before. + void Stop() { + DCHECK(IsStarted()); + start_ticks_ = TimeTicks(); +#ifdef DEBUG + started_ = false; +#endif + DCHECK(!IsStarted()); + } + + // Returns |true| if this timer was started previously. + bool IsStarted() const { + DCHECK(started_ || start_ticks_.IsNull()); + DCHECK(!started_ || !start_ticks_.IsNull()); + return !start_ticks_.IsNull(); + } + + // Restarts the timer and returns the time elapsed since the previous start. + // This method is equivalent to obtaining the elapsed time with |Elapsed()| + // and then starting the timer again, but does so in one single operation, + // avoiding the need to obtain the clock value twice. It may only be called + // on a previously started timer. + TimeDelta Restart() { + DCHECK(IsStarted()); + TimeTicks ticks = Now(); + TimeDelta elapsed = ticks - start_ticks_; + DCHECK(elapsed.InMicroseconds() >= 0); + start_ticks_ = ticks; + DCHECK(IsStarted()); + return elapsed; + } + + // Returns the time elapsed since the previous start. This method may only + // be called on a previously started timer. 
+ TimeDelta Elapsed() const { + DCHECK(IsStarted()); + TimeDelta elapsed = Now() - start_ticks_; + DCHECK(elapsed.InMicroseconds() >= 0); + return elapsed; + } + + // Returns |true| if the specified |time_delta| has elapsed since the + // previous start, or |false| if not. This method may only be called on + // a previously started timer. + bool HasExpired(TimeDelta time_delta) const { + DCHECK(IsStarted()); + return Elapsed() >= time_delta; + } + + private: + static V8_INLINE TimeTicks Now() { + TimeTicks now = TimeTicks::HighResolutionNow(); + DCHECK(!now.IsNull()); + return now; + } + + TimeTicks start_ticks_; +#ifdef DEBUG + bool started_; +#endif +}; + +} } // namespace v8::base + +#endif // V8_BASE_PLATFORM_ELAPSED_TIMER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/mutex.cc nodejs-0.11.15/deps/v8/src/base/platform/mutex.cc --- nodejs-0.11.13/deps/v8/src/base/platform/mutex.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/mutex.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,191 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/platform/mutex.h" + +#include <errno.h> + +namespace v8 { +namespace base { + +#if V8_OS_POSIX + +static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) { + int result; +#if defined(DEBUG) + // Use an error checking mutex in debug mode. + pthread_mutexattr_t attr; + result = pthread_mutexattr_init(&attr); + DCHECK_EQ(0, result); + result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + DCHECK_EQ(0, result); + result = pthread_mutex_init(mutex, &attr); + DCHECK_EQ(0, result); + result = pthread_mutexattr_destroy(&attr); +#else + // Use a fast mutex (default attributes). 
+ result = pthread_mutex_init(mutex, NULL); +#endif // defined(DEBUG) + DCHECK_EQ(0, result); + USE(result); +} + + +static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) { + pthread_mutexattr_t attr; + int result = pthread_mutexattr_init(&attr); + DCHECK_EQ(0, result); + result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + DCHECK_EQ(0, result); + result = pthread_mutex_init(mutex, &attr); + DCHECK_EQ(0, result); + result = pthread_mutexattr_destroy(&attr); + DCHECK_EQ(0, result); + USE(result); +} + + +static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) { + int result = pthread_mutex_destroy(mutex); + DCHECK_EQ(0, result); + USE(result); +} + + +static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) { + int result = pthread_mutex_lock(mutex); + DCHECK_EQ(0, result); + USE(result); +} + + +static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) { + int result = pthread_mutex_unlock(mutex); + DCHECK_EQ(0, result); + USE(result); +} + + +static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) { + int result = pthread_mutex_trylock(mutex); + if (result == EBUSY) { + return false; + } + DCHECK_EQ(0, result); + return true; +} + +#elif V8_OS_WIN + +static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) { + InitializeCriticalSection(cs); +} + + +static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) { + InitializeCriticalSection(cs); +} + + +static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) { + DeleteCriticalSection(cs); +} + + +static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) { + EnterCriticalSection(cs); +} + + +static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) { + LeaveCriticalSection(cs); +} + + +static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) { + return TryEnterCriticalSection(cs); +} + +#endif // V8_OS_POSIX + + +Mutex::Mutex() { + InitializeNativeHandle(&native_handle_); +#ifdef DEBUG + level_ = 
0; +#endif +} + + +Mutex::~Mutex() { + DestroyNativeHandle(&native_handle_); + DCHECK_EQ(0, level_); +} + + +void Mutex::Lock() { + LockNativeHandle(&native_handle_); + AssertUnheldAndMark(); +} + + +void Mutex::Unlock() { + AssertHeldAndUnmark(); + UnlockNativeHandle(&native_handle_); +} + + +bool Mutex::TryLock() { + if (!TryLockNativeHandle(&native_handle_)) { + return false; + } + AssertUnheldAndMark(); + return true; +} + + +RecursiveMutex::RecursiveMutex() { + InitializeRecursiveNativeHandle(&native_handle_); +#ifdef DEBUG + level_ = 0; +#endif +} + + +RecursiveMutex::~RecursiveMutex() { + DestroyNativeHandle(&native_handle_); + DCHECK_EQ(0, level_); +} + + +void RecursiveMutex::Lock() { + LockNativeHandle(&native_handle_); +#ifdef DEBUG + DCHECK_LE(0, level_); + level_++; +#endif +} + + +void RecursiveMutex::Unlock() { +#ifdef DEBUG + DCHECK_LT(0, level_); + level_--; +#endif + UnlockNativeHandle(&native_handle_); +} + + +bool RecursiveMutex::TryLock() { + if (!TryLockNativeHandle(&native_handle_)) { + return false; + } +#ifdef DEBUG + DCHECK_LE(0, level_); + level_++; +#endif + return true; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/mutex.h nodejs-0.11.15/deps/v8/src/base/platform/mutex.h --- nodejs-0.11.13/deps/v8/src/base/platform/mutex.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/mutex.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,215 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_BASE_PLATFORM_MUTEX_H_ +#define V8_BASE_PLATFORM_MUTEX_H_ + +#include "src/base/lazy-instance.h" +#if V8_OS_WIN +#include "src/base/win32-headers.h" +#endif +#include "src/base/logging.h" + +#if V8_OS_POSIX +#include <pthread.h> // NOLINT +#endif + +namespace v8 { +namespace base { + +// ---------------------------------------------------------------------------- +// Mutex +// +// This class is a synchronization primitive that can be used to protect shared +// data from being simultaneously accessed by multiple threads. A mutex offers +// exclusive, non-recursive ownership semantics: +// - A calling thread owns a mutex from the time that it successfully calls +// either |Lock()| or |TryLock()| until it calls |Unlock()|. +// - When a thread owns a mutex, all other threads will block (for calls to +// |Lock()|) or receive a |false| return value (for |TryLock()|) if they +// attempt to claim ownership of the mutex. +// A calling thread must not own the mutex prior to calling |Lock()| or +// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed +// while still owned by some thread. The Mutex class is non-copyable. + +class Mutex V8_FINAL { + public: + Mutex(); + ~Mutex(); + + // Locks the given mutex. If the mutex is currently unlocked, it becomes + // locked and owned by the calling thread, and immediately. If the mutex + // is already locked by another thread, suspends the calling thread until + // the mutex is unlocked. + void Lock(); + + // Unlocks the given mutex. The mutex is assumed to be locked and owned by + // the calling thread on entrance. + void Unlock(); + + // Tries to lock the given mutex. Returns whether the mutex was + // successfully locked. + bool TryLock() V8_WARN_UNUSED_RESULT; + + // The implementation-defined native handle type. 
+#if V8_OS_POSIX + typedef pthread_mutex_t NativeHandle; +#elif V8_OS_WIN + typedef CRITICAL_SECTION NativeHandle; +#endif + + NativeHandle& native_handle() { + return native_handle_; + } + const NativeHandle& native_handle() const { + return native_handle_; + } + + private: + NativeHandle native_handle_; +#ifdef DEBUG + int level_; +#endif + + V8_INLINE void AssertHeldAndUnmark() { +#ifdef DEBUG + DCHECK_EQ(1, level_); + level_--; +#endif + } + + V8_INLINE void AssertUnheldAndMark() { +#ifdef DEBUG + DCHECK_EQ(0, level_); + level_++; +#endif + } + + friend class ConditionVariable; + + DISALLOW_COPY_AND_ASSIGN(Mutex); +}; + + +// POD Mutex initialized lazily (i.e. the first time Pointer() is called). +// Usage: +// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER; +// +// void my_function() { +// LockGuard<Mutex> guard(my_mutex.Pointer()); +// // Do something. +// } +// +typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>, + ThreadSafeInitOnceTrait>::type LazyMutex; + +#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER + + +// ----------------------------------------------------------------------------- +// RecursiveMutex +// +// This class is a synchronization primitive that can be used to protect shared +// data from being simultaneously accessed by multiple threads. A recursive +// mutex offers exclusive, recursive ownership semantics: +// - A calling thread owns a recursive mutex for a period of time that starts +// when it successfully calls either |Lock()| or |TryLock()|. During this +// period, the thread may make additional calls to |Lock()| or |TryLock()|. +// The period of ownership ends when the thread makes a matching number of +// calls to |Unlock()|. +// - When a thread owns a recursive mutex, all other threads will block (for +// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if +// they attempt to claim ownership of the recursive mutex. 
+// - The maximum number of times that a recursive mutex may be locked is +// unspecified, but after that number is reached, calls to |Lock()| will +// probably abort the process and calls to |TryLock()| return false. +// The behavior of a program is undefined if a recursive mutex is destroyed +// while still owned by some thread. The RecursiveMutex class is non-copyable. + +class RecursiveMutex V8_FINAL { + public: + RecursiveMutex(); + ~RecursiveMutex(); + + // Locks the mutex. If another thread has already locked the mutex, a call to + // |Lock()| will block execution until the lock is acquired. A thread may call + // |Lock()| on a recursive mutex repeatedly. Ownership will only be released + // after the thread makes a matching number of calls to |Unlock()|. + // The behavior is undefined if the mutex is not unlocked before being + // destroyed, i.e. some thread still owns it. + void Lock(); + + // Unlocks the mutex if its level of ownership is 1 (there was exactly one + // more call to |Lock()| than there were calls to unlock() made by this + // thread), reduces the level of ownership by 1 otherwise. The mutex must be + // locked by the current thread of execution, otherwise, the behavior is + // undefined. + void Unlock(); + + // Tries to lock the given mutex. Returns whether the mutex was + // successfully locked. + bool TryLock() V8_WARN_UNUSED_RESULT; + + // The implementation-defined native handle type. + typedef Mutex::NativeHandle NativeHandle; + + NativeHandle& native_handle() { + return native_handle_; + } + const NativeHandle& native_handle() const { + return native_handle_; + } + + private: + NativeHandle native_handle_; +#ifdef DEBUG + int level_; +#endif + + DISALLOW_COPY_AND_ASSIGN(RecursiveMutex); +}; + + +// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is +// called). 
+// Usage: +// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER; +// +// void my_function() { +// LockGuard<RecursiveMutex> guard(my_mutex.Pointer()); +// // Do something. +// } +// +typedef LazyStaticInstance<RecursiveMutex, + DefaultConstructTrait<RecursiveMutex>, + ThreadSafeInitOnceTrait>::type LazyRecursiveMutex; + +#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER + + +// ----------------------------------------------------------------------------- +// LockGuard +// +// This class is a mutex wrapper that provides a convenient RAII-style mechanism +// for owning a mutex for the duration of a scoped block. +// When a LockGuard object is created, it attempts to take ownership of the +// mutex it is given. When control leaves the scope in which the LockGuard +// object was created, the LockGuard is destructed and the mutex is released. +// The LockGuard class is non-copyable. + +template <typename Mutex> +class LockGuard V8_FINAL { + public: + explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); } + ~LockGuard() { mutex_->Unlock(); } + + private: + Mutex* mutex_; + + DISALLOW_COPY_AND_ASSIGN(LockGuard); +}; + +} } // namespace v8::base + +#endif // V8_BASE_PLATFORM_MUTEX_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-cygwin.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-cygwin.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-cygwin.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-cygwin.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,303 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for Cygwin goes here. For the POSIX-compatible +// parts, the implementation is in platform-posix.cc. 
+ +#include <errno.h> +#include <pthread.h> +#include <semaphore.h> +#include <stdarg.h> +#include <strings.h> // index +#include <sys/mman.h> // mmap & munmap +#include <sys/time.h> +#include <unistd.h> // sysconf + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" +#include "src/base/win32-headers.h" + +namespace v8 { +namespace base { + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return tzname[0]; // The location of the timezone string on Cygwin. +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + // On Cygwin, struct tm does not contain a tm_gmtoff field. + time_t utc = time(NULL); + DCHECK(utc != -1); + struct tm* loc = localtime(&utc); + DCHECK(loc != NULL); + // time - localtime includes any daylight savings offset, so subtract it. + return static_cast<double>((mktime(loc) - utc) * msPerSecond - + (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0)); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1; + char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); + + // This loop will terminate once the scanning hits an EOF. + while (true) { + uintptr_t start, end; + char attr_r, attr_w, attr_x, attr_p; + // Parse the addresses and permission bits at the beginning of the line. + if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; + if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; + + int c; + if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { + // Found a read-only executable entry. Skip characters until we reach + // the beginning of the filename or the end of the line. + do { + c = getc(fp); + } while ((c != EOF) && (c != '\n') && (c != '/')); + if (c == EOF) break; // EOF: Was unexpected, just exit. + + // Process the filename if found. + if (c == '/') { + ungetc(c, fp); // Push the '/' back into the stream to be read below. + + // Read to the end of the line. Exit if the read fails. + if (fgets(lib_name, kLibNameLen, fp) == NULL) break; + + // Drop the newline character read by fgets. We do not need to check + // for a zero-length string because we know that we at least read the + // '/' character. + lib_name[strlen(lib_name) - 1] = '\0'; + } else { + // No library name found, just record the raw address range. + snprintf(lib_name, kLibNameLen, + "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); + } + result.push_back(SharedLibraryAddress(lib_name, start, end)); + } else { + // Entry not describing executable data. Skip to end of line to set up + // reading the next entry. + do { + c = getc(fp); + } while ((c != EOF) && (c != '\n')); + if (c == EOF) break; + } + } + free(lib_name); + fclose(fp); + return result; +} + + +void OS::SignalCodeMovingGC() { + // Nothing to do on Cygwin. +} + + +// The VirtualMemory implementation is taken from platform-win32.cc. 
+// The mmap-based virtual memory implementation as it is used on most posix +// platforms does not work well because Cygwin does not support MAP_FIXED. +// This causes VirtualMemory::Commit to not always commit the memory region +// specified. + +static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { + LPVOID base = NULL; + + if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { + // For exectutable pages try and randomize the allocation address + for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { + base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection); + } + } + + // After three attempts give up and let the OS find an address to use. + if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); + + return base; +} + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* address = ReserveRegion(request_size); + if (address == NULL) return; + uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment); + // Try reducing the size by freeing and then reallocating a specific area. + bool result = ReleaseRegion(address, request_size); + USE(result); + DCHECK(result); + address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); + if (address != NULL) { + request_size = size; + DCHECK(base == static_cast<uint8_t*>(address)); + } else { + // Resizing failed, just go with a bigger area. 
+ address = ReserveRegion(request_size); + if (address == NULL) return; + } + address_ = address; + size_ = request_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address_, size_); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + DCHECK(IsReserved()); + return UncommitRegion(address, size); +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { + return false; + } + return true; +} + + +bool VirtualMemory::Guard(void* address) { + if (NULL == VirtualAlloc(address, + OS::CommitPageSize(), + MEM_COMMIT, + PAGE_NOACCESS)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return VirtualFree(base, size, MEM_DECOMMIT) != 0; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return VirtualFree(base, 0, MEM_RELEASE) != 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. 
+ return false; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-freebsd.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-freebsd.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-freebsd.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-freebsd.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,307 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for FreeBSD goes here. For the POSIX-compatible +// parts, the implementation is in platform-posix.cc. + +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> +#include <stdlib.h> +#include <sys/resource.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/ucontext.h> + +#include <sys/fcntl.h> // open +#include <sys/mman.h> // mmap & munmap +#include <sys/stat.h> // open +#include <sys/types.h> // mmap & munmap +#include <unistd.h> // getpagesize +// If you don't have execinfo.h then you need devel/libexecinfo from ports. +#include <errno.h> +#include <limits.h> +#include <stdarg.h> +#include <strings.h> // index + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" + + +namespace v8 { +namespace base { + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return t->tm_zone; +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + time_t tv = time(NULL); + struct tm* t = localtime(&tv); + // tm_gmtoff includes any daylight savings offset, so subtract it. + return static_cast<double>(t->tm_gmtoff * msPerSecond - + (t->tm_isdst > 0 ? 
3600 * msPerSecond : 0)); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool executable) { + const size_t msize = RoundUp(requested, getpagesize()); + int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); + void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); + + if (mbase == MAP_FAILED) return NULL; + *allocated = msize; + return mbase; +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) munmap(memory_, size_); + fclose(file_); +} + + +static unsigned StringToLong(char* buffer) { + return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + std::vector<SharedLibraryAddress> result; + static const int MAP_LENGTH = 1024; + int fd = open("/proc/self/maps", O_RDONLY); + if (fd < 0) return result; + 
while (true) { + char addr_buffer[11]; + addr_buffer[0] = '0'; + addr_buffer[1] = 'x'; + addr_buffer[10] = 0; + int result = read(fd, addr_buffer + 2, 8); + if (result < 8) break; + unsigned start = StringToLong(addr_buffer); + result = read(fd, addr_buffer + 2, 1); + if (result < 1) break; + if (addr_buffer[2] != '-') break; + result = read(fd, addr_buffer + 2, 8); + if (result < 8) break; + unsigned end = StringToLong(addr_buffer); + char buffer[MAP_LENGTH]; + int bytes_read = -1; + do { + bytes_read++; + if (bytes_read >= MAP_LENGTH - 1) + break; + result = read(fd, buffer + bytes_read, 1); + if (result < 1) break; + } while (buffer[bytes_read] != '\n'); + buffer[bytes_read] = 0; + // Ignore mappings that are not executable. + if (buffer[3] != 'x') continue; + char* start_of_path = index(buffer, '/'); + // There may be no filename in this line. Skip to next. + if (start_of_path == NULL) continue; + buffer[bytes_read] = 0; + result.push_back(SharedLibraryAddress(start_of_path, start, end)); + } + close(fd); + return result; +} + + +void OS::SignalCodeMovingGC() { +} + + + +// Constants used for mmap. 
+static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + uint8_t* base = static_cast<uint8_t*>(reservation); + uint8_t* aligned_base = RoundUp(base, alignment); + DCHECK_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + DCHECK_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + DCHECK(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return 
UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform.h nodejs-0.11.15/deps/v8/src/base/platform/platform.h --- nodejs-0.11.13/deps/v8/src/base/platform/platform.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,527 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This module contains the platform-specific code. This make the rest of the +// code less dependent on operating system, compilers and runtime libraries. +// This module does specifically not deal with differences between different +// processor architecture. +// The platform classes have the same definition for all platforms. 
The +// implementation for a particular platform is put in platform_<os>.cc. +// The build system then uses the implementation for the target platform. +// +// This design has been chosen because it is simple and fast. Alternatively, +// the platform dependent classes could have been implemented using abstract +// superclasses with virtual methods and having specializations for each +// platform. This design was rejected because it was more complicated and +// slower. It would require factory methods for selecting the right +// implementation and the overhead of virtual methods for performance +// sensitive like mutex locking/unlocking. + +#ifndef V8_BASE_PLATFORM_PLATFORM_H_ +#define V8_BASE_PLATFORM_PLATFORM_H_ + +#include <stdarg.h> +#include <string> +#include <vector> + +#include "src/base/build_config.h" +#include "src/base/platform/mutex.h" +#include "src/base/platform/semaphore.h" + +#ifdef __sun +# ifndef signbit +namespace std { +int signbit(double x); +} +# endif +#include <alloca.h> +#endif + +#if V8_OS_QNX +#include "src/base/qnx-math.h" +#endif + +// Microsoft Visual C++ specific stuff. +#if V8_LIBC_MSVCRT + +#include "src/base/win32-headers.h" +#include "src/base/win32-math.h" + +int strncasecmp(const char* s1, const char* s2, int n); + +// Visual C++ 2013 and higher implement this function. +#if (_MSC_VER < 1800) +inline int lrint(double flt) { + int intgr; +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 + __asm { + fld flt + fistp intgr + }; +#else + intgr = static_cast<int>(flt + 0.5); + if ((intgr & 1) != 0 && intgr - flt == 0.5) { + // If the number is halfway between two integers, round to the even one. 
+ intgr--; + } +#endif + return intgr; +} +#endif // _MSC_VER < 1800 + +#endif // V8_LIBC_MSVCRT + +namespace v8 { +namespace base { + +// ---------------------------------------------------------------------------- +// Fast TLS support + +#ifndef V8_NO_FAST_TLS + +#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32) + +#define V8_FAST_TLS_SUPPORTED 1 + +INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); + +inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { + const intptr_t kTibInlineTlsOffset = 0xE10; + const intptr_t kTibExtraTlsOffset = 0xF94; + const intptr_t kMaxInlineSlots = 64; + const intptr_t kMaxSlots = kMaxInlineSlots + 1024; + const intptr_t kPointerSize = sizeof(void*); + DCHECK(0 <= index && index < kMaxSlots); + if (index < kMaxInlineSlots) { + return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + + kPointerSize * index)); + } + intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); + DCHECK(extra != 0); + return *reinterpret_cast<intptr_t*>(extra + + kPointerSize * (index - kMaxInlineSlots)); +} + +#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) + +#define V8_FAST_TLS_SUPPORTED 1 + +extern intptr_t kMacTlsBaseOffset; + +INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); + +inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { + intptr_t result; +#if V8_HOST_ARCH_IA32 + asm("movl %%gs:(%1,%2,4), %0;" + :"=r"(result) // Output must be a writable register. + :"r"(kMacTlsBaseOffset), "r"(index)); +#else + asm("movq %%gs:(%1,%2,8), %0;" + :"=r"(result) + :"r"(kMacTlsBaseOffset), "r"(index)); +#endif + return result; +} + +#endif + +#endif // V8_NO_FAST_TLS + + +class TimezoneCache; + + +// ---------------------------------------------------------------------------- +// OS +// +// This class has static methods for the different platform specific +// functions. Add methods here to cope with differences between the +// supported platforms. 
+ +class OS { + public: + // Initialize the OS class. + // - random_seed: Used for the GetRandomMmapAddress() if non-zero. + // - hard_abort: If true, OS::Abort() will crash instead of aborting. + // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof. + static void Initialize(int64_t random_seed, + bool hard_abort, + const char* const gc_fake_mmap); + + // Returns the accumulated user time for thread. This routine + // can be used for profiling. The implementation should + // strive for high-precision timer resolution, preferable + // micro-second resolution. + static int GetUserTime(uint32_t* secs, uint32_t* usecs); + + // Returns current time as the number of milliseconds since + // 00:00:00 UTC, January 1, 1970. + static double TimeCurrentMillis(); + + static TimezoneCache* CreateTimezoneCache(); + static void DisposeTimezoneCache(TimezoneCache* cache); + static void ClearTimezoneCache(TimezoneCache* cache); + + // Returns a string identifying the current time zone. The + // timestamp is used for determining if DST is in effect. + static const char* LocalTimezone(double time, TimezoneCache* cache); + + // Returns the local time offset in milliseconds east of UTC without + // taking daylight savings time into account. + static double LocalTimeOffset(TimezoneCache* cache); + + // Returns the daylight savings offset for the given time. + static double DaylightSavingsOffset(double time, TimezoneCache* cache); + + // Returns last OS error. + static int GetLastError(); + + static FILE* FOpen(const char* path, const char* mode); + static bool Remove(const char* path); + + // Opens a temporary file, the file is auto removed on close. + static FILE* OpenTemporaryFile(); + + // Log file open mode is platform-dependent due to line ends issues. + static const char* const LogFileOpenMode; + + // Print output to console. This is mostly used for debugging output. + // On platforms that has standard terminal output, the output + // should go to stdout. 
+ static void Print(const char* format, ...); + static void VPrint(const char* format, va_list args); + + // Print output to a file. This is mostly used for debugging output. + static void FPrint(FILE* out, const char* format, ...); + static void VFPrint(FILE* out, const char* format, va_list args); + + // Print error output to console. This is mostly used for error message + // output. On platforms that has standard terminal output, the output + // should go to stderr. + static void PrintError(const char* format, ...); + static void VPrintError(const char* format, va_list args); + + // Allocate/Free memory used by JS heap. Pages are readable/writable, but + // they are not guaranteed to be executable unless 'executable' is true. + // Returns the address of allocated memory, or NULL if failed. + static void* Allocate(const size_t requested, + size_t* allocated, + bool is_executable); + static void Free(void* address, const size_t size); + + // This is the granularity at which the ProtectCode(...) call can set page + // permissions. + static intptr_t CommitPageSize(); + + // Mark code segments non-writable. + static void ProtectCode(void* address, const size_t size); + + // Assign memory as a guard page so that access will cause an exception. + static void Guard(void* address, const size_t size); + + // Generate a random address to be used for hinting mmap(). + static void* GetRandomMmapAddr(); + + // Get the Alignment guaranteed by Allocate(). + static size_t AllocateAlignment(); + + // Sleep for a number of milliseconds. + static void Sleep(const int milliseconds); + + // Abort the current process. + static void Abort(); + + // Debug break. + static void DebugBreak(); + + // Walk the stack. 
+ static const int kStackWalkError = -1; + static const int kStackWalkMaxNameLen = 256; + static const int kStackWalkMaxTextLen = 256; + struct StackFrame { + void* address; + char text[kStackWalkMaxTextLen]; + }; + + class MemoryMappedFile { + public: + static MemoryMappedFile* open(const char* name); + static MemoryMappedFile* create(const char* name, int size, void* initial); + virtual ~MemoryMappedFile() { } + virtual void* memory() = 0; + virtual int size() = 0; + }; + + // Safe formatting print. Ensures that str is always null-terminated. + // Returns the number of chars written, or -1 if output was truncated. + static int SNPrintF(char* str, int length, const char* format, ...); + static int VSNPrintF(char* str, + int length, + const char* format, + va_list args); + + static char* StrChr(char* str, int c); + static void StrNCpy(char* dest, int length, const char* src, size_t n); + + // Support for the profiler. Can do nothing, in which case ticks + // occuring in shared libraries will not be properly accounted for. + struct SharedLibraryAddress { + SharedLibraryAddress( + const std::string& library_path, uintptr_t start, uintptr_t end) + : library_path(library_path), start(start), end(end) {} + + std::string library_path; + uintptr_t start; + uintptr_t end; + }; + + static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses(); + + // Support for the profiler. Notifies the external profiling + // process that a code moving garbage collection starts. Can do + // nothing, in which case the code objects must not move (e.g., by + // using --never-compact) if accurate profiling is desired. + static void SignalCodeMovingGC(); + + // Returns the number of processors online. + static int NumberOfProcessorsOnline(); + + // The total amount of physical memory available on the current system. + static uint64_t TotalPhysicalMemory(); + + // Maximum size of the virtual memory. 0 means there is no artificial + // limit. 
+ static intptr_t MaxVirtualMemory(); + + // Returns the double constant NAN + static double nan_value(); + + // Support runtime detection of whether the hard float option of the + // EABI is used. + static bool ArmUsingHardFloat(); + + // Returns the activation frame alignment constraint or zero if + // the platform doesn't care. Guaranteed to be a power of two. + static int ActivationFrameAlignment(); + + static int GetCurrentProcessId(); + + static int GetCurrentThreadId(); + + private: + static const int msPerSecond = 1000; + +#if V8_OS_POSIX + static const char* GetGCFakeMMapFile(); +#endif + + DISALLOW_IMPLICIT_CONSTRUCTORS(OS); +}; + +// Represents and controls an area of reserved memory. +// Control of the reserved memory can be assigned to another VirtualMemory +// object by assignment or copy-contructing. This removes the reserved memory +// from the original object. +class VirtualMemory { + public: + // Empty VirtualMemory object, controlling no reserved memory. + VirtualMemory(); + + // Reserves virtual memory with size. + explicit VirtualMemory(size_t size); + + // Reserves virtual memory containing an area of the given size that + // is aligned per alignment. This may not be at the position returned + // by address(). + VirtualMemory(size_t size, size_t alignment); + + // Releases the reserved memory, if any, controlled by this VirtualMemory + // object. + ~VirtualMemory(); + + // Returns whether the memory has been reserved. + bool IsReserved(); + + // Initialize or resets an embedded VirtualMemory object. + void Reset(); + + // Returns the start address of the reserved memory. + // If the memory was reserved with an alignment, this address is not + // necessarily aligned. The user might need to round it up to a multiple of + // the alignment to get the start of the aligned block. + void* address() { + DCHECK(IsReserved()); + return address_; + } + + // Returns the size of the reserved memory. 
The returned value is only + // meaningful when IsReserved() returns true. + // If the memory was reserved with an alignment, this size may be larger + // than the requested size. + size_t size() { return size_; } + + // Commits real memory. Returns whether the operation succeeded. + bool Commit(void* address, size_t size, bool is_executable); + + // Uncommit real memory. Returns whether the operation succeeded. + bool Uncommit(void* address, size_t size); + + // Creates a single guard page at the given address. + bool Guard(void* address); + + void Release() { + DCHECK(IsReserved()); + // Notice: Order is important here. The VirtualMemory object might live + // inside the allocated region. + void* address = address_; + size_t size = size_; + Reset(); + bool result = ReleaseRegion(address, size); + USE(result); + DCHECK(result); + } + + // Assign control of the reserved region to a different VirtualMemory object. + // The old object is no longer functional (IsReserved() returns false). + void TakeControl(VirtualMemory* from) { + DCHECK(!IsReserved()); + address_ = from->address_; + size_ = from->size_; + from->Reset(); + } + + static void* ReserveRegion(size_t size); + + static bool CommitRegion(void* base, size_t size, bool is_executable); + + static bool UncommitRegion(void* base, size_t size); + + // Must be called with a base pointer that has been returned by ReserveRegion + // and the same size it was reserved with. + static bool ReleaseRegion(void* base, size_t size); + + // Returns true if OS performs lazy commits, i.e. the memory allocation call + // defers actual physical memory allocation till the first memory access. + // Otherwise returns false. + static bool HasLazyCommits(); + + private: + void* address_; // Start address of the virtual memory. + size_t size_; // Size of the virtual memory. 
+}; + + +// ---------------------------------------------------------------------------- +// Thread +// +// Thread objects are used for creating and running threads. When the start() +// method is called the new thread starts running the run() method in the new +// thread. The Thread object should not be deallocated before the thread has +// terminated. + +class Thread { + public: + // Opaque data type for thread-local storage keys. + typedef int32_t LocalStorageKey; + + class Options { + public: + Options() : name_("v8:<unknown>"), stack_size_(0) {} + explicit Options(const char* name, int stack_size = 0) + : name_(name), stack_size_(stack_size) {} + + const char* name() const { return name_; } + int stack_size() const { return stack_size_; } + + private: + const char* name_; + int stack_size_; + }; + + // Create new thread. + explicit Thread(const Options& options); + virtual ~Thread(); + + // Start new thread by calling the Run() method on the new thread. + void Start(); + + // Start new thread and wait until Run() method is called on the new thread. + void StartSynchronously() { + start_semaphore_ = new Semaphore(0); + Start(); + start_semaphore_->Wait(); + delete start_semaphore_; + start_semaphore_ = NULL; + } + + // Wait until thread terminates. + void Join(); + + inline const char* name() const { + return name_; + } + + // Abstract method for run handler. + virtual void Run() = 0; + + // Thread-local storage. 
+ static LocalStorageKey CreateThreadLocalKey(); + static void DeleteThreadLocalKey(LocalStorageKey key); + static void* GetThreadLocal(LocalStorageKey key); + static int GetThreadLocalInt(LocalStorageKey key) { + return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key))); + } + static void SetThreadLocal(LocalStorageKey key, void* value); + static void SetThreadLocalInt(LocalStorageKey key, int value) { + SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value))); + } + static bool HasThreadLocal(LocalStorageKey key) { + return GetThreadLocal(key) != NULL; + } + +#ifdef V8_FAST_TLS_SUPPORTED + static inline void* GetExistingThreadLocal(LocalStorageKey key) { + void* result = reinterpret_cast<void*>( + InternalGetExistingThreadLocal(static_cast<intptr_t>(key))); + DCHECK(result == GetThreadLocal(key)); + return result; + } +#else + static inline void* GetExistingThreadLocal(LocalStorageKey key) { + return GetThreadLocal(key); + } +#endif + + // A hint to the scheduler to let another thread run. + static void YieldCPU(); + + + // The thread name length is limited to 16 based on Linux's implementation of + // prctl(). 
+ static const int kMaxThreadNameLength = 16; + + class PlatformData; + PlatformData* data() { return data_; } + + void NotifyStartedAndRun() { + if (start_semaphore_) start_semaphore_->Signal(); + Run(); + } + + private: + void set_name(const char* name); + + PlatformData* data_; + + char name_[kMaxThreadNameLength]; + int stack_size_; + Semaphore* start_semaphore_; + + DISALLOW_COPY_AND_ASSIGN(Thread); +}; + +} } // namespace v8::base + +#endif // V8_BASE_PLATFORM_PLATFORM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-linux.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-linux.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-linux.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-linux.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,432 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for Linux goes here. For the POSIX-compatible +// parts, the implementation is in platform-posix.cc. + +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> +#include <stdlib.h> +#include <sys/prctl.h> +#include <sys/resource.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> + +// Ubuntu Dapper requires memory pages to be marked as +// executable. Otherwise, OS raises an exception when executing code +// in that page. +#include <errno.h> +#include <fcntl.h> // open +#include <stdarg.h> +#include <strings.h> // index +#include <sys/mman.h> // mmap & munmap +#include <sys/stat.h> // open +#include <sys/types.h> // mmap & munmap +#include <unistd.h> // sysconf + +// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'. +// Old versions of the C library <signal.h> didn't define the type. 
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \ + (defined(__arm__) || defined(__aarch64__)) && \ + !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) +#include <asm/sigcontext.h> // NOLINT +#endif + +#if defined(LEAK_SANITIZER) +#include <sanitizer/lsan_interface.h> +#endif + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" + + +namespace v8 { +namespace base { + + +#ifdef __arm__ + +bool OS::ArmUsingHardFloat() { + // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify + // the Floating Point ABI used (PCS stands for Procedure Call Standard). + // We use these as well as a couple of other defines to statically determine + // what FP ABI used. + // GCC versions 4.4 and below don't support hard-fp. + // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or + // __ARM_PCS_VFP. + +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#if GCC_VERSION >= 40600 +#if defined(__ARM_PCS_VFP) + return true; +#else + return false; +#endif + +#elif GCC_VERSION < 40500 + return false; + +#else +#if defined(__ARM_PCS_VFP) + return true; +#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \ + !defined(__VFP_FP__) + return false; +#else +#error "Your version of GCC does not report the FP ABI compiled for." \ + "Please report it on this issue" \ + "http://code.google.com/p/v8/issues/detail?id=2140" + +#endif +#endif +#undef GCC_VERSION +} + +#endif // def __arm__ + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return t->tm_zone; +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + time_t tv = time(NULL); + struct tm* t = localtime(&tv); + // tm_gmtoff includes any daylight savings offset, so subtract it. 
+ return static_cast<double>(t->tm_gmtoff * msPerSecond - + (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, AllocateAlignment()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + void* addr = OS::GetRandomMmapAddr(); + void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mbase == MAP_FAILED) return NULL; + *allocated = msize; + return mbase; +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) OS::Free(memory_, size_); + fclose(file_); +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + std::vector<SharedLibraryAddress> result; + // This function assumes that the 
layout of the file is as follows: + // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] + // If we encounter an unexpected situation we abort scanning further entries. + FILE* fp = fopen("/proc/self/maps", "r"); + if (fp == NULL) return result; + + // Allocate enough room to be able to store a full file name. + const int kLibNameLen = FILENAME_MAX + 1; + char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); + + // This loop will terminate once the scanning hits an EOF. + while (true) { + uintptr_t start, end; + char attr_r, attr_w, attr_x, attr_p; + // Parse the addresses and permission bits at the beginning of the line. + if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; + if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; + + int c; + if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { + // Found a read-only executable entry. Skip characters until we reach + // the beginning of the filename or the end of the line. + do { + c = getc(fp); + } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '[')); + if (c == EOF) break; // EOF: Was unexpected, just exit. + + // Process the filename if found. + if ((c == '/') || (c == '[')) { + // Push the '/' or '[' back into the stream to be read below. + ungetc(c, fp); + + // Read to the end of the line. Exit if the read fails. + if (fgets(lib_name, kLibNameLen, fp) == NULL) break; + + // Drop the newline character read by fgets. We do not need to check + // for a zero-length string because we know that we at least read the + // '/' or '[' character. + lib_name[strlen(lib_name) - 1] = '\0'; + } else { + // No library name found, just record the raw address range. + snprintf(lib_name, kLibNameLen, + "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); + } + result.push_back(SharedLibraryAddress(lib_name, start, end)); + } else { + // Entry not describing executable data. Skip to end of line to set up + // reading the next entry. 
+ do { + c = getc(fp); + } while ((c != EOF) && (c != '\n')); + if (c == EOF) break; + } + } + free(lib_name); + fclose(fp); + return result; +} + + +void OS::SignalCodeMovingGC() { + // Support for ll_prof.py. + // + // The Linux profiler built into the kernel logs all mmap's with + // PROT_EXEC so that analysis tools can properly attribute ticks. We + // do a mmap with a name known by ll_prof.py and immediately munmap + // it. This injects a GC marker into the stream of events generated + // by the kernel and allows us to synchronize V8 code log and the + // kernel log. + int size = sysconf(_SC_PAGESIZE); + FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+"); + if (f == NULL) { + OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile()); + OS::Abort(); + } + void* addr = mmap(OS::GetRandomMmapAddr(), + size, +#if defined(__native_client__) + // The Native Client port of V8 uses an interpreter, + // so code pages don't need PROT_EXEC. + PROT_READ, +#else + PROT_READ | PROT_EXEC, +#endif + MAP_PRIVATE, + fileno(f), + 0); + DCHECK(addr != MAP_FAILED); + OS::Free(addr, size); + fclose(f); +} + + +// Constants used for mmap. 
+static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + uint8_t* base = static_cast<uint8_t*>(reservation); + uint8_t* aligned_base = RoundUp(base, alignment); + DCHECK_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + DCHECK_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + DCHECK(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +#if defined(LEAK_SANITIZER) + __lsan_register_root_region(address_, size_); +#endif +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); 
+} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + +#if defined(LEAK_SANITIZER) + __lsan_register_root_region(result, size); +#endif + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { +#if defined(__native_client__) + // The Native Client port of V8 uses an interpreter, + // so code pages don't need PROT_EXEC. + int prot = PROT_READ | PROT_WRITE; +#else + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); +#endif + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { +#if defined(LEAK_SANITIZER) + __lsan_unregister_root_region(base, size); +#endif + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + return true; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-macos.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-macos.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-macos.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-macos.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,310 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for MacOS goes here. For the POSIX-compatible +// parts, the implementation is in platform-posix.cc. + +#include <dlfcn.h> +#include <mach/mach_init.h> +#include <mach-o/dyld.h> +#include <mach-o/getsect.h> +#include <sys/mman.h> +#include <unistd.h> + +#include <AvailabilityMacros.h> + +#include <errno.h> +#include <libkern/OSAtomic.h> +#include <mach/mach.h> +#include <mach/semaphore.h> +#include <mach/task.h> +#include <mach/vm_statistics.h> +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> +#include <stdarg.h> +#include <stdlib.h> +#include <string.h> +#include <sys/resource.h> +#include <sys/sysctl.h> +#include <sys/time.h> +#include <sys/types.h> + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" + + +namespace v8 { +namespace base { + + +// Constants used for mmap. +// kMmapFd is used to pass vm_alloc flags to tag the region with the user +// defined tag 255 This helps identify V8-allocated regions in memory analysis +// tools like vmmap(1). +static const int kMmapFd = VM_MAKE_TAG(255); +static const off_t kMmapFdOffset = 0; + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, getpagesize()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + void* mbase = mmap(OS::GetRandomMmapAddr(), + msize, + prot, + MAP_PRIVATE | MAP_ANON, + kMmapFd, + kMmapFdOffset); + if (mbase == MAP_FAILED) return NULL; + *allocated = msize; + return mbase; +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) OS::Free(memory_, size_); + fclose(file_); +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + std::vector<SharedLibraryAddress> result; + unsigned int images_count = _dyld_image_count(); + for (unsigned int i = 0; i < images_count; ++i) { + const mach_header* header = _dyld_get_image_header(i); + if (header == NULL) continue; +#if V8_HOST_ARCH_X64 + uint64_t size; + char* code_ptr = getsectdatafromheader_64( + reinterpret_cast<const mach_header_64*>(header), + SEG_TEXT, + SECT_TEXT, + 
&size); +#else + unsigned int size; + char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); +#endif + if (code_ptr == NULL) continue; + const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); + const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; + result.push_back( + SharedLibraryAddress(_dyld_get_image_name(i), start, start + size)); + } + return result; +} + + +void OS::SignalCodeMovingGC() { +} + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return t->tm_zone; +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + time_t tv = time(NULL); + struct tm* t = localtime(&tv); + // tm_gmtoff includes any daylight savings offset, so subtract it. + return static_cast<double>(t->tm_gmtoff * msPerSecond - + (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); +} + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + uint8_t* base = static_cast<uint8_t*>(reservation); + uint8_t* aligned_base = RoundUp(base, alignment); + DCHECK_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. 
+ if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + DCHECK_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + DCHECK(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* address, + size_t size, + bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + if (MAP_FAILED == mmap(address, + size, + prot, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* address, size_t size) { + return mmap(address, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* address, size_t size) { + return munmap(address, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + return false; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-openbsd.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-openbsd.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-openbsd.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-openbsd.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,338 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for OpenBSD and NetBSD goes here. For the +// POSIX-compatible parts, the implementation is in platform-posix.cc. 
+ +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> +#include <stdlib.h> +#include <sys/resource.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> + +#include <errno.h> +#include <fcntl.h> // open +#include <stdarg.h> +#include <strings.h> // index +#include <sys/mman.h> // mmap & munmap +#include <sys/stat.h> // open +#include <sys/types.h> // mmap & munmap +#include <unistd.h> // sysconf + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" + + +namespace v8 { +namespace base { + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return t->tm_zone; +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + time_t tv = time(NULL); + struct tm* t = localtime(&tv); + // tm_gmtoff includes any daylight savings offset, so subtract it. + return static_cast<double>(t->tm_gmtoff * msPerSecond - + (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, AllocateAlignment()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + void* addr = OS::GetRandomMmapAddr(); + void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mbase == MAP_FAILED) return NULL; + *allocated = msize; + return mbase; +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) OS::Free(memory_, size_); + fclose(file_); +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + std::vector<SharedLibraryAddress> result; + // This function assumes that the layout of the file is as follows: + // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] + // If we encounter an unexpected situation we abort scanning further entries. + FILE* fp = fopen("/proc/self/maps", "r"); + if (fp == NULL) return result; + + // Allocate enough room to be able to store a full file name. 
+ const int kLibNameLen = FILENAME_MAX + 1; + char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); + + // This loop will terminate once the scanning hits an EOF. + while (true) { + uintptr_t start, end; + char attr_r, attr_w, attr_x, attr_p; + // Parse the addresses and permission bits at the beginning of the line. + if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; + if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; + + int c; + if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { + // Found a read-only executable entry. Skip characters until we reach + // the beginning of the filename or the end of the line. + do { + c = getc(fp); + } while ((c != EOF) && (c != '\n') && (c != '/')); + if (c == EOF) break; // EOF: Was unexpected, just exit. + + // Process the filename if found. + if (c == '/') { + ungetc(c, fp); // Push the '/' back into the stream to be read below. + + // Read to the end of the line. Exit if the read fails. + if (fgets(lib_name, kLibNameLen, fp) == NULL) break; + + // Drop the newline character read by fgets. We do not need to check + // for a zero-length string because we know that we at least read the + // '/' character. + lib_name[strlen(lib_name) - 1] = '\0'; + } else { + // No library name found, just record the raw address range. + snprintf(lib_name, kLibNameLen, + "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); + } + result.push_back(SharedLibraryAddress(lib_name, start, end)); + } else { + // Entry not describing executable data. Skip to end of line to set up + // reading the next entry. + do { + c = getc(fp); + } while ((c != EOF) && (c != '\n')); + if (c == EOF) break; + } + } + free(lib_name); + fclose(fp); + return result; +} + + +void OS::SignalCodeMovingGC() { + // Support for ll_prof.py. + // + // The Linux profiler built into the kernel logs all mmap's with + // PROT_EXEC so that analysis tools can properly attribute ticks. 
We + // do a mmap with a name known by ll_prof.py and immediately munmap + // it. This injects a GC marker into the stream of events generated + // by the kernel and allows us to synchronize V8 code log and the + // kernel log. + int size = sysconf(_SC_PAGESIZE); + FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+"); + if (f == NULL) { + OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile()); + OS::Abort(); + } + void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, + fileno(f), 0); + DCHECK(addr != MAP_FAILED); + OS::Free(addr, size); + fclose(f); +} + + + +// Constants used for mmap. +static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + uint8_t* base = static_cast<uint8_t*>(reservation); + uint8_t* aligned_base = RoundUp(base, alignment); + DCHECK_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. 
+ if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + DCHECK_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + DCHECK(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-posix.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-posix.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-posix.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-posix.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,740 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for POSIX goes here. This is not a platform on its +// own, but contains the parts which are the same across the POSIX platforms +// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX. 
+ +#include <dlfcn.h> +#include <errno.h> +#include <limits.h> +#include <pthread.h> +#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) +#include <pthread_np.h> // for pthread_set_name_np +#endif +#include <sched.h> // for sched_yield +#include <time.h> +#include <unistd.h> + +#include <sys/mman.h> +#include <sys/resource.h> +#include <sys/stat.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#if defined(__linux__) +#include <sys/prctl.h> // NOLINT, for prctl +#endif +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ + defined(__NetBSD__) || defined(__OpenBSD__) +#include <sys/sysctl.h> // NOLINT, for sysctl +#endif + +#include <arpa/inet.h> +#include <netdb.h> +#include <netinet/in.h> + +#undef MAP_TYPE + +#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) +#define LOG_TAG "v8" +#include <android/log.h> // NOLINT +#endif + +#include <cmath> +#include <cstdlib> + +#include "src/base/lazy-instance.h" +#include "src/base/macros.h" +#include "src/base/platform/platform.h" +#include "src/base/platform/time.h" +#include "src/base/utils/random-number-generator.h" + +#ifdef V8_FAST_TLS_SUPPORTED +#include "src/base/atomicops.h" +#endif + +namespace v8 { +namespace base { + +namespace { + +// 0 is never a valid thread id. +const pthread_t kNoThread = (pthread_t) 0; + +bool g_hard_abort = false; + +const char* g_gc_fake_mmap = NULL; + +} // namespace + + +int OS::NumberOfProcessorsOnline() { + return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN)); +} + + +// Maximum size of the virtual memory. 0 means there is no artificial +// limit. + +intptr_t OS::MaxVirtualMemory() { + struct rlimit limit; + int result = getrlimit(RLIMIT_DATA, &limit); + if (result != 0) return 0; +#if V8_OS_NACL + // The NaCl compiler doesn't like resource.h constants. 
+ if (static_cast<int>(limit.rlim_cur) == -1) return 0; +#else + if (limit.rlim_cur == RLIM_INFINITY) return 0; +#endif + return limit.rlim_cur; +} + + +uint64_t OS::TotalPhysicalMemory() { +#if V8_OS_MACOSX + int mib[2]; + mib[0] = CTL_HW; + mib[1] = HW_MEMSIZE; + int64_t size = 0; + size_t len = sizeof(size); + if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(size); +#elif V8_OS_FREEBSD + int pages, page_size; + size_t size = sizeof(pages); + sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0); + sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0); + if (pages == -1 || page_size == -1) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(pages) * page_size; +#elif V8_OS_CYGWIN + MEMORYSTATUS memory_info; + memory_info.dwLength = sizeof(memory_info); + if (!GlobalMemoryStatus(&memory_info)) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(memory_info.dwTotalPhys); +#elif V8_OS_QNX + struct stat stat_buf; + if (stat("/proc", &stat_buf) != 0) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(stat_buf.st_size); +#else + intptr_t pages = sysconf(_SC_PHYS_PAGES); + intptr_t page_size = sysconf(_SC_PAGESIZE); + if (pages == -1 || page_size == -1) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(pages) * page_size; +#endif +} + + +int OS::ActivationFrameAlignment() { +#if V8_TARGET_ARCH_ARM + // On EABI ARM targets this is required for fp correctness in the + // runtime system. + return 8; +#elif V8_TARGET_ARCH_MIPS + return 8; +#else + // Otherwise we just assume 16 byte alignment, i.e.: + // - With gcc 4.4 the tree vectorization optimizer can generate code + // that requires 16 byte alignment such as movdqa on x86. 
+ // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned; + // see "Mac OS X ABI Function Call Guide" + return 16; +#endif +} + + +intptr_t OS::CommitPageSize() { + static intptr_t page_size = getpagesize(); + return page_size; +} + + +void OS::Free(void* address, const size_t size) { + // TODO(1240712): munmap has a return value which is ignored here. + int result = munmap(address, size); + USE(result); + DCHECK(result == 0); +} + + +// Get rid of writable permission on code allocations. +void OS::ProtectCode(void* address, const size_t size) { +#if V8_OS_CYGWIN + DWORD old_protect; + VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); +#elif V8_OS_NACL + // The Native Client port of V8 uses an interpreter, so + // code pages don't need PROT_EXEC. + mprotect(address, size, PROT_READ); +#else + mprotect(address, size, PROT_READ | PROT_EXEC); +#endif +} + + +// Create guard pages. +void OS::Guard(void* address, const size_t size) { +#if V8_OS_CYGWIN + DWORD oldprotect; + VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); +#else + mprotect(address, size, PROT_NONE); +#endif +} + + +static LazyInstance<RandomNumberGenerator>::type + platform_random_number_generator = LAZY_INSTANCE_INITIALIZER; + + +void OS::Initialize(int64_t random_seed, bool hard_abort, + const char* const gc_fake_mmap) { + if (random_seed) { + platform_random_number_generator.Pointer()->SetSeed(random_seed); + } + g_hard_abort = hard_abort; + g_gc_fake_mmap = gc_fake_mmap; +} + + +const char* OS::GetGCFakeMMapFile() { + return g_gc_fake_mmap; +} + + +void* OS::GetRandomMmapAddr() { +#if V8_OS_NACL + // TODO(bradchen): restore randomization once Native Client gets + // smarter about using mmap address hints. + // See http://code.google.com/p/nativeclient/issues/3341 + return NULL; +#endif +#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ + defined(THREAD_SANITIZER) + // Dynamic tools do not support custom mmap addresses. 
+ return NULL; +#endif + uintptr_t raw_addr; + platform_random_number_generator.Pointer()->NextBytes(&raw_addr, + sizeof(raw_addr)); +#if V8_TARGET_ARCH_X64 + // Currently available CPUs have 48 bits of virtual addressing. Truncate + // the hint address to 46 bits to give the kernel a fighting chance of + // fulfilling our placement request. + raw_addr &= V8_UINT64_C(0x3ffffffff000); +#else + raw_addr &= 0x3ffff000; + +# ifdef __sun + // For our Solaris/illumos mmap hint, we pick a random address in the bottom + // half of the top half of the address space (that is, the third quarter). + // Because we do not MAP_FIXED, this will be treated only as a hint -- the + // system will not fail to mmap() because something else happens to already + // be mapped at our random address. We deliberately set the hint high enough + // to get well above the system's break (that is, the heap); Solaris and + // illumos will try the hint and if that fails allocate as if there were + // no hint at all. The high hint prevents the break from getting hemmed in + // at low values, ceding half of the address space to the system heap. + raw_addr += 0x80000000; +# else + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos + // 10.6 and 10.7. + raw_addr += 0x20000000; +# endif +#endif + return reinterpret_cast<void*>(raw_addr); +} + + +size_t OS::AllocateAlignment() { + return static_cast<size_t>(sysconf(_SC_PAGESIZE)); +} + + +void OS::Sleep(int milliseconds) { + useconds_t ms = static_cast<useconds_t>(milliseconds); + usleep(1000 * ms); +} + + +void OS::Abort() { + if (g_hard_abort) { + V8_IMMEDIATE_CRASH(); + } + // Redirect to std abort to signal abnormal program termination. 
+ abort(); +} + + +void OS::DebugBreak() { +#if V8_HOST_ARCH_ARM + asm("bkpt 0"); +#elif V8_HOST_ARCH_ARM64 + asm("brk 0"); +#elif V8_HOST_ARCH_MIPS + asm("break"); +#elif V8_HOST_ARCH_MIPS64 + asm("break"); +#elif V8_HOST_ARCH_IA32 +#if defined(__native_client__) + asm("hlt"); +#else + asm("int $3"); +#endif // __native_client__ +#elif V8_HOST_ARCH_X64 + asm("int $3"); +#else +#error Unsupported host architecture. +#endif +} + + +// ---------------------------------------------------------------------------- +// Math functions + +double OS::nan_value() { + // NAN from math.h is defined in C99 and not in POSIX. + return NAN; +} + + +int OS::GetCurrentProcessId() { + return static_cast<int>(getpid()); +} + + +int OS::GetCurrentThreadId() { +#if V8_OS_MACOSX + return static_cast<int>(pthread_mach_thread_np(pthread_self())); +#elif V8_OS_LINUX + return static_cast<int>(syscall(__NR_gettid)); +#elif V8_OS_ANDROID + return static_cast<int>(gettid()); +#else + return static_cast<int>(pthread_self()); +#endif +} + + +// ---------------------------------------------------------------------------- +// POSIX date/time support. +// + +int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { + struct rusage usage; + + if (getrusage(RUSAGE_SELF, &usage) < 0) return -1; + *secs = usage.ru_utime.tv_sec; + *usecs = usage.ru_utime.tv_usec; + return 0; +} + + +double OS::TimeCurrentMillis() { + return Time::Now().ToJsTime(); +} + + +class TimezoneCache {}; + + +TimezoneCache* OS::CreateTimezoneCache() { + return NULL; +} + + +void OS::DisposeTimezoneCache(TimezoneCache* cache) { + DCHECK(cache == NULL); +} + + +void OS::ClearTimezoneCache(TimezoneCache* cache) { + DCHECK(cache == NULL); +} + + +double OS::DaylightSavingsOffset(double time, TimezoneCache*) { + if (std::isnan(time)) return nan_value(); + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return nan_value(); + return t->tm_isdst > 0 ? 
3600 * msPerSecond : 0; +} + + +int OS::GetLastError() { + return errno; +} + + +// ---------------------------------------------------------------------------- +// POSIX stdio support. +// + +FILE* OS::FOpen(const char* path, const char* mode) { + FILE* file = fopen(path, mode); + if (file == NULL) return NULL; + struct stat file_stat; + if (fstat(fileno(file), &file_stat) != 0) return NULL; + bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0); + if (is_regular_file) return file; + fclose(file); + return NULL; +} + + +bool OS::Remove(const char* path) { + return (remove(path) == 0); +} + + +FILE* OS::OpenTemporaryFile() { + return tmpfile(); +} + + +const char* const OS::LogFileOpenMode = "w"; + + +void OS::Print(const char* format, ...) { + va_list args; + va_start(args, format); + VPrint(format, args); + va_end(args); +} + + +void OS::VPrint(const char* format, va_list args) { +#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) + __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); +#else + vprintf(format, args); +#endif +} + + +void OS::FPrint(FILE* out, const char* format, ...) { + va_list args; + va_start(args, format); + VFPrint(out, format, args); + va_end(args); +} + + +void OS::VFPrint(FILE* out, const char* format, va_list args) { +#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) + __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); +#else + vfprintf(out, format, args); +#endif +} + + +void OS::PrintError(const char* format, ...) { + va_list args; + va_start(args, format); + VPrintError(format, args); + va_end(args); +} + + +void OS::VPrintError(const char* format, va_list args) { +#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) + __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args); +#else + vfprintf(stderr, format, args); +#endif +} + + +int OS::SNPrintF(char* str, int length, const char* format, ...) 
{ + va_list args; + va_start(args, format); + int result = VSNPrintF(str, length, format, args); + va_end(args); + return result; +} + + +int OS::VSNPrintF(char* str, + int length, + const char* format, + va_list args) { + int n = vsnprintf(str, length, format, args); + if (n < 0 || n >= length) { + // If the length is zero, the assignment fails. + if (length > 0) + str[length - 1] = '\0'; + return -1; + } else { + return n; + } +} + + +// ---------------------------------------------------------------------------- +// POSIX string support. +// + +char* OS::StrChr(char* str, int c) { + return strchr(str, c); +} + + +void OS::StrNCpy(char* dest, int length, const char* src, size_t n) { + strncpy(dest, src, n); +} + + +// ---------------------------------------------------------------------------- +// POSIX thread support. +// + +class Thread::PlatformData { + public: + PlatformData() : thread_(kNoThread) {} + pthread_t thread_; // Thread handle for pthread. + // Synchronizes thread creation + Mutex thread_creation_mutex_; +}; + +Thread::Thread(const Options& options) + : data_(new PlatformData), + stack_size_(options.stack_size()), + start_semaphore_(NULL) { + if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) { + stack_size_ = PTHREAD_STACK_MIN; + } + set_name(options.name()); +} + + +Thread::~Thread() { + delete data_; +} + + +static void SetThreadName(const char* name) { +#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD + pthread_set_name_np(pthread_self(), name); +#elif V8_OS_NETBSD + STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP); + pthread_setname_np(pthread_self(), "%s", name); +#elif V8_OS_MACOSX + // pthread_setname_np is only available in 10.6 or later, so test + // for it at runtime. 
+ int (*dynamic_pthread_setname_np)(const char*); + *reinterpret_cast<void**>(&dynamic_pthread_setname_np) = + dlsym(RTLD_DEFAULT, "pthread_setname_np"); + if (dynamic_pthread_setname_np == NULL) + return; + + // Mac OS X does not expose the length limit of the name, so hardcode it. + static const int kMaxNameLength = 63; + STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength); + dynamic_pthread_setname_np(name); +#elif defined(PR_SET_NAME) + prctl(PR_SET_NAME, + reinterpret_cast<unsigned long>(name), // NOLINT + 0, 0, 0); +#endif +} + + +static void* ThreadEntry(void* arg) { + Thread* thread = reinterpret_cast<Thread*>(arg); + // We take the lock here to make sure that pthread_create finished first since + // we don't know which thread will run first (the original thread or the new + // one). + { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); } + SetThreadName(thread->name()); + DCHECK(thread->data()->thread_ != kNoThread); + thread->NotifyStartedAndRun(); + return NULL; +} + + +void Thread::set_name(const char* name) { + strncpy(name_, name, sizeof(name_)); + name_[sizeof(name_) - 1] = '\0'; +} + + +void Thread::Start() { + int result; + pthread_attr_t attr; + memset(&attr, 0, sizeof(attr)); + result = pthread_attr_init(&attr); + DCHECK_EQ(0, result); + // Native client uses default stack size. 
+#if !V8_OS_NACL + if (stack_size_ > 0) { + result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_)); + DCHECK_EQ(0, result); + } +#endif + { + LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_); + result = pthread_create(&data_->thread_, &attr, ThreadEntry, this); + } + DCHECK_EQ(0, result); + result = pthread_attr_destroy(&attr); + DCHECK_EQ(0, result); + DCHECK(data_->thread_ != kNoThread); + USE(result); +} + + +void Thread::Join() { + pthread_join(data_->thread_, NULL); +} + + +void Thread::YieldCPU() { + const timespec delay = { 0, 1 }; + nanosleep(&delay, NULL); +} + + +static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) { +#if V8_OS_CYGWIN + // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps + // because pthread_key_t is a pointer type on Cygwin. This will probably not + // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway. + STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t)); + intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key); + return static_cast<Thread::LocalStorageKey>(ptr_key); +#else + return static_cast<Thread::LocalStorageKey>(pthread_key); +#endif +} + + +static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) { +#if V8_OS_CYGWIN + STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t)); + intptr_t ptr_key = static_cast<intptr_t>(local_key); + return reinterpret_cast<pthread_key_t>(ptr_key); +#else + return static_cast<pthread_key_t>(local_key); +#endif +} + + +#ifdef V8_FAST_TLS_SUPPORTED + +static Atomic32 tls_base_offset_initialized = 0; +intptr_t kMacTlsBaseOffset = 0; + +// It's safe to do the initialization more that once, but it has to be +// done at least once. 
+static void InitializeTlsBaseOffset() { + const size_t kBufferSize = 128; + char buffer[kBufferSize]; + size_t buffer_size = kBufferSize; + int ctl_name[] = { CTL_KERN , KERN_OSRELEASE }; + if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) { + V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version"); + } + // The buffer now contains a string of the form XX.YY.ZZ, where + // XX is the major kernel version component. + // Make sure the buffer is 0-terminated. + buffer[kBufferSize - 1] = '\0'; + char* period_pos = strchr(buffer, '.'); + *period_pos = '\0'; + int kernel_version_major = + static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT + // The constants below are taken from pthreads.s from the XNU kernel + // sources archive at www.opensource.apple.com. + if (kernel_version_major < 11) { + // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the + // same offsets. +#if V8_HOST_ARCH_IA32 + kMacTlsBaseOffset = 0x48; +#else + kMacTlsBaseOffset = 0x60; +#endif + } else { + // 11.x.x (Lion) changed the offset. 
+ kMacTlsBaseOffset = 0; + } + + Release_Store(&tls_base_offset_initialized, 1); +} + + +static void CheckFastTls(Thread::LocalStorageKey key) { + void* expected = reinterpret_cast<void*>(0x1234CAFE); + Thread::SetThreadLocal(key, expected); + void* actual = Thread::GetExistingThreadLocal(key); + if (expected != actual) { + V8_Fatal(__FILE__, __LINE__, + "V8 failed to initialize fast TLS on current kernel"); + } + Thread::SetThreadLocal(key, NULL); +} + +#endif // V8_FAST_TLS_SUPPORTED + + +Thread::LocalStorageKey Thread::CreateThreadLocalKey() { +#ifdef V8_FAST_TLS_SUPPORTED + bool check_fast_tls = false; + if (tls_base_offset_initialized == 0) { + check_fast_tls = true; + InitializeTlsBaseOffset(); + } +#endif + pthread_key_t key; + int result = pthread_key_create(&key, NULL); + DCHECK_EQ(0, result); + USE(result); + LocalStorageKey local_key = PthreadKeyToLocalKey(key); +#ifdef V8_FAST_TLS_SUPPORTED + // If we just initialized fast TLS support, make sure it works. + if (check_fast_tls) CheckFastTls(local_key); +#endif + return local_key; +} + + +void Thread::DeleteThreadLocalKey(LocalStorageKey key) { + pthread_key_t pthread_key = LocalKeyToPthreadKey(key); + int result = pthread_key_delete(pthread_key); + DCHECK_EQ(0, result); + USE(result); +} + + +void* Thread::GetThreadLocal(LocalStorageKey key) { + pthread_key_t pthread_key = LocalKeyToPthreadKey(key); + return pthread_getspecific(pthread_key); +} + + +void Thread::SetThreadLocal(LocalStorageKey key, void* value) { + pthread_key_t pthread_key = LocalKeyToPthreadKey(key); + int result = pthread_setspecific(pthread_key, value); + DCHECK_EQ(0, result); + USE(result); +} + + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-qnx.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-qnx.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-qnx.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-qnx.cc 2015-01-20 21:22:17.000000000 
+0000 @@ -0,0 +1,374 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for QNX goes here. For the POSIX-compatible +// parts the implementation is in platform-posix.cc. + +#include <backtrace.h> +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> +#include <stdlib.h> +#include <sys/resource.h> +#include <sys/time.h> +#include <sys/types.h> +#include <ucontext.h> + +// QNX requires memory pages to be marked as executable. +// Otherwise, the OS raises an exception when executing code in that page. +#include <errno.h> +#include <fcntl.h> // open +#include <stdarg.h> +#include <strings.h> // index +#include <sys/mman.h> // mmap & munmap +#include <sys/procfs.h> +#include <sys/stat.h> // open +#include <sys/types.h> // mmap & munmap +#include <unistd.h> // sysconf + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" + + +namespace v8 { +namespace base { + +// 0 is never a valid thread id on Qnx since tids and pids share a +// name space and pid 0 is reserved (see man 2 kill). +static const pthread_t kNoThread = (pthread_t) 0; + + +#ifdef __arm__ + +bool OS::ArmUsingHardFloat() { + // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify + // the Floating Point ABI used (PCS stands for Procedure Call Standard). + // We use these as well as a couple of other defines to statically determine + // what FP ABI used. + // GCC versions 4.4 and below don't support hard-fp. + // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or + // __ARM_PCS_VFP. 
+ +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#if GCC_VERSION >= 40600 +#if defined(__ARM_PCS_VFP) + return true; +#else + return false; +#endif + +#elif GCC_VERSION < 40500 + return false; + +#else +#if defined(__ARM_PCS_VFP) + return true; +#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \ + !defined(__VFP_FP__) + return false; +#else +#error "Your version of GCC does not report the FP ABI compiled for." \ + "Please report it on this issue" \ + "http://code.google.com/p/v8/issues/detail?id=2140" + +#endif +#endif +#undef GCC_VERSION +} + +#endif // __arm__ + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return t->tm_zone; +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + time_t tv = time(NULL); + struct tm* t = localtime(&tv); + // tm_gmtoff includes any daylight savings offset, so subtract it. + return static_cast<double>(t->tm_gmtoff * msPerSecond - + (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, AllocateAlignment()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + void* addr = OS::GetRandomMmapAddr(); + void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mbase == MAP_FAILED) return NULL; + *allocated = msize; + return mbase; +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) OS::Free(memory_, size_); + fclose(file_); +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + std::vector<SharedLibraryAddress> result; + procfs_mapinfo *mapinfos = NULL, *mapinfo; + int proc_fd, num, i; + + struct { + procfs_debuginfo info; + char buff[PATH_MAX]; + } map; + + char buf[PATH_MAX + 1]; + snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid()); + + if ((proc_fd = open(buf, O_RDONLY)) == -1) { + close(proc_fd); + return result; + } + + /* Get the number of map entries. 
*/ + if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) { + close(proc_fd); + return result; + } + + mapinfos = reinterpret_cast<procfs_mapinfo *>( + malloc(num * sizeof(procfs_mapinfo))); + if (mapinfos == NULL) { + close(proc_fd); + return result; + } + + /* Fill the map entries. */ + if (devctl(proc_fd, DCMD_PROC_PAGEDATA, + mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) { + free(mapinfos); + close(proc_fd); + return result; + } + + for (i = 0; i < num; i++) { + mapinfo = mapinfos + i; + if (mapinfo->flags & MAP_ELF) { + map.info.vaddr = mapinfo->vaddr; + if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) { + continue; + } + result.push_back(SharedLibraryAddress( + map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size)); + } + } + free(mapinfos); + close(proc_fd); + return result; +} + + +void OS::SignalCodeMovingGC() { +} + + +// Constants used for mmap. +static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + uint8_t* base = static_cast<uint8_t*>(reservation); + uint8_t* aligned_base = RoundUp(base, alignment); + DCHECK_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. 
+ if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + DCHECK_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + DCHECK(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + return false; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-solaris.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-solaris.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-solaris.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-solaris.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,279 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible +// parts, the implementation is in platform-posix.cc. + +#ifdef __sparc +# error "V8 does not support the SPARC CPU architecture." 
+#endif + +#include <dlfcn.h> // dladdr +#include <errno.h> +#include <ieeefp.h> // finite() +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> // sigemptyset(), etc +#include <sys/mman.h> // mmap() +#include <sys/regset.h> +#include <sys/stack.h> // for stack alignment +#include <sys/time.h> // gettimeofday(), timeradd() +#include <time.h> +#include <ucontext.h> // walkstack(), getcontext() +#include <unistd.h> // getpagesize(), usleep() + +#include <cmath> + +#undef MAP_TYPE + +#include "src/base/macros.h" +#include "src/base/platform/platform.h" + + +// It seems there is a bug in some Solaris distributions (experienced in +// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to +// access signbit() despite the availability of other C99 math functions. +#ifndef signbit +namespace std { +// Test sign - usually defined in math.h +int signbit(double x) { + // We need to take care of the special case of both positive and negative + // versions of zero. + if (x == 0) { + return fpclass(x) & FP_NZERO; + } else { + // This won't detect negative NaN but that should be okay since we don't + // assume that behavior. + return x < 0; + } +} +} // namespace std +#endif // signbit + +namespace v8 { +namespace base { + + +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + if (std::isnan(time)) return ""; + time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return tzname[0]; // The location of the timezone string on Solaris. +} + + +double OS::LocalTimeOffset(TimezoneCache* cache) { + tzset(); + return -static_cast<double>(timezone * msPerSecond); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + const size_t msize = RoundUp(requested, getpagesize()); + int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); + void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); + + if (mbase == MAP_FAILED) return NULL; + *allocated = msize; + return mbase; +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) munmap(memory_, size_); + fclose(file_); +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + return std::vector<SharedLibraryAddress>(); +} + + +void OS::SignalCodeMovingGC() { +} + + +// Constants used for mmap. 
+static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + uint8_t* base = static_cast<uint8_t*>(reservation); + uint8_t* aligned_base = RoundUp(base, alignment); + DCHECK_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + DCHECK_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + DCHECK(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return 
UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (MAP_FAILED == mmap(base, + size, + prot, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, + kMmapFd, + kMmapFdOffset)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/platform-win32.cc nodejs-0.11.15/deps/v8/src/base/platform/platform-win32.cc --- nodejs-0.11.13/deps/v8/src/base/platform/platform-win32.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/platform-win32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1429 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Platform-specific code for Win32. + +// Secure API functions are not available using MinGW with msvcrt.dll +// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to +// disable definition of secure API functions in standard headers that +// would conflict with our own implementation. 
+#ifdef __MINGW32__ +#include <_mingw.h> +#ifdef MINGW_HAS_SECURE_API +#undef MINGW_HAS_SECURE_API +#endif // MINGW_HAS_SECURE_API +#endif // __MINGW32__ + +#ifdef _MSC_VER +#include <limits> +#endif + +#include "src/base/win32-headers.h" + +#include "src/base/lazy-instance.h" +#include "src/base/macros.h" +#include "src/base/platform/platform.h" +#include "src/base/platform/time.h" +#include "src/base/utils/random-number-generator.h" + +#ifdef _MSC_VER + +// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually +// defined in strings.h. +int strncasecmp(const char* s1, const char* s2, int n) { + return _strnicmp(s1, s2, n); +} + +#endif // _MSC_VER + + +// Extra functions for MinGW. Most of these are the _s functions which are in +// the Microsoft Visual Studio C++ CRT. +#ifdef __MINGW32__ + + +#ifndef __MINGW64_VERSION_MAJOR + +#define _TRUNCATE 0 +#define STRUNCATE 80 + +inline void MemoryBarrier() { + int barrier = 0; + __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier)); +} + +#endif // __MINGW64_VERSION_MAJOR + + +int localtime_s(tm* out_tm, const time_t* time) { + tm* posix_local_time_struct = localtime(time); + if (posix_local_time_struct == NULL) return 1; + *out_tm = *posix_local_time_struct; + return 0; +} + + +int fopen_s(FILE** pFile, const char* filename, const char* mode) { + *pFile = fopen(filename, mode); + return *pFile != NULL ? 
0 : 1; +} + +int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count, + const char* format, va_list argptr) { + DCHECK(count == _TRUNCATE); + return _vsnprintf(buffer, sizeOfBuffer, format, argptr); +} + + +int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) { + CHECK(source != NULL); + CHECK(dest != NULL); + CHECK_GT(dest_size, 0); + + if (count == _TRUNCATE) { + while (dest_size > 0 && *source != 0) { + *(dest++) = *(source++); + --dest_size; + } + if (dest_size == 0) { + *(dest - 1) = 0; + return STRUNCATE; + } + } else { + while (dest_size > 0 && count > 0 && *source != 0) { + *(dest++) = *(source++); + --dest_size; + --count; + } + } + CHECK_GT(dest_size, 0); + *dest = 0; + return 0; +} + +#endif // __MINGW32__ + +namespace v8 { +namespace base { + +namespace { + +bool g_hard_abort = false; + +} // namespace + +intptr_t OS::MaxVirtualMemory() { + return 0; +} + + +class TimezoneCache { + public: + TimezoneCache() : initialized_(false) { } + + void Clear() { + initialized_ = false; + } + + // Initialize timezone information. The timezone information is obtained from + // windows. If we cannot get the timezone information we fall back to CET. + void InitializeIfNeeded() { + // Just return if timezone information has already been initialized. + if (initialized_) return; + + // Initialize POSIX time zone data. + _tzset(); + // Obtain timezone information from operating system. + memset(&tzinfo_, 0, sizeof(tzinfo_)); + if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) { + // If we cannot get timezone information we fall back to CET. + tzinfo_.Bias = -60; + tzinfo_.StandardDate.wMonth = 10; + tzinfo_.StandardDate.wDay = 5; + tzinfo_.StandardDate.wHour = 3; + tzinfo_.StandardBias = 0; + tzinfo_.DaylightDate.wMonth = 3; + tzinfo_.DaylightDate.wDay = 5; + tzinfo_.DaylightDate.wHour = 2; + tzinfo_.DaylightBias = -60; + } + + // Make standard and DST timezone names. 
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1, + std_tz_name_, kTzNameSize, NULL, NULL); + std_tz_name_[kTzNameSize - 1] = '\0'; + WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1, + dst_tz_name_, kTzNameSize, NULL, NULL); + dst_tz_name_[kTzNameSize - 1] = '\0'; + + // If OS returned empty string or resource id (like "@tzres.dll,-211") + // simply guess the name from the UTC bias of the timezone. + // To properly resolve the resource identifier requires a library load, + // which is not possible in a sandbox. + if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') { + OS::SNPrintF(std_tz_name_, kTzNameSize - 1, + "%s Standard Time", + GuessTimezoneNameFromBias(tzinfo_.Bias)); + } + if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') { + OS::SNPrintF(dst_tz_name_, kTzNameSize - 1, + "%s Daylight Time", + GuessTimezoneNameFromBias(tzinfo_.Bias)); + } + // Timezone information initialized. + initialized_ = true; + } + + // Guess the name of the timezone from the bias. + // The guess is very biased towards the northern hemisphere. 
+ const char* GuessTimezoneNameFromBias(int bias) { + static const int kHour = 60; + switch (-bias) { + case -9*kHour: return "Alaska"; + case -8*kHour: return "Pacific"; + case -7*kHour: return "Mountain"; + case -6*kHour: return "Central"; + case -5*kHour: return "Eastern"; + case -4*kHour: return "Atlantic"; + case 0*kHour: return "GMT"; + case +1*kHour: return "Central Europe"; + case +2*kHour: return "Eastern Europe"; + case +3*kHour: return "Russia"; + case +5*kHour + 30: return "India"; + case +8*kHour: return "China"; + case +9*kHour: return "Japan"; + case +12*kHour: return "New Zealand"; + default: return "Local"; + } + } + + + private: + static const int kTzNameSize = 128; + bool initialized_; + char std_tz_name_[kTzNameSize]; + char dst_tz_name_[kTzNameSize]; + TIME_ZONE_INFORMATION tzinfo_; + friend class Win32Time; +}; + + +// ---------------------------------------------------------------------------- +// The Time class represents time on win32. A timestamp is represented as +// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript +// timestamps are represented as a doubles in milliseconds since 00:00:00 UTC, +// January 1, 1970. + +class Win32Time { + public: + // Constructors. + Win32Time(); + explicit Win32Time(double jstime); + Win32Time(int year, int mon, int day, int hour, int min, int sec); + + // Convert timestamp to JavaScript representation. + double ToJSTime(); + + // Set timestamp to current time. + void SetToCurrentTime(); + + // Returns the local timezone offset in milliseconds east of UTC. This is + // the number of milliseconds you must add to UTC to get local time, i.e. + // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This + // routine also takes into account whether daylight saving is effect + // at the time. + int64_t LocalOffset(TimezoneCache* cache); + + // Returns the daylight savings time offset for the time in milliseconds. 
+ int64_t DaylightSavingsOffset(TimezoneCache* cache); + + // Returns a string identifying the current timezone for the + // timestamp taking into account daylight saving. + char* LocalTimezone(TimezoneCache* cache); + + private: + // Constants for time conversion. + static const int64_t kTimeEpoc = 116444736000000000LL; + static const int64_t kTimeScaler = 10000; + static const int64_t kMsPerMinute = 60000; + + // Constants for timezone information. + static const bool kShortTzNames = false; + + // Return whether or not daylight savings time is in effect at this time. + bool InDST(TimezoneCache* cache); + + // Accessor for FILETIME representation. + FILETIME& ft() { return time_.ft_; } + + // Accessor for integer representation. + int64_t& t() { return time_.t_; } + + // Although win32 uses 64-bit integers for representing timestamps, + // these are packed into a FILETIME structure. The FILETIME structure + // is just a struct representing a 64-bit integer. The TimeStamp union + // allows access to both a FILETIME and an integer representation of + // the timestamp. + union TimeStamp { + FILETIME ft_; + int64_t t_; + }; + + TimeStamp time_; +}; + + +// Initialize timestamp to start of epoc. +Win32Time::Win32Time() { + t() = 0; +} + + +// Initialize timestamp from a JavaScript timestamp. +Win32Time::Win32Time(double jstime) { + t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc; +} + + +// Initialize timestamp from date/time components. +Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) { + SYSTEMTIME st; + st.wYear = year; + st.wMonth = mon; + st.wDay = day; + st.wHour = hour; + st.wMinute = min; + st.wSecond = sec; + st.wMilliseconds = 0; + SystemTimeToFileTime(&st, &ft()); +} + + +// Convert timestamp to JavaScript timestamp. +double Win32Time::ToJSTime() { + return static_cast<double>((t() - kTimeEpoc) / kTimeScaler); +} + + +// Set timestamp to current time. 
+void Win32Time::SetToCurrentTime() { + // The default GetSystemTimeAsFileTime has a ~15.5ms resolution. + // Because we're fast, we like fast timers which have at least a + // 1ms resolution. + // + // timeGetTime() provides 1ms granularity when combined with + // timeBeginPeriod(). If the host application for v8 wants fast + // timers, it can use timeBeginPeriod to increase the resolution. + // + // Using timeGetTime() has a drawback because it is a 32bit value + // and hence rolls-over every ~49days. + // + // To use the clock, we use GetSystemTimeAsFileTime as our base; + // and then use timeGetTime to extrapolate current time from the + // start time. To deal with rollovers, we resync the clock + // any time when more than kMaxClockElapsedTime has passed or + // whenever timeGetTime creates a rollover. + + static bool initialized = false; + static TimeStamp init_time; + static DWORD init_ticks; + static const int64_t kHundredNanosecondsPerSecond = 10000000; + static const int64_t kMaxClockElapsedTime = + 60*kHundredNanosecondsPerSecond; // 1 minute + + // If we are uninitialized, we need to resync the clock. + bool needs_resync = !initialized; + + // Get the current time. + TimeStamp time_now; + GetSystemTimeAsFileTime(&time_now.ft_); + DWORD ticks_now = timeGetTime(); + + // Check if we need to resync due to clock rollover. + needs_resync |= ticks_now < init_ticks; + + // Check if we need to resync due to elapsed time. + needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime; + + // Check if we need to resync due to backwards time change. + needs_resync |= time_now.t_ < init_time.t_; + + // Resync the clock if necessary. + if (needs_resync) { + GetSystemTimeAsFileTime(&init_time.ft_); + init_ticks = ticks_now = timeGetTime(); + initialized = true; + } + + // Finally, compute the actual time. Why is this so hard. 
+ DWORD elapsed = ticks_now - init_ticks; + this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000); +} + + +// Return the local timezone offset in milliseconds east of UTC. This +// takes into account whether daylight saving is in effect at the time. +// Only times in the 32-bit Unix range may be passed to this function. +// Also, adding the time-zone offset to the input must not overflow. +// The function EquivalentTime() in date.js guarantees this. +int64_t Win32Time::LocalOffset(TimezoneCache* cache) { + cache->InitializeIfNeeded(); + + Win32Time rounded_to_second(*this); + rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler * + 1000 * kTimeScaler; + // Convert to local time using POSIX localtime function. + // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime() + // very slow. Other browsers use localtime(). + + // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to + // POSIX seconds past 1/1/1970 0:00:00. + double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000; + if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) { + return 0; + } + // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int. + time_t posix_time = static_cast<time_t>(unchecked_posix_time); + + // Convert to local time, as struct with fields for day, hour, year, etc. + tm posix_local_time_struct; + if (localtime_s(&posix_local_time_struct, &posix_time)) return 0; + + if (posix_local_time_struct.tm_isdst > 0) { + return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute; + } else if (posix_local_time_struct.tm_isdst == 0) { + return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute; + } else { + return cache->tzinfo_.Bias * -kMsPerMinute; + } +} + + +// Return whether or not daylight savings time is in effect at this time. +bool Win32Time::InDST(TimezoneCache* cache) { + cache->InitializeIfNeeded(); + + // Determine if DST is in effect at the specified time. 
+ bool in_dst = false; + if (cache->tzinfo_.StandardDate.wMonth != 0 || + cache->tzinfo_.DaylightDate.wMonth != 0) { + // Get the local timezone offset for the timestamp in milliseconds. + int64_t offset = LocalOffset(cache); + + // Compute the offset for DST. The bias parameters in the timezone info + // are specified in minutes. These must be converted to milliseconds. + int64_t dstofs = + -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute; + + // If the local time offset equals the timezone bias plus the daylight + // bias then DST is in effect. + in_dst = offset == dstofs; + } + + return in_dst; +} + + +// Return the daylight savings time offset for this time. +int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) { + return InDST(cache) ? 60 * kMsPerMinute : 0; +} + + +// Returns a string identifying the current timezone for the +// timestamp taking into account daylight saving. +char* Win32Time::LocalTimezone(TimezoneCache* cache) { + // Return the standard or DST time zone name based on whether daylight + // saving is in effect at the given time. + return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_; +} + + +// Returns the accumulated user time for thread. +int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { + FILETIME dummy; + uint64_t usertime; + + // Get the amount of time that the thread has executed in user mode. + if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy, + reinterpret_cast<FILETIME*>(&usertime))) return -1; + + // Adjust the resolution to micro-seconds. + usertime /= 10; + + // Convert to seconds and microseconds + *secs = static_cast<uint32_t>(usertime / 1000000); + *usecs = static_cast<uint32_t>(usertime % 1000000); + return 0; +} + + +// Returns current time as the number of milliseconds since +// 00:00:00 UTC, January 1, 1970. 
+double OS::TimeCurrentMillis() { + return Time::Now().ToJsTime(); +} + + +TimezoneCache* OS::CreateTimezoneCache() { + return new TimezoneCache(); +} + + +void OS::DisposeTimezoneCache(TimezoneCache* cache) { + delete cache; +} + + +void OS::ClearTimezoneCache(TimezoneCache* cache) { + cache->Clear(); +} + + +// Returns a string identifying the current timezone taking into +// account daylight saving. +const char* OS::LocalTimezone(double time, TimezoneCache* cache) { + return Win32Time(time).LocalTimezone(cache); +} + + +// Returns the local time offset in milliseconds east of UTC without +// taking daylight savings time into account. +double OS::LocalTimeOffset(TimezoneCache* cache) { + // Use current time, rounded to the millisecond. + Win32Time t(TimeCurrentMillis()); + // Time::LocalOffset inlcudes any daylight savings offset, so subtract it. + return static_cast<double>(t.LocalOffset(cache) - + t.DaylightSavingsOffset(cache)); +} + + +// Returns the daylight savings offset in milliseconds for the given +// time. +double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) { + int64_t offset = Win32Time(time).DaylightSavingsOffset(cache); + return static_cast<double>(offset); +} + + +int OS::GetLastError() { + return ::GetLastError(); +} + + +int OS::GetCurrentProcessId() { + return static_cast<int>(::GetCurrentProcessId()); +} + + +int OS::GetCurrentThreadId() { + return static_cast<int>(::GetCurrentThreadId()); +} + + +// ---------------------------------------------------------------------------- +// Win32 console output. +// +// If a Win32 application is linked as a console application it has a normal +// standard output and standard error. In this case normal printf works fine +// for output. However, if the application is linked as a GUI application, +// the process doesn't have a console, and therefore (debugging) output is lost. +// This is the case if we are embedded in a windows program (like a browser). 
+// In order to be able to get debug output in this case the the debugging +// facility using OutputDebugString. This output goes to the active debugger +// for the process (if any). Else the output can be monitored using DBMON.EXE. + +enum OutputMode { + UNKNOWN, // Output method has not yet been determined. + CONSOLE, // Output is written to stdout. + ODS // Output is written to debug facility. +}; + +static OutputMode output_mode = UNKNOWN; // Current output mode. + + +// Determine if the process has a console for output. +static bool HasConsole() { + // Only check the first time. Eventual race conditions are not a problem, + // because all threads will eventually determine the same mode. + if (output_mode == UNKNOWN) { + // We cannot just check that the standard output is attached to a console + // because this would fail if output is redirected to a file. Therefore we + // say that a process does not have an output console if either the + // standard output handle is invalid or its file type is unknown. + if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE && + GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN) + output_mode = CONSOLE; + else + output_mode = ODS; + } + return output_mode == CONSOLE; +} + + +static void VPrintHelper(FILE* stream, const char* format, va_list args) { + if ((stream == stdout || stream == stderr) && !HasConsole()) { + // It is important to use safe print here in order to avoid + // overflowing the buffer. We might truncate the output, but this + // does not crash. 
+ char buffer[4096]; + OS::VSNPrintF(buffer, sizeof(buffer), format, args); + OutputDebugStringA(buffer); + } else { + vfprintf(stream, format, args); + } +} + + +FILE* OS::FOpen(const char* path, const char* mode) { + FILE* result; + if (fopen_s(&result, path, mode) == 0) { + return result; + } else { + return NULL; + } +} + + +bool OS::Remove(const char* path) { + return (DeleteFileA(path) != 0); +} + + +FILE* OS::OpenTemporaryFile() { + // tmpfile_s tries to use the root dir, don't use it. + char tempPathBuffer[MAX_PATH]; + DWORD path_result = 0; + path_result = GetTempPathA(MAX_PATH, tempPathBuffer); + if (path_result > MAX_PATH || path_result == 0) return NULL; + UINT name_result = 0; + char tempNameBuffer[MAX_PATH]; + name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer); + if (name_result == 0) return NULL; + FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses. + if (result != NULL) { + Remove(tempNameBuffer); // Delete on close. + } + return result; +} + + +// Open log file in binary mode to avoid /n -> /r/n conversion. +const char* const OS::LogFileOpenMode = "wb"; + + +// Print (debug) message to console. +void OS::Print(const char* format, ...) { + va_list args; + va_start(args, format); + VPrint(format, args); + va_end(args); +} + + +void OS::VPrint(const char* format, va_list args) { + VPrintHelper(stdout, format, args); +} + + +void OS::FPrint(FILE* out, const char* format, ...) { + va_list args; + va_start(args, format); + VFPrint(out, format, args); + va_end(args); +} + + +void OS::VFPrint(FILE* out, const char* format, va_list args) { + VPrintHelper(out, format, args); +} + + +// Print error message to console. +void OS::PrintError(const char* format, ...) 
{ + va_list args; + va_start(args, format); + VPrintError(format, args); + va_end(args); +} + + +void OS::VPrintError(const char* format, va_list args) { + VPrintHelper(stderr, format, args); +} + + +int OS::SNPrintF(char* str, int length, const char* format, ...) { + va_list args; + va_start(args, format); + int result = VSNPrintF(str, length, format, args); + va_end(args); + return result; +} + + +int OS::VSNPrintF(char* str, int length, const char* format, va_list args) { + int n = _vsnprintf_s(str, length, _TRUNCATE, format, args); + // Make sure to zero-terminate the string if the output was + // truncated or if there was an error. + if (n < 0 || n >= length) { + if (length > 0) + str[length - 1] = '\0'; + return -1; + } else { + return n; + } +} + + +char* OS::StrChr(char* str, int c) { + return const_cast<char*>(strchr(str, c)); +} + + +void OS::StrNCpy(char* dest, int length, const char* src, size_t n) { + // Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small. + size_t buffer_size = static_cast<size_t>(length); + if (n + 1 > buffer_size) // count for trailing '\0' + n = _TRUNCATE; + int result = strncpy_s(dest, length, src, n); + USE(result); + DCHECK(result == 0 || (n == _TRUNCATE && result == STRUNCATE)); +} + + +#undef _TRUNCATE +#undef STRUNCATE + + +// Get the system's page size used by VirtualAlloc() or the next power +// of two. The reason for always returning a power of two is that the +// rounding up in OS::Allocate expects that. +static size_t GetPageSize() { + static size_t page_size = 0; + if (page_size == 0) { + SYSTEM_INFO info; + GetSystemInfo(&info); + page_size = RoundUpToPowerOf2(info.dwPageSize); + } + return page_size; +} + + +// The allocation alignment is the guaranteed alignment for +// VirtualAlloc'ed blocks of memory. 
+size_t OS::AllocateAlignment() { + static size_t allocate_alignment = 0; + if (allocate_alignment == 0) { + SYSTEM_INFO info; + GetSystemInfo(&info); + allocate_alignment = info.dwAllocationGranularity; + } + return allocate_alignment; +} + + +static LazyInstance<RandomNumberGenerator>::type + platform_random_number_generator = LAZY_INSTANCE_INITIALIZER; + + +void OS::Initialize(int64_t random_seed, bool hard_abort, + const char* const gc_fake_mmap) { + if (random_seed) { + platform_random_number_generator.Pointer()->SetSeed(random_seed); + } + g_hard_abort = hard_abort; +} + + +void* OS::GetRandomMmapAddr() { + // The address range used to randomize RWX allocations in OS::Allocate + // Try not to map pages into the default range that windows loads DLLs + // Use a multiple of 64k to prevent committing unused memory. + // Note: This does not guarantee RWX regions will be within the + // range kAllocationRandomAddressMin to kAllocationRandomAddressMax +#ifdef V8_HOST_ARCH_64_BIT + static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; + static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; +#else + static const intptr_t kAllocationRandomAddressMin = 0x04000000; + static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; +#endif + uintptr_t address = + (platform_random_number_generator.Pointer()->NextInt() << kPageSizeBits) | + kAllocationRandomAddressMin; + address &= kAllocationRandomAddressMax; + return reinterpret_cast<void *>(address); +} + + +static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { + LPVOID base = NULL; + + if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { + // For exectutable pages try and randomize the allocation address + for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { + base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection); + } + } + + // After three attempts give up and let the OS find an address to use. 
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); + + return base; +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool is_executable) { + // VirtualAlloc rounds allocated size to page size automatically. + size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); + + // Windows XP SP2 allows Data Excution Prevention (DEP). + int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + + LPVOID mbase = RandomizedVirtualAlloc(msize, + MEM_COMMIT | MEM_RESERVE, + prot); + + if (mbase == NULL) return NULL; + + DCHECK(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); + + *allocated = msize; + return mbase; +} + + +void OS::Free(void* address, const size_t size) { + // TODO(1240712): VirtualFree has a return value which is ignored here. + VirtualFree(address, 0, MEM_RELEASE); + USE(size); +} + + +intptr_t OS::CommitPageSize() { + return 4096; +} + + +void OS::ProtectCode(void* address, const size_t size) { + DWORD old_protect; + VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); +} + + +void OS::Guard(void* address, const size_t size) { + DWORD oldprotect; + VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); +} + + +void OS::Sleep(int milliseconds) { + ::Sleep(milliseconds); +} + + +void OS::Abort() { + if (g_hard_abort) { + V8_IMMEDIATE_CRASH(); + } + // Make the MSVCRT do a silent abort. 
+ raise(SIGABRT); +} + + +void OS::DebugBreak() { +#ifdef _MSC_VER + // To avoid Visual Studio runtime support the following code can be used + // instead + // __asm { int 3 } + __debugbreak(); +#else + ::DebugBreak(); +#endif +} + + +class Win32MemoryMappedFile : public OS::MemoryMappedFile { + public: + Win32MemoryMappedFile(HANDLE file, + HANDLE file_mapping, + void* memory, + int size) + : file_(file), + file_mapping_(file_mapping), + memory_(memory), + size_(size) { } + virtual ~Win32MemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + HANDLE file_; + HANDLE file_mapping_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + // Open a physical file + HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); + if (file == INVALID_HANDLE_VALUE) return NULL; + + int size = static_cast<int>(GetFileSize(file, NULL)); + + // Create a file mapping for the physical file + HANDLE file_mapping = CreateFileMapping(file, NULL, + PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL); + if (file_mapping == NULL) return NULL; + + // Map a view of the file into memory + void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size); + return new Win32MemoryMappedFile(file, file_mapping, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + // Open a physical file + HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL); + if (file == NULL) return NULL; + // Create a file mapping for the physical file + HANDLE file_mapping = CreateFileMapping(file, NULL, + PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL); + if (file_mapping == NULL) return NULL; + // Map a view of the file into memory + void* memory = MapViewOfFile(file_mapping, 
FILE_MAP_ALL_ACCESS, 0, 0, size); + if (memory) memmove(memory, initial, size); + return new Win32MemoryMappedFile(file, file_mapping, memory, size); +} + + +Win32MemoryMappedFile::~Win32MemoryMappedFile() { + if (memory_ != NULL) + UnmapViewOfFile(memory_); + CloseHandle(file_mapping_); + CloseHandle(file_); +} + + +// The following code loads functions defined in DbhHelp.h and TlHelp32.h +// dynamically. This is to avoid being depending on dbghelp.dll and +// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to +// kernel32.dll at some point so loading functions defines in TlHelp32.h +// dynamically might not be necessary any more - for some versions of Windows?). + +// Function pointers to functions dynamically loaded from dbghelp.dll. +#define DBGHELP_FUNCTION_LIST(V) \ + V(SymInitialize) \ + V(SymGetOptions) \ + V(SymSetOptions) \ + V(SymGetSearchPath) \ + V(SymLoadModule64) \ + V(StackWalk64) \ + V(SymGetSymFromAddr64) \ + V(SymGetLineFromAddr64) \ + V(SymFunctionTableAccess64) \ + V(SymGetModuleBase64) + +// Function pointers to functions dynamically loaded from dbghelp.dll. +#define TLHELP32_FUNCTION_LIST(V) \ + V(CreateToolhelp32Snapshot) \ + V(Module32FirstW) \ + V(Module32NextW) + +// Define the decoration to use for the type and variable name used for +// dynamically loaded DLL function.. +#define DLL_FUNC_TYPE(name) _##name##_ +#define DLL_FUNC_VAR(name) _##name + +// Define the type for each dynamically loaded DLL function. The function +// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros +// from the Windows include files are redefined here to have the function +// definitions to be as close to the ones in the original .h files as possible. +#ifndef IN +#define IN +#endif +#ifndef VOID +#define VOID void +#endif + +// DbgHelp isn't supported on MinGW yet +#ifndef __MINGW32__ +// DbgHelp.h functions. 
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess, + IN PSTR UserSearchPath, + IN BOOL fInvadeProcess); +typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID); +typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions); +typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))( + IN HANDLE hProcess, + OUT PSTR SearchPath, + IN DWORD SearchPathLength); +typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))( + IN HANDLE hProcess, + IN HANDLE hFile, + IN PSTR ImageName, + IN PSTR ModuleName, + IN DWORD64 BaseOfDll, + IN DWORD SizeOfDll); +typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))( + DWORD MachineType, + HANDLE hProcess, + HANDLE hThread, + LPSTACKFRAME64 StackFrame, + PVOID ContextRecord, + PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine, + PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine, + PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine, + PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress); +typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))( + IN HANDLE hProcess, + IN DWORD64 qwAddr, + OUT PDWORD64 pdwDisplacement, + OUT PIMAGEHLP_SYMBOL64 Symbol); +typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))( + IN HANDLE hProcess, + IN DWORD64 qwAddr, + OUT PDWORD pdwDisplacement, + OUT PIMAGEHLP_LINE64 Line64); +// DbgHelp.h typedefs. Implementation found in dbghelp.dll. +typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))( + HANDLE hProcess, + DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64 +typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))( + HANDLE hProcess, + DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64 + +// TlHelp32.h functions. 
+typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))( + DWORD dwFlags, + DWORD th32ProcessID); +typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot, + LPMODULEENTRY32W lpme); +typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot, + LPMODULEENTRY32W lpme); + +#undef IN +#undef VOID + +// Declare a variable for each dynamically loaded DLL function. +#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL; +DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION) +TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION) +#undef DEF_DLL_FUNCTION + +// Load the functions. This function has a lot of "ugly" macros in order to +// keep down code duplication. + +static bool LoadDbgHelpAndTlHelp32() { + static bool dbghelp_loaded = false; + + if (dbghelp_loaded) return true; + + HMODULE module; + + // Load functions from the dbghelp.dll module. + module = LoadLibrary(TEXT("dbghelp.dll")); + if (module == NULL) { + return false; + } + +#define LOAD_DLL_FUNC(name) \ + DLL_FUNC_VAR(name) = \ + reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name)); + +DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC) + +#undef LOAD_DLL_FUNC + + // Load functions from the kernel32.dll module (the TlHelp32.h function used + // to be in tlhelp32.dll but are now moved to kernel32.dll). + module = LoadLibrary(TEXT("kernel32.dll")); + if (module == NULL) { + return false; + } + +#define LOAD_DLL_FUNC(name) \ + DLL_FUNC_VAR(name) = \ + reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name)); + +TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC) + +#undef LOAD_DLL_FUNC + + // Check that all functions where loaded. + bool result = +#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) && + +DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED) +TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED) + +#undef DLL_FUNC_LOADED + true; + + dbghelp_loaded = result; + return result; + // NOTE: The modules are never unloaded and will stay around until the + // application is closed. 
+} + +#undef DBGHELP_FUNCTION_LIST +#undef TLHELP32_FUNCTION_LIST +#undef DLL_FUNC_VAR +#undef DLL_FUNC_TYPE + + +// Load the symbols for generating stack traces. +static std::vector<OS::SharedLibraryAddress> LoadSymbols( + HANDLE process_handle) { + static std::vector<OS::SharedLibraryAddress> result; + + static bool symbols_loaded = false; + + if (symbols_loaded) return result; + + BOOL ok; + + // Initialize the symbol engine. + ok = _SymInitialize(process_handle, // hProcess + NULL, // UserSearchPath + false); // fInvadeProcess + if (!ok) return result; + + DWORD options = _SymGetOptions(); + options |= SYMOPT_LOAD_LINES; + options |= SYMOPT_FAIL_CRITICAL_ERRORS; + options = _SymSetOptions(options); + + char buf[OS::kStackWalkMaxNameLen] = {0}; + ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen); + if (!ok) { + int err = GetLastError(); + OS::Print("%d\n", err); + return result; + } + + HANDLE snapshot = _CreateToolhelp32Snapshot( + TH32CS_SNAPMODULE, // dwFlags + GetCurrentProcessId()); // th32ProcessId + if (snapshot == INVALID_HANDLE_VALUE) return result; + MODULEENTRY32W module_entry; + module_entry.dwSize = sizeof(module_entry); // Set the size of the structure. + BOOL cont = _Module32FirstW(snapshot, &module_entry); + while (cont) { + DWORD64 base; + // NOTE the SymLoadModule64 function has the peculiarity of accepting a + // both unicode and ASCII strings even though the parameter is PSTR. 
+ base = _SymLoadModule64( + process_handle, // hProcess + 0, // hFile + reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName + reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName + reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll + module_entry.modBaseSize); // SizeOfDll + if (base == 0) { + int err = GetLastError(); + if (err != ERROR_MOD_NOT_FOUND && + err != ERROR_INVALID_HANDLE) { + result.clear(); + return result; + } + } + int lib_name_length = WideCharToMultiByte( + CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL); + std::string lib_name(lib_name_length, 0); + WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0], + lib_name_length, NULL, NULL); + result.push_back(OS::SharedLibraryAddress( + lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr), + reinterpret_cast<unsigned int>(module_entry.modBaseAddr + + module_entry.modBaseSize))); + cont = _Module32NextW(snapshot, &module_entry); + } + CloseHandle(snapshot); + + symbols_loaded = true; + return result; +} + + +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + // SharedLibraryEvents are logged when loading symbol information. + // Only the shared libraries loaded at the time of the call to + // GetSharedLibraryAddresses are logged. DLLs loaded after + // initialization are not accounted for. 
+ if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>(); + HANDLE process_handle = GetCurrentProcess(); + return LoadSymbols(process_handle); +} + + +void OS::SignalCodeMovingGC() { +} + + +uint64_t OS::TotalPhysicalMemory() { + MEMORYSTATUSEX memory_info; + memory_info.dwLength = sizeof(memory_info); + if (!GlobalMemoryStatusEx(&memory_info)) { + UNREACHABLE(); + return 0; + } + + return static_cast<uint64_t>(memory_info.ullTotalPhys); +} + + +#else // __MINGW32__ +std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() { + return std::vector<OS::SharedLibraryAddress>(); +} + + +void OS::SignalCodeMovingGC() { } +#endif // __MINGW32__ + + +int OS::NumberOfProcessorsOnline() { + SYSTEM_INFO info; + GetSystemInfo(&info); + return info.dwNumberOfProcessors; +} + + +double OS::nan_value() { +#ifdef _MSC_VER + return std::numeric_limits<double>::quiet_NaN(); +#else // _MSC_VER + return NAN; +#endif // _MSC_VER +} + + +int OS::ActivationFrameAlignment() { +#ifdef _WIN64 + return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned. +#elif defined(__MINGW32__) + // With gcc 4.4 the tree vectorization optimizer can generate code + // that requires 16 byte alignment such as movdqa on x86. + return 16; +#else + return 8; // Floating-point math runs faster with 8-byte alignment. 
+#endif +} + + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + DCHECK(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* address = ReserveRegion(request_size); + if (address == NULL) return; + uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment); + // Try reducing the size by freeing and then reallocating a specific area. + bool result = ReleaseRegion(address, request_size); + USE(result); + DCHECK(result); + address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); + if (address != NULL) { + request_size = size; + DCHECK(base == static_cast<uint8_t*>(address)); + } else { + // Resizing failed, just go with a bigger area. + address = ReserveRegion(request_size); + if (address == NULL) return; + } + address_ = address; + size_ = request_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + DCHECK(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + DCHECK(IsReserved()); + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + if (NULL == VirtualAlloc(address, + OS::CommitPageSize(), + MEM_COMMIT, + PAGE_NOACCESS)) { + return false; + } + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); +} + + +bool 
VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { + return false; + } + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return VirtualFree(base, size, MEM_DECOMMIT) != 0; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return VirtualFree(base, 0, MEM_RELEASE) != 0; +} + + +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + + +// ---------------------------------------------------------------------------- +// Win32 thread support. + +// Definition of invalid thread handle and id. +static const HANDLE kNoThread = INVALID_HANDLE_VALUE; + +// Entry point for threads. The supplied argument is a pointer to the thread +// object. The entry function dispatches to the run method in the thread +// object. It is important that this function has __stdcall calling +// convention. +static unsigned int __stdcall ThreadEntry(void* arg) { + Thread* thread = reinterpret_cast<Thread*>(arg); + thread->NotifyStartedAndRun(); + return 0; +} + + +class Thread::PlatformData { + public: + explicit PlatformData(HANDLE thread) : thread_(thread) {} + HANDLE thread_; + unsigned thread_id_; +}; + + +// Initialize a Win32 thread object. The thread has an invalid thread +// handle until it is started. + +Thread::Thread(const Options& options) + : stack_size_(options.stack_size()), + start_semaphore_(NULL) { + data_ = new PlatformData(kNoThread); + set_name(options.name()); +} + + +void Thread::set_name(const char* name) { + OS::StrNCpy(name_, sizeof(name_), name, strlen(name)); + name_[sizeof(name_) - 1] = '\0'; +} + + +// Close our own handle for the thread. +Thread::~Thread() { + if (data_->thread_ != kNoThread) CloseHandle(data_->thread_); + delete data_; +} + + +// Create a new thread. 
It is important to use _beginthreadex() instead of +// the Win32 function CreateThread(), because the CreateThread() does not +// initialize thread specific structures in the C runtime library. +void Thread::Start() { + data_->thread_ = reinterpret_cast<HANDLE>( + _beginthreadex(NULL, + static_cast<unsigned>(stack_size_), + ThreadEntry, + this, + 0, + &data_->thread_id_)); +} + + +// Wait for thread to terminate. +void Thread::Join() { + if (data_->thread_id_ != GetCurrentThreadId()) { + WaitForSingleObject(data_->thread_, INFINITE); + } +} + + +Thread::LocalStorageKey Thread::CreateThreadLocalKey() { + DWORD result = TlsAlloc(); + DCHECK(result != TLS_OUT_OF_INDEXES); + return static_cast<LocalStorageKey>(result); +} + + +void Thread::DeleteThreadLocalKey(LocalStorageKey key) { + BOOL result = TlsFree(static_cast<DWORD>(key)); + USE(result); + DCHECK(result); +} + + +void* Thread::GetThreadLocal(LocalStorageKey key) { + return TlsGetValue(static_cast<DWORD>(key)); +} + + +void Thread::SetThreadLocal(LocalStorageKey key, void* value) { + BOOL result = TlsSetValue(static_cast<DWORD>(key), value); + USE(result); + DCHECK(result); +} + + + +void Thread::YieldCPU() { + Sleep(0); +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/semaphore.cc nodejs-0.11.15/deps/v8/src/base/platform/semaphore.cc --- nodejs-0.11.13/deps/v8/src/base/platform/semaphore.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/semaphore.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,191 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/platform/semaphore.h" + +#if V8_OS_MACOSX +#include <mach/mach_init.h> +#include <mach/task.h> +#endif + +#include <errno.h> + +#include "src/base/logging.h" +#include "src/base/platform/time.h" + +namespace v8 { +namespace base { + +#if V8_OS_MACOSX + +Semaphore::Semaphore(int count) { + kern_return_t result = semaphore_create( + mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count); + DCHECK_EQ(KERN_SUCCESS, result); + USE(result); +} + + +Semaphore::~Semaphore() { + kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_); + DCHECK_EQ(KERN_SUCCESS, result); + USE(result); +} + + +void Semaphore::Signal() { + kern_return_t result = semaphore_signal(native_handle_); + DCHECK_EQ(KERN_SUCCESS, result); + USE(result); +} + + +void Semaphore::Wait() { + while (true) { + kern_return_t result = semaphore_wait(native_handle_); + if (result == KERN_SUCCESS) return; // Semaphore was signalled. + DCHECK_EQ(KERN_ABORTED, result); + } +} + + +bool Semaphore::WaitFor(const TimeDelta& rel_time) { + TimeTicks now = TimeTicks::Now(); + TimeTicks end = now + rel_time; + while (true) { + mach_timespec_t ts; + if (now >= end) { + // Return immediately if semaphore was not signalled. + ts.tv_sec = 0; + ts.tv_nsec = 0; + } else { + ts = (end - now).ToMachTimespec(); + } + kern_return_t result = semaphore_timedwait(native_handle_, ts); + if (result == KERN_SUCCESS) return true; // Semaphore was signalled. + if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout. 
+ DCHECK_EQ(KERN_ABORTED, result); + now = TimeTicks::Now(); + } +} + +#elif V8_OS_POSIX + +Semaphore::Semaphore(int count) { + DCHECK(count >= 0); + int result = sem_init(&native_handle_, 0, count); + DCHECK_EQ(0, result); + USE(result); +} + + +Semaphore::~Semaphore() { + int result = sem_destroy(&native_handle_); + DCHECK_EQ(0, result); + USE(result); +} + + +void Semaphore::Signal() { + int result = sem_post(&native_handle_); + DCHECK_EQ(0, result); + USE(result); +} + + +void Semaphore::Wait() { + while (true) { + int result = sem_wait(&native_handle_); + if (result == 0) return; // Semaphore was signalled. + // Signal caused spurious wakeup. + DCHECK_EQ(-1, result); + DCHECK_EQ(EINTR, errno); + } +} + + +bool Semaphore::WaitFor(const TimeDelta& rel_time) { + // Compute the time for end of timeout. + const Time time = Time::NowFromSystemTime() + rel_time; + const struct timespec ts = time.ToTimespec(); + + // Wait for semaphore signalled or timeout. + while (true) { + int result = sem_timedwait(&native_handle_, &ts); + if (result == 0) return true; // Semaphore was signalled. +#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) + if (result > 0) { + // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1. + errno = result; + result = -1; + } +#endif + if (result == -1 && errno == ETIMEDOUT) { + // Timed out while waiting for semaphore. + return false; + } + // Signal caused spurious wakeup. 
+ DCHECK_EQ(-1, result); + DCHECK_EQ(EINTR, errno); + } +} + +#elif V8_OS_WIN + +Semaphore::Semaphore(int count) { + DCHECK(count >= 0); + native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL); + DCHECK(native_handle_ != NULL); +} + + +Semaphore::~Semaphore() { + BOOL result = CloseHandle(native_handle_); + DCHECK(result); + USE(result); +} + + +void Semaphore::Signal() { + LONG dummy; + BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy); + DCHECK(result); + USE(result); +} + + +void Semaphore::Wait() { + DWORD result = WaitForSingleObject(native_handle_, INFINITE); + DCHECK(result == WAIT_OBJECT_0); + USE(result); +} + + +bool Semaphore::WaitFor(const TimeDelta& rel_time) { + TimeTicks now = TimeTicks::Now(); + TimeTicks end = now + rel_time; + while (true) { + int64_t msec = (end - now).InMilliseconds(); + if (msec >= static_cast<int64_t>(INFINITE)) { + DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1); + if (result == WAIT_OBJECT_0) { + return true; + } + DCHECK(result == WAIT_TIMEOUT); + now = TimeTicks::Now(); + } else { + DWORD result = WaitForSingleObject( + native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec)); + if (result == WAIT_TIMEOUT) { + return false; + } + DCHECK(result == WAIT_OBJECT_0); + return true; + } + } +} + +#endif // V8_OS_MACOSX + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/semaphore.h nodejs-0.11.15/deps/v8/src/base/platform/semaphore.h --- nodejs-0.11.13/deps/v8/src/base/platform/semaphore.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/semaphore.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,101 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_BASE_PLATFORM_SEMAPHORE_H_ +#define V8_BASE_PLATFORM_SEMAPHORE_H_ + +#include "src/base/lazy-instance.h" +#if V8_OS_WIN +#include "src/base/win32-headers.h" +#endif + +#if V8_OS_MACOSX +#include <mach/semaphore.h> // NOLINT +#elif V8_OS_POSIX +#include <semaphore.h> // NOLINT +#endif + +namespace v8 { +namespace base { + +// Forward declarations. +class TimeDelta; + +// ---------------------------------------------------------------------------- +// Semaphore +// +// A semaphore object is a synchronization object that maintains a count. The +// count is decremented each time a thread completes a wait for the semaphore +// object and incremented each time a thread signals the semaphore. When the +// count reaches zero, threads waiting for the semaphore blocks until the +// count becomes non-zero. + +class Semaphore V8_FINAL { + public: + explicit Semaphore(int count); + ~Semaphore(); + + // Increments the semaphore counter. + void Signal(); + + // Suspends the calling thread until the semaphore counter is non zero + // and then decrements the semaphore counter. + void Wait(); + + // Suspends the calling thread until the counter is non zero or the timeout + // time has passed. If timeout happens the return value is false and the + // counter is unchanged. Otherwise the semaphore counter is decremented and + // true is returned. + bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; + +#if V8_OS_MACOSX + typedef semaphore_t NativeHandle; +#elif V8_OS_POSIX + typedef sem_t NativeHandle; +#elif V8_OS_WIN + typedef HANDLE NativeHandle; +#endif + + NativeHandle& native_handle() { + return native_handle_; + } + const NativeHandle& native_handle() const { + return native_handle_; + } + + private: + NativeHandle native_handle_; + + DISALLOW_COPY_AND_ASSIGN(Semaphore); +}; + + +// POD Semaphore initialized lazily (i.e. the first time Pointer() is called). +// Usage: +// // The following semaphore starts at 0. 
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER; +// +// void my_function() { +// // Do something with my_semaphore.Pointer(). +// } +// + +template <int N> +struct CreateSemaphoreTrait { + static Semaphore* Create() { + return new Semaphore(N); + } +}; + +template <int N> +struct LazySemaphore { + typedef typename LazyDynamicInstance<Semaphore, CreateSemaphoreTrait<N>, + ThreadSafeInitOnceTrait>::type type; +}; + +#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER + +} } // namespace v8::base + +#endif // V8_BASE_PLATFORM_SEMAPHORE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/time.cc nodejs-0.11.15/deps/v8/src/base/platform/time.cc --- nodejs-0.11.13/deps/v8/src/base/platform/time.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/platform/time.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,654 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/platform/time.h" + +#if V8_OS_POSIX +#include <fcntl.h> // for O_RDONLY +#include <sys/time.h> +#include <unistd.h> +#endif +#if V8_OS_MACOSX +#include <mach/mach_time.h> +#endif + +#include <string.h> + +#if V8_OS_WIN +#include "src/base/lazy-instance.h" +#include "src/base/win32-headers.h" +#endif +#include "src/base/cpu.h" +#include "src/base/logging.h" +#include "src/base/platform/platform.h" + +namespace v8 { +namespace base { + +TimeDelta TimeDelta::FromDays(int days) { + return TimeDelta(days * Time::kMicrosecondsPerDay); +} + + +TimeDelta TimeDelta::FromHours(int hours) { + return TimeDelta(hours * Time::kMicrosecondsPerHour); +} + + +TimeDelta TimeDelta::FromMinutes(int minutes) { + return TimeDelta(minutes * Time::kMicrosecondsPerMinute); +} + + +TimeDelta TimeDelta::FromSeconds(int64_t seconds) { + return TimeDelta(seconds * Time::kMicrosecondsPerSecond); +} + + +TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) { + return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond); +} + + +TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) { + return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond); +} + + +int TimeDelta::InDays() const { + return static_cast<int>(delta_ / Time::kMicrosecondsPerDay); +} + + +int TimeDelta::InHours() const { + return static_cast<int>(delta_ / Time::kMicrosecondsPerHour); +} + + +int TimeDelta::InMinutes() const { + return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute); +} + + +double TimeDelta::InSecondsF() const { + return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond; +} + + +int64_t TimeDelta::InSeconds() const { + return delta_ / Time::kMicrosecondsPerSecond; +} + + +double TimeDelta::InMillisecondsF() const { + return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond; +} + + +int64_t TimeDelta::InMilliseconds() const { + return delta_ / Time::kMicrosecondsPerMillisecond; +} + + +int64_t TimeDelta::InNanoseconds() const { + return 
delta_ * Time::kNanosecondsPerMicrosecond; +} + + +#if V8_OS_MACOSX + +TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) { + DCHECK_GE(ts.tv_nsec, 0); + DCHECK_LT(ts.tv_nsec, + static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT + return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond + + ts.tv_nsec / Time::kNanosecondsPerMicrosecond); +} + + +struct mach_timespec TimeDelta::ToMachTimespec() const { + struct mach_timespec ts; + DCHECK(delta_ >= 0); + ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond; + ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) * + Time::kNanosecondsPerMicrosecond; + return ts; +} + +#endif // V8_OS_MACOSX + + +#if V8_OS_POSIX + +TimeDelta TimeDelta::FromTimespec(struct timespec ts) { + DCHECK_GE(ts.tv_nsec, 0); + DCHECK_LT(ts.tv_nsec, + static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT + return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond + + ts.tv_nsec / Time::kNanosecondsPerMicrosecond); +} + + +struct timespec TimeDelta::ToTimespec() const { + struct timespec ts; + ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond; + ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) * + Time::kNanosecondsPerMicrosecond; + return ts; +} + +#endif // V8_OS_POSIX + + +#if V8_OS_WIN + +// We implement time using the high-resolution timers so that we can get +// timeouts which are smaller than 10-15ms. To avoid any drift, we +// periodically resync the internal clock to the system clock. +class Clock V8_FINAL { + public: + Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {} + + Time Now() { + // Time between resampling the un-granular clock for this API (1 minute). + const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1); + + LockGuard<Mutex> lock_guard(&mutex_); + + // Determine current time and ticks. 
+ TimeTicks ticks = GetSystemTicks(); + Time time = GetSystemTime(); + + // Check if we need to synchronize with the system clock due to a backwards + // time change or the amount of time elapsed. + TimeDelta elapsed = ticks - initial_ticks_; + if (time < initial_time_ || elapsed > kMaxElapsedTime) { + initial_ticks_ = ticks; + initial_time_ = time; + return time; + } + + return initial_time_ + elapsed; + } + + Time NowFromSystemTime() { + LockGuard<Mutex> lock_guard(&mutex_); + initial_ticks_ = GetSystemTicks(); + initial_time_ = GetSystemTime(); + return initial_time_; + } + + private: + static TimeTicks GetSystemTicks() { + return TimeTicks::Now(); + } + + static Time GetSystemTime() { + FILETIME ft; + ::GetSystemTimeAsFileTime(&ft); + return Time::FromFiletime(ft); + } + + TimeTicks initial_ticks_; + Time initial_time_; + Mutex mutex_; +}; + + +static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>, + ThreadSafeInitOnceTrait>::type clock = + LAZY_STATIC_INSTANCE_INITIALIZER; + + +Time Time::Now() { + return clock.Pointer()->Now(); +} + + +Time Time::NowFromSystemTime() { + return clock.Pointer()->NowFromSystemTime(); +} + + +// Time between windows epoch and standard epoch. 
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000); + + +Time Time::FromFiletime(FILETIME ft) { + if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) { + return Time(); + } + if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() && + ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) { + return Max(); + } + int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) + + (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10; + return Time(us - kTimeToEpochInMicroseconds); +} + + +FILETIME Time::ToFiletime() const { + DCHECK(us_ >= 0); + FILETIME ft; + if (IsNull()) { + ft.dwLowDateTime = 0; + ft.dwHighDateTime = 0; + return ft; + } + if (IsMax()) { + ft.dwLowDateTime = std::numeric_limits<DWORD>::max(); + ft.dwHighDateTime = std::numeric_limits<DWORD>::max(); + return ft; + } + uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10; + ft.dwLowDateTime = static_cast<DWORD>(us); + ft.dwHighDateTime = static_cast<DWORD>(us >> 32); + return ft; +} + +#elif V8_OS_POSIX + +Time Time::Now() { + struct timeval tv; + int result = gettimeofday(&tv, NULL); + DCHECK_EQ(0, result); + USE(result); + return FromTimeval(tv); +} + + +Time Time::NowFromSystemTime() { + return Now(); +} + + +Time Time::FromTimespec(struct timespec ts) { + DCHECK(ts.tv_nsec >= 0); + DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT + if (ts.tv_nsec == 0 && ts.tv_sec == 0) { + return Time(); + } + if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT + ts.tv_sec == std::numeric_limits<time_t>::max()) { + return Max(); + } + return Time(ts.tv_sec * kMicrosecondsPerSecond + + ts.tv_nsec / kNanosecondsPerMicrosecond); +} + + +struct timespec Time::ToTimespec() const { + struct timespec ts; + if (IsNull()) { + ts.tv_sec = 0; + ts.tv_nsec = 0; + return ts; + } + if (IsMax()) { + ts.tv_sec = std::numeric_limits<time_t>::max(); + ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT + return ts; + 
} + ts.tv_sec = us_ / kMicrosecondsPerSecond; + ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond; + return ts; +} + + +Time Time::FromTimeval(struct timeval tv) { + DCHECK(tv.tv_usec >= 0); + DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond)); + if (tv.tv_usec == 0 && tv.tv_sec == 0) { + return Time(); + } + if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) && + tv.tv_sec == std::numeric_limits<time_t>::max()) { + return Max(); + } + return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec); +} + + +struct timeval Time::ToTimeval() const { + struct timeval tv; + if (IsNull()) { + tv.tv_sec = 0; + tv.tv_usec = 0; + return tv; + } + if (IsMax()) { + tv.tv_sec = std::numeric_limits<time_t>::max(); + tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1); + return tv; + } + tv.tv_sec = us_ / kMicrosecondsPerSecond; + tv.tv_usec = us_ % kMicrosecondsPerSecond; + return tv; +} + +#endif // V8_OS_WIN + + +Time Time::FromJsTime(double ms_since_epoch) { + // The epoch is a valid time, so this constructor doesn't interpret + // 0 as the null time. + if (ms_since_epoch == std::numeric_limits<double>::max()) { + return Max(); + } + return Time( + static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond)); +} + + +double Time::ToJsTime() const { + if (IsNull()) { + // Preserve 0 so the invalid result doesn't depend on the platform. + return 0; + } + if (IsMax()) { + // Preserve max without offset to prevent overflow. + return std::numeric_limits<double>::max(); + } + return static_cast<double>(us_) / kMicrosecondsPerMillisecond; +} + + +#if V8_OS_WIN + +class TickClock { + public: + virtual ~TickClock() {} + virtual int64_t Now() = 0; + virtual bool IsHighResolution() = 0; +}; + + +// Overview of time counters: +// (1) CPU cycle counter. (Retrieved via RDTSC) +// The CPU counter provides the highest resolution time stamp and is the least +// expensive to retrieve. 
However, the CPU counter is unreliable and should not +// be used in production. Its biggest issue is that it is per processor and it +// is not synchronized between processors. Also, on some computers, the counters +// will change frequency due to thermal and power changes, and stop in some +// states. +// +// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high- +// resolution (100 nanoseconds) time stamp but is comparatively more expensive +// to retrieve. What QueryPerformanceCounter actually does is up to the HAL. +// (with some help from ACPI). +// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx +// in the worst case, it gets the counter from the rollover interrupt on the +// programmable interrupt timer. In best cases, the HAL may conclude that the +// RDTSC counter runs at a constant frequency, then it uses that instead. On +// multiprocessor machines, it will try to verify the values returned from +// RDTSC on each processor are consistent with each other, and apply a handful +// of workarounds for known buggy hardware. In other words, QPC is supposed to +// give consistent result on a multiprocessor computer, but it is unreliable in +// reality due to bugs in BIOS or HAL on some, especially old computers. +// With recent updates on HAL and newer BIOS, QPC is getting more reliable but +// it should be used with caution. +// +// (3) System time. The system time provides a low-resolution (typically 10ms +// to 55 milliseconds) time stamp but is comparatively less expensive to +// retrieve and more reliable. 
+class HighResolutionTickClock V8_FINAL : public TickClock { + public: + explicit HighResolutionTickClock(int64_t ticks_per_second) + : ticks_per_second_(ticks_per_second) { + DCHECK_LT(0, ticks_per_second); + } + virtual ~HighResolutionTickClock() {} + + virtual int64_t Now() V8_OVERRIDE { + LARGE_INTEGER now; + BOOL result = QueryPerformanceCounter(&now); + DCHECK(result); + USE(result); + + // Intentionally calculate microseconds in a round about manner to avoid + // overflow and precision issues. Think twice before simplifying! + int64_t whole_seconds = now.QuadPart / ticks_per_second_; + int64_t leftover_ticks = now.QuadPart % ticks_per_second_; + int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) + + ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_); + + // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow() + // will never return 0. + return ticks + 1; + } + + virtual bool IsHighResolution() V8_OVERRIDE { + return true; + } + + private: + int64_t ticks_per_second_; +}; + + +class RolloverProtectedTickClock V8_FINAL : public TickClock { + public: + // We initialize rollover_ms_ to 1 to ensure that we will never + // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below. + RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {} + virtual ~RolloverProtectedTickClock() {} + + virtual int64_t Now() V8_OVERRIDE { + LockGuard<Mutex> lock_guard(&mutex_); + // We use timeGetTime() to implement TimeTicks::Now(), which rolls over + // every ~49.7 days. We try to track rollover ourselves, which works if + // TimeTicks::Now() is called at least every 49 days. 
+ // Note that we do not use GetTickCount() here, since timeGetTime() gives + // more predictable delta values, as described here: + // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx + // timeGetTime() provides 1ms granularity when combined with + // timeBeginPeriod(). If the host application for V8 wants fast timers, it + // can use timeBeginPeriod() to increase the resolution. + DWORD now = timeGetTime(); + if (now < last_seen_now_) { + rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days. + } + last_seen_now_ = now; + return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond; + } + + virtual bool IsHighResolution() V8_OVERRIDE { + return false; + } + + private: + Mutex mutex_; + DWORD last_seen_now_; + int64_t rollover_ms_; +}; + + +static LazyStaticInstance<RolloverProtectedTickClock, + DefaultConstructTrait<RolloverProtectedTickClock>, + ThreadSafeInitOnceTrait>::type tick_clock = + LAZY_STATIC_INSTANCE_INITIALIZER; + + +struct CreateHighResTickClockTrait { + static TickClock* Create() { + // Check if the installed hardware supports a high-resolution performance + // counter, and if not fallback to the low-resolution tick clock. + LARGE_INTEGER ticks_per_second; + if (!QueryPerformanceFrequency(&ticks_per_second)) { + return tick_clock.Pointer(); + } + + // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter + // is unreliable, fallback to the low-resolution tick clock. + CPU cpu; + if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) { + return tick_clock.Pointer(); + } + + return new HighResolutionTickClock(ticks_per_second.QuadPart); + } +}; + + +static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait, + ThreadSafeInitOnceTrait>::type high_res_tick_clock = + LAZY_DYNAMIC_INSTANCE_INITIALIZER; + + +TimeTicks TimeTicks::Now() { + // Make sure we never return 0 here. 
+ TimeTicks ticks(tick_clock.Pointer()->Now()); + DCHECK(!ticks.IsNull()); + return ticks; +} + + +TimeTicks TimeTicks::HighResolutionNow() { + // Make sure we never return 0 here. + TimeTicks ticks(high_res_tick_clock.Pointer()->Now()); + DCHECK(!ticks.IsNull()); + return ticks; +} + + +// static +bool TimeTicks::IsHighResolutionClockWorking() { + return high_res_tick_clock.Pointer()->IsHighResolution(); +} + + +// static +TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); } + + +// static +bool TimeTicks::KernelTimestampAvailable() { return false; } + +#else // V8_OS_WIN + +TimeTicks TimeTicks::Now() { + return HighResolutionNow(); +} + + +TimeTicks TimeTicks::HighResolutionNow() { + int64_t ticks; +#if V8_OS_MACOSX + static struct mach_timebase_info info; + if (info.denom == 0) { + kern_return_t result = mach_timebase_info(&info); + DCHECK_EQ(KERN_SUCCESS, result); + USE(result); + } + ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond * + info.numer / info.denom); +#elif V8_OS_SOLARIS + ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond); +#elif V8_LIBRT_NOT_AVAILABLE + // TODO(bmeurer): This is a temporary hack to support cross-compiling + // Chrome for Android in AOSP. Remove this once AOSP is fixed, also + // cleanup the tools/gyp/v8.gyp file. + struct timeval tv; + int result = gettimeofday(&tv, NULL); + DCHECK_EQ(0, result); + USE(result); + ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec); +#elif V8_OS_POSIX + struct timespec ts; + int result = clock_gettime(CLOCK_MONOTONIC, &ts); + DCHECK_EQ(0, result); + USE(result); + ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond + + ts.tv_nsec / Time::kNanosecondsPerMicrosecond); +#endif // V8_OS_MACOSX + // Make sure we never return 0 here. 
+ return TimeTicks(ticks + 1); +} + + +// static +bool TimeTicks::IsHighResolutionClockWorking() { + return true; +} + + +#if V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE + +class KernelTimestampClock { + public: + KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) { + clock_fd_ = open(kTraceClockDevice, O_RDONLY); + if (clock_fd_ == -1) { + return; + } + clock_id_ = get_clockid(clock_fd_); + } + + virtual ~KernelTimestampClock() { + if (clock_fd_ != -1) { + close(clock_fd_); + } + } + + int64_t Now() { + if (clock_id_ == kClockInvalid) { + return 0; + } + + struct timespec ts; + + clock_gettime(clock_id_, &ts); + return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec; + } + + bool Available() { return clock_id_ != kClockInvalid; } + + private: + static const clockid_t kClockInvalid = -1; + static const char kTraceClockDevice[]; + static const uint64_t kNsecPerSec = 1000000000; + + int clock_fd_; + clockid_t clock_id_; + + static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); } +}; + + +// Timestamp module name +const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock"; + +#else + +class KernelTimestampClock { + public: + KernelTimestampClock() {} + + int64_t Now() { return 0; } + bool Available() { return false; } +}; + +#endif // V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE + +static LazyStaticInstance<KernelTimestampClock, + DefaultConstructTrait<KernelTimestampClock>, + ThreadSafeInitOnceTrait>::type kernel_tick_clock = + LAZY_STATIC_INSTANCE_INITIALIZER; + + +// static +TimeTicks TimeTicks::KernelTimestampNow() { + return TimeTicks(kernel_tick_clock.Pointer()->Now()); +} + + +// static +bool TimeTicks::KernelTimestampAvailable() { + return kernel_tick_clock.Pointer()->Available(); +} + +#endif // V8_OS_WIN + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/platform/time.h nodejs-0.11.15/deps/v8/src/base/platform/time.h --- nodejs-0.11.13/deps/v8/src/base/platform/time.h 1970-01-01 00:00:00.000000000 +0000 
+++ nodejs-0.11.15/deps/v8/src/base/platform/time.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,400 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_PLATFORM_TIME_H_ +#define V8_BASE_PLATFORM_TIME_H_ + +#include <time.h> +#include <limits> + +#include "src/base/macros.h" + +// Forward declarations. +extern "C" { +struct _FILETIME; +struct mach_timespec; +struct timespec; +struct timeval; +} + +namespace v8 { +namespace base { + +class Time; +class TimeTicks; + +// ----------------------------------------------------------------------------- +// TimeDelta +// +// This class represents a duration of time, internally represented in +// microseonds. + +class TimeDelta V8_FINAL { + public: + TimeDelta() : delta_(0) {} + + // Converts units of time to TimeDeltas. + static TimeDelta FromDays(int days); + static TimeDelta FromHours(int hours); + static TimeDelta FromMinutes(int minutes); + static TimeDelta FromSeconds(int64_t seconds); + static TimeDelta FromMilliseconds(int64_t milliseconds); + static TimeDelta FromMicroseconds(int64_t microseconds) { + return TimeDelta(microseconds); + } + static TimeDelta FromNanoseconds(int64_t nanoseconds); + + // Returns the time delta in some unit. The F versions return a floating + // point value, the "regular" versions return a rounded-down value. + // + // InMillisecondsRoundedUp() instead returns an integer that is rounded up + // to the next full millisecond. + int InDays() const; + int InHours() const; + int InMinutes() const; + double InSecondsF() const; + int64_t InSeconds() const; + double InMillisecondsF() const; + int64_t InMilliseconds() const; + int64_t InMillisecondsRoundedUp() const; + int64_t InMicroseconds() const { return delta_; } + int64_t InNanoseconds() const; + + // Converts to/from Mach time specs. 
+ static TimeDelta FromMachTimespec(struct mach_timespec ts); + struct mach_timespec ToMachTimespec() const; + + // Converts to/from POSIX time specs. + static TimeDelta FromTimespec(struct timespec ts); + struct timespec ToTimespec() const; + + TimeDelta& operator=(const TimeDelta& other) { + delta_ = other.delta_; + return *this; + } + + // Computations with other deltas. + TimeDelta operator+(const TimeDelta& other) const { + return TimeDelta(delta_ + other.delta_); + } + TimeDelta operator-(const TimeDelta& other) const { + return TimeDelta(delta_ - other.delta_); + } + + TimeDelta& operator+=(const TimeDelta& other) { + delta_ += other.delta_; + return *this; + } + TimeDelta& operator-=(const TimeDelta& other) { + delta_ -= other.delta_; + return *this; + } + TimeDelta operator-() const { + return TimeDelta(-delta_); + } + + double TimesOf(const TimeDelta& other) const { + return static_cast<double>(delta_) / static_cast<double>(other.delta_); + } + double PercentOf(const TimeDelta& other) const { + return TimesOf(other) * 100.0; + } + + // Computations with ints, note that we only allow multiplicative operations + // with ints, and additive operations with other deltas. + TimeDelta operator*(int64_t a) const { + return TimeDelta(delta_ * a); + } + TimeDelta operator/(int64_t a) const { + return TimeDelta(delta_ / a); + } + TimeDelta& operator*=(int64_t a) { + delta_ *= a; + return *this; + } + TimeDelta& operator/=(int64_t a) { + delta_ /= a; + return *this; + } + int64_t operator/(const TimeDelta& other) const { + return delta_ / other.delta_; + } + + // Comparison operators. 
+ bool operator==(const TimeDelta& other) const { + return delta_ == other.delta_; + } + bool operator!=(const TimeDelta& other) const { + return delta_ != other.delta_; + } + bool operator<(const TimeDelta& other) const { + return delta_ < other.delta_; + } + bool operator<=(const TimeDelta& other) const { + return delta_ <= other.delta_; + } + bool operator>(const TimeDelta& other) const { + return delta_ > other.delta_; + } + bool operator>=(const TimeDelta& other) const { + return delta_ >= other.delta_; + } + + private: + // Constructs a delta given the duration in microseconds. This is private + // to avoid confusion by callers with an integer constructor. Use + // FromSeconds, FromMilliseconds, etc. instead. + explicit TimeDelta(int64_t delta) : delta_(delta) {} + + // Delta in microseconds. + int64_t delta_; +}; + + +// ----------------------------------------------------------------------------- +// Time +// +// This class represents an absolute point in time, internally represented as +// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970. + +class Time V8_FINAL { + public: + static const int64_t kMillisecondsPerSecond = 1000; + static const int64_t kMicrosecondsPerMillisecond = 1000; + static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond * + kMillisecondsPerSecond; + static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60; + static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60; + static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24; + static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7; + static const int64_t kNanosecondsPerMicrosecond = 1000; + static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond * + kMicrosecondsPerSecond; + + // Contains the NULL time. Use Time::Now() to get the current time. + Time() : us_(0) {} + + // Returns true if the time object has not been initialized. 
+ bool IsNull() const { return us_ == 0; } + + // Returns true if the time object is the maximum time. + bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); } + + // Returns the current time. Watch out, the system might adjust its clock + // in which case time will actually go backwards. We don't guarantee that + // times are increasing, or that two calls to Now() won't be the same. + static Time Now(); + + // Returns the current time. Same as Now() except that this function always + // uses system time so that there are no discrepancies between the returned + // time and system time even on virtual environments including our test bot. + // For timing sensitive unittests, this function should be used. + static Time NowFromSystemTime(); + + // Returns the time for epoch in Unix-like system (Jan 1, 1970). + static Time UnixEpoch() { return Time(0); } + + // Returns the maximum time, which should be greater than any reasonable time + // with which we might compare it. + static Time Max() { return Time(std::numeric_limits<int64_t>::max()); } + + // Converts to/from internal values. The meaning of the "internal value" is + // completely up to the implementation, so it should be treated as opaque. + static Time FromInternalValue(int64_t value) { + return Time(value); + } + int64_t ToInternalValue() const { + return us_; + } + + // Converts to/from POSIX time specs. + static Time FromTimespec(struct timespec ts); + struct timespec ToTimespec() const; + + // Converts to/from POSIX time values. + static Time FromTimeval(struct timeval tv); + struct timeval ToTimeval() const; + + // Converts to/from Windows file times. 
+ static Time FromFiletime(struct _FILETIME ft); + struct _FILETIME ToFiletime() const; + + // Converts to/from the Javascript convention for times, a number of + // milliseconds since the epoch: + static Time FromJsTime(double ms_since_epoch); + double ToJsTime() const; + + Time& operator=(const Time& other) { + us_ = other.us_; + return *this; + } + + // Compute the difference between two times. + TimeDelta operator-(const Time& other) const { + return TimeDelta::FromMicroseconds(us_ - other.us_); + } + + // Modify by some time delta. + Time& operator+=(const TimeDelta& delta) { + us_ += delta.InMicroseconds(); + return *this; + } + Time& operator-=(const TimeDelta& delta) { + us_ -= delta.InMicroseconds(); + return *this; + } + + // Return a new time modified by some delta. + Time operator+(const TimeDelta& delta) const { + return Time(us_ + delta.InMicroseconds()); + } + Time operator-(const TimeDelta& delta) const { + return Time(us_ - delta.InMicroseconds()); + } + + // Comparison operators + bool operator==(const Time& other) const { + return us_ == other.us_; + } + bool operator!=(const Time& other) const { + return us_ != other.us_; + } + bool operator<(const Time& other) const { + return us_ < other.us_; + } + bool operator<=(const Time& other) const { + return us_ <= other.us_; + } + bool operator>(const Time& other) const { + return us_ > other.us_; + } + bool operator>=(const Time& other) const { + return us_ >= other.us_; + } + + private: + explicit Time(int64_t us) : us_(us) {} + + // Time in microseconds in UTC. + int64_t us_; +}; + +inline Time operator+(const TimeDelta& delta, const Time& time) { + return time + delta; +} + + +// ----------------------------------------------------------------------------- +// TimeTicks +// +// This class represents an abstract time that is most of the time incrementing +// for use in measuring time durations. It is internally represented in +// microseconds. 
It can not be converted to a human-readable time, but is +// guaranteed not to decrease (if the user changes the computer clock, +// Time::Now() may actually decrease or jump). But note that TimeTicks may +// "stand still", for example if the computer suspended. + +class TimeTicks V8_FINAL { + public: + TimeTicks() : ticks_(0) {} + + // Platform-dependent tick count representing "right now." + // The resolution of this clock is ~1-15ms. Resolution varies depending + // on hardware/operating system configuration. + // This method never returns a null TimeTicks. + static TimeTicks Now(); + + // Returns a platform-dependent high-resolution tick count. Implementation + // is hardware dependent and may or may not return sub-millisecond + // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND + // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED. + // This method never returns a null TimeTicks. + static TimeTicks HighResolutionNow(); + + // Returns true if the high-resolution clock is working on this system. + static bool IsHighResolutionClockWorking(); + + // Returns Linux kernel timestamp for generating profiler events. This method + // returns null TimeTicks if the kernel cannot provide the timestamps (e.g., + // on non-Linux OS or if the kernel module for timestamps is not loaded). + + static TimeTicks KernelTimestampNow(); + static bool KernelTimestampAvailable(); + + // Returns true if this object has not been initialized. + bool IsNull() const { return ticks_ == 0; } + + // Converts to/from internal values. The meaning of the "internal value" is + // completely up to the implementation, so it should be treated as opaque. + static TimeTicks FromInternalValue(int64_t value) { + return TimeTicks(value); + } + int64_t ToInternalValue() const { + return ticks_; + } + + TimeTicks& operator=(const TimeTicks other) { + ticks_ = other.ticks_; + return *this; + } + + // Compute the difference between two times. 
+ TimeDelta operator-(const TimeTicks other) const { + return TimeDelta::FromMicroseconds(ticks_ - other.ticks_); + } + + // Modify by some time delta. + TimeTicks& operator+=(const TimeDelta& delta) { + ticks_ += delta.InMicroseconds(); + return *this; + } + TimeTicks& operator-=(const TimeDelta& delta) { + ticks_ -= delta.InMicroseconds(); + return *this; + } + + // Return a new TimeTicks modified by some delta. + TimeTicks operator+(const TimeDelta& delta) const { + return TimeTicks(ticks_ + delta.InMicroseconds()); + } + TimeTicks operator-(const TimeDelta& delta) const { + return TimeTicks(ticks_ - delta.InMicroseconds()); + } + + // Comparison operators + bool operator==(const TimeTicks& other) const { + return ticks_ == other.ticks_; + } + bool operator!=(const TimeTicks& other) const { + return ticks_ != other.ticks_; + } + bool operator<(const TimeTicks& other) const { + return ticks_ < other.ticks_; + } + bool operator<=(const TimeTicks& other) const { + return ticks_ <= other.ticks_; + } + bool operator>(const TimeTicks& other) const { + return ticks_ > other.ticks_; + } + bool operator>=(const TimeTicks& other) const { + return ticks_ >= other.ticks_; + } + + private: + // Please use Now() to create a new object. This is for internal use + // and testing. Ticks is in microseconds. + explicit TimeTicks(int64_t ticks) : ticks_(ticks) {} + + // Tick count in microseconds. + int64_t ticks_; +}; + +inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) { + return ticks + delta; +} + +} } // namespace v8::base + +#endif // V8_BASE_PLATFORM_TIME_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/qnx-math.h nodejs-0.11.15/deps/v8/src/base/qnx-math.h --- nodejs-0.11.13/deps/v8/src/base/qnx-math.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/qnx-math.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +// Copyright 2013 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_QNX_MATH_H_ +#define V8_QBASE_NX_MATH_H_ + +#include <cmath> + +#undef fpclassify +#undef isfinite +#undef isinf +#undef isnan +#undef isnormal +#undef signbit + +using std::lrint; + +#endif // V8_BASE_QNX_MATH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/safe_conversions.h nodejs-0.11.15/deps/v8/src/base/safe_conversions.h --- nodejs-0.11.13/deps/v8/src/base/safe_conversions.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/safe_conversions.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Slightly adapted for inclusion in V8. +// Copyright 2014 the V8 project authors. All rights reserved. + +#ifndef V8_BASE_SAFE_CONVERSIONS_H_ +#define V8_BASE_SAFE_CONVERSIONS_H_ + +#include <limits> + +#include "src/base/safe_conversions_impl.h" + +namespace v8 { +namespace base { + +// Convenience function that returns true if the supplied value is in range +// for the destination type. +template <typename Dst, typename Src> +inline bool IsValueInRangeForNumericType(Src value) { + return internal::DstRangeRelationToSrcRange<Dst>(value) == + internal::RANGE_VALID; +} + +// checked_cast<> is analogous to static_cast<> for numeric types, +// except that it CHECKs that the specified numeric conversion will not +// overflow or underflow. NaN source will always trigger a CHECK. +template <typename Dst, typename Src> +inline Dst checked_cast(Src value) { + CHECK(IsValueInRangeForNumericType<Dst>(value)); + return static_cast<Dst>(value); +} + +// saturated_cast<> is analogous to static_cast<> for numeric types, except +// that the specified numeric conversion will saturate rather than overflow or +// underflow. 
NaN assignment to an integral will trigger a CHECK condition. +template <typename Dst, typename Src> +inline Dst saturated_cast(Src value) { + // Optimization for floating point values, which already saturate. + if (std::numeric_limits<Dst>::is_iec559) + return static_cast<Dst>(value); + + switch (internal::DstRangeRelationToSrcRange<Dst>(value)) { + case internal::RANGE_VALID: + return static_cast<Dst>(value); + + case internal::RANGE_UNDERFLOW: + return std::numeric_limits<Dst>::min(); + + case internal::RANGE_OVERFLOW: + return std::numeric_limits<Dst>::max(); + + // Should fail only on attempting to assign NaN to a saturated integer. + case internal::RANGE_INVALID: + CHECK(false); + return std::numeric_limits<Dst>::max(); + } + + UNREACHABLE(); + return static_cast<Dst>(value); +} + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_SAFE_CONVERSIONS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/safe_conversions_impl.h nodejs-0.11.15/deps/v8/src/base/safe_conversions_impl.h --- nodejs-0.11.13/deps/v8/src/base/safe_conversions_impl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/safe_conversions_impl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,220 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Slightly adapted for inclusion in V8. +// Copyright 2014 the V8 project authors. All rights reserved. + +#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_ +#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_ + +#include <limits> + +#include "src/base/logging.h" +#include "src/base/macros.h" + +namespace v8 { +namespace base { +namespace internal { + +// The std library doesn't provide a binary max_exponent for integers, however +// we can compute one by adding one to the number of non-sign bits. This allows +// for accurate range comparisons between floating point and integer types. 
+template <typename NumericType> +struct MaxExponent { + static const int value = std::numeric_limits<NumericType>::is_iec559 + ? std::numeric_limits<NumericType>::max_exponent + : (sizeof(NumericType) * 8 + 1 - + std::numeric_limits<NumericType>::is_signed); +}; + +enum IntegerRepresentation { + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_SIGNED +}; + +// A range for a given nunmeric Src type is contained for a given numeric Dst +// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and +// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true. +// We implement this as template specializations rather than simple static +// comparisons to ensure type correctness in our comparisons. +enum NumericRangeRepresentation { + NUMERIC_RANGE_NOT_CONTAINED, + NUMERIC_RANGE_CONTAINED +}; + +// Helper templates to statically determine if our destination type can contain +// maximum and minimum values represented by the source type. + +template < + typename Dst, + typename Src, + IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED, + IntegerRepresentation SrcSign = + std::numeric_limits<Src>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED > +struct StaticDstRangeRelationToSrcRange; + +// Same sign: Dst is guaranteed to contain Src only if its range is equal or +// larger. +template <typename Dst, typename Src, IntegerRepresentation Sign> +struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> { + static const NumericRangeRepresentation value = + MaxExponent<Dst>::value >= MaxExponent<Src>::value + ? NUMERIC_RANGE_CONTAINED + : NUMERIC_RANGE_NOT_CONTAINED; +}; + +// Unsigned to signed: Dst is guaranteed to contain source only if its range is +// larger. 
+template <typename Dst, typename Src> +struct StaticDstRangeRelationToSrcRange<Dst, + Src, + INTEGER_REPRESENTATION_SIGNED, + INTEGER_REPRESENTATION_UNSIGNED> { + static const NumericRangeRepresentation value = + MaxExponent<Dst>::value > MaxExponent<Src>::value + ? NUMERIC_RANGE_CONTAINED + : NUMERIC_RANGE_NOT_CONTAINED; +}; + +// Signed to unsigned: Dst cannot be statically determined to contain Src. +template <typename Dst, typename Src> +struct StaticDstRangeRelationToSrcRange<Dst, + Src, + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_SIGNED> { + static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED; +}; + +enum RangeConstraint { + RANGE_VALID = 0x0, // Value can be represented by the destination type. + RANGE_UNDERFLOW = 0x1, // Value would overflow. + RANGE_OVERFLOW = 0x2, // Value would underflow. + RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN). +}; + +// Helper function for coercing an int back to a RangeContraint. +inline RangeConstraint GetRangeConstraint(int integer_range_constraint) { + DCHECK(integer_range_constraint >= RANGE_VALID && + integer_range_constraint <= RANGE_INVALID); + return static_cast<RangeConstraint>(integer_range_constraint); +} + +// This function creates a RangeConstraint from an upper and lower bound +// check by taking advantage of the fact that only NaN can be out of range in +// both directions at once. +inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound, + bool is_in_lower_bound) { + return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) | + (is_in_lower_bound ? 0 : RANGE_UNDERFLOW)); +} + +template < + typename Dst, + typename Src, + IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED, + IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed + ? 
INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED, + NumericRangeRepresentation DstRange = + StaticDstRangeRelationToSrcRange<Dst, Src>::value > +struct DstRangeRelationToSrcRangeImpl; + +// The following templates are for ranges that must be verified at runtime. We +// split it into checks based on signedness to avoid confusing casts and +// compiler warnings on signed an unsigned comparisons. + +// Dst range is statically determined to contain Src: Nothing to check. +template <typename Dst, + typename Src, + IntegerRepresentation DstSign, + IntegerRepresentation SrcSign> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + DstSign, + SrcSign, + NUMERIC_RANGE_CONTAINED> { + static RangeConstraint Check(Src value) { return RANGE_VALID; } +}; + +// Signed to signed narrowing: Both the upper and lower boundaries may be +// exceeded. +template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_SIGNED, + INTEGER_REPRESENTATION_SIGNED, + NUMERIC_RANGE_NOT_CONTAINED> { + static RangeConstraint Check(Src value) { + return std::numeric_limits<Dst>::is_iec559 + ? GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), + value >= -std::numeric_limits<Dst>::max()) + : GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), + value >= std::numeric_limits<Dst>::min()); + } +}; + +// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded. +template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_UNSIGNED, + NUMERIC_RANGE_NOT_CONTAINED> { + static RangeConstraint Check(Src value) { + return GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), true); + } +}; + +// Unsigned to signed: The upper boundary may be exceeded. 
+template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_SIGNED, + INTEGER_REPRESENTATION_UNSIGNED, + NUMERIC_RANGE_NOT_CONTAINED> { + static RangeConstraint Check(Src value) { + return sizeof(Dst) > sizeof(Src) + ? RANGE_VALID + : GetRangeConstraint( + value <= static_cast<Src>(std::numeric_limits<Dst>::max()), + true); + } +}; + +// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst, +// and any negative value exceeds the lower boundary. +template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_SIGNED, + NUMERIC_RANGE_NOT_CONTAINED> { + static RangeConstraint Check(Src value) { + return (MaxExponent<Dst>::value >= MaxExponent<Src>::value) + ? GetRangeConstraint(true, value >= static_cast<Src>(0)) + : GetRangeConstraint( + value <= static_cast<Src>(std::numeric_limits<Dst>::max()), + value >= static_cast<Src>(0)); + } +}; + +template <typename Dst, typename Src> +inline RangeConstraint DstRangeRelationToSrcRange(Src value) { + // Both source and destination must be numeric. + STATIC_ASSERT(std::numeric_limits<Src>::is_specialized); + STATIC_ASSERT(std::numeric_limits<Dst>::is_specialized); + return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value); +} + +} // namespace internal +} // namespace base +} // namespace v8 + +#endif // V8_BASE_SAFE_CONVERSIONS_IMPL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/safe_math.h nodejs-0.11.15/deps/v8/src/base/safe_math.h --- nodejs-0.11.13/deps/v8/src/base/safe_math.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/safe_math.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,276 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Slightly adapted for inclusion in V8. 
+// Copyright 2014 the V8 project authors. All rights reserved. + +#ifndef V8_BASE_SAFE_MATH_H_ +#define V8_BASE_SAFE_MATH_H_ + +#include "src/base/safe_math_impl.h" + +namespace v8 { +namespace base { +namespace internal { + +// CheckedNumeric implements all the logic and operators for detecting integer +// boundary conditions such as overflow, underflow, and invalid conversions. +// The CheckedNumeric type implicitly converts from floating point and integer +// data types, and contains overloads for basic arithmetic operations (i.e.: +, +// -, *, /, %). +// +// The following methods convert from CheckedNumeric to standard numeric values: +// IsValid() - Returns true if the underlying numeric value is valid (i.e. has +// has not wrapped and is not the result of an invalid conversion). +// ValueOrDie() - Returns the underlying value. If the state is not valid this +// call will crash on a CHECK. +// ValueOrDefault() - Returns the current value, or the supplied default if the +// state is not valid. +// ValueFloating() - Returns the underlying floating point value (valid only +// only for floating point CheckedNumeric types). +// +// Bitwise operations are explicitly not supported, because correct +// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison +// operations are explicitly not supported because they could result in a crash +// on a CHECK condition. You should use patterns like the following for these +// operations: +// Bitwise operation: +// CheckedNumeric<int> checked_int = untrusted_input_value; +// int x = checked_int.ValueOrDefault(0) | kFlagValues; +// Comparison: +// CheckedNumeric<size_t> checked_size; +// CheckedNumeric<int> checked_size = untrusted_input_value; +// checked_size = checked_size + HEADER LENGTH; +// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size) +// Do stuff... +template <typename T> +class CheckedNumeric { + public: + typedef T type; + + CheckedNumeric() {} + + // Copy constructor. 
+ template <typename Src> + CheckedNumeric(const CheckedNumeric<Src>& rhs) + : state_(rhs.ValueUnsafe(), rhs.validity()) {} + + template <typename Src> + CheckedNumeric(Src value, RangeConstraint validity) + : state_(value, validity) {} + + // This is not an explicit constructor because we implicitly upgrade regular + // numerics to CheckedNumerics to make them easier to use. + template <typename Src> + CheckedNumeric(Src value) // NOLINT + : state_(value) { + // Argument must be numeric. + STATIC_ASSERT(std::numeric_limits<Src>::is_specialized); + } + + // IsValid() is the public API to test if a CheckedNumeric is currently valid. + bool IsValid() const { return validity() == RANGE_VALID; } + + // ValueOrDie() The primary accessor for the underlying value. If the current + // state is not valid it will CHECK and crash. + T ValueOrDie() const { + CHECK(IsValid()); + return state_.value(); + } + + // ValueOrDefault(T default_value) A convenience method that returns the + // current value if the state is valid, and the supplied default_value for + // any other state. + T ValueOrDefault(T default_value) const { + return IsValid() ? state_.value() : default_value; + } + + // ValueFloating() - Since floating point values include their validity state, + // we provide an easy method for extracting them directly, without a risk of + // crashing on a CHECK. + T ValueFloating() const { + // Argument must be a floating-point value. + STATIC_ASSERT(std::numeric_limits<T>::is_iec559); + return CheckedNumeric<T>::cast(*this).ValueUnsafe(); + } + + // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for + // tests and to avoid a big matrix of friend operator overloads. But the + // values it returns are likely to change in the future. + // Returns: current validity state (i.e. valid, overflow, underflow, nan). 
+ // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for + // saturation/wrapping so we can expose this state consistently and implement + // saturated arithmetic. + RangeConstraint validity() const { return state_.validity(); } + + // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now + // for tests and to avoid a big matrix of friend operator overloads. But the + // values it returns are likely to change in the future. + // Returns: the raw numeric value, regardless of the current state. + // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for + // saturation/wrapping so we can expose this state consistently and implement + // saturated arithmetic. + T ValueUnsafe() const { return state_.value(); } + + // Prototypes for the supported arithmetic operator overloads. + template <typename Src> CheckedNumeric& operator+=(Src rhs); + template <typename Src> CheckedNumeric& operator-=(Src rhs); + template <typename Src> CheckedNumeric& operator*=(Src rhs); + template <typename Src> CheckedNumeric& operator/=(Src rhs); + template <typename Src> CheckedNumeric& operator%=(Src rhs); + + CheckedNumeric operator-() const { + RangeConstraint validity; + T value = CheckedNeg(state_.value(), &validity); + // Negation is always valid for floating point. + if (std::numeric_limits<T>::is_iec559) + return CheckedNumeric<T>(value); + + validity = GetRangeConstraint(state_.validity() | validity); + return CheckedNumeric<T>(value, validity); + } + + CheckedNumeric Abs() const { + RangeConstraint validity; + T value = CheckedAbs(state_.value(), &validity); + // Absolute value is always valid for floating point. 
+ if (std::numeric_limits<T>::is_iec559) + return CheckedNumeric<T>(value); + + validity = GetRangeConstraint(state_.validity() | validity); + return CheckedNumeric<T>(value, validity); + } + + CheckedNumeric& operator++() { + *this += 1; + return *this; + } + + CheckedNumeric operator++(int) { + CheckedNumeric value = *this; + *this += 1; + return value; + } + + CheckedNumeric& operator--() { + *this -= 1; + return *this; + } + + CheckedNumeric operator--(int) { + CheckedNumeric value = *this; + *this -= 1; + return value; + } + + // These static methods behave like a convenience cast operator targeting + // the desired CheckedNumeric type. As an optimization, a reference is + // returned when Src is the same type as T. + template <typename Src> + static CheckedNumeric<T> cast( + Src u, + typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type = + 0) { + return u; + } + + template <typename Src> + static CheckedNumeric<T> cast( + const CheckedNumeric<Src>& u, + typename enable_if<!is_same<Src, T>::value, int>::type = 0) { + return u; + } + + static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; } + + private: + CheckedNumericState<T> state_; +}; + +// This is the boilerplate for the standard arithmetic operator overloads. A +// macro isn't the prettiest solution, but it beats rewriting these five times. +// Some details worth noting are: +// * We apply the standard arithmetic promotions. +// * We skip range checks for floating points. +// * We skip range checks for destination integers with sufficient range. +// TODO(jschuh): extract these out into templates. +#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \ + /* Binary arithmetic operator for CheckedNumerics of the same type. 
*/ \ + template <typename T> \ + CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP( \ + const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) { \ + typedef typename ArithmeticPromotion<T>::type Promotion; \ + /* Floating point always takes the fast path */ \ + if (std::numeric_limits<T>::is_iec559) \ + return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \ + if (IsIntegerArithmeticSafe<Promotion, T, T>::value) \ + return CheckedNumeric<Promotion>( \ + lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \ + GetRangeConstraint(rhs.validity() | lhs.validity())); \ + RangeConstraint validity = RANGE_VALID; \ + T result = Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()), \ + static_cast<Promotion>(rhs.ValueUnsafe()), \ + &validity); \ + return CheckedNumeric<Promotion>( \ + result, \ + GetRangeConstraint(validity | lhs.validity() | rhs.validity())); \ + } \ + /* Assignment arithmetic operator implementation from CheckedNumeric. */ \ + template <typename T> \ + template <typename Src> \ + CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) { \ + *this = CheckedNumeric<T>::cast(*this) OP CheckedNumeric<Src>::cast(rhs); \ + return *this; \ + } \ + /* Binary arithmetic operator for CheckedNumeric of different type. */ \ + template <typename T, typename Src> \ + CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \ + const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) { \ + typedef typename ArithmeticPromotion<T, Src>::type Promotion; \ + if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \ + return CheckedNumeric<Promotion>( \ + lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \ + GetRangeConstraint(rhs.validity() | lhs.validity())); \ + return CheckedNumeric<Promotion>::cast(lhs) \ + OP CheckedNumeric<Promotion>::cast(rhs); \ + } \ + /* Binary arithmetic operator for left CheckedNumeric and right numeric. 
*/ \ + template <typename T, typename Src> \ + CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \ + const CheckedNumeric<T>& lhs, Src rhs) { \ + typedef typename ArithmeticPromotion<T, Src>::type Promotion; \ + if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \ + return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs, \ + lhs.validity()); \ + return CheckedNumeric<Promotion>::cast(lhs) \ + OP CheckedNumeric<Promotion>::cast(rhs); \ + } \ + /* Binary arithmetic operator for right numeric and left CheckedNumeric. */ \ + template <typename T, typename Src> \ + CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \ + Src lhs, const CheckedNumeric<T>& rhs) { \ + typedef typename ArithmeticPromotion<T, Src>::type Promotion; \ + if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \ + return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(), \ + rhs.validity()); \ + return CheckedNumeric<Promotion>::cast(lhs) \ + OP CheckedNumeric<Promotion>::cast(rhs); \ + } + +BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += ) +BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= ) +BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= ) +BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= ) +BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= ) + +#undef BASE_NUMERIC_ARITHMETIC_OPERATORS + +} // namespace internal + +using internal::CheckedNumeric; + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_SAFE_MATH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/safe_math_impl.h nodejs-0.11.15/deps/v8/src/base/safe_math_impl.h --- nodejs-0.11.13/deps/v8/src/base/safe_math_impl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/safe_math_impl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,531 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Slightly adapted for inclusion in V8. 
+// Copyright 2014 the V8 project authors. All rights reserved. + +#ifndef V8_BASE_SAFE_MATH_IMPL_H_ +#define V8_BASE_SAFE_MATH_IMPL_H_ + +#include <stdint.h> + +#include <cmath> +#include <cstdlib> +#include <limits> + +#include "src/base/macros.h" +#include "src/base/safe_conversions.h" + +namespace v8 { +namespace base { +namespace internal { + + +// From Chromium's base/template_util.h: + +template<class T, T v> +struct integral_constant { + static const T value = v; + typedef T value_type; + typedef integral_constant<T, v> type; +}; + +template <class T, T v> const T integral_constant<T, v>::value; + +typedef integral_constant<bool, true> true_type; +typedef integral_constant<bool, false> false_type; + +template <class T, class U> struct is_same : public false_type {}; +template <class T> struct is_same<T, T> : true_type {}; + +template<bool B, class T = void> +struct enable_if {}; + +template<class T> +struct enable_if<true, T> { typedef T type; }; + +// </template_util.h> + + +// Everything from here up to the floating point operations is portable C++, +// but it may not be fast. This code could be split based on +// platform/architecture and replaced with potentially faster implementations. + +// Integer promotion templates used by the portable checked integer arithmetic. 
+template <size_t Size, bool IsSigned> +struct IntegerForSizeAndSign; +template <> +struct IntegerForSizeAndSign<1, true> { + typedef int8_t type; +}; +template <> +struct IntegerForSizeAndSign<1, false> { + typedef uint8_t type; +}; +template <> +struct IntegerForSizeAndSign<2, true> { + typedef int16_t type; +}; +template <> +struct IntegerForSizeAndSign<2, false> { + typedef uint16_t type; +}; +template <> +struct IntegerForSizeAndSign<4, true> { + typedef int32_t type; +}; +template <> +struct IntegerForSizeAndSign<4, false> { + typedef uint32_t type; +}; +template <> +struct IntegerForSizeAndSign<8, true> { + typedef int64_t type; +}; +template <> +struct IntegerForSizeAndSign<8, false> { + typedef uint64_t type; +}; + +// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to +// support 128-bit math, then the ArithmeticPromotion template below will need +// to be updated (or more likely replaced with a decltype expression). + +template <typename Integer> +struct UnsignedIntegerForSize { + typedef typename enable_if< + std::numeric_limits<Integer>::is_integer, + typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type; +}; + +template <typename Integer> +struct SignedIntegerForSize { + typedef typename enable_if< + std::numeric_limits<Integer>::is_integer, + typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type; +}; + +template <typename Integer> +struct TwiceWiderInteger { + typedef typename enable_if< + std::numeric_limits<Integer>::is_integer, + typename IntegerForSizeAndSign< + sizeof(Integer) * 2, + std::numeric_limits<Integer>::is_signed>::type>::type type; +}; + +template <typename Integer> +struct PositionOfSignBit { + static const typename enable_if<std::numeric_limits<Integer>::is_integer, + size_t>::type value = 8 * sizeof(Integer) - 1; +}; + +// Helper templates for integer manipulations. 
+ +template <typename T> +bool HasSignBit(T x) { + // Cast to unsigned since right shift on signed is undefined. + return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >> + PositionOfSignBit<T>::value); +} + +// This wrapper undoes the standard integer promotions. +template <typename T> +T BinaryComplement(T x) { + return ~x; +} + +// Here are the actual portable checked integer math implementations. +// TODO(jschuh): Break this code out from the enable_if pattern and find a clean +// way to coalesce things into the CheckedNumericState specializations below. + +template <typename T> +typename enable_if<std::numeric_limits<T>::is_integer, T>::type +CheckedAdd(T x, T y, RangeConstraint* validity) { + // Since the value of x+y is undefined if we have a signed type, we compute + // it using the unsigned type of the same size. + typedef typename UnsignedIntegerForSize<T>::type UnsignedDst; + UnsignedDst ux = static_cast<UnsignedDst>(x); + UnsignedDst uy = static_cast<UnsignedDst>(y); + UnsignedDst uresult = ux + uy; + // Addition is valid if the sign of (x + y) is equal to either that of x or + // that of y. + if (std::numeric_limits<T>::is_signed) { + if (HasSignBit(BinaryComplement((uresult ^ ux) & (uresult ^ uy)))) + *validity = RANGE_VALID; + else // Direction of wrap is inverse of result sign. + *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW; + + } else { // Unsigned is either valid or overflow. + *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW; + } + return static_cast<T>(uresult); +} + +template <typename T> +typename enable_if<std::numeric_limits<T>::is_integer, T>::type +CheckedSub(T x, T y, RangeConstraint* validity) { + // Since the value of x+y is undefined if we have a signed type, we compute + // it using the unsigned type of the same size. 
+ typedef typename UnsignedIntegerForSize<T>::type UnsignedDst; + UnsignedDst ux = static_cast<UnsignedDst>(x); + UnsignedDst uy = static_cast<UnsignedDst>(y); + UnsignedDst uresult = ux - uy; + // Subtraction is valid if either x and y have same sign, or (x-y) and x have + // the same sign. + if (std::numeric_limits<T>::is_signed) { + if (HasSignBit(BinaryComplement((uresult ^ ux) & (ux ^ uy)))) + *validity = RANGE_VALID; + else // Direction of wrap is inverse of result sign. + *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW; + + } else { // Unsigned is either valid or underflow. + *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW; + } + return static_cast<T>(uresult); +} + +// Integer multiplication is a bit complicated. In the fast case we just +// we just promote to a twice wider type, and range check the result. In the +// slow case we need to manually check that the result won't be truncated by +// checking with division against the appropriate bound. +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && sizeof(T) * 2 <= sizeof(uintmax_t), + T>::type +CheckedMul(T x, T y, RangeConstraint* validity) { + typedef typename TwiceWiderInteger<T>::type IntermediateType; + IntermediateType tmp = + static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y); + *validity = DstRangeRelationToSrcRange<T>(tmp); + return static_cast<T>(tmp); +} + +template <typename T> +typename enable_if<std::numeric_limits<T>::is_integer && + std::numeric_limits<T>::is_signed && + (sizeof(T) * 2 > sizeof(uintmax_t)), + T>::type +CheckedMul(T x, T y, RangeConstraint* validity) { + // if either side is zero then the result will be zero. + if (!(x || y)) { + return RANGE_VALID; + + } else if (x > 0) { + if (y > 0) + *validity = + x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW; + else + *validity = y >= std::numeric_limits<T>::min() / x ? 
RANGE_VALID + : RANGE_UNDERFLOW; + + } else { + if (y > 0) + *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID + : RANGE_UNDERFLOW; + else + *validity = + y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW; + } + + return x * y; +} + +template <typename T> +typename enable_if<std::numeric_limits<T>::is_integer && + !std::numeric_limits<T>::is_signed && + (sizeof(T) * 2 > sizeof(uintmax_t)), + T>::type +CheckedMul(T x, T y, RangeConstraint* validity) { + *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y) + ? RANGE_VALID + : RANGE_OVERFLOW; + return x * y; +} + +// Division just requires a check for an invalid negation on signed min/-1. +template <typename T> +T CheckedDiv( + T x, + T y, + RangeConstraint* validity, + typename enable_if<std::numeric_limits<T>::is_integer, int>::type = 0) { + if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() && + y == static_cast<T>(-1)) { + *validity = RANGE_OVERFLOW; + return std::numeric_limits<T>::min(); + } + + *validity = RANGE_VALID; + return x / y; +} + +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + T>::type +CheckedMod(T x, T y, RangeConstraint* validity) { + *validity = y > 0 ? RANGE_VALID : RANGE_INVALID; + return x % y; +} + +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedMod(T x, T y, RangeConstraint* validity) { + *validity = RANGE_VALID; + return x % y; +} + +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + T>::type +CheckedNeg(T value, RangeConstraint* validity) { + *validity = + value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW; + // The negation of signed min is min, so catch that one. 
+ return -value; +} + +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedNeg(T value, RangeConstraint* validity) { + // The only legal unsigned negation is zero. + *validity = value ? RANGE_UNDERFLOW : RANGE_VALID; + return static_cast<T>( + -static_cast<typename SignedIntegerForSize<T>::type>(value)); +} + +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + T>::type +CheckedAbs(T value, RangeConstraint* validity) { + *validity = + value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW; + return std::abs(value); +} + +template <typename T> +typename enable_if< + std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedAbs(T value, RangeConstraint* validity) { + // Absolute value of a positive is just its identiy. + *validity = RANGE_VALID; + return value; +} + +// These are the floating point stubs that the compiler needs to see. Only the +// negation operation is ever called. +#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \ + template <typename T> \ + typename enable_if<std::numeric_limits<T>::is_iec559, T>::type \ + Checked##NAME(T, T, RangeConstraint*) { \ + UNREACHABLE(); \ + return 0; \ + } + +BASE_FLOAT_ARITHMETIC_STUBS(Add) +BASE_FLOAT_ARITHMETIC_STUBS(Sub) +BASE_FLOAT_ARITHMETIC_STUBS(Mul) +BASE_FLOAT_ARITHMETIC_STUBS(Div) +BASE_FLOAT_ARITHMETIC_STUBS(Mod) + +#undef BASE_FLOAT_ARITHMETIC_STUBS + +template <typename T> +typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg( + T value, + RangeConstraint*) { + return -value; +} + +template <typename T> +typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs( + T value, + RangeConstraint*) { + return std::abs(value); +} + +// Floats carry around their validity state with them, but integers do not. 
So, +// we wrap the underlying value in a specialization in order to hide that detail +// and expose an interface via accessors. +enum NumericRepresentation { + NUMERIC_INTEGER, + NUMERIC_FLOATING, + NUMERIC_UNKNOWN +}; + +template <typename NumericType> +struct GetNumericRepresentation { + static const NumericRepresentation value = + std::numeric_limits<NumericType>::is_integer + ? NUMERIC_INTEGER + : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING + : NUMERIC_UNKNOWN); +}; + +template <typename T, NumericRepresentation type = + GetNumericRepresentation<T>::value> +class CheckedNumericState {}; + +// Integrals require quite a bit of additional housekeeping to manage state. +template <typename T> +class CheckedNumericState<T, NUMERIC_INTEGER> { + private: + T value_; + RangeConstraint validity_; + + public: + template <typename Src, NumericRepresentation type> + friend class CheckedNumericState; + + CheckedNumericState() : value_(0), validity_(RANGE_VALID) {} + + template <typename Src> + CheckedNumericState(Src value, RangeConstraint validity) + : value_(value), + validity_(GetRangeConstraint(validity | + DstRangeRelationToSrcRange<T>(value))) { + // Argument must be numeric. + STATIC_ASSERT(std::numeric_limits<Src>::is_specialized); + } + + // Copy constructor. + template <typename Src> + CheckedNumericState(const CheckedNumericState<Src>& rhs) + : value_(static_cast<T>(rhs.value())), + validity_(GetRangeConstraint( + rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {} + + template <typename Src> + explicit CheckedNumericState( + Src value, + typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type = + 0) + : value_(static_cast<T>(value)), + validity_(DstRangeRelationToSrcRange<T>(value)) {} + + RangeConstraint validity() const { return validity_; } + T value() const { return value_; } +}; + +// Floating points maintain their own validity, but need translation wrappers. 
+template <typename T> +class CheckedNumericState<T, NUMERIC_FLOATING> { + private: + T value_; + + public: + template <typename Src, NumericRepresentation type> + friend class CheckedNumericState; + + CheckedNumericState() : value_(0.0) {} + + template <typename Src> + CheckedNumericState( + Src value, + RangeConstraint validity, + typename enable_if<std::numeric_limits<Src>::is_integer, int>::type = 0) { + switch (DstRangeRelationToSrcRange<T>(value)) { + case RANGE_VALID: + value_ = static_cast<T>(value); + break; + + case RANGE_UNDERFLOW: + value_ = -std::numeric_limits<T>::infinity(); + break; + + case RANGE_OVERFLOW: + value_ = std::numeric_limits<T>::infinity(); + break; + + case RANGE_INVALID: + value_ = std::numeric_limits<T>::quiet_NaN(); + break; + } + } + + template <typename Src> + explicit CheckedNumericState( + Src value, + typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type = + 0) + : value_(static_cast<T>(value)) {} + + // Copy constructor. + template <typename Src> + CheckedNumericState(const CheckedNumericState<Src>& rhs) + : value_(static_cast<T>(rhs.value())) {} + + RangeConstraint validity() const { + return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(), + value_ >= -std::numeric_limits<T>::max()); + } + T value() const { return value_; } +}; + +// For integers less than 128-bit and floats 32-bit or larger, we can distil +// C/C++ arithmetic promotions down to two simple rules: +// 1. The type with the larger maximum exponent always takes precedence. +// 2. The resulting type must be promoted to at least an int. +// The following template specializations implement that promotion logic. +enum ArithmeticPromotionCategory { + LEFT_PROMOTION, + RIGHT_PROMOTION, + DEFAULT_PROMOTION +}; + +template <typename Lhs, + typename Rhs = Lhs, + ArithmeticPromotionCategory Promotion = + (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value) + ? (MaxExponent<Lhs>::value > MaxExponent<int>::value + ? 
LEFT_PROMOTION + : DEFAULT_PROMOTION) + : (MaxExponent<Rhs>::value > MaxExponent<int>::value + ? RIGHT_PROMOTION + : DEFAULT_PROMOTION) > +struct ArithmeticPromotion; + +template <typename Lhs, typename Rhs> +struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> { + typedef Lhs type; +}; + +template <typename Lhs, typename Rhs> +struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> { + typedef Rhs type; +}; + +template <typename Lhs, typename Rhs> +struct ArithmeticPromotion<Lhs, Rhs, DEFAULT_PROMOTION> { + typedef int type; +}; + +// We can statically check if operations on the provided types can wrap, so we +// can skip the checked operations if they're not needed. So, for an integer we +// care if the destination type preserves the sign and is twice the width of +// the source. +template <typename T, typename Lhs, typename Rhs> +struct IsIntegerArithmeticSafe { + static const bool value = !std::numeric_limits<T>::is_iec559 && + StaticDstRangeRelationToSrcRange<T, Lhs>::value == + NUMERIC_RANGE_CONTAINED && + sizeof(T) >= (2 * sizeof(Lhs)) && + StaticDstRangeRelationToSrcRange<T, Rhs>::value != + NUMERIC_RANGE_CONTAINED && + sizeof(T) >= (2 * sizeof(Rhs)); +}; + +} // namespace internal +} // namespace base +} // namespace v8 + +#endif // V8_BASE_SAFE_MATH_IMPL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/utils/random-number-generator.cc nodejs-0.11.15/deps/v8/src/base/utils/random-number-generator.cc --- nodejs-0.11.13/deps/v8/src/base/utils/random-number-generator.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/utils/random-number-generator.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,131 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/utils/random-number-generator.h" + +#include <stdio.h> +#include <stdlib.h> + +#include <new> + +#include "src/base/macros.h" +#include "src/base/platform/mutex.h" +#include "src/base/platform/time.h" + +namespace v8 { +namespace base { + +static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER; +static RandomNumberGenerator::EntropySource entropy_source = NULL; + + +// static +void RandomNumberGenerator::SetEntropySource(EntropySource source) { + LockGuard<Mutex> lock_guard(entropy_mutex.Pointer()); + entropy_source = source; +} + + +RandomNumberGenerator::RandomNumberGenerator() { + // Check if embedder supplied an entropy source. + { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer()); + if (entropy_source != NULL) { + int64_t seed; + if (entropy_source(reinterpret_cast<unsigned char*>(&seed), + sizeof(seed))) { + SetSeed(seed); + return; + } + } + } + +#if V8_OS_CYGWIN || V8_OS_WIN + // Use rand_s() to gather entropy on Windows. See: + // https://code.google.com/p/v8/issues/detail?id=2905 + unsigned first_half, second_half; + errno_t result = rand_s(&first_half); + DCHECK_EQ(0, result); + result = rand_s(&second_half); + DCHECK_EQ(0, result); + SetSeed((static_cast<int64_t>(first_half) << 32) + second_half); +#else + // Gather entropy from /dev/urandom if available. + FILE* fp = fopen("/dev/urandom", "rb"); + if (fp != NULL) { + int64_t seed; + size_t n = fread(&seed, sizeof(seed), 1, fp); + fclose(fp); + if (n == 1) { + SetSeed(seed); + return; + } + } + + // We cannot assume that random() or rand() were seeded + // properly, so instead of relying on random() or rand(), + // we just seed our PRNG using timing data as fallback. 
+ // This is weak entropy, but it's sufficient, because + // it is the responsibility of the embedder to install + // an entropy source using v8::V8::SetEntropySource(), + // which provides reasonable entropy, see: + // https://code.google.com/p/v8/issues/detail?id=2905 + int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24; + seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16; + seed ^= TimeTicks::Now().ToInternalValue() << 8; + SetSeed(seed); +#endif // V8_OS_CYGWIN || V8_OS_WIN +} + + +int RandomNumberGenerator::NextInt(int max) { + DCHECK_LE(0, max); + + // Fast path if max is a power of 2. + if (IS_POWER_OF_TWO(max)) { + return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31); + } + + while (true) { + int rnd = Next(31); + int val = rnd % max; + if (rnd - val + (max - 1) >= 0) { + return val; + } + } +} + + +double RandomNumberGenerator::NextDouble() { + return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) / + static_cast<double>(static_cast<int64_t>(1) << 53); +} + + +void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) { + for (size_t n = 0; n < buflen; ++n) { + static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8)); + } +} + + +int RandomNumberGenerator::Next(int bits) { + DCHECK_LT(0, bits); + DCHECK_GE(32, bits); + // Do unsigned multiplication, which has the intended modulo semantics, while + // signed multiplication would expose undefined behavior. + uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier; + // Assigning a uint64_t to an int64_t is implementation defined, but this + // should be OK. Use a static_cast to explicitly state that we know what we're + // doing. (Famous last words...) 
+ int64_t seed = static_cast<int64_t>((product + kAddend) & kMask); + seed_ = seed; + return static_cast<int>(seed >> (48 - bits)); +} + + +void RandomNumberGenerator::SetSeed(int64_t seed) { + seed_ = (seed ^ kMultiplier) & kMask; +} + +} } // namespace v8::base diff -Nru nodejs-0.11.13/deps/v8/src/base/utils/random-number-generator.h nodejs-0.11.15/deps/v8/src/base/utils/random-number-generator.h --- nodejs-0.11.13/deps/v8/src/base/utils/random-number-generator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/utils/random-number-generator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,89 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_ +#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_ + +#include "src/base/macros.h" + +namespace v8 { +namespace base { + +// ----------------------------------------------------------------------------- +// RandomNumberGenerator +// +// This class is used to generate a stream of pseudorandom numbers. The class +// uses a 48-bit seed, which is modified using a linear congruential formula. +// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.) +// If two instances of RandomNumberGenerator are created with the same seed, and +// the same sequence of method calls is made for each, they will generate and +// return identical sequences of numbers. +// This class uses (probably) weak entropy by default, but it's sufficient, +// because it is the responsibility of the embedder to install an entropy source +// using v8::V8::SetEntropySource(), which provides reasonable entropy, see: +// https://code.google.com/p/v8/issues/detail?id=2905 +// This class is neither reentrant nor threadsafe. 
+ +class RandomNumberGenerator V8_FINAL { + public: + // EntropySource is used as a callback function when V8 needs a source of + // entropy. + typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen); + static void SetEntropySource(EntropySource entropy_source); + + RandomNumberGenerator(); + explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); } + + // Returns the next pseudorandom, uniformly distributed int value from this + // random number generator's sequence. The general contract of |NextInt()| is + // that one int value is pseudorandomly generated and returned. + // All 2^32 possible integer values are produced with (approximately) equal + // probability. + V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT { + return Next(32); + } + + // Returns a pseudorandom, uniformly distributed int value between 0 + // (inclusive) and the specified max value (exclusive), drawn from this random + // number generator's sequence. The general contract of |NextInt(int)| is that + // one int value in the specified range is pseudorandomly generated and + // returned. All max possible int values are produced with (approximately) + // equal probability. + int NextInt(int max) V8_WARN_UNUSED_RESULT; + + // Returns the next pseudorandom, uniformly distributed boolean value from + // this random number generator's sequence. The general contract of + // |NextBoolean()| is that one boolean value is pseudorandomly generated and + // returned. The values true and false are produced with (approximately) equal + // probability. + V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT { + return Next(1) != 0; + } + + // Returns the next pseudorandom, uniformly distributed double value between + // 0.0 and 1.0 from this random number generator's sequence. + // The general contract of |NextDouble()| is that one double value, chosen + // (approximately) uniformly from the range 0.0 (inclusive) to 1.0 + // (exclusive), is pseudorandomly generated and returned. 
+ double NextDouble() V8_WARN_UNUSED_RESULT; + + // Fills the elements of a specified array of bytes with random numbers. + void NextBytes(void* buffer, size_t buflen); + + // Override the current ssed. + void SetSeed(int64_t seed); + + private: + static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d); + static const int64_t kAddend = 0xb; + static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff); + + int Next(int bits) V8_WARN_UNUSED_RESULT; + + int64_t seed_; +}; + +} } // namespace v8::base + +#endif // V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/win32-headers.h nodejs-0.11.15/deps/v8/src/base/win32-headers.h --- nodejs-0.11.13/deps/v8/src/base/win32-headers.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/win32-headers.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,79 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_WIN32_HEADERS_H_ +#define V8_BASE_WIN32_HEADERS_H_ + +#ifndef WIN32_LEAN_AND_MEAN +// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI. +#define WIN32_LEAN_AND_MEAN +#endif +#ifndef NOMINMAX +#define NOMINMAX +#endif +#ifndef NOKERNEL +#define NOKERNEL +#endif +#ifndef NOUSER +#define NOUSER +#endif +#ifndef NOSERVICE +#define NOSERVICE +#endif +#ifndef NOSOUND +#define NOSOUND +#endif +#ifndef NOMCX +#define NOMCX +#endif +// Require Windows XP or higher (this is required for the RtlCaptureContext +// function to be present). +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x501 +#endif + +#include <windows.h> + +#include <mmsystem.h> // For timeGetTime(). +#include <signal.h> // For raise(). +#include <time.h> // For LocalOffset() implementation. +#ifdef __MINGW32__ +// Require Windows XP or higher when compiling with MinGW. This is for MinGW +// header files to expose getaddrinfo. 
+#undef _WIN32_WINNT +#define _WIN32_WINNT 0x501 +#endif // __MINGW32__ +#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) +#include <dbghelp.h> // For SymLoadModule64 and al. +#include <errno.h> // For STRUNCATE +#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) +#include <limits.h> // For INT_MAX and al. +#include <tlhelp32.h> // For Module32First and al. + +// These additional WIN32 includes have to be right here as the #undef's below +// makes it impossible to have them elsewhere. +#include <winsock2.h> +#include <ws2tcpip.h> +#ifndef __MINGW32__ +#include <wspiapi.h> +#endif // __MINGW32__ +#include <process.h> // For _beginthreadex(). +#include <stdlib.h> + +#undef VOID +#undef DELETE +#undef IN +#undef THIS +#undef CONST +#undef NAN +#undef UNKNOWN +#undef NONE +#undef ANY +#undef IGNORE +#undef STRICT +#undef GetObject +#undef CreateSemaphore +#undef Yield + +#endif // V8_BASE_WIN32_HEADERS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/base/win32-math.cc nodejs-0.11.15/deps/v8/src/base/win32-math.cc --- nodejs-0.11.13/deps/v8/src/base/win32-math.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/win32-math.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,82 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please +// refer to The Open Group Base Specification for specification of the correct +// semantics for these functions. +// (http://www.opengroup.org/onlinepubs/000095399/) +#if defined(_MSC_VER) && (_MSC_VER < 1800) + +#include "src/base/win32-headers.h" +#include <float.h> // Required for DBL_MAX and on Win32 for finite() +#include <limits.h> // Required for INT_MAX etc. 
+#include <cmath> +#include "src/base/win32-math.h" + +#include "src/base/logging.h" + + +namespace std { + +// Test for a NaN (not a number) value - usually defined in math.h +int isnan(double x) { + return _isnan(x); +} + + +// Test for infinity - usually defined in math.h +int isinf(double x) { + return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0; +} + + +// Test for finite value - usually defined in math.h +int isfinite(double x) { + return _finite(x); +} + + +// Test if x is less than y and both nominal - usually defined in math.h +int isless(double x, double y) { + return isnan(x) || isnan(y) ? 0 : x < y; +} + + +// Test if x is greater than y and both nominal - usually defined in math.h +int isgreater(double x, double y) { + return isnan(x) || isnan(y) ? 0 : x > y; +} + + +// Classify floating point number - usually defined in math.h +int fpclassify(double x) { + // Use the MS-specific _fpclass() for classification. + int flags = _fpclass(x); + + // Determine class. We cannot use a switch statement because + // the _FPCLASS_ constants are defined as flags. + if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL; + if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO; + if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL; + if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE; + + // All cases should be covered by the code above. + DCHECK(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN)); + return FP_NAN; +} + + +// Test sign - usually defined in math.h +int signbit(double x) { + // We need to take care of the special case of both positive + // and negative versions of zero. 
+ if (x == 0) + return _fpclass(x) & _FPCLASS_NZ; + else + return x < 0; +} + +} // namespace std + +#endif // _MSC_VER diff -Nru nodejs-0.11.13/deps/v8/src/base/win32-math.h nodejs-0.11.15/deps/v8/src/base/win32-math.h --- nodejs-0.11.13/deps/v8/src/base/win32-math.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/base/win32-math.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please +// refer to The Open Group Base Specification for specification of the correct +// semantics for these functions. +// (http://www.opengroup.org/onlinepubs/000095399/) + +#ifndef V8_BASE_WIN32_MATH_H_ +#define V8_BASE_WIN32_MATH_H_ + +#ifndef _MSC_VER +#error Wrong environment, expected MSVC. +#endif // _MSC_VER + +// MSVC 2013+ provides implementations of all standard math functions. +#if (_MSC_VER < 1800) +enum { + FP_NAN, + FP_INFINITE, + FP_ZERO, + FP_SUBNORMAL, + FP_NORMAL +}; + + +namespace std { + +int isfinite(double x); +int isinf(double x); +int isnan(double x); +int isless(double x, double y); +int isgreater(double x, double y); +int fpclassify(double x); +int signbit(double x); + +} // namespace std + +#endif // _MSC_VER < 1800 + +#endif // V8_BASE_WIN32_MATH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/bignum.cc nodejs-0.11.15/deps/v8/src/bignum.cc --- nodejs-0.11.13/deps/v8/src/bignum.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bignum.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "../include/v8stdint.h" -#include "utils.h" -#include "bignum.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/bignum.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -48,7 +26,7 @@ // Guaranteed to lie in one Bigit. 
void Bignum::AssignUInt16(uint16_t value) { - ASSERT(kBigitSize >= BitSize(value)); + DCHECK(kBigitSize >= BitSize(value)); Zero(); if (value == 0) return; @@ -94,7 +72,7 @@ uint64_t result = 0; for (int i = from; i < from + digits_to_read; ++i) { int digit = buffer[i] - '0'; - ASSERT(0 <= digit && digit <= 9); + DCHECK(0 <= digit && digit <= 9); result = result * 10 + digit; } return result; @@ -170,8 +148,8 @@ void Bignum::AddBignum(const Bignum& other) { - ASSERT(IsClamped()); - ASSERT(other.IsClamped()); + DCHECK(IsClamped()); + DCHECK(other.IsClamped()); // If this has a greater exponent than other append zero-bigits to this. // After this call exponent_ <= other.exponent_. @@ -192,7 +170,7 @@ EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_); Chunk carry = 0; int bigit_pos = other.exponent_ - exponent_; - ASSERT(bigit_pos >= 0); + DCHECK(bigit_pos >= 0); for (int i = 0; i < other.used_digits_; ++i) { Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry; bigits_[bigit_pos] = sum & kBigitMask; @@ -207,15 +185,15 @@ bigit_pos++; } used_digits_ = Max(bigit_pos, used_digits_); - ASSERT(IsClamped()); + DCHECK(IsClamped()); } void Bignum::SubtractBignum(const Bignum& other) { - ASSERT(IsClamped()); - ASSERT(other.IsClamped()); + DCHECK(IsClamped()); + DCHECK(other.IsClamped()); // We require this to be bigger than other. - ASSERT(LessEqual(other, *this)); + DCHECK(LessEqual(other, *this)); Align(other); @@ -223,7 +201,7 @@ Chunk borrow = 0; int i; for (i = 0; i < other.used_digits_; ++i) { - ASSERT((borrow == 0) || (borrow == 1)); + DCHECK((borrow == 0) || (borrow == 1)); Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow; bigits_[i + offset] = difference & kBigitMask; borrow = difference >> (kChunkSize - 1); @@ -257,7 +235,7 @@ // The product of a bigit with the factor is of size kBigitSize + 32. // Assert that this number + 1 (for the carry) fits into double chunk. 
- ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1); + DCHECK(kDoubleChunkSize >= kBigitSize + 32 + 1); DoubleChunk carry = 0; for (int i = 0; i < used_digits_; ++i) { DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry; @@ -279,7 +257,7 @@ Zero(); return; } - ASSERT(kBigitSize < 32); + DCHECK(kBigitSize < 32); uint64_t carry = 0; uint64_t low = factor & 0xFFFFFFFF; uint64_t high = factor >> 32; @@ -319,7 +297,7 @@ { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6, kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 }; - ASSERT(exponent >= 0); + DCHECK(exponent >= 0); if (exponent == 0) return; if (used_digits_ == 0) return; @@ -341,7 +319,7 @@ void Bignum::Square() { - ASSERT(IsClamped()); + DCHECK(IsClamped()); int product_length = 2 * used_digits_; EnsureCapacity(product_length); @@ -403,7 +381,7 @@ } // Since the result was guaranteed to lie inside the number the // accumulator must be 0 now. - ASSERT(accumulator == 0); + DCHECK(accumulator == 0); // Don't forget to update the used_digits and the exponent. used_digits_ = product_length; @@ -413,8 +391,8 @@ void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) { - ASSERT(base != 0); - ASSERT(power_exponent >= 0); + DCHECK(base != 0); + DCHECK(power_exponent >= 0); if (power_exponent == 0) { AssignUInt16(1); return; @@ -487,9 +465,9 @@ // Precondition: this/other < 16bit. uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) { - ASSERT(IsClamped()); - ASSERT(other.IsClamped()); - ASSERT(other.used_digits_ > 0); + DCHECK(IsClamped()); + DCHECK(other.IsClamped()); + DCHECK(other.used_digits_ > 0); // Easy case: if we have less digits than the divisor than the result is 0. // Note: this handles the case where this == 0, too. @@ -507,14 +485,14 @@ // This naive approach is extremely inefficient if the this divided other // might be big. This function is implemented for doubleToString where // the result should be small (less than 10). 
- ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16)); + DCHECK(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16)); // Remove the multiples of the first digit. // Example this = 23 and other equals 9. -> Remove 2 multiples. result += bigits_[used_digits_ - 1]; SubtractTimes(other, bigits_[used_digits_ - 1]); } - ASSERT(BigitLength() == other.BigitLength()); + DCHECK(BigitLength() == other.BigitLength()); // Both bignums are at the same length now. // Since other has more than 0 digits we know that the access to @@ -551,7 +529,7 @@ template<typename S> static int SizeInHexChars(S number) { - ASSERT(number > 0); + DCHECK(number > 0); int result = 0; while (number != 0) { number >>= 4; @@ -562,16 +540,16 @@ static char HexCharOfValue(int value) { - ASSERT(0 <= value && value <= 16); + DCHECK(0 <= value && value <= 16); if (value < 10) return value + '0'; return value - 10 + 'A'; } bool Bignum::ToHexString(char* buffer, int buffer_size) const { - ASSERT(IsClamped()); + DCHECK(IsClamped()); // Each bigit must be printable as separate hex-character. 
- ASSERT(kBigitSize % 4 == 0); + DCHECK(kBigitSize % 4 == 0); const int kHexCharsPerBigit = kBigitSize / 4; if (used_digits_ == 0) { @@ -616,8 +594,8 @@ int Bignum::Compare(const Bignum& a, const Bignum& b) { - ASSERT(a.IsClamped()); - ASSERT(b.IsClamped()); + DCHECK(a.IsClamped()); + DCHECK(b.IsClamped()); int bigit_length_a = a.BigitLength(); int bigit_length_b = b.BigitLength(); if (bigit_length_a < bigit_length_b) return -1; @@ -634,9 +612,9 @@ int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) { - ASSERT(a.IsClamped()); - ASSERT(b.IsClamped()); - ASSERT(c.IsClamped()); + DCHECK(a.IsClamped()); + DCHECK(b.IsClamped()); + DCHECK(c.IsClamped()); if (a.BigitLength() < b.BigitLength()) { return PlusCompare(b, a, c); } @@ -713,15 +691,15 @@ } used_digits_ += zero_digits; exponent_ -= zero_digits; - ASSERT(used_digits_ >= 0); - ASSERT(exponent_ >= 0); + DCHECK(used_digits_ >= 0); + DCHECK(exponent_ >= 0); } } void Bignum::BigitsShiftLeft(int shift_amount) { - ASSERT(shift_amount < kBigitSize); - ASSERT(shift_amount >= 0); + DCHECK(shift_amount < kBigitSize); + DCHECK(shift_amount >= 0); Chunk carry = 0; for (int i = 0; i < used_digits_; ++i) { Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount); @@ -743,7 +721,7 @@ b.MultiplyByUInt32(factor); a.SubtractBignum(b); #endif - ASSERT(exponent_ <= other.exponent_); + DCHECK(exponent_ <= other.exponent_); if (factor < 3) { for (int i = 0; i < factor; ++i) { SubtractBignum(other); @@ -768,7 +746,7 @@ borrow = difference >> (kChunkSize - 1); } Clamp(); - ASSERT(Bignum::Equal(a, *this)); + DCHECK(Bignum::Equal(a, *this)); } diff -Nru nodejs-0.11.13/deps/v8/src/bignum-dtoa.cc nodejs-0.11.15/deps/v8/src/bignum-dtoa.cc --- nodejs-0.11.13/deps/v8/src/bignum-dtoa.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bignum-dtoa.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,23 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <cmath> -#include "../include/v8stdint.h" -#include "checks.h" -#include "utils.h" +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/utils.h" -#include "bignum-dtoa.h" +#include "src/bignum-dtoa.h" -#include "bignum.h" -#include "double.h" +#include "src/bignum.h" +#include "src/double.h" namespace v8 { namespace internal { static int NormalizedExponent(uint64_t significand, int exponent) { - ASSERT(significand != 0); + DCHECK(significand != 0); while ((significand & Double::kHiddenBit) == 0) { significand = significand << 1; exponent = exponent - 1; @@ -91,8 +68,8 @@ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, Vector<char> buffer, int* length, int* decimal_point) { - ASSERT(v > 0); - ASSERT(!Double(v).IsSpecial()); + DCHECK(v > 0); + DCHECK(!Double(v).IsSpecial()); uint64_t significand = Double(v).Significand(); bool is_even = (significand & 1) == 0; int exponent = Double(v).Exponent(); @@ -122,7 +99,7 @@ // 4e-324. In this case the denominator needs fewer than 324*4 binary digits. // The maximum double is 1.7976931348623157e308 which needs fewer than // 308*4 binary digits. - ASSERT(Bignum::kMaxSignificantBits >= 324*4); + DCHECK(Bignum::kMaxSignificantBits >= 324*4); bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST); InitialScaledStartValues(v, estimated_power, need_boundary_deltas, &numerator, &denominator, @@ -182,7 +159,7 @@ while (true) { uint16_t digit; digit = numerator->DivideModuloIntBignum(*denominator); - ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive. + DCHECK(digit <= 9); // digit is a uint16_t and therefore always positive. // digit = numerator / denominator (integer division). // numerator = numerator % denominator. buffer[(*length)++] = digit + '0'; @@ -228,7 +205,7 @@ // loop would have stopped earlier. // We still have an assert here in case the preconditions were not // satisfied. 
- ASSERT(buffer[(*length) - 1] != '9'); + DCHECK(buffer[(*length) - 1] != '9'); buffer[(*length) - 1]++; } else { // Halfway case. @@ -239,7 +216,7 @@ if ((buffer[(*length) - 1] - '0') % 2 == 0) { // Round down => Do nothing. } else { - ASSERT(buffer[(*length) - 1] != '9'); + DCHECK(buffer[(*length) - 1] != '9'); buffer[(*length) - 1]++; } } @@ -251,9 +228,9 @@ // Round up. // Note again that the last digit could not be '9' since this would have // stopped the loop earlier. - // We still have an ASSERT here, in case the preconditions were not + // We still have an DCHECK here, in case the preconditions were not // satisfied. - ASSERT(buffer[(*length) -1] != '9'); + DCHECK(buffer[(*length) -1] != '9'); buffer[(*length) - 1]++; return; } @@ -270,11 +247,11 @@ static void GenerateCountedDigits(int count, int* decimal_point, Bignum* numerator, Bignum* denominator, Vector<char>(buffer), int* length) { - ASSERT(count >= 0); + DCHECK(count >= 0); for (int i = 0; i < count - 1; ++i) { uint16_t digit; digit = numerator->DivideModuloIntBignum(*denominator); - ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive. + DCHECK(digit <= 9); // digit is a uint16_t and therefore always positive. // digit = numerator / denominator (integer division). // numerator = numerator % denominator. buffer[i] = digit + '0'; @@ -327,7 +304,7 @@ } else if (-(*decimal_point) == requested_digits) { // We only need to verify if the number rounds down or up. // Ex: 0.04 and 0.06 with requested_digits == 1. - ASSERT(*decimal_point == -requested_digits); + DCHECK(*decimal_point == -requested_digits); // Initially the fraction lies in range (1, 10]. Multiply the denominator // by 10 so that we can compare more easily. denominator->Times10(); @@ -406,7 +383,7 @@ Bignum* numerator, Bignum* denominator, Bignum* delta_minus, Bignum* delta_plus) { // A positive exponent implies a positive power. 
- ASSERT(estimated_power >= 0); + DCHECK(estimated_power >= 0); // Since the estimated_power is positive we simply multiply the denominator // by 10^estimated_power. @@ -525,7 +502,7 @@ // numerator = v * 10^-estimated_power * 2 * 2^-exponent. // Remember: numerator has been abused as power_ten. So no need to assign it // to itself. - ASSERT(numerator == power_ten); + DCHECK(numerator == power_ten); numerator->MultiplyByUInt64(significand); // denominator = 2 * 2^-exponent with exponent < 0. diff -Nru nodejs-0.11.13/deps/v8/src/bignum-dtoa.h nodejs-0.11.15/deps/v8/src/bignum-dtoa.h --- nodejs-0.11.13/deps/v8/src/bignum-dtoa.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bignum-dtoa.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_BIGNUM_DTOA_H_ #define V8_BIGNUM_DTOA_H_ diff -Nru nodejs-0.11.13/deps/v8/src/bignum.h nodejs-0.11.15/deps/v8/src/bignum.h --- nodejs-0.11.13/deps/v8/src/bignum.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bignum.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_BIGNUM_H_ #define V8_BIGNUM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/bootstrapper.cc nodejs-0.11.15/deps/v8/src/bootstrapper.cc --- nodejs-0.11.13/deps/v8/src/bootstrapper.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bootstrapper.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,57 +1,24 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "accessors.h" -#include "api.h" -#include "bootstrapper.h" -#include "compiler.h" -#include "debug.h" -#include "execution.h" -#include "global-handles.h" -#include "isolate-inl.h" -#include "macro-assembler.h" -#include "natives.h" -#include "objects-visiting.h" -#include "platform.h" -#include "snapshot.h" -#include "trig-table.h" -#include "extensions/externalize-string-extension.h" -#include "extensions/free-buffer-extension.h" -#include "extensions/gc-extension.h" -#include "extensions/statistics-extension.h" -#include "extensions/trigger-failure-extension.h" -#include "code-stubs.h" +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/bootstrapper.h" + +#include "src/accessors.h" +#include "src/code-stubs.h" +#include "src/extensions/externalize-string-extension.h" +#include "src/extensions/free-buffer-extension.h" +#include "src/extensions/gc-extension.h" +#include "src/extensions/statistics-extension.h" +#include "src/extensions/trigger-failure-extension.h" +#include "src/isolate-inl.h" +#include "src/natives.h" +#include "src/snapshot.h" +#include "third_party/fdlibm/fdlibm.h" namespace v8 { namespace internal { - NativesExternalStringResource::NativesExternalStringResource( Bootstrapper* bootstrapper, const char* source, @@ -77,7 +44,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) { - ASSERT(0 <= index && index < Natives::GetBuiltinsCount()); + DCHECK(0 <= index && index < Natives::GetBuiltinsCount()); Heap* heap = isolate_->heap(); if (heap->natives_source_cache()->get(index)->IsUndefined()) { // We can use external strings for the natives. @@ -86,10 +53,10 @@ new NativesExternalStringResource(this, source.start(), source.length()); - Handle<String> source_code = - isolate_->factory()->NewExternalStringFromAscii(resource); // We do not expect this to throw an exception. Change this if it does. - CHECK_NOT_EMPTY_HANDLE(isolate_, source_code); + Handle<String> source_code = + isolate_->factory()->NewExternalStringFromAscii( + resource).ToHandleChecked(); heap->natives_source_cache()->set(index, *source_code); } Handle<Object> cached_source(heap->natives_source_cache()->get(index), @@ -154,7 +121,7 @@ void Bootstrapper::TearDown() { if (delete_these_non_arrays_on_tear_down_ != NULL) { int len = delete_these_non_arrays_on_tear_down_->length(); - ASSERT(len < 24); // Don't use this mechanism for unbounded allocations. + DCHECK(len < 27); // Don't use this mechanism for unbounded allocations. 
for (int i = 0; i < len; i++) { delete delete_these_non_arrays_on_tear_down_->at(i); delete_these_non_arrays_on_tear_down_->at(i) = NULL; @@ -165,7 +132,7 @@ if (delete_these_arrays_on_tear_down_ != NULL) { int len = delete_these_arrays_on_tear_down_->length(); - ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations. + DCHECK(len < 1000); // Don't use this mechanism for unbounded allocations. for (int i = 0; i < len; i++) { delete[] delete_these_arrays_on_tear_down_->at(i); delete_these_arrays_on_tear_down_->at(i) = NULL; @@ -181,8 +148,8 @@ class Genesis BASE_EMBEDDED { public: Genesis(Isolate* isolate, - Handle<Object> global_object, - v8::Handle<v8::ObjectTemplate> global_template, + MaybeHandle<JSGlobalProxy> maybe_global_proxy, + v8::Handle<v8::ObjectTemplate> global_proxy_template, v8::ExtensionConfiguration* extensions); ~Genesis() { } @@ -200,7 +167,9 @@ // Creates the empty function. Used for creating a context from scratch. Handle<JSFunction> CreateEmptyFunction(Isolate* isolate); // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3 - Handle<JSFunction> GetThrowTypeErrorFunction(); + Handle<JSFunction> GetStrictPoisonFunction(); + // Poison for sloppy generator function arguments/callee. + Handle<JSFunction> GetGeneratorPoisonFunction(); void CreateStrictModeFunctionMaps(Handle<JSFunction> empty); @@ -214,34 +183,36 @@ // we have to used the deserialized ones that are linked together with the // rest of the context snapshot. Handle<JSGlobalProxy> CreateNewGlobals( - v8::Handle<v8::ObjectTemplate> global_template, - Handle<Object> global_object, - Handle<GlobalObject>* global_proxy_out); + v8::Handle<v8::ObjectTemplate> global_proxy_template, + MaybeHandle<JSGlobalProxy> maybe_global_proxy, + Handle<GlobalObject>* global_object_out); // Hooks the given global proxy into the context. If the context was created // by deserialization then this will unhook the global proxy that was // deserialized, leaving the GC to pick it up. 
- void HookUpGlobalProxy(Handle<GlobalObject> inner_global, + void HookUpGlobalProxy(Handle<GlobalObject> global_object, Handle<JSGlobalProxy> global_proxy); - // Similarly, we want to use the inner global that has been created by the - // templates passed through the API. The inner global from the snapshot is - // detached from the other objects in the snapshot. - void HookUpInnerGlobal(Handle<GlobalObject> inner_global); + // Similarly, we want to use the global that has been created by the templates + // passed through the API. The global from the snapshot is detached from the + // other objects in the snapshot. + void HookUpGlobalObject(Handle<GlobalObject> global_object); // New context initialization. Used for creating a context from scratch. - void InitializeGlobal(Handle<GlobalObject> inner_global, + void InitializeGlobal(Handle<GlobalObject> global_object, Handle<JSFunction> empty_function); void InitializeExperimentalGlobal(); // Installs the contents of the native .js files on the global objects. // Used for creating a context from scratch. 
void InstallNativeFunctions(); - void InstallExperimentalBuiltinFunctionIds(); void InstallExperimentalNativeFunctions(); Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins, const char* name, ElementsKind elements_kind); bool InstallNatives(); - Handle<JSFunction> InstallTypedArray(const char* name, - ElementsKind elementsKind); + void InstallTypedArray( + const char* name, + ElementsKind elements_kind, + Handle<JSFunction>* fun, + Handle<Map>* external_map); bool InstallExperimentalNatives(); void InstallBuiltinFunctionIds(); void InstallJSFunctionResultCaches(); @@ -281,7 +252,8 @@ bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins); bool ConfigureApiObject(Handle<JSObject> object, Handle<ObjectTemplateInfo> object_template); - bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template); + bool ConfigureGlobalObjects( + v8::Handle<v8::ObjectTemplate> global_proxy_template); // Migrates all properties from the 'from' object to the 'to' // object and overrides the prototype in 'to' with the one from @@ -290,24 +262,32 @@ void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to); void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to); - enum PrototypePropertyMode { - DONT_ADD_PROTOTYPE, - ADD_READONLY_PROTOTYPE, - ADD_WRITEABLE_PROTOTYPE + enum FunctionMode { + // With prototype. + FUNCTION_WITH_WRITEABLE_PROTOTYPE, + FUNCTION_WITH_READONLY_PROTOTYPE, + // Without prototype. 
+ FUNCTION_WITHOUT_PROTOTYPE, + BOUND_FUNCTION }; - Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode); + static bool IsFunctionModeWithPrototype(FunctionMode function_mode) { + return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE || + function_mode == FUNCTION_WITH_READONLY_PROTOTYPE); + } + + Handle<Map> CreateFunctionMap(FunctionMode function_mode); void SetFunctionInstanceDescriptor(Handle<Map> map, - PrototypePropertyMode prototypeMode); + FunctionMode function_mode); void MakeFunctionInstancePrototypeWritable(); Handle<Map> CreateStrictFunctionMap( - PrototypePropertyMode prototype_mode, + FunctionMode function_mode, Handle<JSFunction> empty_function); void SetStrictFunctionInstanceDescriptor(Handle<Map> map, - PrototypePropertyMode propertyMode); + FunctionMode function_mode); static bool CompileBuiltin(Isolate* isolate, int index); static bool CompileExperimentalBuiltin(Isolate* isolate, int index); @@ -332,7 +312,8 @@ // prototype, maps. Handle<Map> sloppy_function_map_writable_prototype_; Handle<Map> strict_function_map_writable_prototype_; - Handle<JSFunction> throw_type_error_function; + Handle<JSFunction> strict_poison_function; + Handle<JSFunction> generator_poison_function; BootstrapperActive active_; friend class Bootstrapper; @@ -346,11 +327,12 @@ Handle<Context> Bootstrapper::CreateEnvironment( - Handle<Object> global_object, - v8::Handle<v8::ObjectTemplate> global_template, + MaybeHandle<JSGlobalProxy> maybe_global_proxy, + v8::Handle<v8::ObjectTemplate> global_proxy_template, v8::ExtensionConfiguration* extensions) { HandleScope scope(isolate_); - Genesis genesis(isolate_, global_object, global_template, extensions); + Genesis genesis( + isolate_, maybe_global_proxy, global_proxy_template, extensions); Handle<Context> env = genesis.result(); if (env.is_null() || !InstallExtensions(env, extensions)) { return Handle<Context>(); @@ -361,11 +343,10 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> 
proto) { // object.__proto__ = proto; - Factory* factory = object->GetIsolate()->factory(); - Handle<Map> old_to_map = Handle<Map>(object->map()); - Handle<Map> new_to_map = factory->CopyMap(old_to_map); - new_to_map->set_prototype(*proto); - object->set_map(*new_to_map); + Handle<Map> old_map = Handle<Map>(object->map()); + Handle<Map> new_map = Map::Copy(old_map); + new_map->set_prototype(*proto); + JSObject::MigrateToMap(object, new_map); } @@ -374,6 +355,7 @@ Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy())); global_proxy->set_native_context(*factory->null_value()); SetObjectPrototype(global_proxy, factory->null_value()); + global_proxy->map()->set_constructor(*factory->null_value()); } @@ -381,22 +363,17 @@ const char* name, InstanceType type, int instance_size, - Handle<JSObject> prototype, - Builtins::Name call, - bool install_initial_map, - bool set_instance_class_name) { + MaybeHandle<JSObject> maybe_prototype, + Builtins::Name call) { Isolate* isolate = target->GetIsolate(); Factory* factory = isolate->factory(); Handle<String> internalized_name = factory->InternalizeUtf8String(name); Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call)); - Handle<JSFunction> function = prototype.is_null() ? - factory->NewFunctionWithoutPrototype(internalized_name, call_code) : - factory->NewFunctionWithPrototype(internalized_name, - type, - instance_size, - prototype, - call_code, - install_initial_map); + Handle<JSObject> prototype; + Handle<JSFunction> function = maybe_prototype.ToHandle(&prototype) + ? 
factory->NewFunction(internalized_name, call_code, prototype, + type, instance_size) + : factory->NewFunctionWithoutPrototype(internalized_name, call_code); PropertyAttributes attributes; if (target->IsJSBuiltinsObject()) { attributes = @@ -404,10 +381,8 @@ } else { attributes = DONT_ENUM; } - CHECK_NOT_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - target, internalized_name, function, attributes)); - if (set_instance_class_name) { + JSObject::AddProperty(target, internalized_name, function, attributes); + if (target->IsJSGlobalObject()) { function->shared()->set_instance_class_name(*internalized_name); } function->shared()->set_native(true); @@ -416,54 +391,58 @@ void Genesis::SetFunctionInstanceDescriptor( - Handle<Map> map, PrototypePropertyMode prototypeMode) { - int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5; - Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size)); - DescriptorArray::WhitenessWitness witness(*descriptors); - - Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength)); - Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName)); - Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments)); - Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller)); - Handle<Foreign> prototype; - if (prototypeMode != DONT_ADD_PROTOTYPE) { - prototype = factory()->NewForeign(&Accessors::FunctionPrototype); - } + Handle<Map> map, FunctionMode function_mode) { + int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4; + Map::EnsureDescriptorSlack(map, size); + PropertyAttributes attribs = static_cast<PropertyAttributes>( DONT_ENUM | DONT_DELETE | READ_ONLY); - map->set_instance_descriptors(*descriptors); + Handle<AccessorInfo> length = + Accessors::FunctionLengthInfo(isolate(), attribs); { // Add length. 
- CallbacksDescriptor d(*factory()->length_string(), *length, attribs); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(length->name())), + length, attribs); + map->AppendDescriptor(&d); } + Handle<AccessorInfo> name = + Accessors::FunctionNameInfo(isolate(), attribs); { // Add name. - CallbacksDescriptor d(*factory()->name_string(), *name, attribs); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())), + name, attribs); + map->AppendDescriptor(&d); } + Handle<AccessorInfo> args = + Accessors::FunctionArgumentsInfo(isolate(), attribs); { // Add arguments. - CallbacksDescriptor d(*factory()->arguments_string(), *args, attribs); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(args->name())), + args, attribs); + map->AppendDescriptor(&d); } + Handle<AccessorInfo> caller = + Accessors::FunctionCallerInfo(isolate(), attribs); { // Add caller. - CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(caller->name())), + caller, attribs); + map->AppendDescriptor(&d); } - if (prototypeMode != DONT_ADD_PROTOTYPE) { - // Add prototype. 
- if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) { + if (IsFunctionModeWithPrototype(function_mode)) { + if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) { attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY); } - CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs); - map->AppendDescriptor(&d, witness); + Handle<AccessorInfo> prototype = + Accessors::FunctionPrototypeInfo(isolate(), attribs); + CallbacksDescriptor d(Handle<Name>(Name::cast(prototype->name())), + prototype, attribs); + map->AppendDescriptor(&d); } } -Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) { +Handle<Map> Genesis::CreateFunctionMap(FunctionMode function_mode) { Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); - SetFunctionInstanceDescriptor(map, prototype_mode); - map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE); + SetFunctionInstanceDescriptor(map, function_mode); + map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode)); return map; } @@ -475,32 +454,36 @@ // Functions with this map will not have a 'prototype' property, and // can not be used as constructors. Handle<Map> function_without_prototype_map = - CreateFunctionMap(DONT_ADD_PROTOTYPE); + CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE); native_context()->set_sloppy_function_without_prototype_map( *function_without_prototype_map); // Allocate the function map. This map is temporary, used only for processing // of builtins. // Later the map is replaced with writable prototype map, allocated below. - Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE); + Handle<Map> function_map = + CreateFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE); native_context()->set_sloppy_function_map(*function_map); + native_context()->set_sloppy_function_with_readonly_prototype_map( + *function_map); // The final map for functions. Writeable prototype. 
// This map is installed in MakeFunctionInstancePrototypeWritable. sloppy_function_map_writable_prototype_ = - CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE); + CreateFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE); Factory* factory = isolate->factory(); Handle<String> object_name = factory->Object_string(); { // --- O b j e c t --- - Handle<JSFunction> object_fun = - factory->NewFunction(object_name, factory->null_value()); + Handle<JSFunction> object_fun = factory->NewFunction(object_name); Handle<Map> object_function_map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); - object_fun->set_initial_map(*object_function_map); - object_function_map->set_constructor(*object_fun); + JSFunction::SetInitialMap(object_fun, object_function_map, + isolate->factory()->null_value()); + object_function_map->set_unused_property_fields( + JSObject::kInitialGlobalObjectUnusedPropertiesCount); native_context()->set_object_function(*object_fun); @@ -508,6 +491,9 @@ Handle<JSObject> prototype = factory->NewJSObject( isolate->object_function(), TENURED); + Handle<Map> map = Map::Copy(handle(prototype->map())); + map->set_is_prototype_map(true); + prototype->set_map(*map); native_context()->set_initial_object_prototype(*prototype); // For bootstrapping set the array prototype to be the same as the object @@ -521,17 +507,21 @@ // 262 15.3.4. 
Handle<String> empty_string = factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty")); - Handle<JSFunction> empty_function = - factory->NewFunctionWithoutPrototype(empty_string, SLOPPY); + Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction)); + Handle<JSFunction> empty_function = factory->NewFunctionWithoutPrototype( + empty_string, code); + + // Allocate the function map first and then patch the prototype later + Handle<Map> empty_function_map = + CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE); + DCHECK(!empty_function_map->is_dictionary_map()); + empty_function_map->set_prototype( + native_context()->object_function()->prototype()); + empty_function_map->set_is_prototype_map(true); + empty_function->set_map(*empty_function_map); // --- E m p t y --- - Handle<Code> code = - Handle<Code>(isolate->builtins()->builtin( - Builtins::kEmptyFunction)); - empty_function->set_code(*code); - empty_function->shared()->set_code(*code); - Handle<String> source = - factory->NewStringFromOneByte(STATIC_ASCII_VECTOR("() {}")); + Handle<String> source = factory->NewStringFromStaticAscii("() {}"); Handle<Script> script = factory->NewScript(source); script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); empty_function->shared()->set_script(*script); @@ -544,89 +534,107 @@ native_context()->sloppy_function_without_prototype_map()-> set_prototype(*empty_function); sloppy_function_map_writable_prototype_->set_prototype(*empty_function); - - // Allocate the function map first and then patch the prototype later - Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE); - empty_function_map->set_prototype( - native_context()->object_function()->prototype()); - empty_function->set_map(*empty_function_map); return empty_function; } void Genesis::SetStrictFunctionInstanceDescriptor( - Handle<Map> map, PrototypePropertyMode prototypeMode) { - int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 
4 : 5; - Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size)); - DescriptorArray::WhitenessWitness witness(*descriptors); + Handle<Map> map, FunctionMode function_mode) { + int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4; + Map::EnsureDescriptorSlack(map, size); - Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength)); - Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName)); Handle<AccessorPair> arguments(factory()->NewAccessorPair()); Handle<AccessorPair> caller(factory()->NewAccessorPair()); - Handle<Foreign> prototype; - if (prototypeMode != DONT_ADD_PROTOTYPE) { - prototype = factory()->NewForeign(&Accessors::FunctionPrototype); - } PropertyAttributes rw_attribs = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE); PropertyAttributes ro_attribs = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY); - map->set_instance_descriptors(*descriptors); - { // Add length. - CallbacksDescriptor d(*factory()->length_string(), *length, ro_attribs); - map->AppendDescriptor(&d, witness); + // Add length. + if (function_mode == BOUND_FUNCTION) { + Handle<String> length_string = isolate()->factory()->length_string(); + FieldDescriptor d(length_string, 0, ro_attribs, Representation::Tagged()); + map->AppendDescriptor(&d); + } else { + DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE || + function_mode == FUNCTION_WITH_READONLY_PROTOTYPE || + function_mode == FUNCTION_WITHOUT_PROTOTYPE); + Handle<AccessorInfo> length = + Accessors::FunctionLengthInfo(isolate(), ro_attribs); + CallbacksDescriptor d(Handle<Name>(Name::cast(length->name())), + length, ro_attribs); + map->AppendDescriptor(&d); } + Handle<AccessorInfo> name = + Accessors::FunctionNameInfo(isolate(), ro_attribs); { // Add name. 
- CallbacksDescriptor d(*factory()->name_string(), *name, ro_attribs); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())), + name, ro_attribs); + map->AppendDescriptor(&d); } { // Add arguments. - CallbacksDescriptor d(*factory()->arguments_string(), *arguments, + CallbacksDescriptor d(factory()->arguments_string(), arguments, rw_attribs); - map->AppendDescriptor(&d, witness); + map->AppendDescriptor(&d); } { // Add caller. - CallbacksDescriptor d(*factory()->caller_string(), *caller, rw_attribs); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(factory()->caller_string(), caller, rw_attribs); + map->AppendDescriptor(&d); } - if (prototypeMode != DONT_ADD_PROTOTYPE) { + if (IsFunctionModeWithPrototype(function_mode)) { // Add prototype. PropertyAttributes attribs = - prototypeMode == ADD_WRITEABLE_PROTOTYPE ? rw_attribs : ro_attribs; - CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs); - map->AppendDescriptor(&d, witness); + function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ? 
rw_attribs + : ro_attribs; + Handle<AccessorInfo> prototype = + Accessors::FunctionPrototypeInfo(isolate(), attribs); + CallbacksDescriptor d(Handle<Name>(Name::cast(prototype->name())), + prototype, attribs); + map->AppendDescriptor(&d); } } // ECMAScript 5th Edition, 13.2.3 -Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() { - if (throw_type_error_function.is_null()) { +Handle<JSFunction> Genesis::GetStrictPoisonFunction() { + if (strict_poison_function.is_null()) { Handle<String> name = factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("ThrowTypeError")); - throw_type_error_function = - factory()->NewFunctionWithoutPrototype(name, SLOPPY); Handle<Code> code(isolate()->builtins()->builtin( Builtins::kStrictModePoisonPill)); - throw_type_error_function->set_map(native_context()->sloppy_function_map()); - throw_type_error_function->set_code(*code); - throw_type_error_function->shared()->set_code(*code); - throw_type_error_function->shared()->DontAdaptArguments(); + strict_poison_function = factory()->NewFunctionWithoutPrototype(name, code); + strict_poison_function->set_map(native_context()->sloppy_function_map()); + strict_poison_function->shared()->DontAdaptArguments(); + + JSObject::PreventExtensions(strict_poison_function).Assert(); + } + return strict_poison_function; +} + - JSObject::PreventExtensions(throw_type_error_function); +Handle<JSFunction> Genesis::GetGeneratorPoisonFunction() { + if (generator_poison_function.is_null()) { + Handle<String> name = factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("ThrowTypeError")); + Handle<Code> code(isolate()->builtins()->builtin( + Builtins::kGeneratorPoisonPill)); + generator_poison_function = factory()->NewFunctionWithoutPrototype( + name, code); + generator_poison_function->set_map(native_context()->sloppy_function_map()); + generator_poison_function->shared()->DontAdaptArguments(); + + JSObject::PreventExtensions(generator_poison_function).Assert(); } - return throw_type_error_function; 
+ return generator_poison_function; } Handle<Map> Genesis::CreateStrictFunctionMap( - PrototypePropertyMode prototype_mode, + FunctionMode function_mode, Handle<JSFunction> empty_function) { Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); - SetStrictFunctionInstanceDescriptor(map, prototype_mode); - map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE); + SetStrictFunctionInstanceDescriptor(map, function_mode); + map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode)); map->set_prototype(*empty_function); return map; } @@ -635,7 +643,7 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) { // Allocate map for the prototype-less strict mode instances. Handle<Map> strict_function_without_prototype_map = - CreateStrictFunctionMap(DONT_ADD_PROTOTYPE, empty); + CreateStrictFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty); native_context()->set_strict_function_without_prototype_map( *strict_function_without_prototype_map); @@ -643,18 +651,23 @@ // only for processing of builtins. // Later the map is replaced with writable prototype map, allocated below. Handle<Map> strict_function_map = - CreateStrictFunctionMap(ADD_READONLY_PROTOTYPE, empty); + CreateStrictFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE, empty); native_context()->set_strict_function_map(*strict_function_map); // The final map for the strict mode functions. Writeable prototype. // This map is installed in MakeFunctionInstancePrototypeWritable. strict_function_map_writable_prototype_ = - CreateStrictFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty); + CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty); + // Special map for bound functions. + Handle<Map> bound_function_map = + CreateStrictFunctionMap(BOUND_FUNCTION, empty); + native_context()->set_bound_function_map(*bound_function_map); // Complete the callbacks. 
PoisonArgumentsAndCaller(strict_function_without_prototype_map); PoisonArgumentsAndCaller(strict_function_map); PoisonArgumentsAndCaller(strict_function_map_writable_prototype_); + PoisonArgumentsAndCaller(bound_function_map); } @@ -669,23 +682,34 @@ } +static void ReplaceAccessors(Handle<Map> map, + Handle<String> name, + PropertyAttributes attributes, + Handle<AccessorPair> accessor_pair) { + DescriptorArray* descriptors = map->instance_descriptors(); + int idx = descriptors->SearchWithCache(*name, *map); + CallbacksDescriptor descriptor(name, accessor_pair, attributes); + descriptors->Replace(idx, &descriptor); +} + + void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) { - SetAccessors(map, factory()->arguments_string(), GetThrowTypeErrorFunction()); - SetAccessors(map, factory()->caller_string(), GetThrowTypeErrorFunction()); + SetAccessors(map, factory()->arguments_string(), GetStrictPoisonFunction()); + SetAccessors(map, factory()->caller_string(), GetStrictPoisonFunction()); } static void AddToWeakNativeContextList(Context* context) { - ASSERT(context->IsNativeContext()); + DCHECK(context->IsNativeContext()); Heap* heap = context->GetIsolate()->heap(); #ifdef DEBUG { // NOLINT - ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined()); + DCHECK(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined()); // Check that context is not in the list yet. for (Object* current = heap->native_contexts_list(); !current->IsUndefined(); current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) { - ASSERT(current != context); + DCHECK(current != context); } } #endif @@ -712,88 +736,89 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals( - v8::Handle<v8::ObjectTemplate> global_template, - Handle<Object> global_object, - Handle<GlobalObject>* inner_global_out) { - // The argument global_template aka data is an ObjectTemplateInfo. 
+ v8::Handle<v8::ObjectTemplate> global_proxy_template, + MaybeHandle<JSGlobalProxy> maybe_global_proxy, + Handle<GlobalObject>* global_object_out) { + // The argument global_proxy_template aka data is an ObjectTemplateInfo. // It has a constructor pointer that points at global_constructor which is a // FunctionTemplateInfo. - // The global_constructor is used to create or reinitialize the global_proxy. - // The global_constructor also has a prototype_template pointer that points at - // js_global_template which is an ObjectTemplateInfo. + // The global_proxy_constructor is used to create or reinitialize the + // global_proxy. The global_proxy_constructor also has a prototype_template + // pointer that points at js_global_object_template which is an + // ObjectTemplateInfo. // That in turn has a constructor pointer that points at - // js_global_constructor which is a FunctionTemplateInfo. - // js_global_constructor is used to make js_global_function - // js_global_function is used to make the new inner_global. + // js_global_object_constructor which is a FunctionTemplateInfo. + // js_global_object_constructor is used to make js_global_object_function + // js_global_object_function is used to make the new global_object. // // --- G l o b a l --- - // Step 1: Create a fresh inner JSGlobalObject. - Handle<JSFunction> js_global_function; - Handle<ObjectTemplateInfo> js_global_template; - if (!global_template.IsEmpty()) { - // Get prototype template of the global_template. + // Step 1: Create a fresh JSGlobalObject. + Handle<JSFunction> js_global_object_function; + Handle<ObjectTemplateInfo> js_global_object_template; + if (!global_proxy_template.IsEmpty()) { + // Get prototype template of the global_proxy_template. 
Handle<ObjectTemplateInfo> data = - v8::Utils::OpenHandle(*global_template); + v8::Utils::OpenHandle(*global_proxy_template); Handle<FunctionTemplateInfo> global_constructor = Handle<FunctionTemplateInfo>( FunctionTemplateInfo::cast(data->constructor())); Handle<Object> proto_template(global_constructor->prototype_template(), isolate()); if (!proto_template->IsUndefined()) { - js_global_template = + js_global_object_template = Handle<ObjectTemplateInfo>::cast(proto_template); } } - if (js_global_template.is_null()) { + if (js_global_object_template.is_null()) { Handle<String> name = Handle<String>(heap()->empty_string()); Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin( Builtins::kIllegal)); - js_global_function = - factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE, - JSGlobalObject::kSize, code, true); - // Change the constructor property of the prototype of the - // hidden global function to refer to the Object function. Handle<JSObject> prototype = - Handle<JSObject>( - JSObject::cast(js_global_function->instance_prototype())); - CHECK_NOT_EMPTY_HANDLE(isolate(), - JSObject::SetLocalPropertyIgnoreAttributes( - prototype, factory()->constructor_string(), - isolate()->object_function(), NONE)); + factory()->NewFunctionPrototype(isolate()->object_function()); + js_global_object_function = factory()->NewFunction( + name, code, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize); +#ifdef DEBUG + LookupIterator it(prototype, factory()->constructor_string(), + LookupIterator::CHECK_OWN_REAL); + Handle<Object> value = JSReceiver::GetProperty(&it).ToHandleChecked(); + DCHECK(it.IsFound()); + DCHECK_EQ(*isolate()->object_function(), *value); +#endif } else { - Handle<FunctionTemplateInfo> js_global_constructor( - FunctionTemplateInfo::cast(js_global_template->constructor())); - js_global_function = - factory()->CreateApiFunction(js_global_constructor, - factory()->InnerGlobalObject); + Handle<FunctionTemplateInfo> js_global_object_constructor( + 
FunctionTemplateInfo::cast(js_global_object_template->constructor())); + js_global_object_function = + factory()->CreateApiFunction(js_global_object_constructor, + factory()->the_hole_value(), + factory()->GlobalObjectType); } - js_global_function->initial_map()->set_is_hidden_prototype(); - js_global_function->initial_map()->set_dictionary_map(true); - Handle<GlobalObject> inner_global = - factory()->NewGlobalObject(js_global_function); - if (inner_global_out != NULL) { - *inner_global_out = inner_global; + js_global_object_function->initial_map()->set_is_hidden_prototype(); + js_global_object_function->initial_map()->set_dictionary_map(true); + Handle<GlobalObject> global_object = + factory()->NewGlobalObject(js_global_object_function); + if (global_object_out != NULL) { + *global_object_out = global_object; } // Step 2: create or re-initialize the global proxy object. Handle<JSFunction> global_proxy_function; - if (global_template.IsEmpty()) { + if (global_proxy_template.IsEmpty()) { Handle<String> name = Handle<String>(heap()->empty_string()); Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin( Builtins::kIllegal)); - global_proxy_function = - factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE, - JSGlobalProxy::kSize, code, true); + global_proxy_function = factory()->NewFunction( + name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize); } else { Handle<ObjectTemplateInfo> data = - v8::Utils::OpenHandle(*global_template); + v8::Utils::OpenHandle(*global_proxy_template); Handle<FunctionTemplateInfo> global_constructor( FunctionTemplateInfo::cast(data->constructor())); global_proxy_function = factory()->CreateApiFunction(global_constructor, - factory()->OuterGlobalObject); + factory()->the_hole_value(), + factory()->GlobalProxyType); } Handle<String> global_name = factory()->InternalizeOneByteString( @@ -804,88 +829,85 @@ // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects // Return the global proxy. 
- if (global_object.location() != NULL) { - ASSERT(global_object->IsJSGlobalProxy()); - return ReinitializeJSGlobalProxy( - global_proxy_function, - Handle<JSGlobalProxy>::cast(global_object)); + Handle<JSGlobalProxy> global_proxy; + if (maybe_global_proxy.ToHandle(&global_proxy)) { + factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function); } else { - return Handle<JSGlobalProxy>::cast( + global_proxy = Handle<JSGlobalProxy>::cast( factory()->NewJSObject(global_proxy_function, TENURED)); + global_proxy->set_hash(heap()->undefined_value()); } + return global_proxy; } -void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global, +void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object, Handle<JSGlobalProxy> global_proxy) { // Set the native context for the global object. - inner_global->set_native_context(*native_context()); - inner_global->set_global_context(*native_context()); - inner_global->set_global_receiver(*global_proxy); + global_object->set_native_context(*native_context()); + global_object->set_global_context(*native_context()); + global_object->set_global_proxy(*global_proxy); global_proxy->set_native_context(*native_context()); native_context()->set_global_proxy(*global_proxy); } -void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) { - Handle<GlobalObject> inner_global_from_snapshot( +void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object) { + Handle<GlobalObject> global_object_from_snapshot( GlobalObject::cast(native_context()->extension())); Handle<JSBuiltinsObject> builtins_global(native_context()->builtins()); - native_context()->set_extension(*inner_global); - native_context()->set_global_object(*inner_global); - native_context()->set_security_token(*inner_global); + native_context()->set_extension(*global_object); + native_context()->set_global_object(*global_object); + native_context()->set_security_token(*global_object); static const PropertyAttributes attributes = 
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); - ForceSetProperty(builtins_global, - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("global")), - inner_global, - attributes); + Runtime::DefineObjectProperty(builtins_global, + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("global")), + global_object, + attributes).Assert(); // Set up the reference from the global object to the builtins object. - JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global); - TransferNamedProperties(inner_global_from_snapshot, inner_global); - TransferIndexedProperties(inner_global_from_snapshot, inner_global); + JSGlobalObject::cast(*global_object)->set_builtins(*builtins_global); + TransferNamedProperties(global_object_from_snapshot, global_object); + TransferIndexedProperties(global_object_from_snapshot, global_object); } // This is only called if we are not using snapshots. The equivalent -// work in the snapshot case is done in HookUpInnerGlobal. -void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, +// work in the snapshot case is done in HookUpGlobalObject. +void Genesis::InitializeGlobal(Handle<GlobalObject> global_object, Handle<JSFunction> empty_function) { // --- N a t i v e C o n t e x t --- // Use the empty function as closure (no scope info). native_context()->set_closure(*empty_function); native_context()->set_previous(NULL); // Set extension and global object. - native_context()->set_extension(*inner_global); - native_context()->set_global_object(*inner_global); - // Security setup: Set the security token of the global object to - // its the inner global. This makes the security check between two - // different contexts fail by default even in case of global - // object reinitialization. 
- native_context()->set_security_token(*inner_global); + native_context()->set_extension(*global_object); + native_context()->set_global_object(*global_object); + // Security setup: Set the security token of the native context to the global + // object. This makes the security check between two different contexts fail + // by default even in case of global object reinitialization. + native_context()->set_security_token(*global_object); - Isolate* isolate = inner_global->GetIsolate(); + Isolate* isolate = global_object->GetIsolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); Handle<String> object_name = factory->Object_string(); - CHECK_NOT_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - inner_global, object_name, - isolate->object_function(), DONT_ENUM)); + JSObject::AddProperty( + global_object, object_name, isolate->object_function(), DONT_ENUM); - Handle<JSObject> global = Handle<JSObject>(native_context()->global_object()); + Handle<JSObject> global(native_context()->global_object()); // Install global Function object InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize, - empty_function, Builtins::kIllegal, true, true); + empty_function, Builtins::kIllegal); { // --- A r r a y --- Handle<JSFunction> array_function = InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize, isolate->initial_object_prototype(), - Builtins::kArrayCode, true, true); + Builtins::kArrayCode); array_function->shared()->DontAdaptArguments(); array_function->shared()->set_function_data(Smi::FromInt(kArrayCode)); @@ -897,20 +919,19 @@ // This assert protects an optimization in // HGraphBuilder::JSArrayBuilder::EmitMapCode() - ASSERT(initial_map->elements_kind() == GetInitialFastElementsKind()); + DCHECK(initial_map->elements_kind() == GetInitialFastElementsKind()); + Map::EnsureDescriptorSlack(initial_map, 1); - Handle<DescriptorArray> array_descriptors( - factory->NewDescriptorArray(0, 1)); - 
DescriptorArray::WhitenessWitness witness(*array_descriptors); - - Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength)); PropertyAttributes attribs = static_cast<PropertyAttributes>( DONT_ENUM | DONT_DELETE); - initial_map->set_instance_descriptors(*array_descriptors); + Handle<AccessorInfo> array_length = + Accessors::ArrayLengthInfo(isolate, attribs); { // Add length. - CallbacksDescriptor d(*factory->length_string(), *array_length, attribs); - array_function->initial_map()->AppendDescriptor(&d, witness); + CallbacksDescriptor d( + Handle<Name>(Name::cast(array_length->name())), + array_length, attribs); + array_function->initial_map()->AppendDescriptor(&d); } // array_function is used internally. JS code creating array object should @@ -922,7 +943,7 @@ // Cache the array maps, needed by ArrayConstructorStub CacheInitialJSArrayMaps(native_context(), initial_map); ArrayConstructorStub array_constructor_stub(isolate); - Handle<Code> code = array_constructor_stub.GetCode(isolate); + Handle<Code> code = array_constructor_stub.GetCode(); array_function->shared()->set_construct_stub(*code); } @@ -930,7 +951,7 @@ Handle<JSFunction> number_fun = InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); + Builtins::kIllegal); native_context()->set_number_function(*number_fun); } @@ -938,7 +959,7 @@ Handle<JSFunction> boolean_fun = InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); + Builtins::kIllegal); native_context()->set_boolean_function(*boolean_fun); } @@ -946,35 +967,40 @@ Handle<JSFunction> string_fun = InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); + Builtins::kIllegal); string_fun->shared()->set_construct_stub( isolate->builtins()->builtin(Builtins::kStringConstructCode)); 
native_context()->set_string_function(*string_fun); Handle<Map> string_map = Handle<Map>(native_context()->string_function()->initial_map()); - Handle<DescriptorArray> string_descriptors( - factory->NewDescriptorArray(0, 1)); - DescriptorArray::WhitenessWitness witness(*string_descriptors); + Map::EnsureDescriptorSlack(string_map, 1); - Handle<Foreign> string_length( - factory->NewForeign(&Accessors::StringLength)); PropertyAttributes attribs = static_cast<PropertyAttributes>( DONT_ENUM | DONT_DELETE | READ_ONLY); - string_map->set_instance_descriptors(*string_descriptors); + Handle<AccessorInfo> string_length( + Accessors::StringLengthInfo(isolate, attribs)); { // Add length. - CallbacksDescriptor d(*factory->length_string(), *string_length, attribs); - string_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(factory->length_string(), string_length, attribs); + string_map->AppendDescriptor(&d); } } + { + // --- S y m b o l --- + Handle<JSFunction> symbol_fun = InstallFunction( + global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, + isolate->initial_object_prototype(), Builtins::kIllegal); + native_context()->set_symbol_function(*symbol_fun); + } + { // --- D a t e --- // Builtin functions for Date.prototype. 
Handle<JSFunction> date_fun = InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); + Builtins::kIllegal); native_context()->set_date_function(*date_fun); } @@ -985,61 +1011,59 @@ Handle<JSFunction> regexp_fun = InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize, isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); + Builtins::kIllegal); native_context()->set_regexp_function(*regexp_fun); - ASSERT(regexp_fun->has_initial_map()); + DCHECK(regexp_fun->has_initial_map()); Handle<Map> initial_map(regexp_fun->initial_map()); - ASSERT_EQ(0, initial_map->inobject_properties()); + DCHECK_EQ(0, initial_map->inobject_properties()); PropertyAttributes final = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY); - Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 5); - DescriptorArray::WhitenessWitness witness(*descriptors); - initial_map->set_instance_descriptors(*descriptors); + Map::EnsureDescriptorSlack(initial_map, 5); { // ECMA-262, section 15.10.7.1. - FieldDescriptor field(heap->source_string(), + FieldDescriptor field(factory->source_string(), JSRegExp::kSourceFieldIndex, final, Representation::Tagged()); - initial_map->AppendDescriptor(&field, witness); + initial_map->AppendDescriptor(&field); } { // ECMA-262, section 15.10.7.2. - FieldDescriptor field(heap->global_string(), + FieldDescriptor field(factory->global_string(), JSRegExp::kGlobalFieldIndex, final, Representation::Tagged()); - initial_map->AppendDescriptor(&field, witness); + initial_map->AppendDescriptor(&field); } { // ECMA-262, section 15.10.7.3. - FieldDescriptor field(heap->ignore_case_string(), + FieldDescriptor field(factory->ignore_case_string(), JSRegExp::kIgnoreCaseFieldIndex, final, Representation::Tagged()); - initial_map->AppendDescriptor(&field, witness); + initial_map->AppendDescriptor(&field); } { // ECMA-262, section 15.10.7.4. 
- FieldDescriptor field(heap->multiline_string(), + FieldDescriptor field(factory->multiline_string(), JSRegExp::kMultilineFieldIndex, final, Representation::Tagged()); - initial_map->AppendDescriptor(&field, witness); + initial_map->AppendDescriptor(&field); } { // ECMA-262, section 15.10.7.5. PropertyAttributes writable = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE); - FieldDescriptor field(heap->last_index_string(), + FieldDescriptor field(factory->last_index_string(), JSRegExp::kLastIndexFieldIndex, writable, Representation::Tagged()); - initial_map->AppendDescriptor(&field, witness); + initial_map->AppendDescriptor(&field); } initial_map->set_inobject_properties(5); @@ -1050,7 +1074,7 @@ initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map)); // RegExp prototype object is itself a RegExp. - Handle<Map> proto_map = factory->CopyMap(initial_map); + Handle<Map> proto_map = Map::Copy(initial_map); proto_map->set_prototype(native_context()->initial_object_prototype()); Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map); proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, @@ -1064,6 +1088,7 @@ proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER); // It's a Smi. 
+ proto_map->set_is_prototype_map(true); initial_map->set_prototype(*proto); factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto), JSRegExp::IRREGEXP, factory->empty_string(), @@ -1072,35 +1097,37 @@ { // -- J S O N Handle<String> name = factory->InternalizeUtf8String("JSON"); - Handle<JSFunction> cons = factory->NewFunction(name, - factory->the_hole_value()); + Handle<JSFunction> cons = factory->NewFunction(name); JSFunction::SetInstancePrototype(cons, Handle<Object>(native_context()->initial_object_prototype(), isolate)); cons->SetInstanceClassName(*name); Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED); - ASSERT(json_object->IsJSObject()); - CHECK_NOT_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - global, name, json_object, DONT_ENUM)); + DCHECK(json_object->IsJSObject()); + JSObject::AddProperty(global, name, json_object, DONT_ENUM); native_context()->set_json_object(*json_object); } - { // -- A r r a y B u f f e r + { // -- A r r a y B u f f e r Handle<JSFunction> array_buffer_fun = InstallFunction( global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE, JSArrayBuffer::kSizeWithInternalFields, isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); + Builtins::kIllegal); native_context()->set_array_buffer_fun(*array_buffer_fun); } - { // -- T y p e d A r r a y s + { // -- T y p e d A r r a y s #define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ { \ - Handle<JSFunction> fun = InstallTypedArray(#Type "Array", \ - TYPE##_ELEMENTS); \ + Handle<JSFunction> fun; \ + Handle<Map> external_map; \ + InstallTypedArray(#Type "Array", \ + TYPE##_ELEMENTS, \ + &fun, \ + &external_map); \ native_context()->set_##type##_array_fun(*fun); \ + native_context()->set_##type##_array_external_map(*external_map); \ } TYPED_ARRAYS(INSTALL_TYPED_ARRAY) #undef INSTALL_TYPED_ARRAY @@ -1110,103 +1137,102 @@ global, "DataView", JS_DATA_VIEW_TYPE, JSDataView::kSizeWithInternalFields, isolate->initial_object_prototype(), - 
Builtins::kIllegal, true, true); + Builtins::kIllegal); native_context()->set_data_view_fun(*data_view_fun); } - { // -- W e a k M a p - InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize, - isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); - } + // -- M a p + InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize, + isolate->initial_object_prototype(), Builtins::kIllegal); + + // -- S e t + InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize, + isolate->initial_object_prototype(), Builtins::kIllegal); - { // -- W e a k S e t - InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize, - isolate->initial_object_prototype(), - Builtins::kIllegal, true, true); - } + { // Set up the iterator result object + STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2); + Handle<JSFunction> object_function(native_context()->object_function()); + DCHECK(object_function->initial_map()->inobject_properties() == 0); + Handle<Map> iterator_result_map = + Map::Create(object_function, JSGeneratorObject::kResultPropertyCount); + DCHECK(iterator_result_map->inobject_properties() == + JSGeneratorObject::kResultPropertyCount); + Map::EnsureDescriptorSlack(iterator_result_map, + JSGeneratorObject::kResultPropertyCount); - { // --- arguments_boilerplate_ + FieldDescriptor value_descr(factory->value_string(), + JSGeneratorObject::kResultValuePropertyIndex, + NONE, Representation::Tagged()); + iterator_result_map->AppendDescriptor(&value_descr); + + FieldDescriptor done_descr(factory->done_string(), + JSGeneratorObject::kResultDonePropertyIndex, + NONE, Representation::Tagged()); + iterator_result_map->AppendDescriptor(&done_descr); + + iterator_result_map->set_unused_property_fields(0); + DCHECK_EQ(JSGeneratorObject::kResultSize, + iterator_result_map->instance_size()); + native_context()->set_iterator_result_map(*iterator_result_map); + } + + // -- W e a k M a p + InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, 
JSWeakMap::kSize, + isolate->initial_object_prototype(), Builtins::kIllegal); + // -- W e a k S e t + InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize, + isolate->initial_object_prototype(), Builtins::kIllegal); + + { // --- sloppy arguments map // Make sure we can recognize argument objects at runtime. // This is done by introducing an anonymous function with // class_name equals 'Arguments'. Handle<String> arguments_string = factory->InternalizeOneByteString( STATIC_ASCII_VECTOR("Arguments")); - Handle<Code> code = Handle<Code>( - isolate->builtins()->builtin(Builtins::kIllegal)); - Handle<JSObject> prototype = - Handle<JSObject>( - JSObject::cast(native_context()->object_function()->prototype())); - - Handle<JSFunction> function = - factory->NewFunctionWithPrototype(arguments_string, - JS_OBJECT_TYPE, - JSObject::kHeaderSize, - prototype, - code, - false); - ASSERT(!function->has_initial_map()); + Handle<Code> code(isolate->builtins()->builtin(Builtins::kIllegal)); + Handle<JSFunction> function = factory->NewFunctionWithoutPrototype( + arguments_string, code); function->shared()->set_instance_class_name(*arguments_string); - function->shared()->set_expected_nof_properties(2); - Handle<JSObject> result = factory->NewJSObject(function); - native_context()->set_sloppy_arguments_boilerplate(*result); - // Note: length must be added as the first property and - // callee must be added as the second property. - CHECK_NOT_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - result, factory->length_string(), - factory->undefined_value(), DONT_ENUM, - Object::FORCE_TAGGED, FORCE_FIELD)); - CHECK_NOT_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - result, factory->callee_string(), - factory->undefined_value(), DONT_ENUM, - Object::FORCE_TAGGED, FORCE_FIELD)); + Handle<Map> map = + factory->NewMap(JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize); + // Create the descriptor array for the arguments object. 
+ Map::EnsureDescriptorSlack(map, 2); -#ifdef DEBUG - LookupResult lookup(isolate); - result->LocalLookup(heap->callee_string(), &lookup); - ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex); - - result->LocalLookup(heap->length_string(), &lookup); - ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); - - ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex); - ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); - - // Check the state of the object. - ASSERT(result->HasFastProperties()); - ASSERT(result->HasFastObjectElements()); -#endif + { // length + FieldDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex, + DONT_ENUM, Representation::Tagged()); + map->AppendDescriptor(&d); + } + { // callee + FieldDescriptor d(factory->callee_string(), Heap::kArgumentsCalleeIndex, + DONT_ENUM, Representation::Tagged()); + map->AppendDescriptor(&d); + } + + map->set_function_with_prototype(true); + map->set_pre_allocated_property_fields(2); + map->set_inobject_properties(2); + native_context()->set_sloppy_arguments_map(*map); + + DCHECK(!function->has_initial_map()); + JSFunction::SetInitialMap(function, map, + isolate->initial_object_prototype()); + + DCHECK(map->inobject_properties() > Heap::kArgumentsCalleeIndex); + DCHECK(map->inobject_properties() > Heap::kArgumentsLengthIndex); + DCHECK(!map->is_dictionary_map()); + DCHECK(IsFastObjectElementsKind(map->elements_kind())); } - { // --- aliased_arguments_boilerplate_ - // Set up a well-formed parameter map to make assertions happy. 
- Handle<FixedArray> elements = factory->NewFixedArray(2); - elements->set_map(heap->sloppy_arguments_elements_map()); - Handle<FixedArray> array; - array = factory->NewFixedArray(0); - elements->set(0, *array); - array = factory->NewFixedArray(0); - elements->set(1, *array); - - Handle<Map> old_map( - native_context()->sloppy_arguments_boilerplate()->map()); - Handle<Map> new_map = factory->CopyMap(old_map); - new_map->set_pre_allocated_property_fields(2); - Handle<JSObject> result = factory->NewJSObjectFromMap(new_map); - // Set elements kind after allocating the object because - // NewJSObjectFromMap assumes a fast elements map. - new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS); - result->set_elements(*elements); - ASSERT(result->HasSloppyArgumentsElements()); - native_context()->set_aliased_arguments_boilerplate(*result); + { // --- aliased arguments map + Handle<Map> map = Map::Copy(isolate->sloppy_arguments_map()); + map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS); + DCHECK_EQ(2, map->pre_allocated_property_fields()); + native_context()->set_aliased_arguments_map(*map); } - { // --- strict mode arguments boilerplate + { // --- strict mode arguments map const PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY); @@ -1214,39 +1240,32 @@ Handle<AccessorPair> callee = factory->NewAccessorPair(); Handle<AccessorPair> caller = factory->NewAccessorPair(); - Handle<JSFunction> throw_function = - GetThrowTypeErrorFunction(); + Handle<JSFunction> poison = GetStrictPoisonFunction(); // Install the ThrowTypeError functions. - callee->set_getter(*throw_function); - callee->set_setter(*throw_function); - caller->set_getter(*throw_function); - caller->set_setter(*throw_function); + callee->set_getter(*poison); + callee->set_setter(*poison); + caller->set_getter(*poison); + caller->set_setter(*poison); // Create the map. Allocate one in-object field for length. 
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, Heap::kStrictArgumentsObjectSize); // Create the descriptor array for the arguments object. - Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3); - DescriptorArray::WhitenessWitness witness(*descriptors); - map->set_instance_descriptors(*descriptors); + Map::EnsureDescriptorSlack(map, 3); { // length - FieldDescriptor d( - *factory->length_string(), 0, DONT_ENUM, Representation::Tagged()); - map->AppendDescriptor(&d, witness); + FieldDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex, + DONT_ENUM, Representation::Tagged()); + map->AppendDescriptor(&d); } { // callee - CallbacksDescriptor d(*factory->callee_string(), - *callee, - attributes); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(factory->callee_string(), callee, attributes); + map->AppendDescriptor(&d); } { // caller - CallbacksDescriptor d(*factory->caller_string(), - *caller, - attributes); - map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(factory->caller_string(), caller, attributes); + map->AppendDescriptor(&d); } map->set_function_with_prototype(true); @@ -1256,42 +1275,22 @@ // Copy constructor from the sloppy arguments boilerplate. map->set_constructor( - native_context()->sloppy_arguments_boilerplate()->map()->constructor()); + native_context()->sloppy_arguments_map()->constructor()); - // Allocate the arguments boilerplate object. - Handle<JSObject> result = factory->NewJSObjectFromMap(map); - native_context()->set_strict_arguments_boilerplate(*result); - - // Add length property only for strict mode boilerplate. 
- CHECK_NOT_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - result, factory->length_string(), - factory->undefined_value(), DONT_ENUM)); + native_context()->set_strict_arguments_map(*map); -#ifdef DEBUG - LookupResult lookup(isolate); - result->LocalLookup(heap->length_string(), &lookup); - ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); - - ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); - - // Check the state of the object. - ASSERT(result->HasFastProperties()); - ASSERT(result->HasFastObjectElements()); -#endif + DCHECK(map->inobject_properties() > Heap::kArgumentsLengthIndex); + DCHECK(!map->is_dictionary_map()); + DCHECK(IsFastObjectElementsKind(map->elements_kind())); } { // --- context extension // Create a function for the context extension objects. Handle<Code> code = Handle<Code>( isolate->builtins()->builtin(Builtins::kIllegal)); - Handle<JSFunction> context_extension_fun = - factory->NewFunction(factory->empty_string(), - JS_CONTEXT_EXTENSION_OBJECT_TYPE, - JSObject::kHeaderSize, - code, - true); + Handle<JSFunction> context_extension_fun = factory->NewFunction( + factory->empty_string(), code, JS_CONTEXT_EXTENSION_OBJECT_TYPE, + JSObject::kHeaderSize); Handle<String> name = factory->InternalizeOneByteString( STATIC_ASCII_VECTOR("context_extension")); @@ -1305,9 +1304,8 @@ Handle<Code> code = Handle<Code>(isolate->builtins()->builtin( Builtins::kHandleApiCallAsFunction)); - Handle<JSFunction> delegate = - factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE, - JSObject::kHeaderSize, code, true); + Handle<JSFunction> delegate = factory->NewFunction( + factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize); native_context()->set_call_as_function_delegate(*delegate); delegate->shared()->DontAdaptArguments(); } @@ -1317,9 +1315,8 @@ Handle<Code> code = Handle<Code>(isolate->builtins()->builtin( 
Builtins::kHandleApiCallAsConstructor)); - Handle<JSFunction> delegate = - factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE, - JSObject::kHeaderSize, code, true); + Handle<JSFunction> delegate = factory->NewFunction( + factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize); native_context()->set_call_as_constructor_delegate(*delegate); delegate->shared()->DontAdaptArguments(); } @@ -1330,122 +1327,93 @@ } -Handle<JSFunction> Genesis::InstallTypedArray( - const char* name, ElementsKind elementsKind) { +void Genesis::InstallTypedArray( + const char* name, + ElementsKind elements_kind, + Handle<JSFunction>* fun, + Handle<Map>* external_map) { Handle<JSObject> global = Handle<JSObject>(native_context()->global_object()); - Handle<JSFunction> result = InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, - JSTypedArray::kSize, isolate()->initial_object_prototype(), - Builtins::kIllegal, false, true); + Handle<JSFunction> result = InstallFunction( + global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, + isolate()->initial_object_prototype(), Builtins::kIllegal); Handle<Map> initial_map = isolate()->factory()->NewMap( - JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields, elementsKind); - result->set_initial_map(*initial_map); - initial_map->set_constructor(*result); - return result; + JS_TYPED_ARRAY_TYPE, + JSTypedArray::kSizeWithInternalFields, + elements_kind); + JSFunction::SetInitialMap(result, initial_map, + handle(initial_map->prototype(), isolate())); + *fun = result; + + ElementsKind external_kind = GetNextTransitionElementsKind(elements_kind); + *external_map = Map::AsElementsKind(initial_map, external_kind); } void Genesis::InitializeExperimentalGlobal() { - Handle<JSObject> global = Handle<JSObject>(native_context()->global_object()); - // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no // longer need to live behind flags, so functions get added to the snapshot. 
- if (FLAG_harmony_symbols) { - // --- S y m b o l --- - Handle<JSFunction> symbol_fun = - InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, true, true); - native_context()->set_symbol_function(*symbol_fun); - } - - if (FLAG_harmony_collections) { - { // -- M a p - InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, true, true); - } - { // -- S e t - InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, true, true); - } - } - if (FLAG_harmony_generators) { // Create generator meta-objects and install them on the builtins object. Handle<JSObject> builtins(native_context()->builtins()); Handle<JSObject> generator_object_prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); - Handle<JSFunction> generator_function_prototype = - InstallFunction(builtins, "GeneratorFunctionPrototype", - JS_FUNCTION_TYPE, JSFunction::kHeaderSize, - generator_object_prototype, Builtins::kIllegal, - false, false); + Handle<JSFunction> generator_function_prototype = InstallFunction( + builtins, "GeneratorFunctionPrototype", JS_FUNCTION_TYPE, + JSFunction::kHeaderSize, generator_object_prototype, + Builtins::kIllegal); InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE, JSFunction::kSize, - generator_function_prototype, Builtins::kIllegal, - false, false); + generator_function_prototype, Builtins::kIllegal); // Create maps for generator functions and their prototypes. Store those // maps in the native context. 
- Handle<Map> function_map(native_context()->sloppy_function_map()); - Handle<Map> generator_function_map = factory()->CopyMap(function_map); + Handle<Map> sloppy_function_map(native_context()->sloppy_function_map()); + Handle<Map> generator_function_map = Map::Copy(sloppy_function_map); generator_function_map->set_prototype(*generator_function_prototype); native_context()->set_sloppy_generator_function_map( *generator_function_map); - Handle<Map> strict_mode_function_map( - native_context()->strict_function_map()); - Handle<Map> strict_mode_generator_function_map = factory()->CopyMap( - strict_mode_function_map); - strict_mode_generator_function_map->set_prototype( - *generator_function_prototype); + // The "arguments" and "caller" instance properties aren't specified, so + // technically we could leave them out. They make even less sense for + // generators than for functions. Still, the same argument that it makes + // sense to keep them around but poisoned in strict mode applies to + // generators as well. With poisoned accessors, naive callers can still + // iterate over the properties without accessing them. + // + // We can't use PoisonArgumentsAndCaller because that mutates accessor pairs + // in place, and the initial state of the generator function map shares the + // accessor pair with sloppy functions. Also the error message should be + // different. Also unhappily, we can't use the API accessors to implement + // poisoning, because API accessors present themselves as data properties, + // not accessor properties, and so getOwnPropertyDescriptor raises an + // exception as it tries to get the values. Sadness. 
+ Handle<AccessorPair> poison_pair(factory()->NewAccessorPair()); + PropertyAttributes rw_attribs = + static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE); + Handle<JSFunction> poison_function = GetGeneratorPoisonFunction(); + poison_pair->set_getter(*poison_function); + poison_pair->set_setter(*poison_function); + ReplaceAccessors(generator_function_map, factory()->arguments_string(), + rw_attribs, poison_pair); + ReplaceAccessors(generator_function_map, factory()->caller_string(), + rw_attribs, poison_pair); + + Handle<Map> strict_function_map(native_context()->strict_function_map()); + Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map); + // "arguments" and "caller" already poisoned. + strict_generator_function_map->set_prototype(*generator_function_prototype); native_context()->set_strict_generator_function_map( - *strict_mode_generator_function_map); + *strict_generator_function_map); - Handle<Map> object_map(native_context()->object_function()->initial_map()); - Handle<Map> generator_object_prototype_map = factory()->CopyMap( - object_map, 0); + Handle<JSFunction> object_function(native_context()->object_function()); + Handle<Map> generator_object_prototype_map = Map::Create( + object_function, 0); generator_object_prototype_map->set_prototype( *generator_object_prototype); native_context()->set_generator_object_prototype_map( *generator_object_prototype_map); - - // Create a map for generator result objects. 
- ASSERT(object_map->inobject_properties() == 0); - STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2); - Handle<Map> generator_result_map = factory()->CopyMap(object_map, - JSGeneratorObject::kResultPropertyCount); - ASSERT(generator_result_map->inobject_properties() == - JSGeneratorObject::kResultPropertyCount); - - Handle<DescriptorArray> descriptors = factory()->NewDescriptorArray(0, - JSGeneratorObject::kResultPropertyCount); - DescriptorArray::WhitenessWitness witness(*descriptors); - generator_result_map->set_instance_descriptors(*descriptors); - - Handle<String> value_string = factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("value")); - FieldDescriptor value_descr(*value_string, - JSGeneratorObject::kResultValuePropertyIndex, - NONE, - Representation::Tagged()); - generator_result_map->AppendDescriptor(&value_descr, witness); - - Handle<String> done_string = factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("done")); - FieldDescriptor done_descr(*done_string, - JSGeneratorObject::kResultDonePropertyIndex, - NONE, - Representation::Tagged()); - generator_result_map->AppendDescriptor(&done_descr, witness); - - generator_result_map->set_unused_property_fields(0); - ASSERT_EQ(JSGeneratorObject::kResultSize, - generator_result_map->instance_size()); - native_context()->set_generator_result_map(*generator_result_map); } } @@ -1461,10 +1429,12 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) { Vector<const char> name = ExperimentalNatives::GetScriptName(index); Factory* factory = isolate->factory(); - Handle<String> source_code = + Handle<String> source_code; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, source_code, factory->NewStringFromAscii( - ExperimentalNatives::GetRawScriptSource(index)); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, source_code, false); + ExperimentalNatives::GetRawScriptSource(index)), + false); return CompileNative(isolate, name, source_code); } @@ -1473,9 +1443,7 @@ Vector<const char> 
name, Handle<String> source) { HandleScope scope(isolate); -#ifdef ENABLE_DEBUGGER_SUPPORT - isolate->debugger()->set_compiling_natives(true); -#endif + SuppressDebug compiling_natives(isolate->debug()); // During genesis, the boilerplate for stack overflow won't work until the // environment has been at least partially initialized. Add a stack check // before entering JS code to catch overflow early. @@ -1489,11 +1457,8 @@ NULL, Handle<Context>(isolate->context()), true); - ASSERT(isolate->has_pending_exception() != result); + DCHECK(isolate->has_pending_exception() != result); if (!result) isolate->clear_pending_exception(); -#ifdef ENABLE_DEBUGGER_SUPPORT - isolate->debugger()->set_compiling_natives(false); -#endif return result; } @@ -1512,19 +1477,12 @@ // If we can't find the function in the cache, we compile a new // function and insert it into the cache. if (cache == NULL || !cache->Lookup(name, &function_info)) { - ASSERT(source->IsOneByteRepresentation()); - Handle<String> script_name = factory->NewStringFromUtf8(name); - ASSERT(!script_name.is_null()); + DCHECK(source->IsOneByteRepresentation()); + Handle<String> script_name = + factory->NewStringFromUtf8(name).ToHandleChecked(); function_info = Compiler::CompileScript( - source, - script_name, - 0, - 0, - false, - top_context, - extension, - NULL, - NO_CACHED_DATA, + source, script_name, 0, 0, false, top_context, extension, NULL, + ScriptCompiler::kNoCompileOptions, use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE); if (function_info.is_null()) return false; if (cache != NULL) cache->Add(name, function_info); @@ -1533,7 +1491,7 @@ // Set up the function context. Conceptually, we should clone the // function before overwriting the context but since we're in a // single-threaded environment it is not strictly necessary. - ASSERT(top_context->IsNativeContext()); + DCHECK(top_context->IsNativeContext()); Handle<Context> context = Handle<Context>(use_runtime_context ? 
Handle<Context>(top_context->runtime_context()) @@ -1548,21 +1506,56 @@ ? top_context->builtins() : top_context->global_object(), isolate); - bool has_pending_exception; - Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception); - if (has_pending_exception) return false; - return true; + return !Execution::Call( + isolate, fun, receiver, 0, NULL).is_null(); } -#define INSTALL_NATIVE(Type, name, var) \ - Handle<String> var##_name = \ - factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \ - Object* var##_native = \ - native_context()->builtins()->GetPropertyNoExceptionThrown( \ - *var##_name); \ - native_context()->set_##var(Type::cast(var##_native)); +static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context, + const char* holder_expr) { + Isolate* isolate = native_context->GetIsolate(); + Factory* factory = isolate->factory(); + Handle<GlobalObject> global(native_context->global_object()); + const char* period_pos = strchr(holder_expr, '.'); + if (period_pos == NULL) { + return Handle<JSObject>::cast( + Object::GetPropertyOrElement( + global, factory->InternalizeUtf8String(holder_expr)) + .ToHandleChecked()); + } + const char* inner = period_pos + 1; + DCHECK_EQ(NULL, strchr(inner, '.')); + Vector<const char> property(holder_expr, + static_cast<int>(period_pos - holder_expr)); + Handle<String> property_string = factory->InternalizeUtf8String(property); + DCHECK(!property_string.is_null()); + Handle<JSObject> object = Handle<JSObject>::cast( + Object::GetProperty(global, property_string).ToHandleChecked()); + if (strcmp("prototype", inner) == 0) { + Handle<JSFunction> function = Handle<JSFunction>::cast(object); + return Handle<JSObject>(JSObject::cast(function->prototype())); + } + Handle<String> inner_string = factory->InternalizeUtf8String(inner); + DCHECK(!inner_string.is_null()); + Handle<Object> value = + Object::GetProperty(object, inner_string).ToHandleChecked(); + return Handle<JSObject>::cast(value); +} + 
+#define INSTALL_NATIVE(Type, name, var) \ + Handle<String> var##_name = \ + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \ + Handle<Object> var##_native = Object::GetProperty( \ + handle(native_context()->builtins()), var##_name).ToHandleChecked(); \ + native_context()->set_##var(Type::cast(*var##_native)); + +#define INSTALL_NATIVE_MATH(name) \ + { \ + Handle<Object> fun = \ + ResolveBuiltinIdHolder(native_context(), "Math." #name); \ + native_context()->set_math_##name##_fun(JSFunction::cast(*fun)); \ + } void Genesis::InstallNativeFunctions() { HandleScope scope(isolate()); @@ -1591,6 +1584,7 @@ INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject); INSTALL_NATIVE(JSFunction, "PromiseChain", promise_chain); INSTALL_NATIVE(JSFunction, "PromiseCatch", promise_catch); + INSTALL_NATIVE(JSFunction, "PromiseThen", promise_then); INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change); INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice); @@ -1598,14 +1592,39 @@ observers_begin_perform_splice); INSTALL_NATIVE(JSFunction, "EndPerformSplice", observers_end_perform_splice); + INSTALL_NATIVE(JSFunction, "NativeObjectObserve", + native_object_observe); + INSTALL_NATIVE(JSFunction, "NativeObjectGetNotifier", + native_object_get_notifier); + INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange", + native_object_notifier_perform_change); + + INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol); + INSTALL_NATIVE(Symbol, "symbolUnscopables", unscopables_symbol); + + INSTALL_NATIVE_MATH(abs) + INSTALL_NATIVE_MATH(acos) + INSTALL_NATIVE_MATH(asin) + INSTALL_NATIVE_MATH(atan) + INSTALL_NATIVE_MATH(atan2) + INSTALL_NATIVE_MATH(ceil) + INSTALL_NATIVE_MATH(cos) + INSTALL_NATIVE_MATH(exp) + INSTALL_NATIVE_MATH(floor) + INSTALL_NATIVE_MATH(imul) + INSTALL_NATIVE_MATH(log) + INSTALL_NATIVE_MATH(max) + INSTALL_NATIVE_MATH(min) + INSTALL_NATIVE_MATH(pow) + INSTALL_NATIVE_MATH(random) + INSTALL_NATIVE_MATH(round) + 
INSTALL_NATIVE_MATH(sin) + INSTALL_NATIVE_MATH(sqrt) + INSTALL_NATIVE_MATH(tan) } void Genesis::InstallExperimentalNativeFunctions() { - INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks); - INSTALL_NATIVE(JSFunction, "EnqueueExternalMicrotask", - enqueue_external_microtask); - if (FLAG_harmony_proxies) { INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap); INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap); @@ -1627,43 +1646,34 @@ // doesn't inherit from Object.prototype. // To be used only for internal work by builtins. Instances // must not be leaked to user code. - Handle<JSFunction> array_function = - InstallFunction(builtins, - name, - JS_ARRAY_TYPE, - JSArray::kSize, - isolate()->initial_object_prototype(), - Builtins::kInternalArrayCode, - true, true); Handle<JSObject> prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); - Accessors::FunctionSetPrototype(array_function, prototype); + Handle<JSFunction> array_function = InstallFunction( + builtins, name, JS_ARRAY_TYPE, JSArray::kSize, + prototype, Builtins::kInternalArrayCode); InternalArrayConstructorStub internal_array_constructor_stub(isolate()); - Handle<Code> code = internal_array_constructor_stub.GetCode(isolate()); + Handle<Code> code = internal_array_constructor_stub.GetCode(); array_function->shared()->set_construct_stub(*code); array_function->shared()->DontAdaptArguments(); Handle<Map> original_map(array_function->initial_map()); - Handle<Map> initial_map = factory()->CopyMap(original_map); + Handle<Map> initial_map = Map::Copy(original_map); initial_map->set_elements_kind(elements_kind); - array_function->set_initial_map(*initial_map); + JSFunction::SetInitialMap(array_function, initial_map, prototype); // Make "length" magic on instances. 
- Handle<DescriptorArray> array_descriptors( - factory()->NewDescriptorArray(0, 1)); - DescriptorArray::WhitenessWitness witness(*array_descriptors); + Map::EnsureDescriptorSlack(initial_map, 1); - Handle<Foreign> array_length(factory()->NewForeign( - &Accessors::ArrayLength)); PropertyAttributes attribs = static_cast<PropertyAttributes>( DONT_ENUM | DONT_DELETE); - initial_map->set_instance_descriptors(*array_descriptors); + Handle<AccessorInfo> array_length = + Accessors::ArrayLengthInfo(isolate(), attribs); { // Add length. CallbacksDescriptor d( - *factory()->length_string(), *array_length, attribs); - array_function->initial_map()->AppendDescriptor(&d, witness); + Handle<Name>(Name::cast(array_length->name())), array_length, attribs); + array_function->initial_map()->AppendDescriptor(&d); } return array_function; @@ -1678,10 +1688,9 @@ // (itself) and a reference to the native_context directly in the object. Handle<Code> code = Handle<Code>( isolate()->builtins()->builtin(Builtins::kIllegal)); - Handle<JSFunction> builtins_fun = - factory()->NewFunction(factory()->empty_string(), - JS_BUILTINS_OBJECT_TYPE, - JSBuiltinsObject::kSize, code, true); + Handle<JSFunction> builtins_fun = factory()->NewFunction( + factory()->empty_string(), code, JS_BUILTINS_OBJECT_TYPE, + JSBuiltinsObject::kSize); Handle<String> name = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins")); @@ -1695,8 +1704,7 @@ builtins->set_builtins(*builtins); builtins->set_native_context(*native_context()); builtins->set_global_context(*native_context()); - builtins->set_global_receiver(*builtins); - builtins->set_global_receiver(native_context()->global_proxy()); + builtins->set_global_proxy(native_context()->global_proxy()); // Set up the 'global' properties of the builtins object. 
The @@ -1708,24 +1716,18 @@ Handle<String> global_string = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global")); Handle<Object> global_obj(native_context()->global_object(), isolate()); - CHECK_NOT_EMPTY_HANDLE(isolate(), - JSObject::SetLocalPropertyIgnoreAttributes( - builtins, global_string, global_obj, attributes)); + JSObject::AddProperty(builtins, global_string, global_obj, attributes); Handle<String> builtins_string = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins")); - CHECK_NOT_EMPTY_HANDLE(isolate(), - JSObject::SetLocalPropertyIgnoreAttributes( - builtins, builtins_string, builtins, attributes)); + JSObject::AddProperty(builtins, builtins_string, builtins, attributes); // Set up the reference from the global object to the builtins object. JSGlobalObject::cast(native_context()->global_object())-> set_builtins(*builtins); // Create a bridge function that has context in the native context. - Handle<JSFunction> bridge = - factory()->NewFunction(factory()->empty_string(), - factory()->undefined_value()); - ASSERT(bridge->context() == *isolate()->native_context()); + Handle<JSFunction> bridge = factory()->NewFunction(factory()->empty_string()); + DCHECK(bridge->context() == *isolate()->native_context()); // Allocate the builtins context. Handle<Context> context = @@ -1736,142 +1738,137 @@ { // -- S c r i p t // Builtin functions for Script. 
- Handle<JSFunction> script_fun = - InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, false, false); + Handle<JSFunction> script_fun = InstallFunction( + builtins, "Script", JS_VALUE_TYPE, JSValue::kSize, + isolate()->initial_object_prototype(), Builtins::kIllegal); Handle<JSObject> prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); Accessors::FunctionSetPrototype(script_fun, prototype); native_context()->set_script_function(*script_fun); Handle<Map> script_map = Handle<Map>(script_fun->initial_map()); + Map::EnsureDescriptorSlack(script_map, 14); - Handle<DescriptorArray> script_descriptors( - factory()->NewDescriptorArray(0, 13)); - DescriptorArray::WhitenessWitness witness(*script_descriptors); - - Handle<Foreign> script_source( - factory()->NewForeign(&Accessors::ScriptSource)); - Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName)); - Handle<String> id_string(factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("id"))); - Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId)); - Handle<String> line_offset_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("line_offset"))); - Handle<Foreign> script_line_offset( - factory()->NewForeign(&Accessors::ScriptLineOffset)); - Handle<String> column_offset_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("column_offset"))); - Handle<Foreign> script_column_offset( - factory()->NewForeign(&Accessors::ScriptColumnOffset)); - Handle<String> type_string(factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("type"))); - Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType)); - Handle<String> compilation_type_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("compilation_type"))); - Handle<Foreign> script_compilation_type( - factory()->NewForeign(&Accessors::ScriptCompilationType)); - Handle<String> 
line_ends_string(factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("line_ends"))); - Handle<Foreign> script_line_ends( - factory()->NewForeign(&Accessors::ScriptLineEnds)); - Handle<String> context_data_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("context_data"))); - Handle<Foreign> script_context_data( - factory()->NewForeign(&Accessors::ScriptContextData)); - Handle<String> eval_from_script_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("eval_from_script"))); - Handle<Foreign> script_eval_from_script( - factory()->NewForeign(&Accessors::ScriptEvalFromScript)); - Handle<String> eval_from_script_position_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("eval_from_script_position"))); - Handle<Foreign> script_eval_from_script_position( - factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition)); - Handle<String> eval_from_function_name_string( - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("eval_from_function_name"))); - Handle<Foreign> script_eval_from_function_name( - factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName)); PropertyAttributes attribs = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY); - script_map->set_instance_descriptors(*script_descriptors); + Handle<AccessorInfo> script_column = + Accessors::ScriptColumnOffsetInfo(isolate(), attribs); { - CallbacksDescriptor d( - *factory()->source_string(), *script_source, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(script_column->name())), + script_column, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_id = + Accessors::ScriptIdInfo(isolate(), attribs); { - CallbacksDescriptor d(*factory()->name_string(), *script_name, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(script_id->name())), + script_id, attribs); + script_map->AppendDescriptor(&d); 
} + + Handle<AccessorInfo> script_name = + Accessors::ScriptNameInfo(isolate(), attribs); { - CallbacksDescriptor d(*id_string, *script_id, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(script_name->name())), + script_name, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_line = + Accessors::ScriptLineOffsetInfo(isolate(), attribs); { - CallbacksDescriptor d(*line_offset_string, *script_line_offset, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(script_line->name())), + script_line, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_source = + Accessors::ScriptSourceInfo(isolate(), attribs); { - CallbacksDescriptor d( - *column_offset_string, *script_column_offset, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(script_source->name())), + script_source, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_type = + Accessors::ScriptTypeInfo(isolate(), attribs); { - CallbacksDescriptor d(*type_string, *script_type, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor d(Handle<Name>(Name::cast(script_type->name())), + script_type, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_compilation_type = + Accessors::ScriptCompilationTypeInfo(isolate(), attribs); { CallbacksDescriptor d( - *compilation_type_string, *script_compilation_type, attribs); - script_map->AppendDescriptor(&d, witness); + Handle<Name>(Name::cast(script_compilation_type->name())), + script_compilation_type, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_line_ends = + Accessors::ScriptLineEndsInfo(isolate(), attribs); { - CallbacksDescriptor d(*line_ends_string, *script_line_ends, attribs); - script_map->AppendDescriptor(&d, witness); + CallbacksDescriptor 
d(Handle<Name>(Name::cast(script_line_ends->name())), + script_line_ends, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_context_data = + Accessors::ScriptContextDataInfo(isolate(), attribs); { CallbacksDescriptor d( - *context_data_string, *script_context_data, attribs); - script_map->AppendDescriptor(&d, witness); + Handle<Name>(Name::cast(script_context_data->name())), + script_context_data, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_eval_from_script = + Accessors::ScriptEvalFromScriptInfo(isolate(), attribs); { CallbacksDescriptor d( - *eval_from_script_string, *script_eval_from_script, attribs); - script_map->AppendDescriptor(&d, witness); + Handle<Name>(Name::cast(script_eval_from_script->name())), + script_eval_from_script, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_eval_from_script_position = + Accessors::ScriptEvalFromScriptPositionInfo(isolate(), attribs); { CallbacksDescriptor d( - *eval_from_script_position_string, - *script_eval_from_script_position, - attribs); - script_map->AppendDescriptor(&d, witness); + Handle<Name>(Name::cast(script_eval_from_script_position->name())), + script_eval_from_script_position, attribs); + script_map->AppendDescriptor(&d); } + Handle<AccessorInfo> script_eval_from_function_name = + Accessors::ScriptEvalFromFunctionNameInfo(isolate(), attribs); { CallbacksDescriptor d( - *eval_from_function_name_string, - *script_eval_from_function_name, - attribs); - script_map->AppendDescriptor(&d, witness); + Handle<Name>(Name::cast(script_eval_from_function_name->name())), + script_eval_from_function_name, attribs); + script_map->AppendDescriptor(&d); + } + + Handle<AccessorInfo> script_source_url = + Accessors::ScriptSourceUrlInfo(isolate(), attribs); + { + CallbacksDescriptor d(Handle<Name>(Name::cast(script_source_url->name())), + script_source_url, attribs); + script_map->AppendDescriptor(&d); + } + + Handle<AccessorInfo> 
script_source_mapping_url = + Accessors::ScriptSourceMappingUrlInfo(isolate(), attribs); + { + CallbacksDescriptor d( + Handle<Name>(Name::cast(script_source_mapping_url->name())), + script_source_mapping_url, attribs); + script_map->AppendDescriptor(&d); } // Allocate the empty script. @@ -1883,11 +1880,9 @@ // Builtin function for OpaqueReference -- a JSValue-based object, // that keeps its field isolated from JavaScript code. It may store // objects, that JavaScript code may not access. - Handle<JSFunction> opaque_reference_fun = - InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE, - JSValue::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, false, false); + Handle<JSFunction> opaque_reference_fun = InstallFunction( + builtins, "OpaqueReference", JS_VALUE_TYPE, JSValue::kSize, + isolate()->initial_object_prototype(), Builtins::kIllegal); Handle<JSObject> prototype = factory()->NewJSObject(isolate()->object_function(), TENURED); Accessors::FunctionSetPrototype(opaque_reference_fun, prototype); @@ -1909,6 +1904,22 @@ InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS); } + { // -- S e t I t e r a t o r + Handle<JSFunction> set_iterator_function = InstallFunction( + builtins, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize, + isolate()->initial_object_prototype(), Builtins::kIllegal); + native_context()->set_set_iterator_map( + set_iterator_function->initial_map()); + } + + { // -- M a p I t e r a t o r + Handle<JSFunction> map_iterator_function = InstallFunction( + builtins, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize, + isolate()->initial_object_prototype(), Builtins::kIllegal); + native_context()->set_map_iterator_map( + map_iterator_function->initial_map()); + } + if (FLAG_disable_native_files) { PrintF("Warning: Running without installed natives!\n"); return true; @@ -1930,7 +1941,7 @@ // Store the map for the string prototype after the natives has been compiled // and the String function 
has been set up. Handle<JSFunction> string_function(native_context()->string_function()); - ASSERT(JSObject::cast( + DCHECK(JSObject::cast( string_function->initial_map()->prototype())->HasFastProperties()); native_context()->set_string_function_prototype_map( HeapObject::cast(string_function->initial_map()->prototype())->map()); @@ -1938,28 +1949,30 @@ // Install Function.prototype.call and apply. { Handle<String> key = factory()->function_class_string(); Handle<JSFunction> function = - Handle<JSFunction>::cast( - GetProperty(isolate(), isolate()->global_object(), key)); + Handle<JSFunction>::cast(Object::GetProperty( + handle(native_context()->global_object()), key).ToHandleChecked()); Handle<JSObject> proto = Handle<JSObject>(JSObject::cast(function->instance_prototype())); // Install the call and the apply functions. Handle<JSFunction> call = InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize, - Handle<JSObject>::null(), - Builtins::kFunctionCall, - false, false); + MaybeHandle<JSObject>(), Builtins::kFunctionCall); Handle<JSFunction> apply = InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize, - Handle<JSObject>::null(), - Builtins::kFunctionApply, - false, false); + MaybeHandle<JSObject>(), Builtins::kFunctionApply); + if (FLAG_vector_ics) { + // Apply embeds an IC, so we need a type vector of size 1 in the shared + // function info. + Handle<FixedArray> feedback_vector = factory()->NewTypeFeedbackVector(1); + apply->shared()->set_feedback_vector(*feedback_vector); + } // Make sure that Function.prototype.call appears to be compiled. // The code will never be called, but inline caching for call will // only work if it appears to be compiled. call->shared()->DontAdaptArguments(); - ASSERT(call->is_compiled()); + DCHECK(call->is_compiled()); // Set the expected parameters for apply to 2; required by builtin. 
apply->shared()->set_formal_parameter_count(2); @@ -1991,38 +2004,36 @@ initial_map->set_prototype(*array_prototype); // Update map with length accessor from Array and add "index" and "input". - Handle<DescriptorArray> reresult_descriptors = - factory()->NewDescriptorArray(0, 3); - DescriptorArray::WhitenessWitness witness(*reresult_descriptors); - initial_map->set_instance_descriptors(*reresult_descriptors); + Map::EnsureDescriptorSlack(initial_map, 3); { JSFunction* array_function = native_context()->array_function(); Handle<DescriptorArray> array_descriptors( array_function->initial_map()->instance_descriptors()); - String* length = heap()->length_string(); + Handle<String> length = factory()->length_string(); int old = array_descriptors->SearchWithCache( - length, array_function->initial_map()); - ASSERT(old != DescriptorArray::kNotFound); + *length, array_function->initial_map()); + DCHECK(old != DescriptorArray::kNotFound); CallbacksDescriptor desc(length, - array_descriptors->GetValue(old), + handle(array_descriptors->GetValue(old), + isolate()), array_descriptors->GetDetails(old).attributes()); - initial_map->AppendDescriptor(&desc, witness); + initial_map->AppendDescriptor(&desc); } { - FieldDescriptor index_field(heap()->index_string(), + FieldDescriptor index_field(factory()->index_string(), JSRegExpResult::kIndexIndex, NONE, Representation::Tagged()); - initial_map->AppendDescriptor(&index_field, witness); + initial_map->AppendDescriptor(&index_field); } { - FieldDescriptor input_field(heap()->input_string(), + FieldDescriptor input_field(factory()->input_string(), JSRegExpResult::kInputIndex, NONE, Representation::Tagged()); - initial_map->AppendDescriptor(&input_field, witness); + initial_map->AppendDescriptor(&input_field); } initial_map->set_inobject_properties(2); @@ -2033,7 +2044,7 @@ } #ifdef VERIFY_HEAP - builtins->Verify(); + builtins->ObjectVerify(); #endif return true; @@ -2052,51 +2063,24 @@ for (int i = 
ExperimentalNatives::GetDebuggerCount(); i < ExperimentalNatives::GetBuiltinsCount(); i++) { - INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js") INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js") - INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js") INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js") - INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js") INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js") INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js") - INSTALL_EXPERIMENTAL_NATIVE(i, maths, "harmony-math.js") } InstallExperimentalNativeFunctions(); - InstallExperimentalBuiltinFunctionIds(); return true; } -static Handle<JSObject> ResolveBuiltinIdHolder( - Handle<Context> native_context, - const char* holder_expr) { - Isolate* isolate = native_context->GetIsolate(); - Factory* factory = isolate->factory(); - Handle<GlobalObject> global(native_context->global_object()); - const char* period_pos = strchr(holder_expr, '.'); - if (period_pos == NULL) { - return Handle<JSObject>::cast(GetProperty( - isolate, global, factory->InternalizeUtf8String(holder_expr))); - } - ASSERT_EQ(".prototype", period_pos); - Vector<const char> property(holder_expr, - static_cast<int>(period_pos - holder_expr)); - Handle<String> property_string = factory->InternalizeUtf8String(property); - ASSERT(!property_string.is_null()); - Handle<JSFunction> function = Handle<JSFunction>::cast( - GetProperty(isolate, global, property_string)); - return Handle<JSObject>(JSObject::cast(function->prototype())); -} - - static void InstallBuiltinFunctionId(Handle<JSObject> holder, const char* function_name, BuiltinFunctionId id) { - Factory* factory = holder->GetIsolate()->factory(); - Handle<String> name = factory->InternalizeUtf8String(function_name); - Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked(); - Handle<JSFunction> function(JSFunction::cast(function_object)); + Isolate* isolate = holder->GetIsolate(); + Handle<Object> 
function_object = + Object::GetProperty(isolate, holder, function_name).ToHandleChecked(); + Handle<JSFunction> function = Handle<JSFunction>::cast(function_object); function->shared()->set_function_data(Smi::FromInt(id)); } @@ -2115,15 +2099,6 @@ } -void Genesis::InstallExperimentalBuiltinFunctionIds() { - HandleScope scope(isolate()); - if (FLAG_harmony_maths) { - Handle<JSObject> holder = ResolveBuiltinIdHolder(native_context(), "Math"); - InstallBuiltinFunctionId(holder, "clz32", kMathClz32); - } -} - - // Do not forget to update macros.py with named constant // of cache id. #define JSFUNCTION_RESULT_CACHE_LIST(F) \ @@ -2169,9 +2144,8 @@ void Genesis::InitializeNormalizedMapCaches() { - Handle<FixedArray> array( - factory()->NewFixedArray(NormalizedMapCache::kEntries, TENURED)); - native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array)); + Handle<NormalizedMapCache> cache = NormalizedMapCache::New(isolate()); + native_context()->set_normalized_map_cache(*cache); } @@ -2187,52 +2161,56 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) { Isolate* isolate = native_context->GetIsolate(); + // Don't install extensions into the snapshot. + if (isolate->serializer_enabled()) return true; + Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle<JSGlobalObject> global(JSGlobalObject::cast( native_context->global_object())); + + Handle<JSObject> Error = Handle<JSObject>::cast( + Object::GetProperty(isolate, global, "Error").ToHandleChecked()); + Handle<String> name = + factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("stackTraceLimit")); + Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate); + JSObject::AddProperty(Error, name, stack_trace_limit, NONE); + // Expose the natives in global if a name for it is specified. 
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) { Handle<String> natives = factory->InternalizeUtf8String(FLAG_expose_natives_as); - JSObject::SetLocalPropertyIgnoreAttributes( - global, natives, Handle<JSObject>(global->builtins()), DONT_ENUM); - if (isolate->has_pending_exception()) return false; + JSObject::AddProperty(global, natives, handle(global->builtins()), + DONT_ENUM); } - Handle<Object> Error = GetProperty(global, "Error"); - if (Error->IsJSObject()) { - Handle<String> name = factory->InternalizeOneByteString( - STATIC_ASCII_VECTOR("stackTraceLimit")); - Handle<Smi> stack_trace_limit( - Smi::FromInt(FLAG_stack_trace_limit), isolate); - JSObject::SetLocalPropertyIgnoreAttributes( - Handle<JSObject>::cast(Error), name, stack_trace_limit, NONE); - if (isolate->has_pending_exception()) return false; - } + // Expose the stack trace symbol to native JS. + RETURN_ON_EXCEPTION_VALUE( + isolate, + JSObject::SetOwnPropertyIgnoreAttributes( + handle(native_context->builtins(), isolate), + factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("stack_trace_symbol")), + factory->stack_trace_symbol(), + NONE), + false); -#ifdef ENABLE_DEBUGGER_SUPPORT // Expose the debug global object in global if a name for it is specified. if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) { - Debug* debug = isolate->debug(); // If loading fails we just bail out without installing the // debugger but without tanking the whole context. + Debug* debug = isolate->debug(); if (!debug->Load()) return true; + Handle<Context> debug_context = debug->debug_context(); // Set the security token for the debugger context to the same as // the shell native context to allow calling between these (otherwise // exposing debug global object doesn't make much sense). 
- debug->debug_context()->set_security_token( - native_context->security_token()); - + debug_context->set_security_token(native_context->security_token()); Handle<String> debug_string = factory->InternalizeUtf8String(FLAG_expose_debug_as); - Handle<Object> global_proxy( - debug->debug_context()->global_proxy(), isolate); - JSObject::SetLocalPropertyIgnoreAttributes( - global, debug_string, global_proxy, DONT_ENUM); - if (isolate->has_pending_exception()) return false; + Handle<Object> global_proxy(debug_context->global_proxy(), isolate); + JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM); } -#endif return true; } @@ -2242,12 +2220,7 @@ } -static bool MatchRegisteredExtensions(void* key1, void* key2) { - return key1 == key2; -} - -Genesis::ExtensionStates::ExtensionStates() - : map_(MatchRegisteredExtensions, 8) { } +Genesis::ExtensionStates::ExtensionStates() : map_(HashMap::PointersMatch, 8) {} Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state( RegisteredExtension* extension) { @@ -2340,7 +2313,7 @@ "Circular extension dependency")) { return false; } - ASSERT(extension_states->get_state(current) == UNVISITED); + DCHECK(extension_states->get_state(current) == UNVISITED); extension_states->set_state(current, VISITED); v8::Extension* extension = current->extension(); // Install the extension's dependencies @@ -2351,10 +2324,10 @@ return false; } } - Handle<String> source_code = - isolate->factory()->NewExternalStringFromAscii(extension->source()); // We do not expect this to throw an exception. Change this if it does. 
- CHECK_NOT_EMPTY_HANDLE(isolate, source_code); + Handle<String> source_code = + isolate->factory()->NewExternalStringFromAscii( + extension->source()).ToHandleChecked(); bool result = CompileScriptCached(isolate, CStrVector(extension->name()), source_code, @@ -2362,14 +2335,14 @@ extension, Handle<Context>(isolate->context()), false); - ASSERT(isolate->has_pending_exception() != result); + DCHECK(isolate->has_pending_exception() != result); if (!result) { // We print out the name of the extension that fail to install. // When an error is thrown during bootstrapping we automatically print // the line number at which this happened to the console in the isolate // error throwing functionality. - OS::PrintError("Error installing extension '%s'.\n", - current->extension()->name()); + base::OS::PrintError("Error installing extension '%s'.\n", + current->extension()->name()); isolate->clear_pending_exception(); } extension_states->set_state(current, INSTALLED); @@ -2382,12 +2355,14 @@ HandleScope scope(isolate()); for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) { Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i); - Handle<String> name = - factory()->InternalizeUtf8String(Builtins::GetName(id)); - Object* function_object = builtins->GetPropertyNoExceptionThrown(*name); - Handle<JSFunction> function - = Handle<JSFunction>(JSFunction::cast(function_object)); + Handle<Object> function_object = Object::GetProperty( + isolate(), builtins, Builtins::GetName(id)).ToHandleChecked(); + Handle<JSFunction> function = Handle<JSFunction>::cast(function_object); builtins->set_javascript_builtin(id, *function); + // TODO(mstarzinger): This is just a temporary hack to make TurboFan work, + // the correct solution is to restore the context register after invoking + // builtins from full-codegen. 
+ function->shared()->set_optimization_disabled(true); if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) { return false; } @@ -2401,26 +2376,26 @@ v8::Handle<v8::ObjectTemplate> global_proxy_template) { Handle<JSObject> global_proxy( JSObject::cast(native_context()->global_proxy())); - Handle<JSObject> inner_global( + Handle<JSObject> global_object( JSObject::cast(native_context()->global_object())); if (!global_proxy_template.IsEmpty()) { // Configure the global proxy object. - Handle<ObjectTemplateInfo> proxy_data = + Handle<ObjectTemplateInfo> global_proxy_data = v8::Utils::OpenHandle(*global_proxy_template); - if (!ConfigureApiObject(global_proxy, proxy_data)) return false; + if (!ConfigureApiObject(global_proxy, global_proxy_data)) return false; - // Configure the inner global object. + // Configure the global object. Handle<FunctionTemplateInfo> proxy_constructor( - FunctionTemplateInfo::cast(proxy_data->constructor())); + FunctionTemplateInfo::cast(global_proxy_data->constructor())); if (!proxy_constructor->prototype_template()->IsUndefined()) { - Handle<ObjectTemplateInfo> inner_data( + Handle<ObjectTemplateInfo> global_object_data( ObjectTemplateInfo::cast(proxy_constructor->prototype_template())); - if (!ConfigureApiObject(inner_global, inner_data)) return false; + if (!ConfigureApiObject(global_object, global_object_data)) return false; } } - SetObjectPrototype(global_proxy, inner_global); + SetObjectPrototype(global_proxy, global_object); native_context()->set_initial_array_prototype( JSArray::cast(native_context()->array_function()->prototype())); @@ -2430,16 +2405,16 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object, - Handle<ObjectTemplateInfo> object_template) { - ASSERT(!object_template.is_null()); - ASSERT(FunctionTemplateInfo::cast(object_template->constructor()) + Handle<ObjectTemplateInfo> object_template) { + DCHECK(!object_template.is_null()); + DCHECK(FunctionTemplateInfo::cast(object_template->constructor()) 
->IsTemplateFor(object->map()));; - bool pending_exception = false; - Handle<JSObject> obj = - Execution::InstantiateObject(object_template, &pending_exception); - if (pending_exception) { - ASSERT(isolate()->has_pending_exception()); + MaybeHandle<JSObject> maybe_obj = + Execution::InstantiateObject(object_template); + Handle<JSObject> obj; + if (!maybe_obj.ToHandle(&obj)) { + DCHECK(isolate()->has_pending_exception()); isolate()->clear_pending_exception(); return false; } @@ -2459,33 +2434,29 @@ case FIELD: { HandleScope inner(isolate()); Handle<Name> key = Handle<Name>(descs->GetKey(i)); - int index = descs->GetFieldIndex(i); - ASSERT(!descs->GetDetails(i).representation().IsDouble()); + FieldIndex index = FieldIndex::ForDescriptor(from->map(), i); + DCHECK(!descs->GetDetails(i).representation().IsDouble()); Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index), isolate()); - CHECK_NOT_EMPTY_HANDLE(isolate(), - JSObject::SetLocalPropertyIgnoreAttributes( - to, key, value, details.attributes())); + JSObject::AddProperty(to, key, value, details.attributes()); break; } case CONSTANT: { HandleScope inner(isolate()); Handle<Name> key = Handle<Name>(descs->GetKey(i)); Handle<Object> constant(descs->GetConstant(i), isolate()); - CHECK_NOT_EMPTY_HANDLE(isolate(), - JSObject::SetLocalPropertyIgnoreAttributes( - to, key, constant, details.attributes())); + JSObject::AddProperty(to, key, constant, details.attributes()); break; } case CALLBACKS: { LookupResult result(isolate()); - to->LocalLookup(descs->GetKey(i), &result); + Handle<Name> key(Name::cast(descs->GetKey(i)), isolate()); + to->LookupOwn(key, &result); // If the property is already there we skip it if (result.IsFound()) continue; HandleScope inner(isolate()); - ASSERT(!to->HasFastProperties()); + DCHECK(!to->HasFastProperties()); // Add to dictionary. 
- Handle<Name> key = Handle<Name>(descs->GetKey(i)); Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate()); PropertyDetails d = PropertyDetails( details.attributes(), CALLBACKS, i + 1); @@ -2496,7 +2467,6 @@ // Do not occur since the from object has fast properties. case HANDLER: case INTERCEPTOR: - case TRANSITION: case NONEXISTENT: // No element in instance descriptors have proxy or interceptor type. UNREACHABLE(); @@ -2510,24 +2480,22 @@ for (int i = 0; i < capacity; i++) { Object* raw_key(properties->KeyAt(i)); if (properties->IsKey(raw_key)) { - ASSERT(raw_key->IsName()); + DCHECK(raw_key->IsName()); // If the property is already there we skip it. LookupResult result(isolate()); - to->LocalLookup(Name::cast(raw_key), &result); + Handle<Name> key(Name::cast(raw_key)); + to->LookupOwn(key, &result); if (result.IsFound()) continue; // Set the property. - Handle<Name> key = Handle<Name>(Name::cast(raw_key)); Handle<Object> value = Handle<Object>(properties->ValueAt(i), isolate()); - ASSERT(!value->IsCell()); + DCHECK(!value->IsCell()); if (value->IsPropertyCell()) { value = Handle<Object>(PropertyCell::cast(*value)->value(), isolate()); } PropertyDetails details = properties->DetailsAt(i); - CHECK_NOT_EMPTY_HANDLE(isolate(), - JSObject::SetLocalPropertyIgnoreAttributes( - to, key, value, details.attributes())); + JSObject::AddProperty(to, key, value, details.attributes()); } } } @@ -2547,17 +2515,15 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) { HandleScope outer(isolate()); - ASSERT(!from->IsJSArray()); - ASSERT(!to->IsJSArray()); + DCHECK(!from->IsJSArray()); + DCHECK(!to->IsJSArray()); TransferNamedProperties(from, to); TransferIndexedProperties(from, to); // Transfer the prototype (new map is needed). 
- Handle<Map> old_to_map = Handle<Map>(to->map()); - Handle<Map> new_to_map = factory()->CopyMap(old_to_map); - new_to_map->set_prototype(from->map()->prototype()); - to->set_map(*new_to_map); + Handle<Object> proto(from->map()->prototype(), isolate()); + SetObjectPrototype(to, proto); } @@ -2565,8 +2531,8 @@ // The maps with writable prototype are created in CreateEmptyFunction // and CreateStrictModeFunctionMaps respectively. Initially the maps are // created with read-only prototype for JS builtins processing. - ASSERT(!sloppy_function_map_writable_prototype_.is_null()); - ASSERT(!strict_function_map_writable_prototype_.is_null()); + DCHECK(!sloppy_function_map_writable_prototype_.is_null()); + DCHECK(!strict_function_map_writable_prototype_.is_null()); // Replace function instance maps to make prototype writable. native_context()->set_sloppy_function_map( @@ -2578,17 +2544,17 @@ class NoTrackDoubleFieldsForSerializerScope { public: - NoTrackDoubleFieldsForSerializerScope() : flag_(FLAG_track_double_fields) { - if (Serializer::enabled()) { + explicit NoTrackDoubleFieldsForSerializerScope(Isolate* isolate) + : flag_(FLAG_track_double_fields) { + if (isolate->serializer_enabled()) { // Disable tracking double fields because heap numbers treated as // immutable by the serializer. 
FLAG_track_double_fields = false; } } + ~NoTrackDoubleFieldsForSerializerScope() { - if (Serializer::enabled()) { - FLAG_track_double_fields = flag_; - } + FLAG_track_double_fields = flag_; } private: @@ -2597,12 +2563,12 @@ Genesis::Genesis(Isolate* isolate, - Handle<Object> global_object, - v8::Handle<v8::ObjectTemplate> global_template, + MaybeHandle<JSGlobalProxy> maybe_global_proxy, + v8::Handle<v8::ObjectTemplate> global_proxy_template, v8::ExtensionConfiguration* extensions) : isolate_(isolate), active_(isolate->bootstrapper()) { - NoTrackDoubleFieldsForSerializerScope disable_double_tracking_for_serializer; + NoTrackDoubleFieldsForSerializerScope disable_scope(isolate); result_ = Handle<Context>::null(); // If V8 cannot be initialized, just return. if (!V8::Initialize(NULL)) return; @@ -2629,35 +2595,33 @@ AddToWeakNativeContextList(*native_context()); isolate->set_context(*native_context()); isolate->counters()->contexts_created_by_snapshot()->Increment(); - Handle<GlobalObject> inner_global; - Handle<JSGlobalProxy> global_proxy = - CreateNewGlobals(global_template, - global_object, - &inner_global); - - HookUpGlobalProxy(inner_global, global_proxy); - HookUpInnerGlobal(inner_global); - native_context()->builtins()->set_global_receiver( + Handle<GlobalObject> global_object; + Handle<JSGlobalProxy> global_proxy = CreateNewGlobals( + global_proxy_template, maybe_global_proxy, &global_object); + + HookUpGlobalProxy(global_object, global_proxy); + HookUpGlobalObject(global_object); + native_context()->builtins()->set_global_proxy( native_context()->global_proxy()); - if (!ConfigureGlobalObjects(global_template)) return; + if (!ConfigureGlobalObjects(global_proxy_template)) return; } else { // We get here if there was no context snapshot. 
CreateRoots(); Handle<JSFunction> empty_function = CreateEmptyFunction(isolate); CreateStrictModeFunctionMaps(empty_function); - Handle<GlobalObject> inner_global; - Handle<JSGlobalProxy> global_proxy = - CreateNewGlobals(global_template, global_object, &inner_global); - HookUpGlobalProxy(inner_global, global_proxy); - InitializeGlobal(inner_global, empty_function); + Handle<GlobalObject> global_object; + Handle<JSGlobalProxy> global_proxy = CreateNewGlobals( + global_proxy_template, maybe_global_proxy, &global_object); + HookUpGlobalProxy(global_object, global_proxy); + InitializeGlobal(global_object, empty_function); InstallJSFunctionResultCaches(); InitializeNormalizedMapCaches(); if (!InstallNatives()) return; MakeFunctionInstancePrototypeWritable(); - if (!ConfigureGlobalObjects(global_template)) return; + if (!ConfigureGlobalObjects(global_proxy_template)) return; isolate->counters()->contexts_created_from_scratch()->Increment(); } @@ -2668,7 +2632,7 @@ // We can't (de-)serialize typed arrays currently, but we are lucky: The state // of the random number generator needs no initialization during snapshot // creation time and we don't need trigonometric functions then. - if (!Serializer::enabled()) { + if (!isolate->serializer_enabled()) { // Initially seed the per-context random number generator using the // per-isolate random number generator. const int num_elems = 2; @@ -2684,47 +2648,25 @@ Utils::OpenHandle(*buffer)->set_should_be_freed(true); v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems); Handle<JSBuiltinsObject> builtins(native_context()->builtins()); - ForceSetProperty(builtins, - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("rngstate")), - Utils::OpenHandle(*ta), - NONE); + Runtime::DefineObjectProperty(builtins, + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("rngstate")), + Utils::OpenHandle(*ta), + NONE).Assert(); // Initialize trigonometric lookup tables and constants. 
- const int table_num_bytes = TrigonometricLookupTable::table_num_bytes(); - v8::Local<v8::ArrayBuffer> sin_buffer = v8::ArrayBuffer::New( - reinterpret_cast<v8::Isolate*>(isolate), - TrigonometricLookupTable::sin_table(), table_num_bytes); - v8::Local<v8::ArrayBuffer> cos_buffer = v8::ArrayBuffer::New( + const int constants_size = ARRAY_SIZE(fdlibm::MathConstants::constants); + const int table_num_bytes = constants_size * kDoubleSize; + v8::Local<v8::ArrayBuffer> trig_buffer = v8::ArrayBuffer::New( reinterpret_cast<v8::Isolate*>(isolate), - TrigonometricLookupTable::cos_x_interval_table(), table_num_bytes); - v8::Local<v8::Float64Array> sin_table = v8::Float64Array::New( - sin_buffer, 0, TrigonometricLookupTable::table_size()); - v8::Local<v8::Float64Array> cos_table = v8::Float64Array::New( - cos_buffer, 0, TrigonometricLookupTable::table_size()); - - ForceSetProperty(builtins, - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("kSinTable")), - Utils::OpenHandle(*sin_table), - NONE); - ForceSetProperty(builtins, - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("kCosXIntervalTable")), - Utils::OpenHandle(*cos_table), - NONE); - ForceSetProperty(builtins, - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("kSamples")), - factory()->NewHeapNumber( - TrigonometricLookupTable::samples()), - NONE); - ForceSetProperty(builtins, - factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("kIndexConvert")), - factory()->NewHeapNumber( - TrigonometricLookupTable::samples_over_pi_half()), - NONE); + const_cast<double*>(fdlibm::MathConstants::constants), table_num_bytes); + v8::Local<v8::Float64Array> trig_table = + v8::Float64Array::New(trig_buffer, 0, constants_size); + + Runtime::DefineObjectProperty( + builtins, + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("kMath")), + Utils::OpenHandle(*trig_table), NONE).Assert(); } result_ = native_context(); @@ -2739,7 +2681,7 @@ } -// Archive statics that are thread local. 
+// Archive statics that are thread-local. char* Bootstrapper::ArchiveState(char* to) { *reinterpret_cast<NestingCounterType*>(to) = nesting_; nesting_ = 0; @@ -2747,7 +2689,7 @@ } -// Restore statics that are thread local. +// Restore statics that are thread-local. char* Bootstrapper::RestoreState(char* from) { nesting_ = *reinterpret_cast<NestingCounterType*>(from); return from + sizeof(NestingCounterType); @@ -2756,7 +2698,7 @@ // Called when the top-level V8 mutex is destroyed. void Bootstrapper::FreeThreadResources() { - ASSERT(!IsActive()); + DCHECK(!IsActive()); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/bootstrapper.h nodejs-0.11.15/deps/v8/src/bootstrapper.h --- nodejs-0.11.13/deps/v8/src/bootstrapper.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bootstrapper.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,21 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_BOOTSTRAPPER_H_ #define V8_BOOTSTRAPPER_H_ -#include "allocation.h" +#include "src/factory.h" namespace v8 { namespace internal { - // A SourceCodeCache uses a FixedArray to store pairs of // (AsciiString*, JSFunction*), mapping names of native code files // (runtime.js, etc.) to precompiled functions. Instead of mapping // names to functions it might make sense to let the JS2C tool // generate an index for each native JS file. -class SourceCodeCache BASE_EMBEDDED { +class SourceCodeCache V8_FINAL BASE_EMBEDDED { public: explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { } @@ -72,8 +47,9 @@ Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED); cache_->CopyTo(0, *new_array, 0, cache_->length()); cache_ = *new_array; - Handle<String> str = factory->NewStringFromAscii(name, TENURED); - ASSERT(!str.is_null()); + Handle<String> str = + factory->NewStringFromAscii(name, TENURED).ToHandleChecked(); + DCHECK(!str.is_null()); cache_->set(length, *str); cache_->set(length + 1, *shared); Script::cast(shared->script())->set_type(Smi::FromInt(type_)); @@ -88,7 +64,7 @@ // The Boostrapper is the public interface for creating a JavaScript global // context. 
-class Bootstrapper { +class Bootstrapper V8_FINAL { public: static void InitializeOncePerProcess(); static void TearDownExtensions(); @@ -100,8 +76,8 @@ // Creates a JavaScript Global Context with initial object graph. // The returned value is a global handle casted to V8Environment*. Handle<Context> CreateEnvironment( - Handle<Object> global_object, - v8::Handle<v8::ObjectTemplate> global_template, + MaybeHandle<JSGlobalProxy> maybe_global_proxy, + v8::Handle<v8::ObjectTemplate> global_object_template, v8::ExtensionConfiguration* extensions); // Detach the environment from its outer global object. @@ -158,7 +134,7 @@ }; -class BootstrapperActive BASE_EMBEDDED { +class BootstrapperActive V8_FINAL BASE_EMBEDDED { public: explicit BootstrapperActive(Bootstrapper* bootstrapper) : bootstrapper_(bootstrapper) { @@ -176,20 +152,15 @@ }; -class NativesExternalStringResource +class NativesExternalStringResource V8_FINAL : public v8::String::ExternalAsciiStringResource { public: NativesExternalStringResource(Bootstrapper* bootstrapper, const char* source, size_t length); + virtual const char* data() const V8_OVERRIDE { return data_; } + virtual size_t length() const V8_OVERRIDE { return length_; } - const char* data() const { - return data_; - } - - size_t length() const { - return length_; - } private: const char* data_; size_t length_; diff -Nru nodejs-0.11.13/deps/v8/src/builtins.cc nodejs-0.11.15/deps/v8/src/builtins.cc --- nodejs-0.11.13/deps/v8/src/builtins.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/builtins.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,22 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "api.h" -#include "arguments.h" -#include "bootstrapper.h" -#include "builtins.h" -#include "cpu-profiler.h" -#include "gdb-jit.h" -#include "ic-inl.h" -#include "heap-profiler.h" -#include "mark-compact.h" -#include "stub-cache.h" -#include "vm-state-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/api.h" +#include "src/arguments.h" +#include "src/base/once.h" +#include "src/bootstrapper.h" +#include "src/builtins.h" +#include "src/cpu-profiler.h" +#include "src/gdb-jit.h" +#include "src/heap/mark-compact.h" +#include "src/heap-profiler.h" +#include "src/ic-inl.h" +#include "src/prototype.h" +#include "src/stub-cache.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { @@ -52,12 +31,12 @@ : Arguments(length, arguments) { } Object*& operator[] (int index) { - ASSERT(index < length()); + DCHECK(index < length()); return Arguments::operator[](index); } template <class S> Handle<S> at(int index) { - ASSERT(index < length()); + DCHECK(index < length()); return Arguments::at<S>(index); } @@ -80,7 +59,7 @@ #ifdef DEBUG void Verify() { // Check we have at least the receiver. - ASSERT(Arguments::length() >= 1); + DCHECK(Arguments::length() >= 1); } #endif }; @@ -97,7 +76,7 @@ template <> void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() { // Check we have at least the receiver and the called function. - ASSERT(Arguments::length() >= 2); + DCHECK(Arguments::length() >= 2); // Make sure cast to JSFunction succeeds. called_function(); } @@ -127,28 +106,28 @@ #ifdef DEBUG #define BUILTIN(name) \ - MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \ + MUST_USE_RESULT static Object* Builtin_Impl_##name( \ name##ArgumentsType args, Isolate* isolate); \ - MUST_USE_RESULT static MaybeObject* Builtin_##name( \ + MUST_USE_RESULT static Object* Builtin_##name( \ int args_length, Object** args_object, Isolate* isolate) { \ name##ArgumentsType args(args_length, args_object); \ args.Verify(); \ return Builtin_Impl_##name(args, isolate); \ } \ - MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \ + MUST_USE_RESULT static Object* Builtin_Impl_##name( \ name##ArgumentsType args, Isolate* isolate) #else // For release mode. 
#define BUILTIN(name) \ - static MaybeObject* Builtin_impl##name( \ + static Object* Builtin_impl##name( \ name##ArgumentsType args, Isolate* isolate); \ - static MaybeObject* Builtin_##name( \ + static Object* Builtin_##name( \ int args_length, Object** args_object, Isolate* isolate) { \ name##ArgumentsType args(args_length, args_object); \ return Builtin_impl##name(args, isolate); \ } \ - static MaybeObject* Builtin_impl##name( \ + static Object* Builtin_impl##name( \ name##ArgumentsType args, Isolate* isolate) #endif @@ -159,7 +138,7 @@ // that the state of the stack is as we assume it to be in the // code below. StackFrameIterator it(isolate); - ASSERT(it.frame()->is_exit()); + DCHECK(it.frame()->is_exit()); it.Advance(); StackFrame* frame = it.frame(); bool reference_result = frame->is_construct(); @@ -176,7 +155,7 @@ const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT); Object* marker = Memory::Object_at(caller_fp + kMarkerOffset); bool result = (marker == kConstructMarker); - ASSERT_EQ(result, reference_result); + DCHECK_EQ(result, reference_result); return result; } #endif @@ -195,123 +174,51 @@ } -static void MoveDoubleElements(FixedDoubleArray* dst, - int dst_index, - FixedDoubleArray* src, - int src_index, - int len) { +static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index, + FixedDoubleArray* src, int src_index, int len) { if (len == 0) return; - OS::MemMove(dst->data_start() + dst_index, - src->data_start() + src_index, - len * kDoubleSize); -} - - -static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) { - ASSERT(dst->map() != heap->fixed_cow_array_map()); - MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from); -} - - -static void FillWithHoles(FixedDoubleArray* dst, int from, int to) { - for (int i = from; i < to; i++) { - dst->set_the_hole(i); - } -} - - -static FixedArrayBase* LeftTrimFixedArray(Heap* heap, - FixedArrayBase* elms, - int to_trim) { - Map* map = elms->map(); - int 
entry_size; - if (elms->IsFixedArray()) { - entry_size = kPointerSize; - } else { - entry_size = kDoubleSize; - } - ASSERT(elms->map() != heap->fixed_cow_array_map()); - // For now this trick is only applied to fixed arrays in new and paged space. - // In large object space the object's start must coincide with chunk - // and thus the trick is just not applicable. - ASSERT(!heap->lo_space()->Contains(elms)); - - STATIC_ASSERT(FixedArrayBase::kMapOffset == 0); - STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize); - STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); - - Object** former_start = HeapObject::RawField(elms, 0); - - const int len = elms->length(); - - if (to_trim * entry_size > FixedArrayBase::kHeaderSize && - elms->IsFixedArray() && - !heap->new_space()->Contains(elms)) { - // If we are doing a big trim in old space then we zap the space that was - // formerly part of the array so that the GC (aided by the card-based - // remembered set) won't find pointers to new-space there. - Object** zap = reinterpret_cast<Object**>(elms->address()); - zap++; // Header of filler must be at least one word so skip that. - for (int i = 1; i < to_trim; i++) { - *zap++ = Smi::FromInt(0); - } - } - // Technically in new space this write might be omitted (except for - // debug mode which iterates through the heap), but to play safer - // we still do it. - heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size); - - int new_start_index = to_trim * (entry_size / kPointerSize); - former_start[new_start_index] = map; - former_start[new_start_index + 1] = Smi::FromInt(len - to_trim); - - // Maintain marking consistency for HeapObjectIterator and - // IncrementalMarking. 
- int size_delta = to_trim * entry_size; - Address new_start = elms->address() + size_delta; - heap->marking()->TransferMark(elms->address(), new_start); - heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR); - - FixedArrayBase* new_elms = - FixedArrayBase::cast(HeapObject::FromAddress(new_start)); - HeapProfiler* profiler = heap->isolate()->heap_profiler(); - if (profiler->is_tracking_object_moves()) { - profiler->ObjectMoveEvent(elms->address(), - new_elms->address(), - new_elms->Size()); - } - return new_elms; + MemMove(dst->data_start() + dst_index, src->data_start() + src_index, + len * kDoubleSize); } static bool ArrayPrototypeHasNoElements(Heap* heap, Context* native_context, JSObject* array_proto) { + DisallowHeapAllocation no_gc; // This method depends on non writability of Object and Array prototype // fields. if (array_proto->elements() != heap->empty_fixed_array()) return false; // Object.prototype - Object* proto = array_proto->GetPrototype(); - if (proto == heap->null_value()) return false; - array_proto = JSObject::cast(proto); + PrototypeIterator iter(heap->isolate(), array_proto); + if (iter.IsAtEnd()) { + return false; + } + array_proto = JSObject::cast(iter.GetCurrent()); if (array_proto != native_context->initial_object_prototype()) return false; if (array_proto->elements() != heap->empty_fixed_array()) return false; - return array_proto->GetPrototype()->IsNull(); + iter.Advance(); + return iter.IsAtEnd(); } // Returns empty handle if not applicable. 
MUST_USE_RESULT -static inline Handle<FixedArrayBase> EnsureJSArrayWithWritableFastElements( +static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements( Isolate* isolate, Handle<Object> receiver, Arguments* args, int first_added_arg) { - if (!receiver->IsJSArray()) return Handle<FixedArrayBase>::null(); + if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>(); Handle<JSArray> array = Handle<JSArray>::cast(receiver); - if (array->map()->is_observed()) return Handle<FixedArrayBase>::null(); - if (!array->map()->is_extensible()) return Handle<FixedArrayBase>::null(); - Handle<FixedArrayBase> elms(array->elements()); + // If there may be elements accessors in the prototype chain, the fast path + // cannot be used if there arguments to add to the array. + if (args != NULL && array->map()->DictionaryElementsInPrototypeChainOnly()) { + return MaybeHandle<FixedArrayBase>(); + } + if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>(); + if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>(); + Handle<FixedArrayBase> elms(array->elements(), isolate); Heap* heap = isolate->heap(); Map* map = elms->map(); if (map == heap->fixed_array_map()) { @@ -322,73 +229,78 @@ } else if (map == heap->fixed_double_array_map()) { if (args == NULL) return elms; } else { - return Handle<FixedArrayBase>::null(); + return MaybeHandle<FixedArrayBase>(); } // Need to ensure that the arguments passed in args can be contained in // the array. 
int args_length = args->length(); - if (first_added_arg >= args_length) return handle(array->elements()); + if (first_added_arg >= args_length) return handle(array->elements(), isolate); ElementsKind origin_kind = array->map()->elements_kind(); - ASSERT(!IsFastObjectElementsKind(origin_kind)); + DCHECK(!IsFastObjectElementsKind(origin_kind)); ElementsKind target_kind = origin_kind; - int arg_count = args->length() - first_added_arg; - Object** arguments = args->arguments() - first_added_arg - (arg_count - 1); - for (int i = 0; i < arg_count; i++) { - Object* arg = arguments[i]; - if (arg->IsHeapObject()) { - if (arg->IsHeapNumber()) { - target_kind = FAST_DOUBLE_ELEMENTS; - } else { - target_kind = FAST_ELEMENTS; - break; + { + DisallowHeapAllocation no_gc; + int arg_count = args->length() - first_added_arg; + Object** arguments = args->arguments() - first_added_arg - (arg_count - 1); + for (int i = 0; i < arg_count; i++) { + Object* arg = arguments[i]; + if (arg->IsHeapObject()) { + if (arg->IsHeapNumber()) { + target_kind = FAST_DOUBLE_ELEMENTS; + } else { + target_kind = FAST_ELEMENTS; + break; + } } } } if (target_kind != origin_kind) { JSObject::TransitionElementsKind(array, target_kind); - return handle(array->elements()); + return handle(array->elements(), isolate); } return elms; } -// TODO(ishell): Handlify when all Array* builtins are handlified. 
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap, JSArray* receiver) { if (!FLAG_clever_optimizations) return false; + DisallowHeapAllocation no_gc; Context* native_context = heap->isolate()->context()->native_context(); JSObject* array_proto = JSObject::cast(native_context->array_function()->prototype()); - return receiver->GetPrototype() == array_proto && + PrototypeIterator iter(heap->isolate(), receiver); + return iter.GetCurrent() == array_proto && ArrayPrototypeHasNoElements(heap, native_context, array_proto); } -MUST_USE_RESULT static MaybeObject* CallJsBuiltin( +MUST_USE_RESULT static Object* CallJsBuiltin( Isolate* isolate, const char* name, BuiltinArguments<NO_EXTRA_ARGUMENTS> args) { HandleScope handleScope(isolate); - Handle<Object> js_builtin = - GetProperty(Handle<JSObject>(isolate->native_context()->builtins()), - name); + Handle<Object> js_builtin = Object::GetProperty( + isolate, + handle(isolate->native_context()->builtins(), isolate), + name).ToHandleChecked(); Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin); int argc = args.length() - 1; ScopedVector<Handle<Object> > argv(argc); for (int i = 0; i < argc; ++i) { argv[i] = args.at<Object>(i + 1); } - bool pending_exception; - Handle<Object> result = Execution::Call(isolate, - function, - args.receiver(), - argc, - argv.start(), - &pending_exception); - if (pending_exception) return Failure::Exception(); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, + function, + args.receiver(), + argc, + argv.start())); return *result; } @@ -396,26 +308,31 @@ BUILTIN(ArrayPush) { HandleScope scope(isolate); Handle<Object> receiver = args.receiver(); - Handle<FixedArrayBase> elms_obj = + MaybeHandle<FixedArrayBase> maybe_elms_obj = EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1); - if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPush", args); + Handle<FixedArrayBase> elms_obj; + if 
(!maybe_elms_obj.ToHandle(&elms_obj)) { + return CallJsBuiltin(isolate, "ArrayPush", args); + } Handle<JSArray> array = Handle<JSArray>::cast(receiver); - ASSERT(!array->map()->is_observed()); + int len = Smi::cast(array->length())->value(); + int to_add = args.length() - 1; + if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) { + return CallJsBuiltin(isolate, "ArrayPush", args); + } + DCHECK(!array->map()->is_observed()); ElementsKind kind = array->GetElementsKind(); if (IsFastSmiOrObjectElementsKind(kind)) { Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); - - int len = Smi::cast(array->length())->value(); - int to_add = args.length() - 1; if (to_add == 0) { return Smi::FromInt(len); } // Currently fixed arrays cannot grow too big, so // we should never hit this case. - ASSERT(to_add <= (Smi::kMaxValue - len)); + DCHECK(to_add <= (Smi::kMaxValue - len)); int new_length = len + to_add; @@ -427,8 +344,8 @@ ElementsAccessor* accessor = array->GetElementsAccessor(); accessor->CopyElements( - Handle<JSObject>::null(), 0, kind, new_elms, 0, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); + elms_obj, 0, kind, new_elms, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole); elms = new_elms; } @@ -448,16 +365,13 @@ array->set_length(Smi::FromInt(new_length)); return Smi::FromInt(new_length); } else { - int len = Smi::cast(array->length())->value(); int elms_len = elms_obj->length(); - - int to_add = args.length() - 1; if (to_add == 0) { return Smi::FromInt(len); } // Currently fixed arrays cannot grow too big, so // we should never hit this case. - ASSERT(to_add <= (Smi::kMaxValue - len)); + DCHECK(to_add <= (Smi::kMaxValue - len)); int new_length = len + to_add; @@ -466,12 +380,15 @@ if (new_length > elms_len) { // New backing storage is needed. 
int capacity = new_length + (new_length >> 1) + 16; - new_elms = isolate->factory()->NewFixedDoubleArray(capacity); + // Create new backing store; since capacity > 0, we can + // safely cast to FixedDoubleArray. + new_elms = Handle<FixedDoubleArray>::cast( + isolate->factory()->NewFixedDoubleArray(capacity)); ElementsAccessor* accessor = array->GetElementsAccessor(); accessor->CopyElements( - Handle<JSObject>::null(), 0, kind, new_elms, 0, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); + elms_obj, 0, kind, new_elms, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole); } else { // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the @@ -498,46 +415,32 @@ } -// TODO(ishell): Temporary wrapper until handlified. -static bool ElementsAccessorHasElementWrapper( - ElementsAccessor* accessor, - Handle<Object> receiver, - Handle<JSObject> holder, - uint32_t key, - Handle<FixedArrayBase> backing_store = Handle<FixedArrayBase>::null()) { - return accessor->HasElement(*receiver, *holder, key, - backing_store.is_null() ? 
NULL : *backing_store); -} - - BUILTIN(ArrayPop) { HandleScope scope(isolate); Handle<Object> receiver = args.receiver(); - Handle<FixedArrayBase> elms_obj = + MaybeHandle<FixedArrayBase> maybe_elms_obj = EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0); - if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPop", args); + Handle<FixedArrayBase> elms_obj; + if (!maybe_elms_obj.ToHandle(&elms_obj)) { + return CallJsBuiltin(isolate, "ArrayPop", args); + } Handle<JSArray> array = Handle<JSArray>::cast(receiver); - ASSERT(!array->map()->is_observed()); + DCHECK(!array->map()->is_observed()); int len = Smi::cast(array->length())->value(); if (len == 0) return isolate->heap()->undefined_value(); ElementsAccessor* accessor = array->GetElementsAccessor(); int new_length = len - 1; - Handle<Object> element; - if (ElementsAccessorHasElementWrapper( - accessor, array, array, new_length, elms_obj)) { - element = accessor->Get( - array, array, new_length, elms_obj); - } else { - Handle<Object> proto(array->GetPrototype(), isolate); - element = Object::GetElement(isolate, proto, len - 1); - } - RETURN_IF_EMPTY_HANDLE(isolate, element); - RETURN_IF_EMPTY_HANDLE(isolate, - accessor->SetLength( - array, handle(Smi::FromInt(new_length), isolate))); + Handle<Object> element = + accessor->Get(array, array, new_length, elms_obj).ToHandleChecked(); + if (element->IsTheHole()) { + return CallJsBuiltin(isolate, "ArrayPop", args); + } + RETURN_FAILURE_ON_EXCEPTION( + isolate, + accessor->SetLength(array, handle(Smi::FromInt(new_length), isolate))); return *element; } @@ -546,29 +449,30 @@ HandleScope scope(isolate); Heap* heap = isolate->heap(); Handle<Object> receiver = args.receiver(); - Handle<FixedArrayBase> elms_obj = + MaybeHandle<FixedArrayBase> maybe_elms_obj = EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0); - if (elms_obj.is_null() || + Handle<FixedArrayBase> elms_obj; + if (!maybe_elms_obj.ToHandle(&elms_obj) || 
!IsJSArrayFastElementMovingAllowed(heap, *Handle<JSArray>::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayShift", args); } Handle<JSArray> array = Handle<JSArray>::cast(receiver); - ASSERT(!array->map()->is_observed()); + DCHECK(!array->map()->is_observed()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); // Get first element ElementsAccessor* accessor = array->GetElementsAccessor(); - Handle<Object> first = accessor->Get(receiver, array, 0, elms_obj); - RETURN_IF_EMPTY_HANDLE(isolate, first); + Handle<Object> first = + accessor->Get(array, array, 0, elms_obj).ToHandleChecked(); if (first->IsTheHole()) { - first = isolate->factory()->undefined_value(); + return CallJsBuiltin(isolate, "ArrayShift", args); } - if (!heap->CanMoveObjectStart(*elms_obj)) { - array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1)); + if (heap->CanMoveObjectStart(*elms_obj)) { + array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1)); } else { // Shift the elements. 
if (elms_obj->IsFixedArray()) { @@ -594,26 +498,31 @@ HandleScope scope(isolate); Heap* heap = isolate->heap(); Handle<Object> receiver = args.receiver(); - Handle<FixedArrayBase> elms_obj = + MaybeHandle<FixedArrayBase> maybe_elms_obj = EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0); - if (elms_obj.is_null() || + Handle<FixedArrayBase> elms_obj; + if (!maybe_elms_obj.ToHandle(&elms_obj) || !IsJSArrayFastElementMovingAllowed(heap, *Handle<JSArray>::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayUnshift", args); } Handle<JSArray> array = Handle<JSArray>::cast(receiver); - ASSERT(!array->map()->is_observed()); + DCHECK(!array->map()->is_observed()); if (!array->HasFastSmiOrObjectElements()) { return CallJsBuiltin(isolate, "ArrayUnshift", args); } - Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); - int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; int new_length = len + to_add; // Currently fixed arrays cannot grow too big, so // we should never hit this case. 
- ASSERT(to_add <= (Smi::kMaxValue - len)); + DCHECK(to_add <= (Smi::kMaxValue - len)); + + if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) { + return CallJsBuiltin(isolate, "ArrayUnshift", args); + } + + Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); JSObject::EnsureCanContainElements(array, &args, 1, to_add, DONT_ALLOW_DOUBLE_ELEMENTS); @@ -627,8 +536,8 @@ ElementsKind kind = array->GetElementsKind(); ElementsAccessor* accessor = array->GetElementsAccessor(); accessor->CopyElements( - Handle<JSObject>::null(), 0, kind, new_elms, to_add, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms); + elms, 0, kind, new_elms, to_add, + ElementsAccessor::kCopyToEndAndInitializeToHole); elms = new_elms; array->set_elements(*elms); @@ -654,87 +563,95 @@ HandleScope scope(isolate); Heap* heap = isolate->heap(); Handle<Object> receiver = args.receiver(); - Handle<FixedArrayBase> elms; int len = -1; - if (receiver->IsJSArray()) { - Handle<JSArray> array = Handle<JSArray>::cast(receiver); - if (!IsJSArrayFastElementMovingAllowed(heap, *array)) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - - if (array->HasFastElements()) { - elms = handle(array->elements()); - } else { - return CallJsBuiltin(isolate, "ArraySlice", args); - } + int relative_start = 0; + int relative_end = 0; + { + DisallowHeapAllocation no_gc; + if (receiver->IsJSArray()) { + JSArray* array = JSArray::cast(*receiver); + if (!IsJSArrayFastElementMovingAllowed(heap, array)) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } - len = Smi::cast(array->length())->value(); - } else { - // Array.slice(arguments, ...) is quite a common idiom (notably more - // than 50% of invocations in Web apps). Treat it in C++ as well. 
- Handle<Map> arguments_map(isolate->context()->native_context()-> - sloppy_arguments_boilerplate()->map()); - - bool is_arguments_object_with_fast_elements = - receiver->IsJSObject() && - Handle<JSObject>::cast(receiver)->map() == *arguments_map; - if (!is_arguments_object_with_fast_elements) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - Handle<JSObject> object = Handle<JSObject>::cast(receiver); + if (!array->HasFastElements()) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } - if (object->HasFastElements()) { - elms = handle(object->elements()); + len = Smi::cast(array->length())->value(); } else { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - Handle<Object> len_obj( - object->InObjectPropertyAt(Heap::kArgumentsLengthIndex), isolate); - if (!len_obj->IsSmi()) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - len = Handle<Smi>::cast(len_obj)->value(); - if (len > elms->length()) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - } - - Handle<JSObject> object = Handle<JSObject>::cast(receiver); + // Array.slice(arguments, ...) is quite a common idiom (notably more + // than 50% of invocations in Web apps). Treat it in C++ as well. 
+ Map* arguments_map = + isolate->context()->native_context()->sloppy_arguments_map(); + + bool is_arguments_object_with_fast_elements = + receiver->IsJSObject() && + JSObject::cast(*receiver)->map() == arguments_map; + if (!is_arguments_object_with_fast_elements) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } + JSObject* object = JSObject::cast(*receiver); - ASSERT(len >= 0); - int n_arguments = args.length() - 1; + if (!object->HasFastElements()) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } - // Note carefully choosen defaults---if argument is missing, - // it's undefined which gets converted to 0 for relative_start - // and to len for relative_end. - int relative_start = 0; - int relative_end = len; - if (n_arguments > 0) { - Handle<Object> arg1 = args.at<Object>(1); - if (arg1->IsSmi()) { - relative_start = Handle<Smi>::cast(arg1)->value(); - } else if (arg1->IsHeapNumber()) { - double start = Handle<HeapNumber>::cast(arg1)->value(); - if (start < kMinInt || start > kMaxInt) { + Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex); + if (!len_obj->IsSmi()) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } + len = Smi::cast(len_obj)->value(); + if (len > object->elements()->length()) { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySlice", args); } - relative_start = std::isnan(start) ? 
0 : static_cast<int>(start); - } else if (!arg1->IsUndefined()) { - return CallJsBuiltin(isolate, "ArraySlice", args); } - if (n_arguments > 1) { - Handle<Object> arg2 = args.at<Object>(2); - if (arg2->IsSmi()) { - relative_end = Handle<Smi>::cast(arg2)->value(); - } else if (arg2->IsHeapNumber()) { - double end = Handle<HeapNumber>::cast(arg2)->value(); - if (end < kMinInt || end > kMaxInt) { + + DCHECK(len >= 0); + int n_arguments = args.length() - 1; + + // Note carefully choosen defaults---if argument is missing, + // it's undefined which gets converted to 0 for relative_start + // and to len for relative_end. + relative_start = 0; + relative_end = len; + if (n_arguments > 0) { + Object* arg1 = args[1]; + if (arg1->IsSmi()) { + relative_start = Smi::cast(arg1)->value(); + } else if (arg1->IsHeapNumber()) { + double start = HeapNumber::cast(arg1)->value(); + if (start < kMinInt || start > kMaxInt) { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySlice", args); } - relative_end = std::isnan(end) ? 0 : static_cast<int>(end); - } else if (!arg2->IsUndefined()) { + relative_start = std::isnan(start) ? 0 : static_cast<int>(start); + } else if (!arg1->IsUndefined()) { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySlice", args); } + if (n_arguments > 1) { + Object* arg2 = args[2]; + if (arg2->IsSmi()) { + relative_end = Smi::cast(arg2)->value(); + } else if (arg2->IsHeapNumber()) { + double end = HeapNumber::cast(arg2)->value(); + if (end < kMinInt || end > kMaxInt) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } + relative_end = std::isnan(end) ? 0 : static_cast<int>(end); + } else if (!arg2->IsUndefined()) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArraySlice", args); + } + } } } @@ -749,13 +666,16 @@ // Calculate the length of result array. 
int result_len = Max(final - k, 0); + Handle<JSObject> object = Handle<JSObject>::cast(receiver); + Handle<FixedArrayBase> elms(object->elements(), isolate); + ElementsKind kind = object->GetElementsKind(); if (IsHoleyElementsKind(kind)) { + DisallowHeapAllocation no_gc; bool packed = true; ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); for (int i = k; i < final; i++) { - if (!ElementsAccessorHasElementWrapper( - accessor, object, object, i, elms)) { + if (!accessor->HasElement(object, object, i, elms)) { packed = false; break; } @@ -763,6 +683,7 @@ if (packed) { kind = GetPackedElementsKind(kind); } else if (!receiver->IsJSArray()) { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySlice", args); } } @@ -774,8 +695,8 @@ if (result_len == 0) return *result_array; ElementsAccessor* accessor = object->GetElementsAccessor(); - accessor->CopyElements(Handle<JSObject>::null(), k, kind, - handle(result_array->elements()), 0, result_len, elms); + accessor->CopyElements( + elms, k, kind, handle(result_array->elements(), isolate), 0, result_len); return *result_array; } @@ -784,15 +705,16 @@ HandleScope scope(isolate); Heap* heap = isolate->heap(); Handle<Object> receiver = args.receiver(); - Handle<FixedArrayBase> elms_obj = + MaybeHandle<FixedArrayBase> maybe_elms_obj = EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3); - if (elms_obj.is_null() || + Handle<FixedArrayBase> elms_obj; + if (!maybe_elms_obj.ToHandle(&elms_obj) || !IsJSArrayFastElementMovingAllowed(heap, *Handle<JSArray>::cast(receiver))) { return CallJsBuiltin(isolate, "ArraySplice", args); } Handle<JSArray> array = Handle<JSArray>::cast(receiver); - ASSERT(!array->map()->is_observed()); + DCHECK(!array->map()->is_observed()); int len = Smi::cast(array->length())->value(); @@ -800,16 +722,19 @@ int relative_start = 0; if (n_arguments > 0) { - Handle<Object> arg1 = args.at<Object>(1); + DisallowHeapAllocation no_gc; + Object* arg1 = args[1]; if 
(arg1->IsSmi()) { - relative_start = Handle<Smi>::cast(arg1)->value(); + relative_start = Smi::cast(arg1)->value(); } else if (arg1->IsHeapNumber()) { - double start = Handle<HeapNumber>::cast(arg1)->value(); + double start = HeapNumber::cast(arg1)->value(); if (start < kMinInt || start > kMaxInt) { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySplice", args); } relative_start = std::isnan(start) ? 0 : static_cast<int>(start); } else if (!arg1->IsUndefined()) { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySplice", args); } } @@ -823,15 +748,17 @@ // compatibility. int actual_delete_count; if (n_arguments == 1) { - ASSERT(len - actual_start >= 0); + DCHECK(len - actual_start >= 0); actual_delete_count = len - actual_start; } else { int value = 0; // ToInteger(undefined) == 0 if (n_arguments > 1) { + DisallowHeapAllocation no_gc; Object* arg2 = args[2]; if (arg2->IsSmi()) { value = Smi::cast(arg2)->value(); } else { + AllowHeapAllocation allow_allocation; return CallJsBuiltin(isolate, "ArraySplice", args); } } @@ -865,8 +792,8 @@ DisallowHeapAllocation no_gc; ElementsAccessor* accessor = array->GetElementsAccessor(); accessor->CopyElements( - Handle<JSObject>::null(), actual_start, elements_kind, - handle(result_array->elements()), 0, actual_delete_count, elms_obj); + elms_obj, actual_start, elements_kind, + handle(result_array->elements(), isolate), 0, actual_delete_count); } bool elms_changed = false; @@ -890,7 +817,7 @@ if (heap->CanMoveObjectStart(*elms_obj)) { // On the fast path we move the start of the object in memory. - elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta)); + elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta)); } else { // This is the slow path. We are going to move the elements to the left // by copying them. For trimmed values we store the hole. 
@@ -898,12 +825,12 @@ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj); MoveDoubleElements(*elms, 0, *elms, delta, len - delta); - FillWithHoles(*elms, len - delta, len); + elms->FillWithHoles(len - delta, len); } else { Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); DisallowHeapAllocation no_gc; heap->MoveElements(*elms, 0, delta, len - delta); - FillWithHoles(heap, *elms, len - delta, len); + elms->FillWithHoles(len - delta, len); } } elms_changed = true; @@ -914,21 +841,21 @@ MoveDoubleElements(*elms, actual_start + item_count, *elms, actual_start + actual_delete_count, (len - actual_delete_count - actual_start)); - FillWithHoles(*elms, new_length, len); + elms->FillWithHoles(new_length, len); } else { Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); DisallowHeapAllocation no_gc; heap->MoveElements(*elms, actual_start + item_count, actual_start + actual_delete_count, (len - actual_delete_count - actual_start)); - FillWithHoles(heap, *elms, new_length, len); + elms->FillWithHoles(new_length, len); } } } else if (item_count > actual_delete_count) { Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); // Currently fixed arrays cannot grow too big, so // we should never hit this case. - ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len)); + DCHECK((item_count - actual_delete_count) <= (Smi::kMaxValue - len)); // Check if array need to grow. if (new_length > elms->length()) { @@ -944,12 +871,12 @@ if (actual_start > 0) { // Copy the part before actual_start as is. 
accessor->CopyElements( - Handle<JSObject>::null(), 0, kind, new_elms, 0, actual_start, elms); + elms, 0, kind, new_elms, 0, actual_start); } accessor->CopyElements( - Handle<JSObject>::null(), actual_start + actual_delete_count, kind, + elms, actual_start + actual_delete_count, kind, new_elms, actual_start + item_count, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms); + ElementsAccessor::kCopyToEndAndInitializeToHole); elms_obj = new_elms; elms_changed = true; @@ -992,51 +919,57 @@ BUILTIN(ArrayConcat) { HandleScope scope(isolate); - Heap* heap = isolate->heap(); - Handle<Context> native_context(isolate->context()->native_context()); - Handle<JSObject> array_proto( - JSObject::cast(native_context->array_function()->prototype())); - if (!ArrayPrototypeHasNoElements(heap, *native_context, *array_proto)) { - return CallJsBuiltin(isolate, "ArrayConcat", args); - } - // Iterate through all the arguments performing checks - // and calculating total length. int n_arguments = args.length(); int result_len = 0; ElementsKind elements_kind = GetInitialFastElementsKind(); bool has_double = false; - bool is_holey = false; - for (int i = 0; i < n_arguments; i++) { - Handle<Object> arg = args.at<Object>(i); - if (!arg->IsJSArray() || - !Handle<JSArray>::cast(arg)->HasFastElements() || - Handle<JSArray>::cast(arg)->GetPrototype() != *array_proto) { - return CallJsBuiltin(isolate, "ArrayConcat", args); - } - int len = Smi::cast(Handle<JSArray>::cast(arg)->length())->value(); - - // We shouldn't overflow when adding another len. 
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2); - STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt); - USE(kHalfOfMaxInt); - result_len += len; - ASSERT(result_len >= 0); + { + DisallowHeapAllocation no_gc; + Heap* heap = isolate->heap(); + Context* native_context = isolate->context()->native_context(); + JSObject* array_proto = + JSObject::cast(native_context->array_function()->prototype()); + if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArrayConcatJS", args); + } + + // Iterate through all the arguments performing checks + // and calculating total length. + bool is_holey = false; + for (int i = 0; i < n_arguments; i++) { + Object* arg = args[i]; + PrototypeIterator iter(isolate, arg); + if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() || + iter.GetCurrent() != array_proto) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArrayConcatJS", args); + } + int len = Smi::cast(JSArray::cast(arg)->length())->value(); - if (result_len > FixedDoubleArray::kMaxLength) { - return CallJsBuiltin(isolate, "ArrayConcat", args); - } + // We shouldn't overflow when adding another len. 
+ const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2); + STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt); + USE(kHalfOfMaxInt); + result_len += len; + DCHECK(result_len >= 0); + + if (result_len > FixedDoubleArray::kMaxLength) { + AllowHeapAllocation allow_allocation; + return CallJsBuiltin(isolate, "ArrayConcatJS", args); + } - ElementsKind arg_kind = Handle<JSArray>::cast(arg)->map()->elements_kind(); - has_double = has_double || IsFastDoubleElementsKind(arg_kind); - is_holey = is_holey || IsFastHoleyElementsKind(arg_kind); - if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) { - elements_kind = arg_kind; + ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind(); + has_double = has_double || IsFastDoubleElementsKind(arg_kind); + is_holey = is_holey || IsFastHoleyElementsKind(arg_kind); + if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) { + elements_kind = arg_kind; + } } + if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind); } - if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind); - // If a double array is concatted into a fast elements array, the fast // elements array needs to be initialized to contain proper holes, since // boxing doubles may cause incremental marking. @@ -1051,10 +984,12 @@ if (result_len == 0) return *result_array; int j = 0; - Handle<FixedArrayBase> storage(result_array->elements()); + Handle<FixedArrayBase> storage(result_array->elements(), isolate); ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind); for (int i = 0; i < n_arguments; i++) { - Handle<JSArray> array = args.at<JSArray>(i); + // TODO(ishell): It is crucial to keep |array| as a raw pointer to avoid + // performance degradation. Revisit this later. 
+ JSArray* array = JSArray::cast(args[i]); int len = Smi::cast(array->length())->value(); ElementsKind from_kind = array->GetElementsKind(); if (len > 0) { @@ -1063,14 +998,14 @@ } } - ASSERT(j == result_len); + DCHECK(j == result_len); return *result_array; } // ----------------------------------------------------------------------------- -// Strict mode poison pills +// Generator and strict mode poison pills BUILTIN(StrictModePoisonPill) { @@ -1080,6 +1015,13 @@ } +BUILTIN(GeneratorPoisonPill) { + HandleScope scope(isolate); + return isolate->Throw(*isolate->factory()->NewTypeError( + "generator_poison_pill", HandleVector<Object>(NULL, 0))); +} + + // ----------------------------------------------------------------------------- // @@ -1090,11 +1032,12 @@ static inline Object* FindHidden(Heap* heap, Object* object, FunctionTemplateInfo* type) { - if (type->IsTemplateFor(object)) return object; - Object* proto = object->GetPrototype(heap->isolate()); - if (proto->IsJSObject() && - JSObject::cast(proto)->map()->is_hidden_prototype()) { - return FindHidden(heap, proto, type); + for (PrototypeIterator iter(heap->isolate(), object, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) { + if (type->IsTemplateFor(iter.GetCurrent())) { + return iter.GetCurrent(); + } } return heap->null_value(); } @@ -1143,36 +1086,32 @@ template <bool is_construct> -MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( +MUST_USE_RESULT static Object* HandleApiCallHelper( BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) { - ASSERT(is_construct == CalledAsConstructor(isolate)); + DCHECK(is_construct == CalledAsConstructor(isolate)); Heap* heap = isolate->heap(); HandleScope scope(isolate); Handle<JSFunction> function = args.called_function(); - ASSERT(function->shared()->IsApiFunction()); + DCHECK(function->shared()->IsApiFunction()); - FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data(); + 
Handle<FunctionTemplateInfo> fun_data( + function->shared()->get_api_func_data(), isolate); if (is_construct) { - Handle<FunctionTemplateInfo> desc(fun_data, isolate); - bool pending_exception = false; - isolate->factory()->ConfigureInstance( - desc, Handle<JSObject>::cast(args.receiver()), &pending_exception); - ASSERT(isolate->has_pending_exception() == pending_exception); - if (pending_exception) return Failure::Exception(); - fun_data = *desc; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, fun_data, + isolate->factory()->ConfigureInstance( + fun_data, Handle<JSObject>::cast(args.receiver()))); } SharedFunctionInfo* shared = function->shared(); if (shared->strict_mode() == SLOPPY && !shared->native()) { Object* recv = args[0]; - ASSERT(!recv->IsNull()); - if (recv->IsUndefined()) { - args[0] = function->context()->global_object()->global_receiver(); - } + DCHECK(!recv->IsNull()); + if (recv->IsUndefined()) args[0] = function->global_proxy(); } - Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data); + Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data); if (raw_holder->IsNull()) { // This function cannot be called with the given receiver. Abort! @@ -1192,7 +1131,7 @@ Object* result; LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver()))); - ASSERT(raw_holder->IsJSObject()); + DCHECK(raw_holder->IsJSObject()); FunctionCallbackArguments custom(isolate, data_obj, @@ -1210,7 +1149,7 @@ result->VerifyApiCallResultType(); } - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); if (!is_construct || result->IsJSObject()) return result; } @@ -1231,13 +1170,13 @@ // Helper function to handle calls to non-function objects created through the // API. The object can be called as either a constructor (using new) or just as // a function (without new). 
-MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor( +MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor( Isolate* isolate, bool is_construct_call, BuiltinArguments<NO_EXTRA_ARGUMENTS> args) { // Non-functions are never called as constructors. Even if this is an object // called as a constructor the delegate call is not a construct call. - ASSERT(!CalledAsConstructor(isolate)); + DCHECK(!CalledAsConstructor(isolate)); Heap* heap = isolate->heap(); Handle<Object> receiver = args.receiver(); @@ -1247,12 +1186,12 @@ // Get the invocation callback from the function descriptor that was // used to create the called object. - ASSERT(obj->map()->has_instance_call_handler()); + DCHECK(obj->map()->has_instance_call_handler()); JSFunction* constructor = JSFunction::cast(obj->map()->constructor()); - ASSERT(constructor->shared()->IsApiFunction()); + DCHECK(constructor->shared()->IsApiFunction()); Object* handler = constructor->shared()->get_api_func_data()->instance_call_handler(); - ASSERT(!handler->IsUndefined()); + DCHECK(!handler->IsUndefined()); CallHandlerInfo* call_data = CallHandlerInfo::cast(handler); Object* callback_obj = call_data->callback(); v8::FunctionCallback callback = @@ -1280,7 +1219,7 @@ } } // Check for exceptions and return result. 
- RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return result; } @@ -1310,7 +1249,7 @@ static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) { - LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm); + NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm); } @@ -1375,7 +1314,7 @@ static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) { - StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm); + NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm); } @@ -1424,73 +1363,70 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT +static void Generate_CallICStub_DebugBreak(MacroAssembler* masm) { + DebugCodegen::GenerateCallICStubDebugBreak(masm); +} + + static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) { - Debug::GenerateLoadICDebugBreak(masm); + DebugCodegen::GenerateLoadICDebugBreak(masm); } static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) { - Debug::GenerateStoreICDebugBreak(masm); + DebugCodegen::GenerateStoreICDebugBreak(masm); } static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) { - Debug::GenerateKeyedLoadICDebugBreak(masm); + DebugCodegen::GenerateKeyedLoadICDebugBreak(masm); } static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) { - Debug::GenerateKeyedStoreICDebugBreak(masm); + DebugCodegen::GenerateKeyedStoreICDebugBreak(masm); } static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) { - Debug::GenerateCompareNilICDebugBreak(masm); + DebugCodegen::GenerateCompareNilICDebugBreak(masm); } static void Generate_Return_DebugBreak(MacroAssembler* masm) { - Debug::GenerateReturnDebugBreak(masm); + DebugCodegen::GenerateReturnDebugBreak(masm); } static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) { - Debug::GenerateCallFunctionStubDebugBreak(masm); -} - - -static void Generate_CallFunctionStub_Recording_DebugBreak( - MacroAssembler* masm) { - Debug::GenerateCallFunctionStubRecordDebugBreak(masm); + 
DebugCodegen::GenerateCallFunctionStubDebugBreak(masm); } static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) { - Debug::GenerateCallConstructStubDebugBreak(masm); + DebugCodegen::GenerateCallConstructStubDebugBreak(masm); } static void Generate_CallConstructStub_Recording_DebugBreak( MacroAssembler* masm) { - Debug::GenerateCallConstructStubRecordDebugBreak(masm); + DebugCodegen::GenerateCallConstructStubRecordDebugBreak(masm); } static void Generate_Slot_DebugBreak(MacroAssembler* masm) { - Debug::GenerateSlotDebugBreak(masm); + DebugCodegen::GenerateSlotDebugBreak(masm); } static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) { - Debug::GeneratePlainReturnLiveEdit(masm); + DebugCodegen::GeneratePlainReturnLiveEdit(masm); } static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) { - Debug::GenerateFrameDropperLiveEdit(masm); + DebugCodegen::GenerateFrameDropperLiveEdit(masm); } -#endif Builtins::Builtins() : initialized_(false) { @@ -1535,11 +1471,11 @@ class BuiltinFunctionTable { public: BuiltinDesc* functions() { - CallOnce(&once_, &Builtins::InitBuiltinFunctionTable); + base::CallOnce(&once_, &Builtins::InitBuiltinFunctionTable); return functions_; } - OnceType once_; + base::OnceType once_; BuiltinDesc functions_[Builtins::builtin_count + 1]; friend class Builtins; @@ -1601,8 +1537,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) { - ASSERT(!initialized_); - Heap* heap = isolate->heap(); + DCHECK(!initialized_); // Create a scope for the handles in the builtins. HandleScope scope(isolate); @@ -1612,9 +1547,13 @@ // For now we generate builtin adaptor code into a stack-allocated // buffer, before copying it into individual code objects. Be careful // with alignment, some platforms don't like unaligned code. - // TODO(jbramley): I had to increase the size of this buffer from 8KB because - // we can generate a lot of debug code on ARM64. 
- union { int force_alignment; byte buffer[16*KB]; } u; +#ifdef DEBUG + // We can generate a lot of debug code on Arm64. + const size_t buffer_size = 32*KB; +#else + const size_t buffer_size = 8*KB; +#endif + union { int force_alignment; byte buffer[buffer_size]; } u; // Traverse the list of builtins and generate an adaptor in a // separate code object for each one. @@ -1627,39 +1566,26 @@ // We pass all arguments to the generator, but it may not use all of // them. This works because the first arguments are on top of the // stack. - ASSERT(!masm.has_frame()); + DCHECK(!masm.has_frame()); g(&masm, functions[i].name, functions[i].extra_args); // Move the code into the object heap. CodeDesc desc; masm.GetCode(&desc); Code::Flags flags = functions[i].flags; - Object* code = NULL; - { - // During startup it's OK to always allocate and defer GC to later. - // This simplifies things because we don't need to retry. - AlwaysAllocateScope __scope__(isolate); - { MaybeObject* maybe_code = - heap->CreateCode(desc, flags, masm.CodeObject()); - if (!maybe_code->ToObject(&code)) { - v8::internal::V8::FatalProcessOutOfMemory("CreateCode"); - } - } - } + Handle<Code> code = + isolate->factory()->NewCode(desc, flags, masm.CodeObject()); // Log the event and add the code to the builtins array. 
PROFILE(isolate, - CodeCreateEvent(Logger::BUILTIN_TAG, - Code::cast(code), - functions[i].s_name)); - GDBJIT(AddCode(GDBJITInterface::BUILTIN, - functions[i].s_name, - Code::cast(code))); - builtins_[i] = code; + CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name)); + builtins_[i] = *code; + if (code->kind() == Code::BUILTIN) code->set_builtin_index(i); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_builtin_code) { CodeTracer::Scope trace_scope(isolate->GetCodeTracer()); - PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name); - Code::cast(code)->Disassemble(functions[i].s_name, trace_scope.file()); - PrintF(trace_scope.file(), "\n"); + OFStream os(trace_scope.file()); + os << "Builtin: " << functions[i].s_name << "\n"; + code->Disassemble(functions[i].s_name, os); + os << "\n"; } #endif } else { @@ -1699,12 +1625,12 @@ void Builtins::Generate_InterruptCheck(MacroAssembler* masm) { - masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1); + masm->TailCallRuntime(Runtime::kInterrupt, 0, 1); } void Builtins::Generate_StackCheck(MacroAssembler* masm) { - masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1); + masm->TailCallRuntime(Runtime::kStackGuard, 0, 1); } diff -Nru nodejs-0.11.13/deps/v8/src/builtins.h nodejs-0.11.15/deps/v8/src/builtins.h --- nodejs-0.11.13/deps/v8/src/builtins.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/builtins.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_BUILTINS_H_ #define V8_BUILTINS_H_ @@ -82,7 +59,8 @@ V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \ V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \ \ - V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS) + V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS) \ + V(GeneratorPoisonPill, NO_EXTRA_ARGUMENTS) // Define list of builtins implemented in assembly. 
#define BUILTIN_LIST_A(V) \ @@ -90,8 +68,6 @@ kNoExtraICState) \ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, \ kNoExtraICState) \ - V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ - kNoExtraICState) \ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ kNoExtraICState) \ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \ @@ -197,19 +173,18 @@ V(LoadIC_Normal, LOAD_IC) \ V(StoreIC_Normal, STORE_IC) -#ifdef ENABLE_DEBUGGER_SUPPORT // Define list of builtins used by the debugger implemented in assembly. #define BUILTIN_LIST_DEBUG_A(V) \ V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \ DEBUG_BREAK) \ V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \ DEBUG_BREAK) \ - V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \ - DEBUG_BREAK) \ V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \ DEBUG_BREAK) \ V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \ DEBUG_BREAK) \ + V(CallICStub_DebugBreak, CALL_IC, DEBUG_STUB, \ + DEBUG_BREAK) \ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \ DEBUG_BREAK) \ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \ @@ -226,9 +201,6 @@ DEBUG_BREAK) \ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \ DEBUG_BREAK) -#else -#define BUILTIN_LIST_DEBUG_A(V) -#endif // Define list of builtins implemented in JavaScript. 
#define BUILTINS_LIST_JS(V) \ @@ -260,7 +232,7 @@ V(STRING_ADD_LEFT, 1) \ V(STRING_ADD_RIGHT, 1) \ V(APPLY_PREPARE, 1) \ - V(APPLY_OVERFLOW, 1) + V(STACK_OVERFLOW, 1) class BuiltinFunctionTable; class ObjectVisitor; @@ -335,8 +307,8 @@ static const char* GetName(JavaScript id) { return javascript_names_[id]; } const char* name(int index) { - ASSERT(index >= 0); - ASSERT(index < builtin_count); + DCHECK(index >= 0); + DCHECK(index < builtin_count); return names_[index]; } static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; } @@ -366,7 +338,6 @@ static void Generate_InOptimizationQueue(MacroAssembler* masm); static void Generate_CompileOptimized(MacroAssembler* masm); static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm); - static void Generate_JSConstructStubCountdown(MacroAssembler* masm); static void Generate_JSConstructStubGeneric(MacroAssembler* masm); static void Generate_JSConstructStubApi(MacroAssembler* masm); static void Generate_JSEntryTrampoline(MacroAssembler* masm); diff -Nru nodejs-0.11.13/deps/v8/src/bytecodes-irregexp.h nodejs-0.11.15/deps/v8/src/bytecodes-irregexp.h --- nodejs-0.11.13/deps/v8/src/bytecodes-irregexp.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/bytecodes-irregexp.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_BYTECODES_IRREGEXP_H_ diff -Nru nodejs-0.11.13/deps/v8/src/cached-powers.cc nodejs-0.11.15/deps/v8/src/cached-powers.cc --- nodejs-0.11.13/deps/v8/src/cached-powers.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cached-powers.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include <stdarg.h> #include <limits.h> +#include <stdarg.h> #include <cmath> -#include "../include/v8stdint.h" -#include "globals.h" -#include "checks.h" -#include "cached-powers.h" +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/cached-powers.h" +#include "src/globals.h" namespace v8 { namespace internal { @@ -156,10 +133,10 @@ int foo = kCachedPowersOffset; int index = (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1; - ASSERT(0 <= index && index < kCachedPowersLength); + DCHECK(0 <= index && index < kCachedPowersLength); CachedPower cached_power = kCachedPowers[index]; - ASSERT(min_exponent <= cached_power.binary_exponent); - ASSERT(cached_power.binary_exponent <= max_exponent); + DCHECK(min_exponent <= cached_power.binary_exponent); + DCHECK(cached_power.binary_exponent <= max_exponent); *decimal_exponent = cached_power.decimal_exponent; *power = DiyFp(cached_power.significand, cached_power.binary_exponent); } @@ -168,15 +145,15 @@ void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent, DiyFp* power, int* found_exponent) { - ASSERT(kMinDecimalExponent <= requested_exponent); - ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance); + DCHECK(kMinDecimalExponent <= requested_exponent); + DCHECK(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance); int index = (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance; CachedPower cached_power = kCachedPowers[index]; *power = DiyFp(cached_power.significand, cached_power.binary_exponent); *found_exponent = cached_power.decimal_exponent; - ASSERT(*found_exponent <= requested_exponent); - ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance); + DCHECK(*found_exponent <= requested_exponent); + DCHECK(requested_exponent < *found_exponent + kDecimalExponentDistance); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/cached-powers.h 
nodejs-0.11.15/deps/v8/src/cached-powers.h --- nodejs-0.11.13/deps/v8/src/cached-powers.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cached-powers.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,12 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_CACHED_POWERS_H_ #define V8_CACHED_POWERS_H_ -#include "diy-fp.h" +#include "src/base/logging.h" +#include "src/diy-fp.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/char-predicates.h nodejs-0.11.15/deps/v8/src/char-predicates.h --- nodejs-0.11.13/deps/v8/src/char-predicates.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/char-predicates.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CHAR_PREDICATES_H_ #define V8_CHAR_PREDICATES_H_ -#include "unicode.h" +#include "src/unicode.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/char-predicates-inl.h nodejs-0.11.15/deps/v8/src/char-predicates-inl.h --- nodejs-0.11.13/deps/v8/src/char-predicates-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/char-predicates-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CHAR_PREDICATES_INL_H_ #define V8_CHAR_PREDICATES_INL_H_ -#include "char-predicates.h" +#include "src/char-predicates.h" namespace v8 { namespace internal { @@ -53,7 +30,7 @@ inline bool IsInRange(int value, int lower_limit, int higher_limit) { - ASSERT(lower_limit <= higher_limit); + DCHECK(lower_limit <= higher_limit); return static_cast<unsigned int>(value - lower_limit) <= static_cast<unsigned int>(higher_limit - lower_limit); } diff -Nru nodejs-0.11.13/deps/v8/src/checks.cc nodejs-0.11.15/deps/v8/src/checks.cc --- nodejs-0.11.13/deps/v8/src/checks.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/checks.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,116 +1,60 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "checks.h" - -#if V8_LIBC_GLIBC || V8_OS_BSD -# include <cxxabi.h> -# include <execinfo.h> -#elif V8_OS_QNX -# include <backtrace.h> -#endif // V8_LIBC_GLIBC || V8_OS_BSD -#include <stdio.h> +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "platform.h" -#include "v8.h" +#include "src/checks.h" + +#include "src/v8.h" namespace v8 { namespace internal { intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; } -// Attempts to dump a backtrace (if supported). 
-void DumpBacktrace() { -#if V8_LIBC_GLIBC || V8_OS_BSD - void* trace[100]; - int size = backtrace(trace, ARRAY_SIZE(trace)); - char** symbols = backtrace_symbols(trace, size); - OS::PrintError("\n==== C stack trace ===============================\n\n"); - if (size == 0) { - OS::PrintError("(empty)\n"); - } else if (symbols == NULL) { - OS::PrintError("(no symbols)\n"); - } else { - for (int i = 1; i < size; ++i) { - OS::PrintError("%2d: ", i); - char mangled[201]; - if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT - int status; - size_t length; - char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); - OS::PrintError("%s\n", demangled != NULL ? demangled : mangled); - free(demangled); - } else { - OS::PrintError("??\n"); - } - } - } - free(symbols); -#elif V8_OS_QNX - char out[1024]; - bt_accessor_t acc; - bt_memmap_t memmap; - bt_init_accessor(&acc, BT_SELF); - bt_load_memmap(&acc, &memmap); - bt_sprn_memmap(&memmap, out, sizeof(out)); - OS::PrintError(out); - bt_addr_t trace[100]; - int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace)); - OS::PrintError("\n==== C stack trace ===============================\n\n"); - if (size == 0) { - OS::PrintError("(empty)\n"); - } else { - bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"), - out, sizeof(out), NULL); - OS::PrintError(out); - } - bt_unload_memmap(&memmap); - bt_release_accessor(&acc); -#endif // V8_LIBC_GLIBC || V8_OS_BSD +} } // namespace v8::internal + + +static bool CheckEqualsStrict(volatile double* exp, volatile double* val) { + v8::internal::DoubleRepresentation exp_rep(*exp); + v8::internal::DoubleRepresentation val_rep(*val); + if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true; + return exp_rep.bits == val_rep.bits; } -} } // namespace v8::internal +void CheckEqualsHelper(const char* file, int line, const char* expected_source, + double expected, const char* value_source, + double value) { + // Force values to 64 bit 
memory to truncate 80 bit precision on IA32. + volatile double* exp = new double[1]; + *exp = expected; + volatile double* val = new double[1]; + *val = value; + if (!CheckEqualsStrict(exp, val)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", + expected_source, value_source, *exp, *val); + } + delete[] exp; + delete[] val; +} -// Contains protection against recursive calls (faults while handling faults). -extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) { - i::AllowHandleDereference allow_deref; - i::AllowDeferredHandleDereference allow_deferred_deref; - fflush(stdout); - fflush(stderr); - i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line); - va_list arguments; - va_start(arguments, format); - i::OS::VPrintError(format, arguments); - va_end(arguments); - i::OS::PrintError("\n#\n"); - v8::internal::DumpBacktrace(); - fflush(stderr); - i::OS::Abort(); + +void CheckNonEqualsHelper(const char* file, int line, + const char* expected_source, double expected, + const char* value_source, double value) { + // Force values to 64 bit memory to truncate 80 bit precision on IA32. + volatile double* exp = new double[1]; + *exp = expected; + volatile double* val = new double[1]; + *val = value; + if (CheckEqualsStrict(exp, val)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", + expected_source, value_source, *exp, *val); + } + delete[] exp; + delete[] val; } diff -Nru nodejs-0.11.13/deps/v8/src/checks.h nodejs-0.11.15/deps/v8/src/checks.h --- nodejs-0.11.13/deps/v8/src/checks.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/checks.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,353 +1,62 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CHECKS_H_ #define V8_CHECKS_H_ -#include <string.h> +#include "src/base/logging.h" -#include "../include/v8stdint.h" - -extern "C" void V8_Fatal(const char* file, int line, const char* format, ...); - - -// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during -// development, but they should not be relied on in the final product. 
#ifdef DEBUG -#define FATAL(msg) \ - V8_Fatal(__FILE__, __LINE__, "%s", (msg)) -#define UNIMPLEMENTED() \ - V8_Fatal(__FILE__, __LINE__, "unimplemented code") -#define UNREACHABLE() \ - V8_Fatal(__FILE__, __LINE__, "unreachable code") -#else -#define FATAL(msg) \ - V8_Fatal("", 0, "%s", (msg)) -#define UNIMPLEMENTED() \ - V8_Fatal("", 0, "unimplemented code") -#define UNREACHABLE() ((void) 0) +#ifndef OPTIMIZED_DEBUG +#define ENABLE_SLOW_DCHECKS 1 #endif - -// Simulator specific helpers. -#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64) - // TODO(all): If possible automatically prepend an indicator like - // UNIMPLEMENTED or LOCATION. - #define ASM_UNIMPLEMENTED(message) \ - __ Debug(message, __LINE__, NO_PARAM) - #define ASM_UNIMPLEMENTED_BREAK(message) \ - __ Debug(message, __LINE__, \ - FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK) - #define ASM_LOCATION(message) \ - __ Debug("LOCATION: " message, __LINE__, NO_PARAM) -#else - #define ASM_UNIMPLEMENTED(message) - #define ASM_UNIMPLEMENTED_BREAK(message) - #define ASM_LOCATION(message) #endif +namespace v8 { -// The CHECK macro checks that the given condition is true; if not, it -// prints a message to stderr and aborts. -#define CHECK(condition) do { \ - if (!(condition)) { \ - V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \ - } \ - } while (0) - - -// Helper function used by the CHECK_EQ function when given int -// arguments. Should not be called directly. -inline void CheckEqualsHelper(const char* file, int line, - const char* expected_source, int expected, - const char* value_source, int value) { - if (expected != value) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i", - expected_source, value_source, expected, value); - } -} - - -// Helper function used by the CHECK_EQ function when given int64_t -// arguments. Should not be called directly. 
-inline void CheckEqualsHelper(const char* file, int line, - const char* expected_source, - int64_t expected, - const char* value_source, - int64_t value) { - if (expected != value) { - // Print int64_t values in hex, as two int32s, - // to avoid platform-dependencies. - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n#" - " Expected: 0x%08x%08x\n# Found: 0x%08x%08x", - expected_source, value_source, - static_cast<uint32_t>(expected >> 32), - static_cast<uint32_t>(expected), - static_cast<uint32_t>(value >> 32), - static_cast<uint32_t>(value)); - } -} - - -// Helper function used by the CHECK_NE function when given int -// arguments. Should not be called directly. -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* unexpected_source, - int unexpected, - const char* value_source, - int value) { - if (unexpected == value) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i", - unexpected_source, value_source, value); - } -} - - -// Helper function used by the CHECK function when given string -// arguments. Should not be called directly. 
-inline void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - const char* expected, - const char* value_source, - const char* value) { - if ((expected == NULL && value != NULL) || - (expected != NULL && value == NULL) || - (expected != NULL && value != NULL && strcmp(expected, value) != 0)) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s", - expected_source, value_source, expected, value); - } -} - - -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - const char* expected, - const char* value_source, - const char* value) { - if (expected == value || - (expected != NULL && value != NULL && strcmp(expected, value) == 0)) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s", - expected_source, value_source, value); - } -} - - -// Helper function used by the CHECK function when given pointer -// arguments. Should not be called directly. -inline void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - const void* expected, - const char* value_source, - const void* value) { - if (expected != value) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p", - expected_source, value_source, - expected, value); - } -} - - -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - const void* expected, - const char* value_source, - const void* value) { - if (expected == value) { - V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p", - expected_source, value_source, value); - } -} - - -// Helper function used by the CHECK function when given floating -// point arguments. Should not be called directly. -inline void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - double expected, - const char* value_source, - double value) { - // Force values to 64 bit memory to truncate 80 bit precision on IA32. 
- volatile double* exp = new double[1]; - *exp = expected; - volatile double* val = new double[1]; - *val = value; - if (*exp != *val) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", - expected_source, value_source, *exp, *val); - } - delete[] exp; - delete[] val; -} - - -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - int64_t expected, - const char* value_source, - int64_t value) { - if (expected == value) { - V8_Fatal(file, line, - "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", - expected_source, value_source, expected, value); - } -} - - -inline void CheckNonEqualsHelper(const char* file, - int line, - const char* expected_source, - double expected, - const char* value_source, - double value) { - // Force values to 64 bit memory to truncate 80 bit precision on IA32. - volatile double* exp = new double[1]; - *exp = expected; - volatile double* val = new double[1]; - *val = value; - if (*exp == *val) { - V8_Fatal(file, line, - "CHECK_NE(%s, %s) failed\n# Value: %f", - expected_source, value_source, *val); - } - delete[] exp; - delete[] val; -} - - -#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \ - #expected, expected, #value, value) - - -#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \ - #unexpected, unexpected, #value, value) - - -#define CHECK_GT(a, b) CHECK((a) > (b)) -#define CHECK_GE(a, b) CHECK((a) >= (b)) -#define CHECK_LT(a, b) CHECK((a) < (b)) -#define CHECK_LE(a, b) CHECK((a) <= (b)) - - -// Use C++11 static_assert if possible, which gives error -// messages that are easier to understand on first sight. -#if V8_HAS_CXX11_STATIC_ASSERT -#define STATIC_CHECK(test) static_assert(test, #test) -#else -// This is inspired by the static assertion facility in boost. This -// is pretty magical. If it causes you trouble on a platform you may -// find a fix in the boost code. 
-template <bool> class StaticAssertion; -template <> class StaticAssertion<true> { }; -// This macro joins two tokens. If one of the tokens is a macro the -// helper call causes it to be resolved before joining. -#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b) -#define SEMI_STATIC_JOIN_HELPER(a, b) a##b -// Causes an error during compilation of the condition is not -// statically known to be true. It is formulated as a typedef so that -// it can be used wherever a typedef can be used. Beware that this -// actually causes each use to introduce a new defined type with a -// name depending on the source line. -template <int> class StaticAssertionHelper { }; -#define STATIC_CHECK(test) \ - typedef \ - StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \ - SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED -#endif +class Value; +template <class T> class Handle; +namespace internal { -#ifdef DEBUG -#ifndef OPTIMIZED_DEBUG -#define ENABLE_SLOW_ASSERTS 1 -#endif -#endif +intptr_t HeapObjectTagMask(); -namespace v8 { -namespace internal { -#ifdef ENABLE_SLOW_ASSERTS -#define SLOW_ASSERT(condition) \ +#ifdef ENABLE_SLOW_DCHECKS +#define SLOW_DCHECK(condition) \ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition)) extern bool FLAG_enable_slow_asserts; #else -#define SLOW_ASSERT(condition) ((void) 0) +#define SLOW_DCHECK(condition) ((void) 0) const bool FLAG_enable_slow_asserts = false; #endif -// Exposed for making debugging easier (to see where your function is being -// called, just add a call to DumpBacktrace). -void DumpBacktrace(); - } } // namespace v8::internal -// The ASSERT macro is equivalent to CHECK except that it only -// generates code in debug builds. 
-#ifdef DEBUG -#define ASSERT_RESULT(expr) CHECK(expr) -#define ASSERT(condition) CHECK(condition) -#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2) -#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2) -#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2) -#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2) -#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2) -#else -#define ASSERT_RESULT(expr) (expr) -#define ASSERT(condition) ((void) 0) -#define ASSERT_EQ(v1, v2) ((void) 0) -#define ASSERT_NE(v1, v2) ((void) 0) -#define ASSERT_GE(v1, v2) ((void) 0) -#define ASSERT_LT(v1, v2) ((void) 0) -#define ASSERT_LE(v1, v2) ((void) 0) -#endif -// Static asserts has no impact on runtime performance, so they can be -// safely enabled in release mode. Moreover, the ((void) 0) expression -// obeys different syntax rules than typedef's, e.g. it can't appear -// inside class declaration, this leads to inconsistency between debug -// and release compilation modes behavior. -#define STATIC_ASSERT(test) STATIC_CHECK(test) - -#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p) - -// "Extra checks" are lightweight checks that are enabled in some release -// builds. 
-#ifdef ENABLE_EXTRA_CHECKS -#define EXTRA_CHECK(condition) CHECK(condition) -#else -#define EXTRA_CHECK(condition) ((void) 0) -#endif +void CheckNonEqualsHelper(const char* file, int line, + const char* expected_source, double expected, + const char* value_source, double value); + +void CheckEqualsHelper(const char* file, int line, const char* expected_source, + double expected, const char* value_source, double value); + +void CheckNonEqualsHelper(const char* file, int line, + const char* unexpected_source, + v8::Handle<v8::Value> unexpected, + const char* value_source, + v8::Handle<v8::Value> value); + +void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + v8::Handle<v8::Value> expected, + const char* value_source, + v8::Handle<v8::Value> value); + +#define DCHECK_TAG_ALIGNED(address) \ + DCHECK((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0) + +#define DCHECK_SIZE_TAG_ALIGNED(size) DCHECK((size & HeapObjectTagMask()) == 0) #endif // V8_CHECKS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/circular-queue.h nodejs-0.11.15/deps/v8/src/circular-queue.h --- nodejs-0.11.13/deps/v8/src/circular-queue.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/circular-queue.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CIRCULAR_QUEUE_H_ #define V8_CIRCULAR_QUEUE_H_ -#include "atomicops.h" -#include "v8globals.h" +#include "src/base/atomicops.h" +#include "src/globals.h" namespace v8 { namespace internal { @@ -73,7 +50,7 @@ struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry { Entry() : marker(kEmpty) {} T record; - Atomic32 marker; + base::Atomic32 marker; }; Entry* Next(Entry* entry); diff -Nru nodejs-0.11.13/deps/v8/src/circular-queue-inl.h nodejs-0.11.15/deps/v8/src/circular-queue-inl.h --- nodejs-0.11.13/deps/v8/src/circular-queue-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/circular-queue-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_CIRCULAR_QUEUE_INL_H_ #define V8_CIRCULAR_QUEUE_INL_H_ -#include "circular-queue.h" +#include "src/circular-queue.h" namespace v8 { namespace internal { @@ -47,8 +24,8 @@ template<typename T, unsigned L> T* SamplingCircularQueue<T, L>::Peek() { - MemoryBarrier(); - if (Acquire_Load(&dequeue_pos_->marker) == kFull) { + base::MemoryBarrier(); + if (base::Acquire_Load(&dequeue_pos_->marker) == kFull) { return &dequeue_pos_->record; } return NULL; @@ -57,15 +34,15 @@ template<typename T, unsigned L> void SamplingCircularQueue<T, L>::Remove() { - Release_Store(&dequeue_pos_->marker, kEmpty); + base::Release_Store(&dequeue_pos_->marker, kEmpty); dequeue_pos_ = Next(dequeue_pos_); } template<typename T, unsigned L> T* SamplingCircularQueue<T, L>::StartEnqueue() { - MemoryBarrier(); - if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) { + base::MemoryBarrier(); + if (base::Acquire_Load(&enqueue_pos_->marker) == kEmpty) { return &enqueue_pos_->record; } return NULL; @@ -74,7 +51,7 @@ template<typename T, unsigned L> void SamplingCircularQueue<T, L>::FinishEnqueue() { - Release_Store(&enqueue_pos_->marker, kFull); + base::Release_Store(&enqueue_pos_->marker, kFull); enqueue_pos_ = Next(enqueue_pos_); } diff -Nru nodejs-0.11.13/deps/v8/src/codegen.cc nodejs-0.11.15/deps/v8/src/codegen.cc --- nodejs-0.11.13/deps/v8/src/codegen.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/codegen.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,81 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "bootstrapper.h" -#include "codegen.h" -#include "compiler.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "prettyprinter.h" -#include "rewriter.h" -#include "runtime.h" -#include "stub-cache.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/prettyprinter.h" +#include "src/rewriter.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { + +#if defined(_WIN64) +typedef double (*ModuloFunction)(double, double); +static ModuloFunction modulo_function = NULL; +// Defined in codegen-x64.cc. +ModuloFunction CreateModuloFunction(); + +void init_modulo_function() { + modulo_function = CreateModuloFunction(); +} + + +double modulo(double x, double y) { + // Note: here we rely on dependent reads being ordered. This is true + // on all architectures we currently support. + return (*modulo_function)(x, y); +} +#elif defined(_WIN32) + +double modulo(double x, double y) { + // Workaround MS fmod bugs. ECMA-262 says: + // dividend is finite and divisor is an infinity => result equals dividend + // dividend is a zero and divisor is nonzero finite => result equals dividend + if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) && + !(x == 0 && (y != 0 && std::isfinite(y)))) { + x = fmod(x, y); + } + return x; +} +#else // POSIX + +double modulo(double x, double y) { + return std::fmod(x, y); +} +#endif // defined(_WIN64) + + +#define UNARY_MATH_FUNCTION(name, generator) \ +static UnaryMathFunction fast_##name##_function = NULL; \ +void init_fast_##name##_function() { \ + fast_##name##_function = generator; \ +} \ +double fast_##name(double x) { \ + return (*fast_##name##_function)(x); \ +} + +UNARY_MATH_FUNCTION(exp, CreateExpFunction()) +UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction()) + +#undef UNARY_MATH_FUNCTION + + +void lazily_initialize_fast_exp() { + if (fast_exp_function == NULL) { + init_fast_exp_function(); + } +} + + #define __ ACCESS_MASM(masm_) #ifdef DEBUG @@ -114,7 +150,8 @@ Handle<Code> code = isolate->factory()->NewCode(desc, flags, masm->CodeObject(), false, is_crankshafted, - 
info->prologue_offset()); + info->prologue_offset(), + info->is_debug() && !is_crankshafted); isolate->counters()->total_compiled_code_size()->Increment( code->instruction_size()); isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted, @@ -138,10 +175,11 @@ code->kind() == Code::FUNCTION; CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer()); + OFStream os(tracing_scope.file()); if (print_source) { Handle<Script> script = info->script(); if (!script->IsUndefined() && !script->source()->IsUndefined()) { - PrintF(tracing_scope.file(), "--- Raw source ---\n"); + os << "--- Raw source ---\n"; ConsStringIteratorOp op; StringCharacterStream stream(String::cast(script->source()), &op, @@ -152,57 +190,38 @@ function->end_position() - function->start_position() + 1; for (int i = 0; i < source_len; i++) { if (stream.HasMore()) { - PrintF(tracing_scope.file(), "%c", stream.GetNext()); + os << AsReversiblyEscapedUC16(stream.GetNext()); } } - PrintF(tracing_scope.file(), "\n\n"); + os << "\n\n"; } } if (info->IsOptimizing()) { if (FLAG_print_unopt_code) { - PrintF(tracing_scope.file(), "--- Unoptimized code ---\n"); + os << "--- Unoptimized code ---\n"; info->closure()->shared()->code()->Disassemble( - function->debug_name()->ToCString().get(), tracing_scope.file()); + function->debug_name()->ToCString().get(), os); } - PrintF(tracing_scope.file(), "--- Optimized code ---\n"); - PrintF(tracing_scope.file(), - "optimization_id = %d\n", info->optimization_id()); + os << "--- Optimized code ---\n" + << "optimization_id = " << info->optimization_id() << "\n"; } else { - PrintF(tracing_scope.file(), "--- Code ---\n"); + os << "--- Code ---\n"; } if (print_source) { - PrintF(tracing_scope.file(), - "source_position = %d\n", function->start_position()); + os << "source_position = " << function->start_position() << "\n"; } if (info->IsStub()) { CodeStub::Major major_key = info->code_stub()->MajorKey(); - code->Disassemble(CodeStub::MajorName(major_key, false), - 
tracing_scope.file()); + code->Disassemble(CodeStub::MajorName(major_key, false), os); } else { - code->Disassemble(function->debug_name()->ToCString().get(), - tracing_scope.file()); + code->Disassemble(function->debug_name()->ToCString().get(), os); } - PrintF(tracing_scope.file(), "--- End code ---\n"); + os << "--- End code ---\n"; } #endif // ENABLE_DISASSEMBLER } -bool CodeGenerator::ShouldGenerateLog(Isolate* isolate, Expression* type) { - ASSERT(type != NULL); - if (!isolate->logger()->is_logging() && - !isolate->cpu_profiler()->is_profiling()) { - return false; - } - Handle<String> name = Handle<String>::cast(type->AsLiteral()->value()); - if (FLAG_log_regexp) { - if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp"))) - return true; - } - return false; -} - - bool CodeGenerator::RecordPositions(MacroAssembler* masm, int pos, bool right_here) { @@ -235,9 +254,9 @@ } -int CEntryStub::MinorKey() { +int CEntryStub::MinorKey() const { int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0; - ASSERT(result_size_ == 1 || result_size_ == 2); + DCHECK(result_size_ == 1 || result_size_ == 2); #ifdef _WIN64 return result | ((result_size_ == 1) ? 0 : 2); #else diff -Nru nodejs-0.11.13/deps/v8/src/codegen.h nodejs-0.11.15/deps/v8/src/codegen.h --- nodejs-0.11.13/deps/v8/src/codegen.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/codegen.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CODEGEN_H_ #define V8_CODEGEN_H_ -#include "code-stubs.h" -#include "runtime.h" +#include "src/code-stubs.h" +#include "src/runtime.h" // Include the declaration of the architecture defined class CodeGenerator. 
// The contract to the shared code is that the the CodeGenerator is a subclass @@ -69,15 +46,19 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; #if V8_TARGET_ARCH_IA32 -#include "ia32/codegen-ia32.h" +#include "src/ia32/codegen-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/codegen-x64.h" +#include "src/x64/codegen-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/codegen-arm64.h" +#include "src/arm64/codegen-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/codegen-arm.h" +#include "src/arm/codegen-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/codegen-mips.h" +#include "src/mips/codegen-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/codegen-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/codegen-x87.h" // NOLINT #else #error Unsupported target architecture. #endif @@ -102,8 +83,6 @@ // Print the code after compiling it. static void PrintCode(Handle<Code> code, CompilationInfo* info); - static bool ShouldGenerateLog(Isolate* isolate, Expression* type); - static bool RecordPositions(MacroAssembler* masm, int pos, bool right_here = false); @@ -122,19 +101,46 @@ UnaryMathFunction CreateSqrtFunction(); +double modulo(double x, double y); + +// Custom implementation of math functions. +double fast_exp(double input); +double fast_sqrt(double input); +#ifdef _WIN64 +void init_modulo_function(); +#endif +void lazily_initialize_fast_exp(); +void init_fast_sqrt_function(); + + class ElementsTransitionGenerator : public AllStatic { public: // If |mode| is set to DONT_TRACK_ALLOCATION_SITE, // |allocation_memento_found| may be NULL. 
- static void GenerateMapChangeElementsTransition(MacroAssembler* masm, + static void GenerateMapChangeElementsTransition( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, AllocationSiteMode mode, Label* allocation_memento_found); - static void GenerateSmiToDouble(MacroAssembler* masm, - AllocationSiteMode mode, - Label* fail); - static void GenerateDoubleToObject(MacroAssembler* masm, - AllocationSiteMode mode, - Label* fail); + static void GenerateSmiToDouble( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail); + static void GenerateDoubleToObject( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail); private: DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator); @@ -143,6 +149,33 @@ static const int kNumberDictionaryProbes = 4; +class CodeAgingHelper { + public: + CodeAgingHelper(); + + uint32_t young_sequence_length() const { return young_sequence_.length(); } + bool IsYoung(byte* candidate) const { + return memcmp(candidate, + young_sequence_.start(), + young_sequence_.length()) == 0; + } + void CopyYoungSequenceTo(byte* new_buffer) const { + CopyBytes(new_buffer, young_sequence_.start(), young_sequence_.length()); + } + +#ifdef DEBUG + bool IsOld(byte* candidate) const; +#endif + + protected: + const EmbeddedVector<byte, kNoCodeAgeSequenceLength> young_sequence_; +#ifdef DEBUG +#ifdef V8_TARGET_ARCH_ARM64 + const EmbeddedVector<byte, kNoCodeAgeSequenceLength> old_sequence_; +#endif +#endif +}; + } } // namespace v8::internal #endif // V8_CODEGEN_H_ diff -Nru nodejs-0.11.13/deps/v8/src/code.h nodejs-0.11.15/deps/v8/src/code.h --- nodejs-0.11.13/deps/v8/src/code.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/code.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2006-2008 the V8 project 
authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_CODE_H_ #define V8_CODE_H_ -#include "allocation.h" -#include "handles.h" -#include "objects.h" +#include "src/allocation.h" +#include "src/handles.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -53,11 +30,11 @@ bool is_immediate() const { return !is_reg(); } Register reg() const { - ASSERT(is_reg()); + DCHECK(is_reg()); return reg_; } int immediate() const { - ASSERT(is_immediate()); + DCHECK(is_immediate()); return immediate_; } diff -Nru nodejs-0.11.13/deps/v8/src/code-stubs.cc nodejs-0.11.15/deps/v8/src/code-stubs.cc --- nodejs-0.11.13/deps/v8/src/code-stubs.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/code-stubs.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,58 +1,109 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "bootstrapper.h" -#include "code-stubs.h" -#include "cpu-profiler.h" -#include "stub-cache.h" -#include "factory.h" -#include "gdb-jit.h" -#include "macro-assembler.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/cpu-profiler.h" +#include "src/factory.h" +#include "src/gdb-jit.h" +#include "src/macro-assembler.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { +InterfaceDescriptor::InterfaceDescriptor() + : register_param_count_(-1) { } + + CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor() - : register_param_count_(-1), - stack_parameter_count_(no_reg), + : stack_parameter_count_(no_reg), hint_stack_parameter_count_(-1), function_mode_(NOT_JS_FUNCTION_STUB_MODE), - register_params_(NULL), deoptimization_handler_(NULL), handler_arguments_mode_(DONT_PASS_ARGUMENTS), miss_handler_(), has_miss_handler_(false) { } -bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { - UnseededNumberDictionary* stubs = isolate->heap()->code_stubs(); +void InterfaceDescriptor::Initialize( + int register_parameter_count, + Register* registers, + Representation* register_param_representations, + PlatformInterfaceDescriptor* platform_descriptor) { + platform_specific_descriptor_ = platform_descriptor; + register_param_count_ = 
register_parameter_count; + + // An interface descriptor must have a context register. + DCHECK(register_parameter_count > 0 && registers[0].is(ContextRegister())); + + // InterfaceDescriptor owns a copy of the registers array. + register_params_.Reset(NewArray<Register>(register_parameter_count)); + for (int i = 0; i < register_parameter_count; i++) { + register_params_[i] = registers[i]; + } + + // If a representations array is specified, then the descriptor owns that as + // well. + if (register_param_representations != NULL) { + register_param_representations_.Reset( + NewArray<Representation>(register_parameter_count)); + for (int i = 0; i < register_parameter_count; i++) { + // If there is a context register, the representation must be tagged. + DCHECK(i != 0 || register_param_representations[i].Equals( + Representation::Tagged())); + register_param_representations_[i] = register_param_representations[i]; + } + } +} + + +void CodeStubInterfaceDescriptor::Initialize( + CodeStub::Major major, int register_parameter_count, Register* registers, + Address deoptimization_handler, + Representation* register_param_representations, + int hint_stack_parameter_count, StubFunctionMode function_mode) { + InterfaceDescriptor::Initialize(register_parameter_count, registers, + register_param_representations); + + deoptimization_handler_ = deoptimization_handler; + + hint_stack_parameter_count_ = hint_stack_parameter_count; + function_mode_ = function_mode; + major_ = major; +} + + +void CodeStubInterfaceDescriptor::Initialize( + CodeStub::Major major, int register_parameter_count, Register* registers, + Register stack_parameter_count, Address deoptimization_handler, + Representation* register_param_representations, + int hint_stack_parameter_count, StubFunctionMode function_mode, + HandlerArgumentsMode handler_mode) { + Initialize(major, register_parameter_count, registers, deoptimization_handler, + register_param_representations, hint_stack_parameter_count, + 
function_mode); + stack_parameter_count_ = stack_parameter_count; + handler_arguments_mode_ = handler_mode; +} + + +void CallInterfaceDescriptor::Initialize( + int register_parameter_count, + Register* registers, + Representation* param_representations, + PlatformInterfaceDescriptor* platform_descriptor) { + InterfaceDescriptor::Initialize(register_parameter_count, registers, + param_representations, platform_descriptor); +} + + +bool CodeStub::FindCodeInCache(Code** code_out) { + UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs(); int index = stubs->FindEntry(GetKey()); if (index != UnseededNumberDictionary::kNotFound) { *code_out = Code::cast(stubs->ValueAt(index)); @@ -62,21 +113,12 @@ } -SmartArrayPointer<const char> CodeStub::GetName() { - char buffer[100]; - NoAllocationStringAllocator allocator(buffer, - static_cast<unsigned>(sizeof(buffer))); - StringStream stream(&allocator); - PrintName(&stream); - return stream.ToCString(); -} - - -void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) { - SmartArrayPointer<const char> name = GetName(); - PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, name.get())); - GDBJIT(AddCode(GDBJITInterface::STUB, name.get(), code)); - Counters* counters = isolate->counters(); +void CodeStub::RecordCodeGeneration(Handle<Code> code) { + IC::RegisterWeakMapDependency(code); + OStringStream os; + os << *this; + PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, os.c_str())); + Counters* counters = isolate()->counters(); counters->total_stubs_code_size()->Increment(code->instruction_size()); } @@ -86,25 +128,27 @@ } -Handle<Code> CodeStub::GetCodeCopy(Isolate* isolate, - const Code::FindAndReplacePattern& pattern) { - Handle<Code> ic = GetCode(isolate); - ic = isolate->factory()->CopyCode(ic); +Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) { + Handle<Code> ic = GetCode(); + ic = isolate()->factory()->CopyCode(ic); ic->FindAndReplace(pattern); - 
RecordCodeGeneration(*ic, isolate); + RecordCodeGeneration(ic); return ic; } -Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) { - Factory* factory = isolate->factory(); +Handle<Code> PlatformCodeStub::GenerateCode() { + Factory* factory = isolate()->factory(); // Generate the new code. - MacroAssembler masm(isolate, NULL, 256); + MacroAssembler masm(isolate(), NULL, 256); + + // TODO(yangguo) remove this once the code serializer handles code stubs. + if (FLAG_serialize_toplevel) masm.enable_serializer(); { // Update the static counter each time a new code stub is generated. - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); // Generate the code for the stub. masm.set_generating_stub(true); @@ -128,39 +172,32 @@ } -void CodeStub::VerifyPlatformFeatures(Isolate* isolate) { - ASSERT(CpuFeatures::VerifyCrossCompiling()); -} - - -Handle<Code> CodeStub::GetCode(Isolate* isolate) { - Factory* factory = isolate->factory(); - Heap* heap = isolate->heap(); +Handle<Code> CodeStub::GetCode() { + Heap* heap = isolate()->heap(); Code* code; if (UseSpecialCache() - ? FindCodeInSpecialCache(&code, isolate) - : FindCodeInCache(&code, isolate)) { - ASSERT(GetCodeKind() == code->kind()); + ? 
FindCodeInSpecialCache(&code) + : FindCodeInCache(&code)) { + DCHECK(GetCodeKind() == code->kind()); return Handle<Code>(code); } -#ifdef DEBUG - VerifyPlatformFeatures(isolate); -#endif - { - HandleScope scope(isolate); + HandleScope scope(isolate()); - Handle<Code> new_object = GenerateCode(isolate); - new_object->set_major_key(MajorKey()); + Handle<Code> new_object = GenerateCode(); + new_object->set_stub_key(GetKey()); FinishCode(new_object); - RecordCodeGeneration(*new_object, isolate); + RecordCodeGeneration(new_object); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_code_stubs) { - CodeTracer::Scope trace_scope(isolate->GetCodeTracer()); - new_object->Disassemble(GetName().get(), trace_scope.file()); - PrintF(trace_scope.file(), "\n"); + CodeTracer::Scope trace_scope(isolate()->GetCodeTracer()); + OFStream os(trace_scope.file()); + OStringStream name; + name << *this; + new_object->Disassemble(name.c_str(), os); + os << "\n"; } #endif @@ -169,7 +206,7 @@ } else { // Update the dictionary and the root in Heap. 
Handle<UnseededNumberDictionary> dict = - factory->DictionaryAtNumberPut( + UnseededNumberDictionary::AtNumberPut( Handle<UnseededNumberDictionary>(heap->code_stubs()), GetKey(), new_object); @@ -179,10 +216,10 @@ } Activate(code); - ASSERT(!NeedsImmovableCode() || + DCHECK(!NeedsImmovableCode() || heap->lo_space()->Contains(code) || heap->code_space()->FirstPage()->Contains(code->address())); - return Handle<Code>(code, isolate); + return Handle<Code>(code, isolate()); } @@ -193,6 +230,8 @@ CODE_STUB_LIST(DEF_CASE) #undef DEF_CASE case UninitializedMajorKey: return "<UninitializedMajorKey>Stub"; + case NoCache: + return "<NoCache>Stub"; default: if (!allow_unknown_keys) { UNREACHABLE(); @@ -202,14 +241,14 @@ } -void CodeStub::PrintBaseName(StringStream* stream) { - stream->Add("%s", MajorName(MajorKey(), false)); +void CodeStub::PrintBaseName(OStream& os) const { // NOLINT + os << MajorName(MajorKey(), false); } -void CodeStub::PrintName(StringStream* stream) { - PrintBaseName(stream); - PrintState(stream); +void CodeStub::PrintName(OStream& os) const { // NOLINT + PrintBaseName(os); + PrintState(os); } @@ -218,9 +257,10 @@ // Generate the uninitialized versions of the stub. 
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) { for (int mode = NO_OVERWRITE; mode <= OVERWRITE_RIGHT; ++mode) { - BinaryOpICStub stub(static_cast<Token::Value>(op), + BinaryOpICStub stub(isolate, + static_cast<Token::Value>(op), static_cast<OverwriteMode>(mode)); - stub.GetCode(isolate); + stub.GetCode(); } } @@ -229,16 +269,16 @@ } -void BinaryOpICStub::PrintState(StringStream* stream) { - state_.Print(stream); +void BinaryOpICStub::PrintState(OStream& os) const { // NOLINT + os << state_; } // static void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate, const BinaryOpIC::State& state) { - BinaryOpICStub stub(state); - stub.GetCode(isolate); + BinaryOpICStub stub(isolate, state); + stub.GetCode(); } @@ -249,8 +289,9 @@ } -void BinaryOpICWithAllocationSiteStub::PrintState(StringStream* stream) { - state_.Print(stream); +void BinaryOpICWithAllocationSiteStub::PrintState( + OStream& os) const { // NOLINT + os << state_; } @@ -258,28 +299,28 @@ void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime( Isolate* isolate, const BinaryOpIC::State& state) { if (state.CouldCreateAllocationMementos()) { - BinaryOpICWithAllocationSiteStub stub(state); - stub.GetCode(isolate); + BinaryOpICWithAllocationSiteStub stub(isolate, state); + stub.GetCode(); } } -void StringAddStub::PrintBaseName(StringStream* stream) { - stream->Add("StringAddStub"); +void StringAddStub::PrintBaseName(OStream& os) const { // NOLINT + os << "StringAddStub"; if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) { - stream->Add("_CheckBoth"); + os << "_CheckBoth"; } else if ((flags() & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) { - stream->Add("_CheckLeft"); + os << "_CheckLeft"; } else if ((flags() & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) { - stream->Add("_CheckRight"); + os << "_CheckRight"; } if (pretenure_flag() == TENURED) { - stream->Add("_Tenured"); + os << "_Tenured"; } } -InlineCacheState ICCompareStub::GetICState() { +InlineCacheState 
ICCompareStub::GetICState() const { CompareIC::State state = Max(left_, right_); switch (state) { case CompareIC::UNINITIALIZED: @@ -301,7 +342,7 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) { - ASSERT(*known_map_ != NULL); + DCHECK(*known_map_ != NULL); Isolate* isolate = new_object->GetIsolate(); Factory* factory = isolate->factory(); return Map::UpdateCodeCache(known_map_, @@ -312,26 +353,26 @@ } -bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) { - Factory* factory = isolate->factory(); +bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { + Factory* factory = isolate()->factory(); Code::Flags flags = Code::ComputeFlags( GetCodeKind(), UNINITIALIZED); - ASSERT(op_ == Token::EQ || op_ == Token::EQ_STRICT); + DCHECK(op_ == Token::EQ || op_ == Token::EQ_STRICT); Handle<Object> probe( known_map_->FindInCodeCache( strict() ? *factory->strict_compare_ic_string() : *factory->compare_ic_string(), flags), - isolate); + isolate()); if (probe->IsCode()) { *code_out = Code::cast(*probe); #ifdef DEBUG Token::Value cached_op; - ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL, - &cached_op); - ASSERT(op_ == cached_op); + ICCompareStub::DecodeKey((*code_out)->stub_key(), NULL, NULL, NULL, + &cached_op); + DCHECK(op_ == cached_op); #endif return true; } @@ -339,7 +380,7 @@ } -int ICCompareStub::MinorKey() { +int ICCompareStub::MinorKey() const { return OpField::encode(op_ - Token::EQ) | LeftStateField::encode(left_) | RightStateField::encode(right_) | @@ -347,11 +388,11 @@ } -void ICCompareStub::DecodeMinorKey(int minor_key, - CompareIC::State* left_state, - CompareIC::State* right_state, - CompareIC::State* handler_state, - Token::Value* op) { +void ICCompareStub::DecodeKey(uint32_t stub_key, CompareIC::State* left_state, + CompareIC::State* right_state, + CompareIC::State* handler_state, + Token::Value* op) { + int minor_key = MinorKeyFromKey(stub_key); if (left_state) { *left_state = 
static_cast<CompareIC::State>(LeftStateField::decode(minor_key)); @@ -394,7 +435,7 @@ GenerateObjects(masm); break; case CompareIC::KNOWN_OBJECT: - ASSERT(*known_map_ != NULL); + DCHECK(*known_map_ != NULL); GenerateKnownObjects(masm); break; case CompareIC::GENERIC: @@ -405,7 +446,7 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) { - ASSERT(!state_.Contains(GENERIC)); + DCHECK(!state_.Contains(GENERIC)); State old_state(state_); if (object->IsNull()) { state_.Add(NULL_TYPE); @@ -430,44 +471,55 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) { // Note: Although a no-op transition is semantically OK, it is hinting at a // bug somewhere in our state transition machinery. - ASSERT(from != to); + DCHECK(from != to); if (!FLAG_trace_ic) return; - char buffer[100]; - NoAllocationStringAllocator allocator(buffer, - static_cast<unsigned>(sizeof(buffer))); - StringStream stream(&allocator); - stream.Add("["); - PrintBaseName(&stream); - stream.Add(": "); - from.Print(&stream); - stream.Add("=>"); - to.Print(&stream); - stream.Add("]\n"); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "["; + PrintBaseName(os); + os << ": " << from << "=>" << to << "]" << endl; } -void CompareNilICStub::PrintBaseName(StringStream* stream) { - CodeStub::PrintBaseName(stream); - stream->Add((nil_value_ == kNullValue) ? "(NullValue)": - "(UndefinedValue)"); +void CompareNilICStub::PrintBaseName(OStream& os) const { // NOLINT + CodeStub::PrintBaseName(os); + os << ((nil_value_ == kNullValue) ? 
"(NullValue)" : "(UndefinedValue)"); } -void CompareNilICStub::PrintState(StringStream* stream) { - state_.Print(stream); +void CompareNilICStub::PrintState(OStream& os) const { // NOLINT + os << state_; } -void CompareNilICStub::State::Print(StringStream* stream) const { - stream->Add("("); - SimpleListPrinter printer(stream); - if (IsEmpty()) printer.Add("None"); - if (Contains(UNDEFINED)) printer.Add("Undefined"); - if (Contains(NULL_TYPE)) printer.Add("Null"); - if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap"); - if (Contains(GENERIC)) printer.Add("Generic"); - stream->Add(")"); +// TODO(svenpanne) Make this a real infix_ostream_iterator. +class SimpleListPrinter { + public: + explicit SimpleListPrinter(OStream& os) : os_(os), first_(true) {} + + void Add(const char* s) { + if (first_) { + first_ = false; + } else { + os_ << ","; + } + os_ << s; + } + + private: + OStream& os_; + bool first_; +}; + + +OStream& operator<<(OStream& os, const CompareNilICStub::State& s) { + os << "("; + SimpleListPrinter p(os); + if (s.IsEmpty()) p.Add("None"); + if (s.Contains(CompareNilICStub::UNDEFINED)) p.Add("Undefined"); + if (s.Contains(CompareNilICStub::NULL_TYPE)) p.Add("Null"); + if (s.Contains(CompareNilICStub::MONOMORPHIC_MAP)) p.Add("MonomorphicMap"); + if (s.Contains(CompareNilICStub::GENERIC)) p.Add("Generic"); + return os << ")"; } @@ -501,26 +553,21 @@ } -void InstanceofStub::PrintName(StringStream* stream) { - const char* args = ""; - if (HasArgsInRegisters()) { - args = "_REGS"; - } +void CallIC_ArrayStub::PrintState(OStream& os) const { // NOLINT + os << state_ << " (Array)"; +} - const char* inline_check = ""; - if (HasCallSiteInlineCheck()) { - inline_check = "_INLINE"; - } - const char* return_true_false_object = ""; - if (ReturnTrueFalseObject()) { - return_true_false_object = "_TRUEFALSE"; - } +void CallICStub::PrintState(OStream& os) const { // NOLINT + os << state_; +} - stream->Add("InstanceofStub%s%s%s", - args, - inline_check, - 
return_true_false_object); + +void InstanceofStub::PrintName(OStream& os) const { // NOLINT + os << "InstanceofStub"; + if (HasArgsInRegisters()) os << "_REGS"; + if (HasCallSiteInlineCheck()) os << "_INLINE"; + if (ReturnTrueFalseObject()) os << "_TRUEFALSE"; } @@ -532,19 +579,101 @@ } -void KeyedLoadDictionaryElementPlatformStub::Generate( - MacroAssembler* masm) { - KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm); +void LoadFastElementStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + LoadIC::ReceiverRegister(), + LoadIC::NameRegister() }; + STATIC_ASSERT(LoadIC::kParameterCount == 2); + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure)); +} + + +void LoadDictionaryElementStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + LoadIC::ReceiverRegister(), + LoadIC::NameRegister() }; + STATIC_ASSERT(LoadIC::kParameterCount == 2); + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure)); +} + + +void KeyedLoadGenericStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + LoadIC::ReceiverRegister(), + LoadIC::NameRegister() }; + STATIC_ASSERT(LoadIC::kParameterCount == 2); + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry); +} + + +void HandlerStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + if (kind() == Code::LOAD_IC) { + Register registers[] = {InterfaceDescriptor::ContextRegister(), + LoadIC::ReceiverRegister(), LoadIC::NameRegister()}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); + } else { + 
DCHECK_EQ(Code::STORE_IC, kind()); + Register registers[] = {InterfaceDescriptor::ContextRegister(), + StoreIC::ReceiverRegister(), + StoreIC::NameRegister(), StoreIC::ValueRegister()}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(StoreIC_MissFromStubFailure)); + } +} + + +void StoreFastElementStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + KeyedStoreIC::ReceiverRegister(), + KeyedStoreIC::NameRegister(), + KeyedStoreIC::ValueRegister() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure)); +} + + +void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + ValueRegister(), + MapRegister(), + KeyRegister(), + ObjectRegister() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss)); +} + + +void InstanceofStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + InstanceofStub::left(), + InstanceofStub::right() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void LoadDictionaryElementPlatformStub::Generate(MacroAssembler* masm) { + ElementHandlerCompiler::GenerateLoadDictionaryElement(masm); } void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) { - CreateAllocationSiteStub stub; - stub.GetCode(isolate); + CreateAllocationSiteStub stub(isolate); + stub.GetCode(); } -void KeyedStoreElementStub::Generate(MacroAssembler* masm) { +void StoreElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: case FAST_HOLEY_ELEMENTS: @@ -561,7 +690,7 @@ UNREACHABLE(); break; case DICTIONARY_ELEMENTS: - 
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm); + ElementHandlerCompiler::GenerateStoreDictionaryElement(masm); break; case SLOPPY_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -570,48 +699,64 @@ } -void ArgumentsAccessStub::PrintName(StringStream* stream) { - stream->Add("ArgumentsAccessStub_"); +void ArgumentsAccessStub::PrintName(OStream& os) const { // NOLINT + os << "ArgumentsAccessStub_"; switch (type_) { - case READ_ELEMENT: stream->Add("ReadElement"); break; - case NEW_SLOPPY_FAST: stream->Add("NewSloppyFast"); break; - case NEW_SLOPPY_SLOW: stream->Add("NewSloppySlow"); break; - case NEW_STRICT: stream->Add("NewStrict"); break; + case READ_ELEMENT: + os << "ReadElement"; + break; + case NEW_SLOPPY_FAST: + os << "NewSloppyFast"; + break; + case NEW_SLOPPY_SLOW: + os << "NewSloppySlow"; + break; + case NEW_STRICT: + os << "NewStrict"; + break; } + return; } -void CallFunctionStub::PrintName(StringStream* stream) { - stream->Add("CallFunctionStub_Args%d", argc_); - if (RecordCallTarget()) stream->Add("_Recording"); +void CallFunctionStub::PrintName(OStream& os) const { // NOLINT + os << "CallFunctionStub_Args" << argc_; } -void CallConstructStub::PrintName(StringStream* stream) { - stream->Add("CallConstructStub"); - if (RecordCallTarget()) stream->Add("_Recording"); +void CallConstructStub::PrintName(OStream& os) const { // NOLINT + os << "CallConstructStub"; + if (RecordCallTarget()) os << "_Recording"; } -void ArrayConstructorStub::PrintName(StringStream* stream) { - stream->Add("ArrayConstructorStub"); +void ArrayConstructorStub::PrintName(OStream& os) const { // NOLINT + os << "ArrayConstructorStub"; switch (argument_count_) { - case ANY: stream->Add("_Any"); break; - case NONE: stream->Add("_None"); break; - case ONE: stream->Add("_One"); break; - case MORE_THAN_ONE: stream->Add("_More_Than_One"); break; + case ANY: + os << "_Any"; + break; + case NONE: + os << "_None"; + break; + case ONE: + os << "_One"; + break; + case MORE_THAN_ONE: + os << 
"_More_Than_One"; + break; } + return; } -void ArrayConstructorStubBase::BasePrintName(const char* name, - StringStream* stream) { - stream->Add(name); - stream->Add("_"); - stream->Add(ElementsKindToString(elements_kind())); +OStream& ArrayConstructorStubBase::BasePrintName(OStream& os, // NOLINT + const char* name) const { + os << name << "_" << ElementsKindToString(elements_kind()); if (override_mode() == DISABLE_ALLOCATION_SITES) { - stream->Add("_DISABLE_ALLOCATION_SITES"); + os << "_DISABLE_ALLOCATION_SITES"; } + return os; } @@ -623,24 +768,24 @@ } -void ToBooleanStub::PrintState(StringStream* stream) { - types_.Print(stream); +void ToBooleanStub::PrintState(OStream& os) const { // NOLINT + os << types_; } -void ToBooleanStub::Types::Print(StringStream* stream) const { - stream->Add("("); - SimpleListPrinter printer(stream); - if (IsEmpty()) printer.Add("None"); - if (Contains(UNDEFINED)) printer.Add("Undefined"); - if (Contains(BOOLEAN)) printer.Add("Bool"); - if (Contains(NULL_TYPE)) printer.Add("Null"); - if (Contains(SMI)) printer.Add("Smi"); - if (Contains(SPEC_OBJECT)) printer.Add("SpecObject"); - if (Contains(STRING)) printer.Add("String"); - if (Contains(SYMBOL)) printer.Add("Symbol"); - if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber"); - stream->Add(")"); +OStream& operator<<(OStream& os, const ToBooleanStub::Types& s) { + os << "("; + SimpleListPrinter p(os); + if (s.IsEmpty()) p.Add("None"); + if (s.Contains(ToBooleanStub::UNDEFINED)) p.Add("Undefined"); + if (s.Contains(ToBooleanStub::BOOLEAN)) p.Add("Bool"); + if (s.Contains(ToBooleanStub::NULL_TYPE)) p.Add("Null"); + if (s.Contains(ToBooleanStub::SMI)) p.Add("Smi"); + if (s.Contains(ToBooleanStub::SPEC_OBJECT)) p.Add("SpecObject"); + if (s.Contains(ToBooleanStub::STRING)) p.Add("String"); + if (s.Contains(ToBooleanStub::SYMBOL)) p.Add("Symbol"); + if (s.Contains(ToBooleanStub::HEAP_NUMBER)) p.Add("HeapNumber"); + return os << ")"; } @@ -668,7 +813,7 @@ Add(SYMBOL); return true; } else if 
(object->IsHeapNumber()) { - ASSERT(!object->IsUndetectableObject()); + DCHECK(!object->IsUndetectableObject()); Add(HEAP_NUMBER); double value = HeapNumber::cast(*object)->value(); return value != 0 && !std::isnan(value); @@ -695,10 +840,10 @@ void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) { - StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE); - StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE); - stub1.GetCode(isolate); - stub2.GetCode(isolate); + StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE); + StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE); + stub1.GetCode(); + stub2.GetCode(); } @@ -706,7 +851,7 @@ intptr_t stack_pointer, Isolate* isolate) { FunctionEntryHook entry_hook = isolate->function_entry_hook(); - ASSERT(entry_hook != NULL); + DCHECK(entry_hook != NULL); entry_hook(function, stack_pointer); } @@ -715,84 +860,100 @@ int major_key = stub->MajorKey(); CodeStubInterfaceDescriptor* descriptor = isolate->code_stub_interface_descriptor(major_key); - if (!descriptor->initialized()) { - stub->InitializeInterfaceDescriptor(isolate, descriptor); + if (!descriptor->IsInitialized()) { + stub->InitializeInterfaceDescriptor(descriptor); } } void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) { - ArrayNoArgumentConstructorStub stub1(GetInitialFastElementsKind()); + ArrayNoArgumentConstructorStub stub1(isolate, GetInitialFastElementsKind()); InstallDescriptor(isolate, &stub1); - ArraySingleArgumentConstructorStub stub2(GetInitialFastElementsKind()); + ArraySingleArgumentConstructorStub stub2(isolate, + GetInitialFastElementsKind()); InstallDescriptor(isolate, &stub2); - ArrayNArgumentsConstructorStub stub3(GetInitialFastElementsKind()); + ArrayNArgumentsConstructorStub stub3(isolate, GetInitialFastElementsKind()); InstallDescriptor(isolate, &stub3); } void NumberToStringStub::InstallDescriptors(Isolate* isolate) { - NumberToStringStub stub; + NumberToStringStub stub(isolate); 
InstallDescriptor(isolate, &stub); } void FastNewClosureStub::InstallDescriptors(Isolate* isolate) { - FastNewClosureStub stub(STRICT, false); + FastNewClosureStub stub(isolate, STRICT, false); InstallDescriptor(isolate, &stub); } void FastNewContextStub::InstallDescriptors(Isolate* isolate) { - FastNewContextStub stub(FastNewContextStub::kMaximumSlots); + FastNewContextStub stub(isolate, FastNewContextStub::kMaximumSlots); InstallDescriptor(isolate, &stub); } // static void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) { - FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS, - DONT_TRACK_ALLOCATION_SITE, 0); + FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE); InstallDescriptor(isolate, &stub); } // static void BinaryOpICStub::InstallDescriptors(Isolate* isolate) { - BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); + BinaryOpICStub stub(isolate, Token::ADD, NO_OVERWRITE); InstallDescriptor(isolate, &stub); } // static void BinaryOpWithAllocationSiteStub::InstallDescriptors(Isolate* isolate) { - BinaryOpWithAllocationSiteStub stub(Token::ADD, NO_OVERWRITE); + BinaryOpWithAllocationSiteStub stub(isolate, Token::ADD, NO_OVERWRITE); InstallDescriptor(isolate, &stub); } // static void StringAddStub::InstallDescriptors(Isolate* isolate) { - StringAddStub stub(STRING_ADD_CHECK_NONE, NOT_TENURED); + StringAddStub stub(isolate, STRING_ADD_CHECK_NONE, NOT_TENURED); InstallDescriptor(isolate, &stub); } // static void RegExpConstructResultStub::InstallDescriptors(Isolate* isolate) { - RegExpConstructResultStub stub; + RegExpConstructResultStub stub(isolate); + InstallDescriptor(isolate, &stub); +} + + +// static +void KeyedLoadGenericStub::InstallDescriptors(Isolate* isolate) { + KeyedLoadGenericStub stub(isolate); + InstallDescriptor(isolate, &stub); +} + + +// static +void StoreFieldStub::InstallDescriptors(Isolate* isolate) { + StoreFieldStub stub(isolate, FieldIndex::ForInObjectOffset(0), + Representation::None()); 
InstallDescriptor(isolate, &stub); } ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate) - : argument_count_(ANY) { + : PlatformCodeStub(isolate), argument_count_(ANY) { ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); } ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate, - int argument_count) { + int argument_count) + : PlatformCodeStub(isolate) { if (argument_count == 0) { argument_count_ = NONE; } else if (argument_count == 1) { @@ -807,16 +968,16 @@ void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) { - InternalArrayNoArgumentConstructorStub stub1(FAST_ELEMENTS); + InternalArrayNoArgumentConstructorStub stub1(isolate, FAST_ELEMENTS); InstallDescriptor(isolate, &stub1); - InternalArraySingleArgumentConstructorStub stub2(FAST_ELEMENTS); + InternalArraySingleArgumentConstructorStub stub2(isolate, FAST_ELEMENTS); InstallDescriptor(isolate, &stub2); - InternalArrayNArgumentsConstructorStub stub3(FAST_ELEMENTS); + InternalArrayNArgumentsConstructorStub stub3(isolate, FAST_ELEMENTS); InstallDescriptor(isolate, &stub3); } InternalArrayConstructorStub::InternalArrayConstructorStub( - Isolate* isolate) { + Isolate* isolate) : PlatformCodeStub(isolate) { InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); } diff -Nru nodejs-0.11.13/deps/v8/src/code-stubs.h nodejs-0.11.15/deps/v8/src/code-stubs.h --- nodejs-0.11.13/deps/v8/src/code-stubs.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/code-stubs.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,104 +1,84 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CODE_STUBS_H_ #define V8_CODE_STUBS_H_ -#include "allocation.h" -#include "assembler.h" -#include "codegen.h" -#include "globals.h" -#include "macro-assembler.h" +#include "src/allocation.h" +#include "src/assembler.h" +#include "src/codegen.h" +#include "src/globals.h" +#include "src/macro-assembler.h" +#include "src/ostreams.h" namespace v8 { namespace internal { // List of code stubs used on all platforms. 
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \ - V(CallFunction) \ - V(CallConstruct) \ - V(BinaryOpIC) \ - V(BinaryOpICWithAllocationSite) \ - V(BinaryOpWithAllocationSite) \ - V(StringAdd) \ - V(SubString) \ - V(StringCompare) \ - V(Compare) \ - V(CompareIC) \ - V(CompareNilIC) \ - V(MathPow) \ - V(FunctionPrototype) \ - V(RecordWrite) \ - V(StoreBufferOverflow) \ - V(RegExpExec) \ - V(Instanceof) \ - V(ConvertToDouble) \ - V(WriteInt32ToHeapNumber) \ - V(StackCheck) \ - V(Interrupt) \ - V(FastNewClosure) \ - V(FastNewContext) \ - V(FastCloneShallowArray) \ - V(FastCloneShallowObject) \ - V(CreateAllocationSite) \ - V(ToBoolean) \ - V(ToNumber) \ - V(ArgumentsAccess) \ - V(RegExpConstructResult) \ - V(NumberToString) \ - V(DoubleToI) \ - V(CEntry) \ - V(JSEntry) \ - V(KeyedLoadElement) \ - V(ArrayPush) \ - V(ArrayNoArgumentConstructor) \ - V(ArraySingleArgumentConstructor) \ - V(ArrayNArgumentsConstructor) \ - V(InternalArrayNoArgumentConstructor) \ - V(InternalArraySingleArgumentConstructor) \ - V(InternalArrayNArgumentsConstructor) \ - V(KeyedStoreElement) \ - V(DebuggerStatement) \ - V(NameDictionaryLookup) \ - V(ElementsTransitionAndStore) \ - V(TransitionElementsKind) \ - V(StoreArrayLiteralElement) \ - V(StubFailureTrampoline) \ - V(ArrayConstructor) \ - V(InternalArrayConstructor) \ - V(ProfileEntryHook) \ - V(StoreGlobal) \ - V(CallApiFunction) \ - V(CallApiGetter) \ - /* IC Handler stubs */ \ - V(LoadField) \ - V(KeyedLoadField) \ - V(StringLength) \ - V(KeyedStringLength) +#define CODE_STUB_LIST_ALL_PLATFORMS(V) \ + V(CallFunction) \ + V(CallConstruct) \ + V(BinaryOpIC) \ + V(BinaryOpICWithAllocationSite) \ + V(BinaryOpWithAllocationSite) \ + V(StringAdd) \ + V(SubString) \ + V(StringCompare) \ + V(Compare) \ + V(CompareIC) \ + V(CompareNilIC) \ + V(MathPow) \ + V(CallIC) \ + V(CallIC_Array) \ + V(FunctionPrototype) \ + V(RecordWrite) \ + V(StoreBufferOverflow) \ + V(RegExpExec) \ + V(Instanceof) \ + V(ConvertToDouble) \ + V(WriteInt32ToHeapNumber) \ + 
V(StackCheck) \ + V(Interrupt) \ + V(FastNewClosure) \ + V(FastNewContext) \ + V(FastCloneShallowArray) \ + V(FastCloneShallowObject) \ + V(CreateAllocationSite) \ + V(ToBoolean) \ + V(ToNumber) \ + V(ArgumentsAccess) \ + V(RegExpConstructResult) \ + V(NumberToString) \ + V(DoubleToI) \ + V(CEntry) \ + V(JSEntry) \ + V(LoadElement) \ + V(KeyedLoadGeneric) \ + V(ArrayNoArgumentConstructor) \ + V(ArraySingleArgumentConstructor) \ + V(ArrayNArgumentsConstructor) \ + V(InternalArrayNoArgumentConstructor) \ + V(InternalArraySingleArgumentConstructor) \ + V(InternalArrayNArgumentsConstructor) \ + V(StoreElement) \ + V(DebuggerStatement) \ + V(NameDictionaryLookup) \ + V(ElementsTransitionAndStore) \ + V(TransitionElementsKind) \ + V(StoreArrayLiteralElement) \ + V(StubFailureTrampoline) \ + V(ArrayConstructor) \ + V(InternalArrayConstructor) \ + V(ProfileEntryHook) \ + V(StoreGlobal) \ + V(CallApiFunction) \ + V(CallApiGetter) \ + /* IC Handler stubs */ \ + V(LoadField) \ + V(StoreField) \ + V(LoadConstant) \ + V(StringLength) // List of code stubs only used on ARM 32 bits platforms. #if V8_TARGET_ARCH_ARM @@ -131,6 +111,12 @@ V(DirectCEntry) \ V(StoreRegistersState) \ V(RestoreRegistersState) +#elif V8_TARGET_ARCH_MIPS64 +#define CODE_STUB_LIST_MIPS(V) \ + V(RegExpCEntry) \ + V(DirectCEntry) \ + V(StoreRegistersState) \ + V(RestoreRegistersState) #else #define CODE_STUB_LIST_MIPS(V) #endif @@ -155,11 +141,10 @@ }; // Retrieve the code for the stub. Generate the code if needed. - Handle<Code> GetCode(Isolate* isolate); + Handle<Code> GetCode(); // Retrieve the code for the stub, make and return a copy of the code. - Handle<Code> GetCodeCopy( - Isolate* isolate, const Code::FindAndReplacePattern& pattern); + Handle<Code> GetCodeCopy(const Code::FindAndReplacePattern& pattern); static Major MajorKeyFromKey(uint32_t key) { return static_cast<Major>(MajorKeyBits::decode(key)); @@ -170,11 +155,14 @@ // Gets the major key from a code object that is a code stub or binary op IC. 
static Major GetMajorKey(Code* code_stub) { - return static_cast<Major>(code_stub->major_key()); + return MajorKeyFromKey(code_stub->stub_key()); } + static uint32_t NoCacheKey() { return MajorKeyBits::encode(NoCache); } + static const char* MajorName(Major major_key, bool allow_unknown_keys); + explicit CodeStub(Isolate* isolate) : isolate_(isolate) { } virtual ~CodeStub() {} static void GenerateStubsAheadOfTime(Isolate* isolate); @@ -189,46 +177,47 @@ virtual bool SometimesSetsUpAFrame() { return true; } // Lookup the code in the (possibly custom) cache. - bool FindCodeInCache(Code** code_out, Isolate* isolate); + bool FindCodeInCache(Code** code_out); // Returns information for computing the number key. - virtual Major MajorKey() = 0; - virtual int MinorKey() = 0; + virtual Major MajorKey() const = 0; + virtual int MinorKey() const = 0; - virtual InlineCacheState GetICState() { - return UNINITIALIZED; - } - virtual ExtraICState GetExtraICState() { - return kNoExtraICState; - } + virtual InlineCacheState GetICState() const { return UNINITIALIZED; } + virtual ExtraICState GetExtraICState() const { return kNoExtraICState; } virtual Code::StubType GetStubType() { return Code::NORMAL; } - virtual void PrintName(StringStream* stream); + friend OStream& operator<<(OStream& os, const CodeStub& s) { + s.PrintName(os); + return os; + } - // Returns a name for logging/debugging purposes. - SmartArrayPointer<const char> GetName(); + Isolate* isolate() const { return isolate_; } protected: - static bool CanUseFPRegisters(); - // Generates the assembler code for the stub. - virtual Handle<Code> GenerateCode(Isolate* isolate) = 0; - - virtual void VerifyPlatformFeatures(Isolate* isolate); + virtual Handle<Code> GenerateCode() = 0; // Returns whether the code generated for this stub needs to be allocated as // a fixed (non-moveable) code object. 
virtual bool NeedsImmovableCode() { return false; } - virtual void PrintBaseName(StringStream* stream); - virtual void PrintState(StringStream* stream) { } + virtual void PrintName(OStream& os) const; // NOLINT + virtual void PrintBaseName(OStream& os) const; // NOLINT + virtual void PrintState(OStream& os) const { ; } // NOLINT + + // Computes the key based on major and minor. + uint32_t GetKey() { + DCHECK(static_cast<int>(MajorKey()) < NUMBER_OF_IDS); + return MinorKeyBits::encode(MinorKey()) | MajorKeyBits::encode(MajorKey()); + } private: // Perform bookkeeping required after code generation when stub code is // initially generated. - void RecordCodeGeneration(Code* code, Isolate* isolate); + void RecordCodeGeneration(Handle<Code> code); // Finish the code object after it has been generated. virtual void FinishCode(Handle<Code> code) { } @@ -246,33 +235,30 @@ virtual void AddToSpecialCache(Handle<Code> new_object) { } // Find code in a specialized cache, work is delegated to the specific stub. - virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) { + virtual bool FindCodeInSpecialCache(Code** code_out) { return false; } // If a stub uses a special cache override this. virtual bool UseSpecialCache() { return false; } - // Computes the key based on major and minor. - uint32_t GetKey() { - ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS); - return MinorKeyBits::encode(MinorKey()) | - MajorKeyBits::encode(MajorKey()); - } - STATIC_ASSERT(NUMBER_OF_IDS < (1 << kStubMajorKeyBits)); class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {}; class MinorKeyBits: public BitField<uint32_t, kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT friend class BreakPointIterator; + + Isolate* isolate_; }; class PlatformCodeStub : public CodeStub { public: + explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) { } + // Retrieve the code for the stub. Generate the code if needed. 
- virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual Code::Kind GetCodeKind() const { return Code::STUB; } @@ -285,97 +271,162 @@ enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE }; enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS }; -struct CodeStubInterfaceDescriptor { - CodeStubInterfaceDescriptor(); - int register_param_count_; - Register stack_parameter_count_; - // if hint_stack_parameter_count_ > 0, the code stub can optimize the - // return sequence. Default value is -1, which means it is ignored. - int hint_stack_parameter_count_; - StubFunctionMode function_mode_; - Register* register_params_; +class PlatformInterfaceDescriptor; - Address deoptimization_handler_; - HandlerArgumentsMode handler_arguments_mode_; - bool initialized() const { return register_param_count_ >= 0; } +class InterfaceDescriptor { + public: + bool IsInitialized() const { return register_param_count_ >= 0; } + + int GetEnvironmentLength() const { return register_param_count_; } + + int GetRegisterParameterCount() const { return register_param_count_; } + + Register GetParameterRegister(int index) const { + return register_params_[index]; + } + + Representation GetParameterRepresentation(int index) const { + DCHECK(index < register_param_count_); + if (register_param_representations_.get() == NULL) { + return Representation::Tagged(); + } + + return register_param_representations_[index]; + } + + // "Environment" versions of parameter functions. The first register + // parameter (context) is not included. 
+ int GetEnvironmentParameterCount() const { + return GetEnvironmentLength() - 1; + } - int environment_length() const { - return register_param_count_; + Register GetEnvironmentParameterRegister(int index) const { + return GetParameterRegister(index + 1); } + Representation GetEnvironmentParameterRepresentation(int index) const { + return GetParameterRepresentation(index + 1); + } + + // Some platforms have extra information to associate with the descriptor. + PlatformInterfaceDescriptor* platform_specific_descriptor() const { + return platform_specific_descriptor_; + } + + static const Register ContextRegister(); + + protected: + InterfaceDescriptor(); + virtual ~InterfaceDescriptor() {} + + void Initialize(int register_parameter_count, Register* registers, + Representation* register_param_representations, + PlatformInterfaceDescriptor* platform_descriptor = NULL); + + private: + int register_param_count_; + + // The Register params are allocated dynamically by the + // InterfaceDescriptor, and freed on destruction. This is because static + // arrays of Registers cause creation of runtime static initializers + // which we don't want. + SmartArrayPointer<Register> register_params_; + // Specifies Representations for the stub's parameter. Points to an array of + // Representations of the same length of the numbers of parameters to the + // stub, or if NULL (the default value), Representation of each parameter + // assumed to be Tagged(). 
+ SmartArrayPointer<Representation> register_param_representations_; + + PlatformInterfaceDescriptor* platform_specific_descriptor_; + + DISALLOW_COPY_AND_ASSIGN(InterfaceDescriptor); +}; + + +class CodeStubInterfaceDescriptor: public InterfaceDescriptor { + public: + CodeStubInterfaceDescriptor(); + + void Initialize(CodeStub::Major major, int register_parameter_count, + Register* registers, Address deoptimization_handler = NULL, + Representation* register_param_representations = NULL, + int hint_stack_parameter_count = -1, + StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE); + void Initialize(CodeStub::Major major, int register_parameter_count, + Register* registers, Register stack_parameter_count, + Address deoptimization_handler = NULL, + Representation* register_param_representations = NULL, + int hint_stack_parameter_count = -1, + StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE, + HandlerArgumentsMode handler_mode = DONT_PASS_ARGUMENTS); + void SetMissHandler(ExternalReference handler) { miss_handler_ = handler; has_miss_handler_ = true; // Our miss handler infrastructure doesn't currently support // variable stack parameter counts. 
- ASSERT(!stack_parameter_count_.is_valid()); + DCHECK(!stack_parameter_count_.is_valid()); } - ExternalReference miss_handler() { - ASSERT(has_miss_handler_); + ExternalReference miss_handler() const { + DCHECK(has_miss_handler_); return miss_handler_; } - bool has_miss_handler() { + bool has_miss_handler() const { return has_miss_handler_; } - Register GetParameterRegister(int index) const { - return register_params_[index]; - } - - bool IsParameterCountRegister(int index) { - return GetParameterRegister(index).is(stack_parameter_count_); + bool IsEnvironmentParameterCountRegister(int index) const { + return GetEnvironmentParameterRegister(index).is(stack_parameter_count_); } - int GetHandlerParameterCount() { - int params = environment_length(); + int GetHandlerParameterCount() const { + int params = GetEnvironmentParameterCount(); if (handler_arguments_mode_ == PASS_ARGUMENTS) { params += 1; } return params; } + int hint_stack_parameter_count() const { return hint_stack_parameter_count_; } + Register stack_parameter_count() const { return stack_parameter_count_; } + StubFunctionMode function_mode() const { return function_mode_; } + Address deoptimization_handler() const { return deoptimization_handler_; } + CodeStub::Major MajorKey() const { return major_; } + private: + Register stack_parameter_count_; + // If hint_stack_parameter_count_ > 0, the code stub can optimize the + // return sequence. Default value is -1, which means it is ignored. 
+ int hint_stack_parameter_count_; + StubFunctionMode function_mode_; + + Address deoptimization_handler_; + HandlerArgumentsMode handler_arguments_mode_; + ExternalReference miss_handler_; bool has_miss_handler_; + CodeStub::Major major_; }; -struct PlatformCallInterfaceDescriptor; - - -struct CallInterfaceDescriptor { - CallInterfaceDescriptor() - : register_param_count_(-1), - register_params_(NULL), - param_representations_(NULL), - platform_specific_descriptor_(NULL) { } - - bool initialized() const { return register_param_count_ >= 0; } - - int environment_length() const { - return register_param_count_; - } - - Representation GetParameterRepresentation(int index) const { - return param_representations_[index]; - } - - Register GetParameterRegister(int index) const { - return register_params_[index]; - } +class CallInterfaceDescriptor: public InterfaceDescriptor { + public: + CallInterfaceDescriptor() { } - PlatformCallInterfaceDescriptor* platform_specific_descriptor() const { - return platform_specific_descriptor_; - } + // A copy of the passed in registers and param_representations is made + // and owned by the CallInterfaceDescriptor. - int register_param_count_; - Register* register_params_; - Representation* param_representations_; - PlatformCallInterfaceDescriptor* platform_specific_descriptor_; + // TODO(mvstanton): Instead of taking parallel arrays register and + // param_representations, how about a struct that puts the representation + // and register side by side (eg, RegRep(r1, Representation::Tagged()). + // The same should go for the CodeStubInterfaceDescriptor class. 
+ void Initialize(int register_parameter_count, Register* registers, + Representation* param_representations, + PlatformInterfaceDescriptor* platform_descriptor = NULL); }; @@ -386,14 +437,16 @@ INITIALIZED }; - explicit HydrogenCodeStub(InitializationState state = INITIALIZED) { + explicit HydrogenCodeStub(Isolate* isolate, + InitializationState state = INITIALIZED) + : CodeStub(isolate) { is_uninitialized_ = (state == UNINITIALIZED); } virtual Code::Kind GetCodeKind() const { return Code::STUB; } - CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) { - return isolate->code_stub_interface_descriptor(MajorKey()); + CodeStubInterfaceDescriptor* GetInterfaceDescriptor() { + return isolate()->code_stub_interface_descriptor(MajorKey()); } bool IsUninitialized() { return is_uninitialized_; } @@ -405,15 +458,14 @@ } virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) = 0; // Retrieve the code for the stub. Generate the code if needed. 
- virtual Handle<Code> GenerateCode(Isolate* isolate) = 0; + virtual Handle<Code> GenerateCode() = 0; - virtual int NotMissMinorKey() = 0; + virtual int NotMissMinorKey() const = 0; - Handle<Code> GenerateLightweightMissCode(Isolate* isolate); + Handle<Code> GenerateLightweightMissCode(); template<class StateType> void TraceTransition(StateType from, StateType to); @@ -423,7 +475,7 @@ class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {}; void GenerateLightweightMiss(MacroAssembler* masm); - virtual int MinorKey() { + virtual int MinorKey() const { return IsMissBits::encode(is_uninitialized_) | MinorKeyBits::encode(NotMissMinorKey()); } @@ -452,15 +504,19 @@ } } // namespace v8::internal #if V8_TARGET_ARCH_IA32 -#include "ia32/code-stubs-ia32.h" +#include "src/ia32/code-stubs-ia32.h" #elif V8_TARGET_ARCH_X64 -#include "x64/code-stubs-x64.h" +#include "src/x64/code-stubs-x64.h" #elif V8_TARGET_ARCH_ARM64 -#include "arm64/code-stubs-arm64.h" +#include "src/arm64/code-stubs-arm64.h" #elif V8_TARGET_ARCH_ARM -#include "arm/code-stubs-arm.h" +#include "src/arm/code-stubs-arm.h" #elif V8_TARGET_ARCH_MIPS -#include "mips/code-stubs-mips.h" +#include "src/mips/code-stubs-mips.h" +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/code-stubs-mips64.h" +#elif V8_TARGET_ARCH_X87 +#include "src/x87/code-stubs-x87.h" #else #error Unsupported target architecture. 
#endif @@ -494,35 +550,32 @@ class ToNumberStub: public HydrogenCodeStub { public: - ToNumberStub() { } + explicit ToNumberStub(Isolate* isolate) : HydrogenCodeStub(isolate) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate) { - ToNumberStub stub; + ToNumberStub stub(isolate); stub.InitializeInterfaceDescriptor( - isolate, isolate->code_stub_interface_descriptor(CodeStub::ToNumber)); } private: - Major MajorKey() { return ToNumber; } - int NotMissMinorKey() { return 0; } + Major MajorKey() const { return ToNumber; } + int NotMissMinorKey() const { return 0; } }; class NumberToStringStub V8_FINAL : public HydrogenCodeStub { public: - NumberToStringStub() {} + explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {} - virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); @@ -531,22 +584,24 @@ static const int kNumber = 0; private: - virtual Major MajorKey() V8_OVERRIDE { return NumberToString; } - virtual int NotMissMinorKey() V8_OVERRIDE { return 0; } + virtual Major MajorKey() const V8_OVERRIDE { return NumberToString; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; } }; class FastNewClosureStub : public HydrogenCodeStub { public: - explicit FastNewClosureStub(StrictMode strict_mode, bool is_generator) - : strict_mode_(strict_mode), - is_generator_(is_generator) { } + FastNewClosureStub(Isolate* isolate, + StrictMode strict_mode, + bool is_generator) + : HydrogenCodeStub(isolate), + strict_mode_(strict_mode), + 
is_generator_(is_generator) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); @@ -557,8 +612,8 @@ class StrictModeBits: public BitField<bool, 0, 1> {}; class IsGeneratorBits: public BitField<bool, 1, 1> {}; - Major MajorKey() { return FastNewClosure; } - int NotMissMinorKey() { + Major MajorKey() const { return FastNewClosure; } + int NotMissMinorKey() const { return StrictModeBits::encode(strict_mode_ == STRICT) | IsGeneratorBits::encode(is_generator_); } @@ -572,22 +627,22 @@ public: static const int kMaximumSlots = 64; - explicit FastNewContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); + FastNewContextStub(Isolate* isolate, int slots) + : HydrogenCodeStub(isolate), slots_(slots) { + DCHECK(slots_ > 0 && slots_ <= kMaximumSlots); } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); int slots() const { return slots_; } - virtual Major MajorKey() V8_OVERRIDE { return FastNewContext; } - virtual int NotMissMinorKey() V8_OVERRIDE { return slots_; } + virtual Major MajorKey() const V8_OVERRIDE { return FastNewContext; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return slots_; } // Parameters accessed via CodeStubGraphBuilder::GetParameter() static const int kFunction = 0; @@ -599,73 +654,30 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub { public: - // Maximum length of copied elements array. 
- static const int kMaximumClonedLength = 8; - enum Mode { - CLONE_ELEMENTS, - CLONE_DOUBLE_ELEMENTS, - COPY_ON_WRITE_ELEMENTS, - CLONE_ANY_ELEMENTS, - LAST_CLONE_MODE = CLONE_ANY_ELEMENTS - }; + FastCloneShallowArrayStub(Isolate* isolate, + AllocationSiteMode allocation_site_mode) + : HydrogenCodeStub(isolate), + allocation_site_mode_(allocation_site_mode) {} - static const int kFastCloneModeCount = LAST_CLONE_MODE + 1; - - FastCloneShallowArrayStub(Mode mode, - AllocationSiteMode allocation_site_mode, - int length) - : mode_(mode), - allocation_site_mode_(allocation_site_mode), - length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) { - ASSERT_GE(length_, 0); - ASSERT_LE(length_, kMaximumClonedLength); - } - - Mode mode() const { return mode_; } - int length() const { return length_; } AllocationSiteMode allocation_site_mode() const { return allocation_site_mode_; } - ElementsKind ComputeElementsKind() const { - switch (mode()) { - case CLONE_ELEMENTS: - case COPY_ON_WRITE_ELEMENTS: - return FAST_ELEMENTS; - case CLONE_DOUBLE_ELEMENTS: - return FAST_DOUBLE_ELEMENTS; - case CLONE_ANY_ELEMENTS: - /*fall-through*/; - } - UNREACHABLE(); - return LAST_ELEMENTS_KIND; - } - - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode(); virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); private: - Mode mode_; AllocationSiteMode allocation_site_mode_; - int length_; class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {}; - class ModeBits: public BitField<Mode, 1, 4> {}; - class LengthBits: public BitField<int, 5, 4> {}; // Ensure data fits within available bits. 
- STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1); - STATIC_ASSERT(kFastCloneModeCount < 16); - STATIC_ASSERT(kMaximumClonedLength < 16); - Major MajorKey() { return FastCloneShallowArray; } - int NotMissMinorKey() { - return AllocationSiteModeBits::encode(allocation_site_mode_) - | ModeBits::encode(mode_) - | LengthBits::encode(length_); + Major MajorKey() const { return FastCloneShallowArray; } + int NotMissMinorKey() const { + return AllocationSiteModeBits::encode(allocation_site_mode_); } }; @@ -675,24 +687,24 @@ // Maximum number of properties in copied object. static const int kMaximumClonedProperties = 6; - explicit FastCloneShallowObjectStub(int length) : length_(length) { - ASSERT_GE(length_, 0); - ASSERT_LE(length_, kMaximumClonedProperties); + FastCloneShallowObjectStub(Isolate* isolate, int length) + : HydrogenCodeStub(isolate), length_(length) { + DCHECK_GE(length_, 0); + DCHECK_LE(length_, kMaximumClonedProperties); } int length() const { return length_; } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: int length_; - Major MajorKey() { return FastCloneShallowObject; } - int NotMissMinorKey() { return length_; } + Major MajorKey() const { return FastCloneShallowObject; } + int NotMissMinorKey() const { return length_; } DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub); }; @@ -700,19 +712,19 @@ class CreateAllocationSiteStub : public HydrogenCodeStub { public: - explicit CreateAllocationSiteStub() { } + explicit CreateAllocationSiteStub(Isolate* isolate) + : HydrogenCodeStub(isolate) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; static void GenerateAheadOfTime(Isolate* isolate); virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - 
CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return CreateAllocationSite; } - int NotMissMinorKey() { return 0; } + Major MajorKey() const { return CreateAllocationSite; } + int NotMissMinorKey() const { return 0; } DISALLOW_COPY_AND_ASSIGN(CreateAllocationSiteStub); }; @@ -727,16 +739,20 @@ kReturnTrueFalseObject = 1 << 2 }; - explicit InstanceofStub(Flags flags) : flags_(flags) { } + InstanceofStub(Isolate* isolate, Flags flags) + : PlatformCodeStub(isolate), flags_(flags) { } static Register left(); static Register right(); void Generate(MacroAssembler* masm); + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor); + private: - Major MajorKey() { return Instanceof; } - int MinorKey() { return static_cast<int>(flags_); } + Major MajorKey() const { return Instanceof; } + int MinorKey() const { return static_cast<int>(flags_); } bool HasArgsInRegisters() const { return (flags_ & kArgsInRegisters) != 0; @@ -750,7 +766,7 @@ return (flags_ & kReturnTrueFalseObject) != 0; } - virtual void PrintName(StringStream* stream); + virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT Flags flags_; }; @@ -774,10 +790,10 @@ private: void GenerateDispatchToArrayStub(MacroAssembler* masm, AllocationSiteOverrideMode mode); - virtual void PrintName(StringStream* stream); + virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT - virtual CodeStub::Major MajorKey() { return ArrayConstructor; } - virtual int MinorKey() { return argument_count_; } + virtual CodeStub::Major MajorKey() const { return ArrayConstructor; } + virtual int MinorKey() const { return argument_count_; } ArgumentCountKey argument_count_; }; @@ -790,8 +806,8 @@ void Generate(MacroAssembler* masm); private: - virtual CodeStub::Major MajorKey() { return InternalArrayConstructor; } - virtual int MinorKey() { return 0; } + virtual CodeStub::Major MajorKey() const { return 
InternalArrayConstructor; } + virtual int MinorKey() const { return 0; } void GenerateCase(MacroAssembler* masm, ElementsKind kind); }; @@ -801,199 +817,200 @@ public: enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK }; - explicit MathPowStub(ExponentType exponent_type) - : exponent_type_(exponent_type) { } + MathPowStub(Isolate* isolate, ExponentType exponent_type) + : PlatformCodeStub(isolate), exponent_type_(exponent_type) { } virtual void Generate(MacroAssembler* masm); private: - virtual CodeStub::Major MajorKey() { return MathPow; } - virtual int MinorKey() { return exponent_type_; } + virtual CodeStub::Major MajorKey() const { return MathPow; } + virtual int MinorKey() const { return exponent_type_; } ExponentType exponent_type_; }; -class ICStub: public PlatformCodeStub { +class CallICStub: public PlatformCodeStub { public: - explicit ICStub(Code::Kind kind) : kind_(kind) { } - virtual Code::Kind GetCodeKind() const { return kind_; } - virtual InlineCacheState GetICState() { return MONOMORPHIC; } + CallICStub(Isolate* isolate, const CallIC::State& state) + : PlatformCodeStub(isolate), state_(state) {} + + bool CallAsMethod() const { return state_.CallAsMethod(); } - bool Describes(Code* code) { - return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey(); + int arg_count() const { return state_.arg_count(); } + + static int ExtractArgcFromMinorKey(int minor_key) { + CallIC::State state((ExtraICState) minor_key); + return state.arg_count(); } - protected: - class KindBits: public BitField<Code::Kind, 0, 4> {}; - virtual void FinishCode(Handle<Code> code) { - code->set_stub_info(MinorKey()); + virtual void Generate(MacroAssembler* masm); + + virtual Code::Kind GetCodeKind() const V8_OVERRIDE { + return Code::CALL_IC; } - Code::Kind kind() { return kind_; } - virtual int MinorKey() { - return KindBits::encode(kind_); + virtual InlineCacheState GetICState() const V8_OVERRIDE { return DEFAULT; } + + virtual ExtraICState GetExtraICState() 
const V8_FINAL V8_OVERRIDE { + return state_.GetExtraICState(); } - private: - Code::Kind kind_; -}; + protected: + virtual int MinorKey() const { return GetExtraICState(); } + virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT + virtual CodeStub::Major MajorKey() const { return CallIC; } -class FunctionPrototypeStub: public ICStub { - public: - explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { } - virtual void Generate(MacroAssembler* masm); + // Code generation helpers. + void GenerateMiss(MacroAssembler* masm, IC::UtilityId id); - private: - virtual CodeStub::Major MajorKey() { return FunctionPrototype; } + const CallIC::State state_; }; -class StoreICStub: public ICStub { +class CallIC_ArrayStub: public CallICStub { public: - StoreICStub(Code::Kind kind, StrictMode strict_mode) - : ICStub(kind), strict_mode_(strict_mode) { } + CallIC_ArrayStub(Isolate* isolate, const CallIC::State& state_in) + : CallICStub(isolate, state_in) {} - protected: - virtual ExtraICState GetExtraICState() { - return StoreIC::ComputeExtraICState(strict_mode_); - } + virtual void Generate(MacroAssembler* masm); - private: - STATIC_ASSERT(KindBits::kSize == 4); - class StrictModeBits: public BitField<bool, 4, 1> {}; - virtual int MinorKey() { - return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_); + virtual InlineCacheState GetICState() const V8_FINAL V8_OVERRIDE { + return MONOMORPHIC; } - StrictMode strict_mode_; + protected: + virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT + + virtual CodeStub::Major MajorKey() const { return CallIC_Array; } }; -class HICStub: public HydrogenCodeStub { +// TODO(verwaest): Translate to hydrogen code stub. 
+class FunctionPrototypeStub : public PlatformCodeStub { public: - virtual Code::Kind GetCodeKind() const { return kind(); } - virtual InlineCacheState GetICState() { return MONOMORPHIC; } + explicit FunctionPrototypeStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} + virtual void Generate(MacroAssembler* masm); + virtual Code::Kind GetCodeKind() const { return Code::HANDLER; } - protected: - class KindBits: public BitField<Code::Kind, 0, 4> {}; - virtual Code::Kind kind() const = 0; + private: + virtual CodeStub::Major MajorKey() const { return FunctionPrototype; } + virtual int MinorKey() const { return 0; } }; -class HandlerStub: public HICStub { +class HandlerStub : public HydrogenCodeStub { public: virtual Code::Kind GetCodeKind() const { return Code::HANDLER; } - virtual ExtraICState GetExtraICState() { return kind(); } + virtual ExtraICState GetExtraICState() const { return kind(); } + virtual InlineCacheState GetICState() const { return MONOMORPHIC; } + + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; protected: - HandlerStub() : HICStub() { } - virtual int NotMissMinorKey() { return bit_field_; } + explicit HandlerStub(Isolate* isolate) + : HydrogenCodeStub(isolate), bit_field_(0) {} + virtual int NotMissMinorKey() const { return bit_field_; } + virtual Code::Kind kind() const = 0; int bit_field_; }; class LoadFieldStub: public HandlerStub { public: - LoadFieldStub(bool inobject, int index, Representation representation) { - Initialize(Code::LOAD_IC, inobject, index, representation); + LoadFieldStub(Isolate* isolate, FieldIndex index) + : HandlerStub(isolate), index_(index) { + int property_index_key = index_.GetFieldAccessStubKey(); + bit_field_ = EncodedLoadFieldByIndexBits::encode(property_index_key); } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; - virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - 
CodeStubInterfaceDescriptor* descriptor); + FieldIndex index() const { return index_; } - Representation representation() { - if (unboxed_double()) return Representation::Double(); - return Representation::Tagged(); - } + protected: + explicit LoadFieldStub(Isolate* isolate); + virtual Code::Kind kind() const { return Code::LOAD_IC; } + virtual Code::StubType GetStubType() { return Code::FAST; } - virtual Code::Kind kind() const { - return KindBits::decode(bit_field_); - } + private: + class EncodedLoadFieldByIndexBits : public BitField<int, 0, 13> {}; + virtual CodeStub::Major MajorKey() const { return LoadField; } + FieldIndex index_; +}; - bool is_inobject() { - return InobjectBits::decode(bit_field_); - } - int offset() { - int index = IndexBits::decode(bit_field_); - int offset = index * kPointerSize; - if (is_inobject()) return offset; - return FixedArray::kHeaderSize + offset; +class LoadConstantStub : public HandlerStub { + public: + LoadConstantStub(Isolate* isolate, int descriptor) : HandlerStub(isolate) { + bit_field_ = descriptor; } - bool unboxed_double() { - return UnboxedDoubleBits::decode(bit_field_); - } + virtual Handle<Code> GenerateCode() V8_OVERRIDE; - virtual Code::StubType GetStubType() { return Code::FAST; } + int descriptor() const { return bit_field_; } protected: - LoadFieldStub() : HandlerStub() { } - - void Initialize(Code::Kind kind, - bool inobject, - int index, - Representation representation) { - bit_field_ = KindBits::encode(kind) - | InobjectBits::encode(inobject) - | IndexBits::encode(index) - | UnboxedDoubleBits::encode(representation.IsDouble()); - } + explicit LoadConstantStub(Isolate* isolate); + virtual Code::Kind kind() const { return Code::LOAD_IC; } + virtual Code::StubType GetStubType() { return Code::FAST; } private: - STATIC_ASSERT(KindBits::kSize == 4); - class InobjectBits: public BitField<bool, 4, 1> {}; - class IndexBits: public BitField<int, 5, 11> {}; - class UnboxedDoubleBits: public BitField<bool, 16, 1> {}; - 
virtual CodeStub::Major MajorKey() { return LoadField; } + virtual CodeStub::Major MajorKey() const { return LoadConstant; } }; class StringLengthStub: public HandlerStub { public: - explicit StringLengthStub() : HandlerStub() { - Initialize(Code::LOAD_IC); - } - virtual Handle<Code> GenerateCode(Isolate* isolate); - virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {} + virtual Handle<Code> GenerateCode() V8_OVERRIDE; protected: - virtual Code::Kind kind() const { - return KindBits::decode(bit_field_); - } - - void Initialize(Code::Kind kind) { - bit_field_ = KindBits::encode(kind); - } + virtual Code::Kind kind() const { return Code::LOAD_IC; } + virtual Code::StubType GetStubType() { return Code::FAST; } private: - virtual CodeStub::Major MajorKey() { return StringLength; } + virtual CodeStub::Major MajorKey() const { return StringLength; } }; -class KeyedStringLengthStub: public StringLengthStub { +class StoreFieldStub : public HandlerStub { public: - explicit KeyedStringLengthStub() : StringLengthStub() { - Initialize(Code::KEYED_LOAD_IC); + StoreFieldStub(Isolate* isolate, FieldIndex index, + Representation representation) + : HandlerStub(isolate), index_(index), representation_(representation) { + int property_index_key = index_.GetFieldAccessStubKey(); + bit_field_ = EncodedStoreFieldByIndexBits::encode(property_index_key) | + RepresentationBits::encode( + PropertyDetails::EncodeRepresentation(representation)); } - virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + + virtual Handle<Code> GenerateCode() V8_OVERRIDE; + + FieldIndex index() const { return index_; } + Representation representation() { return representation_; } + static void InstallDescriptors(Isolate* isolate); + + protected: + explicit StoreFieldStub(Isolate* isolate); + virtual Code::Kind kind() const { return 
Code::STORE_IC; } + virtual Code::StubType GetStubType() { return Code::FAST; } private: - virtual CodeStub::Major MajorKey() { return KeyedStringLength; } + class EncodedStoreFieldByIndexBits : public BitField<int, 0, 13> {}; + class RepresentationBits : public BitField<int, 13, 4> {}; + virtual CodeStub::Major MajorKey() const { return StoreField; } + FieldIndex index_; + Representation representation_; }; class StoreGlobalStub : public HandlerStub { public: - explicit StoreGlobalStub(bool is_constant, bool check_global) { + StoreGlobalStub(Isolate* isolate, bool is_constant, bool check_global) + : HandlerStub(isolate) { bit_field_ = IsConstantBits::encode(is_constant) | CheckGlobalBits::encode(check_global); } @@ -1002,29 +1019,24 @@ return isolate->factory()->uninitialized_value(); } - Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate, - Handle<GlobalObject> global, + Handle<Code> GetCodeCopyFromTemplate(Handle<GlobalObject> global, Handle<PropertyCell> cell) { if (check_global()) { Code::FindAndReplacePattern pattern; - pattern.Add(Handle<Map>(global_placeholder(isolate)->map()), global); - pattern.Add(isolate->factory()->meta_map(), Handle<Map>(global->map())); - pattern.Add(isolate->factory()->global_property_cell_map(), cell); - return CodeStub::GetCodeCopy(isolate, pattern); + pattern.Add(Handle<Map>(global_placeholder(isolate())->map()), global); + pattern.Add(isolate()->factory()->meta_map(), Handle<Map>(global->map())); + pattern.Add(isolate()->factory()->global_property_cell_map(), cell); + return CodeStub::GetCodeCopy(pattern); } else { Code::FindAndReplacePattern pattern; - pattern.Add(isolate->factory()->global_property_cell_map(), cell); - return CodeStub::GetCodeCopy(isolate, pattern); + pattern.Add(isolate()->factory()->global_property_cell_map(), cell); + return CodeStub::GetCodeCopy(pattern); } } virtual Code::Kind kind() const { return Code::STORE_IC; } - virtual Handle<Code> GenerateCode(Isolate* isolate); - - virtual void 
InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; bool is_constant() const { return IsConstantBits::decode(bit_field_); @@ -1044,7 +1056,7 @@ } private: - Major MajorKey() { return StoreGlobal; } + Major MajorKey() const { return StoreGlobal; } class IsConstantBits: public BitField<bool, 0, 1> {}; class RepresentationBits: public BitField<Representation::Kind, 1, 8> {}; @@ -1056,20 +1068,21 @@ class CallApiFunctionStub : public PlatformCodeStub { public: - CallApiFunctionStub(bool is_store, + CallApiFunctionStub(Isolate* isolate, + bool is_store, bool call_data_undefined, - int argc) { + int argc) : PlatformCodeStub(isolate) { bit_field_ = IsStoreBits::encode(is_store) | CallDataUndefinedBits::encode(call_data_undefined) | ArgumentBits::encode(argc); - ASSERT(!is_store || argc == 1); + DCHECK(!is_store || argc == 1); } private: virtual void Generate(MacroAssembler* masm) V8_OVERRIDE; - virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; } - virtual int MinorKey() V8_OVERRIDE { return bit_field_; } + virtual Major MajorKey() const V8_OVERRIDE { return CallApiFunction; } + virtual int MinorKey() const V8_OVERRIDE { return bit_field_; } class IsStoreBits: public BitField<bool, 0, 1> {}; class CallDataUndefinedBits: public BitField<bool, 1, 1> {}; @@ -1083,44 +1096,30 @@ class CallApiGetterStub : public PlatformCodeStub { public: - CallApiGetterStub() {} + explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: virtual void Generate(MacroAssembler* masm) V8_OVERRIDE; - virtual Major MajorKey() V8_OVERRIDE { return CallApiGetter; } - virtual int MinorKey() V8_OVERRIDE { return 0; } + virtual Major MajorKey() const V8_OVERRIDE { return CallApiGetter; } + virtual int MinorKey() const V8_OVERRIDE { return 0; } DISALLOW_COPY_AND_ASSIGN(CallApiGetterStub); }; -class KeyedLoadFieldStub: public LoadFieldStub { - public: - 
KeyedLoadFieldStub(bool inobject, int index, Representation representation) - : LoadFieldStub() { - Initialize(Code::KEYED_LOAD_IC, inobject, index, representation); - } - - virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); - - private: - virtual CodeStub::Major MajorKey() { return KeyedLoadField; } -}; - - class BinaryOpICStub : public HydrogenCodeStub { public: - BinaryOpICStub(Token::Value op, OverwriteMode mode) - : HydrogenCodeStub(UNINITIALIZED), state_(op, mode) {} + BinaryOpICStub(Isolate* isolate, Token::Value op, + OverwriteMode mode = NO_OVERWRITE) + : HydrogenCodeStub(isolate, UNINITIALIZED), state_(isolate, op, mode) {} - explicit BinaryOpICStub(const BinaryOpIC::State& state) : state_(state) {} + explicit BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state) + : HydrogenCodeStub(isolate), state_(state) {} static void GenerateAheadOfTime(Isolate* isolate); virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); @@ -1128,26 +1127,22 @@ return Code::BINARY_OP_IC; } - virtual InlineCacheState GetICState() V8_FINAL V8_OVERRIDE { + virtual InlineCacheState GetICState() const V8_FINAL V8_OVERRIDE { return state_.GetICState(); } - virtual ExtraICState GetExtraICState() V8_FINAL V8_OVERRIDE { + virtual ExtraICState GetExtraICState() const V8_FINAL V8_OVERRIDE { return state_.GetExtraICState(); } - virtual void VerifyPlatformFeatures(Isolate* isolate) V8_FINAL V8_OVERRIDE { - ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); - } - - virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + virtual Handle<Code> GenerateCode() V8_OVERRIDE; const BinaryOpIC::State& state() const { return state_; } - virtual void PrintState(StringStream* stream) V8_FINAL V8_OVERRIDE; + virtual void PrintState(OStream& os) const V8_FINAL 
V8_OVERRIDE; // NOLINT - virtual Major MajorKey() V8_OVERRIDE { return BinaryOpIC; } - virtual int NotMissMinorKey() V8_FINAL V8_OVERRIDE { + virtual Major MajorKey() const V8_OVERRIDE { return BinaryOpIC; } + virtual int NotMissMinorKey() const V8_FINAL V8_OVERRIDE { return GetExtraICState(); } @@ -1165,68 +1160,42 @@ }; -class ArrayPushStub: public PlatformCodeStub { - public: - ArrayPushStub(ElementsKind kind, int argc) { - bit_field_ = ElementsKindBits::encode(kind) | ArgcBits::encode(argc); - } - - void Generate(MacroAssembler* masm); - - private: - int arguments_count() { return ArgcBits::decode(bit_field_); } - ElementsKind elements_kind() { - return ElementsKindBits::decode(bit_field_); - } - - virtual CodeStub::Major MajorKey() { return ArrayPush; } - virtual int MinorKey() { return bit_field_; } - - class ElementsKindBits: public BitField<ElementsKind, 0, 3> {}; - class ArgcBits: public BitField<int, 3, Code::kArgumentsBits> {}; - - int bit_field_; -}; - - // TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail // call support for stubs in Hydrogen. 
class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub { public: - explicit BinaryOpICWithAllocationSiteStub(const BinaryOpIC::State& state) - : state_(state) {} + BinaryOpICWithAllocationSiteStub(Isolate* isolate, + const BinaryOpIC::State& state) + : PlatformCodeStub(isolate), state_(state) {} static void GenerateAheadOfTime(Isolate* isolate); - Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate, - Handle<AllocationSite> allocation_site) { + Handle<Code> GetCodeCopyFromTemplate(Handle<AllocationSite> allocation_site) { Code::FindAndReplacePattern pattern; - pattern.Add(isolate->factory()->oddball_map(), allocation_site); - return CodeStub::GetCodeCopy(isolate, pattern); + pattern.Add(isolate()->factory()->undefined_map(), allocation_site); + return CodeStub::GetCodeCopy(pattern); } virtual Code::Kind GetCodeKind() const V8_OVERRIDE { return Code::BINARY_OP_IC; } - virtual InlineCacheState GetICState() V8_OVERRIDE { + virtual InlineCacheState GetICState() const V8_OVERRIDE { return state_.GetICState(); } - virtual ExtraICState GetExtraICState() V8_OVERRIDE { + virtual ExtraICState GetExtraICState() const V8_OVERRIDE { return state_.GetExtraICState(); } - virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { - ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); - } - virtual void Generate(MacroAssembler* masm) V8_OVERRIDE; - virtual void PrintState(StringStream* stream) V8_OVERRIDE; + virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT - virtual Major MajorKey() V8_OVERRIDE { return BinaryOpICWithAllocationSite; } - virtual int MinorKey() V8_OVERRIDE { return GetExtraICState(); } + virtual Major MajorKey() const V8_OVERRIDE { + return BinaryOpICWithAllocationSite; + } + virtual int MinorKey() const V8_OVERRIDE { return GetExtraICState(); } private: static void GenerateAheadOfTime(Isolate* isolate, @@ -1240,14 +1209,17 @@ class BinaryOpWithAllocationSiteStub V8_FINAL : public BinaryOpICStub { public: - 
BinaryOpWithAllocationSiteStub(Token::Value op, OverwriteMode mode) - : BinaryOpICStub(op, mode) {} - - explicit BinaryOpWithAllocationSiteStub(const BinaryOpIC::State& state) - : BinaryOpICStub(state) {} + BinaryOpWithAllocationSiteStub(Isolate* isolate, + Token::Value op, + OverwriteMode mode) + : BinaryOpICStub(isolate, op, mode) {} + + BinaryOpWithAllocationSiteStub(Isolate* isolate, + const BinaryOpIC::State& state) + : BinaryOpICStub(isolate, state) {} virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); @@ -1255,9 +1227,9 @@ return Code::STUB; } - virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + virtual Handle<Code> GenerateCode() V8_OVERRIDE; - virtual Major MajorKey() V8_OVERRIDE { + virtual Major MajorKey() const V8_OVERRIDE { return BinaryOpWithAllocationSite; } @@ -1282,8 +1254,11 @@ class StringAddStub V8_FINAL : public HydrogenCodeStub { public: - StringAddStub(StringAddFlags flags, PretenureFlag pretenure_flag) - : bit_field_(StringAddFlagsBits::encode(flags) | + StringAddStub(Isolate* isolate, + StringAddFlags flags, + PretenureFlag pretenure_flag) + : HydrogenCodeStub(isolate), + bit_field_(StringAddFlagsBits::encode(flags) | PretenureFlagBits::encode(pretenure_flag)) {} StringAddFlags flags() const { @@ -1294,14 +1269,9 @@ return PretenureFlagBits::decode(bit_field_); } - virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { - ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); - } - - virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate); @@ -1315,10 +1285,10 @@ class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> 
{}; uint32_t bit_field_; - virtual Major MajorKey() V8_OVERRIDE { return StringAdd; } - virtual int NotMissMinorKey() V8_OVERRIDE { return bit_field_; } + virtual Major MajorKey() const V8_OVERRIDE { return StringAdd; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return bit_field_; } - virtual void PrintBaseName(StringStream* stream) V8_OVERRIDE; + virtual void PrintBaseName(OStream& os) const V8_OVERRIDE; // NOLINT DISALLOW_COPY_AND_ASSIGN(StringAddStub); }; @@ -1326,28 +1296,28 @@ class ICCompareStub: public PlatformCodeStub { public: - ICCompareStub(Token::Value op, + ICCompareStub(Isolate* isolate, + Token::Value op, CompareIC::State left, CompareIC::State right, CompareIC::State handler) - : op_(op), + : PlatformCodeStub(isolate), + op_(op), left_(left), right_(right), state_(handler) { - ASSERT(Token::IsCompareOp(op)); + DCHECK(Token::IsCompareOp(op)); } virtual void Generate(MacroAssembler* masm); void set_known_map(Handle<Map> map) { known_map_ = map; } - static void DecodeMinorKey(int minor_key, - CompareIC::State* left_state, - CompareIC::State* right_state, - CompareIC::State* handler_state, - Token::Value* op); + static void DecodeKey(uint32_t stub_key, CompareIC::State* left_state, + CompareIC::State* right_state, + CompareIC::State* handler_state, Token::Value* op); - virtual InlineCacheState GetICState(); + virtual InlineCacheState GetICState() const; private: class OpField: public BitField<int, 0, 3> { }; @@ -1355,12 +1325,8 @@ class RightStateField: public BitField<int, 7, 4> { }; class HandlerStateField: public BitField<int, 11, 4> { }; - virtual void FinishCode(Handle<Code> code) { - code->set_stub_info(MinorKey()); - } - - virtual CodeStub::Major MajorKey() { return CompareIC; } - virtual int MinorKey(); + virtual CodeStub::Major MajorKey() const { return CompareIC; } + virtual int MinorKey() const; virtual Code::Kind GetCodeKind() const { return Code::COMPARE_IC; } @@ -1378,7 +1344,7 @@ Condition GetCondition() const { return 
CompareIC::ComputeCondition(op_); } virtual void AddToSpecialCache(Handle<Code> new_object); - virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate); + virtual bool FindCodeInSpecialCache(Code** code_out); virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; } Token::Value op_; @@ -1394,32 +1360,32 @@ Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>()); Type* GetInputType(Zone* zone, Handle<Map> map); - explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { } + CompareNilICStub(Isolate* isolate, NilValue nil) + : HydrogenCodeStub(isolate), nil_value_(nil) { } - CompareNilICStub(ExtraICState ic_state, + CompareNilICStub(Isolate* isolate, + ExtraICState ic_state, InitializationState init_state = INITIALIZED) - : HydrogenCodeStub(init_state), + : HydrogenCodeStub(isolate, init_state), nil_value_(NilValueField::decode(ic_state)), state_(State(TypesField::decode(ic_state))) { } static Handle<Code> GetUninitialized(Isolate* isolate, NilValue nil) { - return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate); + return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode(); } virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; static void InstallDescriptors(Isolate* isolate) { - CompareNilICStub compare_stub(kNullValue, UNINITIALIZED); + CompareNilICStub compare_stub(isolate, kNullValue, UNINITIALIZED); compare_stub.InitializeInterfaceDescriptor( - isolate, isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC)); } - virtual InlineCacheState GetICState() { + virtual InlineCacheState GetICState() const { if (state_.Contains(GENERIC)) { return MEGAMORPHIC; } else if (state_.Contains(MONOMORPHIC_MAP)) { @@ -1431,9 +1397,9 @@ virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; - 
virtual ExtraICState GetExtraICState() { + virtual ExtraICState GetExtraICState() const { return NilValueField::encode(nil_value_) | TypesField::encode(state_.ToIntegral()); } @@ -1444,8 +1410,8 @@ NilValue GetNilValue() const { return nil_value_; } void ClearState() { state_.RemoveAll(); } - virtual void PrintState(StringStream* stream); - virtual void PrintBaseName(StringStream* stream); + virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT + virtual void PrintBaseName(OStream& os) const V8_OVERRIDE; // NOLINT private: friend class CompareNilIC; @@ -1467,18 +1433,19 @@ public: State() : EnumSet<CompareNilType, byte>(0) { } explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { } - - void Print(StringStream* stream) const; }; + friend OStream& operator<<(OStream& os, const State& s); - CompareNilICStub(NilValue nil, InitializationState init_state) - : HydrogenCodeStub(init_state), nil_value_(nil) { } + CompareNilICStub(Isolate* isolate, + NilValue nil, + InitializationState init_state) + : HydrogenCodeStub(isolate, init_state), nil_value_(nil) { } class NilValueField : public BitField<NilValue, 0, 1> {}; class TypesField : public BitField<byte, 1, NUMBER_OF_TYPES> {}; - virtual CodeStub::Major MajorKey() { return CompareNilIC; } - virtual int NotMissMinorKey() { return GetExtraICState(); } + virtual CodeStub::Major MajorKey() const { return CompareNilIC; } + virtual int NotMissMinorKey() const { return GetExtraICState(); } NilValue nil_value_; State state_; @@ -1487,11 +1454,17 @@ }; +OStream& operator<<(OStream& os, const CompareNilICStub::State& s); + + class CEntryStub : public PlatformCodeStub { public: - explicit CEntryStub(int result_size, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) - : result_size_(result_size), save_doubles_(save_doubles) { } + CEntryStub(Isolate* isolate, + int result_size, + SaveFPRegsMode save_doubles = kDontSaveFPRegs) + : PlatformCodeStub(isolate), + result_size_(result_size), + 
save_doubles_(save_doubles) { } void Generate(MacroAssembler* masm); @@ -1501,25 +1474,13 @@ // can generate both variants ahead of time. static void GenerateAheadOfTime(Isolate* isolate); - protected: - virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { - ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); - }; - private: - void GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - bool do_gc, - bool always_allocate_scope); - // Number of pointers/values returned. - Isolate* isolate_; const int result_size_; SaveFPRegsMode save_doubles_; - Major MajorKey() { return CEntry; } - int MinorKey(); + Major MajorKey() const { return CEntry; } + int MinorKey() const; bool NeedsImmovableCode(); }; @@ -1527,7 +1488,7 @@ class JSEntryStub : public PlatformCodeStub { public: - JSEntryStub() { } + explicit JSEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) { } void Generate(MacroAssembler* masm) { GenerateBody(masm, false); } @@ -1535,8 +1496,8 @@ void GenerateBody(MacroAssembler* masm, bool is_construct); private: - Major MajorKey() { return JSEntry; } - int MinorKey() { return 0; } + Major MajorKey() const { return JSEntry; } + int MinorKey() const { return 0; } virtual void FinishCode(Handle<Code> code); @@ -1546,15 +1507,15 @@ class JSConstructEntryStub : public JSEntryStub { public: - JSConstructEntryStub() { } + explicit JSConstructEntryStub(Isolate* isolate) : JSEntryStub(isolate) { } void Generate(MacroAssembler* masm) { GenerateBody(masm, true); } private: - int MinorKey() { return 1; } + int MinorKey() const { return 1; } - virtual void PrintName(StringStream* stream) { - stream->Add("JSConstructEntryStub"); + virtual void PrintName(OStream& os) const V8_OVERRIDE { // NOLINT + os << "JSConstructEntryStub"; } }; @@ -1568,13 +1529,14 @@ NEW_STRICT }; - explicit ArgumentsAccessStub(Type type) : type_(type) { } + ArgumentsAccessStub(Isolate* isolate, Type type) + : PlatformCodeStub(isolate), 
type_(type) { } private: Type type_; - Major MajorKey() { return ArgumentsAccess; } - int MinorKey() { return type_; } + Major MajorKey() const { return ArgumentsAccess; } + int MinorKey() const { return type_; } void Generate(MacroAssembler* masm); void GenerateReadElement(MacroAssembler* masm); @@ -1582,17 +1544,17 @@ void GenerateNewSloppyFast(MacroAssembler* masm); void GenerateNewSloppySlow(MacroAssembler* masm); - virtual void PrintName(StringStream* stream); + virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT }; class RegExpExecStub: public PlatformCodeStub { public: - RegExpExecStub() { } + explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { } private: - Major MajorKey() { return RegExpExec; } - int MinorKey() { return 0; } + Major MajorKey() const { return RegExpExec; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -1600,16 +1562,16 @@ class RegExpConstructResultStub V8_FINAL : public HydrogenCodeStub { public: - RegExpConstructResultStub() { } + explicit RegExpConstructResultStub(Isolate* isolate) + : HydrogenCodeStub(isolate) { } - virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; - virtual Major MajorKey() V8_OVERRIDE { return RegExpConstructResult; } - virtual int NotMissMinorKey() V8_OVERRIDE { return 0; } + virtual Major MajorKey() const V8_OVERRIDE { return RegExpConstructResult; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; } static void InstallDescriptors(Isolate* isolate); @@ -1625,39 +1587,38 @@ class CallFunctionStub: public PlatformCodeStub { public: - CallFunctionStub(int argc, CallFunctionFlags flags) - : argc_(argc), flags_(flags) { } + CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags) + : PlatformCodeStub(isolate), argc_(argc), flags_(flags) { + 
DCHECK(argc <= Code::kMaxArguments); + } void Generate(MacroAssembler* masm); - virtual void FinishCode(Handle<Code> code) { - code->set_has_function_cache(RecordCallTarget()); - } - static int ExtractArgcFromMinorKey(int minor_key) { return ArgcBits::decode(minor_key); } + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor); + private: int argc_; CallFunctionFlags flags_; - virtual void PrintName(StringStream* stream); + virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT // Minor key encoding in 32 bits with Bitfield <Type, shift, size>. class FlagBits: public BitField<CallFunctionFlags, 0, 2> {}; - class ArgcBits: public BitField<unsigned, 2, 32 - 2> {}; + class ArgcBits : public BitField<unsigned, 2, Code::kArgumentsBits> {}; + + STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits); - Major MajorKey() { return CallFunction; } - int MinorKey() { + Major MajorKey() const { return CallFunction; } + int MinorKey() const { // Encode the parameters in a unique 32 bit value. 
return FlagBits::encode(flags_) | ArgcBits::encode(argc_); } - bool RecordCallTarget() { - return flags_ == RECORD_CALL_TARGET; - } - bool CallAsMethod() { return flags_ == CALL_AS_METHOD || flags_ == WRAP_AND_CALL; } @@ -1670,7 +1631,8 @@ class CallConstructStub: public PlatformCodeStub { public: - explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {} + CallConstructStub(Isolate* isolate, CallConstructorFlags flags) + : PlatformCodeStub(isolate), flags_(flags) {} void Generate(MacroAssembler* masm); @@ -1678,20 +1640,19 @@ code->set_has_function_cache(RecordCallTarget()); } - private: - CallFunctionFlags flags_; + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor); - virtual void PrintName(StringStream* stream); + private: + CallConstructorFlags flags_; - Major MajorKey() { return CallConstruct; } - int MinorKey() { return flags_; } + virtual void PrintName(OStream& os) const V8_OVERRIDE; // NOLINT - bool RecordCallTarget() { - return (flags_ & RECORD_CALL_TARGET) != 0; - } + Major MajorKey() const { return CallConstruct; } + int MinorKey() const { return flags_; } - bool CallAsMethod() { - return (flags_ & CALL_AS_METHOD) != 0; + bool RecordCallTarget() const { + return (flags_ & RECORD_CONSTRUCTOR_TARGET) != 0; } }; @@ -1733,8 +1694,8 @@ index_not_number_(index_not_number), index_out_of_range_(index_out_of_range), index_flags_(index_flags) { - ASSERT(!result_.is(object_)); - ASSERT(!result_.is(index_)); + DCHECK(!result_.is(object_)); + DCHECK(!result_.is(index_)); } // Generates the fast case code. On the fallthrough path |result| @@ -1781,7 +1742,7 @@ Register result) : code_(code), result_(result) { - ASSERT(!code_.is(result_)); + DCHECK(!code_.is(result_)); } // Generates the fast case code. 
On the fallthrough path |result| @@ -1870,52 +1831,76 @@ }; -class KeyedLoadDictionaryElementStub : public HydrogenCodeStub { +class LoadDictionaryElementStub : public HydrogenCodeStub { public: - KeyedLoadDictionaryElementStub() {} + explicit LoadDictionaryElementStub(Isolate* isolate) + : HydrogenCodeStub(isolate) {} - virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return KeyedLoadElement; } - int NotMissMinorKey() { return DICTIONARY_ELEMENTS; } + Major MajorKey() const { return LoadElement; } + int NotMissMinorKey() const { return DICTIONARY_ELEMENTS; } - DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub); + DISALLOW_COPY_AND_ASSIGN(LoadDictionaryElementStub); }; -class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub { +class LoadDictionaryElementPlatformStub : public PlatformCodeStub { public: - KeyedLoadDictionaryElementPlatformStub() {} + explicit LoadDictionaryElementPlatformStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} void Generate(MacroAssembler* masm); private: - Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return DICTIONARY_ELEMENTS; } + Major MajorKey() const { return LoadElement; } + int MinorKey() const { return DICTIONARY_ELEMENTS; } - DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementPlatformStub); + DISALLOW_COPY_AND_ASSIGN(LoadDictionaryElementPlatformStub); +}; + + +class KeyedLoadGenericStub : public HydrogenCodeStub { + public: + explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {} + + virtual Handle<Code> GenerateCode() V8_OVERRIDE; + + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; + + static void InstallDescriptors(Isolate* isolate); + + virtual Code::Kind GetCodeKind() const { return 
Code::KEYED_LOAD_IC; } + virtual InlineCacheState GetICState() const { return GENERIC; } + + private: + Major MajorKey() const { return KeyedLoadGeneric; } + int NotMissMinorKey() const { return 0; } + + DISALLOW_COPY_AND_ASSIGN(KeyedLoadGenericStub); }; class DoubleToIStub : public PlatformCodeStub { public: - DoubleToIStub(Register source, + DoubleToIStub(Isolate* isolate, + Register source, Register destination, int offset, bool is_truncating, - bool skip_fastpath = false) : bit_field_(0) { + bool skip_fastpath = false) + : PlatformCodeStub(isolate), bit_field_(0) { bit_field_ = SourceRegisterBits::encode(source.code()) | DestinationRegisterBits::encode(destination.code()) | OffsetBits::encode(offset) | IsTruncatingBits::encode(is_truncating) | SkipFastPathBits::encode(skip_fastpath) | - SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ? - CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0); + SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0); } Register source() { @@ -1942,11 +1927,6 @@ virtual bool SometimesSetsUpAFrame() { return false; } - protected: - virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { - ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); - } - private: static const int kBitsPerRegisterNumber = 6; STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters); @@ -1961,11 +1941,11 @@ public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT class SkipFastPathBits: public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT - class SSEBits: - public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT + class SSE3Bits: + public BitField<int, 2 * kBitsPerRegisterNumber + 5, 1> {}; // NOLINT - Major MajorKey() { return DoubleToI; } - int MinorKey() { return bit_field_; } + Major MajorKey() const { return DoubleToI; } + int MinorKey() const { return bit_field_; } int bit_field_; @@ -1973,9 +1953,11 @@ }; -class KeyedLoadFastElementStub : public HydrogenCodeStub { +class 
LoadFastElementStub : public HydrogenCodeStub { public: - KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) { + LoadFastElementStub(Isolate* isolate, bool is_js_array, + ElementsKind elements_kind) + : HydrogenCodeStub(isolate) { bit_field_ = ElementsKindBits::encode(elements_kind) | IsJSArrayBits::encode(is_js_array); } @@ -1988,29 +1970,28 @@ return ElementsKindBits::decode(bit_field_); } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; class IsJSArrayBits: public BitField<bool, 8, 1> {}; uint32_t bit_field_; - Major MajorKey() { return KeyedLoadElement; } - int NotMissMinorKey() { return bit_field_; } + Major MajorKey() const { return LoadElement; } + int NotMissMinorKey() const { return bit_field_; } - DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub); + DISALLOW_COPY_AND_ASSIGN(LoadFastElementStub); }; -class KeyedStoreFastElementStub : public HydrogenCodeStub { +class StoreFastElementStub : public HydrogenCodeStub { public: - KeyedStoreFastElementStub(bool is_js_array, - ElementsKind elements_kind, - KeyedAccessStoreMode mode) { + StoreFastElementStub(Isolate* isolate, bool is_js_array, + ElementsKind elements_kind, KeyedAccessStoreMode mode) + : HydrogenCodeStub(isolate) { bit_field_ = ElementsKindBits::encode(elements_kind) | IsJSArrayBits::encode(is_js_array) | StoreModeBits::encode(mode); @@ -2028,11 +2009,10 @@ return StoreModeBits::decode(bit_field_); } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: class 
ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; @@ -2040,18 +2020,19 @@ class IsJSArrayBits: public BitField<bool, 12, 1> {}; uint32_t bit_field_; - Major MajorKey() { return KeyedStoreElement; } - int NotMissMinorKey() { return bit_field_; } + Major MajorKey() const { return StoreElement; } + int NotMissMinorKey() const { return bit_field_; } - DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub); + DISALLOW_COPY_AND_ASSIGN(StoreFastElementStub); }; class TransitionElementsKindStub : public HydrogenCodeStub { public: - TransitionElementsKindStub(ElementsKind from_kind, + TransitionElementsKindStub(Isolate* isolate, + ElementsKind from_kind, ElementsKind to_kind, - bool is_js_array) { + bool is_js_array) : HydrogenCodeStub(isolate) { bit_field_ = FromKindBits::encode(from_kind) | ToKindBits::encode(to_kind) | IsJSArrayBits::encode(is_js_array); @@ -2069,11 +2050,10 @@ return IsJSArrayBits::decode(bit_field_); } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: class FromKindBits: public BitField<ElementsKind, 8, 8> {}; @@ -2081,8 +2061,8 @@ class IsJSArrayBits: public BitField<bool, 16, 1> {}; uint32_t bit_field_; - Major MajorKey() { return TransitionElementsKind; } - int NotMissMinorKey() { return bit_field_; } + Major MajorKey() const { return TransitionElementsKind; } + int NotMissMinorKey() const { return bit_field_; } DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub); }; @@ -2090,12 +2070,14 @@ class ArrayConstructorStubBase : public HydrogenCodeStub { public: - ArrayConstructorStubBase(ElementsKind kind, - AllocationSiteOverrideMode override_mode) { + ArrayConstructorStubBase(Isolate* isolate, + ElementsKind kind, + AllocationSiteOverrideMode override_mode) + : HydrogenCodeStub(isolate) { // It only makes sense to 
override local allocation site behavior // if there is a difference between the global allocation site policy // for an ElementsKind and the desired usage of the stub. - ASSERT(override_mode != DISABLE_ALLOCATION_SITES || + DCHECK(override_mode != DISABLE_ALLOCATION_SITES || AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE); bit_field_ = ElementsKindBits::encode(kind) | AllocationSiteOverrideModeBits::encode(override_mode); @@ -2117,10 +2099,10 @@ static const int kAllocationSite = 1; protected: - void BasePrintName(const char* name, StringStream* stream); + OStream& BasePrintName(OStream& os, const char* name) const; // NOLINT private: - int NotMissMinorKey() { return bit_field_; } + int NotMissMinorKey() const { return bit_field_; } // Ensure data fits within available bits. STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1); @@ -2137,22 +2119,22 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase { public: ArrayNoArgumentConstructorStub( + Isolate* isolate, ElementsKind kind, AllocationSiteOverrideMode override_mode = DONT_OVERRIDE) - : ArrayConstructorStubBase(kind, override_mode) { + : ArrayConstructorStubBase(isolate, kind, override_mode) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return ArrayNoArgumentConstructor; } + Major MajorKey() const { return ArrayNoArgumentConstructor; } - virtual void PrintName(StringStream* stream) { - BasePrintName("ArrayNoArgumentConstructorStub", stream); + virtual void PrintName(OStream& os) const V8_OVERRIDE { // NOLINT + BasePrintName(os, "ArrayNoArgumentConstructorStub"); } DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub); @@ -2162,22 +2144,22 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase { public: 
ArraySingleArgumentConstructorStub( + Isolate* isolate, ElementsKind kind, AllocationSiteOverrideMode override_mode = DONT_OVERRIDE) - : ArrayConstructorStubBase(kind, override_mode) { + : ArrayConstructorStubBase(isolate, kind, override_mode) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return ArraySingleArgumentConstructor; } + Major MajorKey() const { return ArraySingleArgumentConstructor; } - virtual void PrintName(StringStream* stream) { - BasePrintName("ArraySingleArgumentConstructorStub", stream); + virtual void PrintName(OStream& os) const { // NOLINT + BasePrintName(os, "ArraySingleArgumentConstructorStub"); } DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub); @@ -2187,22 +2169,22 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase { public: ArrayNArgumentsConstructorStub( + Isolate* isolate, ElementsKind kind, AllocationSiteOverrideMode override_mode = DONT_OVERRIDE) - : ArrayConstructorStubBase(kind, override_mode) { + : ArrayConstructorStubBase(isolate, kind, override_mode) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return ArrayNArgumentsConstructor; } + Major MajorKey() const { return ArrayNArgumentsConstructor; } - virtual void PrintName(StringStream* stream) { - BasePrintName("ArrayNArgumentsConstructorStub", stream); + virtual void PrintName(OStream& os) const { // NOLINT + BasePrintName(os, "ArrayNArgumentsConstructorStub"); } DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub); @@ -2211,7 +2193,8 @@ 
class InternalArrayConstructorStubBase : public HydrogenCodeStub { public: - explicit InternalArrayConstructorStubBase(ElementsKind kind) { + InternalArrayConstructorStubBase(Isolate* isolate, ElementsKind kind) + : HydrogenCodeStub(isolate) { kind_ = kind; } @@ -2224,7 +2207,7 @@ ElementsKind elements_kind() const { return kind_; } private: - int NotMissMinorKey() { return kind_; } + int NotMissMinorKey() const { return kind_; } ElementsKind kind_; @@ -2235,17 +2218,17 @@ class InternalArrayNoArgumentConstructorStub : public InternalArrayConstructorStubBase { public: - explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind) - : InternalArrayConstructorStubBase(kind) { } + InternalArrayNoArgumentConstructorStub(Isolate* isolate, + ElementsKind kind) + : InternalArrayConstructorStubBase(isolate, kind) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return InternalArrayNoArgumentConstructor; } + Major MajorKey() const { return InternalArrayNoArgumentConstructor; } DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub); }; @@ -2254,17 +2237,17 @@ class InternalArraySingleArgumentConstructorStub : public InternalArrayConstructorStubBase { public: - explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind) - : InternalArrayConstructorStubBase(kind) { } + InternalArraySingleArgumentConstructorStub(Isolate* isolate, + ElementsKind kind) + : InternalArrayConstructorStubBase(isolate, kind) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { 
return InternalArraySingleArgumentConstructor; } + Major MajorKey() const { return InternalArraySingleArgumentConstructor; } DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub); }; @@ -2273,38 +2256,35 @@ class InternalArrayNArgumentsConstructorStub : public InternalArrayConstructorStubBase { public: - explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind) - : InternalArrayConstructorStubBase(kind) { } + InternalArrayNArgumentsConstructorStub(Isolate* isolate, ElementsKind kind) + : InternalArrayConstructorStubBase(isolate, kind) { } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; private: - Major MajorKey() { return InternalArrayNArgumentsConstructor; } + Major MajorKey() const { return InternalArrayNArgumentsConstructor; } DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub); }; -class KeyedStoreElementStub : public PlatformCodeStub { +class StoreElementStub : public PlatformCodeStub { public: - KeyedStoreElementStub(bool is_js_array, - ElementsKind elements_kind, - KeyedAccessStoreMode store_mode) - : is_js_array_(is_js_array), + StoreElementStub(Isolate* isolate, bool is_js_array, + ElementsKind elements_kind, KeyedAccessStoreMode store_mode) + : PlatformCodeStub(isolate), + is_js_array_(is_js_array), elements_kind_(elements_kind), - store_mode_(store_mode), - fp_registers_(CanUseFPRegisters()) { } + store_mode_(store_mode) {} - Major MajorKey() { return KeyedStoreElement; } - int MinorKey() { + Major MajorKey() const { return StoreElement; } + int MinorKey() const { return ElementsKindBits::encode(elements_kind_) | IsJSArrayBits::encode(is_js_array_) | - StoreModeBits::encode(store_mode_) | - FPRegisters::encode(fp_registers_); + StoreModeBits::encode(store_mode_); } void Generate(MacroAssembler* 
masm); @@ -2313,14 +2293,12 @@ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {}; class IsJSArrayBits: public BitField<bool, 12, 1> {}; - class FPRegisters: public BitField<bool, 13, 1> {}; bool is_js_array_; ElementsKind elements_kind_; KeyedAccessStoreMode store_mode_; - bool fp_registers_; - DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub); + DISALLOW_COPY_AND_ASSIGN(StoreElementStub); }; @@ -2338,6 +2316,12 @@ NUMBER_OF_TYPES }; + enum ResultMode { + RESULT_AS_SMI, // For Smi(1) on truthy value, Smi(0) otherwise. + RESULT_AS_ODDBALL, // For {true} on truthy value, {false} otherwise. + RESULT_AS_INVERSE_ODDBALL // For {false} on truthy value, {true} otherwise. + }; + // At most 8 different types can be distinguished, because the Code object // only has room for a single byte to hold a set of these types. :-P STATIC_ASSERT(NUMBER_OF_TYPES <= 8); @@ -2348,7 +2332,6 @@ explicit Types(byte bits) : EnumSet<Type, byte>(bits) {} byte ToByte() const { return ToIntegral(); } - void Print(StringStream* stream) const; bool UpdateStatus(Handle<Object> object); bool NeedsMap() const; bool CanBeUndetectable() const; @@ -2357,40 +2340,39 @@ static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); } }; - explicit ToBooleanStub(Types types = Types()) - : types_(types) { } - explicit ToBooleanStub(ExtraICState state) - : types_(static_cast<byte>(state)) { } + ToBooleanStub(Isolate* isolate, ResultMode mode, Types types = Types()) + : HydrogenCodeStub(isolate), types_(types), mode_(mode) {} + ToBooleanStub(Isolate* isolate, ExtraICState state) + : HydrogenCodeStub(isolate), + types_(static_cast<byte>(state)), + mode_(RESULT_AS_SMI) {} bool UpdateStatus(Handle<Object> object); Types GetTypes() { return types_; } + ResultMode GetMode() { return mode_; } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; virtual void 
InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; } - virtual void PrintState(StringStream* stream); + virtual void PrintState(OStream& os) const V8_OVERRIDE; // NOLINT virtual bool SometimesSetsUpAFrame() { return false; } static void InstallDescriptors(Isolate* isolate) { - ToBooleanStub stub; + ToBooleanStub stub(isolate, RESULT_AS_SMI); stub.InitializeInterfaceDescriptor( - isolate, isolate->code_stub_interface_descriptor(CodeStub::ToBoolean)); } static Handle<Code> GetUninitialized(Isolate* isolate) { - return ToBooleanStub(UNINITIALIZED).GetCode(isolate); + return ToBooleanStub(isolate, UNINITIALIZED).GetCode(); } - virtual ExtraICState GetExtraICState() { - return types_.ToIntegral(); - } + virtual ExtraICState GetExtraICState() const { return types_.ToIntegral(); } - virtual InlineCacheState GetICState() { + virtual InlineCacheState GetICState() const { if (types_.IsEmpty()) { return ::v8::internal::UNINITIALIZED; } else { @@ -2399,23 +2381,34 @@ } private: - Major MajorKey() { return ToBoolean; } - int NotMissMinorKey() { return GetExtraICState(); } + class TypesBits : public BitField<byte, 0, NUMBER_OF_TYPES> {}; + class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {}; + + Major MajorKey() const { return ToBoolean; } + int NotMissMinorKey() const { + return TypesBits::encode(types_.ToByte()) | ResultModeBits::encode(mode_); + } - explicit ToBooleanStub(InitializationState init_state) : - HydrogenCodeStub(init_state) {} + ToBooleanStub(Isolate* isolate, InitializationState init_state) + : HydrogenCodeStub(isolate, init_state), mode_(RESULT_AS_SMI) {} Types types_; + ResultMode mode_; }; +OStream& operator<<(OStream& os, const ToBooleanStub::Types& t); + + class ElementsTransitionAndStoreStub : public HydrogenCodeStub { public: - 
ElementsTransitionAndStoreStub(ElementsKind from_kind, + ElementsTransitionAndStoreStub(Isolate* isolate, + ElementsKind from_kind, ElementsKind to_kind, bool is_jsarray, KeyedAccessStoreMode store_mode) - : from_kind_(from_kind), + : HydrogenCodeStub(isolate), + from_kind_(from_kind), to_kind_(to_kind), is_jsarray_(is_jsarray), store_mode_(store_mode) {} @@ -2425,11 +2418,28 @@ bool is_jsarray() const { return is_jsarray_; } KeyedAccessStoreMode store_mode() const { return store_mode_; } - virtual Handle<Code> GenerateCode(Isolate* isolate); + virtual Handle<Code> GenerateCode() V8_OVERRIDE; - void InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor); + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; + + // Parameters accessed via CodeStubGraphBuilder::GetParameter() + enum ParameterIndices { + kValueIndex, + kMapIndex, + kKeyIndex, + kObjectIndex, + kParameterCount + }; + + static const Register ValueRegister() { + return KeyedStoreIC::ValueRegister(); + } + static const Register MapRegister() { return KeyedStoreIC::MapRegister(); } + static const Register KeyRegister() { return KeyedStoreIC::NameRegister(); } + static const Register ObjectRegister() { + return KeyedStoreIC::ReceiverRegister(); + } private: class FromBits: public BitField<ElementsKind, 0, 8> {}; @@ -2437,8 +2447,8 @@ class IsJSArrayBits: public BitField<bool, 16, 1> {}; class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {}; - Major MajorKey() { return ElementsTransitionAndStore; } - int NotMissMinorKey() { + Major MajorKey() const { return ElementsTransitionAndStore; } + int NotMissMinorKey() const { return FromBits::encode(from_kind_) | ToBits::encode(to_kind_) | IsJSArrayBits::encode(is_jsarray_) | @@ -2456,43 +2466,35 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub { public: - StoreArrayLiteralElementStub() - : fp_registers_(CanUseFPRegisters()) { } + explicit 
StoreArrayLiteralElementStub(Isolate* isolate) + : PlatformCodeStub(isolate) { } private: - class FPRegisters: public BitField<bool, 0, 1> {}; - - Major MajorKey() { return StoreArrayLiteralElement; } - int MinorKey() { return FPRegisters::encode(fp_registers_); } + Major MajorKey() const { return StoreArrayLiteralElement; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); - bool fp_registers_; - DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub); }; class StubFailureTrampolineStub : public PlatformCodeStub { public: - explicit StubFailureTrampolineStub(StubFunctionMode function_mode) - : fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {} + StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode) + : PlatformCodeStub(isolate), + function_mode_(function_mode) {} static void GenerateAheadOfTime(Isolate* isolate); private: - class FPRegisters: public BitField<bool, 0, 1> {}; - class FunctionModeField: public BitField<StubFunctionMode, 1, 1> {}; + class FunctionModeField: public BitField<StubFunctionMode, 0, 1> {}; - Major MajorKey() { return StubFailureTrampoline; } - int MinorKey() { - return FPRegisters::encode(fp_registers_) | - FunctionModeField::encode(function_mode_); - } + Major MajorKey() const { return StubFailureTrampoline; } + int MinorKey() const { return FunctionModeField::encode(function_mode_); } void Generate(MacroAssembler* masm); - bool fp_registers_; StubFunctionMode function_mode_; DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub); @@ -2501,7 +2503,7 @@ class ProfileEntryHookStub : public PlatformCodeStub { public: - explicit ProfileEntryHookStub() {} + explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {} // The profile entry hook function is not allowed to cause a GC. 
virtual bool SometimesSetsUpAFrame() { return false; } @@ -2514,8 +2516,8 @@ intptr_t stack_pointer, Isolate* isolate); - Major MajorKey() { return ProfileEntryHook; } - int MinorKey() { return 0; } + Major MajorKey() const { return ProfileEntryHook; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); diff -Nru nodejs-0.11.13/deps/v8/src/code-stubs-hydrogen.cc nodejs-0.11.15/deps/v8/src/code-stubs-hydrogen.cc --- nodejs-0.11.13/deps/v8/src/code-stubs-hydrogen.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/code-stubs-hydrogen.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "code-stubs.h" -#include "hydrogen.h" -#include "lithium.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/code-stubs.h" +#include "src/field-index.h" +#include "src/hydrogen.h" +#include "src/lithium.h" namespace v8 { namespace internal { @@ -40,7 +18,7 @@ DisallowHandleAllocation no_handles; DisallowHandleDereference no_deref; - ASSERT(graph != NULL); + DCHECK(graph != NULL); BailoutReason bailout_reason = kNoReason; if (!graph->Optimize(&bailout_reason)) { FATAL(GetBailoutReason(bailout_reason)); @@ -60,20 +38,21 @@ arguments_length_(NULL), info_(stub, isolate), context_(NULL) { - descriptor_ = stub->GetInterfaceDescriptor(isolate); - parameters_.Reset(new HParameter*[descriptor_->register_param_count_]); + descriptor_ = stub->GetInterfaceDescriptor(); + int parameter_count = descriptor_->GetEnvironmentParameterCount(); + parameters_.Reset(new HParameter*[parameter_count]); } virtual bool BuildGraph(); protected: virtual HValue* BuildCodeStub() = 0; HParameter* GetParameter(int parameter) { - ASSERT(parameter < descriptor_->register_param_count_); + DCHECK(parameter < descriptor_->GetEnvironmentParameterCount()); return parameters_[parameter]; } HValue* GetArgumentsLength() { // This is initialized in BuildGraph() - ASSERT(arguments_length_ != NULL); + DCHECK(arguments_length_ != NULL); return 
arguments_length_; } CompilationInfo* info() { return &info_; } @@ -82,9 +61,9 @@ Isolate* isolate() { return info_.isolate(); } HLoadNamedField* BuildLoadNamedField(HValue* object, - Representation representation, - int offset, - bool is_inobject); + FieldIndex index); + void BuildStoreNamedField(HValue* object, HValue* value, FieldIndex index, + Representation representation); enum ArgumentClass { NONE, @@ -140,30 +119,29 @@ isolate()->GetHTracer()->TraceCompilation(&info_); } - int param_count = descriptor_->register_param_count_; + int param_count = descriptor_->GetEnvironmentParameterCount(); HEnvironment* start_environment = graph()->start_environment(); HBasicBlock* next_block = CreateBasicBlock(start_environment); Goto(next_block); next_block->SetJoinId(BailoutId::StubEntry()); set_current_block(next_block); - bool runtime_stack_params = descriptor_->stack_parameter_count_.is_valid(); + bool runtime_stack_params = descriptor_->stack_parameter_count().is_valid(); HInstruction* stack_parameter_count = NULL; for (int i = 0; i < param_count; ++i) { - Representation r = descriptor_->IsParameterCountRegister(i) - ? 
Representation::Integer32() - : Representation::Tagged(); - HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r); + Representation r = descriptor_->GetEnvironmentParameterRepresentation(i); + HParameter* param = Add<HParameter>(i, + HParameter::REGISTER_PARAMETER, r); start_environment->Bind(i, param); parameters_[i] = param; - if (descriptor_->IsParameterCountRegister(i)) { + if (descriptor_->IsEnvironmentParameterCountRegister(i)) { param->set_type(HType::Smi()); stack_parameter_count = param; arguments_length_ = stack_parameter_count; } } - ASSERT(!runtime_stack_params || arguments_length_ != NULL); + DCHECK(!runtime_stack_params || arguments_length_ != NULL); if (!runtime_stack_params) { stack_parameter_count = graph()->GetConstantMinus1(); arguments_length_ = graph()->GetConstant0(); @@ -181,16 +159,16 @@ // We might have extra expressions to pop from the stack in addition to the // arguments above. HInstruction* stack_pop_count = stack_parameter_count; - if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) { + if (descriptor_->function_mode() == JS_FUNCTION_STUB_MODE) { if (!stack_parameter_count->IsConstant() && - descriptor_->hint_stack_parameter_count_ < 0) { + descriptor_->hint_stack_parameter_count() < 0) { HInstruction* constant_one = graph()->GetConstant1(); stack_pop_count = AddUncasted<HAdd>(stack_parameter_count, constant_one); stack_pop_count->ClearFlag(HValue::kCanOverflow); // TODO(mvstanton): verify that stack_parameter_count+1 really fits in a // smi. } else { - int count = descriptor_->hint_stack_parameter_count_; + int count = descriptor_->hint_stack_parameter_count(); stack_pop_count = Add<HConstant>(count); } } @@ -238,15 +216,15 @@ }; -Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) { - Factory* factory = isolate->factory(); +Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode() { + Factory* factory = isolate()->factory(); // Generate the new code. 
- MacroAssembler masm(isolate, NULL, 256); + MacroAssembler masm(isolate(), NULL, 256); { // Update the static counter each time a new code stub is generated. - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); // Generate the code for the stub. masm.set_generating_stub(true); @@ -271,33 +249,35 @@ template <class Stub> -static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) { - CodeStub::Major major_key = - static_cast<HydrogenCodeStub*>(stub)->MajorKey(); +static Handle<Code> DoGenerateCode(Stub* stub) { + Isolate* isolate = stub->isolate(); + CodeStub::Major major_key = static_cast<CodeStub*>(stub)->MajorKey(); CodeStubInterfaceDescriptor* descriptor = isolate->code_stub_interface_descriptor(major_key); - if (descriptor->register_param_count_ < 0) { - stub->InitializeInterfaceDescriptor(isolate, descriptor); + if (!descriptor->IsInitialized()) { + stub->InitializeInterfaceDescriptor(descriptor); } // If we are uninitialized we can use a light-weight stub to enter // the runtime that is significantly faster than using the standard // stub-failure deopt mechanism. if (stub->IsUninitialized() && descriptor->has_miss_handler()) { - ASSERT(!descriptor->stack_parameter_count_.is_valid()); - return stub->GenerateLightweightMissCode(isolate); + DCHECK(!descriptor->stack_parameter_count().is_valid()); + return stub->GenerateLightweightMissCode(); } - ElapsedTimer timer; + base::ElapsedTimer timer; if (FLAG_profile_hydrogen_code_stub_compilation) { timer.Start(); } CodeStubGraphBuilder<Stub> builder(isolate, stub); LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + // TODO(yangguo) remove this once the code serializer handles code stubs. 
+ if (FLAG_serialize_toplevel) chunk->info()->PrepareForSerializing(); Handle<Code> code = chunk->Codegen(); if (FLAG_profile_hydrogen_code_stub_compilation) { - double ms = timer.Elapsed().InMillisecondsF(); - PrintF("[Lazy compilation of %s took %0.3f ms]\n", - stub->GetName().get(), ms); + OFStream os(stdout); + os << "[Lazy compilation of " << stub << " took " + << timer.Elapsed().InMillisecondsF() << " ms]" << endl; } return code; } @@ -320,7 +300,7 @@ // Convert the parameter to number using the builtin. HValue* function = AddLoadJSBuiltin(Builtins::TO_NUMBER); - Add<HPushArgument>(value); + Add<HPushArguments>(value); Push(Add<HInvokeFunction>(function, 1)); if_number.End(); @@ -329,8 +309,8 @@ } -Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> ToNumberStub::GenerateCode() { + return DoGenerateCode(this); } @@ -342,8 +322,8 @@ } -Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> NumberToStringStub::GenerateCode() { + return DoGenerateCode(this); } @@ -352,8 +332,10 @@ Factory* factory = isolate()->factory(); HValue* undefined = graph()->GetConstantUndefined(); AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode(); - FastCloneShallowArrayStub::Mode mode = casted_stub()->mode(); - int length = casted_stub()->length(); + + // This stub is very performance sensitive, the generated code must be tuned + // so that it doesn't build and eager frame. 
+ info()->MarkMustNotHaveEagerFrame(); HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0), GetParameter(1), @@ -368,46 +350,40 @@ AllocationSite::kTransitionInfoOffset); HInstruction* boilerplate = Add<HLoadNamedField>( allocation_site, static_cast<HValue*>(NULL), access); - HValue* push_value; - if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) { - HValue* elements = AddLoadElements(boilerplate); - - IfBuilder if_fixed_cow(this); - if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map()); - if_fixed_cow.Then(); - push_value = BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - FAST_ELEMENTS, - 0/*copy-on-write*/); - environment()->Push(push_value); - if_fixed_cow.Else(); - - IfBuilder if_fixed(this); - if_fixed.If<HCompareMap>(elements, factory->fixed_array_map()); - if_fixed.Then(); - push_value = BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - FAST_ELEMENTS, - length); - environment()->Push(push_value); - if_fixed.Else(); - push_value = BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - FAST_DOUBLE_ELEMENTS, - length); - environment()->Push(push_value); - } else { - ElementsKind elements_kind = casted_stub()->ComputeElementsKind(); - push_value = BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - elements_kind, - length); - environment()->Push(push_value); - } + HValue* elements = AddLoadElements(boilerplate); + HValue* capacity = AddLoadFixedArrayLength(elements); + IfBuilder zero_capacity(this); + zero_capacity.If<HCompareNumericAndBranch>(capacity, graph()->GetConstant0(), + Token::EQ); + zero_capacity.Then(); + Push(BuildCloneShallowArrayEmpty(boilerplate, + allocation_site, + alloc_site_mode)); + zero_capacity.Else(); + IfBuilder if_fixed_cow(this); + if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map()); + if_fixed_cow.Then(); + Push(BuildCloneShallowArrayCow(boilerplate, + allocation_site, + 
alloc_site_mode, + FAST_ELEMENTS)); + if_fixed_cow.Else(); + IfBuilder if_fixed(this); + if_fixed.If<HCompareMap>(elements, factory->fixed_array_map()); + if_fixed.Then(); + Push(BuildCloneShallowArrayNonEmpty(boilerplate, + allocation_site, + alloc_site_mode, + FAST_ELEMENTS)); + + if_fixed.Else(); + Push(BuildCloneShallowArrayNonEmpty(boilerplate, + allocation_site, + alloc_site_mode, + FAST_DOUBLE_ELEMENTS)); + if_fixed.End(); + if_fixed_cow.End(); + zero_capacity.End(); checker.ElseDeopt("Uninitialized boilerplate literals"); checker.End(); @@ -416,8 +392,8 @@ } -Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> FastCloneShallowArrayStub::GenerateCode() { + return DoGenerateCode(this); } @@ -469,7 +445,7 @@ boilerplate, static_cast<HValue*>(NULL), access)); } - ASSERT(FLAG_allocation_site_pretenuring || (size == object_size)); + DCHECK(FLAG_allocation_site_pretenuring || (size == object_size)); if (FLAG_allocation_site_pretenuring) { BuildCreateAllocationMemento( object, Add<HConstant>(object_size), allocation_site); @@ -483,8 +459,8 @@ } -Handle<Code> FastCloneShallowObjectStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> FastCloneShallowObjectStub::GenerateCode() { + return DoGenerateCode(this); } @@ -526,7 +502,7 @@ // Store an empty fixed array for the code dependency. 
HConstant* empty_fixed_array = Add<HConstant>(isolate()->factory()->empty_fixed_array()); - HStoreNamedField* store = Add<HStoreNamedField>( + Add<HStoreNamedField>( object, HObjectAccess::ForAllocationSiteOffset( AllocationSite::kDependentCodeOffset), @@ -538,10 +514,15 @@ HValue* site = Add<HLoadNamedField>( site_list, static_cast<HValue*>(NULL), HObjectAccess::ForAllocationSiteList()); - store = Add<HStoreNamedField>(object, + // TODO(mvstanton): This is a store to a weak pointer, which we may want to + // mark as such in order to skip the write barrier, once we have a unified + // system for weakness. For now we decided to keep it like this because having + // an initial write barrier backed store makes this pointer strong until the + // next GC, and allocation sites are designed to survive several GCs anyway. + Add<HStoreNamedField>( + object, HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset), site); - store->SkipWriteBarrier(); Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(), object); @@ -553,35 +534,41 @@ } -Handle<Code> CreateAllocationSiteStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> CreateAllocationSiteStub::GenerateCode() { + return DoGenerateCode(this); } template <> -HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() { +HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() { HInstruction* load = BuildUncheckedMonomorphicElementAccess( - GetParameter(0), GetParameter(1), NULL, - casted_stub()->is_js_array(), casted_stub()->elements_kind(), - LOAD, NEVER_RETURN_HOLE, STANDARD_STORE); + GetParameter(KeyedLoadIC::kReceiverIndex), + GetParameter(KeyedLoadIC::kNameIndex), + NULL, + casted_stub()->is_js_array(), + casted_stub()->elements_kind(), + LOAD, + NEVER_RETURN_HOLE, + STANDARD_STORE); return load; } -Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> 
LoadFastElementStub::GenerateCode() { + return DoGenerateCode(this); } HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField( - HValue* object, - Representation representation, - int offset, - bool is_inobject) { - HObjectAccess access = is_inobject + HValue* object, FieldIndex index) { + Representation representation = index.is_double() + ? Representation::Double() + : Representation::Tagged(); + int offset = index.offset(); + HObjectAccess access = index.is_inobject() ? HObjectAccess::ForObservableJSObjectOffset(offset, representation) : HObjectAccess::ForBackingStoreOffset(offset, representation); - if (representation.IsDouble()) { + if (index.is_double()) { // Load the heap number. object = Add<HLoadNamedField>( object, static_cast<HValue*>(NULL), @@ -595,36 +582,88 @@ template<> HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() { - return BuildLoadNamedField(GetParameter(0), - casted_stub()->representation(), - casted_stub()->offset(), - casted_stub()->is_inobject()); + return BuildLoadNamedField(GetParameter(0), casted_stub()->index()); } -Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> LoadFieldStub::GenerateCode() { + return DoGenerateCode(this); } -template<> +template <> +HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() { + HValue* map = AddLoadMap(GetParameter(0), NULL); + HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset( + Map::kDescriptorsOffset, Representation::Tagged()); + HValue* descriptors = + Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), descriptors_access); + HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset( + DescriptorArray::GetValueOffset(casted_stub()->descriptor())); + return Add<HLoadNamedField>(descriptors, static_cast<HValue*>(NULL), + value_access); +} + + +Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); } + + +void 
CodeStubGraphBuilderBase::BuildStoreNamedField( + HValue* object, HValue* value, FieldIndex index, + Representation representation) { + DCHECK(!index.is_double() || representation.IsDouble()); + int offset = index.offset(); + HObjectAccess access = + index.is_inobject() + ? HObjectAccess::ForObservableJSObjectOffset(offset, representation) + : HObjectAccess::ForBackingStoreOffset(offset, representation); + + if (representation.IsDouble()) { + // Load the heap number. + object = Add<HLoadNamedField>( + object, static_cast<HValue*>(NULL), + access.WithRepresentation(Representation::Tagged())); + // Store the double value into it. + access = HObjectAccess::ForHeapNumberValue(); + } else if (representation.IsHeapObject()) { + BuildCheckHeapObject(value); + } + + Add<HStoreNamedField>(object, access, value, INITIALIZING_STORE); +} + + +template <> +HValue* CodeStubGraphBuilder<StoreFieldStub>::BuildCodeStub() { + BuildStoreNamedField(GetParameter(0), GetParameter(2), casted_stub()->index(), + casted_stub()->representation()); + return GetParameter(2); +} + + +Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); } + + +template <> HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() { - HValue* string = BuildLoadNamedField( - GetParameter(0), Representation::Tagged(), JSValue::kValueOffset, true); - return BuildLoadNamedField( - string, Representation::Tagged(), String::kLengthOffset, true); + HValue* string = BuildLoadNamedField(GetParameter(0), + FieldIndex::ForInObjectOffset(JSValue::kValueOffset)); + return BuildLoadNamedField(string, + FieldIndex::ForInObjectOffset(String::kLengthOffset)); } -Handle<Code> StringLengthStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> StringLengthStub::GenerateCode() { + return DoGenerateCode(this); } template <> -HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() { +HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() { 
BuildUncheckedMonomorphicElementAccess( - GetParameter(0), GetParameter(1), GetParameter(2), + GetParameter(StoreIC::kReceiverIndex), + GetParameter(StoreIC::kNameIndex), + GetParameter(StoreIC::kValueIndex), casted_stub()->is_js_array(), casted_stub()->elements_kind(), STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode()); @@ -632,8 +671,8 @@ } -Handle<Code> KeyedStoreFastElementStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> StoreFastElementStub::GenerateCode() { + return DoGenerateCode(this); } @@ -651,8 +690,8 @@ } -Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> TransitionElementsKindStub::GenerateCode() { + return DoGenerateCode(this); } HValue* CodeStubGraphBuilderBase::BuildArrayConstructor( @@ -666,6 +705,9 @@ HValue* result = NULL; switch (argument_class) { case NONE: + // This stub is very performance sensitive, the generated code must be + // tuned so that it doesn't build and eager frame. + info()->MarkMustNotHaveEagerFrame(); result = array_builder.AllocateEmptyArray(); break; case SINGLE: @@ -689,6 +731,9 @@ HValue* result = NULL; switch (argument_class) { case NONE: + // This stub is very performance sensitive, the generated code must be + // tuned so that it doesn't build and eager frame. + info()->MarkMustNotHaveEagerFrame(); result = array_builder.AllocateEmptyArray(); break; case SINGLE: @@ -739,10 +784,11 @@ ? JSArrayBuilder::FILL_WITH_HOLE : JSArrayBuilder::DONT_FILL_WITH_HOLE; HValue* new_object = array_builder->AllocateArray(checked_length, + max_alloc_length, checked_length, fill_mode); HValue* elements = array_builder->GetElementsLocation(); - ASSERT(elements != NULL); + DCHECK(elements != NULL); // Now populate the elements correctly. 
LoopBuilder builder(this, @@ -768,8 +814,8 @@ } -Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); } @@ -782,9 +828,8 @@ } -Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode( - Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); } @@ -796,8 +841,8 @@ } -Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() { + return DoGenerateCode(this); } @@ -809,9 +854,8 @@ } -Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode( - Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); } @@ -823,9 +867,8 @@ } -Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode( - Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); } @@ -837,9 +880,8 @@ } -Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode( - Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() { + return DoGenerateCode(this); } @@ -864,8 +906,8 @@ } -Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> CompareNilICStub::GenerateCode() { + return DoGenerateCode(this); } @@ -880,7 +922,7 @@ Type* right_type = state.GetRightType(zone()); Type* result_type = state.GetResultType(zone()); - ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) && + DCHECK(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) && 
(state.HasSideEffects() || !result_type->Is(Type::None()))); HValue* result = NULL; @@ -941,20 +983,6 @@ // If we encounter a generic argument, the number conversion is // observable, thus we cannot afford to bail out after the fact. if (!state.HasSideEffects()) { - if (result_type->Is(Type::SignedSmall())) { - if (state.op() == Token::SHR) { - // TODO(olivf) Replace this by a SmiTagU Instruction. - // 0x40000000: this number would convert to negative when interpreting - // the register as signed value; - IfBuilder if_of(this); - if_of.IfNot<HCompareNumericAndBranch>(result, - Add<HConstant>(static_cast<int>(SmiValuesAre32Bits() - ? 0x80000000 : 0x40000000)), Token::EQ_STRICT); - if_of.Then(); - if_of.ElseDeopt("UInt->Smi oveflow"); - if_of.End(); - } - } result = EnforceNumberType(result, result_type); } @@ -963,7 +991,7 @@ if (state.CanReuseDoubleBox()) { HValue* operand = (state.mode() == OVERWRITE_LEFT) ? left : right; IfBuilder if_heap_number(this); - if_heap_number.IfNot<HIsSmiAndBranch>(operand); + if_heap_number.If<HHasInstanceTypeAndBranch>(operand, HEAP_NUMBER_TYPE); if_heap_number.Then(); Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result); Push(operand); @@ -977,8 +1005,8 @@ } -Handle<Code> BinaryOpICStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> BinaryOpICStub::GenerateCode() { + return DoGenerateCode(this); } @@ -1002,8 +1030,8 @@ } -Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode() { + return DoGenerateCode(this); } @@ -1028,27 +1056,44 @@ } -Handle<Code> StringAddStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> StringAddStub::GenerateCode() { + return DoGenerateCode(this); } template <> HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() { ToBooleanStub* stub = casted_stub(); + HValue* 
true_value = NULL; + HValue* false_value = NULL; + + switch (stub->GetMode()) { + case ToBooleanStub::RESULT_AS_SMI: + true_value = graph()->GetConstant1(); + false_value = graph()->GetConstant0(); + break; + case ToBooleanStub::RESULT_AS_ODDBALL: + true_value = graph()->GetConstantTrue(); + false_value = graph()->GetConstantFalse(); + break; + case ToBooleanStub::RESULT_AS_INVERSE_ODDBALL: + true_value = graph()->GetConstantFalse(); + false_value = graph()->GetConstantTrue(); + break; + } IfBuilder if_true(this); if_true.If<HBranch>(GetParameter(0), stub->GetTypes()); if_true.Then(); - if_true.Return(graph()->GetConstant1()); + if_true.Return(true_value); if_true.Else(); if_true.End(); - return graph()->GetConstant0(); + return false_value; } -Handle<Code> ToBooleanStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> ToBooleanStub::GenerateCode() { + return DoGenerateCode(this); } @@ -1060,7 +1105,7 @@ Handle<PropertyCell> placeholder_cell = isolate()->factory()->NewPropertyCell(placeholer_value); - HParameter* value = GetParameter(2); + HParameter* value = GetParameter(StoreIC::kValueIndex); if (stub->check_global()) { // Check that the map of the global has not changed: use a placeholder map @@ -1068,7 +1113,7 @@ Handle<Map> placeholder_map = isolate()->factory()->meta_map(); HValue* global = Add<HConstant>( StoreGlobalStub::global_placeholder(isolate())); - Add<HCheckMaps>(global, placeholder_map, top_info()); + Add<HCheckMaps>(global, placeholder_map); } HValue* cell = Add<HConstant>(placeholder_cell); @@ -1100,17 +1145,17 @@ } -Handle<Code> StoreGlobalStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> StoreGlobalStub::GenerateCode() { + return DoGenerateCode(this); } template<> HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() { - HValue* value = GetParameter(0); - HValue* map = GetParameter(1); - HValue* key = GetParameter(2); - HValue* object = 
GetParameter(3); + HValue* value = GetParameter(ElementsTransitionAndStoreStub::kValueIndex); + HValue* map = GetParameter(ElementsTransitionAndStoreStub::kMapIndex); + HValue* key = GetParameter(ElementsTransitionAndStoreStub::kKeyIndex); + HValue* object = GetParameter(ElementsTransitionAndStoreStub::kObjectIndex); if (FLAG_trace_elements_transitions) { // Tracing elements transitions is the job of the runtime. @@ -1134,8 +1179,8 @@ } -Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() { + return DoGenerateCode(this); } @@ -1204,7 +1249,7 @@ int field_offset) { // By making sure to express these loads in the form [<hvalue> + constant] // the keyed load can be hoisted. - ASSERT(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength); + DCHECK(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength); HValue* field_slot = iterator; if (field_offset > 0) { HValue* field_offset_value = Add<HConstant>(field_offset); @@ -1344,8 +1389,8 @@ } -Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> FastNewClosureStub::GenerateCode() { + return DoGenerateCode(this); } @@ -1359,7 +1404,7 @@ // Allocate the context in new space. HAllocate* function_context = Add<HAllocate>( Add<HConstant>(length * kPointerSize + FixedArray::kHeaderSize), - HType::Tagged(), NOT_TENURED, FIXED_ARRAY_TYPE); + HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE); // Set up the object header. 
AddStoreMapConstant(function_context, @@ -1399,24 +1444,28 @@ } -Handle<Code> FastNewContextStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> FastNewContextStub::GenerateCode() { + return DoGenerateCode(this); } -template<> -HValue* CodeStubGraphBuilder<KeyedLoadDictionaryElementStub>::BuildCodeStub() { - HValue* receiver = GetParameter(0); - HValue* key = GetParameter(1); +template <> +HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() { + HValue* receiver = GetParameter(KeyedLoadIC::kReceiverIndex); + HValue* key = GetParameter(KeyedLoadIC::kNameIndex); Add<HCheckSmi>(key); - return BuildUncheckedDictionaryElementLoad(receiver, key); + HValue* elements = AddLoadElements(receiver); + + HValue* hash = BuildElementIndexHash(key); + + return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash); } -Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> LoadDictionaryElementStub::GenerateCode() { + return DoGenerateCode(this); } @@ -1427,12 +1476,321 @@ HValue* index = GetParameter(RegExpConstructResultStub::kIndex); HValue* input = GetParameter(RegExpConstructResultStub::kInput); + info()->MarkMustNotHaveEagerFrame(); + return BuildRegExpConstructResult(length, index, input); } -Handle<Code> RegExpConstructResultStub::GenerateCode(Isolate* isolate) { - return DoGenerateCode(isolate, this); +Handle<Code> RegExpConstructResultStub::GenerateCode() { + return DoGenerateCode(this); +} + + +template <> +class CodeStubGraphBuilder<KeyedLoadGenericStub> + : public CodeStubGraphBuilderBase { + public: + CodeStubGraphBuilder(Isolate* isolate, KeyedLoadGenericStub* stub) + : CodeStubGraphBuilderBase(isolate, stub) {} + + protected: + virtual HValue* BuildCodeStub(); + + void BuildElementsKindLimitCheck(HGraphBuilder::IfBuilder* if_builder, + HValue* bit_field2, + ElementsKind kind); + + void 
BuildFastElementLoad(HGraphBuilder::IfBuilder* if_builder, + HValue* receiver, + HValue* key, + HValue* instance_type, + HValue* bit_field2, + ElementsKind kind); + + void BuildExternalElementLoad(HGraphBuilder::IfBuilder* if_builder, + HValue* receiver, + HValue* key, + HValue* instance_type, + HValue* bit_field2, + ElementsKind kind); + + KeyedLoadGenericStub* casted_stub() { + return static_cast<KeyedLoadGenericStub*>(stub()); + } +}; + + +void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildElementsKindLimitCheck( + HGraphBuilder::IfBuilder* if_builder, HValue* bit_field2, + ElementsKind kind) { + ElementsKind next_kind = static_cast<ElementsKind>(kind + 1); + HValue* kind_limit = Add<HConstant>( + static_cast<int>(Map::ElementsKindBits::encode(next_kind))); + + if_builder->If<HCompareNumericAndBranch>(bit_field2, kind_limit, Token::LT); + if_builder->Then(); +} + + +void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad( + HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key, + HValue* instance_type, HValue* bit_field2, ElementsKind kind) { + DCHECK(!IsExternalArrayElementsKind(kind)); + + BuildElementsKindLimitCheck(if_builder, bit_field2, kind); + + IfBuilder js_array_check(this); + js_array_check.If<HCompareNumericAndBranch>( + instance_type, Add<HConstant>(JS_ARRAY_TYPE), Token::EQ); + js_array_check.Then(); + Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL, + true, kind, + LOAD, NEVER_RETURN_HOLE, + STANDARD_STORE)); + js_array_check.Else(); + Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL, + false, kind, + LOAD, NEVER_RETURN_HOLE, + STANDARD_STORE)); + js_array_check.End(); +} + + +void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildExternalElementLoad( + HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key, + HValue* instance_type, HValue* bit_field2, ElementsKind kind) { + DCHECK(IsExternalArrayElementsKind(kind)); + + BuildElementsKindLimitCheck(if_builder, bit_field2, 
kind); + + Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL, + false, kind, + LOAD, NEVER_RETURN_HOLE, + STANDARD_STORE)); +} + + +HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() { + HValue* receiver = GetParameter(KeyedLoadIC::kReceiverIndex); + HValue* key = GetParameter(KeyedLoadIC::kNameIndex); + + // Split into a smi/integer case and unique string case. + HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(), + graph()->CreateBasicBlock()); + + BuildKeyedIndexCheck(key, &index_name_split_continuation); + + IfBuilder index_name_split(this, &index_name_split_continuation); + index_name_split.Then(); + { + // Key is an index (number) + key = Pop(); + + int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) | + (1 << Map::kHasIndexedInterceptor); + BuildJSObjectCheck(receiver, bit_field_mask); + + HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + + HValue* instance_type = + Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), + HObjectAccess::ForMapInstanceType()); + + HValue* bit_field2 = Add<HLoadNamedField>(map, + static_cast<HValue*>(NULL), + HObjectAccess::ForMapBitField2()); + + IfBuilder kind_if(this); + BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + FAST_HOLEY_ELEMENTS); + + kind_if.Else(); + { + BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + FAST_HOLEY_DOUBLE_ELEMENTS); + } + kind_if.Else(); + + // The DICTIONARY_ELEMENTS check generates a "kind_if.Then" + BuildElementsKindLimitCheck(&kind_if, bit_field2, DICTIONARY_ELEMENTS); + { + HValue* elements = AddLoadElements(receiver); + + HValue* hash = BuildElementIndexHash(key); + + Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash)); + } + kind_if.Else(); + + // The SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then" + BuildElementsKindLimitCheck(&kind_if, bit_field2, + SLOPPY_ARGUMENTS_ELEMENTS); + // Non-strict 
elements are not handled. + Add<HDeoptimize>("non-strict elements in KeyedLoadGenericStub", + Deoptimizer::EAGER); + Push(graph()->GetConstant0()); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_INT8_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_UINT8_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_INT16_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_UINT16_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_INT32_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_UINT32_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_FLOAT32_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_FLOAT64_ELEMENTS); + + kind_if.Else(); + BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2, + EXTERNAL_UINT8_CLAMPED_ELEMENTS); + + kind_if.ElseDeopt("ElementsKind unhandled in KeyedLoadGenericStub"); + + kind_if.End(); + } + index_name_split.Else(); + { + // Key is a unique string. 
+ key = Pop(); + + int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) | + (1 << Map::kHasNamedInterceptor); + BuildJSObjectCheck(receiver, bit_field_mask); + + HIfContinuation continuation; + BuildTestForDictionaryProperties(receiver, &continuation); + IfBuilder if_dict_properties(this, &continuation); + if_dict_properties.Then(); + { + // Key is string, properties are dictionary mode + BuildNonGlobalObjectCheck(receiver); + + HValue* properties = Add<HLoadNamedField>( + receiver, static_cast<HValue*>(NULL), + HObjectAccess::ForPropertiesPointer()); + + HValue* hash = + Add<HLoadNamedField>(key, static_cast<HValue*>(NULL), + HObjectAccess::ForNameHashField()); + + hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift)); + + HValue* value = BuildUncheckedDictionaryElementLoad(receiver, + properties, + key, + hash); + Push(value); + } + if_dict_properties.Else(); + { + // Key is string, properties are fast mode + HValue* hash = BuildKeyedLookupCacheHash(receiver, key); + + ExternalReference cache_keys_ref = + ExternalReference::keyed_lookup_cache_keys(isolate()); + HValue* cache_keys = Add<HConstant>(cache_keys_ref); + + HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2)); + base_index->ClearFlag(HValue::kCanOverflow); + + HIfContinuation inline_or_runtime_continuation( + graph()->CreateBasicBlock(), graph()->CreateBasicBlock()); + { + IfBuilder lookup_ifs[KeyedLookupCache::kEntriesPerBucket]; + for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket; + ++probe) { + IfBuilder* lookup_if = &lookup_ifs[probe]; + lookup_if->Initialize(this); + int probe_base = probe * KeyedLookupCache::kEntryLength; + HValue* map_index = AddUncasted<HAdd>( + base_index, + Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex)); + map_index->ClearFlag(HValue::kCanOverflow); + HValue* key_index = AddUncasted<HAdd>( + base_index, + 
Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex)); + key_index->ClearFlag(HValue::kCanOverflow); + HValue* map_to_check = + Add<HLoadKeyed>(cache_keys, map_index, static_cast<HValue*>(NULL), + FAST_ELEMENTS, NEVER_RETURN_HOLE, 0); + lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map); + lookup_if->And(); + HValue* key_to_check = + Add<HLoadKeyed>(cache_keys, key_index, static_cast<HValue*>(NULL), + FAST_ELEMENTS, NEVER_RETURN_HOLE, 0); + lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key); + lookup_if->Then(); + { + ExternalReference cache_field_offsets_ref = + ExternalReference::keyed_lookup_cache_field_offsets(isolate()); + HValue* cache_field_offsets = + Add<HConstant>(cache_field_offsets_ref); + HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe)); + index->ClearFlag(HValue::kCanOverflow); + HValue* property_index = Add<HLoadKeyed>( + cache_field_offsets, index, static_cast<HValue*>(NULL), + EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0); + Push(property_index); + } + lookup_if->Else(); + } + for (int i = 0; i < KeyedLookupCache::kEntriesPerBucket; ++i) { + lookup_ifs[i].JoinContinuation(&inline_or_runtime_continuation); + } + } + + IfBuilder inline_or_runtime(this, &inline_or_runtime_continuation); + inline_or_runtime.Then(); + { + // Found a cached index, load property inline. + Push(Add<HLoadFieldByIndex>(receiver, Pop())); + } + inline_or_runtime.Else(); + { + // KeyedLookupCache miss; call runtime. 
+ Add<HPushArguments>(receiver, key); + Push(Add<HCallRuntime>( + isolate()->factory()->empty_string(), + Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2)); + } + inline_or_runtime.End(); + } + if_dict_properties.End(); + } + index_name_split.End(); + + return Pop(); +} + + +Handle<Code> KeyedLoadGenericStub::GenerateCode() { + return DoGenerateCode(this); } diff -Nru nodejs-0.11.13/deps/v8/src/collection-iterator.js nodejs-0.11.15/deps/v8/src/collection-iterator.js --- nodejs-0.11.13/deps/v8/src/collection-iterator.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/collection-iterator.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,194 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +'use strict'; + + +// This file relies on the fact that the following declaration has been made +// in runtime.js: +// var $Set = global.Set; +// var $Map = global.Map; + + +function SetIteratorConstructor(set, kind) { + %SetIteratorInitialize(this, set, kind); +} + + +function SetIteratorNextJS() { + if (!IS_SET_ITERATOR(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Set Iterator.prototype.next', this]); + } + + var value_array = [UNDEFINED, UNDEFINED]; + var entry = {value: value_array, done: false}; + switch (%SetIteratorNext(this, value_array)) { + case 0: + entry.value = UNDEFINED; + entry.done = true; + break; + case ITERATOR_KIND_VALUES: + entry.value = value_array[0]; + break; + case ITERATOR_KIND_ENTRIES: + value_array[1] = value_array[0]; + break; + } + + return entry; +} + + +function SetIteratorSymbolIterator() { + return this; +} + + +function SetEntries() { + if (!IS_SET(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Set.prototype.entries', this]); + } + return new SetIterator(this, ITERATOR_KIND_ENTRIES); +} + + +function SetValues() { + if (!IS_SET(this)) { + throw 
MakeTypeError('incompatible_method_receiver', + ['Set.prototype.values', this]); + } + return new SetIterator(this, ITERATOR_KIND_VALUES); +} + + +function SetUpSetIterator() { + %CheckIsBootstrapping(); + + %SetCode(SetIterator, SetIteratorConstructor); + %FunctionSetPrototype(SetIterator, new $Object()); + %FunctionSetInstanceClassName(SetIterator, 'Set Iterator'); + InstallFunctions(SetIterator.prototype, DONT_ENUM, $Array( + 'next', SetIteratorNextJS + )); + + %FunctionSetName(SetIteratorSymbolIterator, '[Symbol.iterator]'); + %AddNamedProperty(SetIterator.prototype, symbolIterator, + SetIteratorSymbolIterator, DONT_ENUM); +} + +SetUpSetIterator(); + + +function ExtendSetPrototype() { + %CheckIsBootstrapping(); + + InstallFunctions($Set.prototype, DONT_ENUM, $Array( + 'entries', SetEntries, + 'keys', SetValues, + 'values', SetValues + )); + + %AddNamedProperty($Set.prototype, symbolIterator, SetValues, DONT_ENUM); +} + +ExtendSetPrototype(); + + + +function MapIteratorConstructor(map, kind) { + %MapIteratorInitialize(this, map, kind); +} + + +function MapIteratorSymbolIterator() { + return this; +} + + +function MapIteratorNextJS() { + if (!IS_MAP_ITERATOR(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map Iterator.prototype.next', this]); + } + + var value_array = [UNDEFINED, UNDEFINED]; + var entry = {value: value_array, done: false}; + switch (%MapIteratorNext(this, value_array)) { + case 0: + entry.value = UNDEFINED; + entry.done = true; + break; + case ITERATOR_KIND_KEYS: + entry.value = value_array[0]; + break; + case ITERATOR_KIND_VALUES: + entry.value = value_array[1]; + break; + // ITERATOR_KIND_ENTRIES does not need any processing. 
+ } + + return entry; +} + + +function MapEntries() { + if (!IS_MAP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map.prototype.entries', this]); + } + return new MapIterator(this, ITERATOR_KIND_ENTRIES); +} + + +function MapKeys() { + if (!IS_MAP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map.prototype.keys', this]); + } + return new MapIterator(this, ITERATOR_KIND_KEYS); +} + + +function MapValues() { + if (!IS_MAP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map.prototype.values', this]); + } + return new MapIterator(this, ITERATOR_KIND_VALUES); +} + + +function SetUpMapIterator() { + %CheckIsBootstrapping(); + + %SetCode(MapIterator, MapIteratorConstructor); + %FunctionSetPrototype(MapIterator, new $Object()); + %FunctionSetInstanceClassName(MapIterator, 'Map Iterator'); + InstallFunctions(MapIterator.prototype, DONT_ENUM, $Array( + 'next', MapIteratorNextJS + )); + + %FunctionSetName(MapIteratorSymbolIterator, '[Symbol.iterator]'); + %AddNamedProperty(MapIterator.prototype, symbolIterator, + MapIteratorSymbolIterator, DONT_ENUM); +} + +SetUpMapIterator(); + + +function ExtendMapPrototype() { + %CheckIsBootstrapping(); + + InstallFunctions($Map.prototype, DONT_ENUM, $Array( + 'entries', MapEntries, + 'keys', MapKeys, + 'values', MapValues + )); + + %AddNamedProperty($Map.prototype, symbolIterator, MapEntries, DONT_ENUM); +} + +ExtendMapPrototype(); diff -Nru nodejs-0.11.13/deps/v8/src/collection.js nodejs-0.11.15/deps/v8/src/collection.js --- nodejs-0.11.13/deps/v8/src/collection.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/collection.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. "use strict"; @@ -34,72 +11,67 @@ var $Set = global.Set; var $Map = global.Map; -// Global sentinel to be used instead of undefined keys, which are not -// supported internally but required for Harmony sets and maps. 
-var undefined_sentinel = {}; +// ------------------------------------------------------------------- +// Harmony Set -// Map and Set uses SameValueZero which means that +0 and -0 should be treated -// as the same value. -function NormalizeKey(key) { - if (IS_UNDEFINED(key)) { - return undefined_sentinel; +function SetConstructor(iterable) { + if (!%_IsConstructCall()) { + throw MakeTypeError('constructor_not_function', ['Set']); } - if (key === 0) { - return 0; - } + var iter, adder; - return key; -} + if (!IS_NULL_OR_UNDEFINED(iterable)) { + iter = GetIterator(iterable); + adder = this.add; + if (!IS_SPEC_FUNCTION(adder)) { + throw MakeTypeError('property_not_function', ['add', this]); + } + } + %SetInitialize(this); -// ------------------------------------------------------------------- -// Harmony Set + if (IS_UNDEFINED(iter)) return; -function SetConstructor() { - if (%_IsConstructCall()) { - %SetInitialize(this); - } else { - throw MakeTypeError('constructor_not_function', ['Set']); + var next, done; + while (!(next = iter.next()).done) { + if (!IS_SPEC_OBJECT(next)) { + throw MakeTypeError('iterator_result_not_an_object', [next]); + } + %_CallFunction(this, next.value, adder); } } -function SetAdd(key) { +function SetAddJS(key) { if (!IS_SET(this)) { throw MakeTypeError('incompatible_method_receiver', ['Set.prototype.add', this]); } - return %SetAdd(this, NormalizeKey(key)); + return %SetAdd(this, key); } -function SetHas(key) { +function SetHasJS(key) { if (!IS_SET(this)) { throw MakeTypeError('incompatible_method_receiver', ['Set.prototype.has', this]); } - return %SetHas(this, NormalizeKey(key)); + return %SetHas(this, key); } -function SetDelete(key) { +function SetDeleteJS(key) { if (!IS_SET(this)) { throw MakeTypeError('incompatible_method_receiver', ['Set.prototype.delete', this]); } - key = NormalizeKey(key); - if (%SetHas(this, key)) { - %SetDelete(this, key); - return true; - } else { - return false; - } + return %SetDelete(this, key); } -function 
SetGetSize() { +function SetGetSizeJS() { if (!IS_SET(this)) { throw MakeTypeError('incompatible_method_receiver', ['Set.prototype.size', this]); @@ -108,13 +80,34 @@ } -function SetClear() { +function SetClearJS() { if (!IS_SET(this)) { throw MakeTypeError('incompatible_method_receiver', ['Set.prototype.clear', this]); } - // Replace the internal table with a new empty table. - %SetInitialize(this); + %SetClear(this); +} + + +function SetForEach(f, receiver) { + if (!IS_SET(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Set.prototype.forEach', this]); + } + + if (!IS_SPEC_FUNCTION(f)) { + throw MakeTypeError('called_non_callable', [f]); + } + + var iterator = new SetIterator(this, ITERATOR_KIND_VALUES); + var key; + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + var value_array = [UNDEFINED]; + while (%SetIteratorNext(iterator, value_array)) { + if (stepping) %DebugPrepareStepInIfStepping(f); + key = value_array[0]; + %_CallFunction(receiver, key, key, this, f); + } } @@ -125,15 +118,18 @@ %SetCode($Set, SetConstructor); %FunctionSetPrototype($Set, new $Object()); - %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM); + %AddNamedProperty($Set.prototype, "constructor", $Set, DONT_ENUM); + + %FunctionSetLength(SetForEach, 1); // Set up the non-enumerable functions on the Set prototype object. 
- InstallGetter($Set.prototype, "size", SetGetSize); + InstallGetter($Set.prototype, "size", SetGetSizeJS); InstallFunctions($Set.prototype, DONT_ENUM, $Array( - "add", SetAdd, - "has", SetHas, - "delete", SetDelete, - "clear", SetClear + "add", SetAddJS, + "has", SetHasJS, + "delete", SetDeleteJS, + "clear", SetClearJS, + "forEach", SetForEach )); } @@ -143,52 +139,76 @@ // ------------------------------------------------------------------- // Harmony Map -function MapConstructor() { - if (%_IsConstructCall()) { - %MapInitialize(this); - } else { +function MapConstructor(iterable) { + if (!%_IsConstructCall()) { throw MakeTypeError('constructor_not_function', ['Map']); } + + var iter, adder; + + if (!IS_NULL_OR_UNDEFINED(iterable)) { + iter = GetIterator(iterable); + adder = this.set; + if (!IS_SPEC_FUNCTION(adder)) { + throw MakeTypeError('property_not_function', ['set', this]); + } + } + + %MapInitialize(this); + + if (IS_UNDEFINED(iter)) return; + + var next, done, nextItem; + while (!(next = iter.next()).done) { + if (!IS_SPEC_OBJECT(next)) { + throw MakeTypeError('iterator_result_not_an_object', [next]); + } + nextItem = next.value; + if (!IS_SPEC_OBJECT(nextItem)) { + throw MakeTypeError('iterator_value_not_an_object', [nextItem]); + } + %_CallFunction(this, nextItem[0], nextItem[1], adder); + } } -function MapGet(key) { +function MapGetJS(key) { if (!IS_MAP(this)) { throw MakeTypeError('incompatible_method_receiver', ['Map.prototype.get', this]); } - return %MapGet(this, NormalizeKey(key)); + return %MapGet(this, key); } -function MapSet(key, value) { +function MapSetJS(key, value) { if (!IS_MAP(this)) { throw MakeTypeError('incompatible_method_receiver', ['Map.prototype.set', this]); } - return %MapSet(this, NormalizeKey(key), value); + return %MapSet(this, key, value); } -function MapHas(key) { +function MapHasJS(key) { if (!IS_MAP(this)) { throw MakeTypeError('incompatible_method_receiver', ['Map.prototype.has', this]); } - return %MapHas(this, 
NormalizeKey(key)); + return %MapHas(this, key); } -function MapDelete(key) { +function MapDeleteJS(key) { if (!IS_MAP(this)) { throw MakeTypeError('incompatible_method_receiver', ['Map.prototype.delete', this]); } - return %MapDelete(this, NormalizeKey(key)); + return %MapDelete(this, key); } -function MapGetSize() { +function MapGetSizeJS() { if (!IS_MAP(this)) { throw MakeTypeError('incompatible_method_receiver', ['Map.prototype.size', this]); @@ -197,13 +217,32 @@ } -function MapClear() { +function MapClearJS() { if (!IS_MAP(this)) { throw MakeTypeError('incompatible_method_receiver', ['Map.prototype.clear', this]); } - // Replace the internal table with a new empty table. - %MapInitialize(this); + %MapClear(this); +} + + +function MapForEach(f, receiver) { + if (!IS_MAP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map.prototype.forEach', this]); + } + + if (!IS_SPEC_FUNCTION(f)) { + throw MakeTypeError('called_non_callable', [f]); + } + + var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES); + var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f); + var value_array = [UNDEFINED, UNDEFINED]; + while (%MapIteratorNext(iterator, value_array)) { + if (stepping) %DebugPrepareStepInIfStepping(f); + %_CallFunction(receiver, value_array[1], value_array[0], this, f); + } } @@ -214,16 +253,19 @@ %SetCode($Map, MapConstructor); %FunctionSetPrototype($Map, new $Object()); - %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM); + %AddNamedProperty($Map.prototype, "constructor", $Map, DONT_ENUM); + + %FunctionSetLength(MapForEach, 1); // Set up the non-enumerable functions on the Map prototype object. 
- InstallGetter($Map.prototype, "size", MapGetSize); + InstallGetter($Map.prototype, "size", MapGetSizeJS); InstallFunctions($Map.prototype, DONT_ENUM, $Array( - "get", MapGet, - "set", MapSet, - "has", MapHas, - "delete", MapDelete, - "clear", MapClear + "get", MapGetJS, + "set", MapSetJS, + "has", MapHasJS, + "delete", MapDeleteJS, + "clear", MapClearJS, + "forEach", MapForEach )); } diff -Nru nodejs-0.11.13/deps/v8/src/compilation-cache.cc nodejs-0.11.15/deps/v8/src/compilation-cache.cc --- nodejs-0.11.13/deps/v8/src/compilation-cache.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compilation-cache.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "assembler.h" -#include "compilation-cache.h" -#include "serialize.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/compilation-cache.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -65,18 +42,11 @@ CompilationCache::~CompilationCache() {} -static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) { - CALL_HEAP_FUNCTION(isolate, - CompilationCacheTable::Allocate(isolate->heap(), size), - CompilationCacheTable); -} - - Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) { - ASSERT(generation < generations_); + DCHECK(generation < generations_); Handle<CompilationCacheTable> result; if (tables_[generation]->IsUndefined()) { - result = AllocateTable(isolate(), kInitialCacheSize); + result = CompilationCacheTable::New(isolate(), kInitialCacheSize); tables_[generation] = *result; } else { CompilationCacheTable* table = @@ -161,7 +131,8 @@ // Were both scripts tagged by the embedder as being shared cross-origin? if (is_shared_cross_origin != script->is_shared_cross_origin()) return false; // Compare the two name strings for equality. 
- return String::cast(*name)->Equals(String::cast(script->name())); + return String::Equals(Handle<String>::cast(name), + Handle<String>(String::cast(script->name()))); } @@ -184,7 +155,7 @@ { HandleScope scope(isolate()); for (generation = 0; generation < generations(); generation++) { Handle<CompilationCacheTable> table = GetTable(generation); - Handle<Object> probe(table->Lookup(*source, *context), isolate()); + Handle<Object> probe = table->Lookup(source, context); if (probe->IsSharedFunctionInfo()) { Handle<SharedFunctionInfo> function_info = Handle<SharedFunctionInfo>::cast(probe); @@ -222,7 +193,7 @@ if (result != NULL) { Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result), isolate()); - ASSERT(HasOrigin(shared, + DCHECK(HasOrigin(shared, name, line_offset, column_offset, @@ -239,153 +210,93 @@ } -MaybeObject* CompilationCacheScript::TryTablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info) { - Handle<CompilationCacheTable> table = GetFirstTable(); - return table->Put(*source, *context, *function_info); -} - - -Handle<CompilationCacheTable> CompilationCacheScript::TablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info) { - CALL_HEAP_FUNCTION(isolate(), - TryTablePut(source, context, function_info), - CompilationCacheTable); -} - - void CompilationCacheScript::Put(Handle<String> source, Handle<Context> context, Handle<SharedFunctionInfo> function_info) { HandleScope scope(isolate()); - SetFirstTable(TablePut(source, context, function_info)); + Handle<CompilationCacheTable> table = GetFirstTable(); + SetFirstTable( + CompilationCacheTable::Put(table, source, context, function_info)); } -Handle<SharedFunctionInfo> CompilationCacheEval::Lookup( +MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup( Handle<String> source, Handle<Context> context, StrictMode strict_mode, int scope_position) { + HandleScope scope(isolate()); // Make sure not 
to leak the table into the surrounding handle // scope. Otherwise, we risk keeping old tables around even after // having cleared the cache. - Object* result = NULL; + Handle<Object> result = isolate()->factory()->undefined_value(); int generation; - { HandleScope scope(isolate()); - for (generation = 0; generation < generations(); generation++) { - Handle<CompilationCacheTable> table = GetTable(generation); - result = table->LookupEval( - *source, *context, strict_mode, scope_position); - if (result->IsSharedFunctionInfo()) { - break; - } - } + for (generation = 0; generation < generations(); generation++) { + Handle<CompilationCacheTable> table = GetTable(generation); + result = table->LookupEval(source, context, strict_mode, scope_position); + if (result->IsSharedFunctionInfo()) break; } if (result->IsSharedFunctionInfo()) { - Handle<SharedFunctionInfo> - function_info(SharedFunctionInfo::cast(result), isolate()); + Handle<SharedFunctionInfo> function_info = + Handle<SharedFunctionInfo>::cast(result); if (generation != 0) { Put(source, context, function_info, scope_position); } isolate()->counters()->compilation_cache_hits()->Increment(); - return function_info; + return scope.CloseAndEscape(function_info); } else { isolate()->counters()->compilation_cache_misses()->Increment(); - return Handle<SharedFunctionInfo>::null(); + return MaybeHandle<SharedFunctionInfo>(); } } -MaybeObject* CompilationCacheEval::TryTablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info, - int scope_position) { - Handle<CompilationCacheTable> table = GetFirstTable(); - return table->PutEval(*source, *context, *function_info, scope_position); -} - - -Handle<CompilationCacheTable> CompilationCacheEval::TablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info, - int scope_position) { - CALL_HEAP_FUNCTION(isolate(), - TryTablePut( - source, context, function_info, scope_position), - 
CompilationCacheTable); -} - - void CompilationCacheEval::Put(Handle<String> source, Handle<Context> context, Handle<SharedFunctionInfo> function_info, int scope_position) { HandleScope scope(isolate()); - SetFirstTable(TablePut(source, context, function_info, scope_position)); + Handle<CompilationCacheTable> table = GetFirstTable(); + table = CompilationCacheTable::PutEval(table, source, context, + function_info, scope_position); + SetFirstTable(table); } -Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source, - JSRegExp::Flags flags) { +MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup( + Handle<String> source, + JSRegExp::Flags flags) { + HandleScope scope(isolate()); // Make sure not to leak the table into the surrounding handle // scope. Otherwise, we risk keeping old tables around even after // having cleared the cache. - Object* result = NULL; + Handle<Object> result = isolate()->factory()->undefined_value(); int generation; - { HandleScope scope(isolate()); - for (generation = 0; generation < generations(); generation++) { - Handle<CompilationCacheTable> table = GetTable(generation); - result = table->LookupRegExp(*source, flags); - if (result->IsFixedArray()) { - break; - } - } + for (generation = 0; generation < generations(); generation++) { + Handle<CompilationCacheTable> table = GetTable(generation); + result = table->LookupRegExp(source, flags); + if (result->IsFixedArray()) break; } if (result->IsFixedArray()) { - Handle<FixedArray> data(FixedArray::cast(result), isolate()); + Handle<FixedArray> data = Handle<FixedArray>::cast(result); if (generation != 0) { Put(source, flags, data); } isolate()->counters()->compilation_cache_hits()->Increment(); - return data; + return scope.CloseAndEscape(data); } else { isolate()->counters()->compilation_cache_misses()->Increment(); - return Handle<FixedArray>::null(); + return MaybeHandle<FixedArray>(); } } -MaybeObject* CompilationCacheRegExp::TryTablePut( - Handle<String> source, - 
JSRegExp::Flags flags, - Handle<FixedArray> data) { - Handle<CompilationCacheTable> table = GetFirstTable(); - return table->PutRegExp(*source, flags, *data); -} - - -Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut( - Handle<String> source, - JSRegExp::Flags flags, - Handle<FixedArray> data) { - CALL_HEAP_FUNCTION(isolate(), - TryTablePut(source, flags, data), - CompilationCacheTable); -} - - void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags, Handle<FixedArray> data) { HandleScope scope(isolate()); - SetFirstTable(TablePut(source, flags, data)); + Handle<CompilationCacheTable> table = GetFirstTable(); + SetFirstTable(CompilationCacheTable::PutRegExp(table, source, flags, data)); } @@ -398,41 +309,33 @@ } -Handle<SharedFunctionInfo> CompilationCache::LookupScript( +MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript( Handle<String> source, Handle<Object> name, int line_offset, int column_offset, bool is_shared_cross_origin, Handle<Context> context) { - if (!IsEnabled()) { - return Handle<SharedFunctionInfo>::null(); - } + if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>(); - return script_.Lookup(source, - name, - line_offset, - column_offset, - is_shared_cross_origin, - context); + return script_.Lookup(source, name, line_offset, column_offset, + is_shared_cross_origin, context); } -Handle<SharedFunctionInfo> CompilationCache::LookupEval( +MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval( Handle<String> source, Handle<Context> context, StrictMode strict_mode, int scope_position) { - if (!IsEnabled()) { - return Handle<SharedFunctionInfo>::null(); - } + if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>(); - Handle<SharedFunctionInfo> result; + MaybeHandle<SharedFunctionInfo> result; if (context->IsNativeContext()) { result = eval_global_.Lookup( source, context, strict_mode, scope_position); } else { - ASSERT(scope_position != RelocInfo::kNoPosition); + DCHECK(scope_position != 
RelocInfo::kNoPosition); result = eval_contextual_.Lookup( source, context, strict_mode, scope_position); } @@ -440,11 +343,9 @@ } -Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source, +MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source, JSRegExp::Flags flags) { - if (!IsEnabled()) { - return Handle<FixedArray>::null(); - } + if (!IsEnabled()) return MaybeHandle<FixedArray>(); return reg_exp_.Lookup(source, flags); } @@ -469,7 +370,7 @@ if (context->IsNativeContext()) { eval_global_.Put(source, context, function_info, scope_position); } else { - ASSERT(scope_position != RelocInfo::kNoPosition); + DCHECK(scope_position != RelocInfo::kNoPosition); eval_contextual_.Put(source, context, function_info, scope_position); } } diff -Nru nodejs-0.11.13/deps/v8/src/compilation-cache.h nodejs-0.11.15/deps/v8/src/compilation-cache.h --- nodejs-0.11.13/deps/v8/src/compilation-cache.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compilation-cache.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_COMPILATION_CACHE_H_ #define V8_COMPILATION_CACHE_H_ @@ -57,7 +34,7 @@ return GetTable(kFirstGeneration); } void SetFirstTable(Handle<CompilationCacheTable> value) { - ASSERT(kFirstGeneration < generations_); + DCHECK(kFirstGeneration < generations_); tables_[kFirstGeneration] = *value; } @@ -106,17 +83,6 @@ Handle<SharedFunctionInfo> function_info); private: - MUST_USE_RESULT MaybeObject* TryTablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info); - - // Note: Returns a new hash table if operation results in expansion. 
- Handle<CompilationCacheTable> TablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info); - bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name, int line_offset, @@ -147,10 +113,10 @@ CompilationCacheEval(Isolate* isolate, int generations) : CompilationSubCache(isolate, generations) { } - Handle<SharedFunctionInfo> Lookup(Handle<String> source, - Handle<Context> context, - StrictMode strict_mode, - int scope_position); + MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source, + Handle<Context> context, + StrictMode strict_mode, + int scope_position); void Put(Handle<String> source, Handle<Context> context, @@ -158,19 +124,6 @@ int scope_position); private: - MUST_USE_RESULT MaybeObject* TryTablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info, - int scope_position); - - // Note: Returns a new hash table if operation results in expansion. - Handle<CompilationCacheTable> TablePut( - Handle<String> source, - Handle<Context> context, - Handle<SharedFunctionInfo> function_info, - int scope_position); - DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval); }; @@ -181,21 +134,12 @@ CompilationCacheRegExp(Isolate* isolate, int generations) : CompilationSubCache(isolate, generations) { } - Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags); + MaybeHandle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags); void Put(Handle<String> source, JSRegExp::Flags flags, Handle<FixedArray> data); private: - MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source, - JSRegExp::Flags flags, - Handle<FixedArray> data); - - // Note: Returns a new hash table if operation results in expansion. 
- Handle<CompilationCacheTable> TablePut(Handle<String> source, - JSRegExp::Flags flags, - Handle<FixedArray> data); - DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp); }; @@ -209,25 +153,21 @@ // Finds the script shared function info for a source // string. Returns an empty handle if the cache doesn't contain a // script for the given source string with the right origin. - Handle<SharedFunctionInfo> LookupScript(Handle<String> source, - Handle<Object> name, - int line_offset, - int column_offset, - bool is_shared_cross_origin, - Handle<Context> context); + MaybeHandle<SharedFunctionInfo> LookupScript( + Handle<String> source, Handle<Object> name, int line_offset, + int column_offset, bool is_shared_cross_origin, Handle<Context> context); // Finds the shared function info for a source string for eval in a // given context. Returns an empty handle if the cache doesn't // contain a script for the given source string. - Handle<SharedFunctionInfo> LookupEval(Handle<String> source, - Handle<Context> context, - StrictMode strict_mode, - int scope_position); + MaybeHandle<SharedFunctionInfo> LookupEval( + Handle<String> source, Handle<Context> context, StrictMode strict_mode, + int scope_position); // Returns the regexp data associated with the given regexp if it // is in cache, otherwise an empty handle. - Handle<FixedArray> LookupRegExp(Handle<String> source, - JSRegExp::Flags flags); + MaybeHandle<FixedArray> LookupRegExp( + Handle<String> source, JSRegExp::Flags flags); // Associate the (source, kind) pair to the shared function // info. This may overwrite an existing mapping. 
diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm/code-generator-arm.cc nodejs-0.11.15/deps/v8/src/compiler/arm/code-generator-arm.cc --- nodejs-0.11.13/deps/v8/src/compiler/arm/code-generator-arm.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm/code-generator-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,848 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/code-generator.h" + +#include "src/arm/macro-assembler-arm.h" +#include "src/compiler/code-generator-impl.h" +#include "src/compiler/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" +#include "src/scopes.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define __ masm()-> + + +#define kScratchReg r9 + + +// Adds Arm-specific methods to convert InstructionOperands. +class ArmOperandConverter : public InstructionOperandConverter { + public: + ArmOperandConverter(CodeGenerator* gen, Instruction* instr) + : InstructionOperandConverter(gen, instr) {} + + SBit OutputSBit() const { + switch (instr_->flags_mode()) { + case kFlags_branch: + case kFlags_set: + return SetCC; + case kFlags_none: + return LeaveCC; + } + UNREACHABLE(); + return LeaveCC; + } + + Operand InputImmediate(int index) { + Constant constant = ToConstant(instr_->InputAt(index)); + switch (constant.type()) { + case Constant::kInt32: + return Operand(constant.ToInt32()); + case Constant::kFloat64: + return Operand( + isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); + case Constant::kInt64: + case Constant::kExternalReference: + case Constant::kHeapObject: + break; + } + UNREACHABLE(); + return Operand::Zero(); + } + + Operand InputOperand2(int first_index) { + const int index = first_index; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_None: + case 
kMode_Offset_RI: + case kMode_Offset_RR: + break; + case kMode_Operand2_I: + return InputImmediate(index + 0); + case kMode_Operand2_R: + return Operand(InputRegister(index + 0)); + case kMode_Operand2_R_ASR_I: + return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1)); + case kMode_Operand2_R_ASR_R: + return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1)); + case kMode_Operand2_R_LSL_I: + return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1)); + case kMode_Operand2_R_LSL_R: + return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1)); + case kMode_Operand2_R_LSR_I: + return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1)); + case kMode_Operand2_R_LSR_R: + return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1)); + case kMode_Operand2_R_ROR_I: + return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1)); + case kMode_Operand2_R_ROR_R: + return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1)); + } + UNREACHABLE(); + return Operand::Zero(); + } + + MemOperand InputOffset(int* first_index) { + const int index = *first_index; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_None: + case kMode_Operand2_I: + case kMode_Operand2_R: + case kMode_Operand2_R_ASR_I: + case kMode_Operand2_R_ASR_R: + case kMode_Operand2_R_LSL_I: + case kMode_Operand2_R_LSL_R: + case kMode_Operand2_R_LSR_I: + case kMode_Operand2_R_LSR_R: + case kMode_Operand2_R_ROR_I: + case kMode_Operand2_R_ROR_R: + break; + case kMode_Offset_RI: + *first_index += 2; + return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); + case kMode_Offset_RR: + *first_index += 2; + return MemOperand(InputRegister(index + 0), InputRegister(index + 1)); + } + UNREACHABLE(); + return MemOperand(r0); + } + + MemOperand InputOffset() { + int index = 0; + return InputOffset(&index); + } + + MemOperand ToMemOperand(InstructionOperand* op) const { + DCHECK(op != NULL); + 
DCHECK(!op->IsRegister()); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + // The linkage computes where all spill slots are located. + FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0); + return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); + } +}; + + +// Assembles an instruction after register allocation, producing machine code. +void CodeGenerator::AssembleArchInstruction(Instruction* instr) { + ArmOperandConverter i(this, instr); + + switch (ArchOpcodeField::decode(instr->opcode())) { + case kArchJmp: + __ b(code_->GetLabel(i.InputBlock(0))); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArchNop: + // don't emit code for nops. + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArchRet: + AssembleReturn(); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArchDeoptimize: { + int deoptimization_id = MiscField::decode(instr->opcode()); + BuildTranslation(instr, deoptimization_id); + + Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( + isolate(), deoptimization_id, Deoptimizer::LAZY); + __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmAdd: + __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmAnd: + __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmBic: + __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmMul: + __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), + i.OutputSBit()); + break; + case kArmMla: + __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), + i.InputRegister(2), i.OutputSBit()); + break; + case kArmMls: { + CpuFeatureScope scope(masm(), MLS); + __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), + i.InputRegister(2)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; 
+ } + case kArmSdiv: { + CpuFeatureScope scope(masm(), SUDIV); + __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmUdiv: { + CpuFeatureScope scope(masm(), SUDIV); + __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmMov: + __ Move(i.OutputRegister(), i.InputOperand2(0)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmMvn: + __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit()); + break; + case kArmOrr: + __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmEor: + __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmSub: + __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmRsb: + __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), + i.OutputSBit()); + break; + case kArmBfc: { + CpuFeatureScope scope(masm(), ARMv7); + __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmUbfx: { + CpuFeatureScope scope(masm(), ARMv7); + __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), + i.InputInt8(2)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmCallCodeObject: { + if (instr->InputAt(0)->IsImmediate()) { + Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); + __ Call(code, RelocInfo::CODE_TARGET); + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + } else { + Register reg = i.InputRegister(0); + int entry = Code::kHeaderSize - kHeapObjectTag; + __ ldr(reg, MemOperand(reg, entry)); + __ Call(reg); + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + } + bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); + if (lazy_deopt) { + 
RecordLazyDeoptimizationEntry(instr); + } + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmCallJSFunction: { + Register func = i.InputRegister(0); + + // TODO(jarin) The load of the context should be separated from the call. + __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset)); + __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); + __ Call(ip); + + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + RecordLazyDeoptimizationEntry(instr); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmCallAddress: { + DirectCEntryStub stub(isolate()); + stub.GenerateCall(masm(), i.InputRegister(0)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmPush: + __ Push(i.InputRegister(0)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmDrop: { + int words = MiscField::decode(instr->opcode()); + __ Drop(words); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmCmp: + __ cmp(i.InputRegister(0), i.InputOperand2(1)); + DCHECK_EQ(SetCC, i.OutputSBit()); + break; + case kArmCmn: + __ cmn(i.InputRegister(0), i.InputOperand2(1)); + DCHECK_EQ(SetCC, i.OutputSBit()); + break; + case kArmTst: + __ tst(i.InputRegister(0), i.InputOperand2(1)); + DCHECK_EQ(SetCC, i.OutputSBit()); + break; + case kArmTeq: + __ teq(i.InputRegister(0), i.InputOperand2(1)); + DCHECK_EQ(SetCC, i.OutputSBit()); + break; + case kArmVcmpF64: + __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + DCHECK_EQ(SetCC, i.OutputSBit()); + break; + case kArmVaddF64: + __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmVsubF64: + __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmVmulF64: + __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + 
DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmVmlaF64: + __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1), + i.InputDoubleRegister(2)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmVmlsF64: + __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1), + i.InputDoubleRegister(2)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmVdivF64: + __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmVmodF64: { + // TODO(bmeurer): We should really get rid of this special instruction, + // and generate a CallAddress instruction instead. + FrameScope scope(masm(), StackFrame::MANUAL); + __ PrepareCallCFunction(0, 2, kScratchReg); + __ MovToFloatParameters(i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), + 0, 2); + // Move the result in the double result register. + __ MovFromFloatResult(i.OutputDoubleRegister()); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmVnegF64: + __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + case kArmVcvtF64S32: { + SwVfpRegister scratch = kScratchDoubleReg.low(); + __ vmov(scratch, i.InputRegister(0)); + __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmVcvtF64U32: { + SwVfpRegister scratch = kScratchDoubleReg.low(); + __ vmov(scratch, i.InputRegister(0)); + __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmVcvtS32F64: { + SwVfpRegister scratch = kScratchDoubleReg.low(); + __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); + __ vmov(i.OutputRegister(), scratch); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmVcvtU32F64: { + SwVfpRegister scratch = kScratchDoubleReg.low(); + __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); + __ vmov(i.OutputRegister(), 
scratch); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmLoadWord8: + __ ldrb(i.OutputRegister(), i.InputOffset()); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmStoreWord8: { + int index = 0; + MemOperand operand = i.InputOffset(&index); + __ strb(i.InputRegister(index), operand); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmLoadWord16: + __ ldrh(i.OutputRegister(), i.InputOffset()); + break; + case kArmStoreWord16: { + int index = 0; + MemOperand operand = i.InputOffset(&index); + __ strh(i.InputRegister(index), operand); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmLoadWord32: + __ ldr(i.OutputRegister(), i.InputOffset()); + break; + case kArmStoreWord32: { + int index = 0; + MemOperand operand = i.InputOffset(&index); + __ str(i.InputRegister(index), operand); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmFloat64Load: + __ vldr(i.OutputDoubleRegister(), i.InputOffset()); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + case kArmFloat64Store: { + int index = 0; + MemOperand operand = i.InputOffset(&index); + __ vstr(i.InputDoubleRegister(index), operand); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + case kArmStoreWriteBarrier: { + Register object = i.InputRegister(0); + Register index = i.InputRegister(1); + Register value = i.InputRegister(2); + __ add(index, object, index); + __ str(value, MemOperand(index)); + SaveFPRegsMode mode = + frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; + LinkRegisterStatus lr_status = kLRHasNotBeenSaved; + __ RecordWrite(object, index, value, lr_status, mode); + DCHECK_EQ(LeaveCC, i.OutputSBit()); + break; + } + } +} + + +// Assembles branches after an instruction. +void CodeGenerator::AssembleArchBranch(Instruction* instr, + FlagsCondition condition) { + ArmOperandConverter i(this, instr); + Label done; + + // Emit a branch. The true and false targets are always the last two inputs + // to the instruction. 
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2); + BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1); + bool fallthru = IsNextInAssemblyOrder(fblock); + Label* tlabel = code()->GetLabel(tblock); + Label* flabel = fallthru ? &done : code()->GetLabel(fblock); + switch (condition) { + case kUnorderedEqual: + __ b(vs, flabel); + // Fall through. + case kEqual: + __ b(eq, tlabel); + break; + case kUnorderedNotEqual: + __ b(vs, tlabel); + // Fall through. + case kNotEqual: + __ b(ne, tlabel); + break; + case kSignedLessThan: + __ b(lt, tlabel); + break; + case kSignedGreaterThanOrEqual: + __ b(ge, tlabel); + break; + case kSignedLessThanOrEqual: + __ b(le, tlabel); + break; + case kSignedGreaterThan: + __ b(gt, tlabel); + break; + case kUnorderedLessThan: + __ b(vs, flabel); + // Fall through. + case kUnsignedLessThan: + __ b(lo, tlabel); + break; + case kUnorderedGreaterThanOrEqual: + __ b(vs, tlabel); + // Fall through. + case kUnsignedGreaterThanOrEqual: + __ b(hs, tlabel); + break; + case kUnorderedLessThanOrEqual: + __ b(vs, flabel); + // Fall through. + case kUnsignedLessThanOrEqual: + __ b(ls, tlabel); + break; + case kUnorderedGreaterThan: + __ b(vs, tlabel); + // Fall through. + case kUnsignedGreaterThan: + __ b(hi, tlabel); + break; + case kOverflow: + __ b(vs, tlabel); + break; + case kNotOverflow: + __ b(vc, tlabel); + break; + } + if (!fallthru) __ b(flabel); // no fallthru to flabel. + __ bind(&done); +} + + +// Assembles boolean materializations after an instruction. +void CodeGenerator::AssembleArchBoolean(Instruction* instr, + FlagsCondition condition) { + ArmOperandConverter i(this, instr); + Label done; + + // Materialize a full 32-bit 1 or 0 value. The result register is always the + // last output of the instruction. 
+ Label check; + DCHECK_NE(0, instr->OutputCount()); + Register reg = i.OutputRegister(instr->OutputCount() - 1); + Condition cc = kNoCondition; + switch (condition) { + case kUnorderedEqual: + __ b(vc, &check); + __ mov(reg, Operand(0)); + __ b(&done); + // Fall through. + case kEqual: + cc = eq; + break; + case kUnorderedNotEqual: + __ b(vc, &check); + __ mov(reg, Operand(1)); + __ b(&done); + // Fall through. + case kNotEqual: + cc = ne; + break; + case kSignedLessThan: + cc = lt; + break; + case kSignedGreaterThanOrEqual: + cc = ge; + break; + case kSignedLessThanOrEqual: + cc = le; + break; + case kSignedGreaterThan: + cc = gt; + break; + case kUnorderedLessThan: + __ b(vc, &check); + __ mov(reg, Operand(0)); + __ b(&done); + // Fall through. + case kUnsignedLessThan: + cc = lo; + break; + case kUnorderedGreaterThanOrEqual: + __ b(vc, &check); + __ mov(reg, Operand(1)); + __ b(&done); + // Fall through. + case kUnsignedGreaterThanOrEqual: + cc = hs; + break; + case kUnorderedLessThanOrEqual: + __ b(vc, &check); + __ mov(reg, Operand(0)); + __ b(&done); + // Fall through. + case kUnsignedLessThanOrEqual: + cc = ls; + break; + case kUnorderedGreaterThan: + __ b(vc, &check); + __ mov(reg, Operand(1)); + __ b(&done); + // Fall through. + case kUnsignedGreaterThan: + cc = hi; + break; + case kOverflow: + cc = vs; + break; + case kNotOverflow: + cc = vc; + break; + } + __ bind(&check); + __ mov(reg, Operand(0)); + __ mov(reg, Operand(1), LeaveCC, cc); + __ bind(&done); +} + + +void CodeGenerator::AssemblePrologue() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + __ Push(lr, fp); + __ mov(fp, sp); + const RegList saves = descriptor->CalleeSavedRegisters(); + if (saves != 0) { // Save callee-saved registers. 
+ int register_save_area_size = 0; + for (int i = Register::kNumRegisters - 1; i >= 0; i--) { + if (!((1 << i) & saves)) continue; + register_save_area_size += kPointerSize; + } + frame()->SetRegisterSaveAreaSize(register_save_area_size); + __ stm(db_w, sp, saves); + } + } else if (descriptor->IsJSFunctionCall()) { + CompilationInfo* info = linkage()->info(); + __ Prologue(info->IsCodePreAgingActive()); + frame()->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? + if (info->strict_mode() == SLOPPY && !info->is_native()) { + Label ok; + // +2 for return address and saved frame pointer. + int receiver_slot = info->scope()->num_parameters() + 2; + __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize)); + __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); + __ b(ne, &ok); + __ ldr(r2, GlobalObjectOperand()); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); + __ str(r2, MemOperand(fp, receiver_slot * kPointerSize)); + __ bind(&ok); + } + + } else { + __ StubPrologue(); + frame()->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + } + int stack_slots = frame()->GetSpillSlotCount(); + if (stack_slots > 0) { + __ sub(sp, sp, Operand(stack_slots * kPointerSize)); + } +} + + +void CodeGenerator::AssembleReturn() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + if (frame()->GetRegisterSaveAreaSize() > 0) { + // Remove this frame's spill slots first. + int stack_slots = frame()->GetSpillSlotCount(); + if (stack_slots > 0) { + __ add(sp, sp, Operand(stack_slots * kPointerSize)); + } + // Restore registers. 
+ const RegList saves = descriptor->CalleeSavedRegisters(); + if (saves != 0) { + __ ldm(ia_w, sp, saves); + } + } + __ mov(sp, fp); + __ ldm(ia_w, sp, fp.bit() | lr.bit()); + __ Ret(); + } else { + __ mov(sp, fp); + __ ldm(ia_w, sp, fp.bit() | lr.bit()); + int pop_count = + descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0; + __ Drop(pop_count); + __ Ret(); + } +} + + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + ArmOperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + __ mov(g.ToRegister(destination), src); + } else { + __ str(src, g.ToMemOperand(destination)); + } + } else if (source->IsStackSlot()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + MemOperand src = g.ToMemOperand(source); + if (destination->IsRegister()) { + __ ldr(g.ToRegister(destination), src); + } else { + Register temp = kScratchReg; + __ ldr(temp, src); + __ str(temp, g.ToMemOperand(destination)); + } + } else if (source->IsConstant()) { + if (destination->IsRegister() || destination->IsStackSlot()) { + Register dst = + destination->IsRegister() ? 
g.ToRegister(destination) : kScratchReg; + Constant src = g.ToConstant(source); + switch (src.type()) { + case Constant::kInt32: + __ mov(dst, Operand(src.ToInt32())); + break; + case Constant::kInt64: + UNREACHABLE(); + break; + case Constant::kFloat64: + __ Move(dst, + isolate()->factory()->NewNumber(src.ToFloat64(), TENURED)); + break; + case Constant::kExternalReference: + __ mov(dst, Operand(src.ToExternalReference())); + break; + case Constant::kHeapObject: + __ Move(dst, src.ToHeapObject()); + break; + } + if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination)); + } else if (destination->IsDoubleRegister()) { + DwVfpRegister result = g.ToDoubleRegister(destination); + __ vmov(result, g.ToDouble(source)); + } else { + DCHECK(destination->IsDoubleStackSlot()); + DwVfpRegister temp = kScratchDoubleReg; + __ vmov(temp, g.ToDouble(source)); + __ vstr(temp, g.ToMemOperand(destination)); + } + } else if (source->IsDoubleRegister()) { + DwVfpRegister src = g.ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + DwVfpRegister dst = g.ToDoubleRegister(destination); + __ Move(dst, src); + } else { + DCHECK(destination->IsDoubleStackSlot()); + __ vstr(src, g.ToMemOperand(destination)); + } + } else if (source->IsDoubleStackSlot()) { + DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); + MemOperand src = g.ToMemOperand(source); + if (destination->IsDoubleRegister()) { + __ vldr(g.ToDoubleRegister(destination), src); + } else { + DwVfpRegister temp = kScratchDoubleReg; + __ vldr(temp, src); + __ vstr(temp, g.ToMemOperand(destination)); + } + } else { + UNREACHABLE(); + } +} + + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + ArmOperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister()) { + // Register-register. 
+ Register temp = kScratchReg; + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ Move(temp, src); + __ Move(src, dst); + __ Move(dst, temp); + } else { + DCHECK(destination->IsStackSlot()); + MemOperand dst = g.ToMemOperand(destination); + __ mov(temp, src); + __ ldr(src, dst); + __ str(temp, dst); + } + } else if (source->IsStackSlot()) { + DCHECK(destination->IsStackSlot()); + Register temp_0 = kScratchReg; + SwVfpRegister temp_1 = kScratchDoubleReg.low(); + MemOperand src = g.ToMemOperand(source); + MemOperand dst = g.ToMemOperand(destination); + __ ldr(temp_0, src); + __ vldr(temp_1, dst); + __ str(temp_0, dst); + __ vstr(temp_1, src); + } else if (source->IsDoubleRegister()) { + DwVfpRegister temp = kScratchDoubleReg; + DwVfpRegister src = g.ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + DwVfpRegister dst = g.ToDoubleRegister(destination); + __ Move(temp, src); + __ Move(src, dst); + __ Move(src, temp); + } else { + DCHECK(destination->IsDoubleStackSlot()); + MemOperand dst = g.ToMemOperand(destination); + __ Move(temp, src); + __ vldr(src, dst); + __ vstr(temp, dst); + } + } else if (source->IsDoubleStackSlot()) { + DCHECK(destination->IsDoubleStackSlot()); + Register temp_0 = kScratchReg; + DwVfpRegister temp_1 = kScratchDoubleReg; + MemOperand src0 = g.ToMemOperand(source); + MemOperand src1(src0.rn(), src0.offset() + kPointerSize); + MemOperand dst0 = g.ToMemOperand(destination); + MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize); + __ vldr(temp_1, dst0); // Save destination in temp_1. + __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination. + __ str(temp_0, dst0); + __ ldr(temp_0, src1); + __ str(temp_0, dst1); + __ vstr(temp_1, src0); + } else { + // No other combinations are possible. + UNREACHABLE(); + } +} + + +void CodeGenerator::AddNopForSmiCodeInlining() { + // On 32-bit ARM we do not insert nops for inlined Smi code. 
+ UNREACHABLE(); +} + +#ifdef DEBUG + +// Checks whether the code between start_pc and end_pc is a no-op. +bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, + int end_pc) { + return false; +} + +#endif // DEBUG + +#undef __ +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm/instruction-codes-arm.h nodejs-0.11.15/deps/v8/src/compiler/arm/instruction-codes-arm.h --- nodejs-0.11.13/deps/v8/src/compiler/arm/instruction-codes-arm.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm/instruction-codes-arm.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,86 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_ +#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// ARM-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. 
+#define TARGET_ARCH_OPCODE_LIST(V) \ + V(ArmAdd) \ + V(ArmAnd) \ + V(ArmBic) \ + V(ArmCmp) \ + V(ArmCmn) \ + V(ArmTst) \ + V(ArmTeq) \ + V(ArmOrr) \ + V(ArmEor) \ + V(ArmSub) \ + V(ArmRsb) \ + V(ArmMul) \ + V(ArmMla) \ + V(ArmMls) \ + V(ArmSdiv) \ + V(ArmUdiv) \ + V(ArmMov) \ + V(ArmMvn) \ + V(ArmBfc) \ + V(ArmUbfx) \ + V(ArmCallCodeObject) \ + V(ArmCallJSFunction) \ + V(ArmCallAddress) \ + V(ArmPush) \ + V(ArmDrop) \ + V(ArmVcmpF64) \ + V(ArmVaddF64) \ + V(ArmVsubF64) \ + V(ArmVmulF64) \ + V(ArmVmlaF64) \ + V(ArmVmlsF64) \ + V(ArmVdivF64) \ + V(ArmVmodF64) \ + V(ArmVnegF64) \ + V(ArmVcvtF64S32) \ + V(ArmVcvtF64U32) \ + V(ArmVcvtS32F64) \ + V(ArmVcvtU32F64) \ + V(ArmFloat64Load) \ + V(ArmFloat64Store) \ + V(ArmLoadWord8) \ + V(ArmStoreWord8) \ + V(ArmLoadWord16) \ + V(ArmStoreWord16) \ + V(ArmLoadWord32) \ + V(ArmStoreWord32) \ + V(ArmStoreWriteBarrier) + + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. 
+#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(Offset_RI) /* [%r0 + K] */ \ + V(Offset_RR) /* [%r0 + %r1] */ \ + V(Operand2_I) /* K */ \ + V(Operand2_R) /* %r0 */ \ + V(Operand2_R_ASR_I) /* %r0 ASR K */ \ + V(Operand2_R_LSL_I) /* %r0 LSL K */ \ + V(Operand2_R_LSR_I) /* %r0 LSR K */ \ + V(Operand2_R_ROR_I) /* %r0 ROR K */ \ + V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \ + V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \ + V(Operand2_R_LSR_R) /* %r0 LSR %r1 */ \ + V(Operand2_R_ROR_R) /* %r0 ROR %r1 */ + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm/instruction-selector-arm.cc nodejs-0.11.15/deps/v8/src/compiler/arm/instruction-selector-arm.cc --- nodejs-0.11.13/deps/v8/src/compiler/arm/instruction-selector-arm.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm/instruction-selector-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,943 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler-intrinsics.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Adds Arm-specific methods for generating InstructionOperands. 
+class ArmOperandGenerator V8_FINAL : public OperandGenerator { + public: + explicit ArmOperandGenerator(InstructionSelector* selector) + : OperandGenerator(selector) {} + + InstructionOperand* UseOperand(Node* node, InstructionCode opcode) { + if (CanBeImmediate(node, opcode)) { + return UseImmediate(node); + } + return UseRegister(node); + } + + bool CanBeImmediate(Node* node, InstructionCode opcode) { + int32_t value; + switch (node->opcode()) { + case IrOpcode::kInt32Constant: + case IrOpcode::kNumberConstant: + value = ValueOf<int32_t>(node->op()); + break; + default: + return false; + } + switch (ArchOpcodeField::decode(opcode)) { + case kArmAnd: + case kArmMov: + case kArmMvn: + case kArmBic: + return ImmediateFitsAddrMode1Instruction(value) || + ImmediateFitsAddrMode1Instruction(~value); + + case kArmAdd: + case kArmSub: + case kArmCmp: + case kArmCmn: + return ImmediateFitsAddrMode1Instruction(value) || + ImmediateFitsAddrMode1Instruction(-value); + + case kArmTst: + case kArmTeq: + case kArmOrr: + case kArmEor: + case kArmRsb: + return ImmediateFitsAddrMode1Instruction(value); + + case kArmFloat64Load: + case kArmFloat64Store: + return value >= -1020 && value <= 1020 && (value % 4) == 0; + + case kArmLoadWord8: + case kArmStoreWord8: + case kArmLoadWord32: + case kArmStoreWord32: + case kArmStoreWriteBarrier: + return value >= -4095 && value <= 4095; + + case kArmLoadWord16: + case kArmStoreWord16: + return value >= -255 && value <= 255; + + case kArchJmp: + case kArchNop: + case kArchRet: + case kArchDeoptimize: + case kArmMul: + case kArmMla: + case kArmMls: + case kArmSdiv: + case kArmUdiv: + case kArmBfc: + case kArmUbfx: + case kArmCallCodeObject: + case kArmCallJSFunction: + case kArmCallAddress: + case kArmPush: + case kArmDrop: + case kArmVcmpF64: + case kArmVaddF64: + case kArmVsubF64: + case kArmVmulF64: + case kArmVmlaF64: + case kArmVmlsF64: + case kArmVdivF64: + case kArmVmodF64: + case kArmVnegF64: + case kArmVcvtF64S32: + case 
kArmVcvtF64U32: + case kArmVcvtS32F64: + case kArmVcvtU32F64: + return false; + } + UNREACHABLE(); + return false; + } + + private: + bool ImmediateFitsAddrMode1Instruction(int32_t imm) const { + return Assembler::ImmediateFitsAddrMode1Instruction(imm); + } +}; + + +static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + ArmOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsDoubleRegister(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +static bool TryMatchROR(InstructionSelector* selector, + InstructionCode* opcode_return, Node* node, + InstructionOperand** value_return, + InstructionOperand** shift_return) { + ArmOperandGenerator g(selector); + if (node->opcode() != IrOpcode::kWord32Or) return false; + Int32BinopMatcher m(node); + Node* shl = m.left().node(); + Node* shr = m.right().node(); + if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) { + std::swap(shl, shr); + } else if (!m.left().IsWord32Shl() || !m.right().IsWord32Shr()) { + return false; + } + Int32BinopMatcher mshr(shr); + Int32BinopMatcher mshl(shl); + Node* value = mshr.left().node(); + if (value != mshl.left().node()) return false; + Node* shift = mshr.right().node(); + Int32Matcher mshift(shift); + if (mshift.IsInRange(1, 31) && mshl.right().Is(32 - mshift.Value())) { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I); + *value_return = g.UseRegister(value); + *shift_return = g.UseImmediate(shift); + return true; + } + if (mshl.right().IsInt32Sub()) { + Int32BinopMatcher mshlright(mshl.right().node()); + if (!mshlright.left().Is(32)) return false; + if (mshlright.right().node() != shift) return false; + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R); + *value_return = g.UseRegister(value); + *shift_return = g.UseRegister(shift); + return true; + } + return false; +} + + +static inline bool TryMatchASR(InstructionSelector* selector, + 
InstructionCode* opcode_return, Node* node, + InstructionOperand** value_return, + InstructionOperand** shift_return) { + ArmOperandGenerator g(selector); + if (node->opcode() != IrOpcode::kWord32Sar) return false; + Int32BinopMatcher m(node); + *value_return = g.UseRegister(m.left().node()); + if (m.right().IsInRange(1, 32)) { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I); + *shift_return = g.UseImmediate(m.right().node()); + } else { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R); + *shift_return = g.UseRegister(m.right().node()); + } + return true; +} + + +static inline bool TryMatchLSL(InstructionSelector* selector, + InstructionCode* opcode_return, Node* node, + InstructionOperand** value_return, + InstructionOperand** shift_return) { + ArmOperandGenerator g(selector); + if (node->opcode() != IrOpcode::kWord32Shl) return false; + Int32BinopMatcher m(node); + *value_return = g.UseRegister(m.left().node()); + if (m.right().IsInRange(0, 31)) { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I); + *shift_return = g.UseImmediate(m.right().node()); + } else { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R); + *shift_return = g.UseRegister(m.right().node()); + } + return true; +} + + +static inline bool TryMatchLSR(InstructionSelector* selector, + InstructionCode* opcode_return, Node* node, + InstructionOperand** value_return, + InstructionOperand** shift_return) { + ArmOperandGenerator g(selector); + if (node->opcode() != IrOpcode::kWord32Shr) return false; + Int32BinopMatcher m(node); + *value_return = g.UseRegister(m.left().node()); + if (m.right().IsInRange(1, 32)) { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I); + *shift_return = g.UseImmediate(m.right().node()); + } else { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R); + *shift_return = g.UseRegister(m.right().node()); + } + return true; +} + + +static inline bool 
TryMatchShift(InstructionSelector* selector, + InstructionCode* opcode_return, Node* node, + InstructionOperand** value_return, + InstructionOperand** shift_return) { + return ( + TryMatchASR(selector, opcode_return, node, value_return, shift_return) || + TryMatchLSL(selector, opcode_return, node, value_return, shift_return) || + TryMatchLSR(selector, opcode_return, node, value_return, shift_return) || + TryMatchROR(selector, opcode_return, node, value_return, shift_return)); +} + + +static inline bool TryMatchImmediateOrShift(InstructionSelector* selector, + InstructionCode* opcode_return, + Node* node, + size_t* input_count_return, + InstructionOperand** inputs) { + ArmOperandGenerator g(selector); + if (g.CanBeImmediate(node, *opcode_return)) { + *opcode_return |= AddressingModeField::encode(kMode_Operand2_I); + inputs[0] = g.UseImmediate(node); + *input_count_return = 1; + return true; + } + if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) { + *input_count_return = 2; + return true; + } + return false; +} + + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, InstructionCode reverse_opcode, + FlagsContinuation* cont) { + ArmOperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand* inputs[5]; + size_t input_count = 0; + InstructionOperand* outputs[2]; + size_t output_count = 0; + + if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(), + &input_count, &inputs[1])) { + inputs[0] = g.UseRegister(m.left().node()); + input_count++; + } else if (TryMatchImmediateOrShift(selector, &reverse_opcode, + m.left().node(), &input_count, + &inputs[1])) { + inputs[0] = g.UseRegister(m.right().node()); + opcode = reverse_opcode; + input_count++; + } else { + opcode |= AddressingModeField::encode(kMode_Operand2_R); + inputs[input_count++] = g.UseRegister(m.left().node()); + inputs[input_count++] = g.UseRegister(m.right().node()); + } + + if (cont->IsBranch()) { + 
inputs[input_count++] = g.Label(cont->true_block()); + inputs[input_count++] = g.Label(cont->false_block()); + } + + outputs[output_count++] = g.DefineAsRegister(node); + if (cont->IsSet()) { + outputs[output_count++] = g.DefineAsRegister(cont->result()); + } + + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); + DCHECK_GE(ARRAY_SIZE(inputs), input_count); + DCHECK_GE(ARRAY_SIZE(outputs), output_count); + DCHECK_NE(kMode_None, AddressingModeField::decode(opcode)); + + Instruction* instr = selector->Emit(cont->Encode(opcode), output_count, + outputs, input_count, inputs); + if (cont->IsBranch()) instr->MarkAsControl(); +} + + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, InstructionCode reverse_opcode) { + FlagsContinuation cont; + VisitBinop(selector, node, opcode, reverse_opcode, &cont); +} + + +void InstructionSelector::VisitLoad(Node* node) { + MachineType rep = OpParameter<MachineType>(node); + ArmOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + InstructionOperand* result = rep == kMachineFloat64 + ? g.DefineAsDoubleRegister(node) + : g.DefineAsRegister(node); + + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kArmFloat64Load; + break; + case kMachineWord8: + opcode = kArmLoadWord8; + break; + case kMachineWord16: + opcode = kArmLoadWord16; + break; + case kMachineTagged: // Fall through. 
+ case kMachineWord32: + opcode = kArmLoadWord32; + break; + default: + UNREACHABLE(); + return; + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result, + g.UseRegister(base), g.UseImmediate(index)); + } else if (g.CanBeImmediate(base, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result, + g.UseRegister(index), g.UseImmediate(base)); + } else { + Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result, + g.UseRegister(base), g.UseRegister(index)); + } +} + + +void InstructionSelector::VisitStore(Node* node) { + ArmOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node); + MachineType rep = store_rep.rep; + if (store_rep.write_barrier_kind == kFullWriteBarrier) { + DCHECK(rep == kMachineTagged); + // TODO(dcarney): refactor RecordWrite function to take temp registers + // and pass them here instead of using fixed regs + // TODO(dcarney): handle immediate indices. + InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)}; + Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4), + g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps), + temps); + return; + } + DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind); + InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value) + : g.UseRegister(value); + + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kArmFloat64Store; + break; + case kMachineWord8: + opcode = kArmStoreWord8; + break; + case kMachineWord16: + opcode = kArmStoreWord16; + break; + case kMachineTagged: // Fall through. 
+ case kMachineWord32: + opcode = kArmStoreWord32; + break; + default: + UNREACHABLE(); + return; + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL, + g.UseRegister(base), g.UseImmediate(index), val); + } else if (g.CanBeImmediate(base, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL, + g.UseRegister(index), g.UseImmediate(base), val); + } else { + Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL, + g.UseRegister(base), g.UseRegister(index), val); + } +} + + +static inline void EmitBic(InstructionSelector* selector, Node* node, + Node* left, Node* right) { + ArmOperandGenerator g(selector); + InstructionCode opcode = kArmBic; + InstructionOperand* value_operand; + InstructionOperand* shift_operand; + if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) { + selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left), + value_operand, shift_operand); + return; + } + selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R), + g.DefineAsRegister(node), g.UseRegister(left), + g.UseRegister(right)); +} + + +void InstructionSelector::VisitWord32And(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) { + Int32BinopMatcher mleft(m.left().node()); + if (mleft.right().Is(-1)) { + EmitBic(this, node, m.right().node(), mleft.left().node()); + return; + } + } + if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) { + Int32BinopMatcher mright(m.right().node()); + if (mright.right().Is(-1)) { + EmitBic(this, node, m.left().node(), mright.left().node()); + return; + } + } + if (IsSupported(ARMv7) && m.right().HasValue()) { + uint32_t value = m.right().Value(); + uint32_t width = CompilerIntrinsics::CountSetBits(value); + uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value); + if (width != 0 && msb + width == 32) { + 
DCHECK_EQ(0, CompilerIntrinsics::CountTrailingZeros(value)); + if (m.left().IsWord32Shr()) { + Int32BinopMatcher mleft(m.left().node()); + if (mleft.right().IsInRange(0, 31)) { + Emit(kArmUbfx, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), + g.UseImmediate(mleft.right().node()), g.TempImmediate(width)); + return; + } + } + Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(0), g.TempImmediate(width)); + return; + } + // Try to interpret this AND as BFC. + width = 32 - width; + msb = CompilerIntrinsics::CountLeadingZeros(~value); + uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value); + if (msb + width + lsb == 32) { + Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.TempImmediate(lsb), g.TempImmediate(width)); + return; + } + } + VisitBinop(this, node, kArmAnd, kArmAnd); +} + + +void InstructionSelector::VisitWord32Or(Node* node) { + ArmOperandGenerator g(this); + InstructionCode opcode = kArmMov; + InstructionOperand* value_operand; + InstructionOperand* shift_operand; + if (TryMatchROR(this, &opcode, node, &value_operand, &shift_operand)) { + Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand); + return; + } + VisitBinop(this, node, kArmOrr, kArmOrr); +} + + +void InstructionSelector::VisitWord32Xor(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.right().Is(-1)) { + InstructionCode opcode = kArmMvn; + InstructionOperand* value_operand; + InstructionOperand* shift_operand; + if (TryMatchShift(this, &opcode, m.left().node(), &value_operand, + &shift_operand)) { + Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand); + return; + } + Emit(opcode | AddressingModeField::encode(kMode_Operand2_R), + g.DefineAsRegister(node), g.UseRegister(m.left().node())); + return; + } + VisitBinop(this, node, kArmEor, kArmEor); +} + + +template <typename TryMatchShift> +static inline void VisitShift(InstructionSelector* 
selector, Node* node, + TryMatchShift try_match_shift) { + ArmOperandGenerator g(selector); + InstructionCode opcode = kArmMov; + InstructionOperand* value_operand = NULL; + InstructionOperand* shift_operand = NULL; + CHECK( + try_match_shift(selector, &opcode, node, &value_operand, &shift_operand)); + selector->Emit(opcode, g.DefineAsRegister(node), value_operand, + shift_operand); +} + + +void InstructionSelector::VisitWord32Shl(Node* node) { + VisitShift(this, node, TryMatchLSL); +} + + +void InstructionSelector::VisitWord32Shr(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (IsSupported(ARMv7) && m.left().IsWord32And() && + m.right().IsInRange(0, 31)) { + int32_t lsb = m.right().Value(); + Int32BinopMatcher mleft(m.left().node()); + if (mleft.right().HasValue()) { + uint32_t value = (mleft.right().Value() >> lsb) << lsb; + uint32_t width = CompilerIntrinsics::CountSetBits(value); + uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value); + if (msb + width + lsb == 32) { + DCHECK_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value)); + Emit(kArmUbfx, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), + g.TempImmediate(width)); + return; + } + } + } + VisitShift(this, node, TryMatchLSR); +} + + +void InstructionSelector::VisitWord32Sar(Node* node) { + VisitShift(this, node, TryMatchASR); +} + + +void InstructionSelector::VisitInt32Add(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) { + Int32BinopMatcher mleft(m.left().node()); + Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node())); + return; + } + if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) { + Int32BinopMatcher mright(m.right().node()); + Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()), + 
g.UseRegister(mright.right().node()), g.UseRegister(m.left().node())); + return; + } + VisitBinop(this, node, kArmAdd, kArmAdd); +} + + +void InstructionSelector::VisitInt32Sub(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (IsSupported(MLS) && m.right().IsInt32Mul() && + CanCover(node, m.right().node())) { + Int32BinopMatcher mright(m.right().node()); + Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node()), g.UseRegister(m.left().node())); + return; + } + VisitBinop(this, node, kArmSub, kArmRsb); +} + + +void InstructionSelector::VisitInt32Mul(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.right().HasValue() && m.right().Value() > 0) { + int32_t value = m.right().Value(); + if (IsPowerOf2(value - 1)) { + Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I), + g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.left().node()), + g.TempImmediate(WhichPowerOf2(value - 1))); + return; + } + if (value < kMaxInt && IsPowerOf2(value + 1)) { + Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I), + g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.left().node()), + g.TempImmediate(WhichPowerOf2(value + 1))); + return; + } + } + Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + + +static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode, + ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode, + InstructionOperand* result_operand, + InstructionOperand* left_operand, + InstructionOperand* right_operand) { + ArmOperandGenerator g(selector); + if (selector->IsSupported(SUDIV)) { + selector->Emit(div_opcode, result_operand, left_operand, right_operand); + return; + } + InstructionOperand* left_double_operand = g.TempDoubleRegister(); + InstructionOperand* right_double_operand = 
g.TempDoubleRegister(); + InstructionOperand* result_double_operand = g.TempDoubleRegister(); + selector->Emit(f64i32_opcode, left_double_operand, left_operand); + selector->Emit(f64i32_opcode, right_double_operand, right_operand); + selector->Emit(kArmVdivF64, result_double_operand, left_double_operand, + right_double_operand); + selector->Emit(i32f64_opcode, result_operand, result_double_operand); +} + + +static void VisitDiv(InstructionSelector* selector, Node* node, + ArchOpcode div_opcode, ArchOpcode f64i32_opcode, + ArchOpcode i32f64_opcode) { + ArmOperandGenerator g(selector); + Int32BinopMatcher m(node); + EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, + g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + + +void InstructionSelector::VisitInt32Div(Node* node) { + VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64); +} + + +void InstructionSelector::VisitInt32UDiv(Node* node) { + VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64); +} + + +static void VisitMod(InstructionSelector* selector, Node* node, + ArchOpcode div_opcode, ArchOpcode f64i32_opcode, + ArchOpcode i32f64_opcode) { + ArmOperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand* div_operand = g.TempRegister(); + InstructionOperand* result_operand = g.DefineAsRegister(node); + InstructionOperand* left_operand = g.UseRegister(m.left().node()); + InstructionOperand* right_operand = g.UseRegister(m.right().node()); + EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand, + left_operand, right_operand); + if (selector->IsSupported(MLS)) { + selector->Emit(kArmMls, result_operand, div_operand, right_operand, + left_operand); + return; + } + InstructionOperand* mul_operand = g.TempRegister(); + selector->Emit(kArmMul, mul_operand, div_operand, right_operand); + selector->Emit(kArmSub, result_operand, left_operand, mul_operand); +} + + +void 
InstructionSelector::VisitInt32Mod(Node* node) { + VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64); +} + + +void InstructionSelector::VisitInt32UMod(Node* node) { + VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64); +} + + +void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { + ArmOperandGenerator g(this); + Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { + ArmOperandGenerator g(this); + Emit(kArmVcvtF64U32, g.DefineAsDoubleRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { + ArmOperandGenerator g(this); + Emit(kArmVcvtS32F64, g.DefineAsRegister(node), + g.UseDoubleRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { + ArmOperandGenerator g(this); + Emit(kArmVcvtU32F64, g.DefineAsRegister(node), + g.UseDoubleRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitFloat64Add(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + Int32BinopMatcher mleft(m.left().node()); + Emit(kArmVmlaF64, g.DefineSameAsFirst(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + Int32BinopMatcher mright(m.right().node()); + Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + VisitRRRFloat64(this, kArmVaddF64, node); +} + + +void InstructionSelector::VisitFloat64Sub(Node* node) { + ArmOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + Int32BinopMatcher 
mright(m.right().node()); + Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + VisitRRRFloat64(this, kArmVsubF64, node); +} + + +void InstructionSelector::VisitFloat64Mul(Node* node) { + ArmOperandGenerator g(this); + Float64BinopMatcher m(node); + if (m.right().Is(-1.0)) { + Emit(kArmVnegF64, g.DefineAsRegister(node), + g.UseDoubleRegister(m.left().node())); + } else { + VisitRRRFloat64(this, kArmVmulF64, node); + } +} + + +void InstructionSelector::VisitFloat64Div(Node* node) { + VisitRRRFloat64(this, kArmVdivF64, node); +} + + +void InstructionSelector::VisitFloat64Mod(Node* node) { + ArmOperandGenerator g(this); + Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0), + g.UseFixedDouble(node->InputAt(0), d0), + g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall(); +} + + +void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, + BasicBlock* deoptimization) { + ArmOperandGenerator g(this); + CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call); + CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here? + + // Compute InstructionOperands for inputs and outputs. + // TODO(turbofan): on ARM64 it's probably better to use the code object in a + // register if there are multiple uses of it. Improve constant pool and the + // heuristics in the register allocator for where to emit constants. + InitializeCallBuffer(call, &buffer, true, false, continuation, + deoptimization); + + // TODO(dcarney): might be possible to use claim/poke instead + // Push any stack arguments. + for (int i = buffer.pushed_count - 1; i >= 0; --i) { + Node* input = buffer.pushed_nodes[i]; + Emit(kArmPush, NULL, g.UseRegister(input)); + } + + // Select the appropriate opcode based on the call type. 
+ InstructionCode opcode; + switch (descriptor->kind()) { + case CallDescriptor::kCallCodeObject: { + bool lazy_deopt = descriptor->CanLazilyDeoptimize(); + opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0); + break; + } + case CallDescriptor::kCallAddress: + opcode = kArmCallAddress; + break; + case CallDescriptor::kCallJSFunction: + opcode = kArmCallJSFunction; + break; + default: + UNREACHABLE(); + return; + } + + // Emit the call instruction. + Instruction* call_instr = + Emit(opcode, buffer.output_count, buffer.outputs, + buffer.fixed_and_control_count(), buffer.fixed_and_control_args); + + call_instr->MarkAsCall(); + if (deoptimization != NULL) { + DCHECK(continuation != NULL); + call_instr->MarkAsControl(); + } + + // Caller clean up of stack for C-style calls. + if (descriptor->kind() == CallDescriptor::kCallAddress && + buffer.pushed_count > 0) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL); + } +} + + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kArmAdd, kArmAdd, cont); +} + + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kArmSub, kArmRsb, cont); +} + + +// Shared routine for multiple compare operations. 
+static void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + bool commutative) { + ArmOperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand* inputs[5]; + size_t input_count = 0; + InstructionOperand* outputs[1]; + size_t output_count = 0; + + if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(), + &input_count, &inputs[1])) { + inputs[0] = g.UseRegister(m.left().node()); + input_count++; + } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(), + &input_count, &inputs[1])) { + if (!commutative) cont->Commute(); + inputs[0] = g.UseRegister(m.right().node()); + input_count++; + } else { + opcode |= AddressingModeField::encode(kMode_Operand2_R); + inputs[input_count++] = g.UseRegister(m.left().node()); + inputs[input_count++] = g.UseRegister(m.right().node()); + } + + if (cont->IsBranch()) { + inputs[input_count++] = g.Label(cont->true_block()); + inputs[input_count++] = g.Label(cont->false_block()); + } else { + DCHECK(cont->IsSet()); + outputs[output_count++] = g.DefineAsRegister(cont->result()); + } + + DCHECK_NE(0, input_count); + DCHECK_GE(ARRAY_SIZE(inputs), input_count); + DCHECK_GE(ARRAY_SIZE(outputs), output_count); + + Instruction* instr = selector->Emit(cont->Encode(opcode), output_count, + outputs, input_count, inputs); + if (cont->IsBranch()) instr->MarkAsControl(); +} + + +void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) { + switch (node->opcode()) { + case IrOpcode::kInt32Add: + return VisitWordCompare(this, node, kArmCmn, cont, true); + case IrOpcode::kInt32Sub: + return VisitWordCompare(this, node, kArmCmp, cont, false); + case IrOpcode::kWord32And: + return VisitWordCompare(this, node, kArmTst, cont, true); + case IrOpcode::kWord32Or: + return VisitBinop(this, node, kArmOrr, kArmOrr, cont); + case IrOpcode::kWord32Xor: + return VisitWordCompare(this, node, kArmTeq, cont, true); + default: + break; + } + 
+ ArmOperandGenerator g(this); + InstructionCode opcode = + cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R); + if (cont->IsBranch()) { + Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node), + g.Label(cont->true_block()), + g.Label(cont->false_block()))->MarkAsControl(); + } else { + Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node), + g.UseRegister(node)); + } +} + + +void InstructionSelector::VisitWord32Compare(Node* node, + FlagsContinuation* cont) { + VisitWordCompare(this, node, kArmCmp, cont, false); +} + + +void InstructionSelector::VisitFloat64Compare(Node* node, + FlagsContinuation* cont) { + ArmOperandGenerator g(this); + Float64BinopMatcher m(node); + if (cont->IsBranch()) { + Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()), + g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()), + g.Label(cont->false_block()))->MarkAsControl(); + } else { + DCHECK(cont->IsSet()); + Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()), + g.UseDoubleRegister(m.left().node()), + g.UseDoubleRegister(m.right().node())); + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm/linkage-arm.cc nodejs-0.11.15/deps/v8/src/compiler/arm/linkage-arm.cc --- nodejs-0.11.13/deps/v8/src/compiler/arm/linkage-arm.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm/linkage-arm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/code-stubs.h" +#include "src/compiler/linkage.h" +#include "src/compiler/linkage-impl.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +struct LinkageHelperTraits { + static Register ReturnValueReg() { return r0; } + static Register ReturnValue2Reg() { return r1; } + static Register JSCallFunctionReg() { return r1; } + static Register ContextReg() { return cp; } + static Register RuntimeCallFunctionReg() { return r1; } + static Register RuntimeCallArgCountReg() { return r0; } + static RegList CCalleeSaveRegisters() { + return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() | + r10.bit(); + } + static Register CRegisterParameter(int i) { + static Register register_parameters[] = {r0, r1, r2, r3}; + return register_parameters[i]; + } + static int CRegisterParametersLength() { return 4; } +}; + + +CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) { + return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>( + zone, parameter_count); +} + + +CallDescriptor* Linkage::GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>( + zone, function, parameter_count, properties, can_deoptimize); +} + + +CallDescriptor* Linkage::GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>( + zone, descriptor, stack_parameter_count, can_deoptimize); +} + + +CallDescriptor* Linkage::GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types) { + return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>( + 
zone, num_params, return_type, param_types); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm64/code-generator-arm64.cc nodejs-0.11.15/deps/v8/src/compiler/arm64/code-generator-arm64.cc --- nodejs-0.11.13/deps/v8/src/compiler/arm64/code-generator-arm64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm64/code-generator-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,854 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/code-generator.h" + +#include "src/arm64/macro-assembler-arm64.h" +#include "src/compiler/code-generator-impl.h" +#include "src/compiler/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" +#include "src/scopes.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define __ masm()-> + + +// Adds Arm64-specific methods to convert InstructionOperands. 
+class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter { + public: + Arm64OperandConverter(CodeGenerator* gen, Instruction* instr) + : InstructionOperandConverter(gen, instr) {} + + Register InputRegister32(int index) { + return ToRegister(instr_->InputAt(index)).W(); + } + + Register InputRegister64(int index) { return InputRegister(index); } + + Operand InputImmediate(int index) { + return ToImmediate(instr_->InputAt(index)); + } + + Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); } + + Operand InputOperand64(int index) { return InputOperand(index); } + + Operand InputOperand32(int index) { + return ToOperand32(instr_->InputAt(index)); + } + + Register OutputRegister64() { return OutputRegister(); } + + Register OutputRegister32() { return ToRegister(instr_->Output()).W(); } + + MemOperand MemoryOperand(int* first_index) { + const int index = *first_index; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_None: + break; + case kMode_MRI: + *first_index += 2; + return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); + case kMode_MRR: + *first_index += 2; + return MemOperand(InputRegister(index + 0), InputRegister(index + 1), + SXTW); + } + UNREACHABLE(); + return MemOperand(no_reg); + } + + MemOperand MemoryOperand() { + int index = 0; + return MemoryOperand(&index); + } + + Operand ToOperand(InstructionOperand* op) { + if (op->IsRegister()) { + return Operand(ToRegister(op)); + } + return ToImmediate(op); + } + + Operand ToOperand32(InstructionOperand* op) { + if (op->IsRegister()) { + return Operand(ToRegister(op).W()); + } + return ToImmediate(op); + } + + Operand ToImmediate(InstructionOperand* operand) { + Constant constant = ToConstant(operand); + switch (constant.type()) { + case Constant::kInt32: + return Operand(constant.ToInt32()); + case Constant::kInt64: + return Operand(constant.ToInt64()); + case Constant::kFloat64: + return Operand( + 
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); + case Constant::kExternalReference: + return Operand(constant.ToExternalReference()); + case Constant::kHeapObject: + return Operand(constant.ToHeapObject()); + } + UNREACHABLE(); + return Operand(-1); + } + + MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const { + DCHECK(op != NULL); + DCHECK(!op->IsRegister()); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + // The linkage computes where all spill slots are located. + FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0); + return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp, + offset.offset()); + } +}; + + +#define ASSEMBLE_SHIFT(asm_instr, width) \ + do { \ + if (instr->InputAt(1)->IsRegister()) { \ + __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \ + i.InputRegister##width(1)); \ + } else { \ + int64_t imm = i.InputOperand##width(1).immediate().value(); \ + __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \ + } \ + } while (0); + + +// Assembles an instruction after register allocation, producing machine code. +void CodeGenerator::AssembleArchInstruction(Instruction* instr) { + Arm64OperandConverter i(this, instr); + InstructionCode opcode = instr->opcode(); + switch (ArchOpcodeField::decode(opcode)) { + case kArchJmp: + __ B(code_->GetLabel(i.InputBlock(0))); + break; + case kArchNop: + // don't emit code for nops. 
+ break; + case kArchRet: + AssembleReturn(); + break; + case kArchDeoptimize: { + int deoptimization_id = MiscField::decode(instr->opcode()); + BuildTranslation(instr, deoptimization_id); + + Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( + isolate(), deoptimization_id, Deoptimizer::LAZY); + __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); + break; + } + case kArm64Add: + __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64Add32: + if (FlagsModeField::decode(opcode) != kFlags_none) { + __ Adds(i.OutputRegister32(), i.InputRegister32(0), + i.InputOperand32(1)); + } else { + __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1)); + } + break; + case kArm64And: + __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64And32: + __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1)); + break; + case kArm64Mul: + __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + break; + case kArm64Mul32: + __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); + break; + case kArm64Idiv: + __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + break; + case kArm64Idiv32: + __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); + break; + case kArm64Udiv: + __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + break; + case kArm64Udiv32: + __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); + break; + case kArm64Imod: { + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireX(); + __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1)); + __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); + break; + } + case kArm64Imod32: { + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireW(); + __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1)); + __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), + 
i.InputRegister32(0)); + break; + } + case kArm64Umod: { + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireX(); + __ Udiv(temp, i.InputRegister(0), i.InputRegister(1)); + __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); + break; + } + case kArm64Umod32: { + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireW(); + __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1)); + __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), + i.InputRegister32(0)); + break; + } + // TODO(dcarney): use mvn instr?? + case kArm64Not: + __ Orn(i.OutputRegister(), xzr, i.InputOperand(0)); + break; + case kArm64Not32: + __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0)); + break; + case kArm64Neg: + __ Neg(i.OutputRegister(), i.InputOperand(0)); + break; + case kArm64Neg32: + __ Neg(i.OutputRegister32(), i.InputOperand32(0)); + break; + case kArm64Or: + __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64Or32: + __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1)); + break; + case kArm64Xor: + __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64Xor32: + __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1)); + break; + case kArm64Sub: + __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64Sub32: + if (FlagsModeField::decode(opcode) != kFlags_none) { + __ Subs(i.OutputRegister32(), i.InputRegister32(0), + i.InputOperand32(1)); + } else { + __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1)); + } + break; + case kArm64Shl: + ASSEMBLE_SHIFT(Lsl, 64); + break; + case kArm64Shl32: + ASSEMBLE_SHIFT(Lsl, 32); + break; + case kArm64Shr: + ASSEMBLE_SHIFT(Lsr, 64); + break; + case kArm64Shr32: + ASSEMBLE_SHIFT(Lsr, 32); + break; + case kArm64Sar: + ASSEMBLE_SHIFT(Asr, 64); + break; + case kArm64Sar32: + ASSEMBLE_SHIFT(Asr, 32); + break; + case 
kArm64CallCodeObject: { + if (instr->InputAt(0)->IsImmediate()) { + Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); + __ Call(code, RelocInfo::CODE_TARGET); + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + } else { + Register reg = i.InputRegister(0); + int entry = Code::kHeaderSize - kHeapObjectTag; + __ Ldr(reg, MemOperand(reg, entry)); + __ Call(reg); + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + } + bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); + if (lazy_deopt) { + RecordLazyDeoptimizationEntry(instr); + } + // Meaningless instruction for ICs to overwrite. + AddNopForSmiCodeInlining(); + break; + } + case kArm64CallJSFunction: { + Register func = i.InputRegister(0); + + // TODO(jarin) The load of the context should be separated from the call. + __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset)); + __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); + __ Call(x10); + + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + RecordLazyDeoptimizationEntry(instr); + break; + } + case kArm64CallAddress: { + DirectCEntryStub stub(isolate()); + stub.GenerateCall(masm(), i.InputRegister(0)); + break; + } + case kArm64Claim: { + int words = MiscField::decode(instr->opcode()); + __ Claim(words); + break; + } + case kArm64Poke: { + int slot = MiscField::decode(instr->opcode()); + Operand operand(slot * kPointerSize); + __ Poke(i.InputRegister(0), operand); + break; + } + case kArm64PokePairZero: { + // TODO(dcarney): test slot offset and register order. 
+ int slot = MiscField::decode(instr->opcode()) - 1; + __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize); + break; + } + case kArm64PokePair: { + int slot = MiscField::decode(instr->opcode()) - 1; + __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize); + break; + } + case kArm64Drop: { + int words = MiscField::decode(instr->opcode()); + __ Drop(words); + break; + } + case kArm64Cmp: + __ Cmp(i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64Cmp32: + __ Cmp(i.InputRegister32(0), i.InputOperand32(1)); + break; + case kArm64Tst: + __ Tst(i.InputRegister(0), i.InputOperand(1)); + break; + case kArm64Tst32: + __ Tst(i.InputRegister32(0), i.InputOperand32(1)); + break; + case kArm64Float64Cmp: + __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kArm64Float64Add: + __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kArm64Float64Sub: + __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kArm64Float64Mul: + __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kArm64Float64Div: + __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kArm64Float64Mod: { + // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc + FrameScope scope(masm(), StackFrame::MANUAL); + DCHECK(d0.is(i.InputDoubleRegister(0))); + DCHECK(d1.is(i.InputDoubleRegister(1))); + DCHECK(d0.is(i.OutputDoubleRegister())); + // TODO(dcarney): make sure this saves all relevant registers. 
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), + 0, 2); + break; + } + case kArm64Int32ToInt64: + __ Sxtw(i.OutputRegister(), i.InputRegister(0)); + break; + case kArm64Int64ToInt32: + if (!i.OutputRegister().is(i.InputRegister(0))) { + __ Mov(i.OutputRegister(), i.InputRegister(0)); + } + break; + case kArm64Float64ToInt32: + __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0)); + break; + case kArm64Float64ToUint32: + __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0)); + break; + case kArm64Int32ToFloat64: + __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0)); + break; + case kArm64Uint32ToFloat64: + __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0)); + break; + case kArm64LoadWord8: + __ Ldrb(i.OutputRegister(), i.MemoryOperand()); + break; + case kArm64StoreWord8: + __ Strb(i.InputRegister(2), i.MemoryOperand()); + break; + case kArm64LoadWord16: + __ Ldrh(i.OutputRegister(), i.MemoryOperand()); + break; + case kArm64StoreWord16: + __ Strh(i.InputRegister(2), i.MemoryOperand()); + break; + case kArm64LoadWord32: + __ Ldr(i.OutputRegister32(), i.MemoryOperand()); + break; + case kArm64StoreWord32: + __ Str(i.InputRegister32(2), i.MemoryOperand()); + break; + case kArm64LoadWord64: + __ Ldr(i.OutputRegister(), i.MemoryOperand()); + break; + case kArm64StoreWord64: + __ Str(i.InputRegister(2), i.MemoryOperand()); + break; + case kArm64Float64Load: + __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand()); + break; + case kArm64Float64Store: + __ Str(i.InputDoubleRegister(2), i.MemoryOperand()); + break; + case kArm64StoreWriteBarrier: { + Register object = i.InputRegister(0); + Register index = i.InputRegister(1); + Register value = i.InputRegister(2); + __ Add(index, object, Operand(index, SXTW)); + __ Str(value, MemOperand(index)); + SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters() + ? kSaveFPRegs + : kDontSaveFPRegs; + // TODO(dcarney): we shouldn't test write barriers from c calls. 
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved; + UseScratchRegisterScope scope(masm()); + Register temp = no_reg; + if (csp.is(masm()->StackPointer())) { + temp = scope.AcquireX(); + lr_status = kLRHasBeenSaved; + __ Push(lr, temp); // Need to push a pair + } + __ RecordWrite(object, index, value, lr_status, mode); + if (csp.is(masm()->StackPointer())) { + __ Pop(temp, lr); + } + break; + } + } +} + + +// Assemble branches after this instruction. +void CodeGenerator::AssembleArchBranch(Instruction* instr, + FlagsCondition condition) { + Arm64OperandConverter i(this, instr); + Label done; + + // Emit a branch. The true and false targets are always the last two inputs + // to the instruction. + BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2); + BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1); + bool fallthru = IsNextInAssemblyOrder(fblock); + Label* tlabel = code()->GetLabel(tblock); + Label* flabel = fallthru ? &done : code()->GetLabel(fblock); + switch (condition) { + case kUnorderedEqual: + __ B(vs, flabel); + // Fall through. + case kEqual: + __ B(eq, tlabel); + break; + case kUnorderedNotEqual: + __ B(vs, tlabel); + // Fall through. + case kNotEqual: + __ B(ne, tlabel); + break; + case kSignedLessThan: + __ B(lt, tlabel); + break; + case kSignedGreaterThanOrEqual: + __ B(ge, tlabel); + break; + case kSignedLessThanOrEqual: + __ B(le, tlabel); + break; + case kSignedGreaterThan: + __ B(gt, tlabel); + break; + case kUnorderedLessThan: + __ B(vs, flabel); + // Fall through. + case kUnsignedLessThan: + __ B(lo, tlabel); + break; + case kUnorderedGreaterThanOrEqual: + __ B(vs, tlabel); + // Fall through. + case kUnsignedGreaterThanOrEqual: + __ B(hs, tlabel); + break; + case kUnorderedLessThanOrEqual: + __ B(vs, flabel); + // Fall through. + case kUnsignedLessThanOrEqual: + __ B(ls, tlabel); + break; + case kUnorderedGreaterThan: + __ B(vs, tlabel); + // Fall through. 
+ case kUnsignedGreaterThan: + __ B(hi, tlabel); + break; + case kOverflow: + __ B(vs, tlabel); + break; + case kNotOverflow: + __ B(vc, tlabel); + break; + } + if (!fallthru) __ B(flabel); // no fallthru to flabel. + __ Bind(&done); +} + + +// Assemble boolean materializations after this instruction. +void CodeGenerator::AssembleArchBoolean(Instruction* instr, + FlagsCondition condition) { + Arm64OperandConverter i(this, instr); + Label done; + + // Materialize a full 64-bit 1 or 0 value. The result register is always the + // last output of the instruction. + Label check; + DCHECK_NE(0, instr->OutputCount()); + Register reg = i.OutputRegister(instr->OutputCount() - 1); + Condition cc = nv; + switch (condition) { + case kUnorderedEqual: + __ B(vc, &check); + __ Mov(reg, 0); + __ B(&done); + // Fall through. + case kEqual: + cc = eq; + break; + case kUnorderedNotEqual: + __ B(vc, &check); + __ Mov(reg, 1); + __ B(&done); + // Fall through. + case kNotEqual: + cc = ne; + break; + case kSignedLessThan: + cc = lt; + break; + case kSignedGreaterThanOrEqual: + cc = ge; + break; + case kSignedLessThanOrEqual: + cc = le; + break; + case kSignedGreaterThan: + cc = gt; + break; + case kUnorderedLessThan: + __ B(vc, &check); + __ Mov(reg, 0); + __ B(&done); + // Fall through. + case kUnsignedLessThan: + cc = lo; + break; + case kUnorderedGreaterThanOrEqual: + __ B(vc, &check); + __ Mov(reg, 1); + __ B(&done); + // Fall through. + case kUnsignedGreaterThanOrEqual: + cc = hs; + break; + case kUnorderedLessThanOrEqual: + __ B(vc, &check); + __ Mov(reg, 0); + __ B(&done); + // Fall through. + case kUnsignedLessThanOrEqual: + cc = ls; + break; + case kUnorderedGreaterThan: + __ B(vc, &check); + __ Mov(reg, 1); + __ B(&done); + // Fall through. 
+ case kUnsignedGreaterThan: + cc = hi; + break; + case kOverflow: + cc = vs; + break; + case kNotOverflow: + cc = vc; + break; + } + __ bind(&check); + __ Cset(reg, cc); + __ Bind(&done); +} + + +// TODO(dcarney): increase stack slots in frame once before first use. +static int AlignedStackSlots(int stack_slots) { + if (stack_slots & 1) stack_slots++; + return stack_slots; +} + + +void CodeGenerator::AssemblePrologue() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + __ SetStackPointer(csp); + __ Push(lr, fp); + __ Mov(fp, csp); + // TODO(dcarney): correct callee saved registers. + __ PushCalleeSavedRegisters(); + frame()->SetRegisterSaveAreaSize(20 * kPointerSize); + } else if (descriptor->IsJSFunctionCall()) { + CompilationInfo* info = linkage()->info(); + __ SetStackPointer(jssp); + __ Prologue(info->IsCodePreAgingActive()); + frame()->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? + if (info->strict_mode() == SLOPPY && !info->is_native()) { + Label ok; + // +2 for return address and saved frame pointer. 
+ int receiver_slot = info->scope()->num_parameters() + 2; + __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize)); + __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); + __ Ldr(x10, GlobalObjectMemOperand()); + __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset)); + __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize)); + __ Bind(&ok); + } + + } else { + __ SetStackPointer(jssp); + __ StubPrologue(); + frame()->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + } + int stack_slots = frame()->GetSpillSlotCount(); + if (stack_slots > 0) { + Register sp = __ StackPointer(); + if (!sp.Is(csp)) { + __ Sub(sp, sp, stack_slots * kPointerSize); + } + __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize); + } +} + + +void CodeGenerator::AssembleReturn() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + if (frame()->GetRegisterSaveAreaSize() > 0) { + // Remove this frame's spill slots first. + int stack_slots = frame()->GetSpillSlotCount(); + if (stack_slots > 0) { + __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize); + } + // Restore registers. + // TODO(dcarney): correct callee saved registers. + __ PopCalleeSavedRegisters(); + } + __ Mov(csp, fp); + __ Pop(fp, lr); + __ Ret(); + } else { + __ Mov(jssp, fp); + __ Pop(fp, lr); + int pop_count = + descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0; + __ Drop(pop_count); + __ Ret(); + } +} + + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + Arm64OperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. 
+ if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + __ Mov(g.ToRegister(destination), src); + } else { + __ Str(src, g.ToMemOperand(destination, masm())); + } + } else if (source->IsStackSlot()) { + MemOperand src = g.ToMemOperand(source, masm()); + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + if (destination->IsRegister()) { + __ Ldr(g.ToRegister(destination), src); + } else { + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireX(); + __ Ldr(temp, src); + __ Str(temp, g.ToMemOperand(destination, masm())); + } + } else if (source->IsConstant()) { + ConstantOperand* constant_source = ConstantOperand::cast(source); + if (destination->IsRegister() || destination->IsStackSlot()) { + UseScratchRegisterScope scope(masm()); + Register dst = destination->IsRegister() ? g.ToRegister(destination) + : scope.AcquireX(); + Constant src = g.ToConstant(source); + if (src.type() == Constant::kHeapObject) { + __ LoadObject(dst, src.ToHeapObject()); + } else { + __ Mov(dst, g.ToImmediate(source)); + } + if (destination->IsStackSlot()) { + __ Str(dst, g.ToMemOperand(destination, masm())); + } + } else if (destination->IsDoubleRegister()) { + FPRegister result = g.ToDoubleRegister(destination); + __ Fmov(result, g.ToDouble(constant_source)); + } else { + DCHECK(destination->IsDoubleStackSlot()); + UseScratchRegisterScope scope(masm()); + FPRegister temp = scope.AcquireD(); + __ Fmov(temp, g.ToDouble(constant_source)); + __ Str(temp, g.ToMemOperand(destination, masm())); + } + } else if (source->IsDoubleRegister()) { + FPRegister src = g.ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + FPRegister dst = g.ToDoubleRegister(destination); + __ Fmov(dst, src); + } else { + DCHECK(destination->IsDoubleStackSlot()); + __ Str(src, g.ToMemOperand(destination, masm())); + } + } else if (source->IsDoubleStackSlot()) 
{ + DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); + MemOperand src = g.ToMemOperand(source, masm()); + if (destination->IsDoubleRegister()) { + __ Ldr(g.ToDoubleRegister(destination), src); + } else { + UseScratchRegisterScope scope(masm()); + FPRegister temp = scope.AcquireD(); + __ Ldr(temp, src); + __ Str(temp, g.ToMemOperand(destination, masm())); + } + } else { + UNREACHABLE(); + } +} + + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + Arm64OperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister()) { + // Register-register. + UseScratchRegisterScope scope(masm()); + Register temp = scope.AcquireX(); + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ Mov(temp, src); + __ Mov(src, dst); + __ Mov(dst, temp); + } else { + DCHECK(destination->IsStackSlot()); + MemOperand dst = g.ToMemOperand(destination, masm()); + __ Mov(temp, src); + __ Ldr(src, dst); + __ Str(temp, dst); + } + } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) { + UseScratchRegisterScope scope(masm()); + CPURegister temp_0 = scope.AcquireX(); + CPURegister temp_1 = scope.AcquireX(); + MemOperand src = g.ToMemOperand(source, masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); + __ Ldr(temp_0, src); + __ Ldr(temp_1, dst); + __ Str(temp_0, dst); + __ Str(temp_1, src); + } else if (source->IsDoubleRegister()) { + UseScratchRegisterScope scope(masm()); + FPRegister temp = scope.AcquireD(); + FPRegister src = g.ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + FPRegister dst = g.ToDoubleRegister(destination); + __ Fmov(temp, src); + __ Fmov(src, dst); + __ Fmov(src, temp); + } else { + DCHECK(destination->IsDoubleStackSlot()); + MemOperand dst = g.ToMemOperand(destination, masm()); + __ Fmov(temp, src); + 
__ Ldr(src, dst); + __ Str(temp, dst); + } + } else { + // No other combinations are possible. + UNREACHABLE(); + } +} + + +void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); } + +#undef __ + +#if DEBUG + +// Checks whether the code between start_pc and end_pc is a no-op. +bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, + int end_pc) { + if (start_pc + 4 != end_pc) { + return false; + } + Address instr_address = code->instruction_start() + start_pc; + + v8::internal::Instruction* instr = + reinterpret_cast<v8::internal::Instruction*>(instr_address); + return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits(); +} + +#endif // DEBUG + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm64/instruction-codes-arm64.h nodejs-0.11.15/deps/v8/src/compiler/arm64/instruction-codes-arm64.h --- nodejs-0.11.13/deps/v8/src/compiler/arm64/instruction-codes-arm64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm64/instruction-codes-arm64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,103 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ +#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// ARM64-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. 
+#define TARGET_ARCH_OPCODE_LIST(V) \ + V(Arm64Add) \ + V(Arm64Add32) \ + V(Arm64And) \ + V(Arm64And32) \ + V(Arm64Cmp) \ + V(Arm64Cmp32) \ + V(Arm64Tst) \ + V(Arm64Tst32) \ + V(Arm64Or) \ + V(Arm64Or32) \ + V(Arm64Xor) \ + V(Arm64Xor32) \ + V(Arm64Sub) \ + V(Arm64Sub32) \ + V(Arm64Mul) \ + V(Arm64Mul32) \ + V(Arm64Idiv) \ + V(Arm64Idiv32) \ + V(Arm64Udiv) \ + V(Arm64Udiv32) \ + V(Arm64Imod) \ + V(Arm64Imod32) \ + V(Arm64Umod) \ + V(Arm64Umod32) \ + V(Arm64Not) \ + V(Arm64Not32) \ + V(Arm64Neg) \ + V(Arm64Neg32) \ + V(Arm64Shl) \ + V(Arm64Shl32) \ + V(Arm64Shr) \ + V(Arm64Shr32) \ + V(Arm64Sar) \ + V(Arm64Sar32) \ + V(Arm64CallCodeObject) \ + V(Arm64CallJSFunction) \ + V(Arm64CallAddress) \ + V(Arm64Claim) \ + V(Arm64Poke) \ + V(Arm64PokePairZero) \ + V(Arm64PokePair) \ + V(Arm64Drop) \ + V(Arm64Float64Cmp) \ + V(Arm64Float64Add) \ + V(Arm64Float64Sub) \ + V(Arm64Float64Mul) \ + V(Arm64Float64Div) \ + V(Arm64Float64Mod) \ + V(Arm64Int32ToInt64) \ + V(Arm64Int64ToInt32) \ + V(Arm64Float64ToInt32) \ + V(Arm64Float64ToUint32) \ + V(Arm64Int32ToFloat64) \ + V(Arm64Uint32ToFloat64) \ + V(Arm64Float64Load) \ + V(Arm64Float64Store) \ + V(Arm64LoadWord8) \ + V(Arm64StoreWord8) \ + V(Arm64LoadWord16) \ + V(Arm64StoreWord16) \ + V(Arm64LoadWord32) \ + V(Arm64StoreWord32) \ + V(Arm64LoadWord64) \ + V(Arm64StoreWord64) \ + V(Arm64StoreWriteBarrier) + + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. 
+// +// We use the following local notation for addressing modes: +// +// R = register +// O = register or stack slot +// D = double register +// I = immediate (handle, external, int32) +// MRI = [register + immediate] +// MRR = [register + register] +#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(MRI) /* [%r0 + K] */ \ + V(MRR) /* [%r0 + %r1] */ + +} // namespace internal +} // namespace compiler +} // namespace v8 + +#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc nodejs-0.11.15/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc --- nodejs-0.11.13/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,667 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" + +namespace v8 { +namespace internal { +namespace compiler { + +enum ImmediateMode { + kArithimeticImm, // 12 bit unsigned immediate shifted left 0 or 12 bits + kShift32Imm, // 0 - 31 + kShift64Imm, // 0 -63 + kLogical32Imm, + kLogical64Imm, + kLoadStoreImm, // unsigned 9 bit or signed 7 bit + kNoImmediate +}; + + +// Adds Arm64-specific methods for generating operands. 
+class Arm64OperandGenerator V8_FINAL : public OperandGenerator { + public: + explicit Arm64OperandGenerator(InstructionSelector* selector) + : OperandGenerator(selector) {} + + InstructionOperand* UseOperand(Node* node, ImmediateMode mode) { + if (CanBeImmediate(node, mode)) { + return UseImmediate(node); + } + return UseRegister(node); + } + + bool CanBeImmediate(Node* node, ImmediateMode mode) { + int64_t value; + switch (node->opcode()) { + // TODO(turbofan): SMI number constants as immediates. + case IrOpcode::kInt32Constant: + value = ValueOf<int32_t>(node->op()); + break; + default: + return false; + } + unsigned ignored; + switch (mode) { + case kLogical32Imm: + // TODO(dcarney): some unencodable values can be handled by + // switching instructions. + return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32, + &ignored, &ignored, &ignored); + case kLogical64Imm: + return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64, + &ignored, &ignored, &ignored); + case kArithimeticImm: + // TODO(dcarney): -values can be handled by instruction swapping + return Assembler::IsImmAddSub(value); + case kShift32Imm: + return 0 <= value && value < 31; + case kShift64Imm: + return 0 <= value && value < 63; + case kLoadStoreImm: + return (0 <= value && value < (1 << 9)) || + (-(1 << 6) <= value && value < (1 << 6)); + case kNoImmediate: + return false; + } + return false; + } +}; + + +static void VisitRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + Arm64OperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + Arm64OperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1))); +} + + +static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + 
Arm64OperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsDoubleRegister(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, + Node* node, ImmediateMode operand_mode) { + Arm64OperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseOperand(node->InputAt(1), operand_mode)); +} + + +// Shared routine for multiple binary operations. +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, ImmediateMode operand_mode, + FlagsContinuation* cont) { + Arm64OperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand* inputs[4]; + size_t input_count = 0; + InstructionOperand* outputs[2]; + size_t output_count = 0; + + inputs[input_count++] = g.UseRegister(m.left().node()); + inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode); + + if (cont->IsBranch()) { + inputs[input_count++] = g.Label(cont->true_block()); + inputs[input_count++] = g.Label(cont->false_block()); + } + + outputs[output_count++] = g.DefineAsRegister(node); + if (cont->IsSet()) { + outputs[output_count++] = g.DefineAsRegister(cont->result()); + } + + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); + DCHECK_GE(ARRAY_SIZE(inputs), input_count); + DCHECK_GE(ARRAY_SIZE(outputs), output_count); + + Instruction* instr = selector->Emit(cont->Encode(opcode), output_count, + outputs, input_count, inputs); + if (cont->IsBranch()) instr->MarkAsControl(); +} + + +// Shared routine for multiple binary operations. 
+static void VisitBinop(InstructionSelector* selector, Node* node, + ArchOpcode opcode, ImmediateMode operand_mode) { + FlagsContinuation cont; + VisitBinop(selector, node, opcode, operand_mode, &cont); +} + + +void InstructionSelector::VisitLoad(Node* node) { + MachineType rep = OpParameter<MachineType>(node); + Arm64OperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + InstructionOperand* result = rep == kMachineFloat64 + ? g.DefineAsDoubleRegister(node) + : g.DefineAsRegister(node); + + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kArm64Float64Load; + break; + case kMachineWord8: + opcode = kArm64LoadWord8; + break; + case kMachineWord16: + opcode = kArm64LoadWord16; + break; + case kMachineWord32: + opcode = kArm64LoadWord32; + break; + case kMachineTagged: // Fall through. + case kMachineWord64: + opcode = kArm64LoadWord64; + break; + default: + UNREACHABLE(); + return; + } + if (g.CanBeImmediate(index, kLoadStoreImm)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), result, + g.UseRegister(base), g.UseImmediate(index)); + } else if (g.CanBeImmediate(base, kLoadStoreImm)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), result, + g.UseRegister(index), g.UseImmediate(base)); + } else { + Emit(opcode | AddressingModeField::encode(kMode_MRR), result, + g.UseRegister(base), g.UseRegister(index)); + } +} + + +void InstructionSelector::VisitStore(Node* node) { + Arm64OperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node); + MachineType rep = store_rep.rep; + if (store_rep.write_barrier_kind == kFullWriteBarrier) { + DCHECK(rep == kMachineTagged); + // TODO(dcarney): refactor RecordWrite function to take temp registers + // and pass them here instead of using fixed regs + // TODO(dcarney): handle immediate indices. 
+ InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)}; + Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10), + g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps), + temps); + return; + } + DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind); + InstructionOperand* val; + if (rep == kMachineFloat64) { + val = g.UseDoubleRegister(value); + } else { + val = g.UseRegister(value); + } + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kArm64Float64Store; + break; + case kMachineWord8: + opcode = kArm64StoreWord8; + break; + case kMachineWord16: + opcode = kArm64StoreWord16; + break; + case kMachineWord32: + opcode = kArm64StoreWord32; + break; + case kMachineTagged: // Fall through. + case kMachineWord64: + opcode = kArm64StoreWord64; + break; + default: + UNREACHABLE(); + return; + } + if (g.CanBeImmediate(index, kLoadStoreImm)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, + g.UseRegister(base), g.UseImmediate(index), val); + } else if (g.CanBeImmediate(base, kLoadStoreImm)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, + g.UseRegister(index), g.UseImmediate(base), val); + } else { + Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL, + g.UseRegister(base), g.UseRegister(index), val); + } +} + + +void InstructionSelector::VisitWord32And(Node* node) { + VisitBinop(this, node, kArm64And32, kLogical32Imm); +} + + +void InstructionSelector::VisitWord64And(Node* node) { + VisitBinop(this, node, kArm64And, kLogical64Imm); +} + + +void InstructionSelector::VisitWord32Or(Node* node) { + VisitBinop(this, node, kArm64Or32, kLogical32Imm); +} + + +void InstructionSelector::VisitWord64Or(Node* node) { + VisitBinop(this, node, kArm64Or, kLogical64Imm); +} + + +template <typename T> +static void VisitXor(InstructionSelector* selector, Node* node, + ArchOpcode xor_opcode, ArchOpcode not_opcode) { + Arm64OperandGenerator g(selector); + BinopMatcher<IntMatcher<T>, 
IntMatcher<T> > m(node); + if (m.right().Is(-1)) { + selector->Emit(not_opcode, g.DefineAsRegister(node), + g.UseRegister(m.left().node())); + } else { + VisitBinop(selector, node, xor_opcode, kLogical32Imm); + } +} + + +void InstructionSelector::VisitWord32Xor(Node* node) { + VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32); +} + + +void InstructionSelector::VisitWord64Xor(Node* node) { + VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not); +} + + +void InstructionSelector::VisitWord32Shl(Node* node) { + VisitRRO(this, kArm64Shl32, node, kShift32Imm); +} + + +void InstructionSelector::VisitWord64Shl(Node* node) { + VisitRRO(this, kArm64Shl, node, kShift64Imm); +} + + +void InstructionSelector::VisitWord32Shr(Node* node) { + VisitRRO(this, kArm64Shr32, node, kShift32Imm); +} + + +void InstructionSelector::VisitWord64Shr(Node* node) { + VisitRRO(this, kArm64Shr, node, kShift64Imm); +} + + +void InstructionSelector::VisitWord32Sar(Node* node) { + VisitRRO(this, kArm64Sar32, node, kShift32Imm); +} + + +void InstructionSelector::VisitWord64Sar(Node* node) { + VisitRRO(this, kArm64Sar, node, kShift64Imm); +} + + +void InstructionSelector::VisitInt32Add(Node* node) { + VisitBinop(this, node, kArm64Add32, kArithimeticImm); +} + + +void InstructionSelector::VisitInt64Add(Node* node) { + VisitBinop(this, node, kArm64Add, kArithimeticImm); +} + + +template <typename T> +static void VisitSub(InstructionSelector* selector, Node* node, + ArchOpcode sub_opcode, ArchOpcode neg_opcode) { + Arm64OperandGenerator g(selector); + BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); + if (m.left().Is(0)) { + selector->Emit(neg_opcode, g.DefineAsRegister(node), + g.UseRegister(m.right().node())); + } else { + VisitBinop(selector, node, sub_opcode, kArithimeticImm); + } +} + + +void InstructionSelector::VisitInt32Sub(Node* node) { + VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32); +} + + +void InstructionSelector::VisitInt64Sub(Node* node) { + VisitSub<int64_t>(this, node, 
kArm64Sub, kArm64Neg); +} + + +void InstructionSelector::VisitInt32Mul(Node* node) { + VisitRRR(this, kArm64Mul32, node); +} + + +void InstructionSelector::VisitInt64Mul(Node* node) { + VisitRRR(this, kArm64Mul, node); +} + + +void InstructionSelector::VisitInt32Div(Node* node) { + VisitRRR(this, kArm64Idiv32, node); +} + + +void InstructionSelector::VisitInt64Div(Node* node) { + VisitRRR(this, kArm64Idiv, node); +} + + +void InstructionSelector::VisitInt32UDiv(Node* node) { + VisitRRR(this, kArm64Udiv32, node); +} + + +void InstructionSelector::VisitInt64UDiv(Node* node) { + VisitRRR(this, kArm64Udiv, node); +} + + +void InstructionSelector::VisitInt32Mod(Node* node) { + VisitRRR(this, kArm64Imod32, node); +} + + +void InstructionSelector::VisitInt64Mod(Node* node) { + VisitRRR(this, kArm64Imod, node); +} + + +void InstructionSelector::VisitInt32UMod(Node* node) { + VisitRRR(this, kArm64Umod32, node); +} + + +void InstructionSelector::VisitInt64UMod(Node* node) { + VisitRRR(this, kArm64Umod, node); +} + + +void InstructionSelector::VisitConvertInt32ToInt64(Node* node) { + VisitRR(this, kArm64Int32ToInt64, node); +} + + +void InstructionSelector::VisitConvertInt64ToInt32(Node* node) { + VisitRR(this, kArm64Int64ToInt32, node); +} + + +void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { + Arm64OperandGenerator g(this); + Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { + Arm64OperandGenerator g(this); + Emit(kArm64Uint32ToFloat64, g.DefineAsDoubleRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { + Arm64OperandGenerator g(this); + Emit(kArm64Float64ToInt32, g.DefineAsRegister(node), + g.UseDoubleRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { + Arm64OperandGenerator g(this); + Emit(kArm64Float64ToUint32, 
g.DefineAsRegister(node), + g.UseDoubleRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitFloat64Add(Node* node) { + VisitRRRFloat64(this, kArm64Float64Add, node); +} + + +void InstructionSelector::VisitFloat64Sub(Node* node) { + VisitRRRFloat64(this, kArm64Float64Sub, node); +} + + +void InstructionSelector::VisitFloat64Mul(Node* node) { + VisitRRRFloat64(this, kArm64Float64Mul, node); +} + + +void InstructionSelector::VisitFloat64Div(Node* node) { + VisitRRRFloat64(this, kArm64Float64Div, node); +} + + +void InstructionSelector::VisitFloat64Mod(Node* node) { + Arm64OperandGenerator g(this); + Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0), + g.UseFixedDouble(node->InputAt(0), d0), + g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall(); +} + + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont); +} + + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont); +} + + +// Shared routine for multiple compare operations. +static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, + InstructionOperand* left, InstructionOperand* right, + FlagsContinuation* cont) { + Arm64OperandGenerator g(selector); + opcode = cont->Encode(opcode); + if (cont->IsBranch()) { + selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()), + g.Label(cont->false_block()))->MarkAsControl(); + } else { + DCHECK(cont->IsSet()); + selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right); + } +} + + +// Shared routine for multiple word compare operations. 
+static void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + bool commutative) { + Arm64OperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // Match immediates on left or right side of comparison. + if (g.CanBeImmediate(right, kArithimeticImm)) { + VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), + cont); + } else if (g.CanBeImmediate(left, kArithimeticImm)) { + if (!commutative) cont->Commute(); + VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), + cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), + cont); + } +} + + +void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) { + switch (node->opcode()) { + case IrOpcode::kWord32And: + return VisitWordCompare(this, node, kArm64Tst32, cont, true); + default: + break; + } + + Arm64OperandGenerator g(this); + VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node), + cont); +} + + +void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) { + switch (node->opcode()) { + case IrOpcode::kWord64And: + return VisitWordCompare(this, node, kArm64Tst, cont, true); + default: + break; + } + + Arm64OperandGenerator g(this); + VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont); +} + + +void InstructionSelector::VisitWord32Compare(Node* node, + FlagsContinuation* cont) { + VisitWordCompare(this, node, kArm64Cmp32, cont, false); +} + + +void InstructionSelector::VisitWord64Compare(Node* node, + FlagsContinuation* cont) { + VisitWordCompare(this, node, kArm64Cmp, cont, false); +} + + +void InstructionSelector::VisitFloat64Compare(Node* node, + FlagsContinuation* cont) { + Arm64OperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left), + 
g.UseDoubleRegister(right), cont); +} + + +void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, + BasicBlock* deoptimization) { + Arm64OperandGenerator g(this); + CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call); + CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here? + + // Compute InstructionOperands for inputs and outputs. + // TODO(turbofan): on ARM64 it's probably better to use the code object in a + // register if there are multiple uses of it. Improve constant pool and the + // heuristics in the register allocator for where to emit constants. + InitializeCallBuffer(call, &buffer, true, false, continuation, + deoptimization); + + // Push the arguments to the stack. + bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress; + bool pushed_count_uneven = buffer.pushed_count & 1; + int aligned_push_count = buffer.pushed_count; + if (is_c_frame && pushed_count_uneven) { + aligned_push_count++; + } + // TODO(dcarney): claim and poke probably take small immediates, + // loop here or whatever. + // Bump the stack pointer(s). + if (aligned_push_count > 0) { + // TODO(dcarney): it would be better to bump the csp here only + // and emit paired stores with increment for non c frames. + Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL); + } + // Move arguments to the stack. + { + int slot = buffer.pushed_count - 1; + // Emit the uneven pushes. + if (pushed_count_uneven) { + Node* input = buffer.pushed_nodes[slot]; + ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke; + Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input)); + slot--; + } + // Now all pushes can be done in pairs. + for (; slot >= 0; slot -= 2) { + Emit(kArm64PokePair | MiscField::encode(slot), NULL, + g.UseRegister(buffer.pushed_nodes[slot]), + g.UseRegister(buffer.pushed_nodes[slot - 1])); + } + } + + // Select the appropriate opcode based on the call type. 
+ InstructionCode opcode; + switch (descriptor->kind()) { + case CallDescriptor::kCallCodeObject: { + bool lazy_deopt = descriptor->CanLazilyDeoptimize(); + opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0); + break; + } + case CallDescriptor::kCallAddress: + opcode = kArm64CallAddress; + break; + case CallDescriptor::kCallJSFunction: + opcode = kArm64CallJSFunction; + break; + default: + UNREACHABLE(); + return; + } + + // Emit the call instruction. + Instruction* call_instr = + Emit(opcode, buffer.output_count, buffer.outputs, + buffer.fixed_and_control_count(), buffer.fixed_and_control_args); + + call_instr->MarkAsCall(); + if (deoptimization != NULL) { + DCHECK(continuation != NULL); + call_instr->MarkAsControl(); + } + + // Caller clean up of stack for C-style calls. + if (is_c_frame && aligned_push_count > 0) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL); + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/arm64/linkage-arm64.cc nodejs-0.11.15/deps/v8/src/compiler/arm64/linkage-arm64.cc --- nodejs-0.11.13/deps/v8/src/compiler/arm64/linkage-arm64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/arm64/linkage-arm64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/code-stubs.h" +#include "src/compiler/linkage.h" +#include "src/compiler/linkage-impl.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +struct LinkageHelperTraits { + static Register ReturnValueReg() { return x0; } + static Register ReturnValue2Reg() { return x1; } + static Register JSCallFunctionReg() { return x1; } + static Register ContextReg() { return cp; } + static Register RuntimeCallFunctionReg() { return x1; } + static Register RuntimeCallArgCountReg() { return x0; } + static RegList CCalleeSaveRegisters() { + // TODO(dcarney): correct callee saved registers. + return 0; + } + static Register CRegisterParameter(int i) { + static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7}; + return register_parameters[i]; + } + static int CRegisterParametersLength() { return 8; } +}; + + +CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) { + return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>( + zone, parameter_count); +} + + +CallDescriptor* Linkage::GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>( + zone, function, parameter_count, properties, can_deoptimize); +} + + +CallDescriptor* Linkage::GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>( + zone, descriptor, stack_parameter_count, can_deoptimize); +} + + +CallDescriptor* Linkage::GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types) { + return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>( + zone, 
num_params, return_type, param_types); +} + +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/ast-graph-builder.cc nodejs-0.11.15/deps/v8/src/compiler/ast-graph-builder.cc --- nodejs-0.11.13/deps/v8/src/compiler/ast-graph-builder.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/ast-graph-builder.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2055 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/ast-graph-builder.h" + +#include "src/compiler.h" +#include "src/compiler/control-builders.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node-properties-inl.h" +#include "src/full-codegen.h" +#include "src/parser.h" +#include "src/scopes.h" + +namespace v8 { +namespace internal { +namespace compiler { + +AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph) + : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()), + info_(info), + jsgraph_(jsgraph), + globals_(0, info->zone()), + breakable_(NULL), + execution_context_(NULL) { + InitializeAstVisitor(info->zone()); +} + + +Node* AstGraphBuilder::GetFunctionClosure() { + if (!function_closure_.is_set()) { + // Parameter -1 is special for the function closure + Operator* op = common()->Parameter(-1); + Node* node = NewNode(op, graph()->start()); + function_closure_.set(node); + } + return function_closure_.get(); +} + + +Node* AstGraphBuilder::GetFunctionContext() { + if (!function_context_.is_set()) { + // Parameter (arity + 1) is special for the outer context of the function + Operator* op = common()->Parameter(info()->num_parameters() + 1); + Node* node = NewNode(op, graph()->start()); + function_context_.set(node); + } + return function_context_.get(); +} + + +bool AstGraphBuilder::CreateGraph() { + Scope* scope = info()->scope(); + DCHECK(graph() != 
NULL); + + // Set up the basic structure of the graph. + int parameter_count = info()->num_parameters(); + graph()->SetStart(graph()->NewNode(common()->Start(parameter_count))); + + // Initialize the top-level environment. + Environment env(this, scope, graph()->start()); + set_environment(&env); + + // Build node to initialize local function context. + Node* closure = GetFunctionClosure(); + Node* outer = GetFunctionContext(); + Node* inner = BuildLocalFunctionContext(outer, closure); + + // Push top-level function scope for the function body. + ContextScope top_context(this, scope, inner); + + // Build the arguments object if it is used. + BuildArgumentsObject(scope->arguments()); + + // Emit tracing call if requested to do so. + if (FLAG_trace) { + NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0)); + } + + // Visit implicit declaration of the function name. + if (scope->is_function_scope() && scope->function() != NULL) { + VisitVariableDeclaration(scope->function()); + } + + // Visit declarations within the function scope. + VisitDeclarations(scope->declarations()); + + // TODO(mstarzinger): This should do an inlined stack check. + NewNode(javascript()->Runtime(Runtime::kStackGuard, 0)); + + // Visit statements in the function body. + VisitStatements(info()->function()->body()); + if (HasStackOverflow()) return false; + + // Emit tracing call if requested to do so. + if (FLAG_trace) { + // TODO(mstarzinger): Only traces implicit return. + Node* return_value = jsgraph()->UndefinedConstant(); + NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value); + } + + // Return 'undefined' in case we can fall off the end. + Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant()); + UpdateControlDependencyToLeaveFunction(control); + + // Finish the basic structure of the graph. 
+ environment()->UpdateControlDependency(exit_control()); + graph()->SetEnd(NewNode(common()->End())); + + return true; +} + + +// Left-hand side can only be a property, a global or a variable slot. +enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + + +// Determine the left-hand side kind of an assignment. +static LhsKind DetermineLhsKind(Expression* expr) { + Property* property = expr->AsProperty(); + DCHECK(expr->IsValidReferenceExpression()); + LhsKind lhs_kind = + (property == NULL) ? VARIABLE : (property->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; + return lhs_kind; +} + + +// Helper to find an existing shared function info in the baseline code for the +// given function literal. Used to canonicalize SharedFunctionInfo objects. +static Handle<SharedFunctionInfo> SearchSharedFunctionInfo( + Code* unoptimized_code, FunctionLiteral* expr) { + int start_position = expr->start_position(); + for (RelocIterator it(unoptimized_code); !it.done(); it.next()) { + RelocInfo* rinfo = it.rinfo(); + if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue; + Object* obj = rinfo->target_object(); + if (obj->IsSharedFunctionInfo()) { + SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj); + if (shared->start_position() == start_position) { + return Handle<SharedFunctionInfo>(shared); + } + } + } + return Handle<SharedFunctionInfo>(); +} + + +StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment( + StructuredGraphBuilder::Environment* env) { + return new (zone()) Environment(*reinterpret_cast<Environment*>(env)); +} + + +AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder, + Scope* scope, + Node* control_dependency) + : StructuredGraphBuilder::Environment(builder, control_dependency), + parameters_count_(scope->num_parameters() + 1), + locals_count_(scope->num_stack_slots()), + parameters_node_(NULL), + locals_node_(NULL), + stack_node_(NULL), + parameters_dirty_(true), + locals_dirty_(true), + 
stack_dirty_(true) { + DCHECK_EQ(scope->num_parameters() + 1, parameters_count()); + + // Bind the receiver variable. + Node* receiver = builder->graph()->NewNode(common()->Parameter(0), + builder->graph()->start()); + values()->push_back(receiver); + + // Bind all parameter variables. The parameter indices are shifted by 1 + // (receiver is parameter index -1 but environment index 0). + for (int i = 0; i < scope->num_parameters(); ++i) { + Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1), + builder->graph()->start()); + values()->push_back(parameter); + } + + // Bind all local variables to undefined. + Node* undefined_constant = builder->jsgraph()->UndefinedConstant(); + values()->insert(values()->end(), locals_count(), undefined_constant); +} + + +AstGraphBuilder::Environment::Environment(const Environment& copy) + : StructuredGraphBuilder::Environment( + static_cast<StructuredGraphBuilder::Environment>(copy)), + parameters_count_(copy.parameters_count_), + locals_count_(copy.locals_count_), + parameters_node_(copy.parameters_node_), + locals_node_(copy.locals_node_), + stack_node_(copy.stack_node_), + parameters_dirty_(copy.parameters_dirty_), + locals_dirty_(copy.locals_dirty_), + stack_dirty_(copy.stack_dirty_) {} + + +Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) { + if (parameters_dirty_) { + Operator* op = common()->StateValues(parameters_count()); + if (parameters_count() != 0) { + Node** parameters = &values()->front(); + parameters_node_ = graph()->NewNode(op, parameters_count(), parameters); + } else { + parameters_node_ = graph()->NewNode(op); + } + parameters_dirty_ = false; + } + if (locals_dirty_) { + Operator* op = common()->StateValues(locals_count()); + if (locals_count() != 0) { + Node** locals = &values()->at(parameters_count_); + locals_node_ = graph()->NewNode(op, locals_count(), locals); + } else { + locals_node_ = graph()->NewNode(op); + } + locals_dirty_ = false; + } + if (stack_dirty_) { + 
Operator* op = common()->StateValues(stack_height()); + if (stack_height() != 0) { + Node** stack = &values()->at(parameters_count_ + locals_count_); + stack_node_ = graph()->NewNode(op, stack_height(), stack); + } else { + stack_node_ = graph()->NewNode(op); + } + stack_dirty_ = false; + } + + Operator* op = common()->FrameState(ast_id); + + return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_); +} + + +AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own, + Expression::Context kind, + BailoutId bailout_id) + : bailout_id_(bailout_id), + kind_(kind), + owner_(own), + outer_(own->ast_context()) { + owner()->set_ast_context(this); // Push. +#ifdef DEBUG + original_height_ = environment()->stack_height(); +#endif +} + + +AstGraphBuilder::AstContext::~AstContext() { + owner()->set_ast_context(outer_); // Pop. +} + + +AstGraphBuilder::AstEffectContext::~AstEffectContext() { + DCHECK(environment()->stack_height() == original_height_); +} + + +AstGraphBuilder::AstValueContext::~AstValueContext() { + DCHECK(environment()->stack_height() == original_height_ + 1); +} + + +AstGraphBuilder::AstTestContext::~AstTestContext() { + DCHECK(environment()->stack_height() == original_height_ + 1); +} + + +void AstGraphBuilder::AstEffectContext::ProduceValueWithLazyBailout( + Node* value) { + ProduceValue(value); + owner()->BuildLazyBailout(value, bailout_id_); +} + + +void AstGraphBuilder::AstValueContext::ProduceValueWithLazyBailout( + Node* value) { + ProduceValue(value); + owner()->BuildLazyBailout(value, bailout_id_); +} + + +void AstGraphBuilder::AstTestContext::ProduceValueWithLazyBailout(Node* value) { + environment()->Push(value); + owner()->BuildLazyBailout(value, bailout_id_); + environment()->Pop(); + ProduceValue(value); +} + + +void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) { + // The value is ignored. 
+} + + +void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) { + environment()->Push(value); +} + + +void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) { + environment()->Push(owner()->BuildToBoolean(value)); +} + + +Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; } + + +Node* AstGraphBuilder::AstValueContext::ConsumeValue() { + return environment()->Pop(); +} + + +Node* AstGraphBuilder::AstTestContext::ConsumeValue() { + return environment()->Pop(); +} + + +AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable( + BreakableStatement* target) { + BreakableScope* current = this; + while (current != NULL && current->target_ != target) { + owner_->environment()->Drop(current->drop_extra_); + current = current->next_; + } + DCHECK(current != NULL); // Always found (unless stack is malformed). + return current; +} + + +void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) { + FindBreakable(stmt)->control_->Break(); +} + + +void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) { + FindBreakable(stmt)->control_->Continue(); +} + + +void AstGraphBuilder::VisitForValueOrNull(Expression* expr) { + if (expr == NULL) { + return environment()->Push(jsgraph()->NullConstant()); + } + VisitForValue(expr); +} + + +void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) { + for (int i = 0; i < exprs->length(); ++i) { + VisitForValue(exprs->at(i)); + } +} + + +void AstGraphBuilder::VisitForValue(Expression* expr) { + AstValueContext for_value(this, expr->id()); + if (!HasStackOverflow()) { + expr->Accept(this); + } +} + + +void AstGraphBuilder::VisitForEffect(Expression* expr) { + AstEffectContext for_effect(this, expr->id()); + if (!HasStackOverflow()) { + expr->Accept(this); + } +} + + +void AstGraphBuilder::VisitForTest(Expression* expr) { + AstTestContext for_condition(this, expr->id()); + if (!HasStackOverflow()) { + expr->Accept(this); + } +} + + 
+void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) { + Variable* variable = decl->proxy()->var(); + VariableMode mode = decl->mode(); + bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET; + switch (variable->location()) { + case Variable::UNALLOCATED: { + Handle<Oddball> value = variable->binding_needs_init() + ? isolate()->factory()->the_hole_value() + : isolate()->factory()->undefined_value(); + globals()->Add(variable->name(), zone()); + globals()->Add(value, zone()); + break; + } + case Variable::PARAMETER: + case Variable::LOCAL: + if (hole_init) { + Node* value = jsgraph()->TheHoleConstant(); + environment()->Bind(variable, value); + } + break; + case Variable::CONTEXT: + if (hole_init) { + Node* value = jsgraph()->TheHoleConstant(); + Operator* op = javascript()->StoreContext(0, variable->index()); + NewNode(op, current_context(), value); + } + break; + case Variable::LOOKUP: + UNIMPLEMENTED(); + } +} + + +void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) { + Variable* variable = decl->proxy()->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info()); + // Check for stack-overflow exception. 
+ if (function.is_null()) return SetStackOverflow(); + globals()->Add(variable->name(), zone()); + globals()->Add(function, zone()); + break; + } + case Variable::PARAMETER: + case Variable::LOCAL: { + VisitForValue(decl->fun()); + Node* value = environment()->Pop(); + environment()->Bind(variable, value); + break; + } + case Variable::CONTEXT: { + VisitForValue(decl->fun()); + Node* value = environment()->Pop(); + Operator* op = javascript()->StoreContext(0, variable->index()); + NewNode(op, current_context(), value); + break; + } + case Variable::LOOKUP: + UNIMPLEMENTED(); + } +} + + +void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); } + + +void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); } + + +void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); } + + +void AstGraphBuilder::VisitBlock(Block* stmt) { + BlockBuilder block(this); + BreakableScope scope(this, stmt, &block, 0); + if (stmt->labels() != NULL) block.BeginBlock(); + if (stmt->scope() == NULL) { + // Visit statements in the same scope, no declarations. + VisitStatements(stmt->statements()); + } else { + Operator* op = javascript()->CreateBlockContext(); + Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo()); + Node* context = NewNode(op, scope_info, GetFunctionClosure()); + ContextScope scope(this, stmt->scope(), context); + + // Visit declarations and statements in a block scope. 
+ VisitDeclarations(stmt->scope()->declarations()); + VisitStatements(stmt->statements()); + } + if (stmt->labels() != NULL) block.EndBlock(); +} + + +void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) { + VisitForEffect(stmt->expression()); +} + + +void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { + // Do nothing. +} + + +void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) { + IfBuilder compare_if(this); + VisitForTest(stmt->condition()); + Node* condition = environment()->Pop(); + compare_if.If(condition); + compare_if.Then(); + Visit(stmt->then_statement()); + compare_if.Else(); + Visit(stmt->else_statement()); + compare_if.End(); +} + + +void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { + StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable(); + breakable()->ContinueTarget(stmt->target()); + set_environment(env); +} + + +void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { + StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable(); + breakable()->BreakTarget(stmt->target()); + set_environment(env); +} + + +void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { + VisitForValue(stmt->expression()); + Node* result = environment()->Pop(); + Node* control = NewNode(common()->Return(), result); + UpdateControlDependencyToLeaveFunction(control); +} + + +void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) { + VisitForValue(stmt->expression()); + Node* value = environment()->Pop(); + Operator* op = javascript()->CreateWithContext(); + Node* context = NewNode(op, value, GetFunctionClosure()); + ContextScope scope(this, stmt->scope(), context); + Visit(stmt->statement()); +} + + +void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { + ZoneList<CaseClause*>* clauses = stmt->cases(); + SwitchBuilder compare_switch(this, 
clauses->length()); + BreakableScope scope(this, stmt, &compare_switch, 0); + compare_switch.BeginSwitch(); + int default_index = -1; + + // Keep the switch value on the stack until a case matches. + VisitForValue(stmt->tag()); + Node* tag = environment()->Top(); + + // Iterate over all cases and create nodes for label comparison. + for (int i = 0; i < clauses->length(); i++) { + CaseClause* clause = clauses->at(i); + + // The default is not a test, remember index. + if (clause->is_default()) { + default_index = i; + continue; + } + + // Create nodes to perform label comparison as if via '==='. The switch + // value is still on the operand stack while the label is evaluated. + VisitForValue(clause->label()); + Node* label = environment()->Pop(); + Operator* op = javascript()->StrictEqual(); + Node* condition = NewNode(op, tag, label); + compare_switch.BeginLabel(i, condition); + + // Discard the switch value at label match. + environment()->Pop(); + compare_switch.EndLabel(); + } + + // Discard the switch value and mark the default case. + environment()->Pop(); + if (default_index >= 0) { + compare_switch.DefaultAt(default_index); + } + + // Iterate over all cases and create nodes for case bodies. 
+ for (int i = 0; i < clauses->length(); i++) { + CaseClause* clause = clauses->at(i); + compare_switch.BeginCase(i); + VisitStatements(clause->statements()); + compare_switch.EndCase(); + } + + compare_switch.EndSwitch(); +} + + +void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { + LoopBuilder while_loop(this); + while_loop.BeginLoop(); + VisitIterationBody(stmt, &while_loop, 0); + while_loop.EndBody(); + VisitForTest(stmt->cond()); + Node* condition = environment()->Pop(); + while_loop.BreakUnless(condition); + while_loop.EndLoop(); +} + + +void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { + LoopBuilder while_loop(this); + while_loop.BeginLoop(); + VisitForTest(stmt->cond()); + Node* condition = environment()->Pop(); + while_loop.BreakUnless(condition); + VisitIterationBody(stmt, &while_loop, 0); + while_loop.EndBody(); + while_loop.EndLoop(); +} + + +void AstGraphBuilder::VisitForStatement(ForStatement* stmt) { + LoopBuilder for_loop(this); + VisitIfNotNull(stmt->init()); + for_loop.BeginLoop(); + if (stmt->cond() != NULL) { + VisitForTest(stmt->cond()); + Node* condition = environment()->Pop(); + for_loop.BreakUnless(condition); + } + VisitIterationBody(stmt, &for_loop, 0); + for_loop.EndBody(); + VisitIfNotNull(stmt->next()); + for_loop.EndLoop(); +} + + +// TODO(dcarney): this is a big function. Try to clean up some. +void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) { + VisitForValue(stmt->subject()); + Node* obj = environment()->Pop(); + // Check for undefined or null before entering loop. 
+ IfBuilder is_undefined(this); + Node* is_undefined_cond = + NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant()); + is_undefined.If(is_undefined_cond); + is_undefined.Then(); + is_undefined.Else(); + { + IfBuilder is_null(this); + Node* is_null_cond = + NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant()); + is_null.If(is_null_cond); + is_null.Then(); + is_null.Else(); + // Convert object to jsobject. + // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); + obj = NewNode(javascript()->ToObject(), obj); + environment()->Push(obj); + // TODO(dcarney): should do a fast enum cache check here to skip runtime. + environment()->Push(obj); + Node* cache_type = ProcessArguments( + javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1); + // TODO(dcarney): these next runtime calls should be removed in favour of + // a few simplified instructions. + environment()->Push(obj); + environment()->Push(cache_type); + Node* cache_pair = + ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2); + // cache_type may have been replaced. + Node* cache_array = NewNode(common()->Projection(0), cache_pair); + cache_type = NewNode(common()->Projection(1), cache_pair); + environment()->Push(cache_type); + environment()->Push(cache_array); + Node* cache_length = ProcessArguments( + javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2); + { + // TODO(dcarney): this check is actually supposed to be for the + // empty enum case only. + IfBuilder have_no_properties(this); + Node* empty_array_cond = NewNode(javascript()->StrictEqual(), + cache_length, jsgraph()->ZeroConstant()); + have_no_properties.If(empty_array_cond); + have_no_properties.Then(); + // Pop obj and skip loop. + environment()->Pop(); + have_no_properties.Else(); + { + // Construct the rest of the environment. 
+ environment()->Push(cache_type); + environment()->Push(cache_array); + environment()->Push(cache_length); + environment()->Push(jsgraph()->ZeroConstant()); + // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); + LoopBuilder for_loop(this); + for_loop.BeginLoop(); + // Check loop termination condition. + Node* index = environment()->Peek(0); + Node* exit_cond = + NewNode(javascript()->LessThan(), index, cache_length); + // TODO(jarin): provide real bailout id. + BuildLazyBailout(exit_cond, BailoutId::None()); + for_loop.BreakUnless(exit_cond); + // TODO(dcarney): this runtime call should be a handful of + // simplified instructions that + // basically produce + // value = array[index] + environment()->Push(obj); + environment()->Push(cache_array); + environment()->Push(cache_type); + environment()->Push(index); + Node* pair = + ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4); + Node* value = NewNode(common()->Projection(0), pair); + Node* should_filter = NewNode(common()->Projection(1), pair); + environment()->Push(value); + { + // Test if FILTER_KEY needs to be called. + IfBuilder test_should_filter(this); + Node* should_filter_cond = + NewNode(javascript()->StrictEqual(), should_filter, + jsgraph()->TrueConstant()); + test_should_filter.If(should_filter_cond); + test_should_filter.Then(); + value = environment()->Pop(); + // TODO(dcarney): Better load from function context. + // See comment in BuildLoadBuiltinsObject. + Handle<JSFunction> function(JSFunction::cast( + info()->context()->builtins()->javascript_builtin( + Builtins::FILTER_KEY))); + // Callee. + environment()->Push(jsgraph()->HeapConstant(function)); + // Receiver. + environment()->Push(obj); + // Args. + environment()->Push(value); + // result is either the string key or Smi(0) indicating the property + // is gone. + Node* res = ProcessArguments( + javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3); + // TODO(jarin): provide real bailout id. 
+ BuildLazyBailout(res, BailoutId::None()); + Node* property_missing = NewNode(javascript()->StrictEqual(), res, + jsgraph()->ZeroConstant()); + { + IfBuilder is_property_missing(this); + is_property_missing.If(property_missing); + is_property_missing.Then(); + // Inc counter and continue. + Node* index_inc = + NewNode(javascript()->Add(), index, jsgraph()->OneConstant()); + environment()->Poke(0, index_inc); + // TODO(jarin): provide real bailout id. + BuildLazyBailout(index_inc, BailoutId::None()); + for_loop.Continue(); + is_property_missing.Else(); + is_property_missing.End(); + } + // Replace 'value' in environment. + environment()->Push(res); + test_should_filter.Else(); + test_should_filter.End(); + } + value = environment()->Pop(); + // Bind value and do loop body. + VisitForInAssignment(stmt->each(), value); + VisitIterationBody(stmt, &for_loop, 5); + // Inc counter and continue. + Node* index_inc = + NewNode(javascript()->Add(), index, jsgraph()->OneConstant()); + environment()->Poke(0, index_inc); + // TODO(jarin): provide real bailout id. + BuildLazyBailout(index_inc, BailoutId::None()); + for_loop.EndBody(); + for_loop.EndLoop(); + environment()->Drop(5); + // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); + } + have_no_properties.End(); + } + is_null.End(); + } + is_undefined.End(); +} + + +void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) { + VisitForValue(stmt->subject()); + environment()->Pop(); + // TODO(turbofan): create and use loop builder. +} + + +void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { + // TODO(turbofan): Do we really need a separate reloc-info for this? 
+ NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0)); +} + + +void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { + Node* context = current_context(); + + // Build a new shared function info if we cannot find one in the baseline + // code. We also have a stack overflow if the recursive compilation did. + Handle<SharedFunctionInfo> shared_info = + SearchSharedFunctionInfo(info()->shared_info()->code(), expr); + if (shared_info.is_null()) { + shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info()); + CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow? + } + + // Create node to instantiate a new closure. + Node* info = jsgraph()->Constant(shared_info); + Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant() + : jsgraph()->FalseConstant(); + Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3); + Node* value = NewNode(op, context, info, pretenure); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) { + UNREACHABLE(); +} + + +void AstGraphBuilder::VisitConditional(Conditional* expr) { + IfBuilder compare_if(this); + VisitForTest(expr->condition()); + Node* condition = environment()->Pop(); + compare_if.If(condition); + compare_if.Then(); + Visit(expr->then_expression()); + compare_if.Else(); + Visit(expr->else_expression()); + compare_if.End(); + ast_context()->ReplaceValue(); +} + + +void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) { + Node* value = BuildVariableLoad(expr->var(), expr->id()); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitLiteral(Literal* expr) { + Node* value = jsgraph()->Constant(expr->value()); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { + Handle<JSFunction> closure = info()->closure(); + + // Create node to materialize a regular expression literal. 
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals())); + Node* literal_index = jsgraph()->Constant(expr->literal_index()); + Node* pattern = jsgraph()->Constant(expr->pattern()); + Node* flags = jsgraph()->Constant(expr->flags()); + Operator* op = javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4); + Node* literal = NewNode(op, literals_array, literal_index, pattern, flags); + ast_context()->ProduceValue(literal); +} + + +void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { + Handle<JSFunction> closure = info()->closure(); + + // Create node to deep-copy the literal boilerplate. + expr->BuildConstantProperties(isolate()); + Node* literals_array = jsgraph()->Constant(handle(closure->literals())); + Node* literal_index = jsgraph()->Constant(expr->literal_index()); + Node* constants = jsgraph()->Constant(expr->constant_properties()); + Node* flags = jsgraph()->Constant(expr->ComputeFlags()); + Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4); + Node* literal = NewNode(op, literals_array, literal_index, constants, flags); + + // The object is expected on the operand stack during computation of the + // property values and is the value of the entire expression. + environment()->Push(literal); + + // Mark all computed expressions that are bound to a key that is shadowed by + // a later occurrence of the same key. For the marked expressions, no store + // code is emitted. + expr->CalculateEmitStore(zone()); + + // Create nodes to store computed values into the literal. 
+ AccessorTable accessor_table(zone()); + for (int i = 0; i < expr->properties()->length(); i++) { + ObjectLiteral::Property* property = expr->properties()->at(i); + if (property->IsCompileTimeValue()) continue; + + Literal* key = property->key(); + switch (property->kind()) { + case ObjectLiteral::Property::CONSTANT: + UNREACHABLE(); + case ObjectLiteral::Property::MATERIALIZED_LITERAL: + DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value())); + // Fall through. + case ObjectLiteral::Property::COMPUTED: { + // It is safe to use [[Put]] here because the boilerplate already + // contains computed properties with an uninitialized value. + if (key->value()->IsInternalizedString()) { + if (property->emit_store()) { + VisitForValue(property->value()); + Node* value = environment()->Pop(); + PrintableUnique<Name> name = MakeUnique(key->AsPropertyName()); + Node* store = + NewNode(javascript()->StoreNamed(name), literal, value); + BuildLazyBailout(store, key->id()); + } else { + VisitForEffect(property->value()); + } + break; + } + environment()->Push(literal); // Duplicate receiver. + VisitForValue(property->key()); + VisitForValue(property->value()); + Node* value = environment()->Pop(); + Node* key = environment()->Pop(); + Node* receiver = environment()->Pop(); + if (property->emit_store()) { + Node* strict = jsgraph()->Constant(SLOPPY); + Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4); + NewNode(op, receiver, key, value, strict); + } + break; + } + case ObjectLiteral::Property::PROTOTYPE: { + environment()->Push(literal); // Duplicate receiver. 
+ VisitForValue(property->value()); + Node* value = environment()->Pop(); + Node* receiver = environment()->Pop(); + if (property->emit_store()) { + Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2); + NewNode(op, receiver, value); + } + break; + } + case ObjectLiteral::Property::GETTER: + accessor_table.lookup(key)->second->getter = property->value(); + break; + case ObjectLiteral::Property::SETTER: + accessor_table.lookup(key)->second->setter = property->value(); + break; + } + } + + // Create nodes to define accessors, using only a single call to the runtime + // for each pair of corresponding getters and setters. + for (AccessorTable::Iterator it = accessor_table.begin(); + it != accessor_table.end(); ++it) { + VisitForValue(it->first); + VisitForValueOrNull(it->second->getter); + VisitForValueOrNull(it->second->setter); + Node* setter = environment()->Pop(); + Node* getter = environment()->Pop(); + Node* name = environment()->Pop(); + Node* attr = jsgraph()->Constant(NONE); + Operator* op = + javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5); + NewNode(op, literal, name, getter, setter, attr); + } + + // Transform literals that contain functions to fast properties. + if (expr->has_function()) { + Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1); + NewNode(op, literal); + } + + ast_context()->ProduceValue(environment()->Pop()); +} + + +void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { + Handle<JSFunction> closure = info()->closure(); + + // Create node to deep-copy the literal boilerplate. 
+ expr->BuildConstantElements(isolate()); + Node* literals_array = jsgraph()->Constant(handle(closure->literals())); + Node* literal_index = jsgraph()->Constant(expr->literal_index()); + Node* constants = jsgraph()->Constant(expr->constant_elements()); + Node* flags = jsgraph()->Constant(expr->ComputeFlags()); + Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4); + Node* literal = NewNode(op, literals_array, literal_index, constants, flags); + + // The array and the literal index are both expected on the operand stack + // during computation of the element values. + environment()->Push(literal); + environment()->Push(literal_index); + + // Create nodes to evaluate all the non-constant subexpressions and to store + // them into the newly cloned array. + for (int i = 0; i < expr->values()->length(); i++) { + Expression* subexpr = expr->values()->at(i); + if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; + + VisitForValue(subexpr); + Node* value = environment()->Pop(); + Node* index = jsgraph()->Constant(i); + Node* store = NewNode(javascript()->StoreProperty(), literal, index, value); + BuildLazyBailout(store, expr->GetIdForElement(i)); + } + + environment()->Pop(); // Array literal index. + ast_context()->ProduceValue(environment()->Pop()); +} + + +void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) { + DCHECK(expr->IsValidReferenceExpression()); + + // Left-hand side can only be a property, a global or a variable slot. + Property* property = expr->AsProperty(); + LhsKind assign_type = DetermineLhsKind(expr); + + // Evaluate LHS expression and store the value. + switch (assign_type) { + case VARIABLE: { + Variable* var = expr->AsVariableProxy()->var(); + // TODO(jarin) Fill in the correct bailout id. 
+ BuildVariableAssignment(var, value, Token::ASSIGN, BailoutId::None()); + break; + } + case NAMED_PROPERTY: { + environment()->Push(value); + VisitForValue(property->obj()); + Node* object = environment()->Pop(); + value = environment()->Pop(); + PrintableUnique<Name> name = + MakeUnique(property->key()->AsLiteral()->AsPropertyName()); + Node* store = NewNode(javascript()->StoreNamed(name), object, value); + // TODO(jarin) Fill in the correct bailout id. + BuildLazyBailout(store, BailoutId::None()); + break; + } + case KEYED_PROPERTY: { + environment()->Push(value); + VisitForValue(property->obj()); + VisitForValue(property->key()); + Node* key = environment()->Pop(); + Node* object = environment()->Pop(); + value = environment()->Pop(); + Node* store = NewNode(javascript()->StoreProperty(), object, key, value); + // TODO(jarin) Fill in the correct bailout id. + BuildLazyBailout(store, BailoutId::None()); + break; + } + } +} + + +void AstGraphBuilder::VisitAssignment(Assignment* expr) { + DCHECK(expr->target()->IsValidReferenceExpression()); + + // Left-hand side can only be a property, a global or a variable slot. + Property* property = expr->target()->AsProperty(); + LhsKind assign_type = DetermineLhsKind(expr->target()); + + // Evaluate LHS expression. + switch (assign_type) { + case VARIABLE: + // Nothing to do here. + break; + case NAMED_PROPERTY: + VisitForValue(property->obj()); + break; + case KEYED_PROPERTY: { + VisitForValue(property->obj()); + VisitForValue(property->key()); + break; + } + } + + // Evaluate the value and potentially handle compound assignments by loading + // the left-hand side value and performing a binary operation. 
+ if (expr->is_compound()) { + Node* old_value = NULL; + switch (assign_type) { + case VARIABLE: { + Variable* variable = expr->target()->AsVariableProxy()->var(); + old_value = BuildVariableLoad(variable, expr->target()->id()); + break; + } + case NAMED_PROPERTY: { + Node* object = environment()->Top(); + PrintableUnique<Name> name = + MakeUnique(property->key()->AsLiteral()->AsPropertyName()); + old_value = NewNode(javascript()->LoadNamed(name), object); + BuildLazyBailoutWithPushedNode(old_value, property->LoadId()); + break; + } + case KEYED_PROPERTY: { + Node* key = environment()->Top(); + Node* object = environment()->Peek(1); + old_value = NewNode(javascript()->LoadProperty(), object, key); + BuildLazyBailoutWithPushedNode(old_value, property->LoadId()); + break; + } + } + environment()->Push(old_value); + VisitForValue(expr->value()); + Node* right = environment()->Pop(); + Node* left = environment()->Pop(); + Node* value = BuildBinaryOp(left, right, expr->binary_op()); + environment()->Push(value); + BuildLazyBailout(value, expr->binary_operation()->id()); + } else { + VisitForValue(expr->value()); + } + + // Store the value. 
+ Node* value = environment()->Pop(); + switch (assign_type) { + case VARIABLE: { + Variable* variable = expr->target()->AsVariableProxy()->var(); + BuildVariableAssignment(variable, value, expr->op(), + expr->AssignmentId()); + break; + } + case NAMED_PROPERTY: { + Node* object = environment()->Pop(); + PrintableUnique<Name> name = + MakeUnique(property->key()->AsLiteral()->AsPropertyName()); + Node* store = NewNode(javascript()->StoreNamed(name), object, value); + BuildLazyBailout(store, expr->AssignmentId()); + break; + } + case KEYED_PROPERTY: { + Node* key = environment()->Pop(); + Node* object = environment()->Pop(); + Node* store = NewNode(javascript()->StoreProperty(), object, key, value); + BuildLazyBailout(store, expr->AssignmentId()); + break; + } + } + + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitYield(Yield* expr) { + VisitForValue(expr->generator_object()); + VisitForValue(expr->expression()); + environment()->Pop(); + environment()->Pop(); + // TODO(turbofan): VisitYield + ast_context()->ProduceValue(jsgraph()->UndefinedConstant()); +} + + +void AstGraphBuilder::VisitThrow(Throw* expr) { + VisitForValue(expr->exception()); + Node* exception = environment()->Pop(); + Operator* op = javascript()->Runtime(Runtime::kThrow, 1); + Node* value = NewNode(op, exception); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitProperty(Property* expr) { + Node* value; + if (expr->key()->IsPropertyName()) { + VisitForValue(expr->obj()); + Node* object = environment()->Pop(); + PrintableUnique<Name> name = + MakeUnique(expr->key()->AsLiteral()->AsPropertyName()); + value = NewNode(javascript()->LoadNamed(name), object); + } else { + VisitForValue(expr->obj()); + VisitForValue(expr->key()); + Node* key = environment()->Pop(); + Node* object = environment()->Pop(); + value = NewNode(javascript()->LoadProperty(), object, key); + } + ast_context()->ProduceValueWithLazyBailout(value); +} + + +void 
AstGraphBuilder::VisitCall(Call* expr) { + Expression* callee = expr->expression(); + Call::CallType call_type = expr->GetCallType(isolate()); + + // Prepare the callee and the receiver to the function call. This depends on + // the semantics of the underlying call type. + CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS; + Node* receiver_value = NULL; + Node* callee_value = NULL; + bool possibly_eval = false; + switch (call_type) { + case Call::GLOBAL_CALL: { + Variable* variable = callee->AsVariableProxy()->var(); + callee_value = BuildVariableLoad(variable, expr->expression()->id()); + receiver_value = jsgraph()->UndefinedConstant(); + break; + } + case Call::LOOKUP_SLOT_CALL: { + Variable* variable = callee->AsVariableProxy()->var(); + DCHECK(variable->location() == Variable::LOOKUP); + Node* name = jsgraph()->Constant(variable->name()); + Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2); + Node* pair = NewNode(op, current_context(), name); + callee_value = NewNode(common()->Projection(0), pair); + receiver_value = NewNode(common()->Projection(1), pair); + break; + } + case Call::PROPERTY_CALL: { + Property* property = callee->AsProperty(); + VisitForValue(property->obj()); + Node* object = environment()->Top(); + if (property->key()->IsPropertyName()) { + PrintableUnique<Name> name = + MakeUnique(property->key()->AsLiteral()->AsPropertyName()); + callee_value = NewNode(javascript()->LoadNamed(name), object); + } else { + VisitForValue(property->key()); + Node* key = environment()->Pop(); + callee_value = NewNode(javascript()->LoadProperty(), object, key); + } + BuildLazyBailoutWithPushedNode(callee_value, property->LoadId()); + receiver_value = environment()->Pop(); + // Note that a PROPERTY_CALL requires the receiver to be wrapped into an + // object for sloppy callees. This could also be modeled explicitly here, + // thereby obsoleting the need for a flag to the call operator. 
+ flags = CALL_AS_METHOD; + break; + } + case Call::POSSIBLY_EVAL_CALL: + possibly_eval = true; + // Fall through. + case Call::OTHER_CALL: + VisitForValue(callee); + callee_value = environment()->Pop(); + receiver_value = jsgraph()->UndefinedConstant(); + break; + } + + // The callee and the receiver both have to be pushed onto the operand stack + // before arguments are being evaluated. + environment()->Push(callee_value); + environment()->Push(receiver_value); + + // Evaluate all arguments to the function call, + ZoneList<Expression*>* args = expr->arguments(); + VisitForValues(args); + + // Resolve callee and receiver for a potential direct eval call. This block + // will mutate the callee and receiver values pushed onto the environment. + if (possibly_eval && args->length() > 0) { + int arg_count = args->length(); + + // Extract callee and source string from the environment. + Node* callee = environment()->Peek(arg_count + 1); + Node* source = environment()->Peek(arg_count - 1); + + // Create node to ask for help resolving potential eval call. This will + // provide a fully resolved callee and the corresponding receiver. + Node* receiver = environment()->Lookup(info()->scope()->receiver()); + Node* strict = jsgraph()->Constant(strict_mode()); + Node* position = jsgraph()->Constant(info()->scope()->start_position()); + Operator* op = + javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 5); + Node* pair = NewNode(op, callee, source, receiver, strict, position); + Node* new_callee = NewNode(common()->Projection(0), pair); + Node* new_receiver = NewNode(common()->Projection(1), pair); + + // Patch callee and receiver on the environment. + environment()->Poke(arg_count + 1, new_callee); + environment()->Poke(arg_count + 0, new_receiver); + } + + // Create node to perform the function call. 
+ Operator* call = javascript()->Call(args->length() + 2, flags); + Node* value = ProcessArguments(call, args->length() + 2); + ast_context()->ProduceValueWithLazyBailout(value); +} + + +void AstGraphBuilder::VisitCallNew(CallNew* expr) { + VisitForValue(expr->expression()); + + // Evaluate all arguments to the construct call. + ZoneList<Expression*>* args = expr->arguments(); + VisitForValues(args); + + // Create node to perform the construct call. + Operator* call = javascript()->CallNew(args->length() + 1); + Node* value = ProcessArguments(call, args->length() + 1); + ast_context()->ProduceValueWithLazyBailout(value); +} + + +void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) { + Handle<String> name = expr->name(); + + // The callee and the receiver both have to be pushed onto the operand stack + // before arguments are being evaluated. + CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS; + Node* receiver_value = BuildLoadBuiltinsObject(); + PrintableUnique<String> unique = MakeUnique(name); + Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value); + environment()->Push(callee_value); + // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft + // refuses to optimize functions with jsruntime calls). + BuildLazyBailout(callee_value, BailoutId::None()); + environment()->Push(receiver_value); + + // Evaluate all arguments to the JS runtime call. + ZoneList<Expression*>* args = expr->arguments(); + VisitForValues(args); + + // Create node to perform the JS runtime call. + Operator* call = javascript()->Call(args->length() + 2, flags); + Node* value = ProcessArguments(call, args->length() + 2); + ast_context()->ProduceValueWithLazyBailout(value); +} + + +void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) { + const Runtime::Function* function = expr->function(); + + // Handle calls to runtime functions implemented in JavaScript separately as + // the call follows JavaScript ABI and the callee is statically unknown. 
+ if (expr->is_jsruntime()) { + DCHECK(function == NULL && expr->name()->length() > 0); + return VisitCallJSRuntime(expr); + } + + // Evaluate all arguments to the runtime call. + ZoneList<Expression*>* args = expr->arguments(); + VisitForValues(args); + + // Create node to perform the runtime call. + Runtime::FunctionId functionId = function->function_id; + Operator* call = javascript()->Runtime(functionId, args->length()); + Node* value = ProcessArguments(call, args->length()); + ast_context()->ProduceValueWithLazyBailout(value); +} + + +void AstGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { + switch (expr->op()) { + case Token::DELETE: + return VisitDelete(expr); + case Token::VOID: + return VisitVoid(expr); + case Token::TYPEOF: + return VisitTypeof(expr); + case Token::NOT: + return VisitNot(expr); + default: + UNREACHABLE(); + } +} + + +void AstGraphBuilder::VisitCountOperation(CountOperation* expr) { + DCHECK(expr->expression()->IsValidReferenceExpression()); + + // Left-hand side can only be a property, a global or a variable slot. + Property* property = expr->expression()->AsProperty(); + LhsKind assign_type = DetermineLhsKind(expr->expression()); + + // Reserve space for result of postfix operation. + bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect(); + if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant()); + + // Evaluate LHS expression and get old value. 
+ Node* old_value = NULL; + int stack_depth = -1; + switch (assign_type) { + case VARIABLE: { + Variable* variable = expr->expression()->AsVariableProxy()->var(); + old_value = BuildVariableLoad(variable, expr->expression()->id()); + stack_depth = 0; + break; + } + case NAMED_PROPERTY: { + VisitForValue(property->obj()); + Node* object = environment()->Top(); + PrintableUnique<Name> name = + MakeUnique(property->key()->AsLiteral()->AsPropertyName()); + old_value = NewNode(javascript()->LoadNamed(name), object); + BuildLazyBailoutWithPushedNode(old_value, property->LoadId()); + stack_depth = 1; + break; + } + case KEYED_PROPERTY: { + VisitForValue(property->obj()); + VisitForValue(property->key()); + Node* key = environment()->Top(); + Node* object = environment()->Peek(1); + old_value = NewNode(javascript()->LoadProperty(), object, key); + BuildLazyBailoutWithPushedNode(old_value, property->LoadId()); + stack_depth = 2; + break; + } + } + + // Convert old value into a number. + old_value = NewNode(javascript()->ToNumber(), old_value); + + // Save result for postfix expressions at correct stack depth. + if (is_postfix) environment()->Poke(stack_depth, old_value); + + // Create node to perform +1/-1 operation. + Node* value = + BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op()); + // TODO(jarin) Insert proper bailout id here (will need to change + // full code generator). + BuildLazyBailout(value, BailoutId::None()); + + // Store the value. 
+ switch (assign_type) { + case VARIABLE: { + Variable* variable = expr->expression()->AsVariableProxy()->var(); + BuildVariableAssignment(variable, value, expr->op(), + expr->AssignmentId()); + break; + } + case NAMED_PROPERTY: { + Node* object = environment()->Pop(); + PrintableUnique<Name> name = + MakeUnique(property->key()->AsLiteral()->AsPropertyName()); + Node* store = NewNode(javascript()->StoreNamed(name), object, value); + BuildLazyBailout(store, expr->AssignmentId()); + break; + } + case KEYED_PROPERTY: { + Node* key = environment()->Pop(); + Node* object = environment()->Pop(); + Node* store = NewNode(javascript()->StoreProperty(), object, key, value); + BuildLazyBailout(store, expr->AssignmentId()); + break; + } + } + + // Restore old value for postfix expressions. + if (is_postfix) value = environment()->Pop(); + + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { + switch (expr->op()) { + case Token::COMMA: + return VisitComma(expr); + case Token::OR: + case Token::AND: + return VisitLogicalExpression(expr); + default: { + VisitForValue(expr->left()); + VisitForValue(expr->right()); + Node* right = environment()->Pop(); + Node* left = environment()->Pop(); + Node* value = BuildBinaryOp(left, right, expr->op()); + ast_context()->ProduceValueWithLazyBailout(value); + } + } +} + + +void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) { + Operator* op; + switch (expr->op()) { + case Token::EQ: + op = javascript()->Equal(); + break; + case Token::NE: + op = javascript()->NotEqual(); + break; + case Token::EQ_STRICT: + op = javascript()->StrictEqual(); + break; + case Token::NE_STRICT: + op = javascript()->StrictNotEqual(); + break; + case Token::LT: + op = javascript()->LessThan(); + break; + case Token::GT: + op = javascript()->GreaterThan(); + break; + case Token::LTE: + op = javascript()->LessThanOrEqual(); + break; + case Token::GTE: + op = javascript()->GreaterThanOrEqual(); 
+ break; + case Token::INSTANCEOF: + op = javascript()->InstanceOf(); + break; + case Token::IN: + op = javascript()->HasProperty(); + break; + default: + op = NULL; + UNREACHABLE(); + } + VisitForValue(expr->left()); + VisitForValue(expr->right()); + Node* right = environment()->Pop(); + Node* left = environment()->Pop(); + Node* value = NewNode(op, left, right); + ast_context()->ProduceValue(value); + + BuildLazyBailout(value, expr->id()); +} + + +void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) { + Node* value = GetFunctionClosure(); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); } + + +void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) { + DCHECK(globals()->is_empty()); + AstVisitor::VisitDeclarations(declarations); + if (globals()->is_empty()) return; + Handle<FixedArray> data = + isolate()->factory()->NewFixedArray(globals()->length(), TENURED); + for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i)); + int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) | + DeclareGlobalsNativeFlag::encode(info()->is_native()) | + DeclareGlobalsStrictMode::encode(info()->strict_mode()); + Node* flags = jsgraph()->Constant(encoded_flags); + Node* pairs = jsgraph()->Constant(data); + Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3); + NewNode(op, current_context(), pairs, flags); + globals()->Rewind(0); +} + + +void AstGraphBuilder::VisitIfNotNull(Statement* stmt) { + if (stmt == NULL) return; + Visit(stmt); +} + + +void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt, + LoopBuilder* loop, int drop_extra) { + BreakableScope scope(this, stmt, loop, drop_extra); + Visit(stmt->body()); +} + + +void AstGraphBuilder::VisitDelete(UnaryOperation* expr) { + Node* value; + if (expr->expression()->IsVariableProxy()) { + // Delete of an unqualified identifier is only allowed in classic mode but + // deleting 
"this" is allowed in all language modes. + Variable* variable = expr->expression()->AsVariableProxy()->var(); + DCHECK(strict_mode() == SLOPPY || variable->is_this()); + value = BuildVariableDelete(variable); + } else if (expr->expression()->IsProperty()) { + Property* property = expr->expression()->AsProperty(); + VisitForValue(property->obj()); + VisitForValue(property->key()); + Node* key = environment()->Pop(); + Node* object = environment()->Pop(); + value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key); + } else { + VisitForEffect(expr->expression()); + value = jsgraph()->TrueConstant(); + } + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitVoid(UnaryOperation* expr) { + VisitForEffect(expr->expression()); + Node* value = jsgraph()->UndefinedConstant(); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) { + Node* operand; + if (expr->expression()->IsVariableProxy()) { + // Typeof does not throw a reference error on global variables, hence we + // perform a non-contextual load in case the operand is a variable proxy. + Variable* variable = expr->expression()->AsVariableProxy()->var(); + operand = + BuildVariableLoad(variable, expr->expression()->id(), NOT_CONTEXTUAL); + } else { + VisitForValue(expr->expression()); + operand = environment()->Pop(); + } + Node* value = NewNode(javascript()->TypeOf(), operand); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitNot(UnaryOperation* expr) { + VisitForValue(expr->expression()); + Node* operand = environment()->Pop(); + // TODO(mstarzinger): Possible optimization when we are in effect context. 
+ Node* value = NewNode(javascript()->UnaryNot(), operand); + ast_context()->ProduceValue(value); +} + + +void AstGraphBuilder::VisitComma(BinaryOperation* expr) { + VisitForEffect(expr->left()); + Visit(expr->right()); + ast_context()->ReplaceValue(); +} + + +void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { + bool is_logical_and = expr->op() == Token::AND; + IfBuilder compare_if(this); + VisitForValue(expr->left()); + Node* condition = environment()->Top(); + compare_if.If(BuildToBoolean(condition)); + compare_if.Then(); + if (is_logical_and) { + environment()->Pop(); + Visit(expr->right()); + } else if (ast_context()->IsEffect()) { + environment()->Pop(); + } + compare_if.Else(); + if (!is_logical_and) { + environment()->Pop(); + Visit(expr->right()); + } else if (ast_context()->IsEffect()) { + environment()->Pop(); + } + compare_if.End(); + ast_context()->ReplaceValue(); +} + + +Node* AstGraphBuilder::ProcessArguments(Operator* op, int arity) { + DCHECK(environment()->stack_height() >= arity); + Node** all = info()->zone()->NewArray<Node*>(arity); // XXX: alloca? + for (int i = arity - 1; i >= 0; --i) { + all[i] = environment()->Pop(); + } + Node* value = NewNode(op, arity, all); + return value; +} + + +Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) { + int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots <= 0) return context; + set_current_context(context); + + // Allocate a new local context. + Operator* op = javascript()->CreateFunctionContext(); + Node* local_context = NewNode(op, closure); + set_current_context(local_context); + + // Copy parameters into context if necessary. + int num_parameters = info()->scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Variable* variable = info()->scope()->parameter(i); + if (!variable->IsContextSlot()) continue; + // Temporary parameter node. 
The parameter indices are shifted by 1 + // (receiver is parameter index -1 but environment index 0). + Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start()); + // Context variable (at bottom of the context chain). + DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope())); + Operator* op = javascript()->StoreContext(0, variable->index()); + NewNode(op, local_context, parameter); + } + + return local_context; +} + + +Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) { + if (arguments == NULL) return NULL; + + // Allocate and initialize a new arguments object. + Node* callee = GetFunctionClosure(); + Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1); + Node* object = NewNode(op, callee); + + // Assign the object to the arguments variable. + DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated()); + // This should never lazy deopt, so it is fine to send invalid bailout id. + BuildVariableAssignment(arguments, object, Token::ASSIGN, BailoutId::None()); + + return object; +} + + +Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole, + Node* not_hole) { + IfBuilder hole_check(this); + Node* the_hole = jsgraph()->TheHoleConstant(); + Node* check = NewNode(javascript()->StrictEqual(), value, the_hole); + hole_check.If(check); + hole_check.Then(); + environment()->Push(for_hole); + hole_check.Else(); + environment()->Push(not_hole); + hole_check.End(); + return environment()->Pop(); +} + + +Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable, + Node* not_hole) { + IfBuilder hole_check(this); + Node* the_hole = jsgraph()->TheHoleConstant(); + Node* check = NewNode(javascript()->StrictEqual(), value, the_hole); + hole_check.If(check); + hole_check.Then(); + environment()->Push(BuildThrowReferenceError(variable)); + hole_check.Else(); + environment()->Push(not_hole); + hole_check.End(); + return environment()->Pop(); +} + + +Node* 
AstGraphBuilder::BuildVariableLoad(Variable* variable, + BailoutId bailout_id, + ContextualMode contextual_mode) { + Node* the_hole = jsgraph()->TheHoleConstant(); + VariableMode mode = variable->mode(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + // Global var, const, or let variable. + Node* global = BuildLoadGlobalObject(); + PrintableUnique<Name> name = MakeUnique(variable->name()); + Operator* op = javascript()->LoadNamed(name, contextual_mode); + Node* node = NewNode(op, global); + BuildLazyBailoutWithPushedNode(node, bailout_id); + return node; + } + case Variable::PARAMETER: + case Variable::LOCAL: { + // Local var, const, or let variable. + Node* value = environment()->Lookup(variable); + if (mode == CONST_LEGACY) { + // Perform check for uninitialized legacy const variables. + if (value->op() == the_hole->op()) { + value = jsgraph()->UndefinedConstant(); + } else if (value->opcode() == IrOpcode::kPhi) { + Node* undefined = jsgraph()->UndefinedConstant(); + value = BuildHoleCheckSilent(value, undefined, value); + } + } else if (mode == LET || mode == CONST) { + // Perform check for uninitialized let/const variables. + if (value->op() == the_hole->op()) { + value = BuildThrowReferenceError(variable); + } else if (value->opcode() == IrOpcode::kPhi) { + value = BuildHoleCheckThrow(value, variable, value); + } + } + return value; + } + case Variable::CONTEXT: { + // Context variable (potentially up the context chain). + int depth = current_scope()->ContextChainLength(variable->scope()); + bool immutable = variable->maybe_assigned() == kNotAssigned; + Operator* op = + javascript()->LoadContext(depth, variable->index(), immutable); + Node* value = NewNode(op, current_context()); + // TODO(titzer): initialization checks are redundant for already + // initialized immutable context loads, but only specialization knows. + // Maybe specializer should be a parameter to the graph builder? 
+ if (mode == CONST_LEGACY) { + // Perform check for uninitialized legacy const variables. + Node* undefined = jsgraph()->UndefinedConstant(); + value = BuildHoleCheckSilent(value, undefined, value); + } else if (mode == LET || mode == CONST) { + // Perform check for uninitialized let/const variables. + value = BuildHoleCheckThrow(value, variable, value); + } + return value; + } + case Variable::LOOKUP: { + // Dynamic lookup of context variable (anywhere in the chain). + Node* name = jsgraph()->Constant(variable->name()); + Runtime::FunctionId function_id = + (contextual_mode == CONTEXTUAL) + ? Runtime::kLoadLookupSlot + : Runtime::kLoadLookupSlotNoReferenceError; + Operator* op = javascript()->Runtime(function_id, 2); + Node* pair = NewNode(op, current_context(), name); + return NewNode(common()->Projection(0), pair); + } + } + UNREACHABLE(); + return NULL; +} + + +Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) { + switch (variable->location()) { + case Variable::UNALLOCATED: { + // Global var, const, or let variable. + Node* global = BuildLoadGlobalObject(); + Node* name = jsgraph()->Constant(variable->name()); + Operator* op = javascript()->DeleteProperty(strict_mode()); + return NewNode(op, global, name); + } + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::CONTEXT: + // Local var, const, or let variable or context variable. + return variable->is_this() ? jsgraph()->TrueConstant() + : jsgraph()->FalseConstant(); + case Variable::LOOKUP: { + // Dynamic lookup of context variable (anywhere in the chain). 
+ Node* name = jsgraph()->Constant(variable->name()); + Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2); + return NewNode(op, current_context(), name); + } + } + UNREACHABLE(); + return NULL; +} + + +Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value, + Token::Value op, + BailoutId bailout_id) { + Node* the_hole = jsgraph()->TheHoleConstant(); + VariableMode mode = variable->mode(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + // Global var, const, or let variable. + Node* global = BuildLoadGlobalObject(); + PrintableUnique<Name> name = MakeUnique(variable->name()); + Operator* op = javascript()->StoreNamed(name); + Node* store = NewNode(op, global, value); + BuildLazyBailout(store, bailout_id); + return store; + } + case Variable::PARAMETER: + case Variable::LOCAL: + // Local var, const, or let variable. + if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) { + // Perform an initialization check for legacy const variables. + Node* current = environment()->Lookup(variable); + if (current->op() != the_hole->op()) { + value = BuildHoleCheckSilent(current, value, current); + } + } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) { + // Non-initializing assignments to legacy const is ignored. + return value; + } else if (mode == LET && op != Token::INIT_LET) { + // Perform an initialization check for let declared variables. + // Also note that the dynamic hole-check is only done to ensure that + // this does not break in the presence of do-expressions within the + // temporal dead zone of a let declared variable. + Node* current = environment()->Lookup(variable); + if (current->op() == the_hole->op()) { + value = BuildThrowReferenceError(variable); + } else if (value->opcode() == IrOpcode::kPhi) { + value = BuildHoleCheckThrow(current, variable, value); + } + } else if (mode == CONST && op != Token::INIT_CONST) { + // All assignments to const variables are early errors. 
+ UNREACHABLE(); + } + environment()->Bind(variable, value); + return value; + case Variable::CONTEXT: { + // Context variable (potentially up the context chain). + int depth = current_scope()->ContextChainLength(variable->scope()); + if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) { + // Perform an initialization check for legacy const variables. + Operator* op = + javascript()->LoadContext(depth, variable->index(), false); + Node* current = NewNode(op, current_context()); + value = BuildHoleCheckSilent(current, value, current); + } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) { + // Non-initializing assignments to legacy const is ignored. + return value; + } else if (mode == LET && op != Token::INIT_LET) { + // Perform an initialization check for let declared variables. + Operator* op = + javascript()->LoadContext(depth, variable->index(), false); + Node* current = NewNode(op, current_context()); + value = BuildHoleCheckThrow(current, variable, value); + } else if (mode == CONST && op != Token::INIT_CONST) { + // All assignments to const variables are early errors. + UNREACHABLE(); + } + Operator* op = javascript()->StoreContext(depth, variable->index()); + return NewNode(op, current_context(), value); + } + case Variable::LOOKUP: { + // Dynamic lookup of context variable (anywhere in the chain). + Node* name = jsgraph()->Constant(variable->name()); + Node* strict = jsgraph()->Constant(strict_mode()); + // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for + // initializations of const declarations. + Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4); + return NewNode(op, value, current_context(), name, strict); + } + } + UNREACHABLE(); + return NULL; +} + + +Node* AstGraphBuilder::BuildLoadBuiltinsObject() { + // TODO(mstarzinger): Better load from function context, otherwise optimized + // code cannot be shared across native contexts. 
+ return jsgraph()->Constant(handle(info()->context()->builtins())); +} + + +Node* AstGraphBuilder::BuildLoadGlobalObject() { +#if 0 + Node* context = GetFunctionContext(); + // TODO(mstarzinger): Use mid-level operator on FixedArray instead of the + // JS-level operator that targets JSObject. + Node* index = jsgraph()->Constant(Context::GLOBAL_OBJECT_INDEX); + return NewNode(javascript()->LoadProperty(), context, index); +#else + // TODO(mstarzinger): Better load from function context, otherwise optimized + // code cannot be shared across native contexts. See unused code above. + return jsgraph()->Constant(handle(info()->context()->global_object())); +#endif +} + + +Node* AstGraphBuilder::BuildToBoolean(Node* value) { + // TODO(mstarzinger): Possible optimization is to NOP for boolean values. + return NewNode(javascript()->ToBoolean(), value); +} + + +Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) { + // TODO(mstarzinger): Should be unified with the VisitThrow implementation. 
+ Node* variable_name = jsgraph()->Constant(variable->name()); + Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1); + return NewNode(op, variable_name); +} + + +Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) { + Operator* js_op; + switch (op) { + case Token::BIT_OR: + js_op = javascript()->BitwiseOr(); + break; + case Token::BIT_AND: + js_op = javascript()->BitwiseAnd(); + break; + case Token::BIT_XOR: + js_op = javascript()->BitwiseXor(); + break; + case Token::SHL: + js_op = javascript()->ShiftLeft(); + break; + case Token::SAR: + js_op = javascript()->ShiftRight(); + break; + case Token::SHR: + js_op = javascript()->ShiftRightLogical(); + break; + case Token::ADD: + js_op = javascript()->Add(); + break; + case Token::SUB: + js_op = javascript()->Subtract(); + break; + case Token::MUL: + js_op = javascript()->Multiply(); + break; + case Token::DIV: + js_op = javascript()->Divide(); + break; + case Token::MOD: + js_op = javascript()->Modulus(); + break; + default: + UNREACHABLE(); + js_op = NULL; + } + return NewNode(js_op, left, right); +} + + +void AstGraphBuilder::BuildLazyBailout(Node* node, BailoutId ast_id) { + if (OperatorProperties::CanLazilyDeoptimize(node->op())) { + // The deopting node should have an outgoing control dependency. + DCHECK(environment()->GetControlDependency() == node); + + StructuredGraphBuilder::Environment* continuation_env = environment(); + // Create environment for the deoptimization block, and build the block. + StructuredGraphBuilder::Environment* deopt_env = + CopyEnvironment(continuation_env); + set_environment(deopt_env); + + NewNode(common()->LazyDeoptimization()); + + // TODO(jarin) If ast_id.IsNone(), perhaps we should generate an empty + // deopt block and make sure there is no patch entry for this (so + // that the deoptimizer dies when trying to deoptimize here). 
+ + Node* state_node = environment()->Checkpoint(ast_id); + + Node* deoptimize_node = NewNode(common()->Deoptimize(), state_node); + + UpdateControlDependencyToLeaveFunction(deoptimize_node); + + // Continue with the original environment. + set_environment(continuation_env); + + NewNode(common()->Continuation()); + } +} + + +void AstGraphBuilder::BuildLazyBailoutWithPushedNode(Node* node, + BailoutId ast_id) { + environment()->Push(node); + BuildLazyBailout(node, ast_id); + environment()->Pop(); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/ast-graph-builder.h nodejs-0.11.15/deps/v8/src/compiler/ast-graph-builder.h --- nodejs-0.11.13/deps/v8/src/compiler/ast-graph-builder.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/ast-graph-builder.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,428 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_ +#define V8_COMPILER_AST_GRAPH_BUILDER_H_ + +#include "src/v8.h" + +#include "src/ast.h" +#include "src/compiler/graph-builder.h" +#include "src/compiler/js-graph.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class ControlBuilder; +class LoopBuilder; +class Graph; + +// The AstGraphBuilder produces a high-level IR graph, based on an +// underlying AST. The produced graph can either be compiled into a +// stand-alone function or be wired into another graph for the purposes +// of function inlining. +class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor { + public: + AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph); + + // Creates a graph by visiting the entire AST. 
+ bool CreateGraph(); + + protected: + class AstContext; + class AstEffectContext; + class AstValueContext; + class AstTestContext; + class BreakableScope; + class ContextScope; + class Environment; + + Environment* environment() { + return reinterpret_cast<Environment*>( + StructuredGraphBuilder::environment()); + } + + AstContext* ast_context() const { return ast_context_; } + BreakableScope* breakable() const { return breakable_; } + ContextScope* execution_context() const { return execution_context_; } + + void set_ast_context(AstContext* ctx) { ast_context_ = ctx; } + void set_breakable(BreakableScope* brk) { breakable_ = brk; } + void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; } + + // Support for control flow builders. The concrete type of the environment + // depends on the graph builder, but environments themselves are not virtual. + typedef StructuredGraphBuilder::Environment BaseEnvironment; + virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env); + + // TODO(mstarzinger): The pipeline only needs to be a friend to access the + // function context. Remove as soon as the context is a parameter. + friend class Pipeline; + + // Getters for values in the activation record. + Node* GetFunctionClosure(); + Node* GetFunctionContext(); + + // + // The following build methods all generate graph fragments and return one + // resulting node. The operand stack height remains the same, variables and + // other dependencies tracked by the environment might be mutated though. + // + + // Builder to create a local function context. + Node* BuildLocalFunctionContext(Node* context, Node* closure); + + // Builder to create an arguments object if it is used. + Node* BuildArgumentsObject(Variable* arguments); + + // Builders for variable load and assignment. 
+ Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op, + BailoutId bailout_id); + Node* BuildVariableDelete(Variable* var); + Node* BuildVariableLoad(Variable* var, BailoutId bailout_id, + ContextualMode mode = CONTEXTUAL); + + // Builders for accessing the function context. + Node* BuildLoadBuiltinsObject(); + Node* BuildLoadGlobalObject(); + Node* BuildLoadClosure(); + + // Builders for automatic type conversion. + Node* BuildToBoolean(Node* value); + + // Builders for error reporting at runtime. + Node* BuildThrowReferenceError(Variable* var); + + // Builders for dynamic hole-checks at runtime. + Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole); + Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole); + + // Builders for binary operations. + Node* BuildBinaryOp(Node* left, Node* right, Token::Value op); + +#define DECLARE_VISIT(type) virtual void Visit##type(type* node); + // Visiting functions for AST nodes make this an AstVisitor. + AST_NODE_LIST(DECLARE_VISIT) +#undef DECLARE_VISIT + + // Visiting function for declarations list is overridden. + virtual void VisitDeclarations(ZoneList<Declaration*>* declarations); + + private: + CompilationInfo* info_; + AstContext* ast_context_; + JSGraph* jsgraph_; + + // List of global declarations for functions and variables. + ZoneList<Handle<Object> > globals_; + + // Stack of breakable statements entered by the visitor. + BreakableScope* breakable_; + + // Stack of context objects pushed onto the chain by the visitor. + ContextScope* execution_context_; + + // Nodes representing values in the activation record. 
+ SetOncePointer<Node> function_closure_; + SetOncePointer<Node> function_context_; + + CompilationInfo* info() { return info_; } + StrictMode strict_mode() { return info()->strict_mode(); } + JSGraph* jsgraph() { return jsgraph_; } + JSOperatorBuilder* javascript() { return jsgraph_->javascript(); } + ZoneList<Handle<Object> >* globals() { return &globals_; } + + // Current scope during visitation. + inline Scope* current_scope() const; + + // Process arguments to a call by popping {arity} elements off the operand + // stack and build a call node using the given call operator. + Node* ProcessArguments(Operator* op, int arity); + + // Visit statements. + void VisitIfNotNull(Statement* stmt); + + // Visit expressions. + void VisitForTest(Expression* expr); + void VisitForEffect(Expression* expr); + void VisitForValue(Expression* expr); + void VisitForValueOrNull(Expression* expr); + void VisitForValues(ZoneList<Expression*>* exprs); + + // Common for all IterationStatement bodies. + void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int); + + // Dispatched from VisitCallRuntime. + void VisitCallJSRuntime(CallRuntime* expr); + + // Dispatched from VisitUnaryOperation. + void VisitDelete(UnaryOperation* expr); + void VisitVoid(UnaryOperation* expr); + void VisitTypeof(UnaryOperation* expr); + void VisitNot(UnaryOperation* expr); + + // Dispatched from VisitBinaryOperation. + void VisitComma(BinaryOperation* expr); + void VisitLogicalExpression(BinaryOperation* expr); + void VisitArithmeticExpression(BinaryOperation* expr); + + // Dispatched from VisitForInStatement. 
+ void VisitForInAssignment(Expression* expr, Node* value); + + void BuildLazyBailout(Node* node, BailoutId ast_id); + void BuildLazyBailoutWithPushedNode(Node* node, BailoutId ast_id); + + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); + DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder); +}; + + +// The abstract execution environment for generated code consists of +// parameter variables, local variables and the operand stack. The +// environment will perform proper SSA-renaming of all tracked nodes +// at split and merge points in the control flow. Internally all the +// values are stored in one list using the following layout: +// +// [parameters (+receiver)] [locals] [operand stack] +// +class AstGraphBuilder::Environment + : public StructuredGraphBuilder::Environment { + public: + Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency); + Environment(const Environment& copy); + + int parameters_count() const { return parameters_count_; } + int locals_count() const { return locals_count_; } + int stack_height() { + return static_cast<int>(values()->size()) - parameters_count_ - + locals_count_; + } + + // Operations on parameter or local variables. The parameter indices are + // shifted by 1 (receiver is parameter index -1 but environment index 0). + void Bind(Variable* variable, Node* node) { + DCHECK(variable->IsStackAllocated()); + if (variable->IsParameter()) { + values()->at(variable->index() + 1) = node; + parameters_dirty_ = true; + } else { + DCHECK(variable->IsStackLocal()); + values()->at(variable->index() + parameters_count_) = node; + locals_dirty_ = true; + } + } + Node* Lookup(Variable* variable) { + DCHECK(variable->IsStackAllocated()); + if (variable->IsParameter()) { + return values()->at(variable->index() + 1); + } else { + DCHECK(variable->IsStackLocal()); + return values()->at(variable->index() + parameters_count_); + } + } + + // Operations on the operand stack. 
+ void Push(Node* node) { + values()->push_back(node); + stack_dirty_ = true; + } + Node* Top() { + DCHECK(stack_height() > 0); + return values()->back(); + } + Node* Pop() { + DCHECK(stack_height() > 0); + Node* back = values()->back(); + values()->pop_back(); + stack_dirty_ = true; + return back; + } + + // Direct mutations of the operand stack. + void Poke(int depth, Node* node) { + DCHECK(depth >= 0 && depth < stack_height()); + int index = static_cast<int>(values()->size()) - depth - 1; + values()->at(index) = node; + stack_dirty_ = true; + } + Node* Peek(int depth) { + DCHECK(depth >= 0 && depth < stack_height()); + int index = static_cast<int>(values()->size()) - depth - 1; + return values()->at(index); + } + void Drop(int depth) { + DCHECK(depth >= 0 && depth <= stack_height()); + values()->erase(values()->end() - depth, values()->end()); + stack_dirty_ = true; + } + + // Preserve a checkpoint of the environment for the IR graph. Any + // further mutation of the environment will not affect checkpoints. + Node* Checkpoint(BailoutId ast_id); + + private: + int parameters_count_; + int locals_count_; + Node* parameters_node_; + Node* locals_node_; + Node* stack_node_; + bool parameters_dirty_; + bool locals_dirty_; + bool stack_dirty_; +}; + + +// Each expression in the AST is evaluated in a specific context. This context +// decides how the evaluation result is passed up the visitor. +class AstGraphBuilder::AstContext BASE_EMBEDDED { + public: + bool IsEffect() const { return kind_ == Expression::kEffect; } + bool IsValue() const { return kind_ == Expression::kValue; } + bool IsTest() const { return kind_ == Expression::kTest; } + + // Plug a node into this expression context. Call this function in tail + // position in the Visit functions for expressions. + virtual void ProduceValue(Node* value) = 0; + virtual void ProduceValueWithLazyBailout(Node* value) = 0; + + // Unplugs a node from this expression context. 
Call this to retrieve the + // result of another Visit function that already plugged the context. + virtual Node* ConsumeValue() = 0; + + // Shortcut for "context->ProduceValue(context->ConsumeValue())". + void ReplaceValue() { ProduceValue(ConsumeValue()); } + + protected: + AstContext(AstGraphBuilder* owner, Expression::Context kind, + BailoutId bailout_id); + virtual ~AstContext(); + + AstGraphBuilder* owner() const { return owner_; } + Environment* environment() const { return owner_->environment(); } + +// We want to be able to assert, in a context-specific way, that the stack +// height makes sense when the context is filled. +#ifdef DEBUG + int original_height_; +#endif + + BailoutId bailout_id_; + + private: + Expression::Context kind_; + AstGraphBuilder* owner_; + AstContext* outer_; +}; + + +// Context to evaluate expression for its side effects only. +class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext { + public: + explicit AstEffectContext(AstGraphBuilder* owner, BailoutId bailout_id) + : AstContext(owner, Expression::kEffect, bailout_id) {} + virtual ~AstEffectContext(); + virtual void ProduceValue(Node* value) V8_OVERRIDE; + virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE; + virtual Node* ConsumeValue() V8_OVERRIDE; +}; + + +// Context to evaluate expression for its value (and side effects). +class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext { + public: + explicit AstValueContext(AstGraphBuilder* owner, BailoutId bailout_id) + : AstContext(owner, Expression::kValue, bailout_id) {} + virtual ~AstValueContext(); + virtual void ProduceValue(Node* value) V8_OVERRIDE; + virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE; + virtual Node* ConsumeValue() V8_OVERRIDE; +}; + + +// Context to evaluate expression for a condition value (and side effects). 
+class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext { + public: + explicit AstTestContext(AstGraphBuilder* owner, BailoutId bailout_id) + : AstContext(owner, Expression::kTest, bailout_id) {} + virtual ~AstTestContext(); + virtual void ProduceValue(Node* value) V8_OVERRIDE; + virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE; + virtual Node* ConsumeValue() V8_OVERRIDE; +}; + + +// Scoped class tracking breakable statements entered by the visitor. Allows to +// properly 'break' and 'continue' iteration statements as well as to 'break' +// from blocks within switch statements. +class AstGraphBuilder::BreakableScope BASE_EMBEDDED { + public: + BreakableScope(AstGraphBuilder* owner, BreakableStatement* target, + ControlBuilder* control, int drop_extra) + : owner_(owner), + target_(target), + next_(owner->breakable()), + control_(control), + drop_extra_(drop_extra) { + owner_->set_breakable(this); // Push. + } + + ~BreakableScope() { + owner_->set_breakable(next_); // Pop. + } + + // Either 'break' or 'continue' the target statement. + void BreakTarget(BreakableStatement* target); + void ContinueTarget(BreakableStatement* target); + + private: + AstGraphBuilder* owner_; + BreakableStatement* target_; + BreakableScope* next_; + ControlBuilder* control_; + int drop_extra_; + + // Find the correct scope for the target statement. Note that this also drops + // extra operands from the environment for each scope skipped along the way. + BreakableScope* FindBreakable(BreakableStatement* target); +}; + + +// Scoped class tracking context objects created by the visitor. Represents +// mutations of the context chain within the function body and allows to +// change the current {scope} and {context} during visitation. 
+class AstGraphBuilder::ContextScope BASE_EMBEDDED { + public: + ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context) + : owner_(owner), + next_(owner->execution_context()), + outer_(owner->current_context()), + scope_(scope) { + owner_->set_execution_context(this); // Push. + owner_->set_current_context(context); + } + + ~ContextScope() { + owner_->set_execution_context(next_); // Pop. + owner_->set_current_context(outer_); + } + + // Current scope during visitation. + Scope* scope() const { return scope_; } + + private: + AstGraphBuilder* owner_; + ContextScope* next_; + Node* outer_; + Scope* scope_; +}; + +Scope* AstGraphBuilder::current_scope() const { + return execution_context_->scope(); +} +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_AST_GRAPH_BUILDER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/change-lowering.cc nodejs-0.11.15/deps/v8/src/compiler/change-lowering.cc --- nodejs-0.11.13/deps/v8/src/compiler/change-lowering.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/change-lowering.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,260 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/change-lowering.h" + +#include "src/compiler/common-node-cache.h" +#include "src/compiler/graph.h" + +namespace v8 { +namespace internal { +namespace compiler { + +ChangeLoweringBase::ChangeLoweringBase(Graph* graph, Linkage* linkage, + CommonNodeCache* cache) + : graph_(graph), + isolate_(graph->zone()->isolate()), + linkage_(linkage), + cache_(cache), + common_(graph->zone()), + machine_(graph->zone()) {} + + +ChangeLoweringBase::~ChangeLoweringBase() {} + + +Node* ChangeLoweringBase::ExternalConstant(ExternalReference reference) { + Node** loc = cache()->FindExternalConstant(reference); + if (*loc == NULL) { + *loc = graph()->NewNode(common()->ExternalConstant(reference)); + } + return *loc; +} + + +Node* ChangeLoweringBase::HeapConstant(PrintableUnique<HeapObject> value) { + // TODO(bmeurer): Use common node cache. + return graph()->NewNode(common()->HeapConstant(value)); +} + + +Node* ChangeLoweringBase::ImmovableHeapConstant(Handle<HeapObject> value) { + return HeapConstant( + PrintableUnique<HeapObject>::CreateImmovable(graph()->zone(), value)); +} + + +Node* ChangeLoweringBase::Int32Constant(int32_t value) { + Node** loc = cache()->FindInt32Constant(value); + if (*loc == NULL) { + *loc = graph()->NewNode(common()->Int32Constant(value)); + } + return *loc; +} + + +Node* ChangeLoweringBase::NumberConstant(double value) { + Node** loc = cache()->FindNumberConstant(value); + if (*loc == NULL) { + *loc = graph()->NewNode(common()->NumberConstant(value)); + } + return *loc; +} + + +Node* ChangeLoweringBase::CEntryStubConstant() { + if (!c_entry_stub_constant_.is_set()) { + c_entry_stub_constant_.set( + ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode())); + } + return c_entry_stub_constant_.get(); +} + + +Node* ChangeLoweringBase::TrueConstant() { + if (!true_constant_.is_set()) { + true_constant_.set( + ImmovableHeapConstant(isolate()->factory()->true_value())); + } + return true_constant_.get(); +} + + +Node* 
ChangeLoweringBase::FalseConstant() { + if (!false_constant_.is_set()) { + false_constant_.set( + ImmovableHeapConstant(isolate()->factory()->false_value())); + } + return false_constant_.get(); +} + + +Reduction ChangeLoweringBase::ChangeBitToBool(Node* val, Node* control) { + Node* branch = graph()->NewNode(common()->Branch(), val, control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* true_value = TrueConstant(); + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* false_value = FalseConstant(); + + Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); + Node* phi = + graph()->NewNode(common()->Phi(2), true_value, false_value, merge); + + return Replace(phi); +} + + +template <size_t kPointerSize> +ChangeLowering<kPointerSize>::ChangeLowering(Graph* graph, Linkage* linkage) + : ChangeLoweringBase(graph, linkage, + new (graph->zone()) CommonNodeCache(graph->zone())) {} + + +template <size_t kPointerSize> +Reduction ChangeLowering<kPointerSize>::Reduce(Node* node) { + Node* control = graph()->start(); + Node* effect = control; + switch (node->opcode()) { + case IrOpcode::kChangeBitToBool: + return ChangeBitToBool(node->InputAt(0), control); + case IrOpcode::kChangeBoolToBit: + return ChangeBoolToBit(node->InputAt(0)); + case IrOpcode::kChangeInt32ToTagged: + return ChangeInt32ToTagged(node->InputAt(0), effect, control); + case IrOpcode::kChangeTaggedToFloat64: + return ChangeTaggedToFloat64(node->InputAt(0), effect, control); + default: + return NoChange(); + } + UNREACHABLE(); + return NoChange(); +} + + +template <> +Reduction ChangeLowering<4>::ChangeBoolToBit(Node* val) { + return Replace( + graph()->NewNode(machine()->Word32Equal(), val, TrueConstant())); +} + + +template <> +Reduction ChangeLowering<8>::ChangeBoolToBit(Node* val) { + return Replace( + graph()->NewNode(machine()->Word64Equal(), val, TrueConstant())); +} + + +template <> +Reduction ChangeLowering<4>::ChangeInt32ToTagged(Node* 
val, Node* effect, + Node* control) { + Node* context = NumberConstant(0); + + Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val); + Node* ovf = graph()->NewNode(common()->Projection(1), add); + + Node* branch = graph()->NewNode(common()->Branch(), ovf, control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(), val); + + // TODO(bmeurer): Inline allocation if possible. + const Runtime::Function* fn = + Runtime::FunctionForId(Runtime::kAllocateHeapNumber); + DCHECK_EQ(0, fn->nargs); + CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor( + fn->function_id, 0, Operator::kNoProperties); + Node* heap_number = + graph()->NewNode(common()->Call(desc), CEntryStubConstant(), + ExternalConstant(ExternalReference(fn, isolate())), + Int32Constant(0), context, effect, if_true); + + Node* store = graph()->NewNode( + machine()->Store(kMachineFloat64, kNoWriteBarrier), heap_number, + Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), number, effect, + heap_number); + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* smi = graph()->NewNode(common()->Projection(0), add); + + Node* merge = graph()->NewNode(common()->Merge(2), store, if_false); + Node* phi = graph()->NewNode(common()->Phi(2), heap_number, smi, merge); + + return Replace(phi); +} + + +template <> +Reduction ChangeLowering<8>::ChangeInt32ToTagged(Node* val, Node* effect, + Node* control) { + return Replace(graph()->NewNode( + machine()->Word64Shl(), val, + Int32Constant(SmiTagging<8>::kSmiShiftSize + kSmiTagSize))); +} + + +template <> +Reduction ChangeLowering<4>::ChangeTaggedToFloat64(Node* val, Node* effect, + Node* control) { + Node* branch = graph()->NewNode( + common()->Branch(), + graph()->NewNode(machine()->Word32And(), val, Int32Constant(kSmiTagMask)), + control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* load = graph()->NewNode( + 
machine()->Load(kMachineFloat64), val, + Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), if_true); + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* number = graph()->NewNode( + machine()->ChangeInt32ToFloat64(), + graph()->NewNode( + machine()->Word32Sar(), val, + Int32Constant(SmiTagging<4>::kSmiShiftSize + kSmiTagSize))); + + Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); + Node* phi = graph()->NewNode(common()->Phi(2), load, number, merge); + + return Replace(phi); +} + + +template <> +Reduction ChangeLowering<8>::ChangeTaggedToFloat64(Node* val, Node* effect, + Node* control) { + Node* branch = graph()->NewNode( + common()->Branch(), + graph()->NewNode(machine()->Word64And(), val, Int32Constant(kSmiTagMask)), + control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* load = graph()->NewNode( + machine()->Load(kMachineFloat64), val, + Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), if_true); + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* number = graph()->NewNode( + machine()->ChangeInt32ToFloat64(), + graph()->NewNode( + machine()->ConvertInt64ToInt32(), + graph()->NewNode( + machine()->Word64Sar(), val, + Int32Constant(SmiTagging<8>::kSmiShiftSize + kSmiTagSize)))); + + Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); + Node* phi = graph()->NewNode(common()->Phi(2), load, number, merge); + + return Replace(phi); +} + + +template class ChangeLowering<4>; +template class ChangeLowering<8>; + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/change-lowering.h nodejs-0.11.15/deps/v8/src/compiler/change-lowering.h --- nodejs-0.11.13/deps/v8/src/compiler/change-lowering.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/change-lowering.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,79 @@ +// Copyright 2014 the V8 project 
authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_CHANGE_LOWERING_H_ +#define V8_COMPILER_CHANGE_LOWERING_H_ + +#include "include/v8.h" +#include "src/compiler/common-operator.h" +#include "src/compiler/graph-reducer.h" +#include "src/compiler/machine-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Forward declarations. +class CommonNodeCache; +class Linkage; + +class ChangeLoweringBase : public Reducer { + public: + ChangeLoweringBase(Graph* graph, Linkage* linkage, CommonNodeCache* cache); + virtual ~ChangeLoweringBase(); + + protected: + Node* ExternalConstant(ExternalReference reference); + Node* HeapConstant(PrintableUnique<HeapObject> value); + Node* ImmovableHeapConstant(Handle<HeapObject> value); + Node* Int32Constant(int32_t value); + Node* NumberConstant(double value); + Node* CEntryStubConstant(); + Node* TrueConstant(); + Node* FalseConstant(); + + Reduction ChangeBitToBool(Node* val, Node* control); + + Graph* graph() const { return graph_; } + Isolate* isolate() const { return isolate_; } + Linkage* linkage() const { return linkage_; } + CommonNodeCache* cache() const { return cache_; } + CommonOperatorBuilder* common() { return &common_; } + MachineOperatorBuilder* machine() { return &machine_; } + + private: + Graph* graph_; + Isolate* isolate_; + Linkage* linkage_; + CommonNodeCache* cache_; + CommonOperatorBuilder common_; + MachineOperatorBuilder machine_; + + SetOncePointer<Node> c_entry_stub_constant_; + SetOncePointer<Node> true_constant_; + SetOncePointer<Node> false_constant_; +}; + + +template <size_t kPointerSize = kApiPointerSize> +class ChangeLowering V8_FINAL : public ChangeLoweringBase { + public: + ChangeLowering(Graph* graph, Linkage* linkage); + ChangeLowering(Graph* graph, Linkage* linkage, CommonNodeCache* cache) + : ChangeLoweringBase(graph, linkage, cache) {} + virtual ~ChangeLowering() {} 
+ + virtual Reduction Reduce(Node* node) V8_OVERRIDE; + + private: + Reduction ChangeBoolToBit(Node* val); + Reduction ChangeInt32ToTagged(Node* val, Node* effect, Node* control); + Reduction ChangeTaggedToFloat64(Node* val, Node* effect, Node* control); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_CHANGE_LOWERING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/code-generator.cc nodejs-0.11.15/deps/v8/src/compiler/code-generator.cc --- nodejs-0.11.13/deps/v8/src/compiler/code-generator.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/code-generator.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,381 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/code-generator.h" + +#include "src/compiler/code-generator-impl.h" +#include "src/compiler/linkage.h" +#include "src/compiler/pipeline.h" + +namespace v8 { +namespace internal { +namespace compiler { + +CodeGenerator::CodeGenerator(InstructionSequence* code) + : code_(code), + current_block_(NULL), + current_source_position_(SourcePosition::Invalid()), + masm_(code->zone()->isolate(), NULL, 0), + resolver_(this), + safepoints_(code->zone()), + lazy_deoptimization_entries_( + LazyDeoptimizationEntries::allocator_type(code->zone())), + deoptimization_states_( + DeoptimizationStates::allocator_type(code->zone())), + deoptimization_literals_(Literals::allocator_type(code->zone())), + translations_(code->zone()) { + deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL); +} + + +Handle<Code> CodeGenerator::GenerateCode() { + CompilationInfo* info = linkage()->info(); + + // Emit a code line info recording start event. 
+ PositionsRecorder* recorder = masm()->positions_recorder(); + LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder)); + + // Place function entry hook if requested to do so. + if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm()); + } + + // Architecture-specific, linkage-specific prologue. + info->set_prologue_offset(masm()->pc_offset()); + AssemblePrologue(); + + // Assemble all instructions. + for (InstructionSequence::const_iterator i = code()->begin(); + i != code()->end(); ++i) { + AssembleInstruction(*i); + } + + FinishCode(masm()); + + safepoints()->Emit(masm(), frame()->GetSpillSlotCount()); + + // TODO(titzer): what are the right code flags here? + Code::Kind kind = Code::STUB; + if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) { + kind = Code::OPTIMIZED_FUNCTION; + } + Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue( + masm(), Code::ComputeFlags(kind), info); + result->set_is_turbofanned(true); + result->set_stack_slots(frame()->GetSpillSlotCount()); + result->set_safepoint_table_offset(safepoints()->GetCodeOffset()); + + PopulateDeoptimizationData(result); + + // Emit a code line info recording stop event. 
+ void* line_info = recorder->DetachJITHandlerData(); + LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info)); + + return result; +} + + +void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind, + int arguments, + Safepoint::DeoptMode deopt_mode) { + const ZoneList<InstructionOperand*>* operands = + pointers->GetNormalizedOperands(); + Safepoint safepoint = + safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode); + for (int i = 0; i < operands->length(); i++) { + InstructionOperand* pointer = operands->at(i); + if (pointer->IsStackSlot()) { + safepoint.DefinePointerSlot(pointer->index(), zone()); + } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { + Register reg = Register::FromAllocationIndex(pointer->index()); + safepoint.DefinePointerRegister(reg, zone()); + } + } +} + + +void CodeGenerator::AssembleInstruction(Instruction* instr) { + if (instr->IsBlockStart()) { + // Bind a label for a block start and handle parallel moves. + BlockStartInstruction* block_start = BlockStartInstruction::cast(instr); + current_block_ = block_start->block(); + if (FLAG_code_comments) { + // TODO(titzer): these code comments are a giant memory leak. + Vector<char> buffer = Vector<char>::New(32); + SNPrintF(buffer, "-- B%d start --", block_start->block()->id()); + masm()->RecordComment(buffer.start()); + } + masm()->bind(block_start->label()); + } + if (instr->IsGapMoves()) { + // Handle parallel moves associated with the gap instruction. + AssembleGap(GapInstruction::cast(instr)); + } else if (instr->IsSourcePosition()) { + AssembleSourcePosition(SourcePositionInstruction::cast(instr)); + } else { + // Assemble architecture-specific code for the instruction. + AssembleArchInstruction(instr); + + // Assemble branches or boolean materializations after this instruction. 
+ FlagsMode mode = FlagsModeField::decode(instr->opcode()); + FlagsCondition condition = FlagsConditionField::decode(instr->opcode()); + switch (mode) { + case kFlags_none: + return; + case kFlags_set: + return AssembleArchBoolean(instr, condition); + case kFlags_branch: + return AssembleArchBranch(instr, condition); + } + UNREACHABLE(); + } +} + + +void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) { + SourcePosition source_position = instr->source_position(); + if (source_position == current_source_position_) return; + DCHECK(!source_position.IsInvalid()); + if (!source_position.IsUnknown()) { + int code_pos = source_position.raw(); + masm()->positions_recorder()->RecordPosition(source_position.raw()); + masm()->positions_recorder()->WriteRecordedPositions(); + if (FLAG_code_comments) { + Vector<char> buffer = Vector<char>::New(256); + CompilationInfo* info = linkage()->info(); + int ln = Script::GetLineNumber(info->script(), code_pos); + int cn = Script::GetColumnNumber(info->script(), code_pos); + if (info->script()->name()->IsString()) { + Handle<String> file(String::cast(info->script()->name())); + base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --", + file->ToCString().get(), ln, cn); + } else { + base::OS::SNPrintF(buffer.start(), buffer.length(), + "-- <unknown>:%d:%d --", ln, cn); + } + masm()->RecordComment(buffer.start()); + } + } + current_source_position_ = source_position; +} + + +void CodeGenerator::AssembleGap(GapInstruction* instr) { + for (int i = GapInstruction::FIRST_INNER_POSITION; + i <= GapInstruction::LAST_INNER_POSITION; i++) { + GapInstruction::InnerPosition inner_pos = + static_cast<GapInstruction::InnerPosition>(i); + ParallelMove* move = instr->GetParallelMove(inner_pos); + if (move != NULL) resolver()->Resolve(move); + } +} + + +void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) { + CompilationInfo* info = linkage()->info(); + int deopt_count = 
code()->GetDeoptimizationEntryCount(); + int patch_count = static_cast<int>(lazy_deoptimization_entries_.size()); + if (patch_count == 0 && deopt_count == 0) return; + Handle<DeoptimizationInputData> data = DeoptimizationInputData::New( + isolate(), deopt_count, patch_count, TENURED); + + Handle<ByteArray> translation_array = + translations_.CreateByteArray(isolate()->factory()); + + data->SetTranslationByteArray(*translation_array); + data->SetInlinedFunctionCount(Smi::FromInt(0)); + data->SetOptimizationId(Smi::FromInt(info->optimization_id())); + // TODO(jarin) The following code was copied over from Lithium, not sure + // whether the scope or the IsOptimizing condition are really needed. + if (info->IsOptimizing()) { + // Reference to shared function info does not change between phases. + AllowDeferredHandleDereference allow_handle_dereference; + data->SetSharedFunctionInfo(*info->shared_info()); + } else { + data->SetSharedFunctionInfo(Smi::FromInt(0)); + } + + Handle<FixedArray> literals = isolate()->factory()->NewFixedArray( + static_cast<int>(deoptimization_literals_.size()), TENURED); + { + AllowDeferredHandleDereference copy_handles; + for (unsigned i = 0; i < deoptimization_literals_.size(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); + } + + // No OSR in Turbofan yet... + BailoutId osr_ast_id = BailoutId::None(); + data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt())); + data->SetOsrPcOffset(Smi::FromInt(-1)); + + // Populate deoptimization entries. + for (int i = 0; i < deopt_count; i++) { + FrameStateDescriptor* descriptor = code()->GetDeoptimizationEntry(i); + data->SetAstId(i, descriptor->bailout_id()); + CHECK_NE(NULL, deoptimization_states_[i]); + data->SetTranslationIndex( + i, Smi::FromInt(deoptimization_states_[i]->translation_id_)); + data->SetArgumentsStackHeight(i, Smi::FromInt(0)); + data->SetPc(i, Smi::FromInt(-1)); + } + + // Populate the return address patcher entries. 
+ for (int i = 0; i < patch_count; ++i) { + LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i]; + DCHECK(entry.position_after_call() == entry.continuation()->pos() || + IsNopForSmiCodeInlining(code_object, entry.position_after_call(), + entry.continuation()->pos())); + data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call())); + data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos())); + } + + code_object->set_deoptimization_data(*data); +} + + +void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) { + InstructionOperandConverter i(this, instr); + + Label after_call; + masm()->bind(&after_call); + + // The continuation and deoptimization are the last two inputs: + BasicBlock* cont_block = + i.InputBlock(static_cast<int>(instr->InputCount()) - 2); + BasicBlock* deopt_block = + i.InputBlock(static_cast<int>(instr->InputCount()) - 1); + + Label* cont_label = code_->GetLabel(cont_block); + Label* deopt_label = code_->GetLabel(deopt_block); + + lazy_deoptimization_entries_.push_back( + LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label)); +} + + +int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) { + int result = static_cast<int>(deoptimization_literals_.size()); + for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) { + if (deoptimization_literals_[i].is_identical_to(literal)) return i; + } + deoptimization_literals_.push_back(literal); + return result; +} + + +void CodeGenerator::BuildTranslation(Instruction* instr, + int deoptimization_id) { + // We should build translation only once. 
+ DCHECK_EQ(NULL, deoptimization_states_[deoptimization_id]); + + FrameStateDescriptor* descriptor = + code()->GetDeoptimizationEntry(deoptimization_id); + Translation translation(&translations_, 1, 1, zone()); + translation.BeginJSFrame(descriptor->bailout_id(), + Translation::kSelfLiteralId, + descriptor->size() - descriptor->parameters_count()); + + for (int i = 0; i < descriptor->size(); i++) { + AddTranslationForOperand(&translation, instr, instr->InputAt(i)); + } + + deoptimization_states_[deoptimization_id] = + new (zone()) DeoptimizationState(translation.index()); +} + + +void CodeGenerator::AddTranslationForOperand(Translation* translation, + Instruction* instr, + InstructionOperand* op) { + if (op->IsStackSlot()) { + translation->StoreStackSlot(op->index()); + } else if (op->IsDoubleStackSlot()) { + translation->StoreDoubleStackSlot(op->index()); + } else if (op->IsRegister()) { + InstructionOperandConverter converter(this, instr); + translation->StoreRegister(converter.ToRegister(op)); + } else if (op->IsDoubleRegister()) { + InstructionOperandConverter converter(this, instr); + translation->StoreDoubleRegister(converter.ToDoubleRegister(op)); + } else if (op->IsImmediate()) { + InstructionOperandConverter converter(this, instr); + Constant constant = converter.ToConstant(op); + Handle<Object> constant_object; + switch (constant.type()) { + case Constant::kInt32: + constant_object = + isolate()->factory()->NewNumberFromInt(constant.ToInt32()); + break; + case Constant::kFloat64: + constant_object = + isolate()->factory()->NewHeapNumber(constant.ToFloat64()); + break; + case Constant::kHeapObject: + constant_object = constant.ToHeapObject(); + break; + default: + UNREACHABLE(); + } + int literal_id = DefineDeoptimizationLiteral(constant_object); + translation->StoreLiteral(literal_id); + } else { + UNREACHABLE(); + } +} + +#if !V8_TURBOFAN_BACKEND + +void CodeGenerator::AssembleArchInstruction(Instruction* instr) { + UNIMPLEMENTED(); +} + + +void 
CodeGenerator::AssembleArchBranch(Instruction* instr, + FlagsCondition condition) { + UNIMPLEMENTED(); +} + + +void CodeGenerator::AssembleArchBoolean(Instruction* instr, + FlagsCondition condition) { + UNIMPLEMENTED(); +} + + +void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); } + + +void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); } + + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + UNIMPLEMENTED(); +} + + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + UNIMPLEMENTED(); +} + + +void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); } + + +#ifdef DEBUG +bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, + int end_pc) { + UNIMPLEMENTED(); + return false; +} +#endif + +#endif // !V8_TURBOFAN_BACKEND + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/code-generator.h nodejs-0.11.15/deps/v8/src/compiler/code-generator.h --- nodejs-0.11.13/deps/v8/src/compiler/code-generator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/code-generator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,146 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_CODE_GENERATOR_H_ +#define V8_COMPILER_CODE_GENERATOR_H_ + +#include <deque> + +#include "src/compiler/gap-resolver.h" +#include "src/compiler/instruction.h" +#include "src/deoptimizer.h" +#include "src/macro-assembler.h" +#include "src/safepoint-table.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Generates native code for a sequence of instructions. +class CodeGenerator V8_FINAL : public GapResolver::Assembler { + public: + explicit CodeGenerator(InstructionSequence* code); + + // Generate native code. 
+ Handle<Code> GenerateCode(); + + InstructionSequence* code() const { return code_; } + Frame* frame() const { return code()->frame(); } + Graph* graph() const { return code()->graph(); } + Isolate* isolate() const { return zone()->isolate(); } + Linkage* linkage() const { return code()->linkage(); } + Schedule* schedule() const { return code()->schedule(); } + + private: + MacroAssembler* masm() { return &masm_; } + GapResolver* resolver() { return &resolver_; } + SafepointTableBuilder* safepoints() { return &safepoints_; } + Zone* zone() const { return code()->zone(); } + + // Checks if {block} will appear directly after {current_block_} when + // assembling code, in which case, a fall-through can be used. + bool IsNextInAssemblyOrder(const BasicBlock* block) const { + return block->rpo_number_ == (current_block_->rpo_number_ + 1) && + block->deferred_ == current_block_->deferred_; + } + + // Record a safepoint with the given pointer map. + void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind, + int arguments, Safepoint::DeoptMode deopt_mode); + + // Assemble code for the specified instruction. + void AssembleInstruction(Instruction* instr); + void AssembleSourcePosition(SourcePositionInstruction* instr); + void AssembleGap(GapInstruction* gap); + + // =========================================================================== + // ============= Architecture-specific code generation methods. ============== + // =========================================================================== + + void AssembleArchInstruction(Instruction* instr); + void AssembleArchBranch(Instruction* instr, FlagsCondition condition); + void AssembleArchBoolean(Instruction* instr, FlagsCondition condition); + + // Generates an architecture-specific, descriptor-specific prologue + // to set up a stack frame. + void AssemblePrologue(); + // Generates an architecture-specific, descriptor-specific return sequence + // to tear down a stack frame. 
+ void AssembleReturn(); + + // =========================================================================== + // ============== Architecture-specific gap resolver methods. ================ + // =========================================================================== + + // Interface used by the gap resolver to emit moves and swaps. + virtual void AssembleMove(InstructionOperand* source, + InstructionOperand* destination) V8_OVERRIDE; + virtual void AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) V8_OVERRIDE; + + // =========================================================================== + // Deoptimization table construction + void RecordLazyDeoptimizationEntry(Instruction* instr); + void PopulateDeoptimizationData(Handle<Code> code); + int DefineDeoptimizationLiteral(Handle<Object> literal); + void BuildTranslation(Instruction* instr, int deoptimization_id); + void AddTranslationForOperand(Translation* translation, Instruction* instr, + InstructionOperand* op); + void AddNopForSmiCodeInlining(); +#if DEBUG + static bool IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, + int end_pc); +#endif // DEBUG + // =========================================================================== + + class LazyDeoptimizationEntry V8_FINAL { + public: + LazyDeoptimizationEntry(int position_after_call, Label* continuation, + Label* deoptimization) + : position_after_call_(position_after_call), + continuation_(continuation), + deoptimization_(deoptimization) {} + + int position_after_call() const { return position_after_call_; } + Label* continuation() const { return continuation_; } + Label* deoptimization() const { return deoptimization_; } + + private: + int position_after_call_; + Label* continuation_; + Label* deoptimization_; + }; + + struct DeoptimizationState : ZoneObject { + int translation_id_; + + explicit DeoptimizationState(int translation_id) + : translation_id_(translation_id) {} + }; + + typedef 
std::deque<LazyDeoptimizationEntry, + zone_allocator<LazyDeoptimizationEntry> > + LazyDeoptimizationEntries; + typedef std::deque<DeoptimizationState*, + zone_allocator<DeoptimizationState*> > + DeoptimizationStates; + typedef std::deque<Handle<Object>, zone_allocator<Handle<Object> > > Literals; + + InstructionSequence* code_; + BasicBlock* current_block_; + SourcePosition current_source_position_; + MacroAssembler masm_; + GapResolver resolver_; + SafepointTableBuilder safepoints_; + LazyDeoptimizationEntries lazy_deoptimization_entries_; + DeoptimizationStates deoptimization_states_; + Literals deoptimization_literals_; + TranslationBuffer translations_; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_CODE_GENERATOR_H diff -Nru nodejs-0.11.13/deps/v8/src/compiler/code-generator-impl.h nodejs-0.11.15/deps/v8/src/compiler/code-generator-impl.h --- nodejs-0.11.13/deps/v8/src/compiler/code-generator-impl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/code-generator-impl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,132 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_ +#define V8_COMPILER_CODE_GENERATOR_IMPL_H_ + +#include "src/compiler/code-generator.h" +#include "src/compiler/common-operator.h" +#include "src/compiler/generic-graph.h" +#include "src/compiler/instruction.h" +#include "src/compiler/linkage.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Converts InstructionOperands from a given instruction to +// architecture-specific +// registers and operands after they have been assigned by the register +// allocator. 
+class InstructionOperandConverter { + public: + InstructionOperandConverter(CodeGenerator* gen, Instruction* instr) + : gen_(gen), instr_(instr) {} + + Register InputRegister(int index) { + return ToRegister(instr_->InputAt(index)); + } + + DoubleRegister InputDoubleRegister(int index) { + return ToDoubleRegister(instr_->InputAt(index)); + } + + double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); } + + int32_t InputInt32(int index) { + return ToConstant(instr_->InputAt(index)).ToInt32(); + } + + int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); } + + int16_t InputInt16(int index) { + return static_cast<int16_t>(InputInt32(index)); + } + + uint8_t InputInt5(int index) { + return static_cast<uint8_t>(InputInt32(index) & 0x1F); + } + + uint8_t InputInt6(int index) { + return static_cast<uint8_t>(InputInt32(index) & 0x3F); + } + + Handle<HeapObject> InputHeapObject(int index) { + return ToHeapObject(instr_->InputAt(index)); + } + + Label* InputLabel(int index) { + return gen_->code()->GetLabel(InputBlock(index)); + } + + BasicBlock* InputBlock(int index) { + NodeId block_id = static_cast<NodeId>(InputInt32(index)); + // operand should be a block id. 
+ DCHECK(block_id >= 0); + DCHECK(block_id < gen_->schedule()->BasicBlockCount()); + return gen_->schedule()->GetBlockById(block_id); + } + + Register OutputRegister(int index = 0) { + return ToRegister(instr_->OutputAt(index)); + } + + DoubleRegister OutputDoubleRegister() { + return ToDoubleRegister(instr_->Output()); + } + + Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); } + + Register ToRegister(InstructionOperand* op) { + DCHECK(op->IsRegister()); + return Register::FromAllocationIndex(op->index()); + } + + DoubleRegister ToDoubleRegister(InstructionOperand* op) { + DCHECK(op->IsDoubleRegister()); + return DoubleRegister::FromAllocationIndex(op->index()); + } + + Constant ToConstant(InstructionOperand* operand) { + if (operand->IsImmediate()) { + return gen_->code()->GetImmediate(operand->index()); + } + return gen_->code()->GetConstant(operand->index()); + } + + double ToDouble(InstructionOperand* operand) { + return ToConstant(operand).ToFloat64(); + } + + Handle<HeapObject> ToHeapObject(InstructionOperand* operand) { + return ToConstant(operand).ToHeapObject(); + } + + Frame* frame() const { return gen_->frame(); } + Isolate* isolate() const { return gen_->isolate(); } + Linkage* linkage() const { return gen_->linkage(); } + + protected: + CodeGenerator* gen_; + Instruction* instr_; +}; + + +// TODO(dcarney): generify this on bleeding_edge and replace this call +// when merged. 
+static inline void FinishCode(MacroAssembler* masm) { +#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM + masm->CheckConstPool(true, false); +#endif +} + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H diff -Nru nodejs-0.11.13/deps/v8/src/compiler/common-node-cache.h nodejs-0.11.15/deps/v8/src/compiler/common-node-cache.h --- nodejs-0.11.13/deps/v8/src/compiler/common-node-cache.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/common-node-cache.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_ +#define V8_COMPILER_COMMON_NODE_CACHE_H_ + +#include "src/assembler.h" +#include "src/compiler/node-cache.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Bundles various caches for common nodes. +class CommonNodeCache V8_FINAL : public ZoneObject { + public: + explicit CommonNodeCache(Zone* zone) : zone_(zone) {} + + Node** FindInt32Constant(int32_t value) { + return int32_constants_.Find(zone_, value); + } + + Node** FindFloat64Constant(double value) { + // We canonicalize double constants at the bit representation level. + return float64_constants_.Find(zone_, BitCast<int64_t>(value)); + } + + Node** FindExternalConstant(ExternalReference reference) { + return external_constants_.Find(zone_, reference.address()); + } + + Node** FindNumberConstant(double value) { + // We canonicalize double constants at the bit representation level. 
+ return number_constants_.Find(zone_, BitCast<int64_t>(value)); + } + + Zone* zone() const { return zone_; } + + private: + Int32NodeCache int32_constants_; + Int64NodeCache float64_constants_; + PtrNodeCache external_constants_; + Int64NodeCache number_constants_; + Zone* zone_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_COMMON_NODE_CACHE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/common-operator.h nodejs-0.11.15/deps/v8/src/compiler/common-operator.h --- nodejs-0.11.13/deps/v8/src/compiler/common-operator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/common-operator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,284 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_COMMON_OPERATOR_H_ +#define V8_COMPILER_COMMON_OPERATOR_H_ + +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/compiler/linkage.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" +#include "src/unique.h" + +namespace v8 { +namespace internal { + +class OStream; + +namespace compiler { + +class ControlOperator : public Operator1<int> { + public: + ControlOperator(IrOpcode::Value opcode, uint16_t properties, int inputs, + int outputs, int controls, const char* mnemonic) + : Operator1<int>(opcode, properties, inputs, outputs, mnemonic, + controls) {} + + virtual OStream& PrintParameter(OStream& os) const { return os; } // NOLINT + int ControlInputCount() const { return parameter(); } +}; + +class CallOperator : public Operator1<CallDescriptor*> { + public: + CallOperator(CallDescriptor* descriptor, const char* mnemonic) + : Operator1<CallDescriptor*>( + IrOpcode::kCall, descriptor->properties(), descriptor->InputCount(), + descriptor->ReturnCount(), mnemonic, descriptor) {} + + virtual OStream& PrintParameter(OStream& os) const { // NOLINT + return os 
<< "[" << *parameter() << "]"; + } +}; + +// Interface for building common operators that can be used at any level of IR, +// including JavaScript, mid-level, and low-level. +// TODO(titzer): Move the mnemonics into SimpleOperator and Operator1 classes. +class CommonOperatorBuilder { + public: + explicit CommonOperatorBuilder(Zone* zone) : zone_(zone) {} + +#define CONTROL_OP(name, inputs, controls) \ + return new (zone_) ControlOperator(IrOpcode::k##name, Operator::kFoldable, \ + inputs, 0, controls, #name); + + Operator* Start(int num_formal_parameters) { + // Outputs are formal parameters, plus context, receiver, and JSFunction. + int outputs = num_formal_parameters + 3; + return new (zone_) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0, + outputs, 0, "Start"); + } + Operator* Dead() { CONTROL_OP(Dead, 0, 0); } + Operator* End() { CONTROL_OP(End, 0, 1); } + Operator* Branch() { CONTROL_OP(Branch, 1, 1); } + Operator* IfTrue() { CONTROL_OP(IfTrue, 0, 1); } + Operator* IfFalse() { CONTROL_OP(IfFalse, 0, 1); } + Operator* Throw() { CONTROL_OP(Throw, 1, 1); } + Operator* LazyDeoptimization() { CONTROL_OP(LazyDeoptimization, 0, 1); } + Operator* Continuation() { CONTROL_OP(Continuation, 0, 1); } + + Operator* Deoptimize() { + return new (zone_) + ControlOperator(IrOpcode::kDeoptimize, 0, 1, 0, 1, "Deoptimize"); + } + + Operator* Return() { + return new (zone_) ControlOperator(IrOpcode::kReturn, 0, 1, 0, 1, "Return"); + } + + Operator* Merge(int controls) { + return new (zone_) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0, + 0, controls, "Merge"); + } + + Operator* Loop(int controls) { + return new (zone_) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0, + 0, controls, "Loop"); + } + + Operator* Parameter(int index) { + return new (zone_) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1, + 1, "Parameter", index); + } + Operator* Int32Constant(int32_t value) { + return new (zone_) Operator1<int>(IrOpcode::kInt32Constant, 
Operator::kPure, + 0, 1, "Int32Constant", value); + } + Operator* Int64Constant(int64_t value) { + return new (zone_) + Operator1<int64_t>(IrOpcode::kInt64Constant, Operator::kPure, 0, 1, + "Int64Constant", value); + } + Operator* Float64Constant(double value) { + return new (zone_) + Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1, + "Float64Constant", value); + } + Operator* ExternalConstant(ExternalReference value) { + return new (zone_) Operator1<ExternalReference>(IrOpcode::kExternalConstant, + Operator::kPure, 0, 1, + "ExternalConstant", value); + } + Operator* NumberConstant(double value) { + return new (zone_) + Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1, + "NumberConstant", value); + } + Operator* HeapConstant(PrintableUnique<Object> value) { + return new (zone_) Operator1<PrintableUnique<Object> >( + IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value); + } + Operator* Phi(int arguments) { + DCHECK(arguments > 0); // Disallow empty phis. + return new (zone_) Operator1<int>(IrOpcode::kPhi, Operator::kPure, + arguments, 1, "Phi", arguments); + } + Operator* EffectPhi(int arguments) { + DCHECK(arguments > 0); // Disallow empty phis. 
+ return new (zone_) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0, + 0, "EffectPhi", arguments); + } + Operator* StateValues(int arguments) { + return new (zone_) Operator1<int>(IrOpcode::kStateValues, Operator::kPure, + arguments, 1, "StateValues", arguments); + } + Operator* FrameState(BailoutId ast_id) { + return new (zone_) Operator1<BailoutId>( + IrOpcode::kFrameState, Operator::kPure, 3, 1, "FrameState", ast_id); + } + Operator* Call(CallDescriptor* descriptor) { + return new (zone_) CallOperator(descriptor, "Call"); + } + Operator* Projection(int index) { + return new (zone_) Operator1<int>(IrOpcode::kProjection, Operator::kPure, 1, + 1, "Projection", index); + } + + private: + Zone* zone_; +}; + + +template <typename T> +struct CommonOperatorTraits { + static inline bool Equals(T a, T b); + static inline bool HasValue(Operator* op); + static inline T ValueOf(Operator* op); +}; + +template <> +struct CommonOperatorTraits<int32_t> { + static inline bool Equals(int32_t a, int32_t b) { return a == b; } + static inline bool HasValue(Operator* op) { + return op->opcode() == IrOpcode::kInt32Constant || + op->opcode() == IrOpcode::kNumberConstant; + } + static inline int32_t ValueOf(Operator* op) { + if (op->opcode() == IrOpcode::kNumberConstant) { + // TODO(titzer): cache the converted int32 value in NumberConstant. + return FastD2I(reinterpret_cast<Operator1<double>*>(op)->parameter()); + } + CHECK_EQ(IrOpcode::kInt32Constant, op->opcode()); + return static_cast<Operator1<int32_t>*>(op)->parameter(); + } +}; + +template <> +struct CommonOperatorTraits<uint32_t> { + static inline bool Equals(uint32_t a, uint32_t b) { return a == b; } + static inline bool HasValue(Operator* op) { + return CommonOperatorTraits<int32_t>::HasValue(op); + } + static inline uint32_t ValueOf(Operator* op) { + if (op->opcode() == IrOpcode::kNumberConstant) { + // TODO(titzer): cache the converted uint32 value in NumberConstant. 
+ return FastD2UI(reinterpret_cast<Operator1<double>*>(op)->parameter()); + } + return static_cast<uint32_t>(CommonOperatorTraits<int32_t>::ValueOf(op)); + } +}; + +template <> +struct CommonOperatorTraits<int64_t> { + static inline bool Equals(int64_t a, int64_t b) { return a == b; } + static inline bool HasValue(Operator* op) { + return op->opcode() == IrOpcode::kInt32Constant || + op->opcode() == IrOpcode::kInt64Constant || + op->opcode() == IrOpcode::kNumberConstant; + } + static inline int64_t ValueOf(Operator* op) { + if (op->opcode() == IrOpcode::kInt32Constant) { + return static_cast<int64_t>(CommonOperatorTraits<int32_t>::ValueOf(op)); + } + CHECK_EQ(IrOpcode::kInt64Constant, op->opcode()); + return static_cast<Operator1<int64_t>*>(op)->parameter(); + } +}; + +template <> +struct CommonOperatorTraits<uint64_t> { + static inline bool Equals(uint64_t a, uint64_t b) { return a == b; } + static inline bool HasValue(Operator* op) { + return CommonOperatorTraits<int64_t>::HasValue(op); + } + static inline uint64_t ValueOf(Operator* op) { + return static_cast<uint64_t>(CommonOperatorTraits<int64_t>::ValueOf(op)); + } +}; + +template <> +struct CommonOperatorTraits<double> { + static inline bool Equals(double a, double b) { + return DoubleRepresentation(a).bits == DoubleRepresentation(b).bits; + } + static inline bool HasValue(Operator* op) { + return op->opcode() == IrOpcode::kFloat64Constant || + op->opcode() == IrOpcode::kInt32Constant || + op->opcode() == IrOpcode::kNumberConstant; + } + static inline double ValueOf(Operator* op) { + if (op->opcode() == IrOpcode::kFloat64Constant || + op->opcode() == IrOpcode::kNumberConstant) { + return reinterpret_cast<Operator1<double>*>(op)->parameter(); + } + return static_cast<double>(CommonOperatorTraits<int32_t>::ValueOf(op)); + } +}; + +template <> +struct CommonOperatorTraits<ExternalReference> { + static inline bool Equals(ExternalReference a, ExternalReference b) { + return a == b; + } + static inline bool 
HasValue(Operator* op) { + return op->opcode() == IrOpcode::kExternalConstant; + } + static inline ExternalReference ValueOf(Operator* op) { + CHECK_EQ(IrOpcode::kExternalConstant, op->opcode()); + return static_cast<Operator1<ExternalReference>*>(op)->parameter(); + } +}; + +template <typename T> +struct CommonOperatorTraits<PrintableUnique<T> > { + static inline bool HasValue(Operator* op) { + return op->opcode() == IrOpcode::kHeapConstant; + } + static inline PrintableUnique<T> ValueOf(Operator* op) { + CHECK_EQ(IrOpcode::kHeapConstant, op->opcode()); + return static_cast<Operator1<PrintableUnique<T> >*>(op)->parameter(); + } +}; + +template <typename T> +struct CommonOperatorTraits<Handle<T> > { + static inline bool HasValue(Operator* op) { + return CommonOperatorTraits<PrintableUnique<T> >::HasValue(op); + } + static inline Handle<T> ValueOf(Operator* op) { + return CommonOperatorTraits<PrintableUnique<T> >::ValueOf(op).handle(); + } +}; + + +template <typename T> +inline T ValueOf(Operator* op) { + return CommonOperatorTraits<T>::ValueOf(op); +} +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_COMMON_OPERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/control-builders.cc nodejs-0.11.15/deps/v8/src/compiler/control-builders.cc --- nodejs-0.11.13/deps/v8/src/compiler/control-builders.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/control-builders.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,144 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "control-builders.h" + +namespace v8 { +namespace internal { +namespace compiler { + + +void IfBuilder::If(Node* condition) { + builder_->NewBranch(condition); + else_environment_ = environment()->CopyForConditional(); +} + + +void IfBuilder::Then() { builder_->NewIfTrue(); } + + +void IfBuilder::Else() { + builder_->NewMerge(); + then_environment_ = environment(); + set_environment(else_environment_); + builder_->NewIfFalse(); +} + + +void IfBuilder::End() { + then_environment_->Merge(environment()); + set_environment(then_environment_); +} + + +void LoopBuilder::BeginLoop() { + builder_->NewLoop(); + loop_environment_ = environment()->CopyForLoop(); + continue_environment_ = environment()->CopyAsUnreachable(); + break_environment_ = environment()->CopyAsUnreachable(); +} + + +void LoopBuilder::Continue() { + continue_environment_->Merge(environment()); + environment()->MarkAsUnreachable(); +} + + +void LoopBuilder::Break() { + break_environment_->Merge(environment()); + environment()->MarkAsUnreachable(); +} + + +void LoopBuilder::EndBody() { + continue_environment_->Merge(environment()); + set_environment(continue_environment_); +} + + +void LoopBuilder::EndLoop() { + loop_environment_->Merge(environment()); + set_environment(break_environment_); +} + + +void LoopBuilder::BreakUnless(Node* condition) { + IfBuilder control_if(builder_); + control_if.If(condition); + control_if.Then(); + control_if.Else(); + Break(); + control_if.End(); +} + + +void SwitchBuilder::BeginSwitch() { + body_environment_ = environment()->CopyAsUnreachable(); + label_environment_ = environment()->CopyAsUnreachable(); + break_environment_ = environment()->CopyAsUnreachable(); + body_environments_.AddBlock(NULL, case_count(), zone()); +} + + +void SwitchBuilder::BeginLabel(int index, Node* condition) { + builder_->NewBranch(condition); + label_environment_ = environment()->CopyForConditional(); + builder_->NewIfTrue(); + body_environments_[index] = environment(); +} + + +void 
SwitchBuilder::EndLabel() { + set_environment(label_environment_); + builder_->NewIfFalse(); +} + + +void SwitchBuilder::DefaultAt(int index) { + label_environment_ = environment()->CopyAsUnreachable(); + body_environments_[index] = environment(); +} + + +void SwitchBuilder::BeginCase(int index) { + set_environment(body_environments_[index]); + environment()->Merge(body_environment_); +} + + +void SwitchBuilder::Break() { + break_environment_->Merge(environment()); + environment()->MarkAsUnreachable(); +} + + +void SwitchBuilder::EndCase() { body_environment_ = environment(); } + + +void SwitchBuilder::EndSwitch() { + break_environment_->Merge(label_environment_); + break_environment_->Merge(environment()); + set_environment(break_environment_); +} + + +void BlockBuilder::BeginBlock() { + break_environment_ = environment()->CopyAsUnreachable(); +} + + +void BlockBuilder::Break() { + break_environment_->Merge(environment()); + environment()->MarkAsUnreachable(); +} + + +void BlockBuilder::EndBlock() { + break_environment_->Merge(environment()); + set_environment(break_environment_); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/control-builders.h nodejs-0.11.15/deps/v8/src/compiler/control-builders.h --- nodejs-0.11.13/deps/v8/src/compiler/control-builders.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/control-builders.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,144 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_CONTROL_BUILDERS_H_ +#define V8_COMPILER_CONTROL_BUILDERS_H_ + +#include "src/v8.h" + +#include "src/compiler/graph-builder.h" +#include "src/compiler/node.h" + +namespace v8 { +namespace internal { +namespace compiler { + + +// Base class for all control builders. 
Also provides a common interface for +// control builders to handle 'break' and 'continue' statements when they are +// used to model breakable statements. +class ControlBuilder { + public: + explicit ControlBuilder(StructuredGraphBuilder* builder) + : builder_(builder) {} + virtual ~ControlBuilder() {} + + // Interface for break and continue. + virtual void Break() { UNREACHABLE(); } + virtual void Continue() { UNREACHABLE(); } + + protected: + typedef StructuredGraphBuilder Builder; + typedef StructuredGraphBuilder::Environment Environment; + + Zone* zone() const { return builder_->zone(); } + Environment* environment() { return builder_->environment(); } + void set_environment(Environment* env) { builder_->set_environment(env); } + + Builder* builder_; +}; + + +// Tracks control flow for a conditional statement. +class IfBuilder : public ControlBuilder { + public: + explicit IfBuilder(StructuredGraphBuilder* builder) + : ControlBuilder(builder), + then_environment_(NULL), + else_environment_(NULL) {} + + // Primitive control commands. + void If(Node* condition); + void Then(); + void Else(); + void End(); + + private: + Environment* then_environment_; // Environment after the 'then' body. + Environment* else_environment_; // Environment for the 'else' body. +}; + + +// Tracks control flow for an iteration statement. +class LoopBuilder : public ControlBuilder { + public: + explicit LoopBuilder(StructuredGraphBuilder* builder) + : ControlBuilder(builder), + loop_environment_(NULL), + continue_environment_(NULL), + break_environment_(NULL) {} + + // Primitive control commands. + void BeginLoop(); + void EndBody(); + void EndLoop(); + + // Primitive support for break and continue. + virtual void Continue(); + virtual void Break(); + + // Compound control command for conditional break. + void BreakUnless(Node* condition); + + private: + Environment* loop_environment_; // Environment of the loop header. 
+ Environment* continue_environment_; // Environment after the loop body. + Environment* break_environment_; // Environment after the loop exits. +}; + + +// Tracks control flow for a switch statement. +class SwitchBuilder : public ControlBuilder { + public: + explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count) + : ControlBuilder(builder), + body_environment_(NULL), + label_environment_(NULL), + break_environment_(NULL), + body_environments_(case_count, zone()) {} + + // Primitive control commands. + void BeginSwitch(); + void BeginLabel(int index, Node* condition); + void EndLabel(); + void DefaultAt(int index); + void BeginCase(int index); + void EndCase(); + void EndSwitch(); + + // Primitive support for break. + virtual void Break(); + + // The number of cases within a switch is statically known. + int case_count() const { return body_environments_.capacity(); } + + private: + Environment* body_environment_; // Environment after last case body. + Environment* label_environment_; // Environment for next label condition. + Environment* break_environment_; // Environment after the switch exits. + ZoneList<Environment*> body_environments_; +}; + + +// Tracks control flow for a block statement. +class BlockBuilder : public ControlBuilder { + public: + explicit BlockBuilder(StructuredGraphBuilder* builder) + : ControlBuilder(builder), break_environment_(NULL) {} + + // Primitive control commands. + void BeginBlock(); + void EndBlock(); + + // Primitive support for break. + virtual void Break(); + + private: + Environment* break_environment_; // Environment after the block exits. 
+}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_CONTROL_BUILDERS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/frame.h nodejs-0.11.15/deps/v8/src/compiler/frame.h --- nodejs-0.11.13/deps/v8/src/compiler/frame.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/frame.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,104 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_FRAME_H_ +#define V8_COMPILER_FRAME_H_ + +#include "src/v8.h" + +#include "src/data-flow.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Collects the spill slot requirements and the allocated general and double +// registers for a compiled function. Frames are usually populated by the +// register allocator and are used by Linkage to generate code for the prologue +// and epilogue to compiled code. +class Frame { + public: + Frame() + : register_save_area_size_(0), + spill_slot_count_(0), + double_spill_slot_count_(0), + allocated_registers_(NULL), + allocated_double_registers_(NULL) {} + + inline int GetSpillSlotCount() { return spill_slot_count_; } + inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; } + + void SetAllocatedRegisters(BitVector* regs) { + DCHECK(allocated_registers_ == NULL); + allocated_registers_ = regs; + } + + void SetAllocatedDoubleRegisters(BitVector* regs) { + DCHECK(allocated_double_registers_ == NULL); + allocated_double_registers_ = regs; + } + + bool DidAllocateDoubleRegisters() { + return !allocated_double_registers_->IsEmpty(); + } + + void SetRegisterSaveAreaSize(int size) { + DCHECK(IsAligned(size, kPointerSize)); + register_save_area_size_ = size; + } + + int GetRegisterSaveAreaSize() { return register_save_area_size_; } + + int AllocateSpillSlot(bool is_double) { + // If 32-bit, skip one if the new slot is a double. 
+ if (is_double) { + if (kDoubleSize > kPointerSize) { + DCHECK(kDoubleSize == kPointerSize * 2); + spill_slot_count_++; + spill_slot_count_ |= 1; + } + double_spill_slot_count_++; + } + return spill_slot_count_++; + } + + private: + int register_save_area_size_; + int spill_slot_count_; + int double_spill_slot_count_; + BitVector* allocated_registers_; + BitVector* allocated_double_registers_; +}; + + +// Represents an offset from either the stack pointer or frame pointer. +class FrameOffset { + public: + inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; } + inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; } + inline int offset() { return offset_ & ~1; } + + inline static FrameOffset FromStackPointer(int offset) { + DCHECK((offset & 1) == 0); + return FrameOffset(offset | kFromSp); + } + + inline static FrameOffset FromFramePointer(int offset) { + DCHECK((offset & 1) == 0); + return FrameOffset(offset | kFromFp); + } + + private: + explicit FrameOffset(int offset) : offset_(offset) {} + + int offset_; // Encodes SP or FP in the low order bit. + + static const int kFromSp = 1; + static const int kFromFp = 0; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_FRAME_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/gap-resolver.cc nodejs-0.11.15/deps/v8/src/compiler/gap-resolver.cc --- nodejs-0.11.13/deps/v8/src/compiler/gap-resolver.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/gap-resolver.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,136 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/gap-resolver.h" + +#include <algorithm> +#include <functional> +#include <set> + +namespace v8 { +namespace internal { +namespace compiler { + +typedef ZoneList<MoveOperands>::iterator op_iterator; + +#ifdef ENABLE_SLOW_DCHECKS +// TODO(svenpanne) Brush up InstructionOperand with comparison? +struct InstructionOperandComparator { + bool operator()(const InstructionOperand* x, + const InstructionOperand* y) const { + return (x->kind() < y->kind()) || + (x->kind() == y->kind() && x->index() < y->index()); + } +}; +#endif + +// No operand should be the destination for more than one move. +static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) { +#ifdef ENABLE_SLOW_DCHECKS + std::set<InstructionOperand*, InstructionOperandComparator> seen; + for (op_iterator i = moves->begin(); i != moves->end(); ++i) { + SLOW_DCHECK(seen.find(i->destination()) == seen.end()); + seen.insert(i->destination()); + } +#endif +} + + +void GapResolver::Resolve(ParallelMove* parallel_move) const { + ZoneList<MoveOperands>* moves = parallel_move->move_operands(); + // TODO(svenpanne) Use the member version of remove_if when we use real lists. + op_iterator end = + std::remove_if(moves->begin(), moves->end(), + std::mem_fun_ref(&MoveOperands::IsRedundant)); + moves->Rewind(static_cast<int>(end - moves->begin())); + + VerifyMovesAreInjective(moves); + + for (op_iterator move = moves->begin(); move != moves->end(); ++move) { + if (!move->IsEliminated()) PerformMove(moves, &*move); + } +} + + +void GapResolver::PerformMove(ZoneList<MoveOperands>* moves, + MoveOperands* move) const { + // Each call to this function performs a move and deletes it from the move + // graph. We first recursively perform any move blocking this one. We mark a + // move as "pending" on entry to PerformMove in order to detect cycles in the + // move graph. 
We use operand swaps to resolve cycles, which means that a + // call to PerformMove could change any source operand in the move graph. + DCHECK(!move->IsPending()); + DCHECK(!move->IsRedundant()); + + // Clear this move's destination to indicate a pending move. The actual + // destination is saved on the side. + DCHECK_NOT_NULL(move->source()); // Or else it will look eliminated. + InstructionOperand* destination = move->destination(); + move->set_destination(NULL); + + // Perform a depth-first traversal of the move graph to resolve dependencies. + // Any unperformed, unpending move with a source the same as this one's + // destination blocks this one so recursively perform all such moves. + for (op_iterator other = moves->begin(); other != moves->end(); ++other) { + if (other->Blocks(destination) && !other->IsPending()) { + // Though PerformMove can change any source operand in the move graph, + // this call cannot create a blocking move via a swap (this loop does not + // miss any). Assume there is a non-blocking move with source A and this + // move is blocked on source B and there is a swap of A and B. Then A and + // B must be involved in the same cycle (or they would not be swapped). + // Since this move's destination is B and there is only a single incoming + // edge to an operand, this move must also be involved in the same cycle. + // In that case, the blocking move will be created but will be "pending" + // when we return from PerformMove. + PerformMove(moves, other); + } + } + + // We are about to resolve this move and don't need it marked as pending, so + // restore its destination. + move->set_destination(destination); + + // This move's source may have changed due to swaps to resolve cycles and so + // it may now be the last move in the cycle. If so remove it. 
+ InstructionOperand* source = move->source(); + if (source->Equals(destination)) { + move->Eliminate(); + return; + } + + // The move may be blocked on a (at most one) pending move, in which case we + // have a cycle. Search for such a blocking move and perform a swap to + // resolve it. + op_iterator blocker = std::find_if( + moves->begin(), moves->end(), + std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination)); + if (blocker == moves->end()) { + // The easy case: This move is not blocked. + assembler_->AssembleMove(source, destination); + move->Eliminate(); + return; + } + + DCHECK(blocker->IsPending()); + // Ensure source is a register or both are stack slots, to limit swap cases. + if (source->IsStackSlot() || source->IsDoubleStackSlot()) { + std::swap(source, destination); + } + assembler_->AssembleSwap(source, destination); + move->Eliminate(); + + // Any unperformed (including pending) move with a source of either this + // move's source or destination needs to have their source changed to + // reflect the state of affairs after the swap. + for (op_iterator other = moves->begin(); other != moves->end(); ++other) { + if (other->Blocks(source)) { + other->set_source(destination); + } else if (other->Blocks(destination)) { + other->set_source(source); + } + } +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/gap-resolver.h nodejs-0.11.15/deps/v8/src/compiler/gap-resolver.h --- nodejs-0.11.13/deps/v8/src/compiler/gap-resolver.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/gap-resolver.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GAP_RESOLVER_H_ +#define V8_COMPILER_GAP_RESOLVER_H_ + +#include "src/compiler/instruction.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class GapResolver V8_FINAL { + public: + // Interface used by the gap resolver to emit moves and swaps. + class Assembler { + public: + virtual ~Assembler() {} + + // Assemble move. + virtual void AssembleMove(InstructionOperand* source, + InstructionOperand* destination) = 0; + // Assemble swap. + virtual void AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) = 0; + }; + + explicit GapResolver(Assembler* assembler) : assembler_(assembler) {} + + // Resolve a set of parallel moves, emitting assembler instructions. + void Resolve(ParallelMove* parallel_move) const; + + private: + // Perform the given move, possibly requiring other moves to satisfy + // dependencies. + void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const; + + // Assembler used to emit moves and save registers. + Assembler* const assembler_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GAP_RESOLVER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/generic-algorithm.h nodejs-0.11.15/deps/v8/src/compiler/generic-algorithm.h --- nodejs-0.11.13/deps/v8/src/compiler/generic-algorithm.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/generic-algorithm.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,136 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_ +#define V8_COMPILER_GENERIC_ALGORITHM_H_ + +#include <deque> +#include <stack> + +#include "src/compiler/generic-graph.h" +#include "src/compiler/generic-node.h" +#include "src/zone-containers.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and +// post-order. Visitation uses an explicitly allocated stack rather than the +// execution stack to avoid stack overflow. Although GenericGraphVisit is +// primarily intended to traverse networks of nodes through their +// dependencies and uses, it also can be used to visit any graph-like network +// by specifying custom traits. +class GenericGraphVisit { + public: + enum Control { + CONTINUE = 0x0, // Continue depth-first normally + SKIP = 0x1, // Skip this node and its successors + REENTER = 0x2, // Allow reentering this node + DEFER = SKIP | REENTER + }; + + // struct Visitor { + // Control Pre(Traits::Node* current); + // Control Post(Traits::Node* current); + // void PreEdge(Traits::Node* from, int index, Traits::Node* to); + // void PostEdge(Traits::Node* from, int index, Traits::Node* to); + // } + template <class Visitor, class Traits, class RootIterator> + static void Visit(GenericGraphBase* graph, RootIterator root_begin, + RootIterator root_end, Visitor* visitor) { + // TODO(bmeurer): Pass "local" zone as parameter. 
+ Zone* zone = graph->zone(); + typedef typename Traits::Node Node; + typedef typename Traits::Iterator Iterator; + typedef std::pair<Iterator, Iterator> NodeState; + typedef zone_allocator<NodeState> ZoneNodeStateAllocator; + typedef std::deque<NodeState, ZoneNodeStateAllocator> NodeStateDeque; + typedef std::stack<NodeState, NodeStateDeque> NodeStateStack; + NodeStateStack stack((NodeStateDeque(ZoneNodeStateAllocator(zone)))); + BoolVector visited(Traits::max_id(graph), false, ZoneBoolAllocator(zone)); + Node* current = *root_begin; + while (true) { + DCHECK(current != NULL); + const int id = current->id(); + DCHECK(id >= 0); + DCHECK(id < Traits::max_id(graph)); // Must be a valid id. + bool visit = !GetVisited(&visited, id); + if (visit) { + Control control = visitor->Pre(current); + visit = !IsSkip(control); + if (!IsReenter(control)) SetVisited(&visited, id, true); + } + Iterator begin(visit ? Traits::begin(current) : Traits::end(current)); + Iterator end(Traits::end(current)); + stack.push(NodeState(begin, end)); + Node* post_order_node = current; + while (true) { + NodeState top = stack.top(); + if (top.first == top.second) { + if (visit) { + Control control = visitor->Post(post_order_node); + DCHECK(!IsSkip(control)); + SetVisited(&visited, post_order_node->id(), !IsReenter(control)); + } + stack.pop(); + if (stack.empty()) { + if (++root_begin == root_end) return; + current = *root_begin; + break; + } + post_order_node = Traits::from(stack.top().first); + visit = true; + } else { + visitor->PreEdge(Traits::from(top.first), top.first.edge().index(), + Traits::to(top.first)); + current = Traits::to(top.first); + if (!GetVisited(&visited, current->id())) break; + } + top = stack.top(); + visitor->PostEdge(Traits::from(top.first), top.first.edge().index(), + Traits::to(top.first)); + ++stack.top().first; + } + } + } + + template <class Visitor, class Traits> + static void Visit(GenericGraphBase* graph, typename Traits::Node* current, + Visitor* visitor) { + 
typename Traits::Node* array[] = {current}; + Visit<Visitor, Traits>(graph, &array[0], &array[1], visitor); + } + + template <class B, class S> + struct NullNodeVisitor { + Control Pre(GenericNode<B, S>* node) { return CONTINUE; } + Control Post(GenericNode<B, S>* node) { return CONTINUE; } + void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {} + void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {} + }; + + private: + static bool IsSkip(Control c) { return c & SKIP; } + static bool IsReenter(Control c) { return c & REENTER; } + + // TODO(turbofan): resizing could be optionally templatized away. + static void SetVisited(BoolVector* visited, int id, bool value) { + if (id >= static_cast<int>(visited->size())) { + // Resize and set all values to unvisited. + visited->resize((3 * id) / 2, false); + } + visited->at(id) = value; + } + + static bool GetVisited(BoolVector* visited, int id) { + if (id >= static_cast<int>(visited->size())) return false; + return visited->at(id); + } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GENERIC_ALGORITHM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/generic-algorithm-inl.h nodejs-0.11.15/deps/v8/src/compiler/generic-algorithm-inl.h --- nodejs-0.11.13/deps/v8/src/compiler/generic-algorithm-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/generic-algorithm-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,48 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_ +#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_ + +#include <vector> + +#include "src/compiler/generic-algorithm.h" +#include "src/compiler/generic-graph.h" +#include "src/compiler/generic-node.h" +#include "src/compiler/generic-node-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +template <class N> +class NodeInputIterationTraits { + public: + typedef N Node; + typedef typename N::Inputs::iterator Iterator; + + static Iterator begin(Node* node) { return node->inputs().begin(); } + static Iterator end(Node* node) { return node->inputs().end(); } + static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); } + static Node* to(Iterator iterator) { return *iterator; } + static Node* from(Iterator iterator) { return iterator.edge().from(); } +}; + +template <class N> +class NodeUseIterationTraits { + public: + typedef N Node; + typedef typename N::Uses::iterator Iterator; + + static Iterator begin(Node* node) { return node->uses().begin(); } + static Iterator end(Node* node) { return node->uses().end(); } + static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); } + static Node* to(Iterator iterator) { return *iterator; } + static Node* from(Iterator iterator) { return iterator.edge().to(); } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GENERIC_ALGORITHM_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/generic-graph.h nodejs-0.11.15/deps/v8/src/compiler/generic-graph.h --- nodejs-0.11.13/deps/v8/src/compiler/generic-graph.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/generic-graph.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GENERIC_GRAPH_H_ +#define V8_COMPILER_GENERIC_GRAPH_H_ + +#include "src/compiler/generic-node.h" + +namespace v8 { +namespace internal { + +class Zone; + +namespace compiler { + +class GenericGraphBase : public ZoneObject { + public: + explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {} + + Zone* zone() const { return zone_; } + + NodeId NextNodeID() { return next_node_id_++; } + NodeId NodeCount() const { return next_node_id_; } + + private: + Zone* zone_; + NodeId next_node_id_; +}; + +template <class V> +class GenericGraph : public GenericGraphBase { + public: + explicit GenericGraph(Zone* zone) + : GenericGraphBase(zone), start_(NULL), end_(NULL) {} + + V* start() { return start_; } + V* end() { return end_; } + + void SetStart(V* start) { start_ = start; } + void SetEnd(V* end) { end_ = end; } + + private: + V* start_; + V* end_; + + DISALLOW_COPY_AND_ASSIGN(GenericGraph); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GENERIC_GRAPH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/generic-node.h nodejs-0.11.15/deps/v8/src/compiler/generic-node.h --- nodejs-0.11.13/deps/v8/src/compiler/generic-node.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/generic-node.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,271 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_GENERIC_NODE_H_ +#define V8_COMPILER_GENERIC_NODE_H_ + +#include <deque> + +#include "src/v8.h" + +#include "src/compiler/operator.h" +#include "src/zone.h" +#include "src/zone-allocator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Operator; +class GenericGraphBase; + +typedef int NodeId; + +// A GenericNode<> is the basic primitive of graphs. 
GenericNode's are +// chained together by input/use chains but by default otherwise contain only an +// identifying number which specific applications of graphs and nodes can use +// to index auxiliary out-of-line data, especially transient data. +// Specializations of the templatized GenericNode<> class must provide a base +// class B that contains all of the members to be made available in each +// specialized Node instance. GenericNode uses a mixin template pattern to +// ensure that common accessors and methods expect the derived class S type +// rather than the GenericNode<B, S> type. +template <class B, class S> +class GenericNode : public B { + public: + typedef B BaseClass; + typedef S DerivedClass; + + inline NodeId id() const { return id_; } + + int InputCount() const { return input_count_; } + S* InputAt(int index) const { + return static_cast<S*>(GetInputRecordPtr(index)->to); + } + void ReplaceInput(int index, GenericNode* new_input); + void AppendInput(Zone* zone, GenericNode* new_input); + void InsertInput(Zone* zone, int index, GenericNode* new_input); + + int UseCount() { return use_count_; } + S* UseAt(int index) { + DCHECK(index < use_count_); + Use* current = first_use_; + while (index-- != 0) { + current = current->next; + } + return static_cast<S*>(current->from); + } + inline void ReplaceUses(GenericNode* replace_to); + template <class UnaryPredicate> + inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to); + void RemoveAllInputs(); + + void TrimInputCount(int input_count); + + class Inputs { + public: + class iterator; + iterator begin(); + iterator end(); + + explicit Inputs(GenericNode* node) : node_(node) {} + + private: + GenericNode* node_; + }; + + Inputs inputs() { return Inputs(this); } + + class Uses { + public: + class iterator; + iterator begin(); + iterator end(); + bool empty() { return begin() == end(); } + + explicit Uses(GenericNode* node) : node_(node) {} + + private: + GenericNode* node_; + }; + + Uses 
uses() { return Uses(this); } + + class Edge; + + bool OwnedBy(GenericNode* owner) const; + + static S* New(GenericGraphBase* graph, int input_count, S** inputs); + + protected: + friend class GenericGraphBase; + + class Use : public ZoneObject { + public: + GenericNode* from; + Use* next; + Use* prev; + int input_index; + }; + + class Input { + public: + GenericNode* to; + Use* use; + + void Update(GenericNode* new_to); + }; + + void EnsureAppendableInputs(Zone* zone); + + Input* GetInputRecordPtr(int index) const { + if (has_appendable_inputs_) { + return &((*inputs_.appendable_)[index]); + } else { + return inputs_.static_ + index; + } + } + + void AppendUse(Use* use); + void RemoveUse(Use* use); + + void* operator new(size_t, void* location) { return location; } + + GenericNode(GenericGraphBase* graph, int input_count); + + private: + void AssignUniqueID(GenericGraphBase* graph); + + typedef zone_allocator<Input> ZoneInputAllocator; + typedef std::deque<Input, ZoneInputAllocator> InputDeque; + + NodeId id_; + int input_count_ : 31; + bool has_appendable_inputs_ : 1; + union { + // When a node is initially allocated, it uses a static buffer to hold its + // inputs under the assumption that the number of outputs will not increase. + // When the first input is appended, the static buffer is converted into a + // deque to allow for space-efficient growing. + Input* static_; + InputDeque* appendable_; + } inputs_; + int use_count_; + Use* first_use_; + Use* last_use_; + + DISALLOW_COPY_AND_ASSIGN(GenericNode); +}; + +// An encapsulation for information associated with a single use of node as a +// input from another node, allowing access to both the defining node and +// the ndoe having the input. 
+template <class B, class S> +class GenericNode<B, S>::Edge { + public: + S* from() const { return static_cast<S*>(input_->use->from); } + S* to() const { return static_cast<S*>(input_->to); } + int index() const { + int index = input_->use->input_index; + DCHECK(index < input_->use->from->input_count_); + return index; + } + + private: + friend class GenericNode<B, S>::Uses::iterator; + friend class GenericNode<B, S>::Inputs::iterator; + + explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {} + + typename GenericNode<B, S>::Input* input_; +}; + +// A forward iterator to visit the nodes which are depended upon by a node +// in the order of input. +template <class B, class S> +class GenericNode<B, S>::Inputs::iterator { + public: + iterator(const typename GenericNode<B, S>::Inputs::iterator& other) // NOLINT + : node_(other.node_), + index_(other.index_) {} + + S* operator*() { return static_cast<S*>(GetInput()->to); } + typename GenericNode<B, S>::Edge edge() { + return typename GenericNode::Edge(GetInput()); + } + bool operator==(const iterator& other) const { + return other.index_ == index_ && other.node_ == node_; + } + bool operator!=(const iterator& other) const { return !(other == *this); } + iterator& operator++() { + DCHECK(node_ != NULL); + DCHECK(index_ < node_->input_count_); + ++index_; + return *this; + } + int index() { return index_; } + + private: + friend class GenericNode; + + explicit iterator(GenericNode* node, int index) + : node_(node), index_(index) {} + + Input* GetInput() const { return node_->GetInputRecordPtr(index_); } + + GenericNode* node_; + int index_; +}; + +// A forward iterator to visit the uses of a node. The uses are returned in +// the order in which they were added as inputs. 
+template <class B, class S> +class GenericNode<B, S>::Uses::iterator { + public: + iterator(const typename GenericNode<B, S>::Uses::iterator& other) // NOLINT + : current_(other.current_), + index_(other.index_) {} + + S* operator*() { return static_cast<S*>(current_->from); } + typename GenericNode<B, S>::Edge edge() { + return typename GenericNode::Edge(CurrentInput()); + } + + bool operator==(const iterator& other) { return other.current_ == current_; } + bool operator!=(const iterator& other) { return other.current_ != current_; } + iterator& operator++() { + DCHECK(current_ != NULL); + index_++; + current_ = current_->next; + return *this; + } + iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) { + DCHECK(current_ != NULL); + index_++; + typename GenericNode<B, S>::Input* input = CurrentInput(); + current_ = current_->next; + input->Update(new_to); + return *this; + } + int index() const { return index_; } + + private: + friend class GenericNode<B, S>::Uses; + + iterator() : current_(NULL), index_(0) {} + explicit iterator(GenericNode<B, S>* node) + : current_(node->first_use_), index_(0) {} + + Input* CurrentInput() const { + return current_->from->GetInputRecordPtr(current_->input_index); + } + + typename GenericNode<B, S>::Use* current_; + int index_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GENERIC_NODE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/generic-node-inl.h nodejs-0.11.15/deps/v8/src/compiler/generic-node-inl.h --- nodejs-0.11.13/deps/v8/src/compiler/generic-node-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/generic-node-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,245 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GENERIC_NODE_INL_H_ +#define V8_COMPILER_GENERIC_NODE_INL_H_ + +#include "src/v8.h" + +#include "src/compiler/generic-graph.h" +#include "src/compiler/generic-node.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +template <class B, class S> +GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count) + : BaseClass(graph->zone()), + input_count_(input_count), + has_appendable_inputs_(false), + use_count_(0), + first_use_(NULL), + last_use_(NULL) { + inputs_.static_ = reinterpret_cast<Input*>(this + 1), AssignUniqueID(graph); +} + +template <class B, class S> +inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) { + id_ = graph->NextNodeID(); +} + +template <class B, class S> +inline typename GenericNode<B, S>::Inputs::iterator +GenericNode<B, S>::Inputs::begin() { + return typename GenericNode<B, S>::Inputs::iterator(this->node_, 0); +} + +template <class B, class S> +inline typename GenericNode<B, S>::Inputs::iterator +GenericNode<B, S>::Inputs::end() { + return typename GenericNode<B, S>::Inputs::iterator( + this->node_, this->node_->InputCount()); +} + +template <class B, class S> +inline typename GenericNode<B, S>::Uses::iterator +GenericNode<B, S>::Uses::begin() { + return typename GenericNode<B, S>::Uses::iterator(this->node_); +} + +template <class B, class S> +inline typename GenericNode<B, S>::Uses::iterator +GenericNode<B, S>::Uses::end() { + return typename GenericNode<B, S>::Uses::iterator(); +} + +template <class B, class S> +void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) { + for (Use* use = first_use_; use != NULL; use = use->next) { + use->from->GetInputRecordPtr(use->input_index)->to = replace_to; + } + if (replace_to->last_use_ == NULL) { + DCHECK_EQ(NULL, replace_to->first_use_); + replace_to->first_use_ = first_use_; + } else { + DCHECK_NE(NULL, replace_to->first_use_); + replace_to->last_use_->next = first_use_; + first_use_->prev = 
replace_to->last_use_; + } + replace_to->last_use_ = last_use_; + replace_to->use_count_ += use_count_; + use_count_ = 0; + first_use_ = NULL; + last_use_ = NULL; +} + +template <class B, class S> +template <class UnaryPredicate> +void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred, + GenericNode* replace_to) { + for (Use* use = first_use_; use != NULL;) { + Use* next = use->next; + if (pred(static_cast<S*>(use->from))) { + RemoveUse(use); + replace_to->AppendUse(use); + use->from->GetInputRecordPtr(use->input_index)->to = replace_to; + } + use = next; + } +} + +template <class B, class S> +void GenericNode<B, S>::RemoveAllInputs() { + for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end(); + ++iter) { + iter.GetInput()->Update(NULL); + } +} + +template <class B, class S> +void GenericNode<B, S>::TrimInputCount(int new_input_count) { + if (new_input_count == input_count_) return; // Nothing to do. + + DCHECK(new_input_count < input_count_); + + // Update inline inputs. + for (int i = new_input_count; i < input_count_; i++) { + typename GenericNode<B, S>::Input* input = GetInputRecordPtr(i); + input->Update(NULL); + } + input_count_ = new_input_count; +} + +template <class B, class S> +void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) { + Input* input = GetInputRecordPtr(index); + input->Update(new_to); +} + +template <class B, class S> +void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) { + GenericNode* old_to = this->to; + if (new_to == old_to) return; // Nothing to do. + // Snip out the use from where it used to be + if (old_to != NULL) { + old_to->RemoveUse(use); + } + to = new_to; + // And put it into the new node's use list. 
+ if (new_to != NULL) { + new_to->AppendUse(use); + } else { + use->next = NULL; + use->prev = NULL; + } +} + +template <class B, class S> +void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) { + if (!has_appendable_inputs_) { + void* deque_buffer = zone->New(sizeof(InputDeque)); + InputDeque* deque = new (deque_buffer) InputDeque(ZoneInputAllocator(zone)); + for (int i = 0; i < input_count_; ++i) { + deque->push_back(inputs_.static_[i]); + } + inputs_.appendable_ = deque; + has_appendable_inputs_ = true; + } +} + +template <class B, class S> +void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) { + EnsureAppendableInputs(zone); + Use* new_use = new (zone) Use; + Input new_input; + new_input.to = to_append; + new_input.use = new_use; + inputs_.appendable_->push_back(new_input); + new_use->input_index = input_count_; + new_use->from = this; + to_append->AppendUse(new_use); + input_count_++; +} + +template <class B, class S> +void GenericNode<B, S>::InsertInput(Zone* zone, int index, + GenericNode<B, S>* to_insert) { + DCHECK(index >= 0 && index < InputCount()); + // TODO(turbofan): Optimize this implementation! 
+ AppendInput(zone, InputAt(InputCount() - 1)); + for (int i = InputCount() - 1; i > index; --i) { + ReplaceInput(i, InputAt(i - 1)); + } + ReplaceInput(index, to_insert); +} + +template <class B, class S> +void GenericNode<B, S>::AppendUse(Use* use) { + use->next = NULL; + use->prev = last_use_; + if (last_use_ == NULL) { + first_use_ = use; + } else { + last_use_->next = use; + } + last_use_ = use; + ++use_count_; +} + +template <class B, class S> +void GenericNode<B, S>::RemoveUse(Use* use) { + if (last_use_ == use) { + last_use_ = use->prev; + } + if (use->prev != NULL) { + use->prev->next = use->next; + } else { + first_use_ = use->next; + } + if (use->next != NULL) { + use->next->prev = use->prev; + } + --use_count_; +} + +template <class B, class S> +inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const { + return first_use_ != NULL && first_use_->from == owner && + first_use_->next == NULL; +} + +template <class B, class S> +S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count, + S** inputs) { + size_t node_size = sizeof(GenericNode); + size_t inputs_size = input_count * sizeof(Input); + size_t uses_size = input_count * sizeof(Use); + int size = static_cast<int>(node_size + inputs_size + uses_size); + Zone* zone = graph->zone(); + void* buffer = zone->New(size); + S* result = new (buffer) S(graph, input_count); + Input* input = + reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size); + Use* use = + reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size); + + for (int current = 0; current < input_count; ++current) { + GenericNode* to = *inputs++; + input->to = to; + input->use = use; + use->input_index = current; + use->from = result; + to->AppendUse(use); + ++use; + ++input; + } + return result; +} +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GENERIC_NODE_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-builder.cc nodejs-0.11.15/deps/v8/src/compiler/graph-builder.cc --- 
nodejs-0.11.13/deps/v8/src/compiler/graph-builder.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-builder.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,241 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/graph-builder.h" + +#include "src/compiler.h" +#include "src/compiler/generic-graph.h" +#include "src/compiler/generic-node.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/graph-visualizer.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/operator-properties.h" +#include "src/compiler/operator-properties-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + + +StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph, + CommonOperatorBuilder* common) + : GraphBuilder(graph), + common_(common), + environment_(NULL), + current_context_(NULL), + exit_control_(NULL) {} + + +Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count, + Node** value_inputs) { + bool has_context = OperatorProperties::HasContextInput(op); + bool has_control = OperatorProperties::GetControlInputCount(op) == 1; + bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1; + + DCHECK(OperatorProperties::GetControlInputCount(op) < 2); + DCHECK(OperatorProperties::GetEffectInputCount(op) < 2); + + Node* result = NULL; + if (!has_context && !has_control && !has_effect) { + result = graph()->NewNode(op, value_input_count, value_inputs); + } else { + int input_count_with_deps = value_input_count; + if (has_context) ++input_count_with_deps; + if (has_control) ++input_count_with_deps; + if (has_effect) ++input_count_with_deps; + void* raw_buffer = alloca(kPointerSize * input_count_with_deps); + Node** buffer = reinterpret_cast<Node**>(raw_buffer); + memcpy(buffer, value_inputs, 
kPointerSize * value_input_count); + Node** current_input = buffer + value_input_count; + if (has_context) { + *current_input++ = current_context(); + } + if (has_effect) { + *current_input++ = environment_->GetEffectDependency(); + } + if (has_control) { + *current_input++ = environment_->GetControlDependency(); + } + result = graph()->NewNode(op, input_count_with_deps, buffer); + if (has_effect) { + environment_->UpdateEffectDependency(result); + } + if (OperatorProperties::HasControlOutput(result->op()) && + !environment()->IsMarkedAsUnreachable()) { + environment_->UpdateControlDependency(result); + } + } + + return result; +} + + +void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction( + Node* exit) { + if (environment()->IsMarkedAsUnreachable()) return; + if (exit_control() != NULL) { + exit = MergeControl(exit_control(), exit); + } + environment()->MarkAsUnreachable(); + set_exit_control(exit); +} + + +StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment( + Environment* env) { + return new (zone()) Environment(*env); +} + + +StructuredGraphBuilder::Environment::Environment( + StructuredGraphBuilder* builder, Node* control_dependency) + : builder_(builder), + control_dependency_(control_dependency), + effect_dependency_(control_dependency), + values_(NodeVector::allocator_type(zone())) {} + + +StructuredGraphBuilder::Environment::Environment(const Environment& copy) + : builder_(copy.builder()), + control_dependency_(copy.control_dependency_), + effect_dependency_(copy.effect_dependency_), + values_(copy.values_) {} + + +void StructuredGraphBuilder::Environment::Merge(Environment* other) { + DCHECK(values_.size() == other->values_.size()); + + // Nothing to do if the other environment is dead. + if (other->IsMarkedAsUnreachable()) return; + + // Resurrect a dead environment by copying the contents of the other one and + // placing a singleton merge as the new control dependency. 
+ if (this->IsMarkedAsUnreachable()) { + Node* other_control = other->control_dependency_; + control_dependency_ = graph()->NewNode(common()->Merge(1), other_control); + effect_dependency_ = other->effect_dependency_; + values_ = other->values_; + return; + } + + // Create a merge of the control dependencies of both environments and update + // the current environment's control dependency accordingly. + Node* control = builder_->MergeControl(this->GetControlDependency(), + other->GetControlDependency()); + UpdateControlDependency(control); + + // Create a merge of the effect dependencies of both environments and update + // the current environment's effect dependency accordingly. + Node* effect = builder_->MergeEffect(this->GetEffectDependency(), + other->GetEffectDependency(), control); + UpdateEffectDependency(effect); + + // Introduce Phi nodes for values that have differing input at merge points, + // potentially extending an existing Phi node if possible. + for (int i = 0; i < static_cast<int>(values_.size()); ++i) { + values_[i] = builder_->MergeValue(values_[i], other->values_[i], control); + } +} + + +void StructuredGraphBuilder::Environment::PrepareForLoop() { + Node* control = GetControlDependency(); + for (int i = 0; i < static_cast<int>(values()->size()); ++i) { + Node* phi = builder_->NewPhi(1, values()->at(i), control); + values()->at(i) = phi; + } + Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control); + UpdateEffectDependency(effect); +} + + +Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) { + Operator* phi_op = common()->Phi(count); + void* raw_buffer = alloca(kPointerSize * (count + 1)); + Node** buffer = reinterpret_cast<Node**>(raw_buffer); + MemsetPointer(buffer, input, count); + buffer[count] = control; + return graph()->NewNode(phi_op, count + 1, buffer); +} + + +// TODO(mstarzinger): Revisit this once we have proper effect states. 
+Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input, + Node* control) { + Operator* phi_op = common()->EffectPhi(count); + void* raw_buffer = alloca(kPointerSize * (count + 1)); + Node** buffer = reinterpret_cast<Node**>(raw_buffer); + MemsetPointer(buffer, input, count); + buffer[count] = control; + return graph()->NewNode(phi_op, count + 1, buffer); +} + + +Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) { + int inputs = OperatorProperties::GetControlInputCount(control->op()) + 1; + if (control->opcode() == IrOpcode::kLoop) { + // Control node for loop exists, add input. + Operator* op = common()->Loop(inputs); + control->AppendInput(zone(), other); + control->set_op(op); + } else if (control->opcode() == IrOpcode::kMerge) { + // Control node for merge exists, add input. + Operator* op = common()->Merge(inputs); + control->AppendInput(zone(), other); + control->set_op(op); + } else { + // Control node is a singleton, introduce a merge. + Operator* op = common()->Merge(inputs); + control = graph()->NewNode(op, control, other); + } + return control; +} + + +Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other, + Node* control) { + int inputs = OperatorProperties::GetControlInputCount(control->op()); + if (value->opcode() == IrOpcode::kEffectPhi && + NodeProperties::GetControlInput(value) == control) { + // Phi already exists, add input. + value->set_op(common()->EffectPhi(inputs)); + value->InsertInput(zone(), inputs - 1, other); + } else if (value != other) { + // Phi does not exist yet, introduce one. + value = NewEffectPhi(inputs, value, control); + value->ReplaceInput(inputs - 1, other); + } + return value; +} + + +Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other, + Node* control) { + int inputs = OperatorProperties::GetControlInputCount(control->op()); + if (value->opcode() == IrOpcode::kPhi && + NodeProperties::GetControlInput(value) == control) { + // Phi already exists, add input. 
+ value->set_op(common()->Phi(inputs)); + value->InsertInput(zone(), inputs - 1, other); + } else if (value != other) { + // Phi does not exist yet, introduce one. + value = NewPhi(inputs, value, control); + value->ReplaceInput(inputs - 1, other); + } + return value; +} + + +Node* StructuredGraphBuilder::dead_control() { + if (!dead_control_.is_set()) { + Node* dead_node = graph()->NewNode(common_->Dead()); + dead_control_.set(dead_node); + return dead_node; + } + return dead_control_.get(); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-builder.h nodejs-0.11.15/deps/v8/src/compiler/graph-builder.h --- nodejs-0.11.13/deps/v8/src/compiler/graph-builder.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-builder.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,226 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_GRAPH_BUILDER_H_ +#define V8_COMPILER_GRAPH_BUILDER_H_ + +#include "src/v8.h" + +#include "src/allocation.h" +#include "src/compiler/common-operator.h" +#include "src/compiler/graph.h" +#include "src/unique.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Node; + +// A common base class for anything that creates nodes in a graph. 
+class GraphBuilder { + public: + explicit GraphBuilder(Graph* graph) : graph_(graph) {} + virtual ~GraphBuilder() {} + + Node* NewNode(Operator* op) { + return MakeNode(op, 0, static_cast<Node**>(NULL)); + } + + Node* NewNode(Operator* op, Node* n1) { return MakeNode(op, 1, &n1); } + + Node* NewNode(Operator* op, Node* n1, Node* n2) { + Node* buffer[] = {n1, n2}; + return MakeNode(op, ARRAY_SIZE(buffer), buffer); + } + + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) { + Node* buffer[] = {n1, n2, n3}; + return MakeNode(op, ARRAY_SIZE(buffer), buffer); + } + + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) { + Node* buffer[] = {n1, n2, n3, n4}; + return MakeNode(op, ARRAY_SIZE(buffer), buffer); + } + + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, + Node* n5) { + Node* buffer[] = {n1, n2, n3, n4, n5}; + return MakeNode(op, ARRAY_SIZE(buffer), buffer); + } + + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5, + Node* n6) { + Node* nodes[] = {n1, n2, n3, n4, n5, n6}; + return MakeNode(op, ARRAY_SIZE(nodes), nodes); + } + + Node* NewNode(Operator* op, int value_input_count, Node** value_inputs) { + return MakeNode(op, value_input_count, value_inputs); + } + + Graph* graph() const { return graph_; } + + protected: + // Base implementation used by all factory methods. + virtual Node* MakeNode(Operator* op, int value_input_count, + Node** value_inputs) = 0; + + private: + Graph* graph_; +}; + + +// The StructuredGraphBuilder produces a high-level IR graph. It is used as the +// base class for concrete implementations (e.g the AstGraphBuilder or the +// StubGraphBuilder). +class StructuredGraphBuilder : public GraphBuilder { + public: + StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common); + virtual ~StructuredGraphBuilder() {} + + // Creates a new Phi node having {count} input values. 
+ Node* NewPhi(int count, Node* input, Node* control); + Node* NewEffectPhi(int count, Node* input, Node* control); + + // Helpers for merging control, effect or value dependencies. + Node* MergeControl(Node* control, Node* other); + Node* MergeEffect(Node* value, Node* other, Node* control); + Node* MergeValue(Node* value, Node* other, Node* control); + + // Helpers to create new control nodes. + Node* NewIfTrue() { return NewNode(common()->IfTrue()); } + Node* NewIfFalse() { return NewNode(common()->IfFalse()); } + Node* NewMerge() { return NewNode(common()->Merge(1)); } + Node* NewLoop() { return NewNode(common()->Loop(1)); } + Node* NewBranch(Node* condition) { + return NewNode(common()->Branch(), condition); + } + + protected: + class Environment; + friend class ControlBuilder; + + // The following method creates a new node having the specified operator and + // ensures effect and control dependencies are wired up. The dependencies + // tracked by the environment might be mutated. + virtual Node* MakeNode(Operator* op, int value_input_count, + Node** value_inputs); + + Environment* environment() const { return environment_; } + void set_environment(Environment* env) { environment_ = env; } + + Node* current_context() const { return current_context_; } + void set_current_context(Node* context) { current_context_ = context; } + + Node* exit_control() const { return exit_control_; } + void set_exit_control(Node* node) { exit_control_ = node; } + + Node* dead_control(); + + // TODO(mstarzinger): Use phase-local zone instead! + Zone* zone() const { return graph()->zone(); } + Isolate* isolate() const { return zone()->isolate(); } + CommonOperatorBuilder* common() const { return common_; } + + // Helper to wrap a Handle<T> into a Unique<T>. + template <class T> + PrintableUnique<T> MakeUnique(Handle<T> object) { + return PrintableUnique<T>::CreateUninitialized(zone(), object); + } + + // Support for control flow builders. 
The concrete type of the environment + // depends on the graph builder, but environments themselves are not virtual. + virtual Environment* CopyEnvironment(Environment* env); + + // Helper to indicate a node exits the function body. + void UpdateControlDependencyToLeaveFunction(Node* exit); + + private: + CommonOperatorBuilder* common_; + Environment* environment_; + + // Node representing the control dependency for dead code. + SetOncePointer<Node> dead_control_; + + // Node representing the current context within the function body. + Node* current_context_; + + // Merge of all control nodes that exit the function body. + Node* exit_control_; + + DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder); +}; + + +// The abstract execution environment contains static knowledge about +// execution state at arbitrary control-flow points. It allows for +// simulation of the control-flow at compile time. +class StructuredGraphBuilder::Environment : public ZoneObject { + public: + Environment(StructuredGraphBuilder* builder, Node* control_dependency); + Environment(const Environment& copy); + + // Control dependency tracked by this environment. + Node* GetControlDependency() { return control_dependency_; } + void UpdateControlDependency(Node* dependency) { + control_dependency_ = dependency; + } + + // Effect dependency tracked by this environment. + Node* GetEffectDependency() { return effect_dependency_; } + void UpdateEffectDependency(Node* dependency) { + effect_dependency_ = dependency; + } + + // Mark this environment as being unreachable. + void MarkAsUnreachable() { + UpdateControlDependency(builder()->dead_control()); + } + bool IsMarkedAsUnreachable() { + return GetControlDependency()->opcode() == IrOpcode::kDead; + } + + // Merge another environment into this one. + void Merge(Environment* other); + + // Copies this environment at a control-flow split point. 
+ Environment* CopyForConditional() { return builder()->CopyEnvironment(this); } + + // Copies this environment to a potentially unreachable control-flow point. + Environment* CopyAsUnreachable() { + Environment* env = builder()->CopyEnvironment(this); + env->MarkAsUnreachable(); + return env; + } + + // Copies this environment at a loop header control-flow point. + Environment* CopyForLoop() { + PrepareForLoop(); + return builder()->CopyEnvironment(this); + } + + protected: + // TODO(mstarzinger): Use phase-local zone instead! + Zone* zone() const { return graph()->zone(); } + Graph* graph() const { return builder_->graph(); } + StructuredGraphBuilder* builder() const { return builder_; } + CommonOperatorBuilder* common() { return builder_->common(); } + NodeVector* values() { return &values_; } + + // Prepare environment to be used as loop header. + void PrepareForLoop(); + + private: + StructuredGraphBuilder* builder_; + Node* control_dependency_; + Node* effect_dependency_; + NodeVector values_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GRAPH_BUILDER_H__ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph.cc nodejs-0.11.15/deps/v8/src/compiler/graph.cc --- nodejs-0.11.13/deps/v8/src/compiler/graph.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/graph.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/node.h" +#include "src/compiler/node-aux-data-inl.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/operator-properties.h" +#include "src/compiler/operator-properties-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Graph::Graph(Zone* zone) + : GenericGraph<Node>(zone), + decorators_(DecoratorVector::allocator_type(zone)) {} + + +Node* Graph::NewNode(Operator* op, int input_count, Node** inputs) { + DCHECK(op->InputCount() <= input_count); + Node* result = Node::New(this, input_count, inputs); + result->Initialize(op); + for (DecoratorVector::iterator i = decorators_.begin(); + i != decorators_.end(); ++i) { + (*i)->Decorate(result); + } + return result; +} + + +void Graph::ChangeOperator(Node* node, Operator* op) { node->set_op(op); } + + +void Graph::DeleteNode(Node* node) { +#if DEBUG + // Nodes can't be deleted if they have uses. + Node::Uses::iterator use_iterator(node->uses().begin()); + DCHECK(use_iterator == node->uses().end()); +#endif + +#if DEBUG + memset(node, 0xDE, sizeof(Node)); +#endif +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph.h nodejs-0.11.15/deps/v8/src/compiler/graph.h --- nodejs-0.11.13/deps/v8/src/compiler/graph.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,97 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GRAPH_H_ +#define V8_COMPILER_GRAPH_H_ + +#include <map> +#include <set> + +#include "src/compiler/generic-algorithm.h" +#include "src/compiler/node.h" +#include "src/compiler/node-aux-data.h" +#include "src/compiler/source-position.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class GraphDecorator; + + +class Graph : public GenericGraph<Node> { + public: + explicit Graph(Zone* zone); + + // Base implementation used by all factory methods. + Node* NewNode(Operator* op, int input_count, Node** inputs); + + // Factories for nodes with static input counts. + Node* NewNode(Operator* op) { + return NewNode(op, 0, static_cast<Node**>(NULL)); + } + Node* NewNode(Operator* op, Node* n1) { return NewNode(op, 1, &n1); } + Node* NewNode(Operator* op, Node* n1, Node* n2) { + Node* nodes[] = {n1, n2}; + return NewNode(op, ARRAY_SIZE(nodes), nodes); + } + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) { + Node* nodes[] = {n1, n2, n3}; + return NewNode(op, ARRAY_SIZE(nodes), nodes); + } + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) { + Node* nodes[] = {n1, n2, n3, n4}; + return NewNode(op, ARRAY_SIZE(nodes), nodes); + } + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, + Node* n5) { + Node* nodes[] = {n1, n2, n3, n4, n5}; + return NewNode(op, ARRAY_SIZE(nodes), nodes); + } + Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5, + Node* n6) { + Node* nodes[] = {n1, n2, n3, n4, n5, n6}; + return NewNode(op, ARRAY_SIZE(nodes), nodes); + } + + void ChangeOperator(Node* node, Operator* op); + void DeleteNode(Node* node); + + template <class Visitor> + void VisitNodeUsesFrom(Node* node, Visitor* visitor); + + template <class Visitor> + void VisitNodeUsesFromStart(Visitor* visitor); + + template <class Visitor> + void VisitNodeInputsFromEnd(Visitor* visitor); + + void AddDecorator(GraphDecorator* decorator) { + decorators_.push_back(decorator); + } + + void 
RemoveDecorator(GraphDecorator* decorator) { + DecoratorVector::iterator it = + std::find(decorators_.begin(), decorators_.end(), decorator); + DCHECK(it != decorators_.end()); + decorators_.erase(it, it + 1); + } + + private: + typedef std::vector<GraphDecorator*, zone_allocator<GraphDecorator*> > + DecoratorVector; + DecoratorVector decorators_; +}; + + +class GraphDecorator : public ZoneObject { + public: + virtual ~GraphDecorator() {} + virtual void Decorate(Node* node) = 0; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GRAPH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-inl.h nodejs-0.11.15/deps/v8/src/compiler/graph-inl.h --- nodejs-0.11.13/deps/v8/src/compiler/graph-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GRAPH_INL_H_ +#define V8_COMPILER_GRAPH_INL_H_ + +#include "src/compiler/generic-algorithm-inl.h" +#include "src/compiler/graph.h" + +namespace v8 { +namespace internal { +namespace compiler { + +template <class Visitor> +void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) { + GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(this, node, + visitor); +} + + +template <class Visitor> +void Graph::VisitNodeUsesFromStart(Visitor* visitor) { + VisitNodeUsesFrom(start(), visitor); +} + + +template <class Visitor> +void Graph::VisitNodeInputsFromEnd(Visitor* visitor) { + GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >( + this, end(), visitor); +} +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GRAPH_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-reducer.cc nodejs-0.11.15/deps/v8/src/compiler/graph-reducer.cc --- nodejs-0.11.13/deps/v8/src/compiler/graph-reducer.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-reducer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,94 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/graph-reducer.h" + +#include <functional> + +#include "src/compiler/graph-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +GraphReducer::GraphReducer(Graph* graph) + : graph_(graph), reducers_(Reducers::allocator_type(graph->zone())) {} + + +static bool NodeIdIsLessThan(const Node* node, NodeId id) { + return node->id() < id; +} + + +void GraphReducer::ReduceNode(Node* node) { + Reducers::iterator skip = reducers_.end(); + static const unsigned kMaxAttempts = 16; + bool reduce = true; + for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) { + if (!reduce) return; + reduce = false; // Assume we don't need to rerun any reducers. 
+ int before = graph_->NodeCount(); + for (Reducers::iterator i = reducers_.begin(); i != reducers_.end(); ++i) { + if (i == skip) continue; // Skip this reducer. + Reduction reduction = (*i)->Reduce(node); + Node* replacement = reduction.replacement(); + if (replacement == NULL) { + // No change from this reducer. + } else if (replacement == node) { + // {replacement == node} represents an in-place reduction. + // Rerun all the reducers except the current one for this node, + // as now there may be more opportunities for reduction. + reduce = true; + skip = i; + break; + } else { + if (node == graph_->start()) graph_->SetStart(replacement); + if (node == graph_->end()) graph_->SetEnd(replacement); + // If {node} was replaced by an old node, unlink {node} and assume that + // {replacement} was already reduced and finish. + if (replacement->id() < before) { + node->RemoveAllInputs(); + node->ReplaceUses(replacement); + return; + } + // Otherwise, {node} was replaced by a new node. Replace all old uses of + // {node} with {replacement}. New nodes created by this reduction can + // use {node}. + node->ReplaceUsesIf( + std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement); + // Unlink {node} if it's no longer used. + if (node->uses().empty()) node->RemoveAllInputs(); + // Rerun all the reductions on the {replacement}. + skip = reducers_.end(); + node = replacement; + reduce = true; + break; + } + } + } +} + + +// A helper class to reuse the node traversal algorithm. +struct GraphReducerVisitor V8_FINAL : public NullNodeVisitor { + explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {} + GenericGraphVisit::Control Post(Node* node) { + reducer_->ReduceNode(node); + return GenericGraphVisit::CONTINUE; + } + GraphReducer* reducer_; +}; + + +void GraphReducer::ReduceGraph() { + GraphReducerVisitor visitor(this); + // Perform a post-order reduction of all nodes starting from the end. 
+ graph()->VisitNodeInputsFromEnd(&visitor); +} + + +// TODO(titzer): partial graph reductions. +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-reducer.h nodejs-0.11.15/deps/v8/src/compiler/graph-reducer.h --- nodejs-0.11.13/deps/v8/src/compiler/graph-reducer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-reducer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,77 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_GRAPH_REDUCER_H_ +#define V8_COMPILER_GRAPH_REDUCER_H_ + +#include <list> + +#include "src/zone-allocator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Forward declarations. +class Graph; +class Node; + + +// Represents the result of trying to reduce a node in the graph. +class Reduction V8_FINAL { + public: + explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {} + + Node* replacement() const { return replacement_; } + bool Changed() const { return replacement() != NULL; } + + private: + Node* replacement_; +}; + + +// A reducer can reduce or simplify a given node based on its operator and +// inputs. This class functions as an extension point for the graph reducer for +// language-specific reductions (e.g. reduction based on types or constant +// folding of low-level operators) can be integrated into the graph reduction +// phase. +class Reducer { + public: + virtual ~Reducer() {} + + // Try to reduce a node if possible. + virtual Reduction Reduce(Node* node) = 0; + + // Helper functions for subclasses to produce reductions for a node. + static Reduction NoChange() { return Reduction(); } + static Reduction Replace(Node* node) { return Reduction(node); } + static Reduction Changed(Node* node) { return Reduction(node); } +}; + + +// Performs an iterative reduction of a node graph. 
+class GraphReducer V8_FINAL { + public: + explicit GraphReducer(Graph* graph); + + Graph* graph() const { return graph_; } + + void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); } + + // Reduce a single node. + void ReduceNode(Node* node); + // Reduce the whole graph. + void ReduceGraph(); + + private: + typedef std::list<Reducer*, zone_allocator<Reducer*> > Reducers; + + Graph* graph_; + Reducers reducers_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GRAPH_REDUCER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-replay.cc nodejs-0.11.15/deps/v8/src/compiler/graph-replay.cc --- nodejs-0.11.13/deps/v8/src/compiler/graph-replay.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-replay.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,81 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/graph-replay.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/graph.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/node.h" +#include "src/compiler/operator.h" +#include "src/compiler/operator-properties-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#ifdef DEBUG + +void GraphReplayPrinter::PrintReplay(Graph* graph) { + GraphReplayPrinter replay; + PrintF(" Node* nil = graph.NewNode(common_builder.Dead());\n"); + graph->VisitNodeInputsFromEnd(&replay); +} + + +GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) { + PrintReplayOpCreator(node->op()); + PrintF(" Node* n%d = graph.NewNode(op", node->id()); + for (int i = 0; i < node->InputCount(); ++i) { + PrintF(", nil"); + } + PrintF("); USE(n%d);\n", node->id()); + return GenericGraphVisit::CONTINUE; +} + + +void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) { + PrintF(" n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id()); +} + + +void GraphReplayPrinter::PrintReplayOpCreator(Operator* op) { + IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode()); + const char* builder = + IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder"; + const char* mnemonic = IrOpcode::IsCommonOpcode(opcode) + ? 
IrOpcode::Mnemonic(opcode) + : IrOpcode::Mnemonic(opcode) + 2; + PrintF(" op = %s.%s(", builder, mnemonic); + switch (opcode) { + case IrOpcode::kParameter: + case IrOpcode::kNumberConstant: + PrintF("0"); + break; + case IrOpcode::kLoad: + PrintF("unique_name"); + break; + case IrOpcode::kHeapConstant: + PrintF("unique_constant"); + break; + case IrOpcode::kPhi: + PrintF("%d", op->InputCount()); + break; + case IrOpcode::kEffectPhi: + PrintF("%d", OperatorProperties::GetEffectInputCount(op)); + break; + case IrOpcode::kLoop: + case IrOpcode::kMerge: + PrintF("%d", OperatorProperties::GetControlInputCount(op)); + break; + default: + break; + } + PrintF(");\n"); +} + +#endif // DEBUG +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-replay.h nodejs-0.11.15/deps/v8/src/compiler/graph-replay.h --- nodejs-0.11.13/deps/v8/src/compiler/graph-replay.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-replay.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_GRAPH_REPLAY_H_ +#define V8_COMPILER_GRAPH_REPLAY_H_ + +#include "src/v8.h" + +#include "src/compiler/node.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Graph; +class Operator; + +// Helper class to print a full replay of a graph. This replay can be used to +// materialize the same graph within a C++ unit test and hence test subsequent +// optimization passes on a graph without going through the construction steps. 
+class GraphReplayPrinter : public NullNodeVisitor { + public: +#ifdef DEBUG + static void PrintReplay(Graph* graph); +#else + static void PrintReplay(Graph* graph) {} +#endif + + GenericGraphVisit::Control Pre(Node* node); + void PostEdge(Node* from, int index, Node* to); + + private: + GraphReplayPrinter() {} + + static void PrintReplayOpCreator(Operator* op); + + DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GRAPH_REPLAY_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-visualizer.cc nodejs-0.11.15/deps/v8/src/compiler/graph-visualizer.cc --- nodejs-0.11.13/deps/v8/src/compiler/graph-visualizer.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-visualizer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,265 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/graph-visualizer.h" + +#include "src/compiler/generic-algorithm.h" +#include "src/compiler/generic-node.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/graph.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/node.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" +#include "src/ostreams.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define DEAD_COLOR "#999999" + +class GraphVisualizer : public NullNodeVisitor { + public: + GraphVisualizer(OStream& os, const Graph* graph); // NOLINT + + void Print(); + + GenericGraphVisit::Control Pre(Node* node); + GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to); + + private: + void AnnotateNode(Node* node); + void PrintEdge(Node* from, int index, Node* to); + + NodeSet all_nodes_; + NodeSet white_nodes_; + bool use_to_def_; + OStream& os_; + const Graph* const graph_; + + DISALLOW_COPY_AND_ASSIGN(GraphVisualizer); +}; + + +static Node* GetControlCluster(Node* node) { + if (OperatorProperties::IsBasicBlockBegin(node->op())) { + return node; + } else if (OperatorProperties::GetControlInputCount(node->op()) == 1) { + Node* control = NodeProperties::GetControlInput(node, 0); + return OperatorProperties::IsBasicBlockBegin(control->op()) ? 
control + : NULL; + } else { + return NULL; + } +} + + +GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) { + if (all_nodes_.count(node) == 0) { + Node* control_cluster = GetControlCluster(node); + if (control_cluster != NULL) { + os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n"; + } + os_ << " ID" << node->id() << " [\n"; + AnnotateNode(node); + os_ << " ]\n"; + if (control_cluster != NULL) os_ << " }\n"; + all_nodes_.insert(node); + if (use_to_def_) white_nodes_.insert(node); + } + return GenericGraphVisit::CONTINUE; +} + + +GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index, + Node* to) { + if (use_to_def_) return GenericGraphVisit::CONTINUE; + // When going from def to use, only consider white -> other edges, which are + // the dead nodes that use live nodes. We're probably not interested in + // dead nodes that only use other dead nodes. + if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE; + return GenericGraphVisit::SKIP; +} + + +class Escaped { + public: + explicit Escaped(const OStringStream& os) : str_(os.c_str()) {} + + friend OStream& operator<<(OStream& os, const Escaped& e) { + for (const char* s = e.str_; *s != '\0'; ++s) { + if (needs_escape(*s)) os << "\\"; + os << *s; + } + return os; + } + + private: + static bool needs_escape(char ch) { + switch (ch) { + case '>': + case '<': + case '|': + case '}': + case '{': + return true; + default: + return false; + } + } + + const char* const str_; +}; + + +static bool IsLikelyBackEdge(Node* from, int index, Node* to) { + if (from->opcode() == IrOpcode::kPhi || + from->opcode() == IrOpcode::kEffectPhi) { + Node* control = NodeProperties::GetControlInput(from, 0); + return control->opcode() != IrOpcode::kMerge && control != to && index != 0; + } else if (from->opcode() == IrOpcode::kLoop) { + return index != 0; + } else { + return false; + } +} + + +void GraphVisualizer::AnnotateNode(Node* node) { + if (!use_to_def_) { + os_ << " 
style=\"filled\"\n" + << " fillcolor=\"" DEAD_COLOR "\"\n"; + } + + os_ << " shape=\"record\"\n"; + switch (node->opcode()) { + case IrOpcode::kEnd: + case IrOpcode::kDead: + case IrOpcode::kStart: + os_ << " style=\"diagonals\"\n"; + break; + case IrOpcode::kMerge: + case IrOpcode::kIfTrue: + case IrOpcode::kIfFalse: + case IrOpcode::kLoop: + os_ << " style=\"rounded\"\n"; + break; + default: + break; + } + + OStringStream label; + label << *node->op(); + os_ << " label=\"{{#" << node->id() << ":" << Escaped(label); + + InputIter i = node->inputs().begin(); + for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0; + ++i, j--) { + os_ << "|<I" << i.index() << ">#" << (*i)->id(); + } + for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0; + ++i, j--) { + os_ << "|<I" << i.index() << ">X #" << (*i)->id(); + } + for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0; + ++i, j--) { + os_ << "|<I" << i.index() << ">E #" << (*i)->id(); + } + + if (!use_to_def_ || OperatorProperties::IsBasicBlockBegin(node->op()) || + GetControlCluster(node) == NULL) { + for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0; + ++i, j--) { + os_ << "|<I" << i.index() << ">C #" << (*i)->id(); + } + } + os_ << "}"; + + if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) { + Bounds bounds = NodeProperties::GetBounds(node); + OStringStream upper; + bounds.upper->PrintTo(upper); + OStringStream lower; + bounds.lower->PrintTo(lower); + os_ << "|" << Escaped(upper) << "|" << Escaped(lower); + } + os_ << "}\"\n"; +} + + +void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) { + bool unconstrained = IsLikelyBackEdge(from, index, to); + os_ << " ID" << from->id(); + if (all_nodes_.count(to) == 0) { + os_ << ":I" << index << ":n -> DEAD_INPUT"; + } else if (OperatorProperties::IsBasicBlockBegin(from->op()) || + GetControlCluster(from) == NULL || + (OperatorProperties::GetControlInputCount(from->op()) > 0 && 
+ NodeProperties::GetControlInput(from) != to)) { + os_ << ":I" << index << ":n -> ID" << to->id() << ":s"; + if (unconstrained) os_ << " [constraint=false,style=dotted]"; + } else { + os_ << " -> ID" << to->id() << ":s [color=transparent" + << (unconstrained ? ", constraint=false" : "") << "]"; + } + os_ << "\n"; +} + + +void GraphVisualizer::Print() { + os_ << "digraph D {\n" + << " node [fontsize=8,height=0.25]\n" + << " rankdir=\"BT\"\n" + << " \n"; + + // Make sure all nodes have been output before writing out the edges. + use_to_def_ = true; + // TODO(svenpanne) Remove the need for the const_casts. + const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this); + white_nodes_.insert(const_cast<Graph*>(graph_)->start()); + + // Visit all uses of white nodes. + use_to_def_ = false; + GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >( + const_cast<Graph*>(graph_), white_nodes_.begin(), white_nodes_.end(), + this); + + os_ << " DEAD_INPUT [\n" + << " style=\"filled\" \n" + << " fillcolor=\"" DEAD_COLOR "\"\n" + << " ]\n" + << "\n"; + + // With all the nodes written, add the edges. 
+ for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) { + Node::Inputs inputs = (*i)->inputs(); + for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end(); + ++iter) { + PrintEdge(iter.edge().from(), iter.edge().index(), iter.edge().to()); + } + } + os_ << "}\n"; +} + + +GraphVisualizer::GraphVisualizer(OStream& os, const Graph* graph) // NOLINT + : all_nodes_(NodeSet::key_compare(), + NodeSet::allocator_type(graph->zone())), + white_nodes_(NodeSet::key_compare(), + NodeSet::allocator_type(graph->zone())), + use_to_def_(true), + os_(os), + graph_(graph) {} + + +OStream& operator<<(OStream& os, const AsDOT& ad) { + GraphVisualizer(os, &ad.graph).Print(); + return os; +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/graph-visualizer.h nodejs-0.11.15/deps/v8/src/compiler/graph-visualizer.h --- nodejs-0.11.13/deps/v8/src/compiler/graph-visualizer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/graph-visualizer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,29 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_ +#define V8_COMPILER_GRAPH_VISUALIZER_H_ + +#include "src/v8.h" + +namespace v8 { +namespace internal { + +class OStream; + +namespace compiler { + +class Graph; + +struct AsDOT { + explicit AsDOT(const Graph& g) : graph(g) {} + const Graph& graph; +}; + +OStream& operator<<(OStream& os, const AsDOT& ad); +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_GRAPH_VISUALIZER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/ia32/code-generator-ia32.cc nodejs-0.11.15/deps/v8/src/compiler/ia32/code-generator-ia32.cc --- nodejs-0.11.13/deps/v8/src/compiler/ia32/code-generator-ia32.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/ia32/code-generator-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,956 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/code-generator.h" + +#include "src/compiler/code-generator-impl.h" +#include "src/compiler/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" +#include "src/ia32/assembler-ia32.h" +#include "src/ia32/macro-assembler-ia32.h" +#include "src/scopes.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define __ masm()-> + + +// Adds IA-32 specific methods for decoding operands. 
+class IA32OperandConverter : public InstructionOperandConverter { + public: + IA32OperandConverter(CodeGenerator* gen, Instruction* instr) + : InstructionOperandConverter(gen, instr) {} + + Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); } + + Immediate InputImmediate(int index) { + return ToImmediate(instr_->InputAt(index)); + } + + Operand OutputOperand() { return ToOperand(instr_->Output()); } + + Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); } + + Operand ToOperand(InstructionOperand* op, int extra = 0) { + if (op->IsRegister()) { + DCHECK(extra == 0); + return Operand(ToRegister(op)); + } else if (op->IsDoubleRegister()) { + DCHECK(extra == 0); + return Operand(ToDoubleRegister(op)); + } + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + // The linkage computes where all spill slots are located. + FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra); + return Operand(offset.from_stack_pointer() ? 
esp : ebp, offset.offset()); + } + + Operand HighOperand(InstructionOperand* op) { + DCHECK(op->IsDoubleStackSlot()); + return ToOperand(op, kPointerSize); + } + + Immediate ToImmediate(InstructionOperand* operand) { + Constant constant = ToConstant(operand); + switch (constant.type()) { + case Constant::kInt32: + return Immediate(constant.ToInt32()); + case Constant::kFloat64: + return Immediate( + isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); + case Constant::kExternalReference: + return Immediate(constant.ToExternalReference()); + case Constant::kHeapObject: + return Immediate(constant.ToHeapObject()); + case Constant::kInt64: + break; + } + UNREACHABLE(); + return Immediate(-1); + } + + Operand MemoryOperand(int* first_input) { + const int offset = *first_input; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_MR1I: + *first_input += 2; + return Operand(InputRegister(offset + 0), InputRegister(offset + 1), + times_1, + 0); // TODO(dcarney): K != 0 + case kMode_MRI: + *first_input += 2; + return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0), + InputImmediate(offset + 1)); + case kMode_MI: + *first_input += 1; + return Operand(InputImmediate(offset + 0)); + default: + UNREACHABLE(); + return Operand(no_reg); + } + } + + Operand MemoryOperand() { + int first_input = 0; + return MemoryOperand(&first_input); + } +}; + + +static bool HasImmediateInput(Instruction* instr, int index) { + return instr->InputAt(index)->IsImmediate(); +} + + +// Assembles an instruction after register allocation, producing machine code. +void CodeGenerator::AssembleArchInstruction(Instruction* instr) { + IA32OperandConverter i(this, instr); + + switch (ArchOpcodeField::decode(instr->opcode())) { + case kArchJmp: + __ jmp(code()->GetLabel(i.InputBlock(0))); + break; + case kArchNop: + // don't emit code for nops. 
+ break; + case kArchRet: + AssembleReturn(); + break; + case kArchDeoptimize: { + int deoptimization_id = MiscField::decode(instr->opcode()); + BuildTranslation(instr, deoptimization_id); + + Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( + isolate(), deoptimization_id, Deoptimizer::LAZY); + __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY); + break; + } + case kIA32Add: + if (HasImmediateInput(instr, 1)) { + __ add(i.InputOperand(0), i.InputImmediate(1)); + } else { + __ add(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32And: + if (HasImmediateInput(instr, 1)) { + __ and_(i.InputOperand(0), i.InputImmediate(1)); + } else { + __ and_(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32Cmp: + if (HasImmediateInput(instr, 1)) { + __ cmp(i.InputOperand(0), i.InputImmediate(1)); + } else { + __ cmp(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32Test: + if (HasImmediateInput(instr, 1)) { + __ test(i.InputOperand(0), i.InputImmediate(1)); + } else { + __ test(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32Imul: + if (HasImmediateInput(instr, 1)) { + __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1)); + } else { + __ imul(i.OutputRegister(), i.InputOperand(1)); + } + break; + case kIA32Idiv: + __ cdq(); + __ idiv(i.InputOperand(1)); + break; + case kIA32Udiv: + __ xor_(edx, edx); + __ div(i.InputOperand(1)); + break; + case kIA32Not: + __ not_(i.OutputOperand()); + break; + case kIA32Neg: + __ neg(i.OutputOperand()); + break; + case kIA32Or: + if (HasImmediateInput(instr, 1)) { + __ or_(i.InputOperand(0), i.InputImmediate(1)); + } else { + __ or_(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32Xor: + if (HasImmediateInput(instr, 1)) { + __ xor_(i.InputOperand(0), i.InputImmediate(1)); + } else { + __ xor_(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32Sub: + if (HasImmediateInput(instr, 1)) { + __ sub(i.InputOperand(0), i.InputImmediate(1)); + } 
else { + __ sub(i.InputRegister(0), i.InputOperand(1)); + } + break; + case kIA32Shl: + if (HasImmediateInput(instr, 1)) { + __ shl(i.OutputRegister(), i.InputInt5(1)); + } else { + __ shl_cl(i.OutputRegister()); + } + break; + case kIA32Shr: + if (HasImmediateInput(instr, 1)) { + __ shr(i.OutputRegister(), i.InputInt5(1)); + } else { + __ shr_cl(i.OutputRegister()); + } + break; + case kIA32Sar: + if (HasImmediateInput(instr, 1)) { + __ sar(i.OutputRegister(), i.InputInt5(1)); + } else { + __ sar_cl(i.OutputRegister()); + } + break; + case kIA32Push: + if (HasImmediateInput(instr, 0)) { + __ push(i.InputImmediate(0)); + } else { + __ push(i.InputOperand(0)); + } + break; + case kIA32CallCodeObject: { + if (HasImmediateInput(instr, 0)) { + Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); + __ call(code, RelocInfo::CODE_TARGET); + } else { + Register reg = i.InputRegister(0); + int entry = Code::kHeaderSize - kHeapObjectTag; + __ call(Operand(reg, entry)); + } + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + + bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); + if (lazy_deopt) { + RecordLazyDeoptimizationEntry(instr); + } + AddNopForSmiCodeInlining(); + break; + } + case kIA32CallAddress: + if (HasImmediateInput(instr, 0)) { + // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY. + __ call(reinterpret_cast<byte*>(i.InputInt32(0)), + RelocInfo::RUNTIME_ENTRY); + } else { + __ call(i.InputRegister(0)); + } + break; + case kPopStack: { + int words = MiscField::decode(instr->opcode()); + __ add(esp, Immediate(kPointerSize * words)); + break; + } + case kIA32CallJSFunction: { + Register func = i.InputRegister(0); + + // TODO(jarin) The load of the context should be separated from the call. 
+ __ mov(esi, FieldOperand(func, JSFunction::kContextOffset)); + __ call(FieldOperand(func, JSFunction::kCodeEntryOffset)); + + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + RecordLazyDeoptimizationEntry(instr); + break; + } + case kSSEFloat64Cmp: + __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1)); + break; + case kSSEFloat64Add: + __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Sub: + __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Mul: + __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Div: + __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Mod: { + // TODO(dcarney): alignment is wrong. + __ sub(esp, Immediate(kDoubleSize)); + // Move values to st(0) and st(1). + __ movsd(Operand(esp, 0), i.InputDoubleRegister(1)); + __ fld_d(Operand(esp, 0)); + __ movsd(Operand(esp, 0), i.InputDoubleRegister(0)); + __ fld_d(Operand(esp, 0)); + // Loop while fprem isn't done. + Label mod_loop; + __ bind(&mod_loop); + // This instructions traps on all kinds inputs, but we are assuming the + // floating point control word is set to ignore them all. + __ fprem(); + // The following 2 instruction implicitly use eax. + __ fnstsw_ax(); + __ sahf(); + __ j(parity_even, &mod_loop); + // Move output to stack and clean up. + __ fstp(1); + __ fstp_d(Operand(esp, 0)); + __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); + __ add(esp, Immediate(kDoubleSize)); + break; + } + case kSSEFloat64ToInt32: + __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); + break; + case kSSEFloat64ToUint32: { + XMMRegister scratch = xmm0; + __ Move(scratch, -2147483648.0); + // TODO(turbofan): IA32 SSE subsd() should take an operand. 
+ __ addsd(scratch, i.InputDoubleRegister(0)); + __ cvttsd2si(i.OutputRegister(), scratch); + __ add(i.OutputRegister(), Immediate(0x80000000)); + break; + } + case kSSEInt32ToFloat64: + __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); + break; + case kSSEUint32ToFloat64: + // TODO(turbofan): IA32 SSE LoadUint32() should take an operand. + __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + case kSSELoad: + __ movsd(i.OutputDoubleRegister(), i.MemoryOperand()); + break; + case kSSEStore: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movsd(operand, i.InputDoubleRegister(index)); + break; + } + case kIA32LoadWord8: + __ movzx_b(i.OutputRegister(), i.MemoryOperand()); + break; + case kIA32StoreWord8: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ mov_b(operand, i.InputRegister(index)); + break; + } + case kIA32StoreWord8I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ mov_b(operand, i.InputInt8(index)); + break; + } + case kIA32LoadWord16: + __ movzx_w(i.OutputRegister(), i.MemoryOperand()); + break; + case kIA32StoreWord16: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ mov_w(operand, i.InputRegister(index)); + break; + } + case kIA32StoreWord16I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ mov_w(operand, i.InputInt16(index)); + break; + } + case kIA32LoadWord32: + __ mov(i.OutputRegister(), i.MemoryOperand()); + break; + case kIA32StoreWord32: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ mov(operand, i.InputRegister(index)); + break; + } + case kIA32StoreWord32I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ mov(operand, i.InputImmediate(index)); + break; + } + case kIA32StoreWriteBarrier: { + Register object = i.InputRegister(0); + Register index = i.InputRegister(1); + Register value = i.InputRegister(2); + __ mov(Operand(object, index, times_1, 0), value); + __ lea(index, 
Operand(object, index, times_1, 0)); + SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters() + ? kSaveFPRegs + : kDontSaveFPRegs; + __ RecordWrite(object, index, value, mode); + break; + } + } +} + + +// Assembles branches after an instruction. +void CodeGenerator::AssembleArchBranch(Instruction* instr, + FlagsCondition condition) { + IA32OperandConverter i(this, instr); + Label done; + + // Emit a branch. The true and false targets are always the last two inputs + // to the instruction. + BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2); + BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1); + bool fallthru = IsNextInAssemblyOrder(fblock); + Label* tlabel = code()->GetLabel(tblock); + Label* flabel = fallthru ? &done : code()->GetLabel(fblock); + Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar; + switch (condition) { + case kUnorderedEqual: + __ j(parity_even, flabel, flabel_distance); + // Fall through. + case kEqual: + __ j(equal, tlabel); + break; + case kUnorderedNotEqual: + __ j(parity_even, tlabel); + // Fall through. + case kNotEqual: + __ j(not_equal, tlabel); + break; + case kSignedLessThan: + __ j(less, tlabel); + break; + case kSignedGreaterThanOrEqual: + __ j(greater_equal, tlabel); + break; + case kSignedLessThanOrEqual: + __ j(less_equal, tlabel); + break; + case kSignedGreaterThan: + __ j(greater, tlabel); + break; + case kUnorderedLessThan: + __ j(parity_even, flabel, flabel_distance); + // Fall through. + case kUnsignedLessThan: + __ j(below, tlabel); + break; + case kUnorderedGreaterThanOrEqual: + __ j(parity_even, tlabel); + // Fall through. + case kUnsignedGreaterThanOrEqual: + __ j(above_equal, tlabel); + break; + case kUnorderedLessThanOrEqual: + __ j(parity_even, flabel, flabel_distance); + // Fall through. + case kUnsignedLessThanOrEqual: + __ j(below_equal, tlabel); + break; + case kUnorderedGreaterThan: + __ j(parity_even, tlabel); + // Fall through. 
+ case kUnsignedGreaterThan: + __ j(above, tlabel); + break; + case kOverflow: + __ j(overflow, tlabel); + break; + case kNotOverflow: + __ j(no_overflow, tlabel); + break; + } + if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel. + __ bind(&done); +} + + +// Assembles boolean materializations after an instruction. +void CodeGenerator::AssembleArchBoolean(Instruction* instr, + FlagsCondition condition) { + IA32OperandConverter i(this, instr); + Label done; + + // Materialize a full 32-bit 1 or 0 value. The result register is always the + // last output of the instruction. + Label check; + DCHECK_NE(0, instr->OutputCount()); + Register reg = i.OutputRegister(instr->OutputCount() - 1); + Condition cc = no_condition; + switch (condition) { + case kUnorderedEqual: + __ j(parity_odd, &check, Label::kNear); + __ mov(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + // Fall through. + case kEqual: + cc = equal; + break; + case kUnorderedNotEqual: + __ j(parity_odd, &check, Label::kNear); + __ mov(reg, Immediate(1)); + __ jmp(&done, Label::kNear); + // Fall through. + case kNotEqual: + cc = not_equal; + break; + case kSignedLessThan: + cc = less; + break; + case kSignedGreaterThanOrEqual: + cc = greater_equal; + break; + case kSignedLessThanOrEqual: + cc = less_equal; + break; + case kSignedGreaterThan: + cc = greater; + break; + case kUnorderedLessThan: + __ j(parity_odd, &check, Label::kNear); + __ mov(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedLessThan: + cc = below; + break; + case kUnorderedGreaterThanOrEqual: + __ j(parity_odd, &check, Label::kNear); + __ mov(reg, Immediate(1)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedGreaterThanOrEqual: + cc = above_equal; + break; + case kUnorderedLessThanOrEqual: + __ j(parity_odd, &check, Label::kNear); + __ mov(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + // Fall through. 
+ case kUnsignedLessThanOrEqual: + cc = below_equal; + break; + case kUnorderedGreaterThan: + __ j(parity_odd, &check, Label::kNear); + __ mov(reg, Immediate(1)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedGreaterThan: + cc = above; + break; + case kOverflow: + cc = overflow; + break; + case kNotOverflow: + cc = no_overflow; + break; + } + __ bind(&check); + if (reg.is_byte_register()) { + // setcc for byte registers (al, bl, cl, dl). + __ setcc(cc, reg); + __ movzx_b(reg, reg); + } else { + // Emit a branch to set a register to either 1 or 0. + Label set; + __ j(cc, &set, Label::kNear); + __ mov(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + __ bind(&set); + __ mov(reg, Immediate(1)); + } + __ bind(&done); +} + + +// The calling convention for JSFunctions on IA32 passes arguments on the +// stack and the JSFunction and context in EDI and ESI, respectively, thus +// the steps of the call look as follows: + +// --{ before the call instruction }-------------------------------------------- +// | caller frame | +// ^ esp ^ ebp + +// --{ push arguments and setup ESI, EDI }-------------------------------------- +// | args + receiver | caller frame | +// ^ esp ^ ebp +// [edi = JSFunction, esi = context] + +// --{ call [edi + kCodeEntryOffset] }------------------------------------------ +// | RET | args + receiver | caller frame | +// ^ esp ^ ebp + +// =={ prologue of called function }============================================ +// --{ push ebp }--------------------------------------------------------------- +// | FP | RET | args + receiver | caller frame | +// ^ esp ^ ebp + +// --{ mov ebp, esp }----------------------------------------------------------- +// | FP | RET | args + receiver | caller frame | +// ^ ebp,esp + +// --{ push esi }--------------------------------------------------------------- +// | CTX | FP | RET | args + receiver | caller frame | +// ^esp ^ ebp + +// --{ push edi 
}--------------------------------------------------------------- +// | FNC | CTX | FP | RET | args + receiver | caller frame | +// ^esp ^ ebp + +// --{ subi esp, #N }----------------------------------------------------------- +// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame | +// ^esp ^ ebp + +// =={ body of called function }================================================ + +// =={ epilogue of called function }============================================ +// --{ mov esp, ebp }----------------------------------------------------------- +// | FP | RET | args + receiver | caller frame | +// ^ esp,ebp + +// --{ pop ebp }----------------------------------------------------------- +// | | RET | args + receiver | caller frame | +// ^ esp ^ ebp + +// --{ ret #A+1 }----------------------------------------------------------- +// | | caller frame | +// ^ esp ^ ebp + + +// Runtime function calls are accomplished by doing a stub call to the +// CEntryStub (a real code object). On IA32 passes arguments on the +// stack, the number of arguments in EAX, the address of the runtime function +// in EBX, and the context in ESI. + +// --{ before the call instruction }-------------------------------------------- +// | caller frame | +// ^ esp ^ ebp + +// --{ push arguments and setup EAX, EBX, and ESI }----------------------------- +// | args + receiver | caller frame | +// ^ esp ^ ebp +// [eax = #args, ebx = runtime function, esi = context] + +// --{ call #CEntryStub }------------------------------------------------------- +// | RET | args + receiver | caller frame | +// ^ esp ^ ebp + +// =={ body of runtime function }=============================================== + +// --{ runtime returns }-------------------------------------------------------- +// | caller frame | +// ^ esp ^ ebp + +// Other custom linkages (e.g. 
for calling directly into and out of C++) may +// need to save callee-saved registers on the stack, which is done in the +// function prologue of generated code. + +// --{ before the call instruction }-------------------------------------------- +// | caller frame | +// ^ esp ^ ebp + +// --{ set up arguments in registers on stack }--------------------------------- +// | args | caller frame | +// ^ esp ^ ebp +// [r0 = arg0, r1 = arg1, ...] + +// --{ call code }-------------------------------------------------------------- +// | RET | args | caller frame | +// ^ esp ^ ebp + +// =={ prologue of called function }============================================ +// --{ push ebp }--------------------------------------------------------------- +// | FP | RET | args | caller frame | +// ^ esp ^ ebp + +// --{ mov ebp, esp }----------------------------------------------------------- +// | FP | RET | args | caller frame | +// ^ ebp,esp + +// --{ save registers }--------------------------------------------------------- +// | regs | FP | RET | args | caller frame | +// ^ esp ^ ebp + +// --{ subi esp, #N }----------------------------------------------------------- +// | callee frame | regs | FP | RET | args | caller frame | +// ^esp ^ ebp + +// =={ body of called function }================================================ + +// =={ epilogue of called function }============================================ +// --{ restore registers }------------------------------------------------------ +// | regs | FP | RET | args | caller frame | +// ^ esp ^ ebp + +// --{ mov esp, ebp }----------------------------------------------------------- +// | FP | RET | args | caller frame | +// ^ esp,ebp + +// --{ pop ebp }---------------------------------------------------------------- +// | RET | args | caller frame | +// ^ esp ^ ebp + + +void CodeGenerator::AssemblePrologue() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + Frame* frame = code_->frame(); + int stack_slots = 
frame->GetSpillSlotCount(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + // Assemble a prologue similar the to cdecl calling convention. + __ push(ebp); + __ mov(ebp, esp); + const RegList saves = descriptor->CalleeSavedRegisters(); + if (saves != 0) { // Save callee-saved registers. + int register_save_area_size = 0; + for (int i = Register::kNumRegisters - 1; i >= 0; i--) { + if (!((1 << i) & saves)) continue; + __ push(Register::from_code(i)); + register_save_area_size += kPointerSize; + } + frame->SetRegisterSaveAreaSize(register_save_area_size); + } + } else if (descriptor->IsJSFunctionCall()) { + CompilationInfo* info = linkage()->info(); + __ Prologue(info->IsCodePreAgingActive()); + frame->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? + if (info->strict_mode() == SLOPPY && !info->is_native()) { + Label ok; + // +2 for return address and saved frame pointer. 
+ int receiver_slot = info->scope()->num_parameters() + 2; + __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize)); + __ cmp(ecx, isolate()->factory()->undefined_value()); + __ j(not_equal, &ok, Label::kNear); + __ mov(ecx, GlobalObjectOperand()); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset)); + __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx); + __ bind(&ok); + } + + } else { + __ StubPrologue(); + frame->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + } + if (stack_slots > 0) { + __ sub(esp, Immediate(stack_slots * kPointerSize)); + } +} + + +void CodeGenerator::AssembleReturn() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + const RegList saves = descriptor->CalleeSavedRegisters(); + if (frame()->GetRegisterSaveAreaSize() > 0) { + // Remove this frame's spill slots first. + int stack_slots = frame()->GetSpillSlotCount(); + if (stack_slots > 0) { + __ add(esp, Immediate(stack_slots * kPointerSize)); + } + // Restore registers. + if (saves != 0) { + for (int i = 0; i < Register::kNumRegisters; i++) { + if (!((1 << i) & saves)) continue; + __ pop(Register::from_code(i)); + } + } + __ pop(ebp); // Pop caller's frame pointer. + __ ret(0); + } else { + // No saved registers. + __ mov(esp, ebp); // Move stack pointer back to frame pointer. + __ pop(ebp); // Pop caller's frame pointer. + __ ret(0); + } + } else { + __ mov(esp, ebp); // Move stack pointer back to frame pointer. + __ pop(ebp); // Pop caller's frame pointer. + int pop_count = + descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0; + __ ret(pop_count * kPointerSize); + } +} + + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + IA32OperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. 
+ if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = g.ToRegister(source); + Operand dst = g.ToOperand(destination); + __ mov(dst, src); + } else if (source->IsStackSlot()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Operand src = g.ToOperand(source); + if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ mov(dst, src); + } else { + Operand dst = g.ToOperand(destination); + __ push(src); + __ pop(dst); + } + } else if (source->IsConstant()) { + Constant src_constant = g.ToConstant(source); + if (src_constant.type() == Constant::kHeapObject) { + Handle<HeapObject> src = src_constant.ToHeapObject(); + if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ LoadHeapObject(dst, src); + } else { + DCHECK(destination->IsStackSlot()); + Operand dst = g.ToOperand(destination); + AllowDeferredHandleDereference embedding_raw_address; + if (isolate()->heap()->InNewSpace(*src)) { + __ PushHeapObject(src); + __ pop(dst); + } else { + __ mov(dst, src); + } + } + } else if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ mov(dst, g.ToImmediate(source)); + } else if (destination->IsStackSlot()) { + Operand dst = g.ToOperand(destination); + __ mov(dst, g.ToImmediate(source)); + } else { + double v = g.ToDouble(source); + uint64_t int_val = BitCast<uint64_t, double>(v); + int32_t lower = static_cast<int32_t>(int_val); + int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt); + if (destination->IsDoubleRegister()) { + XMMRegister dst = g.ToDoubleRegister(destination); + __ Move(dst, v); + } else { + DCHECK(destination->IsDoubleStackSlot()); + Operand dst0 = g.ToOperand(destination); + Operand dst1 = g.HighOperand(destination); + __ mov(dst0, Immediate(lower)); + __ mov(dst1, Immediate(upper)); + } + } + } else if (source->IsDoubleRegister()) { + XMMRegister src = g.ToDoubleRegister(source); + if 
(destination->IsDoubleRegister()) { + XMMRegister dst = g.ToDoubleRegister(destination); + __ movaps(dst, src); + } else { + DCHECK(destination->IsDoubleStackSlot()); + Operand dst = g.ToOperand(destination); + __ movsd(dst, src); + } + } else if (source->IsDoubleStackSlot()) { + DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); + Operand src = g.ToOperand(source); + if (destination->IsDoubleRegister()) { + XMMRegister dst = g.ToDoubleRegister(destination); + __ movsd(dst, src); + } else { + // We rely on having xmm0 available as a fixed scratch register. + Operand dst = g.ToOperand(destination); + __ movsd(xmm0, src); + __ movsd(dst, xmm0); + } + } else { + UNREACHABLE(); + } +} + + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + IA32OperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister() && destination->IsRegister()) { + // Register-register. + Register src = g.ToRegister(source); + Register dst = g.ToRegister(destination); + __ xchg(dst, src); + } else if (source->IsRegister() && destination->IsStackSlot()) { + // Register-memory. + __ xchg(g.ToRegister(source), g.ToOperand(destination)); + } else if (source->IsStackSlot() && destination->IsStackSlot()) { + // Memory-memory. + Operand src = g.ToOperand(source); + Operand dst = g.ToOperand(destination); + __ push(dst); + __ push(src); + __ pop(dst); + __ pop(src); + } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { + // XMM register-register swap. We rely on having xmm0 + // available as a fixed scratch register. + XMMRegister src = g.ToDoubleRegister(source); + XMMRegister dst = g.ToDoubleRegister(destination); + __ movaps(xmm0, src); + __ movaps(src, dst); + __ movaps(dst, xmm0); + } else if (source->IsDoubleRegister() && source->IsDoubleStackSlot()) { + // XMM register-memory swap. 
We rely on having xmm0 + // available as a fixed scratch register. + XMMRegister reg = g.ToDoubleRegister(source); + Operand other = g.ToOperand(destination); + __ movsd(xmm0, other); + __ movsd(other, reg); + __ movaps(reg, xmm0); + } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { + // Double-width memory-to-memory. + Operand src0 = g.ToOperand(source); + Operand src1 = g.HighOperand(source); + Operand dst0 = g.ToOperand(destination); + Operand dst1 = g.HighOperand(destination); + __ movsd(xmm0, dst0); // Save destination in xmm0. + __ push(src0); // Then use stack to copy source to destination. + __ pop(dst0); + __ push(src1); + __ pop(dst1); + __ movsd(src0, xmm0); + } else { + // No other combinations are possible. + UNREACHABLE(); + } +} + + +void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); } + +#undef __ + +#ifdef DEBUG + +// Checks whether the code between start_pc and end_pc is a no-op. +bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, + int end_pc) { + if (start_pc + 1 != end_pc) { + return false; + } + return *(code->instruction_start() + start_pc) == + v8::internal::Assembler::kNopByte; +} + +#endif // DEBUG +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/ia32/instruction-codes-ia32.h nodejs-0.11.15/deps/v8/src/compiler/ia32/instruction-codes-ia32.h --- nodejs-0.11.13/deps/v8/src/compiler/ia32/instruction-codes-ia32.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/ia32/instruction-codes-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,88 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_ +#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// IA32-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. +#define TARGET_ARCH_OPCODE_LIST(V) \ + V(IA32Add) \ + V(IA32And) \ + V(IA32Cmp) \ + V(IA32Test) \ + V(IA32Or) \ + V(IA32Xor) \ + V(IA32Sub) \ + V(IA32Imul) \ + V(IA32Idiv) \ + V(IA32Udiv) \ + V(IA32Not) \ + V(IA32Neg) \ + V(IA32Shl) \ + V(IA32Shr) \ + V(IA32Sar) \ + V(IA32Push) \ + V(IA32CallCodeObject) \ + V(IA32CallAddress) \ + V(PopStack) \ + V(IA32CallJSFunction) \ + V(SSEFloat64Cmp) \ + V(SSEFloat64Add) \ + V(SSEFloat64Sub) \ + V(SSEFloat64Mul) \ + V(SSEFloat64Div) \ + V(SSEFloat64Mod) \ + V(SSEFloat64ToInt32) \ + V(SSEFloat64ToUint32) \ + V(SSEInt32ToFloat64) \ + V(SSEUint32ToFloat64) \ + V(SSELoad) \ + V(SSEStore) \ + V(IA32LoadWord8) \ + V(IA32StoreWord8) \ + V(IA32StoreWord8I) \ + V(IA32LoadWord16) \ + V(IA32StoreWord16) \ + V(IA32StoreWord16I) \ + V(IA32LoadWord32) \ + V(IA32StoreWord32) \ + V(IA32StoreWord32I) \ + V(IA32StoreWriteBarrier) + + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. 
+// +// We use the following local notation for addressing modes: +// +// R = register +// O = register or stack slot +// D = double register +// I = immediate (handle, external, int32) +// MR = [register] +// MI = [immediate] +// MRN = [register + register * N in {1, 2, 4, 8}] +// MRI = [register + immediate] +// MRNI = [register + register * N in {1, 2, 4, 8} + immediate] +#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(MI) /* [K] */ \ + V(MR) /* [%r0] */ \ + V(MRI) /* [%r0 + K] */ \ + V(MR1I) /* [%r0 + %r1 * 1 + K] */ \ + V(MR2I) /* [%r0 + %r1 * 2 + K] */ \ + V(MR4I) /* [%r0 + %r1 * 4 + K] */ \ + V(MR8I) /* [%r0 + %r1 * 8 + K] */ + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc nodejs-0.11.15/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc --- nodejs-0.11.13/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,560 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Adds IA32-specific methods for generating operands. +class IA32OperandGenerator V8_FINAL : public OperandGenerator { + public: + explicit IA32OperandGenerator(InstructionSelector* selector) + : OperandGenerator(selector) {} + + InstructionOperand* UseByteRegister(Node* node) { + // TODO(dcarney): relax constraint. 
+ return UseFixed(node, edx); + } + + bool CanBeImmediate(Node* node) { + switch (node->opcode()) { + case IrOpcode::kInt32Constant: + case IrOpcode::kNumberConstant: + case IrOpcode::kExternalConstant: + return true; + case IrOpcode::kHeapConstant: { + // Constants in new space cannot be used as immediates in V8 because + // the GC does not scan code objects when collecting the new generation. + Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op()); + return !isolate()->heap()->InNewSpace(*value); + } + default: + return false; + } + } +}; + + +void InstructionSelector::VisitLoad(Node* node) { + MachineType rep = OpParameter<MachineType>(node); + IA32OperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + InstructionOperand* output = rep == kMachineFloat64 + ? g.DefineAsDoubleRegister(node) + : g.DefineAsRegister(node); + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kSSELoad; + break; + case kMachineWord8: + opcode = kIA32LoadWord8; + break; + case kMachineWord16: + opcode = kIA32LoadWord16; + break; + case kMachineTagged: // Fall through. 
+ case kMachineWord32: + opcode = kIA32LoadWord32; + break; + default: + UNREACHABLE(); + return; + } + if (g.CanBeImmediate(base)) { + if (Int32Matcher(index).Is(0)) { // load [#base + #0] + Emit(opcode | AddressingModeField::encode(kMode_MI), output, + g.UseImmediate(base)); + } else { // load [#base + %index] + Emit(opcode | AddressingModeField::encode(kMode_MRI), output, + g.UseRegister(index), g.UseImmediate(base)); + } + } else if (g.CanBeImmediate(index)) { // load [%base + #index] + Emit(opcode | AddressingModeField::encode(kMode_MRI), output, + g.UseRegister(base), g.UseImmediate(index)); + } else { // load [%base + %index + K] + Emit(opcode | AddressingModeField::encode(kMode_MR1I), output, + g.UseRegister(base), g.UseRegister(index)); + } + // TODO(turbofan): addressing modes [r+r*{2,4,8}+K] +} + + +void InstructionSelector::VisitStore(Node* node) { + IA32OperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node); + MachineType rep = store_rep.rep; + if (store_rep.write_barrier_kind == kFullWriteBarrier) { + DCHECK_EQ(kMachineTagged, rep); + // TODO(dcarney): refactor RecordWrite function to take temp registers + // and pass them here instead of using fixed regs + // TODO(dcarney): handle immediate indices. 
+ InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)}; + Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx), + g.UseFixed(index, ecx), g.UseFixed(value, edx), ARRAY_SIZE(temps), + temps); + return; + } + DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind); + bool is_immediate = false; + InstructionOperand* val; + if (rep == kMachineFloat64) { + val = g.UseDoubleRegister(value); + } else { + is_immediate = g.CanBeImmediate(value); + if (is_immediate) { + val = g.UseImmediate(value); + } else if (rep == kMachineWord8) { + val = g.UseByteRegister(value); + } else { + val = g.UseRegister(value); + } + } + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kSSEStore; + break; + case kMachineWord8: + opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8; + break; + case kMachineWord16: + opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16; + break; + case kMachineTagged: // Fall through. + case kMachineWord32: + opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32; + break; + default: + UNREACHABLE(); + return; + } + if (g.CanBeImmediate(base)) { + if (Int32Matcher(index).Is(0)) { // store [#base], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MI), NULL, + g.UseImmediate(base), val); + } else { // store [#base + %index], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, + g.UseRegister(index), g.UseImmediate(base), val); + } + } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, + g.UseRegister(base), g.UseImmediate(index), val); + } else { // store [%base + %index], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL, + g.UseRegister(base), g.UseRegister(index), val); + } + // TODO(turbofan): addressing modes [r+r*{2,4,8}+K] +} + + +// Shared routine for multiple binary operations. 
+static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont) { + IA32OperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand* inputs[4]; + size_t input_count = 0; + InstructionOperand* outputs[2]; + size_t output_count = 0; + + // TODO(turbofan): match complex addressing modes. + // TODO(turbofan): if commutative, pick the non-live-in operand as the left as + // this might be the last use and therefore its register can be reused. + if (g.CanBeImmediate(m.right().node())) { + inputs[input_count++] = g.Use(m.left().node()); + inputs[input_count++] = g.UseImmediate(m.right().node()); + } else { + inputs[input_count++] = g.UseRegister(m.left().node()); + inputs[input_count++] = g.Use(m.right().node()); + } + + if (cont->IsBranch()) { + inputs[input_count++] = g.Label(cont->true_block()); + inputs[input_count++] = g.Label(cont->false_block()); + } + + outputs[output_count++] = g.DefineSameAsFirst(node); + if (cont->IsSet()) { + // TODO(turbofan): Use byte register here. + outputs[output_count++] = g.DefineAsRegister(cont->result()); + } + + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); + DCHECK_GE(ARRAY_SIZE(inputs), input_count); + DCHECK_GE(ARRAY_SIZE(outputs), output_count); + + Instruction* instr = selector->Emit(cont->Encode(opcode), output_count, + outputs, input_count, inputs); + if (cont->IsBranch()) instr->MarkAsControl(); +} + + +// Shared routine for multiple binary operations. 
+static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode) { + FlagsContinuation cont; + VisitBinop(selector, node, opcode, &cont); +} + + +void InstructionSelector::VisitWord32And(Node* node) { + VisitBinop(this, node, kIA32And); +} + + +void InstructionSelector::VisitWord32Or(Node* node) { + VisitBinop(this, node, kIA32Or); +} + + +void InstructionSelector::VisitWord32Xor(Node* node) { + IA32OperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.right().Is(-1)) { + Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node())); + } else { + VisitBinop(this, node, kIA32Xor); + } +} + + +// Shared routine for multiple shift operations. +static inline void VisitShift(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // TODO(turbofan): assembler only supports some addressing modes for shifts. + if (g.CanBeImmediate(right)) { + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.UseImmediate(right)); + } else { + Int32BinopMatcher m(node); + if (m.right().IsWord32And()) { + Int32BinopMatcher mright(right); + if (mright.right().Is(0x1F)) { + right = mright.left().node(); + } + } + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.UseFixed(right, ecx)); + } +} + + +void InstructionSelector::VisitWord32Shl(Node* node) { + VisitShift(this, node, kIA32Shl); +} + + +void InstructionSelector::VisitWord32Shr(Node* node) { + VisitShift(this, node, kIA32Shr); +} + + +void InstructionSelector::VisitWord32Sar(Node* node) { + VisitShift(this, node, kIA32Sar); +} + + +void InstructionSelector::VisitInt32Add(Node* node) { + VisitBinop(this, node, kIA32Add); +} + + +void InstructionSelector::VisitInt32Sub(Node* node) { + IA32OperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.left().Is(0)) { + Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node())); 
+ } else { + VisitBinop(this, node, kIA32Sub); + } +} + + +void InstructionSelector::VisitInt32Mul(Node* node) { + IA32OperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + if (g.CanBeImmediate(right)) { + Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left), + g.UseImmediate(right)); + } else if (g.CanBeImmediate(left)) { + Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right), + g.UseImmediate(left)); + } else { + // TODO(turbofan): select better left operand. + Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left), + g.Use(right)); + } +} + + +static inline void VisitDiv(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand* temps[] = {g.TempRegister(edx)}; + size_t temp_count = ARRAY_SIZE(temps); + selector->Emit(opcode, g.DefineAsFixed(node, eax), + g.UseFixed(node->InputAt(0), eax), + g.UseUnique(node->InputAt(1)), temp_count, temps); +} + + +void InstructionSelector::VisitInt32Div(Node* node) { + VisitDiv(this, node, kIA32Idiv); +} + + +void InstructionSelector::VisitInt32UDiv(Node* node) { + VisitDiv(this, node, kIA32Udiv); +} + + +static inline void VisitMod(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)}; + size_t temp_count = ARRAY_SIZE(temps); + selector->Emit(opcode, g.DefineAsFixed(node, edx), + g.UseFixed(node->InputAt(0), eax), + g.UseUnique(node->InputAt(1)), temp_count, temps); +} + + +void InstructionSelector::VisitInt32Mod(Node* node) { + VisitMod(this, node, kIA32Idiv); +} + + +void InstructionSelector::VisitInt32UMod(Node* node) { + VisitMod(this, node, kIA32Udiv); +} + + +void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { + IA32OperandGenerator g(this); + Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node), + g.Use(node->InputAt(0))); +} + + +void 
InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { + IA32OperandGenerator g(this); + // TODO(turbofan): IA32 SSE LoadUint32() should take an operand. + Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { + IA32OperandGenerator g(this); + Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { + IA32OperandGenerator g(this); + // TODO(turbofan): IA32 SSE subsd() should take an operand. + Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), + g.UseDoubleRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitFloat64Add(Node* node) { + IA32OperandGenerator g(this); + Emit(kSSEFloat64Add, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Sub(Node* node) { + IA32OperandGenerator g(this); + Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Mul(Node* node) { + IA32OperandGenerator g(this); + Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Div(Node* node) { + IA32OperandGenerator g(this); + Emit(kSSEFloat64Div, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Mod(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand* temps[] = {g.TempRegister(eax)}; + Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1)), 1, temps); +} + + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node, + 
FlagsContinuation* cont) { + VisitBinop(this, node, kIA32Add, cont); +} + + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kIA32Sub, cont); +} + + +// Shared routine for multiple compare operations. +static inline void VisitCompare(InstructionSelector* selector, + InstructionCode opcode, + InstructionOperand* left, + InstructionOperand* right, + FlagsContinuation* cont) { + IA32OperandGenerator g(selector); + if (cont->IsBranch()) { + selector->Emit(cont->Encode(opcode), NULL, left, right, + g.Label(cont->true_block()), + g.Label(cont->false_block()))->MarkAsControl(); + } else { + DCHECK(cont->IsSet()); + // TODO(titzer): Needs byte register. + selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()), + left, right); + } +} + + +// Shared routine for multiple word compare operations. +static inline void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, + FlagsContinuation* cont, bool commutative) { + IA32OperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // Match immediates on left or right side of comparison. 
+ if (g.CanBeImmediate(right)) { + VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont); + } else if (g.CanBeImmediate(left)) { + if (!commutative) cont->Commute(); + VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont); + } +} + + +void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) { + switch (node->opcode()) { + case IrOpcode::kInt32Sub: + return VisitWordCompare(this, node, kIA32Cmp, cont, false); + case IrOpcode::kWord32And: + return VisitWordCompare(this, node, kIA32Test, cont, true); + default: + break; + } + + IA32OperandGenerator g(this); + VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont); +} + + +void InstructionSelector::VisitWord32Compare(Node* node, + FlagsContinuation* cont) { + VisitWordCompare(this, node, kIA32Cmp, cont, false); +} + + +void InstructionSelector::VisitFloat64Compare(Node* node, + FlagsContinuation* cont) { + IA32OperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right), + cont); +} + + +void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, + BasicBlock* deoptimization) { + IA32OperandGenerator g(this); + CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call); + CallBuffer buffer(zone(), descriptor); + + // Compute InstructionOperands for inputs and outputs. + InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization); + + // Push any stack arguments. + for (int i = buffer.pushed_count - 1; i >= 0; --i) { + Node* input = buffer.pushed_nodes[i]; + // TODO(titzer): handle pushing double parameters. + Emit(kIA32Push, NULL, + g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input)); + } + + // Select the appropriate opcode based on the call type. 
+ InstructionCode opcode; + switch (descriptor->kind()) { + case CallDescriptor::kCallCodeObject: { + bool lazy_deopt = descriptor->CanLazilyDeoptimize(); + opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0); + break; + } + case CallDescriptor::kCallAddress: + opcode = kIA32CallAddress; + break; + case CallDescriptor::kCallJSFunction: + opcode = kIA32CallJSFunction; + break; + default: + UNREACHABLE(); + return; + } + + // Emit the call instruction. + Instruction* call_instr = + Emit(opcode, buffer.output_count, buffer.outputs, + buffer.fixed_and_control_count(), buffer.fixed_and_control_args); + + call_instr->MarkAsCall(); + if (deoptimization != NULL) { + DCHECK(continuation != NULL); + call_instr->MarkAsControl(); + } + + // Caller clean up of stack for C-style calls. + if (descriptor->kind() == CallDescriptor::kCallAddress && + buffer.pushed_count > 0) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL); + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/ia32/linkage-ia32.cc nodejs-0.11.15/deps/v8/src/compiler/ia32/linkage-ia32.cc --- nodejs-0.11.13/deps/v8/src/compiler/ia32/linkage-ia32.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/ia32/linkage-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,63 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/code-stubs.h" +#include "src/compiler/linkage.h" +#include "src/compiler/linkage-impl.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +struct LinkageHelperTraits { + static Register ReturnValueReg() { return eax; } + static Register ReturnValue2Reg() { return edx; } + static Register JSCallFunctionReg() { return edi; } + static Register ContextReg() { return esi; } + static Register RuntimeCallFunctionReg() { return ebx; } + static Register RuntimeCallArgCountReg() { return eax; } + static RegList CCalleeSaveRegisters() { + return esi.bit() | edi.bit() | ebx.bit(); + } + static Register CRegisterParameter(int i) { return no_reg; } + static int CRegisterParametersLength() { return 0; } +}; + + +CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) { + return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>( + zone, parameter_count); +} + + +CallDescriptor* Linkage::GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>( + zone, function, parameter_count, properties, can_deoptimize); +} + + +CallDescriptor* Linkage::GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>( + zone, descriptor, stack_parameter_count, can_deoptimize); +} + + +CallDescriptor* Linkage::GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types) { + return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>( + zone, num_params, return_type, param_types); +} +} +} +} // namespace v8::internal::compiler diff -Nru 
nodejs-0.11.13/deps/v8/src/compiler/instruction.cc nodejs-0.11.15/deps/v8/src/compiler/instruction.cc --- nodejs-0.11.13/deps/v8/src/compiler/instruction.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/instruction.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,483 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/instruction.h" + +#include "src/compiler/common-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +OStream& operator<<(OStream& os, const InstructionOperand& op) { + switch (op.kind()) { + case InstructionOperand::INVALID: + return os << "(0)"; + case InstructionOperand::UNALLOCATED: { + const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op); + os << "v" << unalloc->virtual_register(); + if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) { + return os << "(=" << unalloc->fixed_slot_index() << "S)"; + } + switch (unalloc->extended_policy()) { + case UnallocatedOperand::NONE: + return os; + case UnallocatedOperand::FIXED_REGISTER: + return os << "(=" << Register::AllocationIndexToString( + unalloc->fixed_register_index()) << ")"; + case UnallocatedOperand::FIXED_DOUBLE_REGISTER: + return os << "(=" << DoubleRegister::AllocationIndexToString( + unalloc->fixed_register_index()) << ")"; + case UnallocatedOperand::MUST_HAVE_REGISTER: + return os << "(R)"; + case UnallocatedOperand::SAME_AS_FIRST_INPUT: + return os << "(1)"; + case UnallocatedOperand::ANY: + return os << "(-)"; + } + } + case InstructionOperand::CONSTANT: + return os << "[constant:" << op.index() << "]"; + case InstructionOperand::IMMEDIATE: + return os << "[immediate:" << op.index() << "]"; + case InstructionOperand::STACK_SLOT: + return os << "[stack:" << op.index() << "]"; + case InstructionOperand::DOUBLE_STACK_SLOT: + return os << "[double_stack:" << op.index() << 
"]"; + case InstructionOperand::REGISTER: + return os << "[" << Register::AllocationIndexToString(op.index()) + << "|R]"; + case InstructionOperand::DOUBLE_REGISTER: + return os << "[" << DoubleRegister::AllocationIndexToString(op.index()) + << "|R]"; + } + UNREACHABLE(); + return os; +} + + +template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands> +SubKindOperand<kOperandKind, kNumCachedOperands>* + SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL; + + +template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands> +void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() { + if (cache) return; + cache = new SubKindOperand[kNumCachedOperands]; + for (int i = 0; i < kNumCachedOperands; i++) { + cache[i].ConvertTo(kOperandKind, i); + } +} + + +template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands> +void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() { + delete[] cache; +} + + +void InstructionOperand::SetUpCaches() { +#define INSTRUCTION_OPERAND_SETUP(name, type, number) \ + name##Operand::SetUpCache(); + INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP) +#undef INSTRUCTION_OPERAND_SETUP +} + + +void InstructionOperand::TearDownCaches() { +#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \ + name##Operand::TearDownCache(); + INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN) +#undef INSTRUCTION_OPERAND_TEARDOWN +} + + +OStream& operator<<(OStream& os, const MoveOperands& mo) { + os << *mo.destination(); + if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source(); + return os << ";"; +} + + +bool ParallelMove::IsRedundant() const { + for (int i = 0; i < move_operands_.length(); ++i) { + if (!move_operands_[i].IsRedundant()) return false; + } + return true; +} + + +OStream& operator<<(OStream& os, const ParallelMove& pm) { + bool first = true; + for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin(); + move != pm.move_operands()->end(); 
++move) { + if (move->IsEliminated()) continue; + if (!first) os << " "; + first = false; + os << *move; + } + return os; +} + + +void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) { + // Do not record arguments as pointers. + if (op->IsStackSlot() && op->index() < 0) return; + DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + pointer_operands_.Add(op, zone); +} + + +void PointerMap::RemovePointer(InstructionOperand* op) { + // Do not record arguments as pointers. + if (op->IsStackSlot() && op->index() < 0) return; + DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + for (int i = 0; i < pointer_operands_.length(); ++i) { + if (pointer_operands_[i]->Equals(op)) { + pointer_operands_.Remove(i); + --i; + } + } +} + + +void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) { + // Do not record arguments as pointers. + if (op->IsStackSlot() && op->index() < 0) return; + DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + untagged_operands_.Add(op, zone); +} + + +OStream& operator<<(OStream& os, const PointerMap& pm) { + os << "{"; + for (ZoneList<InstructionOperand*>::iterator op = + pm.pointer_operands_.begin(); + op != pm.pointer_operands_.end(); ++op) { + if (op != pm.pointer_operands_.begin()) os << ";"; + os << *op; + } + return os << "}"; +} + + +OStream& operator<<(OStream& os, const ArchOpcode& ao) { + switch (ao) { +#define CASE(Name) \ + case k##Name: \ + return os << #Name; + ARCH_OPCODE_LIST(CASE) +#undef CASE + } + UNREACHABLE(); + return os; +} + + +OStream& operator<<(OStream& os, const AddressingMode& am) { + switch (am) { + case kMode_None: + return os; +#define CASE(Name) \ + case kMode_##Name: \ + return os << #Name; + TARGET_ADDRESSING_MODE_LIST(CASE) +#undef CASE + } + UNREACHABLE(); + return os; +} + + +OStream& operator<<(OStream& os, const FlagsMode& fm) { + switch (fm) { + case kFlags_none: + return os; + case kFlags_branch: + return os << "branch"; + case kFlags_set: + return os 
<< "set"; + } + UNREACHABLE(); + return os; +} + + +OStream& operator<<(OStream& os, const FlagsCondition& fc) { + switch (fc) { + case kEqual: + return os << "equal"; + case kNotEqual: + return os << "not equal"; + case kSignedLessThan: + return os << "signed less than"; + case kSignedGreaterThanOrEqual: + return os << "signed greater than or equal"; + case kSignedLessThanOrEqual: + return os << "signed less than or equal"; + case kSignedGreaterThan: + return os << "signed greater than"; + case kUnsignedLessThan: + return os << "unsigned less than"; + case kUnsignedGreaterThanOrEqual: + return os << "unsigned greater than or equal"; + case kUnsignedLessThanOrEqual: + return os << "unsigned less than or equal"; + case kUnsignedGreaterThan: + return os << "unsigned greater than"; + case kUnorderedEqual: + return os << "unordered equal"; + case kUnorderedNotEqual: + return os << "unordered not equal"; + case kUnorderedLessThan: + return os << "unordered less than"; + case kUnorderedGreaterThanOrEqual: + return os << "unordered greater than or equal"; + case kUnorderedLessThanOrEqual: + return os << "unordered less than or equal"; + case kUnorderedGreaterThan: + return os << "unordered greater than"; + case kOverflow: + return os << "overflow"; + case kNotOverflow: + return os << "not overflow"; + } + UNREACHABLE(); + return os; +} + + +OStream& operator<<(OStream& os, const Instruction& instr) { + if (instr.OutputCount() > 1) os << "("; + for (size_t i = 0; i < instr.OutputCount(); i++) { + if (i > 0) os << ", "; + os << *instr.OutputAt(i); + } + + if (instr.OutputCount() > 1) os << ") = "; + if (instr.OutputCount() == 1) os << " = "; + + if (instr.IsGapMoves()) { + const GapInstruction* gap = GapInstruction::cast(&instr); + os << (instr.IsBlockStart() ? 
" block-start" : "gap "); + for (int i = GapInstruction::FIRST_INNER_POSITION; + i <= GapInstruction::LAST_INNER_POSITION; i++) { + os << "("; + if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i]; + os << ") "; + } + } else if (instr.IsSourcePosition()) { + const SourcePositionInstruction* pos = + SourcePositionInstruction::cast(&instr); + os << "position (" << pos->source_position().raw() << ")"; + } else { + os << ArchOpcodeField::decode(instr.opcode()); + AddressingMode am = AddressingModeField::decode(instr.opcode()); + if (am != kMode_None) { + os << " : " << AddressingModeField::decode(instr.opcode()); + } + FlagsMode fm = FlagsModeField::decode(instr.opcode()); + if (fm != kFlags_none) { + os << " && " << fm << " if " + << FlagsConditionField::decode(instr.opcode()); + } + } + if (instr.InputCount() > 0) { + for (size_t i = 0; i < instr.InputCount(); i++) { + os << " " << *instr.InputAt(i); + } + } + return os << "\n"; +} + + +OStream& operator<<(OStream& os, const Constant& constant) { + switch (constant.type()) { + case Constant::kInt32: + return os << constant.ToInt32(); + case Constant::kInt64: + return os << constant.ToInt64() << "l"; + case Constant::kFloat64: + return os << constant.ToFloat64(); + case Constant::kExternalReference: + return os << constant.ToExternalReference().address(); + case Constant::kHeapObject: + return os << Brief(*constant.ToHeapObject()); + } + UNREACHABLE(); + return os; +} + + +Label* InstructionSequence::GetLabel(BasicBlock* block) { + return GetBlockStart(block)->label(); +} + + +BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) { + return BlockStartInstruction::cast(InstructionAt(block->code_start_)); +} + + +void InstructionSequence::StartBlock(BasicBlock* block) { + block->code_start_ = static_cast<int>(instructions_.size()); + BlockStartInstruction* block_start = + BlockStartInstruction::New(zone(), block); + AddInstruction(block_start, block); +} + + +void 
InstructionSequence::EndBlock(BasicBlock* block) { + int end = static_cast<int>(instructions_.size()); + DCHECK(block->code_start_ >= 0 && block->code_start_ < end); + block->code_end_ = end; +} + + +int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) { + // TODO(titzer): the order of these gaps is a holdover from Lithium. + GapInstruction* gap = GapInstruction::New(zone()); + if (instr->IsControl()) instructions_.push_back(gap); + int index = static_cast<int>(instructions_.size()); + instructions_.push_back(instr); + if (!instr->IsControl()) instructions_.push_back(gap); + if (instr->NeedsPointerMap()) { + DCHECK(instr->pointer_map() == NULL); + PointerMap* pointer_map = new (zone()) PointerMap(zone()); + pointer_map->set_instruction_position(index); + instr->set_pointer_map(pointer_map); + pointer_maps_.push_back(pointer_map); + } + return index; +} + + +BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) { + // TODO(turbofan): Optimize this. 
+ for (;;) { + DCHECK_LE(0, instruction_index); + Instruction* instruction = InstructionAt(instruction_index--); + if (instruction->IsBlockStart()) { + return BlockStartInstruction::cast(instruction)->block(); + } + } +} + + +bool InstructionSequence::IsReference(int virtual_register) const { + return references_.find(virtual_register) != references_.end(); +} + + +bool InstructionSequence::IsDouble(int virtual_register) const { + return doubles_.find(virtual_register) != doubles_.end(); +} + + +void InstructionSequence::MarkAsReference(int virtual_register) { + references_.insert(virtual_register); +} + + +void InstructionSequence::MarkAsDouble(int virtual_register) { + doubles_.insert(virtual_register); +} + + +void InstructionSequence::AddGapMove(int index, InstructionOperand* from, + InstructionOperand* to) { + GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove( + from, to, zone()); +} + + +int InstructionSequence::AddDeoptimizationEntry( + FrameStateDescriptor* descriptor) { + int deoptimization_id = static_cast<int>(deoptimization_entries_.size()); + deoptimization_entries_.push_back(descriptor); + return deoptimization_id; +} + +FrameStateDescriptor* InstructionSequence::GetDeoptimizationEntry( + int deoptimization_id) { + return deoptimization_entries_[deoptimization_id]; +} + + +int InstructionSequence::GetDeoptimizationEntryCount() { + return static_cast<int>(deoptimization_entries_.size()); +} + + +OStream& operator<<(OStream& os, const InstructionSequence& code) { + for (size_t i = 0; i < code.immediates_.size(); ++i) { + Constant constant = code.immediates_[i]; + os << "IMM#" << i << ": " << constant << "\n"; + } + int i = 0; + for (ConstantMap::const_iterator it = code.constants_.begin(); + it != code.constants_.end(); ++i, ++it) { + os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n"; + } + for (int i = 0; i < code.BasicBlockCount(); i++) { + BasicBlock* block = code.BlockAt(i); + + int bid = block->id(); 
+ os << "RPO#" << block->rpo_number_ << ": B" << bid; + CHECK(block->rpo_number_ == i); + if (block->IsLoopHeader()) { + os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_ + << ")"; + } + os << " instructions: [" << block->code_start_ << ", " << block->code_end_ + << ")\n predecessors:"; + + BasicBlock::Predecessors predecessors = block->predecessors(); + for (BasicBlock::Predecessors::iterator iter = predecessors.begin(); + iter != predecessors.end(); ++iter) { + os << " B" << (*iter)->id(); + } + os << "\n"; + + for (BasicBlock::const_iterator j = block->begin(); j != block->end(); + ++j) { + Node* phi = *j; + if (phi->opcode() != IrOpcode::kPhi) continue; + os << " phi: v" << phi->id() << " ="; + Node::Inputs inputs = phi->inputs(); + for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end(); + ++iter) { + os << " v" << (*iter)->id(); + } + os << "\n"; + } + + ScopedVector<char> buf(32); + for (int j = block->first_instruction_index(); + j <= block->last_instruction_index(); j++) { + // TODO(svenpanne) Add some basic formatting to our streams. + SNPrintF(buf, "%5d", j); + os << " " << buf.start() << ": " << *code.InstructionAt(j); + } + + os << " " << block->control_; + + if (block->control_input_ != NULL) { + os << " v" << block->control_input_->id(); + } + + BasicBlock::Successors successors = block->successors(); + for (BasicBlock::Successors::iterator iter = successors.begin(); + iter != successors.end(); ++iter) { + os << " B" << (*iter)->id(); + } + os << "\n"; + } + return os; +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/instruction-codes.h nodejs-0.11.15/deps/v8/src/compiler/instruction-codes.h --- nodejs-0.11.13/deps/v8/src/compiler/instruction-codes.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/instruction-codes.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,117 @@ +// Copyright 2014 the V8 project authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_INSTRUCTION_CODES_H_ +#define V8_COMPILER_INSTRUCTION_CODES_H_ + +#if V8_TARGET_ARCH_ARM +#include "src/compiler/arm/instruction-codes-arm.h" +#elif V8_TARGET_ARCH_ARM64 +#include "src/compiler/arm64/instruction-codes-arm64.h" +#elif V8_TARGET_ARCH_IA32 +#include "src/compiler/ia32/instruction-codes-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "src/compiler/x64/instruction-codes-x64.h" +#else +#define TARGET_ARCH_OPCODE_LIST(V) +#define TARGET_ADDRESSING_MODE_LIST(V) +#endif +#include "src/utils.h" + +namespace v8 { +namespace internal { + +class OStream; + +namespace compiler { + +// Target-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. +#define ARCH_OPCODE_LIST(V) \ + V(ArchDeoptimize) \ + V(ArchJmp) \ + V(ArchNop) \ + V(ArchRet) \ + TARGET_ARCH_OPCODE_LIST(V) + +enum ArchOpcode { +#define DECLARE_ARCH_OPCODE(Name) k##Name, + ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) +#undef DECLARE_ARCH_OPCODE +#define COUNT_ARCH_OPCODE(Name) +1 + kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE) +#undef COUNT_ARCH_OPCODE +}; + +OStream& operator<<(OStream& os, const ArchOpcode& ao); + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. 
+#define ADDRESSING_MODE_LIST(V) \ + V(None) \ + TARGET_ADDRESSING_MODE_LIST(V) + +enum AddressingMode { +#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name, + ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE) +#undef DECLARE_ADDRESSING_MODE +#define COUNT_ADDRESSING_MODE(Name) +1 + kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE) +#undef COUNT_ADDRESSING_MODE +}; + +OStream& operator<<(OStream& os, const AddressingMode& am); + +// The mode of the flags continuation (see below). +enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 }; + +OStream& operator<<(OStream& os, const FlagsMode& fm); + +// The condition of flags continuation (see below). +enum FlagsCondition { + kEqual, + kNotEqual, + kSignedLessThan, + kSignedGreaterThanOrEqual, + kSignedLessThanOrEqual, + kSignedGreaterThan, + kUnsignedLessThan, + kUnsignedGreaterThanOrEqual, + kUnsignedLessThanOrEqual, + kUnsignedGreaterThan, + kUnorderedEqual, + kUnorderedNotEqual, + kUnorderedLessThan, + kUnorderedGreaterThanOrEqual, + kUnorderedLessThanOrEqual, + kUnorderedGreaterThan, + kOverflow, + kNotOverflow +}; + +OStream& operator<<(OStream& os, const FlagsCondition& fc); + +// The InstructionCode is an opaque, target-specific integer that encodes +// what code to emit for an instruction in the code generator. It is not +// interesting to the register allocator, as the inputs and flags on the +// instructions specify everything of interest. +typedef int32_t InstructionCode; + +// Helpers for encoding / decoding InstructionCode into the fields needed +// for code generation. We encode the instruction, addressing mode, and flags +// continuation into a single InstructionCode which is stored as part of +// the instruction. 
+typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField; +typedef BitField<AddressingMode, 7, 4> AddressingModeField; +typedef BitField<FlagsMode, 11, 2> FlagsModeField; +typedef BitField<FlagsCondition, 13, 5> FlagsConditionField; +typedef BitField<int, 13, 19> MiscField; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_INSTRUCTION_CODES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/instruction.h nodejs-0.11.15/deps/v8/src/compiler/instruction.h --- nodejs-0.11.13/deps/v8/src/compiler/instruction.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/instruction.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,871 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_INSTRUCTION_H_ +#define V8_COMPILER_INSTRUCTION_H_ + +#include <deque> +#include <map> +#include <set> + +// TODO(titzer): don't include the assembler? +#include "src/assembler.h" +#include "src/compiler/common-operator.h" +#include "src/compiler/frame.h" +#include "src/compiler/graph.h" +#include "src/compiler/instruction-codes.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/schedule.h" +#include "src/zone-allocator.h" + +namespace v8 { +namespace internal { + +// Forward declarations. +class OStream; + +namespace compiler { + +// Forward declarations. +class Linkage; + +// A couple of reserved opcodes are used for internal use. 
+const InstructionCode kGapInstruction = -1; +const InstructionCode kBlockStartInstruction = -2; +const InstructionCode kSourcePositionInstruction = -3; + + +#define INSTRUCTION_OPERAND_LIST(V) \ + V(Constant, CONSTANT, 128) \ + V(Immediate, IMMEDIATE, 128) \ + V(StackSlot, STACK_SLOT, 128) \ + V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \ + V(Register, REGISTER, Register::kNumRegisters) \ + V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters) + +class InstructionOperand : public ZoneObject { + public: + enum Kind { + INVALID, + UNALLOCATED, + CONSTANT, + IMMEDIATE, + STACK_SLOT, + DOUBLE_STACK_SLOT, + REGISTER, + DOUBLE_REGISTER + }; + + InstructionOperand() : value_(KindField::encode(INVALID)) {} + InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); } + + Kind kind() const { return KindField::decode(value_); } + int index() const { return static_cast<int>(value_) >> KindField::kSize; } +#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \ + bool Is##name() const { return kind() == type; } + INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE) + INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0) + INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0) +#undef INSTRUCTION_OPERAND_PREDICATE + bool Equals(InstructionOperand* other) const { + return value_ == other->value_; + } + + void ConvertTo(Kind kind, int index) { + if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0); + value_ = KindField::encode(kind); + value_ |= index << KindField::kSize; + DCHECK(this->index() == index); + } + + // Calls SetUpCache()/TearDownCache() for each subclass. 
+ static void SetUpCaches(); + static void TearDownCaches(); + + protected: + typedef BitField<Kind, 0, 3> KindField; + + unsigned value_; +}; + +OStream& operator<<(OStream& os, const InstructionOperand& op); + +class UnallocatedOperand : public InstructionOperand { + public: + enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY }; + + enum ExtendedPolicy { + NONE, + ANY, + FIXED_REGISTER, + FIXED_DOUBLE_REGISTER, + MUST_HAVE_REGISTER, + SAME_AS_FIRST_INPUT + }; + + // Lifetime of operand inside the instruction. + enum Lifetime { + // USED_AT_START operand is guaranteed to be live only at + // instruction start. Register allocator is free to assign the same register + // to some other operand used inside instruction (i.e. temporary or + // output). + USED_AT_START, + + // USED_AT_END operand is treated as live until the end of + // instruction. This means that register allocator will not reuse it's + // register for any other operand inside instruction. + USED_AT_END + }; + + explicit UnallocatedOperand(ExtendedPolicy policy) + : InstructionOperand(UNALLOCATED, 0) { + value_ |= BasicPolicyField::encode(EXTENDED_POLICY); + value_ |= ExtendedPolicyField::encode(policy); + value_ |= LifetimeField::encode(USED_AT_END); + } + + UnallocatedOperand(BasicPolicy policy, int index) + : InstructionOperand(UNALLOCATED, 0) { + DCHECK(policy == FIXED_SLOT); + value_ |= BasicPolicyField::encode(policy); + value_ |= index << FixedSlotIndexField::kShift; + DCHECK(this->fixed_slot_index() == index); + } + + UnallocatedOperand(ExtendedPolicy policy, int index) + : InstructionOperand(UNALLOCATED, 0) { + DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER); + value_ |= BasicPolicyField::encode(EXTENDED_POLICY); + value_ |= ExtendedPolicyField::encode(policy); + value_ |= LifetimeField::encode(USED_AT_END); + value_ |= FixedRegisterField::encode(index); + } + + UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime) + : InstructionOperand(UNALLOCATED, 0) { + value_ |= 
BasicPolicyField::encode(EXTENDED_POLICY); + value_ |= ExtendedPolicyField::encode(policy); + value_ |= LifetimeField::encode(lifetime); + } + + UnallocatedOperand* CopyUnconstrained(Zone* zone) { + UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY); + result->set_virtual_register(virtual_register()); + return result; + } + + static const UnallocatedOperand* cast(const InstructionOperand* op) { + DCHECK(op->IsUnallocated()); + return static_cast<const UnallocatedOperand*>(op); + } + + static UnallocatedOperand* cast(InstructionOperand* op) { + DCHECK(op->IsUnallocated()); + return static_cast<UnallocatedOperand*>(op); + } + + // The encoding used for UnallocatedOperand operands depends on the policy + // that is + // stored within the operand. The FIXED_SLOT policy uses a compact encoding + // because it accommodates a larger pay-load. + // + // For FIXED_SLOT policy: + // +------------------------------------------+ + // | slot_index | vreg | 0 | 001 | + // +------------------------------------------+ + // + // For all other (extended) policies: + // +------------------------------------------+ + // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime + // +------------------------------------------+ P ... Policy + // + // The slot index is a signed value which requires us to decode it manually + // instead of using the BitField utility class. + + // The superclass has a KindField. + STATIC_ASSERT(KindField::kSize == 3); + + // BitFields for all unallocated operands. + class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {}; + class VirtualRegisterField : public BitField<unsigned, 4, 18> {}; + + // BitFields specific to BasicPolicy::FIXED_SLOT. + class FixedSlotIndexField : public BitField<int, 22, 10> {}; + + // BitFields specific to BasicPolicy::EXTENDED_POLICY. 
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {}; + class LifetimeField : public BitField<Lifetime, 25, 1> {}; + class FixedRegisterField : public BitField<int, 26, 6> {}; + + static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1; + static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize; + static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1; + static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1)); + + // Predicates for the operand policy. + bool HasAnyPolicy() const { + return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY; + } + bool HasFixedPolicy() const { + return basic_policy() == FIXED_SLOT || + extended_policy() == FIXED_REGISTER || + extended_policy() == FIXED_DOUBLE_REGISTER; + } + bool HasRegisterPolicy() const { + return basic_policy() == EXTENDED_POLICY && + extended_policy() == MUST_HAVE_REGISTER; + } + bool HasSameAsInputPolicy() const { + return basic_policy() == EXTENDED_POLICY && + extended_policy() == SAME_AS_FIRST_INPUT; + } + bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; } + bool HasFixedRegisterPolicy() const { + return basic_policy() == EXTENDED_POLICY && + extended_policy() == FIXED_REGISTER; + } + bool HasFixedDoubleRegisterPolicy() const { + return basic_policy() == EXTENDED_POLICY && + extended_policy() == FIXED_DOUBLE_REGISTER; + } + + // [basic_policy]: Distinguish between FIXED_SLOT and all other policies. + BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); } + + // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy. + ExtendedPolicy extended_policy() const { + DCHECK(basic_policy() == EXTENDED_POLICY); + return ExtendedPolicyField::decode(value_); + } + + // [fixed_slot_index]: Only for FIXED_SLOT. 
+ int fixed_slot_index() const { + DCHECK(HasFixedSlotPolicy()); + return static_cast<int>(value_) >> FixedSlotIndexField::kShift; + } + + // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER. + int fixed_register_index() const { + DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy()); + return FixedRegisterField::decode(value_); + } + + // [virtual_register]: The virtual register ID for this operand. + int virtual_register() const { return VirtualRegisterField::decode(value_); } + void set_virtual_register(unsigned id) { + value_ = VirtualRegisterField::update(value_, id); + } + + // [lifetime]: Only for non-FIXED_SLOT. + bool IsUsedAtStart() { + DCHECK(basic_policy() == EXTENDED_POLICY); + return LifetimeField::decode(value_) == USED_AT_START; + } +}; + + +class MoveOperands V8_FINAL { + public: + MoveOperands(InstructionOperand* source, InstructionOperand* destination) + : source_(source), destination_(destination) {} + + InstructionOperand* source() const { return source_; } + void set_source(InstructionOperand* operand) { source_ = operand; } + + InstructionOperand* destination() const { return destination_; } + void set_destination(InstructionOperand* operand) { destination_ = operand; } + + // The gap resolver marks moves as "in-progress" by clearing the + // destination (but not the source). + bool IsPending() const { return destination_ == NULL && source_ != NULL; } + + // True if this move a move into the given destination operand. + bool Blocks(InstructionOperand* operand) const { + return !IsEliminated() && source()->Equals(operand); + } + + // A move is redundant if it's been eliminated, if its source and + // destination are the same, or if its destination is unneeded or constant. 
+ bool IsRedundant() const { + return IsEliminated() || source_->Equals(destination_) || IsIgnored() || + (destination_ != NULL && destination_->IsConstant()); + } + + bool IsIgnored() const { + return destination_ != NULL && destination_->IsIgnored(); + } + + // We clear both operands to indicate move that's been eliminated. + void Eliminate() { source_ = destination_ = NULL; } + bool IsEliminated() const { + DCHECK(source_ != NULL || destination_ == NULL); + return source_ == NULL; + } + + private: + InstructionOperand* source_; + InstructionOperand* destination_; +}; + +OStream& operator<<(OStream& os, const MoveOperands& mo); + +template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands> +class SubKindOperand V8_FINAL : public InstructionOperand { + public: + static SubKindOperand* Create(int index, Zone* zone) { + DCHECK(index >= 0); + if (index < kNumCachedOperands) return &cache[index]; + return new (zone) SubKindOperand(index); + } + + static SubKindOperand* cast(InstructionOperand* op) { + DCHECK(op->kind() == kOperandKind); + return reinterpret_cast<SubKindOperand*>(op); + } + + static void SetUpCache(); + static void TearDownCache(); + + private: + static SubKindOperand* cache; + + SubKindOperand() : InstructionOperand() {} + explicit SubKindOperand(int index) + : InstructionOperand(kOperandKind, index) {} +}; + + +#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \ + typedef SubKindOperand<InstructionOperand::type, number> name##Operand; +INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS) +#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS + + +class ParallelMove V8_FINAL : public ZoneObject { + public: + explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {} + + void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) { + move_operands_.Add(MoveOperands(from, to), zone); + } + + bool IsRedundant() const; + + ZoneList<MoveOperands>* move_operands() { return &move_operands_; } + 
const ZoneList<MoveOperands>* move_operands() const { + return &move_operands_; + } + + private: + ZoneList<MoveOperands> move_operands_; +}; + +OStream& operator<<(OStream& os, const ParallelMove& pm); + +class PointerMap V8_FINAL : public ZoneObject { + public: + explicit PointerMap(Zone* zone) + : pointer_operands_(8, zone), + untagged_operands_(0, zone), + instruction_position_(-1) {} + + const ZoneList<InstructionOperand*>* GetNormalizedOperands() { + for (int i = 0; i < untagged_operands_.length(); ++i) { + RemovePointer(untagged_operands_[i]); + } + untagged_operands_.Clear(); + return &pointer_operands_; + } + int instruction_position() const { return instruction_position_; } + + void set_instruction_position(int pos) { + DCHECK(instruction_position_ == -1); + instruction_position_ = pos; + } + + void RecordPointer(InstructionOperand* op, Zone* zone); + void RemovePointer(InstructionOperand* op); + void RecordUntagged(InstructionOperand* op, Zone* zone); + + private: + friend OStream& operator<<(OStream& os, const PointerMap& pm); + + ZoneList<InstructionOperand*> pointer_operands_; + ZoneList<InstructionOperand*> untagged_operands_; + int instruction_position_; +}; + +OStream& operator<<(OStream& os, const PointerMap& pm); + +// TODO(titzer): s/PointerMap/ReferenceMap/ +class Instruction : public ZoneObject { + public: + size_t OutputCount() const { return OutputCountField::decode(bit_field_); } + InstructionOperand* Output() const { return OutputAt(0); } + InstructionOperand* OutputAt(size_t i) const { + DCHECK(i < OutputCount()); + return operands_[i]; + } + + size_t InputCount() const { return InputCountField::decode(bit_field_); } + InstructionOperand* InputAt(size_t i) const { + DCHECK(i < InputCount()); + return operands_[OutputCount() + i]; + } + + size_t TempCount() const { return TempCountField::decode(bit_field_); } + InstructionOperand* TempAt(size_t i) const { + DCHECK(i < TempCount()); + return operands_[OutputCount() + InputCount() + i]; + } 
+ + InstructionCode opcode() const { return opcode_; } + ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); } + AddressingMode addressing_mode() const { + return AddressingModeField::decode(opcode()); + } + FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); } + FlagsCondition flags_condition() const { + return FlagsConditionField::decode(opcode()); + } + + // TODO(titzer): make control and call into flags. + static Instruction* New(Zone* zone, InstructionCode opcode) { + return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL); + } + + static Instruction* New(Zone* zone, InstructionCode opcode, + size_t output_count, InstructionOperand** outputs, + size_t input_count, InstructionOperand** inputs, + size_t temp_count, InstructionOperand** temps) { + DCHECK(opcode >= 0); + DCHECK(output_count == 0 || outputs != NULL); + DCHECK(input_count == 0 || inputs != NULL); + DCHECK(temp_count == 0 || temps != NULL); + InstructionOperand* none = NULL; + USE(none); + int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) + + (output_count + input_count + temp_count - 1) * + sizeof(none)); + return new (zone->New(size)) Instruction( + opcode, output_count, outputs, input_count, inputs, temp_count, temps); + } + + // TODO(titzer): another holdover from lithium days; register allocator + // should not need to know about control instructions. 
+ Instruction* MarkAsControl() { + bit_field_ = IsControlField::update(bit_field_, true); + return this; + } + Instruction* MarkAsCall() { + bit_field_ = IsCallField::update(bit_field_, true); + return this; + } + bool IsControl() const { return IsControlField::decode(bit_field_); } + bool IsCall() const { return IsCallField::decode(bit_field_); } + bool NeedsPointerMap() const { return IsCall(); } + bool HasPointerMap() const { return pointer_map_ != NULL; } + + bool IsGapMoves() const { + return opcode() == kGapInstruction || opcode() == kBlockStartInstruction; + } + bool IsBlockStart() const { return opcode() == kBlockStartInstruction; } + bool IsSourcePosition() const { + return opcode() == kSourcePositionInstruction; + } + + bool ClobbersRegisters() const { return IsCall(); } + bool ClobbersTemps() const { return IsCall(); } + bool ClobbersDoubleRegisters() const { return IsCall(); } + PointerMap* pointer_map() const { return pointer_map_; } + + void set_pointer_map(PointerMap* map) { + DCHECK(NeedsPointerMap()); + DCHECK_EQ(NULL, pointer_map_); + pointer_map_ = map; + } + + // Placement new operator so that we can smash instructions into + // zone-allocated memory. 
+ void* operator new(size_t, void* location) { return location; } + + void operator delete(void* pointer, void* location) { UNREACHABLE(); } + + protected: + explicit Instruction(InstructionCode opcode) + : opcode_(opcode), + bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) | + TempCountField::encode(0) | IsCallField::encode(false) | + IsControlField::encode(false)), + pointer_map_(NULL) {} + + Instruction(InstructionCode opcode, size_t output_count, + InstructionOperand** outputs, size_t input_count, + InstructionOperand** inputs, size_t temp_count, + InstructionOperand** temps) + : opcode_(opcode), + bit_field_(OutputCountField::encode(output_count) | + InputCountField::encode(input_count) | + TempCountField::encode(temp_count) | + IsCallField::encode(false) | IsControlField::encode(false)), + pointer_map_(NULL) { + for (size_t i = 0; i < output_count; ++i) { + operands_[i] = outputs[i]; + } + for (size_t i = 0; i < input_count; ++i) { + operands_[output_count + i] = inputs[i]; + } + for (size_t i = 0; i < temp_count; ++i) { + operands_[output_count + input_count + i] = temps[i]; + } + } + + protected: + typedef BitField<size_t, 0, 8> OutputCountField; + typedef BitField<size_t, 8, 16> InputCountField; + typedef BitField<size_t, 24, 6> TempCountField; + typedef BitField<bool, 30, 1> IsCallField; + typedef BitField<bool, 31, 1> IsControlField; + + InstructionCode opcode_; + uint32_t bit_field_; + PointerMap* pointer_map_; + InstructionOperand* operands_[1]; +}; + +OStream& operator<<(OStream& os, const Instruction& instr); + +// Represents moves inserted before an instruction due to register allocation. +// TODO(titzer): squash GapInstruction back into Instruction, since essentially +// every instruction can possibly have moves inserted before it. 
+class GapInstruction : public Instruction { + public: + enum InnerPosition { + BEFORE, + START, + END, + AFTER, + FIRST_INNER_POSITION = BEFORE, + LAST_INNER_POSITION = AFTER + }; + + ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { + if (parallel_moves_[pos] == NULL) { + parallel_moves_[pos] = new (zone) ParallelMove(zone); + } + return parallel_moves_[pos]; + } + + ParallelMove* GetParallelMove(InnerPosition pos) { + return parallel_moves_[pos]; + } + + static GapInstruction* New(Zone* zone) { + void* buffer = zone->New(sizeof(GapInstruction)); + return new (buffer) GapInstruction(kGapInstruction); + } + + static GapInstruction* cast(Instruction* instr) { + DCHECK(instr->IsGapMoves()); + return static_cast<GapInstruction*>(instr); + } + + static const GapInstruction* cast(const Instruction* instr) { + DCHECK(instr->IsGapMoves()); + return static_cast<const GapInstruction*>(instr); + } + + protected: + explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) { + parallel_moves_[BEFORE] = NULL; + parallel_moves_[START] = NULL; + parallel_moves_[END] = NULL; + parallel_moves_[AFTER] = NULL; + } + + private: + friend OStream& operator<<(OStream& os, const Instruction& instr); + ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; +}; + + +// This special kind of gap move instruction represents the beginning of a +// block of code. +// TODO(titzer): move code_start and code_end from BasicBlock to here. 
+class BlockStartInstruction V8_FINAL : public GapInstruction { + public: + BasicBlock* block() const { return block_; } + Label* label() { return &label_; } + + static BlockStartInstruction* New(Zone* zone, BasicBlock* block) { + void* buffer = zone->New(sizeof(BlockStartInstruction)); + return new (buffer) BlockStartInstruction(block); + } + + static BlockStartInstruction* cast(Instruction* instr) { + DCHECK(instr->IsBlockStart()); + return static_cast<BlockStartInstruction*>(instr); + } + + private: + explicit BlockStartInstruction(BasicBlock* block) + : GapInstruction(kBlockStartInstruction), block_(block) {} + + BasicBlock* block_; + Label label_; +}; + + +class SourcePositionInstruction V8_FINAL : public Instruction { + public: + static SourcePositionInstruction* New(Zone* zone, SourcePosition position) { + void* buffer = zone->New(sizeof(SourcePositionInstruction)); + return new (buffer) SourcePositionInstruction(position); + } + + SourcePosition source_position() const { return source_position_; } + + static SourcePositionInstruction* cast(Instruction* instr) { + DCHECK(instr->IsSourcePosition()); + return static_cast<SourcePositionInstruction*>(instr); + } + + static const SourcePositionInstruction* cast(const Instruction* instr) { + DCHECK(instr->IsSourcePosition()); + return static_cast<const SourcePositionInstruction*>(instr); + } + + private: + explicit SourcePositionInstruction(SourcePosition source_position) + : Instruction(kSourcePositionInstruction), + source_position_(source_position) { + DCHECK(!source_position_.IsInvalid()); + DCHECK(!source_position_.IsUnknown()); + } + + SourcePosition source_position_; +}; + + +class Constant V8_FINAL { + public: + enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject }; + + explicit Constant(int32_t v) : type_(kInt32), value_(v) {} + explicit Constant(int64_t v) : type_(kInt64), value_(v) {} + explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {} + explicit 
Constant(ExternalReference ref) + : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {} + explicit Constant(Handle<HeapObject> obj) + : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {} + + Type type() const { return type_; } + + int32_t ToInt32() const { + DCHECK_EQ(kInt32, type()); + return static_cast<int32_t>(value_); + } + + int64_t ToInt64() const { + if (type() == kInt32) return ToInt32(); + DCHECK_EQ(kInt64, type()); + return value_; + } + + double ToFloat64() const { + if (type() == kInt32) return ToInt32(); + DCHECK_EQ(kFloat64, type()); + return BitCast<double>(value_); + } + + ExternalReference ToExternalReference() const { + DCHECK_EQ(kExternalReference, type()); + return BitCast<ExternalReference>(static_cast<intptr_t>(value_)); + } + + Handle<HeapObject> ToHeapObject() const { + DCHECK_EQ(kHeapObject, type()); + return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_)); + } + + private: + Type type_; + int64_t value_; +}; + + +class FrameStateDescriptor : public ZoneObject { + public: + FrameStateDescriptor(BailoutId bailout_id, int parameters_count, + int locals_count, int stack_count) + : bailout_id_(bailout_id), + parameters_count_(parameters_count), + locals_count_(locals_count), + stack_count_(stack_count) {} + + BailoutId bailout_id() const { return bailout_id_; } + int parameters_count() { return parameters_count_; } + int locals_count() { return locals_count_; } + int stack_count() { return stack_count_; } + + int size() { return parameters_count_ + locals_count_ + stack_count_; } + + private: + BailoutId bailout_id_; + int parameters_count_; + int locals_count_; + int stack_count_; +}; + +OStream& operator<<(OStream& os, const Constant& constant); + +typedef std::deque<Constant, zone_allocator<Constant> > ConstantDeque; +typedef std::map<int, Constant, std::less<int>, + zone_allocator<std::pair<int, Constant> > > ConstantMap; + + +typedef std::deque<Instruction*, zone_allocator<Instruction*> > + InstructionDeque; 
+typedef std::deque<PointerMap*, zone_allocator<PointerMap*> > PointerMapDeque; +typedef std::vector<FrameStateDescriptor*, + zone_allocator<FrameStateDescriptor*> > + DeoptimizationVector; + + +// Represents architecture-specific generated code before, during, and after +// register allocation. +// TODO(titzer): s/IsDouble/IsFloat64/ +class InstructionSequence V8_FINAL { + public: + InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule) + : graph_(graph), + linkage_(linkage), + schedule_(schedule), + constants_(ConstantMap::key_compare(), + ConstantMap::allocator_type(zone())), + immediates_(ConstantDeque::allocator_type(zone())), + instructions_(InstructionDeque::allocator_type(zone())), + next_virtual_register_(graph->NodeCount()), + pointer_maps_(PointerMapDeque::allocator_type(zone())), + doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())), + references_(std::less<int>(), + VirtualRegisterSet::allocator_type(zone())), + deoptimization_entries_(DeoptimizationVector::allocator_type(zone())) {} + + int NextVirtualRegister() { return next_virtual_register_++; } + int VirtualRegisterCount() const { return next_virtual_register_; } + + int ValueCount() const { return graph_->NodeCount(); } + + int BasicBlockCount() const { + return static_cast<int>(schedule_->rpo_order()->size()); + } + + BasicBlock* BlockAt(int rpo_number) const { + return (*schedule_->rpo_order())[rpo_number]; + } + + BasicBlock* GetContainingLoop(BasicBlock* block) { + return block->loop_header_; + } + + int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; } + + BasicBlock* GetBasicBlock(int instruction_index); + + int GetVirtualRegister(Node* node) const { return node->id(); } + + bool IsReference(int virtual_register) const; + bool IsDouble(int virtual_register) const; + + void MarkAsReference(int virtual_register); + void MarkAsDouble(int virtual_register); + + void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to); + + 
Label* GetLabel(BasicBlock* block); + BlockStartInstruction* GetBlockStart(BasicBlock* block); + + typedef InstructionDeque::const_iterator const_iterator; + const_iterator begin() const { return instructions_.begin(); } + const_iterator end() const { return instructions_.end(); } + + GapInstruction* GapAt(int index) const { + return GapInstruction::cast(InstructionAt(index)); + } + bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); } + Instruction* InstructionAt(int index) const { + DCHECK(index >= 0); + DCHECK(index < static_cast<int>(instructions_.size())); + return instructions_[index]; + } + + Frame* frame() { return &frame_; } + Graph* graph() const { return graph_; } + Isolate* isolate() const { return zone()->isolate(); } + Linkage* linkage() const { return linkage_; } + Schedule* schedule() const { return schedule_; } + const PointerMapDeque* pointer_maps() const { return &pointer_maps_; } + Zone* zone() const { return graph_->zone(); } + + // Used by the code generator while adding instructions. 
+ int AddInstruction(Instruction* instr, BasicBlock* block); + void StartBlock(BasicBlock* block); + void EndBlock(BasicBlock* block); + + void AddConstant(int virtual_register, Constant constant) { + DCHECK(constants_.find(virtual_register) == constants_.end()); + constants_.insert(std::make_pair(virtual_register, constant)); + } + Constant GetConstant(int virtual_register) const { + ConstantMap::const_iterator it = constants_.find(virtual_register); + DCHECK(it != constants_.end()); + DCHECK_EQ(virtual_register, it->first); + return it->second; + } + + typedef ConstantDeque Immediates; + const Immediates& immediates() const { return immediates_; } + + int AddImmediate(Constant constant) { + int index = static_cast<int>(immediates_.size()); + immediates_.push_back(constant); + return index; + } + Constant GetImmediate(int index) const { + DCHECK(index >= 0); + DCHECK(index < static_cast<int>(immediates_.size())); + return immediates_[index]; + } + + int AddDeoptimizationEntry(FrameStateDescriptor* descriptor); + FrameStateDescriptor* GetDeoptimizationEntry(int deoptimization_id); + int GetDeoptimizationEntryCount(); + + private: + friend OStream& operator<<(OStream& os, const InstructionSequence& code); + + typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet; + + Graph* graph_; + Linkage* linkage_; + Schedule* schedule_; + ConstantMap constants_; + ConstantDeque immediates_; + InstructionDeque instructions_; + int next_virtual_register_; + PointerMapDeque pointer_maps_; + VirtualRegisterSet doubles_; + VirtualRegisterSet references_; + Frame frame_; + DeoptimizationVector deoptimization_entries_; +}; + +OStream& operator<<(OStream& os, const InstructionSequence& code); + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_INSTRUCTION_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/instruction-selector.cc nodejs-0.11.15/deps/v8/src/compiler/instruction-selector.cc --- 
nodejs-0.11.13/deps/v8/src/compiler/instruction-selector.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/instruction-selector.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1053 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/instruction-selector.h" + +#include "src/compiler/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/pipeline.h" + +namespace v8 { +namespace internal { +namespace compiler { + +InstructionSelector::InstructionSelector(InstructionSequence* sequence, + SourcePositionTable* source_positions, + Features features) + : zone_(sequence->isolate()), + sequence_(sequence), + source_positions_(source_positions), + features_(features), + current_block_(NULL), + instructions_(InstructionDeque::allocator_type(zone())), + defined_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())), + used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {} + + +void InstructionSelector::SelectInstructions() { + // Mark the inputs of all phis in loop headers as used. + BasicBlockVector* blocks = schedule()->rpo_order(); + for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) { + BasicBlock* block = *i; + if (!block->IsLoopHeader()) continue; + DCHECK_NE(0, block->PredecessorCount()); + DCHECK_NE(1, block->PredecessorCount()); + for (BasicBlock::const_iterator j = block->begin(); j != block->end(); + ++j) { + Node* phi = *j; + if (phi->opcode() != IrOpcode::kPhi) continue; + + // Mark all inputs as used. + Node::Inputs inputs = phi->inputs(); + for (InputIter k = inputs.begin(); k != inputs.end(); ++k) { + MarkAsUsed(*k); + } + } + } + + // Visit each basic block in post order. 
+ for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) { + VisitBlock(*i); + } + + // Schedule the selected instructions. + for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) { + BasicBlock* block = *i; + size_t end = block->code_end_; + size_t start = block->code_start_; + sequence()->StartBlock(block); + while (start-- > end) { + sequence()->AddInstruction(instructions_[start], block); + } + sequence()->EndBlock(block); + } +} + + +Instruction* InstructionSelector::Emit(InstructionCode opcode, + InstructionOperand* output, + size_t temp_count, + InstructionOperand** temps) { + size_t output_count = output == NULL ? 0 : 1; + return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps); +} + + +Instruction* InstructionSelector::Emit(InstructionCode opcode, + InstructionOperand* output, + InstructionOperand* a, size_t temp_count, + InstructionOperand** temps) { + size_t output_count = output == NULL ? 0 : 1; + return Emit(opcode, output_count, &output, 1, &a, temp_count, temps); +} + + +Instruction* InstructionSelector::Emit(InstructionCode opcode, + InstructionOperand* output, + InstructionOperand* a, + InstructionOperand* b, size_t temp_count, + InstructionOperand** temps) { + size_t output_count = output == NULL ? 0 : 1; + InstructionOperand* inputs[] = {a, b}; + size_t input_count = ARRAY_SIZE(inputs); + return Emit(opcode, output_count, &output, input_count, inputs, temp_count, + temps); +} + + +Instruction* InstructionSelector::Emit(InstructionCode opcode, + InstructionOperand* output, + InstructionOperand* a, + InstructionOperand* b, + InstructionOperand* c, size_t temp_count, + InstructionOperand** temps) { + size_t output_count = output == NULL ? 
0 : 1; + InstructionOperand* inputs[] = {a, b, c}; + size_t input_count = ARRAY_SIZE(inputs); + return Emit(opcode, output_count, &output, input_count, inputs, temp_count, + temps); +} + + +Instruction* InstructionSelector::Emit( + InstructionCode opcode, InstructionOperand* output, InstructionOperand* a, + InstructionOperand* b, InstructionOperand* c, InstructionOperand* d, + size_t temp_count, InstructionOperand** temps) { + size_t output_count = output == NULL ? 0 : 1; + InstructionOperand* inputs[] = {a, b, c, d}; + size_t input_count = ARRAY_SIZE(inputs); + return Emit(opcode, output_count, &output, input_count, inputs, temp_count, + temps); +} + + +Instruction* InstructionSelector::Emit( + InstructionCode opcode, size_t output_count, InstructionOperand** outputs, + size_t input_count, InstructionOperand** inputs, size_t temp_count, + InstructionOperand** temps) { + Instruction* instr = + Instruction::New(instruction_zone(), opcode, output_count, outputs, + input_count, inputs, temp_count, temps); + return Emit(instr); +} + + +Instruction* InstructionSelector::Emit(Instruction* instr) { + instructions_.push_back(instr); + return instr; +} + + +bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const { + return block->rpo_number_ == (current_block_->rpo_number_ + 1) && + block->deferred_ == current_block_->deferred_; +} + + +bool InstructionSelector::CanCover(Node* user, Node* node) const { + return node->OwnedBy(user) && + schedule()->block(node) == schedule()->block(user); +} + + +bool InstructionSelector::IsDefined(Node* node) const { + DCHECK_NOT_NULL(node); + NodeId id = node->id(); + DCHECK(id >= 0); + DCHECK(id < static_cast<NodeId>(defined_.size())); + return defined_[id]; +} + + +void InstructionSelector::MarkAsDefined(Node* node) { + DCHECK_NOT_NULL(node); + NodeId id = node->id(); + DCHECK(id >= 0); + DCHECK(id < static_cast<NodeId>(defined_.size())); + defined_[id] = true; +} + + +bool InstructionSelector::IsUsed(Node* node) 
const { + if (!node->op()->HasProperty(Operator::kEliminatable)) return true; + NodeId id = node->id(); + DCHECK(id >= 0); + DCHECK(id < static_cast<NodeId>(used_.size())); + return used_[id]; +} + + +void InstructionSelector::MarkAsUsed(Node* node) { + DCHECK_NOT_NULL(node); + NodeId id = node->id(); + DCHECK(id >= 0); + DCHECK(id < static_cast<NodeId>(used_.size())); + used_[id] = true; +} + + +bool InstructionSelector::IsDouble(const Node* node) const { + DCHECK_NOT_NULL(node); + return sequence()->IsDouble(node->id()); +} + + +void InstructionSelector::MarkAsDouble(Node* node) { + DCHECK_NOT_NULL(node); + DCHECK(!IsReference(node)); + sequence()->MarkAsDouble(node->id()); + + // Propagate "doubleness" throughout phis. + for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { + Node* user = *i; + if (user->opcode() != IrOpcode::kPhi) continue; + if (IsDouble(user)) continue; + MarkAsDouble(user); + } +} + + +bool InstructionSelector::IsReference(const Node* node) const { + DCHECK_NOT_NULL(node); + return sequence()->IsReference(node->id()); +} + + +void InstructionSelector::MarkAsReference(Node* node) { + DCHECK_NOT_NULL(node); + DCHECK(!IsDouble(node)); + sequence()->MarkAsReference(node->id()); + + // Propagate "referenceness" throughout phis. + for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { + Node* user = *i; + if (user->opcode() != IrOpcode::kPhi) continue; + if (IsReference(user)) continue; + MarkAsReference(user); + } +} + + +void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) { + DCHECK_NOT_NULL(node); + if (rep == kMachineFloat64) MarkAsDouble(node); + if (rep == kMachineTagged) MarkAsReference(node); +} + + +// TODO(bmeurer): Get rid of the CallBuffer business and make +// InstructionSelector::VisitCall platform independent instead. 
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d) + : output_count(0), + descriptor(d), + output_nodes(zone->NewArray<Node*>(d->ReturnCount())), + outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())), + fixed_and_control_args( + zone->NewArray<InstructionOperand*>(input_count() + control_count())), + fixed_count(0), + pushed_nodes(zone->NewArray<Node*>(input_count())), + pushed_count(0) { + if (d->ReturnCount() > 1) { + memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount()); // NOLINT + } + memset(pushed_nodes, 0, sizeof(Node*) * input_count()); // NOLINT +} + + +// TODO(bmeurer): Get rid of the CallBuffer business and make +// InstructionSelector::VisitCall platform independent instead. +void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, + bool call_code_immediate, + bool call_address_immediate, + BasicBlock* cont_node, + BasicBlock* deopt_node) { + OperandGenerator g(this); + DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount()); + DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()), + buffer->input_count()); + + if (buffer->descriptor->ReturnCount() > 0) { + // Collect the projections that represent multiple outputs from this call. + if (buffer->descriptor->ReturnCount() == 1) { + buffer->output_nodes[0] = call; + } else { + call->CollectProjections(buffer->descriptor->ReturnCount(), + buffer->output_nodes); + } + + // Filter out the outputs that aren't live because no projection uses them. + for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) { + if (buffer->output_nodes[i] != NULL) { + Node* output = buffer->output_nodes[i]; + LinkageLocation location = buffer->descriptor->GetReturnLocation(i); + MarkAsRepresentation(location.representation(), output); + buffer->outputs[buffer->output_count++] = + g.DefineAsLocation(output, location); + } + } + } + + buffer->fixed_count = 1; // First argument is always the callee. 
+ Node* callee = call->InputAt(0); + switch (buffer->descriptor->kind()) { + case CallDescriptor::kCallCodeObject: + buffer->fixed_and_control_args[0] = + (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant) + ? g.UseImmediate(callee) + : g.UseRegister(callee); + break; + case CallDescriptor::kCallAddress: + buffer->fixed_and_control_args[0] = + (call_address_immediate && + (callee->opcode() == IrOpcode::kInt32Constant || + callee->opcode() == IrOpcode::kInt64Constant)) + ? g.UseImmediate(callee) + : g.UseRegister(callee); + break; + case CallDescriptor::kCallJSFunction: + buffer->fixed_and_control_args[0] = + g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)); + break; + } + + int input_count = buffer->input_count(); + + // Split the arguments into pushed_nodes and fixed_args. Pushed arguments + // require an explicit push instruction before the call and do not appear + // as arguments to the call. Everything else ends up as an InstructionOperand + // argument to the call. + InputIter iter(call->inputs().begin()); + for (int index = 0; index < input_count; ++iter, ++index) { + DCHECK(iter != call->inputs().end()); + DCHECK(index == iter.index()); + if (index == 0) continue; // The first argument (callee) is already done. + InstructionOperand* op = + g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index)); + if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) { + int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1; + DCHECK(buffer->pushed_nodes[stack_index] == NULL); + buffer->pushed_nodes[stack_index] = *iter; + buffer->pushed_count++; + } else { + buffer->fixed_and_control_args[buffer->fixed_count] = op; + buffer->fixed_count++; + } + } + + // If the call can deoptimize, we add the continuation and deoptimization + // block labels. 
+ if (buffer->descriptor->CanLazilyDeoptimize()) { + DCHECK(cont_node != NULL); + DCHECK(deopt_node != NULL); + buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node); + buffer->fixed_and_control_args[buffer->fixed_count + 1] = + g.Label(deopt_node); + } else { + DCHECK(cont_node == NULL); + DCHECK(deopt_node == NULL); + } + + DCHECK(input_count == (buffer->fixed_count + buffer->pushed_count)); +} + + +void InstructionSelector::VisitBlock(BasicBlock* block) { + DCHECK_EQ(NULL, current_block_); + current_block_ = block; + int current_block_end = static_cast<int>(instructions_.size()); + + // Generate code for the block control "top down", but schedule the code + // "bottom up". + VisitControl(block); + std::reverse(instructions_.begin() + current_block_end, instructions_.end()); + + // Visit code in reverse control flow order, because architecture-specific + // matching may cover more than one node at a time. + for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend(); + ++i) { + Node* node = *i; + // Skip nodes that are unused or already defined. + if (!IsUsed(node) || IsDefined(node)) continue; + // Generate code for this node "top down", but schedule the code "bottom + // up". + size_t current_node_end = instructions_.size(); + VisitNode(node); + std::reverse(instructions_.begin() + current_node_end, instructions_.end()); + } + + // We're done with the block. + // TODO(bmeurer): We should not mutate the schedule. + block->code_end_ = current_block_end; + block->code_start_ = static_cast<int>(instructions_.size()); + + current_block_ = NULL; +} + + +static inline void CheckNoPhis(const BasicBlock* block) { +#ifdef DEBUG + // Branch targets should not have phis. 
+ for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) { + const Node* node = *i; + CHECK_NE(IrOpcode::kPhi, node->opcode()); + } +#endif +} + + +void InstructionSelector::VisitControl(BasicBlock* block) { + Node* input = block->control_input_; + switch (block->control_) { + case BasicBlockData::kGoto: + return VisitGoto(block->SuccessorAt(0)); + case BasicBlockData::kBranch: { + DCHECK_EQ(IrOpcode::kBranch, input->opcode()); + BasicBlock* tbranch = block->SuccessorAt(0); + BasicBlock* fbranch = block->SuccessorAt(1); + // SSA deconstruction requires targets of branches not to have phis. + // Edge split form guarantees this property, but is more strict. + CheckNoPhis(tbranch); + CheckNoPhis(fbranch); + if (tbranch == fbranch) return VisitGoto(tbranch); + return VisitBranch(input, tbranch, fbranch); + } + case BasicBlockData::kReturn: { + // If the result itself is a return, return its input. + Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn) + ? input->InputAt(0) + : input; + return VisitReturn(value); + } + case BasicBlockData::kThrow: + return VisitThrow(input); + case BasicBlockData::kDeoptimize: + return VisitDeoptimize(input); + case BasicBlockData::kCall: { + BasicBlock* deoptimization = block->SuccessorAt(0); + BasicBlock* continuation = block->SuccessorAt(1); + VisitCall(input, continuation, deoptimization); + break; + } + case BasicBlockData::kNone: { + // TODO(titzer): exit block doesn't have control. + DCHECK(input == NULL); + break; + } + default: + UNREACHABLE(); + break; + } +} + + +void InstructionSelector::VisitNode(Node* node) { + DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes. 
+ SourcePosition source_position = source_positions_->GetSourcePosition(node); + if (!source_position.IsUnknown()) { + DCHECK(!source_position.IsInvalid()); + if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) { + Emit(SourcePositionInstruction::New(instruction_zone(), source_position)); + } + } + switch (node->opcode()) { + case IrOpcode::kStart: + case IrOpcode::kLoop: + case IrOpcode::kEnd: + case IrOpcode::kBranch: + case IrOpcode::kIfTrue: + case IrOpcode::kIfFalse: + case IrOpcode::kEffectPhi: + case IrOpcode::kMerge: + case IrOpcode::kLazyDeoptimization: + case IrOpcode::kContinuation: + // No code needed for these graph artifacts. + return; + case IrOpcode::kParameter: { + int index = OpParameter<int>(node); + MachineType rep = linkage() + ->GetIncomingDescriptor() + ->GetInputLocation(index) + .representation(); + MarkAsRepresentation(rep, node); + return VisitParameter(node); + } + case IrOpcode::kPhi: + return VisitPhi(node); + case IrOpcode::kProjection: + return VisitProjection(node); + case IrOpcode::kInt32Constant: + case IrOpcode::kInt64Constant: + case IrOpcode::kExternalConstant: + return VisitConstant(node); + case IrOpcode::kFloat64Constant: + return MarkAsDouble(node), VisitConstant(node); + case IrOpcode::kHeapConstant: + case IrOpcode::kNumberConstant: + // TODO(turbofan): only mark non-smis as references. 
+ return MarkAsReference(node), VisitConstant(node); + case IrOpcode::kCall: + return VisitCall(node, NULL, NULL); + case IrOpcode::kFrameState: + case IrOpcode::kStateValues: + return; + case IrOpcode::kLoad: { + MachineType load_rep = OpParameter<MachineType>(node); + MarkAsRepresentation(load_rep, node); + return VisitLoad(node); + } + case IrOpcode::kStore: + return VisitStore(node); + case IrOpcode::kWord32And: + return VisitWord32And(node); + case IrOpcode::kWord32Or: + return VisitWord32Or(node); + case IrOpcode::kWord32Xor: + return VisitWord32Xor(node); + case IrOpcode::kWord32Shl: + return VisitWord32Shl(node); + case IrOpcode::kWord32Shr: + return VisitWord32Shr(node); + case IrOpcode::kWord32Sar: + return VisitWord32Sar(node); + case IrOpcode::kWord32Equal: + return VisitWord32Equal(node); + case IrOpcode::kWord64And: + return VisitWord64And(node); + case IrOpcode::kWord64Or: + return VisitWord64Or(node); + case IrOpcode::kWord64Xor: + return VisitWord64Xor(node); + case IrOpcode::kWord64Shl: + return VisitWord64Shl(node); + case IrOpcode::kWord64Shr: + return VisitWord64Shr(node); + case IrOpcode::kWord64Sar: + return VisitWord64Sar(node); + case IrOpcode::kWord64Equal: + return VisitWord64Equal(node); + case IrOpcode::kInt32Add: + return VisitInt32Add(node); + case IrOpcode::kInt32AddWithOverflow: + return VisitInt32AddWithOverflow(node); + case IrOpcode::kInt32Sub: + return VisitInt32Sub(node); + case IrOpcode::kInt32SubWithOverflow: + return VisitInt32SubWithOverflow(node); + case IrOpcode::kInt32Mul: + return VisitInt32Mul(node); + case IrOpcode::kInt32Div: + return VisitInt32Div(node); + case IrOpcode::kInt32UDiv: + return VisitInt32UDiv(node); + case IrOpcode::kInt32Mod: + return VisitInt32Mod(node); + case IrOpcode::kInt32UMod: + return VisitInt32UMod(node); + case IrOpcode::kInt32LessThan: + return VisitInt32LessThan(node); + case IrOpcode::kInt32LessThanOrEqual: + return VisitInt32LessThanOrEqual(node); + case IrOpcode::kUint32LessThan: + 
return VisitUint32LessThan(node); + case IrOpcode::kUint32LessThanOrEqual: + return VisitUint32LessThanOrEqual(node); + case IrOpcode::kInt64Add: + return VisitInt64Add(node); + case IrOpcode::kInt64Sub: + return VisitInt64Sub(node); + case IrOpcode::kInt64Mul: + return VisitInt64Mul(node); + case IrOpcode::kInt64Div: + return VisitInt64Div(node); + case IrOpcode::kInt64UDiv: + return VisitInt64UDiv(node); + case IrOpcode::kInt64Mod: + return VisitInt64Mod(node); + case IrOpcode::kInt64UMod: + return VisitInt64UMod(node); + case IrOpcode::kInt64LessThan: + return VisitInt64LessThan(node); + case IrOpcode::kInt64LessThanOrEqual: + return VisitInt64LessThanOrEqual(node); + case IrOpcode::kConvertInt32ToInt64: + return VisitConvertInt32ToInt64(node); + case IrOpcode::kConvertInt64ToInt32: + return VisitConvertInt64ToInt32(node); + case IrOpcode::kChangeInt32ToFloat64: + return MarkAsDouble(node), VisitChangeInt32ToFloat64(node); + case IrOpcode::kChangeUint32ToFloat64: + return MarkAsDouble(node), VisitChangeUint32ToFloat64(node); + case IrOpcode::kChangeFloat64ToInt32: + return VisitChangeFloat64ToInt32(node); + case IrOpcode::kChangeFloat64ToUint32: + return VisitChangeFloat64ToUint32(node); + case IrOpcode::kFloat64Add: + return MarkAsDouble(node), VisitFloat64Add(node); + case IrOpcode::kFloat64Sub: + return MarkAsDouble(node), VisitFloat64Sub(node); + case IrOpcode::kFloat64Mul: + return MarkAsDouble(node), VisitFloat64Mul(node); + case IrOpcode::kFloat64Div: + return MarkAsDouble(node), VisitFloat64Div(node); + case IrOpcode::kFloat64Mod: + return MarkAsDouble(node), VisitFloat64Mod(node); + case IrOpcode::kFloat64Equal: + return VisitFloat64Equal(node); + case IrOpcode::kFloat64LessThan: + return VisitFloat64LessThan(node); + case IrOpcode::kFloat64LessThanOrEqual: + return VisitFloat64LessThanOrEqual(node); + default: + V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d", + node->opcode(), node->op()->mnemonic(), node->id()); + } +} + + +#if 
V8_TURBOFAN_BACKEND + +void InstructionSelector::VisitWord32Equal(Node* node) { + FlagsContinuation cont(kEqual, node); + Int32BinopMatcher m(node); + if (m.right().Is(0)) { + return VisitWord32Test(m.left().node(), &cont); + } + VisitWord32Compare(node, &cont); +} + + +void InstructionSelector::VisitInt32LessThan(Node* node) { + FlagsContinuation cont(kSignedLessThan, node); + VisitWord32Compare(node, &cont); +} + + +void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { + FlagsContinuation cont(kSignedLessThanOrEqual, node); + VisitWord32Compare(node, &cont); +} + + +void InstructionSelector::VisitUint32LessThan(Node* node) { + FlagsContinuation cont(kUnsignedLessThan, node); + VisitWord32Compare(node, &cont); +} + + +void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { + FlagsContinuation cont(kUnsignedLessThanOrEqual, node); + VisitWord32Compare(node, &cont); +} + + +void InstructionSelector::VisitWord64Equal(Node* node) { + FlagsContinuation cont(kEqual, node); + Int64BinopMatcher m(node); + if (m.right().Is(0)) { + return VisitWord64Test(m.left().node(), &cont); + } + VisitWord64Compare(node, &cont); +} + + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { + if (Node* ovf = node->FindProjection(1)) { + FlagsContinuation cont(kOverflow, ovf); + return VisitInt32AddWithOverflow(node, &cont); + } + FlagsContinuation cont; + VisitInt32AddWithOverflow(node, &cont); +} + + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { + if (Node* ovf = node->FindProjection(1)) { + FlagsContinuation cont(kOverflow, ovf); + return VisitInt32SubWithOverflow(node, &cont); + } + FlagsContinuation cont; + VisitInt32SubWithOverflow(node, &cont); +} + + +void InstructionSelector::VisitInt64LessThan(Node* node) { + FlagsContinuation cont(kSignedLessThan, node); + VisitWord64Compare(node, &cont); +} + + +void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { + FlagsContinuation cont(kSignedLessThanOrEqual, node); + 
VisitWord64Compare(node, &cont); +} + + +void InstructionSelector::VisitFloat64Equal(Node* node) { + FlagsContinuation cont(kUnorderedEqual, node); + VisitFloat64Compare(node, &cont); +} + + +void InstructionSelector::VisitFloat64LessThan(Node* node) { + FlagsContinuation cont(kUnorderedLessThan, node); + VisitFloat64Compare(node, &cont); +} + + +void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { + FlagsContinuation cont(kUnorderedLessThanOrEqual, node); + VisitFloat64Compare(node, &cont); +} + +#endif // V8_TURBOFAN_BACKEND + +// 32 bit targets do not implement the following instructions. +#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND + +void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); } + + +void InstructionSelector::VisitConvertInt64ToInt32(Node* node) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitConvertInt32ToInt64(Node* node) { + UNIMPLEMENTED(); +} + +#endif // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND + + +// 32-bit targets and unsupported architectures need dummy 
implementations of +// selected 64-bit ops. +#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND + +void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitWord64Compare(Node* node, + FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + +#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND + + +void InstructionSelector::VisitParameter(Node* node) { + OperandGenerator g(this); + Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation( + OpParameter<int>(node)))); +} + + +void InstructionSelector::VisitPhi(Node* node) { + // TODO(bmeurer): Emit a PhiInstruction here. + for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { + MarkAsUsed(*i); + } +} + + +void InstructionSelector::VisitProjection(Node* node) { + OperandGenerator g(this); + Node* value = node->InputAt(0); + switch (value->opcode()) { + case IrOpcode::kInt32AddWithOverflow: + case IrOpcode::kInt32SubWithOverflow: + if (OpParameter<int32_t>(node) == 0) { + Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + } else { + DCHECK_EQ(1, OpParameter<int32_t>(node)); + MarkAsUsed(value); + } + break; + default: + break; + } +} + + +void InstructionSelector::VisitConstant(Node* node) { + // We must emit a NOP here because every live range needs a defining + // instruction in the register allocator. + OperandGenerator g(this); + Emit(kArchNop, g.DefineAsConstant(node)); +} + + +void InstructionSelector::VisitGoto(BasicBlock* target) { + if (IsNextInAssemblyOrder(target)) { + // fall through to the next block. + Emit(kArchNop, NULL)->MarkAsControl(); + } else { + // jump to the next block. 
+ OperandGenerator g(this); + Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl(); + } +} + + +void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch, + BasicBlock* fbranch) { + OperandGenerator g(this); + Node* user = branch; + Node* value = branch->InputAt(0); + + FlagsContinuation cont(kNotEqual, tbranch, fbranch); + + // If we can fall through to the true block, invert the branch. + if (IsNextInAssemblyOrder(tbranch)) { + cont.Negate(); + cont.SwapBlocks(); + } + + // Try to combine with comparisons against 0 by simply inverting the branch. + while (CanCover(user, value)) { + if (value->opcode() == IrOpcode::kWord32Equal) { + Int32BinopMatcher m(value); + if (m.right().Is(0)) { + user = value; + value = m.left().node(); + cont.Negate(); + } else { + break; + } + } else if (value->opcode() == IrOpcode::kWord64Equal) { + Int64BinopMatcher m(value); + if (m.right().Is(0)) { + user = value; + value = m.left().node(); + cont.Negate(); + } else { + break; + } + } else { + break; + } + } + + // Try to combine the branch with a comparison. 
+ if (CanCover(user, value)) { + switch (value->opcode()) { + case IrOpcode::kWord32Equal: + cont.OverwriteAndNegateIfEqual(kEqual); + return VisitWord32Compare(value, &cont); + case IrOpcode::kInt32LessThan: + cont.OverwriteAndNegateIfEqual(kSignedLessThan); + return VisitWord32Compare(value, &cont); + case IrOpcode::kInt32LessThanOrEqual: + cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); + return VisitWord32Compare(value, &cont); + case IrOpcode::kUint32LessThan: + cont.OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitWord32Compare(value, &cont); + case IrOpcode::kUint32LessThanOrEqual: + cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitWord32Compare(value, &cont); + case IrOpcode::kWord64Equal: + cont.OverwriteAndNegateIfEqual(kEqual); + return VisitWord64Compare(value, &cont); + case IrOpcode::kInt64LessThan: + cont.OverwriteAndNegateIfEqual(kSignedLessThan); + return VisitWord64Compare(value, &cont); + case IrOpcode::kInt64LessThanOrEqual: + cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); + return VisitWord64Compare(value, &cont); + case IrOpcode::kFloat64Equal: + cont.OverwriteAndNegateIfEqual(kUnorderedEqual); + return VisitFloat64Compare(value, &cont); + case IrOpcode::kFloat64LessThan: + cont.OverwriteAndNegateIfEqual(kUnorderedLessThan); + return VisitFloat64Compare(value, &cont); + case IrOpcode::kFloat64LessThanOrEqual: + cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual); + return VisitFloat64Compare(value, &cont); + case IrOpcode::kProjection: + // Check if this is the overflow output projection of an + // <Operation>WithOverflow node. + if (OpParameter<int32_t>(value) == 1) { + // We cannot combine the <Operation>WithOverflow with this branch + // unless the 0th projection (the use of the actual value of the + // <Operation> is either NULL, which means there's no use of the + // actual value, or was already defined, which means it is scheduled + // *AFTER* this branch). 
+ Node* node = value->InputAt(0); + Node* result = node->FindProjection(0); + if (result == NULL || IsDefined(result)) { + switch (node->opcode()) { + case IrOpcode::kInt32AddWithOverflow: + cont.OverwriteAndNegateIfEqual(kOverflow); + return VisitInt32AddWithOverflow(node, &cont); + case IrOpcode::kInt32SubWithOverflow: + cont.OverwriteAndNegateIfEqual(kOverflow); + return VisitInt32SubWithOverflow(node, &cont); + default: + break; + } + } + } + break; + default: + break; + } + } + + // Branch could not be combined with a compare, emit compare against 0. + VisitWord32Test(value, &cont); +} + + +void InstructionSelector::VisitReturn(Node* value) { + OperandGenerator g(this); + if (value != NULL) { + Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation())); + } else { + Emit(kArchRet, NULL); + } +} + + +void InstructionSelector::VisitThrow(Node* value) { + UNIMPLEMENTED(); // TODO(titzer) +} + + +static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) { + switch (input->opcode()) { + case IrOpcode::kInt32Constant: + case IrOpcode::kNumberConstant: + case IrOpcode::kFloat64Constant: + case IrOpcode::kHeapConstant: + return g->UseImmediate(input); + default: + return g->Use(input); + } +} + + +void InstructionSelector::VisitDeoptimize(Node* deopt) { + DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize); + Node* state = deopt->InputAt(0); + DCHECK(state->op()->opcode() == IrOpcode::kFrameState); + BailoutId ast_id = OpParameter<BailoutId>(state); + + // Add the inputs. 
+ Node* parameters = state->InputAt(0); + int parameters_count = OpParameter<int>(parameters); + + Node* locals = state->InputAt(1); + int locals_count = OpParameter<int>(locals); + + Node* stack = state->InputAt(2); + int stack_count = OpParameter<int>(stack); + + OperandGenerator g(this); + std::vector<InstructionOperand*> inputs; + inputs.reserve(parameters_count + locals_count + stack_count); + for (int i = 0; i < parameters_count; i++) { + inputs.push_back(UseOrImmediate(&g, parameters->InputAt(i))); + } + for (int i = 0; i < locals_count; i++) { + inputs.push_back(UseOrImmediate(&g, locals->InputAt(i))); + } + for (int i = 0; i < stack_count; i++) { + inputs.push_back(UseOrImmediate(&g, stack->InputAt(i))); + } + + FrameStateDescriptor* descriptor = new (instruction_zone()) + FrameStateDescriptor(ast_id, parameters_count, locals_count, stack_count); + + DCHECK_EQ(descriptor->size(), inputs.size()); + + int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor); + Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), 0, NULL, + inputs.size(), &inputs.front(), 0, NULL); +} + + +#if !V8_TURBOFAN_BACKEND + +#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \ + void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); } +MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR) +#undef DECLARE_UNIMPLEMENTED_SELECTOR + + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node, + FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node, + FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitWord32Compare(Node* node, + FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitFloat64Compare(Node* node, + FlagsContinuation* cont) { + UNIMPLEMENTED(); +} + + +void InstructionSelector::VisitCall(Node* call, BasicBlock* 
continuation, + BasicBlock* deoptimization) {} + +#endif // !V8_TURBOFAN_BACKEND + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/instruction-selector.h nodejs-0.11.15/deps/v8/src/compiler/instruction-selector.h --- nodejs-0.11.13/deps/v8/src/compiler/instruction-selector.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/instruction-selector.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,212 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_ +#define V8_COMPILER_INSTRUCTION_SELECTOR_H_ + +#include <deque> + +#include "src/compiler/common-operator.h" +#include "src/compiler/instruction.h" +#include "src/compiler/machine-operator.h" +#include "src/zone-containers.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Forward declarations. +struct CallBuffer; // TODO(bmeurer): Remove this. +class FlagsContinuation; + +class InstructionSelector V8_FINAL { + public: + // Forward declarations. + class Features; + + InstructionSelector(InstructionSequence* sequence, + SourcePositionTable* source_positions, + Features features = SupportedFeatures()); + + // Visit code for the entire graph with the included schedule. + void SelectInstructions(); + + // =========================================================================== + // ============= Architecture-independent code emission methods. 
============= + // =========================================================================== + + Instruction* Emit(InstructionCode opcode, InstructionOperand* output, + size_t temp_count = 0, InstructionOperand* *temps = NULL); + Instruction* Emit(InstructionCode opcode, InstructionOperand* output, + InstructionOperand* a, size_t temp_count = 0, + InstructionOperand* *temps = NULL); + Instruction* Emit(InstructionCode opcode, InstructionOperand* output, + InstructionOperand* a, InstructionOperand* b, + size_t temp_count = 0, InstructionOperand* *temps = NULL); + Instruction* Emit(InstructionCode opcode, InstructionOperand* output, + InstructionOperand* a, InstructionOperand* b, + InstructionOperand* c, size_t temp_count = 0, + InstructionOperand* *temps = NULL); + Instruction* Emit(InstructionCode opcode, InstructionOperand* output, + InstructionOperand* a, InstructionOperand* b, + InstructionOperand* c, InstructionOperand* d, + size_t temp_count = 0, InstructionOperand* *temps = NULL); + Instruction* Emit(InstructionCode opcode, size_t output_count, + InstructionOperand** outputs, size_t input_count, + InstructionOperand** inputs, size_t temp_count = 0, + InstructionOperand* *temps = NULL); + Instruction* Emit(Instruction* instr); + + // =========================================================================== + // ============== Architecture-independent CPU feature methods. 
============== + // =========================================================================== + + class Features V8_FINAL { + public: + Features() : bits_(0) {} + explicit Features(unsigned bits) : bits_(bits) {} + explicit Features(CpuFeature f) : bits_(1u << f) {} + Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {} + + bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); } + + private: + unsigned bits_; + }; + + bool IsSupported(CpuFeature feature) const { + return features_.Contains(feature); + } + + // Returns the features supported on the target platform. + static Features SupportedFeatures() { + return Features(CpuFeatures::SupportedFeatures()); + } + + private: + friend class OperandGenerator; + + // =========================================================================== + // ============ Architecture-independent graph covering methods. ============= + // =========================================================================== + + // Checks if {block} will appear directly after {current_block_} when + // assembling code, in which case, a fall-through can be used. + bool IsNextInAssemblyOrder(const BasicBlock* block) const; + + // Used in pattern matching during code generation. + // Check if {node} can be covered while generating code for the current + // instruction. A node can be covered if the {user} of the node has the only + // edge and the two are in the same basic block. + bool CanCover(Node* user, Node* node) const; + + // Checks if {node} was already defined, and therefore code was already + // generated for it. + bool IsDefined(Node* node) const; + + // Inform the instruction selection that {node} was just defined. + void MarkAsDefined(Node* node); + + // Checks if {node} has any uses, and therefore code has to be generated for + // it. + bool IsUsed(Node* node) const; + + // Inform the instruction selection that {node} has at least one use and we + // will need to generate code for it. 
+ void MarkAsUsed(Node* node); + + // Checks if {node} is marked as double. + bool IsDouble(const Node* node) const; + + // Inform the register allocator of a double result. + void MarkAsDouble(Node* node); + + // Checks if {node} is marked as reference. + bool IsReference(const Node* node) const; + + // Inform the register allocator of a reference result. + void MarkAsReference(Node* node); + + // Inform the register allocation of the representation of the value produced + // by {node}. + void MarkAsRepresentation(MachineType rep, Node* node); + + // Initialize the call buffer with the InstructionOperands, nodes, etc, + // corresponding + // to the inputs and outputs of the call. + // {call_code_immediate} to generate immediate operands to calls of code. + // {call_address_immediate} to generate immediate operands to address calls. + void InitializeCallBuffer(Node* call, CallBuffer* buffer, + bool call_code_immediate, + bool call_address_immediate, BasicBlock* cont_node, + BasicBlock* deopt_node); + + // =========================================================================== + // ============= Architecture-specific graph covering methods. =============== + // =========================================================================== + + // Visit nodes in the given block and generate code. + void VisitBlock(BasicBlock* block); + + // Visit the node for the control flow at the end of the block, generating + // code if necessary. + void VisitControl(BasicBlock* block); + + // Visit the node and generate code, if any. 
+ void VisitNode(Node* node); + +#define DECLARE_GENERATOR(x) void Visit##x(Node* node); + MACHINE_OP_LIST(DECLARE_GENERATOR) +#undef DECLARE_GENERATOR + + void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont); + void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont); + + void VisitWord32Test(Node* node, FlagsContinuation* cont); + void VisitWord64Test(Node* node, FlagsContinuation* cont); + void VisitWord32Compare(Node* node, FlagsContinuation* cont); + void VisitWord64Compare(Node* node, FlagsContinuation* cont); + void VisitFloat64Compare(Node* node, FlagsContinuation* cont); + + void VisitParameter(Node* node); + void VisitPhi(Node* node); + void VisitProjection(Node* node); + void VisitConstant(Node* node); + void VisitCall(Node* call, BasicBlock* continuation, + BasicBlock* deoptimization); + void VisitGoto(BasicBlock* target); + void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch); + void VisitReturn(Node* value); + void VisitThrow(Node* value); + void VisitDeoptimize(Node* deopt); + + // =========================================================================== + + Graph* graph() const { return sequence()->graph(); } + Linkage* linkage() const { return sequence()->linkage(); } + Schedule* schedule() const { return sequence()->schedule(); } + InstructionSequence* sequence() const { return sequence_; } + Zone* instruction_zone() const { return sequence()->zone(); } + Zone* zone() { return &zone_; } + + // =========================================================================== + + typedef zone_allocator<Instruction*> InstructionPtrZoneAllocator; + typedef std::deque<Instruction*, InstructionPtrZoneAllocator> Instructions; + + Zone zone_; + InstructionSequence* sequence_; + SourcePositionTable* source_positions_; + Features features_; + BasicBlock* current_block_; + Instructions instructions_; + BoolVector defined_; + BoolVector used_; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + 
+#endif // V8_COMPILER_INSTRUCTION_SELECTOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/instruction-selector-impl.h nodejs-0.11.15/deps/v8/src/compiler/instruction-selector-impl.h --- nodejs-0.11.13/deps/v8/src/compiler/instruction-selector-impl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/instruction-selector-impl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,371 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_ +#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_ + +#include "src/compiler/instruction.h" +#include "src/compiler/instruction-selector.h" +#include "src/compiler/linkage.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// A helper class for the instruction selector that simplifies construction of +// Operands. This class implements a base for architecture-specific helpers. 
+class OperandGenerator { + public: + explicit OperandGenerator(InstructionSelector* selector) + : selector_(selector) {} + + InstructionOperand* DefineAsRegister(Node* node) { + return Define(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)); + } + + InstructionOperand* DefineAsDoubleRegister(Node* node) { + return Define(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)); + } + + InstructionOperand* DefineSameAsFirst(Node* result) { + return Define(result, new (zone()) + UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT)); + } + + InstructionOperand* DefineAsFixed(Node* node, Register reg) { + return Define(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, + Register::ToAllocationIndex(reg))); + } + + InstructionOperand* DefineAsFixedDouble(Node* node, DoubleRegister reg) { + return Define(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER, + DoubleRegister::ToAllocationIndex(reg))); + } + + InstructionOperand* DefineAsConstant(Node* node) { + selector()->MarkAsDefined(node); + sequence()->AddConstant(node->id(), ToConstant(node)); + return ConstantOperand::Create(node->id(), zone()); + } + + InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location) { + return Define(node, ToUnallocatedOperand(location)); + } + + InstructionOperand* Use(Node* node) { + return Use(node, + new (zone()) UnallocatedOperand( + UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START)); + } + + InstructionOperand* UseRegister(Node* node) { + return Use(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, + UnallocatedOperand::USED_AT_START)); + } + + InstructionOperand* UseDoubleRegister(Node* node) { + return Use(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, + UnallocatedOperand::USED_AT_START)); + } + + // Use register or operand for the node. 
If a register is chosen, it won't + // alias any temporary or output registers. + InstructionOperand* UseUnique(Node* node) { + return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY)); + } + + // Use a unique register for the node that does not alias any temporary or + // output registers. + InstructionOperand* UseUniqueRegister(Node* node) { + return Use(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)); + } + + // Use a unique double register for the node that does not alias any temporary + // or output double registers. + InstructionOperand* UseUniqueDoubleRegister(Node* node) { + return Use(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)); + } + + InstructionOperand* UseFixed(Node* node, Register reg) { + return Use(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, + Register::ToAllocationIndex(reg))); + } + + InstructionOperand* UseFixedDouble(Node* node, DoubleRegister reg) { + return Use(node, new (zone()) + UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER, + DoubleRegister::ToAllocationIndex(reg))); + } + + InstructionOperand* UseImmediate(Node* node) { + int index = sequence()->AddImmediate(ToConstant(node)); + return ImmediateOperand::Create(index, zone()); + } + + InstructionOperand* UseLocation(Node* node, LinkageLocation location) { + return Use(node, ToUnallocatedOperand(location)); + } + + InstructionOperand* TempRegister() { + UnallocatedOperand* op = + new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, + UnallocatedOperand::USED_AT_START); + op->set_virtual_register(sequence()->NextVirtualRegister()); + return op; + } + + InstructionOperand* TempDoubleRegister() { + UnallocatedOperand* op = + new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, + UnallocatedOperand::USED_AT_START); + op->set_virtual_register(sequence()->NextVirtualRegister()); + 
sequence()->MarkAsDouble(op->virtual_register()); + return op; + } + + InstructionOperand* TempRegister(Register reg) { + return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, + Register::ToAllocationIndex(reg)); + } + + InstructionOperand* TempImmediate(int32_t imm) { + int index = sequence()->AddImmediate(Constant(imm)); + return ImmediateOperand::Create(index, zone()); + } + + InstructionOperand* Label(BasicBlock* block) { + // TODO(bmeurer): We misuse ImmediateOperand here. + return TempImmediate(block->id()); + } + + protected: + Graph* graph() const { return selector()->graph(); } + InstructionSelector* selector() const { return selector_; } + InstructionSequence* sequence() const { return selector()->sequence(); } + Isolate* isolate() const { return zone()->isolate(); } + Zone* zone() const { return selector()->instruction_zone(); } + + private: + static Constant ToConstant(const Node* node) { + switch (node->opcode()) { + case IrOpcode::kInt32Constant: + return Constant(ValueOf<int32_t>(node->op())); + case IrOpcode::kInt64Constant: + return Constant(ValueOf<int64_t>(node->op())); + case IrOpcode::kNumberConstant: + case IrOpcode::kFloat64Constant: + return Constant(ValueOf<double>(node->op())); + case IrOpcode::kExternalConstant: + return Constant(ValueOf<ExternalReference>(node->op())); + case IrOpcode::kHeapConstant: + return Constant(ValueOf<Handle<HeapObject> >(node->op())); + default: + break; + } + UNREACHABLE(); + return Constant(static_cast<int32_t>(0)); + } + + UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) { + DCHECK_NOT_NULL(node); + DCHECK_NOT_NULL(operand); + operand->set_virtual_register(node->id()); + selector()->MarkAsDefined(node); + return operand; + } + + UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) { + DCHECK_NOT_NULL(node); + DCHECK_NOT_NULL(operand); + operand->set_virtual_register(node->id()); + selector()->MarkAsUsed(node); + return operand; + } + + UnallocatedOperand* 
ToUnallocatedOperand(LinkageLocation location) { + if (location.location_ == LinkageLocation::ANY_REGISTER) { + return new (zone()) + UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER); + } + if (location.location_ < 0) { + return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT, + location.location_); + } + if (location.rep_ == kMachineFloat64) { + return new (zone()) UnallocatedOperand( + UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_); + } + return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, + location.location_); + } + + InstructionSelector* selector_; +}; + + +// The flags continuation is a way to combine a branch or a materialization +// of a boolean value with an instruction that sets the flags register. +// The whole instruction is treated as a unit by the register allocator, and +// thus no spills or moves can be introduced between the flags-setting +// instruction and the branch or set it should be combined with. +class FlagsContinuation V8_FINAL { + public: + FlagsContinuation() : mode_(kFlags_none) {} + + // Creates a new flags continuation from the given condition and true/false + // blocks. + FlagsContinuation(FlagsCondition condition, BasicBlock* true_block, + BasicBlock* false_block) + : mode_(kFlags_branch), + condition_(condition), + true_block_(true_block), + false_block_(false_block) { + DCHECK_NOT_NULL(true_block); + DCHECK_NOT_NULL(false_block); + } + + // Creates a new flags continuation from the given condition and result node. 
+ FlagsContinuation(FlagsCondition condition, Node* result) + : mode_(kFlags_set), condition_(condition), result_(result) { + DCHECK_NOT_NULL(result); + } + + bool IsNone() const { return mode_ == kFlags_none; } + bool IsBranch() const { return mode_ == kFlags_branch; } + bool IsSet() const { return mode_ == kFlags_set; } + FlagsCondition condition() const { + DCHECK(!IsNone()); + return condition_; + } + Node* result() const { + DCHECK(IsSet()); + return result_; + } + BasicBlock* true_block() const { + DCHECK(IsBranch()); + return true_block_; + } + BasicBlock* false_block() const { + DCHECK(IsBranch()); + return false_block_; + } + + void Negate() { + DCHECK(!IsNone()); + condition_ = static_cast<FlagsCondition>(condition_ ^ 1); + } + + void Commute() { + DCHECK(!IsNone()); + switch (condition_) { + case kEqual: + case kNotEqual: + case kOverflow: + case kNotOverflow: + return; + case kSignedLessThan: + condition_ = kSignedGreaterThan; + return; + case kSignedGreaterThanOrEqual: + condition_ = kSignedLessThanOrEqual; + return; + case kSignedLessThanOrEqual: + condition_ = kSignedGreaterThanOrEqual; + return; + case kSignedGreaterThan: + condition_ = kSignedLessThan; + return; + case kUnsignedLessThan: + condition_ = kUnsignedGreaterThan; + return; + case kUnsignedGreaterThanOrEqual: + condition_ = kUnsignedLessThanOrEqual; + return; + case kUnsignedLessThanOrEqual: + condition_ = kUnsignedGreaterThanOrEqual; + return; + case kUnsignedGreaterThan: + condition_ = kUnsignedLessThan; + return; + case kUnorderedEqual: + case kUnorderedNotEqual: + return; + case kUnorderedLessThan: + condition_ = kUnorderedGreaterThan; + return; + case kUnorderedGreaterThanOrEqual: + condition_ = kUnorderedLessThanOrEqual; + return; + case kUnorderedLessThanOrEqual: + condition_ = kUnorderedGreaterThanOrEqual; + return; + case kUnorderedGreaterThan: + condition_ = kUnorderedLessThan; + return; + } + UNREACHABLE(); + } + + void OverwriteAndNegateIfEqual(FlagsCondition condition) { + 
bool negate = condition_ == kEqual; + condition_ = condition; + if (negate) Negate(); + } + + void SwapBlocks() { std::swap(true_block_, false_block_); } + + // Encodes this flags continuation into the given opcode. + InstructionCode Encode(InstructionCode opcode) { + opcode |= FlagsModeField::encode(mode_); + if (mode_ != kFlags_none) { + opcode |= FlagsConditionField::encode(condition_); + } + return opcode; + } + + private: + FlagsMode mode_; + FlagsCondition condition_; + Node* result_; // Only valid if mode_ == kFlags_set. + BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch. + BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch. +}; + + +// An internal helper class for generating the operands to calls. +// TODO(bmeurer): Get rid of the CallBuffer business and make +// InstructionSelector::VisitCall platform independent instead. +struct CallBuffer { + CallBuffer(Zone* zone, CallDescriptor* descriptor); + + int output_count; + CallDescriptor* descriptor; + Node** output_nodes; + InstructionOperand** outputs; + InstructionOperand** fixed_and_control_args; + int fixed_count; + Node** pushed_nodes; + int pushed_count; + + int input_count() { return descriptor->InputCount(); } + + int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; } + + int fixed_and_control_count() { return fixed_count + control_count(); } +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-context-specialization.cc nodejs-0.11.15/deps/v8/src/compiler/js-context-specialization.cc --- nodejs-0.11.13/deps/v8/src/compiler/js-context-specialization.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-context-specialization.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,157 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/common-operator.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/js-context-specialization.h" +#include "src/compiler/js-operator.h" +#include "src/compiler/node-aux-data-inl.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// TODO(titzer): factor this out to a common routine with js-typed-lowering. +static void ReplaceEffectfulWithValue(Node* node, Node* value) { + Node* effect = NULL; + if (OperatorProperties::HasEffectInput(node->op())) { + effect = NodeProperties::GetEffectInput(node); + } + + // Requires distinguishing between value and effect edges. + UseIter iter = node->uses().begin(); + while (iter != node->uses().end()) { + if (NodeProperties::IsEffectEdge(iter.edge())) { + DCHECK_NE(NULL, effect); + iter = iter.UpdateToAndIncrement(effect); + } else { + iter = iter.UpdateToAndIncrement(value); + } + } +} + + +class ContextSpecializationVisitor : public NullNodeVisitor { + public: + explicit ContextSpecializationVisitor(JSContextSpecializer* spec) + : spec_(spec) {} + + GenericGraphVisit::Control Post(Node* node) { + switch (node->opcode()) { + case IrOpcode::kJSLoadContext: { + Reduction r = spec_->ReduceJSLoadContext(node); + if (r.Changed() && r.replacement() != node) { + ReplaceEffectfulWithValue(node, r.replacement()); + } + break; + } + case IrOpcode::kJSStoreContext: { + Reduction r = spec_->ReduceJSStoreContext(node); + if (r.Changed() && r.replacement() != node) { + ReplaceEffectfulWithValue(node, r.replacement()); + } + break; + } + default: + break; + } + return GenericGraphVisit::CONTINUE; + } + + private: + JSContextSpecializer* spec_; +}; + + +void JSContextSpecializer::SpecializeToContext() { + ReplaceEffectfulWithValue(context_, 
jsgraph_->Constant(info_->context())); + + ContextSpecializationVisitor visitor(this); + jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor); +} + + +Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) { + DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode()); + + ValueMatcher<Handle<Context> > match(NodeProperties::GetValueInput(node, 0)); + // If the context is not constant, no reduction can occur. + if (!match.HasValue()) { + return Reducer::NoChange(); + } + + ContextAccess access = OpParameter<ContextAccess>(node); + + // Find the right parent context. + Context* context = *match.Value(); + for (int i = access.depth(); i > 0; --i) { + context = context->previous(); + } + + // If the access itself is mutable, only fold-in the parent. + if (!access.immutable()) { + // The access does not have to look up a parent, nothing to fold. + if (access.depth() == 0) { + return Reducer::NoChange(); + } + Operator* op = jsgraph_->javascript()->LoadContext(0, access.index(), + access.immutable()); + node->set_op(op); + Handle<Object> context_handle = Handle<Object>(context, info_->isolate()); + node->ReplaceInput(0, jsgraph_->Constant(context_handle)); + return Reducer::Changed(node); + } + Handle<Object> value = + Handle<Object>(context->get(access.index()), info_->isolate()); + + // Even though the context slot is immutable, the context might have escaped + // before the function to which it belongs has initialized the slot. + // We must be conservative and check if the value in the slot is currently the + // hole or undefined. If it is neither of these, then it must be initialized. + if (value->IsUndefined() || value->IsTheHole()) { + return Reducer::NoChange(); + } + + // Success. The context load can be replaced with the constant. + // TODO(titzer): record the specialization for sharing code across multiple + // contexts that have the same value in the corresponding context slot. 
+ return Reducer::Replace(jsgraph_->Constant(value)); +} + + +Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) { + DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode()); + + ValueMatcher<Handle<Context> > match(NodeProperties::GetValueInput(node, 0)); + // If the context is not constant, no reduction can occur. + if (!match.HasValue()) { + return Reducer::NoChange(); + } + + ContextAccess access = OpParameter<ContextAccess>(node); + + // The access does not have to look up a parent, nothing to fold. + if (access.depth() == 0) { + return Reducer::NoChange(); + } + + // Find the right parent context. + Context* context = *match.Value(); + for (int i = access.depth(); i > 0; --i) { + context = context->previous(); + } + + Operator* op = jsgraph_->javascript()->StoreContext(0, access.index()); + node->set_op(op); + Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate()); + node->ReplaceInput(0, jsgraph_->Constant(new_context_handle)); + + return Reducer::Changed(node); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-context-specialization.h nodejs-0.11.15/deps/v8/src/compiler/js-context-specialization.h --- nodejs-0.11.13/deps/v8/src/compiler/js-context-specialization.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-context-specialization.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_ +#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_ + +#include "src/compiler/graph-reducer.h" +#include "src/compiler/js-graph.h" +#include "src/contexts.h" +#include "src/v8.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Specializes a given JSGraph to a given context, potentially constant folding +// some {LoadContext} nodes or strength reducing some {StoreContext} nodes. +class JSContextSpecializer { + public: + JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context) + : info_(info), jsgraph_(jsgraph), context_(context) {} + + void SpecializeToContext(); + Reduction ReduceJSLoadContext(Node* node); + Reduction ReduceJSStoreContext(Node* node); + + private: + CompilationInfo* info_; + JSGraph* jsgraph_; + Node* context_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-generic-lowering.cc nodejs-0.11.15/deps/v8/src/compiler/js-generic-lowering.cc --- nodejs-0.11.13/deps/v8/src/compiler/js-generic-lowering.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-generic-lowering.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,550 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/code-stubs.h" +#include "src/compiler/common-operator.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/js-generic-lowering.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node-aux-data-inl.h" +#include "src/compiler/node-properties-inl.h" +#include "src/unique.h" + +namespace v8 { +namespace internal { +namespace compiler { + + +// TODO(mstarzinger): This is a temporary workaround for non-hydrogen stubs for +// which we don't have an interface descriptor yet. 
Use ReplaceWithICStubCall +// once these stub have been made into a HydrogenCodeStub. +template <typename T> +static CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate, + T* stub) { + CodeStub::Major key = static_cast<CodeStub*>(stub)->MajorKey(); + CodeStubInterfaceDescriptor* d = isolate->code_stub_interface_descriptor(key); + stub->InitializeInterfaceDescriptor(d); + return d; +} + + +// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub +// which doesn't have an interface descriptor yet. It mimics a hydrogen code +// stub for the underlying IC stub code. +class LoadICStubShim : public HydrogenCodeStub { + public: + LoadICStubShim(Isolate* isolate, ContextualMode contextual_mode) + : HydrogenCodeStub(isolate), contextual_mode_(contextual_mode) { + i::compiler::GetInterfaceDescriptor(isolate, this); + } + + virtual Handle<Code> GenerateCode() V8_OVERRIDE { + ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode_); + return LoadIC::initialize_stub(isolate(), extra_state); + } + + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + LoadIC::ReceiverRegister(), + LoadIC::NameRegister() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); + } + + private: + virtual Major MajorKey() const V8_OVERRIDE { return NoCache; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; } + virtual bool UseSpecialCache() V8_OVERRIDE { return true; } + + ContextualMode contextual_mode_; +}; + + +// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub +// which doesn't have an interface descriptor yet. It mimics a hydrogen code +// stub for the underlying IC stub code. 
+class KeyedLoadICStubShim : public HydrogenCodeStub { + public: + explicit KeyedLoadICStubShim(Isolate* isolate) : HydrogenCodeStub(isolate) { + i::compiler::GetInterfaceDescriptor(isolate, this); + } + + virtual Handle<Code> GenerateCode() V8_OVERRIDE { + return isolate()->builtins()->KeyedLoadIC_Initialize(); + } + + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + KeyedLoadIC::ReceiverRegister(), + KeyedLoadIC::NameRegister() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); + } + + private: + virtual Major MajorKey() const V8_OVERRIDE { return NoCache; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; } + virtual bool UseSpecialCache() V8_OVERRIDE { return true; } +}; + + +// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub +// which doesn't have an interface descriptor yet. It mimics a hydrogen code +// stub for the underlying IC stub code. 
+class StoreICStubShim : public HydrogenCodeStub { + public: + StoreICStubShim(Isolate* isolate, StrictMode strict_mode) + : HydrogenCodeStub(isolate), strict_mode_(strict_mode) { + i::compiler::GetInterfaceDescriptor(isolate, this); + } + + virtual Handle<Code> GenerateCode() V8_OVERRIDE { + return StoreIC::initialize_stub(isolate(), strict_mode_); + } + + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + StoreIC::ReceiverRegister(), + StoreIC::NameRegister(), + StoreIC::ValueRegister() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); + } + + private: + virtual Major MajorKey() const V8_OVERRIDE { return NoCache; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; } + virtual bool UseSpecialCache() V8_OVERRIDE { return true; } + + StrictMode strict_mode_; +}; + + +// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub +// which doesn't have an interface descriptor yet. It mimics a hydrogen code +// stub for the underlying IC stub code. +class KeyedStoreICStubShim : public HydrogenCodeStub { + public: + KeyedStoreICStubShim(Isolate* isolate, StrictMode strict_mode) + : HydrogenCodeStub(isolate), strict_mode_(strict_mode) { + i::compiler::GetInterfaceDescriptor(isolate, this); + } + + virtual Handle<Code> GenerateCode() V8_OVERRIDE { + return strict_mode_ == SLOPPY + ? 
isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + } + + virtual void InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE { + Register registers[] = { InterfaceDescriptor::ContextRegister(), + KeyedStoreIC::ReceiverRegister(), + KeyedStoreIC::NameRegister(), + KeyedStoreIC::ValueRegister() }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); + } + + private: + virtual Major MajorKey() const V8_OVERRIDE { return NoCache; } + virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; } + virtual bool UseSpecialCache() V8_OVERRIDE { return true; } + + StrictMode strict_mode_; +}; + + +JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph, + MachineOperatorBuilder* machine, + SourcePositionTable* source_positions) + : LoweringBuilder(jsgraph->graph(), source_positions), + info_(info), + jsgraph_(jsgraph), + linkage_(new (jsgraph->zone()) Linkage(info)), + machine_(machine) {} + + +void JSGenericLowering::PatchOperator(Node* node, Operator* op) { + node->set_op(op); +} + + +void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) { + node->InsertInput(zone(), index, input); +} + + +Node* JSGenericLowering::SmiConstant(int32_t immediate) { + return jsgraph()->SmiConstant(immediate); +} + + +Node* JSGenericLowering::Int32Constant(int immediate) { + return jsgraph()->Int32Constant(immediate); +} + + +Node* JSGenericLowering::CodeConstant(Handle<Code> code) { + return jsgraph()->HeapConstant(code); +} + + +Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) { + return jsgraph()->HeapConstant(function); +} + + +Node* JSGenericLowering::ExternalConstant(ExternalReference ref) { + return jsgraph()->ExternalConstant(ref); +} + + +void JSGenericLowering::Lower(Node* node) { + Node* replacement = NULL; + // Dispatch according to the opcode. 
+ switch (node->opcode()) { +#define DECLARE_CASE(x) \ + case IrOpcode::k##x: \ + replacement = Lower##x(node); \ + break; + DECLARE_CASE(Branch) + JS_OP_LIST(DECLARE_CASE) +#undef DECLARE_CASE + default: + // Nothing to see. + return; + } + + // Nothing to do if lowering was done by patching the existing node. + if (replacement == node) return; + + // Iterate through uses of the original node and replace uses accordingly. + UNIMPLEMENTED(); +} + + +#define REPLACE_IC_STUB_CALL(op, StubDeclaration) \ + Node* JSGenericLowering::Lower##op(Node* node) { \ + StubDeclaration; \ + ReplaceWithICStubCall(node, &stub); \ + return node; \ + } +REPLACE_IC_STUB_CALL(JSBitwiseOr, BinaryOpICStub stub(isolate(), Token::BIT_OR)) +REPLACE_IC_STUB_CALL(JSBitwiseXor, + BinaryOpICStub stub(isolate(), Token::BIT_XOR)) +REPLACE_IC_STUB_CALL(JSBitwiseAnd, + BinaryOpICStub stub(isolate(), Token::BIT_AND)) +REPLACE_IC_STUB_CALL(JSShiftLeft, BinaryOpICStub stub(isolate(), Token::SHL)) +REPLACE_IC_STUB_CALL(JSShiftRight, BinaryOpICStub stub(isolate(), Token::SAR)) +REPLACE_IC_STUB_CALL(JSShiftRightLogical, + BinaryOpICStub stub(isolate(), Token::SHR)) +REPLACE_IC_STUB_CALL(JSAdd, BinaryOpICStub stub(isolate(), Token::ADD)) +REPLACE_IC_STUB_CALL(JSSubtract, BinaryOpICStub stub(isolate(), Token::SUB)) +REPLACE_IC_STUB_CALL(JSMultiply, BinaryOpICStub stub(isolate(), Token::MUL)) +REPLACE_IC_STUB_CALL(JSDivide, BinaryOpICStub stub(isolate(), Token::DIV)) +REPLACE_IC_STUB_CALL(JSModulus, BinaryOpICStub stub(isolate(), Token::MOD)) +REPLACE_IC_STUB_CALL(JSToNumber, ToNumberStub stub(isolate())) +#undef REPLACE_IC_STUB_CALL + + +#define REPLACE_COMPARE_IC_CALL(op, token, pure) \ + Node* JSGenericLowering::Lower##op(Node* node) { \ + ReplaceWithCompareIC(node, token, pure); \ + return node; \ + } +REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false) +REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false) +REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true) 
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true) +REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false) +REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false) +REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false) +REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false) +#undef REPLACE_COMPARE_IC_CALL + + +#define REPLACE_RUNTIME_CALL(op, fun) \ + Node* JSGenericLowering::Lower##op(Node* node) { \ + ReplaceWithRuntimeCall(node, fun); \ + return node; \ + } +REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof) +REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort) +REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext) +REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext) +REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext) +REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext) +REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext) +REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort) +#undef REPLACE_RUNTIME + + +#define REPLACE_UNIMPLEMENTED(op) \ + Node* JSGenericLowering::Lower##op(Node* node) { \ + UNIMPLEMENTED(); \ + return node; \ + } +REPLACE_UNIMPLEMENTED(JSToString) +REPLACE_UNIMPLEMENTED(JSToName) +REPLACE_UNIMPLEMENTED(JSYield) +REPLACE_UNIMPLEMENTED(JSDebugger) +#undef REPLACE_UNIMPLEMENTED + + +static CallDescriptor::DeoptimizationSupport DeoptimizationSupportForNode( + Node* node) { + return OperatorProperties::CanLazilyDeoptimize(node->op()) + ? CallDescriptor::kCanDeoptimize + : CallDescriptor::kCannotDeoptimize; +} + + +void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token, + bool pure) { + BinaryOpICStub stub(isolate(), Token::ADD); // TODO(mstarzinger): Hack. 
+ CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor(); + CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(d); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), token); + Node* compare; + if (pure) { + // A pure (strict) comparison doesn't have an effect or control. + // But for the graph, we need to add these inputs. + compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic), + NodeProperties::GetValueInput(node, 0), + NodeProperties::GetValueInput(node, 1), + NodeProperties::GetContextInput(node), + graph()->start(), graph()->start()); + } else { + compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic), + NodeProperties::GetValueInput(node, 0), + NodeProperties::GetValueInput(node, 1), + NodeProperties::GetContextInput(node), + NodeProperties::GetEffectInput(node), + NodeProperties::GetControlInput(node)); + } + node->ReplaceInput(0, compare); + node->ReplaceInput(1, SmiConstant(token)); + ReplaceWithRuntimeCall(node, Runtime::kBooleanize); +} + + +void JSGenericLowering::ReplaceWithICStubCall(Node* node, + HydrogenCodeStub* stub) { + CodeStubInterfaceDescriptor* d = stub->GetInterfaceDescriptor(); + CallDescriptor* desc = linkage()->GetStubCallDescriptor( + d, 0, DeoptimizationSupportForNode(node)); + Node* stub_code = CodeConstant(stub->GetCode()); + PatchInsertInput(node, 0, stub_code); + PatchOperator(node, common()->Call(desc)); +} + + +void JSGenericLowering::ReplaceWithBuiltinCall(Node* node, + Builtins::JavaScript id, + int nargs) { + CallFunctionStub stub(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS); + CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub); + CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, nargs); + // TODO(mstarzinger): Accessing the builtins object this way prevents sharing + // of code across native contexts. Fix this by loading from given context. 
+ Handle<JSFunction> function( + JSFunction::cast(info()->context()->builtins()->javascript_builtin(id))); + Node* stub_code = CodeConstant(stub.GetCode()); + Node* function_node = FunctionConstant(function); + PatchInsertInput(node, 0, stub_code); + PatchInsertInput(node, 1, function_node); + PatchOperator(node, common()->Call(desc)); +} + + +void JSGenericLowering::ReplaceWithRuntimeCall(Node* node, + Runtime::FunctionId f, + int nargs_override) { + Operator::Property props = node->op()->properties(); + const Runtime::Function* fun = Runtime::FunctionForId(f); + int nargs = (nargs_override < 0) ? fun->nargs : nargs_override; + CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor( + f, nargs, props, DeoptimizationSupportForNode(node)); + Node* ref = ExternalConstant(ExternalReference(f, isolate())); + Node* arity = Int32Constant(nargs); + if (!centrystub_constant_.is_set()) { + centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode())); + } + PatchInsertInput(node, 0, centrystub_constant_.get()); + PatchInsertInput(node, nargs + 1, ref); + PatchInsertInput(node, nargs + 2, arity); + PatchOperator(node, common()->Call(desc)); +} + + +Node* JSGenericLowering::LowerBranch(Node* node) { + Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0), + jsgraph()->TrueConstant()); + node->ReplaceInput(0, test); + return node; +} + + +Node* JSGenericLowering::LowerJSUnaryNot(Node* node) { + ToBooleanStub stub(isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL); + ReplaceWithICStubCall(node, &stub); + return node; +} + + +Node* JSGenericLowering::LowerJSToBoolean(Node* node) { + ToBooleanStub stub(isolate(), ToBooleanStub::RESULT_AS_ODDBALL); + ReplaceWithICStubCall(node, &stub); + return node; +} + + +Node* JSGenericLowering::LowerJSToObject(Node* node) { + ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1); + return node; +} + + +Node* JSGenericLowering::LowerJSLoadProperty(Node* node) { + KeyedLoadICStubShim stub(isolate()); + 
ReplaceWithICStubCall(node, &stub); + return node; +} + + +Node* JSGenericLowering::LowerJSLoadNamed(Node* node) { + LoadNamedParameters p = OpParameter<LoadNamedParameters>(node); + LoadICStubShim stub(isolate(), p.contextual_mode); + PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name)); + ReplaceWithICStubCall(node, &stub); + return node; +} + + +Node* JSGenericLowering::LowerJSStoreProperty(Node* node) { + // TODO(mstarzinger): The strict_mode needs to be carried along in the + // operator so that graphs are fully compositional for inlining. + StrictMode strict_mode = info()->strict_mode(); + KeyedStoreICStubShim stub(isolate(), strict_mode); + ReplaceWithICStubCall(node, &stub); + return node; +} + + +Node* JSGenericLowering::LowerJSStoreNamed(Node* node) { + PrintableUnique<Name> key = OpParameter<PrintableUnique<Name> >(node); + // TODO(mstarzinger): The strict_mode needs to be carried along in the + // operator so that graphs are fully compositional for inlining. + StrictMode strict_mode = info()->strict_mode(); + StoreICStubShim stub(isolate(), strict_mode); + PatchInsertInput(node, 1, jsgraph()->HeapConstant(key)); + ReplaceWithICStubCall(node, &stub); + return node; +} + + +Node* JSGenericLowering::LowerJSDeleteProperty(Node* node) { + StrictMode strict_mode = OpParameter<StrictMode>(node); + PatchInsertInput(node, 2, SmiConstant(strict_mode)); + ReplaceWithBuiltinCall(node, Builtins::DELETE, 3); + return node; +} + + +Node* JSGenericLowering::LowerJSHasProperty(Node* node) { + ReplaceWithBuiltinCall(node, Builtins::IN, 2); + return node; +} + + +Node* JSGenericLowering::LowerJSInstanceOf(Node* node) { + InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>( + InstanceofStub::kReturnTrueFalseObject | + InstanceofStub::kArgsInRegisters); + InstanceofStub stub(isolate(), flags); + CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub); + CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0); + Node* stub_code = 
CodeConstant(stub.GetCode()); + PatchInsertInput(node, 0, stub_code); + PatchOperator(node, common()->Call(desc)); + return node; +} + + +Node* JSGenericLowering::LowerJSLoadContext(Node* node) { + ContextAccess access = OpParameter<ContextAccess>(node); + // TODO(mstarzinger): Use simplified operators instead of machine operators + // here so that load/store optimization can be applied afterwards. + for (int i = 0; i < access.depth(); ++i) { + node->ReplaceInput( + 0, graph()->NewNode( + machine()->Load(kMachineTagged), + NodeProperties::GetValueInput(node, 0), + Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)), + NodeProperties::GetEffectInput(node))); + } + node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index()))); + PatchOperator(node, machine()->Load(kMachineTagged)); + return node; +} + + +Node* JSGenericLowering::LowerJSStoreContext(Node* node) { + ContextAccess access = OpParameter<ContextAccess>(node); + // TODO(mstarzinger): Use simplified operators instead of machine operators + // here so that load/store optimization can be applied afterwards. 
+ for (int i = 0; i < access.depth(); ++i) { + node->ReplaceInput( + 0, graph()->NewNode( + machine()->Load(kMachineTagged), + NodeProperties::GetValueInput(node, 0), + Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)), + NodeProperties::GetEffectInput(node))); + } + node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1)); + node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index()))); + PatchOperator(node, machine()->Store(kMachineTagged, kFullWriteBarrier)); + return node; +} + + +Node* JSGenericLowering::LowerJSCallConstruct(Node* node) { + int arity = OpParameter<int>(node); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub); + CallDescriptor* desc = linkage()->GetStubCallDescriptor( + d, arity, DeoptimizationSupportForNode(node)); + Node* stub_code = CodeConstant(stub.GetCode()); + Node* construct = NodeProperties::GetValueInput(node, 0); + PatchInsertInput(node, 0, stub_code); + PatchInsertInput(node, 1, Int32Constant(arity - 1)); + PatchInsertInput(node, 2, construct); + PatchInsertInput(node, 3, jsgraph()->UndefinedConstant()); + PatchOperator(node, common()->Call(desc)); + return node; +} + + +Node* JSGenericLowering::LowerJSCallFunction(Node* node) { + CallParameters p = OpParameter<CallParameters>(node); + CallFunctionStub stub(isolate(), p.arity - 2, p.flags); + CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub); + CallDescriptor* desc = linkage()->GetStubCallDescriptor( + d, p.arity - 1, DeoptimizationSupportForNode(node)); + Node* stub_code = CodeConstant(stub.GetCode()); + PatchInsertInput(node, 0, stub_code); + PatchOperator(node, common()->Call(desc)); + return node; +} + + +Node* JSGenericLowering::LowerJSCallRuntime(Node* node) { + Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node); + int arity = OperatorProperties::GetValueInputCount(node->op()); + ReplaceWithRuntimeCall(node, function, 
arity); + return node; +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-generic-lowering.h nodejs-0.11.15/deps/v8/src/compiler/js-generic-lowering.h --- nodejs-0.11.13/deps/v8/src/compiler/js-generic-lowering.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-generic-lowering.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_ +#define V8_COMPILER_JS_GENERIC_LOWERING_H_ + +#include "src/v8.h" + +#include "src/allocation.h" +#include "src/compiler/graph.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/lowering-builder.h" +#include "src/compiler/opcodes.h" +#include "src/unique.h" + +namespace v8 { +namespace internal { + +// Forward declarations. +class HydrogenCodeStub; + +namespace compiler { + +// Forward declarations. +class CommonOperatorBuilder; +class MachineOperatorBuilder; +class Linkage; + +// Lowers JS-level operators to runtime and IC calls in the "generic" case. +class JSGenericLowering : public LoweringBuilder { + public: + JSGenericLowering(CompilationInfo* info, JSGraph* graph, + MachineOperatorBuilder* machine, + SourcePositionTable* source_positions); + virtual ~JSGenericLowering() {} + + virtual void Lower(Node* node); + + protected: +// Dispatched depending on opcode. +#define DECLARE_LOWER(x) Node* Lower##x(Node* node); + ALL_OP_LIST(DECLARE_LOWER) +#undef DECLARE_LOWER + + // Helpers to create new constant nodes. + Node* SmiConstant(int immediate); + Node* Int32Constant(int immediate); + Node* CodeConstant(Handle<Code> code); + Node* FunctionConstant(Handle<JSFunction> function); + Node* ExternalConstant(ExternalReference ref); + + // Helpers to patch existing nodes in the graph. 
+ void PatchOperator(Node* node, Operator* new_op); + void PatchInsertInput(Node* node, int index, Node* input); + + // Helpers to replace existing nodes with a generic call. + void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure); + void ReplaceWithICStubCall(Node* node, HydrogenCodeStub* stub); + void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args); + void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1); + + Zone* zone() const { return graph()->zone(); } + Isolate* isolate() const { return zone()->isolate(); } + JSGraph* jsgraph() const { return jsgraph_; } + Graph* graph() const { return jsgraph()->graph(); } + Linkage* linkage() const { return linkage_; } + CompilationInfo* info() const { return info_; } + CommonOperatorBuilder* common() const { return jsgraph()->common(); } + MachineOperatorBuilder* machine() const { return machine_; } + + private: + CompilationInfo* info_; + JSGraph* jsgraph_; + Linkage* linkage_; + MachineOperatorBuilder* machine_; + SetOncePointer<Node> centrystub_constant_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_JS_GENERIC_LOWERING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-graph.cc nodejs-0.11.15/deps/v8/src/compiler/js-graph.cc --- nodejs-0.11.13/deps/v8/src/compiler/js-graph.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-graph.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,174 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/js-graph.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/typer.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) { + PrintableUnique<Object> unique = + PrintableUnique<Object>::CreateImmovable(zone(), object); + return NewNode(common()->HeapConstant(unique)); +} + + +Node* JSGraph::NewNode(Operator* op) { + Node* node = graph()->NewNode(op); + typer_->Init(node); + return node; +} + + +Node* JSGraph::UndefinedConstant() { + if (!undefined_constant_.is_set()) { + undefined_constant_.set( + ImmovableHeapConstant(factory()->undefined_value())); + } + return undefined_constant_.get(); +} + + +Node* JSGraph::TheHoleConstant() { + if (!the_hole_constant_.is_set()) { + the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value())); + } + return the_hole_constant_.get(); +} + + +Node* JSGraph::TrueConstant() { + if (!true_constant_.is_set()) { + true_constant_.set(ImmovableHeapConstant(factory()->true_value())); + } + return true_constant_.get(); +} + + +Node* JSGraph::FalseConstant() { + if (!false_constant_.is_set()) { + false_constant_.set(ImmovableHeapConstant(factory()->false_value())); + } + return false_constant_.get(); +} + + +Node* JSGraph::NullConstant() { + if (!null_constant_.is_set()) { + null_constant_.set(ImmovableHeapConstant(factory()->null_value())); + } + return null_constant_.get(); +} + + +Node* JSGraph::ZeroConstant() { + if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0)); + return zero_constant_.get(); +} + + +Node* JSGraph::OneConstant() { + if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0)); + return one_constant_.get(); +} + + +Node* JSGraph::NaNConstant() { + if (!nan_constant_.is_set()) { + nan_constant_.set(NumberConstant(base::OS::nan_value())); + } + return nan_constant_.get(); +} + + +Node* JSGraph::HeapConstant(PrintableUnique<Object> value) { + // TODO(turbofan): 
canonicalize heap constants using Unique<T> + return NewNode(common()->HeapConstant(value)); +} + + +Node* JSGraph::HeapConstant(Handle<Object> value) { + // TODO(titzer): We could also match against the addresses of immortable + // immovables here, even without access to the heap, thus always + // canonicalizing references to them. + return HeapConstant( + PrintableUnique<Object>::CreateUninitialized(zone(), value)); +} + + +Node* JSGraph::Constant(Handle<Object> value) { + // Dereference the handle to determine if a number constant or other + // canonicalized node can be used. + if (value->IsNumber()) { + return Constant(value->Number()); + } else if (value->IsUndefined()) { + return UndefinedConstant(); + } else if (value->IsTrue()) { + return TrueConstant(); + } else if (value->IsFalse()) { + return FalseConstant(); + } else if (value->IsNull()) { + return NullConstant(); + } else if (value->IsTheHole()) { + return TheHoleConstant(); + } else { + return HeapConstant(value); + } +} + + +Node* JSGraph::Constant(double value) { + if (BitCast<int64_t>(value) == BitCast<int64_t>(0.0)) return ZeroConstant(); + if (BitCast<int64_t>(value) == BitCast<int64_t>(1.0)) return OneConstant(); + return NumberConstant(value); +} + + +Node* JSGraph::Constant(int32_t value) { + if (value == 0) return ZeroConstant(); + if (value == 1) return OneConstant(); + return NumberConstant(value); +} + + +Node* JSGraph::Int32Constant(int32_t value) { + Node** loc = cache_.FindInt32Constant(value); + if (*loc == NULL) { + *loc = NewNode(common()->Int32Constant(value)); + } + return *loc; +} + + +Node* JSGraph::NumberConstant(double value) { + Node** loc = cache_.FindNumberConstant(value); + if (*loc == NULL) { + *loc = NewNode(common()->NumberConstant(value)); + } + return *loc; +} + + +Node* JSGraph::Float64Constant(double value) { + Node** loc = cache_.FindFloat64Constant(value); + if (*loc == NULL) { + *loc = NewNode(common()->Float64Constant(value)); + } + return *loc; +} + + +Node* 
JSGraph::ExternalConstant(ExternalReference reference) { + Node** loc = cache_.FindExternalConstant(reference); + if (*loc == NULL) { + *loc = NewNode(common()->ExternalConstant(reference)); + } + return *loc; +} +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-graph.h nodejs-0.11.15/deps/v8/src/compiler/js-graph.h --- nodejs-0.11.13/deps/v8/src/compiler/js-graph.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-graph.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,107 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_JS_GRAPH_H_ +#define V8_COMPILER_JS_GRAPH_H_ + +#include "src/compiler/common-node-cache.h" +#include "src/compiler/common-operator.h" +#include "src/compiler/graph.h" +#include "src/compiler/js-operator.h" +#include "src/compiler/node-properties.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Typer; + +// Implements a facade on a Graph, enhancing the graph with JS-specific +// notions, including a builder for for JS* operators, canonicalized global +// constants, and various helper methods. +class JSGraph : public ZoneObject { + public: + JSGraph(Graph* graph, CommonOperatorBuilder* common, Typer* typer) + : graph_(graph), + common_(common), + javascript_(zone()), + typer_(typer), + cache_(zone()) {} + + // Canonicalized global constants. + Node* UndefinedConstant(); + Node* TheHoleConstant(); + Node* TrueConstant(); + Node* FalseConstant(); + Node* NullConstant(); + Node* ZeroConstant(); + Node* OneConstant(); + Node* NaNConstant(); + + // Creates a HeapConstant node, possibly canonicalized, without inspecting the + // object. 
+ Node* HeapConstant(PrintableUnique<Object> value); + + // Creates a HeapConstant node, possibly canonicalized, and may access the + // heap to inspect the object. + Node* HeapConstant(Handle<Object> value); + + // Creates a Constant node of the appropriate type for the given object. + // Accesses the heap to inspect the object and determine whether one of the + // canonicalized globals or a number constant should be returned. + Node* Constant(Handle<Object> value); + + // Creates a NumberConstant node, usually canonicalized. + Node* Constant(double value); + + // Creates a NumberConstant node, usually canonicalized. + Node* Constant(int32_t value); + + // Creates a Int32Constant node, usually canonicalized. + Node* Int32Constant(int32_t value); + + // Creates a Float64Constant node, usually canonicalized. + Node* Float64Constant(double value); + + // Creates an ExternalConstant node, usually canonicalized. + Node* ExternalConstant(ExternalReference ref); + + Node* SmiConstant(int32_t immediate) { + DCHECK(Smi::IsValid(immediate)); + return Constant(immediate); + } + + JSOperatorBuilder* javascript() { return &javascript_; } + CommonOperatorBuilder* common() { return common_; } + Graph* graph() { return graph_; } + Zone* zone() { return graph()->zone(); } + + private: + Graph* graph_; + CommonOperatorBuilder* common_; + JSOperatorBuilder javascript_; + Typer* typer_; + + SetOncePointer<Node> undefined_constant_; + SetOncePointer<Node> the_hole_constant_; + SetOncePointer<Node> true_constant_; + SetOncePointer<Node> false_constant_; + SetOncePointer<Node> null_constant_; + SetOncePointer<Node> zero_constant_; + SetOncePointer<Node> one_constant_; + SetOncePointer<Node> nan_constant_; + + CommonNodeCache cache_; + + Node* ImmovableHeapConstant(Handle<Object> value); + Node* NumberConstant(double value); + Node* NewNode(Operator* op); + + Factory* factory() { return zone()->isolate()->factory(); } +}; +} // namespace compiler +} // namespace internal +} // namespace 
v8 + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-operator.h nodejs-0.11.15/deps/v8/src/compiler/js-operator.h --- nodejs-0.11.13/deps/v8/src/compiler/js-operator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-operator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,214 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_JS_OPERATOR_H_ +#define V8_COMPILER_JS_OPERATOR_H_ + +#include "src/compiler/linkage.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" +#include "src/unique.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Defines the location of a context slot relative to a specific scope. This is +// used as a parameter by JSLoadContext and JSStoreContext operators and allows +// accessing a context-allocated variable without keeping track of the scope. +class ContextAccess { + public: + ContextAccess(int depth, int index, bool immutable) + : immutable_(immutable), depth_(depth), index_(index) { + DCHECK(0 <= depth && depth <= kMaxUInt16); + DCHECK(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32); + } + int depth() const { return depth_; } + int index() const { return index_; } + bool immutable() const { return immutable_; } + + private: + // For space reasons, we keep this tightly packed, otherwise we could just use + // a simple int/int/bool POD. + const bool immutable_; + const uint16_t depth_; + const uint32_t index_; +}; + +// Defines the property being loaded from an object by a named load. This is +// used as a parameter by JSLoadNamed operators. +struct LoadNamedParameters { + PrintableUnique<Name> name; + ContextualMode contextual_mode; +}; + +// Defines the arity and the call flags for a JavaScript function call. This is +// used as a parameter by JSCall operators. 
+struct CallParameters { + int arity; + CallFunctionFlags flags; +}; + +// Interface for building JavaScript-level operators, e.g. directly from the +// AST. Most operators have no parameters, thus can be globally shared for all +// graphs. +class JSOperatorBuilder { + public: + explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {} + +#define SIMPLE(name, properties, inputs, outputs) \ + return new (zone_) \ + SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name); + +#define NOPROPS(name, inputs, outputs) \ + SIMPLE(name, Operator::kNoProperties, inputs, outputs) + +#define OP1(name, ptype, pname, properties, inputs, outputs) \ + return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \ + outputs, #name, pname) + +#define BINOP(name) NOPROPS(name, 2, 1) +#define UNOP(name) NOPROPS(name, 1, 1) + +#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1) + + Operator* Equal() { BINOP(JSEqual); } + Operator* NotEqual() { BINOP(JSNotEqual); } + Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); } + Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); } + Operator* LessThan() { BINOP(JSLessThan); } + Operator* GreaterThan() { BINOP(JSGreaterThan); } + Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); } + Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); } + Operator* BitwiseOr() { BINOP(JSBitwiseOr); } + Operator* BitwiseXor() { BINOP(JSBitwiseXor); } + Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); } + Operator* ShiftLeft() { BINOP(JSShiftLeft); } + Operator* ShiftRight() { BINOP(JSShiftRight); } + Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); } + Operator* Add() { BINOP(JSAdd); } + Operator* Subtract() { BINOP(JSSubtract); } + Operator* Multiply() { BINOP(JSMultiply); } + Operator* Divide() { BINOP(JSDivide); } + Operator* Modulus() { BINOP(JSModulus); } + + Operator* UnaryNot() { UNOP(JSUnaryNot); } + Operator* ToBoolean() { UNOP(JSToBoolean); } + Operator* ToNumber() { UNOP(JSToNumber); } 
+ Operator* ToString() { UNOP(JSToString); } + Operator* ToName() { UNOP(JSToName); } + Operator* ToObject() { UNOP(JSToObject); } + Operator* Yield() { UNOP(JSYield); } + + Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); } + + Operator* Call(int arguments, CallFunctionFlags flags) { + CallParameters parameters = {arguments, flags}; + OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties, + arguments, 1); + } + + Operator* CallNew(int arguments) { + return new (zone_) + Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties, + arguments, 1, "JSCallConstruct", arguments); + } + + Operator* LoadProperty() { BINOP(JSLoadProperty); } + Operator* LoadNamed(PrintableUnique<Name> name, + ContextualMode contextual_mode = NOT_CONTEXTUAL) { + LoadNamedParameters parameters = {name, contextual_mode}; + OP1(JSLoadNamed, LoadNamedParameters, parameters, Operator::kNoProperties, + 1, 1); + } + + Operator* StoreProperty() { NOPROPS(JSStoreProperty, 3, 0); } + Operator* StoreNamed(PrintableUnique<Name> name) { + OP1(JSStoreNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 2, + 0); + } + + Operator* DeleteProperty(StrictMode strict_mode) { + OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2, + 1); + } + + Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); } + + Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) { + ContextAccess access(depth, index, immutable); + OP1(JSLoadContext, ContextAccess, access, + Operator::kEliminatable | Operator::kNoWrite, 1, 1); + } + Operator* StoreContext(uint16_t depth, uint32_t index) { + ContextAccess access(depth, index, false); + OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 1); + } + + Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); } + Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); } + Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); } + + // TODO(titzer): nail down the static parts of 
each of these context flavors. + Operator* CreateFunctionContext() { NOPROPS(JSCreateFunctionContext, 1, 1); } + Operator* CreateCatchContext(PrintableUnique<String> name) { + OP1(JSCreateCatchContext, PrintableUnique<String>, name, + Operator::kNoProperties, 1, 1); + } + Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); } + Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); } + Operator* CreateModuleContext() { NOPROPS(JSCreateModuleContext, 2, 1); } + Operator* CreateGlobalContext() { NOPROPS(JSCreateGlobalContext, 2, 1); } + + Operator* Runtime(Runtime::FunctionId function, int arguments) { + const Runtime::Function* f = Runtime::FunctionForId(function); + DCHECK(f->nargs == -1 || f->nargs == arguments); + OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties, + arguments, f->result_size); + } + +#undef SIMPLE +#undef NOPROPS +#undef OP1 +#undef BINOP +#undef UNOP + + private: + Zone* zone_; +}; + +// Specialization for static parameters of type {ContextAccess}. +template <> +struct StaticParameterTraits<ContextAccess> { + static OStream& PrintTo(OStream& os, ContextAccess val) { // NOLINT + return os << val.depth() << "," << val.index() + << (val.immutable() ? ",imm" : ""); + } + static int HashCode(ContextAccess val) { + return (val.depth() << 16) | (val.index() & 0xffff); + } + static bool Equals(ContextAccess a, ContextAccess b) { + return a.immutable() == b.immutable() && a.depth() == b.depth() && + a.index() == b.index(); + } +}; + +// Specialization for static parameters of type {Runtime::FunctionId}. +template <> +struct StaticParameterTraits<Runtime::FunctionId> { + static OStream& PrintTo(OStream& os, Runtime::FunctionId val) { // NOLINT + const Runtime::Function* f = Runtime::FunctionForId(val); + return os << (f->name ? 
f->name : "?Runtime?"); + } + static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); } + static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) { + return a == b; + } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_JS_OPERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-typed-lowering.cc nodejs-0.11.15/deps/v8/src/compiler/js-typed-lowering.cc --- nodejs-0.11.13/deps/v8/src/compiler/js-typed-lowering.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-typed-lowering.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,604 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/graph-inl.h" +#include "src/compiler/js-typed-lowering.h" +#include "src/compiler/node-aux-data-inl.h" +#include "src/compiler/node-properties-inl.h" +#include "src/types.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// TODO(turbofan): js-typed-lowering improvements possible +// - immediately put in type bounds for all new nodes +// - relax effects from generic but not-side-effecting operations +// - relax effects for ToNumber(mixed) + +// Replace value uses of {node} with {value} and effect uses of {node} with +// {effect}. If {effect == NULL}, then use the effect input to {node}. +// TODO(titzer): move into a GraphEditor? +static void ReplaceUses(Node* node, Node* value, Node* effect) { + if (value == effect) { + // Effect and value updates are the same; no special iteration needed. + if (value != node) node->ReplaceUses(value); + return; + } + + if (effect == NULL) effect = NodeProperties::GetEffectInput(node); + + // The iteration requires distinguishing between value and effect edges. 
+ UseIter iter = node->uses().begin(); + while (iter != node->uses().end()) { + if (NodeProperties::IsEffectEdge(iter.edge())) { + iter = iter.UpdateToAndIncrement(effect); + } else { + iter = iter.UpdateToAndIncrement(value); + } + } +} + + +// Relax the effects of {node} by immediately replacing effect uses of {node} +// with the effect input to {node}. +// TODO(turbofan): replace the effect input to {node} with {graph->start()}. +// TODO(titzer): move into a GraphEditor? +static void RelaxEffects(Node* node) { ReplaceUses(node, node, NULL); } + + +Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) { + ReplaceUses(old, node, node); + return Reducer::Changed(node); +} + + +// A helper class to simplify the process of reducing a single binop node with a +// JSOperator. This class manages the rewriting of context, control, and effect +// dependencies during lowering of a binop and contains numerous helper +// functions for matching the types of inputs to an operation. +class JSBinopReduction { + public: + JSBinopReduction(JSTypedLowering* lowering, Node* node) + : lowering_(lowering), + node_(node), + left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper), + right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {} + + void ConvertInputsToNumber() { + node_->ReplaceInput(0, ConvertToNumber(left())); + node_->ReplaceInput(1, ConvertToNumber(right())); + } + + void ConvertInputsToInt32(bool left_signed, bool right_signed) { + node_->ReplaceInput(0, ConvertToI32(left_signed, left())); + node_->ReplaceInput(1, ConvertToI32(right_signed, right())); + } + + void ConvertInputsToString() { + node_->ReplaceInput(0, ConvertToString(left())); + node_->ReplaceInput(1, ConvertToString(right())); + } + + // Convert inputs for bitwise shift operation (ES5 spec 11.7). 
+ void ConvertInputsForShift(bool left_signed) { + node_->ReplaceInput(0, ConvertToI32(left_signed, left())); + Node* rnum = ConvertToI32(false, right()); + node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum, + jsgraph()->Int32Constant(0x1F))); + } + + void SwapInputs() { + Node* l = left(); + Node* r = right(); + node_->ReplaceInput(0, r); + node_->ReplaceInput(1, l); + std::swap(left_type_, right_type_); + } + + // Remove all effect and control inputs and outputs to this node and change + // to the pure operator {op}, possibly inserting a boolean inversion. + Reduction ChangeToPureOperator(Operator* op, bool invert = false) { + DCHECK_EQ(0, OperatorProperties::GetEffectInputCount(op)); + DCHECK_EQ(false, OperatorProperties::HasContextInput(op)); + DCHECK_EQ(0, OperatorProperties::GetControlInputCount(op)); + DCHECK_EQ(2, OperatorProperties::GetValueInputCount(op)); + + // Remove the effects from the node, if any, and update its effect usages. + if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) { + RelaxEffects(node_); + } + // Remove the inputs corresponding to context, effect, and control. + NodeProperties::RemoveNonValueInputs(node_); + // Finally, update the operator to the new one. + node_->set_op(op); + + if (invert) { + // Insert an boolean not to invert the value. + Node* value = graph()->NewNode(simplified()->BooleanNot(), node_); + node_->ReplaceUses(value); + // Note: ReplaceUses() smashes all uses, so smash it back here. 
+ value->ReplaceInput(0, node_); + return lowering_->ReplaceWith(value); + } + return lowering_->Changed(node_); + } + + bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); } + + bool BothInputsAre(Type* t) { + return left_type_->Is(t) && right_type_->Is(t); + } + + bool OneInputCannotBe(Type* t) { + return !left_type_->Maybe(t) || !right_type_->Maybe(t); + } + + bool NeitherInputCanBe(Type* t) { + return !left_type_->Maybe(t) && !right_type_->Maybe(t); + } + + Node* effect() { return NodeProperties::GetEffectInput(node_); } + Node* control() { return NodeProperties::GetControlInput(node_); } + Node* context() { return NodeProperties::GetContextInput(node_); } + Node* left() { return NodeProperties::GetValueInput(node_, 0); } + Node* right() { return NodeProperties::GetValueInput(node_, 1); } + Type* left_type() { return left_type_; } + Type* right_type() { return right_type_; } + + SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); } + Graph* graph() { return lowering_->graph(); } + JSGraph* jsgraph() { return lowering_->jsgraph(); } + JSOperatorBuilder* javascript() { return lowering_->javascript(); } + MachineOperatorBuilder* machine() { return lowering_->machine(); } + + private: + JSTypedLowering* lowering_; // The containing lowering instance. + Node* node_; // The original node. + Type* left_type_; // Cache of the left input's type. + Type* right_type_; // Cache of the right input's type. + + Node* ConvertToString(Node* node) { + // Avoid introducing too many eager ToString() operations. + Reduction reduced = lowering_->ReduceJSToStringInput(node); + if (reduced.Changed()) return reduced.replacement(); + Node* n = graph()->NewNode(javascript()->ToString(), node, context(), + effect(), control()); + update_effect(n); + return n; + } + + Node* ConvertToNumber(Node* node) { + // Avoid introducing too many eager ToNumber() operations. 
+ Reduction reduced = lowering_->ReduceJSToNumberInput(node); + if (reduced.Changed()) return reduced.replacement(); + Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(), + effect(), control()); + update_effect(n); + return n; + } + + // Try to narrowing a double or number operation to an Int32 operation. + bool TryNarrowingToI32(Type* type, Node* node) { + switch (node->opcode()) { + case IrOpcode::kFloat64Add: + case IrOpcode::kNumberAdd: { + JSBinopReduction r(lowering_, node); + if (r.BothInputsAre(Type::Integral32())) { + node->set_op(lowering_->machine()->Int32Add()); + // TODO(titzer): narrow bounds instead of overwriting. + NodeProperties::SetBounds(node, Bounds(type)); + return true; + } + } + case IrOpcode::kFloat64Sub: + case IrOpcode::kNumberSubtract: { + JSBinopReduction r(lowering_, node); + if (r.BothInputsAre(Type::Integral32())) { + node->set_op(lowering_->machine()->Int32Sub()); + // TODO(titzer): narrow bounds instead of overwriting. + NodeProperties::SetBounds(node, Bounds(type)); + return true; + } + } + default: + return false; + } + } + + Node* ConvertToI32(bool is_signed, Node* node) { + Type* type = is_signed ? Type::Signed32() : Type::Unsigned32(); + if (node->OwnedBy(node_)) { + // If this node {node_} has the only edge to {node}, then try narrowing + // its operation to an Int32 add or subtract. + if (TryNarrowingToI32(type, node)) return node; + } else { + // Otherwise, {node} has multiple uses. Leave it as is and let the + // further lowering passes deal with it, which use a full backwards + // fixpoint. + } + + // Avoid introducing too many eager NumberToXXnt32() operations. + node = ConvertToNumber(node); + Type* input_type = NodeProperties::GetBounds(node).upper; + + if (input_type->Is(type)) return node; // already in the value range. + + Operator* op = is_signed ? 
simplified()->NumberToInt32() + : simplified()->NumberToUint32(); + Node* n = graph()->NewNode(op, node); + return n; + } + + void update_effect(Node* effect) { + NodeProperties::ReplaceEffectInput(node_, effect); + } +}; + + +Reduction JSTypedLowering::ReduceJSAdd(Node* node) { + JSBinopReduction r(this, node); + if (r.OneInputIs(Type::String())) { + r.ConvertInputsToString(); + return r.ChangeToPureOperator(simplified()->StringAdd()); + } else if (r.NeitherInputCanBe(Type::String())) { + r.ConvertInputsToNumber(); + return r.ChangeToPureOperator(simplified()->NumberAdd()); + } + return NoChange(); +} + + +Reduction JSTypedLowering::ReduceNumberBinop(Node* node, Operator* numberOp) { + JSBinopReduction r(this, node); + if (r.OneInputIs(Type::Primitive())) { + // If at least one input is a primitive, then insert appropriate conversions + // to number and reduce this operator to the given numeric one. + // TODO(turbofan): make this heuristic configurable for code size. + r.ConvertInputsToNumber(); + return r.ChangeToPureOperator(numberOp); + } + // TODO(turbofan): relax/remove the effects of this operator in other cases. + return NoChange(); +} + + +Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed, + bool right_signed, Operator* intOp) { + JSBinopReduction r(this, node); + // TODO(titzer): some Smi bitwise operations don't really require going + // all the way to int32, which can save tagging/untagging for some operations + // on some platforms. + // TODO(turbofan): make this heuristic configurable for code size. 
+ r.ConvertInputsToInt32(left_signed, right_signed); + return r.ChangeToPureOperator(intOp); +} + + +Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed, + Operator* shift_op) { + JSBinopReduction r(this, node); + r.ConvertInputsForShift(left_signed); + return r.ChangeToPureOperator(shift_op); +} + + +Reduction JSTypedLowering::ReduceJSComparison(Node* node) { + JSBinopReduction r(this, node); + if (r.BothInputsAre(Type::String())) { + // If both inputs are definitely strings, perform a string comparison. + Operator* stringOp; + switch (node->opcode()) { + case IrOpcode::kJSLessThan: + stringOp = simplified()->StringLessThan(); + break; + case IrOpcode::kJSGreaterThan: + stringOp = simplified()->StringLessThan(); + r.SwapInputs(); // a > b => b < a + break; + case IrOpcode::kJSLessThanOrEqual: + stringOp = simplified()->StringLessThanOrEqual(); + break; + case IrOpcode::kJSGreaterThanOrEqual: + stringOp = simplified()->StringLessThanOrEqual(); + r.SwapInputs(); // a >= b => b <= a + break; + default: + return NoChange(); + } + return r.ChangeToPureOperator(stringOp); + } else if (r.OneInputCannotBe(Type::String())) { + // If one input cannot be a string, then emit a number comparison. + Operator* less_than; + Operator* less_than_or_equal; + if (r.BothInputsAre(Type::Unsigned32())) { + less_than = machine()->Uint32LessThan(); + less_than_or_equal = machine()->Uint32LessThanOrEqual(); + } else if (r.BothInputsAre(Type::Signed32())) { + less_than = machine()->Int32LessThan(); + less_than_or_equal = machine()->Int32LessThanOrEqual(); + } else { + // TODO(turbofan): mixed signed/unsigned int32 comparisons. 
+ r.ConvertInputsToNumber(); + less_than = simplified()->NumberLessThan(); + less_than_or_equal = simplified()->NumberLessThanOrEqual(); + } + Operator* comparison; + switch (node->opcode()) { + case IrOpcode::kJSLessThan: + comparison = less_than; + break; + case IrOpcode::kJSGreaterThan: + comparison = less_than; + r.SwapInputs(); // a > b => b < a + break; + case IrOpcode::kJSLessThanOrEqual: + comparison = less_than_or_equal; + break; + case IrOpcode::kJSGreaterThanOrEqual: + comparison = less_than_or_equal; + r.SwapInputs(); // a >= b => b <= a + break; + default: + return NoChange(); + } + return r.ChangeToPureOperator(comparison); + } + // TODO(turbofan): relax/remove effects of this operator in other cases. + return NoChange(); // Keep a generic comparison. +} + + +Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) { + JSBinopReduction r(this, node); + + if (r.BothInputsAre(Type::Number())) { + return r.ChangeToPureOperator(simplified()->NumberEqual(), invert); + } + if (r.BothInputsAre(Type::String())) { + return r.ChangeToPureOperator(simplified()->StringEqual(), invert); + } + if (r.BothInputsAre(Type::Receiver())) { + return r.ChangeToPureOperator( + simplified()->ReferenceEqual(Type::Receiver()), invert); + } + // TODO(turbofan): js-typed-lowering of Equal(undefined) + // TODO(turbofan): js-typed-lowering of Equal(null) + // TODO(turbofan): js-typed-lowering of Equal(boolean) + return NoChange(); +} + + +Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) { + JSBinopReduction r(this, node); + if (r.left() == r.right()) { + // x === x is always true if x != NaN + if (!r.left_type()->Maybe(Type::NaN())) { + return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant() + : jsgraph()->TrueConstant()); + } + } + if (!r.left_type()->Maybe(r.right_type())) { + // Type intersection is empty; === is always false unless both + // inputs could be strings (one internalized and one not). 
+ if (r.OneInputCannotBe(Type::String())) { + return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant() + : jsgraph()->FalseConstant()); + } + } + if (r.OneInputIs(Type::Undefined())) { + return r.ChangeToPureOperator( + simplified()->ReferenceEqual(Type::Undefined()), invert); + } + if (r.OneInputIs(Type::Null())) { + return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()), + invert); + } + if (r.OneInputIs(Type::Boolean())) { + return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()), + invert); + } + if (r.OneInputIs(Type::Object())) { + return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()), + invert); + } + if (r.OneInputIs(Type::Receiver())) { + return r.ChangeToPureOperator( + simplified()->ReferenceEqual(Type::Receiver()), invert); + } + if (r.BothInputsAre(Type::String())) { + return r.ChangeToPureOperator(simplified()->StringEqual(), invert); + } + if (r.BothInputsAre(Type::Number())) { + return r.ChangeToPureOperator(simplified()->NumberEqual(), invert); + } + // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types) + return NoChange(); +} + + +Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) { + if (input->opcode() == IrOpcode::kJSToNumber) { + // Recursively try to reduce the input first. 
+ Reduction result = ReduceJSToNumberInput(input->InputAt(0)); + if (result.Changed()) { + RelaxEffects(input); + return result; + } + return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x) + } + Type* input_type = NodeProperties::GetBounds(input).upper; + if (input_type->Is(Type::Number())) { + // JSToNumber(number) => x + return Changed(input); + } + if (input_type->Is(Type::Undefined())) { + // JSToNumber(undefined) => #NaN + return ReplaceWith(jsgraph()->NaNConstant()); + } + if (input_type->Is(Type::Null())) { + // JSToNumber(null) => #0 + return ReplaceWith(jsgraph()->ZeroConstant()); + } + // TODO(turbofan): js-typed-lowering of ToNumber(boolean) + // TODO(turbofan): js-typed-lowering of ToNumber(string) + return NoChange(); +} + + +Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) { + if (input->opcode() == IrOpcode::kJSToString) { + // Recursively try to reduce the input first. + Reduction result = ReduceJSToStringInput(input->InputAt(0)); + if (result.Changed()) { + RelaxEffects(input); + return result; + } + return Changed(input); // JSToString(JSToString(x)) => JSToString(x) + } + Type* input_type = NodeProperties::GetBounds(input).upper; + if (input_type->Is(Type::String())) { + return Changed(input); // JSToString(string) => x + } + if (input_type->Is(Type::Undefined())) { + return ReplaceWith(jsgraph()->HeapConstant( + graph()->zone()->isolate()->factory()->undefined_string())); + } + if (input_type->Is(Type::Null())) { + return ReplaceWith(jsgraph()->HeapConstant( + graph()->zone()->isolate()->factory()->null_string())); + } + // TODO(turbofan): js-typed-lowering of ToString(boolean) + // TODO(turbofan): js-typed-lowering of ToString(number) + return NoChange(); +} + + +Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) { + if (input->opcode() == IrOpcode::kJSToBoolean) { + // Recursively try to reduce the input first. 
+ Reduction result = ReduceJSToBooleanInput(input->InputAt(0)); + if (result.Changed()) { + RelaxEffects(input); + return result; + } + return Changed(input); // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x) + } + Type* input_type = NodeProperties::GetBounds(input).upper; + if (input_type->Is(Type::Boolean())) { + return Changed(input); // JSToBoolean(boolean) => x + } + if (input_type->Is(Type::Undefined())) { + // JSToBoolean(undefined) => #false + return ReplaceWith(jsgraph()->FalseConstant()); + } + if (input_type->Is(Type::Null())) { + // JSToBoolean(null) => #false + return ReplaceWith(jsgraph()->FalseConstant()); + } + if (input_type->Is(Type::DetectableReceiver())) { + // JSToBoolean(detectable) => #true + return ReplaceWith(jsgraph()->TrueConstant()); + } + if (input_type->Is(Type::Undetectable())) { + // JSToBoolean(undetectable) => #false + return ReplaceWith(jsgraph()->FalseConstant()); + } + if (input_type->Is(Type::Number())) { + // JSToBoolean(number) => BooleanNot(NumberEqual(x, #0)) + Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input, + jsgraph()->ZeroConstant()); + Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp); + ReplaceEagerly(input, inv); + // TODO(titzer): Ugly. ReplaceEagerly smashes all uses. Smash it back here. 
+ cmp->ReplaceInput(0, input); + return Changed(inv); + } + // TODO(turbofan): js-typed-lowering of ToBoolean(string) + return NoChange(); +} + + +static Reduction ReplaceWithReduction(Node* node, Reduction reduction) { + if (reduction.Changed()) { + ReplaceUses(node, reduction.replacement(), NULL); + return reduction; + } + return Reducer::NoChange(); +} + + +Reduction JSTypedLowering::Reduce(Node* node) { + switch (node->opcode()) { + case IrOpcode::kJSEqual: + return ReduceJSEqual(node, false); + case IrOpcode::kJSNotEqual: + return ReduceJSEqual(node, true); + case IrOpcode::kJSStrictEqual: + return ReduceJSStrictEqual(node, false); + case IrOpcode::kJSStrictNotEqual: + return ReduceJSStrictEqual(node, true); + case IrOpcode::kJSLessThan: // fall through + case IrOpcode::kJSGreaterThan: // fall through + case IrOpcode::kJSLessThanOrEqual: // fall through + case IrOpcode::kJSGreaterThanOrEqual: + return ReduceJSComparison(node); + case IrOpcode::kJSBitwiseOr: + return ReduceI32Binop(node, true, true, machine()->Word32Or()); + case IrOpcode::kJSBitwiseXor: + return ReduceI32Binop(node, true, true, machine()->Word32Xor()); + case IrOpcode::kJSBitwiseAnd: + return ReduceI32Binop(node, true, true, machine()->Word32And()); + case IrOpcode::kJSShiftLeft: + return ReduceI32Shift(node, true, machine()->Word32Shl()); + case IrOpcode::kJSShiftRight: + return ReduceI32Shift(node, true, machine()->Word32Sar()); + case IrOpcode::kJSShiftRightLogical: + return ReduceI32Shift(node, false, machine()->Word32Shr()); + case IrOpcode::kJSAdd: + return ReduceJSAdd(node); + case IrOpcode::kJSSubtract: + return ReduceNumberBinop(node, simplified()->NumberSubtract()); + case IrOpcode::kJSMultiply: + return ReduceNumberBinop(node, simplified()->NumberMultiply()); + case IrOpcode::kJSDivide: + return ReduceNumberBinop(node, simplified()->NumberDivide()); + case IrOpcode::kJSModulus: + return ReduceNumberBinop(node, simplified()->NumberModulus()); + case IrOpcode::kJSUnaryNot: { + 
Reduction result = ReduceJSToBooleanInput(node->InputAt(0)); + Node* value; + if (result.Changed()) { + // !x => BooleanNot(x) + value = + graph()->NewNode(simplified()->BooleanNot(), result.replacement()); + ReplaceUses(node, value, NULL); + return Changed(value); + } else { + // !x => BooleanNot(JSToBoolean(x)) + value = graph()->NewNode(simplified()->BooleanNot(), node); + node->set_op(javascript()->ToBoolean()); + ReplaceUses(node, value, node); + // Note: ReplaceUses() smashes all uses, so smash it back here. + value->ReplaceInput(0, node); + return ReplaceWith(value); + } + } + case IrOpcode::kJSToBoolean: + return ReplaceWithReduction(node, + ReduceJSToBooleanInput(node->InputAt(0))); + case IrOpcode::kJSToNumber: + return ReplaceWithReduction(node, + ReduceJSToNumberInput(node->InputAt(0))); + case IrOpcode::kJSToString: + return ReplaceWithReduction(node, + ReduceJSToStringInput(node->InputAt(0))); + default: + break; + } + return NoChange(); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/js-typed-lowering.h nodejs-0.11.15/deps/v8/src/compiler/js-typed-lowering.h --- nodejs-0.11.13/deps/v8/src/compiler/js-typed-lowering.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/js-typed-lowering.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,67 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_OPERATOR_REDUCERS_H_ +#define V8_COMPILER_OPERATOR_REDUCERS_H_ + +#include "src/compiler/graph-reducer.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/lowering-builder.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Lowers JS-level operators to simplified operators based on types. 
+class JSTypedLowering : public LoweringBuilder { + public: + explicit JSTypedLowering(JSGraph* jsgraph, + SourcePositionTable* source_positions) + : LoweringBuilder(jsgraph->graph(), source_positions), + jsgraph_(jsgraph), + simplified_(jsgraph->zone()), + machine_(jsgraph->zone()) {} + virtual ~JSTypedLowering() {} + + Reduction Reduce(Node* node); + virtual void Lower(Node* node) { Reduce(node); } + + JSGraph* jsgraph() { return jsgraph_; } + Graph* graph() { return jsgraph_->graph(); } + + private: + friend class JSBinopReduction; + JSGraph* jsgraph_; + SimplifiedOperatorBuilder simplified_; + MachineOperatorBuilder machine_; + + Reduction ReplaceEagerly(Node* old, Node* node); + Reduction NoChange() { return Reducer::NoChange(); } + Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); } + Reduction Changed(Node* node) { return Reducer::Changed(node); } + Reduction ReduceJSAdd(Node* node); + Reduction ReduceJSComparison(Node* node); + Reduction ReduceJSEqual(Node* node, bool invert); + Reduction ReduceJSStrictEqual(Node* node, bool invert); + Reduction ReduceJSToNumberInput(Node* input); + Reduction ReduceJSToStringInput(Node* input); + Reduction ReduceJSToBooleanInput(Node* input); + Reduction ReduceNumberBinop(Node* node, Operator* numberOp); + Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed, + Operator* intOp); + Reduction ReduceI32Shift(Node* node, bool left_signed, Operator* shift_op); + + JSOperatorBuilder* javascript() { return jsgraph_->javascript(); } + CommonOperatorBuilder* common() { return jsgraph_->common(); } + SimplifiedOperatorBuilder* simplified() { return &simplified_; } + MachineOperatorBuilder* machine() { return &machine_; } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_OPERATOR_REDUCERS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/linkage.cc nodejs-0.11.15/deps/v8/src/compiler/linkage.cc --- nodejs-0.11.13/deps/v8/src/compiler/linkage.cc 1970-01-01 00:00:00.000000000 
+0000 +++ nodejs-0.11.15/deps/v8/src/compiler/linkage.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,149 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/linkage.h" + +#include "src/code-stubs.h" +#include "src/compiler.h" +#include "src/compiler/node.h" +#include "src/compiler/pipeline.h" +#include "src/scopes.h" + +namespace v8 { +namespace internal { +namespace compiler { + + +OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) { + switch (k) { + case CallDescriptor::kCallCodeObject: + os << "Code"; + break; + case CallDescriptor::kCallJSFunction: + os << "JS"; + break; + case CallDescriptor::kCallAddress: + os << "Addr"; + break; + } + return os; +} + + +OStream& operator<<(OStream& os, const CallDescriptor& d) { + // TODO(svenpanne) Output properties etc. and be less cryptic. + return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount() + << "p" << d.ParameterCount() << "i" << d.InputCount() + << (d.CanLazilyDeoptimize() ? "deopt" : ""); +} + + +Linkage::Linkage(CompilationInfo* info) : info_(info) { + if (info->function() != NULL) { + // If we already have the function literal, use the number of parameters + // plus the receiver. + incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count()); + } else if (!info->closure().is_null()) { + // If we are compiling a JS function, use a JS call descriptor, + // plus the receiver. + SharedFunctionInfo* shared = info->closure()->shared(); + incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count()); + } else if (info->code_stub() != NULL) { + // Use the code stub interface descriptor. 
+ HydrogenCodeStub* stub = info->code_stub(); + CodeStubInterfaceDescriptor* descriptor = + info_->isolate()->code_stub_interface_descriptor(stub->MajorKey()); + incoming_ = GetStubCallDescriptor(descriptor); + } else { + incoming_ = NULL; // TODO(titzer): ? + } +} + + +FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) { + if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() || + incoming_->kind() == CallDescriptor::kCallAddress) { + int offset; + int register_save_area_size = frame->GetRegisterSaveAreaSize(); + if (spill_slot >= 0) { + // Local or spill slot. Skip the frame pointer, function, and + // context in the fixed part of the frame. + offset = + -(spill_slot + 1) * kPointerSize - register_save_area_size + extra; + } else { + // Incoming parameter. Skip the return address. + offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize + + kPCOnStackSize + extra; + } + return FrameOffset::FromFramePointer(offset); + } else { + // No frame. Retrieve all parameters relative to stack pointer. + DCHECK(spill_slot < 0); // Must be a parameter. 
+ int register_save_area_size = frame->GetRegisterSaveAreaSize(); + int offset = register_save_area_size - (spill_slot + 1) * kPointerSize + + kPCOnStackSize + extra; + return FrameOffset::FromStackPointer(offset); + } +} + + +CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) { + return GetJSCallDescriptor(parameter_count, this->info_->zone()); +} + + +CallDescriptor* Linkage::GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize) { + return GetRuntimeCallDescriptor(function, parameter_count, properties, + can_deoptimize, this->info_->zone()); +} + + +CallDescriptor* Linkage::GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize) { + return GetStubCallDescriptor(descriptor, stack_parameter_count, + can_deoptimize, this->info_->zone()); +} + + +//============================================================================== +// Provide unimplemented methods on unsupported architectures, to at least link. 
+//============================================================================== +#if !V8_TURBOFAN_BACKEND +CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) { + UNIMPLEMENTED(); + return NULL; +} + + +CallDescriptor* Linkage::GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + UNIMPLEMENTED(); + return NULL; +} + + +CallDescriptor* Linkage::GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + UNIMPLEMENTED(); + return NULL; +} + + +CallDescriptor* Linkage::GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types) { + UNIMPLEMENTED(); + return NULL; +} +#endif // !V8_TURBOFAN_BACKEND +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/linkage.h nodejs-0.11.15/deps/v8/src/compiler/linkage.h --- nodejs-0.11.13/deps/v8/src/compiler/linkage.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/linkage.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,193 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_LINKAGE_H_ +#define V8_COMPILER_LINKAGE_H_ + +#include "src/v8.h" + +#include "src/code-stubs.h" +#include "src/compiler/frame.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/operator.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Describes the location for a parameter or a return value to a call. +// TODO(titzer): replace with Radium locations when they are ready. 
+class LinkageLocation { + public: + LinkageLocation(MachineType rep, int location) + : rep_(rep), location_(location) {} + + inline MachineType representation() const { return rep_; } + + static const int16_t ANY_REGISTER = 32767; + + private: + friend class CallDescriptor; + friend class OperandGenerator; + MachineType rep_; + int16_t location_; // >= 0 implies register, otherwise stack slot. +}; + + +class CallDescriptor : public ZoneObject { + public: + // Describes whether the first parameter is a code object, a JSFunction, + // or an address--all of which require different machine sequences to call. + enum Kind { kCallCodeObject, kCallJSFunction, kCallAddress }; + + enum DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize }; + + CallDescriptor(Kind kind, int8_t return_count, int16_t parameter_count, + int16_t input_count, LinkageLocation* locations, + Operator::Property properties, RegList callee_saved_registers, + DeoptimizationSupport deoptimization_support, + const char* debug_name = "") + : kind_(kind), + return_count_(return_count), + parameter_count_(parameter_count), + input_count_(input_count), + locations_(locations), + properties_(properties), + callee_saved_registers_(callee_saved_registers), + deoptimization_support_(deoptimization_support), + debug_name_(debug_name) {} + // Returns the kind of this call. + Kind kind() const { return kind_; } + + // Returns {true} if this descriptor is a call to a JSFunction. + bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; } + + // The number of return values from this call, usually 0 or 1. + int ReturnCount() const { return return_count_; } + + // The number of JavaScript parameters to this call, including receiver, + // but not the context. 
+ int ParameterCount() const { return parameter_count_; } + + int InputCount() const { return input_count_; } + + bool CanLazilyDeoptimize() const { + return deoptimization_support_ == kCanDeoptimize; + } + + LinkageLocation GetReturnLocation(int index) { + DCHECK(index < return_count_); + return locations_[0 + index]; // return locations start at 0. + } + + LinkageLocation GetInputLocation(int index) { + DCHECK(index < input_count_ + 1); // input_count + 1 is the context. + return locations_[return_count_ + index]; // inputs start after returns. + } + + // Operator properties describe how this call can be optimized, if at all. + Operator::Property properties() const { return properties_; } + + // Get the callee-saved registers, if any, across this call. + RegList CalleeSavedRegisters() { return callee_saved_registers_; } + + const char* debug_name() const { return debug_name_; } + + private: + friend class Linkage; + + Kind kind_; + int8_t return_count_; + int16_t parameter_count_; + int16_t input_count_; + LinkageLocation* locations_; + Operator::Property properties_; + RegList callee_saved_registers_; + DeoptimizationSupport deoptimization_support_; + const char* debug_name_; +}; + +OStream& operator<<(OStream& os, const CallDescriptor& d); +OStream& operator<<(OStream& os, const CallDescriptor::Kind& k); + +// Defines the linkage for a compilation, including the calling conventions +// for incoming parameters and return value(s) as well as the outgoing calling +// convention for any kind of call. Linkage is generally architecture-specific. +// +// Can be used to translate {arg_index} (i.e. index of the call node input) as +// well as {param_index} (i.e. as stored in parameter nodes) into an operator +// representing the architecture-specific location. The following call node +// layouts are supported (where {n} is the number value inputs): +// +// #0 #1 #2 #3 [...] 
#n +// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context +// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], context +// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context +class Linkage : public ZoneObject { + public: + explicit Linkage(CompilationInfo* info); + explicit Linkage(CompilationInfo* info, CallDescriptor* incoming) + : info_(info), incoming_(incoming) {} + + // The call descriptor for this compilation unit describes the locations + // of incoming parameters and the outgoing return value(s). + CallDescriptor* GetIncomingDescriptor() { return incoming_; } + CallDescriptor* GetJSCallDescriptor(int parameter_count); + static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone); + CallDescriptor* GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize = + CallDescriptor::kCannotDeoptimize); + static CallDescriptor* GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone); + + CallDescriptor* GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count = 0, + CallDescriptor::DeoptimizationSupport can_deoptimize = + CallDescriptor::kCannotDeoptimize); + static CallDescriptor* GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone); + + // Creates a call descriptor for simplified C calls that is appropriate + // for the host platform. This simplified calling convention only supports + // integers and pointers of one word size each, i.e. no floating point, + // structs, pointers to members, etc. 
+ static CallDescriptor* GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types); + + // Get the location of an (incoming) parameter to this function. + LinkageLocation GetParameterLocation(int index) { + return incoming_->GetInputLocation(index + 1); + } + + // Get the location where this function should place its return value. + LinkageLocation GetReturnLocation() { + return incoming_->GetReturnLocation(0); + } + + // Get the frame offset for a given spill slot. The location depends on the + // calling convention and the specific frame layout, and may thus be + // architecture-specific. Negative spill slots indicate arguments on the + // caller's frame. The {extra} parameter indicates an additional offset from + // the frame offset, e.g. to index into part of a double slot. + FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0); + + CompilationInfo* info() const { return info_; } + + private: + CompilationInfo* info_; + CallDescriptor* incoming_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_LINKAGE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/linkage-impl.h nodejs-0.11.15/deps/v8/src/compiler/linkage-impl.h --- nodejs-0.11.13/deps/v8/src/compiler/linkage-impl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/linkage-impl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,206 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_LINKAGE_IMPL_H_ +#define V8_COMPILER_LINKAGE_IMPL_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +class LinkageHelper { + public: + static LinkageLocation TaggedStackSlot(int index) { + DCHECK(index < 0); + return LinkageLocation(kMachineTagged, index); + } + + static LinkageLocation TaggedRegisterLocation(Register reg) { + return LinkageLocation(kMachineTagged, Register::ToAllocationIndex(reg)); + } + + static inline LinkageLocation WordRegisterLocation(Register reg) { + return LinkageLocation(MachineOperatorBuilder::pointer_rep(), + Register::ToAllocationIndex(reg)); + } + + static LinkageLocation UnconstrainedRegister(MachineType rep) { + return LinkageLocation(rep, LinkageLocation::ANY_REGISTER); + } + + static const RegList kNoCalleeSaved = 0; + + // TODO(turbofan): cache call descriptors for JSFunction calls. + template <typename LinkageTraits> + static CallDescriptor* GetJSCallDescriptor(Zone* zone, int parameter_count) { + const int jsfunction_count = 1; + const int context_count = 1; + int input_count = jsfunction_count + parameter_count + context_count; + + const int return_count = 1; + LinkageLocation* locations = + zone->NewArray<LinkageLocation>(return_count + input_count); + + int index = 0; + locations[index++] = + TaggedRegisterLocation(LinkageTraits::ReturnValueReg()); + locations[index++] = + TaggedRegisterLocation(LinkageTraits::JSCallFunctionReg()); + + for (int i = 0; i < parameter_count; i++) { + // All parameters to JS calls go on the stack. + int spill_slot_index = i - parameter_count; + locations[index++] = TaggedStackSlot(spill_slot_index); + } + locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg()); + + // TODO(titzer): refactor TurboFan graph to consider context a value input. 
+ return new (zone) + CallDescriptor(CallDescriptor::kCallJSFunction, // kind + return_count, // return_count + parameter_count, // parameter_count + input_count - context_count, // input_count + locations, // locations + Operator::kNoProperties, // properties + kNoCalleeSaved, // callee-saved registers + CallDescriptor::kCanDeoptimize); // deoptimization + } + + + // TODO(turbofan): cache call descriptors for runtime calls. + template <typename LinkageTraits> + static CallDescriptor* GetRuntimeCallDescriptor( + Zone* zone, Runtime::FunctionId function_id, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize) { + const int code_count = 1; + const int function_count = 1; + const int num_args_count = 1; + const int context_count = 1; + const int input_count = code_count + parameter_count + function_count + + num_args_count + context_count; + + const Runtime::Function* function = Runtime::FunctionForId(function_id); + const int return_count = function->result_size; + LinkageLocation* locations = + zone->NewArray<LinkageLocation>(return_count + input_count); + + int index = 0; + if (return_count > 0) { + locations[index++] = + TaggedRegisterLocation(LinkageTraits::ReturnValueReg()); + } + if (return_count > 1) { + locations[index++] = + TaggedRegisterLocation(LinkageTraits::ReturnValue2Reg()); + } + + DCHECK_LE(return_count, 2); + + locations[index++] = UnconstrainedRegister(kMachineTagged); // CEntryStub + + for (int i = 0; i < parameter_count; i++) { + // All parameters to runtime calls go on the stack. 
+ int spill_slot_index = i - parameter_count; + locations[index++] = TaggedStackSlot(spill_slot_index); + } + locations[index++] = + TaggedRegisterLocation(LinkageTraits::RuntimeCallFunctionReg()); + locations[index++] = + WordRegisterLocation(LinkageTraits::RuntimeCallArgCountReg()); + locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg()); + + // TODO(titzer): refactor TurboFan graph to consider context a value input. + return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject, // kind + return_count, // return_count + parameter_count, // parameter_count + input_count, // input_count + locations, // locations + properties, // properties + kNoCalleeSaved, // callee-saved registers + can_deoptimize, // deoptimization + function->name); + } + + + // TODO(turbofan): cache call descriptors for code stub calls. + template <typename LinkageTraits> + static CallDescriptor* GetStubCallDescriptor( + Zone* zone, CodeStubInterfaceDescriptor* descriptor, + int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize) { + int register_parameter_count = descriptor->GetEnvironmentParameterCount(); + int parameter_count = register_parameter_count + stack_parameter_count; + const int code_count = 1; + const int context_count = 1; + int input_count = code_count + parameter_count + context_count; + + const int return_count = 1; + LinkageLocation* locations = + zone->NewArray<LinkageLocation>(return_count + input_count); + + int index = 0; + locations[index++] = + TaggedRegisterLocation(LinkageTraits::ReturnValueReg()); + locations[index++] = UnconstrainedRegister(kMachineTagged); // code + for (int i = 0; i < parameter_count; i++) { + if (i < register_parameter_count) { + // The first parameters to code stub calls go in registers. + Register reg = descriptor->GetEnvironmentParameterRegister(i); + locations[index++] = TaggedRegisterLocation(reg); + } else { + // The rest of the parameters go on the stack. 
+ int stack_slot = i - register_parameter_count - stack_parameter_count; + locations[index++] = TaggedStackSlot(stack_slot); + } + } + locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg()); + + // TODO(titzer): refactor TurboFan graph to consider context a value input. + return new (zone) + CallDescriptor(CallDescriptor::kCallCodeObject, // kind + return_count, // return_count + parameter_count, // parameter_count + input_count, // input_count + locations, // locations + Operator::kNoProperties, // properties + kNoCalleeSaved, // callee-saved registers + can_deoptimize, // deoptimization + CodeStub::MajorName(descriptor->MajorKey(), false)); + } + + + template <typename LinkageTraits> + static CallDescriptor* GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types) { + LinkageLocation* locations = + zone->NewArray<LinkageLocation>(num_params + 2); + int index = 0; + locations[index++] = + TaggedRegisterLocation(LinkageTraits::ReturnValueReg()); + locations[index++] = LinkageHelper::UnconstrainedRegister( + MachineOperatorBuilder::pointer_rep()); + // TODO(dcarney): test with lots of parameters. + int i = 0; + for (; i < LinkageTraits::CRegisterParametersLength() && i < num_params; + i++) { + locations[index++] = LinkageLocation( + param_types[i], + Register::ToAllocationIndex(LinkageTraits::CRegisterParameter(i))); + } + for (; i < num_params; i++) { + locations[index++] = LinkageLocation(param_types[i], -1 - i); + } + return new (zone) CallDescriptor( + CallDescriptor::kCallAddress, 1, num_params, num_params + 1, locations, + Operator::kNoProperties, LinkageTraits::CCalleeSaveRegisters(), + CallDescriptor::kCannotDeoptimize); // TODO(jarin) should deoptimize! 
+ } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_LINKAGE_IMPL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/lowering-builder.cc nodejs-0.11.15/deps/v8/src/compiler/lowering-builder.cc --- nodejs-0.11.13/deps/v8/src/compiler/lowering-builder.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/lowering-builder.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/graph-inl.h" +#include "src/compiler/lowering-builder.h" +#include "src/compiler/node-aux-data-inl.h" +#include "src/compiler/node-properties-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class LoweringBuilder::NodeVisitor : public NullNodeVisitor { + public: + explicit NodeVisitor(LoweringBuilder* lowering) : lowering_(lowering) {} + + GenericGraphVisit::Control Post(Node* node) { + if (lowering_->source_positions_ != NULL) { + SourcePositionTable::Scope pos(lowering_->source_positions_, node); + lowering_->Lower(node); + } else { + lowering_->Lower(node); + } + return GenericGraphVisit::CONTINUE; + } + + private: + LoweringBuilder* lowering_; +}; + + +LoweringBuilder::LoweringBuilder(Graph* graph, + SourcePositionTable* source_positions) + : graph_(graph), source_positions_(source_positions) {} + + +void LoweringBuilder::LowerAllNodes() { + NodeVisitor visitor(this); + graph()->VisitNodeInputsFromEnd(&visitor); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/lowering-builder.h nodejs-0.11.15/deps/v8/src/compiler/lowering-builder.h --- nodejs-0.11.13/deps/v8/src/compiler/lowering-builder.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/lowering-builder.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,38 @@ +// Copyright 2014 the 
V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_LOWERING_BUILDER_H_ +#define V8_COMPILER_LOWERING_BUILDER_H_ + +#include "src/v8.h" + +#include "src/compiler/graph.h" + + +namespace v8 { +namespace internal { +namespace compiler { + +// TODO(dcarney): rename this class. +class LoweringBuilder { + public: + explicit LoweringBuilder(Graph* graph, SourcePositionTable* source_positions); + virtual ~LoweringBuilder() {} + + void LowerAllNodes(); + virtual void Lower(Node* node) = 0; // Exposed for testing. + + Graph* graph() const { return graph_; } + + private: + class NodeVisitor; + Graph* graph_; + SourcePositionTable* source_positions_; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_LOWERING_BUILDER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/machine-node-factory.h nodejs-0.11.15/deps/v8/src/compiler/machine-node-factory.h --- nodejs-0.11.13/deps/v8/src/compiler/machine-node-factory.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/machine-node-factory.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,381 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_MACHINE_NODE_FACTORY_H_ +#define V8_COMPILER_MACHINE_NODE_FACTORY_H_ + +#ifdef USE_SIMULATOR +#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0 +#else +#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1 +#endif + +#include "src/v8.h" + +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class MachineCallDescriptorBuilder : public ZoneObject { + public: + MachineCallDescriptorBuilder(MachineType return_type, int parameter_count, + const MachineType* parameter_types) + : return_type_(return_type), + parameter_count_(parameter_count), + parameter_types_(parameter_types) {} + + int parameter_count() const { return parameter_count_; } + const MachineType* parameter_types() const { return parameter_types_; } + + CallDescriptor* BuildCallDescriptor(Zone* zone) { + return Linkage::GetSimplifiedCDescriptor(zone, parameter_count_, + return_type_, parameter_types_); + } + + private: + const MachineType return_type_; + const int parameter_count_; + const MachineType* const parameter_types_; +}; + + +#define ZONE() static_cast<NodeFactory*>(this)->zone() +#define COMMON() static_cast<NodeFactory*>(this)->common() +#define MACHINE() static_cast<NodeFactory*>(this)->machine() +#define NEW_NODE_0(op) static_cast<NodeFactory*>(this)->NewNode(op) +#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a) +#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b) +#define NEW_NODE_3(op, a, b, c) \ + static_cast<NodeFactory*>(this)->NewNode(op, a, b, c) + +template <typename NodeFactory> +class MachineNodeFactory { + public: + // Constants. + Node* PointerConstant(void* value) { + return IntPtrConstant(reinterpret_cast<intptr_t>(value)); + } + Node* IntPtrConstant(intptr_t value) { + // TODO(dcarney): mark generated code as unserializable if value != 0. + return kPointerSize == 8 ? 
Int64Constant(value) + : Int32Constant(static_cast<int>(value)); + } + Node* Int32Constant(int32_t value) { + return NEW_NODE_0(COMMON()->Int32Constant(value)); + } + Node* Int64Constant(int64_t value) { + return NEW_NODE_0(COMMON()->Int64Constant(value)); + } + Node* NumberConstant(double value) { + return NEW_NODE_0(COMMON()->NumberConstant(value)); + } + Node* Float64Constant(double value) { + return NEW_NODE_0(COMMON()->Float64Constant(value)); + } + Node* HeapConstant(Handle<Object> object) { + PrintableUnique<Object> val = + PrintableUnique<Object>::CreateUninitialized(ZONE(), object); + return NEW_NODE_0(COMMON()->HeapConstant(val)); + } + + Node* Projection(int index, Node* a) { + return NEW_NODE_1(COMMON()->Projection(index), a); + } + + // Memory Operations. + Node* Load(MachineType rep, Node* base) { + return Load(rep, base, Int32Constant(0)); + } + Node* Load(MachineType rep, Node* base, Node* index) { + return NEW_NODE_2(MACHINE()->Load(rep), base, index); + } + void Store(MachineType rep, Node* base, Node* value) { + Store(rep, base, Int32Constant(0), value); + } + void Store(MachineType rep, Node* base, Node* index, Node* value) { + NEW_NODE_3(MACHINE()->Store(rep, kNoWriteBarrier), base, index, value); + } + // Arithmetic Operations. 
+ Node* WordAnd(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordAnd(), a, b); + } + Node* WordOr(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordOr(), a, b); + } + Node* WordXor(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordXor(), a, b); + } + Node* WordShl(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordShl(), a, b); + } + Node* WordShr(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordShr(), a, b); + } + Node* WordSar(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordSar(), a, b); + } + Node* WordEqual(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->WordEqual(), a, b); + } + Node* WordNotEqual(Node* a, Node* b) { + return WordBinaryNot(WordEqual(a, b)); + } + Node* WordNot(Node* a) { + if (MACHINE()->is32()) { + return Word32Not(a); + } else { + return Word64Not(a); + } + } + Node* WordBinaryNot(Node* a) { + if (MACHINE()->is32()) { + return Word32BinaryNot(a); + } else { + return Word64BinaryNot(a); + } + } + + Node* Word32And(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32And(), a, b); + } + Node* Word32Or(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32Or(), a, b); + } + Node* Word32Xor(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32Xor(), a, b); + } + Node* Word32Shl(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32Shl(), a, b); + } + Node* Word32Shr(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32Shr(), a, b); + } + Node* Word32Sar(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32Sar(), a, b); + } + Node* Word32Equal(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word32Equal(), a, b); + } + Node* Word32NotEqual(Node* a, Node* b) { + return Word32BinaryNot(Word32Equal(a, b)); + } + Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); } + Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); } + + Node* Word64And(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word64And(), a, b); + } + Node* Word64Or(Node* a, Node* b) { + 
return NEW_NODE_2(MACHINE()->Word64Or(), a, b); + } + Node* Word64Xor(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word64Xor(), a, b); + } + Node* Word64Shl(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word64Shl(), a, b); + } + Node* Word64Shr(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word64Shr(), a, b); + } + Node* Word64Sar(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word64Sar(), a, b); + } + Node* Word64Equal(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Word64Equal(), a, b); + } + Node* Word64NotEqual(Node* a, Node* b) { + return Word64BinaryNot(Word64Equal(a, b)); + } + Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); } + Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); } + + Node* Int32Add(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32Add(), a, b); + } + Node* Int32AddWithOverflow(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b); + } + Node* Int32Sub(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32Sub(), a, b); + } + Node* Int32SubWithOverflow(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b); + } + Node* Int32Mul(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32Mul(), a, b); + } + Node* Int32Div(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32Div(), a, b); + } + Node* Int32UDiv(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32UDiv(), a, b); + } + Node* Int32Mod(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32Mod(), a, b); + } + Node* Int32UMod(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32UMod(), a, b); + } + Node* Int32LessThan(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32LessThan(), a, b); + } + Node* Int32LessThanOrEqual(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int32LessThanOrEqual(), a, b); + } + Node* Uint32LessThan(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Uint32LessThan(), a, b); + } + Node* Uint32LessThanOrEqual(Node* a, Node* 
b) { + return NEW_NODE_2(MACHINE()->Uint32LessThanOrEqual(), a, b); + } + Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); } + Node* Int32GreaterThanOrEqual(Node* a, Node* b) { + return Int32LessThanOrEqual(b, a); + } + Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); } + + Node* Int64Add(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64Add(), a, b); + } + Node* Int64Sub(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64Sub(), a, b); + } + Node* Int64Mul(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64Mul(), a, b); + } + Node* Int64Div(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64Div(), a, b); + } + Node* Int64UDiv(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64UDiv(), a, b); + } + Node* Int64Mod(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64Mod(), a, b); + } + Node* Int64UMod(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64UMod(), a, b); + } + Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); } + Node* Int64LessThan(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64LessThan(), a, b); + } + Node* Int64LessThanOrEqual(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Int64LessThanOrEqual(), a, b); + } + Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); } + Node* Int64GreaterThanOrEqual(Node* a, Node* b) { + return Int64LessThanOrEqual(b, a); + } + + Node* ConvertIntPtrToInt32(Node* a) { + return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a) + : a; + } + Node* ConvertInt32ToIntPtr(Node* a) { + return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a) + : a; + } + +#define INTPTR_BINOP(prefix, name) \ + Node* IntPtr##name(Node* a, Node* b) { \ + return kPointerSize == 8 ? 
prefix##64##name(a, b) \ + : prefix##32##name(a, b); \ + } + + INTPTR_BINOP(Int, Add); + INTPTR_BINOP(Int, Sub); + INTPTR_BINOP(Int, LessThan); + INTPTR_BINOP(Int, LessThanOrEqual); + INTPTR_BINOP(Word, Equal); + INTPTR_BINOP(Word, NotEqual); + INTPTR_BINOP(Int, GreaterThanOrEqual); + INTPTR_BINOP(Int, GreaterThan); + +#undef INTPTR_BINOP + + Node* Float64Add(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64Add(), a, b); + } + Node* Float64Sub(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64Sub(), a, b); + } + Node* Float64Mul(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64Mul(), a, b); + } + Node* Float64Div(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64Div(), a, b); + } + Node* Float64Mod(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64Mod(), a, b); + } + Node* Float64Equal(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64Equal(), a, b); + } + Node* Float64NotEqual(Node* a, Node* b) { + return WordBinaryNot(Float64Equal(a, b)); + } + Node* Float64LessThan(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64LessThan(), a, b); + } + Node* Float64LessThanOrEqual(Node* a, Node* b) { + return NEW_NODE_2(MACHINE()->Float64LessThanOrEqual(), a, b); + } + Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); } + Node* Float64GreaterThanOrEqual(Node* a, Node* b) { + return Float64LessThanOrEqual(b, a); + } + + // Conversions. 
+ Node* ConvertInt32ToInt64(Node* a) { + return NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a); + } + Node* ConvertInt64ToInt32(Node* a) { + return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a); + } + Node* ChangeInt32ToFloat64(Node* a) { + return NEW_NODE_1(MACHINE()->ChangeInt32ToFloat64(), a); + } + Node* ChangeUint32ToFloat64(Node* a) { + return NEW_NODE_1(MACHINE()->ChangeUint32ToFloat64(), a); + } + Node* ChangeFloat64ToInt32(Node* a) { + return NEW_NODE_1(MACHINE()->ChangeFloat64ToInt32(), a); + } + Node* ChangeFloat64ToUint32(Node* a) { + return NEW_NODE_1(MACHINE()->ChangeFloat64ToUint32(), a); + } + +#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C + // Call to C. + Node* CallC(Node* function_address, MachineType return_type, + MachineType* arg_types, Node** args, int n_args) { + CallDescriptor* descriptor = Linkage::GetSimplifiedCDescriptor( + ZONE(), n_args, return_type, arg_types); + Node** passed_args = + static_cast<Node**>(alloca((n_args + 1) * sizeof(args[0]))); + passed_args[0] = function_address; + for (int i = 0; i < n_args; ++i) { + passed_args[i + 1] = args[i]; + } + return NEW_NODE_2(COMMON()->Call(descriptor), n_args + 1, passed_args); + } +#endif +}; + +#undef NEW_NODE_0 +#undef NEW_NODE_1 +#undef NEW_NODE_2 +#undef NEW_NODE_3 +#undef MACHINE +#undef COMMON +#undef ZONE + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_MACHINE_NODE_FACTORY_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/machine-operator.h nodejs-0.11.15/deps/v8/src/compiler/machine-operator.h --- nodejs-0.11.13/deps/v8/src/compiler/machine-operator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/machine-operator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,168 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_MACHINE_OPERATOR_H_ +#define V8_COMPILER_MACHINE_OPERATOR_H_ + +#include "src/compiler/machine-type.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// TODO(turbofan): other write barriers are possible based on type +enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier }; + + +// A Store needs a MachineType and a WriteBarrierKind +// in order to emit the correct write barrier. +struct StoreRepresentation { + MachineType rep; + WriteBarrierKind write_barrier_kind; +}; + + +// Interface for building machine-level operators. These operators are +// machine-level but machine-independent and thus define a language suitable +// for generating code to run on architectures such as ia32, x64, arm, etc. +class MachineOperatorBuilder { + public: + explicit MachineOperatorBuilder(Zone* zone, MachineType word = pointer_rep()) + : zone_(zone), word_(word) { + CHECK(word == kMachineWord32 || word == kMachineWord64); + } + +#define SIMPLE(name, properties, inputs, outputs) \ + return new (zone_) \ + SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name); + +#define OP1(name, ptype, pname, properties, inputs, outputs) \ + return new (zone_) \ + Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \ + inputs, outputs, #name, pname) + +#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1) +#define BINOP_O(name) SIMPLE(name, Operator::kPure, 2, 2) +#define BINOP_C(name) \ + SIMPLE(name, Operator::kCommutative | Operator::kPure, 2, 1) +#define BINOP_AC(name) \ + SIMPLE(name, \ + Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \ + 1) +#define BINOP_ACO(name) \ + SIMPLE(name, \ + Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \ + 2) +#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1) + +#define WORD_SIZE(x) return is64() ? 
Word64##x() : Word32##x() + + Operator* Load(MachineType rep) { // load [base + index] + OP1(Load, MachineType, rep, Operator::kNoWrite, 2, 1); + } + // store [base + index], value + Operator* Store(MachineType rep, WriteBarrierKind kind) { + StoreRepresentation store_rep = {rep, kind}; + OP1(Store, StoreRepresentation, store_rep, Operator::kNoRead, 3, 0); + } + + Operator* WordAnd() { WORD_SIZE(And); } + Operator* WordOr() { WORD_SIZE(Or); } + Operator* WordXor() { WORD_SIZE(Xor); } + Operator* WordShl() { WORD_SIZE(Shl); } + Operator* WordShr() { WORD_SIZE(Shr); } + Operator* WordSar() { WORD_SIZE(Sar); } + Operator* WordEqual() { WORD_SIZE(Equal); } + + Operator* Word32And() { BINOP_AC(Word32And); } + Operator* Word32Or() { BINOP_AC(Word32Or); } + Operator* Word32Xor() { BINOP_AC(Word32Xor); } + Operator* Word32Shl() { BINOP(Word32Shl); } + Operator* Word32Shr() { BINOP(Word32Shr); } + Operator* Word32Sar() { BINOP(Word32Sar); } + Operator* Word32Equal() { BINOP_C(Word32Equal); } + + Operator* Word64And() { BINOP_AC(Word64And); } + Operator* Word64Or() { BINOP_AC(Word64Or); } + Operator* Word64Xor() { BINOP_AC(Word64Xor); } + Operator* Word64Shl() { BINOP(Word64Shl); } + Operator* Word64Shr() { BINOP(Word64Shr); } + Operator* Word64Sar() { BINOP(Word64Sar); } + Operator* Word64Equal() { BINOP_C(Word64Equal); } + + Operator* Int32Add() { BINOP_AC(Int32Add); } + Operator* Int32AddWithOverflow() { BINOP_ACO(Int32AddWithOverflow); } + Operator* Int32Sub() { BINOP(Int32Sub); } + Operator* Int32SubWithOverflow() { BINOP_O(Int32SubWithOverflow); } + Operator* Int32Mul() { BINOP_AC(Int32Mul); } + Operator* Int32Div() { BINOP(Int32Div); } + Operator* Int32UDiv() { BINOP(Int32UDiv); } + Operator* Int32Mod() { BINOP(Int32Mod); } + Operator* Int32UMod() { BINOP(Int32UMod); } + Operator* Int32LessThan() { BINOP(Int32LessThan); } + Operator* Int32LessThanOrEqual() { BINOP(Int32LessThanOrEqual); } + Operator* Uint32LessThan() { BINOP(Uint32LessThan); } + Operator* 
Uint32LessThanOrEqual() { BINOP(Uint32LessThanOrEqual); } + + Operator* Int64Add() { BINOP_AC(Int64Add); } + Operator* Int64Sub() { BINOP(Int64Sub); } + Operator* Int64Mul() { BINOP_AC(Int64Mul); } + Operator* Int64Div() { BINOP(Int64Div); } + Operator* Int64UDiv() { BINOP(Int64UDiv); } + Operator* Int64Mod() { BINOP(Int64Mod); } + Operator* Int64UMod() { BINOP(Int64UMod); } + Operator* Int64LessThan() { BINOP(Int64LessThan); } + Operator* Int64LessThanOrEqual() { BINOP(Int64LessThanOrEqual); } + + Operator* ConvertInt32ToInt64() { UNOP(ConvertInt32ToInt64); } + Operator* ConvertInt64ToInt32() { UNOP(ConvertInt64ToInt32); } + + // Convert representation of integers between float64 and int32/uint32. + // The precise rounding mode and handling of out of range inputs are *not* + // defined for these operators, since they are intended only for use with + // integers. + // TODO(titzer): rename ConvertXXX to ChangeXXX in machine operators. + Operator* ChangeInt32ToFloat64() { UNOP(ChangeInt32ToFloat64); } + Operator* ChangeUint32ToFloat64() { UNOP(ChangeUint32ToFloat64); } + Operator* ChangeFloat64ToInt32() { UNOP(ChangeFloat64ToInt32); } + Operator* ChangeFloat64ToUint32() { UNOP(ChangeFloat64ToUint32); } + + // Floating point operators always operate with IEEE 754 round-to-nearest. + Operator* Float64Add() { BINOP_C(Float64Add); } + Operator* Float64Sub() { BINOP(Float64Sub); } + Operator* Float64Mul() { BINOP_C(Float64Mul); } + Operator* Float64Div() { BINOP(Float64Div); } + Operator* Float64Mod() { BINOP(Float64Mod); } + + // Floating point comparisons complying to IEEE 754. 
+ Operator* Float64Equal() { BINOP_C(Float64Equal); } + Operator* Float64LessThan() { BINOP(Float64LessThan); } + Operator* Float64LessThanOrEqual() { BINOP(Float64LessThanOrEqual); } + + inline bool is32() const { return word_ == kMachineWord32; } + inline bool is64() const { return word_ == kMachineWord64; } + inline MachineType word() const { return word_; } + + static inline MachineType pointer_rep() { + return kPointerSize == 8 ? kMachineWord64 : kMachineWord32; + } + +#undef WORD_SIZE +#undef UNOP +#undef BINOP +#undef OP1 +#undef SIMPLE + + private: + Zone* zone_; + MachineType word_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_MACHINE_OPERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/machine-operator-reducer.cc nodejs-0.11.15/deps/v8/src/compiler/machine-operator-reducer.cc --- nodejs-0.11.13/deps/v8/src/compiler/machine-operator-reducer.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/machine-operator-reducer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,343 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/machine-operator-reducer.h" + +#include "src/compiler/common-node-cache.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/graph.h" +#include "src/compiler/node-matchers.h" + +namespace v8 { +namespace internal { +namespace compiler { + +MachineOperatorReducer::MachineOperatorReducer(Graph* graph) + : graph_(graph), + cache_(new (graph->zone()) CommonNodeCache(graph->zone())), + common_(graph->zone()), + machine_(graph->zone()) {} + + +MachineOperatorReducer::MachineOperatorReducer(Graph* graph, + CommonNodeCache* cache) + : graph_(graph), + cache_(cache), + common_(graph->zone()), + machine_(graph->zone()) {} + + +Node* MachineOperatorReducer::Int32Constant(int32_t value) { + Node** loc = cache_->FindInt32Constant(value); + if (*loc == NULL) { + *loc = graph_->NewNode(common_.Int32Constant(value)); + } + return *loc; +} + + +Node* MachineOperatorReducer::Float64Constant(volatile double value) { + Node** loc = cache_->FindFloat64Constant(value); + if (*loc == NULL) { + *loc = graph_->NewNode(common_.Float64Constant(value)); + } + return *loc; +} + + +// Perform constant folding and strength reduction on machine operators. 
+Reduction MachineOperatorReducer::Reduce(Node* node) { + switch (node->opcode()) { + case IrOpcode::kWord32And: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0 + if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x + if (m.IsFoldable()) { // K & K => K + return ReplaceInt32(m.left().Value() & m.right().Value()); + } + if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x + break; + } + case IrOpcode::kWord32Or: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x + if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1 + if (m.IsFoldable()) { // K | K => K + return ReplaceInt32(m.left().Value() | m.right().Value()); + } + if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x + break; + } + case IrOpcode::kWord32Xor: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x + if (m.IsFoldable()) { // K ^ K => K + return ReplaceInt32(m.left().Value() ^ m.right().Value()); + } + if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0 + break; + } + case IrOpcode::kWord32Shl: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x + if (m.IsFoldable()) { // K << K => K + return ReplaceInt32(m.left().Value() << m.right().Value()); + } + break; + } + case IrOpcode::kWord32Shr: { + Uint32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x + if (m.IsFoldable()) { // K >>> K => K + return ReplaceInt32(m.left().Value() >> m.right().Value()); + } + break; + } + case IrOpcode::kWord32Sar: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x + if (m.IsFoldable()) { // K >> K => K + return ReplaceInt32(m.left().Value() >> m.right().Value()); + } + break; + } + case IrOpcode::kWord32Equal: { + Int32BinopMatcher m(node); + if 
(m.IsFoldable()) { // K == K => K + return ReplaceBool(m.left().Value() == m.right().Value()); + } + if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y + Int32BinopMatcher msub(m.left().node()); + node->ReplaceInput(0, msub.left().node()); + node->ReplaceInput(1, msub.right().node()); + return Changed(node); + } + // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares + if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true + break; + } + case IrOpcode::kInt32Add: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x + if (m.IsFoldable()) { // K + K => K + return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) + + static_cast<uint32_t>(m.right().Value())); + } + break; + } + case IrOpcode::kInt32Sub: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x + if (m.IsFoldable()) { // K - K => K + return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) - + static_cast<uint32_t>(m.right().Value())); + } + if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0 + break; + } + case IrOpcode::kInt32Mul: { + Int32BinopMatcher m(node); + if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0 + if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x + if (m.IsFoldable()) { // K * K => K + return ReplaceInt32(m.left().Value() * m.right().Value()); + } + if (m.right().Is(-1)) { // x * -1 => 0 - x + graph_->ChangeOperator(node, machine_.Int32Sub()); + node->ReplaceInput(0, Int32Constant(0)); + node->ReplaceInput(1, m.left().node()); + return Changed(node); + } + if (m.right().IsPowerOf2()) { // x * 2^n => x << n + graph_->ChangeOperator(node, machine_.Word32Shl()); + node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value()))); + return Changed(node); + } + break; + } + case IrOpcode::kInt32Div: { + Int32BinopMatcher m(node); + if (m.right().Is(1)) return Replace(m.left().node()); // 
x / 1 => x + // TODO(turbofan): if (m.left().Is(0)) + // TODO(turbofan): if (m.right().IsPowerOf2()) + // TODO(turbofan): if (m.right().Is(0)) + // TODO(turbofan): if (m.LeftEqualsRight()) + if (m.IsFoldable() && !m.right().Is(0)) { // K / K => K + if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value()); + return ReplaceInt32(m.left().Value() / m.right().Value()); + } + if (m.right().Is(-1)) { // x / -1 => 0 - x + graph_->ChangeOperator(node, machine_.Int32Sub()); + node->ReplaceInput(0, Int32Constant(0)); + node->ReplaceInput(1, m.left().node()); + return Changed(node); + } + break; + } + case IrOpcode::kInt32UDiv: { + Uint32BinopMatcher m(node); + if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x + // TODO(turbofan): if (m.left().Is(0)) + // TODO(turbofan): if (m.right().Is(0)) + // TODO(turbofan): if (m.LeftEqualsRight()) + if (m.IsFoldable() && !m.right().Is(0)) { // K / K => K + return ReplaceInt32(m.left().Value() / m.right().Value()); + } + if (m.right().IsPowerOf2()) { // x / 2^n => x >> n + graph_->ChangeOperator(node, machine_.Word32Shr()); + node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value()))); + return Changed(node); + } + break; + } + case IrOpcode::kInt32Mod: { + Int32BinopMatcher m(node); + if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0 + if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0 + // TODO(turbofan): if (m.left().Is(0)) + // TODO(turbofan): if (m.right().IsPowerOf2()) + // TODO(turbofan): if (m.right().Is(0)) + // TODO(turbofan): if (m.LeftEqualsRight()) + if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K + return ReplaceInt32(m.left().Value() % m.right().Value()); + } + break; + } + case IrOpcode::kInt32UMod: { + Uint32BinopMatcher m(node); + if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0 + // TODO(turbofan): if (m.left().Is(0)) + // TODO(turbofan): if (m.right().Is(0)) + // TODO(turbofan): if (m.LeftEqualsRight()) + if (m.IsFoldable() && !m.right().Is(0)) { // 
K % K => K + return ReplaceInt32(m.left().Value() % m.right().Value()); + } + if (m.right().IsPowerOf2()) { // x % 2^n => x & 2^n-1 + graph_->ChangeOperator(node, machine_.Word32And()); + node->ReplaceInput(1, Int32Constant(m.right().Value() - 1)); + return Changed(node); + } + break; + } + case IrOpcode::kInt32LessThan: { + Int32BinopMatcher m(node); + if (m.IsFoldable()) { // K < K => K + return ReplaceBool(m.left().Value() < m.right().Value()); + } + if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y < 0 => x < y + Int32BinopMatcher msub(m.left().node()); + node->ReplaceInput(0, msub.left().node()); + node->ReplaceInput(1, msub.right().node()); + return Changed(node); + } + if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 < x - y => y < x + Int32BinopMatcher msub(m.right().node()); + node->ReplaceInput(0, msub.right().node()); + node->ReplaceInput(1, msub.left().node()); + return Changed(node); + } + if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false + break; + } + case IrOpcode::kInt32LessThanOrEqual: { + Int32BinopMatcher m(node); + if (m.IsFoldable()) { // K <= K => K + return ReplaceBool(m.left().Value() <= m.right().Value()); + } + if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y <= 0 => x <= y + Int32BinopMatcher msub(m.left().node()); + node->ReplaceInput(0, msub.left().node()); + node->ReplaceInput(1, msub.right().node()); + return Changed(node); + } + if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 <= x - y => y <= x + Int32BinopMatcher msub(m.right().node()); + node->ReplaceInput(0, msub.right().node()); + node->ReplaceInput(1, msub.left().node()); + return Changed(node); + } + if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true + break; + } + case IrOpcode::kUint32LessThan: { + Uint32BinopMatcher m(node); + if (m.left().Is(kMaxUInt32)) return ReplaceBool(false); // M < x => false + if (m.right().Is(0)) return ReplaceBool(false); // x < 0 => false + if (m.IsFoldable()) { // K < K => K + return 
ReplaceBool(m.left().Value() < m.right().Value()); + } + if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false + break; + } + case IrOpcode::kUint32LessThanOrEqual: { + Uint32BinopMatcher m(node); + if (m.left().Is(0)) return ReplaceBool(true); // 0 <= x => true + if (m.right().Is(kMaxUInt32)) return ReplaceBool(true); // x <= M => true + if (m.IsFoldable()) { // K <= K => K + return ReplaceBool(m.left().Value() <= m.right().Value()); + } + if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true + break; + } + case IrOpcode::kFloat64Add: { + Float64BinopMatcher m(node); + if (m.IsFoldable()) { // K + K => K + return ReplaceFloat64(m.left().Value() + m.right().Value()); + } + break; + } + case IrOpcode::kFloat64Sub: { + Float64BinopMatcher m(node); + if (m.IsFoldable()) { // K - K => K + return ReplaceFloat64(m.left().Value() - m.right().Value()); + } + break; + } + case IrOpcode::kFloat64Mul: { + Float64BinopMatcher m(node); + if (m.right().Is(1)) return Replace(m.left().node()); // x * 1.0 => x + if (m.right().IsNaN()) { // x * NaN => NaN + return Replace(m.right().node()); + } + if (m.IsFoldable()) { // K * K => K + return ReplaceFloat64(m.left().Value() * m.right().Value()); + } + break; + } + case IrOpcode::kFloat64Div: { + Float64BinopMatcher m(node); + if (m.right().Is(1)) return Replace(m.left().node()); // x / 1.0 => x + if (m.right().IsNaN()) { // x / NaN => NaN + return Replace(m.right().node()); + } + if (m.left().IsNaN()) { // NaN / x => NaN + return Replace(m.left().node()); + } + if (m.IsFoldable()) { // K / K => K + return ReplaceFloat64(m.left().Value() / m.right().Value()); + } + break; + } + case IrOpcode::kFloat64Mod: { + Float64BinopMatcher m(node); + if (m.right().IsNaN()) { // x % NaN => NaN + return Replace(m.right().node()); + } + if (m.left().IsNaN()) { // NaN % x => NaN + return Replace(m.left().node()); + } + if (m.IsFoldable()) { // K % K => K + return ReplaceFloat64(modulo(m.left().Value(), 
m.right().Value())); + } + break; + } + // TODO(turbofan): strength-reduce and fold floating point operations. + default: + break; + } + return NoChange(); +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/machine-operator-reducer.h nodejs-0.11.15/deps/v8/src/compiler/machine-operator-reducer.h --- nodejs-0.11.13/deps/v8/src/compiler/machine-operator-reducer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/machine-operator-reducer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,52 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_ +#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_ + +#include "src/compiler/common-operator.h" +#include "src/compiler/graph-reducer.h" +#include "src/compiler/machine-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Forward declarations. +class CommonNodeCache; + +// Performs constant folding and strength reduction on nodes that have +// machine operators. +class MachineOperatorReducer : public Reducer { + public: + explicit MachineOperatorReducer(Graph* graph); + + MachineOperatorReducer(Graph* graph, CommonNodeCache* cache); + + virtual Reduction Reduce(Node* node); + + private: + Graph* graph_; + CommonNodeCache* cache_; + CommonOperatorBuilder common_; + MachineOperatorBuilder machine_; + + Node* Int32Constant(int32_t value); + Node* Float64Constant(volatile double value); + + Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 
1 : 0); } + + Reduction ReplaceInt32(int32_t value) { + return Replace(Int32Constant(value)); + } + + Reduction ReplaceFloat64(volatile double value) { + return Replace(Float64Constant(value)); + } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/machine-type.h nodejs-0.11.15/deps/v8/src/compiler/machine-type.h --- nodejs-0.11.13/deps/v8/src/compiler/machine-type.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/machine-type.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,36 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_MACHINE_TYPE_H_ +#define V8_COMPILER_MACHINE_TYPE_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// An enumeration of the storage representations at the machine level. +// - Words are uninterpreted bits of a given fixed size that can be used +// to store integers and pointers. They are normally allocated to general +// purpose registers by the backend and are not tracked for GC. +// - Floats are bits of a given fixed size that are used to store floating +// point numbers. They are normally allocated to the floating point +// registers of the machine and are not tracked for the GC. +// - Tagged values are the size of a reference into the heap and can store +// small words or references into the heap using a language and potentially +// machine-dependent tagging scheme. These values are tracked by the code +// generator for precise GC. 
+enum MachineType { + kMachineWord8, + kMachineWord16, + kMachineWord32, + kMachineWord64, + kMachineFloat64, + kMachineTagged, + kMachineLast +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_MACHINE_TYPE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-aux-data.h nodejs-0.11.15/deps/v8/src/compiler/node-aux-data.h --- nodejs-0.11.13/deps/v8/src/compiler/node-aux-data.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node-aux-data.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,38 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_NODE_AUX_DATA_H_ +#define V8_COMPILER_NODE_AUX_DATA_H_ + +#include <vector> + +#include "src/zone-allocator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Forward declarations. +class Graph; +class Node; + +template <class T> +class NodeAuxData { + public: + inline explicit NodeAuxData(Zone* zone); + + inline void Set(Node* node, const T& data); + inline T Get(Node* node); + + private: + typedef zone_allocator<T> ZoneAllocator; + typedef std::vector<T, ZoneAllocator> TZoneVector; + + TZoneVector aux_data_; +}; +} +} +} // namespace v8::internal::compiler + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-aux-data-inl.h nodejs-0.11.15/deps/v8/src/compiler/node-aux-data-inl.h --- nodejs-0.11.13/deps/v8/src/compiler/node-aux-data-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node-aux-data-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_ +#define V8_COMPILER_NODE_AUX_DATA_INL_H_ + +#include "src/compiler/graph.h" +#include "src/compiler/node.h" +#include "src/compiler/node-aux-data.h" + +namespace v8 { +namespace internal { +namespace compiler { + +template <class T> +NodeAuxData<T>::NodeAuxData(Zone* zone) + : aux_data_(ZoneAllocator(zone)) {} + + +template <class T> +void NodeAuxData<T>::Set(Node* node, const T& data) { + int id = node->id(); + if (id >= static_cast<int>(aux_data_.size())) { + aux_data_.resize(id + 1); + } + aux_data_[id] = data; +} + + +template <class T> +T NodeAuxData<T>::Get(Node* node) { + int id = node->id(); + if (id >= static_cast<int>(aux_data_.size())) { + return T(); + } + return aux_data_[id]; +} +} +} +} // namespace v8::internal::compiler + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-cache.cc nodejs-0.11.15/deps/v8/src/compiler/node-cache.cc --- nodejs-0.11.13/deps/v8/src/compiler/node-cache.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node-cache.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,120 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/node-cache.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define INITIAL_SIZE 16 +#define LINEAR_PROBE 5 + +template <typename Key> +int32_t NodeCacheHash(Key key) { + UNIMPLEMENTED(); + return 0; +} + +template <> +inline int32_t NodeCacheHash(int32_t key) { + return ComputeIntegerHash(key, 0); +} + + +template <> +inline int32_t NodeCacheHash(int64_t key) { + return ComputeLongHash(key); +} + + +template <> +inline int32_t NodeCacheHash(double key) { + return ComputeLongHash(BitCast<int64_t>(key)); +} + + +template <> +inline int32_t NodeCacheHash(void* key) { + return ComputePointerHash(key); +} + + +template <typename Key> +bool NodeCache<Key>::Resize(Zone* zone) { + if (size_ >= max_) return false; // Don't grow past the maximum size. + + // Allocate a new block of entries 4x the size. + Entry* old_entries = entries_; + int old_size = size_ + LINEAR_PROBE; + size_ = size_ * 4; + int num_entries = size_ + LINEAR_PROBE; + entries_ = zone->NewArray<Entry>(num_entries); + memset(entries_, 0, sizeof(Entry) * num_entries); + + // Insert the old entries into the new block. + for (int i = 0; i < old_size; i++) { + Entry* old = &old_entries[i]; + if (old->value_ != NULL) { + int hash = NodeCacheHash(old->key_); + int start = hash & (size_ - 1); + int end = start + LINEAR_PROBE; + for (int j = start; j < end; j++) { + Entry* entry = &entries_[j]; + if (entry->value_ == NULL) { + entry->key_ = old->key_; + entry->value_ = old->value_; + break; + } + } + } + } + return true; +} + + +template <typename Key> +Node** NodeCache<Key>::Find(Zone* zone, Key key) { + int32_t hash = NodeCacheHash(key); + if (entries_ == NULL) { + // Allocate the initial entries and insert the first entry. 
+ int num_entries = INITIAL_SIZE + LINEAR_PROBE; + entries_ = zone->NewArray<Entry>(num_entries); + size_ = INITIAL_SIZE; + memset(entries_, 0, sizeof(Entry) * num_entries); + Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)]; + entry->key_ = key; + return &entry->value_; + } + + while (true) { + // Search up to N entries after (linear probing). + int start = hash & (size_ - 1); + int end = start + LINEAR_PROBE; + for (int i = start; i < end; i++) { + Entry* entry = &entries_[i]; + if (entry->key_ == key) return &entry->value_; + if (entry->value_ == NULL) { + entry->key_ = key; + return &entry->value_; + } + } + + if (!Resize(zone)) break; // Don't grow past the maximum size. + } + + // If resized to maximum and still didn't find space, overwrite an entry. + Entry* entry = &entries_[hash & (size_ - 1)]; + entry->key_ = key; + entry->value_ = NULL; + return &entry->value_; +} + + +template class NodeCache<int64_t>; +template class NodeCache<int32_t>; +template class NodeCache<void*>; +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-cache.h nodejs-0.11.15/deps/v8/src/compiler/node-cache.h --- nodejs-0.11.13/deps/v8/src/compiler/node-cache.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node-cache.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_NODE_CACHE_H_ +#define V8_COMPILER_NODE_CACHE_H_ + +#include "src/v8.h" + +#include "src/compiler/node.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// A cache for nodes based on a key. Useful for implementing canonicalization of +// nodes such as constants, parameters, etc. 
+template <typename Key> +class NodeCache { + public: + explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {} + + // Search for node associated with {key} and return a pointer to a memory + // location in this cache that stores an entry for the key. If the location + // returned by this method contains a non-NULL node, the caller can use that + // node. Otherwise it is the responsibility of the caller to fill the entry + // with a new node. + // Note that a previous cache entry may be overwritten if the cache becomes + // too full or encounters too many hash collisions. + Node** Find(Zone* zone, Key key); + + private: + struct Entry { + Key key_; + Node* value_; + }; + + Entry* entries_; // lazily-allocated hash entries. + int32_t size_; + int32_t max_; + + bool Resize(Zone* zone); +}; + +// Various default cache types. +typedef NodeCache<int64_t> Int64NodeCache; +typedef NodeCache<int32_t> Int32NodeCache; +typedef NodeCache<void*> PtrNodeCache; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_NODE_CACHE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node.cc nodejs-0.11.15/deps/v8/src/compiler/node.cc --- nodejs-0.11.13/deps/v8/src/compiler/node.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,55 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/node.h" + +#include "src/compiler/generic-node-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +void Node::CollectProjections(int projection_count, Node** projections) { + for (int i = 0; i < projection_count; ++i) projections[i] = NULL; + for (UseIter i = uses().begin(); i != uses().end(); ++i) { + if ((*i)->opcode() != IrOpcode::kProjection) continue; + int32_t index = OpParameter<int32_t>(*i); + DCHECK_GE(index, 0); + DCHECK_LT(index, projection_count); + DCHECK_EQ(NULL, projections[index]); + projections[index] = *i; + } +} + + +Node* Node::FindProjection(int32_t projection_index) { + for (UseIter i = uses().begin(); i != uses().end(); ++i) { + if ((*i)->opcode() == IrOpcode::kProjection && + OpParameter<int32_t>(*i) == projection_index) { + return *i; + } + } + return NULL; +} + + +OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); } + + +OStream& operator<<(OStream& os, const Node& n) { + os << n.id() << ": " << *n.op(); + if (n.op()->InputCount() != 0) { + os << "("; + for (int i = 0; i < n.op()->InputCount(); ++i) { + if (i != 0) os << ", "; + os << n.InputAt(i)->id(); + } + os << ")"; + } + return os; +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node.h nodejs-0.11.15/deps/v8/src/compiler/node.h --- nodejs-0.11.13/deps/v8/src/compiler/node.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,95 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_NODE_H_ +#define V8_COMPILER_NODE_H_ + +#include <deque> +#include <set> +#include <vector> + +#include "src/compiler/generic-algorithm.h" +#include "src/compiler/generic-node.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" +#include "src/types.h" +#include "src/zone.h" +#include "src/zone-allocator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class NodeData { + public: + Operator* op() const { return op_; } + void set_op(Operator* op) { op_ = op; } + + IrOpcode::Value opcode() const { + DCHECK(op_->opcode() <= IrOpcode::kLast); + return static_cast<IrOpcode::Value>(op_->opcode()); + } + + Bounds bounds() { return bounds_; } + + protected: + Operator* op_; + Bounds bounds_; + explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {} + + friend class NodeProperties; + void set_bounds(Bounds b) { bounds_ = b; } +}; + +// A Node is the basic primitive of an IR graph. In addition to the members +// inherited from Vector, Nodes only contain a mutable Operator that may change +// during compilation, e.g. during lowering passes. Other information that +// needs to be associated with Nodes during compilation must be stored +// out-of-line indexed by the Node's id. 
+class Node : public GenericNode<NodeData, Node> { + public: + Node(GenericGraphBase* graph, int input_count) + : GenericNode<NodeData, Node>(graph, input_count) {} + + void Initialize(Operator* op) { set_op(op); } + + void CollectProjections(int projection_count, Node** projections); + Node* FindProjection(int32_t projection_index); +}; + +OStream& operator<<(OStream& os, const Node& n); + +typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor; + +typedef zone_allocator<Node*> NodePtrZoneAllocator; + +typedef std::set<Node*, std::less<Node*>, NodePtrZoneAllocator> NodeSet; +typedef NodeSet::iterator NodeSetIter; +typedef NodeSet::reverse_iterator NodeSetRIter; + +typedef std::deque<Node*, NodePtrZoneAllocator> NodeDeque; +typedef NodeDeque::iterator NodeDequeIter; + +typedef std::vector<Node*, NodePtrZoneAllocator> NodeVector; +typedef NodeVector::iterator NodeVectorIter; +typedef NodeVector::reverse_iterator NodeVectorRIter; + +typedef zone_allocator<NodeVector> ZoneNodeVectorAllocator; +typedef std::vector<NodeVector, ZoneNodeVectorAllocator> NodeVectorVector; +typedef NodeVectorVector::iterator NodeVectorVectorIter; +typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter; + +typedef Node::Uses::iterator UseIter; +typedef Node::Inputs::iterator InputIter; + +// Helper to extract parameters from Operator1<*> nodes. +template <typename T> +static inline T OpParameter(Node* node) { + return reinterpret_cast<Operator1<T>*>(node->op())->parameter(); +} +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_NODE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-matchers.h nodejs-0.11.15/deps/v8/src/compiler/node-matchers.h --- nodejs-0.11.13/deps/v8/src/compiler/node-matchers.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node-matchers.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,133 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_NODE_MATCHERS_H_ +#define V8_COMPILER_NODE_MATCHERS_H_ + +#include "src/compiler/common-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// A pattern matcher for nodes. +struct NodeMatcher { + explicit NodeMatcher(Node* node) : node_(node) {} + + Node* node() const { return node_; } + Operator* op() const { return node()->op(); } + IrOpcode::Value opcode() const { return node()->opcode(); } + + bool HasProperty(Operator::Property property) const { + return op()->HasProperty(property); + } + Node* InputAt(int index) const { return node()->InputAt(index); } + +#define DEFINE_IS_OPCODE(Opcode) \ + bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; } + ALL_OP_LIST(DEFINE_IS_OPCODE) +#undef DEFINE_IS_OPCODE + + private: + Node* node_; +}; + + +// A pattern matcher for abitrary value constants. +template <typename T> +struct ValueMatcher : public NodeMatcher { + explicit ValueMatcher(Node* node) + : NodeMatcher(node), + value_(), + has_value_(CommonOperatorTraits<T>::HasValue(node->op())) { + if (has_value_) value_ = CommonOperatorTraits<T>::ValueOf(node->op()); + } + + bool HasValue() const { return has_value_; } + T Value() const { + DCHECK(HasValue()); + return value_; + } + + bool Is(T value) const { + return HasValue() && CommonOperatorTraits<T>::Equals(Value(), value); + } + + bool IsInRange(T low, T high) const { + return HasValue() && low <= value_ && value_ <= high; + } + + private: + T value_; + bool has_value_; +}; + + +// A pattern matcher for integer constants. 
+template <typename T> +struct IntMatcher V8_FINAL : public ValueMatcher<T> { + explicit IntMatcher(Node* node) : ValueMatcher<T>(node) {} + + bool IsPowerOf2() const { + return this->HasValue() && this->Value() > 0 && + (this->Value() & (this->Value() - 1)) == 0; + } +}; + +typedef IntMatcher<int32_t> Int32Matcher; +typedef IntMatcher<uint32_t> Uint32Matcher; +typedef IntMatcher<int64_t> Int64Matcher; +typedef IntMatcher<uint64_t> Uint64Matcher; + + +// A pattern matcher for floating point constants. +template <typename T> +struct FloatMatcher V8_FINAL : public ValueMatcher<T> { + explicit FloatMatcher(Node* node) : ValueMatcher<T>(node) {} + + bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); } +}; + +typedef FloatMatcher<double> Float64Matcher; + + +// For shorter pattern matching code, this struct matches both the left and +// right hand sides of a binary operation and can put constants on the right +// if they appear on the left hand side of a commutative operation. 
+template <typename Left, typename Right> +struct BinopMatcher V8_FINAL : public NodeMatcher { + explicit BinopMatcher(Node* node) + : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) { + if (HasProperty(Operator::kCommutative)) PutConstantOnRight(); + } + + const Left& left() const { return left_; } + const Right& right() const { return right_; } + + bool IsFoldable() const { return left().HasValue() && right().HasValue(); } + bool LeftEqualsRight() const { return left().node() == right().node(); } + + private: + void PutConstantOnRight() { + if (left().HasValue() && !right().HasValue()) { + std::swap(left_, right_); + node()->ReplaceInput(0, left().node()); + node()->ReplaceInput(1, right().node()); + } + } + + Left left_; + Right right_; +}; + +typedef BinopMatcher<Int32Matcher, Int32Matcher> Int32BinopMatcher; +typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher; +typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher; +typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher; +typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_NODE_MATCHERS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-properties.h nodejs-0.11.15/deps/v8/src/compiler/node-properties.h --- nodejs-0.11.13/deps/v8/src/compiler/node-properties.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/node-properties.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,57 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_NODE_PROPERTIES_H_ +#define V8_COMPILER_NODE_PROPERTIES_H_ + +#include "src/compiler/node.h" +#include "src/types.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Operator; + +// A facade that simplifies access to the different kinds of inputs to a node. +class NodeProperties { + public: + static inline Node* GetValueInput(Node* node, int index); + static inline Node* GetContextInput(Node* node); + static inline Node* GetEffectInput(Node* node, int index = 0); + static inline Node* GetControlInput(Node* node, int index = 0); + + static inline bool IsValueEdge(Node::Edge edge); + static inline bool IsContextEdge(Node::Edge edge); + static inline bool IsEffectEdge(Node::Edge edge); + static inline bool IsControlEdge(Node::Edge edge); + + static inline bool IsControl(Node* node); + + static inline void ReplaceControlInput(Node* node, Node* control); + static inline void ReplaceEffectInput(Node* node, Node* effect, + int index = 0); + static inline void RemoveNonValueInputs(Node* node); + + static inline Bounds GetBounds(Node* node); + static inline void SetBounds(Node* node, Bounds bounds); + + private: + static inline int FirstValueIndex(Node* node); + static inline int FirstContextIndex(Node* node); + static inline int FirstEffectIndex(Node* node); + static inline int FirstControlIndex(Node* node); + static inline int PastValueIndex(Node* node); + static inline int PastContextIndex(Node* node); + static inline int PastEffectIndex(Node* node); + static inline int PastControlIndex(Node* node); + + static inline bool IsInputRange(Node::Edge edge, int first, int count); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_NODE_PROPERTIES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/node-properties-inl.h nodejs-0.11.15/deps/v8/src/compiler/node-properties-inl.h --- nodejs-0.11.13/deps/v8/src/compiler/node-properties-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/compiler/node-properties-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,165 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_ +#define V8_COMPILER_NODE_PROPERTIES_INL_H_ + +#include "src/v8.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" +#include "src/compiler/operator-properties-inl.h" +#include "src/compiler/operator-properties.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// ----------------------------------------------------------------------------- +// Input layout. +// Inputs are always arranged in order as follows: +// 0 [ values, context, effects, control ] node->InputCount() + +inline int NodeProperties::FirstValueIndex(Node* node) { return 0; } + +inline int NodeProperties::FirstContextIndex(Node* node) { + return PastValueIndex(node); +} + +inline int NodeProperties::FirstEffectIndex(Node* node) { + return PastContextIndex(node); +} + +inline int NodeProperties::FirstControlIndex(Node* node) { + return PastEffectIndex(node); +} + + +inline int NodeProperties::PastValueIndex(Node* node) { + return FirstValueIndex(node) + + OperatorProperties::GetValueInputCount(node->op()); +} + +inline int NodeProperties::PastContextIndex(Node* node) { + return FirstContextIndex(node) + + OperatorProperties::GetContextInputCount(node->op()); +} + +inline int NodeProperties::PastEffectIndex(Node* node) { + return FirstEffectIndex(node) + + OperatorProperties::GetEffectInputCount(node->op()); +} + +inline int NodeProperties::PastControlIndex(Node* node) { + return FirstControlIndex(node) + + OperatorProperties::GetControlInputCount(node->op()); +} + + +// ----------------------------------------------------------------------------- +// 
Input accessors. + +inline Node* NodeProperties::GetValueInput(Node* node, int index) { + DCHECK(0 <= index && + index < OperatorProperties::GetValueInputCount(node->op())); + return node->InputAt(FirstValueIndex(node) + index); +} + +inline Node* NodeProperties::GetContextInput(Node* node) { + DCHECK(OperatorProperties::HasContextInput(node->op())); + return node->InputAt(FirstContextIndex(node)); +} + +inline Node* NodeProperties::GetEffectInput(Node* node, int index) { + DCHECK(0 <= index && + index < OperatorProperties::GetEffectInputCount(node->op())); + return node->InputAt(FirstEffectIndex(node) + index); +} + +inline Node* NodeProperties::GetControlInput(Node* node, int index) { + DCHECK(0 <= index && + index < OperatorProperties::GetControlInputCount(node->op())); + return node->InputAt(FirstControlIndex(node) + index); +} + + +// ----------------------------------------------------------------------------- +// Edge kinds. + +inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) { + // TODO(titzer): edge.index() is linear time; + // edges maybe need to be marked as value/effect/control. 
+ if (num == 0) return false; + int index = edge.index(); + return first <= index && index < first + num; +} + +inline bool NodeProperties::IsValueEdge(Node::Edge edge) { + Node* node = edge.from(); + return IsInputRange(edge, FirstValueIndex(node), + OperatorProperties::GetValueInputCount(node->op())); +} + +inline bool NodeProperties::IsContextEdge(Node::Edge edge) { + Node* node = edge.from(); + return IsInputRange(edge, FirstContextIndex(node), + OperatorProperties::GetContextInputCount(node->op())); +} + +inline bool NodeProperties::IsEffectEdge(Node::Edge edge) { + Node* node = edge.from(); + return IsInputRange(edge, FirstEffectIndex(node), + OperatorProperties::GetEffectInputCount(node->op())); +} + +inline bool NodeProperties::IsControlEdge(Node::Edge edge) { + Node* node = edge.from(); + return IsInputRange(edge, FirstControlIndex(node), + OperatorProperties::GetControlInputCount(node->op())); +} + + +// ----------------------------------------------------------------------------- +// Miscellaneous predicates. + +inline bool NodeProperties::IsControl(Node* node) { + return IrOpcode::IsControlOpcode(node->opcode()); +} + + +// ----------------------------------------------------------------------------- +// Miscellaneous mutators. + +inline void NodeProperties::ReplaceControlInput(Node* node, Node* control) { + node->ReplaceInput(FirstControlIndex(node), control); +} + +inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect, + int index) { + DCHECK(index < OperatorProperties::GetEffectInputCount(node->op())); + return node->ReplaceInput(FirstEffectIndex(node) + index, effect); +} + +inline void NodeProperties::RemoveNonValueInputs(Node* node) { + node->TrimInputCount(OperatorProperties::GetValueInputCount(node->op())); +} + + +// ----------------------------------------------------------------------------- +// Type Bounds. 
+ +inline Bounds NodeProperties::GetBounds(Node* node) { return node->bounds(); } + +inline void NodeProperties::SetBounds(Node* node, Bounds b) { + node->set_bounds(b); +} + + +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_NODE_PROPERTIES_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/opcodes.h nodejs-0.11.15/deps/v8/src/compiler/opcodes.h --- nodejs-0.11.13/deps/v8/src/compiler/opcodes.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/opcodes.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,297 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_OPCODES_H_ +#define V8_COMPILER_OPCODES_H_ + +// Opcodes for control operators. +#define CONTROL_OP_LIST(V) \ + V(Start) \ + V(Dead) \ + V(Loop) \ + V(End) \ + V(Branch) \ + V(IfTrue) \ + V(IfFalse) \ + V(Merge) \ + V(Return) \ + V(Throw) \ + V(Continuation) \ + V(LazyDeoptimization) \ + V(Deoptimize) + +// Opcodes for common operators. +#define LEAF_OP_LIST(V) \ + V(Int32Constant) \ + V(Int64Constant) \ + V(Float64Constant) \ + V(ExternalConstant) \ + V(NumberConstant) \ + V(HeapConstant) + +#define INNER_OP_LIST(V) \ + V(Phi) \ + V(EffectPhi) \ + V(FrameState) \ + V(StateValues) \ + V(Call) \ + V(Parameter) \ + V(Projection) + +#define COMMON_OP_LIST(V) \ + LEAF_OP_LIST(V) \ + INNER_OP_LIST(V) + +// Opcodes for JavaScript operators. 
+#define JS_COMPARE_BINOP_LIST(V) \ + V(JSEqual) \ + V(JSNotEqual) \ + V(JSStrictEqual) \ + V(JSStrictNotEqual) \ + V(JSLessThan) \ + V(JSGreaterThan) \ + V(JSLessThanOrEqual) \ + V(JSGreaterThanOrEqual) + +#define JS_BITWISE_BINOP_LIST(V) \ + V(JSBitwiseOr) \ + V(JSBitwiseXor) \ + V(JSBitwiseAnd) \ + V(JSShiftLeft) \ + V(JSShiftRight) \ + V(JSShiftRightLogical) + +#define JS_ARITH_BINOP_LIST(V) \ + V(JSAdd) \ + V(JSSubtract) \ + V(JSMultiply) \ + V(JSDivide) \ + V(JSModulus) + +#define JS_SIMPLE_BINOP_LIST(V) \ + JS_COMPARE_BINOP_LIST(V) \ + JS_BITWISE_BINOP_LIST(V) \ + JS_ARITH_BINOP_LIST(V) + +#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot) + +#define JS_CONVERSION_UNOP_LIST(V) \ + V(JSToBoolean) \ + V(JSToNumber) \ + V(JSToString) \ + V(JSToName) \ + V(JSToObject) + +#define JS_OTHER_UNOP_LIST(V) V(JSTypeOf) + +#define JS_SIMPLE_UNOP_LIST(V) \ + JS_LOGIC_UNOP_LIST(V) \ + JS_CONVERSION_UNOP_LIST(V) \ + JS_OTHER_UNOP_LIST(V) + +#define JS_OBJECT_OP_LIST(V) \ + V(JSCreate) \ + V(JSLoadProperty) \ + V(JSLoadNamed) \ + V(JSStoreProperty) \ + V(JSStoreNamed) \ + V(JSDeleteProperty) \ + V(JSHasProperty) \ + V(JSInstanceOf) + +#define JS_CONTEXT_OP_LIST(V) \ + V(JSLoadContext) \ + V(JSStoreContext) \ + V(JSCreateFunctionContext) \ + V(JSCreateCatchContext) \ + V(JSCreateWithContext) \ + V(JSCreateBlockContext) \ + V(JSCreateModuleContext) \ + V(JSCreateGlobalContext) + +#define JS_OTHER_OP_LIST(V) \ + V(JSCallConstruct) \ + V(JSCallFunction) \ + V(JSCallRuntime) \ + V(JSYield) \ + V(JSDebugger) + +#define JS_OP_LIST(V) \ + JS_SIMPLE_BINOP_LIST(V) \ + JS_SIMPLE_UNOP_LIST(V) \ + JS_OBJECT_OP_LIST(V) \ + JS_CONTEXT_OP_LIST(V) \ + JS_OTHER_OP_LIST(V) + +// Opcodes for VirtuaMachine-level operators. 
+#define SIMPLIFIED_OP_LIST(V) \ + V(BooleanNot) \ + V(NumberEqual) \ + V(NumberLessThan) \ + V(NumberLessThanOrEqual) \ + V(NumberAdd) \ + V(NumberSubtract) \ + V(NumberMultiply) \ + V(NumberDivide) \ + V(NumberModulus) \ + V(NumberToInt32) \ + V(NumberToUint32) \ + V(ReferenceEqual) \ + V(StringEqual) \ + V(StringLessThan) \ + V(StringLessThanOrEqual) \ + V(StringAdd) \ + V(ChangeTaggedToInt32) \ + V(ChangeTaggedToUint32) \ + V(ChangeTaggedToFloat64) \ + V(ChangeInt32ToTagged) \ + V(ChangeUint32ToTagged) \ + V(ChangeFloat64ToTagged) \ + V(ChangeBoolToBit) \ + V(ChangeBitToBool) \ + V(LoadField) \ + V(LoadElement) \ + V(StoreField) \ + V(StoreElement) + +// Opcodes for Machine-level operators. +#define MACHINE_OP_LIST(V) \ + V(Load) \ + V(Store) \ + V(Word32And) \ + V(Word32Or) \ + V(Word32Xor) \ + V(Word32Shl) \ + V(Word32Shr) \ + V(Word32Sar) \ + V(Word32Equal) \ + V(Word64And) \ + V(Word64Or) \ + V(Word64Xor) \ + V(Word64Shl) \ + V(Word64Shr) \ + V(Word64Sar) \ + V(Word64Equal) \ + V(Int32Add) \ + V(Int32AddWithOverflow) \ + V(Int32Sub) \ + V(Int32SubWithOverflow) \ + V(Int32Mul) \ + V(Int32Div) \ + V(Int32UDiv) \ + V(Int32Mod) \ + V(Int32UMod) \ + V(Int32LessThan) \ + V(Int32LessThanOrEqual) \ + V(Uint32LessThan) \ + V(Uint32LessThanOrEqual) \ + V(Int64Add) \ + V(Int64Sub) \ + V(Int64Mul) \ + V(Int64Div) \ + V(Int64UDiv) \ + V(Int64Mod) \ + V(Int64UMod) \ + V(Int64LessThan) \ + V(Int64LessThanOrEqual) \ + V(ConvertInt64ToInt32) \ + V(ConvertInt32ToInt64) \ + V(ChangeInt32ToFloat64) \ + V(ChangeUint32ToFloat64) \ + V(ChangeFloat64ToInt32) \ + V(ChangeFloat64ToUint32) \ + V(Float64Add) \ + V(Float64Sub) \ + V(Float64Mul) \ + V(Float64Div) \ + V(Float64Mod) \ + V(Float64Equal) \ + V(Float64LessThan) \ + V(Float64LessThanOrEqual) + +#define VALUE_OP_LIST(V) \ + COMMON_OP_LIST(V) \ + SIMPLIFIED_OP_LIST(V) \ + MACHINE_OP_LIST(V) \ + JS_OP_LIST(V) + +// The combination of all operators at all levels and the common operators. 
+#define ALL_OP_LIST(V) \ + CONTROL_OP_LIST(V) \ + VALUE_OP_LIST(V) + +namespace v8 { +namespace internal { +namespace compiler { + +// Declare an enumeration with all the opcodes at all levels so that they +// can be globally, uniquely numbered. +class IrOpcode { + public: + enum Value { +#define DECLARE_OPCODE(x) k##x, + ALL_OP_LIST(DECLARE_OPCODE) +#undef DECLARE_OPCODE + kLast = -1 +#define COUNT_OPCODE(x) +1 + ALL_OP_LIST(COUNT_OPCODE) +#undef COUNT_OPCODE + }; + + // Returns the mnemonic name of an opcode. + static const char* Mnemonic(Value val) { + switch (val) { +#define RETURN_NAME(x) \ + case k##x: \ + return #x; + ALL_OP_LIST(RETURN_NAME) +#undef RETURN_NAME + default: + return "UnknownOpcode"; + } + } + + static bool IsJsOpcode(Value val) { + switch (val) { +#define RETURN_NAME(x) \ + case k##x: \ + return true; + JS_OP_LIST(RETURN_NAME) +#undef RETURN_NAME + default: + return false; + } + } + + static bool IsControlOpcode(Value val) { + switch (val) { +#define RETURN_NAME(x) \ + case k##x: \ + return true; + CONTROL_OP_LIST(RETURN_NAME) +#undef RETURN_NAME + default: + return false; + } + } + + static bool IsCommonOpcode(Value val) { + switch (val) { +#define RETURN_NAME(x) \ + case k##x: \ + return true; + CONTROL_OP_LIST(RETURN_NAME) + COMMON_OP_LIST(RETURN_NAME) +#undef RETURN_NAME + default: + return false; + } + } +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_OPCODES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/operator.h nodejs-0.11.15/deps/v8/src/compiler/operator.h --- nodejs-0.11.13/deps/v8/src/compiler/operator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/operator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,280 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_OPERATOR_H_ +#define V8_COMPILER_OPERATOR_H_ + +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/ostreams.h" +#include "src/unique.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// An operator represents description of the "computation" of a node in the +// compiler IR. A computation takes values (i.e. data) as input and produces +// zero or more values as output. The side-effects of a computation must be +// captured by additional control and data dependencies which are part of the +// IR graph. +// Operators are immutable and describe the statically-known parts of a +// computation. Thus they can be safely shared by many different nodes in the +// IR graph, or even globally between graphs. Operators can have "static +// parameters" which are compile-time constant parameters to the operator, such +// as the name for a named field access, the ID of a runtime function, etc. +// Static parameters are private to the operator and only semantically +// meaningful to the operator itself. +class Operator : public ZoneObject { + public: + Operator(uint8_t opcode, uint16_t properties) + : opcode_(opcode), properties_(properties) {} + virtual ~Operator() {} + + // Properties inform the operator-independent optimizer about legal + // transformations for nodes that have this operator. + enum Property { + kNoProperties = 0, + kReducible = 1 << 0, // Participates in strength reduction. + kCommutative = 1 << 1, // OP(a, b) == OP(b, a) for all inputs. + kAssociative = 1 << 2, // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs. + kIdempotent = 1 << 3, // OP(a); OP(a) == OP(a). + kNoRead = 1 << 4, // Has no scheduling dependency on Effects + kNoWrite = 1 << 5, // Does not modify any Effects and thereby + // create new scheduling dependencies. + kNoThrow = 1 << 6, // Can never generate an exception. 
+ kFoldable = kNoRead | kNoWrite, + kEliminatable = kNoWrite | kNoThrow, + kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent + }; + + // A small integer unique to all instances of a particular kind of operator, + // useful for quick matching for specific kinds of operators. For fast access + // the opcode is stored directly in the operator object. + inline uint8_t opcode() const { return opcode_; } + + // Returns a constant string representing the mnemonic of the operator, + // without the static parameters. Useful for debugging. + virtual const char* mnemonic() = 0; + + // Check if this operator equals another operator. Equivalent operators can + // be merged, and nodes with equivalent operators and equivalent inputs + // can be merged. + virtual bool Equals(Operator* other) = 0; + + // Compute a hashcode to speed up equivalence-set checking. + // Equal operators should always have equal hashcodes, and unequal operators + // should have unequal hashcodes with high probability. + virtual int HashCode() = 0; + + // Check whether this operator has the given property. + inline bool HasProperty(Property property) const { + return (properties_ & static_cast<int>(property)) == property; + } + + // Number of data inputs to the operator, for verifying graph structure. + virtual int InputCount() = 0; + + // Number of data outputs from the operator, for verifying graph structure. + virtual int OutputCount() = 0; + + inline Property properties() { return static_cast<Property>(properties_); } + + // TODO(titzer): API for input and output types, for typechecking graph. + private: + // Print the full operator into the given stream, including any + // static parameters. Useful for debugging and visualizing the IR. 
+ virtual OStream& PrintTo(OStream& os) const = 0; // NOLINT + friend OStream& operator<<(OStream& os, const Operator& op); + + uint8_t opcode_; + uint16_t properties_; +}; + +OStream& operator<<(OStream& os, const Operator& op); + +// An implementation of Operator that has no static parameters. Such operators +// have just a name, an opcode, and a fixed number of inputs and outputs. +// They can represented by singletons and shared globally. +class SimpleOperator : public Operator { + public: + SimpleOperator(uint8_t opcode, uint16_t properties, int input_count, + int output_count, const char* mnemonic) + : Operator(opcode, properties), + input_count_(input_count), + output_count_(output_count), + mnemonic_(mnemonic) {} + + virtual const char* mnemonic() { return mnemonic_; } + virtual bool Equals(Operator* that) { return opcode() == that->opcode(); } + virtual int HashCode() { return opcode(); } + virtual int InputCount() { return input_count_; } + virtual int OutputCount() { return output_count_; } + + private: + virtual OStream& PrintTo(OStream& os) const { // NOLINT + return os << mnemonic_; + } + + int input_count_; + int output_count_; + const char* mnemonic_; +}; + +// Template specialization implements a kind of type class for dealing with the +// static parameters of Operator1 automatically. +template <typename T> +struct StaticParameterTraits { + static OStream& PrintTo(OStream& os, T val) { // NOLINT + return os << "??"; + } + static int HashCode(T a) { return 0; } + static bool Equals(T a, T b) { + return false; // Not every T has a ==. By default, be conservative. 
+ } +}; + +template <> +struct StaticParameterTraits<ExternalReference> { + static OStream& PrintTo(OStream& os, ExternalReference val) { // NOLINT + os << val.address(); + const Runtime::Function* function = + Runtime::FunctionForEntry(val.address()); + if (function != NULL) { + os << " <" << function->name << ".entry>"; + } + return os; + } + static int HashCode(ExternalReference a) { + return reinterpret_cast<intptr_t>(a.address()) & 0xFFFFFFFF; + } + static bool Equals(ExternalReference a, ExternalReference b) { + return a == b; + } +}; + +// Specialization for static parameters of type {int}. +template <> +struct StaticParameterTraits<int> { + static OStream& PrintTo(OStream& os, int val) { // NOLINT + return os << val; + } + static int HashCode(int a) { return a; } + static bool Equals(int a, int b) { return a == b; } +}; + +// Specialization for static parameters of type {double}. +template <> +struct StaticParameterTraits<double> { + static OStream& PrintTo(OStream& os, double val) { // NOLINT + return os << val; + } + static int HashCode(double a) { + return static_cast<int>(BitCast<int64_t>(a)); + } + static bool Equals(double a, double b) { + return BitCast<int64_t>(a) == BitCast<int64_t>(b); + } +}; + +// Specialization for static parameters of type {PrintableUnique<Object>}. +template <> +struct StaticParameterTraits<PrintableUnique<Object> > { + static OStream& PrintTo(OStream& os, PrintableUnique<Object> val) { // NOLINT + return os << val.string(); + } + static int HashCode(PrintableUnique<Object> a) { + return static_cast<int>(a.Hashcode()); + } + static bool Equals(PrintableUnique<Object> a, PrintableUnique<Object> b) { + return a == b; + } +}; + +// Specialization for static parameters of type {PrintableUnique<Name>}. 
+template <> +struct StaticParameterTraits<PrintableUnique<Name> > { + static OStream& PrintTo(OStream& os, PrintableUnique<Name> val) { // NOLINT + return os << val.string(); + } + static int HashCode(PrintableUnique<Name> a) { + return static_cast<int>(a.Hashcode()); + } + static bool Equals(PrintableUnique<Name> a, PrintableUnique<Name> b) { + return a == b; + } +}; + +#if DEBUG +// Specialization for static parameters of type {Handle<Object>} to prevent any +// direct usage of Handles in constants. +template <> +struct StaticParameterTraits<Handle<Object> > { + static OStream& PrintTo(OStream& os, Handle<Object> val) { // NOLINT + UNREACHABLE(); // Should use PrintableUnique<Object> instead + return os; + } + static int HashCode(Handle<Object> a) { + UNREACHABLE(); // Should use PrintableUnique<Object> instead + return 0; + } + static bool Equals(Handle<Object> a, Handle<Object> b) { + UNREACHABLE(); // Should use PrintableUnique<Object> instead + return false; + } +}; +#endif + +// A templatized implementation of Operator that has one static parameter of +// type {T}. If a specialization of StaticParameterTraits<{T}> exists, then +// operators of this kind can automatically be hashed, compared, and printed. 
+template <typename T> +class Operator1 : public Operator { + public: + Operator1(uint8_t opcode, uint16_t properties, int input_count, + int output_count, const char* mnemonic, T parameter) + : Operator(opcode, properties), + input_count_(input_count), + output_count_(output_count), + mnemonic_(mnemonic), + parameter_(parameter) {} + + const T& parameter() const { return parameter_; } + + virtual const char* mnemonic() { return mnemonic_; } + virtual bool Equals(Operator* other) { + if (opcode() != other->opcode()) return false; + Operator1<T>* that = static_cast<Operator1<T>*>(other); + T temp1 = this->parameter_; + T temp2 = that->parameter_; + return StaticParameterTraits<T>::Equals(temp1, temp2); + } + virtual int HashCode() { + return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_); + } + virtual int InputCount() { return input_count_; } + virtual int OutputCount() { return output_count_; } + virtual OStream& PrintParameter(OStream& os) const { // NOLINT + return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]"; + } + + private: + virtual OStream& PrintTo(OStream& os) const { // NOLINT + return PrintParameter(os << mnemonic_); + } + + int input_count_; + int output_count_; + const char* mnemonic_; + T parameter_; +}; + +// Type definitions for operators with specific types of parameters. +typedef Operator1<PrintableUnique<Name> > NameOperator; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_OPERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/operator-properties.h nodejs-0.11.15/deps/v8/src/compiler/operator-properties.h --- nodejs-0.11.13/deps/v8/src/compiler/operator-properties.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/operator-properties.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,49 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_ +#define V8_COMPILER_OPERATOR_PROPERTIES_H_ + +#include "src/v8.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Operator; + +class OperatorProperties { + public: + static inline bool HasValueInput(Operator* node); + static inline bool HasContextInput(Operator* node); + static inline bool HasEffectInput(Operator* node); + static inline bool HasControlInput(Operator* node); + + static inline int GetValueInputCount(Operator* op); + static inline int GetContextInputCount(Operator* op); + static inline int GetEffectInputCount(Operator* op); + static inline int GetControlInputCount(Operator* op); + static inline int GetTotalInputCount(Operator* op); + + static inline bool HasValueOutput(Operator* op); + static inline bool HasEffectOutput(Operator* op); + static inline bool HasControlOutput(Operator* op); + + static inline int GetValueOutputCount(Operator* op); + static inline int GetEffectOutputCount(Operator* op); + static inline int GetControlOutputCount(Operator* op); + + static inline bool IsBasicBlockBegin(Operator* op); + + static inline bool CanBeScheduled(Operator* op); + static inline bool HasFixedSchedulePosition(Operator* op); + static inline bool IsScheduleRoot(Operator* op); + + static inline bool CanLazilyDeoptimize(Operator* op); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_OPERATOR_PROPERTIES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/operator-properties-inl.h nodejs-0.11.15/deps/v8/src/compiler/operator-properties-inl.h --- nodejs-0.11.13/deps/v8/src/compiler/operator-properties-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/operator-properties-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,191 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_ +#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_ + +#include "src/v8.h" + +#include "src/compiler/js-operator.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator-properties.h" + +namespace v8 { +namespace internal { +namespace compiler { + +inline bool OperatorProperties::HasValueInput(Operator* op) { + return OperatorProperties::GetValueInputCount(op) > 0; +} + +inline bool OperatorProperties::HasContextInput(Operator* op) { + IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode()); + return IrOpcode::IsJsOpcode(opcode); +} + +inline bool OperatorProperties::HasEffectInput(Operator* op) { + return OperatorProperties::GetEffectInputCount(op) > 0; +} + +inline bool OperatorProperties::HasControlInput(Operator* op) { + return OperatorProperties::GetControlInputCount(op) > 0; +} + + +inline int OperatorProperties::GetValueInputCount(Operator* op) { + return op->InputCount(); +} + +inline int OperatorProperties::GetContextInputCount(Operator* op) { + return OperatorProperties::HasContextInput(op) ? 1 : 0; +} + +inline int OperatorProperties::GetEffectInputCount(Operator* op) { + if (op->opcode() == IrOpcode::kEffectPhi) { + return static_cast<Operator1<int>*>(op)->parameter(); + } + if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite)) + return 0; // no effects. + return 1; +} + +inline int OperatorProperties::GetControlInputCount(Operator* op) { + switch (op->opcode()) { + case IrOpcode::kPhi: + case IrOpcode::kEffectPhi: + return 1; +#define OPCODE_CASE(x) case IrOpcode::k##x: + CONTROL_OP_LIST(OPCODE_CASE) +#undef OPCODE_CASE + return static_cast<ControlOperator*>(op)->ControlInputCount(); + default: + // If a node can lazily deoptimize, it needs control dependency. + if (CanLazilyDeoptimize(op)) { + return 1; + } + // Operators that have write effects must have a control + // dependency. 
Effect dependencies only ensure the correct order of + // write/read operations without consideration of control flow. Without an + // explicit control dependency writes can be float in the schedule too + // early along a path that shouldn't generate a side-effect. + return op->HasProperty(Operator::kNoWrite) ? 0 : 1; + } + return 0; +} + +inline int OperatorProperties::GetTotalInputCount(Operator* op) { + return GetValueInputCount(op) + GetContextInputCount(op) + + GetEffectInputCount(op) + GetControlInputCount(op); +} + +// ----------------------------------------------------------------------------- +// Output properties. + +inline bool OperatorProperties::HasValueOutput(Operator* op) { + return GetValueOutputCount(op) > 0; +} + +inline bool OperatorProperties::HasEffectOutput(Operator* op) { + return op->opcode() == IrOpcode::kStart || GetEffectInputCount(op) > 0; +} + +inline bool OperatorProperties::HasControlOutput(Operator* op) { + IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode()); + return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode)) || + CanLazilyDeoptimize(op); +} + + +inline int OperatorProperties::GetValueOutputCount(Operator* op) { + return op->OutputCount(); +} + +inline int OperatorProperties::GetEffectOutputCount(Operator* op) { + return HasEffectOutput(op) ? 1 : 0; +} + +inline int OperatorProperties::GetControlOutputCount(Operator* node) { + return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 
1 + : 0; +} + + +inline bool OperatorProperties::IsBasicBlockBegin(Operator* op) { + uint8_t opcode = op->opcode(); + return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd || + opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop || + opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue || + opcode == IrOpcode::kIfFalse; +} + +inline bool OperatorProperties::CanBeScheduled(Operator* op) { return true; } + +inline bool OperatorProperties::HasFixedSchedulePosition(Operator* op) { + IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode()); + return (IrOpcode::IsControlOpcode(opcode)) || + opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi || + opcode == IrOpcode::kPhi; +} + +inline bool OperatorProperties::IsScheduleRoot(Operator* op) { + uint8_t opcode = op->opcode(); + return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi || + opcode == IrOpcode::kPhi; +} + +inline bool OperatorProperties::CanLazilyDeoptimize(Operator* op) { + // TODO(jarin) This function allows turning on lazy deoptimization + // incrementally. It will change as we turn on lazy deopt for + // more nodes. + + if (!FLAG_turbo_deoptimization) { + return false; + } + + switch (op->opcode()) { + case IrOpcode::kCall: { + CallOperator* call_op = reinterpret_cast<CallOperator*>(op); + CallDescriptor* descriptor = call_op->parameter(); + return descriptor->CanLazilyDeoptimize(); + } + case IrOpcode::kJSCallRuntime: { + Runtime::FunctionId function = + reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter(); + // TODO(jarin) At the moment, we only support lazy deoptimization for + // the %DeoptimizeFunction runtime function. 
+ return function == Runtime::kDeoptimizeFunction; + } + + // JS function calls + case IrOpcode::kJSCallFunction: + case IrOpcode::kJSCallConstruct: + + // Binary operations + case IrOpcode::kJSBitwiseOr: + case IrOpcode::kJSBitwiseXor: + case IrOpcode::kJSBitwiseAnd: + case IrOpcode::kJSShiftLeft: + case IrOpcode::kJSShiftRight: + case IrOpcode::kJSShiftRightLogical: + case IrOpcode::kJSAdd: + case IrOpcode::kJSSubtract: + case IrOpcode::kJSMultiply: + case IrOpcode::kJSDivide: + case IrOpcode::kJSModulus: + case IrOpcode::kJSLoadProperty: + case IrOpcode::kJSStoreProperty: + case IrOpcode::kJSLoadNamed: + case IrOpcode::kJSStoreNamed: + return true; + + default: + return false; + } + return false; +} +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/phi-reducer.h nodejs-0.11.15/deps/v8/src/compiler/phi-reducer.h --- nodejs-0.11.13/deps/v8/src/compiler/phi-reducer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/phi-reducer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_PHI_REDUCER_H_ +#define V8_COMPILER_PHI_REDUCER_H_ + +#include "src/compiler/graph-reducer.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Replaces redundant phis if all the inputs are the same or the phi itself. 
+class PhiReducer V8_FINAL : public Reducer { + public: + virtual Reduction Reduce(Node* node) V8_OVERRIDE { + if (node->opcode() != IrOpcode::kPhi && + node->opcode() != IrOpcode::kEffectPhi) + return NoChange(); + + int n = node->op()->InputCount(); + if (n == 1) return Replace(node->InputAt(0)); + + Node* replacement = NULL; + Node::Inputs inputs = node->inputs(); + for (InputIter it = inputs.begin(); n > 0; --n, ++it) { + Node* input = *it; + if (input != node && input != replacement) { + if (replacement != NULL) return NoChange(); + replacement = input; + } + } + DCHECK_NE(node, replacement); + return Replace(replacement); + } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_PHI_REDUCER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/pipeline.cc nodejs-0.11.15/deps/v8/src/compiler/pipeline.cc --- nodejs-0.11.13/deps/v8/src/compiler/pipeline.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/pipeline.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,341 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/pipeline.h" + +#include "src/base/platform/elapsed-timer.h" +#include "src/compiler/ast-graph-builder.h" +#include "src/compiler/code-generator.h" +#include "src/compiler/graph-replay.h" +#include "src/compiler/graph-visualizer.h" +#include "src/compiler/instruction.h" +#include "src/compiler/instruction-selector.h" +#include "src/compiler/js-context-specialization.h" +#include "src/compiler/js-generic-lowering.h" +#include "src/compiler/js-typed-lowering.h" +#include "src/compiler/phi-reducer.h" +#include "src/compiler/register-allocator.h" +#include "src/compiler/schedule.h" +#include "src/compiler/scheduler.h" +#include "src/compiler/simplified-lowering.h" +#include "src/compiler/typer.h" +#include "src/compiler/verifier.h" +#include "src/hydrogen.h" +#include "src/ostreams.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class PhaseStats { + public: + enum PhaseKind { CREATE_GRAPH, OPTIMIZATION, CODEGEN }; + + PhaseStats(CompilationInfo* info, PhaseKind kind, const char* name) + : info_(info), + kind_(kind), + name_(name), + size_(info->zone()->allocation_size()) { + if (FLAG_turbo_stats) { + timer_.Start(); + } + } + + ~PhaseStats() { + if (FLAG_turbo_stats) { + base::TimeDelta delta = timer_.Elapsed(); + size_t bytes = info_->zone()->allocation_size() - size_; + HStatistics* stats = info_->isolate()->GetTStatistics(); + stats->SaveTiming(name_, delta, static_cast<int>(bytes)); + + switch (kind_) { + case CREATE_GRAPH: + stats->IncrementCreateGraph(delta); + break; + case OPTIMIZATION: + stats->IncrementOptimizeGraph(delta); + break; + case CODEGEN: + stats->IncrementGenerateCode(delta); + break; + } + } + } + + private: + CompilationInfo* info_; + PhaseKind kind_; + const char* name_; + size_t size_; + base::ElapsedTimer timer_; +}; + + +void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) { + if (FLAG_trace_turbo) { + char buffer[256]; + Vector<char> filename(buffer, 
sizeof(buffer)); + SmartArrayPointer<char> functionname = + info_->shared_info()->DebugName()->ToCString(); + if (strlen(functionname.get()) > 0) { + SNPrintF(filename, "turbo-%s-%s.dot", functionname.get(), phase); + } else { + SNPrintF(filename, "turbo-%p-%s.dot", static_cast<void*>(info_), phase); + } + std::replace(filename.start(), filename.start() + filename.length(), ' ', + '_'); + FILE* file = base::OS::FOpen(filename.start(), "w+"); + OFStream of(file); + of << AsDOT(*graph); + fclose(file); + + OFStream os(stdout); + os << "-- " << phase << " graph printed to file " << filename.start() + << "\n"; + } + if (VerifyGraphs()) Verifier::Run(graph); +} + + +class AstGraphBuilderWithPositions : public AstGraphBuilder { + public: + explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph, + SourcePositionTable* source_positions) + : AstGraphBuilder(info, jsgraph), source_positions_(source_positions) {} + + bool CreateGraph() { + SourcePositionTable::Scope pos(source_positions_, + SourcePosition::Unknown()); + return AstGraphBuilder::CreateGraph(); + } + +#define DEF_VISIT(type) \ + virtual void Visit##type(type* node) V8_OVERRIDE { \ + SourcePositionTable::Scope pos(source_positions_, \ + SourcePosition(node->position())); \ + AstGraphBuilder::Visit##type(node); \ + } + AST_NODE_LIST(DEF_VISIT) +#undef DEF_VISIT + + private: + SourcePositionTable* source_positions_; +}; + + +static void TraceSchedule(Schedule* schedule) { + if (!FLAG_trace_turbo) return; + OFStream os(stdout); + os << "-- Schedule --------------------------------------\n" << *schedule; +} + + +Handle<Code> Pipeline::GenerateCode() { + if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_); + + if (FLAG_trace_turbo) { + OFStream os(stdout); + os << "---------------------------------------------------\n" + << "Begin compiling method " + << info()->function()->debug_name()->ToCString().get() + << " using Turbofan" << endl; + } + + // Build the graph. 
+ Graph graph(zone()); + SourcePositionTable source_positions(&graph); + source_positions.AddDecorator(); + // TODO(turbofan): there is no need to type anything during initial graph + // construction. This is currently only needed for the node cache, which the + // typer could sweep over later. + Typer typer(zone()); + CommonOperatorBuilder common(zone()); + JSGraph jsgraph(&graph, &common, &typer); + Node* context_node; + { + PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH, + "graph builder"); + AstGraphBuilderWithPositions graph_builder(info(), &jsgraph, + &source_positions); + graph_builder.CreateGraph(); + context_node = graph_builder.GetFunctionContext(); + } + { + PhaseStats phi_reducer_stats(info(), PhaseStats::CREATE_GRAPH, + "phi reduction"); + PhiReducer phi_reducer; + GraphReducer graph_reducer(&graph); + graph_reducer.AddReducer(&phi_reducer); + graph_reducer.ReduceGraph(); + // TODO(mstarzinger): Running reducer once ought to be enough for everyone. + graph_reducer.ReduceGraph(); + graph_reducer.ReduceGraph(); + } + + VerifyAndPrintGraph(&graph, "Initial untyped"); + + if (FLAG_context_specialization) { + SourcePositionTable::Scope pos_(&source_positions, + SourcePosition::Unknown()); + // Specialize the code to the context as aggressively as possible. + JSContextSpecializer spec(info(), &jsgraph, context_node); + spec.SpecializeToContext(); + VerifyAndPrintGraph(&graph, "Context specialized"); + } + + // Print a replay of the initial graph. + if (FLAG_print_turbo_replay) { + GraphReplayPrinter::PrintReplay(&graph); + } + + if (FLAG_turbo_types) { + { + // Type the graph. + PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer"); + typer.Run(&graph, info()->context()); + } + // All new nodes must be typed. + typer.DecorateGraph(&graph); + { + // Lower JSOperators where we can determine types. 
+ PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH, + "typed lowering"); + JSTypedLowering lowering(&jsgraph, &source_positions); + lowering.LowerAllNodes(); + + VerifyAndPrintGraph(&graph, "Lowered typed"); + } + } + + Handle<Code> code = Handle<Code>::null(); + if (SupportedTarget()) { + { + // Lower any remaining generic JSOperators. + PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH, + "generic lowering"); + MachineOperatorBuilder machine(zone()); + JSGenericLowering lowering(info(), &jsgraph, &machine, &source_positions); + lowering.LowerAllNodes(); + + VerifyAndPrintGraph(&graph, "Lowered generic"); + } + + // Compute a schedule. + Schedule* schedule = ComputeSchedule(&graph); + TraceSchedule(schedule); + + { + // Generate optimized code. + PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen"); + Linkage linkage(info()); + code = GenerateCode(&linkage, &graph, schedule, &source_positions); + info()->SetCode(code); + } + + // Print optimized code. + v8::internal::CodeGenerator::PrintCode(code, info()); + } + + if (FLAG_trace_turbo) { + OFStream os(stdout); + os << "--------------------------------------------------\n" + << "Finished compiling method " + << info()->function()->debug_name()->ToCString().get() + << " using Turbofan" << endl; + } + + return code; +} + + +Schedule* Pipeline::ComputeSchedule(Graph* graph) { + PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling"); + return Scheduler::ComputeSchedule(graph); +} + + +Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage, + Graph* graph, + Schedule* schedule) { + CHECK(SupportedBackend()); + if (schedule == NULL) { + VerifyAndPrintGraph(graph, "Machine"); + schedule = ComputeSchedule(graph); + } + TraceSchedule(schedule); + + SourcePositionTable source_positions(graph); + Handle<Code> code = GenerateCode(linkage, graph, schedule, &source_positions); +#if ENABLE_DISASSEMBLER + if (!code.is_null() && FLAG_print_opt_code) { + CodeTracer::Scope 
tracing_scope(isolate()->GetCodeTracer()); + OFStream os(tracing_scope.file()); + code->Disassemble("test code", os); + } +#endif + return code; +} + + +Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph, + Schedule* schedule, + SourcePositionTable* source_positions) { + DCHECK_NOT_NULL(graph); + DCHECK_NOT_NULL(linkage); + DCHECK_NOT_NULL(schedule); + CHECK(SupportedBackend()); + + InstructionSequence sequence(linkage, graph, schedule); + + // Select and schedule instructions covering the scheduled graph. + { + InstructionSelector selector(&sequence, source_positions); + selector.SelectInstructions(); + } + + if (FLAG_trace_turbo) { + OFStream os(stdout); + os << "----- Instruction sequence before register allocation -----\n" + << sequence; + } + + // Allocate registers. + { + int node_count = graph->NodeCount(); + if (node_count > UnallocatedOperand::kMaxVirtualRegisters) { + linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersForValues); + return Handle<Code>::null(); + } + RegisterAllocator allocator(&sequence); + if (!allocator.Allocate()) { + linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc); + return Handle<Code>::null(); + } + } + + if (FLAG_trace_turbo) { + OFStream os(stdout); + os << "----- Instruction sequence after register allocation -----\n" + << sequence; + } + + // Generate native sequence. 
+ CodeGenerator generator(&sequence); + return generator.GenerateCode(); +} + + +void Pipeline::SetUp() { + InstructionOperand::SetUpCaches(); +} + + +void Pipeline::TearDown() { + InstructionOperand::TearDownCaches(); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/pipeline.h nodejs-0.11.15/deps/v8/src/compiler/pipeline.h --- nodejs-0.11.13/deps/v8/src/compiler/pipeline.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/pipeline.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_PIPELINE_H_ +#define V8_COMPILER_PIPELINE_H_ + +#include "src/v8.h" + +#include "src/compiler.h" + +// Note: TODO(turbofan) implies a performance improvement opportunity, +// and TODO(name) implies an incomplete implementation + +namespace v8 { +namespace internal { +namespace compiler { + +// Clients of this interface shouldn't depend on lots of compiler internals. +class CallDescriptor; +class Graph; +class Schedule; +class SourcePositionTable; +class Linkage; + +class Pipeline { + public: + explicit Pipeline(CompilationInfo* info) : info_(info) {} + + // Run the entire pipeline and generate a handle to a code object. + Handle<Code> GenerateCode(); + + // Run the pipeline on a machine graph and generate code. If {schedule} + // is {NULL}, then compute a new schedule for code generation. 
+ Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph, + Schedule* schedule = NULL); + + CompilationInfo* info() const { return info_; } + Zone* zone() { return info_->zone(); } + Isolate* isolate() { return info_->isolate(); } + + static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; } + static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; } + + static inline bool VerifyGraphs() { +#ifdef DEBUG + return true; +#else + return FLAG_turbo_verify; +#endif + } + + static void SetUp(); + static void TearDown(); + + private: + CompilationInfo* info_; + + Schedule* ComputeSchedule(Graph* graph); + void VerifyAndPrintGraph(Graph* graph, const char* phase); + Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule, + SourcePositionTable* source_positions); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_PIPELINE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/raw-machine-assembler.cc nodejs-0.11.15/deps/v8/src/compiler/raw-machine-assembler.cc --- nodejs-0.11.13/deps/v8/src/compiler/raw-machine-assembler.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/raw-machine-assembler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,158 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/pipeline.h" +#include "src/compiler/raw-machine-assembler.h" +#include "src/compiler/scheduler.h" + +namespace v8 { +namespace internal { +namespace compiler { + +RawMachineAssembler::RawMachineAssembler( + Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder, + MachineType word) + : GraphBuilder(graph), + schedule_(new (zone()) Schedule(zone())), + machine_(zone(), word), + common_(zone()), + call_descriptor_builder_(call_descriptor_builder), + parameters_(NULL), + exit_label_(schedule()->exit()), + current_block_(schedule()->entry()) { + Node* s = graph->NewNode(common_.Start(parameter_count())); + graph->SetStart(s); + if (parameter_count() == 0) return; + parameters_ = zone()->NewArray<Node*>(parameter_count()); + for (int i = 0; i < parameter_count(); ++i) { + parameters_[i] = NewNode(common()->Parameter(i), graph->start()); + } +} + + +Schedule* RawMachineAssembler::Export() { + // Compute the correct codegen order. + DCHECK(schedule_->rpo_order()->empty()); + Scheduler::ComputeSpecialRPO(schedule_); + // Invalidate MachineAssembler. 
+ Schedule* schedule = schedule_; + schedule_ = NULL; + return schedule; +} + + +Node* RawMachineAssembler::Parameter(int index) { + DCHECK(0 <= index && index < parameter_count()); + return parameters_[index]; +} + + +RawMachineAssembler::Label* RawMachineAssembler::Exit() { + exit_label_.used_ = true; + return &exit_label_; +} + + +void RawMachineAssembler::Goto(Label* label) { + DCHECK(current_block_ != schedule()->exit()); + schedule()->AddGoto(CurrentBlock(), Use(label)); + current_block_ = NULL; +} + + +void RawMachineAssembler::Branch(Node* condition, Label* true_val, + Label* false_val) { + DCHECK(current_block_ != schedule()->exit()); + Node* branch = NewNode(common()->Branch(), condition); + schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val)); + current_block_ = NULL; +} + + +void RawMachineAssembler::Return(Node* value) { + schedule()->AddReturn(CurrentBlock(), value); + current_block_ = NULL; +} + + +void RawMachineAssembler::Deoptimize(Node* state) { + Node* deopt = graph()->NewNode(common()->Deoptimize(), state); + schedule()->AddDeoptimize(CurrentBlock(), deopt); + current_block_ = NULL; +} + + +Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver, + Label* continuation, Label* deoptimization) { + CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone()); + Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver); + schedule()->AddCall(CurrentBlock(), call, Use(continuation), + Use(deoptimization)); + current_block_ = NULL; + return call; +} + + +Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function, + Node* arg0, Label* continuation, + Label* deoptimization) { + CallDescriptor* descriptor = + Linkage::GetRuntimeCallDescriptor(function, 1, Operator::kNoProperties, + CallDescriptor::kCanDeoptimize, zone()); + + Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode()); + Node* ref = NewNode( + common()->ExternalConstant(ExternalReference(function, isolate()))); 
+ Node* arity = Int32Constant(1); + Node* context = Parameter(1); + + Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref, + arity, context); + schedule()->AddCall(CurrentBlock(), call, Use(continuation), + Use(deoptimization)); + current_block_ = NULL; + return call; +} + + +void RawMachineAssembler::Bind(Label* label) { + DCHECK(current_block_ == NULL); + DCHECK(!label->bound_); + label->bound_ = true; + current_block_ = EnsureBlock(label); +} + + +BasicBlock* RawMachineAssembler::Use(Label* label) { + label->used_ = true; + return EnsureBlock(label); +} + + +BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) { + if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock(); + return label->block_; +} + + +BasicBlock* RawMachineAssembler::CurrentBlock() { + DCHECK(current_block_); + return current_block_; +} + + +Node* RawMachineAssembler::MakeNode(Operator* op, int input_count, + Node** inputs) { + DCHECK(ScheduleValid()); + DCHECK(current_block_ != NULL); + Node* node = graph()->NewNode(op, input_count, inputs); + BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start() + : CurrentBlock(); + schedule()->AddNode(block, node); + return node; +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/raw-machine-assembler.h nodejs-0.11.15/deps/v8/src/compiler/raw-machine-assembler.h --- nodejs-0.11.13/deps/v8/src/compiler/raw-machine-assembler.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/raw-machine-assembler.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,129 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ +#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ + +#include "src/v8.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/graph-builder.h" +#include "src/compiler/machine-node-factory.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/operator.h" + + +namespace v8 { +namespace internal { +namespace compiler { + +class BasicBlock; +class Schedule; + + +class RawMachineAssembler : public GraphBuilder, + public MachineNodeFactory<RawMachineAssembler> { + public: + class Label { + public: + Label() : block_(NULL), used_(false), bound_(false) {} + ~Label() { DCHECK(bound_ || !used_); } + + BasicBlock* block() { return block_; } + + private: + // Private constructor for exit label. + explicit Label(BasicBlock* block) + : block_(block), used_(false), bound_(false) {} + + BasicBlock* block_; + bool used_; + bool bound_; + friend class RawMachineAssembler; + DISALLOW_COPY_AND_ASSIGN(Label); + }; + + RawMachineAssembler(Graph* graph, + MachineCallDescriptorBuilder* call_descriptor_builder, + MachineType word = MachineOperatorBuilder::pointer_rep()); + virtual ~RawMachineAssembler() {} + + Isolate* isolate() const { return zone()->isolate(); } + Zone* zone() const { return graph()->zone(); } + MachineOperatorBuilder* machine() { return &machine_; } + CommonOperatorBuilder* common() { return &common_; } + CallDescriptor* call_descriptor() const { + return call_descriptor_builder_->BuildCallDescriptor(zone()); + } + int parameter_count() const { + return call_descriptor_builder_->parameter_count(); + } + const MachineType* parameter_types() const { + return call_descriptor_builder_->parameter_types(); + } + + // Parameters. + Node* Parameter(int index); + + // Control flow. + Label* Exit(); + void Goto(Label* label); + void Branch(Node* condition, Label* true_val, Label* false_val); + // Call to a JS function with zero parameters. 
+ Node* CallJS0(Node* function, Node* receiver, Label* continuation, + Label* deoptimization); + // Call to a runtime function with zero parameters. + Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, + Label* continuation, Label* deoptimization); + void Return(Node* value); + void Bind(Label* label); + void Deoptimize(Node* state); + + // Variables. + Node* Phi(Node* n1, Node* n2) { return NewNode(common()->Phi(2), n1, n2); } + Node* Phi(Node* n1, Node* n2, Node* n3) { + return NewNode(common()->Phi(3), n1, n2, n3); + } + Node* Phi(Node* n1, Node* n2, Node* n3, Node* n4) { + return NewNode(common()->Phi(4), n1, n2, n3, n4); + } + + // MachineAssembler is invalid after export. + Schedule* Export(); + + protected: + virtual Node* MakeNode(Operator* op, int input_count, Node** inputs); + + Schedule* schedule() { + DCHECK(ScheduleValid()); + return schedule_; + } + + private: + bool ScheduleValid() { return schedule_ != NULL; } + + BasicBlock* Use(Label* label); + BasicBlock* EnsureBlock(Label* label); + BasicBlock* CurrentBlock(); + + typedef std::vector<MachineType, zone_allocator<MachineType> > + RepresentationVector; + + Schedule* schedule_; + MachineOperatorBuilder machine_; + CommonOperatorBuilder common_; + MachineCallDescriptorBuilder* call_descriptor_builder_; + Node** parameters_; + Label exit_label_; + BasicBlock* current_block_; + + DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/register-allocator.cc nodejs-0.11.15/deps/v8/src/compiler/register-allocator.cc --- nodejs-0.11.13/deps/v8/src/compiler/register-allocator.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/register-allocator.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2232 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/register-allocator.h" + +#include "src/compiler/linkage.h" +#include "src/hydrogen.h" +#include "src/string-stream.h" + +namespace v8 { +namespace internal { +namespace compiler { + +static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) { + return a.Value() < b.Value() ? a : b; +} + + +static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) { + return a.Value() > b.Value() ? a : b; +} + + +UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand, + InstructionOperand* hint) + : operand_(operand), + hint_(hint), + pos_(pos), + next_(NULL), + requires_reg_(false), + register_beneficial_(true) { + if (operand_ != NULL && operand_->IsUnallocated()) { + const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_); + requires_reg_ = unalloc->HasRegisterPolicy(); + register_beneficial_ = !unalloc->HasAnyPolicy(); + } + DCHECK(pos_.IsValid()); +} + + +bool UsePosition::HasHint() const { + return hint_ != NULL && !hint_->IsUnallocated(); +} + + +bool UsePosition::RequiresRegister() const { return requires_reg_; } + + +bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; } + + +void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) { + DCHECK(Contains(pos) && pos.Value() != start().Value()); + UseInterval* after = new (zone) UseInterval(pos, end_); + after->next_ = next_; + next_ = after; + end_ = pos; +} + + +#ifdef DEBUG + + +void LiveRange::Verify() const { + UsePosition* cur = first_pos_; + while (cur != NULL) { + DCHECK(Start().Value() <= cur->pos().Value() && + cur->pos().Value() <= End().Value()); + cur = cur->next(); + } +} + + +bool LiveRange::HasOverlap(UseInterval* target) const { + UseInterval* current_interval = first_interval_; + while (current_interval != NULL) { + // Intervals overlap if the start of one is contained in the other. 
+ if (current_interval->Contains(target->start()) || + target->Contains(current_interval->start())) { + return true; + } + current_interval = current_interval->next(); + } + return false; +} + + +#endif + + +LiveRange::LiveRange(int id, Zone* zone) + : id_(id), + spilled_(false), + is_phi_(false), + is_non_loop_phi_(false), + kind_(UNALLOCATED_REGISTERS), + assigned_register_(kInvalidAssignment), + last_interval_(NULL), + first_interval_(NULL), + first_pos_(NULL), + parent_(NULL), + next_(NULL), + current_interval_(NULL), + last_processed_use_(NULL), + current_hint_operand_(NULL), + spill_operand_(new (zone) InstructionOperand()), + spill_start_index_(kMaxInt) {} + + +void LiveRange::set_assigned_register(int reg, Zone* zone) { + DCHECK(!HasRegisterAssigned() && !IsSpilled()); + assigned_register_ = reg; + ConvertOperands(zone); +} + + +void LiveRange::MakeSpilled(Zone* zone) { + DCHECK(!IsSpilled()); + DCHECK(TopLevel()->HasAllocatedSpillOperand()); + spilled_ = true; + assigned_register_ = kInvalidAssignment; + ConvertOperands(zone); +} + + +bool LiveRange::HasAllocatedSpillOperand() const { + DCHECK(spill_operand_ != NULL); + return !spill_operand_->IsIgnored(); +} + + +void LiveRange::SetSpillOperand(InstructionOperand* operand) { + DCHECK(!operand->IsUnallocated()); + DCHECK(spill_operand_ != NULL); + DCHECK(spill_operand_->IsIgnored()); + spill_operand_->ConvertTo(operand->kind(), operand->index()); +} + + +UsePosition* LiveRange::NextUsePosition(LifetimePosition start) { + UsePosition* use_pos = last_processed_use_; + if (use_pos == NULL) use_pos = first_pos(); + while (use_pos != NULL && use_pos->pos().Value() < start.Value()) { + use_pos = use_pos->next(); + } + last_processed_use_ = use_pos; + return use_pos; +} + + +UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial( + LifetimePosition start) { + UsePosition* pos = NextUsePosition(start); + while (pos != NULL && !pos->RegisterIsBeneficial()) { + pos = pos->next(); + } + return pos; +} + + 
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial( + LifetimePosition start) { + UsePosition* pos = first_pos(); + UsePosition* prev = NULL; + while (pos != NULL && pos->pos().Value() < start.Value()) { + if (pos->RegisterIsBeneficial()) prev = pos; + pos = pos->next(); + } + return prev; +} + + +UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) { + UsePosition* pos = NextUsePosition(start); + while (pos != NULL && !pos->RequiresRegister()) { + pos = pos->next(); + } + return pos; +} + + +bool LiveRange::CanBeSpilled(LifetimePosition pos) { + // We cannot spill a live range that has a use requiring a register + // at the current or the immediate next position. + UsePosition* use_pos = NextRegisterPosition(pos); + if (use_pos == NULL) return true; + return use_pos->pos().Value() > + pos.NextInstruction().InstructionEnd().Value(); +} + + +InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) { + InstructionOperand* op = NULL; + if (HasRegisterAssigned()) { + DCHECK(!IsSpilled()); + switch (Kind()) { + case GENERAL_REGISTERS: + op = RegisterOperand::Create(assigned_register(), zone); + break; + case DOUBLE_REGISTERS: + op = DoubleRegisterOperand::Create(assigned_register(), zone); + break; + default: + UNREACHABLE(); + } + } else if (IsSpilled()) { + DCHECK(!HasRegisterAssigned()); + op = TopLevel()->GetSpillOperand(); + DCHECK(!op->IsUnallocated()); + } else { + UnallocatedOperand* unalloc = + new (zone) UnallocatedOperand(UnallocatedOperand::NONE); + unalloc->set_virtual_register(id_); + op = unalloc; + } + return op; +} + + +UseInterval* LiveRange::FirstSearchIntervalForPosition( + LifetimePosition position) const { + if (current_interval_ == NULL) return first_interval_; + if (current_interval_->start().Value() > position.Value()) { + current_interval_ = NULL; + return first_interval_; + } + return current_interval_; +} + + +void LiveRange::AdvanceLastProcessedMarker( + UseInterval* to_start_of, LifetimePosition 
but_not_past) const { + if (to_start_of == NULL) return; + if (to_start_of->start().Value() > but_not_past.Value()) return; + LifetimePosition start = current_interval_ == NULL + ? LifetimePosition::Invalid() + : current_interval_->start(); + if (to_start_of->start().Value() > start.Value()) { + current_interval_ = to_start_of; + } +} + + +void LiveRange::SplitAt(LifetimePosition position, LiveRange* result, + Zone* zone) { + DCHECK(Start().Value() < position.Value()); + DCHECK(result->IsEmpty()); + // Find the last interval that ends before the position. If the + // position is contained in one of the intervals in the chain, we + // split that interval and use the first part. + UseInterval* current = FirstSearchIntervalForPosition(position); + + // If the split position coincides with the beginning of a use interval + // we need to split use positons in a special way. + bool split_at_start = false; + + if (current->start().Value() == position.Value()) { + // When splitting at start we need to locate the previous use interval. + current = first_interval_; + } + + while (current != NULL) { + if (current->Contains(position)) { + current->SplitAt(position, zone); + break; + } + UseInterval* next = current->next(); + if (next->start().Value() >= position.Value()) { + split_at_start = (next->start().Value() == position.Value()); + break; + } + current = next; + } + + // Partition original use intervals to the two live ranges. + UseInterval* before = current; + UseInterval* after = before->next(); + result->last_interval_ = + (last_interval_ == before) + ? after // Only interval in the range after split. + : last_interval_; // Last interval of the original range. + result->first_interval_ = after; + last_interval_ = before; + + // Find the last use position before the split and the first use + // position after it. 
+ UsePosition* use_after = first_pos_; + UsePosition* use_before = NULL; + if (split_at_start) { + // The split position coincides with the beginning of a use interval (the + // end of a lifetime hole). Use at this position should be attributed to + // the split child because split child owns use interval covering it. + while (use_after != NULL && use_after->pos().Value() < position.Value()) { + use_before = use_after; + use_after = use_after->next(); + } + } else { + while (use_after != NULL && use_after->pos().Value() <= position.Value()) { + use_before = use_after; + use_after = use_after->next(); + } + } + + // Partition original use positions to the two live ranges. + if (use_before != NULL) { + use_before->next_ = NULL; + } else { + first_pos_ = NULL; + } + result->first_pos_ = use_after; + + // Discard cached iteration state. It might be pointing + // to the use that no longer belongs to this live range. + last_processed_use_ = NULL; + current_interval_ = NULL; + + // Link the new live range in the chain before any of the other + // ranges linked from the range before the split. + result->parent_ = (parent_ == NULL) ? this : parent_; + result->kind_ = result->parent_->kind_; + result->next_ = next_; + next_ = result; + +#ifdef DEBUG + Verify(); + result->Verify(); +#endif +} + + +// This implements an ordering on live ranges so that they are ordered by their +// start positions. This is needed for the correctness of the register +// allocation algorithm. If two live ranges start at the same offset then there +// is a tie breaker based on where the value is first used. This part of the +// ordering is merely a heuristic. 
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const { + LifetimePosition start = Start(); + LifetimePosition other_start = other->Start(); + if (start.Value() == other_start.Value()) { + UsePosition* pos = first_pos(); + if (pos == NULL) return false; + UsePosition* other_pos = other->first_pos(); + if (other_pos == NULL) return true; + return pos->pos().Value() < other_pos->pos().Value(); + } + return start.Value() < other_start.Value(); +} + + +void LiveRange::ShortenTo(LifetimePosition start) { + RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, + start.Value()); + DCHECK(first_interval_ != NULL); + DCHECK(first_interval_->start().Value() <= start.Value()); + DCHECK(start.Value() < first_interval_->end().Value()); + first_interval_->set_start(start); +} + + +void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end, + Zone* zone) { + RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n", + id_, start.Value(), end.Value()); + LifetimePosition new_end = end; + while (first_interval_ != NULL && + first_interval_->start().Value() <= end.Value()) { + if (first_interval_->end().Value() > end.Value()) { + new_end = first_interval_->end(); + } + first_interval_ = first_interval_->next(); + } + + UseInterval* new_interval = new (zone) UseInterval(start, new_end); + new_interval->next_ = first_interval_; + first_interval_ = new_interval; + if (new_interval->next() == NULL) { + last_interval_ = new_interval; + } +} + + +void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end, + Zone* zone) { + RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_, + start.Value(), end.Value()); + if (first_interval_ == NULL) { + UseInterval* interval = new (zone) UseInterval(start, end); + first_interval_ = interval; + last_interval_ = interval; + } else { + if (end.Value() == first_interval_->start().Value()) { + first_interval_->set_start(start); + } else if 
(end.Value() < first_interval_->start().Value()) { + UseInterval* interval = new (zone) UseInterval(start, end); + interval->set_next(first_interval_); + first_interval_ = interval; + } else { + // Order of instruction's processing (see ProcessInstructions) guarantees + // that each new use interval either precedes or intersects with + // last added interval. + DCHECK(start.Value() < first_interval_->end().Value()); + first_interval_->start_ = Min(start, first_interval_->start_); + first_interval_->end_ = Max(end, first_interval_->end_); + } + } +} + + +void LiveRange::AddUsePosition(LifetimePosition pos, + InstructionOperand* operand, + InstructionOperand* hint, Zone* zone) { + RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_, + pos.Value()); + UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint); + UsePosition* prev_hint = NULL; + UsePosition* prev = NULL; + UsePosition* current = first_pos_; + while (current != NULL && current->pos().Value() < pos.Value()) { + prev_hint = current->HasHint() ? 
current : prev_hint; + prev = current; + current = current->next(); + } + + if (prev == NULL) { + use_pos->set_next(first_pos_); + first_pos_ = use_pos; + } else { + use_pos->next_ = prev->next_; + prev->next_ = use_pos; + } + + if (prev_hint == NULL && use_pos->HasHint()) { + current_hint_operand_ = hint; + } +} + + +void LiveRange::ConvertOperands(Zone* zone) { + InstructionOperand* op = CreateAssignedOperand(zone); + UsePosition* use_pos = first_pos(); + while (use_pos != NULL) { + DCHECK(Start().Value() <= use_pos->pos().Value() && + use_pos->pos().Value() <= End().Value()); + + if (use_pos->HasOperand()) { + DCHECK(op->IsRegister() || op->IsDoubleRegister() || + !use_pos->RequiresRegister()); + use_pos->operand()->ConvertTo(op->kind(), op->index()); + } + use_pos = use_pos->next(); + } +} + + +bool LiveRange::CanCover(LifetimePosition position) const { + if (IsEmpty()) return false; + return Start().Value() <= position.Value() && + position.Value() < End().Value(); +} + + +bool LiveRange::Covers(LifetimePosition position) { + if (!CanCover(position)) return false; + UseInterval* start_search = FirstSearchIntervalForPosition(position); + for (UseInterval* interval = start_search; interval != NULL; + interval = interval->next()) { + DCHECK(interval->next() == NULL || + interval->next()->start().Value() >= interval->start().Value()); + AdvanceLastProcessedMarker(interval, position); + if (interval->Contains(position)) return true; + if (interval->start().Value() > position.Value()) return false; + } + return false; +} + + +LifetimePosition LiveRange::FirstIntersection(LiveRange* other) { + UseInterval* b = other->first_interval(); + if (b == NULL) return LifetimePosition::Invalid(); + LifetimePosition advance_last_processed_up_to = b->start(); + UseInterval* a = FirstSearchIntervalForPosition(b->start()); + while (a != NULL && b != NULL) { + if (a->start().Value() > other->End().Value()) break; + if (b->start().Value() > End().Value()) break; + LifetimePosition 
cur_intersection = a->Intersect(b); + if (cur_intersection.IsValid()) { + return cur_intersection; + } + if (a->start().Value() < b->start().Value()) { + a = a->next(); + if (a == NULL || a->start().Value() > other->End().Value()) break; + AdvanceLastProcessedMarker(a, advance_last_processed_up_to); + } else { + b = b->next(); + } + } + return LifetimePosition::Invalid(); +} + + +RegisterAllocator::RegisterAllocator(InstructionSequence* code) + : zone_(code->isolate()), + code_(code), + live_in_sets_(code->BasicBlockCount(), zone()), + live_ranges_(code->VirtualRegisterCount() * 2, zone()), + fixed_live_ranges_(NULL), + fixed_double_live_ranges_(NULL), + unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()), + active_live_ranges_(8, zone()), + inactive_live_ranges_(8, zone()), + reusable_slots_(8, zone()), + mode_(UNALLOCATED_REGISTERS), + num_registers_(-1), + allocation_ok_(true) {} + + +void RegisterAllocator::InitializeLivenessAnalysis() { + // Initialize the live_in sets for each block to NULL. + int block_count = code()->BasicBlockCount(); + live_in_sets_.Initialize(block_count, zone()); + live_in_sets_.AddBlock(NULL, block_count, zone()); +} + + +BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) { + // Compute live out for the given block, except not including backward + // successor edges. + BitVector* live_out = + new (zone()) BitVector(code()->VirtualRegisterCount(), zone()); + + // Process all successor blocks. + BasicBlock::Successors successors = block->successors(); + for (BasicBlock::Successors::iterator i = successors.begin(); + i != successors.end(); ++i) { + // Add values live on entry to the successor. Note the successor's + // live_in will not be computed yet for backwards edges. + BasicBlock* successor = *i; + BitVector* live_in = live_in_sets_[successor->rpo_number_]; + if (live_in != NULL) live_out->Union(*live_in); + + // All phi input operands corresponding to this successor edge are live + // out from this block. 
+ int index = successor->PredecessorIndexOf(block); + DCHECK(index >= 0); + DCHECK(index < static_cast<int>(successor->PredecessorCount())); + for (BasicBlock::const_iterator j = successor->begin(); + j != successor->end(); ++j) { + Node* phi = *j; + if (phi->opcode() != IrOpcode::kPhi) continue; + Node* input = phi->InputAt(index); + live_out->Add(input->id()); + } + } + + return live_out; +} + + +void RegisterAllocator::AddInitialIntervals(BasicBlock* block, + BitVector* live_out) { + // Add an interval that includes the entire block to the live range for + // each live_out value. + LifetimePosition start = + LifetimePosition::FromInstructionIndex(block->first_instruction_index()); + LifetimePosition end = LifetimePosition::FromInstructionIndex( + block->last_instruction_index()).NextInstruction(); + BitVector::Iterator iterator(live_out); + while (!iterator.Done()) { + int operand_index = iterator.Current(); + LiveRange* range = LiveRangeFor(operand_index); + range->AddUseInterval(start, end, zone()); + iterator.Advance(); + } +} + + +int RegisterAllocator::FixedDoubleLiveRangeID(int index) { + return -index - 1 - Register::kMaxNumAllocatableRegisters; +} + + +InstructionOperand* RegisterAllocator::AllocateFixed( + UnallocatedOperand* operand, int pos, bool is_tagged) { + TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register()); + DCHECK(operand->HasFixedPolicy()); + if (operand->HasFixedSlotPolicy()) { + operand->ConvertTo(InstructionOperand::STACK_SLOT, + operand->fixed_slot_index()); + } else if (operand->HasFixedRegisterPolicy()) { + int reg_index = operand->fixed_register_index(); + operand->ConvertTo(InstructionOperand::REGISTER, reg_index); + } else if (operand->HasFixedDoubleRegisterPolicy()) { + int reg_index = operand->fixed_register_index(); + operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index); + } else { + UNREACHABLE(); + } + if (is_tagged) { + TraceAlloc("Fixed reg is tagged at %d\n", pos); + Instruction* instr = 
InstructionAt(pos); + if (instr->HasPointerMap()) { + instr->pointer_map()->RecordPointer(operand, code_zone()); + } + } + return operand; +} + + +LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) { + DCHECK(index < Register::kMaxNumAllocatableRegisters); + LiveRange* result = fixed_live_ranges_[index]; + if (result == NULL) { + // TODO(titzer): add a utility method to allocate a new LiveRange: + // The LiveRange object itself can go in this zone, but the + // InstructionOperand needs + // to go in the code zone, since it may survive register allocation. + result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone()); + DCHECK(result->IsFixed()); + result->kind_ = GENERAL_REGISTERS; + SetLiveRangeAssignedRegister(result, index); + fixed_live_ranges_[index] = result; + } + return result; +} + + +LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) { + DCHECK(index < DoubleRegister::NumAllocatableRegisters()); + LiveRange* result = fixed_double_live_ranges_[index]; + if (result == NULL) { + result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone()); + DCHECK(result->IsFixed()); + result->kind_ = DOUBLE_REGISTERS; + SetLiveRangeAssignedRegister(result, index); + fixed_double_live_ranges_[index] = result; + } + return result; +} + + +LiveRange* RegisterAllocator::LiveRangeFor(int index) { + if (index >= live_ranges_.length()) { + live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone()); + } + LiveRange* result = live_ranges_[index]; + if (result == NULL) { + result = new (zone()) LiveRange(index, code_zone()); + live_ranges_[index] = result; + } + return result; +} + + +GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) { + int last_instruction = block->last_instruction_index(); + return code()->GapAt(last_instruction - 1); +} + + +LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) { + if (operand->IsUnallocated()) { + return 
LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register()); + } else if (operand->IsRegister()) { + return FixedLiveRangeFor(operand->index()); + } else if (operand->IsDoubleRegister()) { + return FixedDoubleLiveRangeFor(operand->index()); + } else { + return NULL; + } +} + + +void RegisterAllocator::Define(LifetimePosition position, + InstructionOperand* operand, + InstructionOperand* hint) { + LiveRange* range = LiveRangeFor(operand); + if (range == NULL) return; + + if (range->IsEmpty() || range->Start().Value() > position.Value()) { + // Can happen if there is a definition without use. + range->AddUseInterval(position, position.NextInstruction(), zone()); + range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone()); + } else { + range->ShortenTo(position); + } + + if (operand->IsUnallocated()) { + UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); + range->AddUsePosition(position, unalloc_operand, hint, zone()); + } +} + + +void RegisterAllocator::Use(LifetimePosition block_start, + LifetimePosition position, + InstructionOperand* operand, + InstructionOperand* hint) { + LiveRange* range = LiveRangeFor(operand); + if (range == NULL) return; + if (operand->IsUnallocated()) { + UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); + range->AddUsePosition(position, unalloc_operand, hint, zone()); + } + range->AddUseInterval(block_start, position, zone()); +} + + +void RegisterAllocator::AddConstraintsGapMove(int index, + InstructionOperand* from, + InstructionOperand* to) { + GapInstruction* gap = code()->GapAt(index); + ParallelMove* move = + gap->GetOrCreateParallelMove(GapInstruction::START, code_zone()); + if (from->IsUnallocated()) { + const ZoneList<MoveOperands>* move_operands = move->move_operands(); + for (int i = 0; i < move_operands->length(); ++i) { + MoveOperands cur = move_operands->at(i); + InstructionOperand* cur_to = cur.destination(); + if (cur_to->IsUnallocated()) { + if 
(UnallocatedOperand::cast(cur_to)->virtual_register() == + UnallocatedOperand::cast(from)->virtual_register()) { + move->AddMove(cur.source(), to, code_zone()); + return; + } + } + } + } + move->AddMove(from, to, code_zone()); +} + + +void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) { + int start = block->first_instruction_index(); + int end = block->last_instruction_index(); + DCHECK_NE(-1, start); + for (int i = start; i <= end; ++i) { + if (code()->IsGapAt(i)) { + Instruction* instr = NULL; + Instruction* prev_instr = NULL; + if (i < end) instr = InstructionAt(i + 1); + if (i > start) prev_instr = InstructionAt(i - 1); + MeetConstraintsBetween(prev_instr, instr, i); + if (!AllocationOk()) return; + } + } + + // Meet register constraints for the instruction in the end. + if (!code()->IsGapAt(end)) { + MeetRegisterConstraintsForLastInstructionInBlock(block); + } +} + + +void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock( + BasicBlock* block) { + int end = block->last_instruction_index(); + Instruction* last_instruction = InstructionAt(end); + for (size_t i = 0; i < last_instruction->OutputCount(); i++) { + InstructionOperand* output_operand = last_instruction->OutputAt(i); + DCHECK(!output_operand->IsConstant()); + UnallocatedOperand* output = UnallocatedOperand::cast(output_operand); + int output_vreg = output->virtual_register(); + LiveRange* range = LiveRangeFor(output_vreg); + bool assigned = false; + if (output->HasFixedPolicy()) { + AllocateFixed(output, -1, false); + // This value is produced on the stack, we never need to spill it. 
+ if (output->IsStackSlot()) { + range->SetSpillOperand(output); + range->SetSpillStartIndex(end); + assigned = true; + } + + BasicBlock::Successors successors = block->successors(); + for (BasicBlock::Successors::iterator succ = successors.begin(); + succ != successors.end(); ++succ) { + DCHECK((*succ)->PredecessorCount() == 1); + int gap_index = (*succ)->first_instruction_index() + 1; + DCHECK(code()->IsGapAt(gap_index)); + + // Create an unconstrained operand for the same virtual register + // and insert a gap move from the fixed output to the operand. + UnallocatedOperand* output_copy = + new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY); + output_copy->set_virtual_register(output_vreg); + + code()->AddGapMove(gap_index, output, output_copy); + } + } + + if (!assigned) { + BasicBlock::Successors successors = block->successors(); + for (BasicBlock::Successors::iterator succ = successors.begin(); + succ != successors.end(); ++succ) { + DCHECK((*succ)->PredecessorCount() == 1); + int gap_index = (*succ)->first_instruction_index() + 1; + range->SetSpillStartIndex(gap_index); + + // This move to spill operand is not a real use. Liveness analysis + // and splitting of live ranges do not account for it. + // Thus it should be inserted to a lifetime position corresponding to + // the instruction end. + GapInstruction* gap = code()->GapAt(gap_index); + ParallelMove* move = + gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone()); + move->AddMove(output, range->GetSpillOperand(), code_zone()); + } + } + } +} + + +void RegisterAllocator::MeetConstraintsBetween(Instruction* first, + Instruction* second, + int gap_index) { + if (first != NULL) { + // Handle fixed temporaries. + for (size_t i = 0; i < first->TempCount(); i++) { + UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i)); + if (temp->HasFixedPolicy()) { + AllocateFixed(temp, gap_index - 1, false); + } + } + + // Handle constant/fixed output operands. 
+ for (size_t i = 0; i < first->OutputCount(); i++) { + InstructionOperand* output = first->OutputAt(i); + if (output->IsConstant()) { + int output_vreg = output->index(); + LiveRange* range = LiveRangeFor(output_vreg); + range->SetSpillStartIndex(gap_index - 1); + range->SetSpillOperand(output); + } else { + UnallocatedOperand* first_output = UnallocatedOperand::cast(output); + LiveRange* range = LiveRangeFor(first_output->virtual_register()); + bool assigned = false; + if (first_output->HasFixedPolicy()) { + UnallocatedOperand* output_copy = + first_output->CopyUnconstrained(code_zone()); + bool is_tagged = HasTaggedValue(first_output->virtual_register()); + AllocateFixed(first_output, gap_index, is_tagged); + + // This value is produced on the stack, we never need to spill it. + if (first_output->IsStackSlot()) { + range->SetSpillOperand(first_output); + range->SetSpillStartIndex(gap_index - 1); + assigned = true; + } + code()->AddGapMove(gap_index, first_output, output_copy); + } + + // Make sure we add a gap move for spilling (if we have not done + // so already). + if (!assigned) { + range->SetSpillStartIndex(gap_index); + + // This move to spill operand is not a real use. Liveness analysis + // and splitting of live ranges do not account for it. + // Thus it should be inserted to a lifetime position corresponding to + // the instruction end. + GapInstruction* gap = code()->GapAt(gap_index); + ParallelMove* move = + gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone()); + move->AddMove(first_output, range->GetSpillOperand(), code_zone()); + } + } + } + } + + if (second != NULL) { + // Handle fixed input operands of second instruction. + for (size_t i = 0; i < second->InputCount(); i++) { + InstructionOperand* input = second->InputAt(i); + if (input->IsImmediate()) continue; // Ignore immediates. 
+ UnallocatedOperand* cur_input = UnallocatedOperand::cast(input); + if (cur_input->HasFixedPolicy()) { + UnallocatedOperand* input_copy = + cur_input->CopyUnconstrained(code_zone()); + bool is_tagged = HasTaggedValue(cur_input->virtual_register()); + AllocateFixed(cur_input, gap_index + 1, is_tagged); + AddConstraintsGapMove(gap_index, input_copy, cur_input); + } + } + + // Handle "output same as input" for second instruction. + for (size_t i = 0; i < second->OutputCount(); i++) { + InstructionOperand* output = second->OutputAt(i); + if (!output->IsUnallocated()) continue; + UnallocatedOperand* second_output = UnallocatedOperand::cast(output); + if (second_output->HasSameAsInputPolicy()) { + DCHECK(i == 0); // Only valid for first output. + UnallocatedOperand* cur_input = + UnallocatedOperand::cast(second->InputAt(0)); + int output_vreg = second_output->virtual_register(); + int input_vreg = cur_input->virtual_register(); + + UnallocatedOperand* input_copy = + cur_input->CopyUnconstrained(code_zone()); + cur_input->set_virtual_register(second_output->virtual_register()); + AddConstraintsGapMove(gap_index, input_copy, cur_input); + + if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) { + int index = gap_index + 1; + Instruction* instr = InstructionAt(index); + if (instr->HasPointerMap()) { + instr->pointer_map()->RecordPointer(input_copy, code_zone()); + } + } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) { + // The input is assumed to immediately have a tagged representation, + // before the pointer map can be used. I.e. the pointer map at the + // instruction will include the output operand (whose value at the + // beginning of the instruction is equal to the input operand). If + // this is not desired, then the pointer map at this instruction needs + // to be adjusted manually. 
+ } + } + } + } +} + + +bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) { + for (size_t i = 0; i < instr->OutputCount(); i++) { + InstructionOperand* output = instr->OutputAt(i); + if (output->IsRegister() && output->index() == index) return true; + } + return false; +} + + +bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr, + int index) { + for (size_t i = 0; i < instr->OutputCount(); i++) { + InstructionOperand* output = instr->OutputAt(i); + if (output->IsDoubleRegister() && output->index() == index) return true; + } + return false; +} + + +void RegisterAllocator::ProcessInstructions(BasicBlock* block, + BitVector* live) { + int block_start = block->first_instruction_index(); + + LifetimePosition block_start_position = + LifetimePosition::FromInstructionIndex(block_start); + + for (int index = block->last_instruction_index(); index >= block_start; + index--) { + LifetimePosition curr_position = + LifetimePosition::FromInstructionIndex(index); + + Instruction* instr = InstructionAt(index); + DCHECK(instr != NULL); + if (instr->IsGapMoves()) { + // Process the moves of the gap instruction, making their sources live. + GapInstruction* gap = code()->GapAt(index); + + // TODO(titzer): no need to create the parallel move if it doesn't exist. 
+ ParallelMove* move = + gap->GetOrCreateParallelMove(GapInstruction::START, code_zone()); + const ZoneList<MoveOperands>* move_operands = move->move_operands(); + for (int i = 0; i < move_operands->length(); ++i) { + MoveOperands* cur = &move_operands->at(i); + if (cur->IsIgnored()) continue; + InstructionOperand* from = cur->source(); + InstructionOperand* to = cur->destination(); + InstructionOperand* hint = to; + if (to->IsUnallocated()) { + int to_vreg = UnallocatedOperand::cast(to)->virtual_register(); + LiveRange* to_range = LiveRangeFor(to_vreg); + if (to_range->is_phi()) { + if (to_range->is_non_loop_phi()) { + hint = to_range->current_hint_operand(); + } + } else { + if (live->Contains(to_vreg)) { + Define(curr_position, to, from); + live->Remove(to_vreg); + } else { + cur->Eliminate(); + continue; + } + } + } else { + Define(curr_position, to, from); + } + Use(block_start_position, curr_position, from, hint); + if (from->IsUnallocated()) { + live->Add(UnallocatedOperand::cast(from)->virtual_register()); + } + } + } else { + // Process output, inputs, and temps of this non-gap instruction. 
+ for (size_t i = 0; i < instr->OutputCount(); i++) { + InstructionOperand* output = instr->OutputAt(i); + if (output->IsUnallocated()) { + int out_vreg = UnallocatedOperand::cast(output)->virtual_register(); + live->Remove(out_vreg); + } else if (output->IsConstant()) { + int out_vreg = output->index(); + live->Remove(out_vreg); + } + Define(curr_position, output, NULL); + } + + if (instr->ClobbersRegisters()) { + for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) { + if (!IsOutputRegisterOf(instr, i)) { + LiveRange* range = FixedLiveRangeFor(i); + range->AddUseInterval(curr_position, curr_position.InstructionEnd(), + zone()); + } + } + } + + if (instr->ClobbersDoubleRegisters()) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { + if (!IsOutputDoubleRegisterOf(instr, i)) { + LiveRange* range = FixedDoubleLiveRangeFor(i); + range->AddUseInterval(curr_position, curr_position.InstructionEnd(), + zone()); + } + } + } + + for (size_t i = 0; i < instr->InputCount(); i++) { + InstructionOperand* input = instr->InputAt(i); + if (input->IsImmediate()) continue; // Ignore immediates. 
+ LifetimePosition use_pos; + if (input->IsUnallocated() && + UnallocatedOperand::cast(input)->IsUsedAtStart()) { + use_pos = curr_position; + } else { + use_pos = curr_position.InstructionEnd(); + } + + Use(block_start_position, use_pos, input, NULL); + if (input->IsUnallocated()) { + live->Add(UnallocatedOperand::cast(input)->virtual_register()); + } + } + + for (size_t i = 0; i < instr->TempCount(); i++) { + InstructionOperand* temp = instr->TempAt(i); + if (instr->ClobbersTemps()) { + if (temp->IsRegister()) continue; + if (temp->IsUnallocated()) { + UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp); + if (temp_unalloc->HasFixedPolicy()) { + continue; + } + } + } + Use(block_start_position, curr_position.InstructionEnd(), temp, NULL); + Define(curr_position, temp, NULL); + } + } + } +} + + +void RegisterAllocator::ResolvePhis(BasicBlock* block) { + for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) { + Node* phi = *i; + if (phi->opcode() != IrOpcode::kPhi) continue; + + UnallocatedOperand* phi_operand = + new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE); + phi_operand->set_virtual_register(phi->id()); + + int j = 0; + Node::Inputs inputs = phi->inputs(); + for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end(); + ++iter, ++j) { + Node* op = *iter; + // TODO(mstarzinger): Use a ValueInputIterator instead. + if (j >= block->PredecessorCount()) continue; + UnallocatedOperand* operand = + new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY); + operand->set_virtual_register(op->id()); + BasicBlock* cur_block = block->PredecessorAt(j); + // The gap move must be added without any special processing as in + // the AddConstraintsGapMove. 
+ code()->AddGapMove(cur_block->last_instruction_index() - 1, operand, + phi_operand); + + Instruction* branch = InstructionAt(cur_block->last_instruction_index()); + DCHECK(!branch->HasPointerMap()); + USE(branch); + } + + LiveRange* live_range = LiveRangeFor(phi->id()); + BlockStartInstruction* block_start = code()->GetBlockStart(block); + block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone()) + ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone()); + live_range->SetSpillStartIndex(block->first_instruction_index()); + + // We use the phi-ness of some nodes in some later heuristics. + live_range->set_is_phi(true); + if (!block->IsLoopHeader()) { + live_range->set_is_non_loop_phi(true); + } + } +} + + +bool RegisterAllocator::Allocate() { + assigned_registers_ = new (code_zone()) + BitVector(Register::NumAllocatableRegisters(), code_zone()); + assigned_double_registers_ = new (code_zone()) + BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone()); + MeetRegisterConstraints(); + if (!AllocationOk()) return false; + ResolvePhis(); + BuildLiveRanges(); + AllocateGeneralRegisters(); + if (!AllocationOk()) return false; + AllocateDoubleRegisters(); + if (!AllocationOk()) return false; + PopulatePointerMaps(); + ConnectRanges(); + ResolveControlFlow(); + code()->frame()->SetAllocatedRegisters(assigned_registers_); + code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_); + return true; +} + + +void RegisterAllocator::MeetRegisterConstraints() { + RegisterAllocatorPhase phase("L_Register constraints", this); + for (int i = 0; i < code()->BasicBlockCount(); ++i) { + MeetRegisterConstraints(code()->BlockAt(i)); + if (!AllocationOk()) return; + } +} + + +void RegisterAllocator::ResolvePhis() { + RegisterAllocatorPhase phase("L_Resolve phis", this); + + // Process the blocks in reverse order. 
+ for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) { + ResolvePhis(code()->BlockAt(i)); + } +} + + +void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block, + BasicBlock* pred) { + LifetimePosition pred_end = + LifetimePosition::FromInstructionIndex(pred->last_instruction_index()); + LifetimePosition cur_start = + LifetimePosition::FromInstructionIndex(block->first_instruction_index()); + LiveRange* pred_cover = NULL; + LiveRange* cur_cover = NULL; + LiveRange* cur_range = range; + while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) { + if (cur_range->CanCover(cur_start)) { + DCHECK(cur_cover == NULL); + cur_cover = cur_range; + } + if (cur_range->CanCover(pred_end)) { + DCHECK(pred_cover == NULL); + pred_cover = cur_range; + } + cur_range = cur_range->next(); + } + + if (cur_cover->IsSpilled()) return; + DCHECK(pred_cover != NULL && cur_cover != NULL); + if (pred_cover != cur_cover) { + InstructionOperand* pred_op = + pred_cover->CreateAssignedOperand(code_zone()); + InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone()); + if (!pred_op->Equals(cur_op)) { + GapInstruction* gap = NULL; + if (block->PredecessorCount() == 1) { + gap = code()->GapAt(block->first_instruction_index()); + } else { + DCHECK(pred->SuccessorCount() == 1); + gap = GetLastGap(pred); + + Instruction* branch = InstructionAt(pred->last_instruction_index()); + DCHECK(!branch->HasPointerMap()); + USE(branch); + } + gap->GetOrCreateParallelMove(GapInstruction::START, code_zone()) + ->AddMove(pred_op, cur_op, code_zone()); + } + } +} + + +ParallelMove* RegisterAllocator::GetConnectingParallelMove( + LifetimePosition pos) { + int index = pos.InstructionIndex(); + if (code()->IsGapAt(index)) { + GapInstruction* gap = code()->GapAt(index); + return gap->GetOrCreateParallelMove( + pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END, + code_zone()); + } + int gap_pos = pos.IsInstructionStart() ? 
(index - 1) : (index + 1); + return code()->GapAt(gap_pos)->GetOrCreateParallelMove( + (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE, + code_zone()); +} + + +BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) { + return code()->GetBasicBlock(pos.InstructionIndex()); +} + + +void RegisterAllocator::ConnectRanges() { + RegisterAllocatorPhase phase("L_Connect ranges", this); + for (int i = 0; i < live_ranges()->length(); ++i) { + LiveRange* first_range = live_ranges()->at(i); + if (first_range == NULL || first_range->parent() != NULL) continue; + + LiveRange* second_range = first_range->next(); + while (second_range != NULL) { + LifetimePosition pos = second_range->Start(); + + if (!second_range->IsSpilled()) { + // Add gap move if the two live ranges touch and there is no block + // boundary. + if (first_range->End().Value() == pos.Value()) { + bool should_insert = true; + if (IsBlockBoundary(pos)) { + should_insert = CanEagerlyResolveControlFlow(GetBlock(pos)); + } + if (should_insert) { + ParallelMove* move = GetConnectingParallelMove(pos); + InstructionOperand* prev_operand = + first_range->CreateAssignedOperand(code_zone()); + InstructionOperand* cur_operand = + second_range->CreateAssignedOperand(code_zone()); + move->AddMove(prev_operand, cur_operand, code_zone()); + } + } + } + + first_range = second_range; + second_range = second_range->next(); + } + } +} + + +bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const { + if (block->PredecessorCount() != 1) return false; + return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1; +} + + +void RegisterAllocator::ResolveControlFlow() { + RegisterAllocatorPhase phase("L_Resolve control flow", this); + for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) { + BasicBlock* block = code()->BlockAt(block_id); + if (CanEagerlyResolveControlFlow(block)) continue; + BitVector* live = live_in_sets_[block->rpo_number_]; + 
BitVector::Iterator iterator(live); + while (!iterator.Done()) { + int operand_index = iterator.Current(); + BasicBlock::Predecessors predecessors = block->predecessors(); + for (BasicBlock::Predecessors::iterator i = predecessors.begin(); + i != predecessors.end(); ++i) { + BasicBlock* cur = *i; + LiveRange* cur_range = LiveRangeFor(operand_index); + ResolveControlFlow(cur_range, block, cur); + } + iterator.Advance(); + } + } +} + + +void RegisterAllocator::BuildLiveRanges() { + RegisterAllocatorPhase phase("L_Build live ranges", this); + InitializeLivenessAnalysis(); + // Process the blocks in reverse order. + for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0; + --block_id) { + BasicBlock* block = code()->BlockAt(block_id); + BitVector* live = ComputeLiveOut(block); + // Initially consider all live_out values live for the entire block. We + // will shorten these intervals if necessary. + AddInitialIntervals(block, live); + + // Process the instructions in reverse order, generating and killing + // live values. + ProcessInstructions(block, live); + // All phi output operands are killed by this block. + for (BasicBlock::const_iterator i = block->begin(); i != block->end(); + ++i) { + Node* phi = *i; + if (phi->opcode() != IrOpcode::kPhi) continue; + + // The live range interval already ends at the first instruction of the + // block. + live->Remove(phi->id()); + + InstructionOperand* hint = NULL; + InstructionOperand* phi_operand = NULL; + GapInstruction* gap = GetLastGap(block->PredecessorAt(0)); + + // TODO(titzer): no need to create the parallel move if it doesn't exit. 
+ ParallelMove* move = + gap->GetOrCreateParallelMove(GapInstruction::START, code_zone()); + for (int j = 0; j < move->move_operands()->length(); ++j) { + InstructionOperand* to = move->move_operands()->at(j).destination(); + if (to->IsUnallocated() && + UnallocatedOperand::cast(to)->virtual_register() == phi->id()) { + hint = move->move_operands()->at(j).source(); + phi_operand = to; + break; + } + } + DCHECK(hint != NULL); + + LifetimePosition block_start = LifetimePosition::FromInstructionIndex( + block->first_instruction_index()); + Define(block_start, phi_operand, hint); + } + + // Now live is live_in for this block except not including values live + // out on backward successor edges. + live_in_sets_[block_id] = live; + + if (block->IsLoopHeader()) { + // Add a live range stretching from the first loop instruction to the last + // for each value live on entry to the header. + BitVector::Iterator iterator(live); + LifetimePosition start = LifetimePosition::FromInstructionIndex( + block->first_instruction_index()); + int end_index = + code()->BlockAt(block->loop_end_)->last_instruction_index(); + LifetimePosition end = + LifetimePosition::FromInstructionIndex(end_index).NextInstruction(); + while (!iterator.Done()) { + int operand_index = iterator.Current(); + LiveRange* range = LiveRangeFor(operand_index); + range->EnsureInterval(start, end, zone()); + iterator.Advance(); + } + + // Insert all values into the live in sets of all blocks in the loop. 
+ for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) { + live_in_sets_[i]->Union(*live); + } + } + +#ifdef DEBUG + if (block_id == 0) { + BitVector::Iterator iterator(live); + bool found = false; + while (!iterator.Done()) { + found = true; + int operand_index = iterator.Current(); + PrintF("Register allocator error: live v%d reached first block.\n", + operand_index); + LiveRange* range = LiveRangeFor(operand_index); + PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value()); + CompilationInfo* info = code()->linkage()->info(); + if (info->IsStub()) { + if (info->code_stub() == NULL) { + PrintF("\n"); + } else { + CodeStub::Major major_key = info->code_stub()->MajorKey(); + PrintF(" (function: %s)\n", CodeStub::MajorName(major_key, false)); + } + } else { + DCHECK(info->IsOptimizing()); + AllowHandleDereference allow_deref; + PrintF(" (function: %s)\n", + info->function()->debug_name()->ToCString().get()); + } + iterator.Advance(); + } + DCHECK(!found); + } +#endif + } + + for (int i = 0; i < live_ranges_.length(); ++i) { + if (live_ranges_[i] != NULL) { + live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id()); + + // TODO(bmeurer): This is a horrible hack to make sure that for constant + // live ranges, every use requires the constant to be in a register. + // Without this hack, all uses with "any" policy would get the constant + // operand assigned. 
+ LiveRange* range = live_ranges_[i]; + if (range->HasAllocatedSpillOperand() && + range->GetSpillOperand()->IsConstant()) { + for (UsePosition* pos = range->first_pos(); pos != NULL; + pos = pos->next_) { + pos->register_beneficial_ = true; + pos->requires_reg_ = true; + } + } + } + } +} + + +bool RegisterAllocator::SafePointsAreInOrder() const { + int safe_point = 0; + const PointerMapDeque* pointer_maps = code()->pointer_maps(); + for (PointerMapDeque::const_iterator it = pointer_maps->begin(); + it != pointer_maps->end(); ++it) { + PointerMap* map = *it; + if (safe_point > map->instruction_position()) return false; + safe_point = map->instruction_position(); + } + return true; +} + + +void RegisterAllocator::PopulatePointerMaps() { + RegisterAllocatorPhase phase("L_Populate pointer maps", this); + + DCHECK(SafePointsAreInOrder()); + + // Iterate over all safe point positions and record a pointer + // for all spilled live ranges at this point. + int last_range_start = 0; + const PointerMapDeque* pointer_maps = code()->pointer_maps(); + PointerMapDeque::const_iterator first_it = pointer_maps->begin(); + for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) { + LiveRange* range = live_ranges()->at(range_idx); + if (range == NULL) continue; + // Iterate over the first parts of multi-part live ranges. + if (range->parent() != NULL) continue; + // Skip non-reference values. + if (!HasTaggedValue(range->id())) continue; + // Skip empty live ranges. + if (range->IsEmpty()) continue; + + // Find the extent of the range and its children. + int start = range->Start().InstructionIndex(); + int end = 0; + for (LiveRange* cur = range; cur != NULL; cur = cur->next()) { + LifetimePosition this_end = cur->End(); + if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex(); + DCHECK(cur->Start().InstructionIndex() >= start); + } + + // Most of the ranges are in order, but not all. 
Keep an eye on when they + // step backwards and reset the first_it so we don't miss any safe points. + if (start < last_range_start) first_it = pointer_maps->begin(); + last_range_start = start; + + // Step across all the safe points that are before the start of this range, + // recording how far we step in order to save doing this for the next range. + for (; first_it != pointer_maps->end(); ++first_it) { + PointerMap* map = *first_it; + if (map->instruction_position() >= start) break; + } + + // Step through the safe points to see whether they are in the range. + for (PointerMapDeque::const_iterator it = first_it; + it != pointer_maps->end(); ++it) { + PointerMap* map = *it; + int safe_point = map->instruction_position(); + + // The safe points are sorted so we can stop searching here. + if (safe_point - 1 > end) break; + + // Advance to the next active range that covers the current + // safe point position. + LifetimePosition safe_point_pos = + LifetimePosition::FromInstructionIndex(safe_point); + LiveRange* cur = range; + while (cur != NULL && !cur->Covers(safe_point_pos)) { + cur = cur->next(); + } + if (cur == NULL) continue; + + // Check if the live range is spilled and the safe point is after + // the spill position. 
+ if (range->HasAllocatedSpillOperand() && + safe_point >= range->spill_start_index() && + !range->GetSpillOperand()->IsConstant()) { + TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n", + range->id(), range->spill_start_index(), safe_point); + map->RecordPointer(range->GetSpillOperand(), code_zone()); + } + + if (!cur->IsSpilled()) { + TraceAlloc( + "Pointer in register for range %d (start at %d) " + "at safe point %d\n", + cur->id(), cur->Start().Value(), safe_point); + InstructionOperand* operand = cur->CreateAssignedOperand(code_zone()); + DCHECK(!operand->IsStackSlot()); + map->RecordPointer(operand, code_zone()); + } + } + } +} + + +void RegisterAllocator::AllocateGeneralRegisters() { + RegisterAllocatorPhase phase("L_Allocate general registers", this); + num_registers_ = Register::NumAllocatableRegisters(); + mode_ = GENERAL_REGISTERS; + AllocateRegisters(); +} + + +void RegisterAllocator::AllocateDoubleRegisters() { + RegisterAllocatorPhase phase("L_Allocate double registers", this); + num_registers_ = DoubleRegister::NumAllocatableRegisters(); + mode_ = DOUBLE_REGISTERS; + AllocateRegisters(); +} + + +void RegisterAllocator::AllocateRegisters() { + DCHECK(unhandled_live_ranges_.is_empty()); + + for (int i = 0; i < live_ranges_.length(); ++i) { + if (live_ranges_[i] != NULL) { + if (live_ranges_[i]->Kind() == mode_) { + AddToUnhandledUnsorted(live_ranges_[i]); + } + } + } + SortUnhandled(); + DCHECK(UnhandledIsSorted()); + + DCHECK(reusable_slots_.is_empty()); + DCHECK(active_live_ranges_.is_empty()); + DCHECK(inactive_live_ranges_.is_empty()); + + if (mode_ == DOUBLE_REGISTERS) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { + LiveRange* current = fixed_double_live_ranges_.at(i); + if (current != NULL) { + AddToInactive(current); + } + } + } else { + DCHECK(mode_ == GENERAL_REGISTERS); + for (int i = 0; i < fixed_live_ranges_.length(); ++i) { + LiveRange* current = fixed_live_ranges_.at(i); + if (current != 
NULL) { + AddToInactive(current); + } + } + } + + while (!unhandled_live_ranges_.is_empty()) { + DCHECK(UnhandledIsSorted()); + LiveRange* current = unhandled_live_ranges_.RemoveLast(); + DCHECK(UnhandledIsSorted()); + LifetimePosition position = current->Start(); +#ifdef DEBUG + allocation_finger_ = position; +#endif + TraceAlloc("Processing interval %d start=%d\n", current->id(), + position.Value()); + + if (current->HasAllocatedSpillOperand()) { + TraceAlloc("Live range %d already has a spill operand\n", current->id()); + LifetimePosition next_pos = position; + if (code()->IsGapAt(next_pos.InstructionIndex())) { + next_pos = next_pos.NextInstruction(); + } + UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos); + // If the range already has a spill operand and it doesn't need a + // register immediately, split it and spill the first part of the range. + if (pos == NULL) { + Spill(current); + continue; + } else if (pos->pos().Value() > + current->Start().NextInstruction().Value()) { + // Do not spill live range eagerly if use position that can benefit from + // the register is too close to the start of live range. + SpillBetween(current, current->Start(), pos->pos()); + if (!AllocationOk()) return; + DCHECK(UnhandledIsSorted()); + continue; + } + } + + for (int i = 0; i < active_live_ranges_.length(); ++i) { + LiveRange* cur_active = active_live_ranges_.at(i); + if (cur_active->End().Value() <= position.Value()) { + ActiveToHandled(cur_active); + --i; // The live range was removed from the list of active live ranges. + } else if (!cur_active->Covers(position)) { + ActiveToInactive(cur_active); + --i; // The live range was removed from the list of active live ranges. 
+ } + } + + for (int i = 0; i < inactive_live_ranges_.length(); ++i) { + LiveRange* cur_inactive = inactive_live_ranges_.at(i); + if (cur_inactive->End().Value() <= position.Value()) { + InactiveToHandled(cur_inactive); + --i; // Live range was removed from the list of inactive live ranges. + } else if (cur_inactive->Covers(position)) { + InactiveToActive(cur_inactive); + --i; // Live range was removed from the list of inactive live ranges. + } + } + + DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled()); + + bool result = TryAllocateFreeReg(current); + if (!AllocationOk()) return; + + if (!result) AllocateBlockedReg(current); + if (!AllocationOk()) return; + + if (current->HasRegisterAssigned()) { + AddToActive(current); + } + } + + reusable_slots_.Rewind(0); + active_live_ranges_.Rewind(0); + inactive_live_ranges_.Rewind(0); +} + + +const char* RegisterAllocator::RegisterName(int allocation_index) { + if (mode_ == GENERAL_REGISTERS) { + return Register::AllocationIndexToString(allocation_index); + } else { + return DoubleRegister::AllocationIndexToString(allocation_index); + } +} + + +void RegisterAllocator::TraceAlloc(const char* msg, ...) { + if (FLAG_trace_alloc) { + va_list arguments; + va_start(arguments, msg); + base::OS::VPrint(msg, arguments); + va_end(arguments); + } +} + + +bool RegisterAllocator::HasTaggedValue(int virtual_register) const { + return code()->IsReference(virtual_register); +} + + +RegisterKind RegisterAllocator::RequiredRegisterKind( + int virtual_register) const { + return (code()->IsDouble(virtual_register)) ? 
DOUBLE_REGISTERS + : GENERAL_REGISTERS; +} + + +void RegisterAllocator::AddToActive(LiveRange* range) { + TraceAlloc("Add live range %d to active\n", range->id()); + active_live_ranges_.Add(range, zone()); +} + + +void RegisterAllocator::AddToInactive(LiveRange* range) { + TraceAlloc("Add live range %d to inactive\n", range->id()); + inactive_live_ranges_.Add(range, zone()); +} + + +void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) { + if (range == NULL || range->IsEmpty()) return; + DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); + DCHECK(allocation_finger_.Value() <= range->Start().Value()); + for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) { + LiveRange* cur_range = unhandled_live_ranges_.at(i); + if (range->ShouldBeAllocatedBefore(cur_range)) { + TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1); + unhandled_live_ranges_.InsertAt(i + 1, range, zone()); + DCHECK(UnhandledIsSorted()); + return; + } + } + TraceAlloc("Add live range %d to unhandled at start\n", range->id()); + unhandled_live_ranges_.InsertAt(0, range, zone()); + DCHECK(UnhandledIsSorted()); +} + + +void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) { + if (range == NULL || range->IsEmpty()) return; + DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); + TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id()); + unhandled_live_ranges_.Add(range, zone()); +} + + +static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) { + DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) || + !(*b)->ShouldBeAllocatedBefore(*a)); + if ((*a)->ShouldBeAllocatedBefore(*b)) return 1; + if ((*b)->ShouldBeAllocatedBefore(*a)) return -1; + return (*a)->id() - (*b)->id(); +} + + +// Sort the unhandled live ranges so that the ranges to be processed first are +// at the end of the array list. This is convenient for the register allocation +// algorithm because it is efficient to remove elements from the end. 
+void RegisterAllocator::SortUnhandled() { + TraceAlloc("Sort unhandled\n"); + unhandled_live_ranges_.Sort(&UnhandledSortHelper); +} + + +bool RegisterAllocator::UnhandledIsSorted() { + int len = unhandled_live_ranges_.length(); + for (int i = 1; i < len; i++) { + LiveRange* a = unhandled_live_ranges_.at(i - 1); + LiveRange* b = unhandled_live_ranges_.at(i); + if (a->Start().Value() < b->Start().Value()) return false; + } + return true; +} + + +void RegisterAllocator::FreeSpillSlot(LiveRange* range) { + // Check that we are the last range. + if (range->next() != NULL) return; + + if (!range->TopLevel()->HasAllocatedSpillOperand()) return; + + InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand(); + if (spill_operand->IsConstant()) return; + if (spill_operand->index() >= 0) { + reusable_slots_.Add(range, zone()); + } +} + + +InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) { + if (reusable_slots_.is_empty()) return NULL; + if (reusable_slots_.first()->End().Value() > + range->TopLevel()->Start().Value()) { + return NULL; + } + InstructionOperand* result = + reusable_slots_.first()->TopLevel()->GetSpillOperand(); + reusable_slots_.Remove(0); + return result; +} + + +void RegisterAllocator::ActiveToHandled(LiveRange* range) { + DCHECK(active_live_ranges_.Contains(range)); + active_live_ranges_.RemoveElement(range); + TraceAlloc("Moving live range %d from active to handled\n", range->id()); + FreeSpillSlot(range); +} + + +void RegisterAllocator::ActiveToInactive(LiveRange* range) { + DCHECK(active_live_ranges_.Contains(range)); + active_live_ranges_.RemoveElement(range); + inactive_live_ranges_.Add(range, zone()); + TraceAlloc("Moving live range %d from active to inactive\n", range->id()); +} + + +void RegisterAllocator::InactiveToHandled(LiveRange* range) { + DCHECK(inactive_live_ranges_.Contains(range)); + inactive_live_ranges_.RemoveElement(range); + TraceAlloc("Moving live range %d from inactive to handled\n", 
range->id()); + FreeSpillSlot(range); +} + + +void RegisterAllocator::InactiveToActive(LiveRange* range) { + DCHECK(inactive_live_ranges_.Contains(range)); + inactive_live_ranges_.RemoveElement(range); + active_live_ranges_.Add(range, zone()); + TraceAlloc("Moving live range %d from inactive to active\n", range->id()); +} + + +// TryAllocateFreeReg and AllocateBlockedReg assume this +// when allocating local arrays. +STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >= + Register::kMaxNumAllocatableRegisters); + + +bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) { + LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters]; + + for (int i = 0; i < num_registers_; i++) { + free_until_pos[i] = LifetimePosition::MaxPosition(); + } + + for (int i = 0; i < active_live_ranges_.length(); ++i) { + LiveRange* cur_active = active_live_ranges_.at(i); + free_until_pos[cur_active->assigned_register()] = + LifetimePosition::FromInstructionIndex(0); + } + + for (int i = 0; i < inactive_live_ranges_.length(); ++i) { + LiveRange* cur_inactive = inactive_live_ranges_.at(i); + DCHECK(cur_inactive->End().Value() > current->Start().Value()); + LifetimePosition next_intersection = + cur_inactive->FirstIntersection(current); + if (!next_intersection.IsValid()) continue; + int cur_reg = cur_inactive->assigned_register(); + free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection); + } + + InstructionOperand* hint = current->FirstHint(); + if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) { + int register_index = hint->index(); + TraceAlloc( + "Found reg hint %s (free until [%d) for live range %d (end %d[).\n", + RegisterName(register_index), free_until_pos[register_index].Value(), + current->id(), current->End().Value()); + + // The desired register is free until the end of the current live range. 
+ if (free_until_pos[register_index].Value() >= current->End().Value()) { + TraceAlloc("Assigning preferred reg %s to live range %d\n", + RegisterName(register_index), current->id()); + SetLiveRangeAssignedRegister(current, register_index); + return true; + } + } + + // Find the register which stays free for the longest time. + int reg = 0; + for (int i = 1; i < RegisterCount(); ++i) { + if (free_until_pos[i].Value() > free_until_pos[reg].Value()) { + reg = i; + } + } + + LifetimePosition pos = free_until_pos[reg]; + + if (pos.Value() <= current->Start().Value()) { + // All registers are blocked. + return false; + } + + if (pos.Value() < current->End().Value()) { + // Register reg is available at the range start but becomes blocked before + // the range end. Split current at position where it becomes blocked. + LiveRange* tail = SplitRangeAt(current, pos); + if (!AllocationOk()) return false; + AddToUnhandledSorted(tail); + } + + + // Register reg is available at the range start and is free until + // the range end. + DCHECK(pos.Value() >= current->End().Value()); + TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg), + current->id()); + SetLiveRangeAssignedRegister(current, reg); + + return true; +} + + +void RegisterAllocator::AllocateBlockedReg(LiveRange* current) { + UsePosition* register_use = current->NextRegisterPosition(current->Start()); + if (register_use == NULL) { + // There is no use in the current live range that requires a register. + // We can just spill it. 
+ Spill(current); + return; + } + + + LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters]; + LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters]; + + for (int i = 0; i < num_registers_; i++) { + use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition(); + } + + for (int i = 0; i < active_live_ranges_.length(); ++i) { + LiveRange* range = active_live_ranges_[i]; + int cur_reg = range->assigned_register(); + if (range->IsFixed() || !range->CanBeSpilled(current->Start())) { + block_pos[cur_reg] = use_pos[cur_reg] = + LifetimePosition::FromInstructionIndex(0); + } else { + UsePosition* next_use = + range->NextUsePositionRegisterIsBeneficial(current->Start()); + if (next_use == NULL) { + use_pos[cur_reg] = range->End(); + } else { + use_pos[cur_reg] = next_use->pos(); + } + } + } + + for (int i = 0; i < inactive_live_ranges_.length(); ++i) { + LiveRange* range = inactive_live_ranges_.at(i); + DCHECK(range->End().Value() > current->Start().Value()); + LifetimePosition next_intersection = range->FirstIntersection(current); + if (!next_intersection.IsValid()) continue; + int cur_reg = range->assigned_register(); + if (range->IsFixed()) { + block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); + use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); + } else { + use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); + } + } + + int reg = 0; + for (int i = 1; i < RegisterCount(); ++i) { + if (use_pos[i].Value() > use_pos[reg].Value()) { + reg = i; + } + } + + LifetimePosition pos = use_pos[reg]; + + if (pos.Value() < register_use->pos().Value()) { + // All registers are blocked before the first use that requires a register. + // Spill starting part of live range up to that use. + SpillBetween(current, current->Start(), register_use->pos()); + return; + } + + if (block_pos[reg].Value() < current->End().Value()) { + // Register becomes blocked before the current range end. Split before that + // position. 
+ LiveRange* tail = SplitBetween(current, current->Start(), + block_pos[reg].InstructionStart()); + if (!AllocationOk()) return; + AddToUnhandledSorted(tail); + } + + // Register reg is not blocked for the whole range. + DCHECK(block_pos[reg].Value() >= current->End().Value()); + TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg), + current->id()); + SetLiveRangeAssignedRegister(current, reg); + + // This register was not free. Thus we need to find and spill + // parts of active and inactive live regions that use the same register + // at the same lifetime positions as current. + SplitAndSpillIntersecting(current); +} + + +LifetimePosition RegisterAllocator::FindOptimalSpillingPos( + LiveRange* range, LifetimePosition pos) { + BasicBlock* block = GetBlock(pos.InstructionStart()); + BasicBlock* loop_header = + block->IsLoopHeader() ? block : code()->GetContainingLoop(block); + + if (loop_header == NULL) return pos; + + UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos); + + while (loop_header != NULL) { + // We are going to spill live range inside the loop. + // If possible try to move spilling position backwards to loop header. + // This will reduce number of memory moves on the back edge. + LifetimePosition loop_start = LifetimePosition::FromInstructionIndex( + loop_header->first_instruction_index()); + + if (range->Covers(loop_start)) { + if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) { + // No register beneficial use inside the loop before the pos. + pos = loop_start; + } + } + + // Try hoisting out to an outer loop. 
+ loop_header = code()->GetContainingLoop(loop_header); + } + + return pos; +} + + +void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) { + DCHECK(current->HasRegisterAssigned()); + int reg = current->assigned_register(); + LifetimePosition split_pos = current->Start(); + for (int i = 0; i < active_live_ranges_.length(); ++i) { + LiveRange* range = active_live_ranges_[i]; + if (range->assigned_register() == reg) { + UsePosition* next_pos = range->NextRegisterPosition(current->Start()); + LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos); + if (next_pos == NULL) { + SpillAfter(range, spill_pos); + } else { + // When spilling between spill_pos and next_pos ensure that the range + // remains spilled at least until the start of the current live range. + // This guarantees that we will not introduce new unhandled ranges that + // start before the current range as this violates allocation invariant + // and will lead to an inconsistent state of active and inactive + // live-ranges: ranges are allocated in order of their start positions, + // ranges are retired from active/inactive when the start of the + // current live-range is larger than their end. 
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); + } + if (!AllocationOk()) return; + ActiveToHandled(range); + --i; + } + } + + for (int i = 0; i < inactive_live_ranges_.length(); ++i) { + LiveRange* range = inactive_live_ranges_[i]; + DCHECK(range->End().Value() > current->Start().Value()); + if (range->assigned_register() == reg && !range->IsFixed()) { + LifetimePosition next_intersection = range->FirstIntersection(current); + if (next_intersection.IsValid()) { + UsePosition* next_pos = range->NextRegisterPosition(current->Start()); + if (next_pos == NULL) { + SpillAfter(range, split_pos); + } else { + next_intersection = Min(next_intersection, next_pos->pos()); + SpillBetween(range, split_pos, next_intersection); + } + if (!AllocationOk()) return; + InactiveToHandled(range); + --i; + } + } + } +} + + +bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) { + return pos.IsInstructionStart() && + InstructionAt(pos.InstructionIndex())->IsBlockStart(); +} + + +LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, + LifetimePosition pos) { + DCHECK(!range->IsFixed()); + TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value()); + + if (pos.Value() <= range->Start().Value()) return range; + + // We can't properly connect liveranges if split occured at the end + // of control instruction. 
+ DCHECK(pos.IsInstructionStart() || + !InstructionAt(pos.InstructionIndex())->IsControl()); + + int vreg = GetVirtualRegister(); + if (!AllocationOk()) return NULL; + LiveRange* result = LiveRangeFor(vreg); + range->SplitAt(pos, result, zone()); + return result; +} + + +LiveRange* RegisterAllocator::SplitBetween(LiveRange* range, + LifetimePosition start, + LifetimePosition end) { + DCHECK(!range->IsFixed()); + TraceAlloc("Splitting live range %d in position between [%d, %d]\n", + range->id(), start.Value(), end.Value()); + + LifetimePosition split_pos = FindOptimalSplitPos(start, end); + DCHECK(split_pos.Value() >= start.Value()); + return SplitRangeAt(range, split_pos); +} + + +LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, + LifetimePosition end) { + int start_instr = start.InstructionIndex(); + int end_instr = end.InstructionIndex(); + DCHECK(start_instr <= end_instr); + + // We have no choice + if (start_instr == end_instr) return end; + + BasicBlock* start_block = GetBlock(start); + BasicBlock* end_block = GetBlock(end); + + if (end_block == start_block) { + // The interval is split in the same basic block. Split at the latest + // possible position. + return end; + } + + BasicBlock* block = end_block; + // Find header of outermost loop. + // TODO(titzer): fix redundancy below. + while (code()->GetContainingLoop(block) != NULL && + code()->GetContainingLoop(block)->rpo_number_ > + start_block->rpo_number_) { + block = code()->GetContainingLoop(block); + } + + // We did not find any suitable outer loop. Split at the latest possible + // position unless end_block is a loop header itself. 
+ if (block == end_block && !end_block->IsLoopHeader()) return end; + + return LifetimePosition::FromInstructionIndex( + block->first_instruction_index()); +} + + +void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { + LiveRange* second_part = SplitRangeAt(range, pos); + if (!AllocationOk()) return; + Spill(second_part); +} + + +void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start, + LifetimePosition end) { + SpillBetweenUntil(range, start, start, end); +} + + +void RegisterAllocator::SpillBetweenUntil(LiveRange* range, + LifetimePosition start, + LifetimePosition until, + LifetimePosition end) { + CHECK(start.Value() < end.Value()); + LiveRange* second_part = SplitRangeAt(range, start); + if (!AllocationOk()) return; + + if (second_part->Start().Value() < end.Value()) { + // The split result intersects with [start, end[. + // Split it at position between ]start+1, end[, spill the middle part + // and put the rest to unhandled. + LiveRange* third_part = SplitBetween( + second_part, Max(second_part->Start().InstructionEnd(), until), + end.PrevInstruction().InstructionEnd()); + if (!AllocationOk()) return; + + DCHECK(third_part != second_part); + + Spill(second_part); + AddToUnhandledSorted(third_part); + } else { + // The split result does not intersect with [start, end[. + // Nothing to spill. Just put it to unhandled as whole. + AddToUnhandledSorted(second_part); + } +} + + +void RegisterAllocator::Spill(LiveRange* range) { + DCHECK(!range->IsSpilled()); + TraceAlloc("Spilling live range %d\n", range->id()); + LiveRange* first = range->TopLevel(); + + if (!first->HasAllocatedSpillOperand()) { + InstructionOperand* op = TryReuseSpillSlot(range); + if (op == NULL) { + // Allocate a new operand referring to the spill slot. 
+ RegisterKind kind = range->Kind(); + int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS); + if (kind == DOUBLE_REGISTERS) { + op = DoubleStackSlotOperand::Create(index, zone()); + } else { + DCHECK(kind == GENERAL_REGISTERS); + op = StackSlotOperand::Create(index, zone()); + } + } + first->SetSpillOperand(op); + } + range->MakeSpilled(code_zone()); +} + + +int RegisterAllocator::RegisterCount() const { return num_registers_; } + + +#ifdef DEBUG + + +void RegisterAllocator::Verify() const { + for (int i = 0; i < live_ranges()->length(); ++i) { + LiveRange* current = live_ranges()->at(i); + if (current != NULL) current->Verify(); + } +} + + +#endif + + +void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range, + int reg) { + if (range->Kind() == DOUBLE_REGISTERS) { + assigned_double_registers_->Add(reg); + } else { + DCHECK(range->Kind() == GENERAL_REGISTERS); + assigned_registers_->Add(reg); + } + range->set_assigned_register(reg, code_zone()); +} + + +RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name, + RegisterAllocator* allocator) + : CompilationPhase(name, allocator->code()->linkage()->info()), + allocator_(allocator) { + if (FLAG_turbo_stats) { + allocator_zone_start_allocation_size_ = + allocator->zone()->allocation_size(); + } +} + + +RegisterAllocatorPhase::~RegisterAllocatorPhase() { + if (FLAG_turbo_stats) { + unsigned size = allocator_->zone()->allocation_size() - + allocator_zone_start_allocation_size_; + isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size); + } +#ifdef DEBUG + if (allocator_ != NULL) allocator_->Verify(); +#endif +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/register-allocator.h nodejs-0.11.15/deps/v8/src/compiler/register-allocator.h --- nodejs-0.11.13/deps/v8/src/compiler/register-allocator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/register-allocator.h 2015-01-20 21:22:17.000000000 
+0000 @@ -0,0 +1,548 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_REGISTER_ALLOCATOR_H_ +#define V8_REGISTER_ALLOCATOR_H_ + +#include "src/allocation.h" +#include "src/compiler/instruction.h" +#include "src/compiler/node.h" +#include "src/compiler/schedule.h" +#include "src/macro-assembler.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { + +// Forward declarations. +class BitVector; +class InstructionOperand; +class UnallocatedOperand; +class ParallelMove; +class PointerMap; + +namespace compiler { + +enum RegisterKind { + UNALLOCATED_REGISTERS, + GENERAL_REGISTERS, + DOUBLE_REGISTERS +}; + + +// This class represents a single point of a InstructionOperand's lifetime. For +// each instruction there are exactly two lifetime positions: the beginning and +// the end of the instruction. Lifetime positions for different instructions are +// disjoint. +class LifetimePosition { + public: + // Return the lifetime position that corresponds to the beginning of + // the instruction with the given index. + static LifetimePosition FromInstructionIndex(int index) { + return LifetimePosition(index * kStep); + } + + // Returns a numeric representation of this lifetime position. + int Value() const { return value_; } + + // Returns the index of the instruction to which this lifetime position + // corresponds. + int InstructionIndex() const { + DCHECK(IsValid()); + return value_ / kStep; + } + + // Returns true if this lifetime position corresponds to the instruction + // start. + bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; } + + // Returns the lifetime position for the start of the instruction which + // corresponds to this lifetime position. 
+ LifetimePosition InstructionStart() const { + DCHECK(IsValid()); + return LifetimePosition(value_ & ~(kStep - 1)); + } + + // Returns the lifetime position for the end of the instruction which + // corresponds to this lifetime position. + LifetimePosition InstructionEnd() const { + DCHECK(IsValid()); + return LifetimePosition(InstructionStart().Value() + kStep / 2); + } + + // Returns the lifetime position for the beginning of the next instruction. + LifetimePosition NextInstruction() const { + DCHECK(IsValid()); + return LifetimePosition(InstructionStart().Value() + kStep); + } + + // Returns the lifetime position for the beginning of the previous + // instruction. + LifetimePosition PrevInstruction() const { + DCHECK(IsValid()); + DCHECK(value_ > 1); + return LifetimePosition(InstructionStart().Value() - kStep); + } + + // Constructs the lifetime position which does not correspond to any + // instruction. + LifetimePosition() : value_(-1) {} + + // Returns true if this lifetime positions corrensponds to some + // instruction. + bool IsValid() const { return value_ != -1; } + + static inline LifetimePosition Invalid() { return LifetimePosition(); } + + static inline LifetimePosition MaxPosition() { + // We have to use this kind of getter instead of static member due to + // crash bug in GDB. + return LifetimePosition(kMaxInt); + } + + private: + static const int kStep = 2; + + // Code relies on kStep being a power of two. + STATIC_ASSERT(IS_POWER_OF_TWO(kStep)); + + explicit LifetimePosition(int value) : value_(value) {} + + int value_; +}; + + +// Representation of the non-empty interval [start,end[. 
+class UseInterval : public ZoneObject { + public: + UseInterval(LifetimePosition start, LifetimePosition end) + : start_(start), end_(end), next_(NULL) { + DCHECK(start.Value() < end.Value()); + } + + LifetimePosition start() const { return start_; } + LifetimePosition end() const { return end_; } + UseInterval* next() const { return next_; } + + // Split this interval at the given position without effecting the + // live range that owns it. The interval must contain the position. + void SplitAt(LifetimePosition pos, Zone* zone); + + // If this interval intersects with other return smallest position + // that belongs to both of them. + LifetimePosition Intersect(const UseInterval* other) const { + if (other->start().Value() < start_.Value()) return other->Intersect(this); + if (other->start().Value() < end_.Value()) return other->start(); + return LifetimePosition::Invalid(); + } + + bool Contains(LifetimePosition point) const { + return start_.Value() <= point.Value() && point.Value() < end_.Value(); + } + + void set_start(LifetimePosition start) { start_ = start; } + void set_next(UseInterval* next) { next_ = next; } + + LifetimePosition start_; + LifetimePosition end_; + UseInterval* next_; +}; + +// Representation of a use position. 
+class UsePosition : public ZoneObject { + public: + UsePosition(LifetimePosition pos, InstructionOperand* operand, + InstructionOperand* hint); + + InstructionOperand* operand() const { return operand_; } + bool HasOperand() const { return operand_ != NULL; } + + InstructionOperand* hint() const { return hint_; } + bool HasHint() const; + bool RequiresRegister() const; + bool RegisterIsBeneficial() const; + + LifetimePosition pos() const { return pos_; } + UsePosition* next() const { return next_; } + + void set_next(UsePosition* next) { next_ = next; } + + InstructionOperand* const operand_; + InstructionOperand* const hint_; + LifetimePosition const pos_; + UsePosition* next_; + bool requires_reg_; + bool register_beneficial_; +}; + +// Representation of SSA values' live ranges as a collection of (continuous) +// intervals over the instruction ordering. +class LiveRange : public ZoneObject { + public: + static const int kInvalidAssignment = 0x7fffffff; + + LiveRange(int id, Zone* zone); + + UseInterval* first_interval() const { return first_interval_; } + UsePosition* first_pos() const { return first_pos_; } + LiveRange* parent() const { return parent_; } + LiveRange* TopLevel() { return (parent_ == NULL) ? 
this : parent_; } + LiveRange* next() const { return next_; } + bool IsChild() const { return parent() != NULL; } + int id() const { return id_; } + bool IsFixed() const { return id_ < 0; } + bool IsEmpty() const { return first_interval() == NULL; } + InstructionOperand* CreateAssignedOperand(Zone* zone); + int assigned_register() const { return assigned_register_; } + int spill_start_index() const { return spill_start_index_; } + void set_assigned_register(int reg, Zone* zone); + void MakeSpilled(Zone* zone); + bool is_phi() const { return is_phi_; } + void set_is_phi(bool is_phi) { is_phi_ = is_phi; } + bool is_non_loop_phi() const { return is_non_loop_phi_; } + void set_is_non_loop_phi(bool is_non_loop_phi) { + is_non_loop_phi_ = is_non_loop_phi; + } + + // Returns use position in this live range that follows both start + // and last processed use position. + // Modifies internal state of live range! + UsePosition* NextUsePosition(LifetimePosition start); + + // Returns use position for which register is required in this live + // range and which follows both start and last processed use position + // Modifies internal state of live range! + UsePosition* NextRegisterPosition(LifetimePosition start); + + // Returns use position for which register is beneficial in this live + // range and which follows both start and last processed use position + // Modifies internal state of live range! + UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start); + + // Returns use position for which register is beneficial in this live + // range and which precedes start. + UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start); + + // Can this live range be spilled at this position. + bool CanBeSpilled(LifetimePosition pos); + + // Split this live range at the given position which must follow the start of + // the range. + // All uses following the given position will be moved from this + // live range to the result live range. 
+ void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone); + + RegisterKind Kind() const { return kind_; } + bool HasRegisterAssigned() const { + return assigned_register_ != kInvalidAssignment; + } + bool IsSpilled() const { return spilled_; } + + InstructionOperand* current_hint_operand() const { + DCHECK(current_hint_operand_ == FirstHint()); + return current_hint_operand_; + } + InstructionOperand* FirstHint() const { + UsePosition* pos = first_pos_; + while (pos != NULL && !pos->HasHint()) pos = pos->next(); + if (pos != NULL) return pos->hint(); + return NULL; + } + + LifetimePosition Start() const { + DCHECK(!IsEmpty()); + return first_interval()->start(); + } + + LifetimePosition End() const { + DCHECK(!IsEmpty()); + return last_interval_->end(); + } + + bool HasAllocatedSpillOperand() const; + InstructionOperand* GetSpillOperand() const { return spill_operand_; } + void SetSpillOperand(InstructionOperand* operand); + + void SetSpillStartIndex(int start) { + spill_start_index_ = Min(start, spill_start_index_); + } + + bool ShouldBeAllocatedBefore(const LiveRange* other) const; + bool CanCover(LifetimePosition position) const; + bool Covers(LifetimePosition position); + LifetimePosition FirstIntersection(LiveRange* other); + + // Add a new interval or a new use position to this live range. + void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone); + void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone); + void AddUsePosition(LifetimePosition pos, InstructionOperand* operand, + InstructionOperand* hint, Zone* zone); + + // Shorten the most recently added interval by setting a new start. + void ShortenTo(LifetimePosition start); + +#ifdef DEBUG + // True if target overlaps an existing interval. 
+ bool HasOverlap(UseInterval* target) const; + void Verify() const; +#endif + + private: + void ConvertOperands(Zone* zone); + UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const; + void AdvanceLastProcessedMarker(UseInterval* to_start_of, + LifetimePosition but_not_past) const; + + int id_; + bool spilled_; + bool is_phi_; + bool is_non_loop_phi_; + RegisterKind kind_; + int assigned_register_; + UseInterval* last_interval_; + UseInterval* first_interval_; + UsePosition* first_pos_; + LiveRange* parent_; + LiveRange* next_; + // This is used as a cache, it doesn't affect correctness. + mutable UseInterval* current_interval_; + UsePosition* last_processed_use_; + // This is used as a cache, it's invalid outside of BuildLiveRanges. + InstructionOperand* current_hint_operand_; + InstructionOperand* spill_operand_; + int spill_start_index_; + + friend class RegisterAllocator; // Assigns to kind_. +}; + + +class RegisterAllocator BASE_EMBEDDED { + public: + explicit RegisterAllocator(InstructionSequence* code); + + static void TraceAlloc(const char* msg, ...); + + // Checks whether the value of a given virtual register is a reference. + // TODO(titzer): rename this to IsReference. + bool HasTaggedValue(int virtual_register) const; + + // Returns the register kind required by the given virtual register. + RegisterKind RequiredRegisterKind(int virtual_register) const; + + bool Allocate(); + + const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; } + const Vector<LiveRange*>* fixed_live_ranges() const { + return &fixed_live_ranges_; + } + const Vector<LiveRange*>* fixed_double_live_ranges() const { + return &fixed_double_live_ranges_; + } + + inline InstructionSequence* code() const { return code_; } + + // This zone is for datastructures only needed during register allocation. + inline Zone* zone() { return &zone_; } + + // This zone is for InstructionOperands and moves that live beyond register + // allocation. 
+ inline Zone* code_zone() { return code()->zone(); } + + int GetVirtualRegister() { + int vreg = code()->NextVirtualRegister(); + if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) { + allocation_ok_ = false; + // Maintain the invariant that we return something below the maximum. + return 0; + } + return vreg; + } + + bool AllocationOk() { return allocation_ok_; } + +#ifdef DEBUG + void Verify() const; +#endif + + BitVector* assigned_registers() { return assigned_registers_; } + BitVector* assigned_double_registers() { return assigned_double_registers_; } + + private: + void MeetRegisterConstraints(); + void ResolvePhis(); + void BuildLiveRanges(); + void AllocateGeneralRegisters(); + void AllocateDoubleRegisters(); + void ConnectRanges(); + void ResolveControlFlow(); + void PopulatePointerMaps(); // TODO(titzer): rename to PopulateReferenceMaps. + void AllocateRegisters(); + bool CanEagerlyResolveControlFlow(BasicBlock* block) const; + inline bool SafePointsAreInOrder() const; + + // Liveness analysis support. + void InitializeLivenessAnalysis(); + BitVector* ComputeLiveOut(BasicBlock* block); + void AddInitialIntervals(BasicBlock* block, BitVector* live_out); + bool IsOutputRegisterOf(Instruction* instr, int index); + bool IsOutputDoubleRegisterOf(Instruction* instr, int index); + void ProcessInstructions(BasicBlock* block, BitVector* live); + void MeetRegisterConstraints(BasicBlock* block); + void MeetConstraintsBetween(Instruction* first, Instruction* second, + int gap_index); + void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block); + void ResolvePhis(BasicBlock* block); + + // Helper methods for building intervals. 
+ InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos, + bool is_tagged); + LiveRange* LiveRangeFor(InstructionOperand* operand); + void Define(LifetimePosition position, InstructionOperand* operand, + InstructionOperand* hint); + void Use(LifetimePosition block_start, LifetimePosition position, + InstructionOperand* operand, InstructionOperand* hint); + void AddConstraintsGapMove(int index, InstructionOperand* from, + InstructionOperand* to); + + // Helper methods for updating the life range lists. + void AddToActive(LiveRange* range); + void AddToInactive(LiveRange* range); + void AddToUnhandledSorted(LiveRange* range); + void AddToUnhandledUnsorted(LiveRange* range); + void SortUnhandled(); + bool UnhandledIsSorted(); + void ActiveToHandled(LiveRange* range); + void ActiveToInactive(LiveRange* range); + void InactiveToHandled(LiveRange* range); + void InactiveToActive(LiveRange* range); + void FreeSpillSlot(LiveRange* range); + InstructionOperand* TryReuseSpillSlot(LiveRange* range); + + // Helper methods for allocating registers. + bool TryAllocateFreeReg(LiveRange* range); + void AllocateBlockedReg(LiveRange* range); + + // Live range splitting helpers. + + // Split the given range at the given position. + // If range starts at or after the given position then the + // original range is returned. + // Otherwise returns the live range that starts at pos and contains + // all uses from the original range that follow pos. Uses at pos will + // still be owned by the original range after splitting. + LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos); + + // Split the given range in a position from the interval [start, end]. + LiveRange* SplitBetween(LiveRange* range, LifetimePosition start, + LifetimePosition end); + + // Find a lifetime position in the interval [start, end] which + // is optimal for splitting: it is either header of the outermost + // loop covered by this interval or the latest possible position. 
+ LifetimePosition FindOptimalSplitPos(LifetimePosition start, + LifetimePosition end); + + // Spill the given life range after position pos. + void SpillAfter(LiveRange* range, LifetimePosition pos); + + // Spill the given life range after position [start] and up to position [end]. + void SpillBetween(LiveRange* range, LifetimePosition start, + LifetimePosition end); + + // Spill the given life range after position [start] and up to position [end]. + // Range is guaranteed to be spilled at least until position [until]. + void SpillBetweenUntil(LiveRange* range, LifetimePosition start, + LifetimePosition until, LifetimePosition end); + + void SplitAndSpillIntersecting(LiveRange* range); + + // If we are trying to spill a range inside the loop try to + // hoist spill position out to the point just before the loop. + LifetimePosition FindOptimalSpillingPos(LiveRange* range, + LifetimePosition pos); + + void Spill(LiveRange* range); + bool IsBlockBoundary(LifetimePosition pos); + + // Helper methods for resolving control flow. + void ResolveControlFlow(LiveRange* range, BasicBlock* block, + BasicBlock* pred); + + inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg); + + // Return parallel move that should be used to connect ranges split at the + // given position. + ParallelMove* GetConnectingParallelMove(LifetimePosition pos); + + // Return the block which contains give lifetime position. + BasicBlock* GetBlock(LifetimePosition pos); + + // Helper methods for the fixed registers. 
+ int RegisterCount() const; + static int FixedLiveRangeID(int index) { return -index - 1; } + static int FixedDoubleLiveRangeID(int index); + LiveRange* FixedLiveRangeFor(int index); + LiveRange* FixedDoubleLiveRangeFor(int index); + LiveRange* LiveRangeFor(int index); + GapInstruction* GetLastGap(BasicBlock* block); + + const char* RegisterName(int allocation_index); + + inline Instruction* InstructionAt(int index) { + return code()->InstructionAt(index); + } + + Zone zone_; + InstructionSequence* code_; + + // During liveness analysis keep a mapping from block id to live_in sets + // for blocks already analyzed. + ZoneList<BitVector*> live_in_sets_; + + // Liveness analysis results. + ZoneList<LiveRange*> live_ranges_; + + // Lists of live ranges + EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters> + fixed_live_ranges_; + EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters> + fixed_double_live_ranges_; + ZoneList<LiveRange*> unhandled_live_ranges_; + ZoneList<LiveRange*> active_live_ranges_; + ZoneList<LiveRange*> inactive_live_ranges_; + ZoneList<LiveRange*> reusable_slots_; + + RegisterKind mode_; + int num_registers_; + + BitVector* assigned_registers_; + BitVector* assigned_double_registers_; + + // Indicates success or failure during register allocation. 
+ bool allocation_ok_; + +#ifdef DEBUG + LifetimePosition allocation_finger_; +#endif + + DISALLOW_COPY_AND_ASSIGN(RegisterAllocator); +}; + + +class RegisterAllocatorPhase : public CompilationPhase { + public: + RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator); + ~RegisterAllocatorPhase(); + + private: + RegisterAllocator* allocator_; + unsigned allocator_zone_start_allocation_size_; + + DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_REGISTER_ALLOCATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/representation-change.h nodejs-0.11.15/deps/v8/src/compiler/representation-change.h --- nodejs-0.11.13/deps/v8/src/compiler/representation-change.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/representation-change.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,411 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_ +#define V8_COMPILER_REPRESENTATION_CHANGE_H_ + +#include "src/compiler/js-graph.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// The types and representations tracked during representation inference +// and change insertion. +// TODO(titzer): First, merge MachineType and RepType. +// TODO(titzer): Second, Use the real type system instead of RepType. +enum RepType { + // Representations. + rBit = 1 << 0, + rWord32 = 1 << 1, + rWord64 = 1 << 2, + rFloat64 = 1 << 3, + rTagged = 1 << 4, + + // Types. 
+ tBool = 1 << 5, + tInt32 = 1 << 6, + tUint32 = 1 << 7, + tInt64 = 1 << 8, + tUint64 = 1 << 9, + tNumber = 1 << 10, + tAny = 1 << 11 +}; + +#define REP_TYPE_STRLEN 24 + +typedef uint16_t RepTypeUnion; + + +inline void RenderRepTypeUnion(char* buf, RepTypeUnion info) { + base::OS::SNPrintF(buf, REP_TYPE_STRLEN, "{%s%s%s%s%s %s%s%s%s%s%s%s}", + (info & rBit) ? "k" : " ", (info & rWord32) ? "w" : " ", + (info & rWord64) ? "q" : " ", + (info & rFloat64) ? "f" : " ", + (info & rTagged) ? "t" : " ", (info & tBool) ? "Z" : " ", + (info & tInt32) ? "I" : " ", (info & tUint32) ? "U" : " ", + (info & tInt64) ? "L" : " ", (info & tUint64) ? "J" : " ", + (info & tNumber) ? "N" : " ", (info & tAny) ? "*" : " "); +} + + +const RepTypeUnion rMask = rBit | rWord32 | rWord64 | rFloat64 | rTagged; +const RepTypeUnion tMask = + tBool | tInt32 | tUint32 | tInt64 | tUint64 | tNumber | tAny; +const RepType rPtr = kPointerSize == 4 ? rWord32 : rWord64; + +// Contains logic related to changing the representation of values for constants +// and other nodes, as well as lowering Simplified->Machine operators. +// Eagerly folds any representation changes for constants. +class RepresentationChanger { + public: + RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified, + MachineOperatorBuilder* machine, Isolate* isolate) + : jsgraph_(jsgraph), + simplified_(simplified), + machine_(machine), + isolate_(isolate), + testing_type_errors_(false), + type_error_(false) {} + + + Node* GetRepresentationFor(Node* node, RepTypeUnion output_type, + RepTypeUnion use_type) { + if (!IsPowerOf2(output_type & rMask)) { + // There should be only one output representation. + return TypeError(node, output_type, use_type); + } + if ((use_type & rMask) == (output_type & rMask)) { + // Representations are the same. That's a no-op. 
+ return node; + } + if (use_type & rTagged) { + return GetTaggedRepresentationFor(node, output_type); + } else if (use_type & rFloat64) { + return GetFloat64RepresentationFor(node, output_type); + } else if (use_type & rWord32) { + return GetWord32RepresentationFor(node, output_type, use_type & tUint32); + } else if (use_type & rBit) { + return GetBitRepresentationFor(node, output_type); + } else if (use_type & rWord64) { + return GetWord64RepresentationFor(node, output_type); + } else { + return node; + } + } + + Node* GetTaggedRepresentationFor(Node* node, RepTypeUnion output_type) { + // Eagerly fold representation changes for constants. + switch (node->opcode()) { + case IrOpcode::kNumberConstant: + case IrOpcode::kHeapConstant: + return node; // No change necessary. + case IrOpcode::kInt32Constant: + if (output_type & tUint32) { + uint32_t value = ValueOf<uint32_t>(node->op()); + return jsgraph()->Constant(static_cast<double>(value)); + } else if (output_type & tInt32) { + int32_t value = ValueOf<int32_t>(node->op()); + return jsgraph()->Constant(value); + } else if (output_type & rBit) { + return ValueOf<int32_t>(node->op()) == 0 ? jsgraph()->FalseConstant() + : jsgraph()->TrueConstant(); + } else { + return TypeError(node, output_type, rTagged); + } + case IrOpcode::kFloat64Constant: + return jsgraph()->Constant(ValueOf<double>(node->op())); + default: + break; + } + // Select the correct X -> Tagged operator. 
+ Operator* op; + if (output_type & rBit) { + op = simplified()->ChangeBitToBool(); + } else if (output_type & rWord32) { + if (output_type & tUint32) { + op = simplified()->ChangeUint32ToTagged(); + } else if (output_type & tInt32) { + op = simplified()->ChangeInt32ToTagged(); + } else { + return TypeError(node, output_type, rTagged); + } + } else if (output_type & rFloat64) { + op = simplified()->ChangeFloat64ToTagged(); + } else { + return TypeError(node, output_type, rTagged); + } + return jsgraph()->graph()->NewNode(op, node); + } + + Node* GetFloat64RepresentationFor(Node* node, RepTypeUnion output_type) { + // Eagerly fold representation changes for constants. + switch (node->opcode()) { + case IrOpcode::kNumberConstant: + return jsgraph()->Float64Constant(ValueOf<double>(node->op())); + case IrOpcode::kInt32Constant: + if (output_type & tUint32) { + uint32_t value = ValueOf<uint32_t>(node->op()); + return jsgraph()->Float64Constant(static_cast<double>(value)); + } else { + int32_t value = ValueOf<int32_t>(node->op()); + return jsgraph()->Float64Constant(value); + } + case IrOpcode::kFloat64Constant: + return node; // No change necessary. + default: + break; + } + // Select the correct X -> Float64 operator. + Operator* op; + if (output_type & rWord32) { + if (output_type & tUint32) { + op = machine()->ChangeUint32ToFloat64(); + } else { + op = machine()->ChangeInt32ToFloat64(); + } + } else if (output_type & rTagged) { + op = simplified()->ChangeTaggedToFloat64(); + } else { + return TypeError(node, output_type, rFloat64); + } + return jsgraph()->graph()->NewNode(op, node); + } + + Node* GetWord32RepresentationFor(Node* node, RepTypeUnion output_type, + bool use_unsigned) { + // Eagerly fold representation changes for constants. + switch (node->opcode()) { + case IrOpcode::kInt32Constant: + return node; // No change necessary. 
+ case IrOpcode::kNumberConstant: + case IrOpcode::kFloat64Constant: { + double value = ValueOf<double>(node->op()); + if (value < 0) { + DCHECK(IsInt32Double(value)); + int32_t iv = static_cast<int32_t>(value); + return jsgraph()->Int32Constant(iv); + } else { + DCHECK(IsUint32Double(value)); + int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value)); + return jsgraph()->Int32Constant(iv); + } + } + default: + break; + } + // Select the correct X -> Word32 operator. + Operator* op = NULL; + if (output_type & rFloat64) { + if (output_type & tUint32 || use_unsigned) { + op = machine()->ChangeFloat64ToUint32(); + } else { + op = machine()->ChangeFloat64ToInt32(); + } + } else if (output_type & rTagged) { + if (output_type & tUint32 || use_unsigned) { + op = simplified()->ChangeTaggedToUint32(); + } else { + op = simplified()->ChangeTaggedToInt32(); + } + } else if (output_type & rBit) { + return node; // Sloppy comparison -> word32. + } else { + return TypeError(node, output_type, rWord32); + } + return jsgraph()->graph()->NewNode(op, node); + } + + Node* GetBitRepresentationFor(Node* node, RepTypeUnion output_type) { + // Eagerly fold representation changes for constants. + switch (node->opcode()) { + case IrOpcode::kInt32Constant: { + int32_t value = ValueOf<int32_t>(node->op()); + if (value == 0 || value == 1) return node; + return jsgraph()->OneConstant(); // value != 0 + } + case IrOpcode::kHeapConstant: { + Handle<Object> handle = ValueOf<Handle<Object> >(node->op()); + DCHECK(*handle == isolate()->heap()->true_value() || + *handle == isolate()->heap()->false_value()); + return jsgraph()->Int32Constant( + *handle == isolate()->heap()->true_value() ? 1 : 0); + } + default: + break; + } + // Select the correct X -> Bit operator. + Operator* op; + if (output_type & rWord32) { + return node; // No change necessary. + } else if (output_type & rWord64) { + return node; // TODO(titzer): No change necessary, on 64-bit. 
+ } else if (output_type & rTagged) { + op = simplified()->ChangeBoolToBit(); + } else { + return TypeError(node, output_type, rBit); + } + return jsgraph()->graph()->NewNode(op, node); + } + + Node* GetWord64RepresentationFor(Node* node, RepTypeUnion output_type) { + if (output_type & rBit) { + return node; // Sloppy comparison -> word64 + } + // Can't really convert Word64 to anything else. Purported to be internal. + return TypeError(node, output_type, rWord64); + } + + static RepType TypeForMachineType(MachineType rep) { + // TODO(titzer): merge MachineType and RepType. + switch (rep) { + case kMachineWord8: + return rWord32; + case kMachineWord16: + return rWord32; + case kMachineWord32: + return rWord32; + case kMachineWord64: + return rWord64; + case kMachineFloat64: + return rFloat64; + case kMachineTagged: + return rTagged; + default: + UNREACHABLE(); + return static_cast<RepType>(0); + } + } + + Operator* Int32OperatorFor(IrOpcode::Value opcode) { + switch (opcode) { + case IrOpcode::kNumberAdd: + return machine()->Int32Add(); + case IrOpcode::kNumberSubtract: + return machine()->Int32Sub(); + case IrOpcode::kNumberEqual: + return machine()->Word32Equal(); + case IrOpcode::kNumberLessThan: + return machine()->Int32LessThan(); + case IrOpcode::kNumberLessThanOrEqual: + return machine()->Int32LessThanOrEqual(); + default: + UNREACHABLE(); + return NULL; + } + } + + Operator* Uint32OperatorFor(IrOpcode::Value opcode) { + switch (opcode) { + case IrOpcode::kNumberAdd: + return machine()->Int32Add(); + case IrOpcode::kNumberSubtract: + return machine()->Int32Sub(); + case IrOpcode::kNumberEqual: + return machine()->Word32Equal(); + case IrOpcode::kNumberLessThan: + return machine()->Uint32LessThan(); + case IrOpcode::kNumberLessThanOrEqual: + return machine()->Uint32LessThanOrEqual(); + default: + UNREACHABLE(); + return NULL; + } + } + + Operator* Float64OperatorFor(IrOpcode::Value opcode) { + switch (opcode) { + case IrOpcode::kNumberAdd: + return 
machine()->Float64Add(); + case IrOpcode::kNumberSubtract: + return machine()->Float64Sub(); + case IrOpcode::kNumberMultiply: + return machine()->Float64Mul(); + case IrOpcode::kNumberDivide: + return machine()->Float64Div(); + case IrOpcode::kNumberModulus: + return machine()->Float64Mod(); + case IrOpcode::kNumberEqual: + return machine()->Float64Equal(); + case IrOpcode::kNumberLessThan: + return machine()->Float64LessThan(); + case IrOpcode::kNumberLessThanOrEqual: + return machine()->Float64LessThanOrEqual(); + default: + UNREACHABLE(); + return NULL; + } + } + + RepType TypeForField(const FieldAccess& access) { + RepType tElement = static_cast<RepType>(0); // TODO(titzer) + RepType rElement = TypeForMachineType(access.representation); + return static_cast<RepType>(tElement | rElement); + } + + RepType TypeForElement(const ElementAccess& access) { + RepType tElement = static_cast<RepType>(0); // TODO(titzer) + RepType rElement = TypeForMachineType(access.representation); + return static_cast<RepType>(tElement | rElement); + } + + RepType TypeForBasePointer(const FieldAccess& access) { + if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged); + return kPointerSize == 8 ? rWord64 : rWord32; + } + + RepType TypeForBasePointer(const ElementAccess& access) { + if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged); + return kPointerSize == 8 ? rWord64 : rWord32; + } + + RepType TypeFromUpperBound(Type* type) { + if (type->Is(Type::None())) + return tAny; // TODO(titzer): should be an error + if (type->Is(Type::Signed32())) return tInt32; + if (type->Is(Type::Unsigned32())) return tUint32; + if (type->Is(Type::Number())) return tNumber; + if (type->Is(Type::Boolean())) return tBool; + return tAny; + } + + private: + JSGraph* jsgraph_; + SimplifiedOperatorBuilder* simplified_; + MachineOperatorBuilder* machine_; + Isolate* isolate_; + + friend class RepresentationChangerTester; // accesses the below fields. 
+ + bool testing_type_errors_; // If {true}, don't abort on a type error. + bool type_error_; // Set when a type error is detected. + + Node* TypeError(Node* node, RepTypeUnion output_type, RepTypeUnion use) { + type_error_ = true; + if (!testing_type_errors_) { + char buf1[REP_TYPE_STRLEN]; + char buf2[REP_TYPE_STRLEN]; + RenderRepTypeUnion(buf1, output_type); + RenderRepTypeUnion(buf2, use); + V8_Fatal(__FILE__, __LINE__, + "RepresentationChangerError: node #%d:%s of rep" + "%s cannot be changed to rep%s", + node->id(), node->op()->mnemonic(), buf1, buf2); + } + return node; + } + + JSGraph* jsgraph() { return jsgraph_; } + Isolate* isolate() { return isolate_; } + SimplifiedOperatorBuilder* simplified() { return simplified_; } + MachineOperatorBuilder* machine() { return machine_; } +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_REPRESENTATION_CHANGE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/schedule.cc nodejs-0.11.15/deps/v8/src/compiler/schedule.cc --- nodejs-0.11.13/deps/v8/src/compiler/schedule.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/schedule.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,92 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/node.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/schedule.h" +#include "src/ostreams.h" + +namespace v8 { +namespace internal { +namespace compiler { + +OStream& operator<<(OStream& os, const BasicBlockData::Control& c) { + switch (c) { + case BasicBlockData::kNone: + return os << "none"; + case BasicBlockData::kGoto: + return os << "goto"; + case BasicBlockData::kBranch: + return os << "branch"; + case BasicBlockData::kReturn: + return os << "return"; + case BasicBlockData::kThrow: + return os << "throw"; + case BasicBlockData::kCall: + return os << "call"; + case BasicBlockData::kDeoptimize: + return os << "deoptimize"; + } + UNREACHABLE(); + return os; +} + + +OStream& operator<<(OStream& os, const Schedule& s) { + // TODO(svenpanne) Const-correct the RPO stuff/iterators. + BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order(); + for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) { + BasicBlock* block = *i; + os << "--- BLOCK B" << block->id(); + if (block->PredecessorCount() != 0) os << " <- "; + BasicBlock::Predecessors predecessors = block->predecessors(); + bool comma = false; + for (BasicBlock::Predecessors::iterator j = predecessors.begin(); + j != predecessors.end(); ++j) { + if (comma) os << ", "; + comma = true; + os << "B" << (*j)->id(); + } + os << " ---\n"; + for (BasicBlock::const_iterator j = block->begin(); j != block->end(); + ++j) { + Node* node = *j; + os << " " << *node; + if (!NodeProperties::IsControl(node)) { + Bounds bounds = NodeProperties::GetBounds(node); + os << " : "; + bounds.lower->PrintTo(os); + if (!bounds.upper->Is(bounds.lower)) { + os << ".."; + bounds.upper->PrintTo(os); + } + } + os << "\n"; + } + BasicBlock::Control control = block->control_; + if (control != BasicBlock::kNone) { + os << " "; + if (block->control_input_ != NULL) { + os << *block->control_input_; + } else { + os << "Goto"; + } + os << " 
-> "; + BasicBlock::Successors successors = block->successors(); + comma = false; + for (BasicBlock::Successors::iterator j = successors.begin(); + j != successors.end(); ++j) { + if (comma) os << ", "; + comma = true; + os << "B" << (*j)->id(); + } + os << "\n"; + } + } + return os; +} +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/schedule.h nodejs-0.11.15/deps/v8/src/compiler/schedule.h --- nodejs-0.11.13/deps/v8/src/compiler/schedule.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/schedule.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,335 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_SCHEDULE_H_ +#define V8_COMPILER_SCHEDULE_H_ + +#include <vector> + +#include "src/v8.h" + +#include "src/compiler/generic-algorithm.h" +#include "src/compiler/generic-graph.h" +#include "src/compiler/generic-node.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/node.h" +#include "src/compiler/opcodes.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class BasicBlock; +class Graph; +class ConstructScheduleData; +class CodeGenerator; // Because of a namespace bug in clang. + +class BasicBlockData { + public: + // Possible control nodes that can end a block. + enum Control { + kNone, // Control not initialized yet. + kGoto, // Goto a single successor block. + kBranch, // Branch if true to first successor, otherwise second. + kReturn, // Return a value from this method. + kThrow, // Throw an exception. + kCall, // Call to a possibly deoptimizing or throwing function. + kDeoptimize // Deoptimize. + }; + + int32_t rpo_number_; // special RPO number of the block. + BasicBlock* loop_header_; // Pointer to dominating loop header basic block, + // NULL if none. 
For loop headers, this points to + // enclosing loop header. + int32_t loop_depth_; // loop nesting, 0 is top-level + int32_t loop_end_; // end of the loop, if this block is a loop header. + int32_t code_start_; // start index of arch-specific code. + int32_t code_end_; // end index of arch-specific code. + bool deferred_; // {true} if this block is considered the slow + // path. + Control control_; // Control at the end of the block. + Node* control_input_; // Input value for control. + NodeVector nodes_; // nodes of this block in forward order. + + explicit BasicBlockData(Zone* zone) + : rpo_number_(-1), + loop_header_(NULL), + loop_depth_(0), + loop_end_(-1), + code_start_(-1), + code_end_(-1), + deferred_(false), + control_(kNone), + control_input_(NULL), + nodes_(NodeVector::allocator_type(zone)) {} + + inline bool IsLoopHeader() const { return loop_end_ >= 0; } + inline bool LoopContains(BasicBlockData* block) const { + // RPO numbers must be initialized. + DCHECK(rpo_number_ >= 0); + DCHECK(block->rpo_number_ >= 0); + if (loop_end_ < 0) return false; // This is not a loop. + return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_; + } + int first_instruction_index() { + DCHECK(code_start_ >= 0); + DCHECK(code_end_ > 0); + DCHECK(code_end_ >= code_start_); + return code_start_; + } + int last_instruction_index() { + DCHECK(code_start_ >= 0); + DCHECK(code_end_ > 0); + DCHECK(code_end_ >= code_start_); + return code_end_ - 1; + } +}; + +OStream& operator<<(OStream& os, const BasicBlockData::Control& c); + +// A basic block contains an ordered list of nodes and ends with a control +// node. Note that if a basic block has phis, then all phis must appear as the +// first nodes in the block. 
+class BasicBlock V8_FINAL : public GenericNode<BasicBlockData, BasicBlock> { + public: + BasicBlock(GenericGraphBase* graph, int input_count) + : GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {} + + typedef Uses Successors; + typedef Inputs Predecessors; + + Successors successors() { return static_cast<Successors>(uses()); } + Predecessors predecessors() { return static_cast<Predecessors>(inputs()); } + + int PredecessorCount() { return InputCount(); } + BasicBlock* PredecessorAt(int index) { return InputAt(index); } + + int SuccessorCount() { return UseCount(); } + BasicBlock* SuccessorAt(int index) { return UseAt(index); } + + int PredecessorIndexOf(BasicBlock* predecessor) { + BasicBlock::Predecessors predecessors = this->predecessors(); + for (BasicBlock::Predecessors::iterator i = predecessors.begin(); + i != predecessors.end(); ++i) { + if (*i == predecessor) return i.index(); + } + return -1; + } + + inline BasicBlock* loop_header() { + return static_cast<BasicBlock*>(loop_header_); + } + inline BasicBlock* ContainingLoop() { + if (IsLoopHeader()) return this; + return static_cast<BasicBlock*>(loop_header_); + } + + typedef NodeVector::iterator iterator; + iterator begin() { return nodes_.begin(); } + iterator end() { return nodes_.end(); } + + typedef NodeVector::const_iterator const_iterator; + const_iterator begin() const { return nodes_.begin(); } + const_iterator end() const { return nodes_.end(); } + + typedef NodeVector::reverse_iterator reverse_iterator; + reverse_iterator rbegin() { return nodes_.rbegin(); } + reverse_iterator rend() { return nodes_.rend(); } + + private: + DISALLOW_COPY_AND_ASSIGN(BasicBlock); +}; + +typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock> + NullBasicBlockVisitor; + +typedef zone_allocator<BasicBlock*> BasicBlockPtrZoneAllocator; +typedef std::vector<BasicBlock*, BasicBlockPtrZoneAllocator> BasicBlockVector; +typedef BasicBlockVector::iterator BasicBlockVectorIter; +typedef 
BasicBlockVector::reverse_iterator BasicBlockVectorRIter; + +// A schedule represents the result of assigning nodes to basic blocks +// and ordering them within basic blocks. Prior to computing a schedule, +// a graph has no notion of control flow ordering other than that induced +// by the graph's dependencies. A schedule is required to generate code. +class Schedule : public GenericGraph<BasicBlock> { + public: + explicit Schedule(Zone* zone) + : GenericGraph<BasicBlock>(zone), + zone_(zone), + all_blocks_(BasicBlockVector::allocator_type(zone)), + nodeid_to_block_(BasicBlockVector::allocator_type(zone)), + rpo_order_(BasicBlockVector::allocator_type(zone)), + immediate_dominator_(BasicBlockVector::allocator_type(zone)) { + NewBasicBlock(); // entry. + NewBasicBlock(); // exit. + SetStart(entry()); + SetEnd(exit()); + } + + // TODO(titzer): rewrite users of these methods to use start() and end(). + BasicBlock* entry() const { return all_blocks_[0]; } // Return entry block. + BasicBlock* exit() const { return all_blocks_[1]; } // Return exit block. + + // Return the block which contains {node}, if any. + BasicBlock* block(Node* node) const { + if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) { + return nodeid_to_block_[node->id()]; + } + return NULL; + } + + BasicBlock* dominator(BasicBlock* block) { + return immediate_dominator_[block->id()]; + } + + bool IsScheduled(Node* node) { + int length = static_cast<int>(nodeid_to_block_.size()); + if (node->id() >= length) return false; + return nodeid_to_block_[node->id()] != NULL; + } + + BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; } + + int BasicBlockCount() const { return NodeCount(); } + int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); } + + typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks; + + // Return a list of all the blocks in the schedule, in arbitrary order. 
+ BasicBlocks all_blocks() { return BasicBlocks(&all_blocks_); } + + // Check if nodes {a} and {b} are in the same block. + inline bool SameBasicBlock(Node* a, Node* b) const { + BasicBlock* block = this->block(a); + return block != NULL && block == this->block(b); + } + + // BasicBlock building: create a new block. + inline BasicBlock* NewBasicBlock() { + BasicBlock* block = + BasicBlock::New(this, 0, static_cast<BasicBlock**>(NULL)); + all_blocks_.push_back(block); + return block; + } + + // BasicBlock building: records that a node will later be added to a block but + // doesn't actually add the node to the block. + inline void PlanNode(BasicBlock* block, Node* node) { + if (FLAG_trace_turbo_scheduler) { + PrintF("Planning node %d for future add to block %d\n", node->id(), + block->id()); + } + DCHECK(this->block(node) == NULL); + SetBlockForNode(block, node); + } + + // BasicBlock building: add a node to the end of the block. + inline void AddNode(BasicBlock* block, Node* node) { + if (FLAG_trace_turbo_scheduler) { + PrintF("Adding node %d to block %d\n", node->id(), block->id()); + } + DCHECK(this->block(node) == NULL || this->block(node) == block); + block->nodes_.push_back(node); + SetBlockForNode(block, node); + } + + // BasicBlock building: add a goto to the end of {block}. + void AddGoto(BasicBlock* block, BasicBlock* succ) { + DCHECK(block->control_ == BasicBlock::kNone); + block->control_ = BasicBlock::kGoto; + AddSuccessor(block, succ); + } + + // BasicBlock building: add a (branching) call at the end of {block}. + void AddCall(BasicBlock* block, Node* call, BasicBlock* cont_block, + BasicBlock* deopt_block) { + DCHECK(block->control_ == BasicBlock::kNone); + DCHECK(call->opcode() == IrOpcode::kCall); + block->control_ = BasicBlock::kCall; + // Insert the deopt block first so that the RPO order builder picks + // it first (and thus it ends up late in the RPO order). 
+ AddSuccessor(block, deopt_block); + AddSuccessor(block, cont_block); + SetControlInput(block, call); + } + + // BasicBlock building: add a branch at the end of {block}. + void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock, + BasicBlock* fblock) { + DCHECK(block->control_ == BasicBlock::kNone); + DCHECK(branch->opcode() == IrOpcode::kBranch); + block->control_ = BasicBlock::kBranch; + AddSuccessor(block, tblock); + AddSuccessor(block, fblock); + SetControlInput(block, branch); + } + + // BasicBlock building: add a return at the end of {block}. + void AddReturn(BasicBlock* block, Node* input) { + // TODO(titzer): require a Return node here. + DCHECK(block->control_ == BasicBlock::kNone); + block->control_ = BasicBlock::kReturn; + SetControlInput(block, input); + if (block != exit()) AddSuccessor(block, exit()); + } + + // BasicBlock building: add a throw at the end of {block}. + void AddThrow(BasicBlock* block, Node* input) { + DCHECK(block->control_ == BasicBlock::kNone); + block->control_ = BasicBlock::kThrow; + SetControlInput(block, input); + if (block != exit()) AddSuccessor(block, exit()); + } + + // BasicBlock building: add a deopt at the end of {block}. + void AddDeoptimize(BasicBlock* block, Node* state) { + DCHECK(block->control_ == BasicBlock::kNone); + block->control_ = BasicBlock::kDeoptimize; + SetControlInput(block, state); + block->deferred_ = true; // By default, consider deopts the slow path. 
+ if (block != exit()) AddSuccessor(block, exit()); + } + + friend class Scheduler; + friend class CodeGenerator; + + void AddSuccessor(BasicBlock* block, BasicBlock* succ) { + succ->AppendInput(zone_, block); + } + + BasicBlockVector* rpo_order() { return &rpo_order_; } + + private: + friend class ScheduleVisualizer; + + void SetControlInput(BasicBlock* block, Node* node) { + block->control_input_ = node; + SetBlockForNode(block, node); + } + + void SetBlockForNode(BasicBlock* block, Node* node) { + int length = static_cast<int>(nodeid_to_block_.size()); + if (node->id() >= length) { + nodeid_to_block_.resize(node->id() + 1); + } + nodeid_to_block_[node->id()] = block; + } + + Zone* zone_; + BasicBlockVector all_blocks_; // All basic blocks in the schedule. + BasicBlockVector nodeid_to_block_; // Map from node to containing block. + BasicBlockVector rpo_order_; // Reverse-post-order block list. + BasicBlockVector immediate_dominator_; // Maps to a block's immediate + // dominator, indexed by block + // id. +}; + +OStream& operator<<(OStream& os, const Schedule& s); +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_SCHEDULE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/scheduler.cc nodejs-0.11.15/deps/v8/src/compiler/scheduler.cc --- nodejs-0.11.13/deps/v8/src/compiler/scheduler.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/scheduler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1048 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/scheduler.h" + +#include "src/compiler/graph.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/node.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node-properties-inl.h" +#include "src/data-flow.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule) + : graph_(graph), + schedule_(schedule), + branches_(NodeVector::allocator_type(zone)), + calls_(NodeVector::allocator_type(zone)), + deopts_(NodeVector::allocator_type(zone)), + returns_(NodeVector::allocator_type(zone)), + loops_and_merges_(NodeVector::allocator_type(zone)), + node_block_placement_(BasicBlockVector::allocator_type(zone)), + unscheduled_uses_(IntVector::allocator_type(zone)), + scheduled_nodes_(NodeVectorVector::allocator_type(zone)), + schedule_root_nodes_(NodeVector::allocator_type(zone)), + schedule_early_rpo_index_(IntVector::allocator_type(zone)) {} + + +Schedule* Scheduler::ComputeSchedule(Graph* graph) { + Zone tmp_zone(graph->zone()->isolate()); + Schedule* schedule = new (graph->zone()) Schedule(graph->zone()); + Scheduler scheduler(&tmp_zone, graph, schedule); + + schedule->AddNode(schedule->end(), graph->end()); + + scheduler.PrepareAuxiliaryNodeData(); + scheduler.CreateBlocks(); + scheduler.WireBlocks(); + scheduler.PrepareAuxiliaryBlockData(); + + Scheduler::ComputeSpecialRPO(schedule); + scheduler.GenerateImmediateDominatorTree(); + + scheduler.PrepareUses(); + scheduler.ScheduleEarly(); + scheduler.ScheduleLate(); + + return schedule; +} + + +class CreateBlockVisitor : public NullNodeVisitor { + public: + explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {} + + GenericGraphVisit::Control Post(Node* node) { + Schedule* schedule = scheduler_->schedule_; + switch (node->opcode()) { + case IrOpcode::kIfTrue: + case IrOpcode::kIfFalse: + case IrOpcode::kContinuation: + case IrOpcode::kLazyDeoptimization: { + BasicBlock* block 
= schedule->NewBasicBlock(); + schedule->AddNode(block, node); + break; + } + case IrOpcode::kLoop: + case IrOpcode::kMerge: { + BasicBlock* block = schedule->NewBasicBlock(); + schedule->AddNode(block, node); + scheduler_->loops_and_merges_.push_back(node); + break; + } + case IrOpcode::kBranch: { + scheduler_->branches_.push_back(node); + break; + } + case IrOpcode::kDeoptimize: { + scheduler_->deopts_.push_back(node); + break; + } + case IrOpcode::kCall: { + if (OperatorProperties::CanLazilyDeoptimize(node->op())) { + scheduler_->calls_.push_back(node); + } + break; + } + case IrOpcode::kReturn: + scheduler_->returns_.push_back(node); + break; + default: + break; + } + + return GenericGraphVisit::CONTINUE; + } + + private: + Scheduler* scheduler_; +}; + + +void Scheduler::CreateBlocks() { + CreateBlockVisitor create_blocks(this); + if (FLAG_trace_turbo_scheduler) { + PrintF("---------------- CREATING BLOCKS ------------------\n"); + } + schedule_->AddNode(schedule_->entry(), graph_->start()); + graph_->VisitNodeInputsFromEnd(&create_blocks); +} + + +void Scheduler::WireBlocks() { + if (FLAG_trace_turbo_scheduler) { + PrintF("----------------- WIRING BLOCKS -------------------\n"); + } + AddSuccessorsForBranches(); + AddSuccessorsForReturns(); + AddSuccessorsForCalls(); + AddSuccessorsForDeopts(); + AddPredecessorsForLoopsAndMerges(); + // TODO(danno): Handle Throw, et al. 
+} + + +void Scheduler::PrepareAuxiliaryNodeData() { + unscheduled_uses_.resize(graph_->NodeCount(), 0); + schedule_early_rpo_index_.resize(graph_->NodeCount(), 0); +} + + +void Scheduler::PrepareAuxiliaryBlockData() { + Zone* zone = schedule_->zone(); + scheduled_nodes_.resize(schedule_->BasicBlockCount(), + NodeVector(NodeVector::allocator_type(zone))); + schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL); +} + + +void Scheduler::AddPredecessorsForLoopsAndMerges() { + for (NodeVectorIter i = loops_and_merges_.begin(); + i != loops_and_merges_.end(); ++i) { + Node* merge_or_loop = *i; + BasicBlock* block = schedule_->block(merge_or_loop); + DCHECK(block != NULL); + // For all of the merge's control inputs, add a goto at the end to the + // merge's basic block. + for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) { + if (OperatorProperties::IsBasicBlockBegin((*i)->op())) { + BasicBlock* predecessor_block = schedule_->block(*j); + if ((*j)->opcode() != IrOpcode::kReturn && + (*j)->opcode() != IrOpcode::kDeoptimize) { + DCHECK(predecessor_block != NULL); + if (FLAG_trace_turbo_scheduler) { + IrOpcode::Value opcode = (*i)->opcode(); + PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(), + IrOpcode::Mnemonic(opcode), predecessor_block->id(), + block->id()); + } + schedule_->AddGoto(predecessor_block, block); + } + } + } + } +} + + +void Scheduler::AddSuccessorsForCalls() { + for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) { + Node* call = *i; + DCHECK(call->opcode() == IrOpcode::kCall); + DCHECK(OperatorProperties::CanLazilyDeoptimize(call->op())); + + Node* lazy_deopt_node = NULL; + Node* cont_node = NULL; + // Find the continuation and lazy-deopt nodes among the uses. 
+ for (UseIter use_iter = call->uses().begin(); + use_iter != call->uses().end(); ++use_iter) { + switch ((*use_iter)->opcode()) { + case IrOpcode::kContinuation: { + DCHECK(cont_node == NULL); + cont_node = *use_iter; + break; + } + case IrOpcode::kLazyDeoptimization: { + DCHECK(lazy_deopt_node == NULL); + lazy_deopt_node = *use_iter; + break; + } + default: + break; + } + } + DCHECK(lazy_deopt_node != NULL); + DCHECK(cont_node != NULL); + BasicBlock* cont_successor_block = schedule_->block(cont_node); + BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node); + Node* call_block_node = NodeProperties::GetControlInput(call); + BasicBlock* call_block = schedule_->block(call_block_node); + if (FLAG_trace_turbo_scheduler) { + IrOpcode::Value opcode = call->opcode(); + PrintF("node %d (%s) in block %d -> block %d\n", call->id(), + IrOpcode::Mnemonic(opcode), call_block->id(), + cont_successor_block->id()); + PrintF("node %d (%s) in block %d -> block %d\n", call->id(), + IrOpcode::Mnemonic(opcode), call_block->id(), + deopt_successor_block->id()); + } + schedule_->AddCall(call_block, call, cont_successor_block, + deopt_successor_block); + } +} + + +void Scheduler::AddSuccessorsForDeopts() { + for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) { + Node* deopt_block_node = NodeProperties::GetControlInput(*i); + BasicBlock* deopt_block = schedule_->block(deopt_block_node); + DCHECK(deopt_block != NULL); + if (FLAG_trace_turbo_scheduler) { + IrOpcode::Value opcode = (*i)->opcode(); + PrintF("node %d (%s) in block %d -> end\n", (*i)->id(), + IrOpcode::Mnemonic(opcode), deopt_block->id()); + } + schedule_->AddDeoptimize(deopt_block, *i); + } +} + + +void Scheduler::AddSuccessorsForBranches() { + for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) { + Node* branch = *i; + DCHECK(branch->opcode() == IrOpcode::kBranch); + Node* branch_block_node = NodeProperties::GetControlInput(branch); + BasicBlock* branch_block = 
schedule_->block(branch_block_node); + DCHECK(branch_block != NULL); + UseIter use_iter = branch->uses().begin(); + Node* first_successor = *use_iter; + ++use_iter; + DCHECK(use_iter != branch->uses().end()); + Node* second_successor = *use_iter; + DCHECK(++use_iter == branch->uses().end()); + Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue + ? first_successor + : second_successor; + Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue + ? second_successor + : first_successor; + DCHECK(true_successor_node->opcode() == IrOpcode::kIfTrue); + DCHECK(false_successor_node->opcode() == IrOpcode::kIfFalse); + BasicBlock* true_successor_block = schedule_->block(true_successor_node); + BasicBlock* false_successor_block = schedule_->block(false_successor_node); + DCHECK(true_successor_block != NULL); + DCHECK(false_successor_block != NULL); + if (FLAG_trace_turbo_scheduler) { + IrOpcode::Value opcode = branch->opcode(); + PrintF("node %d (%s) in block %d -> block %d\n", branch->id(), + IrOpcode::Mnemonic(opcode), branch_block->id(), + true_successor_block->id()); + PrintF("node %d (%s) in block %d -> block %d\n", branch->id(), + IrOpcode::Mnemonic(opcode), branch_block->id(), + false_successor_block->id()); + } + schedule_->AddBranch(branch_block, branch, true_successor_block, + false_successor_block); + } +} + + +void Scheduler::AddSuccessorsForReturns() { + for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) { + Node* return_block_node = NodeProperties::GetControlInput(*i); + BasicBlock* return_block = schedule_->block(return_block_node); + DCHECK(return_block != NULL); + if (FLAG_trace_turbo_scheduler) { + IrOpcode::Value opcode = (*i)->opcode(); + PrintF("node %d (%s) in block %d -> end\n", (*i)->id(), + IrOpcode::Mnemonic(opcode), return_block->id()); + } + schedule_->AddReturn(return_block, *i); + } +} + + +BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { + while (b1 != b2) { + 
int b1_rpo = GetRPONumber(b1); + int b2_rpo = GetRPONumber(b2); + DCHECK(b1_rpo != b2_rpo); + if (b1_rpo < b2_rpo) { + b2 = schedule_->immediate_dominator_[b2->id()]; + } else { + b1 = schedule_->immediate_dominator_[b1->id()]; + } + } + return b1; +} + + +void Scheduler::GenerateImmediateDominatorTree() { + // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's + // if this becomes really slow. + if (FLAG_trace_turbo_scheduler) { + PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); + } + for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { + BasicBlock* current_rpo = schedule_->rpo_order_[i]; + if (current_rpo != schedule_->entry()) { + BasicBlock::Predecessors::iterator current_pred = + current_rpo->predecessors().begin(); + BasicBlock::Predecessors::iterator end = + current_rpo->predecessors().end(); + DCHECK(current_pred != end); + BasicBlock* dominator = *current_pred; + ++current_pred; + // For multiple predecessors, walk up the rpo ordering until a common + // dominator is found. + int current_rpo_pos = GetRPONumber(current_rpo); + while (current_pred != end) { + // Don't examine backwards edges + BasicBlock* pred = *current_pred; + if (GetRPONumber(pred) < current_rpo_pos) { + dominator = GetCommonDominator(dominator, *current_pred); + } + ++current_pred; + } + schedule_->immediate_dominator_[current_rpo->id()] = dominator; + if (FLAG_trace_turbo_scheduler) { + PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id()); + } + } + } +} + + +class ScheduleEarlyNodeVisitor : public NullNodeVisitor { + public: + explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) + : has_changed_rpo_constraints_(true), + scheduler_(scheduler), + schedule_(scheduler->schedule_) {} + + GenericGraphVisit::Control Pre(Node* node) { + int id = node->id(); + int max_rpo = 0; + // Fixed nodes already know their schedule early position. 
+ if (IsFixedNode(node)) { + BasicBlock* block = schedule_->block(node); + DCHECK(block != NULL); + max_rpo = block->rpo_number_; + if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { + has_changed_rpo_constraints_ = true; + } + scheduler_->schedule_early_rpo_index_[id] = max_rpo; + if (FLAG_trace_turbo_scheduler) { + PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo); + } + } + return GenericGraphVisit::CONTINUE; + } + + GenericGraphVisit::Control Post(Node* node) { + int id = node->id(); + int max_rpo = 0; + // Otherwise, the minimum rpo for the node is the max of all of the inputs. + if (!IsFixedNode(node)) { + DCHECK(!OperatorProperties::IsBasicBlockBegin(node->op())); + for (InputIter i = node->inputs().begin(); i != node->inputs().end(); + ++i) { + int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()]; + if (control_rpo > max_rpo) { + max_rpo = control_rpo; + } + } + if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { + has_changed_rpo_constraints_ = true; + } + scheduler_->schedule_early_rpo_index_[id] = max_rpo; + if (FLAG_trace_turbo_scheduler) { + PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo); + } + } + return GenericGraphVisit::CONTINUE; + } + + static bool IsFixedNode(Node* node) { + return OperatorProperties::HasFixedSchedulePosition(node->op()) || + !OperatorProperties::CanBeScheduled(node->op()); + } + + // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be + // rewritten to use a pre-order traversal from the start instead. 
+ bool has_changed_rpo_constraints_; + + private: + Scheduler* scheduler_; + Schedule* schedule_; +}; + + +void Scheduler::ScheduleEarly() { + if (FLAG_trace_turbo_scheduler) { + PrintF("------------------- SCHEDULE EARLY ----------------\n"); + } + + int fixpoint_count = 0; + ScheduleEarlyNodeVisitor visitor(this); + while (visitor.has_changed_rpo_constraints_) { + visitor.has_changed_rpo_constraints_ = false; + graph_->VisitNodeInputsFromEnd(&visitor); + fixpoint_count++; + } + + if (FLAG_trace_turbo_scheduler) { + PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count); + } +} + + +class PrepareUsesVisitor : public NullNodeVisitor { + public: + explicit PrepareUsesVisitor(Scheduler* scheduler) + : scheduler_(scheduler), schedule_(scheduler->schedule_) {} + + GenericGraphVisit::Control Pre(Node* node) { + // Some nodes must be scheduled explicitly to ensure they are in exactly the + // right place; it's a convenient place during the preparation of use counts + // to schedule them. + if (!schedule_->IsScheduled(node) && + OperatorProperties::HasFixedSchedulePosition(node->op())) { + if (FLAG_trace_turbo_scheduler) { + PrintF("Fixed position node %d is unscheduled, scheduling now\n", + node->id()); + } + IrOpcode::Value opcode = node->opcode(); + BasicBlock* block = + opcode == IrOpcode::kParameter + ? schedule_->entry() + : schedule_->block(NodeProperties::GetControlInput(node)); + DCHECK(block != NULL); + schedule_->AddNode(block, node); + } + + if (OperatorProperties::IsScheduleRoot(node->op())) { + scheduler_->schedule_root_nodes_.push_back(node); + } + + return GenericGraphVisit::CONTINUE; + } + + void PostEdge(Node* from, int index, Node* to) { + // If the edge is from an unscheduled node, then tally it in the use count + // for all of its inputs. The same criterion will be used in ScheduleLate + // for decrementing use counts. 
+ if (!schedule_->IsScheduled(from) && + OperatorProperties::CanBeScheduled(from->op())) { + DCHECK(!OperatorProperties::HasFixedSchedulePosition(from->op())); + ++scheduler_->unscheduled_uses_[to->id()]; + if (FLAG_trace_turbo_scheduler) { + PrintF("Incrementing uses of node %d from %d to %d\n", to->id(), + from->id(), scheduler_->unscheduled_uses_[to->id()]); + } + } + } + + private: + Scheduler* scheduler_; + Schedule* schedule_; +}; + + +void Scheduler::PrepareUses() { + if (FLAG_trace_turbo_scheduler) { + PrintF("------------------- PREPARE USES ------------------\n"); + } + // Count the uses of every node, it will be used to ensure that all of a + // node's uses are scheduled before the node itself. + PrepareUsesVisitor prepare_uses(this); + graph_->VisitNodeInputsFromEnd(&prepare_uses); +} + + +class ScheduleLateNodeVisitor : public NullNodeVisitor { + public: + explicit ScheduleLateNodeVisitor(Scheduler* scheduler) + : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} + + GenericGraphVisit::Control Pre(Node* node) { + // Don't schedule nodes that cannot be scheduled or are already scheduled. + if (!OperatorProperties::CanBeScheduled(node->op()) || + schedule_->IsScheduled(node)) { + return GenericGraphVisit::CONTINUE; + } + DCHECK(!OperatorProperties::HasFixedSchedulePosition(node->op())); + + // If all the uses of a node have been scheduled, then the node itself can + // be scheduled. + bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; + if (FLAG_trace_turbo_scheduler) { + PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(), + eligible ? "true" : "false"); + } + if (!eligible) return GenericGraphVisit::DEFER; + + // Determine the dominating block for all of the uses of this node. It is + // the latest block that this node can be scheduled in. 
+ BasicBlock* block = NULL; + for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); + ++i) { + BasicBlock* use_block = GetBlockForUse(i.edge()); + block = block == NULL ? use_block : use_block == NULL + ? block + : scheduler_->GetCommonDominator( + block, use_block); + } + DCHECK(block != NULL); + + int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()]; + if (FLAG_trace_turbo_scheduler) { + PrintF( + "Schedule late conservative for node %d is block %d at " + "loop depth %d, min rpo = %d\n", + node->id(), block->id(), block->loop_depth_, min_rpo); + } + // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively + // into enlcosing loop pre-headers until they would preceed their + // ScheduleEarly position. + BasicBlock* hoist_block = block; + while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { + if (hoist_block->loop_depth_ < block->loop_depth_) { + block = hoist_block; + if (FLAG_trace_turbo_scheduler) { + PrintF("Hoisting node %d to block %d\n", node->id(), block->id()); + } + } + // Try to hoist to the pre-header of the loop header. + hoist_block = hoist_block->loop_header(); + if (hoist_block != NULL) { + BasicBlock* pre_header = schedule_->dominator(hoist_block); + DCHECK(pre_header == NULL || + *hoist_block->predecessors().begin() == pre_header); + if (FLAG_trace_turbo_scheduler) { + PrintF( + "Try hoist to pre-header block %d of loop header block %d," + " depth would be %d\n", + pre_header->id(), hoist_block->id(), pre_header->loop_depth_); + } + hoist_block = pre_header; + } + } + + ScheduleNode(block, node); + + return GenericGraphVisit::CONTINUE; + } + + private: + BasicBlock* GetBlockForUse(Node::Edge edge) { + Node* use = edge.from(); + IrOpcode::Value opcode = use->opcode(); + // If the use is a phi, forward through the the phi to the basic block + // corresponding to the phi's input. 
+ if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { + int index = edge.index(); + if (FLAG_trace_turbo_scheduler) { + PrintF("Use %d is input %d to a phi\n", use->id(), index); + } + use = NodeProperties::GetControlInput(use, 0); + opcode = use->opcode(); + DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); + use = NodeProperties::GetControlInput(use, index); + } + BasicBlock* result = schedule_->block(use); + if (result == NULL) return NULL; + if (FLAG_trace_turbo_scheduler) { + PrintF("Must dominate use %d in block %d\n", use->id(), result->id()); + } + return result; + } + + bool IsNodeEligible(Node* node) { + bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; + return eligible; + } + + void ScheduleNode(BasicBlock* block, Node* node) { + schedule_->PlanNode(block, node); + scheduler_->scheduled_nodes_[block->id()].push_back(node); + + // Reduce the use count of the node's inputs to potentially make them + // scheduable. + for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { + DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0); + --scheduler_->unscheduled_uses_[(*i)->id()]; + if (FLAG_trace_turbo_scheduler) { + PrintF("Decrementing use count for node %d from node %d (now %d)\n", + (*i)->id(), i.edge().from()->id(), + scheduler_->unscheduled_uses_[(*i)->id()]); + if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) { + PrintF("node %d is now eligible for scheduling\n", (*i)->id()); + } + } + } + } + + Scheduler* scheduler_; + Schedule* schedule_; +}; + + +void Scheduler::ScheduleLate() { + if (FLAG_trace_turbo_scheduler) { + PrintF("------------------- SCHEDULE LATE -----------------\n"); + } + + // Schedule: Places nodes in dominator block of all their uses. 
+ ScheduleLateNodeVisitor schedule_late_visitor(this); + + for (NodeVectorIter i = schedule_root_nodes_.begin(); + i != schedule_root_nodes_.end(); ++i) { + GenericGraphVisit::Visit<ScheduleLateNodeVisitor, + NodeInputIterationTraits<Node> >( + graph_, *i, &schedule_late_visitor); + } + + // Add collected nodes for basic blocks to their blocks in the right order. + int block_num = 0; + for (NodeVectorVectorIter i = scheduled_nodes_.begin(); + i != scheduled_nodes_.end(); ++i) { + for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { + schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); + } + block_num++; + } +} + + +// Numbering for BasicBlockData.rpo_number_ for this block traversal: +static const int kBlockOnStack = -2; +static const int kBlockVisited1 = -3; +static const int kBlockVisited2 = -4; +static const int kBlockUnvisited1 = -1; +static const int kBlockUnvisited2 = kBlockVisited1; + +struct SpecialRPOStackFrame { + BasicBlock* block; + int index; +}; + +struct BlockList { + BasicBlock* block; + BlockList* next; + + BlockList* Add(Zone* zone, BasicBlock* b) { + BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); + list->block = b; + list->next = this; + return list; + } + + void Serialize(BasicBlockVector* final_order) { + for (BlockList* l = this; l != NULL; l = l->next) { + l->block->rpo_number_ = static_cast<int>(final_order->size()); + final_order->push_back(l->block); + } + } +}; + +struct LoopInfo { + BasicBlock* header; + ZoneList<BasicBlock*>* outgoing; + BitVector* members; + LoopInfo* prev; + BlockList* end; + BlockList* start; + + void AddOutgoing(Zone* zone, BasicBlock* block) { + if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); + outgoing->Add(block, zone); + } +}; + + +static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, + int unvisited) { + if (child->rpo_number_ == unvisited) { + stack[depth].block = child; + stack[depth].index = 0; + child->rpo_number_ = 
kBlockOnStack; + return depth + 1; + } + return depth; +} + + +// Computes loop membership from the backedges of the control flow graph. +static LoopInfo* ComputeLoopInfo( + Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks, + ZoneList<std::pair<BasicBlock*, int> >* backedges) { + LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops); + memset(loops, 0, num_loops * sizeof(LoopInfo)); + + // Compute loop membership starting from backedges. + // O(max(loop_depth) * max(|loop|) + for (int i = 0; i < backedges->length(); i++) { + BasicBlock* member = backedges->at(i).first; + BasicBlock* header = member->SuccessorAt(backedges->at(i).second); + int loop_num = header->loop_end_; + if (loops[loop_num].header == NULL) { + loops[loop_num].header = header; + loops[loop_num].members = new (zone) BitVector(num_blocks, zone); + } + + int queue_length = 0; + if (member != header) { + // As long as the header doesn't have a backedge to itself, + // Push the member onto the queue and process its predecessors. + if (!loops[loop_num].members->Contains(member->id())) { + loops[loop_num].members->Add(member->id()); + } + queue[queue_length++].block = member; + } + + // Propagate loop membership backwards. All predecessors of M up to the + // loop header H are members of the loop too. O(|blocks between M and H|). 
+ while (queue_length > 0) { + BasicBlock* block = queue[--queue_length].block; + for (int i = 0; i < block->PredecessorCount(); i++) { + BasicBlock* pred = block->PredecessorAt(i); + if (pred != header) { + if (!loops[loop_num].members->Contains(pred->id())) { + loops[loop_num].members->Add(pred->id()); + queue[queue_length++].block = pred; + } + } + } + } + } + return loops; +} + + +#if DEBUG +static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) { + PrintF("-- RPO with %d loops ", num_loops); + if (num_loops > 0) { + PrintF("("); + for (int i = 0; i < num_loops; i++) { + if (i > 0) PrintF(" "); + PrintF("B%d", loops[i].header->id()); + } + PrintF(") "); + } + PrintF("-- \n"); + + for (int i = 0; i < static_cast<int>(order->size()); i++) { + BasicBlock* block = (*order)[i]; + int bid = block->id(); + PrintF("%5d:", i); + for (int i = 0; i < num_loops; i++) { + bool membership = loops[i].members->Contains(bid); + bool range = loops[i].header->LoopContains(block); + PrintF(membership ? " |" : " "); + PrintF(range ? "x" : " "); + } + PrintF(" B%d: ", bid); + if (block->loop_end_ >= 0) { + PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_); + } + PrintF("\n"); + } +} + + +static void VerifySpecialRPO(int num_loops, LoopInfo* loops, + BasicBlockVector* order) { + DCHECK(order->size() > 0); + DCHECK((*order)[0]->id() == 0); // entry should be first. + + for (int i = 0; i < num_loops; i++) { + LoopInfo* loop = &loops[i]; + BasicBlock* header = loop->header; + + DCHECK(header != NULL); + DCHECK(header->rpo_number_ >= 0); + DCHECK(header->rpo_number_ < static_cast<int>(order->size())); + DCHECK(header->loop_end_ >= 0); + DCHECK(header->loop_end_ <= static_cast<int>(order->size())); + DCHECK(header->loop_end_ > header->rpo_number_); + + // Verify the start ... end list relationship. 
+ int links = 0; + BlockList* l = loop->start; + DCHECK(l != NULL && l->block == header); + bool end_found; + while (true) { + if (l == NULL || l == loop->end) { + end_found = (loop->end == l); + break; + } + // The list should be in same order as the final result. + DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_); + links++; + l = l->next; + DCHECK(links < static_cast<int>(2 * order->size())); // cycle? + } + DCHECK(links > 0); + DCHECK(links == (header->loop_end_ - header->rpo_number_)); + DCHECK(end_found); + + // Check the contiguousness of loops. + int count = 0; + for (int j = 0; j < static_cast<int>(order->size()); j++) { + BasicBlock* block = order->at(j); + DCHECK(block->rpo_number_ == j); + if (j < header->rpo_number_ || j >= header->loop_end_) { + DCHECK(!loop->members->Contains(block->id())); + } else { + if (block == header) { + DCHECK(!loop->members->Contains(block->id())); + } else { + DCHECK(loop->members->Contains(block->id())); + } + count++; + } + } + DCHECK(links == count); + } +} +#endif // DEBUG + + +// Compute the special reverse-post-order block ordering, which is essentially +// a RPO of the graph where loop bodies are contiguous. Properties: +// 1. If block A is a predecessor of B, then A appears before B in the order, +// unless B is a loop header and A is in the loop headed at B +// (i.e. A -> B is a backedge). +// => If block A dominates block B, then A appears before B in the order. +// => If block A is a loop header, A appears before all blocks in the loop +// headed at A. +// 2. All loops are contiguous in the order (i.e. no intervening blocks that +// do not belong to the loop.) +// Note a simple RPO traversal satisfies (1) but not (3). 
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { + Zone tmp_zone(schedule->zone()->isolate()); + Zone* zone = &tmp_zone; + if (FLAG_trace_turbo_scheduler) { + PrintF("------------- COMPUTING SPECIAL RPO ---------------\n"); + } + // RPO should not have been computed for this schedule yet. + CHECK_EQ(kBlockUnvisited1, schedule->entry()->rpo_number_); + CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); + + // Perform an iterative RPO traversal using an explicit stack, + // recording backedges that form cycles. O(|B|). + ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone); + SpecialRPOStackFrame* stack = + zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount()); + BasicBlock* entry = schedule->entry(); + BlockList* order = NULL; + int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); + int num_loops = 0; + + while (stack_depth > 0) { + int current = stack_depth - 1; + SpecialRPOStackFrame* frame = stack + current; + + if (frame->index < frame->block->SuccessorCount()) { + // Process the next successor. + BasicBlock* succ = frame->block->SuccessorAt(frame->index++); + if (succ->rpo_number_ == kBlockVisited1) continue; + if (succ->rpo_number_ == kBlockOnStack) { + // The successor is on the stack, so this is a backedge (cycle). + backedges.Add( + std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone); + if (succ->loop_end_ < 0) { + // Assign a new loop number to the header if it doesn't have one. + succ->loop_end_ = num_loops++; + } + } else { + // Push the successor onto the stack. + DCHECK(succ->rpo_number_ == kBlockUnvisited1); + stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); + } + } else { + // Finished with all successors; pop the stack and add the block. + order = order->Add(zone, frame->block); + frame->block->rpo_number_ = kBlockVisited1; + stack_depth--; + } + } + + // If no loops were encountered, then the order we computed was correct. 
+ LoopInfo* loops = NULL; + if (num_loops != 0) { + // Otherwise, compute the loop information from the backedges in order + // to perform a traversal that groups loop bodies together. + loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(), + &backedges); + + // Initialize the "loop stack". Note the entry could be a loop header. + LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL; + order = NULL; + + // Perform an iterative post-order traversal, visiting loop bodies before + // edges that lead out of loops. Visits each block once, but linking loop + // sections together is linear in the loop size, so overall is + // O(|B| + max(loop_depth) * max(|loop|)) + stack_depth = Push(stack, 0, entry, kBlockUnvisited2); + while (stack_depth > 0) { + SpecialRPOStackFrame* frame = stack + (stack_depth - 1); + BasicBlock* block = frame->block; + BasicBlock* succ = NULL; + + if (frame->index < block->SuccessorCount()) { + // Process the next normal successor. + succ = block->SuccessorAt(frame->index++); + } else if (block->IsLoopHeader()) { + // Process additional outgoing edges from the loop header. + if (block->rpo_number_ == kBlockOnStack) { + // Finish the loop body the first time the header is left on the + // stack. + DCHECK(loop != NULL && loop->header == block); + loop->start = order->Add(zone, block); + order = loop->end; + block->rpo_number_ = kBlockVisited2; + // Pop the loop stack and continue visiting outgoing edges within the + // the context of the outer loop, if any. + loop = loop->prev; + // We leave the loop header on the stack; the rest of this iteration + // and later iterations will go through its outgoing edges list. + } + + // Use the next outgoing edge if there are any. 
+ int outgoing_index = frame->index - block->SuccessorCount(); + LoopInfo* info = &loops[block->loop_end_]; + DCHECK(loop != info); + if (info->outgoing != NULL && + outgoing_index < info->outgoing->length()) { + succ = info->outgoing->at(outgoing_index); + frame->index++; + } + } + + if (succ != NULL) { + // Process the next successor. + if (succ->rpo_number_ == kBlockOnStack) continue; + if (succ->rpo_number_ == kBlockVisited2) continue; + DCHECK(succ->rpo_number_ == kBlockUnvisited2); + if (loop != NULL && !loop->members->Contains(succ->id())) { + // The successor is not in the current loop or any nested loop. + // Add it to the outgoing edges of this loop and visit it later. + loop->AddOutgoing(zone, succ); + } else { + // Push the successor onto the stack. + stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); + if (succ->IsLoopHeader()) { + // Push the inner loop onto the loop stack. + DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops); + LoopInfo* next = &loops[succ->loop_end_]; + next->end = order; + next->prev = loop; + loop = next; + } + } + } else { + // Finished with all successors of the current block. + if (block->IsLoopHeader()) { + // If we are going to pop a loop header, then add its entire body. + LoopInfo* info = &loops[block->loop_end_]; + for (BlockList* l = info->start; true; l = l->next) { + if (l->next == info->end) { + l->next = order; + info->end = order; + break; + } + } + order = info->start; + } else { + // Pop a single node off the stack and add it to the order. + order = order->Add(zone, block); + block->rpo_number_ = kBlockVisited2; + } + stack_depth--; + } + } + } + + // Construct the final order from the list. + BasicBlockVector* final_order = &schedule->rpo_order_; + order->Serialize(final_order); + + // Compute the correct loop header for every block and set the correct loop + // ends. 
+ LoopInfo* current_loop = NULL; + BasicBlock* current_header = NULL; + int loop_depth = 0; + for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end(); + ++i) { + BasicBlock* current = *i; + current->loop_header_ = current_header; + if (current->IsLoopHeader()) { + loop_depth++; + current_loop = &loops[current->loop_end_]; + BlockList* end = current_loop->end; + current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) + : end->block->rpo_number_; + current_header = current_loop->header; + if (FLAG_trace_turbo_scheduler) { + PrintF("Block %d is a loop header, increment loop depth to %d\n", + current->id(), loop_depth); + } + } else { + while (current_header != NULL && + current->rpo_number_ >= current_header->loop_end_) { + DCHECK(current_header->IsLoopHeader()); + DCHECK(current_loop != NULL); + current_loop = current_loop->prev; + current_header = current_loop == NULL ? NULL : current_loop->header; + --loop_depth; + } + } + current->loop_depth_ = loop_depth; + if (FLAG_trace_turbo_scheduler) { + if (current->loop_header_ == NULL) { + PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(), + current->loop_depth_); + } else { + PrintF("Block %d's loop header is block %d, loop depth %d\n", + current->id(), current->loop_header_->id(), + current->loop_depth_); + } + } + } + +#if DEBUG + if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); + VerifySpecialRPO(num_loops, loops, final_order); +#endif + return final_order; +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/scheduler.h nodejs-0.11.15/deps/v8/src/compiler/scheduler.h --- nodejs-0.11.13/deps/v8/src/compiler/scheduler.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/scheduler.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,84 @@ +// Copyright 2013 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_SCHEDULER_H_ +#define V8_COMPILER_SCHEDULER_H_ + +#include <vector> + +#include "src/v8.h" + +#include "src/compiler/opcodes.h" +#include "src/compiler/schedule.h" +#include "src/zone-allocator.h" +#include "src/zone-containers.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Computes a schedule from a graph, placing nodes into basic blocks and +// ordering the basic blocks in the special RPO order. +class Scheduler { + public: + // Create a new schedule and place all computations from the graph in it. + static Schedule* ComputeSchedule(Graph* graph); + + // Compute the RPO of blocks in an existing schedule. + static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule); + + private: + Graph* graph_; + Schedule* schedule_; + NodeVector branches_; + NodeVector calls_; + NodeVector deopts_; + NodeVector returns_; + NodeVector loops_and_merges_; + BasicBlockVector node_block_placement_; + IntVector unscheduled_uses_; + NodeVectorVector scheduled_nodes_; + NodeVector schedule_root_nodes_; + IntVector schedule_early_rpo_index_; + + Scheduler(Zone* zone, Graph* graph, Schedule* schedule); + + int GetRPONumber(BasicBlock* block) { + DCHECK(block->rpo_number_ >= 0 && + block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size())); + DCHECK(schedule_->rpo_order_[block->rpo_number_] == block); + return block->rpo_number_; + } + + void PrepareAuxiliaryNodeData(); + void PrepareAuxiliaryBlockData(); + + friend class CreateBlockVisitor; + void CreateBlocks(); + + void WireBlocks(); + + void AddPredecessorsForLoopsAndMerges(); + void AddSuccessorsForBranches(); + void AddSuccessorsForReturns(); + void AddSuccessorsForCalls(); + void AddSuccessorsForDeopts(); + + void GenerateImmediateDominatorTree(); + BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2); + + friend class ScheduleEarlyNodeVisitor; + void 
ScheduleEarly(); + + friend class PrepareUsesVisitor; + void PrepareUses(); + + friend class ScheduleLateNodeVisitor; + void ScheduleLate(); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_SCHEDULER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/simplified-lowering.cc nodejs-0.11.15/deps/v8/src/compiler/simplified-lowering.cc --- nodejs-0.11.13/deps/v8/src/compiler/simplified-lowering.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/simplified-lowering.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1014 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/simplified-lowering.h" + +#include <deque> +#include <queue> + +#include "src/compiler/common-operator.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/representation-change.h" +#include "src/compiler/simplified-lowering.h" +#include "src/compiler/simplified-operator.h" +#include "src/objects.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Macro for outputting trace information from representation inference. +#define TRACE(x) \ + if (FLAG_trace_representation) PrintF x + +// Representation selection and lowering of {Simplified} operators to machine +// operators are interwined. We use a fixpoint calculation to compute both the +// output representation and the best possible lowering for {Simplified} nodes. +// Representation change insertion ensures that all values are in the correct +// machine representation after this phase, as dictated by the machine +// operators themselves. +enum Phase { + // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information + // backwards from uses to definitions, around cycles in phis, according + // to local rules for each operator. 
+ // During this phase, the usage information for a node determines the best + // possible lowering for each operator so far, and that in turn determines + // the output representation. + // Therefore, to be correct, this phase must iterate to a fixpoint before + // the next phase can begin. + PROPAGATE, + + // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some + // operators for some nodes, expanding some nodes to multiple nodes, or + // removing some (redundant) nodes. + // During this phase, use the {RepresentationChanger} to insert + // representation changes between uses that demand a particular + // representation and nodes that produce a different representation. + LOWER +}; + + +class RepresentationSelector { + public: + // Information for each node tracked during the fixpoint. + struct NodeInfo { + RepTypeUnion use : 14; // Union of all usages for the node. + bool queued : 1; // Bookkeeping for the traversal. + bool visited : 1; // Bookkeeping for the traversal. + RepTypeUnion output : 14; // Output type of the node. + }; + + RepresentationSelector(JSGraph* jsgraph, Zone* zone, + RepresentationChanger* changer) + : jsgraph_(jsgraph), + count_(jsgraph->graph()->NodeCount()), + info_(zone->NewArray<NodeInfo>(count_)), + nodes_(NodeVector::allocator_type(zone)), + replacements_(NodeVector::allocator_type(zone)), + contains_js_nodes_(false), + phase_(PROPAGATE), + changer_(changer), + queue_(std::deque<Node*, NodePtrZoneAllocator>( + NodePtrZoneAllocator(zone))) { + memset(info_, 0, sizeof(NodeInfo) * count_); + } + + void Run(SimplifiedLowering* lowering) { + // Run propagation phase to a fixpoint. + TRACE(("--{Propagation phase}--\n")); + phase_ = PROPAGATE; + Enqueue(jsgraph_->graph()->end()); + // Process nodes from the queue until it is empty. 
+ while (!queue_.empty()) { + Node* node = queue_.front(); + NodeInfo* info = GetInfo(node); + queue_.pop(); + info->queued = false; + TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic())); + VisitNode(node, info->use, NULL); + TRACE((" ==> output ")); + PrintInfo(info->output); + TRACE(("\n")); + } + + // Run lowering and change insertion phase. + TRACE(("--{Simplified lowering phase}--\n")); + phase_ = LOWER; + // Process nodes from the collected {nodes_} vector. + for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) { + Node* node = *i; + TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic())); + // Reuse {VisitNode()} so the representation rules are in one place. + VisitNode(node, GetUseInfo(node), lowering); + } + + // Perform the final replacements. + for (NodeVector::iterator i = replacements_.begin(); + i != replacements_.end(); ++i) { + Node* node = *i; + Node* replacement = *(++i); + node->ReplaceUses(replacement); + } + } + + // Enqueue {node} if the {use} contains new information for that node. + // Add {node} to {nodes_} if this is the first time it's been visited. + void Enqueue(Node* node, RepTypeUnion use = 0) { + if (phase_ != PROPAGATE) return; + NodeInfo* info = GetInfo(node); + if (!info->visited) { + // First visit of this node. + info->visited = true; + info->queued = true; + nodes_.push_back(node); + queue_.push(node); + TRACE((" initial: ")); + info->use |= use; + PrintUseInfo(node); + return; + } + TRACE((" queue?: ")); + PrintUseInfo(node); + if ((info->use & use) != use) { + // New usage information for the node is available. 
+ if (!info->queued) { + queue_.push(node); + info->queued = true; + TRACE((" added: ")); + } else { + TRACE((" inqueue: ")); + } + info->use |= use; + PrintUseInfo(node); + } + } + + bool lower() { return phase_ == LOWER; } + + void Enqueue(Node* node, RepType use) { + Enqueue(node, static_cast<RepTypeUnion>(use)); + } + + void SetOutput(Node* node, RepTypeUnion output) { + // Every node should have at most one output representation. Note that + // phis can have 0, if they have not been used in a representation-inducing + // instruction. + DCHECK((output & rMask) == 0 || IsPowerOf2(output & rMask)); + GetInfo(node)->output = output; + } + + bool BothInputsAre(Node* node, Type* type) { + DCHECK_EQ(2, node->InputCount()); + return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) && + NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type); + } + + void ProcessInput(Node* node, int index, RepTypeUnion use) { + Node* input = node->InputAt(index); + if (phase_ == PROPAGATE) { + // In the propagate phase, propagate the usage information backward. + Enqueue(input, use); + } else { + // In the change phase, insert a change before the use if necessary. + if ((use & rMask) == 0) return; // No input requirement on the use. + RepTypeUnion output = GetInfo(input)->output; + if ((output & rMask & use) == 0) { + // Output representation doesn't match usage. 
+ TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(), + node->op()->mnemonic(), index, input->id(), + input->op()->mnemonic())); + TRACE((" from ")); + PrintInfo(output); + TRACE((" to ")); + PrintInfo(use); + TRACE(("\n")); + Node* n = changer_->GetRepresentationFor(input, output, use); + node->ReplaceInput(index, n); + } + } + } + + static const RepTypeUnion kFloat64 = rFloat64 | tNumber; + static const RepTypeUnion kInt32 = rWord32 | tInt32; + static const RepTypeUnion kUint32 = rWord32 | tUint32; + static const RepTypeUnion kInt64 = rWord64 | tInt64; + static const RepTypeUnion kUint64 = rWord64 | tUint64; + static const RepTypeUnion kAnyTagged = rTagged | tAny; + + // The default, most general visitation case. For {node}, process all value, + // context, effect, and control inputs, assuming that value inputs should have + // {rTagged} representation and can observe all output values {tAny}. + void VisitInputs(Node* node) { + InputIter i = node->inputs().begin(); + for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0; + ++i, j--) { + ProcessInput(node, i.index(), kAnyTagged); // Value inputs + } + for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0; + ++i, j--) { + ProcessInput(node, i.index(), kAnyTagged); // Context inputs + } + for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0; + ++i, j--) { + Enqueue(*i); // Effect inputs: just visit + } + for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0; + ++i, j--) { + Enqueue(*i); // Control inputs: just visit + } + SetOutput(node, kAnyTagged); + } + + // Helper for binops of the I x I -> O variety. + void VisitBinop(Node* node, RepTypeUnion input_use, RepTypeUnion output) { + DCHECK_EQ(2, node->InputCount()); + ProcessInput(node, 0, input_use); + ProcessInput(node, 1, input_use); + SetOutput(node, output); + } + + // Helper for unops of the I -> O variety. 
+ void VisitUnop(Node* node, RepTypeUnion input_use, RepTypeUnion output) { + DCHECK_EQ(1, node->InputCount()); + ProcessInput(node, 0, input_use); + SetOutput(node, output); + } + + // Helper for leaf nodes. + void VisitLeaf(Node* node, RepTypeUnion output) { + DCHECK_EQ(0, node->InputCount()); + SetOutput(node, output); + } + + // Helpers for specific types of binops. + void VisitFloat64Binop(Node* node) { VisitBinop(node, kFloat64, kFloat64); } + void VisitInt32Binop(Node* node) { VisitBinop(node, kInt32, kInt32); } + void VisitUint32Binop(Node* node) { VisitBinop(node, kUint32, kUint32); } + void VisitInt64Binop(Node* node) { VisitBinop(node, kInt64, kInt64); } + void VisitUint64Binop(Node* node) { VisitBinop(node, kUint64, kUint64); } + void VisitFloat64Cmp(Node* node) { VisitBinop(node, kFloat64, rBit); } + void VisitInt32Cmp(Node* node) { VisitBinop(node, kInt32, rBit); } + void VisitUint32Cmp(Node* node) { VisitBinop(node, kUint32, rBit); } + void VisitInt64Cmp(Node* node) { VisitBinop(node, kInt64, rBit); } + void VisitUint64Cmp(Node* node) { VisitBinop(node, kUint64, rBit); } + + // Helper for handling phis. + void VisitPhi(Node* node, RepTypeUnion use) { + // First, propagate the usage information to inputs of the phi. + int values = OperatorProperties::GetValueInputCount(node->op()); + Node::Inputs inputs = node->inputs(); + for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end(); + ++iter, --values) { + // Propagate {use} of the phi to value inputs, and 0 to control. + // TODO(titzer): it'd be nice to have distinguished edge kinds here. + ProcessInput(node, iter.index(), values > 0 ? use : 0); + } + // Phis adapt to whatever output representation their uses demand, + // pushing representation changes to their inputs. + RepTypeUnion use_rep = GetUseInfo(node) & rMask; + RepTypeUnion use_type = GetUseInfo(node) & tMask; + RepTypeUnion rep = 0; + if (use_rep & rTagged) { + rep = rTagged; // Tagged overrides everything. 
+ } else if (use_rep & rFloat64) { + rep = rFloat64; + } else if (use_rep & rWord64) { + rep = rWord64; + } else if (use_rep & rWord32) { + rep = rWord32; + } else if (use_rep & rBit) { + rep = rBit; + } else { + // There was no representation associated with any of the uses. + // TODO(titzer): Select the best rep using phi's type, not the usage type? + if (use_type & tAny) { + rep = rTagged; + } else if (use_type & tNumber) { + rep = rFloat64; + } else if (use_type & tInt64 || use_type & tUint64) { + rep = rWord64; + } else if (use_type & tInt32 || use_type & tUint32) { + rep = rWord32; + } else if (use_type & tBool) { + rep = rBit; + } else { + UNREACHABLE(); // should have at least a usage type! + } + } + // Preserve the usage type, but set the representation. + Type* upper = NodeProperties::GetBounds(node).upper; + SetOutput(node, rep | changer_->TypeFromUpperBound(upper)); + } + + Operator* Int32Op(Node* node) { + return changer_->Int32OperatorFor(node->opcode()); + } + + Operator* Uint32Op(Node* node) { + return changer_->Uint32OperatorFor(node->opcode()); + } + + Operator* Float64Op(Node* node) { + return changer_->Float64OperatorFor(node->opcode()); + } + + // Dispatching routine for visiting the node {node} with the usage {use}. + // Depending on the operator, propagate new usage info to the inputs. + void VisitNode(Node* node, RepTypeUnion use, SimplifiedLowering* lowering) { + switch (node->opcode()) { + //------------------------------------------------------------------ + // Common operators. + //------------------------------------------------------------------ + case IrOpcode::kStart: + case IrOpcode::kDead: + return VisitLeaf(node, 0); + case IrOpcode::kParameter: { + // TODO(titzer): use representation from linkage. 
+ Type* upper = NodeProperties::GetBounds(node).upper; + ProcessInput(node, 0, 0); + SetOutput(node, rTagged | changer_->TypeFromUpperBound(upper)); + return; + } + case IrOpcode::kInt32Constant: + return VisitLeaf(node, rWord32); + case IrOpcode::kInt64Constant: + return VisitLeaf(node, rWord64); + case IrOpcode::kFloat64Constant: + return VisitLeaf(node, rFloat64); + case IrOpcode::kExternalConstant: + return VisitLeaf(node, rPtr); + case IrOpcode::kNumberConstant: + return VisitLeaf(node, rTagged); + case IrOpcode::kHeapConstant: + return VisitLeaf(node, rTagged); + + case IrOpcode::kEnd: + case IrOpcode::kIfTrue: + case IrOpcode::kIfFalse: + case IrOpcode::kReturn: + case IrOpcode::kMerge: + case IrOpcode::kThrow: + return VisitInputs(node); // default visit for all node inputs. + + case IrOpcode::kBranch: + ProcessInput(node, 0, rBit); + Enqueue(NodeProperties::GetControlInput(node, 0)); + break; + case IrOpcode::kPhi: + return VisitPhi(node, use); + +//------------------------------------------------------------------ +// JavaScript operators. +//------------------------------------------------------------------ +// For now, we assume that all JS operators were too complex to lower +// to Simplified and that they will always require tagged value inputs +// and produce tagged value outputs. +// TODO(turbofan): it might be possible to lower some JSOperators here, +// but that responsibility really lies in the typed lowering phase. +#define DEFINE_JS_CASE(x) case IrOpcode::k##x: + JS_OP_LIST(DEFINE_JS_CASE) +#undef DEFINE_JS_CASE + contains_js_nodes_ = true; + VisitInputs(node); + return SetOutput(node, rTagged); + + //------------------------------------------------------------------ + // Simplified operators. 
+ //------------------------------------------------------------------ + case IrOpcode::kBooleanNot: { + if (lower()) { + RepTypeUnion input = GetInfo(node->InputAt(0))->output; + if (input & rBit) { + // BooleanNot(x: rBit) => WordEqual(x, #0) + node->set_op(lowering->machine()->WordEqual()); + node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0)); + } else { + // BooleanNot(x: rTagged) => WordEqual(x, #false) + node->set_op(lowering->machine()->WordEqual()); + node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant()); + } + } else { + // No input representation requirement; adapt during lowering. + ProcessInput(node, 0, tBool); + SetOutput(node, rBit); + } + break; + } + case IrOpcode::kNumberEqual: + case IrOpcode::kNumberLessThan: + case IrOpcode::kNumberLessThanOrEqual: { + // Number comparisons reduce to integer comparisons for integer inputs. + if (BothInputsAre(node, Type::Signed32())) { + // => signed Int32Cmp + VisitInt32Cmp(node); + if (lower()) node->set_op(Int32Op(node)); + } else if (BothInputsAre(node, Type::Unsigned32())) { + // => unsigned Int32Cmp + VisitUint32Cmp(node); + if (lower()) node->set_op(Uint32Op(node)); + } else { + // => Float64Cmp + VisitFloat64Cmp(node); + if (lower()) node->set_op(Float64Op(node)); + } + break; + } + case IrOpcode::kNumberAdd: + case IrOpcode::kNumberSubtract: { + // Add and subtract reduce to Int32Add/Sub if the inputs + // are already integers and all uses are truncating. 
+ if (BothInputsAre(node, Type::Signed32()) && + (use & (tUint32 | tNumber | tAny)) == 0) { + // => signed Int32Add/Sub + VisitInt32Binop(node); + if (lower()) node->set_op(Int32Op(node)); + } else if (BothInputsAre(node, Type::Unsigned32()) && + (use & (tInt32 | tNumber | tAny)) == 0) { + // => unsigned Int32Add/Sub + VisitUint32Binop(node); + if (lower()) node->set_op(Uint32Op(node)); + } else { + // => Float64Add/Sub + VisitFloat64Binop(node); + if (lower()) node->set_op(Float64Op(node)); + } + break; + } + case IrOpcode::kNumberMultiply: + case IrOpcode::kNumberDivide: + case IrOpcode::kNumberModulus: { + // Float64Mul/Div/Mod + VisitFloat64Binop(node); + if (lower()) node->set_op(Float64Op(node)); + break; + } + case IrOpcode::kNumberToInt32: { + RepTypeUnion use_rep = use & rMask; + if (lower()) { + RepTypeUnion in = GetInfo(node->InputAt(0))->output; + if ((in & tMask) == tInt32 || (in & rMask) == rWord32) { + // If the input has type int32, or is already a word32, just change + // representation if necessary. + VisitUnop(node, tInt32 | use_rep, tInt32 | use_rep); + DeferReplacement(node, node->InputAt(0)); + } else { + // Require the input in float64 format and perform truncation. + // TODO(turbofan): could also avoid the truncation with a tag check. + VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32); + // TODO(titzer): should be a truncation. + node->set_op(lowering->machine()->ChangeFloat64ToInt32()); + } + } else { + // Propagate a type to the input, but pass through representation. + VisitUnop(node, tInt32, tInt32 | use_rep); + } + break; + } + case IrOpcode::kNumberToUint32: { + RepTypeUnion use_rep = use & rMask; + if (lower()) { + RepTypeUnion in = GetInfo(node->InputAt(0))->output; + if ((in & tMask) == tUint32 || (in & rMask) == rWord32) { + // The input has type int32, just change representation. 
+ VisitUnop(node, tUint32 | use_rep, tUint32 | use_rep); + DeferReplacement(node, node->InputAt(0)); + } else { + // Require the input in float64 format to perform truncation. + // TODO(turbofan): could also avoid the truncation with a tag check. + VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32); + // TODO(titzer): should be a truncation. + node->set_op(lowering->machine()->ChangeFloat64ToUint32()); + } + } else { + // Propagate a type to the input, but pass through representation. + VisitUnop(node, tUint32, tUint32 | use_rep); + } + break; + } + case IrOpcode::kReferenceEqual: { + VisitBinop(node, kAnyTagged, rBit); + if (lower()) node->set_op(lowering->machine()->WordEqual()); + break; + } + case IrOpcode::kStringEqual: { + VisitBinop(node, kAnyTagged, rBit); + // TODO(titzer): lower StringEqual to stub/runtime call. + break; + } + case IrOpcode::kStringLessThan: { + VisitBinop(node, kAnyTagged, rBit); + // TODO(titzer): lower StringLessThan to stub/runtime call. + break; + } + case IrOpcode::kStringLessThanOrEqual: { + VisitBinop(node, kAnyTagged, rBit); + // TODO(titzer): lower StringLessThanOrEqual to stub/runtime call. + break; + } + case IrOpcode::kStringAdd: { + VisitBinop(node, kAnyTagged, kAnyTagged); + // TODO(titzer): lower StringAdd to stub/runtime call. 
+ break; + } + case IrOpcode::kLoadField: { + FieldAccess access = FieldAccessOf(node->op()); + ProcessInput(node, 0, changer_->TypeForBasePointer(access)); + SetOutput(node, changer_->TypeForField(access)); + if (lower()) lowering->DoLoadField(node); + break; + } + case IrOpcode::kStoreField: { + FieldAccess access = FieldAccessOf(node->op()); + ProcessInput(node, 0, changer_->TypeForBasePointer(access)); + ProcessInput(node, 1, changer_->TypeForField(access)); + SetOutput(node, 0); + if (lower()) lowering->DoStoreField(node); + break; + } + case IrOpcode::kLoadElement: { + ElementAccess access = ElementAccessOf(node->op()); + ProcessInput(node, 0, changer_->TypeForBasePointer(access)); + ProcessInput(node, 1, kInt32); // element index + SetOutput(node, changer_->TypeForElement(access)); + if (lower()) lowering->DoLoadElement(node); + break; + } + case IrOpcode::kStoreElement: { + ElementAccess access = ElementAccessOf(node->op()); + ProcessInput(node, 0, changer_->TypeForBasePointer(access)); + ProcessInput(node, 1, kInt32); // element index + ProcessInput(node, 2, changer_->TypeForElement(access)); + SetOutput(node, 0); + if (lower()) lowering->DoStoreElement(node); + break; + } + + //------------------------------------------------------------------ + // Machine-level operators. + //------------------------------------------------------------------ + case IrOpcode::kLoad: { + // TODO(titzer): machine loads/stores need to know BaseTaggedness!? + RepType tBase = rTagged; + MachineType rep = OpParameter<MachineType>(node); + ProcessInput(node, 0, tBase); // pointer or object + ProcessInput(node, 1, kInt32); // index + SetOutput(node, changer_->TypeForMachineType(rep)); + break; + } + case IrOpcode::kStore: { + // TODO(titzer): machine loads/stores need to know BaseTaggedness!? 
+ RepType tBase = rTagged; + StoreRepresentation rep = OpParameter<StoreRepresentation>(node); + ProcessInput(node, 0, tBase); // pointer or object + ProcessInput(node, 1, kInt32); // index + ProcessInput(node, 2, changer_->TypeForMachineType(rep.rep)); + SetOutput(node, 0); + break; + } + case IrOpcode::kWord32Shr: + // We output unsigned int32 for shift right because JavaScript. + return VisitBinop(node, rWord32, rWord32 | tUint32); + case IrOpcode::kWord32And: + case IrOpcode::kWord32Or: + case IrOpcode::kWord32Xor: + case IrOpcode::kWord32Shl: + case IrOpcode::kWord32Sar: + // We use signed int32 as the output type for these word32 operations, + // though the machine bits are the same for either signed or unsigned, + // because JavaScript considers the result from these operations signed. + return VisitBinop(node, rWord32, rWord32 | tInt32); + case IrOpcode::kWord32Equal: + return VisitBinop(node, rWord32, rBit); + + case IrOpcode::kInt32Add: + case IrOpcode::kInt32Sub: + case IrOpcode::kInt32Mul: + case IrOpcode::kInt32Div: + case IrOpcode::kInt32Mod: + return VisitInt32Binop(node); + case IrOpcode::kInt32UDiv: + case IrOpcode::kInt32UMod: + return VisitUint32Binop(node); + case IrOpcode::kInt32LessThan: + case IrOpcode::kInt32LessThanOrEqual: + return VisitInt32Cmp(node); + + case IrOpcode::kUint32LessThan: + case IrOpcode::kUint32LessThanOrEqual: + return VisitUint32Cmp(node); + + case IrOpcode::kInt64Add: + case IrOpcode::kInt64Sub: + case IrOpcode::kInt64Mul: + case IrOpcode::kInt64Div: + case IrOpcode::kInt64Mod: + return VisitInt64Binop(node); + case IrOpcode::kInt64LessThan: + case IrOpcode::kInt64LessThanOrEqual: + return VisitInt64Cmp(node); + + case IrOpcode::kInt64UDiv: + case IrOpcode::kInt64UMod: + return VisitUint64Binop(node); + + case IrOpcode::kWord64And: + case IrOpcode::kWord64Or: + case IrOpcode::kWord64Xor: + case IrOpcode::kWord64Shl: + case IrOpcode::kWord64Shr: + case IrOpcode::kWord64Sar: + return VisitBinop(node, rWord64, rWord64); + 
case IrOpcode::kWord64Equal: + return VisitBinop(node, rWord64, rBit); + + case IrOpcode::kConvertInt32ToInt64: + return VisitUnop(node, tInt32 | rWord32, tInt32 | rWord64); + case IrOpcode::kConvertInt64ToInt32: + return VisitUnop(node, tInt64 | rWord64, tInt32 | rWord32); + + case IrOpcode::kChangeInt32ToFloat64: + return VisitUnop(node, tInt32 | rWord32, tInt32 | rFloat64); + case IrOpcode::kChangeUint32ToFloat64: + return VisitUnop(node, tUint32 | rWord32, tUint32 | rFloat64); + case IrOpcode::kChangeFloat64ToInt32: + return VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32); + case IrOpcode::kChangeFloat64ToUint32: + return VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32); + + case IrOpcode::kFloat64Add: + case IrOpcode::kFloat64Sub: + case IrOpcode::kFloat64Mul: + case IrOpcode::kFloat64Div: + case IrOpcode::kFloat64Mod: + return VisitFloat64Binop(node); + case IrOpcode::kFloat64Equal: + case IrOpcode::kFloat64LessThan: + case IrOpcode::kFloat64LessThanOrEqual: + return VisitFloat64Cmp(node); + default: + VisitInputs(node); + break; + } + } + + void DeferReplacement(Node* node, Node* replacement) { + if (replacement->id() < count_) { + // Replace with a previously existing node eagerly. + node->ReplaceUses(replacement); + } else { + // Otherwise, we are replacing a node with a representation change. + // Such a substitution must be done after all lowering is done, because + // new nodes do not have {NodeInfo} entries, and that would confuse + // the representation change insertion for uses of it. + replacements_.push_back(node); + replacements_.push_back(replacement); + } + // TODO(titzer) node->RemoveAllInputs(); // Node is now dead. 
+ } + + void PrintUseInfo(Node* node) { + TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic())); + PrintInfo(GetUseInfo(node)); + TRACE(("\n")); + } + + void PrintInfo(RepTypeUnion info) { + if (FLAG_trace_representation) { + char buf[REP_TYPE_STRLEN]; + RenderRepTypeUnion(buf, info); + TRACE(("%s", buf)); + } + } + + private: + JSGraph* jsgraph_; + int count_; // number of nodes in the graph + NodeInfo* info_; // node id -> usage information + NodeVector nodes_; // collected nodes + NodeVector replacements_; // replacements to be done after lowering + bool contains_js_nodes_; // {true} if a JS operator was seen + Phase phase_; // current phase of algorithm + RepresentationChanger* changer_; // for inserting representation changes + + std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue_; + + NodeInfo* GetInfo(Node* node) { + DCHECK(node->id() >= 0); + DCHECK(node->id() < count_); + return &info_[node->id()]; + } + + RepTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; } +}; + + +Node* SimplifiedLowering::IsTagged(Node* node) { + // TODO(titzer): factor this out to a TaggingScheme abstraction. + STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit. + return graph()->NewNode(machine()->WordAnd(), node, + jsgraph()->Int32Constant(kSmiTagMask)); +} + + +void SimplifiedLowering::LowerAllNodes() { + SimplifiedOperatorBuilder simplified(graph()->zone()); + RepresentationChanger changer(jsgraph(), &simplified, machine(), + graph()->zone()->isolate()); + RepresentationSelector selector(jsgraph(), zone(), &changer); + selector.Run(this); + + LoweringBuilder::LowerAllNodes(); +} + + +Node* SimplifiedLowering::Untag(Node* node) { + // TODO(titzer): factor this out to a TaggingScheme abstraction. 
+ Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize); + return graph()->NewNode(machine()->WordSar(), node, shift_amount); +} + + +Node* SimplifiedLowering::SmiTag(Node* node) { + // TODO(titzer): factor this out to a TaggingScheme abstraction. + Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize); + return graph()->NewNode(machine()->WordShl(), node, shift_amount); +} + + +Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) { + return jsgraph()->Int32Constant(offset - kHeapObjectTag); +} + + +static void UpdateControlSuccessors(Node* before, Node* node) { + DCHECK(IrOpcode::IsControlOpcode(before->opcode())); + UseIter iter = before->uses().begin(); + while (iter != before->uses().end()) { + if (IrOpcode::IsControlOpcode((*iter)->opcode()) && + NodeProperties::IsControlEdge(iter.edge())) { + iter = iter.UpdateToAndIncrement(node); + continue; + } + ++iter; + } +} + + +void SimplifiedLowering::DoChangeTaggedToUI32(Node* node, Node* effect, + Node* control, bool is_signed) { + // if (IsTagged(val)) + // ConvertFloat64To(Int32|Uint32)(Load[kMachineFloat64](input, #value_offset)) + // else Untag(val) + Node* val = node->InputAt(0); + Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val), control); + + // true branch. + Node* tbranch = graph()->NewNode(common()->IfTrue(), branch); + Node* loaded = graph()->NewNode( + machine()->Load(kMachineFloat64), val, + OffsetMinusTagConstant(HeapNumber::kValueOffset), effect); + Operator* op = is_signed ? machine()->ChangeFloat64ToInt32() + : machine()->ChangeFloat64ToUint32(); + Node* converted = graph()->NewNode(op, loaded); + + // false branch. + Node* fbranch = graph()->NewNode(common()->IfFalse(), branch); + Node* untagged = Untag(val); + + // merge. 
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch); + Node* phi = graph()->NewNode(common()->Phi(2), converted, untagged, merge); + UpdateControlSuccessors(control, merge); + branch->ReplaceInput(1, control); + node->ReplaceUses(phi); +} + + +void SimplifiedLowering::DoChangeTaggedToFloat64(Node* node, Node* effect, + Node* control) { + // if (IsTagged(input)) Load[kMachineFloat64](input, #value_offset) + // else ConvertFloat64(Untag(input)) + Node* val = node->InputAt(0); + Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val), control); + + // true branch. + Node* tbranch = graph()->NewNode(common()->IfTrue(), branch); + Node* loaded = graph()->NewNode( + machine()->Load(kMachineFloat64), val, + OffsetMinusTagConstant(HeapNumber::kValueOffset), effect); + + // false branch. + Node* fbranch = graph()->NewNode(common()->IfFalse(), branch); + Node* untagged = Untag(val); + Node* converted = + graph()->NewNode(machine()->ChangeInt32ToFloat64(), untagged); + + // merge. + Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch); + Node* phi = graph()->NewNode(common()->Phi(2), loaded, converted, merge); + UpdateControlSuccessors(control, merge); + branch->ReplaceInput(1, control); + node->ReplaceUses(phi); +} + + +void SimplifiedLowering::DoChangeUI32ToTagged(Node* node, Node* effect, + Node* control, bool is_signed) { + Node* val = node->InputAt(0); + Node* is_smi = NULL; + if (is_signed) { + if (SmiValuesAre32Bits()) { + // All int32s fit in this case. + DCHECK(kPointerSize == 8); + return node->ReplaceUses(SmiTag(val)); + } else { + // TODO(turbofan): use an Int32AddWithOverflow to tag and check here. 
+ Node* lt = graph()->NewNode(machine()->Int32LessThanOrEqual(), val, + jsgraph()->Int32Constant(Smi::kMaxValue)); + Node* gt = + graph()->NewNode(machine()->Int32LessThanOrEqual(), + jsgraph()->Int32Constant(Smi::kMinValue), val); + is_smi = graph()->NewNode(machine()->Word32And(), lt, gt); + } + } else { + // Check if Uint32 value is in the smi range. + is_smi = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val, + jsgraph()->Int32Constant(Smi::kMaxValue)); + } + + // TODO(turbofan): fold smi test branch eagerly. + // if (IsSmi(input)) SmiTag(input); + // else InlineAllocAndInitHeapNumber(ConvertToFloat64(input))) + Node* branch = graph()->NewNode(common()->Branch(), is_smi, control); + + // true branch. + Node* tbranch = graph()->NewNode(common()->IfTrue(), branch); + Node* smi_tagged = SmiTag(val); + + // false branch. + Node* fbranch = graph()->NewNode(common()->IfFalse(), branch); + Node* heap_num = jsgraph()->Constant(0.0); // TODO(titzer): alloc and init + + // merge. + Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch); + Node* phi = graph()->NewNode(common()->Phi(2), smi_tagged, heap_num, merge); + UpdateControlSuccessors(control, merge); + branch->ReplaceInput(1, control); + node->ReplaceUses(phi); +} + + +void SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect, + Node* control) { + return; // TODO(titzer): need to call runtime to allocate in one branch +} + + +void SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect, + Node* control) { + Node* cmp = graph()->NewNode(machine()->WordEqual(), node->InputAt(0), + jsgraph()->TrueConstant()); + node->ReplaceUses(cmp); +} + + +void SimplifiedLowering::DoChangeBitToBool(Node* node, Node* effect, + Node* control) { + Node* val = node->InputAt(0); + Node* branch = graph()->NewNode(common()->Branch(), val, control); + + // true branch. + Node* tbranch = graph()->NewNode(common()->IfTrue(), branch); + // false branch. 
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch); + // merge. + Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch); + Node* phi = graph()->NewNode(common()->Phi(2), jsgraph()->TrueConstant(), + jsgraph()->FalseConstant(), merge); + UpdateControlSuccessors(control, merge); + branch->ReplaceInput(1, control); + node->ReplaceUses(phi); +} + + +static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged, + MachineType representation, + Type* type) { + // TODO(turbofan): skip write barriers for Smis, etc. + if (base_is_tagged == kTaggedBase && representation == kMachineTagged) { + // Write barriers are only for writes into heap objects (i.e. tagged base). + return kFullWriteBarrier; + } + return kNoWriteBarrier; +} + + +void SimplifiedLowering::DoLoadField(Node* node) { + const FieldAccess& access = FieldAccessOf(node->op()); + node->set_op(machine_.Load(access.representation)); + Node* offset = jsgraph()->Int32Constant(access.offset - access.tag()); + node->InsertInput(zone(), 1, offset); +} + + +void SimplifiedLowering::DoStoreField(Node* node) { + const FieldAccess& access = FieldAccessOf(node->op()); + WriteBarrierKind kind = ComputeWriteBarrierKind( + access.base_is_tagged, access.representation, access.type); + node->set_op(machine_.Store(access.representation, kind)); + Node* offset = jsgraph()->Int32Constant(access.offset - access.tag()); + node->InsertInput(zone(), 1, offset); +} + + +Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access, + Node* index) { + int element_size = 0; + switch (access.representation) { + case kMachineTagged: + element_size = kPointerSize; + break; + case kMachineWord8: + element_size = 1; + break; + case kMachineWord16: + element_size = 2; + break; + case kMachineWord32: + element_size = 4; + break; + case kMachineWord64: + case kMachineFloat64: + element_size = 8; + break; + case kMachineLast: + UNREACHABLE(); + break; + } + if (element_size != 1) { + index = 
graph()->NewNode(machine()->Int32Mul(), + jsgraph()->Int32Constant(element_size), index); + } + int fixed_offset = access.header_size - access.tag(); + if (fixed_offset == 0) return index; + return graph()->NewNode(machine()->Int32Add(), index, + jsgraph()->Int32Constant(fixed_offset)); +} + + +void SimplifiedLowering::DoLoadElement(Node* node) { + const ElementAccess& access = ElementAccessOf(node->op()); + node->set_op(machine_.Load(access.representation)); + node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1))); +} + + +void SimplifiedLowering::DoStoreElement(Node* node) { + const ElementAccess& access = ElementAccessOf(node->op()); + WriteBarrierKind kind = ComputeWriteBarrierKind( + access.base_is_tagged, access.representation, access.type); + node->set_op(machine_.Store(access.representation, kind)); + node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1))); +} + + +void SimplifiedLowering::Lower(Node* node) {} + + +void SimplifiedLowering::LowerChange(Node* node, Node* effect, Node* control) { + switch (node->opcode()) { + case IrOpcode::kChangeTaggedToInt32: + DoChangeTaggedToUI32(node, effect, control, true); + break; + case IrOpcode::kChangeTaggedToUint32: + DoChangeTaggedToUI32(node, effect, control, false); + break; + case IrOpcode::kChangeTaggedToFloat64: + DoChangeTaggedToFloat64(node, effect, control); + break; + case IrOpcode::kChangeInt32ToTagged: + DoChangeUI32ToTagged(node, effect, control, true); + break; + case IrOpcode::kChangeUint32ToTagged: + DoChangeUI32ToTagged(node, effect, control, false); + break; + case IrOpcode::kChangeFloat64ToTagged: + DoChangeFloat64ToTagged(node, effect, control); + break; + case IrOpcode::kChangeBoolToBit: + DoChangeBoolToBit(node, effect, control); + break; + case IrOpcode::kChangeBitToBool: + DoChangeBitToBool(node, effect, control); + break; + default: + UNREACHABLE(); + break; + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru 
nodejs-0.11.13/deps/v8/src/compiler/simplified-lowering.h nodejs-0.11.15/deps/v8/src/compiler/simplified-lowering.h --- nodejs-0.11.13/deps/v8/src/compiler/simplified-lowering.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/simplified-lowering.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,71 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_ +#define V8_COMPILER_SIMPLIFIED_LOWERING_H_ + +#include "src/compiler/graph-reducer.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/lowering-builder.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class SimplifiedLowering : public LoweringBuilder { + public: + explicit SimplifiedLowering(JSGraph* jsgraph, + SourcePositionTable* source_positions) + : LoweringBuilder(jsgraph->graph(), source_positions), + jsgraph_(jsgraph), + machine_(jsgraph->zone()) {} + virtual ~SimplifiedLowering() {} + + void LowerAllNodes(); + + virtual void Lower(Node* node); + void LowerChange(Node* node, Node* effect, Node* control); + + // TODO(titzer): These are exposed for direct testing. Use a friend class. 
+ void DoLoadField(Node* node); + void DoStoreField(Node* node); + void DoLoadElement(Node* node); + void DoStoreElement(Node* node); + + private: + JSGraph* jsgraph_; + MachineOperatorBuilder machine_; + + Node* SmiTag(Node* node); + Node* IsTagged(Node* node); + Node* Untag(Node* node); + Node* OffsetMinusTagConstant(int32_t offset); + Node* ComputeIndex(const ElementAccess& access, Node* index); + + void DoChangeTaggedToUI32(Node* node, Node* effect, Node* control, + bool is_signed); + void DoChangeUI32ToTagged(Node* node, Node* effect, Node* control, + bool is_signed); + void DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control); + void DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control); + void DoChangeBoolToBit(Node* node, Node* effect, Node* control); + void DoChangeBitToBool(Node* node, Node* effect, Node* control); + + friend class RepresentationSelector; + + Zone* zone() { return jsgraph_->zone(); } + JSGraph* jsgraph() { return jsgraph_; } + Graph* graph() { return jsgraph()->graph(); } + CommonOperatorBuilder* common() { return jsgraph()->common(); } + MachineOperatorBuilder* machine() { return &machine_; } +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_SIMPLIFIED_LOWERING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/simplified-node-factory.h nodejs-0.11.15/deps/v8/src/compiler/simplified-node-factory.h --- nodejs-0.11.13/deps/v8/src/compiler/simplified-node-factory.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/simplified-node-factory.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,128 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_ +#define V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_ + +#include "src/compiler/node.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define SIMPLIFIED() static_cast<NodeFactory*>(this)->simplified() +#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a) +#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b) +#define NEW_NODE_3(op, a, b, c) \ + static_cast<NodeFactory*>(this)->NewNode(op, a, b, c) + +template <typename NodeFactory> +class SimplifiedNodeFactory { + public: + Node* BooleanNot(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->BooleanNot(), a); + } + + Node* NumberEqual(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberEqual(), a, b); + } + Node* NumberNotEqual(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberNotEqual(), a, b); + } + Node* NumberLessThan(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberLessThan(), a, b); + } + Node* NumberLessThanOrEqual(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberLessThanOrEqual(), a, b); + } + Node* NumberAdd(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberAdd(), a, b); + } + Node* NumberSubtract(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberSubtract(), a, b); + } + Node* NumberMultiply(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberMultiply(), a, b); + } + Node* NumberDivide(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberDivide(), a, b); + } + Node* NumberModulus(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->NumberModulus(), a, b); + } + Node* NumberToInt32(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->NumberToInt32(), a); + } + Node* NumberToUint32(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->NumberToUint32(), a); + } + + Node* ReferenceEqual(Type* type, Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->ReferenceEqual(), a, b); + } + + Node* 
StringEqual(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->StringEqual(), a, b); + } + Node* StringLessThan(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->StringLessThan(), a, b); + } + Node* StringLessThanOrEqual(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->StringLessThanOrEqual(), a, b); + } + Node* StringAdd(Node* a, Node* b) { + return NEW_NODE_2(SIMPLIFIED()->StringAdd(), a, b); + } + + Node* ChangeTaggedToInt32(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToInt32(), a); + } + Node* ChangeTaggedToUint32(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToUint32(), a); + } + Node* ChangeTaggedToFloat64(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToFloat64(), a); + } + Node* ChangeInt32ToTagged(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeInt32ToTagged(), a); + } + Node* ChangeUint32ToTagged(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeUint32ToTagged(), a); + } + Node* ChangeFloat64ToTagged(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeFloat64ToTagged(), a); + } + Node* ChangeBoolToBit(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeBoolToBit(), a); + } + Node* ChangeBitToBool(Node* a) { + return NEW_NODE_1(SIMPLIFIED()->ChangeBitToBool(), a); + } + + Node* LoadField(const FieldAccess& access, Node* object) { + return NEW_NODE_1(SIMPLIFIED()->LoadField(access), object); + } + Node* StoreField(const FieldAccess& access, Node* object, Node* value) { + return NEW_NODE_2(SIMPLIFIED()->StoreField(access), object, value); + } + Node* LoadElement(const ElementAccess& access, Node* object, Node* index) { + return NEW_NODE_2(SIMPLIFIED()->LoadElement(access), object, index); + } + Node* StoreElement(const ElementAccess& access, Node* object, Node* index, + Node* value) { + return NEW_NODE_3(SIMPLIFIED()->StoreElement(access), object, index, value); + } +}; + +#undef NEW_NODE_1 +#undef NEW_NODE_2 +#undef NEW_NODE_3 +#undef SIMPLIFIED + +} // namespace compiler +} // namespace internal +} // 
namespace v8 + +#endif // V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/simplified-operator.h nodejs-0.11.15/deps/v8/src/compiler/simplified-operator.h --- nodejs-0.11.13/deps/v8/src/compiler/simplified-operator.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/simplified-operator.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,189 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_ +#define V8_COMPILER_SIMPLIFIED_OPERATOR_H_ + +#include "src/compiler/machine-operator.h" +#include "src/compiler/opcodes.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +enum BaseTaggedness { kUntaggedBase, kTaggedBase }; + +// An access descriptor for loads/stores of fixed structures like field +// accesses of heap objects. Accesses from either tagged or untagged base +// pointers are supported; untagging is done automatically during lowering. +struct FieldAccess { + BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged. + int offset; // offset of the field, without tag. + Handle<Name> name; // debugging only. + Type* type; // type of the field. + MachineType representation; // machine representation of field. + + int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; } +}; + + +// An access descriptor for loads/stores of indexed structures like characters +// in strings or off-heap backing stores. Accesses from either tagged or +// untagged base pointers are supported; untagging is done automatically during +// lowering. +struct ElementAccess { + BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged. + int header_size; // size of the header, without tag. + Type* type; // type of the element. 
+ MachineType representation; // machine representation of element. + + int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; } +}; + + +// If the accessed object is not a heap object, add this to the header_size. +static const int kNonHeapObjectHeaderSize = kHeapObjectTag; + + +// Specialization for static parameters of type {FieldAccess}. +template <> +struct StaticParameterTraits<const FieldAccess> { + static OStream& PrintTo(OStream& os, const FieldAccess& val) { // NOLINT + return os << val.offset; + } + static int HashCode(const FieldAccess& val) { + return (val.offset < 16) | (val.representation & 0xffff); + } + static bool Equals(const FieldAccess& a, const FieldAccess& b) { + return a.base_is_tagged == b.base_is_tagged && a.offset == b.offset && + a.representation == b.representation && a.type->Is(b.type); + } +}; + + +// Specialization for static parameters of type {ElementAccess}. +template <> +struct StaticParameterTraits<const ElementAccess> { + static OStream& PrintTo(OStream& os, const ElementAccess& val) { // NOLINT + return os << val.header_size; + } + static int HashCode(const ElementAccess& val) { + return (val.header_size < 16) | (val.representation & 0xffff); + } + static bool Equals(const ElementAccess& a, const ElementAccess& b) { + return a.base_is_tagged == b.base_is_tagged && + a.header_size == b.header_size && + a.representation == b.representation && a.type->Is(b.type); + } +}; + + +inline const FieldAccess FieldAccessOf(Operator* op) { + DCHECK(op->opcode() == IrOpcode::kLoadField || + op->opcode() == IrOpcode::kStoreField); + return static_cast<Operator1<FieldAccess>*>(op)->parameter(); +} + + +inline const ElementAccess ElementAccessOf(Operator* op) { + DCHECK(op->opcode() == IrOpcode::kLoadElement || + op->opcode() == IrOpcode::kStoreElement); + return static_cast<Operator1<ElementAccess>*>(op)->parameter(); +} + + +// Interface for building simplified operators, which represent the +// medium-level operations 
of V8, including adding numbers, allocating objects, +// indexing into objects and arrays, etc. +// All operators are typed but many are representation independent. + +// Number values from JS can be in one of these representations: +// - Tagged: word-sized integer that is either +// - a signed small integer (31 or 32 bits plus a tag) +// - a tagged pointer to a HeapNumber object that has a float64 field +// - Int32: an untagged signed 32-bit integer +// - Uint32: an untagged unsigned 32-bit integer +// - Float64: an untagged float64 + +// Additional representations for intermediate code or non-JS code: +// - Int64: an untagged signed 64-bit integer +// - Uint64: an untagged unsigned 64-bit integer +// - Float32: an untagged float32 + +// Boolean values can be: +// - Bool: a tagged pointer to either the canonical JS #false or +// the canonical JS #true object +// - Bit: an untagged integer 0 or 1, but word-sized +class SimplifiedOperatorBuilder { + public: + explicit inline SimplifiedOperatorBuilder(Zone* zone) : zone_(zone) {} + +#define SIMPLE(name, properties, inputs, outputs) \ + return new (zone_) \ + SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name); + +#define OP1(name, ptype, pname, properties, inputs, outputs) \ + return new (zone_) \ + Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \ + inputs, outputs, #name, pname) + +#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1) +#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1) + + Operator* BooleanNot() const { UNOP(BooleanNot); } + + Operator* NumberEqual() const { BINOP(NumberEqual); } + Operator* NumberLessThan() const { BINOP(NumberLessThan); } + Operator* NumberLessThanOrEqual() const { BINOP(NumberLessThanOrEqual); } + Operator* NumberAdd() const { BINOP(NumberAdd); } + Operator* NumberSubtract() const { BINOP(NumberSubtract); } + Operator* NumberMultiply() const { BINOP(NumberMultiply); } + Operator* NumberDivide() const { BINOP(NumberDivide); } + 
Operator* NumberModulus() const { BINOP(NumberModulus); } + Operator* NumberToInt32() const { UNOP(NumberToInt32); } + Operator* NumberToUint32() const { UNOP(NumberToUint32); } + + Operator* ReferenceEqual(Type* type) const { BINOP(ReferenceEqual); } + + Operator* StringEqual() const { BINOP(StringEqual); } + Operator* StringLessThan() const { BINOP(StringLessThan); } + Operator* StringLessThanOrEqual() const { BINOP(StringLessThanOrEqual); } + Operator* StringAdd() const { BINOP(StringAdd); } + + Operator* ChangeTaggedToInt32() const { UNOP(ChangeTaggedToInt32); } + Operator* ChangeTaggedToUint32() const { UNOP(ChangeTaggedToUint32); } + Operator* ChangeTaggedToFloat64() const { UNOP(ChangeTaggedToFloat64); } + Operator* ChangeInt32ToTagged() const { UNOP(ChangeInt32ToTagged); } + Operator* ChangeUint32ToTagged() const { UNOP(ChangeUint32ToTagged); } + Operator* ChangeFloat64ToTagged() const { UNOP(ChangeFloat64ToTagged); } + Operator* ChangeBoolToBit() const { UNOP(ChangeBoolToBit); } + Operator* ChangeBitToBool() const { UNOP(ChangeBitToBool); } + + Operator* LoadField(const FieldAccess& access) const { + OP1(LoadField, FieldAccess, access, Operator::kNoWrite, 1, 1); + } + Operator* StoreField(const FieldAccess& access) const { + OP1(StoreField, FieldAccess, access, Operator::kNoRead, 2, 0); + } + Operator* LoadElement(const ElementAccess& access) const { + OP1(LoadElement, ElementAccess, access, Operator::kNoWrite, 2, 1); + } + Operator* StoreElement(const ElementAccess& access) const { + OP1(StoreElement, ElementAccess, access, Operator::kNoRead, 3, 0); + } + +#undef BINOP +#undef UNOP +#undef OP1 +#undef SIMPLE + + private: + Zone* zone_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_SIMPLIFIED_OPERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/source-position.cc nodejs-0.11.15/deps/v8/src/compiler/source-position.cc --- nodejs-0.11.13/deps/v8/src/compiler/source-position.cc 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/compiler/source-position.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,55 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/source-position.h" +#include "src/compiler/graph.h" +#include "src/compiler/node-aux-data-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class SourcePositionTable::Decorator : public GraphDecorator { + public: + explicit Decorator(SourcePositionTable* source_positions) + : source_positions_(source_positions) {} + + virtual void Decorate(Node* node) { + DCHECK(!source_positions_->current_position_.IsInvalid()); + source_positions_->table_.Set(node, source_positions_->current_position_); + } + + private: + SourcePositionTable* source_positions_; +}; + + +SourcePositionTable::SourcePositionTable(Graph* graph) + : graph_(graph), + decorator_(NULL), + current_position_(SourcePosition::Invalid()), + table_(graph->zone()) {} + + +void SourcePositionTable::AddDecorator() { + DCHECK(decorator_ == NULL); + decorator_ = new (graph_->zone()) Decorator(this); + graph_->AddDecorator(decorator_); +} + + +void SourcePositionTable::RemoveDecorator() { + DCHECK(decorator_ != NULL); + graph_->RemoveDecorator(decorator_); + decorator_ = NULL; +} + + +SourcePosition SourcePositionTable::GetSourcePosition(Node* node) { + return table_.Get(node); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/source-position.h nodejs-0.11.15/deps/v8/src/compiler/source-position.h --- nodejs-0.11.13/deps/v8/src/compiler/source-position.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/source-position.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,99 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_SOURCE_POSITION_H_ +#define V8_COMPILER_SOURCE_POSITION_H_ + +#include "src/assembler.h" +#include "src/compiler/node-aux-data.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Encapsulates encoding and decoding of sources positions from which Nodes +// originated. +class SourcePosition V8_FINAL { + public: + explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {} + + static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); } + bool IsUnknown() const { return raw() == kUnknownPosition; } + + static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); } + bool IsInvalid() const { return raw() == kInvalidPosition; } + + int raw() const { return raw_; } + + private: + static const int kInvalidPosition = -2; + static const int kUnknownPosition = RelocInfo::kNoPosition; + STATIC_ASSERT(kInvalidPosition != kUnknownPosition); + int raw_; +}; + + +inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) { + return lhs.raw() == rhs.raw(); +} + +inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) { + return !(lhs == rhs); +} + + +class SourcePositionTable V8_FINAL { + public: + class Scope { + public: + Scope(SourcePositionTable* source_positions, SourcePosition position) + : source_positions_(source_positions), + prev_position_(source_positions->current_position_) { + Init(position); + } + Scope(SourcePositionTable* source_positions, Node* node) + : source_positions_(source_positions), + prev_position_(source_positions->current_position_) { + Init(source_positions_->GetSourcePosition(node)); + } + ~Scope() { source_positions_->current_position_ = prev_position_; } + + private: + void Init(SourcePosition position) { + if (!position.IsUnknown() || prev_position_.IsInvalid()) { + source_positions_->current_position_ = position; + } + } + + 
SourcePositionTable* source_positions_; + SourcePosition prev_position_; + DISALLOW_COPY_AND_ASSIGN(Scope); + }; + + explicit SourcePositionTable(Graph* graph); + ~SourcePositionTable() { + if (decorator_ != NULL) RemoveDecorator(); + } + + void AddDecorator(); + void RemoveDecorator(); + + SourcePosition GetSourcePosition(Node* node); + + private: + class Decorator; + + Graph* graph_; + Decorator* decorator_; + SourcePosition current_position_; + NodeAuxData<SourcePosition> table_; + + DISALLOW_COPY_AND_ASSIGN(SourcePositionTable); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/compiler/structured-machine-assembler.cc nodejs-0.11.15/deps/v8/src/compiler/structured-machine-assembler.cc --- nodejs-0.11.13/deps/v8/src/compiler/structured-machine-assembler.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/structured-machine-assembler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,664 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/pipeline.h" +#include "src/compiler/scheduler.h" +#include "src/compiler/structured-machine-assembler.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Node* Variable::Get() const { return smasm_->GetVariable(offset_); } + + +void Variable::Set(Node* value) const { smasm_->SetVariable(offset_, value); } + + +StructuredMachineAssembler::StructuredMachineAssembler( + Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder, + MachineType word) + : GraphBuilder(graph), + schedule_(new (zone()) Schedule(zone())), + machine_(zone(), word), + common_(zone()), + call_descriptor_builder_(call_descriptor_builder), + parameters_(NULL), + current_environment_(new (zone()) + Environment(zone(), schedule()->entry(), false)), + number_of_variables_(0) { + Node* s = graph->NewNode(common_.Start(parameter_count())); + graph->SetStart(s); + if (parameter_count() == 0) return; + parameters_ = zone()->NewArray<Node*>(parameter_count()); + for (int i = 0; i < parameter_count(); ++i) { + parameters_[i] = NewNode(common()->Parameter(i), graph->start()); + } +} + + +Schedule* StructuredMachineAssembler::Export() { + // Compute the correct codegen order. + DCHECK(schedule_->rpo_order()->empty()); + Scheduler::ComputeSpecialRPO(schedule_); + // Invalidate MachineAssembler. 
+ Schedule* schedule = schedule_; + schedule_ = NULL; + return schedule; +} + + +Node* StructuredMachineAssembler::Parameter(int index) { + DCHECK(0 <= index && index < parameter_count()); + return parameters_[index]; +} + + +Node* StructuredMachineAssembler::MakeNode(Operator* op, int input_count, + Node** inputs) { + DCHECK(ScheduleValid()); + DCHECK(current_environment_ != NULL); + Node* node = graph()->NewNode(op, input_count, inputs); + BasicBlock* block = NULL; + switch (op->opcode()) { + case IrOpcode::kParameter: + case IrOpcode::kInt32Constant: + case IrOpcode::kInt64Constant: + case IrOpcode::kFloat64Constant: + case IrOpcode::kExternalConstant: + case IrOpcode::kNumberConstant: + case IrOpcode::kHeapConstant: + // Parameters and constants must be in start. + block = schedule()->start(); + break; + default: + // Verify all leaf nodes handled above. + DCHECK((op->OutputCount() == 0) == (op->opcode() == IrOpcode::kStore)); + block = current_environment_->block_; + break; + } + if (block != NULL) { + schedule()->AddNode(block, node); + } + return node; +} + + +Variable StructuredMachineAssembler::NewVariable(Node* initial_value) { + CHECK(initial_value != NULL); + int offset = number_of_variables_++; + // Extend current environment to correct number of values. 
+ NodeVector* variables = CurrentVars(); + size_t to_add = number_of_variables_ - variables->size(); + if (to_add != 0) { + variables->reserve(number_of_variables_); + variables->insert(variables->end(), to_add, NULL); + } + variables->at(offset) = initial_value; + return Variable(this, offset); +} + + +Node* StructuredMachineAssembler::GetVariable(int offset) { + DCHECK(ScheduleValid()); + return VariableAt(current_environment_, offset); +} + + +void StructuredMachineAssembler::SetVariable(int offset, Node* value) { + DCHECK(ScheduleValid()); + Node*& ref = VariableAt(current_environment_, offset); + ref = value; +} + + +Node*& StructuredMachineAssembler::VariableAt(Environment* environment, + int32_t offset) { + // Variable used out of scope. + CHECK(static_cast<size_t>(offset) < environment->variables_.size()); + Node*& value = environment->variables_.at(offset); + CHECK(value != NULL); // Variable used out of scope. + return value; +} + + +void StructuredMachineAssembler::Return(Node* value) { + BasicBlock* block = current_environment_->block_; + if (block != NULL) { + schedule()->AddReturn(block, value); + } + CopyCurrentAsDead(); +} + + +void StructuredMachineAssembler::CopyCurrentAsDead() { + DCHECK(current_environment_ != NULL); + bool is_dead = current_environment_->is_dead_; + current_environment_->is_dead_ = true; + Environment* next = Copy(current_environment_); + current_environment_->is_dead_ = is_dead; + current_environment_ = next; +} + + +StructuredMachineAssembler::Environment* StructuredMachineAssembler::Copy( + Environment* env, int truncate_at) { + Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_); + if (!new_env->is_dead_) { + new_env->block_ = schedule()->NewBasicBlock(); + } + new_env->variables_.reserve(truncate_at); + NodeVectorIter end = env->variables_.end(); + DCHECK(truncate_at <= static_cast<int>(env->variables_.size())); + end -= static_cast<int>(env->variables_.size()) - truncate_at; + 
new_env->variables_.insert(new_env->variables_.begin(), + env->variables_.begin(), end); + return new_env; +} + + +StructuredMachineAssembler::Environment* +StructuredMachineAssembler::CopyForLoopHeader(Environment* env) { + Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_); + if (!new_env->is_dead_) { + new_env->block_ = schedule()->NewBasicBlock(); + } + new_env->variables_.reserve(env->variables_.size()); + for (NodeVectorIter i = env->variables_.begin(); i != env->variables_.end(); + ++i) { + Node* phi = NULL; + if (*i != NULL) { + phi = graph()->NewNode(common()->Phi(1), *i); + if (new_env->block_ != NULL) { + schedule()->AddNode(new_env->block_, phi); + } + } + new_env->variables_.push_back(phi); + } + return new_env; +} + + +void StructuredMachineAssembler::MergeBackEdgesToLoopHeader( + Environment* header, EnvironmentVector* environments) { + // Only merge as many variables are were declared before this loop. + int n = static_cast<int>(header->variables_.size()); + // TODO(dcarney): invert loop order and extend phis once. 
+ for (EnvironmentVector::iterator i = environments->begin(); + i != environments->end(); ++i) { + Environment* from = *i; + if (from->is_dead_) continue; + AddGoto(from, header); + for (int i = 0; i < n; ++i) { + Node* phi = header->variables_[i]; + if (phi == NULL) continue; + phi->set_op(common()->Phi(phi->InputCount() + 1)); + phi->AppendInput(zone(), VariableAt(from, i)); + } + } +} + + +void StructuredMachineAssembler::Merge(EnvironmentVector* environments, + int truncate_at) { + DCHECK(current_environment_ == NULL || current_environment_->is_dead_); + Environment* next = new (zone()) Environment(zone(), NULL, false); + current_environment_ = next; + size_t n_vars = number_of_variables_; + NodeVector& vars = next->variables_; + vars.reserve(n_vars); + Node** scratch = NULL; + size_t n_envs = environments->size(); + Environment** live_environments = reinterpret_cast<Environment**>( + alloca(sizeof(environments->at(0)) * n_envs)); + size_t n_live = 0; + for (size_t i = 0; i < n_envs; i++) { + if (environments->at(i)->is_dead_) continue; + live_environments[n_live++] = environments->at(i); + } + n_envs = n_live; + if (n_live == 0) next->is_dead_ = true; + if (!next->is_dead_) { + next->block_ = schedule()->NewBasicBlock(); + } + for (size_t j = 0; j < n_vars; ++j) { + Node* resolved = NULL; + // Find first non equal variable. + size_t i = 0; + for (; i < n_envs; i++) { + DCHECK(live_environments[i]->variables_.size() <= n_vars); + Node* val = NULL; + if (j < static_cast<size_t>(truncate_at)) { + val = live_environments[i]->variables_.at(j); + // TODO(dcarney): record start position at time of split. + // all variables after this should not be NULL. + if (val != NULL) { + val = VariableAt(live_environments[i], static_cast<int>(j)); + } + } + if (val == resolved) continue; + if (i != 0) break; + resolved = val; + } + // Have to generate a phi. + if (i < n_envs) { + // All values thus far uninitialized, variable used out of scope. 
+ CHECK(resolved != NULL); + // Init scratch buffer. + if (scratch == NULL) { + scratch = static_cast<Node**>(alloca(n_envs * sizeof(resolved))); + } + for (size_t k = 0; k < i; k++) { + scratch[k] = resolved; + } + for (; i < n_envs; i++) { + scratch[i] = live_environments[i]->variables_[j]; + } + resolved = graph()->NewNode(common()->Phi(static_cast<int>(n_envs)), + static_cast<int>(n_envs), scratch); + if (next->block_ != NULL) { + schedule()->AddNode(next->block_, resolved); + } + } + vars.push_back(resolved); + } +} + + +void StructuredMachineAssembler::AddGoto(Environment* from, Environment* to) { + if (to->is_dead_) { + DCHECK(from->is_dead_); + return; + } + DCHECK(!from->is_dead_); + schedule()->AddGoto(from->block_, to->block_); +} + + +// TODO(dcarney): add pass before rpo to schedule to compute these. +BasicBlock* StructuredMachineAssembler::TrampolineFor(BasicBlock* block) { + BasicBlock* trampoline = schedule()->NewBasicBlock(); + schedule()->AddGoto(trampoline, block); + return trampoline; +} + + +void StructuredMachineAssembler::AddBranch(Environment* environment, + Node* condition, + Environment* true_val, + Environment* false_val) { + DCHECK(environment->is_dead_ == true_val->is_dead_); + DCHECK(environment->is_dead_ == false_val->is_dead_); + if (true_val->block_ == false_val->block_) { + if (environment->is_dead_) return; + AddGoto(environment, true_val); + return; + } + Node* branch = graph()->NewNode(common()->Branch(), condition); + if (environment->is_dead_) return; + BasicBlock* true_block = TrampolineFor(true_val->block_); + BasicBlock* false_block = TrampolineFor(false_val->block_); + schedule()->AddBranch(environment->block_, branch, true_block, false_block); +} + + +StructuredMachineAssembler::Environment::Environment(Zone* zone, + BasicBlock* block, + bool is_dead) + : block_(block), + variables_(NodeVector::allocator_type(zone)), + is_dead_(is_dead) {} + + +StructuredMachineAssembler::IfBuilder::IfBuilder( + 
StructuredMachineAssembler* smasm) + : smasm_(smasm), + if_clauses_(IfClauses::allocator_type(smasm_->zone())), + pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) { + DCHECK(smasm_->current_environment_ != NULL); + PushNewIfClause(); + DCHECK(!IsDone()); +} + + +StructuredMachineAssembler::IfBuilder& +StructuredMachineAssembler::IfBuilder::If() { + DCHECK(smasm_->current_environment_ != NULL); + IfClause* clause = CurrentClause(); + if (clause->then_environment_ != NULL || clause->else_environment_ != NULL) { + PushNewIfClause(); + } + return *this; +} + + +StructuredMachineAssembler::IfBuilder& +StructuredMachineAssembler::IfBuilder::If(Node* condition) { + If(); + IfClause* clause = CurrentClause(); + // Store branch for future resolution. + UnresolvedBranch* next = new (smasm_->zone()) + UnresolvedBranch(smasm_->current_environment_, condition, NULL); + if (clause->unresolved_list_tail_ != NULL) { + clause->unresolved_list_tail_->next_ = next; + } + clause->unresolved_list_tail_ = next; + // Push onto merge queues. 
+ clause->pending_else_merges_.push_back(next); + clause->pending_then_merges_.push_back(next); + smasm_->current_environment_ = NULL; + return *this; +} + + +void StructuredMachineAssembler::IfBuilder::And() { + CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionTerm); +} + + +void StructuredMachineAssembler::IfBuilder::Or() { + CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionTerm); +} + + +void StructuredMachineAssembler::IfBuilder::Then() { + CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionDone); +} + + +void StructuredMachineAssembler::IfBuilder::Else() { + AddCurrentToPending(); + CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionDone); +} + + +void StructuredMachineAssembler::IfBuilder::AddCurrentToPending() { + if (smasm_->current_environment_ != NULL && + !smasm_->current_environment_->is_dead_) { + pending_exit_merges_.push_back(smasm_->current_environment_); + } + smasm_->current_environment_ = NULL; +} + + +void StructuredMachineAssembler::IfBuilder::PushNewIfClause() { + int curr_size = + static_cast<int>(smasm_->current_environment_->variables_.size()); + IfClause* clause = new (smasm_->zone()) IfClause(smasm_->zone(), curr_size); + if_clauses_.push_back(clause); +} + + +StructuredMachineAssembler::IfBuilder::IfClause::IfClause( + Zone* zone, int initial_environment_size) + : unresolved_list_tail_(NULL), + initial_environment_size_(initial_environment_size), + expression_states_(ExpressionStates::allocator_type(zone)), + pending_then_merges_(PendingMergeStack::allocator_type(zone)), + pending_else_merges_(PendingMergeStack::allocator_type(zone)), + then_environment_(NULL), + else_environment_(NULL) { + PushNewExpressionState(); +} + + +StructuredMachineAssembler::IfBuilder::PendingMergeStackRange +StructuredMachineAssembler::IfBuilder::IfClause::ComputeRelevantMerges( + CombineType combine_type) { + DCHECK(!expression_states_.empty()); + PendingMergeStack* stack; + 
int start; + if (combine_type == kCombineThen) { + stack = &pending_then_merges_; + start = expression_states_.back().pending_then_size_; + } else { + DCHECK(combine_type == kCombineElse); + stack = &pending_else_merges_; + start = expression_states_.back().pending_else_size_; + } + PendingMergeStackRange data; + data.merge_stack_ = stack; + data.start_ = start; + data.size_ = static_cast<int>(stack->size()) - start; + return data; +} + + +void StructuredMachineAssembler::IfBuilder::IfClause::ResolvePendingMerges( + StructuredMachineAssembler* smasm, CombineType combine_type, + ResolutionType resolution_type) { + DCHECK(smasm->current_environment_ == NULL); + PendingMergeStackRange data = ComputeRelevantMerges(combine_type); + DCHECK_EQ(data.merge_stack_->back(), unresolved_list_tail_); + DCHECK(data.size_ > 0); + // TODO(dcarney): assert no new variables created during expression building. + int truncate_at = initial_environment_size_; + if (data.size_ == 1) { + // Just copy environment in common case. + smasm->current_environment_ = + smasm->Copy(unresolved_list_tail_->environment_, truncate_at); + } else { + EnvironmentVector environments( + EnvironmentVector::allocator_type(smasm->zone())); + environments.reserve(data.size_); + CopyEnvironments(data, &environments); + DCHECK(static_cast<int>(environments.size()) == data.size_); + smasm->Merge(&environments, truncate_at); + } + Environment* then_environment = then_environment_; + Environment* else_environment = NULL; + if (resolution_type == kExpressionDone) { + DCHECK(expression_states_.size() == 1); + // Set the current then_ or else_environment_ to the new merged environment. 
+ if (combine_type == kCombineThen) { + DCHECK(then_environment_ == NULL && else_environment_ == NULL); + this->then_environment_ = smasm->current_environment_; + } else { + DCHECK(else_environment_ == NULL); + this->else_environment_ = smasm->current_environment_; + } + } else { + DCHECK(resolution_type == kExpressionTerm); + DCHECK(then_environment_ == NULL && else_environment_ == NULL); + } + if (combine_type == kCombineThen) { + then_environment = smasm->current_environment_; + } else { + DCHECK(combine_type == kCombineElse); + else_environment = smasm->current_environment_; + } + // Finalize branches and clear the pending stack. + FinalizeBranches(smasm, data, combine_type, then_environment, + else_environment); +} + + +void StructuredMachineAssembler::IfBuilder::IfClause::CopyEnvironments( + const PendingMergeStackRange& data, EnvironmentVector* environments) { + PendingMergeStack::iterator i = data.merge_stack_->begin(); + PendingMergeStack::iterator end = data.merge_stack_->end(); + for (i += data.start_; i != end; ++i) { + environments->push_back((*i)->environment_); + } +} + + +void StructuredMachineAssembler::IfBuilder::IfClause::PushNewExpressionState() { + ExpressionState next; + next.pending_then_size_ = static_cast<int>(pending_then_merges_.size()); + next.pending_else_size_ = static_cast<int>(pending_else_merges_.size()); + expression_states_.push_back(next); +} + + +void StructuredMachineAssembler::IfBuilder::IfClause::PopExpressionState() { + expression_states_.pop_back(); + DCHECK(!expression_states_.empty()); +} + + +void StructuredMachineAssembler::IfBuilder::IfClause::FinalizeBranches( + StructuredMachineAssembler* smasm, const PendingMergeStackRange& data, + CombineType combine_type, Environment* const then_environment, + Environment* const else_environment) { + DCHECK(unresolved_list_tail_ != NULL); + DCHECK(smasm->current_environment_ != NULL); + if (data.size_ == 0) return; + PendingMergeStack::iterator curr = data.merge_stack_->begin(); + 
PendingMergeStack::iterator end = data.merge_stack_->end(); + // Finalize everything but the head first, + // in the order the branches enter the merge block. + end -= 1; + Environment* true_val = then_environment; + Environment* false_val = else_environment; + Environment** next; + if (combine_type == kCombineThen) { + next = &false_val; + } else { + DCHECK(combine_type == kCombineElse); + next = &true_val; + } + for (curr += data.start_; curr != end; ++curr) { + UnresolvedBranch* branch = *curr; + *next = branch->next_->environment_; + smasm->AddBranch(branch->environment_, branch->condition_, true_val, + false_val); + } + DCHECK(curr + 1 == data.merge_stack_->end()); + // Now finalize the tail if possible. + if (then_environment != NULL && else_environment != NULL) { + UnresolvedBranch* branch = *curr; + smasm->AddBranch(branch->environment_, branch->condition_, then_environment, + else_environment); + } + // Clear the merge stack. + PendingMergeStack::iterator begin = data.merge_stack_->begin(); + begin += data.start_; + data.merge_stack_->erase(begin, data.merge_stack_->end()); + DCHECK_EQ(static_cast<int>(data.merge_stack_->size()), data.start_); +} + + +void StructuredMachineAssembler::IfBuilder::End() { + DCHECK(!IsDone()); + AddCurrentToPending(); + size_t current_pending = pending_exit_merges_.size(); + // All unresolved branch edges are now set to pending. + for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end(); + ++i) { + IfClause* clause = *i; + DCHECK(clause->expression_states_.size() == 1); + PendingMergeStackRange data; + // Copy then environments. 
+ data = clause->ComputeRelevantMerges(kCombineThen); + clause->CopyEnvironments(data, &pending_exit_merges_); + Environment* head = NULL; + // Will resolve the head node in the else_merge + if (data.size_ > 0 && clause->then_environment_ == NULL && + clause->else_environment_ == NULL) { + head = pending_exit_merges_.back(); + pending_exit_merges_.pop_back(); + } + // Copy else environments. + data = clause->ComputeRelevantMerges(kCombineElse); + clause->CopyEnvironments(data, &pending_exit_merges_); + if (head != NULL) { + // Must have data to merge, or else head will never get a branch. + DCHECK(data.size_ != 0); + pending_exit_merges_.push_back(head); + } + } + smasm_->Merge(&pending_exit_merges_, + if_clauses_[0]->initial_environment_size_); + // Anything initally pending jumps into the new environment. + for (size_t i = 0; i < current_pending; ++i) { + smasm_->AddGoto(pending_exit_merges_[i], smasm_->current_environment_); + } + // Resolve all branches. + for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end(); + ++i) { + IfClause* clause = *i; + // Must finalize all environments, so ensure they are set correctly. + Environment* then_environment = clause->then_environment_; + if (then_environment == NULL) { + then_environment = smasm_->current_environment_; + } + Environment* else_environment = clause->else_environment_; + PendingMergeStackRange data; + // Finalize then environments. + data = clause->ComputeRelevantMerges(kCombineThen); + clause->FinalizeBranches(smasm_, data, kCombineThen, then_environment, + else_environment); + // Finalize else environments. + // Now set the else environment so head is finalized for edge case above. + if (else_environment == NULL) { + else_environment = smasm_->current_environment_; + } + data = clause->ComputeRelevantMerges(kCombineElse); + clause->FinalizeBranches(smasm_, data, kCombineElse, then_environment, + else_environment); + } + // Future accesses to this builder should crash immediately. 
+ pending_exit_merges_.clear(); + if_clauses_.clear(); + DCHECK(IsDone()); +} + + +StructuredMachineAssembler::LoopBuilder::LoopBuilder( + StructuredMachineAssembler* smasm) + : smasm_(smasm), + header_environment_(NULL), + pending_header_merges_(EnvironmentVector::allocator_type(smasm_->zone())), + pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) { + DCHECK(smasm_->current_environment_ != NULL); + // Create header environment. + header_environment_ = smasm_->CopyForLoopHeader(smasm_->current_environment_); + smasm_->AddGoto(smasm_->current_environment_, header_environment_); + // Create body environment. + Environment* body = smasm_->Copy(header_environment_); + smasm_->AddGoto(header_environment_, body); + smasm_->current_environment_ = body; + DCHECK(!IsDone()); +} + + +void StructuredMachineAssembler::LoopBuilder::Continue() { + DCHECK(!IsDone()); + pending_header_merges_.push_back(smasm_->current_environment_); + smasm_->CopyCurrentAsDead(); +} + + +void StructuredMachineAssembler::LoopBuilder::Break() { + DCHECK(!IsDone()); + pending_exit_merges_.push_back(smasm_->current_environment_); + smasm_->CopyCurrentAsDead(); +} + + +void StructuredMachineAssembler::LoopBuilder::End() { + DCHECK(!IsDone()); + if (smasm_->current_environment_ != NULL) { + Continue(); + } + // Do loop header merges. + smasm_->MergeBackEdgesToLoopHeader(header_environment_, + &pending_header_merges_); + int initial_size = static_cast<int>(header_environment_->variables_.size()); + // Do loop exit merges, truncating loop variables away. 
+ smasm_->Merge(&pending_exit_merges_, initial_size); + for (EnvironmentVector::iterator i = pending_exit_merges_.begin(); + i != pending_exit_merges_.end(); ++i) { + smasm_->AddGoto(*i, smasm_->current_environment_); + } + pending_header_merges_.clear(); + pending_exit_merges_.clear(); + header_environment_ = NULL; + DCHECK(IsDone()); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/structured-machine-assembler.h nodejs-0.11.15/deps/v8/src/compiler/structured-machine-assembler.h --- nodejs-0.11.13/deps/v8/src/compiler/structured-machine-assembler.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/structured-machine-assembler.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,311 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_ +#define V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_ + +#include "src/v8.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/graph-builder.h" +#include "src/compiler/machine-node-factory.h" +#include "src/compiler/machine-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/operator.h" + + +namespace v8 { +namespace internal { +namespace compiler { + +class BasicBlock; +class Schedule; +class StructuredMachineAssembler; + + +class Variable : public ZoneObject { + public: + Node* Get() const; + void Set(Node* value) const; + + private: + Variable(StructuredMachineAssembler* smasm, int offset) + : smasm_(smasm), offset_(offset) {} + + friend class StructuredMachineAssembler; + friend class StructuredMachineAssemblerFriend; + StructuredMachineAssembler* const smasm_; + const int offset_; +}; + + +class StructuredMachineAssembler + : public GraphBuilder, + public MachineNodeFactory<StructuredMachineAssembler> { + public: + class Environment 
: public ZoneObject { + public: + Environment(Zone* zone, BasicBlock* block, bool is_dead_); + + private: + BasicBlock* block_; + NodeVector variables_; + bool is_dead_; + friend class StructuredMachineAssembler; + DISALLOW_COPY_AND_ASSIGN(Environment); + }; + + class IfBuilder; + friend class IfBuilder; + class LoopBuilder; + friend class LoopBuilder; + + StructuredMachineAssembler( + Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder, + MachineType word = MachineOperatorBuilder::pointer_rep()); + virtual ~StructuredMachineAssembler() {} + + Isolate* isolate() const { return zone()->isolate(); } + Zone* zone() const { return graph()->zone(); } + MachineOperatorBuilder* machine() { return &machine_; } + CommonOperatorBuilder* common() { return &common_; } + CallDescriptor* call_descriptor() const { + return call_descriptor_builder_->BuildCallDescriptor(zone()); + } + int parameter_count() const { + return call_descriptor_builder_->parameter_count(); + } + const MachineType* parameter_types() const { + return call_descriptor_builder_->parameter_types(); + } + + // Parameters. + Node* Parameter(int index); + // Variables. + Variable NewVariable(Node* initial_value); + // Control flow. + void Return(Node* value); + + // MachineAssembler is invalid after export. 
+ Schedule* Export(); + + protected: + virtual Node* MakeNode(Operator* op, int input_count, Node** inputs); + + Schedule* schedule() { + DCHECK(ScheduleValid()); + return schedule_; + } + + private: + bool ScheduleValid() { return schedule_ != NULL; } + + typedef std::vector<Environment*, zone_allocator<Environment*> > + EnvironmentVector; + + NodeVector* CurrentVars() { return ¤t_environment_->variables_; } + Node*& VariableAt(Environment* environment, int offset); + Node* GetVariable(int offset); + void SetVariable(int offset, Node* value); + + void AddBranch(Environment* environment, Node* condition, + Environment* true_val, Environment* false_val); + void AddGoto(Environment* from, Environment* to); + BasicBlock* TrampolineFor(BasicBlock* block); + + void CopyCurrentAsDead(); + Environment* Copy(Environment* environment) { + return Copy(environment, static_cast<int>(environment->variables_.size())); + } + Environment* Copy(Environment* environment, int truncate_at); + void Merge(EnvironmentVector* environments, int truncate_at); + Environment* CopyForLoopHeader(Environment* environment); + void MergeBackEdgesToLoopHeader(Environment* header, + EnvironmentVector* environments); + + typedef std::vector<MachineType, zone_allocator<MachineType> > + RepresentationVector; + + Schedule* schedule_; + MachineOperatorBuilder machine_; + CommonOperatorBuilder common_; + MachineCallDescriptorBuilder* call_descriptor_builder_; + Node** parameters_; + Environment* current_environment_; + int number_of_variables_; + + friend class Variable; + // For testing only. + friend class StructuredMachineAssemblerFriend; + DISALLOW_COPY_AND_ASSIGN(StructuredMachineAssembler); +}; + +// IfBuilder constructs of nested if-else expressions which more or less follow +// C semantics. 
Foe example: +// +// if (x) {do_x} else if (y) {do_y} else {do_z} +// +// would look like this: +// +// IfBuilder b; +// b.If(x).Then(); +// do_x +// b.Else(); +// b.If().Then(); +// do_y +// b.Else(); +// do_z +// b.End(); +// +// Then() and Else() can be skipped, representing an empty block in C. +// Combinations like If(x).Then().If(x).Then() are legitimate, but +// Else().Else() is not. That is, once you've nested an If(), you can't get to a +// higher level If() branch. +// TODO(dcarney): describe expressions once the api is finalized. +class StructuredMachineAssembler::IfBuilder { + public: + explicit IfBuilder(StructuredMachineAssembler* smasm); + ~IfBuilder() { + if (!IsDone()) End(); + } + + IfBuilder& If(); // TODO(dcarney): this should take an expression. + IfBuilder& If(Node* condition); + void Then(); + void Else(); + void End(); + + // The next 4 functions are exposed for expression support. + // They will be private once I have a nice expression api. + void And(); + void Or(); + IfBuilder& OpenParen() { + DCHECK(smasm_->current_environment_ != NULL); + CurrentClause()->PushNewExpressionState(); + return *this; + } + IfBuilder& CloseParen() { + DCHECK(smasm_->current_environment_ == NULL); + CurrentClause()->PopExpressionState(); + return *this; + } + + private: + // UnresolvedBranch represents the chain of environments created while + // generating an expression. At this point, a branch Node + // cannot be created, as the target environments of the branch are not yet + // available, so everything required to create the branch Node is + // stored in this structure until the target environments are resolved. + struct UnresolvedBranch : public ZoneObject { + UnresolvedBranch(Environment* environment, Node* condition, + UnresolvedBranch* next) + : environment_(environment), condition_(condition), next_(next) {} + // environment_ will eventually be terminated by a branch on condition_. 
+ Environment* environment_; + Node* condition_; + // next_ is the next link in the UnresolvedBranch chain, and will be + // either the true or false branch jumped to from environment_. + UnresolvedBranch* next_; + }; + + struct ExpressionState { + int pending_then_size_; + int pending_else_size_; + }; + + typedef std::vector<ExpressionState, zone_allocator<ExpressionState> > + ExpressionStates; + typedef std::vector<UnresolvedBranch*, zone_allocator<UnresolvedBranch*> > + PendingMergeStack; + struct IfClause; + typedef std::vector<IfClause*, zone_allocator<IfClause*> > IfClauses; + + struct PendingMergeStackRange { + PendingMergeStack* merge_stack_; + int start_; + int size_; + }; + + enum CombineType { kCombineThen, kCombineElse }; + enum ResolutionType { kExpressionTerm, kExpressionDone }; + + // IfClause represents one level of if-then-else nesting plus the associated + // expression. + // A call to If() triggers creation of a new nesting level after expression + // creation is complete - ie Then() or Else() has been called. + struct IfClause : public ZoneObject { + IfClause(Zone* zone, int initial_environment_size); + void CopyEnvironments(const PendingMergeStackRange& data, + EnvironmentVector* environments); + void ResolvePendingMerges(StructuredMachineAssembler* smasm, + CombineType combine_type, + ResolutionType resolution_type); + PendingMergeStackRange ComputeRelevantMerges(CombineType combine_type); + void FinalizeBranches(StructuredMachineAssembler* smasm, + const PendingMergeStackRange& offset_data, + CombineType combine_type, + Environment* then_environment, + Environment* else_environment); + void PushNewExpressionState(); + void PopExpressionState(); + + // Each invocation of And or Or creates a new UnresolvedBranch. + // These form a singly-linked list, of which we only need to keep track of + // the tail. 
On creation of an UnresolvedBranch, pending_then_merges_ and + // pending_else_merges_ each push a copy, which are removed on merges to the + // respective environment. + UnresolvedBranch* unresolved_list_tail_; + int initial_environment_size_; + // expression_states_ keeps track of the state of pending_*_merges_, + // pushing and popping the lengths of these on + // OpenParend() and CloseParend() respectively. + ExpressionStates expression_states_; + PendingMergeStack pending_then_merges_; + PendingMergeStack pending_else_merges_; + // then_environment_ is created iff there is a call to Then(), otherwise + // branches which would merge to it merge to the exit environment instead. + // Likewise for else_environment_. + Environment* then_environment_; + Environment* else_environment_; + }; + + IfClause* CurrentClause() { return if_clauses_.back(); } + void AddCurrentToPending(); + void PushNewIfClause(); + bool IsDone() { return if_clauses_.empty(); } + + StructuredMachineAssembler* smasm_; + IfClauses if_clauses_; + EnvironmentVector pending_exit_merges_; + DISALLOW_COPY_AND_ASSIGN(IfBuilder); +}; + + +class StructuredMachineAssembler::LoopBuilder { + public: + explicit LoopBuilder(StructuredMachineAssembler* smasm); + ~LoopBuilder() { + if (!IsDone()) End(); + } + + void Break(); + void Continue(); + void End(); + + private: + friend class StructuredMachineAssembler; + bool IsDone() { return header_environment_ == NULL; } + + StructuredMachineAssembler* smasm_; + Environment* header_environment_; + EnvironmentVector pending_header_merges_; + EnvironmentVector pending_exit_merges_; + DISALLOW_COPY_AND_ASSIGN(LoopBuilder); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/typer.cc nodejs-0.11.15/deps/v8/src/compiler/typer.cc --- nodejs-0.11.13/deps/v8/src/compiler/typer.cc 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/compiler/typer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,842 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/graph-inl.h" +#include "src/compiler/js-operator.h" +#include "src/compiler/node.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/simplified-operator.h" +#include "src/compiler/typer.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Typer::Typer(Zone* zone) : zone_(zone) { + Type* number = Type::Number(zone); + Type* signed32 = Type::Signed32(zone); + Type* unsigned32 = Type::Unsigned32(zone); + Type* integral32 = Type::Integral32(zone); + Type* object = Type::Object(zone); + Type* undefined = Type::Undefined(zone); + number_fun0_ = Type::Function(number, zone); + number_fun1_ = Type::Function(number, number, zone); + number_fun2_ = Type::Function(number, number, number, zone); + imul_fun_ = Type::Function(signed32, integral32, integral32, zone); + +#define NATIVE_TYPE(sem, rep) \ + Type::Intersect(Type::sem(zone), Type::rep(zone), zone) + // TODO(rossberg): Use range types for more precision, once we have them. 
+ Type* int8 = NATIVE_TYPE(SignedSmall, UntaggedInt8); + Type* int16 = NATIVE_TYPE(SignedSmall, UntaggedInt16); + Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32); + Type* uint8 = NATIVE_TYPE(UnsignedSmall, UntaggedInt8); + Type* uint16 = NATIVE_TYPE(UnsignedSmall, UntaggedInt16); + Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32); + Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32); + Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64); +#undef NATIVE_TYPE + Type* buffer = Type::Buffer(zone); + Type* int8_array = Type::Array(int8, zone); + Type* int16_array = Type::Array(int16, zone); + Type* int32_array = Type::Array(int32, zone); + Type* uint8_array = Type::Array(uint8, zone); + Type* uint16_array = Type::Array(uint16, zone); + Type* uint32_array = Type::Array(uint32, zone); + Type* float32_array = Type::Array(float32, zone); + Type* float64_array = Type::Array(float64, zone); + Type* arg1 = Type::Union(unsigned32, object, zone); + Type* arg2 = Type::Union(unsigned32, undefined, zone); + Type* arg3 = arg2; + array_buffer_fun_ = Type::Function(buffer, unsigned32, zone); + int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone); + int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone); + int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone); + uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone); + uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone); + uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone); + float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone); + float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone); +} + + +class Typer::Visitor : public NullNodeVisitor { + public: + Visitor(Typer* typer, MaybeHandle<Context> context) + : typer_(typer), context_(context) {} + + Bounds TypeNode(Node* node) { + switch (node->opcode()) { +#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node); + 
VALUE_OP_LIST(DECLARE_CASE) +#undef DECLARE_CASE + +#define DECLARE_CASE(x) case IrOpcode::k##x: + CONTROL_OP_LIST(DECLARE_CASE) +#undef DECLARE_CASE + break; + } + return Bounds(Type::None(zone())); + } + + Type* TypeConstant(Handle<Object> value); + + protected: +#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node); + VALUE_OP_LIST(DECLARE_METHOD) +#undef DECLARE_METHOD + + Bounds OperandType(Node* node, int i) { + return NodeProperties::GetBounds(NodeProperties::GetValueInput(node, i)); + } + + Type* ContextType(Node* node) { + Bounds result = + NodeProperties::GetBounds(NodeProperties::GetContextInput(node)); + DCHECK(result.upper->Is(Type::Internal())); + DCHECK(result.lower->Equals(result.upper)); + return result.upper; + } + + Zone* zone() { return typer_->zone(); } + Isolate* isolate() { return typer_->isolate(); } + MaybeHandle<Context> context() { return context_; } + + private: + Typer* typer_; + MaybeHandle<Context> context_; +}; + + +class Typer::RunVisitor : public Typer::Visitor { + public: + RunVisitor(Typer* typer, MaybeHandle<Context> context) + : Visitor(typer, context), + phis(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {} + + GenericGraphVisit::Control Pre(Node* node) { + return NodeProperties::IsControl(node) + && node->opcode() != IrOpcode::kEnd + && node->opcode() != IrOpcode::kMerge + && node->opcode() != IrOpcode::kReturn + ? GenericGraphVisit::SKIP : GenericGraphVisit::CONTINUE; + } + + GenericGraphVisit::Control Post(Node* node) { + Bounds bounds = TypeNode(node); + if (node->opcode() == IrOpcode::kPhi) { + // Remember phis for least fixpoint iteration. 
+ phis.insert(node); + } else { + NodeProperties::SetBounds(node, bounds); + } + return GenericGraphVisit::CONTINUE; + } + + NodeSet phis; +}; + + +class Typer::NarrowVisitor : public Typer::Visitor { + public: + NarrowVisitor(Typer* typer, MaybeHandle<Context> context) + : Visitor(typer, context) {} + + GenericGraphVisit::Control Pre(Node* node) { + Bounds previous = NodeProperties::GetBounds(node); + Bounds bounds = TypeNode(node); + NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone())); + DCHECK(bounds.Narrows(previous)); + // Stop when nothing changed (but allow reentry in case it does later). + return previous.Narrows(bounds) + ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER; + } + + GenericGraphVisit::Control Post(Node* node) { + return GenericGraphVisit::REENTER; + } +}; + + +class Typer::WidenVisitor : public Typer::Visitor { + public: + WidenVisitor(Typer* typer, MaybeHandle<Context> context) + : Visitor(typer, context) {} + + GenericGraphVisit::Control Pre(Node* node) { + Bounds previous = NodeProperties::GetBounds(node); + Bounds bounds = TypeNode(node); + DCHECK(previous.lower->Is(bounds.lower)); + DCHECK(previous.upper->Is(bounds.upper)); + NodeProperties::SetBounds(node, bounds); // TODO(rossberg): Either? + // Stop when nothing changed (but allow reentry in case it does later). + return bounds.Narrows(previous) + ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER; + } + + GenericGraphVisit::Control Post(Node* node) { + return GenericGraphVisit::REENTER; + } +}; + + +void Typer::Run(Graph* graph, MaybeHandle<Context> context) { + RunVisitor typing(this, context); + graph->VisitNodeInputsFromEnd(&typing); + // Find least fixpoint. 
+ for (NodeSetIter i = typing.phis.begin(); i != typing.phis.end(); ++i) { + Widen(graph, *i, context); + } +} + + +void Typer::Narrow(Graph* graph, Node* start, MaybeHandle<Context> context) { + NarrowVisitor typing(this, context); + graph->VisitNodeUsesFrom(start, &typing); +} + + +void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) { + WidenVisitor typing(this, context); + graph->VisitNodeUsesFrom(start, &typing); +} + + +void Typer::Init(Node* node) { + Visitor typing(this, MaybeHandle<Context>()); + Bounds bounds = typing.TypeNode(node); + NodeProperties::SetBounds(node, bounds); +} + + +// Common operators. +Bounds Typer::Visitor::TypeParameter(Node* node) { + return Bounds::Unbounded(zone()); +} + + +Bounds Typer::Visitor::TypeInt32Constant(Node* node) { + // TODO(titzer): only call Type::Of() if the type is not already known. + return Bounds(Type::Of(ValueOf<int32_t>(node->op()), zone())); +} + + +Bounds Typer::Visitor::TypeInt64Constant(Node* node) { + // TODO(titzer): only call Type::Of() if the type is not already known. + return Bounds( + Type::Of(static_cast<double>(ValueOf<int64_t>(node->op())), zone())); +} + + +Bounds Typer::Visitor::TypeFloat64Constant(Node* node) { + // TODO(titzer): only call Type::Of() if the type is not already known. + return Bounds(Type::Of(ValueOf<double>(node->op()), zone())); +} + + +Bounds Typer::Visitor::TypeNumberConstant(Node* node) { + // TODO(titzer): only call Type::Of() if the type is not already known. 
+ return Bounds(Type::Of(ValueOf<double>(node->op()), zone())); +} + + +Bounds Typer::Visitor::TypeHeapConstant(Node* node) { + return Bounds(TypeConstant(ValueOf<Handle<Object> >(node->op()))); +} + + +Bounds Typer::Visitor::TypeExternalConstant(Node* node) { + return Bounds(Type::Internal(zone())); +} + + +Bounds Typer::Visitor::TypePhi(Node* node) { + int arity = OperatorProperties::GetValueInputCount(node->op()); + Bounds bounds = OperandType(node, 0); + for (int i = 1; i < arity; ++i) { + bounds = Bounds::Either(bounds, OperandType(node, i), zone()); + } + return bounds; +} + + +Bounds Typer::Visitor::TypeEffectPhi(Node* node) { + return Bounds(Type::None(zone())); +} + + +Bounds Typer::Visitor::TypeFrameState(Node* node) { + return Bounds(Type::None(zone())); +} + + +Bounds Typer::Visitor::TypeStateValues(Node* node) { + return Bounds(Type::None(zone())); +} + + +Bounds Typer::Visitor::TypeCall(Node* node) { + return Bounds::Unbounded(zone()); +} + + +Bounds Typer::Visitor::TypeProjection(Node* node) { + // TODO(titzer): use the output type of the input to determine the bounds. + return Bounds::Unbounded(zone()); +} + + +// JS comparison operators. + +#define DEFINE_METHOD(x) \ + Bounds Typer::Visitor::Type##x(Node* node) { \ + return Bounds(Type::Boolean(zone())); \ + } +JS_COMPARE_BINOP_LIST(DEFINE_METHOD) +#undef DEFINE_METHOD + + +// JS bitwise operators. 
+ +Bounds Typer::Visitor::TypeJSBitwiseOr(Node* node) { + Bounds left = OperandType(node, 0); + Bounds right = OperandType(node, 1); + Type* upper = Type::Union(left.upper, right.upper, zone()); + if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone()); + Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone()); + return Bounds(lower, upper); +} + + +Bounds Typer::Visitor::TypeJSBitwiseAnd(Node* node) { + Bounds left = OperandType(node, 0); + Bounds right = OperandType(node, 1); + Type* upper = Type::Union(left.upper, right.upper, zone()); + if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone()); + Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone()); + return Bounds(lower, upper); +} + + +Bounds Typer::Visitor::TypeJSBitwiseXor(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())); +} + + +Bounds Typer::Visitor::TypeJSShiftLeft(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())); +} + + +Bounds Typer::Visitor::TypeJSShiftRight(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())); +} + + +Bounds Typer::Visitor::TypeJSShiftRightLogical(Node* node) { + return Bounds(Type::UnsignedSmall(zone()), Type::Unsigned32(zone())); +} + + +// JS arithmetic operators. + +Bounds Typer::Visitor::TypeJSAdd(Node* node) { + Bounds left = OperandType(node, 0); + Bounds right = OperandType(node, 1); + Type* lower = + left.lower->Is(Type::None()) || right.lower->Is(Type::None()) ? + Type::None(zone()) : + left.lower->Is(Type::Number()) && right.lower->Is(Type::Number()) ? + Type::SignedSmall(zone()) : + left.lower->Is(Type::String()) || right.lower->Is(Type::String()) ? + Type::String(zone()) : Type::None(zone()); + Type* upper = + left.upper->Is(Type::None()) && right.upper->Is(Type::None()) ? + Type::None(zone()) : + left.upper->Is(Type::Number()) && right.upper->Is(Type::Number()) ? 
+ Type::Number(zone()) : + left.upper->Is(Type::String()) || right.upper->Is(Type::String()) ? + Type::String(zone()) : Type::NumberOrString(zone()); + return Bounds(lower, upper); +} + + +Bounds Typer::Visitor::TypeJSSubtract(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeJSMultiply(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeJSDivide(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeJSModulus(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Number(zone())); +} + + +// JS unary operators. + +Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeJSTypeOf(Node* node) { + return Bounds(Type::InternalizedString(zone())); +} + + +// JS conversion operators. + +Bounds Typer::Visitor::TypeJSToBoolean(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeJSToNumber(Node* node) { + return Bounds(Type::SignedSmall(zone()), Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeJSToString(Node* node) { + return Bounds(Type::None(zone()), Type::String(zone())); +} + + +Bounds Typer::Visitor::TypeJSToName(Node* node) { + return Bounds(Type::None(zone()), Type::Name(zone())); +} + + +Bounds Typer::Visitor::TypeJSToObject(Node* node) { + return Bounds(Type::None(zone()), Type::Object(zone())); +} + + +// JS object operators. + +Bounds Typer::Visitor::TypeJSCreate(Node* node) { + return Bounds(Type::None(zone()), Type::Object(zone())); +} + + +Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) { + Bounds object = OperandType(node, 0); + Bounds name = OperandType(node, 1); + Bounds result = Bounds::Unbounded(zone()); + // TODO(rossberg): Use range types and sized array types to filter undefined. 
+ if (object.lower->IsArray() && name.lower->Is(Type::Integral32())) { + result.lower = Type::Union( + object.lower->AsArray()->Element(), Type::Undefined(zone()), zone()); + } + if (object.upper->IsArray() && name.upper->Is(Type::Integral32())) { + result.upper = Type::Union( + object.upper->AsArray()->Element(), Type::Undefined(zone()), zone()); + } + return result; +} + + +Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) { + return Bounds::Unbounded(zone()); +} + + +Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) { + return Bounds(Type::None(zone())); +} + + +Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) { + return Bounds(Type::None(zone())); +} + + +Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeJSHasProperty(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +// JS context operators. + +Bounds Typer::Visitor::TypeJSLoadContext(Node* node) { + Bounds outer = OperandType(node, 0); + DCHECK(outer.upper->Is(Type::Internal())); + DCHECK(outer.lower->Equals(outer.upper)); + ContextAccess access = OpParameter<ContextAccess>(node); + Type* context_type = outer.upper; + MaybeHandle<Context> context; + if (context_type->IsConstant()) { + context = Handle<Context>::cast(context_type->AsConstant()->Value()); + } + // Walk context chain (as far as known), mirroring dynamic lookup. + // Since contexts are mutable, the information is only useful as a lower + // bound. + // TODO(rossberg): Could use scope info to fix upper bounds for constant + // bindings if we know that this code is never shared. 
+ for (int i = access.depth(); i > 0; --i) { + if (context_type->IsContext()) { + context_type = context_type->AsContext()->Outer(); + if (context_type->IsConstant()) { + context = Handle<Context>::cast(context_type->AsConstant()->Value()); + } + } else { + context = handle(context.ToHandleChecked()->previous(), isolate()); + } + } + if (context.is_null()) { + return Bounds::Unbounded(zone()); + } else { + Handle<Object> value = + handle(context.ToHandleChecked()->get(access.index()), isolate()); + Type* lower = TypeConstant(value); + return Bounds(lower, Type::Any(zone())); + } +} + + +Bounds Typer::Visitor::TypeJSStoreContext(Node* node) { + return Bounds(Type::None(zone())); +} + + +Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) { + Type* outer = ContextType(node); + return Bounds(Type::Context(outer, zone())); +} + + +Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) { + Type* outer = ContextType(node); + return Bounds(Type::Context(outer, zone())); +} + + +Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) { + Type* outer = ContextType(node); + return Bounds(Type::Context(outer, zone())); +} + + +Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) { + Type* outer = ContextType(node); + return Bounds(Type::Context(outer, zone())); +} + + +Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) { + // TODO(rossberg): this is probably incorrect + Type* outer = ContextType(node); + return Bounds(Type::Context(outer, zone())); +} + + +Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) { + Type* outer = ContextType(node); + return Bounds(Type::Context(outer, zone())); +} + + +// JS other operators. 
+ +Bounds Typer::Visitor::TypeJSYield(Node* node) { + return Bounds::Unbounded(zone()); +} + + +Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) { + return Bounds(Type::None(zone()), Type::Receiver(zone())); +} + + +Bounds Typer::Visitor::TypeJSCallFunction(Node* node) { + Bounds fun = OperandType(node, 0); + Type* lower = fun.lower->IsFunction() + ? fun.lower->AsFunction()->Result() : Type::None(zone()); + Type* upper = fun.upper->IsFunction() + ? fun.upper->AsFunction()->Result() : Type::Any(zone()); + return Bounds(lower, upper); +} + + +Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) { + return Bounds::Unbounded(zone()); +} + + +Bounds Typer::Visitor::TypeJSDebugger(Node* node) { + return Bounds::Unbounded(zone()); +} + + +// Simplified operators. + +Bounds Typer::Visitor::TypeBooleanNot(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeNumberEqual(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeNumberLessThan(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeNumberAdd(Node* node) { + return Bounds(Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeNumberSubtract(Node* node) { + return Bounds(Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeNumberMultiply(Node* node) { + return Bounds(Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeNumberDivide(Node* node) { + return Bounds(Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeNumberModulus(Node* node) { + return Bounds(Type::Number(zone())); +} + + +Bounds Typer::Visitor::TypeNumberToInt32(Node* node) { + Bounds arg = OperandType(node, 0); + Type* s32 = Type::Signed32(zone()); + Type* lower = arg.lower->Is(s32) ? arg.lower : s32; + Type* upper = arg.upper->Is(s32) ? 
arg.upper : s32; + return Bounds(lower, upper); +} + + +Bounds Typer::Visitor::TypeNumberToUint32(Node* node) { + Bounds arg = OperandType(node, 0); + Type* u32 = Type::Unsigned32(zone()); + Type* lower = arg.lower->Is(u32) ? arg.lower : u32; + Type* upper = arg.upper->Is(u32) ? arg.upper : u32; + return Bounds(lower, upper); +} + + +Bounds Typer::Visitor::TypeReferenceEqual(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeStringEqual(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeStringLessThan(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) { + return Bounds(Type::Boolean(zone())); +} + + +Bounds Typer::Visitor::TypeStringAdd(Node* node) { + return Bounds(Type::String(zone())); +} + + +Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) { + // TODO(titzer): type is type of input, representation is Word32. + return Bounds(Type::Integral32()); +} + + +Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) { + return Bounds(Type::Integral32()); // TODO(titzer): add appropriate rep +} + + +Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) { + // TODO(titzer): type is type of input, representation is Float64. + return Bounds(Type::Number()); +} + + +Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) { + // TODO(titzer): type is type of input, representation is Tagged. + return Bounds(Type::Integral32()); +} + + +Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) { + // TODO(titzer): type is type of input, representation is Tagged. + return Bounds(Type::Unsigned32()); +} + + +Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) { + // TODO(titzer): type is type of input, representation is Tagged. + return Bounds(Type::Number()); +} + + +Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) { + // TODO(titzer): type is type of input, representation is Bit. 
+ return Bounds(Type::Boolean()); +} + + +Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) { + // TODO(titzer): type is type of input, representation is Tagged. + return Bounds(Type::Boolean()); +} + + +Bounds Typer::Visitor::TypeLoadField(Node* node) { + return Bounds(FieldAccessOf(node->op()).type); +} + + +Bounds Typer::Visitor::TypeLoadElement(Node* node) { + return Bounds(ElementAccessOf(node->op()).type); +} + + +Bounds Typer::Visitor::TypeStoreField(Node* node) { + return Bounds(Type::None()); +} + + +Bounds Typer::Visitor::TypeStoreElement(Node* node) { + return Bounds(Type::None()); +} + + +// Machine operators. + +// TODO(rossberg): implement +#define DEFINE_METHOD(x) \ + Bounds Typer::Visitor::Type##x(Node* node) { return Bounds(Type::None()); } +MACHINE_OP_LIST(DEFINE_METHOD) +#undef DEFINE_METHOD + + +// Heap constants. + +Type* Typer::Visitor::TypeConstant(Handle<Object> value) { + if (value->IsJSFunction() && JSFunction::cast(*value)->IsBuiltin() && + !context().is_null()) { + Handle<Context> native = + handle(context().ToHandleChecked()->native_context(), isolate()); + if (*value == native->math_abs_fun()) { + return typer_->number_fun1_; // TODO(rossberg): can't express overloading + } else if (*value == native->math_acos_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_asin_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_atan_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_atan2_fun()) { + return typer_->number_fun2_; + } else if (*value == native->math_ceil_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_cos_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_exp_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_floor_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_imul_fun()) { + return typer_->imul_fun_; + } else if (*value == native->math_log_fun()) { 
+ return typer_->number_fun1_; + } else if (*value == native->math_pow_fun()) { + return typer_->number_fun2_; + } else if (*value == native->math_random_fun()) { + return typer_->number_fun0_; + } else if (*value == native->math_round_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_sin_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_sqrt_fun()) { + return typer_->number_fun1_; + } else if (*value == native->math_tan_fun()) { + return typer_->number_fun1_; + } else if (*value == native->array_buffer_fun()) { + return typer_->array_buffer_fun_; + } else if (*value == native->int8_array_fun()) { + return typer_->int8_array_fun_; + } else if (*value == native->int16_array_fun()) { + return typer_->int16_array_fun_; + } else if (*value == native->int32_array_fun()) { + return typer_->int32_array_fun_; + } else if (*value == native->uint8_array_fun()) { + return typer_->uint8_array_fun_; + } else if (*value == native->uint16_array_fun()) { + return typer_->uint16_array_fun_; + } else if (*value == native->uint32_array_fun()) { + return typer_->uint32_array_fun_; + } else if (*value == native->float32_array_fun()) { + return typer_->float32_array_fun_; + } else if (*value == native->float64_array_fun()) { + return typer_->float64_array_fun_; + } + } + return Type::Constant(value, zone()); +} + + +namespace { + +class TyperDecorator : public GraphDecorator { + public: + explicit TyperDecorator(Typer* typer) : typer_(typer) {} + virtual void Decorate(Node* node) { typer_->Init(node); } + + private: + Typer* typer_; +}; + +} + + +void Typer::DecorateGraph(Graph* graph) { + graph->AddDecorator(new (zone()) TyperDecorator(this)); +} + +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/typer.h nodejs-0.11.15/deps/v8/src/compiler/typer.h --- nodejs-0.11.13/deps/v8/src/compiler/typer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/typer.h 2015-01-20 
21:22:17.000000000 +0000 @@ -0,0 +1,57 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_TYPER_H_ +#define V8_COMPILER_TYPER_H_ + +#include "src/v8.h" + +#include "src/compiler/graph.h" +#include "src/compiler/opcodes.h" +#include "src/types.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Typer { + public: + explicit Typer(Zone* zone); + + void Init(Node* node); + void Run(Graph* graph, MaybeHandle<Context> context); + void Narrow(Graph* graph, Node* node, MaybeHandle<Context> context); + void Widen(Graph* graph, Node* node, MaybeHandle<Context> context); + + void DecorateGraph(Graph* graph); + + Zone* zone() { return zone_; } + Isolate* isolate() { return zone_->isolate(); } + + private: + class Visitor; + class RunVisitor; + class NarrowVisitor; + class WidenVisitor; + + Zone* zone_; + Type* number_fun0_; + Type* number_fun1_; + Type* number_fun2_; + Type* imul_fun_; + Type* array_buffer_fun_; + Type* int8_array_fun_; + Type* int16_array_fun_; + Type* int32_array_fun_; + Type* uint8_array_fun_; + Type* uint16_array_fun_; + Type* uint32_array_fun_; + Type* float32_array_fun_; + Type* float64_array_fun_; +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_TYPER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/verifier.cc nodejs-0.11.15/deps/v8/src/compiler/verifier.cc --- nodejs-0.11.13/deps/v8/src/compiler/verifier.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/verifier.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,245 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/verifier.h" + +#include "src/compiler/generic-algorithm.h" +#include "src/compiler/generic-node-inl.h" +#include "src/compiler/generic-node.h" +#include "src/compiler/graph-inl.h" +#include "src/compiler/graph.h" +#include "src/compiler/node.h" +#include "src/compiler/node-properties-inl.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/opcodes.h" +#include "src/compiler/operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + + +static bool IsDefUseChainLinkPresent(Node* def, Node* use) { + Node::Uses uses = def->uses(); + for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) { + if (*it == use) return true; + } + return false; +} + + +static bool IsUseDefChainLinkPresent(Node* def, Node* use) { + Node::Inputs inputs = use->inputs(); + for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) { + if (*it == def) return true; + } + return false; +} + + +class Verifier::Visitor : public NullNodeVisitor { + public: + explicit Visitor(Zone* zone) + : reached_from_start(NodeSet::key_compare(), + NodeSet::allocator_type(zone)), + reached_from_end(NodeSet::key_compare(), + NodeSet::allocator_type(zone)) {} + + // Fulfills the PreNodeCallback interface. + GenericGraphVisit::Control Pre(Node* node); + + bool from_start; + NodeSet reached_from_start; + NodeSet reached_from_end; +}; + + +GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) { + int value_count = OperatorProperties::GetValueInputCount(node->op()); + int context_count = OperatorProperties::GetContextInputCount(node->op()); + int effect_count = OperatorProperties::GetEffectInputCount(node->op()); + int control_count = OperatorProperties::GetControlInputCount(node->op()); + + // Verify number of inputs matches up. + int input_count = value_count + context_count + effect_count + control_count; + CHECK_EQ(input_count, node->InputCount()); + + // Verify all value inputs actually produce a value. 
+ for (int i = 0; i < value_count; ++i) { + Node* value = NodeProperties::GetValueInput(node, i); + CHECK(OperatorProperties::HasValueOutput(value->op())); + CHECK(IsDefUseChainLinkPresent(value, node)); + CHECK(IsUseDefChainLinkPresent(value, node)); + } + + // Verify all context inputs are value nodes. + for (int i = 0; i < context_count; ++i) { + Node* context = NodeProperties::GetContextInput(node); + CHECK(OperatorProperties::HasValueOutput(context->op())); + CHECK(IsDefUseChainLinkPresent(context, node)); + CHECK(IsUseDefChainLinkPresent(context, node)); + } + + // Verify all effect inputs actually have an effect. + for (int i = 0; i < effect_count; ++i) { + Node* effect = NodeProperties::GetEffectInput(node); + CHECK(OperatorProperties::HasEffectOutput(effect->op())); + CHECK(IsDefUseChainLinkPresent(effect, node)); + CHECK(IsUseDefChainLinkPresent(effect, node)); + } + + // Verify all control inputs are control nodes. + for (int i = 0; i < control_count; ++i) { + Node* control = NodeProperties::GetControlInput(node, i); + CHECK(OperatorProperties::HasControlOutput(control->op())); + CHECK(IsDefUseChainLinkPresent(control, node)); + CHECK(IsUseDefChainLinkPresent(control, node)); + } + + // Verify all successors are projections if multiple value outputs exist. + if (OperatorProperties::GetValueOutputCount(node->op()) > 1) { + Node::Uses uses = node->uses(); + for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) { + CHECK(!NodeProperties::IsValueEdge(it.edge()) || + (*it)->opcode() == IrOpcode::kProjection || + (*it)->opcode() == IrOpcode::kParameter); + } + } + + switch (node->opcode()) { + case IrOpcode::kStart: + // Start has no inputs. + CHECK_EQ(0, input_count); + break; + case IrOpcode::kEnd: + // End has no outputs. 
+ CHECK(!OperatorProperties::HasValueOutput(node->op())); + CHECK(!OperatorProperties::HasEffectOutput(node->op())); + CHECK(!OperatorProperties::HasControlOutput(node->op())); + break; + case IrOpcode::kDead: + // Dead is never connected to the graph. + UNREACHABLE(); + case IrOpcode::kBranch: { + // Branch uses are IfTrue and IfFalse. + Node::Uses uses = node->uses(); + bool got_true = false, got_false = false; + for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) { + CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) || + ((*it)->opcode() == IrOpcode::kIfFalse && !got_false)); + if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true; + if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true; + } + // TODO(rossberg): Currently fails for various tests. + // CHECK(got_true && got_false); + break; + } + case IrOpcode::kIfTrue: + case IrOpcode::kIfFalse: + CHECK_EQ(IrOpcode::kBranch, + NodeProperties::GetControlInput(node, 0)->opcode()); + break; + case IrOpcode::kLoop: + case IrOpcode::kMerge: + break; + case IrOpcode::kReturn: + // TODO(rossberg): check successor is End + break; + case IrOpcode::kThrow: + // TODO(rossberg): what are the constraints on these? + break; + case IrOpcode::kParameter: { + // Parameters have the start node as inputs. + CHECK_EQ(1, input_count); + CHECK_EQ(IrOpcode::kStart, + NodeProperties::GetValueInput(node, 0)->opcode()); + // Parameter has an input that produces enough values. + int index = static_cast<Operator1<int>*>(node->op())->parameter(); + Node* input = NodeProperties::GetValueInput(node, 0); + // Currently, parameter indices start at -1 instead of 0. + CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index + 1); + break; + } + case IrOpcode::kInt32Constant: + case IrOpcode::kInt64Constant: + case IrOpcode::kFloat64Constant: + case IrOpcode::kExternalConstant: + case IrOpcode::kNumberConstant: + case IrOpcode::kHeapConstant: + // Constants have no inputs. 
+ CHECK_EQ(0, input_count); + break; + case IrOpcode::kPhi: { + // Phi input count matches parent control node. + CHECK_EQ(1, control_count); + Node* control = NodeProperties::GetControlInput(node, 0); + CHECK_EQ(value_count, + OperatorProperties::GetControlInputCount(control->op())); + break; + } + case IrOpcode::kEffectPhi: { + // EffectPhi input count matches parent control node. + CHECK_EQ(1, control_count); + Node* control = NodeProperties::GetControlInput(node, 0); + CHECK_EQ(effect_count, + OperatorProperties::GetControlInputCount(control->op())); + break; + } + case IrOpcode::kLazyDeoptimization: + // TODO(jarin): what are the constraints on these? + break; + case IrOpcode::kDeoptimize: + // TODO(jarin): what are the constraints on these? + break; + case IrOpcode::kFrameState: + // TODO(jarin): what are the constraints on these? + break; + case IrOpcode::kCall: + // TODO(rossberg): what are the constraints on these? + break; + case IrOpcode::kContinuation: + // TODO(jarin): what are the constraints on these? + break; + case IrOpcode::kProjection: { + // Projection has an input that produces enough values. + int index = static_cast<Operator1<int>*>(node->op())->parameter(); + Node* input = NodeProperties::GetValueInput(node, 0); + CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index); + break; + } + default: + // TODO(rossberg): Check other node kinds. + break; + } + + if (from_start) { + reached_from_start.insert(node); + } else { + reached_from_end.insert(node); + } + + return GenericGraphVisit::CONTINUE; +} + + +void Verifier::Run(Graph* graph) { + Visitor visitor(graph->zone()); + + CHECK_NE(NULL, graph->start()); + visitor.from_start = true; + graph->VisitNodeUsesFromStart(&visitor); + CHECK_NE(NULL, graph->end()); + visitor.from_start = false; + graph->VisitNodeInputsFromEnd(&visitor); + + // All control nodes reachable from end are reachable from start. 
+ for (NodeSet::iterator it = visitor.reached_from_end.begin(); + it != visitor.reached_from_end.end(); ++it) { + CHECK(!NodeProperties::IsControl(*it) || + visitor.reached_from_start.count(*it)); + } +} +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler/verifier.h nodejs-0.11.15/deps/v8/src/compiler/verifier.h --- nodejs-0.11.13/deps/v8/src/compiler/verifier.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/verifier.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,28 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_VERIFIER_H_ +#define V8_COMPILER_VERIFIER_H_ + +#include "src/v8.h" + +#include "src/compiler/graph.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class Verifier { + public: + static void Run(Graph* graph); + + private: + class Visitor; + DISALLOW_COPY_AND_ASSIGN(Verifier); +}; +} +} +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_VERIFIER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/x64/code-generator-x64.cc nodejs-0.11.15/deps/v8/src/compiler/x64/code-generator-x64.cc --- nodejs-0.11.13/deps/v8/src/compiler/x64/code-generator-x64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/x64/code-generator-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1001 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/code-generator.h" + +#include "src/compiler/code-generator-impl.h" +#include "src/compiler/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties-inl.h" +#include "src/scopes.h" +#include "src/x64/assembler-x64.h" +#include "src/x64/macro-assembler-x64.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define __ masm()-> + + +// TODO(turbofan): Cleanup these hacks. +enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference }; + + +struct Immediate64 { + uint64_t value; + Handle<Object> handle; + ExternalReference reference; + Immediate64Type type; +}; + + +enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand }; + + +struct RegisterOrOperand { + RegisterOrOperand() : operand(no_reg, 0) {} + Register reg; + DoubleRegister double_reg; + Operand operand; + RegisterOrOperandType type; +}; + + +// Adds X64 specific methods for decoding operands. +class X64OperandConverter : public InstructionOperandConverter { + public: + X64OperandConverter(CodeGenerator* gen, Instruction* instr) + : InstructionOperandConverter(gen, instr) {} + + RegisterOrOperand InputRegisterOrOperand(int index) { + return ToRegisterOrOperand(instr_->InputAt(index)); + } + + Immediate InputImmediate(int index) { + return ToImmediate(instr_->InputAt(index)); + } + + RegisterOrOperand OutputRegisterOrOperand() { + return ToRegisterOrOperand(instr_->Output()); + } + + Immediate64 InputImmediate64(int index) { + return ToImmediate64(instr_->InputAt(index)); + } + + Immediate64 ToImmediate64(InstructionOperand* operand) { + Constant constant = ToConstant(operand); + Immediate64 immediate; + immediate.value = 0xbeefdeaddeefbeed; + immediate.type = kImm64Value; + switch (constant.type()) { + case Constant::kInt32: + case Constant::kInt64: + immediate.value = constant.ToInt64(); + return immediate; + case Constant::kFloat64: + immediate.type = kImm64Handle; + immediate.handle = + 
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED); + return immediate; + case Constant::kExternalReference: + immediate.type = kImm64Reference; + immediate.reference = constant.ToExternalReference(); + return immediate; + case Constant::kHeapObject: + immediate.type = kImm64Handle; + immediate.handle = constant.ToHeapObject(); + return immediate; + } + UNREACHABLE(); + return immediate; + } + + Immediate ToImmediate(InstructionOperand* operand) { + Constant constant = ToConstant(operand); + switch (constant.type()) { + case Constant::kInt32: + return Immediate(constant.ToInt32()); + case Constant::kInt64: + case Constant::kFloat64: + case Constant::kExternalReference: + case Constant::kHeapObject: + break; + } + UNREACHABLE(); + return Immediate(-1); + } + + Operand ToOperand(InstructionOperand* op, int extra = 0) { + RegisterOrOperand result = ToRegisterOrOperand(op, extra); + DCHECK_EQ(kOperand, result.type); + return result.operand; + } + + RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) { + RegisterOrOperand result; + if (op->IsRegister()) { + DCHECK(extra == 0); + result.type = kRegister; + result.reg = ToRegister(op); + return result; + } else if (op->IsDoubleRegister()) { + DCHECK(extra == 0); + DCHECK(extra == 0); + result.type = kDoubleRegister; + result.double_reg = ToDoubleRegister(op); + return result; + } + + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + + result.type = kOperand; + // The linkage computes where all spill slots are located. + FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra); + result.operand = + Operand(offset.from_stack_pointer() ? 
rsp : rbp, offset.offset()); + return result; + } + + Operand MemoryOperand(int* first_input) { + const int offset = *first_input; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_MR1I: { + *first_input += 2; + Register index = InputRegister(offset + 1); + return Operand(InputRegister(offset + 0), index, times_1, + 0); // TODO(dcarney): K != 0 + } + case kMode_MRI: + *first_input += 2; + return Operand(InputRegister(offset + 0), InputInt32(offset + 1)); + default: + UNREACHABLE(); + return Operand(no_reg, 0); + } + } + + Operand MemoryOperand() { + int first_input = 0; + return MemoryOperand(&first_input); + } +}; + + +static bool HasImmediateInput(Instruction* instr, int index) { + return instr->InputAt(index)->IsImmediate(); +} + + +#define ASSEMBLE_BINOP(asm_instr) \ + do { \ + if (HasImmediateInput(instr, 1)) { \ + RegisterOrOperand input = i.InputRegisterOrOperand(0); \ + if (input.type == kRegister) { \ + __ asm_instr(input.reg, i.InputImmediate(1)); \ + } else { \ + __ asm_instr(input.operand, i.InputImmediate(1)); \ + } \ + } else { \ + RegisterOrOperand input = i.InputRegisterOrOperand(1); \ + if (input.type == kRegister) { \ + __ asm_instr(i.InputRegister(0), input.reg); \ + } else { \ + __ asm_instr(i.InputRegister(0), input.operand); \ + } \ + } \ + } while (0) + + +#define ASSEMBLE_SHIFT(asm_instr, width) \ + do { \ + if (HasImmediateInput(instr, 1)) { \ + __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \ + } else { \ + __ asm_instr##_cl(i.OutputRegister()); \ + } \ + } while (0) + + +// Assembles an instruction after register allocation, producing machine code. +void CodeGenerator::AssembleArchInstruction(Instruction* instr) { + X64OperandConverter i(this, instr); + + switch (ArchOpcodeField::decode(instr->opcode())) { + case kArchJmp: + __ jmp(code_->GetLabel(i.InputBlock(0))); + break; + case kArchNop: + // don't emit code for nops. 
+ break; + case kArchRet: + AssembleReturn(); + break; + case kArchDeoptimize: { + int deoptimization_id = MiscField::decode(instr->opcode()); + BuildTranslation(instr, deoptimization_id); + + Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( + isolate(), deoptimization_id, Deoptimizer::LAZY); + __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY); + break; + } + case kX64Add32: + ASSEMBLE_BINOP(addl); + break; + case kX64Add: + ASSEMBLE_BINOP(addq); + break; + case kX64Sub32: + ASSEMBLE_BINOP(subl); + break; + case kX64Sub: + ASSEMBLE_BINOP(subq); + break; + case kX64And32: + ASSEMBLE_BINOP(andl); + break; + case kX64And: + ASSEMBLE_BINOP(andq); + break; + case kX64Cmp32: + ASSEMBLE_BINOP(cmpl); + break; + case kX64Cmp: + ASSEMBLE_BINOP(cmpq); + break; + case kX64Test32: + ASSEMBLE_BINOP(testl); + break; + case kX64Test: + ASSEMBLE_BINOP(testq); + break; + case kX64Imul32: + if (HasImmediateInput(instr, 1)) { + RegisterOrOperand input = i.InputRegisterOrOperand(0); + if (input.type == kRegister) { + __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1)); + } else { + __ movq(kScratchRegister, input.operand); + __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1)); + } + } else { + RegisterOrOperand input = i.InputRegisterOrOperand(1); + if (input.type == kRegister) { + __ imull(i.OutputRegister(), input.reg); + } else { + __ imull(i.OutputRegister(), input.operand); + } + } + break; + case kX64Imul: + if (HasImmediateInput(instr, 1)) { + RegisterOrOperand input = i.InputRegisterOrOperand(0); + if (input.type == kRegister) { + __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1)); + } else { + __ movq(kScratchRegister, input.operand); + __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1)); + } + } else { + RegisterOrOperand input = i.InputRegisterOrOperand(1); + if (input.type == kRegister) { + __ imulq(i.OutputRegister(), input.reg); + } else { + __ imulq(i.OutputRegister(), input.operand); + } + } + break; + case 
kX64Idiv32: + __ cdq(); + __ idivl(i.InputRegister(1)); + break; + case kX64Idiv: + __ cqo(); + __ idivq(i.InputRegister(1)); + break; + case kX64Udiv32: + __ xorl(rdx, rdx); + __ divl(i.InputRegister(1)); + break; + case kX64Udiv: + __ xorq(rdx, rdx); + __ divq(i.InputRegister(1)); + break; + case kX64Not: { + RegisterOrOperand output = i.OutputRegisterOrOperand(); + if (output.type == kRegister) { + __ notq(output.reg); + } else { + __ notq(output.operand); + } + break; + } + case kX64Not32: { + RegisterOrOperand output = i.OutputRegisterOrOperand(); + if (output.type == kRegister) { + __ notl(output.reg); + } else { + __ notl(output.operand); + } + break; + } + case kX64Neg: { + RegisterOrOperand output = i.OutputRegisterOrOperand(); + if (output.type == kRegister) { + __ negq(output.reg); + } else { + __ negq(output.operand); + } + break; + } + case kX64Neg32: { + RegisterOrOperand output = i.OutputRegisterOrOperand(); + if (output.type == kRegister) { + __ negl(output.reg); + } else { + __ negl(output.operand); + } + break; + } + case kX64Or32: + ASSEMBLE_BINOP(orl); + break; + case kX64Or: + ASSEMBLE_BINOP(orq); + break; + case kX64Xor32: + ASSEMBLE_BINOP(xorl); + break; + case kX64Xor: + ASSEMBLE_BINOP(xorq); + break; + case kX64Shl32: + ASSEMBLE_SHIFT(shll, 5); + break; + case kX64Shl: + ASSEMBLE_SHIFT(shlq, 6); + break; + case kX64Shr32: + ASSEMBLE_SHIFT(shrl, 5); + break; + case kX64Shr: + ASSEMBLE_SHIFT(shrq, 6); + break; + case kX64Sar32: + ASSEMBLE_SHIFT(sarl, 5); + break; + case kX64Sar: + ASSEMBLE_SHIFT(sarq, 6); + break; + case kX64Push: { + RegisterOrOperand input = i.InputRegisterOrOperand(0); + if (input.type == kRegister) { + __ pushq(input.reg); + } else { + __ pushq(input.operand); + } + break; + } + case kX64PushI: + __ pushq(i.InputImmediate(0)); + break; + case kX64CallCodeObject: { + if (HasImmediateInput(instr, 0)) { + Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); + __ Call(code, RelocInfo::CODE_TARGET); + } else { + 
Register reg = i.InputRegister(0); + int entry = Code::kHeaderSize - kHeapObjectTag; + __ Call(Operand(reg, entry)); + } + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); + if (lazy_deopt) { + RecordLazyDeoptimizationEntry(instr); + } + AddNopForSmiCodeInlining(); + break; + } + case kX64CallAddress: + if (HasImmediateInput(instr, 0)) { + Immediate64 imm = i.InputImmediate64(0); + DCHECK_EQ(kImm64Value, imm.type); + __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64); + } else { + __ call(i.InputRegister(0)); + } + break; + case kPopStack: { + int words = MiscField::decode(instr->opcode()); + __ addq(rsp, Immediate(kPointerSize * words)); + break; + } + case kX64CallJSFunction: { + Register func = i.InputRegister(0); + + // TODO(jarin) The load of the context should be separated from the call. + __ movp(rsi, FieldOperand(func, JSFunction::kContextOffset)); + __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset)); + + RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, + Safepoint::kNoLazyDeopt); + RecordLazyDeoptimizationEntry(instr); + break; + } + case kSSEFloat64Cmp: { + RegisterOrOperand input = i.InputRegisterOrOperand(1); + if (input.type == kDoubleRegister) { + __ ucomisd(i.InputDoubleRegister(0), input.double_reg); + } else { + __ ucomisd(i.InputDoubleRegister(0), input.operand); + } + break; + } + case kSSEFloat64Add: + __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Sub: + __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Mul: + __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Div: + __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); + break; + case kSSEFloat64Mod: { + __ subq(rsp, Immediate(kDoubleSize)); + // Move values to st(0) and st(1). 
+ __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1)); + __ fld_d(Operand(rsp, 0)); + __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0)); + __ fld_d(Operand(rsp, 0)); + // Loop while fprem isn't done. + Label mod_loop; + __ bind(&mod_loop); + // This instructions traps on all kinds inputs, but we are assuming the + // floating point control word is set to ignore them all. + __ fprem(); + // The following 2 instruction implicitly use rax. + __ fnstsw_ax(); + if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) { + __ sahf(); + } else { + __ shrl(rax, Immediate(8)); + __ andl(rax, Immediate(0xFF)); + __ pushq(rax); + __ popfq(); + } + __ j(parity_even, &mod_loop); + // Move output to stack and clean up. + __ fstp(1); + __ fstp_d(Operand(rsp, 0)); + __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0)); + __ addq(rsp, Immediate(kDoubleSize)); + break; + } + case kX64Int32ToInt64: + __ movzxwq(i.OutputRegister(), i.InputRegister(0)); + break; + case kX64Int64ToInt32: + __ Move(i.OutputRegister(), i.InputRegister(0)); + break; + case kSSEFloat64ToInt32: { + RegisterOrOperand input = i.InputRegisterOrOperand(0); + if (input.type == kDoubleRegister) { + __ cvttsd2si(i.OutputRegister(), input.double_reg); + } else { + __ cvttsd2si(i.OutputRegister(), input.operand); + } + break; + } + case kSSEFloat64ToUint32: { + // TODO(turbofan): X64 SSE cvttsd2siq should support operands. + __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0)); + __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits. + // TODO(turbofan): generated code should not look at the upper 32 bits + // of the result, but those bits could escape to the outside world. 
+ break; + } + case kSSEInt32ToFloat64: { + RegisterOrOperand input = i.InputRegisterOrOperand(0); + if (input.type == kRegister) { + __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg); + } else { + __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand); + } + break; + } + case kSSEUint32ToFloat64: { + // TODO(turbofan): X64 SSE cvtqsi2sd should support operands. + __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + + case kSSELoad: + __ movsd(i.OutputDoubleRegister(), i.MemoryOperand()); + break; + case kSSEStore: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movsd(operand, i.InputDoubleRegister(index)); + break; + } + case kX64LoadWord8: + __ movzxbl(i.OutputRegister(), i.MemoryOperand()); + break; + case kX64StoreWord8: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movb(operand, i.InputRegister(index)); + break; + } + case kX64StoreWord8I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movb(operand, Immediate(i.InputInt8(index))); + break; + } + case kX64LoadWord16: + __ movzxwl(i.OutputRegister(), i.MemoryOperand()); + break; + case kX64StoreWord16: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movw(operand, i.InputRegister(index)); + break; + } + case kX64StoreWord16I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movw(operand, Immediate(i.InputInt16(index))); + break; + } + case kX64LoadWord32: + __ movl(i.OutputRegister(), i.MemoryOperand()); + break; + case kX64StoreWord32: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movl(operand, i.InputRegister(index)); + break; + } + case kX64StoreWord32I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movl(operand, i.InputImmediate(index)); + break; + } + case kX64LoadWord64: + __ movq(i.OutputRegister(), i.MemoryOperand()); + break; + case kX64StoreWord64: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movq(operand, 
i.InputRegister(index)); + break; + } + case kX64StoreWord64I: { + int index = 0; + Operand operand = i.MemoryOperand(&index); + __ movq(operand, i.InputImmediate(index)); + break; + } + case kX64StoreWriteBarrier: { + Register object = i.InputRegister(0); + Register index = i.InputRegister(1); + Register value = i.InputRegister(2); + __ movsxlq(index, index); + __ movq(Operand(object, index, times_1, 0), value); + __ leaq(index, Operand(object, index, times_1, 0)); + SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters() + ? kSaveFPRegs + : kDontSaveFPRegs; + __ RecordWrite(object, index, value, mode); + break; + } + } +} + + +// Assembles branches after this instruction. +void CodeGenerator::AssembleArchBranch(Instruction* instr, + FlagsCondition condition) { + X64OperandConverter i(this, instr); + Label done; + + // Emit a branch. The true and false targets are always the last two inputs + // to the instruction. + BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2); + BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1); + bool fallthru = IsNextInAssemblyOrder(fblock); + Label* tlabel = code()->GetLabel(tblock); + Label* flabel = fallthru ? &done : code()->GetLabel(fblock); + Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar; + switch (condition) { + case kUnorderedEqual: + __ j(parity_even, flabel, flabel_distance); + // Fall through. + case kEqual: + __ j(equal, tlabel); + break; + case kUnorderedNotEqual: + __ j(parity_even, tlabel); + // Fall through. + case kNotEqual: + __ j(not_equal, tlabel); + break; + case kSignedLessThan: + __ j(less, tlabel); + break; + case kSignedGreaterThanOrEqual: + __ j(greater_equal, tlabel); + break; + case kSignedLessThanOrEqual: + __ j(less_equal, tlabel); + break; + case kSignedGreaterThan: + __ j(greater, tlabel); + break; + case kUnorderedLessThan: + __ j(parity_even, flabel, flabel_distance); + // Fall through. 
+ case kUnsignedLessThan: + __ j(below, tlabel); + break; + case kUnorderedGreaterThanOrEqual: + __ j(parity_even, tlabel); + // Fall through. + case kUnsignedGreaterThanOrEqual: + __ j(above_equal, tlabel); + break; + case kUnorderedLessThanOrEqual: + __ j(parity_even, flabel, flabel_distance); + // Fall through. + case kUnsignedLessThanOrEqual: + __ j(below_equal, tlabel); + break; + case kUnorderedGreaterThan: + __ j(parity_even, tlabel); + // Fall through. + case kUnsignedGreaterThan: + __ j(above, tlabel); + break; + case kOverflow: + __ j(overflow, tlabel); + break; + case kNotOverflow: + __ j(no_overflow, tlabel); + break; + } + if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel. + __ bind(&done); +} + + +// Assembles boolean materializations after this instruction. +void CodeGenerator::AssembleArchBoolean(Instruction* instr, + FlagsCondition condition) { + X64OperandConverter i(this, instr); + Label done; + + // Materialize a full 64-bit 1 or 0 value. The result register is always the + // last output of the instruction. + Label check; + DCHECK_NE(0, instr->OutputCount()); + Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1)); + Condition cc = no_condition; + switch (condition) { + case kUnorderedEqual: + __ j(parity_odd, &check, Label::kNear); + __ movl(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + // Fall through. + case kEqual: + cc = equal; + break; + case kUnorderedNotEqual: + __ j(parity_odd, &check, Label::kNear); + __ movl(reg, Immediate(1)); + __ jmp(&done, Label::kNear); + // Fall through. 
+ case kNotEqual: + cc = not_equal; + break; + case kSignedLessThan: + cc = less; + break; + case kSignedGreaterThanOrEqual: + cc = greater_equal; + break; + case kSignedLessThanOrEqual: + cc = less_equal; + break; + case kSignedGreaterThan: + cc = greater; + break; + case kUnorderedLessThan: + __ j(parity_odd, &check, Label::kNear); + __ movl(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedLessThan: + cc = below; + break; + case kUnorderedGreaterThanOrEqual: + __ j(parity_odd, &check, Label::kNear); + __ movl(reg, Immediate(1)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedGreaterThanOrEqual: + cc = above_equal; + break; + case kUnorderedLessThanOrEqual: + __ j(parity_odd, &check, Label::kNear); + __ movl(reg, Immediate(0)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedLessThanOrEqual: + cc = below_equal; + break; + case kUnorderedGreaterThan: + __ j(parity_odd, &check, Label::kNear); + __ movl(reg, Immediate(1)); + __ jmp(&done, Label::kNear); + // Fall through. + case kUnsignedGreaterThan: + cc = above; + break; + case kOverflow: + cc = overflow; + break; + case kNotOverflow: + cc = no_overflow; + break; + } + __ bind(&check); + __ setcc(cc, reg); + __ movzxbl(reg, reg); + __ bind(&done); +} + + +void CodeGenerator::AssemblePrologue() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + int stack_slots = frame()->GetSpillSlotCount(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + __ pushq(rbp); + __ movq(rbp, rsp); + const RegList saves = descriptor->CalleeSavedRegisters(); + if (saves != 0) { // Save callee-saved registers. 
+ int register_save_area_size = 0; + for (int i = Register::kNumRegisters - 1; i >= 0; i--) { + if (!((1 << i) & saves)) continue; + __ pushq(Register::from_code(i)); + register_save_area_size += kPointerSize; + } + frame()->SetRegisterSaveAreaSize(register_save_area_size); + } + } else if (descriptor->IsJSFunctionCall()) { + CompilationInfo* info = linkage()->info(); + __ Prologue(info->IsCodePreAgingActive()); + frame()->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? + if (info->strict_mode() == SLOPPY && !info->is_native()) { + Label ok; + StackArgumentsAccessor args(rbp, info->scope()->num_parameters()); + __ movp(rcx, args.GetReceiverOperand()); + __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex); + __ j(not_equal, &ok, Label::kNear); + __ movp(rcx, GlobalObjectOperand()); + __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset)); + __ movp(args.GetReceiverOperand(), rcx); + __ bind(&ok); + } + + } else { + __ StubPrologue(); + frame()->SetRegisterSaveAreaSize( + StandardFrameConstants::kFixedFrameSizeFromFp); + } + if (stack_slots > 0) { + __ subq(rsp, Immediate(stack_slots * kPointerSize)); + } +} + + +void CodeGenerator::AssembleReturn() { + CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); + if (descriptor->kind() == CallDescriptor::kCallAddress) { + if (frame()->GetRegisterSaveAreaSize() > 0) { + // Remove this frame's spill slots first. + int stack_slots = frame()->GetSpillSlotCount(); + if (stack_slots > 0) { + __ addq(rsp, Immediate(stack_slots * kPointerSize)); + } + const RegList saves = descriptor->CalleeSavedRegisters(); + // Restore registers. 
+ if (saves != 0) { + for (int i = 0; i < Register::kNumRegisters; i++) { + if (!((1 << i) & saves)) continue; + __ popq(Register::from_code(i)); + } + } + __ popq(rbp); // Pop caller's frame pointer. + __ ret(0); + } else { + // No saved registers. + __ movq(rsp, rbp); // Move stack pointer back to frame pointer. + __ popq(rbp); // Pop caller's frame pointer. + __ ret(0); + } + } else { + __ movq(rsp, rbp); // Move stack pointer back to frame pointer. + __ popq(rbp); // Pop caller's frame pointer. + int pop_count = + descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0; + __ ret(pop_count * kPointerSize); + } +} + + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + X64OperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + __ movq(g.ToRegister(destination), src); + } else { + __ movq(g.ToOperand(destination), src); + } + } else if (source->IsStackSlot()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Operand src = g.ToOperand(source); + if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ movq(dst, src); + } else { + // Spill on demand to use a temporary register for memory-to-memory + // moves. + Register tmp = kScratchRegister; + Operand dst = g.ToOperand(destination); + __ movq(tmp, src); + __ movq(dst, tmp); + } + } else if (source->IsConstant()) { + ConstantOperand* constant_source = ConstantOperand::cast(source); + if (destination->IsRegister() || destination->IsStackSlot()) { + Register dst = destination->IsRegister() ? 
g.ToRegister(destination) + : kScratchRegister; + Immediate64 imm = g.ToImmediate64(constant_source); + switch (imm.type) { + case kImm64Value: + __ Set(dst, imm.value); + break; + case kImm64Reference: + __ Move(dst, imm.reference); + break; + case kImm64Handle: + __ Move(dst, imm.handle); + break; + } + if (destination->IsStackSlot()) { + __ movq(g.ToOperand(destination), kScratchRegister); + } + } else { + __ movq(kScratchRegister, + BitCast<uint64_t, double>(g.ToDouble(constant_source))); + if (destination->IsDoubleRegister()) { + __ movq(g.ToDoubleRegister(destination), kScratchRegister); + } else { + DCHECK(destination->IsDoubleStackSlot()); + __ movq(g.ToOperand(destination), kScratchRegister); + } + } + } else if (source->IsDoubleRegister()) { + XMMRegister src = g.ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + XMMRegister dst = g.ToDoubleRegister(destination); + __ movsd(dst, src); + } else { + DCHECK(destination->IsDoubleStackSlot()); + Operand dst = g.ToOperand(destination); + __ movsd(dst, src); + } + } else if (source->IsDoubleStackSlot()) { + DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); + Operand src = g.ToOperand(source); + if (destination->IsDoubleRegister()) { + XMMRegister dst = g.ToDoubleRegister(destination); + __ movsd(dst, src); + } else { + // We rely on having xmm0 available as a fixed scratch register. + Operand dst = g.ToOperand(destination); + __ movsd(xmm0, src); + __ movsd(dst, xmm0); + } + } else { + UNREACHABLE(); + } +} + + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + X64OperandConverter g(this, NULL); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister() && destination->IsRegister()) { + // Register-register. 
+ __ xchgq(g.ToRegister(source), g.ToRegister(destination)); + } else if (source->IsRegister() && destination->IsStackSlot()) { + Register src = g.ToRegister(source); + Operand dst = g.ToOperand(destination); + __ xchgq(src, dst); + } else if ((source->IsStackSlot() && destination->IsStackSlot()) || + (source->IsDoubleStackSlot() && + destination->IsDoubleStackSlot())) { + // Memory-memory. + Register tmp = kScratchRegister; + Operand src = g.ToOperand(source); + Operand dst = g.ToOperand(destination); + __ movq(tmp, dst); + __ xchgq(tmp, src); + __ movq(dst, tmp); + } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { + // XMM register-register swap. We rely on having xmm0 + // available as a fixed scratch register. + XMMRegister src = g.ToDoubleRegister(source); + XMMRegister dst = g.ToDoubleRegister(destination); + __ movsd(xmm0, src); + __ movsd(src, dst); + __ movsd(dst, xmm0); + } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { + // XMM register-memory swap. We rely on having xmm0 + // available as a fixed scratch register. + XMMRegister src = g.ToDoubleRegister(source); + Operand dst = g.ToOperand(destination); + __ movsd(xmm0, src); + __ movsd(src, dst); + __ movsd(dst, xmm0); + } else { + // No other combinations are possible. + UNREACHABLE(); + } +} + + +void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); } + +#undef __ + +#ifdef DEBUG + +// Checks whether the code between start_pc and end_pc is a no-op. 
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, + int end_pc) { + if (start_pc + 1 != end_pc) { + return false; + } + return *(code->instruction_start() + start_pc) == + v8::internal::Assembler::kNopByte; +} + +#endif + +} // namespace internal +} // namespace compiler +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/x64/instruction-codes-x64.h nodejs-0.11.15/deps/v8/src/compiler/x64/instruction-codes-x64.h --- nodejs-0.11.13/deps/v8/src/compiler/x64/instruction-codes-x64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/x64/instruction-codes-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,108 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ +#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// X64-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. 
+#define TARGET_ARCH_OPCODE_LIST(V) \ + V(X64Add) \ + V(X64Add32) \ + V(X64And) \ + V(X64And32) \ + V(X64Cmp) \ + V(X64Cmp32) \ + V(X64Test) \ + V(X64Test32) \ + V(X64Or) \ + V(X64Or32) \ + V(X64Xor) \ + V(X64Xor32) \ + V(X64Sub) \ + V(X64Sub32) \ + V(X64Imul) \ + V(X64Imul32) \ + V(X64Idiv) \ + V(X64Idiv32) \ + V(X64Udiv) \ + V(X64Udiv32) \ + V(X64Not) \ + V(X64Not32) \ + V(X64Neg) \ + V(X64Neg32) \ + V(X64Shl) \ + V(X64Shl32) \ + V(X64Shr) \ + V(X64Shr32) \ + V(X64Sar) \ + V(X64Sar32) \ + V(X64Push) \ + V(X64PushI) \ + V(X64CallCodeObject) \ + V(X64CallAddress) \ + V(PopStack) \ + V(X64CallJSFunction) \ + V(SSEFloat64Cmp) \ + V(SSEFloat64Add) \ + V(SSEFloat64Sub) \ + V(SSEFloat64Mul) \ + V(SSEFloat64Div) \ + V(SSEFloat64Mod) \ + V(X64Int32ToInt64) \ + V(X64Int64ToInt32) \ + V(SSEFloat64ToInt32) \ + V(SSEFloat64ToUint32) \ + V(SSEInt32ToFloat64) \ + V(SSEUint32ToFloat64) \ + V(SSELoad) \ + V(SSEStore) \ + V(X64LoadWord8) \ + V(X64StoreWord8) \ + V(X64StoreWord8I) \ + V(X64LoadWord16) \ + V(X64StoreWord16) \ + V(X64StoreWord16I) \ + V(X64LoadWord32) \ + V(X64StoreWord32) \ + V(X64StoreWord32I) \ + V(X64LoadWord64) \ + V(X64StoreWord64) \ + V(X64StoreWord64I) \ + V(X64StoreWriteBarrier) + + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. 
+// +// We use the following local notation for addressing modes: +// +// R = register +// O = register or stack slot +// D = double register +// I = immediate (handle, external, int32) +// MR = [register] +// MI = [immediate] +// MRN = [register + register * N in {1, 2, 4, 8}] +// MRI = [register + immediate] +// MRNI = [register + register * N in {1, 2, 4, 8} + immediate] +#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(MR) /* [%r1] */ \ + V(MRI) /* [%r1 + K] */ \ + V(MR1I) /* [%r1 + %r2 + K] */ \ + V(MR2I) /* [%r1 + %r2*2 + K] */ \ + V(MR4I) /* [%r1 + %r2*4 + K] */ \ + V(MR8I) /* [%r1 + %r2*8 + K] */ + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler/x64/instruction-selector-x64.cc nodejs-0.11.15/deps/v8/src/compiler/x64/instruction-selector-x64.cc --- nodejs-0.11.13/deps/v8/src/compiler/x64/instruction-selector-x64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/x64/instruction-selector-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,722 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Adds X64-specific methods for generating operands. +class X64OperandGenerator V8_FINAL : public OperandGenerator { + public: + explicit X64OperandGenerator(InstructionSelector* selector) + : OperandGenerator(selector) {} + + InstructionOperand* TempRegister(Register reg) { + return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, + Register::ToAllocationIndex(reg)); + } + + InstructionOperand* UseByteRegister(Node* node) { + // TODO(dcarney): relax constraint. 
+ return UseFixed(node, rdx); + } + + InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); } + + bool CanBeImmediate(Node* node) { + switch (node->opcode()) { + case IrOpcode::kInt32Constant: + return true; + default: + return false; + } + } + + bool CanBeImmediate64(Node* node) { + switch (node->opcode()) { + case IrOpcode::kInt32Constant: + return true; + case IrOpcode::kNumberConstant: + return true; + case IrOpcode::kHeapConstant: { + // Constants in new space cannot be used as immediates in V8 because + // the GC does not scan code objects when collecting the new generation. + Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op()); + return !isolate()->heap()->InNewSpace(*value); + } + default: + return false; + } + } +}; + + +void InstructionSelector::VisitLoad(Node* node) { + MachineType rep = OpParameter<MachineType>(node); + X64OperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + InstructionOperand* output = rep == kMachineFloat64 + ? g.DefineAsDoubleRegister(node) + : g.DefineAsRegister(node); + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kSSELoad; + break; + case kMachineWord8: + opcode = kX64LoadWord8; + break; + case kMachineWord16: + opcode = kX64LoadWord16; + break; + case kMachineWord32: + opcode = kX64LoadWord32; + break; + case kMachineTagged: // Fall through. 
+ case kMachineWord64: + opcode = kX64LoadWord64; + break; + default: + UNREACHABLE(); + return; + } + if (g.CanBeImmediate(base)) { + // load [#base + %index] + Emit(opcode | AddressingModeField::encode(kMode_MRI), output, + g.UseRegister(index), g.UseImmediate(base)); + } else if (g.CanBeImmediate(index)) { // load [%base + #index] + Emit(opcode | AddressingModeField::encode(kMode_MRI), output, + g.UseRegister(base), g.UseImmediate(index)); + } else { // load [%base + %index + K] + Emit(opcode | AddressingModeField::encode(kMode_MR1I), output, + g.UseRegister(base), g.UseRegister(index)); + } + // TODO(turbofan): addressing modes [r+r*{2,4,8}+K] +} + + +void InstructionSelector::VisitStore(Node* node) { + X64OperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node); + MachineType rep = store_rep.rep; + if (store_rep.write_barrier_kind == kFullWriteBarrier) { + DCHECK(rep == kMachineTagged); + // TODO(dcarney): refactor RecordWrite function to take temp registers + // and pass them here instead of using fixed regs + // TODO(dcarney): handle immediate indices. + InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)}; + Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx), + g.UseFixed(index, rcx), g.UseFixed(value, rdx), ARRAY_SIZE(temps), + temps); + return; + } + DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind); + bool is_immediate = false; + InstructionOperand* val; + if (rep == kMachineFloat64) { + val = g.UseDoubleRegister(value); + } else { + is_immediate = g.CanBeImmediate(value); + if (is_immediate) { + val = g.UseImmediate(value); + } else if (rep == kMachineWord8) { + val = g.UseByteRegister(value); + } else { + val = g.UseRegister(value); + } + } + ArchOpcode opcode; + switch (rep) { + case kMachineFloat64: + opcode = kSSEStore; + break; + case kMachineWord8: + opcode = is_immediate ? 
kX64StoreWord8I : kX64StoreWord8; + break; + case kMachineWord16: + opcode = is_immediate ? kX64StoreWord16I : kX64StoreWord16; + break; + case kMachineWord32: + opcode = is_immediate ? kX64StoreWord32I : kX64StoreWord32; + break; + case kMachineTagged: // Fall through. + case kMachineWord64: + opcode = is_immediate ? kX64StoreWord64I : kX64StoreWord64; + break; + default: + UNREACHABLE(); + return; + } + if (g.CanBeImmediate(base)) { + // store [#base + %index], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, + g.UseRegister(index), g.UseImmediate(base), val); + } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, + g.UseRegister(base), g.UseImmediate(index), val); + } else { // store [%base + %index], %|#value + Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL, + g.UseRegister(base), g.UseRegister(index), val); + } + // TODO(turbofan): addressing modes [r+r*{2,4,8}+K] +} + + +// Shared routine for multiple binary operations. +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont) { + X64OperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand* inputs[4]; + size_t input_count = 0; + InstructionOperand* outputs[2]; + size_t output_count = 0; + + // TODO(turbofan): match complex addressing modes. + // TODO(turbofan): if commutative, pick the non-live-in operand as the left as + // this might be the last use and therefore its register can be reused. 
+ if (g.CanBeImmediate(m.right().node())) { + inputs[input_count++] = g.Use(m.left().node()); + inputs[input_count++] = g.UseImmediate(m.right().node()); + } else { + inputs[input_count++] = g.UseRegister(m.left().node()); + inputs[input_count++] = g.Use(m.right().node()); + } + + if (cont->IsBranch()) { + inputs[input_count++] = g.Label(cont->true_block()); + inputs[input_count++] = g.Label(cont->false_block()); + } + + outputs[output_count++] = g.DefineSameAsFirst(node); + if (cont->IsSet()) { + outputs[output_count++] = g.DefineAsRegister(cont->result()); + } + + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); + DCHECK_GE(ARRAY_SIZE(inputs), input_count); + DCHECK_GE(ARRAY_SIZE(outputs), output_count); + + Instruction* instr = selector->Emit(cont->Encode(opcode), output_count, + outputs, input_count, inputs); + if (cont->IsBranch()) instr->MarkAsControl(); +} + + +// Shared routine for multiple binary operations. +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode) { + FlagsContinuation cont; + VisitBinop(selector, node, opcode, &cont); +} + + +void InstructionSelector::VisitWord32And(Node* node) { + VisitBinop(this, node, kX64And32); +} + + +void InstructionSelector::VisitWord64And(Node* node) { + VisitBinop(this, node, kX64And); +} + + +void InstructionSelector::VisitWord32Or(Node* node) { + VisitBinop(this, node, kX64Or32); +} + + +void InstructionSelector::VisitWord64Or(Node* node) { + VisitBinop(this, node, kX64Or); +} + + +template <typename T> +static void VisitXor(InstructionSelector* selector, Node* node, + ArchOpcode xor_opcode, ArchOpcode not_opcode) { + X64OperandGenerator g(selector); + BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); + if (m.right().Is(-1)) { + selector->Emit(not_opcode, g.DefineSameAsFirst(node), + g.Use(m.left().node())); + } else { + VisitBinop(selector, node, xor_opcode); + } +} + + +void InstructionSelector::VisitWord32Xor(Node* node) { + VisitXor<int32_t>(this, node, 
kX64Xor32, kX64Not32); +} + + +void InstructionSelector::VisitWord64Xor(Node* node) { + VisitXor<int64_t>(this, node, kX64Xor, kX64Not); +} + + +// Shared routine for multiple 32-bit shift operations. +// TODO(bmeurer): Merge this with VisitWord64Shift using template magic? +static void VisitWord32Shift(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + X64OperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // TODO(turbofan): assembler only supports some addressing modes for shifts. + if (g.CanBeImmediate(right)) { + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.UseImmediate(right)); + } else { + Int32BinopMatcher m(node); + if (m.right().IsWord32And()) { + Int32BinopMatcher mright(right); + if (mright.right().Is(0x1F)) { + right = mright.left().node(); + } + } + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.UseFixed(right, rcx)); + } +} + + +// Shared routine for multiple 64-bit shift operations. +// TODO(bmeurer): Merge this with VisitWord32Shift using template magic? +static void VisitWord64Shift(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + X64OperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // TODO(turbofan): assembler only supports some addressing modes for shifts. 
+ if (g.CanBeImmediate(right)) { + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.UseImmediate(right)); + } else { + Int64BinopMatcher m(node); + if (m.right().IsWord64And()) { + Int64BinopMatcher mright(right); + if (mright.right().Is(0x3F)) { + right = mright.left().node(); + } + } + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.UseFixed(right, rcx)); + } +} + + +void InstructionSelector::VisitWord32Shl(Node* node) { + VisitWord32Shift(this, node, kX64Shl32); +} + + +void InstructionSelector::VisitWord64Shl(Node* node) { + VisitWord64Shift(this, node, kX64Shl); +} + + +void InstructionSelector::VisitWord32Shr(Node* node) { + VisitWord32Shift(this, node, kX64Shr32); +} + + +void InstructionSelector::VisitWord64Shr(Node* node) { + VisitWord64Shift(this, node, kX64Shr); +} + + +void InstructionSelector::VisitWord32Sar(Node* node) { + VisitWord32Shift(this, node, kX64Sar32); +} + + +void InstructionSelector::VisitWord64Sar(Node* node) { + VisitWord64Shift(this, node, kX64Sar); +} + + +void InstructionSelector::VisitInt32Add(Node* node) { + VisitBinop(this, node, kX64Add32); +} + + +void InstructionSelector::VisitInt64Add(Node* node) { + VisitBinop(this, node, kX64Add); +} + + +template <typename T> +static void VisitSub(InstructionSelector* selector, Node* node, + ArchOpcode sub_opcode, ArchOpcode neg_opcode) { + X64OperandGenerator g(selector); + BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); + if (m.left().Is(0)) { + selector->Emit(neg_opcode, g.DefineSameAsFirst(node), + g.Use(m.right().node())); + } else { + VisitBinop(selector, node, sub_opcode); + } +} + + +void InstructionSelector::VisitInt32Sub(Node* node) { + VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32); +} + + +void InstructionSelector::VisitInt64Sub(Node* node) { + VisitSub<int64_t>(this, node, kX64Sub, kX64Neg); +} + + +static void VisitMul(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + X64OperandGenerator 
g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + if (g.CanBeImmediate(right)) { + selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left), + g.UseImmediate(right)); + } else if (g.CanBeImmediate(left)) { + selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right), + g.UseImmediate(left)); + } else { + // TODO(turbofan): select better left operand. + selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), + g.Use(right)); + } +} + + +void InstructionSelector::VisitInt32Mul(Node* node) { + VisitMul(this, node, kX64Imul32); +} + + +void InstructionSelector::VisitInt64Mul(Node* node) { + VisitMul(this, node, kX64Imul); +} + + +static void VisitDiv(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + X64OperandGenerator g(selector); + InstructionOperand* temps[] = {g.TempRegister(rdx)}; + selector->Emit( + opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax), + g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps); +} + + +void InstructionSelector::VisitInt32Div(Node* node) { + VisitDiv(this, node, kX64Idiv32); +} + + +void InstructionSelector::VisitInt64Div(Node* node) { + VisitDiv(this, node, kX64Idiv); +} + + +void InstructionSelector::VisitInt32UDiv(Node* node) { + VisitDiv(this, node, kX64Udiv32); +} + + +void InstructionSelector::VisitInt64UDiv(Node* node) { + VisitDiv(this, node, kX64Udiv); +} + + +static void VisitMod(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + X64OperandGenerator g(selector); + InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)}; + selector->Emit( + opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax), + g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps); +} + + +void InstructionSelector::VisitInt32Mod(Node* node) { + VisitMod(this, node, kX64Idiv32); +} + + +void InstructionSelector::VisitInt64Mod(Node* node) { + VisitMod(this, node, kX64Idiv); +} + + +void 
InstructionSelector::VisitInt32UMod(Node* node) { + VisitMod(this, node, kX64Udiv32); +} + + +void InstructionSelector::VisitInt64UMod(Node* node) { + VisitMod(this, node, kX64Udiv); +} + + +void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { + X64OperandGenerator g(this); + Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node), + g.Use(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { + X64OperandGenerator g(this); + // TODO(turbofan): X64 SSE cvtqsi2sd should support operands. + Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { + X64OperandGenerator g(this); + Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0))); +} + + +void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { + X64OperandGenerator g(this); + // TODO(turbofan): X64 SSE cvttsd2siq should support operands. + Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), + g.UseDoubleRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitFloat64Add(Node* node) { + X64OperandGenerator g(this); + Emit(kSSEFloat64Add, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Sub(Node* node) { + X64OperandGenerator g(this); + Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Mul(Node* node) { + X64OperandGenerator g(this); + Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Div(Node* node) { + X64OperandGenerator g(this); + Emit(kSSEFloat64Div, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + 
g.UseDoubleRegister(node->InputAt(1))); +} + + +void InstructionSelector::VisitFloat64Mod(Node* node) { + X64OperandGenerator g(this); + InstructionOperand* temps[] = {g.TempRegister(rax)}; + Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node), + g.UseDoubleRegister(node->InputAt(0)), + g.UseDoubleRegister(node->InputAt(1)), 1, temps); +} + + +void InstructionSelector::VisitConvertInt64ToInt32(Node* node) { + X64OperandGenerator g(this); + // TODO(dcarney): other modes + Emit(kX64Int64ToInt32, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitConvertInt32ToInt64(Node* node) { + X64OperandGenerator g(this); + // TODO(dcarney): other modes + Emit(kX64Int32ToInt64, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kX64Add32, cont); +} + + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node, + FlagsContinuation* cont) { + VisitBinop(this, node, kX64Sub32, cont); +} + + +// Shared routine for multiple compare operations. +static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, + InstructionOperand* left, InstructionOperand* right, + FlagsContinuation* cont) { + X64OperandGenerator g(selector); + opcode = cont->Encode(opcode); + if (cont->IsBranch()) { + selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()), + g.Label(cont->false_block()))->MarkAsControl(); + } else { + DCHECK(cont->IsSet()); + selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right); + } +} + + +// Shared routine for multiple word compare operations. +static void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + bool commutative) { + X64OperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // Match immediates on left or right side of comparison. 
+ if (g.CanBeImmediate(right)) { + VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont); + } else if (g.CanBeImmediate(left)) { + if (!commutative) cont->Commute(); + VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont); + } +} + + +void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) { + switch (node->opcode()) { + case IrOpcode::kInt32Sub: + return VisitWordCompare(this, node, kX64Cmp32, cont, false); + case IrOpcode::kWord32And: + return VisitWordCompare(this, node, kX64Test32, cont, true); + default: + break; + } + + X64OperandGenerator g(this); + VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont); +} + + +void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) { + switch (node->opcode()) { + case IrOpcode::kInt64Sub: + return VisitWordCompare(this, node, kX64Cmp, cont, false); + case IrOpcode::kWord64And: + return VisitWordCompare(this, node, kX64Test, cont, true); + default: + break; + } + + X64OperandGenerator g(this); + VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont); +} + + +void InstructionSelector::VisitWord32Compare(Node* node, + FlagsContinuation* cont) { + VisitWordCompare(this, node, kX64Cmp32, cont, false); +} + + +void InstructionSelector::VisitWord64Compare(Node* node, + FlagsContinuation* cont) { + VisitWordCompare(this, node, kX64Cmp, cont, false); +} + + +void InstructionSelector::VisitFloat64Compare(Node* node, + FlagsContinuation* cont) { + X64OperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right), + cont); +} + + +void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, + BasicBlock* deoptimization) { + X64OperandGenerator g(this); + CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call); + 
CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here? + + // Compute InstructionOperands for inputs and outputs. + InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization); + + // TODO(dcarney): stack alignment for c calls. + // TODO(dcarney): shadow space on window for c calls. + // Push any stack arguments. + for (int i = buffer.pushed_count - 1; i >= 0; --i) { + Node* input = buffer.pushed_nodes[i]; + // TODO(titzer): handle pushing double parameters. + if (g.CanBeImmediate(input)) { + Emit(kX64PushI, NULL, g.UseImmediate(input)); + } else { + Emit(kX64Push, NULL, g.Use(input)); + } + } + + // Select the appropriate opcode based on the call type. + InstructionCode opcode; + switch (descriptor->kind()) { + case CallDescriptor::kCallCodeObject: { + bool lazy_deopt = descriptor->CanLazilyDeoptimize(); + opcode = kX64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0); + break; + } + case CallDescriptor::kCallAddress: + opcode = kX64CallAddress; + break; + case CallDescriptor::kCallJSFunction: + opcode = kX64CallJSFunction; + break; + default: + UNREACHABLE(); + return; + } + + // Emit the call instruction. + Instruction* call_instr = + Emit(opcode, buffer.output_count, buffer.outputs, + buffer.fixed_and_control_count(), buffer.fixed_and_control_args); + + call_instr->MarkAsCall(); + if (deoptimization != NULL) { + DCHECK(continuation != NULL); + call_instr->MarkAsControl(); + } + + // Caller clean up of stack for C-style calls. 
+ if (descriptor->kind() == CallDescriptor::kCallAddress && + buffer.pushed_count > 0) { + DCHECK(deoptimization == NULL && continuation == NULL); + Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL); + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/compiler/x64/linkage-x64.cc nodejs-0.11.15/deps/v8/src/compiler/x64/linkage-x64.cc --- nodejs-0.11.13/deps/v8/src/compiler/x64/linkage-x64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler/x64/linkage-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/code-stubs.h" +#include "src/compiler/linkage.h" +#include "src/compiler/linkage-impl.h" +#include "src/zone.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#ifdef _WIN64 +const bool kWin64 = true; +#else +const bool kWin64 = false; +#endif + +struct LinkageHelperTraits { + static Register ReturnValueReg() { return rax; } + static Register ReturnValue2Reg() { return rdx; } + static Register JSCallFunctionReg() { return rdi; } + static Register ContextReg() { return rsi; } + static Register RuntimeCallFunctionReg() { return rbx; } + static Register RuntimeCallArgCountReg() { return rax; } + static RegList CCalleeSaveRegisters() { + if (kWin64) { + return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | + r14.bit() | r15.bit(); + } else { + return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit(); + } + } + static Register CRegisterParameter(int i) { + if (kWin64) { + static Register register_parameters[] = {rcx, rdx, r8, r9}; + return register_parameters[i]; + } else { + static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9}; + return register_parameters[i]; + } + } + static 
int CRegisterParametersLength() { return kWin64 ? 4 : 6; } +}; + + +CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) { + return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>( + zone, parameter_count); +} + + +CallDescriptor* Linkage::GetRuntimeCallDescriptor( + Runtime::FunctionId function, int parameter_count, + Operator::Property properties, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>( + zone, function, parameter_count, properties, can_deoptimize); +} + + +CallDescriptor* Linkage::GetStubCallDescriptor( + CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count, + CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) { + return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>( + zone, descriptor, stack_parameter_count, can_deoptimize); +} + + +CallDescriptor* Linkage::GetSimplifiedCDescriptor( + Zone* zone, int num_params, MachineType return_type, + const MachineType* param_types) { + return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>( + zone, num_params, return_type, param_types); +} + +} +} +} // namespace v8::internal::compiler diff -Nru nodejs-0.11.13/deps/v8/src/compiler.cc nodejs-0.11.15/deps/v8/src/compiler.cc --- nodejs-0.11.13/deps/v8/src/compiler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,59 +1,49 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "compiler.h" - -#include "bootstrapper.h" -#include "codegen.h" -#include "compilation-cache.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "gdb-jit.h" -#include "typing.h" -#include "hydrogen.h" -#include "isolate-inl.h" -#include "lithium.h" -#include "liveedit.h" -#include "parser.h" -#include "rewriter.h" -#include "runtime-profiler.h" -#include "scanner-character-streams.h" -#include "scopeinfo.h" -#include "scopes.h" -#include "vm-state-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/compiler.h" + +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/compilation-cache.h" +#include "src/compiler/pipeline.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/gdb-jit.h" +#include "src/hydrogen.h" +#include "src/isolate-inl.h" +#include "src/lithium.h" +#include "src/liveedit.h" +#include "src/parser.h" +#include "src/rewriter.h" +#include "src/runtime-profiler.h" +#include "src/scanner-character-streams.h" +#include "src/scopeinfo.h" +#include "src/scopes.h" +#include "src/typing.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { +ScriptData::ScriptData(const byte* data, int length) + : owns_data_(false), data_(data), length_(length) { + if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) { + byte* copy = NewArray<byte>(length); + DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment)); + CopyBytes(copy, data, length); + data_ = copy; + AcquireDataOwnership(); + } +} + + CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone) : flags_(StrictModeField::encode(SLOPPY)), @@ -61,11 +51,26 @@ osr_ast_id_(BailoutId::None()), parameter_count_(0), this_has_uses_(true), - optimization_id_(-1) { + optimization_id_(-1), + ast_value_factory_(NULL), + ast_value_factory_owned_(false) { Initialize(script->GetIsolate(), BASE, zone); } +CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone) + : flags_(StrictModeField::encode(SLOPPY)), + script_(Handle<Script>::null()), + osr_ast_id_(BailoutId::None()), + parameter_count_(0), + this_has_uses_(true), + optimization_id_(-1), + ast_value_factory_(NULL), + ast_value_factory_owned_(false) { + Initialize(isolate, STUB, zone); +} + + CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone) : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)), @@ -74,7 +79,9 @@ 
osr_ast_id_(BailoutId::None()), parameter_count_(0), this_has_uses_(true), - optimization_id_(-1) { + optimization_id_(-1), + ast_value_factory_(NULL), + ast_value_factory_owned_(false) { Initialize(script_->GetIsolate(), BASE, zone); } @@ -89,7 +96,9 @@ osr_ast_id_(BailoutId::None()), parameter_count_(0), this_has_uses_(true), - optimization_id_(-1) { + optimization_id_(-1), + ast_value_factory_(NULL), + ast_value_factory_owned_(false) { Initialize(script_->GetIsolate(), BASE, zone); } @@ -101,7 +110,9 @@ osr_ast_id_(BailoutId::None()), parameter_count_(0), this_has_uses_(true), - optimization_id_(-1) { + optimization_id_(-1), + ast_value_factory_(NULL), + ast_value_factory_owned_(false) { Initialize(isolate, STUB, zone); code_stub_ = stub; } @@ -116,7 +127,7 @@ global_scope_ = NULL; extension_ = NULL; cached_data_ = NULL; - cached_data_mode_ = NO_CACHED_DATA; + compile_options_ = ScriptCompiler::kNoCompileOptions; zone_ = zone; deferred_handles_ = NULL; code_stub_ = NULL; @@ -133,25 +144,33 @@ } mode_ = mode; abort_due_to_dependency_ = false; - if (script_->type()->value() == Script::TYPE_NATIVE) { - MarkAsNative(); - } + if (script_->type()->value() == Script::TYPE_NATIVE) MarkAsNative(); + if (isolate_->debug()->is_active()) MarkAsDebug(); + if (!shared_info_.is_null()) { - ASSERT(strict_mode() == SLOPPY); + DCHECK(strict_mode() == SLOPPY); SetStrictMode(shared_info_->strict_mode()); } set_bailout_reason(kUnknown); + + if (!shared_info().is_null() && shared_info()->is_compiled()) { + // We should initialize the CompilationInfo feedback vector from the + // passed in shared info, rather than creating a new one. 
+ feedback_vector_ = Handle<FixedArray>(shared_info()->feedback_vector(), + isolate); + } } CompilationInfo::~CompilationInfo() { delete deferred_handles_; delete no_frame_ranges_; + if (ast_value_factory_owned_) delete ast_value_factory_; #ifdef DEBUG // Check that no dependent maps have been added or added dependent maps have // been rolled back or committed. for (int i = 0; i < DependentCode::kGroupCount; i++) { - ASSERT_EQ(NULL, dependencies_[i]); + DCHECK_EQ(NULL, dependencies_[i]); } #endif // DEBUG } @@ -161,7 +180,7 @@ for (int i = 0; i < DependentCode::kGroupCount; i++) { ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i]; if (group_objects == NULL) continue; - ASSERT(!object_wrapper_.is_null()); + DCHECK(!object_wrapper_.is_null()); for (int j = 0; j < group_objects->length(); j++) { DependentCode::DependencyGroup group = static_cast<DependentCode::DependencyGroup>(i); @@ -193,7 +212,7 @@ int CompilationInfo::num_parameters() const { if (IsStub()) { - ASSERT(parameter_count_ > 0); + DCHECK(parameter_count_ > 0); return parameter_count_; } else { return scope()->num_parameters(); @@ -247,9 +266,15 @@ void CompilationInfo::PrepareForCompilation(Scope* scope) { - ASSERT(scope_ == NULL); + DCHECK(scope_ == NULL); scope_ = scope; - function()->ProcessFeedbackSlots(isolate_); + + int length = function()->slot_count(); + if (feedback_vector_.is_null()) { + // Allocate the feedback vector too. + feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(length); + } + DCHECK(feedback_vector_->length() == length); } @@ -289,43 +314,28 @@ }; -// Determine whether to use the full compiler for all code. If the flag -// --always-full-compiler is specified this is the case. For the virtual frame -// based compiler the full compiler is also used if a debugger is connected, as -// the code from the full compiler supports mode precise break points. For the -// crankshaft adaptive compiler debugging the optimized code is not possible at -// all. 
However crankshaft support recompilation of functions, so in this case -// the full compiler need not be be used if a debugger is attached, but only if -// break points has actually been set. -static bool IsDebuggerActive(Isolate* isolate) { -#ifdef ENABLE_DEBUGGER_SUPPORT - return isolate->use_crankshaft() ? - isolate->debug()->has_break_points() : - isolate->debugger()->IsDebuggerActive(); -#else - return false; -#endif -} - - OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() { - ASSERT(isolate()->use_crankshaft()); - ASSERT(info()->IsOptimizing()); - ASSERT(!info()->IsCompilingForDebugging()); + DCHECK(isolate()->use_crankshaft()); + DCHECK(info()->IsOptimizing()); + DCHECK(!info()->IsCompilingForDebugging()); // We should never arrive here if there is no code object on the // shared function object. - ASSERT(info()->shared_info()->code()->kind() == Code::FUNCTION); + DCHECK(info()->shared_info()->code()->kind() == Code::FUNCTION); // We should never arrive here if optimization has been disabled on the // shared function info. - ASSERT(!info()->shared_info()->optimization_disabled()); + DCHECK(!info()->shared_info()->optimization_disabled()); // Fall back to using the full code generator if it's not possible // to use the Hydrogen-based optimizing compiler. We already have // generated code for this from the shared function object. if (FLAG_always_full_compiler) return AbortOptimization(); - if (IsDebuggerActive(isolate())) return AbortOptimization(kDebuggerIsActive); + + // Do not use crankshaft if we need to be able to set break points. + if (isolate()->DebuggerHasBreakPoints()) { + return AbortOptimization(kDebuggerHasBreakPoints); + } // Limit the number of times we re-compile a functions with // the optimizing compiler. @@ -354,18 +364,23 @@ return AbortAndDisableOptimization(kTooManyParametersLocals); } - // Take --hydrogen-filter into account. 
+ if (scope->HasIllegalRedeclaration()) { + return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration); + } + + // Check the whitelist for Crankshaft. if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) { return AbortOptimization(kHydrogenFilter); } + // Crankshaft requires a version of fullcode with deoptimization support. // Recompile the unoptimized version of the code if the current version - // doesn't have deoptimization support. Alternatively, we may decide to - // run the full code generator to get a baseline for the compile-time - // performance of the hydrogen-based compiler. + // doesn't have deoptimization support already. + // Otherwise, if we are gathering compilation time and space statistics + // for hydrogen, gather baseline statistics for a fullcode compilation. bool should_recompile = !info()->shared_info()->has_deoptimization_support(); if (should_recompile || FLAG_hydrogen_stats) { - ElapsedTimer timer; + base::ElapsedTimer timer; if (FLAG_hydrogen_stats) { timer.Start(); } @@ -390,13 +405,21 @@ } } - // Check that the unoptimized, shared code is ready for - // optimizations. When using the always_opt flag we disregard the - // optimizable marker in the code object and optimize anyway. This - // is safe as long as the unoptimized code has deoptimization - // support. - ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable()); - ASSERT(info()->shared_info()->has_deoptimization_support()); + DCHECK(info()->shared_info()->has_deoptimization_support()); + + // Check the whitelist for TurboFan. + if (info()->closure()->PassesFilter(FLAG_turbo_filter) && + // TODO(turbofan): Make try-catch work and remove this bailout. + info()->function()->dont_optimize_reason() != kTryCatchStatement && + info()->function()->dont_optimize_reason() != kTryFinallyStatement && + // TODO(turbofan): Make OSR work and remove this bailout. 
+ !info()->is_osr()) { + compiler::Pipeline pipeline(info()); + pipeline.GenerateCode(); + if (!info()->code().is_null()) { + return SetLastStatus(SUCCEEDED); + } + } if (FLAG_trace_hydrogen) { Handle<String> name = info()->function()->debug_name(); @@ -408,7 +431,7 @@ // Type-check the function. AstTyper::Run(info()); - graph_builder_ = FLAG_hydrogen_track_positions + graph_builder_ = (FLAG_hydrogen_track_positions || FLAG_trace_ic) ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info()) : new(info()->zone()) HOptimizedGraphBuilder(info()); @@ -423,7 +446,7 @@ // The function being compiled may have bailed out due to an inline // candidate bailing out. In such a case, we don't disable // optimization on the shared_info. - ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL); + DCHECK(!graph_builder_->inline_bailout() || graph_ == NULL); if (graph_ == NULL) { if (graph_builder_->inline_bailout()) { return AbortOptimization(); @@ -446,9 +469,14 @@ DisallowHandleDereference no_deref; DisallowCodeDependencyChange no_dependency_change; - ASSERT(last_status() == SUCCEEDED); + DCHECK(last_status() == SUCCEEDED); + // TODO(turbofan): Currently everything is done in the first phase. + if (!info()->code().is_null()) { + return last_status(); + } + Timer t(this, &time_taken_to_optimize_); - ASSERT(graph_ != NULL); + DCHECK(graph_ != NULL); BailoutReason bailout_reason = kNoReason; if (graph_->Optimize(&bailout_reason)) { @@ -463,13 +491,20 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() { - ASSERT(last_status() == SUCCEEDED); - ASSERT(!info()->HasAbortedDueToDependencyChange()); + DCHECK(last_status() == SUCCEEDED); + // TODO(turbofan): Currently everything is done in the first phase. 
+ if (!info()->code().is_null()) { + RecordOptimizationStats(); + return last_status(); + } + + DCHECK(!info()->HasAbortedDueToDependencyChange()); DisallowCodeDependencyChange no_dependency_change; + DisallowJavascriptExecution no_js(isolate()); { // Scope for timer. Timer timer(this, &time_taken_to_codegen_); - ASSERT(chunk_ != NULL); - ASSERT(graph_ != NULL); + DCHECK(chunk_ != NULL); + DCHECK(graph_ != NULL); // Deferred handles reference objects that were accessible during // graph creation. To make sure that we don't encounter inconsistencies // between graph creation and code generation, we disallow accessing @@ -479,6 +514,20 @@ if (optimized_code.is_null()) { if (info()->bailout_reason() == kNoReason) { info_->set_bailout_reason(kCodeGenerationFailed); + } else if (info()->bailout_reason() == kMapBecameDeprecated) { + if (FLAG_trace_opt) { + PrintF("[aborted optimizing "); + info()->closure()->ShortPrint(); + PrintF(" because a map became deprecated]\n"); + } + return AbortOptimization(); + } else if (info()->bailout_reason() == kMapBecameUnstable) { + if (FLAG_trace_opt) { + PrintF("[aborted optimizing "); + info()->closure()->ShortPrint(); + PrintF(" because a map became unstable]\n"); + } + return AbortOptimization(); } return AbortAndDisableOptimization(); } @@ -531,9 +580,6 @@ // Sets the expected number of properties based on estimate from compiler. void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, int estimate) { - // See the comment in SetExpectedNofProperties. - if (shared->live_objects_may_exist()) return; - // If no properties are added in the constructor, they are more likely // to be added later. if (estimate == 0) estimate = 2; @@ -541,7 +587,7 @@ // TODO(yangguo): check whether those heuristics are still up-to-date. // We do not shrink objects that go into a snapshot (yet), so we adjust // the estimate conservatively. 
- if (Serializer::enabled()) { + if (shared->GetIsolate()->serializer_enabled()) { estimate += 2; } else if (FLAG_clever_optimizations) { // Inobject slack tracking will reclaim redundant inobject space later, @@ -559,7 +605,7 @@ // Update the shared function info with the compiled code and the // scope info. Please note, that the order of the shared function // info initialization is important since set_scope_info might - // trigger a GC, causing the ASSERT below to be invalid if the code + // trigger a GC, causing the DCHECK below to be invalid if the code // was flushed. By setting the code object last we avoid this. Handle<SharedFunctionInfo> shared = info->shared_info(); Handle<ScopeInfo> scope_info = @@ -571,15 +617,16 @@ shared->ReplaceCode(*code); if (shared->optimization_disabled()) code->set_optimizable(false); + shared->set_feedback_vector(*info->feedback_vector()); + // Set the expected number of properties for instances. FunctionLiteral* lit = info->function(); int expected = lit->expected_property_count(); SetExpectedNofPropertiesFromEstimate(shared, expected); // Check the function has compiled code. 
- ASSERT(shared->is_compiled()); - shared->set_dont_optimize_reason(lit->dont_optimize_reason()); - shared->set_dont_inline(lit->flags()->Contains(kDontInline)); + DCHECK(shared->is_compiled()); + shared->set_bailout_reason(lit->dont_optimize_reason()); shared->set_ast_node_count(lit->ast_node_count()); shared->set_strict_mode(lit->strict_mode()); } @@ -611,18 +658,19 @@ function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters()); function_info->set_ast_node_count(lit->ast_node_count()); function_info->set_is_function(lit->is_function()); - function_info->set_dont_optimize_reason(lit->dont_optimize_reason()); - function_info->set_dont_inline(lit->flags()->Contains(kDontInline)); + function_info->set_bailout_reason(lit->dont_optimize_reason()); function_info->set_dont_cache(lit->flags()->Contains(kDontCache)); function_info->set_is_generator(lit->is_generator()); + function_info->set_is_arrow(lit->is_arrow()); } static bool CompileUnoptimizedCode(CompilationInfo* info) { - ASSERT(info->function() != NULL); + DCHECK(AllowCompilation::IsAllowed(info->isolate())); + DCHECK(info->function() != NULL); if (!Rewriter::Rewrite(info)) return false; if (!Scope::Analyze(info)) return false; - ASSERT(info->scope() != NULL); + DCHECK(info->scope() != NULL); if (!FullCodeGenerator::MakeCode(info)) { Isolate* isolate = info->isolate(); @@ -633,69 +681,74 @@ } -static Handle<Code> GetUnoptimizedCodeCommon(CompilationInfo* info) { +MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon( + CompilationInfo* info) { VMState<COMPILER> state(info->isolate()); PostponeInterruptsScope postpone(info->isolate()); - if (!Parser::Parse(info)) return Handle<Code>::null(); + if (!Parser::Parse(info)) return MaybeHandle<Code>(); info->SetStrictMode(info->function()->strict_mode()); - if (!CompileUnoptimizedCode(info)) return Handle<Code>::null(); + if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>(); Compiler::RecordFunctionCompilation( 
Logger::LAZY_COMPILE_TAG, info, info->shared_info()); UpdateSharedFunctionInfo(info); - ASSERT_EQ(Code::FUNCTION, info->code()->kind()); + DCHECK_EQ(Code::FUNCTION, info->code()->kind()); return info->code(); } -Handle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) { - ASSERT(!function->GetIsolate()->has_pending_exception()); - ASSERT(!function->is_compiled()); +MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) { + DCHECK(!function->GetIsolate()->has_pending_exception()); + DCHECK(!function->is_compiled()); if (function->shared()->is_compiled()) { return Handle<Code>(function->shared()->code()); } CompilationInfoWithZone info(function); - Handle<Code> result = GetUnoptimizedCodeCommon(&info); - ASSERT_EQ(result.is_null(), info.isolate()->has_pending_exception()); + Handle<Code> result; + ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result, + GetUnoptimizedCodeCommon(&info), + Code); if (FLAG_always_opt && - !result.is_null() && info.isolate()->use_crankshaft() && !info.shared_info()->optimization_disabled() && !info.isolate()->DebuggerHasBreakPoints()) { - Handle<Code> opt_code = Compiler::GetOptimizedCode( - function, result, Compiler::NOT_CONCURRENT); - if (!opt_code.is_null()) result = opt_code; + Handle<Code> opt_code; + if (Compiler::GetOptimizedCode( + function, result, + Compiler::NOT_CONCURRENT).ToHandle(&opt_code)) { + result = opt_code; + } } return result; } -Handle<Code> Compiler::GetUnoptimizedCode(Handle<SharedFunctionInfo> shared) { - ASSERT(!shared->GetIsolate()->has_pending_exception()); - ASSERT(!shared->is_compiled()); +MaybeHandle<Code> Compiler::GetUnoptimizedCode( + Handle<SharedFunctionInfo> shared) { + DCHECK(!shared->GetIsolate()->has_pending_exception()); + DCHECK(!shared->is_compiled()); CompilationInfoWithZone info(shared); - Handle<Code> result = GetUnoptimizedCodeCommon(&info); - ASSERT_EQ(result.is_null(), info.isolate()->has_pending_exception()); - return result; + return 
GetUnoptimizedCodeCommon(&info); } bool Compiler::EnsureCompiled(Handle<JSFunction> function, ClearExceptionFlag flag) { if (function->is_compiled()) return true; - Handle<Code> code = Compiler::GetUnoptimizedCode(function); - if (code.is_null()) { + MaybeHandle<Code> maybe_code = Compiler::GetUnoptimizedCode(function); + Handle<Code> code; + if (!maybe_code.ToHandle(&code)) { if (flag == CLEAR_EXCEPTION) { function->GetIsolate()->clear_pending_exception(); } return false; } function->ReplaceCode(*code); - ASSERT(function->is_compiled()); + DCHECK(function->is_compiled()); return true; } @@ -709,15 +762,17 @@ // full code without debug break slots to full code with debug break slots // depends on the generated code is otherwise exactly the same. // If compilation fails, just keep the existing code. -Handle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) { +MaybeHandle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) { CompilationInfoWithZone info(function); Isolate* isolate = info.isolate(); VMState<COMPILER> state(isolate); - ASSERT(!isolate->has_pending_exception()); + info.MarkAsDebug(); + + DCHECK(!isolate->has_pending_exception()); Handle<Code> old_code(function->shared()->code()); - ASSERT(old_code->kind() == Code::FUNCTION); - ASSERT(!old_code->has_debug_break_slots()); + DCHECK(old_code->kind() == Code::FUNCTION); + DCHECK(!old_code->has_debug_break_slots()); info.MarkCompilingForDebugging(); if (old_code->is_compiled_optimizable()) { @@ -725,18 +780,18 @@ } else { info.MarkNonOptimizable(); } - Handle<Code> new_code = GetUnoptimizedCodeCommon(&info); - if (new_code.is_null()) { + MaybeHandle<Code> maybe_new_code = GetUnoptimizedCodeCommon(&info); + Handle<Code> new_code; + if (!maybe_new_code.ToHandle(&new_code)) { isolate->clear_pending_exception(); } else { - ASSERT_EQ(old_code->is_compiled_optimizable(), + DCHECK_EQ(old_code->is_compiled_optimizable(), new_code->is_compiled_optimizable()); } - return new_code; + return 
maybe_new_code; } -#ifdef ENABLE_DEBUGGER_SUPPORT void Compiler::CompileForLiveEdit(Handle<Script> script) { // TODO(635): support extensions. CompilationInfoWithZone info(script); @@ -756,7 +811,6 @@ } tracker.RecordRootFunctionInfo(info.code()); } -#endif static bool DebuggerWantsEagerCompilation(CompilationInfo* info, @@ -769,32 +823,32 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) { Isolate* isolate = info->isolate(); PostponeInterruptsScope postpone(isolate); - ASSERT(!isolate->native_context().is_null()); + DCHECK(!isolate->native_context().is_null()); Handle<Script> script = info->script(); // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile? FixedArray* array = isolate->native_context()->embedder_data(); script->set_context_data(array->get(0)); -#ifdef ENABLE_DEBUGGER_SUPPORT - isolate->debugger()->OnBeforeCompile(script); -#endif + isolate->debug()->OnBeforeCompile(script); - ASSERT(info->is_eval() || info->is_global()); + DCHECK(info->is_eval() || info->is_global()); bool parse_allow_lazy = - (info->cached_data_mode() == CONSUME_CACHED_DATA || + (info->compile_options() == ScriptCompiler::kConsumeParserCache || String::cast(script->source())->length() > FLAG_min_preparse_length) && !DebuggerWantsEagerCompilation(info); - if (!parse_allow_lazy && info->cached_data_mode() != NO_CACHED_DATA) { + if (!parse_allow_lazy && + (info->compile_options() == ScriptCompiler::kProduceParserCache || + info->compile_options() == ScriptCompiler::kConsumeParserCache)) { // We are going to parse eagerly, but we either 1) have cached data produced // by lazy parsing or 2) are asked to generate cached data. We cannot use // the existing data, since it won't contain all the symbols we need for // eager parsing. In addition, it doesn't make sense to produce the data // when parsing eagerly. That data would contain all symbols, but no // functions, so it cannot be used to aid lazy parsing later. 
- info->SetCachedData(NULL, NO_CACHED_DATA); + info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions); } Handle<SharedFunctionInfo> result; @@ -821,15 +875,14 @@ } // Allocate function. - ASSERT(!info->code().is_null()); + DCHECK(!info->code().is_null()); result = isolate->factory()->NewSharedFunctionInfo( - lit->name(), - lit->materialized_literal_count(), - lit->is_generator(), - info->code(), - ScopeInfo::Create(info->scope(), info->zone())); + lit->name(), lit->materialized_literal_count(), lit->is_generator(), + lit->is_arrow(), info->code(), + ScopeInfo::Create(info->scope(), info->zone()), + info->feedback_vector()); - ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); + DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position()); SetFunctionInfo(result, lit, true, script); Handle<String> script_name = script->name()->IsString() @@ -854,29 +907,30 @@ live_edit_tracker.RecordFunctionInfo(result, lit, info->zone()); } -#ifdef ENABLE_DEBUGGER_SUPPORT - isolate->debugger()->OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS); -#endif + isolate->debug()->OnAfterCompile(script); return result; } -Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source, - Handle<Context> context, - StrictMode strict_mode, - ParseRestriction restriction, - int scope_position) { +MaybeHandle<JSFunction> Compiler::GetFunctionFromEval( + Handle<String> source, + Handle<Context> context, + StrictMode strict_mode, + ParseRestriction restriction, + int scope_position) { Isolate* isolate = source->GetIsolate(); int source_length = source->length(); isolate->counters()->total_eval_size()->Increment(source_length); isolate->counters()->total_compile_size()->Increment(source_length); CompilationCache* compilation_cache = isolate->compilation_cache(); - Handle<SharedFunctionInfo> shared_info = compilation_cache->LookupEval( - source, context, strict_mode, scope_position); + MaybeHandle<SharedFunctionInfo> maybe_shared_info = + 
compilation_cache->LookupEval(source, context, strict_mode, + scope_position); + Handle<SharedFunctionInfo> shared_info; - if (shared_info.is_null()) { + if (!maybe_shared_info.ToHandle(&shared_info)) { Handle<Script> script = isolate->factory()->NewScript(source); CompilationInfoWithZone info(script); info.MarkAsEval(); @@ -885,21 +939,19 @@ info.SetParseRestriction(restriction); info.SetContext(context); -#if ENABLE_DEBUGGER_SUPPORT Debug::RecordEvalCaller(script); -#endif // ENABLE_DEBUGGER_SUPPORT shared_info = CompileToplevel(&info); if (shared_info.is_null()) { - return Handle<JSFunction>::null(); + return MaybeHandle<JSFunction>(); } else { // Explicitly disable optimization for eval code. We're not yet prepared // to handle eval-code in the optimizing compiler. shared_info->DisableOptimization(kEval); // If caller is strict mode, the result must be in strict mode as well. - ASSERT(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT); + DCHECK(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT); if (!shared_info->dont_cache()) { compilation_cache->PutEval( source, context, shared_info, scope_position); @@ -915,23 +967,21 @@ Handle<SharedFunctionInfo> Compiler::CompileScript( - Handle<String> source, - Handle<Object> script_name, - int line_offset, - int column_offset, - bool is_shared_cross_origin, - Handle<Context> context, - v8::Extension* extension, - ScriptDataImpl** cached_data, - CachedDataMode cached_data_mode, - NativesFlag natives) { - if (cached_data_mode == NO_CACHED_DATA) { + Handle<String> source, Handle<Object> script_name, int line_offset, + int column_offset, bool is_shared_cross_origin, Handle<Context> context, + v8::Extension* extension, ScriptData** cached_data, + ScriptCompiler::CompileOptions compile_options, NativesFlag natives) { + if (compile_options == ScriptCompiler::kNoCompileOptions) { cached_data = NULL; - } else if (cached_data_mode == PRODUCE_CACHED_DATA) { - ASSERT(cached_data && !*cached_data); + } else 
if (compile_options == ScriptCompiler::kProduceParserCache || + compile_options == ScriptCompiler::kProduceCodeCache) { + DCHECK(cached_data && !*cached_data); + DCHECK(extension == NULL); } else { - ASSERT(cached_data_mode == CONSUME_CACHED_DATA); - ASSERT(cached_data && *cached_data); + DCHECK(compile_options == ScriptCompiler::kConsumeParserCache || + compile_options == ScriptCompiler::kConsumeCodeCache); + DCHECK(cached_data && *cached_data); + DCHECK(extension == NULL); } Isolate* isolate = source->GetIsolate(); int source_length = source->length(); @@ -941,25 +991,28 @@ CompilationCache* compilation_cache = isolate->compilation_cache(); // Do a lookup in the compilation cache but not for extensions. + MaybeHandle<SharedFunctionInfo> maybe_result; Handle<SharedFunctionInfo> result; if (extension == NULL) { - result = compilation_cache->LookupScript(source, - script_name, - line_offset, - column_offset, - is_shared_cross_origin, - context); - } - - if (result.is_null()) { - // No cache entry found. Do pre-parsing, if it makes sense, and compile - // the script. - // Building preparse data that is only used immediately after is only a - // saving if we might skip building the AST for lazily compiled functions. - // I.e., preparse data isn't relevant when the lazy flag is off, and - // for small sources, odds are that there aren't many functions - // that would be compiled lazily anyway, so we skip the preparse step - // in that case too. 
+ if (FLAG_serialize_toplevel && + compile_options == ScriptCompiler::kConsumeCodeCache && + !isolate->debug()->is_loaded()) { + return CodeSerializer::Deserialize(isolate, *cached_data, source); + } else { + maybe_result = compilation_cache->LookupScript( + source, script_name, line_offset, column_offset, + is_shared_cross_origin, context); + } + } + + base::ElapsedTimer timer; + if (FLAG_profile_deserialization && FLAG_serialize_toplevel && + compile_options == ScriptCompiler::kProduceCodeCache) { + timer.Start(); + } + + if (!maybe_result.ToHandle(&result)) { + // No cache entry found. Compile the script. // Create a script object describing the script to be compiled. Handle<Script> script = isolate->factory()->NewScript(source); @@ -976,30 +1029,45 @@ // Compile the function and add it to the cache. CompilationInfoWithZone info(script); info.MarkAsGlobal(); + info.SetCachedData(cached_data, compile_options); info.SetExtension(extension); - info.SetCachedData(cached_data, cached_data_mode); info.SetContext(context); + if (FLAG_serialize_toplevel && + compile_options == ScriptCompiler::kProduceCodeCache) { + info.PrepareForSerializing(); + } if (FLAG_use_strict) info.SetStrictMode(STRICT); + result = CompileToplevel(&info); if (extension == NULL && !result.is_null() && !result->dont_cache()) { compilation_cache->PutScript(source, context, result); + if (FLAG_serialize_toplevel && + compile_options == ScriptCompiler::kProduceCodeCache) { + *cached_data = CodeSerializer::Serialize(isolate, result, source); + if (FLAG_profile_deserialization) { + PrintF("[Compiling and serializing %d bytes took %0.3f ms]\n", + (*cached_data)->length(), timer.Elapsed().InMillisecondsF()); + } + } } + + if (result.is_null()) isolate->ReportPendingMessages(); } else if (result->ic_age() != isolate->heap()->global_ic_age()) { - result->ResetForNewContext(isolate->heap()->global_ic_age()); + result->ResetForNewContext(isolate->heap()->global_ic_age()); } - - if (result.is_null()) 
isolate->ReportPendingMessages(); return result; } -Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, - Handle<Script> script) { +Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo( + FunctionLiteral* literal, Handle<Script> script, + CompilationInfo* outer_info) { // Precondition: code has been parsed and scopes have been analyzed. CompilationInfoWithZone info(script); info.SetFunction(literal); info.PrepareForCompilation(literal->scope()); info.SetStrictMode(literal->scope()->strict_mode()); + if (outer_info->will_serialize()) info.PrepareForSerializing(); Isolate* isolate = info.isolate(); Factory* factory = isolate->factory(); @@ -1024,19 +1092,17 @@ info.SetCode(code); scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate)); } else if (FullCodeGenerator::MakeCode(&info)) { - ASSERT(!info.code().is_null()); + DCHECK(!info.code().is_null()); scope_info = ScopeInfo::Create(info.scope(), info.zone()); } else { return Handle<SharedFunctionInfo>::null(); } // Create a shared function info object. 
- Handle<SharedFunctionInfo> result = - factory->NewSharedFunctionInfo(literal->name(), - literal->materialized_literal_count(), - literal->is_generator(), - info.code(), - scope_info); + Handle<SharedFunctionInfo> result = factory->NewSharedFunctionInfo( + literal->name(), literal->materialized_literal_count(), + literal->is_generator(), literal->is_arrow(), info.code(), scope_info, + info.feedback_vector()); SetFunctionInfo(result, literal, false, script); RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result); result->set_allows_lazy_compilation(allow_lazy); @@ -1051,10 +1117,13 @@ } -static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function, - BailoutId osr_ast_id) { +MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap( + Handle<JSFunction> function, + BailoutId osr_ast_id) { if (FLAG_cache_optimized_code) { Handle<SharedFunctionInfo> shared(function->shared()); + // Bound functions are not cached. + if (shared->bound()) return MaybeHandle<Code>(); DisallowHeapAllocation no_gc; int index = shared->SearchOptimizedCodeMap( function->context()->native_context(), osr_ast_id); @@ -1072,7 +1141,7 @@ return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index)); } } - return Handle<Code>::null(); + return MaybeHandle<Code>(); } @@ -1080,10 +1149,15 @@ Handle<Code> code = info->code(); if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do. + // Context specialization folds-in the context, so no sharing can occur. + if (code->is_turbofanned() && FLAG_context_specialization) return; + // Cache optimized code. if (FLAG_cache_optimized_code) { Handle<JSFunction> function = info->closure(); Handle<SharedFunctionInfo> shared(function->shared()); + // Do not cache bound functions. 
+ if (shared->bound()) return; Handle<FixedArray> literals(function->literals()); Handle<Context> native_context(function->context()->native_context()); SharedFunctionInfo::AddToOptimizedCodeMap( @@ -1098,7 +1172,7 @@ if (!Rewriter::Rewrite(info)) return false; if (!Scope::Analyze(info)) return false; - ASSERT(info->scope() != NULL); + DCHECK(info->scope() != NULL); return true; } @@ -1106,8 +1180,7 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) { if (!CompileOptimizedPrologue(info)) return false; - Logger::TimerEventScope timer( - info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous); + TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate()); OptimizedCompileJob job(info); if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false; @@ -1115,7 +1188,7 @@ if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false; // Success! - ASSERT(!info->isolate()->has_pending_exception()); + DCHECK(!info->isolate()->has_pending_exception()); InsertCodeIntoOptimizedCodeMap(info); Compiler::RecordFunctionCompilation( Logger::LAZY_COMPILE_TAG, info, info->shared_info()); @@ -1138,8 +1211,7 @@ if (!CompileOptimizedPrologue(info)) return false; info->SaveHandles(); // Copy handles to the compilation handle scope. 
- Logger::TimerEventScope timer( - isolate, Logger::TimerEventScope::v8_recompile_synchronous); + TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate()); OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info); OptimizedCompileJob::Status status = job->CreateGraph(); @@ -1159,21 +1231,25 @@ } -Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function, - Handle<Code> current_code, - ConcurrencyMode mode, - BailoutId osr_ast_id) { - Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function, osr_ast_id); - if (!cached_code.is_null()) return cached_code; +MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function, + Handle<Code> current_code, + ConcurrencyMode mode, + BailoutId osr_ast_id) { + Handle<Code> cached_code; + if (GetCodeFromOptimizedCodeMap( + function, osr_ast_id).ToHandle(&cached_code)) { + return cached_code; + } SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function)); Isolate* isolate = info->isolate(); + DCHECK(AllowCompilation::IsAllowed(isolate)); VMState<COMPILER> state(isolate); - ASSERT(!isolate->has_pending_exception()); + DCHECK(!isolate->has_pending_exception()); PostponeInterruptsScope postpone(isolate); Handle<SharedFunctionInfo> shared = info->shared_info(); - ASSERT_NE(ScopeInfo::Empty(isolate), shared->scope_info()); + DCHECK_NE(ScopeInfo::Empty(isolate), shared->scope_info()); int compiled_size = shared->end_position() - shared->start_position(); isolate->counters()->total_compile_size()->Increment(compiled_size); current_code->set_profiler_ticks(0); @@ -1197,7 +1273,7 @@ } if (isolate->has_pending_exception()) isolate->clear_pending_exception(); - return Handle<Code>::null(); + return MaybeHandle<Code>(); } @@ -1208,8 +1284,7 @@ Isolate* isolate = info->isolate(); VMState<COMPILER> state(isolate); - Logger::TimerEventScope timer( - isolate, Logger::TimerEventScope::v8_recompile_synchronous); + TimerEventScope<TimerEventRecompileSynchronous> 
timer(info->isolate()); Handle<SharedFunctionInfo> shared = info->shared_info(); shared->code()->set_profiler_ticks(0); @@ -1261,12 +1336,13 @@ info->isolate()->cpu_profiler()->is_profiling()) { Handle<Script> script = info->script(); Handle<Code> code = info->code(); - if (code.is_identical_to(info->isolate()->builtins()->CompileUnoptimized())) + if (code.is_identical_to( + info->isolate()->builtins()->CompileUnoptimized())) { return; - int line_num = GetScriptLineNumber(script, shared->start_position()) + 1; + } + int line_num = Script::GetLineNumber(script, shared->start_position()) + 1; int column_num = - GetScriptColumnNumber(script, shared->start_position()) + 1; - USE(line_num); + Script::GetColumnNumber(script, shared->start_position()) + 1; String* script_name = script->name()->IsString() ? String::cast(script->name()) : info->isolate()->heap()->empty_string(); @@ -1309,7 +1385,7 @@ : (FLAG_trace_hydrogen && info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter)); return (tracing_on && - OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL); + base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/compiler.h nodejs-0.11.15/deps/v8/src/compiler.h --- nodejs-0.11.13/deps/v8/src/compiler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_COMPILER_H_ #define V8_COMPILER_H_ -#include "allocation.h" -#include "ast.h" -#include "zone.h" +#include "src/allocation.h" +#include "src/ast.h" +#include "src/zone.h" namespace v8 { namespace internal { -class ScriptDataImpl; +class AstValueFactory; class HydrogenCodeStub; // ParseRestriction is used to restrict the set of valid statements in a @@ -45,23 +22,48 @@ ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression. 
}; -enum CachedDataMode { - NO_CACHED_DATA, - CONSUME_CACHED_DATA, - PRODUCE_CACHED_DATA -}; - struct OffsetRange { OffsetRange(int from, int to) : from(from), to(to) {} int from; int to; }; + +class ScriptData { + public: + ScriptData(const byte* data, int length); + ~ScriptData() { + if (owns_data_) DeleteArray(data_); + } + + const byte* data() const { return data_; } + int length() const { return length_; } + + void AcquireDataOwnership() { + DCHECK(!owns_data_); + owns_data_ = true; + } + + void ReleaseDataOwnership() { + DCHECK(owns_data_); + owns_data_ = false; + } + + private: + bool owns_data_; + const byte* data_; + int length_; + + DISALLOW_COPY_AND_ASSIGN(ScriptData); +}; + + // CompilationInfo encapsulates some information known at compile time. It // is constructed based on the resources available at compile-time. class CompilationInfo { public: CompilationInfo(Handle<JSFunction> closure, Zone* zone); + CompilationInfo(Isolate* isolate, Zone* zone); virtual ~CompilationInfo(); Isolate* isolate() const { @@ -73,7 +75,6 @@ bool is_eval() const { return IsEval::decode(flags_); } bool is_global() const { return IsGlobal::decode(flags_); } StrictMode strict_mode() const { return StrictModeField::decode(flags_); } - bool is_in_loop() const { return IsInLoop::decode(flags_); } FunctionLiteral* function() const { return function_; } Scope* scope() const { return scope_; } Scope* global_scope() const { return global_scope_; } @@ -83,9 +84,9 @@ Handle<Script> script() const { return script_; } HydrogenCodeStub* code_stub() const {return code_stub_; } v8::Extension* extension() const { return extension_; } - ScriptDataImpl** cached_data() const { return cached_data_; } - CachedDataMode cached_data_mode() const { - return cached_data_mode_; + ScriptData** cached_data() const { return cached_data_; } + ScriptCompiler::CompileOptions compile_options() const { + return compile_options_; } Handle<Context> context() const { return context_; } BailoutId osr_ast_id() 
const { return osr_ast_id_; } @@ -96,32 +97,33 @@ Code::Flags flags() const; void MarkAsEval() { - ASSERT(!is_lazy()); + DCHECK(!is_lazy()); flags_ |= IsEval::encode(true); } + void MarkAsGlobal() { - ASSERT(!is_lazy()); + DCHECK(!is_lazy()); flags_ |= IsGlobal::encode(true); } + void set_parameter_count(int parameter_count) { - ASSERT(IsStub()); + DCHECK(IsStub()); parameter_count_ = parameter_count; } void set_this_has_uses(bool has_no_uses) { this_has_uses_ = has_no_uses; } + bool this_has_uses() { return this_has_uses_; } + void SetStrictMode(StrictMode strict_mode) { - ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode); + DCHECK(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode); flags_ = StrictModeField::update(flags_, strict_mode); } - void MarkAsInLoop() { - ASSERT(is_lazy()); - flags_ |= IsInLoop::encode(true); - } + void MarkAsNative() { flags_ |= IsNative::encode(true); } @@ -166,6 +168,33 @@ return RequiresFrame::decode(flags_); } + void MarkMustNotHaveEagerFrame() { + flags_ |= MustNotHaveEagerFrame::encode(true); + } + + bool GetMustNotHaveEagerFrame() const { + return MustNotHaveEagerFrame::decode(flags_); + } + + void MarkAsDebug() { + flags_ |= IsDebug::encode(true); + } + + bool is_debug() const { + return IsDebug::decode(flags_); + } + + void PrepareForSerializing() { + flags_ |= PrepareForSerializing::encode(true); + } + + bool will_serialize() const { return PrepareForSerializing::decode(flags_); } + + bool IsCodePreAgingActive() const { + return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() && + !is_debug(); + } + void SetParseRestriction(ParseRestriction restriction) { flags_ = ParseRestricitonField::update(flags_, restriction); } @@ -175,27 +204,29 @@ } void SetFunction(FunctionLiteral* literal) { - ASSERT(function_ == NULL); + DCHECK(function_ == NULL); function_ = literal; } - // When the scope is applied, we may have deferred work to do on the function. 
void PrepareForCompilation(Scope* scope); void SetGlobalScope(Scope* global_scope) { - ASSERT(global_scope_ == NULL); + DCHECK(global_scope_ == NULL); global_scope_ = global_scope; } + Handle<FixedArray> feedback_vector() const { + return feedback_vector_; + } void SetCode(Handle<Code> code) { code_ = code; } void SetExtension(v8::Extension* extension) { - ASSERT(!is_lazy()); + DCHECK(!is_lazy()); extension_ = extension; } - void SetCachedData(ScriptDataImpl** cached_data, - CachedDataMode cached_data_mode) { - cached_data_mode_ = cached_data_mode; - if (cached_data_mode == NO_CACHED_DATA) { + void SetCachedData(ScriptData** cached_data, + ScriptCompiler::CompileOptions compile_options) { + compile_options_ = compile_options; + if (compile_options == ScriptCompiler::kNoCompileOptions) { cached_data_ = NULL; } else { - ASSERT(!is_lazy()); + DCHECK(!is_lazy()); cached_data_ = cached_data; } } @@ -232,7 +263,7 @@ bool IsOptimizable() const { return mode_ == BASE; } bool IsStub() const { return mode_ == STUB; } void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) { - ASSERT(!shared_info_.is_null()); + DCHECK(!shared_info_.is_null()); SetMode(OPTIMIZE); osr_ast_id_ = osr_ast_id; unoptimized_code_ = unoptimized; @@ -245,7 +276,7 @@ return SupportsDeoptimization::decode(flags_); } void EnableDeoptimizationSupport() { - ASSERT(IsOptimizable()); + DCHECK(IsOptimizable()); flags_ |= SupportsDeoptimization::encode(true); } @@ -253,7 +284,7 @@ bool ShouldSelfOptimize(); void set_deferred_handles(DeferredHandles* deferred_handles) { - ASSERT(deferred_handles_ == NULL); + DCHECK(deferred_handles_ == NULL); deferred_handles_ = deferred_handles; } @@ -281,12 +312,12 @@ void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; } int prologue_offset() const { - ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_); + DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_); return prologue_offset_; } void set_prologue_offset(int prologue_offset) { - 
ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_); + DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_); prologue_offset_ = prologue_offset; } @@ -312,12 +343,12 @@ } void AbortDueToDependencyChange() { - ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate())); + DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate())); abort_due_to_dependency_ = true; } bool HasAbortedDueToDependencyChange() { - ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate())); + DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate())); return abort_due_to_dependency_; } @@ -327,6 +358,13 @@ int optimization_id() const { return optimization_id_; } + AstValueFactory* ast_value_factory() const { return ast_value_factory_; } + void SetAstValueFactory(AstValueFactory* ast_value_factory, + bool owned = true) { + ast_value_factory_ = ast_value_factory; + ast_value_factory_owned_ = owned; + } + protected: CompilationInfo(Handle<Script> script, Zone* zone); @@ -354,7 +392,6 @@ void Initialize(Isolate* isolate, Mode mode, Zone* zone); void SetMode(Mode mode) { - ASSERT(isolate()->use_crankshaft()); mode_ = mode; } @@ -366,8 +403,8 @@ // Flags that can be set for eager compilation. class IsEval: public BitField<bool, 1, 1> {}; class IsGlobal: public BitField<bool, 2, 1> {}; - // Flags that can be set for lazy compilation. - class IsInLoop: public BitField<bool, 3, 1> {}; + // If the function is being compiled for the debugger. + class IsDebug: public BitField<bool, 3, 1> {}; // Strict mode - used in eager compilation. class StrictModeField: public BitField<StrictMode, 4, 1> {}; // Is this a function from our natives. 
@@ -389,6 +426,10 @@ class ParseRestricitonField: public BitField<ParseRestriction, 12, 1> {}; // If the function requires a frame (for unspecified reasons) class RequiresFrame: public BitField<bool, 13, 1> {}; + // If the function cannot build a frame (for unspecified reasons) + class MustNotHaveEagerFrame: public BitField<bool, 14, 1> {}; + // If we plan to serialize the compiled code. + class PrepareForSerializing : public BitField<bool, 15, 1> {}; unsigned flags_; @@ -412,13 +453,16 @@ // Fields possibly needed for eager compilation, NULL by default. v8::Extension* extension_; - ScriptDataImpl** cached_data_; - CachedDataMode cached_data_mode_; + ScriptData** cached_data_; + ScriptCompiler::CompileOptions compile_options_; // The context of the caller for eval code, and the global context for a // global script. Will be a null handle otherwise. Handle<Context> context_; + // Used by codegen, ultimately kept rooted by the SharedFunctionInfo. + Handle<FixedArray> feedback_vector_; + // Compilation mode flag and whether deoptimization is allowed. Mode mode_; BailoutId osr_ast_id_; @@ -465,6 +509,9 @@ int optimization_id_; + AstValueFactory* ast_value_factory_; + bool ast_value_factory_owned_; + DISALLOW_COPY_AND_ASSIGN(CompilationInfo); }; @@ -556,12 +603,14 @@ MUST_USE_RESULT Status AbortAndDisableOptimization( BailoutReason reason = kNoReason) { if (reason != kNoReason) info_->set_bailout_reason(reason); + // Reference to shared function info does not change between phases. 
+ AllowDeferredHandleDereference allow_handle_dereference; info_->shared_info()->DisableOptimization(info_->bailout_reason()); return SetLastStatus(BAILED_OUT); } void WaitForInstall() { - ASSERT(info_->is_osr()); + DCHECK(info_->is_osr()); awaiting_install_ = true; } @@ -572,9 +621,9 @@ HOptimizedGraphBuilder* graph_builder_; HGraph* graph_; LChunk* chunk_; - TimeDelta time_taken_to_create_graph_; - TimeDelta time_taken_to_optimize_; - TimeDelta time_taken_to_codegen_; + base::TimeDelta time_taken_to_create_graph_; + base::TimeDelta time_taken_to_optimize_; + base::TimeDelta time_taken_to_codegen_; Status last_status_; bool awaiting_install_; @@ -585,9 +634,9 @@ void RecordOptimizationStats(); struct Timer { - Timer(OptimizedCompileJob* job, TimeDelta* location) + Timer(OptimizedCompileJob* job, base::TimeDelta* location) : job_(job), location_(location) { - ASSERT(location_ != NULL); + DCHECK(location_ != NULL); timer_.Start(); } @@ -596,8 +645,8 @@ } OptimizedCompileJob* job_; - ElapsedTimer timer_; - TimeDelta* location_; + base::ElapsedTimer timer_; + base::TimeDelta* location_; }; }; @@ -615,46 +664,44 @@ class Compiler : public AllStatic { public: - static Handle<Code> GetUnoptimizedCode(Handle<JSFunction> function); - static Handle<Code> GetUnoptimizedCode(Handle<SharedFunctionInfo> shared); + MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode( + Handle<JSFunction> function); + MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode( + Handle<SharedFunctionInfo> shared); static bool EnsureCompiled(Handle<JSFunction> function, ClearExceptionFlag flag); - static Handle<Code> GetCodeForDebugging(Handle<JSFunction> function); + MUST_USE_RESULT static MaybeHandle<Code> GetCodeForDebugging( + Handle<JSFunction> function); -#ifdef ENABLE_DEBUGGER_SUPPORT static void CompileForLiveEdit(Handle<Script> script); -#endif // Compile a String source within a context for eval. 
- static Handle<JSFunction> GetFunctionFromEval(Handle<String> source, - Handle<Context> context, - StrictMode strict_mode, - ParseRestriction restriction, - int scope_position); + MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval( + Handle<String> source, + Handle<Context> context, + StrictMode strict_mode, + ParseRestriction restriction, + int scope_position); // Compile a String source within a context. static Handle<SharedFunctionInfo> CompileScript( - Handle<String> source, - Handle<Object> script_name, - int line_offset, - int column_offset, - bool is_shared_cross_origin, - Handle<Context> context, - v8::Extension* extension, - ScriptDataImpl** cached_data, - CachedDataMode cached_data_mode, + Handle<String> source, Handle<Object> script_name, int line_offset, + int column_offset, bool is_shared_cross_origin, Handle<Context> context, + v8::Extension* extension, ScriptData** cached_data, + ScriptCompiler::CompileOptions compile_options, NativesFlag is_natives_code); // Create a shared function info object (the code may be lazily compiled). static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node, - Handle<Script> script); + Handle<Script> script, + CompilationInfo* outer); enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT }; // Generate and return optimized code or start a concurrent optimization job. // In the latter case, return the InOptimizationQueue builtin. On failure, // return the empty handle. 
- static Handle<Code> GetOptimizedCode( + MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode( Handle<JSFunction> function, Handle<Code> current_code, ConcurrencyMode mode, @@ -688,12 +735,11 @@ CompilationInfo* info_; Zone zone_; unsigned info_zone_start_allocation_size_; - ElapsedTimer timer_; + base::ElapsedTimer timer_; DISALLOW_COPY_AND_ASSIGN(CompilationPhase); }; - } } // namespace v8::internal #endif // V8_COMPILER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/compiler-intrinsics.h nodejs-0.11.15/deps/v8/src/compiler-intrinsics.h --- nodejs-0.11.13/deps/v8/src/compiler-intrinsics.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/compiler-intrinsics.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,12 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_COMPILER_INTRINSICS_H_ #define V8_COMPILER_INTRINSICS_H_ +#include "src/base/macros.h" + namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/contexts.cc nodejs-0.11.15/deps/v8/src/contexts.cc --- nodejs-0.11.13/deps/v8/src/contexts.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/contexts.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "bootstrapper.h" -#include "debug.h" -#include "scopeinfo.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/bootstrapper.h" +#include "src/debug.h" +#include "src/scopeinfo.h" namespace v8 { namespace internal { @@ -38,7 +15,7 @@ Context* current = this; while (!current->IsFunctionContext() && !current->IsNativeContext()) { current = current->previous(); - ASSERT(current->closure() == closure()); + DCHECK(current->closure() == closure()); } return current; } @@ -49,7 +26,7 @@ if (object->IsJSGlobalObject()) { return JSGlobalObject::cast(object)->builtins(); } else { - ASSERT(object->IsJSBuiltinsObject()); + DCHECK(object->IsJSBuiltinsObject()); return JSBuiltinsObject::cast(object); } } @@ -74,7 +51,7 @@ // During bootstrapping, the global object might not be set and we // have to search the context chain to find the native context. 
- ASSERT(this->GetIsolate()->bootstrapper()->IsActive()); + DCHECK(this->GetIsolate()->bootstrapper()->IsActive()); Context* current = this; while (!current->IsNativeContext()) { JSFunction* closure = JSFunction::cast(current->closure()); @@ -94,6 +71,38 @@ } +/** + * Lookups a property in an object environment, taking the unscopables into + * account. This is used For HasBinding spec algorithms for ObjectEnvironment. + */ +static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) { + Isolate* isolate = it->isolate(); + + Maybe<PropertyAttributes> attrs = JSReceiver::GetPropertyAttributes(it); + DCHECK(attrs.has_value || isolate->has_pending_exception()); + if (!attrs.has_value || attrs.value == ABSENT) return attrs; + + Handle<Symbol> unscopables_symbol( + isolate->native_context()->unscopables_symbol(), isolate); + Handle<Object> receiver = it->GetReceiver(); + Handle<Object> unscopables; + MaybeHandle<Object> maybe_unscopables = + Object::GetProperty(receiver, unscopables_symbol); + if (!maybe_unscopables.ToHandle(&unscopables)) { + return Maybe<PropertyAttributes>(); + } + if (!unscopables->IsSpecObject()) return attrs; + Maybe<bool> blacklist = JSReceiver::HasProperty( + Handle<JSReceiver>::cast(unscopables), it->name()); + if (!blacklist.has_value) { + DCHECK(isolate->has_pending_exception()); + return Maybe<PropertyAttributes>(); + } + if (blacklist.value) return maybe(ABSENT); + return attrs; +} + + Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags, int* index, @@ -129,15 +138,22 @@ // Context extension objects needs to behave as if they have no // prototype. So even if we want to follow prototype chains, we need // to only do a local lookup for context extension objects. 
+ Maybe<PropertyAttributes> maybe; if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || object->IsJSContextExtensionObject()) { - *attributes = JSReceiver::GetLocalPropertyAttribute(object, name); + maybe = JSReceiver::GetOwnPropertyAttributes(object, name); + } else if (context->IsWithContext()) { + LookupIterator it(object, name); + maybe = UnscopableLookup(&it); } else { - *attributes = JSReceiver::GetPropertyAttribute(object, name); + maybe = JSReceiver::GetPropertyAttributes(object, name); } - if (isolate->has_pending_exception()) return Handle<Object>(); - if (*attributes != ABSENT) { + if (!maybe.has_value) return Handle<Object>(); + DCHECK(!isolate->has_pending_exception()); + *attributes = maybe.value; + + if (maybe.value != ABSENT) { if (FLAG_trace_contexts) { PrintF("=> found property in context object %p\n", reinterpret_cast<void*>(*object)); @@ -160,8 +176,12 @@ } VariableMode mode; InitializationFlag init_flag; - int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag); - ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); + // TODO(sigurds) Figure out whether maybe_assigned_flag should + // be used to compute binding_flags. + MaybeAssignedFlag maybe_assigned_flag; + int slot_index = ScopeInfo::ContextSlotIndex( + scope_info, name, &mode, &init_flag, &maybe_assigned_flag); + DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); if (slot_index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found local in context slot %d (mode = %d)\n", @@ -222,7 +242,7 @@ } *index = function_index; *attributes = READ_ONLY; - ASSERT(mode == CONST_LEGACY || mode == CONST); + DCHECK(mode == CONST_LEGACY || mode == CONST); *binding_flags = (mode == CONST_LEGACY) ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY; return context; @@ -231,7 +251,7 @@ } else if (context->IsCatchContext()) { // Catch contexts have the variable name in the extension slot. 
- if (name->Equals(String::cast(context->extension()))) { + if (String::Equals(name, handle(String::cast(context->extension())))) { if (FLAG_trace_contexts) { PrintF("=> found in catch context\n"); } @@ -258,8 +278,8 @@ void Context::AddOptimizedFunction(JSFunction* function) { - ASSERT(IsNativeContext()); -#ifdef ENABLE_SLOW_ASSERTS + DCHECK(IsNativeContext()); +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { Object* element = get(OPTIMIZED_FUNCTIONS_LIST); while (!element->IsUndefined()) { @@ -288,7 +308,7 @@ flusher->EvictCandidate(function); } - ASSERT(function->next_function_link()->IsUndefined()); + DCHECK(function->next_function_link()->IsUndefined()); function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST)); set(OPTIMIZED_FUNCTIONS_LIST, function); @@ -296,12 +316,12 @@ void Context::RemoveOptimizedFunction(JSFunction* function) { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); Object* element = get(OPTIMIZED_FUNCTIONS_LIST); JSFunction* prev = NULL; while (!element->IsUndefined()) { JSFunction* element_function = JSFunction::cast(element); - ASSERT(element_function->next_function_link()->IsUndefined() || + DCHECK(element_function->next_function_link()->IsUndefined() || element_function->next_function_link()->IsJSFunction()); if (element_function == function) { if (prev == NULL) { @@ -320,56 +340,56 @@ void Context::SetOptimizedFunctionsListHead(Object* head) { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); set(OPTIMIZED_FUNCTIONS_LIST, head); } Object* Context::OptimizedFunctionsListHead() { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); return get(OPTIMIZED_FUNCTIONS_LIST); } void Context::AddOptimizedCode(Code* code) { - ASSERT(IsNativeContext()); - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); - ASSERT(code->next_code_link()->IsUndefined()); + DCHECK(IsNativeContext()); + DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(code->next_code_link()->IsUndefined()); 
code->set_next_code_link(get(OPTIMIZED_CODE_LIST)); set(OPTIMIZED_CODE_LIST, code); } void Context::SetOptimizedCodeListHead(Object* head) { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); set(OPTIMIZED_CODE_LIST, head); } Object* Context::OptimizedCodeListHead() { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); return get(OPTIMIZED_CODE_LIST); } void Context::SetDeoptimizedCodeListHead(Object* head) { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); set(DEOPTIMIZED_CODE_LIST, head); } Object* Context::DeoptimizedCodeListHead() { - ASSERT(IsNativeContext()); + DCHECK(IsNativeContext()); return get(DEOPTIMIZED_CODE_LIST); } Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() { - Handle<Object> result(error_message_for_code_gen_from_strings(), - GetIsolate()); + Isolate* isolate = GetIsolate(); + Handle<Object> result(error_message_for_code_gen_from_strings(), isolate); if (!result->IsUndefined()) return result; - return GetIsolate()->factory()->NewStringFromOneByte(STATIC_ASCII_VECTOR( - "Code generation from strings disallowed for this context")); + return isolate->factory()->NewStringFromStaticAscii( + "Code generation from strings disallowed for this context"); } diff -Nru nodejs-0.11.13/deps/v8/src/contexts.h nodejs-0.11.15/deps/v8/src/contexts.h --- nodejs-0.11.13/deps/v8/src/contexts.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/contexts.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CONTEXTS_H_ #define V8_CONTEXTS_H_ -#include "heap.h" -#include "objects.h" +#include "src/heap/heap.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -96,102 +73,135 @@ // must always be allocated via Heap::AllocateContext() or // Factory::NewContext. 
-#define NATIVE_CONTEXT_FIELDS(V) \ - V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \ - V(SECURITY_TOKEN_INDEX, Object, security_token) \ - V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \ - V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \ - V(STRING_FUNCTION_INDEX, JSFunction, string_function) \ - V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \ - V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \ - V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ - V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \ - V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ - V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \ - V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ - V(JSON_OBJECT_INDEX, JSObject, json_object) \ - V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ - V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \ - V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \ - V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \ - V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \ - V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \ - V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \ - V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \ - V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \ - V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \ - V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \ - V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \ - V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \ - V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \ - V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \ - V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \ - V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \ - V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \ - V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \ - V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \ - 
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \ - V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \ - V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \ - V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \ - V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \ - V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \ - V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \ - V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \ - sloppy_function_without_prototype_map) \ - V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \ - strict_function_without_prototype_map) \ - V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\ - V(SLOPPY_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \ - sloppy_arguments_boilerplate) \ - V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \ - aliased_arguments_boilerplate) \ - V(STRICT_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \ - strict_arguments_boilerplate) \ - V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \ - V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \ - V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \ - V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \ - V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \ - V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \ - V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \ - V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \ - V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ - V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ - call_as_constructor_delegate) \ - V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \ - V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \ - V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ - V(MAP_CACHE_INDEX, Object, map_cache) \ - V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \ - V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, 
allow_code_gen_from_strings) \ - V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \ - error_message_for_code_gen_from_strings) \ - V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \ - V(ENQUEUE_EXTERNAL_MICROTASK_INDEX, JSFunction, enqueue_external_microtask) \ - V(IS_PROMISE_INDEX, JSFunction, is_promise) \ - V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \ - V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \ - V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \ - V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \ - V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \ - V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \ - to_complete_property_descriptor) \ - V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ - V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ - V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \ - V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \ - V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \ - V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \ - V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, \ - observers_begin_perform_splice) \ - V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \ - observers_end_perform_splice) \ - V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \ - V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \ - V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \ - generator_object_prototype_map) \ - V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) +#define NATIVE_CONTEXT_FIELDS(V) \ + V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \ + V(SECURITY_TOKEN_INDEX, Object, security_token) \ + V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \ + V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \ + V(STRING_FUNCTION_INDEX, JSFunction, string_function) \ + V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \ + V(SYMBOL_FUNCTION_INDEX, JSFunction, 
symbol_function) \ + V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ + V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \ + V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ + V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \ + V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ + V(JSON_OBJECT_INDEX, JSObject, json_object) \ + V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ + V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \ + V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \ + V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \ + V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \ + V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \ + V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \ + V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \ + V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \ + V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \ + V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \ + V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \ + V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \ + V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \ + V(MATH_ABS_FUN_INDEX, JSFunction, math_abs_fun) \ + V(MATH_ACOS_FUN_INDEX, JSFunction, math_acos_fun) \ + V(MATH_ASIN_FUN_INDEX, JSFunction, math_asin_fun) \ + V(MATH_ATAN_FUN_INDEX, JSFunction, math_atan_fun) \ + V(MATH_ATAN2_FUN_INDEX, JSFunction, math_atan2_fun) \ + V(MATH_CEIL_FUN_INDEX, JSFunction, math_ceil_fun) \ + V(MATH_COS_FUN_INDEX, JSFunction, math_cos_fun) \ + V(MATH_EXP_FUN_INDEX, JSFunction, math_exp_fun) \ + V(MATH_FLOOR_FUN_INDEX, JSFunction, math_floor_fun) \ + V(MATH_IMUL_FUN_INDEX, JSFunction, math_imul_fun) \ + V(MATH_LOG_FUN_INDEX, JSFunction, math_log_fun) \ + V(MATH_MAX_FUN_INDEX, JSFunction, math_max_fun) \ + V(MATH_MIN_FUN_INDEX, JSFunction, math_min_fun) \ + V(MATH_POW_FUN_INDEX, JSFunction, math_pow_fun) \ + V(MATH_RANDOM_FUN_INDEX, JSFunction, 
math_random_fun) \ + V(MATH_ROUND_FUN_INDEX, JSFunction, math_round_fun) \ + V(MATH_SIN_FUN_INDEX, JSFunction, math_sin_fun) \ + V(MATH_SQRT_FUN_INDEX, JSFunction, math_sqrt_fun) \ + V(MATH_TAN_FUN_INDEX, JSFunction, math_tan_fun) \ + V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \ + V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \ + V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \ + V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \ + V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \ + V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \ + V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \ + V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \ + V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \ + V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \ + V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map) \ + V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map) \ + V(INT16_ARRAY_EXTERNAL_MAP_INDEX, Map, int16_array_external_map) \ + V(UINT16_ARRAY_EXTERNAL_MAP_INDEX, Map, uint16_array_external_map) \ + V(INT32_ARRAY_EXTERNAL_MAP_INDEX, Map, int32_array_external_map) \ + V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map) \ + V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map) \ + V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map) \ + V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map, \ + uint8_clamped_array_external_map) \ + V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \ + V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \ + V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \ + sloppy_function_with_readonly_prototype_map) \ + V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \ + V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \ + sloppy_function_without_prototype_map) \ + V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \ + strict_function_without_prototype_map) \ + 
V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map) \ + V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \ + V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \ + V(ALIASED_ARGUMENTS_MAP_INDEX, Map, aliased_arguments_map) \ + V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \ + V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \ + V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \ + V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \ + V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \ + V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \ + V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \ + V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \ + V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \ + V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ + V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ + call_as_constructor_delegate) \ + V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \ + V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \ + V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ + V(MAP_CACHE_INDEX, Object, map_cache) \ + V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \ + V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ + V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \ + error_message_for_code_gen_from_strings) \ + V(IS_PROMISE_INDEX, JSFunction, is_promise) \ + V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \ + V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \ + V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \ + V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \ + V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \ + V(PROMISE_THEN_INDEX, JSFunction, promise_then) \ + V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \ + to_complete_property_descriptor) \ + V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ + 
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ + V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \ + V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \ + V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \ + V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \ + V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \ + V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice) \ + V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe) \ + V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, native_object_get_notifier) \ + V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \ + native_object_notifier_perform_change) \ + V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \ + V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \ + V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \ + V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \ + V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \ + V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \ + V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol) \ + V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol) // JSFunctions are pairs (context, function code), sometimes also called // closures. A Context object is used to represent function contexts and @@ -242,7 +252,7 @@ public: // Conversions. static Context* cast(Object* context) { - ASSERT(context->IsContext()); + DCHECK(context->IsContext()); return reinterpret_cast<Context*>(context); } @@ -265,14 +275,16 @@ // These slots are only in native contexts. 
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS, SECURITY_TOKEN_INDEX, - SLOPPY_ARGUMENTS_BOILERPLATE_INDEX, - ALIASED_ARGUMENTS_BOILERPLATE_INDEX, - STRICT_ARGUMENTS_BOILERPLATE_INDEX, + SLOPPY_ARGUMENTS_MAP_INDEX, + ALIASED_ARGUMENTS_MAP_INDEX, + STRICT_ARGUMENTS_MAP_INDEX, REGEXP_RESULT_MAP_INDEX, SLOPPY_FUNCTION_MAP_INDEX, + SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, STRICT_FUNCTION_MAP_INDEX, SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, + BOUND_FUNCTION_MAP_INDEX, INITIAL_OBJECT_PROTOTYPE_INDEX, INITIAL_ARRAY_PROTOTYPE_INDEX, BOOLEAN_FUNCTION_INDEX, @@ -299,6 +311,25 @@ GLOBAL_EVAL_FUN_INDEX, INSTANTIATE_FUN_INDEX, CONFIGURE_INSTANCE_FUN_INDEX, + MATH_ABS_FUN_INDEX, + MATH_ACOS_FUN_INDEX, + MATH_ASIN_FUN_INDEX, + MATH_ATAN_FUN_INDEX, + MATH_ATAN2_FUN_INDEX, + MATH_CEIL_FUN_INDEX, + MATH_COS_FUN_INDEX, + MATH_EXP_FUN_INDEX, + MATH_FLOOR_FUN_INDEX, + MATH_IMUL_FUN_INDEX, + MATH_LOG_FUN_INDEX, + MATH_MAX_FUN_INDEX, + MATH_MIN_FUN_INDEX, + MATH_POW_FUN_INDEX, + MATH_RANDOM_FUN_INDEX, + MATH_ROUND_FUN_INDEX, + MATH_SIN_FUN_INDEX, + MATH_SQRT_FUN_INDEX, + MATH_TAN_FUN_INDEX, ARRAY_BUFFER_FUN_INDEX, UINT8_ARRAY_FUN_INDEX, INT8_ARRAY_FUN_INDEX, @@ -309,6 +340,15 @@ FLOAT32_ARRAY_FUN_INDEX, FLOAT64_ARRAY_FUN_INDEX, UINT8_CLAMPED_ARRAY_FUN_INDEX, + INT8_ARRAY_EXTERNAL_MAP_INDEX, + UINT8_ARRAY_EXTERNAL_MAP_INDEX, + INT16_ARRAY_EXTERNAL_MAP_INDEX, + UINT16_ARRAY_EXTERNAL_MAP_INDEX, + INT32_ARRAY_EXTERNAL_MAP_INDEX, + UINT32_ARRAY_EXTERNAL_MAP_INDEX, + FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, + FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, + UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, DATA_VIEW_FUN_INDEX, MESSAGE_LISTENERS_INDEX, MAKE_MESSAGE_FUN_INDEX, @@ -328,13 +368,14 @@ ALLOW_CODE_GEN_FROM_STRINGS_INDEX, ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, RUN_MICROTASKS_INDEX, - ENQUEUE_EXTERNAL_MICROTASK_INDEX, + ENQUEUE_MICROTASK_INDEX, IS_PROMISE_INDEX, PROMISE_CREATE_INDEX, PROMISE_RESOLVE_INDEX, PROMISE_REJECT_INDEX, PROMISE_CHAIN_INDEX, 
PROMISE_CATCH_INDEX, + PROMISE_THEN_INDEX, TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, DERIVED_HAS_TRAP_INDEX, DERIVED_GET_TRAP_INDEX, @@ -344,10 +385,17 @@ OBSERVERS_ENQUEUE_SPLICE_INDEX, OBSERVERS_BEGIN_SPLICE_INDEX, OBSERVERS_END_SPLICE_INDEX, + NATIVE_OBJECT_OBSERVE_INDEX, + NATIVE_OBJECT_GET_NOTIFIER_INDEX, + NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, STRICT_GENERATOR_FUNCTION_MAP_INDEX, GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, - GENERATOR_RESULT_MAP_INDEX, + ITERATOR_RESULT_MAP_INDEX, + MAP_ITERATOR_MAP_INDEX, + SET_ITERATOR_MAP_INDEX, + ITERATOR_SYMBOL_INDEX, + UNSCOPABLES_SYMBOL_INDEX, // Properties from here are treated as weak references by the full GC. // Scavenge treats them as strong references. @@ -359,7 +407,6 @@ // Total number of slots. NATIVE_CONTEXT_SLOTS, - FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST }; @@ -369,7 +416,7 @@ Context* previous() { Object* result = unchecked_previous(); - ASSERT(IsBootstrappingOrValidParentContext(result, this)); + DCHECK(IsBootstrappingOrValidParentContext(result, this)); return reinterpret_cast<Context*>(result); } void set_previous(Context* context) { set(PREVIOUS_INDEX, context); } @@ -387,7 +434,7 @@ GlobalObject* global_object() { Object* result = get(GLOBAL_OBJECT_INDEX); - ASSERT(IsBootstrappingOrGlobalObject(this->GetIsolate(), result)); + DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result)); return reinterpret_cast<GlobalObject*>(result); } void set_global_object(GlobalObject* object) { @@ -439,6 +486,11 @@ return map == map->GetHeap()->global_context_map(); } + bool HasSameSecurityTokenAs(Context* that) { + return this->global_object()->native_context()->security_token() == + that->global_object()->native_context()->security_token(); + } + // A native context holds a list of all functions with optimized code. 
void AddOptimizedFunction(JSFunction* function); void RemoveOptimizedFunction(JSFunction* function); @@ -457,15 +509,15 @@ #define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \ void set_##name(type* value) { \ - ASSERT(IsNativeContext()); \ + DCHECK(IsNativeContext()); \ set(index, value); \ } \ bool is_##name(type* value) { \ - ASSERT(IsNativeContext()); \ + DCHECK(IsNativeContext()); \ return type::cast(get(index)) == value; \ } \ type* name() { \ - ASSERT(IsNativeContext()); \ + DCHECK(IsNativeContext()); \ return type::cast(get(index)); \ } NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS) @@ -530,8 +582,8 @@ static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object); #endif - STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize); - STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex); + STATIC_ASSERT(kHeaderSize == Internals::kContextHeaderSize); + STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex); }; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/conversions.cc nodejs-0.11.15/deps/v8/src/conversions.cc --- nodejs-0.11.13/deps/v8/src/conversions.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/conversions.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,21 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include <stdarg.h> #include <limits.h> +#include <stdarg.h> #include <cmath> -#include "conversions-inl.h" -#include "dtoa.h" -#include "list-inl.h" -#include "strtod.h" -#include "utils.h" +#include "src/v8.h" + +#include "src/assert-scope.h" +#include "src/conversions-inl.h" +#include "src/conversions.h" +#include "src/dtoa.h" +#include "src/factory.h" +#include "src/list-inl.h" +#include "src/strtod.h" +#include "src/utils.h" #ifndef _STLP_VENDOR_CSTD // STLPort doesn't import fpclassify into the std namespace. @@ -44,6 +26,47 @@ namespace internal { +namespace { + +// C++-style iterator adaptor for StringCharacterStream +// (unlike C++ iterators the end-marker has different type). 
+class StringCharacterStreamIterator { + public: + class EndMarker {}; + + explicit StringCharacterStreamIterator(StringCharacterStream* stream); + + uint16_t operator*() const; + void operator++(); + bool operator==(EndMarker const&) const { return end_; } + bool operator!=(EndMarker const& m) const { return !end_; } + + private: + StringCharacterStream* const stream_; + uint16_t current_; + bool end_; +}; + + +StringCharacterStreamIterator::StringCharacterStreamIterator( + StringCharacterStream* stream) : stream_(stream) { + ++(*this); +} + +uint16_t StringCharacterStreamIterator::operator*() const { + return current_; +} + + +void StringCharacterStreamIterator::operator++() { + end_ = !stream_->HasMore(); + if (!end_) { + current_ = stream_->GetNext(); + } +} +} // End anonymous namespace. + + double StringToDouble(UnicodeCache* unicode_cache, const char* str, int flags, double empty_string_val) { // We cast to const uint8_t* here to avoid instantiating the @@ -56,7 +79,7 @@ double StringToDouble(UnicodeCache* unicode_cache, - Vector<const char> str, + Vector<const uint8_t> str, int flags, double empty_string_val) { // We cast to const uint8_t* here to avoid instantiating the @@ -78,6 +101,23 @@ } +// Converts a string into an integer. 
+double StringToInt(UnicodeCache* unicode_cache, + Vector<const uint8_t> vector, + int radix) { + return InternalStringToInt( + unicode_cache, vector.start(), vector.start() + vector.length(), radix); +} + + +double StringToInt(UnicodeCache* unicode_cache, + Vector<const uc16> vector, + int radix) { + return InternalStringToInt( + unicode_cache, vector.start(), vector.start() + vector.length(), radix); +} + + const char* DoubleToCString(double v, Vector<char> buffer) { switch (fpclassify(v)) { case FP_NAN: return "NaN"; @@ -157,8 +197,8 @@ const int kMaxDigitsBeforePoint = 21; const double kFirstNonFixed = 1e21; const int kMaxDigitsAfterPoint = 20; - ASSERT(f >= 0); - ASSERT(f <= kMaxDigitsAfterPoint); + DCHECK(f >= 0); + DCHECK(f <= kMaxDigitsAfterPoint); bool negative = false; double abs_value = value; @@ -256,11 +296,10 @@ } - char* DoubleToExponentialCString(double value, int f) { const int kMaxDigitsAfterPoint = 20; // f might be -1 to signal that f was undefined in JavaScript. - ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint); + DCHECK(f >= -1 && f <= kMaxDigitsAfterPoint); bool negative = false; if (value < 0) { @@ -277,7 +316,7 @@ const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1; // Make sure that the buffer is big enough, even if we fall back to the // shortest representation (which happens when f equals -1). 
- ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1); + DCHECK(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1); char decimal_rep[kV8DtoaBufferCapacity]; int decimal_rep_length; @@ -291,8 +330,8 @@ Vector<char>(decimal_rep, kV8DtoaBufferCapacity), &sign, &decimal_rep_length, &decimal_point); } - ASSERT(decimal_rep_length > 0); - ASSERT(decimal_rep_length <= f + 1); + DCHECK(decimal_rep_length > 0); + DCHECK(decimal_rep_length <= f + 1); int exponent = decimal_point - 1; char* result = @@ -305,7 +344,7 @@ char* DoubleToPrecisionCString(double value, int p) { const int kMinimalDigits = 1; const int kMaximalDigits = 21; - ASSERT(p >= kMinimalDigits && p <= kMaximalDigits); + DCHECK(p >= kMinimalDigits && p <= kMaximalDigits); USE(kMinimalDigits); bool negative = false; @@ -325,7 +364,7 @@ DoubleToAscii(value, DTOA_PRECISION, p, Vector<char>(decimal_rep, kV8DtoaBufferCapacity), &sign, &decimal_rep_length, &decimal_point); - ASSERT(decimal_rep_length <= p); + DCHECK(decimal_rep_length <= p); int exponent = decimal_point - 1; @@ -373,7 +412,7 @@ char* DoubleToRadixCString(double value, int radix) { - ASSERT(radix >= 2 && radix <= 36); + DCHECK(radix >= 2 && radix <= 36); // Character array used for conversion. static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz"; @@ -407,7 +446,7 @@ integer_part /= radix; } while (integer_part >= 1.0); // Sanity check. - ASSERT(integer_pos > 0); + DCHECK(integer_pos > 0); // Add sign if needed. 
if (is_negative) integer_buffer[integer_pos--] = '-'; @@ -443,4 +482,22 @@ return builder.Finalize(); } + +double StringToDouble(UnicodeCache* unicode_cache, + String* string, + int flags, + double empty_string_val) { + DisallowHeapAllocation no_gc; + String::FlatContent flat = string->GetFlatContent(); + // ECMA-262 section 15.1.2.3, empty string is NaN + if (flat.IsAscii()) { + return StringToDouble( + unicode_cache, flat.ToOneByteVector(), flags, empty_string_val); + } else { + return StringToDouble( + unicode_cache, flat.ToUC16Vector(), flags, empty_string_val); + } +} + + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/conversions.h nodejs-0.11.15/deps/v8/src/conversions.h --- nodejs-0.11.13/deps/v8/src/conversions.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/conversions.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,16 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CONVERSIONS_H_ #define V8_CONVERSIONS_H_ -#include "utils.h" +#include <limits> + +#include "src/base/logging.h" +#include "src/handles.h" +#include "src/objects.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -59,7 +41,8 @@ // The fast double-to-(unsigned-)int conversion routine does not guarantee // rounding towards zero. -// For NaN and values outside the int range, return INT_MIN or INT_MAX. +// If x is NaN, the result is INT_MIN. Otherwise the result is the argument x, +// clamped to [INT_MIN, INT_MAX] and then rounded to an integer. inline int FastD2IChecked(double x) { if (!(x >= INT_MIN)) return INT_MIN; // Negation to catch NaNs. if (x > INT_MAX) return INT_MAX; @@ -122,7 +105,7 @@ // Converts a string into a double value according to ECMA-262 9.3.1 double StringToDouble(UnicodeCache* unicode_cache, - Vector<const char> str, + Vector<const uint8_t> str, int flags, double empty_string_val = 0); double StringToDouble(UnicodeCache* unicode_cache, @@ -135,6 +118,16 @@ int flags, double empty_string_val = 0); +// Converts a string into an integer. 
+double StringToInt(UnicodeCache* unicode_cache, + Vector<const uint8_t> vector, + int radix); + + +double StringToInt(UnicodeCache* unicode_cache, + Vector<const uc16> vector, + int radix); + const int kDoubleToCStringMinBufferSize = 100; // Converts a double to a string value according to ECMA-262 9.8.1. @@ -153,6 +146,88 @@ char* DoubleToPrecisionCString(double value, int f); char* DoubleToRadixCString(double value, int radix); + +static inline bool IsMinusZero(double value) { + static const DoubleRepresentation minus_zero(-0.0); + return DoubleRepresentation(value) == minus_zero; +} + + +// Integer32 is an integer that can be represented as a signed 32-bit +// integer. It has to be in the range [-2^31, 2^31 - 1]. +// We also have to check for negative 0 as it is not an Integer32. +static inline bool IsInt32Double(double value) { + return !IsMinusZero(value) && + value >= kMinInt && + value <= kMaxInt && + value == FastI2D(FastD2I(value)); +} + + +// UInteger32 is an integer that can be represented as an unsigned 32-bit +// integer. It has to be in the range [0, 2^32 - 1]. +// We also have to check for negative 0 as it is not a UInteger32. +static inline bool IsUint32Double(double value) { + return !IsMinusZero(value) && + value >= 0 && + value <= kMaxUInt32 && + value == FastUI2D(FastD2UI(value)); +} + + +// Convert from Number object to C integer. 
+inline int32_t NumberToInt32(Object* number) { + if (number->IsSmi()) return Smi::cast(number)->value(); + return DoubleToInt32(number->Number()); +} + + +inline uint32_t NumberToUint32(Object* number) { + if (number->IsSmi()) return Smi::cast(number)->value(); + return DoubleToUint32(number->Number()); +} + + +double StringToDouble(UnicodeCache* unicode_cache, + String* string, + int flags, + double empty_string_val = 0.0); + + +inline bool TryNumberToSize(Isolate* isolate, + Object* number, size_t* result) { + SealHandleScope shs(isolate); + if (number->IsSmi()) { + int value = Smi::cast(number)->value(); + DCHECK(static_cast<unsigned>(Smi::kMaxValue) + <= std::numeric_limits<size_t>::max()); + if (value >= 0) { + *result = static_cast<size_t>(value); + return true; + } + return false; + } else { + DCHECK(number->IsHeapNumber()); + double value = HeapNumber::cast(number)->value(); + if (value >= 0 && + value <= std::numeric_limits<size_t>::max()) { + *result = static_cast<size_t>(value); + return true; + } else { + return false; + } + } +} + +// Converts a number into size_t. +inline size_t NumberToSize(Isolate* isolate, + Object* number) { + size_t result = 0; + bool is_valid = TryNumberToSize(isolate, number, &result); + CHECK(is_valid); + return result; +} + } } // namespace v8::internal #endif // V8_CONVERSIONS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/conversions-inl.h nodejs-0.11.15/deps/v8/src/conversions-inl.h --- nodejs-0.11.13/deps/v8/src/conversions-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/conversions-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,24 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CONVERSIONS_INL_H_ #define V8_CONVERSIONS_INL_H_ -#include <limits.h> // Required for INT_MAX etc. #include <float.h> // Required for DBL_MAX and on Win32 for finite() +#include <limits.h> // Required for INT_MAX etc. #include <stdarg.h> #include <cmath> -#include "globals.h" // Required for V8_INFINITY +#include "src/globals.h" // Required for V8_INFINITY // ---------------------------------------------------------------------------- // Extra POSIX/ANSI functions for Win32/MSVC. 
-#include "conversions.h" -#include "double.h" -#include "platform.h" -#include "scanner.h" -#include "strtod.h" +#include "src/base/platform/platform.h" +#include "src/conversions.h" +#include "src/double.h" +#include "src/scanner.h" +#include "src/strtod.h" namespace v8 { namespace internal { @@ -75,9 +52,13 @@ if (x < k2Pow52) { x += k2Pow52; uint32_t result; +#ifndef V8_TARGET_BIG_ENDIAN Address mantissa_ptr = reinterpret_cast<Address>(&x); +#else + Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize; +#endif // Copy least significant 32 bits of mantissa. - OS::MemCopy(&result, mantissa_ptr, sizeof(result)); + memcpy(&result, mantissa_ptr, sizeof(result)); return negative ? ~result + 1 : result; } // Large number (outside uint32 range), Infinity or NaN. @@ -111,7 +92,7 @@ bool SubStringEquals(Iterator* current, EndMark end, const char* substring) { - ASSERT(**current == *substring); + DCHECK(**current == *substring); for (substring++; *substring != '\0'; substring++) { ++*current; if (*current == end || **current != *substring) return false; @@ -142,7 +123,7 @@ EndMark end, bool negative, bool allow_trailing_junk) { - ASSERT(current != end); + DCHECK(current != end); // Skip leading 0s. while (*current == '0') { @@ -221,8 +202,8 @@ ++current; } while (current != end); - ASSERT(number < ((int64_t)1 << 53)); - ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number); + DCHECK(number < ((int64_t)1 << 53)); + DCHECK(static_cast<int64_t>(static_cast<double>(number)) == number); if (exponent == 0) { if (negative) { @@ -232,7 +213,7 @@ return static_cast<double>(number); } - ASSERT(number != 0); + DCHECK(number != 0); return std::ldexp(static_cast<double>(negative ? -number : number), exponent); } @@ -343,7 +324,7 @@ if (buffer_pos <= kMaxSignificantDigits) { // If the number has more than kMaxSignificantDigits it will be parsed // as infinity. 
- ASSERT(buffer_pos < kBufferSize); + DCHECK(buffer_pos < kBufferSize); buffer[buffer_pos++] = static_cast<char>(*current); } ++current; @@ -355,7 +336,7 @@ return JunkStringValue(); } - SLOW_ASSERT(buffer_pos < kBufferSize); + SLOW_DCHECK(buffer_pos < kBufferSize); buffer[buffer_pos] = '\0'; Vector<const char> buffer_vector(buffer, buffer_pos); return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0); @@ -403,7 +384,7 @@ if (m > kMaximumMultiplier) break; part = part * radix + d; multiplier = m; - ASSERT(multiplier > part); + DCHECK(multiplier > part); ++current; if (current == end) { @@ -492,7 +473,7 @@ return JunkStringValue(); } - ASSERT(buffer_pos == 0); + DCHECK(buffer_pos == 0); return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY; } @@ -555,7 +536,7 @@ // Copy significant digits of the integer part (if any) to the buffer. while (*current >= '0' && *current <= '9') { if (significant_digits < kMaxSignificantDigits) { - ASSERT(buffer_pos < kBufferSize); + DCHECK(buffer_pos < kBufferSize); buffer[buffer_pos++] = static_cast<char>(*current); significant_digits++; // Will later check if it's an octal in the buffer. @@ -600,7 +581,7 @@ // instead. while (*current >= '0' && *current <= '9') { if (significant_digits < kMaxSignificantDigits) { - ASSERT(buffer_pos < kBufferSize); + DCHECK(buffer_pos < kBufferSize); buffer[buffer_pos++] = static_cast<char>(*current); significant_digits++; exponent--; @@ -654,7 +635,7 @@ } const int max_exponent = INT_MAX / 2; - ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2); + DCHECK(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2); int num = 0; do { // Check overflow. 
@@ -692,7 +673,7 @@ exponent--; } - SLOW_ASSERT(buffer_pos < kBufferSize); + SLOW_DCHECK(buffer_pos < kBufferSize); buffer[buffer_pos] = '\0'; double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent); diff -Nru nodejs-0.11.13/deps/v8/src/counters.cc nodejs-0.11.15/deps/v8/src/counters.cc --- nodejs-0.11.13/deps/v8/src/counters.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/counters.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "counters.h" -#include "isolate.h" -#include "platform.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/base/platform/platform.h" +#include "src/counters.h" +#include "src/isolate.h" namespace v8 { namespace internal { @@ -76,4 +53,115 @@ isolate()->event_logger()(name(), Logger::END); } + +Counters::Counters(Isolate* isolate) { +#define HR(name, caption, min, max, num_buckets) \ + name##_ = Histogram(#caption, min, max, num_buckets, isolate); + HISTOGRAM_RANGE_LIST(HR) +#undef HR + +#define HT(name, caption) \ + name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate); + HISTOGRAM_TIMER_LIST(HT) +#undef HT + +#define HP(name, caption) \ + name##_ = Histogram(#caption, 0, 101, 100, isolate); + HISTOGRAM_PERCENTAGE_LIST(HP) +#undef HP + +#define HM(name, caption) \ + name##_ = Histogram(#caption, 1000, 500000, 50, isolate); + HISTOGRAM_MEMORY_LIST(HM) +#undef HM + +#define SC(name, caption) \ + name##_ = StatsCounter(isolate, "c:" #caption); + + STATS_COUNTER_LIST_1(SC) + STATS_COUNTER_LIST_2(SC) +#undef SC + +#define SC(name) \ + count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \ + size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name); + INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + count_of_CODE_TYPE_##name##_ = \ + StatsCounter(isolate, "c:" 
"V8.CountOf_CODE_TYPE-" #name); \ + size_of_CODE_TYPE_##name##_ = \ + StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name); + CODE_KIND_LIST(SC) +#undef SC + +#define SC(name) \ + count_of_FIXED_ARRAY_##name##_ = \ + StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \ + size_of_FIXED_ARRAY_##name##_ = \ + StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name); + FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + count_of_CODE_AGE_##name##_ = \ + StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \ + size_of_CODE_AGE_##name##_ = \ + StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name); + CODE_AGE_LIST_COMPLETE(SC) +#undef SC +} + + +void Counters::ResetCounters() { +#define SC(name, caption) name##_.Reset(); + STATS_COUNTER_LIST_1(SC) + STATS_COUNTER_LIST_2(SC) +#undef SC + +#define SC(name) \ + count_of_##name##_.Reset(); \ + size_of_##name##_.Reset(); + INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + count_of_CODE_TYPE_##name##_.Reset(); \ + size_of_CODE_TYPE_##name##_.Reset(); + CODE_KIND_LIST(SC) +#undef SC + +#define SC(name) \ + count_of_FIXED_ARRAY_##name##_.Reset(); \ + size_of_FIXED_ARRAY_##name##_.Reset(); + FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + count_of_CODE_AGE_##name##_.Reset(); \ + size_of_CODE_AGE_##name##_.Reset(); + CODE_AGE_LIST_COMPLETE(SC) +#undef SC +} + + +void Counters::ResetHistograms() { +#define HR(name, caption, min, max, num_buckets) name##_.Reset(); + HISTOGRAM_RANGE_LIST(HR) +#undef HR + +#define HT(name, caption) name##_.Reset(); + HISTOGRAM_TIMER_LIST(HT) +#undef HT + +#define HP(name, caption) name##_.Reset(); + HISTOGRAM_PERCENTAGE_LIST(HP) +#undef HP + +#define HM(name, caption) name##_.Reset(); + HISTOGRAM_MEMORY_LIST(HM) +#undef HM +} + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/counters.h nodejs-0.11.15/deps/v8/src/counters.h --- nodejs-0.11.13/deps/v8/src/counters.h 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/counters.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_COUNTERS_H_ #define V8_COUNTERS_H_ -#include "../include/v8.h" -#include "allocation.h" +#include "include/v8.h" +#include "src/allocation.h" +#include "src/base/platform/elapsed-timer.h" +#include "src/globals.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -159,10 +139,13 @@ // given counter without calling the runtime system. int* GetInternalPointer() { int* loc = GetPtr(); - ASSERT(loc != NULL); + DCHECK(loc != NULL); return loc; } + // Reset the cached internal pointer. + void Reset() { lookup_done_ = false; } + protected: // Returns the cached address of this counter location. int* GetPtr() { @@ -261,11 +244,11 @@ // TODO(bmeurer): Remove this when HistogramTimerScope is fixed. #ifdef DEBUG - ElapsedTimer* timer() { return &timer_; } + base::ElapsedTimer* timer() { return &timer_; } #endif private: - ElapsedTimer timer_; + base::ElapsedTimer timer_; }; // Helper class for scoping a HistogramTimer. @@ -285,11 +268,12 @@ } else { timer_->Start(); } + } #else : timer_(timer) { timer_->Start(); -#endif } +#endif ~HistogramTimerScope() { #ifdef DEBUG if (!skipped_timer_start_) { @@ -299,6 +283,7 @@ timer_->Stop(); #endif } + private: HistogramTimer* timer_; #ifdef DEBUG @@ -306,6 +291,417 @@ #endif }; +#define HISTOGRAM_RANGE_LIST(HR) \ + /* Generic range histograms */ \ + HR(gc_idle_time_allotted_in_ms, V8.GCIdleTimeAllottedInMS, 0, 10000, 101) + +#define HISTOGRAM_TIMER_LIST(HT) \ + /* Garbage collection timers. */ \ + HT(gc_compactor, V8.GCCompactor) \ + HT(gc_scavenger, V8.GCScavenger) \ + HT(gc_context, V8.GCContext) /* GC context cleanup time */ \ + HT(gc_idle_notification, V8.GCIdleNotification) \ + HT(gc_incremental_marking, V8.GCIncrementalMarking) \ + HT(gc_low_memory_notification, V8.GCLowMemoryNotification) \ + /* Parsing timers. */ \ + HT(parse, V8.Parse) \ + HT(parse_lazy, V8.ParseLazy) \ + HT(pre_parse, V8.PreParse) \ + /* Total compilation times. 
*/ \ + HT(compile, V8.Compile) \ + HT(compile_eval, V8.CompileEval) \ + HT(compile_lazy, V8.CompileLazy) + +#define HISTOGRAM_PERCENTAGE_LIST(HP) \ + /* Heap fragmentation. */ \ + HP(external_fragmentation_total, \ + V8.MemoryExternalFragmentationTotal) \ + HP(external_fragmentation_old_pointer_space, \ + V8.MemoryExternalFragmentationOldPointerSpace) \ + HP(external_fragmentation_old_data_space, \ + V8.MemoryExternalFragmentationOldDataSpace) \ + HP(external_fragmentation_code_space, \ + V8.MemoryExternalFragmentationCodeSpace) \ + HP(external_fragmentation_map_space, \ + V8.MemoryExternalFragmentationMapSpace) \ + HP(external_fragmentation_cell_space, \ + V8.MemoryExternalFragmentationCellSpace) \ + HP(external_fragmentation_property_cell_space, \ + V8.MemoryExternalFragmentationPropertyCellSpace) \ + HP(external_fragmentation_lo_space, \ + V8.MemoryExternalFragmentationLoSpace) \ + /* Percentages of heap committed to each space. */ \ + HP(heap_fraction_new_space, \ + V8.MemoryHeapFractionNewSpace) \ + HP(heap_fraction_old_pointer_space, \ + V8.MemoryHeapFractionOldPointerSpace) \ + HP(heap_fraction_old_data_space, \ + V8.MemoryHeapFractionOldDataSpace) \ + HP(heap_fraction_code_space, \ + V8.MemoryHeapFractionCodeSpace) \ + HP(heap_fraction_map_space, \ + V8.MemoryHeapFractionMapSpace) \ + HP(heap_fraction_cell_space, \ + V8.MemoryHeapFractionCellSpace) \ + HP(heap_fraction_property_cell_space, \ + V8.MemoryHeapFractionPropertyCellSpace) \ + HP(heap_fraction_lo_space, \ + V8.MemoryHeapFractionLoSpace) \ + /* Percentage of crankshafted codegen. 
*/ \ + HP(codegen_fraction_crankshaft, \ + V8.CodegenFractionCrankshaft) \ + + +#define HISTOGRAM_MEMORY_LIST(HM) \ + HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \ + HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \ + HM(heap_sample_map_space_committed, \ + V8.MemoryHeapSampleMapSpaceCommitted) \ + HM(heap_sample_cell_space_committed, \ + V8.MemoryHeapSampleCellSpaceCommitted) \ + HM(heap_sample_property_cell_space_committed, \ + V8.MemoryHeapSamplePropertyCellSpaceCommitted) \ + HM(heap_sample_code_space_committed, \ + V8.MemoryHeapSampleCodeSpaceCommitted) \ + HM(heap_sample_maximum_committed, \ + V8.MemoryHeapSampleMaximumCommitted) \ + + +// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC +// Intellisense to crash. It was broken into two macros (each of length 40 +// lines) rather than one macro (of length about 80 lines) to work around +// this problem. Please avoid using recursive macros of this length when +// possible. +#define STATS_COUNTER_LIST_1(SC) \ + /* Global Handle Count*/ \ + SC(global_handles, V8.GlobalHandles) \ + /* OS Memory allocated */ \ + SC(memory_allocated, V8.OsMemoryAllocated) \ + SC(normalized_maps, V8.NormalizedMaps) \ + SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \ + SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \ + SC(alive_after_last_gc, V8.AliveAfterLastGC) \ + SC(objs_since_last_young, V8.ObjsSinceLastYoung) \ + SC(objs_since_last_full, V8.ObjsSinceLastFull) \ + SC(string_table_capacity, V8.StringTableCapacity) \ + SC(number_of_symbols, V8.NumberOfSymbols) \ + SC(script_wrappers, V8.ScriptWrappers) \ + SC(call_initialize_stubs, V8.CallInitializeStubs) \ + SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \ + SC(call_normal_stubs, V8.CallNormalStubs) \ + SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \ + SC(inlined_copied_elements, V8.InlinedCopiedElements) \ + SC(arguments_adaptors, V8.ArgumentsAdaptors) \ + SC(compilation_cache_hits, 
V8.CompilationCacheHits) \ + SC(compilation_cache_misses, V8.CompilationCacheMisses) \ + SC(string_ctor_calls, V8.StringConstructorCalls) \ + SC(string_ctor_conversions, V8.StringConstructorConversions) \ + SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \ + SC(string_ctor_string_value, V8.StringConstructorStringValue) \ + SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \ + /* Amount of evaled source code. */ \ + SC(total_eval_size, V8.TotalEvalSize) \ + /* Amount of loaded source code. */ \ + SC(total_load_size, V8.TotalLoadSize) \ + /* Amount of parsed source code. */ \ + SC(total_parse_size, V8.TotalParseSize) \ + /* Amount of source code skipped over using preparsing. */ \ + SC(total_preparse_skipped, V8.TotalPreparseSkipped) \ + /* Number of symbol lookups skipped using preparsing */ \ + SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \ + /* Amount of compiled source code. */ \ + SC(total_compile_size, V8.TotalCompileSize) \ + /* Amount of source code compiled with the full codegen. */ \ + SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \ + /* Number of contexts created from scratch. */ \ + SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \ + /* Number of contexts created by partial snapshot. */ \ + SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \ + /* Number of code objects found from pc. */ \ + SC(pc_to_code, V8.PcToCode) \ + SC(pc_to_code_cached, V8.PcToCodeCached) \ + /* The store-buffer implementation of the write barrier. */ \ + SC(store_buffer_compactions, V8.StoreBufferCompactions) \ + SC(store_buffer_overflows, V8.StoreBufferOverflows) + + +#define STATS_COUNTER_LIST_2(SC) \ + /* Number of code stubs. */ \ + SC(code_stubs, V8.CodeStubs) \ + /* Amount of stub code. */ \ + SC(total_stubs_code_size, V8.TotalStubsCodeSize) \ + /* Amount of (JS) compiled code. 
*/ \ + SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \ + SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \ + SC(gc_compactor_caused_by_promoted_data, \ + V8.GCCompactorCausedByPromotedData) \ + SC(gc_compactor_caused_by_oldspace_exhaustion, \ + V8.GCCompactorCausedByOldspaceExhaustion) \ + SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \ + SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \ + /* How is the generic keyed-load stub used? */ \ + SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \ + SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \ + SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \ + SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \ + SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \ + SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \ + /* How is the generic keyed-call stub used? */ \ + SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \ + SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \ + SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \ + SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \ + SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \ + SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \ + SC(named_load_global_stub, V8.NamedLoadGlobalStub) \ + SC(named_store_global_inline, V8.NamedStoreGlobalInline) \ + SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \ + SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \ + SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \ + SC(store_normal_miss, V8.StoreNormalMiss) \ + SC(store_normal_hit, V8.StoreNormalHit) \ + SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \ + SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \ + SC(cow_arrays_converted, V8.COWArraysConverted) \ + SC(call_miss, V8.CallMiss) \ + SC(keyed_call_miss, V8.KeyedCallMiss) \ + SC(load_miss, 
V8.LoadMiss) \ + SC(keyed_load_miss, V8.KeyedLoadMiss) \ + SC(call_const, V8.CallConst) \ + SC(call_const_fast_api, V8.CallConstFastApi) \ + SC(call_const_interceptor, V8.CallConstInterceptor) \ + SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \ + SC(call_global_inline, V8.CallGlobalInline) \ + SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \ + SC(constructed_objects, V8.ConstructedObjects) \ + SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \ + SC(negative_lookups, V8.NegativeLookups) \ + SC(negative_lookups_miss, V8.NegativeLookupsMiss) \ + SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \ + SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \ + SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \ + SC(array_function_runtime, V8.ArrayFunctionRuntime) \ + SC(array_function_native, V8.ArrayFunctionNative) \ + SC(for_in, V8.ForIn) \ + SC(enum_cache_hits, V8.EnumCacheHits) \ + SC(enum_cache_misses, V8.EnumCacheMisses) \ + SC(zone_segment_bytes, V8.ZoneSegmentBytes) \ + SC(fast_new_closure_total, V8.FastNewClosureTotal) \ + SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \ + SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \ + SC(string_add_runtime, V8.StringAddRuntime) \ + SC(string_add_native, V8.StringAddNative) \ + SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \ + SC(sub_string_runtime, V8.SubStringRuntime) \ + SC(sub_string_native, V8.SubStringNative) \ + SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \ + SC(string_compare_native, V8.StringCompareNative) \ + SC(string_compare_runtime, V8.StringCompareRuntime) \ + SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \ + SC(regexp_entry_native, V8.RegExpEntryNative) \ + SC(number_to_string_native, V8.NumberToStringNative) \ + SC(number_to_string_runtime, V8.NumberToStringRuntime) \ + SC(math_acos, V8.MathAcos) \ + SC(math_asin, V8.MathAsin) \ + SC(math_atan, 
V8.MathAtan) \ + SC(math_atan2, V8.MathAtan2) \ + SC(math_exp, V8.MathExp) \ + SC(math_floor, V8.MathFloor) \ + SC(math_log, V8.MathLog) \ + SC(math_pow, V8.MathPow) \ + SC(math_round, V8.MathRound) \ + SC(math_sqrt, V8.MathSqrt) \ + SC(stack_interrupts, V8.StackInterrupts) \ + SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \ + SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \ + SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \ + SC(soft_deopts_requested, V8.SoftDeoptsRequested) \ + SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \ + SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \ + /* Number of write barriers in generated code. */ \ + SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \ + SC(write_barriers_static, V8.WriteBarriersStatic) \ + SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \ + SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \ + SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \ + SC(old_pointer_space_bytes_available, \ + V8.MemoryOldPointerSpaceBytesAvailable) \ + SC(old_pointer_space_bytes_committed, \ + V8.MemoryOldPointerSpaceBytesCommitted) \ + SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \ + SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \ + SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \ + SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \ + SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \ + SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \ + SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \ + SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \ + SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \ + SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \ + SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \ + SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \ + SC(cell_space_bytes_used, 
V8.MemoryCellSpaceBytesUsed) \ + SC(property_cell_space_bytes_available, \ + V8.MemoryPropertyCellSpaceBytesAvailable) \ + SC(property_cell_space_bytes_committed, \ + V8.MemoryPropertyCellSpaceBytesCommitted) \ + SC(property_cell_space_bytes_used, \ + V8.MemoryPropertyCellSpaceBytesUsed) \ + SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \ + SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \ + SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed) + + +// This file contains all the v8 counters that are in use. +class Counters { + public: +#define HR(name, caption, min, max, num_buckets) \ + Histogram* name() { return &name##_; } + HISTOGRAM_RANGE_LIST(HR) +#undef HR + +#define HT(name, caption) \ + HistogramTimer* name() { return &name##_; } + HISTOGRAM_TIMER_LIST(HT) +#undef HT + +#define HP(name, caption) \ + Histogram* name() { return &name##_; } + HISTOGRAM_PERCENTAGE_LIST(HP) +#undef HP + +#define HM(name, caption) \ + Histogram* name() { return &name##_; } + HISTOGRAM_MEMORY_LIST(HM) +#undef HM + +#define SC(name, caption) \ + StatsCounter* name() { return &name##_; } + STATS_COUNTER_LIST_1(SC) + STATS_COUNTER_LIST_2(SC) +#undef SC + +#define SC(name) \ + StatsCounter* count_of_##name() { return &count_of_##name##_; } \ + StatsCounter* size_of_##name() { return &size_of_##name##_; } + INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + StatsCounter* count_of_CODE_TYPE_##name() \ + { return &count_of_CODE_TYPE_##name##_; } \ + StatsCounter* size_of_CODE_TYPE_##name() \ + { return &size_of_CODE_TYPE_##name##_; } + CODE_KIND_LIST(SC) +#undef SC + +#define SC(name) \ + StatsCounter* count_of_FIXED_ARRAY_##name() \ + { return &count_of_FIXED_ARRAY_##name##_; } \ + StatsCounter* size_of_FIXED_ARRAY_##name() \ + { return &size_of_FIXED_ARRAY_##name##_; } + FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + StatsCounter* count_of_CODE_AGE_##name() \ + { return &count_of_CODE_AGE_##name##_; } \ + StatsCounter* 
size_of_CODE_AGE_##name() \ + { return &size_of_CODE_AGE_##name##_; } + CODE_AGE_LIST_COMPLETE(SC) +#undef SC + + enum Id { +#define RATE_ID(name, caption) k_##name, + HISTOGRAM_TIMER_LIST(RATE_ID) +#undef RATE_ID +#define PERCENTAGE_ID(name, caption) k_##name, + HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID) +#undef PERCENTAGE_ID +#define MEMORY_ID(name, caption) k_##name, + HISTOGRAM_MEMORY_LIST(MEMORY_ID) +#undef MEMORY_ID +#define COUNTER_ID(name, caption) k_##name, + STATS_COUNTER_LIST_1(COUNTER_ID) + STATS_COUNTER_LIST_2(COUNTER_ID) +#undef COUNTER_ID +#define COUNTER_ID(name) kCountOf##name, kSizeOf##name, + INSTANCE_TYPE_LIST(COUNTER_ID) +#undef COUNTER_ID +#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \ + kSizeOfCODE_TYPE_##name, + CODE_KIND_LIST(COUNTER_ID) +#undef COUNTER_ID +#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \ + kSizeOfFIXED_ARRAY__##name, + FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID) +#undef COUNTER_ID +#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \ + kSizeOfCODE_AGE__##name, + CODE_AGE_LIST_COMPLETE(COUNTER_ID) +#undef COUNTER_ID + stats_counter_count + }; + + void ResetCounters(); + void ResetHistograms(); + + private: +#define HR(name, caption, min, max, num_buckets) Histogram name##_; + HISTOGRAM_RANGE_LIST(HR) +#undef HR + +#define HT(name, caption) \ + HistogramTimer name##_; + HISTOGRAM_TIMER_LIST(HT) +#undef HT + +#define HP(name, caption) \ + Histogram name##_; + HISTOGRAM_PERCENTAGE_LIST(HP) +#undef HP + +#define HM(name, caption) \ + Histogram name##_; + HISTOGRAM_MEMORY_LIST(HM) +#undef HM + +#define SC(name, caption) \ + StatsCounter name##_; + STATS_COUNTER_LIST_1(SC) + STATS_COUNTER_LIST_2(SC) +#undef SC + +#define SC(name) \ + StatsCounter size_of_##name##_; \ + StatsCounter count_of_##name##_; + INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + StatsCounter size_of_CODE_TYPE_##name##_; \ + StatsCounter count_of_CODE_TYPE_##name##_; + CODE_KIND_LIST(SC) +#undef SC + +#define SC(name) \ + StatsCounter 
size_of_FIXED_ARRAY_##name##_; \ + StatsCounter count_of_FIXED_ARRAY_##name##_; + FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) +#undef SC + +#define SC(name) \ + StatsCounter size_of_CODE_AGE_##name##_; \ + StatsCounter count_of_CODE_AGE_##name##_; + CODE_AGE_LIST_COMPLETE(SC) +#undef SC + + friend class Isolate; + + explicit Counters(Isolate* isolate); + + DISALLOW_IMPLICIT_CONSTRUCTORS(Counters); +}; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/cpu.cc nodejs-0.11.15/deps/v8/src/cpu.cc --- nodejs-0.11.13/deps/v8/src/cpu.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cpu.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,505 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "cpu.h" - -#if V8_LIBC_MSVCRT -#include <intrin.h> // __cpuid() -#endif -#if V8_OS_POSIX -#include <unistd.h> // sysconf() -#endif -#if V8_OS_QNX -#include <sys/syspage.h> // cpuinfo -#endif - -#include <ctype.h> -#include <limits.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <algorithm> - -#include "checks.h" -#if V8_OS_WIN -#include "win32-headers.h" -#endif - -namespace v8 { -namespace internal { - -#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 - -// Define __cpuid() for non-MSVC libraries. -#if !V8_LIBC_MSVCRT - -static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { -#if defined(__i386__) && defined(__pic__) - // Make sure to preserve ebx, which contains the pointer - // to the GOT in case we're generating PIC. - __asm__ volatile ( - "mov %%ebx, %%edi\n\t" - "cpuid\n\t" - "xchg %%edi, %%ebx\n\t" - : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) - : "a"(info_type) - ); -#else - __asm__ volatile ( - "cpuid \n\t" - : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) - : "a"(info_type) - ); -#endif // defined(__i386__) && defined(__pic__) -} - -#endif // !V8_LIBC_MSVCRT - -#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS - -#if V8_OS_LINUX - -#if V8_HOST_ARCH_ARM - -// See <uapi/asm/hwcap.h> kernel header. 
-/* - * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP - */ -#define HWCAP_SWP (1 << 0) -#define HWCAP_HALF (1 << 1) -#define HWCAP_THUMB (1 << 2) -#define HWCAP_26BIT (1 << 3) /* Play it safe */ -#define HWCAP_FAST_MULT (1 << 4) -#define HWCAP_FPA (1 << 5) -#define HWCAP_VFP (1 << 6) -#define HWCAP_EDSP (1 << 7) -#define HWCAP_JAVA (1 << 8) -#define HWCAP_IWMMXT (1 << 9) -#define HWCAP_CRUNCH (1 << 10) -#define HWCAP_THUMBEE (1 << 11) -#define HWCAP_NEON (1 << 12) -#define HWCAP_VFPv3 (1 << 13) -#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ -#define HWCAP_TLS (1 << 15) -#define HWCAP_VFPv4 (1 << 16) -#define HWCAP_IDIVA (1 << 17) -#define HWCAP_IDIVT (1 << 18) -#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ -#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) -#define HWCAP_LPAE (1 << 20) - -#define AT_HWCAP 16 - -// Read the ELF HWCAP flags by parsing /proc/self/auxv. -static uint32_t ReadELFHWCaps() { - uint32_t result = 0; - FILE* fp = fopen("/proc/self/auxv", "r"); - if (fp != NULL) { - struct { uint32_t tag; uint32_t value; } entry; - for (;;) { - size_t n = fread(&entry, sizeof(entry), 1, fp); - if (n == 0 || (entry.tag == 0 && entry.value == 0)) { - break; - } - if (entry.tag == AT_HWCAP) { - result = entry.value; - break; - } - } - fclose(fp); - } - return result; -} - -#endif // V8_HOST_ARCH_ARM - -// Extract the information exposed by the kernel via /proc/cpuinfo. -class CPUInfo V8_FINAL BASE_EMBEDDED { - public: - CPUInfo() : datalen_(0) { - // Get the size of the cpuinfo file by reading it until the end. This is - // required because files under /proc do not always return a valid size - // when using fseek(0, SEEK_END) + ftell(). Nor can the be mmap()-ed. 
- static const char PATHNAME[] = "/proc/cpuinfo"; - FILE* fp = fopen(PATHNAME, "r"); - if (fp != NULL) { - for (;;) { - char buffer[256]; - size_t n = fread(buffer, 1, sizeof(buffer), fp); - if (n == 0) { - break; - } - datalen_ += n; - } - fclose(fp); - } - - // Read the contents of the cpuinfo file. - data_ = new char[datalen_ + 1]; - fp = fopen(PATHNAME, "r"); - if (fp != NULL) { - for (size_t offset = 0; offset < datalen_; ) { - size_t n = fread(data_ + offset, 1, datalen_ - offset, fp); - if (n == 0) { - break; - } - offset += n; - } - fclose(fp); - } - - // Zero-terminate the data. - data_[datalen_] = '\0'; - } - - ~CPUInfo() { - delete[] data_; - } - - // Extract the content of a the first occurence of a given field in - // the content of the cpuinfo file and return it as a heap-allocated - // string that must be freed by the caller using delete[]. - // Return NULL if not found. - char* ExtractField(const char* field) const { - ASSERT(field != NULL); - - // Look for first field occurence, and ensure it starts the line. - size_t fieldlen = strlen(field); - char* p = data_; - for (;;) { - p = strstr(p, field); - if (p == NULL) { - return NULL; - } - if (p == data_ || p[-1] == '\n') { - break; - } - p += fieldlen; - } - - // Skip to the first colon followed by a space. - p = strchr(p + fieldlen, ':'); - if (p == NULL || !isspace(p[1])) { - return NULL; - } - p += 2; - - // Find the end of the line. - char* q = strchr(p, '\n'); - if (q == NULL) { - q = data_ + datalen_; - } - - // Copy the line into a heap-allocated buffer. - size_t len = q - p; - char* result = new char[len + 1]; - if (result != NULL) { - memcpy(result, p, len); - result[len] = '\0'; - } - return result; - } - - private: - char* data_; - size_t datalen_; -}; - - -// Checks that a space-separated list of items contains one given 'item'. 
-static bool HasListItem(const char* list, const char* item) { - ssize_t item_len = strlen(item); - const char* p = list; - if (p != NULL) { - while (*p != '\0') { - // Skip whitespace. - while (isspace(*p)) ++p; - - // Find end of current list item. - const char* q = p; - while (*q != '\0' && !isspace(*q)) ++q; - - if (item_len == q - p && memcmp(p, item, item_len) == 0) { - return true; - } - - // Skip to next item. - p = q; - } - } - return false; -} - -#endif // V8_OS_LINUX - -#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 - -CPU::CPU() : stepping_(0), - model_(0), - ext_model_(0), - family_(0), - ext_family_(0), - type_(0), - implementer_(0), - architecture_(0), - part_(0), - has_fpu_(false), - has_cmov_(false), - has_sahf_(false), - has_mmx_(false), - has_sse_(false), - has_sse2_(false), - has_sse3_(false), - has_ssse3_(false), - has_sse41_(false), - has_sse42_(false), - has_idiva_(false), - has_neon_(false), - has_thumbee_(false), - has_vfp_(false), - has_vfp3_(false), - has_vfp3_d32_(false) { - memcpy(vendor_, "Unknown", 8); -#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 - int cpu_info[4]; - - // __cpuid with an InfoType argument of 0 returns the number of - // valid Ids in CPUInfo[0] and the CPU identification string in - // the other three array elements. The CPU identification string is - // not in linear order. The code below arranges the information - // in a human readable form. The human readable order is CPUInfo[1] | - // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped - // before using memcpy to copy these three array elements to cpu_string. - __cpuid(cpu_info, 0); - unsigned num_ids = cpu_info[0]; - std::swap(cpu_info[2], cpu_info[3]); - memcpy(vendor_, cpu_info + 1, 12); - vendor_[12] = '\0'; - - // Interpret CPU feature information. 
- if (num_ids > 0) { - __cpuid(cpu_info, 1); - stepping_ = cpu_info[0] & 0xf; - model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0); - family_ = (cpu_info[0] >> 8) & 0xf; - type_ = (cpu_info[0] >> 12) & 0x3; - ext_model_ = (cpu_info[0] >> 16) & 0xf; - ext_family_ = (cpu_info[0] >> 20) & 0xff; - has_fpu_ = (cpu_info[3] & 0x00000001) != 0; - has_cmov_ = (cpu_info[3] & 0x00008000) != 0; - has_mmx_ = (cpu_info[3] & 0x00800000) != 0; - has_sse_ = (cpu_info[3] & 0x02000000) != 0; - has_sse2_ = (cpu_info[3] & 0x04000000) != 0; - has_sse3_ = (cpu_info[2] & 0x00000001) != 0; - has_ssse3_ = (cpu_info[2] & 0x00000200) != 0; - has_sse41_ = (cpu_info[2] & 0x00080000) != 0; - has_sse42_ = (cpu_info[2] & 0x00100000) != 0; - } - - // Query extended IDs. - __cpuid(cpu_info, 0x80000000); - unsigned num_ext_ids = cpu_info[0]; - - // Interpret extended CPU feature information. - if (num_ext_ids > 0x80000000) { - __cpuid(cpu_info, 0x80000001); - // SAHF is always available in compat/legacy mode, - // but must be probed in long mode. -#if V8_HOST_ARCH_IA32 - has_sahf_ = true; -#else - has_sahf_ = (cpu_info[2] & 0x00000001) != 0; -#endif - } - -#elif V8_HOST_ARCH_ARM - -#if V8_OS_LINUX - - CPUInfo cpu_info; - - // Extract implementor from the "CPU implementer" field. - char* implementer = cpu_info.ExtractField("CPU implementer"); - if (implementer != NULL) { - char* end ; - implementer_ = strtol(implementer, &end, 0); - if (end == implementer) { - implementer_ = 0; - } - delete[] implementer; - } - - // Extract part number from the "CPU part" field. - char* part = cpu_info.ExtractField("CPU part"); - if (part != NULL) { - char* end ; - part_ = strtol(part, &end, 0); - if (end == part) { - part_ = 0; - } - delete[] part; - } - - // Extract architecture from the "CPU Architecture" field. - // The list is well-known, unlike the the output of - // the 'Processor' field which can vary greatly. 
- // See the definition of the 'proc_arch' array in - // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in - // same file. - char* architecture = cpu_info.ExtractField("CPU architecture"); - if (architecture != NULL) { - char* end; - architecture_ = strtol(architecture, &end, 10); - if (end == architecture) { - architecture_ = 0; - } - delete[] architecture; - - // Unfortunately, it seems that certain ARMv6-based CPUs - // report an incorrect architecture number of 7! - // - // See http://code.google.com/p/android/issues/detail?id=10812 - // - // We try to correct this by looking at the 'elf_format' - // field reported by the 'Processor' field, which is of the - // form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for - // an ARMv6-one. For example, the Raspberry Pi is one popular - // ARMv6 device that reports architecture 7. - if (architecture_ == 7) { - char* processor = cpu_info.ExtractField("Processor"); - if (HasListItem(processor, "(v6l)")) { - architecture_ = 6; - } - delete[] processor; - } - } - - // Try to extract the list of CPU features from ELF hwcaps. - uint32_t hwcaps = ReadELFHWCaps(); - if (hwcaps != 0) { - has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0; - has_neon_ = (hwcaps & HWCAP_NEON) != 0; - has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0; - has_vfp_ = (hwcaps & HWCAP_VFP) != 0; - has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0; - has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 || - (hwcaps & HWCAP_VFPD32) != 0)); - } else { - // Try to fallback to "Features" CPUInfo field. 
- char* features = cpu_info.ExtractField("Features"); - has_idiva_ = HasListItem(features, "idiva"); - has_neon_ = HasListItem(features, "neon"); - has_thumbee_ = HasListItem(features, "thumbee"); - has_vfp_ = HasListItem(features, "vfp"); - if (HasListItem(features, "vfpv3")) { - has_vfp3_ = true; - has_vfp3_d32_ = true; - } else if (HasListItem(features, "vfpv3d16")) { - has_vfp3_ = true; - } - delete[] features; - } - - // Some old kernels will report vfp not vfpv3. Here we make an attempt - // to detect vfpv3 by checking for vfp *and* neon, since neon is only - // available on architectures with vfpv3. Checking neon on its own is - // not enough as it is possible to have neon without vfp. - if (has_vfp_ && has_neon_) { - has_vfp3_ = true; - } - - // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. - if (architecture_ < 7 && has_vfp3_) { - architecture_ = 7; - } - - // ARMv7 implies ThumbEE. - if (architecture_ >= 7) { - has_thumbee_ = true; - } - - // The earliest architecture with ThumbEE is ARMv6T2. - if (has_thumbee_ && architecture_ < 6) { - architecture_ = 6; - } - - // We don't support any FPUs other than VFP. - has_fpu_ = has_vfp_; - -#elif V8_OS_QNX - - uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags; - if (cpu_flags & ARM_CPU_FLAG_V7) { - architecture_ = 7; - has_thumbee_ = true; - } else if (cpu_flags & ARM_CPU_FLAG_V6) { - architecture_ = 6; - // QNX doesn't say if ThumbEE is available. - // Assume false for the architectures older than ARMv7. - } - ASSERT(architecture_ >= 6); - has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0; - has_vfp_ = has_fpu_; - if (cpu_flags & ARM_CPU_FLAG_NEON) { - has_neon_ = true; - has_vfp3_ = has_vfp_; -#ifdef ARM_CPU_FLAG_VFP_D32 - has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0; -#endif - } - has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0; - -#endif // V8_OS_LINUX - -#elif V8_HOST_ARCH_MIPS - - // Simple detection of FPU at runtime for Linux. 
- // It is based on /proc/cpuinfo, which reveals hardware configuration - // to user-space applications. According to MIPS (early 2010), no similar - // facility is universally available on the MIPS architectures, - // so it's up to individual OSes to provide such. - CPUInfo cpu_info; - char* cpu_model = cpu_info.ExtractField("cpu model"); - has_fpu_ = HasListItem(cpu_model, "FPU"); - delete[] cpu_model; - -#endif -} - - -// static -int CPU::NumberOfProcessorsOnline() { -#if V8_OS_WIN - SYSTEM_INFO info; - GetSystemInfo(&info); - return info.dwNumberOfProcessors; -#else - return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN)); -#endif -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/cpu.h nodejs-0.11.15/deps/v8/src/cpu.h --- nodejs-0.11.13/deps/v8/src/cpu.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cpu.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -// Copyright 2006-2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This module contains the architecture-specific code. This make the rest of -// the code less dependent on differences between different processor -// architecture. -// The classes have the same definition for all architectures. The -// implementation for a particular architecture is put in cpu_<arch>.cc. -// The build system then uses the implementation for the target architecture. -// - -#ifndef V8_CPU_H_ -#define V8_CPU_H_ - -#include "allocation.h" - -namespace v8 { -namespace internal { - -// ---------------------------------------------------------------------------- -// CPU -// -// Query information about the processor. -// -// This class also has static methods for the architecture specific functions. -// Add methods here to cope with differences between the supported -// architectures. For each architecture the file cpu_<arch>.cc contains the -// implementation of these static functions. 
- -class CPU V8_FINAL BASE_EMBEDDED { - public: - CPU(); - - // x86 CPUID information - const char* vendor() const { return vendor_; } - int stepping() const { return stepping_; } - int model() const { return model_; } - int ext_model() const { return ext_model_; } - int family() const { return family_; } - int ext_family() const { return ext_family_; } - int type() const { return type_; } - - // arm implementer/part information - int implementer() const { return implementer_; } - static const int ARM = 0x41; - static const int QUALCOMM = 0x51; - int architecture() const { return architecture_; } - int part() const { return part_; } - static const int ARM_CORTEX_A5 = 0xc05; - static const int ARM_CORTEX_A7 = 0xc07; - static const int ARM_CORTEX_A8 = 0xc08; - static const int ARM_CORTEX_A9 = 0xc09; - static const int ARM_CORTEX_A12 = 0xc0c; - static const int ARM_CORTEX_A15 = 0xc0f; - - // General features - bool has_fpu() const { return has_fpu_; } - - // x86 features - bool has_cmov() const { return has_cmov_; } - bool has_sahf() const { return has_sahf_; } - bool has_mmx() const { return has_mmx_; } - bool has_sse() const { return has_sse_; } - bool has_sse2() const { return has_sse2_; } - bool has_sse3() const { return has_sse3_; } - bool has_ssse3() const { return has_ssse3_; } - bool has_sse41() const { return has_sse41_; } - bool has_sse42() const { return has_sse42_; } - - // arm features - bool has_idiva() const { return has_idiva_; } - bool has_neon() const { return has_neon_; } - bool has_thumbee() const { return has_thumbee_; } - bool has_vfp() const { return has_vfp_; } - bool has_vfp3() const { return has_vfp3_; } - bool has_vfp3_d32() const { return has_vfp3_d32_; } - - // Returns the number of processors online. - static int NumberOfProcessorsOnline(); - - // Initializes the cpu architecture support. Called once at VM startup. - static void SetUp(); - - static bool SupportsCrankshaft(); - - // Flush instruction cache. 
- static void FlushICache(void* start, size_t size); - - private: - char vendor_[13]; - int stepping_; - int model_; - int ext_model_; - int family_; - int ext_family_; - int type_; - int implementer_; - int architecture_; - int part_; - bool has_fpu_; - bool has_cmov_; - bool has_sahf_; - bool has_mmx_; - bool has_sse_; - bool has_sse2_; - bool has_sse3_; - bool has_ssse3_; - bool has_sse41_; - bool has_sse42_; - bool has_idiva_; - bool has_neon_; - bool has_thumbee_; - bool has_vfp_; - bool has_vfp3_; - bool has_vfp3_d32_; -}; - -} } // namespace v8::internal - -#endif // V8_CPU_H_ diff -Nru nodejs-0.11.13/deps/v8/src/cpu-profiler.cc nodejs-0.11.15/deps/v8/src/cpu-profiler.cc --- nodejs-0.11.13/deps/v8/src/cpu-profiler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cpu-profiler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "cpu-profiler-inl.h" - -#include "compiler.h" -#include "frames-inl.h" -#include "hashmap.h" -#include "log-inl.h" -#include "vm-state-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "../include/v8-profiler.h" +#include "src/v8.h" + +#include "src/cpu-profiler-inl.h" + +#include "src/compiler.h" +#include "src/frames-inl.h" +#include "src/hashmap.h" +#include "src/log-inl.h" +#include "src/vm-state-inl.h" + +#include "include/v8-profiler.h" namespace v8 { namespace internal { @@ -46,7 +23,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor( ProfileGenerator* generator, Sampler* sampler, - TimeDelta period) + base::TimeDelta period) : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), generator_(generator), sampler_(sampler), @@ -131,7 +108,7 @@ void ProfilerEventsProcessor::Run() { while (running_) { - ElapsedTimer timer; + base::ElapsedTimer timer; timer.Start(); // Keep processing existing events until we need to do next sample. 
do { @@ -245,21 +222,21 @@ } -void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, +void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - CompilationInfo* info, - Name* name) { + CompilationInfo* info, Name* script_name) { if (FilterOutCodeCreateEvent(tag)) return; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->start = code->address(); - rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name)); + rec->entry = profiles_->NewCodeEntry( + tag, profiles_->GetFunctionName(shared->DebugName()), + CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name)); if (info) { rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges()); } if (shared->script()->IsScript()) { - ASSERT(Script::cast(shared->script())); + DCHECK(Script::cast(shared->script())); Script* script = Script::cast(shared->script()); rec->entry->set_script_id(script->id()->value()); rec->entry->set_bailout_reason( @@ -271,26 +248,22 @@ } -void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, +void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - CompilationInfo* info, - Name* source, int line, int column) { + CompilationInfo* info, Name* script_name, + int line, int column) { if (FilterOutCodeCreateEvent(tag)) return; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->start = code->address(); rec->entry = profiles_->NewCodeEntry( - tag, - profiles_->GetFunctionName(shared->DebugName()), - CodeEntry::kEmptyNamePrefix, - profiles_->GetName(source), - line, + tag, profiles_->GetFunctionName(shared->DebugName()), + CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name), line, column); if (info) { rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges()); } - 
ASSERT(Script::cast(shared->script())); + DCHECK(Script::cast(shared->script())); Script* script = Script::cast(shared->script()); rec->entry->set_script_id(script->id()->value()); rec->size = code->ExecutableSize(); @@ -327,6 +300,15 @@ } +void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { + CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT); + CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_; + rec->start = code->address(); + rec->bailout_reason = GetBailoutReason(shared->DisableOptimizationReason()); + processor_->Enqueue(evt_rec); +} + + void CpuProfiler::CodeDeleteEvent(Address from) { } @@ -387,7 +369,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate) : isolate_(isolate), - sampling_interval_(TimeDelta::FromMicroseconds( + sampling_interval_(base::TimeDelta::FromMicroseconds( FLAG_cpu_profiler_sampling_interval)), profiles_(new CpuProfilesCollection(isolate->heap())), generator_(NULL), @@ -401,7 +383,7 @@ ProfileGenerator* test_generator, ProfilerEventsProcessor* test_processor) : isolate_(isolate), - sampling_interval_(TimeDelta::FromMicroseconds( + sampling_interval_(base::TimeDelta::FromMicroseconds( FLAG_cpu_profiler_sampling_interval)), profiles_(test_profiles), generator_(test_generator), @@ -411,13 +393,13 @@ CpuProfiler::~CpuProfiler() { - ASSERT(!is_profiling_); + DCHECK(!is_profiling_); delete profiles_; } -void CpuProfiler::set_sampling_interval(TimeDelta value) { - ASSERT(!is_profiling_); +void CpuProfiler::set_sampling_interval(base::TimeDelta value) { + DCHECK(!is_profiling_); sampling_interval_ = value; } @@ -432,7 +414,6 @@ if (profiles_->StartProfiling(title, record_samples)) { StartProcessorIfNotStarted(); } - processor_->AddCurrentStack(isolate_); } @@ -442,29 +423,32 @@ void CpuProfiler::StartProcessorIfNotStarted() { - if (processor_ == NULL) { - Logger* logger = isolate_->logger(); - // Disable logging when using the new implementation. 
- saved_is_logging_ = logger->is_logging_; - logger->is_logging_ = false; - generator_ = new ProfileGenerator(profiles_); - Sampler* sampler = logger->sampler(); - processor_ = new ProfilerEventsProcessor( - generator_, sampler, sampling_interval_); - is_profiling_ = true; - // Enumerate stuff we already have in the heap. - ASSERT(isolate_->heap()->HasBeenSetUp()); - if (!FLAG_prof_browser_mode) { - logger->LogCodeObjects(); - } - logger->LogCompiledFunctions(); - logger->LogAccessorCallbacks(); - LogBuiltins(); - // Enable stack sampling. - sampler->SetHasProcessingThread(true); - sampler->IncreaseProfilingDepth(); - processor_->StartSynchronously(); + if (processor_ != NULL) { + processor_->AddCurrentStack(isolate_); + return; } + Logger* logger = isolate_->logger(); + // Disable logging when using the new implementation. + saved_is_logging_ = logger->is_logging_; + logger->is_logging_ = false; + generator_ = new ProfileGenerator(profiles_); + Sampler* sampler = logger->sampler(); + processor_ = new ProfilerEventsProcessor( + generator_, sampler, sampling_interval_); + is_profiling_ = true; + // Enumerate stuff we already have in the heap. + DCHECK(isolate_->heap()->HasBeenSetUp()); + if (!FLAG_prof_browser_mode) { + logger->LogCodeObjects(); + } + logger->LogCompiledFunctions(); + logger->LogAccessorCallbacks(); + LogBuiltins(); + // Enable stack sampling. 
+ sampler->SetHasProcessingThread(true); + sampler->IncreaseProfilingDepth(); + processor_->AddCurrentStack(isolate_); + processor_->StartSynchronously(); } @@ -509,7 +493,7 @@ void CpuProfiler::LogBuiltins() { Builtins* builtins = isolate_->builtins(); - ASSERT(builtins->is_initialized()); + DCHECK(builtins->is_initialized()); for (int i = 0; i < Builtins::builtin_count; i++) { CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN); ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_; diff -Nru nodejs-0.11.13/deps/v8/src/cpu-profiler.h nodejs-0.11.15/deps/v8/src/cpu-profiler.h --- nodejs-0.11.13/deps/v8/src/cpu-profiler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cpu-profiler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CPU_PROFILER_H_ #define V8_CPU_PROFILER_H_ -#include "allocation.h" -#include "atomicops.h" -#include "circular-queue.h" -#include "platform/time.h" -#include "sampler.h" -#include "unbound-queue.h" +#include "src/allocation.h" +#include "src/base/atomicops.h" +#include "src/base/platform/time.h" +#include "src/circular-queue.h" +#include "src/sampler.h" +#include "src/unbound-queue.h" namespace v8 { namespace internal { @@ -49,6 +26,7 @@ #define CODE_EVENTS_TYPE_LIST(V) \ V(CODE_CREATION, CodeCreateEventRecord) \ V(CODE_MOVE, CodeMoveEventRecord) \ + V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \ V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \ V(REPORT_BUILTIN, ReportBuiltinEventRecord) @@ -88,6 +66,15 @@ }; +class CodeDisableOptEventRecord : public CodeEventRecord { + public: + Address start; + const char* bailout_reason; + + INLINE(void UpdateCodeMap(CodeMap* code_map)); +}; + + class SharedFunctionInfoMoveEventRecord : public CodeEventRecord { public: Address from; @@ -135,11 +122,11 @@ // This class implements both the profile events processor thread and // methods called by event producers: VM and stack sampler threads. 
-class ProfilerEventsProcessor : public Thread { +class ProfilerEventsProcessor : public base::Thread { public: ProfilerEventsProcessor(ProfileGenerator* generator, Sampler* sampler, - TimeDelta period); + base::TimeDelta period); virtual ~ProfilerEventsProcessor() {} // Thread control. @@ -178,7 +165,7 @@ Sampler* sampler_; bool running_; // Sampling period in microseconds. - const TimeDelta period_; + const base::TimeDelta period_; UnboundQueue<CodeEventsContainer> events_buffer_; static const size_t kTickSampleBufferSize = 1 * MB; static const size_t kTickSampleQueueLength = @@ -213,7 +200,7 @@ virtual ~CpuProfiler(); - void set_sampling_interval(TimeDelta value); + void set_sampling_interval(base::TimeDelta value); void StartProfiling(const char* title, bool record_samples = false); void StartProfiling(String* title, bool record_samples); CpuProfile* StopProfiling(const char* title); @@ -234,20 +221,18 @@ Code* code, const char* comment); virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, Name* name); - virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, + virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - CompilationInfo* info, - Name* name); - virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, + CompilationInfo* info, Name* script_name); + virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - CompilationInfo* info, - Name* source, int line, int column); + CompilationInfo* info, Name* script_name, + int line, int column); virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count); virtual void CodeMovingGCEvent() {} virtual void CodeMoveEvent(Address from, Address to); + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared); virtual void CodeDeleteEvent(Address from); virtual void GetterCallbackEvent(Name* name, Address entry_point); virtual void 
RegExpCodeCreateEvent(Code* code, String* source); @@ -271,7 +256,7 @@ void LogBuiltins(); Isolate* isolate_; - TimeDelta sampling_interval_; + base::TimeDelta sampling_interval_; CpuProfilesCollection* profiles_; ProfileGenerator* generator_; ProfilerEventsProcessor* processor_; diff -Nru nodejs-0.11.13/deps/v8/src/cpu-profiler-inl.h nodejs-0.11.15/deps/v8/src/cpu-profiler-inl.h --- nodejs-0.11.13/deps/v8/src/cpu-profiler-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/cpu-profiler-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_CPU_PROFILER_INL_H_ #define V8_CPU_PROFILER_INL_H_ -#include "cpu-profiler.h" +#include "src/cpu-profiler.h" #include <new> -#include "circular-queue-inl.h" -#include "profile-generator-inl.h" -#include "unbound-queue-inl.h" +#include "src/circular-queue-inl.h" +#include "src/profile-generator-inl.h" +#include "src/unbound-queue-inl.h" namespace v8 { namespace internal { @@ -51,6 +28,14 @@ } +void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) { + CodeEntry* entry = code_map->FindEntry(start); + if (entry != NULL) { + entry->set_bailout_reason(bailout_reason); + } +} + + void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) { code_map->MoveCode(from, to); } diff -Nru nodejs-0.11.13/deps/v8/src/d8.cc nodejs-0.11.15/deps/v8/src/d8.cc --- nodejs-0.11.13/deps/v8/src/d8.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Defined when linking against shared lib on Windows. 
@@ -49,32 +26,37 @@ #endif // !V8_SHARED #ifdef V8_SHARED -#include "../include/v8-testing.h" +#include "include/v8-testing.h" #endif // V8_SHARED +#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE) +#include "src/gdb-jit.h" +#endif + #ifdef ENABLE_VTUNE_JIT_INTERFACE -#include "third_party/vtune/v8-vtune.h" +#include "src/third_party/vtune/v8-vtune.h" #endif -#include "d8.h" +#include "src/d8.h" +#include "include/libplatform/libplatform.h" #ifndef V8_SHARED -#include "api.h" -#include "checks.h" -#include "cpu.h" -#include "d8-debug.h" -#include "debug.h" -#include "natives.h" -#include "platform.h" -#include "v8.h" -#endif // V8_SHARED +#include "src/api.h" +#include "src/base/cpu.h" +#include "src/base/logging.h" +#include "src/base/platform/platform.h" +#include "src/d8-debug.h" +#include "src/debug.h" +#include "src/natives.h" +#include "src/v8.h" +#endif // !V8_SHARED #if !defined(_WIN32) && !defined(_WIN64) #include <unistd.h> // NOLINT #endif -#ifndef ASSERT -#define ASSERT(condition) assert(condition) +#ifndef DCHECK +#define DCHECK(condition) assert(condition) #endif namespace v8 { @@ -157,13 +139,14 @@ #ifndef V8_SHARED CounterMap* Shell::counter_map_; -i::OS::MemoryMappedFile* Shell::counters_file_ = NULL; +base::OS::MemoryMappedFile* Shell::counters_file_ = NULL; CounterCollection Shell::local_counters_; CounterCollection* Shell::counters_ = &local_counters_; -i::Mutex Shell::context_mutex_; -const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow(); +base::Mutex Shell::context_mutex_; +const base::TimeTicks Shell::kInitialTicks = + base::TimeTicks::HighResolutionNow(); Persistent<Context> Shell::utility_context_; -#endif // V8_SHARED +#endif // !V8_SHARED Persistent<Context> Shell::evaluation_context_; ShellOptions Shell::options; @@ -178,7 +161,7 @@ const char* name2 = reinterpret_cast<const char*>(key2); return strcmp(name1, name2) == 0; } -#endif // V8_SHARED +#endif // !V8_SHARED // Converts a V8 value to a C string. 
@@ -187,17 +170,47 @@ } +// Compile a string within the current v8 context. +Local<UnboundScript> Shell::CompileString( + Isolate* isolate, Local<String> source, Local<Value> name, + v8::ScriptCompiler::CompileOptions compile_options) { + ScriptOrigin origin(name); + ScriptCompiler::Source script_source(source, origin); + Local<UnboundScript> script = + ScriptCompiler::CompileUnbound(isolate, &script_source, compile_options); + + // Was caching requested & successful? Then compile again, now with cache. + if (script_source.GetCachedData()) { + if (compile_options == ScriptCompiler::kProduceCodeCache) { + compile_options = ScriptCompiler::kConsumeCodeCache; + } else if (compile_options == ScriptCompiler::kProduceParserCache) { + compile_options = ScriptCompiler::kConsumeParserCache; + } else { + DCHECK(false); // A new compile option? + } + ScriptCompiler::Source cached_source( + source, origin, new v8::ScriptCompiler::CachedData( + script_source.GetCachedData()->data, + script_source.GetCachedData()->length, + v8::ScriptCompiler::CachedData::BufferNotOwned)); + script = ScriptCompiler::CompileUnbound(isolate, &cached_source, + compile_options); + } + return script; +} + + // Executes a string within the current v8 context. bool Shell::ExecuteString(Isolate* isolate, Handle<String> source, Handle<Value> name, bool print_result, bool report_exceptions) { -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) +#ifndef V8_SHARED bool FLAG_debugger = i::FLAG_debugger; #else bool FLAG_debugger = false; -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT +#endif // !V8_SHARED HandleScope handle_scope(isolate); TryCatch try_catch; options.script_executed = true; @@ -205,10 +218,9 @@ // When debugging make exceptions appear to be uncaught. 
try_catch.SetVerbose(true); } - ScriptOrigin origin(name); - ScriptCompiler::Source script_source(source, origin); + Handle<UnboundScript> script = - ScriptCompiler::CompileUnbound(isolate, &script_source); + Shell::CompileString(isolate, source, name, options.compile_options); if (script.IsEmpty()) { // Print errors that happened during compilation. if (report_exceptions && !FLAG_debugger) @@ -223,13 +235,13 @@ realm->Exit(); data->realm_current_ = data->realm_switch_; if (result.IsEmpty()) { - ASSERT(try_catch.HasCaught()); + DCHECK(try_catch.HasCaught()); // Print errors that happened during execution. if (report_exceptions && !FLAG_debugger) ReportException(isolate, &try_catch); return false; } else { - ASSERT(!try_catch.HasCaught()); + DCHECK(!try_catch.HasCaught()); if (print_result) { #if !defined(V8_SHARED) if (options.test_shell) { @@ -313,11 +325,21 @@ #ifndef V8_SHARED // performance.now() returns a time stamp as double, measured in milliseconds. +// When FLAG_verify_predictable mode is enabled it returns current value +// of Heap::allocations_count(). void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) { - i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks; - args.GetReturnValue().Set(delta.InMillisecondsF()); + if (i::FLAG_verify_predictable) { + Isolate* v8_isolate = args.GetIsolate(); + i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap(); + args.GetReturnValue().Set(heap->synthetic_time()); + + } else { + base::TimeDelta delta = + base::TimeTicks::HighResolutionNow() - kInitialTicks; + args.GetReturnValue().Set(delta.InMillisecondsF()); + } } -#endif // V8_SHARED +#endif // !V8_SHARED // Realm.current() returns the index of the currently active realm. @@ -495,10 +517,7 @@ // not been fully read into the buffer yet (does not end with '\n'). // If fgets gets an error, just give up. char* input = NULL; - { // Release lock for blocking input. 
- Unlocker unlock(isolate); - input = fgets(buffer, kBufferSize, stdin); - } + input = fgets(buffer, kBufferSize, stdin); if (input == NULL) return Handle<String>(); length = static_cast<int>(strlen(buffer)); if (length == 0) { @@ -561,14 +580,14 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) { HandleScope handle_scope(isolate); -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) +#ifndef V8_SHARED Handle<Context> utility_context; bool enter_context = !isolate->InContext(); if (enter_context) { utility_context = Local<Context>::New(isolate, utility_context_); utility_context->Enter(); } -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT +#endif // !V8_SHARED v8::String::Utf8Value exception(try_catch->Exception()); const char* exception_string = ToCString(exception); Handle<Message> message = try_catch->Message(); @@ -578,7 +597,7 @@ printf("%s\n", exception_string); } else { // Print (filename):(line number): (message). - v8::String::Utf8Value filename(message->GetScriptResourceName()); + v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName()); const char* filename_string = ToCString(filename); int linenum = message->GetLineNumber(); printf("%s:%i: %s\n", filename_string, linenum, exception_string); @@ -603,9 +622,9 @@ } } printf("\n"); -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) +#ifndef V8_SHARED if (enter_context) utility_context->Exit(); -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT +#endif // !V8_SHARED } @@ -629,7 +648,6 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT Local<Object> Shell::DebugMessageDetails(Isolate* isolate, Handle<String> message) { EscapableHandleScope handle_scope(isolate); @@ -662,19 +680,6 @@ } -void Shell::DispatchDebugMessages() { - Isolate* isolate = v8::Isolate::GetCurrent(); - HandleScope handle_scope(isolate); - v8::Local<v8::Context> context = - v8::Local<v8::Context>::New(isolate, Shell::evaluation_context_); - v8::Context::Scope context_scope(context); - 
v8::Debug::ProcessDebugMessages(); -} -#endif // ENABLE_DEBUGGER_SUPPORT -#endif // V8_SHARED - - -#ifndef V8_SHARED int32_t* Counter::Bind(const char* name, bool is_histogram) { int i; for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) @@ -705,8 +710,8 @@ } -void Shell::MapCounters(const char* name) { - counters_file_ = i::OS::MemoryMappedFile::create( +void Shell::MapCounters(v8::Isolate* isolate, const char* name) { + counters_file_ = base::OS::MemoryMappedFile::create( name, sizeof(CounterCollection), &local_counters_); void* memory = (counters_file_ == NULL) ? NULL : counters_file_->memory(); @@ -715,9 +720,9 @@ Exit(1); } counters_ = static_cast<CounterCollection*>(memory); - V8::SetCounterFunction(LookupCounter); - V8::SetCreateHistogramFunction(CreateHistogram); - V8::SetAddHistogramSampleFunction(AddHistogramSample); + isolate->SetCounterFunction(LookupCounter); + isolate->SetCreateHistogramFunction(CreateHistogram); + isolate->SetAddHistogramSampleFunction(AddHistogramSample); } @@ -742,7 +747,7 @@ counter->Bind(name, is_histogram); } } else { - ASSERT(counter->is_histogram() == is_histogram); + DCHECK(counter->is_histogram() == is_histogram); } return counter; } @@ -774,7 +779,6 @@ void Shell::InstallUtilityScript(Isolate* isolate) { - Locker lock(isolate); HandleScope scope(isolate); // If we use the utility context, we have to set the security tokens so that // utility, evaluation and debug context can all access each other. 
@@ -786,18 +790,17 @@ evaluation_context->SetSecurityToken(Undefined(isolate)); v8::Context::Scope context_scope(utility_context); -#ifdef ENABLE_DEBUGGER_SUPPORT if (i::FLAG_debugger) printf("JavaScript debugger enabled\n"); // Install the debugger object in the utility scope i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug(); debug->Load(); + i::Handle<i::Context> debug_context = debug->debug_context(); i::Handle<i::JSObject> js_debug - = i::Handle<i::JSObject>(debug->debug_context()->global_object()); + = i::Handle<i::JSObject>(debug_context->global_object()); utility_context->Global()->Set(String::NewFromUtf8(isolate, "$debug"), Utils::ToLocal(js_debug)); - debug->debug_context()->set_security_token( + debug_context->set_security_token( reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value()); -#endif // ENABLE_DEBUGGER_SUPPORT // Run the d8 shell utility script in the utility context int source_index = i::NativesCollection<i::D8>::GetIndex("d8"); @@ -824,14 +827,10 @@ i::SharedFunctionInfo::cast(*compiled_script)->script())); script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE)); -#ifdef ENABLE_DEBUGGER_SUPPORT // Start the in-process debugger if requested. 
- if (i::FLAG_debugger && !i::FLAG_debugger_agent) { - v8::Debug::SetDebugEventListener2(HandleDebugEvent); - } -#endif // ENABLE_DEBUGGER_SUPPORT + if (i::FLAG_debugger) v8::Debug::SetDebugEventListener(HandleDebugEvent); } -#endif // V8_SHARED +#endif // !V8_SHARED #ifdef COMPRESS_STARTUP_DATA_BZ2 @@ -844,7 +843,7 @@ int* raw_data_size, const char* compressed_data, int compressed_data_size) { - ASSERT_EQ(v8::StartupData::kBZip2, + DCHECK_EQ(v8::StartupData::kBZip2, v8::V8::GetCompressedStartupDataAlgorithm()); unsigned int decompressed_size = *raw_data_size; int result = @@ -907,13 +906,11 @@ FunctionTemplate::New(isolate, PerformanceNow)); global_template->Set(String::NewFromUtf8(isolate, "performance"), performance_template); -#endif // V8_SHARED +#endif // !V8_SHARED -#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64) Handle<ObjectTemplate> os_templ = ObjectTemplate::New(isolate); AddOSMethods(isolate, os_templ); global_template->Set(String::NewFromUtf8(isolate, "os"), os_templ); -#endif // V8_SHARED return global_template; } @@ -933,46 +930,37 @@ Shell::counter_map_ = new CounterMap(); // Set up counters if (i::StrLength(i::FLAG_map_counters) != 0) - MapCounters(i::FLAG_map_counters); + MapCounters(isolate, i::FLAG_map_counters); if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) { - V8::SetCounterFunction(LookupCounter); - V8::SetCreateHistogramFunction(CreateHistogram); - V8::SetAddHistogramSampleFunction(AddHistogramSample); + isolate->SetCounterFunction(LookupCounter); + isolate->SetCreateHistogramFunction(CreateHistogram); + isolate->SetAddHistogramSampleFunction(AddHistogramSample); } -#endif // V8_SHARED +#endif // !V8_SHARED } void Shell::InitializeDebugger(Isolate* isolate) { if (options.test_shell) return; #ifndef V8_SHARED - Locker lock(isolate); HandleScope scope(isolate); Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate); utility_context_.Reset(isolate, Context::New(isolate, NULL, global_template)); - 
-#ifdef ENABLE_DEBUGGER_SUPPORT - // Start the debugger agent if requested. - if (i::FLAG_debugger_agent) { - v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true); - v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true); - } -#endif // ENABLE_DEBUGGER_SUPPORT -#endif // V8_SHARED +#endif // !V8_SHARED } Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) { #ifndef V8_SHARED // This needs to be a critical section since this is not thread-safe - i::LockGuard<i::Mutex> lock_guard(&context_mutex_); -#endif // V8_SHARED + base::LockGuard<base::Mutex> lock_guard(&context_mutex_); +#endif // !V8_SHARED // Initialize the global objects Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate); EscapableHandleScope handle_scope(isolate); Local<Context> context = Context::New(isolate, NULL, global_template); - ASSERT(!context.IsEmpty()); + DCHECK(!context.IsEmpty()); Context::Scope scope(context); #ifndef V8_SHARED @@ -982,14 +970,14 @@ factory->NewFixedArray(js_args.argc); for (int j = 0; j < js_args.argc; j++) { i::Handle<i::String> arg = - factory->NewStringFromUtf8(i::CStrVector(js_args[j])); + factory->NewStringFromUtf8(i::CStrVector(js_args[j])).ToHandleChecked(); arguments_array->set(j, *arg); } i::Handle<i::JSArray> arguments_jsarray = factory->NewJSArrayWithElements(arguments_array); context->Global()->Set(String::NewFromUtf8(isolate, "arguments"), Utils::ToLocal(arguments_jsarray)); -#endif // V8_SHARED +#endif // !V8_SHARED return handle_scope.Escape(context); } @@ -1013,7 +1001,7 @@ inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) { return strcmp(lhs.key, rhs.key) < 0; } -#endif // V8_SHARED +#endif // !V8_SHARED void Shell::OnExit() { @@ -1054,7 +1042,7 @@ } delete counters_file_; delete counter_map_; -#endif // V8_SHARED +#endif // !V8_SHARED } @@ -1081,8 +1069,6 @@ static char* ReadChars(Isolate* isolate, const char* name, int* size_out) { - // Release the V8 lock while reading 
files. - v8::Unlocker unlocker(isolate); FILE* file = FOpen(name, "rb"); if (file == NULL) return NULL; @@ -1121,7 +1107,7 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) { - ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT + DCHECK(sizeof(char) == sizeof(uint8_t)); // NOLINT String::Utf8Value filename(args[0]); int length; if (*filename == NULL) { @@ -1149,29 +1135,6 @@ } -#ifndef V8_SHARED -static char* ReadToken(char* data, char token) { - char* next = i::OS::StrChr(data, token); - if (next != NULL) { - *next = '\0'; - return (next + 1); - } - - return NULL; -} - - -static char* ReadLine(char* data) { - return ReadToken(data, '\n'); -} - - -static char* ReadWord(char* data) { - return ReadToken(data, ' '); -} -#endif // V8_SHARED - - // Reads a file into a v8 string. Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) { int size = 0; @@ -1185,7 +1148,6 @@ void Shell::RunShell(Isolate* isolate) { - Locker locker(isolate); HandleScope outer_scope(isolate); v8::Local<v8::Context> context = v8::Local<v8::Context>::New(isolate, evaluation_context_); @@ -1205,76 +1167,11 @@ } -#ifndef V8_SHARED -class ShellThread : public i::Thread { - public: - // Takes ownership of the underlying char array of |files|. - ShellThread(Isolate* isolate, char* files) - : Thread("d8:ShellThread"), - isolate_(isolate), files_(files) { } - - ~ShellThread() { - delete[] files_; - } - - virtual void Run(); - private: - Isolate* isolate_; - char* files_; -}; - - -void ShellThread::Run() { - char* ptr = files_; - while ((ptr != NULL) && (*ptr != '\0')) { - // For each newline-separated line. - char* next_line = ReadLine(ptr); - - if (*ptr == '#') { - // Skip comment lines. - ptr = next_line; - continue; - } - - // Prepare the context for this thread. 
- Locker locker(isolate_); - HandleScope outer_scope(isolate_); - Local<Context> thread_context = - Shell::CreateEvaluationContext(isolate_); - Context::Scope context_scope(thread_context); - PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_)); - - while ((ptr != NULL) && (*ptr != '\0')) { - HandleScope inner_scope(isolate_); - char* filename = ptr; - ptr = ReadWord(ptr); - - // Skip empty strings. - if (strlen(filename) == 0) { - continue; - } - - Handle<String> str = Shell::ReadFile(isolate_, filename); - if (str.IsEmpty()) { - printf("File '%s' not found\n", filename); - Shell::Exit(1); - } - - Shell::ExecuteString( - isolate_, str, String::NewFromUtf8(isolate_, filename), false, false); - } - - ptr = next_line; - } -} -#endif // V8_SHARED - - SourceGroup::~SourceGroup() { #ifndef V8_SHARED delete thread_; thread_ = NULL; -#endif // V8_SHARED +#endif // !V8_SHARED } @@ -1327,12 +1224,12 @@ #ifndef V8_SHARED -i::Thread::Options SourceGroup::GetThreadOptions() { +base::Thread::Options SourceGroup::GetThreadOptions() { // On some systems (OSX 10.6) the stack size default is 0.5Mb or less // which is not enough to parse the big literal expressions used in tests. // The stack size should be at least StackGuard::kLimitSize + some // OS-specific padding for thread startup code. 2Mbytes seems to be enough. 
- return i::Thread::Options("IsolateThread", 2 * MB); + return base::Thread::Options("IsolateThread", 2 * MB); } @@ -1342,7 +1239,6 @@ next_semaphore_.Wait(); { Isolate::Scope iscope(isolate); - Locker lock(isolate); { HandleScope scope(isolate); PerIsolateData data(isolate); @@ -1355,12 +1251,19 @@ } if (Shell::options.send_idle_notification) { const int kLongIdlePauseInMs = 1000; - V8::ContextDisposedNotification(); - V8::IdleNotification(kLongIdlePauseInMs); + isolate->ContextDisposedNotification(); + isolate->IdleNotification(kLongIdlePauseInMs); + } + if (Shell::options.invoke_weak_callbacks) { + // By sending a low memory notifications, we will try hard to collect + // all garbage and will therefore also invoke all weak callbacks of + // actually unreachable persistent handles. + isolate->LowMemoryNotification(); } } done_semaphore_.Signal(); } while (!Shell::options.last_run); + isolate->Dispose(); } @@ -1382,10 +1285,16 @@ done_semaphore_.Wait(); } } -#endif // V8_SHARED +#endif // !V8_SHARED + + +void SetFlagsFromString(const char* flags) { + v8::V8::SetFlagsFromString(flags, static_cast<int>(strlen(flags))); +} bool Shell::SetOptions(int argc, char* argv[]) { + bool logfile_per_isolate = false; for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--stress-opt") == 0) { options.stress_opt = true; @@ -1403,6 +1312,9 @@ // No support for stressing if we can't use --always-opt. 
options.stress_opt = false; options.stress_deopt = false; + } else if (strcmp(argv[i], "--logfile-per-isolate") == 0) { + logfile_per_isolate = true; + argv[i] = NULL; } else if (strcmp(argv[i], "--shell") == 0) { options.interactive_shell = true; argv[i] = NULL; @@ -1412,6 +1324,11 @@ } else if (strcmp(argv[i], "--send-idle-notification") == 0) { options.send_idle_notification = true; argv[i] = NULL; + } else if (strcmp(argv[i], "--invoke-weak-callbacks") == 0) { + options.invoke_weak_callbacks = true; + // TODO(jochen) See issue 3351 + options.send_idle_notification = true; + argv[i] = NULL; } else if (strcmp(argv[i], "-f") == 0) { // Ignore any -f flags for compatibility with other stand-alone // JavaScript engines. @@ -1422,13 +1339,6 @@ return false; #endif // V8_SHARED options.num_isolates++; - } else if (strcmp(argv[i], "-p") == 0) { -#ifdef V8_SHARED - printf("D8 with shared library does not support multi-threading\n"); - return false; -#else - options.num_parallel_files++; -#endif // V8_SHARED } else if (strcmp(argv[i], "--dump-heap-constants") == 0) { #ifdef V8_SHARED printf("D8 with shared library does not support constant dumping\n"); @@ -1436,48 +1346,45 @@ #else options.dump_heap_constants = true; argv[i] = NULL; -#endif +#endif // V8_SHARED } else if (strcmp(argv[i], "--throws") == 0) { options.expected_to_throw = true; argv[i] = NULL; } else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) { options.icu_data_file = argv[i] + 16; argv[i] = NULL; - } #ifdef V8_SHARED - else if (strcmp(argv[i], "--dump-counters") == 0) { + } else if (strcmp(argv[i], "--dump-counters") == 0) { printf("D8 with shared library does not include counters\n"); return false; } else if (strcmp(argv[i], "--debugger") == 0) { printf("Javascript debugger not included\n"); return false; - } #endif // V8_SHARED - } - -#ifndef V8_SHARED - // Run parallel threads if we are not using --isolate - options.parallel_files = new char*[options.num_parallel_files]; - int parallel_files_set 
= 0; - for (int i = 1; i < argc; i++) { - if (argv[i] == NULL) continue; - if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) { - if (options.num_isolates > 1) { - printf("-p is not compatible with --isolate\n"); +#ifdef V8_USE_EXTERNAL_STARTUP_DATA + } else if (strncmp(argv[i], "--natives_blob=", 15) == 0) { + options.natives_blob = argv[i] + 15; + argv[i] = NULL; + } else if (strncmp(argv[i], "--snapshot_blob=", 16) == 0) { + options.snapshot_blob = argv[i] + 16; + argv[i] = NULL; +#endif // V8_USE_EXTERNAL_STARTUP_DATA + } else if (strcmp(argv[i], "--cache") == 0 || + strncmp(argv[i], "--cache=", 8) == 0) { + const char* value = argv[i] + 7; + if (!*value || strncmp(value, "=code", 6) == 0) { + options.compile_options = v8::ScriptCompiler::kProduceCodeCache; + } else if (strncmp(value, "=parse", 7) == 0) { + options.compile_options = v8::ScriptCompiler::kProduceParserCache; + } else if (strncmp(value, "=none", 6) == 0) { + options.compile_options = v8::ScriptCompiler::kNoCompileOptions; + } else { + printf("Unknown option to --cache.\n"); return false; } argv[i] = NULL; - i++; - options.parallel_files[parallel_files_set] = argv[i]; - parallel_files_set++; - argv[i] = NULL; } } - if (parallel_files_set != options.num_parallel_files) { - printf("-p requires a file containing a list of files as parameter\n"); - return false; - } -#endif // V8_SHARED v8::V8::SetFlagsFromCommandLine(&argc, argv, true); @@ -1497,94 +1404,61 @@ } current->End(argc); + if (!logfile_per_isolate && options.num_isolates) { + SetFlagsFromString("--nologfile_per_isolate"); + } + return true; } int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) { #ifndef V8_SHARED - i::List<i::Thread*> threads(1); - if (options.parallel_files != NULL) { - for (int i = 0; i < options.num_parallel_files; i++) { - char* files = NULL; - { Locker lock(isolate); - int size = 0; - files = ReadChars(isolate, options.parallel_files[i], &size); - } - if (files == NULL) { - printf("File list '%s' not found\n", 
options.parallel_files[i]); - Exit(1); - } - ShellThread* thread = new ShellThread(isolate, files); - thread->Start(); - threads.Add(thread); - } - } for (int i = 1; i < options.num_isolates; ++i) { options.isolate_sources[i].StartExecuteInThread(); } -#endif // V8_SHARED - { // NOLINT - Locker lock(isolate); - { - HandleScope scope(isolate); - Local<Context> context = CreateEvaluationContext(isolate); - if (options.last_run) { - // Keep using the same context in the interactive shell. - evaluation_context_.Reset(isolate, context); -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) - // If the interactive debugger is enabled make sure to activate - // it before running the files passed on the command line. - if (i::FLAG_debugger) { - InstallUtilityScript(isolate); - } -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT - } - { - Context::Scope cscope(context); - PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate)); - options.isolate_sources[0].Execute(isolate); +#endif // !V8_SHARED + { + HandleScope scope(isolate); + Local<Context> context = CreateEvaluationContext(isolate); + if (options.last_run && options.use_interactive_shell()) { + // Keep using the same context in the interactive shell. + evaluation_context_.Reset(isolate, context); +#ifndef V8_SHARED + // If the interactive debugger is enabled make sure to activate + // it before running the files passed on the command line. 
+ if (i::FLAG_debugger) { + InstallUtilityScript(isolate); } +#endif // !V8_SHARED } - if (!options.last_run) { - if (options.send_idle_notification) { - const int kLongIdlePauseInMs = 1000; - V8::ContextDisposedNotification(); - V8::IdleNotification(kLongIdlePauseInMs); - } + { + Context::Scope cscope(context); + PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate)); + options.isolate_sources[0].Execute(isolate); } } + if (options.send_idle_notification) { + const int kLongIdlePauseInMs = 1000; + isolate->ContextDisposedNotification(); + isolate->IdleNotification(kLongIdlePauseInMs); + } + if (options.invoke_weak_callbacks) { + // By sending a low memory notifications, we will try hard to collect all + // garbage and will therefore also invoke all weak callbacks of actually + // unreachable persistent handles. + isolate->LowMemoryNotification(); + } #ifndef V8_SHARED for (int i = 1; i < options.num_isolates; ++i) { options.isolate_sources[i].WaitForThread(); } - - for (int i = 0; i < threads.length(); i++) { - i::Thread* thread = threads[i]; - thread->Join(); - delete thread; - } -#endif // V8_SHARED +#endif // !V8_SHARED return 0; } -#ifdef V8_SHARED -static void SetStandaloneFlagsViaCommandLine() { - int fake_argc = 3; - char **fake_argv = new char*[3]; - fake_argv[0] = NULL; - fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg"); - fake_argv[2] = strdup("--redirect-code-traces-to=code.asm"); - v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false); - free(fake_argv[1]); - free(fake_argv[2]); - delete[] fake_argv; -} -#endif - - #ifndef V8_SHARED static void DumpHeapConstants(i::Isolate* isolate) { i::Heap* heap = isolate->heap(); @@ -1639,26 +1513,17 @@ printf("}\n"); #undef ROOT_LIST_CASE } -#endif // V8_SHARED +#endif // !V8_SHARED class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator { public: virtual void* Allocate(size_t length) { - void* result = malloc(length); - memset(result, 0, length); - return result; - } - 
virtual void* AllocateUninitialized(size_t length) { - return malloc(length); + void* data = AllocateUninitialized(length); + return data == NULL ? data : memset(data, 0, length); } + virtual void* AllocateUninitialized(size_t length) { return malloc(length); } virtual void Free(void* data, size_t) { free(data); } - // TODO(dslomov): Remove when v8:2823 is fixed. - virtual void Free(void* data) { -#ifndef V8_SHARED - UNREACHABLE(); -#endif - } }; @@ -1670,20 +1535,75 @@ virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE { return malloc(0); } - virtual void Free(void*, size_t) V8_OVERRIDE { + virtual void Free(void* p, size_t) V8_OVERRIDE { + free(p); + } +}; + + +#ifdef V8_USE_EXTERNAL_STARTUP_DATA +class StartupDataHandler { + public: + StartupDataHandler(const char* natives_blob, + const char* snapshot_blob) { + Load(natives_blob, &natives_, v8::V8::SetNativesDataBlob); + Load(snapshot_blob, &snapshot_, v8::V8::SetSnapshotDataBlob); + } + + ~StartupDataHandler() { + delete[] natives_.data; + delete[] snapshot_.data; } + + private: + void Load(const char* blob_file, + v8::StartupData* startup_data, + void (*setter_fn)(v8::StartupData*)) { + startup_data->data = NULL; + startup_data->compressed_size = 0; + startup_data->raw_size = 0; + + if (!blob_file) + return; + + FILE* file = fopen(blob_file, "rb"); + if (!file) + return; + + fseek(file, 0, SEEK_END); + startup_data->raw_size = ftell(file); + rewind(file); + + startup_data->data = new char[startup_data->raw_size]; + startup_data->compressed_size = fread( + const_cast<char*>(startup_data->data), 1, startup_data->raw_size, + file); + fclose(file); + + if (startup_data->raw_size == startup_data->compressed_size) + (*setter_fn)(startup_data); + } + + v8::StartupData natives_; + v8::StartupData snapshot_; + + // Disallow copy & assign. 
+ StartupDataHandler(const StartupDataHandler& other); + void operator=(const StartupDataHandler& other); }; +#endif // V8_USE_EXTERNAL_STARTUP_DATA int Shell::Main(int argc, char* argv[]) { if (!SetOptions(argc, argv)) return 1; v8::V8::InitializeICU(options.icu_data_file); -#ifndef V8_SHARED - i::FLAG_trace_hydrogen_file = "hydrogen.cfg"; - i::FLAG_redirect_code_traces_to = "code.asm"; -#else - SetStandaloneFlagsViaCommandLine(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); +#ifdef V8_USE_EXTERNAL_STARTUP_DATA + StartupDataHandler startup_data(options.natives_blob, options.snapshot_blob); #endif + SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg"); + SetFlagsFromString("--redirect-code-traces-to=code.asm"); ShellArrayBufferAllocator array_buffer_allocator; MockArrayBufferAllocator mock_arraybuffer_allocator; if (options.mock_arraybuffer_allocator) { @@ -1692,16 +1612,24 @@ v8::V8::SetArrayBufferAllocator(&array_buffer_allocator); } int result = 0; - Isolate* isolate = Isolate::GetCurrent(); + Isolate* isolate = Isolate::New(); #ifndef V8_SHARED v8::ResourceConstraints constraints; - constraints.ConfigureDefaults(i::OS::TotalPhysicalMemory(), - i::CPU::NumberOfProcessorsOnline()); + constraints.ConfigureDefaults(base::OS::TotalPhysicalMemory(), + base::OS::MaxVirtualMemory(), + base::OS::NumberOfProcessorsOnline()); v8::SetResourceConstraints(isolate, &constraints); #endif DumbLineEditor dumb_line_editor(isolate); { + Isolate::Scope scope(isolate); Initialize(isolate); +#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE) + if (i::FLAG_gdbjit) { + v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, + i::GDBJITInterface::EventHandler); + } +#endif #ifdef ENABLE_VTUNE_JIT_INTERFACE vTune::InitializeVtuneForV8(); #endif @@ -1741,30 +1669,21 @@ result = RunMain(isolate, argc, argv); } - -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) - // Run remote debugger if requested, but 
never on --test - if (i::FLAG_remote_debugger && !options.test_shell) { - InstallUtilityScript(isolate); - RunRemoteDebugger(isolate, i::FLAG_debugger_port); - return 0; - } -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT - // Run interactive shell if explicitly requested or if no script has been // executed, but never on --test - - if (( options.interactive_shell || !options.script_executed ) - && !options.test_shell ) { -#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) + if (options.use_interactive_shell()) { +#ifndef V8_SHARED if (!i::FLAG_debugger) { InstallUtilityScript(isolate); } -#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT +#endif // !V8_SHARED RunShell(isolate); } } + isolate->Dispose(); V8::Dispose(); + V8::ShutdownPlatform(); + delete platform; OnExit(); diff -Nru nodejs-0.11.13/deps/v8/src/d8-debug.cc nodejs-0.11.15/deps/v8/src/d8-debug.cc --- nodejs-0.11.13/deps/v8/src/d8-debug.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8-debug.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,55 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifdef ENABLE_DEBUGGER_SUPPORT - -#include "d8.h" -#include "d8-debug.h" -#include "debug-agent.h" -#include "platform/socket.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +#include "src/d8.h" +#include "src/d8-debug.h" namespace v8 { -static bool was_running = true; - void PrintPrompt(bool is_running) { const char* prompt = is_running? "> " : "dbg> "; - was_running = is_running; printf("%s", prompt); fflush(stdout); } -void PrintPrompt() { - PrintPrompt(was_running); -} - - void HandleDebugEvent(const Debug::EventDetails& event_details) { // TODO(svenpanne) There should be a way to retrieve this in the callback. Isolate* isolate = Isolate::GetCurrent(); @@ -165,210 +129,4 @@ } } - -void RunRemoteDebugger(Isolate* isolate, int port) { - RemoteDebugger debugger(isolate, port); - debugger.Run(); -} - - -void RemoteDebugger::Run() { - bool ok; - - // Connect to the debugger agent. 
- conn_ = new i::Socket; - static const int kPortStrSize = 6; - char port_str[kPortStrSize]; - i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_); - ok = conn_->Connect("localhost", port_str); - if (!ok) { - printf("Unable to connect to debug agent %d\n", i::Socket::GetLastError()); - return; - } - - // Start the receiver thread. - ReceiverThread receiver(this); - receiver.Start(); - - // Start the keyboard thread. - KeyboardThread keyboard(this); - keyboard.Start(); - PrintPrompt(); - - // Process events received from debugged VM and from the keyboard. - bool terminate = false; - while (!terminate) { - event_available_.Wait(); - RemoteDebuggerEvent* event = GetEvent(); - switch (event->type()) { - case RemoteDebuggerEvent::kMessage: - HandleMessageReceived(event->data()); - break; - case RemoteDebuggerEvent::kKeyboard: - HandleKeyboardCommand(event->data()); - break; - case RemoteDebuggerEvent::kDisconnect: - terminate = true; - break; - - default: - UNREACHABLE(); - } - delete event; - } - - delete conn_; - conn_ = NULL; - // Wait for the receiver thread to end. 
- receiver.Join(); -} - - -void RemoteDebugger::MessageReceived(i::SmartArrayPointer<char> message) { - RemoteDebuggerEvent* event = - new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message); - AddEvent(event); -} - - -void RemoteDebugger::KeyboardCommand(i::SmartArrayPointer<char> command) { - RemoteDebuggerEvent* event = - new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command); - AddEvent(event); -} - - -void RemoteDebugger::ConnectionClosed() { - RemoteDebuggerEvent* event = - new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect, - i::SmartArrayPointer<char>()); - AddEvent(event); -} - - -void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) { - i::LockGuard<i::Mutex> lock_guard(&event_access_); - if (head_ == NULL) { - ASSERT(tail_ == NULL); - head_ = event; - tail_ = event; - } else { - ASSERT(tail_ != NULL); - tail_->set_next(event); - tail_ = event; - } - event_available_.Signal(); -} - - -RemoteDebuggerEvent* RemoteDebugger::GetEvent() { - i::LockGuard<i::Mutex> lock_guard(&event_access_); - ASSERT(head_ != NULL); - RemoteDebuggerEvent* result = head_; - head_ = head_->next(); - if (head_ == NULL) { - ASSERT(tail_ == result); - tail_ = NULL; - } - return result; -} - - -void RemoteDebugger::HandleMessageReceived(char* message) { - Locker lock(isolate_); - HandleScope scope(isolate_); - - // Print the event details. - TryCatch try_catch; - Handle<Object> details = Shell::DebugMessageDetails( - isolate_, Handle<String>::Cast(String::NewFromUtf8(isolate_, message))); - if (try_catch.HasCaught()) { - Shell::ReportException(isolate_, &try_catch); - PrintPrompt(); - return; - } - String::Utf8Value str(details->Get(String::NewFromUtf8(isolate_, "text"))); - if (str.length() == 0) { - // Empty string is used to signal not to process this event. 
- return; - } - if (*str != NULL) { - printf("%s\n", *str); - } else { - printf("???\n"); - } - - bool is_running = details->Get(String::NewFromUtf8(isolate_, "running")) - ->ToBoolean() - ->Value(); - PrintPrompt(is_running); -} - - -void RemoteDebugger::HandleKeyboardCommand(char* command) { - Locker lock(isolate_); - HandleScope scope(isolate_); - - // Convert the debugger command to a JSON debugger request. - TryCatch try_catch; - Handle<Value> request = Shell::DebugCommandToJSONRequest( - isolate_, String::NewFromUtf8(isolate_, command)); - if (try_catch.HasCaught()) { - Shell::ReportException(isolate_, &try_catch); - PrintPrompt(); - return; - } - - // If undefined is returned the command was handled internally and there is - // no JSON to send. - if (request->IsUndefined()) { - PrintPrompt(); - return; - } - - // Send the JSON debugger request. - i::DebuggerAgentUtil::SendMessage(conn_, Handle<String>::Cast(request)); -} - - -void ReceiverThread::Run() { - // Receive the connect message (with empty body). - i::SmartArrayPointer<char> message = - i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn()); - ASSERT(message.get() == NULL); - - while (true) { - // Receive a message. - i::SmartArrayPointer<char> message = - i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn()); - if (message.get() == NULL) { - remote_debugger_->ConnectionClosed(); - return; - } - - // Pass the message to the main thread. - remote_debugger_->MessageReceived(message); - } -} - - -void KeyboardThread::Run() { - static const int kBufferSize = 256; - while (true) { - // read keyboard input. - char command[kBufferSize]; - char* str = fgets(command, kBufferSize, stdin); - if (str == NULL) { - break; - } - - // Pass the keyboard command to the main thread. 
- remote_debugger_->KeyboardCommand( - i::SmartArrayPointer<char>(i::StrDup(command))); - } -} - - } // namespace v8 - -#endif // ENABLE_DEBUGGER_SUPPORT diff -Nru nodejs-0.11.13/deps/v8/src/d8-debug.h nodejs-0.11.15/deps/v8/src/d8-debug.h --- nodejs-0.11.13/deps/v8/src/d8-debug.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8-debug.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,155 +1,19 @@ // Copyright 2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_D8_DEBUG_H_ #define V8_D8_DEBUG_H_ -#include "d8.h" -#include "debug.h" -#include "platform/socket.h" +#include "src/d8.h" +#include "src/debug.h" namespace v8 { - void HandleDebugEvent(const Debug::EventDetails& event_details); -// Start the remove debugger connecting to a V8 debugger agent on the specified -// port. -void RunRemoteDebugger(Isolate* isolate, int port); - -// Forward declerations. -class RemoteDebuggerEvent; -class ReceiverThread; - - -// Remote debugging class. -class RemoteDebugger { - public: - explicit RemoteDebugger(Isolate* isolate, int port) - : isolate_(isolate), - port_(port), - event_available_(0), - head_(NULL), tail_(NULL) {} - void Run(); - - // Handle events from the subordinate threads. - void MessageReceived(i::SmartArrayPointer<char> message); - void KeyboardCommand(i::SmartArrayPointer<char> command); - void ConnectionClosed(); - - private: - // Add new debugger event to the list. - void AddEvent(RemoteDebuggerEvent* event); - // Read next debugger event from the list. - RemoteDebuggerEvent* GetEvent(); - - // Handle a message from the debugged V8. - void HandleMessageReceived(char* message); - // Handle a keyboard command. - void HandleKeyboardCommand(char* command); - - // Get connection to agent in debugged V8. - i::Socket* conn() { return conn_; } - - Isolate* isolate_; - int port_; // Port used to connect to debugger V8. - i::Socket* conn_; // Connection to debugger agent in debugged V8. - - // Linked list of events from debugged V8 and from keyboard input. Access to - // the list is guarded by a mutex and a semaphore signals new items in the - // list. - i::Mutex event_access_; - i::Semaphore event_available_; - RemoteDebuggerEvent* head_; - RemoteDebuggerEvent* tail_; - - friend class ReceiverThread; -}; - - -// Thread reading from debugged V8 instance. 
-class ReceiverThread: public i::Thread { - public: - explicit ReceiverThread(RemoteDebugger* remote_debugger) - : Thread("d8:ReceiverThrd"), - remote_debugger_(remote_debugger) {} - ~ReceiverThread() {} - - void Run(); - - private: - RemoteDebugger* remote_debugger_; -}; - - -// Thread reading keyboard input. -class KeyboardThread: public i::Thread { - public: - explicit KeyboardThread(RemoteDebugger* remote_debugger) - : Thread("d8:KeyboardThrd"), - remote_debugger_(remote_debugger) {} - ~KeyboardThread() {} - - void Run(); - - private: - RemoteDebugger* remote_debugger_; -}; - - -// Events processed by the main deubgger thread. -class RemoteDebuggerEvent { - public: - RemoteDebuggerEvent(int type, i::SmartArrayPointer<char> data) - : type_(type), data_(data), next_(NULL) { - ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect); - } - - static const int kMessage = 1; - static const int kKeyboard = 2; - static const int kDisconnect = 3; - - int type() { return type_; } - char* data() { return data_.get(); } - - private: - void set_next(RemoteDebuggerEvent* event) { next_ = event; } - RemoteDebuggerEvent* next() { return next_; } - - int type_; - i::SmartArrayPointer<char> data_; - RemoteDebuggerEvent* next_; - - friend class RemoteDebugger; -}; - - } // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/d8.gyp nodejs-0.11.15/deps/v8/src/d8.gyp --- nodejs-0.11.13/deps/v8/src/d8.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -41,10 +41,11 @@ 'type': 'executable', 'dependencies': [ '../tools/gyp/v8.gyp:v8', + '../tools/gyp/v8.gyp:v8_libplatform', ], # Generated source files need this explicitly: 'include_dirs+': [ - '../src', + '..', ], 'sources': [ 'd8.cc', @@ -57,6 +58,14 @@ 'libraries': [ '-lreadline', ], 'sources': [ 'd8-readline.cc' ], }], + ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \ + or OS=="openbsd" or OS=="solaris" or OS=="android" \ + or OS=="qnx")', { + 
'sources': [ 'd8-posix.cc', ] + }], + [ 'OS=="win"', { + 'sources': [ 'd8-windows.cc', ] + }], [ 'component!="shared_library"', { 'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ], 'conditions': [ @@ -69,14 +78,6 @@ 'd8_js2c', ], }], - ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \ - or OS=="openbsd" or OS=="solaris" or OS=="android" \ - or OS=="qnx")', { - 'sources': [ 'd8-posix.cc', ] - }], - [ 'OS=="win"', { - 'sources': [ 'd8-windows.cc', ] - }], ], }], ['v8_enable_vtunejit==1', { diff -Nru nodejs-0.11.13/deps/v8/src/d8.h nodejs-0.11.15/deps/v8/src/d8.h --- nodejs-0.11.13/deps/v8/src/d8.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_D8_H_ #define V8_D8_H_ #ifndef V8_SHARED -#include "allocation.h" -#include "hashmap.h" -#include "smart-pointers.h" -#include "v8.h" +#include "src/allocation.h" +#include "src/hashmap.h" +#include "src/smart-pointers.h" +#include "src/v8.h" #else -#include "../include/v8.h" -#endif // V8_SHARED +#include "include/v8.h" +#endif // !V8_SHARED namespace v8 { @@ -92,7 +69,7 @@ const_cast<char*>(name), Hash(name), true); - ASSERT(answer != NULL); + DCHECK(answer != NULL); answer->value = value; } class Iterator { @@ -113,7 +90,7 @@ static bool Match(void* key1, void* key2); i::HashMap hash_map_; }; -#endif // V8_SHARED +#endif // !V8_SHARED class LineEditor { @@ -143,7 +120,7 @@ next_semaphore_(0), done_semaphore_(0), thread_(NULL), -#endif // V8_SHARED +#endif // !V8_SHARED argv_(NULL), begin_offset_(0), end_offset_(0) {} @@ -164,10 +141,10 @@ void WaitForThread(); private: - class IsolateThread : public i::Thread { + class IsolateThread : public base::Thread { public: explicit IsolateThread(SourceGroup* group) - : i::Thread(GetThreadOptions()), group_(group) {} + : base::Thread(GetThreadOptions()), group_(group) {} virtual void Run() { group_->ExecuteInThread(); @@ -177,13 +154,13 @@ SourceGroup* group_; }; - static i::Thread::Options GetThreadOptions(); + static base::Thread::Options GetThreadOptions(); void 
ExecuteInThread(); - i::Semaphore next_semaphore_; - i::Semaphore done_semaphore_; - i::Thread* thread_; -#endif // V8_SHARED + base::Semaphore next_semaphore_; + base::Semaphore done_semaphore_; + base::Thread* thread_; +#endif // !V8_SHARED void ExitShell(int exit_code); Handle<String> ReadFile(Isolate* isolate, const char* name); @@ -217,39 +194,37 @@ class ShellOptions { public: - ShellOptions() : -#ifndef V8_SHARED - num_parallel_files(0), - parallel_files(NULL), -#endif // V8_SHARED - script_executed(false), - last_run(true), - send_idle_notification(false), - stress_opt(false), - stress_deopt(false), - interactive_shell(false), - test_shell(false), - dump_heap_constants(false), - expected_to_throw(false), - mock_arraybuffer_allocator(false), - num_isolates(1), - isolate_sources(NULL), - icu_data_file(NULL) { } + ShellOptions() + : script_executed(false), + last_run(true), + send_idle_notification(false), + invoke_weak_callbacks(false), + stress_opt(false), + stress_deopt(false), + interactive_shell(false), + test_shell(false), + dump_heap_constants(false), + expected_to_throw(false), + mock_arraybuffer_allocator(false), + num_isolates(1), + compile_options(v8::ScriptCompiler::kNoCompileOptions), + isolate_sources(NULL), + icu_data_file(NULL), + natives_blob(NULL), + snapshot_blob(NULL) {} ~ShellOptions() { -#ifndef V8_SHARED - delete[] parallel_files; -#endif // V8_SHARED delete[] isolate_sources; } -#ifndef V8_SHARED - int num_parallel_files; - char** parallel_files; -#endif // V8_SHARED + bool use_interactive_shell() { + return (interactive_shell || !script_executed) && !test_shell; + } + bool script_executed; bool last_run; bool send_idle_notification; + bool invoke_weak_callbacks; bool stress_opt; bool stress_deopt; bool interactive_shell; @@ -258,8 +233,11 @@ bool expected_to_throw; bool mock_arraybuffer_allocator; int num_isolates; + v8::ScriptCompiler::CompileOptions compile_options; SourceGroup* isolate_sources; const char* icu_data_file; + const 
char* natives_blob; + const char* snapshot_blob; }; #ifdef V8_SHARED @@ -269,6 +247,9 @@ #endif // V8_SHARED public: + static Local<UnboundScript> CompileString( + Isolate* isolate, Local<String> source, Local<Value> name, + v8::ScriptCompiler::CompileOptions compile_options); static bool ExecuteString(Isolate* isolate, Handle<String> source, Handle<Value> name, @@ -293,18 +274,15 @@ int max, size_t buckets); static void AddHistogramSample(void* histogram, int sample); - static void MapCounters(const char* name); + static void MapCounters(v8::Isolate* isolate, const char* name); -#ifdef ENABLE_DEBUGGER_SUPPORT static Local<Object> DebugMessageDetails(Isolate* isolate, Handle<String> message); static Local<Value> DebugCommandToJSONRequest(Isolate* isolate, Handle<String> command); - static void DispatchDebugMessages(); -#endif // ENABLE_DEBUGGER_SUPPORT static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args); -#endif // V8_SHARED +#endif // !V8_SHARED static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args); static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -394,13 +372,13 @@ // don't want to store the stats in a memory-mapped file static CounterCollection local_counters_; static CounterCollection* counters_; - static i::OS::MemoryMappedFile* counters_file_; - static i::Mutex context_mutex_; - static const i::TimeTicks kInitialTicks; + static base::OS::MemoryMappedFile* counters_file_; + static base::Mutex context_mutex_; + static const base::TimeTicks kInitialTicks; static Counter* GetCounter(const char* name, bool is_histogram); static void InstallUtilityScript(Isolate* isolate); -#endif // V8_SHARED +#endif // !V8_SHARED static void Initialize(Isolate* isolate); static void InitializeDebugger(Isolate* isolate); static void RunShell(Isolate* isolate); diff -Nru nodejs-0.11.13/deps/v8/src/d8.js nodejs-0.11.15/deps/v8/src/d8.js --- nodejs-0.11.13/deps/v8/src/d8.js 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/d8.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
"use strict"; @@ -231,10 +208,6 @@ details.text = result; break; - case 'scriptCollected': - details.text = result; - break; - default: details.text = 'Unknown debug event ' + response.event(); } @@ -526,7 +499,7 @@ json += '"seq":' + this.seq; json += ',"type":"' + this.type + '"'; if (this.command) { - json += ',"command":' + StringToJSON_(this.command); + json += ',"command":' + JSON.stringify(this.command); } if (this.arguments) { json += ',"arguments":'; @@ -534,7 +507,7 @@ if (this.arguments.toJSONProtocol) { json += this.arguments.toJSONProtocol(); } else { - json += SimpleObjectToJSON_(this.arguments); + json += JSON.stringify(this.arguments); } } json += '}'; @@ -1988,214 +1961,6 @@ }; -function MakeJSONPair_(name, value) { - return '"' + name + '":' + value; -} - - -function ArrayToJSONObject_(content) { - return '{' + content.join(',') + '}'; -} - - -function ArrayToJSONArray_(content) { - return '[' + content.join(',') + ']'; -} - - -function BooleanToJSON_(value) { - return String(value); -} - - -function NumberToJSON_(value) { - return String(value); -} - - -// Mapping of some control characters to avoid the \uXXXX syntax for most -// commonly used control cahracters. -var ctrlCharMap_ = { - '\b': '\\b', - '\t': '\\t', - '\n': '\\n', - '\f': '\\f', - '\r': '\\r', - '"' : '\\"', - '\\': '\\\\' -}; - - -// Regular expression testing for ", \ and control characters (0x00 - 0x1F). -var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]'); - - -// Regular expression matching ", \ and control characters (0x00 - 0x1F) -// globally. -var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g'); - - -/** - * Convert a String to its JSON representation (see http://www.json.org/). To - * avoid depending on the String object this method calls the functions in - * string.js directly and not through the value. 
- * @param {String} value The String value to format as JSON - * @return {string} JSON formatted String value - */ -function StringToJSON_(value) { - // Check for" , \ and control characters (0x00 - 0x1F). No need to call - // RegExpTest as ctrlchar is constructed using RegExp. - if (ctrlCharTest_.test(value)) { - // Replace ", \ and control characters (0x00 - 0x1F). - return '"' + - value.replace(ctrlCharMatch_, function (char) { - // Use charmap if possible. - var mapped = ctrlCharMap_[char]; - if (mapped) return mapped; - mapped = char.charCodeAt(); - // Convert control character to unicode escape sequence. - return '\\u00' + - '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) + - '0'; // TODO %NumberToRadixString(mapped % 16, 16) - }) - + '"'; - } - - // Simple string with no special characters. - return '"' + value + '"'; -} - - -/** - * Convert a Date to ISO 8601 format. To avoid depending on the Date object - * this method calls the functions in date.js directly and not through the - * value. - * @param {Date} value The Date value to format as JSON - * @return {string} JSON formatted Date value - */ -function DateToISO8601_(value) { - var f = function(n) { - return n < 10 ? '0' + n : n; - }; - var g = function(n) { - return n < 10 ? '00' + n : n < 100 ? '0' + n : n; - }; - return builtins.GetUTCFullYearFrom(value) + '-' + - f(builtins.GetUTCMonthFrom(value) + 1) + '-' + - f(builtins.GetUTCDateFrom(value)) + 'T' + - f(builtins.GetUTCHoursFrom(value)) + ':' + - f(builtins.GetUTCMinutesFrom(value)) + ':' + - f(builtins.GetUTCSecondsFrom(value)) + '.' + - g(builtins.GetUTCMillisecondsFrom(value)) + 'Z'; -} - - -/** - * Convert a Date to ISO 8601 format. To avoid depending on the Date object - * this method calls the functions in date.js directly and not through the - * value. 
- * @param {Date} value The Date value to format as JSON - * @return {string} JSON formatted Date value - */ -function DateToJSON_(value) { - return '"' + DateToISO8601_(value) + '"'; -} - - -/** - * Convert an Object to its JSON representation (see http://www.json.org/). - * This implementation simply runs through all string property names and adds - * each property to the JSON representation for some predefined types. For type - * "object" the function calls itself recursively unless the object has the - * function property "toJSONProtocol" in which case that is used. This is not - * a general implementation but sufficient for the debugger. Note that circular - * structures will cause infinite recursion. - * @param {Object} object The object to format as JSON - * @return {string} JSON formatted object value - */ -function SimpleObjectToJSON_(object) { - var content = []; - for (var key in object) { - // Only consider string keys. - if (typeof key == 'string') { - var property_value = object[key]; - - // Format the value based on its type. - var property_value_json; - switch (typeof property_value) { - case 'object': - if (IS_NULL(property_value)) { - property_value_json = 'null'; - } else if (typeof property_value.toJSONProtocol == 'function') { - property_value_json = property_value.toJSONProtocol(true); - } else if (property_value.constructor.name == 'Array'){ - property_value_json = SimpleArrayToJSON_(property_value); - } else { - property_value_json = SimpleObjectToJSON_(property_value); - } - break; - - case 'boolean': - property_value_json = BooleanToJSON_(property_value); - break; - - case 'number': - property_value_json = NumberToJSON_(property_value); - break; - - case 'string': - property_value_json = StringToJSON_(property_value); - break; - - default: - property_value_json = null; - } - - // Add the property if relevant. 
- if (property_value_json) { - content.push(StringToJSON_(key) + ':' + property_value_json); - } - } - } - - // Make JSON object representation. - return '{' + content.join(',') + '}'; -} - - -/** - * Convert an array to its JSON representation. This is a VERY simple - * implementation just to support what is needed for the debugger. - * @param {Array} arrya The array to format as JSON - * @return {string} JSON formatted array value - */ -function SimpleArrayToJSON_(array) { - // Make JSON array representation. - var json = '['; - for (var i = 0; i < array.length; i++) { - if (i != 0) { - json += ','; - } - var elem = array[i]; - if (elem.toJSONProtocol) { - json += elem.toJSONProtocol(true); - } else if (typeof(elem) === 'object') { - json += SimpleObjectToJSON_(elem); - } else if (typeof(elem) === 'boolean') { - json += BooleanToJSON_(elem); - } else if (typeof(elem) === 'number') { - json += NumberToJSON_(elem); - } else if (typeof(elem) === 'string') { - json += StringToJSON_(elem); - } else { - json += elem; - } - } - json += ']'; - return json; -} - - // A more universal stringify that supports more types than JSON. // Used by the d8 shell to output results. var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects @@ -2215,7 +1980,7 @@ case "string": return "\"" + x.toString() + "\""; case "symbol": - return "Symbol(" + (x.name ? 
Stringify(x.name, depth) : "") + ")" + return x.toString(); case "object": if (IS_NULL(x)) return "null"; if (x.constructor && x.constructor.name === "Array") { @@ -2231,18 +1996,22 @@ if (string && string !== "[object Object]") return string; } catch(e) {} var props = []; - for (var name in x) { + var names = Object.getOwnPropertyNames(x); + names = names.concat(Object.getOwnPropertySymbols(x)); + for (var i in names) { + var name = names[i]; var desc = Object.getOwnPropertyDescriptor(x, name); if (IS_UNDEFINED(desc)) continue; + if (IS_SYMBOL(name)) name = "[" + Stringify(name) + "]"; if ("value" in desc) { props.push(name + ": " + Stringify(desc.value, depth - 1)); } - if ("get" in desc) { - var getter = desc.get.toString(); + if (desc.get) { + var getter = Stringify(desc.get); props.push("get " + name + getter.slice(getter.indexOf('('))); } - if ("set" in desc) { - var setter = desc.set.toString(); + if (desc.set) { + var setter = Stringify(desc.set); props.push("set " + name + setter.slice(setter.indexOf('('))); } } diff -Nru nodejs-0.11.13/deps/v8/src/d8-posix.cc nodejs-0.11.15/deps/v8/src/d8-posix.cc --- nodejs-0.11.13/deps/v8/src/d8-posix.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8-posix.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,20 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
- -#include <stdlib.h> #include <errno.h> -#include <sys/types.h> +#include <fcntl.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <sys/select.h> #include <sys/stat.h> #include <sys/time.h> -#include <time.h> -#include <unistd.h> -#include <fcntl.h> +#include <sys/types.h> #include <sys/wait.h> -#include <signal.h> - +#include <unistd.h> -#include "d8.h" -#include "d8-debug.h" -#include "debug.h" +#include "src/d8.h" namespace v8 { @@ -106,7 +80,7 @@ static bool WaitOnFD(int fd, int read_timeout, int total_timeout, - struct timeval& start_time) { + const struct timeval& start_time) { fd_set readfds, writefds, exceptfds; struct timeval timeout; int gone = 0; @@ -229,8 +203,8 @@ } } static const unsigned kMaxArgs = 1000; - char** arg_array() { return exec_args_; } - char* arg0() { return exec_args_[0]; } + char* const* arg_array() const { return exec_args_; } + const char* arg0() const { return exec_args_[0]; } private: char* exec_args_[kMaxArgs + 1]; @@ -272,7 +246,7 @@ // It only returns if an error occurred. static void ExecSubprocess(int* exec_error_fds, int* stdout_fds, - ExecArgs& exec_args) { + const ExecArgs& exec_args) { close(exec_error_fds[kReadFD]); // Don't need this in the child. close(stdout_fds[kReadFD]); // Don't need this in the child. close(1); // Close stdout. @@ -311,7 +285,7 @@ // succeeded or false if an exception was thrown. static Handle<Value> GetStdout(Isolate* isolate, int child_fd, - struct timeval& start_time, + const struct timeval& start_time, int read_timeout, int total_timeout) { Handle<String> accumulator = String::Empty(isolate); @@ -383,8 +357,8 @@ // Get exit status of child. 
static bool WaitForChild(Isolate* isolate, int pid, - ZombieProtector& child_waiter, - struct timeval& start_time, + ZombieProtector& child_waiter, // NOLINT + const struct timeval& start_time, int read_timeout, int total_timeout) { #ifdef HAS_WAITID diff -Nru nodejs-0.11.13/deps/v8/src/d8-readline.cc nodejs-0.11.15/deps/v8/src/d8-readline.cc --- nodejs-0.11.13/deps/v8/src/d8-readline.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8-readline.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <stdio.h> // NOLINT #include <string.h> // NOLINT @@ -33,7 +10,7 @@ // The readline includes leaves RETURN defined which breaks V8 compilation. #undef RETURN -#include "d8.h" +#include "src/d8.h" // There are incompatibilities between different versions and different // implementations of readline. This smooths out one known incompatibility. @@ -105,10 +82,7 @@ Handle<String> ReadLineEditor::Prompt(const char* prompt) { char* result = NULL; - { // Release lock for blocking input. - Unlocker unlock(Isolate::GetCurrent()); - result = readline(prompt); - } + result = readline(prompt); if (result == NULL) return Handle<String>(); AddHistory(result); return String::NewFromUtf8(isolate_, result); @@ -146,7 +120,6 @@ static unsigned current_index; static Persistent<Array> current_completions; Isolate* isolate = read_line_editor.isolate_; - Locker lock(isolate); HandleScope scope(isolate); Handle<Array> completions; if (state == 0) { diff -Nru nodejs-0.11.13/deps/v8/src/d8-windows.cc nodejs-0.11.15/deps/v8/src/d8-windows.cc --- nodejs-0.11.13/deps/v8/src/d8-windows.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/d8-windows.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,8 @@ // Copyright 2009 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
- -#include "d8.h" -#include "d8-debug.h" -#include "debug.h" -#include "api.h" +#include "src/d8.h" namespace v8 { diff -Nru nodejs-0.11.13/deps/v8/src/data-flow.cc nodejs-0.11.15/deps/v8/src/data-flow.cc --- nodejs-0.11.13/deps/v8/src/data-flow.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/data-flow.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "data-flow.h" -#include "scopes.h" +#include "src/data-flow.h" +#include "src/scopes.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/data-flow.h nodejs-0.11.15/deps/v8/src/data-flow.h --- nodejs-0.11.13/deps/v8/src/data-flow.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/data-flow.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DATAFLOW_H_ #define V8_DATAFLOW_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "ast.h" -#include "compiler.h" -#include "zone-inl.h" +#include "src/allocation.h" +#include "src/ast.h" +#include "src/compiler.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { @@ -48,7 +25,7 @@ current_index_(0), current_value_(target->data_[0]), current_(-1) { - ASSERT(target->data_length_ > 0); + DCHECK(target->data_length_ > 0); Advance(); } ~Iterator() { } @@ -57,7 +34,7 @@ void Advance(); int Current() const { - ASSERT(!Done()); + DCHECK(!Done()); return current_; } @@ -89,7 +66,7 @@ : length_(length), data_length_(SizeFor(length)), data_(zone->NewArray<uint32_t>(data_length_)) { - ASSERT(length > 0); + DCHECK(length > 0); Clear(); } @@ -110,7 +87,7 @@ } void CopyFrom(const BitVector& other) { - ASSERT(other.length() <= length()); + DCHECK(other.length() <= length()); for (int i = 0; i < other.data_length_; i++) { data_[i] = other.data_[i]; } @@ -120,30 +97,30 @@ } bool Contains(int i) const { - ASSERT(i >= 0 && i < length()); + DCHECK(i >= 0 && i < length()); uint32_t block = data_[i / 32]; return (block & (1U << (i % 32))) != 0; } void Add(int i) { - ASSERT(i >= 0 && i < length()); + DCHECK(i >= 0 && i < length()); data_[i / 32] |= (1U << (i % 32)); } void Remove(int i) { - ASSERT(i >= 0 
&& i < length()); + DCHECK(i >= 0 && i < length()); data_[i / 32] &= ~(1U << (i % 32)); } void Union(const BitVector& other) { - ASSERT(other.length() == length()); + DCHECK(other.length() == length()); for (int i = 0; i < data_length_; i++) { data_[i] |= other.data_[i]; } } bool UnionIsChanged(const BitVector& other) { - ASSERT(other.length() == length()); + DCHECK(other.length() == length()); bool changed = false; for (int i = 0; i < data_length_; i++) { uint32_t old_data = data_[i]; @@ -154,14 +131,14 @@ } void Intersect(const BitVector& other) { - ASSERT(other.length() == length()); + DCHECK(other.length() == length()); for (int i = 0; i < data_length_; i++) { data_[i] &= other.data_[i]; } } void Subtract(const BitVector& other) { - ASSERT(other.length() == length()); + DCHECK(other.length() == length()); for (int i = 0; i < data_length_; i++) { data_[i] &= ~other.data_[i]; } @@ -187,6 +164,15 @@ return true; } + int Count() const { + int count = 0; + for (int i = 0; i < data_length_; i++) { + int data = data_[i]; + if (data != 0) count += CompilerIntrinsics::CountSetBits(data); + } + return count; + } + int length() const { return length_; } #ifdef DEBUG diff -Nru nodejs-0.11.13/deps/v8/src/date.cc nodejs-0.11.15/deps/v8/src/date.cc --- nodejs-0.11.13/deps/v8/src/date.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/date.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "date.h" +#include "src/date.h" -#include "v8.h" +#include "src/v8.h" -#include "objects.h" -#include "objects-inl.h" +#include "src/objects.h" +#include "src/objects-inl.h" namespace v8 { namespace internal { @@ -49,11 +26,12 @@ void DateCache::ResetDateCache() { static const int kMaxStamp = Smi::kMaxValue; - stamp_ = Smi::FromInt(stamp_->value() + 1); - if (stamp_->value() > kMaxStamp) { + if (stamp_->value() >= kMaxStamp) { stamp_ = Smi::FromInt(0); + } else { + stamp_ = Smi::FromInt(stamp_->value() + 1); } - ASSERT(stamp_ != Smi::FromInt(kInvalidStamp)); + DCHECK(stamp_ != Smi::FromInt(kInvalidStamp)); for (int i = 0; i < kDSTSize; ++i) { ClearSegment(&dst_[i]); } @@ -62,7 +40,7 @@ after_ = &dst_[1]; local_offset_ms_ = kInvalidLocalOffsetInMs; ymd_valid_ = false; - OS::ClearTimezoneCache(tz_cache_); + base::OS::ClearTimezoneCache(tz_cache_); } @@ -95,7 +73,7 @@ *year = 400 * (days / kDaysIn400Years) - kYearsOffset; days %= kDaysIn400Years; - ASSERT(DaysFromYearMonth(*year, 0) + days == save_days); + DCHECK(DaysFromYearMonth(*year, 0) + days == save_days); days--; int yd1 = days / kDaysIn100Years; @@ -115,12 +93,12 @@ bool is_leap = (!yd1 || yd2) && !yd3; - ASSERT(days >= -1); - ASSERT(is_leap || (days >= 0)); - ASSERT((days < 365) || (is_leap && (days < 366))); - ASSERT(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0)))); - ASSERT(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days)); - ASSERT(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days)); + DCHECK(days >= -1); + DCHECK(is_leap || (days >= 0)); + DCHECK((days < 365) || (is_leap && (days < 366))); + DCHECK(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0)))); + DCHECK(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days)); + DCHECK(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days)); days += is_leap; @@ -146,7 +124,7 @@ *day = days - 31 + 1; } } - ASSERT(DaysFromYearMonth(*year, *month) + *day - 1 == 
save_days); + DCHECK(DaysFromYearMonth(*year, *month) + *day - 1 == save_days); ymd_valid_ = true; ymd_year_ = *year; ymd_month_ = *month; @@ -168,8 +146,8 @@ month += 12; } - ASSERT(month >= 0); - ASSERT(month < 12); + DCHECK(month >= 0); + DCHECK(month < 12); // year_delta is an arbitrary number such that: // a) year_delta = -1 (mod 400) @@ -244,8 +222,8 @@ ProbeDST(time_sec); - ASSERT(InvalidSegment(before_) || before_->start_sec <= time_sec); - ASSERT(InvalidSegment(after_) || time_sec < after_->start_sec); + DCHECK(InvalidSegment(before_) || before_->start_sec <= time_sec); + DCHECK(InvalidSegment(after_) || time_sec < after_->start_sec); if (InvalidSegment(before_)) { // Cache miss. @@ -286,7 +264,7 @@ int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec); ExtendTheAfterSegment(new_after_start_sec, new_offset_ms); } else { - ASSERT(!InvalidSegment(after_)); + DCHECK(!InvalidSegment(after_)); // Update the usage counter of after_ since it is going to be used. after_->last_used = ++dst_usage_counter_; } @@ -313,7 +291,7 @@ return offset_ms; } } else { - ASSERT(after_->offset_ms == offset_ms); + DCHECK(after_->offset_ms == offset_ms); after_->start_sec = middle_sec; if (time_sec >= after_->start_sec) { // This swap helps the optimistic fast check in subsequent invocations. @@ -332,7 +310,7 @@ void DateCache::ProbeDST(int time_sec) { DST* before = NULL; DST* after = NULL; - ASSERT(before_ != after_); + DCHECK(before_ != after_); for (int i = 0; i < kDSTSize; ++i) { if (dst_[i].start_sec <= time_sec) { @@ -356,12 +334,12 @@ ? 
after_ : LeastRecentlyUsedDST(before); } - ASSERT(before != NULL); - ASSERT(after != NULL); - ASSERT(before != after); - ASSERT(InvalidSegment(before) || before->start_sec <= time_sec); - ASSERT(InvalidSegment(after) || time_sec < after->start_sec); - ASSERT(InvalidSegment(before) || InvalidSegment(after) || + DCHECK(before != NULL); + DCHECK(after != NULL); + DCHECK(before != after); + DCHECK(InvalidSegment(before) || before->start_sec <= time_sec); + DCHECK(InvalidSegment(after) || time_sec < after->start_sec); + DCHECK(InvalidSegment(before) || InvalidSegment(after) || before->end_sec < after->start_sec); before_ = before; diff -Nru nodejs-0.11.13/deps/v8/src/date.h nodejs-0.11.15/deps/v8/src/date.h --- nodejs-0.11.13/deps/v8/src/date.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/date.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DATE_H_ #define V8_DATE_H_ -#include "allocation.h" -#include "globals.h" -#include "platform.h" +#include "src/allocation.h" +#include "src/base/platform/platform.h" +#include "src/globals.h" namespace v8 { @@ -62,12 +39,12 @@ // It is an invariant of DateCache that cache stamp is non-negative. static const int kInvalidStamp = -1; - DateCache() : stamp_(0), tz_cache_(OS::CreateTimezoneCache()) { + DateCache() : stamp_(0), tz_cache_(base::OS::CreateTimezoneCache()) { ResetDateCache(); } virtual ~DateCache() { - OS::DisposeTimezoneCache(tz_cache_); + base::OS::DisposeTimezoneCache(tz_cache_); tz_cache_ = NULL; } @@ -116,7 +93,7 @@ if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) { time_ms = EquivalentTime(time_ms); } - return OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_); + return base::OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_); } // ECMA 262 - 15.9.5.26 @@ -185,12 +162,13 @@ // These functions are virtual so that we can override them when testing. 
virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) { double time_ms = static_cast<double>(time_sec * 1000); - return static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz_cache_)); + return static_cast<int>( + base::OS::DaylightSavingsOffset(time_ms, tz_cache_)); } virtual int GetLocalOffsetFromOS() { - double offset = OS::LocalTimeOffset(tz_cache_); - ASSERT(offset < kInvalidLocalOffsetInMs); + double offset = base::OS::LocalTimeOffset(tz_cache_); + DCHECK(offset < kInvalidLocalOffsetInMs); return static_cast<int>(offset); } @@ -257,7 +235,7 @@ int ymd_month_; int ymd_day_; - TimezoneCache* tz_cache_; + base::TimezoneCache* tz_cache_; }; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/date.js nodejs-0.11.15/deps/v8/src/date.js --- nodejs-0.11.13/deps/v8/src/date.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/date.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,8 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // This file relies on the fact that the following declarations have been made // in v8natives.js: @@ -784,7 +763,7 @@ )); // Set up non-enumerable constructor property of the Date prototype object. - %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM); + %AddNamedProperty($Date.prototype, "constructor", $Date, DONT_ENUM); // Set up non-enumerable functions of the Date prototype object and // set their names. diff -Nru nodejs-0.11.13/deps/v8/src/dateparser.cc nodejs-0.11.15/deps/v8/src/dateparser.cc --- nodejs-0.11.13/deps/v8/src/dateparser.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/dateparser.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,10 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "dateparser.h" +#include "src/dateparser.h" namespace v8 { namespace internal { @@ -200,7 +177,7 @@ // most significant digits. int factor = 1; do { - ASSERT(factor <= 100000000); // factor won't overflow. + DCHECK(factor <= 100000000); // factor won't overflow. factor *= 10; length--; } while (length > 3); diff -Nru nodejs-0.11.13/deps/v8/src/dateparser.h nodejs-0.11.15/deps/v8/src/dateparser.h --- nodejs-0.11.13/deps/v8/src/dateparser.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/dateparser.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_DATEPARSER_H_ #define V8_DATEPARSER_H_ -#include "allocation.h" -#include "char-predicates-inl.h" +#include "src/allocation.h" +#include "src/char-predicates-inl.h" namespace v8 { namespace internal { @@ -174,19 +151,19 @@ int length() { return length_; } int number() { - ASSERT(IsNumber()); + DCHECK(IsNumber()); return value_; } KeywordType keyword_type() { - ASSERT(IsKeyword()); + DCHECK(IsKeyword()); return static_cast<KeywordType>(tag_); } int keyword_value() { - ASSERT(IsKeyword()); + DCHECK(IsKeyword()); return value_; } char symbol() { - ASSERT(IsSymbol()); + DCHECK(IsSymbol()); return static_cast<char>(value_); } bool IsSymbol(char symbol) { @@ -202,7 +179,7 @@ return tag_ == kSymbolTag && (value_ == '-' || value_ == '+'); } int ascii_sign() { - ASSERT(IsAsciiSign()); + DCHECK(IsAsciiSign()); return 44 - value_; } bool IsKeywordZ() { diff -Nru nodejs-0.11.13/deps/v8/src/dateparser-inl.h nodejs-0.11.15/deps/v8/src/dateparser-inl.h --- nodejs-0.11.13/deps/v8/src/dateparser-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/dateparser-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DATEPARSER_INL_H_ #define V8_DATEPARSER_INL_H_ -#include "dateparser.h" +#include "src/dateparser.h" namespace v8 { namespace internal { @@ -37,7 +14,7 @@ bool DateParser::Parse(Vector<Char> str, FixedArray* out, UnicodeCache* unicode_cache) { - ASSERT(out->length() >= OUTPUT_SIZE); + DCHECK(out->length() >= OUTPUT_SIZE); InputReader<Char> in(unicode_cache, str); DateStringTokenizer<Char> scanner(&in); TimeZoneComposer tz; @@ -198,7 +175,7 @@ if (in_->Skip('.')) return DateToken::Symbol('.'); if (in_->Skip(')')) return DateToken::Symbol(')'); if (in_->IsAsciiAlphaOrAbove()) { - ASSERT(KeywordTable::kPrefixLength == 3); + DCHECK(KeywordTable::kPrefixLength == 3); uint32_t buffer[3] = {0, 0, 0}; int length = in_->ReadWord(buffer, 3); int index = KeywordTable::Lookup(buffer, length); @@ -223,9 +200,9 @@ DayComposer* day, TimeComposer* time, TimeZoneComposer* tz) { - ASSERT(day->IsEmpty()); - ASSERT(time->IsEmpty()); - ASSERT(tz->IsEmpty()); + DCHECK(day->IsEmpty()); + DCHECK(time->IsEmpty()); + DCHECK(tz->IsEmpty()); // Parse mandatory date string: 
[('-'|'+')yy]yyyy[':'MM[':'DD]] if (scanner->Peek().IsAsciiSign()) { diff -Nru nodejs-0.11.13/deps/v8/src/debug-agent.cc nodejs-0.11.15/deps/v8/src/debug-agent.cc --- nodejs-0.11.13/deps/v8/src/debug-agent.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/debug-agent.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,508 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifdef ENABLE_DEBUGGER_SUPPORT - -#include "v8.h" -#include "debug.h" -#include "debug-agent.h" -#include "platform/socket.h" - -namespace v8 { -namespace internal { - -// Public V8 debugger API message handler function. This function just delegates -// to the debugger agent through it's data parameter. -void DebuggerAgentMessageHandler(const v8::Debug::Message& message) { - Isolate* isolate = reinterpret_cast<Isolate*>(message.GetIsolate()); - DebuggerAgent* agent = isolate->debugger_agent_instance(); - ASSERT(agent != NULL); - agent->DebuggerMessage(message); -} - - -DebuggerAgent::DebuggerAgent(Isolate* isolate, const char* name, int port) - : Thread(name), - isolate_(isolate), - name_(StrDup(name)), - port_(port), - server_(new Socket), - terminate_(false), - session_(NULL), - terminate_now_(0), - listening_(0) { - ASSERT(isolate_->debugger_agent_instance() == NULL); - isolate_->set_debugger_agent_instance(this); -} - - -DebuggerAgent::~DebuggerAgent() { - isolate_->set_debugger_agent_instance(NULL); - delete server_; -} - - -// Debugger agent main thread. -void DebuggerAgent::Run() { - // Allow this socket to reuse port even if still in TIME_WAIT. - server_->SetReuseAddress(true); - - // First bind the socket to the requested port. - bool bound = false; - while (!bound && !terminate_) { - bound = server_->Bind(port_); - - // If an error occurred wait a bit before retrying. The most common error - // would be that the port is already in use so this avoids a busy loop and - // make the agent take over the port when it becomes free. - if (!bound) { - const TimeDelta kTimeout = TimeDelta::FromSeconds(1); - PrintF("Failed to open socket on port %d, " - "waiting %d ms before retrying\n", port_, - static_cast<int>(kTimeout.InMilliseconds())); - if (!terminate_now_.WaitFor(kTimeout)) { - if (terminate_) return; - } - } - } - - // Accept connections on the bound port. 
- while (!terminate_) { - bool ok = server_->Listen(1); - listening_.Signal(); - if (ok) { - // Accept the new connection. - Socket* client = server_->Accept(); - ok = client != NULL; - if (ok) { - // Create and start a new session. - CreateSession(client); - } - } - } -} - - -void DebuggerAgent::Shutdown() { - // Set the termination flag. - terminate_ = true; - - // Signal termination and make the server exit either its listen call or its - // binding loop. This makes sure that no new sessions can be established. - terminate_now_.Signal(); - server_->Shutdown(); - Join(); - - // Close existing session if any. - CloseSession(); -} - - -void DebuggerAgent::WaitUntilListening() { - listening_.Wait(); -} - -static const char* kCreateSessionMessage = - "Remote debugging session already active\r\n"; - -void DebuggerAgent::CreateSession(Socket* client) { - LockGuard<RecursiveMutex> session_access_guard(&session_access_); - - // If another session is already established terminate this one. - if (session_ != NULL) { - int len = StrLength(kCreateSessionMessage); - int res = client->Send(kCreateSessionMessage, len); - delete client; - USE(res); - return; - } - - // Create a new session and hook up the debug message handler. - session_ = new DebuggerAgentSession(this, client); - isolate_->debugger()->SetMessageHandler(DebuggerAgentMessageHandler); - session_->Start(); -} - - -void DebuggerAgent::CloseSession() { - LockGuard<RecursiveMutex> session_access_guard(&session_access_); - - // Terminate the session. - if (session_ != NULL) { - session_->Shutdown(); - session_->Join(); - delete session_; - session_ = NULL; - } -} - - -void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) { - LockGuard<RecursiveMutex> session_access_guard(&session_access_); - - // Forward the message handling to the session. 
- if (session_ != NULL) { - v8::String::Value val(message.GetJSON()); - session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val), - val.length())); - } -} - - -DebuggerAgentSession::~DebuggerAgentSession() { - delete client_; -} - - -void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) { - // Don't do anything during termination. - if (terminate_) { - return; - } - - // Terminate the session. - LockGuard<RecursiveMutex> session_access_guard(&session_access_); - ASSERT(session == session_); - if (session == session_) { - session_->Shutdown(); - delete session_; - session_ = NULL; - } -} - - -void DebuggerAgentSession::Run() { - // Send the hello message. - bool ok = DebuggerAgentUtil::SendConnectMessage(client_, agent_->name_.get()); - if (!ok) return; - - while (true) { - // Read data from the debugger front end. - SmartArrayPointer<char> message = - DebuggerAgentUtil::ReceiveMessage(client_); - - const char* msg = message.get(); - bool is_closing_session = (msg == NULL); - - if (msg == NULL) { - // If we lost the connection, then simulate a disconnect msg: - msg = "{\"seq\":1,\"type\":\"request\",\"command\":\"disconnect\"}"; - - } else { - // Check if we're getting a disconnect request: - const char* disconnectRequestStr = - "\"type\":\"request\",\"command\":\"disconnect\"}"; - const char* result = strstr(msg, disconnectRequestStr); - if (result != NULL) { - is_closing_session = true; - } - } - - // Convert UTF-8 to UTF-16. - unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg)); - int utf16_length = decoder.Utf16Length(); - ScopedVector<uint16_t> temp(utf16_length + 1); - decoder.WriteUtf16(temp.start(), utf16_length); - - // Send the request received to the debugger. - v8::Debug::SendCommand(temp.start(), - utf16_length, - NULL, - reinterpret_cast<v8::Isolate*>(agent_->isolate())); - - if (is_closing_session) { - // Session is closed. 
- agent_->OnSessionClosed(this); - return; - } - } -} - - -void DebuggerAgentSession::DebuggerMessage(Vector<uint16_t> message) { - DebuggerAgentUtil::SendMessage(client_, message); -} - - -void DebuggerAgentSession::Shutdown() { - // Shutdown the socket to end the blocking receive. - client_->Shutdown(); -} - - -const char* const DebuggerAgentUtil::kContentLength = "Content-Length"; - - -SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(Socket* conn) { - int received; - - // Read header. - int content_length = 0; - while (true) { - const int kHeaderBufferSize = 80; - char header_buffer[kHeaderBufferSize]; - int header_buffer_position = 0; - char c = '\0'; // One character receive buffer. - char prev_c = '\0'; // Previous character. - - // Read until CRLF. - while (!(c == '\n' && prev_c == '\r')) { - prev_c = c; - received = conn->Receive(&c, 1); - if (received == 0) { - PrintF("Error %d\n", Socket::GetLastError()); - return SmartArrayPointer<char>(); - } - - // Add character to header buffer. - if (header_buffer_position < kHeaderBufferSize) { - header_buffer[header_buffer_position++] = c; - } - } - - // Check for end of header (empty header line). - if (header_buffer_position == 2) { // Receive buffer contains CRLF. - break; - } - - // Terminate header. - ASSERT(header_buffer_position > 1); // At least CRLF is received. - ASSERT(header_buffer_position <= kHeaderBufferSize); - header_buffer[header_buffer_position - 2] = '\0'; - - // Split header. - char* key = header_buffer; - char* value = NULL; - for (int i = 0; header_buffer[i] != '\0'; i++) { - if (header_buffer[i] == ':') { - header_buffer[i] = '\0'; - value = header_buffer + i + 1; - while (*value == ' ') { - value++; - } - break; - } - } - - // Check that key is Content-Length. - if (strcmp(key, kContentLength) == 0) { - // Get the content length value if present and within a sensible range. 
- if (value == NULL || strlen(value) > 7) { - return SmartArrayPointer<char>(); - } - for (int i = 0; value[i] != '\0'; i++) { - // Bail out if illegal data. - if (value[i] < '0' || value[i] > '9') { - return SmartArrayPointer<char>(); - } - content_length = 10 * content_length + (value[i] - '0'); - } - } else { - // For now just print all other headers than Content-Length. - PrintF("%s: %s\n", key, value != NULL ? value : "(no value)"); - } - } - - // Return now if no body. - if (content_length == 0) { - return SmartArrayPointer<char>(); - } - - // Read body. - char* buffer = NewArray<char>(content_length + 1); - received = ReceiveAll(conn, buffer, content_length); - if (received < content_length) { - PrintF("Error %d\n", Socket::GetLastError()); - return SmartArrayPointer<char>(); - } - buffer[content_length] = '\0'; - - return SmartArrayPointer<char>(buffer); -} - - -bool DebuggerAgentUtil::SendConnectMessage(Socket* conn, - const char* embedding_host) { - static const int kBufferSize = 80; - char buffer[kBufferSize]; // Sending buffer. - bool ok; - int len; - - // Send the header. - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "Type: connect\r\n"); - ok = conn->Send(buffer, len); - if (!ok) return false; - - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "V8-Version: %s\r\n", v8::V8::GetVersion()); - ok = conn->Send(buffer, len); - if (!ok) return false; - - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "Protocol-Version: 1\r\n"); - ok = conn->Send(buffer, len); - if (!ok) return false; - - if (embedding_host != NULL) { - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "Embedding-Host: %s\r\n", embedding_host); - ok = conn->Send(buffer, len); - if (!ok) return false; - } - - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "%s: 0\r\n", kContentLength); - ok = conn->Send(buffer, len); - if (!ok) return false; - - // Terminate header with empty line. 
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n"); - ok = conn->Send(buffer, len); - if (!ok) return false; - - // No body for connect message. - - return true; -} - - -bool DebuggerAgentUtil::SendMessage(Socket* conn, - const Vector<uint16_t> message) { - static const int kBufferSize = 80; - char buffer[kBufferSize]; // Sending buffer both for header and body. - - // Calculate the message size in UTF-8 encoding. - int utf8_len = 0; - int previous = unibrow::Utf16::kNoPreviousCharacter; - for (int i = 0; i < message.length(); i++) { - uint16_t character = message[i]; - utf8_len += unibrow::Utf8::Length(character, previous); - previous = character; - } - - // Send the header. - int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "%s: %d\r\n", kContentLength, utf8_len); - if (conn->Send(buffer, len) < len) { - return false; - } - - // Terminate header with empty line. - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n"); - if (conn->Send(buffer, len) < len) { - return false; - } - - // Send message body as UTF-8. - int buffer_position = 0; // Current buffer position. - previous = unibrow::Utf16::kNoPreviousCharacter; - for (int i = 0; i < message.length(); i++) { - // Write next UTF-8 encoded character to buffer. - uint16_t character = message[i]; - buffer_position += - unibrow::Utf8::Encode(buffer + buffer_position, character, previous); - ASSERT(buffer_position <= kBufferSize); - - // Send buffer if full or last character is encoded. 
- if (kBufferSize - buffer_position < - unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit || - i == message.length() - 1) { - if (unibrow::Utf16::IsLeadSurrogate(character)) { - const int kEncodedSurrogateLength = - unibrow::Utf16::kUtf8BytesToCodeASurrogate; - ASSERT(buffer_position >= kEncodedSurrogateLength); - len = buffer_position - kEncodedSurrogateLength; - if (conn->Send(buffer, len) < len) { - return false; - } - for (int i = 0; i < kEncodedSurrogateLength; i++) { - buffer[i] = buffer[buffer_position + i]; - } - buffer_position = kEncodedSurrogateLength; - } else { - len = buffer_position; - if (conn->Send(buffer, len) < len) { - return false; - } - buffer_position = 0; - } - } - previous = character; - } - - return true; -} - - -bool DebuggerAgentUtil::SendMessage(Socket* conn, - const v8::Handle<v8::String> request) { - static const int kBufferSize = 80; - char buffer[kBufferSize]; // Sending buffer both for header and body. - - // Convert the request to UTF-8 encoding. - v8::String::Utf8Value utf8_request(request); - - // Send the header. - int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), - "Content-Length: %d\r\n", utf8_request.length()); - if (conn->Send(buffer, len) < len) { - return false; - } - - // Terminate header with empty line. - len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n"); - if (conn->Send(buffer, len) < len) { - return false; - } - - // Send message body as UTF-8. - len = utf8_request.length(); - if (conn->Send(*utf8_request, len) < len) { - return false; - } - - return true; -} - - -// Receive the full buffer before returning unless an error occours. 
-int DebuggerAgentUtil::ReceiveAll(Socket* conn, char* data, int len) { - int total_received = 0; - while (total_received < len) { - int received = conn->Receive(data + total_received, len - total_received); - if (received == 0) { - return total_received; - } - total_received += received; - } - return total_received; -} - -} } // namespace v8::internal - -#endif // ENABLE_DEBUGGER_SUPPORT diff -Nru nodejs-0.11.13/deps/v8/src/debug-agent.h nodejs-0.11.15/deps/v8/src/debug-agent.h --- nodejs-0.11.13/deps/v8/src/debug-agent.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/debug-agent.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_DEBUG_AGENT_H_ -#define V8_DEBUG_AGENT_H_ - -#ifdef ENABLE_DEBUGGER_SUPPORT -#include "../include/v8-debug.h" -#include "platform.h" - -namespace v8 { -namespace internal { - -// Forward decelrations. -class DebuggerAgentSession; -class Socket; - - -// Debugger agent which starts a socket listener on the debugger port and -// handles connection from a remote debugger. -class DebuggerAgent: public Thread { - public: - DebuggerAgent(Isolate* isolate, const char* name, int port); - ~DebuggerAgent(); - - void Shutdown(); - void WaitUntilListening(); - - Isolate* isolate() { return isolate_; } - - private: - void Run(); - void CreateSession(Socket* socket); - void DebuggerMessage(const v8::Debug::Message& message); - void CloseSession(); - void OnSessionClosed(DebuggerAgentSession* session); - - Isolate* isolate_; - SmartArrayPointer<const char> name_; // Name of the embedding application. - int port_; // Port to use for the agent. - Socket* server_; // Server socket for listen/accept. - bool terminate_; // Termination flag. - RecursiveMutex session_access_; // Mutex guarding access to session_. - DebuggerAgentSession* session_; // Current active session if any. - Semaphore terminate_now_; // Semaphore to signal termination. 
- Semaphore listening_; - - friend class DebuggerAgentSession; - friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message); - - DISALLOW_COPY_AND_ASSIGN(DebuggerAgent); -}; - - -// Debugger agent session. The session receives requests from the remote -// debugger and sends debugger events/responses to the remote debugger. -class DebuggerAgentSession: public Thread { - public: - DebuggerAgentSession(DebuggerAgent* agent, Socket* client) - : Thread("v8:DbgAgntSessn"), - agent_(agent), client_(client) {} - ~DebuggerAgentSession(); - - void DebuggerMessage(Vector<uint16_t> message); - void Shutdown(); - - private: - void Run(); - - void DebuggerMessage(Vector<char> message); - - DebuggerAgent* agent_; - Socket* client_; - - DISALLOW_COPY_AND_ASSIGN(DebuggerAgentSession); -}; - - -// Utility methods factored out to be used by the D8 shell as well. -class DebuggerAgentUtil { - public: - static const char* const kContentLength; - - static SmartArrayPointer<char> ReceiveMessage(Socket* conn); - static bool SendConnectMessage(Socket* conn, const char* embedding_host); - static bool SendMessage(Socket* conn, const Vector<uint16_t> message); - static bool SendMessage(Socket* conn, const v8::Handle<v8::String> message); - static int ReceiveAll(Socket* conn, char* data, int len); -}; - -} } // namespace v8::internal - -#endif // ENABLE_DEBUGGER_SUPPORT - -#endif // V8_DEBUG_AGENT_H_ diff -Nru nodejs-0.11.13/deps/v8/src/debug.cc nodejs-0.11.15/deps/v8/src/debug.cc --- nodejs-0.11.13/deps/v8/src/debug.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/debug.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,88 +1,54 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "v8.h" - -#include "api.h" -#include "arguments.h" -#include "bootstrapper.h" -#include "code-stubs.h" -#include "codegen.h" -#include "compilation-cache.h" -#include "compiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "execution.h" -#include "full-codegen.h" -#include "global-handles.h" -#include "ic.h" -#include "ic-inl.h" -#include "isolate-inl.h" -#include "list.h" -#include "messages.h" -#include "natives.h" -#include "stub-cache.h" -#include "log.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "../include/v8-debug.h" +#include "src/v8.h" -namespace v8 { -namespace internal { +#include "src/api.h" +#include "src/arguments.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compilation-cache.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/full-codegen.h" +#include "src/global-handles.h" +#include "src/ic.h" +#include "src/ic-inl.h" +#include "src/isolate-inl.h" +#include "src/list.h" +#include "src/log.h" +#include "src/messages.h" +#include "src/natives.h" +#include "src/stub-cache.h" -#ifdef ENABLE_DEBUGGER_SUPPORT +#include "include/v8-debug.h" +namespace v8 { +namespace internal { Debug::Debug(Isolate* isolate) - : has_break_points_(false), - script_cache_(NULL), - debug_info_list_(NULL), - disable_break_(false), + : debug_context_(Handle<Context>()), + event_listener_(Handle<Object>()), + event_listener_data_(Handle<Object>()), + message_handler_(NULL), + command_received_(0), + command_queue_(isolate->logger(), kQueueInitialSize), + event_command_queue_(isolate->logger(), kQueueInitialSize), + is_active_(false), + is_suppressed_(false), + live_edit_enabled_(true), // TODO(yangguo): set to false by default. 
+ has_break_points_(false), + break_disabled_(false), break_on_exception_(false), break_on_uncaught_exception_(false), - debug_break_return_(NULL), - debug_break_slot_(NULL), + script_cache_(NULL), + debug_info_list_(NULL), isolate_(isolate) { - memset(registers_, 0, sizeof(JSCallerSavedBuffer)); -} - - -Debug::~Debug() { -} - - -static void PrintLn(v8::Local<v8::Value> value) { - v8::Local<v8::String> s = value->ToString(); - ScopedVector<char> data(s->Utf8Length() + 1); - if (data.start() == NULL) { - V8::FatalProcessOutOfMemory("PrintLn"); - return; - } - s->WriteUtf8(data.start()); - PrintF("%s\n", data.start()); + ThreadInit(); } @@ -107,16 +73,32 @@ BreakLocationIterator::~BreakLocationIterator() { - ASSERT(reloc_iterator_ != NULL); - ASSERT(reloc_iterator_original_ != NULL); + DCHECK(reloc_iterator_ != NULL); + DCHECK(reloc_iterator_original_ != NULL); delete reloc_iterator_; delete reloc_iterator_original_; } +// Check whether a code stub with the specified major key is a possible break +// point location when looking for source break locations. +static bool IsSourceBreakStub(Code* code) { + CodeStub::Major major_key = CodeStub::GetMajorKey(code); + return major_key == CodeStub::CallFunction; +} + + +// Check whether a code stub with the specified major key is a possible break +// location. +static bool IsBreakStub(Code* code) { + CodeStub::Major major_key = CodeStub::GetMajorKey(code); + return major_key == CodeStub::CallFunction; +} + + void BreakLocationIterator::Next() { DisallowHeapAllocation no_gc; - ASSERT(!RinfoDone()); + DCHECK(!RinfoDone()); // Iterate through reloc info for code and original code stopping at each // breakable code target. @@ -137,8 +119,8 @@ // statement position. 
position_ = static_cast<int>( rinfo()->data() - debug_info_->shared()->start_position()); - ASSERT(position_ >= 0); - ASSERT(statement_position_ >= 0); + DCHECK(position_ >= 0); + DCHECK(statement_position_ >= 0); } if (IsDebugBreakSlot()) { @@ -163,15 +145,14 @@ if (IsDebuggerStatement()) { break_point_++; return; - } - if (type_ == ALL_BREAK_LOCATIONS) { - if (Debug::IsBreakStub(code)) { + } else if (type_ == ALL_BREAK_LOCATIONS) { + if (IsBreakStub(code)) { break_point_++; return; } } else { - ASSERT(type_ == SOURCE_BREAK_LOCATIONS); - if (Debug::IsSourceBreakStub(code)) { + DCHECK(type_ == SOURCE_BREAK_LOCATIONS); + if (IsSourceBreakStub(code)) { break_point_++; return; } @@ -291,10 +272,8 @@ void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) { // If there is not already a real break point here patch code with debug // break. - if (!HasBreakPoint()) { - SetDebugBreak(); - } - ASSERT(IsDebugBreak() || IsDebuggerStatement()); + if (!HasBreakPoint()) SetDebugBreak(); + DCHECK(IsDebugBreak() || IsDebuggerStatement()); // Set the break point information. DebugInfo::SetBreakPoint(debug_info_, code_position(), position(), statement_position(), @@ -308,20 +287,18 @@ // If there are no more break points here remove the debug break. if (!HasBreakPoint()) { ClearDebugBreak(); - ASSERT(!IsDebugBreak()); + DCHECK(!IsDebugBreak()); } } void BreakLocationIterator::SetOneShot() { // Debugger statement always calls debugger. No need to modify it. - if (IsDebuggerStatement()) { - return; - } + if (IsDebuggerStatement()) return; // If there is a real break point here no more to do. if (HasBreakPoint()) { - ASSERT(IsDebugBreak()); + DCHECK(IsDebugBreak()); return; } @@ -332,35 +309,29 @@ void BreakLocationIterator::ClearOneShot() { // Debugger statement always calls debugger. No need to modify it. - if (IsDebuggerStatement()) { - return; - } + if (IsDebuggerStatement()) return; // If there is a real break point here no more to do. 
if (HasBreakPoint()) { - ASSERT(IsDebugBreak()); + DCHECK(IsDebugBreak()); return; } // Patch code removing debug break. ClearDebugBreak(); - ASSERT(!IsDebugBreak()); + DCHECK(!IsDebugBreak()); } void BreakLocationIterator::SetDebugBreak() { // Debugger statement always calls debugger. No need to modify it. - if (IsDebuggerStatement()) { - return; - } + if (IsDebuggerStatement()) return; // If there is already a break point here just return. This might happen if // the same code is flooded with break points twice. Flooding the same // function twice might happen when stepping in a function with an exception // handler as the handler and the function is the same. - if (IsDebugBreak()) { - return; - } + if (IsDebugBreak()) return; if (RelocInfo::IsJSReturn(rmode())) { // Patch the frame exit code with a break point. @@ -372,15 +343,13 @@ // Patch the IC call. SetDebugBreakAtIC(); } - ASSERT(IsDebugBreak()); + DCHECK(IsDebugBreak()); } void BreakLocationIterator::ClearDebugBreak() { // Debugger statement always calls debugger. No need to modify it. - if (IsDebuggerStatement()) { - return; - } + if (IsDebuggerStatement()) return; if (RelocInfo::IsJSReturn(rmode())) { // Restore the frame exit code. @@ -392,7 +361,7 @@ // Patch the IC call. 
ClearDebugBreakAtIC(); } - ASSERT(!IsDebugBreak()); + DCHECK(!IsDebugBreak()); } @@ -404,8 +373,9 @@ Address target = original_rinfo()->target_address(); Handle<Code> target_code(Code::GetCodeFromTargetAddress(target)); if (target_code->kind() == Code::STUB) { - return target_code->major_key() == CodeStub::CallFunction; + return CodeStub::GetMajorKey(*target_code) == CodeStub::CallFunction; } + return target_code->is_call_stub(); } return false; } @@ -428,7 +398,8 @@ } bool is_call_function_stub = (maybe_call_function_stub->kind() == Code::STUB && - maybe_call_function_stub->major_key() == CodeStub::CallFunction); + CodeStub::GetMajorKey(*maybe_call_function_stub) == + CodeStub::CallFunction); // Step in through construct call requires no changes to the running code. // Step in through getters/setters should already be prepared as well @@ -437,7 +408,7 @@ // Step in through CallFunction stub should also be prepared by caller of // this function (Debug::PrepareStep) which should flood target function // with breakpoints. - ASSERT(RelocInfo::IsConstructCall(rmode()) || + DCHECK(RelocInfo::IsConstructCall(rmode()) || target_code->is_inline_cache_stub() || is_call_function_stub); #endif @@ -467,6 +438,53 @@ } +// Find the builtin to use for invoking the debug break +static Handle<Code> DebugBreakForIC(Handle<Code> code, RelocInfo::Mode mode) { + Isolate* isolate = code->GetIsolate(); + + // Find the builtin debug break function matching the calling convention + // used by the call site. 
+ if (code->is_inline_cache_stub()) { + switch (code->kind()) { + case Code::CALL_IC: + return isolate->builtins()->CallICStub_DebugBreak(); + + case Code::LOAD_IC: + return isolate->builtins()->LoadIC_DebugBreak(); + + case Code::STORE_IC: + return isolate->builtins()->StoreIC_DebugBreak(); + + case Code::KEYED_LOAD_IC: + return isolate->builtins()->KeyedLoadIC_DebugBreak(); + + case Code::KEYED_STORE_IC: + return isolate->builtins()->KeyedStoreIC_DebugBreak(); + + case Code::COMPARE_NIL_IC: + return isolate->builtins()->CompareNilIC_DebugBreak(); + + default: + UNREACHABLE(); + } + } + if (RelocInfo::IsConstructCall(mode)) { + if (code->has_function_cache()) { + return isolate->builtins()->CallConstructStub_Recording_DebugBreak(); + } else { + return isolate->builtins()->CallConstructStub_DebugBreak(); + } + } + if (code->kind() == Code::STUB) { + DCHECK(CodeStub::GetMajorKey(*code) == CodeStub::CallFunction); + return isolate->builtins()->CallFunctionStub_DebugBreak(); + } + + UNREACHABLE(); + return Handle<Code>::null(); +} + + void BreakLocationIterator::SetDebugBreakAtIC() { // Patch the original code with the current address as the current address // might have changed by the inline caching since the code was copied. @@ -479,7 +497,7 @@ // Patch the code to invoke the builtin debug break function matching the // calling convention used by the call site. 
- Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode)); + Handle<Code> dbgbrk_code = DebugBreakForIC(target_code, mode); rinfo()->set_target_address(dbgbrk_code->entry()); } } @@ -518,7 +536,7 @@ bool BreakLocationIterator::RinfoDone() const { - ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done()); + DCHECK(reloc_iterator_->done() == reloc_iterator_original_->done()); return reloc_iterator_->done(); } @@ -527,9 +545,9 @@ reloc_iterator_->next(); reloc_iterator_original_->next(); #ifdef DEBUG - ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done()); + DCHECK(reloc_iterator_->done() == reloc_iterator_original_->done()); if (!reloc_iterator_->done()) { - ASSERT(rmode() == original_rmode()); + DCHECK(rmode() == original_rmode()); } #endif } @@ -547,66 +565,55 @@ thread_local_.queued_step_count_ = 0; thread_local_.step_into_fp_ = 0; thread_local_.step_out_fp_ = 0; - thread_local_.after_break_target_ = 0; // TODO(isolates): frames_are_dropped_? - thread_local_.debugger_entry_ = NULL; - thread_local_.pending_interrupts_ = 0; + thread_local_.current_debug_scope_ = NULL; thread_local_.restarter_frame_function_pointer_ = NULL; + thread_local_.promise_on_stack_ = NULL; } char* Debug::ArchiveDebug(char* storage) { char* to = storage; - OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal)); - to += sizeof(ThreadLocal); - OS::MemCopy(to, reinterpret_cast<char*>(®isters_), sizeof(registers_)); + MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal)); ThreadInit(); - ASSERT(to <= storage + ArchiveSpacePerThread()); return storage + ArchiveSpacePerThread(); } char* Debug::RestoreDebug(char* storage) { char* from = storage; - OS::MemCopy( - reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal)); - from += sizeof(ThreadLocal); - OS::MemCopy(reinterpret_cast<char*>(®isters_), from, sizeof(registers_)); - ASSERT(from <= storage + ArchiveSpacePerThread()); + 
MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal)); return storage + ArchiveSpacePerThread(); } int Debug::ArchiveSpacePerThread() { - return sizeof(ThreadLocal) + sizeof(JSCallerSavedBuffer); + return sizeof(ThreadLocal); } -// Frame structure (conforms InternalFrame structure): -// -- code -// -- SMI maker -// -- function (slot is called "context") -// -- frame base -Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame, - Handle<Code> code) { - ASSERT(bottom_js_frame->is_java_script()); - - Address fp = bottom_js_frame->fp(); +ScriptCache::ScriptCache(Isolate* isolate) : HashMap(HashMap::PointersMatch), + isolate_(isolate) { + Heap* heap = isolate_->heap(); + HandleScope scope(isolate_); - // Move function pointer into "context" slot. - Memory::Object_at(fp + StandardFrameConstants::kContextOffset) = - Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset); + // Perform two GCs to get rid of all unreferenced scripts. The first GC gets + // rid of all the cached script wrappers and the second gets rid of the + // scripts which are no longer referenced. + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "ScriptCache"); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "ScriptCache"); - Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code; - Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) = - Smi::FromInt(StackFrame::INTERNAL); + // Scan heap for Script objects. 
+ HeapIterator iterator(heap); + DisallowHeapAllocation no_allocation; - return reinterpret_cast<Object**>(&Memory::Object_at( - fp + StandardFrameConstants::kContextOffset)); + for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { + if (obj->IsScript() && Script::cast(obj)->HasValidSource()) { + Add(Handle<Script>(Script::cast(obj))); + } + } } -const int Debug::kFrameDropperFrameSize = 4; - void ScriptCache::Add(Handle<Script> script) { GlobalHandles* global_handles = isolate_->global_handles(); @@ -615,7 +622,14 @@ HashMap::Entry* entry = HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true); if (entry->value != NULL) { - ASSERT(*script == *reinterpret_cast<Script**>(entry->value)); +#ifdef DEBUG + // The code deserializer may introduce duplicate Script objects. + // Assert that the Script objects with the same id have the same name. + Handle<Script> found(reinterpret_cast<Script**>(entry->value)); + DCHECK(script->id() == found->id()); + DCHECK(!script->name()->IsString() || + String::cast(script->name())->Equals(String::cast(found->name()))); +#endif return; } // Globalize the script object, make it weak and use the location of the @@ -634,7 +648,7 @@ Handle<FixedArray> instances = factory->NewFixedArray(occupancy()); int count = 0; for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) { - ASSERT(entry->value != NULL); + DCHECK(entry->value != NULL); if (entry->value != NULL) { instances->set(count, *reinterpret_cast<Script**>(entry->value)); count++; @@ -644,21 +658,12 @@ } -void ScriptCache::ProcessCollectedScripts() { - Debugger* debugger = isolate_->debugger(); - for (int i = 0; i < collected_scripts_.length(); i++) { - debugger->OnScriptCollected(collected_scripts_[i]); - } - collected_scripts_.Clear(); -} - - void ScriptCache::Clear() { // Iterate the script cache to get rid of all the weak handles. 
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) { - ASSERT(entry != NULL); + DCHECK(entry != NULL); Object** location = reinterpret_cast<Object**>(entry->value); - ASSERT((*location)->IsScript()); + DCHECK((*location)->IsScript()); GlobalHandles::ClearWeakness(location); GlobalHandles::Destroy(location); } @@ -681,28 +686,12 @@ HashMap::Entry* entry = script_cache->Lookup(key, hash, false); Object** location = reinterpret_cast<Object**>(entry->value); script_cache->Remove(key, hash); - script_cache->collected_scripts_.Add(id); // Clear the weak handle. GlobalHandles::Destroy(location); } -void Debug::SetUp(bool create_heap_objects) { - ThreadInit(); - if (create_heap_objects) { - // Get code to handle debug break on return. - debug_break_return_ = - isolate_->builtins()->builtin(Builtins::kReturn_DebugBreak); - ASSERT(debug_break_return_->IsCode()); - // Get code to handle debug break in debug break slots. - debug_break_slot_ = - isolate_->builtins()->builtin(Builtins::kSlot_DebugBreak); - ASSERT(debug_break_slot_->IsCode()); - } -} - - void Debug::HandleWeakDebugInfo( const v8::WeakCallbackData<v8::Value, void>& data) { Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug(); @@ -719,7 +708,7 @@ for (DebugInfoListNode* n = debug->debug_info_list_; n != NULL; n = n->next()) { - ASSERT(n != node); + DCHECK(n != node); } #endif } @@ -745,55 +734,50 @@ HandleScope scope(isolate); // Bail out if the index is invalid. - if (index == -1) { - return false; - } + if (index == -1) return false; // Find source and name for the requested script. 
Handle<String> source_code = isolate->bootstrapper()->NativesSourceLookup(index); Vector<const char> name = Natives::GetScriptName(index); - Handle<String> script_name = factory->NewStringFromAscii(name); - ASSERT(!script_name.is_null()); + Handle<String> script_name = + factory->NewStringFromAscii(name).ToHandleChecked(); Handle<Context> context = isolate->native_context(); // Compile the script. Handle<SharedFunctionInfo> function_info; - function_info = Compiler::CompileScript(source_code, - script_name, 0, 0, - false, - context, - NULL, NULL, NO_CACHED_DATA, - NATIVES_CODE); + function_info = Compiler::CompileScript( + source_code, script_name, 0, 0, false, context, NULL, NULL, + ScriptCompiler::kNoCompileOptions, NATIVES_CODE); // Silently ignore stack overflows during compilation. if (function_info.is_null()) { - ASSERT(isolate->has_pending_exception()); + DCHECK(isolate->has_pending_exception()); isolate->clear_pending_exception(); return false; } // Execute the shared function in the debugger context. - bool caught_exception; Handle<JSFunction> function = factory->NewFunctionFromSharedFunctionInfo(function_info, context); - Handle<Object> exception = + Handle<Object> exception; + MaybeHandle<Object> result = Execution::TryCall(function, - Handle<Object>(context->global_object(), isolate), + handle(context->global_proxy()), 0, NULL, - &caught_exception); + &exception); // Check for caught exceptions. 
- if (caught_exception) { - ASSERT(!isolate->has_pending_exception()); + if (result.is_null()) { + DCHECK(!isolate->has_pending_exception()); MessageLocation computed_location; isolate->ComputeLocation(&computed_location); Handle<Object> message = MessageHandler::MakeMessageObject( isolate, "error_loading_debugger", &computed_location, Vector<Handle<Object> >::empty(), Handle<JSArray>()); - ASSERT(!isolate->has_pending_exception()); + DCHECK(!isolate->has_pending_exception()); if (!exception.is_null()) { isolate->set_pending_exception(*exception); MessageHandler::ReportMessage(isolate, NULL, message); @@ -811,20 +795,16 @@ bool Debug::Load() { // Return if debugger is already loaded. - if (IsLoaded()) return true; - - Debugger* debugger = isolate_->debugger(); + if (is_loaded()) return true; // Bail out if we're already in the process of compiling the native // JavaScript source code for the debugger. - if (debugger->compiling_natives() || - debugger->is_loading_debugger()) - return false; - debugger->set_loading_debugger(true); + if (is_suppressed_) return false; + SuppressDebug while_loading(this); // Disable breakpoints and interrupts while compiling and running the // debugger scripts including the context creation code. - DisableBreak disable(isolate_, true); + DisableBreak disable(this, true); PostponeInterruptsScope postpone(isolate_); // Create the debugger context. @@ -832,7 +812,7 @@ ExtensionConfiguration no_extensions; Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment( - Handle<Object>::null(), + MaybeHandle<JSGlobalProxy>(), v8::Handle<ObjectTemplate>(), &no_extensions); @@ -846,18 +826,14 @@ // Expose the builtins object in the debugger context. 
Handle<String> key = isolate_->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("builtins")); - Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object()); - RETURN_IF_EMPTY_HANDLE_VALUE( - isolate_, - JSReceiver::SetProperty(global, - key, - Handle<Object>(global->builtins(), isolate_), - NONE, - SLOPPY), - false); + Handle<GlobalObject> global = + Handle<GlobalObject>(context->global_object(), isolate_); + Handle<JSBuiltinsObject> builtin = + Handle<JSBuiltinsObject>(global->builtins(), isolate_); + RETURN_ON_EXCEPTION_VALUE( + isolate_, Object::SetProperty(global, key, builtin, SLOPPY), false); // Compile the JavaScript for the debugger in the debugger context. - debugger->set_compiling_natives(true); bool caught_exception = !CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) || !CompileDebuggerScript(isolate_, Natives::GetIndex("debug")); @@ -866,74 +842,51 @@ caught_exception = caught_exception || !CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit")); } - - debugger->set_compiling_natives(false); - - // Make sure we mark the debugger as not loading before we might - // return. - debugger->set_loading_debugger(false); - // Check for caught exceptions. if (caught_exception) return false; - // Debugger loaded, create debugger context global handle. debug_context_ = Handle<Context>::cast( isolate_->global_handles()->Create(*context)); - return true; } void Debug::Unload() { + ClearAllBreakPoints(); + ClearStepping(); + + // Match unmatched PopPromise calls. + while (thread_local_.promise_on_stack_) PopPromise(); + // Return debugger is not loaded. - if (!IsLoaded()) { - return; - } + if (!is_loaded()) return; // Clear the script cache. - DestroyScriptCache(); + if (script_cache_ != NULL) { + delete script_cache_; + script_cache_ = NULL; + } // Clear debugger context global handle. 
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location())); + GlobalHandles::Destroy(Handle<Object>::cast(debug_context_).location()); debug_context_ = Handle<Context>(); } -// Set the flag indicating that preemption happened during debugging. -void Debug::PreemptionWhileInDebugger() { - ASSERT(InDebugger()); - Debug::set_interrupts_pending(PREEMPT); -} - - -void Debug::Iterate(ObjectVisitor* v) { - v->VisitPointer(BitCast<Object**>(&(debug_break_return_))); - v->VisitPointer(BitCast<Object**>(&(debug_break_slot_))); -} - - -Object* Debug::Break(Arguments args) { +void Debug::Break(Arguments args, JavaScriptFrame* frame) { Heap* heap = isolate_->heap(); HandleScope scope(isolate_); - ASSERT(args.length() == 0); - - thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED; + DCHECK(args.length() == 0); - // Get the top-most JavaScript frame. - JavaScriptFrameIterator it(isolate_); - JavaScriptFrame* frame = it.frame(); + // Initialize LiveEdit. + LiveEdit::InitializeThreadLocal(this); // Just continue if breaks are disabled or debugger cannot be loaded. - if (disable_break() || !Load()) { - SetAfterBreakTarget(frame); - return heap->undefined_value(); - } + if (break_disabled_) return; // Enter the debugger. - EnterDebugger debugger(isolate_); - if (debugger.FailedToEnter()) { - return heap->undefined_value(); - } + DebugScope debug_scope(this); + if (debug_scope.failed()) return; // Postpone interrupt during breakpoint processing. PostponeInterruptsScope postpone(isolate_); @@ -969,10 +922,11 @@ // If step out is active skip everything until the frame where we need to step // out to is reached, unless real breakpoint is hit. - if (StepOutActive() && frame->fp() != step_out_fp() && + if (StepOutActive() && + frame->fp() != thread_local_.step_out_fp_ && break_points_hit->IsUndefined() ) { // Step count should always be 0 for StepOut. 
- ASSERT(thread_local_.step_count_ == 0); + DCHECK(thread_local_.step_count_ == 0); } else if (!break_points_hit->IsUndefined() || (thread_local_.last_step_action_ != StepNone && thread_local_.step_count_ == 0)) { @@ -992,7 +946,7 @@ PrepareStep(StepNext, step_count, StackFrame::NO_ID); } else { // Notify the debug event listeners. - isolate_->debugger()->OnDebugBreak(break_points_hit, false); + OnDebugBreak(break_points_hit, false); } } else if (thread_local_.last_step_action_ != StepNone) { // Hold on to last step action as it is cleared by the call to @@ -1029,40 +983,15 @@ // Set up for the remaining steps. PrepareStep(step_action, step_count, StackFrame::NO_ID); } - - if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) { - SetAfterBreakTarget(frame); - } else if (thread_local_.frame_drop_mode_ == - FRAME_DROPPED_IN_IC_CALL) { - // We must have been calling IC stub. Do not go there anymore. - Code* plain_return = isolate_->builtins()->builtin( - Builtins::kPlainReturn_LiveEdit); - thread_local_.after_break_target_ = plain_return->entry(); - } else if (thread_local_.frame_drop_mode_ == - FRAME_DROPPED_IN_DEBUG_SLOT_CALL) { - // Debug break slot stub does not return normally, instead it manually - // cleans the stack and jumps. We should patch the jump address. - Code* plain_return = isolate_->builtins()->builtin( - Builtins::kFrameDropper_LiveEdit); - thread_local_.after_break_target_ = plain_return->entry(); - } else if (thread_local_.frame_drop_mode_ == - FRAME_DROPPED_IN_DIRECT_CALL) { - // Nothing to do, after_break_target is not used here. 
- } else if (thread_local_.frame_drop_mode_ == - FRAME_DROPPED_IN_RETURN_CALL) { - Code* plain_return = isolate_->builtins()->builtin( - Builtins::kFrameDropper_LiveEdit); - thread_local_.after_break_target_ = plain_return->entry(); - } else { - UNREACHABLE(); - } - - return heap->undefined_value(); } -RUNTIME_FUNCTION(Object*, Debug_Break) { - return isolate->debug()->Break(args); +RUNTIME_FUNCTION(Debug_Break) { + // Get the top-most JavaScript frame. + JavaScriptFrameIterator it(isolate); + isolate->debug()->Break(args, it.frame()); + isolate->debug()->SetAfterBreakTarget(it.frame()); + return isolate->heap()->undefined_value(); } @@ -1076,7 +1005,7 @@ // they are in a FixedArray. Handle<FixedArray> break_points_hit; int break_points_hit_count = 0; - ASSERT(!break_point_objects->IsUndefined()); + DCHECK(!break_point_objects->IsUndefined()); if (break_point_objects->IsFixedArray()) { Handle<FixedArray> array(FixedArray::cast(*break_point_objects)); break_points_hit = factory->NewFixedArray(array->length()); @@ -1116,31 +1045,26 @@ Handle<String> is_break_point_triggered_string = factory->InternalizeOneByteString( STATIC_ASCII_VECTOR("IsBreakPointTriggered")); + Handle<GlobalObject> debug_global(debug_context()->global_object()); Handle<JSFunction> check_break_point = - Handle<JSFunction>(JSFunction::cast( - debug_context()->global_object()->GetPropertyNoExceptionThrown( - *is_break_point_triggered_string))); + Handle<JSFunction>::cast(Object::GetProperty( + debug_global, is_break_point_triggered_string).ToHandleChecked()); // Get the break id as an object. Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id()); // Call HandleBreakPointx. 
- bool caught_exception; Handle<Object> argv[] = { break_id, break_point_object }; - Handle<Object> result = Execution::TryCall(check_break_point, - isolate_->js_builtins_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); - - // If exception or non boolean result handle as not triggered - if (caught_exception || !result->IsBoolean()) { + Handle<Object> result; + if (!Execution::TryCall(check_break_point, + isolate_->js_builtins_object(), + ARRAY_SIZE(argv), + argv).ToHandle(&result)) { return false; } // Return whether the break point is triggered. - ASSERT(!result.is_null()); - return (*result)->IsTrue(); + return result->IsTrue(); } @@ -1153,12 +1077,12 @@ // Return the debug info for this function. EnsureDebugInfo must be called // prior to ensure the debug info has been generated for shared. Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) { - ASSERT(HasDebugInfo(shared)); + DCHECK(HasDebugInfo(shared)); return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info())); } -void Debug::SetBreakPoint(Handle<JSFunction> function, +bool Debug::SetBreakPoint(Handle<JSFunction> function, Handle<Object> break_point_object, int* source_position) { HandleScope scope(isolate_); @@ -1169,12 +1093,12 @@ Handle<SharedFunctionInfo> shared(function->shared()); if (!EnsureDebugInfo(shared, function)) { // Return if retrieving debug info failed. - return; + return true; } Handle<DebugInfo> debug_info = GetDebugInfo(shared); // Source positions starts with zero. - ASSERT(*source_position >= 0); + DCHECK(*source_position >= 0); // Find the break point and change it. BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS); @@ -1184,7 +1108,7 @@ *source_position = it.position(); // At least one active break point now. - ASSERT(debug_info->GetBreakPointCount() > 0); + return debug_info->GetBreakPointCount() > 0; } @@ -1218,7 +1142,7 @@ Handle<DebugInfo> debug_info = GetDebugInfo(shared); // Source positions starts with zero. 
- ASSERT(position >= 0); + DCHECK(position >= 0); // Find the break point and change it. BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS); @@ -1228,7 +1152,7 @@ *source_position = it.position() + shared->start_position(); // At least one active break point now. - ASSERT(debug_info->GetBreakPointCount() > 0); + DCHECK(debug_info->GetBreakPointCount() > 0); return true; } @@ -1305,7 +1229,7 @@ isolate_); if (!bindee.is_null() && bindee->IsJSFunction() && - !JSFunction::cast(*bindee)->IsBuiltin()) { + !JSFunction::cast(*bindee)->IsFromNativeScript()) { Handle<JSFunction> bindee_function(JSFunction::cast(*bindee)); Debug::FloodWithOneShot(bindee_function); } @@ -1348,6 +1272,68 @@ } +PromiseOnStack::PromiseOnStack(Isolate* isolate, PromiseOnStack* prev, + Handle<JSObject> promise) + : isolate_(isolate), prev_(prev) { + handler_ = StackHandler::FromAddress( + Isolate::handler(isolate->thread_local_top())); + promise_ = + Handle<JSObject>::cast(isolate->global_handles()->Create(*promise)); +} + + +PromiseOnStack::~PromiseOnStack() { + isolate_->global_handles()->Destroy( + Handle<Object>::cast(promise_).location()); +} + + +void Debug::PushPromise(Handle<JSObject> promise) { + PromiseOnStack* prev = thread_local_.promise_on_stack_; + thread_local_.promise_on_stack_ = new PromiseOnStack(isolate_, prev, promise); +} + + +void Debug::PopPromise() { + if (thread_local_.promise_on_stack_ == NULL) return; + PromiseOnStack* prev = thread_local_.promise_on_stack_->prev(); + delete thread_local_.promise_on_stack_; + thread_local_.promise_on_stack_ = prev; +} + + +Handle<Object> Debug::GetPromiseOnStackOnThrow() { + Handle<Object> undefined = isolate_->factory()->undefined_value(); + if (thread_local_.promise_on_stack_ == NULL) return undefined; + StackHandler* promise_try = thread_local_.promise_on_stack_->handler(); + // Find the top-most try-catch handler. 
+ StackHandler* handler = StackHandler::FromAddress( + Isolate::handler(isolate_->thread_local_top())); + do { + if (handler == promise_try) { + // Mark the pushed try-catch handler to prevent a later duplicate event + // triggered with the following reject. + return thread_local_.promise_on_stack_->promise(); + } + handler = handler->next(); + // Throwing inside a Promise can be intercepted by an inner try-catch, so + // we stop at the first try-catch handler. + } while (handler != NULL && !handler->is_catch()); + return undefined; +} + + +bool Debug::PromiseHasRejectHandler(Handle<JSObject> promise) { + Handle<JSFunction> fun = Handle<JSFunction>::cast( + JSObject::GetDataProperty(isolate_->js_builtins_object(), + isolate_->factory()->NewStringFromStaticAscii( + "PromiseHasRejectHandler"))); + Handle<Object> result = + Execution::Call(isolate_, fun, promise, 0, NULL).ToHandleChecked(); + return result->IsTrue(); +} + + void Debug::PrepareStep(StepAction step_action, int step_count, StackFrame::Id frame_id) { @@ -1355,7 +1341,7 @@ PrepareForBreakPoints(); - ASSERT(Debug::InDebugger()); + DCHECK(in_debug_scope()); // Remember this step action and count. 
thread_local_.last_step_action_ = step_action; @@ -1425,6 +1411,9 @@ bool is_call_target = false; Address target = it.rinfo()->target_address(); Code* code = Code::GetCodeFromTargetAddress(target); + if (code->is_call_stub()) { + is_call_target = true; + } if (code->is_inline_cache_stub()) { is_inline_cache_stub = true; is_load_or_store = !is_call_target; @@ -1439,8 +1428,10 @@ maybe_call_function_stub = Code::GetCodeFromTargetAddress(original_target); } - if (maybe_call_function_stub->kind() == Code::STUB && - maybe_call_function_stub->major_key() == CodeStub::CallFunction) { + if ((maybe_call_function_stub->kind() == Code::STUB && + CodeStub::GetMajorKey(maybe_call_function_stub) == + CodeStub::CallFunction) || + maybe_call_function_stub->kind() == Code::CALL_IC) { // Save reference to the code as we may need it to find out arguments // count for 'step in' later. call_function_stub = Handle<Code>(maybe_call_function_stub); @@ -1458,11 +1449,12 @@ frames_it.Advance(); } } else { - ASSERT(it.IsExit()); + DCHECK(it.IsExit()); frames_it.Advance(); } // Skip builtin functions on the stack. - while (!frames_it.done() && frames_it.frame()->function()->IsBuiltin()) { + while (!frames_it.done() && + frames_it.frame()->function()->IsFromNativeScript()) { frames_it.Advance(); } // Step out: If there is a JavaScript caller frame, we need to @@ -1496,26 +1488,20 @@ } else if (!call_function_stub.is_null()) { // If it's CallFunction stub ensure target function is compiled and flood // it with one shot breakpoints. + bool is_call_ic = call_function_stub->kind() == Code::CALL_IC; // Find out number of arguments from the stub minor key. - // Reverse lookup required as the minor key cannot be retrieved - // from the code object. - Handle<Object> obj( - isolate_->heap()->code_stubs()->SlowReverseLookup( - *call_function_stub), - isolate_); - ASSERT(!obj.is_null()); - ASSERT(!(*obj)->IsUndefined()); - ASSERT(obj->IsSmi()); - // Get the STUB key and extract major and minor key. 
- uint32_t key = Smi::cast(*obj)->value(); + uint32_t key = call_function_stub->stub_key(); // Argc in the stub is the number of arguments passed - not the // expected arguments of the called function. - int call_function_arg_count = - CallFunctionStub::ExtractArgcFromMinorKey( + int call_function_arg_count = is_call_ic + ? CallICStub::ExtractArgcFromMinorKey(CodeStub::MinorKeyFromKey(key)) + : CallFunctionStub::ExtractArgcFromMinorKey( CodeStub::MinorKeyFromKey(key)); - ASSERT(call_function_stub->major_key() == - CodeStub::MajorKeyFromKey(key)); + + DCHECK(is_call_ic || + CodeStub::GetMajorKey(*call_function_stub) == + CodeStub::MajorKeyFromKey(key)); // Find target function on the expression stack. // Expression stack looks like this (top to bottom): @@ -1525,7 +1511,7 @@ // Receiver // Function to call int expressions_count = frame->ComputeExpressionsCount(); - ASSERT(expressions_count - 2 - call_function_arg_count >= 0); + DCHECK(expressions_count - 2 - call_function_arg_count >= 0); Object* fun = frame->GetExpression( expressions_count - 2 - call_function_arg_count); @@ -1546,7 +1532,7 @@ Handle<JSFunction> js_function(JSFunction::cast(fun)); if (js_function->shared()->bound()) { Debug::FloodBoundFunctionWithOneShot(js_function); - } else if (!js_function->IsBuiltin()) { + } else if (!js_function->IsFromNativeScript()) { // Don't step into builtins. // It will also compile target function if it's not compiled yet. FloodWithOneShot(js_function); @@ -1563,7 +1549,7 @@ if (is_load_or_store) { // Remember source position and frame to handle step in getter/setter. If // there is a custom getter/setter it will be handled in - // Object::Get/SetPropertyWithCallback, otherwise the step action will be + // Object::Get/SetPropertyWithAccessor, otherwise the step action will be // propagated on the next Debug::Break. 
thread_local_.last_statement_position_ = debug_info->code()->SourceStatementPosition(frame->pc()); @@ -1619,68 +1605,7 @@ } -// Check whether a code stub with the specified major key is a possible break -// point location when looking for source break locations. -bool Debug::IsSourceBreakStub(Code* code) { - CodeStub::Major major_key = CodeStub::GetMajorKey(code); - return major_key == CodeStub::CallFunction; -} - - -// Check whether a code stub with the specified major key is a possible break -// location. -bool Debug::IsBreakStub(Code* code) { - CodeStub::Major major_key = CodeStub::GetMajorKey(code); - return major_key == CodeStub::CallFunction; -} - - -// Find the builtin to use for invoking the debug break -Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) { - Isolate* isolate = code->GetIsolate(); - - // Find the builtin debug break function matching the calling convention - // used by the call site. - if (code->is_inline_cache_stub()) { - switch (code->kind()) { - case Code::LOAD_IC: - return isolate->builtins()->LoadIC_DebugBreak(); - - case Code::STORE_IC: - return isolate->builtins()->StoreIC_DebugBreak(); - - case Code::KEYED_LOAD_IC: - return isolate->builtins()->KeyedLoadIC_DebugBreak(); - - case Code::KEYED_STORE_IC: - return isolate->builtins()->KeyedStoreIC_DebugBreak(); - - case Code::COMPARE_NIL_IC: - return isolate->builtins()->CompareNilIC_DebugBreak(); - - default: - UNREACHABLE(); - } - } - if (RelocInfo::IsConstructCall(mode)) { - if (code->has_function_cache()) { - return isolate->builtins()->CallConstructStub_Recording_DebugBreak(); - } else { - return isolate->builtins()->CallConstructStub_DebugBreak(); - } - } - if (code->kind() == Code::STUB) { - ASSERT(code->major_key() == CodeStub::CallFunction); - if (code->has_function_cache()) { - return isolate->builtins()->CallFunctionStub_Recording_DebugBreak(); - } else { - return isolate->builtins()->CallFunctionStub_DebugBreak(); - } - } - UNREACHABLE(); - return 
Handle<Code>::null(); -} // Simple function for returning the source positions for active break points. @@ -1725,18 +1650,6 @@ } -void Debug::NewBreak(StackFrame::Id break_frame_id) { - thread_local_.break_frame_id_ = break_frame_id; - thread_local_.break_id_ = ++thread_local_.break_count_; -} - - -void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) { - thread_local_.break_frame_id_ = break_frame_id; - thread_local_.break_id_ = break_id; -} - - // Handle stepping into a function. void Debug::HandleStepIn(Handle<JSFunction> function, Handle<Object> holder, @@ -1749,7 +1662,7 @@ it.Advance(); // For constructor functions skip another frame. if (is_constructor) { - ASSERT(it.frame()->is_construct()); + DCHECK(it.frame()->is_construct()); it.Advance(); } fp = it.frame()->fp(); @@ -1757,11 +1670,11 @@ // Flood the function with one-shot break points if it is called from where // step into was requested. - if (fp == step_in_fp()) { + if (fp == thread_local_.step_into_fp_) { if (function->shared()->bound()) { // Handle Function.prototype.bind Debug::FloodBoundFunctionWithOneShot(function); - } else if (!function->IsBuiltin()) { + } else if (!function->IsFromNativeScript()) { // Don't allow step into functions in the native context. if (function->shared()->code() == isolate->builtins()->builtin(Builtins::kFunctionApply) || @@ -1773,7 +1686,7 @@ // function. 
if (!holder.is_null() && holder->IsJSFunction()) { Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder); - if (!js_function->IsBuiltin()) { + if (!js_function->IsFromNativeScript()) { Debug::FloodWithOneShot(js_function); } else if (js_function->shared()->bound()) { // Handle Function.prototype.bind @@ -1821,7 +1734,7 @@ void Debug::ActivateStepIn(StackFrame* frame) { - ASSERT(!StepOutActive()); + DCHECK(!StepOutActive()); thread_local_.step_into_fp_ = frame->UnpaddedFP(); } @@ -1832,7 +1745,7 @@ void Debug::ActivateStepOut(StackFrame* frame) { - ASSERT(!StepInActive()); + DCHECK(!StepInActive()); thread_local_.step_out_fp_ = frame->UnpaddedFP(); } @@ -1870,7 +1783,7 @@ } } else if (frame->function()->IsJSFunction()) { JSFunction* function = frame->function(); - ASSERT(frame->LookupCode()->kind() == Code::FUNCTION); + DCHECK(frame->LookupCode()->kind() == Code::FUNCTION); active_functions->Add(Handle<JSFunction>(function)); function->shared()->code()->set_gc_metadata(active_code_marker); } @@ -1878,6 +1791,59 @@ } +// Figure out how many bytes of "pc_offset" correspond to actual code by +// subtracting off the bytes that correspond to constant/veneer pools. See +// Assembler::CheckConstPool() and Assembler::CheckVeneerPool(). Note that this +// is only useful for architectures using constant pools or veneer pools. 
+static int ComputeCodeOffsetFromPcOffset(Code *code, int pc_offset) { + DCHECK_EQ(code->kind(), Code::FUNCTION); + DCHECK(!code->has_debug_break_slots()); + DCHECK_LE(0, pc_offset); + DCHECK_LT(pc_offset, code->instruction_end() - code->instruction_start()); + + int mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) | + RelocInfo::ModeMask(RelocInfo::VENEER_POOL); + byte *pc = code->instruction_start() + pc_offset; + int code_offset = pc_offset; + for (RelocIterator it(code, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + if (info->pc() >= pc) break; + DCHECK(RelocInfo::IsConstPool(info->rmode())); + code_offset -= static_cast<int>(info->data()); + DCHECK_LE(0, code_offset); + } + + return code_offset; +} + + +// The inverse of ComputeCodeOffsetFromPcOffset. +static int ComputePcOffsetFromCodeOffset(Code *code, int code_offset) { + DCHECK_EQ(code->kind(), Code::FUNCTION); + + int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | + RelocInfo::ModeMask(RelocInfo::CONST_POOL) | + RelocInfo::ModeMask(RelocInfo::VENEER_POOL); + int reloc = 0; + for (RelocIterator it(code, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + if (info->pc() - code->instruction_start() - reloc >= code_offset) break; + if (RelocInfo::IsDebugBreakSlot(info->rmode())) { + reloc += Assembler::kDebugBreakSlotLength; + } else { + DCHECK(RelocInfo::IsConstPool(info->rmode())); + reloc += static_cast<int>(info->data()); + } + } + + int pc_offset = code_offset + reloc; + + DCHECK_LT(code->instruction_start() + pc_offset, code->instruction_end()); + + return pc_offset; +} + + static void RedirectActivationsToRecompiledCodeOnThread( Isolate* isolate, ThreadLocalTop* top) { @@ -1888,7 +1854,7 @@ JSFunction* function = frame->function(); - ASSERT(frame->LookupCode()->kind() == Code::FUNCTION); + DCHECK(frame->LookupCode()->kind() == Code::FUNCTION); Handle<Code> frame_code(frame->LookupCode()); if (frame_code->has_debug_break_slots()) continue; @@ -1899,51 
+1865,13 @@ continue; } - // Iterate over the RelocInfo in the original code to compute the sum of the - // constant pools and veneer pools sizes. (See Assembler::CheckConstPool() - // and Assembler::CheckVeneerPool()) - // Note that this is only useful for architectures using constant pools or - // veneer pools. - int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) | - RelocInfo::ModeMask(RelocInfo::VENEER_POOL); - int frame_pool_size = 0; - for (RelocIterator it(*frame_code, pool_mask); !it.done(); it.next()) { - RelocInfo* info = it.rinfo(); - if (info->pc() >= frame->pc()) break; - frame_pool_size += static_cast<int>(info->data()); - } - intptr_t frame_offset = - frame->pc() - frame_code->instruction_start() - frame_pool_size; - - // Iterate over the RelocInfo for new code to find the number of bytes - // generated for debug slots and constant pools. - int debug_break_slot_bytes = 0; - int new_code_pool_size = 0; - int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | - RelocInfo::ModeMask(RelocInfo::CONST_POOL) | - RelocInfo::ModeMask(RelocInfo::VENEER_POOL); - for (RelocIterator it(*new_code, mask); !it.done(); it.next()) { - // Check if the pc in the new code with debug break - // slots is before this slot. - RelocInfo* info = it.rinfo(); - intptr_t new_offset = info->pc() - new_code->instruction_start() - - new_code_pool_size - debug_break_slot_bytes; - if (new_offset >= frame_offset) { - break; - } - - if (RelocInfo::IsDebugBreakSlot(info->rmode())) { - debug_break_slot_bytes += Assembler::kDebugBreakSlotLength; - } else { - ASSERT(RelocInfo::IsConstPool(info->rmode())); - // The size of the pools is encoded in the data. 
- new_code_pool_size += static_cast<int>(info->data()); - } - } + int old_pc_offset = + static_cast<int>(frame->pc() - frame_code->instruction_start()); + int code_offset = ComputeCodeOffsetFromPcOffset(*frame_code, old_pc_offset); + int new_pc_offset = ComputePcOffsetFromCodeOffset(*new_code, code_offset); // Compute the equivalent pc in the new code. - byte* new_pc = new_code->instruction_start() + frame_offset + - debug_break_slot_bytes + new_code_pool_size; + byte* new_pc = new_code->instruction_start() + new_pc_offset; if (FLAG_trace_deopt) { PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) " @@ -1964,6 +1892,11 @@ reinterpret_cast<intptr_t>(new_pc)); } + if (FLAG_enable_ool_constant_pool) { + // Update constant pool pointer for new code. + frame->set_constant_pool(new_code->constant_pool()); + } + // Patch the return address to return into the code with // debug break slots. frame->set_pc(new_pc); @@ -1999,8 +1932,41 @@ }; -void Debug::PrepareForBreakPoints() { - // If preparing for the first break point make sure to deoptimize all +static void EnsureFunctionHasDebugBreakSlots(Handle<JSFunction> function) { + if (function->code()->kind() == Code::FUNCTION && + function->code()->has_debug_break_slots()) { + // Nothing to do. Function code already had debug break slots. + return; + } + // Make sure that the shared full code is compiled with debug + // break slots. + if (!function->shared()->code()->has_debug_break_slots()) { + MaybeHandle<Code> code = Compiler::GetCodeForDebugging(function); + // Recompilation can fail. In that case leave the code as it was. + if (!code.is_null()) function->ReplaceCode(*code.ToHandleChecked()); + } else { + // Simply use shared code if it has debug break slots. 
+ function->ReplaceCode(function->shared()->code()); + } +} + + +static void RecompileAndRelocateSuspendedGenerators( + const List<Handle<JSGeneratorObject> > &generators) { + for (int i = 0; i < generators.length(); i++) { + Handle<JSFunction> fun(generators[i]->function()); + + EnsureFunctionHasDebugBreakSlots(fun); + + int code_offset = generators[i]->continuation(); + int pc_offset = ComputePcOffsetFromCodeOffset(fun->code(), code_offset); + generators[i]->set_continuation(pc_offset); + } +} + + +void Debug::PrepareForBreakPoints() { + // If preparing for the first break point make sure to deoptimize all // functions as debugging does not work with optimized code. if (!has_break_points_) { if (isolate_->concurrent_recompilation_enabled()) { @@ -2018,12 +1984,28 @@ // is used both in GC and non-GC code. List<Handle<JSFunction> > active_functions(100); + // A list of all suspended generators. + List<Handle<JSGeneratorObject> > suspended_generators; + + // A list of all generator functions. We need to recompile all functions, + // but we don't know until after visiting the whole heap which generator + // functions have suspended activations and which do not. As in the case of + // functions with activations on the stack, we need to be careful with + // generator functions with suspended activations because although they + // should be recompiled, recompilation can fail, and we need to avoid + // leaving the heap in an inconsistent state. + // + // We could perhaps avoid this list and instead re-use the GC metadata + // links. + List<Handle<JSFunction> > generator_functions; + { // We are going to iterate heap to find all functions without // debug break slots. Heap* heap = isolate_->heap(); heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "preparing for breakpoints"); + HeapIterator iterator(heap); // Ensure no GC in this scope as we are going to use gc_metadata // field in the Code object to mark active functions. 
@@ -2043,7 +2025,6 @@ // Scan the heap for all non-optimized functions which have no // debug break slots and are not active or inlined into an active // function and mark them for lazy compilation. - HeapIterator iterator(heap); HeapObject* obj = NULL; while (((obj = iterator.next()) != NULL)) { if (obj->IsJSFunction()) { @@ -2052,14 +2033,19 @@ if (!shared->allows_lazy_compilation()) continue; if (!shared->script()->IsScript()) continue; - if (function->IsBuiltin()) continue; + if (function->IsFromNativeScript()) continue; if (shared->code()->gc_metadata() == active_code_marker) continue; + if (shared->is_generator()) { + generator_functions.Add(Handle<JSFunction>(function, isolate_)); + continue; + } + Code::Kind kind = function->code()->kind(); if (kind == Code::FUNCTION && !function->code()->has_debug_break_slots()) { - function->set_code(*lazy_compile); - function->shared()->set_code(*lazy_compile); + function->ReplaceCode(*lazy_compile); + function->shared()->ReplaceCode(*lazy_compile); } else if (kind == Code::BUILTIN && (function->IsInOptimizationQueue() || function->IsMarkedForOptimization() || @@ -2068,12 +2054,30 @@ Code* shared_code = function->shared()->code(); if (shared_code->kind() == Code::FUNCTION && shared_code->has_debug_break_slots()) { - function->set_code(shared_code); + function->ReplaceCode(shared_code); } else { - function->set_code(*lazy_compile); - function->shared()->set_code(*lazy_compile); + function->ReplaceCode(*lazy_compile); + function->shared()->ReplaceCode(*lazy_compile); } } + } else if (obj->IsJSGeneratorObject()) { + JSGeneratorObject* gen = JSGeneratorObject::cast(obj); + if (!gen->is_suspended()) continue; + + JSFunction* fun = gen->function(); + DCHECK_EQ(fun->code()->kind(), Code::FUNCTION); + if (fun->code()->has_debug_break_slots()) continue; + + int pc_offset = gen->continuation(); + DCHECK_LT(0, pc_offset); + + int code_offset = + ComputeCodeOffsetFromPcOffset(fun->code(), pc_offset); + + // This will be fixed after 
we recompile the functions. + gen->set_continuation(code_offset); + + suspended_generators.Add(Handle<JSGeneratorObject>(gen, isolate_)); } } @@ -2084,41 +2088,35 @@ } } + // Recompile generator functions that have suspended activations, and + // relocate those activations. + RecompileAndRelocateSuspendedGenerators(suspended_generators); + + // Mark generator functions that didn't have suspended activations for lazy + // recompilation. Note that this set does not include any active functions. + for (int i = 0; i < generator_functions.length(); i++) { + Handle<JSFunction> &function = generator_functions[i]; + if (function->code()->kind() != Code::FUNCTION) continue; + if (function->code()->has_debug_break_slots()) continue; + function->ReplaceCode(*lazy_compile); + function->shared()->ReplaceCode(*lazy_compile); + } + // Now recompile all functions with activation frames and and - // patch the return address to run in the new compiled code. + // patch the return address to run in the new compiled code. It could be + // that some active functions were recompiled already by the suspended + // generator recompilation pass above; a generator with suspended + // activations could also have active activations. That's fine. for (int i = 0; i < active_functions.length(); i++) { Handle<JSFunction> function = active_functions[i]; Handle<SharedFunctionInfo> shared(function->shared()); - if (function->code()->kind() == Code::FUNCTION && - function->code()->has_debug_break_slots()) { - // Nothing to do. Function code already had debug break slots. - continue; - } - // If recompilation is not possible just skip it. - if (shared->is_toplevel() || - !shared->allows_lazy_compilation() || - shared->code()->kind() == Code::BUILTIN) { - continue; - } - - // Make sure that the shared full code is compiled with debug - // break slots. - if (!shared->code()->has_debug_break_slots()) { - // Try to compile the full code with debug break slots. If it - // fails just keep the current code. 
- bool prev_force_debugger_active = - isolate_->debugger()->force_debugger_active(); - isolate_->debugger()->set_force_debugger_active(true); - Handle<Code> code = Compiler::GetCodeForDebugging(function); - function->ReplaceCode(*code); - isolate_->debugger()->set_force_debugger_active( - prev_force_debugger_active); - } + if (shared->is_toplevel()) continue; + if (!shared->allows_lazy_compilation()) continue; + if (shared->code()->kind() == Code::BUILTIN) continue; - // Keep function code in sync with shared function info. - function->set_code(shared->code()); + EnsureFunctionHasDebugBreakSlots(function); } RedirectActivationsToRecompiledCodeOnThread(isolate_, @@ -2151,9 +2149,7 @@ Handle<SharedFunctionInfo> target; Heap* heap = isolate_->heap(); while (!done) { - { // Extra scope for iterator and no-allocation. - heap->EnsureHeapIsIterable(); - DisallowHeapAllocation no_alloc_during_heap_iteration; + { // Extra scope for iterator. HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { @@ -2163,7 +2159,7 @@ if (obj->IsJSFunction()) { function = Handle<JSFunction>(JSFunction::cast(obj)); shared = Handle<SharedFunctionInfo>(function->shared()); - ASSERT(shared->allows_lazy_compilation() || shared->is_compiled()); + DCHECK(shared->allows_lazy_compilation() || shared->is_compiled()); found_next_candidate = true; } else if (obj->IsSharedFunctionInfo()) { shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj)); @@ -2227,10 +2223,10 @@ // will compile all inner functions that cannot be compiled without a // context, because Compiler::BuildFunctionInfo checks whether the // debugger is active. - Handle<Code> result = target_function.is_null() + MaybeHandle<Code> maybe_result = target_function.is_null() ? 
Compiler::GetUnoptimizedCode(target) : Compiler::GetUnoptimizedCode(target_function); - if (result.is_null()) return isolate_->heap()->undefined_value(); + if (maybe_result.is_null()) return isolate_->heap()->undefined_value(); } } // End while loop. @@ -2245,7 +2241,7 @@ // Return if we already have the debug info for shared. if (HasDebugInfo(shared)) { - ASSERT(shared->is_compiled()); + DCHECK(shared->is_compiled()); return true; } @@ -2271,7 +2267,7 @@ void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) { - ASSERT(debug_info_list_ != NULL); + DCHECK(debug_info_list_ != NULL); // Run through the debug info objects to find this one and remove it. DebugInfoListNode* prev = NULL; DebugInfoListNode* current = debug_info_list_; @@ -2302,8 +2298,11 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) { - HandleScope scope(isolate_); + after_break_target_ = NULL; + + if (LiveEdit::SetAfterBreakTarget(this)) return; // LiveEdit did the job. + HandleScope scope(isolate_); PrepareForBreakPoints(); // Get the executing function in which the debug break occurred. @@ -2319,13 +2318,13 @@ #ifdef DEBUG // Get the code which is actually executing. Handle<Code> frame_code(frame->LookupCode()); - ASSERT(frame_code.is_identical_to(code)); + DCHECK(frame_code.is_identical_to(code)); #endif // Find the call address in the running code. This address holds the call to // either a DebugBreakXXX or to the debug break return entry code if the // break point is still active after processing the break point. - Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset; + Address addr = Assembler::break_address_from_return_address(frame->pc()); // Check if the location is at JS exit or debug break slot. bool at_js_return = false; @@ -2352,38 +2351,38 @@ // place in the original code. If not the break point was removed during // break point processing. 
if (break_at_js_return_active) { - addr += original_code->instruction_start() - code->instruction_start(); + addr += original_code->instruction_start() - code->instruction_start(); } // Move back to where the call instruction sequence started. - thread_local_.after_break_target_ = - addr - Assembler::kPatchReturnSequenceAddressOffset; + after_break_target_ = addr - Assembler::kPatchReturnSequenceAddressOffset; } else if (at_debug_break_slot) { // Address of where the debug break slot starts. addr = addr - Assembler::kPatchDebugBreakSlotAddressOffset; // Continue just after the slot. - thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength; - } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) { - // We now know that there is still a debug break call at the target address, - // so the break point is still there and the original code will hold the - // address to jump to in order to complete the call which is replaced by a - // call to DebugBreakXXX. - - // Find the corresponding address in the original code. - addr += original_code->instruction_start() - code->instruction_start(); - - // Install jump to the call address in the original code. This will be the - // call which was overwritten by the call to DebugBreakXXX. - thread_local_.after_break_target_ = - Assembler::target_address_at(addr, *original_code); + after_break_target_ = addr + Assembler::kDebugBreakSlotLength; } else { - // There is no longer a break point present. Don't try to look in the - // original code as the running code will have the right address. This takes - // care of the case where the last break point is removed from the function - // and therefore no "original code" is available. 
- thread_local_.after_break_target_ = - Assembler::target_address_at(addr, *code); + addr = Assembler::target_address_from_return_address(frame->pc()); + if (IsDebugBreak(Assembler::target_address_at(addr, *code))) { + // We now know that there is still a debug break call at the target + // address, so the break point is still there and the original code will + // hold the address to jump to in order to complete the call which is + // replaced by a call to DebugBreakXXX. + + // Find the corresponding address in the original code. + addr += original_code->instruction_start() - code->instruction_start(); + + // Install jump to the call address in the original code. This will be the + // call which was overwritten by the call to DebugBreakXXX. + after_break_target_ = Assembler::target_address_at(addr, *original_code); + } else { + // There is no longer a break point present. Don't try to look in the + // original code as the running code will have the right address. This + // takes care of the case where the last break point is removed from the + // function and therefore no "original code" is available. + after_break_target_ = Assembler::target_address_at(addr, *code); + } } } @@ -2412,11 +2411,11 @@ #ifdef DEBUG // Get the code which is actually executing. Handle<Code> frame_code(frame->LookupCode()); - ASSERT(frame_code.is_identical_to(code)); + DCHECK(frame_code.is_identical_to(code)); #endif // Find the call address in the running code. - Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset; + Address addr = Assembler::break_address_from_return_address(frame->pc()); // Check if the location is at JS return. 
RelocIterator it(debug_info->code()); @@ -2432,9 +2431,9 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id, - FrameDropMode mode, + LiveEdit::FrameDropMode mode, Object** restarter_frame_function_pointer) { - if (mode != CURRENTLY_SET_MODE) { + if (mode != LiveEdit::CURRENTLY_SET_MODE) { thread_local_.frame_drop_mode_ = mode; } thread_local_.break_frame_id_ = new_break_frame_id; @@ -2443,94 +2442,32 @@ } -const int Debug::FramePaddingLayout::kInitialSize = 1; - - -// Any even value bigger than kInitialSize as needed for stack scanning. -const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1; - - bool Debug::IsDebugGlobal(GlobalObject* global) { - return IsLoaded() && global == debug_context()->global_object(); + return is_loaded() && global == debug_context()->global_object(); } void Debug::ClearMirrorCache() { PostponeInterruptsScope postpone(isolate_); HandleScope scope(isolate_); - ASSERT(isolate_->context() == *Debug::debug_context()); - - // Clear the mirror cache. - Handle<String> function_name = isolate_->factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("ClearMirrorCache")); - Handle<Object> fun( - isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name), - isolate_); - ASSERT(fun->IsJSFunction()); - bool caught_exception; - Execution::TryCall(Handle<JSFunction>::cast(fun), - Handle<JSObject>(Debug::debug_context()->global_object()), - 0, NULL, &caught_exception); -} - - -void Debug::CreateScriptCache() { - Heap* heap = isolate_->heap(); - HandleScope scope(isolate_); - - // Perform two GCs to get rid of all unreferenced scripts. The first GC gets - // rid of all the cached script wrappers and the second gets rid of the - // scripts which are no longer referenced. The second also sweeps precisely, - // which saves us doing yet another GC to make the heap iterable. 
- heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache"); - heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "Debug::CreateScriptCache"); - - ASSERT(script_cache_ == NULL); - script_cache_ = new ScriptCache(isolate_); - - // Scan heap for Script objects. - int count = 0; - HeapIterator iterator(heap); - DisallowHeapAllocation no_allocation; - - for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { - if (obj->IsScript() && Script::cast(obj)->HasValidSource()) { - script_cache_->Add(Handle<Script>(Script::cast(obj))); - count++; - } - } -} - - -void Debug::DestroyScriptCache() { - // Get rid of the script cache if it was created. - if (script_cache_ != NULL) { - delete script_cache_; - script_cache_ = NULL; - } -} - - -void Debug::AddScriptToScriptCache(Handle<Script> script) { - if (script_cache_ != NULL) { - script_cache_->Add(script); - } + AssertDebugContext(); + Factory* factory = isolate_->factory(); + Handle<GlobalObject> global(isolate_->global_object()); + JSObject::SetProperty(global, + factory->NewStringFromAsciiChecked("next_handle_"), + handle(Smi::FromInt(0), isolate_), + SLOPPY).Check(); + JSObject::SetProperty(global, + factory->NewStringFromAsciiChecked("mirror_cache_"), + factory->NewJSArray(0, FAST_ELEMENTS), + SLOPPY).Check(); } Handle<FixedArray> Debug::GetLoadedScripts() { // Create and fill the script cache when the loaded scripts is requested for // the first time. - if (script_cache_ == NULL) { - CreateScriptCache(); - } - - // If the script cache is not active just return an empty array. - ASSERT(script_cache_ != NULL); - if (script_cache_ == NULL) { - isolate_->factory()->NewFixedArray(0); - } + if (script_cache_ == NULL) script_cache_ = new ScriptCache(isolate_); // Perform GC to get unreferenced scripts evicted from the cache before // returning the content. @@ -2557,184 +2494,115 @@ } -void Debug::AfterGarbageCollection() { - // Generate events for collected scripts. 
- if (script_cache_ != NULL) { - script_cache_->ProcessCollectedScripts(); - } -} - - -Debugger::Debugger(Isolate* isolate) - : debugger_access_(isolate->debugger_access()), - event_listener_(Handle<Object>()), - event_listener_data_(Handle<Object>()), - compiling_natives_(false), - is_loading_debugger_(false), - live_edit_enabled_(true), - never_unload_debugger_(false), - force_debugger_active_(false), - message_handler_(NULL), - debugger_unload_pending_(false), - host_dispatch_handler_(NULL), - debug_message_dispatch_handler_(NULL), - message_dispatch_helper_thread_(NULL), - host_dispatch_period_(TimeDelta::FromMilliseconds(100)), - agent_(NULL), - command_queue_(isolate->logger(), kQueueInitialSize), - command_received_(0), - event_command_queue_(isolate->logger(), kQueueInitialSize), - isolate_(isolate) { -} - - -Debugger::~Debugger() {} - - -Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name, - int argc, - Handle<Object> argv[], - bool* caught_exception) { - ASSERT(isolate_->context() == *isolate_->debug()->debug_context()); - +MaybeHandle<Object> Debug::MakeJSObject(const char* constructor_name, + int argc, + Handle<Object> argv[]) { + AssertDebugContext(); // Create the execution state object. 
- Handle<String> constructor_str = - isolate_->factory()->InternalizeUtf8String(constructor_name); - ASSERT(!constructor_str.is_null()); - Handle<Object> constructor( - isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str), - isolate_); - ASSERT(constructor->IsJSFunction()); - if (!constructor->IsJSFunction()) { - *caught_exception = true; - return isolate_->factory()->undefined_value(); - } - Handle<Object> js_object = Execution::TryCall( - Handle<JSFunction>::cast(constructor), - Handle<JSObject>(isolate_->debug()->debug_context()->global_object()), - argc, - argv, - caught_exception); - return js_object; + Handle<GlobalObject> global(isolate_->global_object()); + Handle<Object> constructor = Object::GetProperty( + isolate_, global, constructor_name).ToHandleChecked(); + DCHECK(constructor->IsJSFunction()); + if (!constructor->IsJSFunction()) return MaybeHandle<Object>(); + // We do not handle interrupts here. In particular, termination interrupts. + PostponeInterruptsScope no_interrupts(isolate_); + return Execution::TryCall(Handle<JSFunction>::cast(constructor), + handle(debug_context()->global_proxy()), + argc, + argv); } -Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) { +MaybeHandle<Object> Debug::MakeExecutionState() { // Create the execution state object. - Handle<Object> break_id = isolate_->factory()->NewNumberFromInt( - isolate_->debug()->break_id()); - Handle<Object> argv[] = { break_id }; - return MakeJSObject(CStrVector("MakeExecutionState"), - ARRAY_SIZE(argv), - argv, - caught_exception); + Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) }; + return MakeJSObject("MakeExecutionState", ARRAY_SIZE(argv), argv); } -Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state, - Handle<Object> break_points_hit, - bool* caught_exception) { +MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) { // Create the new break event object. 
- Handle<Object> argv[] = { exec_state, break_points_hit }; - return MakeJSObject(CStrVector("MakeBreakEvent"), - ARRAY_SIZE(argv), - argv, - caught_exception); + Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()), + break_points_hit }; + return MakeJSObject("MakeBreakEvent", ARRAY_SIZE(argv), argv); } -Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state, - Handle<Object> exception, - bool uncaught, - bool* caught_exception) { - Factory* factory = isolate_->factory(); +MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception, + bool uncaught, + Handle<Object> promise) { // Create the new exception event object. - Handle<Object> argv[] = { exec_state, + Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()), exception, - factory->ToBoolean(uncaught) }; - return MakeJSObject(CStrVector("MakeExceptionEvent"), - ARRAY_SIZE(argv), - argv, - caught_exception); + isolate_->factory()->ToBoolean(uncaught), + promise }; + return MakeJSObject("MakeExceptionEvent", ARRAY_SIZE(argv), argv); } -Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function, - bool* caught_exception) { - // Create the new function event object. - Handle<Object> argv[] = { function }; - return MakeJSObject(CStrVector("MakeNewFunctionEvent"), - ARRAY_SIZE(argv), - argv, - caught_exception); +MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script, + v8::DebugEvent type) { + // Create the compile event object. + Handle<Object> script_wrapper = Script::GetWrapper(script); + Handle<Object> argv[] = { script_wrapper, + isolate_->factory()->NewNumberFromInt(type) }; + return MakeJSObject("MakeCompileEvent", ARRAY_SIZE(argv), argv); } -Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script, - bool before, - bool* caught_exception) { - Factory* factory = isolate_->factory(); - // Create the compile event object. 
- Handle<Object> exec_state = MakeExecutionState(caught_exception); - Handle<Object> script_wrapper = GetScriptWrapper(script); - Handle<Object> argv[] = { exec_state, - script_wrapper, - factory->ToBoolean(before) }; - return MakeJSObject(CStrVector("MakeCompileEvent"), - ARRAY_SIZE(argv), - argv, - caught_exception); +MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) { + // Create the promise event object. + Handle<Object> argv[] = { event_data }; + return MakeJSObject("MakePromiseEvent", ARRAY_SIZE(argv), argv); } -Handle<Object> Debugger::MakeScriptCollectedEvent(int id, - bool* caught_exception) { - // Create the script collected event object. - Handle<Object> exec_state = MakeExecutionState(caught_exception); - Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_); - Handle<Object> argv[] = { exec_state, id_object }; +MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) { + // Create the async task event object. + Handle<Object> argv[] = { task_event }; + return MakeJSObject("MakeAsyncTaskEvent", ARRAY_SIZE(argv), argv); +} + - return MakeJSObject(CStrVector("MakeScriptCollectedEvent"), - ARRAY_SIZE(argv), - argv, - caught_exception); +void Debug::OnThrow(Handle<Object> exception, bool uncaught) { + if (in_debug_scope() || ignore_events()) return; + HandleScope scope(isolate_); + OnException(exception, uncaught, GetPromiseOnStackOnThrow()); } -void Debugger::OnException(Handle<Object> exception, bool uncaught) { +void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) { + if (in_debug_scope() || ignore_events()) return; HandleScope scope(isolate_); - Debug* debug = isolate_->debug(); + OnException(value, false, promise); +} - // Bail out based on state or if there is no listener for this event - if (debug->InDebugger()) return; - if (!Debugger::EventActive(v8::Exception)) return; +void Debug::OnException(Handle<Object> exception, bool uncaught, + Handle<Object> promise) { + if 
(promise->IsJSObject()) { + uncaught |= !PromiseHasRejectHandler(Handle<JSObject>::cast(promise)); + } // Bail out if exception breaks are not active if (uncaught) { // Uncaught exceptions are reported by either flags. - if (!(debug->break_on_uncaught_exception() || - debug->break_on_exception())) return; + if (!(break_on_uncaught_exception_ || break_on_exception_)) return; } else { // Caught exceptions are reported is activated. - if (!debug->break_on_exception()) return; + if (!break_on_exception_) return; } - // Enter the debugger. - EnterDebugger debugger(isolate_); - if (debugger.FailedToEnter()) return; + DebugScope debug_scope(this); + if (debug_scope.failed()) return; // Clear all current stepping setup. - debug->ClearStepping(); + ClearStepping(); + // Create the event data object. - bool caught_exception = false; - Handle<Object> exec_state = MakeExecutionState(&caught_exception); Handle<Object> event_data; - if (!caught_exception) { - event_data = MakeExceptionEvent(exec_state, exception, uncaught, - &caught_exception); - } // Bail out and don't call debugger if exception. - if (caught_exception) { + if (!MakeExceptionEvent( + exception, uncaught, promise).ToHandle(&event_data)) { return; } @@ -2744,31 +2612,36 @@ } -void Debugger::OnDebugBreak(Handle<Object> break_points_hit, - bool auto_continue) { +void Debug::OnCompileError(Handle<Script> script) { + // No more to do if not debugging. + if (in_debug_scope() || ignore_events()) return; + HandleScope scope(isolate_); + DebugScope debug_scope(this); + if (debug_scope.failed()) return; - // Debugger has already been entered by caller. - ASSERT(isolate_->context() == *isolate_->debug()->debug_context()); + // Create the compile state object. + Handle<Object> event_data; + // Bail out and don't call debugger if exception. + if (!MakeCompileEvent(script, v8::CompileError).ToHandle(&event_data)) return; + + // Process debug event. 
+ ProcessDebugEvent(v8::CompileError, Handle<JSObject>::cast(event_data), true); +} - // Bail out if there is no listener for this event - if (!Debugger::EventActive(v8::Break)) return; - // Debugger must be entered in advance. - ASSERT(isolate_->context() == *isolate_->debug()->debug_context()); +void Debug::OnDebugBreak(Handle<Object> break_points_hit, + bool auto_continue) { + // The caller provided for DebugScope. + AssertDebugContext(); + // Bail out if there is no listener for this event + if (ignore_events()) return; + HandleScope scope(isolate_); // Create the event data object. - bool caught_exception = false; - Handle<Object> exec_state = MakeExecutionState(&caught_exception); Handle<Object> event_data; - if (!caught_exception) { - event_data = MakeBreakEvent(exec_state, break_points_hit, - &caught_exception); - } // Bail out and don't call debugger if exception. - if (caught_exception) { - return; - } + if (!MakeBreakEvent(break_points_hit).ToHandle(&event_data)) return; // Process debug event. ProcessDebugEvent(v8::Break, @@ -2777,25 +2650,18 @@ } -void Debugger::OnBeforeCompile(Handle<Script> script) { - HandleScope scope(isolate_); +void Debug::OnBeforeCompile(Handle<Script> script) { + if (in_debug_scope() || ignore_events()) return; - // Bail out based on state or if there is no listener for this event - if (isolate_->debug()->InDebugger()) return; - if (compiling_natives()) return; - if (!EventActive(v8::BeforeCompile)) return; - - // Enter the debugger. - EnterDebugger debugger(isolate_); - if (debugger.FailedToEnter()) return; + HandleScope scope(isolate_); + DebugScope debug_scope(this); + if (debug_scope.failed()) return; // Create the event data object. - bool caught_exception = false; - Handle<Object> event_data = MakeCompileEvent(script, true, &caught_exception); + Handle<Object> event_data; // Bail out and don't call debugger if exception. 
- if (caught_exception) { + if (!MakeCompileEvent(script, v8::BeforeCompile).ToHandle(&event_data)) return; - } // Process debug event. ProcessDebugEvent(v8::BeforeCompile, @@ -2805,26 +2671,16 @@ // Handle debugger actions when a new script is compiled. -void Debugger::OnAfterCompile(Handle<Script> script, - AfterCompileFlags after_compile_flags) { - HandleScope scope(isolate_); - Debug* debug = isolate_->debug(); - +void Debug::OnAfterCompile(Handle<Script> script) { // Add the newly compiled script to the script cache. - debug->AddScriptToScriptCache(script); + if (script_cache_ != NULL) script_cache_->Add(script); // No more to do if not debugging. - if (!IsDebuggerActive()) return; - - // No compile events while compiling natives. - if (compiling_natives()) return; - - // Store whether in debugger before entering debugger. - bool in_debugger = debug->InDebugger(); + if (in_debug_scope() || ignore_events()) return; - // Enter the debugger. - EnterDebugger debugger(isolate_); - if (debugger.FailedToEnter()) return; + HandleScope scope(isolate_); + DebugScope debug_scope(this); + if (debug_scope.failed()) return; // If debugging there might be script break points registered for this // script. Make sure that these break points are set. 
@@ -2833,94 +2689,86 @@ Handle<String> update_script_break_points_string = isolate_->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("UpdateScriptBreakPoints")); + Handle<GlobalObject> debug_global(debug_context()->global_object()); Handle<Object> update_script_break_points = - Handle<Object>( - debug->debug_context()->global_object()->GetPropertyNoExceptionThrown( - *update_script_break_points_string), - isolate_); + Object::GetProperty( + debug_global, update_script_break_points_string).ToHandleChecked(); if (!update_script_break_points->IsJSFunction()) { return; } - ASSERT(update_script_break_points->IsJSFunction()); + DCHECK(update_script_break_points->IsJSFunction()); // Wrap the script object in a proper JS object before passing it // to JavaScript. - Handle<JSValue> wrapper = GetScriptWrapper(script); + Handle<Object> wrapper = Script::GetWrapper(script); // Call UpdateScriptBreakPoints expect no exceptions. - bool caught_exception; Handle<Object> argv[] = { wrapper }; - Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points), - isolate_->js_builtins_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); - if (caught_exception) { + if (Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points), + isolate_->js_builtins_object(), + ARRAY_SIZE(argv), + argv).is_null()) { return; } - // Bail out based on state or if there is no listener for this event - if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return; - if (!Debugger::EventActive(v8::AfterCompile)) return; // Create the compile state object. - Handle<Object> event_data = MakeCompileEvent(script, - false, - &caught_exception); + Handle<Object> event_data; // Bail out and don't call debugger if exception. - if (caught_exception) { - return; - } + if (!MakeCompileEvent(script, v8::AfterCompile).ToHandle(&event_data)) return; + // Process debug event. 
- ProcessDebugEvent(v8::AfterCompile, - Handle<JSObject>::cast(event_data), - true); + ProcessDebugEvent(v8::AfterCompile, Handle<JSObject>::cast(event_data), true); } -void Debugger::OnScriptCollected(int id) { +void Debug::OnPromiseEvent(Handle<JSObject> data) { + if (in_debug_scope() || ignore_events()) return; + HandleScope scope(isolate_); + DebugScope debug_scope(this); + if (debug_scope.failed()) return; - // No more to do if not debugging. - if (isolate_->debug()->InDebugger()) return; - if (!IsDebuggerActive()) return; - if (!Debugger::EventActive(v8::ScriptCollected)) return; + // Create the script collected state object. + Handle<Object> event_data; + // Bail out and don't call debugger if exception. + if (!MakePromiseEvent(data).ToHandle(&event_data)) return; - // Enter the debugger. - EnterDebugger debugger(isolate_); - if (debugger.FailedToEnter()) return; + // Process debug event. + ProcessDebugEvent(v8::PromiseEvent, + Handle<JSObject>::cast(event_data), + true); +} + + +void Debug::OnAsyncTaskEvent(Handle<JSObject> data) { + if (in_debug_scope() || ignore_events()) return; + + HandleScope scope(isolate_); + DebugScope debug_scope(this); + if (debug_scope.failed()) return; // Create the script collected state object. - bool caught_exception = false; - Handle<Object> event_data = MakeScriptCollectedEvent(id, - &caught_exception); + Handle<Object> event_data; // Bail out and don't call debugger if exception. - if (caught_exception) { - return; - } + if (!MakeAsyncTaskEvent(data).ToHandle(&event_data)) return; // Process debug event. 
- ProcessDebugEvent(v8::ScriptCollected, + ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data), true); } -void Debugger::ProcessDebugEvent(v8::DebugEvent event, - Handle<JSObject> event_data, - bool auto_continue) { +void Debug::ProcessDebugEvent(v8::DebugEvent event, + Handle<JSObject> event_data, + bool auto_continue) { HandleScope scope(isolate_); - // Clear any pending debug break if this is a real break. - if (!auto_continue) { - isolate_->debug()->clear_interrupt_pending(DEBUGBREAK); - } - // Create the execution state. - bool caught_exception = false; - Handle<Object> exec_state = MakeExecutionState(&caught_exception); - if (caught_exception) { - return; - } + Handle<Object> exec_state; + // Bail out and don't call debugger if exception. + if (!MakeExecutionState().ToHandle(&exec_state)) return; + // First notify the message handler if any. if (message_handler_ != NULL) { NotifyMessageHandler(event, @@ -2950,89 +2798,52 @@ } -void Debugger::CallEventCallback(v8::DebugEvent event, - Handle<Object> exec_state, - Handle<Object> event_data, - v8::Debug::ClientData* client_data) { +void Debug::CallEventCallback(v8::DebugEvent event, + Handle<Object> exec_state, + Handle<Object> event_data, + v8::Debug::ClientData* client_data) { if (event_listener_->IsForeign()) { - CallCEventCallback(event, exec_state, event_data, client_data); + // Invoke the C debug event listener. + v8::Debug::EventCallback callback = + FUNCTION_CAST<v8::Debug::EventCallback>( + Handle<Foreign>::cast(event_listener_)->foreign_address()); + EventDetailsImpl event_details(event, + Handle<JSObject>::cast(exec_state), + Handle<JSObject>::cast(event_data), + event_listener_data_, + client_data); + callback(event_details); + DCHECK(!isolate_->has_scheduled_exception()); } else { - CallJSEventCallback(event, exec_state, event_data); + // Invoke the JavaScript debug event listener. 
+ DCHECK(event_listener_->IsJSFunction()); + Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_), + exec_state, + event_data, + event_listener_data_ }; + Handle<JSReceiver> global(isolate_->global_proxy()); + Execution::TryCall(Handle<JSFunction>::cast(event_listener_), + global, ARRAY_SIZE(argv), argv); } } -void Debugger::CallCEventCallback(v8::DebugEvent event, - Handle<Object> exec_state, - Handle<Object> event_data, - v8::Debug::ClientData* client_data) { - Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_)); - v8::Debug::EventCallback2 callback = - FUNCTION_CAST<v8::Debug::EventCallback2>( - callback_obj->foreign_address()); - EventDetailsImpl event_details( - event, - Handle<JSObject>::cast(exec_state), - Handle<JSObject>::cast(event_data), - event_listener_data_, - client_data); - callback(event_details); -} - - -void Debugger::CallJSEventCallback(v8::DebugEvent event, - Handle<Object> exec_state, - Handle<Object> event_data) { - ASSERT(event_listener_->IsJSFunction()); - Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_)); - - // Invoke the JavaScript debug event listener. - Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_), - exec_state, - event_data, - event_listener_data_ }; - bool caught_exception; - Execution::TryCall(fun, - isolate_->global_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); - // Silently ignore exceptions from debug event listeners. -} - - -Handle<Context> Debugger::GetDebugContext() { - never_unload_debugger_ = true; - EnterDebugger debugger(isolate_); - return isolate_->debug()->debug_context(); -} - - -void Debugger::UnloadDebugger() { - Debug* debug = isolate_->debug(); - - // Make sure that there are no breakpoints left. - debug->ClearAllBreakPoints(); - - // Unload the debugger if feasible. - if (!never_unload_debugger_) { - debug->Unload(); - } - - // Clear the flag indicating that the debugger should be unloaded. 
- debugger_unload_pending_ = false; +Handle<Context> Debug::GetDebugContext() { + DebugScope debug_scope(this); + // The global handle may be destroyed soon after. Return it reboxed. + return handle(*debug_context(), isolate_); } -void Debugger::NotifyMessageHandler(v8::DebugEvent event, - Handle<JSObject> exec_state, - Handle<JSObject> event_data, - bool auto_continue) { - v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_); +void Debug::NotifyMessageHandler(v8::DebugEvent event, + Handle<JSObject> exec_state, + Handle<JSObject> event_data, + bool auto_continue) { + // Prevent other interrupts from triggering, for example API callbacks, + // while dispatching message handler callbacks. + PostponeInterruptsScope no_interrupts(isolate_); + DCHECK(is_active_); HandleScope scope(isolate_); - - if (!isolate_->debug()->Load()) return; - // Process the individual events. bool sendEventMessage = false; switch (event) { @@ -3048,9 +2859,6 @@ case v8::AfterCompile: sendEventMessage = true; break; - case v8::ScriptCollected: - sendEventMessage = true; - break; case v8::NewFunction: break; default: @@ -3060,8 +2868,8 @@ // The debug command interrupt flag might have been set when the command was // added. It should be enough to clear the flag only once while we are in the // debugger. - ASSERT(isolate_->debug()->InDebugger()); - isolate_->stack_guard()->Continue(DEBUGCOMMAND); + DCHECK(in_debug_scope()); + isolate_->stack_guard()->ClearDebugCommand(); // Notify the debugger that a debug event has occurred unless auto continue is // active in which case no event is send. @@ -3078,216 +2886,135 @@ // in the queue if any. For script collected events don't even process // messages in the queue as the execution state might not be what is expected // by the client. - if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) { - return; - } - - v8::TryCatch try_catch; + if (auto_continue && !has_commands()) return; // DebugCommandProcessor goes here. 
- v8::Local<v8::Object> cmd_processor; - { - v8::Local<v8::Object> api_exec_state = - v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)); - v8::Local<v8::String> fun_name = v8::String::NewFromUtf8( - isolate, "debugCommandProcessor"); - v8::Local<v8::Function> fun = - v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name)); - - v8::Handle<v8::Boolean> running = v8::Boolean::New(isolate, auto_continue); - static const int kArgc = 1; - v8::Handle<Value> argv[kArgc] = { running }; - cmd_processor = v8::Local<v8::Object>::Cast( - fun->Call(api_exec_state, kArgc, argv)); - if (try_catch.HasCaught()) { - PrintLn(try_catch.Exception()); - return; - } - } - bool running = auto_continue; + Handle<Object> cmd_processor_ctor = Object::GetProperty( + isolate_, exec_state, "debugCommandProcessor").ToHandleChecked(); + Handle<Object> ctor_args[] = { isolate_->factory()->ToBoolean(running) }; + Handle<Object> cmd_processor = Execution::Call( + isolate_, cmd_processor_ctor, exec_state, 1, ctor_args).ToHandleChecked(); + Handle<JSFunction> process_debug_request = Handle<JSFunction>::cast( + Object::GetProperty( + isolate_, cmd_processor, "processDebugRequest").ToHandleChecked()); + Handle<Object> is_running = Object::GetProperty( + isolate_, cmd_processor, "isRunning").ToHandleChecked(); + // Process requests from the debugger. - while (true) { + do { // Wait for new command in the queue. - if (Debugger::host_dispatch_handler_) { - // In case there is a host dispatch - do periodic dispatches. - if (!command_received_.WaitFor(host_dispatch_period_)) { - // Timout expired, do the dispatch. - Debugger::host_dispatch_handler_(); - continue; - } - } else { - // In case there is no host dispatch - just wait. - command_received_.Wait(); - } + command_received_.Wait(); // Get the command from the queue. 
CommandMessage command = command_queue_.Get(); isolate_->logger()->DebugTag( "Got request from command queue, in interactive loop."); - if (!Debugger::IsDebuggerActive()) { + if (!is_active()) { // Delete command text and user data. command.Dispose(); return; } - // Invoke JavaScript to process the debug request. - v8::Local<v8::String> fun_name; - v8::Local<v8::Function> fun; - v8::Local<v8::Value> request; - v8::TryCatch try_catch; - fun_name = v8::String::NewFromUtf8(isolate, "processDebugRequest"); - fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name)); - - request = v8::String::NewFromTwoByte(isolate, command.text().start(), - v8::String::kNormalString, - command.text().length()); - static const int kArgc = 1; - v8::Handle<Value> argv[kArgc] = { request }; - v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv); - - // Get the response. - v8::Local<v8::String> response; - if (!try_catch.HasCaught()) { - // Get response string. - if (!response_val->IsUndefined()) { - response = v8::Local<v8::String>::Cast(response_val); + Vector<const uc16> command_text( + const_cast<const uc16*>(command.text().start()), + command.text().length()); + Handle<String> request_text = isolate_->factory()->NewStringFromTwoByte( + command_text).ToHandleChecked(); + Handle<Object> request_args[] = { request_text }; + Handle<Object> exception; + Handle<Object> answer_value; + Handle<String> answer; + MaybeHandle<Object> maybe_result = Execution::TryCall( + process_debug_request, cmd_processor, 1, request_args, &exception); + + if (maybe_result.ToHandle(&answer_value)) { + if (answer_value->IsUndefined()) { + answer = isolate_->factory()->empty_string(); } else { - response = v8::String::NewFromUtf8(isolate, ""); + answer = Handle<String>::cast(answer_value); } // Log the JSON request/response. 
if (FLAG_trace_debug_json) { - PrintLn(request); - PrintLn(response); + PrintF("%s\n", request_text->ToCString().get()); + PrintF("%s\n", answer->ToCString().get()); } - // Get the running state. - fun_name = v8::String::NewFromUtf8(isolate, "isRunning"); - fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name)); - static const int kArgc = 1; - v8::Handle<Value> argv[kArgc] = { response }; - v8::Local<v8::Value> running_val = fun->Call(cmd_processor, kArgc, argv); - if (!try_catch.HasCaught()) { - running = running_val->ToBoolean()->Value(); - } + Handle<Object> is_running_args[] = { answer }; + maybe_result = Execution::Call( + isolate_, is_running, cmd_processor, 1, is_running_args); + running = maybe_result.ToHandleChecked()->IsTrue(); } else { - // In case of failure the result text is the exception text. - response = try_catch.Exception()->ToString(); + answer = Handle<String>::cast( + Execution::ToString(isolate_, exception).ToHandleChecked()); } // Return the result. MessageImpl message = MessageImpl::NewResponse( - event, - running, - Handle<JSObject>::cast(exec_state), - Handle<JSObject>::cast(event_data), - Handle<String>(Utils::OpenHandle(*response)), - command.client_data()); + event, running, exec_state, event_data, answer, command.client_data()); InvokeMessageHandler(message); command.Dispose(); // Return from debug event processing if either the VM is put into the // running state (through a continue command) or auto continue is active // and there are no more commands queued. - if (running && !HasCommands()) { - return; - } - } + } while (!running || has_commands()); } -void Debugger::SetEventListener(Handle<Object> callback, - Handle<Object> data) { - HandleScope scope(isolate_); +void Debug::SetEventListener(Handle<Object> callback, + Handle<Object> data) { GlobalHandles* global_handles = isolate_->global_handles(); - // Clear the global handles for the event listener and the event listener data - // object. 
- if (!event_listener_.is_null()) { - GlobalHandles::Destroy( - reinterpret_cast<Object**>(event_listener_.location())); - event_listener_ = Handle<Object>(); - } - if (!event_listener_data_.is_null()) { - GlobalHandles::Destroy( - reinterpret_cast<Object**>(event_listener_data_.location())); - event_listener_data_ = Handle<Object>(); - } + // Remove existing entry. + GlobalHandles::Destroy(event_listener_.location()); + event_listener_ = Handle<Object>(); + GlobalHandles::Destroy(event_listener_data_.location()); + event_listener_data_ = Handle<Object>(); - // If there is a new debug event listener register it together with its data - // object. + // Set new entry. if (!callback->IsUndefined() && !callback->IsNull()) { - event_listener_ = Handle<Object>::cast( - global_handles->Create(*callback)); - if (data.is_null()) { - data = isolate_->factory()->undefined_value(); - } - event_listener_data_ = Handle<Object>::cast( - global_handles->Create(*data)); + event_listener_ = global_handles->Create(*callback); + if (data.is_null()) data = isolate_->factory()->undefined_value(); + event_listener_data_ = global_handles->Create(*data); } - ListenersChanged(); + UpdateState(); } -void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) { - LockGuard<RecursiveMutex> with(debugger_access_); - +void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) { message_handler_ = handler; - ListenersChanged(); - if (handler == NULL) { + UpdateState(); + if (handler == NULL && in_debug_scope()) { // Send an empty command to the debugger if in a break to make JavaScript // run again if the debugger is closed. - if (isolate_->debug()->InDebugger()) { - ProcessCommand(Vector<const uint16_t>::empty()); - } + EnqueueCommandMessage(Vector<const uint16_t>::empty()); } } -void Debugger::ListenersChanged() { - if (IsDebuggerActive()) { - // Disable the compilation cache when the debugger is active. 
+ +void Debug::UpdateState() { + is_active_ = message_handler_ != NULL || !event_listener_.is_null(); + if (is_active_ || in_debug_scope()) { + // Note that the debug context could have already been loaded to + // bootstrap test cases. isolate_->compilation_cache()->Disable(); - debugger_unload_pending_ = false; - } else { + is_active_ = Load(); + } else if (is_loaded()) { isolate_->compilation_cache()->Enable(); - // Unload the debugger if event listener and message handler cleared. - // Schedule this for later, because we may be in non-V8 thread. - debugger_unload_pending_ = true; - } -} - - -void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler, - TimeDelta period) { - host_dispatch_handler_ = handler; - host_dispatch_period_ = period; -} - - -void Debugger::SetDebugMessageDispatchHandler( - v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) { - LockGuard<Mutex> lock_guard(&dispatch_handler_access_); - debug_message_dispatch_handler_ = handler; - - if (provide_locker && message_dispatch_helper_thread_ == NULL) { - message_dispatch_helper_thread_ = new MessageDispatchHelperThread(isolate_); - message_dispatch_helper_thread_->Start(); + Unload(); } } // Calls the registered debug message handler. This callback is part of the // public API. -void Debugger::InvokeMessageHandler(MessageImpl message) { - LockGuard<RecursiveMutex> with(debugger_access_); - - if (message_handler_ != NULL) { - message_handler_(message); - } +void Debug::InvokeMessageHandler(MessageImpl message) { + if (message_handler_ != NULL) message_handler_(message); } @@ -3295,8 +3022,8 @@ // a copy of the command string managed by the debugger. Up to this // point, the command data was managed by the API client. Called // by the API client thread. 
-void Debugger::ProcessCommand(Vector<const uint16_t> command, - v8::Debug::ClientData* client_data) { +void Debug::EnqueueCommandMessage(Vector<const uint16_t> command, + v8::Debug::ClientData* client_data) { // Need to cast away const. CommandMessage message = CommandMessage::New( Vector<uint16_t>(const_cast<uint16_t*>(command.start()), @@ -3307,219 +3034,139 @@ command_received_.Signal(); // Set the debug command break flag to have the command processed. - if (!isolate_->debug()->InDebugger()) { - isolate_->stack_guard()->DebugCommand(); - } - - MessageDispatchHelperThread* dispatch_thread; - { - LockGuard<Mutex> lock_guard(&dispatch_handler_access_); - dispatch_thread = message_dispatch_helper_thread_; - } - - if (dispatch_thread == NULL) { - CallMessageDispatchHandler(); - } else { - dispatch_thread->Schedule(); - } -} - - -bool Debugger::HasCommands() { - return !command_queue_.IsEmpty(); + if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand(); } -void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) { +void Debug::EnqueueDebugCommand(v8::Debug::ClientData* client_data) { CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data); event_command_queue_.Put(message); // Set the debug command break flag to have the command processed. - if (!isolate_->debug()->InDebugger()) { - isolate_->stack_guard()->DebugCommand(); - } + if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand(); } -bool Debugger::IsDebuggerActive() { - LockGuard<RecursiveMutex> with(debugger_access_); - - return message_handler_ != NULL || - !event_listener_.is_null() || - force_debugger_active_; -} - - -Handle<Object> Debugger::Call(Handle<JSFunction> fun, - Handle<Object> data, - bool* pending_exception) { - // When calling functions in the debugger prevent it from beeing unloaded. - Debugger::never_unload_debugger_ = true; - - // Enter the debugger. 
- EnterDebugger debugger(isolate_); - if (debugger.FailedToEnter()) { - return isolate_->factory()->undefined_value(); - } +MaybeHandle<Object> Debug::Call(Handle<JSFunction> fun, Handle<Object> data) { + DebugScope debug_scope(this); + if (debug_scope.failed()) return isolate_->factory()->undefined_value(); // Create the execution state. - bool caught_exception = false; - Handle<Object> exec_state = MakeExecutionState(&caught_exception); - if (caught_exception) { + Handle<Object> exec_state; + if (!MakeExecutionState().ToHandle(&exec_state)) { return isolate_->factory()->undefined_value(); } Handle<Object> argv[] = { exec_state, data }; - Handle<Object> result = Execution::Call( + return Execution::Call( isolate_, fun, - Handle<Object>(isolate_->debug()->debug_context_->global_proxy(), - isolate_), + Handle<Object>(debug_context()->global_proxy(), isolate_), ARRAY_SIZE(argv), - argv, - pending_exception); - return result; + argv); } -static void StubMessageHandler2(const v8::Debug::Message& message) { - // Simply ignore message. -} +void Debug::HandleDebugBreak() { + // Ignore debug break during bootstrapping. + if (isolate_->bootstrapper()->IsActive()) return; + // Just continue if breaks are disabled. + if (break_disabled_) return; + // Ignore debug break if debugger is not active. + if (!is_active()) return; + StackLimitCheck check(isolate_); + if (check.HasOverflowed()) return; -bool Debugger::StartAgent(const char* name, int port, - bool wait_for_connection) { - if (wait_for_connection) { - // Suspend V8 if it is already running or set V8 to suspend whenever - // it starts. - // Provide stub message handler; V8 auto-continues each suspend - // when there is no message handler; we doesn't need it. - // Once become suspended, V8 will stay so indefinitely long, until remote - // debugger connects and issues "continue" command. 
- Debugger::message_handler_ = StubMessageHandler2; - v8::Debug::DebugBreak(); - } - - if (agent_ == NULL) { - agent_ = new DebuggerAgent(isolate_, name, port); - agent_->Start(); + { JavaScriptFrameIterator it(isolate_); + DCHECK(!it.done()); + Object* fun = it.frame()->function(); + if (fun && fun->IsJSFunction()) { + // Don't stop in builtin functions. + if (JSFunction::cast(fun)->IsBuiltin()) return; + GlobalObject* global = JSFunction::cast(fun)->context()->global_object(); + // Don't stop in debugger functions. + if (IsDebugGlobal(global)) return; + } } - return true; -} + // Collect the break state before clearing the flags. + bool debug_command_only = isolate_->stack_guard()->CheckDebugCommand() && + !isolate_->stack_guard()->CheckDebugBreak(); -void Debugger::StopAgent() { - if (agent_ != NULL) { - agent_->Shutdown(); - agent_->Join(); - delete agent_; - agent_ = NULL; - } + isolate_->stack_guard()->ClearDebugBreak(); + + ProcessDebugMessages(debug_command_only); } -void Debugger::WaitForAgent() { - if (agent_ != NULL) - agent_->WaitUntilListening(); -} +void Debug::ProcessDebugMessages(bool debug_command_only) { + isolate_->stack_guard()->ClearDebugCommand(); + StackLimitCheck check(isolate_); + if (check.HasOverflowed()) return; -void Debugger::CallMessageDispatchHandler() { - v8::Debug::DebugMessageDispatchHandler handler; - { - LockGuard<Mutex> lock_guard(&dispatch_handler_access_); - handler = Debugger::debug_message_dispatch_handler_; - } - if (handler != NULL) { - handler(); - } -} + HandleScope scope(isolate_); + DebugScope debug_scope(this); + if (debug_scope.failed()) return; + // Notify the debug event listeners. Indicate auto continue if the break was + // a debug command break. 
+ OnDebugBreak(isolate_->factory()->undefined_value(), debug_command_only); +} -EnterDebugger::EnterDebugger(Isolate* isolate) - : isolate_(isolate), - prev_(isolate_->debug()->debugger_entry()), - it_(isolate_), - has_js_frames_(!it_.done()), - save_(isolate_) { - Debug* debug = isolate_->debug(); - ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT)); - ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK)); +DebugScope::DebugScope(Debug* debug) + : debug_(debug), + prev_(debug->debugger_entry()), + save_(debug_->isolate_), + no_termination_exceptons_(debug_->isolate_, + StackGuard::TERMINATE_EXECUTION) { // Link recursive debugger entry. - debug->set_debugger_entry(this); + debug_->thread_local_.current_debug_scope_ = this; // Store the previous break id and frame id. - break_id_ = debug->break_id(); - break_frame_id_ = debug->break_frame_id(); + break_id_ = debug_->break_id(); + break_frame_id_ = debug_->break_frame_id(); // Create the new break info. If there is no JavaScript frames there is no // break frame id. - if (has_js_frames_) { - debug->NewBreak(it_.frame()->id()); - } else { - debug->NewBreak(StackFrame::NO_ID); - } + JavaScriptFrameIterator it(isolate()); + bool has_js_frames = !it.done(); + debug_->thread_local_.break_frame_id_ = has_js_frames ? it.frame()->id() + : StackFrame::NO_ID; + debug_->SetNextBreakId(); + debug_->UpdateState(); // Make sure that debugger is loaded and enter the debugger context. - load_failed_ = !debug->Load(); - if (!load_failed_) { - // NOTE the member variable save which saves the previous context before - // this change. - isolate_->set_context(*debug->debug_context()); - } + // The previous context is kept in save_. + failed_ = !debug_->is_loaded(); + if (!failed_) isolate()->set_context(*debug->debug_context()); } -EnterDebugger::~EnterDebugger() { - Debug* debug = isolate_->debug(); - - // Restore to the previous break state. 
- debug->SetBreak(break_frame_id_, break_id_); - // Check for leaving the debugger. - if (!load_failed_ && prev_ == NULL) { +DebugScope::~DebugScope() { + if (!failed_ && prev_ == NULL) { // Clear mirror cache when leaving the debugger. Skip this if there is a // pending exception as clearing the mirror cache calls back into // JavaScript. This can happen if the v8::Debug::Call is used in which // case the exception should end up in the calling code. - if (!isolate_->has_pending_exception()) { - // Try to avoid any pending debug break breaking in the clear mirror - // cache JavaScript code. - if (isolate_->stack_guard()->IsDebugBreak()) { - debug->set_interrupts_pending(DEBUGBREAK); - isolate_->stack_guard()->Continue(DEBUGBREAK); - } - debug->ClearMirrorCache(); - } - - // Request preemption and debug break when leaving the last debugger entry - // if any of these where recorded while debugging. - if (debug->is_interrupt_pending(PREEMPT)) { - // This re-scheduling of preemption is to avoid starvation in some - // debugging scenarios. - debug->clear_interrupt_pending(PREEMPT); - isolate_->stack_guard()->Preempt(); - } - if (debug->is_interrupt_pending(DEBUGBREAK)) { - debug->clear_interrupt_pending(DEBUGBREAK); - isolate_->stack_guard()->DebugBreak(); - } + if (!isolate()->has_pending_exception()) debug_->ClearMirrorCache(); // If there are commands in the queue when leaving the debugger request // that these commands are processed. - if (isolate_->debugger()->HasCommands()) { - isolate_->stack_guard()->DebugCommand(); - } - - // If leaving the debugger with the debugger no longer active unload it. - if (!isolate_->debugger()->IsDebuggerActive()) { - isolate_->debugger()->UnloadDebugger(); - } + if (debug_->has_commands()) isolate()->stack_guard()->RequestDebugCommand(); } // Leaving this debugger entry. - debug->set_debugger_entry(prev_); + debug_->thread_local_.current_debug_scope_ = prev_; + + // Restore to the previous break state. 
+ debug_->thread_local_.break_frame_id_ = break_frame_id_; + debug_->thread_local_.break_id_ = break_id_; + + debug_->UpdateState(); } @@ -3597,20 +3244,21 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const { - v8::EscapableHandleScope scope( - reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate())); + Isolate* isolate = event_data_->GetIsolate(); + v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate)); if (IsEvent()) { // Call toJSONProtocol on the debug event object. - Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol"); + Handle<Object> fun = Object::GetProperty( + isolate, event_data_, "toJSONProtocol").ToHandleChecked(); if (!fun->IsJSFunction()) { return v8::Handle<v8::String>(); } - bool caught_exception; - Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun), - event_data_, - 0, NULL, &caught_exception); - if (caught_exception || !json->IsString()) { + + MaybeHandle<Object> maybe_json = + Execution::TryCall(Handle<JSFunction>::cast(fun), event_data_, 0, NULL); + Handle<Object> json; + if (!maybe_json.ToHandle(&json) || !json->IsString()) { return v8::Handle<v8::String>(); } return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json))); @@ -3624,7 +3272,7 @@ Isolate* isolate = event_data_->GetIsolate(); v8::Handle<v8::Context> context = GetDebugEventContext(isolate); // Isolate::context() may be NULL when "script collected" event occures. 
- ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected); + DCHECK(!context.IsEmpty()); return context; } @@ -3688,10 +3336,6 @@ } -CommandMessage::~CommandMessage() { -} - - void CommandMessage::Dispose() { text_.Dispose(); delete client_data_; @@ -3712,16 +3356,13 @@ CommandMessageQueue::~CommandMessageQueue() { - while (!IsEmpty()) { - CommandMessage m = Get(); - m.Dispose(); - } + while (!IsEmpty()) Get().Dispose(); DeleteArray(messages_); } CommandMessage CommandMessageQueue::Get() { - ASSERT(!IsEmpty()); + DCHECK(!IsEmpty()); int result = start_; start_ = (start_ + 1) % size_; return messages_[result]; @@ -3756,13 +3397,13 @@ bool LockingCommandMessageQueue::IsEmpty() const { - LockGuard<Mutex> lock_guard(&mutex_); + base::LockGuard<base::Mutex> lock_guard(&mutex_); return queue_.IsEmpty(); } CommandMessage LockingCommandMessageQueue::Get() { - LockGuard<Mutex> lock_guard(&mutex_); + base::LockGuard<base::Mutex> lock_guard(&mutex_); CommandMessage result = queue_.Get(); logger_->DebugEvent("Get", result.text()); return result; @@ -3770,51 +3411,15 @@ void LockingCommandMessageQueue::Put(const CommandMessage& message) { - LockGuard<Mutex> lock_guard(&mutex_); + base::LockGuard<base::Mutex> lock_guard(&mutex_); queue_.Put(message); logger_->DebugEvent("Put", message.text()); } void LockingCommandMessageQueue::Clear() { - LockGuard<Mutex> lock_guard(&mutex_); + base::LockGuard<base::Mutex> lock_guard(&mutex_); queue_.Clear(); } - -MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate) - : Thread("v8:MsgDispHelpr"), - isolate_(isolate), sem_(0), - already_signalled_(false) { -} - - -void MessageDispatchHelperThread::Schedule() { - { - LockGuard<Mutex> lock_guard(&mutex_); - if (already_signalled_) { - return; - } - already_signalled_ = true; - } - sem_.Signal(); -} - - -void MessageDispatchHelperThread::Run() { - while (true) { - sem_.Wait(); - { - LockGuard<Mutex> lock_guard(&mutex_); - already_signalled_ = false; - } - { - Locker 
locker(reinterpret_cast<v8::Isolate*>(isolate_)); - isolate_->debugger()->CallMessageDispatchHandler(); - } - } -} - -#endif // ENABLE_DEBUGGER_SUPPORT - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/debug-debugger.js nodejs-0.11.15/deps/v8/src/debug-debugger.js --- nodejs-0.11.13/deps/v8/src/debug-debugger.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/debug-debugger.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Default number of frames to include in the response to backtrace request. var kDefaultBacktraceLength = 10; @@ -42,7 +19,9 @@ NewFunction: 3, BeforeCompile: 4, AfterCompile: 5, - ScriptCollected: 6 }; + CompileError: 6, + PromiseEvent: 7, + AsyncTaskEvent: 8 }; // Types of exceptions that can be broken upon. Debug.ExceptionBreak = { Caught : 0, @@ -451,7 +430,7 @@ if (IS_NULL(position)) return; // Create a break point object and set the break point. 
- break_point = MakeBreakPoint(position, this); + var break_point = MakeBreakPoint(position, this); break_point.setIgnoreCount(this.ignoreCount()); var actual_position = %SetScriptBreakPoint(script, position, this.position_alignment_, @@ -1009,44 +988,39 @@ }; -function MakeBreakEvent(exec_state, break_points_hit) { - return new BreakEvent(exec_state, break_points_hit); +function MakeBreakEvent(break_id, break_points_hit) { + return new BreakEvent(break_id, break_points_hit); } -function BreakEvent(exec_state, break_points_hit) { - this.exec_state_ = exec_state; +function BreakEvent(break_id, break_points_hit) { + this.frame_ = new FrameMirror(break_id, 0); this.break_points_hit_ = break_points_hit; } -BreakEvent.prototype.executionState = function() { - return this.exec_state_; -}; - - BreakEvent.prototype.eventType = function() { return Debug.DebugEvent.Break; }; BreakEvent.prototype.func = function() { - return this.exec_state_.frame(0).func(); + return this.frame_.func(); }; BreakEvent.prototype.sourceLine = function() { - return this.exec_state_.frame(0).sourceLine(); + return this.frame_.sourceLine(); }; BreakEvent.prototype.sourceColumn = function() { - return this.exec_state_.frame(0).sourceColumn(); + return this.frame_.sourceColumn(); }; BreakEvent.prototype.sourceLineText = function() { - return this.exec_state_.frame(0).sourceLineText(); + return this.frame_.sourceLineText(); }; @@ -1059,8 +1033,7 @@ var o = { seq: next_response_seq++, type: "event", event: "break", - body: { invocationText: this.exec_state_.frame(0).invocationText(), - } + body: { invocationText: this.frame_.invocationText() } }; // Add script related information to the event if available. 
@@ -1093,23 +1066,19 @@ }; -function MakeExceptionEvent(exec_state, exception, uncaught) { - return new ExceptionEvent(exec_state, exception, uncaught); +function MakeExceptionEvent(break_id, exception, uncaught, promise) { + return new ExceptionEvent(break_id, exception, uncaught, promise); } -function ExceptionEvent(exec_state, exception, uncaught) { - this.exec_state_ = exec_state; +function ExceptionEvent(break_id, exception, uncaught, promise) { + this.exec_state_ = new ExecutionState(break_id); this.exception_ = exception; this.uncaught_ = uncaught; + this.promise_ = promise; } -ExceptionEvent.prototype.executionState = function() { - return this.exec_state_; -}; - - ExceptionEvent.prototype.eventType = function() { return Debug.DebugEvent.Exception; }; @@ -1125,6 +1094,11 @@ }; +ExceptionEvent.prototype.promise = function() { + return this.promise_; +}; + + ExceptionEvent.prototype.func = function() { return this.exec_state_.frame(0).func(); }; @@ -1171,29 +1145,19 @@ }; -function MakeCompileEvent(exec_state, script, before) { - return new CompileEvent(exec_state, script, before); +function MakeCompileEvent(script, type) { + return new CompileEvent(script, type); } -function CompileEvent(exec_state, script, before) { - this.exec_state_ = exec_state; +function CompileEvent(script, type) { this.script_ = MakeMirror(script); - this.before_ = before; + this.type_ = type; } -CompileEvent.prototype.executionState = function() { - return this.exec_state_; -}; - - CompileEvent.prototype.eventType = function() { - if (this.before_) { - return Debug.DebugEvent.BeforeCompile; - } else { - return Debug.DebugEvent.AfterCompile; - } + return this.type_; }; @@ -1205,10 +1169,13 @@ CompileEvent.prototype.toJSONProtocol = function() { var o = new ProtocolMessage(); o.running = true; - if (this.before_) { - o.event = "beforeCompile"; - } else { - o.event = "afterCompile"; + switch (this.type_) { + case Debug.DebugEvent.BeforeCompile: + o.event = "beforeCompile"; + case 
Debug.DebugEvent.AfterCompile: + o.event = "afterCompile"; + case Debug.DebugEvent.CompileError: + o.event = "compileError"; } o.body = {}; o.body.script = this.script_; @@ -1217,76 +1184,80 @@ }; -function MakeNewFunctionEvent(func) { - return new NewFunctionEvent(func); +function MakeScriptObject_(script, include_source) { + var o = { id: script.id(), + name: script.name(), + lineOffset: script.lineOffset(), + columnOffset: script.columnOffset(), + lineCount: script.lineCount(), + }; + if (!IS_UNDEFINED(script.data())) { + o.data = script.data(); + } + if (include_source) { + o.source = script.source(); + } + return o; } -function NewFunctionEvent(func) { - this.func = func; +function MakePromiseEvent(event_data) { + return new PromiseEvent(event_data); } -NewFunctionEvent.prototype.eventType = function() { - return Debug.DebugEvent.NewFunction; -}; +function PromiseEvent(event_data) { + this.promise_ = event_data.promise; + this.parentPromise_ = event_data.parentPromise; + this.status_ = event_data.status; + this.value_ = event_data.value; +} -NewFunctionEvent.prototype.name = function() { - return this.func.name; -}; +PromiseEvent.prototype.promise = function() { + return MakeMirror(this.promise_); +} -NewFunctionEvent.prototype.setBreakPoint = function(p) { - Debug.setBreakPoint(this.func, p || 0); -}; +PromiseEvent.prototype.parentPromise = function() { + return MakeMirror(this.parentPromise_); +} -function MakeScriptCollectedEvent(exec_state, id) { - return new ScriptCollectedEvent(exec_state, id); +PromiseEvent.prototype.status = function() { + return this.status_; } -function ScriptCollectedEvent(exec_state, id) { - this.exec_state_ = exec_state; - this.id_ = id; +PromiseEvent.prototype.value = function() { + return MakeMirror(this.value_); } -ScriptCollectedEvent.prototype.id = function() { - return this.id_; -}; +function MakeAsyncTaskEvent(event_data) { + return new AsyncTaskEvent(event_data); +} -ScriptCollectedEvent.prototype.executionState = 
function() { - return this.exec_state_; -}; +function AsyncTaskEvent(event_data) { + this.type_ = event_data.type; + this.name_ = event_data.name; + this.id_ = event_data.id; +} -ScriptCollectedEvent.prototype.toJSONProtocol = function() { - var o = new ProtocolMessage(); - o.running = true; - o.event = "scriptCollected"; - o.body = {}; - o.body.script = { id: this.id() }; - return o.toJSONProtocol(); -}; +AsyncTaskEvent.prototype.type = function() { + return this.type_; +} -function MakeScriptObject_(script, include_source) { - var o = { id: script.id(), - name: script.name(), - lineOffset: script.lineOffset(), - columnOffset: script.columnOffset(), - lineCount: script.lineCount(), - }; - if (!IS_UNDEFINED(script.data())) { - o.data = script.data(); - } - if (include_source) { - o.source = script.source(); - } - return o; +AsyncTaskEvent.prototype.name = function() { + return this.name_; +} + + +AsyncTaskEvent.prototype.id = function() { + return this.id_; } @@ -1430,63 +1401,10 @@ } } - if (request.command == 'continue') { - this.continueRequest_(request, response); - } else if (request.command == 'break') { - this.breakRequest_(request, response); - } else if (request.command == 'setbreakpoint') { - this.setBreakPointRequest_(request, response); - } else if (request.command == 'changebreakpoint') { - this.changeBreakPointRequest_(request, response); - } else if (request.command == 'clearbreakpoint') { - this.clearBreakPointRequest_(request, response); - } else if (request.command == 'clearbreakpointgroup') { - this.clearBreakPointGroupRequest_(request, response); - } else if (request.command == 'disconnect') { - this.disconnectRequest_(request, response); - } else if (request.command == 'setexceptionbreak') { - this.setExceptionBreakRequest_(request, response); - } else if (request.command == 'listbreakpoints') { - this.listBreakpointsRequest_(request, response); - } else if (request.command == 'backtrace') { - this.backtraceRequest_(request, response); - } else 
if (request.command == 'frame') { - this.frameRequest_(request, response); - } else if (request.command == 'scopes') { - this.scopesRequest_(request, response); - } else if (request.command == 'scope') { - this.scopeRequest_(request, response); - } else if (request.command == 'setVariableValue') { - this.setVariableValueRequest_(request, response); - } else if (request.command == 'evaluate') { - this.evaluateRequest_(request, response); - } else if (request.command == 'lookup') { - this.lookupRequest_(request, response); - } else if (request.command == 'references') { - this.referencesRequest_(request, response); - } else if (request.command == 'source') { - this.sourceRequest_(request, response); - } else if (request.command == 'scripts') { - this.scriptsRequest_(request, response); - } else if (request.command == 'threads') { - this.threadsRequest_(request, response); - } else if (request.command == 'suspend') { - this.suspendRequest_(request, response); - } else if (request.command == 'version') { - this.versionRequest_(request, response); - } else if (request.command == 'changelive') { - this.changeLiveRequest_(request, response); - } else if (request.command == 'restartframe') { - this.restartFrameRequest_(request, response); - } else if (request.command == 'flags') { - this.debuggerFlagsRequest_(request, response); - } else if (request.command == 'v8flags') { - this.v8FlagsRequest_(request, response); - - // GC tools: - } else if (request.command == 'gc') { - this.gcRequest_(request, response); - + var key = request.command.toLowerCase(); + var handler = DebugCommandProcessor.prototype.dispatch_[key]; + if (IS_FUNCTION(handler)) { + %_CallFunction(this, request, response, handler); } else { throw new Error('Unknown command "' + request.command + '" in request'); } @@ -2534,6 +2452,40 @@ }; +DebugCommandProcessor.prototype.dispatch_ = (function() { + var proto = DebugCommandProcessor.prototype; + return { + "continue": proto.continueRequest_, + "break" : 
proto.breakRequest_, + "setbreakpoint" : proto.setBreakPointRequest_, + "changebreakpoint": proto.changeBreakPointRequest_, + "clearbreakpoint": proto.clearBreakPointRequest_, + "clearbreakpointgroup": proto.clearBreakPointGroupRequest_, + "disconnect": proto.disconnectRequest_, + "setexceptionbreak": proto.setExceptionBreakRequest_, + "listbreakpoints": proto.listBreakpointsRequest_, + "backtrace": proto.backtraceRequest_, + "frame": proto.frameRequest_, + "scopes": proto.scopesRequest_, + "scope": proto.scopeRequest_, + "setvariablevalue": proto.setVariableValueRequest_, + "evaluate": proto.evaluateRequest_, + "lookup": proto.lookupRequest_, + "references": proto.referencesRequest_, + "source": proto.sourceRequest_, + "scripts": proto.scriptsRequest_, + "threads": proto.threadsRequest_, + "suspend": proto.suspendRequest_, + "version": proto.versionRequest_, + "changelive": proto.changeLiveRequest_, + "restartframe": proto.restartFrameRequest_, + "flags": proto.debuggerFlagsRequest_, + "v8flag": proto.v8FlagsRequest_, + "gc": proto.gcRequest_, + }; +})(); + + // Check whether the previously processed command caused the VM to become // running. DebugCommandProcessor.prototype.isRunning = function() { @@ -2546,17 +2498,6 @@ }; -function NumberToHex8Str(n) { - var r = ""; - for (var i = 0; i < 8; ++i) { - var c = hexCharArray[n & 0x0F]; // hexCharArray is defined in uri.js - r = c + r; - n = n >>> 4; - } - return r; -} - - /** * Convert an Object to its debugger protocol representation. The representation * may be serilized to a JSON object using JSON.stringify(). diff -Nru nodejs-0.11.13/deps/v8/src/debug.h nodejs-0.11.15/deps/v8/src/debug.h --- nodejs-0.11.13/deps/v8/src/debug.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/debug.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,55 +1,31 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_DEBUG_H_ #define V8_DEBUG_H_ -#include "allocation.h" -#include "arguments.h" -#include "assembler.h" -#include "debug-agent.h" -#include "execution.h" -#include "factory.h" -#include "flags.h" -#include "frames-inl.h" -#include "hashmap.h" -#include "platform.h" -#include "string-stream.h" -#include "v8threads.h" +#include "src/allocation.h" +#include "src/arguments.h" +#include "src/assembler.h" +#include "src/base/platform/platform.h" +#include "src/execution.h" +#include "src/factory.h" +#include "src/flags.h" +#include "src/frames-inl.h" +#include "src/hashmap.h" +#include "src/liveedit.h" +#include "src/string-stream.h" +#include "src/v8threads.h" -#ifdef ENABLE_DEBUGGER_SUPPORT -#include "../include/v8-debug.h" +#include "include/v8-debug.h" namespace v8 { namespace internal { // Forward declarations. -class EnterDebugger; +class DebugScope; // Step actions. NOTE: These values are in macros.py as well. @@ -174,8 +150,7 @@ // the cache is the script id. class ScriptCache : private HashMap { public: - explicit ScriptCache(Isolate* isolate) - : HashMap(ScriptMatch), isolate_(isolate), collected_scripts_(10) {} + explicit ScriptCache(Isolate* isolate); virtual ~ScriptCache() { Clear(); } // Add script to the cache. @@ -184,18 +159,12 @@ // Return the scripts in the cache. Handle<FixedArray> GetScripts(); - // Generate debugger events for collected scripts. - void ProcessCollectedScripts(); - private: // Calculate the hash value from the key (script id). static uint32_t Hash(int key) { return ComputeIntegerHash(key, v8::internal::kZeroHashSeed); } - // Scripts match if their keys (script id) match. - static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; } - // Clear the cache releasing all the weak handles. void Clear(); @@ -204,8 +173,6 @@ const v8::WeakCallbackData<v8::Value, void>& data); Isolate* isolate_; - // List used during GC to temporarily store id's of collected scripts. 
- List<int> collected_scripts_; }; @@ -228,417 +195,6 @@ DebugInfoListNode* next_; }; -// This class contains the debugger support. The main purpose is to handle -// setting break points in the code. -// -// This class controls the debug info for all functions which currently have -// active breakpoints in them. This debug info is held in the heap root object -// debug_info which is a FixedArray. Each entry in this list is of class -// DebugInfo. -class Debug { - public: - void SetUp(bool create_heap_objects); - bool Load(); - void Unload(); - bool IsLoaded() { return !debug_context_.is_null(); } - bool InDebugger() { return thread_local_.debugger_entry_ != NULL; } - void PreemptionWhileInDebugger(); - void Iterate(ObjectVisitor* v); - - Object* Break(Arguments args); - void SetBreakPoint(Handle<JSFunction> function, - Handle<Object> break_point_object, - int* source_position); - bool SetBreakPointForScript(Handle<Script> script, - Handle<Object> break_point_object, - int* source_position, - BreakPositionAlignment alignment); - void ClearBreakPoint(Handle<Object> break_point_object); - void ClearAllBreakPoints(); - void FloodWithOneShot(Handle<JSFunction> function); - void FloodBoundFunctionWithOneShot(Handle<JSFunction> function); - void FloodHandlerWithOneShot(); - void ChangeBreakOnException(ExceptionBreakType type, bool enable); - bool IsBreakOnException(ExceptionBreakType type); - void PrepareStep(StepAction step_action, - int step_count, - StackFrame::Id frame_id); - void ClearStepping(); - void ClearStepOut(); - bool IsStepping() { return thread_local_.step_count_ > 0; } - bool StepNextContinue(BreakLocationIterator* break_location_iterator, - JavaScriptFrame* frame); - static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared); - static bool HasDebugInfo(Handle<SharedFunctionInfo> shared); - - void PrepareForBreakPoints(); - - // This function is used in FunctionNameUsing* tests. 
- Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position); - - // Returns whether the operation succeeded. Compilation can only be triggered - // if a valid closure is passed as the second argument, otherwise the shared - // function needs to be compiled already. - bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared, - Handle<JSFunction> function); - - // Returns true if the current stub call is patched to call the debugger. - static bool IsDebugBreak(Address addr); - // Returns true if the current return statement has been patched to be - // a debugger breakpoint. - static bool IsDebugBreakAtReturn(RelocInfo* rinfo); - - // Check whether a code stub with the specified major key is a possible break - // point location. - static bool IsSourceBreakStub(Code* code); - static bool IsBreakStub(Code* code); - - // Find the builtin to use for invoking the debug break - static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode); - - static Handle<Object> GetSourceBreakLocations( - Handle<SharedFunctionInfo> shared, - BreakPositionAlignment position_aligment); - - // Getter for the debug_context. - inline Handle<Context> debug_context() { return debug_context_; } - - // Check whether a global object is the debug global object. - bool IsDebugGlobal(GlobalObject* global); - - // Check whether this frame is just about to return. - bool IsBreakAtReturn(JavaScriptFrame* frame); - - // Fast check to see if any break points are active. 
- inline bool has_break_points() { return has_break_points_; } - - void NewBreak(StackFrame::Id break_frame_id); - void SetBreak(StackFrame::Id break_frame_id, int break_id); - StackFrame::Id break_frame_id() { - return thread_local_.break_frame_id_; - } - int break_id() { return thread_local_.break_id_; } - - bool StepInActive() { return thread_local_.step_into_fp_ != 0; } - void HandleStepIn(Handle<JSFunction> function, - Handle<Object> holder, - Address fp, - bool is_constructor); - Address step_in_fp() { return thread_local_.step_into_fp_; } - Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; } - - bool StepOutActive() { return thread_local_.step_out_fp_ != 0; } - Address step_out_fp() { return thread_local_.step_out_fp_; } - - EnterDebugger* debugger_entry() { - return thread_local_.debugger_entry_; - } - void set_debugger_entry(EnterDebugger* entry) { - thread_local_.debugger_entry_ = entry; - } - - // Check whether any of the specified interrupts are pending. - bool is_interrupt_pending(InterruptFlag what) { - return (thread_local_.pending_interrupts_ & what) != 0; - } - - // Set specified interrupts as pending. - void set_interrupts_pending(InterruptFlag what) { - thread_local_.pending_interrupts_ |= what; - } - - // Clear specified interrupts from pending. - void clear_interrupt_pending(InterruptFlag what) { - thread_local_.pending_interrupts_ &= ~static_cast<int>(what); - } - - // Getter and setter for the disable break state. - bool disable_break() { return disable_break_; } - void set_disable_break(bool disable_break) { - disable_break_ = disable_break; - } - - // Getters for the current exception break state. 
- bool break_on_exception() { return break_on_exception_; } - bool break_on_uncaught_exception() { - return break_on_uncaught_exception_; - } - - enum AddressId { - k_after_break_target_address, - k_debug_break_return_address, - k_debug_break_slot_address, - k_restarter_frame_function_pointer - }; - - // Support for setting the address to jump to when returning from break point. - Address* after_break_target_address() { - return reinterpret_cast<Address*>(&thread_local_.after_break_target_); - } - Address* restarter_frame_function_pointer_address() { - Object*** address = &thread_local_.restarter_frame_function_pointer_; - return reinterpret_cast<Address*>(address); - } - - // Support for saving/restoring registers when handling debug break calls. - Object** register_address(int r) { - return ®isters_[r]; - } - - // Access to the debug break on return code. - Code* debug_break_return() { return debug_break_return_; } - Code** debug_break_return_address() { - return &debug_break_return_; - } - - // Access to the debug break in debug break slot code. - Code* debug_break_slot() { return debug_break_slot_; } - Code** debug_break_slot_address() { - return &debug_break_slot_; - } - - static const int kEstimatedNofDebugInfoEntries = 16; - static const int kEstimatedNofBreakPointsInFunction = 16; - - // Passed to MakeWeak. - static void HandleWeakDebugInfo( - const v8::WeakCallbackData<v8::Value, void>& data); - - friend class Debugger; - friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc - friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc - - // Threading support. - char* ArchiveDebug(char* to); - char* RestoreDebug(char* from); - static int ArchiveSpacePerThread(); - void FreeThreadResources() { } - - // Mirror cache handling. - void ClearMirrorCache(); - - // Script cache handling. 
- void CreateScriptCache(); - void DestroyScriptCache(); - void AddScriptToScriptCache(Handle<Script> script); - Handle<FixedArray> GetLoadedScripts(); - - // Record function from which eval was called. - static void RecordEvalCaller(Handle<Script> script); - - // Garbage collection notifications. - void AfterGarbageCollection(); - - // Code generator routines. - static void GenerateSlot(MacroAssembler* masm); - static void GenerateLoadICDebugBreak(MacroAssembler* masm); - static void GenerateStoreICDebugBreak(MacroAssembler* masm); - static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm); - static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm); - static void GenerateCompareNilICDebugBreak(MacroAssembler* masm); - static void GenerateReturnDebugBreak(MacroAssembler* masm); - static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm); - static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm); - static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm); - static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm); - static void GenerateSlotDebugBreak(MacroAssembler* masm); - static void GeneratePlainReturnLiveEdit(MacroAssembler* masm); - - // FrameDropper is a code replacement for a JavaScript frame with possibly - // several frames above. - // There is no calling conventions here, because it never actually gets - // called, it only gets returned to. - static void GenerateFrameDropperLiveEdit(MacroAssembler* masm); - - // Called from stub-cache.cc. - static void GenerateCallICDebugBreak(MacroAssembler* masm); - - // Describes how exactly a frame has been dropped from stack. - enum FrameDropMode { - // No frame has been dropped. - FRAMES_UNTOUCHED, - // The top JS frame had been calling IC stub. IC stub mustn't be called now. - FRAME_DROPPED_IN_IC_CALL, - // The top JS frame had been calling debug break slot stub. Patch the - // address this stub jumps to in the end. 
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL, - // The top JS frame had been calling some C++ function. The return address - // gets patched automatically. - FRAME_DROPPED_IN_DIRECT_CALL, - FRAME_DROPPED_IN_RETURN_CALL, - CURRENTLY_SET_MODE - }; - - void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id, - FrameDropMode mode, - Object** restarter_frame_function_pointer); - - // Initializes an artificial stack frame. The data it contains is used for: - // a. successful work of frame dropper code which eventually gets control, - // b. being compatible with regular stack structure for various stack - // iterators. - // Returns address of stack allocated pointer to restarted function, - // the value that is called 'restarter_frame_function_pointer'. The value - // at this address (possibly updated by GC) may be used later when preparing - // 'step in' operation. - static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame, - Handle<Code> code); - - static const int kFrameDropperFrameSize; - - // Architecture-specific constant. - static const bool kFrameDropperSupported; - - /** - * Defines layout of a stack frame that supports padding. This is a regular - * internal frame that has a flexible stack structure. LiveEdit can shift - * its lower part up the stack, taking up the 'padding' space when additional - * stack memory is required. - * Such frame is expected immediately above the topmost JavaScript frame. - * - * Stack Layout: - * --- Top - * LiveEdit routine frames - * --- - * C frames of debug handler - * --- - * ... - * --- - * An internal frame that has n padding words: - * - any number of words as needed by code -- upper part of frame - * - padding size: a Smi storing n -- current size of padding - * - padding: n words filled with kPaddingValue in form of Smi - * - 3 context/type words of a regular InternalFrame - * - fp - * --- - * Topmost JavaScript frame - * --- - * ... 
- * --- Bottom - */ - class FramePaddingLayout : public AllStatic { - public: - // Architecture-specific constant. - static const bool kIsSupported; - - // A size of frame base including fp. Padding words starts right above - // the base. - static const int kFrameBaseSize = 4; - - // A number of words that should be reserved on stack for the LiveEdit use. - // Normally equals 1. Stored on stack in form of Smi. - static const int kInitialSize; - // A value that padding words are filled with (in form of Smi). Going - // bottom-top, the first word not having this value is a counter word. - static const int kPaddingValue; - }; - - private: - explicit Debug(Isolate* isolate); - ~Debug(); - - static bool CompileDebuggerScript(Isolate* isolate, int index); - void ClearOneShot(); - void ActivateStepIn(StackFrame* frame); - void ClearStepIn(); - void ActivateStepOut(StackFrame* frame); - void ClearStepNext(); - // Returns whether the compile succeeded. - void RemoveDebugInfo(Handle<DebugInfo> debug_info); - void SetAfterBreakTarget(JavaScriptFrame* frame); - Handle<Object> CheckBreakPoints(Handle<Object> break_point); - bool CheckBreakPoint(Handle<Object> break_point_object); - - // Global handle to debug context where all the debugger JavaScript code is - // loaded. - Handle<Context> debug_context_; - - // Boolean state indicating whether any break points are set. - bool has_break_points_; - - // Cache of all scripts in the heap. - ScriptCache* script_cache_; - - // List of active debug info objects. - DebugInfoListNode* debug_info_list_; - - bool disable_break_; - bool break_on_exception_; - bool break_on_uncaught_exception_; - - // Per-thread data. - class ThreadLocal { - public: - // Counter for generating next break id. - int break_count_; - - // Current break id. - int break_id_; - - // Frame id for the frame of the current break. - StackFrame::Id break_frame_id_; - - // Step action for last step performed. 
- StepAction last_step_action_; - - // Source statement position from last step next action. - int last_statement_position_; - - // Number of steps left to perform before debug event. - int step_count_; - - // Frame pointer from last step next action. - Address last_fp_; - - // Number of queued steps left to perform before debug event. - int queued_step_count_; - - // Frame pointer for frame from which step in was performed. - Address step_into_fp_; - - // Frame pointer for the frame where debugger should be called when current - // step out action is completed. - Address step_out_fp_; - - // Storage location for jump when exiting debug break calls. - Address after_break_target_; - - // Stores the way how LiveEdit has patched the stack. It is used when - // debugger returns control back to user script. - FrameDropMode frame_drop_mode_; - - // Top debugger entry. - EnterDebugger* debugger_entry_; - - // Pending interrupts scheduled while debugging. - int pending_interrupts_; - - // When restarter frame is on stack, stores the address - // of the pointer to function being restarted. Otherwise (most of the time) - // stores NULL. This pointer is used with 'step in' implementation. - Object** restarter_frame_function_pointer_; - }; - - // Storage location for registers when handling debug break calls - JSCallerSavedBuffer registers_; - ThreadLocal thread_local_; - void ThreadInit(); - - // Code to call for handling debug break on return. - Code* debug_break_return_; - - // Code to call for handling debug break in debug break slots. - Code* debug_break_slot_; - - Isolate* isolate_; - - friend class Isolate; - - DISALLOW_COPY_AND_ASSIGN(Debug); -}; - - -DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break); // Message delivered to the message handler callback. This is either a debugger @@ -723,7 +279,6 @@ static CommandMessage New(const Vector<uint16_t>& command, v8::Debug::ClientData* data); CommandMessage(); - ~CommandMessage(); // Deletes user data and disposes of the text. 
void Dispose(); @@ -737,6 +292,7 @@ v8::Debug::ClientData* client_data_; }; + // A Queue of CommandMessage objects. A thread-safe version is // LockingCommandMessageQueue, based on this class. class CommandMessageQueue BASE_EMBEDDED { @@ -758,9 +314,6 @@ }; -class MessageDispatchHelperThread; - - // LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage // messages. The message data is not managed by LockingCommandMessageQueue. // Pointers to the data are passed in and out. Implemented by adding a @@ -775,301 +328,449 @@ private: Logger* logger_; CommandMessageQueue queue_; - mutable Mutex mutex_; + mutable base::Mutex mutex_; DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue); }; -class Debugger { +class PromiseOnStack { public: - ~Debugger(); + PromiseOnStack(Isolate* isolate, PromiseOnStack* prev, + Handle<JSObject> getter); + ~PromiseOnStack(); + StackHandler* handler() { return handler_; } + Handle<JSObject> promise() { return promise_; } + PromiseOnStack* prev() { return prev_; } + private: + Isolate* isolate_; + StackHandler* handler_; + Handle<JSObject> promise_; + PromiseOnStack* prev_; +}; - void DebugRequest(const uint16_t* json_request, int length); - Handle<Object> MakeJSObject(Vector<const char> constructor_name, - int argc, - Handle<Object> argv[], - bool* caught_exception); - Handle<Object> MakeExecutionState(bool* caught_exception); - Handle<Object> MakeBreakEvent(Handle<Object> exec_state, - Handle<Object> break_points_hit, - bool* caught_exception); - Handle<Object> MakeExceptionEvent(Handle<Object> exec_state, - Handle<Object> exception, - bool uncaught, - bool* caught_exception); - Handle<Object> MakeNewFunctionEvent(Handle<Object> func, - bool* caught_exception); - Handle<Object> MakeCompileEvent(Handle<Script> script, - bool before, - bool* caught_exception); - Handle<Object> MakeScriptCollectedEvent(int id, - bool* caught_exception); +// This class contains the debugger support. 
The main purpose is to handle +// setting break points in the code. +// +// This class controls the debug info for all functions which currently have +// active breakpoints in them. This debug info is held in the heap root object +// debug_info which is a FixedArray. Each entry in this list is of class +// DebugInfo. +class Debug { + public: + // Debug event triggers. void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue); - void OnException(Handle<Object> exception, bool uncaught); + + void OnThrow(Handle<Object> exception, bool uncaught); + void OnPromiseReject(Handle<JSObject> promise, Handle<Object> value); + void OnCompileError(Handle<Script> script); void OnBeforeCompile(Handle<Script> script); + void OnAfterCompile(Handle<Script> script); + void OnPromiseEvent(Handle<JSObject> data); + void OnAsyncTaskEvent(Handle<JSObject> data); - enum AfterCompileFlags { - NO_AFTER_COMPILE_FLAGS, - SEND_WHEN_DEBUGGING - }; - void OnAfterCompile(Handle<Script> script, - AfterCompileFlags after_compile_flags); - void OnScriptCollected(int id); - void ProcessDebugEvent(v8::DebugEvent event, - Handle<JSObject> event_data, - bool auto_continue); - void NotifyMessageHandler(v8::DebugEvent event, - Handle<JSObject> exec_state, - Handle<JSObject> event_data, - bool auto_continue); + // API facing. void SetEventListener(Handle<Object> callback, Handle<Object> data); - void SetMessageHandler(v8::Debug::MessageHandler2 handler); - void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler, - TimeDelta period); - void SetDebugMessageDispatchHandler( - v8::Debug::DebugMessageDispatchHandler handler, - bool provide_locker); + void SetMessageHandler(v8::Debug::MessageHandler handler); + void EnqueueCommandMessage(Vector<const uint16_t> command, + v8::Debug::ClientData* client_data = NULL); + // Enqueue a debugger command to the command queue for event listeners. 
+ void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL); + MUST_USE_RESULT MaybeHandle<Object> Call(Handle<JSFunction> fun, + Handle<Object> data); + Handle<Context> GetDebugContext(); + void HandleDebugBreak(); + void ProcessDebugMessages(bool debug_command_only); - // Invoke the message handler function. - void InvokeMessageHandler(MessageImpl message); + // Internal logic + bool Load(); + void Break(Arguments args, JavaScriptFrame*); + void SetAfterBreakTarget(JavaScriptFrame* frame); - // Add a debugger command to the command queue. - void ProcessCommand(Vector<const uint16_t> command, - v8::Debug::ClientData* client_data = NULL); + // Scripts handling. + Handle<FixedArray> GetLoadedScripts(); - // Check whether there are commands in the command queue. - bool HasCommands(); + // Break point handling. + bool SetBreakPoint(Handle<JSFunction> function, + Handle<Object> break_point_object, + int* source_position); + bool SetBreakPointForScript(Handle<Script> script, + Handle<Object> break_point_object, + int* source_position, + BreakPositionAlignment alignment); + void ClearBreakPoint(Handle<Object> break_point_object); + void ClearAllBreakPoints(); + void FloodWithOneShot(Handle<JSFunction> function); + void FloodBoundFunctionWithOneShot(Handle<JSFunction> function); + void FloodHandlerWithOneShot(); + void ChangeBreakOnException(ExceptionBreakType type, bool enable); + bool IsBreakOnException(ExceptionBreakType type); - // Enqueue a debugger command to the command queue for event listeners. - void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL); + // Stepping handling. 
+ void PrepareStep(StepAction step_action, + int step_count, + StackFrame::Id frame_id); + void ClearStepping(); + void ClearStepOut(); + bool IsStepping() { return thread_local_.step_count_ > 0; } + bool StepNextContinue(BreakLocationIterator* break_location_iterator, + JavaScriptFrame* frame); + bool StepInActive() { return thread_local_.step_into_fp_ != 0; } + void HandleStepIn(Handle<JSFunction> function, + Handle<Object> holder, + Address fp, + bool is_constructor); + bool StepOutActive() { return thread_local_.step_out_fp_ != 0; } - Handle<Object> Call(Handle<JSFunction> fun, - Handle<Object> data, - bool* pending_exception); + // Purge all code objects that have no debug break slots. + void PrepareForBreakPoints(); - // Start the debugger agent listening on the provided port. - bool StartAgent(const char* name, int port, - bool wait_for_connection = false); + // Returns whether the operation succeeded. Compilation can only be triggered + // if a valid closure is passed as the second argument, otherwise the shared + // function needs to be compiled already. + bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared, + Handle<JSFunction> function); + static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared); + static bool HasDebugInfo(Handle<SharedFunctionInfo> shared); - // Stop the debugger agent. - void StopAgent(); + // This function is used in FunctionNameUsing* tests. + Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position); - // Blocks until the agent has started listening for connections - void WaitForAgent(); + // Returns true if the current stub call is patched to call the debugger. + static bool IsDebugBreak(Address addr); + // Returns true if the current return statement has been patched to be + // a debugger breakpoint. 
+ static bool IsDebugBreakAtReturn(RelocInfo* rinfo); - void CallMessageDispatchHandler(); + static Handle<Object> GetSourceBreakLocations( + Handle<SharedFunctionInfo> shared, + BreakPositionAlignment position_aligment); - Handle<Context> GetDebugContext(); + // Check whether a global object is the debug global object. + bool IsDebugGlobal(GlobalObject* global); - // Unload the debugger if possible. Only called when no debugger is currently - // active. - void UnloadDebugger(); - friend void ForceUnloadDebugger(); // In test-debug.cc - - inline bool EventActive(v8::DebugEvent event) { - LockGuard<RecursiveMutex> lock_guard(debugger_access_); - - // Check whether the message handler was been cleared. - if (debugger_unload_pending_) { - if (isolate_->debug()->debugger_entry() == NULL) { - UnloadDebugger(); - } - } - - if (((event == v8::BeforeCompile) || (event == v8::AfterCompile)) && - !FLAG_debug_compile_events) { - return false; - - } else if ((event == v8::ScriptCollected) && - !FLAG_debug_script_collected_events) { - return false; - } + // Check whether this frame is just about to return. + bool IsBreakAtReturn(JavaScriptFrame* frame); - // Currently argument event is not used. - return !compiling_natives_ && Debugger::IsDebuggerActive(); - } + // Promise handling. + // Push and pop a promise and the current try-catch handler. + void PushPromise(Handle<JSObject> promise); + void PopPromise(); - void set_compiling_natives(bool compiling_natives) { - compiling_natives_ = compiling_natives; - } - bool compiling_natives() const { return compiling_natives_; } - void set_loading_debugger(bool v) { is_loading_debugger_ = v; } - bool is_loading_debugger() const { return is_loading_debugger_; } + // Support for LiveEdit + void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id, + LiveEdit::FrameDropMode mode, + Object** restarter_frame_function_pointer); + + // Passed to MakeWeak. 
+ static void HandleWeakDebugInfo( + const v8::WeakCallbackData<v8::Value, void>& data); + + // Threading support. + char* ArchiveDebug(char* to); + char* RestoreDebug(char* from); + static int ArchiveSpacePerThread(); + void FreeThreadResources() { } + + // Record function from which eval was called. + static void RecordEvalCaller(Handle<Script> script); + + // Flags and states. + DebugScope* debugger_entry() { return thread_local_.current_debug_scope_; } + inline Handle<Context> debug_context() { return debug_context_; } void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; } bool live_edit_enabled() const { return FLAG_enable_liveedit && live_edit_enabled_ ; } - void set_force_debugger_active(bool force_debugger_active) { - force_debugger_active_ = force_debugger_active; + + inline bool is_active() const { return is_active_; } + inline bool is_loaded() const { return !debug_context_.is_null(); } + inline bool has_break_points() const { return has_break_points_; } + inline bool in_debug_scope() const { + return thread_local_.current_debug_scope_ != NULL; + } + void set_disable_break(bool v) { break_disabled_ = v; } + + StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; } + int break_id() { return thread_local_.break_id_; } + + // Support for embedding into generated code. 
+ Address is_active_address() { + return reinterpret_cast<Address>(&is_active_); + } + + Address after_break_target_address() { + return reinterpret_cast<Address>(&after_break_target_); + } + + Address restarter_frame_function_pointer_address() { + Object*** address = &thread_local_.restarter_frame_function_pointer_; + return reinterpret_cast<Address>(address); } - bool force_debugger_active() const { return force_debugger_active_; } - bool IsDebuggerActive(); + Address step_in_fp_addr() { + return reinterpret_cast<Address>(&thread_local_.step_into_fp_); + } private: - explicit Debugger(Isolate* isolate); + explicit Debug(Isolate* isolate); + + void UpdateState(); + void Unload(); + void SetNextBreakId() { + thread_local_.break_id_ = ++thread_local_.break_count_; + } + + // Check whether there are commands in the command queue. + inline bool has_commands() const { return !command_queue_.IsEmpty(); } + inline bool ignore_events() const { return is_suppressed_ || !is_active_; } + + void OnException(Handle<Object> exception, bool uncaught, + Handle<Object> promise); + + // Constructors for debug event objects. + MUST_USE_RESULT MaybeHandle<Object> MakeJSObject( + const char* constructor_name, + int argc, + Handle<Object> argv[]); + MUST_USE_RESULT MaybeHandle<Object> MakeExecutionState(); + MUST_USE_RESULT MaybeHandle<Object> MakeBreakEvent( + Handle<Object> break_points_hit); + MUST_USE_RESULT MaybeHandle<Object> MakeExceptionEvent( + Handle<Object> exception, + bool uncaught, + Handle<Object> promise); + MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent( + Handle<Script> script, v8::DebugEvent type); + MUST_USE_RESULT MaybeHandle<Object> MakePromiseEvent( + Handle<JSObject> promise_event); + MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent( + Handle<JSObject> task_event); + + // Mirror cache handling. + void ClearMirrorCache(); + + // Returns a promise if the pushed try-catch handler matches the current one. 
+ Handle<Object> GetPromiseOnStackOnThrow(); + bool PromiseHasRejectHandler(Handle<JSObject> promise); void CallEventCallback(v8::DebugEvent event, Handle<Object> exec_state, Handle<Object> event_data, v8::Debug::ClientData* client_data); - void CallCEventCallback(v8::DebugEvent event, - Handle<Object> exec_state, - Handle<Object> event_data, - v8::Debug::ClientData* client_data); - void CallJSEventCallback(v8::DebugEvent event, - Handle<Object> exec_state, - Handle<Object> event_data); - void ListenersChanged(); + void ProcessDebugEvent(v8::DebugEvent event, + Handle<JSObject> event_data, + bool auto_continue); + void NotifyMessageHandler(v8::DebugEvent event, + Handle<JSObject> exec_state, + Handle<JSObject> event_data, + bool auto_continue); + void InvokeMessageHandler(MessageImpl message); - RecursiveMutex* debugger_access_; // Mutex guarding debugger variables. - Handle<Object> event_listener_; // Global handle to listener. + static bool CompileDebuggerScript(Isolate* isolate, int index); + void ClearOneShot(); + void ActivateStepIn(StackFrame* frame); + void ClearStepIn(); + void ActivateStepOut(StackFrame* frame); + void ClearStepNext(); + // Returns whether the compile succeeded. + void RemoveDebugInfo(Handle<DebugInfo> debug_info); + Handle<Object> CheckBreakPoints(Handle<Object> break_point); + bool CheckBreakPoint(Handle<Object> break_point_object); + + inline void AssertDebugContext() { + DCHECK(isolate_->context() == *debug_context()); + DCHECK(in_debug_scope()); + } + + void ThreadInit(); + + // Global handles. + Handle<Context> debug_context_; + Handle<Object> event_listener_; Handle<Object> event_listener_data_; - bool compiling_natives_; // Are we compiling natives? - bool is_loading_debugger_; // Are we loading the debugger? - bool live_edit_enabled_; // Enable LiveEdit. - bool never_unload_debugger_; // Can we unload the debugger? - bool force_debugger_active_; // Activate debugger without event listeners. 
- v8::Debug::MessageHandler2 message_handler_; - bool debugger_unload_pending_; // Was message handler cleared? - v8::Debug::HostDispatchHandler host_dispatch_handler_; - Mutex dispatch_handler_access_; // Mutex guarding dispatch handler. - v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_; - MessageDispatchHelperThread* message_dispatch_helper_thread_; - TimeDelta host_dispatch_period_; - DebuggerAgent* agent_; + v8::Debug::MessageHandler message_handler_; static const int kQueueInitialSize = 4; + base::Semaphore command_received_; // Signaled for each command received. LockingCommandMessageQueue command_queue_; - Semaphore command_received_; // Signaled for each command received. LockingCommandMessageQueue event_command_queue_; + bool is_active_; + bool is_suppressed_; + bool live_edit_enabled_; + bool has_break_points_; + bool break_disabled_; + bool break_on_exception_; + bool break_on_uncaught_exception_; + + ScriptCache* script_cache_; // Cache of all scripts in the heap. + DebugInfoListNode* debug_info_list_; // List of active debug info objects. + + // Storage location for jump when exiting debug break calls. + // Note that this address is not GC safe. It should be computed immediately + // before returning to the DebugBreakCallHelper. + Address after_break_target_; + + // Per-thread data. + class ThreadLocal { + public: + // Top debugger entry. + DebugScope* current_debug_scope_; + + // Counter for generating next break id. + int break_count_; + + // Current break id. + int break_id_; + + // Frame id for the frame of the current break. + StackFrame::Id break_frame_id_; + + // Step action for last step performed. + StepAction last_step_action_; + + // Source statement position from last step next action. + int last_statement_position_; + + // Number of steps left to perform before debug event. + int step_count_; + + // Frame pointer from last step next action. 
+ Address last_fp_; + + // Number of queued steps left to perform before debug event. + int queued_step_count_; + + // Frame pointer for frame from which step in was performed. + Address step_into_fp_; + + // Frame pointer for the frame where debugger should be called when current + // step out action is completed. + Address step_out_fp_; + + // Stores the way how LiveEdit has patched the stack. It is used when + // debugger returns control back to user script. + LiveEdit::FrameDropMode frame_drop_mode_; + + // When restarter frame is on stack, stores the address + // of the pointer to function being restarted. Otherwise (most of the time) + // stores NULL. This pointer is used with 'step in' implementation. + Object** restarter_frame_function_pointer_; + + // When a promise is being resolved, we may want to trigger a debug event + // if we catch a throw. For this purpose we remember the try-catch + // handler address that would catch the exception. We also hold onto a + // closure that returns a promise if the exception is considered uncaught. + // Due to the possibility of reentry we use a linked list. + PromiseOnStack* promise_on_stack_; + }; + + // Storage location for registers when handling debug break calls + ThreadLocal thread_local_; + Isolate* isolate_; - friend class EnterDebugger; friend class Isolate; + friend class DebugScope; + friend class DisableBreak; + friend class LiveEdit; + friend class SuppressDebug; + + friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc + friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc - DISALLOW_COPY_AND_ASSIGN(Debugger); + DISALLOW_COPY_AND_ASSIGN(Debug); }; -// This class is used for entering the debugger. Create an instance in the stack -// to enter the debugger. This will set the current break state, make sure the -// debugger is loaded and switch to the debugger context. If the debugger for -// some reason could not be entered FailedToEnter will return true. 
-class EnterDebugger BASE_EMBEDDED { - public: - explicit EnterDebugger(Isolate* isolate); - ~EnterDebugger(); +DECLARE_RUNTIME_FUNCTION(Debug_Break); - // Check whether the debugger could be entered. - inline bool FailedToEnter() { return load_failed_; } - // Check whether there are any JavaScript frames on the stack. - inline bool HasJavaScriptFrames() { return has_js_frames_; } +// This scope is used to load and enter the debug context and create a new +// break state. Leaving the scope will restore the previous state. +// On failure to load, FailedToEnter returns true. +class DebugScope BASE_EMBEDDED { + public: + explicit DebugScope(Debug* debug); + ~DebugScope(); + + // Check whether loading was successful. + inline bool failed() { return failed_; } // Get the active context from before entering the debugger. inline Handle<Context> GetContext() { return save_.context(); } private: - Isolate* isolate_; - EnterDebugger* prev_; // Previous debugger entry if entered recursively. - JavaScriptFrameIterator it_; - const bool has_js_frames_; // Were there any JavaScript frames? + Isolate* isolate() { return debug_->isolate_; } + + Debug* debug_; + DebugScope* prev_; // Previous scope if entered recursively. StackFrame::Id break_frame_id_; // Previous break frame id. - int break_id_; // Previous break id. - bool load_failed_; // Did the debugger fail to load? - SaveContext save_; // Saves previous context. + int break_id_; // Previous break id. + bool failed_; // Did the debug context fail to load? + SaveContext save_; // Saves previous context. + PostponeInterruptsScope no_termination_exceptons_; }; // Stack allocated class for disabling break. 
class DisableBreak BASE_EMBEDDED { public: - explicit DisableBreak(Isolate* isolate, bool disable_break) - : isolate_(isolate) { - prev_disable_break_ = isolate_->debug()->disable_break(); - isolate_->debug()->set_disable_break(disable_break); - } - ~DisableBreak() { - isolate_->debug()->set_disable_break(prev_disable_break_); + explicit DisableBreak(Debug* debug, bool disable_break) + : debug_(debug), old_state_(debug->break_disabled_) { + debug_->break_disabled_ = disable_break; } + ~DisableBreak() { debug_->break_disabled_ = old_state_; } private: - Isolate* isolate_; - // The previous state of the disable break used to restore the value when this - // object is destructed. - bool prev_disable_break_; + Debug* debug_; + bool old_state_; + DISALLOW_COPY_AND_ASSIGN(DisableBreak); }; -// Debug_Address encapsulates the Address pointers used in generating debug -// code. -class Debug_Address { +class SuppressDebug BASE_EMBEDDED { public: - explicit Debug_Address(Debug::AddressId id) : id_(id) { } - - static Debug_Address AfterBreakTarget() { - return Debug_Address(Debug::k_after_break_target_address); - } - - static Debug_Address DebugBreakReturn() { - return Debug_Address(Debug::k_debug_break_return_address); - } - - static Debug_Address RestarterFrameFunctionPointer() { - return Debug_Address(Debug::k_restarter_frame_function_pointer); - } - - Address address(Isolate* isolate) const { - Debug* debug = isolate->debug(); - switch (id_) { - case Debug::k_after_break_target_address: - return reinterpret_cast<Address>(debug->after_break_target_address()); - case Debug::k_debug_break_return_address: - return reinterpret_cast<Address>(debug->debug_break_return_address()); - case Debug::k_debug_break_slot_address: - return reinterpret_cast<Address>(debug->debug_break_slot_address()); - case Debug::k_restarter_frame_function_pointer: - return reinterpret_cast<Address>( - debug->restarter_frame_function_pointer_address()); - default: - UNREACHABLE(); - return NULL; - } + 
explicit SuppressDebug(Debug* debug) + : debug_(debug), old_state_(debug->is_suppressed_) { + debug_->is_suppressed_ = true; } + ~SuppressDebug() { debug_->is_suppressed_ = old_state_; } private: - Debug::AddressId id_; + Debug* debug_; + bool old_state_; + DISALLOW_COPY_AND_ASSIGN(SuppressDebug); }; -// The optional thread that Debug Agent may use to temporary call V8 to process -// pending debug requests if debuggee is not running V8 at the moment. -// Techincally it does not call V8 itself, rather it asks embedding program -// to do this via v8::Debug::HostDispatchHandler -class MessageDispatchHelperThread: public Thread { - public: - explicit MessageDispatchHelperThread(Isolate* isolate); - ~MessageDispatchHelperThread() {} - - void Schedule(); - private: - void Run(); - - Isolate* isolate_; - Semaphore sem_; - Mutex mutex_; - bool already_signalled_; +// Code generator routines. +class DebugCodegen : public AllStatic { + public: + static void GenerateSlot(MacroAssembler* masm); + static void GenerateCallICStubDebugBreak(MacroAssembler* masm); + static void GenerateLoadICDebugBreak(MacroAssembler* masm); + static void GenerateStoreICDebugBreak(MacroAssembler* masm); + static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm); + static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm); + static void GenerateCompareNilICDebugBreak(MacroAssembler* masm); + static void GenerateReturnDebugBreak(MacroAssembler* masm); + static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm); + static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm); + static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm); + static void GenerateSlotDebugBreak(MacroAssembler* masm); + static void GeneratePlainReturnLiveEdit(MacroAssembler* masm); - DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread); + // FrameDropper is a code replacement for a JavaScript frame with possibly + // several frames above. 
+ // There is no calling conventions here, because it never actually gets + // called, it only gets returned to. + static void GenerateFrameDropperLiveEdit(MacroAssembler* masm); }; } } // namespace v8::internal -#endif // ENABLE_DEBUGGER_SUPPORT - #endif // V8_DEBUG_H_ diff -Nru nodejs-0.11.13/deps/v8/src/deoptimizer.cc nodejs-0.11.15/deps/v8/src/deoptimizer.cc --- nodejs-0.11.13/deps/v8/src/deoptimizer.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/deoptimizer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "accessors.h" -#include "codegen.h" -#include "deoptimizer.h" -#include "disasm.h" -#include "full-codegen.h" -#include "global-handles.h" -#include "macro-assembler.h" -#include "prettyprinter.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/accessors.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/disasm.h" +#include "src/full-codegen.h" +#include "src/global-handles.h" +#include "src/macro-assembler.h" +#include "src/prettyprinter.h" namespace v8 { @@ -42,7 +19,7 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), - OS::CommitPageSize(), + base::OS::CommitPageSize(), #if defined(__native_client__) // The Native Client port of V8 uses an interpreter, // so code pages don't need PROT_EXEC. 
@@ -56,9 +33,7 @@ DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator) : allocator_(allocator), -#ifdef ENABLE_DEBUGGER_SUPPORT deoptimized_frame_info_(NULL), -#endif current_(NULL) { for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) { deopt_entry_code_entries_[i] = -1; @@ -75,13 +50,11 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void DeoptimizerData::Iterate(ObjectVisitor* v) { if (deoptimized_frame_info_ != NULL) { deoptimized_frame_info_->Iterate(v); } } -#endif Code* Deoptimizer::FindDeoptimizingCode(Address addr) { @@ -91,7 +64,7 @@ Object* element = native_context->DeoptimizedCodeListHead(); while (!element->IsUndefined()) { Code* code = Code::cast(element); - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); + CHECK(code->kind() == Code::OPTIMIZED_FUNCTION); if (code->contains(addr)) return code; element = code->next_code_link(); } @@ -115,7 +88,7 @@ from, fp_to_sp_delta, NULL); - ASSERT(isolate->deoptimizer_data()->current_ == NULL); + CHECK(isolate->deoptimizer_data()->current_ == NULL); isolate->deoptimizer_data()->current_ = deoptimizer; return deoptimizer; } @@ -128,7 +101,7 @@ size_t Deoptimizer::GetMaxDeoptTableSize() { int entries_size = Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_; - int commit_page_size = static_cast<int>(OS::CommitPageSize()); + int commit_page_size = static_cast<int>(base::OS::CommitPageSize()); int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) / commit_page_size) + 1; return static_cast<size_t>(commit_page_size * page_count); @@ -137,7 +110,7 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { Deoptimizer* result = isolate->deoptimizer_data()->current_; - ASSERT(result != NULL); + CHECK_NE(result, NULL); result->DeleteFrameDescriptions(); isolate->deoptimizer_data()->current_ = NULL; return result; @@ -160,13 +133,12 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( JavaScriptFrame* frame, int jsframe_index, Isolate* 
isolate) { - ASSERT(frame->is_optimized()); - ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL); + CHECK(frame->is_optimized()); + CHECK(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL); // Get the function and code from the frame. JSFunction* function = frame->function(); @@ -176,7 +148,7 @@ // return address must be at a place in the code with deoptimization support. SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc()); int deoptimization_index = safepoint_entry.deoptimization_index(); - ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex); + CHECK_NE(deoptimization_index, Safepoint::kNoDeoptimizationIndex); // Always use the actual stack slots when calculating the fp to sp // delta adding two for the function and context. @@ -199,7 +171,7 @@ // Create the GC safe output frame information and register it for GC // handling. - ASSERT_LT(jsframe_index, deoptimizer->jsframe_count()); + CHECK_LT(jsframe_index, deoptimizer->jsframe_count()); // Convert JS frame index into frame index. int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index); @@ -251,11 +223,11 @@ void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info, Isolate* isolate) { - ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info); + CHECK_EQ(isolate->deoptimizer_data()->deoptimized_frame_info_, info); delete info; isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL; } -#endif + void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, int count, @@ -269,7 +241,7 @@ Context* context, OptimizedFunctionVisitor* visitor) { DisallowHeapAllocation no_allocation; - ASSERT(context->IsNativeContext()); + CHECK(context->IsNativeContext()); visitor->EnterContext(context); @@ -292,13 +264,13 @@ context->SetOptimizedFunctionsListHead(next); } // The visitor should not alter the link directly. 
- ASSERT(function->next_function_link() == next); + CHECK_EQ(function->next_function_link(), next); // Set the next function link to undefined to indicate it is no longer // in the optimized functions list. function->set_next_function_link(context->GetHeap()->undefined_value()); } else { // The visitor should not alter the link directly. - ASSERT(function->next_function_link() == next); + CHECK_EQ(function->next_function_link(), next); // preserve this element. prev = function; } @@ -380,8 +352,11 @@ } SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); int deopt_index = safepoint.deoptimization_index(); - bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex; - CHECK(topmost_optimized_code == NULL || safe_to_deopt); + // Turbofan deopt is checked when we are patching addresses on stack. + bool turbofanned = code->is_turbofanned(); + bool safe_to_deopt = + deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned; + CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned); if (topmost_optimized_code == NULL) { topmost_optimized_code = code; safe_to_deopt_topmost_optimized_code = safe_to_deopt; @@ -400,8 +375,9 @@ Object* element = context->OptimizedCodeListHead(); while (!element->IsUndefined()) { Code* code = Code::cast(element); - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); + CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); Object* next = code->next_code_link(); + if (code->marked_for_deoptimization()) { // Put the code into the list for later patching. codes.Add(code, &zone); @@ -424,6 +400,10 @@ element = next; } + if (FLAG_turbo_deoptimization) { + PatchStackForMarkedCode(isolate); + } + // TODO(titzer): we need a handle scope only because of the macro assembler, // which is only used in EnsureCodeForDeoptimizationEntry. 
HandleScope scope(isolate); @@ -432,17 +412,81 @@ for (int i = 0; i < codes.length(); i++) { #ifdef DEBUG if (codes[i] == topmost_optimized_code) { - ASSERT(safe_to_deopt_topmost_optimized_code); + DCHECK(safe_to_deopt_topmost_optimized_code); } #endif // It is finally time to die, code object. + + // Remove the code from optimized code map. + DeoptimizationInputData* deopt_data = + DeoptimizationInputData::cast(codes[i]->deoptimization_data()); + SharedFunctionInfo* shared = + SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); + shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code"); + // Do platform-specific patching to force any activations to lazy deopt. - PatchCodeForDeoptimization(isolate, codes[i]); + // + // We skip patching Turbofan code - we patch return addresses on stack. + // TODO(jarin) We should still zap the code object (but we have to + // be careful not to zap the deoptimization block). + if (!codes[i]->is_turbofanned()) { + PatchCodeForDeoptimization(isolate, codes[i]); + + // We might be in the middle of incremental marking with compaction. + // Tell collector to treat this code object in a special way and + // ignore all slots that might have been recorded on it. + isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]); + } + } +} + - // We might be in the middle of incremental marking with compaction. - // Tell collector to treat this code object in a special way and - // ignore all slots that might have been recorded on it. 
- isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]); +static int FindPatchAddressForReturnAddress(Code* code, int pc) { + DeoptimizationInputData* input_data = + DeoptimizationInputData::cast(code->deoptimization_data()); + int patch_count = input_data->ReturnAddressPatchCount(); + for (int i = 0; i < patch_count; i++) { + int return_pc = input_data->ReturnAddressPc(i)->value(); + int patch_pc = input_data->PatchedAddressPc(i)->value(); + // If the supplied pc matches the return pc or if the address + // has been already patched, return the patch pc. + if (pc == return_pc || pc == patch_pc) { + return patch_pc; + } + } + return -1; +} + + +// For all marked Turbofanned code on stack, change the return address to go +// to the deoptimization block. +void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) { + // TODO(jarin) We should tolerate missing patch entry for the topmost frame. + for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done(); + it.Advance()) { + StackFrame::Type type = it.frame()->type(); + if (type == StackFrame::OPTIMIZED) { + Code* code = it.frame()->LookupCode(); + if (code->is_turbofanned() && code->marked_for_deoptimization()) { + JSFunction* function = + static_cast<OptimizedFrame*>(it.frame())->function(); + Address* pc_address = it.frame()->pc_address(); + int pc_offset = + static_cast<int>(*pc_address - code->instruction_start()); + int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset); + + if (FLAG_trace_deopt) { + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintF(scope.file(), "[patching stack address for function: "); + function->PrintName(scope.file()); + PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset, + new_pc_offset); + } + + CHECK_LE(0, new_pc_offset); + *pc_address += new_pc_offset - pc_offset; + } + } } } @@ -487,9 +531,11 @@ reinterpret_cast<intptr_t>(object)); } if (object->IsJSGlobalProxy()) { - Object* proto = object->GetPrototype(); - 
ASSERT(proto->IsJSGlobalObject()); - Context* native_context = GlobalObject::cast(proto)->native_context(); + PrototypeIterator iter(object->GetIsolate(), object); + // TODO(verwaest): This CHECK will be hit if the global proxy is detached. + CHECK(iter.GetCurrent()->IsJSGlobalObject()); + Context* native_context = + GlobalObject::cast(iter.GetCurrent())->native_context(); MarkAllCodeForContext(native_context); DeoptimizeMarkedCodeForContext(native_context); } else if (object->IsGlobalObject()) { @@ -504,7 +550,7 @@ Object* element = context->OptimizedCodeListHead(); while (!element->IsUndefined()) { Code* code = Code::cast(element); - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); + CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); code->set_marked_for_deoptimization(true); element = code->next_code_link(); } @@ -539,7 +585,7 @@ ? FLAG_trace_stub_failures : FLAG_trace_deopt; } - UNREACHABLE(); + FATAL("Unsupported deopt type"); return false; } @@ -551,7 +597,7 @@ case LAZY: return "lazy"; case DEBUGGER: return "debugger"; } - UNREACHABLE(); + FATAL("Unsupported deopt type"); return NULL; } @@ -590,7 +636,7 @@ if (function->IsSmi()) { function = NULL; } - ASSERT(from != NULL); + DCHECK(from != NULL); if (function != NULL && function->IsOptimized()) { function->shared()->increment_deopt_count(); if (bailout_type_ == Deoptimizer::SOFT) { @@ -605,9 +651,9 @@ compiled_code_ = FindOptimizedCode(function, optimized_code); #if DEBUG - ASSERT(compiled_code_ != NULL); + DCHECK(compiled_code_ != NULL); if (type == EAGER || type == SOFT || type == LAZY) { - ASSERT(compiled_code_->kind() != Code::FUNCTION); + DCHECK(compiled_code_->kind() != Code::FUNCTION); } #endif @@ -638,10 +684,10 @@ : compiled_code; } case Deoptimizer::DEBUGGER: - ASSERT(optimized_code->contains(from_)); + DCHECK(optimized_code->contains(from_)); return optimized_code; } - UNREACHABLE(); + FATAL("Could not find code for optimized function"); return NULL; } @@ -657,8 +703,8 @@ 
Deoptimizer::~Deoptimizer() { - ASSERT(input_ == NULL && output_ == NULL); - ASSERT(disallow_heap_allocation_ == NULL); + DCHECK(input_ == NULL && output_ == NULL); + DCHECK(disallow_heap_allocation_ == NULL); delete trace_scope_; } @@ -684,15 +730,15 @@ int id, BailoutType type, GetEntryMode mode) { - ASSERT(id >= 0); + CHECK_GE(id, 0); if (id >= kMaxNumberOfEntries) return NULL; if (mode == ENSURE_ENTRY_CODE) { EnsureCodeForDeoptimizationEntry(isolate, type, id); } else { - ASSERT(mode == CALCULATE_ENTRY_ADDRESS); + CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS); } DeoptimizerData* data = isolate->deoptimizer_data(); - ASSERT(type < kBailoutTypesWithCodeEntry); + CHECK_LT(type, kBailoutTypesWithCodeEntry); MemoryChunk* base = data->deopt_entry_code_[type]; return base->area_start() + (id * table_entry_size_); } @@ -709,7 +755,7 @@ addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { return kNotDeoptimizationEntry; } - ASSERT_EQ(0, + DCHECK_EQ(0, static_cast<int>(addr - start) % table_entry_size_); return static_cast<int>(addr - start) / table_entry_size_; } @@ -727,13 +773,10 @@ return data->PcAndState(i)->value(); } } - PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt()); - PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get()); - // Print the source code if available. 
- HeapStringAllocator string_allocator; - StringStream stream(&string_allocator); - shared->SourceCodePrint(&stream, -1); - PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get()); + OFStream os(stderr); + os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n" + << "[method: " << shared->DebugName()->ToCString().get() << "]\n" + << "[source:\n" << SourceCodeOf(shared) << "\n]" << endl; FATAL("unable to find pc offset during deoptimization"); return -1; @@ -749,7 +792,7 @@ Object* element = native_context->DeoptimizedCodeListHead(); while (!element->IsUndefined()) { Code* code = Code::cast(element); - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION); length++; element = code->next_code_link(); } @@ -767,7 +810,7 @@ compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { LOG(isolate(), CodeDeoptEvent(compiled_code_)); } - ElapsedTimer timer; + base::ElapsedTimer timer; // Determine basic deoptimization information. The optimized frame is // described by the input data. @@ -786,7 +829,8 @@ input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_); - if (bailout_type_ == EAGER || bailout_type_ == SOFT) { + if (bailout_type_ == EAGER || bailout_type_ == SOFT || + (compiled_code_->is_hydrogen_stub())) { compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_); } } @@ -800,13 +844,13 @@ TranslationIterator iterator(translations, translation_index); Translation::Opcode opcode = static_cast<Translation::Opcode>(iterator.Next()); - ASSERT(Translation::BEGIN == opcode); + DCHECK(Translation::BEGIN == opcode); USE(opcode); // Read the number of output frames and allocate an array for their // descriptions. int count = iterator.Next(); iterator.Next(); // Drop JS frames count. 
- ASSERT(output_ == NULL); + DCHECK(output_ == NULL); output_ = new FrameDescription*[count]; for (int i = 0; i < count; ++i) { output_[i] = NULL; @@ -855,7 +899,7 @@ case Translation::LITERAL: case Translation::ARGUMENTS_OBJECT: default: - UNREACHABLE(); + FATAL("Unsupported translation"); break; } } @@ -894,7 +938,7 @@ } else { int closure_id = iterator->Next(); USE(closure_id); - ASSERT_EQ(Translation::kSelfLiteralId, closure_id); + CHECK_EQ(Translation::kSelfLiteralId, closure_id); function = function_; } unsigned height = iterator->Next(); @@ -919,8 +963,8 @@ bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); - ASSERT(frame_index >= 0 && frame_index < output_count_); - ASSERT(output_[frame_index] == NULL); + CHECK(frame_index >= 0 && frame_index < output_count_); + CHECK_EQ(output_[frame_index], NULL); output_[frame_index] = output_frame; // The top address for the bottommost output frame can be computed from @@ -931,7 +975,10 @@ intptr_t top_address; if (is_bottommost) { // Determine whether the input frame contains alignment padding. - has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0; + has_alignment_padding_ = + (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function)) + ? 1 + : 0; // 2 = context and function in the frame. 
// If the optimized frame had alignment padding, adjust the frame pointer // to point to the new position of the old frame pointer after padding @@ -991,7 +1038,7 @@ } output_frame->SetCallerFp(output_offset, value); intptr_t fp_value = top_address + output_offset; - ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) + + DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) + has_alignment_padding_ * kPointerSize) == fp_value); output_frame->SetFp(fp_value); if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value); @@ -1001,7 +1048,7 @@ V8PRIxPTR " ; caller's fp\n", fp_value, output_offset, value); } - ASSERT(!is_bottommost || !has_alignment_padding_ || + DCHECK(!is_bottommost || !has_alignment_padding_ || (fp_value & kPointerSize) != 0); if (FLAG_enable_ool_constant_pool) { @@ -1050,7 +1097,7 @@ value = reinterpret_cast<intptr_t>(function); // The function for the bottommost output frame should also agree with the // input frame. - ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value); output_frame->SetFrameSlot(output_offset, value); if (trace_scope_ != NULL) { PrintF(trace_scope_->file(), @@ -1064,7 +1111,7 @@ output_offset -= kPointerSize; DoTranslateCommand(iterator, frame_index, output_offset); } - ASSERT(0 == output_offset); + CHECK_EQ(0, output_offset); // Compute this frame's PC, state, and continuation. Code* non_optimized_code = function->shared()->code(); @@ -1101,7 +1148,7 @@ } else if (bailout_type_ == SOFT) { continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized); } else { - ASSERT(bailout_type_ == EAGER); + CHECK_EQ(bailout_type_, EAGER); } output_frame->SetContinuation( reinterpret_cast<intptr_t>(continuation->entry())); @@ -1128,8 +1175,8 @@ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); // Arguments adaptor can not be topmost or bottommost. 
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); + CHECK(frame_index > 0 && frame_index < output_count_ - 1); + CHECK(output_[frame_index] == NULL); output_[frame_index] = output_frame; // The top address of the frame is computed from the previous @@ -1216,7 +1263,7 @@ top_address + output_offset, output_offset, value, height - 1); } - ASSERT(0 == output_offset); + DCHECK(0 == output_offset); Builtins* builtins = isolate_->builtins(); Code* adaptor_trampoline = @@ -1254,8 +1301,8 @@ output_frame->SetFrameType(StackFrame::CONSTRUCT); // Construct stub can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); + DCHECK(frame_index > 0 && frame_index < output_count_ - 1); + DCHECK(output_[frame_index] == NULL); output_[frame_index] = output_frame; // The top address of the frame is computed from the previous @@ -1275,7 +1322,7 @@ // receiver parameter through the translation. It might be encoding // a captured object, patch the slot address for a captured object. if (i == 0 && deferred_objects_.length() > deferred_object_index) { - ASSERT(!deferred_objects_[deferred_object_index].is_arguments()); + CHECK(!deferred_objects_[deferred_object_index].is_arguments()); deferred_objects_[deferred_object_index].patch_slot_address(top_address); } } @@ -1386,7 +1433,7 @@ top_address + output_offset, output_offset, value); } - ASSERT(0 == output_offset); + CHECK_EQ(0, output_offset); intptr_t pc = reinterpret_cast<intptr_t>( construct_stub->instruction_start() + @@ -1432,8 +1479,8 @@ output_frame->SetFrameType(StackFrame::INTERNAL); // A frame for an accessor stub can not be the topmost or bottommost one. 
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); + CHECK(frame_index > 0 && frame_index < output_count_ - 1); + CHECK_EQ(output_[frame_index], NULL); output_[frame_index] = output_frame; // The top address of the frame is computed from the previous frame's top and @@ -1517,9 +1564,7 @@ } // Skip receiver. - Translation::Opcode opcode = - static_cast<Translation::Opcode>(iterator->Next()); - iterator->Skip(Translation::NumberOfOperandsFor(opcode)); + DoTranslateObjectAndSkip(iterator); if (is_setter_stub_frame) { // The implicit return value was part of the artificial setter stub @@ -1528,7 +1573,7 @@ DoTranslateCommand(iterator, frame_index, output_offset); } - ASSERT(0 == output_offset); + CHECK_EQ(output_offset, 0); Smi* offset = is_setter_stub_frame ? isolate_->heap()->setter_stub_deopt_pc_offset() : @@ -1578,19 +1623,23 @@ // reg = JSFunction context // - ASSERT(compiled_code_->is_crankshafted() && - compiled_code_->kind() != Code::OPTIMIZED_FUNCTION); - int major_key = compiled_code_->major_key(); + CHECK(compiled_code_->is_hydrogen_stub()); + int major_key = CodeStub::GetMajorKey(compiled_code_); CodeStubInterfaceDescriptor* descriptor = isolate_->code_stub_interface_descriptor(major_key); + // Check that there is a matching descriptor to the major key. + // This will fail if there has not been one installed to the isolate. + DCHECK_EQ(descriptor->MajorKey(), major_key); // The output frame must have room for all pushed register parameters // and the standard stack frame slots. Include space for an argument // object to the callee and optionally the space to pass the argument // object to the stub failure handler. 
- ASSERT(descriptor->register_param_count_ >= 0); - int height_in_bytes = kPointerSize * descriptor->register_param_count_ + - sizeof(Arguments) + kPointerSize; + int param_count = descriptor->GetEnvironmentParameterCount(); + CHECK_GE(param_count, 0); + + int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) + + kPointerSize; int fixed_frame_size = StandardFrameConstants::kFixedFrameSize; int input_frame_size = input_->GetFrameSize(); int output_frame_size = height_in_bytes + fixed_frame_size; @@ -1605,7 +1654,7 @@ FrameDescription* output_frame = new(output_frame_size) FrameDescription(output_frame_size, NULL); output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE); - ASSERT(frame_index == 0); + CHECK_EQ(frame_index, 0); output_[frame_index] = output_frame; // The top address for the output frame can be computed from the input @@ -1663,7 +1712,7 @@ output_frame->SetRegister(context_reg.code(), value); output_frame_offset -= kPointerSize; output_frame->SetFrameSlot(output_frame_offset, value); - ASSERT(reinterpret_cast<Object*>(value)->IsContext()); + CHECK(reinterpret_cast<Object*>(value)->IsContext()); if (trace_scope_ != NULL) { PrintF(trace_scope_->file(), " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" @@ -1684,7 +1733,7 @@ } intptr_t caller_arg_count = 0; - bool arg_count_known = !descriptor->stack_parameter_count_.is_valid(); + bool arg_count_known = !descriptor->stack_parameter_count().is_valid(); // Build the Arguments object for the caller's parameters and a pointer to it. output_frame_offset -= kPointerSize; @@ -1732,19 +1781,20 @@ // Copy the register parameters to the failure frame. 
int arguments_length_offset = -1; - for (int i = 0; i < descriptor->register_param_count_; ++i) { + for (int i = 0; i < param_count; ++i) { output_frame_offset -= kPointerSize; DoTranslateCommand(iterator, 0, output_frame_offset); - if (!arg_count_known && descriptor->IsParameterCountRegister(i)) { + if (!arg_count_known && + descriptor->IsEnvironmentParameterCountRegister(i)) { arguments_length_offset = output_frame_offset; } } - ASSERT(0 == output_frame_offset); + CHECK_EQ(output_frame_offset, 0); if (!arg_count_known) { - ASSERT(arguments_length_offset >= 0); + CHECK_GE(arguments_length_offset, 0); // We know it's a smi because 1) the code stub guarantees the stack // parameter count is in smi range, and 2) the DoTranslateCommand in the // parameter loop above translated that to a tagged value. @@ -1779,10 +1829,10 @@ // Compute this frame's PC, state, and continuation. Code* trampoline = NULL; - StubFunctionMode function_mode = descriptor->function_mode_; - StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline, - isolate_); - ASSERT(trampoline != NULL); + StubFunctionMode function_mode = descriptor->function_mode(); + StubFailureTrampolineStub(isolate_, + function_mode).FindCodeInCache(&trampoline); + DCHECK(trampoline != NULL); output_frame->SetPc(reinterpret_cast<intptr_t>( trampoline->instruction_start())); if (FLAG_enable_ool_constant_pool) { @@ -1794,7 +1844,8 @@ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value); } output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS)); - Code* notify_failure = NotifyStubFailureBuiltin(); + Code* notify_failure = + isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles); output_frame->SetContinuation( reinterpret_cast<intptr_t>(notify_failure->entry())); } @@ -1828,7 +1879,7 @@ Handle<JSObject> arguments = isolate_->factory()->NewArgumentsObject(function, length); Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length); - 
ASSERT(array->length() == length); + DCHECK_EQ(array->length(), length); arguments->set_elements(*array); materialized_objects_->Add(arguments); for (int i = 0; i < length; ++i) { @@ -1840,11 +1891,13 @@ // We also need to make sure that the representation of all fields // in the given object are general enough to hold a tagged value. Handle<Map> map = Map::GeneralizeAllFieldRepresentations( - Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged()); + Handle<Map>::cast(MaterializeNextValue())); switch (map->instance_type()) { + case MUTABLE_HEAP_NUMBER_TYPE: case HEAP_NUMBER_TYPE: { // Reuse the HeapNumber value directly as it is already properly - // tagged and skip materializing the HeapNumber explicitly. + // tagged and skip materializing the HeapNumber explicitly. Turn mutable + // heap numbers immutable. Handle<Object> object = MaterializeNextValue(); if (object_index < prev_materialized_count_) { materialized_objects_->Add(Handle<Object>( @@ -1870,7 +1923,8 @@ object->set_elements(FixedArrayBase::cast(*elements)); for (int i = 0; i < length - 3; ++i) { Handle<Object> value = MaterializeNextValue(); - object->FastPropertyAtPut(i, *value); + FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i); + object->FastPropertyAtPut(index, *value); } break; } @@ -1894,7 +1948,7 @@ default: PrintF(stderr, "[couldn't handle instance type %d]\n", map->instance_type()); - UNREACHABLE(); + FATAL("Unsupported instance type"); } } @@ -1905,6 +1959,9 @@ Handle<Object> Deoptimizer::MaterializeNextValue() { int value_index = materialization_value_index_++; Handle<Object> value = materialized_values_->at(value_index); + if (value->IsMutableHeapNumber()) { + HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map()); + } if (*value == isolate_->heap()->arguments_marker()) { value = MaterializeNextHeapObject(); } @@ -1913,7 +1970,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) { - ASSERT_NE(DEBUGGER, bailout_type_); + 
DCHECK_NE(DEBUGGER, bailout_type_); MaterializedObjectStore* materialized_store = isolate_->materialized_object_store(); @@ -1966,7 +2023,7 @@ d.value(), d.destination()); } - ASSERT(values.at(d.destination())->IsTheHole()); + DCHECK(values.at(d.destination())->IsTheHole()); values.Set(d.destination(), num); } @@ -1988,7 +2045,9 @@ // materialize a new instance of the object if necessary. Store // the materialized object into the frame slot. Handle<Object> object = MaterializeNextHeapObject(); - Memory::Object_at(descriptor.slot_address()) = *object; + if (descriptor.slot_address() != NULL) { + Memory::Object_at(descriptor.slot_address()) = *object; + } if (trace_scope_ != NULL) { if (descriptor.is_arguments()) { PrintF(trace_scope_->file(), @@ -2007,8 +2066,8 @@ } } - ASSERT(materialization_object_index_ == materialized_objects_->length()); - ASSERT(materialization_value_index_ == materialized_values_->length()); + CHECK_EQ(materialization_object_index_, materialized_objects_->length()); + CHECK_EQ(materialization_value_index_, materialized_values_->length()); } if (prev_materialized_count_ > 0) { @@ -2017,14 +2076,13 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( Address parameters_top, uint32_t parameters_size, Address expressions_top, uint32_t expressions_size, DeoptimizedFrameInfo* info) { - ASSERT_EQ(DEBUGGER, bailout_type_); + CHECK_EQ(DEBUGGER, bailout_type_); Address parameters_bottom = parameters_top + parameters_size; Address expressions_bottom = expressions_top + expressions_size; for (int i = 0; i < deferred_heap_numbers_.length(); i++) { @@ -2070,7 +2128,6 @@ } } } -#endif static const char* TraceValueType(bool is_smi) { @@ -2082,6 +2139,73 @@ } +void Deoptimizer::DoTranslateObjectAndSkip(TranslationIterator* iterator) { + Translation::Opcode opcode = + static_cast<Translation::Opcode>(iterator->Next()); + + switch (opcode) { + case Translation::BEGIN: + case Translation::JS_FRAME: + case 
Translation::ARGUMENTS_ADAPTOR_FRAME: + case Translation::CONSTRUCT_STUB_FRAME: + case Translation::GETTER_STUB_FRAME: + case Translation::SETTER_STUB_FRAME: + case Translation::COMPILED_STUB_FRAME: { + FATAL("Unexpected frame start translation opcode"); + return; + } + + case Translation::REGISTER: + case Translation::INT32_REGISTER: + case Translation::UINT32_REGISTER: + case Translation::DOUBLE_REGISTER: + case Translation::STACK_SLOT: + case Translation::INT32_STACK_SLOT: + case Translation::UINT32_STACK_SLOT: + case Translation::DOUBLE_STACK_SLOT: + case Translation::LITERAL: { + // The value is not part of any materialized object, so we can ignore it. + iterator->Skip(Translation::NumberOfOperandsFor(opcode)); + return; + } + + case Translation::DUPLICATED_OBJECT: { + int object_index = iterator->Next(); + if (trace_scope_ != NULL) { + PrintF(trace_scope_->file(), " skipping object "); + PrintF(trace_scope_->file(), + " ; duplicate of object #%d\n", object_index); + } + AddObjectDuplication(0, object_index); + return; + } + + case Translation::ARGUMENTS_OBJECT: + case Translation::CAPTURED_OBJECT: { + int length = iterator->Next(); + bool is_args = opcode == Translation::ARGUMENTS_OBJECT; + if (trace_scope_ != NULL) { + PrintF(trace_scope_->file(), " skipping object "); + PrintF(trace_scope_->file(), + " ; object (length = %d, is_args = %d)\n", length, is_args); + } + + AddObjectStart(0, length, is_args); + + // We save the object values on the side and materialize the actual + // object after the deoptimized frame is built. 
+ int object_index = deferred_objects_.length() - 1; + for (int i = 0; i < length; i++) { + DoTranslateObject(iterator, object_index, i); + } + return; + } + } + + FATAL("Unexpected translation opcode"); +} + + void Deoptimizer::DoTranslateObject(TranslationIterator* iterator, int object_index, int field_index) { @@ -2099,7 +2223,7 @@ case Translation::GETTER_STUB_FRAME: case Translation::SETTER_STUB_FRAME: case Translation::COMPILED_STUB_FRAME: - UNREACHABLE(); + FATAL("Unexpected frame start translation opcode"); return; case Translation::REGISTER: { @@ -2339,6 +2463,8 @@ return; } } + + FATAL("Unexpected translation opcode"); } @@ -2360,7 +2486,7 @@ case Translation::GETTER_STUB_FRAME: case Translation::SETTER_STUB_FRAME: case Translation::COMPILED_STUB_FRAME: - UNREACHABLE(); + FATAL("Unexpected translation opcode"); return; case Translation::REGISTER: { @@ -2632,13 +2758,11 @@ // function into account so we have to avoid double counting them. unsigned result = fixed_size + fp_to_sp_delta_ - StandardFrameConstants::kFixedFrameSizeFromFp; -#ifdef DEBUG if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { unsigned stack_slots = compiled_code_->stack_slots(); unsigned outgoing_size = ComputeOutgoingArgumentSize(); - ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size); + CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size); } -#endif return result; } @@ -2655,7 +2779,7 @@ // The incoming arguments is the values for formal parameters and // the receiver. Every slot contains a pointer. if (function->IsSmi()) { - ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB)); + CHECK_EQ(Smi::cast(function), Smi::FromInt(StackFrame::STUB)); return 0; } unsigned arguments = function->shared()->formal_parameter_count() + 1; @@ -2720,28 +2844,28 @@ // cause us to emit relocation information for the external // references. This is fine because the deoptimizer's code section // isn't meant to be serialized at all. 
- ASSERT(type == EAGER || type == SOFT || type == LAZY); + CHECK(type == EAGER || type == SOFT || type == LAZY); DeoptimizerData* data = isolate->deoptimizer_data(); int entry_count = data->deopt_entry_code_entries_[type]; if (max_entry_id < entry_count) return; entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); while (max_entry_id >= entry_count) entry_count *= 2; - ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); + CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries); MacroAssembler masm(isolate, NULL, 16 * KB); masm.set_emit_debug_code(false); GenerateDeoptimizationEntries(&masm, entry_count, type); CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); MemoryChunk* chunk = data->deopt_entry_code_[type]; - ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= - desc.instr_size); + CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= + desc.instr_size); chunk->CommitArea(desc.instr_size); CopyBytes(chunk->area_start(), desc.buffer, static_cast<size_t>(desc.instr_size)); - CPU::FlushICache(chunk->area_start(), desc.instr_size); + CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size); data->deopt_entry_code_entries_[type] = entry_count; } @@ -2804,15 +2928,15 @@ case StackFrame::STUB: return -1; // Minus receiver. default: - UNREACHABLE(); + FATAL("Unexpected stack frame type"); return 0; } } Object* FrameDescription::GetParameter(int index) { - ASSERT(index >= 0); - ASSERT(index < ComputeParametersCount()); + CHECK_GE(index, 0); + CHECK_LT(index, ComputeParametersCount()); // The slot indexes for incoming arguments are negative. 
unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount()); return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset)); @@ -2820,14 +2944,14 @@ unsigned FrameDescription::GetExpressionCount() { - ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_); + CHECK_EQ(StackFrame::JAVA_SCRIPT, type_); unsigned size = GetFrameSize() - ComputeFixedSize(); return size / kPointerSize; } Object* FrameDescription::GetExpression(int index) { - ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_); + DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_); unsigned offset = GetOffsetFromSlotIndex(index); return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset)); } @@ -2853,7 +2977,7 @@ // bit of zero (marks the end). uint32_t bits = 0; for (int i = 0; true; i += 7) { - ASSERT(HasNext()); + DCHECK(HasNext()); uint8_t next = buffer_->get(index_++); bits |= (next >> 1) << i; if ((next & 1) == 0) break; @@ -2868,8 +2992,7 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) { int length = contents_.length(); Handle<ByteArray> result = factory->NewByteArray(length, TENURED); - OS::MemCopy( - result->GetDataStartAddress(), contents_.ToVector().start(), length); + MemCopy(result->GetDataStartAddress(), contents_.ToVector().start(), length); return result; } @@ -3022,7 +3145,7 @@ case JS_FRAME: return 3; } - UNREACHABLE(); + FATAL("Unexpected translation type"); return -1; } @@ -3132,14 +3255,13 @@ TranslationIterator it(data->TranslationByteArray(), data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next()); - ASSERT(opcode == Translation::BEGIN); + CHECK_EQ(opcode, Translation::BEGIN); it.Next(); // Drop frame count. 
stack_frame_id_ = frame->fp(); int jsframe_count = it.Next(); - USE(jsframe_count); - ASSERT(jsframe_count > inlined_jsframe_index); + CHECK_GT(jsframe_count, inlined_jsframe_index); int jsframes_to_skip = inlined_jsframe_index; int number_of_slots = -1; // Number of slots inside our frame (yet unknown) bool should_deopt = false; @@ -3148,7 +3270,7 @@ bool processed = false; if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) { if (jsframes_to_skip == 0) { - ASSERT(Translation::NumberOfOperandsFor(opcode) == 2); + CHECK_EQ(Translation::NumberOfOperandsFor(opcode), 2); it.Skip(1); // literal id int height = it.Next(); @@ -3195,7 +3317,7 @@ // the nested slots of captured objects number_of_slots--; SlotRef& slot = slot_refs_.last(); - ASSERT(slot.Representation() != SlotRef::ARGUMENTS_OBJECT); + CHECK_NE(slot.Representation(), SlotRef::ARGUMENTS_OBJECT); number_of_slots += slot.GetChildrenCount(); if (slot.Representation() == SlotRef::DEFERRED_OBJECT || slot.Representation() == SlotRef::DUPLICATE_OBJECT) { @@ -3224,7 +3346,11 @@ return Handle<Object>(Memory::Object_at(addr_), isolate); case INT32: { +#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT + int value = Memory::int32_at(addr_ + kIntSize); +#else int value = Memory::int32_at(addr_); +#endif if (Smi::IsValid(value)) { return Handle<Object>(Smi::FromInt(value), isolate); } else { @@ -3233,7 +3359,11 @@ } case UINT32: { +#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT + uint32_t value = Memory::uint32_at(addr_ + kIntSize); +#else uint32_t value = Memory::uint32_at(addr_); +#endif if (value <= static_cast<uint32_t>(Smi::kMaxValue)) { return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate); } else { @@ -3269,7 +3399,7 @@ while (current_slot_ < first_slot_index_) { GetNext(isolate, 0); } - ASSERT(current_slot_ == first_slot_index_); + CHECK_EQ(current_slot_, first_slot_index_); } @@ -3331,8 +3461,8 @@ } case SlotRef::DEFERRED_OBJECT: { int length = slot.GetChildrenCount(); - 
ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL || - slot_refs_[current_slot_].Representation() == SlotRef::TAGGED); + CHECK(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL || + slot_refs_[current_slot_].Representation() == SlotRef::TAGGED); int object_index = materialized_objects_.length(); if (object_index < prev_materialized_count_) { @@ -3341,11 +3471,12 @@ Handle<Object> map_object = slot_refs_[current_slot_].GetValue(isolate); Handle<Map> map = Map::GeneralizeAllFieldRepresentations( - Handle<Map>::cast(map_object), Representation::Tagged()); + Handle<Map>::cast(map_object)); current_slot_++; // TODO(jarin) this should be unified with the code in // Deoptimizer::MaterializeNextHeapObject() switch (map->instance_type()) { + case MUTABLE_HEAP_NUMBER_TYPE: case HEAP_NUMBER_TYPE: { // Reuse the HeapNumber value directly as it is already properly // tagged and skip materializing the HeapNumber explicitly. @@ -3370,7 +3501,8 @@ object->set_elements(FixedArrayBase::cast(*elements)); for (int i = 0; i < length - 3; ++i) { Handle<Object> value = GetNext(isolate, lvl + 1); - object->FastPropertyAtPut(i, *value); + FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i); + object->FastPropertyAtPut(index, *value); } return object; } @@ -3393,6 +3525,7 @@ break; } UNREACHABLE(); + break; } case SlotRef::DUPLICATE_OBJECT: { @@ -3413,7 +3546,7 @@ void SlotRefValueBuilder::Finish(Isolate* isolate) { // We should have processed all the slots - ASSERT(slot_refs_.length() == current_slot_); + CHECK_EQ(slot_refs_.length(), current_slot_); if (materialized_objects_.length() > prev_materialized_count_) { // We have materialized some new objects, so we have to store them @@ -3434,7 +3567,7 @@ return Handle<FixedArray>::null(); } Handle<FixedArray> array = GetStackEntries(); - ASSERT(array->length() > index); + CHECK_GT(array->length(), index); return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate())); } @@ -3455,11 
+3588,11 @@ void MaterializedObjectStore::Remove(Address fp) { int index = StackIdToIndex(fp); - ASSERT(index >= 0); + CHECK_GE(index, 0); frame_fps_.Remove(index); Handle<FixedArray> array = GetStackEntries(); - ASSERT(array->length() > index); + CHECK_LT(index, array->length()); for (int i = index; i < frame_fps_.length(); i++) { array->set(i, array->get(i + 1)); } @@ -3505,7 +3638,6 @@ return new_array; } -#ifdef ENABLE_DEBUGGER_SUPPORT DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index, @@ -3527,7 +3659,7 @@ if (has_arguments_adaptor) { output_frame = deoptimizer->output_[frame_index - 1]; - ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR); + CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR); } parameters_count_ = output_frame->ComputeParametersCount(); @@ -3550,6 +3682,4 @@ v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); } -#endif // ENABLE_DEBUGGER_SUPPORT - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/deoptimizer.h nodejs-0.11.15/deps/v8/src/deoptimizer.h --- nodejs-0.11.13/deps/v8/src/deoptimizer.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/deoptimizer.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DEOPTIMIZER_H_ #define V8_DEOPTIMIZER_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "macro-assembler.h" -#include "zone-inl.h" +#include "src/allocation.h" +#include "src/macro-assembler.h" +#include "src/zone-inl.h" namespace v8 { @@ -169,7 +146,6 @@ Isolate* isolate); static Deoptimizer* Grab(Isolate* isolate); -#ifdef ENABLE_DEBUGGER_SUPPORT // The returned object with information on the optimized frame needs to be // freed before another one can be generated. static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame, @@ -177,7 +153,6 @@ Isolate* isolate); static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info, Isolate* isolate); -#endif // Makes sure that there is enough room in the relocation // information of a code object to perform lazy deoptimization @@ -202,6 +177,8 @@ // refer to that code. 
static void DeoptimizeMarkedCode(Isolate* isolate); + static void PatchStackForMarkedCode(Isolate* isolate); + // Visit all the known optimized functions in a given isolate. static void VisitAllOptimizedFunctions( Isolate* isolate, OptimizedFunctionVisitor* visitor); @@ -212,14 +189,13 @@ ~Deoptimizer(); void MaterializeHeapObjects(JavaScriptFrameIterator* it); -#ifdef ENABLE_DEBUGGER_SUPPORT + void MaterializeHeapNumbersForDebuggerInspectableFrame( Address parameters_top, uint32_t parameters_size, Address expressions_top, uint32_t expressions_size, DeoptimizedFrameInfo* info); -#endif static void ComputeOutputFrames(Deoptimizer* deoptimizer); @@ -329,14 +305,21 @@ void DoComputeCompiledStubFrame(TranslationIterator* iterator, int frame_index); + // Translate object, store the result into an auxiliary array + // (deferred_objects_tagged_values_). void DoTranslateObject(TranslationIterator* iterator, int object_index, int field_index); + // Translate value, store the result into the given frame slot. void DoTranslateCommand(TranslationIterator* iterator, int frame_index, unsigned output_offset); + // Translate object, do not store the result anywhere (but do update + // the deferred materialization array). + void DoTranslateObjectAndSkip(TranslationIterator* iterator); + unsigned ComputeInputFrameSize() const; unsigned ComputeFixedSize(JSFunction* function) const; @@ -406,10 +389,6 @@ // at the dynamic alignment state slot inside the frame. bool HasAlignmentPadding(JSFunction* function); - // Select the version of NotifyStubFailure builtin that either saves or - // doesn't save the double registers depending on CPU features. 
- Code* NotifyStubFailureBuiltin(); - Isolate* isolate_; JSFunction* function_; Code* compiled_code_; @@ -483,7 +462,7 @@ } uint32_t GetFrameSize() const { - ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_); + DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_); return static_cast<uint32_t>(frame_size_); } @@ -512,11 +491,11 @@ intptr_t GetRegister(unsigned n) const { #if DEBUG - // This convoluted ASSERT is needed to work around a gcc problem that + // This convoluted DCHECK is needed to work around a gcc problem that // improperly detects an array bounds overflow in optimized debug builds - // when using a plain ASSERT. + // when using a plain DCHECK. if (n >= ARRAY_SIZE(registers_)) { - ASSERT(false); + DCHECK(false); return 0; } #endif @@ -524,17 +503,17 @@ } double GetDoubleRegister(unsigned n) const { - ASSERT(n < ARRAY_SIZE(double_registers_)); + DCHECK(n < ARRAY_SIZE(double_registers_)); return double_registers_[n]; } void SetRegister(unsigned n, intptr_t value) { - ASSERT(n < ARRAY_SIZE(registers_)); + DCHECK(n < ARRAY_SIZE(registers_)); registers_[n] = value; } void SetDoubleRegister(unsigned n, double value) { - ASSERT(n < ARRAY_SIZE(double_registers_)); + DCHECK(n < ARRAY_SIZE(double_registers_)); double_registers_[n] = value; } @@ -630,7 +609,7 @@ intptr_t frame_content_[1]; intptr_t* GetFrameSlotPointer(unsigned offset) { - ASSERT(offset < frame_size_); + DCHECK(offset < frame_size_); return reinterpret_cast<intptr_t*>( reinterpret_cast<Address>(this) + frame_content_offset() + offset); } @@ -644,18 +623,14 @@ explicit DeoptimizerData(MemoryAllocator* allocator); ~DeoptimizerData(); -#ifdef ENABLE_DEBUGGER_SUPPORT void Iterate(ObjectVisitor* v); -#endif private: MemoryAllocator* allocator_; int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry]; MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry]; -#ifdef ENABLE_DEBUGGER_SUPPORT DeoptimizedFrameInfo* deoptimized_frame_info_; -#endif Deoptimizer* 
current_; @@ -683,7 +658,7 @@ public: TranslationIterator(ByteArray* buffer, int index) : buffer_(buffer), index_(index) { - ASSERT(index >= 0 && index < buffer->length()); + DCHECK(index >= 0 && index < buffer->length()); } int32_t Next(); @@ -919,7 +894,6 @@ }; -#ifdef ENABLE_DEBUGGER_SUPPORT // Class used to represent an unoptimized frame when the debugger // needs to inspect a frame that is part of an optimized frame. The // internally used FrameDescription objects are not GC safe so for use @@ -956,13 +930,13 @@ // Get an incoming argument. Object* GetParameter(int index) { - ASSERT(0 <= index && index < parameters_count()); + DCHECK(0 <= index && index < parameters_count()); return parameters_[index]; } // Get an expression from the expression stack. Object* GetExpression(int index) { - ASSERT(0 <= index && index < expression_count()); + DCHECK(0 <= index && index < expression_count()); return expression_stack_[index]; } @@ -973,13 +947,13 @@ private: // Set an incoming argument. void SetParameter(int index, Object* obj) { - ASSERT(0 <= index && index < parameters_count()); + DCHECK(0 <= index && index < parameters_count()); parameters_[index] = obj; } // Set an expression on the expression stack. 
void SetExpression(int index, Object* obj) { - ASSERT(0 <= index && index < expression_count()); + DCHECK(0 <= index && index < expression_count()); expression_stack_[index] = obj; } @@ -993,7 +967,6 @@ friend class Deoptimizer; }; -#endif } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/DEPS nodejs-0.11.15/deps/v8/src/DEPS --- nodejs-0.11.13/deps/v8/src/DEPS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/DEPS 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +include_rules = [ + "+src", + "-src/compiler", + "+src/compiler/pipeline.h", + "-src/libplatform", + "-include/libplatform", +] + +specific_include_rules = { + "(mksnapshot|d8)\.cc": [ + "+include/libplatform/libplatform.h", + ], +} diff -Nru nodejs-0.11.13/deps/v8/src/disasm.h nodejs-0.11.15/deps/v8/src/disasm.h --- nodejs-0.11.13/deps/v8/src/disasm.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/disasm.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2007-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DISASM_H_ #define V8_DISASM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/disassembler.cc nodejs-0.11.15/deps/v8/src/disassembler.cc --- nodejs-0.11.13/deps/v8/src/disassembler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/disassembler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "code-stubs.h" -#include "codegen.h" -#include "debug.h" -#include "deoptimizer.h" -#include "disasm.h" -#include "disassembler.h" -#include "macro-assembler.h" -#include "serialize.h" -#include "string-stream.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/disasm.h" +#include "src/disassembler.h" +#include "src/macro-assembler.h" +#include "src/serialize.h" +#include "src/string-stream.h" namespace v8 { namespace internal { @@ -73,7 +50,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const { const char* name = code_->GetIsolate()->builtins()->Lookup(pc); if (name != NULL) { - OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc); + SNPrintF(v8_buffer_, "%s (%p)", name, pc); return v8_buffer_.start(); } @@ -81,7 +58,7 @@ int offs = static_cast<int>(pc - code_->instruction_start()); // print as code offset, if it seems reasonable if (0 <= offs && offs < code_->instruction_size()) { - OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc); + SNPrintF(v8_buffer_, "%d (%p)", offs, pc); return v8_buffer_.start(); } } @@ -118,7 +95,6 @@ SealHandleScope shs(isolate); DisallowHeapAllocation no_alloc; ExternalReferenceEncoder ref_encoder(isolate); - Heap* heap = isolate->heap(); v8::internal::EmbeddedVector<char, 128> decode_buffer; v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer; @@ -137,27 +113,27 @@ // First decode instruction so that we know its length. 
byte* prev_pc = pc; if (constants > 0) { - OS::SNPrintF(decode_buffer, - "%08x constant", - *reinterpret_cast<int32_t*>(pc)); + SNPrintF(decode_buffer, + "%08x constant", + *reinterpret_cast<int32_t*>(pc)); constants--; pc += 4; } else { int num_const = d.ConstantPoolSizeAt(pc); if (num_const >= 0) { - OS::SNPrintF(decode_buffer, - "%08x constant pool begin", - *reinterpret_cast<int32_t*>(pc)); + SNPrintF(decode_buffer, + "%08x constant pool begin", + *reinterpret_cast<int32_t*>(pc)); constants = num_const; pc += 4; } else if (it != NULL && !it->done() && it->rinfo()->pc() == pc && it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) { // raw pointer embedded in code stream, e.g., jump table byte* ptr = *reinterpret_cast<byte**>(pc); - OS::SNPrintF(decode_buffer, - "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR, - ptr, - ptr - begin); + SNPrintF(decode_buffer, + "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR, + reinterpret_cast<intptr_t>(ptr), + ptr - begin); pc += 4; } else { decode_buffer[0] = '\0'; @@ -249,29 +225,21 @@ out.AddFormatted(", %s", Code::StubType2String(type)); } } else if (kind == Code::STUB || kind == Code::HANDLER) { - // Reverse lookup required as the minor key cannot be retrieved - // from the code object. - Object* obj = heap->code_stubs()->SlowReverseLookup(code); - if (obj != heap->undefined_value()) { - ASSERT(obj->IsSmi()); - // Get the STUB key and extract major and minor key. 
- uint32_t key = Smi::cast(obj)->value(); - uint32_t minor_key = CodeStub::MinorKeyFromKey(key); - CodeStub::Major major_key = CodeStub::GetMajorKey(code); - ASSERT(major_key == CodeStub::MajorKeyFromKey(key)); - out.AddFormatted(" %s, %s, ", - Code::Kind2String(kind), - CodeStub::MajorName(major_key, false)); - switch (major_key) { - case CodeStub::CallFunction: { - int argc = - CallFunctionStub::ExtractArgcFromMinorKey(minor_key); - out.AddFormatted("argc = %d", argc); - break; - } - default: - out.AddFormatted("minor: %d", minor_key); + // Get the STUB key and extract major and minor key. + uint32_t key = code->stub_key(); + uint32_t minor_key = CodeStub::MinorKeyFromKey(key); + CodeStub::Major major_key = CodeStub::GetMajorKey(code); + DCHECK(major_key == CodeStub::MajorKeyFromKey(key)); + out.AddFormatted(" %s, %s, ", Code::Kind2String(kind), + CodeStub::MajorName(major_key, false)); + switch (major_key) { + case CodeStub::CallFunction: { + int argc = CallFunctionStub::ExtractArgcFromMinorKey(minor_key); + out.AddFormatted("argc = %d", argc); + break; } + default: + out.AddFormatted("minor: %d", minor_key); } } else { out.AddFormatted(" %s", Code::Kind2String(kind)); diff -Nru nodejs-0.11.13/deps/v8/src/disassembler.h nodejs-0.11.15/deps/v8/src/disassembler.h --- nodejs-0.11.13/deps/v8/src/disassembler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/disassembler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DISASSEMBLER_H_ #define V8_DISASSEMBLER_H_ -#include "allocation.h" +#include "src/allocation.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/diy-fp.cc nodejs-0.11.15/deps/v8/src/diy-fp.cc --- nodejs-0.11.13/deps/v8/src/diy-fp.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/diy-fp.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "../include/v8stdint.h" -#include "globals.h" -#include "checks.h" -#include "diy-fp.h" +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/diy-fp.h" +#include "src/globals.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/diy-fp.h nodejs-0.11.15/deps/v8/src/diy-fp.h --- nodejs-0.11.13/deps/v8/src/diy-fp.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/diy-fp.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DIY_FP_H_ #define V8_DIY_FP_H_ @@ -48,8 +25,8 @@ // must be bigger than the significand of other. // The result will not be normalized. void Subtract(const DiyFp& other) { - ASSERT(e_ == other.e_); - ASSERT(f_ >= other.f_); + DCHECK(e_ == other.e_); + DCHECK(f_ >= other.f_); f_ -= other.f_; } @@ -74,7 +51,7 @@ } void Normalize() { - ASSERT(f_ != 0); + DCHECK(f_ != 0); uint64_t f = f_; int e = e_; diff -Nru nodejs-0.11.13/deps/v8/src/double.h nodejs-0.11.15/deps/v8/src/double.h --- nodejs-0.11.13/deps/v8/src/double.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/double.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DOUBLE_H_ #define V8_DOUBLE_H_ -#include "diy-fp.h" +#include "src/diy-fp.h" namespace v8 { namespace internal { @@ -57,14 +34,14 @@ // The value encoded by this Double must be greater or equal to +0.0. // It must not be special (infinity, or NaN). DiyFp AsDiyFp() const { - ASSERT(Sign() > 0); - ASSERT(!IsSpecial()); + DCHECK(Sign() > 0); + DCHECK(!IsSpecial()); return DiyFp(Significand(), Exponent()); } // The value encoded by this Double must be strictly greater than 0. DiyFp AsNormalizedDiyFp() const { - ASSERT(value() > 0.0); + DCHECK(value() > 0.0); uint64_t f = Significand(); int e = Exponent(); @@ -144,7 +121,7 @@ // Precondition: the value encoded by this Double must be greater or equal // than +0.0. DiyFp UpperBoundary() const { - ASSERT(Sign() > 0); + DCHECK(Sign() > 0); return DiyFp(Significand() * 2 + 1, Exponent() - 1); } @@ -153,7 +130,7 @@ // exponent as m_plus. 
// Precondition: the value encoded by this Double must be greater than 0. void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { - ASSERT(value() > 0.0); + DCHECK(value() > 0.0); DiyFp v = this->AsDiyFp(); bool significand_is_zero = (v.f() == kHiddenBit); DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); diff -Nru nodejs-0.11.13/deps/v8/src/dtoa.cc nodejs-0.11.15/deps/v8/src/dtoa.cc --- nodejs-0.11.13/deps/v8/src/dtoa.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/dtoa.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <cmath> -#include "../include/v8stdint.h" -#include "checks.h" -#include "utils.h" - -#include "dtoa.h" - -#include "bignum-dtoa.h" -#include "double.h" -#include "fast-dtoa.h" -#include "fixed-dtoa.h" +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/utils.h" + +#include "src/dtoa.h" + +#include "src/bignum-dtoa.h" +#include "src/double.h" +#include "src/fast-dtoa.h" +#include "src/fixed-dtoa.h" namespace v8 { namespace internal { @@ -55,8 +32,8 @@ void DoubleToAscii(double v, DtoaMode mode, int requested_digits, Vector<char> buffer, int* sign, int* length, int* point) { - ASSERT(!Double(v).IsSpecial()); - ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0); + DCHECK(!Double(v).IsSpecial()); + DCHECK(mode == DTOA_SHORTEST || requested_digits >= 0); if (Double(v).Sign() < 0) { *sign = 1; diff -Nru nodejs-0.11.13/deps/v8/src/dtoa.h nodejs-0.11.15/deps/v8/src/dtoa.h --- nodejs-0.11.13/deps/v8/src/dtoa.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/dtoa.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_DTOA_H_ #define V8_DTOA_H_ diff -Nru nodejs-0.11.13/deps/v8/src/effects.h nodejs-0.11.15/deps/v8/src/effects.h --- nodejs-0.11.13/deps/v8/src/effects.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/effects.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_EFFECTS_H_ #define V8_EFFECTS_H_ -#include "v8.h" +#include "src/v8.h" -#include "types.h" +#include "src/types.h" namespace v8 { namespace internal { @@ -56,7 +33,7 @@ Bounds bounds; Effect() : modality(DEFINITE) {} - Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {} + explicit Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {} // The unknown effect. static Effect Unknown(Zone* zone) { @@ -218,15 +195,15 @@ typedef typename Mapping::Locator Locator; bool Contains(Var var) { - ASSERT(var != kNoVar); + DCHECK(var != kNoVar); return map_->Contains(var); } bool Find(Var var, Locator* locator) { - ASSERT(var != kNoVar); + DCHECK(var != kNoVar); return map_->Find(var, locator); } bool Insert(Var var, Locator* locator) { - ASSERT(var != kNoVar); + DCHECK(var != kNoVar); return map_->Insert(var, locator); } @@ -282,7 +259,7 @@ bool is_empty() { return node_ == NULL; } bool Contains(Var var) { - ASSERT(var != kNoVar); + DCHECK(var != kNoVar); for (Node* node = node_; node != NULL; node = node->previous) { if (node->effects.Contains(var)) return true; } @@ -290,7 +267,7 @@ } bool Find(Var var, Locator* locator) { - ASSERT(var != kNoVar); + DCHECK(var != kNoVar); for (Node* node = node_; node != NULL; node = node->previous) { if (node->effects.Find(var, locator)) return true; } @@ -316,7 +293,7 @@ template<class Var, Var kNoVar> bool NestedEffectsBase<Var, kNoVar>::Insert(Var var, Locator* locator) { - ASSERT(var != kNoVar); + DCHECK(var != kNoVar); if (!node_->effects.Insert(var, locator)) return false; Locator shadowed; for (Node* node = node_->previous; node != NULL; node = node->previous) { @@ -349,7 +326,7 @@ NestedEffects Pop() { NestedEffects result = *this; result.pop(); - ASSERT(!this->is_empty()); + DCHECK(!this->is_empty()); return result; } }; diff -Nru nodejs-0.11.13/deps/v8/src/elements.cc nodejs-0.11.15/deps/v8/src/elements.cc --- nodejs-0.11.13/deps/v8/src/elements.cc 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/elements.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "arguments.h" -#include "objects.h" -#include "elements.h" -#include "utils.h" -#include "v8conversions.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/arguments.h" +#include "src/conversions.h" +#include "src/elements.h" +#include "src/objects.h" +#include "src/utils.h" // Each concrete ElementsAccessor can handle exactly one ElementsKind, // several abstract ElementsAccessor classes are used to allow sharing @@ -135,7 +112,7 @@ #define ELEMENTS_TRAITS(Class, KindParam, Store) \ template<> class ElementsKindTraits<KindParam> { \ - public: \ + public: /* NOLINT */ \ static const ElementsKind Kind = KindParam; \ typedef Store BackingStore; \ }; @@ -146,7 +123,9 @@ ElementsAccessor** ElementsAccessor::elements_accessors_; -static bool HasKey(FixedArray* array, Object* key) { +static bool HasKey(Handle<FixedArray> array, Handle<Object> key_handle) { + DisallowHeapAllocation no_gc; + Object* key = *key_handle; int len0 = array->length(); for (int i = 0; i < len0; i++) { Object* element = array->get(i); @@ -160,27 +139,26 @@ } -static Handle<Object> ThrowArrayLengthRangeError(Isolate* isolate) { - isolate->Throw( - *isolate->factory()->NewRangeError("invalid_array_length", - HandleVector<Object>(NULL, 0))); - return Handle<Object>(); +MUST_USE_RESULT +static MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) { + return isolate->Throw<Object>( + isolate->factory()->NewRangeError("invalid_array_length", + HandleVector<Object>(NULL, 0))); } -static void CopyObjectToObjectElements(Handle<FixedArrayBase> from_base, +static void CopyObjectToObjectElements(FixedArrayBase* from_base, ElementsKind from_kind, uint32_t from_start, - Handle<FixedArrayBase> to_base, - ElementsKind to_kind, - uint32_t to_start, + FixedArrayBase* to_base, + ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { - ASSERT(to_base->map() != + DCHECK(to_base->map() != from_base->GetIsolate()->heap()->fixed_cow_array_map()); DisallowHeapAllocation no_allocation; int copy_size = raw_copy_size; if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + 
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = Min(from_base->length() - from_start, to_base->length() - to_start); @@ -189,18 +167,18 @@ int length = to_base->length() - start; if (length > 0) { Heap* heap = from_base->GetHeap(); - MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start, + MemsetPointer(FixedArray::cast(to_base)->data_start() + start, heap->the_hole_value(), length); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; - Handle<FixedArray> from = Handle<FixedArray>::cast(from_base); - Handle<FixedArray> to = Handle<FixedArray>::cast(to_base); - ASSERT(IsFastSmiOrObjectElementsKind(from_kind)); - ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); + FixedArray* from = FixedArray::cast(from_base); + FixedArray* to = FixedArray::cast(to_base); + DCHECK(IsFastSmiOrObjectElementsKind(from_kind)); + DCHECK(IsFastSmiOrObjectElementsKind(to_kind)); Address to_address = to->address() + FixedArray::kHeaderSize; Address from_address = from->address() + FixedArray::kHeaderSize; CopyWords(reinterpret_cast<Object**>(to_address) + to_start, @@ -209,29 +187,25 @@ if (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) { Heap* heap = from->GetHeap(); - if (!heap->InNewSpace(*to)) { + if (!heap->InNewSpace(to)) { heap->RecordWrites(to->address(), to->OffsetOfElementAt(to_start), copy_size); } - heap->incremental_marking()->RecordWrites(*to); + heap->incremental_marking()->RecordWrites(to); } } -static void CopyDictionaryToObjectElements(Handle<FixedArrayBase> from_base, - uint32_t from_start, - Handle<FixedArrayBase> to_base, - ElementsKind to_kind, - uint32_t to_start, - int raw_copy_size) { - Handle<SeededNumberDictionary> from = - 
Handle<SeededNumberDictionary>::cast(from_base); +static void CopyDictionaryToObjectElements( + FixedArrayBase* from_base, uint32_t from_start, FixedArrayBase* to_base, + ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { DisallowHeapAllocation no_allocation; + SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base); int copy_size = raw_copy_size; Heap* heap = from->GetHeap(); if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from->max_number_key() + 1 - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { @@ -239,15 +213,15 @@ int length = to_base->length() - start; if (length > 0) { Heap* heap = from->GetHeap(); - MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start, + MemsetPointer(FixedArray::cast(to_base)->data_start() + start, heap->the_hole_value(), length); } } } - ASSERT(*to_base != *from_base); - ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); + DCHECK(to_base != from_base); + DCHECK(IsFastSmiOrObjectElementsKind(to_kind)); if (copy_size == 0) return; - Handle<FixedArray> to = Handle<FixedArray>::cast(to_base); + FixedArray* to = FixedArray::cast(to_base); uint32_t to_length = to->length(); if (to_start + copy_size > to_length) { copy_size = to_length - to_start; @@ -256,19 +230,19 @@ int entry = from->FindEntry(i + from_start); if (entry != SeededNumberDictionary::kNotFound) { Object* value = from->ValueAt(entry); - ASSERT(!value->IsTheHole()); + DCHECK(!value->IsTheHole()); to->set(i + to_start, value, SKIP_WRITE_BARRIER); } else { to->set_the_hole(i + to_start); } } if (IsFastObjectElementsKind(to_kind)) { - if (!heap->InNewSpace(*to)) { + if (!heap->InNewSpace(to)) { heap->RecordWrites(to->address(), to->OffsetOfElementAt(to_start), copy_size); } - heap->incremental_marking()->RecordWrites(*to); + 
heap->incremental_marking()->RecordWrites(to); } } @@ -279,10 +253,10 @@ ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { - ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); + DCHECK(IsFastSmiOrObjectElementsKind(to_kind)); int copy_size = raw_copy_size; if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = Min(from_base->length() - from_start, to_base->length() - to_start); @@ -294,52 +268,52 @@ int length = to_base->length() - start; if (length > 0) { Heap* heap = from_base->GetHeap(); - MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start, + MemsetPointer(FixedArray::cast(*to_base)->data_start() + start, heap->the_hole_value(), length); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; + Isolate* isolate = from_base->GetIsolate(); Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base); Handle<FixedArray> to = Handle<FixedArray>::cast(to_base); for (int i = 0; i < copy_size; ++i) { - HandleScope scope(from_base->GetIsolate()); + HandleScope scope(isolate); if (IsFastSmiElementsKind(to_kind)) { UNIMPLEMENTED(); } else { - ASSERT(IsFastObjectElementsKind(to_kind)); - Handle<Object> value = from->get_as_handle(i + from_start); + DCHECK(IsFastObjectElementsKind(to_kind)); + Handle<Object> value = FixedDoubleArray::get(from, i + from_start); to->set(i + to_start, *value, UPDATE_WRITE_BARRIER); } } } -static void CopyDoubleToDoubleElements(Handle<FixedArrayBase> from_base, +static void CopyDoubleToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - Handle<FixedArrayBase> to_base, - uint32_t to_start, - int raw_copy_size) { + FixedArrayBase* to_base, + 
uint32_t to_start, int raw_copy_size) { DisallowHeapAllocation no_allocation; int copy_size = raw_copy_size; if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = Min(from_base->length() - from_start, to_base->length() - to_start); if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to_base->length(); ++i) { - Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i); + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; - Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base); - Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base); + FixedDoubleArray* from = FixedDoubleArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); Address to_address = to->address() + FixedDoubleArray::kHeaderSize; Address from_address = from->address() + FixedDoubleArray::kHeaderSize; to_address += kDoubleSize * to_start; @@ -351,33 +325,32 @@ } -static void CopySmiToDoubleElements(Handle<FixedArrayBase> from_base, +static void CopySmiToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - Handle<FixedArrayBase> to_base, - uint32_t to_start, + FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) { DisallowHeapAllocation no_allocation; int copy_size = raw_copy_size; if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from_base->length() - from_start; if (raw_copy_size == 
ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to_base->length(); ++i) { - Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i); + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; - Handle<FixedArray> from = Handle<FixedArray>::cast(from_base); - Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base); - Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value(); + FixedArray* from = FixedArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); + Object* the_hole = from->GetHeap()->the_hole_value(); for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size); from_start < from_end; from_start++, to_start++) { Object* hole_or_smi = from->get(from_start); - if (hole_or_smi == *the_hole) { + if (hole_or_smi == the_hole) { to->set_the_hole(to_start); } else { to->set(to_start, Smi::cast(hole_or_smi)->value()); @@ -386,23 +359,22 @@ } -static void CopyPackedSmiToDoubleElements(Handle<FixedArrayBase> from_base, +static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - Handle<FixedArrayBase> to_base, - uint32_t to_start, - int packed_size, + FixedArrayBase* to_base, + uint32_t to_start, int packed_size, int raw_copy_size) { DisallowHeapAllocation no_allocation; int copy_size = raw_copy_size; uint32_t to_end; if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = packed_size - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { to_end = to_base->length(); for (uint32_t i = to_start + 
copy_size; i < to_end; ++i) { - Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i); + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } else { to_end = to_start + static_cast<uint32_t>(copy_size); @@ -410,49 +382,48 @@ } else { to_end = to_start + static_cast<uint32_t>(copy_size); } - ASSERT(static_cast<int>(to_end) <= to_base->length()); - ASSERT(packed_size >= 0 && packed_size <= copy_size); - ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + DCHECK(static_cast<int>(to_end) <= to_base->length()); + DCHECK(packed_size >= 0 && packed_size <= copy_size); + DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; - Handle<FixedArray> from = Handle<FixedArray>::cast(from_base); - Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base); + FixedArray* from = FixedArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size); from_start < from_end; from_start++, to_start++) { Object* smi = from->get(from_start); - ASSERT(!smi->IsTheHole()); + DCHECK(!smi->IsTheHole()); to->set(to_start, Smi::cast(smi)->value()); } } -static void CopyObjectToDoubleElements(Handle<FixedArrayBase> from_base, +static void CopyObjectToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - Handle<FixedArrayBase> to_base, - uint32_t to_start, - int raw_copy_size) { + FixedArrayBase* to_base, + uint32_t to_start, int raw_copy_size) { DisallowHeapAllocation no_allocation; int copy_size = raw_copy_size; if (raw_copy_size < 0) { - ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from_base->length() - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = 
to_start + copy_size; i < to_base->length(); ++i) { - Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i); + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; - Handle<FixedArray> from = Handle<FixedArray>::cast(from_base); - Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base); - Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value(); + FixedArray* from = FixedArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); + Object* the_hole = from->GetHeap()->the_hole_value(); for (uint32_t from_end = from_start + copy_size; from_start < from_end; from_start++, to_start++) { Object* hole_or_object = from->get(from_start); - if (hole_or_object == *the_hole) { + if (hole_or_object == the_hole) { to->set_the_hole(to_start); } else { to->set(to_start, hole_or_object->Number()); @@ -461,27 +432,26 @@ } -static void CopyDictionaryToDoubleElements(Handle<FixedArrayBase> from_base, +static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - Handle<FixedArrayBase> to_base, + FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) { - Handle<SeededNumberDictionary> from = - Handle<SeededNumberDictionary>::cast(from_base); DisallowHeapAllocation no_allocation; + SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base); int copy_size = raw_copy_size; if (copy_size < 0) { - ASSERT(copy_size == ElementsAccessor::kCopyToEnd || + DCHECK(copy_size == ElementsAccessor::kCopyToEnd || copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from->max_number_key() + 1 - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to_base->length(); 
++i) { - Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i); + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } if (copy_size == 0) return; - Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); uint32_t to_length = to->length(); if (to_start + copy_size > to_length) { copy_size = to_length - to_start; @@ -517,12 +487,13 @@ } -void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key, +void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key, bool allow_appending) { + DisallowHeapAllocation no_allocation; Object* raw_length = NULL; const char* elements_type = "array"; if (obj->IsJSArray()) { - JSArray* array = JSArray::cast(obj); + JSArray* array = JSArray::cast(*obj); raw_length = array->length(); } else { raw_length = Smi::FromInt(obj->elements()->length()); @@ -587,19 +558,17 @@ return ElementsTraits::Kind; } - static void ValidateContents(JSObject* holder, int length) { + static void ValidateContents(Handle<JSObject> holder, int length) { } - static void ValidateImpl(JSObject* holder) { - FixedArrayBase* fixed_array_base = holder->elements(); - // When objects are first allocated, its elements are Failures. - if (fixed_array_base->IsFailure()) return; + static void ValidateImpl(Handle<JSObject> holder) { + Handle<FixedArrayBase> fixed_array_base(holder->elements()); if (!fixed_array_base->IsHeapObject()) return; // Arrays that have been shifted in place can't be verified. 
if (fixed_array_base->IsFiller()) return; int length = 0; if (holder->IsJSArray()) { - Object* length_obj = JSArray::cast(holder)->length(); + Object* length_obj = Handle<JSArray>::cast(holder)->length(); if (length_obj->IsSmi()) { length = Smi::cast(length_obj)->value(); } @@ -609,51 +578,33 @@ ElementsAccessorSubclass::ValidateContents(holder, length); } - virtual void Validate(JSObject* holder) V8_FINAL V8_OVERRIDE { + virtual void Validate(Handle<JSObject> holder) V8_FINAL V8_OVERRIDE { + DisallowHeapAllocation no_gc; ElementsAccessorSubclass::ValidateImpl(holder); } - static bool HasElementImpl(Object* receiver, - JSObject* holder, + static bool HasElementImpl(Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) { + Handle<FixedArrayBase> backing_store) { return ElementsAccessorSubclass::GetAttributesImpl( receiver, holder, key, backing_store) != ABSENT; } - virtual bool HasElement(Object* receiver, - JSObject* holder, - uint32_t key, - FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE { - if (backing_store == NULL) { - backing_store = holder->elements(); - } + virtual bool HasElement( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key, + Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::HasElementImpl( receiver, holder, key, backing_store); } - // TODO(ishell): Temporary wrapper until handlified. - MUST_USE_RESULT virtual Handle<Object> Get( + MUST_USE_RESULT virtual MaybeHandle<Object> Get( Handle<Object> receiver, Handle<JSObject> holder, uint32_t key, Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE { - CALL_HEAP_FUNCTION(holder->GetIsolate(), - Get(*receiver, *holder, key, - backing_store.is_null() - ? 
NULL : *backing_store), - Object); - } - - MUST_USE_RESULT virtual MaybeObject* Get( - Object* receiver, - JSObject* holder, - uint32_t key, - FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE { - if (backing_store == NULL) { - backing_store = holder->elements(); - } - if (!IsExternalArrayElementsKind(ElementsTraits::Kind) && FLAG_trace_js_array_abuse) { CheckArrayAbuse(holder, "elements read", key); @@ -668,90 +619,87 @@ receiver, holder, key, backing_store); } - MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - FixedArrayBase* backing_store) { - return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store)) - ? BackingStore::cast(backing_store)->get(key) - : backing_store->GetHeap()->the_hole_value(); + MUST_USE_RESULT static MaybeHandle<Object> GetImpl( + Handle<Object> receiver, + Handle<JSObject> obj, + uint32_t key, + Handle<FixedArrayBase> backing_store) { + if (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store)) { + return BackingStore::get(Handle<BackingStore>::cast(backing_store), key); + } else { + return backing_store->GetIsolate()->factory()->the_hole_value(); + } } MUST_USE_RESULT virtual PropertyAttributes GetAttributes( - Object* receiver, - JSObject* holder, + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE { - if (backing_store == NULL) { - backing_store = holder->elements(); - } + Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::GetAttributesImpl( receiver, holder, key, backing_store); } MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* backing_store) { + Handle<FixedArrayBase> backing_store) { if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) { return ABSENT; } - return 
BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE; + return + Handle<BackingStore>::cast(backing_store)->is_the_hole(key) + ? ABSENT : NONE; } MUST_USE_RESULT virtual PropertyType GetType( - Object* receiver, - JSObject* holder, + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE { - if (backing_store == NULL) { - backing_store = holder->elements(); - } + Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::GetTypeImpl( receiver, holder, key, backing_store); } MUST_USE_RESULT static PropertyType GetTypeImpl( - Object* receiver, - JSObject* obj, - uint32_t key, - FixedArrayBase* backing_store) { + Handle<Object> receiver, + Handle<JSObject> obj, + uint32_t key, + Handle<FixedArrayBase> backing_store) { if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) { return NONEXISTENT; } - return BackingStore::cast(backing_store)->is_the_hole(key) - ? NONEXISTENT : FIELD; + return + Handle<BackingStore>::cast(backing_store)->is_the_hole(key) + ? 
NONEXISTENT : FIELD; } - MUST_USE_RESULT virtual AccessorPair* GetAccessorPair( - Object* receiver, - JSObject* holder, + MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair( + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE { - if (backing_store == NULL) { - backing_store = holder->elements(); - } + Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::GetAccessorPairImpl( receiver, holder, key, backing_store); } - MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl( - Object* receiver, - JSObject* obj, - uint32_t key, - FixedArrayBase* backing_store) { - return NULL; + MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl( + Handle<Object> receiver, + Handle<JSObject> obj, + uint32_t key, + Handle<FixedArrayBase> backing_store) { + return MaybeHandle<AccessorPair>(); } - MUST_USE_RESULT virtual Handle<Object> SetLength( + MUST_USE_RESULT virtual MaybeHandle<Object> SetLength( Handle<JSArray> array, Handle<Object> length) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::SetLengthImpl( array, length, handle(array->elements())); } - MUST_USE_RESULT static Handle<Object> SetLengthImpl( + MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl( Handle<JSObject> obj, Handle<Object> length, Handle<FixedArrayBase> backing_store); @@ -771,7 +719,7 @@ UNIMPLEMENTED(); } - MUST_USE_RESULT virtual Handle<Object> Delete( + MUST_USE_RESULT virtual MaybeHandle<Object> Delete( Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) V8_OVERRIDE = 0; @@ -787,66 +735,74 @@ } virtual void CopyElements( - Handle<JSObject> from_holder, + Handle<FixedArrayBase> from, uint32_t from_start, ElementsKind from_kind, Handle<FixedArrayBase> to, uint32_t to_start, - int copy_size, - Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE { - int packed_size = kPackedSizeNotKnown; - if (from.is_null()) { - from = 
handle(from_holder->elements()); - } + int copy_size) V8_FINAL V8_OVERRIDE { + DCHECK(!from.is_null()); + ElementsAccessorSubclass::CopyElementsImpl( + from, from_start, to, from_kind, to_start, kPackedSizeNotKnown, + copy_size); + } - if (!from_holder.is_null()) { - bool is_packed = IsFastPackedElementsKind(from_kind) && - from_holder->IsJSArray(); - if (is_packed) { - packed_size = - Smi::cast(Handle<JSArray>::cast(from_holder)->length())->value(); - if (copy_size >= 0 && packed_size > copy_size) { - packed_size = copy_size; - } + virtual void CopyElements( + JSObject* from_holder, + uint32_t from_start, + ElementsKind from_kind, + Handle<FixedArrayBase> to, + uint32_t to_start, + int copy_size) V8_FINAL V8_OVERRIDE { + int packed_size = kPackedSizeNotKnown; + bool is_packed = IsFastPackedElementsKind(from_kind) && + from_holder->IsJSArray(); + if (is_packed) { + packed_size = + Smi::cast(JSArray::cast(from_holder)->length())->value(); + if (copy_size >= 0 && packed_size > copy_size) { + packed_size = copy_size; } } + Handle<FixedArrayBase> from(from_holder->elements()); ElementsAccessorSubclass::CopyElementsImpl( from, from_start, to, from_kind, to_start, packed_size, copy_size); } - MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray( - Object* receiver, - JSObject* holder, - FixedArray* to, - FixedArrayBase* from) V8_FINAL V8_OVERRIDE { + virtual MaybeHandle<FixedArray> AddElementsToFixedArray( + Handle<Object> receiver, + Handle<JSObject> holder, + Handle<FixedArray> to, + Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE { int len0 = to->length(); -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { for (int i = 0; i < len0; i++) { - ASSERT(!to->get(i)->IsTheHole()); + DCHECK(!to->get(i)->IsTheHole()); } } #endif - if (from == NULL) { - from = holder->elements(); - } // Optimize if 'other' is empty. // We cannot optimize if 'this' is empty, as other may have holes. 
uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from); if (len1 == 0) return to; + Isolate* isolate = from->GetIsolate(); + // Compute how many elements are not in other. uint32_t extra = 0; for (uint32_t y = 0; y < len1; y++) { uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y); if (ElementsAccessorSubclass::HasElementImpl( receiver, holder, key, from)) { - MaybeObject* maybe_value = - ElementsAccessorSubclass::GetImpl(receiver, holder, key, from); - Object* value; - if (!maybe_value->To(&value)) return maybe_value; - ASSERT(!value->IsTheHole()); + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, + ElementsAccessorSubclass::GetImpl(receiver, holder, key, from), + FixedArray); + + DCHECK(!value->IsTheHole()); if (!HasKey(to, value)) { extra++; } @@ -856,9 +812,7 @@ if (extra == 0) return to; // Allocate the result - FixedArray* result; - MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra); - if (!maybe_obj->To(&result)) return maybe_obj; + Handle<FixedArray> result = isolate->factory()->NewFixedArray(len0 + extra); // Fill in the content { @@ -866,7 +820,7 @@ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); for (int i = 0; i < len0; i++) { Object* e = to->get(i); - ASSERT(e->IsString() || e->IsNumber()); + DCHECK(e->IsString() || e->IsNumber()); result->set(i, e, mode); } } @@ -877,36 +831,37 @@ ElementsAccessorSubclass::GetKeyForIndexImpl(from, y); if (ElementsAccessorSubclass::HasElementImpl( receiver, holder, key, from)) { - MaybeObject* maybe_value = - ElementsAccessorSubclass::GetImpl(receiver, holder, key, from); - Object* value; - if (!maybe_value->To(&value)) return maybe_value; + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, + ElementsAccessorSubclass::GetImpl(receiver, holder, key, from), + FixedArray); if (!value->IsTheHole() && !HasKey(to, value)) { - result->set(len0 + index, value); + result->set(len0 + index, *value); index++; } } } - 
ASSERT(extra == index); + DCHECK(extra == index); return result; } protected: - static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) { + static uint32_t GetCapacityImpl(Handle<FixedArrayBase> backing_store) { return backing_store->length(); } - virtual uint32_t GetCapacity(FixedArrayBase* backing_store) + virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::GetCapacityImpl(backing_store); } - static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store, + static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> backing_store, uint32_t index) { return index; } - virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store, + virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store, uint32_t index) V8_FINAL V8_OVERRIDE { return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index); } @@ -918,8 +873,7 @@ // Super class for all fast element arrays. template<typename FastElementsAccessorSubclass, - typename KindTraits, - int ElementSize> + typename KindTraits> class FastElementsAccessor : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> { public: @@ -932,8 +886,7 @@ typedef typename KindTraits::BackingStore BackingStore; - // Adjusts the length of the fast backing store or returns the new length or - // undefined in case conversion to a slow backing store should be performed. + // Adjusts the length of the fast backing store. 
static Handle<Object> SetLengthWithoutNormalize( Handle<FixedArrayBase> backing_store, Handle<JSArray> array, @@ -962,11 +915,8 @@ if (length == 0) { array->initialize_elements(); } else { - backing_store->set_length(length); - Address filler_start = backing_store->address() + - BackingStore::OffsetOfElementAt(length); - int filler_size = (old_capacity - length) * ElementSize; - array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size); + isolate->heap()->RightTrimFixedArray<Heap::FROM_MUTATOR>( + *backing_store, old_capacity - length); } } else { // Otherwise, fill the unused tail with holes. @@ -981,21 +931,16 @@ // Check whether the backing store should be expanded. uint32_t min = JSObject::NewElementsCapacity(old_capacity); uint32_t new_capacity = length > min ? length : min; - if (!array->ShouldConvertToSlowElements(new_capacity)) { - FastElementsAccessorSubclass:: - SetFastElementsCapacityAndLength(array, new_capacity, length); - array->ValidateElements(); - return length_object; - } - - // Request conversion to slow elements. 
- return isolate->factory()->undefined_value(); + FastElementsAccessorSubclass::SetFastElementsCapacityAndLength( + array, new_capacity, length); + JSObject::ValidateElements(array); + return length_object; } static Handle<Object> DeleteCommon(Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) { - ASSERT(obj->HasFastSmiOrObjectElements() || + DCHECK(obj->HasFastSmiOrObjectElements() || obj->HasFastDoubleElements() || obj->HasFastArgumentsElements()); Isolate* isolate = obj->GetIsolate(); @@ -1009,7 +954,8 @@ backing_store->map() == heap->sloppy_arguments_elements_map(); if (is_sloppy_arguments_elements_map) { backing_store = handle( - BackingStore::cast(Handle<FixedArray>::cast(backing_store)->get(1))); + BackingStore::cast(Handle<FixedArray>::cast(backing_store)->get(1)), + isolate); } uint32_t length = static_cast<uint32_t>( obj->IsJSArray() @@ -1050,7 +996,7 @@ return isolate->factory()->true_value(); } - virtual Handle<Object> Delete( + virtual MaybeHandle<Object> Delete( Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE { @@ -1058,31 +1004,34 @@ } static bool HasElementImpl( - Object* receiver, - JSObject* holder, + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) { + Handle<FixedArrayBase> backing_store) { if (key >= static_cast<uint32_t>(backing_store->length())) { return false; } - return !BackingStore::cast(backing_store)->is_the_hole(key); + return !Handle<BackingStore>::cast(backing_store)->is_the_hole(key); } - static void ValidateContents(JSObject* holder, int length) { + static void ValidateContents(Handle<JSObject> holder, int length) { #if DEBUG - FixedArrayBase* elements = holder->elements(); - Heap* heap = elements->GetHeap(); + Isolate* isolate = holder->GetIsolate(); + HandleScope scope(isolate); + Handle<FixedArrayBase> elements(holder->elements(), isolate); Map* map = elements->map(); - ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) && 
- (map == heap->fixed_array_map() || - map == heap->fixed_cow_array_map())) || + DCHECK((IsFastSmiOrObjectElementsKind(KindTraits::Kind) && + (map == isolate->heap()->fixed_array_map() || + map == isolate->heap()->fixed_cow_array_map())) || (IsFastDoubleElementsKind(KindTraits::Kind) == - ((map == heap->fixed_array_map() && length == 0) || - map == heap->fixed_double_array_map()))); + ((map == isolate->heap()->fixed_array_map() && length == 0) || + map == isolate->heap()->fixed_double_array_map()))); + DisallowHeapAllocation no_gc; for (int i = 0; i < length; i++) { - BackingStore* backing_store = BackingStore::cast(elements); - ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) || - static_cast<Object*>(backing_store->get(i))->IsSmi()) || + HandleScope scope(isolate); + Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements); + DCHECK((!IsFastSmiElementsKind(KindTraits::Kind) || + BackingStore::get(backing_store, i)->IsSmi()) || (IsFastHoleyElementsKind(KindTraits::Kind) == backing_store->is_the_hole(i))); } @@ -1091,7 +1040,7 @@ }; -static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) { +static inline ElementsKind ElementsKindForArray(Handle<FixedArrayBase> array) { switch (array->map()->instance_type()) { case FIXED_ARRAY_TYPE: if (array->IsDictionary()) { @@ -1121,14 +1070,11 @@ template<typename FastElementsAccessorSubclass, typename KindTraits> class FastSmiOrObjectElementsAccessor - : public FastElementsAccessor<FastElementsAccessorSubclass, - KindTraits, - kPointerSize> { + : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> { public: explicit FastSmiOrObjectElementsAccessor(const char* name) : FastElementsAccessor<FastElementsAccessorSubclass, - KindTraits, - kPointerSize>(name) {} + KindTraits>(name) {} static void CopyElementsImpl(Handle<FixedArrayBase> from, uint32_t from_start, @@ -1143,8 +1089,8 @@ case FAST_HOLEY_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_HOLEY_ELEMENTS: - 
CopyObjectToObjectElements( - from, from_kind, from_start, to, to_kind, to_start, copy_size); + CopyObjectToObjectElements(*from, from_kind, from_start, *to, to_kind, + to_start, copy_size); break; case FAST_DOUBLE_ELEMENTS: case FAST_HOLEY_DOUBLE_ELEMENTS: @@ -1152,8 +1098,8 @@ from, from_start, to, to_kind, to_start, copy_size); break; case DICTIONARY_ELEMENTS: - CopyDictionaryToObjectElements( - from, from_start, to, to_kind, to_start, copy_size); + CopyDictionaryToObjectElements(*from, from_start, *to, to_kind, + to_start, copy_size); break; case SLOPPY_ARGUMENTS_ELEMENTS: { // TODO(verwaest): This is a temporary hack to support extending @@ -1162,7 +1108,7 @@ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(from); Handle<FixedArrayBase> arguments( FixedArrayBase::cast(parameter_map->get(1))); - ElementsKind from_kind = ElementsKindForArray(*arguments); + ElementsKind from_kind = ElementsKindForArray(arguments); CopyElementsImpl(arguments, from_start, to, from_kind, to_start, packed_size, copy_size); break; @@ -1242,14 +1188,11 @@ template<typename FastElementsAccessorSubclass, typename KindTraits> class FastDoubleElementsAccessor - : public FastElementsAccessor<FastElementsAccessorSubclass, - KindTraits, - kDoubleSize> { + : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> { public: explicit FastDoubleElementsAccessor(const char* name) : FastElementsAccessor<FastElementsAccessorSubclass, - KindTraits, - kDoubleSize>(name) {} + KindTraits>(name) {} static void SetFastElementsCapacityAndLength(Handle<JSObject> obj, uint32_t capacity, @@ -1267,23 +1210,23 @@ int copy_size) { switch (from_kind) { case FAST_SMI_ELEMENTS: - CopyPackedSmiToDoubleElements( - from, from_start, to, to_start, packed_size, copy_size); + CopyPackedSmiToDoubleElements(*from, from_start, *to, to_start, + packed_size, copy_size); break; case FAST_HOLEY_SMI_ELEMENTS: - CopySmiToDoubleElements(from, from_start, to, to_start, copy_size); + 
CopySmiToDoubleElements(*from, from_start, *to, to_start, copy_size); break; case FAST_DOUBLE_ELEMENTS: case FAST_HOLEY_DOUBLE_ELEMENTS: - CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size); + CopyDoubleToDoubleElements(*from, from_start, *to, to_start, copy_size); break; case FAST_ELEMENTS: case FAST_HOLEY_ELEMENTS: - CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size); + CopyObjectToDoubleElements(*from, from_start, *to, to_start, copy_size); break; case DICTIONARY_ELEMENTS: - CopyDictionaryToDoubleElements( - from, from_start, to, to_start, copy_size); + CopyDictionaryToDoubleElements(*from, from_start, *to, to_start, + copy_size); break; case SLOPPY_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -1345,37 +1288,39 @@ friend class ElementsAccessorBase<AccessorClass, ElementsKindTraits<Kind> >; - MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - FixedArrayBase* backing_store) { - return - key < AccessorClass::GetCapacityImpl(backing_store) - ? BackingStore::cast(backing_store)->get(key) - : backing_store->GetHeap()->undefined_value(); + MUST_USE_RESULT static MaybeHandle<Object> GetImpl( + Handle<Object> receiver, + Handle<JSObject> obj, + uint32_t key, + Handle<FixedArrayBase> backing_store) { + if (key < AccessorClass::GetCapacityImpl(backing_store)) { + return BackingStore::get(Handle<BackingStore>::cast(backing_store), key); + } else { + return backing_store->GetIsolate()->factory()->undefined_value(); + } } MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* backing_store) { + Handle<FixedArrayBase> backing_store) { return key < AccessorClass::GetCapacityImpl(backing_store) ? 
NONE : ABSENT; } MUST_USE_RESULT static PropertyType GetTypeImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* backing_store) { + Handle<FixedArrayBase> backing_store) { return key < AccessorClass::GetCapacityImpl(backing_store) ? FIELD : NONEXISTENT; } - MUST_USE_RESULT static Handle<Object> SetLengthImpl( + MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl( Handle<JSObject> obj, Handle<Object> length, Handle<FixedArrayBase> backing_store) { @@ -1384,7 +1329,7 @@ return obj; } - MUST_USE_RESULT virtual Handle<Object> Delete( + MUST_USE_RESULT virtual MaybeHandle<Object> Delete( Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE { @@ -1392,10 +1337,10 @@ return obj->GetIsolate()->factory()->true_value(); } - static bool HasElementImpl(Object* receiver, - JSObject* holder, + static bool HasElementImpl(Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) { + Handle<FixedArrayBase> backing_store) { uint32_t capacity = AccessorClass::GetCapacityImpl(backing_store); return key < capacity; @@ -1430,13 +1375,14 @@ // Adjusts the length of the dictionary backing store and returns the new // length according to ES5 section 15.4.5.2 behavior. 
- MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize( - FixedArrayBase* store, - JSArray* array, - Object* length_object, + static Handle<Object> SetLengthWithoutNormalize( + Handle<FixedArrayBase> store, + Handle<JSArray> array, + Handle<Object> length_object, uint32_t length) { - SeededNumberDictionary* dict = SeededNumberDictionary::cast(store); - Heap* heap = array->GetHeap(); + Handle<SeededNumberDictionary> dict = + Handle<SeededNumberDictionary>::cast(store); + Isolate* isolate = array->GetIsolate(); int capacity = dict->Capacity(); uint32_t new_length = length; uint32_t old_length = static_cast<uint32_t>(array->length()->Number()); @@ -1444,6 +1390,7 @@ // Find last non-deletable element in range of elements to be // deleted and adjust range accordingly. for (int i = 0; i < capacity; i++) { + DisallowHeapAllocation no_gc; Object* key = dict->KeyAt(i); if (key->IsNumber()) { uint32_t number = static_cast<uint32_t>(key->Number()); @@ -1454,22 +1401,18 @@ } } if (new_length != length) { - MaybeObject* maybe_object = heap->NumberFromUint32(new_length); - if (!maybe_object->To(&length_object)) return maybe_object; + length_object = isolate->factory()->NewNumberFromUint(new_length); } } if (new_length == 0) { - // If the length of a slow array is reset to zero, we clear - // the array and flush backing storage. This has the added - // benefit that the array returns to fast mode. - Object* obj; - MaybeObject* maybe_obj = array->ResetElements(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + // Flush the backing store. + JSObject::ResetElements(array); } else { + DisallowHeapAllocation no_gc; // Remove elements that should be deleted. 
int removed_entries = 0; - Object* the_hole_value = heap->the_hole_value(); + Handle<Object> the_hole_value = isolate->factory()->the_hole_value(); for (int i = 0; i < capacity; i++) { Object* key = dict->KeyAt(i); if (key->IsNumber()) { @@ -1487,71 +1430,46 @@ return length_object; } - // TODO(ishell): Temporary wrapper until handlified. - MUST_USE_RESULT static Handle<Object> SetLengthWithoutNormalize( - Handle<FixedArrayBase> store, - Handle<JSArray> array, - Handle<Object> length_object, - uint32_t length) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - SetLengthWithoutNormalize( - *store, *array, *length_object, length), - Object); - } - - MUST_USE_RESULT static MaybeObject* DeleteCommon( - JSObject* obj, + MUST_USE_RESULT static MaybeHandle<Object> DeleteCommon( + Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) { Isolate* isolate = obj->GetIsolate(); - Heap* heap = isolate->heap(); - FixedArray* backing_store = FixedArray::cast(obj->elements()); + Handle<FixedArray> backing_store(FixedArray::cast(obj->elements()), + isolate); bool is_arguments = (obj->GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS); if (is_arguments) { - backing_store = FixedArray::cast(backing_store->get(1)); + backing_store = handle(FixedArray::cast(backing_store->get(1)), isolate); } - SeededNumberDictionary* dictionary = - SeededNumberDictionary::cast(backing_store); + Handle<SeededNumberDictionary> dictionary = + Handle<SeededNumberDictionary>::cast(backing_store); int entry = dictionary->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { - Object* result = dictionary->DeleteProperty(entry, mode); - if (result == heap->false_value()) { + Handle<Object> result = + SeededNumberDictionary::DeleteProperty(dictionary, entry, mode); + if (*result == *isolate->factory()->false_value()) { if (mode == JSObject::STRICT_DELETION) { // Deleting a non-configurable property in strict mode. 
- HandleScope scope(isolate); - Handle<Object> holder(obj, isolate); Handle<Object> name = isolate->factory()->NewNumberFromUint(key); - Handle<Object> args[2] = { name, holder }; + Handle<Object> args[2] = { name, obj }; Handle<Object> error = isolate->factory()->NewTypeError("strict_delete_property", HandleVector(args, 2)); - return isolate->Throw(*error); + return isolate->Throw<Object>(error); } - return heap->false_value(); - } - MaybeObject* maybe_elements = dictionary->Shrink(key); - FixedArray* new_elements = NULL; - if (!maybe_elements->To(&new_elements)) { - return maybe_elements; + return isolate->factory()->false_value(); } + Handle<FixedArray> new_elements = + SeededNumberDictionary::Shrink(dictionary, key); + if (is_arguments) { - FixedArray::cast(obj->elements())->set(1, new_elements); + FixedArray::cast(obj->elements())->set(1, *new_elements); } else { - obj->set_elements(new_elements); + obj->set_elements(*new_elements); } } - return heap->true_value(); - } - - // TODO(ishell): Temporary wrapper until handlified. 
- MUST_USE_RESULT static Handle<Object> DeleteCommon( - Handle<JSObject> obj, - uint32_t key, - JSReceiver::DeleteMode mode) { - CALL_HEAP_FUNCTION(obj->GetIsolate(), - DeleteCommon(*obj, key, mode), - Object); + return isolate->factory()->true_value(); } static void CopyElementsImpl(Handle<FixedArrayBase> from, @@ -1569,42 +1487,42 @@ friend class ElementsAccessorBase<DictionaryElementsAccessor, ElementsKindTraits<DICTIONARY_ELEMENTS> >; - MUST_USE_RESULT virtual Handle<Object> Delete( + MUST_USE_RESULT virtual MaybeHandle<Object> Delete( Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE { return DeleteCommon(obj, key, mode); } - MUST_USE_RESULT static MaybeObject* GetImpl( - Object* receiver, - JSObject* obj, + MUST_USE_RESULT static MaybeHandle<Object> GetImpl( + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* store) { - SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store); + Handle<FixedArrayBase> store) { + Handle<SeededNumberDictionary> backing_store = + Handle<SeededNumberDictionary>::cast(store); + Isolate* isolate = backing_store->GetIsolate(); int entry = backing_store->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { - Object* element = backing_store->ValueAt(entry); + Handle<Object> element(backing_store->ValueAt(entry), isolate); PropertyDetails details = backing_store->DetailsAt(entry); if (details.type() == CALLBACKS) { - return obj->GetElementWithCallback(receiver, - element, - key, - obj); + return JSObject::GetElementWithCallback( + obj, receiver, element, key, obj); } else { return element; } } - return obj->GetHeap()->the_hole_value(); + return isolate->factory()->the_hole_value(); } MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* backing_store) { - SeededNumberDictionary* dictionary = - 
SeededNumberDictionary::cast(backing_store); + Handle<FixedArrayBase> backing_store) { + Handle<SeededNumberDictionary> dictionary = + Handle<SeededNumberDictionary>::cast(backing_store); int entry = dictionary->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { return dictionary->DetailsAt(entry).attributes(); @@ -1613,11 +1531,12 @@ } MUST_USE_RESULT static PropertyType GetTypeImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* store) { - SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store); + Handle<FixedArrayBase> store) { + Handle<SeededNumberDictionary> backing_store = + Handle<SeededNumberDictionary>::cast(store); int entry = backing_store->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { return backing_store->DetailsAt(entry).type(); @@ -1625,32 +1544,36 @@ return NONEXISTENT; } - MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl( - Object* receiver, - JSObject* obj, + MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl( + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* store) { - SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store); + Handle<FixedArrayBase> store) { + Handle<SeededNumberDictionary> backing_store = + Handle<SeededNumberDictionary>::cast(store); int entry = backing_store->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound && backing_store->DetailsAt(entry).type() == CALLBACKS && backing_store->ValueAt(entry)->IsAccessorPair()) { - return AccessorPair::cast(backing_store->ValueAt(entry)); + return handle(AccessorPair::cast(backing_store->ValueAt(entry))); } - return NULL; + return MaybeHandle<AccessorPair>(); } - static bool HasElementImpl(Object* receiver, - JSObject* holder, + static bool HasElementImpl(Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store) { - return 
SeededNumberDictionary::cast(backing_store)->FindEntry(key) != - SeededNumberDictionary::kNotFound; + Handle<FixedArrayBase> store) { + Handle<SeededNumberDictionary> backing_store = + Handle<SeededNumberDictionary>::cast(store); + return backing_store->FindEntry(key) != SeededNumberDictionary::kNotFound; } - static uint32_t GetKeyForIndexImpl(FixedArrayBase* store, + static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> store, uint32_t index) { - SeededNumberDictionary* dict = SeededNumberDictionary::cast(store); + DisallowHeapAllocation no_gc; + Handle<SeededNumberDictionary> dict = + Handle<SeededNumberDictionary>::cast(store); Object* key = dict->KeyAt(index); return Smi::cast(key)->value(); } @@ -1670,31 +1593,38 @@ SloppyArgumentsElementsAccessor, ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >; - MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - FixedArrayBase* parameters) { - FixedArray* parameter_map = FixedArray::cast(parameters); - Object* probe = GetParameterMapArg(obj, parameter_map, key); + MUST_USE_RESULT static MaybeHandle<Object> GetImpl( + Handle<Object> receiver, + Handle<JSObject> obj, + uint32_t key, + Handle<FixedArrayBase> parameters) { + Isolate* isolate = obj->GetIsolate(); + Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters); + Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { + DisallowHeapAllocation no_gc; Context* context = Context::cast(parameter_map->get(0)); - int context_index = Smi::cast(probe)->value(); - ASSERT(!context->get(context_index)->IsTheHole()); - return context->get(context_index); + int context_index = Handle<Smi>::cast(probe)->value(); + DCHECK(!context->get(context_index)->IsTheHole()); + return handle(context->get(context_index), isolate); } else { // Object is not mapped, defer to the arguments. 
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); - MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get( - receiver, obj, key, arguments); - Object* result; - if (!maybe_result->ToObject(&result)) return maybe_result; + Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)), + isolate); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + ElementsAccessor::ForArray(arguments)->Get( + receiver, obj, key, arguments), + Object); // Elements of the arguments object in slow mode might be slow aliases. if (result->IsAliasedArgumentsEntry()) { - AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result); + DisallowHeapAllocation no_gc; + AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*result); Context* context = Context::cast(parameter_map->get(0)); int context_index = entry->aliased_context_slot(); - ASSERT(!context->get(context_index)->IsTheHole()); - return context->get(context_index); + DCHECK(!context->get(context_index)->IsTheHole()); + return handle(context->get(context_index), isolate); } else { return result; } @@ -1702,57 +1632,57 @@ } MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* backing_store) { - FixedArray* parameter_map = FixedArray::cast(backing_store); - Object* probe = GetParameterMapArg(obj, parameter_map, key); + Handle<FixedArrayBase> backing_store) { + Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(backing_store); + Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { return NONE; } else { // If not aliased, check the arguments. 
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1))); return ElementsAccessor::ForArray(arguments)->GetAttributes( receiver, obj, key, arguments); } } MUST_USE_RESULT static PropertyType GetTypeImpl( - Object* receiver, - JSObject* obj, + Handle<Object> receiver, + Handle<JSObject> obj, uint32_t key, - FixedArrayBase* parameters) { - FixedArray* parameter_map = FixedArray::cast(parameters); - Object* probe = GetParameterMapArg(obj, parameter_map, key); + Handle<FixedArrayBase> parameters) { + Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters); + Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { return FIELD; } else { // If not aliased, check the arguments. - FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1))); return ElementsAccessor::ForArray(arguments)->GetType( receiver, obj, key, arguments); } } - MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl( - Object* receiver, - JSObject* obj, - uint32_t key, - FixedArrayBase* parameters) { - FixedArray* parameter_map = FixedArray::cast(parameters); - Object* probe = GetParameterMapArg(obj, parameter_map, key); + MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl( + Handle<Object> receiver, + Handle<JSObject> obj, + uint32_t key, + Handle<FixedArrayBase> parameters) { + Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters); + Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { - return NULL; + return MaybeHandle<AccessorPair>(); } else { // If not aliased, check the arguments. 
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1))); return ElementsAccessor::ForArray(arguments)->GetAccessorPair( receiver, obj, key, arguments); } } - MUST_USE_RESULT static Handle<Object> SetLengthImpl( + MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl( Handle<JSObject> obj, Handle<Object> length, Handle<FixedArrayBase> parameter_map) { @@ -1762,7 +1692,7 @@ return obj; } - MUST_USE_RESULT virtual Handle<Object> Delete( + MUST_USE_RESULT virtual MaybeHandle<Object> Delete( Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE { @@ -1798,47 +1728,42 @@ UNREACHABLE(); } - static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) { - FixedArray* parameter_map = FixedArray::cast(backing_store); - FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1)); + static uint32_t GetCapacityImpl(Handle<FixedArrayBase> backing_store) { + Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(backing_store); + Handle<FixedArrayBase> arguments( + FixedArrayBase::cast(parameter_map->get(1))); return Max(static_cast<uint32_t>(parameter_map->length() - 2), ForArray(arguments)->GetCapacity(arguments)); } - static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict, + static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> dict, uint32_t index) { return index; } - static bool HasElementImpl(Object* receiver, - JSObject* holder, + static bool HasElementImpl(Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* parameters) { - FixedArray* parameter_map = FixedArray::cast(parameters); - Object* probe = GetParameterMapArg(holder, parameter_map, key); + Handle<FixedArrayBase> parameters) { + Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters); + Handle<Object> probe = GetParameterMapArg(holder, parameter_map, key); if (!probe->IsTheHole()) { return true; } else { - FixedArrayBase* 
arguments = - FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1)); + Isolate* isolate = holder->GetIsolate(); + Handle<FixedArrayBase> arguments(FixedArrayBase::cast( + Handle<FixedArray>::cast(parameter_map)->get(1)), isolate); ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments); - return !accessor->Get(receiver, holder, key, arguments)->IsTheHole(); + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, value, + accessor->Get(receiver, holder, key, arguments), + false); + return !value->IsTheHole(); } } private: - // TODO(ishell): remove when all usages are handlified. - static Object* GetParameterMapArg(JSObject* holder, - FixedArray* parameter_map, - uint32_t key) { - uint32_t length = holder->IsJSArray() - ? Smi::cast(JSArray::cast(holder)->length())->value() - : parameter_map->length(); - return key < (length - 2) - ? parameter_map->get(key + 2) - : parameter_map->GetHeap()->the_hole_value(); - } - static Handle<Object> GetParameterMapArg(Handle<JSObject> holder, Handle<FixedArray> parameter_map, uint32_t key) { @@ -1853,7 +1778,7 @@ }; -ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) { +ElementsAccessor* ElementsAccessor::ForArray(Handle<FixedArrayBase> array) { return elements_accessors_[ElementsKindForArray(array)]; } @@ -1881,8 +1806,9 @@ template <typename ElementsAccessorSubclass, typename ElementsKindTraits> -MUST_USE_RESULT Handle<Object> ElementsAccessorBase<ElementsAccessorSubclass, - ElementsKindTraits>:: +MUST_USE_RESULT +MaybeHandle<Object> ElementsAccessorBase<ElementsAccessorSubclass, + ElementsKindTraits>:: SetLengthImpl(Handle<JSObject> obj, Handle<Object> length, Handle<FixedArrayBase> backing_store) { @@ -1890,20 +1816,21 @@ Handle<JSArray> array = Handle<JSArray>::cast(obj); // Fast case: The new length fits into a Smi. 
- Handle<Object> smi_length = Object::ToSmi(isolate, length); + Handle<Object> smi_length; - if (!smi_length.is_null() && smi_length->IsSmi()) { + if (Object::ToSmi(isolate, length).ToHandle(&smi_length) && + smi_length->IsSmi()) { const int value = Handle<Smi>::cast(smi_length)->value(); if (value >= 0) { Handle<Object> new_length = ElementsAccessorSubclass:: SetLengthWithoutNormalize(backing_store, array, smi_length, value); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, new_length, new_length); + DCHECK(!new_length.is_null()); // even though the proposed length was a smi, new_length could // still be a heap number because SetLengthWithoutNormalize doesn't // allow the array length property to drop below the index of // non-deletable elements. - ASSERT(new_length->IsSmi() || new_length->IsHeapNumber() || + DCHECK(new_length->IsSmi() || new_length->IsHeapNumber() || new_length->IsUndefined()); if (new_length->IsSmi()) { array->set_length(*Handle<Smi>::cast(new_length)); @@ -1924,13 +1851,13 @@ if (length->ToArrayIndex(&value)) { Handle<SeededNumberDictionary> dictionary = JSObject::NormalizeElements(array); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, dictionary, dictionary); + DCHECK(!dictionary.is_null()); Handle<Object> new_length = DictionaryElementsAccessor:: SetLengthWithoutNormalize(dictionary, array, length, value); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, new_length, new_length); + DCHECK(!new_length.is_null()); - ASSERT(new_length->IsNumber()); + DCHECK(new_length->IsNumber()); array->set_length(*new_length); return array; } else { @@ -1947,8 +1874,8 @@ } -Handle<Object> ArrayConstructInitializeElements(Handle<JSArray> array, - Arguments* args) { +MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array, + Arguments* args) { // Optimize the case where there is one argument and the argument is a // small smi. 
if (args->length() == 1) { diff -Nru nodejs-0.11.13/deps/v8/src/elements.h nodejs-0.11.15/deps/v8/src/elements.h --- nodejs-0.11.13/deps/v8/src/elements.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/elements.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ELEMENTS_H_ #define V8_ELEMENTS_H_ -#include "elements-kind.h" -#include "objects.h" -#include "heap.h" -#include "isolate.h" +#include "src/elements-kind.h" +#include "src/heap/heap.h" +#include "src/isolate.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -48,35 +25,43 @@ // Checks the elements of an object for consistency, asserting when a problem // is found. - virtual void Validate(JSObject* obj) = 0; + virtual void Validate(Handle<JSObject> obj) = 0; // Returns true if a holder contains an element with the specified key // without iterating up the prototype chain. The caller can optionally pass // in the backing store to use for the check, which must be compatible with // the ElementsKind of the ElementsAccessor. If backing_store is NULL, the // holder->elements() is used as the backing store. - virtual bool HasElement(Object* receiver, - JSObject* holder, - uint32_t key, - FixedArrayBase* backing_store = NULL) = 0; + virtual bool HasElement( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key, + Handle<FixedArrayBase> backing_store) = 0; + + inline bool HasElement( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key) { + return HasElement(receiver, holder, key, handle(holder->elements())); + } // Returns the element with the specified key or undefined if there is no such // element. This method doesn't iterate up the prototype chain. The caller // can optionally pass in the backing store to use for the check, which must // be compatible with the ElementsKind of the ElementsAccessor. If // backing_store is NULL, the holder->elements() is used as the backing store. 
- MUST_USE_RESULT virtual Handle<Object> Get( + MUST_USE_RESULT virtual MaybeHandle<Object> Get( Handle<Object> receiver, Handle<JSObject> holder, uint32_t key, - Handle<FixedArrayBase> backing_store = - Handle<FixedArrayBase>::null()) = 0; + Handle<FixedArrayBase> backing_store) = 0; - MUST_USE_RESULT virtual MaybeObject* Get( - Object* receiver, - JSObject* holder, - uint32_t key, - FixedArrayBase* backing_store = NULL) = 0; + MUST_USE_RESULT inline MaybeHandle<Object> Get( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key) { + return Get(receiver, holder, key, handle(holder->elements())); + } // Returns an element's attributes, or ABSENT if there is no such // element. This method doesn't iterate up the prototype chain. The caller @@ -84,10 +69,17 @@ // be compatible with the ElementsKind of the ElementsAccessor. If // backing_store is NULL, the holder->elements() is used as the backing store. MUST_USE_RESULT virtual PropertyAttributes GetAttributes( - Object* receiver, - JSObject* holder, + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store = NULL) = 0; + Handle<FixedArrayBase> backing_store) = 0; + + MUST_USE_RESULT inline PropertyAttributes GetAttributes( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key) { + return GetAttributes(receiver, holder, key, handle(holder->elements())); + } // Returns an element's type, or NONEXISTENT if there is no such // element. This method doesn't iterate up the prototype chain. The caller @@ -95,28 +87,42 @@ // be compatible with the ElementsKind of the ElementsAccessor. If // backing_store is NULL, the holder->elements() is used as the backing store. 
MUST_USE_RESULT virtual PropertyType GetType( - Object* receiver, - JSObject* holder, + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store = NULL) = 0; + Handle<FixedArrayBase> backing_store) = 0; + + MUST_USE_RESULT inline PropertyType GetType( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key) { + return GetType(receiver, holder, key, handle(holder->elements())); + } // Returns an element's accessors, or NULL if the element does not exist or // is plain. This method doesn't iterate up the prototype chain. The caller // can optionally pass in the backing store to use for the check, which must // be compatible with the ElementsKind of the ElementsAccessor. If // backing_store is NULL, the holder->elements() is used as the backing store. - MUST_USE_RESULT virtual AccessorPair* GetAccessorPair( - Object* receiver, - JSObject* holder, + MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair( + Handle<Object> receiver, + Handle<JSObject> holder, uint32_t key, - FixedArrayBase* backing_store = NULL) = 0; + Handle<FixedArrayBase> backing_store) = 0; + + MUST_USE_RESULT inline MaybeHandle<AccessorPair> GetAccessorPair( + Handle<Object> receiver, + Handle<JSObject> holder, + uint32_t key) { + return GetAccessorPair(receiver, holder, key, handle(holder->elements())); + } // Modifies the length data property as specified for JSArrays and resizes the // underlying backing store accordingly. The method honors the semantics of // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that // have non-deletable elements can only be shrunk to the size of highest // element that is non-deletable. - MUST_USE_RESULT virtual Handle<Object> SetLength( + MUST_USE_RESULT virtual MaybeHandle<Object> SetLength( Handle<JSArray> holder, Handle<Object> new_length) = 0; @@ -132,7 +138,7 @@ int length) = 0; // Deletes an element in an object, returning a new elements backing store. 
- MUST_USE_RESULT virtual Handle<Object> Delete( + MUST_USE_RESULT virtual MaybeHandle<Object> Delete( Handle<JSObject> holder, uint32_t key, JSReceiver::DeleteMode mode) = 0; @@ -151,36 +157,53 @@ // store is available, it can be passed in source and source_holder is // ignored. virtual void CopyElements( - Handle<JSObject> source_holder, + Handle<FixedArrayBase> source, + uint32_t source_start, + ElementsKind source_kind, + Handle<FixedArrayBase> destination, + uint32_t destination_start, + int copy_size) = 0; + + // TODO(ishell): Keeping |source_holder| parameter in a non-handlified form + // helps avoiding ArrayConcat() builtin performance degradation. + // Revisit this later. + virtual void CopyElements( + JSObject* source_holder, uint32_t source_start, ElementsKind source_kind, Handle<FixedArrayBase> destination, uint32_t destination_start, - int copy_size, - Handle<FixedArrayBase> source = Handle<FixedArrayBase>::null()) = 0; + int copy_size) = 0; - void CopyElements( + inline void CopyElements( Handle<JSObject> from_holder, Handle<FixedArrayBase> to, - ElementsKind from_kind, - Handle<FixedArrayBase> from = Handle<FixedArrayBase>::null()) { - CopyElements(from_holder, 0, from_kind, to, 0, - kCopyToEndAndInitializeToHole, from); + ElementsKind from_kind) { + CopyElements( + *from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole); } - MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray( - Object* receiver, - JSObject* holder, - FixedArray* to, - FixedArrayBase* from = NULL) = 0; + MUST_USE_RESULT virtual MaybeHandle<FixedArray> AddElementsToFixedArray( + Handle<Object> receiver, + Handle<JSObject> holder, + Handle<FixedArray> to, + Handle<FixedArrayBase> from) = 0; + + MUST_USE_RESULT inline MaybeHandle<FixedArray> AddElementsToFixedArray( + Handle<Object> receiver, + Handle<JSObject> holder, + Handle<FixedArray> to) { + return AddElementsToFixedArray( + receiver, holder, to, handle(holder->elements())); + } // Returns a shared 
ElementsAccessor for the specified ElementsKind. static ElementsAccessor* ForKind(ElementsKind elements_kind) { - ASSERT(elements_kind < kElementsKindCount); + DCHECK(elements_kind < kElementsKindCount); return elements_accessors_[elements_kind]; } - static ElementsAccessor* ForArray(FixedArrayBase* array); + static ElementsAccessor* ForArray(Handle<FixedArrayBase> array); static void InitializeOncePerProcess(); static void TearDown(); @@ -188,7 +211,7 @@ protected: friend class SloppyArgumentsElementsAccessor; - virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0; + virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store) = 0; // Element handlers distinguish between indexes and keys when they manipulate // elements. Indexes refer to elements in terms of their location in the @@ -198,7 +221,7 @@ // keys are equivalent to indexes, and GetKeyForIndex returns the same value // it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps // the index to a key using the KeyAt method on the NumberDictionary. 
- virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store, + virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store, uint32_t index) = 0; private: @@ -208,11 +231,12 @@ DISALLOW_COPY_AND_ASSIGN(ElementsAccessor); }; -void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key, +void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key, bool allow_appending = false); -Handle<Object> ArrayConstructInitializeElements(Handle<JSArray> array, - Arguments* args); +MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements( + Handle<JSArray> array, + Arguments* args); } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/elements-kind.cc nodejs-0.11.15/deps/v8/src/elements-kind.cc --- nodejs-0.11.13/deps/v8/src/elements-kind.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/elements-kind.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "elements-kind.h" - -#include "api.h" -#include "elements.h" -#include "objects.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/elements-kind.h" + +#include "src/api.h" +#include "src/base/lazy-instance.h" +#include "src/elements.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -74,23 +52,16 @@ } -const char* ElementsKindToString(ElementsKind kind) { - ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); - return accessor->name(); -} - - -void PrintElementsKind(FILE* out, ElementsKind kind) { - PrintF(out, "%s", ElementsKindToString(kind)); +int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) { + STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); + return IsExternalArrayElementsKind(elements_kind) + ? 
0 : (FixedArray::kHeaderSize - kHeapObjectTag); } -ElementsKind GetInitialFastElementsKind() { - if (FLAG_packed_arrays) { - return FAST_SMI_ELEMENTS; - } else { - return FAST_HOLEY_SMI_ELEMENTS; - } +const char* ElementsKindToString(ElementsKind kind) { + ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); + return accessor->name(); } @@ -119,13 +90,13 @@ }; -static LazyInstance<ElementsKind*, - InitializeFastElementsKindSequence>::type +static base::LazyInstance<ElementsKind*, + InitializeFastElementsKindSequence>::type fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER; ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) { - ASSERT(sequence_number >= 0 && + DCHECK(sequence_number >= 0 && sequence_number < kFastElementsKindCount); return fast_elements_kind_sequence.Get()[sequence_number]; } @@ -159,8 +130,8 @@ ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind, bool allow_only_packed) { - ASSERT(IsFastElementsKind(elements_kind)); - ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND); + DCHECK(IsFastElementsKind(elements_kind)); + DCHECK(elements_kind != TERMINAL_FAST_ELEMENTS_KIND); while (true) { elements_kind = GetNextTransitionElementsKind(elements_kind); if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) { @@ -172,28 +143,55 @@ } +static bool IsTypedArrayElementsKind(ElementsKind elements_kind) { + return IsFixedTypedArrayElementsKind(elements_kind) || + IsExternalArrayElementsKind(elements_kind); +} + + +static inline bool IsFastTransitionTarget(ElementsKind elements_kind) { + return IsFastElementsKind(elements_kind) || + elements_kind == DICTIONARY_ELEMENTS; +} + bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, ElementsKind to_kind) { - switch (from_kind) { - case FAST_SMI_ELEMENTS: - return to_kind != FAST_SMI_ELEMENTS; - case FAST_HOLEY_SMI_ELEMENTS: - return to_kind != FAST_SMI_ELEMENTS && - to_kind != FAST_HOLEY_SMI_ELEMENTS; - case FAST_DOUBLE_ELEMENTS: - return 
to_kind != FAST_SMI_ELEMENTS && - to_kind != FAST_HOLEY_SMI_ELEMENTS && - to_kind != FAST_DOUBLE_ELEMENTS; - case FAST_HOLEY_DOUBLE_ELEMENTS: - return to_kind == FAST_ELEMENTS || - to_kind == FAST_HOLEY_ELEMENTS; - case FAST_ELEMENTS: - return to_kind == FAST_HOLEY_ELEMENTS; - case FAST_HOLEY_ELEMENTS: - return false; - default: - return false; + if (IsTypedArrayElementsKind(from_kind) || + IsTypedArrayElementsKind(to_kind)) { + switch (from_kind) { +#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \ + case TYPE##_ELEMENTS: \ + return to_kind == EXTERNAL_##TYPE##_ELEMENTS; + + TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE); +#undef FIXED_TYPED_ARRAY_CASE + default: + return false; + } + } + if (IsFastElementsKind(from_kind) && IsFastTransitionTarget(to_kind)) { + switch (from_kind) { + case FAST_SMI_ELEMENTS: + return to_kind != FAST_SMI_ELEMENTS; + case FAST_HOLEY_SMI_ELEMENTS: + return to_kind != FAST_SMI_ELEMENTS && + to_kind != FAST_HOLEY_SMI_ELEMENTS; + case FAST_DOUBLE_ELEMENTS: + return to_kind != FAST_SMI_ELEMENTS && + to_kind != FAST_HOLEY_SMI_ELEMENTS && + to_kind != FAST_DOUBLE_ELEMENTS; + case FAST_HOLEY_DOUBLE_ELEMENTS: + return to_kind == FAST_ELEMENTS || + to_kind == FAST_HOLEY_ELEMENTS; + case FAST_ELEMENTS: + return to_kind == FAST_HOLEY_ELEMENTS; + case FAST_HOLEY_ELEMENTS: + return false; + default: + return false; + } } + return false; } diff -Nru nodejs-0.11.13/deps/v8/src/elements-kind.h nodejs-0.11.15/deps/v8/src/elements-kind.h --- nodejs-0.11.13/deps/v8/src/elements-kind.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/elements-kind.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ELEMENTS_KIND_H_ #define V8_ELEMENTS_KIND_H_ -#include "v8checks.h" +#include "src/checks.h" namespace v8 { namespace internal { @@ -95,10 +72,10 @@ FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS; int ElementsKindToShiftSize(ElementsKind elements_kind); +int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind); const char* ElementsKindToString(ElementsKind kind); -void PrintElementsKind(FILE* out, ElementsKind kind); -ElementsKind GetInitialFastElementsKind(); +inline ElementsKind GetInitialFastElementsKind() { return FAST_SMI_ELEMENTS; } ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number); int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind); @@ -129,7 +106,7 @@ inline bool IsFastElementsKind(ElementsKind kind) { - ASSERT(FIRST_FAST_ELEMENTS_KIND == 0); + DCHECK(FIRST_FAST_ELEMENTS_KIND == 0); return kind <= FAST_HOLEY_DOUBLE_ELEMENTS; } @@ -232,7 +209,7 @@ inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) { - ASSERT(IsFastSmiElementsKind(from_kind)); + DCHECK(IsFastSmiElementsKind(from_kind)); return (from_kind == FAST_SMI_ELEMENTS) ? FAST_ELEMENTS : FAST_HOLEY_ELEMENTS; diff -Nru nodejs-0.11.13/deps/v8/src/execution.cc nodejs-0.11.15/deps/v8/src/execution.cc --- nodejs-0.11.13/deps/v8/src/execution.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/execution.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,58 +1,25 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include <stdlib.h> - -#include "v8.h" - -#include "api.h" -#include "bootstrapper.h" -#include "codegen.h" -#include "debug.h" -#include "deoptimizer.h" -#include "isolate-inl.h" -#include "runtime-profiler.h" -#include "simulator.h" -#include "v8threads.h" -#include "vm-state-inl.h" +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/execution.h" + +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/isolate-inl.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { - StackGuard::StackGuard() : isolate_(NULL) { } void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) { - ASSERT(isolate_ != NULL); - // Ignore attempts to interrupt when interrupts are postponed. - if (should_postpone_interrupts(lock)) return; + DCHECK(isolate_ != NULL); thread_local_.jslimit_ = kInterruptLimit; thread_local_.climit_ = kInterruptLimit; isolate_->heap()->SetStackLimits(); @@ -60,19 +27,19 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) { - ASSERT(isolate_ != NULL); + DCHECK(isolate_ != NULL); thread_local_.jslimit_ = thread_local_.real_jslimit_; thread_local_.climit_ = thread_local_.real_climit_; isolate_->heap()->SetStackLimits(); } -static Handle<Object> Invoke(bool is_construct, - Handle<JSFunction> function, - Handle<Object> receiver, - int argc, - Handle<Object> args[], - bool* has_pending_exception) { +MUST_USE_RESULT static MaybeHandle<Object> Invoke( + bool is_construct, + Handle<JSFunction> function, + Handle<Object> receiver, + int argc, + Handle<Object> args[]) { Isolate* isolate = function->GetIsolate(); // Entering JavaScript. @@ -80,13 +47,12 @@ CHECK(AllowJavascriptExecution::IsAllowed(isolate)); if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) { isolate->ThrowIllegalOperation(); - *has_pending_exception = true; isolate->ReportPendingMessages(); - return Handle<Object>(); + return MaybeHandle<Object>(); } // Placeholder for return value. - MaybeObject* value = reinterpret_cast<Object*>(kZapValue); + Object* value = NULL; typedef Object* (*JSEntryFunction)(byte* entry, Object* function, @@ -102,13 +68,12 @@ // receiver instead to avoid having a 'this' pointer which refers // directly to a global object. 
if (receiver->IsGlobalObject()) { - Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver); - receiver = Handle<JSObject>(global->global_receiver()); + receiver = handle(Handle<GlobalObject>::cast(receiver)->global_proxy()); } // Make sure that the global object of the context we're about to // make the current one is indeed a global object. - ASSERT(function->context()->global_object()->IsGlobalObject()); + DCHECK(function->context()->global_object()->IsGlobalObject()); { // Save and restore context around invocation and block the @@ -127,41 +92,36 @@ } #ifdef VERIFY_HEAP - value->Verify(); + value->ObjectVerify(); #endif // Update the pending exception flag and return the value. - *has_pending_exception = value->IsException(); - ASSERT(*has_pending_exception == isolate->has_pending_exception()); - if (*has_pending_exception) { + bool has_exception = value->IsException(); + DCHECK(has_exception == isolate->has_pending_exception()); + if (has_exception) { isolate->ReportPendingMessages(); -#ifdef ENABLE_DEBUGGER_SUPPORT // Reset stepping state when script exits with uncaught exception. 
- if (isolate->debugger()->IsDebuggerActive()) { + if (isolate->debug()->is_active()) { isolate->debug()->ClearStepping(); } -#endif // ENABLE_DEBUGGER_SUPPORT - return Handle<Object>(); + return MaybeHandle<Object>(); } else { isolate->clear_pending_message(); } - return Handle<Object>(value->ToObjectUnchecked(), isolate); + return Handle<Object>(value, isolate); } -Handle<Object> Execution::Call(Isolate* isolate, - Handle<Object> callable, - Handle<Object> receiver, - int argc, - Handle<Object> argv[], - bool* pending_exception, - bool convert_receiver) { - *pending_exception = false; - +MaybeHandle<Object> Execution::Call(Isolate* isolate, + Handle<Object> callable, + Handle<Object> receiver, + int argc, + Handle<Object> argv[], + bool convert_receiver) { if (!callable->IsJSFunction()) { - callable = TryGetFunctionDelegate(isolate, callable, pending_exception); - if (*pending_exception) return callable; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, callable, TryGetFunctionDelegate(isolate, callable), Object); } Handle<JSFunction> func = Handle<JSFunction>::cast(callable); @@ -170,37 +130,30 @@ !func->shared()->native() && func->shared()->strict_mode() == SLOPPY) { if (receiver->IsUndefined() || receiver->IsNull()) { - Object* global = func->context()->global_object()->global_receiver(); - // Under some circumstances, 'global' can be the JSBuiltinsObject - // In that case, don't rewrite. (FWIW, the same holds for - // GetIsolate()->global_object()->global_receiver().) 
- if (!global->IsJSBuiltinsObject()) { - receiver = Handle<Object>(global, func->GetIsolate()); - } + receiver = handle(func->global_proxy()); + DCHECK(!receiver->IsJSBuiltinsObject()); } else { - receiver = ToObject(isolate, receiver, pending_exception); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, receiver, ToObject(isolate, receiver), Object); } - if (*pending_exception) return callable; } - return Invoke(false, func, receiver, argc, argv, pending_exception); + return Invoke(false, func, receiver, argc, argv); } -Handle<Object> Execution::New(Handle<JSFunction> func, - int argc, - Handle<Object> argv[], - bool* pending_exception) { - return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv, - pending_exception); +MaybeHandle<Object> Execution::New(Handle<JSFunction> func, + int argc, + Handle<Object> argv[]) { + return Invoke(true, func, handle(func->global_proxy()), argc, argv); } -Handle<Object> Execution::TryCall(Handle<JSFunction> func, - Handle<Object> receiver, - int argc, - Handle<Object> args[], - bool* caught_exception) { +MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func, + Handle<Object> receiver, + int argc, + Handle<Object> args[], + Handle<Object>* exception_out) { // Enter a try-block while executing the JavaScript code. To avoid // duplicate error printing it must be non-verbose. Also, to avoid // creating message objects during stack overflow we shouldn't @@ -208,36 +161,36 @@ v8::TryCatch catcher; catcher.SetVerbose(false); catcher.SetCaptureMessage(false); - *caught_exception = false; // Get isolate now, because handle might be persistent // and get destroyed in the next call. 
Isolate* isolate = func->GetIsolate(); - Handle<Object> result = Invoke(false, func, receiver, argc, args, - caught_exception); + MaybeHandle<Object> maybe_result = Invoke(false, func, receiver, argc, args); - if (*caught_exception) { - ASSERT(catcher.HasCaught()); - ASSERT(isolate->has_pending_exception()); - ASSERT(isolate->external_caught_exception()); - if (isolate->pending_exception() == - isolate->heap()->termination_exception()) { - result = isolate->factory()->termination_exception(); - } else { - result = v8::Utils::OpenHandle(*catcher.Exception()); + if (maybe_result.is_null()) { + DCHECK(catcher.HasCaught()); + DCHECK(isolate->has_pending_exception()); + DCHECK(isolate->external_caught_exception()); + if (exception_out != NULL) { + if (isolate->pending_exception() == + isolate->heap()->termination_exception()) { + *exception_out = isolate->factory()->termination_exception(); + } else { + *exception_out = v8::Utils::OpenHandle(*catcher.Exception()); + } } isolate->OptionalRescheduleException(true); } - ASSERT(!isolate->has_pending_exception()); - ASSERT(!isolate->external_caught_exception()); - return result; + DCHECK(!isolate->has_pending_exception()); + DCHECK(!isolate->external_caught_exception()); + return maybe_result; } Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate, Handle<Object> object) { - ASSERT(!object->IsJSFunction()); + DCHECK(!object->IsJSFunction()); Factory* factory = isolate->factory(); // If you return a function from here, it will be called when an @@ -262,10 +215,9 @@ } -Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate, - Handle<Object> object, - bool* has_pending_exception) { - ASSERT(!object->IsJSFunction()); +MaybeHandle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate, + Handle<Object> object) { + DCHECK(!object->IsJSFunction()); // If object is a function proxy, get its handler. Iterate if necessary. Object* fun = *object; @@ -286,16 +238,14 @@ // throw a non-callable exception. 
i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError( "called_non_callable", i::HandleVector<i::Object>(&object, 1)); - isolate->Throw(*error_obj); - *has_pending_exception = true; - return isolate->factory()->undefined_value(); + return isolate->Throw<Object>(error_obj); } Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate, Handle<Object> object) { - ASSERT(!object->IsJSFunction()); + DCHECK(!object->IsJSFunction()); // If you return a function from here, it will be called when an // attempt is made to call the given object as a constructor. @@ -319,11 +269,9 @@ } -Handle<Object> Execution::TryGetConstructorDelegate( - Isolate* isolate, - Handle<Object> object, - bool* has_pending_exception) { - ASSERT(!object->IsJSFunction()); +MaybeHandle<Object> Execution::TryGetConstructorDelegate( + Isolate* isolate, Handle<Object> object) { + DCHECK(!object->IsJSFunction()); // If you return a function from here, it will be called when an // attempt is made to call the given object as a constructor. @@ -347,45 +295,7 @@ // throw a non-callable exception. 
i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError( "called_non_callable", i::HandleVector<i::Object>(&object, 1)); - isolate->Throw(*error_obj); - *has_pending_exception = true; - - return isolate->factory()->undefined_value(); -} - - -void Execution::RunMicrotasks(Isolate* isolate) { - ASSERT(isolate->microtask_pending()); - bool threw = false; - Execution::Call( - isolate, - isolate->run_microtasks(), - isolate->factory()->undefined_value(), - 0, - NULL, - &threw); - ASSERT(!threw); -} - - -void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) { - bool threw = false; - Handle<Object> args[] = { microtask }; - Execution::Call( - isolate, - isolate->enqueue_external_microtask(), - isolate->factory()->undefined_value(), - 1, - args, - &threw); - ASSERT(!threw); -} - - -bool StackGuard::IsStackOverflow() { - ExecutionAccess access(isolate_); - return (thread_local_.jslimit_ != kInterruptLimit && - thread_local_.climit_ != kInterruptLimit); + return isolate->Throw<Object>(error_obj); } @@ -419,199 +329,78 @@ } -bool StackGuard::ShouldPostponeInterrupts() { - ExecutionAccess access(isolate_); - return should_postpone_interrupts(access); -} - - -bool StackGuard::IsInterrupted() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & INTERRUPT) != 0; -} - - -void StackGuard::Interrupt() { +void StackGuard::PushPostponeInterruptsScope(PostponeInterruptsScope* scope) { ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= INTERRUPT; - set_interrupt_limits(access); + // Intercept already requested interrupts. + int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_; + scope->intercepted_flags_ = intercepted; + thread_local_.interrupt_flags_ &= ~intercepted; + if (!has_pending_interrupts(access)) reset_limits(access); + // Add scope to the chain. 
+ scope->prev_ = thread_local_.postpone_interrupts_; + thread_local_.postpone_interrupts_ = scope; } -bool StackGuard::IsPreempted() { +void StackGuard::PopPostponeInterruptsScope() { ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & PREEMPT; + PostponeInterruptsScope* top = thread_local_.postpone_interrupts_; + // Make intercepted interrupts active. + DCHECK((thread_local_.interrupt_flags_ & top->intercept_mask_) == 0); + thread_local_.interrupt_flags_ |= top->intercepted_flags_; + if (has_pending_interrupts(access)) set_interrupt_limits(access); + // Remove scope from chain. + thread_local_.postpone_interrupts_ = top->prev_; } -void StackGuard::Preempt() { +bool StackGuard::CheckInterrupt(InterruptFlag flag) { ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= PREEMPT; - set_interrupt_limits(access); + return thread_local_.interrupt_flags_ & flag; } -bool StackGuard::IsTerminateExecution() { +void StackGuard::RequestInterrupt(InterruptFlag flag) { ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & TERMINATE) != 0; -} - - -void StackGuard::CancelTerminateExecution() { - ExecutionAccess access(isolate_); - Continue(TERMINATE); - isolate_->CancelTerminateExecution(); -} - - -void StackGuard::TerminateExecution() { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= TERMINATE; - set_interrupt_limits(access); -} - - -bool StackGuard::IsGCRequest() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0; -} - - -void StackGuard::RequestGC() { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= GC_REQUEST; - if (thread_local_.postpone_interrupts_nesting_ == 0) { - thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit; - isolate_->heap()->SetStackLimits(); - } -} - - -bool StackGuard::IsInstallCodeRequest() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & INSTALL_CODE) != 0; -} 
- - -void StackGuard::RequestInstallCode() { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= INSTALL_CODE; - if (thread_local_.postpone_interrupts_nesting_ == 0) { - thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit; - isolate_->heap()->SetStackLimits(); + // Check the chain of PostponeInterruptsScopes for interception. + if (thread_local_.postpone_interrupts_ && + thread_local_.postpone_interrupts_->Intercept(flag)) { + return; } -} - - -bool StackGuard::IsFullDeopt() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0; -} - - -void StackGuard::FullDeopt() { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= FULL_DEOPT; - set_interrupt_limits(access); -} - -bool StackGuard::IsDeoptMarkedAllocationSites() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0; -} - - -void StackGuard::DeoptMarkedAllocationSites() { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES; + // Not intercepted. Set as active interrupt flag. 
+ thread_local_.interrupt_flags_ |= flag; set_interrupt_limits(access); } -#ifdef ENABLE_DEBUGGER_SUPPORT -bool StackGuard::IsDebugBreak() { +void StackGuard::ClearInterrupt(InterruptFlag flag) { ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & DEBUGBREAK; -} - - -void StackGuard::DebugBreak() { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= DEBUGBREAK; - set_interrupt_limits(access); -} - - -bool StackGuard::IsDebugCommand() { - ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & DEBUGCOMMAND; -} - - -void StackGuard::DebugCommand() { - if (FLAG_debugger_auto_break) { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= DEBUGCOMMAND; - set_interrupt_limits(access); - } -} -#endif - -void StackGuard::Continue(InterruptFlag after_what) { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what); - if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) { - reset_limits(access); + // Clear the interrupt flag from the chain of PostponeInterruptsScopes. + for (PostponeInterruptsScope* current = thread_local_.postpone_interrupts_; + current != NULL; + current = current->prev_) { + current->intercepted_flags_ &= ~flag; } -} - -void StackGuard::RequestInterrupt(InterruptCallback callback, void* data) { - ExecutionAccess access(isolate_); - thread_local_.interrupt_flags_ |= API_INTERRUPT; - thread_local_.interrupt_callback_ = callback; - thread_local_.interrupt_callback_data_ = data; - set_interrupt_limits(access); + // Clear the interrupt flag from the active interrupt flags. 
+ thread_local_.interrupt_flags_ &= ~flag; + if (!has_pending_interrupts(access)) reset_limits(access); } -void StackGuard::ClearInterrupt() { - thread_local_.interrupt_callback_ = 0; - thread_local_.interrupt_callback_data_ = 0; - Continue(API_INTERRUPT); -} - - -bool StackGuard::IsAPIInterrupt() { +bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) { ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & API_INTERRUPT; -} - - -void StackGuard::InvokeInterruptCallback() { - InterruptCallback callback = 0; - void* data = 0; - - { - ExecutionAccess access(isolate_); - callback = thread_local_.interrupt_callback_; - data = thread_local_.interrupt_callback_data_; - thread_local_.interrupt_callback_ = NULL; - thread_local_.interrupt_callback_data_ = NULL; - } - - if (callback != NULL) { - VMState<EXTERNAL> state(isolate_); - HandleScope handle_scope(isolate_); - callback(reinterpret_cast<v8::Isolate*>(isolate_), data); - } + bool result = (thread_local_.interrupt_flags_ & flag); + thread_local_.interrupt_flags_ &= ~flag; + if (!has_pending_interrupts(access)) reset_limits(access); + return result; } char* StackGuard::ArchiveStackGuard(char* to) { ExecutionAccess access(isolate_); - OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal)); + MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal)); ThreadLocal blank; // Set the stack limits using the old thread_local_. 
@@ -628,8 +417,7 @@ char* StackGuard::RestoreStackGuard(char* from) { ExecutionAccess access(isolate_); - OS::MemCopy( - reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal)); + MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal)); isolate_->heap()->SetStackLimits(); return from + sizeof(ThreadLocal); } @@ -647,33 +435,25 @@ jslimit_ = kIllegalLimit; real_climit_ = kIllegalLimit; climit_ = kIllegalLimit; - nesting_ = 0; - postpone_interrupts_nesting_ = 0; + postpone_interrupts_ = NULL; interrupt_flags_ = 0; - interrupt_callback_ = NULL; - interrupt_callback_data_ = NULL; } bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) { bool should_set_stack_limits = false; if (real_climit_ == kIllegalLimit) { - // Takes the address of the limit variable in order to find out where - // the top of stack is right now. const uintptr_t kLimitSize = FLAG_stack_size * KB; - uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize; - ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize); + DCHECK(GetCurrentStackPosition() > kLimitSize); + uintptr_t limit = GetCurrentStackPosition() - kLimitSize; real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit); jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit); real_climit_ = limit; climit_ = limit; should_set_stack_limits = true; } - nesting_ = 0; - postpone_interrupts_nesting_ = 0; + postpone_interrupts_ = NULL; interrupt_flags_ = 0; - interrupt_callback_ = NULL; - interrupt_callback_data_ = NULL; return should_set_stack_limits; } @@ -698,78 +478,78 @@ // --- C a l l s t o n a t i v e s --- -#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \ +#define RETURN_NATIVE_CALL(name, args) \ do { \ Handle<Object> argv[] = args; \ - ASSERT(has_pending_exception != NULL); \ return Call(isolate, \ isolate->name##_fun(), \ isolate->js_builtins_object(), \ - ARRAY_SIZE(argv), argv, \ - has_pending_exception); \ + ARRAY_SIZE(argv), argv); \ } while (false) 
-Handle<Object> Execution::ToNumber( - Isolate* isolate, Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_number, { obj }, exc); +MaybeHandle<Object> Execution::ToNumber( + Isolate* isolate, Handle<Object> obj) { + RETURN_NATIVE_CALL(to_number, { obj }); } -Handle<Object> Execution::ToString( - Isolate* isolate, Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_string, { obj }, exc); +MaybeHandle<Object> Execution::ToString( + Isolate* isolate, Handle<Object> obj) { + RETURN_NATIVE_CALL(to_string, { obj }); } -Handle<Object> Execution::ToDetailString( - Isolate* isolate, Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_detail_string, { obj }, exc); +MaybeHandle<Object> Execution::ToDetailString( + Isolate* isolate, Handle<Object> obj) { + RETURN_NATIVE_CALL(to_detail_string, { obj }); } -Handle<Object> Execution::ToObject( - Isolate* isolate, Handle<Object> obj, bool* exc) { +MaybeHandle<Object> Execution::ToObject( + Isolate* isolate, Handle<Object> obj) { if (obj->IsSpecObject()) return obj; - RETURN_NATIVE_CALL(to_object, { obj }, exc); + RETURN_NATIVE_CALL(to_object, { obj }); } -Handle<Object> Execution::ToInteger( - Isolate* isolate, Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_integer, { obj }, exc); +MaybeHandle<Object> Execution::ToInteger( + Isolate* isolate, Handle<Object> obj) { + RETURN_NATIVE_CALL(to_integer, { obj }); } -Handle<Object> Execution::ToUint32( - Isolate* isolate, Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_uint32, { obj }, exc); +MaybeHandle<Object> Execution::ToUint32( + Isolate* isolate, Handle<Object> obj) { + RETURN_NATIVE_CALL(to_uint32, { obj }); } -Handle<Object> Execution::ToInt32( - Isolate* isolate, Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_int32, { obj }, exc); +MaybeHandle<Object> Execution::ToInt32( + Isolate* isolate, Handle<Object> obj) { + RETURN_NATIVE_CALL(to_int32, { obj }); } -Handle<Object> Execution::NewDate(Isolate* isolate, double time, bool* exc) { 
+MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) { Handle<Object> time_obj = isolate->factory()->NewNumber(time); - RETURN_NATIVE_CALL(create_date, { time_obj }, exc); + RETURN_NATIVE_CALL(create_date, { time_obj }); } #undef RETURN_NATIVE_CALL -Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern, - Handle<String> flags, - bool* exc) { +MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern, + Handle<String> flags) { + Isolate* isolate = pattern->GetIsolate(); Handle<JSFunction> function = Handle<JSFunction>( - pattern->GetIsolate()->native_context()->regexp_function()); - Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral( - function, pattern, flags, exc); - if (*exc) return Handle<JSRegExp>(); + isolate->native_context()->regexp_function()); + Handle<Object> re_obj; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, re_obj, + RegExpImpl::CreateRegExpLiteral(function, pattern, flags), + JSRegExp); return Handle<JSRegExp>::cast(re_obj); } @@ -783,97 +563,90 @@ return factory->undefined_value(); } - Handle<Object> char_at = GetProperty( - isolate, isolate->js_builtins_object(), factory->char_at_string()); + Handle<Object> char_at = Object::GetProperty( + isolate->js_builtins_object(), + factory->char_at_string()).ToHandleChecked(); if (!char_at->IsJSFunction()) { return factory->undefined_value(); } - bool caught_exception; Handle<Object> index_object = factory->NewNumberFromInt(int_index); Handle<Object> index_arg[] = { index_object }; - Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at), - string, - ARRAY_SIZE(index_arg), - index_arg, - &caught_exception); - if (caught_exception) { + Handle<Object> result; + if (!TryCall(Handle<JSFunction>::cast(char_at), + string, + ARRAY_SIZE(index_arg), + index_arg).ToHandle(&result)) { return factory->undefined_value(); } return result; } -Handle<JSFunction> Execution::InstantiateFunction( - Handle<FunctionTemplateInfo> data, - bool* exc) { +MaybeHandle<JSFunction> 
Execution::InstantiateFunction( + Handle<FunctionTemplateInfo> data) { Isolate* isolate = data->GetIsolate(); if (!data->do_not_cache()) { // Fast case: see if the function has already been instantiated int serial_number = Smi::cast(data->serial_number())->value(); Handle<JSObject> cache(isolate->native_context()->function_cache()); Handle<Object> elm = - Object::GetElementNoExceptionThrown(isolate, cache, serial_number); + Object::GetElement(isolate, cache, serial_number).ToHandleChecked(); if (elm->IsJSFunction()) return Handle<JSFunction>::cast(elm); } // The function has not yet been instantiated in this context; do it. Handle<Object> args[] = { data }; - Handle<Object> result = Call(isolate, - isolate->instantiate_fun(), - isolate->js_builtins_object(), - ARRAY_SIZE(args), - args, - exc); - if (*exc) return Handle<JSFunction>::null(); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + Call(isolate, + isolate->instantiate_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args), + JSFunction); return Handle<JSFunction>::cast(result); } -Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data, - bool* exc) { +MaybeHandle<JSObject> Execution::InstantiateObject( + Handle<ObjectTemplateInfo> data) { Isolate* isolate = data->GetIsolate(); + Handle<Object> result; if (data->property_list()->IsUndefined() && !data->constructor()->IsUndefined()) { - // Initialization to make gcc happy. 
- Object* result = NULL; - { - HandleScope scope(isolate); - Handle<FunctionTemplateInfo> cons_template = - Handle<FunctionTemplateInfo>( - FunctionTemplateInfo::cast(data->constructor())); - Handle<JSFunction> cons = InstantiateFunction(cons_template, exc); - if (*exc) return Handle<JSObject>::null(); - Handle<Object> value = New(cons, 0, NULL, exc); - if (*exc) return Handle<JSObject>::null(); - result = *value; - } - ASSERT(!*exc); - return Handle<JSObject>(JSObject::cast(result)); + Handle<FunctionTemplateInfo> cons_template = + Handle<FunctionTemplateInfo>( + FunctionTemplateInfo::cast(data->constructor())); + Handle<JSFunction> cons; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, cons, InstantiateFunction(cons_template), JSObject); + ASSIGN_RETURN_ON_EXCEPTION(isolate, result, New(cons, 0, NULL), JSObject); } else { Handle<Object> args[] = { data }; - Handle<Object> result = Call(isolate, - isolate->instantiate_fun(), - isolate->js_builtins_object(), - ARRAY_SIZE(args), - args, - exc); - if (*exc) return Handle<JSObject>::null(); - return Handle<JSObject>::cast(result); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + Call(isolate, + isolate->instantiate_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args), + JSObject); } + return Handle<JSObject>::cast(result); } -void Execution::ConfigureInstance(Isolate* isolate, - Handle<Object> instance, - Handle<Object> instance_template, - bool* exc) { +MaybeHandle<Object> Execution::ConfigureInstance( + Isolate* isolate, + Handle<Object> instance, + Handle<Object> instance_template) { Handle<Object> args[] = { instance, instance_template }; - Execution::Call(isolate, - isolate->configure_instance_fun(), - isolate->js_builtins_object(), - ARRAY_SIZE(args), - args, - exc); + return Execution::Call(isolate, + isolate->configure_instance_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args); } @@ -883,175 +656,52 @@ Handle<Object> is_global) { Isolate* isolate = fun->GetIsolate(); Handle<Object> 
args[] = { recv, fun, pos, is_global }; - bool caught_exception; - Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(), - isolate->js_builtins_object(), - ARRAY_SIZE(args), - args, - &caught_exception); - if (caught_exception || !result->IsString()) { - return isolate->factory()->empty_string(); + MaybeHandle<Object> maybe_result = + TryCall(isolate->get_stack_trace_line_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args); + Handle<Object> result; + if (!maybe_result.ToHandle(&result) || !result->IsString()) { + return isolate->factory()->empty_string(); } return Handle<String>::cast(result); } -static Object* RuntimePreempt(Isolate* isolate) { - // Clear the preempt request flag. - isolate->stack_guard()->Continue(PREEMPT); - -#ifdef ENABLE_DEBUGGER_SUPPORT - if (isolate->debug()->InDebugger()) { - // If currently in the debugger don't do any actual preemption but record - // that preemption occoured while in the debugger. - isolate->debug()->PreemptionWhileInDebugger(); - } else { - // Perform preemption. - v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate)); - Thread::YieldCPU(); - } -#else - { // NOLINT - // Perform preemption. - v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate)); - Thread::YieldCPU(); - } -#endif - - return isolate->heap()->undefined_value(); -} - - -#ifdef ENABLE_DEBUGGER_SUPPORT -Object* Execution::DebugBreakHelper(Isolate* isolate) { - // Just continue if breaks are disabled. - if (isolate->debug()->disable_break()) { - return isolate->heap()->undefined_value(); - } - - // Ignore debug break during bootstrapping. - if (isolate->bootstrapper()->IsActive()) { - return isolate->heap()->undefined_value(); +Object* StackGuard::HandleInterrupts() { + if (CheckAndClearInterrupt(GC_REQUEST)) { + isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt"); } - // Ignore debug break if debugger is not active. 
- if (!isolate->debugger()->IsDebuggerActive()) { - return isolate->heap()->undefined_value(); + if (CheckDebugBreak() || CheckDebugCommand()) { + isolate_->debug()->HandleDebugBreak(); } - StackLimitCheck check(isolate); - if (check.HasOverflowed()) { - return isolate->heap()->undefined_value(); - } - - { - JavaScriptFrameIterator it(isolate); - ASSERT(!it.done()); - Object* fun = it.frame()->function(); - if (fun && fun->IsJSFunction()) { - // Don't stop in builtin functions. - if (JSFunction::cast(fun)->IsBuiltin()) { - return isolate->heap()->undefined_value(); - } - GlobalObject* global = JSFunction::cast(fun)->context()->global_object(); - // Don't stop in debugger functions. - if (isolate->debug()->IsDebugGlobal(global)) { - return isolate->heap()->undefined_value(); - } - } - } - - // Collect the break state before clearing the flags. - bool debug_command_only = - isolate->stack_guard()->IsDebugCommand() && - !isolate->stack_guard()->IsDebugBreak(); - - // Clear the debug break request flag. - isolate->stack_guard()->Continue(DEBUGBREAK); - - ProcessDebugMessages(isolate, debug_command_only); - - // Return to continue execution. - return isolate->heap()->undefined_value(); -} - - -void Execution::ProcessDebugMessages(Isolate* isolate, - bool debug_command_only) { - // Clear the debug command request flag. - isolate->stack_guard()->Continue(DEBUGCOMMAND); - - StackLimitCheck check(isolate); - if (check.HasOverflowed()) { - return; + if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) { + return isolate_->TerminateExecution(); } - HandleScope scope(isolate); - // Enter the debugger. Just continue if we fail to enter the debugger. - EnterDebugger debugger(isolate); - if (debugger.FailedToEnter()) { - return; + if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) { + isolate_->heap()->DeoptMarkedAllocationSites(); } - // Notify the debug event listeners. Indicate auto continue if the break was - // a debug command break. 
- isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(), - debug_command_only); -} - - -#endif - -MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) { - StackGuard* stack_guard = isolate->stack_guard(); - if (stack_guard->ShouldPostponeInterrupts()) { - return isolate->heap()->undefined_value(); + if (CheckAndClearInterrupt(INSTALL_CODE)) { + DCHECK(isolate_->concurrent_recompilation_enabled()); + isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions(); } - if (stack_guard->IsAPIInterrupt()) { - stack_guard->InvokeInterruptCallback(); - stack_guard->Continue(API_INTERRUPT); + if (CheckAndClearInterrupt(API_INTERRUPT)) { + // Callback must be invoked outside of ExecusionAccess lock. + isolate_->InvokeApiInterruptCallback(); } - if (stack_guard->IsGCRequest()) { - isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, - "StackGuard GC request"); - stack_guard->Continue(GC_REQUEST); - } + isolate_->counters()->stack_interrupts()->Increment(); + isolate_->counters()->runtime_profiler_ticks()->Increment(); + isolate_->runtime_profiler()->OptimizeNow(); - isolate->counters()->stack_interrupts()->Increment(); - isolate->counters()->runtime_profiler_ticks()->Increment(); -#ifdef ENABLE_DEBUGGER_SUPPORT - if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) { - DebugBreakHelper(isolate); - } -#endif - if (stack_guard->IsPreempted()) RuntimePreempt(isolate); - if (stack_guard->IsTerminateExecution()) { - stack_guard->Continue(TERMINATE); - return isolate->TerminateExecution(); - } - if (stack_guard->IsInterrupted()) { - stack_guard->Continue(INTERRUPT); - return isolate->StackOverflow(); - } - if (stack_guard->IsFullDeopt()) { - stack_guard->Continue(FULL_DEOPT); - Deoptimizer::DeoptimizeAll(isolate); - } - if (stack_guard->IsDeoptMarkedAllocationSites()) { - stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES); - isolate->heap()->DeoptMarkedAllocationSites(); - } - if (stack_guard->IsInstallCodeRequest()) { - 
ASSERT(isolate->concurrent_recompilation_enabled()); - stack_guard->Continue(INSTALL_CODE); - isolate->optimizing_compiler_thread()->InstallOptimizedFunctions(); - } - isolate->runtime_profiler()->OptimizeNow(); - return isolate->heap()->undefined_value(); + return isolate_->heap()->undefined_value(); } - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/execution.h nodejs-0.11.15/deps/v8/src/execution.h --- nodejs-0.11.13/deps/v8/src/execution.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/execution.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,58 +1,16 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_EXECUTION_H_ #define V8_EXECUTION_H_ -#include "allocation.h" +#include "src/handles.h" namespace v8 { namespace internal { - -// Flag used to set the interrupt causes. -enum InterruptFlag { - INTERRUPT = 1 << 0, - DEBUGBREAK = 1 << 1, - DEBUGCOMMAND = 1 << 2, - PREEMPT = 1 << 3, - TERMINATE = 1 << 4, - GC_REQUEST = 1 << 5, - FULL_DEOPT = 1 << 6, - INSTALL_CODE = 1 << 7, - API_INTERRUPT = 1 << 8, - DEOPT_MARKED_ALLOCATION_SITES = 1 << 9 -}; - - -class Isolate; - - -class Execution : public AllStatic { +class Execution V8_FINAL : public AllStatic { public: // Call a function, the caller supplies a receiver and an array // of arguments. Arguments are Object* type. After function returns, @@ -65,13 +23,13 @@ // and the function called is not in strict mode, receiver is converted to // an object. 
// - static Handle<Object> Call(Isolate* isolate, - Handle<Object> callable, - Handle<Object> receiver, - int argc, - Handle<Object> argv[], - bool* pending_exception, - bool convert_receiver = false); + MUST_USE_RESULT static MaybeHandle<Object> Call( + Isolate* isolate, + Handle<Object> callable, + Handle<Object> receiver, + int argc, + Handle<Object> argv[], + bool convert_receiver = false); // Construct object from function, the caller supplies an array of // arguments. Arguments are Object* type. After function returns, @@ -80,112 +38,97 @@ // *pending_exception tells whether the invoke resulted in // a pending exception. // - static Handle<Object> New(Handle<JSFunction> func, - int argc, - Handle<Object> argv[], - bool* pending_exception); + MUST_USE_RESULT static MaybeHandle<Object> New(Handle<JSFunction> func, + int argc, + Handle<Object> argv[]); // Call a function, just like Call(), but make sure to silently catch // any thrown exceptions. The return value is either the result of // calling the function (if caught exception is false) or the exception // that occurred (if caught exception is true). 
- static Handle<Object> TryCall(Handle<JSFunction> func, - Handle<Object> receiver, - int argc, - Handle<Object> argv[], - bool* caught_exception); + static MaybeHandle<Object> TryCall( + Handle<JSFunction> func, + Handle<Object> receiver, + int argc, + Handle<Object> argv[], + Handle<Object>* exception_out = NULL); // ECMA-262 9.3 - static Handle<Object> ToNumber( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToNumber( + Isolate* isolate, Handle<Object> obj); // ECMA-262 9.4 - static Handle<Object> ToInteger( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToInteger( + Isolate* isolate, Handle<Object> obj); // ECMA-262 9.5 - static Handle<Object> ToInt32( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToInt32( + Isolate* isolate, Handle<Object> obj); // ECMA-262 9.6 - static Handle<Object> ToUint32( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToUint32( + Isolate* isolate, Handle<Object> obj); // ECMA-262 9.8 - static Handle<Object> ToString( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToString( + Isolate* isolate, Handle<Object> obj); // ECMA-262 9.8 - static Handle<Object> ToDetailString( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToDetailString( + Isolate* isolate, Handle<Object> obj); // ECMA-262 9.9 - static Handle<Object> ToObject( - Isolate* isolate, Handle<Object> obj, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> ToObject( + Isolate* isolate, Handle<Object> obj); // Create a new date object from 'time'. - static Handle<Object> NewDate( - Isolate* isolate, double time, bool* exc); + MUST_USE_RESULT static MaybeHandle<Object> NewDate( + Isolate* isolate, double time); // Create a new regular expression object from 'pattern' and 'flags'. 
- static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern, - Handle<String> flags, - bool* exc); + MUST_USE_RESULT static MaybeHandle<JSRegExp> NewJSRegExp( + Handle<String> pattern, Handle<String> flags); // Used to implement [] notation on strings (calls JS code) static Handle<Object> CharAt(Handle<String> str, uint32_t index); static Handle<Object> GetFunctionFor(); - static Handle<JSFunction> InstantiateFunction( - Handle<FunctionTemplateInfo> data, bool* exc); - static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data, - bool* exc); - static void ConfigureInstance(Isolate* isolate, - Handle<Object> instance, - Handle<Object> data, - bool* exc); + MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction( + Handle<FunctionTemplateInfo> data); + MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject( + Handle<ObjectTemplateInfo> data); + MUST_USE_RESULT static MaybeHandle<Object> ConfigureInstance( + Isolate* isolate, Handle<Object> instance, Handle<Object> data); static Handle<String> GetStackTraceLine(Handle<Object> recv, Handle<JSFunction> fun, Handle<Object> pos, Handle<Object> is_global); -#ifdef ENABLE_DEBUGGER_SUPPORT - static Object* DebugBreakHelper(Isolate* isolate); - static void ProcessDebugMessages(Isolate* isolate, bool debug_command_only); -#endif - - // If the stack guard is triggered, but it is not an actual - // stack overflow, then handle the interruption accordingly. - MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt( - Isolate* isolate); // Get a function delegate (or undefined) for the given non-function // object. Used for support calling objects as functions. 
static Handle<Object> GetFunctionDelegate(Isolate* isolate, Handle<Object> object); - static Handle<Object> TryGetFunctionDelegate(Isolate* isolate, - Handle<Object> object, - bool* has_pending_exception); + MUST_USE_RESULT static MaybeHandle<Object> TryGetFunctionDelegate( + Isolate* isolate, + Handle<Object> object); // Get a function delegate (or undefined) for the given non-function // object. Used for support calling objects as constructors. static Handle<Object> GetConstructorDelegate(Isolate* isolate, Handle<Object> object); - static Handle<Object> TryGetConstructorDelegate(Isolate* isolate, - Handle<Object> object, - bool* has_pending_exception); - - static void RunMicrotasks(Isolate* isolate); - static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask); + static MaybeHandle<Object> TryGetConstructorDelegate(Isolate* isolate, + Handle<Object> object); }; class ExecutionAccess; +class PostponeInterruptsScope; // StackGuard contains the handling of the limits that are used to limit the // number of nested invocations of JavaScript and the stack size used in each // invocation. -class StackGuard { +class StackGuard V8_FINAL { public: // Pass the address beyond which the stack should not grow. The stack // is assumed to grow downwards. @@ -203,34 +146,31 @@ // it has been set up. 
void ClearThread(const ExecutionAccess& lock); - bool IsStackOverflow(); - bool IsPreempted(); - void Preempt(); - bool IsInterrupted(); - void Interrupt(); - bool IsTerminateExecution(); - void TerminateExecution(); - void CancelTerminateExecution(); -#ifdef ENABLE_DEBUGGER_SUPPORT - bool IsDebugBreak(); - void DebugBreak(); - bool IsDebugCommand(); - void DebugCommand(); -#endif - bool IsGCRequest(); - void RequestGC(); - bool IsInstallCodeRequest(); - void RequestInstallCode(); - bool IsFullDeopt(); - void FullDeopt(); - bool IsDeoptMarkedAllocationSites(); - void DeoptMarkedAllocationSites(); - void Continue(InterruptFlag after_what); - - void RequestInterrupt(InterruptCallback callback, void* data); - void ClearInterrupt(); - bool IsAPIInterrupt(); - void InvokeInterruptCallback(); +#define INTERRUPT_LIST(V) \ + V(DEBUGBREAK, DebugBreak, 0) \ + V(DEBUGCOMMAND, DebugCommand, 1) \ + V(TERMINATE_EXECUTION, TerminateExecution, 2) \ + V(GC_REQUEST, GC, 3) \ + V(INSTALL_CODE, InstallCode, 4) \ + V(API_INTERRUPT, ApiInterrupt, 5) \ + V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 6) + +#define V(NAME, Name, id) \ + inline bool Check##Name() { return CheckInterrupt(NAME); } \ + inline void Request##Name() { RequestInterrupt(NAME); } \ + inline void Clear##Name() { ClearInterrupt(NAME); } + INTERRUPT_LIST(V) +#undef V + + // Flag used to set the interrupt causes. + enum InterruptFlag { + #define V(NAME, Name, id) NAME = (1 << id), + INTERRUPT_LIST(V) + #undef V + #define V(NAME, Name, id) NAME | + ALL_INTERRUPTS = INTERRUPT_LIST(V) 0 + #undef V + }; // This provides an asynchronous read of the stack limits for the current // thread. 
There are no locks protecting this, but it is assumed that you @@ -253,25 +193,25 @@ Address address_of_real_jslimit() { return reinterpret_cast<Address>(&thread_local_.real_jslimit_); } - bool ShouldPostponeInterrupts(); + + // If the stack guard is triggered, but it is not an actual + // stack overflow, then handle the interruption accordingly. + Object* HandleInterrupts(); private: StackGuard(); + bool CheckInterrupt(InterruptFlag flag); + void RequestInterrupt(InterruptFlag flag); + void ClearInterrupt(InterruptFlag flag); + bool CheckAndClearInterrupt(InterruptFlag flag); + // You should hold the ExecutionAccess lock when calling this method. bool has_pending_interrupts(const ExecutionAccess& lock) { - // Sanity check: We shouldn't be asking about pending interrupts - // unless we're not postponing them anymore. - ASSERT(!should_postpone_interrupts(lock)); return thread_local_.interrupt_flags_ != 0; } // You should hold the ExecutionAccess lock when calling this method. - bool should_postpone_interrupts(const ExecutionAccess& lock) { - return thread_local_.postpone_interrupts_nesting_ > 0; - } - - // You should hold the ExecutionAccess lock when calling this method. inline void set_interrupt_limits(const ExecutionAccess& lock); // Reset limits to actual values. For example after handling interrupt. 
@@ -282,7 +222,7 @@ void EnableInterrupts(); void DisableInterrupts(); -#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 +#if V8_TARGET_ARCH_64_BIT static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe); static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8); #else @@ -290,7 +230,10 @@ static const uintptr_t kIllegalLimit = 0xfffffff8; #endif - class ThreadLocal { + void PushPostponeInterruptsScope(PostponeInterruptsScope* scope); + void PopPostponeInterruptsScope(); + + class ThreadLocal V8_FINAL { public: ThreadLocal() { Clear(); } // You should hold the ExecutionAccess lock when you call Initialize or @@ -314,12 +257,8 @@ uintptr_t real_climit_; // Actual C++ stack limit set for the VM. uintptr_t climit_; - int nesting_; - int postpone_interrupts_nesting_; + PostponeInterruptsScope* postpone_interrupts_; int interrupt_flags_; - - InterruptCallback interrupt_callback_; - void* interrupt_callback_data_; }; // TODO(isolates): Technically this could be calculated directly from a @@ -334,7 +273,6 @@ DISALLOW_COPY_AND_ASSIGN(StackGuard); }; - } } // namespace v8::internal #endif // V8_EXECUTION_H_ diff -Nru nodejs-0.11.13/deps/v8/src/extensions/externalize-string-extension.cc nodejs-0.11.15/deps/v8/src/extensions/externalize-string-extension.cc --- nodejs-0.11.13/deps/v8/src/extensions/externalize-string-extension.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/externalize-string-extension.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "externalize-string-extension.h" +#include "src/extensions/externalize-string-extension.h" namespace v8 { namespace internal { @@ -67,7 +44,7 @@ return v8::FunctionTemplate::New(isolate, ExternalizeStringExtension::Externalize); } else { - ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0); + DCHECK(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0); return v8::FunctionTemplate::New(isolate, ExternalizeStringExtension::IsAscii); } diff -Nru nodejs-0.11.13/deps/v8/src/extensions/externalize-string-extension.h nodejs-0.11.15/deps/v8/src/extensions/externalize-string-extension.h --- nodejs-0.11.13/deps/v8/src/extensions/externalize-string-extension.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/externalize-string-extension.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_ #define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_ -#include "v8.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/extensions/free-buffer-extension.cc nodejs-0.11.15/deps/v8/src/extensions/free-buffer-extension.cc --- nodejs-0.11.13/deps/v8/src/extensions/free-buffer-extension.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/free-buffer-extension.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "free-buffer-extension.h" -#include "platform.h" -#include "v8.h" +#include "src/extensions/free-buffer-extension.h" + +#include "src/base/platform/platform.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/extensions/free-buffer-extension.h nodejs-0.11.15/deps/v8/src/extensions/free-buffer-extension.h --- nodejs-0.11.13/deps/v8/src/extensions/free-buffer-extension.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/free-buffer-extension.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_ #define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_ -#include "v8.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/extensions/gc-extension.cc nodejs-0.11.15/deps/v8/src/extensions/gc-extension.cc --- nodejs-0.11.13/deps/v8/src/extensions/gc-extension.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/gc-extension.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,10 @@ // Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "gc-extension.h" -#include "platform.h" +#include "src/extensions/gc-extension.h" + +#include "src/base/platform/platform.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/extensions/gc-extension.h nodejs-0.11.15/deps/v8/src/extensions/gc-extension.h --- nodejs-0.11.13/deps/v8/src/extensions/gc-extension.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/gc-extension.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_EXTENSIONS_GC_EXTENSION_H_ #define V8_EXTENSIONS_GC_EXTENSION_H_ -#include "v8.h" +#include "src/v8.h" namespace v8 { namespace internal { @@ -45,8 +22,8 @@ private: static const char* BuildSource(char* buf, size_t size, const char* fun_name) { - OS::SNPrintF(Vector<char>(buf, static_cast<int>(size)), - "native function %s();", fun_name); + SNPrintF(Vector<char>(buf, static_cast<int>(size)), + "native function %s();", fun_name); return buf; } diff -Nru nodejs-0.11.13/deps/v8/src/extensions/statistics-extension.cc nodejs-0.11.15/deps/v8/src/extensions/statistics-extension.cc --- nodejs-0.11.13/deps/v8/src/extensions/statistics-extension.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/statistics-extension.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "statistics-extension.h" +#include "src/extensions/statistics-extension.h" namespace v8 { namespace internal { @@ -37,7 +14,7 @@ v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate( v8::Isolate* isolate, v8::Handle<v8::String> str) { - ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0); + DCHECK(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0); return v8::FunctionTemplate::New(isolate, StatisticsExtension::GetCounters); } diff -Nru nodejs-0.11.13/deps/v8/src/extensions/statistics-extension.h nodejs-0.11.15/deps/v8/src/extensions/statistics-extension.h --- nodejs-0.11.13/deps/v8/src/extensions/statistics-extension.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/statistics-extension.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_ #define V8_EXTENSIONS_STATISTICS_EXTENSION_H_ -#include "v8.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/extensions/trigger-failure-extension.cc nodejs-0.11.15/deps/v8/src/extensions/trigger-failure-extension.cc --- nodejs-0.11.13/deps/v8/src/extensions/trigger-failure-extension.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/trigger-failure-extension.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "trigger-failure-extension.h" -#include "v8.h" +#include "src/extensions/trigger-failure-extension.h" +#include "src/v8.h" namespace v8 { namespace internal { @@ -67,13 +44,13 @@ void TriggerFailureExtension::TriggerAssertFalse( const v8::FunctionCallbackInfo<v8::Value>& args) { - ASSERT(false); + DCHECK(false); } void TriggerFailureExtension::TriggerSlowAssertFalse( const v8::FunctionCallbackInfo<v8::Value>& args) { - SLOW_ASSERT(false); + SLOW_DCHECK(false); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/extensions/trigger-failure-extension.h nodejs-0.11.15/deps/v8/src/extensions/trigger-failure-extension.h --- nodejs-0.11.13/deps/v8/src/extensions/trigger-failure-extension.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/extensions/trigger-failure-extension.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_ #define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_ -#include "v8.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/factory.cc nodejs-0.11.15/deps/v8/src/factory.cc --- nodejs-0.11.13/deps/v8/src/factory.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/factory.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,57 +1,67 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "api.h" -#include "debug.h" -#include "execution.h" -#include "factory.h" -#include "isolate-inl.h" -#include "macro-assembler.h" -#include "objects.h" -#include "objects-visiting.h" -#include "platform.h" -#include "scopeinfo.h" +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/factory.h" + +#include "src/allocation-site-scopes.h" +#include "src/conversions.h" +#include "src/isolate-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { -Handle<Box> Factory::NewBox(Handle<Object> value, PretenureFlag pretenure) { +template<typename T> +Handle<T> Factory::New(Handle<Map> map, AllocationSpace space) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateBox(*value, pretenure), - Box); + isolate()->heap()->Allocate(*map, space), + T); +} + + +template<typename T> +Handle<T> Factory::New(Handle<Map> map, + AllocationSpace space, + Handle<AllocationSite> allocation_site) { + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->Allocate(*map, space, *allocation_site), + T); +} + + +Handle<HeapObject> Factory::NewFillerObject(int size, + bool double_align, + AllocationSpace space) { + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateFillerObject(size, double_align, space), + HeapObject); +} + + +Handle<Box> 
Factory::NewBox(Handle<Object> value) { + Handle<Box> result = Handle<Box>::cast(NewStruct(BOX_TYPE)); + result->set_value(*value); + return result; +} + + +Handle<Oddball> Factory::NewOddball(Handle<Map> map, + const char* to_string, + Handle<Object> to_number, + byte kind) { + Handle<Oddball> oddball = New<Oddball>(map, OLD_POINTER_SPACE); + Oddball::Initialize(isolate(), oddball, to_string, to_number, kind); + return oddball; } Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) { - ASSERT(0 <= size); + DCHECK(0 <= size); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateFixedArray(size, pretenure), @@ -61,10 +71,12 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size, PretenureFlag pretenure) { - ASSERT(0 <= size); + DCHECK(0 <= size); CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateFixedArrayWithHoles(size, pretenure), + isolate()->heap()->AllocateFixedArrayWithFiller(size, + pretenure, + *the_hole_value()), FixedArray); } @@ -77,140 +89,78 @@ } -Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size, - PretenureFlag pretenure) { - ASSERT(0 <= size); +Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size, + PretenureFlag pretenure) { + DCHECK(0 <= size); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure), - FixedDoubleArray); + FixedArrayBase); +} + + +Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles( + int size, + PretenureFlag pretenure) { + DCHECK(0 <= size); + Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure); + if (size > 0) { + Handle<FixedDoubleArray> double_array = + Handle<FixedDoubleArray>::cast(array); + for (int i = 0; i < size; ++i) { + double_array->set_the_hole(i); + } + } + return array; } Handle<ConstantPoolArray> Factory::NewConstantPoolArray( - int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries) { - 
ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 || - number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0); + const ConstantPoolArray::NumberOfEntries& small) { + DCHECK(small.total_count() > 0); CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries, - number_of_code_ptr_entries, - number_of_heap_ptr_entries, - number_of_int32_entries), + isolate()->heap()->AllocateConstantPoolArray(small), ConstantPoolArray); } -Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) { - ASSERT(0 <= at_least_space_for); - CALL_HEAP_FUNCTION(isolate(), - NameDictionary::Allocate(isolate()->heap(), - at_least_space_for), - NameDictionary); -} - - -Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary( - int at_least_space_for) { - ASSERT(0 <= at_least_space_for); - CALL_HEAP_FUNCTION(isolate(), - SeededNumberDictionary::Allocate(isolate()->heap(), - at_least_space_for), - SeededNumberDictionary); -} - - -Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary( - int at_least_space_for) { - ASSERT(0 <= at_least_space_for); - CALL_HEAP_FUNCTION(isolate(), - UnseededNumberDictionary::Allocate(isolate()->heap(), - at_least_space_for), - UnseededNumberDictionary); -} - - -Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) { - ASSERT(0 <= at_least_space_for); - CALL_HEAP_FUNCTION(isolate(), - ObjectHashSet::Allocate(isolate()->heap(), - at_least_space_for), - ObjectHashSet); -} - - -Handle<ObjectHashTable> Factory::NewObjectHashTable( - int at_least_space_for, - MinimumCapacity capacity_option) { - ASSERT(0 <= at_least_space_for); - CALL_HEAP_FUNCTION(isolate(), - ObjectHashTable::Allocate(isolate()->heap(), - at_least_space_for, - capacity_option), - ObjectHashTable); -} - - -Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) { - ASSERT(0 <= at_least_space_for); +Handle<ConstantPoolArray> Factory::NewExtendedConstantPoolArray( 
+ const ConstantPoolArray::NumberOfEntries& small, + const ConstantPoolArray::NumberOfEntries& extended) { + DCHECK(small.total_count() > 0); + DCHECK(extended.total_count() > 0); CALL_HEAP_FUNCTION( isolate(), - WeakHashTable::Allocate(isolate()->heap(), - at_least_space_for, - USE_DEFAULT_MINIMUM_CAPACITY, - TENURED), - WeakHashTable); -} - - -Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors, - int slack) { - ASSERT(0 <= number_of_descriptors); - CALL_HEAP_FUNCTION(isolate(), - DescriptorArray::Allocate( - isolate(), number_of_descriptors, slack), - DescriptorArray); + isolate()->heap()->AllocateExtendedConstantPoolArray(small, extended), + ConstantPoolArray); } -Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData( - int deopt_entry_count, - PretenureFlag pretenure) { - ASSERT(deopt_entry_count > 0); - CALL_HEAP_FUNCTION(isolate(), - DeoptimizationInputData::Allocate(isolate(), - deopt_entry_count, - pretenure), - DeoptimizationInputData); +Handle<OrderedHashSet> Factory::NewOrderedHashSet() { + return OrderedHashSet::Allocate(isolate(), 4); } -Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData( - int deopt_entry_count, - PretenureFlag pretenure) { - ASSERT(deopt_entry_count > 0); - CALL_HEAP_FUNCTION(isolate(), - DeoptimizationOutputData::Allocate(isolate(), - deopt_entry_count, - pretenure), - DeoptimizationOutputData); +Handle<OrderedHashMap> Factory::NewOrderedHashMap() { + return OrderedHashMap::Allocate(isolate(), 4); } Handle<AccessorPair> Factory::NewAccessorPair() { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateAccessorPair(), - AccessorPair); + Handle<AccessorPair> accessors = + Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE)); + accessors->set_getter(*the_hole_value(), SKIP_WRITE_BARRIER); + accessors->set_setter(*the_hole_value(), SKIP_WRITE_BARRIER); + return accessors; } Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() { - CALL_HEAP_FUNCTION(isolate(), - 
isolate()->heap()->AllocateTypeFeedbackInfo(), - TypeFeedbackInfo); + Handle<TypeFeedbackInfo> info = + Handle<TypeFeedbackInfo>::cast(NewStruct(TYPE_FEEDBACK_INFO_TYPE)); + info->initialize_storage(); + return info; } @@ -223,9 +173,8 @@ // Internalized strings are created in the old generation (data space). Handle<String> Factory::InternalizeString(Handle<String> string) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->InternalizeString(*string), - String); + if (string->IsInternalizedString()) return string; + return StringTable::LookupString(isolate(), string); } @@ -250,9 +199,7 @@ template<class StringTableKey> Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->InternalizeStringWithKey(key), - String); + return StringTable::LookupKey(isolate(), key); } @@ -262,34 +209,160 @@ SubStringKey<uint16_t> > (SubStringKey<uint16_t>* key); -Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string, - PretenureFlag pretenure) { +MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string, + PretenureFlag pretenure) { + int length = string.length(); + if (length == 1) return LookupSingleCharacterStringFromCode(string[0]); + Handle<SeqOneByteString> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + NewRawOneByteString(string.length(), pretenure), + String); + + DisallowHeapAllocation no_gc; + // Copy the characters into the new object. + CopyChars(SeqOneByteString::cast(*result)->GetChars(), + string.start(), + length); + return result; +} + +MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string, + PretenureFlag pretenure) { + // Check for ASCII first since this is the common case. 
+ const char* start = string.start(); + int length = string.length(); + int non_ascii_start = String::NonAsciiStart(start, length); + if (non_ascii_start >= length) { + // If the string is ASCII, we do not need to convert the characters + // since UTF8 is backwards compatible with ASCII. + return NewStringFromOneByte(Vector<const uint8_t>::cast(string), pretenure); + } + + // Non-ASCII and we need to decode. + Access<UnicodeCache::Utf8Decoder> + decoder(isolate()->unicode_cache()->utf8_decoder()); + decoder->Reset(string.start() + non_ascii_start, + length - non_ascii_start); + int utf16_length = decoder->Utf16Length(); + DCHECK(utf16_length > 0); + // Allocate string. + Handle<SeqTwoByteString> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), result, + NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), + String); + // Copy ascii portion. + uint16_t* data = result->GetChars(); + const char* ascii_data = string.start(); + for (int i = 0; i < non_ascii_start; i++) { + *data++ = *ascii_data++; + } + // Now write the remainder. 
+ decoder->WriteUtf16(data, utf16_length); + return result; +} + + +MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string, + PretenureFlag pretenure) { + int length = string.length(); + const uc16* start = string.start(); + if (String::IsOneByte(start, length)) { + if (length == 1) return LookupSingleCharacterStringFromCode(string[0]); + Handle<SeqOneByteString> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + NewRawOneByteString(length, pretenure), + String); + CopyChars(result->GetChars(), start, length); + return result; + } else { + Handle<SeqTwoByteString> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + NewRawTwoByteString(length, pretenure), + String); + CopyChars(result->GetChars(), start, length); + return result; + } +} + + +Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str, + int chars, + uint32_t hash_field) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateStringFromOneByte(string, pretenure), + isolate()->heap()->AllocateInternalizedStringFromUtf8( + str, chars, hash_field), String); } -Handle<String> Factory::NewStringFromUtf8(Vector<const char> string, - PretenureFlag pretenure) { + +MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedString( + Vector<const uint8_t> str, + uint32_t hash_field) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateStringFromUtf8(string, pretenure), + isolate()->heap()->AllocateOneByteInternalizedString(str, hash_field), String); } -Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string, - PretenureFlag pretenure) { +MUST_USE_RESULT Handle<String> Factory::NewTwoByteInternalizedString( + Vector<const uc16> str, + uint32_t hash_field) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateStringFromTwoByte(string, pretenure), + isolate()->heap()->AllocateTwoByteInternalizedString(str, hash_field), String); } -Handle<SeqOneByteString> Factory::NewRawOneByteString(int length, - PretenureFlag 
pretenure) { +Handle<String> Factory::NewInternalizedStringImpl( + Handle<String> string, int chars, uint32_t hash_field) { + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateInternalizedStringImpl( + *string, chars, hash_field), + String); +} + + +MaybeHandle<Map> Factory::InternalizedStringMapForString( + Handle<String> string) { + // If the string is in new space it cannot be used as internalized. + if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>(); + + // Find the corresponding internalized string map for strings. + switch (string->map()->instance_type()) { + case STRING_TYPE: return internalized_string_map(); + case ASCII_STRING_TYPE: return ascii_internalized_string_map(); + case EXTERNAL_STRING_TYPE: return external_internalized_string_map(); + case EXTERNAL_ASCII_STRING_TYPE: + return external_ascii_internalized_string_map(); + case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: + return external_internalized_string_with_one_byte_data_map(); + case SHORT_EXTERNAL_STRING_TYPE: + return short_external_internalized_string_map(); + case SHORT_EXTERNAL_ASCII_STRING_TYPE: + return short_external_ascii_internalized_string_map(); + case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: + return short_external_internalized_string_with_one_byte_data_map(); + default: return MaybeHandle<Map>(); // No match found. 
+ } +} + + +MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString( + int length, PretenureFlag pretenure) { + if (length > String::kMaxLength || length < 0) { + return isolate()->Throw<SeqOneByteString>(NewInvalidStringLengthError()); + } CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateRawOneByteString(length, pretenure), @@ -297,8 +370,11 @@ } -Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length, - PretenureFlag pretenure) { +MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString( + int length, PretenureFlag pretenure) { + if (length > String::kMaxLength || length < 0) { + return isolate()->Throw<SeqTwoByteString>(NewInvalidStringLengthError()); + } CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateRawTwoByteString(length, pretenure), @@ -306,6 +382,30 @@ } +Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) { + if (code <= String::kMaxOneByteCharCodeU) { + { + DisallowHeapAllocation no_allocation; + Object* value = single_character_string_cache()->get(code); + if (value != *undefined_value()) { + return handle(String::cast(value), isolate()); + } + } + uint8_t buffer[1]; + buffer[0] = static_cast<uint8_t>(code); + Handle<String> result = + InternalizeOneByteString(Vector<const uint8_t>(buffer, 1)); + single_character_string_cache()->set(code, *result); + return result; + } + DCHECK(code <= String::kMaxUtf16CodeUnitU); + + Handle<SeqTwoByteString> result = NewRawTwoByteString(1).ToHandleChecked(); + result->SeqTwoByteStringSet(0, static_cast<uint16_t>(code)); + return result; +} + + // Returns true for a character in a range. Both limits are inclusive. static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { // This makes uses of the the unsigned wraparound. @@ -319,10 +419,10 @@ // Numeric strings have a different hash algorithm not known by // LookupTwoCharsStringIfExists, so we skip this step for such strings. 
if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) { - String* result; - StringTable* table = isolate->heap()->string_table(); - if (table->LookupTwoCharsStringIfExists(c1, c2, &result)) { - return handle(result); + Handle<String> result; + if (StringTable::LookupTwoCharsStringIfExists(isolate, c1, c2). + ToHandle(&result)) { + return result; } } @@ -330,14 +430,16 @@ // when building the new string. if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) { // We can do this. - ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this. - Handle<SeqOneByteString> str = isolate->factory()->NewRawOneByteString(2); + DCHECK(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this. + Handle<SeqOneByteString> str = + isolate->factory()->NewRawOneByteString(2).ToHandleChecked(); uint8_t* dest = str->GetChars(); dest[0] = static_cast<uint8_t>(c1); dest[1] = static_cast<uint8_t>(c2); return str; } else { - Handle<SeqTwoByteString> str = isolate->factory()->NewRawTwoByteString(2); + Handle<SeqTwoByteString> str = + isolate->factory()->NewRawTwoByteString(2).ToHandleChecked(); uc16* dest = str->GetChars(); dest[0] = c1; dest[1] = c2; @@ -358,17 +460,8 @@ } -Handle<ConsString> Factory::NewRawConsString(String::Encoding encoding) { - Handle<Map> map = (encoding == String::ONE_BYTE_ENCODING) - ? cons_ascii_string_map() : cons_string_map(); - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->Allocate(*map, NEW_SPACE), - ConsString); -} - - -Handle<String> Factory::NewConsString(Handle<String> left, - Handle<String> right) { +MaybeHandle<String> Factory::NewConsString(Handle<String> left, + Handle<String> right) { int left_length = left->length(); if (left_length == 0) return right; int right_length = right->length(); @@ -385,8 +478,7 @@ // Make sure that an out of memory exception is thrown if the length // of the new cons string is too large. 
if (length > String::kMaxLength || length < 0) { - isolate()->ThrowInvalidStringLength(); - return Handle<String>::null(); + return isolate()->Throw<String>(NewInvalidStringLengthError()); } bool left_is_one_byte = left->IsOneByteRepresentation(); @@ -408,12 +500,13 @@ if (length < ConsString::kMinLength) { // Note that neither of the two inputs can be a slice because: STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength); - ASSERT(left->IsFlat()); - ASSERT(right->IsFlat()); + DCHECK(left->IsFlat()); + DCHECK(right->IsFlat()); STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength); if (is_one_byte) { - Handle<SeqOneByteString> result = NewRawOneByteString(length); + Handle<SeqOneByteString> result = + NewRawOneByteString(length).ToHandleChecked(); DisallowHeapAllocation no_gc; uint8_t* dest = result->GetChars(); // Copy left part. @@ -430,14 +523,15 @@ } return (is_one_byte_data_in_two_byte_string) - ? ConcatStringContent<uint8_t>(NewRawOneByteString(length), left, right) - : ConcatStringContent<uc16>(NewRawTwoByteString(length), left, right); + ? ConcatStringContent<uint8_t>( + NewRawOneByteString(length).ToHandleChecked(), left, right) + : ConcatStringContent<uc16>( + NewRawTwoByteString(length).ToHandleChecked(), left, right); } - Handle<ConsString> result = NewRawConsString( - (is_one_byte || is_one_byte_data_in_two_byte_string) - ? String::ONE_BYTE_ENCODING - : String::TWO_BYTE_ENCODING); + Handle<Map> map = (is_one_byte || is_one_byte_data_in_two_byte_string) + ? 
cons_ascii_string_map() : cons_string_map(); + Handle<ConsString> result = New<ConsString>(map, NEW_SPACE); DisallowHeapAllocation no_gc; WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); @@ -450,40 +544,20 @@ } -Handle<String> Factory::NewFlatConcatString(Handle<String> first, - Handle<String> second) { - int total_length = first->length() + second->length(); - if (first->IsOneByteRepresentation() && second->IsOneByteRepresentation()) { - return ConcatStringContent<uint8_t>( - NewRawOneByteString(total_length), first, second); - } else { - return ConcatStringContent<uc16>( - NewRawTwoByteString(total_length), first, second); - } -} - - -Handle<SlicedString> Factory::NewRawSlicedString(String::Encoding encoding) { - Handle<Map> map = (encoding == String::ONE_BYTE_ENCODING) - ? sliced_ascii_string_map() : sliced_string_map(); - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->Allocate(*map, NEW_SPACE), - SlicedString); -} - - Handle<String> Factory::NewProperSubString(Handle<String> str, int begin, int end) { #if VERIFY_HEAP if (FLAG_verify_heap) str->StringVerify(); #endif - ASSERT(begin > 0 || end < str->length()); + DCHECK(begin > 0 || end < str->length()); + + str = String::Flatten(str); int length = end - begin; if (length <= 0) return empty_string(); if (length == 1) { - return LookupSingleCharacterStringFromCode(isolate(), str->Get(begin)); + return LookupSingleCharacterStringFromCode(str->Get(begin)); } if (length == 2) { // Optimization for 2-byte strings often used as keys in a decompression @@ -496,15 +570,15 @@ if (!FLAG_string_slices || length < SlicedString::kMinLength) { if (str->IsOneByteRepresentation()) { - Handle<SeqOneByteString> result = NewRawOneByteString(length); - ASSERT(!result.is_null()); + Handle<SeqOneByteString> result = + NewRawOneByteString(length).ToHandleChecked(); uint8_t* dest = result->GetChars(); DisallowHeapAllocation no_gc; String::WriteToFlat(*str, dest, begin, end); return result; } else { - 
Handle<SeqTwoByteString> result = NewRawTwoByteString(length); - ASSERT(!result.is_null()); + Handle<SeqTwoByteString> result = + NewRawTwoByteString(length).ToHandleChecked(); uc16* dest = result->GetChars(); DisallowHeapAllocation no_gc; String::WriteToFlat(*str, dest, begin, end); @@ -514,34 +588,16 @@ int offset = begin; - while (str->IsConsString()) { - Handle<ConsString> cons = Handle<ConsString>::cast(str); - int split = cons->first()->length(); - if (split <= offset) { - // Slice is fully contained in the second part. - str = Handle<String>(cons->second(), isolate()); - offset -= split; // Adjust for offset. - continue; - } else if (offset + length <= split) { - // Slice is fully contained in the first part. - str = Handle<String>(cons->first(), isolate()); - continue; - } - break; - } - if (str->IsSlicedString()) { Handle<SlicedString> slice = Handle<SlicedString>::cast(str); str = Handle<String>(slice->parent(), isolate()); offset += slice->offset(); - } else { - str = FlattenGetString(str); } - ASSERT(str->IsSeqString() || str->IsExternalString()); - Handle<SlicedString> slice = NewRawSlicedString( - str->IsOneByteRepresentation() ? String::ONE_BYTE_ENCODING - : String::TWO_BYTE_ENCODING); + DCHECK(str->IsSeqString() || str->IsExternalString()); + Handle<Map> map = str->IsOneByteRepresentation() ? 
sliced_ascii_string_map() + : sliced_string_map(); + Handle<SlicedString> slice = New<SlicedString>(map, NEW_SPACE); slice->set_hash_field(String::kEmptyHashField); slice->set_length(length); @@ -551,21 +607,45 @@ } -Handle<String> Factory::NewExternalStringFromAscii( +MaybeHandle<String> Factory::NewExternalStringFromAscii( const ExternalAsciiString::Resource* resource) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateExternalStringFromAscii(resource), - String); + size_t length = resource->length(); + if (length > static_cast<size_t>(String::kMaxLength)) { + return isolate()->Throw<String>(NewInvalidStringLengthError()); + } + + Handle<Map> map = external_ascii_string_map(); + Handle<ExternalAsciiString> external_string = + New<ExternalAsciiString>(map, NEW_SPACE); + external_string->set_length(static_cast<int>(length)); + external_string->set_hash_field(String::kEmptyHashField); + external_string->set_resource(resource); + + return external_string; } -Handle<String> Factory::NewExternalStringFromTwoByte( +MaybeHandle<String> Factory::NewExternalStringFromTwoByte( const ExternalTwoByteString::Resource* resource) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateExternalStringFromTwoByte(resource), - String); + size_t length = resource->length(); + if (length > static_cast<size_t>(String::kMaxLength)) { + return isolate()->Throw<String>(NewInvalidStringLengthError()); + } + + // For small strings we check whether the resource contains only + // one byte characters. If yes, we use a different string map. + static const size_t kOneByteCheckLengthLimit = 32; + bool is_one_byte = length <= kOneByteCheckLengthLimit && + String::IsOneByte(resource->data(), static_cast<int>(length)); + Handle<Map> map = is_one_byte ? 
+ external_string_with_one_byte_data_map() : external_string_map(); + Handle<ExternalTwoByteString> external_string = + New<ExternalTwoByteString>(map, NEW_SPACE); + external_string->set_length(static_cast<int>(length)); + external_string->set_hash_field(String::kEmptyHashField); + external_string->set_resource(resource); + + return external_string; } @@ -578,44 +658,67 @@ Handle<Symbol> Factory::NewPrivateSymbol() { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocatePrivateSymbol(), - Symbol); + Handle<Symbol> symbol = NewSymbol(); + symbol->set_is_private(true); + return symbol; +} + + +Handle<Symbol> Factory::NewPrivateOwnSymbol() { + Handle<Symbol> symbol = NewSymbol(); + symbol->set_is_private(true); + symbol->set_is_own(true); + return symbol; } Handle<Context> Factory::NewNativeContext() { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateNativeContext(), - Context); + Handle<FixedArray> array = NewFixedArray(Context::NATIVE_CONTEXT_SLOTS); + array->set_map_no_write_barrier(*native_context_map()); + Handle<Context> context = Handle<Context>::cast(array); + context->set_js_array_maps(*undefined_value()); + DCHECK(context->IsNativeContext()); + return context; } Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function, Handle<ScopeInfo> scope_info) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateGlobalContext(*function, *scope_info), - Context); + Handle<FixedArray> array = + NewFixedArray(scope_info->ContextLength(), TENURED); + array->set_map_no_write_barrier(*global_context_map()); + Handle<Context> context = Handle<Context>::cast(array); + context->set_closure(*function); + context->set_previous(function->context()); + context->set_extension(*scope_info); + context->set_global_object(function->context()->global_object()); + DCHECK(context->IsGlobalContext()); + return context; } Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) { - CALL_HEAP_FUNCTION( - isolate(), - 
isolate()->heap()->AllocateModuleContext(*scope_info), - Context); + Handle<FixedArray> array = + NewFixedArray(scope_info->ContextLength(), TENURED); + array->set_map_no_write_barrier(*module_context_map()); + // Instance link will be set later. + Handle<Context> context = Handle<Context>::cast(array); + context->set_extension(Smi::FromInt(0)); + return context; } Handle<Context> Factory::NewFunctionContext(int length, Handle<JSFunction> function) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateFunctionContext(length, *function), - Context); + DCHECK(length >= Context::MIN_CONTEXT_SLOTS); + Handle<FixedArray> array = NewFixedArray(length); + array->set_map_no_write_barrier(*function_context_map()); + Handle<Context> context = Handle<Context>::cast(array); + context->set_closure(*function); + context->set_previous(function->context()); + context->set_extension(Smi::FromInt(0)); + context->set_global_object(function->context()->global_object()); + return context; } @@ -623,35 +726,45 @@ Handle<Context> previous, Handle<String> name, Handle<Object> thrown_object) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateCatchContext(*function, - *previous, - *name, - *thrown_object), - Context); + STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX); + Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 1); + array->set_map_no_write_barrier(*catch_context_map()); + Handle<Context> context = Handle<Context>::cast(array); + context->set_closure(*function); + context->set_previous(*previous); + context->set_extension(*name); + context->set_global_object(previous->global_object()); + context->set(Context::THROWN_OBJECT_INDEX, *thrown_object); + return context; } Handle<Context> Factory::NewWithContext(Handle<JSFunction> function, Handle<Context> previous, - Handle<JSObject> extension) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateWithContext(*function, *previous, *extension), - Context); 
+ Handle<JSReceiver> extension) { + Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS); + array->set_map_no_write_barrier(*with_context_map()); + Handle<Context> context = Handle<Context>::cast(array); + context->set_closure(*function); + context->set_previous(*previous); + context->set_extension(*extension); + context->set_global_object(previous->global_object()); + return context; } Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function, Handle<Context> previous, Handle<ScopeInfo> scope_info) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateBlockContext(*function, - *previous, - *scope_info), - Context); + Handle<FixedArray> array = + NewFixedArrayWithHoles(scope_info->ContextLength()); + array->set_map_no_write_barrier(*block_context_map()); + Handle<Context> context = Handle<Context>::cast(array); + context->set_closure(*function); + context->set_previous(*previous); + context->set_extension(*scope_info); + context->set_global_object(previous->global_object()); + return context; } @@ -663,6 +776,15 @@ } +Handle<CodeCache> Factory::NewCodeCache() { + Handle<CodeCache> code_cache = + Handle<CodeCache>::cast(NewStruct(CODE_CACHE_TYPE)); + code_cache->set_default_cache(*empty_fixed_array(), SKIP_WRITE_BARRIER); + code_cache->set_normal_type_cache(*undefined_value(), SKIP_WRITE_BARRIER); + return code_cache; +} + + Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry( int aliased_context_slot) { Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast( @@ -736,7 +858,7 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) { - ASSERT(0 <= length); + DCHECK(0 <= length); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateByteArray(length, pretenure), @@ -748,7 +870,7 @@ ExternalArrayType array_type, void* external_pointer, PretenureFlag pretenure) { - ASSERT(0 <= length && length <= Smi::kMaxValue); + DCHECK(0 <= length && length <= Smi::kMaxValue); 
CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateExternalArray(length, @@ -763,7 +885,7 @@ int length, ExternalArrayType array_type, PretenureFlag pretenure) { - ASSERT(0 <= length && length <= Smi::kMaxValue); + DCHECK(0 <= length && length <= Smi::kMaxValue); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateFixedTypedArray(length, @@ -799,10 +921,14 @@ Handle<AllocationSite> Factory::NewAllocationSite() { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateAllocationSite(), - AllocationSite); + Handle<Map> map = allocation_site_map(); + Handle<AllocationSite> site = New<AllocationSite>(map, OLD_POINTER_SPACE); + site->Initialize(); + + // Link the site + site->set_weak_next(isolate()->heap()->allocation_sites_list()); + isolate()->heap()->set_allocation_sites_list(*site); + return site; } @@ -816,223 +942,106 @@ } -Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) { - // Make sure to use globals from the function's context, since the function - // can be from a different context. - Handle<Context> native_context(function->context()->native_context()); - Handle<Map> new_map; - if (function->shared()->is_generator()) { - // Generator prototypes can share maps since they don't have "constructor" - // properties. - new_map = handle(native_context->generator_object_prototype_map()); - } else { - // Each function prototype gets a fresh map to avoid unwanted sharing of - // maps between prototypes of different constructors. 
- Handle<JSFunction> object_function(native_context->object_function()); - ASSERT(object_function->has_initial_map()); - new_map = Map::Copy(handle(object_function->initial_map())); - } - - Handle<JSObject> prototype = NewJSObjectFromMap(new_map); - - if (!function->shared()->is_generator()) { - JSObject::SetLocalPropertyIgnoreAttributes(prototype, - constructor_string(), - function, - DONT_ENUM); - } - - return prototype; -} - - -Handle<Map> Factory::CopyWithPreallocatedFieldDescriptors(Handle<Map> src) { - CALL_HEAP_FUNCTION( - isolate(), src->CopyWithPreallocatedFieldDescriptors(), Map); +Handle<JSObject> Factory::CopyJSObject(Handle<JSObject> object) { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->CopyJSObject(*object, NULL), + JSObject); } -Handle<Map> Factory::CopyMap(Handle<Map> src, - int extra_inobject_properties) { - Handle<Map> copy = CopyWithPreallocatedFieldDescriptors(src); - // Check that we do not overflow the instance size when adding the - // extra inobject properties. - int instance_size_delta = extra_inobject_properties * kPointerSize; - int max_instance_size_delta = - JSObject::kMaxInstanceSize - copy->instance_size(); - int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2; - if (extra_inobject_properties > max_extra_properties) { - // If the instance size overflows, we allocate as many properties - // as we can as inobject properties. - instance_size_delta = max_instance_size_delta; - extra_inobject_properties = max_extra_properties; - } - // Adjust the map with the extra inobject properties. 
- int inobject_properties = - copy->inobject_properties() + extra_inobject_properties; - copy->set_inobject_properties(inobject_properties); - copy->set_unused_property_fields(inobject_properties); - copy->set_instance_size(copy->instance_size() + instance_size_delta); - copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy)); - return copy; +Handle<JSObject> Factory::CopyJSObjectWithAllocationSite( + Handle<JSObject> object, + Handle<AllocationSite> site) { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->CopyJSObject( + *object, + site.is_null() ? NULL : *site), + JSObject); } -Handle<Map> Factory::CopyMap(Handle<Map> src) { - CALL_HEAP_FUNCTION(isolate(), src->Copy(), Map); +Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array, + Handle<Map> map) { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->CopyFixedArrayWithMap(*array, *map), + FixedArray); } Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) { - CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray); + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->CopyFixedArray(*array), + FixedArray); } Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray( Handle<FixedArray> array) { - ASSERT(isolate()->heap()->InNewSpace(*array)); + DCHECK(isolate()->heap()->InNewSpace(*array)); CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyAndTenureFixedCOWArray(*array), FixedArray); } -Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array, - int new_length, - PretenureFlag pretenure) { - CALL_HEAP_FUNCTION(isolate(), - array->CopySize(new_length, pretenure), - FixedArray); -} - - Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray( Handle<FixedDoubleArray> array) { - CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray); + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->CopyFixedDoubleArray(*array), + FixedDoubleArray); } Handle<ConstantPoolArray> Factory::CopyConstantPoolArray( Handle<ConstantPoolArray> array) { - 
CALL_HEAP_FUNCTION(isolate(), array->Copy(), ConstantPoolArray); -} - - -Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo( - Handle<SharedFunctionInfo> function_info, - Handle<Map> function_map, - PretenureFlag pretenure) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateFunction(*function_map, - *function_info, - isolate()->heap()->the_hole_value(), - pretenure), - JSFunction); -} - - -static Handle<Map> MapForNewFunction(Isolate *isolate, - Handle<SharedFunctionInfo> function_info) { - Context *context = isolate->context()->native_context(); - int map_index = Context::FunctionMapIndex(function_info->strict_mode(), - function_info->is_generator()); - return Handle<Map>(Map::cast(context->get(map_index))); -} - - -Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo( - Handle<SharedFunctionInfo> function_info, - Handle<Context> context, - PretenureFlag pretenure) { - Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo( - function_info, - MapForNewFunction(isolate(), function_info), - pretenure); - - if (function_info->ic_age() != isolate()->heap()->global_ic_age()) { - function_info->ResetForNewContext(isolate()->heap()->global_ic_age()); - } - - result->set_context(*context); - - int index = function_info->SearchOptimizedCodeMap(context->native_context(), - BailoutId::None()); - if (!function_info->bound() && index < 0) { - int number_of_literals = function_info->num_literals(); - Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure); - if (number_of_literals > 0) { - // Store the native context in the literals array prefix. This - // context will be used when creating object, regexp and array - // literals in this function. - literals->set(JSFunction::kLiteralNativeContextIndex, - context->native_context()); - } - result->set_literals(*literals); - } - - if (index > 0) { - // Caching of optimized code enabled and optimized code found. 
- FixedArray* literals = - function_info->GetLiteralsFromOptimizedCodeMap(index); - if (literals != NULL) result->set_literals(literals); - Code* code = function_info->GetCodeFromOptimizedCodeMap(index); - ASSERT(!code->marked_for_deoptimization()); - result->ReplaceCode(code); - return result; - } - - if (isolate()->use_crankshaft() && - FLAG_always_opt && - result->is_compiled() && - !function_info->is_toplevel() && - function_info->allows_lazy_compilation() && - !function_info->optimization_disabled() && - !isolate()->DebuggerHasBreakPoints()) { - result->MarkForOptimization(); - } - return result; + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->CopyConstantPoolArray(*array), + ConstantPoolArray); } Handle<Object> Factory::NewNumber(double value, PretenureFlag pretenure) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->NumberFromDouble(value, pretenure), Object); + // We need to distinguish the minus zero value and this cannot be + // done after conversion to int. Doing this by comparing bit + // patterns is faster than using fpclassify() et al. + if (IsMinusZero(value)) return NewHeapNumber(-0.0, IMMUTABLE, pretenure); + + int int_value = FastD2I(value); + if (value == int_value && Smi::IsValid(int_value)) { + return handle(Smi::FromInt(int_value), isolate()); + } + + // Materialize the value in the heap. + return NewHeapNumber(value, IMMUTABLE, pretenure); } Handle<Object> Factory::NewNumberFromInt(int32_t value, PretenureFlag pretenure) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->NumberFromInt32(value, pretenure), Object); + if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate()); + // Bypass NewNumber to avoid various redundant checks. 
+ return NewHeapNumber(FastI2D(value), IMMUTABLE, pretenure); } Handle<Object> Factory::NewNumberFromUint(uint32_t value, - PretenureFlag pretenure) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->NumberFromUint32(value, pretenure), Object); + PretenureFlag pretenure) { + int32_t int32v = static_cast<int32_t>(value); + if (int32v >= 0 && Smi::IsValid(int32v)) { + return handle(Smi::FromInt(int32v), isolate()); + } + return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure); } Handle<HeapNumber> Factory::NewHeapNumber(double value, + MutableMode mode, PretenureFlag pretenure) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber); -} - - -Handle<JSObject> Factory::NewNeanderObject() { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateJSObjectFromMap( - isolate()->heap()->neander_map()), - JSObject); + isolate()->heap()->AllocateHeapNumber(value, mode, pretenure), + HeapNumber); } @@ -1121,7 +1130,7 @@ char* p = &buffer[0]; Vector<char> v(buffer, kBufferSize); - OS::StrNCpy(v, message, space); + StrNCpy(v, message, space); space -= Min(space, strlen(message)); p = &buffer[kBufferSize] - space; @@ -1131,10 +1140,10 @@ space--; if (space > 0) { Handle<String> arg_str = Handle<String>::cast( - Object::GetElementNoExceptionThrown(isolate(), args, i)); + Object::GetElement(isolate(), args, i).ToHandleChecked()); SmartArrayPointer<char> arg = arg_str->ToCString(); Vector<char> v2(p, static_cast<int>(space)); - OS::StrNCpy(v2, arg.get(), space); + StrNCpy(v2, arg.get(), space); space -= Min(space, strlen(arg.get())); p = &buffer[kBufferSize] - space; } @@ -1145,8 +1154,7 @@ } else { buffer[kBufferSize - 1] = '\0'; } - Handle<String> error_string = NewStringFromUtf8(CStrVector(buffer), TENURED); - return error_string; + return NewStringFromUtf8(CStrVector(buffer), TENURED).ToHandleChecked(); } @@ -1154,9 +1162,8 @@ const char* message, Handle<JSArray> args) { Handle<String> make_str = 
InternalizeUtf8String(maker); - Handle<Object> fun_obj( - isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str), - isolate()); + Handle<Object> fun_obj = Object::GetProperty( + isolate()->js_builtins_object(), make_str).ToHandleChecked(); // If the builtins haven't been properly configured yet this error // constructor may not have been defined. Bail out. if (!fun_obj->IsJSFunction()) { @@ -1168,12 +1175,15 @@ // Invoke the JavaScript factory method. If an exception is thrown while // running the factory method, use the exception as the result. - bool caught_exception; - Handle<Object> result = Execution::TryCall(fun, - isolate()->js_builtins_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); + Handle<Object> result; + Handle<Object> exception; + if (!Execution::TryCall(fun, + isolate()->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &exception).ToHandle(&result)) { + return exception; + } return result; } @@ -1186,105 +1196,222 @@ Handle<Object> Factory::NewError(const char* constructor, Handle<String> message) { Handle<String> constr = InternalizeUtf8String(constructor); - Handle<JSFunction> fun = Handle<JSFunction>( - JSFunction::cast(isolate()->js_builtins_object()-> - GetPropertyNoExceptionThrown(*constr))); + Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty( + isolate()->js_builtins_object(), constr).ToHandleChecked()); Handle<Object> argv[] = { message }; // Invoke the JavaScript factory method. If an exception is thrown while // running the factory method, use the exception as the result. 
- bool caught_exception; - Handle<Object> result = Execution::TryCall(fun, - isolate()->js_builtins_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); + Handle<Object> result; + Handle<Object> exception; + if (!Execution::TryCall(fun, + isolate()->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &exception).ToHandle(&result)) { + return exception; + } + return result; +} + + +void Factory::InitializeFunction(Handle<JSFunction> function, + Handle<SharedFunctionInfo> info, + Handle<Context> context) { + function->initialize_properties(); + function->initialize_elements(); + function->set_shared(*info); + function->set_code(info->code()); + function->set_context(*context); + function->set_prototype_or_initial_map(*the_hole_value()); + function->set_literals_or_bindings(*empty_fixed_array()); + function->set_next_function_link(*undefined_value()); + if (info->is_arrow()) function->RemovePrototype(); +} + + +Handle<JSFunction> Factory::NewFunction(Handle<Map> map, + Handle<SharedFunctionInfo> info, + Handle<Context> context, + PretenureFlag pretenure) { + AllocationSpace space = pretenure == TENURED ? 
OLD_POINTER_SPACE : NEW_SPACE; + Handle<JSFunction> result = New<JSFunction>(map, space); + InitializeFunction(result, info, context); + return result; +} + + +Handle<JSFunction> Factory::NewFunction(Handle<Map> map, + Handle<String> name, + MaybeHandle<Code> code) { + Handle<Context> context(isolate()->native_context()); + Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(name, code); + DCHECK((info->strict_mode() == SLOPPY) && + (map.is_identical_to(isolate()->sloppy_function_map()) || + map.is_identical_to( + isolate()->sloppy_function_without_prototype_map()) || + map.is_identical_to( + isolate()->sloppy_function_with_readonly_prototype_map()))); + return NewFunction(map, info, context); +} + + +Handle<JSFunction> Factory::NewFunction(Handle<String> name) { + return NewFunction( + isolate()->sloppy_function_map(), name, MaybeHandle<Code>()); +} + + +Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name, + Handle<Code> code) { + return NewFunction( + isolate()->sloppy_function_without_prototype_map(), name, code); +} + + +Handle<JSFunction> Factory::NewFunction(Handle<String> name, + Handle<Code> code, + Handle<Object> prototype, + bool read_only_prototype) { + Handle<Map> map = read_only_prototype + ? isolate()->sloppy_function_with_readonly_prototype_map() + : isolate()->sloppy_function_map(); + Handle<JSFunction> result = NewFunction(map, name, code); + result->set_prototype_or_initial_map(*prototype); return result; } Handle<JSFunction> Factory::NewFunction(Handle<String> name, + Handle<Code> code, + Handle<Object> prototype, InstanceType type, int instance_size, - Handle<Code> code, - bool force_initial_map) { + bool read_only_prototype) { // Allocate the function - Handle<JSFunction> function = NewFunction(name, the_hole_value()); + Handle<JSFunction> function = NewFunction( + name, code, prototype, read_only_prototype); - // Set up the code pointer in both the shared function info and in - // the function itself. 
- function->shared()->set_code(*code); - function->set_code(*code); - - if (force_initial_map || - type != JS_OBJECT_TYPE || - instance_size != JSObject::kHeaderSize) { - Handle<Map> initial_map = NewMap(type, instance_size); - Handle<JSObject> prototype = NewFunctionPrototype(function); - initial_map->set_prototype(*prototype); - function->set_initial_map(*initial_map); - initial_map->set_constructor(*function); - } else { - ASSERT(!function->has_initial_map()); - ASSERT(!function->has_prototype()); + Handle<Map> initial_map = NewMap( + type, instance_size, GetInitialFastElementsKind()); + if (prototype->IsTheHole() && !function->shared()->is_generator()) { + prototype = NewFunctionPrototype(function); } + JSFunction::SetInitialMap(function, initial_map, + Handle<JSReceiver>::cast(prototype)); + return function; } -Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name, - InstanceType type, - int instance_size, - Handle<JSObject> prototype, - Handle<Code> code, - bool force_initial_map) { - // Allocate the function. - Handle<JSFunction> function = NewFunction(name, prototype); - - // Set up the code pointer in both the shared function info and in - // the function itself. - function->shared()->set_code(*code); - function->set_code(*code); - - if (force_initial_map || - type != JS_OBJECT_TYPE || - instance_size != JSObject::kHeaderSize) { - Handle<Map> initial_map = NewMap(type, - instance_size, - GetInitialFastElementsKind()); - function->set_initial_map(*initial_map); - initial_map->set_constructor(*function); +Handle<JSFunction> Factory::NewFunction(Handle<String> name, + Handle<Code> code, + InstanceType type, + int instance_size) { + return NewFunction(name, code, the_hole_value(), type, instance_size); +} + + +Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) { + // Make sure to use globals from the function's context, since the function + // can be from a different context. 
+ Handle<Context> native_context(function->context()->native_context()); + Handle<Map> new_map; + if (function->shared()->is_generator()) { + // Generator prototypes can share maps since they don't have "constructor" + // properties. + new_map = handle(native_context->generator_object_prototype_map()); + } else { + // Each function prototype gets a fresh map to avoid unwanted sharing of + // maps between prototypes of different constructors. + Handle<JSFunction> object_function(native_context->object_function()); + DCHECK(object_function->has_initial_map()); + new_map = handle(object_function->initial_map()); + } + + DCHECK(!new_map->is_prototype_map()); + Handle<JSObject> prototype = NewJSObjectFromMap(new_map); + + if (!function->shared()->is_generator()) { + JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM); } - JSFunction::SetPrototype(function, prototype); - return function; + return prototype; } -Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name, - Handle<Code> code) { - Handle<JSFunction> function = NewFunctionWithoutPrototype(name, SLOPPY); - function->shared()->set_code(*code); - function->set_code(*code); - ASSERT(!function->has_initial_map()); - ASSERT(!function->has_prototype()); - return function; +Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo( + Handle<SharedFunctionInfo> info, + Handle<Context> context, + PretenureFlag pretenure) { + int map_index = Context::FunctionMapIndex(info->strict_mode(), + info->is_generator()); + Handle<Map> map(Map::cast(context->native_context()->get(map_index))); + Handle<JSFunction> result = NewFunction(map, info, context, pretenure); + + if (info->ic_age() != isolate()->heap()->global_ic_age()) { + info->ResetForNewContext(isolate()->heap()->global_ic_age()); + } + + int index = info->SearchOptimizedCodeMap(context->native_context(), + BailoutId::None()); + if (!info->bound() && index < 0) { + int number_of_literals = info->num_literals(); + 
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure); + if (number_of_literals > 0) { + // Store the native context in the literals array prefix. This + // context will be used when creating object, regexp and array + // literals in this function. + literals->set(JSFunction::kLiteralNativeContextIndex, + context->native_context()); + } + result->set_literals(*literals); + } + + if (index > 0) { + // Caching of optimized code enabled and optimized code found. + FixedArray* literals = info->GetLiteralsFromOptimizedCodeMap(index); + if (literals != NULL) result->set_literals(literals); + Code* code = info->GetCodeFromOptimizedCodeMap(index); + DCHECK(!code->marked_for_deoptimization()); + result->ReplaceCode(code); + return result; + } + + if (isolate()->use_crankshaft() && + FLAG_always_opt && + result->is_compiled() && + !info->is_toplevel() && + info->allows_lazy_compilation() && + !info->optimization_disabled() && + !isolate()->DebuggerHasBreakPoints()) { + result->MarkForOptimization(); + } + return result; } Handle<ScopeInfo> Factory::NewScopeInfo(int length) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateScopeInfo(length), - ScopeInfo); + Handle<FixedArray> array = NewFixedArray(length, TENURED); + array->set_map_no_write_barrier(*scope_info_map()); + Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(array); + return scope_info; } Handle<JSObject> Factory::NewExternal(void* value) { + Handle<Foreign> foreign = NewForeign(static_cast<Address>(value)); + Handle<JSObject> external = NewJSObjectFromMap(external_map()); + external->SetInternalField(0, *foreign); + return external; +} + + +Handle<Code> Factory::NewCodeRaw(int object_size, bool immovable) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateExternal(value), - JSObject); + isolate()->heap()->AllocateCode(object_size, immovable), + Code); } @@ -1293,12 +1420,64 @@ Handle<Object> self_ref, bool immovable, bool crankshafted, - int prologue_offset) { - 
CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->CreateCode( - desc, flags, self_ref, immovable, crankshafted, - prologue_offset), - Code); + int prologue_offset, + bool is_debug) { + Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED); + Handle<ConstantPoolArray> constant_pool = + desc.origin->NewConstantPool(isolate()); + + // Compute size. + int body_size = RoundUp(desc.instr_size, kObjectAlignment); + int obj_size = Code::SizeFor(body_size); + + Handle<Code> code = NewCodeRaw(obj_size, immovable); + DCHECK(isolate()->code_range() == NULL || + !isolate()->code_range()->valid() || + isolate()->code_range()->contains(code->address())); + + // The code object has not been fully initialized yet. We rely on the + // fact that no allocation will happen from this point on. + DisallowHeapAllocation no_gc; + code->set_gc_metadata(Smi::FromInt(0)); + code->set_ic_age(isolate()->heap()->global_ic_age()); + code->set_instruction_size(desc.instr_size); + code->set_relocation_info(*reloc_info); + code->set_flags(flags); + code->set_raw_kind_specific_flags1(0); + code->set_raw_kind_specific_flags2(0); + code->set_is_crankshafted(crankshafted); + code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER); + code->set_raw_type_feedback_info(Smi::FromInt(0)); + code->set_next_code_link(*undefined_value()); + code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER); + code->set_prologue_offset(prologue_offset); + if (code->kind() == Code::OPTIMIZED_FUNCTION) { + code->set_marked_for_deoptimization(false); + } + + if (is_debug) { + DCHECK(code->kind() == Code::FUNCTION); + code->set_has_debug_break_slots(true); + } + + desc.origin->PopulateConstantPool(*constant_pool); + code->set_constant_pool(*constant_pool); + + // Allow self references to created code object by patching the handle to + // point to the newly allocated Code object. + if (!self_ref.is_null()) *(self_ref.location()) = *code; + + // Migrate generated code. 
+ // The generated code can contain Object** values (typically from handles) + // that are dereferenced during the copy to point directly to the actual heap + // objects. These pointers can include references to the code object itself, + // through the self_reference parameter. + code->CopyFrom(desc); + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) code->ObjectVerify(); +#endif + return code; } @@ -1338,45 +1517,31 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context, Handle<ScopeInfo> scope_info) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule); -} - - -// TODO(mstarzinger): Temporary wrapper until handlified. -static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict, - Handle<Name> name, - Handle<Object> value, - PropertyDetails details) { - CALL_HEAP_FUNCTION(dict->GetIsolate(), - dict->Add(*name, *value, details), - NameDictionary); -} - - -static Handle<GlobalObject> NewGlobalObjectFromMap(Isolate* isolate, - Handle<Map> map) { - CALL_HEAP_FUNCTION(isolate, - isolate->heap()->Allocate(*map, OLD_POINTER_SPACE), - GlobalObject); + // Allocate a fresh map. Modules do not have a prototype. + Handle<Map> map = NewMap(JS_MODULE_TYPE, JSModule::kSize); + // Allocate the object based on the map. + Handle<JSModule> module = + Handle<JSModule>::cast(NewJSObjectFromMap(map, TENURED)); + module->set_context(*context); + module->set_scope_info(*scope_info); + return module; } Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) { - ASSERT(constructor->has_initial_map()); + DCHECK(constructor->has_initial_map()); Handle<Map> map(constructor->initial_map()); - ASSERT(map->is_dictionary_map()); + DCHECK(map->is_dictionary_map()); // Make sure no field properties are described in the initial map. // This guarantees us that normalizing the properties does not // require us to change property values to PropertyCells. 
- ASSERT(map->NextFreePropertyIndex() == 0); + DCHECK(map->NextFreePropertyIndex() == 0); // Make sure we don't have a ton of pre-allocated slots in the // global objects. They will be unused once we normalize the object. - ASSERT(map->unused_property_fields() == 0); - ASSERT(map->inobject_properties() == 0); + DCHECK(map->unused_property_fields() == 0); + DCHECK(map->inobject_properties() == 0); // Initial size of the backing store to avoid resize of the storage during // bootstrapping. The size differs between the JS global object ad the @@ -1385,23 +1550,25 @@ // Allocate a dictionary object for backing storage. int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size; - Handle<NameDictionary> dictionary = NewNameDictionary(at_least_space_for); + Handle<NameDictionary> dictionary = + NameDictionary::New(isolate(), at_least_space_for); // The global object might be created from an object template with accessors. // Fill these accessors into the dictionary. Handle<DescriptorArray> descs(map->instance_descriptors()); for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { PropertyDetails details = descs->GetDetails(i); - ASSERT(details.type() == CALLBACKS); // Only accessors are expected. + DCHECK(details.type() == CALLBACKS); // Only accessors are expected. PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1); Handle<Name> name(descs->GetKey(i)); Handle<Object> value(descs->GetCallbacksObject(i), isolate()); Handle<PropertyCell> cell = NewPropertyCell(value); - NameDictionaryAdd(dictionary, name, cell, d); + // |dictionary| already contains enough space for all properties. + USE(NameDictionary::Add(dictionary, name, cell, d)); } // Allocate the global object and initialize it with the backing store. 
- Handle<GlobalObject> global = NewGlobalObjectFromMap(isolate(), map); + Handle<GlobalObject> global = New<GlobalObject>(map, OLD_POINTER_SPACE); isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map); // Create a new map for the global object. @@ -1413,7 +1580,7 @@ global->set_properties(*dictionary); // Make sure result is a global object with properties in dictionary. - ASSERT(global->IsGlobalObject() && !global->HasFastProperties()); + DCHECK(global->IsGlobalObject() && !global->HasFastProperties()); return global; } @@ -1435,21 +1602,24 @@ Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, + PretenureFlag pretenure) { + Context* native_context = isolate()->context()->native_context(); + JSFunction* array_function = native_context->array_function(); + Map* map = array_function->initial_map(); + Map* transition_map = isolate()->get_initial_js_array_map(elements_kind); + if (transition_map != NULL) map = transition_map; + return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure)); +} + + +Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length, int capacity, ArrayStorageAllocationMode mode, PretenureFlag pretenure) { - if (capacity != 0) { - elements_kind = GetHoleyElementsKind(elements_kind); - } - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateJSArrayAndStorage( - elements_kind, - length, - capacity, - mode, - pretenure), - JSArray); + Handle<JSArray> array = NewJSArray(elements_kind, pretenure); + NewJSArrayStorage(array, length, capacity, mode); + return array; } @@ -1457,35 +1627,58 @@ ElementsKind elements_kind, int length, PretenureFlag pretenure) { - ASSERT(length <= elements->length()); - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateJSArrayWithElements(*elements, - elements_kind, - length, - pretenure), - JSArray); + DCHECK(length <= elements->length()); + Handle<JSArray> array = NewJSArray(elements_kind, pretenure); + + array->set_elements(*elements); + 
array->set_length(Smi::FromInt(length)); + JSObject::ValidateElements(array); + return array; } void Factory::NewJSArrayStorage(Handle<JSArray> array, - int length, - int capacity, - ArrayStorageAllocationMode mode) { - CALL_HEAP_FUNCTION_VOID(isolate(), - isolate()->heap()->AllocateJSArrayStorage(*array, - length, - capacity, - mode)); + int length, + int capacity, + ArrayStorageAllocationMode mode) { + DCHECK(capacity >= length); + + if (capacity == 0) { + array->set_length(Smi::FromInt(0)); + array->set_elements(*empty_fixed_array()); + return; + } + + Handle<FixedArrayBase> elms; + ElementsKind elements_kind = array->GetElementsKind(); + if (IsFastDoubleElementsKind(elements_kind)) { + if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { + elms = NewFixedDoubleArray(capacity); + } else { + DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); + elms = NewFixedDoubleArrayWithHoles(capacity); + } + } else { + DCHECK(IsFastSmiOrObjectElementsKind(elements_kind)); + if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { + elms = NewUninitializedFixedArray(capacity); + } else { + DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); + elms = NewFixedArrayWithHoles(capacity); + } + } + + array->set_elements(*elms); + array->set_length(Smi::FromInt(length)); } Handle<JSGeneratorObject> Factory::NewJSGeneratorObject( Handle<JSFunction> function) { - ASSERT(function->shared()->is_generator()); + DCHECK(function->shared()->is_generator()); JSFunction::EnsureHasInitialMap(function); Handle<Map> map(function->initial_map()); - ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE); + DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateJSObjectFromMap(*map), @@ -1495,7 +1688,7 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() { Handle<JSFunction> array_buffer_fun( - isolate()->context()->native_context()->array_buffer_fun()); + isolate()->native_context()->array_buffer_fun()); CALL_HEAP_FUNCTION( isolate(), 
isolate()->heap()->AllocateJSObject(*array_buffer_fun), @@ -1505,7 +1698,7 @@ Handle<JSDataView> Factory::NewJSDataView() { Handle<JSFunction> data_view_fun( - isolate()->context()->native_context()->data_view_fun()); + isolate()->native_context()->data_view_fun()); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateJSObject(*data_view_fun), @@ -1543,38 +1736,164 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler, Handle<Object> prototype) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateJSProxy(*handler, *prototype), - JSProxy); + // Allocate map. + // TODO(rossberg): Once we optimize proxies, think about a scheme to share + // maps. Will probably depend on the identity of the handler object, too. + Handle<Map> map = NewMap(JS_PROXY_TYPE, JSProxy::kSize); + map->set_prototype(*prototype); + + // Allocate the proxy object. + Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE); + result->InitializeBody(map->instance_size(), Smi::FromInt(0)); + result->set_handler(*handler); + result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER); + return result; +} + + +Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler, + Handle<Object> call_trap, + Handle<Object> construct_trap, + Handle<Object> prototype) { + // Allocate map. + // TODO(rossberg): Once we optimize proxies, think about a scheme to share + // maps. Will probably depend on the identity of the handler object, too. + Handle<Map> map = NewMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize); + map->set_prototype(*prototype); + + // Allocate the proxy object. 
+ Handle<JSFunctionProxy> result = New<JSFunctionProxy>(map, NEW_SPACE); + result->InitializeBody(map->instance_size(), Smi::FromInt(0)); + result->set_handler(*handler); + result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER); + result->set_call_trap(*call_trap); + result->set_construct_trap(*construct_trap); + return result; +} + + +void Factory::ReinitializeJSReceiver(Handle<JSReceiver> object, + InstanceType type, + int size) { + DCHECK(type >= FIRST_JS_OBJECT_TYPE); + + // Allocate fresh map. + // TODO(rossberg): Once we optimize proxies, cache these maps. + Handle<Map> map = NewMap(type, size); + + // Check that the receiver has at least the size of the fresh object. + int size_difference = object->map()->instance_size() - map->instance_size(); + DCHECK(size_difference >= 0); + + map->set_prototype(object->map()->prototype()); + + // Allocate the backing storage for the properties. + int prop_size = map->InitialPropertiesLength(); + Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED); + + Heap* heap = isolate()->heap(); + MaybeHandle<SharedFunctionInfo> shared; + if (type == JS_FUNCTION_TYPE) { + OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"), + heap->HashSeed()); + Handle<String> name = InternalizeStringWithKey(&key); + shared = NewSharedFunctionInfo(name, MaybeHandle<Code>()); + } + + // In order to keep heap in consistent state there must be no allocations + // before object re-initialization is finished and filler object is installed. + DisallowHeapAllocation no_allocation; + + // Put in filler if the new object is smaller than the old. + if (size_difference > 0) { + Address address = object->address(); + heap->CreateFillerObjectAt(address + map->instance_size(), size_difference); + heap->AdjustLiveBytes(address, -size_difference, Heap::FROM_MUTATOR); + } + + // Reset the map for the object. 
+ object->synchronized_set_map(*map); + Handle<JSObject> jsobj = Handle<JSObject>::cast(object); + + // Reinitialize the object from the constructor map. + heap->InitializeJSObjectFromMap(*jsobj, *properties, *map); + + // Functions require some minimal initialization. + if (type == JS_FUNCTION_TYPE) { + map->set_function_with_prototype(true); + Handle<JSFunction> js_function = Handle<JSFunction>::cast(object); + Handle<Context> context(isolate()->native_context()); + InitializeFunction(js_function, shared.ToHandleChecked(), context); + } +} + + +void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object, + Handle<JSFunction> constructor) { + DCHECK(constructor->has_initial_map()); + Handle<Map> map(constructor->initial_map(), isolate()); + + // The proxy's hash should be retained across reinitialization. + Handle<Object> hash(object->hash(), isolate()); + + // Check that the already allocated object has the same size and type as + // objects allocated using the constructor. + DCHECK(map->instance_size() == object->map()->instance_size()); + DCHECK(map->instance_type() == object->map()->instance_type()); + + // Allocate the backing storage for the properties. + int prop_size = map->InitialPropertiesLength(); + Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED); + + // In order to keep heap in consistent state there must be no allocations + // before object re-initialization is finished. + DisallowHeapAllocation no_allocation; + + // Reset the map for the object. + object->synchronized_set_map(*map); + + Heap* heap = isolate()->heap(); + // Reinitialize the object from the constructor map. + heap->InitializeJSObjectFromMap(*object, *properties, *map); + + // Restore the saved hash. 
+ object->set_hash(*hash); } void Factory::BecomeJSObject(Handle<JSReceiver> object) { - CALL_HEAP_FUNCTION_VOID( - isolate(), - isolate()->heap()->ReinitializeJSReceiver( - *object, JS_OBJECT_TYPE, JSObject::kHeaderSize)); + ReinitializeJSReceiver(object, JS_OBJECT_TYPE, JSObject::kHeaderSize); } void Factory::BecomeJSFunction(Handle<JSReceiver> object) { - CALL_HEAP_FUNCTION_VOID( + ReinitializeJSReceiver(object, JS_FUNCTION_TYPE, JSFunction::kSize); +} + + +Handle<FixedArray> Factory::NewTypeFeedbackVector(int slot_count) { + // Ensure we can skip the write barrier + DCHECK_EQ(isolate()->heap()->uninitialized_symbol(), + *TypeFeedbackInfo::UninitializedSentinel(isolate())); + + CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->ReinitializeJSReceiver( - *object, JS_FUNCTION_TYPE, JSFunction::kSize)); + isolate()->heap()->AllocateFixedArrayWithFiller( + slot_count, + TENURED, + *TypeFeedbackInfo::UninitializedSentinel(isolate())), + FixedArray); } Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo( - Handle<String> name, - int number_of_literals, - bool is_generator, - Handle<Code> code, - Handle<ScopeInfo> scope_info) { - Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name); - shared->set_code(*code); + Handle<String> name, int number_of_literals, bool is_generator, + bool is_arrow, Handle<Code> code, Handle<ScopeInfo> scope_info, + Handle<FixedArray> feedback_vector) { + Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name, code); shared->set_scope_info(*scope_info); + shared->set_feedback_vector(*feedback_vector); + shared->set_is_arrow(is_arrow); int literals_array_size = number_of_literals; // If the function contains object, regexp or array literals, // allocate extra space for a literals array prefix containing the @@ -1598,113 +1917,141 @@ int end_position, Handle<Object> script, Handle<Object> stack_frames) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateJSMessageObject(*type, - *arguments, - start_position, - 
end_position, - *script, - *stack_frames), - JSMessageObject); -} - - -Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateSharedFunctionInfo(*name), - SharedFunctionInfo); -} - - -Handle<String> Factory::NumberToString(Handle<Object> number) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->NumberToString(*number), String); -} - - -Handle<String> Factory::Uint32ToString(uint32_t value) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->Uint32ToString(value), String); -} - - -Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut( - Handle<SeededNumberDictionary> dictionary, - uint32_t key, - Handle<Object> value) { - CALL_HEAP_FUNCTION(isolate(), - dictionary->AtNumberPut(key, *value), - SeededNumberDictionary); + Handle<Map> map = message_object_map(); + Handle<JSMessageObject> message = New<JSMessageObject>(map, NEW_SPACE); + message->set_properties(*empty_fixed_array(), SKIP_WRITE_BARRIER); + message->initialize_elements(); + message->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER); + message->set_type(*type); + message->set_arguments(*arguments); + message->set_start_position(start_position); + message->set_end_position(end_position); + message->set_script(*script); + message->set_stack_frames(*stack_frames); + return message; } -Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut( - Handle<UnseededNumberDictionary> dictionary, - uint32_t key, - Handle<Object> value) { - CALL_HEAP_FUNCTION(isolate(), - dictionary->AtNumberPut(key, *value), - UnseededNumberDictionary); -} - - -Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name, - Handle<Object> prototype) { - Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name); - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateFunction(*isolate()->sloppy_function_map(), - *function_share, - *prototype), - JSFunction); +Handle<SharedFunctionInfo> 
Factory::NewSharedFunctionInfo( + Handle<String> name, + MaybeHandle<Code> maybe_code) { + Handle<Map> map = shared_function_info_map(); + Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, + OLD_POINTER_SPACE); + + // Set pointer fields. + share->set_name(*name); + Handle<Code> code; + if (!maybe_code.ToHandle(&code)) { + code = handle(isolate()->builtins()->builtin(Builtins::kIllegal)); + } + share->set_code(*code); + share->set_optimized_code_map(Smi::FromInt(0)); + share->set_scope_info(ScopeInfo::Empty(isolate())); + Code* construct_stub = + isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric); + share->set_construct_stub(construct_stub); + share->set_instance_class_name(*Object_string()); + share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER); + share->set_script(*undefined_value(), SKIP_WRITE_BARRIER); + share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER); + share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER); + share->set_feedback_vector(*empty_fixed_array(), SKIP_WRITE_BARRIER); + share->set_profiler_ticks(0); + share->set_ast_node_count(0); + share->set_counters(0); + + // Set integer fields (smi or int, depending on the architecture). + share->set_length(0); + share->set_formal_parameter_count(0); + share->set_expected_nof_properties(0); + share->set_num_literals(0); + share->set_start_position_and_type(0); + share->set_end_position(0); + share->set_function_token_position(0); + // All compiler hints default to false or 0. 
+ share->set_compiler_hints(0); + share->set_opt_count_and_bailout_reason(0); + + return share; } -Handle<JSFunction> Factory::NewFunction(Handle<String> name, - Handle<Object> prototype) { - Handle<JSFunction> fun = NewFunctionHelper(name, prototype); - fun->set_context(isolate()->context()->native_context()); - return fun; +static inline int NumberCacheHash(Handle<FixedArray> cache, + Handle<Object> number) { + int mask = (cache->length() >> 1) - 1; + if (number->IsSmi()) { + return Handle<Smi>::cast(number)->value() & mask; + } else { + DoubleRepresentation rep(number->Number()); + return + (static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) & mask; + } } -Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper( - Handle<String> name, - StrictMode strict_mode) { - Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name); - Handle<Map> map = strict_mode == SLOPPY - ? isolate()->sloppy_function_without_prototype_map() - : isolate()->strict_function_without_prototype_map(); - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateFunction( - *map, - *function_share, - *the_hole_value()), - JSFunction); +Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) { + DisallowHeapAllocation no_gc; + int hash = NumberCacheHash(number_string_cache(), number); + Object* key = number_string_cache()->get(hash * 2); + if (key == *number || (key->IsHeapNumber() && number->IsHeapNumber() && + key->Number() == number->Number())) { + return Handle<String>( + String::cast(number_string_cache()->get(hash * 2 + 1)), isolate()); + } + return undefined_value(); } -Handle<JSFunction> Factory::NewFunctionWithoutPrototype( - Handle<String> name, - StrictMode strict_mode) { - Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode); - fun->set_context(isolate()->context()->native_context()); - return fun; +void Factory::SetNumberStringCache(Handle<Object> number, + Handle<String> string) { + int hash = 
NumberCacheHash(number_string_cache(), number); + if (number_string_cache()->get(hash * 2) != *undefined_value()) { + int full_size = isolate()->heap()->FullSizeNumberStringCacheLength(); + if (number_string_cache()->length() != full_size) { + // The first time we have a hash collision, we move to the full sized + // number string cache. The idea is to have a small number string + // cache in the snapshot to keep boot-time memory usage down. + // If we expand the number string cache already while creating + // the snapshot then that didn't work out. + DCHECK(!isolate()->serializer_enabled() || FLAG_extra_code != NULL); + Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED); + isolate()->heap()->set_number_string_cache(*new_cache); + return; + } + } + number_string_cache()->set(hash * 2, *number); + number_string_cache()->set(hash * 2 + 1, *string); } -Handle<Object> Factory::ToObject(Handle<Object> object) { - CALL_HEAP_FUNCTION(isolate(), object->ToObject(isolate()), Object); -} +Handle<String> Factory::NumberToString(Handle<Object> number, + bool check_number_string_cache) { + isolate()->counters()->number_to_string_runtime()->Increment(); + if (check_number_string_cache) { + Handle<Object> cached = GetNumberStringCache(number); + if (!cached->IsUndefined()) return Handle<String>::cast(cached); + } + char arr[100]; + Vector<char> buffer(arr, ARRAY_SIZE(arr)); + const char* str; + if (number->IsSmi()) { + int num = Handle<Smi>::cast(number)->value(); + str = IntToCString(num, buffer); + } else { + double num = Handle<HeapNumber>::cast(number)->value(); + str = DoubleToCString(num, buffer); + } -Handle<Object> Factory::ToObject(Handle<Object> object, - Handle<Context> native_context) { - CALL_HEAP_FUNCTION(isolate(), object->ToObject(*native_context), Object); + // We tenure the allocated string since it is referenced from the + // number-string cache which lives in the old space. 
+ Handle<String> js_string = NewStringFromAsciiChecked(str, TENURED); + SetNumberStringCache(number, js_string); + return js_string; } -#ifdef ENABLE_DEBUGGER_SUPPORT Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) { // Get the original code of the function. Handle<Code> code(shared->code()); @@ -1717,7 +2064,7 @@ // debug info object to avoid allocation while setting up the debug info // object. Handle<FixedArray> break_points( - NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction)); + NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction)); // Create and set up the debug info object. Debug info contains function, a // copy of the original code, the executing code and initial fixed array for @@ -1734,72 +2081,109 @@ return debug_info; } -#endif -Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee, +Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee, int length) { - CALL_HEAP_FUNCTION( - isolate(), - isolate()->heap()->AllocateArgumentsObject(*callee, length), JSObject); + bool strict_mode_callee = callee->shared()->strict_mode() == STRICT; + Handle<Map> map = strict_mode_callee ? 
isolate()->strict_arguments_map() + : isolate()->sloppy_arguments_map(); + + AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(), + false); + DCHECK(!isolate()->has_pending_exception()); + Handle<JSObject> result = NewJSObjectFromMap(map); + Handle<Smi> value(Smi::FromInt(length), isolate()); + Object::SetProperty(result, length_string(), value, STRICT).Assert(); + if (!strict_mode_callee) { + Object::SetProperty(result, callee_string(), callee, STRICT).Assert(); + } + return result; } Handle<JSFunction> Factory::CreateApiFunction( - Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) { + Handle<FunctionTemplateInfo> obj, + Handle<Object> prototype, + ApiInstanceType instance_type) { Handle<Code> code = isolate()->builtins()->HandleApiCall(); Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi(); - int internal_field_count = 0; - if (!obj->instance_template()->IsUndefined()) { - Handle<ObjectTemplateInfo> instance_template = - Handle<ObjectTemplateInfo>( - ObjectTemplateInfo::cast(obj->instance_template())); - internal_field_count = - Smi::cast(instance_template->internal_field_count())->value(); - } - - // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing - // JSObject::GetHeaderSize. - int instance_size = kPointerSize * internal_field_count; - InstanceType type; - switch (instance_type) { - case JavaScriptObject: - type = JS_OBJECT_TYPE; - instance_size += JSObject::kHeaderSize; - break; - case InnerGlobalObject: - type = JS_GLOBAL_OBJECT_TYPE; - instance_size += JSGlobalObject::kSize; - break; - case OuterGlobalObject: - type = JS_GLOBAL_PROXY_TYPE; - instance_size += JSGlobalProxy::kSize; - break; - default: - UNREACHABLE(); - type = JS_OBJECT_TYPE; // Keep the compiler happy. 
- break; - } + Handle<JSFunction> result; + if (obj->remove_prototype()) { + result = NewFunctionWithoutPrototype(empty_string(), code); + } else { + int internal_field_count = 0; + if (!obj->instance_template()->IsUndefined()) { + Handle<ObjectTemplateInfo> instance_template = + Handle<ObjectTemplateInfo>( + ObjectTemplateInfo::cast(obj->instance_template())); + internal_field_count = + Smi::cast(instance_template->internal_field_count())->value(); + } - Handle<JSFunction> result = - NewFunction(Factory::empty_string(), - type, - instance_size, - code, - true); + // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing + // JSObject::GetHeaderSize. + int instance_size = kPointerSize * internal_field_count; + InstanceType type; + switch (instance_type) { + case JavaScriptObjectType: + type = JS_OBJECT_TYPE; + instance_size += JSObject::kHeaderSize; + break; + case GlobalObjectType: + type = JS_GLOBAL_OBJECT_TYPE; + instance_size += JSGlobalObject::kSize; + break; + case GlobalProxyType: + type = JS_GLOBAL_PROXY_TYPE; + instance_size += JSGlobalProxy::kSize; + break; + default: + UNREACHABLE(); + type = JS_OBJECT_TYPE; // Keep the compiler happy. + break; + } - // Set length. - result->shared()->set_length(obj->length()); + result = NewFunction(empty_string(), code, prototype, type, + instance_size, obj->read_only_prototype()); + } - // Set class name. 
- Handle<Object> class_name = Handle<Object>(obj->class_name(), isolate()); + result->shared()->set_length(obj->length()); + Handle<Object> class_name(obj->class_name(), isolate()); if (class_name->IsString()) { result->shared()->set_instance_class_name(*class_name); result->shared()->set_name(*class_name); } + result->shared()->set_function_data(*obj); + result->shared()->set_construct_stub(*construct_stub); + result->shared()->DontAdaptArguments(); + + if (obj->remove_prototype()) { + DCHECK(result->shared()->IsApiFunction()); + DCHECK(!result->has_initial_map()); + DCHECK(!result->has_prototype()); + return result; + } + + if (prototype->IsTheHole()) { +#ifdef DEBUG + LookupIterator it(handle(JSObject::cast(result->prototype())), + constructor_string(), + LookupIterator::CHECK_OWN_REAL); + MaybeHandle<Object> maybe_prop = Object::GetProperty(&it); + DCHECK(it.IsFound()); + DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result)); +#endif + } else { + JSObject::AddProperty(handle(JSObject::cast(result->prototype())), + constructor_string(), result, DONT_ENUM); + } - Handle<Map> map = Handle<Map>(result->initial_map()); + // Down from here is only valid for API functions that can be used as a + // constructor (don't set the "remove prototype" flag). + + Handle<Map> map(result->initial_map()); // Mark as undetectable if needed. if (obj->undetectable()) { @@ -1829,10 +2213,6 @@ map->set_has_instance_call_handler(); } - result->shared()->set_function_data(*obj); - result->shared()->set_construct_stub(*construct_stub); - result->shared()->DontAdaptArguments(); - // Recursively copy parent instance templates' accessors, // 'data' may be modified. 
int max_number_of_additional_properties = 0; @@ -1899,31 +2279,10 @@ // Install accumulated static accessors for (int i = 0; i < valid_descriptors; i++) { Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i))); - JSObject::SetAccessor(result, accessor); + JSObject::SetAccessor(result, accessor).Assert(); } - ASSERT(result->shared()->IsApiFunction()); - return result; -} - - -Handle<MapCache> Factory::NewMapCache(int at_least_space_for) { - CALL_HEAP_FUNCTION(isolate(), - MapCache::Allocate(isolate()->heap(), - at_least_space_for), - MapCache); -} - - -MUST_USE_RESULT static MaybeObject* UpdateMapCacheWith(Context* context, - FixedArray* keys, - Map* map) { - Object* result; - { MaybeObject* maybe_result = - MapCache::cast(context->map_cache())->Put(keys, map); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - context->set_map_cache(MapCache::cast(result)); + DCHECK(result->shared()->IsApiFunction()); return result; } @@ -1931,8 +2290,10 @@ Handle<MapCache> Factory::AddToMapCache(Handle<Context> context, Handle<FixedArray> keys, Handle<Map> map) { - CALL_HEAP_FUNCTION(isolate(), - UpdateMapCacheWith(*context, *keys, *map), MapCache); + Handle<MapCache> map_cache = handle(MapCache::cast(context->map_cache())); + Handle<MapCache> result = MapCache::Put(map_cache, keys, map); + context->set_map_cache(*result); + return result; } @@ -1940,7 +2301,7 @@ Handle<FixedArray> keys) { if (context->map_cache()->IsUndefined()) { // Allocate the new map cache for the native context. - Handle<MapCache> new_cache = NewMapCache(24); + Handle<MapCache> new_cache = MapCache::New(isolate(), 24); context->set_map_cache(*new_cache); } // Check to see whether there is a matching element in the cache. @@ -1949,11 +2310,10 @@ Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate()); if (result->IsMap()) return Handle<Map>::cast(result); // Create a new map and add it to the cache. 
- Handle<Map> map = - CopyMap(Handle<Map>(context->object_function()->initial_map()), - keys->length()); + Handle<Map> map = Map::Create( + handle(context->object_function()), keys->length()); AddToMapCache(context, keys, map); - return Handle<Map>(map); + return map; } @@ -1993,28 +2353,25 @@ -void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc, - Handle<JSObject> instance, - bool* pending_exception) { +MaybeHandle<FunctionTemplateInfo> Factory::ConfigureInstance( + Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance) { // Configure the instance by adding the properties specified by the // instance template. Handle<Object> instance_template(desc->instance_template(), isolate()); if (!instance_template->IsUndefined()) { - Execution::ConfigureInstance(isolate(), - instance, - instance_template, - pending_exception); - } else { - *pending_exception = false; + RETURN_ON_EXCEPTION( + isolate(), + Execution::ConfigureInstance(isolate(), instance, instance_template), + FunctionTemplateInfo); } + return desc; } Handle<Object> Factory::GlobalConstantFor(Handle<String> name) { - Heap* h = isolate()->heap(); - if (name->Equals(h->undefined_string())) return undefined_value(); - if (name->Equals(h->nan_string())) return nan_value(); - if (name->Equals(h->infinity_string())) return infinity_value(); + if (String::Equals(name, undefined_string())) return undefined_value(); + if (String::Equals(name, nan_string())) return nan_value(); + if (String::Equals(name, infinity_string())) return infinity_value(); return Handle<Object>::null(); } diff -Nru nodejs-0.11.13/deps/v8/src/factory.h nodejs-0.11.15/deps/v8/src/factory.h --- nodejs-0.11.13/deps/v8/src/factory.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/factory.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,48 +1,23 @@ -// Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FACTORY_H_ #define V8_FACTORY_H_ -#include "globals.h" -#include "handles.h" -#include "heap.h" +#include "src/isolate.h" namespace v8 { namespace internal { // Interface for handle based allocation. -class Factory { +class Factory V8_FINAL { public: - // Allocate a new boxed value. 
- Handle<Box> NewBox( - Handle<Object> value, - PretenureFlag pretenure = NOT_TENURED); + Handle<Oddball> NewOddball(Handle<Map> map, + const char* to_string, + Handle<Object> to_number, + byte kind); // Allocates a fixed array initialized with undefined values. Handle<FixedArray> NewFixedArray( @@ -58,45 +33,38 @@ Handle<FixedArray> NewUninitializedFixedArray(int size); // Allocate a new uninitialized fixed double array. - Handle<FixedDoubleArray> NewFixedDoubleArray( + // The function returns a pre-allocated empty fixed array for capacity = 0, + // so the return type must be the general fixed array class. + Handle<FixedArrayBase> NewFixedDoubleArray( + int size, + PretenureFlag pretenure = NOT_TENURED); + + // Allocate a new fixed double array with hole values. + Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles( int size, PretenureFlag pretenure = NOT_TENURED); Handle<ConstantPoolArray> NewConstantPoolArray( - int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries); - - Handle<SeededNumberDictionary> NewSeededNumberDictionary( - int at_least_space_for); - - Handle<UnseededNumberDictionary> NewUnseededNumberDictionary( - int at_least_space_for); - - Handle<NameDictionary> NewNameDictionary(int at_least_space_for); - - Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for); - - Handle<ObjectHashTable> NewObjectHashTable( - int at_least_space_for, - MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY); - - Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for); - - Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors, - int slack = 0); - Handle<DeoptimizationInputData> NewDeoptimizationInputData( - int deopt_entry_count, - PretenureFlag pretenure); - Handle<DeoptimizationOutputData> NewDeoptimizationOutputData( - int deopt_entry_count, - PretenureFlag pretenure); - // Allocates a pre-tenured empty AccessorPair. 
+ const ConstantPoolArray::NumberOfEntries& small); + + Handle<ConstantPoolArray> NewExtendedConstantPoolArray( + const ConstantPoolArray::NumberOfEntries& small, + const ConstantPoolArray::NumberOfEntries& extended); + + Handle<OrderedHashSet> NewOrderedHashSet(); + Handle<OrderedHashMap> NewOrderedHashMap(); + + // Create a new boxed value. + Handle<Box> NewBox(Handle<Object> value); + + // Create a pre-tenured empty AccessorPair. Handle<AccessorPair> NewAccessorPair(); + // Create an empty TypeFeedbackInfo. Handle<TypeFeedbackInfo> NewTypeFeedbackInfo(); + // Finds the internalized copy for string in the string table. + // If not found, a new string is added to the table and returned. Handle<String> InternalizeUtf8String(Vector<const char> str); Handle<String> InternalizeUtf8String(const char* str) { return InternalizeUtf8String(CStrVector(str)); @@ -134,11 +102,45 @@ // two byte. // // ASCII strings are pretenured when used as keys in the SourceCodeCache. - Handle<String> NewStringFromOneByte( + MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte( Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED); + + template<size_t N> + inline Handle<String> NewStringFromStaticAscii( + const char (&str)[N], + PretenureFlag pretenure = NOT_TENURED) { + DCHECK(N == StrLength(str) + 1); + return NewStringFromOneByte( + STATIC_ASCII_VECTOR(str), pretenure).ToHandleChecked(); + } + + inline Handle<String> NewStringFromAsciiChecked( + const char* str, + PretenureFlag pretenure = NOT_TENURED) { + return NewStringFromOneByte( + OneByteVector(str), pretenure).ToHandleChecked(); + } + + + // Allocates and fully initializes a String. There are two String + // encodings: ASCII and two byte. One should choose between the three string + // allocation functions based on the encoding of the string buffer used to + // initialized the string. 
+ // - ...FromAscii initializes the string from a buffer that is ASCII + // encoded (it does not check that the buffer is ASCII encoded) and the + // result will be ASCII encoded. + // - ...FromUTF8 initializes the string from a buffer that is UTF-8 + // encoded. If the characters are all single-byte characters, the + // result will be ASCII encoded, otherwise it will converted to two + // byte. + // - ...FromTwoByte initializes the string from a buffer that is two-byte + // encoded. If the characters are all single-byte characters, the + // result will be converted to ASCII, otherwise it will be left as + // two-byte. + // TODO(dcarney): remove this function. - inline Handle<String> NewStringFromAscii( + MUST_USE_RESULT inline MaybeHandle<String> NewStringFromAscii( Vector<const char> str, PretenureFlag pretenure = NOT_TENURED) { return NewStringFromOneByte(Vector<const uint8_t>::cast(str), pretenure); @@ -146,33 +148,54 @@ // UTF8 strings are pretenured when used for regexp literal patterns and // flags in the parser. - Handle<String> NewStringFromUtf8( + MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8( Vector<const char> str, PretenureFlag pretenure = NOT_TENURED); - Handle<String> NewStringFromTwoByte( + MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte( Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED); + // Allocates an internalized string in old space based on the character + // stream. 
+ MUST_USE_RESULT Handle<String> NewInternalizedStringFromUtf8( + Vector<const char> str, + int chars, + uint32_t hash_field); + + MUST_USE_RESULT Handle<String> NewOneByteInternalizedString( + Vector<const uint8_t> str, + uint32_t hash_field); + + MUST_USE_RESULT Handle<String> NewTwoByteInternalizedString( + Vector<const uc16> str, + uint32_t hash_field); + + MUST_USE_RESULT Handle<String> NewInternalizedStringImpl( + Handle<String> string, int chars, uint32_t hash_field); + + // Compute the matching internalized string map for a string if possible. + // Empty handle is returned if string is in new space or not flattened. + MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString( + Handle<String> string); + // Allocates and partially initializes an ASCII or TwoByte String. The // characters of the string are uninitialized. Currently used in regexp code // only, where they are pretenured. - Handle<SeqOneByteString> NewRawOneByteString( + MUST_USE_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString( int length, PretenureFlag pretenure = NOT_TENURED); - Handle<SeqTwoByteString> NewRawTwoByteString( + MUST_USE_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString( int length, PretenureFlag pretenure = NOT_TENURED); - // Create a new cons string object which consists of a pair of strings. - Handle<String> NewConsString(Handle<String> left, - Handle<String> right); - - Handle<ConsString> NewRawConsString(String::Encoding encoding); + // Creates a single character string where the character has given code. + // A cache is used for ASCII codes. + Handle<String> LookupSingleCharacterStringFromCode(uint32_t code); - // Create a new sequential string containing the concatenation of the inputs. - Handle<String> NewFlatConcatString(Handle<String> first, - Handle<String> second); + // Create a new cons string object which consists of a pair of strings. 
+ MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left, + Handle<String> right); // Create a new string object which holds a proper substring of a string. Handle<String> NewProperSubString(Handle<String> str, @@ -185,20 +208,20 @@ return NewProperSubString(str, begin, end); } - Handle<SlicedString> NewRawSlicedString(String::Encoding encoding); - // Creates a new external String object. There are two String encodings // in the system: ASCII and two byte. Unlike other String types, it does // not make sense to have a UTF-8 factory function for external strings, - // because we cannot change the underlying buffer. - Handle<String> NewExternalStringFromAscii( + // because we cannot change the underlying buffer. Note that these strings + // are backed by a string resource that resides outside the V8 heap. + MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromAscii( const ExternalAsciiString::Resource* resource); - Handle<String> NewExternalStringFromTwoByte( + MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte( const ExternalTwoByteString::Resource* resource); // Create a symbol. Handle<Symbol> NewSymbol(); Handle<Symbol> NewPrivateSymbol(); + Handle<Symbol> NewPrivateOwnSymbol(); // Create a global (but otherwise uninitialized) context. Handle<Context> NewNativeContext(); @@ -222,7 +245,7 @@ // Create a 'with' context. Handle<Context> NewWithContext(Handle<JSFunction> function, Handle<Context> previous, - Handle<JSObject> extension); + Handle<JSReceiver> extension); // Create a block context. Handle<Context> NewBlockContext(Handle<JSFunction> function, @@ -233,6 +256,8 @@ // the old generation). Handle<Struct> NewStruct(InstanceType type); + Handle<CodeCache> NewCodeCache(); + Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry( int aliased_context_slot); @@ -272,6 +297,7 @@ Handle<PropertyCell> NewPropertyCell(Handle<Object> value); + // Allocate a tenured AllocationSite. It's payload is null. 
Handle<AllocationSite> NewAllocationSite(); Handle<Map> NewMap( @@ -279,14 +305,19 @@ int instance_size, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); + Handle<HeapObject> NewFillerObject(int size, + bool double_align, + AllocationSpace space); + Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function); - Handle<Map> CopyWithPreallocatedFieldDescriptors(Handle<Map> map); + Handle<JSObject> CopyJSObject(Handle<JSObject> object); - // Copy the map adding more inobject properties if possible without - // overflowing the instance size. - Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props); - Handle<Map> CopyMap(Handle<Map> map); + Handle<JSObject> CopyJSObjectWithAllocationSite(Handle<JSObject> object, + Handle<AllocationSite> site); + + Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array, + Handle<Map> map); Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array); @@ -294,10 +325,6 @@ // of it in old space. Handle<FixedArray> CopyAndTenureFixedCOWArray(Handle<FixedArray> array); - Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array, - int new_length, - PretenureFlag pretenure = NOT_TENURED); - Handle<FixedDoubleArray> CopyFixedDoubleArray( Handle<FixedDoubleArray> array); @@ -305,6 +332,7 @@ Handle<ConstantPoolArray> array); // Numbers (e.g. literals) are pretenured by the parser. + // The return value may be a smi or a heap number. 
Handle<Object> NewNumber(double value, PretenureFlag pretenure = NOT_TENURED); @@ -312,17 +340,25 @@ PretenureFlag pretenure = NOT_TENURED); Handle<Object> NewNumberFromUint(uint32_t value, PretenureFlag pretenure = NOT_TENURED); - inline Handle<Object> NewNumberFromSize(size_t value, - PretenureFlag pretenure = NOT_TENURED); + Handle<Object> NewNumberFromSize(size_t value, + PretenureFlag pretenure = NOT_TENURED) { + if (Smi::IsValid(static_cast<intptr_t>(value))) { + return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)), + isolate()); + } + return NewNumber(static_cast<double>(value), pretenure); + } Handle<HeapNumber> NewHeapNumber(double value, + MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED); - // These objects are used by the api to create env-independent data // structures in the heap. - Handle<JSObject> NewNeanderObject(); + inline Handle<JSObject> NewNeanderObject() { + return NewJSObjectFromMap(neander_map()); + } - Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length); + Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length); // JS objects are pretenured when allocated by the bootstrapper and // runtime. @@ -343,29 +379,36 @@ bool allocate_properties = true, Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()); - Handle<JSObject> NewJSObjectFromMapForDeoptimizer( - Handle<Map> map, PretenureFlag pretenure = NOT_TENURED); - // JS modules are pretenured. Handle<JSModule> NewJSModule(Handle<Context> context, Handle<ScopeInfo> scope_info); // JS arrays are pretenured when allocated by the parser. + + // Create a JSArray with no elements. Handle<JSArray> NewJSArray( ElementsKind elements_kind, - int length, - int capacity, - ArrayStorageAllocationMode mode = INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, + PretenureFlag pretenure = NOT_TENURED); + + // Create a JSArray with a specified length and elements initialized + // according to the specified mode. 
+ Handle<JSArray> NewJSArray( + ElementsKind elements_kind, int length, int capacity, + ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS, PretenureFlag pretenure = NOT_TENURED); Handle<JSArray> NewJSArray( int capacity, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND, PretenureFlag pretenure = NOT_TENURED) { + if (capacity != 0) { + elements_kind = GetHoleyElementsKind(elements_kind); + } return NewJSArray(elements_kind, 0, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure); } + // Create a JSArray with the given elements. Handle<JSArray> NewJSArrayWithElements( Handle<FixedArrayBase> elements, ElementsKind elements_kind, @@ -394,50 +437,78 @@ Handle<JSDataView> NewJSDataView(); + // Allocates a Harmony proxy. Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype); + // Allocates a Harmony function proxy. + Handle<JSProxy> NewJSFunctionProxy(Handle<Object> handler, + Handle<Object> call_trap, + Handle<Object> construct_trap, + Handle<Object> prototype); + + // Reinitialize a JSReceiver into an (empty) JS object of respective type and + // size, but keeping the original prototype. The receiver must have at least + // the size of the new object. The object is reinitialized and behaves as an + // object that has been freshly allocated. + void ReinitializeJSReceiver( + Handle<JSReceiver> object, InstanceType type, int size); + + // Reinitialize an JSGlobalProxy based on a constructor. The object + // must have the same size as objects allocated using the + // constructor. The object is reinitialized and behaves as an + // object that has been freshly allocated using the constructor. + void ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> global, + Handle<JSFunction> constructor); + // Change the type of the argument into a JS object/function and reinitialize. 
void BecomeJSObject(Handle<JSReceiver> object); void BecomeJSFunction(Handle<JSReceiver> object); Handle<JSFunction> NewFunction(Handle<String> name, - Handle<Object> prototype); - - Handle<JSFunction> NewFunctionWithoutPrototype( - Handle<String> name, - StrictMode strict_mode); - - Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global); - - Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo( - Handle<SharedFunctionInfo> function_info, - Handle<Map> function_map, - PretenureFlag pretenure); + Handle<Code> code, + Handle<Object> prototype, + bool read_only_prototype = false); + Handle<JSFunction> NewFunction(Handle<String> name); + Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name, + Handle<Code> code); Handle<JSFunction> NewFunctionFromSharedFunctionInfo( Handle<SharedFunctionInfo> function_info, Handle<Context> context, PretenureFlag pretenure = TENURED); + Handle<JSFunction> NewFunction(Handle<String> name, + Handle<Code> code, + Handle<Object> prototype, + InstanceType type, + int instance_size, + bool read_only_prototype = false); + Handle<JSFunction> NewFunction(Handle<String> name, + Handle<Code> code, + InstanceType type, + int instance_size); + + // Create a serialized scope info. Handle<ScopeInfo> NewScopeInfo(int length); + // Create an External object for V8's external API. Handle<JSObject> NewExternal(void* value); + // The reference to the Code object is stored in self_reference. + // This allows generated code to reference its own Code object + // by containing this handle. 
Handle<Code> NewCode(const CodeDesc& desc, Code::Flags flags, Handle<Object> self_reference, bool immovable = false, bool crankshafted = false, - int prologue_offset = Code::kPrologueOffsetNotSet); + int prologue_offset = Code::kPrologueOffsetNotSet, + bool is_debug = false); Handle<Code> CopyCode(Handle<Code> code); Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info); - Handle<Object> ToObject(Handle<Object> object); - Handle<Object> ToObject(Handle<Object> object, - Handle<Context> native_context); - // Interface for creating error objects. Handle<Object> NewError(const char* maker, const char* message, @@ -459,6 +530,11 @@ Vector< Handle<Object> > args); Handle<Object> NewRangeError(Handle<String> message); + Handle<Object> NewInvalidStringLengthError() { + return NewRangeError("invalid_string_length", + HandleVector<Object>(NULL, 0)); + } + Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args); Handle<Object> NewSyntaxError(Handle<String> message); @@ -470,48 +546,31 @@ Handle<Object> NewEvalError(const char* message, Vector< Handle<Object> > args); + Handle<String> NumberToString(Handle<Object> number, + bool check_number_string_cache = true); - Handle<JSFunction> NewFunction(Handle<String> name, - InstanceType type, - int instance_size, - Handle<Code> code, - bool force_initial_map); - - Handle<JSFunction> NewFunction(Handle<Map> function_map, - Handle<SharedFunctionInfo> shared, Handle<Object> prototype); - - - Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name, - InstanceType type, - int instance_size, - Handle<JSObject> prototype, - Handle<Code> code, - bool force_initial_map); - - Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name, - Handle<Code> code); - - Handle<String> NumberToString(Handle<Object> number); - Handle<String> Uint32ToString(uint32_t value); + Handle<String> Uint32ToString(uint32_t value) { + return NumberToString(NewNumberFromUint(value)); + } enum ApiInstanceType { - 
JavaScriptObject, - InnerGlobalObject, - OuterGlobalObject + JavaScriptObjectType, + GlobalObjectType, + GlobalProxyType }; Handle<JSFunction> CreateApiFunction( Handle<FunctionTemplateInfo> data, - ApiInstanceType type = JavaScriptObject); + Handle<Object> prototype, + ApiInstanceType type = JavaScriptObjectType); Handle<JSFunction> InstallMembers(Handle<JSFunction> function); // Installs interceptors on the instance. 'desc' is a function template, // and instance is an object instance created by the function of this // function template. - void ConfigureInstance(Handle<FunctionTemplateInfo> desc, - Handle<JSObject> instance, - bool* pending_exception); + MUST_USE_RESULT MaybeHandle<FunctionTemplateInfo> ConfigureInstance( + Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance); #define ROOT_ACCESSOR(type, name, camel_name) \ inline Handle<type> name() { \ @@ -537,18 +596,26 @@ INTERNALIZED_STRING_LIST(STRING_ACCESSOR) #undef STRING_ACCESSOR + inline void set_string_table(Handle<StringTable> table) { + isolate()->heap()->set_string_table(*table); + } + Handle<String> hidden_string() { return Handle<String>(&isolate()->heap()->hidden_string_); } + // Allocates a new SharedFunctionInfo object. Handle<SharedFunctionInfo> NewSharedFunctionInfo( - Handle<String> name, - int number_of_literals, - bool is_generator, - Handle<Code> code, - Handle<ScopeInfo> scope_info); - Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name); + Handle<String> name, int number_of_literals, bool is_generator, + bool is_arrow, Handle<Code> code, Handle<ScopeInfo> scope_info, + Handle<FixedArray> feedback_vector); + Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name, + MaybeHandle<Code> code); + // Allocate a new type feedback vector + Handle<FixedArray> NewTypeFeedbackVector(int slot_count); + + // Allocates a new JSMessageObject object. 
Handle<JSMessageObject> NewJSMessageObject( Handle<String> type, Handle<JSArray> arguments, @@ -557,19 +624,7 @@ Handle<Object> script, Handle<Object> stack_frames); - Handle<SeededNumberDictionary> DictionaryAtNumberPut( - Handle<SeededNumberDictionary>, - uint32_t key, - Handle<Object> value); - - Handle<UnseededNumberDictionary> DictionaryAtNumberPut( - Handle<UnseededNumberDictionary>, - uint32_t key, - Handle<Object> value); - -#ifdef ENABLE_DEBUGGER_SUPPORT Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared); -#endif // Return a map using the map cache in the native context. // The key the an ordered set of property names. @@ -603,12 +658,19 @@ private: Isolate* isolate() { return reinterpret_cast<Isolate*>(this); } - Handle<JSFunction> NewFunctionHelper(Handle<String> name, - Handle<Object> prototype); + // Creates a heap object based on the map. The fields of the heap object are + // not initialized by New<>() functions. It's the responsibility of the caller + // to do that. + template<typename T> + Handle<T> New(Handle<Map> map, AllocationSpace space); + + template<typename T> + Handle<T> New(Handle<Map> map, + AllocationSpace space, + Handle<AllocationSite> allocation_site); - Handle<JSFunction> NewFunctionWithoutPrototypeHelper( - Handle<String> name, - StrictMode strict_mode); + // Creates a code object that is not yet fully initialized yet. + inline Handle<Code> NewCodeRaw(int object_size, bool immovable); // Create a new map cache. 
Handle<MapCache> NewMapCache(int at_least_space_for); @@ -617,19 +679,32 @@ Handle<MapCache> AddToMapCache(Handle<Context> context, Handle<FixedArray> keys, Handle<Map> map); -}; - - -Handle<Object> Factory::NewNumberFromSize(size_t value, - PretenureFlag pretenure) { - if (Smi::IsValid(static_cast<intptr_t>(value))) { - return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)), - isolate()); - } else { - return NewNumber(static_cast<double>(value), pretenure); - } -} + // Attempt to find the number in a small cache. If we finds it, return + // the string representation of the number. Otherwise return undefined. + Handle<Object> GetNumberStringCache(Handle<Object> number); + + // Update the cache with a new number-string pair. + void SetNumberStringCache(Handle<Object> number, Handle<String> string); + + // Initializes a function with a shared part and prototype. + // Note: this code was factored out of NewFunction such that other parts of + // the VM could use it. Specifically, a function that creates instances of + // type JS_FUNCTION_TYPE benefit from the use of this function. + inline void InitializeFunction(Handle<JSFunction> function, + Handle<SharedFunctionInfo> info, + Handle<Context> context); + + // Creates a function initialized with a shared part. + Handle<JSFunction> NewFunction(Handle<Map> map, + Handle<SharedFunctionInfo> info, + Handle<Context> context, + PretenureFlag pretenure = TENURED); + + Handle<JSFunction> NewFunction(Handle<Map> map, + Handle<String> name, + MaybeHandle<Code> maybe_code); +}; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/fast-dtoa.cc nodejs-0.11.15/deps/v8/src/fast-dtoa.cc --- nodejs-0.11.13/deps/v8/src/fast-dtoa.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/fast-dtoa.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "../include/v8stdint.h" -#include "checks.h" -#include "utils.h" - -#include "fast-dtoa.h" - -#include "cached-powers.h" -#include "diy-fp.h" -#include "double.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/utils.h" + +#include "src/fast-dtoa.h" + +#include "src/cached-powers.h" +#include "src/diy-fp.h" +#include "src/double.h" namespace v8 { namespace internal { @@ -143,7 +120,7 @@ // Conceptually rest ~= too_high - buffer // We need to do the following tests in this order to avoid over- and // underflows. - ASSERT(rest <= unsafe_interval); + DCHECK(rest <= unsafe_interval); while (rest < small_distance && // Negated condition 1 unsafe_interval - rest >= ten_kappa && // Negated condition 2 (rest + ten_kappa < small_distance || // buffer{-1} > w_high @@ -189,7 +166,7 @@ uint64_t ten_kappa, uint64_t unit, int* kappa) { - ASSERT(rest < ten_kappa); + DCHECK(rest < ten_kappa); // The following tests are done in a specific order to avoid overflows. They // will work correctly with any uint64 values of rest < ten_kappa and unit. // @@ -388,9 +365,9 @@ Vector<char> buffer, int* length, int* kappa) { - ASSERT(low.e() == w.e() && w.e() == high.e()); - ASSERT(low.f() + 1 <= high.f() - 1); - ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent); + DCHECK(low.e() == w.e() && w.e() == high.e()); + DCHECK(low.f() + 1 <= high.f() - 1); + DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent); // low, w and high are imprecise, but by less than one ulp (unit in the last // place). // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that @@ -458,9 +435,9 @@ // data (like the interval or 'unit'), too. // Note that the multiplication by 10 does not overflow, because w.e >= -60 // and thus one.e >= -60. 
- ASSERT(one.e() >= -60); - ASSERT(fractionals < one.f()); - ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f()); + DCHECK(one.e() >= -60); + DCHECK(fractionals < one.f()); + DCHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f()); while (true) { fractionals *= 10; unit *= 10; @@ -513,9 +490,9 @@ Vector<char> buffer, int* length, int* kappa) { - ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent); - ASSERT(kMinimalTargetExponent >= -60); - ASSERT(kMaximalTargetExponent <= -32); + DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent); + DCHECK(kMinimalTargetExponent >= -60); + DCHECK(kMaximalTargetExponent <= -32); // w is assumed to have an error less than 1 unit. Whenever w is scaled we // also scale its error. uint64_t w_error = 1; @@ -566,9 +543,9 @@ // data (the 'unit'), too. // Note that the multiplication by 10 does not overflow, because w.e >= -60 // and thus one.e >= -60. - ASSERT(one.e() >= -60); - ASSERT(fractionals < one.f()); - ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f()); + DCHECK(one.e() >= -60); + DCHECK(fractionals < one.f()); + DCHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f()); while (requested_digits > 0 && fractionals > w_error) { fractionals *= 10; w_error *= 10; @@ -608,7 +585,7 @@ // Grisu3 will never output representations that lie exactly on a boundary. 
DiyFp boundary_minus, boundary_plus; Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus); - ASSERT(boundary_plus.e() == w.e()); + DCHECK(boundary_plus.e() == w.e()); DiyFp ten_mk; // Cached power of ten: 10^-k int mk; // -k int ten_mk_minimal_binary_exponent = @@ -619,7 +596,7 @@ ten_mk_minimal_binary_exponent, ten_mk_maximal_binary_exponent, &ten_mk, &mk); - ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() + + DCHECK((kMinimalTargetExponent <= w.e() + ten_mk.e() + DiyFp::kSignificandSize) && (kMaximalTargetExponent >= w.e() + ten_mk.e() + DiyFp::kSignificandSize)); @@ -633,7 +610,7 @@ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then // (f-1) * 2^e < w*10^k < (f+1) * 2^e DiyFp scaled_w = DiyFp::Times(w, ten_mk); - ASSERT(scaled_w.e() == + DCHECK(scaled_w.e() == boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize); // In theory it would be possible to avoid some recomputations by computing // the difference between w and boundary_minus/plus (a power of 2) and to @@ -678,7 +655,7 @@ ten_mk_minimal_binary_exponent, ten_mk_maximal_binary_exponent, &ten_mk, &mk); - ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() + + DCHECK((kMinimalTargetExponent <= w.e() + ten_mk.e() + DiyFp::kSignificandSize) && (kMaximalTargetExponent >= w.e() + ten_mk.e() + DiyFp::kSignificandSize)); @@ -712,8 +689,8 @@ Vector<char> buffer, int* length, int* decimal_point) { - ASSERT(v > 0); - ASSERT(!Double(v).IsSpecial()); + DCHECK(v > 0); + DCHECK(!Double(v).IsSpecial()); bool result = false; int decimal_exponent = 0; diff -Nru nodejs-0.11.13/deps/v8/src/fast-dtoa.h nodejs-0.11.15/deps/v8/src/fast-dtoa.h --- nodejs-0.11.13/deps/v8/src/fast-dtoa.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/fast-dtoa.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FAST_DTOA_H_ #define V8_FAST_DTOA_H_ diff -Nru nodejs-0.11.13/deps/v8/src/feedback-slots.h nodejs-0.11.15/deps/v8/src/feedback-slots.h --- nodejs-0.11.13/deps/v8/src/feedback-slots.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/feedback-slots.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,110 +1,27 @@ // Copyright 2014 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_FEEDBACK_SLOTS_H_ #define V8_FEEDBACK_SLOTS_H_ -#include "v8.h" +#include "src/v8.h" -#include "isolate.h" +#include "src/isolate.h" namespace v8 { namespace internal { -enum ComputablePhase { - DURING_PARSE, - AFTER_SCOPING -}; - - class FeedbackSlotInterface { public: static const int kInvalidFeedbackSlot = -1; virtual ~FeedbackSlotInterface() {} - // When can we ask how many feedback slots are necessary? - virtual ComputablePhase GetComputablePhase() = 0; - virtual int ComputeFeedbackSlotCount(Isolate* isolate) = 0; + virtual int ComputeFeedbackSlotCount() = 0; virtual void SetFirstFeedbackSlot(int slot) = 0; }; - -class DeferredFeedbackSlotProcessor { - public: - DeferredFeedbackSlotProcessor() - : slot_nodes_(NULL), - slot_count_(0) { } - - void add_slot_node(Zone* zone, FeedbackSlotInterface* slot) { - if (slot->GetComputablePhase() == DURING_PARSE) { - // No need to add to the list - int count = slot->ComputeFeedbackSlotCount(zone->isolate()); - slot->SetFirstFeedbackSlot(slot_count_); - slot_count_ += count; - } else { - if (slot_nodes_ == NULL) { - slot_nodes_ = new(zone) ZoneList<FeedbackSlotInterface*>(10, zone); - } - slot_nodes_->Add(slot, zone); - } - } - - void ProcessFeedbackSlots(Isolate* isolate) { - // Scope analysis must have been done. 
- if (slot_nodes_ == NULL) { - return; - } - - int current_slot = slot_count_; - for (int i = 0; i < slot_nodes_->length(); i++) { - FeedbackSlotInterface* slot_interface = slot_nodes_->at(i); - int count = slot_interface->ComputeFeedbackSlotCount(isolate); - if (count > 0) { - slot_interface->SetFirstFeedbackSlot(current_slot); - current_slot += count; - } - } - - slot_count_ = current_slot; - slot_nodes_->Clear(); - } - - int slot_count() { - ASSERT(slot_count_ >= 0); - return slot_count_; - } - - private: - ZoneList<FeedbackSlotInterface*>* slot_nodes_; - int slot_count_; -}; - - } } // namespace v8::internal #endif // V8_FEEDBACK_SLOTS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/field-index.cc nodejs-0.11.15/deps/v8/src/field-index.cc --- nodejs-0.11.13/deps/v8/src/field-index.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/field-index.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,23 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/field-index.h" +#include "src/objects.h" +#include "src/objects-inl.h" + +namespace v8 { +namespace internal { + + +FieldIndex FieldIndex::ForLookupResult(const LookupResult* lookup_result) { + Map* map = lookup_result->holder()->map(); + return ForPropertyIndex(map, + lookup_result->GetFieldIndexFromMap(map), + lookup_result->representation().IsDouble()); +} + + +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/field-index.h nodejs-0.11.15/deps/v8/src/field-index.h --- nodejs-0.11.13/deps/v8/src/field-index.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/field-index.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,112 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_FIELD_INDEX_H_ +#define V8_FIELD_INDEX_H_ + +#include "src/property-details.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { + +class Map; + +// Wrapper class to hold a field index, usually but not necessarily generated +// from a property index. When available, the wrapper class captures additional +// information to allow the field index to be translated back into the property +// index it was originally generated from. +class FieldIndex V8_FINAL { + public: + static FieldIndex ForPropertyIndex(Map* map, + int index, + bool is_double = false); + static FieldIndex ForInObjectOffset(int offset, Map* map = NULL); + static FieldIndex ForLookupResult(const LookupResult* result); + static FieldIndex ForDescriptor(Map* map, int descriptor_index); + static FieldIndex ForLoadByFieldIndex(Map* map, int index); + static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index); + + int GetLoadByFieldIndex() const; + + bool is_inobject() const { + return IsInObjectBits::decode(bit_field_); + } + + bool is_double() const { + return IsDoubleBits::decode(bit_field_); + } + + int offset() const { + return index() * kPointerSize; + } + + // Zero-indexed from beginning of the object. + int index() const { + return IndexBits::decode(bit_field_); + } + + int outobject_array_index() const { + DCHECK(!is_inobject()); + return index() - first_inobject_property_offset() / kPointerSize; + } + + // Zero-based from the first inobject property. Overflows to out-of-object + // properties. 
+ int property_index() const { + DCHECK(!IsHiddenField::decode(bit_field_)); + int result = index() - first_inobject_property_offset() / kPointerSize; + if (!is_inobject()) { + result += InObjectPropertyBits::decode(bit_field_); + } + return result; + } + + int GetKeyedLookupCacheIndex() const; + + int GetFieldAccessStubKey() const { + return bit_field_ & + (IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask); + } + + private: + FieldIndex(bool is_inobject, int local_index, bool is_double, + int inobject_properties, int first_inobject_property_offset, + bool is_hidden = false) { + DCHECK((first_inobject_property_offset & (kPointerSize - 1)) == 0); + bit_field_ = IsInObjectBits::encode(is_inobject) | + IsDoubleBits::encode(is_double) | + FirstInobjectPropertyOffsetBits::encode(first_inobject_property_offset) | + IsHiddenField::encode(is_hidden) | + IndexBits::encode(local_index) | + InObjectPropertyBits::encode(inobject_properties); + } + + int first_inobject_property_offset() const { + DCHECK(!IsHiddenField::decode(bit_field_)); + return FirstInobjectPropertyOffsetBits::decode(bit_field_); + } + + static const int kIndexBitsSize = kDescriptorIndexBitCount + 1; + + // Index from beginning of object. + class IndexBits: public BitField<int, 0, kIndexBitsSize> {}; + class IsInObjectBits: public BitField<bool, IndexBits::kNext, 1> {}; + class IsDoubleBits: public BitField<bool, IsInObjectBits::kNext, 1> {}; + // Number of inobject properties. + class InObjectPropertyBits + : public BitField<int, IsDoubleBits::kNext, kDescriptorIndexBitCount> {}; + // Offset of first inobject property from beginning of object. 
+ class FirstInobjectPropertyOffsetBits + : public BitField<int, InObjectPropertyBits::kNext, 7> {}; + class IsHiddenField + : public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {}; + STATIC_ASSERT(IsHiddenField::kNext <= 32); + + int bit_field_; +}; + +} } // namespace v8::internal + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/field-index-inl.h nodejs-0.11.15/deps/v8/src/field-index-inl.h --- nodejs-0.11.13/deps/v8/src/field-index-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/field-index-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,124 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_FIELD_INDEX_INL_H_ +#define V8_FIELD_INDEX_INL_H_ + +#include "src/field-index.h" + +namespace v8 { +namespace internal { + + +inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) { + DCHECK((offset % kPointerSize) == 0); + int index = offset / kPointerSize; + if (map == NULL) { + return FieldIndex(true, index, false, index + 1, 0, true); + } + int first_inobject_offset = map->GetInObjectPropertyOffset(0); + if (offset < first_inobject_offset) { + return FieldIndex(true, index, false, 0, 0, true); + } else { + return FieldIndex::ForPropertyIndex(map, offset / kPointerSize); + } +} + + +inline FieldIndex FieldIndex::ForPropertyIndex(Map* map, + int property_index, + bool is_double) { + DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE); + int inobject_properties = map->inobject_properties(); + bool is_inobject = property_index < inobject_properties; + int first_inobject_offset; + if (is_inobject) { + first_inobject_offset = map->GetInObjectPropertyOffset(0); + } else { + first_inobject_offset = FixedArray::kHeaderSize; + property_index -= inobject_properties; + } + return FieldIndex(is_inobject, + property_index + first_inobject_offset / kPointerSize, + is_double, 
inobject_properties, first_inobject_offset); +} + + +// Takes an index as computed by GetLoadFieldByIndex and reconstructs a +// FieldIndex object from it. +inline FieldIndex FieldIndex::ForLoadByFieldIndex(Map* map, int orig_index) { + int field_index = orig_index; + int is_inobject = true; + bool is_double = field_index & 1; + int first_inobject_offset = 0; + field_index >>= 1; + if (field_index < 0) { + field_index = -(field_index + 1); + is_inobject = false; + first_inobject_offset = FixedArray::kHeaderSize; + field_index += FixedArray::kHeaderSize / kPointerSize; + } else { + first_inobject_offset = map->GetInObjectPropertyOffset(0); + field_index += JSObject::kHeaderSize / kPointerSize; + } + FieldIndex result(is_inobject, field_index, is_double, + map->inobject_properties(), first_inobject_offset); + DCHECK(result.GetLoadByFieldIndex() == orig_index); + return result; +} + + +// Returns the index format accepted by the HLoadFieldByIndex instruction. +// (In-object: zero-based from (object start + JSObject::kHeaderSize), +// out-of-object: zero-based from FixedArray::kHeaderSize.) +inline int FieldIndex::GetLoadByFieldIndex() const { + // For efficiency, the LoadByFieldIndex instruction takes an index that is + // optimized for quick access. If the property is inline, the index is + // positive. If it's out-of-line, the encoded index is -raw_index - 1 to + // disambiguate the zero out-of-line index from the zero inobject case. + // The index itself is shifted up by one bit, the lower-most bit + // signifying if the field is a mutable double box (1) or not (0). + int result = index(); + if (is_inobject()) { + result -= JSObject::kHeaderSize / kPointerSize; + } else { + result -= FixedArray::kHeaderSize / kPointerSize; + result = -result - 1; + } + result <<= 1; + return is_double() ? 
(result | 1) : result; +} + + +inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) { + PropertyDetails details = + map->instance_descriptors()->GetDetails(descriptor_index); + int field_index = + map->instance_descriptors()->GetFieldIndex(descriptor_index); + return ForPropertyIndex(map, field_index, + details.representation().IsDouble()); +} + + +inline FieldIndex FieldIndex::ForKeyedLookupCacheIndex(Map* map, int index) { + if (FLAG_compiled_keyed_generic_loads) { + return ForLoadByFieldIndex(map, index); + } else { + return ForPropertyIndex(map, index); + } +} + + +inline int FieldIndex::GetKeyedLookupCacheIndex() const { + if (FLAG_compiled_keyed_generic_loads) { + return GetLoadByFieldIndex(); + } else { + return property_index(); + } +} + + +} } // namespace v8::internal + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/fixed-dtoa.cc nodejs-0.11.15/deps/v8/src/fixed-dtoa.cc --- nodejs-0.11.13/deps/v8/src/fixed-dtoa.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/fixed-dtoa.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <cmath> -#include "../include/v8stdint.h" -#include "checks.h" -#include "utils.h" +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/utils.h" -#include "double.h" -#include "fixed-dtoa.h" +#include "src/double.h" +#include "src/fixed-dtoa.h" namespace v8 { namespace internal { @@ -58,11 +35,11 @@ accumulator >>= 32; accumulator = accumulator + (high_bits_ >> 32) * multiplicand; high_bits_ = (accumulator << 32) + part; - ASSERT((accumulator >> 32) == 0); + DCHECK((accumulator >> 32) == 0); } void Shift(int shift_amount) { - ASSERT(-64 <= shift_amount && shift_amount <= 64); + DCHECK(-64 <= shift_amount && shift_amount <= 64); if (shift_amount == 0) { return; } else if (shift_amount == -64) { @@ -235,13 +212,13 @@ static void FillFractionals(uint64_t fractionals, int exponent, int fractional_count, Vector<char> buffer, int* length, int* decimal_point) { - ASSERT(-128 <= exponent && exponent <= 0); + DCHECK(-128 <= exponent && exponent <= 0); // 'fractionals' is a fixed-point number, with binary point at bit // (-exponent). 
Inside the function the non-converted remainder of fractionals // is a fixed-point number, with binary point at bit 'point'. if (-exponent <= 64) { // One 64 bit number is sufficient. - ASSERT(fractionals >> 56 == 0); + DCHECK(fractionals >> 56 == 0); int point = -exponent; for (int i = 0; i < fractional_count; ++i) { if (fractionals == 0) break; @@ -267,7 +244,7 @@ RoundUp(buffer, length, decimal_point); } } else { // We need 128 bits. - ASSERT(64 < -exponent && -exponent <= 128); + DCHECK(64 < -exponent && -exponent <= 128); UInt128 fractionals128 = UInt128(fractionals, 0); fractionals128.Shift(-exponent - 64); int point = 128; @@ -385,7 +362,7 @@ } else if (exponent < -128) { // This configuration (with at most 20 digits) means that all digits must be // 0. - ASSERT(fractional_count <= 20); + DCHECK(fractional_count <= 20); buffer[0] = '\0'; *length = 0; *decimal_point = -fractional_count; diff -Nru nodejs-0.11.13/deps/v8/src/fixed-dtoa.h nodejs-0.11.15/deps/v8/src/fixed-dtoa.h --- nodejs-0.11.13/deps/v8/src/fixed-dtoa.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/fixed-dtoa.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FIXED_DTOA_H_ #define V8_FIXED_DTOA_H_ diff -Nru nodejs-0.11.13/deps/v8/src/flag-definitions.h nodejs-0.11.15/deps/v8/src/flag-definitions.h --- nodejs-0.11.13/deps/v8/src/flag-definitions.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/flag-definitions.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // This file defines all of the flags. It is separated into different section, // for Debug, Release, Logging and Profiling, etc. To add a new flag, find the @@ -37,16 +14,14 @@ // this will just be an extern declaration, but for a readonly flag we let the // compiler make better optimizations by giving it the value. #if defined(FLAG_MODE_DECLARE) -#define FLAG_FULL(ftype, ctype, nam, def, cmt) \ - extern ctype FLAG_##nam; +#define FLAG_FULL(ftype, ctype, nam, def, cmt) extern ctype FLAG_##nam; #define FLAG_READONLY(ftype, ctype, nam, def, cmt) \ static ctype const FLAG_##nam = def; // We want to supply the actual storage and value for the flag variable in the // .cc file. We only do this for writable flags. 
#elif defined(FLAG_MODE_DEFINE) -#define FLAG_FULL(ftype, ctype, nam, def, cmt) \ - ctype FLAG_##nam = def; +#define FLAG_FULL(ftype, ctype, nam, def, cmt) ctype FLAG_##nam = def; // We need to define all of our default values so that the Flag structure can // access them by pointer. These are just used internally inside of one .cc, @@ -58,18 +33,22 @@ // We want to write entries into our meta data table, for internal parsing and // printing / etc in the flag parser code. We only do this for writable flags. #elif defined(FLAG_MODE_META) -#define FLAG_FULL(ftype, ctype, nam, def, cmt) \ - { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false }, -#define FLAG_ALIAS(ftype, ctype, alias, nam) \ - { Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \ - "alias for --"#nam, false }, +#define FLAG_FULL(ftype, ctype, nam, def, cmt) \ + { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false } \ + , +#define FLAG_ALIAS(ftype, ctype, alias, nam) \ + { \ + Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \ + "alias for --" #nam, false \ + } \ + , // We produce the code to set flags when it is implied by another flag. 
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS) -#define DEFINE_implication(whenflag, thenflag) \ +#define DEFINE_IMPLICATION(whenflag, thenflag) \ if (FLAG_##whenflag) FLAG_##thenflag = true; -#define DEFINE_neg_implication(whenflag, thenflag) \ +#define DEFINE_NEG_IMPLICATION(whenflag, thenflag) \ if (FLAG_##whenflag) FLAG_##thenflag = false; #else @@ -89,12 +68,12 @@ #define FLAG_ALIAS(ftype, ctype, alias, nam) #endif -#ifndef DEFINE_implication -#define DEFINE_implication(whenflag, thenflag) +#ifndef DEFINE_IMPLICATION +#define DEFINE_IMPLICATION(whenflag, thenflag) #endif -#ifndef DEFINE_neg_implication -#define DEFINE_neg_implication(whenflag, thenflag) +#ifndef DEFINE_NEG_IMPLICATION +#define DEFINE_NEG_IMPLICATION(whenflag, thenflag) #endif #define COMMA , @@ -102,10 +81,8 @@ #ifdef FLAG_MODE_DECLARE // Structure used to hold a collection of arguments to the JavaScript code. struct JSArguments { -public: - inline const char*& operator[] (int idx) const { - return argv[idx]; - } + public: + inline const char*& operator[](int idx) const { return argv[idx]; } static JSArguments Create(int argc, const char** argv) { JSArguments args; args.argc = argc; @@ -128,37 +105,41 @@ }; #endif -#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST) -# define ENABLE_VFP3_DEFAULT true +#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE) +#define ENABLE_VFP3_DEFAULT true #else -# define ENABLE_VFP3_DEFAULT false +#define ENABLE_VFP3_DEFAULT false #endif -#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST) -# define ENABLE_ARMV7_DEFAULT true +#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE) +#define ENABLE_ARMV7_DEFAULT true #else -# define ENABLE_ARMV7_DEFAULT false +#define ENABLE_ARMV7_DEFAULT false #endif -#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST) -# define ENABLE_32DREGS_DEFAULT true +#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST_NO_FEATURE_PROBE) +#define 
ENABLE_32DREGS_DEFAULT true #else -# define ENABLE_32DREGS_DEFAULT false +#define ENABLE_32DREGS_DEFAULT false +#endif +#if (defined CAN_USE_NEON) || !(defined ARM_TEST_NO_FEATURE_PROBE) +# define ENABLE_NEON_DEFAULT true +#else +# define ENABLE_NEON_DEFAULT false #endif -#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt) -#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \ - { false COMMA false }, cmt) -#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt) -#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt) -#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt) -#define DEFINE_args(nam, cmt) FLAG(ARGS, JSArguments, nam, \ - { 0 COMMA NULL }, cmt) - -#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam) -#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam) -#define DEFINE_ALIAS_float(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam) -#define DEFINE_ALIAS_string(alias, nam) \ +#define DEFINE_BOOL(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt) +#define DEFINE_MAYBE_BOOL(nam, cmt) \ + FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, {false COMMA false}, cmt) +#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt) +#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt) +#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt) +#define DEFINE_ARGS(nam, cmt) FLAG(ARGS, JSArguments, nam, {0 COMMA NULL}, cmt) + +#define DEFINE_ALIAS_BOOL(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam) +#define DEFINE_ALIAS_INT(alias, nam) FLAG_ALIAS(INT, int, alias, nam) +#define DEFINE_ALIAS_FLOAT(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam) +#define DEFINE_ALIAS_STRING(alias, nam) \ FLAG_ALIAS(STRING, const char*, alias, nam) -#define DEFINE_ALIAS_args(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam) +#define DEFINE_ALIAS_ARGS(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam) // // Flags in all modes. 
@@ -166,543 +147,548 @@ #define FLAG FLAG_FULL // Flags for language modes and experimental language features. -DEFINE_bool(use_strict, false, "enforce strict mode") -DEFINE_bool(es_staging, false, "enable upcoming ES6+ features") +DEFINE_BOOL(use_strict, false, "enforce strict mode") +DEFINE_BOOL(es_staging, false, "enable upcoming ES6+ features") -DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof") -DEFINE_bool(harmony_scoping, false, "enable harmony block scoping") -DEFINE_bool(harmony_modules, false, +DEFINE_BOOL(harmony_scoping, false, "enable harmony block scoping") +DEFINE_BOOL(harmony_modules, false, "enable harmony modules (implies block scoping)") -DEFINE_bool(harmony_symbols, false, - "enable harmony symbols (a.k.a. private names)") -DEFINE_bool(harmony_proxies, false, "enable harmony proxies") -DEFINE_bool(harmony_collections, false, - "enable harmony collections (sets, maps)") -DEFINE_bool(harmony_generators, false, "enable harmony generators") -DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)") -DEFINE_bool(harmony_numeric_literals, false, +DEFINE_BOOL(harmony_proxies, false, "enable harmony proxies") +DEFINE_BOOL(harmony_generators, false, "enable harmony generators") +DEFINE_BOOL(harmony_numeric_literals, false, "enable harmony numeric literals (0o77, 0b11)") -DEFINE_bool(harmony_strings, false, "enable harmony string") -DEFINE_bool(harmony_arrays, false, "enable harmony arrays") -DEFINE_bool(harmony_maths, false, "enable harmony math functions") -DEFINE_bool(harmony, false, "enable all harmony features (except typeof)") - -DEFINE_implication(harmony, harmony_scoping) -DEFINE_implication(harmony, harmony_modules) -DEFINE_implication(harmony, harmony_symbols) -DEFINE_implication(harmony, harmony_proxies) -DEFINE_implication(harmony, harmony_collections) -DEFINE_implication(harmony, harmony_generators) -DEFINE_implication(harmony, harmony_iteration) -DEFINE_implication(harmony, harmony_numeric_literals) 
-DEFINE_implication(harmony, harmony_strings) -DEFINE_implication(harmony, harmony_arrays) -DEFINE_implication(harmony_modules, harmony_scoping) +DEFINE_BOOL(harmony_strings, false, "enable harmony string") +DEFINE_BOOL(harmony_arrays, false, "enable harmony arrays") +DEFINE_BOOL(harmony_arrow_functions, false, "enable harmony arrow functions") +DEFINE_BOOL(harmony, false, "enable all harmony features (except proxies)") + +DEFINE_IMPLICATION(harmony, harmony_scoping) +DEFINE_IMPLICATION(harmony, harmony_modules) +// TODO(rossberg): Reenable when problems are sorted out. +// DEFINE_IMPLICATION(harmony, harmony_proxies) +DEFINE_IMPLICATION(harmony, harmony_generators) +DEFINE_IMPLICATION(harmony, harmony_numeric_literals) +DEFINE_IMPLICATION(harmony, harmony_strings) +DEFINE_IMPLICATION(harmony, harmony_arrays) +DEFINE_IMPLICATION(harmony, harmony_arrow_functions) +DEFINE_IMPLICATION(harmony_modules, harmony_scoping) -DEFINE_implication(harmony, es_staging) -DEFINE_implication(es_staging, harmony_maths) +DEFINE_IMPLICATION(harmony, es_staging) // Flags for experimental implementation features. -DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes") -DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values") -DEFINE_bool(compiled_keyed_dictionary_loads, true, +DEFINE_BOOL(compiled_keyed_dictionary_loads, true, "use optimizing compiler to generate keyed dictionary load stubs") -DEFINE_bool(clever_optimizations, true, +DEFINE_BOOL(compiled_keyed_generic_loads, false, + "use optimizing compiler to generate keyed generic load stubs") +DEFINE_BOOL(clever_optimizations, true, "Optimize object size, Array shift, DOM strings and string +") -DEFINE_bool(pretenuring, true, "allocate objects in old space") // TODO(hpayer): We will remove this flag as soon as we have pretenuring // support for specific allocation sites. 
-DEFINE_bool(pretenuring_call_new, false, "pretenure call new") -DEFINE_bool(allocation_site_pretenuring, true, +DEFINE_BOOL(pretenuring_call_new, false, "pretenure call new") +DEFINE_BOOL(allocation_site_pretenuring, true, "pretenure with allocation sites") -DEFINE_bool(trace_pretenuring, false, +DEFINE_BOOL(trace_pretenuring, false, "trace pretenuring decisions of HAllocate instructions") -DEFINE_bool(trace_pretenuring_statistics, false, +DEFINE_BOOL(trace_pretenuring_statistics, false, "trace allocation site pretenuring statistics") -DEFINE_bool(track_fields, true, "track fields with only smi values") -DEFINE_bool(track_double_fields, true, "track fields with double values") -DEFINE_bool(track_heap_object_fields, true, "track fields with heap values") -DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields") -DEFINE_implication(track_double_fields, track_fields) -DEFINE_implication(track_heap_object_fields, track_fields) -DEFINE_implication(track_computed_fields, track_fields) -DEFINE_bool(smi_binop, true, "support smi representation in binary operations") +DEFINE_BOOL(track_fields, true, "track fields with only smi values") +DEFINE_BOOL(track_double_fields, true, "track fields with double values") +DEFINE_BOOL(track_heap_object_fields, true, "track fields with heap values") +DEFINE_BOOL(track_computed_fields, true, "track computed boilerplate fields") +DEFINE_IMPLICATION(track_double_fields, track_fields) +DEFINE_IMPLICATION(track_heap_object_fields, track_fields) +DEFINE_IMPLICATION(track_computed_fields, track_fields) +DEFINE_BOOL(track_field_types, true, "track field types") +DEFINE_IMPLICATION(track_field_types, track_fields) +DEFINE_IMPLICATION(track_field_types, track_heap_object_fields) +DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations") +DEFINE_BOOL(vector_ics, false, "support vector-based ics") // Flags for optimization types. 
-DEFINE_bool(optimize_for_size, false, +DEFINE_BOOL(optimize_for_size, false, "Enables optimizations which favor memory size over execution " "speed.") // Flags for data representation optimizations -DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles") -DEFINE_bool(string_slices, true, "use string slices") +DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles") +DEFINE_BOOL(string_slices, true, "use string slices") // Flags for Crankshaft. -DEFINE_bool(crankshaft, true, "use crankshaft") -DEFINE_string(hydrogen_filter, "*", "optimization filter") -DEFINE_bool(use_gvn, true, "use hydrogen global value numbering") -DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations") -DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing") -DEFINE_bool(use_inlining, true, "use function inlining") -DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis") -DEFINE_bool(use_allocation_folding, true, "use allocation folding") -DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks") -DEFINE_bool(use_write_barrier_elimination, true, +DEFINE_BOOL(crankshaft, true, "use crankshaft") +DEFINE_STRING(hydrogen_filter, "*", "optimization filter") +DEFINE_BOOL(use_gvn, true, "use hydrogen global value numbering") +DEFINE_INT(gvn_iterations, 3, "maximum number of GVN fix-point iterations") +DEFINE_BOOL(use_canonicalizing, true, "use hydrogen instruction canonicalizing") +DEFINE_BOOL(use_inlining, true, "use function inlining") +DEFINE_BOOL(use_escape_analysis, true, "use hydrogen escape analysis") +DEFINE_BOOL(use_allocation_folding, true, "use allocation folding") +DEFINE_BOOL(use_local_allocation_folding, false, "only fold in basic blocks") +DEFINE_BOOL(use_write_barrier_elimination, true, "eliminate write barriers targeting allocations in optimized code") -DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels") -DEFINE_int(max_inlined_source_size, 
600, +DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels") +DEFINE_INT(max_inlined_source_size, 600, "maximum source size in bytes considered for a single inlining") -DEFINE_int(max_inlined_nodes, 196, +DEFINE_INT(max_inlined_nodes, 196, "maximum number of AST nodes considered for a single inlining") -DEFINE_int(max_inlined_nodes_cumulative, 400, +DEFINE_INT(max_inlined_nodes_cumulative, 400, "maximum cumulative number of AST nodes considered for inlining") -DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion") -DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions") -DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true, +DEFINE_BOOL(loop_invariant_code_motion, true, "loop invariant code motion") +DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions") +DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, true, "crankshaft harvests type feedback from stub cache") -DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen") -DEFINE_bool(trace_check_elimination, false, "trace check elimination phase") -DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file") -DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter") -DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs") -DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name") -DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases") -DEFINE_bool(trace_inlining, false, "trace inlining decisions") -DEFINE_bool(trace_load_elimination, false, "trace load elimination") -DEFINE_bool(trace_store_elimination, false, "trace store elimination") -DEFINE_bool(trace_alloc, false, "trace register allocator") -DEFINE_bool(trace_all_uses, false, "trace all use positions") -DEFINE_bool(trace_range, false, "trace range analysis") -DEFINE_bool(trace_gvn, false, "trace global value numbering") -DEFINE_bool(trace_representation, false, "trace 
representation types") -DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis") -DEFINE_bool(trace_allocation_folding, false, "trace allocation folding") -DEFINE_bool(trace_track_allocation_sites, false, +DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen") +DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase") +DEFINE_BOOL(trace_hydrogen, false, "trace generated hydrogen to file") +DEFINE_STRING(trace_hydrogen_filter, "*", "hydrogen tracing filter") +DEFINE_BOOL(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs") +DEFINE_STRING(trace_hydrogen_file, NULL, "trace hydrogen to given file name") +DEFINE_STRING(trace_phase, "HLZ", "trace generated IR for specified phases") +DEFINE_BOOL(trace_inlining, false, "trace inlining decisions") +DEFINE_BOOL(trace_load_elimination, false, "trace load elimination") +DEFINE_BOOL(trace_store_elimination, false, "trace store elimination") +DEFINE_BOOL(trace_alloc, false, "trace register allocator") +DEFINE_BOOL(trace_all_uses, false, "trace all use positions") +DEFINE_BOOL(trace_range, false, "trace range analysis") +DEFINE_BOOL(trace_gvn, false, "trace global value numbering") +DEFINE_BOOL(trace_representation, false, "trace representation types") +DEFINE_BOOL(trace_removable_simulates, false, "trace removable simulates") +DEFINE_BOOL(trace_escape_analysis, false, "trace hydrogen escape analysis") +DEFINE_BOOL(trace_allocation_folding, false, "trace allocation folding") +DEFINE_BOOL(trace_track_allocation_sites, false, "trace the tracking of allocation sites") -DEFINE_bool(trace_migration, false, "trace object migration") -DEFINE_bool(trace_generalization, false, "trace map generalization") -DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction") -DEFINE_bool(stress_environments, false, "environment for every instruction") -DEFINE_int(deopt_every_n_times, 0, +DEFINE_BOOL(trace_migration, false, "trace object migration") 
+DEFINE_BOOL(trace_generalization, false, "trace map generalization") +DEFINE_BOOL(stress_pointer_maps, false, "pointer map for every instruction") +DEFINE_BOOL(stress_environments, false, "environment for every instruction") +DEFINE_INT(deopt_every_n_times, 0, "deoptimize every n times a deopt point is passed") -DEFINE_int(deopt_every_n_garbage_collections, 0, +DEFINE_INT(deopt_every_n_garbage_collections, 0, "deoptimize every n garbage collections") -DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points") -DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing") -DEFINE_bool(trap_on_stub_deopt, false, +DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points") +DEFINE_BOOL(trap_on_deopt, false, "put a break point before deoptimizing") +DEFINE_BOOL(trap_on_stub_deopt, false, "put a break point before deoptimizing a stub") -DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") -DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") -DEFINE_bool(use_osr, true, "use on-stack replacement") -DEFINE_bool(array_bounds_checks_elimination, true, +DEFINE_BOOL(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") +DEFINE_BOOL(polymorphic_inlining, true, "polymorphic inlining") +DEFINE_BOOL(use_osr, true, "use on-stack replacement") +DEFINE_BOOL(array_bounds_checks_elimination, true, "perform array bounds checks elimination") -DEFINE_bool(trace_bce, false, "trace array bounds check elimination") -DEFINE_bool(array_bounds_checks_hoisting, false, +DEFINE_BOOL(trace_bce, false, "trace array bounds check elimination") +DEFINE_BOOL(array_bounds_checks_hoisting, false, "perform array bounds checks hoisting") -DEFINE_bool(array_index_dehoisting, true, - "perform array index dehoisting") -DEFINE_bool(analyze_environment_liveness, true, +DEFINE_BOOL(array_index_dehoisting, true, "perform array index dehoisting") +DEFINE_BOOL(analyze_environment_liveness, true, "analyze liveness of 
environment slots and zap dead values") -DEFINE_bool(load_elimination, true, "use load elimination") -DEFINE_bool(check_elimination, true, "use check elimination") -DEFINE_bool(store_elimination, false, "use store elimination") -DEFINE_bool(dead_code_elimination, true, "use dead code elimination") -DEFINE_bool(fold_constants, true, "use constant folding") -DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination") -DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code") -DEFINE_bool(trace_osr, false, "trace on-stack replacement") -DEFINE_int(stress_runs, 0, "number of stress runs") -DEFINE_bool(optimize_closures, true, "optimize closures") -DEFINE_bool(lookup_sample_by_shared, true, +DEFINE_BOOL(load_elimination, true, "use load elimination") +DEFINE_BOOL(check_elimination, true, "use check elimination") +DEFINE_BOOL(store_elimination, false, "use store elimination") +DEFINE_BOOL(dead_code_elimination, true, "use dead code elimination") +DEFINE_BOOL(fold_constants, true, "use constant folding") +DEFINE_BOOL(trace_dead_code_elimination, false, "trace dead code elimination") +DEFINE_BOOL(unreachable_code_elimination, true, "eliminate unreachable code") +DEFINE_BOOL(trace_osr, false, "trace on-stack replacement") +DEFINE_INT(stress_runs, 0, "number of stress runs") +DEFINE_BOOL(optimize_closures, true, "optimize closures") +DEFINE_BOOL(lookup_sample_by_shared, true, "when picking a function to optimize, watch for shared function " "info, not JSFunction itself") -DEFINE_bool(cache_optimized_code, true, - "cache optimized code for closures") -DEFINE_bool(flush_optimized_code_cache, true, +DEFINE_BOOL(cache_optimized_code, true, "cache optimized code for closures") +DEFINE_BOOL(flush_optimized_code_cache, true, "flushes the cache of optimized code for closures on every GC") -DEFINE_bool(inline_construct, true, "inline constructor calls") -DEFINE_bool(inline_arguments, true, "inline functions with arguments object") 
-DEFINE_bool(inline_accessors, true, "inline JavaScript accessors") -DEFINE_int(escape_analysis_iterations, 2, +DEFINE_BOOL(inline_construct, true, "inline constructor calls") +DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object") +DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors") +DEFINE_INT(escape_analysis_iterations, 2, "maximum number of escape analysis fix-point iterations") -DEFINE_bool(optimize_for_in, true, - "optimize functions containing for-in loops") -DEFINE_bool(opt_safe_uint32_operations, true, +DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops") +DEFINE_BOOL(opt_safe_uint32_operations, true, "allow uint32 values on optimize frames if they are used only in " "safe operations") -DEFINE_bool(concurrent_recompilation, true, +DEFINE_BOOL(concurrent_recompilation, true, "optimizing hot functions asynchronously on a separate thread") -DEFINE_bool(trace_concurrent_recompilation, false, +DEFINE_BOOL(trace_concurrent_recompilation, false, "track concurrent recompilation") -DEFINE_int(concurrent_recompilation_queue_length, 8, +DEFINE_INT(concurrent_recompilation_queue_length, 8, "the length of the concurrent compilation queue") -DEFINE_int(concurrent_recompilation_delay, 0, +DEFINE_INT(concurrent_recompilation_delay, 0, "artificial compilation delay in ms") -DEFINE_bool(block_concurrent_recompilation, false, +DEFINE_BOOL(block_concurrent_recompilation, false, "block queued jobs until released") -DEFINE_bool(concurrent_osr, false, - "concurrent on-stack replacement") -DEFINE_implication(concurrent_osr, concurrent_recompilation) +DEFINE_BOOL(concurrent_osr, true, "concurrent on-stack replacement") +DEFINE_IMPLICATION(concurrent_osr, concurrent_recompilation) -DEFINE_bool(omit_map_checks_for_leaf_maps, true, +DEFINE_BOOL(omit_map_checks_for_leaf_maps, true, "do not emit check maps for constant values that have a leaf map, " "deoptimize the optimized code if the layout of the maps changes.") 
-DEFINE_int(typed_array_max_size_in_heap, 64, - "threshold for in-heap typed array") +// Flags for TurboFan. +DEFINE_STRING(turbo_filter, "~", "optimization filter for TurboFan compiler") +DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR") +DEFINE_BOOL(trace_turbo_types, true, "trace generated TurboFan types") +DEFINE_BOOL(trace_turbo_scheduler, false, "trace generated TurboFan scheduler") +DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase") +DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics") +DEFINE_BOOL(turbo_types, false, "use typed lowering in TurboFan") +DEFINE_BOOL(turbo_source_positions, false, + "track source code positions when building TurboFan IR") +DEFINE_BOOL(context_specialization, true, + "enable context specialization in TurboFan") +DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan") + +DEFINE_INT(typed_array_max_size_in_heap, 64, + "threshold for in-heap typed array") // Profiler flags. -DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler") - // 0x1800 fits in the immediate field of an ARM instruction. -DEFINE_int(interrupt_budget, 0x1800, +DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler") +// 0x1800 fits in the immediate field of an ARM instruction. 
+DEFINE_INT(interrupt_budget, 0x1800, "execution budget before interrupt is triggered") -DEFINE_int(type_info_threshold, 25, +DEFINE_INT(type_info_threshold, 25, "percentage of ICs that must have type info to allow optimization") -DEFINE_int(self_opt_count, 130, "call count before self-optimization") +DEFINE_INT(generic_ic_threshold, 30, + "max percentage of megamorphic/generic ICs to allow optimization") +DEFINE_INT(self_opt_count, 130, "call count before self-optimization") -DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing") -DEFINE_implication(trace_opt_verbose, trace_opt) +DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing") +DEFINE_IMPLICATION(trace_opt_verbose, trace_opt) // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc -DEFINE_bool(debug_code, false, - "generate extra code (assertions) for debugging") -DEFINE_bool(code_comments, false, "emit comments in code disassembly") -DEFINE_bool(enable_sse2, true, - "enable use of SSE2 instructions if available") -DEFINE_bool(enable_sse3, true, - "enable use of SSE3 instructions if available") -DEFINE_bool(enable_sse4_1, true, +DEFINE_BOOL(debug_code, false, "generate extra code (assertions) for debugging") +DEFINE_BOOL(code_comments, false, "emit comments in code disassembly") +DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available") +DEFINE_BOOL(enable_sse4_1, true, "enable use of SSE4.1 instructions if available") -DEFINE_bool(enable_cmov, true, - "enable use of CMOV instruction if available") -DEFINE_bool(enable_sahf, true, +DEFINE_BOOL(enable_sahf, true, "enable use of SAHF instruction if available (X64 only)") -DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT, +DEFINE_BOOL(enable_vfp3, ENABLE_VFP3_DEFAULT, "enable use of VFP3 instructions if available") -DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT, +DEFINE_BOOL(enable_armv7, ENABLE_ARMV7_DEFAULT, "enable use of ARMv7 instructions if available (ARM only)") -DEFINE_bool(enable_neon, true, 
+DEFINE_BOOL(enable_neon, ENABLE_NEON_DEFAULT, "enable use of NEON instructions if available (ARM only)") -DEFINE_bool(enable_sudiv, true, +DEFINE_BOOL(enable_sudiv, true, "enable use of SDIV and UDIV instructions if available (ARM only)") -DEFINE_bool(enable_movw_movt, false, +DEFINE_BOOL(enable_mls, true, + "enable use of MLS instructions if available (ARM only)") +DEFINE_BOOL(enable_movw_movt, false, "enable loading 32-bit constant by means of movw/movt " "instruction pairs (ARM only)") -DEFINE_bool(enable_unaligned_accesses, true, +DEFINE_BOOL(enable_unaligned_accesses, true, "enable unaligned accesses for ARMv7 (ARM only)") -DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT, +DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT, "enable use of d16-d31 registers on ARM - this requires VFP3") -DEFINE_bool(enable_vldr_imm, false, +DEFINE_BOOL(enable_vldr_imm, false, "enable use of constant pools for double immediate (ARM only)") -DEFINE_bool(force_long_branches, false, +DEFINE_BOOL(force_long_branches, false, "force all emitted branches to be in long mode (MIPS only)") +// cpu-arm64.cc +DEFINE_BOOL(enable_always_align_csp, true, + "enable alignment of csp to 16 bytes on platforms which prefer " + "the register to always be aligned (ARM64 only)") + // bootstrapper.cc -DEFINE_string(expose_natives_as, NULL, "expose natives in global object") -DEFINE_string(expose_debug_as, NULL, "expose debug in global object") -DEFINE_bool(expose_free_buffer, false, "expose freeBuffer extension") -DEFINE_bool(expose_gc, false, "expose gc extension") -DEFINE_string(expose_gc_as, NULL, +DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object") +DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object") +DEFINE_BOOL(expose_free_buffer, false, "expose freeBuffer extension") +DEFINE_BOOL(expose_gc, false, "expose gc extension") +DEFINE_STRING(expose_gc_as, NULL, "expose gc extension under the specified name") -DEFINE_implication(expose_gc_as, expose_gc) 
-DEFINE_bool(expose_externalize_string, false, +DEFINE_IMPLICATION(expose_gc_as, expose_gc) +DEFINE_BOOL(expose_externalize_string, false, "expose externalize string extension") -DEFINE_bool(expose_trigger_failure, false, "expose trigger-failure extension") -DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture") -DEFINE_bool(builtins_in_stack_traces, false, +DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension") +DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture") +DEFINE_BOOL(builtins_in_stack_traces, false, "show built-in functions in stack traces") -DEFINE_bool(disable_native_files, false, "disable builtin natives files") +DEFINE_BOOL(disable_native_files, false, "disable builtin natives files") // builtins-ia32.cc -DEFINE_bool(inline_new, true, "use fast inline allocation") +DEFINE_BOOL(inline_new, true, "use fast inline allocation") // codegen-ia32.cc / codegen-arm.cc -DEFINE_bool(trace_codegen, false, +DEFINE_BOOL(trace_codegen, false, "print name of functions for which code is generated") -DEFINE_bool(trace, false, "trace function calls") -DEFINE_bool(mask_constants_with_cookie, true, +DEFINE_BOOL(trace, false, "trace function calls") +DEFINE_BOOL(mask_constants_with_cookie, true, "use random jit cookie to mask large constants") // codegen.cc -DEFINE_bool(lazy, true, "use lazy compilation") -DEFINE_bool(trace_opt, false, "trace lazy optimization") -DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics") -DEFINE_bool(opt, true, "use adaptive optimizations") -DEFINE_bool(always_opt, false, "always try to optimize functions") -DEFINE_bool(always_osr, false, "always try to OSR functions") -DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt") -DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization") -DEFINE_bool(trace_stub_failures, false, +DEFINE_BOOL(lazy, true, "use lazy compilation") +DEFINE_BOOL(trace_opt, false, "trace lazy optimization") 
+DEFINE_BOOL(trace_opt_stats, false, "trace lazy optimization statistics") +DEFINE_BOOL(opt, true, "use adaptive optimizations") +DEFINE_BOOL(always_opt, false, "always try to optimize functions") +DEFINE_BOOL(always_osr, false, "always try to OSR functions") +DEFINE_BOOL(prepare_always_opt, false, "prepare for turning on always opt") +DEFINE_BOOL(trace_deopt, false, "trace optimize function deoptimization") +DEFINE_BOOL(trace_stub_failures, false, "trace deoptimization of generated code stubs") +DEFINE_BOOL(serialize_toplevel, false, "enable caching of toplevel scripts") + // compiler.cc -DEFINE_int(min_preparse_length, 1024, +DEFINE_INT(min_preparse_length, 1024, "minimum length for automatic enable preparsing") -DEFINE_bool(always_full_compiler, false, +DEFINE_BOOL(always_full_compiler, false, "try to use the dedicated run-once backend for all code") -DEFINE_int(max_opt_count, 10, +DEFINE_INT(max_opt_count, 10, "maximum number of optimization attempts before giving up.") // compilation-cache.cc -DEFINE_bool(compilation_cache, true, "enable compilation cache") +DEFINE_BOOL(compilation_cache, true, "enable compilation cache") -DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions") +DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions") // cpu-profiler.cc -DEFINE_int(cpu_profiler_sampling_interval, 1000, +DEFINE_INT(cpu_profiler_sampling_interval, 1000, "CPU profiler sampling interval in microseconds") // debug.cc -DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response") -DEFINE_bool(trace_js_array_abuse, false, +DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response") +DEFINE_BOOL(trace_js_array_abuse, false, "trace out-of-bounds accesses to JS arrays") -DEFINE_bool(trace_external_array_abuse, false, +DEFINE_BOOL(trace_external_array_abuse, false, "trace out-of-bounds-accesses to external arrays") -DEFINE_bool(trace_array_abuse, false, +DEFINE_BOOL(trace_array_abuse, false, 
"trace out-of-bounds accesses to all arrays") -DEFINE_implication(trace_array_abuse, trace_js_array_abuse) -DEFINE_implication(trace_array_abuse, trace_external_array_abuse) -DEFINE_bool(debugger_auto_break, true, - "automatically set the debug break flag when debugger commands are " - "in the queue") -DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature") -DEFINE_bool(hard_abort, true, "abort by crashing") +DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse) +DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse) +DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature") +DEFINE_BOOL(hard_abort, true, "abort by crashing") // execution.cc -// Slightly less than 1MB on 64-bit, since Windows' default stack size for +// Slightly less than 1MB, since Windows' default stack size for // the main execution thread is 1MB for both 32 and 64-bit. -DEFINE_int(stack_size, kPointerSize * 123, +DEFINE_INT(stack_size, 984, "default size of stack region v8 is allowed to use (in kBytes)") // frames.cc -DEFINE_int(max_stack_trace_source_length, 300, +DEFINE_INT(max_stack_trace_source_length, 300, "maximum length of function source code printed in a stack trace.") // full-codegen.cc -DEFINE_bool(always_inline_smi_code, false, +DEFINE_BOOL(always_inline_smi_code, false, "always inline smi code in non-opt code") // heap.cc -DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)") -DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)") -DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)") -DEFINE_bool(gc_global, false, "always perform global GCs") -DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations") -DEFINE_bool(trace_gc, false, +DEFINE_INT(min_semi_space_size, 0, + "min size of a semi-space (in MBytes), the new space consists of two" + "semi-spaces") +DEFINE_INT(max_semi_space_size, 0, + "max size of a semi-space (in MBytes), the new space 
consists of two" + "semi-spaces") +DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)") +DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)") +DEFINE_BOOL(gc_global, false, "always perform global GCs") +DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations") +DEFINE_BOOL(trace_gc, false, "print one trace line following each garbage collection") -DEFINE_bool(trace_gc_nvp, false, +DEFINE_BOOL(trace_gc_nvp, false, "print one detailed trace line in name=value format " "after each garbage collection") -DEFINE_bool(trace_gc_ignore_scavenger, false, +DEFINE_BOOL(trace_gc_ignore_scavenger, false, "do not print trace line after scavenger collection") -DEFINE_bool(print_cumulative_gc_stat, false, +DEFINE_BOOL(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") -DEFINE_bool(print_max_heap_committed, false, +DEFINE_BOOL(print_max_heap_committed, false, "print statistics of the maximum memory committed for the heap " "in name=value format on exit") -DEFINE_bool(trace_gc_verbose, false, +DEFINE_BOOL(trace_gc_verbose, false, "print more details following each garbage collection") -DEFINE_bool(trace_fragmentation, false, +DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old pointer and data pages") -DEFINE_bool(trace_external_memory, false, - "print amount of external allocated memory after each time " - "it is adjusted.") -DEFINE_bool(collect_maps, true, +DEFINE_BOOL(collect_maps, true, "garbage collect maps from which no objects can be reached") -DEFINE_bool(weak_embedded_maps_in_optimized_code, true, +DEFINE_BOOL(weak_embedded_maps_in_ic, true, + "make maps embedded in inline cache stubs") +DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true, "make maps embedded in optimized code weak") -DEFINE_bool(weak_embedded_objects_in_optimized_code, true, +DEFINE_BOOL(weak_embedded_objects_in_optimized_code, true, "make objects embedded in optimized code weak") 
-DEFINE_bool(flush_code, true, +DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again (during full gc)") -DEFINE_bool(flush_code_incrementally, true, +DEFINE_BOOL(flush_code_incrementally, true, "flush code that we expect not to use again (incrementally)") -DEFINE_bool(trace_code_flushing, false, "trace code flushing progress") -DEFINE_bool(age_code, true, +DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress") +DEFINE_BOOL(age_code, true, "track un-executed functions to age code and flush only " "old code (required for code flushing)") -DEFINE_bool(incremental_marking, true, "use incremental marking") -DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") -DEFINE_bool(trace_incremental_marking, false, +DEFINE_BOOL(incremental_marking, true, "use incremental marking") +DEFINE_BOOL(incremental_marking_steps, true, "do incremental marking steps") +DEFINE_BOOL(trace_incremental_marking, false, "trace progress of the incremental marking") -DEFINE_bool(track_gc_object_stats, false, +DEFINE_BOOL(track_gc_object_stats, false, "track object counts and memory usage") -DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping") -DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping") -DEFINE_int(sweeper_threads, 0, +DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely") +DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping") +DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping") +DEFINE_INT(sweeper_threads, 0, "number of parallel and concurrent sweeping threads") -DEFINE_bool(job_based_sweeping, false, "enable job based sweeping") +DEFINE_BOOL(job_based_sweeping, false, "enable job based sweeping") #ifdef VERIFY_HEAP -DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC") +DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC") #endif // heap-snapshot-generator.cc -DEFINE_bool(heap_profiler_trace_objects, false, 
+DEFINE_BOOL(heap_profiler_trace_objects, false, "Dump heap object allocations/movements/size_updates") // v8.cc -DEFINE_bool(use_idle_notification, true, +DEFINE_BOOL(use_idle_notification, true, "Use idle notification to reduce memory footprint.") // ic.cc -DEFINE_bool(use_ic, true, "use inline caching") +DEFINE_BOOL(use_ic, true, "use inline caching") +DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions") // macro-assembler-ia32.cc -DEFINE_bool(native_code_counters, false, +DEFINE_BOOL(native_code_counters, false, "generate extra code for manipulating stats counters") // mark-compact.cc -DEFINE_bool(always_compact, false, "Perform compaction on every full GC") -DEFINE_bool(lazy_sweeping, true, - "Use lazy sweeping for old pointer and data spaces") -DEFINE_bool(never_compact, false, +DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC") +DEFINE_BOOL(never_compact, false, "Never perform compaction on full GC - testing only") -DEFINE_bool(compact_code_space, true, +DEFINE_BOOL(compact_code_space, true, "Compact code space on full non-incremental collections") -DEFINE_bool(incremental_code_compaction, true, +DEFINE_BOOL(incremental_code_compaction, true, "Compact code space on full incremental collections") -DEFINE_bool(cleanup_code_caches_at_gc, true, +DEFINE_BOOL(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and " "flush code caches in maps during mark compact cycle.") -DEFINE_bool(use_marking_progress_bar, true, +DEFINE_BOOL(use_marking_progress_bar, true, "Use a progress bar to scan large objects in increments when " "incremental marking is active.") -DEFINE_bool(zap_code_space, true, +DEFINE_BOOL(zap_code_space, true, "Zap free memory in code space with 0xCC while sweeping.") -DEFINE_int(random_seed, 0, +DEFINE_INT(random_seed, 0, "Default seed for initializing random generator " "(0, the default, means to use system random).") // objects.cc -DEFINE_bool(use_verbose_printer, true, 
"allows verbose printing") +DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing") // parser.cc -DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") -DEFINE_bool(trace_parse, false, "trace parsing and preparsing") +DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax") +DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing") // simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc -DEFINE_bool(trace_sim, false, "Trace simulator execution") -DEFINE_bool(debug_sim, false, "Enable debugging the simulator") -DEFINE_bool(check_icache, false, +DEFINE_BOOL(trace_sim, false, "Trace simulator execution") +DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator") +DEFINE_BOOL(check_icache, false, "Check icache flushes in ARM and MIPS simulator") -DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") -#ifdef V8_TARGET_ARCH_ARM64 -DEFINE_int(sim_stack_alignment, 16, +DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions") +#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) +DEFINE_INT(sim_stack_alignment, 16, "Stack alignment in bytes in simulator. This must be a power of two " "and it must be at least 16. 
16 is default.") #else -DEFINE_int(sim_stack_alignment, 8, +DEFINE_INT(sim_stack_alignment, 8, "Stack alingment in bytes in simulator (4 or 8, 8 is default)") #endif -DEFINE_int(sim_stack_size, 2 * MB / KB, - "Stack size of the ARM64 simulator in kBytes (default is 2 MB)") -DEFINE_bool(log_regs_modified, true, +DEFINE_INT(sim_stack_size, 2 * MB / KB, + "Stack size of the ARM64 and MIPS64 simulator " + "in kBytes (default is 2 MB)") +DEFINE_BOOL(log_regs_modified, true, "When logging register values, only print modified registers.") -DEFINE_bool(log_colour, true, - "When logging, try to use coloured output.") -DEFINE_bool(ignore_asm_unimplemented_break, false, +DEFINE_BOOL(log_colour, true, "When logging, try to use coloured output.") +DEFINE_BOOL(ignore_asm_unimplemented_break, false, "Don't break for ASM_UNIMPLEMENTED_BREAK macros.") -DEFINE_bool(trace_sim_messages, false, +DEFINE_BOOL(trace_sim_messages, false, "Trace simulator debug messages. Implied by --trace-sim.") // isolate.cc -DEFINE_bool(stack_trace_on_illegal, false, +DEFINE_BOOL(stack_trace_on_illegal, false, "print stack trace when an illegal exception is thrown") -DEFINE_bool(abort_on_uncaught_exception, false, +DEFINE_BOOL(abort_on_uncaught_exception, false, "abort program (dump core) when an uncaught exception is thrown") -DEFINE_bool(randomize_hashes, true, +DEFINE_BOOL(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions " "(with snapshots this option cannot override the baked-in seed)") -DEFINE_int(hash_seed, 0, +DEFINE_INT(hash_seed, 0, "Fixed seed to use to hash property keys (0 means random)" "(with snapshots this option cannot override the baked-in seed)") // snapshot-common.cc -DEFINE_bool(profile_deserialization, false, +DEFINE_BOOL(profile_deserialization, false, "Print the time it takes to deserialize the snapshot.") // Regexp -DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") +DEFINE_BOOL(regexp_optimization, true, "generate optimized 
regexp code") // Testing flags test/cctest/test-{flags,api,serialization}.cc -DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") -DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag") -DEFINE_int(testing_int_flag, 13, "testing_int_flag") -DEFINE_float(testing_float_flag, 2.5, "float-flag") -DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") -DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") +DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag") +DEFINE_MAYBE_BOOL(testing_maybe_bool_flag, "testing_maybe_bool_flag") +DEFINE_INT(testing_int_flag, 13, "testing_int_flag") +DEFINE_FLOAT(testing_float_flag, 2.5, "float-flag") +DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag") +DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness") #ifdef _WIN32 -DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes", +DEFINE_STRING(testing_serialization_file, "C:\\Windows\\Temp\\serdes", "file in which to testing_serialize heap") #else -DEFINE_string(testing_serialization_file, "/tmp/serdes", +DEFINE_STRING(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") #endif // mksnapshot.cc -DEFINE_string(extra_code, NULL, "A filename with extra code to be included in" - " the snapshot (mksnapshot only)") +DEFINE_STRING(extra_code, NULL, + "A filename with extra code to be included in" + " the snapshot (mksnapshot only)") +DEFINE_STRING(raw_file, NULL, + "A file to write the raw snapshot bytes to. " + "(mksnapshot only)") +DEFINE_STRING(raw_context_file, NULL, + "A file to write the raw context " + "snapshot bytes to. (mksnapshot only)") +DEFINE_STRING(startup_blob, NULL, + "Write V8 startup blob file. 
" + "(mksnapshot only)") // code-stubs-hydrogen.cc -DEFINE_bool(profile_hydrogen_code_stub_compilation, false, +DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false, "Print the time it takes to lazily compile hydrogen code stubs.") -DEFINE_bool(predictable, false, "enable predictable mode") -DEFINE_neg_implication(predictable, concurrent_recompilation) -DEFINE_neg_implication(predictable, concurrent_osr) -DEFINE_neg_implication(predictable, concurrent_sweeping) -DEFINE_neg_implication(predictable, parallel_sweeping) +DEFINE_BOOL(predictable, false, "enable predictable mode") +DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation) +DEFINE_NEG_IMPLICATION(predictable, concurrent_osr) +DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping) +DEFINE_NEG_IMPLICATION(predictable, parallel_sweeping) // // Dev shell flags // -DEFINE_bool(help, false, "Print usage message, including flags, on console") -DEFINE_bool(dump_counters, false, "Dump counters on exit") +DEFINE_BOOL(help, false, "Print usage message, including flags, on console") +DEFINE_BOOL(dump_counters, false, "Dump counters on exit") -#ifdef ENABLE_DEBUGGER_SUPPORT -DEFINE_bool(debugger, false, "Enable JavaScript debugger") -DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the " - "debugger agent in another process") -DEFINE_bool(debugger_agent, false, "Enable debugger agent") -DEFINE_int(debugger_port, 5858, "Port to use for remote debugging") -#endif // ENABLE_DEBUGGER_SUPPORT +DEFINE_BOOL(debugger, false, "Enable JavaScript debugger") -DEFINE_string(map_counters, "", "Map counters to a file") -DEFINE_args(js_arguments, +DEFINE_STRING(map_counters, "", "Map counters to a file") +DEFINE_ARGS(js_arguments, "Pass all remaining arguments to the script. 
Alias for \"--\".") -#if defined(WEBOS__) -DEFINE_bool(debug_compile_events, false, "Enable debugger compile events") -DEFINE_bool(debug_script_collected_events, false, - "Enable debugger script collected events") -#else -DEFINE_bool(debug_compile_events, true, "Enable debugger compile events") -DEFINE_bool(debug_script_collected_events, true, - "Enable debugger script collected events") -#endif - - // // GDB JIT integration flags. // -DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)") -DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects") -DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk") -DEFINE_string(gdbjit_dump_filter, "", +DEFINE_BOOL(gdbjit, false, "enable GDBJIT interface (disables compacting GC)") +DEFINE_BOOL(gdbjit_full, false, "enable GDBJIT interface for all code objects") +DEFINE_BOOL(gdbjit_dump, false, "dump elf objects with debug info to disk") +DEFINE_STRING(gdbjit_dump_filter, "", "dump only objects containing this substring") // mark-compact.cc -DEFINE_bool(force_marking_deque_overflows, false, +DEFINE_BOOL(force_marking_deque_overflows, false, "force overflows of marking deque by reducing it's size " "to 64 words") -DEFINE_bool(stress_compaction, false, +DEFINE_BOOL(stress_compaction, false, "stress the GC compactor to flush out bugs (implies " "--force_marking_deque_overflows)") @@ -717,64 +703,64 @@ #endif // checks.cc -#ifdef ENABLE_SLOW_ASSERTS -DEFINE_bool(enable_slow_asserts, false, +#ifdef ENABLE_SLOW_DCHECKS +DEFINE_BOOL(enable_slow_asserts, false, "enable asserts that are slow to execute") #endif // codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc -DEFINE_bool(print_source, false, "pretty print source code") -DEFINE_bool(print_builtin_source, false, +DEFINE_BOOL(print_source, false, "pretty print source code") +DEFINE_BOOL(print_builtin_source, false, "pretty print source code for builtins") -DEFINE_bool(print_ast, false, "print source AST") 
-DEFINE_bool(print_builtin_ast, false, "print source AST for builtins") -DEFINE_string(stop_at, "", "function name where to insert a breakpoint") -DEFINE_bool(trap_on_abort, false, "replace aborts by breakpoints") +DEFINE_BOOL(print_ast, false, "print source AST") +DEFINE_BOOL(print_builtin_ast, false, "print source AST for builtins") +DEFINE_STRING(stop_at, "", "function name where to insert a breakpoint") +DEFINE_BOOL(trap_on_abort, false, "replace aborts by breakpoints") // compiler.cc -DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins") -DEFINE_bool(print_scopes, false, "print scopes") +DEFINE_BOOL(print_builtin_scopes, false, "print scopes for builtins") +DEFINE_BOOL(print_scopes, false, "print scopes") // contexts.cc -DEFINE_bool(trace_contexts, false, "trace contexts operations") +DEFINE_BOOL(trace_contexts, false, "trace contexts operations") // heap.cc -DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations") -DEFINE_bool(gc_verbose, false, "print stuff during garbage collection") -DEFINE_bool(heap_stats, false, "report heap statistics before and after GC") -DEFINE_bool(code_stats, false, "report code statistics after GC") -DEFINE_bool(verify_native_context_separation, false, +DEFINE_BOOL(gc_verbose, false, "print stuff during garbage collection") +DEFINE_BOOL(heap_stats, false, "report heap statistics before and after GC") +DEFINE_BOOL(code_stats, false, "report code statistics after GC") +DEFINE_BOOL(verify_native_context_separation, false, "verify that code holds on to at most one native context after GC") -DEFINE_bool(print_handles, false, "report handles after GC") -DEFINE_bool(print_global_handles, false, "report global handles after GC") +DEFINE_BOOL(print_handles, false, "report handles after GC") +DEFINE_BOOL(print_global_handles, false, "report global handles after GC") -// ic.cc -DEFINE_bool(trace_ic, false, "trace inline cache state transitions") +// TurboFan debug-only flags. 
+DEFINE_BOOL(print_turbo_replay, false, + "print C++ code to recreate TurboFan graphs") // interface.cc -DEFINE_bool(print_interfaces, false, "print interfaces") -DEFINE_bool(print_interface_details, false, "print interface inference details") -DEFINE_int(print_interface_depth, 5, "depth for printing interfaces") +DEFINE_BOOL(print_interfaces, false, "print interfaces") +DEFINE_BOOL(print_interface_details, false, "print interface inference details") +DEFINE_INT(print_interface_depth, 5, "depth for printing interfaces") // objects.cc -DEFINE_bool(trace_normalization, false, +DEFINE_BOOL(trace_normalization, false, "prints when objects are turned into dictionaries.") // runtime.cc -DEFINE_bool(trace_lazy, false, "trace lazy compilation") +DEFINE_BOOL(trace_lazy, false, "trace lazy compilation") // spaces.cc -DEFINE_bool(collect_heap_spill_statistics, false, +DEFINE_BOOL(collect_heap_spill_statistics, false, "report heap spill statistics along with heap_stats " "(requires heap_stats)") -DEFINE_bool(trace_isolates, false, "trace isolate state changes") +DEFINE_BOOL(trace_isolates, false, "trace isolate state changes") // Regexp -DEFINE_bool(regexp_possessive_quantifier, false, +DEFINE_BOOL(regexp_possessive_quantifier, false, "enable possessive quantifier syntax for testing") -DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution") -DEFINE_bool(trace_regexp_assembler, false, +DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution") +DEFINE_BOOL(trace_regexp_assembler, false, "trace regexp macro assembler calls.") // @@ -784,51 +770,52 @@ #define FLAG FLAG_FULL // log.cc -DEFINE_bool(log, false, +DEFINE_BOOL(log, false, "Minimal logging (no API, code, GC, suspect, or handles samples).") -DEFINE_bool(log_all, false, "Log all events to the log file.") -DEFINE_bool(log_runtime, false, "Activate runtime system %Log call.") -DEFINE_bool(log_api, false, "Log API events to the log file.") -DEFINE_bool(log_code, false, 
+DEFINE_BOOL(log_all, false, "Log all events to the log file.") +DEFINE_BOOL(log_api, false, "Log API events to the log file.") +DEFINE_BOOL(log_code, false, "Log code events to the log file without profiling.") -DEFINE_bool(log_gc, false, +DEFINE_BOOL(log_gc, false, "Log heap samples on garbage collection for the hp2ps tool.") -DEFINE_bool(log_handles, false, "Log global handle events.") -DEFINE_bool(log_snapshot_positions, false, +DEFINE_BOOL(log_handles, false, "Log global handle events.") +DEFINE_BOOL(log_snapshot_positions, false, "log positions of (de)serialized objects in the snapshot.") -DEFINE_bool(log_suspect, false, "Log suspect operations.") -DEFINE_bool(prof, false, +DEFINE_BOOL(log_suspect, false, "Log suspect operations.") +DEFINE_BOOL(prof, false, "Log statistical profiling information (implies --log-code).") -DEFINE_bool(prof_browser_mode, true, +DEFINE_BOOL(prof_browser_mode, true, "Used with --prof, turns on browser-compatible mode for profiling.") -DEFINE_bool(log_regexp, false, "Log regular expression execution.") -DEFINE_string(logfile, "v8.log", "Specify the name of the log file.") -DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.") -DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.") -DEFINE_bool(perf_basic_prof, false, +DEFINE_BOOL(log_regexp, false, "Log regular expression execution.") +DEFINE_STRING(logfile, "v8.log", "Specify the name of the log file.") +DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.") +DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.") +DEFINE_BOOL(perf_basic_prof, false, "Enable perf linux profiler (basic support).") -DEFINE_bool(perf_jit_prof, false, +DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space) +DEFINE_BOOL(perf_jit_prof, false, "Enable perf linux profiler (experimental annotate support).") -DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__", +DEFINE_NEG_IMPLICATION(perf_jit_prof, compact_code_space) 
+DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__", "Specify the name of the file for fake gc mmap used in ll_prof") -DEFINE_bool(log_internal_timer_events, false, "Time internal events.") -DEFINE_bool(log_timer_events, false, +DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.") +DEFINE_BOOL(log_timer_events, false, "Time events including external callbacks.") -DEFINE_implication(log_timer_events, log_internal_timer_events) -DEFINE_implication(log_internal_timer_events, prof) -DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.") -DEFINE_string(log_instruction_file, "arm64_inst.csv", +DEFINE_IMPLICATION(log_timer_events, log_internal_timer_events) +DEFINE_IMPLICATION(log_internal_timer_events, prof) +DEFINE_BOOL(log_instruction_stats, false, "Log AArch64 instruction statistics.") +DEFINE_STRING(log_instruction_file, "arm64_inst.csv", "AArch64 instruction statistics log file.") -DEFINE_int(log_instruction_period, 1 << 22, +DEFINE_INT(log_instruction_period, 1 << 22, "AArch64 instruction statistics logging period.") -DEFINE_bool(redirect_code_traces, false, +DEFINE_BOOL(redirect_code_traces, false, "output deopt information and disassembly into file " "code-<pid>-<isolate id>.asm") -DEFINE_string(redirect_code_traces_to, NULL, - "output deopt information and disassembly into the given file") +DEFINE_STRING(redirect_code_traces_to, NULL, + "output deopt information and disassembly into the given file") -DEFINE_bool(hydrogen_track_positions, false, +DEFINE_BOOL(hydrogen_track_positions, false, "track source code positions when building IR") // @@ -842,51 +829,71 @@ #endif // elements.cc -DEFINE_bool(trace_elements_transitions, false, "trace elements transitions") +DEFINE_BOOL(trace_elements_transitions, false, "trace elements transitions") -DEFINE_bool(trace_creation_allocation_sites, false, +DEFINE_BOOL(trace_creation_allocation_sites, false, "trace the creation of allocation sites") // code-stubs.cc 
-DEFINE_bool(print_code_stubs, false, "print code stubs") -DEFINE_bool(test_secondary_stub_cache, false, +DEFINE_BOOL(print_code_stubs, false, "print code stubs") +DEFINE_BOOL(test_secondary_stub_cache, false, "test secondary stub cache by disabling the primary one") -DEFINE_bool(test_primary_stub_cache, false, +DEFINE_BOOL(test_primary_stub_cache, false, "test primary stub cache by disabling the secondary one") // codegen-ia32.cc / codegen-arm.cc -DEFINE_bool(print_code, false, "print generated code") -DEFINE_bool(print_opt_code, false, "print optimized code") -DEFINE_bool(print_unopt_code, false, "print unoptimized code before " +DEFINE_BOOL(print_code, false, "print generated code") +DEFINE_BOOL(print_opt_code, false, "print optimized code") +DEFINE_BOOL(print_unopt_code, false, + "print unoptimized code before " "printing optimized code based on it") -DEFINE_bool(print_code_verbose, false, "print more information for code") -DEFINE_bool(print_builtin_code, false, "print generated code for builtins") +DEFINE_BOOL(print_code_verbose, false, "print more information for code") +DEFINE_BOOL(print_builtin_code, false, "print generated code for builtins") #ifdef ENABLE_DISASSEMBLER -DEFINE_bool(sodium, false, "print generated code output suitable for use with " +DEFINE_BOOL(sodium, false, + "print generated code output suitable for use with " "the Sodium code viewer") -DEFINE_implication(sodium, print_code_stubs) -DEFINE_implication(sodium, print_code) -DEFINE_implication(sodium, print_opt_code) -DEFINE_implication(sodium, hydrogen_track_positions) -DEFINE_implication(sodium, code_comments) - -DEFINE_bool(print_all_code, false, "enable all flags related to printing code") -DEFINE_implication(print_all_code, print_code) -DEFINE_implication(print_all_code, print_opt_code) -DEFINE_implication(print_all_code, print_unopt_code) -DEFINE_implication(print_all_code, print_code_verbose) -DEFINE_implication(print_all_code, print_builtin_code) -DEFINE_implication(print_all_code, 
print_code_stubs) -DEFINE_implication(print_all_code, code_comments) +DEFINE_IMPLICATION(sodium, print_code_stubs) +DEFINE_IMPLICATION(sodium, print_code) +DEFINE_IMPLICATION(sodium, print_opt_code) +DEFINE_IMPLICATION(sodium, hydrogen_track_positions) +DEFINE_IMPLICATION(sodium, code_comments) + +DEFINE_BOOL(print_all_code, false, "enable all flags related to printing code") +DEFINE_IMPLICATION(print_all_code, print_code) +DEFINE_IMPLICATION(print_all_code, print_opt_code) +DEFINE_IMPLICATION(print_all_code, print_unopt_code) +DEFINE_IMPLICATION(print_all_code, print_code_verbose) +DEFINE_IMPLICATION(print_all_code, print_builtin_code) +DEFINE_IMPLICATION(print_all_code, print_code_stubs) +DEFINE_IMPLICATION(print_all_code, code_comments) #ifdef DEBUG -DEFINE_implication(print_all_code, trace_codegen) +DEFINE_IMPLICATION(print_all_code, trace_codegen) #endif #endif + +// +// VERIFY_PREDICTABLE related flags +// +#undef FLAG + +#ifdef VERIFY_PREDICTABLE +#define FLAG FLAG_FULL +#else +#define FLAG FLAG_READONLY +#endif + +DEFINE_BOOL(verify_predictable, false, + "this mode is used for checking that V8 behaves predictably") +DEFINE_INT(dump_allocations_digest_at_alloc, 0, + "dump allocations digest each n-th allocation") + + // // Read-only flags // @@ -894,7 +901,7 @@ #define FLAG FLAG_READONLY // assembler-arm.h -DEFINE_bool(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL, +DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL, "enable use of out-of-line constant pools (ARM only)") // Cleanup... 
@@ -903,19 +910,19 @@ #undef FLAG #undef FLAG_ALIAS -#undef DEFINE_bool -#undef DEFINE_maybe_bool -#undef DEFINE_int -#undef DEFINE_string -#undef DEFINE_float -#undef DEFINE_args -#undef DEFINE_implication -#undef DEFINE_neg_implication -#undef DEFINE_ALIAS_bool -#undef DEFINE_ALIAS_int -#undef DEFINE_ALIAS_string -#undef DEFINE_ALIAS_float -#undef DEFINE_ALIAS_args +#undef DEFINE_BOOL +#undef DEFINE_MAYBE_BOOL +#undef DEFINE_INT +#undef DEFINE_STRING +#undef DEFINE_FLOAT +#undef DEFINE_ARGS +#undef DEFINE_IMPLICATION +#undef DEFINE_NEG_IMPLICATION +#undef DEFINE_ALIAS_BOOL +#undef DEFINE_ALIAS_INT +#undef DEFINE_ALIAS_STRING +#undef DEFINE_ALIAS_FLOAT +#undef DEFINE_ALIAS_ARGS #undef FLAG_MODE_DECLARE #undef FLAG_MODE_DEFINE diff -Nru nodejs-0.11.13/deps/v8/src/flags.cc nodejs-0.11.15/deps/v8/src/flags.cc --- nodejs-0.11.13/deps/v8/src/flags.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/flags.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,53 +1,26 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <ctype.h> #include <stdlib.h> -#include "v8.h" +#include "src/v8.h" -#include "platform.h" -#include "smart-pointers.h" -#include "string-stream.h" - -#if V8_TARGET_ARCH_ARM -#include "arm/assembler-arm-inl.h" -#endif +#include "src/assembler.h" +#include "src/base/platform/platform.h" +#include "src/ostreams.h" namespace v8 { namespace internal { // Define all of our flags. #define FLAG_MODE_DEFINE -#include "flag-definitions.h" +#include "src/flag-definitions.h" // NOLINT // Define all of our flags default values. 
#define FLAG_MODE_DEFINE_DEFAULTS -#include "flag-definitions.h" +#include "src/flag-definitions.h" // NOLINT namespace { @@ -72,32 +45,32 @@ const char* comment() const { return cmt_; } bool* bool_variable() const { - ASSERT(type_ == TYPE_BOOL); + DCHECK(type_ == TYPE_BOOL); return reinterpret_cast<bool*>(valptr_); } MaybeBoolFlag* maybe_bool_variable() const { - ASSERT(type_ == TYPE_MAYBE_BOOL); + DCHECK(type_ == TYPE_MAYBE_BOOL); return reinterpret_cast<MaybeBoolFlag*>(valptr_); } int* int_variable() const { - ASSERT(type_ == TYPE_INT); + DCHECK(type_ == TYPE_INT); return reinterpret_cast<int*>(valptr_); } double* float_variable() const { - ASSERT(type_ == TYPE_FLOAT); + DCHECK(type_ == TYPE_FLOAT); return reinterpret_cast<double*>(valptr_); } const char* string_value() const { - ASSERT(type_ == TYPE_STRING); + DCHECK(type_ == TYPE_STRING); return *reinterpret_cast<const char**>(valptr_); } void set_string_value(const char* value, bool owns_ptr) { - ASSERT(type_ == TYPE_STRING); + DCHECK(type_ == TYPE_STRING); const char** ptr = reinterpret_cast<const char**>(valptr_); if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr); *ptr = value; @@ -105,32 +78,32 @@ } JSArguments* args_variable() const { - ASSERT(type_ == TYPE_ARGS); + DCHECK(type_ == TYPE_ARGS); return reinterpret_cast<JSArguments*>(valptr_); } bool bool_default() const { - ASSERT(type_ == TYPE_BOOL); + DCHECK(type_ == TYPE_BOOL); return *reinterpret_cast<const bool*>(defptr_); } int int_default() const { - ASSERT(type_ == TYPE_INT); + DCHECK(type_ == TYPE_INT); return *reinterpret_cast<const int*>(defptr_); } double float_default() const { - ASSERT(type_ == TYPE_FLOAT); + DCHECK(type_ == TYPE_FLOAT); return *reinterpret_cast<const double*>(defptr_); } const char* string_default() const { - ASSERT(type_ == TYPE_STRING); + DCHECK(type_ == TYPE_STRING); return *reinterpret_cast<const char* const *>(defptr_); } JSArguments args_default() const { - ASSERT(type_ == TYPE_ARGS); + DCHECK(type_ == TYPE_ARGS); return 
*reinterpret_cast<const JSArguments*>(defptr_); } @@ -186,7 +159,7 @@ Flag flags[] = { #define FLAG_MODE_META -#include "flag-definitions.h" +#include "src/flag-definitions.h" }; const size_t num_flags = sizeof(flags) / sizeof(*flags); @@ -208,41 +181,39 @@ } -static SmartArrayPointer<const char> ToString(Flag* flag) { - HeapStringAllocator string_allocator; - StringStream buffer(&string_allocator); - switch (flag->type()) { +OStream& operator<<(OStream& os, const Flag& flag) { // NOLINT + switch (flag.type()) { case Flag::TYPE_BOOL: - buffer.Add("%s", (*flag->bool_variable() ? "true" : "false")); + os << (*flag.bool_variable() ? "true" : "false"); break; case Flag::TYPE_MAYBE_BOOL: - buffer.Add("%s", flag->maybe_bool_variable()->has_value - ? (flag->maybe_bool_variable()->value ? "true" : "false") - : "unset"); + os << (flag.maybe_bool_variable()->has_value + ? (flag.maybe_bool_variable()->value ? "true" : "false") + : "unset"); break; case Flag::TYPE_INT: - buffer.Add("%d", *flag->int_variable()); + os << *flag.int_variable(); break; case Flag::TYPE_FLOAT: - buffer.Add("%f", FmtElm(*flag->float_variable())); + os << *flag.float_variable(); break; case Flag::TYPE_STRING: { - const char* str = flag->string_value(); - buffer.Add("%s", str ? str : "NULL"); + const char* str = flag.string_value(); + os << (str ? str : "NULL"); break; } case Flag::TYPE_ARGS: { - JSArguments args = *flag->args_variable(); + JSArguments args = *flag.args_variable(); if (args.argc > 0) { - buffer.Add("%s", args[0]); + os << args[0]; for (int i = 1; i < args.argc; i++) { - buffer.Add(" %s", args[i]); + os << args[i]; } } break; } } - return buffer.ToCString(); + return os; } @@ -254,28 +225,27 @@ Flag* f = &flags[i]; if (!f->IsDefault()) { if (f->type() == Flag::TYPE_ARGS) { - ASSERT(args_flag == NULL); + DCHECK(args_flag == NULL); args_flag = f; // Must be last in arguments. 
continue; } - HeapStringAllocator string_allocator; - StringStream buffer(&string_allocator); - if (f->type() != Flag::TYPE_BOOL || *(f->bool_variable())) { - buffer.Add("--%s", f->name()); - } else { - buffer.Add("--no%s", f->name()); + { + bool disabled = f->type() == Flag::TYPE_BOOL && !*f->bool_variable(); + OStringStream os; + os << (disabled ? "--no" : "--") << f->name(); + args->Add(StrDup(os.c_str())); } - args->Add(buffer.ToCString().Detach()); if (f->type() != Flag::TYPE_BOOL) { - args->Add(ToString(f).Detach()); + OStringStream os; + os << *f; + args->Add(StrDup(os.c_str())); } } } if (args_flag != NULL) { - HeapStringAllocator string_allocator; - StringStream buffer(&string_allocator); - buffer.Add("--%s", args_flag->name()); - args->Add(buffer.ToCString().Detach()); + OStringStream os; + os << "--" << args_flag->name(); + args->Add(StrDup(os.c_str())); JSArguments jsargs = *args_flag->args_variable(); for (int j = 0; j < jsargs.argc; j++) { args->Add(StrDup(jsargs[j])); @@ -331,7 +301,7 @@ // make a copy so we can NUL-terminate flag name size_t n = arg - *name; CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small - OS::MemCopy(buffer, *name, n); + MemCopy(buffer, *name, n); buffer[n] = '\0'; *name = buffer; // get the value @@ -402,7 +372,8 @@ value == NULL) { if (i < *argc) { value = argv[i++]; - } else { + } + if (!value) { PrintF(stderr, "Error: missing value for flag %s of type %s\n" "Try --help for options\n", arg, Type2String(flag->type())); @@ -501,7 +472,7 @@ int FlagList::SetFlagsFromString(const char* str, int len) { // make a 0-terminated copy of str ScopedVector<char> copy0(len + 1); - OS::MemCopy(copy0.start(), str, len); + MemCopy(copy0.start(), str, len); copy0[len] = '\0'; // strip leading white space @@ -543,30 +514,29 @@ // static void FlagList::PrintHelp() { -#if V8_TARGET_ARCH_ARM + CpuFeatures::Probe(false); CpuFeatures::PrintTarget(); - CpuFeatures::Probe(); CpuFeatures::PrintFeatures(); -#endif // 
V8_TARGET_ARCH_ARM - printf("Usage:\n"); - printf(" shell [options] -e string\n"); - printf(" execute string in V8\n"); - printf(" shell [options] file1 file2 ... filek\n"); - printf(" run JavaScript scripts in file1, file2, ..., filek\n"); - printf(" shell [options]\n"); - printf(" shell [options] --shell [file1 file2 ... filek]\n"); - printf(" run an interactive JavaScript shell\n"); - printf(" d8 [options] file1 file2 ... filek\n"); - printf(" d8 [options]\n"); - printf(" d8 [options] --shell [file1 file2 ... filek]\n"); - printf(" run the new debugging shell\n\n"); - printf("Options:\n"); + OFStream os(stdout); + os << "Usage:\n" + << " shell [options] -e string\n" + << " execute string in V8\n" + << " shell [options] file1 file2 ... filek\n" + << " run JavaScript scripts in file1, file2, ..., filek\n" + << " shell [options]\n" + << " shell [options] --shell [file1 file2 ... filek]\n" + << " run an interactive JavaScript shell\n" + << " d8 [options] file1 file2 ... filek\n" + << " d8 [options]\n" + << " d8 [options] --shell [file1 file2 ... 
filek]\n" + << " run the new debugging shell\n\n" + << "Options:\n"; for (size_t i = 0; i < num_flags; ++i) { Flag* f = &flags[i]; - SmartArrayPointer<const char> value = ToString(f); - printf(" --%s (%s)\n type: %s default: %s\n", - f->name(), f->comment(), Type2String(f->type()), value.get()); + os << " --" << f->name() << " (" << f->comment() << ")\n" + << " type: " << Type2String(f->type()) << " default: " << *f + << "\n"; } } @@ -574,7 +544,7 @@ // static void FlagList::EnforceFlagImplications() { #define FLAG_MODE_DEFINE_IMPLICATIONS -#include "flag-definitions.h" +#include "src/flag-definitions.h" #undef FLAG_MODE_DEFINE_IMPLICATIONS } diff -Nru nodejs-0.11.13/deps/v8/src/flags.h nodejs-0.11.15/deps/v8/src/flags.h --- nodejs-0.11.13/deps/v8/src/flags.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/flags.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FLAGS_H_ #define V8_FLAGS_H_ -#include "atomicops.h" +#include "src/globals.h" namespace v8 { namespace internal { // Declare all of our flags. #define FLAG_MODE_DECLARE -#include "flag-definitions.h" +#include "src/flag-definitions.h" // NOLINT // The global list of all flags. class FlagList { @@ -63,7 +40,9 @@ // --flag=value (non-bool flags only, no spaces around '=') // --flag value (non-bool flags only) // -- (equivalent to --js_arguments, captures all remaining args) - static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags); + static int SetFlagsFromCommandLine(int* argc, + char** argv, + bool remove_flags); // Set the flag values by parsing the string str. Splits string into argc // substrings argv[], each of which consisting of non-white-space chars, diff -Nru nodejs-0.11.13/deps/v8/src/frames.cc nodejs-0.11.15/deps/v8/src/frames.cc --- nodejs-0.11.13/deps/v8/src/frames.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/frames.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "ast.h" -#include "deoptimizer.h" -#include "frames-inl.h" -#include "full-codegen.h" -#include "lazy-instance.h" -#include "mark-compact.h" -#include "safepoint-table.h" -#include "scopeinfo.h" -#include "string-stream.h" -#include "vm-state-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/ast.h" +#include "src/deoptimizer.h" +#include "src/frames-inl.h" +#include "src/full-codegen.h" +#include "src/heap/mark-compact.h" +#include "src/safepoint-table.h" +#include "src/scopeinfo.h" +#include "src/string-stream.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { @@ -53,7 +29,7 @@ StackHandlerIterator(const StackFrame* frame, StackHandler* handler) : limit_(frame->fp()), handler_(handler) { // Make sure the handler has already been unwound to this frame. - ASSERT(frame->sp() <= handler->address()); + DCHECK(frame->sp() <= handler->address()); } StackHandler* handler() const { return handler_; } @@ -62,7 +38,7 @@ return handler_ == NULL || handler_->address() > limit_; } void Advance() { - ASSERT(!done()); + DCHECK(!done()); handler_ = handler_->next(); } @@ -99,7 +75,7 @@ void StackFrameIterator::Advance() { - ASSERT(!done()); + DCHECK(!done()); // Compute the state of the calling frame before restoring // callee-saved registers and unwinding handlers. This allows the // frame code that computes the caller state to access the top @@ -117,7 +93,7 @@ // When we're done iterating over the stack frames, the handler // chain must have been completely unwound. 
- ASSERT(!done() || handler_ == NULL); + DCHECK(!done() || handler_ == NULL); } @@ -135,7 +111,7 @@ StackFrame::State* state) { if (type == StackFrame::NONE) return NULL; StackFrame* result = SingletonFor(type); - ASSERT(result != NULL); + DCHECK(result != NULL); result->state_ = *state; return result; } @@ -180,7 +156,7 @@ void JavaScriptFrameIterator::AdvanceToArgumentsFrame() { if (!frame()->has_adapted_arguments()) return; iterator_.Advance(); - ASSERT(iterator_.frame()->is_arguments_adaptor()); + DCHECK(iterator_.frame()->is_arguments_adaptor()); } @@ -229,7 +205,7 @@ type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state); top_frame_type_ = type; } else if (IsValidStackAddress(fp)) { - ASSERT(fp != NULL); + DCHECK(fp != NULL); state.fp = fp; state.sp = sp; state.pc_address = StackFrame::ResolveReturnAddressLocation( @@ -282,7 +258,7 @@ void SafeStackFrameIterator::AdvanceOneFrame() { - ASSERT(!done()); + DCHECK(!done()); StackFrame* last_frame = frame_; Address last_sp = last_frame->sp(), last_fp = last_frame->fp(); // Before advancing to the next stack frame, perform pointer validity tests. 
@@ -365,7 +341,7 @@ frame_->state_.pc_address = callback_address; } external_callback_scope_ = external_callback_scope_->previous(); - ASSERT(external_callback_scope_ == NULL || + DCHECK(external_callback_scope_ == NULL || external_callback_scope_->scope_address() > frame_->fp()); return; } @@ -385,9 +361,9 @@ isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer); if (!entry->safepoint_entry.is_valid()) { entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer); - ASSERT(entry->safepoint_entry.is_valid()); + DCHECK(entry->safepoint_entry.is_valid()); } else { - ASSERT(entry->safepoint_entry.Equals( + DCHECK(entry->safepoint_entry.Equals( entry->code->GetSafepointEntry(inner_pointer))); } @@ -414,7 +390,7 @@ Address* pc_address, Code* holder) { Address pc = *pc_address; - ASSERT(GcSafeCodeContains(holder, pc)); + DCHECK(GcSafeCodeContains(holder, pc)); unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start()); Object* code = holder; v->VisitPointer(&code); @@ -428,14 +404,14 @@ void StackFrame::SetReturnAddressLocationResolver( ReturnAddressLocationResolver resolver) { - ASSERT(return_address_location_resolver_ == NULL); + DCHECK(return_address_location_resolver_ == NULL); return_address_location_resolver_ = resolver; } StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator, State* state) { - ASSERT(state->fp != NULL); + DCHECK(state->fp != NULL); if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) { return ARGUMENTS_ADAPTOR; } @@ -452,7 +428,7 @@ if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT; Code::Kind kind = GetContainingCode(iterator->isolate(), *(state->pc_address))->kind(); - ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION); + DCHECK(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION); return (kind == Code::OPTIMIZED_FUNCTION) ? 
OPTIMIZED : JAVA_SCRIPT; } return static_cast<StackFrame::Type>(Smi::cast(marker)->value()); @@ -473,7 +449,7 @@ Address StackFrame::UnpaddedFP() const { -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 if (!is_optimized()) return fp(); int32_t alignment_state = Memory::int32_at( fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset); @@ -563,7 +539,7 @@ if (fp == 0) return NONE; Address sp = ComputeStackPointer(fp); FillState(fp, sp, state); - ASSERT(*state->pc_address != NULL); + DCHECK(*state->pc_address != NULL); return EXIT; } @@ -605,7 +581,7 @@ StandardFrameConstants::kExpressionsOffset + kPointerSize; Address base = fp() + offset; Address limit = sp(); - ASSERT(base >= limit); // stack grows downwards + DCHECK(base >= limit); // stack grows downwards // Include register-allocated locals in number of expressions. return static_cast<int>((base - limit) / kPointerSize); } @@ -639,7 +615,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const { // Make sure that we're not doing "safe" stack frame iteration. We cannot // possibly find pointers in optimized frames in that state. - ASSERT(can_access_heap_objects()); + DCHECK(can_access_heap_objects()); // Compute the safepoint information. unsigned stack_slots = 0; @@ -663,7 +639,7 @@ // Skip saved double registers. if (safepoint_entry.has_doubles()) { // Number of doubles not known at snapshot time. - ASSERT(!Serializer::enabled()); + DCHECK(!isolate()->serializer_enabled()); parameters_base += DoubleRegister::NumAllocatableRegisters() * kDoubleSize / kPointerSize; } @@ -732,7 +708,7 @@ #ifdef DEBUG // Make sure that optimized frames do not contain any stack handlers. 
StackHandlerIterator it(this, top_handler()); - ASSERT(it.done()); + DCHECK(it.done()); #endif IterateCompiledFrame(v); @@ -770,7 +746,7 @@ int JavaScriptFrame::GetNumberOfIncomingArguments() const { - ASSERT(can_access_heap_objects() && + DCHECK(can_access_heap_objects() && isolate()->heap()->gc_state() == Heap::NOT_IN_GC); return function()->shared()->formal_parameter_count(); @@ -783,13 +759,13 @@ void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) { - ASSERT(functions->length() == 0); + DCHECK(functions->length() == 0); functions->Add(function()); } void JavaScriptFrame::Summarize(List<FrameSummary>* functions) { - ASSERT(functions->length() == 0); + DCHECK(functions->length() == 0); Code* code_pointer = LookupCode(); int offset = static_cast<int>(pc() - code_pointer->address()); FrameSummary summary(receiver(), @@ -801,49 +777,47 @@ } -void JavaScriptFrame::PrintTop(Isolate* isolate, - FILE* file, - bool print_args, +void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code, + Address pc, FILE* file, + bool print_line_number) { + PrintF(file, "%s", function->IsOptimized() ? 
"*" : "~"); + function->PrintName(file); + int code_offset = static_cast<int>(pc - code->instruction_start()); + PrintF(file, "+%d", code_offset); + if (print_line_number) { + SharedFunctionInfo* shared = function->shared(); + int source_pos = code->SourcePosition(pc); + Object* maybe_script = shared->script(); + if (maybe_script->IsScript()) { + Script* script = Script::cast(maybe_script); + int line = script->GetLineNumber(source_pos) + 1; + Object* script_name_raw = script->name(); + if (script_name_raw->IsString()) { + String* script_name = String::cast(script->name()); + SmartArrayPointer<char> c_script_name = + script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + PrintF(file, " at %s:%d", c_script_name.get(), line); + } else { + PrintF(file, " at <unknown>:%d", line); + } + } else { + PrintF(file, " at <unknown>:<unknown>"); + } + } +} + + +void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args, bool print_line_number) { // constructor calls - HandleScope scope(isolate); DisallowHeapAllocation no_allocation; JavaScriptFrameIterator it(isolate); while (!it.done()) { if (it.frame()->is_java_script()) { JavaScriptFrame* frame = it.frame(); if (frame->IsConstructor()) PrintF(file, "new "); - // function name - JSFunction* fun = frame->function(); - fun->PrintName(); - Code* js_code = frame->unchecked_code(); - Address pc = frame->pc(); - int code_offset = - static_cast<int>(pc - js_code->instruction_start()); - PrintF("+%d", code_offset); - SharedFunctionInfo* shared = fun->shared(); - if (print_line_number) { - Code* code = Code::cast(isolate->FindCodeObject(pc)); - int source_pos = code->SourcePosition(pc); - Object* maybe_script = shared->script(); - if (maybe_script->IsScript()) { - Handle<Script> script(Script::cast(maybe_script)); - int line = GetScriptLineNumberSafe(script, source_pos) + 1; - Object* script_name_raw = script->name(); - if (script_name_raw->IsString()) { - String* script_name = 
String::cast(script->name()); - SmartArrayPointer<char> c_script_name = - script_name->ToCString(DISALLOW_NULLS, - ROBUST_STRING_TRAVERSAL); - PrintF(file, " at %s:%d", c_script_name.get(), line); - } else { - PrintF(file, " at <unknown>:%d", line); - } - } else { - PrintF(file, " at <unknown>:<unknown>"); - } - } - + PrintFunctionAndOffset(frame->function(), frame->unchecked_code(), + frame->pc(), file, print_line_number); if (print_args) { // function arguments // (we are intentionally only printing the actually @@ -867,7 +841,7 @@ void JavaScriptFrame::SaveOperandStack(FixedArray* store, int* stack_handler_index) const { int operands_count = store->length(); - ASSERT_LE(operands_count, ComputeOperandsCount()); + DCHECK_LE(operands_count, ComputeOperandsCount()); // Visit the stack in LIFO order, saving operands and stack handlers into the // array. The saved stack handlers store a link to the next stack handler, @@ -881,8 +855,8 @@ for (; GetOperandSlot(i) < handler->address(); i--) { store->set(i, GetOperand(i)); } - ASSERT_GE(i + 1, StackHandlerConstants::kSlotCount); - ASSERT_EQ(handler->address(), GetOperandSlot(i)); + DCHECK_GE(i + 1, StackHandlerConstants::kSlotCount); + DCHECK_EQ(handler->address(), GetOperandSlot(i)); int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount; handler->Unwind(isolate(), store, next_stack_handler_index, *stack_handler_index); @@ -900,17 +874,17 @@ void JavaScriptFrame::RestoreOperandStack(FixedArray* store, int stack_handler_index) { int operands_count = store->length(); - ASSERT_LE(operands_count, ComputeOperandsCount()); + DCHECK_LE(operands_count, ComputeOperandsCount()); int i = 0; while (i <= stack_handler_index) { if (i < stack_handler_index) { // An operand. - ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value()); + DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value()); Memory::Object_at(GetOperandSlot(i)) = store->get(i); i++; } else { // A stack handler. 
- ASSERT_EQ(i, stack_handler_index); + DCHECK_EQ(i, stack_handler_index); // The FixedArray store grows up. The stack grows down. So the operand // slot for i actually points to the bottom of the top word in the // handler. The base of the StackHandler* is the address of the bottom @@ -924,7 +898,7 @@ } for (; i < operands_count; i++) { - ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value()); + DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value()); Memory::Object_at(GetOperandSlot(i)) = store->get(i); } } @@ -954,8 +928,14 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) { - ASSERT(frames->length() == 0); - ASSERT(is_optimized()); + DCHECK(frames->length() == 0); + DCHECK(is_optimized()); + + // Delegate to JS frame in absence of inlining. + // TODO(turbofan): Revisit once we support inlining. + if (LookupCode()->is_turbofanned()) { + return JavaScriptFrame::Summarize(frames); + } int deopt_index = Safepoint::kNoDeoptimizationIndex; DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index); @@ -966,15 +946,12 @@ // throw. An entry with no deoptimization index indicates a call-site // without a lazy-deopt. As a consequence we are not allowed to inline // functions containing throw. - if (deopt_index == Safepoint::kNoDeoptimizationIndex) { - JavaScriptFrame::Summarize(frames); - return; - } + DCHECK(deopt_index != Safepoint::kNoDeoptimizationIndex); TranslationIterator it(data->TranslationByteArray(), data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next()); - ASSERT(opcode == Translation::BEGIN); + DCHECK(opcode == Translation::BEGIN); it.Next(); // Drop frame count. int jsframe_count = it.Next(); @@ -991,13 +968,10 @@ it.Next(); // Skip height. // The translation commands are ordered and the receiver is always - // at the first position. Since we are always at a call when we need - // to construct a stack trace, the receiver is always in a stack slot. 
+ // at the first position. + // If we are at a call, the receiver is always in a stack slot. + // Otherwise we are not guaranteed to get the receiver value. opcode = static_cast<Translation::Opcode>(it.Next()); - ASSERT(opcode == Translation::STACK_SLOT || - opcode == Translation::LITERAL || - opcode == Translation::CAPTURED_OBJECT || - opcode == Translation::DUPLICATED_OBJECT); int index = it.Next(); // Get the correct receiver in the optimized frame. @@ -1021,6 +995,7 @@ : this->GetParameter(parameter_index); } } else { + // The receiver is not in a stack slot nor in a literal. We give up. // TODO(3029): Materializing a captured object (or duplicated // object) is hard, we return undefined for now. This breaks the // produced stack trace, as constructor frames aren't marked as @@ -1036,7 +1011,7 @@ function->shared()); unsigned pc_offset = FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize; - ASSERT(pc_offset > 0); + DCHECK(pc_offset > 0); FrameSummary summary(receiver, function, code, pc_offset, is_constructor); frames->Add(summary); @@ -1044,20 +1019,20 @@ } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) { // The next encountered JS_FRAME will be marked as a constructor call. it.Skip(Translation::NumberOfOperandsFor(opcode)); - ASSERT(!is_constructor); + DCHECK(!is_constructor); is_constructor = true; } else { // Skip over operands to advance to the next opcode. 
it.Skip(Translation::NumberOfOperandsFor(opcode)); } } - ASSERT(!is_constructor); + DCHECK(!is_constructor); } DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData( int* deopt_index) { - ASSERT(is_optimized()); + DCHECK(is_optimized()); JSFunction* opt_function = function(); Code* code = opt_function->code(); @@ -1069,19 +1044,25 @@ code = isolate()->inner_pointer_to_code_cache()-> GcSafeFindCodeForInnerPointer(pc()); } - ASSERT(code != NULL); - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(code != NULL); + DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION); SafepointEntry safepoint_entry = code->GetSafepointEntry(pc()); *deopt_index = safepoint_entry.deoptimization_index(); - ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex); + DCHECK(*deopt_index != Safepoint::kNoDeoptimizationIndex); return DeoptimizationInputData::cast(code->deoptimization_data()); } int OptimizedFrame::GetInlineCount() { - ASSERT(is_optimized()); + DCHECK(is_optimized()); + + // Delegate to JS frame in absence of inlining. + // TODO(turbofan): Revisit once we support inlining. + if (LookupCode()->is_turbofanned()) { + return JavaScriptFrame::GetInlineCount(); + } int deopt_index = Safepoint::kNoDeoptimizationIndex; DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index); @@ -1089,7 +1070,7 @@ TranslationIterator it(data->TranslationByteArray(), data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next()); - ASSERT(opcode == Translation::BEGIN); + DCHECK(opcode == Translation::BEGIN); USE(opcode); it.Next(); // Drop frame count. int jsframe_count = it.Next(); @@ -1098,8 +1079,14 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) { - ASSERT(functions->length() == 0); - ASSERT(is_optimized()); + DCHECK(functions->length() == 0); + DCHECK(is_optimized()); + + // Delegate to JS frame in absence of inlining. + // TODO(turbofan): Revisit once we support inlining. 
+ if (LookupCode()->is_turbofanned()) { + return JavaScriptFrame::GetFunctions(functions); + } int deopt_index = Safepoint::kNoDeoptimizationIndex; DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index); @@ -1108,7 +1095,7 @@ TranslationIterator it(data->TranslationByteArray(), data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next()); - ASSERT(opcode == Translation::BEGIN); + DCHECK(opcode == Translation::BEGIN); it.Next(); // Drop frame count. int jsframe_count = it.Next(); @@ -1156,7 +1143,7 @@ Code* InternalFrame::unchecked_code() const { const int offset = InternalFrameConstants::kCodeOffset; Object* code = Memory::Object_at(fp() + offset); - ASSERT(code != NULL); + DCHECK(code != NULL); return reinterpret_cast<Code*>(code); } @@ -1171,7 +1158,7 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode, int index) const { - HandleScope scope(isolate()); + DisallowHeapAllocation no_gc; Object* receiver = this->receiver(); JSFunction* function = this->function(); @@ -1185,13 +1172,11 @@ // doesn't contain scope info, scope_info will return 0 for the number of // parameters, stack local variables, context local variables, stack slots, // or context slots. 
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate())); - - Handle<SharedFunctionInfo> shared(function->shared()); - scope_info = Handle<ScopeInfo>(shared->scope_info()); + SharedFunctionInfo* shared = function->shared(); + ScopeInfo* scope_info = shared->scope_info(); Object* script_obj = shared->script(); if (script_obj->IsScript()) { - Handle<Script> script(Script::cast(script_obj)); + Script* script = Script::cast(script_obj); accumulator->Add(" ["); accumulator->PrintName(script->name()); @@ -1199,11 +1184,11 @@ if (code != NULL && code->kind() == Code::FUNCTION && pc >= code->instruction_start() && pc < code->instruction_end()) { int source_pos = code->SourcePosition(pc); - int line = GetScriptLineNumberSafe(script, source_pos) + 1; + int line = script->GetLineNumber(source_pos) + 1; accumulator->Add(":%d", line); } else { int function_start_pos = shared->start_position(); - int line = GetScriptLineNumberSafe(script, function_start_pos) + 1; + int line = script->GetLineNumber(function_start_pos) + 1; accumulator->Add(":~%d", line); } @@ -1263,6 +1248,10 @@ if (this->context() != NULL && this->context()->IsContext()) { context = Context::cast(this->context()); } + while (context->IsWithContext()) { + context = context->previous(); + DCHECK(context != NULL); + } // Print heap-allocated local variables. if (heap_locals_count > 0) { @@ -1273,8 +1262,9 @@ accumulator->PrintName(scope_info->ContextLocalName(i)); accumulator->Add(" = "); if (context != NULL) { - if (i < context->length()) { - accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i)); + int index = Context::MIN_CONTEXT_SLOTS + i; + if (index < context->length()) { + accumulator->Add("%o", context->get(index)); } else { accumulator->Add( "// warning: missing context slot - inconsistent frame?"); @@ -1297,10 +1287,12 @@ // Print details about the function. 
if (FLAG_max_stack_trace_source_length != 0 && code != NULL) { + OStringStream os; SharedFunctionInfo* shared = function->shared(); - accumulator->Add("--------- s o u r c e c o d e ---------\n"); - shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length); - accumulator->Add("\n-----------------------------------------\n"); + os << "--------- s o u r c e c o d e ---------\n" + << SourceCodeOf(shared, FLAG_max_stack_trace_source_length) + << "\n-----------------------------------------\n"; + accumulator->Add(os.c_str()); } accumulator->Add("}\n\n"); @@ -1339,15 +1331,15 @@ void EntryFrame::Iterate(ObjectVisitor* v) const { StackHandlerIterator it(this, top_handler()); - ASSERT(!it.done()); + DCHECK(!it.done()); StackHandler* handler = it.handler(); - ASSERT(handler->is_js_entry()); + DCHECK(handler->is_js_entry()); handler->Iterate(v, LookupCode()); #ifdef DEBUG // Make sure that the entry frame does not contain more than one // stack handler. it.Advance(); - ASSERT(it.done()); + DCHECK(it.done()); #endif IteratePc(v, pc_address(), LookupCode()); } @@ -1406,14 +1398,14 @@ Code* StubFailureTrampolineFrame::unchecked_code() const { Code* trampoline; - StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE). - FindCodeInCache(&trampoline, isolate()); + StubFailureTrampolineStub(isolate(), NOT_JS_FUNCTION_STUB_MODE). + FindCodeInCache(&trampoline); if (trampoline->contains(pc())) { return trampoline; } - StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE). - FindCodeInCache(&trampoline, isolate()); + StubFailureTrampolineStub(isolate(), JS_FUNCTION_STUB_MODE). 
+ FindCodeInCache(&trampoline); if (trampoline->contains(pc())) { return trampoline; } @@ -1427,7 +1419,7 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) { - ASSERT(n >= 0); + DCHECK(n >= 0); for (int i = 0; i <= n; i++) { while (!iterator_.frame()->is_java_script()) iterator_.Advance(); if (i == n) return JavaScriptFrame::cast(iterator_.frame()); @@ -1456,7 +1448,7 @@ #ifdef DEBUG static bool GcSafeCodeContains(HeapObject* code, Address addr) { Map* map = GcSafeMapOfCodeSpaceObject(code); - ASSERT(map == code->GetHeap()->code_map()); + DCHECK(map == code->GetHeap()->code_map()); Address start = code->address(); Address end = code->address() + code->SizeFromMap(map); return start <= addr && addr < end; @@ -1467,7 +1459,7 @@ Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object, Address inner_pointer) { Code* code = reinterpret_cast<Code*>(object); - ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer)); + DCHECK(code != NULL && GcSafeCodeContains(code, inner_pointer)); return code; } @@ -1508,7 +1500,7 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) { isolate_->counters()->pc_to_code()->Increment(); - ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize)); + DCHECK(IsPowerOf2(kInnerPointerToCodeCacheSize)); uint32_t hash = ComputeIntegerHash( static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)), v8::internal::kZeroHashSeed); @@ -1516,7 +1508,7 @@ InnerPointerToCodeCacheEntry* entry = cache(index); if (entry->inner_pointer == inner_pointer) { isolate_->counters()->pc_to_code_cached()->Increment(); - ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer)); + DCHECK(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer)); } else { // Because this code may be interrupted by a profiling signal that // also queries the cache, we cannot update inner_pointer before the code @@ -1538,8 +1530,8 @@ int offset, int previous_handler_offset) 
const { STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5); - ASSERT_LE(0, offset); - ASSERT_GE(array->length(), offset + StackHandlerConstants::kSlotCount); + DCHECK_LE(0, offset); + DCHECK_GE(array->length(), offset + StackHandlerConstants::kSlotCount); // Unwinding a stack handler into an array chains it in the opposite // direction, re-using the "next" slot as a "previous" link, so that stack // handlers can be later re-wound in the correct order. Decode the "state" @@ -1559,8 +1551,8 @@ int offset, Address fp) { STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5); - ASSERT_LE(0, offset); - ASSERT_GE(array->length(), offset + StackHandlerConstants::kSlotCount); + DCHECK_LE(0, offset); + DCHECK_GE(array->length(), offset + StackHandlerConstants::kSlotCount); Smi* prev_handler_offset = Smi::cast(array->get(offset)); Code* code = Code::cast(array->get(offset + 1)); Smi* smi_index = Smi::cast(array->get(offset + 2)); @@ -1603,12 +1595,12 @@ if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_code_data.reg_code[i++] = r; - ASSERT(i == kNumJSCallerSaved); + DCHECK(i == kNumJSCallerSaved); } int JSCallerSavedCode(int n) { - ASSERT(0 <= n && n < kNumJSCallerSaved); + DCHECK(0 <= n && n < kNumJSCallerSaved); return caller_saved_code_data.reg_code[n]; } diff -Nru nodejs-0.11.13/deps/v8/src/frames.h nodejs-0.11.15/deps/v8/src/frames.h --- nodejs-0.11.13/deps/v8/src/frames.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/frames.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_FRAMES_H_ #define V8_FRAMES_H_ -#include "allocation.h" -#include "handles.h" -#include "safepoint-table.h" +#include "src/allocation.h" +#include "src/handles.h" +#include "src/safepoint-table.h" namespace v8 { namespace internal { @@ -394,7 +371,7 @@ virtual void Iterate(ObjectVisitor* v) const; static EntryFrame* cast(StackFrame* frame) { - ASSERT(frame->is_entry()); + DCHECK(frame->is_entry()); return static_cast<EntryFrame*>(frame); } virtual void SetCallerFp(Address caller_fp); @@ -422,7 +399,7 @@ virtual Code* unchecked_code() const; static EntryConstructFrame* cast(StackFrame* frame) { - ASSERT(frame->is_entry_construct()); + DCHECK(frame->is_entry_construct()); return static_cast<EntryConstructFrame*>(frame); } @@ -450,7 +427,7 @@ virtual void SetCallerFp(Address caller_fp); static ExitFrame* cast(StackFrame* frame) { - ASSERT(frame->is_exit()); + DCHECK(frame->is_exit()); return static_cast<ExitFrame*>(frame); } @@ -490,7 +467,7 @@ virtual void SetCallerFp(Address caller_fp); static StandardFrame* cast(StackFrame* frame) { - ASSERT(frame->is_standard()); + DCHECK(frame->is_standard()); return static_cast<StandardFrame*>(frame); } @@ -633,13 +610,15 @@ static Register constant_pool_pointer_register(); static JavaScriptFrame* cast(StackFrame* frame) { - ASSERT(frame->is_java_script()); + DCHECK(frame->is_java_script()); return static_cast<JavaScriptFrame*>(frame); } - static void PrintTop(Isolate* isolate, - FILE* file, - bool print_args, + static void PrintFunctionAndOffset(JSFunction* function, Code* code, + Address pc, FILE* file, + bool print_line_number); + + static void PrintTop(Isolate* isolate, FILE* file, bool print_args, bool print_line_number); protected: @@ -720,7 +699,7 @@ virtual Code* unchecked_code() const; static ArgumentsAdaptorFrame* cast(StackFrame* frame) { - ASSERT(frame->is_arguments_adaptor()); + DCHECK(frame->is_arguments_adaptor()); return static_cast<ArgumentsAdaptorFrame*>(frame); } @@ -752,7 +731,7 @@ virtual Code* 
unchecked_code() const; static InternalFrame* cast(StackFrame* frame) { - ASSERT(frame->is_internal()); + DCHECK(frame->is_internal()); return static_cast<InternalFrame*>(frame); } @@ -807,7 +786,7 @@ virtual Type type() const { return CONSTRUCT; } static ConstructFrame* cast(StackFrame* frame) { - ASSERT(frame->is_construct()); + DCHECK(frame->is_construct()); return static_cast<ConstructFrame*>(frame); } @@ -838,7 +817,7 @@ const bool can_access_heap_objects_; StackHandler* handler() const { - ASSERT(!done()); + DCHECK(!done()); return handler_; } @@ -861,7 +840,7 @@ StackFrameIterator(Isolate* isolate, ThreadLocalTop* t); StackFrame* frame() const { - ASSERT(!done()); + DCHECK(!done()); return frame_; } void Advance(); @@ -953,13 +932,6 @@ }; -// Used specify the type of prologue to generate. -enum PrologueFrameMode { - BUILD_FUNCTION_FRAME, - BUILD_STUB_FRAME -}; - - // Reads all frames on the current stack and copies them into the current // zone memory. Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone); diff -Nru nodejs-0.11.13/deps/v8/src/frames-inl.h nodejs-0.11.15/deps/v8/src/frames-inl.h --- nodejs-0.11.13/deps/v8/src/frames-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/frames-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,28 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FRAMES_INL_H_ #define V8_FRAMES_INL_H_ -#include "frames.h" -#include "isolate.h" -#include "v8memory.h" +#include "src/frames.h" +#include "src/isolate.h" +#include "src/v8memory.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/frames-ia32.h" +#include "src/ia32/frames-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/frames-x64.h" +#include "src/x64/frames-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/frames-arm64.h" +#include "src/arm64/frames-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/frames-arm.h" +#include "src/arm/frames-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/frames-mips.h" +#include "src/mips/frames-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/frames-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/frames-x87.h" // NOLINT #else #error Unsupported target architecture. 
#endif @@ -227,7 +208,7 @@ Address JavaScriptFrame::GetParameterSlot(int index) const { int param_count = ComputeParametersCount(); - ASSERT(-1 <= index && index < param_count); + DCHECK(-1 <= index && index < param_count); int parameter_offset = (param_count - index - 1) * kPointerSize; return caller_sp() + parameter_offset; } @@ -240,10 +221,10 @@ inline Address JavaScriptFrame::GetOperandSlot(int index) const { Address base = fp() + JavaScriptFrameConstants::kLocal0Offset; - ASSERT(IsAddressAligned(base, kPointerSize)); - ASSERT_EQ(type(), JAVA_SCRIPT); - ASSERT_LT(index, ComputeOperandsCount()); - ASSERT_LE(0, index); + DCHECK(IsAddressAligned(base, kPointerSize)); + DCHECK_EQ(type(), JAVA_SCRIPT); + DCHECK_LT(index, ComputeOperandsCount()); + DCHECK_LE(0, index); // Operand stack grows down. return base - index * kPointerSize; } @@ -259,9 +240,9 @@ // Base points to low address of first operand and stack grows down, so add // kPointerSize to get the actual stack size. intptr_t stack_size_in_bytes = (base + kPointerSize) - sp(); - ASSERT(IsAligned(stack_size_in_bytes, kPointerSize)); - ASSERT(type() == JAVA_SCRIPT); - ASSERT(stack_size_in_bytes >= 0); + DCHECK(IsAligned(stack_size_in_bytes, kPointerSize)); + DCHECK(type() == JAVA_SCRIPT); + DCHECK(stack_size_in_bytes >= 0); return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2); } @@ -336,14 +317,14 @@ // the JavaScript frame type, because we may encounter arguments // adaptor frames. 
StackFrame* frame = iterator_.frame(); - ASSERT(frame->is_java_script() || frame->is_arguments_adaptor()); + DCHECK(frame->is_java_script() || frame->is_arguments_adaptor()); return static_cast<JavaScriptFrame*>(frame); } inline StackFrame* SafeStackFrameIterator::frame() const { - ASSERT(!done()); - ASSERT(frame_->is_java_script() || frame_->is_exit()); + DCHECK(!done()); + DCHECK(frame_->is_java_script() || frame_->is_exit()); return frame_; } diff -Nru nodejs-0.11.13/deps/v8/src/full-codegen.cc nodejs-0.11.15/deps/v8/src/full-codegen.cc --- nodejs-0.11.13/deps/v8/src/full-codegen.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/full-codegen.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,20 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "codegen.h" -#include "compiler.h" -#include "debug.h" -#include "full-codegen.h" -#include "liveedit.h" -#include "macro-assembler.h" -#include "prettyprinter.h" -#include "scopes.h" -#include "scopeinfo.h" -#include "snapshot.h" -#include "stub-cache.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/liveedit.h" +#include "src/macro-assembler.h" +#include "src/prettyprinter.h" +#include "src/scopeinfo.h" +#include "src/scopes.h" +#include "src/snapshot.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -313,8 +290,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { Isolate* isolate = info->isolate(); - Logger::TimerEventScope timer( - isolate, Logger::TimerEventScope::v8_compile_full_code); + TimerEventScope<TimerEventCompileFullCode> timer(info->isolate()); Handle<Script> script = info->script(); if (!script->IsUndefined() && !script->source()->IsUndefined()) { @@ -324,16 +300,15 @@ CodeGenerator::MakeCodePrologue(info, "full"); const int kInitialBufferSize = 4 * KB; MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize); -#ifdef ENABLE_GDB_JIT_INTERFACE - masm.positions_recorder()->StartGDBJITLineInfoRecording(); -#endif + if (info->will_serialize()) 
masm.enable_serializer(); + LOG_CODE_EVENT(isolate, CodeStartLinePosInfoRecordEvent(masm.positions_recorder())); FullCodeGenerator cgen(&masm, info); cgen.Generate(); if (cgen.HasStackOverflow()) { - ASSERT(!isolate->has_pending_exception()); + DCHECK(!isolate->has_pending_exception()); return false; } unsigned table_offset = cgen.EmitBackEdgeTable(); @@ -347,22 +322,12 @@ cgen.PopulateTypeFeedbackInfo(code); code->set_has_deoptimization_support(info->HasDeoptimizationSupport()); code->set_handler_table(*cgen.handler_table()); -#ifdef ENABLE_DEBUGGER_SUPPORT code->set_compiled_optimizable(info->IsOptimizable()); -#endif // ENABLE_DEBUGGER_SUPPORT code->set_allow_osr_at_loop_nesting_level(0); code->set_profiler_ticks(0); code->set_back_edge_table_offset(table_offset); - code->set_back_edges_patched_for_osr(false); CodeGenerator::PrintCode(code, info); info->SetCode(code); -#ifdef ENABLE_GDB_JIT_INTERFACE - if (FLAG_gdbjit) { - GDBJITLineInfo* lineinfo = - masm.positions_recorder()->DetachGDBJITLineInfo(); - GDBJIT(RegisterDetailedLineInfo(*code, lineinfo)); - } -#endif void* line_info = masm.positions_recorder()->DetachJITHandlerData(); LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info)); return true; @@ -373,7 +338,7 @@ // The back edge table consists of a length (in number of entries) // field, and then a sequence of entries. Each entry is a pair of AST id // and code-relative pc offset. - masm()->Align(kIntSize); + masm()->Align(kPointerSize); unsigned offset = masm()->pc_offset(); unsigned length = back_edges_.length(); __ dd(length); @@ -386,25 +351,23 @@ } -void FullCodeGenerator::InitializeFeedbackVector() { - int length = info_->function()->slot_count(); - feedback_vector_ = isolate()->factory()->NewFixedArray(length, TENURED); - Handle<Object> sentinel = TypeFeedbackInfo::UninitializedSentinel(isolate()); - // Ensure that it's safe to set without using a write barrier. 
- ASSERT_EQ(isolate()->heap()->uninitialized_symbol(), *sentinel); - for (int i = 0; i < length; i++) { - feedback_vector_->set(i, *sentinel, SKIP_WRITE_BARRIER); +void FullCodeGenerator::EnsureSlotContainsAllocationSite(int slot) { + Handle<FixedArray> vector = FeedbackVector(); + if (!vector->get(slot)->IsAllocationSite()) { + Handle<AllocationSite> allocation_site = + isolate()->factory()->NewAllocationSite(); + vector->set(slot, *allocation_site); } } void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) { // Fill in the deoptimization information. - ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty()); + DCHECK(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty()); if (!info_->HasDeoptimizationSupport()) return; int length = bailout_entries_.length(); - Handle<DeoptimizationOutputData> data = isolate()->factory()-> - NewDeoptimizationOutputData(length, TENURED); + Handle<DeoptimizationOutputData> data = + DeoptimizationOutputData::New(isolate(), length, TENURED); for (int i = 0; i < length; i++) { data->SetAstId(i, bailout_entries_[i].id); data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state)); @@ -416,24 +379,23 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) { Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo(); info->set_ic_total_count(ic_total_count_); - info->set_feedback_vector(*FeedbackVector()); - ASSERT(!isolate()->heap()->InNewSpace(*info)); + DCHECK(!isolate()->heap()->InNewSpace(*info)); code->set_type_feedback_info(*info); } void FullCodeGenerator::Initialize() { + InitializeAstVisitor(info_->zone()); // The generation of debug code must match between the snapshot code and the // code that is generated later. This is assumed by the debugger when it is // calculating PC offsets after generating a debug version of code. 
Therefore // we disable the production of debug code in the full compiler if we are // either generating a snapshot or we booted from a snapshot. generate_debug_code_ = FLAG_debug_code && - !Serializer::enabled() && + !masm_->serializer_enabled() && !Snapshot::HaveASnapshotToStartFrom(); masm_->set_emit_debug_code(generate_debug_code_); masm_->set_predictable_code_size(true); - InitializeAstVisitor(info_->zone()); } @@ -467,7 +429,7 @@ #ifdef DEBUG // In debug builds, mark the return so we can verify that this function // was called. - ASSERT(!call->return_is_recorded_); + DCHECK(!call->return_is_recorded_); call->return_is_recorded_ = true; #endif } @@ -479,18 +441,21 @@ if (!info_->HasDeoptimizationSupport()) return; unsigned pc_and_state = StateField::encode(state) | PcField::encode(masm_->pc_offset()); - ASSERT(Smi::IsValid(pc_and_state)); + DCHECK(Smi::IsValid(pc_and_state)); +#ifdef DEBUG + for (int i = 0; i < bailout_entries_.length(); ++i) { + DCHECK(bailout_entries_[i].id != id); + } +#endif BailoutEntry entry = { id, pc_and_state }; - ASSERT(!prepared_bailout_ids_.Contains(id.ToInt())); - prepared_bailout_ids_.Add(id.ToInt(), zone()); bailout_entries_.Add(entry, zone()); } void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) { // The pc offset does not need to be encoded and packed together with a state. 
- ASSERT(masm_->pc_offset() > 0); - ASSERT(loop_depth() > 0); + DCHECK(masm_->pc_offset() > 0); + DCHECK(loop_depth() > 0); uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker); BackEdgeEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()), depth }; @@ -606,7 +571,7 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) { - ASSERT(scope_->is_global_scope()); + DCHECK(scope_->is_global_scope()); for (int i = 0; i < declarations->length(); i++) { ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration(); @@ -616,15 +581,15 @@ Comment cmnt(masm_, "[ Link nested modules"); Scope* scope = module->body()->scope(); Interface* interface = scope->interface(); - ASSERT(interface->IsModule() && interface->IsFrozen()); + DCHECK(interface->IsModule() && interface->IsFrozen()); interface->Allocate(scope->module_var()->index()); // Set up module context. - ASSERT(scope->interface()->Index() >= 0); + DCHECK(scope->interface()->Index() >= 0); __ Push(Smi::FromInt(scope->interface()->Index())); __ Push(scope->GetScopeInfo()); - __ CallRuntime(Runtime::kHiddenPushModuleContext, 2); + __ CallRuntime(Runtime::kPushModuleContext, 2); StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); @@ -713,7 +678,7 @@ // This is a scope hosting modules. Allocate a descriptor array to pass // to the runtime for initialization. Comment cmnt(masm_, "[ Allocate modules"); - ASSERT(scope_->is_global_scope()); + DCHECK(scope_->is_global_scope()); modules_ = isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED); module_index_ = 0; @@ -727,7 +692,7 @@ if (scope_->num_modules() != 0) { // Initialize modules from descriptor array. 
- ASSERT(module_index_ == modules_->length()); + DCHECK(module_index_ == modules_->length()); DeclareModules(modules_); modules_ = saved_modules; module_index_ = saved_module_index; @@ -756,15 +721,15 @@ Comment cmnt(masm_, "[ ModuleLiteral"); SetStatementPosition(block); - ASSERT(!modules_.is_null()); - ASSERT(module_index_ < modules_->length()); + DCHECK(!modules_.is_null()); + DCHECK(module_index_ < modules_->length()); int index = module_index_++; // Set up module context. - ASSERT(interface->Index() >= 0); + DCHECK(interface->Index() >= 0); __ Push(Smi::FromInt(interface->Index())); __ Push(Smi::FromInt(0)); - __ CallRuntime(Runtime::kHiddenPushModuleContext, 2); + __ CallRuntime(Runtime::kPushModuleContext, 2); StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); { @@ -802,9 +767,9 @@ Scope* scope = module->body()->scope(); Interface* interface = scope_->interface(); - ASSERT(interface->IsModule() && interface->IsFrozen()); - ASSERT(!modules_.is_null()); - ASSERT(module_index_ < modules_->length()); + DCHECK(interface->IsModule() && interface->IsFrozen()); + DCHECK(!modules_.is_null()); + DCHECK(module_index_ < modules_->length()); interface->Allocate(scope->module_var()->index()); int index = module_index_++; @@ -815,7 +780,7 @@ int FullCodeGenerator::DeclareGlobalsFlags() { - ASSERT(DeclareGlobalsStrictMode::is_valid(strict_mode())); + DCHECK(DeclareGlobalsStrictMode::is_valid(strict_mode())); return DeclareGlobalsEvalFlag::encode(is_eval()) | DeclareGlobalsNativeFlag::encode(is_native()) | DeclareGlobalsStrictMode::encode(strict_mode()); @@ -833,8 +798,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) { -#ifdef ENABLE_DEBUGGER_SUPPORT - if (!isolate()->debugger()->IsDebuggerActive()) { + if (!info_->is_debug()) { CodeGenerator::RecordPositions(masm_, stmt->position()); } else { // Check if the statement will be breakable without adding a debug break @@ -849,18 +813,14 @@ // If the position recording did record a 
new position generate a debug // break slot to make the statement breakable. if (position_recorded) { - Debug::GenerateSlot(masm_); + DebugCodegen::GenerateSlot(masm_); } } -#else - CodeGenerator::RecordPositions(masm_, stmt->position()); -#endif } void FullCodeGenerator::SetExpressionPosition(Expression* expr) { -#ifdef ENABLE_DEBUGGER_SUPPORT - if (!isolate()->debugger()->IsDebuggerActive()) { + if (!info_->is_debug()) { CodeGenerator::RecordPositions(masm_, expr->position()); } else { // Check if the expression will be breakable without adding a debug break @@ -879,17 +839,9 @@ // If the position recording did record a new position generate a debug // break slot to make the statement breakable. if (position_recorded) { - Debug::GenerateSlot(masm_); + DebugCodegen::GenerateSlot(masm_); } } -#else - CodeGenerator::RecordPositions(masm_, expr->position()); -#endif -} - - -void FullCodeGenerator::SetStatementPosition(int pos) { - CodeGenerator::RecordPositions(masm_, pos); } @@ -916,8 +868,8 @@ FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) { int lookup_index = static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction); - ASSERT(lookup_index >= 0); - ASSERT(static_cast<size_t>(lookup_index) < + DCHECK(lookup_index >= 0); + DCHECK(static_cast<size_t>(lookup_index) < ARRAY_SIZE(kInlineFunctionGenerators)); return kInlineFunctionGenerators[lookup_index]; } @@ -925,8 +877,8 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) { const Runtime::Function* function = expr->function(); - ASSERT(function != NULL); - ASSERT(function->intrinsic_type == Runtime::INLINE); + DCHECK(function != NULL); + DCHECK(function->intrinsic_type == Runtime::INLINE); InlineFunctionGenerator generator = FindInlineFunctionGenerator(function->function_id); ((*this).*(generator))(expr); @@ -935,14 +887,14 @@ void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 
2); + DCHECK(args->length() == 2); EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT); } void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW); } @@ -1040,7 +992,7 @@ PrepareForBailoutForId(right_id, NO_REGISTERS); } else { - ASSERT(context()->IsEffect()); + DCHECK(context()->IsEffect()); Label eval_right; if (is_logical_and) { VisitForControl(left, &eval_right, &done, &eval_right); @@ -1085,28 +1037,30 @@ Scope* saved_scope = scope(); // Push a block context when entering a block with block scoped variables. - if (stmt->scope() != NULL) { + if (stmt->scope() == NULL) { + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); + } else { scope_ = stmt->scope(); - ASSERT(!scope_->is_module_scope()); + DCHECK(!scope_->is_module_scope()); { Comment cmnt(masm_, "[ Extend block context"); __ Push(scope_->GetScopeInfo()); PushFunctionArgumentForContextAllocation(); - __ CallRuntime(Runtime::kHiddenPushBlockContext, 2); + __ CallRuntime(Runtime::kPushBlockContext, 2); // Replace the context stored in the frame. StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); } { Comment cmnt(masm_, "[ Declarations"); VisitDeclarations(scope_->declarations()); + PrepareForBailoutForId(stmt->DeclsId(), NO_REGISTERS); } } - PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); VisitStatements(stmt->statements()); scope_ = saved_scope; __ bind(nested_block.break_label()); - PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); // Pop block context if necessary. 
if (stmt->scope() != NULL) { @@ -1115,6 +1069,7 @@ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); } + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); } @@ -1123,7 +1078,7 @@ __ Push(Smi::FromInt(stmt->proxy()->interface()->Index())); __ Push(Smi::FromInt(0)); - __ CallRuntime(Runtime::kHiddenPushModuleContext, 2); + __ CallRuntime(Runtime::kPushModuleContext, 2); StoreToFrameField( StandardFrameConstants::kContextOffset, context_register()); @@ -1262,7 +1217,7 @@ VisitForStackValue(stmt->expression()); PushFunctionArgumentForContextAllocation(); - __ CallRuntime(Runtime::kHiddenPushWithContext, 2); + __ CallRuntime(Runtime::kPushWithContext, 2); StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); Scope* saved_scope = scope(); @@ -1314,31 +1269,28 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { Comment cmnt(masm_, "[ WhileStatement"); - Label test, body; + Label loop, body; Iteration loop_statement(this, stmt); increment_loop_depth(); - // Emit the test at the bottom of the loop. - __ jmp(&test); + __ bind(&loop); + + SetExpressionPosition(stmt->cond()); + VisitForControl(stmt->cond(), + &body, + loop_statement.break_label(), + &body); PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); __ bind(&body); Visit(stmt->body()); - // Emit the statement position here as this is where the while - // statement code starts. __ bind(loop_statement.continue_label()); - SetStatementPosition(stmt); // Check stack before looping. 
- EmitBackEdgeBookkeeping(stmt, &body); - - __ bind(&test); - VisitForControl(stmt->cond(), - &body, - loop_statement.break_label(), - loop_statement.break_label()); + EmitBackEdgeBookkeeping(stmt, &loop); + __ jmp(&loop); PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); __ bind(loop_statement.break_label()); @@ -1415,14 +1367,14 @@ __ Push(stmt->variable()->name()); __ Push(result_register()); PushFunctionArgumentForContextAllocation(); - __ CallRuntime(Runtime::kHiddenPushCatchContext, 3); + __ CallRuntime(Runtime::kPushCatchContext, 3); StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); } Scope* saved_scope = scope(); scope_ = stmt->scope(); - ASSERT(scope_->declarations()->is_empty()); + DCHECK(scope_->declarations()->is_empty()); { WithOrCatch catch_body(this); Visit(stmt->catch_block()); } @@ -1479,7 +1431,7 @@ // rethrow the exception if it returns. __ Call(&finally_entry); __ Push(result_register()); - __ CallRuntime(Runtime::kHiddenReThrow, 1); + __ CallRuntime(Runtime::kReThrow, 1); // Finally block implementation. __ bind(&finally_entry); @@ -1506,13 +1458,11 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { -#ifdef ENABLE_DEBUGGER_SUPPORT Comment cmnt(masm_, "[ DebuggerStatement"); SetStatementPosition(stmt); __ DebugBreak(); // Ignore the return value. -#endif } @@ -1562,7 +1512,7 @@ // Build the function boilerplate and instantiate it. Handle<SharedFunctionInfo> function_info = - Compiler::BuildFunctionInfo(expr, script()); + Compiler::BuildFunctionInfo(expr, script(), info_); if (function_info.is_null()) { SetStackOverflow(); return; @@ -1580,7 +1530,7 @@ v8::Handle<v8::FunctionTemplate> fun_template = expr->extension()->GetNativeFunctionTemplate( reinterpret_cast<v8::Isolate*>(isolate()), v8::Utils::ToLocal(name)); - ASSERT(!fun_template.IsEmpty()); + DCHECK(!fun_template.IsEmpty()); // Instantiate the function and create a shared function info from it. 
Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction()); @@ -1588,9 +1538,12 @@ Handle<Code> code = Handle<Code>(fun->shared()->code()); Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub()); bool is_generator = false; + bool is_arrow = false; Handle<SharedFunctionInfo> shared = - isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator, - code, Handle<ScopeInfo>(fun->shared()->scope_info())); + isolate()->factory()->NewSharedFunctionInfo( + name, literals, is_generator, is_arrow, code, + Handle<ScopeInfo>(fun->shared()->scope_info()), + Handle<FixedArray>(fun->shared()->feedback_vector())); shared->set_construct_stub(*construct_stub); // Copy the function data to the shared function info. @@ -1605,7 +1558,7 @@ void FullCodeGenerator::VisitThrow(Throw* expr) { Comment cmnt(masm_, "[ Throw"); VisitForStackValue(expr->exception()); - __ CallRuntime(Runtime::kHiddenThrow, 1); + __ CallRuntime(Runtime::kThrow, 1); // Never returns here. } @@ -1647,22 +1600,24 @@ DisallowHeapAllocation no_gc; Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement); - // Iterate over the back edge table and patch every interrupt + // Increment loop nesting level by one and iterate over the back edge table + // to find the matching loops to patch the interrupt // call to an unconditional call to the replacement code. 
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); + int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1; + if (loop_nesting_level > Code::kMaxLoopNestingMarker) return; BackEdgeTable back_edges(unoptimized, &no_gc); for (uint32_t i = 0; i < back_edges.length(); i++) { if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) { - ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate, + DCHECK_EQ(INTERRUPT, GetBackEdgeState(isolate, unoptimized, back_edges.pc(i))); PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch); } } - unoptimized->set_back_edges_patched_for_osr(true); - ASSERT(Verify(isolate, unoptimized, loop_nesting_level)); + unoptimized->set_allow_osr_at_loop_nesting_level(loop_nesting_level); + DCHECK(Verify(isolate, unoptimized)); } @@ -1671,23 +1626,21 @@ Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck); // Iterate over the back edge table and revert the patched interrupt calls. - ASSERT(unoptimized->back_edges_patched_for_osr()); int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); BackEdgeTable back_edges(unoptimized, &no_gc); for (uint32_t i = 0; i < back_edges.length(); i++) { if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) { - ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate, + DCHECK_NE(INTERRUPT, GetBackEdgeState(isolate, unoptimized, back_edges.pc(i))); PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch); } } - unoptimized->set_back_edges_patched_for_osr(false); unoptimized->set_allow_osr_at_loop_nesting_level(0); // Assert that none of the back edges are patched anymore. 
- ASSERT(Verify(isolate, unoptimized, -1)); + DCHECK(Verify(isolate, unoptimized)); } @@ -1713,10 +1666,9 @@ #ifdef DEBUG -bool BackEdgeTable::Verify(Isolate* isolate, - Code* unoptimized, - int loop_nesting_level) { +bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) { DisallowHeapAllocation no_gc; + int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); BackEdgeTable back_edges(unoptimized, &no_gc); for (uint32_t i = 0; i < back_edges.length(); i++) { uint32_t loop_depth = back_edges.loop_depth(i); diff -Nru nodejs-0.11.13/deps/v8/src/full-codegen.h nodejs-0.11.15/deps/v8/src/full-codegen.h --- nodejs-0.11.13/deps/v8/src/full-codegen.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/full-codegen.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,21 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FULL_CODEGEN_H_ #define V8_FULL_CODEGEN_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "assert-scope.h" -#include "ast.h" -#include "code-stubs.h" -#include "codegen.h" -#include "compiler.h" -#include "data-flow.h" -#include "globals.h" -#include "objects.h" +#include "src/allocation.h" +#include "src/assert-scope.h" +#include "src/ast.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/data-flow.h" +#include "src/globals.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -97,6 +74,7 @@ info->zone()), back_edges_(2, info->zone()), ic_total_count_(0) { + DCHECK(!info->IsStub()); Initialize(); } @@ -121,17 +99,25 @@ static const int kMaxBackEdgeWeight = 127; // Platform-specific code size multiplier. 
-#if V8_TARGET_ARCH_IA32 - static const int kCodeSizeMultiplier = 100; +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 + static const int kCodeSizeMultiplier = 105; + static const int kBootCodeSizeMultiplier = 100; #elif V8_TARGET_ARCH_X64 - static const int kCodeSizeMultiplier = 162; + static const int kCodeSizeMultiplier = 170; + static const int kBootCodeSizeMultiplier = 140; #elif V8_TARGET_ARCH_ARM - static const int kCodeSizeMultiplier = 142; + static const int kCodeSizeMultiplier = 149; + static const int kBootCodeSizeMultiplier = 110; #elif V8_TARGET_ARCH_ARM64 // TODO(all): Copied ARM value. Check this is sensible for ARM64. - static const int kCodeSizeMultiplier = 142; + static const int kCodeSizeMultiplier = 149; + static const int kBootCodeSizeMultiplier = 110; #elif V8_TARGET_ARCH_MIPS - static const int kCodeSizeMultiplier = 142; + static const int kCodeSizeMultiplier = 149; + static const int kBootCodeSizeMultiplier = 120; +#elif V8_TARGET_ARCH_MIPS64 + static const int kCodeSizeMultiplier = 149; + static const int kBootCodeSizeMultiplier = 120; #else #error Unsupported target architecture. #endif @@ -151,7 +137,7 @@ } virtual ~NestedStatement() { // Unlink from codegen's nesting stack. - ASSERT_EQ(this, codegen_->nesting_stack_); + DCHECK_EQ(this, codegen_->nesting_stack_); codegen_->nesting_stack_ = previous_; } @@ -234,7 +220,7 @@ ++(*context_length); } return previous_; - }; + } }; // The try block of a try/catch statement. @@ -337,6 +323,13 @@ Label* if_true, Label* if_false, Label* fall_through); +#elif V8_TARGET_ARCH_MIPS64 + void Split(Condition cc, + Register lhs, + const Operand& rhs, + Label* if_true, + Label* if_false, + Label* fall_through); #else // All non-mips arch. void Split(Condition cc, Label* if_true, @@ -437,12 +430,9 @@ // Feedback slot support. The feedback vector will be cleared during gc and // collected by the type-feedback oracle. 
Handle<FixedArray> FeedbackVector() { - return feedback_vector_; - } - void StoreFeedbackVectorSlot(int slot, Handle<Object> object) { - feedback_vector_->set(slot, *object); + return info_->feedback_vector(); } - void InitializeFeedbackVector(); + void EnsureSlotContainsAllocationSite(int slot); // Record a call's return site offset, used to rebuild the frame if the // called function was inlined at the site. @@ -485,9 +475,9 @@ void EmitReturnSequence(); // Platform-specific code sequences for calls - void EmitCallWithStub(Call* expr); - void EmitCallWithIC(Call* expr); - void EmitKeyedCallWithIC(Call* expr, Expression* key); + void EmitCall(Call* expr, CallIC::CallType = CallIC::FUNCTION); + void EmitCallWithLoadIC(Call* expr); + void EmitKeyedCallWithLoadIC(Call* expr, Expression* key); // Platform-specific code for inline runtime calls. InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id); @@ -505,11 +495,11 @@ JSGeneratorObject::ResumeMode resume_mode); // Platform-specific code for loading variables. - void EmitLoadGlobalCheckExtensions(Variable* var, + void EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow); MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow); - void EmitDynamicLookupFastCase(Variable* var, + void EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofState typeof_state, Label* slow, Label* done); @@ -560,7 +550,6 @@ // Helper functions to EmitVariableAssignment void EmitStoreToStackLocalOrContextSlot(Variable* var, MemOperand location); - void EmitCallStoreContextSlot(Handle<String> name, StrictMode strict_mode); // Complete a named property assignment. The receiver is expected on top // of the stack and the right-hand-side value in the accumulator. 
@@ -582,7 +571,6 @@ void SetReturnPosition(FunctionLiteral* fun); void SetStatementPosition(Statement* stmt); void SetExpressionPosition(Expression* expr); - void SetStatementPosition(int pos); void SetSourcePosition(int pos); // Non-local control flow support. @@ -593,7 +581,7 @@ int loop_depth() { return loop_depth_; } void increment_loop_depth() { loop_depth_++; } void decrement_loop_depth() { - ASSERT(loop_depth_ > 0); + DCHECK(loop_depth_ > 0); loop_depth_--; } @@ -777,7 +765,7 @@ fall_through_(fall_through) { } static const TestContext* cast(const ExpressionContext* context) { - ASSERT(context->IsTest()); + DCHECK(context->IsTest()); return reinterpret_cast<const TestContext*>(context); } @@ -840,11 +828,9 @@ int module_index_; const ExpressionContext* context_; ZoneList<BailoutEntry> bailout_entries_; - GrowableBitVector prepared_bailout_ids_; ZoneList<BackEdgeEntry> back_edges_; int ic_total_count_; Handle<FixedArray> handler_table_; - Handle<FixedArray> feedback_vector_; Handle<Cell> profiling_counter_; bool generate_debug_code_; @@ -880,7 +866,7 @@ class BackEdgeTable { public: BackEdgeTable(Code* code, DisallowHeapAllocation* required) { - ASSERT(code->kind() == Code::FUNCTION); + DCHECK(code->kind() == Code::FUNCTION); instruction_start_ = code->instruction_start(); Address table_address = instruction_start_ + code->back_edge_table_offset(); length_ = Memory::uint32_at(table_address); @@ -912,10 +898,8 @@ OSR_AFTER_STACK_CHECK }; - // Patch all interrupts with allowed loop depth in the unoptimized code to - // unconditionally call replacement_code. - static void Patch(Isolate* isolate, - Code* unoptimized_code); + // Increase allowed loop nesting level by one and patch those matching loops. + static void Patch(Isolate* isolate, Code* unoptimized_code); // Patch the back edge to the target state, provided the correct callee. 
static void PatchAt(Code* unoptimized_code, @@ -941,14 +925,12 @@ #ifdef DEBUG // Verify that all back edges of a certain loop depth are patched. - static bool Verify(Isolate* isolate, - Code* unoptimized_code, - int loop_nesting_level); + static bool Verify(Isolate* isolate, Code* unoptimized_code); #endif // DEBUG private: Address entry_at(uint32_t index) { - ASSERT(index < length_); + DCHECK(index < length_); return start_ + index * kEntrySize; } diff -Nru nodejs-0.11.13/deps/v8/src/func-name-inferrer.cc nodejs-0.11.15/deps/v8/src/func-name-inferrer.cc --- nodejs-0.11.13/deps/v8/src/func-name-inferrer.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/func-name-inferrer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,20 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "ast.h" -#include "func-name-inferrer.h" -#include "list-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/ast.h" +#include "src/ast-value-factory.h" +#include "src/func-name-inferrer.h" +#include "src/list-inl.h" namespace v8 { namespace internal { -FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone) - : isolate_(isolate), +FuncNameInferrer::FuncNameInferrer(AstValueFactory* ast_value_factory, + Zone* zone) + : ast_value_factory_(ast_value_factory), entries_stack_(10, zone), names_stack_(5, zone), funcs_to_infer_(4, zone), @@ -43,38 +22,36 @@ } -void FuncNameInferrer::PushEnclosingName(Handle<String> name) { +void FuncNameInferrer::PushEnclosingName(const AstRawString* name) { // Enclosing name is a name of a constructor function. To check // that it is really a constructor, we check that it is not empty // and starts with a capital letter. 
- if (name->length() > 0 && Runtime::IsUpperCaseChar( - isolate()->runtime_state(), name->Get(0))) { + if (!name->IsEmpty() && unibrow::Uppercase::Is(name->FirstCharacter())) { names_stack_.Add(Name(name, kEnclosingConstructorName), zone()); } } -void FuncNameInferrer::PushLiteralName(Handle<String> name) { - if (IsOpen() && !isolate()->heap()->prototype_string()->Equals(*name)) { +void FuncNameInferrer::PushLiteralName(const AstRawString* name) { + if (IsOpen() && name != ast_value_factory_->prototype_string()) { names_stack_.Add(Name(name, kLiteralName), zone()); } } -void FuncNameInferrer::PushVariableName(Handle<String> name) { - if (IsOpen() && !isolate()->heap()->dot_result_string()->Equals(*name)) { +void FuncNameInferrer::PushVariableName(const AstRawString* name) { + if (IsOpen() && name != ast_value_factory_->dot_result_string()) { names_stack_.Add(Name(name, kVariableName), zone()); } } -Handle<String> FuncNameInferrer::MakeNameFromStack() { - return MakeNameFromStackHelper(0, isolate()->factory()->empty_string()); +const AstString* FuncNameInferrer::MakeNameFromStack() { + return MakeNameFromStackHelper(0, ast_value_factory_->empty_string()); } - -Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos, - Handle<String> prev) { +const AstString* FuncNameInferrer::MakeNameFromStackHelper( + int pos, const AstString* prev) { if (pos >= names_stack_.length()) return prev; if (pos < names_stack_.length() - 1 && names_stack_.at(pos).type == kVariableName && @@ -83,13 +60,11 @@ return MakeNameFromStackHelper(pos + 1, prev); } else { if (prev->length() > 0) { - Handle<String> name = names_stack_.at(pos).name; + const AstRawString* name = names_stack_.at(pos).name; if (prev->length() + name->length() + 1 > String::kMaxLength) return prev; - Factory* factory = isolate()->factory(); - Handle<String> curr = factory->NewConsString(factory->dot_string(), name); - CHECK_NOT_EMPTY_HANDLE(isolate(), curr); - curr = factory->NewConsString(prev, curr); - 
CHECK_NOT_EMPTY_HANDLE(isolate(), curr); + const AstConsString* curr = ast_value_factory_->NewConsString( + ast_value_factory_->dot_string(), name); + curr = ast_value_factory_->NewConsString(prev, curr); return MakeNameFromStackHelper(pos + 1, curr); } else { return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name); @@ -99,9 +74,9 @@ void FuncNameInferrer::InferFunctionsNames() { - Handle<String> func_name = MakeNameFromStack(); + const AstString* func_name = MakeNameFromStack(); for (int i = 0; i < funcs_to_infer_.length(); ++i) { - funcs_to_infer_[i]->set_inferred_name(func_name); + funcs_to_infer_[i]->set_raw_inferred_name(func_name); } funcs_to_infer_.Rewind(0); } diff -Nru nodejs-0.11.13/deps/v8/src/func-name-inferrer.h nodejs-0.11.15/deps/v8/src/func-name-inferrer.h --- nodejs-0.11.13/deps/v8/src/func-name-inferrer.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/func-name-inferrer.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,20 @@ // Copyright 2006-2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_FUNC_NAME_INFERRER_H_ #define V8_FUNC_NAME_INFERRER_H_ -#include "handles.h" -#include "zone.h" +#include "src/handles.h" +#include "src/zone.h" namespace v8 { namespace internal { +class AstRawString; +class AstString; +class AstValueFactory; class FunctionLiteral; -class Isolate; // FuncNameInferrer is a stateful class that is used to perform name // inference for anonymous functions during static analysis of source code. @@ -49,13 +28,13 @@ // a name. class FuncNameInferrer : public ZoneObject { public: - FuncNameInferrer(Isolate* isolate, Zone* zone); + FuncNameInferrer(AstValueFactory* ast_value_factory, Zone* zone); // Returns whether we have entered name collection state. bool IsOpen() const { return !entries_stack_.is_empty(); } // Pushes an enclosing the name of enclosing function onto names stack. - void PushEnclosingName(Handle<String> name); + void PushEnclosingName(const AstRawString* name); // Enters name collection state. void Enter() { @@ -63,9 +42,9 @@ } // Pushes an encountered name onto names stack when in collection state. 
- void PushLiteralName(Handle<String> name); + void PushLiteralName(const AstRawString* name); - void PushVariableName(Handle<String> name); + void PushVariableName(const AstRawString* name); // Adds a function to infer name for. void AddFunction(FunctionLiteral* func_to_infer) { @@ -82,7 +61,7 @@ // Infers a function name and leaves names collection state. void Infer() { - ASSERT(IsOpen()); + DCHECK(IsOpen()); if (!funcs_to_infer_.is_empty()) { InferFunctionsNames(); } @@ -90,7 +69,7 @@ // Leaves names collection state. void Leave() { - ASSERT(IsOpen()); + DCHECK(IsOpen()); names_stack_.Rewind(entries_stack_.RemoveLast()); if (entries_stack_.is_empty()) funcs_to_infer_.Clear(); @@ -103,24 +82,24 @@ kVariableName }; struct Name { - Name(Handle<String> name, NameType type) : name(name), type(type) { } - Handle<String> name; + Name(const AstRawString* name, NameType type) : name(name), type(type) {} + const AstRawString* name; NameType type; }; - Isolate* isolate() { return isolate_; } Zone* zone() const { return zone_; } // Constructs a full name in dotted notation from gathered names. - Handle<String> MakeNameFromStack(); + const AstString* MakeNameFromStack(); // A helper function for MakeNameFromStack. - Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev); + const AstString* MakeNameFromStackHelper(int pos, + const AstString* prev); // Performs name inferring for added functions. void InferFunctionsNames(); - Isolate* isolate_; + AstValueFactory* ast_value_factory_; ZoneList<int> entries_stack_; ZoneList<Name> names_stack_; ZoneList<FunctionLiteral*> funcs_to_infer_; diff -Nru nodejs-0.11.13/deps/v8/src/gdb-jit.cc nodejs-0.11.15/deps/v8/src/gdb-jit.cc --- nodejs-0.11.13/deps/v8/src/gdb-jit.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/gdb-jit.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,21 @@ // Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifdef ENABLE_GDB_JIT_INTERFACE -#include "v8.h" -#include "gdb-jit.h" +#include "src/v8.h" -#include "bootstrapper.h" -#include "compiler.h" -#include "frames.h" -#include "frames-inl.h" -#include "global-handles.h" -#include "messages.h" -#include "natives.h" -#include "platform.h" -#include "scopes.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/compiler.h" +#include "src/frames-inl.h" +#include "src/frames.h" +#include "src/gdb-jit.h" +#include "src/global-handles.h" +#include "src/messages.h" +#include "src/natives.h" +#include "src/ostreams.h" +#include "src/scopes.h" namespace v8 { namespace internal { @@ -137,7 +115,7 @@ if (delta == 0) return; uintptr_t padding = align - delta; Ensure(position_ += padding); - ASSERT((position_ % align) == 0); + DCHECK((position_ % align) == 0); } void WriteULEB128(uintptr_t value) { @@ -177,7 +155,7 @@ template<typename T> T* RawSlotAt(uintptr_t offset) { - ASSERT(offset < capacity_ && offset + sizeof(T) <= capacity_); + DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_); return reinterpret_cast<T*>(&buffer_[offset]); } @@ -217,7 +195,7 @@ struct MachOSectionHeader { char sectname[16]; char segname[16]; -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 uint32_t addr; uint32_t size; #else @@ -252,8 +230,8 @@ segment_(segment), align_(align), flags_(flags) { - ASSERT(IsPowerOf2(align)); if (align_ != 0) { + DCHECK(IsPowerOf2(align)); align_ = WhichPowerOf2(align_); } } @@ -272,8 +250,8 @@ header->reserved2 = 0; memset(header->sectname, 0, sizeof(header->sectname)); memset(header->segname, 0, sizeof(header->segname)); - ASSERT(strlen(name_) < sizeof(header->sectname)); - ASSERT(strlen(segment_) < sizeof(header->segname)); + DCHECK(strlen(name_) < sizeof(header->sectname)); + DCHECK(strlen(segment_) < sizeof(header->segname)); strncpy(header->sectname, name_, sizeof(header->sectname)); strncpy(header->segname, segment_, sizeof(header->segname)); } @@ 
-465,7 +443,7 @@ } virtual void WriteBody(Writer::Slot<Header> header, Writer* w) { - ASSERT(writer_ == NULL); + DCHECK(writer_ == NULL); header->offset = offset_; header->size = size_; } @@ -534,7 +512,7 @@ uint32_t cmd; uint32_t cmdsize; char segname[16]; -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 uint32_t vmaddr; uint32_t vmsize; uint32_t fileoff; @@ -558,9 +536,9 @@ Writer::Slot<MachOHeader> WriteHeader(Writer* w) { - ASSERT(w->position() == 0); + DCHECK(w->position() == 0); Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>(); -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 header->magic = 0xFEEDFACEu; header->cputype = 7; // i386 header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL @@ -585,7 +563,7 @@ uintptr_t code_size) { Writer::Slot<MachOSegmentCommand> cmd = w->CreateSlotHere<MachOSegmentCommand>(); -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 cmd->cmd = LC_SEGMENT_32; #else cmd->cmd = LC_SEGMENT_64; @@ -670,20 +648,21 @@ void WriteHeader(Writer* w) { - ASSERT(w->position() == 0); + DCHECK(w->position() == 0); Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>(); -#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM +#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \ + (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT)) const uint8_t ident[16] = { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -#elif V8_TARGET_ARCH_X64 +#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT const uint8_t ident[16] = { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #else #error Unsupported target architecture. 
#endif - OS::MemCopy(header->ident, ident, 16); + memcpy(header->ident, ident, 16); header->type = 1; -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 header->machine = 3; #elif V8_TARGET_ARCH_X64 // Processor identification value for x64 is 62 as defined in @@ -712,7 +691,7 @@ void WriteSectionTable(Writer* w) { // Section headers table immediately follows file header. - ASSERT(w->position() == sizeof(ELFHeader)); + DCHECK(w->position() == sizeof(ELFHeader)); Writer::Slot<ELFSection::Header> headers = w->CreateSlotsHere<ELFSection::Header>(sections_.length()); @@ -785,7 +764,8 @@ Binding binding() const { return static_cast<Binding>(info >> 4); } -#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM +#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \ + (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT)) struct SerializedLayout { SerializedLayout(uint32_t name, uintptr_t value, @@ -808,7 +788,7 @@ uint8_t other; uint16_t section; }; -#elif V8_TARGET_ARCH_X64 +#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT struct SerializedLayout { SerializedLayout(uint32_t name, uintptr_t value, @@ -920,6 +900,32 @@ #endif // defined(__ELF) +class LineInfo : public Malloced { + public: + LineInfo() : pc_info_(10) {} + + void SetPosition(intptr_t pc, int pos, bool is_statement) { + AddPCInfo(PCInfo(pc, pos, is_statement)); + } + + struct PCInfo { + PCInfo(intptr_t pc, int pos, bool is_statement) + : pc_(pc), pos_(pos), is_statement_(is_statement) {} + + intptr_t pc_; + int pos_; + bool is_statement_; + }; + + List<PCInfo>* pc_info() { return &pc_info_; } + + private: + void AddPCInfo(const PCInfo& pc_info) { pc_info_.Add(pc_info); } + + List<PCInfo> pc_info_; +}; + + class CodeDescription BASE_EMBEDDED { public: #if V8_TARGET_ARCH_X64 @@ -931,27 +937,21 @@ }; #endif - CodeDescription(const char* name, - Code* code, - Handle<Script> script, - GDBJITLineInfo* lineinfo, - GDBJITInterface::CodeTag tag, + CodeDescription(const char* name, Code* code, 
Handle<Script> script, + LineInfo* lineinfo, GDBJITInterface::CodeTag tag, CompilationInfo* info) : name_(name), code_(code), script_(script), lineinfo_(lineinfo), tag_(tag), - info_(info) { - } + info_(info) {} const char* name() const { return name_; } - GDBJITLineInfo* lineinfo() const { - return lineinfo_; - } + LineInfo* lineinfo() const { return lineinfo_; } GDBJITInterface::CodeTag tag() const { return tag_; @@ -987,12 +987,12 @@ #if V8_TARGET_ARCH_X64 uintptr_t GetStackStateStartAddress(StackState state) const { - ASSERT(state < STACK_STATE_MAX); + DCHECK(state < STACK_STATE_MAX); return stack_state_start_addresses_[state]; } void SetStackStateStartAddress(StackState state, uintptr_t addr) { - ASSERT(state < STACK_STATE_MAX); + DCHECK(state < STACK_STATE_MAX); stack_state_start_addresses_[state] = addr; } #endif @@ -1002,7 +1002,7 @@ } int GetScriptLineNumber(int pos) { - return GetScriptLineNumberSafe(script_, pos) + 1; + return script_->GetLineNumber(pos) + 1; } @@ -1010,7 +1010,7 @@ const char* name_; Code* code_; Handle<Script> script_; - GDBJITLineInfo* lineinfo_; + LineInfo* lineinfo_; GDBJITInterface::CodeTag tag_; CompilationInfo* info_; #if V8_TARGET_ARCH_X64 @@ -1107,7 +1107,7 @@ w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize()); Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>(); uintptr_t fb_block_start = w->position(); -#if V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32 #elif V8_TARGET_ARCH_X64 w->Write<uint8_t>(DW_OP_reg6); // and here on x64. @@ -1115,6 +1115,8 @@ UNIMPLEMENTED(); #elif V8_TARGET_ARCH_MIPS UNIMPLEMENTED(); +#elif V8_TARGET_ARCH_MIPS64 + UNIMPLEMENTED(); #else #error Unsupported target architecture. #endif @@ -1153,11 +1155,11 @@ } // See contexts.h for more information. 
- ASSERT(Context::MIN_CONTEXT_SLOTS == 4); - ASSERT(Context::CLOSURE_INDEX == 0); - ASSERT(Context::PREVIOUS_INDEX == 1); - ASSERT(Context::EXTENSION_INDEX == 2); - ASSERT(Context::GLOBAL_OBJECT_INDEX == 3); + DCHECK(Context::MIN_CONTEXT_SLOTS == 4); + DCHECK(Context::CLOSURE_INDEX == 0); + DCHECK(Context::PREVIOUS_INDEX == 1); + DCHECK(Context::EXTENSION_INDEX == 2); + DCHECK(Context::GLOBAL_OBJECT_INDEX == 3); w->WriteULEB128(current_abbreviation++); w->WriteString(".closure"); w->WriteULEB128(current_abbreviation++); @@ -1305,7 +1307,7 @@ bool WriteBodyInternal(Writer* w) { int current_abbreviation = 1; bool extra_info = desc_->IsInfoAvailable(); - ASSERT(desc_->IsLineInfoAvailable()); + DCHECK(desc_->IsLineInfoAvailable()); w->WriteULEB128(current_abbreviation++); w->WriteULEB128(DW_TAG_COMPILE_UNIT); w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO); @@ -1470,13 +1472,13 @@ intptr_t line = 1; bool is_statement = true; - List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info(); + List<LineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info(); pc_info->Sort(&ComparePCInfo); int pc_info_length = pc_info->length(); for (int i = 0; i < pc_info_length; i++) { - GDBJITLineInfo::PCInfo* info = &pc_info->at(i); - ASSERT(info->pc_ >= pc); + LineInfo::PCInfo* info = &pc_info->at(i); + DCHECK(info->pc_ >= pc); // Reduce bloating in the debug line table by removing duplicate line // entries (per DWARF2 standard). @@ -1546,8 +1548,8 @@ w->Write<uint8_t>(op); } - static int ComparePCInfo(const GDBJITLineInfo::PCInfo* a, - const GDBJITLineInfo::PCInfo* b) { + static int ComparePCInfo(const LineInfo::PCInfo* a, + const LineInfo::PCInfo* b) { if (a->pc_ == b->pc_) { if (a->is_statement_ != b->is_statement_) { return b->is_statement_ ? 
+1 : -1; @@ -1646,7 +1648,7 @@ } } - ASSERT((w->position() - initial_position) % kPointerSize == 0); + DCHECK((w->position() - initial_position) % kPointerSize == 0); length_slot->set(w->position() - initial_position); } @@ -1841,9 +1843,10 @@ JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 }; #ifdef OBJECT_PRINT - void __gdb_print_v8_object(MaybeObject* object) { - object->Print(); - PrintF(stdout, "\n"); + void __gdb_print_v8_object(Object* object) { + OFStream os(stdout); + object->Print(os); + os << flush; } #endif } @@ -1856,7 +1859,7 @@ entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1); entry->symfile_size_ = symfile_size; - OS::MemCopy(entry->symfile_addr_, symfile_addr, symfile_size); + MemCopy(entry->symfile_addr_, symfile_addr, symfile_size); entry->prev_ = entry->next_ = NULL; @@ -1880,12 +1883,12 @@ static const char* kObjFileExt = ".o"; char file_name[64]; - OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), - "%s%s%d%s", - kElfFilePrefix, - (name_hint != NULL) ? name_hint : "", - file_num++, - kObjFileExt); + SNPrintF(Vector<char>(file_name, kMaxFileNameSize), + "%s%s%d%s", + kElfFilePrefix, + (name_hint != NULL) ? name_hint : "", + file_num++, + kObjFileExt); WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_); } #endif @@ -1985,15 +1988,15 @@ } -static void* TagLineInfo(GDBJITLineInfo* ptr) { +static void* TagLineInfo(LineInfo* ptr) { return reinterpret_cast<void*>( reinterpret_cast<intptr_t>(ptr) | kLineInfoTag); } -static GDBJITLineInfo* UntagLineInfo(void* ptr) { - return reinterpret_cast<GDBJITLineInfo*>( - reinterpret_cast<intptr_t>(ptr) & ~kLineInfoTag); +static LineInfo* UntagLineInfo(void* ptr) { + return reinterpret_cast<LineInfo*>(reinterpret_cast<intptr_t>(ptr) & + ~kLineInfoTag); } @@ -2003,8 +2006,7 @@ CompilationInfo* info) { if (!FLAG_gdbjit) return; - // Force initialization of line_ends array. 
- GetScriptLineNumber(script, 0); + Script::InitLineEnds(script); if (!name.is_null() && name->IsString()) { SmartArrayPointer<char> name_cstring = @@ -2054,7 +2056,7 @@ } -static LazyMutex mutex = LAZY_MUTEX_INITIALIZER; +static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER; void GDBJITInterface::AddCode(const char* name, @@ -2062,15 +2064,13 @@ GDBJITInterface::CodeTag tag, Script* script, CompilationInfo* info) { - if (!FLAG_gdbjit) return; - - LockGuard<Mutex> lock_guard(mutex.Pointer()); + base::LockGuard<base::Mutex> lock_guard(mutex.Pointer()); DisallowHeapAllocation no_gc; HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true); if (e->value != NULL && !IsLineInfoTagged(e->value)) return; - GDBJITLineInfo* lineinfo = UntagLineInfo(e->value); + LineInfo* lineinfo = UntagLineInfo(e->value); CodeDescription code_desc(name, code, script != NULL ? Handle<Script>(script) @@ -2088,7 +2088,7 @@ AddUnwindInfo(&code_desc); Isolate* isolate = code->GetIsolate(); JITCodeEntry* entry = CreateELFObject(&code_desc, isolate); - ASSERT(!IsLineInfoTagged(entry)); + DCHECK(!IsLineInfoTagged(entry)); delete lineinfo; e->value = entry; @@ -2108,49 +2108,10 @@ } -void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, - const char* name, - Code* code) { - if (!FLAG_gdbjit) return; - - EmbeddedVector<char, 256> buffer; - StringBuilder builder(buffer.start(), buffer.length()); - - builder.AddString(Tag2String(tag)); - if ((name != NULL) && (*name != '\0')) { - builder.AddString(": "); - builder.AddString(name); - } else { - builder.AddFormatted(": code object %p", static_cast<void*>(code)); - } - - AddCode(builder.Finalize(), code, tag, NULL, NULL); -} - - -void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, - Name* name, - Code* code) { - if (!FLAG_gdbjit) return; - if (name != NULL && name->IsString()) { - AddCode(tag, String::cast(name)->ToCString(DISALLOW_NULLS).get(), code); - } else { - AddCode(tag, "", code); - } -} - - -void 
GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) { - if (!FLAG_gdbjit) return; - - AddCode(tag, "", code); -} - - void GDBJITInterface::RemoveCode(Code* code) { if (!FLAG_gdbjit) return; - LockGuard<Mutex> lock_guard(mutex.Pointer()); + base::LockGuard<base::Mutex> lock_guard(mutex.Pointer()); HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), false); @@ -2186,15 +2147,62 @@ } -void GDBJITInterface::RegisterDetailedLineInfo(Code* code, - GDBJITLineInfo* line_info) { - LockGuard<Mutex> lock_guard(mutex.Pointer()); - ASSERT(!IsLineInfoTagged(line_info)); +static void RegisterDetailedLineInfo(Code* code, LineInfo* line_info) { + base::LockGuard<base::Mutex> lock_guard(mutex.Pointer()); + DCHECK(!IsLineInfoTagged(line_info)); HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true); - ASSERT(e->value == NULL); + DCHECK(e->value == NULL); e->value = TagLineInfo(line_info); } +void GDBJITInterface::EventHandler(const v8::JitCodeEvent* event) { + if (!FLAG_gdbjit) return; + switch (event->type) { + case v8::JitCodeEvent::CODE_ADDED: { + Code* code = Code::GetCodeFromTargetAddress( + reinterpret_cast<Address>(event->code_start)); + if (code->kind() == Code::OPTIMIZED_FUNCTION || + code->kind() == Code::FUNCTION) { + break; + } + EmbeddedVector<char, 256> buffer; + StringBuilder builder(buffer.start(), buffer.length()); + builder.AddSubstring(event->name.str, static_cast<int>(event->name.len)); + AddCode(builder.Finalize(), code, NON_FUNCTION, NULL, NULL); + break; + } + case v8::JitCodeEvent::CODE_MOVED: + break; + case v8::JitCodeEvent::CODE_REMOVED: { + Code* code = Code::GetCodeFromTargetAddress( + reinterpret_cast<Address>(event->code_start)); + RemoveCode(code); + break; + } + case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: { + LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data); + line_info->SetPosition(static_cast<intptr_t>(event->line_info.offset), + static_cast<int>(event->line_info.pos), + 
event->line_info.position_type == + v8::JitCodeEvent::STATEMENT_POSITION); + break; + } + case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING: { + v8::JitCodeEvent* mutable_event = const_cast<v8::JitCodeEvent*>(event); + mutable_event->user_data = new LineInfo(); + break; + } + case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: { + LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data); + Code* code = Code::GetCodeFromTargetAddress( + reinterpret_cast<Address>(event->code_start)); + RegisterDetailedLineInfo(code, line_info); + break; + } + } +} + + } } // namespace v8::internal #endif diff -Nru nodejs-0.11.13/deps/v8/src/gdb-jit.h nodejs-0.11.15/deps/v8/src/gdb-jit.h --- nodejs-0.11.13/deps/v8/src/gdb-jit.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/gdb-jit.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_GDB_JIT_H_ #define V8_GDB_JIT_H_ -#include "allocation.h" +#include "src/allocation.h" // // Basic implementation of GDB JIT Interface client. @@ -37,97 +14,34 @@ // #ifdef ENABLE_GDB_JIT_INTERFACE -#include "v8.h" -#include "factory.h" +#include "src/v8.h" + +#include "src/factory.h" namespace v8 { namespace internal { class CompilationInfo; -#define CODE_TAGS_LIST(V) \ - V(LOAD_IC) \ - V(KEYED_LOAD_IC) \ - V(STORE_IC) \ - V(KEYED_STORE_IC) \ - V(STUB) \ - V(BUILTIN) \ - V(SCRIPT) \ - V(EVAL) \ - V(FUNCTION) - -class GDBJITLineInfo : public Malloced { - public: - GDBJITLineInfo() - : pc_info_(10) { } - - void SetPosition(intptr_t pc, int pos, bool is_statement) { - AddPCInfo(PCInfo(pc, pos, is_statement)); - } - - struct PCInfo { - PCInfo(intptr_t pc, int pos, bool is_statement) - : pc_(pc), pos_(pos), is_statement_(is_statement) { } - - intptr_t pc_; - int pos_; - bool is_statement_; - }; - - List<PCInfo>* pc_info() { - return &pc_info_; - } - - private: - void AddPCInfo(const PCInfo& pc_info) { - pc_info_.Add(pc_info); - } - - List<PCInfo> pc_info_; -}; - - class GDBJITInterface: public AllStatic { public: - enum CodeTag { -#define V(x) x, - CODE_TAGS_LIST(V) -#undef V - TAG_COUNT - }; - - static const char* Tag2String(CodeTag tag) { - switch (tag) { -#define V(x) case x: return #x; - CODE_TAGS_LIST(V) -#undef V - 
default: - return NULL; - } - } - - static void AddCode(const char* name, - Code* code, - CodeTag tag, - Script* script, - CompilationInfo* info); + enum CodeTag { NON_FUNCTION, FUNCTION }; + + // Main entry point into GDB JIT realized as a JitCodeEventHandler. + static void EventHandler(const v8::JitCodeEvent* event); static void AddCode(Handle<Name> name, Handle<Script> script, Handle<Code> code, CompilationInfo* info); - static void AddCode(CodeTag tag, Name* name, Code* code); - - static void AddCode(CodeTag tag, const char* name, Code* code); + static void RemoveCodeRange(Address start, Address end); - static void AddCode(CodeTag tag, Code* code); + private: + static void AddCode(const char* name, Code* code, CodeTag tag, Script* script, + CompilationInfo* info); static void RemoveCode(Code* code); - - static void RemoveCodeRange(Address start, Address end); - - static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info); }; #define GDBJIT(action) GDBJITInterface::action diff -Nru nodejs-0.11.13/deps/v8/src/generator.js nodejs-0.11.15/deps/v8/src/generator.js --- nodejs-0.11.13/deps/v8/src/generator.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/generator.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. "use strict"; @@ -55,6 +32,10 @@ return %_GeneratorThrow(this, exn); } +function GeneratorObjectIterator() { + return this; +} + function GeneratorFunctionPrototypeConstructor(x) { if (%_IsConstructCall()) { throw MakeTypeError('not_constructor', ['GeneratorFunctionPrototype']); @@ -63,10 +44,12 @@ function GeneratorFunctionConstructor(arg1) { // length == 1 var source = NewFunctionString(arguments, 'function*'); - var global_receiver = %GlobalReceiver(global); + var global_proxy = %GlobalProxy(global); // Compile the string in the constructor and not a helper so that errors // appear to come from here. 
- var f = %_CallFunction(global_receiver, %CompileString(source, true)); + var f = %CompileString(source, true); + if (!IS_FUNCTION(f)) return f; + f = %_CallFunction(global_proxy, f); %FunctionMarkNameShouldPrintAsAnonymous(f); return f; } @@ -79,13 +62,16 @@ DONT_ENUM | DONT_DELETE | READ_ONLY, ["next", GeneratorObjectNext, "throw", GeneratorObjectThrow]); - %SetProperty(GeneratorObjectPrototype, "constructor", - GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY); - %SetPrototype(GeneratorFunctionPrototype, $Function.prototype); + %FunctionSetName(GeneratorObjectIterator, '[Symbol.iterator]'); + %AddNamedProperty(GeneratorObjectPrototype, symbolIterator, + GeneratorObjectIterator, DONT_ENUM | DONT_DELETE | READ_ONLY); + %AddNamedProperty(GeneratorObjectPrototype, "constructor", + GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY); + %InternalSetPrototype(GeneratorFunctionPrototype, $Function.prototype); %SetCode(GeneratorFunctionPrototype, GeneratorFunctionPrototypeConstructor); - %SetProperty(GeneratorFunctionPrototype, "constructor", - GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY); - %SetPrototype(GeneratorFunction, $Function); + %AddNamedProperty(GeneratorFunctionPrototype, "constructor", + GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY); + %InternalSetPrototype(GeneratorFunction, $Function); %SetCode(GeneratorFunction, GeneratorFunctionConstructor); } diff -Nru nodejs-0.11.13/deps/v8/src/global-handles.cc nodejs-0.11.15/deps/v8/src/global-handles.cc --- nodejs-0.11.13/deps/v8/src/global-handles.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/global-handles.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2009 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "api.h" -#include "global-handles.h" +#include "src/api.h" +#include "src/global-handles.h" -#include "vm-state-inl.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { @@ -61,13 +38,13 @@ // Maps handle location (slot) to the containing node. 
static Node* FromLocation(Object** location) { - ASSERT(OFFSET_OF(Node, object_) == 0); + DCHECK(OFFSET_OF(Node, object_) == 0); return reinterpret_cast<Node*>(location); } Node() { - ASSERT(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset); - ASSERT(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset); + DCHECK(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset); + DCHECK(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset); STATIC_ASSERT(static_cast<int>(NodeState::kMask) == Internals::kNodeStateMask); STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue); @@ -96,7 +73,7 @@ void Initialize(int index, Node** first_free) { index_ = static_cast<uint8_t>(index); - ASSERT(static_cast<int>(index_) == index); + DCHECK(static_cast<int>(index_) == index); set_state(FREE); set_in_new_space_list(false); parameter_or_next_free_.next_free = *first_free; @@ -104,7 +81,7 @@ } void Acquire(Object* object) { - ASSERT(state() == FREE); + DCHECK(state() == FREE); object_ = object; class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId; set_independent(false); @@ -116,7 +93,7 @@ } void Release() { - ASSERT(state() != FREE); + DCHECK(state() != FREE); set_state(FREE); // Zap the values for eager trapping. object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue); @@ -185,18 +162,18 @@ } void MarkPending() { - ASSERT(state() == WEAK); + DCHECK(state() == WEAK); set_state(PENDING); } // Independent flag accessors. void MarkIndependent() { - ASSERT(state() != FREE); + DCHECK(state() != FREE); set_independent(true); } void MarkPartiallyDependent() { - ASSERT(state() != FREE); + DCHECK(state() != FREE); if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) { set_partially_dependent(true); } @@ -209,34 +186,35 @@ // Callback parameter accessors. 
void set_parameter(void* parameter) { - ASSERT(state() != FREE); + DCHECK(state() != FREE); parameter_or_next_free_.parameter = parameter; } void* parameter() const { - ASSERT(state() != FREE); + DCHECK(state() != FREE); return parameter_or_next_free_.parameter; } // Accessors for next free node in the free list. Node* next_free() { - ASSERT(state() == FREE); + DCHECK(state() == FREE); return parameter_or_next_free_.next_free; } void set_next_free(Node* value) { - ASSERT(state() == FREE); + DCHECK(state() == FREE); parameter_or_next_free_.next_free = value; } void MakeWeak(void* parameter, WeakCallback weak_callback) { - ASSERT(weak_callback != NULL); - ASSERT(state() != FREE); + DCHECK(weak_callback != NULL); + DCHECK(state() != FREE); + CHECK(object_ != NULL); set_state(WEAK); set_parameter(parameter); weak_callback_ = weak_callback; } void* ClearWeakness() { - ASSERT(state() != FREE); + DCHECK(state() != FREE); void* p = parameter(); set_state(NORMAL); set_parameter(NULL); @@ -257,9 +235,9 @@ { // Check that we are not passing a finalized external string to // the callback. - ASSERT(!object_->IsExternalAsciiString() || + DCHECK(!object_->IsExternalAsciiString() || ExternalAsciiString::cast(object_)->resource() != NULL); - ASSERT(!object_->IsExternalTwoByteString() || + DCHECK(!object_->IsExternalTwoByteString() || ExternalTwoByteString::cast(object_)->resource() != NULL); // Leaving V8. 
VMState<EXTERNAL> state(isolate); @@ -338,12 +316,12 @@ } Node* node_at(int index) { - ASSERT(0 <= index && index < kSize); + DCHECK(0 <= index && index < kSize); return &nodes_[index]; } void IncreaseUses() { - ASSERT(used_nodes_ < kSize); + DCHECK(used_nodes_ < kSize); if (used_nodes_++ == 0) { NodeBlock* old_first = global_handles_->first_used_block_; global_handles_->first_used_block_ = this; @@ -355,7 +333,7 @@ } void DecreaseUses() { - ASSERT(used_nodes_ > 0); + DCHECK(used_nodes_ > 0); if (--used_nodes_ == 0) { if (next_used_ != NULL) next_used_->prev_used_ = prev_used_; if (prev_used_ != NULL) prev_used_->next_used_ = next_used_; @@ -393,7 +371,7 @@ intptr_t ptr = reinterpret_cast<intptr_t>(this); ptr = ptr - index_ * sizeof(Node); NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr); - ASSERT(block->node_at(index_) == this); + DCHECK(block->node_at(index_) == this); return block; } @@ -427,12 +405,12 @@ bool done() const { return block_ == NULL; } Node* node() const { - ASSERT(!done()); + DCHECK(!done()); return block_->node_at(index_); } void Advance() { - ASSERT(!done()); + DCHECK(!done()); if (++index_ < NodeBlock::kSize) return; index_ = 0; block_ = block_->next_used(); @@ -472,7 +450,7 @@ first_block_ = new NodeBlock(this, first_block_); first_block_->PutNodesOnFreeList(&first_free_); } - ASSERT(first_free_ != NULL); + DCHECK(first_free_ != NULL); // Take the first node in the free list. 
Node* result = first_free_; first_free_ = result->next_free(); @@ -487,7 +465,7 @@ Handle<Object> GlobalHandles::CopyGlobal(Object** location) { - ASSERT(location != NULL); + DCHECK(location != NULL); return Node::FromLocation(location)->GetGlobalHandles()->Create(*location); } @@ -566,7 +544,7 @@ WeakSlotCallbackWithHeap f) { for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; - ASSERT(node->is_in_new_space_list()); + DCHECK(node->is_in_new_space_list()); if ((node->is_independent() || node->is_partially_dependent()) && node->IsWeak() && f(isolate_->heap(), node->location())) { node->MarkPending(); @@ -578,7 +556,7 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) { for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; - ASSERT(node->is_in_new_space_list()); + DCHECK(node->is_in_new_space_list()); if ((node->is_independent() || node->is_partially_dependent()) && node->IsWeakRetainer()) { v->VisitPointer(node->location()); @@ -594,7 +572,7 @@ bool any_group_was_visited = false; for (int i = 0; i < object_groups_.length(); i++) { ObjectGroup* entry = object_groups_.at(i); - ASSERT(entry != NULL); + DCHECK(entry != NULL); Object*** objects = entry->objects; bool group_should_be_visited = false; @@ -633,21 +611,21 @@ } -bool GlobalHandles::PostGarbageCollectionProcessing( - GarbageCollector collector, GCTracer* tracer) { +int GlobalHandles::PostGarbageCollectionProcessing( + GarbageCollector collector) { // Process weak global handle callbacks. This must be done after the // GC is completely done, because the callbacks may invoke arbitrary // API functions. 
- ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC); + DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC); const int initial_post_gc_processing_count = ++post_gc_processing_count_; - bool next_gc_likely_to_collect_more = false; + int freed_nodes = 0; if (collector == SCAVENGER) { for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; - ASSERT(node->is_in_new_space_list()); + DCHECK(node->is_in_new_space_list()); if (!node->IsRetainer()) { // Free nodes do not have weak callbacks. Do not use them to compute - // the next_gc_likely_to_collect_more. + // the freed_nodes. continue; } // Skip dependent handles. Their weak callbacks might expect to be @@ -663,29 +641,29 @@ // PostGarbageCollection processing. The current node might // have been deleted in that round, so we need to bail out (or // restart the processing). - return next_gc_likely_to_collect_more; + return freed_nodes; } } if (!node->IsRetainer()) { - next_gc_likely_to_collect_more = true; + freed_nodes++; } } } else { for (NodeIterator it(this); !it.done(); it.Advance()) { if (!it.node()->IsRetainer()) { // Free nodes do not have weak callbacks. Do not use them to compute - // the next_gc_likely_to_collect_more. + // the freed_nodes. continue; } it.node()->clear_partially_dependent(); if (it.node()->PostGarbageCollectionProcessing(isolate_)) { if (initial_post_gc_processing_count != post_gc_processing_count_) { // See the comment above. 
- return next_gc_likely_to_collect_more; + return freed_nodes; } } if (!it.node()->IsRetainer()) { - next_gc_likely_to_collect_more = true; + freed_nodes++; } } } @@ -693,22 +671,22 @@ int last = 0; for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; - ASSERT(node->is_in_new_space_list()); + DCHECK(node->is_in_new_space_list()); if (node->IsRetainer()) { if (isolate_->heap()->InNewSpace(node->object())) { new_space_nodes_[last++] = node; - tracer->increment_nodes_copied_in_new_space(); + isolate_->heap()->IncrementNodesCopiedInNewSpace(); } else { node->set_in_new_space_list(false); - tracer->increment_nodes_promoted(); + isolate_->heap()->IncrementNodesPromoted(); } } else { node->set_in_new_space_list(false); - tracer->increment_nodes_died_in_new_space(); + isolate_->heap()->IncrementNodesDiedInNewSpace(); } } new_space_nodes_.Rewind(last); - return next_gc_likely_to_collect_more; + return freed_nodes; } @@ -840,7 +818,7 @@ v8::RetainedObjectInfo* info) { #ifdef DEBUG for (size_t i = 0; i < length; ++i) { - ASSERT(!Node::FromLocation(handles[i])->is_independent()); + DCHECK(!Node::FromLocation(handles[i])->is_independent()); } #endif if (length == 0) { @@ -871,9 +849,9 @@ Object*** children, size_t length) { #ifdef DEBUG - ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->is_independent()); + DCHECK(!Node::FromLocation(BitCast<Object**>(parent))->is_independent()); for (size_t i = 0; i < length; ++i) { - ASSERT(!Node::FromLocation(children[i])->is_independent()); + DCHECK(!Node::FromLocation(children[i])->is_independent()); } #endif if (length == 0) return; @@ -885,13 +863,13 @@ void GlobalHandles::SetReferenceFromGroup(UniqueId id, Object** child) { - ASSERT(!Node::FromLocation(child)->is_independent()); + DCHECK(!Node::FromLocation(child)->is_independent()); implicit_ref_connections_.Add(ObjectGroupConnection(id, child)); } void GlobalHandles::SetReference(HeapObject** parent, Object** child) { - 
ASSERT(!Node::FromLocation(child)->is_independent()); + DCHECK(!Node::FromLocation(child)->is_independent()); ImplicitRefGroup* group = new ImplicitRefGroup(parent, 1); group->children[0] = child; implicit_ref_groups_.Add(group); @@ -1043,7 +1021,7 @@ void EternalHandles::IterateAllRoots(ObjectVisitor* visitor) { int limit = size_; for (int i = 0; i < blocks_.length(); i++) { - ASSERT(limit > 0); + DCHECK(limit > 0); Object** block = blocks_[i]; visitor->VisitPointers(block, block + Min(limit, kSize)); limit -= kSize; @@ -1071,9 +1049,9 @@ void EternalHandles::Create(Isolate* isolate, Object* object, int* index) { - ASSERT_EQ(kInvalidIndex, *index); + DCHECK_EQ(kInvalidIndex, *index); if (object == NULL) return; - ASSERT_NE(isolate->heap()->the_hole_value(), object); + DCHECK_NE(isolate->heap()->the_hole_value(), object); int block = size_ >> kShift; int offset = size_ & kMask; // need to resize @@ -1083,7 +1061,7 @@ MemsetPointer(next_block, the_hole, kSize); blocks_.Add(next_block); } - ASSERT_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]); + DCHECK_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]); blocks_[block][offset] = object; if (isolate->heap()->InNewSpace(object)) { new_space_indices_.Add(size_); diff -Nru nodejs-0.11.13/deps/v8/src/global-handles.h nodejs-0.11.15/deps/v8/src/global-handles.h --- nodejs-0.11.13/deps/v8/src/global-handles.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/global-handles.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,20 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_GLOBAL_HANDLES_H_ #define V8_GLOBAL_HANDLES_H_ -#include "../include/v8.h" -#include "../include/v8-profiler.h" +#include "include/v8.h" +#include "include/v8-profiler.h" -#include "handles.h" -#include "list.h" -#include "v8utils.h" +#include "src/handles.h" +#include "src/list.h" +#include "src/utils.h" namespace v8 { namespace internal { -class GCTracer; class HeapStats; class ObjectVisitor; @@ -61,7 +37,7 @@ struct ObjectGroup { explicit ObjectGroup(size_t length) : info(NULL), length(length) { - ASSERT(length > 0); + DCHECK(length > 0); objects = new Object**[length]; } ~ObjectGroup(); @@ -75,7 +51,7 @@ struct ImplicitRefGroup { ImplicitRefGroup(HeapObject** parent, size_t length) : parent(parent), length(length) { - ASSERT(length > 0); + DCHECK(length > 0); children = new Object**[length]; } ~ImplicitRefGroup(); @@ -178,9 +154,8 @@ static bool IsWeak(Object** location); // Process pending weak handles. - // Returns true if next major GC is likely to collect more garbage. - bool PostGarbageCollectionProcessing(GarbageCollector collector, - GCTracer* tracer); + // Returns the number of freed nodes. + int PostGarbageCollectionProcessing(GarbageCollector collector); // Iterates over all strong handles. void IterateStrongRoots(ObjectVisitor* v); @@ -360,7 +335,7 @@ // Grab the handle for an existing SingletonHandle. 
inline Handle<Object> GetSingleton(SingletonHandle singleton) { - ASSERT(Exists(singleton)); + DCHECK(Exists(singleton)); return Get(singleton_handles_[singleton]); } @@ -392,7 +367,7 @@ // Gets the slot for an index inline Object** GetLocation(int index) { - ASSERT(index >= 0 && index < size_); + DCHECK(index >= 0 && index < size_); return &blocks_[index >> kShift][index & kMask]; } diff -Nru nodejs-0.11.13/deps/v8/src/globals.h nodejs-0.11.15/deps/v8/src/globals.h --- nodejs-0.11.13/deps/v8/src/globals.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/globals.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_GLOBALS_H_ #define V8_GLOBALS_H_ -#include "../include/v8stdint.h" +#include "include/v8stdint.h" + +#include "src/base/build_config.h" +#include "src/base/logging.h" +#include "src/base/macros.h" // Unfortunately, the INFINITY macro cannot be used with the '-pedantic' // warning flag and certain versions of GCC due to a bug: @@ -44,93 +25,27 @@ # define V8_INFINITY INFINITY #endif -namespace v8 { -namespace internal { - -// Processor architecture detection. For more info on what's defined, see: -// http://msdn.microsoft.com/en-us/library/b0084kay.aspx -// http://www.agner.org/optimize/calling_conventions.pdf -// or with gcc, run: "echo | gcc -E -dM -" -#if defined(_M_X64) || defined(__x86_64__) -#if defined(__native_client__) -// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8 -// generates ARM machine code, together with a portable ARM simulator -// compiled for the host architecture in question. -// -// Since Native Client is ILP-32 on all architectures we use -// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86. 
-#define V8_HOST_ARCH_IA32 1 -#define V8_HOST_ARCH_32_BIT 1 -#define V8_HOST_CAN_READ_UNALIGNED 1 -#else -#define V8_HOST_ARCH_X64 1 -#define V8_HOST_ARCH_64_BIT 1 -#define V8_HOST_CAN_READ_UNALIGNED 1 -#endif // __native_client__ -#elif defined(_M_IX86) || defined(__i386__) -#define V8_HOST_ARCH_IA32 1 -#define V8_HOST_ARCH_32_BIT 1 -#define V8_HOST_CAN_READ_UNALIGNED 1 -#elif defined(__AARCH64EL__) -#define V8_HOST_ARCH_ARM64 1 -#define V8_HOST_ARCH_64_BIT 1 -#define V8_HOST_CAN_READ_UNALIGNED 1 -#elif defined(__ARMEL__) -#define V8_HOST_ARCH_ARM 1 -#define V8_HOST_ARCH_32_BIT 1 -#elif defined(__MIPSEL__) -#define V8_HOST_ARCH_MIPS 1 -#define V8_HOST_ARCH_32_BIT 1 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM || \ + V8_TARGET_ARCH_ARM64 +#define V8_TURBOFAN_BACKEND 1 #else -#error "Host architecture was not detected as supported by v8" +#define V8_TURBOFAN_BACKEND 0 #endif - -#if defined(__ARM_ARCH_7A__) || \ - defined(__ARM_ARCH_7R__) || \ - defined(__ARM_ARCH_7__) -# define CAN_USE_ARMV7_INSTRUCTIONS 1 -# ifndef CAN_USE_VFP3_INSTRUCTIONS -# define CAN_USE_VFP3_INSTRUCTIONS -# endif +#if V8_TURBOFAN_BACKEND && !(V8_OS_WIN && V8_TARGET_ARCH_X64) +#define V8_TURBOFAN_TARGET 1 +#else +#define V8_TURBOFAN_TARGET 0 #endif +namespace v8 { -// Target architecture detection. This may be set externally. If not, detect -// in the same way as the host architecture, that is, target the native -// environment as presented by the compiler. 
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \ - !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS -#if defined(_M_X64) || defined(__x86_64__) -#define V8_TARGET_ARCH_X64 1 -#elif defined(_M_IX86) || defined(__i386__) -#define V8_TARGET_ARCH_IA32 1 -#elif defined(__AARCH64EL__) -#define V8_TARGET_ARCH_ARM64 1 -#elif defined(__ARMEL__) -#define V8_TARGET_ARCH_ARM 1 -#elif defined(__MIPSEL__) -#define V8_TARGET_ARCH_MIPS 1 -#else -#error Target architecture was not detected as supported by v8 -#endif -#endif +namespace base { +class Mutex; +class RecursiveMutex; +class VirtualMemory; +} -// Check for supported combinations of host and target architectures. -#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32 -#error Target architecture ia32 is only supported on ia32 host -#endif -#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64 -#error Target architecture x64 is only supported on x64 host -#endif -#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM)) -#error Target architecture arm is only supported on arm and ia32 host -#endif -#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64)) -#error Target architecture arm64 is only supported on arm64 and x64 host -#endif -#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS)) -#error Target architecture mips is only supported on mips and ia32 host -#endif +namespace internal { // Determine whether we are running in a simulated environment. // Setting USE_SIMULATOR explicitly from the build script will force @@ -145,21 +60,9 @@ #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS) #define USE_SIMULATOR 1 #endif +#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64) +#define USE_SIMULATOR 1 #endif - -// Determine architecture endiannes (we only support little-endian). 
-#if V8_TARGET_ARCH_IA32 -#define V8_TARGET_LITTLE_ENDIAN 1 -#elif V8_TARGET_ARCH_X64 -#define V8_TARGET_LITTLE_ENDIAN 1 -#elif V8_TARGET_ARCH_ARM -#define V8_TARGET_LITTLE_ENDIAN 1 -#elif V8_TARGET_ARCH_ARM64 -#define V8_TARGET_LITTLE_ENDIAN 1 -#elif V8_TARGET_ARCH_MIPS -#define V8_TARGET_LITTLE_ENDIAN 1 -#else -#error Unknown target architecture endiannes #endif // Determine whether the architecture uses an out-of-line constant pool. @@ -185,60 +88,6 @@ typedef uint8_t byte; typedef byte* Address; -// Define our own macros for writing 64-bit constants. This is less fragile -// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it -// works on compilers that don't have it (like MSVC). -#if V8_CC_MSVC -# define V8_UINT64_C(x) (x ## UI64) -# define V8_INT64_C(x) (x ## I64) -# if V8_HOST_ARCH_64_BIT -# define V8_INTPTR_C(x) (x ## I64) -# define V8_PTR_PREFIX "ll" -# else -# define V8_INTPTR_C(x) (x) -# define V8_PTR_PREFIX "" -# endif // V8_HOST_ARCH_64_BIT -#elif V8_CC_MINGW64 -# define V8_UINT64_C(x) (x ## ULL) -# define V8_INT64_C(x) (x ## LL) -# define V8_INTPTR_C(x) (x ## LL) -# define V8_PTR_PREFIX "I64" -#elif V8_HOST_ARCH_64_BIT -# if V8_OS_MACOSX -# define V8_UINT64_C(x) (x ## ULL) -# define V8_INT64_C(x) (x ## LL) -# else -# define V8_UINT64_C(x) (x ## UL) -# define V8_INT64_C(x) (x ## L) -# endif -# define V8_INTPTR_C(x) (x ## L) -# define V8_PTR_PREFIX "l" -#else -# define V8_UINT64_C(x) (x ## ULL) -# define V8_INT64_C(x) (x ## LL) -# define V8_INTPTR_C(x) (x) -# define V8_PTR_PREFIX "" -#endif - -// The following macro works on both 32 and 64-bit platforms. 
-// Usage: instead of writing 0x1234567890123456 -// write V8_2PART_UINT64_C(0x12345678,90123456); -#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u)) - -#define V8PRIxPTR V8_PTR_PREFIX "x" -#define V8PRIdPTR V8_PTR_PREFIX "d" -#define V8PRIuPTR V8_PTR_PREFIX "u" - -// Fix for Mac OS X defining uintptr_t as "unsigned long": -#if V8_OS_MACOSX -#undef V8PRIxPTR -#define V8PRIxPTR "lx" -#endif - -#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__) -#define USING_BSD_ABI -#endif - // ----------------------------------------------------------------------------- // Constants @@ -266,7 +115,11 @@ const int kDoubleSize = sizeof(double); // NOLINT const int kIntptrSize = sizeof(intptr_t); // NOLINT const int kPointerSize = sizeof(void*); // NOLINT +#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT +const int kRegisterSize = kPointerSize + kPointerSize; +#else const int kRegisterSize = kPointerSize; +#endif const int kPCOnStackSize = kRegisterSize; const int kFPOnStackSize = kRegisterSize; @@ -276,13 +129,23 @@ const int kPointerSizeLog2 = 3; const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000); const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF); -const bool kIs64BitArch = true; +const bool kRequiresCodeRange = true; +const size_t kMaximalCodeRangeSize = 512 * MB; #else const int kPointerSizeLog2 = 2; const intptr_t kIntptrSignBit = 0x80000000; const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu; -const bool kIs64BitArch = false; +#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT +// x32 port also requires code range. 
+const bool kRequiresCodeRange = true; +const size_t kMaximalCodeRangeSize = 256 * MB; +#else +const bool kRequiresCodeRange = false; +const size_t kMaximalCodeRangeSize = 0 * MB; #endif +#endif + +STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2)); const int kBitsPerByte = 8; const int kBitsPerByteLog2 = 3; @@ -316,31 +179,6 @@ #define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1)) -// The expression OFFSET_OF(type, field) computes the byte-offset -// of the specified field relative to the containing type. This -// corresponds to 'offsetof' (in stddef.h), except that it doesn't -// use 0 or NULL, which causes a problem with the compiler warnings -// we have enabled (which is also why 'offsetof' doesn't seem to work). -// Here we simply use the non-zero value 4, which seems to work. -#define OFFSET_OF(type, field) \ - (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4) - - -// The expression ARRAY_SIZE(a) is a compile-time constant of type -// size_t which represents the number of elements of the given -// array. You should only use ARRAY_SIZE on statically allocated -// arrays. -#define ARRAY_SIZE(a) \ - ((sizeof(a) / sizeof(*(a))) / \ - static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) - - -// The USE(x) template is used to silence C++ compiler warnings -// issued for (yet) unused variables (typically parameters). -template <typename T> -inline void USE(T) { } - - // FUNCTION_ADDR(f) gets the address of a C function f. 
#define FUNCTION_ADDR(f) \ (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f))) @@ -354,67 +192,568 @@ } -// A macro to disallow the evil copy constructor and operator= functions -// This should be used in the private: declarations for a class -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) V8_DELETE; \ - void operator=(const TypeName&) V8_DELETE +// ----------------------------------------------------------------------------- +// Forward declarations for frequently used classes +// (sorted alphabetically) +class FreeStoreAllocationPolicy; +template <typename T, class P = FreeStoreAllocationPolicy> class List; -// A macro to disallow all the implicit constructors, namely the -// default constructor, copy constructor and operator= functions. -// -// This should be used in the private: declarations for a class -// that wants to prevent anyone from instantiating it. This is -// especially useful for classes containing only static methods. -#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ - TypeName() V8_DELETE; \ - DISALLOW_COPY_AND_ASSIGN(TypeName) +// ----------------------------------------------------------------------------- +// Declarations for use in both the preparser and the rest of V8. +// The Strict Mode (ECMA-262 5th edition, 4.2.2). -// Newly written code should use V8_INLINE and V8_NOINLINE directly. -#define INLINE(declarator) V8_INLINE declarator -#define NO_INLINE(declarator) V8_NOINLINE declarator +enum StrictMode { SLOPPY, STRICT }; -// Newly written code should use V8_WARN_UNUSED_RESULT. -#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT +// Mask for the sign bit in a smi. +const intptr_t kSmiSignMask = kIntptrSignBit; +const int kObjectAlignmentBits = kPointerSizeLog2; +const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits; +const intptr_t kObjectAlignmentMask = kObjectAlignment - 1; + +// Desired alignment for pointers. 
+const intptr_t kPointerAlignment = (1 << kPointerSizeLog2); +const intptr_t kPointerAlignmentMask = kPointerAlignment - 1; + +// Desired alignment for double values. +const intptr_t kDoubleAlignment = 8; +const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1; + +// Desired alignment for generated code is 32 bytes (to improve cache line +// utilization). +const int kCodeAlignmentBits = 5; +const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits; +const intptr_t kCodeAlignmentMask = kCodeAlignment - 1; + +// The owner field of a page is tagged with the page header tag. We need that +// to find out if a slot is part of a large object. If we mask out the lower +// 0xfffff bits (1M pages), go to the owner offset, and see that this field +// is tagged with the page header tag, we can just look up the owner. +// Otherwise, we know that we are somewhere (not within the first 1M) in a +// large object. +const int kPageHeaderTag = 3; +const int kPageHeaderTagSize = 2; +const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1; + + +// Zap-value: The value used for zapping dead objects. +// Should be a recognizable hex value tagged as a failure. 
+#ifdef V8_HOST_ARCH_64_BIT +const Address kZapValue = + reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef)); +const Address kHandleZapValue = + reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf)); +const Address kGlobalHandleZapValue = + reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf)); +const Address kFromSpaceZapValue = + reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf)); +const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb); +const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef); +const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf; +#else +const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef); +const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf); +const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf); +const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf); +const uint32_t kSlotsZapValue = 0xbeefdeef; +const uint32_t kDebugZapValue = 0xbadbaddb; +const uint32_t kFreeListZapValue = 0xfeed1eaf; +#endif + +const int kCodeZapValue = 0xbadc0de; + +// On Intel architecture, cache line size is 64 bytes. +// On ARM it may be less (32 bytes), but as far this constant is +// used for aligning data, it doesn't hurt to align on a greater value. +#define PROCESSOR_CACHE_LINE_SIZE 64 + +// Constants relevant to double precision floating point numbers. +// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30. +const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32); -// Define DISABLE_ASAN macros. 
-#if defined(__has_feature) -#if __has_feature(address_sanitizer) -#define DISABLE_ASAN __attribute__((no_sanitize_address)) -#endif -#endif +// ----------------------------------------------------------------------------- +// Forward declarations for frequently used classes -#ifndef DISABLE_ASAN -#define DISABLE_ASAN -#endif +class AccessorInfo; +class Allocation; +class Arguments; +class Assembler; +class Code; +class CodeGenerator; +class CodeStub; +class Context; +class Debug; +class Debugger; +class DebugInfo; +class Descriptor; +class DescriptorArray; +class TransitionArray; +class ExternalReference; +class FixedArray; +class FunctionTemplateInfo; +class MemoryChunk; +class SeededNumberDictionary; +class UnseededNumberDictionary; +class NameDictionary; +template <typename T> class MaybeHandle; +template <typename T> class Handle; +class Heap; +class HeapObject; +class IC; +class InterceptorInfo; +class Isolate; +class JSReceiver; +class JSArray; +class JSFunction; +class JSObject; +class LargeObjectSpace; +class LookupResult; +class MacroAssembler; +class Map; +class MapSpace; +class MarkCompactCollector; +class NewSpace; +class Object; +class OldSpace; +class Foreign; +class Scope; +class ScopeInfo; +class Script; +class Smi; +template <typename Config, class Allocator = FreeStoreAllocationPolicy> + class SplayTree; +class String; +class Name; +class Struct; +class Variable; +class RelocInfo; +class Deserializer; +class MessageLocation; -#if V8_CC_GNU -#define V8_IMMEDIATE_CRASH() __builtin_trap() -#else -#define V8_IMMEDIATE_CRASH() ((void(*)())0)() -#endif +typedef bool (*WeakSlotCallback)(Object** pointer); +typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer); // ----------------------------------------------------------------------------- -// Forward declarations for frequently used classes -// (sorted alphabetically) +// Miscellaneous + +// NOTE: SpaceIterator depends on AllocationSpace enumeration values being +// consecutive. 
+enum AllocationSpace { + NEW_SPACE, // Semispaces collected with copying collector. + OLD_POINTER_SPACE, // May contain pointers to new space. + OLD_DATA_SPACE, // Must not have pointers to new space. + CODE_SPACE, // No pointers to new space, marked executable. + MAP_SPACE, // Only and all map objects. + CELL_SPACE, // Only and all cell objects. + PROPERTY_CELL_SPACE, // Only and all global property cell objects. + LO_SPACE, // Promoted large objects. + INVALID_SPACE, // Only used in AllocationResult to signal success. + + FIRST_SPACE = NEW_SPACE, + LAST_SPACE = LO_SPACE, + FIRST_PAGED_SPACE = OLD_POINTER_SPACE, + LAST_PAGED_SPACE = PROPERTY_CELL_SPACE +}; +const int kSpaceTagSize = 3; +const int kSpaceTagMask = (1 << kSpaceTagSize) - 1; + + +// A flag that indicates whether objects should be pretenured when +// allocated (allocated directly into the old generation) or not +// (allocated in the young generation if the object size and type +// allows). +enum PretenureFlag { NOT_TENURED, TENURED }; + +enum MinimumCapacity { + USE_DEFAULT_MINIMUM_CAPACITY, + USE_CUSTOM_MINIMUM_CAPACITY +}; + +enum GarbageCollector { SCAVENGER, MARK_COMPACTOR }; + +enum Executability { NOT_EXECUTABLE, EXECUTABLE }; + +enum VisitMode { + VISIT_ALL, + VISIT_ALL_IN_SCAVENGE, + VISIT_ALL_IN_SWEEP_NEWSPACE, + VISIT_ONLY_STRONG +}; + +// Flag indicating whether code is built into the VM (one of the natives files). +enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE }; + + +// A CodeDesc describes a buffer holding instructions and relocation +// information. The instructions start at the beginning of the buffer +// and grow forward, the relocation information starts at the end of +// the buffer and grows backward. 
+// +// |<--------------- buffer_size ---------------->| +// |<-- instr_size -->| |<-- reloc_size -->| +// +==================+========+==================+ +// | instructions | free | reloc info | +// +==================+========+==================+ +// ^ +// | +// buffer + +struct CodeDesc { + byte* buffer; + int buffer_size; + int instr_size; + int reloc_size; + Assembler* origin; +}; + + +// Callback function used for iterating objects in heap spaces, +// for example, scanning heap objects. +typedef int (*HeapObjectCallback)(HeapObject* obj); + + +// Callback function used for checking constraints when copying/relocating +// objects. Returns true if an object can be copied/relocated from its +// old_addr to a new_addr. +typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr); + + +// Callback function on inline caches, used for iterating over inline caches +// in compiled code. +typedef void (*InlineCacheCallback)(Code* code, Address ic); + + +// State for inline cache call sites. Aliased as IC::State. +enum InlineCacheState { + // Has never been executed. + UNINITIALIZED, + // Has been executed but monomorhic state has been delayed. + PREMONOMORPHIC, + // Has been executed and only one receiver type has been seen. + MONOMORPHIC, + // Check failed due to prototype (or map deprecation). + PROTOTYPE_FAILURE, + // Multiple receiver types have been seen. + POLYMORPHIC, + // Many receiver types have been seen. + MEGAMORPHIC, + // A generic handler is installed and no extra typefeedback is recorded. + GENERIC, + // Special state for debug break or step in prepare stubs. + DEBUG_STUB, + // Type-vector-based ICs have a default state, with the full calculation + // of IC state only determined by a look at the IC and the typevector + // together. + DEFAULT +}; + + +enum CallFunctionFlags { + NO_CALL_FUNCTION_FLAGS, + CALL_AS_METHOD, + // Always wrap the receiver and call to the JSFunction. 
Only use this flag + // both the receiver type and the target method are statically known. + WRAP_AND_CALL +}; + + +enum CallConstructorFlags { + NO_CALL_CONSTRUCTOR_FLAGS, + // The call target is cached in the instruction stream. + RECORD_CONSTRUCTOR_TARGET +}; + + +enum CacheHolderFlag { + kCacheOnPrototype, + kCacheOnPrototypeReceiverIsDictionary, + kCacheOnPrototypeReceiverIsPrimitive, + kCacheOnReceiver +}; + + +// The Store Buffer (GC). +typedef enum { + kStoreBufferFullEvent, + kStoreBufferStartScanningPagesEvent, + kStoreBufferScanningPageEvent +} StoreBufferEvent; + + +typedef void (*StoreBufferCallback)(Heap* heap, + MemoryChunk* page, + StoreBufferEvent event); + + +// Union used for fast testing of specific double values. +union DoubleRepresentation { + double value; + int64_t bits; + DoubleRepresentation(double x) { value = x; } + bool operator==(const DoubleRepresentation& other) const { + return bits == other.bits; + } +}; + + +// Union used for customized checking of the IEEE double types +// inlined within v8 runtime, rather than going to the underlying +// platform headers and libraries +union IeeeDoubleLittleEndianArchType { + double d; + struct { + unsigned int man_low :32; + unsigned int man_high :20; + unsigned int exp :11; + unsigned int sign :1; + } bits; +}; + + +union IeeeDoubleBigEndianArchType { + double d; + struct { + unsigned int sign :1; + unsigned int exp :11; + unsigned int man_high :20; + unsigned int man_low :32; + } bits; +}; + + +// AccessorCallback +struct AccessorDescriptor { + Object* (*getter)(Isolate* isolate, Object* object, void* data); + Object* (*setter)( + Isolate* isolate, JSObject* object, Object* value, void* data); + void* data; +}; + + +// Logging and profiling. A StateTag represents a possible state of +// the VM. The logger maintains a stack of these. 
Creating a VMState +// object enters a state by pushing on the stack, and destroying a +// VMState object leaves a state by popping the current state from the +// stack. + +enum StateTag { + JS, + GC, + COMPILER, + OTHER, + EXTERNAL, + IDLE +}; -class FreeStoreAllocationPolicy; -template <typename T, class P = FreeStoreAllocationPolicy> class List; // ----------------------------------------------------------------------------- -// Declarations for use in both the preparser and the rest of V8. +// Macros -// The Strict Mode (ECMA-262 5th edition, 4.2.2). +// Testers for test. + +#define HAS_SMI_TAG(value) \ + ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag) + +#define HAS_FAILURE_TAG(value) \ + ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag) + +// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer +#define OBJECT_POINTER_ALIGN(value) \ + (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask) + +// POINTER_SIZE_ALIGN returns the value aligned as a pointer. +#define POINTER_SIZE_ALIGN(value) \ + (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask) + +// CODE_POINTER_ALIGN returns the value aligned as a generated code segment. +#define CODE_POINTER_ALIGN(value) \ + (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask) + +// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk") +// inside a C++ class and new and delete will be overloaded so logging is +// performed. +// This file (globals.h) is included before log.h, so we use direct calls to +// the Logger rather than the LOG macro. 
+#ifdef DEBUG +#define TRACK_MEMORY(name) \ + void* operator new(size_t size) { \ + void* result = ::operator new(size); \ + Logger::NewEventStatic(name, result, size); \ + return result; \ + } \ + void operator delete(void* object) { \ + Logger::DeleteEventStatic(name, object); \ + ::operator delete(object); \ + } +#else +#define TRACK_MEMORY(name) +#endif -enum StrictMode { SLOPPY, STRICT }; +// CPU feature flags. +enum CpuFeature { + // x86 + SSE4_1, + SSE3, + SAHF, + // ARM + VFP3, + ARMv7, + SUDIV, + MLS, + UNALIGNED_ACCESSES, + MOVW_MOVT_IMMEDIATE_LOADS, + VFP32DREGS, + NEON, + // MIPS + FPU, + // ARM64 + ALWAYS_ALIGN_CSP, + NUMBER_OF_CPU_FEATURES +}; + + +// Used to specify if a macro instruction must perform a smi check on tagged +// values. +enum SmiCheckType { + DONT_DO_SMI_CHECK, + DO_SMI_CHECK +}; + + +enum ScopeType { + EVAL_SCOPE, // The top-level scope for an eval source. + FUNCTION_SCOPE, // The top-level scope for a function. + MODULE_SCOPE, // The scope introduced by a module literal + GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval. + CATCH_SCOPE, // The scope introduced by catch. + BLOCK_SCOPE, // The scope introduced by a new block. + WITH_SCOPE // The scope introduced by with. +}; + + +const uint32_t kHoleNanUpper32 = 0x7FFFFFFF; +const uint32_t kHoleNanLower32 = 0xFFFFFFFF; +const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000; + +const uint64_t kHoleNanInt64 = + (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32; +const uint64_t kLastNonNaNInt64 = + (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32); + + +// The order of this enum has to be kept in sync with the predicates below. 
+enum VariableMode { + // User declared variables: + VAR, // declared via 'var', and 'function' declarations + + CONST_LEGACY, // declared via legacy 'const' declarations + + LET, // declared via 'let' declarations (first lexical) + + CONST, // declared via 'const' declarations + + MODULE, // declared via 'module' declaration (last lexical) + + // Variables introduced by the compiler: + INTERNAL, // like VAR, but not user-visible (may or may not + // be in a context) + + TEMPORARY, // temporary variables (not user-visible), stack-allocated + // unless the scope as a whole has forced context allocation + + DYNAMIC, // always require dynamic lookup (we don't know + // the declaration) + + DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the + // variable is global unless it has been shadowed + // by an eval-introduced variable + + DYNAMIC_LOCAL // requires dynamic lookup, but we know that the + // variable is local and where it is unless it + // has been shadowed by an eval-introduced + // variable +}; + + +inline bool IsDynamicVariableMode(VariableMode mode) { + return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL; +} + + +inline bool IsDeclaredVariableMode(VariableMode mode) { + return mode >= VAR && mode <= MODULE; +} + + +inline bool IsLexicalVariableMode(VariableMode mode) { + return mode >= LET && mode <= MODULE; +} + + +inline bool IsImmutableVariableMode(VariableMode mode) { + return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY; +} + + +// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable +// and immutable bindings that can be in two states: initialized and +// uninitialized. In ES5 only immutable bindings have these two states. When +// accessing a binding, it needs to be checked for initialization. However in +// the following cases the binding is initialized immediately after creation +// so the initialization check can always be skipped: +// 1. Var declared local variables. +// var foo; +// 2. 
A local variable introduced by a function declaration. +// function foo() {} +// 3. Parameters +// function x(foo) {} +// 4. Catch bound variables. +// try {} catch (foo) {} +// 6. Function variables of named function expressions. +// var x = function foo() {} +// 7. Implicit binding of 'this'. +// 8. Implicit binding of 'arguments' in functions. +// +// ES5 specified object environment records which are introduced by ES elements +// such as Program and WithStatement that associate identifier bindings with the +// properties of some object. In the specification only mutable bindings exist +// (which may be non-writable) and have no distinct initialization step. However +// V8 allows const declarations in global code with distinct creation and +// initialization steps which are represented by non-writable properties in the +// global object. As a result also these bindings need to be checked for +// initialization. +// +// The following enum specifies a flag that indicates if the binding needs a +// distinct initialization step (kNeedsInitialization) or if the binding is +// immediately initialized upon creation (kCreatedInitialized). +enum InitializationFlag { + kNeedsInitialization, + kCreatedInitialized +}; + + +enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned }; + + +enum ClearExceptionFlag { + KEEP_EXCEPTION, + CLEAR_EXCEPTION +}; + + +enum MinusZeroMode { + TREAT_MINUS_ZERO_AS_ZERO, + FAIL_ON_MINUS_ZERO +}; } } // namespace v8::internal +namespace i = v8::internal; + #endif // V8_GLOBALS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/handles.cc nodejs-0.11.15/deps/v8/src/handles.cc --- nodejs-0.11.13/deps/v8/src/handles.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/handles.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,10 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "accessors.h" -#include "api.h" -#include "arguments.h" -#include "bootstrapper.h" -#include "compiler.h" -#include "debug.h" -#include "execution.h" -#include "global-handles.h" -#include "natives.h" -#include "runtime.h" -#include "string-search.h" -#include "stub-cache.h" -#include "vm-state-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/handles.h" namespace v8 { namespace internal { @@ -59,7 +24,7 @@ Object** result = current->next; - ASSERT(result == current->limit); + DCHECK(result == current->limit); // Make sure there's at least one scope on the stack and that the // top of the scope stack isn't a barrier. if (!Utils::ApiCheck(current->level != 0, @@ -74,7 +39,7 @@ Object** limit = &impl->blocks()->last()[kHandleBlockSize]; if (current->limit != limit) { current->limit = limit; - ASSERT(limit - current->next < kHandleBlockSize); + DCHECK(limit - current->next < kHandleBlockSize); } } @@ -101,7 +66,7 @@ #ifdef ENABLE_HANDLE_ZAPPING void HandleScope::ZapRange(Object** start, Object** end) { - ASSERT(end - start <= kHandleBlockSize); + DCHECK(end - start <= kHandleBlockSize); for (Object** p = start; p != end; p++) { *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue; } @@ -124,612 +89,13 @@ } -Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content, - Handle<JSArray> array) { - CALL_HEAP_FUNCTION(content->GetIsolate(), - content->AddKeysFromJSArray(*array), FixedArray); -} - - -Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first, - Handle<FixedArray> second) { - CALL_HEAP_FUNCTION(first->GetIsolate(), - first->UnionOfKeys(*second), FixedArray); -} - - -Handle<JSGlobalProxy> ReinitializeJSGlobalProxy( - Handle<JSFunction> constructor, - Handle<JSGlobalProxy> global) { - CALL_HEAP_FUNCTION( - constructor->GetIsolate(), - constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global), - JSGlobalProxy); -} - - -void FlattenString(Handle<String> string) { - CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten()); -} - - -Handle<String> FlattenGetString(Handle<String> string) { - CALL_HEAP_FUNCTION(string->GetIsolate(), string->TryFlatten(), String); -} - - -Handle<Object> ForceSetProperty(Handle<JSObject> object, - Handle<Object> key, - Handle<Object> value, - PropertyAttributes attributes) { - return 
Runtime::ForceSetObjectProperty(object->GetIsolate(), object, key, - value, attributes); -} - - -Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key) { - Isolate* isolate = object->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - Runtime::DeleteObjectProperty( - isolate, object, key, JSReceiver::NORMAL_DELETION), - Object); -} - - -Handle<Object> ForceDeleteProperty(Handle<JSObject> object, - Handle<Object> key) { - Isolate* isolate = object->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - Runtime::DeleteObjectProperty( - isolate, object, key, JSReceiver::FORCE_DELETION), - Object); -} - - -Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key) { - Isolate* isolate = obj->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - Runtime::HasObjectProperty(isolate, obj, key), Object); -} - - -Handle<Object> GetProperty(Handle<JSReceiver> obj, - const char* name) { - Isolate* isolate = obj->GetIsolate(); - Handle<String> str = isolate->factory()->InternalizeUtf8String(name); - CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object); -} - - -Handle<Object> GetProperty(Isolate* isolate, - Handle<Object> obj, - Handle<Object> key) { - CALL_HEAP_FUNCTION(isolate, - Runtime::GetObjectProperty(isolate, obj, key), Object); -} - - -Handle<String> LookupSingleCharacterStringFromCode(Isolate* isolate, - uint32_t index) { - CALL_HEAP_FUNCTION( - isolate, - isolate->heap()->LookupSingleCharacterStringFromCode(index), - String); -} - - -// Wrappers for scripts are kept alive and cached in weak global -// handles referred from foreign objects held by the scripts as long as -// they are used. When they are not used anymore, the garbage -// collector will call the weak callback on the global handle -// associated with the wrapper and get rid of both the wrapper and the -// handle. 
-static void ClearWrapperCache( - const v8::WeakCallbackData<v8::Value, void>& data) { - Object** location = reinterpret_cast<Object**>(data.GetParameter()); - JSValue* wrapper = JSValue::cast(*location); - Foreign* foreign = Script::cast(wrapper->value())->wrapper(); - ASSERT_EQ(foreign->foreign_address(), reinterpret_cast<Address>(location)); - foreign->set_foreign_address(0); - GlobalHandles::Destroy(location); - Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate()); - isolate->counters()->script_wrappers()->Decrement(); -} - - -Handle<JSValue> GetScriptWrapper(Handle<Script> script) { - if (script->wrapper()->foreign_address() != NULL) { - // Return a handle for the existing script wrapper from the cache. - return Handle<JSValue>( - *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address())); - } - Isolate* isolate = script->GetIsolate(); - // Construct a new script wrapper. - isolate->counters()->script_wrappers()->Increment(); - Handle<JSFunction> constructor = isolate->script_function(); - Handle<JSValue> result = - Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor)); - - // The allocation might have triggered a GC, which could have called this - // function recursively, and a wrapper has already been created and cached. - // In that case, simply return a handle for the cached wrapper. - if (script->wrapper()->foreign_address() != NULL) { - return Handle<JSValue>( - *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address())); - } - - result->set_value(*script); - - // Create a new weak global handle and use it to cache the wrapper - // for future use. The cache will automatically be cleared by the - // garbage collector when it is not used anymore. 
- Handle<Object> handle = isolate->global_handles()->Create(*result); - GlobalHandles::MakeWeak(handle.location(), - reinterpret_cast<void*>(handle.location()), - &ClearWrapperCache); - script->wrapper()->set_foreign_address( - reinterpret_cast<Address>(handle.location())); - return result; -} - - -// Init line_ends array with code positions of line ends inside script -// source. -void InitScriptLineEnds(Handle<Script> script) { - if (!script->line_ends()->IsUndefined()) return; - - Isolate* isolate = script->GetIsolate(); - - if (!script->source()->IsString()) { - ASSERT(script->source()->IsUndefined()); - Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0); - script->set_line_ends(*empty); - ASSERT(script->line_ends()->IsFixedArray()); - return; - } - - Handle<String> src(String::cast(script->source()), isolate); - - Handle<FixedArray> array = CalculateLineEnds(src, true); - - if (*array != isolate->heap()->empty_fixed_array()) { - array->set_map(isolate->heap()->fixed_cow_array_map()); - } - - script->set_line_ends(*array); - ASSERT(script->line_ends()->IsFixedArray()); -} - - -template <typename SourceChar> -static void CalculateLineEnds(Isolate* isolate, - List<int>* line_ends, - Vector<const SourceChar> src, - bool with_last_line) { - const int src_len = src.length(); - StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n")); - - // Find and record line ends. - int position = 0; - while (position != -1 && position < src_len) { - position = search.Search(src, position); - if (position != -1) { - line_ends->Add(position); - position++; - } else if (with_last_line) { - // Even if the last line misses a line end, it is counted. - line_ends->Add(src_len); - return; - } - } -} - - -Handle<FixedArray> CalculateLineEnds(Handle<String> src, - bool with_last_line) { - src = FlattenGetString(src); - // Rough estimate of line count based on a roughly estimated average - // length of (unpacked) code. 
- int line_count_estimate = src->length() >> 4; - List<int> line_ends(line_count_estimate); - Isolate* isolate = src->GetIsolate(); - { - DisallowHeapAllocation no_allocation; // ensure vectors stay valid. - // Dispatch on type of strings. - String::FlatContent content = src->GetFlatContent(); - ASSERT(content.IsFlat()); - if (content.IsAscii()) { - CalculateLineEnds(isolate, - &line_ends, - content.ToOneByteVector(), - with_last_line); - } else { - CalculateLineEnds(isolate, - &line_ends, - content.ToUC16Vector(), - with_last_line); - } - } - int line_count = line_ends.length(); - Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count); - for (int i = 0; i < line_count; i++) { - array->set(i, Smi::FromInt(line_ends[i])); - } - return array; -} - - -// Convert code position into line number. -int GetScriptLineNumber(Handle<Script> script, int code_pos) { - InitScriptLineEnds(script); - DisallowHeapAllocation no_allocation; - FixedArray* line_ends_array = FixedArray::cast(script->line_ends()); - const int line_ends_len = line_ends_array->length(); - - if (!line_ends_len) return -1; - - if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) { - return script->line_offset()->value(); - } - - int left = 0; - int right = line_ends_len; - while (int half = (right - left) / 2) { - if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) { - right -= half; - } else { - left += half; - } - } - return right + script->line_offset()->value(); -} - - -// Convert code position into column number. 
-int GetScriptColumnNumber(Handle<Script> script, int code_pos) { - int line_number = GetScriptLineNumber(script, code_pos); - if (line_number == -1) return -1; - - DisallowHeapAllocation no_allocation; - FixedArray* line_ends_array = FixedArray::cast(script->line_ends()); - line_number = line_number - script->line_offset()->value(); - if (line_number == 0) return code_pos + script->column_offset()->value(); - int prev_line_end_pos = - Smi::cast(line_ends_array->get(line_number - 1))->value(); - return code_pos - (prev_line_end_pos + 1); -} - - -int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) { - DisallowHeapAllocation no_allocation; - if (!script->line_ends()->IsUndefined()) { - return GetScriptLineNumber(script, code_pos); - } - // Slow mode: we do not have line_ends. We have to iterate through source. - if (!script->source()->IsString()) { - return -1; - } - String* source = String::cast(script->source()); - int line = 0; - int len = source->length(); - for (int pos = 0; pos < len; pos++) { - if (pos == code_pos) { - break; - } - if (source->Get(pos) == '\n') { - line++; - } - } - return line; -} - - -// Compute the property keys from the interceptor. -// TODO(rossberg): support symbols in API, and filter here if needed. 
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver, - Handle<JSObject> object) { - Isolate* isolate = receiver->GetIsolate(); - Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor()); - PropertyCallbackArguments - args(isolate, interceptor->data(), *receiver, *object); - v8::Handle<v8::Array> result; - if (!interceptor->enumerator()->IsUndefined()) { - v8::NamedPropertyEnumeratorCallback enum_fun = - v8::ToCData<v8::NamedPropertyEnumeratorCallback>( - interceptor->enumerator()); - LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object)); - result = args.Call(enum_fun); - } -#if ENABLE_EXTRA_CHECKS - CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject()); -#endif - return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate), - result); -} - - -// Compute the element keys from the interceptor. -v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver, - Handle<JSObject> object) { - Isolate* isolate = receiver->GetIsolate(); - Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor()); - PropertyCallbackArguments - args(isolate, interceptor->data(), *receiver, *object); - v8::Handle<v8::Array> result; - if (!interceptor->enumerator()->IsUndefined()) { - v8::IndexedPropertyEnumeratorCallback enum_fun = - v8::ToCData<v8::IndexedPropertyEnumeratorCallback>( - interceptor->enumerator()); - LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object)); - result = args.Call(enum_fun); -#if ENABLE_EXTRA_CHECKS - CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject()); -#endif - } - return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate), - result); -} - - -Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) { - Isolate* isolate = script->GetIsolate(); - Handle<String> name_or_source_url_key = - isolate->factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("nameOrSourceURL")); - Handle<JSValue> script_wrapper = 
GetScriptWrapper(script); - Handle<Object> property = GetProperty(isolate, - script_wrapper, - name_or_source_url_key); - ASSERT(property->IsJSFunction()); - Handle<JSFunction> method = Handle<JSFunction>::cast(property); - bool caught_exception; - Handle<Object> result = Execution::TryCall(method, script_wrapper, 0, - NULL, &caught_exception); - if (caught_exception) { - result = isolate->factory()->undefined_value(); - } - return result; -} - - -static bool ContainsOnlyValidKeys(Handle<FixedArray> array) { - int len = array->length(); - for (int i = 0; i < len; i++) { - Object* e = array->get(i); - if (!(e->IsString() || e->IsNumber())) return false; - } - return true; -} - - -Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object, - KeyCollectionType type, - bool* threw) { - USE(ContainsOnlyValidKeys); - Isolate* isolate = object->GetIsolate(); - Handle<FixedArray> content = isolate->factory()->empty_fixed_array(); - Handle<JSObject> arguments_boilerplate = Handle<JSObject>( - isolate->context()->native_context()->sloppy_arguments_boilerplate(), - isolate); - Handle<JSFunction> arguments_function = Handle<JSFunction>( - JSFunction::cast(arguments_boilerplate->map()->constructor()), - isolate); - - // Only collect keys if access is permitted. - for (Handle<Object> p = object; - *p != isolate->heap()->null_value(); - p = Handle<Object>(p->GetPrototype(isolate), isolate)) { - if (p->IsJSProxy()) { - Handle<JSProxy> proxy(JSProxy::cast(*p), isolate); - Handle<Object> args[] = { proxy }; - Handle<Object> names = Execution::Call(isolate, - isolate->proxy_enumerate(), - object, - ARRAY_SIZE(args), - args, - threw); - if (*threw) return content; - content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names)); - break; - } - - Handle<JSObject> current(JSObject::cast(*p), isolate); - - // Check access rights if required. 
- if (current->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(current, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheckWrapper(current, v8::ACCESS_KEYS); - if (isolate->has_scheduled_exception()) { - isolate->PromoteScheduledException(); - *threw = true; - } - break; - } - - // Compute the element keys. - Handle<FixedArray> element_keys = - isolate->factory()->NewFixedArray(current->NumberOfEnumElements()); - current->GetEnumElementKeys(*element_keys); - content = UnionOfKeys(content, element_keys); - ASSERT(ContainsOnlyValidKeys(content)); - - // Add the element keys from the interceptor. - if (current->HasIndexedInterceptor()) { - v8::Handle<v8::Array> result = - GetKeysForIndexedInterceptor(object, current); - if (!result.IsEmpty()) - content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result)); - ASSERT(ContainsOnlyValidKeys(content)); - } - - // We can cache the computed property keys if access checks are - // not needed and no interceptors are involved. - // - // We do not use the cache if the object has elements and - // therefore it does not make sense to cache the property names - // for arguments objects. Arguments objects will always have - // elements. - // Wrapped strings have elements, but don't have an elements - // array or dictionary. So the fast inline test for whether to - // use the cache says yes, so we should not create a cache. - bool cache_enum_keys = - ((current->map()->constructor() != *arguments_function) && - !current->IsJSValue() && - !current->IsAccessCheckNeeded() && - !current->HasNamedInterceptor() && - !current->HasIndexedInterceptor()); - // Compute the property keys and cache them if possible. - content = - UnionOfKeys(content, GetEnumPropertyKeys(current, cache_enum_keys)); - ASSERT(ContainsOnlyValidKeys(content)); - - // Add the property keys from the interceptor. 
- if (current->HasNamedInterceptor()) { - v8::Handle<v8::Array> result = - GetKeysForNamedInterceptor(object, current); - if (!result.IsEmpty()) - content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result)); - ASSERT(ContainsOnlyValidKeys(content)); - } - - // If we only want local properties we bail out after the first - // iteration. - if (type == LOCAL_ONLY) - break; - } - return content; -} - - -Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) { - Isolate* isolate = object->GetIsolate(); - isolate->counters()->for_in()->Increment(); - Handle<FixedArray> elements = - GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw); - return isolate->factory()->NewJSArrayWithElements(elements); -} - - -Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length) { - ASSERT(array->length() >= length); - if (array->length() == length) return array; - - Handle<FixedArray> new_array = - array->GetIsolate()->factory()->NewFixedArray(length); - for (int i = 0; i < length; ++i) new_array->set(i, array->get(i)); - return new_array; -} - - -Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, - bool cache_result) { - Isolate* isolate = object->GetIsolate(); - if (object->HasFastProperties()) { - if (object->map()->instance_descriptors()->HasEnumCache()) { - int own_property_count = object->map()->EnumLength(); - // If we have an enum cache, but the enum length of the given map is set - // to kInvalidEnumCache, this means that the map itself has never used the - // present enum cache. The first step to using the cache is to set the - // enum length of the map by counting the number of own descriptors that - // are not DONT_ENUM or SYMBOLIC. 
- if (own_property_count == kInvalidEnumCacheSentinel) { - own_property_count = object->map()->NumberOfDescribedProperties( - OWN_DESCRIPTORS, DONT_SHOW); - - if (cache_result) object->map()->SetEnumLength(own_property_count); - } - - DescriptorArray* desc = object->map()->instance_descriptors(); - Handle<FixedArray> keys(desc->GetEnumCache(), isolate); - - // In case the number of properties required in the enum are actually - // present, we can reuse the enum cache. Otherwise, this means that the - // enum cache was generated for a previous (smaller) version of the - // Descriptor Array. In that case we regenerate the enum cache. - if (own_property_count <= keys->length()) { - isolate->counters()->enum_cache_hits()->Increment(); - return ReduceFixedArrayTo(keys, own_property_count); - } - } - - Handle<Map> map(object->map()); - - if (map->instance_descriptors()->IsEmpty()) { - isolate->counters()->enum_cache_hits()->Increment(); - if (cache_result) map->SetEnumLength(0); - return isolate->factory()->empty_fixed_array(); - } - - isolate->counters()->enum_cache_misses()->Increment(); - int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_SHOW); - - Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum); - Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum); - - Handle<DescriptorArray> descs = - Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate); - - int real_size = map->NumberOfOwnDescriptors(); - int enum_size = 0; - int index = 0; - - for (int i = 0; i < descs->number_of_descriptors(); i++) { - PropertyDetails details = descs->GetDetails(i); - Object* key = descs->GetKey(i); - if (!(details.IsDontEnum() || key->IsSymbol())) { - if (i < real_size) ++enum_size; - storage->set(index, key); - if (!indices.is_null()) { - if (details.type() != FIELD) { - indices = Handle<FixedArray>(); - } else { - int field_index = descs->GetFieldIndex(i); - if (field_index >= map->inobject_properties()) { - 
field_index = -(field_index - map->inobject_properties() + 1); - } - indices->set(index, Smi::FromInt(field_index)); - } - } - index++; - } - } - ASSERT(index == storage->length()); - - Handle<FixedArray> bridge_storage = - isolate->factory()->NewFixedArray( - DescriptorArray::kEnumCacheBridgeLength); - DescriptorArray* desc = object->map()->instance_descriptors(); - desc->SetEnumCache(*bridge_storage, - *storage, - indices.is_null() ? Object::cast(Smi::FromInt(0)) - : Object::cast(*indices)); - if (cache_result) { - object->map()->SetEnumLength(enum_size); - } - - return ReduceFixedArrayTo(storage, enum_size); - } else { - Handle<NameDictionary> dictionary(object->property_dictionary()); - int length = dictionary->NumberOfEnumElements(); - if (length == 0) { - return Handle<FixedArray>(isolate->heap()->empty_fixed_array()); - } - Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length); - dictionary->CopyEnumKeysTo(*storage); - return storage; - } -} - - DeferredHandleScope::DeferredHandleScope(Isolate* isolate) : impl_(isolate->handle_scope_implementer()) { impl_->BeginDeferredScope(); HandleScopeData* data = impl_->isolate()->handle_scope_data(); Object** new_next = impl_->GetSpareOrNewBlock(); Object** new_limit = &new_next[kHandleBlockSize]; - ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]); + DCHECK(data->limit == &impl_->blocks()->last()[kHandleBlockSize]); impl_->blocks()->Add(new_next); #ifdef DEBUG @@ -745,8 +111,8 @@ DeferredHandleScope::~DeferredHandleScope() { impl_->isolate()->handle_scope_data()->level--; - ASSERT(handles_detached_); - ASSERT(impl_->isolate()->handle_scope_data()->level == prev_level_); + DCHECK(handles_detached_); + DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_); } @@ -761,16 +127,4 @@ return deferred; } - -void AddWeakObjectToCodeDependency(Heap* heap, - Handle<Object> object, - Handle<Code> code) { - heap->EnsureWeakObjectToCodeTable(); - Handle<DependentCode> 
dep(heap->LookupWeakObjectToCodeDependency(*object)); - dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code); - CALL_HEAP_FUNCTION_VOID(heap->isolate(), - heap->AddWeakObjectToCodeDependency(*object, *dep)); -} - - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/handles.h nodejs-0.11.15/deps/v8/src/handles.h --- nodejs-0.11.13/deps/v8/src/handles.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/handles.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,78 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HANDLES_H_ #define V8_HANDLES_H_ -#include "allocation.h" -#include "objects.h" +#include "src/objects.h" namespace v8 { namespace internal { +// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle +// into a Handle requires checking that it does not point to NULL. This +// ensures NULL checks before use. +// Do not use MaybeHandle as argument type. + +template<typename T> +class MaybeHandle { + public: + INLINE(MaybeHandle()) : location_(NULL) { } + + // Constructor for handling automatic up casting from Handle. + // Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected. + template <class S> MaybeHandle(Handle<S> handle) { +#ifdef DEBUG + T* a = NULL; + S* b = NULL; + a = b; // Fake assignment to enforce type checks. + USE(a); +#endif + this->location_ = reinterpret_cast<T**>(handle.location()); + } + + // Constructor for handling automatic up casting. + // Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected. + template <class S> MaybeHandle(MaybeHandle<S> maybe_handle) { +#ifdef DEBUG + T* a = NULL; + S* b = NULL; + a = b; // Fake assignment to enforce type checks. 
+ USE(a); +#endif + location_ = reinterpret_cast<T**>(maybe_handle.location_); + } + + INLINE(void Assert() const) { DCHECK(location_ != NULL); } + INLINE(void Check() const) { CHECK(location_ != NULL); } + + INLINE(Handle<T> ToHandleChecked()) const { + Check(); + return Handle<T>(location_); + } + + // Convert to a Handle with a type that can be upcasted to. + template <class S> INLINE(bool ToHandle(Handle<S>* out)) { + if (location_ == NULL) { + *out = Handle<T>::null(); + return false; + } else { + *out = Handle<T>(location_); + return true; + } + } + + bool is_null() const { return location_ == NULL; } + + protected: + T** location_; + + // MaybeHandles of different classes are allowed to access each + // other's location_. + template<class S> friend class MaybeHandle; +}; + // ---------------------------------------------------------------------------- // A Handle provides a reference to an object that survives relocation by // the garbage collector. @@ -47,7 +86,9 @@ INLINE(explicit Handle(T* obj)); INLINE(Handle(T* obj, Isolate* isolate)); - INLINE(Handle()) : location_(NULL) {} + // TODO(yangguo): Values that contain empty handles should be declared as + // MaybeHandle to force validation before being used as handles. + INLINE(Handle()) : location_(NULL) { } // Constructor for handling automatic up casting. // Ex. Handle<JSFunction> can be passed when Handle<Object> is expected. @@ -77,6 +118,8 @@ return Handle<T>(reinterpret_cast<T**>(that.location_)); } + // TODO(yangguo): Values that contain empty handles should be declared as + // MaybeHandle to force validation before being used as handles. static Handle<T> null() { return Handle<T>(); } bool is_null() const { return location_ == NULL; } @@ -112,6 +155,13 @@ } +// Key comparison function for Map handles. +inline bool operator<(const Handle<Map>& lhs, const Handle<Map>& rhs) { + // This is safe because maps don't move. 
+ return *lhs < *rhs; +} + + class DeferredHandles; class HandleScopeImplementer; @@ -214,91 +264,6 @@ }; -// ---------------------------------------------------------------------------- -// Handle operations. -// They might invoke garbage collection. The result is an handle to -// an object of expected type, or the handle is an error if running out -// of space or encountering an internal error. - -// Flattens a string. -void FlattenString(Handle<String> str); - -// Flattens a string and returns the underlying external or sequential -// string. -Handle<String> FlattenGetString(Handle<String> str); - -Handle<Object> ForceSetProperty(Handle<JSObject> object, - Handle<Object> key, - Handle<Object> value, - PropertyAttributes attributes); - -Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key); - -Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key); - -Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key); - -Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name); - -Handle<Object> GetProperty(Isolate* isolate, - Handle<Object> obj, - Handle<Object> key); - -Handle<String> LookupSingleCharacterStringFromCode(Isolate* isolate, - uint32_t index); - -Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>, - Handle<JSArray> array); - -// Get the JS object corresponding to the given script; create it -// if none exists. -Handle<JSValue> GetScriptWrapper(Handle<Script> script); - -// Script line number computations. Note that the line number is zero-based. -void InitScriptLineEnds(Handle<Script> script); -// For string calculates an array of line end positions. If the string -// does not end with a new line character, this character may optionally be -// imagined. 
-Handle<FixedArray> CalculateLineEnds(Handle<String> string, - bool with_imaginary_last_new_line); -int GetScriptLineNumber(Handle<Script> script, int code_position); -// The safe version does not make heap allocations but may work much slower. -int GetScriptLineNumberSafe(Handle<Script> script, int code_position); -int GetScriptColumnNumber(Handle<Script> script, int code_position); -Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script); - -// Computes the enumerable keys from interceptors. Used for debug mirrors and -// by GetKeysInFixedArrayFor below. -v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver, - Handle<JSObject> object); -v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver, - Handle<JSObject> object); - -enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS }; - -// Computes the enumerable keys for a JSObject. Used for implementing -// "for (n in object) { }". -Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object, - KeyCollectionType type, - bool* threw); -Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw); -Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length); -Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, - bool cache_result); - -// Computes the union of keys and return the result. -// Used for implementing "for (n in object) { }" -Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first, - Handle<FixedArray> second); - -Handle<JSGlobalProxy> ReinitializeJSGlobalProxy( - Handle<JSFunction> constructor, - Handle<JSGlobalProxy> global); - -void AddWeakObjectToCodeDependency(Heap* heap, - Handle<Object> object, - Handle<Code> code); - // Seal off the current HandleScope so that new handles can only be created // if a new HandleScope is entered. 
class SealHandleScope BASE_EMBEDDED { diff -Nru nodejs-0.11.13/deps/v8/src/handles-inl.h nodejs-0.11.15/deps/v8/src/handles-inl.h --- nodejs-0.11.13/deps/v8/src/handles-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/handles-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,79 +1,52 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
// #ifndef V8_HANDLES_INL_H_ #define V8_HANDLES_INL_H_ -#include "api.h" -#include "handles.h" -#include "heap.h" -#include "isolate.h" +#include "src/api.h" +#include "src/handles.h" +#include "src/heap/heap.h" +#include "src/isolate.h" namespace v8 { namespace internal { template<typename T> Handle<T>::Handle(T* obj) { - ASSERT(!obj->IsFailure()); location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj); } template<typename T> Handle<T>::Handle(T* obj, Isolate* isolate) { - ASSERT(!obj->IsFailure()); location_ = HandleScope::CreateHandle(isolate, obj); } template <typename T> -inline bool Handle<T>::is_identical_to(const Handle<T> other) const { - ASSERT(location_ == NULL || !(*location_)->IsFailure()); - if (location_ == other.location_) return true; - if (location_ == NULL || other.location_ == NULL) return false; +inline bool Handle<T>::is_identical_to(const Handle<T> o) const { // Dereferencing deferred handles to check object equality is safe. - SLOW_ASSERT(IsDereferenceAllowed(NO_DEFERRED_CHECK) && - other.IsDereferenceAllowed(NO_DEFERRED_CHECK)); - return *location_ == *other.location_; + SLOW_DCHECK( + (location_ == NULL || IsDereferenceAllowed(NO_DEFERRED_CHECK)) && + (o.location_ == NULL || o.IsDereferenceAllowed(NO_DEFERRED_CHECK))); + if (location_ == o.location_) return true; + if (location_ == NULL || o.location_ == NULL) return false; + return *location_ == *o.location_; } template <typename T> inline T* Handle<T>::operator*() const { - ASSERT(location_ != NULL && !(*location_)->IsFailure()); - SLOW_ASSERT(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK)); + SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK)); return *BitCast<T**>(location_); } template <typename T> inline T** Handle<T>::location() const { - ASSERT(location_ == NULL || !(*location_)->IsFailure()); - SLOW_ASSERT(location_ == NULL || + SLOW_DCHECK(location_ == NULL || IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK)); return location_; } @@ -81,7 +54,7 @@ #ifdef DEBUG template 
<typename T> bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const { - ASSERT(location_ != NULL); + DCHECK(location_ != NULL); Object* object = *BitCast<T**>(location_); if (object->IsSmi()) return true; HeapObject* heap_object = HeapObject::cast(object); @@ -150,7 +123,7 @@ // Throw away all handles in the current scope. CloseScope(isolate_, prev_next_, prev_limit_); // Allocate one handle in the parent scope. - ASSERT(current->level > 0); + DCHECK(current->level > 0); Handle<T> result(CreateHandle<T>(isolate_, value)); // Reinitialize the current scope (so that it's ready // to be used or closed again). @@ -163,14 +136,14 @@ template <typename T> T** HandleScope::CreateHandle(Isolate* isolate, T* value) { - ASSERT(AllowHandleAllocation::IsAllowed()); + DCHECK(AllowHandleAllocation::IsAllowed()); HandleScopeData* current = isolate->handle_scope_data(); internal::Object** cur = current->next; if (cur == current->limit) cur = Extend(isolate); // Update the current next field, set the value in the created // handle, and return the result. - ASSERT(cur < current->limit); + DCHECK(cur < current->limit); current->next = cur + 1; T** result = reinterpret_cast<T**>(cur); @@ -197,9 +170,9 @@ // Restore state in current handle scope to re-enable handle // allocations. HandleScopeData* current = isolate_->handle_scope_data(); - ASSERT_EQ(0, current->level); + DCHECK_EQ(0, current->level); current->level = level_; - ASSERT_EQ(current->next, current->limit); + DCHECK_EQ(current->next, current->limit); current->limit = limit_; } diff -Nru nodejs-0.11.13/deps/v8/src/harmony-array.js nodejs-0.11.15/deps/v8/src/harmony-array.js --- nodejs-0.11.13/deps/v8/src/harmony-array.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/harmony-array.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
'use strict'; @@ -103,6 +80,49 @@ } +// ES6, draft 04-05-14, section 22.1.3.6 +function ArrayFill(value /* [, start [, end ] ] */) { // length == 1 + CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill"); + + var array = ToObject(this); + var length = TO_UINT32(array.length); + + var i = 0; + var end = length; + + if (%_ArgumentsLength() > 1) { + i = %_Arguments(1); + i = IS_UNDEFINED(i) ? 0 : TO_INTEGER(i); + if (%_ArgumentsLength() > 2) { + end = %_Arguments(2); + end = IS_UNDEFINED(end) ? length : TO_INTEGER(end); + } + } + + if (i < 0) { + i += length; + if (i < 0) i = 0; + } else { + if (i > length) i = length; + } + + if (end < 0) { + end += length; + if (end < 0) end = 0; + } else { + if (end > length) end = length; + } + + if ((end - i) > 0 && ObjectIsFrozen(array)) { + throw MakeTypeError("array_functions_on_frozen", + ["Array.prototype.fill"]); + } + + for (; i < end; i++) + array[i] = value; + return array; +} + // ------------------------------------------------------------------- function HarmonyArrayExtendArrayPrototype() { @@ -111,7 +131,8 @@ // Set up the non-enumerable functions on the Array prototype object. InstallFunctions($Array.prototype, DONT_ENUM, $Array( "find", ArrayFind, - "findIndex", ArrayFindIndex + "findIndex", ArrayFindIndex, + "fill", ArrayFill )); } diff -Nru nodejs-0.11.13/deps/v8/src/harmony-math.js nodejs-0.11.15/deps/v8/src/harmony-math.js --- nodejs-0.11.13/deps/v8/src/harmony-math.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/harmony-math.js 1970-01-01 00:00:00.000000000 +0000 @@ -1,269 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -'use strict'; - -// ES6 draft 09-27-13, section 20.2.2.28. -function MathSign(x) { - x = TO_NUMBER_INLINE(x); - if (x > 0) return 1; - if (x < 0) return -1; - if (x === 0) return x; - return NAN; -} - - -// ES6 draft 09-27-13, section 20.2.2.34. -function MathTrunc(x) { - x = TO_NUMBER_INLINE(x); - if (x > 0) return MathFloor(x); - if (x < 0) return MathCeil(x); - if (x === 0) return x; - return NAN; -} - - -// ES6 draft 09-27-13, section 20.2.2.30. -function MathSinh(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - // Idempotent for NaN, +/-0 and +/-Infinity. - if (x === 0 || !NUMBER_IS_FINITE(x)) return x; - return (MathExp(x) - MathExp(-x)) / 2; -} - - -// ES6 draft 09-27-13, section 20.2.2.12. 
-function MathCosh(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - if (!NUMBER_IS_FINITE(x)) return MathAbs(x); - return (MathExp(x) + MathExp(-x)) / 2; -} - - -// ES6 draft 09-27-13, section 20.2.2.33. -function MathTanh(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - // Idempotent for +/-0. - if (x === 0) return x; - // Returns +/-1 for +/-Infinity. - if (!NUMBER_IS_FINITE(x)) return MathSign(x); - var exp1 = MathExp(x); - var exp2 = MathExp(-x); - return (exp1 - exp2) / (exp1 + exp2); -} - - -// ES6 draft 09-27-13, section 20.2.2.5. -function MathAsinh(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - // Idempotent for NaN, +/-0 and +/-Infinity. - if (x === 0 || !NUMBER_IS_FINITE(x)) return x; - if (x > 0) return MathLog(x + MathSqrt(x * x + 1)); - // This is to prevent numerical errors caused by large negative x. - return -MathLog(-x + MathSqrt(x * x + 1)); -} - - -// ES6 draft 09-27-13, section 20.2.2.3. -function MathAcosh(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - if (x < 1) return NAN; - // Idempotent for NaN and +Infinity. - if (!NUMBER_IS_FINITE(x)) return x; - return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1)); -} - - -// ES6 draft 09-27-13, section 20.2.2.7. -function MathAtanh(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - // Idempotent for +/-0. - if (x === 0) return x; - // Returns NaN for NaN and +/- Infinity. - if (!NUMBER_IS_FINITE(x)) return NAN; - return 0.5 * MathLog((1 + x) / (1 - x)); -} - - -// ES6 draft 09-27-13, section 20.2.2.21. -function MathLog10(x) { - return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10). -} - - -// ES6 draft 09-27-13, section 20.2.2.22. -function MathLog2(x) { - return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2). -} - - -// ES6 draft 09-27-13, section 20.2.2.17. -function MathHypot(x, y) { // Function length is 2. - // We may want to introduce fast paths for two arguments and when - // normalization to avoid overflow is not necessary. 
For now, we - // simply assume the general case. - var length = %_ArgumentsLength(); - var args = new InternalArray(length); - var max = 0; - for (var i = 0; i < length; i++) { - var n = %_Arguments(i); - if (!IS_NUMBER(n)) n = NonNumberToNumber(n); - if (n === INFINITY || n === -INFINITY) return INFINITY; - n = MathAbs(n); - if (n > max) max = n; - args[i] = n; - } - - // Kahan summation to avoid rounding errors. - // Normalize the numbers to the largest one to avoid overflow. - if (max === 0) max = 1; - var sum = 0; - var compensation = 0; - for (var i = 0; i < length; i++) { - var n = args[i] / max; - var summand = n * n - compensation; - var preliminary = sum + summand; - compensation = (preliminary - sum) - summand; - sum = preliminary; - } - return MathSqrt(sum) * max; -} - - -// ES6 draft 09-27-13, section 20.2.2.16. -function MathFround(x) { - return %Math_fround(TO_NUMBER_INLINE(x)); -} - - -function MathClz32(x) { - x = ToUint32(TO_NUMBER_INLINE(x)); - if (x == 0) return 32; - var result = 0; - // Binary search. - if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; }; - if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; }; - if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; }; - if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; }; - if ((x & 0x80000000) === 0) { x <<= 1; result += 1; }; - return result; -} - - -// ES6 draft 09-27-13, section 20.2.2.9. -// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm -// Using initial approximation adapted from Kahan's cbrt and 4 iterations -// of Newton's method. -function MathCbrt(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - if (x == 0 || !NUMBER_IS_FINITE(x)) return x; - return x >= 0 ? 
CubeRoot(x) : -CubeRoot(-x); -} - -macro NEWTON_ITERATION_CBRT(x, approx) - (1.0 / 3.0) * (x / (approx * approx) + 2 * approx); -endmacro - -function CubeRoot(x) { - var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893; - var approx = %_ConstructDouble(approx_hi, 0); - approx = NEWTON_ITERATION_CBRT(x, approx); - approx = NEWTON_ITERATION_CBRT(x, approx); - approx = NEWTON_ITERATION_CBRT(x, approx); - return NEWTON_ITERATION_CBRT(x, approx); -} - - - -// ES6 draft 09-27-13, section 20.2.2.14. -// Use Taylor series to approximate. -// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ... -// == x/1! + x^2/2! + x^3/3! + ... -// The closer x is to 0, the fewer terms are required. -function MathExpm1(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - var xabs = MathAbs(x); - if (xabs < 2E-7) { - return x * (1 + x * (1/2)); - } else if (xabs < 6E-5) { - return x * (1 + x * (1/2 + x * (1/6))); - } else if (xabs < 2E-2) { - return x * (1 + x * (1/2 + x * (1/6 + - x * (1/24 + x * (1/120 + x * (1/720)))))); - } else { // Use regular exp if not close enough to 0. - return MathExp(x) - 1; - } -} - - -// ES6 draft 09-27-13, section 20.2.2.20. -// Use Taylor series to approximate. With y = x + 1; -// log(y) at 1 == log(1) + log'(1)(y-1)/1! + log''(1)(y-1)^2/2! + ... -// == 0 + x - x^2/2 + x^3/3 ... -// The closer x is to 0, the fewer terms are required. -function MathLog1p(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - var xabs = MathAbs(x); - if (xabs < 1E-7) { - return x * (1 - x * (1/2)); - } else if (xabs < 3E-5) { - return x * (1 - x * (1/2 - x * (1/3))); - } else if (xabs < 7E-3) { - return x * (1 - x * (1/2 - x * (1/3 - x * (1/4 - - x * (1/5 - x * (1/6 - x * (1/7))))))); - } else { // Use regular log if not close enough to 0. - return MathLog(1 + x); - } -} - - -function ExtendMath() { - %CheckIsBootstrapping(); - - // Set up the non-enumerable functions on the Math object. 
- InstallFunctions($Math, DONT_ENUM, $Array( - "sign", MathSign, - "trunc", MathTrunc, - "sinh", MathSinh, - "cosh", MathCosh, - "tanh", MathTanh, - "asinh", MathAsinh, - "acosh", MathAcosh, - "atanh", MathAtanh, - "log10", MathLog10, - "log2", MathLog2, - "hypot", MathHypot, - "fround", MathFround, - "clz32", MathClz32, - "cbrt", MathCbrt, - "log1p", MathLog1p, - "expm1", MathExpm1 - )); -} - - -ExtendMath(); diff -Nru nodejs-0.11.13/deps/v8/src/harmony-string.js nodejs-0.11.15/deps/v8/src/harmony-string.js --- nodejs-0.11.13/deps/v8/src/harmony-string.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/harmony-string.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 'use strict'; @@ -53,7 +30,7 @@ } -// ES6 draft 01-20-14, section 21.1.3.18 +// ES6 draft 04-05-14, section 21.1.3.18 function StringStartsWith(searchString /* position */) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith"); @@ -82,7 +59,7 @@ } -// ES6 draft 01-20-14, section 21.1.3.7 +// ES6 draft 04-05-14, section 21.1.3.7 function StringEndsWith(searchString /* position */) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith"); @@ -114,11 +91,17 @@ } -// ES6 draft 01-20-14, section 21.1.3.6 +// ES6 draft 04-05-14, section 21.1.3.6 function StringContains(searchString /* position */) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "String.prototype.contains"); var s = TO_STRING_INLINE(this); + + if (IS_REGEXP(searchString)) { + throw MakeTypeError("first_argument_not_regexp", + ["String.prototype.contains"]); + } + var ss = TO_STRING_INLINE(searchString); var pos = 0; if (%_ArgumentsLength() > 1) { @@ -137,17 +120,71 @@ } +// ES6 Draft 05-22-2014, section 21.1.3.3 +function StringCodePointAt(pos) { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt"); + + var string = TO_STRING_INLINE(this); + var size = string.length; + pos = TO_INTEGER(pos); + if (pos < 0 || pos >= size) { + return UNDEFINED; + } + var first = %_StringCharCodeAt(string, pos); + if (first < 0xD800 || first 
> 0xDBFF || pos + 1 == size) { + return first; + } + var second = %_StringCharCodeAt(string, pos + 1); + if (second < 0xDC00 || second > 0xDFFF) { + return first; + } + return (first - 0xD800) * 0x400 + second + 0x2400; +} + + +// ES6 Draft 05-22-2014, section 21.1.2.2 +function StringFromCodePoint(_) { // length = 1 + var code; + var length = %_ArgumentsLength(); + var index; + var result = ""; + for (index = 0; index < length; index++) { + code = %_Arguments(index); + if (!%_IsSmi(code)) { + code = ToNumber(code); + } + if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) { + throw MakeRangeError("invalid_code_point", [code]); + } + if (code <= 0xFFFF) { + result += %_StringCharFromCode(code); + } else { + code -= 0x10000; + result += %_StringCharFromCode((code >>> 10) & 0x3FF | 0xD800); + result += %_StringCharFromCode(code & 0x3FF | 0xDC00); + } + } + return result; +} + + // ------------------------------------------------------------------- function ExtendStringPrototype() { %CheckIsBootstrapping(); + // Set up the non-enumerable functions on the String object. + InstallFunctions($String, DONT_ENUM, $Array( + "fromCodePoint", StringFromCodePoint + )); + // Set up the non-enumerable functions on the String prototype object. InstallFunctions($String.prototype, DONT_ENUM, $Array( - "repeat", StringRepeat, - "startsWith", StringStartsWith, + "codePointAt", StringCodePointAt, + "contains", StringContains, "endsWith", StringEndsWith, - "contains", StringContains + "repeat", StringRepeat, + "startsWith", StringStartsWith )); } diff -Nru nodejs-0.11.13/deps/v8/src/hashmap.h nodejs-0.11.15/deps/v8/src/hashmap.h --- nodejs-0.11.13/deps/v8/src/hashmap.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hashmap.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HASHMAP_H_ #define V8_HASHMAP_H_ -#include "allocation.h" -#include "checks.h" -#include "utils.h" +#include "src/allocation.h" +#include "src/base/logging.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -98,6 +75,11 @@ Entry* Start() const; Entry* Next(Entry* p) const; + // Some match functions defined for convenience. 
+ static bool PointersMatch(void* key1, void* key2) { + return key1 == key2; + } + private: MatchFun match_; Entry* map_; @@ -182,7 +164,7 @@ // This guarantees loop termination as there is at least one empty entry so // eventually the removed entry will have an empty entry after it. - ASSERT(occupancy_ < capacity_); + DCHECK(occupancy_ < capacity_); // p is the candidate entry to clear. q is used to scan forwards. Entry* q = p; // Start at the entry to remove. @@ -242,7 +224,7 @@ typename TemplateHashMapImpl<AllocationPolicy>::Entry* TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const { const Entry* end = map_end(); - ASSERT(map_ - 1 <= p && p < end); + DCHECK(map_ - 1 <= p && p < end); for (p++; p < end; p++) { if (p->key != NULL) { return p; @@ -255,14 +237,14 @@ template<class AllocationPolicy> typename TemplateHashMapImpl<AllocationPolicy>::Entry* TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) { - ASSERT(key != NULL); + DCHECK(key != NULL); - ASSERT(IsPowerOf2(capacity_)); + DCHECK(IsPowerOf2(capacity_)); Entry* p = map_ + (hash & (capacity_ - 1)); const Entry* end = map_end(); - ASSERT(map_ <= p && p < end); + DCHECK(map_ <= p && p < end); - ASSERT(occupancy_ < capacity_); // Guarantees loop termination. + DCHECK(occupancy_ < capacity_); // Guarantees loop termination. 
while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) { p++; if (p >= end) { @@ -277,7 +259,7 @@ template<class AllocationPolicy> void TemplateHashMapImpl<AllocationPolicy>::Initialize( uint32_t capacity, AllocationPolicy allocator) { - ASSERT(IsPowerOf2(capacity)); + DCHECK(IsPowerOf2(capacity)); map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry))); if (map_ == NULL) { v8::internal::FatalProcessOutOfMemory("HashMap::Initialize"); diff -Nru nodejs-0.11.13/deps/v8/src/heap/gc-tracer.cc nodejs-0.11.15/deps/v8/src/heap/gc-tracer.cc --- nodejs-0.11.13/deps/v8/src/heap/gc-tracer.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/gc-tracer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,402 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/heap/gc-tracer.h" + +namespace v8 { +namespace internal { + +static intptr_t CountTotalHolesSize(Heap* heap) { + intptr_t holes_size = 0; + OldSpaces spaces(heap); + for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { + holes_size += space->Waste() + space->Available(); + } + return holes_size; +} + + +GCTracer::Event::Event(Type type, const char* gc_reason, + const char* collector_reason) + : type(type), + gc_reason(gc_reason), + collector_reason(collector_reason), + start_time(0.0), + end_time(0.0), + start_object_size(0), + end_object_size(0), + start_memory_size(0), + end_memory_size(0), + start_holes_size(0), + end_holes_size(0), + cumulative_incremental_marking_steps(0), + incremental_marking_steps(0), + cumulative_incremental_marking_bytes(0), + incremental_marking_bytes(0), + cumulative_incremental_marking_duration(0.0), + incremental_marking_duration(0.0), + cumulative_pure_incremental_marking_duration(0.0), + pure_incremental_marking_duration(0.0), + 
longest_incremental_marking_step(0.0) { + for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) { + scopes[i] = 0; + } +} + + +const char* GCTracer::Event::TypeName(bool short_name) const { + switch (type) { + case SCAVENGER: + if (short_name) { + return "s"; + } else { + return "Scavenge"; + } + case MARK_COMPACTOR: + if (short_name) { + return "ms"; + } else { + return "Mark-sweep"; + } + case START: + if (short_name) { + return "st"; + } else { + return "Start"; + } + } + return "Unknown Event Type"; +} + + +GCTracer::GCTracer(Heap* heap) + : heap_(heap), + cumulative_incremental_marking_steps_(0), + cumulative_incremental_marking_bytes_(0), + cumulative_incremental_marking_duration_(0.0), + cumulative_pure_incremental_marking_duration_(0.0), + longest_incremental_marking_step_(0.0), + cumulative_marking_duration_(0.0), + cumulative_sweeping_duration_(0.0) { + current_ = Event(Event::START, NULL, NULL); + current_.end_time = base::OS::TimeCurrentMillis(); + previous_ = previous_mark_compactor_event_ = current_; +} + + +void GCTracer::Start(GarbageCollector collector, const char* gc_reason, + const char* collector_reason) { + previous_ = current_; + if (current_.type == Event::MARK_COMPACTOR) + previous_mark_compactor_event_ = current_; + + if (collector == SCAVENGER) { + current_ = Event(Event::SCAVENGER, gc_reason, collector_reason); + } else { + current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason); + } + + current_.start_time = base::OS::TimeCurrentMillis(); + current_.start_object_size = heap_->SizeOfObjects(); + current_.start_memory_size = heap_->isolate()->memory_allocator()->Size(); + current_.start_holes_size = CountTotalHolesSize(heap_); + + current_.cumulative_incremental_marking_steps = + cumulative_incremental_marking_steps_; + current_.cumulative_incremental_marking_bytes = + cumulative_incremental_marking_bytes_; + current_.cumulative_incremental_marking_duration = + cumulative_incremental_marking_duration_; + 
current_.cumulative_pure_incremental_marking_duration = + cumulative_pure_incremental_marking_duration_; + current_.longest_incremental_marking_step = longest_incremental_marking_step_; + + for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) { + current_.scopes[i] = 0; + } +} + + +void GCTracer::Stop() { + current_.end_time = base::OS::TimeCurrentMillis(); + current_.end_object_size = heap_->SizeOfObjects(); + current_.end_memory_size = heap_->isolate()->memory_allocator()->Size(); + current_.end_holes_size = CountTotalHolesSize(heap_); + + if (current_.type == Event::SCAVENGER) { + current_.incremental_marking_steps = + current_.cumulative_incremental_marking_steps - + previous_.cumulative_incremental_marking_steps; + current_.incremental_marking_bytes = + current_.cumulative_incremental_marking_bytes - + previous_.cumulative_incremental_marking_bytes; + current_.incremental_marking_duration = + current_.cumulative_incremental_marking_duration - + previous_.cumulative_incremental_marking_duration; + current_.pure_incremental_marking_duration = + current_.cumulative_pure_incremental_marking_duration - + previous_.cumulative_pure_incremental_marking_duration; + scavenger_events_.push_front(current_); + } else { + current_.incremental_marking_steps = + current_.cumulative_incremental_marking_steps - + previous_mark_compactor_event_.cumulative_incremental_marking_steps; + current_.incremental_marking_bytes = + current_.cumulative_incremental_marking_bytes - + previous_mark_compactor_event_.cumulative_incremental_marking_bytes; + current_.incremental_marking_duration = + current_.cumulative_incremental_marking_duration - + previous_mark_compactor_event_.cumulative_incremental_marking_duration; + current_.pure_incremental_marking_duration = + current_.cumulative_pure_incremental_marking_duration - + previous_mark_compactor_event_ + .cumulative_pure_incremental_marking_duration; + longest_incremental_marking_step_ = 0.0; + mark_compactor_events_.push_front(current_); + } + 
+ // TODO(ernstm): move the code below out of GCTracer. + + if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; + + double duration = current_.end_time - current_.start_time; + double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0); + + heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator, + current_.scopes[Scope::MC_MARK]); + + if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger) + return; + + if (FLAG_trace_gc) { + if (FLAG_trace_gc_nvp) + PrintNVP(); + else + Print(); + + heap_->PrintShortHeapStatistics(); + } +} + + +void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) { + cumulative_incremental_marking_steps_++; + cumulative_incremental_marking_bytes_ += bytes; + cumulative_incremental_marking_duration_ += duration; + longest_incremental_marking_step_ = + Max(longest_incremental_marking_step_, duration); + cumulative_marking_duration_ += duration; + if (bytes > 0) { + cumulative_pure_incremental_marking_duration_ += duration; + } +} + + +void GCTracer::Print() const { + PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init()); + + PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false), + static_cast<double>(current_.start_object_size) / MB, + static_cast<double>(current_.start_memory_size) / MB, + static_cast<double>(current_.end_object_size) / MB, + static_cast<double>(current_.end_memory_size) / MB); + + int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]); + if (external_time > 0) PrintF("%d / ", external_time); + + double duration = current_.end_time - current_.start_time; + PrintF("%.1f ms", duration); + if (current_.type == Event::SCAVENGER) { + if (current_.incremental_marking_steps > 0) { + PrintF(" (+ %.1f ms in %d steps since last GC)", + current_.incremental_marking_duration, + current_.incremental_marking_steps); + } + } else { + if (current_.incremental_marking_steps > 0) { + PrintF( + " (+ %.1f ms in %d steps since start 
of marking, " + "biggest step %.1f ms)", + current_.incremental_marking_duration, + current_.incremental_marking_steps, + current_.longest_incremental_marking_step); + } + } + + if (current_.gc_reason != NULL) { + PrintF(" [%s]", current_.gc_reason); + } + + if (current_.collector_reason != NULL) { + PrintF(" [%s]", current_.collector_reason); + } + + PrintF(".\n"); +} + + +void GCTracer::PrintNVP() const { + PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init()); + + double duration = current_.end_time - current_.start_time; + double spent_in_mutator = current_.start_time - previous_.end_time; + + PrintF("pause=%.1f ", duration); + PrintF("mutator=%.1f ", spent_in_mutator); + PrintF("gc=%s ", current_.TypeName(true)); + + PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]); + PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]); + PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]); + PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]); + PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]); + PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]); + PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]); + PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]); + PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]); + PrintF("new_new=%.1f ", + current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]); + PrintF("root_new=%.1f ", + current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]); + PrintF("old_new=%.1f ", + current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]); + PrintF("compaction_ptrs=%.1f ", + current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]); + PrintF("intracompaction_ptrs=%.1f ", + current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]); + PrintF("misc_compaction=%.1f ", + current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]); + PrintF("weakcollection_process=%.1f ", + current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]); + 
PrintF("weakcollection_clear=%.1f ", + current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]); + PrintF("weakcollection_abort=%.1f ", + current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]); + + PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size); + PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size); + PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", current_.start_holes_size); + PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", current_.end_holes_size); + + intptr_t allocated_since_last_gc = + current_.start_object_size - previous_.end_object_size; + PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc); + PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_); + PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ", + heap_->semi_space_copied_object_size_); + PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_); + PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_); + PrintF("nodes_promoted=%d ", heap_->nodes_promoted_); + PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_); + PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_); + + if (current_.type == Event::SCAVENGER) { + PrintF("steps_count=%d ", current_.incremental_marking_steps); + PrintF("steps_took=%.1f ", current_.incremental_marking_duration); + } else { + PrintF("steps_count=%d ", current_.incremental_marking_steps); + PrintF("steps_took=%.1f ", current_.incremental_marking_duration); + PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step); + PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ", + IncrementalMarkingSpeedInBytesPerMillisecond()); + } + + PrintF("\n"); +} + + +double GCTracer::MeanDuration(const EventBuffer& events) const { + if (events.empty()) return 0.0; + + double mean = 0.0; + EventBuffer::const_iterator iter = events.begin(); + while (iter != events.end()) { + mean += iter->end_time - iter->start_time; + ++iter; + } + + return mean / 
events.size(); +} + + +double GCTracer::MaxDuration(const EventBuffer& events) const { + if (events.empty()) return 0.0; + + double maximum = 0.0f; + EventBuffer::const_iterator iter = events.begin(); + while (iter != events.end()) { + maximum = Max(iter->end_time - iter->start_time, maximum); + ++iter; + } + + return maximum; +} + + +double GCTracer::MeanIncrementalMarkingDuration() const { + if (cumulative_incremental_marking_steps_ == 0) return 0.0; + + // We haven't completed an entire round of incremental marking, yet. + // Use data from GCTracer instead of data from event buffers. + if (mark_compactor_events_.empty()) { + return cumulative_incremental_marking_duration_ / + cumulative_incremental_marking_steps_; + } + + int steps = 0; + double durations = 0.0; + EventBuffer::const_iterator iter = mark_compactor_events_.begin(); + while (iter != mark_compactor_events_.end()) { + steps += iter->incremental_marking_steps; + durations += iter->incremental_marking_duration; + ++iter; + } + + if (steps == 0) return 0.0; + + return durations / steps; +} + + +double GCTracer::MaxIncrementalMarkingDuration() const { + // We haven't completed an entire round of incremental marking, yet. + // Use data from GCTracer instead of data from event buffers. + if (mark_compactor_events_.empty()) return longest_incremental_marking_step_; + + double max_duration = 0.0; + EventBuffer::const_iterator iter = mark_compactor_events_.begin(); + while (iter != mark_compactor_events_.end()) + max_duration = Max(iter->longest_incremental_marking_step, max_duration); + + return max_duration; +} + + +intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const { + if (cumulative_incremental_marking_duration_ == 0.0) return 0; + + // We haven't completed an entire round of incremental marking, yet. + // Use data from GCTracer instead of data from event buffers. 
+ if (mark_compactor_events_.empty()) { + return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ / + cumulative_pure_incremental_marking_duration_); + } + + intptr_t bytes = 0; + double durations = 0.0; + EventBuffer::const_iterator iter = mark_compactor_events_.begin(); + while (iter != mark_compactor_events_.end()) { + bytes += iter->incremental_marking_bytes; + durations += iter->pure_incremental_marking_duration; + ++iter; + } + + if (durations == 0.0) return 0; + + return static_cast<intptr_t>(bytes / durations); +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/gc-tracer.h nodejs-0.11.15/deps/v8/src/heap/gc-tracer.h --- nodejs-0.11.13/deps/v8/src/heap/gc-tracer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/gc-tracer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,356 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_GC_TRACER_H_ +#define V8_HEAP_GC_TRACER_H_ + +namespace v8 { +namespace internal { + +// A simple ring buffer class with maximum size known at compile time. +// The class only implements the functionality required in GCTracer. 
+template <typename T, size_t MAX_SIZE> +class RingBuffer { + public: + class const_iterator { + public: + const_iterator() : index_(0), elements_(NULL) {} + + const_iterator(size_t index, const T* elements) + : index_(index), elements_(elements) {} + + bool operator==(const const_iterator& rhs) const { + return elements_ == rhs.elements_ && index_ == rhs.index_; + } + + bool operator!=(const const_iterator& rhs) const { + return elements_ != rhs.elements_ || index_ != rhs.index_; + } + + operator const T*() const { return elements_ + index_; } + + const T* operator->() const { return elements_ + index_; } + + const T& operator*() const { return elements_[index_]; } + + const_iterator& operator++() { + index_ = (index_ + 1) % (MAX_SIZE + 1); + return *this; + } + + const_iterator& operator--() { + index_ = (index_ + MAX_SIZE) % (MAX_SIZE + 1); + return *this; + } + + private: + size_t index_; + const T* elements_; + }; + + RingBuffer() : begin_(0), end_(0) {} + + bool empty() const { return begin_ == end_; } + size_t size() const { + return (end_ - begin_ + MAX_SIZE + 1) % (MAX_SIZE + 1); + } + const_iterator begin() const { return const_iterator(begin_, elements_); } + const_iterator end() const { return const_iterator(end_, elements_); } + const_iterator back() const { return --end(); } + void push_back(const T& element) { + elements_[end_] = element; + end_ = (end_ + 1) % (MAX_SIZE + 1); + if (end_ == begin_) begin_ = (begin_ + 1) % (MAX_SIZE + 1); + } + void push_front(const T& element) { + begin_ = (begin_ + MAX_SIZE) % (MAX_SIZE + 1); + if (begin_ == end_) end_ = (end_ + MAX_SIZE) % (MAX_SIZE + 1); + elements_[begin_] = element; + } + + private: + T elements_[MAX_SIZE + 1]; + size_t begin_; + size_t end_; + + DISALLOW_COPY_AND_ASSIGN(RingBuffer); +}; + + +// GCTracer collects and prints ONE line after each garbage collector +// invocation IFF --trace_gc is used. +// TODO(ernstm): Unit tests. 
+class GCTracer BASE_EMBEDDED { + public: + class Scope BASE_EMBEDDED { + public: + enum ScopeId { + EXTERNAL, + MC_MARK, + MC_SWEEP, + MC_SWEEP_NEWSPACE, + MC_SWEEP_OLDSPACE, + MC_SWEEP_CODE, + MC_SWEEP_CELL, + MC_SWEEP_MAP, + MC_EVACUATE_PAGES, + MC_UPDATE_NEW_TO_NEW_POINTERS, + MC_UPDATE_ROOT_TO_NEW_POINTERS, + MC_UPDATE_OLD_TO_NEW_POINTERS, + MC_UPDATE_POINTERS_TO_EVACUATED, + MC_UPDATE_POINTERS_BETWEEN_EVACUATED, + MC_UPDATE_MISC_POINTERS, + MC_WEAKCOLLECTION_PROCESS, + MC_WEAKCOLLECTION_CLEAR, + MC_WEAKCOLLECTION_ABORT, + MC_FLUSH_CODE, + NUMBER_OF_SCOPES + }; + + Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) { + start_time_ = base::OS::TimeCurrentMillis(); + } + + ~Scope() { + DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned. + tracer_->current_.scopes[scope_] += + base::OS::TimeCurrentMillis() - start_time_; + } + + private: + GCTracer* tracer_; + ScopeId scope_; + double start_time_; + + DISALLOW_COPY_AND_ASSIGN(Scope); + }; + + + class Event { + public: + enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 }; + + // Default constructor leaves the event uninitialized. + Event() {} + + Event(Type type, const char* gc_reason, const char* collector_reason); + + // Returns a string describing the event type. + const char* TypeName(bool short_name) const; + + // Type of event + Type type; + + const char* gc_reason; + const char* collector_reason; + + // Timestamp set in the constructor. + double start_time; + + // Timestamp set in the destructor. + double end_time; + + // Size of objects in heap set in constructor. + intptr_t start_object_size; + + // Size of objects in heap set in destructor. + intptr_t end_object_size; + + // Size of memory allocated from OS set in constructor. + intptr_t start_memory_size; + + // Size of memory allocated from OS set in destructor. + intptr_t end_memory_size; + + // Total amount of space either wasted or contained in one of free lists + // before the current GC. 
+ intptr_t start_holes_size; + + // Total amount of space either wasted or contained in one of free lists + // after the current GC. + intptr_t end_holes_size; + + // Number of incremental marking steps since creation of tracer. + // (value at start of event) + int cumulative_incremental_marking_steps; + + // Incremental marking steps since + // - last event for SCAVENGER events + // - last MARK_COMPACTOR event for MARK_COMPACTOR events + int incremental_marking_steps; + + // Bytes marked since creation of tracer (value at start of event). + intptr_t cumulative_incremental_marking_bytes; + + // Bytes marked since + // - last event for SCAVENGER events + // - last MARK_COMPACTOR event for MARK_COMPACTOR events + intptr_t incremental_marking_bytes; + + // Cumulative duration of incremental marking steps since creation of + // tracer. (value at start of event) + double cumulative_incremental_marking_duration; + + // Duration of incremental marking steps since + // - last event for SCAVENGER events + // - last MARK_COMPACTOR event for MARK_COMPACTOR events + double incremental_marking_duration; + + // Cumulative pure duration of incremental marking steps since creation of + // tracer. (value at start of event) + double cumulative_pure_incremental_marking_duration; + + // Duration of pure incremental marking steps since + // - last event for SCAVENGER events + // - last MARK_COMPACTOR event for MARK_COMPACTOR events + double pure_incremental_marking_duration; + + // Longest incremental marking step since start of marking. + // (value at start of event) + double longest_incremental_marking_step; + + // Amounts of time spent in different scopes during GC. + double scopes[Scope::NUMBER_OF_SCOPES]; + }; + + static const int kRingBufferMaxSize = 10; + + typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer; + + explicit GCTracer(Heap* heap); + + // Start collecting data. 
+ void Start(GarbageCollector collector, const char* gc_reason, + const char* collector_reason); + + // Stop collecting data and print results. + void Stop(); + + // Log an incremental marking step. + void AddIncrementalMarkingStep(double duration, intptr_t bytes); + + // Log time spent in marking. + void AddMarkingTime(double duration) { + cumulative_marking_duration_ += duration; + } + + // Time spent in marking. + double cumulative_marking_duration() const { + return cumulative_marking_duration_; + } + + // Log time spent in sweeping on main thread. + void AddSweepingTime(double duration) { + cumulative_sweeping_duration_ += duration; + } + + // Time spent in sweeping on main thread. + double cumulative_sweeping_duration() const { + return cumulative_sweeping_duration_; + } + + // Compute the mean duration of the last scavenger events. Returns 0 if no + // events have been recorded. + double MeanScavengerDuration() const { + return MeanDuration(scavenger_events_); + } + + // Compute the max duration of the last scavenger events. Returns 0 if no + // events have been recorded. + double MaxScavengerDuration() const { return MaxDuration(scavenger_events_); } + + // Compute the mean duration of the last mark compactor events. Returns 0 if + // no events have been recorded. + double MeanMarkCompactorDuration() const { + return MeanDuration(mark_compactor_events_); + } + + // Compute the max duration of the last mark compactor events. Return 0 if no + // events have been recorded. + double MaxMarkCompactorDuration() const { + return MaxDuration(mark_compactor_events_); + } + + // Compute the mean step duration of the last incremental marking round. + // Returns 0 if no incremental marking round has been completed. + double MeanIncrementalMarkingDuration() const; + + // Compute the max step duration of the last incremental marking round. + // Returns 0 if no incremental marking round has been completed. 
+ double MaxIncrementalMarkingDuration() const; + + // Compute the average incremental marking speed in bytes/second. Returns 0 if + // no events have been recorded. + intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const; + + private: + // Print one detailed trace line in name=value format. + // TODO(ernstm): Move to Heap. + void PrintNVP() const; + + // Print one trace line. + // TODO(ernstm): Move to Heap. + void Print() const; + + // Compute the mean duration of the events in the given ring buffer. + double MeanDuration(const EventBuffer& events) const; + + // Compute the max duration of the events in the given ring buffer. + double MaxDuration(const EventBuffer& events) const; + + // Pointer to the heap that owns this tracer. + Heap* heap_; + + // Current tracer event. Populated during Start/Stop cycle. Valid after Stop() + // has returned. + Event current_; + + // Previous tracer event. + Event previous_; + + // Previous MARK_COMPACTOR event. + Event previous_mark_compactor_event_; + + // RingBuffers for SCAVENGER events. + EventBuffer scavenger_events_; + + // RingBuffers for MARK_COMPACTOR events. + EventBuffer mark_compactor_events_; + + // Cumulative number of incremental marking steps since creation of tracer. + int cumulative_incremental_marking_steps_; + + // Cumulative size of incremental marking steps (in bytes) since creation of + // tracer. + intptr_t cumulative_incremental_marking_bytes_; + + // Cumulative duration of incremental marking steps since creation of tracer. + double cumulative_incremental_marking_duration_; + + // Cumulative duration of pure incremental marking steps since creation of + // tracer. + double cumulative_pure_incremental_marking_duration_; + + // Longest incremental marking step since start of marking. + double longest_incremental_marking_step_; + + // Total marking time. 
+ // This timer is precise when run with --print-cumulative-gc-stat + double cumulative_marking_duration_; + + // Total sweeping time on the main thread. + // This timer is precise when run with --print-cumulative-gc-stat + // TODO(hpayer): Account for sweeping time on sweeper threads. Add a + // different field for that. + // TODO(hpayer): This timer right now just holds the sweeping time + // of the initial atomic sweeping pause. Make sure that it accumulates + // all sweeping operations performed on the main thread. + double cumulative_sweeping_duration_; + + DISALLOW_COPY_AND_ASSIGN(GCTracer); +}; +} +} // namespace v8::internal + +#endif // V8_HEAP_GC_TRACER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/heap.cc nodejs-0.11.15/deps/v8/src/heap/heap.cc --- nodejs-0.11.13/deps/v8/src/heap/heap.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/heap.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6152 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/accessors.h" +#include "src/api.h" +#include "src/base/once.h" +#include "src/base/utils/random-number-generator.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/compilation-cache.h" +#include "src/conversions.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/global-handles.h" +#include "src/heap/incremental-marking.h" +#include "src/heap/mark-compact.h" +#include "src/heap/objects-visiting-inl.h" +#include "src/heap/objects-visiting.h" +#include "src/heap/store-buffer.h" +#include "src/heap-profiler.h" +#include "src/isolate-inl.h" +#include "src/natives.h" +#include "src/runtime-profiler.h" +#include "src/scopeinfo.h" +#include "src/snapshot.h" +#include "src/utils.h" +#include "src/v8threads.h" +#include "src/vm-state-inl.h" + +#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP +#include "src/regexp-macro-assembler.h" // NOLINT +#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT +#endif +#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP +#include "src/regexp-macro-assembler.h" // NOLINT +#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT +#endif +#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP +#include "src/regexp-macro-assembler.h" +#include "src/mips64/regexp-macro-assembler-mips64.h" +#endif + +namespace v8 { +namespace internal { + + +Heap::Heap() + : amount_of_external_allocated_memory_(0), + amount_of_external_allocated_memory_at_last_global_gc_(0), + isolate_(NULL), + code_range_size_(0), + // semispace_size_ should be a power of 2 and old_generation_size_ should + // be a multiple of Page::kPageSize. 
+ reserved_semispace_size_(8 * (kPointerSize / 4) * MB), + max_semi_space_size_(8 * (kPointerSize / 4) * MB), + initial_semispace_size_(Page::kPageSize), + max_old_generation_size_(700ul * (kPointerSize / 4) * MB), + max_executable_size_(256ul * (kPointerSize / 4) * MB), + // Variables set based on semispace_size_ and old_generation_size_ in + // ConfigureHeap. + // Will be 4 * reserved_semispace_size_ to ensure that young + // generation can be aligned to its size. + maximum_committed_(0), + survived_since_last_expansion_(0), + sweep_generation_(0), + always_allocate_scope_depth_(0), + contexts_disposed_(0), + global_ic_age_(0), + flush_monomorphic_ics_(false), + scan_on_scavenge_pages_(0), + new_space_(this), + old_pointer_space_(NULL), + old_data_space_(NULL), + code_space_(NULL), + map_space_(NULL), + cell_space_(NULL), + property_cell_space_(NULL), + lo_space_(NULL), + gc_state_(NOT_IN_GC), + gc_post_processing_depth_(0), + allocations_count_(0), + raw_allocations_hash_(0), + dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc), + ms_count_(0), + gc_count_(0), + remembered_unmapped_pages_index_(0), + unflattened_strings_length_(0), +#ifdef DEBUG + allocation_timeout_(0), +#endif // DEBUG + old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit), + old_gen_exhausted_(false), + inline_allocation_disabled_(false), + store_buffer_rebuilder_(store_buffer()), + hidden_string_(NULL), + gc_safe_size_of_old_object_(NULL), + total_regexp_code_generated_(0), + tracer_(this), + high_survival_rate_period_length_(0), + promoted_objects_size_(0), + promotion_rate_(0), + semi_space_copied_object_size_(0), + semi_space_copied_rate_(0), + nodes_died_in_new_space_(0), + nodes_copied_in_new_space_(0), + nodes_promoted_(0), + maximum_size_scavenges_(0), + max_gc_pause_(0.0), + total_gc_time_ms_(0.0), + max_alive_after_gc_(0), + min_in_mutator_(kMaxInt), + marking_time_(0.0), + sweeping_time_(0.0), + mark_compact_collector_(this), + 
store_buffer_(this), + marking_(this), + incremental_marking_(this), + number_idle_notifications_(0), + last_idle_notification_gc_count_(0), + last_idle_notification_gc_count_init_(false), + mark_sweeps_since_idle_round_started_(0), + gc_count_at_last_idle_gc_(0), + scavenges_since_last_idle_round_(kIdleScavengeThreshold), + full_codegen_bytes_generated_(0), + crankshaft_codegen_bytes_generated_(0), + gcs_since_last_deopt_(0), +#ifdef VERIFY_HEAP + no_weak_object_verification_scope_depth_(0), +#endif + allocation_sites_scratchpad_length_(0), + promotion_queue_(this), + configured_(false), + external_string_table_(this), + chunks_queued_for_free_(NULL), + gc_callbacks_depth_(0) { +// Allow build-time customization of the max semispace size. Building +// V8 with snapshots and a non-default max semispace size is much +// easier if you can define it as part of the build environment. +#if defined(V8_MAX_SEMISPACE_SIZE) + max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; +#endif + + // Ensure old_generation_size_ is a multiple of kPageSize. + DCHECK(MB >= Page::kPageSize); + + memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); + set_native_contexts_list(NULL); + set_array_buffers_list(Smi::FromInt(0)); + set_allocation_sites_list(Smi::FromInt(0)); + set_encountered_weak_collections(Smi::FromInt(0)); + // Put a dummy entry in the remembered pages so we can find the list the + // minidump even if there are no real unmapped pages. 
+ RememberUnmappedPage(NULL, false); + + ClearObjectStats(true); +} + + +intptr_t Heap::Capacity() { + if (!HasBeenSetUp()) return 0; + + return new_space_.Capacity() + old_pointer_space_->Capacity() + + old_data_space_->Capacity() + code_space_->Capacity() + + map_space_->Capacity() + cell_space_->Capacity() + + property_cell_space_->Capacity(); +} + + +intptr_t Heap::CommittedMemory() { + if (!HasBeenSetUp()) return 0; + + return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() + + old_data_space_->CommittedMemory() + code_space_->CommittedMemory() + + map_space_->CommittedMemory() + cell_space_->CommittedMemory() + + property_cell_space_->CommittedMemory() + lo_space_->Size(); +} + + +size_t Heap::CommittedPhysicalMemory() { + if (!HasBeenSetUp()) return 0; + + return new_space_.CommittedPhysicalMemory() + + old_pointer_space_->CommittedPhysicalMemory() + + old_data_space_->CommittedPhysicalMemory() + + code_space_->CommittedPhysicalMemory() + + map_space_->CommittedPhysicalMemory() + + cell_space_->CommittedPhysicalMemory() + + property_cell_space_->CommittedPhysicalMemory() + + lo_space_->CommittedPhysicalMemory(); +} + + +intptr_t Heap::CommittedMemoryExecutable() { + if (!HasBeenSetUp()) return 0; + + return isolate()->memory_allocator()->SizeExecutable(); +} + + +void Heap::UpdateMaximumCommitted() { + if (!HasBeenSetUp()) return; + + intptr_t current_committed_memory = CommittedMemory(); + if (current_committed_memory > maximum_committed_) { + maximum_committed_ = current_committed_memory; + } +} + + +intptr_t Heap::Available() { + if (!HasBeenSetUp()) return 0; + + return new_space_.Available() + old_pointer_space_->Available() + + old_data_space_->Available() + code_space_->Available() + + map_space_->Available() + cell_space_->Available() + + property_cell_space_->Available(); +} + + +bool Heap::HasBeenSetUp() { + return old_pointer_space_ != NULL && old_data_space_ != NULL && + code_space_ != NULL && map_space_ != NULL && 
cell_space_ != NULL && + property_cell_space_ != NULL && lo_space_ != NULL; +} + + +int Heap::GcSafeSizeOfOldObject(HeapObject* object) { + if (IntrusiveMarking::IsMarked(object)) { + return IntrusiveMarking::SizeOfMarkedObject(object); + } + return object->SizeFromMap(object->map()); +} + + +GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space, + const char** reason) { + // Is global GC requested? + if (space != NEW_SPACE) { + isolate_->counters()->gc_compactor_caused_by_request()->Increment(); + *reason = "GC in old space requested"; + return MARK_COMPACTOR; + } + + if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { + *reason = "GC in old space forced by flags"; + return MARK_COMPACTOR; + } + + // Is enough data promoted to justify a global GC? + if (OldGenerationAllocationLimitReached()) { + isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment(); + *reason = "promotion limit reached"; + return MARK_COMPACTOR; + } + + // Have allocation in OLD and LO failed? + if (old_gen_exhausted_) { + isolate_->counters() + ->gc_compactor_caused_by_oldspace_exhaustion() + ->Increment(); + *reason = "old generations exhausted"; + return MARK_COMPACTOR; + } + + // Is there enough space left in OLD to guarantee that a scavenge can + // succeed? + // + // Note that MemoryAllocator->MaxAvailable() undercounts the memory available + // for object promotion. It counts only the bytes that the memory + // allocator has not yet allocated from the OS and assigned to any space, + // and does not count available bytes already in the old space or code + // space. Undercounting is safe---we may get an unrequested full GC when + // a scavenge would have succeeded. 
+ if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) { + isolate_->counters() + ->gc_compactor_caused_by_oldspace_exhaustion() + ->Increment(); + *reason = "scavenge might not succeed"; + return MARK_COMPACTOR; + } + + // Default + *reason = NULL; + return SCAVENGER; +} + + +// TODO(1238405): Combine the infrastructure for --heap-stats and +// --log-gc to avoid the complicated preprocessor and flag testing. +void Heap::ReportStatisticsBeforeGC() { +// Heap::ReportHeapStatistics will also log NewSpace statistics when +// compiled --log-gc is set. The following logic is used to avoid +// double logging. +#ifdef DEBUG + if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics(); + if (FLAG_heap_stats) { + ReportHeapStatistics("Before GC"); + } else if (FLAG_log_gc) { + new_space_.ReportStatistics(); + } + if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms(); +#else + if (FLAG_log_gc) { + new_space_.CollectStatistics(); + new_space_.ReportStatistics(); + new_space_.ClearHistograms(); + } +#endif // DEBUG +} + + +void Heap::PrintShortHeapStatistics() { + if (!FLAG_trace_gc_verbose) return; + PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX "d KB\n", + isolate_->memory_allocator()->Size() / KB, + isolate_->memory_allocator()->Available() / KB); + PrintPID("New space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + new_space_.Size() / KB, new_space_.Available() / KB, + new_space_.CommittedMemory() / KB); + PrintPID("Old pointers, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + old_pointer_space_->SizeOfObjects() / KB, + old_pointer_space_->Available() / KB, + old_pointer_space_->CommittedMemory() / KB); + PrintPID("Old data space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d 
KB\n", + old_data_space_->SizeOfObjects() / KB, + old_data_space_->Available() / KB, + old_data_space_->CommittedMemory() / KB); + PrintPID("Code space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + code_space_->SizeOfObjects() / KB, code_space_->Available() / KB, + code_space_->CommittedMemory() / KB); + PrintPID("Map space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + map_space_->SizeOfObjects() / KB, map_space_->Available() / KB, + map_space_->CommittedMemory() / KB); + PrintPID("Cell space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB, + cell_space_->CommittedMemory() / KB); + PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + property_cell_space_->SizeOfObjects() / KB, + property_cell_space_->Available() / KB, + property_cell_space_->CommittedMemory() / KB); + PrintPID("Large object space, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB, + lo_space_->CommittedMemory() / KB); + PrintPID("All spaces, used: %6" V8_PTR_PREFIX + "d KB" + ", available: %6" V8_PTR_PREFIX + "d KB" + ", committed: %6" V8_PTR_PREFIX "d KB\n", + this->SizeOfObjects() / KB, this->Available() / KB, + this->CommittedMemory() / KB); + PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n", + static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB)); + PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_); +} + + +// TODO(1238405): Combine the infrastructure for --heap-stats and +// --log-gc to avoid the complicated preprocessor and flag 
testing. +void Heap::ReportStatisticsAfterGC() { +// Similar to the before GC, we use some complicated logic to ensure that +// NewSpace statistics are logged exactly once when --log-gc is turned on. +#if defined(DEBUG) + if (FLAG_heap_stats) { + new_space_.CollectStatistics(); + ReportHeapStatistics("After GC"); + } else if (FLAG_log_gc) { + new_space_.ReportStatistics(); + } +#else + if (FLAG_log_gc) new_space_.ReportStatistics(); +#endif // DEBUG +} + + +void Heap::GarbageCollectionPrologue() { + { + AllowHeapAllocation for_the_first_part_of_prologue; + ClearJSFunctionResultCaches(); + gc_count_++; + unflattened_strings_length_ = 0; + + if (FLAG_flush_code && FLAG_flush_code_incrementally) { + mark_compact_collector()->EnableCodeFlushing(true); + } + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + Verify(); + } +#endif + } + + // Reset GC statistics. + promoted_objects_size_ = 0; + semi_space_copied_object_size_ = 0; + nodes_died_in_new_space_ = 0; + nodes_copied_in_new_space_ = 0; + nodes_promoted_ = 0; + + UpdateMaximumCommitted(); + +#ifdef DEBUG + DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); + + if (FLAG_gc_verbose) Print(); + + ReportStatisticsBeforeGC(); +#endif // DEBUG + + store_buffer()->GCPrologue(); + + if (isolate()->concurrent_osr_enabled()) { + isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs(); + } + + if (new_space_.IsAtMaximumCapacity()) { + maximum_size_scavenges_++; + } else { + maximum_size_scavenges_ = 0; + } + CheckNewSpaceExpansionCriteria(); +} + + +intptr_t Heap::SizeOfObjects() { + intptr_t total = 0; + AllSpaces spaces(this); + for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { + total += space->SizeOfObjects(); + } + return total; +} + + +void Heap::ClearAllICsByKind(Code::Kind kind) { + HeapObjectIterator it(code_space()); + + for (Object* object = it.Next(); object != NULL; object = it.Next()) { + Code* code = Code::cast(object); + Code::Kind current_kind = code->kind(); + 
if (current_kind == Code::FUNCTION || + current_kind == Code::OPTIMIZED_FUNCTION) { + code->ClearInlineCaches(kind); + } + } +} + + +void Heap::RepairFreeListsAfterBoot() { + PagedSpaces spaces(this); + for (PagedSpace* space = spaces.next(); space != NULL; + space = spaces.next()) { + space->RepairFreeListsAfterBoot(); + } +} + + +void Heap::ProcessPretenuringFeedback() { + if (FLAG_allocation_site_pretenuring) { + int tenure_decisions = 0; + int dont_tenure_decisions = 0; + int allocation_mementos_found = 0; + int allocation_sites = 0; + int active_allocation_sites = 0; + + // If the scratchpad overflowed, we have to iterate over the allocation + // sites list. + // TODO(hpayer): We iterate over the whole list of allocation sites when + // we grew to the maximum semi-space size to deopt maybe tenured + // allocation sites. We could hold the maybe tenured allocation sites + // in a seperate data structure if this is a performance problem. + bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites(); + bool use_scratchpad = + allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize && + !deopt_maybe_tenured; + + int i = 0; + Object* list_element = allocation_sites_list(); + bool trigger_deoptimization = false; + bool maximum_size_scavenge = MaximumSizeScavenge(); + while (use_scratchpad ? i < allocation_sites_scratchpad_length_ + : list_element->IsAllocationSite()) { + AllocationSite* site = + use_scratchpad + ? 
AllocationSite::cast(allocation_sites_scratchpad()->get(i)) + : AllocationSite::cast(list_element); + allocation_mementos_found += site->memento_found_count(); + if (site->memento_found_count() > 0) { + active_allocation_sites++; + if (site->DigestPretenuringFeedback(maximum_size_scavenge)) { + trigger_deoptimization = true; + } + if (site->GetPretenureMode() == TENURED) { + tenure_decisions++; + } else { + dont_tenure_decisions++; + } + allocation_sites++; + } + + if (deopt_maybe_tenured && site->IsMaybeTenure()) { + site->set_deopt_dependent_code(true); + trigger_deoptimization = true; + } + + if (use_scratchpad) { + i++; + } else { + list_element = site->weak_next(); + } + } + + if (trigger_deoptimization) { + isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); + } + + FlushAllocationSitesScratchpad(); + + if (FLAG_trace_pretenuring_statistics && + (allocation_mementos_found > 0 || tenure_decisions > 0 || + dont_tenure_decisions > 0)) { + PrintF( + "GC: (mode, #visited allocation sites, #active allocation sites, " + "#mementos, #tenure decisions, #donttenure decisions) " + "(%s, %d, %d, %d, %d, %d)\n", + use_scratchpad ? "use scratchpad" : "use list", allocation_sites, + active_allocation_sites, allocation_mementos_found, tenure_decisions, + dont_tenure_decisions); + } + } +} + + +void Heap::DeoptMarkedAllocationSites() { + // TODO(hpayer): If iterating over the allocation sites list becomes a + // performance issue, use a cache heap data structure instead (similar to the + // allocation sites scratchpad). 
+ Object* list_element = allocation_sites_list(); + while (list_element->IsAllocationSite()) { + AllocationSite* site = AllocationSite::cast(list_element); + if (site->deopt_dependent_code()) { + site->dependent_code()->MarkCodeForDeoptimization( + isolate_, DependentCode::kAllocationSiteTenuringChangedGroup); + site->set_deopt_dependent_code(false); + } + list_element = site->weak_next(); + } + Deoptimizer::DeoptimizeMarkedCode(isolate_); +} + + +void Heap::GarbageCollectionEpilogue() { + store_buffer()->GCEpilogue(); + + // In release mode, we only zap the from space under heap verification. + if (Heap::ShouldZapGarbage()) { + ZapFromSpace(); + } + + // Process pretenuring feedback and update allocation sites. + ProcessPretenuringFeedback(); + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + Verify(); + } +#endif + + AllowHeapAllocation for_the_rest_of_the_epilogue; + +#ifdef DEBUG + if (FLAG_print_global_handles) isolate_->global_handles()->Print(); + if (FLAG_print_handles) PrintHandles(); + if (FLAG_gc_verbose) Print(); + if (FLAG_code_stats) ReportCodeStatistics("After GC"); +#endif + if (FLAG_deopt_every_n_garbage_collections > 0) { + // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that + // the topmost optimized frame can be deoptimized safely, because it + // might not have a lazy bailout point right after its current PC. 
+ if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) { + Deoptimizer::DeoptimizeAll(isolate()); + gcs_since_last_deopt_ = 0; + } + } + + UpdateMaximumCommitted(); + + isolate_->counters()->alive_after_last_gc()->Set( + static_cast<int>(SizeOfObjects())); + + isolate_->counters()->string_table_capacity()->Set( + string_table()->Capacity()); + isolate_->counters()->number_of_symbols()->Set( + string_table()->NumberOfElements()); + + if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) { + isolate_->counters()->codegen_fraction_crankshaft()->AddSample( + static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) / + (crankshaft_codegen_bytes_generated_ + + full_codegen_bytes_generated_))); + } + + if (CommittedMemory() > 0) { + isolate_->counters()->external_fragmentation_total()->AddSample( + static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory())); + + isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>( + (new_space()->CommittedMemory() * 100.0) / CommittedMemory())); + isolate_->counters()->heap_fraction_old_pointer_space()->AddSample( + static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) / + CommittedMemory())); + isolate_->counters()->heap_fraction_old_data_space()->AddSample( + static_cast<int>((old_data_space()->CommittedMemory() * 100.0) / + CommittedMemory())); + isolate_->counters()->heap_fraction_code_space()->AddSample( + static_cast<int>((code_space()->CommittedMemory() * 100.0) / + CommittedMemory())); + isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>( + (map_space()->CommittedMemory() * 100.0) / CommittedMemory())); + isolate_->counters()->heap_fraction_cell_space()->AddSample( + static_cast<int>((cell_space()->CommittedMemory() * 100.0) / + CommittedMemory())); + isolate_->counters()->heap_fraction_property_cell_space()->AddSample( + static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) / + CommittedMemory())); 
+ isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>( + (lo_space()->CommittedMemory() * 100.0) / CommittedMemory())); + + isolate_->counters()->heap_sample_total_committed()->AddSample( + static_cast<int>(CommittedMemory() / KB)); + isolate_->counters()->heap_sample_total_used()->AddSample( + static_cast<int>(SizeOfObjects() / KB)); + isolate_->counters()->heap_sample_map_space_committed()->AddSample( + static_cast<int>(map_space()->CommittedMemory() / KB)); + isolate_->counters()->heap_sample_cell_space_committed()->AddSample( + static_cast<int>(cell_space()->CommittedMemory() / KB)); + isolate_->counters() + ->heap_sample_property_cell_space_committed() + ->AddSample( + static_cast<int>(property_cell_space()->CommittedMemory() / KB)); + isolate_->counters()->heap_sample_code_space_committed()->AddSample( + static_cast<int>(code_space()->CommittedMemory() / KB)); + + isolate_->counters()->heap_sample_maximum_committed()->AddSample( + static_cast<int>(MaximumCommittedMemory() / KB)); + } + +#define UPDATE_COUNTERS_FOR_SPACE(space) \ + isolate_->counters()->space##_bytes_available()->Set( \ + static_cast<int>(space()->Available())); \ + isolate_->counters()->space##_bytes_committed()->Set( \ + static_cast<int>(space()->CommittedMemory())); \ + isolate_->counters()->space##_bytes_used()->Set( \ + static_cast<int>(space()->SizeOfObjects())); +#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \ + if (space()->CommittedMemory() > 0) { \ + isolate_->counters()->external_fragmentation_##space()->AddSample( \ + static_cast<int>(100 - \ + (space()->SizeOfObjects() * 100.0) / \ + space()->CommittedMemory())); \ + } +#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \ + UPDATE_COUNTERS_FOR_SPACE(space) \ + UPDATE_FRAGMENTATION_FOR_SPACE(space) + + UPDATE_COUNTERS_FOR_SPACE(new_space) + UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space) + UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space) + 
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space) + UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space) + UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space) + UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space) + UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space) +#undef UPDATE_COUNTERS_FOR_SPACE +#undef UPDATE_FRAGMENTATION_FOR_SPACE +#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE + +#ifdef DEBUG + ReportStatisticsAfterGC(); +#endif // DEBUG + + // Remember the last top pointer so that we can later find out + // whether we allocated in new space since the last GC. + new_space_top_after_last_gc_ = new_space()->top(); +} + + +void Heap::CollectAllGarbage(int flags, const char* gc_reason, + const v8::GCCallbackFlags gc_callback_flags) { + // Since we are ignoring the return value, the exact choice of space does + // not matter, so long as we do not specify NEW_SPACE, which would not + // cause a full GC. + mark_compact_collector_.SetFlags(flags); + CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags); + mark_compact_collector_.SetFlags(kNoGCFlags); +} + + +void Heap::CollectAllAvailableGarbage(const char* gc_reason) { + // Since we are ignoring the return value, the exact choice of space does + // not matter, so long as we do not specify NEW_SPACE, which would not + // cause a full GC. + // Major GC would invoke weak handle callbacks on weakly reachable + // handles, but won't collect weakly reachable objects until next + // major GC. Therefore if we collect aggressively and weak handle callback + // has been invoked, we rerun major GC to release objects which become + // garbage. + // Note: as weak callbacks can execute arbitrary code, we cannot + // hope that eventually there will be no weak callbacks invocations. + // Therefore stop recollecting after several attempts. + if (isolate()->concurrent_recompilation_enabled()) { + // The optimizing compiler may be unnecessarily holding on to memory. 
+ DisallowHeapAllocation no_recursive_gc; + isolate()->optimizing_compiler_thread()->Flush(); + } + mark_compact_collector()->SetFlags(kMakeHeapIterableMask | + kReduceMemoryFootprintMask); + isolate_->compilation_cache()->Clear(); + const int kMaxNumberOfAttempts = 7; + const int kMinNumberOfAttempts = 2; + for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { + if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) && + attempt + 1 >= kMinNumberOfAttempts) { + break; + } + } + mark_compact_collector()->SetFlags(kNoGCFlags); + new_space_.Shrink(); + UncommitFromSpace(); + incremental_marking()->UncommitMarkingDeque(); +} + + +void Heap::EnsureFillerObjectAtTop() { + // There may be an allocation memento behind every object in new space. + // If we evacuate a not full new space or if we are on the last page of + // the new space, then there may be uninitialized memory behind the top + // pointer of the new space page. We store a filler object there to + // identify the unused space. + Address from_top = new_space_.top(); + Address from_limit = new_space_.limit(); + if (from_top < from_limit) { + int remaining_in_page = static_cast<int>(from_limit - from_top); + CreateFillerObjectAt(from_top, remaining_in_page); + } +} + + +bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, + const char* collector_reason, + const v8::GCCallbackFlags gc_callback_flags) { + // The VM is in the GC state until exiting this function. + VMState<GC> state(isolate_); + +#ifdef DEBUG + // Reset the allocation timeout to the GC interval, but make sure to + // allow at least a few allocations after a collection. The reason + // for this is that we have a lot of allocation sequences and we + // assume that a garbage collection will allow the subsequent + // allocation attempts to go through. 
+ allocation_timeout_ = Max(6, FLAG_gc_interval); +#endif + + EnsureFillerObjectAtTop(); + + if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Scavenge during marking.\n"); + } + } + + if (collector == MARK_COMPACTOR && + !mark_compact_collector()->abort_incremental_marking() && + !incremental_marking()->IsStopped() && + !incremental_marking()->should_hurry() && + FLAG_incremental_marking_steps) { + // Make progress in incremental marking. + const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; + incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, + IncrementalMarking::NO_GC_VIA_STACK_GUARD); + if (!incremental_marking()->IsComplete() && !FLAG_gc_global) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); + } + collector = SCAVENGER; + collector_reason = "incremental marking delaying mark-sweep"; + } + } + + bool next_gc_likely_to_collect_more = false; + + { + tracer()->Start(collector, gc_reason, collector_reason); + DCHECK(AllowHeapAllocation::IsAllowed()); + DisallowHeapAllocation no_allocation_during_gc; + GarbageCollectionPrologue(); + + { + HistogramTimerScope histogram_timer_scope( + (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() + : isolate_->counters()->gc_compactor()); + next_gc_likely_to_collect_more = + PerformGarbageCollection(collector, gc_callback_flags); + } + + GarbageCollectionEpilogue(); + tracer()->Stop(); + } + + // Start incremental marking for the next cycle. The heap snapshot + // generator needs incremental marking to stay off after it aborted. 
+ if (!mark_compact_collector()->abort_incremental_marking() && + incremental_marking()->IsStopped() && + incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) { + incremental_marking()->Start(); + } + + return next_gc_likely_to_collect_more; +} + + +int Heap::NotifyContextDisposed() { + if (isolate()->concurrent_recompilation_enabled()) { + // Flush the queued recompilation tasks. + isolate()->optimizing_compiler_thread()->Flush(); + } + flush_monomorphic_ics_ = true; + AgeInlineCaches(); + return ++contexts_disposed_; +} + + +void Heap::MoveElements(FixedArray* array, int dst_index, int src_index, + int len) { + if (len == 0) return; + + DCHECK(array->map() != fixed_cow_array_map()); + Object** dst_objects = array->data_start() + dst_index; + MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize); + if (!InNewSpace(array)) { + for (int i = 0; i < len; i++) { + // TODO(hpayer): check store buffer for entries + if (InNewSpace(dst_objects[i])) { + RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i)); + } + } + } + incremental_marking()->RecordWrites(array); +} + + +#ifdef VERIFY_HEAP +// Helper class for verifying the string table. +class StringTableVerifier : public ObjectVisitor { + public: + void VisitPointers(Object** start, Object** end) { + // Visit all HeapObject pointers in [start, end). + for (Object** p = start; p < end; p++) { + if ((*p)->IsHeapObject()) { + // Check that the string is actually internalized. 
+ CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || + (*p)->IsInternalizedString()); + } + } + } +}; + + +static void VerifyStringTable(Heap* heap) { + StringTableVerifier verifier; + heap->string_table()->IterateElements(&verifier); +} +#endif // VERIFY_HEAP + + +static bool AbortIncrementalMarkingAndCollectGarbage( + Heap* heap, AllocationSpace space, const char* gc_reason = NULL) { + heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask); + bool result = heap->CollectGarbage(space, gc_reason); + heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags); + return result; +} + + +void Heap::ReserveSpace(int* sizes, Address* locations_out) { + bool gc_performed = true; + int counter = 0; + static const int kThreshold = 20; + while (gc_performed && counter++ < kThreshold) { + gc_performed = false; + DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1); + for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) { + if (sizes[space] != 0) { + AllocationResult allocation; + if (space == NEW_SPACE) { + allocation = new_space()->AllocateRaw(sizes[space]); + } else { + allocation = paged_space(space)->AllocateRaw(sizes[space]); + } + FreeListNode* node; + if (!allocation.To(&node)) { + if (space == NEW_SPACE) { + Heap::CollectGarbage(NEW_SPACE, + "failed to reserve space in the new space"); + } else { + AbortIncrementalMarkingAndCollectGarbage( + this, static_cast<AllocationSpace>(space), + "failed to reserve space in paged space"); + } + gc_performed = true; + break; + } else { + // Mark with a free list node, in case we have a GC before + // deserializing. + node->set_size(this, sizes[space]); + locations_out[space] = node->address(); + } + } + } + } + + if (gc_performed) { + // Failed to reserve the space after several attempts. + V8::FatalProcessOutOfMemory("Heap::ReserveSpace"); + } +} + + +void Heap::EnsureFromSpaceIsCommitted() { + if (new_space_.CommitFromSpaceIfNeeded()) return; + + // Committing memory to from space failed. 
+ // Memory is exhausted and we will die. + V8::FatalProcessOutOfMemory("Committing semi space failed."); +} + + +void Heap::ClearJSFunctionResultCaches() { + if (isolate_->bootstrapper()->IsActive()) return; + + Object* context = native_contexts_list(); + while (!context->IsUndefined()) { + // Get the caches for this context. GC can happen when the context + // is not fully initialized, so the caches can be undefined. + Object* caches_or_undefined = + Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX); + if (!caches_or_undefined->IsUndefined()) { + FixedArray* caches = FixedArray::cast(caches_or_undefined); + // Clear the caches: + int length = caches->length(); + for (int i = 0; i < length; i++) { + JSFunctionResultCache::cast(caches->get(i))->Clear(); + } + } + // Get the next context: + context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); + } +} + + +void Heap::ClearNormalizedMapCaches() { + if (isolate_->bootstrapper()->IsActive() && + !incremental_marking()->IsMarking()) { + return; + } + + Object* context = native_contexts_list(); + while (!context->IsUndefined()) { + // GC can happen when the context is not fully initialized, + // so the cache can be undefined. 
+ Object* cache = + Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX); + if (!cache->IsUndefined()) { + NormalizedMapCache::cast(cache)->Clear(); + } + context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); + } +} + + +void Heap::UpdateSurvivalStatistics(int start_new_space_size) { + if (start_new_space_size == 0) return; + + promotion_rate_ = (static_cast<double>(promoted_objects_size_) / + static_cast<double>(start_new_space_size) * 100); + + semi_space_copied_rate_ = + (static_cast<double>(semi_space_copied_object_size_) / + static_cast<double>(start_new_space_size) * 100); + + double survival_rate = promotion_rate_ + semi_space_copied_rate_; + + if (survival_rate > kYoungSurvivalRateHighThreshold) { + high_survival_rate_period_length_++; + } else { + high_survival_rate_period_length_ = 0; + } +} + +bool Heap::PerformGarbageCollection( + GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { + int freed_global_handles = 0; + + if (collector != SCAVENGER) { + PROFILE(isolate_, CodeMovingGCEvent()); + } + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + VerifyStringTable(this); + } +#endif + + GCType gc_type = + collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge; + + { + GCCallbacksScope scope(this); + if (scope.CheckReenter()) { + AllowHeapAllocation allow_allocation; + GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); + VMState<EXTERNAL> state(isolate_); + HandleScope handle_scope(isolate_); + CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags); + } + } + + EnsureFromSpaceIsCommitted(); + + int start_new_space_size = Heap::new_space()->SizeAsInt(); + + if (IsHighSurvivalRate()) { + // We speed up the incremental marker if it is running so that it + // does not fall behind the rate of promotion, which would cause a + // constantly growing old space. 
+ incremental_marking()->NotifyOfHighPromotionRate(); + } + + if (collector == MARK_COMPACTOR) { + // Perform mark-sweep with optional compaction. + MarkCompact(); + sweep_generation_++; + // Temporarily set the limit for case when PostGarbageCollectionProcessing + // allocates and triggers GC. The real limit is set at after + // PostGarbageCollectionProcessing. + old_generation_allocation_limit_ = + OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0); + old_gen_exhausted_ = false; + } else { + Scavenge(); + } + + UpdateSurvivalStatistics(start_new_space_size); + + isolate_->counters()->objs_since_last_young()->Set(0); + + // Callbacks that fire after this point might trigger nested GCs and + // restart incremental marking, the assertion can't be moved down. + DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped()); + + gc_post_processing_depth_++; + { + AllowHeapAllocation allow_allocation; + GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); + freed_global_handles = + isolate_->global_handles()->PostGarbageCollectionProcessing(collector); + } + gc_post_processing_depth_--; + + isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); + + // Update relocatables. + Relocatable::PostGarbageCollectionProcessing(isolate_); + + if (collector == MARK_COMPACTOR) { + // Register the amount of external allocated memory. 
+ amount_of_external_allocated_memory_at_last_global_gc_ = + amount_of_external_allocated_memory_; + old_generation_allocation_limit_ = OldGenerationAllocationLimit( + PromotedSpaceSizeOfObjects(), freed_global_handles); + } + + { + GCCallbacksScope scope(this); + if (scope.CheckReenter()) { + AllowHeapAllocation allow_allocation; + GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); + VMState<EXTERNAL> state(isolate_); + HandleScope handle_scope(isolate_); + CallGCEpilogueCallbacks(gc_type, gc_callback_flags); + } + } + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + VerifyStringTable(this); + } +#endif + + return freed_global_handles > 0; +} + + +void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { + for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { + if (gc_type & gc_prologue_callbacks_[i].gc_type) { + if (!gc_prologue_callbacks_[i].pass_isolate_) { + v8::GCPrologueCallback callback = + reinterpret_cast<v8::GCPrologueCallback>( + gc_prologue_callbacks_[i].callback); + callback(gc_type, flags); + } else { + v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); + gc_prologue_callbacks_[i].callback(isolate, gc_type, flags); + } + } + } +} + + +void Heap::CallGCEpilogueCallbacks(GCType gc_type, + GCCallbackFlags gc_callback_flags) { + for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { + if (gc_type & gc_epilogue_callbacks_[i].gc_type) { + if (!gc_epilogue_callbacks_[i].pass_isolate_) { + v8::GCPrologueCallback callback = + reinterpret_cast<v8::GCPrologueCallback>( + gc_epilogue_callbacks_[i].callback); + callback(gc_type, gc_callback_flags); + } else { + v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); + gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags); + } + } + } +} + + +void Heap::MarkCompact() { + gc_state_ = MARK_COMPACT; + LOG(isolate_, ResourceEvent("markcompact", "begin")); + + uint64_t size_of_objects_before_gc = SizeOfObjects(); + + 
mark_compact_collector_.Prepare(); + + ms_count_++; + + MarkCompactPrologue(); + + mark_compact_collector_.CollectGarbage(); + + LOG(isolate_, ResourceEvent("markcompact", "end")); + + gc_state_ = NOT_IN_GC; + + isolate_->counters()->objs_since_last_full()->Set(0); + + flush_monomorphic_ics_ = false; + + if (FLAG_allocation_site_pretenuring) { + EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); + } +} + + +void Heap::MarkCompactPrologue() { + // At any old GC clear the keyed lookup cache to enable collection of unused + // maps. + isolate_->keyed_lookup_cache()->Clear(); + isolate_->context_slot_cache()->Clear(); + isolate_->descriptor_lookup_cache()->Clear(); + RegExpResultsCache::Clear(string_split_cache()); + RegExpResultsCache::Clear(regexp_multiple_cache()); + + isolate_->compilation_cache()->MarkCompactPrologue(); + + CompletelyClearInstanceofCache(); + + FlushNumberStringCache(); + if (FLAG_cleanup_code_caches_at_gc) { + polymorphic_code_cache()->set_cache(undefined_value()); + } + + ClearNormalizedMapCaches(); +} + + +// Helper class for copying HeapObjects +class ScavengeVisitor : public ObjectVisitor { + public: + explicit ScavengeVisitor(Heap* heap) : heap_(heap) {} + + void VisitPointer(Object** p) { ScavengePointer(p); } + + void VisitPointers(Object** start, Object** end) { + // Copy all HeapObject pointers in [start, end) + for (Object** p = start; p < end; p++) ScavengePointer(p); + } + + private: + void ScavengePointer(Object** p) { + Object* object = *p; + if (!heap_->InNewSpace(object)) return; + Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), + reinterpret_cast<HeapObject*>(object)); + } + + Heap* heap_; +}; + + +#ifdef VERIFY_HEAP +// Visitor class to verify pointers in code or data space do not point into +// new space. 
+class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor { + public: + explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {} + void VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + CHECK(!heap_->InNewSpace(HeapObject::cast(*current))); + } + } + } + + private: + Heap* heap_; +}; + + +static void VerifyNonPointerSpacePointers(Heap* heap) { + // Verify that there are no pointers to new space in spaces where we + // do not expect them. + VerifyNonPointerSpacePointersVisitor v(heap); + HeapObjectIterator code_it(heap->code_space()); + for (HeapObject* object = code_it.Next(); object != NULL; + object = code_it.Next()) + object->Iterate(&v); + + // The old data space was normally swept conservatively so that the iterator + // doesn't work, so we normally skip the next bit. + if (heap->old_data_space()->swept_precisely()) { + HeapObjectIterator data_it(heap->old_data_space()); + for (HeapObject* object = data_it.Next(); object != NULL; + object = data_it.Next()) + object->Iterate(&v); + } +} +#endif // VERIFY_HEAP + + +void Heap::CheckNewSpaceExpansionCriteria() { + if (new_space_.Capacity() < new_space_.MaximumCapacity() && + survived_since_last_expansion_ > new_space_.Capacity()) { + // Grow the size of new space if there is room to grow, enough data + // has survived scavenge since the last expansion and we are not in + // high promotion mode. 
+ new_space_.Grow(); + survived_since_last_expansion_ = 0; + } +} + + +static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { + return heap->InNewSpace(*p) && + !HeapObject::cast(*p)->map_word().IsForwardingAddress(); +} + + +void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, + StoreBufferEvent event) { + heap->store_buffer_rebuilder_.Callback(page, event); +} + + +void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) { + if (event == kStoreBufferStartScanningPagesEvent) { + start_of_current_page_ = NULL; + current_page_ = NULL; + } else if (event == kStoreBufferScanningPageEvent) { + if (current_page_ != NULL) { + // If this page already overflowed the store buffer during this iteration. + if (current_page_->scan_on_scavenge()) { + // Then we should wipe out the entries that have been added for it. + store_buffer_->SetTop(start_of_current_page_); + } else if (store_buffer_->Top() - start_of_current_page_ >= + (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { + // Did we find too many pointers in the previous page? The heuristic is + // that no page can take more then 1/5 the remaining slots in the store + // buffer. + current_page_->set_scan_on_scavenge(true); + store_buffer_->SetTop(start_of_current_page_); + } else { + // In this case the page we scanned took a reasonable number of slots in + // the store buffer. It has now been rehabilitated and is no longer + // marked scan_on_scavenge. + DCHECK(!current_page_->scan_on_scavenge()); + } + } + start_of_current_page_ = store_buffer_->Top(); + current_page_ = page; + } else if (event == kStoreBufferFullEvent) { + // The current page overflowed the store buffer again. Wipe out its entries + // in the store buffer and mark it scan-on-scavenge again. This may happen + // several times while scanning. + if (current_page_ == NULL) { + // Store Buffer overflowed while scanning promoted objects. 
These are not + // in any particular page, though they are likely to be clustered by the + // allocation routines. + store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2); + } else { + // Store Buffer overflowed while scanning a particular old space page for + // pointers to new space. + DCHECK(current_page_ == page); + DCHECK(page != NULL); + current_page_->set_scan_on_scavenge(true); + DCHECK(start_of_current_page_ != store_buffer_->Top()); + store_buffer_->SetTop(start_of_current_page_); + } + } else { + UNREACHABLE(); + } +} + + +void PromotionQueue::Initialize() { + // Assumes that a NewSpacePage exactly fits a number of promotion queue + // entries (where each is a pair of intptr_t). This allows us to simplify + // the test fpr when to switch pages. + DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == + 0); + limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); + front_ = rear_ = + reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); + emergency_stack_ = NULL; + guard_ = false; +} + + +void PromotionQueue::RelocateQueueHead() { + DCHECK(emergency_stack_ == NULL); + + Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); + intptr_t* head_start = rear_; + intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); + + int entries_count = + static_cast<int>(head_end - head_start) / kEntrySizeInWords; + + emergency_stack_ = new List<Entry>(2 * entries_count); + + while (head_start != head_end) { + int size = static_cast<int>(*(head_start++)); + HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++)); + emergency_stack_->Add(Entry(obj, size)); + } + rear_ = head_end; +} + + +class ScavengeWeakObjectRetainer : public WeakObjectRetainer { + public: + explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {} + + virtual Object* RetainAs(Object* object) { + if (!heap_->InFromSpace(object)) { + return object; + } + + MapWord map_word = 
HeapObject::cast(object)->map_word(); + if (map_word.IsForwardingAddress()) { + return map_word.ToForwardingAddress(); + } + return NULL; + } + + private: + Heap* heap_; +}; + + +void Heap::Scavenge() { + RelocationLock relocation_lock(this); + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this); +#endif + + gc_state_ = SCAVENGE; + + // Implements Cheney's copying algorithm + LOG(isolate_, ResourceEvent("scavenge", "begin")); + + // Clear descriptor cache. + isolate_->descriptor_lookup_cache()->Clear(); + + // Used for updating survived_since_last_expansion_ at function end. + intptr_t survived_watermark = PromotedSpaceSizeOfObjects(); + + SelectScavengingVisitorsTable(); + + incremental_marking()->PrepareForScavenge(); + + // Flip the semispaces. After flipping, to space is empty, from space has + // live objects. + new_space_.Flip(); + new_space_.ResetAllocationInfo(); + + // We need to sweep newly copied objects which can be either in the + // to space or promoted to the old generation. For to-space + // objects, we treat the bottom of the to space as a queue. Newly + // copied and unswept objects lie between a 'front' mark and the + // allocation pointer. + // + // Promoted objects can go into various old-generation spaces, and + // can be allocated internally in the spaces (from the free list). + // We treat the top of the to space as a queue of addresses of + // promoted objects. The addresses of newly promoted and unswept + // objects lie between a 'front' mark and a 'rear' mark that is + // updated as a side effect of promoting an object. + // + // There is guaranteed to be enough room at the top of the to space + // for the addresses of promoted objects: every object promoted + // frees up its size in bytes from the top of the new space, and + // objects are at least one pointer in size. 
+ Address new_space_front = new_space_.ToSpaceStart(); + promotion_queue_.Initialize(); + +#ifdef DEBUG + store_buffer()->Clean(); +#endif + + ScavengeVisitor scavenge_visitor(this); + // Copy roots. + IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); + + // Copy objects reachable from the old generation. + { + StoreBufferRebuildScope scope(this, store_buffer(), + &ScavengeStoreBufferCallback); + store_buffer()->IteratePointersToNewSpace(&ScavengeObject); + } + + // Copy objects reachable from simple cells by scavenging cell values + // directly. + HeapObjectIterator cell_iterator(cell_space_); + for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL; + heap_object = cell_iterator.Next()) { + if (heap_object->IsCell()) { + Cell* cell = Cell::cast(heap_object); + Address value_address = cell->ValueAddress(); + scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); + } + } + + // Copy objects reachable from global property cells by scavenging global + // property cell values directly. + HeapObjectIterator js_global_property_cell_iterator(property_cell_space_); + for (HeapObject* heap_object = js_global_property_cell_iterator.Next(); + heap_object != NULL; + heap_object = js_global_property_cell_iterator.Next()) { + if (heap_object->IsPropertyCell()) { + PropertyCell* cell = PropertyCell::cast(heap_object); + Address value_address = cell->ValueAddress(); + scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); + Address type_address = cell->TypeAddress(); + scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address)); + } + } + + // Copy objects reachable from the encountered weak collections list. + scavenge_visitor.VisitPointer(&encountered_weak_collections_); + + // Copy objects reachable from the code flushing candidates list. 
+ MarkCompactCollector* collector = mark_compact_collector(); + if (collector->is_code_flushing_enabled()) { + collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor); + } + + new_space_front = DoScavenge(&scavenge_visitor, new_space_front); + + while (isolate()->global_handles()->IterateObjectGroups( + &scavenge_visitor, &IsUnscavengedHeapObject)) { + new_space_front = DoScavenge(&scavenge_visitor, new_space_front); + } + isolate()->global_handles()->RemoveObjectGroups(); + isolate()->global_handles()->RemoveImplicitRefGroups(); + + isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles( + &IsUnscavengedHeapObject); + isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots( + &scavenge_visitor); + new_space_front = DoScavenge(&scavenge_visitor, new_space_front); + + UpdateNewSpaceReferencesInExternalStringTable( + &UpdateNewSpaceReferenceInExternalStringTableEntry); + + promotion_queue_.Destroy(); + + incremental_marking()->UpdateMarkingDequeAfterScavenge(); + + ScavengeWeakObjectRetainer weak_object_retainer(this); + ProcessWeakReferences(&weak_object_retainer); + + DCHECK(new_space_front == new_space_.top()); + + // Set age mark. + new_space_.set_age_mark(new_space_.top()); + + new_space_.LowerInlineAllocationLimit( + new_space_.inline_allocation_limit_step()); + + // Update how much has survived scavenge. + IncrementYoungSurvivorsCounter(static_cast<int>( + (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); + + LOG(isolate_, ResourceEvent("scavenge", "end")); + + gc_state_ = NOT_IN_GC; + + scavenges_since_last_idle_round_++; +} + + +String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, + Object** p) { + MapWord first_word = HeapObject::cast(*p)->map_word(); + + if (!first_word.IsForwardingAddress()) { + // Unreachable external string can be finalized. + heap->FinalizeExternalString(String::cast(*p)); + return NULL; + } + + // String is still reachable. 
+ return String::cast(first_word.ToForwardingAddress()); +} + + +void Heap::UpdateNewSpaceReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func) { +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + external_string_table_.Verify(); + } +#endif + + if (external_string_table_.new_space_strings_.is_empty()) return; + + Object** start = &external_string_table_.new_space_strings_[0]; + Object** end = start + external_string_table_.new_space_strings_.length(); + Object** last = start; + + for (Object** p = start; p < end; ++p) { + DCHECK(InFromSpace(*p)); + String* target = updater_func(this, p); + + if (target == NULL) continue; + + DCHECK(target->IsExternalString()); + + if (InNewSpace(target)) { + // String is still in new space. Update the table entry. + *last = target; + ++last; + } else { + // String got promoted. Move it to the old string list. + external_string_table_.AddOldString(target); + } + } + + DCHECK(last <= end); + external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); +} + + +void Heap::UpdateReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func) { + // Update old space string references. + if (external_string_table_.old_space_strings_.length() > 0) { + Object** start = &external_string_table_.old_space_strings_[0]; + Object** end = start + external_string_table_.old_space_strings_.length(); + for (Object** p = start; p < end; ++p) *p = updater_func(this, p); + } + + UpdateNewSpaceReferencesInExternalStringTable(updater_func); +} + + +void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { + ProcessArrayBuffers(retainer); + ProcessNativeContexts(retainer); + // TODO(mvstanton): AllocationSites only need to be processed during + // MARK_COMPACT, as they live in old space. Verify and address. 
+ ProcessAllocationSites(retainer); +} + + +void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) { + Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer); + // Update the head of the list of contexts. + set_native_contexts_list(head); +} + + +void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) { + Object* array_buffer_obj = + VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer); + set_array_buffers_list(array_buffer_obj); +} + + +void Heap::TearDownArrayBuffers() { + Object* undefined = undefined_value(); + for (Object* o = array_buffers_list(); o != undefined;) { + JSArrayBuffer* buffer = JSArrayBuffer::cast(o); + Runtime::FreeArrayBuffer(isolate(), buffer); + o = buffer->weak_next(); + } + set_array_buffers_list(undefined); +} + + +void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) { + Object* allocation_site_obj = + VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer); + set_allocation_sites_list(allocation_site_obj); +} + + +void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) { + DisallowHeapAllocation no_allocation_scope; + Object* cur = allocation_sites_list(); + bool marked = false; + while (cur->IsAllocationSite()) { + AllocationSite* casted = AllocationSite::cast(cur); + if (casted->GetPretenureMode() == flag) { + casted->ResetPretenureDecision(); + casted->set_deopt_dependent_code(true); + marked = true; + } + cur = casted->weak_next(); + } + if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); +} + + +void Heap::EvaluateOldSpaceLocalPretenuring( + uint64_t size_of_objects_before_gc) { + uint64_t size_of_objects_after_gc = SizeOfObjects(); + double old_generation_survival_rate = + (static_cast<double>(size_of_objects_after_gc) * 100) / + static_cast<double>(size_of_objects_before_gc); + + if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { + // Too many objects died in the old generation, pretenuring of wrong + // 
allocation sites may be the cause for that. We have to deopt all + // dependent code registered in the allocation sites to re-evaluate + // our pretenuring decisions. + ResetAllAllocationSitesDependentCode(TENURED); + if (FLAG_trace_pretenuring) { + PrintF( + "Deopt all allocation sites dependent code due to low survival " + "rate in the old generation %f\n", + old_generation_survival_rate); + } + } +} + + +void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { + DisallowHeapAllocation no_allocation; + // All external strings are listed in the external string table. + + class ExternalStringTableVisitorAdapter : public ObjectVisitor { + public: + explicit ExternalStringTableVisitorAdapter( + v8::ExternalResourceVisitor* visitor) + : visitor_(visitor) {} + virtual void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + DCHECK((*p)->IsExternalString()); + visitor_->VisitExternalString( + Utils::ToLocal(Handle<String>(String::cast(*p)))); + } + } + + private: + v8::ExternalResourceVisitor* visitor_; + } external_string_table_visitor(visitor); + + external_string_table_.Iterate(&external_string_table_visitor); +} + + +class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> { + public: + static inline void VisitPointer(Heap* heap, Object** p) { + Object* object = *p; + if (!heap->InNewSpace(object)) return; + Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), + reinterpret_cast<HeapObject*>(object)); + } +}; + + +Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, + Address new_space_front) { + do { + SemiSpace::AssertValidRange(new_space_front, new_space_.top()); + // The addresses new_space_front and new_space_.top() define a + // queue of unprocessed copied objects. Process them until the + // queue is empty. 
+ while (new_space_front != new_space_.top()) { + if (!NewSpacePage::IsAtEnd(new_space_front)) { + HeapObject* object = HeapObject::FromAddress(new_space_front); + new_space_front += + NewSpaceScavenger::IterateBody(object->map(), object); + } else { + new_space_front = + NewSpacePage::FromLimit(new_space_front)->next_page()->area_start(); + } + } + + // Promote and process all the to-be-promoted objects. + { + StoreBufferRebuildScope scope(this, store_buffer(), + &ScavengeStoreBufferCallback); + while (!promotion_queue()->is_empty()) { + HeapObject* target; + int size; + promotion_queue()->remove(&target, &size); + + // Promoted object might be already partially visited + // during old space pointer iteration. Thus we search specificly + // for pointers to from semispace instead of looking for pointers + // to new space. + DCHECK(!target->IsMap()); + IterateAndMarkPointersToFromSpace( + target->address(), target->address() + size, &ScavengeObject); + } + } + + // Take another spin if there are now unswept objects in new space + // (there are currently no more unswept promoted objects). 
+ } while (new_space_front != new_space_.top()); + + return new_space_front; +} + + +STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == + 0); // NOLINT +STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) == + 0); // NOLINT +STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset & + kDoubleAlignmentMask) == 0); // NOLINT + + +INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object, + int size)); + +static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object, + int size) { + if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) { + heap->CreateFillerObjectAt(object->address(), kPointerSize); + return HeapObject::FromAddress(object->address() + kPointerSize); + } else { + heap->CreateFillerObjectAt(object->address() + size - kPointerSize, + kPointerSize); + return object; + } +} + + +enum LoggingAndProfiling { + LOGGING_AND_PROFILING_ENABLED, + LOGGING_AND_PROFILING_DISABLED +}; + + +enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS }; + + +template <MarksHandling marks_handling, + LoggingAndProfiling logging_and_profiling_mode> +class ScavengingVisitor : public StaticVisitorBase { + public: + static void Initialize() { + table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString); + table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); + table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); + table_.Register(kVisitByteArray, &EvacuateByteArray); + table_.Register(kVisitFixedArray, &EvacuateFixedArray); + table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray); + table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray); + table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array); + + table_.Register( + kVisitNativeContext, + &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized< + Context::kSize>); + + table_.Register( + kVisitConsString, + &ObjectEvacuationStrategy<POINTER_OBJECT>::template 
VisitSpecialized< + ConsString::kSize>); + + table_.Register( + kVisitSlicedString, + &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized< + SlicedString::kSize>); + + table_.Register( + kVisitSymbol, + &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized< + Symbol::kSize>); + + table_.Register( + kVisitSharedFunctionInfo, + &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized< + SharedFunctionInfo::kSize>); + + table_.Register(kVisitJSWeakCollection, + &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit); + + table_.Register(kVisitJSArrayBuffer, + &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit); + + table_.Register(kVisitJSTypedArray, + &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit); + + table_.Register(kVisitJSDataView, + &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit); + + table_.Register(kVisitJSRegExp, + &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit); + + if (marks_handling == IGNORE_MARKS) { + table_.Register( + kVisitJSFunction, + &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized< + JSFunction::kSize>); + } else { + table_.Register(kVisitJSFunction, &EvacuateJSFunction); + } + + table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, + kVisitDataObject, kVisitDataObjectGeneric>(); + + table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, + kVisitJSObject, kVisitJSObjectGeneric>(); + + table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, + kVisitStruct, kVisitStructGeneric>(); + } + + static VisitorDispatchTable<ScavengingCallback>* GetTable() { + return &table_; + } + + private: + enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; + + static void RecordCopiedObject(Heap* heap, HeapObject* obj) { + bool should_record = false; +#ifdef DEBUG + should_record = FLAG_heap_stats; +#endif + should_record = should_record || FLAG_log_gc; + if (should_record) { + if (heap->new_space()->Contains(obj)) { + 
heap->new_space()->RecordAllocation(obj); + } else { + heap->new_space()->RecordPromotion(obj); + } + } + } + + // Helper function used by CopyObject to copy a source object to an + // allocated target object and update the forwarding pointer in the source + // object. Returns the target object. + INLINE(static void MigrateObject(Heap* heap, HeapObject* source, + HeapObject* target, int size)) { + // If we migrate into to-space, then the to-space top pointer should be + // right after the target object. Incorporate double alignment + // over-allocation. + DCHECK(!heap->InToSpace(target) || + target->address() + size == heap->new_space()->top() || + target->address() + size + kPointerSize == heap->new_space()->top()); + + // Make sure that we do not overwrite the promotion queue which is at + // the end of to-space. + DCHECK(!heap->InToSpace(target) || + heap->promotion_queue()->IsBelowPromotionQueue( + heap->new_space()->top())); + + // Copy the content of source to target. + heap->CopyBlock(target->address(), source->address(), size); + + // Set the forwarding address. + source->set_map_word(MapWord::FromForwardingAddress(target)); + + if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { + // Update NewSpace stats if necessary. 
+ RecordCopiedObject(heap, target); + heap->OnMoveEvent(target, source, size); + } + + if (marks_handling == TRANSFER_MARKS) { + if (Marking::TransferColor(source, target)) { + MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); + } + } + } + + template <int alignment> + static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot, + HeapObject* object, int object_size) { + Heap* heap = map->GetHeap(); + + int allocation_size = object_size; + if (alignment != kObjectAlignment) { + DCHECK(alignment == kDoubleAlignment); + allocation_size += kPointerSize; + } + + DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE)); + AllocationResult allocation = + heap->new_space()->AllocateRaw(allocation_size); + + HeapObject* target = NULL; // Initialization to please compiler. + if (allocation.To(&target)) { + if (alignment != kObjectAlignment) { + target = EnsureDoubleAligned(heap, target, allocation_size); + } + + // Order is important here: Set the promotion limit before migrating + // the object. Otherwise we may end up overwriting promotion queue + // entries when we migrate the object. + heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); + + // Order is important: slot might be inside of the target if target + // was allocated over a dead object and slot comes from the store + // buffer. 
+ *slot = target; + MigrateObject(heap, object, target, object_size); + + heap->IncrementSemiSpaceCopiedObjectSize(object_size); + return true; + } + return false; + } + + + template <ObjectContents object_contents, int alignment> + static inline bool PromoteObject(Map* map, HeapObject** slot, + HeapObject* object, int object_size) { + Heap* heap = map->GetHeap(); + + int allocation_size = object_size; + if (alignment != kObjectAlignment) { + DCHECK(alignment == kDoubleAlignment); + allocation_size += kPointerSize; + } + + AllocationResult allocation; + if (object_contents == DATA_OBJECT) { + DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); + allocation = heap->old_data_space()->AllocateRaw(allocation_size); + } else { + DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); + allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); + } + + HeapObject* target = NULL; // Initialization to please compiler. + if (allocation.To(&target)) { + if (alignment != kObjectAlignment) { + target = EnsureDoubleAligned(heap, target, allocation_size); + } + + // Order is important: slot might be inside of the target if target + // was allocated over a dead object and slot comes from the store + // buffer. 
+ *slot = target; + MigrateObject(heap, object, target, object_size); + + if (object_contents == POINTER_OBJECT) { + if (map->instance_type() == JS_FUNCTION_TYPE) { + heap->promotion_queue()->insert(target, + JSFunction::kNonWeakFieldsEndOffset); + } else { + heap->promotion_queue()->insert(target, object_size); + } + } + heap->IncrementPromotedObjectsSize(object_size); + return true; + } + return false; + } + + + template <ObjectContents object_contents, int alignment> + static inline void EvacuateObject(Map* map, HeapObject** slot, + HeapObject* object, int object_size) { + SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); + SLOW_DCHECK(object->Size() == object_size); + Heap* heap = map->GetHeap(); + + if (!heap->ShouldBePromoted(object->address(), object_size)) { + // A semi-space copy may fail due to fragmentation. In that case, we + // try to promote the object. + if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { + return; + } + } + + if (PromoteObject<object_contents, alignment>(map, slot, object, + object_size)) { + return; + } + + // If promotion failed, we try to copy the object to the other semi-space + if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return; + + UNREACHABLE(); + } + + + static inline void EvacuateJSFunction(Map* map, HeapObject** slot, + HeapObject* object) { + ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized< + JSFunction::kSize>(map, slot, object); + + HeapObject* target = *slot; + MarkBit mark_bit = Marking::MarkBitFrom(target); + if (Marking::IsBlack(mark_bit)) { + // This object is black and it might not be rescanned by marker. + // We should explicitly record code entry slot for compaction because + // promotion queue processing (IterateAndMarkPointersToFromSpace) will + // miss it as it is not HeapObject-tagged. 
+ Address code_entry_slot = + target->address() + JSFunction::kCodeEntryOffset; + Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); + map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot( + code_entry_slot, code); + } + } + + + static inline void EvacuateFixedArray(Map* map, HeapObject** slot, + HeapObject* object) { + int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); + EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot, + HeapObject* object) { + int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); + int object_size = FixedDoubleArray::SizeFor(length); + EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot, + HeapObject* object) { + int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size(); + EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot, + HeapObject* object) { + int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size(); + EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateByteArray(Map* map, HeapObject** slot, + HeapObject* object) { + int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize(); + EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot, + HeapObject* object) { + int object_size = SeqOneByteString::cast(object) + ->SeqOneByteStringSize(map->instance_type()); + EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot, + HeapObject* 
object) { + int object_size = SeqTwoByteString::cast(object) + ->SeqTwoByteStringSize(map->instance_type()); + EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object, + object_size); + } + + + static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot, + HeapObject* object) { + DCHECK(IsShortcutCandidate(map->instance_type())); + + Heap* heap = map->GetHeap(); + + if (marks_handling == IGNORE_MARKS && + ConsString::cast(object)->unchecked_second() == heap->empty_string()) { + HeapObject* first = + HeapObject::cast(ConsString::cast(object)->unchecked_first()); + + *slot = first; + + if (!heap->InNewSpace(first)) { + object->set_map_word(MapWord::FromForwardingAddress(first)); + return; + } + + MapWord first_word = first->map_word(); + if (first_word.IsForwardingAddress()) { + HeapObject* target = first_word.ToForwardingAddress(); + + *slot = target; + object->set_map_word(MapWord::FromForwardingAddress(target)); + return; + } + + heap->DoScavengeObject(first->map(), slot, first); + object->set_map_word(MapWord::FromForwardingAddress(*slot)); + return; + } + + int object_size = ConsString::kSize; + EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object, + object_size); + } + + template <ObjectContents object_contents> + class ObjectEvacuationStrategy { + public: + template <int object_size> + static inline void VisitSpecialized(Map* map, HeapObject** slot, + HeapObject* object) { + EvacuateObject<object_contents, kObjectAlignment>(map, slot, object, + object_size); + } + + static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) { + int object_size = map->instance_size(); + EvacuateObject<object_contents, kObjectAlignment>(map, slot, object, + object_size); + } + }; + + static VisitorDispatchTable<ScavengingCallback> table_; +}; + + +template <MarksHandling marks_handling, + LoggingAndProfiling logging_and_profiling_mode> +VisitorDispatchTable<ScavengingCallback> + ScavengingVisitor<marks_handling, 
logging_and_profiling_mode>::table_; + + +static void InitializeScavengingVisitorsTables() { + ScavengingVisitor<TRANSFER_MARKS, + LOGGING_AND_PROFILING_DISABLED>::Initialize(); + ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize(); + ScavengingVisitor<TRANSFER_MARKS, + LOGGING_AND_PROFILING_ENABLED>::Initialize(); + ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize(); +} + + +void Heap::SelectScavengingVisitorsTable() { + bool logging_and_profiling = + FLAG_verify_predictable || isolate()->logger()->is_logging() || + isolate()->cpu_profiler()->is_profiling() || + (isolate()->heap_profiler() != NULL && + isolate()->heap_profiler()->is_tracking_object_moves()); + + if (!incremental_marking()->IsMarking()) { + if (!logging_and_profiling) { + scavenging_visitors_table_.CopyFrom(ScavengingVisitor< + IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable()); + } else { + scavenging_visitors_table_.CopyFrom(ScavengingVisitor< + IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable()); + } + } else { + if (!logging_and_profiling) { + scavenging_visitors_table_.CopyFrom(ScavengingVisitor< + TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable()); + } else { + scavenging_visitors_table_.CopyFrom(ScavengingVisitor< + TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable()); + } + + if (incremental_marking()->IsCompacting()) { + // When compacting forbid short-circuiting of cons-strings. + // Scavenging code relies on the fact that new space object + // can't be evacuated into evacuation candidate but + // short-circuiting violates this assumption. 
+ scavenging_visitors_table_.Register( + StaticVisitorBase::kVisitShortcutCandidate, + scavenging_visitors_table_.GetVisitorById( + StaticVisitorBase::kVisitConsString)); + } + } +} + + +void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { + SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object)); + MapWord first_word = object->map_word(); + SLOW_DCHECK(!first_word.IsForwardingAddress()); + Map* map = first_word.ToMap(); + map->GetHeap()->DoScavengeObject(map, p, object); +} + + +AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, + int instance_size) { + Object* result; + AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); + if (!allocation.To(&result)) return allocation; + + // Map::cast cannot be used due to uninitialized map field. + reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); + reinterpret_cast<Map*>(result)->set_instance_type(instance_type); + reinterpret_cast<Map*>(result)->set_instance_size(instance_size); + reinterpret_cast<Map*>(result)->set_visitor_id( + StaticVisitorBase::GetVisitorId(instance_type, instance_size)); + reinterpret_cast<Map*>(result)->set_inobject_properties(0); + reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0); + reinterpret_cast<Map*>(result)->set_unused_property_fields(0); + reinterpret_cast<Map*>(result)->set_bit_field(0); + reinterpret_cast<Map*>(result)->set_bit_field2(0); + int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | + Map::OwnsDescriptors::encode(true); + reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3); + return result; +} + + +AllocationResult Heap::AllocateMap(InstanceType instance_type, + int instance_size, + ElementsKind elements_kind) { + HeapObject* result; + AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); + if (!allocation.To(&result)) return allocation; + + result->set_map_no_write_barrier(meta_map()); + Map* map = Map::cast(result); + 
map->set_instance_type(instance_type); + map->set_visitor_id( + StaticVisitorBase::GetVisitorId(instance_type, instance_size)); + map->set_prototype(null_value(), SKIP_WRITE_BARRIER); + map->set_constructor(null_value(), SKIP_WRITE_BARRIER); + map->set_instance_size(instance_size); + map->set_inobject_properties(0); + map->set_pre_allocated_property_fields(0); + map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER); + map->set_dependent_code(DependentCode::cast(empty_fixed_array()), + SKIP_WRITE_BARRIER); + map->init_back_pointer(undefined_value()); + map->set_unused_property_fields(0); + map->set_instance_descriptors(empty_descriptor_array()); + map->set_bit_field(0); + map->set_bit_field2(1 << Map::kIsExtensible); + int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | + Map::OwnsDescriptors::encode(true); + map->set_bit_field3(bit_field3); + map->set_elements_kind(elements_kind); + + return map; +} + + +AllocationResult Heap::AllocateFillerObject(int size, bool double_align, + AllocationSpace space) { + HeapObject* obj; + { + AllocationResult allocation = AllocateRaw(size, space, space); + if (!allocation.To(&obj)) return allocation; + } +#ifdef DEBUG + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + DCHECK(chunk->owner()->identity() == space); +#endif + CreateFillerObjectAt(obj->address(), size); + return obj; +} + + +const Heap::StringTypeTable Heap::string_type_table[] = { +#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ + { type, size, k##camel_name##MapRootIndex } \ + , + STRING_TYPE_LIST(STRING_TYPE_ELEMENT) +#undef STRING_TYPE_ELEMENT +}; + + +const Heap::ConstantStringTable Heap::constant_string_table[] = { +#define CONSTANT_STRING_ELEMENT(name, contents) \ + { contents, k##name##RootIndex } \ + , + INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT) +#undef CONSTANT_STRING_ELEMENT +}; + + +const Heap::StructTable Heap::struct_table[] = { +#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \ + { 
NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \ + , + STRUCT_LIST(STRUCT_TABLE_ELEMENT) +#undef STRUCT_TABLE_ELEMENT +}; + + +bool Heap::CreateInitialMaps() { + HeapObject* obj; + { + AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize); + if (!allocation.To(&obj)) return false; + } + // Map::cast cannot be used due to uninitialized map field. + Map* new_meta_map = reinterpret_cast<Map*>(obj); + set_meta_map(new_meta_map); + new_meta_map->set_map(new_meta_map); + + { // Partial map allocation +#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \ + { \ + Map* map; \ + if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \ + set_##field_name##_map(map); \ + } + + ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array); + ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined); + ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null); + ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel, + constant_pool_array); + +#undef ALLOCATE_PARTIAL_MAP + } + + // Allocate the empty array. + { + AllocationResult allocation = AllocateEmptyFixedArray(); + if (!allocation.To(&obj)) return false; + } + set_empty_fixed_array(FixedArray::cast(obj)); + + { + AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE); + if (!allocation.To(&obj)) return false; + } + set_null_value(Oddball::cast(obj)); + Oddball::cast(obj)->set_kind(Oddball::kNull); + + { + AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE); + if (!allocation.To(&obj)) return false; + } + set_undefined_value(Oddball::cast(obj)); + Oddball::cast(obj)->set_kind(Oddball::kUndefined); + DCHECK(!InNewSpace(undefined_value())); + + // Set preliminary exception sentinel value before actually initializing it. + set_exception(null_value()); + + // Allocate the empty descriptor array. 
+ { + AllocationResult allocation = AllocateEmptyFixedArray(); + if (!allocation.To(&obj)) return false; + } + set_empty_descriptor_array(DescriptorArray::cast(obj)); + + // Allocate the constant pool array. + { + AllocationResult allocation = AllocateEmptyConstantPoolArray(); + if (!allocation.To(&obj)) return false; + } + set_empty_constant_pool_array(ConstantPoolArray::cast(obj)); + + // Fix the instance_descriptors for the existing maps. + meta_map()->set_code_cache(empty_fixed_array()); + meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); + meta_map()->init_back_pointer(undefined_value()); + meta_map()->set_instance_descriptors(empty_descriptor_array()); + + fixed_array_map()->set_code_cache(empty_fixed_array()); + fixed_array_map()->set_dependent_code( + DependentCode::cast(empty_fixed_array())); + fixed_array_map()->init_back_pointer(undefined_value()); + fixed_array_map()->set_instance_descriptors(empty_descriptor_array()); + + undefined_map()->set_code_cache(empty_fixed_array()); + undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); + undefined_map()->init_back_pointer(undefined_value()); + undefined_map()->set_instance_descriptors(empty_descriptor_array()); + + null_map()->set_code_cache(empty_fixed_array()); + null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); + null_map()->init_back_pointer(undefined_value()); + null_map()->set_instance_descriptors(empty_descriptor_array()); + + constant_pool_array_map()->set_code_cache(empty_fixed_array()); + constant_pool_array_map()->set_dependent_code( + DependentCode::cast(empty_fixed_array())); + constant_pool_array_map()->init_back_pointer(undefined_value()); + constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array()); + + // Fix prototype object for existing maps. 
+ meta_map()->set_prototype(null_value()); + meta_map()->set_constructor(null_value()); + + fixed_array_map()->set_prototype(null_value()); + fixed_array_map()->set_constructor(null_value()); + + undefined_map()->set_prototype(null_value()); + undefined_map()->set_constructor(null_value()); + + null_map()->set_prototype(null_value()); + null_map()->set_constructor(null_value()); + + constant_pool_array_map()->set_prototype(null_value()); + constant_pool_array_map()->set_constructor(null_value()); + + { // Map allocation +#define ALLOCATE_MAP(instance_type, size, field_name) \ + { \ + Map* map; \ + if (!AllocateMap((instance_type), size).To(&map)) return false; \ + set_##field_name##_map(map); \ + } + +#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \ + ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name) + + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array) + DCHECK(fixed_array_map() != fixed_cow_array_map()); + + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info) + ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number) + ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, + mutable_heap_number) + ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol) + ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) + + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole); + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean); + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized); + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker); + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel); + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception); + ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception); + + for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) { + const StringTypeTable& entry = string_type_table[i]; + { + AllocationResult allocation = AllocateMap(entry.type, entry.size); + if (!allocation.To(&obj)) return false; + } + // Mark cons string maps as unstable, because their 
objects can change + // maps during GC. + Map* map = Map::cast(obj); + if (StringShape(entry.type).IsCons()) map->mark_unstable(); + roots_[entry.index] = map; + } + + ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string) + undetectable_string_map()->set_is_undetectable(); + + ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string); + undetectable_ascii_string_map()->set_is_undetectable(); + + ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array) + ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array) + ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space) + +#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \ + ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \ + external_##type##_array) + + TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP) +#undef ALLOCATE_EXTERNAL_ARRAY_MAP + +#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \ + ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array) + + TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP) +#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP + + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements) + + ALLOCATE_VARSIZE_MAP(CODE_TYPE, code) + + ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell) + ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell) + ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler) + ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler) + + + for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) { + const StructTable& entry = struct_table[i]; + Map* map; + if (!AllocateMap(entry.type, entry.size).To(&map)) return false; + roots_[entry.index] = map; + } + + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table) + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table) + + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context) + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context) + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context) + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, 
block_context) + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context) + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context) + + ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context) + native_context_map()->set_dictionary_map(true); + native_context_map()->set_visitor_id( + StaticVisitorBase::kVisitNativeContext); + + ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize, + shared_function_info) + + ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object) + ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external) + external_map()->set_is_extensible(false); +#undef ALLOCATE_VARSIZE_MAP +#undef ALLOCATE_MAP + } + + { // Empty arrays + { + ByteArray* byte_array; + if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false; + set_empty_byte_array(byte_array); + } + +#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \ + { \ + ExternalArray* obj; \ + if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \ + return false; \ + set_empty_external_##type##_array(obj); \ + } + + TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY) +#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY + +#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ + { \ + FixedTypedArrayBase* obj; \ + if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \ + return false; \ + set_empty_fixed_##type##_array(obj); \ + } + + TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY) +#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY + } + DCHECK(!InNewSpace(empty_fixed_array())); + return true; +} + + +AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode, + PretenureFlag pretenure) { + // Statically ensure that it is safe to allocate heap numbers in paged + // spaces. 
+ int size = HeapNumber::kSize; + STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize); + + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map(); + HeapObject::cast(result)->set_map_no_write_barrier(map); + HeapNumber::cast(result)->set_value(value); + return result; +} + + +AllocationResult Heap::AllocateCell(Object* value) { + int size = Cell::kSize; + STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize); + + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE); + if (!allocation.To(&result)) return allocation; + } + result->set_map_no_write_barrier(cell_map()); + Cell::cast(result)->set_value(value); + return result; +} + + +AllocationResult Heap::AllocatePropertyCell() { + int size = PropertyCell::kSize; + STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize); + + HeapObject* result; + AllocationResult allocation = + AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE); + if (!allocation.To(&result)) return allocation; + + result->set_map_no_write_barrier(global_property_cell_map()); + PropertyCell* cell = PropertyCell::cast(result); + cell->set_dependent_code(DependentCode::cast(empty_fixed_array()), + SKIP_WRITE_BARRIER); + cell->set_value(the_hole_value()); + cell->set_type(HeapType::None()); + return result; +} + + +void Heap::CreateApiObjects() { + HandleScope scope(isolate()); + Factory* factory = isolate()->factory(); + Handle<Map> new_neander_map = + factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); + + // Don't use Smi-only elements optimizations for objects with the neander + // map. 
There are too many cases where element values are set directly with a + // bottleneck to trap the Smi-only -> fast elements transition, and there + // appears to be no benefit for optimize this case. + new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND); + set_neander_map(*new_neander_map); + + Handle<JSObject> listeners = factory->NewNeanderObject(); + Handle<FixedArray> elements = factory->NewFixedArray(2); + elements->set(0, Smi::FromInt(0)); + listeners->set_elements(*elements); + set_message_listeners(*listeners); +} + + +void Heap::CreateJSEntryStub() { + JSEntryStub stub(isolate()); + set_js_entry_code(*stub.GetCode()); +} + + +void Heap::CreateJSConstructEntryStub() { + JSConstructEntryStub stub(isolate()); + set_js_construct_entry_code(*stub.GetCode()); +} + + +void Heap::CreateFixedStubs() { + // Here we create roots for fixed stubs. They are needed at GC + // for cooking and uncooking (check out frames.cc). + // The eliminates the need for doing dictionary lookup in the + // stub cache for these stubs. + HandleScope scope(isolate()); + + // Create stubs that should be there, so we don't unexpectedly have to + // create them if we need them during the creation of another stub. + // Stub creation mixes raw pointers and handles in an unsafe manner so + // we cannot create stubs while we are creating stubs. + CodeStub::GenerateStubsAheadOfTime(isolate()); + + // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on + // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub + // is created. + + // gcc-4.4 has problem generating correct code of following snippet: + // { JSEntryStub stub; + // js_entry_code_ = *stub.GetCode(); + // } + // { JSConstructEntryStub stub; + // js_construct_entry_code_ = *stub.GetCode(); + // } + // To workaround the problem, make separate functions without inlining. 
+ Heap::CreateJSEntryStub(); + Heap::CreateJSConstructEntryStub(); +} + + +void Heap::CreateInitialObjects() { + HandleScope scope(isolate()); + Factory* factory = isolate()->factory(); + + // The -0 value must be set before NewNumber works. + set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED)); + DCHECK(std::signbit(minus_zero_value()->Number()) != 0); + + set_nan_value( + *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED)); + set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED)); + + // The hole has not been created yet, but we want to put something + // predictable in the gaps in the string table, so lets make that Smi zero. + set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0))); + + // Allocate initial string table. + set_string_table(*StringTable::New(isolate(), kInitialStringTableSize)); + + // Finish initializing oddballs after creating the string table. + Oddball::Initialize(isolate(), factory->undefined_value(), "undefined", + factory->nan_value(), Oddball::kUndefined); + + // Initialize the null_value. 
+ Oddball::Initialize(isolate(), factory->null_value(), "null", + handle(Smi::FromInt(0), isolate()), Oddball::kNull); + + set_true_value(*factory->NewOddball(factory->boolean_map(), "true", + handle(Smi::FromInt(1), isolate()), + Oddball::kTrue)); + + set_false_value(*factory->NewOddball(factory->boolean_map(), "false", + handle(Smi::FromInt(0), isolate()), + Oddball::kFalse)); + + set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole", + handle(Smi::FromInt(-1), isolate()), + Oddball::kTheHole)); + + set_uninitialized_value(*factory->NewOddball( + factory->uninitialized_map(), "uninitialized", + handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized)); + + set_arguments_marker(*factory->NewOddball( + factory->arguments_marker_map(), "arguments_marker", + handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker)); + + set_no_interceptor_result_sentinel(*factory->NewOddball( + factory->no_interceptor_result_sentinel_map(), + "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()), + Oddball::kOther)); + + set_termination_exception(*factory->NewOddball( + factory->termination_exception_map(), "termination_exception", + handle(Smi::FromInt(-3), isolate()), Oddball::kOther)); + + set_exception(*factory->NewOddball(factory->exception_map(), "exception", + handle(Smi::FromInt(-5), isolate()), + Oddball::kException)); + + for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) { + Handle<String> str = + factory->InternalizeUtf8String(constant_string_table[i].contents); + roots_[constant_string_table[i].index] = *str; + } + + // Allocate the hidden string which is used to identify the hidden properties + // in JSObjects. The hash code has a special value so that it will not match + // the empty string when searching for the property. It cannot be part of the + // loop above because it needs to be allocated manually with the special + // hash code in place. 
The hash code for the hidden_string is zero to ensure + // that it will always be at the first entry in property descriptors. + hidden_string_ = *factory->NewOneByteInternalizedString( + OneByteVector("", 0), String::kEmptyStringHash); + + // Create the code_stubs dictionary. The initial size is set to avoid + // expanding the dictionary during bootstrapping. + set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128)); + + // Create the non_monomorphic_cache used in stub-cache.cc. The initial size + // is set to avoid expanding the dictionary during bootstrapping. + set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64)); + + set_polymorphic_code_cache(PolymorphicCodeCache::cast( + *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE))); + + set_instanceof_cache_function(Smi::FromInt(0)); + set_instanceof_cache_map(Smi::FromInt(0)); + set_instanceof_cache_answer(Smi::FromInt(0)); + + CreateFixedStubs(); + + // Allocate the dictionary of intrinsic function names. + Handle<NameDictionary> intrinsic_names = + NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED); + Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names); + set_intrinsic_function_names(*intrinsic_names); + + set_number_string_cache( + *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED)); + + // Allocate cache for single character one byte strings. + set_single_character_string_cache( + *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED)); + + // Allocate cache for string split and regexp-multiple. + set_string_split_cache(*factory->NewFixedArray( + RegExpResultsCache::kRegExpResultsCacheSize, TENURED)); + set_regexp_multiple_cache(*factory->NewFixedArray( + RegExpResultsCache::kRegExpResultsCacheSize, TENURED)); + + // Allocate cache for external strings pointing to native source code. 
+ set_natives_source_cache( + *factory->NewFixedArray(Natives::GetBuiltinsCount())); + + set_undefined_cell(*factory->NewCell(factory->undefined_value())); + + // The symbol registry is initialized lazily. + set_symbol_registry(undefined_value()); + + // Allocate object to hold object observation state. + set_observation_state(*factory->NewJSObjectFromMap( + factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize))); + + // Microtask queue uses the empty fixed array as a sentinel for "empty". + // Number of queued microtasks stored in Isolate::pending_microtask_count(). + set_microtask_queue(empty_fixed_array()); + + set_detailed_stack_trace_symbol(*factory->NewPrivateSymbol()); + set_elements_transition_symbol(*factory->NewPrivateSymbol()); + set_frozen_symbol(*factory->NewPrivateSymbol()); + set_megamorphic_symbol(*factory->NewPrivateSymbol()); + set_nonexistent_symbol(*factory->NewPrivateSymbol()); + set_normal_ic_symbol(*factory->NewPrivateSymbol()); + set_observed_symbol(*factory->NewPrivateSymbol()); + set_stack_trace_symbol(*factory->NewPrivateSymbol()); + set_uninitialized_symbol(*factory->NewPrivateSymbol()); + + Handle<SeededNumberDictionary> slow_element_dictionary = + SeededNumberDictionary::New(isolate(), 0, TENURED); + slow_element_dictionary->set_requires_slow_elements(); + set_empty_slow_element_dictionary(*slow_element_dictionary); + + set_materialized_objects(*factory->NewFixedArray(0, TENURED)); + + // Handling of script id generation is in Factory::NewScript. + set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId)); + + set_allocation_sites_scratchpad( + *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED)); + InitializeAllocationSitesScratchpad(); + + // Initialize keyed lookup cache. + isolate_->keyed_lookup_cache()->Clear(); + + // Initialize context slot cache. + isolate_->context_slot_cache()->Clear(); + + // Initialize descriptor cache. + isolate_->descriptor_lookup_cache()->Clear(); + + // Initialize compilation cache. 
+ isolate_->compilation_cache()->Clear(); +} + + +bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) { + RootListIndex writable_roots[] = { + kStoreBufferTopRootIndex, + kStackLimitRootIndex, + kNumberStringCacheRootIndex, + kInstanceofCacheFunctionRootIndex, + kInstanceofCacheMapRootIndex, + kInstanceofCacheAnswerRootIndex, + kCodeStubsRootIndex, + kNonMonomorphicCacheRootIndex, + kPolymorphicCodeCacheRootIndex, + kLastScriptIdRootIndex, + kEmptyScriptRootIndex, + kRealStackLimitRootIndex, + kArgumentsAdaptorDeoptPCOffsetRootIndex, + kConstructStubDeoptPCOffsetRootIndex, + kGetterStubDeoptPCOffsetRootIndex, + kSetterStubDeoptPCOffsetRootIndex, + kStringTableRootIndex, + }; + + for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) { + if (root_index == writable_roots[i]) return true; + } + return false; +} + + +bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) { + return !RootCanBeWrittenAfterInitialization(root_index) && + !InNewSpace(roots_array_start()[root_index]); +} + + +Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string, + Object* key_pattern, ResultsCacheType type) { + FixedArray* cache; + if (!key_string->IsInternalizedString()) return Smi::FromInt(0); + if (type == STRING_SPLIT_SUBSTRINGS) { + DCHECK(key_pattern->IsString()); + if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0); + cache = heap->string_split_cache(); + } else { + DCHECK(type == REGEXP_MULTIPLE_INDICES); + DCHECK(key_pattern->IsFixedArray()); + cache = heap->regexp_multiple_cache(); + } + + uint32_t hash = key_string->Hash(); + uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & + ~(kArrayEntriesPerCacheEntry - 1)); + if (cache->get(index + kStringOffset) == key_string && + cache->get(index + kPatternOffset) == key_pattern) { + return cache->get(index + kArrayOffset); + } + index = + ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); + if (cache->get(index + kStringOffset) == 
key_string && + cache->get(index + kPatternOffset) == key_pattern) { + return cache->get(index + kArrayOffset); + } + return Smi::FromInt(0); +} + + +void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string, + Handle<Object> key_pattern, + Handle<FixedArray> value_array, + ResultsCacheType type) { + Factory* factory = isolate->factory(); + Handle<FixedArray> cache; + if (!key_string->IsInternalizedString()) return; + if (type == STRING_SPLIT_SUBSTRINGS) { + DCHECK(key_pattern->IsString()); + if (!key_pattern->IsInternalizedString()) return; + cache = factory->string_split_cache(); + } else { + DCHECK(type == REGEXP_MULTIPLE_INDICES); + DCHECK(key_pattern->IsFixedArray()); + cache = factory->regexp_multiple_cache(); + } + + uint32_t hash = key_string->Hash(); + uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & + ~(kArrayEntriesPerCacheEntry - 1)); + if (cache->get(index + kStringOffset) == Smi::FromInt(0)) { + cache->set(index + kStringOffset, *key_string); + cache->set(index + kPatternOffset, *key_pattern); + cache->set(index + kArrayOffset, *value_array); + } else { + uint32_t index2 = + ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); + if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) { + cache->set(index2 + kStringOffset, *key_string); + cache->set(index2 + kPatternOffset, *key_pattern); + cache->set(index2 + kArrayOffset, *value_array); + } else { + cache->set(index2 + kStringOffset, Smi::FromInt(0)); + cache->set(index2 + kPatternOffset, Smi::FromInt(0)); + cache->set(index2 + kArrayOffset, Smi::FromInt(0)); + cache->set(index + kStringOffset, *key_string); + cache->set(index + kPatternOffset, *key_pattern); + cache->set(index + kArrayOffset, *value_array); + } + } + // If the array is a reasonably short list of substrings, convert it into a + // list of internalized strings. 
+ if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) { + for (int i = 0; i < value_array->length(); i++) { + Handle<String> str(String::cast(value_array->get(i)), isolate); + Handle<String> internalized_str = factory->InternalizeString(str); + value_array->set(i, *internalized_str); + } + } + // Convert backing store to a copy-on-write array. + value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map()); +} + + +void RegExpResultsCache::Clear(FixedArray* cache) { + for (int i = 0; i < kRegExpResultsCacheSize; i++) { + cache->set(i, Smi::FromInt(0)); + } +} + + +int Heap::FullSizeNumberStringCacheLength() { + // Compute the size of the number string cache based on the max newspace size. + // The number string cache has a minimum size based on twice the initial cache + // size to ensure that it is bigger after being made 'full size'. + int number_string_cache_size = max_semi_space_size_ / 512; + number_string_cache_size = Max(kInitialNumberStringCacheSize * 2, + Min(0x4000, number_string_cache_size)); + // There is a string and a number per entry so the length is twice the number + // of entries. + return number_string_cache_size * 2; +} + + +void Heap::FlushNumberStringCache() { + // Flush the number to string cache. 
+ int len = number_string_cache()->length(); + for (int i = 0; i < len; i++) { + number_string_cache()->set_undefined(i); + } +} + + +void Heap::FlushAllocationSitesScratchpad() { + for (int i = 0; i < allocation_sites_scratchpad_length_; i++) { + allocation_sites_scratchpad()->set_undefined(i); + } + allocation_sites_scratchpad_length_ = 0; +} + + +void Heap::InitializeAllocationSitesScratchpad() { + DCHECK(allocation_sites_scratchpad()->length() == + kAllocationSiteScratchpadSize); + for (int i = 0; i < kAllocationSiteScratchpadSize; i++) { + allocation_sites_scratchpad()->set_undefined(i); + } +} + + +void Heap::AddAllocationSiteToScratchpad(AllocationSite* site, + ScratchpadSlotMode mode) { + if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) { + // We cannot use the normal write-barrier because slots need to be + // recorded with non-incremental marking as well. We have to explicitly + // record the slot to take evacuation candidates into account. + allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_, + site, SKIP_WRITE_BARRIER); + Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt( + allocation_sites_scratchpad_length_); + + if (mode == RECORD_SCRATCHPAD_SLOT) { + // We need to allow slots buffer overflow here since the evacuation + // candidates are not part of the global list of old space pages and + // releasing an evacuation candidate due to a slots buffer overflow + // results in lost pages. 
+ mark_compact_collector()->RecordSlot(slot, slot, *slot, + SlotsBuffer::IGNORE_OVERFLOW); + } + allocation_sites_scratchpad_length_++; + } +} + + +Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { + return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]); +} + + +Heap::RootListIndex Heap::RootIndexForExternalArrayType( + ExternalArrayType array_type) { + switch (array_type) { +#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ + case kExternal##Type##Array: \ + return kExternal##Type##ArrayMapRootIndex; + + TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) +#undef ARRAY_TYPE_TO_ROOT_INDEX + + default: + UNREACHABLE(); + return kUndefinedValueRootIndex; + } +} + + +Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) { + return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]); +} + + +Heap::RootListIndex Heap::RootIndexForFixedTypedArray( + ExternalArrayType array_type) { + switch (array_type) { +#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ + case kExternal##Type##Array: \ + return kFixed##Type##ArrayMapRootIndex; + + TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) +#undef ARRAY_TYPE_TO_ROOT_INDEX + + default: + UNREACHABLE(); + return kUndefinedValueRootIndex; + } +} + + +Heap::RootListIndex Heap::RootIndexForEmptyExternalArray( + ElementsKind elementsKind) { + switch (elementsKind) { +#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ + case EXTERNAL_##TYPE##_ELEMENTS: \ + return kEmptyExternal##Type##ArrayRootIndex; + + TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) +#undef ELEMENT_KIND_TO_ROOT_INDEX + + default: + UNREACHABLE(); + return kUndefinedValueRootIndex; + } +} + + +Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray( + ElementsKind elementsKind) { + switch (elementsKind) { +#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ + case TYPE##_ELEMENTS: \ + return kEmptyFixed##Type##ArrayRootIndex; + + TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) +#undef 
ELEMENT_KIND_TO_ROOT_INDEX + default: + UNREACHABLE(); + return kUndefinedValueRootIndex; + } +} + + +ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) { + return ExternalArray::cast( + roots_[RootIndexForEmptyExternalArray(map->elements_kind())]); +} + + +FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) { + return FixedTypedArrayBase::cast( + roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]); +} + + +AllocationResult Heap::AllocateForeign(Address address, + PretenureFlag pretenure) { + // Statically ensure that it is safe to allocate foreigns in paged spaces. + STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize); + AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; + Foreign* result; + AllocationResult allocation = Allocate(foreign_map(), space); + if (!allocation.To(&result)) return allocation; + result->set_foreign_address(address); + return result; +} + + +AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) { + if (length < 0 || length > ByteArray::kMaxLength) { + v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); + } + int size = ByteArray::SizeFor(length); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + result->set_map_no_write_barrier(byte_array_map()); + ByteArray::cast(result)->set_length(length); + return result; +} + + +void Heap::CreateFillerObjectAt(Address addr, int size) { + if (size == 0) return; + HeapObject* filler = HeapObject::FromAddress(addr); + if (size == kPointerSize) { + filler->set_map_no_write_barrier(one_pointer_filler_map()); + } else if (size == 2 * kPointerSize) { + filler->set_map_no_write_barrier(two_pointer_filler_map()); + } else { + filler->set_map_no_write_barrier(free_space_map()); + FreeSpace::cast(filler)->set_size(size); + 
} +} + + +bool Heap::CanMoveObjectStart(HeapObject* object) { + Address address = object->address(); + bool is_in_old_pointer_space = InOldPointerSpace(address); + bool is_in_old_data_space = InOldDataSpace(address); + + if (lo_space()->Contains(object)) return false; + + Page* page = Page::FromAddress(address); + // We can move the object start if: + // (1) the object is not in old pointer or old data space, + // (2) the page of the object was already swept, + // (3) the page was already concurrently swept. This case is an optimization + // for concurrent sweeping. The WasSwept predicate for concurrently swept + // pages is set after sweeping all pages. + return (!is_in_old_pointer_space && !is_in_old_data_space) || + page->WasSwept() || page->SweepingCompleted(); +} + + +void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) { + if (incremental_marking()->IsMarking() && + Marking::IsBlack(Marking::MarkBitFrom(address))) { + if (mode == FROM_GC) { + MemoryChunk::IncrementLiveBytesFromGC(address, by); + } else { + MemoryChunk::IncrementLiveBytesFromMutator(address, by); + } + } +} + + +FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object, + int elements_to_trim) { + const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize; + const int bytes_to_trim = elements_to_trim * element_size; + Map* map = object->map(); + + // For now this trick is only applied to objects in new and paged space. + // In large object space the object's start must coincide with chunk + // and thus the trick is just not applicable. + DCHECK(!lo_space()->Contains(object)); + DCHECK(object->map() != fixed_cow_array_map()); + + STATIC_ASSERT(FixedArrayBase::kMapOffset == 0); + STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize); + STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); + + const int len = object->length(); + DCHECK(elements_to_trim <= len); + + // Calculate location of new array start. 
+ Address new_start = object->address() + bytes_to_trim; + + // Technically in new space this write might be omitted (except for + // debug mode which iterates through the heap), but to play safer + // we still do it. + CreateFillerObjectAt(object->address(), bytes_to_trim); + + // Initialize header of the trimmed array. Since left trimming is only + // performed on pages which are not concurrently swept creating a filler + // object does not require synchronization. + DCHECK(CanMoveObjectStart(object)); + Object** former_start = HeapObject::RawField(object, 0); + int new_start_index = elements_to_trim * (element_size / kPointerSize); + former_start[new_start_index] = map; + former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim); + FixedArrayBase* new_object = + FixedArrayBase::cast(HeapObject::FromAddress(new_start)); + + // Maintain consistency of live bytes during incremental marking + marking()->TransferMark(object->address(), new_start); + AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR); + + // Notify the heap profiler of change in object layout. + OnMoveEvent(new_object, object, new_object->Size()); + return new_object; +} + + +// Force instantiation of templatized method. +template +void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int); +template +void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int); + + +template<Heap::InvocationMode mode> +void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) { + const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize; + const int bytes_to_trim = elements_to_trim * element_size; + + // For now this trick is only applied to objects in new and paged space. + DCHECK(!lo_space()->Contains(object)); + DCHECK(object->map() != fixed_cow_array_map()); + + const int len = object->length(); + DCHECK(elements_to_trim < len); + + // Calculate location of new array end. 
+ Address new_end = object->address() + object->Size() - bytes_to_trim; + + // Technically in new space this write might be omitted (except for + // debug mode which iterates through the heap), but to play safer + // we still do it. + CreateFillerObjectAt(new_end, bytes_to_trim); + + // Initialize header of the trimmed array. We are storing the new length + // using release store after creating a filler for the left-over space to + // avoid races with the sweeper thread. + object->synchronized_set_length(len - elements_to_trim); + + // Maintain consistency of live bytes during incremental marking + AdjustLiveBytes(object->address(), -bytes_to_trim, mode); + + // Notify the heap profiler of change in object layout. The array may not be + // moved during GC, and size has to be adjusted nevertheless. + HeapProfiler* profiler = isolate()->heap_profiler(); + if (profiler->is_tracking_allocations()) { + profiler->UpdateObjectSizeEvent(object->address(), object->Size()); + } +} + + +AllocationResult Heap::AllocateExternalArray(int length, + ExternalArrayType array_type, + void* external_pointer, + PretenureFlag pretenure) { + int size = ExternalArray::kAlignedSize; + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + result->set_map_no_write_barrier(MapForExternalArrayType(array_type)); + ExternalArray::cast(result)->set_length(length); + ExternalArray::cast(result)->set_external_pointer(external_pointer); + return result; +} + +static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size, + ElementsKind* element_kind) { + switch (array_type) { +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \ + case kExternal##Type##Array: \ + *element_size = size; \ + *element_kind = TYPE##_ELEMENTS; \ + return; + + TYPED_ARRAYS(TYPED_ARRAY_CASE) +#undef TYPED_ARRAY_CASE + + default: + 
*element_size = 0; // Bogus + *element_kind = UINT8_ELEMENTS; // Bogus + UNREACHABLE(); + } +} + + +AllocationResult Heap::AllocateFixedTypedArray(int length, + ExternalArrayType array_type, + PretenureFlag pretenure) { + int element_size; + ElementsKind elements_kind; + ForFixedTypedArray(array_type, &element_size, &elements_kind); + int size = OBJECT_POINTER_ALIGN(length * element_size + + FixedTypedArrayBase::kDataOffset); +#ifndef V8_HOST_ARCH_64_BIT + if (array_type == kExternalFloat64Array) { + size += kPointerSize; + } +#endif + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + + HeapObject* object; + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&object)) return allocation; + + if (array_type == kExternalFloat64Array) { + object = EnsureDoubleAligned(this, object, size); + } + + object->set_map(MapForFixedTypedArray(array_type)); + FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object); + elements->set_length(length); + memset(elements->DataPtr(), 0, elements->DataSize()); + return elements; +} + + +AllocationResult Heap::AllocateCode(int object_size, bool immovable) { + DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); + AllocationResult allocation = + AllocateRaw(object_size, CODE_SPACE, CODE_SPACE); + + HeapObject* result; + if (!allocation.To(&result)) return allocation; + + if (immovable) { + Address address = result->address(); + // Code objects which should stay at a fixed address are allocated either + // in the first page of code space (objects on the first page of each space + // are never moved) or in large object space. + if (!code_space_->FirstPage()->Contains(address) && + MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { + // Discard the first code allocation, which was on a page where it could + // be moved. 
+ CreateFillerObjectAt(result->address(), object_size); + allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE); + if (!allocation.To(&result)) return allocation; + OnAllocationEvent(result, object_size); + } + } + + result->set_map_no_write_barrier(code_map()); + Code* code = Code::cast(result); + DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || + isolate_->code_range()->contains(code->address())); + code->set_gc_metadata(Smi::FromInt(0)); + code->set_ic_age(global_ic_age_); + return code; +} + + +AllocationResult Heap::CopyCode(Code* code) { + AllocationResult allocation; + HeapObject* new_constant_pool; + if (FLAG_enable_ool_constant_pool && + code->constant_pool() != empty_constant_pool_array()) { + // Copy the constant pool, since edits to the copied code may modify + // the constant pool. + allocation = CopyConstantPoolArray(code->constant_pool()); + if (!allocation.To(&new_constant_pool)) return allocation; + } else { + new_constant_pool = empty_constant_pool_array(); + } + + HeapObject* result; + // Allocate an object the same size as the code object. + int obj_size = code->Size(); + allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE); + if (!allocation.To(&result)) return allocation; + + // Copy code object. + Address old_addr = code->address(); + Address new_addr = result->address(); + CopyBlock(new_addr, old_addr, obj_size); + Code* new_code = Code::cast(result); + + // Update the constant pool. + new_code->set_constant_pool(new_constant_pool); + + // Relocate the copy. + DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || + isolate_->code_range()->contains(code->address())); + new_code->Relocate(new_addr - old_addr); + return new_code; +} + + +AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { + // Allocate ByteArray and ConstantPoolArray before the Code object, so that we + // do not risk leaving uninitialized Code object (and breaking the heap). 
+ ByteArray* reloc_info_array; + { + AllocationResult allocation = + AllocateByteArray(reloc_info.length(), TENURED); + if (!allocation.To(&reloc_info_array)) return allocation; + } + HeapObject* new_constant_pool; + if (FLAG_enable_ool_constant_pool && + code->constant_pool() != empty_constant_pool_array()) { + // Copy the constant pool, since edits to the copied code may modify + // the constant pool. + AllocationResult allocation = CopyConstantPoolArray(code->constant_pool()); + if (!allocation.To(&new_constant_pool)) return allocation; + } else { + new_constant_pool = empty_constant_pool_array(); + } + + int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment); + + int new_obj_size = Code::SizeFor(new_body_size); + + Address old_addr = code->address(); + + size_t relocation_offset = + static_cast<size_t>(code->instruction_end() - old_addr); + + HeapObject* result; + AllocationResult allocation = + AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE); + if (!allocation.To(&result)) return allocation; + + // Copy code object. + Address new_addr = result->address(); + + // Copy header and instructions. + CopyBytes(new_addr, old_addr, relocation_offset); + + Code* new_code = Code::cast(result); + new_code->set_relocation_info(reloc_info_array); + + // Update constant pool. + new_code->set_constant_pool(new_constant_pool); + + // Copy patched rinfo. + CopyBytes(new_code->relocation_start(), reloc_info.start(), + static_cast<size_t>(reloc_info.length())); + + // Relocate the copy. 
+ DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || + isolate_->code_range()->contains(code->address())); + new_code->Relocate(new_addr - old_addr); + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) code->ObjectVerify(); +#endif + return new_code; +} + + +void Heap::InitializeAllocationMemento(AllocationMemento* memento, + AllocationSite* allocation_site) { + memento->set_map_no_write_barrier(allocation_memento_map()); + DCHECK(allocation_site->map() == allocation_site_map()); + memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER); + if (FLAG_allocation_site_pretenuring) { + allocation_site->IncrementMementoCreateCount(); + } +} + + +AllocationResult Heap::Allocate(Map* map, AllocationSpace space, + AllocationSite* allocation_site) { + DCHECK(gc_state_ == NOT_IN_GC); + DCHECK(map->instance_type() != MAP_TYPE); + // If allocation failures are disallowed, we may allocate in a different + // space when new space is full and the object is not a large object. + AllocationSpace retry_space = + (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); + int size = map->instance_size(); + if (allocation_site != NULL) { + size += AllocationMemento::kSize; + } + HeapObject* result; + AllocationResult allocation = AllocateRaw(size, space, retry_space); + if (!allocation.To(&result)) return allocation; + // No need for write barrier since object is white and map is in old space. 
+ result->set_map_no_write_barrier(map); + if (allocation_site != NULL) { + AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( + reinterpret_cast<Address>(result) + map->instance_size()); + InitializeAllocationMemento(alloc_memento, allocation_site); + } + return result; +} + + +void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, + Map* map) { + obj->set_properties(properties); + obj->initialize_elements(); + // TODO(1240798): Initialize the object's body using valid initial values + // according to the object's initial map. For example, if the map's + // instance type is JS_ARRAY_TYPE, the length field should be initialized + // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a + // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object + // verification code has to cope with (temporarily) invalid objects. See + // for example, JSArray::JSArrayVerify). + Object* filler; + // We cannot always fill with one_pointer_filler_map because objects + // created from API functions expect their internal fields to be initialized + // with undefined_value. + // Pre-allocated fields need to be initialized with undefined_value as well + // so that object accesses before the constructor completes (e.g. in the + // debugger) will not cause a crash. + if (map->constructor()->IsJSFunction() && + JSFunction::cast(map->constructor()) + ->IsInobjectSlackTrackingInProgress()) { + // We might want to shrink the object later. + DCHECK(obj->GetInternalFieldCount() == 0); + filler = Heap::one_pointer_filler_map(); + } else { + filler = Heap::undefined_value(); + } + obj->InitializeBody(map, Heap::undefined_value(), filler); +} + + +AllocationResult Heap::AllocateJSObjectFromMap( + Map* map, PretenureFlag pretenure, bool allocate_properties, + AllocationSite* allocation_site) { + // JSFunctions should be allocated using AllocateFunction to be + // properly initialized. 
+ DCHECK(map->instance_type() != JS_FUNCTION_TYPE); + + // Both types of global objects should be allocated using + // AllocateGlobalObject to be properly initialized. + DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); + DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); + + // Allocate the backing storage for the properties. + FixedArray* properties; + if (allocate_properties) { + int prop_size = map->InitialPropertiesLength(); + DCHECK(prop_size >= 0); + { + AllocationResult allocation = AllocateFixedArray(prop_size, pretenure); + if (!allocation.To(&properties)) return allocation; + } + } else { + properties = empty_fixed_array(); + } + + // Allocate the JSObject. + int size = map->instance_size(); + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); + JSObject* js_obj; + AllocationResult allocation = Allocate(map, space, allocation_site); + if (!allocation.To(&js_obj)) return allocation; + + // Initialize the JSObject. + InitializeJSObjectFromMap(js_obj, properties, map); + DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() || + js_obj->HasFixedTypedArrayElements()); + return js_obj; +} + + +AllocationResult Heap::AllocateJSObject(JSFunction* constructor, + PretenureFlag pretenure, + AllocationSite* allocation_site) { + DCHECK(constructor->has_initial_map()); + + // Allocate the object based on the constructors initial map. + AllocationResult allocation = AllocateJSObjectFromMap( + constructor->initial_map(), pretenure, true, allocation_site); +#ifdef DEBUG + // Make sure result is NOT a global object if valid. + HeapObject* obj; + DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject()); +#endif + return allocation; +} + + +AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) { + // Never used to copy functions. If functions need to be copied we + // have to be careful to clear the literals array. + SLOW_DCHECK(!source->IsJSFunction()); + + // Make the clone. 
+ Map* map = source->map(); + int object_size = map->instance_size(); + HeapObject* clone; + + DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type())); + + WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; + + // If we're forced to always allocate, we use the general allocation + // functions which may leave us with an object in old space. + if (always_allocate()) { + { + AllocationResult allocation = + AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); + if (!allocation.To(&clone)) return allocation; + } + Address clone_address = clone->address(); + CopyBlock(clone_address, source->address(), object_size); + // Update write barrier for all fields that lie beyond the header. + RecordWrites(clone_address, JSObject::kHeaderSize, + (object_size - JSObject::kHeaderSize) / kPointerSize); + } else { + wb_mode = SKIP_WRITE_BARRIER; + + { + int adjusted_object_size = + site != NULL ? object_size + AllocationMemento::kSize : object_size; + AllocationResult allocation = + AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE); + if (!allocation.To(&clone)) return allocation; + } + SLOW_DCHECK(InNewSpace(clone)); + // Since we know the clone is allocated in new space, we can copy + // the contents without worrying about updating the write barrier. + CopyBlock(clone->address(), source->address(), object_size); + + if (site != NULL) { + AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( + reinterpret_cast<Address>(clone) + object_size); + InitializeAllocationMemento(alloc_memento, site); + } + } + + SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() == + source->GetElementsKind()); + FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); + FixedArray* properties = FixedArray::cast(source->properties()); + // Update elements if necessary. 
+ if (elements->length() > 0) { + FixedArrayBase* elem; + { + AllocationResult allocation; + if (elements->map() == fixed_cow_array_map()) { + allocation = FixedArray::cast(elements); + } else if (source->HasFastDoubleElements()) { + allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements)); + } else { + allocation = CopyFixedArray(FixedArray::cast(elements)); + } + if (!allocation.To(&elem)) return allocation; + } + JSObject::cast(clone)->set_elements(elem, wb_mode); + } + // Update properties if necessary. + if (properties->length() > 0) { + FixedArray* prop; + { + AllocationResult allocation = CopyFixedArray(properties); + if (!allocation.To(&prop)) return allocation; + } + JSObject::cast(clone)->set_properties(prop, wb_mode); + } + // Return the new clone. + return clone; +} + + +static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars, + int len) { + // Only works for ascii. + DCHECK(vector.length() == len); + MemCopy(chars, vector.start(), len); +} + +static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars, + int len) { + const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start()); + unsigned stream_length = vector.length(); + while (stream_length != 0) { + unsigned consumed = 0; + uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed); + DCHECK(c != unibrow::Utf8::kBadChar); + DCHECK(consumed <= stream_length); + stream_length -= consumed; + stream += consumed; + if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { + len -= 2; + if (len < 0) break; + *chars++ = unibrow::Utf16::LeadSurrogate(c); + *chars++ = unibrow::Utf16::TrailSurrogate(c); + } else { + len -= 1; + if (len < 0) break; + *chars++ = c; + } + } + DCHECK(stream_length == 0); + DCHECK(len == 0); +} + + +static inline void WriteOneByteData(String* s, uint8_t* chars, int len) { + DCHECK(s->length() == len); + String::WriteToFlat(s, chars, 0, len); +} + + +static inline void WriteTwoByteData(String* s, uint16_t* chars, 
int len) { + DCHECK(s->length() == len); + String::WriteToFlat(s, chars, 0, len); +} + + +template <bool is_one_byte, typename T> +AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars, + uint32_t hash_field) { + DCHECK(chars >= 0); + // Compute map and object size. + int size; + Map* map; + + DCHECK_LE(0, chars); + DCHECK_GE(String::kMaxLength, chars); + if (is_one_byte) { + map = ascii_internalized_string_map(); + size = SeqOneByteString::SizeFor(chars); + } else { + map = internalized_string_map(); + size = SeqTwoByteString::SizeFor(chars); + } + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); + + // Allocate string. + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + result->set_map_no_write_barrier(map); + // Set length and hash fields of the allocated string. + String* answer = String::cast(result); + answer->set_length(chars); + answer->set_hash_field(hash_field); + + DCHECK_EQ(size, answer->Size()); + + if (is_one_byte) { + WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars); + } else { + WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars); + } + return answer; +} + + +// Need explicit instantiations. 
+template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*, + int, + uint32_t); +template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*, + int, + uint32_t); +template AllocationResult Heap::AllocateInternalizedStringImpl<false>( + Vector<const char>, int, uint32_t); + + +AllocationResult Heap::AllocateRawOneByteString(int length, + PretenureFlag pretenure) { + DCHECK_LE(0, length); + DCHECK_GE(String::kMaxLength, length); + int size = SeqOneByteString::SizeFor(length); + DCHECK(size <= SeqOneByteString::kMaxSize); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + // Partially initialize the object. + result->set_map_no_write_barrier(ascii_string_map()); + String::cast(result)->set_length(length); + String::cast(result)->set_hash_field(String::kEmptyHashField); + DCHECK_EQ(size, HeapObject::cast(result)->Size()); + + return result; +} + + +AllocationResult Heap::AllocateRawTwoByteString(int length, + PretenureFlag pretenure) { + DCHECK_LE(0, length); + DCHECK_GE(String::kMaxLength, length); + int size = SeqTwoByteString::SizeFor(length); + DCHECK(size <= SeqTwoByteString::kMaxSize); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + // Partially initialize the object. 
+ result->set_map_no_write_barrier(string_map()); + String::cast(result)->set_length(length); + String::cast(result)->set_hash_field(String::kEmptyHashField); + DCHECK_EQ(size, HeapObject::cast(result)->Size()); + return result; +} + + +AllocationResult Heap::AllocateEmptyFixedArray() { + int size = FixedArray::SizeFor(0); + HeapObject* result; + { + AllocationResult allocation = + AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + // Initialize the object. + result->set_map_no_write_barrier(fixed_array_map()); + FixedArray::cast(result)->set_length(0); + return result; +} + + +AllocationResult Heap::AllocateEmptyExternalArray( + ExternalArrayType array_type) { + return AllocateExternalArray(0, array_type, NULL, TENURED); +} + + +AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) { + if (!InNewSpace(src)) { + return src; + } + + int len = src->length(); + HeapObject* obj; + { + AllocationResult allocation = AllocateRawFixedArray(len, TENURED); + if (!allocation.To(&obj)) return allocation; + } + obj->set_map_no_write_barrier(fixed_array_map()); + FixedArray* result = FixedArray::cast(obj); + result->set_length(len); + + // Copy the content + DisallowHeapAllocation no_gc; + WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); + for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); + + // TODO(mvstanton): The map is set twice because of protection against calling + // set() on a COW FixedArray. Issue v8:3221 created to track this, and + // we might then be able to remove this whole method. 
+ HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map()); + return result; +} + + +AllocationResult Heap::AllocateEmptyFixedTypedArray( + ExternalArrayType array_type) { + return AllocateFixedTypedArray(0, array_type, TENURED); +} + + +AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { + int len = src->length(); + HeapObject* obj; + { + AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED); + if (!allocation.To(&obj)) return allocation; + } + if (InNewSpace(obj)) { + obj->set_map_no_write_barrier(map); + CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize, + FixedArray::SizeFor(len) - kPointerSize); + return obj; + } + obj->set_map_no_write_barrier(map); + FixedArray* result = FixedArray::cast(obj); + result->set_length(len); + + // Copy the content + DisallowHeapAllocation no_gc; + WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); + for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); + return result; +} + + +AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, + Map* map) { + int len = src->length(); + HeapObject* obj; + { + AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED); + if (!allocation.To(&obj)) return allocation; + } + obj->set_map_no_write_barrier(map); + CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset, + src->address() + FixedDoubleArray::kLengthOffset, + FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset); + return obj; +} + + +AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src, + Map* map) { + HeapObject* obj; + if (src->is_extended_layout()) { + ConstantPoolArray::NumberOfEntries small(src, + ConstantPoolArray::SMALL_SECTION); + ConstantPoolArray::NumberOfEntries extended( + src, ConstantPoolArray::EXTENDED_SECTION); + AllocationResult allocation = + AllocateExtendedConstantPoolArray(small, extended); + if (!allocation.To(&obj)) return allocation; + } else { + 
ConstantPoolArray::NumberOfEntries small(src, + ConstantPoolArray::SMALL_SECTION); + AllocationResult allocation = AllocateConstantPoolArray(small); + if (!allocation.To(&obj)) return allocation; + } + obj->set_map_no_write_barrier(map); + CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset, + src->address() + ConstantPoolArray::kFirstEntryOffset, + src->size() - ConstantPoolArray::kFirstEntryOffset); + return obj; +} + + +AllocationResult Heap::AllocateRawFixedArray(int length, + PretenureFlag pretenure) { + if (length < 0 || length > FixedArray::kMaxLength) { + v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); + } + int size = FixedArray::SizeFor(length); + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); + + return AllocateRaw(size, space, OLD_POINTER_SPACE); +} + + +AllocationResult Heap::AllocateFixedArrayWithFiller(int length, + PretenureFlag pretenure, + Object* filler) { + DCHECK(length >= 0); + DCHECK(empty_fixed_array()->IsFixedArray()); + if (length == 0) return empty_fixed_array(); + + DCHECK(!InNewSpace(filler)); + HeapObject* result; + { + AllocationResult allocation = AllocateRawFixedArray(length, pretenure); + if (!allocation.To(&result)) return allocation; + } + + result->set_map_no_write_barrier(fixed_array_map()); + FixedArray* array = FixedArray::cast(result); + array->set_length(length); + MemsetPointer(array->data_start(), filler, length); + return array; +} + + +AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) { + return AllocateFixedArrayWithFiller(length, pretenure, undefined_value()); +} + + +AllocationResult Heap::AllocateUninitializedFixedArray(int length) { + if (length == 0) return empty_fixed_array(); + + HeapObject* obj; + { + AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED); + if (!allocation.To(&obj)) return allocation; + } + + obj->set_map_no_write_barrier(fixed_array_map()); + 
FixedArray::cast(obj)->set_length(length); + return obj; +} + + +AllocationResult Heap::AllocateUninitializedFixedDoubleArray( + int length, PretenureFlag pretenure) { + if (length == 0) return empty_fixed_array(); + + HeapObject* elements; + AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure); + if (!allocation.To(&elements)) return allocation; + + elements->set_map_no_write_barrier(fixed_double_array_map()); + FixedDoubleArray::cast(elements)->set_length(length); + return elements; +} + + +AllocationResult Heap::AllocateRawFixedDoubleArray(int length, + PretenureFlag pretenure) { + if (length < 0 || length > FixedDoubleArray::kMaxLength) { + v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); + } + int size = FixedDoubleArray::SizeFor(length); +#ifndef V8_HOST_ARCH_64_BIT + size += kPointerSize; +#endif + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); + + HeapObject* object; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&object)) return allocation; + } + + return EnsureDoubleAligned(this, object, size); +} + + +AllocationResult Heap::AllocateConstantPoolArray( + const ConstantPoolArray::NumberOfEntries& small) { + CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType)); + int size = ConstantPoolArray::SizeFor(small); +#ifndef V8_HOST_ARCH_64_BIT + size += kPointerSize; +#endif + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); + + HeapObject* object; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE); + if (!allocation.To(&object)) return allocation; + } + object = EnsureDoubleAligned(this, object, size); + object->set_map_no_write_barrier(constant_pool_array_map()); + + ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object); + constant_pool->Init(small); + constant_pool->ClearPtrEntries(isolate()); + return constant_pool; +} + + +AllocationResult 
Heap::AllocateExtendedConstantPoolArray( + const ConstantPoolArray::NumberOfEntries& small, + const ConstantPoolArray::NumberOfEntries& extended) { + CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType)); + CHECK(extended.are_in_range(0, kMaxInt)); + int size = ConstantPoolArray::SizeForExtended(small, extended); +#ifndef V8_HOST_ARCH_64_BIT + size += kPointerSize; +#endif + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); + + HeapObject* object; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE); + if (!allocation.To(&object)) return allocation; + } + object = EnsureDoubleAligned(this, object, size); + object->set_map_no_write_barrier(constant_pool_array_map()); + + ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object); + constant_pool->InitExtended(small, extended); + constant_pool->ClearPtrEntries(isolate()); + return constant_pool; +} + + +AllocationResult Heap::AllocateEmptyConstantPoolArray() { + ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0); + int size = ConstantPoolArray::SizeFor(small); + HeapObject* result; + { + AllocationResult allocation = + AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + result->set_map_no_write_barrier(constant_pool_array_map()); + ConstantPoolArray::cast(result)->Init(small); + return result; +} + + +AllocationResult Heap::AllocateSymbol() { + // Statically ensure that it is safe to allocate symbols in paged spaces. + STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize); + + HeapObject* result; + AllocationResult allocation = + AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE); + if (!allocation.To(&result)) return allocation; + + result->set_map_no_write_barrier(symbol_map()); + + // Generate a random hash value. 
+ int hash; + int attempts = 0; + do { + hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask; + attempts++; + } while (hash == 0 && attempts < 30); + if (hash == 0) hash = 1; // never return 0 + + Symbol::cast(result) + ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)); + Symbol::cast(result)->set_name(undefined_value()); + Symbol::cast(result)->set_flags(Smi::FromInt(0)); + + DCHECK(!Symbol::cast(result)->is_private()); + return result; +} + + +AllocationResult Heap::AllocateStruct(InstanceType type) { + Map* map; + switch (type) { +#define MAKE_CASE(NAME, Name, name) \ + case NAME##_TYPE: \ + map = name##_map(); \ + break; + STRUCT_LIST(MAKE_CASE) +#undef MAKE_CASE + default: + UNREACHABLE(); + return exception(); + } + int size = map->instance_size(); + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); + Struct* result; + { + AllocationResult allocation = Allocate(map, space); + if (!allocation.To(&result)) return allocation; + } + result->InitializeBody(size); + return result; +} + + +bool Heap::IsHeapIterable() { + // TODO(hpayer): This function is not correct. Allocation folding in old + // space breaks the iterability. 
+ return (old_pointer_space()->swept_precisely() && + old_data_space()->swept_precisely() && + new_space_top_after_last_gc_ == new_space()->top()); +} + + +void Heap::MakeHeapIterable() { + DCHECK(AllowHeapAllocation::IsAllowed()); + if (!IsHeapIterable()) { + CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); + } + if (mark_compact_collector()->sweeping_in_progress()) { + mark_compact_collector()->EnsureSweepingCompleted(); + } + DCHECK(IsHeapIterable()); +} + + +void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) { + incremental_marking()->Step(step_size, + IncrementalMarking::NO_GC_VIA_STACK_GUARD, true); + + if (incremental_marking()->IsComplete()) { + bool uncommit = false; + if (gc_count_at_last_idle_gc_ == gc_count_) { + // No GC since the last full GC, the mutator is probably not active. + isolate_->compilation_cache()->Clear(); + uncommit = true; + } + CollectAllGarbage(kReduceMemoryFootprintMask, + "idle notification: finalize incremental"); + mark_sweeps_since_idle_round_started_++; + gc_count_at_last_idle_gc_ = gc_count_; + if (uncommit) { + new_space_.Shrink(); + UncommitFromSpace(); + } + } +} + + +bool Heap::IdleNotification(int hint) { + // If incremental marking is off, we do not perform idle notification. + if (!FLAG_incremental_marking) return true; + + // Hints greater than this value indicate that + // the embedder is requesting a lot of GC work. + const int kMaxHint = 1000; + const int kMinHintForIncrementalMarking = 10; + // Minimal hint that allows to do full GC. + const int kMinHintForFullGC = 100; + intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4; + // The size factor is in range [5..250]. The numbers here are chosen from + // experiments. 
If you change them, make sure to test with + // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.* + intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold; + + isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint); + HistogramTimerScope idle_notification_scope( + isolate_->counters()->gc_idle_notification()); + + if (contexts_disposed_ > 0) { + contexts_disposed_ = 0; + int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000); + if (hint >= mark_sweep_time && !FLAG_expose_gc && + incremental_marking()->IsStopped()) { + HistogramTimerScope scope(isolate_->counters()->gc_context()); + CollectAllGarbage(kReduceMemoryFootprintMask, + "idle notification: contexts disposed"); + } else { + AdvanceIdleIncrementalMarking(step_size); + } + + // After context disposal there is likely a lot of garbage remaining, reset + // the idle notification counters in order to trigger more incremental GCs + // on subsequent idle notifications. + StartIdleRound(); + return false; + } + + // By doing small chunks of GC work in each IdleNotification, + // perform a round of incremental GCs and after that wait until + // the mutator creates enough garbage to justify a new round. + // An incremental GC progresses as follows: + // 1. many incremental marking steps, + // 2. one old space mark-sweep-compact, + // Use mark-sweep-compact events to count incremental GCs in a round. + + if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) { + if (EnoughGarbageSinceLastIdleRound()) { + StartIdleRound(); + } else { + return true; + } + } + + int remaining_mark_sweeps = + kMaxMarkSweepsInIdleRound - mark_sweeps_since_idle_round_started_; + + if (incremental_marking()->IsStopped()) { + // If there are no more than two GCs left in this idle round and we are + // allowed to do a full GC, then make those GCs full in order to compact + // the code space. 
+ // TODO(ulan): Once we enable code compaction for incremental marking, + // we can get rid of this special case and always start incremental marking. + if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) { + CollectAllGarbage(kReduceMemoryFootprintMask, + "idle notification: finalize idle round"); + mark_sweeps_since_idle_round_started_++; + } else if (hint > kMinHintForIncrementalMarking) { + incremental_marking()->Start(); + } + } + if (!incremental_marking()->IsStopped() && + hint > kMinHintForIncrementalMarking) { + AdvanceIdleIncrementalMarking(step_size); + } + + if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) { + FinishIdleRound(); + return true; + } + + // If the IdleNotification is called with a large hint we will wait for + // the sweeper threads here. + if (hint >= kMinHintForFullGC && + mark_compact_collector()->sweeping_in_progress()) { + mark_compact_collector()->EnsureSweepingCompleted(); + } + + return false; +} + + +#ifdef DEBUG + +void Heap::Print() { + if (!HasBeenSetUp()) return; + isolate()->PrintStack(stdout); + AllSpaces spaces(this); + for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { + space->Print(); + } +} + + +void Heap::ReportCodeStatistics(const char* title) { + PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title); + PagedSpace::ResetCodeStatistics(isolate()); + // We do not look for code in new space, map space, or old space. If code + // somehow ends up in those spaces, we would miss it here. + code_space_->CollectCodeStatistics(); + lo_space_->CollectCodeStatistics(); + PagedSpace::ReportCodeStatistics(isolate()); +} + + +// This function expects that NewSpace's allocated objects histogram is +// populated (via a call to CollectStatistics or else as a side effect of a +// just-completed scavenge collection). 
+void Heap::ReportHeapStatistics(const char* title) { + USE(title); + PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title, + gc_count_); + PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n", + old_generation_allocation_limit_); + + PrintF("\n"); + PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_)); + isolate_->global_handles()->PrintStats(); + PrintF("\n"); + + PrintF("Heap statistics : "); + isolate_->memory_allocator()->ReportStatistics(); + PrintF("To space : "); + new_space_.ReportStatistics(); + PrintF("Old pointer space : "); + old_pointer_space_->ReportStatistics(); + PrintF("Old data space : "); + old_data_space_->ReportStatistics(); + PrintF("Code space : "); + code_space_->ReportStatistics(); + PrintF("Map space : "); + map_space_->ReportStatistics(); + PrintF("Cell space : "); + cell_space_->ReportStatistics(); + PrintF("PropertyCell space : "); + property_cell_space_->ReportStatistics(); + PrintF("Large object space : "); + lo_space_->ReportStatistics(); + PrintF(">>>>>> ========================================= >>>>>>\n"); +} + +#endif // DEBUG + +bool Heap::Contains(HeapObject* value) { return Contains(value->address()); } + + +bool Heap::Contains(Address addr) { + if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false; + return HasBeenSetUp() && + (new_space_.ToSpaceContains(addr) || + old_pointer_space_->Contains(addr) || + old_data_space_->Contains(addr) || code_space_->Contains(addr) || + map_space_->Contains(addr) || cell_space_->Contains(addr) || + property_cell_space_->Contains(addr) || + lo_space_->SlowContains(addr)); +} + + +bool Heap::InSpace(HeapObject* value, AllocationSpace space) { + return InSpace(value->address(), space); +} + + +bool Heap::InSpace(Address addr, AllocationSpace space) { + if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false; + if (!HasBeenSetUp()) return false; + + switch (space) { + case NEW_SPACE: + return 
new_space_.ToSpaceContains(addr); + case OLD_POINTER_SPACE: + return old_pointer_space_->Contains(addr); + case OLD_DATA_SPACE: + return old_data_space_->Contains(addr); + case CODE_SPACE: + return code_space_->Contains(addr); + case MAP_SPACE: + return map_space_->Contains(addr); + case CELL_SPACE: + return cell_space_->Contains(addr); + case PROPERTY_CELL_SPACE: + return property_cell_space_->Contains(addr); + case LO_SPACE: + return lo_space_->SlowContains(addr); + case INVALID_SPACE: + break; + } + UNREACHABLE(); + return false; +} + + +#ifdef VERIFY_HEAP +void Heap::Verify() { + CHECK(HasBeenSetUp()); + HandleScope scope(isolate()); + + store_buffer()->Verify(); + + if (mark_compact_collector()->sweeping_in_progress()) { + // We have to wait here for the sweeper threads to have an iterable heap. + mark_compact_collector()->EnsureSweepingCompleted(); + } + + VerifyPointersVisitor visitor; + IterateRoots(&visitor, VISIT_ONLY_STRONG); + + VerifySmisVisitor smis_visitor; + IterateSmiRoots(&smis_visitor); + + new_space_.Verify(); + + old_pointer_space_->Verify(&visitor); + map_space_->Verify(&visitor); + + VerifyPointersVisitor no_dirty_regions_visitor; + old_data_space_->Verify(&no_dirty_regions_visitor); + code_space_->Verify(&no_dirty_regions_visitor); + cell_space_->Verify(&no_dirty_regions_visitor); + property_cell_space_->Verify(&no_dirty_regions_visitor); + + lo_space_->Verify(); +} +#endif + + +void Heap::ZapFromSpace() { + NewSpacePageIterator it(new_space_.FromSpaceStart(), + new_space_.FromSpaceEnd()); + while (it.has_next()) { + NewSpacePage* page = it.next(); + for (Address cursor = page->area_start(), limit = page->area_end(); + cursor < limit; cursor += kPointerSize) { + Memory::Address_at(cursor) = kFromSpaceZapValue; + } + } +} + + +void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end, + ObjectSlotCallback callback) { + Address slot_address = start; + + // We are not collecting slots on new space objects during mutation + // thus 
we have to scan for pointers to evacuation candidates when we + // promote objects. But we should not record any slots in non-black + // objects. Grey object's slots would be rescanned. + // White object might not survive until the end of collection; + // it would be a violation of the invariant to record its slots. + bool record_slots = false; + if (incremental_marking()->IsCompacting()) { + MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start)); + record_slots = Marking::IsBlack(mark_bit); + } + + while (slot_address < end) { + Object** slot = reinterpret_cast<Object**>(slot_address); + Object* object = *slot; + // If the store buffer becomes overfull we mark pages as being exempt from + // the store buffer. These pages are scanned to find pointers that point + // to the new space. In that case we may hit newly promoted objects and + // fix the pointers before the promotion queue gets to them. Thus the 'if'. + if (object->IsHeapObject()) { + if (Heap::InFromSpace(object)) { + callback(reinterpret_cast<HeapObject**>(slot), + HeapObject::cast(object)); + Object* new_object = *slot; + if (InNewSpace(new_object)) { + SLOW_DCHECK(Heap::InToSpace(new_object)); + SLOW_DCHECK(new_object->IsHeapObject()); + store_buffer_.EnterDirectlyIntoStoreBuffer( + reinterpret_cast<Address>(slot)); + } + SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object)); + } else if (record_slots && + MarkCompactCollector::IsOnEvacuationCandidate(object)) { + mark_compact_collector()->RecordSlot(slot, slot, object); + } + } + slot_address += kPointerSize; + } +} + + +#ifdef DEBUG +typedef bool (*CheckStoreBufferFilter)(Object** addr); + + +bool IsAMapPointerAddress(Object** addr) { + uintptr_t a = reinterpret_cast<uintptr_t>(addr); + int mod = a % Map::kSize; + return mod >= Map::kPointerFieldsBeginOffset && + mod < Map::kPointerFieldsEndOffset; +} + + +bool EverythingsAPointer(Object** addr) { return true; } + + +static void CheckStoreBuffer(Heap* heap, Object** 
current, Object** limit, + Object**** store_buffer_position, + Object*** store_buffer_top, + CheckStoreBufferFilter filter, + Address special_garbage_start, + Address special_garbage_end) { + Map* free_space_map = heap->free_space_map(); + for (; current < limit; current++) { + Object* o = *current; + Address current_address = reinterpret_cast<Address>(current); + // Skip free space. + if (o == free_space_map) { + Address current_address = reinterpret_cast<Address>(current); + FreeSpace* free_space = + FreeSpace::cast(HeapObject::FromAddress(current_address)); + int skip = free_space->Size(); + DCHECK(current_address + skip <= reinterpret_cast<Address>(limit)); + DCHECK(skip > 0); + current_address += skip - kPointerSize; + current = reinterpret_cast<Object**>(current_address); + continue; + } + // Skip the current linear allocation space between top and limit which is + // unmarked with the free space map, but can contain junk. + if (current_address == special_garbage_start && + special_garbage_end != special_garbage_start) { + current_address = special_garbage_end - kPointerSize; + current = reinterpret_cast<Object**>(current_address); + continue; + } + if (!(*filter)(current)) continue; + DCHECK(current_address < special_garbage_start || + current_address >= special_garbage_end); + DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); + // We have to check that the pointer does not point into new space + // without trying to cast it to a heap object since the hash field of + // a string can contain values like 1 and 3 which are tagged null + // pointers. 
+ if (!heap->InNewSpace(o)) continue; + while (**store_buffer_position < current && + *store_buffer_position < store_buffer_top) { + (*store_buffer_position)++; + } + if (**store_buffer_position != current || + *store_buffer_position == store_buffer_top) { + Object** obj_start = current; + while (!(*obj_start)->IsMap()) obj_start--; + UNREACHABLE(); + } + } +} + + +// Check that the store buffer contains all intergenerational pointers by +// scanning a page and ensuring that all pointers to young space are in the +// store buffer. +void Heap::OldPointerSpaceCheckStoreBuffer() { + OldSpace* space = old_pointer_space(); + PageIterator pages(space); + + store_buffer()->SortUniq(); + + while (pages.has_next()) { + Page* page = pages.next(); + Object** current = reinterpret_cast<Object**>(page->area_start()); + + Address end = page->area_end(); + + Object*** store_buffer_position = store_buffer()->Start(); + Object*** store_buffer_top = store_buffer()->Top(); + + Object** limit = reinterpret_cast<Object**>(end); + CheckStoreBuffer(this, current, limit, &store_buffer_position, + store_buffer_top, &EverythingsAPointer, space->top(), + space->limit()); + } +} + + +void Heap::MapSpaceCheckStoreBuffer() { + MapSpace* space = map_space(); + PageIterator pages(space); + + store_buffer()->SortUniq(); + + while (pages.has_next()) { + Page* page = pages.next(); + Object** current = reinterpret_cast<Object**>(page->area_start()); + + Address end = page->area_end(); + + Object*** store_buffer_position = store_buffer()->Start(); + Object*** store_buffer_top = store_buffer()->Top(); + + Object** limit = reinterpret_cast<Object**>(end); + CheckStoreBuffer(this, current, limit, &store_buffer_position, + store_buffer_top, &IsAMapPointerAddress, space->top(), + space->limit()); + } +} + + +void Heap::LargeObjectSpaceCheckStoreBuffer() { + LargeObjectIterator it(lo_space()); + for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { + // We only have code, sequential 
strings, or fixed arrays in large + // object space, and only fixed arrays can possibly contain pointers to + // the young generation. + if (object->IsFixedArray()) { + Object*** store_buffer_position = store_buffer()->Start(); + Object*** store_buffer_top = store_buffer()->Top(); + Object** current = reinterpret_cast<Object**>(object->address()); + Object** limit = + reinterpret_cast<Object**>(object->address() + object->Size()); + CheckStoreBuffer(this, current, limit, &store_buffer_position, + store_buffer_top, &EverythingsAPointer, NULL, NULL); + } + } +} +#endif + + +void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { + IterateStrongRoots(v, mode); + IterateWeakRoots(v, mode); +} + + +void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { + v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex])); + v->Synchronize(VisitorSynchronization::kStringTable); + if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) { + // Scavenge collections have special processing for this. + external_string_table_.Iterate(v); + } + v->Synchronize(VisitorSynchronization::kExternalStringsTable); +} + + +void Heap::IterateSmiRoots(ObjectVisitor* v) { + // Acquire execution access since we are going to read stack limit values. 
+ ExecutionAccess access(isolate()); + v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]); + v->Synchronize(VisitorSynchronization::kSmiRootList); +} + + +void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { + v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); + v->Synchronize(VisitorSynchronization::kStrongRootList); + + v->VisitPointer(BitCast<Object**>(&hidden_string_)); + v->Synchronize(VisitorSynchronization::kInternalizedString); + + isolate_->bootstrapper()->Iterate(v); + v->Synchronize(VisitorSynchronization::kBootstrapper); + isolate_->Iterate(v); + v->Synchronize(VisitorSynchronization::kTop); + Relocatable::Iterate(isolate_, v); + v->Synchronize(VisitorSynchronization::kRelocatable); + + if (isolate_->deoptimizer_data() != NULL) { + isolate_->deoptimizer_data()->Iterate(v); + } + v->Synchronize(VisitorSynchronization::kDebug); + isolate_->compilation_cache()->Iterate(v); + v->Synchronize(VisitorSynchronization::kCompilationCache); + + // Iterate over local handles in handle scopes. + isolate_->handle_scope_implementer()->Iterate(v); + isolate_->IterateDeferredHandles(v); + v->Synchronize(VisitorSynchronization::kHandleScope); + + // Iterate over the builtin code objects and code stubs in the + // heap. Note that it is not necessary to iterate over code objects + // on scavenge collections. + if (mode != VISIT_ALL_IN_SCAVENGE) { + isolate_->builtins()->IterateBuiltins(v); + } + v->Synchronize(VisitorSynchronization::kBuiltins); + + // Iterate over global handles. + switch (mode) { + case VISIT_ONLY_STRONG: + isolate_->global_handles()->IterateStrongRoots(v); + break; + case VISIT_ALL_IN_SCAVENGE: + isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v); + break; + case VISIT_ALL_IN_SWEEP_NEWSPACE: + case VISIT_ALL: + isolate_->global_handles()->IterateAllRoots(v); + break; + } + v->Synchronize(VisitorSynchronization::kGlobalHandles); + + // Iterate over eternal handles. 
+ if (mode == VISIT_ALL_IN_SCAVENGE) { + isolate_->eternal_handles()->IterateNewSpaceRoots(v); + } else { + isolate_->eternal_handles()->IterateAllRoots(v); + } + v->Synchronize(VisitorSynchronization::kEternalHandles); + + // Iterate over pointers being held by inactive threads. + isolate_->thread_manager()->Iterate(v); + v->Synchronize(VisitorSynchronization::kThreadManager); + + // Iterate over the pointers the Serialization/Deserialization code is + // holding. + // During garbage collection this keeps the partial snapshot cache alive. + // During deserialization of the startup snapshot this creates the partial + // snapshot cache and deserializes the objects it refers to. During + // serialization this does nothing, since the partial snapshot cache is + // empty. However the next thing we do is create the partial snapshot, + // filling up the partial snapshot cache with objects it needs as we go. + SerializerDeserializer::Iterate(isolate_, v); + // We don't do a v->Synchronize call here, because in debug mode that will + // output a flag to the snapshot. However at this point the serializer and + // deserializer are deliberately a little unsynchronized (see above) so the + // checking of the sync flag in the snapshot would fail. +} + + +// TODO(1236194): Since the heap size is configurable on the command line +// and through the API, we should gracefully handle the case that the heap +// size is not big enough to fit all the initial objects. +bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size, + int max_executable_size, size_t code_range_size) { + if (HasBeenSetUp()) return false; + + // Overwrite default configuration. 
+ if (max_semi_space_size > 0) { + max_semi_space_size_ = max_semi_space_size * MB; + } + if (max_old_space_size > 0) { + max_old_generation_size_ = max_old_space_size * MB; + } + if (max_executable_size > 0) { + max_executable_size_ = max_executable_size * MB; + } + + // If max space size flags are specified overwrite the configuration. + if (FLAG_max_semi_space_size > 0) { + max_semi_space_size_ = FLAG_max_semi_space_size * MB; + } + if (FLAG_max_old_space_size > 0) { + max_old_generation_size_ = FLAG_max_old_space_size * MB; + } + if (FLAG_max_executable_size > 0) { + max_executable_size_ = FLAG_max_executable_size * MB; + } + + if (FLAG_stress_compaction) { + // This will cause more frequent GCs when stressing. + max_semi_space_size_ = Page::kPageSize; + } + + if (Snapshot::HaveASnapshotToStartFrom()) { + // If we are using a snapshot we always reserve the default amount + // of memory for each semispace because code in the snapshot has + // write-barrier code that relies on the size and alignment of new + // space. We therefore cannot use a larger max semispace size + // than the default reserved semispace size. + if (max_semi_space_size_ > reserved_semispace_size_) { + max_semi_space_size_ = reserved_semispace_size_; + if (FLAG_trace_gc) { + PrintPID("Max semi-space size cannot be more than %d kbytes\n", + reserved_semispace_size_ >> 10); + } + } + } else { + // If we are not using snapshots we reserve space for the actual + // max semispace size. + reserved_semispace_size_ = max_semi_space_size_; + } + + // The max executable size must be less than or equal to the max old + // generation size. + if (max_executable_size_ > max_old_generation_size_) { + max_executable_size_ = max_old_generation_size_; + } + + // The new space size must be a power of two to support single-bit testing + // for containment. 
+  max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
+  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
+
+  if (FLAG_min_semi_space_size > 0) {
+    int initial_semispace_size = FLAG_min_semi_space_size * MB;
+    if (initial_semispace_size > max_semi_space_size_) {
+      initial_semispace_size_ = max_semi_space_size_;
+      if (FLAG_trace_gc) {
+        PrintPID(
+            "Min semi-space size cannot be more than the maximum "
+            "semi-space size of %d MB\n",
+            max_semi_space_size_);
+      }
+    } else {
+      initial_semispace_size_ = initial_semispace_size;
+    }
+  }
+
+  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ =
+      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+          max_old_generation_size_);
+
+  // We rely on being able to allocate new arrays in paged spaces.
+  DCHECK(Page::kMaxRegularHeapObjectSize >=
+         (JSArray::kSize +
+          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+          AllocationMemento::kSize));
+
+  code_range_size_ = code_range_size * MB;
+
+  configured_ = true;
+  return true;
+}
+
+
+bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
+
+
+void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
+  *stats->start_marker = HeapStats::kStartMarker;
+  *stats->end_marker = HeapStats::kEndMarker;
+  *stats->new_space_size = new_space_.SizeAsInt();
+  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
+  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
+  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
+  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
+  *stats->old_data_space_capacity = old_data_space_->Capacity();
+  *stats->code_space_size = code_space_->SizeOfObjects();
+  *stats->code_space_capacity = code_space_->Capacity();
+
*stats->map_space_size = map_space_->SizeOfObjects(); + *stats->map_space_capacity = map_space_->Capacity(); + *stats->cell_space_size = cell_space_->SizeOfObjects(); + *stats->cell_space_capacity = cell_space_->Capacity(); + *stats->property_cell_space_size = property_cell_space_->SizeOfObjects(); + *stats->property_cell_space_capacity = property_cell_space_->Capacity(); + *stats->lo_space_size = lo_space_->Size(); + isolate_->global_handles()->RecordStats(stats); + *stats->memory_allocator_size = isolate()->memory_allocator()->Size(); + *stats->memory_allocator_capacity = + isolate()->memory_allocator()->Size() + + isolate()->memory_allocator()->Available(); + *stats->os_error = base::OS::GetLastError(); + isolate()->memory_allocator()->Available(); + if (take_snapshot) { + HeapIterator iterator(this); + for (HeapObject* obj = iterator.next(); obj != NULL; + obj = iterator.next()) { + InstanceType type = obj->map()->instance_type(); + DCHECK(0 <= type && type <= LAST_TYPE); + stats->objects_per_type[type]++; + stats->size_per_type[type] += obj->Size(); + } + } +} + + +intptr_t Heap::PromotedSpaceSizeOfObjects() { + return old_pointer_space_->SizeOfObjects() + + old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() + + map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() + + property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects(); +} + + +int64_t Heap::PromotedExternalMemorySize() { + if (amount_of_external_allocated_memory_ <= + amount_of_external_allocated_memory_at_last_global_gc_) + return 0; + return amount_of_external_allocated_memory_ - + amount_of_external_allocated_memory_at_last_global_gc_; +} + + +intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size, + int freed_global_handles) { + const int kMaxHandles = 1000; + const int kMinHandles = 100; + double min_factor = 1.1; + double max_factor = 4; + // We set the old generation growing factor to 2 to grow the heap slower on + // memory-constrained devices. 
+ if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { + max_factor = 2; + } + // If there are many freed global handles, then the next full GC will + // likely collect a lot of garbage. Choose the heap growing factor + // depending on freed global handles. + // TODO(ulan, hpayer): Take into account mutator utilization. + double factor; + if (freed_global_handles <= kMinHandles) { + factor = max_factor; + } else if (freed_global_handles >= kMaxHandles) { + factor = min_factor; + } else { + // Compute factor using linear interpolation between points + // (kMinHandles, max_factor) and (kMaxHandles, min_factor). + factor = max_factor - + (freed_global_handles - kMinHandles) * (max_factor - min_factor) / + (kMaxHandles - kMinHandles); + } + + if (FLAG_stress_compaction || + mark_compact_collector()->reduce_memory_footprint_) { + factor = min_factor; + } + + intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); + limit = Max(limit, kMinimumOldGenerationAllocationLimit); + limit += new_space_.Capacity(); + intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; + return Min(limit, halfway_to_the_max); +} + + +void Heap::EnableInlineAllocation() { + if (!inline_allocation_disabled_) return; + inline_allocation_disabled_ = false; + + // Update inline allocation limit for new space. + new_space()->UpdateInlineAllocationLimit(0); +} + + +void Heap::DisableInlineAllocation() { + if (inline_allocation_disabled_) return; + inline_allocation_disabled_ = true; + + // Update inline allocation limit for new space. + new_space()->UpdateInlineAllocationLimit(0); + + // Update inline allocation limit for old spaces. 
+ PagedSpaces spaces(this); + for (PagedSpace* space = spaces.next(); space != NULL; + space = spaces.next()) { + space->EmptyAllocationInfo(); + } +} + + +V8_DECLARE_ONCE(initialize_gc_once); + +static void InitializeGCOnce() { + InitializeScavengingVisitorsTables(); + NewSpaceScavenger::Initialize(); + MarkCompactCollector::Initialize(); +} + + +bool Heap::SetUp() { +#ifdef DEBUG + allocation_timeout_ = FLAG_gc_interval; +#endif + + // Initialize heap spaces and initial maps and objects. Whenever something + // goes wrong, just return false. The caller should check the results and + // call Heap::TearDown() to release allocated memory. + // + // If the heap is not yet configured (e.g. through the API), configure it. + // Configuration is based on the flags new-space-size (really the semispace + // size) and old-space-size if set or the initial values of semispace_size_ + // and old_generation_size_ otherwise. + if (!configured_) { + if (!ConfigureHeapDefault()) return false; + } + + base::CallOnce(&initialize_gc_once, &InitializeGCOnce); + + MarkMapPointersAsEncoded(false); + + // Set up memory allocator. + if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize())) + return false; + + // Set up new space. + if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) { + return false; + } + new_space_top_after_last_gc_ = new_space()->top(); + + // Initialize old pointer space. + old_pointer_space_ = new OldSpace(this, max_old_generation_size_, + OLD_POINTER_SPACE, NOT_EXECUTABLE); + if (old_pointer_space_ == NULL) return false; + if (!old_pointer_space_->SetUp()) return false; + + // Initialize old data space. 
+ old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE, + NOT_EXECUTABLE); + if (old_data_space_ == NULL) return false; + if (!old_data_space_->SetUp()) return false; + + if (!isolate_->code_range()->SetUp(code_range_size_)) return false; + + // Initialize the code space, set its maximum capacity to the old + // generation size. It needs executable memory. + code_space_ = + new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); + if (code_space_ == NULL) return false; + if (!code_space_->SetUp()) return false; + + // Initialize map space. + map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE); + if (map_space_ == NULL) return false; + if (!map_space_->SetUp()) return false; + + // Initialize simple cell space. + cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); + if (cell_space_ == NULL) return false; + if (!cell_space_->SetUp()) return false; + + // Initialize global property cell space. + property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_, + PROPERTY_CELL_SPACE); + if (property_cell_space_ == NULL) return false; + if (!property_cell_space_->SetUp()) return false; + + // The large object code space may contain code or data. We set the memory + // to be non-executable here for safety, but this means we need to enable it + // explicitly when allocating large code objects. + lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); + if (lo_space_ == NULL) return false; + if (!lo_space_->SetUp()) return false; + + // Set up the seed that is used to randomize the string hash function. 
+ DCHECK(hash_seed() == 0); + if (FLAG_randomize_hashes) { + if (FLAG_hash_seed == 0) { + int rnd = isolate()->random_number_generator()->NextInt(); + set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask)); + } else { + set_hash_seed(Smi::FromInt(FLAG_hash_seed)); + } + } + + LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); + LOG(isolate_, IntPtrTEvent("heap-available", Available())); + + store_buffer()->SetUp(); + + mark_compact_collector()->SetUp(); + + return true; +} + + +bool Heap::CreateHeapObjects() { + // Create initial maps. + if (!CreateInitialMaps()) return false; + CreateApiObjects(); + + // Create initial objects + CreateInitialObjects(); + CHECK_EQ(0, gc_count_); + + set_native_contexts_list(undefined_value()); + set_array_buffers_list(undefined_value()); + set_allocation_sites_list(undefined_value()); + weak_object_to_code_table_ = undefined_value(); + return true; +} + + +void Heap::SetStackLimits() { + DCHECK(isolate_ != NULL); + DCHECK(isolate_ == isolate()); + // On 64 bit machines, pointers are generally out of range of Smis. We write + // something that looks like an out of range Smi to the GC. + + // Set up the special root array entries containing the stack limits. + // These are actually addresses, but the tag makes the GC ignore it. 
+  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
+  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
+}
+
+
+void Heap::TearDown() {
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+
+  UpdateMaximumCommitted();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    PrintF("\n");
+    PrintF("gc_count=%d ", gc_count_);
+    PrintF("mark_sweep_count=%d ", ms_count_);
+    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
+    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
+    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
+    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
+    PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
+    PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
+    PrintF("\n\n");
+  }
+
+  if (FLAG_print_max_heap_committed) {
+    PrintF("\n");
+    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+           MaximumCommittedMemory());
+    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+           new_space_.MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
+           old_pointer_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+           old_data_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+           code_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+           map_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
+           cell_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
+           property_cell_space_->MaximumCommittedMemory());
+
PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ", + lo_space_->MaximumCommittedMemory()); + PrintF("\n\n"); + } + + if (FLAG_verify_predictable) { + PrintAlloctionsHash(); + } + + TearDownArrayBuffers(); + + isolate_->global_handles()->TearDown(); + + external_string_table_.TearDown(); + + mark_compact_collector()->TearDown(); + + new_space_.TearDown(); + + if (old_pointer_space_ != NULL) { + old_pointer_space_->TearDown(); + delete old_pointer_space_; + old_pointer_space_ = NULL; + } + + if (old_data_space_ != NULL) { + old_data_space_->TearDown(); + delete old_data_space_; + old_data_space_ = NULL; + } + + if (code_space_ != NULL) { + code_space_->TearDown(); + delete code_space_; + code_space_ = NULL; + } + + if (map_space_ != NULL) { + map_space_->TearDown(); + delete map_space_; + map_space_ = NULL; + } + + if (cell_space_ != NULL) { + cell_space_->TearDown(); + delete cell_space_; + cell_space_ = NULL; + } + + if (property_cell_space_ != NULL) { + property_cell_space_->TearDown(); + delete property_cell_space_; + property_cell_space_ = NULL; + } + + if (lo_space_ != NULL) { + lo_space_->TearDown(); + delete lo_space_; + lo_space_ = NULL; + } + + store_buffer()->TearDown(); + incremental_marking()->TearDown(); + + isolate_->memory_allocator()->TearDown(); +} + + +void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, + GCType gc_type, bool pass_isolate) { + DCHECK(callback != NULL); + GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); + DCHECK(!gc_prologue_callbacks_.Contains(pair)); + return gc_prologue_callbacks_.Add(pair); +} + + +void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { + DCHECK(callback != NULL); + for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { + if (gc_prologue_callbacks_[i].callback == callback) { + gc_prologue_callbacks_.Remove(i); + return; + } + } + UNREACHABLE(); +} + + +void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, + GCType 
gc_type, bool pass_isolate) { + DCHECK(callback != NULL); + GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate); + DCHECK(!gc_epilogue_callbacks_.Contains(pair)); + return gc_epilogue_callbacks_.Add(pair); +} + + +void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { + DCHECK(callback != NULL); + for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { + if (gc_epilogue_callbacks_[i].callback == callback) { + gc_epilogue_callbacks_.Remove(i); + return; + } + } + UNREACHABLE(); +} + + +// TODO(ishell): Find a better place for this. +void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj, + Handle<DependentCode> dep) { + DCHECK(!InNewSpace(*obj)); + DCHECK(!InNewSpace(*dep)); + // This handle scope keeps the table handle local to this function, which + // allows us to safely skip write barriers in table update operations. + HandleScope scope(isolate()); + Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_), + isolate()); + table = WeakHashTable::Put(table, obj, dep); + + if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) { + WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value()); + } + set_weak_object_to_code_table(*table); + DCHECK_EQ(*dep, table->Lookup(obj)); +} + + +DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) { + Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj); + if (dep->IsDependentCode()) return DependentCode::cast(dep); + return DependentCode::cast(empty_fixed_array()); +} + + +void Heap::EnsureWeakObjectToCodeTable() { + if (!weak_object_to_code_table()->IsHashTable()) { + set_weak_object_to_code_table( + *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, + TENURED)); + } +} + + +void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { + v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot); +} + +#ifdef DEBUG + +class PrintHandleVisitor : public ObjectVisitor 
{ + public: + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) + PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p), + reinterpret_cast<void*>(*p)); + } +}; + + +void Heap::PrintHandles() { + PrintF("Handles:\n"); + PrintHandleVisitor v; + isolate_->handle_scope_implementer()->Iterate(&v); +} + +#endif + + +Space* AllSpaces::next() { + switch (counter_++) { + case NEW_SPACE: + return heap_->new_space(); + case OLD_POINTER_SPACE: + return heap_->old_pointer_space(); + case OLD_DATA_SPACE: + return heap_->old_data_space(); + case CODE_SPACE: + return heap_->code_space(); + case MAP_SPACE: + return heap_->map_space(); + case CELL_SPACE: + return heap_->cell_space(); + case PROPERTY_CELL_SPACE: + return heap_->property_cell_space(); + case LO_SPACE: + return heap_->lo_space(); + default: + return NULL; + } +} + + +PagedSpace* PagedSpaces::next() { + switch (counter_++) { + case OLD_POINTER_SPACE: + return heap_->old_pointer_space(); + case OLD_DATA_SPACE: + return heap_->old_data_space(); + case CODE_SPACE: + return heap_->code_space(); + case MAP_SPACE: + return heap_->map_space(); + case CELL_SPACE: + return heap_->cell_space(); + case PROPERTY_CELL_SPACE: + return heap_->property_cell_space(); + default: + return NULL; + } +} + + +OldSpace* OldSpaces::next() { + switch (counter_++) { + case OLD_POINTER_SPACE: + return heap_->old_pointer_space(); + case OLD_DATA_SPACE: + return heap_->old_data_space(); + case CODE_SPACE: + return heap_->code_space(); + default: + return NULL; + } +} + + +SpaceIterator::SpaceIterator(Heap* heap) + : heap_(heap), + current_space_(FIRST_SPACE), + iterator_(NULL), + size_func_(NULL) {} + + +SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func) + : heap_(heap), + current_space_(FIRST_SPACE), + iterator_(NULL), + size_func_(size_func) {} + + +SpaceIterator::~SpaceIterator() { + // Delete active iterator if any. 
+ delete iterator_; +} + + +bool SpaceIterator::has_next() { + // Iterate until no more spaces. + return current_space_ != LAST_SPACE; +} + + +ObjectIterator* SpaceIterator::next() { + if (iterator_ != NULL) { + delete iterator_; + iterator_ = NULL; + // Move to the next space + current_space_++; + if (current_space_ > LAST_SPACE) { + return NULL; + } + } + + // Return iterator for the new current space. + return CreateIterator(); +} + + +// Create an iterator for the space to iterate. +ObjectIterator* SpaceIterator::CreateIterator() { + DCHECK(iterator_ == NULL); + + switch (current_space_) { + case NEW_SPACE: + iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_); + break; + case OLD_POINTER_SPACE: + iterator_ = + new HeapObjectIterator(heap_->old_pointer_space(), size_func_); + break; + case OLD_DATA_SPACE: + iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_); + break; + case CODE_SPACE: + iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_); + break; + case MAP_SPACE: + iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_); + break; + case CELL_SPACE: + iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_); + break; + case PROPERTY_CELL_SPACE: + iterator_ = + new HeapObjectIterator(heap_->property_cell_space(), size_func_); + break; + case LO_SPACE: + iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_); + break; + } + + // Return the newly allocated iterator; + DCHECK(iterator_ != NULL); + return iterator_; +} + + +class HeapObjectsFilter { + public: + virtual ~HeapObjectsFilter() {} + virtual bool SkipObject(HeapObject* object) = 0; +}; + + +class UnreachableObjectsFilter : public HeapObjectsFilter { + public: + explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) { + MarkReachableObjects(); + } + + ~UnreachableObjectsFilter() { + heap_->mark_compact_collector()->ClearMarkbits(); + } + + bool SkipObject(HeapObject* object) { + MarkBit mark_bit = 
Marking::MarkBitFrom(object); + return !mark_bit.Get(); + } + + private: + class MarkingVisitor : public ObjectVisitor { + public: + MarkingVisitor() : marking_stack_(10) {} + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + if (!(*p)->IsHeapObject()) continue; + HeapObject* obj = HeapObject::cast(*p); + MarkBit mark_bit = Marking::MarkBitFrom(obj); + if (!mark_bit.Get()) { + mark_bit.Set(); + marking_stack_.Add(obj); + } + } + } + + void TransitiveClosure() { + while (!marking_stack_.is_empty()) { + HeapObject* obj = marking_stack_.RemoveLast(); + obj->Iterate(this); + } + } + + private: + List<HeapObject*> marking_stack_; + }; + + void MarkReachableObjects() { + MarkingVisitor visitor; + heap_->IterateRoots(&visitor, VISIT_ALL); + visitor.TransitiveClosure(); + } + + Heap* heap_; + DisallowHeapAllocation no_allocation_; +}; + + +HeapIterator::HeapIterator(Heap* heap) + : make_heap_iterable_helper_(heap), + no_heap_allocation_(), + heap_(heap), + filtering_(HeapIterator::kNoFiltering), + filter_(NULL) { + Init(); +} + + +HeapIterator::HeapIterator(Heap* heap, + HeapIterator::HeapObjectsFiltering filtering) + : make_heap_iterable_helper_(heap), + no_heap_allocation_(), + heap_(heap), + filtering_(filtering), + filter_(NULL) { + Init(); +} + + +HeapIterator::~HeapIterator() { Shutdown(); } + + +void HeapIterator::Init() { + // Start the iteration. + space_iterator_ = new SpaceIterator(heap_); + switch (filtering_) { + case kFilterUnreachable: + filter_ = new UnreachableObjectsFilter(heap_); + break; + default: + break; + } + object_iterator_ = space_iterator_->next(); +} + + +void HeapIterator::Shutdown() { +#ifdef DEBUG + // Assert that in filtering mode we have iterated through all + // objects. Otherwise, heap will be left in an inconsistent state. + if (filtering_ != kNoFiltering) { + DCHECK(object_iterator_ == NULL); + } +#endif + // Make sure the last iterator is deallocated. 
+ delete space_iterator_; + space_iterator_ = NULL; + object_iterator_ = NULL; + delete filter_; + filter_ = NULL; +} + + +HeapObject* HeapIterator::next() { + if (filter_ == NULL) return NextObject(); + + HeapObject* obj = NextObject(); + while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject(); + return obj; +} + + +HeapObject* HeapIterator::NextObject() { + // No iterator means we are done. + if (object_iterator_ == NULL) return NULL; + + if (HeapObject* obj = object_iterator_->next_object()) { + // If the current iterator has more objects we are fine. + return obj; + } else { + // Go though the spaces looking for one that has objects. + while (space_iterator_->has_next()) { + object_iterator_ = space_iterator_->next(); + if (HeapObject* obj = object_iterator_->next_object()) { + return obj; + } + } + } + // Done with the last space. + object_iterator_ = NULL; + return NULL; +} + + +void HeapIterator::reset() { + // Restart the iterator. + Shutdown(); + Init(); +} + + +#ifdef DEBUG + +Object* const PathTracer::kAnyGlobalObject = NULL; + +class PathTracer::MarkVisitor : public ObjectVisitor { + public: + explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {} + void VisitPointers(Object** start, Object** end) { + // Scan all HeapObject pointers in [start, end) + for (Object** p = start; !tracer_->found() && (p < end); p++) { + if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this); + } + } + + private: + PathTracer* tracer_; +}; + + +class PathTracer::UnmarkVisitor : public ObjectVisitor { + public: + explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {} + void VisitPointers(Object** start, Object** end) { + // Scan all HeapObject pointers in [start, end) + for (Object** p = start; p < end; p++) { + if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this); + } + } + + private: + PathTracer* tracer_; +}; + + +void PathTracer::VisitPointers(Object** start, Object** end) { + bool done = ((what_to_find_ == FIND_FIRST) && 
found_target_); + // Visit all HeapObject pointers in [start, end) + for (Object** p = start; !done && (p < end); p++) { + if ((*p)->IsHeapObject()) { + TracePathFrom(p); + done = ((what_to_find_ == FIND_FIRST) && found_target_); + } + } +} + + +void PathTracer::Reset() { + found_target_ = false; + object_stack_.Clear(); +} + + +void PathTracer::TracePathFrom(Object** root) { + DCHECK((search_target_ == kAnyGlobalObject) || + search_target_->IsHeapObject()); + found_target_in_trace_ = false; + Reset(); + + MarkVisitor mark_visitor(this); + MarkRecursively(root, &mark_visitor); + + UnmarkVisitor unmark_visitor(this); + UnmarkRecursively(root, &unmark_visitor); + + ProcessResults(); +} + + +static bool SafeIsNativeContext(HeapObject* obj) { + return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map(); +} + + +void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { + if (!(*p)->IsHeapObject()) return; + + HeapObject* obj = HeapObject::cast(*p); + + MapWord map_word = obj->map_word(); + if (!map_word.ToMap()->IsHeapObject()) return; // visited before + + if (found_target_in_trace_) return; // stop if target found + object_stack_.Add(obj); + if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || + (obj == search_target_)) { + found_target_in_trace_ = true; + found_target_ = true; + return; + } + + bool is_native_context = SafeIsNativeContext(obj); + + // not visited yet + Map* map = Map::cast(map_word.ToMap()); + + MapWord marked_map_word = + MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag); + obj->set_map_word(marked_map_word); + + // Scan the object body. + if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) { + // This is specialized to scan Context's properly. 
+ Object** start = + reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize); + Object** end = + reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize + + Context::FIRST_WEAK_SLOT * kPointerSize); + mark_visitor->VisitPointers(start, end); + } else { + obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor); + } + + // Scan the map after the body because the body is a lot more interesting + // when doing leak detection. + MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor); + + if (!found_target_in_trace_) { // don't pop if found the target + object_stack_.RemoveLast(); + } +} + + +void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) { + if (!(*p)->IsHeapObject()) return; + + HeapObject* obj = HeapObject::cast(*p); + + MapWord map_word = obj->map_word(); + if (map_word.ToMap()->IsHeapObject()) return; // unmarked already + + MapWord unmarked_map_word = + MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag); + obj->set_map_word(unmarked_map_word); + + Map* map = Map::cast(unmarked_map_word.ToMap()); + + UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor); + + obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor); +} + + +void PathTracer::ProcessResults() { + if (found_target_) { + OFStream os(stdout); + os << "=====================================\n" + << "==== Path to object ====\n" + << "=====================================\n\n"; + + DCHECK(!object_stack_.is_empty()); + for (int i = 0; i < object_stack_.length(); i++) { + if (i > 0) os << "\n |\n |\n V\n\n"; + object_stack_[i]->Print(os); + } + os << "=====================================\n"; + } +} + + +// Triggers a depth-first traversal of reachable objects from one +// given root object and finds a path to a specific heap object and +// prints it. 
+void Heap::TracePathToObjectFrom(Object* target, Object* root) { + PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); + tracer.VisitPointer(&root); +} + + +// Triggers a depth-first traversal of reachable objects from roots +// and finds a path to a specific heap object and prints it. +void Heap::TracePathToObject(Object* target) { + PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); + IterateRoots(&tracer, VISIT_ONLY_STRONG); +} + + +// Triggers a depth-first traversal of reachable objects from roots +// and finds a path to any global object and prints it. Useful for +// determining the source for leaks of global objects. +void Heap::TracePathToGlobal() { + PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL, + VISIT_ALL); + IterateRoots(&tracer, VISIT_ONLY_STRONG); +} +#endif + + +void Heap::UpdateCumulativeGCStatistics(double duration, + double spent_in_mutator, + double marking_time) { + if (FLAG_print_cumulative_gc_stat) { + total_gc_time_ms_ += duration; + max_gc_pause_ = Max(max_gc_pause_, duration); + max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects()); + min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator); + } else if (FLAG_trace_gc_verbose) { + total_gc_time_ms_ += duration; + } + + marking_time_ += marking_time; +} + + +int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) { + DisallowHeapAllocation no_gc; + // Uses only lower 32 bits if pointers are larger. 
+  uintptr_t addr_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
+  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
+}
+
+
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  int index = (Hash(map, name) & kHashMask);
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    if ((key.map == *map) && key.name->Equals(*name)) {
+      return field_offsets_[index + i];
+    }
+  }
+  return kNotFound;
+}
+
+
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
+                              int field_offset) {
+  DisallowHeapAllocation no_gc;
+  if (!name->IsUniqueName()) {
+    if (!StringTable::InternalizeStringIfExists(
+             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
+      return;
+    }
+  }
+  // This cache is cleared only between mark compact passes, so we expect the
+  // cache to only contain old space names.
+  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
+
+  int index = (Hash(map, name) & kHashMask);
+  // After a GC there will be free slots, so we use them in order (this may
+  // help to get the most frequently used one in position 0).
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    Object* free_entry_indicator = NULL;
+    if (key.map == free_entry_indicator) {
+      key.map = *map;
+      key.name = *name;
+      field_offsets_[index + i] = field_offset;
+      return;
+    }
+  }
+  // No free entry found in this bucket, so we move them all down one and
+  // put the new entry at position zero.
+  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+    Key& key = keys_[index + i];
+    Key& key2 = keys_[index + i - 1];
+    key = key2;
+    field_offsets_[index + i] = field_offsets_[index + i - 1];
+  }
+
+  // Write the new first entry.
+ Key& key = keys_[index]; + key.map = *map; + key.name = *name; + field_offsets_[index] = field_offset; +} + + +void KeyedLookupCache::Clear() { + for (int index = 0; index < kLength; index++) keys_[index].map = NULL; +} + + +void DescriptorLookupCache::Clear() { + for (int index = 0; index < kLength; index++) keys_[index].source = NULL; +} + + +void ExternalStringTable::CleanUp() { + int last = 0; + for (int i = 0; i < new_space_strings_.length(); ++i) { + if (new_space_strings_[i] == heap_->the_hole_value()) { + continue; + } + DCHECK(new_space_strings_[i]->IsExternalString()); + if (heap_->InNewSpace(new_space_strings_[i])) { + new_space_strings_[last++] = new_space_strings_[i]; + } else { + old_space_strings_.Add(new_space_strings_[i]); + } + } + new_space_strings_.Rewind(last); + new_space_strings_.Trim(); + + last = 0; + for (int i = 0; i < old_space_strings_.length(); ++i) { + if (old_space_strings_[i] == heap_->the_hole_value()) { + continue; + } + DCHECK(old_space_strings_[i]->IsExternalString()); + DCHECK(!heap_->InNewSpace(old_space_strings_[i])); + old_space_strings_[last++] = old_space_strings_[i]; + } + old_space_strings_.Rewind(last); + old_space_strings_.Trim(); +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + Verify(); + } +#endif +} + + +void ExternalStringTable::TearDown() { + for (int i = 0; i < new_space_strings_.length(); ++i) { + heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i])); + } + new_space_strings_.Free(); + for (int i = 0; i < old_space_strings_.length(); ++i) { + heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i])); + } + old_space_strings_.Free(); +} + + +void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) { + chunk->set_next_chunk(chunks_queued_for_free_); + chunks_queued_for_free_ = chunk; +} + + +void Heap::FreeQueuedChunks() { + if (chunks_queued_for_free_ == NULL) return; + MemoryChunk* next; + MemoryChunk* chunk; + for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = 
next) { + next = chunk->next_chunk(); + chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); + + if (chunk->owner()->identity() == LO_SPACE) { + // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress. + // If FromAnyPointerAddress encounters a slot that belongs to a large + // chunk queued for deletion it will fail to find the chunk because + // it tries to perform a search in the list of pages owned by the large + // object space and queued chunks were detached from that list. + // To work around this we split large chunk into normal kPageSize aligned + // pieces and initialize size, owner and flags field of every piece. + // If FromAnyPointerAddress encounters a slot that belongs to one of + // these smaller pieces it will treat it as a slot on a normal Page. + Address chunk_end = chunk->address() + chunk->size(); + MemoryChunk* inner = + MemoryChunk::FromAddress(chunk->address() + Page::kPageSize); + MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1); + while (inner <= inner_last) { + // Size of a large chunk is always a multiple of + // OS::AllocateAlignment() so there is always + // enough space for a fake MemoryChunk header. + Address area_end = Min(inner->address() + Page::kPageSize, chunk_end); + // Guard against overflow. 
+ if (area_end < inner->address()) area_end = chunk_end; + inner->SetArea(inner->address(), area_end); + inner->set_size(Page::kPageSize); + inner->set_owner(lo_space()); + inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); + inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize); + } + } + } + isolate_->heap()->store_buffer()->Compact(); + isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); + for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { + next = chunk->next_chunk(); + isolate_->memory_allocator()->Free(chunk); + } + chunks_queued_for_free_ = NULL; +} + + +void Heap::RememberUnmappedPage(Address page, bool compacted) { + uintptr_t p = reinterpret_cast<uintptr_t>(page); + // Tag the page pointer to make it findable in the dump file. + if (compacted) { + p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared. + } else { + p ^= 0x1d1ed & (Page::kPageSize - 1); // I died. + } + remembered_unmapped_pages_[remembered_unmapped_pages_index_] = + reinterpret_cast<Address>(p); + remembered_unmapped_pages_index_++; + remembered_unmapped_pages_index_ %= kRememberedUnmappedPages; +} + + +void Heap::ClearObjectStats(bool clear_last_time_stats) { + memset(object_counts_, 0, sizeof(object_counts_)); + memset(object_sizes_, 0, sizeof(object_sizes_)); + if (clear_last_time_stats) { + memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_)); + memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_)); + } +} + + +static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER; + + +void Heap::CheckpointObjectStats() { + base::LockGuard<base::Mutex> lock_guard( + checkpoint_object_stats_mutex.Pointer()); + Counters* counters = isolate()->counters(); +#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ + counters->count_of_##name()->Increment( \ + static_cast<int>(object_counts_[name])); \ + counters->count_of_##name()->Decrement( \ + static_cast<int>(object_counts_last_time_[name])); \ + 
counters->size_of_##name()->Increment( \ + static_cast<int>(object_sizes_[name])); \ + counters->size_of_##name()->Decrement( \ + static_cast<int>(object_sizes_last_time_[name])); + INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) +#undef ADJUST_LAST_TIME_OBJECT_COUNT + int index; +#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ + index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \ + counters->count_of_CODE_TYPE_##name()->Increment( \ + static_cast<int>(object_counts_[index])); \ + counters->count_of_CODE_TYPE_##name()->Decrement( \ + static_cast<int>(object_counts_last_time_[index])); \ + counters->size_of_CODE_TYPE_##name()->Increment( \ + static_cast<int>(object_sizes_[index])); \ + counters->size_of_CODE_TYPE_##name()->Decrement( \ + static_cast<int>(object_sizes_last_time_[index])); + CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) +#undef ADJUST_LAST_TIME_OBJECT_COUNT +#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ + index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \ + counters->count_of_FIXED_ARRAY_##name()->Increment( \ + static_cast<int>(object_counts_[index])); \ + counters->count_of_FIXED_ARRAY_##name()->Decrement( \ + static_cast<int>(object_counts_last_time_[index])); \ + counters->size_of_FIXED_ARRAY_##name()->Increment( \ + static_cast<int>(object_sizes_[index])); \ + counters->size_of_FIXED_ARRAY_##name()->Decrement( \ + static_cast<int>(object_sizes_last_time_[index])); + FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) +#undef ADJUST_LAST_TIME_OBJECT_COUNT +#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ + index = \ + FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \ + counters->count_of_CODE_AGE_##name()->Increment( \ + static_cast<int>(object_counts_[index])); \ + counters->count_of_CODE_AGE_##name()->Decrement( \ + static_cast<int>(object_counts_last_time_[index])); \ + counters->size_of_CODE_AGE_##name()->Increment( \ + static_cast<int>(object_sizes_[index])); \ + counters->size_of_CODE_AGE_##name()->Decrement( \ + 
static_cast<int>(object_sizes_last_time_[index])); + CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) +#undef ADJUST_LAST_TIME_OBJECT_COUNT + + MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); + MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); + ClearObjectStats(); +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/heap.h nodejs-0.11.15/deps/v8/src/heap/heap.h --- nodejs-0.11.13/deps/v8/src/heap/heap.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/heap.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2547 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_HEAP_H_ +#define V8_HEAP_HEAP_H_ + +#include <cmath> + +#include "src/allocation.h" +#include "src/assert-scope.h" +#include "src/counters.h" +#include "src/globals.h" +#include "src/heap/gc-tracer.h" +#include "src/heap/incremental-marking.h" +#include "src/heap/mark-compact.h" +#include "src/heap/objects-visiting.h" +#include "src/heap/spaces.h" +#include "src/heap/store-buffer.h" +#include "src/list.h" +#include "src/splay-tree-inl.h" + +namespace v8 { +namespace internal { + +// Defines all the roots in Heap. +#define STRONG_ROOT_LIST(V) \ + V(Map, byte_array_map, ByteArrayMap) \ + V(Map, free_space_map, FreeSpaceMap) \ + V(Map, one_pointer_filler_map, OnePointerFillerMap) \ + V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ + /* Cluster the most popular ones in a few cache lines here at the top. 
*/ \ + V(Smi, store_buffer_top, StoreBufferTop) \ + V(Oddball, undefined_value, UndefinedValue) \ + V(Oddball, the_hole_value, TheHoleValue) \ + V(Oddball, null_value, NullValue) \ + V(Oddball, true_value, TrueValue) \ + V(Oddball, false_value, FalseValue) \ + V(Oddball, uninitialized_value, UninitializedValue) \ + V(Oddball, exception, Exception) \ + V(Map, cell_map, CellMap) \ + V(Map, global_property_cell_map, GlobalPropertyCellMap) \ + V(Map, shared_function_info_map, SharedFunctionInfoMap) \ + V(Map, meta_map, MetaMap) \ + V(Map, heap_number_map, HeapNumberMap) \ + V(Map, mutable_heap_number_map, MutableHeapNumberMap) \ + V(Map, native_context_map, NativeContextMap) \ + V(Map, fixed_array_map, FixedArrayMap) \ + V(Map, code_map, CodeMap) \ + V(Map, scope_info_map, ScopeInfoMap) \ + V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ + V(Map, fixed_double_array_map, FixedDoubleArrayMap) \ + V(Map, constant_pool_array_map, ConstantPoolArrayMap) \ + V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ + V(Map, hash_table_map, HashTableMap) \ + V(Map, ordered_hash_table_map, OrderedHashTableMap) \ + V(FixedArray, empty_fixed_array, EmptyFixedArray) \ + V(ByteArray, empty_byte_array, EmptyByteArray) \ + V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ + V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \ + V(Oddball, arguments_marker, ArgumentsMarker) \ + /* The roots above this line should be boring from a GC point of view. */ \ + /* This means they are never in new space and never on a page that is */ \ + /* being compacted. 
*/ \ + V(FixedArray, number_string_cache, NumberStringCache) \ + V(Object, instanceof_cache_function, InstanceofCacheFunction) \ + V(Object, instanceof_cache_map, InstanceofCacheMap) \ + V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \ + V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ + V(FixedArray, string_split_cache, StringSplitCache) \ + V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \ + V(Oddball, termination_exception, TerminationException) \ + V(Smi, hash_seed, HashSeed) \ + V(Map, symbol_map, SymbolMap) \ + V(Map, string_map, StringMap) \ + V(Map, ascii_string_map, AsciiStringMap) \ + V(Map, cons_string_map, ConsStringMap) \ + V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ + V(Map, sliced_string_map, SlicedStringMap) \ + V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \ + V(Map, external_string_map, ExternalStringMap) \ + V(Map, external_string_with_one_byte_data_map, \ + ExternalStringWithOneByteDataMap) \ + V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ + V(Map, short_external_string_map, ShortExternalStringMap) \ + V(Map, short_external_string_with_one_byte_data_map, \ + ShortExternalStringWithOneByteDataMap) \ + V(Map, internalized_string_map, InternalizedStringMap) \ + V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \ + V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \ + V(Map, external_internalized_string_with_one_byte_data_map, \ + ExternalInternalizedStringWithOneByteDataMap) \ + V(Map, external_ascii_internalized_string_map, \ + ExternalAsciiInternalizedStringMap) \ + V(Map, short_external_internalized_string_map, \ + ShortExternalInternalizedStringMap) \ + V(Map, short_external_internalized_string_with_one_byte_data_map, \ + ShortExternalInternalizedStringWithOneByteDataMap) \ + V(Map, short_external_ascii_internalized_string_map, \ + ShortExternalAsciiInternalizedStringMap) \ + V(Map, short_external_ascii_string_map, 
ShortExternalAsciiStringMap) \ + V(Map, undetectable_string_map, UndetectableStringMap) \ + V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \ + V(Map, external_int8_array_map, ExternalInt8ArrayMap) \ + V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \ + V(Map, external_int16_array_map, ExternalInt16ArrayMap) \ + V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \ + V(Map, external_int32_array_map, ExternalInt32ArrayMap) \ + V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \ + V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \ + V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \ + V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \ + V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \ + V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array) \ + V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \ + V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \ + V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \ + V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \ + V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \ + V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \ + V(ExternalArray, empty_external_uint8_clamped_array, \ + EmptyExternalUint8ClampedArray) \ + V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \ + V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \ + V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \ + V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \ + V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \ + V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \ + V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \ + V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \ + V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \ + V(FixedTypedArrayBase, empty_fixed_uint8_array, 
EmptyFixedUint8Array) \ + V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \ + V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \ + V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \ + V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \ + V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \ + V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \ + V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \ + V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \ + EmptyFixedUint8ClampedArray) \ + V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \ + V(Map, function_context_map, FunctionContextMap) \ + V(Map, catch_context_map, CatchContextMap) \ + V(Map, with_context_map, WithContextMap) \ + V(Map, block_context_map, BlockContextMap) \ + V(Map, module_context_map, ModuleContextMap) \ + V(Map, global_context_map, GlobalContextMap) \ + V(Map, undefined_map, UndefinedMap) \ + V(Map, the_hole_map, TheHoleMap) \ + V(Map, null_map, NullMap) \ + V(Map, boolean_map, BooleanMap) \ + V(Map, uninitialized_map, UninitializedMap) \ + V(Map, arguments_marker_map, ArgumentsMarkerMap) \ + V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \ + V(Map, exception_map, ExceptionMap) \ + V(Map, termination_exception_map, TerminationExceptionMap) \ + V(Map, message_object_map, JSMessageObjectMap) \ + V(Map, foreign_map, ForeignMap) \ + V(HeapNumber, nan_value, NanValue) \ + V(HeapNumber, infinity_value, InfinityValue) \ + V(HeapNumber, minus_zero_value, MinusZeroValue) \ + V(Map, neander_map, NeanderMap) \ + V(JSObject, message_listeners, MessageListeners) \ + V(UnseededNumberDictionary, code_stubs, CodeStubs) \ + V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \ + V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \ + V(Code, js_entry_code, JsEntryCode) \ + 
V(Code, js_construct_entry_code, JsConstructEntryCode) \ + V(FixedArray, natives_source_cache, NativesSourceCache) \ + V(Script, empty_script, EmptyScript) \ + V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \ + V(Cell, undefined_cell, UndefineCell) \ + V(JSObject, observation_state, ObservationState) \ + V(Map, external_map, ExternalMap) \ + V(Object, symbol_registry, SymbolRegistry) \ + V(Symbol, frozen_symbol, FrozenSymbol) \ + V(Symbol, nonexistent_symbol, NonExistentSymbol) \ + V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \ + V(SeededNumberDictionary, empty_slow_element_dictionary, \ + EmptySlowElementDictionary) \ + V(Symbol, observed_symbol, ObservedSymbol) \ + V(Symbol, uninitialized_symbol, UninitializedSymbol) \ + V(Symbol, megamorphic_symbol, MegamorphicSymbol) \ + V(Symbol, stack_trace_symbol, StackTraceSymbol) \ + V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol) \ + V(Symbol, normal_ic_symbol, NormalICSymbol) \ + V(FixedArray, materialized_objects, MaterializedObjects) \ + V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \ + V(FixedArray, microtask_queue, MicrotaskQueue) + +// Entries in this list are limited to Smis and are not visited during GC. +#define SMI_ROOT_LIST(V) \ + V(Smi, stack_limit, StackLimit) \ + V(Smi, real_stack_limit, RealStackLimit) \ + V(Smi, last_script_id, LastScriptId) \ + V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \ + V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \ + V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \ + V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) + +#define ROOT_LIST(V) \ + STRONG_ROOT_LIST(V) \ + SMI_ROOT_LIST(V) \ + V(StringTable, string_table, StringTable) + +// Heap roots that are known to be immortal immovable, for which we can safely +// skip write barriers. 
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \ + V(byte_array_map) \ + V(free_space_map) \ + V(one_pointer_filler_map) \ + V(two_pointer_filler_map) \ + V(undefined_value) \ + V(the_hole_value) \ + V(null_value) \ + V(true_value) \ + V(false_value) \ + V(uninitialized_value) \ + V(cell_map) \ + V(global_property_cell_map) \ + V(shared_function_info_map) \ + V(meta_map) \ + V(heap_number_map) \ + V(mutable_heap_number_map) \ + V(native_context_map) \ + V(fixed_array_map) \ + V(code_map) \ + V(scope_info_map) \ + V(fixed_cow_array_map) \ + V(fixed_double_array_map) \ + V(constant_pool_array_map) \ + V(no_interceptor_result_sentinel) \ + V(hash_table_map) \ + V(ordered_hash_table_map) \ + V(empty_fixed_array) \ + V(empty_byte_array) \ + V(empty_descriptor_array) \ + V(empty_constant_pool_array) \ + V(arguments_marker) \ + V(symbol_map) \ + V(sloppy_arguments_elements_map) \ + V(function_context_map) \ + V(catch_context_map) \ + V(with_context_map) \ + V(block_context_map) \ + V(module_context_map) \ + V(global_context_map) \ + V(undefined_map) \ + V(the_hole_map) \ + V(null_map) \ + V(boolean_map) \ + V(uninitialized_map) \ + V(message_object_map) \ + V(foreign_map) \ + V(neander_map) + +#define INTERNALIZED_STRING_LIST(V) \ + V(Array_string, "Array") \ + V(Object_string, "Object") \ + V(proto_string, "__proto__") \ + V(arguments_string, "arguments") \ + V(Arguments_string, "Arguments") \ + V(call_string, "call") \ + V(apply_string, "apply") \ + V(caller_string, "caller") \ + V(boolean_string, "boolean") \ + V(Boolean_string, "Boolean") \ + V(callee_string, "callee") \ + V(constructor_string, "constructor") \ + V(dot_result_string, ".result") \ + V(dot_for_string, ".for.") \ + V(eval_string, "eval") \ + V(empty_string, "") \ + V(function_string, "function") \ + V(length_string, "length") \ + V(name_string, "name") \ + V(null_string, "null") \ + V(number_string, "number") \ + V(Number_string, "Number") \ + V(nan_string, "NaN") \ + V(RegExp_string, "RegExp") \ + 
V(source_string, "source") \ + V(source_url_string, "source_url") \ + V(source_mapping_url_string, "source_mapping_url") \ + V(global_string, "global") \ + V(ignore_case_string, "ignoreCase") \ + V(multiline_string, "multiline") \ + V(input_string, "input") \ + V(index_string, "index") \ + V(last_index_string, "lastIndex") \ + V(object_string, "object") \ + V(literals_string, "literals") \ + V(prototype_string, "prototype") \ + V(string_string, "string") \ + V(String_string, "String") \ + V(symbol_string, "symbol") \ + V(Symbol_string, "Symbol") \ + V(for_string, "for") \ + V(for_api_string, "for_api") \ + V(for_intern_string, "for_intern") \ + V(private_api_string, "private_api") \ + V(private_intern_string, "private_intern") \ + V(Date_string, "Date") \ + V(to_string_string, "toString") \ + V(char_at_string, "CharAt") \ + V(undefined_string, "undefined") \ + V(value_of_string, "valueOf") \ + V(stack_string, "stack") \ + V(toJSON_string, "toJSON") \ + V(InitializeVarGlobal_string, "InitializeVarGlobal") \ + V(InitializeConstGlobal_string, "InitializeConstGlobal") \ + V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \ + V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \ + V(stack_overflow_string, "kStackOverflowBoilerplate") \ + V(illegal_access_string, "illegal access") \ + V(get_string, "get") \ + V(set_string, "set") \ + V(map_field_string, "%map") \ + V(elements_field_string, "%elements") \ + V(length_field_string, "%length") \ + V(cell_value_string, "%cell_value") \ + V(function_class_string, "Function") \ + V(illegal_argument_string, "illegal argument") \ + V(space_string, " ") \ + V(exec_string, "exec") \ + V(zero_string, "0") \ + V(global_eval_string, "GlobalEval") \ + V(identity_hash_string, "v8::IdentityHash") \ + V(closure_string, "(closure)") \ + V(dot_string, ".") \ + V(compare_ic_string, "==") \ + V(strict_compare_ic_string, "===") \ + V(infinity_string, "Infinity") \ + V(minus_infinity_string, "-Infinity") \ + V(query_colon_string, 
"(?:)") \ + V(Generator_string, "Generator") \ + V(throw_string, "throw") \ + V(done_string, "done") \ + V(value_string, "value") \ + V(next_string, "next") \ + V(byte_length_string, "byteLength") \ + V(byte_offset_string, "byteOffset") \ + V(buffer_string, "buffer") \ + V(intl_initialized_marker_string, "v8::intl_initialized_marker") \ + V(intl_impl_object_string, "v8::intl_object") + +// Forward declarations. +class HeapStats; +class Isolate; +class WeakObjectRetainer; + + +typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, + Object** pointer); + +class StoreBufferRebuilder { + public: + explicit StoreBufferRebuilder(StoreBuffer* store_buffer) + : store_buffer_(store_buffer) {} + + void Callback(MemoryChunk* page, StoreBufferEvent event); + + private: + StoreBuffer* store_buffer_; + + // We record in this variable how full the store buffer was when we started + // iterating over the current page, finding pointers to new space. If the + // store buffer overflows again we can exempt the page from the store buffer + // by rewinding to this point instead of having to search the store buffer. + Object*** start_of_current_page_; + // The current page we are scanning in the store buffer iterator. + MemoryChunk* current_page_; +}; + + +// A queue of objects promoted during scavenge. Each object is accompanied +// by it's size to avoid dereferencing a map pointer for scanning. 
+class PromotionQueue { + public: + explicit PromotionQueue(Heap* heap) + : front_(NULL), + rear_(NULL), + limit_(NULL), + emergency_stack_(0), + heap_(heap) {} + + void Initialize(); + + void Destroy() { + DCHECK(is_empty()); + delete emergency_stack_; + emergency_stack_ = NULL; + } + + inline void ActivateGuardIfOnTheSamePage(); + + Page* GetHeadPage() { + return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); + } + + void SetNewLimit(Address limit) { + if (!guard_) { + return; + } + + DCHECK(GetHeadPage() == Page::FromAllocationTop(limit)); + limit_ = reinterpret_cast<intptr_t*>(limit); + + if (limit_ <= rear_) { + return; + } + + RelocateQueueHead(); + } + + bool IsBelowPromotionQueue(Address to_space_top) { + // If the given to-space top pointer and the head of the promotion queue + // are not on the same page, then the to-space objects are below the + // promotion queue. + if (GetHeadPage() != Page::FromAddress(to_space_top)) { + return true; + } + // If the to space top pointer is smaller or equal than the promotion + // queue head, then the to-space objects are below the promotion queue. + return reinterpret_cast<intptr_t*>(to_space_top) <= rear_; + } + + bool is_empty() { + return (front_ == rear_) && + (emergency_stack_ == NULL || emergency_stack_->length() == 0); + } + + inline void insert(HeapObject* target, int size); + + void remove(HeapObject** target, int* size) { + DCHECK(!is_empty()); + if (front_ == rear_) { + Entry e = emergency_stack_->RemoveLast(); + *target = e.obj_; + *size = e.size_; + return; + } + + if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) { + NewSpacePage* front_page = + NewSpacePage::FromAddress(reinterpret_cast<Address>(front_)); + DCHECK(!front_page->prev_page()->is_anchor()); + front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end()); + } + *target = reinterpret_cast<HeapObject*>(*(--front_)); + *size = static_cast<int>(*(--front_)); + // Assert no underflow. 
+ SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), + reinterpret_cast<Address>(front_)); + } + + private: + // The front of the queue is higher in the memory page chain than the rear. + intptr_t* front_; + intptr_t* rear_; + intptr_t* limit_; + + bool guard_; + + static const int kEntrySizeInWords = 2; + + struct Entry { + Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {} + + HeapObject* obj_; + int size_; + }; + List<Entry>* emergency_stack_; + + Heap* heap_; + + void RelocateQueueHead(); + + DISALLOW_COPY_AND_ASSIGN(PromotionQueue); +}; + + +typedef void (*ScavengingCallback)(Map* map, HeapObject** slot, + HeapObject* object); + + +// External strings table is a place where all external strings are +// registered. We need to keep track of such strings to properly +// finalize them. +class ExternalStringTable { + public: + // Registers an external string. + inline void AddString(String* string); + + inline void Iterate(ObjectVisitor* v); + + // Restores internal invariant and gets rid of collected strings. + // Must be called after each Iterate() that modified the strings. + void CleanUp(); + + // Destroys all allocated memory. + void TearDown(); + + private: + explicit ExternalStringTable(Heap* heap) : heap_(heap) {} + + friend class Heap; + + inline void Verify(); + + inline void AddOldString(String* string); + + // Notifies the table that only a prefix of the new list is valid. + inline void ShrinkNewStrings(int position); + + // To speed up scavenge collections new space string are kept + // separate from old space strings. + List<Object*> new_space_strings_; + List<Object*> old_space_strings_; + + Heap* heap_; + + DISALLOW_COPY_AND_ASSIGN(ExternalStringTable); +}; + + +enum ArrayStorageAllocationMode { + DONT_INITIALIZE_ARRAY_ELEMENTS, + INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE +}; + + +class Heap { + public: + // Configure heap size in MB before setup. Return false if the heap has been + // set up already. 
+ bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, + int max_executable_size, size_t code_range_size); + bool ConfigureHeapDefault(); + + // Prepares the heap, setting up memory areas that are needed in the isolate + // without actually creating any objects. + bool SetUp(); + + // Bootstraps the object heap with the core set of objects required to run. + // Returns whether it succeeded. + bool CreateHeapObjects(); + + // Destroys all memory allocated by the heap. + void TearDown(); + + // Set the stack limit in the roots_ array. Some architectures generate + // code that looks here, because it is faster than loading from the static + // jslimit_/real_jslimit_ variable in the StackGuard. + void SetStackLimits(); + + // Returns whether SetUp has been called. + bool HasBeenSetUp(); + + // Returns the maximum amount of memory reserved for the heap. For + // the young generation, we reserve 4 times the amount needed for a + // semi space. The young generation consists of two semi spaces and + // we reserve twice the amount needed for those in order to ensure + // that new space can be aligned to its size. + intptr_t MaxReserved() { + return 4 * reserved_semispace_size_ + max_old_generation_size_; + } + int MaxSemiSpaceSize() { return max_semi_space_size_; } + int ReservedSemiSpaceSize() { return reserved_semispace_size_; } + int InitialSemiSpaceSize() { return initial_semispace_size_; } + intptr_t MaxOldGenerationSize() { return max_old_generation_size_; } + intptr_t MaxExecutableSize() { return max_executable_size_; } + + // Returns the capacity of the heap in bytes w/o growing. Heap grows when + // more spaces are needed until it reaches the limit. + intptr_t Capacity(); + + // Returns the amount of memory currently committed for the heap. + intptr_t CommittedMemory(); + + // Returns the amount of executable memory currently committed for the heap. 
+ intptr_t CommittedMemoryExecutable(); + + // Returns the amount of physical memory currently committed for the heap. + size_t CommittedPhysicalMemory(); + + // Returns the maximum amount of memory ever committed for the heap. + intptr_t MaximumCommittedMemory() { return maximum_committed_; } + + // Updates the maximum committed memory for the heap. Should be called + // whenever a space grows. + void UpdateMaximumCommitted(); + + // Returns the available bytes in space w/o growing. + // Heap doesn't guarantee that it can allocate an object that requires + // all available bytes. Check MaxHeapObjectSize() instead. + intptr_t Available(); + + // Returns the size of all objects residing in the heap. + intptr_t SizeOfObjects(); + + // Return the starting address and a mask for the new space. And-masking an + // address with the mask will result in the start address of the new space + // for all addresses in either semispace. + Address NewSpaceStart() { return new_space_.start(); } + uintptr_t NewSpaceMask() { return new_space_.mask(); } + Address NewSpaceTop() { return new_space_.top(); } + + NewSpace* new_space() { return &new_space_; } + OldSpace* old_pointer_space() { return old_pointer_space_; } + OldSpace* old_data_space() { return old_data_space_; } + OldSpace* code_space() { return code_space_; } + MapSpace* map_space() { return map_space_; } + CellSpace* cell_space() { return cell_space_; } + PropertyCellSpace* property_cell_space() { return property_cell_space_; } + LargeObjectSpace* lo_space() { return lo_space_; } + PagedSpace* paged_space(int idx) { + switch (idx) { + case OLD_POINTER_SPACE: + return old_pointer_space(); + case OLD_DATA_SPACE: + return old_data_space(); + case MAP_SPACE: + return map_space(); + case CELL_SPACE: + return cell_space(); + case PROPERTY_CELL_SPACE: + return property_cell_space(); + case CODE_SPACE: + return code_space(); + case NEW_SPACE: + case LO_SPACE: + UNREACHABLE(); + } + return NULL; + } + + bool always_allocate() { 
return always_allocate_scope_depth_ != 0; } + Address always_allocate_scope_depth_address() { + return reinterpret_cast<Address>(&always_allocate_scope_depth_); + } + + Address* NewSpaceAllocationTopAddress() { + return new_space_.allocation_top_address(); + } + Address* NewSpaceAllocationLimitAddress() { + return new_space_.allocation_limit_address(); + } + + Address* OldPointerSpaceAllocationTopAddress() { + return old_pointer_space_->allocation_top_address(); + } + Address* OldPointerSpaceAllocationLimitAddress() { + return old_pointer_space_->allocation_limit_address(); + } + + Address* OldDataSpaceAllocationTopAddress() { + return old_data_space_->allocation_top_address(); + } + Address* OldDataSpaceAllocationLimitAddress() { + return old_data_space_->allocation_limit_address(); + } + + // Returns a deep copy of the JavaScript object. + // Properties and elements are copied too. + // Optionally takes an AllocationSite to be appended in an AllocationMemento. + MUST_USE_RESULT AllocationResult + CopyJSObject(JSObject* source, AllocationSite* site = NULL); + + // Clear the Instanceof cache (used when a prototype changes). + inline void ClearInstanceofCache(); + + // Iterates the whole code space to clear all ICs of the given kind. + void ClearAllICsByKind(Code::Kind kind); + + // For use during bootup. + void RepairFreeListsAfterBoot(); + + template <typename T> + static inline bool IsOneByte(T t, int chars); + + // Move len elements within a given array from src_index index to dst_index + // index. + void MoveElements(FixedArray* array, int dst_index, int src_index, int len); + + // Sloppy mode arguments object size. + static const int kSloppyArgumentsObjectSize = + JSObject::kHeaderSize + 2 * kPointerSize; + // Strict mode arguments has no callee so it is smaller. + static const int kStrictArgumentsObjectSize = + JSObject::kHeaderSize + 1 * kPointerSize; + // Indicies for direct access into argument objects. 
+ static const int kArgumentsLengthIndex = 0; + // callee is only valid in sloppy mode. + static const int kArgumentsCalleeIndex = 1; + + // Finalizes an external string by deleting the associated external + // data and clearing the resource pointer. + inline void FinalizeExternalString(String* string); + + // Initialize a filler object to keep the ability to iterate over the heap + // when introducing gaps within pages. + void CreateFillerObjectAt(Address addr, int size); + + bool CanMoveObjectStart(HeapObject* object); + + // Indicates whether live bytes adjustment is triggered from within the GC + // code or from mutator code. + enum InvocationMode { FROM_GC, FROM_MUTATOR }; + + // Maintain consistency of live bytes during incremental marking. + void AdjustLiveBytes(Address address, int by, InvocationMode mode); + + // Trim the given array from the left. Note that this relocates the object + // start and hence is only valid if there is only a single reference to it. + FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); + + // Trim the given array from the right. + template<Heap::InvocationMode mode> + void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); + + // Converts the given boolean condition to JavaScript boolean value. + inline Object* ToBoolean(bool condition); + + // Performs garbage collection operation. + // Returns whether there is a chance that another major GC could + // collect more garbage. + inline bool CollectGarbage( + AllocationSpace space, const char* gc_reason = NULL, + const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); + + static const int kNoGCFlags = 0; + static const int kSweepPreciselyMask = 1; + static const int kReduceMemoryFootprintMask = 2; + static const int kAbortIncrementalMarkingMask = 4; + + // Making the heap iterable requires us to sweep precisely and abort any + // incremental marking as well. 
+ static const int kMakeHeapIterableMask = + kSweepPreciselyMask | kAbortIncrementalMarkingMask; + + // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is + // non-zero, then the slower precise sweeper is used, which leaves the heap + // in a state where we can iterate over the heap visiting all objects. + void CollectAllGarbage( + int flags, const char* gc_reason = NULL, + const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); + + // Last hope GC, should try to squeeze as much as possible. + void CollectAllAvailableGarbage(const char* gc_reason = NULL); + + // Check whether the heap is currently iterable. + bool IsHeapIterable(); + + // Notify the heap that a context has been disposed. + int NotifyContextDisposed(); + + inline void increment_scan_on_scavenge_pages() { + scan_on_scavenge_pages_++; + if (FLAG_gc_verbose) { + PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); + } + } + + inline void decrement_scan_on_scavenge_pages() { + scan_on_scavenge_pages_--; + if (FLAG_gc_verbose) { + PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); + } + } + + PromotionQueue* promotion_queue() { return &promotion_queue_; } + + void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, + GCType gc_type_filter, bool pass_isolate = true); + void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback); + + void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, + GCType gc_type_filter, bool pass_isolate = true); + void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback); + +// Heap root getters. We have versions with and without type::cast() here. +// You can't use type::cast during GC because the assert fails. +// TODO(1490): Try removing the unchecked accessors, now that GC marking does +// not corrupt the map. 
+#define ROOT_ACCESSOR(type, name, camel_name) \ + type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \ + type* raw_unchecked_##name() { \ + return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ + } + ROOT_LIST(ROOT_ACCESSOR) +#undef ROOT_ACCESSOR + +// Utility type maps +#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \ + Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); } + STRUCT_LIST(STRUCT_MAP_ACCESSOR) +#undef STRUCT_MAP_ACCESSOR + +#define STRING_ACCESSOR(name, str) \ + String* name() { return String::cast(roots_[k##name##RootIndex]); } + INTERNALIZED_STRING_LIST(STRING_ACCESSOR) +#undef STRING_ACCESSOR + + // The hidden_string is special because it is the empty string, but does + // not match the empty string. + String* hidden_string() { return hidden_string_; } + + void set_native_contexts_list(Object* object) { + native_contexts_list_ = object; + } + Object* native_contexts_list() const { return native_contexts_list_; } + + void set_array_buffers_list(Object* object) { array_buffers_list_ = object; } + Object* array_buffers_list() const { return array_buffers_list_; } + + void set_allocation_sites_list(Object* object) { + allocation_sites_list_ = object; + } + Object* allocation_sites_list() { return allocation_sites_list_; } + + // Used in CreateAllocationSiteStub and the (de)serializer. + Object** allocation_sites_list_address() { return &allocation_sites_list_; } + + Object* weak_object_to_code_table() { return weak_object_to_code_table_; } + + void set_encountered_weak_collections(Object* weak_collection) { + encountered_weak_collections_ = weak_collection; + } + Object* encountered_weak_collections() const { + return encountered_weak_collections_; + } + + // Number of mark-sweeps. + unsigned int ms_count() { return ms_count_; } + + // Iterates over all roots in the heap. + void IterateRoots(ObjectVisitor* v, VisitMode mode); + // Iterates over all strong roots in the heap. 
+ void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); + // Iterates over entries in the smi roots list. Only interesting to the + // serializer/deserializer, since GC does not care about smis. + void IterateSmiRoots(ObjectVisitor* v); + // Iterates over all the other roots in the heap. + void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); + + // Iterate pointers to from semispace of new space found in memory interval + // from start to end. + void IterateAndMarkPointersToFromSpace(Address start, Address end, + ObjectSlotCallback callback); + + // Returns whether the object resides in new space. + inline bool InNewSpace(Object* object); + inline bool InNewSpace(Address address); + inline bool InNewSpacePage(Address address); + inline bool InFromSpace(Object* object); + inline bool InToSpace(Object* object); + + // Returns whether the object resides in old pointer space. + inline bool InOldPointerSpace(Address address); + inline bool InOldPointerSpace(Object* object); + + // Returns whether the object resides in old data space. + inline bool InOldDataSpace(Address address); + inline bool InOldDataSpace(Object* object); + + // Checks whether an address/object in the heap (including auxiliary + // area and unused area). + bool Contains(Address addr); + bool Contains(HeapObject* value); + + // Checks whether an address/object in a space. + // Currently used by tests, serialization and heap verification only. + bool InSpace(Address addr, AllocationSpace space); + bool InSpace(HeapObject* value, AllocationSpace space); + + // Finds out which space an object should get promoted to based on its type. + inline OldSpace* TargetSpace(HeapObject* object); + static inline AllocationSpace TargetSpaceId(InstanceType type); + + // Checks whether the given object is allowed to be migrated from it's + // current space into the given destination space. Used for debugging. 
+ inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); + + // Sets the stub_cache_ (only used when expanding the dictionary). + void public_set_code_stubs(UnseededNumberDictionary* value) { + roots_[kCodeStubsRootIndex] = value; + } + + // Support for computing object sizes for old objects during GCs. Returns + // a function that is guaranteed to be safe for computing object sizes in + // the current GC phase. + HeapObjectCallback GcSafeSizeOfOldObjectFunction() { + return gc_safe_size_of_old_object_; + } + + // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). + void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) { + roots_[kNonMonomorphicCacheRootIndex] = value; + } + + void public_set_empty_script(Script* script) { + roots_[kEmptyScriptRootIndex] = script; + } + + void public_set_store_buffer_top(Address* top) { + roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top); + } + + void public_set_materialized_objects(FixedArray* objects) { + roots_[kMaterializedObjectsRootIndex] = objects; + } + + // Generated code can embed this address to get access to the roots. + Object** roots_array_start() { return roots_; } + + Address* store_buffer_top_address() { + return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); + } + +#ifdef VERIFY_HEAP + // Verify the heap is in its normal state before or after a GC. + void Verify(); + + + bool weak_embedded_objects_verification_enabled() { + return no_weak_object_verification_scope_depth_ == 0; + } +#endif + +#ifdef DEBUG + void Print(); + void PrintHandles(); + + void OldPointerSpaceCheckStoreBuffer(); + void MapSpaceCheckStoreBuffer(); + void LargeObjectSpaceCheckStoreBuffer(); + + // Report heap statistics. + void ReportHeapStatistics(const char* title); + void ReportCodeStatistics(const char* title); +#endif + + // Zapping is needed for verify heap, and always done in debug builds. 
+ static inline bool ShouldZapGarbage() { +#ifdef DEBUG + return true; +#else +#ifdef VERIFY_HEAP + return FLAG_verify_heap; +#else + return false; +#endif +#endif + } + + // Number of "runtime allocations" done so far. + uint32_t allocations_count() { return allocations_count_; } + + // Returns deterministic "time" value in ms. Works only with + // FLAG_verify_predictable. + double synthetic_time() { return allocations_count_ / 100.0; } + + // Print short heap statistics. + void PrintShortHeapStatistics(); + + // Write barrier support for address[offset] = o. + INLINE(void RecordWrite(Address address, int offset)); + + // Write barrier support for address[start : start + len[ = o. + INLINE(void RecordWrites(Address address, int start, int len)); + + enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; + inline HeapState gc_state() { return gc_state_; } + + inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } + +#ifdef DEBUG + void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } + + void TracePathToObjectFrom(Object* target, Object* root); + void TracePathToObject(Object* target); + void TracePathToGlobal(); +#endif + + // Callback function passed to Heap::Iterate etc. Copies an object if + // necessary, the object might be promoted to an old space. The caller must + // ensure the precondition that the object is (a) a heap object and (b) in + // the heap's from space. + static inline void ScavengePointer(HeapObject** p); + static inline void ScavengeObject(HeapObject** p, HeapObject* object); + + enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; + + // If an object has an AllocationMemento trailing it, return it, otherwise + // return NULL; + inline AllocationMemento* FindAllocationMemento(HeapObject* object); + + // An object may have an AllocationSite associated with it through a trailing + // AllocationMemento. Its feedback should be updated when objects are found + // in the heap. 
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object, + ScratchpadSlotMode mode); + + // Support for partial snapshots. After calling this we have a linear + // space to write objects in each space. + void ReserveSpace(int* sizes, Address* addresses); + + // + // Support for the API. + // + + void CreateApiObjects(); + + inline intptr_t PromotedTotalSize() { + int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); + if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt); + if (total < 0) return 0; + return static_cast<intptr_t>(total); + } + + inline intptr_t OldGenerationSpaceAvailable() { + return old_generation_allocation_limit_ - PromotedTotalSize(); + } + + inline intptr_t OldGenerationCapacityAvailable() { + return max_old_generation_size_ - PromotedTotalSize(); + } + + static const intptr_t kMinimumOldGenerationAllocationLimit = + 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); + + static const int kPointerMultiplier = i::kPointerSize / 4; + + // The new space size has to be a power of 2. Sizes are in MB. + static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; + static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; + static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; + static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; + + // The old space size has to be a multiple of Page::kPageSize. + // Sizes are in MB. + static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; + static const int kMaxOldSpaceSizeMediumMemoryDevice = + 256 * kPointerMultiplier; + static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; + static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; + + // The executable size has to be a multiple of Page::kPageSize. + // Sizes are in MB. 
+ static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; + static const int kMaxExecutableSizeMediumMemoryDevice = + 192 * kPointerMultiplier; + static const int kMaxExecutableSizeHighMemoryDevice = + 256 * kPointerMultiplier; + static const int kMaxExecutableSizeHugeMemoryDevice = + 256 * kPointerMultiplier; + + intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size, + int freed_global_handles); + + // Indicates whether inline bump-pointer allocation has been disabled. + bool inline_allocation_disabled() { return inline_allocation_disabled_; } + + // Switch whether inline bump-pointer allocation should be used. + void EnableInlineAllocation(); + void DisableInlineAllocation(); + + // Implements the corresponding V8 API function. + bool IdleNotification(int hint); + + // Declare all the root indices. This defines the root list order. + enum RootListIndex { +#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, + STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) +#undef ROOT_INDEX_DECLARATION + +#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, + INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) +#undef STRING_DECLARATION + +// Utility type maps +#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, + STRUCT_LIST(DECLARE_STRUCT_MAP) +#undef DECLARE_STRUCT_MAP + kStringTableRootIndex, + +#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, + SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) +#undef ROOT_INDEX_DECLARATION + kRootListLength, + kStrongRootListLength = kStringTableRootIndex, + kSmiRootsStart = kStringTableRootIndex + 1 + }; + + STATIC_ASSERT(kUndefinedValueRootIndex == + Internals::kUndefinedValueRootIndex); + STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); + STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); + STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); + STATIC_ASSERT(kempty_stringRootIndex == 
Internals::kEmptyStringRootIndex); + + // Generated code can embed direct references to non-writable roots if + // they are in new space. + static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); + // Generated code can treat direct references to this root as constant. + bool RootCanBeTreatedAsConstant(RootListIndex root_index); + + Map* MapForFixedTypedArray(ExternalArrayType array_type); + RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type); + + Map* MapForExternalArrayType(ExternalArrayType array_type); + RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type); + + RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind); + RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); + ExternalArray* EmptyExternalArrayForMap(Map* map); + FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); + + void RecordStats(HeapStats* stats, bool take_snapshot = false); + + // Copy block of memory from src to dst. Size of block should be aligned + // by pointer size. + static inline void CopyBlock(Address dst, Address src, int byte_size); + + // Optimized version of memmove for blocks with pointer size aligned sizes and + // pointer size aligned addresses. + static inline void MoveBlock(Address dst, Address src, int byte_size); + + // Check new space expansion criteria and expand semispaces if it was hit. 
+ void CheckNewSpaceExpansionCriteria(); + + inline void IncrementPromotedObjectsSize(int object_size) { + DCHECK(object_size > 0); + promoted_objects_size_ += object_size; + } + + inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { + DCHECK(object_size > 0); + semi_space_copied_object_size_ += object_size; + } + + inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } + + inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } + + inline void IncrementNodesPromoted() { nodes_promoted_++; } + + inline void IncrementYoungSurvivorsCounter(int survived) { + DCHECK(survived >= 0); + survived_since_last_expansion_ += survived; + } + + inline bool NextGCIsLikelyToBeFull() { + if (FLAG_gc_global) return true; + + if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true; + + intptr_t adjusted_allocation_limit = + old_generation_allocation_limit_ - new_space_.Capacity(); + + if (PromotedTotalSize() >= adjusted_allocation_limit) return true; + + return false; + } + + void UpdateNewSpaceReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func); + + void UpdateReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func); + + void ProcessWeakReferences(WeakObjectRetainer* retainer); + + void VisitExternalResources(v8::ExternalResourceVisitor* visitor); + + // An object should be promoted if the object has survived a + // scavenge operation. + inline bool ShouldBePromoted(Address old_address, int object_size); + + void ClearJSFunctionResultCaches(); + + void ClearNormalizedMapCaches(); + + GCTracer* tracer() { return &tracer_; } + + // Returns the size of objects residing in non new spaces. 
+ intptr_t PromotedSpaceSizeOfObjects(); + + double total_regexp_code_generated() { return total_regexp_code_generated_; } + void IncreaseTotalRegexpCodeGenerated(int size) { + total_regexp_code_generated_ += size; + } + + void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { + if (is_crankshafted) { + crankshaft_codegen_bytes_generated_ += size; + } else { + full_codegen_bytes_generated_ += size; + } + } + + // Update GC statistics that are tracked on the Heap. + void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, + double marking_time); + + // Returns maximum GC pause. + double get_max_gc_pause() { return max_gc_pause_; } + + // Returns maximum size of objects alive after GC. + intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } + + // Returns minimal interval between two subsequent collections. + double get_min_in_mutator() { return min_in_mutator_; } + + MarkCompactCollector* mark_compact_collector() { + return &mark_compact_collector_; + } + + StoreBuffer* store_buffer() { return &store_buffer_; } + + Marking* marking() { return &marking_; } + + IncrementalMarking* incremental_marking() { return &incremental_marking_; } + + ExternalStringTable* external_string_table() { + return &external_string_table_; + } + + // Returns the current sweep generation. + int sweep_generation() { return sweep_generation_; } + + inline Isolate* isolate(); + + void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); + void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); + + inline bool OldGenerationAllocationLimitReached(); + + inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) { + scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); + } + + void QueueMemoryChunkForFree(MemoryChunk* chunk); + void FreeQueuedChunks(); + + int gc_count() const { return gc_count_; } + + // Completely clear the Instanceof cache (to stop it keeping objects alive + // around a GC). 
+ inline void CompletelyClearInstanceofCache(); + + // The roots that have an index less than this are always in old space. + static const int kOldSpaceRoots = 0x20; + + uint32_t HashSeed() { + uint32_t seed = static_cast<uint32_t>(hash_seed()->value()); + DCHECK(FLAG_randomize_hashes || seed == 0); + return seed; + } + + void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) { + DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0)); + set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset)); + } + + void SetConstructStubDeoptPCOffset(int pc_offset) { + DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0)); + set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); + } + + void SetGetterStubDeoptPCOffset(int pc_offset) { + DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0)); + set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); + } + + void SetSetterStubDeoptPCOffset(int pc_offset) { + DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0)); + set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); + } + + // For post mortem debugging. + void RememberUnmappedPage(Address page, bool compacted); + + // Global inline caching age: it is incremented on some GCs after context + // disposal. We use it to flush inline caches. + int global_ic_age() { return global_ic_age_; } + + void AgeInlineCaches() { + global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax; + } + + bool flush_monomorphic_ics() { return flush_monomorphic_ics_; } + + int64_t amount_of_external_allocated_memory() { + return amount_of_external_allocated_memory_; + } + + void DeoptMarkedAllocationSites(); + + bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } + + bool DeoptMaybeTenuredAllocationSites() { + return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; + } + + // ObjectStats are kept in two arrays, counts and sizes. Related stats are + // stored in a contiguous linear buffer. 
Stats groups are stored one after + // another. + enum { + FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, + FIRST_FIXED_ARRAY_SUB_TYPE = + FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, + FIRST_CODE_AGE_SUB_TYPE = + FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, + OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 + }; + + void RecordObjectStats(InstanceType type, size_t size) { + DCHECK(type <= LAST_TYPE); + object_counts_[type]++; + object_sizes_[type] += size; + } + + void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { + int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; + int code_age_index = + FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; + DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE && + code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE); + DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE && + code_age_index < OBJECT_STATS_COUNT); + object_counts_[code_sub_type_index]++; + object_sizes_[code_sub_type_index] += size; + object_counts_[code_age_index]++; + object_sizes_[code_age_index] += size; + } + + void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) { + DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); + object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; + object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; + } + + void CheckpointObjectStats(); + + // We don't use a LockGuard here since we want to lock the heap + // only when FLAG_concurrent_recompilation is true. 
+ class RelocationLock { + public: + explicit RelocationLock(Heap* heap) : heap_(heap) { + heap_->relocation_mutex_.Lock(); + } + + + ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } + + private: + Heap* heap_; + }; + + void AddWeakObjectToCodeDependency(Handle<Object> obj, + Handle<DependentCode> dep); + + DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj); + + void InitializeWeakObjectToCodeTable() { + set_weak_object_to_code_table(undefined_value()); + } + + void EnsureWeakObjectToCodeTable(); + + static void FatalProcessOutOfMemory(const char* location, + bool take_snapshot = false); + + // This event is triggered after successful allocation of a new object made + // by runtime. Allocations of target space for object evacuation do not + // trigger the event. In order to track ALL allocations one must turn off + // FLAG_inline_new and FLAG_use_allocation_folding. + inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); + + // This event is triggered after object is moved to a new place. + inline void OnMoveEvent(HeapObject* target, HeapObject* source, + int size_in_bytes); + + protected: + // Methods made available to tests. + + // Allocates a JS Map in the heap. + MUST_USE_RESULT AllocationResult + AllocateMap(InstanceType instance_type, int instance_size, + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); + + // Allocates and initializes a new JavaScript object based on a + // constructor. + // If allocation_site is non-null, then a memento is emitted after the object + // that points to the site. + MUST_USE_RESULT AllocationResult + AllocateJSObject(JSFunction* constructor, + PretenureFlag pretenure = NOT_TENURED, + AllocationSite* allocation_site = NULL); + + // Allocates and initializes a new JavaScript object based on a map. + // Passing an allocation site means that a memento will be created that + // points to the site. 
+ MUST_USE_RESULT AllocationResult + AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, + bool alloc_props = true, + AllocationSite* allocation_site = NULL); + + // Allocated a HeapNumber from value. + MUST_USE_RESULT AllocationResult + AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, + PretenureFlag pretenure = NOT_TENURED); + + // Allocate a byte array of the specified length + MUST_USE_RESULT AllocationResult + AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); + + // Copy the code and scope info part of the code object, but insert + // the provided data as the relocation information. + MUST_USE_RESULT AllocationResult + CopyCode(Code* code, Vector<byte> reloc_info); + + MUST_USE_RESULT AllocationResult CopyCode(Code* code); + + // Allocates a fixed array initialized with undefined values + MUST_USE_RESULT AllocationResult + AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); + + private: + Heap(); + + // The amount of external memory registered through the API kept alive + // by global handles + int64_t amount_of_external_allocated_memory_; + + // Caches the amount of external memory registered at the last global gc. + int64_t amount_of_external_allocated_memory_at_last_global_gc_; + + // This can be calculated directly from a pointer to the heap; however, it is + // more expedient to get at the isolate directly from within Heap methods. + Isolate* isolate_; + + Object* roots_[kRootListLength]; + + size_t code_range_size_; + int reserved_semispace_size_; + int max_semi_space_size_; + int initial_semispace_size_; + intptr_t max_old_generation_size_; + intptr_t max_executable_size_; + intptr_t maximum_committed_; + + // For keeping track of how much data has survived + // scavenge since last new space expansion. + int survived_since_last_expansion_; + + // For keeping track on when to flush RegExp code. 
+ int sweep_generation_; + + int always_allocate_scope_depth_; + + // For keeping track of context disposals. + int contexts_disposed_; + + int global_ic_age_; + + bool flush_monomorphic_ics_; + + int scan_on_scavenge_pages_; + + NewSpace new_space_; + OldSpace* old_pointer_space_; + OldSpace* old_data_space_; + OldSpace* code_space_; + MapSpace* map_space_; + CellSpace* cell_space_; + PropertyCellSpace* property_cell_space_; + LargeObjectSpace* lo_space_; + HeapState gc_state_; + int gc_post_processing_depth_; + Address new_space_top_after_last_gc_; + + // Returns the amount of external memory registered since last global gc. + int64_t PromotedExternalMemorySize(); + + // How many "runtime allocations" happened. + uint32_t allocations_count_; + + // Running hash over allocations performed. + uint32_t raw_allocations_hash_; + + // Countdown counter, dumps allocation hash when 0. + uint32_t dump_allocations_hash_countdown_; + + // How many mark-sweep collections happened. + unsigned int ms_count_; + + // How many gc happened. + unsigned int gc_count_; + + // For post mortem debugging. + static const int kRememberedUnmappedPages = 128; + int remembered_unmapped_pages_index_; + Address remembered_unmapped_pages_[kRememberedUnmappedPages]; + + // Total length of the strings we failed to flatten since the last GC. + int unflattened_strings_length_; + +#define ROOT_ACCESSOR(type, name, camel_name) \ + inline void set_##name(type* value) { \ + /* The deserializer makes use of the fact that these common roots are */ \ + /* never in new space and never on a page that is being compacted. */ \ + DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \ + roots_[k##camel_name##RootIndex] = value; \ + } + ROOT_LIST(ROOT_ACCESSOR) +#undef ROOT_ACCESSOR + +#ifdef DEBUG + // If the --gc-interval flag is set to a positive value, this + // variable holds the value indicating the number of allocations + // remain until the next failure and garbage collection. 
+ int allocation_timeout_; +#endif // DEBUG + + // Limit that triggers a global GC on the next (normally caused) GC. This + // is checked when we have already decided to do a GC to help determine + // which collector to invoke, before expanding a paged space in the old + // generation and on every allocation in large object space. + intptr_t old_generation_allocation_limit_; + + // Indicates that an allocation has failed in the old generation since the + // last GC. + bool old_gen_exhausted_; + + // Indicates that inline bump-pointer allocation has been globally disabled + // for all spaces. This is used to disable allocations in generated code. + bool inline_allocation_disabled_; + + // Weak list heads, threaded through the objects. + // List heads are initilized lazily and contain the undefined_value at start. + Object* native_contexts_list_; + Object* array_buffers_list_; + Object* allocation_sites_list_; + + // WeakHashTable that maps objects embedded in optimized code to dependent + // code list. It is initilized lazily and contains the undefined_value at + // start. + Object* weak_object_to_code_table_; + + // List of encountered weak collections (JSWeakMap and JSWeakSet) during + // marking. It is initialized during marking, destroyed after marking and + // contains Smi(0) while marking is not active. + Object* encountered_weak_collections_; + + StoreBufferRebuilder store_buffer_rebuilder_; + + struct StringTypeTable { + InstanceType type; + int size; + RootListIndex index; + }; + + struct ConstantStringTable { + const char* contents; + RootListIndex index; + }; + + struct StructTable { + InstanceType type; + int size; + RootListIndex index; + }; + + static const StringTypeTable string_type_table[]; + static const ConstantStringTable constant_string_table[]; + static const StructTable struct_table[]; + + // The special hidden string which is an empty string, but does not match + // any string when looked up in properties. 
+ String* hidden_string_; + + // GC callback function, called before and after mark-compact GC. + // Allocations in the callback function are disallowed. + struct GCPrologueCallbackPair { + GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback, + GCType gc_type, bool pass_isolate) + : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {} + bool operator==(const GCPrologueCallbackPair& pair) const { + return pair.callback == callback; + } + v8::Isolate::GCPrologueCallback callback; + GCType gc_type; + // TODO(dcarney): remove variable + bool pass_isolate_; + }; + List<GCPrologueCallbackPair> gc_prologue_callbacks_; + + struct GCEpilogueCallbackPair { + GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback, + GCType gc_type, bool pass_isolate) + : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {} + bool operator==(const GCEpilogueCallbackPair& pair) const { + return pair.callback == callback; + } + v8::Isolate::GCPrologueCallback callback; + GCType gc_type; + // TODO(dcarney): remove variable + bool pass_isolate_; + }; + List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; + + // Support for computing object sizes during GC. + HeapObjectCallback gc_safe_size_of_old_object_; + static int GcSafeSizeOfOldObject(HeapObject* object); + + // Update the GC state. Called from the mark-compact collector. + void MarkMapPointersAsEncoded(bool encoded) { + DCHECK(!encoded); + gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; + } + + // Code that should be run before and after each GC. Includes some + // reporting/verification activities when compiled with DEBUG set. + void GarbageCollectionPrologue(); + void GarbageCollectionEpilogue(); + + // Pretenuring decisions are made based on feedback collected during new + // space evacuation. Note that between feedback collection and calling this + // method object in old space must not move. + // Right now we only process pretenuring feedback in high promotion mode. 
+ void ProcessPretenuringFeedback(); + + // Checks whether a global GC is necessary + GarbageCollector SelectGarbageCollector(AllocationSpace space, + const char** reason); + + // Make sure there is a filler value behind the top of the new space + // so that the GC does not confuse some unintialized/stale memory + // with the allocation memento of the object at the top + void EnsureFillerObjectAtTop(); + + // Ensure that we have swept all spaces in such a way that we can iterate + // over all objects. May cause a GC. + void MakeHeapIterable(); + + // Performs garbage collection operation. + // Returns whether there is a chance that another major GC could + // collect more garbage. + bool CollectGarbage( + GarbageCollector collector, const char* gc_reason, + const char* collector_reason, + const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); + + // Performs garbage collection + // Returns whether there is a chance another major GC could + // collect more garbage. + bool PerformGarbageCollection( + GarbageCollector collector, + const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); + + inline void UpdateOldSpaceLimits(); + + // Selects the proper allocation space depending on the given object + // size, pretenuring decision, and preferred old-space. + static AllocationSpace SelectSpace(int object_size, + AllocationSpace preferred_old_space, + PretenureFlag pretenure) { + DCHECK(preferred_old_space == OLD_POINTER_SPACE || + preferred_old_space == OLD_DATA_SPACE); + if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; + return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE; + } + + // Allocate an uninitialized object. The memory is non-executable if the + // hardware and OS allow. This is the single choke-point for allocations + // performed by the runtime and should not be bypassed (to extend this to + // inlined allocations, use the Heap::DisableInlineAllocation() support). 
+ MUST_USE_RESULT inline AllocationResult AllocateRaw( + int size_in_bytes, AllocationSpace space, AllocationSpace retry_space); + + // Allocates a heap object based on the map. + MUST_USE_RESULT AllocationResult + Allocate(Map* map, AllocationSpace space, + AllocationSite* allocation_site = NULL); + + // Allocates a partial map for bootstrapping. + MUST_USE_RESULT AllocationResult + AllocatePartialMap(InstanceType instance_type, int instance_size); + + // Initializes a JSObject based on its map. + void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, + Map* map); + void InitializeAllocationMemento(AllocationMemento* memento, + AllocationSite* allocation_site); + + // Allocate a block of memory in the given space (filled with a filler). + // Used as a fall-back for generated code when the space is full. + MUST_USE_RESULT AllocationResult + AllocateFillerObject(int size, bool double_align, AllocationSpace space); + + // Allocate an uninitialized fixed array. + MUST_USE_RESULT AllocationResult + AllocateRawFixedArray(int length, PretenureFlag pretenure); + + // Allocate an uninitialized fixed double array. + MUST_USE_RESULT AllocationResult + AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); + + // Allocate an initialized fixed array with the given filler value. + MUST_USE_RESULT AllocationResult + AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, + Object* filler); + + // Allocate and partially initializes a String. There are two String + // encodings: ASCII and two byte. These functions allocate a string of the + // given length and set its map and length fields. The characters of the + // string are uninitialized. 
+ MUST_USE_RESULT AllocationResult + AllocateRawOneByteString(int length, PretenureFlag pretenure); + MUST_USE_RESULT AllocationResult + AllocateRawTwoByteString(int length, PretenureFlag pretenure); + + bool CreateInitialMaps(); + void CreateInitialObjects(); + + // Allocates an internalized string in old space based on the character + // stream. + MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( + Vector<const char> str, int chars, uint32_t hash_field); + + MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( + Vector<const uint8_t> str, uint32_t hash_field); + + MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( + Vector<const uc16> str, uint32_t hash_field); + + template <bool is_one_byte, typename T> + MUST_USE_RESULT AllocationResult + AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field); + + template <typename T> + MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl( + T t, int chars, uint32_t hash_field); + + // Allocates an uninitialized fixed array. It must be filled by the caller. + MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length); + + // Make a copy of src and return it. Returns + // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. + MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src); + + // Make a copy of src, set the map, and return the copy. Returns + // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. + MUST_USE_RESULT AllocationResult + CopyFixedArrayWithMap(FixedArray* src, Map* map); + + // Make a copy of src and return it. Returns + // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. + MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray( + FixedDoubleArray* src); + + // Make a copy of src and return it. Returns + // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. 
+ MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray( + ConstantPoolArray* src); + + + // Computes a single character string where the character has code. + // A cache is used for ASCII codes. + MUST_USE_RESULT AllocationResult + LookupSingleCharacterStringFromCode(uint16_t code); + + // Allocate a symbol in old space. + MUST_USE_RESULT AllocationResult AllocateSymbol(); + + // Make a copy of src, set the map, and return the copy. + MUST_USE_RESULT AllocationResult + CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map); + + MUST_USE_RESULT AllocationResult AllocateConstantPoolArray( + const ConstantPoolArray::NumberOfEntries& small); + + MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray( + const ConstantPoolArray::NumberOfEntries& small, + const ConstantPoolArray::NumberOfEntries& extended); + + // Allocates an external array of the specified length and type. + MUST_USE_RESULT AllocationResult + AllocateExternalArray(int length, ExternalArrayType array_type, + void* external_pointer, PretenureFlag pretenure); + + // Allocates a fixed typed array of the specified length and type. + MUST_USE_RESULT AllocationResult + AllocateFixedTypedArray(int length, ExternalArrayType array_type, + PretenureFlag pretenure); + + // Make a copy of src and return it. + MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); + + // Make a copy of src, set the map, and return the copy. + MUST_USE_RESULT AllocationResult + CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); + + // Allocates a fixed double array with uninitialized values. Returns + MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( + int length, PretenureFlag pretenure = NOT_TENURED); + + // These five Create*EntryStub functions are here and forced to not be inlined + // because of a gcc-4.4 bug that assigns wrong vtable entries. 
+ NO_INLINE(void CreateJSEntryStub()); + NO_INLINE(void CreateJSConstructEntryStub()); + + void CreateFixedStubs(); + + // Allocate empty fixed array. + MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); + + // Allocate empty external array of given type. + MUST_USE_RESULT AllocationResult + AllocateEmptyExternalArray(ExternalArrayType array_type); + + // Allocate empty fixed typed array of given type. + MUST_USE_RESULT AllocationResult + AllocateEmptyFixedTypedArray(ExternalArrayType array_type); + + // Allocate empty constant pool array. + MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray(); + + // Allocate a tenured simple cell. + MUST_USE_RESULT AllocationResult AllocateCell(Object* value); + + // Allocate a tenured JS global property cell initialized with the hole. + MUST_USE_RESULT AllocationResult AllocatePropertyCell(); + + // Allocates a new utility object in the old generation. + MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); + + // Allocates a new foreign object. + MUST_USE_RESULT AllocationResult + AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); + + MUST_USE_RESULT AllocationResult + AllocateCode(int object_size, bool immovable); + + MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); + + MUST_USE_RESULT AllocationResult InternalizeString(String* str); + + // Performs a minor collection in new generation. + void Scavenge(); + + // Commits from space if it is uncommitted. + void EnsureFromSpaceIsCommitted(); + + // Uncommit unused semi space. 
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } + + // Fill in bogus values in from space + void ZapFromSpace(); + + static String* UpdateNewSpaceReferenceInExternalStringTableEntry( + Heap* heap, Object** pointer); + + Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); + static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, + StoreBufferEvent event); + + // Performs a major collection in the whole heap. + void MarkCompact(); + + // Code to be run before and after mark-compact. + void MarkCompactPrologue(); + + void ProcessNativeContexts(WeakObjectRetainer* retainer); + void ProcessArrayBuffers(WeakObjectRetainer* retainer); + void ProcessAllocationSites(WeakObjectRetainer* retainer); + + // Deopts all code that contains allocation instruction which are tenured or + // not tenured. Moreover it clears the pretenuring allocation site statistics. + void ResetAllAllocationSitesDependentCode(PretenureFlag flag); + + // Evaluates local pretenuring for the old space and calls + // ResetAllTenuredAllocationSitesDependentCode if too many objects died in + // the old space. + void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); + + // Called on heap tear-down. + void TearDownArrayBuffers(); + + // Record statistics before and after garbage collection. + void ReportStatisticsBeforeGC(); + void ReportStatisticsAfterGC(); + + // Slow part of scavenge object. + static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); + + // Total RegExp code ever generated + double total_regexp_code_generated_; + + GCTracer tracer_; + + // Creates and installs the full-sized number string cache. + int FullSizeNumberStringCacheLength(); + // Flush the number to string cache. + void FlushNumberStringCache(); + + // Sets used allocation sites entries to undefined. + void FlushAllocationSitesScratchpad(); + + // Initializes the allocation sites scratchpad with undefined values. 
+ void InitializeAllocationSitesScratchpad(); + + // Adds an allocation site to the scratchpad if there is space left. + void AddAllocationSiteToScratchpad(AllocationSite* site, + ScratchpadSlotMode mode); + + void UpdateSurvivalStatistics(int start_new_space_size); + + static const int kYoungSurvivalRateHighThreshold = 90; + static const int kYoungSurvivalRateAllowedDeviation = 15; + + static const int kOldSurvivalRateLowThreshold = 10; + + int high_survival_rate_period_length_; + intptr_t promoted_objects_size_; + double promotion_rate_; + intptr_t semi_space_copied_object_size_; + double semi_space_copied_rate_; + int nodes_died_in_new_space_; + int nodes_copied_in_new_space_; + int nodes_promoted_; + + // This is the pretenuring trigger for allocation sites that are in maybe + // tenure state. When we switched to the maximum new space size we deoptimize + // the code that belongs to the allocation site and derive the lifetime + // of the allocation site. + unsigned int maximum_size_scavenges_; + + // TODO(hpayer): Allocation site pretenuring may make this method obsolete. + // Re-visit incremental marking heuristics. + bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } + + void SelectScavengingVisitorsTable(); + + void StartIdleRound() { mark_sweeps_since_idle_round_started_ = 0; } + + void FinishIdleRound() { + mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound; + scavenges_since_last_idle_round_ = 0; + } + + bool EnoughGarbageSinceLastIdleRound() { + return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold); + } + + // Estimates how many milliseconds a Mark-Sweep would take to complete. + // In idle notification handler we assume that this function will return: + // - a number less than 10 for small heaps, which are less than 8Mb. + // - a number greater than 10 for large heaps, which are greater than 32Mb. 
+ int TimeMarkSweepWouldTakeInMs() { + // Rough estimate of how many megabytes of heap can be processed in 1 ms. + static const int kMbPerMs = 2; + + int heap_size_mb = static_cast<int>(SizeOfObjects() / MB); + return heap_size_mb / kMbPerMs; + } + + void AdvanceIdleIncrementalMarking(intptr_t step_size); + + void ClearObjectStats(bool clear_last_time_stats = false); + + void set_weak_object_to_code_table(Object* value) { + DCHECK(!InNewSpace(value)); + weak_object_to_code_table_ = value; + } + + Object** weak_object_to_code_table_address() { + return &weak_object_to_code_table_; + } + + inline void UpdateAllocationsHash(HeapObject* object); + inline void UpdateAllocationsHash(uint32_t value); + inline void PrintAlloctionsHash(); + + static const int kInitialStringTableSize = 2048; + static const int kInitialEvalCacheSize = 64; + static const int kInitialNumberStringCacheSize = 256; + + // Object counts and used memory by InstanceType + size_t object_counts_[OBJECT_STATS_COUNT]; + size_t object_counts_last_time_[OBJECT_STATS_COUNT]; + size_t object_sizes_[OBJECT_STATS_COUNT]; + size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; + + // Maximum GC pause. + double max_gc_pause_; + + // Total time spent in GC. + double total_gc_time_ms_; + + // Maximum size of objects alive after GC. + intptr_t max_alive_after_gc_; + + // Minimal interval between two subsequent collections. 
+ double min_in_mutator_; + + // Cumulative GC time spent in marking + double marking_time_; + + // Cumulative GC time spent in sweeping + double sweeping_time_; + + MarkCompactCollector mark_compact_collector_; + + StoreBuffer store_buffer_; + + Marking marking_; + + IncrementalMarking incremental_marking_; + + int number_idle_notifications_; + unsigned int last_idle_notification_gc_count_; + bool last_idle_notification_gc_count_init_; + + int mark_sweeps_since_idle_round_started_; + unsigned int gc_count_at_last_idle_gc_; + int scavenges_since_last_idle_round_; + + // These two counters are monotomically increasing and never reset. + size_t full_codegen_bytes_generated_; + size_t crankshaft_codegen_bytes_generated_; + + // If the --deopt_every_n_garbage_collections flag is set to a positive value, + // this variable holds the number of garbage collections since the last + // deoptimization triggered by garbage collection. + int gcs_since_last_deopt_; + +#ifdef VERIFY_HEAP + int no_weak_object_verification_scope_depth_; +#endif + + static const int kAllocationSiteScratchpadSize = 256; + int allocation_sites_scratchpad_length_; + + static const int kMaxMarkSweepsInIdleRound = 7; + static const int kIdleScavengeThreshold = 5; + + // Shared state read by the scavenge collector and set by ScavengeObject. + PromotionQueue promotion_queue_; + + // Flag is set when the heap has been configured. The heap can be repeatedly + // configured through the API until it is set up. 
+ bool configured_; + + ExternalStringTable external_string_table_; + + VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; + + MemoryChunk* chunks_queued_for_free_; + + base::Mutex relocation_mutex_; + + int gc_callbacks_depth_; + + friend class AlwaysAllocateScope; + friend class Factory; + friend class GCCallbacksScope; + friend class GCTracer; + friend class HeapIterator; + friend class Isolate; + friend class MarkCompactCollector; + friend class MarkCompactMarkingVisitor; + friend class MapCompact; +#ifdef VERIFY_HEAP + friend class NoWeakObjectVerificationScope; +#endif + friend class Page; + + DISALLOW_COPY_AND_ASSIGN(Heap); +}; + + +class HeapStats { + public: + static const int kStartMarker = 0xDECADE00; + static const int kEndMarker = 0xDECADE01; + + int* start_marker; // 0 + int* new_space_size; // 1 + int* new_space_capacity; // 2 + intptr_t* old_pointer_space_size; // 3 + intptr_t* old_pointer_space_capacity; // 4 + intptr_t* old_data_space_size; // 5 + intptr_t* old_data_space_capacity; // 6 + intptr_t* code_space_size; // 7 + intptr_t* code_space_capacity; // 8 + intptr_t* map_space_size; // 9 + intptr_t* map_space_capacity; // 10 + intptr_t* cell_space_size; // 11 + intptr_t* cell_space_capacity; // 12 + intptr_t* lo_space_size; // 13 + int* global_handle_count; // 14 + int* weak_global_handle_count; // 15 + int* pending_global_handle_count; // 16 + int* near_death_global_handle_count; // 17 + int* free_global_handle_count; // 18 + intptr_t* memory_allocator_size; // 19 + intptr_t* memory_allocator_capacity; // 20 + int* objects_per_type; // 21 + int* size_per_type; // 22 + int* os_error; // 23 + int* end_marker; // 24 + intptr_t* property_cell_space_size; // 25 + intptr_t* property_cell_space_capacity; // 26 +}; + + +class AlwaysAllocateScope { + public: + explicit inline AlwaysAllocateScope(Isolate* isolate); + inline ~AlwaysAllocateScope(); + + private: + // Implicitly disable artificial allocation failures. 
+ Heap* heap_; + DisallowAllocationFailure daf_; +}; + + +#ifdef VERIFY_HEAP +class NoWeakObjectVerificationScope { + public: + inline NoWeakObjectVerificationScope(); + inline ~NoWeakObjectVerificationScope(); +}; +#endif + + +class GCCallbacksScope { + public: + explicit inline GCCallbacksScope(Heap* heap); + inline ~GCCallbacksScope(); + + inline bool CheckReenter(); + + private: + Heap* heap_; +}; + + +// Visitor class to verify interior pointers in spaces that do not contain +// or care about intergenerational references. All heap object pointers have to +// point into the heap to a location that has a map pointer at its first word. +// Caveat: Heap::Contains is an approximation because it can return true for +// objects in a heap space but above the allocation pointer. +class VerifyPointersVisitor : public ObjectVisitor { + public: + inline void VisitPointers(Object** start, Object** end); +}; + + +// Verify that all objects are Smis. +class VerifySmisVisitor : public ObjectVisitor { + public: + inline void VisitPointers(Object** start, Object** end); +}; + + +// Space iterator for iterating over all spaces of the heap. Returns each space +// in turn, and null when it is done. +class AllSpaces BASE_EMBEDDED { + public: + explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {} + Space* next(); + + private: + Heap* heap_; + int counter_; +}; + + +// Space iterator for iterating over all old spaces of the heap: Old pointer +// space, old data space and code space. Returns each space in turn, and null +// when it is done. +class OldSpaces BASE_EMBEDDED { + public: + explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} + OldSpace* next(); + + private: + Heap* heap_; + int counter_; +}; + + +// Space iterator for iterating over all the paged spaces of the heap: Map +// space, old pointer space, old data space, code space and cell space. Returns +// each space in turn, and null when it is done. 
+class PagedSpaces BASE_EMBEDDED { + public: + explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} + PagedSpace* next(); + + private: + Heap* heap_; + int counter_; +}; + + +// Space iterator for iterating over all spaces of the heap. +// For each space an object iterator is provided. The deallocation of the +// returned object iterators is handled by the space iterator. +class SpaceIterator : public Malloced { + public: + explicit SpaceIterator(Heap* heap); + SpaceIterator(Heap* heap, HeapObjectCallback size_func); + virtual ~SpaceIterator(); + + bool has_next(); + ObjectIterator* next(); + + private: + ObjectIterator* CreateIterator(); + + Heap* heap_; + int current_space_; // from enum AllocationSpace. + ObjectIterator* iterator_; // object iterator for the current space. + HeapObjectCallback size_func_; +}; + + +// A HeapIterator provides iteration over the whole heap. It +// aggregates the specific iterators for the different spaces as +// these can only iterate over one space only. +// +// HeapIterator ensures there is no allocation during its lifetime +// (using an embedded DisallowHeapAllocation instance). +// +// HeapIterator can skip free list nodes (that is, de-allocated heap +// objects that still remain in the heap). As implementation of free +// nodes filtering uses GC marks, it can't be used during MS/MC GC +// phases. Also, it is forbidden to interrupt iteration in this mode, +// as this will leave heap objects marked (and thus, unusable). +class HeapObjectsFilter; + +class HeapIterator BASE_EMBEDDED { + public: + enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable }; + + explicit HeapIterator(Heap* heap); + HeapIterator(Heap* heap, HeapObjectsFiltering filtering); + ~HeapIterator(); + + HeapObject* next(); + void reset(); + + private: + struct MakeHeapIterableHelper { + explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); } + }; + + // Perform the initialization. 
+ void Init(); + // Perform all necessary shutdown (destruction) work. + void Shutdown(); + HeapObject* NextObject(); + + MakeHeapIterableHelper make_heap_iterable_helper_; + DisallowHeapAllocation no_heap_allocation_; + Heap* heap_; + HeapObjectsFiltering filtering_; + HeapObjectsFilter* filter_; + // Space iterator for iterating all the spaces. + SpaceIterator* space_iterator_; + // Object iterator for the space currently being iterated. + ObjectIterator* object_iterator_; +}; + + +// Cache for mapping (map, property name) into field offset. +// Cleared at startup and prior to mark sweep collection. +class KeyedLookupCache { + public: + // Lookup field offset for (map, name). If absent, -1 is returned. + int Lookup(Handle<Map> map, Handle<Name> name); + + // Update an element in the cache. + void Update(Handle<Map> map, Handle<Name> name, int field_offset); + + // Clear the cache. + void Clear(); + + static const int kLength = 256; + static const int kCapacityMask = kLength - 1; + static const int kMapHashShift = 5; + static const int kHashMask = -4; // Zero the last two bits. + static const int kEntriesPerBucket = 4; + static const int kEntryLength = 2; + static const int kMapIndex = 0; + static const int kKeyIndex = 1; + static const int kNotFound = -1; + + // kEntriesPerBucket should be a power of 2. + STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0); + STATIC_ASSERT(kEntriesPerBucket == -kHashMask); + + private: + KeyedLookupCache() { + for (int i = 0; i < kLength; ++i) { + keys_[i].map = NULL; + keys_[i].name = NULL; + field_offsets_[i] = kNotFound; + } + } + + static inline int Hash(Handle<Map> map, Handle<Name> name); + + // Get the address of the keys and field_offsets arrays. Used in + // generated code to perform cache lookups. 
+ Address keys_address() { return reinterpret_cast<Address>(&keys_); } + + Address field_offsets_address() { + return reinterpret_cast<Address>(&field_offsets_); + } + + struct Key { + Map* map; + Name* name; + }; + + Key keys_[kLength]; + int field_offsets_[kLength]; + + friend class ExternalReference; + friend class Isolate; + DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache); +}; + + +// Cache for mapping (map, property name) into descriptor index. +// The cache contains both positive and negative results. +// Descriptor index equals kNotFound means the property is absent. +// Cleared at startup and prior to any gc. +class DescriptorLookupCache { + public: + // Lookup descriptor index for (map, name). + // If absent, kAbsent is returned. + int Lookup(Map* source, Name* name) { + if (!name->IsUniqueName()) return kAbsent; + int index = Hash(source, name); + Key& key = keys_[index]; + if ((key.source == source) && (key.name == name)) return results_[index]; + return kAbsent; + } + + // Update an element in the cache. + void Update(Map* source, Name* name, int result) { + DCHECK(result != kAbsent); + if (name->IsUniqueName()) { + int index = Hash(source, name); + Key& key = keys_[index]; + key.source = source; + key.name = name; + results_[index] = result; + } + } + + // Clear the cache. + void Clear(); + + static const int kAbsent = -2; + + private: + DescriptorLookupCache() { + for (int i = 0; i < kLength; ++i) { + keys_[i].source = NULL; + keys_[i].name = NULL; + results_[i] = kAbsent; + } + } + + static int Hash(Object* source, Name* name) { + // Uses only lower 32 bits if pointers are larger. 
+ uint32_t source_hash = + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >> + kPointerSizeLog2; + uint32_t name_hash = + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> + kPointerSizeLog2; + return (source_hash ^ name_hash) % kLength; + } + + static const int kLength = 64; + struct Key { + Map* source; + Name* name; + }; + + Key keys_[kLength]; + int results_[kLength]; + + friend class Isolate; + DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache); +}; + + +class RegExpResultsCache { + public: + enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS }; + + // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi. + // On success, the returned result is guaranteed to be a COW-array. + static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern, + ResultsCacheType type); + // Attempt to add value_array to the cache specified by type. On success, + // value_array is turned into a COW-array. + static void Enter(Isolate* isolate, Handle<String> key_string, + Handle<Object> key_pattern, Handle<FixedArray> value_array, + ResultsCacheType type); + static void Clear(FixedArray* cache); + static const int kRegExpResultsCacheSize = 0x100; + + private: + static const int kArrayEntriesPerCacheEntry = 4; + static const int kStringOffset = 0; + static const int kPatternOffset = 1; + static const int kArrayOffset = 2; +}; + + +// Abstract base class for checking whether a weak object should be retained. +class WeakObjectRetainer { + public: + virtual ~WeakObjectRetainer() {} + + // Return whether this object should be retained. If NULL is returned the + // object has no references. Otherwise the address of the retained object + // should be returned as in some GC situations the object has been moved. + virtual Object* RetainAs(Object* object) = 0; +}; + + +// Intrusive object marking uses least significant bit of +// heap object's map word to mark objects. 
+// Normally all map words have least significant bit set +// because they contain tagged map pointer. +// If the bit is not set object is marked. +// All objects should be unmarked before resuming +// JavaScript execution. +class IntrusiveMarking { + public: + static bool IsMarked(HeapObject* object) { + return (object->map_word().ToRawValue() & kNotMarkedBit) == 0; + } + + static void ClearMark(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit)); + DCHECK(!IsMarked(object)); + } + + static void SetMark(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit)); + DCHECK(IsMarked(object)); + } + + static Map* MapOfMarkedObject(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap(); + } + + static int SizeOfMarkedObject(HeapObject* object) { + return object->SizeFromMap(MapOfMarkedObject(object)); + } + + private: + static const uintptr_t kNotMarkedBit = 0x1; + STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); // NOLINT +}; + + +#ifdef DEBUG +// Helper class for tracing paths to a search target Object from all roots. +// The TracePathFrom() method can be used to trace paths from a specific +// object to the search target object. +class PathTracer : public ObjectVisitor { + public: + enum WhatToFind { + FIND_ALL, // Will find all matches. + FIND_FIRST // Will stop the search after first match. + }; + + // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. + static const int kMarkTag = 2; + + // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop + // after the first match. If FIND_ALL is specified, then tracing will be + // done for all matches. 
+ PathTracer(Object* search_target, WhatToFind what_to_find, + VisitMode visit_mode) + : search_target_(search_target), + found_target_(false), + found_target_in_trace_(false), + what_to_find_(what_to_find), + visit_mode_(visit_mode), + object_stack_(20), + no_allocation() {} + + virtual void VisitPointers(Object** start, Object** end); + + void Reset(); + void TracePathFrom(Object** root); + + bool found() const { return found_target_; } + + static Object* const kAnyGlobalObject; + + protected: + class MarkVisitor; + class UnmarkVisitor; + + void MarkRecursively(Object** p, MarkVisitor* mark_visitor); + void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor); + virtual void ProcessResults(); + + Object* search_target_; + bool found_target_; + bool found_target_in_trace_; + WhatToFind what_to_find_; + VisitMode visit_mode_; + List<Object*> object_stack_; + + DisallowHeapAllocation no_allocation; // i.e. no gc allowed. + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); +}; +#endif // DEBUG +} +} // namespace v8::internal + +#endif // V8_HEAP_HEAP_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/heap-inl.h nodejs-0.11.15/deps/v8/src/heap/heap-inl.h --- nodejs-0.11.13/deps/v8/src/heap/heap-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/heap-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,789 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_HEAP_HEAP_INL_H_ +#define V8_HEAP_HEAP_INL_H_ + +#include <cmath> + +#include "src/base/platform/platform.h" +#include "src/cpu-profiler.h" +#include "src/heap/heap.h" +#include "src/heap/store-buffer.h" +#include "src/heap/store-buffer-inl.h" +#include "src/heap-profiler.h" +#include "src/isolate.h" +#include "src/list-inl.h" +#include "src/objects.h" + +namespace v8 { +namespace internal { + +void PromotionQueue::insert(HeapObject* target, int size) { + if (emergency_stack_ != NULL) { + emergency_stack_->Add(Entry(target, size)); + return; + } + + if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) { + NewSpacePage* rear_page = + NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_)); + DCHECK(!rear_page->prev_page()->is_anchor()); + rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end()); + ActivateGuardIfOnTheSamePage(); + } + + if (guard_) { + DCHECK(GetHeadPage() == + Page::FromAllocationTop(reinterpret_cast<Address>(limit_))); + + if ((rear_ - 2) < limit_) { + RelocateQueueHead(); + emergency_stack_->Add(Entry(target, size)); + return; + } + } + + *(--rear_) = reinterpret_cast<intptr_t>(target); + *(--rear_) = size; +// Assert no overflow into live objects. +#ifdef DEBUG + SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(), + reinterpret_cast<Address>(rear_)); +#endif +} + + +void PromotionQueue::ActivateGuardIfOnTheSamePage() { + guard_ = guard_ || + heap_->new_space()->active_space()->current_page()->address() == + GetHeadPage()->address(); +} + + +template <> +bool inline Heap::IsOneByte(Vector<const char> str, int chars) { + // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported? + // ASCII only check. 
+ return chars == str.length(); +} + + +template <> +bool inline Heap::IsOneByte(String* str, int chars) { + return str->IsOneByteRepresentation(); +} + + +AllocationResult Heap::AllocateInternalizedStringFromUtf8( + Vector<const char> str, int chars, uint32_t hash_field) { + if (IsOneByte(str, chars)) { + return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str), + hash_field); + } + return AllocateInternalizedStringImpl<false>(str, chars, hash_field); +} + + +template <typename T> +AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars, + uint32_t hash_field) { + if (IsOneByte(t, chars)) { + return AllocateInternalizedStringImpl<true>(t, chars, hash_field); + } + return AllocateInternalizedStringImpl<false>(t, chars, hash_field); +} + + +AllocationResult Heap::AllocateOneByteInternalizedString( + Vector<const uint8_t> str, uint32_t hash_field) { + CHECK_GE(String::kMaxLength, str.length()); + // Compute map and object size. + Map* map = ascii_internalized_string_map(); + int size = SeqOneByteString::SizeFor(str.length()); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); + + // Allocate string. + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + // String maps are all immortal immovable objects. + result->set_map_no_write_barrier(map); + // Set length and hash fields of the allocated string. + String* answer = String::cast(result); + answer->set_length(str.length()); + answer->set_hash_field(hash_field); + + DCHECK_EQ(size, answer->Size()); + + // Fill in the characters. + MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(), + str.length()); + + return answer; +} + + +AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str, + uint32_t hash_field) { + CHECK_GE(String::kMaxLength, str.length()); + // Compute map and object size. 
+ Map* map = internalized_string_map(); + int size = SeqTwoByteString::SizeFor(str.length()); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); + + // Allocate string. + HeapObject* result; + { + AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!allocation.To(&result)) return allocation; + } + + result->set_map(map); + // Set length and hash fields of the allocated string. + String* answer = String::cast(result); + answer->set_length(str.length()); + answer->set_hash_field(hash_field); + + DCHECK_EQ(size, answer->Size()); + + // Fill in the characters. + MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(), + str.length() * kUC16Size); + + return answer; +} + +AllocationResult Heap::CopyFixedArray(FixedArray* src) { + if (src->length() == 0) return src; + return CopyFixedArrayWithMap(src, src->map()); +} + + +AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) { + if (src->length() == 0) return src; + return CopyFixedDoubleArrayWithMap(src, src->map()); +} + + +AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) { + if (src->length() == 0) return src; + return CopyConstantPoolArrayWithMap(src, src->map()); +} + + +AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space, + AllocationSpace retry_space) { + DCHECK(AllowHandleAllocation::IsAllowed()); + DCHECK(AllowHeapAllocation::IsAllowed()); + DCHECK(gc_state_ == NOT_IN_GC); +#ifdef DEBUG + if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) && + Heap::allocation_timeout_-- <= 0) { + return AllocationResult::Retry(space); + } + isolate_->counters()->objs_since_last_full()->Increment(); + isolate_->counters()->objs_since_last_young()->Increment(); +#endif + + HeapObject* object; + AllocationResult allocation; + if (NEW_SPACE == space) { + allocation = new_space_.AllocateRaw(size_in_bytes); + if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) { + space = 
retry_space; + } else { + if (allocation.To(&object)) { + OnAllocationEvent(object, size_in_bytes); + } + return allocation; + } + } + + if (OLD_POINTER_SPACE == space) { + allocation = old_pointer_space_->AllocateRaw(size_in_bytes); + } else if (OLD_DATA_SPACE == space) { + allocation = old_data_space_->AllocateRaw(size_in_bytes); + } else if (CODE_SPACE == space) { + if (size_in_bytes <= code_space()->AreaSize()) { + allocation = code_space_->AllocateRaw(size_in_bytes); + } else { + // Large code objects are allocated in large object space. + allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE); + } + } else if (LO_SPACE == space) { + allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE); + } else if (CELL_SPACE == space) { + allocation = cell_space_->AllocateRaw(size_in_bytes); + } else if (PROPERTY_CELL_SPACE == space) { + allocation = property_cell_space_->AllocateRaw(size_in_bytes); + } else { + DCHECK(MAP_SPACE == space); + allocation = map_space_->AllocateRaw(size_in_bytes); + } + if (allocation.To(&object)) { + OnAllocationEvent(object, size_in_bytes); + } else { + old_gen_exhausted_ = true; + } + return allocation; +} + + +void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) { + HeapProfiler* profiler = isolate_->heap_profiler(); + if (profiler->is_tracking_allocations()) { + profiler->AllocationEvent(object->address(), size_in_bytes); + } + + if (FLAG_verify_predictable) { + ++allocations_count_; + + UpdateAllocationsHash(object); + UpdateAllocationsHash(size_in_bytes); + + if ((FLAG_dump_allocations_digest_at_alloc > 0) && + (--dump_allocations_hash_countdown_ == 0)) { + dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc; + PrintAlloctionsHash(); + } + } +} + + +void Heap::OnMoveEvent(HeapObject* target, HeapObject* source, + int size_in_bytes) { + HeapProfiler* heap_profiler = isolate_->heap_profiler(); + if (heap_profiler->is_tracking_object_moves()) { + 
heap_profiler->ObjectMoveEvent(source->address(), target->address(), + size_in_bytes); + } + + if (isolate_->logger()->is_logging_code_events() || + isolate_->cpu_profiler()->is_profiling()) { + if (target->IsSharedFunctionInfo()) { + PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(), + target->address())); + } + } + + if (FLAG_verify_predictable) { + ++allocations_count_; + + UpdateAllocationsHash(source); + UpdateAllocationsHash(target); + UpdateAllocationsHash(size_in_bytes); + + if ((FLAG_dump_allocations_digest_at_alloc > 0) && + (--dump_allocations_hash_countdown_ == 0)) { + dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc; + PrintAlloctionsHash(); + } + } +} + + +void Heap::UpdateAllocationsHash(HeapObject* object) { + Address object_address = object->address(); + MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address); + AllocationSpace allocation_space = memory_chunk->owner()->identity(); + + STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32); + uint32_t value = + static_cast<uint32_t>(object_address - memory_chunk->address()) | + (static_cast<uint32_t>(allocation_space) << kPageSizeBits); + + UpdateAllocationsHash(value); +} + + +void Heap::UpdateAllocationsHash(uint32_t value) { + uint16_t c1 = static_cast<uint16_t>(value); + uint16_t c2 = static_cast<uint16_t>(value >> 16); + raw_allocations_hash_ = + StringHasher::AddCharacterCore(raw_allocations_hash_, c1); + raw_allocations_hash_ = + StringHasher::AddCharacterCore(raw_allocations_hash_, c2); +} + + +void Heap::PrintAlloctionsHash() { + uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_); + PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash); +} + + +void Heap::FinalizeExternalString(String* string) { + DCHECK(string->IsExternalString()); + v8::String::ExternalStringResourceBase** resource_addr = + reinterpret_cast<v8::String::ExternalStringResourceBase**>( + reinterpret_cast<byte*>(string) + 
ExternalString::kResourceOffset - + kHeapObjectTag); + + // Dispose of the C++ object if it has not already been disposed. + if (*resource_addr != NULL) { + (*resource_addr)->Dispose(); + *resource_addr = NULL; + } +} + + +bool Heap::InNewSpace(Object* object) { + bool result = new_space_.Contains(object); + DCHECK(!result || // Either not in new space + gc_state_ != NOT_IN_GC || // ... or in the middle of GC + InToSpace(object)); // ... or in to-space (where we allocate). + return result; +} + + +bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); } + + +bool Heap::InFromSpace(Object* object) { + return new_space_.FromSpaceContains(object); +} + + +bool Heap::InToSpace(Object* object) { + return new_space_.ToSpaceContains(object); +} + + +bool Heap::InOldPointerSpace(Address address) { + return old_pointer_space_->Contains(address); +} + + +bool Heap::InOldPointerSpace(Object* object) { + return InOldPointerSpace(reinterpret_cast<Address>(object)); +} + + +bool Heap::InOldDataSpace(Address address) { + return old_data_space_->Contains(address); +} + + +bool Heap::InOldDataSpace(Object* object) { + return InOldDataSpace(reinterpret_cast<Address>(object)); +} + + +bool Heap::OldGenerationAllocationLimitReached() { + if (!incremental_marking()->IsStopped()) return false; + return OldGenerationSpaceAvailable() < 0; +} + + +bool Heap::ShouldBePromoted(Address old_address, int object_size) { + NewSpacePage* page = NewSpacePage::FromAddress(old_address); + Address age_mark = new_space_.age_mark(); + return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && + (!page->ContainsLimit(age_mark) || old_address < age_mark); +} + + +void Heap::RecordWrite(Address address, int offset) { + if (!InNewSpace(address)) store_buffer_.Mark(address + offset); +} + + +void Heap::RecordWrites(Address address, int start, int len) { + if (!InNewSpace(address)) { + for (int i = 0; i < len; i++) { + store_buffer_.Mark(address + start + i * kPointerSize); + } + 
} +} + + +OldSpace* Heap::TargetSpace(HeapObject* object) { + InstanceType type = object->map()->instance_type(); + AllocationSpace space = TargetSpaceId(type); + return (space == OLD_POINTER_SPACE) ? old_pointer_space_ : old_data_space_; +} + + +AllocationSpace Heap::TargetSpaceId(InstanceType type) { + // Heap numbers and sequential strings are promoted to old data space, all + // other object types are promoted to old pointer space. We do not use + // object->IsHeapNumber() and object->IsSeqString() because we already + // know that object has the heap object tag. + + // These objects are never allocated in new space. + DCHECK(type != MAP_TYPE); + DCHECK(type != CODE_TYPE); + DCHECK(type != ODDBALL_TYPE); + DCHECK(type != CELL_TYPE); + DCHECK(type != PROPERTY_CELL_TYPE); + + if (type <= LAST_NAME_TYPE) { + if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE; + DCHECK(type < FIRST_NONSTRING_TYPE); + // There are four string representations: sequential strings, external + // strings, cons strings, and sliced strings. + // Only the latter two contain non-map-word pointers to heap objects. + return ((type & kIsIndirectStringMask) == kIsIndirectStringTag) + ? OLD_POINTER_SPACE + : OLD_DATA_SPACE; + } else { + return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE; + } +} + + +bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) { + // Object migration is governed by the following rules: + // + // 1) Objects in new-space can be migrated to one of the old spaces + // that matches their target space or they stay in new-space. + // 2) Objects in old-space stay in the same space when migrating. + // 3) Fillers (two or more words) can migrate due to left-trimming of + // fixed arrays in new-space, old-data-space and old-pointer-space. + // 4) Fillers (one word) can never migrate, they are skipped by + // incremental marking explicitly to prevent invalid pattern. 
+ // 5) Short external strings can end up in old pointer space when a cons + // string in old pointer space is made external (String::MakeExternal). + // + // Since this function is used for debugging only, we do not place + // asserts here, but check everything explicitly. + if (obj->map() == one_pointer_filler_map()) return false; + InstanceType type = obj->map()->instance_type(); + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + AllocationSpace src = chunk->owner()->identity(); + switch (src) { + case NEW_SPACE: + return dst == src || dst == TargetSpaceId(type); + case OLD_POINTER_SPACE: + return dst == src && (dst == TargetSpaceId(type) || obj->IsFiller() || + (obj->IsExternalString() && + ExternalString::cast(obj)->is_short())); + case OLD_DATA_SPACE: + return dst == src && dst == TargetSpaceId(type); + case CODE_SPACE: + return dst == src && type == CODE_TYPE; + case MAP_SPACE: + case CELL_SPACE: + case PROPERTY_CELL_SPACE: + case LO_SPACE: + return false; + case INVALID_SPACE: + break; + } + UNREACHABLE(); + return false; +} + + +void Heap::CopyBlock(Address dst, Address src, int byte_size) { + CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src), + static_cast<size_t>(byte_size / kPointerSize)); +} + + +void Heap::MoveBlock(Address dst, Address src, int byte_size) { + DCHECK(IsAligned(byte_size, kPointerSize)); + + int size_in_words = byte_size / kPointerSize; + + if ((dst < src) || (dst >= (src + byte_size))) { + Object** src_slot = reinterpret_cast<Object**>(src); + Object** dst_slot = reinterpret_cast<Object**>(dst); + Object** end_slot = src_slot + size_in_words; + + while (src_slot != end_slot) { + *dst_slot++ = *src_slot++; + } + } else { + MemMove(dst, src, static_cast<size_t>(byte_size)); + } +} + + +void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); } + + +AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) { + // Check if there is potentially a memento behind the object. 
If + // the last word of the momento is on another page we return + // immediately. + Address object_address = object->address(); + Address memento_address = object_address + object->Size(); + Address last_memento_word_address = memento_address + kPointerSize; + if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) { + return NULL; + } + + HeapObject* candidate = HeapObject::FromAddress(memento_address); + if (candidate->map() != allocation_memento_map()) return NULL; + + // Either the object is the last object in the new space, or there is another + // object of at least word size (the header map word) following it, so + // suffices to compare ptr and top here. Note that technically we do not have + // to compare with the current top pointer of the from space page during GC, + // since we always install filler objects above the top pointer of a from + // space page when performing a garbage collection. However, always performing + // the test makes it possible to have a single, unified version of + // FindAllocationMemento that is used both by the GC and the mutator. 
+ Address top = NewSpaceTop(); + DCHECK(memento_address == top || + memento_address + HeapObject::kHeaderSize <= top || + !NewSpacePage::OnSamePage(memento_address, top)); + if (memento_address == top) return NULL; + + AllocationMemento* memento = AllocationMemento::cast(candidate); + if (!memento->IsValid()) return NULL; + return memento; +} + + +void Heap::UpdateAllocationSiteFeedback(HeapObject* object, + ScratchpadSlotMode mode) { + Heap* heap = object->GetHeap(); + DCHECK(heap->InFromSpace(object)); + + if (!FLAG_allocation_site_pretenuring || + !AllocationSite::CanTrack(object->map()->instance_type())) + return; + + AllocationMemento* memento = heap->FindAllocationMemento(object); + if (memento == NULL) return; + + if (memento->GetAllocationSite()->IncrementMementoFoundCount()) { + heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode); + } +} + + +void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { + DCHECK(object->GetIsolate()->heap()->InFromSpace(object)); + + // We use the first word (where the map pointer usually is) of a heap + // object to record the forwarding pointer. A forwarding pointer can + // point to an old space, the code space, or the to space of the new + // generation. + MapWord first_word = object->map_word(); + + // If the first word is a forwarding address, the object has already been + // copied. + if (first_word.IsForwardingAddress()) { + HeapObject* dest = first_word.ToForwardingAddress(); + DCHECK(object->GetIsolate()->heap()->InFromSpace(*p)); + *p = dest; + return; + } + + UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT); + + // AllocationMementos are unrooted and shouldn't survive a scavenge + DCHECK(object->map() != object->GetHeap()->allocation_memento_map()); + // Call the slow part of scavenge object. 
+ return ScavengeObjectSlow(p, object); +} + + +bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason, + const v8::GCCallbackFlags callbackFlags) { + const char* collector_reason = NULL; + GarbageCollector collector = SelectGarbageCollector(space, &collector_reason); + return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags); +} + + +Isolate* Heap::isolate() { + return reinterpret_cast<Isolate*>( + reinterpret_cast<intptr_t>(this) - + reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4); +} + + +// Calls the FUNCTION_CALL function and retries it up to three times +// to guarantee that any allocations performed during the call will +// succeed if there's enough memory. + +// Warning: Do not use the identifiers __object__, __maybe_object__ or +// __scope__ in a call to this macro. + +#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \ + if (__allocation__.To(&__object__)) { \ + DCHECK(__object__ != (ISOLATE)->heap()->exception()); \ + RETURN_VALUE; \ + } + +#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \ + do { \ + AllocationResult __allocation__ = FUNCTION_CALL; \ + Object* __object__ = NULL; \ + RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \ + (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \ + "allocation failure"); \ + __allocation__ = FUNCTION_CALL; \ + RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \ + (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \ + (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \ + { \ + AlwaysAllocateScope __scope__(ISOLATE); \ + __allocation__ = FUNCTION_CALL; \ + } \ + RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \ + /* TODO(1181417): Fix this. 
*/ \ + v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \ + RETURN_EMPTY; \ + } while (false) + +#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \ + RETURN_EMPTY) \ + CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) + +#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \ + CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, \ + return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \ + return Handle<TYPE>()) + + +#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \ + CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return) + + +void ExternalStringTable::AddString(String* string) { + DCHECK(string->IsExternalString()); + if (heap_->InNewSpace(string)) { + new_space_strings_.Add(string); + } else { + old_space_strings_.Add(string); + } +} + + +void ExternalStringTable::Iterate(ObjectVisitor* v) { + if (!new_space_strings_.is_empty()) { + Object** start = &new_space_strings_[0]; + v->VisitPointers(start, start + new_space_strings_.length()); + } + if (!old_space_strings_.is_empty()) { + Object** start = &old_space_strings_[0]; + v->VisitPointers(start, start + old_space_strings_.length()); + } +} + + +// Verify() is inline to avoid ifdef-s around its calls in release +// mode. 
+void ExternalStringTable::Verify() { +#ifdef DEBUG + for (int i = 0; i < new_space_strings_.length(); ++i) { + Object* obj = Object::cast(new_space_strings_[i]); + DCHECK(heap_->InNewSpace(obj)); + DCHECK(obj != heap_->the_hole_value()); + } + for (int i = 0; i < old_space_strings_.length(); ++i) { + Object* obj = Object::cast(old_space_strings_[i]); + DCHECK(!heap_->InNewSpace(obj)); + DCHECK(obj != heap_->the_hole_value()); + } +#endif +} + + +void ExternalStringTable::AddOldString(String* string) { + DCHECK(string->IsExternalString()); + DCHECK(!heap_->InNewSpace(string)); + old_space_strings_.Add(string); +} + + +void ExternalStringTable::ShrinkNewStrings(int position) { + new_space_strings_.Rewind(position); +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + Verify(); + } +#endif +} + + +void Heap::ClearInstanceofCache() { + set_instanceof_cache_function(the_hole_value()); +} + + +Object* Heap::ToBoolean(bool condition) { + return condition ? true_value() : false_value(); +} + + +void Heap::CompletelyClearInstanceofCache() { + set_instanceof_cache_map(the_hole_value()); + set_instanceof_cache_function(the_hole_value()); +} + + +AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate) + : heap_(isolate->heap()), daf_(isolate) { + // We shouldn't hit any nested scopes, because that requires + // non-handle code to call handle code. The code still works but + // performance will degrade, so we want to catch this situation + // in debug mode. 
+ DCHECK(heap_->always_allocate_scope_depth_ == 0); + heap_->always_allocate_scope_depth_++; +} + + +AlwaysAllocateScope::~AlwaysAllocateScope() { + heap_->always_allocate_scope_depth_--; + DCHECK(heap_->always_allocate_scope_depth_ == 0); +} + + +#ifdef VERIFY_HEAP +NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() { + Isolate* isolate = Isolate::Current(); + isolate->heap()->no_weak_object_verification_scope_depth_++; +} + + +NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() { + Isolate* isolate = Isolate::Current(); + isolate->heap()->no_weak_object_verification_scope_depth_--; +} +#endif + + +GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) { + heap_->gc_callbacks_depth_++; +} + + +GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; } + + +bool GCCallbacksScope::CheckReenter() { + return heap_->gc_callbacks_depth_ == 1; +} + + +void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + HeapObject* object = HeapObject::cast(*current); + CHECK(object->GetIsolate()->heap()->Contains(object)); + CHECK(object->map()->IsMap()); + } + } +} + + +void VerifySmisVisitor::VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + CHECK((*current)->IsSmi()); + } +} +} +} // namespace v8::internal + +#endif // V8_HEAP_HEAP_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/incremental-marking.cc nodejs-0.11.15/deps/v8/src/heap/incremental-marking.cc --- nodejs-0.11.13/deps/v8/src/heap/incremental-marking.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/incremental-marking.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,971 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/heap/incremental-marking.h" + +#include "src/code-stubs.h" +#include "src/compilation-cache.h" +#include "src/conversions.h" +#include "src/heap/objects-visiting.h" +#include "src/heap/objects-visiting-inl.h" + +namespace v8 { +namespace internal { + + +IncrementalMarking::IncrementalMarking(Heap* heap) + : heap_(heap), + state_(STOPPED), + marking_deque_memory_(NULL), + marking_deque_memory_committed_(false), + steps_count_(0), + old_generation_space_available_at_start_of_incremental_(0), + old_generation_space_used_at_start_of_incremental_(0), + should_hurry_(false), + marking_speed_(0), + allocated_(0), + no_marking_scope_depth_(0), + unscanned_bytes_of_large_object_(0) {} + + +void IncrementalMarking::TearDown() { delete marking_deque_memory_; } + + +void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot, + Object* value) { + if (BaseRecordWrite(obj, slot, value) && slot != NULL) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + // Object is not going to be rescanned we need to record the slot. 
+ heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0), + slot, value); + } + } +} + + +void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, + Isolate* isolate) { + DCHECK(obj->IsHeapObject()); + IncrementalMarking* marking = isolate->heap()->incremental_marking(); + + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + int counter = chunk->write_barrier_counter(); + if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { + marking->write_barriers_invoked_since_last_step_ += + MemoryChunk::kWriteBarrierCounterGranularity - + chunk->write_barrier_counter(); + chunk->set_write_barrier_counter( + MemoryChunk::kWriteBarrierCounterGranularity); + } + + marking->RecordWrite(obj, slot, *slot); +} + + +void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc, + HeapObject* value) { + if (IsMarking()) { + RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); + RecordWriteIntoCode(host, &rinfo, value); + } +} + + +void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { + if (IsMarking()) { + Code* host = heap_->isolate() + ->inner_pointer_to_code_cache() + ->GcSafeFindCodeForInnerPointer(pc); + RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); + RecordWriteIntoCode(host, &rinfo, value); + } +} + + +void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host, + Object** slot, + Code* value) { + if (BaseRecordWrite(host, slot, value)) { + DCHECK(slot != NULL); + heap_->mark_compact_collector()->RecordCodeEntrySlot( + reinterpret_cast<Address>(slot), value); + } +} + + +void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj, + RelocInfo* rinfo, + Object* value) { + MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); + if (Marking::IsWhite(value_bit)) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + } + // Object is either grey or white. 
It will be scanned if survives. + return; + } + + if (is_compacting_) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + // Object is not going to be rescanned. We need to record the slot. + heap_->mark_compact_collector()->RecordRelocSlot(rinfo, + Code::cast(value)); + } + } +} + + +static void MarkObjectGreyDoNotEnqueue(Object* obj) { + if (obj->IsHeapObject()) { + HeapObject* heap_obj = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); + if (Marking::IsBlack(mark_bit)) { + MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), + -heap_obj->Size()); + } + Marking::AnyToGrey(mark_bit); + } +} + + +static inline void MarkBlackOrKeepGrey(HeapObject* heap_object, + MarkBit mark_bit, int size) { + DCHECK(!Marking::IsImpossible(mark_bit)); + if (mark_bit.Get()) return; + mark_bit.Set(); + MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); + DCHECK(Marking::IsBlack(mark_bit)); +} + + +static inline void MarkBlackOrKeepBlack(HeapObject* heap_object, + MarkBit mark_bit, int size) { + DCHECK(!Marking::IsImpossible(mark_bit)); + if (Marking::IsBlack(mark_bit)) return; + Marking::MarkBlack(mark_bit); + MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); + DCHECK(Marking::IsBlack(mark_bit)); +} + + +class IncrementalMarkingMarkingVisitor + : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> { + public: + static void Initialize() { + StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize(); + table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental); + table_.Register(kVisitNativeContext, &VisitNativeContextIncremental); + table_.Register(kVisitJSRegExp, &VisitJSRegExp); + } + + static const int kProgressBarScanningChunk = 32 * 1024; + + static void VisitFixedArrayIncremental(Map* map, HeapObject* object) { + MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); + // TODO(mstarzinger): Move setting of the flag to the 
allocation site of + // the array. The visitor should just check the flag. + if (FLAG_use_marking_progress_bar && + chunk->owner()->identity() == LO_SPACE) { + chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR); + } + if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + Heap* heap = map->GetHeap(); + // When using a progress bar for large fixed arrays, scan only a chunk of + // the array and try to push it onto the marking deque again until it is + // fully scanned. Fall back to scanning it through to the end in case this + // fails because of a full deque. + int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); + int start_offset = + Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar()); + int end_offset = + Min(object_size, start_offset + kProgressBarScanningChunk); + int already_scanned_offset = start_offset; + bool scan_until_end = false; + do { + VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0), + HeapObject::RawField(object, start_offset), + HeapObject::RawField(object, end_offset)); + start_offset = end_offset; + end_offset = Min(object_size, end_offset + kProgressBarScanningChunk); + scan_until_end = heap->incremental_marking()->marking_deque()->IsFull(); + } while (scan_until_end && start_offset < object_size); + chunk->set_progress_bar(start_offset); + if (start_offset < object_size) { + heap->incremental_marking()->marking_deque()->UnshiftGrey(object); + heap->incremental_marking()->NotifyIncompleteScanOfObject( + object_size - (start_offset - already_scanned_offset)); + } + } else { + FixedArrayVisitor::Visit(map, object); + } + } + + static void VisitNativeContextIncremental(Map* map, HeapObject* object) { + Context* context = Context::cast(object); + + // We will mark cache black with a separate pass when we finish marking. + // Note that GC can happen when the context is not fully initialized, + // so the cache can be undefined. 
+ Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX); + if (!cache->IsUndefined()) { + MarkObjectGreyDoNotEnqueue(cache); + } + VisitNativeContext(map, context); + } + + INLINE(static void VisitPointer(Heap* heap, Object** p)) { + Object* obj = *p; + if (obj->IsHeapObject()) { + heap->mark_compact_collector()->RecordSlot(p, p, obj); + MarkObject(heap, obj); + } + } + + INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { + for (Object** p = start; p < end; p++) { + Object* obj = *p; + if (obj->IsHeapObject()) { + heap->mark_compact_collector()->RecordSlot(start, p, obj); + MarkObject(heap, obj); + } + } + } + + INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor, + Object** start, Object** end)) { + for (Object** p = start; p < end; p++) { + Object* obj = *p; + if (obj->IsHeapObject()) { + heap->mark_compact_collector()->RecordSlot(anchor, p, obj); + MarkObject(heap, obj); + } + } + } + + // Marks the object grey and pushes it on the marking stack. + INLINE(static void MarkObject(Heap* heap, Object* obj)) { + HeapObject* heap_object = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(heap_object); + if (mark_bit.data_only()) { + MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); + } else if (Marking::IsWhite(mark_bit)) { + heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit); + } + } + + // Marks the object black without pushing it on the marking stack. + // Returns true if object needed marking and false otherwise. 
+ INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) { + HeapObject* heap_object = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(heap_object); + if (Marking::IsWhite(mark_bit)) { + mark_bit.Set(); + MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), + heap_object->Size()); + return true; + } + return false; + } +}; + + +class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { + public: + explicit IncrementalMarkingRootMarkingVisitor( + IncrementalMarking* incremental_marking) + : incremental_marking_(incremental_marking) {} + + void VisitPointer(Object** p) { MarkObjectByPointer(p); } + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) MarkObjectByPointer(p); + } + + private: + void MarkObjectByPointer(Object** p) { + Object* obj = *p; + if (!obj->IsHeapObject()) return; + + HeapObject* heap_object = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(heap_object); + if (mark_bit.data_only()) { + MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); + } else { + if (Marking::IsWhite(mark_bit)) { + incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); + } + } + } + + IncrementalMarking* incremental_marking_; +}; + + +void IncrementalMarking::Initialize() { + IncrementalMarkingMarkingVisitor::Initialize(); +} + + +void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, + bool is_marking, + bool is_compacting) { + if (is_marking) { + chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + + // It's difficult to filter out slots recorded for large objects. 
+ if (chunk->owner()->identity() == LO_SPACE && + chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) { + chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION); + } + } else if (chunk->owner()->identity() == CELL_SPACE || + chunk->owner()->identity() == PROPERTY_CELL_SPACE || + chunk->scan_on_scavenge()) { + chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } else { + chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } +} + + +void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk, + bool is_marking) { + chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + if (is_marking) { + chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } else { + chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } + chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE); +} + + +void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace( + PagedSpace* space) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + SetOldSpacePageFlags(p, false, false); + } +} + + +void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace( + NewSpace* space) { + NewSpacePageIterator it(space); + while (it.has_next()) { + NewSpacePage* p = it.next(); + SetNewSpacePageFlags(p, false); + } +} + + +void IncrementalMarking::DeactivateIncrementalWriteBarrier() { + DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->map_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->code_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->new_space()); + + LargePage* 
lop = heap_->lo_space()->first_page(); + while (lop->is_valid()) { + SetOldSpacePageFlags(lop, false, false); + lop = lop->next_page(); + } +} + + +void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + SetOldSpacePageFlags(p, true, is_compacting_); + } +} + + +void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) { + NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); + while (it.has_next()) { + NewSpacePage* p = it.next(); + SetNewSpacePageFlags(p, true); + } +} + + +void IncrementalMarking::ActivateIncrementalWriteBarrier() { + ActivateIncrementalWriteBarrier(heap_->old_pointer_space()); + ActivateIncrementalWriteBarrier(heap_->old_data_space()); + ActivateIncrementalWriteBarrier(heap_->cell_space()); + ActivateIncrementalWriteBarrier(heap_->property_cell_space()); + ActivateIncrementalWriteBarrier(heap_->map_space()); + ActivateIncrementalWriteBarrier(heap_->code_space()); + ActivateIncrementalWriteBarrier(heap_->new_space()); + + LargePage* lop = heap_->lo_space()->first_page(); + while (lop->is_valid()) { + SetOldSpacePageFlags(lop, true, is_compacting_); + lop = lop->next_page(); + } +} + + +bool IncrementalMarking::WorthActivating() { +#ifndef DEBUG + static const intptr_t kActivationThreshold = 8 * MB; +#else + // TODO(gc) consider setting this to some low level so that some + // debug tests run with incremental marking and some without. + static const intptr_t kActivationThreshold = 0; +#endif + // Only start incremental marking in a safe state: 1) when incremental + // marking is turned on, 2) when we are currently not in a GC, and + // 3) when we are currently not serializing or deserializing the heap. 
+ return FLAG_incremental_marking && FLAG_incremental_marking_steps && + heap_->gc_state() == Heap::NOT_IN_GC && + !heap_->isolate()->serializer_enabled() && + heap_->isolate()->IsInitialized() && + heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold; +} + + +void IncrementalMarking::ActivateGeneratedStub(Code* stub) { + DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY); + + if (!IsMarking()) { + // Initially stub is generated in STORE_BUFFER_ONLY mode thus + // we don't need to do anything if incremental marking is + // not active. + } else if (IsCompacting()) { + RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); + } else { + RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); + } +} + + +static void PatchIncrementalMarkingRecordWriteStubs( + Heap* heap, RecordWriteStub::Mode mode) { + UnseededNumberDictionary* stubs = heap->code_stubs(); + + int capacity = stubs->Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = stubs->KeyAt(i); + if (stubs->IsKey(k)) { + uint32_t key = NumberToUint32(k); + + if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) { + Object* e = stubs->ValueAt(i); + if (e->IsCode()) { + RecordWriteStub::Patch(Code::cast(e), mode); + } + } + } + } +} + + +void IncrementalMarking::EnsureMarkingDequeIsCommitted() { + if (marking_deque_memory_ == NULL) { + marking_deque_memory_ = new base::VirtualMemory(4 * MB); + } + if (!marking_deque_memory_committed_) { + bool success = marking_deque_memory_->Commit( + reinterpret_cast<Address>(marking_deque_memory_->address()), + marking_deque_memory_->size(), + false); // Not executable. 
+ CHECK(success); + marking_deque_memory_committed_ = true; + } +} + + +void IncrementalMarking::UncommitMarkingDeque() { + if (state_ == STOPPED && marking_deque_memory_committed_) { + bool success = marking_deque_memory_->Uncommit( + reinterpret_cast<Address>(marking_deque_memory_->address()), + marking_deque_memory_->size()); + CHECK(success); + marking_deque_memory_committed_ = false; + } +} + + +void IncrementalMarking::Start(CompactionFlag flag) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Start\n"); + } + DCHECK(FLAG_incremental_marking); + DCHECK(FLAG_incremental_marking_steps); + DCHECK(state_ == STOPPED); + DCHECK(heap_->gc_state() == Heap::NOT_IN_GC); + DCHECK(!heap_->isolate()->serializer_enabled()); + DCHECK(heap_->isolate()->IsInitialized()); + + ResetStepCounters(); + + if (!heap_->mark_compact_collector()->sweeping_in_progress()) { + StartMarking(flag); + } else { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Start sweeping.\n"); + } + state_ = SWEEPING; + } + + heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold); +} + + +void IncrementalMarking::StartMarking(CompactionFlag flag) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Start marking\n"); + } + + is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) && + heap_->mark_compact_collector()->StartCompaction( + MarkCompactCollector::INCREMENTAL_COMPACTION); + + state_ = MARKING; + + RecordWriteStub::Mode mode = is_compacting_ + ? RecordWriteStub::INCREMENTAL_COMPACTION + : RecordWriteStub::INCREMENTAL; + + PatchIncrementalMarkingRecordWriteStubs(heap_, mode); + + EnsureMarkingDequeIsCommitted(); + + // Initialize marking stack. 
+ Address addr = static_cast<Address>(marking_deque_memory_->address()); + size_t size = marking_deque_memory_->size(); + if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; + marking_deque_.Initialize(addr, addr + size); + + ActivateIncrementalWriteBarrier(); + +// Marking bits are cleared by the sweeper. +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); + } +#endif + + heap_->CompletelyClearInstanceofCache(); + heap_->isolate()->compilation_cache()->MarkCompactPrologue(); + + if (FLAG_cleanup_code_caches_at_gc) { + // We will mark cache black with a separate pass + // when we finish marking. + MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache()); + } + + // Mark strong roots grey. + IncrementalMarkingRootMarkingVisitor visitor(this); + heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); + + heap_->mark_compact_collector()->MarkWeakObjectToCodeTable(); + + // Ready to start incremental marking. + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Running\n"); + } +} + + +void IncrementalMarking::PrepareForScavenge() { + if (!IsMarking()) return; + NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(), + heap_->new_space()->FromSpaceEnd()); + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +void IncrementalMarking::UpdateMarkingDequeAfterScavenge() { + if (!IsMarking()) return; + + int current = marking_deque_.bottom(); + int mask = marking_deque_.mask(); + int limit = marking_deque_.top(); + HeapObject** array = marking_deque_.array(); + int new_top = current; + + Map* filler_map = heap_->one_pointer_filler_map(); + + while (current != limit) { + HeapObject* obj = array[current]; + DCHECK(obj->IsHeapObject()); + current = ((current + 1) & mask); + if (heap_->InNewSpace(obj)) { + MapWord map_word = obj->map_word(); + if (map_word.IsForwardingAddress()) { + HeapObject* dest = map_word.ToForwardingAddress(); + array[new_top] = dest; + new_top = 
((new_top + 1) & mask); + DCHECK(new_top != marking_deque_.bottom()); +#ifdef DEBUG + MarkBit mark_bit = Marking::MarkBitFrom(obj); + DCHECK(Marking::IsGrey(mark_bit) || + (obj->IsFiller() && Marking::IsWhite(mark_bit))); +#endif + } + } else if (obj->map() != filler_map) { + // Skip one word filler objects that appear on the + // stack when we perform in place array shift. + array[new_top] = obj; + new_top = ((new_top + 1) & mask); + DCHECK(new_top != marking_deque_.bottom()); +#ifdef DEBUG + MarkBit mark_bit = Marking::MarkBitFrom(obj); + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + DCHECK(Marking::IsGrey(mark_bit) || + (obj->IsFiller() && Marking::IsWhite(mark_bit)) || + (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && + Marking::IsBlack(mark_bit))); +#endif + } + } + marking_deque_.set_top(new_top); +} + + +void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) { + MarkBit map_mark_bit = Marking::MarkBitFrom(map); + if (Marking::IsWhite(map_mark_bit)) { + WhiteToGreyAndPush(map, map_mark_bit); + } + + IncrementalMarkingMarkingVisitor::IterateBody(map, obj); + + MarkBit mark_bit = Marking::MarkBitFrom(obj); +#if ENABLE_SLOW_DCHECKS + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + SLOW_DCHECK(Marking::IsGrey(mark_bit) || + (obj->IsFiller() && Marking::IsWhite(mark_bit)) || + (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && + Marking::IsBlack(mark_bit))); +#endif + MarkBlackOrKeepBlack(obj, mark_bit, size); +} + + +intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) { + intptr_t bytes_processed = 0; + Map* filler_map = heap_->one_pointer_filler_map(); + while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) { + HeapObject* obj = marking_deque_.Pop(); + + // Explicitly skip one word fillers. Incremental markbit patterns are + // correct only for objects that occupy at least two words. 
+ Map* map = obj->map(); + if (map == filler_map) continue; + + int size = obj->SizeFromMap(map); + unscanned_bytes_of_large_object_ = 0; + VisitObject(map, obj, size); + int delta = (size - unscanned_bytes_of_large_object_); + // TODO(jochen): remove after http://crbug.com/381820 is resolved. + CHECK_LT(0, delta); + bytes_processed += delta; + } + return bytes_processed; +} + + +void IncrementalMarking::ProcessMarkingDeque() { + Map* filler_map = heap_->one_pointer_filler_map(); + while (!marking_deque_.IsEmpty()) { + HeapObject* obj = marking_deque_.Pop(); + + // Explicitly skip one word fillers. Incremental markbit patterns are + // correct only for objects that occupy at least two words. + Map* map = obj->map(); + if (map == filler_map) continue; + + VisitObject(map, obj, obj->SizeFromMap(map)); + } +} + + +void IncrementalMarking::Hurry() { + if (state() == MARKING) { + double start = 0.0; + if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) { + start = base::OS::TimeCurrentMillis(); + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Hurry\n"); + } + } + // TODO(gc) hurry can mark objects it encounters black as mutator + // was stopped. 
+ ProcessMarkingDeque(); + state_ = COMPLETE; + if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) { + double end = base::OS::TimeCurrentMillis(); + double delta = end - start; + heap_->tracer()->AddMarkingTime(delta); + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n", + static_cast<int>(delta)); + } + } + } + + if (FLAG_cleanup_code_caches_at_gc) { + PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache(); + Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache)); + MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(), + PolymorphicCodeCache::kSize); + } + + Object* context = heap_->native_contexts_list(); + while (!context->IsUndefined()) { + // GC can happen when the context is not fully initialized, + // so the cache can be undefined. + HeapObject* cache = HeapObject::cast( + Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX)); + if (!cache->IsUndefined()) { + MarkBit mark_bit = Marking::MarkBitFrom(cache); + if (Marking::IsGrey(mark_bit)) { + Marking::GreyToBlack(mark_bit); + MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size()); + } + } + context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); + } +} + + +void IncrementalMarking::Abort() { + if (IsStopped()) return; + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Aborting.\n"); + } + heap_->new_space()->LowerInlineAllocationLimit(0); + IncrementalMarking::set_should_hurry(false); + ResetStepCounters(); + if (IsMarking()) { + PatchIncrementalMarkingRecordWriteStubs(heap_, + RecordWriteStub::STORE_BUFFER_ONLY); + DeactivateIncrementalWriteBarrier(); + + if (is_compacting_) { + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + Page* p = Page::FromAddress(obj->address()); + if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + p->ClearFlag(Page::RESCAN_ON_EVACUATION); + } + } + } + } + 
heap_->isolate()->stack_guard()->ClearGC(); + state_ = STOPPED; + is_compacting_ = false; +} + + +void IncrementalMarking::Finalize() { + Hurry(); + state_ = STOPPED; + is_compacting_ = false; + heap_->new_space()->LowerInlineAllocationLimit(0); + IncrementalMarking::set_should_hurry(false); + ResetStepCounters(); + PatchIncrementalMarkingRecordWriteStubs(heap_, + RecordWriteStub::STORE_BUFFER_ONLY); + DeactivateIncrementalWriteBarrier(); + DCHECK(marking_deque_.IsEmpty()); + heap_->isolate()->stack_guard()->ClearGC(); +} + + +void IncrementalMarking::MarkingComplete(CompletionAction action) { + state_ = COMPLETE; + // We will set the stack guard to request a GC now. This will mean the rest + // of the GC gets performed as soon as possible (we can't do a GC here in a + // record-write context). If a few things get allocated between now and then + // that shouldn't make us do a scavenge and keep being incremental, so we set + // the should-hurry flag to indicate that there can't be much work left to do. + set_should_hurry(true); + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Complete (normal).\n"); + } + if (action == GC_VIA_STACK_GUARD) { + heap_->isolate()->stack_guard()->RequestGC(); + } +} + + +void IncrementalMarking::OldSpaceStep(intptr_t allocated) { + if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) { + // TODO(hpayer): Let's play safe for now, but compaction should be + // in principle possible. 
+ Start(PREVENT_COMPACTION); + } else { + Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD); + } +} + + +void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action, + bool force_marking) { + if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking || + !FLAG_incremental_marking_steps || + (state_ != SWEEPING && state_ != MARKING)) { + return; + } + + allocated_ += allocated_bytes; + + if (!force_marking && allocated_ < kAllocatedThreshold && + write_barriers_invoked_since_last_step_ < + kWriteBarriersInvokedThreshold) { + return; + } + + if (state_ == MARKING && no_marking_scope_depth_ > 0) return; + + { + HistogramTimerScope incremental_marking_scope( + heap_->isolate()->counters()->gc_incremental_marking()); + double start = base::OS::TimeCurrentMillis(); + + // The marking speed is driven either by the allocation rate or by the rate + // at which we are having to check the color of objects in the write + // barrier. + // It is possible for a tight non-allocating loop to run a lot of write + // barriers before we get here and check them (marking can only take place + // on + // allocation), so to reduce the lumpiness we don't use the write barriers + // invoked since last step directly to determine the amount of work to do. 
+ intptr_t bytes_to_process = + marking_speed_ * + Max(allocated_, write_barriers_invoked_since_last_step_); + allocated_ = 0; + write_barriers_invoked_since_last_step_ = 0; + + bytes_scanned_ += bytes_to_process; + intptr_t bytes_processed = 0; + + if (state_ == SWEEPING) { + if (heap_->mark_compact_collector()->sweeping_in_progress() && + heap_->mark_compact_collector()->IsSweepingCompleted()) { + heap_->mark_compact_collector()->EnsureSweepingCompleted(); + } + if (!heap_->mark_compact_collector()->sweeping_in_progress()) { + bytes_scanned_ = 0; + StartMarking(PREVENT_COMPACTION); + } + } else if (state_ == MARKING) { + bytes_processed = ProcessMarkingDeque(bytes_to_process); + if (marking_deque_.IsEmpty()) MarkingComplete(action); + } + + steps_count_++; + + bool speed_up = false; + + if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { + if (FLAG_trace_gc) { + PrintPID("Speed up marking after %d steps\n", + static_cast<int>(kMarkingSpeedAccellerationInterval)); + } + speed_up = true; + } + + bool space_left_is_very_small = + (old_generation_space_available_at_start_of_incremental_ < 10 * MB); + + bool only_1_nth_of_space_that_was_available_still_left = + (SpaceLeftInOldSpace() * (marking_speed_ + 1) < + old_generation_space_available_at_start_of_incremental_); + + if (space_left_is_very_small || + only_1_nth_of_space_that_was_available_still_left) { + if (FLAG_trace_gc) + PrintPID("Speed up marking because of low space left\n"); + speed_up = true; + } + + bool size_of_old_space_multiplied_by_n_during_marking = + (heap_->PromotedTotalSize() > + (marking_speed_ + 1) * + old_generation_space_used_at_start_of_incremental_); + if (size_of_old_space_multiplied_by_n_during_marking) { + speed_up = true; + if (FLAG_trace_gc) { + PrintPID("Speed up marking because of heap size increase\n"); + } + } + + int64_t promoted_during_marking = + heap_->PromotedTotalSize() - + old_generation_space_used_at_start_of_incremental_; + intptr_t delay = marking_speed_ * MB; 
+ intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); + + // We try to scan at at least twice the speed that we are allocating. + if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { + if (FLAG_trace_gc) { + PrintPID("Speed up marking because marker was not keeping up\n"); + } + speed_up = true; + } + + if (speed_up) { + if (state_ != MARKING) { + if (FLAG_trace_gc) { + PrintPID("Postponing speeding up marking until marking starts\n"); + } + } else { + marking_speed_ += kMarkingSpeedAccelleration; + marking_speed_ = static_cast<int>( + Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3))); + if (FLAG_trace_gc) { + PrintPID("Marking speed increased to %d\n", marking_speed_); + } + } + } + + double end = base::OS::TimeCurrentMillis(); + double duration = (end - start); + // Note that we report zero bytes here when sweeping was in progress or + // when we just started incremental marking. In these cases we did not + // process the marking deque. + heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed); + } +} + + +void IncrementalMarking::ResetStepCounters() { + steps_count_ = 0; + old_generation_space_available_at_start_of_incremental_ = + SpaceLeftInOldSpace(); + old_generation_space_used_at_start_of_incremental_ = + heap_->PromotedTotalSize(); + bytes_rescanned_ = 0; + marking_speed_ = kInitialMarkingSpeed; + bytes_scanned_ = 0; + write_barriers_invoked_since_last_step_ = 0; +} + + +int64_t IncrementalMarking::SpaceLeftInOldSpace() { + return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/incremental-marking.h nodejs-0.11.15/deps/v8/src/heap/incremental-marking.h --- nodejs-0.11.13/deps/v8/src/heap/incremental-marking.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/incremental-marking.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,222 @@ +// Copyright 2012 the V8 project authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_INCREMENTAL_MARKING_H_ +#define V8_HEAP_INCREMENTAL_MARKING_H_ + + +#include "src/execution.h" +#include "src/heap/mark-compact.h" +#include "src/objects.h" + +namespace v8 { +namespace internal { + + +class IncrementalMarking { + public: + enum State { STOPPED, SWEEPING, MARKING, COMPLETE }; + + enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD }; + + explicit IncrementalMarking(Heap* heap); + + static void Initialize(); + + void TearDown(); + + State state() { + DCHECK(state_ == STOPPED || FLAG_incremental_marking); + return state_; + } + + bool should_hurry() { return should_hurry_; } + void set_should_hurry(bool val) { should_hurry_ = val; } + + inline bool IsStopped() { return state() == STOPPED; } + + INLINE(bool IsMarking()) { return state() >= MARKING; } + + inline bool IsMarkingIncomplete() { return state() == MARKING; } + + inline bool IsComplete() { return state() == COMPLETE; } + + bool WorthActivating(); + + enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION }; + + void Start(CompactionFlag flag = ALLOW_COMPACTION); + + void Stop(); + + void PrepareForScavenge(); + + void UpdateMarkingDequeAfterScavenge(); + + void Hurry(); + + void Finalize(); + + void Abort(); + + void MarkingComplete(CompletionAction action); + + // It's hard to know how much work the incremental marker should do to make + // progress in the face of the mutator creating new work for it. We start + // of at a moderate rate of work and gradually increase the speed of the + // incremental marker until it completes. + // Do some marking every time this much memory has been allocated or that many + // heavy (color-checking) write barriers have been invoked. 
+ static const intptr_t kAllocatedThreshold = 65536; + static const intptr_t kWriteBarriersInvokedThreshold = 32768; + // Start off by marking this many times more memory than has been allocated. + static const intptr_t kInitialMarkingSpeed = 1; + // But if we are promoting a lot of data we need to mark faster to keep up + // with the data that is entering the old space through promotion. + static const intptr_t kFastMarking = 3; + // After this many steps we increase the marking/allocating factor. + static const intptr_t kMarkingSpeedAccellerationInterval = 1024; + // This is how much we increase the marking/allocating factor by. + static const intptr_t kMarkingSpeedAccelleration = 2; + static const intptr_t kMaxMarkingSpeed = 1000; + + void OldSpaceStep(intptr_t allocated); + + void Step(intptr_t allocated, CompletionAction action, + bool force_marking = false); + + inline void RestartIfNotMarking() { + if (state_ == COMPLETE) { + state_ = MARKING; + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Restarting (new grey objects)\n"); + } + } + } + + static void RecordWriteFromCode(HeapObject* obj, Object** slot, + Isolate* isolate); + + // Record a slot for compaction. Returns false for objects that are + // guaranteed to be rescanned or not guaranteed to survive. + // + // No slots in white objects should be recorded, as some slots are typed and + // cannot be interpreted correctly if the underlying object does not survive + // the incremental cycle (stays white). 
+ INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value)); + INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value)); + INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo, + Object* value)); + INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, + Code* value)); + + + void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value); + void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo, + Object* value); + void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value); + void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value); + void RecordCodeTargetPatch(Address pc, HeapObject* value); + + inline void RecordWrites(HeapObject* obj); + + inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit); + + inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit); + + inline void SetOldSpacePageFlags(MemoryChunk* chunk) { + SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting()); + } + + inline void SetNewSpacePageFlags(NewSpacePage* chunk) { + SetNewSpacePageFlags(chunk, IsMarking()); + } + + MarkingDeque* marking_deque() { return &marking_deque_; } + + bool IsCompacting() { return IsMarking() && is_compacting_; } + + void ActivateGeneratedStub(Code* stub); + + void NotifyOfHighPromotionRate() { + if (IsMarking()) { + if (marking_speed_ < kFastMarking) { + if (FLAG_trace_gc) { + PrintPID( + "Increasing marking speed to %d " + "due to high promotion rate\n", + static_cast<int>(kFastMarking)); + } + marking_speed_ = kFastMarking; + } + } + } + + void EnterNoMarkingScope() { no_marking_scope_depth_++; } + + void LeaveNoMarkingScope() { no_marking_scope_depth_--; } + + void UncommitMarkingDeque(); + + void NotifyIncompleteScanOfObject(int unscanned_bytes) { + unscanned_bytes_of_large_object_ = unscanned_bytes; + } + + private: + int64_t SpaceLeftInOldSpace(); + + void ResetStepCounters(); + + void StartMarking(CompactionFlag flag); + + void 
ActivateIncrementalWriteBarrier(PagedSpace* space); + static void ActivateIncrementalWriteBarrier(NewSpace* space); + void ActivateIncrementalWriteBarrier(); + + static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space); + static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space); + void DeactivateIncrementalWriteBarrier(); + + static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking, + bool is_compacting); + + static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking); + + void EnsureMarkingDequeIsCommitted(); + + INLINE(void ProcessMarkingDeque()); + + INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process)); + + INLINE(void VisitObject(Map* map, HeapObject* obj, int size)); + + Heap* heap_; + + State state_; + bool is_compacting_; + + base::VirtualMemory* marking_deque_memory_; + bool marking_deque_memory_committed_; + MarkingDeque marking_deque_; + + int steps_count_; + int64_t old_generation_space_available_at_start_of_incremental_; + int64_t old_generation_space_used_at_start_of_incremental_; + int64_t bytes_rescanned_; + bool should_hurry_; + int marking_speed_; + intptr_t bytes_scanned_; + intptr_t allocated_; + intptr_t write_barriers_invoked_since_last_step_; + + int no_marking_scope_depth_; + + int unscanned_bytes_of_large_object_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking); +}; +} +} // namespace v8::internal + +#endif // V8_HEAP_INCREMENTAL_MARKING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/incremental-marking-inl.h nodejs-0.11.15/deps/v8/src/heap/incremental-marking-inl.h --- nodejs-0.11.13/deps/v8/src/heap/incremental-marking-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/incremental-marking-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,117 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_HEAP_INCREMENTAL_MARKING_INL_H_ +#define V8_HEAP_INCREMENTAL_MARKING_INL_H_ + +#include "src/heap/incremental-marking.h" + +namespace v8 { +namespace internal { + + +bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot, + Object* value) { + HeapObject* value_heap_obj = HeapObject::cast(value); + MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj); + if (Marking::IsWhite(value_bit)) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + if (chunk->IsLeftOfProgressBar(slot)) { + WhiteToGreyAndPush(value_heap_obj, value_bit); + RestartIfNotMarking(); + } else { + return false; + } + } else { + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + return false; + } + } else { + return false; + } + } + if (!is_compacting_) return false; + MarkBit obj_bit = Marking::MarkBitFrom(obj); + return Marking::IsBlack(obj_bit); +} + + +void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot, + Object* value) { + if (IsMarking() && value->IsHeapObject()) { + RecordWriteSlow(obj, slot, value); + } +} + + +void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot, + Code* value) { + if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value); +} + + +void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo, + Object* value) { + if (IsMarking() && value->IsHeapObject()) { + RecordWriteIntoCodeSlow(obj, rinfo, value); + } +} + + +void IncrementalMarking::RecordWrites(HeapObject* obj) { + if (IsMarking()) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + chunk->set_progress_bar(0); + } + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + } + } +} + + +void 
IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj, + MarkBit mark_bit) { + DCHECK(Marking::MarkBitFrom(obj) == mark_bit); + DCHECK(obj->Size() >= 2 * kPointerSize); + DCHECK(IsMarking()); + Marking::BlackToGrey(mark_bit); + int obj_size = obj->Size(); + MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size); + bytes_scanned_ -= obj_size; + int64_t old_bytes_rescanned = bytes_rescanned_; + bytes_rescanned_ = old_bytes_rescanned + obj_size; + if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) { + if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) { + // If we have queued twice the heap size for rescanning then we are + // going around in circles, scanning the same objects again and again + // as the program mutates the heap faster than we can incrementally + // trace it. In this case we switch to non-incremental marking in + // order to finish off this marking phase. + if (FLAG_trace_gc) { + PrintPID("Hurrying incremental marking because of lack of progress\n"); + } + marking_speed_ = kMaxMarkingSpeed; + } + } + + marking_deque_.UnshiftGrey(obj); +} + + +void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) { + Marking::WhiteToGrey(mark_bit); + marking_deque_.PushGrey(obj); +} +} +} // namespace v8::internal + +#endif // V8_HEAP_INCREMENTAL_MARKING_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/mark-compact.cc nodejs-0.11.15/deps/v8/src/heap/mark-compact.cc --- nodejs-0.11.13/deps/v8/src/heap/mark-compact.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/mark-compact.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4786 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/base/atomicops.h" +#include "src/code-stubs.h" +#include "src/compilation-cache.h" +#include "src/cpu-profiler.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/gdb-jit.h" +#include "src/global-handles.h" +#include "src/heap/incremental-marking.h" +#include "src/heap/mark-compact.h" +#include "src/heap/objects-visiting.h" +#include "src/heap/objects-visiting-inl.h" +#include "src/heap/spaces-inl.h" +#include "src/heap/sweeper-thread.h" +#include "src/heap-profiler.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +const char* Marking::kWhiteBitPattern = "00"; +const char* Marking::kBlackBitPattern = "10"; +const char* Marking::kGreyBitPattern = "11"; +const char* Marking::kImpossibleBitPattern = "01"; + + +// ------------------------------------------------------------------------- +// MarkCompactCollector + +MarkCompactCollector::MarkCompactCollector(Heap* heap) + : // NOLINT +#ifdef DEBUG + state_(IDLE), +#endif + sweep_precisely_(false), + reduce_memory_footprint_(false), + abort_incremental_marking_(false), + marking_parity_(ODD_MARKING_PARITY), + compacting_(false), + was_marked_incrementally_(false), + sweeping_in_progress_(false), + pending_sweeper_jobs_semaphore_(0), + sequential_sweeping_(false), + migration_slots_buffer_(NULL), + heap_(heap), + code_flusher_(NULL), + have_code_to_deoptimize_(false) { +} + +#ifdef VERIFY_HEAP +class VerifyMarkingVisitor : public ObjectVisitor { + public: + explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} + + void VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + HeapObject* object = HeapObject::cast(*current); + CHECK(heap_->mark_compact_collector()->IsMarked(object)); + } + } + } + + void VisitEmbeddedPointer(RelocInfo* rinfo) { + DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + if 
(!rinfo->host()->IsWeakObject(rinfo->target_object())) { + Object* p = rinfo->target_object(); + VisitPointer(&p); + } + } + + void VisitCell(RelocInfo* rinfo) { + Code* code = rinfo->host(); + DCHECK(rinfo->rmode() == RelocInfo::CELL); + if (!code->IsWeakObject(rinfo->target_cell())) { + ObjectVisitor::VisitCell(rinfo); + } + } + + private: + Heap* heap_; +}; + + +static void VerifyMarking(Heap* heap, Address bottom, Address top) { + VerifyMarkingVisitor visitor(heap); + HeapObject* object; + Address next_object_must_be_here_or_later = bottom; + + for (Address current = bottom; current < top; current += kPointerSize) { + object = HeapObject::FromAddress(current); + if (MarkCompactCollector::IsMarked(object)) { + CHECK(current >= next_object_must_be_here_or_later); + object->Iterate(&visitor); + next_object_must_be_here_or_later = current + object->Size(); + } + } +} + + +static void VerifyMarking(NewSpace* space) { + Address end = space->top(); + NewSpacePageIterator it(space->bottom(), end); + // The bottom position is at the start of its page. Allows us to use + // page->area_start() as start of range on all pages. + CHECK_EQ(space->bottom(), + NewSpacePage::FromAddress(space->bottom())->area_start()); + while (it.has_next()) { + NewSpacePage* page = it.next(); + Address limit = it.has_next() ? 
page->area_end() : end; + CHECK(limit == end || !page->Contains(end)); + VerifyMarking(space->heap(), page->area_start(), limit); + } +} + + +static void VerifyMarking(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Page* p = it.next(); + VerifyMarking(space->heap(), p->area_start(), p->area_end()); + } +} + + +static void VerifyMarking(Heap* heap) { + VerifyMarking(heap->old_pointer_space()); + VerifyMarking(heap->old_data_space()); + VerifyMarking(heap->code_space()); + VerifyMarking(heap->cell_space()); + VerifyMarking(heap->property_cell_space()); + VerifyMarking(heap->map_space()); + VerifyMarking(heap->new_space()); + + VerifyMarkingVisitor visitor(heap); + + LargeObjectIterator it(heap->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + if (MarkCompactCollector::IsMarked(obj)) { + obj->Iterate(&visitor); + } + } + + heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); +} + + +class VerifyEvacuationVisitor : public ObjectVisitor { + public: + void VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + HeapObject* object = HeapObject::cast(*current); + CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); + } + } + } +}; + + +static void VerifyEvacuation(Page* page) { + VerifyEvacuationVisitor visitor; + HeapObjectIterator iterator(page, NULL); + for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; + heap_object = iterator.Next()) { + // We skip free space objects. + if (!heap_object->IsFiller()) { + heap_object->Iterate(&visitor); + } + } +} + + +static void VerifyEvacuation(NewSpace* space) { + NewSpacePageIterator it(space->bottom(), space->top()); + VerifyEvacuationVisitor visitor; + + while (it.has_next()) { + NewSpacePage* page = it.next(); + Address current = page->area_start(); + Address limit = it.has_next() ? 
page->area_end() : space->top(); + CHECK(limit == space->top() || !page->Contains(space->top())); + while (current < limit) { + HeapObject* object = HeapObject::FromAddress(current); + object->Iterate(&visitor); + current += object->Size(); + } + } +} + + +static void VerifyEvacuation(Heap* heap, PagedSpace* space) { + if (!space->swept_precisely()) return; + if (FLAG_use_allocation_folding && + (space == heap->old_pointer_space() || space == heap->old_data_space())) { + return; + } + PageIterator it(space); + + while (it.has_next()) { + Page* p = it.next(); + if (p->IsEvacuationCandidate()) continue; + VerifyEvacuation(p); + } +} + + +static void VerifyEvacuation(Heap* heap) { + VerifyEvacuation(heap, heap->old_pointer_space()); + VerifyEvacuation(heap, heap->old_data_space()); + VerifyEvacuation(heap, heap->code_space()); + VerifyEvacuation(heap, heap->cell_space()); + VerifyEvacuation(heap, heap->property_cell_space()); + VerifyEvacuation(heap, heap->map_space()); + VerifyEvacuation(heap->new_space()); + + VerifyEvacuationVisitor visitor; + heap->IterateStrongRoots(&visitor, VISIT_ALL); +} +#endif // VERIFY_HEAP + + +#ifdef DEBUG +class VerifyNativeContextSeparationVisitor : public ObjectVisitor { + public: + VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {} + + void VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + HeapObject* object = HeapObject::cast(*current); + if (object->IsString()) continue; + switch (object->map()->instance_type()) { + case JS_FUNCTION_TYPE: + CheckContext(JSFunction::cast(object)->context()); + break; + case JS_GLOBAL_PROXY_TYPE: + CheckContext(JSGlobalProxy::cast(object)->native_context()); + break; + case JS_GLOBAL_OBJECT_TYPE: + case JS_BUILTINS_OBJECT_TYPE: + CheckContext(GlobalObject::cast(object)->native_context()); + break; + case JS_ARRAY_TYPE: + case JS_DATE_TYPE: + case JS_OBJECT_TYPE: + case JS_REGEXP_TYPE: 
+ VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset)); + break; + case MAP_TYPE: + VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset)); + VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset)); + break; + case FIXED_ARRAY_TYPE: + if (object->IsContext()) { + CheckContext(object); + } else { + FixedArray* array = FixedArray::cast(object); + int length = array->length(); + // Set array length to zero to prevent cycles while iterating + // over array bodies, this is easier than intrusive marking. + array->set_length(0); + array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), + this); + array->set_length(length); + } + break; + case CELL_TYPE: + case JS_PROXY_TYPE: + case JS_VALUE_TYPE: + case TYPE_FEEDBACK_INFO_TYPE: + object->Iterate(this); + break; + case DECLARED_ACCESSOR_INFO_TYPE: + case EXECUTABLE_ACCESSOR_INFO_TYPE: + case BYTE_ARRAY_TYPE: + case CALL_HANDLER_INFO_TYPE: + case CODE_TYPE: + case FIXED_DOUBLE_ARRAY_TYPE: + case HEAP_NUMBER_TYPE: + case MUTABLE_HEAP_NUMBER_TYPE: + case INTERCEPTOR_INFO_TYPE: + case ODDBALL_TYPE: + case SCRIPT_TYPE: + case SHARED_FUNCTION_INFO_TYPE: + break; + default: + UNREACHABLE(); + } + } + } + } + + private: + void CheckContext(Object* context) { + if (!context->IsContext()) return; + Context* native_context = Context::cast(context)->native_context(); + if (current_native_context_ == NULL) { + current_native_context_ = native_context; + } else { + CHECK_EQ(current_native_context_, native_context); + } + } + + Context* current_native_context_; +}; + + +static void VerifyNativeContextSeparation(Heap* heap) { + HeapObjectIterator it(heap->code_space()); + + for (Object* object = it.Next(); object != NULL; object = it.Next()) { + VerifyNativeContextSeparationVisitor visitor; + Code::cast(object)->CodeIterateBody(&visitor); + } +} +#endif + + +void MarkCompactCollector::SetUp() { + free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space())); + 
free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space())); +} + + +void MarkCompactCollector::TearDown() { AbortCompaction(); } + + +void MarkCompactCollector::AddEvacuationCandidate(Page* p) { + p->MarkEvacuationCandidate(); + evacuation_candidates_.Add(p); +} + + +static void TraceFragmentation(PagedSpace* space) { + int number_of_pages = space->CountTotalPages(); + intptr_t reserved = (number_of_pages * space->AreaSize()); + intptr_t free = reserved - space->SizeOfObjects(); + PrintF("[%s]: %d pages, %d (%.1f%%) free\n", + AllocationSpaceName(space->identity()), number_of_pages, + static_cast<int>(free), static_cast<double>(free) * 100 / reserved); +} + + +bool MarkCompactCollector::StartCompaction(CompactionMode mode) { + if (!compacting_) { + DCHECK(evacuation_candidates_.length() == 0); + +#ifdef ENABLE_GDB_JIT_INTERFACE + // If GDBJIT interface is active disable compaction. + if (FLAG_gdbjit) return false; +#endif + + CollectEvacuationCandidates(heap()->old_pointer_space()); + CollectEvacuationCandidates(heap()->old_data_space()); + + if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION || + FLAG_incremental_code_compaction)) { + CollectEvacuationCandidates(heap()->code_space()); + } else if (FLAG_trace_fragmentation) { + TraceFragmentation(heap()->code_space()); + } + + if (FLAG_trace_fragmentation) { + TraceFragmentation(heap()->map_space()); + TraceFragmentation(heap()->cell_space()); + TraceFragmentation(heap()->property_cell_space()); + } + + heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); + heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); + heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); + + compacting_ = evacuation_candidates_.length() > 0; + } + + return compacting_; +} + + +void MarkCompactCollector::CollectGarbage() { + // Make sure that Prepare() has been called. The individual steps below will + // update the state as they proceed. 
+ DCHECK(state_ == PREPARE_GC); + + MarkLiveObjects(); + DCHECK(heap_->incremental_marking()->IsStopped()); + + if (FLAG_collect_maps) ClearNonLiveReferences(); + + ClearWeakCollections(); + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + VerifyMarking(heap_); + } +#endif + + SweepSpaces(); + +#ifdef DEBUG + if (FLAG_verify_native_context_separation) { + VerifyNativeContextSeparation(heap_); + } +#endif + +#ifdef VERIFY_HEAP + if (heap()->weak_embedded_objects_verification_enabled()) { + VerifyWeakEmbeddedObjectsInCode(); + } + if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { + VerifyOmittedMapChecks(); + } +#endif + + Finish(); + + if (marking_parity_ == EVEN_MARKING_PARITY) { + marking_parity_ = ODD_MARKING_PARITY; + } else { + DCHECK(marking_parity_ == ODD_MARKING_PARITY); + marking_parity_ = EVEN_MARKING_PARITY; + } +} + + +#ifdef VERIFY_HEAP +void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Page* p = it.next(); + CHECK(p->markbits()->IsClean()); + CHECK_EQ(0, p->LiveBytes()); + } +} + + +void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { + NewSpacePageIterator it(space->bottom(), space->top()); + + while (it.has_next()) { + NewSpacePage* p = it.next(); + CHECK(p->markbits()->IsClean()); + CHECK_EQ(0, p->LiveBytes()); + } +} + + +void MarkCompactCollector::VerifyMarkbitsAreClean() { + VerifyMarkbitsAreClean(heap_->old_pointer_space()); + VerifyMarkbitsAreClean(heap_->old_data_space()); + VerifyMarkbitsAreClean(heap_->code_space()); + VerifyMarkbitsAreClean(heap_->cell_space()); + VerifyMarkbitsAreClean(heap_->property_cell_space()); + VerifyMarkbitsAreClean(heap_->map_space()); + VerifyMarkbitsAreClean(heap_->new_space()); + + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + CHECK(Marking::IsWhite(mark_bit)); + CHECK_EQ(0, 
Page::FromAddress(obj->address())->LiveBytes()); + } +} + + +void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() { + HeapObjectIterator code_iterator(heap()->code_space()); + for (HeapObject* obj = code_iterator.Next(); obj != NULL; + obj = code_iterator.Next()) { + Code* code = Code::cast(obj); + if (!code->is_optimized_code() && !code->is_weak_stub()) continue; + if (WillBeDeoptimized(code)) continue; + code->VerifyEmbeddedObjectsDependency(); + } +} + + +void MarkCompactCollector::VerifyOmittedMapChecks() { + HeapObjectIterator iterator(heap()->map_space()); + for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) { + Map* map = Map::cast(obj); + map->VerifyOmittedMapChecks(); + } +} +#endif // VERIFY_HEAP + + +static void ClearMarkbitsInPagedSpace(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +static void ClearMarkbitsInNewSpace(NewSpace* space) { + NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); + + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +void MarkCompactCollector::ClearMarkbits() { + ClearMarkbitsInPagedSpace(heap_->code_space()); + ClearMarkbitsInPagedSpace(heap_->map_space()); + ClearMarkbitsInPagedSpace(heap_->old_pointer_space()); + ClearMarkbitsInPagedSpace(heap_->old_data_space()); + ClearMarkbitsInPagedSpace(heap_->cell_space()); + ClearMarkbitsInPagedSpace(heap_->property_cell_space()); + ClearMarkbitsInNewSpace(heap_->new_space()); + + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + mark_bit.Clear(); + mark_bit.Next().Clear(); + Page::FromAddress(obj->address())->ResetProgressBar(); + Page::FromAddress(obj->address())->ResetLiveBytes(); + } +} + + +class MarkCompactCollector::SweeperTask : public v8::Task { + public: + SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} + + 
virtual ~SweeperTask() {} + + private: + // v8::Task overrides. + virtual void Run() V8_OVERRIDE { + heap_->mark_compact_collector()->SweepInParallel(space_, 0); + heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal(); + } + + Heap* heap_; + PagedSpace* space_; + + DISALLOW_COPY_AND_ASSIGN(SweeperTask); +}; + + +void MarkCompactCollector::StartSweeperThreads() { + DCHECK(free_list_old_pointer_space_.get()->IsEmpty()); + DCHECK(free_list_old_data_space_.get()->IsEmpty()); + sweeping_in_progress_ = true; + for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { + isolate()->sweeper_threads()[i]->StartSweeping(); + } + if (FLAG_job_based_sweeping) { + V8::GetCurrentPlatform()->CallOnBackgroundThread( + new SweeperTask(heap(), heap()->old_data_space()), + v8::Platform::kShortRunningTask); + V8::GetCurrentPlatform()->CallOnBackgroundThread( + new SweeperTask(heap(), heap()->old_pointer_space()), + v8::Platform::kShortRunningTask); + } +} + + +void MarkCompactCollector::EnsureSweepingCompleted() { + DCHECK(sweeping_in_progress_ == true); + + // If sweeping is not completed, we try to complete it here. If we do not + // have sweeper threads we have to complete since we do not have a good + // indicator for a swept space in that case. + if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) { + SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0); + SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0); + } + + for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { + isolate()->sweeper_threads()[i]->WaitForSweeperThread(); + } + if (FLAG_job_based_sweeping) { + // Wait twice for both jobs. 
+ pending_sweeper_jobs_semaphore_.Wait(); + pending_sweeper_jobs_semaphore_.Wait(); + } + ParallelSweepSpacesComplete(); + sweeping_in_progress_ = false; + RefillFreeList(heap()->paged_space(OLD_DATA_SPACE)); + RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE)); + heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); + heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); + +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + VerifyEvacuation(heap_); + } +#endif +} + + +bool MarkCompactCollector::IsSweepingCompleted() { + for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { + if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) { + return false; + } + } + + if (FLAG_job_based_sweeping) { + if (!pending_sweeper_jobs_semaphore_.WaitFor( + base::TimeDelta::FromSeconds(0))) { + return false; + } + pending_sweeper_jobs_semaphore_.Signal(); + } + + return true; +} + + +void MarkCompactCollector::RefillFreeList(PagedSpace* space) { + FreeList* free_list; + + if (space == heap()->old_pointer_space()) { + free_list = free_list_old_pointer_space_.get(); + } else if (space == heap()->old_data_space()) { + free_list = free_list_old_data_space_.get(); + } else { + // Any PagedSpace might invoke RefillFreeLists, so we need to make sure + // to only refill them for old data and pointer spaces. + return; + } + + intptr_t freed_bytes = space->free_list()->Concatenate(free_list); + space->AddToAccountingStats(freed_bytes); + space->DecrementUnsweptFreeBytes(freed_bytes); +} + + +bool MarkCompactCollector::AreSweeperThreadsActivated() { + return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping; +} + + +void Marking::TransferMark(Address old_start, Address new_start) { + // This is only used when resizing an object. + DCHECK(MemoryChunk::FromAddress(old_start) == + MemoryChunk::FromAddress(new_start)); + + if (!heap_->incremental_marking()->IsMarking()) return; + + // If the mark doesn't move, we don't check the color of the object. 
+ // It doesn't matter whether the object is black, since it hasn't changed + // size, so the adjustment to the live data count will be zero anyway. + if (old_start == new_start) return; + + MarkBit new_mark_bit = MarkBitFrom(new_start); + MarkBit old_mark_bit = MarkBitFrom(old_start); + +#ifdef DEBUG + ObjectColor old_color = Color(old_mark_bit); +#endif + + if (Marking::IsBlack(old_mark_bit)) { + old_mark_bit.Clear(); + DCHECK(IsWhite(old_mark_bit)); + Marking::MarkBlack(new_mark_bit); + return; + } else if (Marking::IsGrey(old_mark_bit)) { + old_mark_bit.Clear(); + old_mark_bit.Next().Clear(); + DCHECK(IsWhite(old_mark_bit)); + heap_->incremental_marking()->WhiteToGreyAndPush( + HeapObject::FromAddress(new_start), new_mark_bit); + heap_->incremental_marking()->RestartIfNotMarking(); + } + +#ifdef DEBUG + ObjectColor new_color = Color(new_mark_bit); + DCHECK(new_color == old_color); +#endif +} + + +const char* AllocationSpaceName(AllocationSpace space) { + switch (space) { + case NEW_SPACE: + return "NEW_SPACE"; + case OLD_POINTER_SPACE: + return "OLD_POINTER_SPACE"; + case OLD_DATA_SPACE: + return "OLD_DATA_SPACE"; + case CODE_SPACE: + return "CODE_SPACE"; + case MAP_SPACE: + return "MAP_SPACE"; + case CELL_SPACE: + return "CELL_SPACE"; + case PROPERTY_CELL_SPACE: + return "PROPERTY_CELL_SPACE"; + case LO_SPACE: + return "LO_SPACE"; + default: + UNREACHABLE(); + } + + return NULL; +} + + +// Returns zero for pages that have so little fragmentation that it is not +// worth defragmenting them. Otherwise a positive integer that gives an +// estimate of fragmentation on an arbitrary scale. +static int FreeListFragmentation(PagedSpace* space, Page* p) { + // If page was not swept then there are no free list items on it. 
+ if (!p->WasSwept()) { + if (FLAG_trace_fragmentation) { + PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p), + AllocationSpaceName(space->identity()), p->LiveBytes()); + } + return 0; + } + + PagedSpace::SizeStats sizes; + space->ObtainFreeListStatistics(p, &sizes); + + intptr_t ratio; + intptr_t ratio_threshold; + intptr_t area_size = space->AreaSize(); + if (space->identity() == CODE_SPACE) { + ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size; + ratio_threshold = 10; + } else { + ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size; + ratio_threshold = 15; + } + + if (FLAG_trace_fragmentation) { + PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", + reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()), + static_cast<int>(sizes.small_size_), + static_cast<double>(sizes.small_size_ * 100) / area_size, + static_cast<int>(sizes.medium_size_), + static_cast<double>(sizes.medium_size_ * 100) / area_size, + static_cast<int>(sizes.large_size_), + static_cast<double>(sizes.large_size_ * 100) / area_size, + static_cast<int>(sizes.huge_size_), + static_cast<double>(sizes.huge_size_ * 100) / area_size, + (ratio > ratio_threshold) ? "[fragmented]" : ""); + } + + if (FLAG_always_compact && sizes.Total() != area_size) { + return 1; + } + + if (ratio <= ratio_threshold) return 0; // Not fragmented. 
+ + return static_cast<int>(ratio - ratio_threshold); +} + + +void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { + DCHECK(space->identity() == OLD_POINTER_SPACE || + space->identity() == OLD_DATA_SPACE || + space->identity() == CODE_SPACE); + + static const int kMaxMaxEvacuationCandidates = 1000; + int number_of_pages = space->CountTotalPages(); + int max_evacuation_candidates = + static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1); + + if (FLAG_stress_compaction || FLAG_always_compact) { + max_evacuation_candidates = kMaxMaxEvacuationCandidates; + } + + class Candidate { + public: + Candidate() : fragmentation_(0), page_(NULL) {} + Candidate(int f, Page* p) : fragmentation_(f), page_(p) {} + + int fragmentation() { return fragmentation_; } + Page* page() { return page_; } + + private: + int fragmentation_; + Page* page_; + }; + + enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT }; + + CompactionMode mode = COMPACT_FREE_LISTS; + + intptr_t reserved = number_of_pages * space->AreaSize(); + intptr_t over_reserved = reserved - space->SizeOfObjects(); + static const intptr_t kFreenessThreshold = 50; + + if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) { + // If reduction of memory footprint was requested, we are aggressive + // about choosing pages to free. We expect that half-empty pages + // are easier to compact so slightly bump the limit. + mode = REDUCE_MEMORY_FOOTPRINT; + max_evacuation_candidates += 2; + } + + + if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { + // If over-usage is very high (more than a third of the space), we + // try to free all mostly empty pages. We expect that almost empty + // pages are even easier to compact so bump the limit even more. 
+ mode = REDUCE_MEMORY_FOOTPRINT; + max_evacuation_candidates *= 2; + } + + if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { + PrintF( + "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " + "evacuation candidate limit: %d\n", + static_cast<double>(over_reserved) / MB, + static_cast<double>(reserved) / MB, + static_cast<int>(kFreenessThreshold), max_evacuation_candidates); + } + + intptr_t estimated_release = 0; + + Candidate candidates[kMaxMaxEvacuationCandidates]; + + max_evacuation_candidates = + Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); + + int count = 0; + int fragmentation = 0; + Candidate* least = NULL; + + PageIterator it(space); + if (it.has_next()) it.next(); // Never compact the first page. + + while (it.has_next()) { + Page* p = it.next(); + p->ClearEvacuationCandidate(); + + if (FLAG_stress_compaction) { + unsigned int counter = space->heap()->ms_count(); + uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; + if ((counter & 1) == (page_number & 1)) fragmentation = 1; + } else if (mode == REDUCE_MEMORY_FOOTPRINT) { + // Don't try to release too many pages. + if (estimated_release >= over_reserved) { + continue; + } + + intptr_t free_bytes = 0; + + if (!p->WasSwept()) { + free_bytes = (p->area_size() - p->LiveBytes()); + } else { + PagedSpace::SizeStats sizes; + space->ObtainFreeListStatistics(p, &sizes); + free_bytes = sizes.Total(); + } + + int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); + + if (free_pct >= kFreenessThreshold) { + estimated_release += free_bytes; + fragmentation = free_pct; + } else { + fragmentation = 0; + } + + if (FLAG_trace_fragmentation) { + PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p), + AllocationSpaceName(space->identity()), + static_cast<int>(free_bytes), + static_cast<double>(free_bytes * 100) / p->area_size(), + (fragmentation > 0) ? 
"[fragmented]" : ""); + } + } else { + fragmentation = FreeListFragmentation(space, p); + } + + if (fragmentation != 0) { + if (count < max_evacuation_candidates) { + candidates[count++] = Candidate(fragmentation, p); + } else { + if (least == NULL) { + for (int i = 0; i < max_evacuation_candidates; i++) { + if (least == NULL || + candidates[i].fragmentation() < least->fragmentation()) { + least = candidates + i; + } + } + } + if (least->fragmentation() < fragmentation) { + *least = Candidate(fragmentation, p); + least = NULL; + } + } + } + } + + for (int i = 0; i < count; i++) { + AddEvacuationCandidate(candidates[i].page()); + } + + if (count > 0 && FLAG_trace_fragmentation) { + PrintF("Collected %d evacuation candidates for space %s\n", count, + AllocationSpaceName(space->identity())); + } +} + + +void MarkCompactCollector::AbortCompaction() { + if (compacting_) { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); + p->ClearEvacuationCandidate(); + p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); + } + compacting_ = false; + evacuation_candidates_.Rewind(0); + invalidated_code_.Rewind(0); + } + DCHECK_EQ(0, evacuation_candidates_.length()); +} + + +void MarkCompactCollector::Prepare() { + was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); + +#ifdef DEBUG + DCHECK(state_ == IDLE); + state_ = PREPARE_GC; +#endif + + DCHECK(!FLAG_never_compact || !FLAG_always_compact); + + if (sweeping_in_progress()) { + // Instead of waiting we could also abort the sweeper threads here. + EnsureSweepingCompleted(); + } + + // Clear marking bits if incremental marking is aborted. 
+ if (was_marked_incrementally_ && abort_incremental_marking_) { + heap()->incremental_marking()->Abort(); + ClearMarkbits(); + AbortWeakCollections(); + AbortCompaction(); + was_marked_incrementally_ = false; + } + + // Don't start compaction if we are in the middle of incremental + // marking cycle. We did not collect any slots. + if (!FLAG_never_compact && !was_marked_incrementally_) { + StartCompaction(NON_INCREMENTAL_COMPACTION); + } + + PagedSpaces spaces(heap()); + for (PagedSpace* space = spaces.next(); space != NULL; + space = spaces.next()) { + space->PrepareForMarkCompact(); + } + +#ifdef VERIFY_HEAP + if (!was_marked_incrementally_ && FLAG_verify_heap) { + VerifyMarkbitsAreClean(); + } +#endif +} + + +void MarkCompactCollector::Finish() { +#ifdef DEBUG + DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); + state_ = IDLE; +#endif + // The stub cache is not traversed during GC; clear the cache to + // force lazy re-initialization of it. This must be done after the + // GC, because it relies on the new address of certain old space + // objects (empty string, illegal builtin). + isolate()->stub_cache()->Clear(); + + if (have_code_to_deoptimize_) { + // Some code objects were marked for deoptimization during the GC. + Deoptimizer::DeoptimizeMarkedCode(isolate()); + have_code_to_deoptimize_ = false; + } +} + + +// ------------------------------------------------------------------------- +// Phase 1: tracing and marking live objects. +// before: all objects are in normal state. +// after: a live object's map pointer is marked as '00'. + +// Marking all live objects in the heap as part of mark-sweep or mark-compact +// collection. Before marking, all objects are in their normal state. After +// marking, live objects' map pointers are marked indicating that the object +// has been found reachable. +// +// The marking algorithm is a (mostly) depth-first (because of possible stack +// overflow) traversal of the graph of objects reachable from the roots. 
It +// uses an explicit stack of pointers rather than recursion. The young +// generation's inactive ('from') space is used as a marking stack. The +// objects in the marking stack are the ones that have been reached and marked +// but their children have not yet been visited. +// +// The marking stack can overflow during traversal. In that case, we set an +// overflow flag. When the overflow flag is set, we continue marking objects +// reachable from the objects on the marking stack, but no longer push them on +// the marking stack. Instead, we mark them as both marked and overflowed. +// When the stack is in the overflowed state, objects marked as overflowed +// have been reached and marked but their children have not been visited yet. +// After emptying the marking stack, we clear the overflow flag and traverse +// the heap looking for objects marked as overflowed, push them on the stack, +// and continue with marking. This process repeats until all reachable +// objects have been marked. 
+ +void CodeFlusher::ProcessJSFunctionCandidates() { + Code* lazy_compile = + isolate_->builtins()->builtin(Builtins::kCompileUnoptimized); + Object* undefined = isolate_->heap()->undefined_value(); + + JSFunction* candidate = jsfunction_candidates_head_; + JSFunction* next_candidate; + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + ClearNextCandidate(candidate, undefined); + + SharedFunctionInfo* shared = candidate->shared(); + + Code* code = shared->code(); + MarkBit code_mark = Marking::MarkBitFrom(code); + if (!code_mark.Get()) { + if (FLAG_trace_code_flushing && shared->is_compiled()) { + PrintF("[code-flushing clears: "); + shared->ShortPrint(); + PrintF(" - age: %d]\n", code->GetAge()); + } + shared->set_code(lazy_compile); + candidate->set_code(lazy_compile); + } else { + candidate->set_code(code); + } + + // We are in the middle of a GC cycle so the write barrier in the code + // setter did not record the slot update and we have to do that manually. 
+ Address slot = candidate->address() + JSFunction::kCodeEntryOffset; + Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot)); + isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot, + target); + + Object** shared_code_slot = + HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset); + isolate_->heap()->mark_compact_collector()->RecordSlot( + shared_code_slot, shared_code_slot, *shared_code_slot); + + candidate = next_candidate; + } + + jsfunction_candidates_head_ = NULL; +} + + +void CodeFlusher::ProcessSharedFunctionInfoCandidates() { + Code* lazy_compile = + isolate_->builtins()->builtin(Builtins::kCompileUnoptimized); + + SharedFunctionInfo* candidate = shared_function_info_candidates_head_; + SharedFunctionInfo* next_candidate; + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + ClearNextCandidate(candidate); + + Code* code = candidate->code(); + MarkBit code_mark = Marking::MarkBitFrom(code); + if (!code_mark.Get()) { + if (FLAG_trace_code_flushing && candidate->is_compiled()) { + PrintF("[code-flushing clears: "); + candidate->ShortPrint(); + PrintF(" - age: %d]\n", code->GetAge()); + } + candidate->set_code(lazy_compile); + } + + Object** code_slot = + HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset); + isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot, + *code_slot); + + candidate = next_candidate; + } + + shared_function_info_candidates_head_ = NULL; +} + + +void CodeFlusher::ProcessOptimizedCodeMaps() { + STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4); + + SharedFunctionInfo* holder = optimized_code_map_holder_head_; + SharedFunctionInfo* next_holder; + + while (holder != NULL) { + next_holder = GetNextCodeMap(holder); + ClearNextCodeMap(holder); + + FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); + int new_length = SharedFunctionInfo::kEntriesStart; + int old_length = code_map->length(); + for (int i = 
SharedFunctionInfo::kEntriesStart; i < old_length; + i += SharedFunctionInfo::kEntryLength) { + Code* code = + Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset)); + if (!Marking::MarkBitFrom(code).Get()) continue; + + // Move every slot in the entry. + for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) { + int dst_index = new_length++; + Object** slot = code_map->RawFieldOfElementAt(dst_index); + Object* object = code_map->get(i + j); + code_map->set(dst_index, object); + if (j == SharedFunctionInfo::kOsrAstIdOffset) { + DCHECK(object->IsSmi()); + } else { + DCHECK( + Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot)))); + isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot, + *slot); + } + } + } + + // Trim the optimized code map if entries have been removed. + if (new_length < old_length) { + holder->TrimOptimizedCodeMap(old_length - new_length); + } + + holder = next_holder; + } + + optimized_code_map_holder_head_ = NULL; +} + + +void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) { + // Make sure previous flushing decisions are revisited. 
+ isolate_->heap()->incremental_marking()->RecordWrites(shared_info); + + if (FLAG_trace_code_flushing) { + PrintF("[code-flushing abandons function-info: "); + shared_info->ShortPrint(); + PrintF("]\n"); + } + + SharedFunctionInfo* candidate = shared_function_info_candidates_head_; + SharedFunctionInfo* next_candidate; + if (candidate == shared_info) { + next_candidate = GetNextCandidate(shared_info); + shared_function_info_candidates_head_ = next_candidate; + ClearNextCandidate(shared_info); + } else { + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + + if (next_candidate == shared_info) { + next_candidate = GetNextCandidate(shared_info); + SetNextCandidate(candidate, next_candidate); + ClearNextCandidate(shared_info); + break; + } + + candidate = next_candidate; + } + } +} + + +void CodeFlusher::EvictCandidate(JSFunction* function) { + DCHECK(!function->next_function_link()->IsUndefined()); + Object* undefined = isolate_->heap()->undefined_value(); + + // Make sure previous flushing decisions are revisited. 
+ isolate_->heap()->incremental_marking()->RecordWrites(function); + isolate_->heap()->incremental_marking()->RecordWrites(function->shared()); + + if (FLAG_trace_code_flushing) { + PrintF("[code-flushing abandons closure: "); + function->shared()->ShortPrint(); + PrintF("]\n"); + } + + JSFunction* candidate = jsfunction_candidates_head_; + JSFunction* next_candidate; + if (candidate == function) { + next_candidate = GetNextCandidate(function); + jsfunction_candidates_head_ = next_candidate; + ClearNextCandidate(function, undefined); + } else { + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + + if (next_candidate == function) { + next_candidate = GetNextCandidate(function); + SetNextCandidate(candidate, next_candidate); + ClearNextCandidate(function, undefined); + break; + } + + candidate = next_candidate; + } + } +} + + +void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) { + DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map()) + ->get(SharedFunctionInfo::kNextMapIndex) + ->IsUndefined()); + + // Make sure previous flushing decisions are revisited. 
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder); + + if (FLAG_trace_code_flushing) { + PrintF("[code-flushing abandons code-map: "); + code_map_holder->ShortPrint(); + PrintF("]\n"); + } + + SharedFunctionInfo* holder = optimized_code_map_holder_head_; + SharedFunctionInfo* next_holder; + if (holder == code_map_holder) { + next_holder = GetNextCodeMap(code_map_holder); + optimized_code_map_holder_head_ = next_holder; + ClearNextCodeMap(code_map_holder); + } else { + while (holder != NULL) { + next_holder = GetNextCodeMap(holder); + + if (next_holder == code_map_holder) { + next_holder = GetNextCodeMap(code_map_holder); + SetNextCodeMap(holder, next_holder); + ClearNextCodeMap(code_map_holder); + break; + } + + holder = next_holder; + } + } +} + + +void CodeFlusher::EvictJSFunctionCandidates() { + JSFunction* candidate = jsfunction_candidates_head_; + JSFunction* next_candidate; + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + EvictCandidate(candidate); + candidate = next_candidate; + } + DCHECK(jsfunction_candidates_head_ == NULL); +} + + +void CodeFlusher::EvictSharedFunctionInfoCandidates() { + SharedFunctionInfo* candidate = shared_function_info_candidates_head_; + SharedFunctionInfo* next_candidate; + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + EvictCandidate(candidate); + candidate = next_candidate; + } + DCHECK(shared_function_info_candidates_head_ == NULL); +} + + +void CodeFlusher::EvictOptimizedCodeMaps() { + SharedFunctionInfo* holder = optimized_code_map_holder_head_; + SharedFunctionInfo* next_holder; + while (holder != NULL) { + next_holder = GetNextCodeMap(holder); + EvictOptimizedCodeMap(holder); + holder = next_holder; + } + DCHECK(optimized_code_map_holder_head_ == NULL); +} + + +void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) { + Heap* heap = isolate_->heap(); + + JSFunction** slot = &jsfunction_candidates_head_; + JSFunction* candidate = 
jsfunction_candidates_head_; + while (candidate != NULL) { + if (heap->InFromSpace(candidate)) { + v->VisitPointer(reinterpret_cast<Object**>(slot)); + } + candidate = GetNextCandidate(*slot); + slot = GetNextCandidateSlot(*slot); + } +} + + +MarkCompactCollector::~MarkCompactCollector() { + if (code_flusher_ != NULL) { + delete code_flusher_; + code_flusher_ = NULL; + } +} + + +static inline HeapObject* ShortCircuitConsString(Object** p) { + // Optimization: If the heap object pointed to by p is a non-internalized + // cons string whose right substring is HEAP->empty_string, update + // it in place to its left substring. Return the updated value. + // + // Here we assume that if we change *p, we replace it with a heap object + // (i.e., the left substring of a cons string is always a heap object). + // + // The check performed is: + // object->IsConsString() && !object->IsInternalizedString() && + // (ConsString::cast(object)->second() == HEAP->empty_string()) + // except the maps for the object and its possible substrings might be + // marked. + HeapObject* object = HeapObject::cast(*p); + if (!FLAG_clever_optimizations) return object; + Map* map = object->map(); + InstanceType type = map->instance_type(); + if (!IsShortcutCandidate(type)) return object; + + Object* second = reinterpret_cast<ConsString*>(object)->second(); + Heap* heap = map->GetHeap(); + if (second != heap->empty_string()) { + return object; + } + + // Since we don't have the object's start, it is impossible to update the + // page dirty marks. Therefore, we only replace the string with its left + // substring when page dirty marks do not change. 
+ Object* first = reinterpret_cast<ConsString*>(object)->first(); + if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object; + + *p = first; + return HeapObject::cast(first); +} + + +class MarkCompactMarkingVisitor + : public StaticMarkingVisitor<MarkCompactMarkingVisitor> { + public: + static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map, + HeapObject* obj); + + static void ObjectStatsCountFixedArray( + FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type, + FixedArraySubInstanceType dictionary_type); + + template <MarkCompactMarkingVisitor::VisitorId id> + class ObjectStatsTracker { + public: + static inline void Visit(Map* map, HeapObject* obj); + }; + + static void Initialize(); + + INLINE(static void VisitPointer(Heap* heap, Object** p)) { + MarkObjectByPointer(heap->mark_compact_collector(), p, p); + } + + INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { + // Mark all objects pointed to in [start, end). + const int kMinRangeForMarkingRecursion = 64; + if (end - start >= kMinRangeForMarkingRecursion) { + if (VisitUnmarkedObjects(heap, start, end)) return; + // We are close to a stack overflow, so just mark the objects. + } + MarkCompactCollector* collector = heap->mark_compact_collector(); + for (Object** p = start; p < end; p++) { + MarkObjectByPointer(collector, start, p); + } + } + + // Marks the object black and pushes it on the marking stack. + INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { + MarkBit mark = Marking::MarkBitFrom(object); + heap->mark_compact_collector()->MarkObject(object, mark); + } + + // Marks the object black without pushing it on the marking stack. + // Returns true if object needed marking and false otherwise. 
+ INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { + MarkBit mark_bit = Marking::MarkBitFrom(object); + if (!mark_bit.Get()) { + heap->mark_compact_collector()->SetMark(object, mark_bit); + return true; + } + return false; + } + + // Mark object pointed to by p. + INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, + Object** anchor_slot, Object** p)) { + if (!(*p)->IsHeapObject()) return; + HeapObject* object = ShortCircuitConsString(p); + collector->RecordSlot(anchor_slot, p, object); + MarkBit mark = Marking::MarkBitFrom(object); + collector->MarkObject(object, mark); + } + + + // Visit an unmarked object. + INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, + HeapObject* obj)) { +#ifdef DEBUG + DCHECK(collector->heap()->Contains(obj)); + DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj)); +#endif + Map* map = obj->map(); + Heap* heap = obj->GetHeap(); + MarkBit mark = Marking::MarkBitFrom(obj); + heap->mark_compact_collector()->SetMark(obj, mark); + // Mark the map pointer and the body. + MarkBit map_mark = Marking::MarkBitFrom(map); + heap->mark_compact_collector()->MarkObject(map, map_mark); + IterateBody(map, obj); + } + + // Visit all unmarked objects pointed to by [start, end). + // Returns false if the operation fails (lack of stack space). + INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start, + Object** end)) { + // Return false is we are close to the stack limit. + StackLimitCheck check(heap->isolate()); + if (check.HasOverflowed()) return false; + + MarkCompactCollector* collector = heap->mark_compact_collector(); + // Visit the unmarked objects. 
+ for (Object** p = start; p < end; p++) { + Object* o = *p; + if (!o->IsHeapObject()) continue; + collector->RecordSlot(start, p, o); + HeapObject* obj = HeapObject::cast(o); + MarkBit mark = Marking::MarkBitFrom(obj); + if (mark.Get()) continue; + VisitUnmarkedObject(collector, obj); + } + return true; + } + + private: + template <int id> + static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj); + + // Code flushing support. + + static const int kRegExpCodeThreshold = 5; + + static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re, + bool is_ascii) { + // Make sure that the fixed array is in fact initialized on the RegExp. + // We could potentially trigger a GC when initializing the RegExp. + if (HeapObject::cast(re->data())->map()->instance_type() != + FIXED_ARRAY_TYPE) + return; + + // Make sure this is a RegExp that actually contains code. + if (re->TypeTag() != JSRegExp::IRREGEXP) return; + + Object* code = re->DataAt(JSRegExp::code_index(is_ascii)); + if (!code->IsSmi() && + HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { + // Save a copy that can be reinstated if we need the code again. + re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code); + + // Saving a copy might create a pointer into compaction candidate + // that was not observed by marker. This might happen if JSRegExp data + // was marked through the compilation cache before marker reached JSRegExp + // object. + FixedArray* data = FixedArray::cast(re->data()); + Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii); + heap->mark_compact_collector()->RecordSlot(slot, slot, code); + + // Set a number in the 0-255 range to guarantee no smi overflow. + re->SetDataAt(JSRegExp::code_index(is_ascii), + Smi::FromInt(heap->sweep_generation() & 0xff)); + } else if (code->IsSmi()) { + int value = Smi::cast(code)->value(); + // The regexp has not been compiled yet or there was a compilation error. 
+ if (value == JSRegExp::kUninitializedValue || + value == JSRegExp::kCompilationErrorValue) { + return; + } + + // Check if we should flush now. + if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) { + re->SetDataAt(JSRegExp::code_index(is_ascii), + Smi::FromInt(JSRegExp::kUninitializedValue)); + re->SetDataAt(JSRegExp::saved_code_index(is_ascii), + Smi::FromInt(JSRegExp::kUninitializedValue)); + } + } + } + + + // Works by setting the current sweep_generation (as a smi) in the + // code object place in the data array of the RegExp and keeps a copy + // around that can be reinstated if we reuse the RegExp before flushing. + // If we did not use the code for kRegExpCodeThreshold mark sweep GCs + // we flush the code. + static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + MarkCompactCollector* collector = heap->mark_compact_collector(); + if (!collector->is_code_flushing_enabled()) { + VisitJSRegExp(map, object); + return; + } + JSRegExp* re = reinterpret_cast<JSRegExp*>(object); + // Flush code or set age on both ASCII and two byte code. + UpdateRegExpCodeAgeAndFlush(heap, re, true); + UpdateRegExpCodeAgeAndFlush(heap, re, false); + // Visit the fields of the RegExp, including the updated FixedArray. 
+ VisitJSRegExp(map, object); + } + + static VisitorDispatchTable<Callback> non_count_table_; +}; + + +void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( + FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type, + FixedArraySubInstanceType dictionary_type) { + Heap* heap = fixed_array->map()->GetHeap(); + if (fixed_array->map() != heap->fixed_cow_array_map() && + fixed_array->map() != heap->fixed_double_array_map() && + fixed_array != heap->empty_fixed_array()) { + if (fixed_array->IsDictionary()) { + heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size()); + } else { + heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size()); + } + } +} + + +void MarkCompactMarkingVisitor::ObjectStatsVisitBase( + MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) { + Heap* heap = map->GetHeap(); + int object_size = obj->Size(); + heap->RecordObjectStats(map->instance_type(), object_size); + non_count_table_.GetVisitorById(id)(map, obj); + if (obj->IsJSObject()) { + JSObject* object = JSObject::cast(obj); + ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE, + FAST_ELEMENTS_SUB_TYPE); + ObjectStatsCountFixedArray(object->properties(), + DICTIONARY_PROPERTIES_SUB_TYPE, + FAST_PROPERTIES_SUB_TYPE); + } +} + + +template <MarkCompactMarkingVisitor::VisitorId id> +void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map, + HeapObject* obj) { + ObjectStatsVisitBase(id, map, obj); +} + + +template <> +class MarkCompactMarkingVisitor::ObjectStatsTracker< + MarkCompactMarkingVisitor::kVisitMap> { + public: + static inline void Visit(Map* map, HeapObject* obj) { + Heap* heap = map->GetHeap(); + Map* map_obj = Map::cast(obj); + DCHECK(map->instance_type() == MAP_TYPE); + DescriptorArray* array = map_obj->instance_descriptors(); + if (map_obj->owns_descriptors() && + array != heap->empty_descriptor_array()) { + int fixed_array_size = array->Size(); + 
heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE, + fixed_array_size); + } + if (map_obj->HasTransitionArray()) { + int fixed_array_size = map_obj->transitions()->Size(); + heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE, + fixed_array_size); + } + if (map_obj->has_code_cache()) { + CodeCache* cache = CodeCache::cast(map_obj->code_cache()); + heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE, + cache->default_cache()->Size()); + if (!cache->normal_type_cache()->IsUndefined()) { + heap->RecordFixedArraySubTypeStats( + MAP_CODE_CACHE_SUB_TYPE, + FixedArray::cast(cache->normal_type_cache())->Size()); + } + } + ObjectStatsVisitBase(kVisitMap, map, obj); + } +}; + + +template <> +class MarkCompactMarkingVisitor::ObjectStatsTracker< + MarkCompactMarkingVisitor::kVisitCode> { + public: + static inline void Visit(Map* map, HeapObject* obj) { + Heap* heap = map->GetHeap(); + int object_size = obj->Size(); + DCHECK(map->instance_type() == CODE_TYPE); + Code* code_obj = Code::cast(obj); + heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(), + object_size); + ObjectStatsVisitBase(kVisitCode, map, obj); + } +}; + + +template <> +class MarkCompactMarkingVisitor::ObjectStatsTracker< + MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> { + public: + static inline void Visit(Map* map, HeapObject* obj) { + Heap* heap = map->GetHeap(); + SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj); + if (sfi->scope_info() != heap->empty_fixed_array()) { + heap->RecordFixedArraySubTypeStats( + SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size()); + } + ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj); + } +}; + + +template <> +class MarkCompactMarkingVisitor::ObjectStatsTracker< + MarkCompactMarkingVisitor::kVisitFixedArray> { + public: + static inline void Visit(Map* map, HeapObject* obj) { + Heap* heap = map->GetHeap(); + FixedArray* fixed_array = FixedArray::cast(obj); + if (fixed_array == heap->string_table()) { 
+ heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE, + fixed_array->Size()); + } + ObjectStatsVisitBase(kVisitFixedArray, map, obj); + } +}; + + +void MarkCompactMarkingVisitor::Initialize() { + StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize(); + + table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode); + + if (FLAG_track_gc_object_stats) { + // Copy the visitor table to make call-through possible. + non_count_table_.CopyFrom(&table_); +#define VISITOR_ID_COUNT_FUNCTION(id) \ + table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); + VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) +#undef VISITOR_ID_COUNT_FUNCTION + } +} + + +VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> + MarkCompactMarkingVisitor::non_count_table_; + + +class CodeMarkingVisitor : public ThreadVisitor { + public: + explicit CodeMarkingVisitor(MarkCompactCollector* collector) + : collector_(collector) {} + + void VisitThread(Isolate* isolate, ThreadLocalTop* top) { + collector_->PrepareThreadForCodeFlushing(isolate, top); + } + + private: + MarkCompactCollector* collector_; +}; + + +class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { + public: + explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) + : collector_(collector) {} + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) VisitPointer(p); + } + + void VisitPointer(Object** slot) { + Object* obj = *slot; + if (obj->IsSharedFunctionInfo()) { + SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); + MarkBit shared_mark = Marking::MarkBitFrom(shared); + MarkBit code_mark = Marking::MarkBitFrom(shared->code()); + collector_->MarkObject(shared->code(), code_mark); + collector_->MarkObject(shared, shared_mark); + } + } + + private: + MarkCompactCollector* collector_; +}; + + +void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate, + ThreadLocalTop* top) { + for (StackFrameIterator it(isolate, 
top); !it.done(); it.Advance()) { + // Note: for the frame that has a pending lazy deoptimization + // StackFrame::unchecked_code will return a non-optimized code object for + // the outermost function and StackFrame::LookupCode will return + // actual optimized code object. + StackFrame* frame = it.frame(); + Code* code = frame->unchecked_code(); + MarkBit code_mark = Marking::MarkBitFrom(code); + MarkObject(code, code_mark); + if (frame->is_optimized()) { + MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(), + frame->LookupCode()); + } + } +} + + +void MarkCompactCollector::PrepareForCodeFlushing() { + // Enable code flushing for non-incremental cycles. + if (FLAG_flush_code && !FLAG_flush_code_incrementally) { + EnableCodeFlushing(!was_marked_incrementally_); + } + + // If code flushing is disabled, there is no need to prepare for it. + if (!is_code_flushing_enabled()) return; + + // Ensure that empty descriptor array is marked. Method MarkDescriptorArray + // relies on it being marked before any other descriptor array. + HeapObject* descriptor_array = heap()->empty_descriptor_array(); + MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); + MarkObject(descriptor_array, descriptor_array_mark); + + // Make sure we are not referencing the code from the stack. + DCHECK(this == heap()->mark_compact_collector()); + PrepareThreadForCodeFlushing(heap()->isolate(), + heap()->isolate()->thread_local_top()); + + // Iterate the archived stacks in all threads to check if + // the code is referenced. + CodeMarkingVisitor code_marking_visitor(this); + heap()->isolate()->thread_manager()->IterateArchivedThreads( + &code_marking_visitor); + + SharedFunctionInfoMarkingVisitor visitor(this); + heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); + heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); + + ProcessMarkingDeque(); +} + + +// Visitor class for marking heap roots. 
+class RootMarkingVisitor : public ObjectVisitor { + public: + explicit RootMarkingVisitor(Heap* heap) + : collector_(heap->mark_compact_collector()) {} + + void VisitPointer(Object** p) { MarkObjectByPointer(p); } + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) MarkObjectByPointer(p); + } + + // Skip the weak next code link in a code object, which is visited in + // ProcessTopOptimizedFrame. + void VisitNextCodeLink(Object** p) {} + + private: + void MarkObjectByPointer(Object** p) { + if (!(*p)->IsHeapObject()) return; + + // Replace flat cons strings in place. + HeapObject* object = ShortCircuitConsString(p); + MarkBit mark_bit = Marking::MarkBitFrom(object); + if (mark_bit.Get()) return; + + Map* map = object->map(); + // Mark the object. + collector_->SetMark(object, mark_bit); + + // Mark the map pointer and body, and push them on the marking stack. + MarkBit map_mark = Marking::MarkBitFrom(map); + collector_->MarkObject(map, map_mark); + MarkCompactMarkingVisitor::IterateBody(map, object); + + // Mark all the objects reachable from the map and body. May leave + // overflowed objects in the heap. + collector_->EmptyMarkingDeque(); + } + + MarkCompactCollector* collector_; +}; + + +// Helper class for pruning the string table. +template <bool finalize_external_strings> +class StringTableCleaner : public ObjectVisitor { + public: + explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {} + + virtual void VisitPointers(Object** start, Object** end) { + // Visit all HeapObject pointers in [start, end). + for (Object** p = start; p < end; p++) { + Object* o = *p; + if (o->IsHeapObject() && + !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { + if (finalize_external_strings) { + DCHECK(o->IsExternalString()); + heap_->FinalizeExternalString(String::cast(*p)); + } else { + pointers_removed_++; + } + // Set the entry to the_hole_value (as deleted). 
+ *p = heap_->the_hole_value(); + } + } + } + + int PointersRemoved() { + DCHECK(!finalize_external_strings); + return pointers_removed_; + } + + private: + Heap* heap_; + int pointers_removed_; +}; + + +typedef StringTableCleaner<false> InternalizedStringTableCleaner; +typedef StringTableCleaner<true> ExternalStringTableCleaner; + + +// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects +// are retained. +class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { + public: + virtual Object* RetainAs(Object* object) { + if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { + return object; + } else if (object->IsAllocationSite() && + !(AllocationSite::cast(object)->IsZombie())) { + // "dead" AllocationSites need to live long enough for a traversal of new + // space. These sites get a one-time reprieve. + AllocationSite* site = AllocationSite::cast(object); + site->MarkZombie(); + site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site); + return object; + } else { + return NULL; + } + } +}; + + +// Fill the marking stack with overflowed objects returned by the given +// iterator. Stop when the marking stack is filled or the end of the space +// is reached, whichever comes first. +template <class T> +static void DiscoverGreyObjectsWithIterator(Heap* heap, + MarkingDeque* marking_deque, + T* it) { + // The caller should ensure that the marking stack is initially not full, + // so that we don't waste effort pointlessly scanning for objects. 
+ DCHECK(!marking_deque->IsFull()); + + Map* filler_map = heap->one_pointer_filler_map(); + for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) { + MarkBit markbit = Marking::MarkBitFrom(object); + if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { + Marking::GreyToBlack(markbit); + MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); + marking_deque->PushBlack(object); + if (marking_deque->IsFull()) return; + } + } +} + + +static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); + + +static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, + MemoryChunk* p) { + DCHECK(!marking_deque->IsFull()); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { + Address cell_base = it.CurrentCellBase(); + MarkBit::CellType* cell = it.CurrentCell(); + + const MarkBit::CellType current_cell = *cell; + if (current_cell == 0) continue; + + MarkBit::CellType grey_objects; + if (it.HasNext()) { + const MarkBit::CellType next_cell = *(cell + 1); + grey_objects = current_cell & ((current_cell >> 1) | + (next_cell << (Bitmap::kBitsPerCell - 1))); + } else { + grey_objects = current_cell & (current_cell >> 1); + } + + int offset = 0; + while (grey_objects != 0) { + int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); + grey_objects >>= trailing_zeros; + offset += trailing_zeros; + MarkBit markbit(cell, 1 << offset, false); + DCHECK(Marking::IsGrey(markbit)); + Marking::GreyToBlack(markbit); + Address addr = cell_base + offset * kPointerSize; + HeapObject* object = HeapObject::FromAddress(addr); + MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); + marking_deque->PushBlack(object); + if (marking_deque->IsFull()) return; + 
offset += 2; + grey_objects >>= 2; + } + + grey_objects >>= (Bitmap::kBitsPerCell - 1); + } +} + + +int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage( + NewSpace* new_space, NewSpacePage* p) { + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + MarkBit::CellType* cells = p->markbits()->cells(); + int survivors_size = 0; + + for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { + Address cell_base = it.CurrentCellBase(); + MarkBit::CellType* cell = it.CurrentCell(); + + MarkBit::CellType current_cell = *cell; + if (current_cell == 0) continue; + + int offset = 0; + while (current_cell != 0) { + int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell); + current_cell >>= trailing_zeros; + offset += trailing_zeros; + Address address = cell_base + offset * kPointerSize; + HeapObject* object = HeapObject::FromAddress(address); + + int size = object->Size(); + survivors_size += size; + + Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); + + offset++; + current_cell >>= 1; + + // TODO(hpayer): Refactor EvacuateObject and call this function instead. + if (heap()->ShouldBePromoted(object->address(), size) && + TryPromoteObject(object, size)) { + continue; + } + + AllocationResult allocation = new_space->AllocateRaw(size); + if (allocation.IsRetry()) { + if (!new_space->AddFreshPage()) { + // Shouldn't happen. We are sweeping linearly, and to-space + // has the same number of pages as from-space, so there is + // always room. 
+ UNREACHABLE(); + } + allocation = new_space->AllocateRaw(size); + DCHECK(!allocation.IsRetry()); + } + Object* target = allocation.ToObjectChecked(); + + MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE); + heap()->IncrementSemiSpaceCopiedObjectSize(size); + } + *cells = 0; + } + return survivors_size; +} + + +static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque, + PagedSpace* space) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + DiscoverGreyObjectsOnPage(marking_deque, p); + if (marking_deque->IsFull()) return; + } +} + + +static void DiscoverGreyObjectsInNewSpace(Heap* heap, + MarkingDeque* marking_deque) { + NewSpace* space = heap->new_space(); + NewSpacePageIterator it(space->bottom(), space->top()); + while (it.has_next()) { + NewSpacePage* page = it.next(); + DiscoverGreyObjectsOnPage(marking_deque, page); + if (marking_deque->IsFull()) return; + } +} + + +bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { + Object* o = *p; + if (!o->IsHeapObject()) return false; + HeapObject* heap_object = HeapObject::cast(o); + MarkBit mark = Marking::MarkBitFrom(heap_object); + return !mark.Get(); +} + + +bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap, + Object** p) { + Object* o = *p; + DCHECK(o->IsHeapObject()); + HeapObject* heap_object = HeapObject::cast(o); + MarkBit mark = Marking::MarkBitFrom(heap_object); + return !mark.Get(); +} + + +void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) { + StringTable* string_table = heap()->string_table(); + // Mark the string table itself. + MarkBit string_table_mark = Marking::MarkBitFrom(string_table); + if (!string_table_mark.Get()) { + // String table could have already been marked by visiting the handles list. + SetMark(string_table, string_table_mark); + } + // Explicitly mark the prefix. 
+ string_table->IteratePrefix(visitor); + ProcessMarkingDeque(); +} + + +void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) { + MarkBit mark_bit = Marking::MarkBitFrom(site); + SetMark(site, mark_bit); +} + + +void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { + // Mark the heap roots including global variables, stack variables, + // etc., and all objects reachable from them. + heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); + + // Handle the string table specially. + MarkStringTable(visitor); + + MarkWeakObjectToCodeTable(); + + // There may be overflowed objects in the heap. Visit them now. + while (marking_deque_.overflowed()) { + RefillMarkingDeque(); + EmptyMarkingDeque(); + } +} + + +void MarkCompactCollector::MarkImplicitRefGroups() { + List<ImplicitRefGroup*>* ref_groups = + isolate()->global_handles()->implicit_ref_groups(); + + int last = 0; + for (int i = 0; i < ref_groups->length(); i++) { + ImplicitRefGroup* entry = ref_groups->at(i); + DCHECK(entry != NULL); + + if (!IsMarked(*entry->parent)) { + (*ref_groups)[last++] = entry; + continue; + } + + Object*** children = entry->children; + // A parent object is marked, so mark all child heap objects. + for (size_t j = 0; j < entry->length; ++j) { + if ((*children[j])->IsHeapObject()) { + HeapObject* child = HeapObject::cast(*children[j]); + MarkBit mark = Marking::MarkBitFrom(child); + MarkObject(child, mark); + } + } + + // Once the entire group has been marked, dispose it because it's + // not needed anymore. + delete entry; + } + ref_groups->Rewind(last); +} + + +void MarkCompactCollector::MarkWeakObjectToCodeTable() { + HeapObject* weak_object_to_code_table = + HeapObject::cast(heap()->weak_object_to_code_table()); + if (!IsMarked(weak_object_to_code_table)) { + MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table); + SetMark(weak_object_to_code_table, mark); + } +} + + +// Mark all objects reachable from the objects on the marking stack. 
+// Before: the marking stack contains zero or more heap object pointers. +// After: the marking stack is empty, and all objects reachable from the +// marking stack have been marked, or are overflowed in the heap. +void MarkCompactCollector::EmptyMarkingDeque() { + while (!marking_deque_.IsEmpty()) { + HeapObject* object = marking_deque_.Pop(); + DCHECK(object->IsHeapObject()); + DCHECK(heap()->Contains(object)); + DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); + + Map* map = object->map(); + MarkBit map_mark = Marking::MarkBitFrom(map); + MarkObject(map, map_mark); + + MarkCompactMarkingVisitor::IterateBody(map, object); + } +} + + +// Sweep the heap for overflowed objects, clear their overflow bits, and +// push them on the marking stack. Stop early if the marking stack fills +// before sweeping completes. If sweeping completes, there are no remaining +// overflowed objects in the heap so the overflow flag on the markings stack +// is cleared. +void MarkCompactCollector::RefillMarkingDeque() { + DCHECK(marking_deque_.overflowed()); + + DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_); + if (marking_deque_.IsFull()) return; + + DiscoverGreyObjectsInSpace(heap(), &marking_deque_, + heap()->old_pointer_space()); + if (marking_deque_.IsFull()) return; + + DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space()); + if (marking_deque_.IsFull()) return; + + DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space()); + if (marking_deque_.IsFull()) return; + + DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space()); + if (marking_deque_.IsFull()) return; + + DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space()); + if (marking_deque_.IsFull()) return; + + DiscoverGreyObjectsInSpace(heap(), &marking_deque_, + heap()->property_cell_space()); + if (marking_deque_.IsFull()) return; + + LargeObjectIterator lo_it(heap()->lo_space()); + DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, 
&lo_it); + if (marking_deque_.IsFull()) return; + + marking_deque_.ClearOverflowed(); +} + + +// Mark all objects reachable (transitively) from objects on the marking +// stack. Before: the marking stack contains zero or more heap object +// pointers. After: the marking stack is empty and there are no overflowed +// objects in the heap. +void MarkCompactCollector::ProcessMarkingDeque() { + EmptyMarkingDeque(); + while (marking_deque_.overflowed()) { + RefillMarkingDeque(); + EmptyMarkingDeque(); + } +} + + +// Mark all objects reachable (transitively) from objects on the marking +// stack including references only considered in the atomic marking pause. +void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) { + bool work_to_do = true; + DCHECK(marking_deque_.IsEmpty()); + while (work_to_do) { + isolate()->global_handles()->IterateObjectGroups( + visitor, &IsUnmarkedHeapObjectWithHeap); + MarkImplicitRefGroups(); + ProcessWeakCollections(); + work_to_do = !marking_deque_.IsEmpty(); + ProcessMarkingDeque(); + } +} + + +void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { + for (StackFrameIterator it(isolate(), isolate()->thread_local_top()); + !it.done(); it.Advance()) { + if (it.frame()->type() == StackFrame::JAVA_SCRIPT) { + return; + } + if (it.frame()->type() == StackFrame::OPTIMIZED) { + Code* code = it.frame()->LookupCode(); + if (!code->CanDeoptAt(it.frame()->pc())) { + code->CodeIterateBody(visitor); + } + ProcessMarkingDeque(); + return; + } + } +} + + +void MarkCompactCollector::MarkLiveObjects() { + GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); + double start_time = 0.0; + if (FLAG_print_cumulative_gc_stat) { + start_time = base::OS::TimeCurrentMillis(); + } + // The recursive GC marker detects when it is nearing stack overflow, + // and switches to a different marking system. JS interrupts interfere + // with the C stack limit check. 
+ PostponeInterruptsScope postpone(isolate()); + + bool incremental_marking_overflowed = false; + IncrementalMarking* incremental_marking = heap_->incremental_marking(); + if (was_marked_incrementally_) { + // Finalize the incremental marking and check whether we had an overflow. + // Both markers use grey color to mark overflowed objects so + // non-incremental marker can deal with them as if overflow + // occured during normal marking. + // But incremental marker uses a separate marking deque + // so we have to explicitly copy its overflow state. + incremental_marking->Finalize(); + incremental_marking_overflowed = + incremental_marking->marking_deque()->overflowed(); + incremental_marking->marking_deque()->ClearOverflowed(); + } else { + // Abort any pending incremental activities e.g. incremental sweeping. + incremental_marking->Abort(); + } + +#ifdef DEBUG + DCHECK(state_ == PREPARE_GC); + state_ = MARK_LIVE_OBJECTS; +#endif + // The to space contains live objects, a page in from space is used as a + // marking stack. + Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); + Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); + if (FLAG_force_marking_deque_overflows) { + marking_deque_end = marking_deque_start + 64 * kPointerSize; + } + marking_deque_.Initialize(marking_deque_start, marking_deque_end); + DCHECK(!marking_deque_.overflowed()); + + if (incremental_marking_overflowed) { + // There are overflowed objects left in the heap after incremental marking. + marking_deque_.SetOverflowed(); + } + + PrepareForCodeFlushing(); + + if (was_marked_incrementally_) { + // There is no write barrier on cells so we have to scan them now at the end + // of the incremental marking. 
+ { + HeapObjectIterator cell_iterator(heap()->cell_space()); + HeapObject* cell; + while ((cell = cell_iterator.Next()) != NULL) { + DCHECK(cell->IsCell()); + if (IsMarked(cell)) { + int offset = Cell::kValueOffset; + MarkCompactMarkingVisitor::VisitPointer( + heap(), reinterpret_cast<Object**>(cell->address() + offset)); + } + } + } + { + HeapObjectIterator js_global_property_cell_iterator( + heap()->property_cell_space()); + HeapObject* cell; + while ((cell = js_global_property_cell_iterator.Next()) != NULL) { + DCHECK(cell->IsPropertyCell()); + if (IsMarked(cell)) { + MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell); + } + } + } + } + + RootMarkingVisitor root_visitor(heap()); + MarkRoots(&root_visitor); + + ProcessTopOptimizedFrame(&root_visitor); + + // The objects reachable from the roots are marked, yet unreachable + // objects are unmarked. Mark objects reachable due to host + // application specific logic or through Harmony weak maps. + ProcessEphemeralMarking(&root_visitor); + + // The objects reachable from the roots, weak maps or object groups + // are marked, yet unreachable objects are unmarked. Mark objects + // reachable only from weak global handles. + // + // First we identify nonlive weak handles and mark them as pending + // destruction. + heap()->isolate()->global_handles()->IdentifyWeakHandles( + &IsUnmarkedHeapObject); + // Then we mark the objects and process the transitive closure. + heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); + while (marking_deque_.overflowed()) { + RefillMarkingDeque(); + EmptyMarkingDeque(); + } + + // Repeat host application specific and Harmony weak maps marking to + // mark unmarked objects reachable from the weak roots. 
+ ProcessEphemeralMarking(&root_visitor); + + AfterMarking(); + + if (FLAG_print_cumulative_gc_stat) { + heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time); + } +} + + +void MarkCompactCollector::AfterMarking() { + // Object literal map caches reference strings (cache keys) and maps + // (cache values). At this point still useful maps have already been + // marked. Mark the keys for the alive values before we process the + // string table. + ProcessMapCaches(); + + // Prune the string table removing all strings only pointed to by the + // string table. Cannot use string_table() here because the string + // table is marked. + StringTable* string_table = heap()->string_table(); + InternalizedStringTableCleaner internalized_visitor(heap()); + string_table->IterateElements(&internalized_visitor); + string_table->ElementsRemoved(internalized_visitor.PointersRemoved()); + + ExternalStringTableCleaner external_visitor(heap()); + heap()->external_string_table_.Iterate(&external_visitor); + heap()->external_string_table_.CleanUp(); + + // Process the weak references. + MarkCompactWeakObjectRetainer mark_compact_object_retainer; + heap()->ProcessWeakReferences(&mark_compact_object_retainer); + + // Remove object groups after marking phase. + heap()->isolate()->global_handles()->RemoveObjectGroups(); + heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); + + // Flush code from collected candidates. + if (is_code_flushing_enabled()) { + code_flusher_->ProcessCandidates(); + // If incremental marker does not support code flushing, we need to + // disable it before incremental marking steps for next cycle. 
+ if (FLAG_flush_code && !FLAG_flush_code_incrementally) { + EnableCodeFlushing(false); + } + } + + if (FLAG_track_gc_object_stats) { + heap()->CheckpointObjectStats(); + } +} + + +void MarkCompactCollector::ProcessMapCaches() { + Object* raw_context = heap()->native_contexts_list(); + while (raw_context != heap()->undefined_value()) { + Context* context = reinterpret_cast<Context*>(raw_context); + if (IsMarked(context)) { + HeapObject* raw_map_cache = + HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); + // A map cache may be reachable from the stack. In this case + // it's already transitively marked and it's too late to clean + // up its parts. + if (!IsMarked(raw_map_cache) && + raw_map_cache != heap()->undefined_value()) { + MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); + int existing_elements = map_cache->NumberOfElements(); + int used_elements = 0; + for (int i = MapCache::kElementsStartIndex; i < map_cache->length(); + i += MapCache::kEntrySize) { + Object* raw_key = map_cache->get(i); + if (raw_key == heap()->undefined_value() || + raw_key == heap()->the_hole_value()) + continue; + STATIC_ASSERT(MapCache::kEntrySize == 2); + Object* raw_map = map_cache->get(i + 1); + if (raw_map->IsHeapObject() && IsMarked(raw_map)) { + ++used_elements; + } else { + // Delete useless entries with unmarked maps. + DCHECK(raw_map->IsMap()); + map_cache->set_the_hole(i); + map_cache->set_the_hole(i + 1); + } + } + if (used_elements == 0) { + context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); + } else { + // Note: we don't actually shrink the cache here to avoid + // extra complexity during GC. We rely on subsequent cache + // usages (EnsureCapacity) to do this. + map_cache->ElementsRemoved(existing_elements - used_elements); + MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache); + MarkObject(map_cache, map_cache_markbit); + } + } + } + // Move to next element in the list. 
+ raw_context = context->get(Context::NEXT_CONTEXT_LINK); + } + ProcessMarkingDeque(); +} + + +void MarkCompactCollector::ClearNonLiveReferences() { + // Iterate over the map space, setting map transitions that go from + // a marked map to an unmarked map to null transitions. This action + // is carried out only on maps of JSObjects and related subtypes. + HeapObjectIterator map_iterator(heap()->map_space()); + for (HeapObject* obj = map_iterator.Next(); obj != NULL; + obj = map_iterator.Next()) { + Map* map = Map::cast(obj); + + if (!map->CanTransition()) continue; + + MarkBit map_mark = Marking::MarkBitFrom(map); + ClearNonLivePrototypeTransitions(map); + ClearNonLiveMapTransitions(map, map_mark); + + if (map_mark.Get()) { + ClearNonLiveDependentCode(map->dependent_code()); + } else { + ClearDependentCode(map->dependent_code()); + map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); + } + } + + // Iterate over property cell space, removing dependent code that is not + // otherwise kept alive by strong references. + HeapObjectIterator cell_iterator(heap_->property_cell_space()); + for (HeapObject* cell = cell_iterator.Next(); cell != NULL; + cell = cell_iterator.Next()) { + if (IsMarked(cell)) { + ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code()); + } + } + + // Iterate over allocation sites, removing dependent code that is not + // otherwise kept alive by strong references. 
+ Object* undefined = heap()->undefined_value(); + for (Object* site = heap()->allocation_sites_list(); site != undefined; + site = AllocationSite::cast(site)->weak_next()) { + if (IsMarked(site)) { + ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code()); + } + } + + if (heap_->weak_object_to_code_table()->IsHashTable()) { + WeakHashTable* table = + WeakHashTable::cast(heap_->weak_object_to_code_table()); + uint32_t capacity = table->Capacity(); + for (uint32_t i = 0; i < capacity; i++) { + uint32_t key_index = table->EntryToIndex(i); + Object* key = table->get(key_index); + if (!table->IsKey(key)) continue; + uint32_t value_index = table->EntryToValueIndex(i); + Object* value = table->get(value_index); + if (key->IsCell() && !IsMarked(key)) { + Cell* cell = Cell::cast(key); + Object* object = cell->value(); + if (IsMarked(object)) { + MarkBit mark = Marking::MarkBitFrom(cell); + SetMark(cell, mark); + Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset); + RecordSlot(value_slot, value_slot, *value_slot); + } + } + if (IsMarked(key)) { + if (!IsMarked(value)) { + HeapObject* obj = HeapObject::cast(value); + MarkBit mark = Marking::MarkBitFrom(obj); + SetMark(obj, mark); + } + ClearNonLiveDependentCode(DependentCode::cast(value)); + } else { + ClearDependentCode(DependentCode::cast(value)); + table->set(key_index, heap_->the_hole_value()); + table->set(value_index, heap_->the_hole_value()); + table->ElementRemoved(); + } + } + } +} + + +void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { + int number_of_transitions = map->NumberOfProtoTransitions(); + FixedArray* prototype_transitions = map->GetPrototypeTransitions(); + + int new_number_of_transitions = 0; + const int header = Map::kProtoTransitionHeaderSize; + const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; + const int map_offset = header + Map::kProtoTransitionMapOffset; + const int step = Map::kProtoTransitionElementsPerEntry; + for 
(int i = 0; i < number_of_transitions; i++) { + Object* prototype = prototype_transitions->get(proto_offset + i * step); + Object* cached_map = prototype_transitions->get(map_offset + i * step); + if (IsMarked(prototype) && IsMarked(cached_map)) { + DCHECK(!prototype->IsUndefined()); + int proto_index = proto_offset + new_number_of_transitions * step; + int map_index = map_offset + new_number_of_transitions * step; + if (new_number_of_transitions != i) { + prototype_transitions->set(proto_index, prototype, + UPDATE_WRITE_BARRIER); + prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER); + } + Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index); + RecordSlot(slot, slot, prototype); + new_number_of_transitions++; + } + } + + if (new_number_of_transitions != number_of_transitions) { + map->SetNumberOfProtoTransitions(new_number_of_transitions); + } + + // Fill slots that became free with undefined value. + for (int i = new_number_of_transitions * step; + i < number_of_transitions * step; i++) { + prototype_transitions->set_undefined(header + i); + } +} + + +void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, + MarkBit map_mark) { + Object* potential_parent = map->GetBackPointer(); + if (!potential_parent->IsMap()) return; + Map* parent = Map::cast(potential_parent); + + // Follow back pointer, check whether we are dealing with a map transition + // from a live map to a dead path and in case clear transitions of parent. + bool current_is_alive = map_mark.Get(); + bool parent_is_alive = Marking::MarkBitFrom(parent).Get(); + if (!current_is_alive && parent_is_alive) { + ClearMapTransitions(parent); + } +} + + +// Clear a possible back pointer in case the transition leads to a dead map. +// Return true in case a back pointer has been cleared and false otherwise. 
+bool MarkCompactCollector::ClearMapBackPointer(Map* target) { + if (Marking::MarkBitFrom(target).Get()) return false; + target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER); + return true; +} + + +void MarkCompactCollector::ClearMapTransitions(Map* map) { + // If there are no transitions to be cleared, return. + // TODO(verwaest) Should be an assert, otherwise back pointers are not + // properly cleared. + if (!map->HasTransitionArray()) return; + + TransitionArray* t = map->transitions(); + + int transition_index = 0; + + DescriptorArray* descriptors = map->instance_descriptors(); + bool descriptors_owner_died = false; + + // Compact all live descriptors to the left. + for (int i = 0; i < t->number_of_transitions(); ++i) { + Map* target = t->GetTarget(i); + if (ClearMapBackPointer(target)) { + if (target->instance_descriptors() == descriptors) { + descriptors_owner_died = true; + } + } else { + if (i != transition_index) { + Name* key = t->GetKey(i); + t->SetKey(transition_index, key); + Object** key_slot = t->GetKeySlot(transition_index); + RecordSlot(key_slot, key_slot, key); + // Target slots do not need to be recorded since maps are not compacted. + t->SetTarget(transition_index, t->GetTarget(i)); + } + transition_index++; + } + } + + // If there are no transitions to be cleared, return. + // TODO(verwaest) Should be an assert, otherwise back pointers are not + // properly cleared. 
+ if (transition_index == t->number_of_transitions()) return; + + int number_of_own_descriptors = map->NumberOfOwnDescriptors(); + + if (descriptors_owner_died) { + if (number_of_own_descriptors > 0) { + TrimDescriptorArray(map, descriptors, number_of_own_descriptors); + DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors); + map->set_owns_descriptors(true); + } else { + DCHECK(descriptors == heap_->empty_descriptor_array()); + } + } + + // Note that we never eliminate a transition array, though we might right-trim + // such that number_of_transitions() == 0. If this assumption changes, + // TransitionArray::CopyInsert() will need to deal with the case that a + // transition array disappeared during GC. + int trim = t->number_of_transitions() - transition_index; + if (trim > 0) { + heap_->RightTrimFixedArray<Heap::FROM_GC>( + t, t->IsSimpleTransition() ? trim + : trim * TransitionArray::kTransitionSize); + } + DCHECK(map->HasTransitionArray()); +} + + +void MarkCompactCollector::TrimDescriptorArray(Map* map, + DescriptorArray* descriptors, + int number_of_own_descriptors) { + int number_of_descriptors = descriptors->number_of_descriptors_storage(); + int to_trim = number_of_descriptors - number_of_own_descriptors; + if (to_trim == 0) return; + + heap_->RightTrimFixedArray<Heap::FROM_GC>( + descriptors, to_trim * DescriptorArray::kDescriptorSize); + descriptors->SetNumberOfDescriptors(number_of_own_descriptors); + + if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors); + descriptors->Sort(); +} + + +void MarkCompactCollector::TrimEnumCache(Map* map, + DescriptorArray* descriptors) { + int live_enum = map->EnumLength(); + if (live_enum == kInvalidEnumCacheSentinel) { + live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM); + } + if (live_enum == 0) return descriptors->ClearEnumCache(); + + FixedArray* enum_cache = descriptors->GetEnumCache(); + + int to_trim = enum_cache->length() - live_enum; + if (to_trim <= 0) 
return; + heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(), + to_trim); + + if (!descriptors->HasEnumIndicesCache()) return; + FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache(); + heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim); +} + + +void MarkCompactCollector::ClearDependentICList(Object* head) { + Object* current = head; + Object* undefined = heap()->undefined_value(); + while (current != undefined) { + Code* code = Code::cast(current); + if (IsMarked(code)) { + DCHECK(code->is_weak_stub()); + IC::InvalidateMaps(code); + } + current = code->next_code_link(); + code->set_next_code_link(undefined); + } +} + + +void MarkCompactCollector::ClearDependentCode(DependentCode* entries) { + DisallowHeapAllocation no_allocation; + DependentCode::GroupStartIndexes starts(entries); + int number_of_entries = starts.number_of_entries(); + if (number_of_entries == 0) return; + int g = DependentCode::kWeakICGroup; + if (starts.at(g) != starts.at(g + 1)) { + int i = starts.at(g); + DCHECK(i + 1 == starts.at(g + 1)); + Object* head = entries->object_at(i); + ClearDependentICList(head); + } + g = DependentCode::kWeakCodeGroup; + for (int i = starts.at(g); i < starts.at(g + 1); i++) { + // If the entry is compilation info then the map must be alive, + // and ClearDependentCode shouldn't be called. 
+ DCHECK(entries->is_code_at(i)); + Code* code = entries->code_at(i); + if (IsMarked(code) && !code->marked_for_deoptimization()) { + code->set_marked_for_deoptimization(true); + code->InvalidateEmbeddedObjects(); + have_code_to_deoptimize_ = true; + } + } + for (int i = 0; i < number_of_entries; i++) { + entries->clear_at(i); + } +} + + +int MarkCompactCollector::ClearNonLiveDependentCodeInGroup( + DependentCode* entries, int group, int start, int end, int new_start) { + int survived = 0; + if (group == DependentCode::kWeakICGroup) { + // Dependent weak IC stubs form a linked list and only the head is stored + // in the dependent code array. + if (start != end) { + DCHECK(start + 1 == end); + Object* old_head = entries->object_at(start); + MarkCompactWeakObjectRetainer retainer; + Object* head = VisitWeakList<Code>(heap(), old_head, &retainer); + entries->set_object_at(new_start, head); + Object** slot = entries->slot_at(new_start); + RecordSlot(slot, slot, head); + // We do not compact this group even if the head is undefined, + // more dependent ICs are likely to be added later. 
+ survived = 1; + } + } else { + for (int i = start; i < end; i++) { + Object* obj = entries->object_at(i); + DCHECK(obj->IsCode() || IsMarked(obj)); + if (IsMarked(obj) && + (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) { + if (new_start + survived != i) { + entries->set_object_at(new_start + survived, obj); + } + Object** slot = entries->slot_at(new_start + survived); + RecordSlot(slot, slot, obj); + survived++; + } + } + } + entries->set_number_of_entries( + static_cast<DependentCode::DependencyGroup>(group), survived); + return survived; +} + + +void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { + DisallowHeapAllocation no_allocation; + DependentCode::GroupStartIndexes starts(entries); + int number_of_entries = starts.number_of_entries(); + if (number_of_entries == 0) return; + int new_number_of_entries = 0; + // Go through all groups, remove dead codes and compact. + for (int g = 0; g < DependentCode::kGroupCount; g++) { + int survived = ClearNonLiveDependentCodeInGroup( + entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries); + new_number_of_entries += survived; + } + for (int i = new_number_of_entries; i < number_of_entries; i++) { + entries->clear_at(i); + } +} + + +void MarkCompactCollector::ProcessWeakCollections() { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS); + Object* weak_collection_obj = heap()->encountered_weak_collections(); + while (weak_collection_obj != Smi::FromInt(0)) { + JSWeakCollection* weak_collection = + reinterpret_cast<JSWeakCollection*>(weak_collection_obj); + DCHECK(MarkCompactCollector::IsMarked(weak_collection)); + if (weak_collection->table()->IsHashTable()) { + ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table()); + Object** anchor = reinterpret_cast<Object**>(table->address()); + for (int i = 0; i < table->Capacity(); i++) { + if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { + Object** 
key_slot = + table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i)); + RecordSlot(anchor, key_slot, *key_slot); + Object** value_slot = + table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i)); + MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor, + value_slot); + } + } + } + weak_collection_obj = weak_collection->next(); + } +} + + +void MarkCompactCollector::ClearWeakCollections() { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR); + Object* weak_collection_obj = heap()->encountered_weak_collections(); + while (weak_collection_obj != Smi::FromInt(0)) { + JSWeakCollection* weak_collection = + reinterpret_cast<JSWeakCollection*>(weak_collection_obj); + DCHECK(MarkCompactCollector::IsMarked(weak_collection)); + if (weak_collection->table()->IsHashTable()) { + ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table()); + for (int i = 0; i < table->Capacity(); i++) { + HeapObject* key = HeapObject::cast(table->KeyAt(i)); + if (!MarkCompactCollector::IsMarked(key)) { + table->RemoveEntry(i); + } + } + } + weak_collection_obj = weak_collection->next(); + weak_collection->set_next(heap()->undefined_value()); + } + heap()->set_encountered_weak_collections(Smi::FromInt(0)); +} + + +void MarkCompactCollector::AbortWeakCollections() { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_WEAKCOLLECTION_ABORT); + Object* weak_collection_obj = heap()->encountered_weak_collections(); + while (weak_collection_obj != Smi::FromInt(0)) { + JSWeakCollection* weak_collection = + reinterpret_cast<JSWeakCollection*>(weak_collection_obj); + weak_collection_obj = weak_collection->next(); + weak_collection->set_next(heap()->undefined_value()); + } + heap()->set_encountered_weak_collections(Smi::FromInt(0)); +} + + +void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) { + if (heap_->InNewSpace(value)) { + heap_->store_buffer()->Mark(slot); + } else if (value->IsHeapObject() && 
IsOnEvacuationCandidate(value)) { + SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_, + reinterpret_cast<Object**>(slot), + SlotsBuffer::IGNORE_OVERFLOW); + } +} + + +// We scavange new space simultaneously with sweeping. This is done in two +// passes. +// +// The first pass migrates all alive objects from one semispace to another or +// promotes them to old space. Forwarding address is written directly into +// first word of object without any encoding. If object is dead we write +// NULL as a forwarding address. +// +// The second pass updates pointers to new space in all spaces. It is possible +// to encounter pointers to dead new space objects during traversal of pointers +// to new space. We should clear them to avoid encountering them during next +// pointer iteration. This is an issue if the store buffer overflows and we +// have to scan the entire old space, including dead objects, looking for +// pointers to new space. +void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src, + int size, AllocationSpace dest) { + Address dst_addr = dst->address(); + Address src_addr = src->address(); + DCHECK(heap()->AllowedToBeMigrated(src, dest)); + DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize); + if (dest == OLD_POINTER_SPACE) { + Address src_slot = src_addr; + Address dst_slot = dst_addr; + DCHECK(IsAligned(size, kPointerSize)); + + for (int remaining = size / kPointerSize; remaining > 0; remaining--) { + Object* value = Memory::Object_at(src_slot); + + Memory::Object_at(dst_slot) = value; + + // We special case ConstantPoolArrays below since they could contain + // integers value entries which look like tagged pointers. + // TODO(mstarzinger): restructure this code to avoid this special-casing. 
+ if (!src->IsConstantPoolArray()) { + RecordMigratedSlot(value, dst_slot); + } + + src_slot += kPointerSize; + dst_slot += kPointerSize; + } + + if (compacting_ && dst->IsJSFunction()) { + Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset; + Address code_entry = Memory::Address_at(code_entry_slot); + + if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { + SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_, + SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot, + SlotsBuffer::IGNORE_OVERFLOW); + } + } else if (dst->IsConstantPoolArray()) { + ConstantPoolArray* array = ConstantPoolArray::cast(dst); + ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR); + while (!code_iter.is_finished()) { + Address code_entry_slot = + dst_addr + array->OffsetOfElementAt(code_iter.next_index()); + Address code_entry = Memory::Address_at(code_entry_slot); + + if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { + SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_, + SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot, + SlotsBuffer::IGNORE_OVERFLOW); + } + } + ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR); + while (!heap_iter.is_finished()) { + Address heap_slot = + dst_addr + array->OffsetOfElementAt(heap_iter.next_index()); + Object* value = Memory::Object_at(heap_slot); + RecordMigratedSlot(value, heap_slot); + } + } + } else if (dest == CODE_SPACE) { + PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); + heap()->MoveBlock(dst_addr, src_addr, size); + SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_, + SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr, + SlotsBuffer::IGNORE_OVERFLOW); + Code::cast(dst)->Relocate(dst_addr - src_addr); + } else { + DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE); + heap()->MoveBlock(dst_addr, src_addr, size); + } + heap()->OnMoveEvent(dst, src, size); + Memory::Address_at(src_addr) = dst_addr; +} + + +// Visitor for 
updating pointers from live objects in old spaces to new space. +// It does not expect to encounter pointers to dead objects. +class PointersUpdatingVisitor : public ObjectVisitor { + public: + explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {} + + void VisitPointer(Object** p) { UpdatePointer(p); } + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) UpdatePointer(p); + } + + void VisitEmbeddedPointer(RelocInfo* rinfo) { + DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + Object* target = rinfo->target_object(); + Object* old_target = target; + VisitPointer(&target); + // Avoid unnecessary changes that might unnecessary flush the instruction + // cache. + if (target != old_target) { + rinfo->set_target_object(target); + } + } + + void VisitCodeTarget(RelocInfo* rinfo) { + DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); + Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); + Object* old_target = target; + VisitPointer(&target); + if (target != old_target) { + rinfo->set_target_address(Code::cast(target)->instruction_start()); + } + } + + void VisitCodeAgeSequence(RelocInfo* rinfo) { + DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); + Object* stub = rinfo->code_age_stub(); + DCHECK(stub != NULL); + VisitPointer(&stub); + if (stub != rinfo->code_age_stub()) { + rinfo->set_code_age_stub(Code::cast(stub)); + } + } + + void VisitDebugTarget(RelocInfo* rinfo) { + DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) && + rinfo->IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && + rinfo->IsPatchedDebugBreakSlotSequence())); + Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); + VisitPointer(&target); + rinfo->set_call_address(Code::cast(target)->instruction_start()); + } + + static inline void UpdateSlot(Heap* heap, Object** slot) { + Object* obj = *slot; + + if (!obj->IsHeapObject()) return; + + HeapObject* heap_obj = HeapObject::cast(obj); + 
+ MapWord map_word = heap_obj->map_word(); + if (map_word.IsForwardingAddress()) { + DCHECK(heap->InFromSpace(heap_obj) || + MarkCompactCollector::IsOnEvacuationCandidate(heap_obj)); + HeapObject* target = map_word.ToForwardingAddress(); + *slot = target; + DCHECK(!heap->InFromSpace(target) && + !MarkCompactCollector::IsOnEvacuationCandidate(target)); + } + } + + private: + inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); } + + Heap* heap_; +}; + + +static void UpdatePointer(HeapObject** address, HeapObject* object) { + Address new_addr = Memory::Address_at(object->address()); + + // The new space sweep will overwrite the map word of dead objects + // with NULL. In this case we do not need to transfer this entry to + // the store buffer which we are rebuilding. + // We perform the pointer update with a no barrier compare-and-swap. The + // compare and swap may fail in the case where the pointer update tries to + // update garbage memory which was concurrently accessed by the sweeper. 
+ if (new_addr != NULL) { + base::NoBarrier_CompareAndSwap( + reinterpret_cast<base::AtomicWord*>(address), + reinterpret_cast<base::AtomicWord>(object), + reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr))); + } +} + + +static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, + Object** p) { + MapWord map_word = HeapObject::cast(*p)->map_word(); + + if (map_word.IsForwardingAddress()) { + return String::cast(map_word.ToForwardingAddress()); + } + + return String::cast(*p); +} + + +bool MarkCompactCollector::TryPromoteObject(HeapObject* object, + int object_size) { + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); + + OldSpace* target_space = heap()->TargetSpace(object); + + DCHECK(target_space == heap()->old_pointer_space() || + target_space == heap()->old_data_space()); + HeapObject* target; + AllocationResult allocation = target_space->AllocateRaw(object_size); + if (allocation.To(&target)) { + MigrateObject(target, object, object_size, target_space->identity()); + heap()->IncrementPromotedObjectsSize(object_size); + return true; + } + + return false; +} + + +void MarkCompactCollector::EvacuateNewSpace() { + // There are soft limits in the allocation code, designed trigger a mark + // sweep collection by failing allocations. But since we are already in + // a mark-sweep allocation, there is no sense in trying to trigger one. + AlwaysAllocateScope scope(isolate()); + + NewSpace* new_space = heap()->new_space(); + + // Store allocation range before flipping semispaces. + Address from_bottom = new_space->bottom(); + Address from_top = new_space->top(); + + // Flip the semispaces. After flipping, to space is empty, from space has + // live objects. + new_space->Flip(); + new_space->ResetAllocationInfo(); + + int survivors_size = 0; + + // First pass: traverse all objects in inactive semispace, remove marks, + // migrate live objects and write forwarding addresses. 
This stage puts + // new entries in the store buffer and may cause some pages to be marked + // scan-on-scavenge. + NewSpacePageIterator it(from_bottom, from_top); + while (it.has_next()) { + NewSpacePage* p = it.next(); + survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); + } + + heap_->IncrementYoungSurvivorsCounter(survivors_size); + new_space->set_age_mark(new_space->top()); +} + + +void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { + AlwaysAllocateScope always_allocate(isolate()); + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); + p->MarkSweptPrecisely(); + + int offsets[16]; + + for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { + Address cell_base = it.CurrentCellBase(); + MarkBit::CellType* cell = it.CurrentCell(); + + if (*cell == 0) continue; + + int live_objects = MarkWordToObjectStarts(*cell, offsets); + for (int i = 0; i < live_objects; i++) { + Address object_addr = cell_base + offsets[i] * kPointerSize; + HeapObject* object = HeapObject::FromAddress(object_addr); + DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); + + int size = object->Size(); + + HeapObject* target_object; + AllocationResult allocation = space->AllocateRaw(size); + if (!allocation.To(&target_object)) { + // If allocation failed, use emergency memory and re-try allocation. + CHECK(space->HasEmergencyMemory()); + space->UseEmergencyMemory(); + allocation = space->AllocateRaw(size); + } + if (!allocation.To(&target_object)) { + // OS refused to give us memory. + V8::FatalProcessOutOfMemory("Evacuation"); + return; + } + + MigrateObject(target_object, object, size, space->identity()); + DCHECK(object->map_word().IsForwardingAddress()); + } + + // Clear marking bits for current cell. 
+ *cell = 0; + } + p->ResetLiveBytes(); +} + + +void MarkCompactCollector::EvacuatePages() { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + DCHECK(p->IsEvacuationCandidate() || + p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); + DCHECK(static_cast<int>(p->parallel_sweeping()) == + MemoryChunk::SWEEPING_DONE); + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + // Allocate emergency memory for the case when compaction fails due to out + // of memory. + if (!space->HasEmergencyMemory()) { + space->CreateEmergencyMemory(); + } + if (p->IsEvacuationCandidate()) { + // During compaction we might have to request a new page. Check that we + // have an emergency page and the space still has room for that. + if (space->HasEmergencyMemory() && space->CanExpand()) { + EvacuateLiveObjectsFromPage(p); + } else { + // Without room for expansion evacuation is not guaranteed to succeed. + // Pessimistically abandon unevacuated pages. + for (int j = i; j < npages; j++) { + Page* page = evacuation_candidates_[j]; + slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); + page->ClearEvacuationCandidate(); + page->SetFlag(Page::RESCAN_ON_EVACUATION); + } + break; + } + } + } + if (npages > 0) { + // Release emergency memory. 
+ PagedSpaces spaces(heap()); + for (PagedSpace* space = spaces.next(); space != NULL; + space = spaces.next()) { + if (space->HasEmergencyMemory()) { + space->FreeEmergencyMemory(); + } + } + } +} + + +class EvacuationWeakObjectRetainer : public WeakObjectRetainer { + public: + virtual Object* RetainAs(Object* object) { + if (object->IsHeapObject()) { + HeapObject* heap_object = HeapObject::cast(object); + MapWord map_word = heap_object->map_word(); + if (map_word.IsForwardingAddress()) { + return map_word.ToForwardingAddress(); + } + } + return object; + } +}; + + +static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v, + SlotsBuffer::SlotType slot_type, Address addr) { + switch (slot_type) { + case SlotsBuffer::CODE_TARGET_SLOT: { + RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); + rinfo.Visit(isolate, v); + break; + } + case SlotsBuffer::CODE_ENTRY_SLOT: { + v->VisitCodeEntry(addr); + break; + } + case SlotsBuffer::RELOCATED_CODE_OBJECT: { + HeapObject* obj = HeapObject::FromAddress(addr); + Code::cast(obj)->CodeIterateBody(v); + break; + } + case SlotsBuffer::DEBUG_TARGET_SLOT: { + RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL); + if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v); + break; + } + case SlotsBuffer::JS_RETURN_SLOT: { + RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL); + if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v); + break; + } + case SlotsBuffer::EMBEDDED_OBJECT_SLOT: { + RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL); + rinfo.Visit(isolate, v); + break; + } + default: + UNREACHABLE(); + break; + } +} + + +enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS }; + + +enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST }; + + +enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE }; + + +template <MarkCompactCollector::SweepingParallelism mode> +static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start, + int size) { + 
if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) { + DCHECK(free_list == NULL); + return space->Free(start, size); + } else { + // TODO(hpayer): account for wasted bytes in concurrent sweeping too. + return size - free_list->Free(start, size); + } +} + + +// Sweep a space precisely. After this has been done the space can +// be iterated precisely, hitting only the live objects. Code space +// is always swept precisely because we want to be able to iterate +// over it. Map space is swept precisely, because it is not compacted. +// Slots in live objects pointing into evacuation candidates are updated +// if requested. +// Returns the size of the biggest continuous freed memory chunk in bytes. +template <SweepingMode sweeping_mode, + MarkCompactCollector::SweepingParallelism parallelism, + SkipListRebuildingMode skip_list_mode, + FreeSpaceTreatmentMode free_space_mode> +static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p, + ObjectVisitor* v) { + DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); + DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, + space->identity() == CODE_SPACE); + DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); + DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || + sweeping_mode == SWEEP_ONLY); + + Address free_start = p->area_start(); + DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); + int offsets[16]; + + SkipList* skip_list = p->skip_list(); + int curr_region = -1; + if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { + skip_list->Clear(); + } + + intptr_t freed_bytes = 0; + intptr_t max_freed_bytes = 0; + + for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { + Address cell_base = it.CurrentCellBase(); + MarkBit::CellType* cell = it.CurrentCell(); + int live_objects = MarkWordToObjectStarts(*cell, offsets); + int live_index = 0; + for (; live_objects != 0; live_objects--) { + Address free_end = cell_base + offsets[live_index++] * 
kPointerSize; + if (free_end != free_start) { + int size = static_cast<int>(free_end - free_start); + if (free_space_mode == ZAP_FREE_SPACE) { + memset(free_start, 0xcc, size); + } + freed_bytes = Free<parallelism>(space, free_list, free_start, size); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); +#ifdef ENABLE_GDB_JIT_INTERFACE + if (FLAG_gdbjit && space->identity() == CODE_SPACE) { + GDBJITInterface::RemoveCodeRange(free_start, free_end); + } +#endif + } + HeapObject* live_object = HeapObject::FromAddress(free_end); + DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object))); + Map* map = live_object->map(); + int size = live_object->SizeFromMap(map); + if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { + live_object->IterateBody(map->instance_type(), size, v); + } + if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { + int new_region_start = SkipList::RegionNumber(free_end); + int new_region_end = + SkipList::RegionNumber(free_end + size - kPointerSize); + if (new_region_start != curr_region || new_region_end != curr_region) { + skip_list->AddObject(free_end, size); + curr_region = new_region_end; + } + } + free_start = free_end + size; + } + // Clear marking bits for current cell. + *cell = 0; + } + if (free_start != p->area_end()) { + int size = static_cast<int>(p->area_end() - free_start); + if (free_space_mode == ZAP_FREE_SPACE) { + memset(free_start, 0xcc, size); + } + freed_bytes = Free<parallelism>(space, free_list, free_start, size); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); +#ifdef ENABLE_GDB_JIT_INTERFACE + if (FLAG_gdbjit && space->identity() == CODE_SPACE) { + GDBJITInterface::RemoveCodeRange(free_start, p->area_end()); + } +#endif + } + p->ResetLiveBytes(); + + if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { + // When concurrent sweeping is active, the page will be marked after + // sweeping by the main thread. 
+ p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); + } else { + p->MarkSweptPrecisely(); + } + return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); +} + + +static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { + Page* p = Page::FromAddress(code->address()); + + if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + return false; + } + + Address code_start = code->address(); + Address code_end = code_start + code->Size(); + + uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); + uint32_t end_index = + MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); + + Bitmap* b = p->markbits(); + + MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); + MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); + + MarkBit::CellType* start_cell = start_mark_bit.cell(); + MarkBit::CellType* end_cell = end_mark_bit.cell(); + + if (value) { + MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); + MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; + + if (start_cell == end_cell) { + *start_cell |= start_mask & end_mask; + } else { + *start_cell |= start_mask; + for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { + *cell = ~0; + } + *end_cell |= end_mask; + } + } else { + for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) { + *cell = 0; + } + } + + return true; +} + + +static bool IsOnInvalidatedCodeObject(Address addr) { + // We did not record any slots in large objects thus + // we can safely go to the page from the slot address. + Page* p = Page::FromAddress(addr); + + // First check owner's identity because old pointer and old data spaces + // are swept lazily and might still have non-zero mark-bits on some + // pages. 
+ if (p->owner()->identity() != CODE_SPACE) return false; + + // In code space only bits on evacuation candidates (but we don't record + // any slots on them) and under invalidated code objects are non-zero. + MarkBit mark_bit = + p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); + + return mark_bit.Get(); +} + + +void MarkCompactCollector::InvalidateCode(Code* code) { + if (heap_->incremental_marking()->IsCompacting() && + !ShouldSkipEvacuationSlotRecording(code)) { + DCHECK(compacting_); + + // If the object is white than no slots were recorded on it yet. + MarkBit mark_bit = Marking::MarkBitFrom(code); + if (Marking::IsWhite(mark_bit)) return; + + invalidated_code_.Add(code); + } +} + + +// Return true if the given code is deoptimized or will be deoptimized. +bool MarkCompactCollector::WillBeDeoptimized(Code* code) { + return code->is_optimized_code() && code->marked_for_deoptimization(); +} + + +bool MarkCompactCollector::MarkInvalidatedCode() { + bool code_marked = false; + + int length = invalidated_code_.length(); + for (int i = 0; i < length; i++) { + Code* code = invalidated_code_[i]; + + if (SetMarkBitsUnderInvalidatedCode(code, true)) { + code_marked = true; + } + } + + return code_marked; +} + + +void MarkCompactCollector::RemoveDeadInvalidatedCode() { + int length = invalidated_code_.length(); + for (int i = 0; i < length; i++) { + if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; + } +} + + +void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { + int length = invalidated_code_.length(); + for (int i = 0; i < length; i++) { + Code* code = invalidated_code_[i]; + if (code != NULL) { + code->Iterate(visitor); + SetMarkBitsUnderInvalidatedCode(code, false); + } + } + invalidated_code_.Rewind(0); +} + + +void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { + Heap::RelocationLock relocation_lock(heap()); + + bool code_slots_filtering_required; + { + GCTracer::Scope 
gc_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_NEWSPACE); + code_slots_filtering_required = MarkInvalidatedCode(); + EvacuateNewSpace(); + } + + { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_EVACUATE_PAGES); + EvacuatePages(); + } + + // Second pass: find pointers to new space and update them. + PointersUpdatingVisitor updating_visitor(heap()); + + { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); + // Update pointers in to space. + SemiSpaceIterator to_it(heap()->new_space()->bottom(), + heap()->new_space()->top()); + for (HeapObject* object = to_it.Next(); object != NULL; + object = to_it.Next()) { + Map* map = object->map(); + object->IterateBody(map->instance_type(), object->SizeFromMap(map), + &updating_visitor); + } + } + + { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS); + // Update roots. + heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); + } + + { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS); + StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), + &Heap::ScavengeStoreBufferCallback); + heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps( + &UpdatePointer); + } + + { + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); + SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_, + code_slots_filtering_required); + if (FLAG_trace_fragmentation) { + PrintF(" migration slots buffer: %d\n", + SlotsBuffer::SizeOfChain(migration_slots_buffer_)); + } + + if (compacting_ && was_marked_incrementally_) { + // It's difficult to filter out slots recorded for large objects. + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + // LargeObjectSpace is not swept yet thus we have to skip + // dead objects explicitly. 
+ if (!IsMarked(obj)) continue; + + Page* p = Page::FromAddress(obj->address()); + if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + obj->Iterate(&updating_visitor); + p->ClearFlag(Page::RESCAN_ON_EVACUATION); + } + } + } + } + + int npages = evacuation_candidates_.length(); + { + GCTracer::Scope gc_scope( + heap()->tracer(), + GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + DCHECK(p->IsEvacuationCandidate() || + p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); + + if (p->IsEvacuationCandidate()) { + SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(), + code_slots_filtering_required); + if (FLAG_trace_fragmentation) { + PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), + SlotsBuffer::SizeOfChain(p->slots_buffer())); + } + + // Important: skip list should be cleared only after roots were updated + // because root iteration traverses the stack and might have to find + // code objects from non-updated pc pointing into evacuation candidate. 
+ SkipList* list = p->skip_list(); + if (list != NULL) list->Clear(); + } else { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", + reinterpret_cast<intptr_t>(p)); + } + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); + + switch (space->identity()) { + case OLD_DATA_SPACE: + SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); + break; + case OLD_POINTER_SPACE: + SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, + IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( + space, NULL, p, &updating_visitor); + break; + case CODE_SPACE: + if (FLAG_zap_code_space) { + SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, + REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( + space, NULL, p, &updating_visitor); + } else { + SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, + REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>( + space, NULL, p, &updating_visitor); + } + break; + default: + UNREACHABLE(); + break; + } + } + } + } + + GCTracer::Scope gc_scope(heap()->tracer(), + GCTracer::Scope::MC_UPDATE_MISC_POINTERS); + + // Update pointers from cells. 
+ HeapObjectIterator cell_iterator(heap_->cell_space()); + for (HeapObject* cell = cell_iterator.Next(); cell != NULL; + cell = cell_iterator.Next()) { + if (cell->IsCell()) { + Cell::BodyDescriptor::IterateBody(cell, &updating_visitor); + } + } + + HeapObjectIterator js_global_property_cell_iterator( + heap_->property_cell_space()); + for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL; + cell = js_global_property_cell_iterator.Next()) { + if (cell->IsPropertyCell()) { + PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); + } + } + + heap_->string_table()->Iterate(&updating_visitor); + updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address()); + if (heap_->weak_object_to_code_table()->IsHashTable()) { + WeakHashTable* table = + WeakHashTable::cast(heap_->weak_object_to_code_table()); + table->Iterate(&updating_visitor); + table->Rehash(heap_->isolate()->factory()->undefined_value()); + } + + // Update pointers from external string table. + heap_->UpdateReferencesInExternalStringTable( + &UpdateReferenceInExternalStringTableEntry); + + EvacuationWeakObjectRetainer evacuation_object_retainer; + heap()->ProcessWeakReferences(&evacuation_object_retainer); + + // Visit invalidated code (we ignored all slots on it) and clear mark-bits + // under it. 
+ ProcessInvalidatedCode(&updating_visitor); + + heap_->isolate()->inner_pointer_to_code_cache()->Flush(); + + slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); + DCHECK(migration_slots_buffer_ == NULL); +} + + +void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + if (!p->IsEvacuationCandidate()) continue; + p->Unlink(); + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + p->InsertAfter(space->LastPage()); + } +} + + +void MarkCompactCollector::ReleaseEvacuationCandidates() { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + if (!p->IsEvacuationCandidate()) continue; + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + space->Free(p->area_start(), p->area_size()); + p->set_scan_on_scavenge(false); + slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); + p->ResetLiveBytes(); + space->ReleasePage(p); + } + evacuation_candidates_.Rewind(0); + compacting_ = false; + heap()->FreeQueuedChunks(); +} + + +static const int kStartTableEntriesPerLine = 5; +static const int kStartTableLines = 171; +static const int kStartTableInvalidLine = 127; +static const int kStartTableUnusedEntry = 126; + +#define _ kStartTableUnusedEntry +#define X kStartTableInvalidLine +// Mark-bit to object start offset table. +// +// The line is indexed by the mark bits in a byte. The first number on +// the line describes the number of live object starts for the line and the +// other numbers on the line describe the offsets (in words) of the object +// starts. +// +// Since objects are at least 2 words large we don't have entries for two +// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits. 
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { + 0, _, _, + _, _, // 0 + 1, 0, _, + _, _, // 1 + 1, 1, _, + _, _, // 2 + X, _, _, + _, _, // 3 + 1, 2, _, + _, _, // 4 + 2, 0, 2, + _, _, // 5 + X, _, _, + _, _, // 6 + X, _, _, + _, _, // 7 + 1, 3, _, + _, _, // 8 + 2, 0, 3, + _, _, // 9 + 2, 1, 3, + _, _, // 10 + X, _, _, + _, _, // 11 + X, _, _, + _, _, // 12 + X, _, _, + _, _, // 13 + X, _, _, + _, _, // 14 + X, _, _, + _, _, // 15 + 1, 4, _, + _, _, // 16 + 2, 0, 4, + _, _, // 17 + 2, 1, 4, + _, _, // 18 + X, _, _, + _, _, // 19 + 2, 2, 4, + _, _, // 20 + 3, 0, 2, + 4, _, // 21 + X, _, _, + _, _, // 22 + X, _, _, + _, _, // 23 + X, _, _, + _, _, // 24 + X, _, _, + _, _, // 25 + X, _, _, + _, _, // 26 + X, _, _, + _, _, // 27 + X, _, _, + _, _, // 28 + X, _, _, + _, _, // 29 + X, _, _, + _, _, // 30 + X, _, _, + _, _, // 31 + 1, 5, _, + _, _, // 32 + 2, 0, 5, + _, _, // 33 + 2, 1, 5, + _, _, // 34 + X, _, _, + _, _, // 35 + 2, 2, 5, + _, _, // 36 + 3, 0, 2, + 5, _, // 37 + X, _, _, + _, _, // 38 + X, _, _, + _, _, // 39 + 2, 3, 5, + _, _, // 40 + 3, 0, 3, + 5, _, // 41 + 3, 1, 3, + 5, _, // 42 + X, _, _, + _, _, // 43 + X, _, _, + _, _, // 44 + X, _, _, + _, _, // 45 + X, _, _, + _, _, // 46 + X, _, _, + _, _, // 47 + X, _, _, + _, _, // 48 + X, _, _, + _, _, // 49 + X, _, _, + _, _, // 50 + X, _, _, + _, _, // 51 + X, _, _, + _, _, // 52 + X, _, _, + _, _, // 53 + X, _, _, + _, _, // 54 + X, _, _, + _, _, // 55 + X, _, _, + _, _, // 56 + X, _, _, + _, _, // 57 + X, _, _, + _, _, // 58 + X, _, _, + _, _, // 59 + X, _, _, + _, _, // 60 + X, _, _, + _, _, // 61 + X, _, _, + _, _, // 62 + X, _, _, + _, _, // 63 + 1, 6, _, + _, _, // 64 + 2, 0, 6, + _, _, // 65 + 2, 1, 6, + _, _, // 66 + X, _, _, + _, _, // 67 + 2, 2, 6, + _, _, // 68 + 3, 0, 2, + 6, _, // 69 + X, _, _, + _, _, // 70 + X, _, _, + _, _, // 71 + 2, 3, 6, + _, _, // 72 + 3, 0, 3, + 6, _, // 73 + 3, 1, 3, + 6, _, // 74 + X, _, _, + _, _, // 75 + X, _, _, + _, _, // 76 + X, _, _, + _, 
_, // 77 + X, _, _, + _, _, // 78 + X, _, _, + _, _, // 79 + 2, 4, 6, + _, _, // 80 + 3, 0, 4, + 6, _, // 81 + 3, 1, 4, + 6, _, // 82 + X, _, _, + _, _, // 83 + 3, 2, 4, + 6, _, // 84 + 4, 0, 2, + 4, 6, // 85 + X, _, _, + _, _, // 86 + X, _, _, + _, _, // 87 + X, _, _, + _, _, // 88 + X, _, _, + _, _, // 89 + X, _, _, + _, _, // 90 + X, _, _, + _, _, // 91 + X, _, _, + _, _, // 92 + X, _, _, + _, _, // 93 + X, _, _, + _, _, // 94 + X, _, _, + _, _, // 95 + X, _, _, + _, _, // 96 + X, _, _, + _, _, // 97 + X, _, _, + _, _, // 98 + X, _, _, + _, _, // 99 + X, _, _, + _, _, // 100 + X, _, _, + _, _, // 101 + X, _, _, + _, _, // 102 + X, _, _, + _, _, // 103 + X, _, _, + _, _, // 104 + X, _, _, + _, _, // 105 + X, _, _, + _, _, // 106 + X, _, _, + _, _, // 107 + X, _, _, + _, _, // 108 + X, _, _, + _, _, // 109 + X, _, _, + _, _, // 110 + X, _, _, + _, _, // 111 + X, _, _, + _, _, // 112 + X, _, _, + _, _, // 113 + X, _, _, + _, _, // 114 + X, _, _, + _, _, // 115 + X, _, _, + _, _, // 116 + X, _, _, + _, _, // 117 + X, _, _, + _, _, // 118 + X, _, _, + _, _, // 119 + X, _, _, + _, _, // 120 + X, _, _, + _, _, // 121 + X, _, _, + _, _, // 122 + X, _, _, + _, _, // 123 + X, _, _, + _, _, // 124 + X, _, _, + _, _, // 125 + X, _, _, + _, _, // 126 + X, _, _, + _, _, // 127 + 1, 7, _, + _, _, // 128 + 2, 0, 7, + _, _, // 129 + 2, 1, 7, + _, _, // 130 + X, _, _, + _, _, // 131 + 2, 2, 7, + _, _, // 132 + 3, 0, 2, + 7, _, // 133 + X, _, _, + _, _, // 134 + X, _, _, + _, _, // 135 + 2, 3, 7, + _, _, // 136 + 3, 0, 3, + 7, _, // 137 + 3, 1, 3, + 7, _, // 138 + X, _, _, + _, _, // 139 + X, _, _, + _, _, // 140 + X, _, _, + _, _, // 141 + X, _, _, + _, _, // 142 + X, _, _, + _, _, // 143 + 2, 4, 7, + _, _, // 144 + 3, 0, 4, + 7, _, // 145 + 3, 1, 4, + 7, _, // 146 + X, _, _, + _, _, // 147 + 3, 2, 4, + 7, _, // 148 + 4, 0, 2, + 4, 7, // 149 + X, _, _, + _, _, // 150 + X, _, _, + _, _, // 151 + X, _, _, + _, _, // 152 + X, _, _, + _, _, // 153 + X, _, _, + _, _, // 154 + X, _, _, 
+ _, _, // 155 + X, _, _, + _, _, // 156 + X, _, _, + _, _, // 157 + X, _, _, + _, _, // 158 + X, _, _, + _, _, // 159 + 2, 5, 7, + _, _, // 160 + 3, 0, 5, + 7, _, // 161 + 3, 1, 5, + 7, _, // 162 + X, _, _, + _, _, // 163 + 3, 2, 5, + 7, _, // 164 + 4, 0, 2, + 5, 7, // 165 + X, _, _, + _, _, // 166 + X, _, _, + _, _, // 167 + 3, 3, 5, + 7, _, // 168 + 4, 0, 3, + 5, 7, // 169 + 4, 1, 3, + 5, 7 // 170 +}; +#undef _ +#undef X + + +// Takes a word of mark bits. Returns the number of objects that start in the +// range. Puts the offsets of the words in the supplied array. +static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { + int objects = 0; + int offset = 0; + + // No consecutive 1 bits. + DCHECK((mark_bits & 0x180) != 0x180); + DCHECK((mark_bits & 0x18000) != 0x18000); + DCHECK((mark_bits & 0x1800000) != 0x1800000); + + while (mark_bits != 0) { + int byte = (mark_bits & 0xff); + mark_bits >>= 8; + if (byte != 0) { + DCHECK(byte < kStartTableLines); // No consecutive 1 bits. + char* table = kStartTable + byte * kStartTableEntriesPerLine; + int objects_in_these_8_words = table[0]; + DCHECK(objects_in_these_8_words != kStartTableInvalidLine); + DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine); + for (int i = 0; i < objects_in_these_8_words; i++) { + starts[objects++] = offset + table[1 + i]; + } + } + offset += 8; + } + return objects; +} + + +static inline Address DigestFreeStart(Address approximate_free_start, + uint32_t free_start_cell) { + DCHECK(free_start_cell != 0); + + // No consecutive 1 bits. + DCHECK((free_start_cell & (free_start_cell << 1)) == 0); + + int offsets[16]; + uint32_t cell = free_start_cell; + int offset_of_last_live; + if ((cell & 0x80000000u) != 0) { + // This case would overflow below. + offset_of_last_live = 31; + } else { + // Remove all but one bit, the most significant. This is an optimization + // that may or may not be worthwhile. 
+ cell |= cell >> 16; + cell |= cell >> 8; + cell |= cell >> 4; + cell |= cell >> 2; + cell |= cell >> 1; + cell = (cell + 1) >> 1; + int live_objects = MarkWordToObjectStarts(cell, offsets); + DCHECK(live_objects == 1); + offset_of_last_live = offsets[live_objects - 1]; + } + Address last_live_start = + approximate_free_start + offset_of_last_live * kPointerSize; + HeapObject* last_live = HeapObject::FromAddress(last_live_start); + Address free_start = last_live_start + last_live->Size(); + return free_start; +} + + +static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { + DCHECK(cell != 0); + + // No consecutive 1 bits. + DCHECK((cell & (cell << 1)) == 0); + + int offsets[16]; + if (cell == 0x80000000u) { // Avoid overflow below. + return block_address + 31 * kPointerSize; + } + uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; + DCHECK((first_set_bit & cell) == first_set_bit); + int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); + DCHECK(live_objects == 1); + USE(live_objects); + return block_address + offsets[0] * kPointerSize; +} + + +// Force instantiation of templatized SweepConservatively method for +// SWEEP_ON_MAIN_THREAD mode. +template int MarkCompactCollector::SweepConservatively< + MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*); + + +// Force instantiation of templatized SweepConservatively method for +// SWEEP_IN_PARALLEL mode. +template int MarkCompactCollector::SweepConservatively< + MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*); + + +// Sweeps a space conservatively. After this has been done the larger free +// spaces have been put on the free list and the smaller ones have been +// ignored and left untouched. A free space is always either ignored or put +// on the free list, never split up into two parts. This is important +// because it means that any FreeSpace maps left actually describe a region of +// memory that can be ignored when scanning. 
Dead objects other than free +// spaces will not contain the free space map. +template <MarkCompactCollector::SweepingParallelism mode> +int MarkCompactCollector::SweepConservatively(PagedSpace* space, + FreeList* free_list, Page* p) { + DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); + DCHECK( + (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) || + (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && + free_list == NULL)); + + intptr_t freed_bytes = 0; + intptr_t max_freed_bytes = 0; + size_t size = 0; + + // Skip over all the dead objects at the start of the page and mark them free. + Address cell_base = 0; + MarkBit::CellType* cell = NULL; + MarkBitCellIterator it(p); + for (; !it.Done(); it.Advance()) { + cell_base = it.CurrentCellBase(); + cell = it.CurrentCell(); + if (*cell != 0) break; + } + + if (it.Done()) { + size = p->area_end() - p->area_start(); + freed_bytes = + Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + DCHECK_EQ(0, p->LiveBytes()); + if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { + // When concurrent sweeping is active, the page will be marked after + // sweeping by the main thread. + p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); + } else { + p->MarkSweptConservatively(); + } + return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); + } + + // Grow the size of the start-of-page free space a little to get up to the + // first live object. + Address free_end = StartOfLiveObject(cell_base, *cell); + // Free the first free space. 
+ size = free_end - p->area_start(); + freed_bytes = + Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + + // The start of the current free area is represented in undigested form by + // the address of the last 32-word section that contained a live object and + // the marking bitmap for that cell, which describes where the live object + // started. Unless we find a large free space in the bitmap we will not + // digest this pair into a real address. We start the iteration here at the + // first word in the marking bit map that indicates a live object. + Address free_start = cell_base; + MarkBit::CellType free_start_cell = *cell; + + for (; !it.Done(); it.Advance()) { + cell_base = it.CurrentCellBase(); + cell = it.CurrentCell(); + if (*cell != 0) { + // We have a live object. Check approximately whether it is more than 32 + // words since the last live object. + if (cell_base - free_start > 32 * kPointerSize) { + free_start = DigestFreeStart(free_start, free_start_cell); + if (cell_base - free_start > 32 * kPointerSize) { + // Now that we know the exact start of the free space it still looks + // like we have a large enough free space to be worth bothering with. + // so now we need to find the start of the first live object at the + // end of the free space. + free_end = StartOfLiveObject(cell_base, *cell); + freed_bytes = Free<mode>(space, free_list, free_start, + static_cast<int>(free_end - free_start)); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + } + } + // Update our undigested record of where the current free area started. + free_start = cell_base; + free_start_cell = *cell; + // Clear marking bits for current cell. + *cell = 0; + } + } + + // Handle the free space at the end of the page. 
+ if (cell_base - free_start > 32 * kPointerSize) { + free_start = DigestFreeStart(free_start, free_start_cell); + freed_bytes = Free<mode>(space, free_list, free_start, + static_cast<int>(p->area_end() - free_start)); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + } + + p->ResetLiveBytes(); + if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { + // When concurrent sweeping is active, the page will be marked after + // sweeping by the main thread. + p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); + } else { + p->MarkSweptConservatively(); + } + return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); +} + + +int MarkCompactCollector::SweepInParallel(PagedSpace* space, + int required_freed_bytes) { + int max_freed = 0; + int max_freed_overall = 0; + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + max_freed = SweepInParallel(p, space); + DCHECK(max_freed >= 0); + if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { + return max_freed; + } + max_freed_overall = Max(max_freed, max_freed_overall); + if (p == space->end_of_unswept_pages()) break; + } + return max_freed_overall; +} + + +int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { + int max_freed = 0; + if (page->TryParallelSweeping()) { + FreeList* free_list = space == heap()->old_pointer_space() + ? 
free_list_old_pointer_space_.get() + : free_list_old_data_space_.get(); + FreeList private_free_list(space); + if (space->swept_precisely()) { + max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL, + IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( + space, &private_free_list, page, NULL); + } else { + max_freed = SweepConservatively<SWEEP_IN_PARALLEL>( + space, &private_free_list, page); + } + free_list->Concatenate(&private_free_list); + } + return max_freed; +} + + +void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { + space->set_swept_precisely(sweeper == PRECISE || + sweeper == CONCURRENT_PRECISE || + sweeper == PARALLEL_PRECISE); + space->ClearStats(); + + // We defensively initialize end_of_unswept_pages_ here with the first page + // of the pages list. + space->set_end_of_unswept_pages(space->FirstPage()); + + PageIterator it(space); + + int pages_swept = 0; + bool unused_page_present = false; + bool parallel_sweeping_active = false; + + while (it.has_next()) { + Page* p = it.next(); + DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); + + // Clear sweeping flags indicating that marking bits are still intact. + p->ClearSweptPrecisely(); + p->ClearSweptConservatively(); + + if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || + p->IsEvacuationCandidate()) { + // Will be processed in EvacuateNewSpaceAndCandidates. + DCHECK(evacuation_candidates_.length() > 0); + continue; + } + + // One unused page is kept, all further are released before sweeping them. + if (p->LiveBytes() == 0) { + if (unused_page_present) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", + reinterpret_cast<intptr_t>(p)); + } + // Adjust unswept free bytes because releasing a page expects said + // counter to be accurate for unswept pages. 
+ space->IncreaseUnsweptFreeBytes(p); + space->ReleasePage(p); + continue; + } + unused_page_present = true; + } + + switch (sweeper) { + case CONCURRENT_CONSERVATIVE: + case PARALLEL_CONSERVATIVE: { + if (!parallel_sweeping_active) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", + reinterpret_cast<intptr_t>(p)); + } + SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); + pages_swept++; + parallel_sweeping_active = true; + } else { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", + reinterpret_cast<intptr_t>(p)); + } + p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); + space->IncreaseUnsweptFreeBytes(p); + } + space->set_end_of_unswept_pages(p); + break; + } + case CONCURRENT_PRECISE: + case PARALLEL_PRECISE: + if (!parallel_sweeping_active) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", + reinterpret_cast<intptr_t>(p)); + } + SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, + IGNORE_FREE_SPACE>(space, NULL, p, NULL); + pages_swept++; + parallel_sweeping_active = true; + } else { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", + reinterpret_cast<intptr_t>(p)); + } + p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); + space->IncreaseUnsweptFreeBytes(p); + } + space->set_end_of_unswept_pages(p); + break; + case PRECISE: { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", + reinterpret_cast<intptr_t>(p)); + } + if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { + SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, + ZAP_FREE_SPACE>(space, NULL, p, NULL); + } else if (space->identity() == CODE_SPACE) { + SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, + IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } else { + SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, + IGNORE_FREE_SPACE>(space, NULL, 
p, NULL); + } + pages_swept++; + break; + } + default: { UNREACHABLE(); } + } + } + + if (FLAG_gc_verbose) { + PrintF("SweepSpace: %s (%d pages swept)\n", + AllocationSpaceName(space->identity()), pages_swept); + } + + // Give pages that are queued to be freed back to the OS. + heap()->FreeQueuedChunks(); +} + + +static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { + return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || + type == MarkCompactCollector::CONCURRENT_CONSERVATIVE || + type == MarkCompactCollector::PARALLEL_PRECISE || + type == MarkCompactCollector::CONCURRENT_PRECISE; +} + + +static bool ShouldWaitForSweeperThreads( + MarkCompactCollector::SweeperType type) { + return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || + type == MarkCompactCollector::PARALLEL_PRECISE; +} + + +void MarkCompactCollector::SweepSpaces() { + GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); + double start_time = 0.0; + if (FLAG_print_cumulative_gc_stat) { + start_time = base::OS::TimeCurrentMillis(); + } + +#ifdef DEBUG + state_ = SWEEP_SPACES; +#endif + SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; + if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; + if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; + if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) { + how_to_sweep = PARALLEL_PRECISE; + } + if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) { + how_to_sweep = CONCURRENT_PRECISE; + } + if (sweep_precisely_) how_to_sweep = PRECISE; + + MoveEvacuationCandidatesToEndOfPagesList(); + + // Noncompacting collections simply sweep the spaces to clear the mark + // bits and free the nonlive blocks (for old and map spaces). We sweep + // the map space last because freeing non-live maps overwrites them and + // the other spaces rely on possibly non-live maps to get the sizes for + // non-live objects. 
+ { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_OLDSPACE); + { + SequentialSweepingScope scope(this); + SweepSpace(heap()->old_pointer_space(), how_to_sweep); + SweepSpace(heap()->old_data_space(), how_to_sweep); + } + + if (ShouldStartSweeperThreads(how_to_sweep)) { + StartSweeperThreads(); + } + + if (ShouldWaitForSweeperThreads(how_to_sweep)) { + EnsureSweepingCompleted(); + } + } + RemoveDeadInvalidatedCode(); + + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_CODE); + SweepSpace(heap()->code_space(), PRECISE); + } + + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_CELL); + SweepSpace(heap()->cell_space(), PRECISE); + SweepSpace(heap()->property_cell_space(), PRECISE); + } + + EvacuateNewSpaceAndCandidates(); + + // ClearNonLiveTransitions depends on precise sweeping of map space to + // detect whether unmarked map became dead in this collection or in one + // of the previous ones. + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_MAP); + SweepSpace(heap()->map_space(), PRECISE); + } + + // Deallocate unmarked objects and clear marked bits for marked objects. + heap_->lo_space()->FreeUnmarkedObjects(); + + // Deallocate evacuated candidate pages. 
+ ReleaseEvacuationCandidates(); + + if (FLAG_print_cumulative_gc_stat) { + heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - + start_time); + } +} + + +void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { + p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); + if (space->swept_precisely()) { + p->MarkSweptPrecisely(); + } else { + p->MarkSweptConservatively(); + } + } + DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); + } +} + + +void MarkCompactCollector::ParallelSweepSpacesComplete() { + ParallelSweepSpaceComplete(heap()->old_pointer_space()); + ParallelSweepSpaceComplete(heap()->old_data_space()); +} + + +void MarkCompactCollector::EnableCodeFlushing(bool enable) { + if (isolate()->debug()->is_loaded() || + isolate()->debug()->has_break_points()) { + enable = false; + } + + if (enable) { + if (code_flusher_ != NULL) return; + code_flusher_ = new CodeFlusher(isolate()); + } else { + if (code_flusher_ == NULL) return; + code_flusher_->EvictAllCandidates(); + delete code_flusher_; + code_flusher_ = NULL; + } + + if (FLAG_trace_code_flushing) { + PrintF("[code-flushing is now %s]\n", enable ? "on" : "off"); + } +} + + +// TODO(1466) ReportDeleteIfNeeded is not called currently. +// Our profiling tools do not expect intersections between +// code objects. We should either reenable it or change our tools. 
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, + Isolate* isolate) { + if (obj->IsCode()) { + PROFILE(isolate, CodeDeleteEvent(obj->address())); + } +} + + +Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } + + +void MarkCompactCollector::Initialize() { + MarkCompactMarkingVisitor::Initialize(); + IncrementalMarking::Initialize(); +} + + +bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { + return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; +} + + +bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, SlotType type, + Address addr, AdditionMode mode) { + SlotsBuffer* buffer = *buffer_address; + if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { + if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { + allocator->DeallocateChain(buffer_address); + return false; + } + buffer = allocator->AllocateBuffer(buffer); + *buffer_address = buffer; + } + DCHECK(buffer->HasSpaceForTypedSlot()); + buffer->Add(reinterpret_cast<ObjectSlot>(type)); + buffer->Add(reinterpret_cast<ObjectSlot>(addr)); + return true; +} + + +static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { + if (RelocInfo::IsCodeTarget(rmode)) { + return SlotsBuffer::CODE_TARGET_SLOT; + } else if (RelocInfo::IsEmbeddedObject(rmode)) { + return SlotsBuffer::EMBEDDED_OBJECT_SLOT; + } else if (RelocInfo::IsDebugBreakSlot(rmode)) { + return SlotsBuffer::DEBUG_TARGET_SLOT; + } else if (RelocInfo::IsJSReturn(rmode)) { + return SlotsBuffer::JS_RETURN_SLOT; + } + UNREACHABLE(); + return SlotsBuffer::NUMBER_OF_SLOT_TYPES; +} + + +void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) { + Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); + RelocInfo::Mode rmode = rinfo->rmode(); + if (target_page->IsEvacuationCandidate() && + (rinfo->host() == NULL || + !ShouldSkipEvacuationSlotRecording(rinfo->host()))) { + bool success; + if 
(RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) { + // This doesn't need to be typed since it is just a normal heap pointer. + Object** target_pointer = + reinterpret_cast<Object**>(rinfo->constant_pool_entry_address()); + success = SlotsBuffer::AddTo( + &slots_buffer_allocator_, target_page->slots_buffer_address(), + target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW); + } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) { + success = SlotsBuffer::AddTo( + &slots_buffer_allocator_, target_page->slots_buffer_address(), + SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(), + SlotsBuffer::FAIL_ON_OVERFLOW); + } else { + success = SlotsBuffer::AddTo( + &slots_buffer_allocator_, target_page->slots_buffer_address(), + SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW); + } + if (!success) { + EvictEvacuationCandidate(target_page); + } + } +} + + +void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { + Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); + if (target_page->IsEvacuationCandidate() && + !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) { + if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, + target_page->slots_buffer_address(), + SlotsBuffer::CODE_ENTRY_SLOT, slot, + SlotsBuffer::FAIL_ON_OVERFLOW)) { + EvictEvacuationCandidate(target_page); + } + } +} + + +void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { + DCHECK(heap()->gc_state() == Heap::MARK_COMPACT); + if (is_compacting()) { + Code* host = + isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer( + pc); + MarkBit mark_bit = Marking::MarkBitFrom(host); + if (Marking::IsBlack(mark_bit)) { + RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); + RecordRelocSlot(&rinfo, target); + } + } +} + + +static inline SlotsBuffer::SlotType DecodeSlotType( + SlotsBuffer::ObjectSlot slot) { + return 
static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); +} + + +void SlotsBuffer::UpdateSlots(Heap* heap) { + PointersUpdatingVisitor v(heap); + + for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { + ObjectSlot slot = slots_[slot_idx]; + if (!IsTypedSlot(slot)) { + PointersUpdatingVisitor::UpdateSlot(heap, slot); + } else { + ++slot_idx; + DCHECK(slot_idx < idx_); + UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot), + reinterpret_cast<Address>(slots_[slot_idx])); + } + } +} + + +void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { + PointersUpdatingVisitor v(heap); + + for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { + ObjectSlot slot = slots_[slot_idx]; + if (!IsTypedSlot(slot)) { + if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { + PointersUpdatingVisitor::UpdateSlot(heap, slot); + } + } else { + ++slot_idx; + DCHECK(slot_idx < idx_); + Address pc = reinterpret_cast<Address>(slots_[slot_idx]); + if (!IsOnInvalidatedCodeObject(pc)) { + UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot), + reinterpret_cast<Address>(slots_[slot_idx])); + } + } + } +} + + +SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { + return new SlotsBuffer(next_buffer); +} + + +void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { + delete buffer; +} + + +void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { + SlotsBuffer* buffer = *buffer_address; + while (buffer != NULL) { + SlotsBuffer* next_buffer = buffer->next(); + DeallocateBuffer(buffer); + buffer = next_buffer; + } + *buffer_address = NULL; +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/mark-compact.h nodejs-0.11.15/deps/v8/src/heap/mark-compact.h --- nodejs-0.11.13/deps/v8/src/heap/mark-compact.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/mark-compact.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,966 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_MARK_COMPACT_H_ +#define V8_HEAP_MARK_COMPACT_H_ + +#include "src/compiler-intrinsics.h" +#include "src/heap/spaces.h" + +namespace v8 { +namespace internal { + +// Callback function, returns whether an object is alive. The heap size +// of the object is returned in size. It optionally updates the offset +// to the first live object in the page (only used for old and map objects). +typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset); + +// Forward declarations. +class CodeFlusher; +class MarkCompactCollector; +class MarkingVisitor; +class RootMarkingVisitor; + + +class Marking { + public: + explicit Marking(Heap* heap) : heap_(heap) {} + + INLINE(static MarkBit MarkBitFrom(Address addr)); + + INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) { + return MarkBitFrom(reinterpret_cast<Address>(obj)); + } + + // Impossible markbits: 01 + static const char* kImpossibleBitPattern; + INLINE(static bool IsImpossible(MarkBit mark_bit)) { + return !mark_bit.Get() && mark_bit.Next().Get(); + } + + // Black markbits: 10 - this is required by the sweeper. + static const char* kBlackBitPattern; + INLINE(static bool IsBlack(MarkBit mark_bit)) { + return mark_bit.Get() && !mark_bit.Next().Get(); + } + + // White markbits: 00 - this is required by the mark bit clearer. 
+ static const char* kWhiteBitPattern; + INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); } + + // Grey markbits: 11 + static const char* kGreyBitPattern; + INLINE(static bool IsGrey(MarkBit mark_bit)) { + return mark_bit.Get() && mark_bit.Next().Get(); + } + + INLINE(static void MarkBlack(MarkBit mark_bit)) { + mark_bit.Set(); + mark_bit.Next().Clear(); + } + + INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); } + + INLINE(static void WhiteToGrey(MarkBit markbit)) { + markbit.Set(); + markbit.Next().Set(); + } + + INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); } + + INLINE(static void BlackToGrey(HeapObject* obj)) { + BlackToGrey(MarkBitFrom(obj)); + } + + INLINE(static void AnyToGrey(MarkBit markbit)) { + markbit.Set(); + markbit.Next().Set(); + } + + void TransferMark(Address old_start, Address new_start); + +#ifdef DEBUG + enum ObjectColor { + BLACK_OBJECT, + WHITE_OBJECT, + GREY_OBJECT, + IMPOSSIBLE_COLOR + }; + + static const char* ColorName(ObjectColor color) { + switch (color) { + case BLACK_OBJECT: + return "black"; + case WHITE_OBJECT: + return "white"; + case GREY_OBJECT: + return "grey"; + case IMPOSSIBLE_COLOR: + return "impossible"; + } + return "error"; + } + + static ObjectColor Color(HeapObject* obj) { + return Color(Marking::MarkBitFrom(obj)); + } + + static ObjectColor Color(MarkBit mark_bit) { + if (IsBlack(mark_bit)) return BLACK_OBJECT; + if (IsWhite(mark_bit)) return WHITE_OBJECT; + if (IsGrey(mark_bit)) return GREY_OBJECT; + UNREACHABLE(); + return IMPOSSIBLE_COLOR; + } +#endif + + // Returns true if the transferred color is black. + INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) { + MarkBit from_mark_bit = MarkBitFrom(from); + MarkBit to_mark_bit = MarkBitFrom(to); + bool is_black = false; + if (from_mark_bit.Get()) { + to_mark_bit.Set(); + is_black = true; // Looks black so far. 
+ } + if (from_mark_bit.Next().Get()) { + to_mark_bit.Next().Set(); + is_black = false; // Was actually gray. + } + return is_black; + } + + private: + Heap* heap_; +}; + +// ---------------------------------------------------------------------------- +// Marking deque for tracing live objects. +class MarkingDeque { + public: + MarkingDeque() + : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {} + + void Initialize(Address low, Address high) { + HeapObject** obj_low = reinterpret_cast<HeapObject**>(low); + HeapObject** obj_high = reinterpret_cast<HeapObject**>(high); + array_ = obj_low; + mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1; + top_ = bottom_ = 0; + overflowed_ = false; + } + + inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; } + + inline bool IsEmpty() { return top_ == bottom_; } + + bool overflowed() const { return overflowed_; } + + void ClearOverflowed() { overflowed_ = false; } + + void SetOverflowed() { overflowed_ = true; } + + // Push the (marked) object on the marking stack if there is room, + // otherwise mark the object as overflowed and wait for a rescan of the + // heap. 
+ INLINE(void PushBlack(HeapObject* object)) { + DCHECK(object->IsHeapObject()); + if (IsFull()) { + Marking::BlackToGrey(object); + MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size()); + SetOverflowed(); + } else { + array_[top_] = object; + top_ = ((top_ + 1) & mask_); + } + } + + INLINE(void PushGrey(HeapObject* object)) { + DCHECK(object->IsHeapObject()); + if (IsFull()) { + SetOverflowed(); + } else { + array_[top_] = object; + top_ = ((top_ + 1) & mask_); + } + } + + INLINE(HeapObject* Pop()) { + DCHECK(!IsEmpty()); + top_ = ((top_ - 1) & mask_); + HeapObject* object = array_[top_]; + DCHECK(object->IsHeapObject()); + return object; + } + + INLINE(void UnshiftGrey(HeapObject* object)) { + DCHECK(object->IsHeapObject()); + if (IsFull()) { + SetOverflowed(); + } else { + bottom_ = ((bottom_ - 1) & mask_); + array_[bottom_] = object; + } + } + + HeapObject** array() { return array_; } + int bottom() { return bottom_; } + int top() { return top_; } + int mask() { return mask_; } + void set_top(int top) { top_ = top; } + + private: + HeapObject** array_; + // array_[(top - 1) & mask_] is the top element in the deque. The Deque is + // empty when top_ == bottom_. It is full when top_ + 1 == bottom + // (mod mask + 1). + int top_; + int bottom_; + int mask_; + bool overflowed_; + + DISALLOW_COPY_AND_ASSIGN(MarkingDeque); +}; + + +class SlotsBufferAllocator { + public: + SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer); + void DeallocateBuffer(SlotsBuffer* buffer); + + void DeallocateChain(SlotsBuffer** buffer_address); +}; + + +// SlotsBuffer records a sequence of slots that has to be updated +// after live objects were relocated from evacuation candidates. +// All slots are either untyped or typed: +// - Untyped slots are expected to contain a tagged object pointer. +// They are recorded by an address. 
+// - Typed slots are expected to contain an encoded pointer to a heap +// object where the way of encoding depends on the type of the slot. +// They are recorded as a pair (SlotType, slot address). +// We assume that zero-page is never mapped this allows us to distinguish +// untyped slots from typed slots during iteration by a simple comparison: +// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it +// is the first element of typed slot's pair. +class SlotsBuffer { + public: + typedef Object** ObjectSlot; + + explicit SlotsBuffer(SlotsBuffer* next_buffer) + : idx_(0), chain_length_(1), next_(next_buffer) { + if (next_ != NULL) { + chain_length_ = next_->chain_length_ + 1; + } + } + + ~SlotsBuffer() {} + + void Add(ObjectSlot slot) { + DCHECK(0 <= idx_ && idx_ < kNumberOfElements); + slots_[idx_++] = slot; + } + + enum SlotType { + EMBEDDED_OBJECT_SLOT, + RELOCATED_CODE_OBJECT, + CODE_TARGET_SLOT, + CODE_ENTRY_SLOT, + DEBUG_TARGET_SLOT, + JS_RETURN_SLOT, + NUMBER_OF_SLOT_TYPES + }; + + static const char* SlotTypeToString(SlotType type) { + switch (type) { + case EMBEDDED_OBJECT_SLOT: + return "EMBEDDED_OBJECT_SLOT"; + case RELOCATED_CODE_OBJECT: + return "RELOCATED_CODE_OBJECT"; + case CODE_TARGET_SLOT: + return "CODE_TARGET_SLOT"; + case CODE_ENTRY_SLOT: + return "CODE_ENTRY_SLOT"; + case DEBUG_TARGET_SLOT: + return "DEBUG_TARGET_SLOT"; + case JS_RETURN_SLOT: + return "JS_RETURN_SLOT"; + case NUMBER_OF_SLOT_TYPES: + return "NUMBER_OF_SLOT_TYPES"; + } + return "UNKNOWN SlotType"; + } + + void UpdateSlots(Heap* heap); + + void UpdateSlotsWithFilter(Heap* heap); + + SlotsBuffer* next() { return next_; } + + static int SizeOfChain(SlotsBuffer* buffer) { + if (buffer == NULL) return 0; + return static_cast<int>(buffer->idx_ + + (buffer->chain_length_ - 1) * kNumberOfElements); + } + + inline bool IsFull() { return idx_ == kNumberOfElements; } + + inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; } + + static void 
UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer, + bool code_slots_filtering_required) { + while (buffer != NULL) { + if (code_slots_filtering_required) { + buffer->UpdateSlotsWithFilter(heap); + } else { + buffer->UpdateSlots(heap); + } + buffer = buffer->next(); + } + } + + enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW }; + + static bool ChainLengthThresholdReached(SlotsBuffer* buffer) { + return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold; + } + + INLINE(static bool AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, ObjectSlot slot, + AdditionMode mode)) { + SlotsBuffer* buffer = *buffer_address; + if (buffer == NULL || buffer->IsFull()) { + if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { + allocator->DeallocateChain(buffer_address); + return false; + } + buffer = allocator->AllocateBuffer(buffer); + *buffer_address = buffer; + } + buffer->Add(slot); + return true; + } + + static bool IsTypedSlot(ObjectSlot slot); + + static bool AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, SlotType type, Address addr, + AdditionMode mode); + + static const int kNumberOfElements = 1021; + + private: + static const int kChainLengthThreshold = 15; + + intptr_t idx_; + intptr_t chain_length_; + SlotsBuffer* next_; + ObjectSlot slots_[kNumberOfElements]; +}; + + +// CodeFlusher collects candidates for code flushing during marking and +// processes those candidates after marking has completed in order to +// reset those functions referencing code objects that would otherwise +// be unreachable. Code objects can be referenced in three ways: +// - SharedFunctionInfo references unoptimized code. +// - JSFunction references either unoptimized or optimized code. +// - OptimizedCodeMap references optimized code. 
+// We are not allowed to flush unoptimized code for functions that got +// optimized or inlined into optimized code, because we might bailout +// into the unoptimized code again during deoptimization. +class CodeFlusher { + public: + explicit CodeFlusher(Isolate* isolate) + : isolate_(isolate), + jsfunction_candidates_head_(NULL), + shared_function_info_candidates_head_(NULL), + optimized_code_map_holder_head_(NULL) {} + + void AddCandidate(SharedFunctionInfo* shared_info) { + if (GetNextCandidate(shared_info) == NULL) { + SetNextCandidate(shared_info, shared_function_info_candidates_head_); + shared_function_info_candidates_head_ = shared_info; + } + } + + void AddCandidate(JSFunction* function) { + DCHECK(function->code() == function->shared()->code()); + if (GetNextCandidate(function)->IsUndefined()) { + SetNextCandidate(function, jsfunction_candidates_head_); + jsfunction_candidates_head_ = function; + } + } + + void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) { + if (GetNextCodeMap(code_map_holder)->IsUndefined()) { + SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_); + optimized_code_map_holder_head_ = code_map_holder; + } + } + + void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder); + void EvictCandidate(SharedFunctionInfo* shared_info); + void EvictCandidate(JSFunction* function); + + void ProcessCandidates() { + ProcessOptimizedCodeMaps(); + ProcessSharedFunctionInfoCandidates(); + ProcessJSFunctionCandidates(); + } + + void EvictAllCandidates() { + EvictOptimizedCodeMaps(); + EvictJSFunctionCandidates(); + EvictSharedFunctionInfoCandidates(); + } + + void IteratePointersToFromSpace(ObjectVisitor* v); + + private: + void ProcessOptimizedCodeMaps(); + void ProcessJSFunctionCandidates(); + void ProcessSharedFunctionInfoCandidates(); + void EvictOptimizedCodeMaps(); + void EvictJSFunctionCandidates(); + void EvictSharedFunctionInfoCandidates(); + + static JSFunction** GetNextCandidateSlot(JSFunction* candidate) { + 
return reinterpret_cast<JSFunction**>( + HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset)); + } + + static JSFunction* GetNextCandidate(JSFunction* candidate) { + Object* next_candidate = candidate->next_function_link(); + return reinterpret_cast<JSFunction*>(next_candidate); + } + + static void SetNextCandidate(JSFunction* candidate, + JSFunction* next_candidate) { + candidate->set_next_function_link(next_candidate); + } + + static void ClearNextCandidate(JSFunction* candidate, Object* undefined) { + DCHECK(undefined->IsUndefined()); + candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER); + } + + static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) { + Object* next_candidate = candidate->code()->gc_metadata(); + return reinterpret_cast<SharedFunctionInfo*>(next_candidate); + } + + static void SetNextCandidate(SharedFunctionInfo* candidate, + SharedFunctionInfo* next_candidate) { + candidate->code()->set_gc_metadata(next_candidate); + } + + static void ClearNextCandidate(SharedFunctionInfo* candidate) { + candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER); + } + + static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) { + FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); + Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex); + return reinterpret_cast<SharedFunctionInfo*>(next_map); + } + + static void SetNextCodeMap(SharedFunctionInfo* holder, + SharedFunctionInfo* next_holder) { + FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); + code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder); + } + + static void ClearNextCodeMap(SharedFunctionInfo* holder) { + FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); + code_map->set_undefined(SharedFunctionInfo::kNextMapIndex); + } + + Isolate* isolate_; + JSFunction* jsfunction_candidates_head_; + SharedFunctionInfo* shared_function_info_candidates_head_; + 
SharedFunctionInfo* optimized_code_map_holder_head_; + + DISALLOW_COPY_AND_ASSIGN(CodeFlusher); +}; + + +// Defined in isolate.h. +class ThreadLocalTop; + + +// ------------------------------------------------------------------------- +// Mark-Compact collector +class MarkCompactCollector { + public: + // Set the global flags, it must be called before Prepare to take effect. + inline void SetFlags(int flags); + + static void Initialize(); + + void SetUp(); + + void TearDown(); + + void CollectEvacuationCandidates(PagedSpace* space); + + void AddEvacuationCandidate(Page* p); + + // Prepares for GC by resetting relocation info in old and map spaces and + // choosing spaces to compact. + void Prepare(); + + // Performs a global garbage collection. + void CollectGarbage(); + + enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION }; + + bool StartCompaction(CompactionMode mode); + + void AbortCompaction(); + +#ifdef DEBUG + // Checks whether performing mark-compact collection. + bool in_use() { return state_ > PREPARE_GC; } + bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; } +#endif + + // Determine type of object and emit deletion log event. + static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate); + + // Distinguishable invalid map encodings (for single word and multiple words) + // that indicate free regions. 
+ static const uint32_t kSingleFreeEncoding = 0; + static const uint32_t kMultiFreeEncoding = 1; + + static inline bool IsMarked(Object* obj); + + inline Heap* heap() const { return heap_; } + inline Isolate* isolate() const; + + CodeFlusher* code_flusher() { return code_flusher_; } + inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; } + void EnableCodeFlushing(bool enable); + + enum SweeperType { + PARALLEL_CONSERVATIVE, + CONCURRENT_CONSERVATIVE, + PARALLEL_PRECISE, + CONCURRENT_PRECISE, + PRECISE + }; + + enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL }; + +#ifdef VERIFY_HEAP + void VerifyMarkbitsAreClean(); + static void VerifyMarkbitsAreClean(PagedSpace* space); + static void VerifyMarkbitsAreClean(NewSpace* space); + void VerifyWeakEmbeddedObjectsInCode(); + void VerifyOmittedMapChecks(); +#endif + + // Sweep a single page from the given space conservatively. + // Returns the size of the biggest continuous freed memory chunk in bytes. + template <SweepingParallelism type> + static int SweepConservatively(PagedSpace* space, FreeList* free_list, + Page* p); + + INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { + return Page::FromAddress(reinterpret_cast<Address>(anchor)) + ->ShouldSkipEvacuationSlotRecording(); + } + + INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) { + return Page::FromAddress(reinterpret_cast<Address>(host)) + ->ShouldSkipEvacuationSlotRecording(); + } + + INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { + return Page::FromAddress(reinterpret_cast<Address>(obj)) + ->IsEvacuationCandidate(); + } + + INLINE(void EvictEvacuationCandidate(Page* page)) { + if (FLAG_trace_fragmentation) { + PrintF("Page %p is too popular. Disabling evacuation.\n", + reinterpret_cast<void*>(page)); + } + + // TODO(gc) If all evacuation candidates are too popular we + // should stop slots recording entirely. 
+ page->ClearEvacuationCandidate(); + + // We were not collecting slots on this page that point + // to other evacuation candidates thus we have to + // rescan the page after evacuation to discover and update all + // pointers to evacuated objects. + if (page->owner()->identity() == OLD_DATA_SPACE) { + evacuation_candidates_.RemoveElement(page); + } else { + page->SetFlag(Page::RESCAN_ON_EVACUATION); + } + } + + void RecordRelocSlot(RelocInfo* rinfo, Object* target); + void RecordCodeEntrySlot(Address slot, Code* target); + void RecordCodeTargetPatch(Address pc, Code* target); + + INLINE(void RecordSlot( + Object** anchor_slot, Object** slot, Object* object, + SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW)); + + void MigrateObject(HeapObject* dst, HeapObject* src, int size, + AllocationSpace to_old_space); + + bool TryPromoteObject(HeapObject* object, int object_size); + + void InvalidateCode(Code* code); + + void ClearMarkbits(); + + bool abort_incremental_marking() const { return abort_incremental_marking_; } + + bool is_compacting() const { return compacting_; } + + MarkingParity marking_parity() { return marking_parity_; } + + // Concurrent and parallel sweeping support. If required_freed_bytes was set + // to a value larger than 0, then sweeping returns after a block of at least + // required_freed_bytes was freed. If required_freed_bytes was set to zero + // then the whole given space is swept. It returns the size of the maximum + // continuous freed memory chunk. + int SweepInParallel(PagedSpace* space, int required_freed_bytes); + + // Sweeps a given page concurrently to the sweeper threads. It returns the + // size of the maximum continuous freed memory chunk. + int SweepInParallel(Page* page, PagedSpace* space); + + void EnsureSweepingCompleted(); + + // If sweeper threads are not active this method will return true. If + // this is a latency issue we should be smarter here. 
Otherwise, it will + // return true if the sweeper threads are done processing the pages. + bool IsSweepingCompleted(); + + void RefillFreeList(PagedSpace* space); + + bool AreSweeperThreadsActivated(); + + // Checks if sweeping is in progress right now on any space. + bool sweeping_in_progress() { return sweeping_in_progress_; } + + void set_sequential_sweeping(bool sequential_sweeping) { + sequential_sweeping_ = sequential_sweeping; + } + + bool sequential_sweeping() const { return sequential_sweeping_; } + + // Mark the global table which maps weak objects to dependent code without + // marking its contents. + void MarkWeakObjectToCodeTable(); + + // Special case for processing weak references in a full collection. We need + // to artificially keep AllocationSites alive for a time. + void MarkAllocationSite(AllocationSite* site); + + private: + class SweeperTask; + + explicit MarkCompactCollector(Heap* heap); + ~MarkCompactCollector(); + + bool MarkInvalidatedCode(); + bool WillBeDeoptimized(Code* code); + void RemoveDeadInvalidatedCode(); + void ProcessInvalidatedCode(ObjectVisitor* visitor); + + void StartSweeperThreads(); + +#ifdef DEBUG + enum CollectorState { + IDLE, + PREPARE_GC, + MARK_LIVE_OBJECTS, + SWEEP_SPACES, + ENCODE_FORWARDING_ADDRESSES, + UPDATE_POINTERS, + RELOCATE_OBJECTS + }; + + // The current stage of the collector. + CollectorState state_; +#endif + + // Global flag that forces sweeping to be precise, so we can traverse the + // heap. + bool sweep_precisely_; + + bool reduce_memory_footprint_; + + bool abort_incremental_marking_; + + MarkingParity marking_parity_; + + // True if we are collecting slots to perform evacuation from evacuation + // candidates. + bool compacting_; + + bool was_marked_incrementally_; + + // True if concurrent or parallel sweeping is currently in progress. 
+ bool sweeping_in_progress_; + + base::Semaphore pending_sweeper_jobs_semaphore_; + + bool sequential_sweeping_; + + SlotsBufferAllocator slots_buffer_allocator_; + + SlotsBuffer* migration_slots_buffer_; + + // Finishes GC, performs heap verification if enabled. + void Finish(); + + // ----------------------------------------------------------------------- + // Phase 1: Marking live objects. + // + // Before: The heap has been prepared for garbage collection by + // MarkCompactCollector::Prepare() and is otherwise in its + // normal state. + // + // After: Live objects are marked and non-live objects are unmarked. + + friend class RootMarkingVisitor; + friend class MarkingVisitor; + friend class MarkCompactMarkingVisitor; + friend class CodeMarkingVisitor; + friend class SharedFunctionInfoMarkingVisitor; + + // Mark code objects that are active on the stack to prevent them + // from being flushed. + void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top); + + void PrepareForCodeFlushing(); + + // Marking operations for objects reachable from roots. + void MarkLiveObjects(); + + void AfterMarking(); + + // Marks the object black and pushes it on the marking stack. + // This is for non-incremental marking only. + INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit)); + + // Marks the object black assuming that it is not yet marked. + // This is for non-incremental marking only. + INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit)); + + // Mark the heap roots and all objects reachable from them. + void MarkRoots(RootMarkingVisitor* visitor); + + // Mark the string table specially. References to internalized strings from + // the string table are weak. + void MarkStringTable(RootMarkingVisitor* visitor); + + // Mark objects in implicit references groups if their parent object + // is marked. + void MarkImplicitRefGroups(); + + // Mark objects reachable (transitively) from objects in the marking stack + // or overflowed in the heap. 
+ void ProcessMarkingDeque(); + + // Mark objects reachable (transitively) from objects in the marking stack + // or overflowed in the heap. This respects references only considered in + // the final atomic marking pause including the following: + // - Processing of objects reachable through Harmony WeakMaps. + // - Objects reachable due to host application logic like object groups + // or implicit references' groups. + void ProcessEphemeralMarking(ObjectVisitor* visitor); + + // If the call-site of the top optimized code was not prepared for + // deoptimization, then treat the maps in the code as strong pointers, + // otherwise a map can die and deoptimize the code. + void ProcessTopOptimizedFrame(ObjectVisitor* visitor); + + // Mark objects reachable (transitively) from objects in the marking + // stack. This function empties the marking stack, but may leave + // overflowed objects in the heap, in which case the marking stack's + // overflow flag will be set. + void EmptyMarkingDeque(); + + // Refill the marking stack with overflowed objects from the heap. This + // function either leaves the marking stack full or clears the overflow + // flag on the marking stack. + void RefillMarkingDeque(); + + // After reachable maps have been marked process per context object + // literal map caches removing unmarked entries. + void ProcessMapCaches(); + + // Callback function for telling whether the object *p is an unmarked + // heap object. + static bool IsUnmarkedHeapObject(Object** p); + static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p); + + // Map transitions from a live map to a dead map must be killed. + // We replace them with a null descriptor, with the same key. 
+ void ClearNonLiveReferences(); + void ClearNonLivePrototypeTransitions(Map* map); + void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark); + void ClearMapTransitions(Map* map); + bool ClearMapBackPointer(Map* map); + void TrimDescriptorArray(Map* map, DescriptorArray* descriptors, + int number_of_own_descriptors); + void TrimEnumCache(Map* map, DescriptorArray* descriptors); + + void ClearDependentCode(DependentCode* dependent_code); + void ClearDependentICList(Object* head); + void ClearNonLiveDependentCode(DependentCode* dependent_code); + int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group, + int start, int end, int new_start); + + // Mark all values associated with reachable keys in weak collections + // encountered so far. This might push new object or even new weak maps onto + // the marking stack. + void ProcessWeakCollections(); + + // After all reachable objects have been marked those weak map entries + // with an unreachable key are removed from all encountered weak maps. + // The linked list of all encountered weak maps is destroyed. + void ClearWeakCollections(); + + // We have to remove all encountered weak maps from the list of weak + // collections when incremental marking is aborted. + void AbortWeakCollections(); + + // ----------------------------------------------------------------------- + // Phase 2: Sweeping to clear mark bits and free non-live objects for + // a non-compacting collection. + // + // Before: Live objects are marked and non-live objects are unmarked. + // + // After: Live objects are unmarked, non-live regions have been added to + // their space's free list. Active eden semispace is compacted by + // evacuation. + // + + // If we are not compacting the heap, we simply sweep the spaces except + // for the large object space, clearing mark bits and adding unmarked + // regions to each space's free list. 
+ void SweepSpaces(); + + int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space, + NewSpacePage* p); + + void EvacuateNewSpace(); + + void EvacuateLiveObjectsFromPage(Page* p); + + void EvacuatePages(); + + void EvacuateNewSpaceAndCandidates(); + + void ReleaseEvacuationCandidates(); + + // Moves the pages of the evacuation_candidates_ list to the end of their + // corresponding space pages list. + void MoveEvacuationCandidatesToEndOfPagesList(); + + void SweepSpace(PagedSpace* space, SweeperType sweeper); + + // Finalizes the parallel sweeping phase. Marks all the pages that were + // swept in parallel. + void ParallelSweepSpacesComplete(); + + void ParallelSweepSpaceComplete(PagedSpace* space); + + // Updates store buffer and slot buffer for a pointer in a migrating object. + void RecordMigratedSlot(Object* value, Address slot); + +#ifdef DEBUG + friend class MarkObjectVisitor; + static void VisitObject(HeapObject* obj); + + friend class UnmarkObjectVisitor; + static void UnmarkObject(HeapObject* obj); +#endif + + Heap* heap_; + MarkingDeque marking_deque_; + CodeFlusher* code_flusher_; + bool have_code_to_deoptimize_; + + List<Page*> evacuation_candidates_; + List<Code*> invalidated_code_; + + SmartPointer<FreeList> free_list_old_data_space_; + SmartPointer<FreeList> free_list_old_pointer_space_; + + friend class Heap; +}; + + +class MarkBitCellIterator BASE_EMBEDDED { + public: + explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) { + last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex( + chunk_->AddressToMarkbitIndex(chunk_->area_end()))); + cell_base_ = chunk_->area_start(); + cell_index_ = Bitmap::IndexToCell( + Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_))); + cells_ = chunk_->markbits()->cells(); + } + + inline bool Done() { return cell_index_ == last_cell_index_; } + + inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; } + + inline MarkBit::CellType* CurrentCell() { + DCHECK(cell_index_ == 
Bitmap::IndexToCell(Bitmap::CellAlignIndex( + chunk_->AddressToMarkbitIndex(cell_base_)))); + return &cells_[cell_index_]; + } + + inline Address CurrentCellBase() { + DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex( + chunk_->AddressToMarkbitIndex(cell_base_)))); + return cell_base_; + } + + inline void Advance() { + cell_index_++; + cell_base_ += 32 * kPointerSize; + } + + private: + MemoryChunk* chunk_; + MarkBit::CellType* cells_; + unsigned int last_cell_index_; + unsigned int cell_index_; + Address cell_base_; +}; + + +class SequentialSweepingScope BASE_EMBEDDED { + public: + explicit SequentialSweepingScope(MarkCompactCollector* collector) + : collector_(collector) { + collector_->set_sequential_sweeping(true); + } + + ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); } + + private: + MarkCompactCollector* collector_; +}; + + +const char* AllocationSpaceName(AllocationSpace space); +} +} // namespace v8::internal + +#endif // V8_HEAP_MARK_COMPACT_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/mark-compact-inl.h nodejs-0.11.15/deps/v8/src/heap/mark-compact-inl.h --- nodejs-0.11.13/deps/v8/src/heap/mark-compact-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/mark-compact-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,75 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_HEAP_MARK_COMPACT_INL_H_ +#define V8_HEAP_MARK_COMPACT_INL_H_ + +#include <memory.h> + +#include "src/heap/mark-compact.h" +#include "src/isolate.h" + + +namespace v8 { +namespace internal { + + +MarkBit Marking::MarkBitFrom(Address addr) { + MemoryChunk* p = MemoryChunk::FromAddress(addr); + return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr), + p->ContainsOnlyData()); +} + + +void MarkCompactCollector::SetFlags(int flags) { + sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0); + reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0); + abort_incremental_marking_ = + ((flags & Heap::kAbortIncrementalMarkingMask) != 0); +} + + +void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { + DCHECK(Marking::MarkBitFrom(obj) == mark_bit); + if (!mark_bit.Get()) { + mark_bit.Set(); + MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); + DCHECK(IsMarked(obj)); + DCHECK(obj->GetIsolate()->heap()->Contains(obj)); + marking_deque_.PushBlack(obj); + } +} + + +void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) { + DCHECK(!mark_bit.Get()); + DCHECK(Marking::MarkBitFrom(obj) == mark_bit); + mark_bit.Set(); + MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); +} + + +bool MarkCompactCollector::IsMarked(Object* obj) { + DCHECK(obj->IsHeapObject()); + HeapObject* heap_object = HeapObject::cast(obj); + return Marking::MarkBitFrom(heap_object).Get(); +} + + +void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot, + Object* object, + SlotsBuffer::AdditionMode mode) { + Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); + if (object_page->IsEvacuationCandidate() && + !ShouldSkipEvacuationSlotRecording(anchor_slot)) { + if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, + object_page->slots_buffer_address(), slot, mode)) { + EvictEvacuationCandidate(object_page); + } + } +} +} +} // namespace v8::internal + +#endif 
// V8_HEAP_MARK_COMPACT_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/objects-visiting.cc nodejs-0.11.15/deps/v8/src/heap/objects-visiting.cc --- nodejs-0.11.13/deps/v8/src/heap/objects-visiting.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/objects-visiting.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,414 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/heap/objects-visiting.h" +#include "src/ic-inl.h" + +namespace v8 { +namespace internal { + + +StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( + int instance_type, int instance_size) { + if (instance_type < FIRST_NONSTRING_TYPE) { + switch (instance_type & kStringRepresentationMask) { + case kSeqStringTag: + if ((instance_type & kStringEncodingMask) == kOneByteStringTag) { + return kVisitSeqOneByteString; + } else { + return kVisitSeqTwoByteString; + } + + case kConsStringTag: + if (IsShortcutCandidate(instance_type)) { + return kVisitShortcutCandidate; + } else { + return kVisitConsString; + } + + case kSlicedStringTag: + return kVisitSlicedString; + + case kExternalStringTag: + return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric, + instance_size); + } + UNREACHABLE(); + } + + switch (instance_type) { + case BYTE_ARRAY_TYPE: + return kVisitByteArray; + + case FREE_SPACE_TYPE: + return kVisitFreeSpace; + + case FIXED_ARRAY_TYPE: + return kVisitFixedArray; + + case FIXED_DOUBLE_ARRAY_TYPE: + return kVisitFixedDoubleArray; + + case CONSTANT_POOL_ARRAY_TYPE: + return kVisitConstantPoolArray; + + case ODDBALL_TYPE: + return kVisitOddball; + + case MAP_TYPE: + return kVisitMap; + + case CODE_TYPE: + return kVisitCode; + + case CELL_TYPE: + return kVisitCell; + + case PROPERTY_CELL_TYPE: + return kVisitPropertyCell; + + case JS_SET_TYPE: + return GetVisitorIdForSize(kVisitStruct, 
kVisitStructGeneric, + JSSet::kSize); + + case JS_MAP_TYPE: + return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric, + JSMap::kSize); + + case JS_WEAK_MAP_TYPE: + case JS_WEAK_SET_TYPE: + return kVisitJSWeakCollection; + + case JS_REGEXP_TYPE: + return kVisitJSRegExp; + + case SHARED_FUNCTION_INFO_TYPE: + return kVisitSharedFunctionInfo; + + case JS_PROXY_TYPE: + return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric, + JSProxy::kSize); + + case JS_FUNCTION_PROXY_TYPE: + return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric, + JSFunctionProxy::kSize); + + case FOREIGN_TYPE: + return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric, + Foreign::kSize); + + case SYMBOL_TYPE: + return kVisitSymbol; + + case FILLER_TYPE: + return kVisitDataObjectGeneric; + + case JS_ARRAY_BUFFER_TYPE: + return kVisitJSArrayBuffer; + + case JS_TYPED_ARRAY_TYPE: + return kVisitJSTypedArray; + + case JS_DATA_VIEW_TYPE: + return kVisitJSDataView; + + case JS_OBJECT_TYPE: + case JS_CONTEXT_EXTENSION_OBJECT_TYPE: + case JS_GENERATOR_OBJECT_TYPE: + case JS_MODULE_TYPE: + case JS_VALUE_TYPE: + case JS_DATE_TYPE: + case JS_ARRAY_TYPE: + case JS_GLOBAL_PROXY_TYPE: + case JS_GLOBAL_OBJECT_TYPE: + case JS_BUILTINS_OBJECT_TYPE: + case JS_MESSAGE_OBJECT_TYPE: + case JS_SET_ITERATOR_TYPE: + case JS_MAP_ITERATOR_TYPE: + return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric, + instance_size); + + case JS_FUNCTION_TYPE: + return kVisitJSFunction; + + case HEAP_NUMBER_TYPE: + case MUTABLE_HEAP_NUMBER_TYPE: +#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \ + case EXTERNAL_##TYPE##_ARRAY_TYPE: + + TYPED_ARRAYS(EXTERNAL_ARRAY_CASE) + return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric, + instance_size); +#undef EXTERNAL_ARRAY_CASE + + case FIXED_UINT8_ARRAY_TYPE: + case FIXED_INT8_ARRAY_TYPE: + case FIXED_UINT16_ARRAY_TYPE: + case FIXED_INT16_ARRAY_TYPE: + case FIXED_UINT32_ARRAY_TYPE: + case FIXED_INT32_ARRAY_TYPE: + case 
FIXED_FLOAT32_ARRAY_TYPE: + case FIXED_UINT8_CLAMPED_ARRAY_TYPE: + return kVisitFixedTypedArray; + + case FIXED_FLOAT64_ARRAY_TYPE: + return kVisitFixedFloat64Array; + +#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: + STRUCT_LIST(MAKE_STRUCT_CASE) +#undef MAKE_STRUCT_CASE + if (instance_type == ALLOCATION_SITE_TYPE) { + return kVisitAllocationSite; + } + + return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric, + instance_size); + + default: + UNREACHABLE(); + return kVisitorIdCount; + } +} + + +// We don't record weak slots during marking or scavenges. Instead we do it +// once when we complete mark-compact cycle. Note that write barrier has no +// effect if we are already in the middle of compacting mark-sweep cycle and we +// have to record slots manually. +static bool MustRecordSlots(Heap* heap) { + return heap->gc_state() == Heap::MARK_COMPACT && + heap->mark_compact_collector()->is_compacting(); +} + + +template <class T> +struct WeakListVisitor; + + +template <class T> +Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) { + Object* undefined = heap->undefined_value(); + Object* head = undefined; + T* tail = NULL; + MarkCompactCollector* collector = heap->mark_compact_collector(); + bool record_slots = MustRecordSlots(heap); + while (list != undefined) { + // Check whether to keep the candidate in the list. + T* candidate = reinterpret_cast<T*>(list); + Object* retained = retainer->RetainAs(list); + if (retained != NULL) { + if (head == undefined) { + // First element in the list. + head = retained; + } else { + // Subsequent elements in the list. + DCHECK(tail != NULL); + WeakListVisitor<T>::SetWeakNext(tail, retained); + if (record_slots) { + Object** next_slot = + HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset()); + collector->RecordSlot(next_slot, next_slot, retained); + } + } + // Retained object is new tail. 
+ DCHECK(!retained->IsUndefined()); + candidate = reinterpret_cast<T*>(retained); + tail = candidate; + + + // tail is a live object, visit it. + WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer); + } else { + WeakListVisitor<T>::VisitPhantomObject(heap, candidate); + } + + // Move to next element in the list. + list = WeakListVisitor<T>::WeakNext(candidate); + } + + // Terminate the list if there is one or more elements. + if (tail != NULL) { + WeakListVisitor<T>::SetWeakNext(tail, undefined); + } + return head; +} + + +template <class T> +static void ClearWeakList(Heap* heap, Object* list) { + Object* undefined = heap->undefined_value(); + while (list != undefined) { + T* candidate = reinterpret_cast<T*>(list); + list = WeakListVisitor<T>::WeakNext(candidate); + WeakListVisitor<T>::SetWeakNext(candidate, undefined); + } +} + + +template <> +struct WeakListVisitor<JSFunction> { + static void SetWeakNext(JSFunction* function, Object* next) { + function->set_next_function_link(next); + } + + static Object* WeakNext(JSFunction* function) { + return function->next_function_link(); + } + + static int WeakNextOffset() { return JSFunction::kNextFunctionLinkOffset; } + + static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {} + + static void VisitPhantomObject(Heap*, JSFunction*) {} +}; + + +template <> +struct WeakListVisitor<Code> { + static void SetWeakNext(Code* code, Object* next) { + code->set_next_code_link(next); + } + + static Object* WeakNext(Code* code) { return code->next_code_link(); } + + static int WeakNextOffset() { return Code::kNextCodeLinkOffset; } + + static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {} + + static void VisitPhantomObject(Heap*, Code*) {} +}; + + +template <> +struct WeakListVisitor<Context> { + static void SetWeakNext(Context* context, Object* next) { + context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WRITE_BARRIER); + } + + static Object* WeakNext(Context* context) { + return 
context->get(Context::NEXT_CONTEXT_LINK); + } + + static int WeakNextOffset() { + return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK); + } + + static void VisitLiveObject(Heap* heap, Context* context, + WeakObjectRetainer* retainer) { + // Process the three weak lists linked off the context. + DoWeakList<JSFunction>(heap, context, retainer, + Context::OPTIMIZED_FUNCTIONS_LIST); + DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST); + DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST); + } + + template <class T> + static void DoWeakList(Heap* heap, Context* context, + WeakObjectRetainer* retainer, int index) { + // Visit the weak list, removing dead intermediate elements. + Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer); + + // Update the list head. + context->set(index, list_head, UPDATE_WRITE_BARRIER); + + if (MustRecordSlots(heap)) { + // Record the updated slot if necessary. + Object** head_slot = + HeapObject::RawField(context, FixedArray::SizeFor(index)); + heap->mark_compact_collector()->RecordSlot(head_slot, head_slot, + list_head); + } + } + + static void VisitPhantomObject(Heap* heap, Context* context) { + ClearWeakList<JSFunction>(heap, + context->get(Context::OPTIMIZED_FUNCTIONS_LIST)); + ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST)); + ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST)); + } +}; + + +template <> +struct WeakListVisitor<JSArrayBufferView> { + static void SetWeakNext(JSArrayBufferView* obj, Object* next) { + obj->set_weak_next(next); + } + + static Object* WeakNext(JSArrayBufferView* obj) { return obj->weak_next(); } + + static int WeakNextOffset() { return JSArrayBufferView::kWeakNextOffset; } + + static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {} + + static void VisitPhantomObject(Heap*, JSArrayBufferView*) {} +}; + + +template <> +struct WeakListVisitor<JSArrayBuffer> { + static void 
SetWeakNext(JSArrayBuffer* obj, Object* next) { + obj->set_weak_next(next); + } + + static Object* WeakNext(JSArrayBuffer* obj) { return obj->weak_next(); } + + static int WeakNextOffset() { return JSArrayBuffer::kWeakNextOffset; } + + static void VisitLiveObject(Heap* heap, JSArrayBuffer* array_buffer, + WeakObjectRetainer* retainer) { + Object* typed_array_obj = VisitWeakList<JSArrayBufferView>( + heap, array_buffer->weak_first_view(), retainer); + array_buffer->set_weak_first_view(typed_array_obj); + if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) { + Object** slot = HeapObject::RawField(array_buffer, + JSArrayBuffer::kWeakFirstViewOffset); + heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj); + } + } + + static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) { + Runtime::FreeArrayBuffer(heap->isolate(), phantom); + } +}; + + +template <> +struct WeakListVisitor<AllocationSite> { + static void SetWeakNext(AllocationSite* obj, Object* next) { + obj->set_weak_next(next); + } + + static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); } + + static int WeakNextOffset() { return AllocationSite::kWeakNextOffset; } + + static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {} + + static void VisitPhantomObject(Heap*, AllocationSite*) {} +}; + + +template Object* VisitWeakList<Code>(Heap* heap, Object* list, + WeakObjectRetainer* retainer); + + +template Object* VisitWeakList<JSFunction>(Heap* heap, Object* list, + WeakObjectRetainer* retainer); + + +template Object* VisitWeakList<Context>(Heap* heap, Object* list, + WeakObjectRetainer* retainer); + + +template Object* VisitWeakList<JSArrayBuffer>(Heap* heap, Object* list, + WeakObjectRetainer* retainer); + + +template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list, + WeakObjectRetainer* retainer); +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/objects-visiting.h 
nodejs-0.11.15/deps/v8/src/heap/objects-visiting.h --- nodejs-0.11.13/deps/v8/src/heap/objects-visiting.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/objects-visiting.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,452 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_VISITING_H_ +#define V8_OBJECTS_VISITING_H_ + +#include "src/allocation.h" + +// This file provides base classes and auxiliary methods for defining +// static object visitors used during GC. +// Visiting HeapObject body with a normal ObjectVisitor requires performing +// two switches on object's instance type to determine object size and layout +// and one or more virtual method calls on visitor itself. +// Static visitor is different: it provides a dispatch table which contains +// pointers to specialized visit functions. Each map has the visitor_id +// field which contains an index of specialized visitor to use. + +namespace v8 { +namespace internal { + + +// Base class for all static visitors. 
+class StaticVisitorBase : public AllStatic { + public: +#define VISITOR_ID_LIST(V) \ + V(SeqOneByteString) \ + V(SeqTwoByteString) \ + V(ShortcutCandidate) \ + V(ByteArray) \ + V(FreeSpace) \ + V(FixedArray) \ + V(FixedDoubleArray) \ + V(FixedTypedArray) \ + V(FixedFloat64Array) \ + V(ConstantPoolArray) \ + V(NativeContext) \ + V(AllocationSite) \ + V(DataObject2) \ + V(DataObject3) \ + V(DataObject4) \ + V(DataObject5) \ + V(DataObject6) \ + V(DataObject7) \ + V(DataObject8) \ + V(DataObject9) \ + V(DataObjectGeneric) \ + V(JSObject2) \ + V(JSObject3) \ + V(JSObject4) \ + V(JSObject5) \ + V(JSObject6) \ + V(JSObject7) \ + V(JSObject8) \ + V(JSObject9) \ + V(JSObjectGeneric) \ + V(Struct2) \ + V(Struct3) \ + V(Struct4) \ + V(Struct5) \ + V(Struct6) \ + V(Struct7) \ + V(Struct8) \ + V(Struct9) \ + V(StructGeneric) \ + V(ConsString) \ + V(SlicedString) \ + V(Symbol) \ + V(Oddball) \ + V(Code) \ + V(Map) \ + V(Cell) \ + V(PropertyCell) \ + V(SharedFunctionInfo) \ + V(JSFunction) \ + V(JSWeakCollection) \ + V(JSArrayBuffer) \ + V(JSTypedArray) \ + V(JSDataView) \ + V(JSRegExp) + + // For data objects, JS objects and structs along with generic visitor which + // can visit object of any size we provide visitors specialized by + // object size in words. + // Ids of specialized visitors are declared in a linear order (without + // holes) starting from the id of visitor specialized for 2 words objects + // (base visitor id) and ending with the id of generic visitor. + // Method GetVisitorIdForSize depends on this ordering to calculate visitor + // id of specialized visitor from given instance size, base visitor id and + // generic visitor's id. 
+ enum VisitorId { +#define VISITOR_ID_ENUM_DECL(id) kVisit##id, + VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL) +#undef VISITOR_ID_ENUM_DECL + kVisitorIdCount, + kVisitDataObject = kVisitDataObject2, + kVisitJSObject = kVisitJSObject2, + kVisitStruct = kVisitStruct2, + kMinObjectSizeInWords = 2 + }; + + // Visitor ID should fit in one byte. + STATIC_ASSERT(kVisitorIdCount <= 256); + + // Determine which specialized visitor should be used for given instance type + // and instance type. + static VisitorId GetVisitorId(int instance_type, int instance_size); + + static VisitorId GetVisitorId(Map* map) { + return GetVisitorId(map->instance_type(), map->instance_size()); + } + + // For visitors that allow specialization by size calculate VisitorId based + // on size, base visitor id and generic visitor id. + static VisitorId GetVisitorIdForSize(VisitorId base, VisitorId generic, + int object_size) { + DCHECK((base == kVisitDataObject) || (base == kVisitStruct) || + (base == kVisitJSObject)); + DCHECK(IsAligned(object_size, kPointerSize)); + DCHECK(kMinObjectSizeInWords * kPointerSize <= object_size); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); + + const VisitorId specialization = static_cast<VisitorId>( + base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords); + + return Min(specialization, generic); + } +}; + + +template <typename Callback> +class VisitorDispatchTable { + public: + void CopyFrom(VisitorDispatchTable* other) { + // We are not using memcpy to guarantee that during update + // every element of callbacks_ array will remain correct + // pointer (memcpy might be implemented as a byte copying loop). 
+ for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) { + base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]); + } + } + + inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) { + return reinterpret_cast<Callback>(callbacks_[id]); + } + + inline Callback GetVisitor(Map* map) { + return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]); + } + + void Register(StaticVisitorBase::VisitorId id, Callback callback) { + DCHECK(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned. + callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback); + } + + template <typename Visitor, StaticVisitorBase::VisitorId base, + StaticVisitorBase::VisitorId generic, int object_size_in_words> + void RegisterSpecialization() { + static const int size = object_size_in_words * kPointerSize; + Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size), + &Visitor::template VisitSpecialized<size>); + } + + + template <typename Visitor, StaticVisitorBase::VisitorId base, + StaticVisitorBase::VisitorId generic> + void RegisterSpecializations() { + STATIC_ASSERT((generic - base + StaticVisitorBase::kMinObjectSizeInWords) == + 10); + RegisterSpecialization<Visitor, base, generic, 2>(); + RegisterSpecialization<Visitor, base, generic, 3>(); + RegisterSpecialization<Visitor, base, generic, 4>(); + RegisterSpecialization<Visitor, base, generic, 5>(); + RegisterSpecialization<Visitor, base, generic, 6>(); + RegisterSpecialization<Visitor, base, generic, 7>(); + RegisterSpecialization<Visitor, base, generic, 8>(); + RegisterSpecialization<Visitor, base, generic, 9>(); + Register(generic, &Visitor::Visit); + } + + private: + base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount]; +}; + + +template <typename StaticVisitor> +class BodyVisitorBase : public AllStatic { + public: + INLINE(static void IteratePointers(Heap* heap, HeapObject* object, + int start_offset, int end_offset)) { + Object** start_slot = + 
reinterpret_cast<Object**>(object->address() + start_offset); + Object** end_slot = + reinterpret_cast<Object**>(object->address() + end_offset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); + } +}; + + +template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType> +class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> { + public: + INLINE(static ReturnType Visit(Map* map, HeapObject* object)) { + int object_size = BodyDescriptor::SizeOf(map, object); + BodyVisitorBase<StaticVisitor>::IteratePointers( + map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size); + return static_cast<ReturnType>(object_size); + } + + template <int object_size> + static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) { + DCHECK(BodyDescriptor::SizeOf(map, object) == object_size); + BodyVisitorBase<StaticVisitor>::IteratePointers( + map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size); + return static_cast<ReturnType>(object_size); + } +}; + + +template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType> +class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> { + public: + INLINE(static ReturnType Visit(Map* map, HeapObject* object)) { + BodyVisitorBase<StaticVisitor>::IteratePointers( + map->GetHeap(), object, BodyDescriptor::kStartOffset, + BodyDescriptor::kEndOffset); + return static_cast<ReturnType>(BodyDescriptor::kSize); + } +}; + + +// Base class for visitors used for a linear new space iteration. +// IterateBody returns size of visited object. +// Certain types of objects (i.e. Code objects) are not handled +// by dispatch table of this visitor because they cannot appear +// in the new space. +// +// This class is intended to be used in the following way: +// +// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> { +// ... 
+// } +// +// This is an example of Curiously recurring template pattern +// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). +// We use CRTP to guarantee aggressive compile time optimizations (i.e. +// inlining and specialization of StaticVisitor::VisitPointers methods). +template <typename StaticVisitor> +class StaticNewSpaceVisitor : public StaticVisitorBase { + public: + static void Initialize(); + + INLINE(static int IterateBody(Map* map, HeapObject* obj)) { + return table_.GetVisitor(map)(map, obj); + } + + INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { + for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p); + } + + private: + INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) { + Heap* heap = map->GetHeap(); + VisitPointers(heap, + HeapObject::RawField(object, JSFunction::kPropertiesOffset), + HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); + + // Don't visit code entry. We are using this visitor only during scavenges. 
+ + VisitPointers( + heap, HeapObject::RawField(object, + JSFunction::kCodeEntryOffset + kPointerSize), + HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset)); + return JSFunction::kSize; + } + + INLINE(static int VisitByteArray(Map* map, HeapObject* object)) { + return reinterpret_cast<ByteArray*>(object)->ByteArraySize(); + } + + INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) { + int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); + return FixedDoubleArray::SizeFor(length); + } + + INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) { + return reinterpret_cast<FixedTypedArrayBase*>(object)->size(); + } + + INLINE(static int VisitJSObject(Map* map, HeapObject* object)) { + return JSObjectVisitor::Visit(map, object); + } + + INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) { + return SeqOneByteString::cast(object) + ->SeqOneByteStringSize(map->instance_type()); + } + + INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) { + return SeqTwoByteString::cast(object) + ->SeqTwoByteStringSize(map->instance_type()); + } + + INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) { + return FreeSpace::cast(object)->Size(); + } + + INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object)); + INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object)); + INLINE(static int VisitJSDataView(Map* map, HeapObject* object)); + + class DataObjectVisitor { + public: + template <int object_size> + static inline int VisitSpecialized(Map* map, HeapObject* object) { + return object_size; + } + + INLINE(static int Visit(Map* map, HeapObject* object)) { + return map->instance_size(); + } + }; + + typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, int> + StructVisitor; + + typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, int> + JSObjectVisitor; + + typedef int (*Callback)(Map* map, HeapObject* object); + + static 
VisitorDispatchTable<Callback> table_; +}; + + +template <typename StaticVisitor> +VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback> + StaticNewSpaceVisitor<StaticVisitor>::table_; + + +// Base class for visitors used to transitively mark the entire heap. +// IterateBody returns nothing. +// Certain types of objects might not be handled by this base class and +// no visitor function is registered by the generic initialization. A +// specialized visitor function needs to be provided by the inheriting +// class itself for those cases. +// +// This class is intended to be used in the following way: +// +// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> { +// ... +// } +// +// This is an example of Curiously recurring template pattern. +template <typename StaticVisitor> +class StaticMarkingVisitor : public StaticVisitorBase { + public: + static void Initialize(); + + INLINE(static void IterateBody(Map* map, HeapObject* obj)) { + table_.GetVisitor(map)(map, obj); + } + + INLINE(static void VisitPropertyCell(Map* map, HeapObject* object)); + INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address)); + INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {} + INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {} + // Skip the weak next code link in a code object. + INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {} + + // TODO(mstarzinger): This should be made protected once refactoring is done. + // Mark non-optimize code for functions inlined into the given optimized + // code. This will prevent it from being flushed. 
+ static void MarkInlinedFunctionsCode(Heap* heap, Code* code); + + protected: + INLINE(static void VisitMap(Map* map, HeapObject* object)); + INLINE(static void VisitCode(Map* map, HeapObject* object)); + INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object)); + INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object)); + INLINE(static void VisitAllocationSite(Map* map, HeapObject* object)); + INLINE(static void VisitWeakCollection(Map* map, HeapObject* object)); + INLINE(static void VisitJSFunction(Map* map, HeapObject* object)); + INLINE(static void VisitJSRegExp(Map* map, HeapObject* object)); + INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object)); + INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object)); + INLINE(static void VisitJSDataView(Map* map, HeapObject* object)); + INLINE(static void VisitNativeContext(Map* map, HeapObject* object)); + + // Mark pointers in a Map and its TransitionArray together, possibly + // treating transitions or back pointers weak. + static void MarkMapContents(Heap* heap, Map* map); + static void MarkTransitionArray(Heap* heap, TransitionArray* transitions); + + // Code flushing support. + INLINE(static bool IsFlushable(Heap* heap, JSFunction* function)); + INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info)); + + // Helpers used by code flushing support that visit pointer fields and treat + // references to code objects either strongly or weakly. 
+ static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object); + static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object); + static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object); + static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object); + + class DataObjectVisitor { + public: + template <int size> + static inline void VisitSpecialized(Map* map, HeapObject* object) {} + + INLINE(static void Visit(Map* map, HeapObject* object)) {} + }; + + typedef FlexibleBodyVisitor<StaticVisitor, FixedArray::BodyDescriptor, void> + FixedArrayVisitor; + + typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void> + JSObjectVisitor; + + typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void> + StructObjectVisitor; + + typedef void (*Callback)(Map* map, HeapObject* object); + + static VisitorDispatchTable<Callback> table_; +}; + + +template <typename StaticVisitor> +VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback> + StaticMarkingVisitor<StaticVisitor>::table_; + + +class WeakObjectRetainer; + + +// A weak list is single linked list where each element has a weak pointer to +// the next element. Given the head of the list, this function removes dead +// elements from the list and if requested records slots for next-element +// pointers. The template parameter T is a WeakListVisitor that defines how to +// access the next-element pointers. 
+template <class T> +Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer); +} +} // namespace v8::internal + +#endif // V8_OBJECTS_VISITING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/objects-visiting-inl.h nodejs-0.11.15/deps/v8/src/heap/objects-visiting-inl.h --- nodejs-0.11.13/deps/v8/src/heap/objects-visiting-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/objects-visiting-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,932 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_VISITING_INL_H_ +#define V8_OBJECTS_VISITING_INL_H_ + + +namespace v8 { +namespace internal { + +template <typename StaticVisitor> +void StaticNewSpaceVisitor<StaticVisitor>::Initialize() { + table_.Register( + kVisitShortcutCandidate, + &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit); + + table_.Register( + kVisitConsString, + &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit); + + table_.Register(kVisitSlicedString, + &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor, + int>::Visit); + + table_.Register( + kVisitSymbol, + &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, int>::Visit); + + table_.Register(kVisitFixedArray, + &FlexibleBodyVisitor<StaticVisitor, + FixedArray::BodyDescriptor, int>::Visit); + + table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray); + table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray); + table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray); + + table_.Register( + kVisitNativeContext, + &FixedBodyVisitor<StaticVisitor, Context::ScavengeBodyDescriptor, + int>::Visit); + + table_.Register(kVisitByteArray, &VisitByteArray); + + table_.Register( + kVisitSharedFunctionInfo, + &FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor, + int>::Visit); + + 
table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString); + + table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString); + + table_.Register(kVisitJSFunction, &VisitJSFunction); + + table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer); + + table_.Register(kVisitJSTypedArray, &VisitJSTypedArray); + + table_.Register(kVisitJSDataView, &VisitJSDataView); + + table_.Register(kVisitFreeSpace, &VisitFreeSpace); + + table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit); + + table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit); + + table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject, + kVisitDataObjectGeneric>(); + + table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject, + kVisitJSObjectGeneric>(); + table_.template RegisterSpecializations<StructVisitor, kVisitStruct, + kVisitStructGeneric>(); +} + + +template <typename StaticVisitor> +int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + + STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset == + JSArrayBuffer::kWeakNextOffset + kPointerSize); + VisitPointers(heap, HeapObject::RawField( + object, JSArrayBuffer::BodyDescriptor::kStartOffset), + HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset)); + VisitPointers( + heap, HeapObject::RawField( + object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize), + HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields)); + return JSArrayBuffer::kSizeWithInternalFields; +} + + +template <typename StaticVisitor> +int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray( + Map* map, HeapObject* object) { + VisitPointers( + map->GetHeap(), + HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset), + HeapObject::RawField(object, JSTypedArray::kWeakNextOffset)); + VisitPointers( + map->GetHeap(), HeapObject::RawField( + object, JSTypedArray::kWeakNextOffset + kPointerSize), + 
HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields)); + return JSTypedArray::kSizeWithInternalFields; +} + + +template <typename StaticVisitor> +int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map, + HeapObject* object) { + VisitPointers( + map->GetHeap(), + HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset), + HeapObject::RawField(object, JSDataView::kWeakNextOffset)); + VisitPointers( + map->GetHeap(), + HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize), + HeapObject::RawField(object, JSDataView::kSizeWithInternalFields)); + return JSDataView::kSizeWithInternalFields; +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::Initialize() { + table_.Register(kVisitShortcutCandidate, + &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, + void>::Visit); + + table_.Register(kVisitConsString, + &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, + void>::Visit); + + table_.Register(kVisitSlicedString, + &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor, + void>::Visit); + + table_.Register( + kVisitSymbol, + &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, void>::Visit); + + table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit); + + table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit); + + table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit); + + table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit); + + table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray); + + table_.Register(kVisitNativeContext, &VisitNativeContext); + + table_.Register(kVisitAllocationSite, &VisitAllocationSite); + + table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); + + table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); + + table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit); + + table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); + + 
table_.Register(kVisitJSWeakCollection, &VisitWeakCollection); + + table_.Register( + kVisitOddball, + &FixedBodyVisitor<StaticVisitor, Oddball::BodyDescriptor, void>::Visit); + + table_.Register(kVisitMap, &VisitMap); + + table_.Register(kVisitCode, &VisitCode); + + table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo); + + table_.Register(kVisitJSFunction, &VisitJSFunction); + + table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer); + + table_.Register(kVisitJSTypedArray, &VisitJSTypedArray); + + table_.Register(kVisitJSDataView, &VisitJSDataView); + + // Registration for kVisitJSRegExp is done by StaticVisitor. + + table_.Register( + kVisitCell, + &FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit); + + table_.Register(kVisitPropertyCell, &VisitPropertyCell); + + table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject, + kVisitDataObjectGeneric>(); + + table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject, + kVisitJSObjectGeneric>(); + + table_.template RegisterSpecializations<StructObjectVisitor, kVisitStruct, + kVisitStructGeneric>(); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry( + Heap* heap, Address entry_address) { + Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); + heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code); + StaticVisitor::MarkObject(heap, code); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer( + Heap* heap, RelocInfo* rinfo) { + DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + HeapObject* object = HeapObject::cast(rinfo->target_object()); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); + // TODO(ulan): It could be better to record slots only for strongly embedded + // objects here and record slots for weakly embedded object during clearing + // of non-live references in mark-compact. 
+ if (!rinfo->host()->IsWeakObject(object)) { + StaticVisitor::MarkObject(heap, object); + } +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap, + RelocInfo* rinfo) { + DCHECK(rinfo->rmode() == RelocInfo::CELL); + Cell* cell = rinfo->target_cell(); + // No need to record slots because the cell space is not compacted during GC. + if (!rinfo->host()->IsWeakObject(cell)) { + StaticVisitor::MarkObject(heap, cell); + } +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap, + RelocInfo* rinfo) { + DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) && + rinfo->IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && + rinfo->IsPatchedDebugBreakSlotSequence())); + Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); + StaticVisitor::MarkObject(heap, target); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap, + RelocInfo* rinfo) { + DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); + Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); + // Monomorphic ICs are preserved when possible, but need to be flushed + // when they might be keeping a Context alive, or when the heap is about + // to be serialized. 
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() && + (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC || + target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() || + heap->isolate()->serializer_enabled() || + target->ic_age() != heap->global_ic_age() || + target->is_invalidated_weak_stub())) { + IC::Clear(heap->isolate(), rinfo->pc(), rinfo->host()->constant_pool()); + target = Code::GetCodeFromTargetAddress(rinfo->target_address()); + } + heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); + StaticVisitor::MarkObject(heap, target); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence( + Heap* heap, RelocInfo* rinfo) { + DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); + Code* target = rinfo->code_age_stub(); + DCHECK(target != NULL); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); + StaticVisitor::MarkObject(heap, target); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext( + Map* map, HeapObject* object) { + FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor, + void>::Visit(map, object); + + MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); + for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS; + ++idx) { + Object** slot = Context::cast(object)->RawFieldOfElementAt(idx); + collector->RecordSlot(slot, slot, *slot); + } +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map, + HeapObject* object) { + Heap* heap = map->GetHeap(); + Map* map_object = Map::cast(object); + + // Clears the cache of ICs related to this map. + if (FLAG_cleanup_code_caches_at_gc) { + map_object->ClearCodeCache(heap); + } + + // When map collection is enabled we have to mark through map's transitions + // and back pointers in a special way to make these links weak. 
+ if (FLAG_collect_maps && map_object->CanTransition()) { + MarkMapContents(heap, map_object); + } else { + StaticVisitor::VisitPointers( + heap, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset), + HeapObject::RawField(object, Map::kPointerFieldsEndOffset)); + } +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + + Object** slot = + HeapObject::RawField(object, PropertyCell::kDependentCodeOffset); + if (FLAG_collect_maps) { + // Mark property cell dependent codes array but do not push it onto marking + // stack, this will make references from it weak. We will clean dead + // codes when we iterate over property cells in ClearNonLiveReferences. + HeapObject* obj = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, obj); + StaticVisitor::MarkObjectWithoutPush(heap, obj); + } else { + StaticVisitor::VisitPointer(heap, slot); + } + + StaticVisitor::VisitPointers( + heap, + HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset), + HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + + Object** slot = + HeapObject::RawField(object, AllocationSite::kDependentCodeOffset); + if (FLAG_collect_maps) { + // Mark allocation site dependent codes array but do not push it onto + // marking stack, this will make references from it weak. We will clean + // dead codes when we iterate over allocation sites in + // ClearNonLiveReferences. 
+ HeapObject* obj = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, obj); + StaticVisitor::MarkObjectWithoutPush(heap, obj); + } else { + StaticVisitor::VisitPointer(heap, slot); + } + + StaticVisitor::VisitPointers( + heap, + HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset), + HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + JSWeakCollection* weak_collection = + reinterpret_cast<JSWeakCollection*>(object); + + // Enqueue weak collection in linked list of encountered weak collections. + if (weak_collection->next() == heap->undefined_value()) { + weak_collection->set_next(heap->encountered_weak_collections()); + heap->set_encountered_weak_collections(weak_collection); + } + + // Skip visiting the backing hash table containing the mappings and the + // pointer to the other enqueued weak collections, both are post-processed. + StaticVisitor::VisitPointers( + heap, HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset), + HeapObject::RawField(object, JSWeakCollection::kTableOffset)); + STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize == + JSWeakCollection::kNextOffset); + STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize == + JSWeakCollection::kSize); + + // Partially initialized weak collection is enqueued, but table is ignored. + if (!weak_collection->table()->IsHashTable()) return; + + // Mark the backing hash table without pushing it on the marking stack. 
+ Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset); + HeapObject* obj = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, obj); + StaticVisitor::MarkObjectWithoutPush(heap, obj); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map, + HeapObject* object) { + Heap* heap = map->GetHeap(); + Code* code = Code::cast(object); + if (FLAG_age_code && !heap->isolate()->serializer_enabled()) { + code->MakeOlder(heap->mark_compact_collector()->marking_parity()); + } + code->CodeIterateBody<StaticVisitor>(heap); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + SharedFunctionInfo* shared = SharedFunctionInfo::cast(object); + if (shared->ic_age() != heap->global_ic_age()) { + shared->ResetForNewContext(heap->global_ic_age()); + } + if (FLAG_cleanup_code_caches_at_gc) { + shared->ClearTypeFeedbackInfo(); + } + if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache && + !shared->optimized_code_map()->IsSmi()) { + // Always flush the optimized code map if requested by flag. + shared->ClearOptimizedCodeMap(); + } + MarkCompactCollector* collector = heap->mark_compact_collector(); + if (collector->is_code_flushing_enabled()) { + if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) { + // Add the shared function info holding an optimized code map to + // the code flusher for processing of code maps after marking. + collector->code_flusher()->AddOptimizedCodeMap(shared); + // Treat all references within the code map weakly by marking the + // code map itself but not pushing it onto the marking deque. + FixedArray* code_map = FixedArray::cast(shared->optimized_code_map()); + StaticVisitor::MarkObjectWithoutPush(heap, code_map); + } + if (IsFlushable(heap, shared)) { + // This function's code looks flushable. 
But we have to postpone + // the decision until we see all functions that point to the same + // SharedFunctionInfo because some of them might be optimized. + // That would also make the non-optimized version of the code + // non-flushable, because it is required for bailing out from + // optimized code. + collector->code_flusher()->AddCandidate(shared); + // Treat the reference to the code object weakly. + VisitSharedFunctionInfoWeakCode(heap, object); + return; + } + } else { + if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) { + // Flush optimized code map on major GCs without code flushing, + // needed because cached code doesn't contain breakpoints. + shared->ClearOptimizedCodeMap(); + } + } + VisitSharedFunctionInfoStrongCode(heap, object); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + ConstantPoolArray* array = ConstantPoolArray::cast(object); + ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR); + while (!code_iter.is_finished()) { + Address code_entry = reinterpret_cast<Address>( + array->RawFieldOfElementAt(code_iter.next_index())); + StaticVisitor::VisitCodeEntry(heap, code_entry); + } + + ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR); + while (!heap_iter.is_finished()) { + Object** slot = array->RawFieldOfElementAt(heap_iter.next_index()); + HeapObject* object = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, object); + bool is_weak_object = + (array->get_weak_object_state() == + ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE && + Code::IsWeakObjectInOptimizedCode(object)) || + (array->get_weak_object_state() == + ConstantPoolArray::WEAK_OBJECTS_IN_IC && + Code::IsWeakObjectInIC(object)); + if (!is_weak_object) { + StaticVisitor::MarkObject(heap, object); + } + } +} + + +template <typename StaticVisitor> +void 
StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map, + HeapObject* object) { + Heap* heap = map->GetHeap(); + JSFunction* function = JSFunction::cast(object); + MarkCompactCollector* collector = heap->mark_compact_collector(); + if (collector->is_code_flushing_enabled()) { + if (IsFlushable(heap, function)) { + // This function's code looks flushable. But we have to postpone + // the decision until we see all functions that point to the same + // SharedFunctionInfo because some of them might be optimized. + // That would also make the non-optimized version of the code + // non-flushable, because it is required for bailing out from + // optimized code. + collector->code_flusher()->AddCandidate(function); + // Visit shared function info immediately to avoid double checking + // of its flushability later. This is just an optimization because + // the shared function info would eventually be visited. + SharedFunctionInfo* shared = function->shared(); + if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) { + StaticVisitor::MarkObject(heap, shared->map()); + VisitSharedFunctionInfoWeakCode(heap, shared); + } + // Treat the reference to the code object weakly. + VisitJSFunctionWeakCode(heap, object); + return; + } else { + // Visit all unoptimized code objects to prevent flushing them. 
+ StaticVisitor::MarkObject(heap, function->shared()->code()); + if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) { + MarkInlinedFunctionsCode(heap, function->code()); + } + } + } + VisitJSFunctionStrongCode(heap, object); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map, + HeapObject* object) { + int last_property_offset = + JSRegExp::kSize + kPointerSize * map->inobject_properties(); + StaticVisitor::VisitPointers( + map->GetHeap(), HeapObject::RawField(object, JSRegExp::kPropertiesOffset), + HeapObject::RawField(object, last_property_offset)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + + STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset == + JSArrayBuffer::kWeakNextOffset + kPointerSize); + StaticVisitor::VisitPointers( + heap, + HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset), + HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset)); + StaticVisitor::VisitPointers( + heap, HeapObject::RawField( + object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize), + HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray( + Map* map, HeapObject* object) { + StaticVisitor::VisitPointers( + map->GetHeap(), + HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset), + HeapObject::RawField(object, JSTypedArray::kWeakNextOffset)); + StaticVisitor::VisitPointers( + map->GetHeap(), HeapObject::RawField( + object, JSTypedArray::kWeakNextOffset + kPointerSize), + HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map, + HeapObject* object) { + StaticVisitor::VisitPointers( + map->GetHeap(), + 
HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset), + HeapObject::RawField(object, JSDataView::kWeakNextOffset)); + StaticVisitor::VisitPointers( + map->GetHeap(), + HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize), + HeapObject::RawField(object, JSDataView::kSizeWithInternalFields)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap, + Map* map) { + // Make sure that the back pointer stored either in the map itself or + // inside its transitions array is marked. Skip recording the back + // pointer slot since map space is not compacted. + StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer())); + + // Treat pointers in the transitions array as weak and also mark that + // array to prevent visiting it later. Skip recording the transition + // array slot, since it will be implicitly recorded when the pointer + // fields of this map are visited. + if (map->HasTransitionArray()) { + TransitionArray* transitions = map->transitions(); + MarkTransitionArray(heap, transitions); + } + + // Since descriptor arrays are potentially shared, ensure that only the + // descriptors that belong to this map are marked. The first time a + // non-empty descriptor array is marked, its header is also visited. The slot + // holding the descriptor array will be implicitly recorded when the pointer + // fields of this map are visited. 
+ DescriptorArray* descriptors = map->instance_descriptors(); + if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) && + descriptors->length() > 0) { + StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(), + descriptors->GetDescriptorEndSlot(0)); + } + int start = 0; + int end = map->NumberOfOwnDescriptors(); + if (start < end) { + StaticVisitor::VisitPointers(heap, + descriptors->GetDescriptorStartSlot(start), + descriptors->GetDescriptorEndSlot(end)); + } + + // Mark prototype dependent codes array but do not push it onto marking + // stack, this will make references from it weak. We will clean dead + // codes when we iterate over maps in ClearNonLiveTransitions. + Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset); + HeapObject* obj = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, obj); + StaticVisitor::MarkObjectWithoutPush(heap, obj); + + // Mark the pointer fields of the Map. Since the transitions array has + // been marked already, it is fine that one of these fields contains a + // pointer to it. + StaticVisitor::VisitPointers( + heap, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset), + HeapObject::RawField(map, Map::kPointerFieldsEndOffset)); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray( + Heap* heap, TransitionArray* transitions) { + if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return; + + // Simple transitions do not have keys nor prototype transitions. + if (transitions->IsSimpleTransition()) return; + + if (transitions->HasPrototypeTransitions()) { + // Mark prototype transitions array but do not push it onto marking + // stack, this will make references from it weak. We will clean dead + // prototype transitions in ClearNonLiveTransitions. 
+ Object** slot = transitions->GetPrototypeTransitionsSlot(); + HeapObject* obj = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, obj); + StaticVisitor::MarkObjectWithoutPush(heap, obj); + } + + for (int i = 0; i < transitions->number_of_transitions(); ++i) { + StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i)); + } +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap, + Code* code) { + // Skip in absence of inlining. + // TODO(turbofan): Revisit once we support inlining. + if (code->is_turbofanned()) return; + // For optimized functions we should retain both non-optimized version + // of its code and non-optimized version of all inlined functions. + // This is required to support bailing out from inlined code. + DeoptimizationInputData* data = + DeoptimizationInputData::cast(code->deoptimization_data()); + FixedArray* literals = data->LiteralArray(); + for (int i = 0, count = data->InlinedFunctionCount()->value(); i < count; + i++) { + JSFunction* inlined = JSFunction::cast(literals->get(i)); + StaticVisitor::MarkObject(heap, inlined->shared()->code()); + } +} + + +inline static bool IsValidNonBuiltinContext(Object* context) { + return context->IsContext() && + !Context::cast(context)->global_object()->IsJSBuiltinsObject(); +} + + +inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { + Object* undefined = heap->undefined_value(); + return (info->script() != undefined) && + (reinterpret_cast<Script*>(info->script())->source() != undefined); +} + + +template <typename StaticVisitor> +bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap, + JSFunction* function) { + SharedFunctionInfo* shared_info = function->shared(); + + // Code is either on stack, in compilation cache or referenced + // by optimized version of function. 
+ MarkBit code_mark = Marking::MarkBitFrom(function->code()); + if (code_mark.Get()) { + return false; + } + + // The function must have a valid context and not be a builtin. + if (!IsValidNonBuiltinContext(function->context())) { + return false; + } + + // We do not (yet) flush code for optimized functions. + if (function->code() != shared_info->code()) { + return false; + } + + // Check age of optimized code. + if (FLAG_age_code && !function->code()->IsOld()) { + return false; + } + + return IsFlushable(heap, shared_info); +} + + +template <typename StaticVisitor> +bool StaticMarkingVisitor<StaticVisitor>::IsFlushable( + Heap* heap, SharedFunctionInfo* shared_info) { + // Code is either on stack, in compilation cache or referenced + // by optimized version of function. + MarkBit code_mark = Marking::MarkBitFrom(shared_info->code()); + if (code_mark.Get()) { + return false; + } + + // The function must be compiled and have the source code available, + // to be able to recompile it in case we need the function again. + if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) { + return false; + } + + // We never flush code for API functions. + Object* function_data = shared_info->function_data(); + if (function_data->IsFunctionTemplateInfo()) { + return false; + } + + // Only flush code for functions. + if (shared_info->code()->kind() != Code::FUNCTION) { + return false; + } + + // Function must be lazy compilable. + if (!shared_info->allows_lazy_compilation()) { + return false; + } + + // We do not (yet?) flush code for generator functions, because we don't know + // if there are still live activations (generator objects) on the heap. + if (shared_info->is_generator()) { + return false; + } + + // If this is a full script wrapped in a function we do not flush the code. 
+ if (shared_info->is_toplevel()) { + return false; + } + + // If this is a function initialized with %SetCode then the one-to-one + // relation between SharedFunctionInfo and Code is broken. + if (shared_info->dont_flush()) { + return false; + } + + // Check age of code. If code aging is disabled we never flush. + if (!FLAG_age_code || !shared_info->code()->IsOld()) { + return false; + } + + return true; +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode( + Heap* heap, HeapObject* object) { + Object** start_slot = HeapObject::RawField( + object, SharedFunctionInfo::BodyDescriptor::kStartOffset); + Object** end_slot = HeapObject::RawField( + object, SharedFunctionInfo::BodyDescriptor::kEndOffset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode( + Heap* heap, HeapObject* object) { + Object** name_slot = + HeapObject::RawField(object, SharedFunctionInfo::kNameOffset); + StaticVisitor::VisitPointer(heap, name_slot); + + // Skip visiting kCodeOffset as it is treated weakly here. 
+ STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize == + SharedFunctionInfo::kCodeOffset); + STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize == + SharedFunctionInfo::kOptimizedCodeMapOffset); + + Object** start_slot = + HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset); + Object** end_slot = HeapObject::RawField( + object, SharedFunctionInfo::BodyDescriptor::kEndOffset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode( + Heap* heap, HeapObject* object) { + Object** start_slot = + HeapObject::RawField(object, JSFunction::kPropertiesOffset); + Object** end_slot = + HeapObject::RawField(object, JSFunction::kCodeEntryOffset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); + + VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); + STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize == + JSFunction::kPrototypeOrInitialMapOffset); + + start_slot = + HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset); + end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); +} + + +template <typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode( + Heap* heap, HeapObject* object) { + Object** start_slot = + HeapObject::RawField(object, JSFunction::kPropertiesOffset); + Object** end_slot = + HeapObject::RawField(object, JSFunction::kCodeEntryOffset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); + + // Skip visiting kCodeEntryOffset as it is treated weakly here. 
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize == + JSFunction::kPrototypeOrInitialMapOffset); + + start_slot = + HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset); + end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset); + StaticVisitor::VisitPointers(heap, start_slot, end_slot); +} + + +void Code::CodeIterateBody(ObjectVisitor* v) { + int mode_mask = RelocInfo::kCodeTargetMask | + RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::CELL) | + RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | + RelocInfo::ModeMask(RelocInfo::JS_RETURN) | + RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | + RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); + + // There are two places where we iterate code bodies: here and the + // templated CodeIterateBody (below). They should be kept in sync. + IteratePointer(v, kRelocationInfoOffset); + IteratePointer(v, kHandlerTableOffset); + IteratePointer(v, kDeoptimizationDataOffset); + IteratePointer(v, kTypeFeedbackInfoOffset); + IterateNextCodeLink(v, kNextCodeLinkOffset); + IteratePointer(v, kConstantPoolOffset); + + RelocIterator it(this, mode_mask); + Isolate* isolate = this->GetIsolate(); + for (; !it.done(); it.next()) { + it.rinfo()->Visit(isolate, v); + } +} + + +template <typename StaticVisitor> +void Code::CodeIterateBody(Heap* heap) { + int mode_mask = RelocInfo::kCodeTargetMask | + RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::CELL) | + RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | + RelocInfo::ModeMask(RelocInfo::JS_RETURN) | + RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | + RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); + + // There are two places where we iterate code bodies: here and the non- + // templated CodeIterateBody (above). They should be kept in sync. 
+ StaticVisitor::VisitPointer( + heap, + reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset)); + StaticVisitor::VisitPointer( + heap, reinterpret_cast<Object**>(this->address() + kHandlerTableOffset)); + StaticVisitor::VisitPointer( + heap, + reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset)); + StaticVisitor::VisitPointer( + heap, + reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset)); + StaticVisitor::VisitNextCodeLink( + heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset)); + StaticVisitor::VisitPointer( + heap, reinterpret_cast<Object**>(this->address() + kConstantPoolOffset)); + + + RelocIterator it(this, mode_mask); + for (; !it.done(); it.next()) { + it.rinfo()->template Visit<StaticVisitor>(heap); + } +} +} +} // namespace v8::internal + +#endif // V8_OBJECTS_VISITING_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/spaces.cc nodejs-0.11.15/deps/v8/src/heap/spaces.cc --- nodejs-0.11.13/deps/v8/src/heap/spaces.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/spaces.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3108 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/base/platform/platform.h" +#include "src/full-codegen.h" +#include "src/heap/mark-compact.h" +#include "src/macro-assembler.h" +#include "src/msan.h" + +namespace v8 { +namespace internal { + + +// ---------------------------------------------------------------------------- +// HeapObjectIterator + +HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { + // You can't actually iterate over the anchor page. It is not a real page, + // just an anchor for the double linked page list. Initialize as if we have + // reached the end of the anchor page, then the first iteration will move on + // to the first page. 
+ Initialize(space, NULL, NULL, kAllPagesInSpace, NULL); +} + + +HeapObjectIterator::HeapObjectIterator(PagedSpace* space, + HeapObjectCallback size_func) { + // You can't actually iterate over the anchor page. It is not a real page, + // just an anchor for the double linked page list. Initialize the current + // address and end as NULL, then the first iteration will move on + // to the first page. + Initialize(space, NULL, NULL, kAllPagesInSpace, size_func); +} + + +HeapObjectIterator::HeapObjectIterator(Page* page, + HeapObjectCallback size_func) { + Space* owner = page->owner(); + DCHECK(owner == page->heap()->old_pointer_space() || + owner == page->heap()->old_data_space() || + owner == page->heap()->map_space() || + owner == page->heap()->cell_space() || + owner == page->heap()->property_cell_space() || + owner == page->heap()->code_space()); + Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(), + page->area_end(), kOnePageOnly, size_func); + DCHECK(page->WasSweptPrecisely() || + (static_cast<PagedSpace*>(owner)->swept_precisely() && + page->SweepingCompleted())); +} + + +void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end, + HeapObjectIterator::PageMode mode, + HeapObjectCallback size_f) { + // Check that we actually can iterate this space. + DCHECK(space->swept_precisely()); + + space_ = space; + cur_addr_ = cur; + cur_end_ = end; + page_mode_ = mode; + size_func_ = size_f; +} + + +// We have hit the end of the page and should advance to the next block of +// objects. This happens at the end of the page. 
+bool HeapObjectIterator::AdvanceToNextPage() { + DCHECK(cur_addr_ == cur_end_); + if (page_mode_ == kOnePageOnly) return false; + Page* cur_page; + if (cur_addr_ == NULL) { + cur_page = space_->anchor(); + } else { + cur_page = Page::FromAddress(cur_addr_ - 1); + DCHECK(cur_addr_ == cur_page->area_end()); + } + cur_page = cur_page->next_page(); + if (cur_page == space_->anchor()) return false; + cur_addr_ = cur_page->area_start(); + cur_end_ = cur_page->area_end(); + DCHECK(cur_page->WasSweptPrecisely() || + (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() && + cur_page->SweepingCompleted())); + return true; +} + + +// ----------------------------------------------------------------------------- +// CodeRange + + +CodeRange::CodeRange(Isolate* isolate) + : isolate_(isolate), + code_range_(NULL), + free_list_(0), + allocation_list_(0), + current_allocation_block_index_(0) {} + + +bool CodeRange::SetUp(size_t requested) { + DCHECK(code_range_ == NULL); + + if (requested == 0) { + // When a target requires the code range feature, we put all code objects + // in a kMaximalCodeRangeSize range of virtual address space, so that + // they can call each other with near calls. + if (kRequiresCodeRange) { + requested = kMaximalCodeRangeSize; + } else { + return true; + } + } + + DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); + code_range_ = new base::VirtualMemory(requested); + CHECK(code_range_ != NULL); + if (!code_range_->IsReserved()) { + delete code_range_; + code_range_ = NULL; + return false; + } + + // We are sure that we have mapped a block of requested addresses. 
+ DCHECK(code_range_->size() == requested); + LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); + Address base = reinterpret_cast<Address>(code_range_->address()); + Address aligned_base = + RoundUp(reinterpret_cast<Address>(code_range_->address()), + MemoryChunk::kAlignment); + size_t size = code_range_->size() - (aligned_base - base); + allocation_list_.Add(FreeBlock(aligned_base, size)); + current_allocation_block_index_ = 0; + return true; +} + + +int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, + const FreeBlock* right) { + // The entire point of CodeRange is that the difference between two + // addresses in the range can be represented as a signed 32-bit int, + // so the cast is semantically correct. + return static_cast<int>(left->start - right->start); +} + + +bool CodeRange::GetNextAllocationBlock(size_t requested) { + for (current_allocation_block_index_++; + current_allocation_block_index_ < allocation_list_.length(); + current_allocation_block_index_++) { + if (requested <= allocation_list_[current_allocation_block_index_].size) { + return true; // Found a large enough allocation block. + } + } + + // Sort and merge the free blocks on the free list and the allocation list. + free_list_.AddAll(allocation_list_); + allocation_list_.Clear(); + free_list_.Sort(&CompareFreeBlockAddress); + for (int i = 0; i < free_list_.length();) { + FreeBlock merged = free_list_[i]; + i++; + // Add adjacent free blocks to the current merged block. 
+ while (i < free_list_.length() && + free_list_[i].start == merged.start + merged.size) { + merged.size += free_list_[i].size; + i++; + } + if (merged.size > 0) { + allocation_list_.Add(merged); + } + } + free_list_.Clear(); + + for (current_allocation_block_index_ = 0; + current_allocation_block_index_ < allocation_list_.length(); + current_allocation_block_index_++) { + if (requested <= allocation_list_[current_allocation_block_index_].size) { + return true; // Found a large enough allocation block. + } + } + current_allocation_block_index_ = 0; + // Code range is full or too fragmented. + return false; +} + + +Address CodeRange::AllocateRawMemory(const size_t requested_size, + const size_t commit_size, + size_t* allocated) { + DCHECK(commit_size <= requested_size); + DCHECK(current_allocation_block_index_ < allocation_list_.length()); + if (requested_size > allocation_list_[current_allocation_block_index_].size) { + // Find an allocation block large enough. + if (!GetNextAllocationBlock(requested_size)) return NULL; + } + // Commit the requested memory at the start of the current allocation block. + size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); + FreeBlock current = allocation_list_[current_allocation_block_index_]; + if (aligned_requested >= (current.size - Page::kPageSize)) { + // Don't leave a small free block, useless for a large object or chunk. + *allocated = current.size; + } else { + *allocated = aligned_requested; + } + DCHECK(*allocated <= current.size); + DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); + if (!isolate_->memory_allocator()->CommitExecutableMemory( + code_range_, current.start, commit_size, *allocated)) { + *allocated = 0; + return NULL; + } + allocation_list_[current_allocation_block_index_].start += *allocated; + allocation_list_[current_allocation_block_index_].size -= *allocated; + if (*allocated == current.size) { + // This block is used up, get the next one. 
+ if (!GetNextAllocationBlock(0)) return NULL; + } + return current.start; +} + + +bool CodeRange::CommitRawMemory(Address start, size_t length) { + return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); +} + + +bool CodeRange::UncommitRawMemory(Address start, size_t length) { + return code_range_->Uncommit(start, length); +} + + +void CodeRange::FreeRawMemory(Address address, size_t length) { + DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); + free_list_.Add(FreeBlock(address, length)); + code_range_->Uncommit(address, length); +} + + +void CodeRange::TearDown() { + delete code_range_; // Frees all memory in the virtual memory range. + code_range_ = NULL; + free_list_.Free(); + allocation_list_.Free(); +} + + +// ----------------------------------------------------------------------------- +// MemoryAllocator +// + +MemoryAllocator::MemoryAllocator(Isolate* isolate) + : isolate_(isolate), + capacity_(0), + capacity_executable_(0), + size_(0), + size_executable_(0), + lowest_ever_allocated_(reinterpret_cast<void*>(-1)), + highest_ever_allocated_(reinterpret_cast<void*>(0)) {} + + +bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { + capacity_ = RoundUp(capacity, Page::kPageSize); + capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); + DCHECK_GE(capacity_, capacity_executable_); + + size_ = 0; + size_executable_ = 0; + + return true; +} + + +void MemoryAllocator::TearDown() { + // Check that spaces were torn down before MemoryAllocator. + DCHECK(size_ == 0); + // TODO(gc) this will be true again when we fix FreeMemory. 
+ // DCHECK(size_executable_ == 0); + capacity_ = 0; + capacity_executable_ = 0; +} + + +bool MemoryAllocator::CommitMemory(Address base, size_t size, + Executability executable) { + if (!base::VirtualMemory::CommitRegion(base, size, + executable == EXECUTABLE)) { + return false; + } + UpdateAllocatedSpaceLimits(base, base + size); + return true; +} + + +void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, + Executability executable) { + // TODO(gc) make code_range part of memory allocator? + DCHECK(reservation->IsReserved()); + size_t size = reservation->size(); + DCHECK(size_ >= size); + size_ -= size; + + isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); + + if (executable == EXECUTABLE) { + DCHECK(size_executable_ >= size); + size_executable_ -= size; + } + // Code which is part of the code-range does not have its own VirtualMemory. + DCHECK(isolate_->code_range() == NULL || + !isolate_->code_range()->contains( + static_cast<Address>(reservation->address()))); + DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || + !isolate_->code_range()->valid()); + reservation->Release(); +} + + +void MemoryAllocator::FreeMemory(Address base, size_t size, + Executability executable) { + // TODO(gc) make code_range part of memory allocator? 
+ DCHECK(size_ >= size); + size_ -= size; + + isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); + + if (executable == EXECUTABLE) { + DCHECK(size_executable_ >= size); + size_executable_ -= size; + } + if (isolate_->code_range() != NULL && + isolate_->code_range()->contains(static_cast<Address>(base))) { + DCHECK(executable == EXECUTABLE); + isolate_->code_range()->FreeRawMemory(base, size); + } else { + DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || + !isolate_->code_range()->valid()); + bool result = base::VirtualMemory::ReleaseRegion(base, size); + USE(result); + DCHECK(result); + } +} + + +Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, + base::VirtualMemory* controller) { + base::VirtualMemory reservation(size, alignment); + + if (!reservation.IsReserved()) return NULL; + size_ += reservation.size(); + Address base = + RoundUp(static_cast<Address>(reservation.address()), alignment); + controller->TakeControl(&reservation); + return base; +} + + +Address MemoryAllocator::AllocateAlignedMemory( + size_t reserve_size, size_t commit_size, size_t alignment, + Executability executable, base::VirtualMemory* controller) { + DCHECK(commit_size <= reserve_size); + base::VirtualMemory reservation; + Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); + if (base == NULL) return NULL; + + if (executable == EXECUTABLE) { + if (!CommitExecutableMemory(&reservation, base, commit_size, + reserve_size)) { + base = NULL; + } + } else { + if (reservation.Commit(base, commit_size, false)) { + UpdateAllocatedSpaceLimits(base, base + commit_size); + } else { + base = NULL; + } + } + + if (base == NULL) { + // Failed to commit the body. Release the mapping and any partially + // commited regions inside it. 
+ reservation.Release(); + return NULL; + } + + controller->TakeControl(&reservation); + return base; +} + + +void Page::InitializeAsAnchor(PagedSpace* owner) { + set_owner(owner); + set_prev_page(this); + set_next_page(this); +} + + +NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start, + SemiSpace* semi_space) { + Address area_start = start + NewSpacePage::kObjectStartOffset; + Address area_end = start + Page::kPageSize; + + MemoryChunk* chunk = + MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start, + area_end, NOT_EXECUTABLE, semi_space); + chunk->set_next_chunk(NULL); + chunk->set_prev_chunk(NULL); + chunk->initialize_scan_on_scavenge(true); + bool in_to_space = (semi_space->id() != kFromSpace); + chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE + : MemoryChunk::IN_FROM_SPACE); + DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE + : MemoryChunk::IN_TO_SPACE)); + NewSpacePage* page = static_cast<NewSpacePage*>(chunk); + heap->incremental_marking()->SetNewSpacePageFlags(page); + return page; +} + + +void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { + set_owner(semi_space); + set_next_chunk(this); + set_prev_chunk(this); + // Flags marks this invalid page as not being in new-space. + // All real new-space pages will be in new-space. 
+ SetFlags(0, ~0); +} + + +MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, + Address area_start, Address area_end, + Executability executable, Space* owner) { + MemoryChunk* chunk = FromAddress(base); + + DCHECK(base == chunk->address()); + + chunk->heap_ = heap; + chunk->size_ = size; + chunk->area_start_ = area_start; + chunk->area_end_ = area_end; + chunk->flags_ = 0; + chunk->set_owner(owner); + chunk->InitializeReservedMemory(); + chunk->slots_buffer_ = NULL; + chunk->skip_list_ = NULL; + chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; + chunk->progress_bar_ = 0; + chunk->high_water_mark_ = static_cast<int>(area_start - base); + chunk->set_parallel_sweeping(SWEEPING_DONE); + chunk->available_in_small_free_list_ = 0; + chunk->available_in_medium_free_list_ = 0; + chunk->available_in_large_free_list_ = 0; + chunk->available_in_huge_free_list_ = 0; + chunk->non_available_small_blocks_ = 0; + chunk->ResetLiveBytes(); + Bitmap::Clear(chunk); + chunk->initialize_scan_on_scavenge(false); + chunk->SetFlag(WAS_SWEPT_PRECISELY); + + DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); + DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); + + if (executable == EXECUTABLE) { + chunk->SetFlag(IS_EXECUTABLE); + } + + if (owner == heap->old_data_space()) { + chunk->SetFlag(CONTAINS_ONLY_DATA); + } + + return chunk; +} + + +// Commit MemoryChunk area to the requested size. +bool MemoryChunk::CommitArea(size_t requested) { + size_t guard_size = + IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0; + size_t header_size = area_start() - address() - guard_size; + size_t commit_size = + RoundUp(header_size + requested, base::OS::CommitPageSize()); + size_t committed_size = RoundUp(header_size + (area_end() - area_start()), + base::OS::CommitPageSize()); + + if (commit_size > committed_size) { + // Commit size should be less or equal than the reserved size. 
+ DCHECK(commit_size <= size() - 2 * guard_size); + // Append the committed area. + Address start = address() + committed_size + guard_size; + size_t length = commit_size - committed_size; + if (reservation_.IsReserved()) { + Executability executable = + IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; + if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length, + executable)) { + return false; + } + } else { + CodeRange* code_range = heap_->isolate()->code_range(); + DCHECK(code_range != NULL && code_range->valid() && + IsFlagSet(IS_EXECUTABLE)); + if (!code_range->CommitRawMemory(start, length)) return false; + } + + if (Heap::ShouldZapGarbage()) { + heap_->isolate()->memory_allocator()->ZapBlock(start, length); + } + } else if (commit_size < committed_size) { + DCHECK(commit_size > 0); + // Shrink the committed area. + size_t length = committed_size - commit_size; + Address start = address() + committed_size + guard_size - length; + if (reservation_.IsReserved()) { + if (!reservation_.Uncommit(start, length)) return false; + } else { + CodeRange* code_range = heap_->isolate()->code_range(); + DCHECK(code_range != NULL && code_range->valid() && + IsFlagSet(IS_EXECUTABLE)); + if (!code_range->UncommitRawMemory(start, length)) return false; + } + } + + area_end_ = area_start_ + requested; + return true; +} + + +void MemoryChunk::InsertAfter(MemoryChunk* other) { + MemoryChunk* other_next = other->next_chunk(); + + set_next_chunk(other_next); + set_prev_chunk(other); + other_next->set_prev_chunk(this); + other->set_next_chunk(this); +} + + +void MemoryChunk::Unlink() { + MemoryChunk* next_element = next_chunk(); + MemoryChunk* prev_element = prev_chunk(); + next_element->set_prev_chunk(prev_element); + prev_element->set_next_chunk(next_element); + set_prev_chunk(NULL); + set_next_chunk(NULL); +} + + +MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, + intptr_t commit_area_size, + Executability executable, + Space* owner) { + 
DCHECK(commit_area_size <= reserve_area_size); + + size_t chunk_size; + Heap* heap = isolate_->heap(); + Address base = NULL; + base::VirtualMemory reservation; + Address area_start = NULL; + Address area_end = NULL; + + // + // MemoryChunk layout: + // + // Executable + // +----------------------------+<- base aligned with MemoryChunk::kAlignment + // | Header | + // +----------------------------+<- base + CodePageGuardStartOffset + // | Guard | + // +----------------------------+<- area_start_ + // | Area | + // +----------------------------+<- area_end_ (area_start + commit_area_size) + // | Committed but not used | + // +----------------------------+<- aligned at OS page boundary + // | Reserved but not committed | + // +----------------------------+<- aligned at OS page boundary + // | Guard | + // +----------------------------+<- base + chunk_size + // + // Non-executable + // +----------------------------+<- base aligned with MemoryChunk::kAlignment + // | Header | + // +----------------------------+<- area_start_ (base + kObjectStartOffset) + // | Area | + // +----------------------------+<- area_end_ (area_start + commit_area_size) + // | Committed but not used | + // +----------------------------+<- aligned at OS page boundary + // | Reserved but not committed | + // +----------------------------+<- base + chunk_size + // + + if (executable == EXECUTABLE) { + chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, + base::OS::CommitPageSize()) + + CodePageGuardSize(); + + // Check executable memory limit. + if (size_executable_ + chunk_size > capacity_executable_) { + LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", + "V8 Executable Allocation capacity exceeded")); + return NULL; + } + + // Size of header (not executable) plus area (executable). + size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, + base::OS::CommitPageSize()); + // Allocate executable memory either from code range or from the + // OS. 
+ if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { + base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, + &chunk_size); + DCHECK( + IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); + if (base == NULL) return NULL; + size_ += chunk_size; + // Update executable memory size. + size_executable_ += chunk_size; + } else { + base = AllocateAlignedMemory(chunk_size, commit_size, + MemoryChunk::kAlignment, executable, + &reservation); + if (base == NULL) return NULL; + // Update executable memory size. + size_executable_ += reservation.size(); + } + + if (Heap::ShouldZapGarbage()) { + ZapBlock(base, CodePageGuardStartOffset()); + ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); + } + + area_start = base + CodePageAreaStartOffset(); + area_end = area_start + commit_area_size; + } else { + chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, + base::OS::CommitPageSize()); + size_t commit_size = + RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size, + base::OS::CommitPageSize()); + base = + AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, + executable, &reservation); + + if (base == NULL) return NULL; + + if (Heap::ShouldZapGarbage()) { + ZapBlock(base, Page::kObjectStartOffset + commit_area_size); + } + + area_start = base + Page::kObjectStartOffset; + area_end = area_start + commit_area_size; + } + + // Use chunk_size for statistics and callbacks because we assume that they + // treat reserved but not-yet committed memory regions of chunks as allocated. 
+ isolate_->counters()->memory_allocated()->Increment( + static_cast<int>(chunk_size)); + + LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); + if (owner != NULL) { + ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); + PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); + } + + MemoryChunk* result = MemoryChunk::Initialize( + heap, base, chunk_size, area_start, area_end, executable, owner); + result->set_reserved_memory(&reservation); + MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size); + return result; +} + + +void Page::ResetFreeListStatistics() { + non_available_small_blocks_ = 0; + available_in_small_free_list_ = 0; + available_in_medium_free_list_ = 0; + available_in_large_free_list_ = 0; + available_in_huge_free_list_ = 0; +} + + +Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner, + Executability executable) { + MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); + + if (chunk == NULL) return NULL; + + return Page::Initialize(isolate_->heap(), chunk, executable, owner); +} + + +LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, + Space* owner, + Executability executable) { + MemoryChunk* chunk = + AllocateChunk(object_size, object_size, executable, owner); + if (chunk == NULL) return NULL; + return LargePage::Initialize(isolate_->heap(), chunk); +} + + +void MemoryAllocator::Free(MemoryChunk* chunk) { + LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); + if (chunk->owner() != NULL) { + ObjectSpace space = + static_cast<ObjectSpace>(1 << chunk->owner()->identity()); + PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); + } + + isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), + chunk->IsEvacuationCandidate()); + + delete chunk->slots_buffer(); + delete chunk->skip_list(); + + base::VirtualMemory* reservation = chunk->reserved_memory(); + if (reservation->IsReserved()) { + FreeMemory(reservation, chunk->executable()); 
+ } else { + FreeMemory(chunk->address(), chunk->size(), chunk->executable()); + } +} + + +bool MemoryAllocator::CommitBlock(Address start, size_t size, + Executability executable) { + if (!CommitMemory(start, size, executable)) return false; + + if (Heap::ShouldZapGarbage()) { + ZapBlock(start, size); + } + + isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); + return true; +} + + +bool MemoryAllocator::UncommitBlock(Address start, size_t size) { + if (!base::VirtualMemory::UncommitRegion(start, size)) return false; + isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); + return true; +} + + +void MemoryAllocator::ZapBlock(Address start, size_t size) { + for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { + Memory::Address_at(start + s) = kZapValue; + } +} + + +void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, + AllocationAction action, + size_t size) { + for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { + MemoryAllocationCallbackRegistration registration = + memory_allocation_callbacks_[i]; + if ((registration.space & space) == space && + (registration.action & action) == action) + registration.callback(space, action, static_cast<int>(size)); + } +} + + +bool MemoryAllocator::MemoryAllocationCallbackRegistered( + MemoryAllocationCallback callback) { + for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { + if (memory_allocation_callbacks_[i].callback == callback) return true; + } + return false; +} + + +void MemoryAllocator::AddMemoryAllocationCallback( + MemoryAllocationCallback callback, ObjectSpace space, + AllocationAction action) { + DCHECK(callback != NULL); + MemoryAllocationCallbackRegistration registration(callback, space, action); + DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); + return memory_allocation_callbacks_.Add(registration); +} + + +void MemoryAllocator::RemoveMemoryAllocationCallback( + MemoryAllocationCallback 
callback) { + DCHECK(callback != NULL); + for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { + if (memory_allocation_callbacks_[i].callback == callback) { + memory_allocation_callbacks_.Remove(i); + return; + } + } + UNREACHABLE(); +} + + +#ifdef DEBUG +void MemoryAllocator::ReportStatistics() { + float pct = static_cast<float>(capacity_ - size_) / capacity_; + PrintF(" capacity: %" V8_PTR_PREFIX + "d" + ", used: %" V8_PTR_PREFIX + "d" + ", available: %%%d\n\n", + capacity_, size_, static_cast<int>(pct * 100)); +} +#endif + + +int MemoryAllocator::CodePageGuardStartOffset() { + // We are guarding code pages: the first OS page after the header + // will be protected as non-writable. + return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); +} + + +int MemoryAllocator::CodePageGuardSize() { + return static_cast<int>(base::OS::CommitPageSize()); +} + + +int MemoryAllocator::CodePageAreaStartOffset() { + // We are guarding code pages: the first OS page after the header + // will be protected as non-writable. + return CodePageGuardStartOffset() + CodePageGuardSize(); +} + + +int MemoryAllocator::CodePageAreaEndOffset() { + // We are guarding code pages: the last OS page will be protected as + // non-writable. + return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize()); +} + + +bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, + Address start, size_t commit_size, + size_t reserved_size) { + // Commit page header (not executable). + if (!vm->Commit(start, CodePageGuardStartOffset(), false)) { + return false; + } + + // Create guard page after the header. + if (!vm->Guard(start + CodePageGuardStartOffset())) { + return false; + } + + // Commit page body (executable). + if (!vm->Commit(start + CodePageAreaStartOffset(), + commit_size - CodePageGuardStartOffset(), true)) { + return false; + } + + // Create guard page before the end. 
+ if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { + return false; + } + + UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() + + commit_size - + CodePageGuardStartOffset()); + return true; +} + + +// ----------------------------------------------------------------------------- +// MemoryChunk implementation + +void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { + MemoryChunk* chunk = MemoryChunk::FromAddress(address); + if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { + static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); + } + chunk->IncrementLiveBytes(by); +} + + +// ----------------------------------------------------------------------------- +// PagedSpace implementation + +PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, + Executability executable) + : Space(heap, id, executable), + free_list_(this), + swept_precisely_(true), + unswept_free_bytes_(0), + end_of_unswept_pages_(NULL), + emergency_memory_(NULL) { + if (id == CODE_SPACE) { + area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize(); + } else { + area_size_ = Page::kPageSize - Page::kObjectStartOffset; + } + max_capacity_ = + (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize(); + accounting_stats_.Clear(); + + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); + + anchor_.InitializeAsAnchor(this); +} + + +bool PagedSpace::SetUp() { return true; } + + +bool PagedSpace::HasBeenSetUp() { return true; } + + +void PagedSpace::TearDown() { + PageIterator iterator(this); + while (iterator.has_next()) { + heap()->isolate()->memory_allocator()->Free(iterator.next()); + } + anchor_.set_next_page(&anchor_); + anchor_.set_prev_page(&anchor_); + accounting_stats_.Clear(); +} + + +size_t PagedSpace::CommittedPhysicalMemory() { + if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); + 
MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); + size_t size = 0; + PageIterator it(this); + while (it.has_next()) { + size += it.next()->CommittedPhysicalMemory(); + } + return size; +} + + +Object* PagedSpace::FindObject(Address addr) { + // Note: this function can only be called on precisely swept spaces. + DCHECK(!heap()->mark_compact_collector()->in_use()); + + if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. + + Page* p = Page::FromAddress(addr); + HeapObjectIterator it(p, NULL); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + Address cur = obj->address(); + Address next = cur + obj->Size(); + if ((cur <= addr) && (addr < next)) return obj; + } + + UNREACHABLE(); + return Smi::FromInt(0); +} + + +bool PagedSpace::CanExpand() { + DCHECK(max_capacity_ % AreaSize() == 0); + + if (Capacity() == max_capacity_) return false; + + DCHECK(Capacity() < max_capacity_); + + // Are we going to exceed capacity for this space? + if ((Capacity() + Page::kPageSize) > max_capacity_) return false; + + return true; +} + + +bool PagedSpace::Expand() { + if (!CanExpand()) return false; + + intptr_t size = AreaSize(); + + if (anchor_.next_page() == &anchor_) { + size = SizeOfFirstPage(); + } + + Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, + executable()); + if (p == NULL) return false; + + DCHECK(Capacity() <= max_capacity_); + + p->InsertAfter(anchor_.prev_page()); + + return true; +} + + +intptr_t PagedSpace::SizeOfFirstPage() { + int size = 0; + switch (identity()) { + case OLD_POINTER_SPACE: + size = 112 * kPointerSize * KB; + break; + case OLD_DATA_SPACE: + size = 192 * KB; + break; + case MAP_SPACE: + size = 16 * kPointerSize * KB; + break; + case CELL_SPACE: + size = 16 * kPointerSize * KB; + break; + case PROPERTY_CELL_SPACE: + size = 8 * kPointerSize * KB; + break; + case CODE_SPACE: { + CodeRange* code_range = heap()->isolate()->code_range(); + if (code_range != NULL && code_range->valid()) 
{ + // When code range exists, code pages are allocated in a special way + // (from the reserved code range). That part of the code is not yet + // upgraded to handle small pages. + size = AreaSize(); + } else { + size = + RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100, + kPointerSize); + } + break; + } + default: + UNREACHABLE(); + } + return Min(size, AreaSize()); +} + + +int PagedSpace::CountTotalPages() { + PageIterator it(this); + int count = 0; + while (it.has_next()) { + it.next(); + count++; + } + return count; +} + + +void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) { + sizes->huge_size_ = page->available_in_huge_free_list(); + sizes->small_size_ = page->available_in_small_free_list(); + sizes->medium_size_ = page->available_in_medium_free_list(); + sizes->large_size_ = page->available_in_large_free_list(); +} + + +void PagedSpace::ResetFreeListStatistics() { + PageIterator page_iterator(this); + while (page_iterator.has_next()) { + Page* page = page_iterator.next(); + page->ResetFreeListStatistics(); + } +} + + +void PagedSpace::IncreaseCapacity(int size) { + accounting_stats_.ExpandSpace(size); +} + + +void PagedSpace::ReleasePage(Page* page) { + DCHECK(page->LiveBytes() == 0); + DCHECK(AreaSize() == page->area_size()); + + if (page->WasSwept()) { + intptr_t size = free_list_.EvictFreeListItems(page); + accounting_stats_.AllocateBytes(size); + DCHECK_EQ(AreaSize(), static_cast<int>(size)); + } else { + DecreaseUnsweptFreeBytes(page); + } + + if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { + heap()->decrement_scan_on_scavenge_pages(); + page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); + } + + DCHECK(!free_list_.ContainsPageFreeListItems(page)); + + if (Page::FromAllocationTop(allocation_info_.top()) == page) { + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); + } + + page->Unlink(); + if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { + 
heap()->isolate()->memory_allocator()->Free(page); + } else { + heap()->QueueMemoryChunkForFree(page); + } + + DCHECK(Capacity() > 0); + accounting_stats_.ShrinkSpace(AreaSize()); +} + + +void PagedSpace::CreateEmergencyMemory() { + emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( + AreaSize(), AreaSize(), executable(), this); +} + + +void PagedSpace::FreeEmergencyMemory() { + Page* page = static_cast<Page*>(emergency_memory_); + DCHECK(page->LiveBytes() == 0); + DCHECK(AreaSize() == page->area_size()); + DCHECK(!free_list_.ContainsPageFreeListItems(page)); + heap()->isolate()->memory_allocator()->Free(page); + emergency_memory_ = NULL; +} + + +void PagedSpace::UseEmergencyMemory() { + Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); + page->InsertAfter(anchor_.prev_page()); + emergency_memory_ = NULL; +} + + +#ifdef DEBUG +void PagedSpace::Print() {} +#endif + +#ifdef VERIFY_HEAP +void PagedSpace::Verify(ObjectVisitor* visitor) { + // We can only iterate over the pages if they were swept precisely. + if (!swept_precisely_) return; + + bool allocation_pointer_found_in_space = + (allocation_info_.top() == allocation_info_.limit()); + PageIterator page_iterator(this); + while (page_iterator.has_next()) { + Page* page = page_iterator.next(); + CHECK(page->owner() == this); + if (page == Page::FromAllocationTop(allocation_info_.top())) { + allocation_pointer_found_in_space = true; + } + CHECK(page->WasSweptPrecisely()); + HeapObjectIterator it(page, NULL); + Address end_of_previous_object = page->area_start(); + Address top = page->area_end(); + int black_size = 0; + for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { + CHECK(end_of_previous_object <= object->address()); + + // The first word should be a map, and we expect all map pointers to + // be in map space. 
+ Map* map = object->map(); + CHECK(map->IsMap()); + CHECK(heap()->map_space()->Contains(map)); + + // Perform space-specific object verification. + VerifyObject(object); + + // The object itself should look OK. + object->ObjectVerify(); + + // All the interior pointers should be contained in the heap. + int size = object->Size(); + object->IterateBody(map->instance_type(), size, visitor); + if (Marking::IsBlack(Marking::MarkBitFrom(object))) { + black_size += size; + } + + CHECK(object->address() + size <= top); + end_of_previous_object = object->address() + size; + } + CHECK_LE(black_size, page->LiveBytes()); + } + CHECK(allocation_pointer_found_in_space); +} +#endif // VERIFY_HEAP + +// ----------------------------------------------------------------------------- +// NewSpace implementation + + +bool NewSpace::SetUp(int reserved_semispace_capacity, + int maximum_semispace_capacity) { + // Set up new space based on the preallocated memory block defined by + // start and size. The provided space is divided into two semi-spaces. + // To support fast containment testing in the new space, the size of + // this chunk must be a power of two and it must be aligned to its size. + int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); + + size_t size = 2 * reserved_semispace_capacity; + Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory( + size, size, &reservation_); + if (base == NULL) return false; + + chunk_base_ = base; + chunk_size_ = static_cast<uintptr_t>(size); + LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); + + DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); + DCHECK(IsPowerOf2(maximum_semispace_capacity)); + + // Allocate and set up the histogram arrays if necessary. 
+ allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); + promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); + +#define SET_NAME(name) \ + allocated_histogram_[name].set_name(#name); \ + promoted_histogram_[name].set_name(#name); + INSTANCE_TYPE_LIST(SET_NAME) +#undef SET_NAME + + DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); + DCHECK(static_cast<intptr_t>(chunk_size_) >= + 2 * heap()->ReservedSemiSpaceSize()); + DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); + + to_space_.SetUp(chunk_base_, initial_semispace_capacity, + maximum_semispace_capacity); + from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, + initial_semispace_capacity, maximum_semispace_capacity); + if (!to_space_.Commit()) { + return false; + } + DCHECK(!from_space_.is_committed()); // No need to use memory yet. + + start_ = chunk_base_; + address_mask_ = ~(2 * reserved_semispace_capacity - 1); + object_mask_ = address_mask_ | kHeapObjectTagMask; + object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; + + ResetAllocationInfo(); + + return true; +} + + +void NewSpace::TearDown() { + if (allocated_histogram_) { + DeleteArray(allocated_histogram_); + allocated_histogram_ = NULL; + } + if (promoted_histogram_) { + DeleteArray(promoted_histogram_); + promoted_histogram_ = NULL; + } + + start_ = NULL; + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); + + to_space_.TearDown(); + from_space_.TearDown(); + + LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); + + DCHECK(reservation_.IsReserved()); + heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, + NOT_EXECUTABLE); + chunk_base_ = NULL; + chunk_size_ = 0; +} + + +void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } + + +void NewSpace::Grow() { + // Double the semispace size but only up to maximum capacity. 
+ DCHECK(Capacity() < MaximumCapacity()); + int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); + if (to_space_.GrowTo(new_capacity)) { + // Only grow from space if we managed to grow to-space. + if (!from_space_.GrowTo(new_capacity)) { + // If we managed to grow to-space but couldn't grow from-space, + // attempt to shrink to-space. + if (!to_space_.ShrinkTo(from_space_.Capacity())) { + // We are in an inconsistent state because we could not + // commit/uncommit memory from new space. + V8::FatalProcessOutOfMemory("Failed to grow new space."); + } + } + } + DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); +} + + +void NewSpace::Shrink() { + int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); + int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); + if (rounded_new_capacity < Capacity() && + to_space_.ShrinkTo(rounded_new_capacity)) { + // Only shrink from-space if we managed to shrink to-space. + from_space_.Reset(); + if (!from_space_.ShrinkTo(rounded_new_capacity)) { + // If we managed to shrink to-space but couldn't shrink from + // space, attempt to grow to-space again. + if (!to_space_.GrowTo(from_space_.Capacity())) { + // We are in an inconsistent state because we could not + // commit/uncommit memory from new space. + V8::FatalProcessOutOfMemory("Failed to shrink new space."); + } + } + } + DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); +} + + +void NewSpace::UpdateAllocationInfo() { + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); + allocation_info_.set_top(to_space_.page_low()); + allocation_info_.set_limit(to_space_.page_high()); + UpdateInlineAllocationLimit(0); + DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); +} + + +void NewSpace::ResetAllocationInfo() { + to_space_.Reset(); + UpdateAllocationInfo(); + pages_used_ = 0; + // Clear all mark-bits in the to-space. 
+ NewSpacePageIterator it(&to_space_); + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { + if (heap()->inline_allocation_disabled()) { + // Lowest limit when linear allocation was disabled. + Address high = to_space_.page_high(); + Address new_top = allocation_info_.top() + size_in_bytes; + allocation_info_.set_limit(Min(new_top, high)); + } else if (inline_allocation_limit_step() == 0) { + // Normal limit is the end of the current page. + allocation_info_.set_limit(to_space_.page_high()); + } else { + // Lower limit during incremental marking. + Address high = to_space_.page_high(); + Address new_top = allocation_info_.top() + size_in_bytes; + Address new_limit = new_top + inline_allocation_limit_step_; + allocation_info_.set_limit(Min(new_limit, high)); + } + DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); +} + + +bool NewSpace::AddFreshPage() { + Address top = allocation_info_.top(); + if (NewSpacePage::IsAtStart(top)) { + // The current page is already empty. Don't try to make another. + + // We should only get here if someone asks to allocate more + // than what can be stored in a single page. + // TODO(gc): Change the limit on new-space allocation to prevent this + // from happening (all such allocations should go directly to LOSpace). + return false; + } + if (!to_space_.AdvancePage()) { + // Failed to get a new page in to-space. + return false; + } + + // Clear remainder of current page. 
+ Address limit = NewSpacePage::FromLimit(top)->area_end(); + if (heap()->gc_state() == Heap::SCAVENGE) { + heap()->promotion_queue()->SetNewLimit(limit); + heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); + } + + int remaining_in_page = static_cast<int>(limit - top); + heap()->CreateFillerObjectAt(top, remaining_in_page); + pages_used_++; + UpdateAllocationInfo(); + + return true; +} + + +AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) { + Address old_top = allocation_info_.top(); + Address high = to_space_.page_high(); + if (allocation_info_.limit() < high) { + // Either the limit has been lowered because linear allocation was disabled + // or because incremental marking wants to get a chance to do a step. Set + // the new limit accordingly. + Address new_top = old_top + size_in_bytes; + int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); + heap()->incremental_marking()->Step(bytes_allocated, + IncrementalMarking::GC_VIA_STACK_GUARD); + UpdateInlineAllocationLimit(size_in_bytes); + top_on_previous_step_ = new_top; + return AllocateRaw(size_in_bytes); + } else if (AddFreshPage()) { + // Switched to new page. Try allocating again. + int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); + heap()->incremental_marking()->Step(bytes_allocated, + IncrementalMarking::GC_VIA_STACK_GUARD); + top_on_previous_step_ = to_space_.page_low(); + return AllocateRaw(size_in_bytes); + } else { + return AllocationResult::Retry(); + } +} + + +#ifdef VERIFY_HEAP +// We do not use the SemiSpaceIterator because verification doesn't assume +// that it works (it depends on the invariants we are checking). +void NewSpace::Verify() { + // The allocation pointer should be in the space or at the very end. + DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); + + // There should be objects packed in from the low address up to the + // allocation pointer. 
+ Address current = to_space_.first_page()->area_start(); + CHECK_EQ(current, to_space_.space_start()); + + while (current != top()) { + if (!NewSpacePage::IsAtEnd(current)) { + // The allocation pointer should not be in the middle of an object. + CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || + current < top()); + + HeapObject* object = HeapObject::FromAddress(current); + + // The first word should be a map, and we expect all map pointers to + // be in map space. + Map* map = object->map(); + CHECK(map->IsMap()); + CHECK(heap()->map_space()->Contains(map)); + + // The object should not be code or a map. + CHECK(!object->IsMap()); + CHECK(!object->IsCode()); + + // The object itself should look OK. + object->ObjectVerify(); + + // All the interior pointers should be contained in the heap. + VerifyPointersVisitor visitor; + int size = object->Size(); + object->IterateBody(map->instance_type(), size, &visitor); + + current += size; + } else { + // At end of page, switch to next page. + NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); + // Next page should be valid. + CHECK(!page->is_anchor()); + current = page->area_start(); + } + } + + // Check semi-spaces. + CHECK_EQ(from_space_.id(), kFromSpace); + CHECK_EQ(to_space_.id(), kToSpace); + from_space_.Verify(); + to_space_.Verify(); +} +#endif + +// ----------------------------------------------------------------------------- +// SemiSpace implementation + +void SemiSpace::SetUp(Address start, int initial_capacity, + int maximum_capacity) { + // Creates a space in the young generation. The constructor does not + // allocate memory from the OS. A SemiSpace is given a contiguous chunk of + // memory of size 'capacity' when set up, and does not grow or shrink + // otherwise. In the mark-compact collector, the memory region of the from + // space is used as the marking stack. It requires contiguous memory + // addresses. 
+ DCHECK(maximum_capacity >= Page::kPageSize); + initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); + capacity_ = initial_capacity; + maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); + maximum_committed_ = 0; + committed_ = false; + start_ = start; + address_mask_ = ~(maximum_capacity - 1); + object_mask_ = address_mask_ | kHeapObjectTagMask; + object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; + age_mark_ = start_; +} + + +void SemiSpace::TearDown() { + start_ = NULL; + capacity_ = 0; +} + + +bool SemiSpace::Commit() { + DCHECK(!is_committed()); + int pages = capacity_ / Page::kPageSize; + if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_, + executable())) { + return false; + } + + NewSpacePage* current = anchor(); + for (int i = 0; i < pages; i++) { + NewSpacePage* new_page = + NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); + new_page->InsertAfter(current); + current = new_page; + } + + SetCapacity(capacity_); + committed_ = true; + Reset(); + return true; +} + + +bool SemiSpace::Uncommit() { + DCHECK(is_committed()); + Address start = start_ + maximum_capacity_ - capacity_; + if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { + return false; + } + anchor()->set_next_page(anchor()); + anchor()->set_prev_page(anchor()); + + committed_ = false; + return true; +} + + +size_t SemiSpace::CommittedPhysicalMemory() { + if (!is_committed()) return 0; + size_t size = 0; + NewSpacePageIterator it(this); + while (it.has_next()) { + size += it.next()->CommittedPhysicalMemory(); + } + return size; +} + + +bool SemiSpace::GrowTo(int new_capacity) { + if (!is_committed()) { + if (!Commit()) return false; + } + DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); + DCHECK(new_capacity <= maximum_capacity_); + DCHECK(new_capacity > capacity_); + int pages_before = capacity_ / Page::kPageSize; + int pages_after = new_capacity / Page::kPageSize; + + 
size_t delta = new_capacity - capacity_; + + DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); + if (!heap()->isolate()->memory_allocator()->CommitBlock( + start_ + capacity_, delta, executable())) { + return false; + } + SetCapacity(new_capacity); + NewSpacePage* last_page = anchor()->prev_page(); + DCHECK(last_page != anchor()); + for (int i = pages_before; i < pages_after; i++) { + Address page_address = start_ + i * Page::kPageSize; + NewSpacePage* new_page = + NewSpacePage::Initialize(heap(), page_address, this); + new_page->InsertAfter(last_page); + Bitmap::Clear(new_page); + // Duplicate the flags that was set on the old page. + new_page->SetFlags(last_page->GetFlags(), + NewSpacePage::kCopyOnFlipFlagsMask); + last_page = new_page; + } + return true; +} + + +bool SemiSpace::ShrinkTo(int new_capacity) { + DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); + DCHECK(new_capacity >= initial_capacity_); + DCHECK(new_capacity < capacity_); + if (is_committed()) { + size_t delta = capacity_ - new_capacity; + DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); + + MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); + if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { + return false; + } + + int pages_after = new_capacity / Page::kPageSize; + NewSpacePage* new_last_page = + NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); + new_last_page->set_next_page(anchor()); + anchor()->set_prev_page(new_last_page); + DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page)); + } + + SetCapacity(new_capacity); + + return true; +} + + +void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { + anchor_.set_owner(this); + // Fixup back-pointers to anchor. Address of anchor changes + // when we swap. + anchor_.prev_page()->set_next_page(&anchor_); + anchor_.next_page()->set_prev_page(&anchor_); + + bool becomes_to_space = (id_ == kFromSpace); + id_ = becomes_to_space ? 
kToSpace : kFromSpace; + NewSpacePage* page = anchor_.next_page(); + while (page != &anchor_) { + page->set_owner(this); + page->SetFlags(flags, mask); + if (becomes_to_space) { + page->ClearFlag(MemoryChunk::IN_FROM_SPACE); + page->SetFlag(MemoryChunk::IN_TO_SPACE); + page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); + page->ResetLiveBytes(); + } else { + page->SetFlag(MemoryChunk::IN_FROM_SPACE); + page->ClearFlag(MemoryChunk::IN_TO_SPACE); + } + DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); + DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || + page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); + page = page->next_page(); + } +} + + +void SemiSpace::Reset() { + DCHECK(anchor_.next_page() != &anchor_); + current_page_ = anchor_.next_page(); +} + + +void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { + // We won't be swapping semispaces without data in them. + DCHECK(from->anchor_.next_page() != &from->anchor_); + DCHECK(to->anchor_.next_page() != &to->anchor_); + + // Swap bits. + SemiSpace tmp = *from; + *from = *to; + *to = tmp; + + // Fixup back-pointers to the page list anchor now that its address + // has changed. + // Swap to/from-space bits on pages. + // Copy GC flags from old active space (from-space) to new (to-space). + intptr_t flags = from->current_page()->GetFlags(); + to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); + + from->FlipPages(0, 0); +} + + +void SemiSpace::SetCapacity(int new_capacity) { + capacity_ = new_capacity; + if (capacity_ > maximum_committed_) { + maximum_committed_ = capacity_; + } +} + + +void SemiSpace::set_age_mark(Address mark) { + DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this); + age_mark_ = mark; + // Mark all pages up to the one containing mark. 
+ NewSpacePageIterator it(space_start(), mark); + while (it.has_next()) { + it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); + } +} + + +#ifdef DEBUG +void SemiSpace::Print() {} +#endif + +#ifdef VERIFY_HEAP +void SemiSpace::Verify() { + bool is_from_space = (id_ == kFromSpace); + NewSpacePage* page = anchor_.next_page(); + CHECK(anchor_.semi_space() == this); + while (page != &anchor_) { + CHECK(page->semi_space() == this); + CHECK(page->InNewSpace()); + CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE + : MemoryChunk::IN_TO_SPACE)); + CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE + : MemoryChunk::IN_FROM_SPACE)); + CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); + if (!is_from_space) { + // The pointers-from-here-are-interesting flag isn't updated dynamically + // on from-space pages, so it might be out of sync with the marking state. + if (page->heap()->incremental_marking()->IsMarking()) { + CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); + } else { + CHECK( + !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); + } + // TODO(gc): Check that the live_bytes_count_ field matches the + // black marking on the page (if we make it match in new-space). + } + CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); + CHECK(page->prev_page()->next_page() == page); + page = page->next_page(); + } +} +#endif + +#ifdef DEBUG +void SemiSpace::AssertValidRange(Address start, Address end) { + // Addresses belong to same semi-space + NewSpacePage* page = NewSpacePage::FromLimit(start); + NewSpacePage* end_page = NewSpacePage::FromLimit(end); + SemiSpace* space = page->semi_space(); + CHECK_EQ(space, end_page->semi_space()); + // Start address is before end address, either on same page, + // or end address is on a later page in the linked list of + // semi-space pages. 
+ if (page == end_page) { + CHECK(start <= end); + } else { + while (page != end_page) { + page = page->next_page(); + CHECK_NE(page, space->anchor()); + } + } +} +#endif + + +// ----------------------------------------------------------------------------- +// SemiSpaceIterator implementation. +SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { + Initialize(space->bottom(), space->top(), NULL); +} + + +SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, + HeapObjectCallback size_func) { + Initialize(space->bottom(), space->top(), size_func); +} + + +SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { + Initialize(start, space->top(), NULL); +} + + +SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { + Initialize(from, to, NULL); +} + + +void SemiSpaceIterator::Initialize(Address start, Address end, + HeapObjectCallback size_func) { + SemiSpace::AssertValidRange(start, end); + current_ = start; + limit_ = end; + size_func_ = size_func; +} + + +#ifdef DEBUG +// heap_histograms is shared, always clear it before using it. +static void ClearHistograms(Isolate* isolate) { +// We reset the name each time, though it hasn't changed. 
+#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); + INSTANCE_TYPE_LIST(DEF_TYPE_NAME) +#undef DEF_TYPE_NAME + +#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); + INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) +#undef CLEAR_HISTOGRAM + + isolate->js_spill_information()->Clear(); +} + + +static void ClearCodeKindStatistics(int* code_kind_statistics) { + for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { + code_kind_statistics[i] = 0; + } +} + + +static void ReportCodeKindStatistics(int* code_kind_statistics) { + PrintF("\n Code kind histograms: \n"); + for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { + if (code_kind_statistics[i] > 0) { + PrintF(" %-20s: %10d bytes\n", + Code::Kind2String(static_cast<Code::Kind>(i)), + code_kind_statistics[i]); + } + } + PrintF("\n"); +} + + +static int CollectHistogramInfo(HeapObject* obj) { + Isolate* isolate = obj->GetIsolate(); + InstanceType type = obj->map()->instance_type(); + DCHECK(0 <= type && type <= LAST_TYPE); + DCHECK(isolate->heap_histograms()[type].name() != NULL); + isolate->heap_histograms()[type].increment_number(1); + isolate->heap_histograms()[type].increment_bytes(obj->Size()); + + if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { + JSObject::cast(obj) + ->IncrementSpillStatistics(isolate->js_spill_information()); + } + + return obj->Size(); +} + + +static void ReportHistogram(Isolate* isolate, bool print_spill) { + PrintF("\n Object Histogram:\n"); + for (int i = 0; i <= LAST_TYPE; i++) { + if (isolate->heap_histograms()[i].number() > 0) { + PrintF(" %-34s%10d (%10d bytes)\n", + isolate->heap_histograms()[i].name(), + isolate->heap_histograms()[i].number(), + isolate->heap_histograms()[i].bytes()); + } + } + PrintF("\n"); + + // Summarize string types. 
+ int string_number = 0; + int string_bytes = 0; +#define INCREMENT(type, size, name, camel_name) \ + string_number += isolate->heap_histograms()[type].number(); \ + string_bytes += isolate->heap_histograms()[type].bytes(); + STRING_TYPE_LIST(INCREMENT) +#undef INCREMENT + if (string_number > 0) { + PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, + string_bytes); + } + + if (FLAG_collect_heap_spill_statistics && print_spill) { + isolate->js_spill_information()->Print(); + } +} +#endif // DEBUG + + +// Support for statistics gathering for --heap-stats and --log-gc. +void NewSpace::ClearHistograms() { + for (int i = 0; i <= LAST_TYPE; i++) { + allocated_histogram_[i].clear(); + promoted_histogram_[i].clear(); + } +} + + +// Because the copying collector does not touch garbage objects, we iterate +// the new space before a collection to get a histogram of allocated objects. +// This only happens when --log-gc flag is set. +void NewSpace::CollectStatistics() { + ClearHistograms(); + SemiSpaceIterator it(this); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) + RecordAllocation(obj); +} + + +static void DoReportStatistics(Isolate* isolate, HistogramInfo* info, + const char* description) { + LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); + // Lump all the string types together. + int string_number = 0; + int string_bytes = 0; +#define INCREMENT(type, size, name, camel_name) \ + string_number += info[type].number(); \ + string_bytes += info[type].bytes(); + STRING_TYPE_LIST(INCREMENT) +#undef INCREMENT + if (string_number > 0) { + LOG(isolate, + HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); + } + + // Then do the other types. 
+ for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { + if (info[i].number() > 0) { + LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(), + info[i].bytes())); + } + } + LOG(isolate, HeapSampleEndEvent("NewSpace", description)); +} + + +void NewSpace::ReportStatistics() { +#ifdef DEBUG + if (FLAG_heap_stats) { + float pct = static_cast<float>(Available()) / Capacity(); + PrintF(" capacity: %" V8_PTR_PREFIX + "d" + ", available: %" V8_PTR_PREFIX "d, %%%d\n", + Capacity(), Available(), static_cast<int>(pct * 100)); + PrintF("\n Object Histogram:\n"); + for (int i = 0; i <= LAST_TYPE; i++) { + if (allocated_histogram_[i].number() > 0) { + PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(), + allocated_histogram_[i].number(), + allocated_histogram_[i].bytes()); + } + } + PrintF("\n"); + } +#endif // DEBUG + + if (FLAG_log_gc) { + Isolate* isolate = heap()->isolate(); + DoReportStatistics(isolate, allocated_histogram_, "allocated"); + DoReportStatistics(isolate, promoted_histogram_, "promoted"); + } +} + + +void NewSpace::RecordAllocation(HeapObject* obj) { + InstanceType type = obj->map()->instance_type(); + DCHECK(0 <= type && type <= LAST_TYPE); + allocated_histogram_[type].increment_number(1); + allocated_histogram_[type].increment_bytes(obj->Size()); +} + + +void NewSpace::RecordPromotion(HeapObject* obj) { + InstanceType type = obj->map()->instance_type(); + DCHECK(0 <= type && type <= LAST_TYPE); + promoted_histogram_[type].increment_number(1); + promoted_histogram_[type].increment_bytes(obj->Size()); +} + + +size_t NewSpace::CommittedPhysicalMemory() { + if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); + size_t size = to_space_.CommittedPhysicalMemory(); + if (from_space_.is_committed()) { + size += from_space_.CommittedPhysicalMemory(); + } + return size; +} + + +// ----------------------------------------------------------------------------- +// 
Free lists for old object spaces implementation + +void FreeListNode::set_size(Heap* heap, int size_in_bytes) { + DCHECK(size_in_bytes > 0); + DCHECK(IsAligned(size_in_bytes, kPointerSize)); + + // We write a map and possibly size information to the block. If the block + // is big enough to be a FreeSpace with at least one extra word (the next + // pointer), we set its map to be the free space map and its size to an + // appropriate array length for the desired size from HeapObject::Size(). + // If the block is too small (eg, one or two words), to hold both a size + // field and a next pointer, we give it a filler map that gives it the + // correct size. + if (size_in_bytes > FreeSpace::kHeaderSize) { + // Can't use FreeSpace::cast because it fails during deserialization. + // We have to set the size first with a release store before we store + // the map because a concurrent store buffer scan on scavenge must not + // observe a map with an invalid size. + FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); + this_as_free_space->nobarrier_set_size(size_in_bytes); + synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); + } else if (size_in_bytes == kPointerSize) { + set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); + } else if (size_in_bytes == 2 * kPointerSize) { + set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map()); + } else { + UNREACHABLE(); + } + // We would like to DCHECK(Size() == size_in_bytes) but this would fail during + // deserialization because the free space map is not done yet. 
+} + + +FreeListNode* FreeListNode::next() { + DCHECK(IsFreeListNode(this)); + if (map() == GetHeap()->raw_unchecked_free_space_map()) { + DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); + return reinterpret_cast<FreeListNode*>( + Memory::Address_at(address() + kNextOffset)); + } else { + return reinterpret_cast<FreeListNode*>( + Memory::Address_at(address() + kPointerSize)); + } +} + + +FreeListNode** FreeListNode::next_address() { + DCHECK(IsFreeListNode(this)); + if (map() == GetHeap()->raw_unchecked_free_space_map()) { + DCHECK(Size() >= kNextOffset + kPointerSize); + return reinterpret_cast<FreeListNode**>(address() + kNextOffset); + } else { + return reinterpret_cast<FreeListNode**>(address() + kPointerSize); + } +} + + +void FreeListNode::set_next(FreeListNode* next) { + DCHECK(IsFreeListNode(this)); + // While we are booting the VM the free space map will actually be null. So + // we have to make sure that we don't try to use it for anything at that + // stage. + if (map() == GetHeap()->raw_unchecked_free_space_map()) { + DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); + base::NoBarrier_Store( + reinterpret_cast<base::AtomicWord*>(address() + kNextOffset), + reinterpret_cast<base::AtomicWord>(next)); + } else { + base::NoBarrier_Store( + reinterpret_cast<base::AtomicWord*>(address() + kPointerSize), + reinterpret_cast<base::AtomicWord>(next)); + } +} + + +intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { + intptr_t free_bytes = 0; + if (category->top() != NULL) { + // This is safe (not going to deadlock) since Concatenate operations + // are never performed on the same free lists at the same time in + // reverse order. 
+ base::LockGuard<base::Mutex> target_lock_guard(mutex()); + base::LockGuard<base::Mutex> source_lock_guard(category->mutex()); + DCHECK(category->end_ != NULL); + free_bytes = category->available(); + if (end_ == NULL) { + end_ = category->end(); + } else { + category->end()->set_next(top()); + } + set_top(category->top()); + base::NoBarrier_Store(&top_, category->top_); + available_ += category->available(); + category->Reset(); + } + return free_bytes; +} + + +void FreeListCategory::Reset() { + set_top(NULL); + set_end(NULL); + set_available(0); +} + + +intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { + int sum = 0; + FreeListNode* t = top(); + FreeListNode** n = &t; + while (*n != NULL) { + if (Page::FromAddress((*n)->address()) == p) { + FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); + sum += free_space->Size(); + *n = (*n)->next(); + } else { + n = (*n)->next_address(); + } + } + set_top(t); + if (top() == NULL) { + set_end(NULL); + } + available_ -= sum; + return sum; +} + + +bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { + FreeListNode* node = top(); + while (node != NULL) { + if (Page::FromAddress(node->address()) == p) return true; + node = node->next(); + } + return false; +} + + +FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) { + FreeListNode* node = top(); + + if (node == NULL) return NULL; + + while (node != NULL && + Page::FromAddress(node->address())->IsEvacuationCandidate()) { + available_ -= reinterpret_cast<FreeSpace*>(node)->Size(); + node = node->next(); + } + + if (node != NULL) { + set_top(node->next()); + *node_size = reinterpret_cast<FreeSpace*>(node)->Size(); + available_ -= *node_size; + } else { + set_top(NULL); + } + + if (top() == NULL) { + set_end(NULL); + } + + return node; +} + + +FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes, + int* node_size) { + FreeListNode* node = PickNodeFromList(node_size); + if (node != NULL && *node_size < size_in_bytes) { + 
Free(node, *node_size); + *node_size = 0; + return NULL; + } + return node; +} + + +void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) { + node->set_next(top()); + set_top(node); + if (end_ == NULL) { + end_ = node; + } + available_ += size_in_bytes; +} + + +void FreeListCategory::RepairFreeList(Heap* heap) { + FreeListNode* n = top(); + while (n != NULL) { + Map** map_location = reinterpret_cast<Map**>(n->address()); + if (*map_location == NULL) { + *map_location = heap->free_space_map(); + } else { + DCHECK(*map_location == heap->free_space_map()); + } + n = n->next(); + } +} + + +FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) { + Reset(); +} + + +intptr_t FreeList::Concatenate(FreeList* free_list) { + intptr_t free_bytes = 0; + free_bytes += small_list_.Concatenate(free_list->small_list()); + free_bytes += medium_list_.Concatenate(free_list->medium_list()); + free_bytes += large_list_.Concatenate(free_list->large_list()); + free_bytes += huge_list_.Concatenate(free_list->huge_list()); + return free_bytes; +} + + +void FreeList::Reset() { + small_list_.Reset(); + medium_list_.Reset(); + large_list_.Reset(); + huge_list_.Reset(); +} + + +int FreeList::Free(Address start, int size_in_bytes) { + if (size_in_bytes == 0) return 0; + + FreeListNode* node = FreeListNode::FromAddress(start); + node->set_size(heap_, size_in_bytes); + Page* page = Page::FromAddress(start); + + // Early return to drop too-small blocks on the floor. + if (size_in_bytes < kSmallListMin) { + page->add_non_available_small_blocks(size_in_bytes); + return size_in_bytes; + } + + // Insert other blocks at the head of a free list of the appropriate + // magnitude. 
+ if (size_in_bytes <= kSmallListMax) { + small_list_.Free(node, size_in_bytes); + page->add_available_in_small_free_list(size_in_bytes); + } else if (size_in_bytes <= kMediumListMax) { + medium_list_.Free(node, size_in_bytes); + page->add_available_in_medium_free_list(size_in_bytes); + } else if (size_in_bytes <= kLargeListMax) { + large_list_.Free(node, size_in_bytes); + page->add_available_in_large_free_list(size_in_bytes); + } else { + huge_list_.Free(node, size_in_bytes); + page->add_available_in_huge_free_list(size_in_bytes); + } + + DCHECK(IsVeryLong() || available() == SumFreeLists()); + return 0; +} + + +FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { + FreeListNode* node = NULL; + Page* page = NULL; + + if (size_in_bytes <= kSmallAllocationMax) { + node = small_list_.PickNodeFromList(node_size); + if (node != NULL) { + DCHECK(size_in_bytes <= *node_size); + page = Page::FromAddress(node->address()); + page->add_available_in_small_free_list(-(*node_size)); + DCHECK(IsVeryLong() || available() == SumFreeLists()); + return node; + } + } + + if (size_in_bytes <= kMediumAllocationMax) { + node = medium_list_.PickNodeFromList(node_size); + if (node != NULL) { + DCHECK(size_in_bytes <= *node_size); + page = Page::FromAddress(node->address()); + page->add_available_in_medium_free_list(-(*node_size)); + DCHECK(IsVeryLong() || available() == SumFreeLists()); + return node; + } + } + + if (size_in_bytes <= kLargeAllocationMax) { + node = large_list_.PickNodeFromList(node_size); + if (node != NULL) { + DCHECK(size_in_bytes <= *node_size); + page = Page::FromAddress(node->address()); + page->add_available_in_large_free_list(-(*node_size)); + DCHECK(IsVeryLong() || available() == SumFreeLists()); + return node; + } + } + + int huge_list_available = huge_list_.available(); + FreeListNode* top_node = huge_list_.top(); + for (FreeListNode** cur = &top_node; *cur != NULL; + cur = (*cur)->next_address()) { + FreeListNode* cur_node = *cur; + while 
(cur_node != NULL && + Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { + int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); + huge_list_available -= size; + page = Page::FromAddress(cur_node->address()); + page->add_available_in_huge_free_list(-size); + cur_node = cur_node->next(); + } + + *cur = cur_node; + if (cur_node == NULL) { + huge_list_.set_end(NULL); + break; + } + + DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map()); + FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); + int size = cur_as_free_space->Size(); + if (size >= size_in_bytes) { + // Large enough node found. Unlink it from the list. + node = *cur; + *cur = node->next(); + *node_size = size; + huge_list_available -= size; + page = Page::FromAddress(node->address()); + page->add_available_in_huge_free_list(-size); + break; + } + } + + huge_list_.set_top(top_node); + if (huge_list_.top() == NULL) { + huge_list_.set_end(NULL); + } + huge_list_.set_available(huge_list_available); + + if (node != NULL) { + DCHECK(IsVeryLong() || available() == SumFreeLists()); + return node; + } + + if (size_in_bytes <= kSmallListMax) { + node = small_list_.PickNodeFromList(size_in_bytes, node_size); + if (node != NULL) { + DCHECK(size_in_bytes <= *node_size); + page = Page::FromAddress(node->address()); + page->add_available_in_small_free_list(-(*node_size)); + } + } else if (size_in_bytes <= kMediumListMax) { + node = medium_list_.PickNodeFromList(size_in_bytes, node_size); + if (node != NULL) { + DCHECK(size_in_bytes <= *node_size); + page = Page::FromAddress(node->address()); + page->add_available_in_medium_free_list(-(*node_size)); + } + } else if (size_in_bytes <= kLargeListMax) { + node = large_list_.PickNodeFromList(size_in_bytes, node_size); + if (node != NULL) { + DCHECK(size_in_bytes <= *node_size); + page = Page::FromAddress(node->address()); + page->add_available_in_large_free_list(-(*node_size)); + } + } + + DCHECK(IsVeryLong() || available() == 
SumFreeLists()); + return node; +} + + +// Allocation on the old space free list. If it succeeds then a new linear +// allocation space has been set up with the top and limit of the space. If +// the allocation fails then NULL is returned, and the caller can perform a GC +// or allocate a new page before retrying. +HeapObject* FreeList::Allocate(int size_in_bytes) { + DCHECK(0 < size_in_bytes); + DCHECK(size_in_bytes <= kMaxBlockSize); + DCHECK(IsAligned(size_in_bytes, kPointerSize)); + // Don't free list allocate if there is linear space available. + DCHECK(owner_->limit() - owner_->top() < size_in_bytes); + + int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); + // Mark the old linear allocation area with a free space map so it can be + // skipped when scanning the heap. This also puts it back in the free list + // if it is big enough. + owner_->Free(owner_->top(), old_linear_size); + + owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - + old_linear_size); + + int new_node_size = 0; + FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); + if (new_node == NULL) { + owner_->SetTopAndLimit(NULL, NULL); + return NULL; + } + + int bytes_left = new_node_size - size_in_bytes; + DCHECK(bytes_left >= 0); + +#ifdef DEBUG + for (int i = 0; i < size_in_bytes / kPointerSize; i++) { + reinterpret_cast<Object**>(new_node->address())[i] = + Smi::FromInt(kCodeZapValue); + } +#endif + + // The old-space-step might have finished sweeping and restarted marking. + // Verify that it did not turn the page of the new node into an evacuation + // candidate. + DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); + + const int kThreshold = IncrementalMarking::kAllocatedThreshold; + + // Memory in the linear allocation area is counted as allocated. We may free + // a little of this again immediately - see below. 
+ owner_->Allocate(new_node_size); + + if (owner_->heap()->inline_allocation_disabled()) { + // Keep the linear allocation area empty if requested to do so, just + // return area back to the free list instead. + owner_->Free(new_node->address() + size_in_bytes, bytes_left); + DCHECK(owner_->top() == NULL && owner_->limit() == NULL); + } else if (bytes_left > kThreshold && + owner_->heap()->incremental_marking()->IsMarkingIncomplete() && + FLAG_incremental_marking_steps) { + int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); + // We don't want to give too large linear areas to the allocator while + // incremental marking is going on, because we won't check again whether + // we want to do another increment until the linear area is used up. + owner_->Free(new_node->address() + size_in_bytes + linear_size, + new_node_size - size_in_bytes - linear_size); + owner_->SetTopAndLimit(new_node->address() + size_in_bytes, + new_node->address() + size_in_bytes + linear_size); + } else if (bytes_left > 0) { + // Normally we give the rest of the node to the allocator as its new + // linear allocation area. + owner_->SetTopAndLimit(new_node->address() + size_in_bytes, + new_node->address() + new_node_size); + } else { + // TODO(gc) Try not freeing linear allocation region when bytes_left + // are zero. 
+ owner_->SetTopAndLimit(NULL, NULL); + } + + return new_node; +} + + +intptr_t FreeList::EvictFreeListItems(Page* p) { + intptr_t sum = huge_list_.EvictFreeListItemsInList(p); + p->set_available_in_huge_free_list(0); + + if (sum < p->area_size()) { + sum += small_list_.EvictFreeListItemsInList(p) + + medium_list_.EvictFreeListItemsInList(p) + + large_list_.EvictFreeListItemsInList(p); + p->set_available_in_small_free_list(0); + p->set_available_in_medium_free_list(0); + p->set_available_in_large_free_list(0); + } + + return sum; +} + + +bool FreeList::ContainsPageFreeListItems(Page* p) { + return huge_list_.EvictFreeListItemsInList(p) || + small_list_.EvictFreeListItemsInList(p) || + medium_list_.EvictFreeListItemsInList(p) || + large_list_.EvictFreeListItemsInList(p); +} + + +void FreeList::RepairLists(Heap* heap) { + small_list_.RepairFreeList(heap); + medium_list_.RepairFreeList(heap); + large_list_.RepairFreeList(heap); + huge_list_.RepairFreeList(heap); +} + + +#ifdef DEBUG +intptr_t FreeListCategory::SumFreeList() { + intptr_t sum = 0; + FreeListNode* cur = top(); + while (cur != NULL) { + DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); + FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); + sum += cur_as_free_space->nobarrier_size(); + cur = cur->next(); + } + return sum; +} + + +static const int kVeryLongFreeList = 500; + + +int FreeListCategory::FreeListLength() { + int length = 0; + FreeListNode* cur = top(); + while (cur != NULL) { + length++; + cur = cur->next(); + if (length == kVeryLongFreeList) return length; + } + return length; +} + + +bool FreeList::IsVeryLong() { + if (small_list_.FreeListLength() == kVeryLongFreeList) return true; + if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; + if (large_list_.FreeListLength() == kVeryLongFreeList) return true; + if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; + return false; +} + + +// This can take a very long time because it is 
linear in the number of entries +// on the free list, so it should not be called if FreeListLength returns +// kVeryLongFreeList. +intptr_t FreeList::SumFreeLists() { + intptr_t sum = small_list_.SumFreeList(); + sum += medium_list_.SumFreeList(); + sum += large_list_.SumFreeList(); + sum += huge_list_.SumFreeList(); + return sum; +} +#endif + + +// ----------------------------------------------------------------------------- +// OldSpace implementation + +void PagedSpace::PrepareForMarkCompact() { + // We don't have a linear allocation area while sweeping. It will be restored + // on the first allocation after the sweep. + EmptyAllocationInfo(); + + // This counter will be increased for pages which will be swept by the + // sweeper threads. + unswept_free_bytes_ = 0; + + // Clear the free list before a full GC---it will be rebuilt afterward. + free_list_.Reset(); +} + + +intptr_t PagedSpace::SizeOfObjects() { + DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() || + (unswept_free_bytes_ == 0)); + return Size() - unswept_free_bytes_ - (limit() - top()); +} + + +// After we have booted, we have created a map which represents free space +// on the heap. If there was already a free list then the elements on it +// were created with the wrong FreeSpaceMap (normally NULL), so we need to +// fix them. +void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); } + + +void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { + if (allocation_info_.top() >= allocation_info_.limit()) return; + + if (Page::FromAllocationTop(allocation_info_.top()) + ->IsEvacuationCandidate()) { + // Create filler object to keep page iterable if it was iterable. 
+ int remaining = + static_cast<int>(allocation_info_.limit() - allocation_info_.top()); + heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); + + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); + } +} + + +HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation( + int size_in_bytes) { + MarkCompactCollector* collector = heap()->mark_compact_collector(); + if (collector->sweeping_in_progress()) { + // Wait for the sweeper threads here and complete the sweeping phase. + collector->EnsureSweepingCompleted(); + + // After waiting for the sweeper threads, there may be new free-list + // entries. + return free_list_.Allocate(size_in_bytes); + } + return NULL; +} + + +HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { + // Allocation in this space has failed. + + MarkCompactCollector* collector = heap()->mark_compact_collector(); + // Sweeping is still in progress. + if (collector->sweeping_in_progress()) { + // First try to refill the free-list, concurrent sweeper threads + // may have freed some objects in the meantime. + collector->RefillFreeList(this); + + // Retry the free list allocation. + HeapObject* object = free_list_.Allocate(size_in_bytes); + if (object != NULL) return object; + + // If sweeping is still in progress try to sweep pages on the main thread. + int free_chunk = collector->SweepInParallel(this, size_in_bytes); + collector->RefillFreeList(this); + if (free_chunk >= size_in_bytes) { + HeapObject* object = free_list_.Allocate(size_in_bytes); + // We should be able to allocate an object here since we just freed that + // much memory. + DCHECK(object != NULL); + if (object != NULL) return object; + } + } + + // Free list allocation failed and there is no next page. Fail if we have + // hit the old generation size limit that should cause a garbage + // collection. 
+ if (!heap()->always_allocate() && + heap()->OldGenerationAllocationLimitReached()) { + // If sweeper threads are active, wait for them at that point and steal + // elements form their free-lists. + HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); + if (object != NULL) return object; + } + + // Try to expand the space and allocate in the new next page. + if (Expand()) { + DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); + return free_list_.Allocate(size_in_bytes); + } + + // If sweeper threads are active, wait for them at that point and steal + // elements form their free-lists. Allocation may still fail their which + // would indicate that there is not enough memory for the given allocation. + return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); +} + + +#ifdef DEBUG +void PagedSpace::ReportCodeStatistics(Isolate* isolate) { + CommentStatistic* comments_statistics = + isolate->paged_space_comments_statistics(); + ReportCodeKindStatistics(isolate->code_kind_statistics()); + PrintF( + "Code comment statistics (\" [ comment-txt : size/ " + "count (average)\"):\n"); + for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { + const CommentStatistic& cs = comments_statistics[i]; + if (cs.size > 0) { + PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, + cs.size / cs.count); + } + } + PrintF("\n"); +} + + +void PagedSpace::ResetCodeStatistics(Isolate* isolate) { + CommentStatistic* comments_statistics = + isolate->paged_space_comments_statistics(); + ClearCodeKindStatistics(isolate->code_kind_statistics()); + for (int i = 0; i < CommentStatistic::kMaxComments; i++) { + comments_statistics[i].Clear(); + } + comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; + comments_statistics[CommentStatistic::kMaxComments].size = 0; + comments_statistics[CommentStatistic::kMaxComments].count = 0; +} + + +// Adds comment to 'comment_statistics' table. 
Performance OK as long as +// 'kMaxComments' is small +static void EnterComment(Isolate* isolate, const char* comment, int delta) { + CommentStatistic* comments_statistics = + isolate->paged_space_comments_statistics(); + // Do not count empty comments + if (delta <= 0) return; + CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; + // Search for a free or matching entry in 'comments_statistics': 'cs' + // points to result. + for (int i = 0; i < CommentStatistic::kMaxComments; i++) { + if (comments_statistics[i].comment == NULL) { + cs = &comments_statistics[i]; + cs->comment = comment; + break; + } else if (strcmp(comments_statistics[i].comment, comment) == 0) { + cs = &comments_statistics[i]; + break; + } + } + // Update entry for 'comment' + cs->size += delta; + cs->count += 1; +} + + +// Call for each nested comment start (start marked with '[ xxx', end marked +// with ']'. RelocIterator 'it' must point to a comment reloc info. +static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { + DCHECK(!it->done()); + DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT); + const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); + if (tmp[0] != '[') { + // Not a nested comment; skip + return; + } + + // Search for end of nested comment or a new nested comment + const char* const comment_txt = + reinterpret_cast<const char*>(it->rinfo()->data()); + const byte* prev_pc = it->rinfo()->pc(); + int flat_delta = 0; + it->next(); + while (true) { + // All nested comments must be terminated properly, and therefore exit + // from loop. 
+ DCHECK(!it->done()); + if (it->rinfo()->rmode() == RelocInfo::COMMENT) { + const char* const txt = + reinterpret_cast<const char*>(it->rinfo()->data()); + flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); + if (txt[0] == ']') break; // End of nested comment + // A new comment + CollectCommentStatistics(isolate, it); + // Skip code that was covered with previous comment + prev_pc = it->rinfo()->pc(); + } + it->next(); + } + EnterComment(isolate, comment_txt, flat_delta); +} + + +// Collects code size statistics: +// - by code kind +// - by code comment +void PagedSpace::CollectCodeStatistics() { + Isolate* isolate = heap()->isolate(); + HeapObjectIterator obj_it(this); + for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { + if (obj->IsCode()) { + Code* code = Code::cast(obj); + isolate->code_kind_statistics()[code->kind()] += code->Size(); + RelocIterator it(code); + int delta = 0; + const byte* prev_pc = code->instruction_start(); + while (!it.done()) { + if (it.rinfo()->rmode() == RelocInfo::COMMENT) { + delta += static_cast<int>(it.rinfo()->pc() - prev_pc); + CollectCommentStatistics(isolate, &it); + prev_pc = it.rinfo()->pc(); + } + it.next(); + } + + DCHECK(code->instruction_start() <= prev_pc && + prev_pc <= code->instruction_end()); + delta += static_cast<int>(code->instruction_end() - prev_pc); + EnterComment(isolate, "NoComment", delta); + } + } +} + + +void PagedSpace::ReportStatistics() { + int pct = static_cast<int>(Available() * 100 / Capacity()); + PrintF(" capacity: %" V8_PTR_PREFIX + "d" + ", waste: %" V8_PTR_PREFIX + "d" + ", available: %" V8_PTR_PREFIX "d, %%%d\n", + Capacity(), Waste(), Available(), pct); + + if (!swept_precisely_) return; + ClearHistograms(heap()->isolate()); + HeapObjectIterator obj_it(this); + for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) + CollectHistogramInfo(obj); + ReportHistogram(heap()->isolate(), true); +} +#endif + + +// 
----------------------------------------------------------------------------- +// MapSpace implementation +// TODO(mvstanton): this is weird...the compiler can't make a vtable unless +// there is at least one non-inlined virtual function. I would prefer to hide +// the VerifyObject definition behind VERIFY_HEAP. + +void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } + + +// ----------------------------------------------------------------------------- +// CellSpace and PropertyCellSpace implementation +// TODO(mvstanton): this is weird...the compiler can't make a vtable unless +// there is at least one non-inlined virtual function. I would prefer to hide +// the VerifyObject definition behind VERIFY_HEAP. + +void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); } + + +void PropertyCellSpace::VerifyObject(HeapObject* object) { + CHECK(object->IsPropertyCell()); +} + + +// ----------------------------------------------------------------------------- +// LargeObjectIterator + +LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { + current_ = space->first_page_; + size_func_ = NULL; +} + + +LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, + HeapObjectCallback size_func) { + current_ = space->first_page_; + size_func_ = size_func; +} + + +HeapObject* LargeObjectIterator::Next() { + if (current_ == NULL) return NULL; + + HeapObject* object = current_->GetObject(); + current_ = current_->next_page(); + return object; +} + + +// ----------------------------------------------------------------------------- +// LargeObjectSpace +static bool ComparePointers(void* key1, void* key2) { return key1 == key2; } + + +LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity, + AllocationSpace id) + : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis + max_capacity_(max_capacity), + first_page_(NULL), + size_(0), + page_count_(0), + objects_size_(0), + 
chunk_map_(ComparePointers, 1024) {} + + +bool LargeObjectSpace::SetUp() { + first_page_ = NULL; + size_ = 0; + maximum_committed_ = 0; + page_count_ = 0; + objects_size_ = 0; + chunk_map_.Clear(); + return true; +} + + +void LargeObjectSpace::TearDown() { + while (first_page_ != NULL) { + LargePage* page = first_page_; + first_page_ = first_page_->next_page(); + LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); + + ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); + heap()->isolate()->memory_allocator()->PerformAllocationCallback( + space, kAllocationActionFree, page->size()); + heap()->isolate()->memory_allocator()->Free(page); + } + SetUp(); +} + + +AllocationResult LargeObjectSpace::AllocateRaw(int object_size, + Executability executable) { + // Check if we want to force a GC before growing the old space further. + // If so, fail the allocation. + if (!heap()->always_allocate() && + heap()->OldGenerationAllocationLimitReached()) { + return AllocationResult::Retry(identity()); + } + + if (Size() + object_size > max_capacity_) { + return AllocationResult::Retry(identity()); + } + + LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage( + object_size, this, executable); + if (page == NULL) return AllocationResult::Retry(identity()); + DCHECK(page->area_size() >= object_size); + + size_ += static_cast<int>(page->size()); + objects_size_ += object_size; + page_count_++; + page->set_next_page(first_page_); + first_page_ = page; + + if (size_ > maximum_committed_) { + maximum_committed_ = size_; + } + + // Register all MemoryChunk::kAlignment-aligned chunks covered by + // this large page in the chunk map. 
+ uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; + uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; + for (uintptr_t key = base; key <= limit; key++) { + HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), + static_cast<uint32_t>(key), true); + DCHECK(entry != NULL); + entry->value = page; + } + + HeapObject* object = page->GetObject(); + + if (Heap::ShouldZapGarbage()) { + // Make the object consistent so the heap can be verified in OldSpaceStep. + // We only need to do this in debug builds or if verify_heap is on. + reinterpret_cast<Object**>(object->address())[0] = + heap()->fixed_array_map(); + reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); + } + + heap()->incremental_marking()->OldSpaceStep(object_size); + return object; +} + + +size_t LargeObjectSpace::CommittedPhysicalMemory() { + if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); + size_t size = 0; + LargePage* current = first_page_; + while (current != NULL) { + size += current->CommittedPhysicalMemory(); + current = current->next_page(); + } + return size; +} + + +// GC support +Object* LargeObjectSpace::FindObject(Address a) { + LargePage* page = FindPage(a); + if (page != NULL) { + return page->GetObject(); + } + return Smi::FromInt(0); // Signaling not found. 
+} + + +LargePage* LargeObjectSpace::FindPage(Address a) { + uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; + HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), + static_cast<uint32_t>(key), false); + if (e != NULL) { + DCHECK(e->value != NULL); + LargePage* page = reinterpret_cast<LargePage*>(e->value); + DCHECK(page->is_valid()); + if (page->Contains(a)) { + return page; + } + } + return NULL; +} + + +void LargeObjectSpace::FreeUnmarkedObjects() { + LargePage* previous = NULL; + LargePage* current = first_page_; + while (current != NULL) { + HeapObject* object = current->GetObject(); + // Can this large page contain pointers to non-trivial objects. No other + // pointer object is this big. + bool is_pointer_object = object->IsFixedArray(); + MarkBit mark_bit = Marking::MarkBitFrom(object); + if (mark_bit.Get()) { + mark_bit.Clear(); + Page::FromAddress(object->address())->ResetProgressBar(); + Page::FromAddress(object->address())->ResetLiveBytes(); + previous = current; + current = current->next_page(); + } else { + LargePage* page = current; + // Cut the chunk out from the chunk list. + current = current->next_page(); + if (previous == NULL) { + first_page_ = current; + } else { + previous->set_next_page(current); + } + + // Free the chunk. + heap()->mark_compact_collector()->ReportDeleteIfNeeded(object, + heap()->isolate()); + size_ -= static_cast<int>(page->size()); + objects_size_ -= object->Size(); + page_count_--; + + // Remove entries belonging to this page. + // Use variable alignment to help pass length check (<= 80 characters) + // of single line in tools/presubmit.py. 
+ const intptr_t alignment = MemoryChunk::kAlignment; + uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment; + uintptr_t limit = base + (page->size() - 1) / alignment; + for (uintptr_t key = base; key <= limit; key++) { + chunk_map_.Remove(reinterpret_cast<void*>(key), + static_cast<uint32_t>(key)); + } + + if (is_pointer_object) { + heap()->QueueMemoryChunkForFree(page); + } else { + heap()->isolate()->memory_allocator()->Free(page); + } + } + } + heap()->FreeQueuedChunks(); +} + + +bool LargeObjectSpace::Contains(HeapObject* object) { + Address address = object->address(); + MemoryChunk* chunk = MemoryChunk::FromAddress(address); + + bool owned = (chunk->owner() == this); + + SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject()); + + return owned; +} + + +#ifdef VERIFY_HEAP +// We do not assume that the large object iterator works, because it depends +// on the invariants we are checking during verification. +void LargeObjectSpace::Verify() { + for (LargePage* chunk = first_page_; chunk != NULL; + chunk = chunk->next_page()) { + // Each chunk contains an object that starts at the large object page's + // object area start. + HeapObject* object = chunk->GetObject(); + Page* page = Page::FromAddress(object->address()); + CHECK(object->address() == page->area_start()); + + // The first word should be a map, and we expect all map pointers to be + // in map space. + Map* map = object->map(); + CHECK(map->IsMap()); + CHECK(heap()->map_space()->Contains(map)); + + // We have only code, sequential strings, external strings + // (sequential strings that have been morphed into external + // strings), fixed arrays, byte arrays, and constant pool arrays in the + // large object space. + CHECK(object->IsCode() || object->IsSeqString() || + object->IsExternalString() || object->IsFixedArray() || + object->IsFixedDoubleArray() || object->IsByteArray() || + object->IsConstantPoolArray()); + + // The object itself should look OK. 
+ object->ObjectVerify(); + + // Byte arrays and strings don't have interior pointers. + if (object->IsCode()) { + VerifyPointersVisitor code_visitor; + object->IterateBody(map->instance_type(), object->Size(), &code_visitor); + } else if (object->IsFixedArray()) { + FixedArray* array = FixedArray::cast(object); + for (int j = 0; j < array->length(); j++) { + Object* element = array->get(j); + if (element->IsHeapObject()) { + HeapObject* element_object = HeapObject::cast(element); + CHECK(heap()->Contains(element_object)); + CHECK(element_object->map()->IsMap()); + } + } + } + } +} +#endif + + +#ifdef DEBUG +void LargeObjectSpace::Print() { + OFStream os(stdout); + LargeObjectIterator it(this); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + obj->Print(os); + } +} + + +void LargeObjectSpace::ReportStatistics() { + PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); + int num_objects = 0; + ClearHistograms(heap()->isolate()); + LargeObjectIterator it(this); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + num_objects++; + CollectHistogramInfo(obj); + } + + PrintF( + " number of objects %d, " + "size of objects %" V8_PTR_PREFIX "d\n", + num_objects, objects_size_); + if (num_objects > 0) ReportHistogram(heap()->isolate(), false); +} + + +void LargeObjectSpace::CollectCodeStatistics() { + Isolate* isolate = heap()->isolate(); + LargeObjectIterator obj_it(this); + for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { + if (obj->IsCode()) { + Code* code = Code::cast(obj); + isolate->code_kind_statistics()[code->kind()] += code->Size(); + } + } +} + + +void Page::Print() { + // Make a best-effort to print the objects in the page. 
+ PrintF("Page@%p in %s\n", this->address(), + AllocationSpaceName(this->owner()->identity())); + printf(" --------------------------------------\n"); + HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); + unsigned mark_size = 0; + for (HeapObject* object = objects.Next(); object != NULL; + object = objects.Next()) { + bool is_marked = Marking::MarkBitFrom(object).Get(); + PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. + if (is_marked) { + mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); + } + object->ShortPrint(); + PrintF("\n"); + } + printf(" --------------------------------------\n"); + printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); +} + +#endif // DEBUG +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/spaces.h nodejs-0.11.15/deps/v8/src/heap/spaces.h --- nodejs-0.11.13/deps/v8/src/heap/spaces.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/spaces.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2897 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_SPACES_H_ +#define V8_HEAP_SPACES_H_ + +#include "src/allocation.h" +#include "src/base/atomicops.h" +#include "src/base/platform/mutex.h" +#include "src/hashmap.h" +#include "src/list.h" +#include "src/log.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { + +class Isolate; + +// ----------------------------------------------------------------------------- +// Heap structures: +// +// A JS heap consists of a young generation, an old generation, and a large +// object space. The young generation is divided into two semispaces. A +// scavenger implements Cheney's copying algorithm. The old generation is +// separated into a map space and an old object space. 
The map space contains +// all (and only) map objects, the rest of old objects go into the old space. +// The old generation is collected by a mark-sweep-compact collector. +// +// The semispaces of the young generation are contiguous. The old and map +// spaces consists of a list of pages. A page has a page header and an object +// area. +// +// There is a separate large object space for objects larger than +// Page::kMaxHeapObjectSize, so that they do not have to move during +// collection. The large object space is paged. Pages in large object space +// may be larger than the page size. +// +// A store-buffer based write barrier is used to keep track of intergenerational +// references. See heap/store-buffer.h. +// +// During scavenges and mark-sweep collections we sometimes (after a store +// buffer overflow) iterate intergenerational pointers without decoding heap +// object maps so if the page belongs to old pointer space or large object +// space it is essential to guarantee that the page does not contain any +// garbage pointers to new space: every pointer aligned word which satisfies +// the Heap::InNewSpace() predicate must be a pointer to a live heap object in +// new space. Thus objects in old pointer and large object spaces should have a +// special layout (e.g. no bare integer fields). This requirement does not +// apply to map space which is iterated in a special fashion. However we still +// require pointer fields of dead maps to be cleaned. +// +// To enable lazy cleaning of old space pages we can mark chunks of the page +// as being garbage. Garbage sections are marked with a special map. These +// sections are skipped when scanning the page, even if we are otherwise +// scanning without regard for object boundaries. Garbage sections are chained +// together to form a free list after a GC. Garbage sections created outside +// of GCs by object trunctation etc. may not be in the free list chain. 
Very +// small free spaces are ignored, they need only be cleaned of bogus pointers +// into new space. +// +// Each page may have up to one special garbage section. The start of this +// section is denoted by the top field in the space. The end of the section +// is denoted by the limit field in the space. This special garbage section +// is not marked with a free space map in the data. The point of this section +// is to enable linear allocation without having to constantly update the byte +// array every time the top field is updated and a new object is created. The +// special garbage section is not in the chain of garbage sections. +// +// Since the top and limit fields are in the space, not the page, only one page +// has a special garbage section, and if the top and limit are equal then there +// is no special garbage section. + +// Some assertion macros used in the debugging mode. + +#define DCHECK_PAGE_ALIGNED(address) \ + DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) + +#define DCHECK_OBJECT_ALIGNED(address) \ + DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0) + +#define DCHECK_OBJECT_SIZE(size) \ + DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) + +#define DCHECK_PAGE_OFFSET(offset) \ + DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize)) + +#define DCHECK_MAP_PAGE_INDEX(index) \ + DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) + + +class PagedSpace; +class MemoryAllocator; +class AllocationInfo; +class Space; +class FreeList; +class MemoryChunk; + +class MarkBit { + public: + typedef uint32_t CellType; + + inline MarkBit(CellType* cell, CellType mask, bool data_only) + : cell_(cell), mask_(mask), data_only_(data_only) {} + + inline CellType* cell() { return cell_; } + inline CellType mask() { return mask_; } + +#ifdef DEBUG + bool operator==(const MarkBit& other) { + return cell_ == other.cell_ && mask_ == other.mask_; + } +#endif + + inline void Set() { *cell_ |= mask_; } + 
inline bool Get() { return (*cell_ & mask_) != 0; } + inline void Clear() { *cell_ &= ~mask_; } + + inline bool data_only() { return data_only_; } + + inline MarkBit Next() { + CellType new_mask = mask_ << 1; + if (new_mask == 0) { + return MarkBit(cell_ + 1, 1, data_only_); + } else { + return MarkBit(cell_, new_mask, data_only_); + } + } + + private: + CellType* cell_; + CellType mask_; + // This boolean indicates that the object is in a data-only space with no + // pointers. This enables some optimizations when marking. + // It is expected that this field is inlined and turned into control flow + // at the place where the MarkBit object is created. + bool data_only_; +}; + + +// Bitmap is a sequence of cells each containing fixed number of bits. +class Bitmap { + public: + static const uint32_t kBitsPerCell = 32; + static const uint32_t kBitsPerCellLog2 = 5; + static const uint32_t kBitIndexMask = kBitsPerCell - 1; + static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; + static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; + + static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2); + + static const size_t kSize = + (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); + + + static int CellsForLength(int length) { + return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; + } + + int CellsCount() { return CellsForLength(kLength); } + + static int SizeFor(int cells_count) { + return sizeof(MarkBit::CellType) * cells_count; + } + + INLINE(static uint32_t IndexToCell(uint32_t index)) { + return index >> kBitsPerCellLog2; + } + + INLINE(static uint32_t CellToIndex(uint32_t index)) { + return index << kBitsPerCellLog2; + } + + INLINE(static uint32_t CellAlignIndex(uint32_t index)) { + return (index + kBitIndexMask) & ~kBitIndexMask; + } + + INLINE(MarkBit::CellType* cells()) { + return reinterpret_cast<MarkBit::CellType*>(this); + } + + INLINE(Address address()) { return reinterpret_cast<Address>(this); 
} + + INLINE(static Bitmap* FromAddress(Address addr)) { + return reinterpret_cast<Bitmap*>(addr); + } + + inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { + MarkBit::CellType mask = 1 << (index & kBitIndexMask); + MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); + return MarkBit(cell, mask, data_only); + } + + static inline void Clear(MemoryChunk* chunk); + + static void PrintWord(uint32_t word, uint32_t himask = 0) { + for (uint32_t mask = 1; mask != 0; mask <<= 1) { + if ((mask & himask) != 0) PrintF("["); + PrintF((mask & word) ? "1" : "0"); + if ((mask & himask) != 0) PrintF("]"); + } + } + + class CellPrinter { + public: + CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {} + + void Print(uint32_t pos, uint32_t cell) { + if (cell == seq_type) { + seq_length++; + return; + } + + Flush(); + + if (IsSeq(cell)) { + seq_start = pos; + seq_length = 0; + seq_type = cell; + return; + } + + PrintF("%d: ", pos); + PrintWord(cell); + PrintF("\n"); + } + + void Flush() { + if (seq_length > 0) { + PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1, + seq_length * kBitsPerCell); + seq_length = 0; + } + } + + static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } + + private: + uint32_t seq_start; + uint32_t seq_type; + uint32_t seq_length; + }; + + void Print() { + CellPrinter printer; + for (int i = 0; i < CellsCount(); i++) { + printer.Print(i, cells()[i]); + } + printer.Flush(); + PrintF("\n"); + } + + bool IsClean() { + for (int i = 0; i < CellsCount(); i++) { + if (cells()[i] != 0) { + return false; + } + } + return true; + } +}; + + +class SkipList; +class SlotsBuffer; + +// MemoryChunk represents a memory region owned by a specific space. +// It is divided into the header and the body. Chunk start is always +// 1MB aligned. Start of the body is aligned so it can accommodate +// any heap object. 
+class MemoryChunk { + public: + // Only works if the pointer is in the first kPageSize of the MemoryChunk. + static MemoryChunk* FromAddress(Address a) { + return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); + } + static const MemoryChunk* FromAddress(const byte* a) { + return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & + ~kAlignmentMask); + } + + // Only works for addresses in pointer spaces, not data or code spaces. + static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); + + Address address() { return reinterpret_cast<Address>(this); } + + bool is_valid() { return address() != NULL; } + + MemoryChunk* next_chunk() const { + return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_)); + } + + MemoryChunk* prev_chunk() const { + return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_)); + } + + void set_next_chunk(MemoryChunk* next) { + base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next)); + } + + void set_prev_chunk(MemoryChunk* prev) { + base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev)); + } + + Space* owner() const { + if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == + kPageHeaderTag) { + return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - + kPageHeaderTag); + } else { + return NULL; + } + } + + void set_owner(Space* space) { + DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); + owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; + DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == + kPageHeaderTag); + } + + base::VirtualMemory* reserved_memory() { return &reservation_; } + + void InitializeReservedMemory() { reservation_.Reset(); } + + void set_reserved_memory(base::VirtualMemory* reservation) { + DCHECK_NOT_NULL(reservation); + reservation_.TakeControl(reservation); + } + + bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } + void 
initialize_scan_on_scavenge(bool scan) { + if (scan) { + SetFlag(SCAN_ON_SCAVENGE); + } else { + ClearFlag(SCAN_ON_SCAVENGE); + } + } + inline void set_scan_on_scavenge(bool scan); + + int store_buffer_counter() { return store_buffer_counter_; } + void set_store_buffer_counter(int counter) { + store_buffer_counter_ = counter; + } + + bool Contains(Address addr) { + return addr >= area_start() && addr < area_end(); + } + + // Checks whether addr can be a limit of addresses in this page. + // It's a limit if it's in the page, or if it's just after the + // last byte of the page. + bool ContainsLimit(Address addr) { + return addr >= area_start() && addr <= area_end(); + } + + // Every n write barrier invocations we go to runtime even though + // we could have handled it in generated code. This lets us check + // whether we have hit the limit and should do some more marking. + static const int kWriteBarrierCounterGranularity = 500; + + enum MemoryChunkFlags { + IS_EXECUTABLE, + ABOUT_TO_BE_FREED, + POINTERS_TO_HERE_ARE_INTERESTING, + POINTERS_FROM_HERE_ARE_INTERESTING, + SCAN_ON_SCAVENGE, + IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. + IN_TO_SPACE, // All pages in new space has one of these two set. + NEW_SPACE_BELOW_AGE_MARK, + CONTAINS_ONLY_DATA, + EVACUATION_CANDIDATE, + RESCAN_ON_EVACUATION, + + // Pages swept precisely can be iterated, hitting only the live objects. + // Whereas those swept conservatively cannot be iterated over. Both flags + // indicate that marking bits have been cleared by the sweeper, otherwise + // marking bits are still intact. + WAS_SWEPT_PRECISELY, + WAS_SWEPT_CONSERVATIVELY, + + // Large objects can have a progress bar in their page header. These object + // are scanned in increments and will be kept black while being scanned. + // Even if the mutator writes to them they will be kept black and a white + // to grey transition is performed in the value. + HAS_PROGRESS_BAR, + + // Last flag, keep at bottom. 
+ NUM_MEMORY_CHUNK_FLAGS + }; + + + static const int kPointersToHereAreInterestingMask = + 1 << POINTERS_TO_HERE_ARE_INTERESTING; + + static const int kPointersFromHereAreInterestingMask = + 1 << POINTERS_FROM_HERE_ARE_INTERESTING; + + static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE; + + static const int kSkipEvacuationSlotsRecordingMask = + (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) | + (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE); + + + void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; } + + void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); } + + void SetFlagTo(int flag, bool value) { + if (value) { + SetFlag(flag); + } else { + ClearFlag(flag); + } + } + + bool IsFlagSet(int flag) { + return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; + } + + // Set or clear multiple flags at a time. The flags in the mask + // are set to the value in "flags", the rest retain the current value + // in flags_. + void SetFlags(intptr_t flags, intptr_t mask) { + flags_ = (flags_ & ~mask) | (flags & mask); + } + + // Return all current flags. + intptr_t GetFlags() { return flags_; } + + + // SWEEPING_DONE - The page state when sweeping is complete or sweeping must + // not be performed on that page. + // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will + // not touch the page memory anymore. + // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread. + // SWEEPING_PENDING - This page is ready for parallel sweeping. 
+ enum ParallelSweepingState { + SWEEPING_DONE, + SWEEPING_FINALIZE, + SWEEPING_IN_PROGRESS, + SWEEPING_PENDING + }; + + ParallelSweepingState parallel_sweeping() { + return static_cast<ParallelSweepingState>( + base::Acquire_Load(¶llel_sweeping_)); + } + + void set_parallel_sweeping(ParallelSweepingState state) { + base::Release_Store(¶llel_sweeping_, state); + } + + bool TryParallelSweeping() { + return base::Acquire_CompareAndSwap(¶llel_sweeping_, SWEEPING_PENDING, + SWEEPING_IN_PROGRESS) == + SWEEPING_PENDING; + } + + bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } + + // Manage live byte count (count of bytes known to be live, + // because they are marked black). + void ResetLiveBytes() { + if (FLAG_gc_verbose) { + PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), + live_byte_count_); + } + live_byte_count_ = 0; + } + void IncrementLiveBytes(int by) { + if (FLAG_gc_verbose) { + printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this), + live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? 
-by : by), + live_byte_count_ + by); + } + live_byte_count_ += by; + DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); + } + int LiveBytes() { + DCHECK(static_cast<unsigned>(live_byte_count_) <= size_); + return live_byte_count_; + } + + int write_barrier_counter() { + return static_cast<int>(write_barrier_counter_); + } + + void set_write_barrier_counter(int counter) { + write_barrier_counter_ = counter; + } + + int progress_bar() { + DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); + return progress_bar_; + } + + void set_progress_bar(int progress_bar) { + DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); + progress_bar_ = progress_bar; + } + + void ResetProgressBar() { + if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + set_progress_bar(0); + ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); + } + } + + bool IsLeftOfProgressBar(Object** slot) { + Address slot_address = reinterpret_cast<Address>(slot); + DCHECK(slot_address > this->address()); + return (slot_address - (this->address() + kObjectStartOffset)) < + progress_bar(); + } + + static void IncrementLiveBytesFromGC(Address address, int by) { + MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); + } + + static void IncrementLiveBytesFromMutator(Address address, int by); + + static const intptr_t kAlignment = + (static_cast<uintptr_t>(1) << kPageSizeBits); + + static const intptr_t kAlignmentMask = kAlignment - 1; + + static const intptr_t kSizeOffset = 0; + + static const intptr_t kLiveBytesOffset = + kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + + kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; + + static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; + + static const size_t kWriteBarrierCounterOffset = + kSlotsBufferOffset + kPointerSize + kPointerSize; + + static const size_t kHeaderSize = + kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize + + kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize; + + static const int kBodyOffset = + 
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); + + // The start offset of the object area in a page. Aligned to both maps and + // code alignment to be suitable for both. Also aligned to 32 words because + // the marking bitmap is arranged in 32 bit chunks. + static const int kObjectStartAlignment = 32 * kPointerSize; + static const int kObjectStartOffset = + kBodyOffset - 1 + + (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); + + size_t size() const { return size_; } + + void set_size(size_t size) { size_ = size; } + + void SetArea(Address area_start, Address area_end) { + area_start_ = area_start; + area_end_ = area_end; + } + + Executability executable() { + return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; + } + + bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); } + + bool InNewSpace() { + return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; + } + + bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } + + bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } + + // --------------------------------------------------------------------- + // Markbits support + + inline Bitmap* markbits() { + return Bitmap::FromAddress(address() + kHeaderSize); + } + + void PrintMarkbits() { markbits()->Print(); } + + inline uint32_t AddressToMarkbitIndex(Address addr) { + return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; + } + + inline static uint32_t FastAddressToMarkbitIndex(Address addr) { + const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; + + return static_cast<uint32_t>(offset) >> kPointerSizeLog2; + } + + inline Address MarkbitIndexToAddress(uint32_t index) { + return this->address() + (index << kPointerSizeLog2); + } + + void InsertAfter(MemoryChunk* other); + void Unlink(); + + inline Heap* heap() const { return heap_; } + + static const int kFlagsOffset = kPointerSize; + + bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } + + bool 
ShouldSkipEvacuationSlotRecording() { + return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; + } + + inline SkipList* skip_list() { return skip_list_; } + + inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } + + inline SlotsBuffer* slots_buffer() { return slots_buffer_; } + + inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } + + void MarkEvacuationCandidate() { + DCHECK(slots_buffer_ == NULL); + SetFlag(EVACUATION_CANDIDATE); + } + + void ClearEvacuationCandidate() { + DCHECK(slots_buffer_ == NULL); + ClearFlag(EVACUATION_CANDIDATE); + } + + Address area_start() { return area_start_; } + Address area_end() { return area_end_; } + int area_size() { return static_cast<int>(area_end() - area_start()); } + bool CommitArea(size_t requested); + + // Approximate amount of physical memory committed for this chunk. + size_t CommittedPhysicalMemory() { return high_water_mark_; } + + static inline void UpdateHighWaterMark(Address mark); + + protected: + size_t size_; + intptr_t flags_; + + // Start and end of allocatable memory on this chunk. + Address area_start_; + Address area_end_; + + // If the chunk needs to remember its memory reservation, it is stored here. + base::VirtualMemory reservation_; + // The identity of the owning space. This is tagged as a failure pointer, but + // no failure can be in an object, so this can be distinguished from any entry + // in a fixed array. + Address owner_; + Heap* heap_; + // Used by the store buffer to keep track of which pages to mark scan-on- + // scavenge. + int store_buffer_counter_; + // Count of bytes marked black on page. + int live_byte_count_; + SlotsBuffer* slots_buffer_; + SkipList* skip_list_; + intptr_t write_barrier_counter_; + // Used by the incremental marker to keep track of the scanning progress in + // large objects that have a progress bar and are scanned in increments. 
+ int progress_bar_; + // Assuming the initial allocation on a page is sequential, + // count highest number of bytes ever allocated on the page. + int high_water_mark_; + + base::AtomicWord parallel_sweeping_; + + // PagedSpace free-list statistics. + intptr_t available_in_small_free_list_; + intptr_t available_in_medium_free_list_; + intptr_t available_in_large_free_list_; + intptr_t available_in_huge_free_list_; + intptr_t non_available_small_blocks_; + + static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, + Address area_start, Address area_end, + Executability executable, Space* owner); + + private: + // next_chunk_ holds a pointer of type MemoryChunk + base::AtomicWord next_chunk_; + // prev_chunk_ holds a pointer of type MemoryChunk + base::AtomicWord prev_chunk_; + + friend class MemoryAllocator; +}; + + +STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); + + +// ----------------------------------------------------------------------------- +// A page is a memory chunk of a size 1MB. Large object pages may be larger. +// +// The only way to get a page pointer is by calling factory methods: +// Page* p = Page::FromAddress(addr); or +// Page* p = Page::FromAllocationTop(top); +class Page : public MemoryChunk { + public: + // Returns the page containing a given address. The address ranges + // from [page_addr .. page_addr + kPageSize[ + // This only works if the object is in fact in a page. See also MemoryChunk:: + // FromAddress() and FromAnyAddress(). + INLINE(static Page* FromAddress(Address a)) { + return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); + } + + // Returns the page containing an allocation top. Because an allocation + // top address can be the upper bound of the page, we need to subtract + // it with kPointerSize first. The address ranges from + // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. 
+ INLINE(static Page* FromAllocationTop(Address top)) { + Page* p = FromAddress(top - kPointerSize); + return p; + } + + // Returns the next page in the chain of pages owned by a space. + inline Page* next_page(); + inline Page* prev_page(); + inline void set_next_page(Page* page); + inline void set_prev_page(Page* page); + + // Checks whether an address is page aligned. + static bool IsAlignedToPageSize(Address a) { + return 0 == (OffsetFrom(a) & kPageAlignmentMask); + } + + // Returns the offset of a given address to this page. + INLINE(int Offset(Address a)) { + int offset = static_cast<int>(a - address()); + return offset; + } + + // Returns the address for a given offset to the this page. + Address OffsetToAddress(int offset) { + DCHECK_PAGE_OFFSET(offset); + return address() + offset; + } + + // --------------------------------------------------------------------- + + // Page size in bytes. This must be a multiple of the OS page size. + static const int kPageSize = 1 << kPageSizeBits; + + // Maximum object size that fits in a page. Objects larger than that size + // are allocated in large object space and are never moved in memory. This + // also applies to new space allocation, since objects are never migrated + // from new space to large object space. Takes double alignment into account. + static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset; + + // Page size mask. 
+ static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; + + inline void ClearGCFields(); + + static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, + Executability executable, PagedSpace* owner); + + void InitializeAsAnchor(PagedSpace* owner); + + bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } + bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } + bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } + + void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } + void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } + + void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } + void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } + + void ResetFreeListStatistics(); + +#define FRAGMENTATION_STATS_ACCESSORS(type, name) \ + type name() { return name##_; } \ + void set_##name(type name) { name##_ = name; } \ + void add_##name(type name) { name##_ += name; } + + FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks) + FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list) + FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list) + FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list) + FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list) + +#undef FRAGMENTATION_STATS_ACCESSORS + +#ifdef DEBUG + void Print(); +#endif // DEBUG + + friend class MemoryAllocator; +}; + + +STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); + + +class LargePage : public MemoryChunk { + public: + HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } + + inline LargePage* next_page() const { + return static_cast<LargePage*>(next_chunk()); + } + + inline void set_next_page(LargePage* page) { set_next_chunk(page); } + + private: + static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); + + friend class MemoryAllocator; +}; + 
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); + +// ---------------------------------------------------------------------------- +// Space is the abstract superclass for all allocation spaces. +class Space : public Malloced { + public: + Space(Heap* heap, AllocationSpace id, Executability executable) + : heap_(heap), id_(id), executable_(executable) {} + + virtual ~Space() {} + + Heap* heap() const { return heap_; } + + // Does the space need executable memory? + Executability executable() { return executable_; } + + // Identity used in error reporting. + AllocationSpace identity() { return id_; } + + // Returns allocated size. + virtual intptr_t Size() = 0; + + // Returns size of objects. Can differ from the allocated size + // (e.g. see LargeObjectSpace). + virtual intptr_t SizeOfObjects() { return Size(); } + + virtual int RoundSizeDownToObjectAlignment(int size) { + if (id_ == CODE_SPACE) { + return RoundDown(size, kCodeAlignment); + } else { + return RoundDown(size, kPointerSize); + } + } + +#ifdef DEBUG + virtual void Print() = 0; +#endif + + private: + Heap* heap_; + AllocationSpace id_; + Executability executable_; +}; + + +// ---------------------------------------------------------------------------- +// All heap objects containing executable code (code objects) must be allocated +// from a 2 GB range of memory, so that they can call each other using 32-bit +// displacements. This happens automatically on 32-bit platforms, where 32-bit +// displacements cover the entire 4GB virtual address space. On 64-bit +// platforms, we support this using the CodeRange object, which reserves and +// manages a range of virtual memory. +class CodeRange { + public: + explicit CodeRange(Isolate* isolate); + ~CodeRange() { TearDown(); } + + // Reserves a range of virtual memory, but does not commit any of it. + // Can only be called once, at heap initialization time. + // Returns false on failure. 
+ bool SetUp(size_t requested_size); + + // Frees the range of virtual memory, and frees the data structures used to + // manage it. + void TearDown(); + + bool valid() { return code_range_ != NULL; } + Address start() { + DCHECK(valid()); + return static_cast<Address>(code_range_->address()); + } + bool contains(Address address) { + if (!valid()) return false; + Address start = static_cast<Address>(code_range_->address()); + return start <= address && address < start + code_range_->size(); + } + + // Allocates a chunk of memory from the large-object portion of + // the code range. On platforms with no separate code range, should + // not be called. + MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, + const size_t commit_size, + size_t* allocated); + bool CommitRawMemory(Address start, size_t length); + bool UncommitRawMemory(Address start, size_t length); + void FreeRawMemory(Address buf, size_t length); + + private: + Isolate* isolate_; + + // The reserved range of virtual memory that all code objects are put in. + base::VirtualMemory* code_range_; + // Plain old data class, just a struct plus a constructor. + class FreeBlock { + public: + FreeBlock(Address start_arg, size_t size_arg) + : start(start_arg), size(size_arg) { + DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); + DCHECK(size >= static_cast<size_t>(Page::kPageSize)); + } + FreeBlock(void* start_arg, size_t size_arg) + : start(static_cast<Address>(start_arg)), size(size_arg) { + DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); + DCHECK(size >= static_cast<size_t>(Page::kPageSize)); + } + + Address start; + size_t size; + }; + + // Freed blocks of memory are added to the free list. When the allocation + // list is exhausted, the free list is sorted and merged to make the new + // allocation list. + List<FreeBlock> free_list_; + // Memory is allocated from the free blocks on the allocation list. + // The block at current_allocation_block_index_ is the current block. 
+ List<FreeBlock> allocation_list_; + int current_allocation_block_index_; + + // Finds a block on the allocation list that contains at least the + // requested amount of memory. If none is found, sorts and merges + // the existing free memory blocks, and searches again. + // If none can be found, returns false. + bool GetNextAllocationBlock(size_t requested); + // Compares the start addresses of two free blocks. + static int CompareFreeBlockAddress(const FreeBlock* left, + const FreeBlock* right); + + DISALLOW_COPY_AND_ASSIGN(CodeRange); +}; + + +class SkipList { + public: + SkipList() { Clear(); } + + void Clear() { + for (int idx = 0; idx < kSize; idx++) { + starts_[idx] = reinterpret_cast<Address>(-1); + } + } + + Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; } + + void AddObject(Address addr, int size) { + int start_region = RegionNumber(addr); + int end_region = RegionNumber(addr + size - kPointerSize); + for (int idx = start_region; idx <= end_region; idx++) { + if (starts_[idx] > addr) starts_[idx] = addr; + } + } + + static inline int RegionNumber(Address addr) { + return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; + } + + static void Update(Address addr, int size) { + Page* page = Page::FromAddress(addr); + SkipList* list = page->skip_list(); + if (list == NULL) { + list = new SkipList(); + page->set_skip_list(list); + } + + list->AddObject(addr, size); + } + + private: + static const int kRegionSizeLog2 = 13; + static const int kRegionSize = 1 << kRegionSizeLog2; + static const int kSize = Page::kPageSize / kRegionSize; + + STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); + + Address starts_[kSize]; +}; + + +// ---------------------------------------------------------------------------- +// A space acquires chunks of memory from the operating system. The memory +// allocator allocated and deallocates pages for the paged heap spaces and large +// pages for large object space. 
+// +// Each space has to manage it's own pages. +// +class MemoryAllocator { + public: + explicit MemoryAllocator(Isolate* isolate); + + // Initializes its internal bookkeeping structures. + // Max capacity of the total space and executable memory limit. + bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); + + void TearDown(); + + Page* AllocatePage(intptr_t size, PagedSpace* owner, + Executability executable); + + LargePage* AllocateLargePage(intptr_t object_size, Space* owner, + Executability executable); + + void Free(MemoryChunk* chunk); + + // Returns the maximum available bytes of heaps. + intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } + + // Returns allocated spaces in bytes. + intptr_t Size() { return size_; } + + // Returns the maximum available executable bytes of heaps. + intptr_t AvailableExecutable() { + if (capacity_executable_ < size_executable_) return 0; + return capacity_executable_ - size_executable_; + } + + // Returns allocated executable spaces in bytes. + intptr_t SizeExecutable() { return size_executable_; } + + // Returns maximum available bytes that the old space can have. + intptr_t MaxAvailable() { + return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize; + } + + // Returns an indication of whether a pointer is in a space that has + // been allocated by this MemoryAllocator. + V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { + return address < lowest_ever_allocated_ || + address >= highest_ever_allocated_; + } + +#ifdef DEBUG + // Reports statistic info of the space. + void ReportStatistics(); +#endif + + // Returns a MemoryChunk in which the memory region from commit_area_size to + // reserve_area_size of the chunk area is reserved but not committed, it + // could be committed later by calling MemoryChunk::CommitArea. 
+ MemoryChunk* AllocateChunk(intptr_t reserve_area_size, + intptr_t commit_area_size, + Executability executable, Space* space); + + Address ReserveAlignedMemory(size_t requested, size_t alignment, + base::VirtualMemory* controller); + Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, + size_t alignment, Executability executable, + base::VirtualMemory* controller); + + bool CommitMemory(Address addr, size_t size, Executability executable); + + void FreeMemory(base::VirtualMemory* reservation, Executability executable); + void FreeMemory(Address addr, size_t size, Executability executable); + + // Commit a contiguous block of memory from the initial chunk. Assumes that + // the address is not NULL, the size is greater than zero, and that the + // block is contained in the initial chunk. Returns true if it succeeded + // and false otherwise. + bool CommitBlock(Address start, size_t size, Executability executable); + + // Uncommit a contiguous block of memory [start..(start+size)[. + // start is not NULL, the size is greater than zero, and the + // block is contained in the initial chunk. Returns true if it succeeded + // and false otherwise. + bool UncommitBlock(Address start, size_t size); + + // Zaps a contiguous block of memory [start..(start+size)[ thus + // filling it up with a recognizable non-NULL bit pattern. 
+ void ZapBlock(Address start, size_t size); + + void PerformAllocationCallback(ObjectSpace space, AllocationAction action, + size_t size); + + void AddMemoryAllocationCallback(MemoryAllocationCallback callback, + ObjectSpace space, AllocationAction action); + + void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); + + bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); + + static int CodePageGuardStartOffset(); + + static int CodePageGuardSize(); + + static int CodePageAreaStartOffset(); + + static int CodePageAreaEndOffset(); + + static int CodePageAreaSize() { + return CodePageAreaEndOffset() - CodePageAreaStartOffset(); + } + + MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, + Address start, size_t commit_size, + size_t reserved_size); + + private: + Isolate* isolate_; + + // Maximum space size in bytes. + size_t capacity_; + // Maximum subset of capacity_ that can be executable + size_t capacity_executable_; + + // Allocated space size in bytes. + size_t size_; + // Allocated executable space size in bytes. + size_t size_executable_; + + // We keep the lowest and highest addresses allocated as a quick way + // of determining that pointers are outside the heap. The estimate is + // conservative, i.e. not all addrsses in 'allocated' space are allocated + // to our heap. The range is [lowest, highest[, inclusive on the low end + // and exclusive on the high end. 
+ void* lowest_ever_allocated_; + void* highest_ever_allocated_; + + struct MemoryAllocationCallbackRegistration { + MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, + ObjectSpace space, + AllocationAction action) + : callback(callback), space(space), action(action) {} + MemoryAllocationCallback callback; + ObjectSpace space; + AllocationAction action; + }; + + // A List of callback that are triggered when memory is allocated or free'd + List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_; + + // Initializes pages in a chunk. Returns the first page address. + // This function and GetChunkId() are provided for the mark-compact + // collector to rebuild page headers in the from space, which is + // used as a marking stack and its page headers are destroyed. + Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, + PagedSpace* owner); + + void UpdateAllocatedSpaceLimits(void* low, void* high) { + lowest_ever_allocated_ = Min(lowest_ever_allocated_, low); + highest_ever_allocated_ = Max(highest_ever_allocated_, high); + } + + DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); +}; + + +// ----------------------------------------------------------------------------- +// Interface for heap object iterator to be implemented by all object space +// object iterators. +// +// NOTE: The space specific object iterators also implements the own next() +// method which is used to avoid using virtual functions +// iterating a specific space. + +class ObjectIterator : public Malloced { + public: + virtual ~ObjectIterator() {} + + virtual HeapObject* next_object() = 0; +}; + + +// ----------------------------------------------------------------------------- +// Heap object iterator in new/old/map spaces. +// +// A HeapObjectIterator iterates objects from the bottom of the given space +// to its top or from the bottom of the given page to its top. 
+// +// If objects are allocated in the page during iteration the iterator may +// or may not iterate over those objects. The caller must create a new +// iterator in order to be sure to visit these new objects. +class HeapObjectIterator : public ObjectIterator { + public: + // Creates a new object iterator in a given space. + // If the size function is not given, the iterator calls the default + // Object::Size(). + explicit HeapObjectIterator(PagedSpace* space); + HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); + HeapObjectIterator(Page* page, HeapObjectCallback size_func); + + // Advance to the next object, skipping free spaces and other fillers and + // skipping the special garbage section of which there is one per space. + // Returns NULL when the iteration has ended. + inline HeapObject* Next() { + do { + HeapObject* next_obj = FromCurrentPage(); + if (next_obj != NULL) return next_obj; + } while (AdvanceToNextPage()); + return NULL; + } + + virtual HeapObject* next_object() { return Next(); } + + private: + enum PageMode { kOnePageOnly, kAllPagesInSpace }; + + Address cur_addr_; // Current iteration point. + Address cur_end_; // End iteration point. + HeapObjectCallback size_func_; // Size function or NULL. + PagedSpace* space_; + PageMode page_mode_; + + // Fast (inlined) path of next(). + inline HeapObject* FromCurrentPage(); + + // Slow path of next(), goes into the next page. Returns false if the + // iteration has ended. + bool AdvanceToNextPage(); + + // Initializes fields. + inline void Initialize(PagedSpace* owner, Address start, Address end, + PageMode mode, HeapObjectCallback size_func); +}; + + +// ----------------------------------------------------------------------------- +// A PageIterator iterates the pages in a paged space. 
+ +class PageIterator BASE_EMBEDDED { + public: + explicit inline PageIterator(PagedSpace* space); + + inline bool has_next(); + inline Page* next(); + + private: + PagedSpace* space_; + Page* prev_page_; // Previous page returned. + // Next page that will be returned. Cached here so that we can use this + // iterator for operations that deallocate pages. + Page* next_page_; +}; + + +// ----------------------------------------------------------------------------- +// A space has a circular list of pages. The next page can be accessed via +// Page::next_page() call. + +// An abstraction of allocation and relocation pointers in a page-structured +// space. +class AllocationInfo { + public: + AllocationInfo() : top_(NULL), limit_(NULL) {} + + INLINE(void set_top(Address top)) { + SLOW_DCHECK(top == NULL || + (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0); + top_ = top; + } + + INLINE(Address top()) const { + SLOW_DCHECK(top_ == NULL || + (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0); + return top_; + } + + Address* top_address() { return &top_; } + + INLINE(void set_limit(Address limit)) { + SLOW_DCHECK(limit == NULL || + (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0); + limit_ = limit; + } + + INLINE(Address limit()) const { + SLOW_DCHECK(limit_ == NULL || + (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == + 0); + return limit_; + } + + Address* limit_address() { return &limit_; } + +#ifdef DEBUG + bool VerifyPagedAllocation() { + return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) && + (top_ <= limit_); + } +#endif + + private: + // Current allocation top. + Address top_; + // Current allocation limit. + Address limit_; +}; + + +// An abstraction of the accounting statistics of a page-structured space. +// The 'capacity' of a space is the number of object-area bytes (i.e., not +// including page bookkeeping structures) currently in the space. 
The 'size' +// of a space is the number of allocated bytes, the 'waste' in the space is +// the number of bytes that are not allocated and not available to +// allocation without reorganizing the space via a GC (e.g. small blocks due +// to internal fragmentation, top of page areas in map space), and the bytes +// 'available' is the number of unallocated bytes that are not waste. The +// capacity is the sum of size, waste, and available. +// +// The stats are only set by functions that ensure they stay balanced. These +// functions increase or decrease one of the non-capacity stats in +// conjunction with capacity, or else they always balance increases and +// decreases to the non-capacity stats. +class AllocationStats BASE_EMBEDDED { + public: + AllocationStats() { Clear(); } + + // Zero out all the allocation statistics (i.e., no capacity). + void Clear() { + capacity_ = 0; + max_capacity_ = 0; + size_ = 0; + waste_ = 0; + } + + void ClearSizeWaste() { + size_ = capacity_; + waste_ = 0; + } + + // Reset the allocation statistics (i.e., available = capacity with no + // wasted or allocated bytes). + void Reset() { + size_ = 0; + waste_ = 0; + } + + // Accessors for the allocation statistics. + intptr_t Capacity() { return capacity_; } + intptr_t MaxCapacity() { return max_capacity_; } + intptr_t Size() { return size_; } + intptr_t Waste() { return waste_; } + + // Grow the space by adding available bytes. They are initially marked as + // being in use (part of the size), but will normally be immediately freed, + // putting them on the free list and removing them from size_. + void ExpandSpace(int size_in_bytes) { + capacity_ += size_in_bytes; + size_ += size_in_bytes; + if (capacity_ > max_capacity_) { + max_capacity_ = capacity_; + } + DCHECK(size_ >= 0); + } + + // Shrink the space by removing available bytes. Since shrinking is done + // during sweeping, bytes have been marked as being in use (part of the size) + // and are hereby freed. 
+ void ShrinkSpace(int size_in_bytes) { + capacity_ -= size_in_bytes; + size_ -= size_in_bytes; + DCHECK(size_ >= 0); + } + + // Allocate from available bytes (available -> size). + void AllocateBytes(intptr_t size_in_bytes) { + size_ += size_in_bytes; + DCHECK(size_ >= 0); + } + + // Free allocated bytes, making them available (size -> available). + void DeallocateBytes(intptr_t size_in_bytes) { + size_ -= size_in_bytes; + DCHECK(size_ >= 0); + } + + // Waste free bytes (available -> waste). + void WasteBytes(int size_in_bytes) { + DCHECK(size_in_bytes >= 0); + waste_ += size_in_bytes; + } + + private: + intptr_t capacity_; + intptr_t max_capacity_; + intptr_t size_; + intptr_t waste_; +}; + + +// ----------------------------------------------------------------------------- +// Free lists for old object spaces +// +// Free-list nodes are free blocks in the heap. They look like heap objects +// (free-list node pointers have the heap object tag, and they have a map like +// a heap object). They have a size and a next pointer. The next pointer is +// the raw address of the next free list node (or NULL). +class FreeListNode : public HeapObject { + public: + // Obtain a free-list node from a raw address. This is not a cast because + // it does not check nor require that the first word at the address is a map + // pointer. + static FreeListNode* FromAddress(Address address) { + return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); + } + + static inline bool IsFreeListNode(HeapObject* object); + + // Set the size in bytes, which can be read with HeapObject::Size(). This + // function also writes a map to the first word of the block so that it + // looks like a heap object to the garbage collector and heap iteration + // functions. + void set_size(Heap* heap, int size_in_bytes); + + // Accessors for the next field. 
+ inline FreeListNode* next(); + inline FreeListNode** next_address(); + inline void set_next(FreeListNode* next); + + inline void Zap(); + + static inline FreeListNode* cast(Object* object) { + return reinterpret_cast<FreeListNode*>(object); + } + + private: + static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); + + DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); +}; + + +// The free list category holds a pointer to the top element and a pointer to +// the end element of the linked list of free memory blocks. +class FreeListCategory { + public: + FreeListCategory() : top_(0), end_(NULL), available_(0) {} + + intptr_t Concatenate(FreeListCategory* category); + + void Reset(); + + void Free(FreeListNode* node, int size_in_bytes); + + FreeListNode* PickNodeFromList(int* node_size); + FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size); + + intptr_t EvictFreeListItemsInList(Page* p); + bool ContainsPageFreeListItemsInList(Page* p); + + void RepairFreeList(Heap* heap); + + FreeListNode* top() const { + return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_)); + } + + void set_top(FreeListNode* top) { + base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top)); + } + + FreeListNode** GetEndAddress() { return &end_; } + FreeListNode* end() const { return end_; } + void set_end(FreeListNode* end) { end_ = end; } + + int* GetAvailableAddress() { return &available_; } + int available() const { return available_; } + void set_available(int available) { available_ = available; } + + base::Mutex* mutex() { return &mutex_; } + + bool IsEmpty() { return top() == 0; } + +#ifdef DEBUG + intptr_t SumFreeList(); + int FreeListLength(); +#endif + + private: + // top_ points to the top FreeListNode* in the free list category. + base::AtomicWord top_; + FreeListNode* end_; + base::Mutex mutex_; + + // Total available bytes in all blocks of this free list category. + int available_; +}; + + +// The free list for the old space. 
The free list is organized in such a way +// as to encourage objects allocated around the same time to be near each +// other. The normal way to allocate is intended to be by bumping a 'top' +// pointer until it hits a 'limit' pointer. When the limit is hit we need to +// find a new space to allocate from. This is done with the free list, which +// is divided up into rough categories to cut down on waste. Having finer +// categories would scatter allocation more. + +// The old space free list is organized in categories. +// 1-31 words: Such small free areas are discarded for efficiency reasons. +// They can be reclaimed by the compactor. However the distance between top +// and limit may be this small. +// 32-255 words: There is a list of spaces this large. It is used for top and +// limit when the object we need to allocate is 1-31 words in size. These +// spaces are called small. +// 256-2047 words: There is a list of spaces this large. It is used for top and +// limit when the object we need to allocate is 32-255 words in size. These +// spaces are called medium. +// 1048-16383 words: There is a list of spaces this large. It is used for top +// and limit when the object we need to allocate is 256-2047 words in size. +// These spaces are call large. +// At least 16384 words. This list is for objects of 2048 words or larger. +// Empty pages are added to this list. These spaces are called huge. +class FreeList { + public: + explicit FreeList(PagedSpace* owner); + + intptr_t Concatenate(FreeList* free_list); + + // Clear the free list. + void Reset(); + + // Return the number of bytes available on the free list. + intptr_t available() { + return small_list_.available() + medium_list_.available() + + large_list_.available() + huge_list_.available(); + } + + // Place a node on the free list. The block of size 'size_in_bytes' + // starting at 'start' is placed on the free list. 
The return value is the + // number of bytes that have been lost due to internal fragmentation by + // freeing the block. Bookkeeping information will be written to the block, + // i.e., its contents will be destroyed. The start address should be word + // aligned, and the size should be a non-zero multiple of the word size. + int Free(Address start, int size_in_bytes); + + // This method returns how much memory can be allocated after freeing + // maximum_freed memory. + static inline int GuaranteedAllocatable(int maximum_freed) { + if (maximum_freed < kSmallListMin) { + return 0; + } else if (maximum_freed <= kSmallListMax) { + return kSmallAllocationMax; + } else if (maximum_freed <= kMediumListMax) { + return kMediumAllocationMax; + } else if (maximum_freed <= kLargeListMax) { + return kLargeAllocationMax; + } + return maximum_freed; + } + + // Allocate a block of size 'size_in_bytes' from the free list. The block + // is unitialized. A failure is returned if no block is available. The + // number of bytes lost to fragmentation is returned in the output parameter + // 'wasted_bytes'. The size should be a non-zero multiple of the word size. + MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); + + bool IsEmpty() { + return small_list_.IsEmpty() && medium_list_.IsEmpty() && + large_list_.IsEmpty() && huge_list_.IsEmpty(); + } + +#ifdef DEBUG + void Zap(); + intptr_t SumFreeLists(); + bool IsVeryLong(); +#endif + + // Used after booting the VM. + void RepairLists(Heap* heap); + + intptr_t EvictFreeListItems(Page* p); + bool ContainsPageFreeListItems(Page* p); + + FreeListCategory* small_list() { return &small_list_; } + FreeListCategory* medium_list() { return &medium_list_; } + FreeListCategory* large_list() { return &large_list_; } + FreeListCategory* huge_list() { return &huge_list_; } + + private: + // The size range of blocks, in bytes. 
+ static const int kMinBlockSize = 3 * kPointerSize; + static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize; + + FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); + + PagedSpace* owner_; + Heap* heap_; + + static const int kSmallListMin = 0x20 * kPointerSize; + static const int kSmallListMax = 0xff * kPointerSize; + static const int kMediumListMax = 0x7ff * kPointerSize; + static const int kLargeListMax = 0x3fff * kPointerSize; + static const int kSmallAllocationMax = kSmallListMin - kPointerSize; + static const int kMediumAllocationMax = kSmallListMax; + static const int kLargeAllocationMax = kMediumListMax; + FreeListCategory small_list_; + FreeListCategory medium_list_; + FreeListCategory large_list_; + FreeListCategory huge_list_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); +}; + + +class AllocationResult { + public: + // Implicit constructor from Object*. + AllocationResult(Object* object) // NOLINT + : object_(object), + retry_space_(INVALID_SPACE) {} + + AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {} + + static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) { + return AllocationResult(space); + } + + inline bool IsRetry() { return retry_space_ != INVALID_SPACE; } + + template <typename T> + bool To(T** obj) { + if (IsRetry()) return false; + *obj = T::cast(object_); + return true; + } + + Object* ToObjectChecked() { + CHECK(!IsRetry()); + return object_; + } + + AllocationSpace RetrySpace() { + DCHECK(IsRetry()); + return retry_space_; + } + + private: + explicit AllocationResult(AllocationSpace space) + : object_(NULL), retry_space_(space) {} + + Object* object_; + AllocationSpace retry_space_; +}; + + +class PagedSpace : public Space { + public: + // Creates a space with a maximum capacity, and an id. 
+ PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, + Executability executable); + + virtual ~PagedSpace() {} + + // Set up the space using the given address range of virtual memory (from + // the memory allocator's initial chunk) if possible. If the block of + // addresses is not big enough to contain a single page-aligned page, a + // fresh chunk will be allocated. + bool SetUp(); + + // Returns true if the space has been successfully set up and not + // subsequently torn down. + bool HasBeenSetUp(); + + // Cleans up the space, frees all pages in this space except those belonging + // to the initial chunk, uncommits addresses in the initial chunk. + void TearDown(); + + // Checks whether an object/address is in this space. + inline bool Contains(Address a); + bool Contains(HeapObject* o) { return Contains(o->address()); } + + // Given an address occupied by a live object, return that object if it is + // in this space, or a Smi if it is not. The implementation iterates over + // objects in the page containing the address, the cost is linear in the + // number of objects in the page. It may be slow. + Object* FindObject(Address addr); + + // During boot the free_space_map is created, and afterwards we may need + // to write it into the free list nodes that were already created. + void RepairFreeListsAfterBoot(); + + // Prepares for a mark-compact GC. + void PrepareForMarkCompact(); + + // Current capacity without growing (Size() + Available()). + intptr_t Capacity() { return accounting_stats_.Capacity(); } + + // Total amount of memory committed for this space. For paged + // spaces this equals the capacity. + intptr_t CommittedMemory() { return Capacity(); } + + // The maximum amount of memory ever committed for this space. + intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); } + + // Approximate amount of physical memory committed for this space. 
+ size_t CommittedPhysicalMemory(); + + struct SizeStats { + intptr_t Total() { + return small_size_ + medium_size_ + large_size_ + huge_size_; + } + + intptr_t small_size_; + intptr_t medium_size_; + intptr_t large_size_; + intptr_t huge_size_; + }; + + void ObtainFreeListStatistics(Page* p, SizeStats* sizes); + void ResetFreeListStatistics(); + + // Sets the capacity, the available space and the wasted space to zero. + // The stats are rebuilt during sweeping by adding each page to the + // capacity and the size when it is encountered. As free spaces are + // discovered during the sweeping they are subtracted from the size and added + // to the available and wasted totals. + void ClearStats() { + accounting_stats_.ClearSizeWaste(); + ResetFreeListStatistics(); + } + + // Increases the number of available bytes of that space. + void AddToAccountingStats(intptr_t bytes) { + accounting_stats_.DeallocateBytes(bytes); + } + + // Available bytes without growing. These are the bytes on the free list. + // The bytes in the linear allocation area are not included in this total + // because updating the stats would slow down allocation. New pages are + // immediately added to the free list so they show up here. + intptr_t Available() { return free_list_.available(); } + + // Allocated bytes in this space. Garbage bytes that were not found due to + // concurrent sweeping are counted as being allocated! The bytes in the + // current linear allocation area (between top and limit) are also counted + // here. + virtual intptr_t Size() { return accounting_stats_.Size(); } + + // As size, but the bytes in lazily swept pages are estimated and the bytes + // in the current linear allocation area are not included. + virtual intptr_t SizeOfObjects(); + + // Wasted bytes in this space. These are just the bytes that were thrown away + // due to being too small to use for allocation. They do not include the + // free bytes that were not found at all due to lazy sweeping. 
+ virtual intptr_t Waste() { return accounting_stats_.Waste(); } + + // Returns the allocation pointer in this space. + Address top() { return allocation_info_.top(); } + Address limit() { return allocation_info_.limit(); } + + // The allocation top address. + Address* allocation_top_address() { return allocation_info_.top_address(); } + + // The allocation limit address. + Address* allocation_limit_address() { + return allocation_info_.limit_address(); + } + + // Allocate the requested number of bytes in the space if possible, return a + // failure object if not. + MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes); + + // Give a block of memory to the space's free list. It might be added to + // the free list or accounted as waste. + // If add_to_freelist is false then just accounting stats are updated and + // no attempt to add area to free list is made. + int Free(Address start, int size_in_bytes) { + int wasted = free_list_.Free(start, size_in_bytes); + accounting_stats_.DeallocateBytes(size_in_bytes); + accounting_stats_.WasteBytes(wasted); + return size_in_bytes - wasted; + } + + void ResetFreeList() { free_list_.Reset(); } + + // Set space allocation info. + void SetTopAndLimit(Address top, Address limit) { + DCHECK(top == limit || + Page::FromAddress(top) == Page::FromAddress(limit - 1)); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); + allocation_info_.set_top(top); + allocation_info_.set_limit(limit); + } + + // Empty space allocation info, returning unused area to free list. + void EmptyAllocationInfo() { + // Mark the old linear allocation area with a free space map so it can be + // skipped when scanning the heap. + int old_linear_size = static_cast<int>(limit() - top()); + Free(top(), old_linear_size); + SetTopAndLimit(NULL, NULL); + } + + void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } + + void IncreaseCapacity(int size); + + // Releases an unused page and shrinks the space. 
+ void ReleasePage(Page* page); + + // The dummy page that anchors the linked list of pages. + Page* anchor() { return &anchor_; } + +#ifdef VERIFY_HEAP + // Verify integrity of this space. + virtual void Verify(ObjectVisitor* visitor); + + // Overridden by subclasses to verify space-specific object + // properties (e.g., only maps or free-list nodes are in map space). + virtual void VerifyObject(HeapObject* obj) {} +#endif + +#ifdef DEBUG + // Print meta info and objects in this space. + virtual void Print(); + + // Reports statistics for the space + void ReportStatistics(); + + // Report code object related statistics + void CollectCodeStatistics(); + static void ReportCodeStatistics(Isolate* isolate); + static void ResetCodeStatistics(Isolate* isolate); +#endif + + bool swept_precisely() { return swept_precisely_; } + void set_swept_precisely(bool b) { swept_precisely_ = b; } + + // Evacuation candidates are swept by evacuator. Needs to return a valid + // result before _and_ after evacuation has finished. + static bool ShouldBeSweptBySweeperThreads(Page* p) { + return !p->IsEvacuationCandidate() && + !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely(); + } + + void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; } + + void IncreaseUnsweptFreeBytes(Page* p) { + DCHECK(ShouldBeSweptBySweeperThreads(p)); + unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); + } + + void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; } + + void DecreaseUnsweptFreeBytes(Page* p) { + DCHECK(ShouldBeSweptBySweeperThreads(p)); + unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); + } + + void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; } + + // This function tries to steal size_in_bytes memory from the sweeper threads + // free-lists. If it does not succeed stealing enough memory, it will wait + // for the sweeper threads to finish sweeping. + // It returns true when sweeping is completed and false otherwise. 
+ bool EnsureSweeperProgress(intptr_t size_in_bytes); + + void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; } + + Page* end_of_unswept_pages() { return end_of_unswept_pages_; } + + Page* FirstPage() { return anchor_.next_page(); } + Page* LastPage() { return anchor_.prev_page(); } + + void EvictEvacuationCandidatesFromFreeLists(); + + bool CanExpand(); + + // Returns the number of total pages in this space. + int CountTotalPages(); + + // Return size of allocatable area on a page in this space. + inline int AreaSize() { return area_size_; } + + void CreateEmergencyMemory(); + void FreeEmergencyMemory(); + void UseEmergencyMemory(); + + bool HasEmergencyMemory() { return emergency_memory_ != NULL; } + + protected: + FreeList* free_list() { return &free_list_; } + + int area_size_; + + // Maximum capacity of this space. + intptr_t max_capacity_; + + intptr_t SizeOfFirstPage(); + + // Accounting information for this space. + AllocationStats accounting_stats_; + + // The dummy page that anchors the double linked list of pages. + Page anchor_; + + // The space's free list. + FreeList free_list_; + + // Normal allocation information. + AllocationInfo allocation_info_; + + // This space was swept precisely, hence it is iterable. + bool swept_precisely_; + + // The number of free bytes which could be reclaimed by advancing the + // concurrent sweeper threads. This is only an estimation because concurrent + // sweeping is done conservatively. + intptr_t unswept_free_bytes_; + + // The sweeper threads iterate over the list of pointer and data space pages + // and sweep these pages concurrently. They will stop sweeping after the + // end_of_unswept_pages_ page. + Page* end_of_unswept_pages_; + + // Emergency memory is the memory of a full page for a given space, allocated + // conservatively before evacuating a page. If compaction fails due to out + // of memory error the emergency memory can be used to complete compaction. 
+ // If not used, the emergency memory is released after compaction. + MemoryChunk* emergency_memory_; + + // Expands the space by allocating a fixed number of pages. Returns false if + // it cannot allocate requested number of pages from OS, or if the hard heap + // size limit has been hit. + bool Expand(); + + // Generic fast case allocation function that tries linear allocation at the + // address denoted by top in allocation_info_. + inline HeapObject* AllocateLinearly(int size_in_bytes); + + // If sweeping is still in progress try to sweep unswept pages. If that is + // not successful, wait for the sweeper threads and re-try free-list + // allocation. + MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation( + int size_in_bytes); + + // Slow path of AllocateRaw. This function is space-dependent. + MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); + + friend class PageIterator; + friend class MarkCompactCollector; +}; + + +class NumberAndSizeInfo BASE_EMBEDDED { + public: + NumberAndSizeInfo() : number_(0), bytes_(0) {} + + int number() const { return number_; } + void increment_number(int num) { number_ += num; } + + int bytes() const { return bytes_; } + void increment_bytes(int size) { bytes_ += size; } + + void clear() { + number_ = 0; + bytes_ = 0; + } + + private: + int number_; + int bytes_; +}; + + +// HistogramInfo class for recording a single "bar" of a histogram. This +// class is used for collecting statistics to print to the log file. +class HistogramInfo : public NumberAndSizeInfo { + public: + HistogramInfo() : NumberAndSizeInfo() {} + + const char* name() { return name_; } + void set_name(const char* name) { name_ = name; } + + private: + const char* name_; +}; + + +enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 }; + + +class SemiSpace; + + +class NewSpacePage : public MemoryChunk { + public: + // GC related flags copied from from-space to to-space when + // flipping semispaces. 
+ static const intptr_t kCopyOnFlipFlagsMask = + (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | + (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | + (1 << MemoryChunk::SCAN_ON_SCAVENGE); + + static const int kAreaSize = Page::kMaxRegularHeapObjectSize; + + inline NewSpacePage* next_page() const { + return static_cast<NewSpacePage*>(next_chunk()); + } + + inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } + + inline NewSpacePage* prev_page() const { + return static_cast<NewSpacePage*>(prev_chunk()); + } + + inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); } + + SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); } + + bool is_anchor() { return !this->InNewSpace(); } + + static bool IsAtStart(Address addr) { + return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == + kObjectStartOffset; + } + + static bool IsAtEnd(Address addr) { + return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; + } + + Address address() { return reinterpret_cast<Address>(this); } + + // Finds the NewSpacePage containg the given address. + static inline NewSpacePage* FromAddress(Address address_in_page) { + Address page_start = + reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & + ~Page::kPageAlignmentMask); + NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); + return page; + } + + // Find the page for a limit address. A limit address is either an address + // inside a page, or the address right after the last byte of a page. + static inline NewSpacePage* FromLimit(Address address_limit) { + return NewSpacePage::FromAddress(address_limit - 1); + } + + // Checks if address1 and address2 are on the same new space page. 
+ static inline bool OnSamePage(Address address1, Address address2) { + return NewSpacePage::FromAddress(address1) == + NewSpacePage::FromAddress(address2); + } + + private: + // Create a NewSpacePage object that is only used as anchor + // for the doubly-linked list of real pages. + explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); } + + static NewSpacePage* Initialize(Heap* heap, Address start, + SemiSpace* semi_space); + + // Intialize a fake NewSpacePage used as sentinel at the ends + // of a doubly-linked list of real NewSpacePages. + // Only uses the prev/next links, and sets flags to not be in new-space. + void InitializeAsAnchor(SemiSpace* owner); + + friend class SemiSpace; + friend class SemiSpaceIterator; +}; + + +// ----------------------------------------------------------------------------- +// SemiSpace in young generation +// +// A semispace is a contiguous chunk of memory holding page-like memory +// chunks. The mark-compact collector uses the memory of the first page in +// the from space as a marking stack when tracing live objects. + +class SemiSpace : public Space { + public: + // Constructor. + SemiSpace(Heap* heap, SemiSpaceId semispace) + : Space(heap, NEW_SPACE, NOT_EXECUTABLE), + start_(NULL), + age_mark_(NULL), + id_(semispace), + anchor_(this), + current_page_(NULL) {} + + // Sets up the semispace using the given chunk. + void SetUp(Address start, int initial_capacity, int maximum_capacity); + + // Tear down the space. Heap memory was not allocated by the space, so it + // is not deallocated here. + void TearDown(); + + // True if the space has been set up but not torn down. + bool HasBeenSetUp() { return start_ != NULL; } + + // Grow the semispace to the new capacity. The new capacity + // requested must be larger than the current capacity and less than + // the maximum capacity. + bool GrowTo(int new_capacity); + + // Shrinks the semispace to the new capacity. 
The new capacity + // requested must be more than the amount of used memory in the + // semispace and less than the current capacity. + bool ShrinkTo(int new_capacity); + + // Returns the start address of the first page of the space. + Address space_start() { + DCHECK(anchor_.next_page() != &anchor_); + return anchor_.next_page()->area_start(); + } + + // Returns the start address of the current page of the space. + Address page_low() { return current_page_->area_start(); } + + // Returns one past the end address of the space. + Address space_end() { return anchor_.prev_page()->area_end(); } + + // Returns one past the end address of the current page of the space. + Address page_high() { return current_page_->area_end(); } + + bool AdvancePage() { + NewSpacePage* next_page = current_page_->next_page(); + if (next_page == anchor()) return false; + current_page_ = next_page; + return true; + } + + // Resets the space to using the first page. + void Reset(); + + // Age mark accessors. + Address age_mark() { return age_mark_; } + void set_age_mark(Address mark); + + // True if the address is in the address range of this semispace (not + // necessarily below the allocation pointer). + bool Contains(Address a) { + return (reinterpret_cast<uintptr_t>(a) & address_mask_) == + reinterpret_cast<uintptr_t>(start_); + } + + // True if the object is a heap object in the address range of this + // semispace (not necessarily below the allocation pointer). + bool Contains(Object* o) { + return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; + } + + // If we don't have these here then SemiSpace will be abstract. However + // they should never be called. 
+ virtual intptr_t Size() { + UNREACHABLE(); + return 0; + } + + bool is_committed() { return committed_; } + bool Commit(); + bool Uncommit(); + + NewSpacePage* first_page() { return anchor_.next_page(); } + NewSpacePage* current_page() { return current_page_; } + +#ifdef VERIFY_HEAP + virtual void Verify(); +#endif + +#ifdef DEBUG + virtual void Print(); + // Validate a range of of addresses in a SemiSpace. + // The "from" address must be on a page prior to the "to" address, + // in the linked page order, or it must be earlier on the same page. + static void AssertValidRange(Address from, Address to); +#else + // Do nothing. + inline static void AssertValidRange(Address from, Address to) {} +#endif + + // Returns the current capacity of the semi space. + int Capacity() { return capacity_; } + + // Returns the maximum capacity of the semi space. + int MaximumCapacity() { return maximum_capacity_; } + + // Returns the initial capacity of the semi space. + int InitialCapacity() { return initial_capacity_; } + + SemiSpaceId id() { return id_; } + + static void Swap(SemiSpace* from, SemiSpace* to); + + // Returns the maximum amount of memory ever committed by the semi space. + size_t MaximumCommittedMemory() { return maximum_committed_; } + + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + + private: + // Flips the semispace between being from-space and to-space. + // Copies the flags into the masked positions on all pages in the space. + void FlipPages(intptr_t flags, intptr_t flag_mask); + + // Updates Capacity and MaximumCommitted based on new capacity. + void SetCapacity(int new_capacity); + + NewSpacePage* anchor() { return &anchor_; } + + // The current and maximum capacity of the space. + int capacity_; + int maximum_capacity_; + int initial_capacity_; + + intptr_t maximum_committed_; + + // The start address of the space. 
+ Address start_; + // Used to govern object promotion during mark-compact collection. + Address age_mark_; + + // Masks and comparison values to test for containment in this semispace. + uintptr_t address_mask_; + uintptr_t object_mask_; + uintptr_t object_expected_; + + bool committed_; + SemiSpaceId id_; + + NewSpacePage anchor_; + NewSpacePage* current_page_; + + friend class SemiSpaceIterator; + friend class NewSpacePageIterator; + + public: + TRACK_MEMORY("SemiSpace") +}; + + +// A SemiSpaceIterator is an ObjectIterator that iterates over the active +// semispace of the heap's new space. It iterates over the objects in the +// semispace from a given start address (defaulting to the bottom of the +// semispace) to the top of the semispace. New objects allocated after the +// iterator is created are not iterated. +class SemiSpaceIterator : public ObjectIterator { + public: + // Create an iterator over the objects in the given space. If no start + // address is given, the iterator starts from the bottom of the space. If + // no size function is given, the iterator calls Object::Size(). + + // Iterate over all of allocated to-space. + explicit SemiSpaceIterator(NewSpace* space); + // Iterate over all of allocated to-space, with a custome size function. + SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); + // Iterate over part of allocated to-space, from start to the end + // of allocation. + SemiSpaceIterator(NewSpace* space, Address start); + // Iterate from one address to another in the same semi-space. + SemiSpaceIterator(Address from, Address to); + + HeapObject* Next() { + if (current_ == limit_) return NULL; + if (NewSpacePage::IsAtEnd(current_)) { + NewSpacePage* page = NewSpacePage::FromLimit(current_); + page = page->next_page(); + DCHECK(!page->is_anchor()); + current_ = page->area_start(); + if (current_ == limit_) return NULL; + } + + HeapObject* object = HeapObject::FromAddress(current_); + int size = (size_func_ == NULL) ? 
object->Size() : size_func_(object); + + current_ += size; + return object; + } + + // Implementation of the ObjectIterator functions. + virtual HeapObject* next_object() { return Next(); } + + private: + void Initialize(Address start, Address end, HeapObjectCallback size_func); + + // The current iteration point. + Address current_; + // The end of iteration. + Address limit_; + // The callback function. + HeapObjectCallback size_func_; +}; + + +// ----------------------------------------------------------------------------- +// A PageIterator iterates the pages in a semi-space. +class NewSpacePageIterator BASE_EMBEDDED { + public: + // Make an iterator that runs over all pages in to-space. + explicit inline NewSpacePageIterator(NewSpace* space); + + // Make an iterator that runs over all pages in the given semispace, + // even those not used in allocation. + explicit inline NewSpacePageIterator(SemiSpace* space); + + // Make iterator that iterates from the page containing start + // to the page that contains limit in the same semispace. + inline NewSpacePageIterator(Address start, Address limit); + + inline bool has_next(); + inline NewSpacePage* next(); + + private: + NewSpacePage* prev_page_; // Previous page returned. + // Next page that will be returned. Cached here so that we can use this + // iterator for operations that deallocate pages. + NewSpacePage* next_page_; + // Last page returned. + NewSpacePage* last_page_; +}; + + +// ----------------------------------------------------------------------------- +// The young generation space. +// +// The new space consists of a contiguous pair of semispaces. It simply +// forwards most functions to the appropriate semispace. + +class NewSpace : public Space { + public: + // Constructor. 
+ explicit NewSpace(Heap* heap) + : Space(heap, NEW_SPACE, NOT_EXECUTABLE), + to_space_(heap, kToSpace), + from_space_(heap, kFromSpace), + reservation_(), + inline_allocation_limit_step_(0) {} + + // Sets up the new space using the given chunk. + bool SetUp(int reserved_semispace_size_, int max_semi_space_size); + + // Tears down the space. Heap memory was not allocated by the space, so it + // is not deallocated here. + void TearDown(); + + // True if the space has been set up but not torn down. + bool HasBeenSetUp() { + return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); + } + + // Flip the pair of spaces. + void Flip(); + + // Grow the capacity of the semispaces. Assumes that they are not at + // their maximum capacity. + void Grow(); + + // Shrink the capacity of the semispaces. + void Shrink(); + + // True if the address or object lies in the address range of either + // semispace (not necessarily below the allocation pointer). + bool Contains(Address a) { + return (reinterpret_cast<uintptr_t>(a) & address_mask_) == + reinterpret_cast<uintptr_t>(start_); + } + + bool Contains(Object* o) { + Address a = reinterpret_cast<Address>(o); + return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; + } + + // Return the allocated bytes in the active semispace. + virtual intptr_t Size() { + return pages_used_ * NewSpacePage::kAreaSize + + static_cast<int>(top() - to_space_.page_low()); + } + + // The same, but returning an int. We have to have the one that returns + // intptr_t because it is inherited, but if we know we are dealing with the + // new space, which can't get as big as the other spaces then this is useful: + int SizeAsInt() { return static_cast<int>(Size()); } + + // Return the current capacity of a semispace. 
+ intptr_t EffectiveCapacity() { + SLOW_DCHECK(to_space_.Capacity() == from_space_.Capacity()); + return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize; + } + + // Return the current capacity of a semispace. + intptr_t Capacity() { + DCHECK(to_space_.Capacity() == from_space_.Capacity()); + return to_space_.Capacity(); + } + + // Return the total amount of memory committed for new space. + intptr_t CommittedMemory() { + if (from_space_.is_committed()) return 2 * Capacity(); + return Capacity(); + } + + // Return the total amount of memory committed for new space. + intptr_t MaximumCommittedMemory() { + return to_space_.MaximumCommittedMemory() + + from_space_.MaximumCommittedMemory(); + } + + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + + // Return the available bytes without growing. + intptr_t Available() { return Capacity() - Size(); } + + // Return the maximum capacity of a semispace. + int MaximumCapacity() { + DCHECK(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); + return to_space_.MaximumCapacity(); + } + + bool IsAtMaximumCapacity() { return Capacity() == MaximumCapacity(); } + + // Returns the initial capacity of a semispace. + int InitialCapacity() { + DCHECK(to_space_.InitialCapacity() == from_space_.InitialCapacity()); + return to_space_.InitialCapacity(); + } + + // Return the address of the allocation pointer in the active semispace. + Address top() { + DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top())); + return allocation_info_.top(); + } + + void set_top(Address top) { + DCHECK(to_space_.current_page()->ContainsLimit(top)); + allocation_info_.set_top(top); + } + + // Return the address of the allocation pointer limit in the active semispace. 
+ Address limit() { + DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit())); + return allocation_info_.limit(); + } + + // Return the address of the first object in the active semispace. + Address bottom() { return to_space_.space_start(); } + + // Get the age mark of the inactive semispace. + Address age_mark() { return from_space_.age_mark(); } + // Set the age mark in the active semispace. + void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } + + // The start address of the space and a bit mask. Anding an address in the + // new space with the mask will result in the start address. + Address start() { return start_; } + uintptr_t mask() { return address_mask_; } + + INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { + DCHECK(Contains(addr)); + DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) || + IsAligned(OffsetFrom(addr) - 1, kPointerSize)); + return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; + } + + INLINE(Address MarkbitIndexToAddress(uint32_t index)) { + return reinterpret_cast<Address>(index << kPointerSizeLog2); + } + + // The allocation top and limit address. + Address* allocation_top_address() { return allocation_info_.top_address(); } + + // The allocation limit address. + Address* allocation_limit_address() { + return allocation_info_.limit_address(); + } + + MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes)); + + // Reset the allocation pointer to the beginning of the active semispace. + void ResetAllocationInfo(); + + void UpdateInlineAllocationLimit(int size_in_bytes); + void LowerInlineAllocationLimit(intptr_t step) { + inline_allocation_limit_step_ = step; + UpdateInlineAllocationLimit(0); + top_on_previous_step_ = allocation_info_.top(); + } + + // Get the extent of the inactive semispace (for use as a marking stack, + // or to zap it). Notice: space-addresses are not necessarily on the + // same page, so FromSpaceStart() might be above FromSpaceEnd(). 
+ Address FromSpacePageLow() { return from_space_.page_low(); } + Address FromSpacePageHigh() { return from_space_.page_high(); } + Address FromSpaceStart() { return from_space_.space_start(); } + Address FromSpaceEnd() { return from_space_.space_end(); } + + // Get the extent of the active semispace's pages' memory. + Address ToSpaceStart() { return to_space_.space_start(); } + Address ToSpaceEnd() { return to_space_.space_end(); } + + inline bool ToSpaceContains(Address address) { + return to_space_.Contains(address); + } + inline bool FromSpaceContains(Address address) { + return from_space_.Contains(address); + } + + // True if the object is a heap object in the address range of the + // respective semispace (not necessarily below the allocation pointer of the + // semispace). + inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } + inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } + + // Try to switch the active semispace to a new, empty, page. + // Returns false if this isn't possible or reasonable (i.e., there + // are no pages, or the current page is already empty), or true + // if successful. + bool AddFreshPage(); + +#ifdef VERIFY_HEAP + // Verify the active semispace. + virtual void Verify(); +#endif + +#ifdef DEBUG + // Print the active semispace. + virtual void Print() { to_space_.Print(); } +#endif + + // Iterates the active semispace to collect statistics. + void CollectStatistics(); + // Reports previously collected statistics of the active semispace. + void ReportStatistics(); + // Clears previously collected statistics. + void ClearHistograms(); + + // Record the allocation or promotion of a heap object. Note that we don't + // record every single allocation, but only those that happen in the + // to space during a scavenge GC. + void RecordAllocation(HeapObject* obj); + void RecordPromotion(HeapObject* obj); + + // Return whether the operation succeded. 
+ bool CommitFromSpaceIfNeeded() { + if (from_space_.is_committed()) return true; + return from_space_.Commit(); + } + + bool UncommitFromSpace() { + if (!from_space_.is_committed()) return true; + return from_space_.Uncommit(); + } + + inline intptr_t inline_allocation_limit_step() { + return inline_allocation_limit_step_; + } + + SemiSpace* active_space() { return &to_space_; } + + private: + // Update allocation info to match the current to-space page. + void UpdateAllocationInfo(); + + Address chunk_base_; + uintptr_t chunk_size_; + + // The semispaces. + SemiSpace to_space_; + SemiSpace from_space_; + base::VirtualMemory reservation_; + int pages_used_; + + // Start address and bit mask for containment testing. + Address start_; + uintptr_t address_mask_; + uintptr_t object_mask_; + uintptr_t object_expected_; + + // Allocation pointer and limit for normal allocation and allocation during + // mark-compact collection. + AllocationInfo allocation_info_; + + // When incremental marking is active we will set allocation_info_.limit + // to be lower than actual limit and then will gradually increase it + // in steps to guarantee that we do incremental marking steps even + // when all allocation is performed from inlined generated code. + intptr_t inline_allocation_limit_step_; + + Address top_on_previous_step_; + + HistogramInfo* allocated_histogram_; + HistogramInfo* promoted_histogram_; + + MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes); + + friend class SemiSpaceIterator; + + public: + TRACK_MEMORY("NewSpace") +}; + + +// ----------------------------------------------------------------------------- +// Old object space (excluding map objects) + +class OldSpace : public PagedSpace { + public: + // Creates an old space object with a given maximum capacity. + // The constructor does not allocate pages from OS. 
+ OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, + Executability executable) + : PagedSpace(heap, max_capacity, id, executable) {} + + public: + TRACK_MEMORY("OldSpace") +}; + + +// For contiguous spaces, top should be in the space (or at the end) and limit +// should be the end of the space. +#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \ + SLOW_DCHECK((space).page_low() <= (info).top() && \ + (info).top() <= (space).page_high() && \ + (info).limit() <= (space).page_high()) + + +// ----------------------------------------------------------------------------- +// Old space for all map objects + +class MapSpace : public PagedSpace { + public: + // Creates a map space object with a maximum capacity. + MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) + : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), + max_map_space_pages_(kMaxMapPageIndex - 1) {} + + // Given an index, returns the page address. + // TODO(1600): this limit is artifical just to keep code compilable + static const int kMaxMapPageIndex = 1 << 16; + + virtual int RoundSizeDownToObjectAlignment(int size) { + if (IsPowerOf2(Map::kSize)) { + return RoundDown(size, Map::kSize); + } else { + return (size / Map::kSize) * Map::kSize; + } + } + + protected: + virtual void VerifyObject(HeapObject* obj); + + private: + static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize; + + // Do map space compaction if there is a page gap. + int CompactionThreshold() { + return kMapsPerPage * (max_map_space_pages_ - 1); + } + + const int max_map_space_pages_; + + public: + TRACK_MEMORY("MapSpace") +}; + + +// ----------------------------------------------------------------------------- +// Old space for simple property cell objects + +class CellSpace : public PagedSpace { + public: + // Creates a property cell space object with a maximum capacity. 
+ CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) + : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {} + + virtual int RoundSizeDownToObjectAlignment(int size) { + if (IsPowerOf2(Cell::kSize)) { + return RoundDown(size, Cell::kSize); + } else { + return (size / Cell::kSize) * Cell::kSize; + } + } + + protected: + virtual void VerifyObject(HeapObject* obj); + + public: + TRACK_MEMORY("CellSpace") +}; + + +// ----------------------------------------------------------------------------- +// Old space for all global object property cell objects + +class PropertyCellSpace : public PagedSpace { + public: + // Creates a property cell space object with a maximum capacity. + PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) + : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {} + + virtual int RoundSizeDownToObjectAlignment(int size) { + if (IsPowerOf2(PropertyCell::kSize)) { + return RoundDown(size, PropertyCell::kSize); + } else { + return (size / PropertyCell::kSize) * PropertyCell::kSize; + } + } + + protected: + virtual void VerifyObject(HeapObject* obj); + + public: + TRACK_MEMORY("PropertyCellSpace") +}; + + +// ----------------------------------------------------------------------------- +// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by +// the large object space. A large object is allocated from OS heap with +// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). +// A large object always starts at Page::kObjectStartOffset to a page. +// Large objects do not move during garbage collections. + +class LargeObjectSpace : public Space { + public: + LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id); + virtual ~LargeObjectSpace() {} + + // Initializes internal data structures. + bool SetUp(); + + // Releases internal resources, frees objects in this space. 
+ void TearDown(); + + static intptr_t ObjectSizeFor(intptr_t chunk_size) { + if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; + return chunk_size - Page::kPageSize - Page::kObjectStartOffset; + } + + // Shared implementation of AllocateRaw, AllocateRawCode and + // AllocateRawFixedArray. + MUST_USE_RESULT AllocationResult + AllocateRaw(int object_size, Executability executable); + + // Available bytes for objects in this space. + inline intptr_t Available(); + + virtual intptr_t Size() { return size_; } + + virtual intptr_t SizeOfObjects() { return objects_size_; } + + intptr_t MaximumCommittedMemory() { return maximum_committed_; } + + intptr_t CommittedMemory() { return Size(); } + + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + + int PageCount() { return page_count_; } + + // Finds an object for a given address, returns a Smi if it is not found. + // The function iterates through all objects in this space, may be slow. + Object* FindObject(Address a); + + // Finds a large object page containing the given address, returns NULL + // if such a page doesn't exist. + LargePage* FindPage(Address a); + + // Frees unmarked objects. + void FreeUnmarkedObjects(); + + // Checks whether a heap object is in this space; O(1). + bool Contains(HeapObject* obj); + + // Checks whether the space is empty. + bool IsEmpty() { return first_page_ == NULL; } + + LargePage* first_page() { return first_page_; } + +#ifdef VERIFY_HEAP + virtual void Verify(); +#endif + +#ifdef DEBUG + virtual void Print(); + void ReportStatistics(); + void CollectCodeStatistics(); +#endif + // Checks whether an address is in the object area in this space. It + // iterates all objects in the space. May be slow. + bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); } + + private: + intptr_t max_capacity_; + intptr_t maximum_committed_; + // The head of the linked list of large object chunks. 
+ LargePage* first_page_; + intptr_t size_; // allocated bytes + int page_count_; // number of chunks + intptr_t objects_size_; // size of objects + // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them + HashMap chunk_map_; + + friend class LargeObjectIterator; + + public: + TRACK_MEMORY("LargeObjectSpace") +}; + + +class LargeObjectIterator : public ObjectIterator { + public: + explicit LargeObjectIterator(LargeObjectSpace* space); + LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); + + HeapObject* Next(); + + // implementation of ObjectIterator. + virtual HeapObject* next_object() { return Next(); } + + private: + LargePage* current_; + HeapObjectCallback size_func_; +}; + + +// Iterates over the chunks (pages and large object pages) that can contain +// pointers to new space. +class PointerChunkIterator BASE_EMBEDDED { + public: + inline explicit PointerChunkIterator(Heap* heap); + + // Return NULL when the iterator is done. + MemoryChunk* next() { + switch (state_) { + case kOldPointerState: { + if (old_pointer_iterator_.has_next()) { + return old_pointer_iterator_.next(); + } + state_ = kMapState; + // Fall through. + } + case kMapState: { + if (map_iterator_.has_next()) { + return map_iterator_.next(); + } + state_ = kLargeObjectState; + // Fall through. + } + case kLargeObjectState: { + HeapObject* heap_object; + do { + heap_object = lo_iterator_.Next(); + if (heap_object == NULL) { + state_ = kFinishedState; + return NULL; + } + // Fixed arrays are the only pointer-containing objects in large + // object space. 
+ } while (!heap_object->IsFixedArray()); + MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address()); + return answer; + } + case kFinishedState: + return NULL; + default: + break; + } + UNREACHABLE(); + return NULL; + } + + + private: + enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState }; + State state_; + PageIterator old_pointer_iterator_; + PageIterator map_iterator_; + LargeObjectIterator lo_iterator_; +}; + + +#ifdef DEBUG +struct CommentStatistic { + const char* comment; + int size; + int count; + void Clear() { + comment = NULL; + size = 0; + count = 0; + } + // Must be small, since an iteration is used for lookup. + static const int kMaxComments = 64; +}; +#endif +} +} // namespace v8::internal + +#endif // V8_HEAP_SPACES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/spaces-inl.h nodejs-0.11.15/deps/v8/src/heap/spaces-inl.h --- nodejs-0.11.13/deps/v8/src/heap/spaces-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/spaces-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,308 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_HEAP_SPACES_INL_H_ +#define V8_HEAP_SPACES_INL_H_ + +#include "src/heap/spaces.h" +#include "src/heap-profiler.h" +#include "src/isolate.h" +#include "src/v8memory.h" + +namespace v8 { +namespace internal { + + +// ----------------------------------------------------------------------------- +// Bitmap + +void Bitmap::Clear(MemoryChunk* chunk) { + Bitmap* bitmap = chunk->markbits(); + for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0; + chunk->ResetLiveBytes(); +} + + +// ----------------------------------------------------------------------------- +// PageIterator + + +PageIterator::PageIterator(PagedSpace* space) + : space_(space), + prev_page_(&space->anchor_), + next_page_(prev_page_->next_page()) {} + + +bool PageIterator::has_next() { return next_page_ != &space_->anchor_; } + + +Page* PageIterator::next() { + DCHECK(has_next()); + prev_page_ = next_page_; + next_page_ = next_page_->next_page(); + return prev_page_; +} + + +// ----------------------------------------------------------------------------- +// NewSpacePageIterator + + +NewSpacePageIterator::NewSpacePageIterator(NewSpace* space) + : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()), + next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())), + last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {} + +NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space) + : prev_page_(space->anchor()), + next_page_(prev_page_->next_page()), + last_page_(prev_page_->prev_page()) {} + +NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit) + : prev_page_(NewSpacePage::FromAddress(start)->prev_page()), + next_page_(NewSpacePage::FromAddress(start)), + last_page_(NewSpacePage::FromLimit(limit)) { + SemiSpace::AssertValidRange(start, limit); +} + + +bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; } + + +NewSpacePage* NewSpacePageIterator::next() { + DCHECK(has_next()); + prev_page_ = next_page_; + 
next_page_ = next_page_->next_page(); + return prev_page_; +} + + +// ----------------------------------------------------------------------------- +// HeapObjectIterator +HeapObject* HeapObjectIterator::FromCurrentPage() { + while (cur_addr_ != cur_end_) { + if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) { + cur_addr_ = space_->limit(); + continue; + } + HeapObject* obj = HeapObject::FromAddress(cur_addr_); + int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); + cur_addr_ += obj_size; + DCHECK(cur_addr_ <= cur_end_); + if (!obj->IsFiller()) { + DCHECK_OBJECT_SIZE(obj_size); + return obj; + } + } + return NULL; +} + + +// ----------------------------------------------------------------------------- +// MemoryAllocator + +#ifdef ENABLE_HEAP_PROTECTION + +void MemoryAllocator::Protect(Address start, size_t size) { + base::OS::Protect(start, size); +} + + +void MemoryAllocator::Unprotect(Address start, size_t size, + Executability executable) { + base::OS::Unprotect(start, size, executable); +} + + +void MemoryAllocator::ProtectChunkFromPage(Page* page) { + int id = GetChunkId(page); + base::OS::Protect(chunks_[id].address(), chunks_[id].size()); +} + + +void MemoryAllocator::UnprotectChunkFromPage(Page* page) { + int id = GetChunkId(page); + base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(), + chunks_[id].owner()->executable() == EXECUTABLE); +} + +#endif + + +// -------------------------------------------------------------------------- +// PagedSpace +Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable, + PagedSpace* owner) { + Page* page = reinterpret_cast<Page*>(chunk); + DCHECK(page->area_size() <= kMaxRegularHeapObjectSize); + DCHECK(chunk->owner() == owner); + owner->IncreaseCapacity(page->area_size()); + owner->Free(page->area_start(), page->area_size()); + + heap->incremental_marking()->SetOldSpacePageFlags(chunk); + + return page; +} + + +bool PagedSpace::Contains(Address addr) { + 
Page* p = Page::FromAddress(addr); + if (!p->is_valid()) return false; + return p->owner() == this; +} + + +void MemoryChunk::set_scan_on_scavenge(bool scan) { + if (scan) { + if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages(); + SetFlag(SCAN_ON_SCAVENGE); + } else { + if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages(); + ClearFlag(SCAN_ON_SCAVENGE); + } + heap_->incremental_marking()->SetOldSpacePageFlags(this); +} + + +MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) { + MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>( + OffsetFrom(addr) & ~Page::kPageAlignmentMask); + if (maybe->owner() != NULL) return maybe; + LargeObjectIterator iterator(heap->lo_space()); + for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) { + // Fixed arrays are the only pointer-containing objects in large object + // space. + if (o->IsFixedArray()) { + MemoryChunk* chunk = MemoryChunk::FromAddress(o->address()); + if (chunk->Contains(addr)) { + return chunk; + } + } + } + UNREACHABLE(); + return NULL; +} + + +void MemoryChunk::UpdateHighWaterMark(Address mark) { + if (mark == NULL) return; + // Need to subtract one from the mark because when a chunk is full the + // top points to the next address after the chunk, which effectively belongs + // to another chunk. See the comment to Page::FromAllocationTop. 
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); + int new_mark = static_cast<int>(mark - chunk->address()); + if (new_mark > chunk->high_water_mark_) { + chunk->high_water_mark_ = new_mark; + } +} + + +PointerChunkIterator::PointerChunkIterator(Heap* heap) + : state_(kOldPointerState), + old_pointer_iterator_(heap->old_pointer_space()), + map_iterator_(heap->map_space()), + lo_iterator_(heap->lo_space()) {} + + +Page* Page::next_page() { + DCHECK(next_chunk()->owner() == owner()); + return static_cast<Page*>(next_chunk()); +} + + +Page* Page::prev_page() { + DCHECK(prev_chunk()->owner() == owner()); + return static_cast<Page*>(prev_chunk()); +} + + +void Page::set_next_page(Page* page) { + DCHECK(page->owner() == owner()); + set_next_chunk(page); +} + + +void Page::set_prev_page(Page* page) { + DCHECK(page->owner() == owner()); + set_prev_chunk(page); +} + + +// Try linear allocation in the page of alloc_info's allocation top. Does +// not contain slow case logic (e.g. move to the next page or try free list +// allocation) so it can be used by all the allocation functions and for all +// the paged spaces. +HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { + Address current_top = allocation_info_.top(); + Address new_top = current_top + size_in_bytes; + if (new_top > allocation_info_.limit()) return NULL; + + allocation_info_.set_top(new_top); + return HeapObject::FromAddress(current_top); +} + + +// Raw allocation. 
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) { + HeapObject* object = AllocateLinearly(size_in_bytes); + + if (object == NULL) { + object = free_list_.Allocate(size_in_bytes); + if (object == NULL) { + object = SlowAllocateRaw(size_in_bytes); + } + } + + if (object != NULL) { + if (identity() == CODE_SPACE) { + SkipList::Update(object->address(), size_in_bytes); + } + return object; + } + + return AllocationResult::Retry(identity()); +} + + +// ----------------------------------------------------------------------------- +// NewSpace + + +AllocationResult NewSpace::AllocateRaw(int size_in_bytes) { + Address old_top = allocation_info_.top(); + + if (allocation_info_.limit() - old_top < size_in_bytes) { + return SlowAllocateRaw(size_in_bytes); + } + + HeapObject* obj = HeapObject::FromAddress(old_top); + allocation_info_.set_top(allocation_info_.top() + size_in_bytes); + DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); + + return obj; +} + + +LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) { + heap->incremental_marking()->SetOldSpacePageFlags(chunk); + return static_cast<LargePage*>(chunk); +} + + +intptr_t LargeObjectSpace::Available() { + return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available()); +} + + +bool FreeListNode::IsFreeListNode(HeapObject* object) { + Map* map = object->map(); + Heap* heap = object->GetHeap(); + return map == heap->raw_unchecked_free_space_map() || + map == heap->raw_unchecked_one_pointer_filler_map() || + map == heap->raw_unchecked_two_pointer_filler_map(); +} +} +} // namespace v8::internal + +#endif // V8_HEAP_SPACES_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/store-buffer.cc nodejs-0.11.15/deps/v8/src/heap/store-buffer.cc --- nodejs-0.11.13/deps/v8/src/heap/store-buffer.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/store-buffer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,589 @@ +// Copyright 2011 the V8 project authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <algorithm> + +#include "src/v8.h" + +#include "src/base/atomicops.h" +#include "src/counters.h" +#include "src/heap/store-buffer-inl.h" + +namespace v8 { +namespace internal { + +StoreBuffer::StoreBuffer(Heap* heap) + : heap_(heap), + start_(NULL), + limit_(NULL), + old_start_(NULL), + old_limit_(NULL), + old_top_(NULL), + old_reserved_limit_(NULL), + old_buffer_is_sorted_(false), + old_buffer_is_filtered_(false), + during_gc_(false), + store_buffer_rebuilding_enabled_(false), + callback_(NULL), + may_move_store_buffer_entries_(true), + virtual_memory_(NULL), + hash_set_1_(NULL), + hash_set_2_(NULL), + hash_sets_are_empty_(true) {} + + +void StoreBuffer::SetUp() { + virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3); + uintptr_t start_as_int = + reinterpret_cast<uintptr_t>(virtual_memory_->address()); + start_ = + reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); + limit_ = start_ + (kStoreBufferSize / kPointerSize); + + old_virtual_memory_ = + new base::VirtualMemory(kOldStoreBufferLength * kPointerSize); + old_top_ = old_start_ = + reinterpret_cast<Address*>(old_virtual_memory_->address()); + // Don't know the alignment requirements of the OS, but it is certainly not + // less than 0xfff. 
+ DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0); + int initial_length = + static_cast<int>(base::OS::CommitPageSize() / kPointerSize); + DCHECK(initial_length > 0); + DCHECK(initial_length <= kOldStoreBufferLength); + old_limit_ = old_start_ + initial_length; + old_reserved_limit_ = old_start_ + kOldStoreBufferLength; + + CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_), + (old_limit_ - old_start_) * kPointerSize, + false)); + + DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); + DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); + Address* vm_limit = reinterpret_cast<Address*>( + reinterpret_cast<char*>(virtual_memory_->address()) + + virtual_memory_->size()); + DCHECK(start_ <= vm_limit); + DCHECK(limit_ <= vm_limit); + USE(vm_limit); + DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); + DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == + 0); + + CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_), + kStoreBufferSize, + false)); // Not executable. + heap_->public_set_store_buffer_top(start_); + + hash_set_1_ = new uintptr_t[kHashSetLength]; + hash_set_2_ = new uintptr_t[kHashSetLength]; + hash_sets_are_empty_ = false; + + ClearFilteringHashSets(); +} + + +void StoreBuffer::TearDown() { + delete virtual_memory_; + delete old_virtual_memory_; + delete[] hash_set_1_; + delete[] hash_set_2_; + old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL; + start_ = limit_ = NULL; + heap_->public_set_store_buffer_top(start_); +} + + +void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { + isolate->heap()->store_buffer()->Compact(); + isolate->counters()->store_buffer_overflows()->Increment(); +} + + +void StoreBuffer::Uniq() { + // Remove adjacent duplicates and cells that do not point at new space. 
+ Address previous = NULL; + Address* write = old_start_; + DCHECK(may_move_store_buffer_entries_); + for (Address* read = old_start_; read < old_top_; read++) { + Address current = *read; + if (current != previous) { + if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { + *write++ = current; + } + } + previous = current; + } + old_top_ = write; +} + + +bool StoreBuffer::SpaceAvailable(intptr_t space_needed) { + return old_limit_ - old_top_ >= space_needed; +} + + +void StoreBuffer::EnsureSpace(intptr_t space_needed) { + while (old_limit_ - old_top_ < space_needed && + old_limit_ < old_reserved_limit_) { + size_t grow = old_limit_ - old_start_; // Double size. + CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_), + grow * kPointerSize, false)); + old_limit_ += grow; + } + + if (SpaceAvailable(space_needed)) return; + + if (old_buffer_is_filtered_) return; + DCHECK(may_move_store_buffer_entries_); + Compact(); + + old_buffer_is_filtered_ = true; + bool page_has_scan_on_scavenge_flag = false; + + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + while ((chunk = it.next()) != NULL) { + if (chunk->scan_on_scavenge()) { + page_has_scan_on_scavenge_flag = true; + break; + } + } + + if (page_has_scan_on_scavenge_flag) { + Filter(MemoryChunk::SCAN_ON_SCAVENGE); + } + + if (SpaceAvailable(space_needed)) return; + + // Sample 1 entry in 97 and filter out the pages where we estimate that more + // than 1 in 8 pointers are to new space. 
+ static const int kSampleFinenesses = 5; + static const struct Samples { + int prime_sample_step; + int threshold; + } samples[kSampleFinenesses] = { + {97, ((Page::kPageSize / kPointerSize) / 97) / 8}, + {23, ((Page::kPageSize / kPointerSize) / 23) / 16}, + {7, ((Page::kPageSize / kPointerSize) / 7) / 32}, + {3, ((Page::kPageSize / kPointerSize) / 3) / 256}, + {1, 0}}; + for (int i = 0; i < kSampleFinenesses; i++) { + ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold); + // As a last resort we mark all pages as being exempt from the store buffer. + DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_); + if (SpaceAvailable(space_needed)) return; + } + UNREACHABLE(); +} + + +// Sample the store buffer to see if some pages are taking up a lot of space +// in the store buffer. +void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) { + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + while ((chunk = it.next()) != NULL) { + chunk->set_store_buffer_counter(0); + } + bool created_new_scan_on_scavenge_pages = false; + MemoryChunk* previous_chunk = NULL; + for (Address* p = old_start_; p < old_top_; p += prime_sample_step) { + Address addr = *p; + MemoryChunk* containing_chunk = NULL; + if (previous_chunk != NULL && previous_chunk->Contains(addr)) { + containing_chunk = previous_chunk; + } else { + containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr); + } + int old_counter = containing_chunk->store_buffer_counter(); + if (old_counter >= threshold) { + containing_chunk->set_scan_on_scavenge(true); + created_new_scan_on_scavenge_pages = true; + } + containing_chunk->set_store_buffer_counter(old_counter + 1); + previous_chunk = containing_chunk; + } + if (created_new_scan_on_scavenge_pages) { + Filter(MemoryChunk::SCAN_ON_SCAVENGE); + } + old_buffer_is_filtered_ = true; +} + + +void StoreBuffer::Filter(int flag) { + Address* new_top = old_start_; + MemoryChunk* previous_chunk = NULL; + for (Address* p = 
old_start_; p < old_top_; p++) { + Address addr = *p; + MemoryChunk* containing_chunk = NULL; + if (previous_chunk != NULL && previous_chunk->Contains(addr)) { + containing_chunk = previous_chunk; + } else { + containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr); + previous_chunk = containing_chunk; + } + if (!containing_chunk->IsFlagSet(flag)) { + *new_top++ = addr; + } + } + old_top_ = new_top; + + // Filtering hash sets are inconsistent with the store buffer after this + // operation. + ClearFilteringHashSets(); +} + + +void StoreBuffer::SortUniq() { + Compact(); + if (old_buffer_is_sorted_) return; + std::sort(old_start_, old_top_); + Uniq(); + + old_buffer_is_sorted_ = true; + + // Filtering hash sets are inconsistent with the store buffer after this + // operation. + ClearFilteringHashSets(); +} + + +bool StoreBuffer::PrepareForIteration() { + Compact(); + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + bool page_has_scan_on_scavenge_flag = false; + while ((chunk = it.next()) != NULL) { + if (chunk->scan_on_scavenge()) { + page_has_scan_on_scavenge_flag = true; + break; + } + } + + if (page_has_scan_on_scavenge_flag) { + Filter(MemoryChunk::SCAN_ON_SCAVENGE); + } + + // Filtering hash sets are inconsistent with the store buffer after + // iteration. + ClearFilteringHashSets(); + + return page_has_scan_on_scavenge_flag; +} + + +#ifdef DEBUG +void StoreBuffer::Clean() { + ClearFilteringHashSets(); + Uniq(); // Also removes things that no longer point to new space. 
+ EnsureSpace(kStoreBufferSize / 2); +} + + +static Address* in_store_buffer_1_element_cache = NULL; + + +bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { + if (!FLAG_enable_slow_asserts) return true; + if (in_store_buffer_1_element_cache != NULL && + *in_store_buffer_1_element_cache == cell_address) { + return true; + } + Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); + for (Address* current = top - 1; current >= start_; current--) { + if (*current == cell_address) { + in_store_buffer_1_element_cache = current; + return true; + } + } + for (Address* current = old_top_ - 1; current >= old_start_; current--) { + if (*current == cell_address) { + in_store_buffer_1_element_cache = current; + return true; + } + } + return false; +} +#endif + + +void StoreBuffer::ClearFilteringHashSets() { + if (!hash_sets_are_empty_) { + memset(reinterpret_cast<void*>(hash_set_1_), 0, + sizeof(uintptr_t) * kHashSetLength); + memset(reinterpret_cast<void*>(hash_set_2_), 0, + sizeof(uintptr_t) * kHashSetLength); + hash_sets_are_empty_ = true; + } +} + + +void StoreBuffer::GCPrologue() { + ClearFilteringHashSets(); + during_gc_ = true; +} + + +#ifdef VERIFY_HEAP +void StoreBuffer::VerifyPointers(LargeObjectSpace* space) { + LargeObjectIterator it(space); + for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { + if (object->IsFixedArray()) { + Address slot_address = object->address(); + Address end = object->address() + object->Size(); + + while (slot_address < end) { + HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); + // When we are not in GC the Heap::InNewSpace() predicate + // checks that pointers which satisfy predicate point into + // the active semispace. 
+ Object* object = reinterpret_cast<Object*>( + base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); + heap_->InNewSpace(object); + slot_address += kPointerSize; + } + } + } +} +#endif + + +void StoreBuffer::Verify() { +#ifdef VERIFY_HEAP + VerifyPointers(heap_->lo_space()); +#endif +} + + +void StoreBuffer::GCEpilogue() { + during_gc_ = false; +#ifdef VERIFY_HEAP + if (FLAG_verify_heap) { + Verify(); + } +#endif +} + + +void StoreBuffer::FindPointersToNewSpaceInRegion( + Address start, Address end, ObjectSlotCallback slot_callback, + bool clear_maps) { + for (Address slot_address = start; slot_address < end; + slot_address += kPointerSize) { + Object** slot = reinterpret_cast<Object**>(slot_address); + Object* object = reinterpret_cast<Object*>( + base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); + if (heap_->InNewSpace(object)) { + HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); + DCHECK(heap_object->IsHeapObject()); + // The new space object was not promoted if it still contains a map + // pointer. Clear the map field now lazily. 
+ if (clear_maps) ClearDeadObject(heap_object); + slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); + object = reinterpret_cast<Object*>( + base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); + if (heap_->InNewSpace(object)) { + EnterDirectlyIntoStoreBuffer(slot_address); + } + } + } +} + + +void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback, + bool clear_maps) { + Address* limit = old_top_; + old_top_ = old_start_; + { + DontMoveStoreBufferEntriesScope scope(this); + for (Address* current = old_start_; current < limit; current++) { +#ifdef DEBUG + Address* saved_top = old_top_; +#endif + Object** slot = reinterpret_cast<Object**>(*current); + Object* object = reinterpret_cast<Object*>( + base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); + if (heap_->InFromSpace(object)) { + HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); + // The new space object was not promoted if it still contains a map + // pointer. Clear the map field now lazily. 
+ if (clear_maps) ClearDeadObject(heap_object); + slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); + object = reinterpret_cast<Object*>( + base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); + if (heap_->InNewSpace(object)) { + EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot)); + } + } + DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top); + } + } +} + + +void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { + IteratePointersToNewSpace(slot_callback, false); +} + + +void StoreBuffer::IteratePointersToNewSpaceAndClearMaps( + ObjectSlotCallback slot_callback) { + IteratePointersToNewSpace(slot_callback, true); +} + + +void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback, + bool clear_maps) { + // We do not sort or remove duplicated entries from the store buffer because + // we expect that callback will rebuild the store buffer thus removing + // all duplicates and pointers to old space. + bool some_pages_to_scan = PrepareForIteration(); + + // TODO(gc): we want to skip slots on evacuation candidates + // but we can't simply figure that out from slot address + // because slot can belong to a large object. + IteratePointersInStoreBuffer(slot_callback, clear_maps); + + // We are done scanning all the pointers that were in the store buffer, but + // there may be some pages marked scan_on_scavenge that have pointers to new + // space that are not in the store buffer. We must scan them now. As we + // scan, the surviving pointers to new space will be added to the store + // buffer. If there are still a lot of pointers to new space then we will + // keep the scan_on_scavenge flag on the page and discard the pointers that + // were added to the store buffer. If there are not many pointers to new + // space left on the page we will keep the pointers in the store buffer and + // remove the flag from the page. 
+ if (some_pages_to_scan) { + if (callback_ != NULL) { + (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent); + } + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + while ((chunk = it.next()) != NULL) { + if (chunk->scan_on_scavenge()) { + chunk->set_scan_on_scavenge(false); + if (callback_ != NULL) { + (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent); + } + if (chunk->owner() == heap_->lo_space()) { + LargePage* large_page = reinterpret_cast<LargePage*>(chunk); + HeapObject* array = large_page->GetObject(); + DCHECK(array->IsFixedArray()); + Address start = array->address(); + Address end = start + array->Size(); + FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps); + } else { + Page* page = reinterpret_cast<Page*>(chunk); + PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); + Address start = page->area_start(); + Address end = page->area_end(); + if (owner == heap_->map_space()) { + DCHECK(page->WasSweptPrecisely()); + HeapObjectIterator iterator(page, NULL); + for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; + heap_object = iterator.Next()) { + // We skip free space objects. + if (!heap_object->IsFiller()) { + FindPointersToNewSpaceInRegion( + heap_object->address() + HeapObject::kHeaderSize, + heap_object->address() + heap_object->Size(), slot_callback, + clear_maps); + } + } + } else { + if (!page->SweepingCompleted()) { + heap_->mark_compact_collector()->SweepInParallel(page, owner); + if (!page->SweepingCompleted()) { + // We were not able to sweep that page, i.e., a concurrent + // sweeper thread currently owns this page. + // TODO(hpayer): This may introduce a huge pause here. We + // just care about finish sweeping of the scan on scavenge page. + heap_->mark_compact_collector()->EnsureSweepingCompleted(); + } + } + // TODO(hpayer): remove the special casing and merge map and pointer + // space handling as soon as we removed conservative sweeping. 
+ CHECK(page->owner() == heap_->old_pointer_space()); + if (heap_->old_pointer_space()->swept_precisely()) { + HeapObjectIterator iterator(page, NULL); + for (HeapObject* heap_object = iterator.Next(); + heap_object != NULL; heap_object = iterator.Next()) { + // We iterate over objects that contain new space pointers only. + if (heap_object->MayContainNewSpacePointers()) { + FindPointersToNewSpaceInRegion( + heap_object->address() + HeapObject::kHeaderSize, + heap_object->address() + heap_object->Size(), + slot_callback, clear_maps); + } + } + } else { + FindPointersToNewSpaceInRegion(start, end, slot_callback, + clear_maps); + } + } + } + } + } + if (callback_ != NULL) { + (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent); + } + } +} + + +void StoreBuffer::Compact() { + Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); + + if (top == start_) return; + + // There's no check of the limit in the loop below so we check here for + // the worst case (compaction doesn't eliminate any pointers). + DCHECK(top <= limit_); + heap_->public_set_store_buffer_top(start_); + EnsureSpace(top - start_); + DCHECK(may_move_store_buffer_entries_); + // Goes through the addresses in the store buffer attempting to remove + // duplicates. In the interest of speed this is a lossy operation. Some + // duplicates will remain. We have two hash sets with different hash + // functions to reduce the number of unnecessary clashes. + hash_sets_are_empty_ = false; // Hash sets are in use. + for (Address* current = start_; current < top; current++) { + DCHECK(!heap_->cell_space()->Contains(*current)); + DCHECK(!heap_->code_space()->Contains(*current)); + DCHECK(!heap_->old_data_space()->Contains(*current)); + uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); + // Shift out the last bits including any tags. 
+ int_addr >>= kPointerSizeLog2; + // The upper part of an address is basically random because of ASLR and OS + // non-determinism, so we use only the bits within a page for hashing to + // make v8's behavior (more) deterministic. + uintptr_t hash_addr = + int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2); + int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) & + (kHashSetLength - 1)); + if (hash_set_1_[hash1] == int_addr) continue; + uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2)); + hash2 ^= hash2 >> (kHashSetLengthLog2 * 2); + hash2 &= (kHashSetLength - 1); + if (hash_set_2_[hash2] == int_addr) continue; + if (hash_set_1_[hash1] == 0) { + hash_set_1_[hash1] = int_addr; + } else if (hash_set_2_[hash2] == 0) { + hash_set_2_[hash2] = int_addr; + } else { + // Rather than slowing down we just throw away some entries. This will + // cause some duplicates to remain undetected. + hash_set_1_[hash1] = int_addr; + hash_set_2_[hash2] = 0; + } + old_buffer_is_sorted_ = false; + old_buffer_is_filtered_ = false; + *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); + DCHECK(old_top_ <= old_limit_); + } + heap_->isolate()->counters()->store_buffer_compactions()->Increment(); +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/store-buffer.h nodejs-0.11.15/deps/v8/src/heap/store-buffer.h --- nodejs-0.11.13/deps/v8/src/heap/store-buffer.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/store-buffer.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,221 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_STORE_BUFFER_H_ +#define V8_STORE_BUFFER_H_ + +#include "src/allocation.h" +#include "src/base/logging.h" +#include "src/base/platform/platform.h" +#include "src/globals.h" + +namespace v8 { +namespace internal { + +class Page; +class PagedSpace; +class StoreBuffer; + +typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to); + +typedef void (StoreBuffer::*RegionCallback)(Address start, Address end, + ObjectSlotCallback slot_callback, + bool clear_maps); + +// Used to implement the write barrier by collecting addresses of pointers +// between spaces. +class StoreBuffer { + public: + explicit StoreBuffer(Heap* heap); + + static void StoreBufferOverflow(Isolate* isolate); + + inline Address TopAddress(); + + void SetUp(); + void TearDown(); + + // This is used by the mutator to enter addresses into the store buffer. + inline void Mark(Address addr); + + // This is used by the heap traversal to enter the addresses into the store + // buffer that should still be in the store buffer after GC. It enters + // addresses directly into the old buffer because the GC starts by wiping the + // old buffer and thereafter only visits each cell once so there is no need + // to attempt to remove any dupes. During the first part of a GC we + // are using the store buffer to access the old spaces and at the same time + // we are rebuilding the store buffer using this function. There is, however + // no issue of overwriting the buffer we are iterating over, because this + // stage of the scavenge can only reduce the number of addresses in the store + // buffer (some objects are promoted so pointers to them do not need to be in + // the store buffer). The later parts of the GC scan the pages that are + // exempt from the store buffer and process the promotion queue. These steps + // can overflow this buffer. We check for this and on overflow we call the + // callback set up with the StoreBufferRebuildScope object. 
+ inline void EnterDirectlyIntoStoreBuffer(Address addr); + + // Iterates over all pointers that go from old space to new space. It will + // delete the store buffer as it starts so the callback should reenter + // surviving old-to-new pointers into the store buffer to rebuild it. + void IteratePointersToNewSpace(ObjectSlotCallback callback); + + // Same as IteratePointersToNewSpace but additonally clears maps in objects + // referenced from the store buffer that do not contain a forwarding pointer. + void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback); + + static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2); + static const int kStoreBufferSize = kStoreBufferOverflowBit; + static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address); + static const int kOldStoreBufferLength = kStoreBufferLength * 16; + static const int kHashSetLengthLog2 = 12; + static const int kHashSetLength = 1 << kHashSetLengthLog2; + + void Compact(); + + void GCPrologue(); + void GCEpilogue(); + + Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); } + Object*** Start() { return reinterpret_cast<Object***>(old_start_); } + Object*** Top() { return reinterpret_cast<Object***>(old_top_); } + void SetTop(Object*** top) { + DCHECK(top >= Start()); + DCHECK(top <= Limit()); + old_top_ = reinterpret_cast<Address*>(top); + } + + bool old_buffer_is_sorted() { return old_buffer_is_sorted_; } + bool old_buffer_is_filtered() { return old_buffer_is_filtered_; } + + // Goes through the store buffer removing pointers to things that have + // been promoted. Rebuilds the store buffer completely if it overflowed. + void SortUniq(); + + void EnsureSpace(intptr_t space_needed); + void Verify(); + + bool PrepareForIteration(); + +#ifdef DEBUG + void Clean(); + // Slow, for asserts only. 
+ bool CellIsInStoreBuffer(Address cell); +#endif + + void Filter(int flag); + + private: + Heap* heap_; + + // The store buffer is divided up into a new buffer that is constantly being + // filled by mutator activity and an old buffer that is filled with the data + // from the new buffer after compression. + Address* start_; + Address* limit_; + + Address* old_start_; + Address* old_limit_; + Address* old_top_; + Address* old_reserved_limit_; + base::VirtualMemory* old_virtual_memory_; + + bool old_buffer_is_sorted_; + bool old_buffer_is_filtered_; + bool during_gc_; + // The garbage collector iterates over many pointers to new space that are not + // handled by the store buffer. This flag indicates whether the pointers + // found by the callbacks should be added to the store buffer or not. + bool store_buffer_rebuilding_enabled_; + StoreBufferCallback callback_; + bool may_move_store_buffer_entries_; + + base::VirtualMemory* virtual_memory_; + + // Two hash sets used for filtering. + // If address is in the hash set then it is guaranteed to be in the + // old part of the store buffer. + uintptr_t* hash_set_1_; + uintptr_t* hash_set_2_; + bool hash_sets_are_empty_; + + void ClearFilteringHashSets(); + + bool SpaceAvailable(intptr_t space_needed); + void Uniq(); + void ExemptPopularPages(int prime_sample_step, int threshold); + + // Set the map field of the object to NULL if contains a map. + inline void ClearDeadObject(HeapObject* object); + + void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps); + + void FindPointersToNewSpaceInRegion(Address start, Address end, + ObjectSlotCallback slot_callback, + bool clear_maps); + + // For each region of pointers on a page in use from an old space call + // visit_pointer_region callback. + // If either visit_pointer_region or callback can cause an allocation + // in old space and changes in allocation watermark then + // can_preallocate_during_iteration should be set to true. 
+ void IteratePointersOnPage(PagedSpace* space, Page* page, + RegionCallback region_callback, + ObjectSlotCallback slot_callback); + + void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback, + bool clear_maps); + +#ifdef VERIFY_HEAP + void VerifyPointers(LargeObjectSpace* space); +#endif + + friend class StoreBufferRebuildScope; + friend class DontMoveStoreBufferEntriesScope; +}; + + +class StoreBufferRebuildScope { + public: + explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer, + StoreBufferCallback callback) + : store_buffer_(store_buffer), + stored_state_(store_buffer->store_buffer_rebuilding_enabled_), + stored_callback_(store_buffer->callback_) { + store_buffer_->store_buffer_rebuilding_enabled_ = true; + store_buffer_->callback_ = callback; + (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent); + } + + ~StoreBufferRebuildScope() { + store_buffer_->callback_ = stored_callback_; + store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_; + } + + private: + StoreBuffer* store_buffer_; + bool stored_state_; + StoreBufferCallback stored_callback_; +}; + + +class DontMoveStoreBufferEntriesScope { + public: + explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer) + : store_buffer_(store_buffer), + stored_state_(store_buffer->may_move_store_buffer_entries_) { + store_buffer_->may_move_store_buffer_entries_ = false; + } + + ~DontMoveStoreBufferEntriesScope() { + store_buffer_->may_move_store_buffer_entries_ = stored_state_; + } + + private: + StoreBuffer* store_buffer_; + bool stored_state_; +}; +} +} // namespace v8::internal + +#endif // V8_STORE_BUFFER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/store-buffer-inl.h nodejs-0.11.15/deps/v8/src/heap/store-buffer-inl.h --- nodejs-0.11.13/deps/v8/src/heap/store-buffer-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/store-buffer-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,63 @@ +// Copyright 2011 the V8 project authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_STORE_BUFFER_INL_H_ +#define V8_STORE_BUFFER_INL_H_ + +#include "src/heap/store-buffer.h" + +namespace v8 { +namespace internal { + +Address StoreBuffer::TopAddress() { + return reinterpret_cast<Address>(heap_->store_buffer_top_address()); +} + + +void StoreBuffer::Mark(Address addr) { + DCHECK(!heap_->cell_space()->Contains(addr)); + DCHECK(!heap_->code_space()->Contains(addr)); + DCHECK(!heap_->old_data_space()->Contains(addr)); + Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); + *top++ = addr; + heap_->public_set_store_buffer_top(top); + if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) { + DCHECK(top == limit_); + Compact(); + } else { + DCHECK(top < limit_); + } +} + + +void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) { + if (store_buffer_rebuilding_enabled_) { + SLOW_DCHECK(!heap_->cell_space()->Contains(addr) && + !heap_->code_space()->Contains(addr) && + !heap_->old_data_space()->Contains(addr) && + !heap_->new_space()->Contains(addr)); + Address* top = old_top_; + *top++ = addr; + old_top_ = top; + old_buffer_is_sorted_ = false; + old_buffer_is_filtered_ = false; + if (top >= old_limit_) { + DCHECK(callback_ != NULL); + (*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr), + kStoreBufferFullEvent); + } + } +} + + +void StoreBuffer::ClearDeadObject(HeapObject* object) { + Address& map_field = Memory::Address_at(object->address()); + if (heap_->map_space()->Contains(map_field)) { + map_field = NULL; + } +} +} +} // namespace v8::internal + +#endif // V8_STORE_BUFFER_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap/sweeper-thread.cc nodejs-0.11.15/deps/v8/src/heap/sweeper-thread.cc --- nodejs-0.11.13/deps/v8/src/heap/sweeper-thread.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/sweeper-thread.cc 2015-01-20 21:22:17.000000000 
+0000 @@ -0,0 +1,82 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/heap/sweeper-thread.h" + +#include "src/v8.h" + +#include "src/isolate.h" +#include "src/v8threads.h" + +namespace v8 { +namespace internal { + +static const int kSweeperThreadStackSize = 64 * KB; + +SweeperThread::SweeperThread(Isolate* isolate) + : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)), + isolate_(isolate), + heap_(isolate->heap()), + collector_(heap_->mark_compact_collector()), + start_sweeping_semaphore_(0), + end_sweeping_semaphore_(0), + stop_semaphore_(0) { + DCHECK(!FLAG_job_based_sweeping); + base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false)); +} + + +void SweeperThread::Run() { + Isolate::SetIsolateThreadLocals(isolate_, NULL); + DisallowHeapAllocation no_allocation; + DisallowHandleAllocation no_handles; + DisallowHandleDereference no_deref; + + while (true) { + start_sweeping_semaphore_.Wait(); + + if (base::Acquire_Load(&stop_thread_)) { + stop_semaphore_.Signal(); + return; + } + + collector_->SweepInParallel(heap_->old_data_space(), 0); + collector_->SweepInParallel(heap_->old_pointer_space(), 0); + end_sweeping_semaphore_.Signal(); + } +} + + +void SweeperThread::Stop() { + base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(true)); + start_sweeping_semaphore_.Signal(); + stop_semaphore_.Wait(); + Join(); +} + + +void SweeperThread::StartSweeping() { start_sweeping_semaphore_.Signal(); } + + +void SweeperThread::WaitForSweeperThread() { end_sweeping_semaphore_.Wait(); } + + +bool SweeperThread::SweepingCompleted() { + bool value = end_sweeping_semaphore_.WaitFor(base::TimeDelta::FromSeconds(0)); + if (value) { + end_sweeping_semaphore_.Signal(); + } + return value; +} + + +int SweeperThread::NumberOfThreads(int max_available) { + if (!FLAG_concurrent_sweeping && 
!FLAG_parallel_sweeping) return 0; + if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads; + if (FLAG_concurrent_sweeping) return max_available - 1; + DCHECK(FLAG_parallel_sweeping); + return max_available; +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap/sweeper-thread.h nodejs-0.11.15/deps/v8/src/heap/sweeper-thread.h --- nodejs-0.11.13/deps/v8/src/heap/sweeper-thread.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap/sweeper-thread.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_HEAP_SWEEPER_THREAD_H_ +#define V8_HEAP_SWEEPER_THREAD_H_ + +#include "src/base/atomicops.h" +#include "src/base/platform/platform.h" +#include "src/flags.h" +#include "src/utils.h" + +#include "src/heap/spaces.h" + +#include "src/heap/heap.h" + +namespace v8 { +namespace internal { + +class SweeperThread : public base::Thread { + public: + explicit SweeperThread(Isolate* isolate); + ~SweeperThread() {} + + void Run(); + void Stop(); + void StartSweeping(); + void WaitForSweeperThread(); + bool SweepingCompleted(); + + static int NumberOfThreads(int max_available); + + private: + Isolate* isolate_; + Heap* heap_; + MarkCompactCollector* collector_; + base::Semaphore start_sweeping_semaphore_; + base::Semaphore end_sweeping_semaphore_; + base::Semaphore stop_semaphore_; + volatile base::AtomicWord stop_thread_; +}; +} +} // namespace v8::internal + +#endif // V8_HEAP_SWEEPER_THREAD_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap.cc nodejs-0.11.15/deps/v8/src/heap.cc --- nodejs-0.11.13/deps/v8/src/heap.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,7876 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "v8.h" - -#include "accessors.h" -#include "api.h" -#include "bootstrapper.h" -#include "codegen.h" -#include "compilation-cache.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "global-handles.h" -#include "heap-profiler.h" -#include "incremental-marking.h" -#include "isolate-inl.h" -#include "mark-compact.h" -#include "natives.h" -#include "objects-visiting.h" -#include "objects-visiting-inl.h" -#include "once.h" -#include "runtime-profiler.h" -#include "scopeinfo.h" -#include "snapshot.h" -#include "store-buffer.h" -#include "utils/random-number-generator.h" -#include "v8conversions.h" -#include "v8threads.h" -#include "v8utils.h" -#include "vm-state-inl.h" -#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP -#include "regexp-macro-assembler.h" -#include "arm/regexp-macro-assembler-arm.h" -#endif -#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP -#include "regexp-macro-assembler.h" -#include "mips/regexp-macro-assembler-mips.h" -#endif - -namespace v8 { -namespace internal { - - -Heap::Heap() - : isolate_(NULL), - code_range_size_(kIs64BitArch ? 512 * MB : 0), -// semispace_size_ should be a power of 2 and old_generation_size_ should be -// a multiple of Page::kPageSize. - reserved_semispace_size_(8 * (kPointerSize / 4) * MB), - max_semispace_size_(8 * (kPointerSize / 4) * MB), - initial_semispace_size_(Page::kPageSize), - max_old_generation_size_(700ul * (kPointerSize / 4) * MB), - max_executable_size_(256ul * (kPointerSize / 4) * MB), -// Variables set based on semispace_size_ and old_generation_size_ in -// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_) -// Will be 4 * reserved_semispace_size_ to ensure that young -// generation can be aligned to its size. 
- maximum_committed_(0), - survived_since_last_expansion_(0), - sweep_generation_(0), - always_allocate_scope_depth_(0), - linear_allocation_scope_depth_(0), - contexts_disposed_(0), - global_ic_age_(0), - flush_monomorphic_ics_(false), - scan_on_scavenge_pages_(0), - new_space_(this), - old_pointer_space_(NULL), - old_data_space_(NULL), - code_space_(NULL), - map_space_(NULL), - cell_space_(NULL), - property_cell_space_(NULL), - lo_space_(NULL), - gc_state_(NOT_IN_GC), - gc_post_processing_depth_(0), - ms_count_(0), - gc_count_(0), - remembered_unmapped_pages_index_(0), - unflattened_strings_length_(0), -#ifdef DEBUG - allocation_timeout_(0), -#endif // DEBUG - new_space_high_promotion_mode_active_(false), - old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit), - size_of_old_gen_at_last_old_space_gc_(0), - external_allocation_limit_(0), - amount_of_external_allocated_memory_(0), - amount_of_external_allocated_memory_at_last_global_gc_(0), - old_gen_exhausted_(false), - inline_allocation_disabled_(false), - store_buffer_rebuilder_(store_buffer()), - hidden_string_(NULL), - gc_safe_size_of_old_object_(NULL), - total_regexp_code_generated_(0), - tracer_(NULL), - young_survivors_after_last_gc_(0), - high_survival_rate_period_length_(0), - low_survival_rate_period_length_(0), - survival_rate_(0), - previous_survival_rate_trend_(Heap::STABLE), - survival_rate_trend_(Heap::STABLE), - max_gc_pause_(0.0), - total_gc_time_ms_(0.0), - max_alive_after_gc_(0), - min_in_mutator_(kMaxInt), - alive_after_last_gc_(0), - last_gc_end_timestamp_(0.0), - marking_time_(0.0), - sweeping_time_(0.0), - mark_compact_collector_(this), - store_buffer_(this), - marking_(this), - incremental_marking_(this), - number_idle_notifications_(0), - last_idle_notification_gc_count_(0), - last_idle_notification_gc_count_init_(false), - mark_sweeps_since_idle_round_started_(0), - gc_count_at_last_idle_gc_(0), - scavenges_since_last_idle_round_(kIdleScavengeThreshold), - 
full_codegen_bytes_generated_(0), - crankshaft_codegen_bytes_generated_(0), - gcs_since_last_deopt_(0), -#ifdef VERIFY_HEAP - no_weak_object_verification_scope_depth_(0), -#endif - allocation_sites_scratchpad_length_(0), - promotion_queue_(this), - configured_(false), - external_string_table_(this), - chunks_queued_for_free_(NULL), - gc_callbacks_depth_(0) { - // Allow build-time customization of the max semispace size. Building - // V8 with snapshots and a non-default max semispace size is much - // easier if you can define it as part of the build environment. -#if defined(V8_MAX_SEMISPACE_SIZE) - max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; -#endif - - // Ensure old_generation_size_ is a multiple of kPageSize. - ASSERT(MB >= Page::kPageSize); - - intptr_t max_virtual = OS::MaxVirtualMemory(); - - if (max_virtual > 0) { - if (code_range_size_ > 0) { - // Reserve no more than 1/8 of the memory for the code range. - code_range_size_ = Min(code_range_size_, max_virtual >> 3); - } - } - - memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); - native_contexts_list_ = NULL; - array_buffers_list_ = Smi::FromInt(0); - allocation_sites_list_ = Smi::FromInt(0); - // Put a dummy entry in the remembered pages so we can find the list the - // minidump even if there are no real unmapped pages. 
- RememberUnmappedPage(NULL, false); - - ClearObjectStats(true); -} - - -intptr_t Heap::Capacity() { - if (!HasBeenSetUp()) return 0; - - return new_space_.Capacity() + - old_pointer_space_->Capacity() + - old_data_space_->Capacity() + - code_space_->Capacity() + - map_space_->Capacity() + - cell_space_->Capacity() + - property_cell_space_->Capacity(); -} - - -intptr_t Heap::CommittedMemory() { - if (!HasBeenSetUp()) return 0; - - return new_space_.CommittedMemory() + - old_pointer_space_->CommittedMemory() + - old_data_space_->CommittedMemory() + - code_space_->CommittedMemory() + - map_space_->CommittedMemory() + - cell_space_->CommittedMemory() + - property_cell_space_->CommittedMemory() + - lo_space_->Size(); -} - - -size_t Heap::CommittedPhysicalMemory() { - if (!HasBeenSetUp()) return 0; - - return new_space_.CommittedPhysicalMemory() + - old_pointer_space_->CommittedPhysicalMemory() + - old_data_space_->CommittedPhysicalMemory() + - code_space_->CommittedPhysicalMemory() + - map_space_->CommittedPhysicalMemory() + - cell_space_->CommittedPhysicalMemory() + - property_cell_space_->CommittedPhysicalMemory() + - lo_space_->CommittedPhysicalMemory(); -} - - -intptr_t Heap::CommittedMemoryExecutable() { - if (!HasBeenSetUp()) return 0; - - return isolate()->memory_allocator()->SizeExecutable(); -} - - -void Heap::UpdateMaximumCommitted() { - if (!HasBeenSetUp()) return; - - intptr_t current_committed_memory = CommittedMemory(); - if (current_committed_memory > maximum_committed_) { - maximum_committed_ = current_committed_memory; - } -} - - -intptr_t Heap::Available() { - if (!HasBeenSetUp()) return 0; - - return new_space_.Available() + - old_pointer_space_->Available() + - old_data_space_->Available() + - code_space_->Available() + - map_space_->Available() + - cell_space_->Available() + - property_cell_space_->Available(); -} - - -bool Heap::HasBeenSetUp() { - return old_pointer_space_ != NULL && - old_data_space_ != NULL && - code_space_ != NULL && - 
map_space_ != NULL && - cell_space_ != NULL && - property_cell_space_ != NULL && - lo_space_ != NULL; -} - - -int Heap::GcSafeSizeOfOldObject(HeapObject* object) { - if (IntrusiveMarking::IsMarked(object)) { - return IntrusiveMarking::SizeOfMarkedObject(object); - } - return object->SizeFromMap(object->map()); -} - - -GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space, - const char** reason) { - // Is global GC requested? - if (space != NEW_SPACE) { - isolate_->counters()->gc_compactor_caused_by_request()->Increment(); - *reason = "GC in old space requested"; - return MARK_COMPACTOR; - } - - if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { - *reason = "GC in old space forced by flags"; - return MARK_COMPACTOR; - } - - // Is enough data promoted to justify a global GC? - if (OldGenerationAllocationLimitReached()) { - isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment(); - *reason = "promotion limit reached"; - return MARK_COMPACTOR; - } - - // Have allocation in OLD and LO failed? - if (old_gen_exhausted_) { - isolate_->counters()-> - gc_compactor_caused_by_oldspace_exhaustion()->Increment(); - *reason = "old generations exhausted"; - return MARK_COMPACTOR; - } - - // Is there enough space left in OLD to guarantee that a scavenge can - // succeed? - // - // Note that MemoryAllocator->MaxAvailable() undercounts the memory available - // for object promotion. It counts only the bytes that the memory - // allocator has not yet allocated from the OS and assigned to any space, - // and does not count available bytes already in the old space or code - // space. Undercounting is safe---we may get an unrequested full GC when - // a scavenge would have succeeded. 
- if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) { - isolate_->counters()-> - gc_compactor_caused_by_oldspace_exhaustion()->Increment(); - *reason = "scavenge might not succeed"; - return MARK_COMPACTOR; - } - - // Default - *reason = NULL; - return SCAVENGER; -} - - -// TODO(1238405): Combine the infrastructure for --heap-stats and -// --log-gc to avoid the complicated preprocessor and flag testing. -void Heap::ReportStatisticsBeforeGC() { - // Heap::ReportHeapStatistics will also log NewSpace statistics when - // compiled --log-gc is set. The following logic is used to avoid - // double logging. -#ifdef DEBUG - if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics(); - if (FLAG_heap_stats) { - ReportHeapStatistics("Before GC"); - } else if (FLAG_log_gc) { - new_space_.ReportStatistics(); - } - if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms(); -#else - if (FLAG_log_gc) { - new_space_.CollectStatistics(); - new_space_.ReportStatistics(); - new_space_.ClearHistograms(); - } -#endif // DEBUG -} - - -void Heap::PrintShortHeapStatistics() { - if (!FLAG_trace_gc_verbose) return; - PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB\n", - isolate_->memory_allocator()->Size() / KB, - isolate_->memory_allocator()->Available() / KB); - PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - new_space_.Size() / KB, - new_space_.Available() / KB, - new_space_.CommittedMemory() / KB); - PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - old_pointer_space_->SizeOfObjects() / KB, - old_pointer_space_->Available() / KB, - old_pointer_space_->CommittedMemory() / KB); - PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - 
old_data_space_->SizeOfObjects() / KB, - old_data_space_->Available() / KB, - old_data_space_->CommittedMemory() / KB); - PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - code_space_->SizeOfObjects() / KB, - code_space_->Available() / KB, - code_space_->CommittedMemory() / KB); - PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - map_space_->SizeOfObjects() / KB, - map_space_->Available() / KB, - map_space_->CommittedMemory() / KB); - PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - cell_space_->SizeOfObjects() / KB, - cell_space_->Available() / KB, - cell_space_->CommittedMemory() / KB); - PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - property_cell_space_->SizeOfObjects() / KB, - property_cell_space_->Available() / KB, - property_cell_space_->CommittedMemory() / KB); - PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - lo_space_->SizeOfObjects() / KB, - lo_space_->Available() / KB, - lo_space_->CommittedMemory() / KB); - PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB" - ", available: %6" V8_PTR_PREFIX "d KB" - ", committed: %6" V8_PTR_PREFIX "d KB\n", - this->SizeOfObjects() / KB, - this->Available() / KB, - this->CommittedMemory() / KB); - PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n", - static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB)); - PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_); -} - - -// TODO(1238405): Combine the infrastructure for --heap-stats and -// --log-gc to avoid the complicated preprocessor and flag testing. 
-void Heap::ReportStatisticsAfterGC() { - // Similar to the before GC, we use some complicated logic to ensure that - // NewSpace statistics are logged exactly once when --log-gc is turned on. -#if defined(DEBUG) - if (FLAG_heap_stats) { - new_space_.CollectStatistics(); - ReportHeapStatistics("After GC"); - } else if (FLAG_log_gc) { - new_space_.ReportStatistics(); - } -#else - if (FLAG_log_gc) new_space_.ReportStatistics(); -#endif // DEBUG -} - - -void Heap::GarbageCollectionPrologue() { - { AllowHeapAllocation for_the_first_part_of_prologue; - ClearJSFunctionResultCaches(); - gc_count_++; - unflattened_strings_length_ = 0; - - if (FLAG_flush_code && FLAG_flush_code_incrementally) { - mark_compact_collector()->EnableCodeFlushing(true); - } - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Verify(); - } -#endif - } - - UpdateMaximumCommitted(); - -#ifdef DEBUG - ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); - - if (FLAG_gc_verbose) Print(); - - ReportStatisticsBeforeGC(); -#endif // DEBUG - - store_buffer()->GCPrologue(); - - if (isolate()->concurrent_osr_enabled()) { - isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs(); - } -} - - -intptr_t Heap::SizeOfObjects() { - intptr_t total = 0; - AllSpaces spaces(this); - for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { - total += space->SizeOfObjects(); - } - return total; -} - - -void Heap::ClearAllICsByKind(Code::Kind kind) { - HeapObjectIterator it(code_space()); - - for (Object* object = it.Next(); object != NULL; object = it.Next()) { - Code* code = Code::cast(object); - Code::Kind current_kind = code->kind(); - if (current_kind == Code::FUNCTION || - current_kind == Code::OPTIMIZED_FUNCTION) { - code->ClearInlineCaches(kind); - } - } -} - - -void Heap::RepairFreeListsAfterBoot() { - PagedSpaces spaces(this); - for (PagedSpace* space = spaces.next(); - space != NULL; - space = spaces.next()) { - space->RepairFreeListsAfterBoot(); - } -} - - -void 
Heap::ProcessPretenuringFeedback() { - if (FLAG_allocation_site_pretenuring) { - int tenure_decisions = 0; - int dont_tenure_decisions = 0; - int allocation_mementos_found = 0; - int allocation_sites = 0; - int active_allocation_sites = 0; - - // If the scratchpad overflowed, we have to iterate over the allocation - // sites list. - bool use_scratchpad = - allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize; - - int i = 0; - Object* list_element = allocation_sites_list(); - bool trigger_deoptimization = false; - while (use_scratchpad ? - i < allocation_sites_scratchpad_length_ : - list_element->IsAllocationSite()) { - AllocationSite* site = use_scratchpad ? - AllocationSite::cast(allocation_sites_scratchpad()->get(i)) : - AllocationSite::cast(list_element); - allocation_mementos_found += site->memento_found_count(); - if (site->memento_found_count() > 0) { - active_allocation_sites++; - } - if (site->DigestPretenuringFeedback()) trigger_deoptimization = true; - if (site->GetPretenureMode() == TENURED) { - tenure_decisions++; - } else { - dont_tenure_decisions++; - } - allocation_sites++; - if (use_scratchpad) { - i++; - } else { - list_element = site->weak_next(); - } - } - - if (trigger_deoptimization) { - isolate_->stack_guard()->DeoptMarkedAllocationSites(); - } - - FlushAllocationSitesScratchpad(); - - if (FLAG_trace_pretenuring_statistics && - (allocation_mementos_found > 0 || - tenure_decisions > 0 || - dont_tenure_decisions > 0)) { - PrintF("GC: (mode, #visited allocation sites, #active allocation sites, " - "#mementos, #tenure decisions, #donttenure decisions) " - "(%s, %d, %d, %d, %d, %d)\n", - use_scratchpad ? 
"use scratchpad" : "use list", - allocation_sites, - active_allocation_sites, - allocation_mementos_found, - tenure_decisions, - dont_tenure_decisions); - } - } -} - - -void Heap::DeoptMarkedAllocationSites() { - // TODO(hpayer): If iterating over the allocation sites list becomes a - // performance issue, use a cache heap data structure instead (similar to the - // allocation sites scratchpad). - Object* list_element = allocation_sites_list(); - while (list_element->IsAllocationSite()) { - AllocationSite* site = AllocationSite::cast(list_element); - if (site->deopt_dependent_code()) { - site->dependent_code()->MarkCodeForDeoptimization( - isolate_, - DependentCode::kAllocationSiteTenuringChangedGroup); - site->set_deopt_dependent_code(false); - } - list_element = site->weak_next(); - } - Deoptimizer::DeoptimizeMarkedCode(isolate_); -} - - -void Heap::GarbageCollectionEpilogue() { - store_buffer()->GCEpilogue(); - - // In release mode, we only zap the from space under heap verification. - if (Heap::ShouldZapGarbage()) { - ZapFromSpace(); - } - - // Process pretenuring feedback and update allocation sites. - ProcessPretenuringFeedback(); - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Verify(); - } -#endif - - AllowHeapAllocation for_the_rest_of_the_epilogue; - -#ifdef DEBUG - if (FLAG_print_global_handles) isolate_->global_handles()->Print(); - if (FLAG_print_handles) PrintHandles(); - if (FLAG_gc_verbose) Print(); - if (FLAG_code_stats) ReportCodeStatistics("After GC"); -#endif - if (FLAG_deopt_every_n_garbage_collections > 0) { - // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that - // the topmost optimized frame can be deoptimized safely, because it - // might not have a lazy bailout point right after its current PC. 
- if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) { - Deoptimizer::DeoptimizeAll(isolate()); - gcs_since_last_deopt_ = 0; - } - } - - UpdateMaximumCommitted(); - - isolate_->counters()->alive_after_last_gc()->Set( - static_cast<int>(SizeOfObjects())); - - isolate_->counters()->string_table_capacity()->Set( - string_table()->Capacity()); - isolate_->counters()->number_of_symbols()->Set( - string_table()->NumberOfElements()); - - if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) { - isolate_->counters()->codegen_fraction_crankshaft()->AddSample( - static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) / - (crankshaft_codegen_bytes_generated_ - + full_codegen_bytes_generated_))); - } - - if (CommittedMemory() > 0) { - isolate_->counters()->external_fragmentation_total()->AddSample( - static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory())); - - isolate_->counters()->heap_fraction_new_space()-> - AddSample(static_cast<int>( - (new_space()->CommittedMemory() * 100.0) / CommittedMemory())); - isolate_->counters()->heap_fraction_old_pointer_space()->AddSample( - static_cast<int>( - (old_pointer_space()->CommittedMemory() * 100.0) / - CommittedMemory())); - isolate_->counters()->heap_fraction_old_data_space()->AddSample( - static_cast<int>( - (old_data_space()->CommittedMemory() * 100.0) / - CommittedMemory())); - isolate_->counters()->heap_fraction_code_space()-> - AddSample(static_cast<int>( - (code_space()->CommittedMemory() * 100.0) / CommittedMemory())); - isolate_->counters()->heap_fraction_map_space()->AddSample( - static_cast<int>( - (map_space()->CommittedMemory() * 100.0) / CommittedMemory())); - isolate_->counters()->heap_fraction_cell_space()->AddSample( - static_cast<int>( - (cell_space()->CommittedMemory() * 100.0) / CommittedMemory())); - isolate_->counters()->heap_fraction_property_cell_space()-> - AddSample(static_cast<int>( - (property_cell_space()->CommittedMemory() * 100.0) / - 
CommittedMemory())); - isolate_->counters()->heap_fraction_lo_space()-> - AddSample(static_cast<int>( - (lo_space()->CommittedMemory() * 100.0) / CommittedMemory())); - - isolate_->counters()->heap_sample_total_committed()->AddSample( - static_cast<int>(CommittedMemory() / KB)); - isolate_->counters()->heap_sample_total_used()->AddSample( - static_cast<int>(SizeOfObjects() / KB)); - isolate_->counters()->heap_sample_map_space_committed()->AddSample( - static_cast<int>(map_space()->CommittedMemory() / KB)); - isolate_->counters()->heap_sample_cell_space_committed()->AddSample( - static_cast<int>(cell_space()->CommittedMemory() / KB)); - isolate_->counters()-> - heap_sample_property_cell_space_committed()-> - AddSample(static_cast<int>( - property_cell_space()->CommittedMemory() / KB)); - isolate_->counters()->heap_sample_code_space_committed()->AddSample( - static_cast<int>(code_space()->CommittedMemory() / KB)); - - isolate_->counters()->heap_sample_maximum_committed()->AddSample( - static_cast<int>(MaximumCommittedMemory() / KB)); - } - -#define UPDATE_COUNTERS_FOR_SPACE(space) \ - isolate_->counters()->space##_bytes_available()->Set( \ - static_cast<int>(space()->Available())); \ - isolate_->counters()->space##_bytes_committed()->Set( \ - static_cast<int>(space()->CommittedMemory())); \ - isolate_->counters()->space##_bytes_used()->Set( \ - static_cast<int>(space()->SizeOfObjects())); -#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \ - if (space()->CommittedMemory() > 0) { \ - isolate_->counters()->external_fragmentation_##space()->AddSample( \ - static_cast<int>(100 - \ - (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \ - } -#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \ - UPDATE_COUNTERS_FOR_SPACE(space) \ - UPDATE_FRAGMENTATION_FOR_SPACE(space) - - UPDATE_COUNTERS_FOR_SPACE(new_space) - UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space) - UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space) - 
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space) - UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space) - UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space) - UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space) - UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space) -#undef UPDATE_COUNTERS_FOR_SPACE -#undef UPDATE_FRAGMENTATION_FOR_SPACE -#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE - -#if defined(DEBUG) - ReportStatisticsAfterGC(); -#endif // DEBUG -#ifdef ENABLE_DEBUGGER_SUPPORT - isolate_->debug()->AfterGarbageCollection(); -#endif // ENABLE_DEBUGGER_SUPPORT -} - - -void Heap::CollectAllGarbage(int flags, - const char* gc_reason, - const v8::GCCallbackFlags gc_callback_flags) { - // Since we are ignoring the return value, the exact choice of space does - // not matter, so long as we do not specify NEW_SPACE, which would not - // cause a full GC. - mark_compact_collector_.SetFlags(flags); - CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags); - mark_compact_collector_.SetFlags(kNoGCFlags); -} - - -void Heap::CollectAllAvailableGarbage(const char* gc_reason) { - // Since we are ignoring the return value, the exact choice of space does - // not matter, so long as we do not specify NEW_SPACE, which would not - // cause a full GC. - // Major GC would invoke weak handle callbacks on weakly reachable - // handles, but won't collect weakly reachable objects until next - // major GC. Therefore if we collect aggressively and weak handle callback - // has been invoked, we rerun major GC to release objects which become - // garbage. - // Note: as weak callbacks can execute arbitrary code, we cannot - // hope that eventually there will be no weak callbacks invocations. - // Therefore stop recollecting after several attempts. - if (isolate()->concurrent_recompilation_enabled()) { - // The optimizing compiler may be unnecessarily holding on to memory. 
- DisallowHeapAllocation no_recursive_gc; - isolate()->optimizing_compiler_thread()->Flush(); - } - mark_compact_collector()->SetFlags(kMakeHeapIterableMask | - kReduceMemoryFootprintMask); - isolate_->compilation_cache()->Clear(); - const int kMaxNumberOfAttempts = 7; - const int kMinNumberOfAttempts = 2; - for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { - if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) && - attempt + 1 >= kMinNumberOfAttempts) { - break; - } - } - mark_compact_collector()->SetFlags(kNoGCFlags); - new_space_.Shrink(); - UncommitFromSpace(); - incremental_marking()->UncommitMarkingDeque(); -} - - -void Heap::EnsureFillerObjectAtTop() { - // There may be an allocation memento behind every object in new space. - // If we evacuate a not full new space or if we are on the last page of - // the new space, then there may be uninitialized memory behind the top - // pointer of the new space page. We store a filler object there to - // identify the unused space. - Address from_top = new_space_.top(); - Address from_limit = new_space_.limit(); - if (from_top < from_limit) { - int remaining_in_page = static_cast<int>(from_limit - from_top); - CreateFillerObjectAt(from_top, remaining_in_page); - } -} - - -bool Heap::CollectGarbage(GarbageCollector collector, - const char* gc_reason, - const char* collector_reason, - const v8::GCCallbackFlags gc_callback_flags) { - // The VM is in the GC state until exiting this function. - VMState<GC> state(isolate_); - -#ifdef DEBUG - // Reset the allocation timeout to the GC interval, but make sure to - // allow at least a few allocations after a collection. The reason - // for this is that we have a lot of allocation sequences and we - // assume that a garbage collection will allow the subsequent - // allocation attempts to go through. 
- allocation_timeout_ = Max(6, FLAG_gc_interval); -#endif - - EnsureFillerObjectAtTop(); - - if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Scavenge during marking.\n"); - } - } - - if (collector == MARK_COMPACTOR && - !mark_compact_collector()->abort_incremental_marking() && - !incremental_marking()->IsStopped() && - !incremental_marking()->should_hurry() && - FLAG_incremental_marking_steps) { - // Make progress in incremental marking. - const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; - incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, - IncrementalMarking::NO_GC_VIA_STACK_GUARD); - if (!incremental_marking()->IsComplete()) { - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); - } - collector = SCAVENGER; - collector_reason = "incremental marking delaying mark-sweep"; - } - } - - bool next_gc_likely_to_collect_more = false; - - { GCTracer tracer(this, gc_reason, collector_reason); - ASSERT(AllowHeapAllocation::IsAllowed()); - DisallowHeapAllocation no_allocation_during_gc; - GarbageCollectionPrologue(); - // The GC count was incremented in the prologue. Tell the tracer about - // it. - tracer.set_gc_count(gc_count_); - - // Tell the tracer which collector we've selected. - tracer.set_collector(collector); - - { - HistogramTimerScope histogram_timer_scope( - (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() - : isolate_->counters()->gc_compactor()); - next_gc_likely_to_collect_more = - PerformGarbageCollection(collector, &tracer, gc_callback_flags); - } - - GarbageCollectionEpilogue(); - } - - // Start incremental marking for the next cycle. The heap snapshot - // generator needs incremental marking to stay off after it aborted. 
- if (!mark_compact_collector()->abort_incremental_marking() && - incremental_marking()->IsStopped() && - incremental_marking()->WorthActivating() && - NextGCIsLikelyToBeFull()) { - incremental_marking()->Start(); - } - - return next_gc_likely_to_collect_more; -} - - -int Heap::NotifyContextDisposed() { - if (isolate()->concurrent_recompilation_enabled()) { - // Flush the queued recompilation tasks. - isolate()->optimizing_compiler_thread()->Flush(); - } - flush_monomorphic_ics_ = true; - AgeInlineCaches(); - return ++contexts_disposed_; -} - - -void Heap::MoveElements(FixedArray* array, - int dst_index, - int src_index, - int len) { - if (len == 0) return; - - ASSERT(array->map() != fixed_cow_array_map()); - Object** dst_objects = array->data_start() + dst_index; - OS::MemMove(dst_objects, - array->data_start() + src_index, - len * kPointerSize); - if (!InNewSpace(array)) { - for (int i = 0; i < len; i++) { - // TODO(hpayer): check store buffer for entries - if (InNewSpace(dst_objects[i])) { - RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i)); - } - } - } - incremental_marking()->RecordWrites(array); -} - - -#ifdef VERIFY_HEAP -// Helper class for verifying the string table. -class StringTableVerifier : public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - // Visit all HeapObject pointers in [start, end). - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) { - // Check that the string is actually internalized. 
- CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || - (*p)->IsInternalizedString()); - } - } - } -}; - - -static void VerifyStringTable(Heap* heap) { - StringTableVerifier verifier; - heap->string_table()->IterateElements(&verifier); -} -#endif // VERIFY_HEAP - - -static bool AbortIncrementalMarkingAndCollectGarbage( - Heap* heap, - AllocationSpace space, - const char* gc_reason = NULL) { - heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask); - bool result = heap->CollectGarbage(space, gc_reason); - heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags); - return result; -} - - -void Heap::ReserveSpace(int *sizes, Address *locations_out) { - bool gc_performed = true; - int counter = 0; - static const int kThreshold = 20; - while (gc_performed && counter++ < kThreshold) { - gc_performed = false; - ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1); - for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) { - if (sizes[space] != 0) { - MaybeObject* allocation; - if (space == NEW_SPACE) { - allocation = new_space()->AllocateRaw(sizes[space]); - } else { - allocation = paged_space(space)->AllocateRaw(sizes[space]); - } - FreeListNode* node; - if (!allocation->To<FreeListNode>(&node)) { - if (space == NEW_SPACE) { - Heap::CollectGarbage(NEW_SPACE, - "failed to reserve space in the new space"); - } else { - AbortIncrementalMarkingAndCollectGarbage( - this, - static_cast<AllocationSpace>(space), - "failed to reserve space in paged space"); - } - gc_performed = true; - break; - } else { - // Mark with a free list node, in case we have a GC before - // deserializing. - node->set_size(this, sizes[space]); - locations_out[space] = node->address(); - } - } - } - } - - if (gc_performed) { - // Failed to reserve the space after several attempts. - V8::FatalProcessOutOfMemory("Heap::ReserveSpace"); - } -} - - -void Heap::EnsureFromSpaceIsCommitted() { - if (new_space_.CommitFromSpaceIfNeeded()) return; - - // Committing memory to from space failed. 
- // Memory is exhausted and we will die. - V8::FatalProcessOutOfMemory("Committing semi space failed."); -} - - -void Heap::ClearJSFunctionResultCaches() { - if (isolate_->bootstrapper()->IsActive()) return; - - Object* context = native_contexts_list_; - while (!context->IsUndefined()) { - // Get the caches for this context. GC can happen when the context - // is not fully initialized, so the caches can be undefined. - Object* caches_or_undefined = - Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX); - if (!caches_or_undefined->IsUndefined()) { - FixedArray* caches = FixedArray::cast(caches_or_undefined); - // Clear the caches: - int length = caches->length(); - for (int i = 0; i < length; i++) { - JSFunctionResultCache::cast(caches->get(i))->Clear(); - } - } - // Get the next context: - context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); - } -} - - -void Heap::ClearNormalizedMapCaches() { - if (isolate_->bootstrapper()->IsActive() && - !incremental_marking()->IsMarking()) { - return; - } - - Object* context = native_contexts_list_; - while (!context->IsUndefined()) { - // GC can happen when the context is not fully initialized, - // so the cache can be undefined. 
- Object* cache = - Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX); - if (!cache->IsUndefined()) { - NormalizedMapCache::cast(cache)->Clear(); - } - context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); - } -} - - -void Heap::UpdateSurvivalRateTrend(int start_new_space_size) { - if (start_new_space_size == 0) return; - - double survival_rate = - (static_cast<double>(young_survivors_after_last_gc_) * 100) / - start_new_space_size; - - if (survival_rate > kYoungSurvivalRateHighThreshold) { - high_survival_rate_period_length_++; - } else { - high_survival_rate_period_length_ = 0; - } - - if (survival_rate < kYoungSurvivalRateLowThreshold) { - low_survival_rate_period_length_++; - } else { - low_survival_rate_period_length_ = 0; - } - - double survival_rate_diff = survival_rate_ - survival_rate; - - if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) { - set_survival_rate_trend(DECREASING); - } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) { - set_survival_rate_trend(INCREASING); - } else { - set_survival_rate_trend(STABLE); - } - - survival_rate_ = survival_rate; -} - -bool Heap::PerformGarbageCollection( - GarbageCollector collector, - GCTracer* tracer, - const v8::GCCallbackFlags gc_callback_flags) { - bool next_gc_likely_to_collect_more = false; - - if (collector != SCAVENGER) { - PROFILE(isolate_, CodeMovingGCEvent()); - } - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - VerifyStringTable(this); - } -#endif - - GCType gc_type = - collector == MARK_COMPACTOR ? 
kGCTypeMarkSweepCompact : kGCTypeScavenge; - - { GCCallbacksScope scope(this); - if (scope.CheckReenter()) { - AllowHeapAllocation allow_allocation; - GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - VMState<EXTERNAL> state(isolate_); - HandleScope handle_scope(isolate_); - CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags); - } - } - - EnsureFromSpaceIsCommitted(); - - int start_new_space_size = Heap::new_space()->SizeAsInt(); - - if (IsHighSurvivalRate()) { - // We speed up the incremental marker if it is running so that it - // does not fall behind the rate of promotion, which would cause a - // constantly growing old space. - incremental_marking()->NotifyOfHighPromotionRate(); - } - - if (collector == MARK_COMPACTOR) { - // Perform mark-sweep with optional compaction. - MarkCompact(tracer); - sweep_generation_++; - - UpdateSurvivalRateTrend(start_new_space_size); - - size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects(); - - old_generation_allocation_limit_ = - OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_); - - old_gen_exhausted_ = false; - } else { - tracer_ = tracer; - Scavenge(); - tracer_ = NULL; - - UpdateSurvivalRateTrend(start_new_space_size); - } - - if (!new_space_high_promotion_mode_active_ && - new_space_.Capacity() == new_space_.MaximumCapacity() && - IsStableOrIncreasingSurvivalTrend() && - IsHighSurvivalRate()) { - // Stable high survival rates even though young generation is at - // maximum capacity indicates that most objects will be promoted. - // To decrease scavenger pauses and final mark-sweep pauses, we - // have to limit maximal capacity of the young generation. - SetNewSpaceHighPromotionModeActive(true); - if (FLAG_trace_gc) { - PrintPID("Limited new space size due to high promotion rate: %d MB\n", - new_space_.InitialCapacity() / MB); - } - // The high promotion mode is our indicator to turn on pretenuring. 
We have - // to deoptimize all optimized code in global pretenuring mode and all - // code which should be tenured in local pretenuring mode. - if (FLAG_pretenuring) { - if (!FLAG_allocation_site_pretenuring) { - isolate_->stack_guard()->FullDeopt(); - } - } - } else if (new_space_high_promotion_mode_active_ && - IsStableOrDecreasingSurvivalTrend() && - IsLowSurvivalRate()) { - // Decreasing low survival rates might indicate that the above high - // promotion mode is over and we should allow the young generation - // to grow again. - SetNewSpaceHighPromotionModeActive(false); - if (FLAG_trace_gc) { - PrintPID("Unlimited new space size due to low promotion rate: %d MB\n", - new_space_.MaximumCapacity() / MB); - } - // Trigger deoptimization here to turn off global pretenuring as soon as - // possible. - if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) { - isolate_->stack_guard()->FullDeopt(); - } - } - - if (new_space_high_promotion_mode_active_ && - new_space_.Capacity() > new_space_.InitialCapacity()) { - new_space_.Shrink(); - } - - isolate_->counters()->objs_since_last_young()->Set(0); - - // Callbacks that fire after this point might trigger nested GCs and - // restart incremental marking, the assertion can't be moved down. - ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); - - gc_post_processing_depth_++; - { AllowHeapAllocation allow_allocation; - GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - next_gc_likely_to_collect_more = - isolate_->global_handles()->PostGarbageCollectionProcessing( - collector, tracer); - } - gc_post_processing_depth_--; - - isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); - - // Update relocatables. - Relocatable::PostGarbageCollectionProcessing(isolate_); - - if (collector == MARK_COMPACTOR) { - // Register the amount of external allocated memory. 
- amount_of_external_allocated_memory_at_last_global_gc_ = - amount_of_external_allocated_memory_; - } - - { GCCallbacksScope scope(this); - if (scope.CheckReenter()) { - AllowHeapAllocation allow_allocation; - GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - VMState<EXTERNAL> state(isolate_); - HandleScope handle_scope(isolate_); - CallGCEpilogueCallbacks(gc_type, gc_callback_flags); - } - } - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - VerifyStringTable(this); - } -#endif - - return next_gc_likely_to_collect_more; -} - - -void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { - for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { - if (gc_type & gc_prologue_callbacks_[i].gc_type) { - if (!gc_prologue_callbacks_[i].pass_isolate_) { - v8::GCPrologueCallback callback = - reinterpret_cast<v8::GCPrologueCallback>( - gc_prologue_callbacks_[i].callback); - callback(gc_type, flags); - } else { - v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); - gc_prologue_callbacks_[i].callback(isolate, gc_type, flags); - } - } - } -} - - -void Heap::CallGCEpilogueCallbacks(GCType gc_type, - GCCallbackFlags gc_callback_flags) { - for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { - if (gc_type & gc_epilogue_callbacks_[i].gc_type) { - if (!gc_epilogue_callbacks_[i].pass_isolate_) { - v8::GCPrologueCallback callback = - reinterpret_cast<v8::GCPrologueCallback>( - gc_epilogue_callbacks_[i].callback); - callback(gc_type, gc_callback_flags); - } else { - v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); - gc_epilogue_callbacks_[i].callback( - isolate, gc_type, gc_callback_flags); - } - } - } -} - - -void Heap::MarkCompact(GCTracer* tracer) { - gc_state_ = MARK_COMPACT; - LOG(isolate_, ResourceEvent("markcompact", "begin")); - - uint64_t size_of_objects_before_gc = SizeOfObjects(); - - mark_compact_collector_.Prepare(tracer); - - ms_count_++; - tracer->set_full_gc_count(ms_count_); - - 
MarkCompactPrologue(); - - mark_compact_collector_.CollectGarbage(); - - LOG(isolate_, ResourceEvent("markcompact", "end")); - - gc_state_ = NOT_IN_GC; - - isolate_->counters()->objs_since_last_full()->Set(0); - - flush_monomorphic_ics_ = false; - - if (FLAG_allocation_site_pretenuring) { - EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); - } -} - - -void Heap::MarkCompactPrologue() { - // At any old GC clear the keyed lookup cache to enable collection of unused - // maps. - isolate_->keyed_lookup_cache()->Clear(); - isolate_->context_slot_cache()->Clear(); - isolate_->descriptor_lookup_cache()->Clear(); - RegExpResultsCache::Clear(string_split_cache()); - RegExpResultsCache::Clear(regexp_multiple_cache()); - - isolate_->compilation_cache()->MarkCompactPrologue(); - - CompletelyClearInstanceofCache(); - - FlushNumberStringCache(); - if (FLAG_cleanup_code_caches_at_gc) { - polymorphic_code_cache()->set_cache(undefined_value()); - } - - ClearNormalizedMapCaches(); -} - - -// Helper class for copying HeapObjects -class ScavengeVisitor: public ObjectVisitor { - public: - explicit ScavengeVisitor(Heap* heap) : heap_(heap) {} - - void VisitPointer(Object** p) { ScavengePointer(p); } - - void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) ScavengePointer(p); - } - - private: - void ScavengePointer(Object** p) { - Object* object = *p; - if (!heap_->InNewSpace(object)) return; - Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), - reinterpret_cast<HeapObject*>(object)); - } - - Heap* heap_; -}; - - -#ifdef VERIFY_HEAP -// Visitor class to verify pointers in code or data space do not point into -// new space. 
-class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor { - public: - explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {} - void VisitPointers(Object** start, Object**end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - CHECK(!heap_->InNewSpace(HeapObject::cast(*current))); - } - } - } - - private: - Heap* heap_; -}; - - -static void VerifyNonPointerSpacePointers(Heap* heap) { - // Verify that there are no pointers to new space in spaces where we - // do not expect them. - VerifyNonPointerSpacePointersVisitor v(heap); - HeapObjectIterator code_it(heap->code_space()); - for (HeapObject* object = code_it.Next(); - object != NULL; object = code_it.Next()) - object->Iterate(&v); - - // The old data space was normally swept conservatively so that the iterator - // doesn't work, so we normally skip the next bit. - if (!heap->old_data_space()->was_swept_conservatively()) { - HeapObjectIterator data_it(heap->old_data_space()); - for (HeapObject* object = data_it.Next(); - object != NULL; object = data_it.Next()) - object->Iterate(&v); - } -} -#endif // VERIFY_HEAP - - -void Heap::CheckNewSpaceExpansionCriteria() { - if (new_space_.Capacity() < new_space_.MaximumCapacity() && - survived_since_last_expansion_ > new_space_.Capacity() && - !new_space_high_promotion_mode_active_) { - // Grow the size of new space if there is room to grow, enough data - // has survived scavenge since the last expansion and we are not in - // high promotion mode. 
- new_space_.Grow(); - survived_since_last_expansion_ = 0; - } -} - - -static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { - return heap->InNewSpace(*p) && - !HeapObject::cast(*p)->map_word().IsForwardingAddress(); -} - - -void Heap::ScavengeStoreBufferCallback( - Heap* heap, - MemoryChunk* page, - StoreBufferEvent event) { - heap->store_buffer_rebuilder_.Callback(page, event); -} - - -void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) { - if (event == kStoreBufferStartScanningPagesEvent) { - start_of_current_page_ = NULL; - current_page_ = NULL; - } else if (event == kStoreBufferScanningPageEvent) { - if (current_page_ != NULL) { - // If this page already overflowed the store buffer during this iteration. - if (current_page_->scan_on_scavenge()) { - // Then we should wipe out the entries that have been added for it. - store_buffer_->SetTop(start_of_current_page_); - } else if (store_buffer_->Top() - start_of_current_page_ >= - (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { - // Did we find too many pointers in the previous page? The heuristic is - // that no page can take more then 1/5 the remaining slots in the store - // buffer. - current_page_->set_scan_on_scavenge(true); - store_buffer_->SetTop(start_of_current_page_); - } else { - // In this case the page we scanned took a reasonable number of slots in - // the store buffer. It has now been rehabilitated and is no longer - // marked scan_on_scavenge. - ASSERT(!current_page_->scan_on_scavenge()); - } - } - start_of_current_page_ = store_buffer_->Top(); - current_page_ = page; - } else if (event == kStoreBufferFullEvent) { - // The current page overflowed the store buffer again. Wipe out its entries - // in the store buffer and mark it scan-on-scavenge again. This may happen - // several times while scanning. - if (current_page_ == NULL) { - // Store Buffer overflowed while scanning promoted objects. 
These are not - // in any particular page, though they are likely to be clustered by the - // allocation routines. - store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2); - } else { - // Store Buffer overflowed while scanning a particular old space page for - // pointers to new space. - ASSERT(current_page_ == page); - ASSERT(page != NULL); - current_page_->set_scan_on_scavenge(true); - ASSERT(start_of_current_page_ != store_buffer_->Top()); - store_buffer_->SetTop(start_of_current_page_); - } - } else { - UNREACHABLE(); - } -} - - -void PromotionQueue::Initialize() { - // Assumes that a NewSpacePage exactly fits a number of promotion queue - // entries (where each is a pair of intptr_t). This allows us to simplify - // the test fpr when to switch pages. - ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) - == 0); - limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); - front_ = rear_ = - reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); - emergency_stack_ = NULL; - guard_ = false; -} - - -void PromotionQueue::RelocateQueueHead() { - ASSERT(emergency_stack_ == NULL); - - Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); - intptr_t* head_start = rear_; - intptr_t* head_end = - Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); - - int entries_count = - static_cast<int>(head_end - head_start) / kEntrySizeInWords; - - emergency_stack_ = new List<Entry>(2 * entries_count); - - while (head_start != head_end) { - int size = static_cast<int>(*(head_start++)); - HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++)); - emergency_stack_->Add(Entry(obj, size)); - } - rear_ = head_end; -} - - -class ScavengeWeakObjectRetainer : public WeakObjectRetainer { - public: - explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { } - - virtual Object* RetainAs(Object* object) { - if (!heap_->InFromSpace(object)) { - return object; - } - - MapWord map_word = 
HeapObject::cast(object)->map_word(); - if (map_word.IsForwardingAddress()) { - return map_word.ToForwardingAddress(); - } - return NULL; - } - - private: - Heap* heap_; -}; - - -void Heap::Scavenge() { - RelocationLock relocation_lock(this); - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this); -#endif - - gc_state_ = SCAVENGE; - - // Implements Cheney's copying algorithm - LOG(isolate_, ResourceEvent("scavenge", "begin")); - - // Clear descriptor cache. - isolate_->descriptor_lookup_cache()->Clear(); - - // Used for updating survived_since_last_expansion_ at function end. - intptr_t survived_watermark = PromotedSpaceSizeOfObjects(); - - CheckNewSpaceExpansionCriteria(); - - SelectScavengingVisitorsTable(); - - incremental_marking()->PrepareForScavenge(); - - paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size()); - paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size()); - - // Flip the semispaces. After flipping, to space is empty, from space has - // live objects. - new_space_.Flip(); - new_space_.ResetAllocationInfo(); - - // We need to sweep newly copied objects which can be either in the - // to space or promoted to the old generation. For to-space - // objects, we treat the bottom of the to space as a queue. Newly - // copied and unswept objects lie between a 'front' mark and the - // allocation pointer. - // - // Promoted objects can go into various old-generation spaces, and - // can be allocated internally in the spaces (from the free list). - // We treat the top of the to space as a queue of addresses of - // promoted objects. The addresses of newly promoted and unswept - // objects lie between a 'front' mark and a 'rear' mark that is - // updated as a side effect of promoting an object. 
- // - // There is guaranteed to be enough room at the top of the to space - // for the addresses of promoted objects: every object promoted - // frees up its size in bytes from the top of the new space, and - // objects are at least one pointer in size. - Address new_space_front = new_space_.ToSpaceStart(); - promotion_queue_.Initialize(); - -#ifdef DEBUG - store_buffer()->Clean(); -#endif - - ScavengeVisitor scavenge_visitor(this); - // Copy roots. - IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); - - // Copy objects reachable from the old generation. - { - StoreBufferRebuildScope scope(this, - store_buffer(), - &ScavengeStoreBufferCallback); - store_buffer()->IteratePointersToNewSpace(&ScavengeObject); - } - - // Copy objects reachable from simple cells by scavenging cell values - // directly. - HeapObjectIterator cell_iterator(cell_space_); - for (HeapObject* heap_object = cell_iterator.Next(); - heap_object != NULL; - heap_object = cell_iterator.Next()) { - if (heap_object->IsCell()) { - Cell* cell = Cell::cast(heap_object); - Address value_address = cell->ValueAddress(); - scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); - } - } - - // Copy objects reachable from global property cells by scavenging global - // property cell values directly. - HeapObjectIterator js_global_property_cell_iterator(property_cell_space_); - for (HeapObject* heap_object = js_global_property_cell_iterator.Next(); - heap_object != NULL; - heap_object = js_global_property_cell_iterator.Next()) { - if (heap_object->IsPropertyCell()) { - PropertyCell* cell = PropertyCell::cast(heap_object); - Address value_address = cell->ValueAddress(); - scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); - Address type_address = cell->TypeAddress(); - scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address)); - } - } - - // Copy objects reachable from the code flushing candidates list. 
- MarkCompactCollector* collector = mark_compact_collector(); - if (collector->is_code_flushing_enabled()) { - collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor); - } - - // Scavenge object reachable from the native contexts list directly. - scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_)); - - new_space_front = DoScavenge(&scavenge_visitor, new_space_front); - - while (isolate()->global_handles()->IterateObjectGroups( - &scavenge_visitor, &IsUnscavengedHeapObject)) { - new_space_front = DoScavenge(&scavenge_visitor, new_space_front); - } - isolate()->global_handles()->RemoveObjectGroups(); - isolate()->global_handles()->RemoveImplicitRefGroups(); - - isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles( - &IsUnscavengedHeapObject); - isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots( - &scavenge_visitor); - new_space_front = DoScavenge(&scavenge_visitor, new_space_front); - - UpdateNewSpaceReferencesInExternalStringTable( - &UpdateNewSpaceReferenceInExternalStringTableEntry); - - promotion_queue_.Destroy(); - - incremental_marking()->UpdateMarkingDequeAfterScavenge(); - - ScavengeWeakObjectRetainer weak_object_retainer(this); - ProcessWeakReferences(&weak_object_retainer); - - ASSERT(new_space_front == new_space_.top()); - - // Set age mark. - new_space_.set_age_mark(new_space_.top()); - - new_space_.LowerInlineAllocationLimit( - new_space_.inline_allocation_limit_step()); - - // Update how much has survived scavenge. 
- IncrementYoungSurvivorsCounter(static_cast<int>( - (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); - - LOG(isolate_, ResourceEvent("scavenge", "end")); - - gc_state_ = NOT_IN_GC; - - scavenges_since_last_idle_round_++; -} - - -String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, - Object** p) { - MapWord first_word = HeapObject::cast(*p)->map_word(); - - if (!first_word.IsForwardingAddress()) { - // Unreachable external string can be finalized. - heap->FinalizeExternalString(String::cast(*p)); - return NULL; - } - - // String is still reachable. - return String::cast(first_word.ToForwardingAddress()); -} - - -void Heap::UpdateNewSpaceReferencesInExternalStringTable( - ExternalStringTableUpdaterCallback updater_func) { -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - external_string_table_.Verify(); - } -#endif - - if (external_string_table_.new_space_strings_.is_empty()) return; - - Object** start = &external_string_table_.new_space_strings_[0]; - Object** end = start + external_string_table_.new_space_strings_.length(); - Object** last = start; - - for (Object** p = start; p < end; ++p) { - ASSERT(InFromSpace(*p)); - String* target = updater_func(this, p); - - if (target == NULL) continue; - - ASSERT(target->IsExternalString()); - - if (InNewSpace(target)) { - // String is still in new space. Update the table entry. - *last = target; - ++last; - } else { - // String got promoted. Move it to the old string list. - external_string_table_.AddOldString(target); - } - } - - ASSERT(last <= end); - external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); -} - - -void Heap::UpdateReferencesInExternalStringTable( - ExternalStringTableUpdaterCallback updater_func) { - - // Update old space string references. 
- if (external_string_table_.old_space_strings_.length() > 0) { - Object** start = &external_string_table_.old_space_strings_[0]; - Object** end = start + external_string_table_.old_space_strings_.length(); - for (Object** p = start; p < end; ++p) *p = updater_func(this, p); - } - - UpdateNewSpaceReferencesInExternalStringTable(updater_func); -} - - -template <class T> -struct WeakListVisitor; - - -template <class T> -static Object* VisitWeakList(Heap* heap, - Object* list, - WeakObjectRetainer* retainer, - bool record_slots) { - Object* undefined = heap->undefined_value(); - Object* head = undefined; - T* tail = NULL; - MarkCompactCollector* collector = heap->mark_compact_collector(); - while (list != undefined) { - // Check whether to keep the candidate in the list. - T* candidate = reinterpret_cast<T*>(list); - Object* retained = retainer->RetainAs(list); - if (retained != NULL) { - if (head == undefined) { - // First element in the list. - head = retained; - } else { - // Subsequent elements in the list. - ASSERT(tail != NULL); - WeakListVisitor<T>::SetWeakNext(tail, retained); - if (record_slots) { - Object** next_slot = - HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset()); - collector->RecordSlot(next_slot, next_slot, retained); - } - } - // Retained object is new tail. - ASSERT(!retained->IsUndefined()); - candidate = reinterpret_cast<T*>(retained); - tail = candidate; - - - // tail is a live object, visit it. - WeakListVisitor<T>::VisitLiveObject( - heap, tail, retainer, record_slots); - } else { - WeakListVisitor<T>::VisitPhantomObject(heap, candidate); - } - - // Move to next element in the list. - list = WeakListVisitor<T>::WeakNext(candidate); - } - - // Terminate the list if there is one or more elements. 
- if (tail != NULL) { - WeakListVisitor<T>::SetWeakNext(tail, undefined); - } - return head; -} - - -template <class T> -static void ClearWeakList(Heap* heap, - Object* list) { - Object* undefined = heap->undefined_value(); - while (list != undefined) { - T* candidate = reinterpret_cast<T*>(list); - list = WeakListVisitor<T>::WeakNext(candidate); - WeakListVisitor<T>::SetWeakNext(candidate, undefined); - } -} - - -template<> -struct WeakListVisitor<JSFunction> { - static void SetWeakNext(JSFunction* function, Object* next) { - function->set_next_function_link(next); - } - - static Object* WeakNext(JSFunction* function) { - return function->next_function_link(); - } - - static int WeakNextOffset() { - return JSFunction::kNextFunctionLinkOffset; - } - - static void VisitLiveObject(Heap*, JSFunction*, - WeakObjectRetainer*, bool) { - } - - static void VisitPhantomObject(Heap*, JSFunction*) { - } -}; - - -template<> -struct WeakListVisitor<Code> { - static void SetWeakNext(Code* code, Object* next) { - code->set_next_code_link(next); - } - - static Object* WeakNext(Code* code) { - return code->next_code_link(); - } - - static int WeakNextOffset() { - return Code::kNextCodeLinkOffset; - } - - static void VisitLiveObject(Heap*, Code*, - WeakObjectRetainer*, bool) { - } - - static void VisitPhantomObject(Heap*, Code*) { - } -}; - - -template<> -struct WeakListVisitor<Context> { - static void SetWeakNext(Context* context, Object* next) { - context->set(Context::NEXT_CONTEXT_LINK, - next, - UPDATE_WRITE_BARRIER); - } - - static Object* WeakNext(Context* context) { - return context->get(Context::NEXT_CONTEXT_LINK); - } - - static void VisitLiveObject(Heap* heap, - Context* context, - WeakObjectRetainer* retainer, - bool record_slots) { - // Process the three weak lists linked off the context. 
- DoWeakList<JSFunction>(heap, context, retainer, record_slots, - Context::OPTIMIZED_FUNCTIONS_LIST); - DoWeakList<Code>(heap, context, retainer, record_slots, - Context::OPTIMIZED_CODE_LIST); - DoWeakList<Code>(heap, context, retainer, record_slots, - Context::DEOPTIMIZED_CODE_LIST); - } - - template<class T> - static void DoWeakList(Heap* heap, - Context* context, - WeakObjectRetainer* retainer, - bool record_slots, - int index) { - // Visit the weak list, removing dead intermediate elements. - Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer, - record_slots); - - // Update the list head. - context->set(index, list_head, UPDATE_WRITE_BARRIER); - - if (record_slots) { - // Record the updated slot if necessary. - Object** head_slot = HeapObject::RawField( - context, FixedArray::SizeFor(index)); - heap->mark_compact_collector()->RecordSlot( - head_slot, head_slot, list_head); - } - } - - static void VisitPhantomObject(Heap* heap, Context* context) { - ClearWeakList<JSFunction>(heap, - context->get(Context::OPTIMIZED_FUNCTIONS_LIST)); - ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST)); - ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST)); - } - - static int WeakNextOffset() { - return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK); - } -}; - - -void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { - // We don't record weak slots during marking or scavenges. - // Instead we do it once when we complete mark-compact cycle. - // Note that write barrier has no effect if we are already in the middle of - // compacting mark-sweep cycle and we have to record slots manually. - bool record_slots = - gc_state() == MARK_COMPACT && - mark_compact_collector()->is_compacting(); - ProcessArrayBuffers(retainer, record_slots); - ProcessNativeContexts(retainer, record_slots); - // TODO(mvstanton): AllocationSites only need to be processed during - // MARK_COMPACT, as they live in old space. 
Verify and address. - ProcessAllocationSites(retainer, record_slots); -} - -void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer, - bool record_slots) { - Object* head = - VisitWeakList<Context>( - this, native_contexts_list(), retainer, record_slots); - // Update the head of the list of contexts. - native_contexts_list_ = head; -} - - -template<> -struct WeakListVisitor<JSArrayBufferView> { - static void SetWeakNext(JSArrayBufferView* obj, Object* next) { - obj->set_weak_next(next); - } - - static Object* WeakNext(JSArrayBufferView* obj) { - return obj->weak_next(); - } - - static void VisitLiveObject(Heap*, - JSArrayBufferView* obj, - WeakObjectRetainer* retainer, - bool record_slots) {} - - static void VisitPhantomObject(Heap*, JSArrayBufferView*) {} - - static int WeakNextOffset() { - return JSArrayBufferView::kWeakNextOffset; - } -}; - - -template<> -struct WeakListVisitor<JSArrayBuffer> { - static void SetWeakNext(JSArrayBuffer* obj, Object* next) { - obj->set_weak_next(next); - } - - static Object* WeakNext(JSArrayBuffer* obj) { - return obj->weak_next(); - } - - static void VisitLiveObject(Heap* heap, - JSArrayBuffer* array_buffer, - WeakObjectRetainer* retainer, - bool record_slots) { - Object* typed_array_obj = - VisitWeakList<JSArrayBufferView>( - heap, - array_buffer->weak_first_view(), - retainer, record_slots); - array_buffer->set_weak_first_view(typed_array_obj); - if (typed_array_obj != heap->undefined_value() && record_slots) { - Object** slot = HeapObject::RawField( - array_buffer, JSArrayBuffer::kWeakFirstViewOffset); - heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj); - } - } - - static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) { - Runtime::FreeArrayBuffer(heap->isolate(), phantom); - } - - static int WeakNextOffset() { - return JSArrayBuffer::kWeakNextOffset; - } -}; - - -void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer, - bool record_slots) { - Object* array_buffer_obj = - 
VisitWeakList<JSArrayBuffer>(this, - array_buffers_list(), - retainer, record_slots); - set_array_buffers_list(array_buffer_obj); -} - - -void Heap::TearDownArrayBuffers() { - Object* undefined = undefined_value(); - for (Object* o = array_buffers_list(); o != undefined;) { - JSArrayBuffer* buffer = JSArrayBuffer::cast(o); - Runtime::FreeArrayBuffer(isolate(), buffer); - o = buffer->weak_next(); - } - array_buffers_list_ = undefined; -} - - -template<> -struct WeakListVisitor<AllocationSite> { - static void SetWeakNext(AllocationSite* obj, Object* next) { - obj->set_weak_next(next); - } - - static Object* WeakNext(AllocationSite* obj) { - return obj->weak_next(); - } - - static void VisitLiveObject(Heap* heap, - AllocationSite* site, - WeakObjectRetainer* retainer, - bool record_slots) {} - - static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {} - - static int WeakNextOffset() { - return AllocationSite::kWeakNextOffset; - } -}; - - -void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer, - bool record_slots) { - Object* allocation_site_obj = - VisitWeakList<AllocationSite>(this, - allocation_sites_list(), - retainer, record_slots); - set_allocation_sites_list(allocation_site_obj); -} - - -void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) { - DisallowHeapAllocation no_allocation_scope; - Object* cur = allocation_sites_list(); - bool marked = false; - while (cur->IsAllocationSite()) { - AllocationSite* casted = AllocationSite::cast(cur); - if (casted->GetPretenureMode() == flag) { - casted->ResetPretenureDecision(); - casted->set_deopt_dependent_code(true); - marked = true; - } - cur = casted->weak_next(); - } - if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites(); -} - - -void Heap::EvaluateOldSpaceLocalPretenuring( - uint64_t size_of_objects_before_gc) { - uint64_t size_of_objects_after_gc = SizeOfObjects(); - double old_generation_survival_rate = - (static_cast<double>(size_of_objects_after_gc) * 100) / - 
static_cast<double>(size_of_objects_before_gc); - - if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { - // Too many objects died in the old generation, pretenuring of wrong - // allocation sites may be the cause for that. We have to deopt all - // dependent code registered in the allocation sites to re-evaluate - // our pretenuring decisions. - ResetAllAllocationSitesDependentCode(TENURED); - if (FLAG_trace_pretenuring) { - PrintF("Deopt all allocation sites dependent code due to low survival " - "rate in the old generation %f\n", old_generation_survival_rate); - } - } -} - - -void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { - DisallowHeapAllocation no_allocation; - // All external strings are listed in the external string table. - - class ExternalStringTableVisitorAdapter : public ObjectVisitor { - public: - explicit ExternalStringTableVisitorAdapter( - v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} - virtual void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) { - ASSERT((*p)->IsExternalString()); - visitor_->VisitExternalString(Utils::ToLocal( - Handle<String>(String::cast(*p)))); - } - } - private: - v8::ExternalResourceVisitor* visitor_; - } external_string_table_visitor(visitor); - - external_string_table_.Iterate(&external_string_table_visitor); -} - - -class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> { - public: - static inline void VisitPointer(Heap* heap, Object** p) { - Object* object = *p; - if (!heap->InNewSpace(object)) return; - Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), - reinterpret_cast<HeapObject*>(object)); - } -}; - - -Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, - Address new_space_front) { - do { - SemiSpace::AssertValidRange(new_space_front, new_space_.top()); - // The addresses new_space_front and new_space_.top() define a - // queue of unprocessed copied objects. 
Process them until the - // queue is empty. - while (new_space_front != new_space_.top()) { - if (!NewSpacePage::IsAtEnd(new_space_front)) { - HeapObject* object = HeapObject::FromAddress(new_space_front); - new_space_front += - NewSpaceScavenger::IterateBody(object->map(), object); - } else { - new_space_front = - NewSpacePage::FromLimit(new_space_front)->next_page()->area_start(); - } - } - - // Promote and process all the to-be-promoted objects. - { - StoreBufferRebuildScope scope(this, - store_buffer(), - &ScavengeStoreBufferCallback); - while (!promotion_queue()->is_empty()) { - HeapObject* target; - int size; - promotion_queue()->remove(&target, &size); - - // Promoted object might be already partially visited - // during old space pointer iteration. Thus we search specificly - // for pointers to from semispace instead of looking for pointers - // to new space. - ASSERT(!target->IsMap()); - IterateAndMarkPointersToFromSpace(target->address(), - target->address() + size, - &ScavengeObject); - } - } - - // Take another spin if there are now unswept objects in new space - // (there are currently no more unswept promoted objects). 
- } while (new_space_front != new_space_.top()); - - return new_space_front; -} - - -STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0); -STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0); - - -INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, - HeapObject* object, - int size)); - -static HeapObject* EnsureDoubleAligned(Heap* heap, - HeapObject* object, - int size) { - if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) { - heap->CreateFillerObjectAt(object->address(), kPointerSize); - return HeapObject::FromAddress(object->address() + kPointerSize); - } else { - heap->CreateFillerObjectAt(object->address() + size - kPointerSize, - kPointerSize); - return object; - } -} - - -enum LoggingAndProfiling { - LOGGING_AND_PROFILING_ENABLED, - LOGGING_AND_PROFILING_DISABLED -}; - - -enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS }; - - -template<MarksHandling marks_handling, - LoggingAndProfiling logging_and_profiling_mode> -class ScavengingVisitor : public StaticVisitorBase { - public: - static void Initialize() { - table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString); - table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); - table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); - table_.Register(kVisitByteArray, &EvacuateByteArray); - table_.Register(kVisitFixedArray, &EvacuateFixedArray); - table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray); - table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray); - table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array); - - table_.Register(kVisitNativeContext, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<Context::kSize>); - - table_.Register(kVisitConsString, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<ConsString::kSize>); - - table_.Register(kVisitSlicedString, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template 
VisitSpecialized<SlicedString::kSize>); - - table_.Register(kVisitSymbol, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<Symbol::kSize>); - - table_.Register(kVisitSharedFunctionInfo, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<SharedFunctionInfo::kSize>); - - table_.Register(kVisitJSWeakMap, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - Visit); - - table_.Register(kVisitJSWeakSet, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - Visit); - - table_.Register(kVisitJSArrayBuffer, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - Visit); - - table_.Register(kVisitJSTypedArray, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - Visit); - - table_.Register(kVisitJSDataView, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - Visit); - - table_.Register(kVisitJSRegExp, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - Visit); - - if (marks_handling == IGNORE_MARKS) { - table_.Register(kVisitJSFunction, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<JSFunction::kSize>); - } else { - table_.Register(kVisitJSFunction, &EvacuateJSFunction); - } - - table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, - kVisitDataObject, - kVisitDataObjectGeneric>(); - - table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, - kVisitJSObject, - kVisitJSObjectGeneric>(); - - table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, - kVisitStruct, - kVisitStructGeneric>(); - } - - static VisitorDispatchTable<ScavengingCallback>* GetTable() { - return &table_; - } - - private: - enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; - - static void RecordCopiedObject(Heap* heap, HeapObject* obj) { - bool should_record = false; -#ifdef DEBUG - should_record = FLAG_heap_stats; -#endif - should_record = should_record || FLAG_log_gc; - if (should_record) { - if (heap->new_space()->Contains(obj)) { - heap->new_space()->RecordAllocation(obj); - } else { - 
heap->new_space()->RecordPromotion(obj); - } - } - } - - // Helper function used by CopyObject to copy a source object to an - // allocated target object and update the forwarding pointer in the source - // object. Returns the target object. - INLINE(static void MigrateObject(Heap* heap, - HeapObject* source, - HeapObject* target, - int size)) { - // Copy the content of source to target. - heap->CopyBlock(target->address(), source->address(), size); - - // Set the forwarding address. - source->set_map_word(MapWord::FromForwardingAddress(target)); - - if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { - // Update NewSpace stats if necessary. - RecordCopiedObject(heap, target); - Isolate* isolate = heap->isolate(); - HeapProfiler* heap_profiler = isolate->heap_profiler(); - if (heap_profiler->is_tracking_object_moves()) { - heap_profiler->ObjectMoveEvent(source->address(), target->address(), - size); - } - if (isolate->logger()->is_logging_code_events() || - isolate->cpu_profiler()->is_profiling()) { - if (target->IsSharedFunctionInfo()) { - PROFILE(isolate, SharedFunctionInfoMoveEvent( - source->address(), target->address())); - } - } - } - - if (marks_handling == TRANSFER_MARKS) { - if (Marking::TransferColor(source, target)) { - MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); - } - } - } - - - template<ObjectContents object_contents, int alignment> - static inline void EvacuateObject(Map* map, - HeapObject** slot, - HeapObject* object, - int object_size) { - SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); - SLOW_ASSERT(object->Size() == object_size); - - int allocation_size = object_size; - if (alignment != kObjectAlignment) { - ASSERT(alignment == kDoubleAlignment); - allocation_size += kPointerSize; - } - - Heap* heap = map->GetHeap(); - if (heap->ShouldBePromoted(object->address(), object_size)) { - MaybeObject* maybe_result; - - if (object_contents == DATA_OBJECT) { - ASSERT(heap->AllowedToBeMigrated(object, 
OLD_DATA_SPACE)); - maybe_result = heap->old_data_space()->AllocateRaw(allocation_size); - } else { - ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); - maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size); - } - - Object* result = NULL; // Initialization to please compiler. - if (maybe_result->ToObject(&result)) { - HeapObject* target = HeapObject::cast(result); - - if (alignment != kObjectAlignment) { - target = EnsureDoubleAligned(heap, target, allocation_size); - } - - // Order is important: slot might be inside of the target if target - // was allocated over a dead object and slot comes from the store - // buffer. - *slot = target; - MigrateObject(heap, object, target, object_size); - - if (object_contents == POINTER_OBJECT) { - if (map->instance_type() == JS_FUNCTION_TYPE) { - heap->promotion_queue()->insert( - target, JSFunction::kNonWeakFieldsEndOffset); - } else { - heap->promotion_queue()->insert(target, object_size); - } - } - - heap->tracer()->increment_promoted_objects_size(object_size); - return; - } - } - ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); - MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size); - heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); - Object* result = allocation->ToObjectUnchecked(); - HeapObject* target = HeapObject::cast(result); - - if (alignment != kObjectAlignment) { - target = EnsureDoubleAligned(heap, target, allocation_size); - } - - // Order is important: slot might be inside of the target if target - // was allocated over a dead object and slot comes from the store - // buffer. 
- *slot = target; - MigrateObject(heap, object, target, object_size); - return; - } - - - static inline void EvacuateJSFunction(Map* map, - HeapObject** slot, - HeapObject* object) { - ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<JSFunction::kSize>(map, slot, object); - - HeapObject* target = *slot; - MarkBit mark_bit = Marking::MarkBitFrom(target); - if (Marking::IsBlack(mark_bit)) { - // This object is black and it might not be rescanned by marker. - // We should explicitly record code entry slot for compaction because - // promotion queue processing (IterateAndMarkPointersToFromSpace) will - // miss it as it is not HeapObject-tagged. - Address code_entry_slot = - target->address() + JSFunction::kCodeEntryOffset; - Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); - map->GetHeap()->mark_compact_collector()-> - RecordCodeEntrySlot(code_entry_slot, code); - } - } - - - static inline void EvacuateFixedArray(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); - EvacuateObject<POINTER_OBJECT, kObjectAlignment>( - map, slot, object, object_size); - } - - - static inline void EvacuateFixedDoubleArray(Map* map, - HeapObject** slot, - HeapObject* object) { - int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); - int object_size = FixedDoubleArray::SizeFor(length); - EvacuateObject<DATA_OBJECT, kDoubleAlignment>( - map, slot, object, object_size); - } - - - static inline void EvacuateFixedTypedArray(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size(); - EvacuateObject<DATA_OBJECT, kObjectAlignment>( - map, slot, object, object_size); - } - - - static inline void EvacuateFixedFloat64Array(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size(); - EvacuateObject<DATA_OBJECT, 
kDoubleAlignment>( - map, slot, object, object_size); - } - - - static inline void EvacuateByteArray(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize(); - EvacuateObject<DATA_OBJECT, kObjectAlignment>( - map, slot, object, object_size); - } - - - static inline void EvacuateSeqOneByteString(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = SeqOneByteString::cast(object)-> - SeqOneByteStringSize(map->instance_type()); - EvacuateObject<DATA_OBJECT, kObjectAlignment>( - map, slot, object, object_size); - } - - - static inline void EvacuateSeqTwoByteString(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = SeqTwoByteString::cast(object)-> - SeqTwoByteStringSize(map->instance_type()); - EvacuateObject<DATA_OBJECT, kObjectAlignment>( - map, slot, object, object_size); - } - - - static inline bool IsShortcutCandidate(int type) { - return ((type & kShortcutTypeMask) == kShortcutTypeTag); - } - - static inline void EvacuateShortcutCandidate(Map* map, - HeapObject** slot, - HeapObject* object) { - ASSERT(IsShortcutCandidate(map->instance_type())); - - Heap* heap = map->GetHeap(); - - if (marks_handling == IGNORE_MARKS && - ConsString::cast(object)->unchecked_second() == - heap->empty_string()) { - HeapObject* first = - HeapObject::cast(ConsString::cast(object)->unchecked_first()); - - *slot = first; - - if (!heap->InNewSpace(first)) { - object->set_map_word(MapWord::FromForwardingAddress(first)); - return; - } - - MapWord first_word = first->map_word(); - if (first_word.IsForwardingAddress()) { - HeapObject* target = first_word.ToForwardingAddress(); - - *slot = target; - object->set_map_word(MapWord::FromForwardingAddress(target)); - return; - } - - heap->DoScavengeObject(first->map(), slot, first); - object->set_map_word(MapWord::FromForwardingAddress(*slot)); - return; - } - - int object_size = ConsString::kSize; - EvacuateObject<POINTER_OBJECT, 
kObjectAlignment>( - map, slot, object, object_size); - } - - template<ObjectContents object_contents> - class ObjectEvacuationStrategy { - public: - template<int object_size> - static inline void VisitSpecialized(Map* map, - HeapObject** slot, - HeapObject* object) { - EvacuateObject<object_contents, kObjectAlignment>( - map, slot, object, object_size); - } - - static inline void Visit(Map* map, - HeapObject** slot, - HeapObject* object) { - int object_size = map->instance_size(); - EvacuateObject<object_contents, kObjectAlignment>( - map, slot, object, object_size); - } - }; - - static VisitorDispatchTable<ScavengingCallback> table_; -}; - - -template<MarksHandling marks_handling, - LoggingAndProfiling logging_and_profiling_mode> -VisitorDispatchTable<ScavengingCallback> - ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_; - - -static void InitializeScavengingVisitorsTables() { - ScavengingVisitor<TRANSFER_MARKS, - LOGGING_AND_PROFILING_DISABLED>::Initialize(); - ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize(); - ScavengingVisitor<TRANSFER_MARKS, - LOGGING_AND_PROFILING_ENABLED>::Initialize(); - ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize(); -} - - -void Heap::SelectScavengingVisitorsTable() { - bool logging_and_profiling = - isolate()->logger()->is_logging() || - isolate()->cpu_profiler()->is_profiling() || - (isolate()->heap_profiler() != NULL && - isolate()->heap_profiler()->is_tracking_object_moves()); - - if (!incremental_marking()->IsMarking()) { - if (!logging_and_profiling) { - scavenging_visitors_table_.CopyFrom( - ScavengingVisitor<IGNORE_MARKS, - LOGGING_AND_PROFILING_DISABLED>::GetTable()); - } else { - scavenging_visitors_table_.CopyFrom( - ScavengingVisitor<IGNORE_MARKS, - LOGGING_AND_PROFILING_ENABLED>::GetTable()); - } - } else { - if (!logging_and_profiling) { - scavenging_visitors_table_.CopyFrom( - ScavengingVisitor<TRANSFER_MARKS, - 
LOGGING_AND_PROFILING_DISABLED>::GetTable()); - } else { - scavenging_visitors_table_.CopyFrom( - ScavengingVisitor<TRANSFER_MARKS, - LOGGING_AND_PROFILING_ENABLED>::GetTable()); - } - - if (incremental_marking()->IsCompacting()) { - // When compacting forbid short-circuiting of cons-strings. - // Scavenging code relies on the fact that new space object - // can't be evacuated into evacuation candidate but - // short-circuiting violates this assumption. - scavenging_visitors_table_.Register( - StaticVisitorBase::kVisitShortcutCandidate, - scavenging_visitors_table_.GetVisitorById( - StaticVisitorBase::kVisitConsString)); - } - } -} - - -void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { - SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object)); - MapWord first_word = object->map_word(); - SLOW_ASSERT(!first_word.IsForwardingAddress()); - Map* map = first_word.ToMap(); - map->GetHeap()->DoScavengeObject(map, p, object); -} - - -MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, - int instance_size) { - Object* result; - MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - - // Map::cast cannot be used due to uninitialized map field. 
- reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); - reinterpret_cast<Map*>(result)->set_instance_type(instance_type); - reinterpret_cast<Map*>(result)->set_instance_size(instance_size); - reinterpret_cast<Map*>(result)->set_visitor_id( - StaticVisitorBase::GetVisitorId(instance_type, instance_size)); - reinterpret_cast<Map*>(result)->set_inobject_properties(0); - reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0); - reinterpret_cast<Map*>(result)->set_unused_property_fields(0); - reinterpret_cast<Map*>(result)->set_bit_field(0); - reinterpret_cast<Map*>(result)->set_bit_field2(0); - int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | - Map::OwnsDescriptors::encode(true); - reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3); - return result; -} - - -MaybeObject* Heap::AllocateMap(InstanceType instance_type, - int instance_size, - ElementsKind elements_kind) { - Object* result; - MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); - if (!maybe_result->To(&result)) return maybe_result; - - Map* map = reinterpret_cast<Map*>(result); - map->set_map_no_write_barrier(meta_map()); - map->set_instance_type(instance_type); - map->set_visitor_id( - StaticVisitorBase::GetVisitorId(instance_type, instance_size)); - map->set_prototype(null_value(), SKIP_WRITE_BARRIER); - map->set_constructor(null_value(), SKIP_WRITE_BARRIER); - map->set_instance_size(instance_size); - map->set_inobject_properties(0); - map->set_pre_allocated_property_fields(0); - map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER); - map->set_dependent_code(DependentCode::cast(empty_fixed_array()), - SKIP_WRITE_BARRIER); - map->init_back_pointer(undefined_value()); - map->set_unused_property_fields(0); - map->set_instance_descriptors(empty_descriptor_array()); - map->set_bit_field(0); - map->set_bit_field2(1 << Map::kIsExtensible); - int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | - 
Map::OwnsDescriptors::encode(true); - map->set_bit_field3(bit_field3); - map->set_elements_kind(elements_kind); - - return map; -} - - -MaybeObject* Heap::AllocateCodeCache() { - CodeCache* code_cache; - { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE); - if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache; - } - code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER); - code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER); - return code_cache; -} - - -MaybeObject* Heap::AllocatePolymorphicCodeCache() { - return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE); -} - - -MaybeObject* Heap::AllocateAccessorPair() { - AccessorPair* accessors; - { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE); - if (!maybe_accessors->To(&accessors)) return maybe_accessors; - } - accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER); - accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER); - accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER); - return accessors; -} - - -MaybeObject* Heap::AllocateTypeFeedbackInfo() { - TypeFeedbackInfo* info; - { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE); - if (!maybe_info->To(&info)) return maybe_info; - } - info->initialize_storage(); - info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER); - return info; -} - - -MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) { - AliasedArgumentsEntry* entry; - { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE); - if (!maybe_entry->To(&entry)) return maybe_entry; - } - entry->set_aliased_context_slot(aliased_context_slot); - return entry; -} - - -const Heap::StringTypeTable Heap::string_type_table[] = { -#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ - {type, size, k##camel_name##MapRootIndex}, - STRING_TYPE_LIST(STRING_TYPE_ELEMENT) -#undef STRING_TYPE_ELEMENT -}; - - -const Heap::ConstantStringTable 
Heap::constant_string_table[] = { -#define CONSTANT_STRING_ELEMENT(name, contents) \ - {contents, k##name##RootIndex}, - INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT) -#undef CONSTANT_STRING_ELEMENT -}; - - -const Heap::StructTable Heap::struct_table[] = { -#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \ - { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex }, - STRUCT_LIST(STRUCT_TABLE_ELEMENT) -#undef STRUCT_TABLE_ELEMENT -}; - - -bool Heap::CreateInitialMaps() { - Object* obj; - { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize); - if (!maybe_obj->ToObject(&obj)) return false; - } - // Map::cast cannot be used due to uninitialized map field. - Map* new_meta_map = reinterpret_cast<Map*>(obj); - set_meta_map(new_meta_map); - new_meta_map->set_map(new_meta_map); - - { MaybeObject* maybe_obj = - AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_fixed_array_map(Map::cast(obj)); - - { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_oddball_map(Map::cast(obj)); - - { MaybeObject* maybe_obj = - AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_constant_pool_array_map(Map::cast(obj)); - - // Allocate the empty array. 
- { MaybeObject* maybe_obj = AllocateEmptyFixedArray(); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_empty_fixed_array(FixedArray::cast(obj)); - - { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_null_value(Oddball::cast(obj)); - Oddball::cast(obj)->set_kind(Oddball::kNull); - - { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_undefined_value(Oddball::cast(obj)); - Oddball::cast(obj)->set_kind(Oddball::kUndefined); - ASSERT(!InNewSpace(undefined_value())); - - // Allocate the empty descriptor array. - { MaybeObject* maybe_obj = AllocateEmptyFixedArray(); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_empty_descriptor_array(DescriptorArray::cast(obj)); - - // Allocate the constant pool array. - { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray(); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_empty_constant_pool_array(ConstantPoolArray::cast(obj)); - - // Fix the instance_descriptors for the existing maps. 
- meta_map()->set_code_cache(empty_fixed_array()); - meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); - meta_map()->init_back_pointer(undefined_value()); - meta_map()->set_instance_descriptors(empty_descriptor_array()); - - fixed_array_map()->set_code_cache(empty_fixed_array()); - fixed_array_map()->set_dependent_code( - DependentCode::cast(empty_fixed_array())); - fixed_array_map()->init_back_pointer(undefined_value()); - fixed_array_map()->set_instance_descriptors(empty_descriptor_array()); - - oddball_map()->set_code_cache(empty_fixed_array()); - oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); - oddball_map()->init_back_pointer(undefined_value()); - oddball_map()->set_instance_descriptors(empty_descriptor_array()); - - constant_pool_array_map()->set_code_cache(empty_fixed_array()); - constant_pool_array_map()->set_dependent_code( - DependentCode::cast(empty_fixed_array())); - constant_pool_array_map()->init_back_pointer(undefined_value()); - constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array()); - - // Fix prototype object for existing maps. 
- meta_map()->set_prototype(null_value()); - meta_map()->set_constructor(null_value()); - - fixed_array_map()->set_prototype(null_value()); - fixed_array_map()->set_constructor(null_value()); - - oddball_map()->set_prototype(null_value()); - oddball_map()->set_constructor(null_value()); - - constant_pool_array_map()->set_prototype(null_value()); - constant_pool_array_map()->set_constructor(null_value()); - - { // Map allocation -#define ALLOCATE_MAP(instance_type, size, field_name) \ - { Map* map; \ - if (!AllocateMap((instance_type), size)->To(&map)) return false; \ - set_##field_name##_map(map); \ - } - -#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \ - ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name) - - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array) - ASSERT(fixed_array_map() != fixed_cow_array_map()); - - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info) - ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number) - ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol) - ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) - - for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) { - const StringTypeTable& entry = string_type_table[i]; - { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size); - if (!maybe_obj->ToObject(&obj)) return false; - } - roots_[entry.index] = Map::cast(obj); - } - - ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string) - undetectable_string_map()->set_is_undetectable(); - - ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string); - undetectable_ascii_string_map()->set_is_undetectable(); - - ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array) - ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array) - ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space) - -#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \ - ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \ - external_##type##_array) - - TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP) 
-#undef ALLOCATE_EXTERNAL_ARRAY_MAP - -#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \ - ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \ - fixed_##type##_array) - - TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP) -#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP - - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements) - - ALLOCATE_VARSIZE_MAP(CODE_TYPE, code) - - ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell) - ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell) - ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler) - ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler) - - - for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) { - const StructTable& entry = struct_table[i]; - Map* map; - if (!AllocateMap(entry.type, entry.size)->To(&map)) - return false; - roots_[entry.index] = map; - } - - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table) - - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context) - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context) - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context) - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context) - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context) - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context) - - ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context) - native_context_map()->set_dictionary_map(true); - native_context_map()->set_visitor_id( - StaticVisitorBase::kVisitNativeContext); - - ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize, - shared_function_info) - - ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, - message_object) - ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, - external) - external_map()->set_is_extensible(false); -#undef ALLOCATE_VARSIZE_MAP -#undef ALLOCATE_MAP - } - - { // Empty arrays - { ByteArray* byte_array; - if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false; - set_empty_byte_array(byte_array); - } - -#define 
ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \ - { ExternalArray* obj; \ - if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj)) \ - return false; \ - set_empty_external_##type##_array(obj); \ - } - - TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY) -#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY - -#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ - { FixedTypedArrayBase* obj; \ - if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array)->To(&obj)) \ - return false; \ - set_empty_fixed_##type##_array(obj); \ - } - - TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY) -#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY - } - ASSERT(!InNewSpace(empty_fixed_array())); - return true; -} - - -MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) { - // Statically ensure that it is safe to allocate heap numbers in paged - // spaces. - int size = HeapNumber::kSize; - STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize); - - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map()); - HeapNumber::cast(result)->set_value(value); - return result; -} - - -MaybeObject* Heap::AllocateCell(Object* value) { - int size = Cell::kSize; - STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize); - - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - HeapObject::cast(result)->set_map_no_write_barrier(cell_map()); - Cell::cast(result)->set_value(value); - return result; -} - - -MaybeObject* Heap::AllocatePropertyCell() { - int size = PropertyCell::kSize; - STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize); - - Object* result; - MaybeObject* maybe_result = 
- AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - - HeapObject::cast(result)->set_map_no_write_barrier( - global_property_cell_map()); - PropertyCell* cell = PropertyCell::cast(result); - cell->set_dependent_code(DependentCode::cast(empty_fixed_array()), - SKIP_WRITE_BARRIER); - cell->set_value(the_hole_value()); - cell->set_type(HeapType::None()); - return result; -} - - -MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) { - Box* result; - MaybeObject* maybe_result = AllocateStruct(BOX_TYPE); - if (!maybe_result->To(&result)) return maybe_result; - result->set_value(value); - return result; -} - - -MaybeObject* Heap::AllocateAllocationSite() { - AllocationSite* site; - MaybeObject* maybe_result = Allocate(allocation_site_map(), - OLD_POINTER_SPACE); - if (!maybe_result->To(&site)) return maybe_result; - site->Initialize(); - - // Link the site - site->set_weak_next(allocation_sites_list()); - set_allocation_sites_list(site); - return site; -} - - -MaybeObject* Heap::CreateOddball(const char* to_string, - Object* to_number, - byte kind) { - Object* result; - { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - return Oddball::cast(result)->Initialize(this, to_string, to_number, kind); -} - - -bool Heap::CreateApiObjects() { - Object* obj; - - { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); - if (!maybe_obj->ToObject(&obj)) return false; - } - // Don't use Smi-only elements optimizations for objects with the neander - // map. There are too many cases where element values are set directly with a - // bottleneck to trap the Smi-only -> fast elements transition, and there - // appears to be no benefit for optimize this case. 
- Map* new_neander_map = Map::cast(obj); - new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND); - set_neander_map(new_neander_map); - - { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map()); - if (!maybe_obj->ToObject(&obj)) return false; - } - Object* elements; - { MaybeObject* maybe_elements = AllocateFixedArray(2); - if (!maybe_elements->ToObject(&elements)) return false; - } - FixedArray::cast(elements)->set(0, Smi::FromInt(0)); - JSObject::cast(obj)->set_elements(FixedArray::cast(elements)); - set_message_listeners(JSObject::cast(obj)); - - return true; -} - - -void Heap::CreateJSEntryStub() { - JSEntryStub stub; - set_js_entry_code(*stub.GetCode(isolate())); -} - - -void Heap::CreateJSConstructEntryStub() { - JSConstructEntryStub stub; - set_js_construct_entry_code(*stub.GetCode(isolate())); -} - - -void Heap::CreateFixedStubs() { - // Here we create roots for fixed stubs. They are needed at GC - // for cooking and uncooking (check out frames.cc). - // The eliminates the need for doing dictionary lookup in the - // stub cache for these stubs. - HandleScope scope(isolate()); - - // Create stubs that should be there, so we don't unexpectedly have to - // create them if we need them during the creation of another stub. - // Stub creation mixes raw pointers and handles in an unsafe manner so - // we cannot create stubs while we are creating stubs. - CodeStub::GenerateStubsAheadOfTime(isolate()); - - // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on - // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub - // is created. - - // gcc-4.4 has problem generating correct code of following snippet: - // { JSEntryStub stub; - // js_entry_code_ = *stub.GetCode(); - // } - // { JSConstructEntryStub stub; - // js_construct_entry_code_ = *stub.GetCode(); - // } - // To workaround the problem, make separate functions without inlining. 
- Heap::CreateJSEntryStub(); - Heap::CreateJSConstructEntryStub(); -} - - -bool Heap::CreateInitialObjects() { - Object* obj; - - // The -0 value must be set before NumberFromDouble works. - { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_minus_zero_value(HeapNumber::cast(obj)); - ASSERT(std::signbit(minus_zero_value()->Number()) != 0); - - { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_nan_value(HeapNumber::cast(obj)); - - { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_infinity_value(HeapNumber::cast(obj)); - - // The hole has not been created yet, but we want to put something - // predictable in the gaps in the string table, so lets make that Smi zero. - set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0))); - - // Allocate initial string table. - { MaybeObject* maybe_obj = - StringTable::Allocate(this, kInitialStringTableSize); - if (!maybe_obj->ToObject(&obj)) return false; - } - // Don't use set_string_table() due to asserts. - roots_[kStringTableRootIndex] = obj; - - // Finish initializing oddballs after creating the string table. - { MaybeObject* maybe_obj = - undefined_value()->Initialize(this, - "undefined", - nan_value(), - Oddball::kUndefined); - if (!maybe_obj->ToObject(&obj)) return false; - } - - // Initialize the null_value. 
- { MaybeObject* maybe_obj = null_value()->Initialize( - this, "null", Smi::FromInt(0), Oddball::kNull); - if (!maybe_obj->ToObject(&obj)) return false; - } - - { MaybeObject* maybe_obj = CreateOddball("true", - Smi::FromInt(1), - Oddball::kTrue); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_true_value(Oddball::cast(obj)); - - { MaybeObject* maybe_obj = CreateOddball("false", - Smi::FromInt(0), - Oddball::kFalse); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_false_value(Oddball::cast(obj)); - - { MaybeObject* maybe_obj = CreateOddball("hole", - Smi::FromInt(-1), - Oddball::kTheHole); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_the_hole_value(Oddball::cast(obj)); - - { MaybeObject* maybe_obj = CreateOddball("uninitialized", - Smi::FromInt(-1), - Oddball::kUninitialized); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_uninitialized_value(Oddball::cast(obj)); - - { MaybeObject* maybe_obj = CreateOddball("arguments_marker", - Smi::FromInt(-4), - Oddball::kArgumentMarker); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_arguments_marker(Oddball::cast(obj)); - - { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel", - Smi::FromInt(-2), - Oddball::kOther); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_no_interceptor_result_sentinel(obj); - - { MaybeObject* maybe_obj = CreateOddball("termination_exception", - Smi::FromInt(-3), - Oddball::kOther); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_termination_exception(obj); - - for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) { - { MaybeObject* maybe_obj = - InternalizeUtf8String(constant_string_table[i].contents); - if (!maybe_obj->ToObject(&obj)) return false; - } - roots_[constant_string_table[i].index] = String::cast(obj); - } - - // Allocate the hidden string which is used to identify the hidden properties - // in JSObjects. 
The hash code has a special value so that it will not match - // the empty string when searching for the property. It cannot be part of the - // loop above because it needs to be allocated manually with the special - // hash code in place. The hash code for the hidden_string is zero to ensure - // that it will always be at the first entry in property descriptors. - { MaybeObject* maybe_obj = AllocateOneByteInternalizedString( - OneByteVector("", 0), String::kEmptyStringHash); - if (!maybe_obj->ToObject(&obj)) return false; - } - hidden_string_ = String::cast(obj); - - // Allocate the code_stubs dictionary. The initial size is set to avoid - // expanding the dictionary during bootstrapping. - { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_code_stubs(UnseededNumberDictionary::cast(obj)); - - - // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size - // is set to avoid expanding the dictionary during bootstrapping. - { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj)); - - { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache(); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj)); - - set_instanceof_cache_function(Smi::FromInt(0)); - set_instanceof_cache_map(Smi::FromInt(0)); - set_instanceof_cache_answer(Smi::FromInt(0)); - - CreateFixedStubs(); - - // Allocate the dictionary of intrinsic function names. 
- { MaybeObject* maybe_obj = - NameDictionary::Allocate(this, Runtime::kNumFunctions); - if (!maybe_obj->ToObject(&obj)) return false; - } - { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this, - obj); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_intrinsic_function_names(NameDictionary::cast(obj)); - - { MaybeObject* maybe_obj = AllocateInitialNumberStringCache(); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_number_string_cache(FixedArray::cast(obj)); - - // Allocate cache for single character one byte strings. - { MaybeObject* maybe_obj = - AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_single_character_string_cache(FixedArray::cast(obj)); - - // Allocate cache for string split. - { MaybeObject* maybe_obj = AllocateFixedArray( - RegExpResultsCache::kRegExpResultsCacheSize, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_string_split_cache(FixedArray::cast(obj)); - - { MaybeObject* maybe_obj = AllocateFixedArray( - RegExpResultsCache::kRegExpResultsCacheSize, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_regexp_multiple_cache(FixedArray::cast(obj)); - - // Allocate cache for external strings pointing to native source code. - { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount()); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_natives_source_cache(FixedArray::cast(obj)); - - { MaybeObject* maybe_obj = AllocateCell(undefined_value()); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_undefined_cell(Cell::cast(obj)); - - // The symbol registry is initialized lazily. - set_symbol_registry(undefined_value()); - - // Allocate object to hold object observation state. 
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); - if (!maybe_obj->ToObject(&obj)) return false; - } - { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj)); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_observation_state(JSObject::cast(obj)); - - // Allocate object to hold object microtask state. - { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); - if (!maybe_obj->ToObject(&obj)) return false; - } - { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj)); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_microtask_state(JSObject::cast(obj)); - - { MaybeObject* maybe_obj = AllocateSymbol(); - if (!maybe_obj->ToObject(&obj)) return false; - } - Symbol::cast(obj)->set_is_private(true); - set_frozen_symbol(Symbol::cast(obj)); - - { MaybeObject* maybe_obj = AllocateSymbol(); - if (!maybe_obj->ToObject(&obj)) return false; - } - Symbol::cast(obj)->set_is_private(true); - set_nonexistent_symbol(Symbol::cast(obj)); - - { MaybeObject* maybe_obj = AllocateSymbol(); - if (!maybe_obj->ToObject(&obj)) return false; - } - Symbol::cast(obj)->set_is_private(true); - set_elements_transition_symbol(Symbol::cast(obj)); - - { MaybeObject* maybe_obj = AllocateSymbol(); - if (!maybe_obj->ToObject(&obj)) return false; - } - Symbol::cast(obj)->set_is_private(true); - set_uninitialized_symbol(Symbol::cast(obj)); - - { MaybeObject* maybe_obj = AllocateSymbol(); - if (!maybe_obj->ToObject(&obj)) return false; - } - Symbol::cast(obj)->set_is_private(true); - set_megamorphic_symbol(Symbol::cast(obj)); - - { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - SeededNumberDictionary::cast(obj)->set_requires_slow_elements(); - set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj)); - - { MaybeObject* maybe_obj = AllocateSymbol(); - if (!maybe_obj->ToObject(&obj)) return false; - } - 
Symbol::cast(obj)->set_is_private(true); - set_observed_symbol(Symbol::cast(obj)); - - { MaybeObject* maybe_obj = AllocateFixedArray(0, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_materialized_objects(FixedArray::cast(obj)); - - // Handling of script id generation is in Factory::NewScript. - set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId)); - - { MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad(); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_allocation_sites_scratchpad(FixedArray::cast(obj)); - InitializeAllocationSitesScratchpad(); - - // Initialize keyed lookup cache. - isolate_->keyed_lookup_cache()->Clear(); - - // Initialize context slot cache. - isolate_->context_slot_cache()->Clear(); - - // Initialize descriptor cache. - isolate_->descriptor_lookup_cache()->Clear(); - - // Initialize compilation cache. - isolate_->compilation_cache()->Clear(); - - return true; -} - - -bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) { - RootListIndex writable_roots[] = { - kStoreBufferTopRootIndex, - kStackLimitRootIndex, - kNumberStringCacheRootIndex, - kInstanceofCacheFunctionRootIndex, - kInstanceofCacheMapRootIndex, - kInstanceofCacheAnswerRootIndex, - kCodeStubsRootIndex, - kNonMonomorphicCacheRootIndex, - kPolymorphicCodeCacheRootIndex, - kLastScriptIdRootIndex, - kEmptyScriptRootIndex, - kRealStackLimitRootIndex, - kArgumentsAdaptorDeoptPCOffsetRootIndex, - kConstructStubDeoptPCOffsetRootIndex, - kGetterStubDeoptPCOffsetRootIndex, - kSetterStubDeoptPCOffsetRootIndex, - kStringTableRootIndex, - }; - - for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) { - if (root_index == writable_roots[i]) - return true; - } - return false; -} - - -bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) { - return !RootCanBeWrittenAfterInitialization(root_index) && - !InNewSpace(roots_array_start()[root_index]); -} - - -Object* RegExpResultsCache::Lookup(Heap* heap, - 
String* key_string, - Object* key_pattern, - ResultsCacheType type) { - FixedArray* cache; - if (!key_string->IsInternalizedString()) return Smi::FromInt(0); - if (type == STRING_SPLIT_SUBSTRINGS) { - ASSERT(key_pattern->IsString()); - if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0); - cache = heap->string_split_cache(); - } else { - ASSERT(type == REGEXP_MULTIPLE_INDICES); - ASSERT(key_pattern->IsFixedArray()); - cache = heap->regexp_multiple_cache(); - } - - uint32_t hash = key_string->Hash(); - uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & - ~(kArrayEntriesPerCacheEntry - 1)); - if (cache->get(index + kStringOffset) == key_string && - cache->get(index + kPatternOffset) == key_pattern) { - return cache->get(index + kArrayOffset); - } - index = - ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); - if (cache->get(index + kStringOffset) == key_string && - cache->get(index + kPatternOffset) == key_pattern) { - return cache->get(index + kArrayOffset); - } - return Smi::FromInt(0); -} - - -void RegExpResultsCache::Enter(Heap* heap, - String* key_string, - Object* key_pattern, - FixedArray* value_array, - ResultsCacheType type) { - FixedArray* cache; - if (!key_string->IsInternalizedString()) return; - if (type == STRING_SPLIT_SUBSTRINGS) { - ASSERT(key_pattern->IsString()); - if (!key_pattern->IsInternalizedString()) return; - cache = heap->string_split_cache(); - } else { - ASSERT(type == REGEXP_MULTIPLE_INDICES); - ASSERT(key_pattern->IsFixedArray()); - cache = heap->regexp_multiple_cache(); - } - - uint32_t hash = key_string->Hash(); - uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & - ~(kArrayEntriesPerCacheEntry - 1)); - if (cache->get(index + kStringOffset) == Smi::FromInt(0)) { - cache->set(index + kStringOffset, key_string); - cache->set(index + kPatternOffset, key_pattern); - cache->set(index + kArrayOffset, value_array); - } else { - uint32_t index2 = - ((index + kArrayEntriesPerCacheEntry) & 
(kRegExpResultsCacheSize - 1)); - if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) { - cache->set(index2 + kStringOffset, key_string); - cache->set(index2 + kPatternOffset, key_pattern); - cache->set(index2 + kArrayOffset, value_array); - } else { - cache->set(index2 + kStringOffset, Smi::FromInt(0)); - cache->set(index2 + kPatternOffset, Smi::FromInt(0)); - cache->set(index2 + kArrayOffset, Smi::FromInt(0)); - cache->set(index + kStringOffset, key_string); - cache->set(index + kPatternOffset, key_pattern); - cache->set(index + kArrayOffset, value_array); - } - } - // If the array is a reasonably short list of substrings, convert it into a - // list of internalized strings. - if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) { - for (int i = 0; i < value_array->length(); i++) { - String* str = String::cast(value_array->get(i)); - Object* internalized_str; - MaybeObject* maybe_string = heap->InternalizeString(str); - if (maybe_string->ToObject(&internalized_str)) { - value_array->set(i, internalized_str); - } - } - } - // Convert backing store to a copy-on-write array. - value_array->set_map_no_write_barrier(heap->fixed_cow_array_map()); -} - - -void RegExpResultsCache::Clear(FixedArray* cache) { - for (int i = 0; i < kRegExpResultsCacheSize; i++) { - cache->set(i, Smi::FromInt(0)); - } -} - - -MaybeObject* Heap::AllocateInitialNumberStringCache() { - MaybeObject* maybe_obj = - AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED); - return maybe_obj; -} - - -int Heap::FullSizeNumberStringCacheLength() { - // Compute the size of the number string cache based on the max newspace size. - // The number string cache has a minimum size based on twice the initial cache - // size to ensure that it is bigger after being made 'full size'. 
- int number_string_cache_size = max_semispace_size_ / 512; - number_string_cache_size = Max(kInitialNumberStringCacheSize * 2, - Min(0x4000, number_string_cache_size)); - // There is a string and a number per entry so the length is twice the number - // of entries. - return number_string_cache_size * 2; -} - - -void Heap::AllocateFullSizeNumberStringCache() { - // The idea is to have a small number string cache in the snapshot to keep - // boot-time memory usage down. If we expand the number string cache already - // while creating the snapshot then that didn't work out. - ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL); - MaybeObject* maybe_obj = - AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED); - Object* new_cache; - if (maybe_obj->ToObject(&new_cache)) { - // We don't bother to repopulate the cache with entries from the old cache. - // It will be repopulated soon enough with new strings. - set_number_string_cache(FixedArray::cast(new_cache)); - } - // If allocation fails then we just return without doing anything. It is only - // a cache, so best effort is OK here. -} - - -void Heap::FlushNumberStringCache() { - // Flush the number to string cache. 
- int len = number_string_cache()->length(); - for (int i = 0; i < len; i++) { - number_string_cache()->set_undefined(i); - } -} - - -static inline int double_get_hash(double d) { - DoubleRepresentation rep(d); - return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32); -} - - -static inline int smi_get_hash(Smi* smi) { - return smi->value(); -} - - -Object* Heap::GetNumberStringCache(Object* number) { - int hash; - int mask = (number_string_cache()->length() >> 1) - 1; - if (number->IsSmi()) { - hash = smi_get_hash(Smi::cast(number)) & mask; - } else { - hash = double_get_hash(number->Number()) & mask; - } - Object* key = number_string_cache()->get(hash * 2); - if (key == number) { - return String::cast(number_string_cache()->get(hash * 2 + 1)); - } else if (key->IsHeapNumber() && - number->IsHeapNumber() && - key->Number() == number->Number()) { - return String::cast(number_string_cache()->get(hash * 2 + 1)); - } - return undefined_value(); -} - - -void Heap::SetNumberStringCache(Object* number, String* string) { - int hash; - int mask = (number_string_cache()->length() >> 1) - 1; - if (number->IsSmi()) { - hash = smi_get_hash(Smi::cast(number)) & mask; - } else { - hash = double_get_hash(number->Number()) & mask; - } - if (number_string_cache()->get(hash * 2) != undefined_value() && - number_string_cache()->length() != FullSizeNumberStringCacheLength()) { - // The first time we have a hash collision, we move to the full sized - // number string cache. 
- AllocateFullSizeNumberStringCache(); - return; - } - number_string_cache()->set(hash * 2, number); - number_string_cache()->set(hash * 2 + 1, string); -} - - -MaybeObject* Heap::NumberToString(Object* number, - bool check_number_string_cache) { - isolate_->counters()->number_to_string_runtime()->Increment(); - if (check_number_string_cache) { - Object* cached = GetNumberStringCache(number); - if (cached != undefined_value()) { - return cached; - } - } - - char arr[100]; - Vector<char> buffer(arr, ARRAY_SIZE(arr)); - const char* str; - if (number->IsSmi()) { - int num = Smi::cast(number)->value(); - str = IntToCString(num, buffer); - } else { - double num = HeapNumber::cast(number)->value(); - str = DoubleToCString(num, buffer); - } - - Object* js_string; - - // We tenure the allocated string since it is referenced from the - // number-string cache which lives in the old space. - MaybeObject* maybe_js_string = - AllocateStringFromOneByte(CStrVector(str), TENURED); - if (maybe_js_string->ToObject(&js_string)) { - SetNumberStringCache(number, String::cast(js_string)); - } - return maybe_js_string; -} - - -MaybeObject* Heap::Uint32ToString(uint32_t value, - bool check_number_string_cache) { - Object* number; - MaybeObject* maybe = NumberFromUint32(value); - if (!maybe->To<Object>(&number)) return maybe; - return NumberToString(number, check_number_string_cache); -} - - -MaybeObject* Heap::AllocateAllocationSitesScratchpad() { - MaybeObject* maybe_obj = - AllocateFixedArray(kAllocationSiteScratchpadSize, TENURED); - return maybe_obj; -} - - -void Heap::FlushAllocationSitesScratchpad() { - for (int i = 0; i < allocation_sites_scratchpad_length_; i++) { - allocation_sites_scratchpad()->set_undefined(i); - } - allocation_sites_scratchpad_length_ = 0; -} - - -void Heap::InitializeAllocationSitesScratchpad() { - ASSERT(allocation_sites_scratchpad()->length() == - kAllocationSiteScratchpadSize); - for (int i = 0; i < kAllocationSiteScratchpadSize; i++) { - 
allocation_sites_scratchpad()->set_undefined(i); - } -} - - -void Heap::AddAllocationSiteToScratchpad(AllocationSite* site, - ScratchpadSlotMode mode) { - if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) { - // We cannot use the normal write-barrier because slots need to be - // recorded with non-incremental marking as well. We have to explicitly - // record the slot to take evacuation candidates into account. - allocation_sites_scratchpad()->set( - allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER); - Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt( - allocation_sites_scratchpad_length_); - - if (mode == RECORD_SCRATCHPAD_SLOT) { - // We need to allow slots buffer overflow here since the evacuation - // candidates are not part of the global list of old space pages and - // releasing an evacuation candidate due to a slots buffer overflow - // results in lost pages. - mark_compact_collector()->RecordSlot( - slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW); - } - allocation_sites_scratchpad_length_++; - } -} - - -Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { - return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]); -} - - -Heap::RootListIndex Heap::RootIndexForExternalArrayType( - ExternalArrayType array_type) { - switch (array_type) { -#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ - case kExternal##Type##Array: \ - return kExternal##Type##ArrayMapRootIndex; - - TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) -#undef ARRAY_TYPE_TO_ROOT_INDEX - - default: - UNREACHABLE(); - return kUndefinedValueRootIndex; - } -} - - -Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) { - return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]); -} - - -Heap::RootListIndex Heap::RootIndexForFixedTypedArray( - ExternalArrayType array_type) { - switch (array_type) { -#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ - case kExternal##Type##Array: \ - 
return kFixed##Type##ArrayMapRootIndex; - - TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) -#undef ARRAY_TYPE_TO_ROOT_INDEX - - default: - UNREACHABLE(); - return kUndefinedValueRootIndex; - } -} - - -Heap::RootListIndex Heap::RootIndexForEmptyExternalArray( - ElementsKind elementsKind) { - switch (elementsKind) { -#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ - case EXTERNAL_##TYPE##_ELEMENTS: \ - return kEmptyExternal##Type##ArrayRootIndex; - - TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) -#undef ELEMENT_KIND_TO_ROOT_INDEX - - default: - UNREACHABLE(); - return kUndefinedValueRootIndex; - } -} - - -Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray( - ElementsKind elementsKind) { - switch (elementsKind) { -#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ - case TYPE##_ELEMENTS: \ - return kEmptyFixed##Type##ArrayRootIndex; - - TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) -#undef ELEMENT_KIND_TO_ROOT_INDEX - default: - UNREACHABLE(); - return kUndefinedValueRootIndex; - } -} - - -ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) { - return ExternalArray::cast( - roots_[RootIndexForEmptyExternalArray(map->elements_kind())]); -} - - -FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) { - return FixedTypedArrayBase::cast( - roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]); -} - - -MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) { - // We need to distinguish the minus zero value and this cannot be - // done after conversion to int. Doing this by comparing bit - // patterns is faster than using fpclassify() et al. - if (IsMinusZero(value)) { - return AllocateHeapNumber(-0.0, pretenure); - } - - int int_value = FastD2I(value); - if (value == int_value && Smi::IsValid(int_value)) { - return Smi::FromInt(int_value); - } - - // Materialize the value in the heap. 
- return AllocateHeapNumber(value, pretenure); -} - - -MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) { - // Statically ensure that it is safe to allocate foreigns in paged spaces. - STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize); - AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; - Foreign* result; - MaybeObject* maybe_result = Allocate(foreign_map(), space); - if (!maybe_result->To(&result)) return maybe_result; - result->set_foreign_address(address); - return result; -} - - -MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) { - SharedFunctionInfo* share; - MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE); - if (!maybe->To<SharedFunctionInfo>(&share)) return maybe; - - // Set pointer fields. - share->set_name(name); - Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal); - share->set_code(illegal); - share->set_optimized_code_map(Smi::FromInt(0)); - share->set_scope_info(ScopeInfo::Empty(isolate_)); - Code* construct_stub = - isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric); - share->set_construct_stub(construct_stub); - share->set_instance_class_name(Object_string()); - share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER); - share->set_script(undefined_value(), SKIP_WRITE_BARRIER); - share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER); - share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER); - share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER); - share->set_ast_node_count(0); - share->set_counters(0); - - // Set integer fields (smi or int, depending on the architecture). - share->set_length(0); - share->set_formal_parameter_count(0); - share->set_expected_nof_properties(0); - share->set_num_literals(0); - share->set_start_position_and_type(0); - share->set_end_position(0); - share->set_function_token_position(0); - // All compiler hints default to false or 0. 
- share->set_compiler_hints(0); - share->set_opt_count_and_bailout_reason(0); - - return share; -} - - -MaybeObject* Heap::AllocateJSMessageObject(String* type, - JSArray* arguments, - int start_position, - int end_position, - Object* script, - Object* stack_frames) { - Object* result; - { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - JSMessageObject* message = JSMessageObject::cast(result); - message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER); - message->initialize_elements(); - message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER); - message->set_type(type); - message->set_arguments(arguments); - message->set_start_position(start_position); - message->set_end_position(end_position); - message->set_script(script); - message->set_stack_frames(stack_frames); - return result; -} - - -MaybeObject* Heap::AllocateExternalStringFromAscii( - const ExternalAsciiString::Resource* resource) { - size_t length = resource->length(); - if (length > static_cast<size_t>(String::kMaxLength)) { - return isolate()->ThrowInvalidStringLength(); - } - - Map* map = external_ascii_string_map(); - Object* result; - { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - ExternalAsciiString* external_string = ExternalAsciiString::cast(result); - external_string->set_length(static_cast<int>(length)); - external_string->set_hash_field(String::kEmptyHashField); - external_string->set_resource(resource); - - return result; -} - - -MaybeObject* Heap::AllocateExternalStringFromTwoByte( - const ExternalTwoByteString::Resource* resource) { - size_t length = resource->length(); - if (length > static_cast<size_t>(String::kMaxLength)) { - return isolate()->ThrowInvalidStringLength(); - } - - // For small strings we check whether the resource contains only - // one byte characters. 
If yes, we use a different string map. - static const size_t kOneByteCheckLengthLimit = 32; - bool is_one_byte = length <= kOneByteCheckLengthLimit && - String::IsOneByte(resource->data(), static_cast<int>(length)); - Map* map = is_one_byte ? - external_string_with_one_byte_data_map() : external_string_map(); - Object* result; - { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result); - external_string->set_length(static_cast<int>(length)); - external_string->set_hash_field(String::kEmptyHashField); - external_string->set_resource(resource); - - return result; -} - - -MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) { - if (code <= String::kMaxOneByteCharCode) { - Object* value = single_character_string_cache()->get(code); - if (value != undefined_value()) return value; - - uint8_t buffer[1]; - buffer[0] = static_cast<uint8_t>(code); - Object* result; - OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed()); - MaybeObject* maybe_result = InternalizeStringWithKey(&key); - - if (!maybe_result->ToObject(&result)) return maybe_result; - single_character_string_cache()->set(code, result); - return result; - } - - SeqTwoByteString* result; - { MaybeObject* maybe_result = AllocateRawTwoByteString(1); - if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result; - } - result->SeqTwoByteStringSet(0, code); - return result; -} - - -MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) { - if (length < 0 || length > ByteArray::kMaxLength) { - v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); - } - int size = ByteArray::SizeFor(length); - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return 
maybe_result; - } - - reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier( - byte_array_map()); - reinterpret_cast<ByteArray*>(result)->set_length(length); - return result; -} - - -void Heap::CreateFillerObjectAt(Address addr, int size) { - if (size == 0) return; - HeapObject* filler = HeapObject::FromAddress(addr); - if (size == kPointerSize) { - filler->set_map_no_write_barrier(one_pointer_filler_map()); - } else if (size == 2 * kPointerSize) { - filler->set_map_no_write_barrier(two_pointer_filler_map()); - } else { - filler->set_map_no_write_barrier(free_space_map()); - FreeSpace::cast(filler)->set_size(size); - } -} - - -bool Heap::CanMoveObjectStart(HeapObject* object) { - Address address = object->address(); - bool is_in_old_pointer_space = InOldPointerSpace(address); - bool is_in_old_data_space = InOldDataSpace(address); - - if (lo_space()->Contains(object)) return false; - - // We cannot move the object start if the given old space page is - // concurrently swept. - return (!is_in_old_pointer_space && !is_in_old_data_space) || - Page::FromAddress(address)->parallel_sweeping() <= - MemoryChunk::PARALLEL_SWEEPING_FINALIZE; -} - - -void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) { - if (incremental_marking()->IsMarking() && - Marking::IsBlack(Marking::MarkBitFrom(address))) { - if (mode == FROM_GC) { - MemoryChunk::IncrementLiveBytesFromGC(address, by); - } else { - MemoryChunk::IncrementLiveBytesFromMutator(address, by); - } - } -} - - -MaybeObject* Heap::AllocateExternalArray(int length, - ExternalArrayType array_type, - void* external_pointer, - PretenureFlag pretenure) { - int size = ExternalArray::kAlignedSize; - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier( - 
MapForExternalArrayType(array_type)); - reinterpret_cast<ExternalArray*>(result)->set_length(length); - reinterpret_cast<ExternalArray*>(result)->set_external_pointer( - external_pointer); - - return result; -} - -static void ForFixedTypedArray(ExternalArrayType array_type, - int* element_size, - ElementsKind* element_kind) { - switch (array_type) { -#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \ - case kExternal##Type##Array: \ - *element_size = size; \ - *element_kind = TYPE##_ELEMENTS; \ - return; - - TYPED_ARRAYS(TYPED_ARRAY_CASE) -#undef TYPED_ARRAY_CASE - - default: - *element_size = 0; // Bogus - *element_kind = UINT8_ELEMENTS; // Bogus - UNREACHABLE(); - } -} - - -MaybeObject* Heap::AllocateFixedTypedArray(int length, - ExternalArrayType array_type, - PretenureFlag pretenure) { - int element_size; - ElementsKind elements_kind; - ForFixedTypedArray(array_type, &element_size, &elements_kind); - int size = OBJECT_POINTER_ALIGN( - length * element_size + FixedTypedArrayBase::kDataOffset); -#ifndef V8_HOST_ARCH_64_BIT - if (array_type == kExternalFloat64Array) { - size += kPointerSize; - } -#endif - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - - HeapObject* object; - MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_object->To(&object)) return maybe_object; - - if (array_type == kExternalFloat64Array) { - object = EnsureDoubleAligned(this, object, size); - } - - FixedTypedArrayBase* elements = - reinterpret_cast<FixedTypedArrayBase*>(object); - elements->set_map(MapForFixedTypedArray(array_type)); - elements->set_length(length); - memset(elements->DataPtr(), 0, elements->DataSize()); - return elements; -} - - -MaybeObject* Heap::CreateCode(const CodeDesc& desc, - Code::Flags flags, - Handle<Object> self_reference, - bool immovable, - bool crankshafted, - int prologue_offset) { - // Allocate ByteArray and ConstantPoolArray before the Code object, so that we - // do not risk leaving 
uninitialized Code object (and breaking the heap). - ByteArray* reloc_info; - MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED); - if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info; - - ConstantPoolArray* constant_pool; - if (FLAG_enable_ool_constant_pool) { - MaybeObject* maybe_constant_pool = desc.origin->AllocateConstantPool(this); - if (!maybe_constant_pool->To(&constant_pool)) return maybe_constant_pool; - } else { - constant_pool = empty_constant_pool_array(); - } - - // Compute size. - int body_size = RoundUp(desc.instr_size, kObjectAlignment); - int obj_size = Code::SizeFor(body_size); - ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment)); - MaybeObject* maybe_result; - // Large code objects and code objects which should stay at a fixed address - // are allocated in large object space. - HeapObject* result; - bool force_lo_space = obj_size > code_space()->AreaSize(); - if (force_lo_space) { - maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); - } else { - maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE); - } - if (!maybe_result->To<HeapObject>(&result)) return maybe_result; - - if (immovable && !force_lo_space && - // Objects on the first page of each space are never moved. - !code_space_->FirstPage()->Contains(result->address())) { - // Discard the first code allocation, which was on a page where it could be - // moved. 
- CreateFillerObjectAt(result->address(), obj_size); - maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); - if (!maybe_result->To<HeapObject>(&result)) return maybe_result; - } - - // Initialize the object - result->set_map_no_write_barrier(code_map()); - Code* code = Code::cast(result); - ASSERT(!isolate_->code_range()->exists() || - isolate_->code_range()->contains(code->address())); - code->set_instruction_size(desc.instr_size); - code->set_relocation_info(reloc_info); - code->set_flags(flags); - code->set_raw_kind_specific_flags1(0); - code->set_raw_kind_specific_flags2(0); - code->set_is_crankshafted(crankshafted); - code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER); - code->set_raw_type_feedback_info(undefined_value()); - code->set_next_code_link(undefined_value()); - code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER); - code->set_gc_metadata(Smi::FromInt(0)); - code->set_ic_age(global_ic_age_); - code->set_prologue_offset(prologue_offset); - if (code->kind() == Code::OPTIMIZED_FUNCTION) { - code->set_marked_for_deoptimization(false); - } - - if (FLAG_enable_ool_constant_pool) { - desc.origin->PopulateConstantPool(constant_pool); - } - code->set_constant_pool(constant_pool); - -#ifdef ENABLE_DEBUGGER_SUPPORT - if (code->kind() == Code::FUNCTION) { - code->set_has_debug_break_slots( - isolate_->debugger()->IsDebuggerActive()); - } -#endif - - // Allow self references to created code object by patching the handle to - // point to the newly allocated Code object. - if (!self_reference.is_null()) { - *(self_reference.location()) = code; - } - // Migrate generated code. - // The generated code can contain Object** values (typically from handles) - // that are dereferenced during the copy to point directly to the actual heap - // objects. These pointers can include references to the code object itself, - // through the self_reference parameter. 
- code->CopyFrom(desc); - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - code->Verify(); - } -#endif - return code; -} - - -MaybeObject* Heap::CopyCode(Code* code) { - MaybeObject* maybe_result; - Object* new_constant_pool; - if (FLAG_enable_ool_constant_pool && - code->constant_pool() != empty_constant_pool_array()) { - // Copy the constant pool, since edits to the copied code may modify - // the constant pool. - maybe_result = CopyConstantPoolArray(code->constant_pool()); - if (!maybe_result->ToObject(&new_constant_pool)) return maybe_result; - } else { - new_constant_pool = empty_constant_pool_array(); - } - - // Allocate an object the same size as the code object. - int obj_size = code->Size(); - if (obj_size > code_space()->AreaSize()) { - maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); - } else { - maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE); - } - - Object* result; - if (!maybe_result->ToObject(&result)) return maybe_result; - - // Copy code object. - Address old_addr = code->address(); - Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); - CopyBlock(new_addr, old_addr, obj_size); - Code* new_code = Code::cast(result); - - // Update the constant pool. - new_code->set_constant_pool(new_constant_pool); - - // Relocate the copy. - ASSERT(!isolate_->code_range()->exists() || - isolate_->code_range()->contains(code->address())); - new_code->Relocate(new_addr - old_addr); - return new_code; -} - - -MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { - // Allocate ByteArray and ConstantPoolArray before the Code object, so that we - // do not risk leaving uninitialized Code object (and breaking the heap). 
- Object* reloc_info_array; - { MaybeObject* maybe_reloc_info_array = - AllocateByteArray(reloc_info.length(), TENURED); - if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) { - return maybe_reloc_info_array; - } - } - Object* new_constant_pool; - if (FLAG_enable_ool_constant_pool && - code->constant_pool() != empty_constant_pool_array()) { - // Copy the constant pool, since edits to the copied code may modify - // the constant pool. - MaybeObject* maybe_constant_pool = - CopyConstantPoolArray(code->constant_pool()); - if (!maybe_constant_pool->ToObject(&new_constant_pool)) - return maybe_constant_pool; - } else { - new_constant_pool = empty_constant_pool_array(); - } - - int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment); - - int new_obj_size = Code::SizeFor(new_body_size); - - Address old_addr = code->address(); - - size_t relocation_offset = - static_cast<size_t>(code->instruction_end() - old_addr); - - MaybeObject* maybe_result; - if (new_obj_size > code_space()->AreaSize()) { - maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE); - } else { - maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE); - } - - Object* result; - if (!maybe_result->ToObject(&result)) return maybe_result; - - // Copy code object. - Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); - - // Copy header and instructions. - CopyBytes(new_addr, old_addr, relocation_offset); - - Code* new_code = Code::cast(result); - new_code->set_relocation_info(ByteArray::cast(reloc_info_array)); - - // Update constant pool. - new_code->set_constant_pool(new_constant_pool); - - // Copy patched rinfo. - CopyBytes(new_code->relocation_start(), - reloc_info.start(), - static_cast<size_t>(reloc_info.length())); - - // Relocate the copy. 
- ASSERT(!isolate_->code_range()->exists() || - isolate_->code_range()->contains(code->address())); - new_code->Relocate(new_addr - old_addr); - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - code->Verify(); - } -#endif - return new_code; -} - - -void Heap::InitializeAllocationMemento(AllocationMemento* memento, - AllocationSite* allocation_site) { - memento->set_map_no_write_barrier(allocation_memento_map()); - ASSERT(allocation_site->map() == allocation_site_map()); - memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER); - if (FLAG_allocation_site_pretenuring) { - allocation_site->IncrementMementoCreateCount(); - } -} - - -MaybeObject* Heap::Allocate(Map* map, AllocationSpace space, - AllocationSite* allocation_site) { - ASSERT(gc_state_ == NOT_IN_GC); - ASSERT(map->instance_type() != MAP_TYPE); - // If allocation failures are disallowed, we may allocate in a different - // space when new space is full and the object is not a large object. - AllocationSpace retry_space = - (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); - int size = map->instance_size(); - if (allocation_site != NULL) { - size += AllocationMemento::kSize; - } - Object* result; - MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); - if (!maybe_result->ToObject(&result)) return maybe_result; - // No need for write barrier since object is white and map is in old space. 
- HeapObject::cast(result)->set_map_no_write_barrier(map); - if (allocation_site != NULL) { - AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( - reinterpret_cast<Address>(result) + map->instance_size()); - InitializeAllocationMemento(alloc_memento, allocation_site); - } - return result; -} - - -void Heap::InitializeFunction(JSFunction* function, - SharedFunctionInfo* shared, - Object* prototype) { - ASSERT(!prototype->IsMap()); - function->initialize_properties(); - function->initialize_elements(); - function->set_shared(shared); - function->set_code(shared->code()); - function->set_prototype_or_initial_map(prototype); - function->set_context(undefined_value()); - function->set_literals_or_bindings(empty_fixed_array()); - function->set_next_function_link(undefined_value()); -} - - -MaybeObject* Heap::AllocateFunction(Map* function_map, - SharedFunctionInfo* shared, - Object* prototype, - PretenureFlag pretenure) { - AllocationSpace space = - (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; - Object* result; - { MaybeObject* maybe_result = Allocate(function_map, space); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - InitializeFunction(JSFunction::cast(result), shared, prototype); - return result; -} - - -MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) { - // To get fast allocation and map sharing for arguments objects we - // allocate them based on an arguments boilerplate. 
- - JSObject* boilerplate; - int arguments_object_size; - bool strict_mode_callee = callee->IsJSFunction() && - JSFunction::cast(callee)->shared()->strict_mode() == STRICT; - if (strict_mode_callee) { - boilerplate = - isolate()->context()->native_context()->strict_arguments_boilerplate(); - arguments_object_size = kStrictArgumentsObjectSize; - } else { - boilerplate = - isolate()->context()->native_context()->sloppy_arguments_boilerplate(); - arguments_object_size = kSloppyArgumentsObjectSize; - } - - // Check that the size of the boilerplate matches our - // expectations. The ArgumentsAccessStub::GenerateNewObject relies - // on the size being a known constant. - ASSERT(arguments_object_size == boilerplate->map()->instance_size()); - - // Do the allocation. - Object* result; - { MaybeObject* maybe_result = - AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - // Copy the content. The arguments boilerplate doesn't have any - // fields that point to new space so it's safe to skip the write - // barrier here. - CopyBlock(HeapObject::cast(result)->address(), - boilerplate->address(), - JSObject::kHeaderSize); - - // Set the length property. - JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex, - Smi::FromInt(length), - SKIP_WRITE_BARRIER); - // Set the callee property for sloppy mode arguments object only. - if (!strict_mode_callee) { - JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex, - callee); - } - - // Check the state of the object - ASSERT(JSObject::cast(result)->HasFastProperties()); - ASSERT(JSObject::cast(result)->HasFastObjectElements()); - - return result; -} - - -void Heap::InitializeJSObjectFromMap(JSObject* obj, - FixedArray* properties, - Map* map) { - obj->set_properties(properties); - obj->initialize_elements(); - // TODO(1240798): Initialize the object's body using valid initial values - // according to the object's initial map. 
For example, if the map's - // instance type is JS_ARRAY_TYPE, the length field should be initialized - // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a - // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object - // verification code has to cope with (temporarily) invalid objects. See - // for example, JSArray::JSArrayVerify). - Object* filler; - // We cannot always fill with one_pointer_filler_map because objects - // created from API functions expect their internal fields to be initialized - // with undefined_value. - // Pre-allocated fields need to be initialized with undefined_value as well - // so that object accesses before the constructor completes (e.g. in the - // debugger) will not cause a crash. - if (map->constructor()->IsJSFunction() && - JSFunction::cast(map->constructor())->shared()-> - IsInobjectSlackTrackingInProgress()) { - // We might want to shrink the object later. - ASSERT(obj->GetInternalFieldCount() == 0); - filler = Heap::one_pointer_filler_map(); - } else { - filler = Heap::undefined_value(); - } - obj->InitializeBody(map, Heap::undefined_value(), filler); -} - - -MaybeObject* Heap::AllocateJSObjectFromMap( - Map* map, - PretenureFlag pretenure, - bool allocate_properties, - AllocationSite* allocation_site) { - // JSFunctions should be allocated using AllocateFunction to be - // properly initialized. - ASSERT(map->instance_type() != JS_FUNCTION_TYPE); - - // Both types of global objects should be allocated using - // AllocateGlobalObject to be properly initialized. - ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); - ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); - - // Allocate the backing storage for the properties. 
- FixedArray* properties; - if (allocate_properties) { - int prop_size = map->InitialPropertiesLength(); - ASSERT(prop_size >= 0); - { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure); - if (!maybe_properties->To(&properties)) return maybe_properties; - } - } else { - properties = empty_fixed_array(); - } - - // Allocate the JSObject. - int size = map->instance_size(); - AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); - Object* obj; - MaybeObject* maybe_obj = Allocate(map, space, allocation_site); - if (!maybe_obj->To(&obj)) return maybe_obj; - - // Initialize the JSObject. - InitializeJSObjectFromMap(JSObject::cast(obj), properties, map); - ASSERT(JSObject::cast(obj)->HasFastElements() || - JSObject::cast(obj)->HasExternalArrayElements() || - JSObject::cast(obj)->HasFixedTypedArrayElements()); - return obj; -} - - -MaybeObject* Heap::AllocateJSObject(JSFunction* constructor, - PretenureFlag pretenure, - AllocationSite* allocation_site) { - ASSERT(constructor->has_initial_map()); - - // Allocate the object based on the constructors initial map. - MaybeObject* result = AllocateJSObjectFromMap(constructor->initial_map(), - pretenure, - true, - allocation_site); -#ifdef DEBUG - // Make sure result is NOT a global object if valid. - Object* non_failure; - ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject()); -#endif - return result; -} - - -MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) { - // Allocate a fresh map. Modules do not have a prototype. - Map* map; - MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize); - if (!maybe_map->To(&map)) return maybe_map; - // Allocate the object based on the map. 
- JSModule* module; - MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED); - if (!maybe_module->To(&module)) return maybe_module; - module->set_context(context); - module->set_scope_info(scope_info); - return module; -} - - -MaybeObject* Heap::AllocateJSArrayAndStorage( - ElementsKind elements_kind, - int length, - int capacity, - ArrayStorageAllocationMode mode, - PretenureFlag pretenure) { - MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); - JSArray* array; - if (!maybe_array->To(&array)) return maybe_array; - - // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage - // for performance reasons. - ASSERT(capacity >= length); - - if (capacity == 0) { - array->set_length(Smi::FromInt(0)); - array->set_elements(empty_fixed_array()); - return array; - } - - FixedArrayBase* elms; - MaybeObject* maybe_elms = NULL; - if (IsFastDoubleElementsKind(elements_kind)) { - if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { - maybe_elms = AllocateUninitializedFixedDoubleArray(capacity); - } else { - ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity); - } - } else { - ASSERT(IsFastSmiOrObjectElementsKind(elements_kind)); - if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { - maybe_elms = AllocateUninitializedFixedArray(capacity); - } else { - ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - maybe_elms = AllocateFixedArrayWithHoles(capacity); - } - } - if (!maybe_elms->To(&elms)) return maybe_elms; - - array->set_elements(elms); - array->set_length(Smi::FromInt(length)); - return array; -} - - -MaybeObject* Heap::AllocateJSArrayStorage( - JSArray* array, - int length, - int capacity, - ArrayStorageAllocationMode mode) { - ASSERT(capacity >= length); - - if (capacity == 0) { - array->set_length(Smi::FromInt(0)); - array->set_elements(empty_fixed_array()); - return array; - } - - FixedArrayBase* elms; - MaybeObject* maybe_elms = NULL; - ElementsKind elements_kind = 
array->GetElementsKind(); - if (IsFastDoubleElementsKind(elements_kind)) { - if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { - maybe_elms = AllocateUninitializedFixedDoubleArray(capacity); - } else { - ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity); - } - } else { - ASSERT(IsFastSmiOrObjectElementsKind(elements_kind)); - if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { - maybe_elms = AllocateUninitializedFixedArray(capacity); - } else { - ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - maybe_elms = AllocateFixedArrayWithHoles(capacity); - } - } - if (!maybe_elms->To(&elms)) return maybe_elms; - - array->set_elements(elms); - array->set_length(Smi::FromInt(length)); - return array; -} - - -MaybeObject* Heap::AllocateJSArrayWithElements( - FixedArrayBase* elements, - ElementsKind elements_kind, - int length, - PretenureFlag pretenure) { - MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); - JSArray* array; - if (!maybe_array->To(&array)) return maybe_array; - - array->set_elements(elements); - array->set_length(Smi::FromInt(length)); - array->ValidateElements(); - return array; -} - - -MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) { - // Allocate map. - // TODO(rossberg): Once we optimize proxies, think about a scheme to share - // maps. Will probably depend on the identity of the handler object, too. - Map* map; - MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize); - if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj; - map->set_prototype(prototype); - - // Allocate the proxy object. 
- JSProxy* result; - MaybeObject* maybe_result = Allocate(map, NEW_SPACE); - if (!maybe_result->To<JSProxy>(&result)) return maybe_result; - result->InitializeBody(map->instance_size(), Smi::FromInt(0)); - result->set_handler(handler); - result->set_hash(undefined_value(), SKIP_WRITE_BARRIER); - return result; -} - - -MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler, - Object* call_trap, - Object* construct_trap, - Object* prototype) { - // Allocate map. - // TODO(rossberg): Once we optimize proxies, think about a scheme to share - // maps. Will probably depend on the identity of the handler object, too. - Map* map; - MaybeObject* maybe_map_obj = - AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize); - if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj; - map->set_prototype(prototype); - - // Allocate the proxy object. - JSFunctionProxy* result; - MaybeObject* maybe_result = Allocate(map, NEW_SPACE); - if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result; - result->InitializeBody(map->instance_size(), Smi::FromInt(0)); - result->set_handler(handler); - result->set_hash(undefined_value(), SKIP_WRITE_BARRIER); - result->set_call_trap(call_trap); - result->set_construct_trap(construct_trap); - return result; -} - - -MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) { - // Never used to copy functions. If functions need to be copied we - // have to be careful to clear the literals array. - SLOW_ASSERT(!source->IsJSFunction()); - - // Make the clone. - Map* map = source->map(); - int object_size = map->instance_size(); - Object* clone; - - ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type())); - - WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; - - // If we're forced to always allocate, we use the general allocation - // functions which may leave us with an object in old space. 
- if (always_allocate()) { - { MaybeObject* maybe_clone = - AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); - if (!maybe_clone->ToObject(&clone)) return maybe_clone; - } - Address clone_address = HeapObject::cast(clone)->address(); - CopyBlock(clone_address, - source->address(), - object_size); - // Update write barrier for all fields that lie beyond the header. - RecordWrites(clone_address, - JSObject::kHeaderSize, - (object_size - JSObject::kHeaderSize) / kPointerSize); - } else { - wb_mode = SKIP_WRITE_BARRIER; - - { int adjusted_object_size = site != NULL - ? object_size + AllocationMemento::kSize - : object_size; - MaybeObject* maybe_clone = - AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE); - if (!maybe_clone->ToObject(&clone)) return maybe_clone; - } - SLOW_ASSERT(InNewSpace(clone)); - // Since we know the clone is allocated in new space, we can copy - // the contents without worrying about updating the write barrier. - CopyBlock(HeapObject::cast(clone)->address(), - source->address(), - object_size); - - if (site != NULL) { - AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( - reinterpret_cast<Address>(clone) + object_size); - InitializeAllocationMemento(alloc_memento, site); - } - } - - SLOW_ASSERT( - JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); - FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); - FixedArray* properties = FixedArray::cast(source->properties()); - // Update elements if necessary. 
- if (elements->length() > 0) { - Object* elem; - { MaybeObject* maybe_elem; - if (elements->map() == fixed_cow_array_map()) { - maybe_elem = FixedArray::cast(elements); - } else if (source->HasFastDoubleElements()) { - maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements)); - } else { - maybe_elem = CopyFixedArray(FixedArray::cast(elements)); - } - if (!maybe_elem->ToObject(&elem)) return maybe_elem; - } - JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode); - } - // Update properties if necessary. - if (properties->length() > 0) { - Object* prop; - { MaybeObject* maybe_prop = CopyFixedArray(properties); - if (!maybe_prop->ToObject(&prop)) return maybe_prop; - } - JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode); - } - // Return the new clone. - return clone; -} - - -MaybeObject* Heap::ReinitializeJSReceiver( - JSReceiver* object, InstanceType type, int size) { - ASSERT(type >= FIRST_JS_OBJECT_TYPE); - - // Allocate fresh map. - // TODO(rossberg): Once we optimize proxies, cache these maps. - Map* map; - MaybeObject* maybe = AllocateMap(type, size); - if (!maybe->To<Map>(&map)) return maybe; - - // Check that the receiver has at least the size of the fresh object. - int size_difference = object->map()->instance_size() - map->instance_size(); - ASSERT(size_difference >= 0); - - map->set_prototype(object->map()->prototype()); - - // Allocate the backing storage for the properties. - int prop_size = map->unused_property_fields() - map->inobject_properties(); - Object* properties; - maybe = AllocateFixedArray(prop_size, TENURED); - if (!maybe->ToObject(&properties)) return maybe; - - // Functions require some allocation, which might fail here. 
- SharedFunctionInfo* shared = NULL; - if (type == JS_FUNCTION_TYPE) { - String* name; - OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"), - HashSeed()); - maybe = InternalizeStringWithKey(&key); - if (!maybe->To<String>(&name)) return maybe; - maybe = AllocateSharedFunctionInfo(name); - if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe; - } - - // Because of possible retries of this function after failure, - // we must NOT fail after this point, where we have changed the type! - - // Reset the map for the object. - object->set_map(map); - JSObject* jsobj = JSObject::cast(object); - - // Reinitialize the object from the constructor map. - InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map); - - // Functions require some minimal initialization. - if (type == JS_FUNCTION_TYPE) { - map->set_function_with_prototype(true); - InitializeFunction(JSFunction::cast(object), shared, the_hole_value()); - JSFunction::cast(object)->set_context( - isolate()->context()->native_context()); - } - - // Put in filler if the new object is smaller than the old. - if (size_difference > 0) { - CreateFillerObjectAt( - object->address() + map->instance_size(), size_difference); - } - - return object; -} - - -MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor, - JSGlobalProxy* object) { - ASSERT(constructor->has_initial_map()); - Map* map = constructor->initial_map(); - - // Check that the already allocated object has the same size and type as - // objects allocated using the constructor. - ASSERT(map->instance_size() == object->map()->instance_size()); - ASSERT(map->instance_type() == object->map()->instance_type()); - - // Allocate the backing storage for the properties. 
- int prop_size = map->unused_property_fields() - map->inobject_properties(); - Object* properties; - { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED); - if (!maybe_properties->ToObject(&properties)) return maybe_properties; - } - - // Reset the map for the object. - object->set_map(constructor->initial_map()); - - // Reinitialize the object from the constructor map. - InitializeJSObjectFromMap(object, FixedArray::cast(properties), map); - return object; -} - - -MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string, - PretenureFlag pretenure) { - int length = string.length(); - if (length == 1) { - return Heap::LookupSingleCharacterStringFromCode(string[0]); - } - Object* result; - { MaybeObject* maybe_result = - AllocateRawOneByteString(string.length(), pretenure); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - // Copy the characters into the new object. - CopyChars(SeqOneByteString::cast(result)->GetChars(), - string.start(), - length); - return result; -} - - -MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string, - int non_ascii_start, - PretenureFlag pretenure) { - // Continue counting the number of characters in the UTF-8 string, starting - // from the first non-ascii character or word. - Access<UnicodeCache::Utf8Decoder> - decoder(isolate_->unicode_cache()->utf8_decoder()); - decoder->Reset(string.start() + non_ascii_start, - string.length() - non_ascii_start); - int utf16_length = decoder->Utf16Length(); - ASSERT(utf16_length > 0); - // Allocate string. - Object* result; - { - int chars = non_ascii_start + utf16_length; - MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - // Convert and copy the characters into the new object. - SeqTwoByteString* twobyte = SeqTwoByteString::cast(result); - // Copy ascii portion. 
- uint16_t* data = twobyte->GetChars(); - if (non_ascii_start != 0) { - const char* ascii_data = string.start(); - for (int i = 0; i < non_ascii_start; i++) { - *data++ = *ascii_data++; - } - } - // Now write the remainder. - decoder->WriteUtf16(data, utf16_length); - return result; -} - - -MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string, - PretenureFlag pretenure) { - // Check if the string is an ASCII string. - Object* result; - int length = string.length(); - const uc16* start = string.start(); - - if (String::IsOneByte(start, length)) { - MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure); - if (!maybe_result->ToObject(&result)) return maybe_result; - CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length); - } else { // It's not a one byte string. - MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure); - if (!maybe_result->ToObject(&result)) return maybe_result; - CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length); - } - return result; -} - - -Map* Heap::InternalizedStringMapForString(String* string) { - // If the string is in new space it cannot be used as internalized. - if (InNewSpace(string)) return NULL; - - // Find the corresponding internalized string map for strings. 
- switch (string->map()->instance_type()) { - case STRING_TYPE: return internalized_string_map(); - case ASCII_STRING_TYPE: return ascii_internalized_string_map(); - case CONS_STRING_TYPE: return cons_internalized_string_map(); - case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map(); - case EXTERNAL_STRING_TYPE: return external_internalized_string_map(); - case EXTERNAL_ASCII_STRING_TYPE: - return external_ascii_internalized_string_map(); - case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: - return external_internalized_string_with_one_byte_data_map(); - case SHORT_EXTERNAL_STRING_TYPE: - return short_external_internalized_string_map(); - case SHORT_EXTERNAL_ASCII_STRING_TYPE: - return short_external_ascii_internalized_string_map(); - case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: - return short_external_internalized_string_with_one_byte_data_map(); - default: return NULL; // No match found. - } -} - - -static inline void WriteOneByteData(Vector<const char> vector, - uint8_t* chars, - int len) { - // Only works for ascii. 
- ASSERT(vector.length() == len); - OS::MemCopy(chars, vector.start(), len); -} - -static inline void WriteTwoByteData(Vector<const char> vector, - uint16_t* chars, - int len) { - const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start()); - unsigned stream_length = vector.length(); - while (stream_length != 0) { - unsigned consumed = 0; - uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed); - ASSERT(c != unibrow::Utf8::kBadChar); - ASSERT(consumed <= stream_length); - stream_length -= consumed; - stream += consumed; - if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { - len -= 2; - if (len < 0) break; - *chars++ = unibrow::Utf16::LeadSurrogate(c); - *chars++ = unibrow::Utf16::TrailSurrogate(c); - } else { - len -= 1; - if (len < 0) break; - *chars++ = c; - } - } - ASSERT(stream_length == 0); - ASSERT(len == 0); -} - - -static inline void WriteOneByteData(String* s, uint8_t* chars, int len) { - ASSERT(s->length() == len); - String::WriteToFlat(s, chars, 0, len); -} - - -static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) { - ASSERT(s->length() == len); - String::WriteToFlat(s, chars, 0, len); -} - - -template<bool is_one_byte, typename T> -MaybeObject* Heap::AllocateInternalizedStringImpl( - T t, int chars, uint32_t hash_field) { - ASSERT(chars >= 0); - // Compute map and object size. - int size; - Map* map; - - if (chars < 0 || chars > String::kMaxLength) { - return isolate()->ThrowInvalidStringLength(); - } - if (is_one_byte) { - map = ascii_internalized_string_map(); - size = SeqOneByteString::SizeFor(chars); - } else { - map = internalized_string_map(); - size = SeqTwoByteString::SizeFor(chars); - } - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); - - // Allocate string. 
- Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map); - // Set length and hash fields of the allocated string. - String* answer = String::cast(result); - answer->set_length(chars); - answer->set_hash_field(hash_field); - - ASSERT_EQ(size, answer->Size()); - - if (is_one_byte) { - WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars); - } else { - WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars); - } - return answer; -} - - -// Need explicit instantiations. -template -MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t); -template -MaybeObject* Heap::AllocateInternalizedStringImpl<false>( - String*, int, uint32_t); -template -MaybeObject* Heap::AllocateInternalizedStringImpl<false>( - Vector<const char>, int, uint32_t); - - -MaybeObject* Heap::AllocateRawOneByteString(int length, - PretenureFlag pretenure) { - if (length < 0 || length > String::kMaxLength) { - return isolate()->ThrowInvalidStringLength(); - } - int size = SeqOneByteString::SizeFor(length); - ASSERT(size <= SeqOneByteString::kMaxSize); - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - // Partially initialize the object. 
- HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map()); - String::cast(result)->set_length(length); - String::cast(result)->set_hash_field(String::kEmptyHashField); - ASSERT_EQ(size, HeapObject::cast(result)->Size()); - - return result; -} - - -MaybeObject* Heap::AllocateRawTwoByteString(int length, - PretenureFlag pretenure) { - if (length < 0 || length > String::kMaxLength) { - return isolate()->ThrowInvalidStringLength(); - } - int size = SeqTwoByteString::SizeFor(length); - ASSERT(size <= SeqTwoByteString::kMaxSize); - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - // Partially initialize the object. - HeapObject::cast(result)->set_map_no_write_barrier(string_map()); - String::cast(result)->set_length(length); - String::cast(result)->set_hash_field(String::kEmptyHashField); - ASSERT_EQ(size, HeapObject::cast(result)->Size()); - return result; -} - - -MaybeObject* Heap::AllocateJSArray( - ElementsKind elements_kind, - PretenureFlag pretenure) { - Context* native_context = isolate()->context()->native_context(); - JSFunction* array_function = native_context->array_function(); - Map* map = array_function->initial_map(); - Map* transition_map = isolate()->get_initial_js_array_map(elements_kind); - if (transition_map != NULL) map = transition_map; - return AllocateJSObjectFromMap(map, pretenure); -} - - -MaybeObject* Heap::AllocateEmptyFixedArray() { - int size = FixedArray::SizeFor(0); - Object* result; - { MaybeObject* maybe_result = - AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - // Initialize the object. 
- reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier( - fixed_array_map()); - reinterpret_cast<FixedArray*>(result)->set_length(0); - return result; -} - - -MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) { - return AllocateExternalArray(0, array_type, NULL, TENURED); -} - - -MaybeObject* Heap::CopyAndTenureFixedCOWArray(FixedArray* src) { - if (!InNewSpace(src)) { - return src; - } - - int len = src->length(); - Object* obj; - { MaybeObject* maybe_obj = AllocateRawFixedArray(len, TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - HeapObject::cast(obj)->set_map_no_write_barrier(fixed_array_map()); - FixedArray* result = FixedArray::cast(obj); - result->set_length(len); - - // Copy the content - DisallowHeapAllocation no_gc; - WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); - for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); - - // TODO(mvstanton): The map is set twice because of protection against calling - // set() on a COW FixedArray. Issue v8:3221 created to track this, and - // we might then be able to remove this whole method. 
- HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map()); - return result; -} - - -MaybeObject* Heap::AllocateEmptyFixedTypedArray(ExternalArrayType array_type) { - return AllocateFixedTypedArray(0, array_type, TENURED); -} - - -MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { - int len = src->length(); - Object* obj; - { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - if (InNewSpace(obj)) { - HeapObject* dst = HeapObject::cast(obj); - dst->set_map_no_write_barrier(map); - CopyBlock(dst->address() + kPointerSize, - src->address() + kPointerSize, - FixedArray::SizeFor(len) - kPointerSize); - return obj; - } - HeapObject::cast(obj)->set_map_no_write_barrier(map); - FixedArray* result = FixedArray::cast(obj); - result->set_length(len); - - // Copy the content - DisallowHeapAllocation no_gc; - WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); - for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); - return result; -} - - -MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, - Map* map) { - int len = src->length(); - Object* obj; - { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - HeapObject* dst = HeapObject::cast(obj); - dst->set_map_no_write_barrier(map); - CopyBlock( - dst->address() + FixedDoubleArray::kLengthOffset, - src->address() + FixedDoubleArray::kLengthOffset, - FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset); - return obj; -} - - -MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src, - Map* map) { - int int64_entries = src->count_of_int64_entries(); - int code_ptr_entries = src->count_of_code_ptr_entries(); - int heap_ptr_entries = src->count_of_heap_ptr_entries(); - int int32_entries = src->count_of_int32_entries(); - Object* obj; - { MaybeObject* maybe_obj = - 
AllocateConstantPoolArray(int64_entries, code_ptr_entries, - heap_ptr_entries, int32_entries); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - HeapObject* dst = HeapObject::cast(obj); - dst->set_map_no_write_barrier(map); - int size = ConstantPoolArray::SizeFor( - int64_entries, code_ptr_entries, heap_ptr_entries, int32_entries); - CopyBlock( - dst->address() + ConstantPoolArray::kLengthOffset, - src->address() + ConstantPoolArray::kLengthOffset, - size - ConstantPoolArray::kLengthOffset); - return obj; -} - - -MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { - if (length < 0 || length > FixedArray::kMaxLength) { - v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); - } - int size = FixedArray::SizeFor(length); - AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); - - return AllocateRaw(size, space, OLD_POINTER_SPACE); -} - - -MaybeObject* Heap::AllocateFixedArrayWithFiller(int length, - PretenureFlag pretenure, - Object* filler) { - ASSERT(length >= 0); - ASSERT(empty_fixed_array()->IsFixedArray()); - if (length == 0) return empty_fixed_array(); - - ASSERT(!InNewSpace(filler)); - Object* result; - { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map()); - FixedArray* array = FixedArray::cast(result); - array->set_length(length); - MemsetPointer(array->data_start(), filler, length); - return array; -} - - -MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) { - return AllocateFixedArrayWithFiller(length, pretenure, undefined_value()); -} - - -MaybeObject* Heap::AllocateFixedArrayWithHoles(int length, - PretenureFlag pretenure) { - return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value()); -} - - -MaybeObject* Heap::AllocateUninitializedFixedArray(int length) { - if (length == 0) return 
empty_fixed_array(); - - Object* obj; - { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - - reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier( - fixed_array_map()); - FixedArray::cast(obj)->set_length(length); - return obj; -} - - -MaybeObject* Heap::AllocateEmptyFixedDoubleArray() { - int size = FixedDoubleArray::SizeFor(0); - Object* result; - { MaybeObject* maybe_result = - AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - // Initialize the object. - reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier( - fixed_double_array_map()); - reinterpret_cast<FixedDoubleArray*>(result)->set_length(0); - return result; -} - - -MaybeObject* Heap::AllocateUninitializedFixedDoubleArray( - int length, - PretenureFlag pretenure) { - if (length == 0) return empty_fixed_array(); - - Object* elements_object; - MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure); - if (!maybe_obj->ToObject(&elements_object)) return maybe_obj; - FixedDoubleArray* elements = - reinterpret_cast<FixedDoubleArray*>(elements_object); - - elements->set_map_no_write_barrier(fixed_double_array_map()); - elements->set_length(length); - return elements; -} - - -MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles( - int length, - PretenureFlag pretenure) { - if (length == 0) return empty_fixed_array(); - - Object* elements_object; - MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure); - if (!maybe_obj->ToObject(&elements_object)) return maybe_obj; - FixedDoubleArray* elements = - reinterpret_cast<FixedDoubleArray*>(elements_object); - - for (int i = 0; i < length; ++i) { - elements->set_the_hole(i); - } - - elements->set_map_no_write_barrier(fixed_double_array_map()); - elements->set_length(length); - return elements; -} - - -MaybeObject* Heap::AllocateRawFixedDoubleArray(int length, - 
PretenureFlag pretenure) { - if (length < 0 || length > FixedDoubleArray::kMaxLength) { - v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); - } - int size = FixedDoubleArray::SizeFor(length); -#ifndef V8_HOST_ARCH_64_BIT - size += kPointerSize; -#endif - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - - HeapObject* object; - { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_object->To<HeapObject>(&object)) return maybe_object; - } - - return EnsureDoubleAligned(this, object, size); -} - - -MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries) { - ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 || - number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0); - int size = ConstantPoolArray::SizeFor(number_of_int64_entries, - number_of_code_ptr_entries, - number_of_heap_ptr_entries, - number_of_int32_entries); -#ifndef V8_HOST_ARCH_64_BIT - size += kPointerSize; -#endif - AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); - - HeapObject* object; - { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE); - if (!maybe_object->To<HeapObject>(&object)) return maybe_object; - } - object = EnsureDoubleAligned(this, object, size); - HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map()); - - ConstantPoolArray* constant_pool = - reinterpret_cast<ConstantPoolArray*>(object); - constant_pool->SetEntryCounts(number_of_int64_entries, - number_of_code_ptr_entries, - number_of_heap_ptr_entries, - number_of_int32_entries); - if (number_of_code_ptr_entries > 0) { - int offset = - constant_pool->OffsetOfElementAt(constant_pool->first_code_ptr_index()); - MemsetPointer( - reinterpret_cast<Address*>(HeapObject::RawField(constant_pool, offset)), - 
isolate()->builtins()->builtin(Builtins::kIllegal)->entry(), - number_of_code_ptr_entries); - } - if (number_of_heap_ptr_entries > 0) { - int offset = - constant_pool->OffsetOfElementAt(constant_pool->first_heap_ptr_index()); - MemsetPointer( - HeapObject::RawField(constant_pool, offset), - undefined_value(), - number_of_heap_ptr_entries); - } - return constant_pool; -} - - -MaybeObject* Heap::AllocateEmptyConstantPoolArray() { - int size = ConstantPoolArray::SizeFor(0, 0, 0, 0); - Object* result; - { MaybeObject* maybe_result = - AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map()); - ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0, 0); - return result; -} - - -MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { - Object* result; - { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier( - hash_table_map()); - ASSERT(result->IsHashTable()); - return result; -} - - -MaybeObject* Heap::AllocateSymbol() { - // Statically ensure that it is safe to allocate symbols in paged spaces. - STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize); - - Object* result; - MaybeObject* maybe = - AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE); - if (!maybe->ToObject(&result)) return maybe; - - HeapObject::cast(result)->set_map_no_write_barrier(symbol_map()); - - // Generate a random hash value. 
- int hash; - int attempts = 0; - do { - hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask; - attempts++; - } while (hash == 0 && attempts < 30); - if (hash == 0) hash = 1; // never return 0 - - Symbol::cast(result)->set_hash_field( - Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)); - Symbol::cast(result)->set_name(undefined_value()); - Symbol::cast(result)->set_flags(Smi::FromInt(0)); - - ASSERT(!Symbol::cast(result)->is_private()); - return result; -} - - -MaybeObject* Heap::AllocatePrivateSymbol() { - MaybeObject* maybe = AllocateSymbol(); - Symbol* symbol; - if (!maybe->To(&symbol)) return maybe; - symbol->set_is_private(true); - return symbol; -} - - -MaybeObject* Heap::AllocateNativeContext() { - Object* result; - { MaybeObject* maybe_result = - AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - context->set_map_no_write_barrier(native_context_map()); - context->set_js_array_maps(undefined_value()); - ASSERT(context->IsNativeContext()); - ASSERT(result->IsContext()); - return result; -} - - -MaybeObject* Heap::AllocateGlobalContext(JSFunction* function, - ScopeInfo* scope_info) { - Object* result; - { MaybeObject* maybe_result = - AllocateFixedArray(scope_info->ContextLength(), TENURED); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - context->set_map_no_write_barrier(global_context_map()); - context->set_closure(function); - context->set_previous(function->context()); - context->set_extension(scope_info); - context->set_global_object(function->context()->global_object()); - ASSERT(context->IsGlobalContext()); - ASSERT(result->IsContext()); - return context; -} - - -MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) { - Object* result; - { MaybeObject* maybe_result = - AllocateFixedArray(scope_info->ContextLength(), 
TENURED); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - context->set_map_no_write_barrier(module_context_map()); - // Instance link will be set later. - context->set_extension(Smi::FromInt(0)); - return context; -} - - -MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) { - ASSERT(length >= Context::MIN_CONTEXT_SLOTS); - Object* result; - { MaybeObject* maybe_result = AllocateFixedArray(length); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - context->set_map_no_write_barrier(function_context_map()); - context->set_closure(function); - context->set_previous(function->context()); - context->set_extension(Smi::FromInt(0)); - context->set_global_object(function->context()->global_object()); - return context; -} - - -MaybeObject* Heap::AllocateCatchContext(JSFunction* function, - Context* previous, - String* name, - Object* thrown_object) { - STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX); - Object* result; - { MaybeObject* maybe_result = - AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - context->set_map_no_write_barrier(catch_context_map()); - context->set_closure(function); - context->set_previous(previous); - context->set_extension(name); - context->set_global_object(previous->global_object()); - context->set(Context::THROWN_OBJECT_INDEX, thrown_object); - return context; -} - - -MaybeObject* Heap::AllocateWithContext(JSFunction* function, - Context* previous, - JSReceiver* extension) { - Object* result; - { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - 
context->set_map_no_write_barrier(with_context_map()); - context->set_closure(function); - context->set_previous(previous); - context->set_extension(extension); - context->set_global_object(previous->global_object()); - return context; -} - - -MaybeObject* Heap::AllocateBlockContext(JSFunction* function, - Context* previous, - ScopeInfo* scope_info) { - Object* result; - { MaybeObject* maybe_result = - AllocateFixedArrayWithHoles(scope_info->ContextLength()); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Context* context = reinterpret_cast<Context*>(result); - context->set_map_no_write_barrier(block_context_map()); - context->set_closure(function); - context->set_previous(previous); - context->set_extension(scope_info); - context->set_global_object(previous->global_object()); - return context; -} - - -MaybeObject* Heap::AllocateScopeInfo(int length) { - FixedArray* scope_info; - MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED); - if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info; - scope_info->set_map_no_write_barrier(scope_info_map()); - return scope_info; -} - - -MaybeObject* Heap::AllocateExternal(void* value) { - Foreign* foreign; - { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value)); - if (!maybe_result->To(&foreign)) return maybe_result; - } - JSObject* external; - { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map()); - if (!maybe_result->To(&external)) return maybe_result; - } - external->SetInternalField(0, foreign); - return external; -} - - -MaybeObject* Heap::AllocateStruct(InstanceType type) { - Map* map; - switch (type) { -#define MAKE_CASE(NAME, Name, name) \ - case NAME##_TYPE: map = name##_map(); break; -STRUCT_LIST(MAKE_CASE) -#undef MAKE_CASE - default: - UNREACHABLE(); - return Failure::InternalError(); - } - int size = map->instance_size(); - AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); - Object* result; - { MaybeObject* 
maybe_result = Allocate(map, space); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Struct::cast(result)->InitializeBody(size); - return result; -} - - -bool Heap::IsHeapIterable() { - return (!old_pointer_space()->was_swept_conservatively() && - !old_data_space()->was_swept_conservatively()); -} - - -void Heap::EnsureHeapIsIterable() { - ASSERT(AllowHeapAllocation::IsAllowed()); - if (!IsHeapIterable()) { - CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable"); - } - ASSERT(IsHeapIterable()); -} - - -void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) { - incremental_marking()->Step(step_size, - IncrementalMarking::NO_GC_VIA_STACK_GUARD); - - if (incremental_marking()->IsComplete()) { - bool uncommit = false; - if (gc_count_at_last_idle_gc_ == gc_count_) { - // No GC since the last full GC, the mutator is probably not active. - isolate_->compilation_cache()->Clear(); - uncommit = true; - } - CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); - mark_sweeps_since_idle_round_started_++; - gc_count_at_last_idle_gc_ = gc_count_; - if (uncommit) { - new_space_.Shrink(); - UncommitFromSpace(); - } - } -} - - -bool Heap::IdleNotification(int hint) { - // Hints greater than this value indicate that - // the embedder is requesting a lot of GC work. - const int kMaxHint = 1000; - const int kMinHintForIncrementalMarking = 10; - // Minimal hint that allows to do full GC. - const int kMinHintForFullGC = 100; - intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4; - // The size factor is in range [5..250]. The numbers here are chosen from - // experiments. 
If you changes them, make sure to test with - // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.* - intptr_t step_size = - size_factor * IncrementalMarking::kAllocatedThreshold; - - if (contexts_disposed_ > 0) { - contexts_disposed_ = 0; - int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000); - if (hint >= mark_sweep_time && !FLAG_expose_gc && - incremental_marking()->IsStopped()) { - HistogramTimerScope scope(isolate_->counters()->gc_context()); - CollectAllGarbage(kReduceMemoryFootprintMask, - "idle notification: contexts disposed"); - } else { - AdvanceIdleIncrementalMarking(step_size); - } - - // After context disposal there is likely a lot of garbage remaining, reset - // the idle notification counters in order to trigger more incremental GCs - // on subsequent idle notifications. - StartIdleRound(); - return false; - } - - if (!FLAG_incremental_marking || Serializer::enabled()) { - return IdleGlobalGC(); - } - - // By doing small chunks of GC work in each IdleNotification, - // perform a round of incremental GCs and after that wait until - // the mutator creates enough garbage to justify a new round. - // An incremental GC progresses as follows: - // 1. many incremental marking steps, - // 2. one old space mark-sweep-compact, - // 3. many lazy sweep steps. - // Use mark-sweep-compact events to count incremental GCs in a round. 
- - if (incremental_marking()->IsStopped()) { - if (!mark_compact_collector()->AreSweeperThreadsActivated() && - !IsSweepingComplete() && - !AdvanceSweepers(static_cast<int>(step_size))) { - return false; - } - } - - if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) { - if (EnoughGarbageSinceLastIdleRound()) { - StartIdleRound(); - } else { - return true; - } - } - - int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound - - mark_sweeps_since_idle_round_started_; - - if (incremental_marking()->IsStopped()) { - // If there are no more than two GCs left in this idle round and we are - // allowed to do a full GC, then make those GCs full in order to compact - // the code space. - // TODO(ulan): Once we enable code compaction for incremental marking, - // we can get rid of this special case and always start incremental marking. - if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) { - CollectAllGarbage(kReduceMemoryFootprintMask, - "idle notification: finalize idle round"); - mark_sweeps_since_idle_round_started_++; - } else if (hint > kMinHintForIncrementalMarking) { - incremental_marking()->Start(); - } - } - if (!incremental_marking()->IsStopped() && - hint > kMinHintForIncrementalMarking) { - AdvanceIdleIncrementalMarking(step_size); - } - - if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) { - FinishIdleRound(); - return true; - } - - return false; -} - - -bool Heap::IdleGlobalGC() { - static const int kIdlesBeforeScavenge = 4; - static const int kIdlesBeforeMarkSweep = 7; - static const int kIdlesBeforeMarkCompact = 8; - static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; - static const unsigned int kGCsBetweenCleanup = 4; - - if (!last_idle_notification_gc_count_init_) { - last_idle_notification_gc_count_ = gc_count_; - last_idle_notification_gc_count_init_ = true; - } - - bool uncommit = true; - bool finished = false; - - // Reset the number of idle notifications received when a number of - // GCs 
have taken place. This allows another round of cleanup based - // on idle notifications if enough work has been carried out to - // provoke a number of garbage collections. - if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) { - number_idle_notifications_ = - Min(number_idle_notifications_ + 1, kMaxIdleCount); - } else { - number_idle_notifications_ = 0; - last_idle_notification_gc_count_ = gc_count_; - } - - if (number_idle_notifications_ == kIdlesBeforeScavenge) { - CollectGarbage(NEW_SPACE, "idle notification"); - new_space_.Shrink(); - last_idle_notification_gc_count_ = gc_count_; - } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) { - // Before doing the mark-sweep collections we clear the - // compilation cache to avoid hanging on to source code and - // generated code for cached functions. - isolate_->compilation_cache()->Clear(); - - CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification"); - new_space_.Shrink(); - last_idle_notification_gc_count_ = gc_count_; - - } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) { - CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification"); - new_space_.Shrink(); - last_idle_notification_gc_count_ = gc_count_; - number_idle_notifications_ = 0; - finished = true; - } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) { - // If we have received more than kIdlesBeforeMarkCompact idle - // notifications we do not perform any cleanup because we don't - // expect to gain much by doing so. 
- finished = true; - } - - if (uncommit) UncommitFromSpace(); - - return finished; -} - - -#ifdef DEBUG - -void Heap::Print() { - if (!HasBeenSetUp()) return; - isolate()->PrintStack(stdout); - AllSpaces spaces(this); - for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { - space->Print(); - } -} - - -void Heap::ReportCodeStatistics(const char* title) { - PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title); - PagedSpace::ResetCodeStatistics(isolate()); - // We do not look for code in new space, map space, or old space. If code - // somehow ends up in those spaces, we would miss it here. - code_space_->CollectCodeStatistics(); - lo_space_->CollectCodeStatistics(); - PagedSpace::ReportCodeStatistics(isolate()); -} - - -// This function expects that NewSpace's allocated objects histogram is -// populated (via a call to CollectStatistics or else as a side effect of a -// just-completed scavenge collection). -void Heap::ReportHeapStatistics(const char* title) { - USE(title); - PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", - title, gc_count_); - PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n", - old_generation_allocation_limit_); - - PrintF("\n"); - PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_)); - isolate_->global_handles()->PrintStats(); - PrintF("\n"); - - PrintF("Heap statistics : "); - isolate_->memory_allocator()->ReportStatistics(); - PrintF("To space : "); - new_space_.ReportStatistics(); - PrintF("Old pointer space : "); - old_pointer_space_->ReportStatistics(); - PrintF("Old data space : "); - old_data_space_->ReportStatistics(); - PrintF("Code space : "); - code_space_->ReportStatistics(); - PrintF("Map space : "); - map_space_->ReportStatistics(); - PrintF("Cell space : "); - cell_space_->ReportStatistics(); - PrintF("PropertyCell space : "); - property_cell_space_->ReportStatistics(); - PrintF("Large object space : "); - lo_space_->ReportStatistics(); - PrintF(">>>>>> 
========================================= >>>>>>\n"); -} - -#endif // DEBUG - -bool Heap::Contains(HeapObject* value) { - return Contains(value->address()); -} - - -bool Heap::Contains(Address addr) { - if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false; - return HasBeenSetUp() && - (new_space_.ToSpaceContains(addr) || - old_pointer_space_->Contains(addr) || - old_data_space_->Contains(addr) || - code_space_->Contains(addr) || - map_space_->Contains(addr) || - cell_space_->Contains(addr) || - property_cell_space_->Contains(addr) || - lo_space_->SlowContains(addr)); -} - - -bool Heap::InSpace(HeapObject* value, AllocationSpace space) { - return InSpace(value->address(), space); -} - - -bool Heap::InSpace(Address addr, AllocationSpace space) { - if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false; - if (!HasBeenSetUp()) return false; - - switch (space) { - case NEW_SPACE: - return new_space_.ToSpaceContains(addr); - case OLD_POINTER_SPACE: - return old_pointer_space_->Contains(addr); - case OLD_DATA_SPACE: - return old_data_space_->Contains(addr); - case CODE_SPACE: - return code_space_->Contains(addr); - case MAP_SPACE: - return map_space_->Contains(addr); - case CELL_SPACE: - return cell_space_->Contains(addr); - case PROPERTY_CELL_SPACE: - return property_cell_space_->Contains(addr); - case LO_SPACE: - return lo_space_->SlowContains(addr); - } - - return false; -} - - -#ifdef VERIFY_HEAP -void Heap::Verify() { - CHECK(HasBeenSetUp()); - - store_buffer()->Verify(); - - VerifyPointersVisitor visitor; - IterateRoots(&visitor, VISIT_ONLY_STRONG); - - VerifySmisVisitor smis_visitor; - IterateSmiRoots(&smis_visitor); - - new_space_.Verify(); - - old_pointer_space_->Verify(&visitor); - map_space_->Verify(&visitor); - - VerifyPointersVisitor no_dirty_regions_visitor; - old_data_space_->Verify(&no_dirty_regions_visitor); - code_space_->Verify(&no_dirty_regions_visitor); - cell_space_->Verify(&no_dirty_regions_visitor); - 
property_cell_space_->Verify(&no_dirty_regions_visitor); - - lo_space_->Verify(); -} -#endif - - -MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) { - Utf8StringKey key(string, HashSeed()); - return InternalizeStringWithKey(&key); -} - - -MaybeObject* Heap::InternalizeString(String* string) { - if (string->IsInternalizedString()) return string; - Object* result = NULL; - Object* new_table; - { MaybeObject* maybe_new_table = - string_table()->LookupString(string, &result); - if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; - } - // Can't use set_string_table because StringTable::cast knows that - // StringTable is a singleton and checks for identity. - roots_[kStringTableRootIndex] = new_table; - ASSERT(result != NULL); - return result; -} - - -bool Heap::InternalizeStringIfExists(String* string, String** result) { - if (string->IsInternalizedString()) { - *result = string; - return true; - } - return string_table()->LookupStringIfExists(string, result); -} - - -MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) { - Object* result = NULL; - Object* new_table; - { MaybeObject* maybe_new_table = - string_table()->LookupKey(key, &result); - if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; - } - // Can't use set_string_table because StringTable::cast knows that - // StringTable is a singleton and checks for identity. 
- roots_[kStringTableRootIndex] = new_table; - ASSERT(result != NULL); - return result; -} - - -void Heap::ZapFromSpace() { - NewSpacePageIterator it(new_space_.FromSpaceStart(), - new_space_.FromSpaceEnd()); - while (it.has_next()) { - NewSpacePage* page = it.next(); - for (Address cursor = page->area_start(), limit = page->area_end(); - cursor < limit; - cursor += kPointerSize) { - Memory::Address_at(cursor) = kFromSpaceZapValue; - } - } -} - - -void Heap::IterateAndMarkPointersToFromSpace(Address start, - Address end, - ObjectSlotCallback callback) { - Address slot_address = start; - - // We are not collecting slots on new space objects during mutation - // thus we have to scan for pointers to evacuation candidates when we - // promote objects. But we should not record any slots in non-black - // objects. Grey object's slots would be rescanned. - // White object might not survive until the end of collection - // it would be a violation of the invariant to record it's slots. - bool record_slots = false; - if (incremental_marking()->IsCompacting()) { - MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start)); - record_slots = Marking::IsBlack(mark_bit); - } - - while (slot_address < end) { - Object** slot = reinterpret_cast<Object**>(slot_address); - Object* object = *slot; - // If the store buffer becomes overfull we mark pages as being exempt from - // the store buffer. These pages are scanned to find pointers that point - // to the new space. In that case we may hit newly promoted objects and - // fix the pointers before the promotion queue gets to them. Thus the 'if'. 
- if (object->IsHeapObject()) { - if (Heap::InFromSpace(object)) { - callback(reinterpret_cast<HeapObject**>(slot), - HeapObject::cast(object)); - Object* new_object = *slot; - if (InNewSpace(new_object)) { - SLOW_ASSERT(Heap::InToSpace(new_object)); - SLOW_ASSERT(new_object->IsHeapObject()); - store_buffer_.EnterDirectlyIntoStoreBuffer( - reinterpret_cast<Address>(slot)); - } - SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object)); - } else if (record_slots && - MarkCompactCollector::IsOnEvacuationCandidate(object)) { - mark_compact_collector()->RecordSlot(slot, slot, object); - } - } - slot_address += kPointerSize; - } -} - - -#ifdef DEBUG -typedef bool (*CheckStoreBufferFilter)(Object** addr); - - -bool IsAMapPointerAddress(Object** addr) { - uintptr_t a = reinterpret_cast<uintptr_t>(addr); - int mod = a % Map::kSize; - return mod >= Map::kPointerFieldsBeginOffset && - mod < Map::kPointerFieldsEndOffset; -} - - -bool EverythingsAPointer(Object** addr) { - return true; -} - - -static void CheckStoreBuffer(Heap* heap, - Object** current, - Object** limit, - Object**** store_buffer_position, - Object*** store_buffer_top, - CheckStoreBufferFilter filter, - Address special_garbage_start, - Address special_garbage_end) { - Map* free_space_map = heap->free_space_map(); - for ( ; current < limit; current++) { - Object* o = *current; - Address current_address = reinterpret_cast<Address>(current); - // Skip free space. 
- if (o == free_space_map) { - Address current_address = reinterpret_cast<Address>(current); - FreeSpace* free_space = - FreeSpace::cast(HeapObject::FromAddress(current_address)); - int skip = free_space->Size(); - ASSERT(current_address + skip <= reinterpret_cast<Address>(limit)); - ASSERT(skip > 0); - current_address += skip - kPointerSize; - current = reinterpret_cast<Object**>(current_address); - continue; - } - // Skip the current linear allocation space between top and limit which is - // unmarked with the free space map, but can contain junk. - if (current_address == special_garbage_start && - special_garbage_end != special_garbage_start) { - current_address = special_garbage_end - kPointerSize; - current = reinterpret_cast<Object**>(current_address); - continue; - } - if (!(*filter)(current)) continue; - ASSERT(current_address < special_garbage_start || - current_address >= special_garbage_end); - ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); - // We have to check that the pointer does not point into new space - // without trying to cast it to a heap object since the hash field of - // a string can contain values like 1 and 3 which are tagged null - // pointers. - if (!heap->InNewSpace(o)) continue; - while (**store_buffer_position < current && - *store_buffer_position < store_buffer_top) { - (*store_buffer_position)++; - } - if (**store_buffer_position != current || - *store_buffer_position == store_buffer_top) { - Object** obj_start = current; - while (!(*obj_start)->IsMap()) obj_start--; - UNREACHABLE(); - } - } -} - - -// Check that the store buffer contains all intergenerational pointers by -// scanning a page and ensuring that all pointers to young space are in the -// store buffer. 
-void Heap::OldPointerSpaceCheckStoreBuffer() { - OldSpace* space = old_pointer_space(); - PageIterator pages(space); - - store_buffer()->SortUniq(); - - while (pages.has_next()) { - Page* page = pages.next(); - Object** current = reinterpret_cast<Object**>(page->area_start()); - - Address end = page->area_end(); - - Object*** store_buffer_position = store_buffer()->Start(); - Object*** store_buffer_top = store_buffer()->Top(); - - Object** limit = reinterpret_cast<Object**>(end); - CheckStoreBuffer(this, - current, - limit, - &store_buffer_position, - store_buffer_top, - &EverythingsAPointer, - space->top(), - space->limit()); - } -} - - -void Heap::MapSpaceCheckStoreBuffer() { - MapSpace* space = map_space(); - PageIterator pages(space); - - store_buffer()->SortUniq(); - - while (pages.has_next()) { - Page* page = pages.next(); - Object** current = reinterpret_cast<Object**>(page->area_start()); - - Address end = page->area_end(); - - Object*** store_buffer_position = store_buffer()->Start(); - Object*** store_buffer_top = store_buffer()->Top(); - - Object** limit = reinterpret_cast<Object**>(end); - CheckStoreBuffer(this, - current, - limit, - &store_buffer_position, - store_buffer_top, - &IsAMapPointerAddress, - space->top(), - space->limit()); - } -} - - -void Heap::LargeObjectSpaceCheckStoreBuffer() { - LargeObjectIterator it(lo_space()); - for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { - // We only have code, sequential strings, or fixed arrays in large - // object space, and only fixed arrays can possibly contain pointers to - // the young generation. 
- if (object->IsFixedArray()) { - Object*** store_buffer_position = store_buffer()->Start(); - Object*** store_buffer_top = store_buffer()->Top(); - Object** current = reinterpret_cast<Object**>(object->address()); - Object** limit = - reinterpret_cast<Object**>(object->address() + object->Size()); - CheckStoreBuffer(this, - current, - limit, - &store_buffer_position, - store_buffer_top, - &EverythingsAPointer, - NULL, - NULL); - } - } -} -#endif - - -void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { - IterateStrongRoots(v, mode); - IterateWeakRoots(v, mode); -} - - -void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { - v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex])); - v->Synchronize(VisitorSynchronization::kStringTable); - if (mode != VISIT_ALL_IN_SCAVENGE && - mode != VISIT_ALL_IN_SWEEP_NEWSPACE) { - // Scavenge collections have special processing for this. - external_string_table_.Iterate(v); - } - v->Synchronize(VisitorSynchronization::kExternalStringsTable); -} - - -void Heap::IterateSmiRoots(ObjectVisitor* v) { - // Acquire execution access since we are going to read stack limit values. 
- ExecutionAccess access(isolate()); - v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]); - v->Synchronize(VisitorSynchronization::kSmiRootList); -} - - -void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { - v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); - v->Synchronize(VisitorSynchronization::kStrongRootList); - - v->VisitPointer(BitCast<Object**>(&hidden_string_)); - v->Synchronize(VisitorSynchronization::kInternalizedString); - - isolate_->bootstrapper()->Iterate(v); - v->Synchronize(VisitorSynchronization::kBootstrapper); - isolate_->Iterate(v); - v->Synchronize(VisitorSynchronization::kTop); - Relocatable::Iterate(isolate_, v); - v->Synchronize(VisitorSynchronization::kRelocatable); - -#ifdef ENABLE_DEBUGGER_SUPPORT - isolate_->debug()->Iterate(v); - if (isolate_->deoptimizer_data() != NULL) { - isolate_->deoptimizer_data()->Iterate(v); - } -#endif - v->Synchronize(VisitorSynchronization::kDebug); - isolate_->compilation_cache()->Iterate(v); - v->Synchronize(VisitorSynchronization::kCompilationCache); - - // Iterate over local handles in handle scopes. - isolate_->handle_scope_implementer()->Iterate(v); - isolate_->IterateDeferredHandles(v); - v->Synchronize(VisitorSynchronization::kHandleScope); - - // Iterate over the builtin code objects and code stubs in the - // heap. Note that it is not necessary to iterate over code objects - // on scavenge collections. - if (mode != VISIT_ALL_IN_SCAVENGE) { - isolate_->builtins()->IterateBuiltins(v); - } - v->Synchronize(VisitorSynchronization::kBuiltins); - - // Iterate over global handles. 
- switch (mode) { - case VISIT_ONLY_STRONG: - isolate_->global_handles()->IterateStrongRoots(v); - break; - case VISIT_ALL_IN_SCAVENGE: - isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v); - break; - case VISIT_ALL_IN_SWEEP_NEWSPACE: - case VISIT_ALL: - isolate_->global_handles()->IterateAllRoots(v); - break; - } - v->Synchronize(VisitorSynchronization::kGlobalHandles); - - // Iterate over eternal handles. - if (mode == VISIT_ALL_IN_SCAVENGE) { - isolate_->eternal_handles()->IterateNewSpaceRoots(v); - } else { - isolate_->eternal_handles()->IterateAllRoots(v); - } - v->Synchronize(VisitorSynchronization::kEternalHandles); - - // Iterate over pointers being held by inactive threads. - isolate_->thread_manager()->Iterate(v); - v->Synchronize(VisitorSynchronization::kThreadManager); - - // Iterate over the pointers the Serialization/Deserialization code is - // holding. - // During garbage collection this keeps the partial snapshot cache alive. - // During deserialization of the startup snapshot this creates the partial - // snapshot cache and deserializes the objects it refers to. During - // serialization this does nothing, since the partial snapshot cache is - // empty. However the next thing we do is create the partial snapshot, - // filling up the partial snapshot cache with objects it needs as we go. - SerializerDeserializer::Iterate(isolate_, v); - // We don't do a v->Synchronize call here, because in debug mode that will - // output a flag to the snapshot. However at this point the serializer and - // deserializer are deliberately a little unsynchronized (see above) so the - // checking of the sync flag in the snapshot would fail. -} - - -// TODO(1236194): Since the heap size is configurable on the command line -// and through the API, we should gracefully handle the case that the heap -// size is not big enough to fit all the initial objects. 
-bool Heap::ConfigureHeap(int max_semispace_size, - intptr_t max_old_gen_size, - intptr_t max_executable_size) { - if (HasBeenSetUp()) return false; - - if (FLAG_stress_compaction) { - // This will cause more frequent GCs when stressing. - max_semispace_size_ = Page::kPageSize; - } - - if (max_semispace_size > 0) { - if (max_semispace_size < Page::kPageSize) { - max_semispace_size = Page::kPageSize; - if (FLAG_trace_gc) { - PrintPID("Max semispace size cannot be less than %dkbytes\n", - Page::kPageSize >> 10); - } - } - max_semispace_size_ = max_semispace_size; - } - - if (Snapshot::IsEnabled()) { - // If we are using a snapshot we always reserve the default amount - // of memory for each semispace because code in the snapshot has - // write-barrier code that relies on the size and alignment of new - // space. We therefore cannot use a larger max semispace size - // than the default reserved semispace size. - if (max_semispace_size_ > reserved_semispace_size_) { - max_semispace_size_ = reserved_semispace_size_; - if (FLAG_trace_gc) { - PrintPID("Max semispace size cannot be more than %dkbytes\n", - reserved_semispace_size_ >> 10); - } - } - } else { - // If we are not using snapshots we reserve space for the actual - // max semispace size. - reserved_semispace_size_ = max_semispace_size_; - } - - if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size; - if (max_executable_size > 0) { - max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize); - } - - // The max executable size must be less than or equal to the max old - // generation size. - if (max_executable_size_ > max_old_generation_size_) { - max_executable_size_ = max_old_generation_size_; - } - - // The new space size must be a power of two to support single-bit testing - // for containment. 
- max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_); - reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_); - initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_); - - // The external allocation limit should be below 256 MB on all architectures - // to avoid unnecessary low memory notifications, as that is the threshold - // for some embedders. - external_allocation_limit_ = 12 * max_semispace_size_; - ASSERT(external_allocation_limit_ <= 256 * MB); - - // The old generation is paged and needs at least one page for each space. - int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; - max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count * - Page::kPageSize), - RoundUp(max_old_generation_size_, - Page::kPageSize)); - - // We rely on being able to allocate new arrays in paged spaces. - ASSERT(Page::kMaxRegularHeapObjectSize >= - (JSArray::kSize + - FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) + - AllocationMemento::kSize)); - - configured_ = true; - return true; -} - - -bool Heap::ConfigureHeapDefault() { - return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB, - static_cast<intptr_t>(FLAG_max_old_space_size) * MB, - static_cast<intptr_t>(FLAG_max_executable_size) * MB); -} - - -void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { - *stats->start_marker = HeapStats::kStartMarker; - *stats->end_marker = HeapStats::kEndMarker; - *stats->new_space_size = new_space_.SizeAsInt(); - *stats->new_space_capacity = static_cast<int>(new_space_.Capacity()); - *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects(); - *stats->old_pointer_space_capacity = old_pointer_space_->Capacity(); - *stats->old_data_space_size = old_data_space_->SizeOfObjects(); - *stats->old_data_space_capacity = old_data_space_->Capacity(); - *stats->code_space_size = code_space_->SizeOfObjects(); - *stats->code_space_capacity = code_space_->Capacity(); - 
*stats->map_space_size = map_space_->SizeOfObjects(); - *stats->map_space_capacity = map_space_->Capacity(); - *stats->cell_space_size = cell_space_->SizeOfObjects(); - *stats->cell_space_capacity = cell_space_->Capacity(); - *stats->property_cell_space_size = property_cell_space_->SizeOfObjects(); - *stats->property_cell_space_capacity = property_cell_space_->Capacity(); - *stats->lo_space_size = lo_space_->Size(); - isolate_->global_handles()->RecordStats(stats); - *stats->memory_allocator_size = isolate()->memory_allocator()->Size(); - *stats->memory_allocator_capacity = - isolate()->memory_allocator()->Size() + - isolate()->memory_allocator()->Available(); - *stats->os_error = OS::GetLastError(); - isolate()->memory_allocator()->Available(); - if (take_snapshot) { - HeapIterator iterator(this); - for (HeapObject* obj = iterator.next(); - obj != NULL; - obj = iterator.next()) { - InstanceType type = obj->map()->instance_type(); - ASSERT(0 <= type && type <= LAST_TYPE); - stats->objects_per_type[type]++; - stats->size_per_type[type] += obj->Size(); - } - } -} - - -intptr_t Heap::PromotedSpaceSizeOfObjects() { - return old_pointer_space_->SizeOfObjects() - + old_data_space_->SizeOfObjects() - + code_space_->SizeOfObjects() - + map_space_->SizeOfObjects() - + cell_space_->SizeOfObjects() - + property_cell_space_->SizeOfObjects() - + lo_space_->SizeOfObjects(); -} - - -bool Heap::AdvanceSweepers(int step_size) { - ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated()); - bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size); - sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size); - return sweeping_complete; -} - - -int64_t Heap::PromotedExternalMemorySize() { - if (amount_of_external_allocated_memory_ - <= amount_of_external_allocated_memory_at_last_global_gc_) return 0; - return amount_of_external_allocated_memory_ - - amount_of_external_allocated_memory_at_last_global_gc_; -} - - -void Heap::EnableInlineAllocation() { - if 
(!inline_allocation_disabled_) return; - inline_allocation_disabled_ = false; - - // Update inline allocation limit for new space. - new_space()->UpdateInlineAllocationLimit(0); -} - - -void Heap::DisableInlineAllocation() { - if (inline_allocation_disabled_) return; - inline_allocation_disabled_ = true; - - // Update inline allocation limit for new space. - new_space()->UpdateInlineAllocationLimit(0); - - // Update inline allocation limit for old spaces. - PagedSpaces spaces(this); - for (PagedSpace* space = spaces.next(); - space != NULL; - space = spaces.next()) { - space->EmptyAllocationInfo(); - } -} - - -V8_DECLARE_ONCE(initialize_gc_once); - -static void InitializeGCOnce() { - InitializeScavengingVisitorsTables(); - NewSpaceScavenger::Initialize(); - MarkCompactCollector::Initialize(); -} - - -bool Heap::SetUp() { -#ifdef DEBUG - allocation_timeout_ = FLAG_gc_interval; -#endif - - // Initialize heap spaces and initial maps and objects. Whenever something - // goes wrong, just return false. The caller should check the results and - // call Heap::TearDown() to release allocated memory. - // - // If the heap is not yet configured (e.g. through the API), configure it. - // Configuration is based on the flags new-space-size (really the semispace - // size) and old-space-size if set or the initial values of semispace_size_ - // and old_generation_size_ otherwise. - if (!configured_) { - if (!ConfigureHeapDefault()) return false; - } - - CallOnce(&initialize_gc_once, &InitializeGCOnce); - - MarkMapPointersAsEncoded(false); - - // Set up memory allocator. - if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize())) - return false; - - // Set up new space. - if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) { - return false; - } - - // Initialize old pointer space. 
- old_pointer_space_ = - new OldSpace(this, - max_old_generation_size_, - OLD_POINTER_SPACE, - NOT_EXECUTABLE); - if (old_pointer_space_ == NULL) return false; - if (!old_pointer_space_->SetUp()) return false; - - // Initialize old data space. - old_data_space_ = - new OldSpace(this, - max_old_generation_size_, - OLD_DATA_SPACE, - NOT_EXECUTABLE); - if (old_data_space_ == NULL) return false; - if (!old_data_space_->SetUp()) return false; - - // Initialize the code space, set its maximum capacity to the old - // generation size. It needs executable memory. - // On 64-bit platform(s), we put all code objects in a 2 GB range of - // virtual address space, so that they can call each other with near calls. - if (code_range_size_ > 0) { - if (!isolate_->code_range()->SetUp(code_range_size_)) { - return false; - } - } - - code_space_ = - new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); - if (code_space_ == NULL) return false; - if (!code_space_->SetUp()) return false; - - // Initialize map space. - map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE); - if (map_space_ == NULL) return false; - if (!map_space_->SetUp()) return false; - - // Initialize simple cell space. - cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); - if (cell_space_ == NULL) return false; - if (!cell_space_->SetUp()) return false; - - // Initialize global property cell space. - property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_, - PROPERTY_CELL_SPACE); - if (property_cell_space_ == NULL) return false; - if (!property_cell_space_->SetUp()) return false; - - // The large object code space may contain code or data. We set the memory - // to be non-executable here for safety, but this means we need to enable it - // explicitly when allocating large code objects. 
- lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); - if (lo_space_ == NULL) return false; - if (!lo_space_->SetUp()) return false; - - // Set up the seed that is used to randomize the string hash function. - ASSERT(hash_seed() == 0); - if (FLAG_randomize_hashes) { - if (FLAG_hash_seed == 0) { - int rnd = isolate()->random_number_generator()->NextInt(); - set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask)); - } else { - set_hash_seed(Smi::FromInt(FLAG_hash_seed)); - } - } - - LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); - LOG(isolate_, IntPtrTEvent("heap-available", Available())); - - store_buffer()->SetUp(); - - mark_compact_collector()->SetUp(); - - return true; -} - - -bool Heap::CreateHeapObjects() { - // Create initial maps. - if (!CreateInitialMaps()) return false; - if (!CreateApiObjects()) return false; - - // Create initial objects - if (!CreateInitialObjects()) return false; - - native_contexts_list_ = undefined_value(); - array_buffers_list_ = undefined_value(); - allocation_sites_list_ = undefined_value(); - weak_object_to_code_table_ = undefined_value(); - return true; -} - - -void Heap::SetStackLimits() { - ASSERT(isolate_ != NULL); - ASSERT(isolate_ == isolate()); - // On 64 bit machines, pointers are generally out of range of Smis. We write - // something that looks like an out of range Smi to the GC. - - // Set up the special root array entries containing the stack limits. - // These are actually addresses, but the tag makes the GC ignore it. 
- roots_[kStackLimitRootIndex] = - reinterpret_cast<Object*>( - (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag); - roots_[kRealStackLimitRootIndex] = - reinterpret_cast<Object*>( - (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag); -} - - -void Heap::TearDown() { -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Verify(); - } -#endif - - UpdateMaximumCommitted(); - - if (FLAG_print_cumulative_gc_stat) { - PrintF("\n"); - PrintF("gc_count=%d ", gc_count_); - PrintF("mark_sweep_count=%d ", ms_count_); - PrintF("max_gc_pause=%.1f ", get_max_gc_pause()); - PrintF("total_gc_time=%.1f ", total_gc_time_ms_); - PrintF("min_in_mutator=%.1f ", get_min_in_mutator()); - PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", - get_max_alive_after_gc()); - PrintF("total_marking_time=%.1f ", marking_time()); - PrintF("total_sweeping_time=%.1f ", sweeping_time()); - PrintF("\n\n"); - } - - if (FLAG_print_max_heap_committed) { - PrintF("\n"); - PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ", - MaximumCommittedMemory()); - PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ", - new_space_.MaximumCommittedMemory()); - PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ", - old_data_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ", - old_pointer_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ", - old_pointer_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ", - code_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ", - map_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ", - cell_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ", - property_cell_space_->MaximumCommittedMemory()); - PrintF("maximum_committed_by_lo_space=%" 
V8_PTR_PREFIX "d ", - lo_space_->MaximumCommittedMemory()); - PrintF("\n\n"); - } - - TearDownArrayBuffers(); - - isolate_->global_handles()->TearDown(); - - external_string_table_.TearDown(); - - mark_compact_collector()->TearDown(); - - new_space_.TearDown(); - - if (old_pointer_space_ != NULL) { - old_pointer_space_->TearDown(); - delete old_pointer_space_; - old_pointer_space_ = NULL; - } - - if (old_data_space_ != NULL) { - old_data_space_->TearDown(); - delete old_data_space_; - old_data_space_ = NULL; - } - - if (code_space_ != NULL) { - code_space_->TearDown(); - delete code_space_; - code_space_ = NULL; - } - - if (map_space_ != NULL) { - map_space_->TearDown(); - delete map_space_; - map_space_ = NULL; - } - - if (cell_space_ != NULL) { - cell_space_->TearDown(); - delete cell_space_; - cell_space_ = NULL; - } - - if (property_cell_space_ != NULL) { - property_cell_space_->TearDown(); - delete property_cell_space_; - property_cell_space_ = NULL; - } - - if (lo_space_ != NULL) { - lo_space_->TearDown(); - delete lo_space_; - lo_space_ = NULL; - } - - store_buffer()->TearDown(); - incremental_marking()->TearDown(); - - isolate_->memory_allocator()->TearDown(); -} - - -void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, - GCType gc_type, - bool pass_isolate) { - ASSERT(callback != NULL); - GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); - ASSERT(!gc_prologue_callbacks_.Contains(pair)); - return gc_prologue_callbacks_.Add(pair); -} - - -void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { - ASSERT(callback != NULL); - for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { - if (gc_prologue_callbacks_[i].callback == callback) { - gc_prologue_callbacks_.Remove(i); - return; - } - } - UNREACHABLE(); -} - - -void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, - GCType gc_type, - bool pass_isolate) { - ASSERT(callback != NULL); - GCEpilogueCallbackPair pair(callback, 
gc_type, pass_isolate); - ASSERT(!gc_epilogue_callbacks_.Contains(pair)); - return gc_epilogue_callbacks_.Add(pair); -} - - -void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { - ASSERT(callback != NULL); - for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { - if (gc_epilogue_callbacks_[i].callback == callback) { - gc_epilogue_callbacks_.Remove(i); - return; - } - } - UNREACHABLE(); -} - - -MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj, - DependentCode* dep) { - ASSERT(!InNewSpace(obj)); - ASSERT(!InNewSpace(dep)); - MaybeObject* maybe_obj = - WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep); - WeakHashTable* table; - if (!maybe_obj->To(&table)) return maybe_obj; - if (ShouldZapGarbage() && weak_object_to_code_table_ != table) { - WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value()); - } - set_weak_object_to_code_table(table); - ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj)); - return weak_object_to_code_table_; -} - - -DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) { - Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj); - if (dep->IsDependentCode()) return DependentCode::cast(dep); - return DependentCode::cast(empty_fixed_array()); -} - - -void Heap::EnsureWeakObjectToCodeTable() { - if (!weak_object_to_code_table()->IsHashTable()) { - set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16)); - } -} - - -void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { - v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot); -} - -#ifdef DEBUG - -class PrintHandleVisitor: public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) - PrintF(" handle %p to %p\n", - reinterpret_cast<void*>(p), - reinterpret_cast<void*>(*p)); - } -}; - - -void Heap::PrintHandles() { - PrintF("Handles:\n"); - 
PrintHandleVisitor v; - isolate_->handle_scope_implementer()->Iterate(&v); -} - -#endif - - -Space* AllSpaces::next() { - switch (counter_++) { - case NEW_SPACE: - return heap_->new_space(); - case OLD_POINTER_SPACE: - return heap_->old_pointer_space(); - case OLD_DATA_SPACE: - return heap_->old_data_space(); - case CODE_SPACE: - return heap_->code_space(); - case MAP_SPACE: - return heap_->map_space(); - case CELL_SPACE: - return heap_->cell_space(); - case PROPERTY_CELL_SPACE: - return heap_->property_cell_space(); - case LO_SPACE: - return heap_->lo_space(); - default: - return NULL; - } -} - - -PagedSpace* PagedSpaces::next() { - switch (counter_++) { - case OLD_POINTER_SPACE: - return heap_->old_pointer_space(); - case OLD_DATA_SPACE: - return heap_->old_data_space(); - case CODE_SPACE: - return heap_->code_space(); - case MAP_SPACE: - return heap_->map_space(); - case CELL_SPACE: - return heap_->cell_space(); - case PROPERTY_CELL_SPACE: - return heap_->property_cell_space(); - default: - return NULL; - } -} - - - -OldSpace* OldSpaces::next() { - switch (counter_++) { - case OLD_POINTER_SPACE: - return heap_->old_pointer_space(); - case OLD_DATA_SPACE: - return heap_->old_data_space(); - case CODE_SPACE: - return heap_->code_space(); - default: - return NULL; - } -} - - -SpaceIterator::SpaceIterator(Heap* heap) - : heap_(heap), - current_space_(FIRST_SPACE), - iterator_(NULL), - size_func_(NULL) { -} - - -SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func) - : heap_(heap), - current_space_(FIRST_SPACE), - iterator_(NULL), - size_func_(size_func) { -} - - -SpaceIterator::~SpaceIterator() { - // Delete active iterator if any. - delete iterator_; -} - - -bool SpaceIterator::has_next() { - // Iterate until no more spaces. 
- return current_space_ != LAST_SPACE; -} - - -ObjectIterator* SpaceIterator::next() { - if (iterator_ != NULL) { - delete iterator_; - iterator_ = NULL; - // Move to the next space - current_space_++; - if (current_space_ > LAST_SPACE) { - return NULL; - } - } - - // Return iterator for the new current space. - return CreateIterator(); -} - - -// Create an iterator for the space to iterate. -ObjectIterator* SpaceIterator::CreateIterator() { - ASSERT(iterator_ == NULL); - - switch (current_space_) { - case NEW_SPACE: - iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_); - break; - case OLD_POINTER_SPACE: - iterator_ = - new HeapObjectIterator(heap_->old_pointer_space(), size_func_); - break; - case OLD_DATA_SPACE: - iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_); - break; - case CODE_SPACE: - iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_); - break; - case MAP_SPACE: - iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_); - break; - case CELL_SPACE: - iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_); - break; - case PROPERTY_CELL_SPACE: - iterator_ = new HeapObjectIterator(heap_->property_cell_space(), - size_func_); - break; - case LO_SPACE: - iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_); - break; - } - - // Return the newly allocated iterator; - ASSERT(iterator_ != NULL); - return iterator_; -} - - -class HeapObjectsFilter { - public: - virtual ~HeapObjectsFilter() {} - virtual bool SkipObject(HeapObject* object) = 0; -}; - - -class UnreachableObjectsFilter : public HeapObjectsFilter { - public: - explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) { - MarkReachableObjects(); - } - - ~UnreachableObjectsFilter() { - heap_->mark_compact_collector()->ClearMarkbits(); - } - - bool SkipObject(HeapObject* object) { - MarkBit mark_bit = Marking::MarkBitFrom(object); - return !mark_bit.Get(); - } - - private: - class MarkingVisitor : public 
ObjectVisitor { - public: - MarkingVisitor() : marking_stack_(10) {} - - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) { - if (!(*p)->IsHeapObject()) continue; - HeapObject* obj = HeapObject::cast(*p); - MarkBit mark_bit = Marking::MarkBitFrom(obj); - if (!mark_bit.Get()) { - mark_bit.Set(); - marking_stack_.Add(obj); - } - } - } - - void TransitiveClosure() { - while (!marking_stack_.is_empty()) { - HeapObject* obj = marking_stack_.RemoveLast(); - obj->Iterate(this); - } - } - - private: - List<HeapObject*> marking_stack_; - }; - - void MarkReachableObjects() { - MarkingVisitor visitor; - heap_->IterateRoots(&visitor, VISIT_ALL); - visitor.TransitiveClosure(); - } - - Heap* heap_; - DisallowHeapAllocation no_allocation_; -}; - - -HeapIterator::HeapIterator(Heap* heap) - : heap_(heap), - filtering_(HeapIterator::kNoFiltering), - filter_(NULL) { - Init(); -} - - -HeapIterator::HeapIterator(Heap* heap, - HeapIterator::HeapObjectsFiltering filtering) - : heap_(heap), - filtering_(filtering), - filter_(NULL) { - Init(); -} - - -HeapIterator::~HeapIterator() { - Shutdown(); -} - - -void HeapIterator::Init() { - // Start the iteration. - space_iterator_ = new SpaceIterator(heap_); - switch (filtering_) { - case kFilterUnreachable: - filter_ = new UnreachableObjectsFilter(heap_); - break; - default: - break; - } - object_iterator_ = space_iterator_->next(); -} - - -void HeapIterator::Shutdown() { -#ifdef DEBUG - // Assert that in filtering mode we have iterated through all - // objects. Otherwise, heap will be left in an inconsistent state. - if (filtering_ != kNoFiltering) { - ASSERT(object_iterator_ == NULL); - } -#endif - // Make sure the last iterator is deallocated. 
- delete space_iterator_; - space_iterator_ = NULL; - object_iterator_ = NULL; - delete filter_; - filter_ = NULL; -} - - -HeapObject* HeapIterator::next() { - if (filter_ == NULL) return NextObject(); - - HeapObject* obj = NextObject(); - while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject(); - return obj; -} - - -HeapObject* HeapIterator::NextObject() { - // No iterator means we are done. - if (object_iterator_ == NULL) return NULL; - - if (HeapObject* obj = object_iterator_->next_object()) { - // If the current iterator has more objects we are fine. - return obj; - } else { - // Go though the spaces looking for one that has objects. - while (space_iterator_->has_next()) { - object_iterator_ = space_iterator_->next(); - if (HeapObject* obj = object_iterator_->next_object()) { - return obj; - } - } - } - // Done with the last space. - object_iterator_ = NULL; - return NULL; -} - - -void HeapIterator::reset() { - // Restart the iterator. - Shutdown(); - Init(); -} - - -#ifdef DEBUG - -Object* const PathTracer::kAnyGlobalObject = NULL; - -class PathTracer::MarkVisitor: public ObjectVisitor { - public: - explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {} - void VisitPointers(Object** start, Object** end) { - // Scan all HeapObject pointers in [start, end) - for (Object** p = start; !tracer_->found() && (p < end); p++) { - if ((*p)->IsHeapObject()) - tracer_->MarkRecursively(p, this); - } - } - - private: - PathTracer* tracer_; -}; - - -class PathTracer::UnmarkVisitor: public ObjectVisitor { - public: - explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {} - void VisitPointers(Object** start, Object** end) { - // Scan all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - tracer_->UnmarkRecursively(p, this); - } - } - - private: - PathTracer* tracer_; -}; - - -void PathTracer::VisitPointers(Object** start, Object** end) { - bool done = ((what_to_find_ == FIND_FIRST) && 
found_target_); - // Visit all HeapObject pointers in [start, end) - for (Object** p = start; !done && (p < end); p++) { - if ((*p)->IsHeapObject()) { - TracePathFrom(p); - done = ((what_to_find_ == FIND_FIRST) && found_target_); - } - } -} - - -void PathTracer::Reset() { - found_target_ = false; - object_stack_.Clear(); -} - - -void PathTracer::TracePathFrom(Object** root) { - ASSERT((search_target_ == kAnyGlobalObject) || - search_target_->IsHeapObject()); - found_target_in_trace_ = false; - Reset(); - - MarkVisitor mark_visitor(this); - MarkRecursively(root, &mark_visitor); - - UnmarkVisitor unmark_visitor(this); - UnmarkRecursively(root, &unmark_visitor); - - ProcessResults(); -} - - -static bool SafeIsNativeContext(HeapObject* obj) { - return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map(); -} - - -void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { - if (!(*p)->IsHeapObject()) return; - - HeapObject* obj = HeapObject::cast(*p); - - Object* map = obj->map(); - - if (!map->IsHeapObject()) return; // visited before - - if (found_target_in_trace_) return; // stop if target found - object_stack_.Add(obj); - if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || - (obj == search_target_)) { - found_target_in_trace_ = true; - found_target_ = true; - return; - } - - bool is_native_context = SafeIsNativeContext(obj); - - // not visited yet - Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); - - Address map_addr = map_p->address(); - - obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag)); - - // Scan the object body. - if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) { - // This is specialized to scan Context's properly. 
- Object** start = reinterpret_cast<Object**>(obj->address() + - Context::kHeaderSize); - Object** end = reinterpret_cast<Object**>(obj->address() + - Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize); - mark_visitor->VisitPointers(start, end); - } else { - obj->IterateBody(map_p->instance_type(), - obj->SizeFromMap(map_p), - mark_visitor); - } - - // Scan the map after the body because the body is a lot more interesting - // when doing leak detection. - MarkRecursively(&map, mark_visitor); - - if (!found_target_in_trace_) // don't pop if found the target - object_stack_.RemoveLast(); -} - - -void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) { - if (!(*p)->IsHeapObject()) return; - - HeapObject* obj = HeapObject::cast(*p); - - Object* map = obj->map(); - - if (map->IsHeapObject()) return; // unmarked already - - Address map_addr = reinterpret_cast<Address>(map); - - map_addr -= kMarkTag; - - ASSERT_TAG_ALIGNED(map_addr); - - HeapObject* map_p = HeapObject::FromAddress(map_addr); - - obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p)); - - UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor); - - obj->IterateBody(Map::cast(map_p)->instance_type(), - obj->SizeFromMap(Map::cast(map_p)), - unmark_visitor); -} - - -void PathTracer::ProcessResults() { - if (found_target_) { - PrintF("=====================================\n"); - PrintF("==== Path to object ====\n"); - PrintF("=====================================\n\n"); - - ASSERT(!object_stack_.is_empty()); - for (int i = 0; i < object_stack_.length(); i++) { - if (i > 0) PrintF("\n |\n |\n V\n\n"); - Object* obj = object_stack_[i]; - obj->Print(); - } - PrintF("=====================================\n"); - } -} - - -// Triggers a depth-first traversal of reachable objects from one -// given root object and finds a path to a specific heap object and -// prints it. 
-void Heap::TracePathToObjectFrom(Object* target, Object* root) { - PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); - tracer.VisitPointer(&root); -} - - -// Triggers a depth-first traversal of reachable objects from roots -// and finds a path to a specific heap object and prints it. -void Heap::TracePathToObject(Object* target) { - PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); - IterateRoots(&tracer, VISIT_ONLY_STRONG); -} - - -// Triggers a depth-first traversal of reachable objects from roots -// and finds a path to any global object and prints it. Useful for -// determining the source for leaks of global objects. -void Heap::TracePathToGlobal() { - PathTracer tracer(PathTracer::kAnyGlobalObject, - PathTracer::FIND_ALL, - VISIT_ALL); - IterateRoots(&tracer, VISIT_ONLY_STRONG); -} -#endif - - -static intptr_t CountTotalHolesSize(Heap* heap) { - intptr_t holes_size = 0; - OldSpaces spaces(heap); - for (OldSpace* space = spaces.next(); - space != NULL; - space = spaces.next()) { - holes_size += space->Waste() + space->Available(); - } - return holes_size; -} - - -GCTracer::GCTracer(Heap* heap, - const char* gc_reason, - const char* collector_reason) - : start_time_(0.0), - start_object_size_(0), - start_memory_size_(0), - gc_count_(0), - full_gc_count_(0), - allocated_since_last_gc_(0), - spent_in_mutator_(0), - promoted_objects_size_(0), - nodes_died_in_new_space_(0), - nodes_copied_in_new_space_(0), - nodes_promoted_(0), - heap_(heap), - gc_reason_(gc_reason), - collector_reason_(collector_reason) { - if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; - start_time_ = OS::TimeCurrentMillis(); - start_object_size_ = heap_->SizeOfObjects(); - start_memory_size_ = heap_->isolate()->memory_allocator()->Size(); - - for (int i = 0; i < Scope::kNumberOfScopes; i++) { - scopes_[i] = 0; - } - - in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap); - - allocated_since_last_gc_ = - heap_->SizeOfObjects() - 
heap_->alive_after_last_gc_; - - if (heap_->last_gc_end_timestamp_ > 0) { - spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0); - } - - steps_count_ = heap_->incremental_marking()->steps_count(); - steps_took_ = heap_->incremental_marking()->steps_took(); - longest_step_ = heap_->incremental_marking()->longest_step(); - steps_count_since_last_gc_ = - heap_->incremental_marking()->steps_count_since_last_gc(); - steps_took_since_last_gc_ = - heap_->incremental_marking()->steps_took_since_last_gc(); -} - - -GCTracer::~GCTracer() { - // Printf ONE line iff flag is set. - if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; - - bool first_gc = (heap_->last_gc_end_timestamp_ == 0); - - heap_->alive_after_last_gc_ = heap_->SizeOfObjects(); - heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis(); - - double time = heap_->last_gc_end_timestamp_ - start_time_; - - // Update cumulative GC statistics if required. - if (FLAG_print_cumulative_gc_stat) { - heap_->total_gc_time_ms_ += time; - heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time); - heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_, - heap_->alive_after_last_gc_); - if (!first_gc) { - heap_->min_in_mutator_ = Min(heap_->min_in_mutator_, - spent_in_mutator_); - } - } else if (FLAG_trace_gc_verbose) { - heap_->total_gc_time_ms_ += time; - } - - if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return; - - heap_->AddMarkingTime(scopes_[Scope::MC_MARK]); - - if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return; - PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init()); - - if (!FLAG_trace_gc_nvp) { - int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]); - - double end_memory_size_mb = - static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB; - - PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", - CollectorString(), - static_cast<double>(start_object_size_) / MB, - static_cast<double>(start_memory_size_) / MB, - 
SizeOfHeapObjects(), - end_memory_size_mb); - - if (external_time > 0) PrintF("%d / ", external_time); - PrintF("%.1f ms", time); - if (steps_count_ > 0) { - if (collector_ == SCAVENGER) { - PrintF(" (+ %.1f ms in %d steps since last GC)", - steps_took_since_last_gc_, - steps_count_since_last_gc_); - } else { - PrintF(" (+ %.1f ms in %d steps since start of marking, " - "biggest step %.1f ms)", - steps_took_, - steps_count_, - longest_step_); - } - } - - if (gc_reason_ != NULL) { - PrintF(" [%s]", gc_reason_); - } - - if (collector_reason_ != NULL) { - PrintF(" [%s]", collector_reason_); - } - - PrintF(".\n"); - } else { - PrintF("pause=%.1f ", time); - PrintF("mutator=%.1f ", spent_in_mutator_); - PrintF("gc="); - switch (collector_) { - case SCAVENGER: - PrintF("s"); - break; - case MARK_COMPACTOR: - PrintF("ms"); - break; - default: - UNREACHABLE(); - } - PrintF(" "); - - PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]); - PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]); - PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]); - PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]); - PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]); - PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]); - PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]); - PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]); - PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]); - PrintF("compaction_ptrs=%.1f ", - scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]); - PrintF("intracompaction_ptrs=%.1f ", - scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]); - PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]); - PrintF("weakcollection_process=%.1f ", - scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]); - PrintF("weakcollection_clear=%.1f ", - scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]); - - PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_); - PrintF("total_size_after=%" V8_PTR_PREFIX "d 
", heap_->SizeOfObjects()); - PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", - in_free_list_or_wasted_before_gc_); - PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_)); - - PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_); - PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_); - PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_); - PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_); - PrintF("nodes_promoted=%d ", nodes_promoted_); - - if (collector_ == SCAVENGER) { - PrintF("stepscount=%d ", steps_count_since_last_gc_); - PrintF("stepstook=%.1f ", steps_took_since_last_gc_); - } else { - PrintF("stepscount=%d ", steps_count_); - PrintF("stepstook=%.1f ", steps_took_); - PrintF("longeststep=%.1f ", longest_step_); - } - - PrintF("\n"); - } - - heap_->PrintShortHeapStatistics(); -} - - -const char* GCTracer::CollectorString() { - switch (collector_) { - case SCAVENGER: - return "Scavenge"; - case MARK_COMPACTOR: - return "Mark-sweep"; - } - return "Unknown GC"; -} - - -int KeyedLookupCache::Hash(Map* map, Name* name) { - // Uses only lower 32 bits if pointers are larger. 
- uintptr_t addr_hash = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; - return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); -} - - -int KeyedLookupCache::Lookup(Map* map, Name* name) { - int index = (Hash(map, name) & kHashMask); - for (int i = 0; i < kEntriesPerBucket; i++) { - Key& key = keys_[index + i]; - if ((key.map == map) && key.name->Equals(name)) { - return field_offsets_[index + i]; - } - } - return kNotFound; -} - - -void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) { - if (!name->IsUniqueName()) { - String* internalized_string; - if (!map->GetIsolate()->heap()->InternalizeStringIfExists( - String::cast(name), &internalized_string)) { - return; - } - name = internalized_string; - } - // This cache is cleared only between mark compact passes, so we expect the - // cache to only contain old space names. - ASSERT(!map->GetIsolate()->heap()->InNewSpace(name)); - - int index = (Hash(map, name) & kHashMask); - // After a GC there will be free slots, so we use them in order (this may - // help to get the most frequently used one in position 0). - for (int i = 0; i< kEntriesPerBucket; i++) { - Key& key = keys_[index]; - Object* free_entry_indicator = NULL; - if (key.map == free_entry_indicator) { - key.map = map; - key.name = name; - field_offsets_[index + i] = field_offset; - return; - } - } - // No free entry found in this bucket, so we move them all down one and - // put the new entry at position zero. - for (int i = kEntriesPerBucket - 1; i > 0; i--) { - Key& key = keys_[index + i]; - Key& key2 = keys_[index + i - 1]; - key = key2; - field_offsets_[index + i] = field_offsets_[index + i - 1]; - } - - // Write the new first entry. 
- Key& key = keys_[index]; - key.map = map; - key.name = name; - field_offsets_[index] = field_offset; -} - - -void KeyedLookupCache::Clear() { - for (int index = 0; index < kLength; index++) keys_[index].map = NULL; -} - - -void DescriptorLookupCache::Clear() { - for (int index = 0; index < kLength; index++) keys_[index].source = NULL; -} - - -#ifdef DEBUG -void Heap::GarbageCollectionGreedyCheck() { - ASSERT(FLAG_gc_greedy); - if (isolate_->bootstrapper()->IsActive()) return; - if (!AllowAllocationFailure::IsAllowed(isolate_)) return; - CollectGarbage(NEW_SPACE); -} -#endif - - -void ExternalStringTable::CleanUp() { - int last = 0; - for (int i = 0; i < new_space_strings_.length(); ++i) { - if (new_space_strings_[i] == heap_->the_hole_value()) { - continue; - } - ASSERT(new_space_strings_[i]->IsExternalString()); - if (heap_->InNewSpace(new_space_strings_[i])) { - new_space_strings_[last++] = new_space_strings_[i]; - } else { - old_space_strings_.Add(new_space_strings_[i]); - } - } - new_space_strings_.Rewind(last); - new_space_strings_.Trim(); - - last = 0; - for (int i = 0; i < old_space_strings_.length(); ++i) { - if (old_space_strings_[i] == heap_->the_hole_value()) { - continue; - } - ASSERT(old_space_strings_[i]->IsExternalString()); - ASSERT(!heap_->InNewSpace(old_space_strings_[i])); - old_space_strings_[last++] = old_space_strings_[i]; - } - old_space_strings_.Rewind(last); - old_space_strings_.Trim(); -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Verify(); - } -#endif -} - - -void ExternalStringTable::TearDown() { - for (int i = 0; i < new_space_strings_.length(); ++i) { - heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i])); - } - new_space_strings_.Free(); - for (int i = 0; i < old_space_strings_.length(); ++i) { - heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i])); - } - old_space_strings_.Free(); -} - - -void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) { - 
chunk->set_next_chunk(chunks_queued_for_free_); - chunks_queued_for_free_ = chunk; -} - - -void Heap::FreeQueuedChunks() { - if (chunks_queued_for_free_ == NULL) return; - MemoryChunk* next; - MemoryChunk* chunk; - for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { - next = chunk->next_chunk(); - chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); - - if (chunk->owner()->identity() == LO_SPACE) { - // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress. - // If FromAnyPointerAddress encounters a slot that belongs to a large - // chunk queued for deletion it will fail to find the chunk because - // it try to perform a search in the list of pages owned by of the large - // object space and queued chunks were detached from that list. - // To work around this we split large chunk into normal kPageSize aligned - // pieces and initialize size, owner and flags field of every piece. - // If FromAnyPointerAddress encounters a slot that belongs to one of - // these smaller pieces it will treat it as a slot on a normal Page. - Address chunk_end = chunk->address() + chunk->size(); - MemoryChunk* inner = MemoryChunk::FromAddress( - chunk->address() + Page::kPageSize); - MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1); - while (inner <= inner_last) { - // Size of a large chunk is always a multiple of - // OS::AllocateAlignment() so there is always - // enough space for a fake MemoryChunk header. - Address area_end = Min(inner->address() + Page::kPageSize, chunk_end); - // Guard against overflow. 
- if (area_end < inner->address()) area_end = chunk_end; - inner->SetArea(inner->address(), area_end); - inner->set_size(Page::kPageSize); - inner->set_owner(lo_space()); - inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); - inner = MemoryChunk::FromAddress( - inner->address() + Page::kPageSize); - } - } - } - isolate_->heap()->store_buffer()->Compact(); - isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); - for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { - next = chunk->next_chunk(); - isolate_->memory_allocator()->Free(chunk); - } - chunks_queued_for_free_ = NULL; -} - - -void Heap::RememberUnmappedPage(Address page, bool compacted) { - uintptr_t p = reinterpret_cast<uintptr_t>(page); - // Tag the page pointer to make it findable in the dump file. - if (compacted) { - p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared. - } else { - p ^= 0x1d1ed & (Page::kPageSize - 1); // I died. - } - remembered_unmapped_pages_[remembered_unmapped_pages_index_] = - reinterpret_cast<Address>(p); - remembered_unmapped_pages_index_++; - remembered_unmapped_pages_index_ %= kRememberedUnmappedPages; -} - - -void Heap::ClearObjectStats(bool clear_last_time_stats) { - memset(object_counts_, 0, sizeof(object_counts_)); - memset(object_sizes_, 0, sizeof(object_sizes_)); - if (clear_last_time_stats) { - memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_)); - memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_)); - } -} - - -static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER; - - -void Heap::CheckpointObjectStats() { - LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer()); - Counters* counters = isolate()->counters(); -#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ - counters->count_of_##name()->Increment( \ - static_cast<int>(object_counts_[name])); \ - counters->count_of_##name()->Decrement( \ - static_cast<int>(object_counts_last_time_[name])); \ - 
counters->size_of_##name()->Increment( \ - static_cast<int>(object_sizes_[name])); \ - counters->size_of_##name()->Decrement( \ - static_cast<int>(object_sizes_last_time_[name])); - INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) -#undef ADJUST_LAST_TIME_OBJECT_COUNT - int index; -#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ - index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \ - counters->count_of_CODE_TYPE_##name()->Increment( \ - static_cast<int>(object_counts_[index])); \ - counters->count_of_CODE_TYPE_##name()->Decrement( \ - static_cast<int>(object_counts_last_time_[index])); \ - counters->size_of_CODE_TYPE_##name()->Increment( \ - static_cast<int>(object_sizes_[index])); \ - counters->size_of_CODE_TYPE_##name()->Decrement( \ - static_cast<int>(object_sizes_last_time_[index])); - CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) -#undef ADJUST_LAST_TIME_OBJECT_COUNT -#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ - index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \ - counters->count_of_FIXED_ARRAY_##name()->Increment( \ - static_cast<int>(object_counts_[index])); \ - counters->count_of_FIXED_ARRAY_##name()->Decrement( \ - static_cast<int>(object_counts_last_time_[index])); \ - counters->size_of_FIXED_ARRAY_##name()->Increment( \ - static_cast<int>(object_sizes_[index])); \ - counters->size_of_FIXED_ARRAY_##name()->Decrement( \ - static_cast<int>(object_sizes_last_time_[index])); - FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) -#undef ADJUST_LAST_TIME_OBJECT_COUNT -#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ - index = \ - FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \ - counters->count_of_CODE_AGE_##name()->Increment( \ - static_cast<int>(object_counts_[index])); \ - counters->count_of_CODE_AGE_##name()->Decrement( \ - static_cast<int>(object_counts_last_time_[index])); \ - counters->size_of_CODE_AGE_##name()->Increment( \ - static_cast<int>(object_sizes_[index])); \ - counters->size_of_CODE_AGE_##name()->Decrement( \ - 
static_cast<int>(object_sizes_last_time_[index])); - CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) -#undef ADJUST_LAST_TIME_OBJECT_COUNT - - OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); - OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); - ClearObjectStats(); -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/heap.h nodejs-0.11.15/deps/v8/src/heap.h --- nodejs-0.11.13/deps/v8/src/heap.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,3151 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_HEAP_H_ -#define V8_HEAP_H_ - -#include <cmath> - -#include "allocation.h" -#include "assert-scope.h" -#include "globals.h" -#include "incremental-marking.h" -#include "list.h" -#include "mark-compact.h" -#include "objects-visiting.h" -#include "spaces.h" -#include "splay-tree-inl.h" -#include "store-buffer.h" -#include "v8-counters.h" -#include "v8globals.h" - -namespace v8 { -namespace internal { - -// Defines all the roots in Heap. -#define STRONG_ROOT_LIST(V) \ - V(Map, byte_array_map, ByteArrayMap) \ - V(Map, free_space_map, FreeSpaceMap) \ - V(Map, one_pointer_filler_map, OnePointerFillerMap) \ - V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ - /* Cluster the most popular ones in a few cache lines here at the top. 
*/ \ - V(Smi, store_buffer_top, StoreBufferTop) \ - V(Oddball, undefined_value, UndefinedValue) \ - V(Oddball, the_hole_value, TheHoleValue) \ - V(Oddball, null_value, NullValue) \ - V(Oddball, true_value, TrueValue) \ - V(Oddball, false_value, FalseValue) \ - V(Oddball, uninitialized_value, UninitializedValue) \ - V(Map, cell_map, CellMap) \ - V(Map, global_property_cell_map, GlobalPropertyCellMap) \ - V(Map, shared_function_info_map, SharedFunctionInfoMap) \ - V(Map, meta_map, MetaMap) \ - V(Map, heap_number_map, HeapNumberMap) \ - V(Map, native_context_map, NativeContextMap) \ - V(Map, fixed_array_map, FixedArrayMap) \ - V(Map, code_map, CodeMap) \ - V(Map, scope_info_map, ScopeInfoMap) \ - V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ - V(Map, fixed_double_array_map, FixedDoubleArrayMap) \ - V(Map, constant_pool_array_map, ConstantPoolArrayMap) \ - V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ - V(Map, hash_table_map, HashTableMap) \ - V(FixedArray, empty_fixed_array, EmptyFixedArray) \ - V(ByteArray, empty_byte_array, EmptyByteArray) \ - V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ - V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \ - V(Oddball, arguments_marker, ArgumentsMarker) \ - /* The roots above this line should be boring from a GC point of view. */ \ - /* This means they are never in new space and never on a page that is */ \ - /* being compacted. 
*/ \ - V(FixedArray, number_string_cache, NumberStringCache) \ - V(Object, instanceof_cache_function, InstanceofCacheFunction) \ - V(Object, instanceof_cache_map, InstanceofCacheMap) \ - V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \ - V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ - V(FixedArray, string_split_cache, StringSplitCache) \ - V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \ - V(Object, termination_exception, TerminationException) \ - V(Smi, hash_seed, HashSeed) \ - V(Map, symbol_map, SymbolMap) \ - V(Map, string_map, StringMap) \ - V(Map, ascii_string_map, AsciiStringMap) \ - V(Map, cons_string_map, ConsStringMap) \ - V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ - V(Map, sliced_string_map, SlicedStringMap) \ - V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \ - V(Map, external_string_map, ExternalStringMap) \ - V(Map, \ - external_string_with_one_byte_data_map, \ - ExternalStringWithOneByteDataMap) \ - V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ - V(Map, short_external_string_map, ShortExternalStringMap) \ - V(Map, \ - short_external_string_with_one_byte_data_map, \ - ShortExternalStringWithOneByteDataMap) \ - V(Map, internalized_string_map, InternalizedStringMap) \ - V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \ - V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \ - V(Map, cons_ascii_internalized_string_map, ConsAsciiInternalizedStringMap) \ - V(Map, \ - external_internalized_string_map, \ - ExternalInternalizedStringMap) \ - V(Map, \ - external_internalized_string_with_one_byte_data_map, \ - ExternalInternalizedStringWithOneByteDataMap) \ - V(Map, \ - external_ascii_internalized_string_map, \ - ExternalAsciiInternalizedStringMap) \ - V(Map, \ - short_external_internalized_string_map, \ - ShortExternalInternalizedStringMap) \ - V(Map, \ - short_external_internalized_string_with_one_byte_data_map, \ - 
ShortExternalInternalizedStringWithOneByteDataMap) \ - V(Map, \ - short_external_ascii_internalized_string_map, \ - ShortExternalAsciiInternalizedStringMap) \ - V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \ - V(Map, undetectable_string_map, UndetectableStringMap) \ - V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \ - V(Map, external_int8_array_map, ExternalInt8ArrayMap) \ - V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \ - V(Map, external_int16_array_map, ExternalInt16ArrayMap) \ - V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \ - V(Map, external_int32_array_map, ExternalInt32ArrayMap) \ - V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \ - V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \ - V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \ - V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \ - V(ExternalArray, empty_external_int8_array, \ - EmptyExternalInt8Array) \ - V(ExternalArray, empty_external_uint8_array, \ - EmptyExternalUint8Array) \ - V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \ - V(ExternalArray, empty_external_uint16_array, \ - EmptyExternalUint16Array) \ - V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \ - V(ExternalArray, empty_external_uint32_array, \ - EmptyExternalUint32Array) \ - V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \ - V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \ - V(ExternalArray, empty_external_uint8_clamped_array, \ - EmptyExternalUint8ClampedArray) \ - V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \ - V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \ - V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \ - V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \ - V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \ - V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \ - V(Map, fixed_float32_array_map, 
FixedFloat32ArrayMap) \ - V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \ - V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \ - V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \ - V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \ - V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \ - V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \ - V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \ - V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \ - V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \ - V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \ - V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \ - EmptyFixedUint8ClampedArray) \ - V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \ - V(Map, function_context_map, FunctionContextMap) \ - V(Map, catch_context_map, CatchContextMap) \ - V(Map, with_context_map, WithContextMap) \ - V(Map, block_context_map, BlockContextMap) \ - V(Map, module_context_map, ModuleContextMap) \ - V(Map, global_context_map, GlobalContextMap) \ - V(Map, oddball_map, OddballMap) \ - V(Map, message_object_map, JSMessageObjectMap) \ - V(Map, foreign_map, ForeignMap) \ - V(HeapNumber, nan_value, NanValue) \ - V(HeapNumber, infinity_value, InfinityValue) \ - V(HeapNumber, minus_zero_value, MinusZeroValue) \ - V(Map, neander_map, NeanderMap) \ - V(JSObject, message_listeners, MessageListeners) \ - V(UnseededNumberDictionary, code_stubs, CodeStubs) \ - V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \ - V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \ - V(Code, js_entry_code, JsEntryCode) \ - V(Code, js_construct_entry_code, JsConstructEntryCode) \ - V(FixedArray, natives_source_cache, NativesSourceCache) \ - V(Script, empty_script, EmptyScript) \ - V(NameDictionary, 
intrinsic_function_names, IntrinsicFunctionNames) \ - V(Cell, undefined_cell, UndefineCell) \ - V(JSObject, observation_state, ObservationState) \ - V(Map, external_map, ExternalMap) \ - V(Object, symbol_registry, SymbolRegistry) \ - V(Symbol, frozen_symbol, FrozenSymbol) \ - V(Symbol, nonexistent_symbol, NonExistentSymbol) \ - V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \ - V(SeededNumberDictionary, empty_slow_element_dictionary, \ - EmptySlowElementDictionary) \ - V(Symbol, observed_symbol, ObservedSymbol) \ - V(Symbol, uninitialized_symbol, UninitializedSymbol) \ - V(Symbol, megamorphic_symbol, MegamorphicSymbol) \ - V(FixedArray, materialized_objects, MaterializedObjects) \ - V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \ - V(JSObject, microtask_state, MicrotaskState) - -// Entries in this list are limited to Smis and are not visited during GC. -#define SMI_ROOT_LIST(V) \ - V(Smi, stack_limit, StackLimit) \ - V(Smi, real_stack_limit, RealStackLimit) \ - V(Smi, last_script_id, LastScriptId) \ - V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \ - V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \ - V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \ - V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) - -#define ROOT_LIST(V) \ - STRONG_ROOT_LIST(V) \ - SMI_ROOT_LIST(V) \ - V(StringTable, string_table, StringTable) - -// Heap roots that are known to be immortal immovable, for which we can safely -// skip write barriers. 
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \ - V(byte_array_map) \ - V(free_space_map) \ - V(one_pointer_filler_map) \ - V(two_pointer_filler_map) \ - V(undefined_value) \ - V(the_hole_value) \ - V(null_value) \ - V(true_value) \ - V(false_value) \ - V(uninitialized_value) \ - V(cell_map) \ - V(global_property_cell_map) \ - V(shared_function_info_map) \ - V(meta_map) \ - V(heap_number_map) \ - V(native_context_map) \ - V(fixed_array_map) \ - V(code_map) \ - V(scope_info_map) \ - V(fixed_cow_array_map) \ - V(fixed_double_array_map) \ - V(constant_pool_array_map) \ - V(no_interceptor_result_sentinel) \ - V(hash_table_map) \ - V(empty_fixed_array) \ - V(empty_byte_array) \ - V(empty_descriptor_array) \ - V(empty_constant_pool_array) \ - V(arguments_marker) \ - V(symbol_map) \ - V(sloppy_arguments_elements_map) \ - V(function_context_map) \ - V(catch_context_map) \ - V(with_context_map) \ - V(block_context_map) \ - V(module_context_map) \ - V(global_context_map) \ - V(oddball_map) \ - V(message_object_map) \ - V(foreign_map) \ - V(neander_map) - -#define INTERNALIZED_STRING_LIST(V) \ - V(Array_string, "Array") \ - V(Object_string, "Object") \ - V(proto_string, "__proto__") \ - V(arguments_string, "arguments") \ - V(Arguments_string, "Arguments") \ - V(call_string, "call") \ - V(apply_string, "apply") \ - V(caller_string, "caller") \ - V(boolean_string, "boolean") \ - V(Boolean_string, "Boolean") \ - V(callee_string, "callee") \ - V(constructor_string, "constructor") \ - V(dot_result_string, ".result") \ - V(dot_for_string, ".for.") \ - V(dot_iterator_string, ".iterator") \ - V(dot_generator_object_string, ".generator_object") \ - V(eval_string, "eval") \ - V(empty_string, "") \ - V(function_string, "function") \ - V(length_string, "length") \ - V(module_string, "module") \ - V(name_string, "name") \ - V(native_string, "native") \ - V(null_string, "null") \ - V(number_string, "number") \ - V(Number_string, "Number") \ - V(nan_string, "NaN") \ - V(RegExp_string, "RegExp") 
\ - V(source_string, "source") \ - V(global_string, "global") \ - V(ignore_case_string, "ignoreCase") \ - V(multiline_string, "multiline") \ - V(input_string, "input") \ - V(index_string, "index") \ - V(last_index_string, "lastIndex") \ - V(object_string, "object") \ - V(literals_string, "literals") \ - V(prototype_string, "prototype") \ - V(string_string, "string") \ - V(String_string, "String") \ - V(symbol_string, "symbol") \ - V(Symbol_string, "Symbol") \ - V(for_string, "for") \ - V(for_api_string, "for_api") \ - V(for_intern_string, "for_intern") \ - V(private_api_string, "private_api") \ - V(private_intern_string, "private_intern") \ - V(Date_string, "Date") \ - V(this_string, "this") \ - V(to_string_string, "toString") \ - V(char_at_string, "CharAt") \ - V(undefined_string, "undefined") \ - V(value_of_string, "valueOf") \ - V(stack_string, "stack") \ - V(toJSON_string, "toJSON") \ - V(InitializeVarGlobal_string, "InitializeVarGlobal") \ - V(InitializeConstGlobal_string, "InitializeConstGlobal") \ - V(KeyedLoadElementMonomorphic_string, \ - "KeyedLoadElementMonomorphic") \ - V(KeyedStoreElementMonomorphic_string, \ - "KeyedStoreElementMonomorphic") \ - V(stack_overflow_string, "kStackOverflowBoilerplate") \ - V(illegal_access_string, "illegal access") \ - V(illegal_execution_state_string, "illegal execution state") \ - V(get_string, "get") \ - V(set_string, "set") \ - V(map_field_string, "%map") \ - V(elements_field_string, "%elements") \ - V(length_field_string, "%length") \ - V(cell_value_string, "%cell_value") \ - V(function_class_string, "Function") \ - V(illegal_argument_string, "illegal argument") \ - V(MakeReferenceError_string, "MakeReferenceError") \ - V(MakeSyntaxError_string, "MakeSyntaxError") \ - V(MakeTypeError_string, "MakeTypeError") \ - V(illegal_return_string, "illegal_return") \ - V(illegal_break_string, "illegal_break") \ - V(illegal_continue_string, "illegal_continue") \ - V(unknown_label_string, "unknown_label") \ - 
V(redeclaration_string, "redeclaration") \ - V(space_string, " ") \ - V(exec_string, "exec") \ - V(zero_string, "0") \ - V(global_eval_string, "GlobalEval") \ - V(identity_hash_string, "v8::IdentityHash") \ - V(closure_string, "(closure)") \ - V(use_strict_string, "use strict") \ - V(dot_string, ".") \ - V(anonymous_function_string, "(anonymous function)") \ - V(compare_ic_string, "==") \ - V(strict_compare_ic_string, "===") \ - V(infinity_string, "Infinity") \ - V(minus_infinity_string, "-Infinity") \ - V(hidden_stack_trace_string, "v8::hidden_stack_trace") \ - V(query_colon_string, "(?:)") \ - V(Generator_string, "Generator") \ - V(throw_string, "throw") \ - V(done_string, "done") \ - V(value_string, "value") \ - V(next_string, "next") \ - V(byte_length_string, "byteLength") \ - V(byte_offset_string, "byteOffset") \ - V(buffer_string, "buffer") - -// Forward declarations. -class GCTracer; -class HeapStats; -class Isolate; -class WeakObjectRetainer; - - -typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, - Object** pointer); - -class StoreBufferRebuilder { - public: - explicit StoreBufferRebuilder(StoreBuffer* store_buffer) - : store_buffer_(store_buffer) { - } - - void Callback(MemoryChunk* page, StoreBufferEvent event); - - private: - StoreBuffer* store_buffer_; - - // We record in this variable how full the store buffer was when we started - // iterating over the current page, finding pointers to new space. If the - // store buffer overflows again we can exempt the page from the store buffer - // by rewinding to this point instead of having to search the store buffer. - Object*** start_of_current_page_; - // The current page we are scanning in the store buffer iterator. - MemoryChunk* current_page_; -}; - - - -// A queue of objects promoted during scavenge. Each object is accompanied -// by it's size to avoid dereferencing a map pointer for scanning. 
-class PromotionQueue { - public: - explicit PromotionQueue(Heap* heap) - : front_(NULL), - rear_(NULL), - limit_(NULL), - emergency_stack_(0), - heap_(heap) { } - - void Initialize(); - - void Destroy() { - ASSERT(is_empty()); - delete emergency_stack_; - emergency_stack_ = NULL; - } - - inline void ActivateGuardIfOnTheSamePage(); - - Page* GetHeadPage() { - return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); - } - - void SetNewLimit(Address limit) { - if (!guard_) { - return; - } - - ASSERT(GetHeadPage() == Page::FromAllocationTop(limit)); - limit_ = reinterpret_cast<intptr_t*>(limit); - - if (limit_ <= rear_) { - return; - } - - RelocateQueueHead(); - } - - bool is_empty() { - return (front_ == rear_) && - (emergency_stack_ == NULL || emergency_stack_->length() == 0); - } - - inline void insert(HeapObject* target, int size); - - void remove(HeapObject** target, int* size) { - ASSERT(!is_empty()); - if (front_ == rear_) { - Entry e = emergency_stack_->RemoveLast(); - *target = e.obj_; - *size = e.size_; - return; - } - - if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) { - NewSpacePage* front_page = - NewSpacePage::FromAddress(reinterpret_cast<Address>(front_)); - ASSERT(!front_page->prev_page()->is_anchor()); - front_ = - reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end()); - } - *target = reinterpret_cast<HeapObject*>(*(--front_)); - *size = static_cast<int>(*(--front_)); - // Assert no underflow. - SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), - reinterpret_cast<Address>(front_)); - } - - private: - // The front of the queue is higher in the memory page chain than the rear. 
- intptr_t* front_; - intptr_t* rear_; - intptr_t* limit_; - - bool guard_; - - static const int kEntrySizeInWords = 2; - - struct Entry { - Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { } - - HeapObject* obj_; - int size_; - }; - List<Entry>* emergency_stack_; - - Heap* heap_; - - void RelocateQueueHead(); - - DISALLOW_COPY_AND_ASSIGN(PromotionQueue); -}; - - -typedef void (*ScavengingCallback)(Map* map, - HeapObject** slot, - HeapObject* object); - - -// External strings table is a place where all external strings are -// registered. We need to keep track of such strings to properly -// finalize them. -class ExternalStringTable { - public: - // Registers an external string. - inline void AddString(String* string); - - inline void Iterate(ObjectVisitor* v); - - // Restores internal invariant and gets rid of collected strings. - // Must be called after each Iterate() that modified the strings. - void CleanUp(); - - // Destroys all allocated memory. - void TearDown(); - - private: - explicit ExternalStringTable(Heap* heap) : heap_(heap) { } - - friend class Heap; - - inline void Verify(); - - inline void AddOldString(String* string); - - // Notifies the table that only a prefix of the new list is valid. - inline void ShrinkNewStrings(int position); - - // To speed up scavenge collections new space string are kept - // separate from old space strings. - List<Object*> new_space_strings_; - List<Object*> old_space_strings_; - - Heap* heap_; - - DISALLOW_COPY_AND_ASSIGN(ExternalStringTable); -}; - - -enum ArrayStorageAllocationMode { - DONT_INITIALIZE_ARRAY_ELEMENTS, - INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE -}; - - -class Heap { - public: - // Configure heap size before setup. Return false if the heap has been - // set up already. 
- bool ConfigureHeap(int max_semispace_size, - intptr_t max_old_gen_size, - intptr_t max_executable_size); - bool ConfigureHeapDefault(); - - // Prepares the heap, setting up memory areas that are needed in the isolate - // without actually creating any objects. - bool SetUp(); - - // Bootstraps the object heap with the core set of objects required to run. - // Returns whether it succeeded. - bool CreateHeapObjects(); - - // Destroys all memory allocated by the heap. - void TearDown(); - - // Set the stack limit in the roots_ array. Some architectures generate - // code that looks here, because it is faster than loading from the static - // jslimit_/real_jslimit_ variable in the StackGuard. - void SetStackLimits(); - - // Returns whether SetUp has been called. - bool HasBeenSetUp(); - - // Returns the maximum amount of memory reserved for the heap. For - // the young generation, we reserve 4 times the amount needed for a - // semi space. The young generation consists of two semi spaces and - // we reserve twice the amount needed for those in order to ensure - // that new space can be aligned to its size. - intptr_t MaxReserved() { - return 4 * reserved_semispace_size_ + max_old_generation_size_; - } - int MaxSemiSpaceSize() { return max_semispace_size_; } - int ReservedSemiSpaceSize() { return reserved_semispace_size_; } - int InitialSemiSpaceSize() { return initial_semispace_size_; } - intptr_t MaxOldGenerationSize() { return max_old_generation_size_; } - intptr_t MaxExecutableSize() { return max_executable_size_; } - - // Returns the capacity of the heap in bytes w/o growing. Heap grows when - // more spaces are needed until it reaches the limit. - intptr_t Capacity(); - - // Returns the amount of memory currently committed for the heap. - intptr_t CommittedMemory(); - - // Returns the amount of executable memory currently committed for the heap. - intptr_t CommittedMemoryExecutable(); - - // Returns the amount of phyical memory currently committed for the heap. 
- size_t CommittedPhysicalMemory(); - - // Returns the maximum amount of memory ever committed for the heap. - intptr_t MaximumCommittedMemory() { return maximum_committed_; } - - // Updates the maximum committed memory for the heap. Should be called - // whenever a space grows. - void UpdateMaximumCommitted(); - - // Returns the available bytes in space w/o growing. - // Heap doesn't guarantee that it can allocate an object that requires - // all available bytes. Check MaxHeapObjectSize() instead. - intptr_t Available(); - - // Returns of size of all objects residing in the heap. - intptr_t SizeOfObjects(); - - // Return the starting address and a mask for the new space. And-masking an - // address with the mask will result in the start address of the new space - // for all addresses in either semispace. - Address NewSpaceStart() { return new_space_.start(); } - uintptr_t NewSpaceMask() { return new_space_.mask(); } - Address NewSpaceTop() { return new_space_.top(); } - - NewSpace* new_space() { return &new_space_; } - OldSpace* old_pointer_space() { return old_pointer_space_; } - OldSpace* old_data_space() { return old_data_space_; } - OldSpace* code_space() { return code_space_; } - MapSpace* map_space() { return map_space_; } - CellSpace* cell_space() { return cell_space_; } - PropertyCellSpace* property_cell_space() { - return property_cell_space_; - } - LargeObjectSpace* lo_space() { return lo_space_; } - PagedSpace* paged_space(int idx) { - switch (idx) { - case OLD_POINTER_SPACE: - return old_pointer_space(); - case OLD_DATA_SPACE: - return old_data_space(); - case MAP_SPACE: - return map_space(); - case CELL_SPACE: - return cell_space(); - case PROPERTY_CELL_SPACE: - return property_cell_space(); - case CODE_SPACE: - return code_space(); - case NEW_SPACE: - case LO_SPACE: - UNREACHABLE(); - } - return NULL; - } - - bool always_allocate() { return always_allocate_scope_depth_ != 0; } - Address always_allocate_scope_depth_address() { - return 
reinterpret_cast<Address>(&always_allocate_scope_depth_); - } - bool linear_allocation() { - return linear_allocation_scope_depth_ != 0; - } - - Address* NewSpaceAllocationTopAddress() { - return new_space_.allocation_top_address(); - } - Address* NewSpaceAllocationLimitAddress() { - return new_space_.allocation_limit_address(); - } - - Address* OldPointerSpaceAllocationTopAddress() { - return old_pointer_space_->allocation_top_address(); - } - Address* OldPointerSpaceAllocationLimitAddress() { - return old_pointer_space_->allocation_limit_address(); - } - - Address* OldDataSpaceAllocationTopAddress() { - return old_data_space_->allocation_top_address(); - } - Address* OldDataSpaceAllocationLimitAddress() { - return old_data_space_->allocation_limit_address(); - } - - // Allocates and initializes a new JavaScript object based on a - // constructor. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // If allocation_site is non-null, then a memento is emitted after the object - // that points to the site. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateJSObject( - JSFunction* constructor, - PretenureFlag pretenure = NOT_TENURED, - AllocationSite* allocation_site = NULL); - - MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context, - ScopeInfo* scope_info); - - // Allocate a JSArray with no elements - MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray( - ElementsKind elements_kind, - PretenureFlag pretenure = NOT_TENURED) { - return AllocateJSArrayAndStorage(elements_kind, 0, 0, - DONT_INITIALIZE_ARRAY_ELEMENTS, - pretenure); - } - - // Allocate a JSArray with a specified length but elements that are left - // uninitialized. 
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage( - ElementsKind elements_kind, - int length, - int capacity, - ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS, - PretenureFlag pretenure = NOT_TENURED); - - MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage( - JSArray* array, - int length, - int capacity, - ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS); - - // Allocate a JSArray with no elements - MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements( - FixedArrayBase* array_base, - ElementsKind elements_kind, - int length, - PretenureFlag pretenure = NOT_TENURED); - - // Returns a deep copy of the JavaScript object. - // Properties and elements are copied too. - // Returns failure if allocation failed. - // Optionally takes an AllocationSite to be appended in an AllocationMemento. - MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source, - AllocationSite* site = NULL); - - // Allocates a JS ArrayBuffer object. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateJSArrayBuffer(); - - // Allocates a Harmony proxy or function proxy. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler, - Object* prototype); - - MUST_USE_RESULT MaybeObject* AllocateJSFunctionProxy(Object* handler, - Object* call_trap, - Object* construct_trap, - Object* prototype); - - // Reinitialize a JSReceiver into an (empty) JS object of respective type and - // size, but keeping the original prototype. The receiver must have at least - // the size of the new object. The object is reinitialized and behaves as an - // object that has been freshly allocated. - // Returns failure if an error occured, otherwise object. 
- MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object, - InstanceType type, - int size); - - // Reinitialize an JSGlobalProxy based on a constructor. The object - // must have the same size as objects allocated using the - // constructor. The object is reinitialized and behaves as an - // object that has been freshly allocated using the constructor. - MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy( - JSFunction* constructor, JSGlobalProxy* global); - - // Allocates and initializes a new JavaScript object based on a map. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Passing an allocation site means that a memento will be created that - // points to the site. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap( - Map* map, - PretenureFlag pretenure = NOT_TENURED, - bool alloc_props = true, - AllocationSite* allocation_site = NULL); - - // Allocates a heap object based on the map. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space, - AllocationSite* allocation_site = NULL); - - // Allocates a JS Map in the heap. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateMap( - InstanceType instance_type, - int instance_size, - ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); - - // Allocates a partial map for bootstrapping. - MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type, - int instance_size); - - // Allocates an empty code cache. - MUST_USE_RESULT MaybeObject* AllocateCodeCache(); - - // Allocates a serialized scope info. 
- MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length); - - // Allocates an External object for v8's external API. - MUST_USE_RESULT MaybeObject* AllocateExternal(void* value); - - // Allocates an empty PolymorphicCodeCache. - MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache(); - - // Allocates a pre-tenured empty AccessorPair. - MUST_USE_RESULT MaybeObject* AllocateAccessorPair(); - - // Allocates an empty TypeFeedbackInfo. - MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo(); - - // Allocates an AliasedArgumentsEntry. - MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot); - - // Clear the Instanceof cache (used when a prototype changes). - inline void ClearInstanceofCache(); - - // Iterates the whole code space to clear all ICs of the given kind. - void ClearAllICsByKind(Code::Kind kind); - - // For use during bootup. - void RepairFreeListsAfterBoot(); - - // Allocates and fully initializes a String. There are two String - // encodings: ASCII and two byte. One should choose between the three string - // allocation functions based on the encoding of the string buffer used to - // initialized the string. - // - ...FromAscii initializes the string from a buffer that is ASCII - // encoded (it does not check that the buffer is ASCII encoded) and the - // result will be ASCII encoded. - // - ...FromUTF8 initializes the string from a buffer that is UTF-8 - // encoded. If the characters are all single-byte characters, the - // result will be ASCII encoded, otherwise it will converted to two - // byte. - // - ...FromTwoByte initializes the string from a buffer that is two-byte - // encoded. If the characters are all single-byte characters, the - // result will be converted to ASCII, otherwise it will be left as - // two-byte. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. 
- MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte( - Vector<const uint8_t> str, - PretenureFlag pretenure = NOT_TENURED); - // TODO(dcarney): remove this function. - MUST_USE_RESULT inline MaybeObject* AllocateStringFromOneByte( - Vector<const char> str, - PretenureFlag pretenure = NOT_TENURED) { - return AllocateStringFromOneByte(Vector<const uint8_t>::cast(str), - pretenure); - } - MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8( - Vector<const char> str, - PretenureFlag pretenure = NOT_TENURED); - MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow( - Vector<const char> str, - int non_ascii_start, - PretenureFlag pretenure = NOT_TENURED); - MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte( - Vector<const uc16> str, - PretenureFlag pretenure = NOT_TENURED); - - // Allocates an internalized string in old space based on the character - // stream. Returns Failure::RetryAfterGC(requested_bytes, space) if the - // allocation failed. - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringFromUtf8( - Vector<const char> str, - int chars, - uint32_t hash_field); - - MUST_USE_RESULT inline MaybeObject* AllocateOneByteInternalizedString( - Vector<const uint8_t> str, - uint32_t hash_field); - - MUST_USE_RESULT inline MaybeObject* AllocateTwoByteInternalizedString( - Vector<const uc16> str, - uint32_t hash_field); - - template<typename T> - static inline bool IsOneByte(T t, int chars); - - template<typename T> - MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringImpl( - T t, int chars, uint32_t hash_field); - - template<bool is_one_byte, typename T> - MUST_USE_RESULT MaybeObject* AllocateInternalizedStringImpl( - T t, int chars, uint32_t hash_field); - - // Allocates and partially initializes a String. There are two String - // encodings: ASCII and two byte. These functions allocate a string of the - // given length and set its map and length fields. 
The characters of the - // string are uninitialized. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateRawOneByteString( - int length, - PretenureFlag pretenure = NOT_TENURED); - MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString( - int length, - PretenureFlag pretenure = NOT_TENURED); - - // Computes a single character string where the character has code. - // A cache is used for ASCII codes. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode( - uint16_t code); - - // Allocate a byte array of the specified length - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateByteArray( - int length, - PretenureFlag pretenure = NOT_TENURED); - - // Allocates an external array of the specified length and type. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateExternalArray( - int length, - ExternalArrayType array_type, - void* external_pointer, - PretenureFlag pretenure); - - // Allocates a fixed typed array of the specified length and type. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateFixedTypedArray( - int length, - ExternalArrayType array_type, - PretenureFlag pretenure); - - // Allocate a symbol in old space. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. 
- MUST_USE_RESULT MaybeObject* AllocateSymbol(); - MUST_USE_RESULT MaybeObject* AllocatePrivateSymbol(); - - // Allocate a tenured AllocationSite. It's payload is null - MUST_USE_RESULT MaybeObject* AllocateAllocationSite(); - - // Allocates a fixed array initialized with undefined values - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateFixedArray( - int length, - PretenureFlag pretenure = NOT_TENURED); - - // Allocates an uninitialized fixed array. It must be filled by the caller. - // - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length); - - // Move len elements within a given array from src_index index to dst_index - // index. - void MoveElements(FixedArray* array, int dst_index, int src_index, int len); - - // Make a copy of src and return it. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src); - - // Make a copy of src and return it. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT MaybeObject* CopyAndTenureFixedCOWArray(FixedArray* src); - - // Make a copy of src, set the map, and return the copy. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map); - - // Make a copy of src and return it. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT inline MaybeObject* CopyFixedDoubleArray( - FixedDoubleArray* src); - - // Make a copy of src, set the map, and return the copy. 
Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap( - FixedDoubleArray* src, Map* map); - - // Make a copy of src and return it. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT inline MaybeObject* CopyConstantPoolArray( - ConstantPoolArray* src); - - // Make a copy of src, set the map, and return the copy. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - MUST_USE_RESULT MaybeObject* CopyConstantPoolArrayWithMap( - ConstantPoolArray* src, Map* map); - - // Allocates a fixed array initialized with the hole values. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles( - int length, - PretenureFlag pretenure = NOT_TENURED); - - MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray( - int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries); - - // Allocates a fixed double array with uninitialized values. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray( - int length, - PretenureFlag pretenure = NOT_TENURED); - - // Allocates a fixed double array with hole values. Returns - // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles( - int length, - PretenureFlag pretenure = NOT_TENURED); - - // AllocateHashTable is identical to AllocateFixedArray except - // that the resulting object has hash_table_map as map. 
- MUST_USE_RESULT MaybeObject* AllocateHashTable( - int length, PretenureFlag pretenure = NOT_TENURED); - - // Allocate a native (but otherwise uninitialized) context. - MUST_USE_RESULT MaybeObject* AllocateNativeContext(); - - // Allocate a global context. - MUST_USE_RESULT MaybeObject* AllocateGlobalContext(JSFunction* function, - ScopeInfo* scope_info); - - // Allocate a module context. - MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info); - - // Allocate a function context. - MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length, - JSFunction* function); - - // Allocate a catch context. - MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function, - Context* previous, - String* name, - Object* thrown_object); - // Allocate a 'with' context. - MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function, - Context* previous, - JSReceiver* extension); - - // Allocate a block context. - MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function, - Context* previous, - ScopeInfo* info); - - // Allocates a new utility object in the old generation. - MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type); - - // Allocates a function initialized with a shared part. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateFunction( - Map* function_map, - SharedFunctionInfo* shared, - Object* prototype, - PretenureFlag pretenure = TENURED); - - // Sloppy mode arguments object size. - static const int kSloppyArgumentsObjectSize = - JSObject::kHeaderSize + 2 * kPointerSize; - // Strict mode arguments has no callee so it is smaller. - static const int kStrictArgumentsObjectSize = - JSObject::kHeaderSize + 1 * kPointerSize; - // Indicies for direct access into argument objects. - static const int kArgumentsLengthIndex = 0; - // callee is only valid in sloppy mode. 
- static const int kArgumentsCalleeIndex = 1; - - // Allocates an arguments object - optionally with an elements array. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateArgumentsObject( - Object* callee, int length); - - // Same as NewNumberFromDouble, but may return a preallocated/immutable - // number object (e.g., minus_zero_value_, nan_value_) - MUST_USE_RESULT MaybeObject* NumberFromDouble( - double value, PretenureFlag pretenure = NOT_TENURED); - - // Allocated a HeapNumber from value. - MUST_USE_RESULT MaybeObject* AllocateHeapNumber( - double value, PretenureFlag pretenure = NOT_TENURED); - - // Converts an int into either a Smi or a HeapNumber object. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* NumberFromInt32( - int32_t value, PretenureFlag pretenure = NOT_TENURED); - - // Converts an int into either a Smi or a HeapNumber object. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* NumberFromUint32( - uint32_t value, PretenureFlag pretenure = NOT_TENURED); - - // Allocates a new foreign object. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateForeign( - Address address, PretenureFlag pretenure = NOT_TENURED); - - // Allocates a new SharedFunctionInfo object. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. 
- MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name); - - // Allocates a new JSMessageObject object. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note that this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateJSMessageObject( - String* type, - JSArray* arguments, - int start_position, - int end_position, - Object* script, - Object* stack_frames); - - // Allocate a new external string object, which is backed by a string - // resource that resides outside the V8 heap. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii( - const ExternalAsciiString::Resource* resource); - MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte( - const ExternalTwoByteString::Resource* resource); - - // Finalizes an external string by deleting the associated external - // data and clearing the resource pointer. - inline void FinalizeExternalString(String* string); - - // Allocates an uninitialized object. The memory is non-executable if the - // hardware and OS allow. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes, - AllocationSpace space, - AllocationSpace retry_space); - - // Initialize a filler object to keep the ability to iterate over the heap - // when shortening objects. - void CreateFillerObjectAt(Address addr, int size); - - bool CanMoveObjectStart(HeapObject* object); - - enum InvocationMode { FROM_GC, FROM_MUTATOR }; - - // Maintain marking consistency for IncrementalMarking. 
- void AdjustLiveBytes(Address address, int by, InvocationMode mode); - - // Makes a new native code object - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. On success, the pointer to the Code object is stored in the - // self_reference. This allows generated code to reference its own Code - // object by containing this pointer. - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* CreateCode( - const CodeDesc& desc, - Code::Flags flags, - Handle<Object> self_reference, - bool immovable = false, - bool crankshafted = false, - int prologue_offset = Code::kPrologueOffsetNotSet); - - MUST_USE_RESULT MaybeObject* CopyCode(Code* code); - - // Copy the code and scope info part of the code object, but insert - // the provided data as the relocation information. - MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info); - - // Finds the internalized copy for string in the string table. - // If not found, a new string is added to the table and returned. - // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation - // failed. - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) { - return InternalizeUtf8String(CStrVector(str)); - } - MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str); - - MUST_USE_RESULT MaybeObject* InternalizeString(String* str); - MUST_USE_RESULT MaybeObject* InternalizeStringWithKey(HashTableKey* key); - - bool InternalizeStringIfExists(String* str, String** result); - bool InternalizeTwoCharsStringIfExists(String* str, String** result); - - // Compute the matching internalized string map for a string if possible. - // NULL is returned if string is in new space or not flattened. - Map* InternalizedStringMapForString(String* str); - - // Tries to flatten a string before compare operation. 
- // - // Returns a failure in case it was decided that flattening was - // necessary and failed. Note, if flattening is not necessary the - // string might stay non-flat even when not a failure is returned. - // - // Please note this function does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str); - - // Converts the given boolean condition to JavaScript boolean value. - inline Object* ToBoolean(bool condition); - - // Performs garbage collection operation. - // Returns whether there is a chance that another major GC could - // collect more garbage. - inline bool CollectGarbage( - AllocationSpace space, - const char* gc_reason = NULL, - const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); - - static const int kNoGCFlags = 0; - static const int kSweepPreciselyMask = 1; - static const int kReduceMemoryFootprintMask = 2; - static const int kAbortIncrementalMarkingMask = 4; - - // Making the heap iterable requires us to sweep precisely and abort any - // incremental marking as well. - static const int kMakeHeapIterableMask = - kSweepPreciselyMask | kAbortIncrementalMarkingMask; - - // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is - // non-zero, then the slower precise sweeper is used, which leaves the heap - // in a state where we can iterate over the heap visiting all objects. - void CollectAllGarbage( - int flags, - const char* gc_reason = NULL, - const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); - - // Last hope GC, should try to squeeze as much as possible. - void CollectAllAvailableGarbage(const char* gc_reason = NULL); - - // Check whether the heap is currently iterable. - bool IsHeapIterable(); - - // Ensure that we have swept all spaces in such a way that we can iterate - // over all objects. May cause a GC. - void EnsureHeapIsIterable(); - - // Notify the heap that a context has been disposed. 
- int NotifyContextDisposed(); - - inline void increment_scan_on_scavenge_pages() { - scan_on_scavenge_pages_++; - if (FLAG_gc_verbose) { - PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); - } - } - - inline void decrement_scan_on_scavenge_pages() { - scan_on_scavenge_pages_--; - if (FLAG_gc_verbose) { - PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); - } - } - - PromotionQueue* promotion_queue() { return &promotion_queue_; } - -#ifdef DEBUG - // Utility used with flag gc-greedy. - void GarbageCollectionGreedyCheck(); -#endif - - void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, - GCType gc_type_filter, - bool pass_isolate = true); - void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback); - - void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, - GCType gc_type_filter, - bool pass_isolate = true); - void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback); - - // Heap root getters. We have versions with and without type::cast() here. - // You can't use type::cast during GC because the assert fails. - // TODO(1490): Try removing the unchecked accessors, now that GC marking does - // not corrupt the map. 
-#define ROOT_ACCESSOR(type, name, camel_name) \ - type* name() { \ - return type::cast(roots_[k##camel_name##RootIndex]); \ - } \ - type* raw_unchecked_##name() { \ - return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ - } - ROOT_LIST(ROOT_ACCESSOR) -#undef ROOT_ACCESSOR - -// Utility type maps -#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \ - Map* name##_map() { \ - return Map::cast(roots_[k##Name##MapRootIndex]); \ - } - STRUCT_LIST(STRUCT_MAP_ACCESSOR) -#undef STRUCT_MAP_ACCESSOR - -#define STRING_ACCESSOR(name, str) String* name() { \ - return String::cast(roots_[k##name##RootIndex]); \ - } - INTERNALIZED_STRING_LIST(STRING_ACCESSOR) -#undef STRING_ACCESSOR - - // The hidden_string is special because it is the empty string, but does - // not match the empty string. - String* hidden_string() { return hidden_string_; } - - void set_native_contexts_list(Object* object) { - native_contexts_list_ = object; - } - Object* native_contexts_list() { return native_contexts_list_; } - - void set_array_buffers_list(Object* object) { - array_buffers_list_ = object; - } - Object* array_buffers_list() { return array_buffers_list_; } - - void set_allocation_sites_list(Object* object) { - allocation_sites_list_ = object; - } - Object* allocation_sites_list() { return allocation_sites_list_; } - Object** allocation_sites_list_address() { return &allocation_sites_list_; } - - Object* weak_object_to_code_table() { return weak_object_to_code_table_; } - - // Number of mark-sweeps. - unsigned int ms_count() { return ms_count_; } - - // Iterates over all roots in the heap. - void IterateRoots(ObjectVisitor* v, VisitMode mode); - // Iterates over all strong roots in the heap. - void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); - // Iterates over entries in the smi roots list. Only interesting to the - // serializer/deserializer, since GC does not care about smis. - void IterateSmiRoots(ObjectVisitor* v); - // Iterates over all the other roots in the heap. 
- void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); - - // Iterate pointers to from semispace of new space found in memory interval - // from start to end. - void IterateAndMarkPointersToFromSpace(Address start, - Address end, - ObjectSlotCallback callback); - - // Returns whether the object resides in new space. - inline bool InNewSpace(Object* object); - inline bool InNewSpace(Address address); - inline bool InNewSpacePage(Address address); - inline bool InFromSpace(Object* object); - inline bool InToSpace(Object* object); - - // Returns whether the object resides in old pointer space. - inline bool InOldPointerSpace(Address address); - inline bool InOldPointerSpace(Object* object); - - // Returns whether the object resides in old data space. - inline bool InOldDataSpace(Address address); - inline bool InOldDataSpace(Object* object); - - // Checks whether an address/object in the heap (including auxiliary - // area and unused area). - bool Contains(Address addr); - bool Contains(HeapObject* value); - - // Checks whether an address/object in a space. - // Currently used by tests, serialization and heap verification only. - bool InSpace(Address addr, AllocationSpace space); - bool InSpace(HeapObject* value, AllocationSpace space); - - // Finds out which space an object should get promoted to based on its type. - inline OldSpace* TargetSpace(HeapObject* object); - static inline AllocationSpace TargetSpaceId(InstanceType type); - - // Checks whether the given object is allowed to be migrated from it's - // current space into the given destination space. Used for debugging. - inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); - - // Sets the stub_cache_ (only used when expanding the dictionary). - void public_set_code_stubs(UnseededNumberDictionary* value) { - roots_[kCodeStubsRootIndex] = value; - } - - // Support for computing object sizes for old objects during GCs. 
Returns - // a function that is guaranteed to be safe for computing object sizes in - // the current GC phase. - HeapObjectCallback GcSafeSizeOfOldObjectFunction() { - return gc_safe_size_of_old_object_; - } - - // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). - void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) { - roots_[kNonMonomorphicCacheRootIndex] = value; - } - - void public_set_empty_script(Script* script) { - roots_[kEmptyScriptRootIndex] = script; - } - - void public_set_store_buffer_top(Address* top) { - roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top); - } - - void public_set_materialized_objects(FixedArray* objects) { - roots_[kMaterializedObjectsRootIndex] = objects; - } - - // Generated code can embed this address to get access to the roots. - Object** roots_array_start() { return roots_; } - - Address* store_buffer_top_address() { - return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); - } - - // Get address of native contexts list for serialization support. - Object** native_contexts_list_address() { - return &native_contexts_list_; - } - -#ifdef VERIFY_HEAP - // Verify the heap is in its normal state before or after a GC. - void Verify(); - - - bool weak_embedded_objects_verification_enabled() { - return no_weak_object_verification_scope_depth_ == 0; - } -#endif - -#ifdef DEBUG - void Print(); - void PrintHandles(); - - void OldPointerSpaceCheckStoreBuffer(); - void MapSpaceCheckStoreBuffer(); - void LargeObjectSpaceCheckStoreBuffer(); - - // Report heap statistics. - void ReportHeapStatistics(const char* title); - void ReportCodeStatistics(const char* title); -#endif - - // Zapping is needed for verify heap, and always done in debug builds. - static inline bool ShouldZapGarbage() { -#ifdef DEBUG - return true; -#else -#ifdef VERIFY_HEAP - return FLAG_verify_heap; -#else - return false; -#endif -#endif - } - - // Print short heap statistics. 
- void PrintShortHeapStatistics(); - - // Write barrier support for address[offset] = o. - INLINE(void RecordWrite(Address address, int offset)); - - // Write barrier support for address[start : start + len[ = o. - INLINE(void RecordWrites(Address address, int start, int len)); - - enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; - inline HeapState gc_state() { return gc_state_; } - - inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } - -#ifdef DEBUG - void set_allocation_timeout(int timeout) { - allocation_timeout_ = timeout; - } - - void TracePathToObjectFrom(Object* target, Object* root); - void TracePathToObject(Object* target); - void TracePathToGlobal(); -#endif - - // Callback function passed to Heap::Iterate etc. Copies an object if - // necessary, the object might be promoted to an old space. The caller must - // ensure the precondition that the object is (a) a heap object and (b) in - // the heap's from space. - static inline void ScavengePointer(HeapObject** p); - static inline void ScavengeObject(HeapObject** p, HeapObject* object); - - enum ScratchpadSlotMode { - IGNORE_SCRATCHPAD_SLOT, - RECORD_SCRATCHPAD_SLOT - }; - - // An object may have an AllocationSite associated with it through a trailing - // AllocationMemento. Its feedback should be updated when objects are found - // in the heap. - static inline void UpdateAllocationSiteFeedback( - HeapObject* object, ScratchpadSlotMode mode); - - // Support for partial snapshots. After calling this we have a linear - // space to write objects in each space. - void ReserveSpace(int *sizes, Address* addresses); - - // - // Support for the API. - // - - bool CreateApiObjects(); - - // Attempt to find the number in a small cache. If we finds it, return - // the string representation of the number. Otherwise return undefined. - Object* GetNumberStringCache(Object* number); - - // Update the cache with a new number-string pair. 
- void SetNumberStringCache(Object* number, String* str); - - // Adjusts the amount of registered external memory. - // Returns the adjusted value. - inline int64_t AdjustAmountOfExternalAllocatedMemory( - int64_t change_in_bytes); - - // This is only needed for testing high promotion mode. - void SetNewSpaceHighPromotionModeActive(bool mode) { - new_space_high_promotion_mode_active_ = mode; - } - - // Returns the allocation mode (pre-tenuring) based on observed promotion - // rates of previous collections. - inline PretenureFlag GetPretenureMode() { - return FLAG_pretenuring && new_space_high_promotion_mode_active_ - ? TENURED : NOT_TENURED; - } - - inline Address* NewSpaceHighPromotionModeActiveAddress() { - return reinterpret_cast<Address*>(&new_space_high_promotion_mode_active_); - } - - inline intptr_t PromotedTotalSize() { - int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); - if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt); - if (total < 0) return 0; - return static_cast<intptr_t>(total); - } - - inline intptr_t OldGenerationSpaceAvailable() { - return old_generation_allocation_limit_ - PromotedTotalSize(); - } - - inline intptr_t OldGenerationCapacityAvailable() { - return max_old_generation_size_ - PromotedTotalSize(); - } - - static const intptr_t kMinimumOldGenerationAllocationLimit = - 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); - - intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) { - const int divisor = FLAG_stress_compaction ? 10 : 1; - intptr_t limit = - Max(old_gen_size + old_gen_size / divisor, - kMinimumOldGenerationAllocationLimit); - limit += new_space_.Capacity(); - intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; - return Min(limit, halfway_to_the_max); - } - - // Indicates whether inline bump-pointer allocation has been disabled. 
- bool inline_allocation_disabled() { return inline_allocation_disabled_; } - - // Switch whether inline bump-pointer allocation should be used. - void EnableInlineAllocation(); - void DisableInlineAllocation(); - - // Implements the corresponding V8 API function. - bool IdleNotification(int hint); - - // Declare all the root indices. This defines the root list order. - enum RootListIndex { -#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, - STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) -#undef ROOT_INDEX_DECLARATION - -#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, - INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) -#undef STRING_DECLARATION - - // Utility type maps -#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, - STRUCT_LIST(DECLARE_STRUCT_MAP) -#undef DECLARE_STRUCT_MAP - - kStringTableRootIndex, - -#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, - SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) -#undef ROOT_INDEX_DECLARATION - - kRootListLength, - kStrongRootListLength = kStringTableRootIndex, - kSmiRootsStart = kStringTableRootIndex + 1 - }; - - STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex); - STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex); - STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex); - STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex); - STATIC_CHECK(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); - - // Generated code can embed direct references to non-writable roots if - // they are in new space. - static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); - // Generated code can treat direct references to this root as constant. 
- bool RootCanBeTreatedAsConstant(RootListIndex root_index); - - MUST_USE_RESULT MaybeObject* NumberToString( - Object* number, bool check_number_string_cache = true); - MUST_USE_RESULT MaybeObject* Uint32ToString( - uint32_t value, bool check_number_string_cache = true); - - Map* MapForFixedTypedArray(ExternalArrayType array_type); - RootListIndex RootIndexForFixedTypedArray( - ExternalArrayType array_type); - - Map* MapForExternalArrayType(ExternalArrayType array_type); - RootListIndex RootIndexForExternalArrayType( - ExternalArrayType array_type); - - RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind); - RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); - ExternalArray* EmptyExternalArrayForMap(Map* map); - FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); - - void RecordStats(HeapStats* stats, bool take_snapshot = false); - - // Copy block of memory from src to dst. Size of block should be aligned - // by pointer size. - static inline void CopyBlock(Address dst, Address src, int byte_size); - - // Optimized version of memmove for blocks with pointer size aligned sizes and - // pointer size aligned addresses. - static inline void MoveBlock(Address dst, Address src, int byte_size); - - // Check new space expansion criteria and expand semispaces if it was hit. 
- void CheckNewSpaceExpansionCriteria(); - - inline void IncrementYoungSurvivorsCounter(int survived) { - ASSERT(survived >= 0); - young_survivors_after_last_gc_ = survived; - survived_since_last_expansion_ += survived; - } - - inline bool NextGCIsLikelyToBeFull() { - if (FLAG_gc_global) return true; - - if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true; - - intptr_t adjusted_allocation_limit = - old_generation_allocation_limit_ - new_space_.Capacity(); - - if (PromotedTotalSize() >= adjusted_allocation_limit) return true; - - return false; - } - - void UpdateNewSpaceReferencesInExternalStringTable( - ExternalStringTableUpdaterCallback updater_func); - - void UpdateReferencesInExternalStringTable( - ExternalStringTableUpdaterCallback updater_func); - - void ProcessWeakReferences(WeakObjectRetainer* retainer); - - void VisitExternalResources(v8::ExternalResourceVisitor* visitor); - - // Helper function that governs the promotion policy from new space to - // old. If the object's old address lies below the new space's age - // mark or if we've already filled the bottom 1/16th of the to space, - // we try to promote this object. - inline bool ShouldBePromoted(Address old_address, int object_size); - - void ClearJSFunctionResultCaches(); - - void ClearNormalizedMapCaches(); - - GCTracer* tracer() { return tracer_; } - - // Returns the size of objects residing in non new spaces. - intptr_t PromotedSpaceSizeOfObjects(); - - double total_regexp_code_generated() { return total_regexp_code_generated_; } - void IncreaseTotalRegexpCodeGenerated(int size) { - total_regexp_code_generated_ += size; - } - - void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { - if (is_crankshafted) { - crankshaft_codegen_bytes_generated_ += size; - } else { - full_codegen_bytes_generated_ += size; - } - } - - // Returns maximum GC pause. - double get_max_gc_pause() { return max_gc_pause_; } - - // Returns maximum size of objects alive after GC. 
- intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } - - // Returns minimal interval between two subsequent collections. - double get_min_in_mutator() { return min_in_mutator_; } - - // TODO(hpayer): remove, should be handled by GCTracer - void AddMarkingTime(double marking_time) { - marking_time_ += marking_time; - } - - double marking_time() const { - return marking_time_; - } - - // TODO(hpayer): remove, should be handled by GCTracer - void AddSweepingTime(double sweeping_time) { - sweeping_time_ += sweeping_time; - } - - double sweeping_time() const { - return sweeping_time_; - } - - MarkCompactCollector* mark_compact_collector() { - return &mark_compact_collector_; - } - - StoreBuffer* store_buffer() { - return &store_buffer_; - } - - Marking* marking() { - return &marking_; - } - - IncrementalMarking* incremental_marking() { - return &incremental_marking_; - } - - bool IsSweepingComplete() { - return !mark_compact_collector()->IsConcurrentSweepingInProgress() && - old_data_space()->IsLazySweepingComplete() && - old_pointer_space()->IsLazySweepingComplete(); - } - - bool AdvanceSweepers(int step_size); - - bool EnsureSweepersProgressed(int step_size) { - bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size); - sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size); - return sweeping_complete; - } - - ExternalStringTable* external_string_table() { - return &external_string_table_; - } - - // Returns the current sweep generation. 
- int sweep_generation() { - return sweep_generation_; - } - - inline Isolate* isolate(); - - void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); - void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); - - inline bool OldGenerationAllocationLimitReached(); - - inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) { - scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); - } - - void QueueMemoryChunkForFree(MemoryChunk* chunk); - void FreeQueuedChunks(); - - int gc_count() const { return gc_count_; } - - // Completely clear the Instanceof cache (to stop it keeping objects alive - // around a GC). - inline void CompletelyClearInstanceofCache(); - - // The roots that have an index less than this are always in old space. - static const int kOldSpaceRoots = 0x20; - - uint32_t HashSeed() { - uint32_t seed = static_cast<uint32_t>(hash_seed()->value()); - ASSERT(FLAG_randomize_hashes || seed == 0); - return seed; - } - - void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) { - ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0)); - set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset)); - } - - void SetConstructStubDeoptPCOffset(int pc_offset) { - ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0)); - set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); - } - - void SetGetterStubDeoptPCOffset(int pc_offset) { - ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0)); - set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); - } - - void SetSetterStubDeoptPCOffset(int pc_offset) { - ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0)); - set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); - } - - // For post mortem debugging. - void RememberUnmappedPage(Address page, bool compacted); - - // Global inline caching age: it is incremented on some GCs after context - // disposal. We use it to flush inline caches. 
- int global_ic_age() { - return global_ic_age_; - } - - void AgeInlineCaches() { - global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax; - } - - bool flush_monomorphic_ics() { return flush_monomorphic_ics_; } - - int64_t amount_of_external_allocated_memory() { - return amount_of_external_allocated_memory_; - } - - void DeoptMarkedAllocationSites(); - - // ObjectStats are kept in two arrays, counts and sizes. Related stats are - // stored in a contiguous linear buffer. Stats groups are stored one after - // another. - enum { - FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, - FIRST_FIXED_ARRAY_SUB_TYPE = - FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, - FIRST_CODE_AGE_SUB_TYPE = - FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, - OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 - }; - - void RecordObjectStats(InstanceType type, size_t size) { - ASSERT(type <= LAST_TYPE); - object_counts_[type]++; - object_sizes_[type] += size; - } - - void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { - int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; - int code_age_index = - FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; - ASSERT(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE && - code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE); - ASSERT(code_age_index >= FIRST_CODE_AGE_SUB_TYPE && - code_age_index < OBJECT_STATS_COUNT); - object_counts_[code_sub_type_index]++; - object_sizes_[code_sub_type_index] += size; - object_counts_[code_age_index]++; - object_sizes_[code_age_index] += size; - } - - void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) { - ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); - object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; - object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; - } - - void CheckpointObjectStats(); - - // We don't use a LockGuard here since we want to lock the heap - // only when 
FLAG_concurrent_recompilation is true. - class RelocationLock { - public: - explicit RelocationLock(Heap* heap) : heap_(heap) { - heap_->relocation_mutex_.Lock(); - } - - - ~RelocationLock() { - heap_->relocation_mutex_.Unlock(); - } - - private: - Heap* heap_; - }; - - MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep); - - DependentCode* LookupWeakObjectToCodeDependency(Object* obj); - - void InitializeWeakObjectToCodeTable() { - set_weak_object_to_code_table(undefined_value()); - } - - void EnsureWeakObjectToCodeTable(); - - static void FatalProcessOutOfMemory(const char* location, - bool take_snapshot = false); - - private: - Heap(); - - // This can be calculated directly from a pointer to the heap; however, it is - // more expedient to get at the isolate directly from within Heap methods. - Isolate* isolate_; - - Object* roots_[kRootListLength]; - - intptr_t code_range_size_; - int reserved_semispace_size_; - int max_semispace_size_; - int initial_semispace_size_; - intptr_t max_old_generation_size_; - intptr_t max_executable_size_; - intptr_t maximum_committed_; - - // For keeping track of how much data has survived - // scavenge since last new space expansion. - int survived_since_last_expansion_; - - // For keeping track on when to flush RegExp code. - int sweep_generation_; - - int always_allocate_scope_depth_; - int linear_allocation_scope_depth_; - - // For keeping track of context disposals. - int contexts_disposed_; - - int global_ic_age_; - - bool flush_monomorphic_ics_; - - int scan_on_scavenge_pages_; - - NewSpace new_space_; - OldSpace* old_pointer_space_; - OldSpace* old_data_space_; - OldSpace* code_space_; - MapSpace* map_space_; - CellSpace* cell_space_; - PropertyCellSpace* property_cell_space_; - LargeObjectSpace* lo_space_; - HeapState gc_state_; - int gc_post_processing_depth_; - - // Returns the amount of external memory registered since last global gc. 
- int64_t PromotedExternalMemorySize(); - - unsigned int ms_count_; // how many mark-sweep collections happened - unsigned int gc_count_; // how many gc happened - - // For post mortem debugging. - static const int kRememberedUnmappedPages = 128; - int remembered_unmapped_pages_index_; - Address remembered_unmapped_pages_[kRememberedUnmappedPages]; - - // Total length of the strings we failed to flatten since the last GC. - int unflattened_strings_length_; - -#define ROOT_ACCESSOR(type, name, camel_name) \ - inline void set_##name(type* value) { \ - /* The deserializer makes use of the fact that these common roots are */ \ - /* never in new space and never on a page that is being compacted. */ \ - ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \ - roots_[k##camel_name##RootIndex] = value; \ - } - ROOT_LIST(ROOT_ACCESSOR) -#undef ROOT_ACCESSOR - -#ifdef DEBUG - // If the --gc-interval flag is set to a positive value, this - // variable holds the value indicating the number of allocations - // remain until the next failure and garbage collection. - int allocation_timeout_; -#endif // DEBUG - - // Indicates that the new space should be kept small due to high promotion - // rates caused by the mutator allocating a lot of long-lived objects. - // TODO(hpayer): change to bool if no longer accessed from generated code - intptr_t new_space_high_promotion_mode_active_; - - // Limit that triggers a global GC on the next (normally caused) GC. This - // is checked when we have already decided to do a GC to help determine - // which collector to invoke, before expanding a paged space in the old - // generation and on every allocation in large object space. - intptr_t old_generation_allocation_limit_; - - // Used to adjust the limits that control the timing of the next GC. - intptr_t size_of_old_gen_at_last_old_space_gc_; - - // Limit on the amount of externally allocated memory allowed - // between global GCs. If reached a global GC is forced. 
- intptr_t external_allocation_limit_; - - // The amount of external memory registered through the API kept alive - // by global handles - int64_t amount_of_external_allocated_memory_; - - // Caches the amount of external memory registered at the last global gc. - int64_t amount_of_external_allocated_memory_at_last_global_gc_; - - // Indicates that an allocation has failed in the old generation since the - // last GC. - bool old_gen_exhausted_; - - // Indicates that inline bump-pointer allocation has been globally disabled - // for all spaces. This is used to disable allocations in generated code. - bool inline_allocation_disabled_; - - // Weak list heads, threaded through the objects. - // List heads are initilized lazily and contain the undefined_value at start. - Object* native_contexts_list_; - Object* array_buffers_list_; - Object* allocation_sites_list_; - - // WeakHashTable that maps objects embedded in optimized code to dependent - // code list. It is initilized lazily and contains the undefined_value at - // start. - Object* weak_object_to_code_table_; - - StoreBufferRebuilder store_buffer_rebuilder_; - - struct StringTypeTable { - InstanceType type; - int size; - RootListIndex index; - }; - - struct ConstantStringTable { - const char* contents; - RootListIndex index; - }; - - struct StructTable { - InstanceType type; - int size; - RootListIndex index; - }; - - static const StringTypeTable string_type_table[]; - static const ConstantStringTable constant_string_table[]; - static const StructTable struct_table[]; - - // The special hidden string which is an empty string, but does not match - // any string when looked up in properties. - String* hidden_string_; - - // GC callback function, called before and after mark-compact GC. - // Allocations in the callback function are disallowed. 
- struct GCPrologueCallbackPair { - GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback, - GCType gc_type, - bool pass_isolate) - : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) { - } - bool operator==(const GCPrologueCallbackPair& pair) const { - return pair.callback == callback; - } - v8::Isolate::GCPrologueCallback callback; - GCType gc_type; - // TODO(dcarney): remove variable - bool pass_isolate_; - }; - List<GCPrologueCallbackPair> gc_prologue_callbacks_; - - struct GCEpilogueCallbackPair { - GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback, - GCType gc_type, - bool pass_isolate) - : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) { - } - bool operator==(const GCEpilogueCallbackPair& pair) const { - return pair.callback == callback; - } - v8::Isolate::GCPrologueCallback callback; - GCType gc_type; - // TODO(dcarney): remove variable - bool pass_isolate_; - }; - List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; - - // Support for computing object sizes during GC. - HeapObjectCallback gc_safe_size_of_old_object_; - static int GcSafeSizeOfOldObject(HeapObject* object); - - // Update the GC state. Called from the mark-compact collector. - void MarkMapPointersAsEncoded(bool encoded) { - ASSERT(!encoded); - gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; - } - - // Code that should be run before and after each GC. Includes some - // reporting/verification activities when compiled with DEBUG set. - void GarbageCollectionPrologue(); - void GarbageCollectionEpilogue(); - - // Pretenuring decisions are made based on feedback collected during new - // space evacuation. Note that between feedback collection and calling this - // method object in old space must not move. - // Right now we only process pretenuring feedback in high promotion mode. 
- void ProcessPretenuringFeedback(); - - // Checks whether a global GC is necessary - GarbageCollector SelectGarbageCollector(AllocationSpace space, - const char** reason); - - // Make sure there is a filler value behind the top of the new space - // so that the GC does not confuse some unintialized/stale memory - // with the allocation memento of the object at the top - void EnsureFillerObjectAtTop(); - - // Performs garbage collection operation. - // Returns whether there is a chance that another major GC could - // collect more garbage. - bool CollectGarbage( - GarbageCollector collector, - const char* gc_reason, - const char* collector_reason, - const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); - - // Performs garbage collection - // Returns whether there is a chance another major GC could - // collect more garbage. - bool PerformGarbageCollection( - GarbageCollector collector, - GCTracer* tracer, - const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); - - inline void UpdateOldSpaceLimits(); - - // Selects the proper allocation space depending on the given object - // size, pretenuring decision, and preferred old-space. - static AllocationSpace SelectSpace(int object_size, - AllocationSpace preferred_old_space, - PretenureFlag pretenure) { - ASSERT(preferred_old_space == OLD_POINTER_SPACE || - preferred_old_space == OLD_DATA_SPACE); - if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; - return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE; - } - - // Allocate an uninitialized fixed array. - MUST_USE_RESULT MaybeObject* AllocateRawFixedArray( - int length, PretenureFlag pretenure); - - // Allocate an uninitialized fixed double array. - MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray( - int length, PretenureFlag pretenure); - - // Allocate an initialized fixed array with the given filler value. 
- MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithFiller( - int length, PretenureFlag pretenure, Object* filler); - - // Initializes a JSObject based on its map. - void InitializeJSObjectFromMap(JSObject* obj, - FixedArray* properties, - Map* map); - void InitializeAllocationMemento(AllocationMemento* memento, - AllocationSite* allocation_site); - - bool CreateInitialMaps(); - bool CreateInitialObjects(); - - // These five Create*EntryStub functions are here and forced to not be inlined - // because of a gcc-4.4 bug that assigns wrong vtable entries. - NO_INLINE(void CreateJSEntryStub()); - NO_INLINE(void CreateJSConstructEntryStub()); - - void CreateFixedStubs(); - - MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string, - Object* to_number, - byte kind); - - // Allocate a JSArray with no elements - MUST_USE_RESULT MaybeObject* AllocateJSArray( - ElementsKind elements_kind, - PretenureFlag pretenure = NOT_TENURED); - - // Allocate empty fixed array. - MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray(); - - // Allocate empty external array of given type. - MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray( - ExternalArrayType array_type); - - // Allocate empty fixed typed array of given type. - MUST_USE_RESULT MaybeObject* AllocateEmptyFixedTypedArray( - ExternalArrayType array_type); - - // Allocate empty fixed double array. - MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray(); - - // Allocate empty constant pool array. - MUST_USE_RESULT MaybeObject* AllocateEmptyConstantPoolArray(); - - // Allocate a tenured simple cell. - MUST_USE_RESULT MaybeObject* AllocateCell(Object* value); - - // Allocate a tenured JS global property cell initialized with the hole. - MUST_USE_RESULT MaybeObject* AllocatePropertyCell(); - - // Allocate Box. - MUST_USE_RESULT MaybeObject* AllocateBox(Object* value, - PretenureFlag pretenure); - - // Performs a minor collection in new generation. 
- void Scavenge(); - - // Commits from space if it is uncommitted. - void EnsureFromSpaceIsCommitted(); - - // Uncommit unused semi space. - bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } - - // Fill in bogus values in from space - void ZapFromSpace(); - - static String* UpdateNewSpaceReferenceInExternalStringTableEntry( - Heap* heap, - Object** pointer); - - Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); - static void ScavengeStoreBufferCallback(Heap* heap, - MemoryChunk* page, - StoreBufferEvent event); - - // Performs a major collection in the whole heap. - void MarkCompact(GCTracer* tracer); - - // Code to be run before and after mark-compact. - void MarkCompactPrologue(); - - void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots); - void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots); - void ProcessAllocationSites(WeakObjectRetainer* retainer, bool record_slots); - - // Deopts all code that contains allocation instruction which are tenured or - // not tenured. Moreover it clears the pretenuring allocation site statistics. - void ResetAllAllocationSitesDependentCode(PretenureFlag flag); - - // Evaluates local pretenuring for the old space and calls - // ResetAllTenuredAllocationSitesDependentCode if too many objects died in - // the old space. - void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); - - // Called on heap tear-down. - void TearDownArrayBuffers(); - - // Record statistics before and after garbage collection. - void ReportStatisticsBeforeGC(); - void ReportStatisticsAfterGC(); - - // Slow part of scavenge object. - static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); - - // Initializes a function with a shared part and prototype. - // Note: this code was factored out of AllocateFunction such that - // other parts of the VM could use it. 
Specifically, a function that creates - // instances of type JS_FUNCTION_TYPE benefit from the use of this function. - // Please note this does not perform a garbage collection. - inline void InitializeFunction( - JSFunction* function, - SharedFunctionInfo* shared, - Object* prototype); - - // Total RegExp code ever generated - double total_regexp_code_generated_; - - GCTracer* tracer_; - - // Allocates a small number to string cache. - MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache(); - // Creates and installs the full-sized number string cache. - void AllocateFullSizeNumberStringCache(); - // Get the length of the number to string cache based on the max semispace - // size. - int FullSizeNumberStringCacheLength(); - // Flush the number to string cache. - void FlushNumberStringCache(); - - // Allocates a fixed-size allocation sites scratchpad. - MUST_USE_RESULT MaybeObject* AllocateAllocationSitesScratchpad(); - - // Sets used allocation sites entries to undefined. - void FlushAllocationSitesScratchpad(); - - // Initializes the allocation sites scratchpad with undefined values. - void InitializeAllocationSitesScratchpad(); - - // Adds an allocation site to the scratchpad if there is space left. 
- void AddAllocationSiteToScratchpad(AllocationSite* site, - ScratchpadSlotMode mode); - - void UpdateSurvivalRateTrend(int start_new_space_size); - - enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING }; - - static const int kYoungSurvivalRateHighThreshold = 90; - static const int kYoungSurvivalRateLowThreshold = 10; - static const int kYoungSurvivalRateAllowedDeviation = 15; - - static const int kOldSurvivalRateLowThreshold = 20; - - int young_survivors_after_last_gc_; - int high_survival_rate_period_length_; - int low_survival_rate_period_length_; - double survival_rate_; - SurvivalRateTrend previous_survival_rate_trend_; - SurvivalRateTrend survival_rate_trend_; - - void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) { - ASSERT(survival_rate_trend != FLUCTUATING); - previous_survival_rate_trend_ = survival_rate_trend_; - survival_rate_trend_ = survival_rate_trend; - } - - SurvivalRateTrend survival_rate_trend() { - if (survival_rate_trend_ == STABLE) { - return STABLE; - } else if (previous_survival_rate_trend_ == STABLE) { - return survival_rate_trend_; - } else if (survival_rate_trend_ != previous_survival_rate_trend_) { - return FLUCTUATING; - } else { - return survival_rate_trend_; - } - } - - bool IsStableOrIncreasingSurvivalTrend() { - switch (survival_rate_trend()) { - case STABLE: - case INCREASING: - return true; - default: - return false; - } - } - - bool IsStableOrDecreasingSurvivalTrend() { - switch (survival_rate_trend()) { - case STABLE: - case DECREASING: - return true; - default: - return false; - } - } - - bool IsIncreasingSurvivalTrend() { - return survival_rate_trend() == INCREASING; - } - - bool IsHighSurvivalRate() { - return high_survival_rate_period_length_ > 0; - } - - bool IsLowSurvivalRate() { - return low_survival_rate_period_length_ > 0; - } - - void SelectScavengingVisitorsTable(); - - void StartIdleRound() { - mark_sweeps_since_idle_round_started_ = 0; - } - - void FinishIdleRound() { - 
mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound; - scavenges_since_last_idle_round_ = 0; - } - - bool EnoughGarbageSinceLastIdleRound() { - return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold); - } - - // Estimates how many milliseconds a Mark-Sweep would take to complete. - // In idle notification handler we assume that this function will return: - // - a number less than 10 for small heaps, which are less than 8Mb. - // - a number greater than 10 for large heaps, which are greater than 32Mb. - int TimeMarkSweepWouldTakeInMs() { - // Rough estimate of how many megabytes of heap can be processed in 1 ms. - static const int kMbPerMs = 2; - - int heap_size_mb = static_cast<int>(SizeOfObjects() / MB); - return heap_size_mb / kMbPerMs; - } - - // Returns true if no more GC work is left. - bool IdleGlobalGC(); - - void AdvanceIdleIncrementalMarking(intptr_t step_size); - - void ClearObjectStats(bool clear_last_time_stats = false); - - void set_weak_object_to_code_table(Object* value) { - ASSERT(!InNewSpace(value)); - weak_object_to_code_table_ = value; - } - - Object** weak_object_to_code_table_address() { - return &weak_object_to_code_table_; - } - - static const int kInitialStringTableSize = 2048; - static const int kInitialEvalCacheSize = 64; - static const int kInitialNumberStringCacheSize = 256; - - // Object counts and used memory by InstanceType - size_t object_counts_[OBJECT_STATS_COUNT]; - size_t object_counts_last_time_[OBJECT_STATS_COUNT]; - size_t object_sizes_[OBJECT_STATS_COUNT]; - size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; - - // Maximum GC pause. - double max_gc_pause_; - - // Total time spent in GC. - double total_gc_time_ms_; - - // Maximum size of objects alive after GC. - intptr_t max_alive_after_gc_; - - // Minimal interval between two subsequent collections. - double min_in_mutator_; - - // Size of objects alive after last GC. 
- intptr_t alive_after_last_gc_; - - double last_gc_end_timestamp_; - - // Cumulative GC time spent in marking - double marking_time_; - - // Cumulative GC time spent in sweeping - double sweeping_time_; - - MarkCompactCollector mark_compact_collector_; - - StoreBuffer store_buffer_; - - Marking marking_; - - IncrementalMarking incremental_marking_; - - int number_idle_notifications_; - unsigned int last_idle_notification_gc_count_; - bool last_idle_notification_gc_count_init_; - - int mark_sweeps_since_idle_round_started_; - unsigned int gc_count_at_last_idle_gc_; - int scavenges_since_last_idle_round_; - - // These two counters are monotomically increasing and never reset. - size_t full_codegen_bytes_generated_; - size_t crankshaft_codegen_bytes_generated_; - - // If the --deopt_every_n_garbage_collections flag is set to a positive value, - // this variable holds the number of garbage collections since the last - // deoptimization triggered by garbage collection. - int gcs_since_last_deopt_; - -#ifdef VERIFY_HEAP - int no_weak_object_verification_scope_depth_; -#endif - - static const int kAllocationSiteScratchpadSize = 256; - int allocation_sites_scratchpad_length_; - - static const int kMaxMarkSweepsInIdleRound = 7; - static const int kIdleScavengeThreshold = 5; - - // Shared state read by the scavenge collector and set by ScavengeObject. - PromotionQueue promotion_queue_; - - // Flag is set when the heap has been configured. The heap can be repeatedly - // configured through the API until it is set up. 
- bool configured_; - - ExternalStringTable external_string_table_; - - VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; - - MemoryChunk* chunks_queued_for_free_; - - Mutex relocation_mutex_; - - int gc_callbacks_depth_; - - friend class Factory; - friend class GCTracer; - friend class AlwaysAllocateScope; - friend class Page; - friend class Isolate; - friend class MarkCompactCollector; - friend class MarkCompactMarkingVisitor; - friend class MapCompact; -#ifdef VERIFY_HEAP - friend class NoWeakObjectVerificationScope; -#endif - friend class GCCallbacksScope; - - DISALLOW_COPY_AND_ASSIGN(Heap); -}; - - -class HeapStats { - public: - static const int kStartMarker = 0xDECADE00; - static const int kEndMarker = 0xDECADE01; - - int* start_marker; // 0 - int* new_space_size; // 1 - int* new_space_capacity; // 2 - intptr_t* old_pointer_space_size; // 3 - intptr_t* old_pointer_space_capacity; // 4 - intptr_t* old_data_space_size; // 5 - intptr_t* old_data_space_capacity; // 6 - intptr_t* code_space_size; // 7 - intptr_t* code_space_capacity; // 8 - intptr_t* map_space_size; // 9 - intptr_t* map_space_capacity; // 10 - intptr_t* cell_space_size; // 11 - intptr_t* cell_space_capacity; // 12 - intptr_t* lo_space_size; // 13 - int* global_handle_count; // 14 - int* weak_global_handle_count; // 15 - int* pending_global_handle_count; // 16 - int* near_death_global_handle_count; // 17 - int* free_global_handle_count; // 18 - intptr_t* memory_allocator_size; // 19 - intptr_t* memory_allocator_capacity; // 20 - int* objects_per_type; // 21 - int* size_per_type; // 22 - int* os_error; // 23 - int* end_marker; // 24 - intptr_t* property_cell_space_size; // 25 - intptr_t* property_cell_space_capacity; // 26 -}; - - -class AlwaysAllocateScope { - public: - explicit inline AlwaysAllocateScope(Isolate* isolate); - inline ~AlwaysAllocateScope(); - - private: - // Implicitly disable artificial allocation failures. 
- Heap* heap_; - DisallowAllocationFailure daf_; -}; - - -#ifdef VERIFY_HEAP -class NoWeakObjectVerificationScope { - public: - inline NoWeakObjectVerificationScope(); - inline ~NoWeakObjectVerificationScope(); -}; -#endif - - -class GCCallbacksScope { - public: - explicit inline GCCallbacksScope(Heap* heap); - inline ~GCCallbacksScope(); - - inline bool CheckReenter(); - - private: - Heap* heap_; -}; - - -// Visitor class to verify interior pointers in spaces that do not contain -// or care about intergenerational references. All heap object pointers have to -// point into the heap to a location that has a map pointer at its first word. -// Caveat: Heap::Contains is an approximation because it can return true for -// objects in a heap space but above the allocation pointer. -class VerifyPointersVisitor: public ObjectVisitor { - public: - inline void VisitPointers(Object** start, Object** end); -}; - - -// Verify that all objects are Smis. -class VerifySmisVisitor: public ObjectVisitor { - public: - inline void VisitPointers(Object** start, Object** end); -}; - - -// Space iterator for iterating over all spaces of the heap. Returns each space -// in turn, and null when it is done. -class AllSpaces BASE_EMBEDDED { - public: - explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {} - Space* next(); - private: - Heap* heap_; - int counter_; -}; - - -// Space iterator for iterating over all old spaces of the heap: Old pointer -// space, old data space and code space. Returns each space in turn, and null -// when it is done. -class OldSpaces BASE_EMBEDDED { - public: - explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} - OldSpace* next(); - private: - Heap* heap_; - int counter_; -}; - - -// Space iterator for iterating over all the paged spaces of the heap: Map -// space, old pointer space, old data space, code space and cell space. Returns -// each space in turn, and null when it is done. 
-class PagedSpaces BASE_EMBEDDED { - public: - explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} - PagedSpace* next(); - private: - Heap* heap_; - int counter_; -}; - - -// Space iterator for iterating over all spaces of the heap. -// For each space an object iterator is provided. The deallocation of the -// returned object iterators is handled by the space iterator. -class SpaceIterator : public Malloced { - public: - explicit SpaceIterator(Heap* heap); - SpaceIterator(Heap* heap, HeapObjectCallback size_func); - virtual ~SpaceIterator(); - - bool has_next(); - ObjectIterator* next(); - - private: - ObjectIterator* CreateIterator(); - - Heap* heap_; - int current_space_; // from enum AllocationSpace. - ObjectIterator* iterator_; // object iterator for the current space. - HeapObjectCallback size_func_; -}; - - -// A HeapIterator provides iteration over the whole heap. It -// aggregates the specific iterators for the different spaces as -// these can only iterate over one space only. -// -// HeapIterator can skip free list nodes (that is, de-allocated heap -// objects that still remain in the heap). As implementation of free -// nodes filtering uses GC marks, it can't be used during MS/MC GC -// phases. Also, it is forbidden to interrupt iteration in this mode, -// as this will leave heap objects marked (and thus, unusable). -class HeapObjectsFilter; - -class HeapIterator BASE_EMBEDDED { - public: - enum HeapObjectsFiltering { - kNoFiltering, - kFilterUnreachable - }; - - explicit HeapIterator(Heap* heap); - HeapIterator(Heap* heap, HeapObjectsFiltering filtering); - ~HeapIterator(); - - HeapObject* next(); - void reset(); - - private: - // Perform the initialization. - void Init(); - // Perform all necessary shutdown (destruction) work. - void Shutdown(); - HeapObject* NextObject(); - - Heap* heap_; - HeapObjectsFiltering filtering_; - HeapObjectsFilter* filter_; - // Space iterator for iterating all the spaces. 
- SpaceIterator* space_iterator_; - // Object iterator for the space currently being iterated. - ObjectIterator* object_iterator_; -}; - - -// Cache for mapping (map, property name) into field offset. -// Cleared at startup and prior to mark sweep collection. -class KeyedLookupCache { - public: - // Lookup field offset for (map, name). If absent, -1 is returned. - int Lookup(Map* map, Name* name); - - // Update an element in the cache. - void Update(Map* map, Name* name, int field_offset); - - // Clear the cache. - void Clear(); - - static const int kLength = 256; - static const int kCapacityMask = kLength - 1; - static const int kMapHashShift = 5; - static const int kHashMask = -4; // Zero the last two bits. - static const int kEntriesPerBucket = 4; - static const int kNotFound = -1; - - // kEntriesPerBucket should be a power of 2. - STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0); - STATIC_ASSERT(kEntriesPerBucket == -kHashMask); - - private: - KeyedLookupCache() { - for (int i = 0; i < kLength; ++i) { - keys_[i].map = NULL; - keys_[i].name = NULL; - field_offsets_[i] = kNotFound; - } - } - - static inline int Hash(Map* map, Name* name); - - // Get the address of the keys and field_offsets arrays. Used in - // generated code to perform cache lookups. - Address keys_address() { - return reinterpret_cast<Address>(&keys_); - } - - Address field_offsets_address() { - return reinterpret_cast<Address>(&field_offsets_); - } - - struct Key { - Map* map; - Name* name; - }; - - Key keys_[kLength]; - int field_offsets_[kLength]; - - friend class ExternalReference; - friend class Isolate; - DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache); -}; - - -// Cache for mapping (map, property name) into descriptor index. -// The cache contains both positive and negative results. -// Descriptor index equals kNotFound means the property is absent. -// Cleared at startup and prior to any gc. 
-class DescriptorLookupCache { - public: - // Lookup descriptor index for (map, name). - // If absent, kAbsent is returned. - int Lookup(Map* source, Name* name) { - if (!name->IsUniqueName()) return kAbsent; - int index = Hash(source, name); - Key& key = keys_[index]; - if ((key.source == source) && (key.name == name)) return results_[index]; - return kAbsent; - } - - // Update an element in the cache. - void Update(Map* source, Name* name, int result) { - ASSERT(result != kAbsent); - if (name->IsUniqueName()) { - int index = Hash(source, name); - Key& key = keys_[index]; - key.source = source; - key.name = name; - results_[index] = result; - } - } - - // Clear the cache. - void Clear(); - - static const int kAbsent = -2; - - private: - DescriptorLookupCache() { - for (int i = 0; i < kLength; ++i) { - keys_[i].source = NULL; - keys_[i].name = NULL; - results_[i] = kAbsent; - } - } - - static int Hash(Object* source, Name* name) { - // Uses only lower 32 bits if pointers are larger. - uint32_t source_hash = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) - >> kPointerSizeLog2; - uint32_t name_hash = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) - >> kPointerSizeLog2; - return (source_hash ^ name_hash) % kLength; - } - - static const int kLength = 64; - struct Key { - Map* source; - Name* name; - }; - - Key keys_[kLength]; - int results_[kLength]; - - friend class Isolate; - DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache); -}; - - -// GCTracer collects and prints ONE line after each garbage collector -// invocation IFF --trace_gc is used. 
- -class GCTracer BASE_EMBEDDED { - public: - class Scope BASE_EMBEDDED { - public: - enum ScopeId { - EXTERNAL, - MC_MARK, - MC_SWEEP, - MC_SWEEP_NEWSPACE, - MC_SWEEP_OLDSPACE, - MC_EVACUATE_PAGES, - MC_UPDATE_NEW_TO_NEW_POINTERS, - MC_UPDATE_ROOT_TO_NEW_POINTERS, - MC_UPDATE_OLD_TO_NEW_POINTERS, - MC_UPDATE_POINTERS_TO_EVACUATED, - MC_UPDATE_POINTERS_BETWEEN_EVACUATED, - MC_UPDATE_MISC_POINTERS, - MC_WEAKCOLLECTION_PROCESS, - MC_WEAKCOLLECTION_CLEAR, - MC_FLUSH_CODE, - kNumberOfScopes - }; - - Scope(GCTracer* tracer, ScopeId scope) - : tracer_(tracer), - scope_(scope) { - start_time_ = OS::TimeCurrentMillis(); - } - - ~Scope() { - ASSERT(scope_ < kNumberOfScopes); // scope_ is unsigned. - tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_; - } - - private: - GCTracer* tracer_; - ScopeId scope_; - double start_time_; - }; - - explicit GCTracer(Heap* heap, - const char* gc_reason, - const char* collector_reason); - ~GCTracer(); - - // Sets the collector. - void set_collector(GarbageCollector collector) { collector_ = collector; } - - // Sets the GC count. - void set_gc_count(unsigned int count) { gc_count_ = count; } - - // Sets the full GC count. - void set_full_gc_count(int count) { full_gc_count_ = count; } - - void increment_promoted_objects_size(int object_size) { - promoted_objects_size_ += object_size; - } - - void increment_nodes_died_in_new_space() { - nodes_died_in_new_space_++; - } - - void increment_nodes_copied_in_new_space() { - nodes_copied_in_new_space_++; - } - - void increment_nodes_promoted() { - nodes_promoted_++; - } - - private: - // Returns a string matching the collector. - const char* CollectorString(); - - // Returns size of object in heap (in MB). - inline double SizeOfHeapObjects(); - - // Timestamp set in the constructor. - double start_time_; - - // Size of objects in heap set in constructor. - intptr_t start_object_size_; - - // Size of memory allocated from OS set in constructor. 
- intptr_t start_memory_size_; - - // Type of collector. - GarbageCollector collector_; - - // A count (including this one, e.g. the first collection is 1) of the - // number of garbage collections. - unsigned int gc_count_; - - // A count (including this one) of the number of full garbage collections. - int full_gc_count_; - - // Amounts of time spent in different scopes during GC. - double scopes_[Scope::kNumberOfScopes]; - - // Total amount of space either wasted or contained in one of free lists - // before the current GC. - intptr_t in_free_list_or_wasted_before_gc_; - - // Difference between space used in the heap at the beginning of the current - // collection and the end of the previous collection. - intptr_t allocated_since_last_gc_; - - // Amount of time spent in mutator that is time elapsed between end of the - // previous collection and the beginning of the current one. - double spent_in_mutator_; - - // Size of objects promoted during the current collection. - intptr_t promoted_objects_size_; - - // Number of died nodes in the new space. - int nodes_died_in_new_space_; - - // Number of copied nodes to the new space. - int nodes_copied_in_new_space_; - - // Number of promoted nodes to the old space. - int nodes_promoted_; - - // Incremental marking steps counters. - int steps_count_; - double steps_took_; - double longest_step_; - int steps_count_since_last_gc_; - double steps_took_since_last_gc_; - - Heap* heap_; - - const char* gc_reason_; - const char* collector_reason_; -}; - - -class RegExpResultsCache { - public: - enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS }; - - // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi. - // On success, the returned result is guaranteed to be a COW-array. - static Object* Lookup(Heap* heap, - String* key_string, - Object* key_pattern, - ResultsCacheType type); - // Attempt to add value_array to the cache specified by type. 
On success, - // value_array is turned into a COW-array. - static void Enter(Heap* heap, - String* key_string, - Object* key_pattern, - FixedArray* value_array, - ResultsCacheType type); - static void Clear(FixedArray* cache); - static const int kRegExpResultsCacheSize = 0x100; - - private: - static const int kArrayEntriesPerCacheEntry = 4; - static const int kStringOffset = 0; - static const int kPatternOffset = 1; - static const int kArrayOffset = 2; -}; - - -// Abstract base class for checking whether a weak object should be retained. -class WeakObjectRetainer { - public: - virtual ~WeakObjectRetainer() {} - - // Return whether this object should be retained. If NULL is returned the - // object has no references. Otherwise the address of the retained object - // should be returned as in some GC situations the object has been moved. - virtual Object* RetainAs(Object* object) = 0; -}; - - -// Intrusive object marking uses least significant bit of -// heap object's map word to mark objects. -// Normally all map words have least significant bit set -// because they contain tagged map pointer. -// If the bit is not set object is marked. -// All objects should be unmarked before resuming -// JavaScript execution. 
-class IntrusiveMarking { - public: - static bool IsMarked(HeapObject* object) { - return (object->map_word().ToRawValue() & kNotMarkedBit) == 0; - } - - static void ClearMark(HeapObject* object) { - uintptr_t map_word = object->map_word().ToRawValue(); - object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit)); - ASSERT(!IsMarked(object)); - } - - static void SetMark(HeapObject* object) { - uintptr_t map_word = object->map_word().ToRawValue(); - object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit)); - ASSERT(IsMarked(object)); - } - - static Map* MapOfMarkedObject(HeapObject* object) { - uintptr_t map_word = object->map_word().ToRawValue(); - return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap(); - } - - static int SizeOfMarkedObject(HeapObject* object) { - return object->SizeFromMap(MapOfMarkedObject(object)); - } - - private: - static const uintptr_t kNotMarkedBit = 0x1; - STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); -}; - - -#ifdef DEBUG -// Helper class for tracing paths to a search target Object from all roots. -// The TracePathFrom() method can be used to trace paths from a specific -// object to the search target object. -class PathTracer : public ObjectVisitor { - public: - enum WhatToFind { - FIND_ALL, // Will find all matches. - FIND_FIRST // Will stop the search after first match. - }; - - // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop - // after the first match. If FIND_ALL is specified, then tracing will be - // done for all matches. 
- PathTracer(Object* search_target, - WhatToFind what_to_find, - VisitMode visit_mode) - : search_target_(search_target), - found_target_(false), - found_target_in_trace_(false), - what_to_find_(what_to_find), - visit_mode_(visit_mode), - object_stack_(20), - no_allocation() {} - - virtual void VisitPointers(Object** start, Object** end); - - void Reset(); - void TracePathFrom(Object** root); - - bool found() const { return found_target_; } - - static Object* const kAnyGlobalObject; - - protected: - class MarkVisitor; - class UnmarkVisitor; - - void MarkRecursively(Object** p, MarkVisitor* mark_visitor); - void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor); - virtual void ProcessResults(); - - // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. - static const int kMarkTag = 2; - - Object* search_target_; - bool found_target_; - bool found_target_in_trace_; - WhatToFind what_to_find_; - VisitMode visit_mode_; - List<Object*> object_stack_; - - DisallowHeapAllocation no_allocation; // i.e. no gc allowed. - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); -}; -#endif // DEBUG - -} } // namespace v8::internal - -#endif // V8_HEAP_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap-inl.h nodejs-0.11.15/deps/v8/src/heap-inl.h --- nodejs-0.11.13/deps/v8/src/heap-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap-inl.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,841 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_HEAP_INL_H_ -#define V8_HEAP_INL_H_ - -#include <cmath> - -#include "heap.h" -#include "heap-profiler.h" -#include "isolate.h" -#include "list-inl.h" -#include "objects.h" -#include "platform.h" -#include "v8-counters.h" -#include "store-buffer.h" -#include "store-buffer-inl.h" - -namespace v8 { -namespace internal { - -void PromotionQueue::insert(HeapObject* target, int size) { - if (emergency_stack_ != NULL) { - emergency_stack_->Add(Entry(target, size)); - return; - } - - if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) { - NewSpacePage* rear_page = - NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_)); - ASSERT(!rear_page->prev_page()->is_anchor()); - rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end()); - ActivateGuardIfOnTheSamePage(); - } - - if (guard_) { - ASSERT(GetHeadPage() == - Page::FromAllocationTop(reinterpret_cast<Address>(limit_))); - - if ((rear_ - 2) < limit_) { - RelocateQueueHead(); - emergency_stack_->Add(Entry(target, size)); - return; - } - } - - *(--rear_) = reinterpret_cast<intptr_t>(target); - *(--rear_) = size; - // Assert no overflow into live objects. -#ifdef DEBUG - SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(), - reinterpret_cast<Address>(rear_)); -#endif -} - - -void PromotionQueue::ActivateGuardIfOnTheSamePage() { - guard_ = guard_ || - heap_->new_space()->active_space()->current_page()->address() == - GetHeadPage()->address(); -} - - -MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str, - PretenureFlag pretenure) { - // Check for ASCII first since this is the common case. - const char* start = str.start(); - int length = str.length(); - int non_ascii_start = String::NonAsciiStart(start, length); - if (non_ascii_start >= length) { - // If the string is ASCII, we do not need to convert the characters - // since UTF8 is backwards compatible with ASCII. 
- return AllocateStringFromOneByte(str, pretenure); - } - // Non-ASCII and we need to decode. - return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure); -} - - -template<> -bool inline Heap::IsOneByte(Vector<const char> str, int chars) { - // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported? - // ASCII only check. - return chars == str.length(); -} - - -template<> -bool inline Heap::IsOneByte(String* str, int chars) { - return str->IsOneByteRepresentation(); -} - - -MaybeObject* Heap::AllocateInternalizedStringFromUtf8( - Vector<const char> str, int chars, uint32_t hash_field) { - if (IsOneByte(str, chars)) { - return AllocateOneByteInternalizedString( - Vector<const uint8_t>::cast(str), hash_field); - } - return AllocateInternalizedStringImpl<false>(str, chars, hash_field); -} - - -template<typename T> -MaybeObject* Heap::AllocateInternalizedStringImpl( - T t, int chars, uint32_t hash_field) { - if (IsOneByte(t, chars)) { - return AllocateInternalizedStringImpl<true>(t, chars, hash_field); - } - return AllocateInternalizedStringImpl<false>(t, chars, hash_field); -} - - -MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str, - uint32_t hash_field) { - if (str.length() > String::kMaxLength) { - return isolate()->ThrowInvalidStringLength(); - } - // Compute map and object size. - Map* map = ascii_internalized_string_map(); - int size = SeqOneByteString::SizeFor(str.length()); - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); - - // Allocate string. - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - // String maps are all immortal immovable objects. - reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map); - // Set length and hash fields of the allocated string. 
- String* answer = String::cast(result); - answer->set_length(str.length()); - answer->set_hash_field(hash_field); - - ASSERT_EQ(size, answer->Size()); - - // Fill in the characters. - OS::MemCopy(answer->address() + SeqOneByteString::kHeaderSize, - str.start(), str.length()); - - return answer; -} - - -MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str, - uint32_t hash_field) { - if (str.length() > String::kMaxLength) { - return isolate()->ThrowInvalidStringLength(); - } - // Compute map and object size. - Map* map = internalized_string_map(); - int size = SeqTwoByteString::SizeFor(str.length()); - AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); - - // Allocate string. - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - reinterpret_cast<HeapObject*>(result)->set_map(map); - // Set length and hash fields of the allocated string. - String* answer = String::cast(result); - answer->set_length(str.length()); - answer->set_hash_field(hash_field); - - ASSERT_EQ(size, answer->Size()); - - // Fill in the characters. 
- OS::MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, - str.start(), str.length() * kUC16Size); - - return answer; -} - -MaybeObject* Heap::CopyFixedArray(FixedArray* src) { - return CopyFixedArrayWithMap(src, src->map()); -} - - -MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) { - return CopyFixedDoubleArrayWithMap(src, src->map()); -} - - -MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) { - return CopyConstantPoolArrayWithMap(src, src->map()); -} - - -MaybeObject* Heap::AllocateRaw(int size_in_bytes, - AllocationSpace space, - AllocationSpace retry_space) { - ASSERT(AllowHandleAllocation::IsAllowed()); - ASSERT(AllowHeapAllocation::IsAllowed()); - ASSERT(gc_state_ == NOT_IN_GC); - HeapProfiler* profiler = isolate_->heap_profiler(); -#ifdef DEBUG - if (FLAG_gc_interval >= 0 && - AllowAllocationFailure::IsAllowed(isolate_) && - Heap::allocation_timeout_-- <= 0) { - return Failure::RetryAfterGC(space); - } - isolate_->counters()->objs_since_last_full()->Increment(); - isolate_->counters()->objs_since_last_young()->Increment(); -#endif - - HeapObject* object; - MaybeObject* result; - if (NEW_SPACE == space) { - result = new_space_.AllocateRaw(size_in_bytes); - if (always_allocate() && result->IsFailure() && retry_space != NEW_SPACE) { - space = retry_space; - } else { - if (profiler->is_tracking_allocations() && result->To(&object)) { - profiler->AllocationEvent(object->address(), size_in_bytes); - } - return result; - } - } - - if (OLD_POINTER_SPACE == space) { - result = old_pointer_space_->AllocateRaw(size_in_bytes); - } else if (OLD_DATA_SPACE == space) { - result = old_data_space_->AllocateRaw(size_in_bytes); - } else if (CODE_SPACE == space) { - result = code_space_->AllocateRaw(size_in_bytes); - } else if (LO_SPACE == space) { - result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE); - } else if (CELL_SPACE == space) { - result = cell_space_->AllocateRaw(size_in_bytes); - } else if (PROPERTY_CELL_SPACE == 
space) { - result = property_cell_space_->AllocateRaw(size_in_bytes); - } else { - ASSERT(MAP_SPACE == space); - result = map_space_->AllocateRaw(size_in_bytes); - } - if (result->IsFailure()) old_gen_exhausted_ = true; - if (profiler->is_tracking_allocations() && result->To(&object)) { - profiler->AllocationEvent(object->address(), size_in_bytes); - } - return result; -} - - -MaybeObject* Heap::NumberFromInt32( - int32_t value, PretenureFlag pretenure) { - if (Smi::IsValid(value)) return Smi::FromInt(value); - // Bypass NumberFromDouble to avoid various redundant checks. - return AllocateHeapNumber(FastI2D(value), pretenure); -} - - -MaybeObject* Heap::NumberFromUint32( - uint32_t value, PretenureFlag pretenure) { - if (static_cast<int32_t>(value) >= 0 && - Smi::IsValid(static_cast<int32_t>(value))) { - return Smi::FromInt(static_cast<int32_t>(value)); - } - // Bypass NumberFromDouble to avoid various redundant checks. - return AllocateHeapNumber(FastUI2D(value), pretenure); -} - - -void Heap::FinalizeExternalString(String* string) { - ASSERT(string->IsExternalString()); - v8::String::ExternalStringResourceBase** resource_addr = - reinterpret_cast<v8::String::ExternalStringResourceBase**>( - reinterpret_cast<byte*>(string) + - ExternalString::kResourceOffset - - kHeapObjectTag); - - // Dispose of the C++ object if it has not already been disposed. - if (*resource_addr != NULL) { - (*resource_addr)->Dispose(); - *resource_addr = NULL; - } -} - - -bool Heap::InNewSpace(Object* object) { - bool result = new_space_.Contains(object); - ASSERT(!result || // Either not in new space - gc_state_ != NOT_IN_GC || // ... or in the middle of GC - InToSpace(object)); // ... or in to-space (where we allocate). 
- return result; -} - - -bool Heap::InNewSpace(Address address) { - return new_space_.Contains(address); -} - - -bool Heap::InFromSpace(Object* object) { - return new_space_.FromSpaceContains(object); -} - - -bool Heap::InToSpace(Object* object) { - return new_space_.ToSpaceContains(object); -} - - -bool Heap::InOldPointerSpace(Address address) { - return old_pointer_space_->Contains(address); -} - - -bool Heap::InOldPointerSpace(Object* object) { - return InOldPointerSpace(reinterpret_cast<Address>(object)); -} - - -bool Heap::InOldDataSpace(Address address) { - return old_data_space_->Contains(address); -} - - -bool Heap::InOldDataSpace(Object* object) { - return InOldDataSpace(reinterpret_cast<Address>(object)); -} - - -bool Heap::OldGenerationAllocationLimitReached() { - if (!incremental_marking()->IsStopped()) return false; - return OldGenerationSpaceAvailable() < 0; -} - - -bool Heap::ShouldBePromoted(Address old_address, int object_size) { - // An object should be promoted if: - // - the object has survived a scavenge operation or - // - to space is already 25% full. - NewSpacePage* page = NewSpacePage::FromAddress(old_address); - Address age_mark = new_space_.age_mark(); - bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && - (!page->ContainsLimit(age_mark) || old_address < age_mark); - return below_mark || (new_space_.Size() + object_size) >= - (new_space_.EffectiveCapacity() >> 2); -} - - -void Heap::RecordWrite(Address address, int offset) { - if (!InNewSpace(address)) store_buffer_.Mark(address + offset); -} - - -void Heap::RecordWrites(Address address, int start, int len) { - if (!InNewSpace(address)) { - for (int i = 0; i < len; i++) { - store_buffer_.Mark(address + start + i * kPointerSize); - } - } -} - - -OldSpace* Heap::TargetSpace(HeapObject* object) { - InstanceType type = object->map()->instance_type(); - AllocationSpace space = TargetSpaceId(type); - return (space == OLD_POINTER_SPACE) - ? 
old_pointer_space_ - : old_data_space_; -} - - -AllocationSpace Heap::TargetSpaceId(InstanceType type) { - // Heap numbers and sequential strings are promoted to old data space, all - // other object types are promoted to old pointer space. We do not use - // object->IsHeapNumber() and object->IsSeqString() because we already - // know that object has the heap object tag. - - // These objects are never allocated in new space. - ASSERT(type != MAP_TYPE); - ASSERT(type != CODE_TYPE); - ASSERT(type != ODDBALL_TYPE); - ASSERT(type != CELL_TYPE); - ASSERT(type != PROPERTY_CELL_TYPE); - - if (type <= LAST_NAME_TYPE) { - if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE; - ASSERT(type < FIRST_NONSTRING_TYPE); - // There are four string representations: sequential strings, external - // strings, cons strings, and sliced strings. - // Only the latter two contain non-map-word pointers to heap objects. - return ((type & kIsIndirectStringMask) == kIsIndirectStringTag) - ? OLD_POINTER_SPACE - : OLD_DATA_SPACE; - } else { - return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE; - } -} - - -bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) { - // Object migration is governed by the following rules: - // - // 1) Objects in new-space can be migrated to one of the old spaces - // that matches their target space or they stay in new-space. - // 2) Objects in old-space stay in the same space when migrating. - // 3) Fillers (two or more words) can migrate due to left-trimming of - // fixed arrays in new-space, old-data-space and old-pointer-space. - // 4) Fillers (one word) can never migrate, they are skipped by - // incremental marking explicitly to prevent invalid pattern. - // 5) Short external strings can end up in old pointer space when a cons - // string in old pointer space is made external (String::MakeExternal). - // - // Since this function is used for debugging only, we do not place - // asserts here, but check everything explicitly. 
- if (obj->map() == one_pointer_filler_map()) return false; - InstanceType type = obj->map()->instance_type(); - MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); - AllocationSpace src = chunk->owner()->identity(); - switch (src) { - case NEW_SPACE: - return dst == src || dst == TargetSpaceId(type); - case OLD_POINTER_SPACE: - return dst == src && - (dst == TargetSpaceId(type) || obj->IsFiller() || - (obj->IsExternalString() && ExternalString::cast(obj)->is_short())); - case OLD_DATA_SPACE: - return dst == src && dst == TargetSpaceId(type); - case CODE_SPACE: - return dst == src && type == CODE_TYPE; - case MAP_SPACE: - case CELL_SPACE: - case PROPERTY_CELL_SPACE: - case LO_SPACE: - return false; - } - UNREACHABLE(); - return false; -} - - -void Heap::CopyBlock(Address dst, Address src, int byte_size) { - CopyWords(reinterpret_cast<Object**>(dst), - reinterpret_cast<Object**>(src), - static_cast<size_t>(byte_size / kPointerSize)); -} - - -void Heap::MoveBlock(Address dst, Address src, int byte_size) { - ASSERT(IsAligned(byte_size, kPointerSize)); - - int size_in_words = byte_size / kPointerSize; - - if ((dst < src) || (dst >= (src + byte_size))) { - Object** src_slot = reinterpret_cast<Object**>(src); - Object** dst_slot = reinterpret_cast<Object**>(dst); - Object** end_slot = src_slot + size_in_words; - - while (src_slot != end_slot) { - *dst_slot++ = *src_slot++; - } - } else { - OS::MemMove(dst, src, static_cast<size_t>(byte_size)); - } -} - - -void Heap::ScavengePointer(HeapObject** p) { - ScavengeObject(p, *p); -} - - -void Heap::UpdateAllocationSiteFeedback(HeapObject* object, - ScratchpadSlotMode mode) { - Heap* heap = object->GetHeap(); - ASSERT(heap->InFromSpace(object)); - - if (!FLAG_allocation_site_pretenuring || - !AllocationSite::CanTrack(object->map()->instance_type())) return; - - // Check if there is potentially a memento behind the object. If - // the last word of the momento is on another page we return - // immediatelly. 
Note that we do not have to compare with the current - // top pointer of the from space page, since we always install filler - // objects above the top pointer of a from space page when performing - // a garbage collection. - Address object_address = object->address(); - Address memento_address = object_address + object->Size(); - Address last_memento_word_address = memento_address + kPointerSize; - if (!NewSpacePage::OnSamePage(object_address, - last_memento_word_address)) { - return; - } - - HeapObject* candidate = HeapObject::FromAddress(memento_address); - if (candidate->map() != heap->allocation_memento_map()) return; - - AllocationMemento* memento = AllocationMemento::cast(candidate); - if (!memento->IsValid()) return; - - if (memento->GetAllocationSite()->IncrementMementoFoundCount()) { - heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode); - } -} - - -void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { - ASSERT(object->GetIsolate()->heap()->InFromSpace(object)); - - // We use the first word (where the map pointer usually is) of a heap - // object to record the forwarding pointer. A forwarding pointer can - // point to an old space, the code space, or the to space of the new - // generation. - MapWord first_word = object->map_word(); - - // If the first word is a forwarding address, the object has already been - // copied. - if (first_word.IsForwardingAddress()) { - HeapObject* dest = first_word.ToForwardingAddress(); - ASSERT(object->GetIsolate()->heap()->InFromSpace(*p)); - *p = dest; - return; - } - - UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT); - - // AllocationMementos are unrooted and shouldn't survive a scavenge - ASSERT(object->map() != object->GetHeap()->allocation_memento_map()); - // Call the slow part of scavenge object. 
- return ScavengeObjectSlow(p, object); -} - - -bool Heap::CollectGarbage(AllocationSpace space, - const char* gc_reason, - const v8::GCCallbackFlags callbackFlags) { - const char* collector_reason = NULL; - GarbageCollector collector = SelectGarbageCollector(space, &collector_reason); - return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags); -} - - -MaybeObject* Heap::PrepareForCompare(String* str) { - // Always flatten small strings and force flattening of long strings - // after we have accumulated a certain amount we failed to flatten. - static const int kMaxAlwaysFlattenLength = 32; - static const int kFlattenLongThreshold = 16*KB; - - const int length = str->length(); - MaybeObject* obj = str->TryFlatten(); - if (length <= kMaxAlwaysFlattenLength || - unflattened_strings_length_ >= kFlattenLongThreshold) { - return obj; - } - if (obj->IsFailure()) { - unflattened_strings_length_ += length; - } - return str; -} - - -int64_t Heap::AdjustAmountOfExternalAllocatedMemory( - int64_t change_in_bytes) { - ASSERT(HasBeenSetUp()); - int64_t amount = amount_of_external_allocated_memory_ + change_in_bytes; - if (change_in_bytes > 0) { - // Avoid overflow. - if (amount > amount_of_external_allocated_memory_) { - amount_of_external_allocated_memory_ = amount; - } else { - // Give up and reset the counters in case of an overflow. - amount_of_external_allocated_memory_ = 0; - amount_of_external_allocated_memory_at_last_global_gc_ = 0; - } - int64_t amount_since_last_global_gc = PromotedExternalMemorySize(); - if (amount_since_last_global_gc > external_allocation_limit_) { - CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached"); - } - } else { - // Avoid underflow. - if (amount >= 0) { - amount_of_external_allocated_memory_ = amount; - } else { - // Give up and reset the counters in case of an underflow. 
- amount_of_external_allocated_memory_ = 0; - amount_of_external_allocated_memory_at_last_global_gc_ = 0; - } - } - if (FLAG_trace_external_memory) { - PrintPID("%8.0f ms: ", isolate()->time_millis_since_init()); - PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, " - "amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, " - "isolate=0x%08" V8PRIxPTR ".\n", - static_cast<intptr_t>(change_in_bytes / KB), - static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB), - static_cast<intptr_t>(PromotedExternalMemorySize() / KB), - reinterpret_cast<intptr_t>(isolate())); - } - ASSERT(amount_of_external_allocated_memory_ >= 0); - return amount_of_external_allocated_memory_; -} - - -Isolate* Heap::isolate() { - return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) - - reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4); -} - - -#ifdef DEBUG -#define GC_GREEDY_CHECK(ISOLATE) \ - if (FLAG_gc_greedy) (ISOLATE)->heap()->GarbageCollectionGreedyCheck() -#else -#define GC_GREEDY_CHECK(ISOLATE) { } -#endif - -// Calls the FUNCTION_CALL function and retries it up to three times -// to guarantee that any allocations performed during the call will -// succeed if there's enough memory. - -// Warning: Do not use the identifiers __object__, __maybe_object__ or -// __scope__ in a call to this macro. 
- -#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \ - do { \ - GC_GREEDY_CHECK(ISOLATE); \ - MaybeObject* __maybe_object__ = FUNCTION_CALL; \ - Object* __object__ = NULL; \ - if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ - if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ - (ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \ - allocation_space(), \ - "allocation failure"); \ - __maybe_object__ = FUNCTION_CALL; \ - if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ - if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ - (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \ - (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \ - { \ - AlwaysAllocateScope __scope__(ISOLATE); \ - __maybe_object__ = FUNCTION_CALL; \ - } \ - if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ - if (__maybe_object__->IsRetryAfterGC()) { \ - /* TODO(1181417): Fix this. */ \ - v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);\ - } \ - RETURN_EMPTY; \ - } while (false) - -#define CALL_AND_RETRY_OR_DIE( \ - ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \ - CALL_AND_RETRY( \ - ISOLATE, \ - FUNCTION_CALL, \ - RETURN_VALUE, \ - RETURN_EMPTY) - -#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \ - CALL_AND_RETRY_OR_DIE(ISOLATE, \ - FUNCTION_CALL, \ - return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \ - return Handle<TYPE>()) \ - - -#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \ - CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return) - - -#define CALL_HEAP_FUNCTION_PASS_EXCEPTION(ISOLATE, FUNCTION_CALL) \ - CALL_AND_RETRY(ISOLATE, \ - FUNCTION_CALL, \ - return __object__, \ - return __maybe_object__) - - -void ExternalStringTable::AddString(String* string) { - ASSERT(string->IsExternalString()); - if (heap_->InNewSpace(string)) { - new_space_strings_.Add(string); - } else { - old_space_strings_.Add(string); - } -} - 
- -void ExternalStringTable::Iterate(ObjectVisitor* v) { - if (!new_space_strings_.is_empty()) { - Object** start = &new_space_strings_[0]; - v->VisitPointers(start, start + new_space_strings_.length()); - } - if (!old_space_strings_.is_empty()) { - Object** start = &old_space_strings_[0]; - v->VisitPointers(start, start + old_space_strings_.length()); - } -} - - -// Verify() is inline to avoid ifdef-s around its calls in release -// mode. -void ExternalStringTable::Verify() { -#ifdef DEBUG - for (int i = 0; i < new_space_strings_.length(); ++i) { - Object* obj = Object::cast(new_space_strings_[i]); - ASSERT(heap_->InNewSpace(obj)); - ASSERT(obj != heap_->the_hole_value()); - } - for (int i = 0; i < old_space_strings_.length(); ++i) { - Object* obj = Object::cast(old_space_strings_[i]); - ASSERT(!heap_->InNewSpace(obj)); - ASSERT(obj != heap_->the_hole_value()); - } -#endif -} - - -void ExternalStringTable::AddOldString(String* string) { - ASSERT(string->IsExternalString()); - ASSERT(!heap_->InNewSpace(string)); - old_space_strings_.Add(string); -} - - -void ExternalStringTable::ShrinkNewStrings(int position) { - new_space_strings_.Rewind(position); -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Verify(); - } -#endif -} - - -void Heap::ClearInstanceofCache() { - set_instanceof_cache_function(the_hole_value()); -} - - -Object* Heap::ToBoolean(bool condition) { - return condition ? true_value() : false_value(); -} - - -void Heap::CompletelyClearInstanceofCache() { - set_instanceof_cache_map(the_hole_value()); - set_instanceof_cache_function(the_hole_value()); -} - - -AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate) - : heap_(isolate->heap()), daf_(isolate) { - // We shouldn't hit any nested scopes, because that requires - // non-handle code to call handle code. The code still works but - // performance will degrade, so we want to catch this situation - // in debug mode. 
- ASSERT(heap_->always_allocate_scope_depth_ == 0); - heap_->always_allocate_scope_depth_++; -} - - -AlwaysAllocateScope::~AlwaysAllocateScope() { - heap_->always_allocate_scope_depth_--; - ASSERT(heap_->always_allocate_scope_depth_ == 0); -} - - -#ifdef VERIFY_HEAP -NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() { - Isolate* isolate = Isolate::Current(); - isolate->heap()->no_weak_object_verification_scope_depth_++; -} - - -NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() { - Isolate* isolate = Isolate::Current(); - isolate->heap()->no_weak_object_verification_scope_depth_--; -} -#endif - - -GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) { - heap_->gc_callbacks_depth_++; -} - - -GCCallbacksScope::~GCCallbacksScope() { - heap_->gc_callbacks_depth_--; -} - - -bool GCCallbacksScope::CheckReenter() { - return heap_->gc_callbacks_depth_ == 1; -} - - -void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - HeapObject* object = HeapObject::cast(*current); - CHECK(object->GetIsolate()->heap()->Contains(object)); - CHECK(object->map()->IsMap()); - } - } -} - - -void VerifySmisVisitor::VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - CHECK((*current)->IsSmi()); - } -} - - -double GCTracer::SizeOfHeapObjects() { - return (static_cast<double>(heap_->SizeOfObjects())) / MB; -} - - -} } // namespace v8::internal - -#endif // V8_HEAP_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/heap-profiler.cc nodejs-0.11.15/deps/v8/src/heap-profiler.cc --- nodejs-0.11.13/deps/v8/src/heap-profiler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap-profiler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2009-2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" -#include "heap-profiler.h" +#include "src/heap-profiler.h" -#include "allocation-tracker.h" -#include "heap-snapshot-generator-inl.h" +#include "src/allocation-tracker.h" +#include "src/heap-snapshot-generator-inl.h" namespace v8 { namespace internal { @@ -68,7 +45,7 @@ void HeapProfiler::DefineWrapperClass( uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) { - ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId); + DCHECK(class_id != v8::HeapProfiler::kPersistentHandleNoClassId); if (wrapper_callbacks_.length() <= class_id) { wrapper_callbacks_.AddBlock( NULL, class_id - wrapper_callbacks_.length() + 1); @@ -116,7 +93,7 @@ void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) { ids_->UpdateHeapObjectsMap(); is_tracking_object_moves_ = true; - ASSERT(!is_tracking_allocations()); + DCHECK(!is_tracking_allocations()); if (track_allocations) { allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get())); heap()->DisableInlineAllocation(); @@ -196,9 +173,6 @@ Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) { - heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "HeapProfiler::FindHeapObjectById"); - DisallowHeapAllocation no_allocation; HeapObject* object = NULL; HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable); // Make sure that object with the given id is still reachable. @@ -206,7 +180,7 @@ obj != NULL; obj = iterator.next()) { if (ids_->FindEntry(obj->address()) == id) { - ASSERT(object == NULL); + DCHECK(object == NULL); object = obj; // Can't break -- kFilterUnreachable requires full heap traversal. } diff -Nru nodejs-0.11.13/deps/v8/src/heap-profiler.h nodejs-0.11.15/deps/v8/src/heap-profiler.h --- nodejs-0.11.13/deps/v8/src/heap-profiler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap-profiler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2009-2010 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HEAP_PROFILER_H_ #define V8_HEAP_PROFILER_H_ -#include "heap-snapshot-generator-inl.h" -#include "isolate.h" -#include "smart-pointers.h" +#include "src/heap-snapshot-generator-inl.h" +#include "src/isolate.h" +#include "src/smart-pointers.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/heap-snapshot-generator.cc nodejs-0.11.15/deps/v8/src/heap-snapshot-generator.cc --- nodejs-0.11.13/deps/v8/src/heap-snapshot-generator.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap-snapshot-generator.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "heap-snapshot-generator-inl.h" - -#include "allocation-tracker.h" -#include "code-stubs.h" -#include "heap-profiler.h" -#include "debug.h" -#include "types.h" -#include "v8conversions.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/heap-snapshot-generator-inl.h" + +#include "src/allocation-tracker.h" +#include "src/code-stubs.h" +#include "src/conversions.h" +#include "src/debug.h" +#include "src/heap-profiler.h" +#include "src/types.h" namespace v8 { namespace internal { @@ -45,7 +22,7 @@ from_index_(from), to_index_(to), name_(name) { - ASSERT(type == kContextVariable + DCHECK(type == kContextVariable || type == kProperty || type == kInternal || type == kShortcut @@ -58,7 +35,7 @@ from_index_(from), to_index_(to), index_(index) { - ASSERT(type == kElement || type == kHidden); + DCHECK(type == kElement || type == kHidden); } @@ -105,22 +82,22 @@ void HeapEntry::Print( const char* prefix, const char* edge_name, int max_depth, int indent) { - STATIC_CHECK(sizeof(unsigned) == sizeof(id())); - OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", - self_size(), id(), indent, ' ', prefix, edge_name); + STATIC_ASSERT(sizeof(unsigned) == sizeof(id())); + base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent, + ' ', prefix, edge_name); if (type() != kString) { - OS::Print("%s %.40s\n", 
TypeAsString(), name_); + base::OS::Print("%s %.40s\n", TypeAsString(), name_); } else { - OS::Print("\""); + base::OS::Print("\""); const char* c = name_; while (*c && (c - name_) <= 40) { if (*c != '\n') - OS::Print("%c", *c); + base::OS::Print("%c", *c); else - OS::Print("\\n"); + base::OS::Print("\\n"); ++c; } - OS::Print("\"\n"); + base::OS::Print("\"\n"); } if (--max_depth == 0) return; Vector<HeapGraphEdge*> ch = children(); @@ -135,7 +112,7 @@ edge_name = edge.name(); break; case HeapGraphEdge::kElement: - OS::SNPrintF(index, "%d", edge.index()); + SNPrintF(index, "%d", edge.index()); break; case HeapGraphEdge::kInternal: edge_prefix = "$"; @@ -146,7 +123,7 @@ break; case HeapGraphEdge::kHidden: edge_prefix = "$"; - OS::SNPrintF(index, "%d", edge.index()); + SNPrintF(index, "%d", edge.index()); break; case HeapGraphEdge::kShortcut: edge_prefix = "^"; @@ -157,7 +134,7 @@ edge_name = edge.name(); break; default: - OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type()); + SNPrintF(index, "!!! unknown edge type: %d ", edge.type()); } edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2); } @@ -178,6 +155,7 @@ case kSynthetic: return "/synthetic/"; case kConsString: return "/concatenated string/"; case kSlicedString: return "/sliced string/"; + case kSymbol: return "/symbol/"; default: return "???"; } } @@ -212,10 +190,10 @@ gc_roots_index_(HeapEntry::kNoEntry), natives_root_index_(HeapEntry::kNoEntry), max_snapshot_js_object_id_(0) { - STATIC_CHECK( + STATIC_ASSERT( sizeof(HeapGraphEdge) == SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize); - STATIC_CHECK( + STATIC_ASSERT( sizeof(HeapEntry) == SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize); USE(SnapshotSizeConstants<4>::kExpectedHeapGraphEdgeSize); @@ -240,21 +218,21 @@ HeapEntry* HeapSnapshot::AddRootEntry() { - ASSERT(root_index_ == HeapEntry::kNoEntry); - ASSERT(entries_.is_empty()); // Root entry must be the first one. 
+ DCHECK(root_index_ == HeapEntry::kNoEntry); + DCHECK(entries_.is_empty()); // Root entry must be the first one. HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, "", HeapObjectsMap::kInternalRootObjectId, 0, 0); root_index_ = entry->index(); - ASSERT(root_index_ == 0); + DCHECK(root_index_ == 0); return entry; } HeapEntry* HeapSnapshot::AddGcRootsEntry() { - ASSERT(gc_roots_index_ == HeapEntry::kNoEntry); + DCHECK(gc_roots_index_ == HeapEntry::kNoEntry); HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, "(GC roots)", HeapObjectsMap::kGcRootsObjectId, @@ -266,8 +244,8 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) { - ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); - ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); + DCHECK(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); + DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); HeapEntry* entry = AddEntry( HeapEntry::kSynthetic, VisitorSynchronization::kTagNames[tag], @@ -291,14 +269,14 @@ void HeapSnapshot::FillChildren() { - ASSERT(children().is_empty()); + DCHECK(children().is_empty()); children().Allocate(edges().length()); int children_index = 0; for (int i = 0; i < entries().length(); ++i) { HeapEntry* entry = &entries()[i]; children_index = entry->set_children_index(children_index); } - ASSERT(edges().length() == children_index); + DCHECK(edges().length() == children_index); for (int i = 0; i < edges().length(); ++i) { HeapGraphEdge* edge = &edges()[i]; edge->ReplaceToIndexWithEntry(this); @@ -397,8 +375,8 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) { - ASSERT(to != NULL); - ASSERT(from != NULL); + DCHECK(to != NULL); + DCHECK(from != NULL); if (from == to) return false; void* from_value = entries_map_.Remove(from, ComputePointerHash(from)); if (from_value == NULL) { @@ -455,7 +433,7 @@ if (entry == NULL) return 0; int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); EntryInfo& entry_info = 
entries_.at(entry_index); - ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); + DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); return entry_info.id; } @@ -463,7 +441,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr, unsigned int size, bool accessed) { - ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); + DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), true); if (entry->value != NULL) { @@ -484,7 +462,7 @@ SnapshotObjectId id = next_id_; next_id_ += kObjectIdStep; entries_.Add(EntryInfo(id, addr, size, accessed)); - ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); + DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); return id; } @@ -637,7 +615,7 @@ time_intervals_.Add(TimeInterval(next_id_)); int prefered_chunk_size = stream->GetChunkSize(); List<v8::HeapStatsUpdate> stats_buffer; - ASSERT(!entries_.is_empty()); + DCHECK(!entries_.is_empty()); EntryInfo* entry_info = &entries_.first(); EntryInfo* end_entry_info = &entries_.last() + 1; for (int time_interval_index = 0; @@ -667,7 +645,7 @@ } } } - ASSERT(entry_info == end_entry_info); + DCHECK(entry_info == end_entry_info); if (!stats_buffer.is_empty()) { OutputStream::WriteResult result = stream->WriteHeapStatsChunk( &stats_buffer.first(), stats_buffer.length()); @@ -679,7 +657,7 @@ void HeapObjectsMap::RemoveDeadEntries() { - ASSERT(entries_.length() > 0 && + DCHECK(entries_.length() > 0 && entries_.at(0).id == 0 && entries_.at(0).addr == NULL); int first_free_entry = 1; @@ -692,7 +670,7 @@ entries_.at(first_free_entry).accessed = false; HashMap::Entry* entry = entries_map_.Lookup( entry_info.addr, ComputePointerHash(entry_info.addr), false); - ASSERT(entry); + DCHECK(entry); entry->value = reinterpret_cast<void*>(first_free_entry); ++first_free_entry; } else { 
@@ -703,7 +681,7 @@ } } entries_.Rewind(first_free_entry); - ASSERT(static_cast<uint32_t>(entries_.length()) - 1 == + DCHECK(static_cast<uint32_t>(entries_.length()) - 1 == entries_map_.occupancy()); } @@ -732,7 +710,7 @@ HeapEntriesMap::HeapEntriesMap() - : entries_(HeapThingsMatch) { + : entries_(HashMap::PointersMatch) { } @@ -745,13 +723,13 @@ void HeapEntriesMap::Pair(HeapThing thing, int entry) { HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true); - ASSERT(cache_entry->value == NULL); + DCHECK(cache_entry->value == NULL); cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry)); } HeapObjectsSet::HeapObjectsSet() - : entries_(HeapEntriesMap::HeapThingsMatch) { + : entries_(HashMap::PointersMatch) { } @@ -874,6 +852,8 @@ return AddEntry(object, HeapEntry::kString, names_->GetName(String::cast(object))); + } else if (object->IsSymbol()) { + return AddEntry(object, HeapEntry::kSymbol, "symbol"); } else if (object->IsCode()) { return AddEntry(object, HeapEntry::kCode, ""); } else if (object->IsSharedFunctionInfo()) { @@ -1079,21 +1059,30 @@ static void MarkVisitedField(HeapObject* obj, int offset) { if (offset < 0) return; Address field = obj->address() + offset; - ASSERT(!Memory::Object_at(field)->IsFailure()); - ASSERT(Memory::Object_at(field)->IsHeapObject()); - *field |= kFailureTag; + DCHECK(Memory::Object_at(field)->IsHeapObject()); + intptr_t p = reinterpret_cast<intptr_t>(Memory::Object_at(field)); + DCHECK(!IsMarked(p)); + intptr_t p_tagged = p | kTag; + Memory::Object_at(field) = reinterpret_cast<Object*>(p_tagged); } private: bool CheckVisitedAndUnmark(Object** field) { - if ((*field)->IsFailure()) { - intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask; - *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag); - ASSERT((*field)->IsHeapObject()); + intptr_t p = reinterpret_cast<intptr_t>(*field); + if (IsMarked(p)) { + intptr_t p_untagged = (p & ~kTaggingMask) | kHeapObjectTag; + 
*field = reinterpret_cast<Object*>(p_untagged); + DCHECK((*field)->IsHeapObject()); return true; } return false; } + + static const intptr_t kTaggingMask = 3; + static const intptr_t kTag = 3; + + static bool IsMarked(intptr_t p) { return (p & kTaggingMask) == kTag; } + V8HeapExplorer* generator_; HeapObject* parent_obj_; int parent_; @@ -1101,21 +1090,28 @@ }; -void V8HeapExplorer::ExtractReferences(HeapObject* obj) { - HeapEntry* heap_entry = GetEntry(obj); - if (heap_entry == NULL) return; // No interest in this object. - int entry = heap_entry->index(); +bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) { + if (obj->IsFixedArray()) return false; // FixedArrays are processed on pass 2 if (obj->IsJSGlobalProxy()) { ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj)); } else if (obj->IsJSArrayBuffer()) { ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj)); } else if (obj->IsJSObject()) { + if (obj->IsJSWeakSet()) { + ExtractJSWeakCollectionReferences(entry, JSWeakSet::cast(obj)); + } else if (obj->IsJSWeakMap()) { + ExtractJSWeakCollectionReferences(entry, JSWeakMap::cast(obj)); + } else if (obj->IsJSSet()) { + ExtractJSCollectionReferences(entry, JSSet::cast(obj)); + } else if (obj->IsJSMap()) { + ExtractJSCollectionReferences(entry, JSMap::cast(obj)); + } ExtractJSObjectReferences(entry, JSObject::cast(obj)); } else if (obj->IsString()) { ExtractStringReferences(entry, String::cast(obj)); - } else if (obj->IsContext()) { - ExtractContextReferences(entry, Context::cast(obj)); + } else if (obj->IsSymbol()) { + ExtractSymbolReferences(entry, Symbol::cast(obj)); } else if (obj->IsMap()) { ExtractMapReferences(entry, Map::cast(obj)); } else if (obj->IsSharedFunctionInfo()) { @@ -1137,12 +1133,19 @@ } else if (obj->IsAllocationSite()) { ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj)); } - SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset); + return true; +} - // Extract unvisited 
fields as hidden references and restore tags - // of visited fields. - IndexedReferencesExtractor refs_extractor(this, obj, entry); - obj->Iterate(&refs_extractor); + +bool V8HeapExplorer::ExtractReferencesPass2(int entry, HeapObject* obj) { + if (!obj->IsFixedArray()) return false; + + if (obj->IsContext()) { + ExtractContextReferences(entry, Context::cast(obj)); + } else { + ExtractFixedArrayReferences(entry, FixedArray::cast(obj)); + } + return true; } @@ -1161,8 +1164,8 @@ ExtractPropertyReferences(js_obj, entry); ExtractElementReferences(js_obj, entry); ExtractInternalReferences(js_obj, entry); - SetPropertyReference( - obj, entry, heap_->proto_string(), js_obj->GetPrototype()); + PrototypeIterator iter(heap_->isolate(), js_obj); + SetPropertyReference(obj, entry, heap_->proto_string(), iter.GetCurrent()); if (obj->IsJSFunction()) { JSFunction* js_fun = JSFunction::cast(js_obj); Object* proto_or_map = js_fun->prototype_or_initial_map(); @@ -1202,9 +1205,9 @@ SetWeakReference(js_fun, entry, "next_function_link", js_fun->next_function_link(), JSFunction::kNextFunctionLinkOffset); - STATIC_CHECK(JSFunction::kNextFunctionLinkOffset + STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset == JSFunction::kNonWeakFieldsEndOffset); - STATIC_CHECK(JSFunction::kNextFunctionLinkOffset + kPointerSize + STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset + kPointerSize == JSFunction::kSize); } else if (obj->IsGlobalObject()) { GlobalObject* global_obj = GlobalObject::cast(obj); @@ -1218,9 +1221,9 @@ "global_context", global_obj->global_context(), GlobalObject::kGlobalContextOffset); SetInternalReference(global_obj, entry, - "global_receiver", global_obj->global_receiver(), - GlobalObject::kGlobalReceiverOffset); - STATIC_CHECK(GlobalObject::kHeaderSize - JSObject::kHeaderSize == + "global_proxy", global_obj->global_proxy(), + GlobalObject::kGlobalProxyOffset); + STATIC_ASSERT(GlobalObject::kHeaderSize - JSObject::kHeaderSize == 4 * kPointerSize); } else if 
(obj->IsJSArrayBufferView()) { JSArrayBufferView* view = JSArrayBufferView::cast(obj); @@ -1255,6 +1258,29 @@ } +void V8HeapExplorer::ExtractSymbolReferences(int entry, Symbol* symbol) { + SetInternalReference(symbol, entry, + "name", symbol->name(), + Symbol::kNameOffset); +} + + +void V8HeapExplorer::ExtractJSCollectionReferences(int entry, + JSCollection* collection) { + SetInternalReference(collection, entry, "table", collection->table(), + JSCollection::kTableOffset); +} + + +void V8HeapExplorer::ExtractJSWeakCollectionReferences( + int entry, JSWeakCollection* collection) { + MarkAsWeakContainer(collection->table()); + SetInternalReference(collection, entry, + "table", collection->table(), + JSWeakCollection::kTableOffset); +} + + void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) { if (context == context->declaration_context()) { ScopeInfo* scope_info = context->closure()->shared()->scope_info(); @@ -1303,10 +1329,12 @@ EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list); EXTRACT_CONTEXT_FIELD(NEXT_CONTEXT_LINK, unused, next_context_link); #undef EXTRACT_CONTEXT_FIELD - STATIC_CHECK(Context::OPTIMIZED_FUNCTIONS_LIST == Context::FIRST_WEAK_SLOT); - STATIC_CHECK(Context::NEXT_CONTEXT_LINK + 1 - == Context::NATIVE_CONTEXT_SLOTS); - STATIC_CHECK(Context::FIRST_WEAK_SLOT + 5 == Context::NATIVE_CONTEXT_SLOTS); + STATIC_ASSERT(Context::OPTIMIZED_FUNCTIONS_LIST == + Context::FIRST_WEAK_SLOT); + STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 == + Context::NATIVE_CONTEXT_SLOTS); + STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 5 == + Context::NATIVE_CONTEXT_SLOTS); } } @@ -1319,6 +1347,22 @@ TagObject(back_pointer, "(back pointer)"); SetInternalReference(transitions, transitions_entry, "back_pointer", back_pointer); + + if (FLAG_collect_maps && map->CanTransition()) { + if (!transitions->IsSimpleTransition()) { + if (transitions->HasPrototypeTransitions()) { + FixedArray* prototype_transitions = + 
transitions->GetPrototypeTransitions(); + MarkAsWeakContainer(prototype_transitions); + TagObject(prototype_transitions, "(prototype transitions"); + SetInternalReference(transitions, transitions_entry, + "prototype_transitions", prototype_transitions); + } + // TODO(alph): transitions keys are strong links. + MarkAsWeakContainer(transitions); + } + } + TagObject(transitions, "(transition array)"); SetInternalReference(map, entry, "transitions", transitions, @@ -1336,6 +1380,7 @@ "descriptors", descriptors, Map::kDescriptorsOffset); + MarkAsWeakContainer(map->code_cache()); SetInternalReference(map, entry, "code_cache", map->code_cache(), Map::kCodeCacheOffset); @@ -1345,6 +1390,7 @@ "constructor", map->constructor(), Map::kConstructorOffset); TagObject(map->dependent_code(), "(dependent code)"); + MarkAsWeakContainer(map->dependent_code()); SetInternalReference(map, entry, "dependent_code", map->dependent_code(), Map::kDependentCodeOffset); @@ -1399,9 +1445,9 @@ SetInternalReference(obj, entry, "optimized_code_map", shared->optimized_code_map(), SharedFunctionInfo::kOptimizedCodeMapOffset); - SetWeakReference(obj, entry, - "initial_map", shared->initial_map(), - SharedFunctionInfo::kInitialMapOffset); + SetInternalReference(obj, entry, + "feedback_vector", shared->feedback_vector(), + SharedFunctionInfo::kFeedbackVectorOffset); } @@ -1453,8 +1499,8 @@ void V8HeapExplorer::TagCodeObject(Code* code) { if (code->kind() == Code::STUB) { TagObject(code, names_->GetFormatted( - "(%s code)", CodeStub::MajorName( - static_cast<CodeStub::Major>(code->major_key()), true))); + "(%s code)", CodeStub::MajorName( + CodeStub::GetMajorKey(code), true))); } } @@ -1506,6 +1552,7 @@ ExtractCellReferences(entry, cell); SetInternalReference(cell, entry, "type", cell->type(), PropertyCell::kTypeOffset); + MarkAsWeakContainer(cell->dependent_code()); SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(), PropertyCell::kDependentCodeOffset); } @@ -1517,11 +1564,12 @@ 
AllocationSite::kTransitionInfoOffset); SetInternalReference(site, entry, "nested_site", site->nested_site(), AllocationSite::kNestedSiteOffset); + MarkAsWeakContainer(site->dependent_code()); SetInternalReference(site, entry, "dependent_code", site->dependent_code(), AllocationSite::kDependentCodeOffset); // Do not visit weak_next as it is not visited by the StaticVisitor, // and we're not very interested in weak_next field here. - STATIC_CHECK(AllocationSite::kWeakNextOffset >= + STATIC_ASSERT(AllocationSite::kWeakNextOffset >= AllocationSite::BodyDescriptor::kEndOffset); } @@ -1562,6 +1610,20 @@ } +void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) { + bool is_weak = weak_containers_.Contains(array); + for (int i = 0, l = array->length(); i < l; ++i) { + if (is_weak) { + SetWeakReference(array, entry, + i, array->get(i), array->OffsetOfElementAt(i)); + } else { + SetInternalReference(array, entry, + i, array->get(i), array->OffsetOfElementAt(i)); + } + } +} + + void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) { if (!js_obj->IsJSFunction()) return; @@ -1591,6 +1653,8 @@ for (int i = 0; i < real_size; i++) { switch (descs->GetType(i)) { case FIELD: { + Representation r = descs->GetDetails(i).representation(); + if (r.IsSmi() || r.IsDouble()) break; int index = descs->GetFieldIndex(i); Name* k = descs->GetKey(i); @@ -1610,7 +1674,9 @@ js_obj->GetInObjectPropertyOffset(index)); } } else { - Object* value = js_obj->RawFastPropertyAt(index); + FieldIndex field_index = + FieldIndex::ForDescriptor(js_obj->map(), i); + Object* value = js_obj->RawFastPropertyAt(field_index); if (k != heap_->hidden_string()) { SetPropertyReference(js_obj, entry, k, value); } else { @@ -1634,7 +1700,6 @@ case HANDLER: // only in lookup results, not in descriptors case INTERCEPTOR: // only in lookup results, not in descriptors break; - case TRANSITION: case NONEXISTENT: UNREACHABLE(); break; @@ -1697,7 +1762,7 @@ for (int i = 0; i < 
length; ++i) { Object* k = dictionary->KeyAt(i); if (dictionary->IsKey(k)) { - ASSERT(k->IsNumber()); + DCHECK(k->IsNumber()); uint32_t index = static_cast<uint32_t>(k->Number()); SetElementReference(js_obj, entry, index, dictionary->ValueAt(i)); } @@ -1725,8 +1790,10 @@ // return its name. This is for instances of binding objects, which // have prototype constructor type "Object". Object* constructor_prop = NULL; - LookupResult result(heap->isolate()); - object->LocalLookupRealNamedProperty(heap->constructor_string(), &result); + Isolate* isolate = heap->isolate(); + LookupResult result(isolate); + object->LookupOwnRealNamedProperty( + isolate->factory()->constructor_string(), &result); if (!result.IsFound()) return object->constructor_name(); constructor_prop = result.GetLazyValue(); @@ -1776,7 +1843,7 @@ void SetCollectingAllReferences() { collecting_all_references_ = true; } void FillReferences(V8HeapExplorer* explorer) { - ASSERT(strong_references_.length() <= all_references_.length()); + DCHECK(strong_references_.length() <= all_references_.length()); Builtins* builtins = heap_->isolate()->builtins(); for (int i = 0; i < reference_tags_.length(); ++i) { explorer->SetGcRootsReference(reference_tags_[i].tag); @@ -1790,7 +1857,7 @@ all_references_[all_index]); if (reference_tags_[tags_index].tag == VisitorSynchronization::kBuiltins) { - ASSERT(all_references_[all_index]->IsCode()); + DCHECK(all_references_[all_index]->IsCode()); explorer->TagBuiltinCodeObject( Code::cast(all_references_[all_index]), builtins->name(builtin_index++)); @@ -1833,6 +1900,25 @@ heap_->IterateRoots(&extractor, VISIT_ALL); extractor.FillReferences(this); + // We have to do two passes as sometimes FixedArrays are used + // to weakly hold their items, and it's impossible to distinguish + // between these cases without processing the array owner first. 
+ bool interrupted = + IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass1>() || + IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass2>(); + + if (interrupted) { + filler_ = NULL; + return false; + } + + filler_ = NULL; + return progress_->ProgressReport(true); +} + + +template<V8HeapExplorer::ExtractReferencesMethod extractor> +bool V8HeapExplorer::IterateAndExtractSinglePass() { // Now iterate the whole heap. bool interrupted = false; HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable); @@ -1840,18 +1926,22 @@ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next(), progress_->ProgressStep()) { - if (!interrupted) { - ExtractReferences(obj); - if (!progress_->ProgressReport(false)) interrupted = true; + if (interrupted) continue; + + HeapEntry* heap_entry = GetEntry(obj); + int entry = heap_entry->index(); + if ((this->*extractor)(entry, obj)) { + SetInternalReference(obj, entry, + "map", obj->map(), HeapObject::kMapOffset); + // Extract unvisited fields as hidden references and restore tags + // of visited fields. 
+ IndexedReferencesExtractor refs_extractor(this, obj, entry); + obj->Iterate(&refs_extractor); } - } - if (interrupted) { - filler_ = NULL; - return false; - } - filler_ = NULL; - return progress_->ProgressReport(true); + if (!progress_->ProgressReport(false)) interrupted = true; + } + return interrupted; } @@ -1876,7 +1966,7 @@ String* reference_name, Object* child_obj, int field_offset) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetNamedReference(HeapGraphEdge::kContextVariable, @@ -1892,7 +1982,7 @@ int parent_entry, const char* reference_name, Object* child_obj) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetNamedReference(HeapGraphEdge::kShortcut, @@ -1907,7 +1997,7 @@ int parent_entry, int index, Object* child_obj) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { filler_->SetIndexedReference(HeapGraphEdge::kElement, @@ -1923,7 +2013,7 @@ const char* reference_name, Object* child_obj, int field_offset) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry == NULL) return; if (IsEssentialObject(child_obj)) { @@ -1941,7 +2031,7 @@ int index, Object* child_obj, int field_offset) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry == NULL) return; if (IsEssentialObject(child_obj)) { @@ -1958,7 +2048,7 @@ int parent_entry, int index, Object* child_obj) { - ASSERT(parent_entry == 
GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL && IsEssentialObject(child_obj)) { filler_->SetIndexedReference(HeapGraphEdge::kHidden, @@ -1974,7 +2064,7 @@ const char* reference_name, Object* child_obj, int field_offset) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry == NULL) return; if (IsEssentialObject(child_obj)) { @@ -1987,13 +2077,31 @@ } +void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj, + int parent_entry, + int index, + Object* child_obj, + int field_offset) { + DCHECK(parent_entry == GetEntry(parent_obj)->index()); + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry == NULL) return; + if (IsEssentialObject(child_obj)) { + filler_->SetNamedReference(HeapGraphEdge::kWeak, + parent_entry, + names_->GetFormatted("%d", index), + child_entry); + } + IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); +} + + void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj, int parent_entry, Name* reference_name, Object* child_obj, const char* name_format_string, int field_offset) { - ASSERT(parent_entry == GetEntry(parent_obj)->index()); + DCHECK(parent_entry == GetEntry(parent_obj)->index()); HeapEntry* child_entry = GetEntry(child_obj); if (child_entry != NULL) { HeapGraphEdge::Type type = @@ -2025,7 +2133,7 @@ void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) { HeapEntry* child_entry = GetEntry(child_obj); - ASSERT(child_entry != NULL); + DCHECK(child_entry != NULL); filler_->SetNamedAutoIndexReference( HeapGraphEdge::kShortcut, snapshot_->root()->index(), @@ -2072,9 +2180,7 @@ GlobalObject* global = context->global_object(); if (global->IsJSGlobalObject()) { bool is_debug_object = false; -#ifdef ENABLE_DEBUGGER_SUPPORT is_debug_object = 
heap_->isolate()->debug()->IsDebugGlobal(global); -#endif if (!is_debug_object && !user_roots_.Contains(global)) { user_roots_.Insert(global); SetUserGlobalReference(global); @@ -2114,6 +2220,13 @@ } +void V8HeapExplorer::MarkAsWeakContainer(Object* object) { + if (IsEssentialObject(object) && object->IsFixedArray()) { + weak_containers_.Insert(object); + } +} + + class GlobalObjectsEnumerator : public ObjectVisitor { public: virtual void VisitPointers(Object** start, Object** end) { @@ -2301,7 +2414,7 @@ HeapObject* parent = *group->parent; int parent_entry = filler_->FindOrAddEntry(parent, native_entries_allocator_)->index(); - ASSERT(parent_entry != HeapEntry::kNoEntry); + DCHECK(parent_entry != HeapEntry::kNoEntry); Object*** children = group->children; for (size_t j = 0; j < group->length; ++j) { Object* child = *children[j]; @@ -2402,7 +2515,7 @@ v8::RetainedObjectInfo* info) { HeapEntry* child_entry = filler_->FindOrAddEntry(info, native_entries_allocator_); - ASSERT(child_entry != NULL); + DCHECK(child_entry != NULL); NativeGroupRetainedObjectInfo* group_info = FindOrAddGroupInfo(info->GetGroupLabel()); HeapEntry* group_entry = @@ -2417,10 +2530,10 @@ void NativeObjectsExplorer::SetWrapperNativeReferences( HeapObject* wrapper, v8::RetainedObjectInfo* info) { HeapEntry* wrapper_entry = filler_->FindEntry(wrapper); - ASSERT(wrapper_entry != NULL); + DCHECK(wrapper_entry != NULL); HeapEntry* info_entry = filler_->FindOrAddEntry(info, native_entries_allocator_); - ASSERT(info_entry != NULL); + DCHECK(info_entry != NULL); filler_->SetNamedReference(HeapGraphEdge::kInternal, wrapper_entry->index(), "native", @@ -2439,7 +2552,7 @@ static_cast<NativeGroupRetainedObjectInfo*>(entry->value); HeapEntry* group_entry = filler_->FindOrAddEntry(group_info, native_entries_allocator_); - ASSERT(group_entry != NULL); + DCHECK(group_entry != NULL); filler_->SetIndexedAutoIndexReference( HeapGraphEdge::kElement, snapshot_->root()->index(), @@ -2487,24 +2600,19 @@ #ifdef 
VERIFY_HEAP Heap* debug_heap = heap_; - CHECK(!debug_heap->old_data_space()->was_swept_conservatively()); - CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively()); - CHECK(!debug_heap->code_space()->was_swept_conservatively()); - CHECK(!debug_heap->cell_space()->was_swept_conservatively()); - CHECK(!debug_heap->property_cell_space()-> - was_swept_conservatively()); - CHECK(!debug_heap->map_space()->was_swept_conservatively()); + CHECK(debug_heap->old_data_space()->swept_precisely()); + CHECK(debug_heap->old_pointer_space()->swept_precisely()); + CHECK(debug_heap->code_space()->swept_precisely()); + CHECK(debug_heap->cell_space()->swept_precisely()); + CHECK(debug_heap->property_cell_space()->swept_precisely()); + CHECK(debug_heap->map_space()->swept_precisely()); #endif - // The following code uses heap iterators, so we want the heap to be - // stable. It should follow TagGlobalObjects as that can allocate. - DisallowHeapAllocation no_alloc; - #ifdef VERIFY_HEAP debug_heap->Verify(); #endif - SetProgressTotal(1); // 1 pass. + SetProgressTotal(2); // 2 passes. 
#ifdef VERIFY_HEAP debug_heap->Verify(); @@ -2575,12 +2683,12 @@ chunk_(chunk_size_), chunk_pos_(0), aborted_(false) { - ASSERT(chunk_size_ > 0); + DCHECK(chunk_size_ > 0); } bool aborted() { return aborted_; } void AddCharacter(char c) { - ASSERT(c != '\0'); - ASSERT(chunk_pos_ < chunk_size_); + DCHECK(c != '\0'); + DCHECK(chunk_pos_ < chunk_size_); chunk_[chunk_pos_++] = c; MaybeWriteChunk(); } @@ -2589,13 +2697,13 @@ } void AddSubstring(const char* s, int n) { if (n <= 0) return; - ASSERT(static_cast<size_t>(n) <= strlen(s)); + DCHECK(static_cast<size_t>(n) <= strlen(s)); const char* s_end = s + n; while (s < s_end) { - int s_chunk_size = Min( - chunk_size_ - chunk_pos_, static_cast<int>(s_end - s)); - ASSERT(s_chunk_size > 0); - OS::MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size); + int s_chunk_size = + Min(chunk_size_ - chunk_pos_, static_cast<int>(s_end - s)); + DCHECK(s_chunk_size > 0); + MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size); s += s_chunk_size; chunk_pos_ += s_chunk_size; MaybeWriteChunk(); @@ -2604,7 +2712,7 @@ void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); } void Finalize() { if (aborted_) return; - ASSERT(chunk_pos_ < chunk_size_); + DCHECK(chunk_pos_ < chunk_size_); if (chunk_pos_ != 0) { WriteChunk(); } @@ -2618,21 +2726,21 @@ static const int kMaxNumberSize = MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1; if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) { - int result = OS::SNPrintF( + int result = SNPrintF( chunk_.SubVector(chunk_pos_, chunk_size_), format, n); - ASSERT(result != -1); + DCHECK(result != -1); chunk_pos_ += result; MaybeWriteChunk(); } else { EmbeddedVector<char, kMaxNumberSize> buffer; - int result = OS::SNPrintF(buffer, format, n); + int result = SNPrintF(buffer, format, n); USE(result); - ASSERT(result != -1); + DCHECK(result != -1); AddString(buffer.start()); } } void MaybeWriteChunk() { - ASSERT(chunk_pos_ <= chunk_size_); + DCHECK(chunk_pos_ <= chunk_size_); if (chunk_pos_ == chunk_size_) { 
WriteChunk(); } @@ -2662,7 +2770,7 @@ snapshot_->profiler()->allocation_tracker()) { allocation_tracker->PrepareForSerialization(); } - ASSERT(writer_ == NULL); + DCHECK(writer_ == NULL); writer_ = new OutputStreamWriter(stream); SerializeImpl(); delete writer_; @@ -2671,7 +2779,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() { - ASSERT(0 == snapshot_->root()->index()); + DCHECK(0 == snapshot_->root()->index()); writer_->AddCharacter('{'); writer_->AddString("\"snapshot\":{"); SerializeSnapshot(); @@ -2731,7 +2839,7 @@ template<typename T> static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) { - STATIC_CHECK(static_cast<T>(-1) > 0); // Check that T is unsigned + STATIC_ASSERT(static_cast<T>(-1) > 0); // Check that T is unsigned int number_of_digits = 0; T t = value; do { @@ -2752,7 +2860,7 @@ template<typename T> static int utoa(T value, const Vector<char>& buffer, int buffer_pos) { typename ToUnsigned<sizeof(value)>::Type unsigned_value = value; - STATIC_CHECK(sizeof(value) == sizeof(unsigned_value)); + STATIC_ASSERT(sizeof(value) == sizeof(unsigned_value)); return utoa_impl(unsigned_value, buffer, buffer_pos); } @@ -2784,7 +2892,7 @@ void HeapSnapshotJSONSerializer::SerializeEdges() { List<HeapGraphEdge*>& edges = snapshot_->children(); for (int i = 0; i < edges.length(); ++i) { - ASSERT(i == 0 || + DCHECK(i == 0 || edges[i - 1]->from()->index() <= edges[i]->from()->index()); SerializeEdge(edges[i], i == 0); if (writer_->aborted()) return; @@ -2968,7 +3076,7 @@ if (position == -1) { buffer[buffer_pos++] = '0'; } else { - ASSERT(position >= 0); + DCHECK(position >= 0); buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos); } return buffer_pos; @@ -3052,7 +3160,7 @@ unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor); if (c != unibrow::Utf8::kBadChar) { WriteUChar(writer_, c); - ASSERT(cursor != 0); + DCHECK(cursor != 0); s += cursor - 1; } else { writer_->AddCharacter('?'); diff -Nru 
nodejs-0.11.13/deps/v8/src/heap-snapshot-generator.h nodejs-0.11.15/deps/v8/src/heap-snapshot-generator.h --- nodejs-0.11.13/deps/v8/src/heap-snapshot-generator.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap-snapshot-generator.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_ #define V8_HEAP_SNAPSHOT_GENERATOR_H_ -#include "profile-generator-inl.h" +#include "src/profile-generator-inl.h" namespace v8 { namespace internal { @@ -58,11 +35,11 @@ Type type() const { return static_cast<Type>(type_); } int index() const { - ASSERT(type_ == kElement || type_ == kHidden); + DCHECK(type_ == kElement || type_ == kHidden); return index_; } const char* name() const { - ASSERT(type_ == kContextVariable + DCHECK(type_ == kContextVariable || type_ == kProperty || type_ == kInternal || type_ == kShortcut @@ -106,7 +83,8 @@ kNative = v8::HeapGraphNode::kNative, kSynthetic = v8::HeapGraphNode::kSynthetic, kConsString = v8::HeapGraphNode::kConsString, - kSlicedString = v8::HeapGraphNode::kSlicedString + kSlicedString = v8::HeapGraphNode::kSlicedString, + kSymbol = v8::HeapGraphNode::kSymbol }; static const int kNoEntry; @@ -315,9 +293,6 @@ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)), v8::internal::kZeroHashSeed); } - static bool HeapThingsMatch(HeapThing key1, HeapThing key2) { - return key1 == key2; - } HashMap entries_; @@ -376,6 +351,9 @@ static HeapObject* const kInternalRootObject; private: + typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry, + HeapObject* object); + HeapEntry* AddEntry(HeapObject* object); HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type, @@ -383,10 +361,18 @@ const char* GetSystemEntryName(HeapObject* object); - void ExtractReferences(HeapObject* obj); + template<V8HeapExplorer::ExtractReferencesMethod extractor> + bool IterateAndExtractSinglePass(); + + bool ExtractReferencesPass1(int entry, HeapObject* obj); + bool ExtractReferencesPass2(int entry, HeapObject* obj); void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy); void ExtractJSObjectReferences(int entry, JSObject* js_obj); void ExtractStringReferences(int entry, String* obj); + void ExtractSymbolReferences(int entry, Symbol* symbol); + void ExtractJSCollectionReferences(int 
entry, JSCollection* collection); + void ExtractJSWeakCollectionReferences(int entry, + JSWeakCollection* collection); void ExtractContextReferences(int entry, Context* context); void ExtractMapReferences(int entry, Map* map); void ExtractSharedFunctionInfoReferences(int entry, @@ -400,12 +386,14 @@ void ExtractPropertyCellReferences(int entry, PropertyCell* cell); void ExtractAllocationSiteReferences(int entry, AllocationSite* site); void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer); + void ExtractFixedArrayReferences(int entry, FixedArray* array); void ExtractClosureReferences(JSObject* js_obj, int entry); void ExtractPropertyReferences(JSObject* js_obj, int entry); bool ExtractAccessorPairProperty(JSObject* js_obj, int entry, Object* key, Object* callback_obj); void ExtractElementReferences(JSObject* js_obj, int entry); void ExtractInternalReferences(JSObject* js_obj, int entry); + bool IsEssentialObject(Object* object); void SetContextReference(HeapObject* parent_obj, int parent, @@ -439,6 +427,11 @@ const char* reference_name, Object* child_obj, int field_offset); + void SetWeakReference(HeapObject* parent_obj, + int parent, + int index, + Object* child_obj, + int field_offset); void SetPropertyReference(HeapObject* parent_obj, int parent, Name* reference_name, @@ -452,6 +445,7 @@ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child); const char* GetStrongGcSubrootName(Object* object); void TagObject(Object* obj, const char* tag); + void MarkAsWeakContainer(Object* object); HeapEntry* GetEntry(Object* obj); @@ -467,6 +461,7 @@ HeapObjectsSet objects_tags_; HeapObjectsSet strong_gc_subroot_names_; HeapObjectsSet user_roots_; + HeapObjectsSet weak_containers_; v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_; static HeapObject* const kGcRootsObject; diff -Nru nodejs-0.11.13/deps/v8/src/heap-snapshot-generator-inl.h nodejs-0.11.15/deps/v8/src/heap-snapshot-generator-inl.h --- 
nodejs-0.11.13/deps/v8/src/heap-snapshot-generator-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/heap-snapshot-generator-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_ #define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_ -#include "heap-snapshot-generator.h" +#include "src/heap-snapshot-generator.h" namespace v8 { namespace internal { @@ -58,8 +35,8 @@ HeapGraphEdge** HeapEntry::children_arr() { - ASSERT(children_index_ >= 0); - SLOW_ASSERT(children_index_ < snapshot_->children().length() || + DCHECK(children_index_ >= 0); + SLOW_DCHECK(children_index_ < snapshot_->children().length() || (children_index_ == snapshot_->children().length() && children_count_ == 0)); return &snapshot_->children().first() + children_index_; diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-alias-analysis.h nodejs-0.11.15/deps/v8/src/hydrogen-alias-analysis.h --- nodejs-0.11.13/deps/v8/src/hydrogen-alias-analysis.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-alias-analysis.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_ #define V8_HYDROGEN_ALIAS_ANALYSIS_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-bce.cc nodejs-0.11.15/deps/v8/src/hydrogen-bce.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-bce.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-bce.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-bce.h" +#include "src/hydrogen-bce.h" namespace v8 { namespace internal { @@ -70,13 +47,13 @@ } else if (check->index()->IsSub()) { HSub* index = HSub::cast(check->index()); is_sub = true; - if (index->left()->IsConstant()) { - constant = HConstant::cast(index->left()); - index_base = index->right(); - } else if (index->right()->IsConstant()) { + if (index->right()->IsConstant()) { constant = HConstant::cast(index->right()); index_base = index->left(); } + } else if (check->index()->IsConstant()) { + index_base = check->block()->graph()->GetConstant0(); + constant = HConstant::cast(check->index()); } if (constant != NULL && constant->HasInteger32Value()) { @@ -136,7 +113,7 @@ void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) { BoundsCheckBbData* data = FatherInDominatorTree(); while (data != NULL && data->UpperCheck() == check) { - ASSERT(data->upper_offset_ < offset); + DCHECK(data->upper_offset_ < offset); data->upper_offset_ = offset; data = data->FatherInDominatorTree(); } @@ -145,7 +122,7 @@ void UpdateLowerOffsets(HBoundsCheck* check, 
int32_t offset) { BoundsCheckBbData* data = FatherInDominatorTree(); while (data != NULL && data->LowerCheck() == check) { - ASSERT(data->lower_offset_ > offset); + DCHECK(data->lower_offset_ > offset); data->lower_offset_ = offset; data = data->FatherInDominatorTree(); } @@ -165,7 +142,7 @@ // new_offset, and new_check is removed. void CoverCheck(HBoundsCheck* new_check, int32_t new_offset) { - ASSERT(new_check->index()->representation().IsSmiOrInteger32()); + DCHECK(new_check->index()->representation().IsSmiOrInteger32()); bool keep_new_check = false; if (new_offset > upper_offset_) { @@ -193,8 +170,8 @@ if (!keep_new_check) { if (FLAG_trace_bce) { - OS::Print("Eliminating check #%d after tightening\n", - new_check->id()); + base::OS::Print("Eliminating check #%d after tightening\n", + new_check->id()); } new_check->block()->graph()->isolate()->counters()-> bounds_checks_eliminated()->Increment(); @@ -203,11 +180,11 @@ HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_ : lower_check_; if (FLAG_trace_bce) { - OS::Print("Moving second check #%d after first check #%d\n", - new_check->id(), first_check->id()); + base::OS::Print("Moving second check #%d after first check #%d\n", + new_check->id(), first_check->id()); } // The length is guaranteed to be live at first_check. - ASSERT(new_check->length() == first_check->length()); + DCHECK(new_check->length() == first_check->length()); HInstruction* old_position = new_check->next(); new_check->Unlink(); new_check->InsertAfter(first_check); @@ -245,54 +222,69 @@ void MoveIndexIfNecessary(HValue* index_raw, HBoundsCheck* insert_before, HInstruction* end_of_scan_range) { - if (!index_raw->IsAdd() && !index_raw->IsSub()) { - // index_raw can be HAdd(index_base, offset), HSub(index_base, offset), - // or index_base directly. In the latter case, no need to move anything. 
- return; - } - HArithmeticBinaryOperation* index = - HArithmeticBinaryOperation::cast(index_raw); - HValue* left_input = index->left(); - HValue* right_input = index->right(); - bool must_move_index = false; - bool must_move_left_input = false; - bool must_move_right_input = false; - for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { - if (cursor == left_input) must_move_left_input = true; - if (cursor == right_input) must_move_right_input = true; - if (cursor == index) must_move_index = true; - if (cursor->previous() == NULL) { - cursor = cursor->block()->dominator()->end(); - } else { - cursor = cursor->previous(); + // index_raw can be HAdd(index_base, offset), HSub(index_base, offset), + // HConstant(offset) or index_base directly. + // In the latter case, no need to move anything. + if (index_raw->IsAdd() || index_raw->IsSub()) { + HArithmeticBinaryOperation* index = + HArithmeticBinaryOperation::cast(index_raw); + HValue* left_input = index->left(); + HValue* right_input = index->right(); + bool must_move_index = false; + bool must_move_left_input = false; + bool must_move_right_input = false; + for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { + if (cursor == left_input) must_move_left_input = true; + if (cursor == right_input) must_move_right_input = true; + if (cursor == index) must_move_index = true; + if (cursor->previous() == NULL) { + cursor = cursor->block()->dominator()->end(); + } else { + cursor = cursor->previous(); + } + } + if (must_move_index) { + index->Unlink(); + index->InsertBefore(insert_before); + } + // The BCE algorithm only selects mergeable bounds checks that share + // the same "index_base", so we'll only ever have to move constants. 
+ if (must_move_left_input) { + HConstant::cast(left_input)->Unlink(); + HConstant::cast(left_input)->InsertBefore(index); + } + if (must_move_right_input) { + HConstant::cast(right_input)->Unlink(); + HConstant::cast(right_input)->InsertBefore(index); + } + } else if (index_raw->IsConstant()) { + HConstant* index = HConstant::cast(index_raw); + bool must_move = false; + for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { + if (cursor == index) must_move = true; + if (cursor->previous() == NULL) { + cursor = cursor->block()->dominator()->end(); + } else { + cursor = cursor->previous(); + } + } + if (must_move) { + index->Unlink(); + index->InsertBefore(insert_before); } - } - if (must_move_index) { - index->Unlink(); - index->InsertBefore(insert_before); - } - // The BCE algorithm only selects mergeable bounds checks that share - // the same "index_base", so we'll only ever have to move constants. - if (must_move_left_input) { - HConstant::cast(left_input)->Unlink(); - HConstant::cast(left_input)->InsertBefore(index); - } - if (must_move_right_input) { - HConstant::cast(right_input)->Unlink(); - HConstant::cast(right_input)->InsertBefore(index); } } void TightenCheck(HBoundsCheck* original_check, HBoundsCheck* tighter_check, int32_t new_offset) { - ASSERT(original_check->length() == tighter_check->length()); + DCHECK(original_check->length() == tighter_check->length()); MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check); original_check->ReplaceAllUsesWith(original_check->index()); original_check->SetOperandAt(0, tighter_check->index()); if (FLAG_trace_bce) { - OS::Print("Tightened check #%d with offset %d from #%d\n", - original_check->id(), new_offset, tighter_check->id()); + base::OS::Print("Tightened check #%d with offset %d from #%d\n", + original_check->id(), new_offset, tighter_check->id()); } } @@ -404,15 +396,15 @@ NULL); *data_p = bb_data_list; if (FLAG_trace_bce) { - OS::Print("Fresh bounds check data for 
block #%d: [%d]\n", - bb->block_id(), offset); + base::OS::Print("Fresh bounds check data for block #%d: [%d]\n", + bb->block_id(), offset); } } else if (data->OffsetIsCovered(offset)) { bb->graph()->isolate()->counters()-> bounds_checks_eliminated()->Increment(); if (FLAG_trace_bce) { - OS::Print("Eliminating bounds check #%d, offset %d is covered\n", - check->id(), offset); + base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n", + check->id(), offset); } check->DeleteAndReplaceWith(check->ActualValue()); } else if (data->BasicBlock() == bb) { @@ -447,8 +439,8 @@ bb_data_list, data); if (FLAG_trace_bce) { - OS::Print("Updated bounds check data for block #%d: [%d - %d]\n", - bb->block_id(), new_lower_offset, new_upper_offset); + base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n", + bb->block_id(), new_lower_offset, new_upper_offset); } table_.Insert(key, bb_data_list, zone()); } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-bce.h nodejs-0.11.15/deps/v8/src/hydrogen-bce.h --- nodejs-0.11.13/deps/v8/src/hydrogen-bce.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-bce.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_BCE_H_ #define V8_HYDROGEN_BCE_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-bch.cc nodejs-0.11.15/deps/v8/src/hydrogen-bch.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-bch.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-bch.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-bch.h" +#include "src/hydrogen-bch.h" namespace v8 { namespace internal { @@ -67,7 +44,7 @@ * induction variable). */ void InitializeLoop(InductionVariableData* data) { - ASSERT(data->limit() != NULL); + DCHECK(data->limit() != NULL); HLoopInformation* loop = data->phi()->block()->current_loop(); is_start_ = (block() == loop->loop_header()); is_proper_exit_ = (block() == data->induction_exit_target()); @@ -78,7 +55,7 @@ // Utility methods to iterate over dominated blocks. void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; } HBasicBlock* CurrentDominatedBlock() { - ASSERT(current_dominated_block_ != kNoBlock); + DCHECK(current_dominated_block_ != kNoBlock); return current_dominated_block_ < block()->dominated_blocks()->length() ? 
block()->dominated_blocks()->at(current_dominated_block_) : NULL; } @@ -204,7 +181,7 @@ Element element; element.set_block(graph->blocks()->at(i)); elements_.Add(element, graph->zone()); - ASSERT(at(i)->block()->block_id() == i); + DCHECK(at(i)->block()->block_id() == i); } } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-bch.h nodejs-0.11.15/deps/v8/src/hydrogen-bch.h --- nodejs-0.11.13/deps/v8/src/hydrogen-bch.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-bch.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_BCH_H_ #define V8_HYDROGEN_BCH_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-canonicalize.cc nodejs-0.11.15/deps/v8/src/hydrogen-canonicalize.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-canonicalize.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-canonicalize.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-canonicalize.h" -#include "hydrogen-redundant-phi.h" +#include "src/hydrogen-canonicalize.h" +#include "src/hydrogen-redundant-phi.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-canonicalize.h nodejs-0.11.15/deps/v8/src/hydrogen-canonicalize.h --- nodejs-0.11.13/deps/v8/src/hydrogen-canonicalize.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-canonicalize.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_CANONICALIZE_H_ #define V8_HYDROGEN_CANONICALIZE_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen.cc nodejs-0.11.15/deps/v8/src/hydrogen.cc --- nodejs-0.11.13/deps/v8/src/hydrogen.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,79 +1,61 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "hydrogen.h" +#include "src/hydrogen.h" #include <algorithm> -#include "v8.h" -#include "allocation-site-scopes.h" -#include "codegen.h" -#include "full-codegen.h" -#include "hashmap.h" -#include "hydrogen-bce.h" -#include "hydrogen-bch.h" -#include "hydrogen-canonicalize.h" -#include "hydrogen-check-elimination.h" -#include "hydrogen-dce.h" -#include "hydrogen-dehoist.h" -#include "hydrogen-environment-liveness.h" -#include "hydrogen-escape-analysis.h" -#include "hydrogen-infer-representation.h" -#include "hydrogen-infer-types.h" -#include "hydrogen-load-elimination.h" -#include "hydrogen-gvn.h" -#include "hydrogen-mark-deoptimize.h" -#include "hydrogen-mark-unreachable.h" -#include "hydrogen-osr.h" -#include "hydrogen-range-analysis.h" -#include "hydrogen-redundant-phi.h" -#include "hydrogen-removable-simulates.h" -#include "hydrogen-representation-changes.h" -#include "hydrogen-sce.h" -#include "hydrogen-store-elimination.h" -#include "hydrogen-uint32-analysis.h" -#include "lithium-allocator.h" -#include "parser.h" -#include "runtime.h" -#include "scopeinfo.h" -#include "scopes.h" -#include "stub-cache.h" -#include "typing.h" +#include "src/v8.h" + +#include "src/allocation-site-scopes.h" +#include "src/codegen.h" +#include "src/full-codegen.h" +#include "src/hashmap.h" +#include "src/hydrogen-bce.h" +#include "src/hydrogen-bch.h" +#include "src/hydrogen-canonicalize.h" +#include "src/hydrogen-check-elimination.h" +#include "src/hydrogen-dce.h" +#include "src/hydrogen-dehoist.h" +#include "src/hydrogen-environment-liveness.h" +#include "src/hydrogen-escape-analysis.h" +#include "src/hydrogen-gvn.h" +#include "src/hydrogen-infer-representation.h" +#include "src/hydrogen-infer-types.h" +#include "src/hydrogen-load-elimination.h" +#include "src/hydrogen-mark-deoptimize.h" +#include "src/hydrogen-mark-unreachable.h" +#include "src/hydrogen-osr.h" +#include "src/hydrogen-range-analysis.h" +#include "src/hydrogen-redundant-phi.h" +#include 
"src/hydrogen-removable-simulates.h" +#include "src/hydrogen-representation-changes.h" +#include "src/hydrogen-sce.h" +#include "src/hydrogen-store-elimination.h" +#include "src/hydrogen-uint32-analysis.h" +#include "src/lithium-allocator.h" +#include "src/parser.h" +#include "src/runtime.h" +#include "src/scopeinfo.h" +#include "src/scopes.h" +#include "src/stub-cache.h" +#include "src/typing.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-codegen-ia32.h" +#include "src/ia32/lithium-codegen-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/lithium-codegen-x64.h" +#include "src/x64/lithium-codegen-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/lithium-codegen-arm64.h" +#include "src/arm64/lithium-codegen-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/lithium-codegen-arm.h" +#include "src/arm/lithium-codegen-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/lithium-codegen-mips.h" +#include "src/mips/lithium-codegen-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/lithium-codegen-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/lithium-codegen-x87.h" // NOLINT #else #error Unsupported target architecture. 
#endif @@ -102,7 +84,8 @@ is_inline_return_target_(false), is_reachable_(true), dominates_loop_successors_(false), - is_osr_entry_(false) { } + is_osr_entry_(false), + is_ordered_(false) { } Isolate* HBasicBlock::isolate() const { @@ -116,27 +99,27 @@ void HBasicBlock::AttachLoopInformation() { - ASSERT(!IsLoopHeader()); + DCHECK(!IsLoopHeader()); loop_information_ = new(zone()) HLoopInformation(this, zone()); } void HBasicBlock::DetachLoopInformation() { - ASSERT(IsLoopHeader()); + DCHECK(IsLoopHeader()); loop_information_ = NULL; } void HBasicBlock::AddPhi(HPhi* phi) { - ASSERT(!IsStartBlock()); + DCHECK(!IsStartBlock()); phis_.Add(phi, zone()); phi->SetBlock(this); } void HBasicBlock::RemovePhi(HPhi* phi) { - ASSERT(phi->block() == this); - ASSERT(phis_.Contains(phi)); + DCHECK(phi->block() == this); + DCHECK(phis_.Contains(phi)); phi->Kill(); phis_.RemoveElement(phi); phi->SetBlock(NULL); @@ -145,22 +128,22 @@ void HBasicBlock::AddInstruction(HInstruction* instr, HSourcePosition position) { - ASSERT(!IsStartBlock() || !IsFinished()); - ASSERT(!instr->IsLinked()); - ASSERT(!IsFinished()); + DCHECK(!IsStartBlock() || !IsFinished()); + DCHECK(!instr->IsLinked()); + DCHECK(!IsFinished()); if (!position.IsUnknown()) { instr->set_position(position); } if (first_ == NULL) { - ASSERT(last_environment() != NULL); - ASSERT(!last_environment()->ast_id().IsNone()); + DCHECK(last_environment() != NULL); + DCHECK(!last_environment()->ast_id().IsNone()); HBlockEntry* entry = new(zone()) HBlockEntry(); entry->InitializeAsFirst(this); if (!position.IsUnknown()) { entry->set_position(position); } else { - ASSERT(!FLAG_hydrogen_track_positions || + DCHECK(!FLAG_hydrogen_track_positions || !graph()->info()->IsOptimizing()); } first_ = last_ = entry; @@ -181,9 +164,9 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id, RemovableSimulate removable) { - ASSERT(HasEnvironment()); + DCHECK(HasEnvironment()); HEnvironment* environment = last_environment(); - 
ASSERT(ast_id.IsNone() || + DCHECK(ast_id.IsNone() || ast_id == BailoutId::StubEntry() || environment->closure()->shared()->VerifyBailoutId(ast_id)); @@ -214,7 +197,7 @@ void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) { - ASSERT(!IsFinished()); + DCHECK(!IsFinished()); AddInstruction(end, position); end_ = end; for (HSuccessorIterator it(end); !it.Done(); it.Advance()) { @@ -251,8 +234,8 @@ HBasicBlock* target = state->function_return(); bool drop_extra = state->inlining_kind() == NORMAL_RETURN; - ASSERT(target->IsInlineReturnTarget()); - ASSERT(return_value != NULL); + DCHECK(target->IsInlineReturnTarget()); + DCHECK(return_value != NULL); HEnvironment* env = last_environment(); int argument_count = env->arguments_environment()->parameter_count(); AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count), @@ -266,8 +249,8 @@ void HBasicBlock::SetInitialEnvironment(HEnvironment* env) { - ASSERT(!HasEnvironment()); - ASSERT(first() == NULL); + DCHECK(!HasEnvironment()); + DCHECK(first() == NULL); UpdateEnvironment(env); } @@ -280,12 +263,12 @@ void HBasicBlock::SetJoinId(BailoutId ast_id) { int length = predecessors_.length(); - ASSERT(length > 0); + DCHECK(length > 0); for (int i = 0; i < length; i++) { HBasicBlock* predecessor = predecessors_[i]; - ASSERT(predecessor->end()->IsGoto()); + DCHECK(predecessor->end()->IsGoto()); HSimulate* simulate = HSimulate::cast(predecessor->end()->previous()); - ASSERT(i != 0 || + DCHECK(i != 0 || (predecessor->last_environment()->closure().is_null() || predecessor->last_environment()->closure()->shared() ->VerifyBailoutId(ast_id))); @@ -323,7 +306,7 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) { - ASSERT(IsLoopHeader()); + DCHECK(IsLoopHeader()); SetJoinId(stmt->EntryId()); if (predecessors()->length() == 1) { @@ -341,10 +324,10 @@ void HBasicBlock::MarkSuccEdgeUnreachable(int succ) { - ASSERT(IsFinished()); + DCHECK(IsFinished()); HBasicBlock* succ_block = 
end()->SuccessorAt(succ); - ASSERT(succ_block->predecessors()->length() == 1); + DCHECK(succ_block->predecessors()->length() == 1); succ_block->MarkUnreachable(); } @@ -354,10 +337,10 @@ // Only loop header blocks can have a predecessor added after // instructions have been added to the block (they have phis for all // values in the environment, these phis may be eliminated later). - ASSERT(IsLoopHeader() || first_ == NULL); + DCHECK(IsLoopHeader() || first_ == NULL); HEnvironment* incoming_env = pred->last_environment(); if (IsLoopHeader()) { - ASSERT(phis()->length() == incoming_env->length()); + DCHECK(phis()->length() == incoming_env->length()); for (int i = 0; i < phis_.length(); ++i) { phis_[i]->AddInput(incoming_env->values()->at(i)); } @@ -365,7 +348,7 @@ last_environment()->AddIncomingEdge(this, pred->last_environment()); } } else if (!HasEnvironment() && !IsFinished()) { - ASSERT(!IsLoopHeader()); + DCHECK(!IsLoopHeader()); SetInitialEnvironment(pred->last_environment()->Copy()); } @@ -374,7 +357,7 @@ void HBasicBlock::AddDominatedBlock(HBasicBlock* block) { - ASSERT(!dominated_blocks_.Contains(block)); + DCHECK(!dominated_blocks_.Contains(block)); // Keep the list of dominated blocks sorted such that if there is two // succeeding block in this list, the predecessor is before the successor. int index = 0; @@ -400,11 +383,11 @@ } else { second = second->dominator(); } - ASSERT(first != NULL && second != NULL); + DCHECK(first != NULL && second != NULL); } if (dominator_ != first) { - ASSERT(dominator_->dominated_blocks_.Contains(this)); + DCHECK(dominator_->dominated_blocks_.Contains(this)); dominator_->dominated_blocks_.RemoveElement(this); dominator_ = first; first->AddDominatedBlock(this); @@ -446,7 +429,7 @@ // dominator information about the current loop that's being processed, // and not nested loops, which will be processed when // AssignLoopSuccessorDominators gets called on their header. 
- ASSERT(outstanding_successors >= 0); + DCHECK(outstanding_successors >= 0); HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header(); if (outstanding_successors == 0 && (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) { @@ -460,7 +443,7 @@ if (successor->block_id() > dominator_candidate->block_id() && successor->block_id() <= last->block_id()) { // Backwards edges must land on loop headers. - ASSERT(successor->block_id() > dominator_candidate->block_id() || + DCHECK(successor->block_id() > dominator_candidate->block_id() || successor->IsLoopHeader()); outstanding_successors++; } @@ -481,13 +464,13 @@ #ifdef DEBUG void HBasicBlock::Verify() { // Check that every block is finished. - ASSERT(IsFinished()); - ASSERT(block_id() >= 0); + DCHECK(IsFinished()); + DCHECK(block_id() >= 0); // Check that the incoming edges are in edge split form. if (predecessors_.length() > 1) { for (int i = 0; i < predecessors_.length(); ++i) { - ASSERT(predecessors_[i]->end()->SecondSuccessor() == NULL); + DCHECK(predecessors_[i]->end()->SecondSuccessor() == NULL); } } } @@ -590,10 +573,10 @@ // Check that every block contains at least one node and that only the last // node is a control instruction. HInstruction* current = block->first(); - ASSERT(current != NULL && current->IsBlockEntry()); + DCHECK(current != NULL && current->IsBlockEntry()); while (current != NULL) { - ASSERT((current->next() == NULL) == current->IsControlInstruction()); - ASSERT(current->block() == block); + DCHECK((current->next() == NULL) == current->IsControlInstruction()); + DCHECK(current->block() == block); current->Verify(); current = current->next(); } @@ -601,13 +584,13 @@ // Check that successors are correctly set. HBasicBlock* first = block->end()->FirstSuccessor(); HBasicBlock* second = block->end()->SecondSuccessor(); - ASSERT(second == NULL || first != NULL); + DCHECK(second == NULL || first != NULL); // Check that the predecessor array is correct. 
if (first != NULL) { - ASSERT(first->predecessors()->Contains(block)); + DCHECK(first->predecessors()->Contains(block)); if (second != NULL) { - ASSERT(second->predecessors()->Contains(block)); + DCHECK(second->predecessors()->Contains(block)); } } @@ -624,36 +607,36 @@ block->predecessors()->first()->last_environment()->ast_id(); for (int k = 0; k < block->predecessors()->length(); k++) { HBasicBlock* predecessor = block->predecessors()->at(k); - ASSERT(predecessor->end()->IsGoto() || + DCHECK(predecessor->end()->IsGoto() || predecessor->end()->IsDeoptimize()); - ASSERT(predecessor->last_environment()->ast_id() == id); + DCHECK(predecessor->last_environment()->ast_id() == id); } } } // Check special property of first block to have no predecessors. - ASSERT(blocks_.at(0)->predecessors()->is_empty()); + DCHECK(blocks_.at(0)->predecessors()->is_empty()); if (do_full_verify) { // Check that the graph is fully connected. ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL); - ASSERT(analyzer.visited_count() == blocks_.length()); + DCHECK(analyzer.visited_count() == blocks_.length()); // Check that entry block dominator is NULL. - ASSERT(entry_block_->dominator() == NULL); + DCHECK(entry_block_->dominator() == NULL); // Check dominators. for (int i = 0; i < blocks_.length(); ++i) { HBasicBlock* block = blocks_.at(i); if (block->dominator() == NULL) { // Only start block may have no dominator assigned to. - ASSERT(i == 0); + DCHECK(i == 0); } else { // Assert that block is unreachable if dominator must not be visited. 
ReachabilityAnalyzer dominator_analyzer(entry_block_, blocks_.length(), block->dominator()); - ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id())); + DCHECK(!dominator_analyzer.reachable()->Contains(block->block_id())); } } } @@ -701,11 +684,13 @@ } -#define DEFINE_GET_CONSTANT(Name, name, htype, boolean_value) \ +#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value) \ HConstant* HGraph::GetConstant##Name() { \ if (!constant_##name##_.is_set()) { \ HConstant* constant = new(zone()) HConstant( \ Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \ + Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \ + false, \ Representation::Tagged(), \ htype, \ true, \ @@ -719,11 +704,11 @@ } -DEFINE_GET_CONSTANT(Undefined, undefined, HType::Tagged(), false) -DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true) -DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false) -DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false) -DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false) +DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false) +DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true) +DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false) +DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false) +DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false) #undef DEFINE_GET_CONSTANT @@ -762,54 +747,52 @@ } +HGraphBuilder::IfBuilder::IfBuilder() : builder_(NULL), needs_compare_(true) {} + + HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder) - : builder_(builder), - finished_(false), - did_then_(false), - did_else_(false), - did_else_if_(false), - did_and_(false), - did_or_(false), - captured_(false), - needs_compare_(true), - pending_merge_block_(false), - split_edge_merge_block_(NULL), - merge_at_join_blocks_(NULL), - normal_merge_at_join_block_count_(0), - deopt_merge_at_join_block_count_(0) { - HEnvironment* env = 
builder->environment(); - first_true_block_ = builder->CreateBasicBlock(env->Copy()); - first_false_block_ = builder->CreateBasicBlock(env->Copy()); + : needs_compare_(true) { + Initialize(builder); +} + + +HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, + HIfContinuation* continuation) + : needs_compare_(false), first_true_block_(NULL), first_false_block_(NULL) { + InitializeDontCreateBlocks(builder); + continuation->Continue(&first_true_block_, &first_false_block_); } -HGraphBuilder::IfBuilder::IfBuilder( - HGraphBuilder* builder, - HIfContinuation* continuation) - : builder_(builder), - finished_(false), - did_then_(false), - did_else_(false), - did_else_if_(false), - did_and_(false), - did_or_(false), - captured_(false), - needs_compare_(false), - pending_merge_block_(false), - first_true_block_(NULL), - first_false_block_(NULL), - split_edge_merge_block_(NULL), - merge_at_join_blocks_(NULL), - normal_merge_at_join_block_count_(0), - deopt_merge_at_join_block_count_(0) { - continuation->Continue(&first_true_block_, - &first_false_block_); +void HGraphBuilder::IfBuilder::InitializeDontCreateBlocks( + HGraphBuilder* builder) { + builder_ = builder; + finished_ = false; + did_then_ = false; + did_else_ = false; + did_else_if_ = false; + did_and_ = false; + did_or_ = false; + captured_ = false; + pending_merge_block_ = false; + split_edge_merge_block_ = NULL; + merge_at_join_blocks_ = NULL; + normal_merge_at_join_block_count_ = 0; + deopt_merge_at_join_block_count_ = 0; +} + + +void HGraphBuilder::IfBuilder::Initialize(HGraphBuilder* builder) { + InitializeDontCreateBlocks(builder); + HEnvironment* env = builder->environment(); + first_true_block_ = builder->CreateBasicBlock(env->Copy()); + first_false_block_ = builder->CreateBasicBlock(env->Copy()); } HControlInstruction* HGraphBuilder::IfBuilder::AddCompare( HControlInstruction* compare) { - ASSERT(did_then_ == did_else_); + DCHECK(did_then_ == did_else_); if (did_else_) { // Handle if-then-elseif 
did_else_if_ = true; @@ -819,14 +802,13 @@ did_or_ = false; pending_merge_block_ = false; split_edge_merge_block_ = NULL; - HEnvironment* env = builder_->environment(); - first_true_block_ = builder_->CreateBasicBlock(env->Copy()); - first_false_block_ = builder_->CreateBasicBlock(env->Copy()); + HEnvironment* env = builder()->environment(); + first_true_block_ = builder()->CreateBasicBlock(env->Copy()); + first_false_block_ = builder()->CreateBasicBlock(env->Copy()); } if (split_edge_merge_block_ != NULL) { HEnvironment* env = first_false_block_->last_environment(); - HBasicBlock* split_edge = - builder_->CreateBasicBlock(env->Copy()); + HBasicBlock* split_edge = builder()->CreateBasicBlock(env->Copy()); if (did_or_) { compare->SetSuccessorAt(0, split_edge); compare->SetSuccessorAt(1, first_false_block_); @@ -834,81 +816,80 @@ compare->SetSuccessorAt(0, first_true_block_); compare->SetSuccessorAt(1, split_edge); } - builder_->GotoNoSimulate(split_edge, split_edge_merge_block_); + builder()->GotoNoSimulate(split_edge, split_edge_merge_block_); } else { compare->SetSuccessorAt(0, first_true_block_); compare->SetSuccessorAt(1, first_false_block_); } - builder_->FinishCurrentBlock(compare); + builder()->FinishCurrentBlock(compare); needs_compare_ = false; return compare; } void HGraphBuilder::IfBuilder::Or() { - ASSERT(!needs_compare_); - ASSERT(!did_and_); + DCHECK(!needs_compare_); + DCHECK(!did_and_); did_or_ = true; HEnvironment* env = first_false_block_->last_environment(); if (split_edge_merge_block_ == NULL) { - split_edge_merge_block_ = - builder_->CreateBasicBlock(env->Copy()); - builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_); + split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy()); + builder()->GotoNoSimulate(first_true_block_, split_edge_merge_block_); first_true_block_ = split_edge_merge_block_; } - builder_->set_current_block(first_false_block_); - first_false_block_ = builder_->CreateBasicBlock(env->Copy()); + 
builder()->set_current_block(first_false_block_); + first_false_block_ = builder()->CreateBasicBlock(env->Copy()); } void HGraphBuilder::IfBuilder::And() { - ASSERT(!needs_compare_); - ASSERT(!did_or_); + DCHECK(!needs_compare_); + DCHECK(!did_or_); did_and_ = true; HEnvironment* env = first_false_block_->last_environment(); if (split_edge_merge_block_ == NULL) { - split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy()); - builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_); + split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy()); + builder()->GotoNoSimulate(first_false_block_, split_edge_merge_block_); first_false_block_ = split_edge_merge_block_; } - builder_->set_current_block(first_true_block_); - first_true_block_ = builder_->CreateBasicBlock(env->Copy()); + builder()->set_current_block(first_true_block_); + first_true_block_ = builder()->CreateBasicBlock(env->Copy()); } void HGraphBuilder::IfBuilder::CaptureContinuation( HIfContinuation* continuation) { - ASSERT(!did_else_if_); - ASSERT(!finished_); - ASSERT(!captured_); + DCHECK(!did_else_if_); + DCHECK(!finished_); + DCHECK(!captured_); HBasicBlock* true_block = NULL; HBasicBlock* false_block = NULL; Finish(&true_block, &false_block); - ASSERT(true_block != NULL); - ASSERT(false_block != NULL); + DCHECK(true_block != NULL); + DCHECK(false_block != NULL); continuation->Capture(true_block, false_block); captured_ = true; - builder_->set_current_block(NULL); + builder()->set_current_block(NULL); End(); } void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) { - ASSERT(!did_else_if_); - ASSERT(!finished_); - ASSERT(!captured_); + DCHECK(!did_else_if_); + DCHECK(!finished_); + DCHECK(!captured_); HBasicBlock* true_block = NULL; HBasicBlock* false_block = NULL; Finish(&true_block, &false_block); merge_at_join_blocks_ = NULL; if (true_block != NULL && !true_block->IsFinished()) { - ASSERT(continuation->IsTrueReachable()); - 
builder_->GotoNoSimulate(true_block, continuation->true_branch()); + DCHECK(continuation->IsTrueReachable()); + builder()->GotoNoSimulate(true_block, continuation->true_branch()); } if (false_block != NULL && !false_block->IsFinished()) { - ASSERT(continuation->IsFalseReachable()); - builder_->GotoNoSimulate(false_block, continuation->false_branch()); + DCHECK(continuation->IsFalseReachable()); + builder()->GotoNoSimulate(false_block, continuation->false_branch()); } captured_ = true; End(); @@ -916,75 +897,74 @@ void HGraphBuilder::IfBuilder::Then() { - ASSERT(!captured_); - ASSERT(!finished_); + DCHECK(!captured_); + DCHECK(!finished_); did_then_ = true; if (needs_compare_) { // Handle if's without any expressions, they jump directly to the "else" // branch. However, we must pretend that the "then" branch is reachable, // so that the graph builder visits it and sees any live range extending // constructs within it. - HConstant* constant_false = builder_->graph()->GetConstantFalse(); + HConstant* constant_false = builder()->graph()->GetConstantFalse(); ToBooleanStub::Types boolean_type = ToBooleanStub::Types(); boolean_type.Add(ToBooleanStub::BOOLEAN); HBranch* branch = builder()->New<HBranch>( constant_false, boolean_type, first_true_block_, first_false_block_); - builder_->FinishCurrentBlock(branch); + builder()->FinishCurrentBlock(branch); } - builder_->set_current_block(first_true_block_); + builder()->set_current_block(first_true_block_); pending_merge_block_ = true; } void HGraphBuilder::IfBuilder::Else() { - ASSERT(did_then_); - ASSERT(!captured_); - ASSERT(!finished_); + DCHECK(did_then_); + DCHECK(!captured_); + DCHECK(!finished_); AddMergeAtJoinBlock(false); - builder_->set_current_block(first_false_block_); + builder()->set_current_block(first_false_block_); pending_merge_block_ = true; did_else_ = true; } void HGraphBuilder::IfBuilder::Deopt(const char* reason) { - ASSERT(did_then_); - builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER); + 
DCHECK(did_then_); + builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER); AddMergeAtJoinBlock(true); } void HGraphBuilder::IfBuilder::Return(HValue* value) { - HValue* parameter_count = builder_->graph()->GetConstantMinus1(); - builder_->FinishExitCurrentBlock( - builder_->New<HReturn>(value, parameter_count)); + HValue* parameter_count = builder()->graph()->GetConstantMinus1(); + builder()->FinishExitCurrentBlock( + builder()->New<HReturn>(value, parameter_count)); AddMergeAtJoinBlock(false); } void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) { if (!pending_merge_block_) return; - HBasicBlock* block = builder_->current_block(); - ASSERT(block == NULL || !block->IsFinished()); - MergeAtJoinBlock* record = - new(builder_->zone()) MergeAtJoinBlock(block, deopt, - merge_at_join_blocks_); + HBasicBlock* block = builder()->current_block(); + DCHECK(block == NULL || !block->IsFinished()); + MergeAtJoinBlock* record = new (builder()->zone()) + MergeAtJoinBlock(block, deopt, merge_at_join_blocks_); merge_at_join_blocks_ = record; if (block != NULL) { - ASSERT(block->end() == NULL); + DCHECK(block->end() == NULL); if (deopt) { normal_merge_at_join_block_count_++; } else { deopt_merge_at_join_block_count_++; } } - builder_->set_current_block(NULL); + builder()->set_current_block(NULL); pending_merge_block_ = false; } void HGraphBuilder::IfBuilder::Finish() { - ASSERT(!finished_); + DCHECK(!finished_); if (!did_then_) { Then(); } @@ -1009,7 +989,7 @@ if (then_continuation != NULL) { *then_continuation = then_record->block_; } - ASSERT(then_record->next_ == NULL); + DCHECK(then_record->next_ == NULL); } @@ -1019,9 +999,9 @@ int total_merged_blocks = normal_merge_at_join_block_count_ + deopt_merge_at_join_block_count_; - ASSERT(total_merged_blocks >= 1); - HBasicBlock* merge_block = total_merged_blocks == 1 - ? NULL : builder_->graph()->CreateBasicBlock(); + DCHECK(total_merged_blocks >= 1); + HBasicBlock* merge_block = + total_merged_blocks == 1 ? 
NULL : builder()->graph()->CreateBasicBlock(); // Merge non-deopt blocks first to ensure environment has right size for // padding. @@ -1032,10 +1012,10 @@ // if, then just set it as the current block and continue rather then // creating an unnecessary merge block. if (total_merged_blocks == 1) { - builder_->set_current_block(current->block_); + builder()->set_current_block(current->block_); return; } - builder_->GotoNoSimulate(current->block_, merge_block); + builder()->GotoNoSimulate(current->block_, merge_block); } current = current->next_; } @@ -1044,44 +1024,48 @@ current = merge_at_join_blocks_; while (current != NULL) { if (current->deopt_ && current->block_ != NULL) { - current->block_->FinishExit( - HAbnormalExit::New(builder_->zone(), NULL), - HSourcePosition::Unknown()); + current->block_->FinishExit(HAbnormalExit::New(builder()->zone(), NULL), + HSourcePosition::Unknown()); } current = current->next_; } - builder_->set_current_block(merge_block); + builder()->set_current_block(merge_block); } -HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, - HValue* context, - LoopBuilder::Direction direction) - : builder_(builder), - context_(context), - direction_(direction), - finished_(false) { - header_block_ = builder->CreateLoopHeaderBlock(); - body_block_ = NULL; - exit_block_ = NULL; - exit_trampoline_block_ = NULL; - increment_amount_ = builder_->graph()->GetConstant1(); +HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder) { + Initialize(builder, NULL, kWhileTrue, NULL); +} + + +HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context, + LoopBuilder::Direction direction) { + Initialize(builder, context, direction, builder->graph()->GetConstant1()); } -HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, - HValue* context, +HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context, LoopBuilder::Direction direction, - HValue* increment_amount) - : builder_(builder), - 
context_(context), - direction_(direction), - finished_(false) { + HValue* increment_amount) { + Initialize(builder, context, direction, increment_amount); + increment_amount_ = increment_amount; +} + + +void HGraphBuilder::LoopBuilder::Initialize(HGraphBuilder* builder, + HValue* context, + Direction direction, + HValue* increment_amount) { + builder_ = builder; + context_ = context; + direction_ = direction; + increment_amount_ = increment_amount; + + finished_ = false; header_block_ = builder->CreateLoopHeaderBlock(); body_block_ = NULL; exit_block_ = NULL; exit_trampoline_block_ = NULL; - increment_amount_ = increment_amount; } @@ -1089,6 +1073,7 @@ HValue* initial, HValue* terminating, Token::Value token) { + DCHECK(direction_ != kWhileTrue); HEnvironment* env = builder_->environment(); phi_ = header_block_->AddNewPhi(env->values()->length()); phi_->AddInput(initial); @@ -1125,12 +1110,26 @@ } +void HGraphBuilder::LoopBuilder::BeginBody(int drop_count) { + DCHECK(direction_ == kWhileTrue); + HEnvironment* env = builder_->environment(); + builder_->GotoNoSimulate(header_block_); + builder_->set_current_block(header_block_); + env->Drop(drop_count); +} + + void HGraphBuilder::LoopBuilder::Break() { if (exit_trampoline_block_ == NULL) { // Its the first time we saw a break. 
- HEnvironment* env = exit_block_->last_environment()->Copy(); - exit_trampoline_block_ = builder_->CreateBasicBlock(env); - builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_); + if (direction_ == kWhileTrue) { + HEnvironment* env = builder_->environment()->Copy(); + exit_trampoline_block_ = builder_->CreateBasicBlock(env); + } else { + HEnvironment* env = exit_block_->last_environment()->Copy(); + exit_trampoline_block_ = builder_->CreateBasicBlock(env); + builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_); + } } builder_->GotoNoSimulate(exit_trampoline_block_); @@ -1139,7 +1138,7 @@ void HGraphBuilder::LoopBuilder::EndBody() { - ASSERT(!finished_); + DCHECK(!finished_); if (direction_ == kPostIncrement || direction_ == kPostDecrement) { if (direction_ == kPostIncrement) { @@ -1151,8 +1150,11 @@ builder_->AddInstruction(increment_); } - // Push the new increment value on the expression stack to merge into the phi. - builder_->environment()->Push(increment_); + if (direction_ != kWhileTrue) { + // Push the new increment value on the expression stack to merge into + // the phi. 
+ builder_->environment()->Push(increment_); + } HBasicBlock* last_block = builder_->current_block(); builder_->GotoNoSimulate(last_block, header_block_); header_block_->loop_information()->RegisterBackEdge(last_block); @@ -1178,8 +1180,8 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { - ASSERT(current_block() != NULL); - ASSERT(!FLAG_hydrogen_track_positions || + DCHECK(current_block() != NULL); + DCHECK(!FLAG_hydrogen_track_positions || !position_.IsUnknown() || !info_->IsOptimizing()); current_block()->AddInstruction(instr, source_position()); @@ -1191,7 +1193,7 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) { - ASSERT(!FLAG_hydrogen_track_positions || + DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() || !position_.IsUnknown()); current_block()->Finish(last, source_position()); @@ -1202,7 +1204,7 @@ void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) { - ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() || + DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() || !position_.IsUnknown()); current_block()->FinishExit(instruction, source_position()); if (instruction->IsReturn() || instruction->IsAbnormalExit()) { @@ -1226,8 +1228,8 @@ void HGraphBuilder::AddSimulate(BailoutId id, RemovableSimulate removable) { - ASSERT(current_block() != NULL); - ASSERT(!graph()->IsInsideNoSideEffectsScope()); + DCHECK(current_block() != NULL); + DCHECK(!graph()->IsInsideNoSideEffectsScope()); current_block()->AddNewSimulate(id, source_position(), removable); } @@ -1248,6 +1250,16 @@ } +HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) { + HValue* map = Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + + HValue* bit_field2 = Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), + HObjectAccess::ForMapBitField2()); + return BuildDecodeField<Map::ElementsKindBits>(bit_field2); +} + + HValue* 
HGraphBuilder::BuildCheckHeapObject(HValue* obj) { if (obj->type().IsHeapObject()) return obj; return Add<HCheckHeapObject>(obj); @@ -1260,14 +1272,9 @@ } -HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) { - return Add<HCheckMaps>(obj, map, top_info()); -} - - HValue* HGraphBuilder::BuildCheckString(HValue* string) { if (!string->type().IsString()) { - ASSERT(!string->IsConstant() || + DCHECK(!string->IsConstant() || !HConstant::cast(string)->HasStringValue()); BuildCheckHeapObject(string); return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING); @@ -1314,11 +1321,8 @@ HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap)); HValue* max_capacity = AddUncasted<HAdd>(current_capacity, max_gap); - IfBuilder key_checker(this); - key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT); - key_checker.Then(); - key_checker.ElseDeopt("Key out of capacity range"); - key_checker.End(); + + Add<HBoundsCheck>(key, max_capacity); HValue* new_capacity = BuildNewElementsCapacity(key); HValue* new_elements = BuildGrowElementsCapacity(object, elements, @@ -1389,7 +1393,7 @@ ElementsKind from_kind, ElementsKind to_kind, bool is_jsarray) { - ASSERT(!IsFastHoleyElementsKind(from_kind) || + DCHECK(!IsFastHoleyElementsKind(from_kind) || IsFastHoleyElementsKind(to_kind)); if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) { @@ -1425,81 +1429,202 @@ } -HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper( - HValue* elements, - HValue* key, - HValue* hash, - HValue* mask, - int current_probe) { - if (current_probe == kNumberDictionaryProbes) { - return NULL; - } - - int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe); - HValue* raw_index = (current_probe == 0) - ? 
hash - : AddUncasted<HAdd>(hash, Add<HConstant>(offset)); - raw_index = AddUncasted<HBitwise>(Token::BIT_AND, raw_index, mask); - int32_t entry_size = SeededNumberDictionary::kEntrySize; - raw_index = AddUncasted<HMul>(raw_index, Add<HConstant>(entry_size)); - raw_index->ClearFlag(HValue::kCanOverflow); +void HGraphBuilder::BuildJSObjectCheck(HValue* receiver, + int bit_field_mask) { + // Check that the object isn't a smi. + Add<HCheckHeapObject>(receiver); + + // Get the map of the receiver. + HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + + // Check the instance type and if an access check is needed, this can be + // done with a single load, since both bytes are adjacent in the map. + HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField()); + HValue* instance_type_and_bit_field = + Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), access); + + HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8)); + HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND, + instance_type_and_bit_field, + mask); + HValue* sub_result = AddUncasted<HSub>(and_result, + Add<HConstant>(JS_OBJECT_TYPE)); + Add<HBoundsCheck>(sub_result, + Add<HConstant>(LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE)); +} - int32_t base_offset = SeededNumberDictionary::kElementsStartIndex; - HValue* key_index = AddUncasted<HAdd>(raw_index, Add<HConstant>(base_offset)); - key_index->ClearFlag(HValue::kCanOverflow); - HValue* candidate_key = Add<HLoadKeyed>(elements, key_index, - static_cast<HValue*>(NULL), - FAST_ELEMENTS); - - IfBuilder key_compare(this); - key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key); - key_compare.Then(); +void HGraphBuilder::BuildKeyedIndexCheck(HValue* key, + HIfContinuation* join_continuation) { + // The sometimes unintuitively backward ordering of the ifs below is + // convoluted, but necessary. 
All of the paths must guarantee that the + // if-true of the continuation returns a smi element index and the if-false of + // the continuation returns either a symbol or a unique string key. All other + // object types cause a deopt to fall back to the runtime. + + IfBuilder key_smi_if(this); + key_smi_if.If<HIsSmiAndBranch>(key); + key_smi_if.Then(); { - // Key at the current probe doesn't match, try at the next probe. - HValue* result = BuildUncheckedDictionaryElementLoadHelper( - elements, key, hash, mask, current_probe + 1); - if (result == NULL) { - key_compare.Deopt("probes exhausted in keyed load dictionary lookup"); - result = graph()->GetConstantUndefined(); - } else { - Push(result); - } + Push(key); // Nothing to do, just continue to true of continuation. } - key_compare.Else(); + key_smi_if.Else(); { - // Key at current probe matches. Details must be zero, otherwise the - // dictionary element requires special handling. - HValue* details_index = AddUncasted<HAdd>( - raw_index, Add<HConstant>(base_offset + 2)); - details_index->ClearFlag(HValue::kCanOverflow); - - HValue* details = Add<HLoadKeyed>(elements, details_index, - static_cast<HValue*>(NULL), - FAST_ELEMENTS); - IfBuilder details_compare(this); - details_compare.If<HCompareNumericAndBranch>(details, - graph()->GetConstant0(), - Token::NE); - details_compare.ThenDeopt("keyed load dictionary element not fast case"); + HValue* map = Add<HLoadNamedField>(key, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + HValue* instance_type = + Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), + HObjectAccess::ForMapInstanceType()); + + // Non-unique string, check for a string with a hash code that is actually + // an index. 
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + IfBuilder not_string_or_name_if(this); + not_string_or_name_if.If<HCompareNumericAndBranch>( + instance_type, + Add<HConstant>(LAST_UNIQUE_NAME_TYPE), + Token::GT); - details_compare.Else(); + not_string_or_name_if.Then(); { - // Key matches and details are zero --> fast case. Load and return the - // value. - HValue* result_index = AddUncasted<HAdd>( - raw_index, Add<HConstant>(base_offset + 1)); - result_index->ClearFlag(HValue::kCanOverflow); - - Push(Add<HLoadKeyed>(elements, result_index, - static_cast<HValue*>(NULL), - FAST_ELEMENTS)); + // Non-smi, non-Name, non-String: Try to convert to smi in case of + // HeapNumber. + // TODO(danno): This could call some variant of ToString + Push(AddUncasted<HForceRepresentation>(key, Representation::Smi())); } - details_compare.End(); + not_string_or_name_if.Else(); + { + // String or Name: check explicitly for Name, they can short-circuit + // directly to unique non-index key path. + IfBuilder not_symbol_if(this); + not_symbol_if.If<HCompareNumericAndBranch>( + instance_type, + Add<HConstant>(SYMBOL_TYPE), + Token::NE); + + not_symbol_if.Then(); + { + // String: check whether the String is a String of an index. If it is, + // extract the index value from the hash. + HValue* hash = + Add<HLoadNamedField>(key, static_cast<HValue*>(NULL), + HObjectAccess::ForNameHashField()); + HValue* not_index_mask = Add<HConstant>(static_cast<int>( + String::kContainsCachedArrayIndexMask)); + + HValue* not_index_test = AddUncasted<HBitwise>( + Token::BIT_AND, hash, not_index_mask); + + IfBuilder string_index_if(this); + string_index_if.If<HCompareNumericAndBranch>(not_index_test, + graph()->GetConstant0(), + Token::EQ); + string_index_if.Then(); + { + // String with index in hash: extract string and merge to index path. 
+ Push(BuildDecodeField<String::ArrayIndexValueBits>(hash)); + } + string_index_if.Else(); + { + // Key is a non-index String, check for uniqueness/internalization. + // If it's not internalized yet, internalize it now. + HValue* not_internalized_bit = AddUncasted<HBitwise>( + Token::BIT_AND, + instance_type, + Add<HConstant>(static_cast<int>(kIsNotInternalizedMask))); + + IfBuilder internalized(this); + internalized.If<HCompareNumericAndBranch>(not_internalized_bit, + graph()->GetConstant0(), + Token::EQ); + internalized.Then(); + Push(key); + + internalized.Else(); + Add<HPushArguments>(key); + HValue* intern_key = Add<HCallRuntime>( + isolate()->factory()->empty_string(), + Runtime::FunctionForId(Runtime::kInternalizeString), 1); + Push(intern_key); + + internalized.End(); + // Key guaranteed to be a unique string + } + string_index_if.JoinContinuation(join_continuation); + } + not_symbol_if.Else(); + { + Push(key); // Key is symbol + } + not_symbol_if.JoinContinuation(join_continuation); + } + not_string_or_name_if.JoinContinuation(join_continuation); } - key_compare.End(); + key_smi_if.JoinContinuation(join_continuation); +} - return Pop(); + +void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) { + // Get the the instance type of the receiver, and make sure that it is + // not one of the global object types. 
+ HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + HValue* instance_type = + Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), + HObjectAccess::ForMapInstanceType()); + STATIC_ASSERT(JS_BUILTINS_OBJECT_TYPE == JS_GLOBAL_OBJECT_TYPE + 1); + HValue* min_global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE); + HValue* max_global_type = Add<HConstant>(JS_BUILTINS_OBJECT_TYPE); + + IfBuilder if_global_object(this); + if_global_object.If<HCompareNumericAndBranch>(instance_type, + max_global_type, + Token::LTE); + if_global_object.And(); + if_global_object.If<HCompareNumericAndBranch>(instance_type, + min_global_type, + Token::GTE); + if_global_object.ThenDeopt("receiver was a global object"); + if_global_object.End(); +} + + +void HGraphBuilder::BuildTestForDictionaryProperties( + HValue* object, + HIfContinuation* continuation) { + HValue* properties = Add<HLoadNamedField>( + object, static_cast<HValue*>(NULL), + HObjectAccess::ForPropertiesPointer()); + HValue* properties_map = + Add<HLoadNamedField>(properties, static_cast<HValue*>(NULL), + HObjectAccess::ForMap()); + HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex); + IfBuilder builder(this); + builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map); + builder.CaptureContinuation(continuation); +} + + +HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object, + HValue* key) { + // Load the map of the receiver, compute the keyed lookup cache hash + // based on 32 bits of the map pointer and the string hash. 
+ HValue* object_map = + Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), + HObjectAccess::ForMapAsInteger32()); + HValue* shifted_map = AddUncasted<HShr>( + object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift)); + HValue* string_hash = + Add<HLoadNamedField>(key, static_cast<HValue*>(NULL), + HObjectAccess::ForStringHashField()); + HValue* shifted_hash = AddUncasted<HShr>( + string_hash, Add<HConstant>(String::kHashShift)); + HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map, + shifted_hash); + int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); + return AddUncasted<HBitwise>(Token::BIT_AND, xor_result, + Add<HConstant>(mask)); } @@ -1537,11 +1662,9 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver, - HValue* key) { - HValue* elements = AddLoadElements(receiver); - - HValue* hash = BuildElementIndexHash(key); - + HValue* elements, + HValue* key, + HValue* hash) { HValue* capacity = Add<HLoadKeyed>( elements, Add<HConstant>(NameDictionary::kCapacityIndex), @@ -1552,8 +1675,129 @@ mask->ChangeRepresentation(Representation::Integer32()); mask->ClearFlag(HValue::kCanOverflow); - return BuildUncheckedDictionaryElementLoadHelper(elements, key, - hash, mask, 0); + HValue* entry = hash; + HValue* count = graph()->GetConstant1(); + Push(entry); + Push(count); + + HIfContinuation return_or_loop_continuation(graph()->CreateBasicBlock(), + graph()->CreateBasicBlock()); + HIfContinuation found_key_match_continuation(graph()->CreateBasicBlock(), + graph()->CreateBasicBlock()); + LoopBuilder probe_loop(this); + probe_loop.BeginBody(2); // Drop entry, count from last environment to + // appease live range building without simulates. 
+ + count = Pop(); + entry = Pop(); + entry = AddUncasted<HBitwise>(Token::BIT_AND, entry, mask); + int entry_size = SeededNumberDictionary::kEntrySize; + HValue* base_index = AddUncasted<HMul>(entry, Add<HConstant>(entry_size)); + base_index->ClearFlag(HValue::kCanOverflow); + int start_offset = SeededNumberDictionary::kElementsStartIndex; + HValue* key_index = + AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset)); + key_index->ClearFlag(HValue::kCanOverflow); + + HValue* candidate_key = Add<HLoadKeyed>( + elements, key_index, static_cast<HValue*>(NULL), FAST_ELEMENTS); + IfBuilder if_undefined(this); + if_undefined.If<HCompareObjectEqAndBranch>(candidate_key, + graph()->GetConstantUndefined()); + if_undefined.Then(); + { + // element == undefined means "not found". Call the runtime. + // TODO(jkummerow): walk the prototype chain instead. + Add<HPushArguments>(receiver, key); + Push(Add<HCallRuntime>(isolate()->factory()->empty_string(), + Runtime::FunctionForId(Runtime::kKeyedGetProperty), + 2)); + } + if_undefined.Else(); + { + IfBuilder if_match(this); + if_match.If<HCompareObjectEqAndBranch>(candidate_key, key); + if_match.Then(); + if_match.Else(); + + // Update non-internalized string in the dictionary with internalized key? 
+ IfBuilder if_update_with_internalized(this); + HValue* smi_check = + if_update_with_internalized.IfNot<HIsSmiAndBranch>(candidate_key); + if_update_with_internalized.And(); + HValue* map = AddLoadMap(candidate_key, smi_check); + HValue* instance_type = Add<HLoadNamedField>( + map, static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType()); + HValue* not_internalized_bit = AddUncasted<HBitwise>( + Token::BIT_AND, instance_type, + Add<HConstant>(static_cast<int>(kIsNotInternalizedMask))); + if_update_with_internalized.If<HCompareNumericAndBranch>( + not_internalized_bit, graph()->GetConstant0(), Token::NE); + if_update_with_internalized.And(); + if_update_with_internalized.IfNot<HCompareObjectEqAndBranch>( + candidate_key, graph()->GetConstantHole()); + if_update_with_internalized.AndIf<HStringCompareAndBranch>(candidate_key, + key, Token::EQ); + if_update_with_internalized.Then(); + // Replace a key that is a non-internalized string by the equivalent + // internalized string for faster further lookups. + Add<HStoreKeyed>(elements, key_index, key, FAST_ELEMENTS); + if_update_with_internalized.Else(); + + if_update_with_internalized.JoinContinuation(&found_key_match_continuation); + if_match.JoinContinuation(&found_key_match_continuation); + + IfBuilder found_key_match(this, &found_key_match_continuation); + found_key_match.Then(); + // Key at current probe matches. Relevant bits in the |details| field must + // be zero, otherwise the dictionary element requires special handling. 
+ HValue* details_index = + AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2)); + details_index->ClearFlag(HValue::kCanOverflow); + HValue* details = Add<HLoadKeyed>( + elements, details_index, static_cast<HValue*>(NULL), FAST_ELEMENTS); + int details_mask = PropertyDetails::TypeField::kMask | + PropertyDetails::DeletedField::kMask; + details = AddUncasted<HBitwise>(Token::BIT_AND, details, + Add<HConstant>(details_mask)); + IfBuilder details_compare(this); + details_compare.If<HCompareNumericAndBranch>( + details, graph()->GetConstant0(), Token::EQ); + details_compare.Then(); + HValue* result_index = + AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1)); + result_index->ClearFlag(HValue::kCanOverflow); + Push(Add<HLoadKeyed>(elements, result_index, static_cast<HValue*>(NULL), + FAST_ELEMENTS)); + details_compare.Else(); + Add<HPushArguments>(receiver, key); + Push(Add<HCallRuntime>(isolate()->factory()->empty_string(), + Runtime::FunctionForId(Runtime::kKeyedGetProperty), + 2)); + details_compare.End(); + + found_key_match.Else(); + found_key_match.JoinContinuation(&return_or_loop_continuation); + } + if_undefined.JoinContinuation(&return_or_loop_continuation); + + IfBuilder return_or_loop(this, &return_or_loop_continuation); + return_or_loop.Then(); + probe_loop.Break(); + + return_or_loop.Else(); + entry = AddUncasted<HAdd>(entry, count); + entry->ClearFlag(HValue::kCanOverflow); + count = AddUncasted<HAdd>(count, graph()->GetConstant1()); + count->ClearFlag(HValue::kCanOverflow); + Push(entry); + Push(count); + + probe_loop.EndBody(); + + return_or_loop.End(); + + return Pop(); } @@ -1561,23 +1805,18 @@ HValue* index, HValue* input) { NoObservableSideEffectsScope scope(this); + HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray); + Add<HBoundsCheck>(length, max_length); - // Compute the size of the RegExpResult followed by FixedArray with length. 
- HValue* size = length; - size = AddUncasted<HShl>(size, Add<HConstant>(kPointerSizeLog2)); - size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>( - JSRegExpResult::kSize + FixedArray::kHeaderSize))); - - // Make sure size does not exceeds max regular heap object size. - Add<HBoundsCheck>(size, Add<HConstant>(Page::kMaxRegularHeapObjectSize)); + // Generate size calculation code here in order to make it dominate + // the JSRegExpResult allocation. + ElementsKind elements_kind = FAST_ELEMENTS; + HValue* size = BuildCalculateElementsSize(elements_kind, length); // Allocate the JSRegExpResult and the FixedArray in one step. HValue* result = Add<HAllocate>( - size, HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE); - - // Determine the elements FixedArray. - HValue* elements = Add<HInnerAllocatedObject>( - result, Add<HConstant>(JSRegExpResult::kSize)); + Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(), + NOT_TENURED, JS_ARRAY_TYPE); // Initialize the JSRegExpResult header. 
HValue* global_object = Add<HLoadNamedField>( @@ -1586,15 +1825,19 @@ HValue* native_context = Add<HLoadNamedField>( global_object, static_cast<HValue*>(NULL), HObjectAccess::ForGlobalObjectNativeContext()); - AddStoreMapNoWriteBarrier(result, Add<HLoadNamedField>( + Add<HStoreNamedField>( + result, HObjectAccess::ForMap(), + Add<HLoadNamedField>( native_context, static_cast<HValue*>(NULL), HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX))); + HConstant* empty_fixed_array = + Add<HConstant>(isolate()->factory()->empty_fixed_array()); Add<HStoreNamedField>( result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset), - Add<HConstant>(isolate()->factory()->empty_fixed_array())); + empty_fixed_array); Add<HStoreNamedField>( result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset), - elements); + empty_fixed_array); Add<HStoreNamedField>( result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length); @@ -1606,19 +1849,22 @@ result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset), input); - // Initialize the elements header. - AddStoreMapConstantNoWriteBarrier(elements, - isolate()->factory()->fixed_array_map()); - Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(), length); + // Allocate and initialize the elements header. + HAllocate* elements = BuildAllocateElements(elements_kind, size); + BuildInitializeElementsHeader(elements, elements_kind, length); + + HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize( + elements_kind, max_length->Integer32Value()); + elements->set_size_upper_bound(size_in_bytes_upper_bound); + + Add<HStoreNamedField>( + result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset), + elements); // Initialize the elements contents with undefined. 
- LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement); - index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT); - { - Add<HStoreKeyed>(elements, index, graph()->GetConstantUndefined(), - FAST_ELEMENTS); - } - loop.EndBody(); + BuildFillElementsWithValue( + elements, elements_kind, graph()->GetConstant0(), length, + graph()->GetConstantUndefined()); return result; } @@ -1700,26 +1946,32 @@ static_cast<HValue*>(NULL), FAST_ELEMENTS, ALLOW_RETURN_HOLE); - // Check if key is a heap number (the number string cache contains only - // SMIs and heap number, so it is sufficient to do a SMI check here). + // Check if the key is a heap number and compare it with the object. IfBuilder if_keyisnotsmi(this); HValue* keyisnotsmi = if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key); if_keyisnotsmi.Then(); { - // Check if values of key and object match. - IfBuilder if_keyeqobject(this); - if_keyeqobject.If<HCompareNumericAndBranch>( - Add<HLoadNamedField>(key, keyisnotsmi, - HObjectAccess::ForHeapNumberValue()), - Add<HLoadNamedField>(object, objectisnumber, - HObjectAccess::ForHeapNumberValue()), - Token::EQ); - if_keyeqobject.Then(); + IfBuilder if_keyisheapnumber(this); + if_keyisheapnumber.If<HCompareMap>( + key, isolate()->factory()->heap_number_map()); + if_keyisheapnumber.Then(); { - // Make the key_index available. - Push(key_index); + // Check if values of key and object match. + IfBuilder if_keyeqobject(this); + if_keyeqobject.If<HCompareNumericAndBranch>( + Add<HLoadNamedField>(key, keyisnotsmi, + HObjectAccess::ForHeapNumberValue()), + Add<HLoadNamedField>(object, objectisnumber, + HObjectAccess::ForHeapNumberValue()), + Token::EQ); + if_keyeqobject.Then(); + { + // Make the key_index available. 
+ Push(key_index); + } + if_keyeqobject.JoinContinuation(&found); } - if_keyeqobject.JoinContinuation(&found); + if_keyisheapnumber.JoinContinuation(&found); } if_keyisnotsmi.JoinContinuation(&found); } @@ -1751,10 +2003,10 @@ if_found.Else(); { // Cache miss, fallback to runtime. - Add<HPushArgument>(object); + Add<HPushArguments>(object); Push(Add<HCallRuntime>( isolate()->factory()->empty_string(), - Runtime::FunctionForId(Runtime::kHiddenNumberToStringSkipCache), + Runtime::FunctionForId(Runtime::kNumberToStringSkipCache), 1)); } if_found.End(); @@ -1794,7 +2046,8 @@ HValue* right_length) { // Compute the combined string length and check against max string length. HValue* length = AddUncasted<HAdd>(left_length, right_length); - HValue* max_length = Add<HConstant>(String::kMaxLength); + // Check that length <= kMaxLength <=> length < MaxLength + 1. + HValue* max_length = Add<HConstant>(String::kMaxLength + 1); Add<HBoundsCheck>(length, max_length); return length; } @@ -1813,7 +2066,7 @@ // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use // CONS_STRING_TYPE here. Below we decide whether the cons string is // one-byte or two-byte and set the appropriate map. - ASSERT(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE, + DCHECK(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE, CONS_ASCII_STRING_TYPE)); HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize), HType::String(), CONS_STRING_TYPE, @@ -1857,14 +2110,16 @@ if_onebyte.Then(); { // We can safely skip the write barrier for storing the map here. - Handle<Map> map = isolate()->factory()->cons_ascii_string_map(); - AddStoreMapConstantNoWriteBarrier(result, map); + Add<HStoreNamedField>( + result, HObjectAccess::ForMap(), + Add<HConstant>(isolate()->factory()->cons_ascii_string_map())); } if_onebyte.Else(); { // We can safely skip the write barrier for storing the map here. 
- Handle<Map> map = isolate()->factory()->cons_string_map(); - AddStoreMapConstantNoWriteBarrier(result, map); + Add<HStoreNamedField>( + result, HObjectAccess::ForMap(), + Add<HConstant>(isolate()->factory()->cons_string_map())); } if_onebyte.End(); @@ -1889,7 +2144,7 @@ HValue* dst_offset, String::Encoding dst_encoding, HValue* length) { - ASSERT(dst_encoding != String::ONE_BYTE_ENCODING || + DCHECK(dst_encoding != String::ONE_BYTE_ENCODING || src_encoding == String::ONE_BYTE_ENCODING); LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement); HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT); @@ -1906,7 +2161,7 @@ HValue* HGraphBuilder::BuildObjectSizeAlignment( HValue* unaligned_size, int header_size) { - ASSERT((header_size & kObjectAlignmentMask) == 0); + DCHECK((header_size & kObjectAlignmentMask) == 0); HValue* size = AddUncasted<HAdd>( unaligned_size, Add<HConstant>(static_cast<int32_t>( header_size + kObjectAlignmentMask))); @@ -1931,14 +2186,14 @@ // Do some manual constant folding here. if (left_length->IsConstant()) { HConstant* c_left_length = HConstant::cast(left_length); - ASSERT_NE(0, c_left_length->Integer32Value()); + DCHECK_NE(0, c_left_length->Integer32Value()); if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) { // The right string contains at least one character. return BuildCreateConsString(length, left, right, allocation_mode); } } else if (right_length->IsConstant()) { HConstant* c_right_length = HConstant::cast(right_length); - ASSERT_NE(0, c_right_length->Integer32Value()); + DCHECK_NE(0, c_right_length->Integer32Value()); if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) { // The left string contains at least one character. return BuildCreateConsString(length, left, right, allocation_mode); @@ -2023,9 +2278,7 @@ // STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here. 
HAllocate* result = BuildAllocate( size, HType::String(), STRING_TYPE, allocation_mode); - - // We can safely skip the write barrier for storing map here. - AddStoreMapNoWriteBarrier(result, map); + Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map); // Initialize the string fields. Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(), @@ -2074,11 +2327,10 @@ if_sameencodingandsequential.Else(); { // Fallback to the runtime to add the two strings. - Add<HPushArgument>(left); - Add<HPushArgument>(right); + Add<HPushArguments>(left, right); Push(Add<HCallRuntime>( isolate()->factory()->empty_string(), - Runtime::FunctionForId(Runtime::kHiddenStringAdd), + Runtime::FunctionForId(Runtime::kStringAdd), 2)); } if_sameencodingandsequential.End(); @@ -2147,7 +2399,7 @@ PropertyAccessType access_type, LoadKeyedHoleMode load_mode, KeyedAccessStoreMode store_mode) { - ASSERT((!IsExternalArrayElementsKind(elements_kind) && + DCHECK((!IsExternalArrayElementsKind(elements_kind) && !IsFixedTypedArrayElementsKind(elements_kind)) || !is_js_array); // No GVNFlag is necessary for ElementsKind if there is an explicit dependency @@ -2167,13 +2419,13 @@ if (access_type == STORE && (fast_elements || fast_smi_only_elements) && store_mode != STORE_NO_TRANSITION_HANDLE_COW) { HCheckMaps* check_cow_map = Add<HCheckMaps>( - elements, isolate()->factory()->fixed_array_map(), top_info()); + elements, isolate()->factory()->fixed_array_map()); check_cow_map->ClearDependsOnFlag(kElementsKind); } HInstruction* length = NULL; if (is_js_array) { length = Add<HLoadNamedField>( - checked_object, static_cast<HValue*>(NULL), + checked_object->ActualValue(), checked_object, HObjectAccess::ForArrayLength(elements_kind)); } else { length = AddLoadFixedArrayLength(elements); @@ -2206,14 +2458,14 @@ length_checker.End(); return result; } else { - ASSERT(store_mode == STANDARD_STORE); + DCHECK(store_mode == STANDARD_STORE); checked_key = Add<HBoundsCheck>(key, length); return 
AddElementAccess( backing_store, checked_key, val, checked_object, elements_kind, access_type); } } - ASSERT(fast_smi_only_elements || + DCHECK(fast_smi_only_elements || fast_elements || IsFastDoubleElementsKind(elements_kind)); @@ -2227,6 +2479,9 @@ if (IsGrowStoreMode(store_mode)) { NoObservableSideEffectsScope no_effects(this); + Representation representation = HStoreKeyed::RequiredValueRepresentation( + elements_kind, STORE_TO_INITIALIZED_ENTRY); + val = AddUncasted<HForceRepresentation>(val, representation); elements = BuildCheckForCapacityGrow(checked_object, elements, elements_kind, length, key, is_js_array, access_type); @@ -2241,7 +2496,7 @@ elements_kind, length); } else { HCheckMaps* check_cow_map = Add<HCheckMaps>( - elements, isolate()->factory()->fixed_array_map(), top_info()); + elements, isolate()->factory()->fixed_array_map()); check_cow_map->ClearDependsOnFlag(kElementsKind); } } @@ -2251,17 +2506,19 @@ } - HValue* HGraphBuilder::BuildAllocateArrayFromLength( JSArrayBuilder* array_builder, HValue* length_argument) { if (length_argument->IsConstant() && HConstant::cast(length_argument)->HasSmiValue()) { int array_length = HConstant::cast(length_argument)->Integer32Value(); - HValue* new_object = array_length == 0 - ? 
array_builder->AllocateEmptyArray() - : array_builder->AllocateArray(length_argument, length_argument); - return new_object; + if (array_length == 0) { + return array_builder->AllocateEmptyArray(); + } else { + return array_builder->AllocateArray(length_argument, + array_length, + length_argument); + } } HValue* constant_zero = graph()->GetConstant0(); @@ -2291,35 +2548,62 @@ // Figure out total size HValue* length = Pop(); HValue* capacity = Pop(); - return array_builder->AllocateArray(capacity, length); + return array_builder->AllocateArray(capacity, max_alloc_length, length); } -HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind, - HValue* capacity) { - int elements_size; - InstanceType instance_type; - if (IsFastDoubleElementsKind(kind)) { - elements_size = kDoubleSize; - instance_type = FIXED_DOUBLE_ARRAY_TYPE; - } else { - elements_size = kPointerSize; - instance_type = FIXED_ARRAY_TYPE; - } +HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind, + HValue* capacity) { + int elements_size = IsFastDoubleElementsKind(kind) + ? 
kDoubleSize + : kPointerSize; HConstant* elements_size_value = Add<HConstant>(elements_size); - HValue* mul = AddUncasted<HMul>(capacity, elements_size_value); + HInstruction* mul = HMul::NewImul(zone(), context(), + capacity->ActualValue(), + elements_size_value); + AddInstruction(mul); mul->ClearFlag(HValue::kCanOverflow); + STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize); + HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize); HValue* total_size = AddUncasted<HAdd>(mul, header_size); total_size->ClearFlag(HValue::kCanOverflow); + return total_size; +} + + +HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) { + int base_size = JSArray::kSize; + if (mode == TRACK_ALLOCATION_SITE) { + base_size += AllocationMemento::kSize; + } + HConstant* size_in_bytes = Add<HConstant>(base_size); + return Add<HAllocate>( + size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE); +} + - PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ? - isolate()->heap()->GetPretenureMode() : NOT_TENURED; +HConstant* HGraphBuilder::EstablishElementsAllocationSize( + ElementsKind kind, + int capacity) { + int base_size = IsFastDoubleElementsKind(kind) + ? FixedDoubleArray::SizeFor(capacity) + : FixedArray::SizeFor(capacity); + + return Add<HConstant>(base_size); +} - return Add<HAllocate>(total_size, HType::Tagged(), pretenure_flag, - instance_type); + +HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind, + HValue* size_in_bytes) { + InstanceType instance_type = IsFastDoubleElementsKind(kind) + ? FIXED_DOUBLE_ARRAY_TYPE + : FIXED_ARRAY_TYPE; + + return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED, + instance_type); } @@ -2331,7 +2615,7 @@ ? 
factory->fixed_double_array_map() : factory->fixed_array_map(); - AddStoreMapConstant(elements, map); + Add<HStoreNamedField>(elements, HObjectAccess::ForMap(), Add<HConstant>(map)); Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(), capacity); } @@ -2343,43 +2627,39 @@ // The HForceRepresentation is to prevent possible deopt on int-smi // conversion after allocation but before the new object fields are set. capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi()); - HValue* new_elements = BuildAllocateElements(kind, capacity); + HValue* size_in_bytes = BuildCalculateElementsSize(kind, capacity); + HValue* new_elements = BuildAllocateElements(kind, size_in_bytes); BuildInitializeElementsHeader(new_elements, kind, capacity); return new_elements; } -HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array, - HValue* array_map, - AllocationSiteMode mode, - ElementsKind elements_kind, - HValue* allocation_site_payload, - HValue* length_field) { - +void HGraphBuilder::BuildJSArrayHeader(HValue* array, + HValue* array_map, + HValue* elements, + AllocationSiteMode mode, + ElementsKind elements_kind, + HValue* allocation_site_payload, + HValue* length_field) { Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map); HConstant* empty_fixed_array = Add<HConstant>(isolate()->factory()->empty_fixed_array()); - HObjectAccess access = HObjectAccess::ForPropertiesPointer(); - Add<HStoreNamedField>(array, access, empty_fixed_array); - Add<HStoreNamedField>(array, HObjectAccess::ForArrayLength(elements_kind), - length_field); + Add<HStoreNamedField>( + array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array); + + Add<HStoreNamedField>( + array, HObjectAccess::ForElementsPointer(), + elements != NULL ? 
elements : empty_fixed_array); + + Add<HStoreNamedField>( + array, HObjectAccess::ForArrayLength(elements_kind), length_field); if (mode == TRACK_ALLOCATION_SITE) { BuildCreateAllocationMemento( array, Add<HConstant>(JSArray::kSize), allocation_site_payload); } - - int elements_location = JSArray::kSize; - if (mode == TRACK_ALLOCATION_SITE) { - elements_location += AllocationMemento::kSize; - } - - HInnerAllocatedObject* elements = Add<HInnerAllocatedObject>( - array, Add<HConstant>(elements_location)); - Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(), elements); - return elements; } @@ -2392,19 +2672,17 @@ PropertyAccessType access_type, LoadKeyedHoleMode load_mode) { if (access_type == STORE) { - ASSERT(val != NULL); + DCHECK(val != NULL); if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS || elements_kind == UINT8_CLAMPED_ELEMENTS) { val = Add<HClampToUint8>(val); } return Add<HStoreKeyed>(elements, checked_key, val, elements_kind, - elements_kind == FAST_SMI_ELEMENTS - ? 
STORE_TO_INITIALIZED_ENTRY - : INITIALIZING_STORE); + STORE_TO_INITIALIZED_ENTRY); } - ASSERT(access_type == LOAD); - ASSERT(val == NULL); + DCHECK(access_type == LOAD); + DCHECK(val == NULL); HLoadKeyed* load = Add<HLoadKeyed>( elements, checked_key, dependency, elements_kind, load_mode); if (FLAG_opt_safe_uint32_operations && @@ -2416,15 +2694,32 @@ } -HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) { +HLoadNamedField* HGraphBuilder::AddLoadMap(HValue* object, + HValue* dependency) { + return Add<HLoadNamedField>(object, dependency, HObjectAccess::ForMap()); +} + + +HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object, + HValue* dependency) { + return Add<HLoadNamedField>( + object, dependency, HObjectAccess::ForElementsPointer()); +} + + +HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength( + HValue* array, + HValue* dependency) { return Add<HLoadNamedField>( - object, static_cast<HValue*>(NULL), HObjectAccess::ForElementsPointer()); + array, dependency, HObjectAccess::ForFixedArrayLength()); } -HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) { +HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array, + ElementsKind kind, + HValue* dependency) { return Add<HLoadNamedField>( - object, static_cast<HValue*>(NULL), HObjectAccess::ForFixedArrayLength()); + array, dependency, HObjectAccess::ForArrayLength(kind)); } @@ -2444,30 +2739,21 @@ } -void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) { - int element_size = IsFastDoubleElementsKind(kind) ? 
kDoubleSize - : kPointerSize; - int max_size = Page::kMaxRegularHeapObjectSize / element_size; - max_size -= JSArray::kSize / element_size; - HConstant* max_size_constant = Add<HConstant>(max_size); - Add<HBoundsCheck>(length, max_size_constant); -} - - HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object, HValue* elements, ElementsKind kind, ElementsKind new_kind, HValue* length, HValue* new_capacity) { - BuildNewSpaceArrayCheck(new_capacity, new_kind); + Add<HBoundsCheck>(new_capacity, Add<HConstant>( + (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >> + ElementsKindToShiftSize(new_kind))); HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader( new_kind, new_capacity); - BuildCopyElements(elements, kind, - new_elements, new_kind, - length, new_capacity); + BuildCopyElements(elements, kind, new_elements, + new_kind, length, new_capacity); Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(), new_elements); @@ -2476,28 +2762,24 @@ } -void HGraphBuilder::BuildFillElementsWithHole(HValue* elements, - ElementsKind elements_kind, - HValue* from, - HValue* to) { - // Fast elements kinds need to be initialized in case statements below cause - // a garbage collection. - Factory* factory = isolate()->factory(); - - double nan_double = FixedDoubleArray::hole_nan_as_double(); - HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind) - ? 
Add<HConstant>(factory->the_hole_value()) - : Add<HConstant>(nan_double); +void HGraphBuilder::BuildFillElementsWithValue(HValue* elements, + ElementsKind elements_kind, + HValue* from, + HValue* to, + HValue* value) { + if (to == NULL) { + to = AddLoadFixedArrayLength(elements); + } // Special loop unfolding case - static const int kLoopUnfoldLimit = 8; - STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= kLoopUnfoldLimit); + STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= + kElementLoopUnrollThreshold); int initial_capacity = -1; if (from->IsInteger32Constant() && to->IsInteger32Constant()) { int constant_from = from->GetInteger32Constant(); int constant_to = to->GetInteger32Constant(); - if (constant_from == 0 && constant_to <= kLoopUnfoldLimit) { + if (constant_from == 0 && constant_to <= kElementLoopUnrollThreshold) { initial_capacity = constant_to; } } @@ -2511,150 +2793,225 @@ if (initial_capacity >= 0) { for (int i = 0; i < initial_capacity; i++) { HInstruction* key = Add<HConstant>(i); - Add<HStoreKeyed>(elements, key, hole, elements_kind); + Add<HStoreKeyed>(elements, key, value, elements_kind); } } else { - LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement); + // Carefully loop backwards so that the "from" remains live through the loop + // rather than the to. This often corresponds to keeping length live rather + // then capacity, which helps register allocation, since length is used more + // other than capacity after filling with holes. 
+ LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement); - HValue* key = builder.BeginBody(from, to, Token::LT); + HValue* key = builder.BeginBody(to, from, Token::GT); - Add<HStoreKeyed>(elements, key, hole, elements_kind); + HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1()); + adjusted_key->ClearFlag(HValue::kCanOverflow); + + Add<HStoreKeyed>(elements, adjusted_key, value, elements_kind); builder.EndBody(); } } +void HGraphBuilder::BuildFillElementsWithHole(HValue* elements, + ElementsKind elements_kind, + HValue* from, + HValue* to) { + // Fast elements kinds need to be initialized in case statements below cause a + // garbage collection. + Factory* factory = isolate()->factory(); + + double nan_double = FixedDoubleArray::hole_nan_as_double(); + HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind) + ? Add<HConstant>(factory->the_hole_value()) + : Add<HConstant>(nan_double); + + BuildFillElementsWithValue(elements, elements_kind, from, to, hole); +} + + void HGraphBuilder::BuildCopyElements(HValue* from_elements, ElementsKind from_elements_kind, HValue* to_elements, ElementsKind to_elements_kind, HValue* length, HValue* capacity) { - bool pre_fill_with_holes = - IsFastDoubleElementsKind(from_elements_kind) && - IsFastObjectElementsKind(to_elements_kind); + int constant_capacity = -1; + if (capacity != NULL && + capacity->IsConstant() && + HConstant::cast(capacity)->HasInteger32Value()) { + int constant_candidate = HConstant::cast(capacity)->Integer32Value(); + if (constant_candidate <= kElementLoopUnrollThreshold) { + constant_capacity = constant_candidate; + } + } + bool pre_fill_with_holes = + IsFastDoubleElementsKind(from_elements_kind) && + IsFastObjectElementsKind(to_elements_kind); if (pre_fill_with_holes) { // If the copy might trigger a GC, make sure that the FixedArray is - // pre-initialized with holes to make sure that it's always in a consistent - // state. 
+ // pre-initialized with holes to make sure that it's always in a + // consistent state. BuildFillElementsWithHole(to_elements, to_elements_kind, - graph()->GetConstant0(), capacity); + graph()->GetConstant0(), NULL); } - LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement); - - HValue* key = builder.BeginBody(graph()->GetConstant0(), length, Token::LT); - - HValue* element = Add<HLoadKeyed>(from_elements, key, - static_cast<HValue*>(NULL), - from_elements_kind, - ALLOW_RETURN_HOLE); - - ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) && - IsFastSmiElementsKind(to_elements_kind)) - ? FAST_HOLEY_ELEMENTS : to_elements_kind; - - if (IsHoleyElementsKind(from_elements_kind) && - from_elements_kind != to_elements_kind) { - IfBuilder if_hole(this); - if_hole.If<HCompareHoleAndBranch>(element); - if_hole.Then(); - HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind) - ? Add<HConstant>(FixedDoubleArray::hole_nan_as_double()) - : graph()->GetConstantHole(); - Add<HStoreKeyed>(to_elements, key, hole_constant, kind); - if_hole.Else(); - HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind); - store->SetFlag(HValue::kAllowUndefinedAsNaN); - if_hole.End(); + if (constant_capacity != -1) { + // Unroll the loop for small elements kinds. 
+ for (int i = 0; i < constant_capacity; i++) { + HValue* key_constant = Add<HConstant>(i); + HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant, + static_cast<HValue*>(NULL), + from_elements_kind); + Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind); + } } else { - HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind); - store->SetFlag(HValue::kAllowUndefinedAsNaN); - } + if (!pre_fill_with_holes && + (capacity == NULL || !length->Equals(capacity))) { + BuildFillElementsWithHole(to_elements, to_elements_kind, + length, NULL); + } - builder.EndBody(); + if (capacity == NULL) { + capacity = AddLoadFixedArrayLength(to_elements); + } - if (!pre_fill_with_holes && length != capacity) { - // Fill unused capacity with the hole. - BuildFillElementsWithHole(to_elements, to_elements_kind, - key, capacity); - } -} + LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement); + HValue* key = builder.BeginBody(length, graph()->GetConstant0(), + Token::GT); -HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate, - HValue* allocation_site, - AllocationSiteMode mode, - ElementsKind kind, - int length) { - NoObservableSideEffectsScope no_effects(this); + key = AddUncasted<HSub>(key, graph()->GetConstant1()); + key->ClearFlag(HValue::kCanOverflow); - // All sizes here are multiples of kPointerSize. - int size = JSArray::kSize; - if (mode == TRACK_ALLOCATION_SITE) { - size += AllocationMemento::kSize; - } + HValue* element = Add<HLoadKeyed>(from_elements, key, + static_cast<HValue*>(NULL), + from_elements_kind, + ALLOW_RETURN_HOLE); - HValue* size_in_bytes = Add<HConstant>(size); - HInstruction* object = Add<HAllocate>(size_in_bytes, - HType::JSObject(), - NOT_TENURED, - JS_OBJECT_TYPE); + ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) && + IsFastSmiElementsKind(to_elements_kind)) + ? FAST_HOLEY_ELEMENTS : to_elements_kind; - // Copy the JS array part. 
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length == 0)) { - HObjectAccess access = HObjectAccess::ForJSArrayOffset(i); - Add<HStoreNamedField>( - object, access, Add<HLoadNamedField>( - boilerplate, static_cast<HValue*>(NULL), access)); + if (IsHoleyElementsKind(from_elements_kind) && + from_elements_kind != to_elements_kind) { + IfBuilder if_hole(this); + if_hole.If<HCompareHoleAndBranch>(element); + if_hole.Then(); + HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind) + ? Add<HConstant>(FixedDoubleArray::hole_nan_as_double()) + : graph()->GetConstantHole(); + Add<HStoreKeyed>(to_elements, key, hole_constant, kind); + if_hole.Else(); + HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind); + store->SetFlag(HValue::kAllowUndefinedAsNaN); + if_hole.End(); + } else { + HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind); + store->SetFlag(HValue::kAllowUndefinedAsNaN); } - } - // Create an allocation site info if requested. - if (mode == TRACK_ALLOCATION_SITE) { - BuildCreateAllocationMemento( - object, Add<HConstant>(JSArray::kSize), allocation_site); + builder.EndBody(); } - if (length > 0) { - HValue* boilerplate_elements = AddLoadElements(boilerplate); - HValue* object_elements; - if (IsFastDoubleElementsKind(kind)) { - HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length)); - object_elements = Add<HAllocate>(elems_size, HType::Tagged(), - NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE); - } else { - HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length)); - object_elements = Add<HAllocate>(elems_size, HType::Tagged(), - NOT_TENURED, FIXED_ARRAY_TYPE); - } - Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(), - object_elements); - - // Copy the elements array header. 
- for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) { - HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i); - Add<HStoreNamedField>( - object_elements, access, Add<HLoadNamedField>( - boilerplate_elements, static_cast<HValue*>(NULL), access)); - } - - // Copy the elements array contents. - // TODO(mstarzinger): Teach HGraphBuilder::BuildCopyElements to unfold - // copying loops with constant length up to a given boundary and use this - // helper here instead. - for (int i = 0; i < length; i++) { - HValue* key_constant = Add<HConstant>(i); - HInstruction* value = Add<HLoadKeyed>(boilerplate_elements, key_constant, - static_cast<HValue*>(NULL), kind); - Add<HStoreKeyed>(object_elements, key_constant, value, kind); - } - } + Counters* counters = isolate()->counters(); + AddIncrementCounter(counters->inlined_copied_elements()); +} - return object; + +HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate, + HValue* allocation_site, + AllocationSiteMode mode, + ElementsKind kind) { + HAllocate* array = AllocateJSArrayObject(mode); + + HValue* map = AddLoadMap(boilerplate); + HValue* elements = AddLoadElements(boilerplate); + HValue* length = AddLoadArrayLength(boilerplate, kind); + + BuildJSArrayHeader(array, + map, + elements, + mode, + FAST_ELEMENTS, + allocation_site, + length); + return array; +} + + +HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate, + HValue* allocation_site, + AllocationSiteMode mode) { + HAllocate* array = AllocateJSArrayObject(mode); + + HValue* map = AddLoadMap(boilerplate); + + BuildJSArrayHeader(array, + map, + NULL, // set elements to empty fixed array + mode, + FAST_ELEMENTS, + allocation_site, + graph()->GetConstant0()); + return array; +} + + +HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate, + HValue* allocation_site, + AllocationSiteMode mode, + ElementsKind kind) { + HValue* boilerplate_elements = AddLoadElements(boilerplate); + HValue* capacity = 
AddLoadFixedArrayLength(boilerplate_elements); + + // Generate size calculation code here in order to make it dominate + // the JSArray allocation. + HValue* elements_size = BuildCalculateElementsSize(kind, capacity); + + // Create empty JSArray object for now, store elimination should remove + // redundant initialization of elements and length fields and at the same + // time the object will be fully prepared for GC if it happens during + // elements allocation. + HValue* result = BuildCloneShallowArrayEmpty( + boilerplate, allocation_site, mode); + + HAllocate* elements = BuildAllocateElements(kind, elements_size); + + // This function implicitly relies on the fact that the + // FastCloneShallowArrayStub is called only for literals shorter than + // JSObject::kInitialMaxFastElementArray. + // Can't add HBoundsCheck here because otherwise the stub will eager a frame. + HConstant* size_upper_bound = EstablishElementsAllocationSize( + kind, JSObject::kInitialMaxFastElementArray); + elements->set_size_upper_bound(size_upper_bound); + + Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements); + + // The allocation for the cloned array above causes register pressure on + // machines with low register counts. Force a reload of the boilerplate + // elements here to free up a register for the allocation to avoid unnecessary + // spillage. + boilerplate_elements = AddLoadElements(boilerplate); + boilerplate_elements->SetFlag(HValue::kCantBeReplaced); + + // Copy the elements array header. 
+ for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) { + HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i); + Add<HStoreNamedField>(elements, access, + Add<HLoadNamedField>(boilerplate_elements, + static_cast<HValue*>(NULL), access)); + } + + // And the result of the length + HValue* length = AddLoadArrayLength(boilerplate, kind); + Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind), length); + + BuildCopyElements(boilerplate_elements, kind, elements, + kind, length, NULL); + return result; } @@ -2700,7 +3057,7 @@ // the monomorphic map when the code is used as a template to generate a // new IC. For optimized functions, there is no sentinel map, the map // emitted below is the actual monomorphic map. - BuildCheckMap(value, type->Classes().Current()); + Add<HCheckMaps>(value, type->Classes().Current()); } else { if_nil.Deopt("Too many undetectable types"); } @@ -2714,9 +3071,9 @@ HValue* previous_object, HValue* previous_object_size, HValue* allocation_site) { - ASSERT(allocation_site != NULL); + DCHECK(allocation_site != NULL); HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>( - previous_object, previous_object_size); + previous_object, previous_object_size, HType::HeapObject()); AddStoreMapConstant( allocation_memento, isolate()->factory()->allocation_memento_map()); Add<HStoreNamedField>( @@ -2733,11 +3090,9 @@ // This smi value is reset to zero after every gc, overflow isn't a problem // since the counter is bounded by the new space size. memento_create_count->ClearFlag(HValue::kCanOverflow); - HStoreNamedField* store = Add<HStoreNamedField>( + Add<HStoreNamedField>( allocation_site, HObjectAccess::ForAllocationSiteOffset( AllocationSite::kPretenureCreateCountOffset), memento_create_count); - // No write barrier needed to store a smi. 
- store->SkipWriteBarrier(); } } @@ -2787,7 +3142,7 @@ kind_(kind), allocation_site_payload_(allocation_site_payload), constructor_function_(constructor_function) { - ASSERT(!allocation_site_payload->IsConstant() || + DCHECK(!allocation_site_payload->IsConstant() || HConstant::cast(allocation_site_payload)->handle( builder_->isolate())->IsAllocationSite()); mode_ = override_mode == DISABLE_ALLOCATION_SITES @@ -2850,67 +3205,47 @@ } -HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize( - HValue* length_node) { - ASSERT(length_node != NULL); - - int base_size = JSArray::kSize; - if (mode_ == TRACK_ALLOCATION_SITE) { - base_size += AllocationMemento::kSize; - } - - STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize); - base_size += FixedArray::kHeaderSize; - - HInstruction* elements_size_value = - builder()->Add<HConstant>(elements_size()); - HInstruction* mul = HMul::NewImul(builder()->zone(), builder()->context(), - length_node, elements_size_value); - builder()->AddInstruction(mul); - HInstruction* base = builder()->Add<HConstant>(base_size); - HInstruction* total_size = HAdd::New(builder()->zone(), builder()->context(), - base, mul); - total_size->ClearFlag(HValue::kCanOverflow); - builder()->AddInstruction(total_size); - return total_size; -} - - -HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() { - int base_size = JSArray::kSize; - if (mode_ == TRACK_ALLOCATION_SITE) { - base_size += AllocationMemento::kSize; - } - - base_size += IsFastDoubleElementsKind(kind_) - ? 
FixedDoubleArray::SizeFor(initial_capacity()) - : FixedArray::SizeFor(initial_capacity()); - - return builder()->Add<HConstant>(base_size); -} - - -HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() { - HValue* size_in_bytes = EstablishEmptyArrayAllocationSize(); +HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() { HConstant* capacity = builder()->Add<HConstant>(initial_capacity()); - return AllocateArray(size_in_bytes, + return AllocateArray(capacity, capacity, builder()->graph()->GetConstant0()); } -HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity, - HValue* length_field, - FillMode fill_mode) { - HValue* size_in_bytes = EstablishAllocationSize(capacity); - return AllocateArray(size_in_bytes, capacity, length_field, fill_mode); +HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray( + HValue* capacity, + HConstant* capacity_upper_bound, + HValue* length_field, + FillMode fill_mode) { + return AllocateArray(capacity, + capacity_upper_bound->GetInteger32Constant(), + length_field, + fill_mode); +} + + +HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray( + HValue* capacity, + int capacity_upper_bound, + HValue* length_field, + FillMode fill_mode) { + HConstant* elememts_size_upper_bound = capacity->IsInteger32Constant() + ? 
HConstant::cast(capacity) + : builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound); + + HAllocate* array = AllocateArray(capacity, length_field, fill_mode); + if (!elements_location_->has_size_upper_bound()) { + elements_location_->set_size_upper_bound(elememts_size_upper_bound); + } + return array; } -HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes, - HValue* capacity, - HValue* length_field, - FillMode fill_mode) { +HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray( + HValue* capacity, + HValue* length_field, + FillMode fill_mode) { // These HForceRepresentations are because we store these as fields in the // objects we construct, and an int32-to-smi HChange could deopt. Accept // the deopt possibility now, before allocation occurs. @@ -2920,14 +3255,14 @@ length_field = builder()->AddUncasted<HForceRepresentation>(length_field, Representation::Smi()); - // Allocate (dealing with failure appropriately) - HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes, - HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE); - // Folded array allocation should be aligned if it has fast double elements. - if (IsFastDoubleElementsKind(kind_)) { - new_object->MakeDoubleAligned(); - } + // Generate size calculation code here in order to make it dominate + // the JSArray allocation. 
+ HValue* elements_size = + builder()->BuildCalculateElementsSize(kind_, capacity); + + // Allocate (dealing with failure appropriately) + HAllocate* array_object = builder()->AllocateJSArrayObject(mode_); // Fill in the fields: map, properties, length HValue* map; @@ -2936,29 +3271,30 @@ } else { map = EmitMapCode(); } - elements_location_ = builder()->BuildJSArrayHeader(new_object, - map, - mode_, - kind_, - allocation_site_payload_, - length_field); - // Initialize the elements + builder()->BuildJSArrayHeader(array_object, + map, + NULL, // set elements to empty fixed array + mode_, + kind_, + allocation_site_payload_, + length_field); + + // Allocate and initialize the elements + elements_location_ = builder()->BuildAllocateElements(kind_, elements_size); + builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity); + // Set the elements + builder()->Add<HStoreNamedField>( + array_object, HObjectAccess::ForElementsPointer(), elements_location_); + if (fill_mode == FILL_WITH_HOLE) { builder()->BuildFillElementsWithHole(elements_location_, kind_, graph()->GetConstant0(), capacity); } - return new_object; -} - - -HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object, - Handle<Map> map) { - return Add<HStoreNamedField>(object, HObjectAccess::ForMap(), - Add<HConstant>(map)); + return array_object; } @@ -3068,6 +3404,11 @@ } +OStream& operator<<(OStream& os, const HBasicBlock& b) { + return os << "B" << b.block_id(); +} + + HGraph::HGraph(CompilationInfo* info) : isolate_(info->isolate()), next_block_id_(0), @@ -3090,10 +3431,9 @@ inlined_functions_(5, info->zone()) { if (info->IsStub()) { HydrogenCodeStub* stub = info->code_stub(); - CodeStubInterfaceDescriptor* descriptor = - stub->GetInterfaceDescriptor(isolate_); - start_environment_ = - new(zone_) HEnvironment(zone_, descriptor->environment_length()); + CodeStubInterfaceDescriptor* descriptor = stub->GetInterfaceDescriptor(); + start_environment_ = new(zone_) HEnvironment( + 
zone_, descriptor->GetEnvironmentParameterCount()); } else { TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown()); start_environment_ = @@ -3114,7 +3454,7 @@ void HGraph::FinalizeUniqueness() { DisallowHeapAllocation no_gc; - ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate())); + DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate())); for (int i = 0; i < blocks()->length(); ++i) { for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) { it.Current()->FinalizeUniqueness(); @@ -3143,13 +3483,10 @@ if (!shared->script()->IsUndefined()) { Handle<Script> script(Script::cast(shared->script())); if (!script->source()->IsUndefined()) { - CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer()); - PrintF(tracing_scope.file(), - "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n", - shared->DebugName()->ToCString().get(), - info()->optimization_id(), - id); - + CodeTracer::Scope tracing_scopex(isolate()->GetCodeTracer()); + OFStream os(tracing_scopex.file()); + os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get() + << ") id{" << info()->optimization_id() << "," << id << "} ---\n"; { ConsStringIteratorOp op; StringCharacterStream stream(String::cast(script->source()), @@ -3161,12 +3498,12 @@ shared->end_position() - shared->start_position() + 1; for (int i = 0; i < source_len; i++) { if (stream.HasMore()) { - PrintF(tracing_scope.file(), "%c", stream.GetNext()); + os << AsReversiblyEscapedUC16(stream.GetNext()); } } } - PrintF(tracing_scope.file(), "\n--- END ---\n"); + os << "\n--- END ---\n"; } } } @@ -3175,13 +3512,10 @@ if (inline_id != 0) { CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer()); - PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ", - shared->DebugName()->ToCString().get(), - info()->optimization_id(), - id, - inline_id); - position.PrintTo(tracing_scope.file()); - PrintF(tracing_scope.file(), "\n"); + OFStream os(tracing_scope.file()); + os << "INLINE (" << 
shared->DebugName()->ToCString().get() << ") id{" + << info()->optimization_id() << "," << id << "} AS " << inline_id + << " AT " << position << endl; } return inline_id; @@ -3254,21 +3588,19 @@ HBasicBlock* loop_header() { return loop_header_; } static PostorderProcessor* CreateEntryProcessor(Zone* zone, - HBasicBlock* block, - BitVector* visited) { + HBasicBlock* block) { PostorderProcessor* result = new(zone) PostorderProcessor(NULL); - return result->SetupSuccessors(zone, block, NULL, visited); + return result->SetupSuccessors(zone, block, NULL); } PostorderProcessor* PerformStep(Zone* zone, - BitVector* visited, ZoneList<HBasicBlock*>* order) { PostorderProcessor* next = - PerformNonBacktrackingStep(zone, visited, order); + PerformNonBacktrackingStep(zone, order); if (next != NULL) { return next; } else { - return Backtrack(zone, visited, order); + return Backtrack(zone, order); } } @@ -3288,9 +3620,8 @@ // Each "Setup..." method is like a constructor for a cycle state. PostorderProcessor* SetupSuccessors(Zone* zone, HBasicBlock* block, - HBasicBlock* loop_header, - BitVector* visited) { - if (block == NULL || visited->Contains(block->block_id()) || + HBasicBlock* loop_header) { + if (block == NULL || block->IsOrdered() || block->parent_loop_header() != loop_header) { kind_ = NONE; block_ = NULL; @@ -3300,7 +3631,7 @@ } else { block_ = block; loop_ = NULL; - visited->Add(block->block_id()); + block->MarkAsOrdered(); if (block->IsLoopHeader()) { kind_ = SUCCESSORS_OF_LOOP_HEADER; @@ -3310,7 +3641,7 @@ return result->SetupLoopMembers(zone, block, block->loop_information(), loop_header); } else { - ASSERT(block->IsFinished()); + DCHECK(block->IsFinished()); kind_ = SUCCESSORS; loop_header_ = loop_header; InitializeSuccessors(); @@ -3352,10 +3683,10 @@ } void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) { - ASSERT(block_->end()->FirstSuccessor() == NULL || + DCHECK(block_->end()->FirstSuccessor() == NULL || 
order->Contains(block_->end()->FirstSuccessor()) || block_->end()->FirstSuccessor()->IsLoopHeader()); - ASSERT(block_->end()->SecondSuccessor() == NULL || + DCHECK(block_->end()->SecondSuccessor() == NULL || order->Contains(block_->end()->SecondSuccessor()) || block_->end()->SecondSuccessor()->IsLoopHeader()); order->Add(block_, zone); @@ -3363,7 +3694,6 @@ // This method is the basic block to walk up the stack. PostorderProcessor* Pop(Zone* zone, - BitVector* visited, ZoneList<HBasicBlock*>* order) { switch (kind_) { case SUCCESSORS: @@ -3390,16 +3720,15 @@ // Walks up the stack. PostorderProcessor* Backtrack(Zone* zone, - BitVector* visited, ZoneList<HBasicBlock*>* order) { - PostorderProcessor* parent = Pop(zone, visited, order); + PostorderProcessor* parent = Pop(zone, order); while (parent != NULL) { PostorderProcessor* next = - parent->PerformNonBacktrackingStep(zone, visited, order); + parent->PerformNonBacktrackingStep(zone, order); if (next != NULL) { return next; } else { - parent = parent->Pop(zone, visited, order); + parent = parent->Pop(zone, order); } } return NULL; @@ -3407,7 +3736,6 @@ PostorderProcessor* PerformNonBacktrackingStep( Zone* zone, - BitVector* visited, ZoneList<HBasicBlock*>* order) { HBasicBlock* next_block; switch (kind_) { @@ -3415,16 +3743,14 @@ next_block = AdvanceSuccessors(); if (next_block != NULL) { PostorderProcessor* result = Push(zone); - return result->SetupSuccessors(zone, next_block, - loop_header_, visited); + return result->SetupSuccessors(zone, next_block, loop_header_); } break; case SUCCESSORS_OF_LOOP_HEADER: next_block = AdvanceSuccessors(); if (next_block != NULL) { PostorderProcessor* result = Push(zone); - return result->SetupSuccessors(zone, next_block, - block(), visited); + return result->SetupSuccessors(zone, next_block, block()); } break; case LOOP_MEMBERS: @@ -3439,8 +3765,7 @@ next_block = AdvanceSuccessors(); if (next_block != NULL) { PostorderProcessor* result = Push(zone); - return 
result->SetupSuccessors(zone, next_block, - loop_header_, visited); + return result->SetupSuccessors(zone, next_block, loop_header_); } break; case NONE: @@ -3495,21 +3820,36 @@ void HGraph::OrderBlocks() { CompilationPhase phase("H_Block ordering", info()); - BitVector visited(blocks_.length(), zone()); - ZoneList<HBasicBlock*> reverse_result(8, zone()); - HBasicBlock* start = blocks_[0]; - PostorderProcessor* postorder = - PostorderProcessor::CreateEntryProcessor(zone(), start, &visited); - while (postorder != NULL) { - postorder = postorder->PerformStep(zone(), &visited, &reverse_result); +#ifdef DEBUG + // Initially the blocks must not be ordered. + for (int i = 0; i < blocks_.length(); ++i) { + DCHECK(!blocks_[i]->IsOrdered()); } +#endif + + PostorderProcessor* postorder = + PostorderProcessor::CreateEntryProcessor(zone(), blocks_[0]); blocks_.Rewind(0); - int index = 0; - for (int i = reverse_result.length() - 1; i >= 0; --i) { - HBasicBlock* b = reverse_result[i]; - blocks_.Add(b, zone()); - b->set_block_id(index++); + while (postorder) { + postorder = postorder->PerformStep(zone(), &blocks_); + } + +#ifdef DEBUG + // Now all blocks must be marked as ordered. + for (int i = 0; i < blocks_.length(); ++i) { + DCHECK(blocks_[i]->IsOrdered()); + } +#endif + + // Reverse block list and assign block IDs. + for (int i = 0, j = blocks_.length(); --j >= i; ++i) { + HBasicBlock* bi = blocks_[i]; + HBasicBlock* bj = blocks_[j]; + bi->set_block_id(j); + bj->set_block_id(i); + blocks_[i] = bj; + blocks_[j] = bi; } } @@ -3645,7 +3985,7 @@ for_typeof_(false) { owner->set_ast_context(this); // Push. 
#ifdef DEBUG - ASSERT(owner->environment()->frame_type() == JS_FUNCTION); + DCHECK(owner->environment()->frame_type() == JS_FUNCTION); original_length_ = owner->environment()->length(); #endif } @@ -3657,7 +3997,7 @@ EffectContext::~EffectContext() { - ASSERT(owner()->HasStackOverflow() || + DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL || (owner()->environment()->length() == original_length_ && owner()->environment()->frame_type() == JS_FUNCTION)); @@ -3665,7 +4005,7 @@ ValueContext::~ValueContext() { - ASSERT(owner()->HasStackOverflow() || + DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL || (owner()->environment()->length() == original_length_ + 1 && owner()->environment()->frame_type() == JS_FUNCTION)); @@ -3693,7 +4033,7 @@ void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) { - ASSERT(!instr->IsControlInstruction()); + DCHECK(!instr->IsControlInstruction()); owner()->AddInstruction(instr); if (instr->HasObservableSideEffects()) { owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); @@ -3703,7 +4043,7 @@ void EffectContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) { - ASSERT(!instr->HasObservableSideEffects()); + DCHECK(!instr->HasObservableSideEffects()); HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock(); HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock(); instr->SetSuccessorAt(0, empty_true); @@ -3731,7 +4071,7 @@ void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) { - ASSERT(!instr->IsControlInstruction()); + DCHECK(!instr->IsControlInstruction()); if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) { return owner()->Bailout(kBadValueContextForArgumentsObjectValue); } @@ -3744,7 +4084,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) { - ASSERT(!instr->HasObservableSideEffects()); + DCHECK(!instr->HasObservableSideEffects()); if (!arguments_allowed() && 
instr->CheckFlag(HValue::kIsArguments)) { return owner()->Bailout(kBadValueContextForArgumentsObjectValue); } @@ -3787,7 +4127,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) { - ASSERT(!instr->IsControlInstruction()); + DCHECK(!instr->IsControlInstruction()); HOptimizedGraphBuilder* builder = owner(); builder->AddInstruction(instr); // We expect a simulate after every expression with side effects, though @@ -3802,7 +4142,7 @@ void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) { - ASSERT(!instr->HasObservableSideEffects()); + DCHECK(!instr->HasObservableSideEffects()); HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock(); HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock(); instr->SetSuccessorAt(0, empty_true); @@ -3967,7 +4307,7 @@ // due to missing/inadequate type feedback, but rather too aggressive // optimization. Disable optimistic LICM in that case. Handle<Code> unoptimized_code(current_info()->shared_info()->code()); - ASSERT(unoptimized_code->kind() == Code::FUNCTION); + DCHECK(unoptimized_code->kind() == Code::FUNCTION); Handle<TypeFeedbackInfo> type_info( TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info())); int checksum = type_info->own_type_change_checksum(); @@ -4081,7 +4421,7 @@ #ifdef DEBUG for (int i = 0; i < block->phis()->length(); i++) { HPhi* phi = block->phis()->at(i); - ASSERT(phi->ActualValue() == phi); + DCHECK(phi->ActualValue() == phi); } #endif @@ -4094,7 +4434,7 @@ // instructions. 
instruction->DeleteAndReplaceWith(instruction->ActualValue()); } else { - ASSERT(instruction->IsInformativeDefinition()); + DCHECK(instruction->IsInformativeDefinition()); if (instruction->IsPurelyInformativeDefinition()) { instruction->DeleteAndReplaceWith(instruction->RedefinedOperand()); } else { @@ -4112,9 +4452,11 @@ arguments.Add(Pop(), zone()); } + HPushArguments* push_args = New<HPushArguments>(); while (!arguments.is_empty()) { - Add<HPushArgument>(arguments.RemoveLast()); + push_args->AddInput(arguments.RemoveLast()); } + AddInstruction(push_args); } @@ -4132,7 +4474,7 @@ // Create an arguments object containing the initial parameters. Set the // initial values of parameters including "this" having parameter index 0. - ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count()); + DCHECK_EQ(scope->num_parameters() + 1, environment()->parameter_count()); HArgumentsObject* arguments_object = New<HArgumentsObject>(environment()->parameter_count()); for (int i = 0; i < environment()->parameter_count(); ++i) { @@ -4174,16 +4516,55 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); - if (stmt->scope() != NULL) { - return Bailout(kScopedBlock); - } - BreakAndContinueInfo break_info(stmt); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); + + Scope* outer_scope = scope(); + Scope* scope = stmt->scope(); + BreakAndContinueInfo break_info(stmt, outer_scope); + { BreakAndContinueScope push(&break_info, this); + if (scope != NULL) { + // Load the function object. 
+ Scope* declaration_scope = scope->DeclarationScope(); + HInstruction* function; + HValue* outer_context = environment()->context(); + if (declaration_scope->is_global_scope() || + declaration_scope->is_eval_scope()) { + function = new(zone()) HLoadContextSlot( + outer_context, Context::CLOSURE_INDEX, HLoadContextSlot::kNoCheck); + } else { + function = New<HThisFunction>(); + } + AddInstruction(function); + // Allocate a block context and store it to the stack frame. + HInstruction* inner_context = Add<HAllocateBlockContext>( + outer_context, function, scope->GetScopeInfo()); + HInstruction* instr = Add<HStoreFrameContext>(inner_context); + if (instr->HasObservableSideEffects()) { + AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE); + } + set_scope(scope); + environment()->BindContext(inner_context); + VisitDeclarations(scope->declarations()); + AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE); + } CHECK_BAILOUT(VisitStatements(stmt->statements())); } + set_scope(outer_scope); + if (scope != NULL && current_block() != NULL) { + HValue* inner_context = environment()->context(); + HValue* outer_context = Add<HLoadNamedField>( + inner_context, static_cast<HValue*>(NULL), + HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX)); + + HInstruction* instr = Add<HStoreFrameContext>(outer_context); + if (instr->HasObservableSideEffects()) { + AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE); + } + environment()->BindContext(outer_context); + } HBasicBlock* break_block = break_info.break_block(); if (break_block != NULL) { if (current_block() != NULL) Goto(break_block); @@ -4195,24 +4576,24 @@ void HOptimizedGraphBuilder::VisitExpressionStatement( ExpressionStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); VisitForEffect(stmt->expression()); } void 
HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); } void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (stmt->condition()->ToBooleanIsTrue()) { Add<HSimulate>(stmt->ThenId()); Visit(stmt->then_statement()); @@ -4251,6 +4632,7 @@ HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get( BreakableStatement* stmt, BreakType type, + Scope** scope, int* drop_extra) { *drop_extra = 0; BreakAndContinueScope* current = this; @@ -4258,7 +4640,8 @@ *drop_extra += current->info()->drop_extra(); current = current->next(); } - ASSERT(current != NULL); // Always found (unless stack is malformed). + DCHECK(current != NULL); // Always found (unless stack is malformed). 
+ *scope = current->info()->scope(); if (type == BREAK) { *drop_extra += current->info()->drop_extra(); @@ -4289,35 +4672,72 @@ void HOptimizedGraphBuilder::VisitContinueStatement( ContinueStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); + Scope* outer_scope = NULL; + Scope* inner_scope = scope(); int drop_extra = 0; HBasicBlock* continue_block = break_scope()->Get( - stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra); + stmt->target(), BreakAndContinueScope::CONTINUE, + &outer_scope, &drop_extra); + HValue* context = environment()->context(); Drop(drop_extra); + int context_pop_count = inner_scope->ContextChainLength(outer_scope); + if (context_pop_count > 0) { + while (context_pop_count-- > 0) { + HInstruction* context_instruction = Add<HLoadNamedField>( + context, static_cast<HValue*>(NULL), + HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX)); + context = context_instruction; + } + HInstruction* instr = Add<HStoreFrameContext>(context); + if (instr->HasObservableSideEffects()) { + AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE); + } + environment()->BindContext(context); + } + Goto(continue_block); set_current_block(NULL); } void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); + Scope* outer_scope = NULL; + Scope* inner_scope = scope(); int drop_extra = 0; HBasicBlock* break_block = break_scope()->Get( - stmt->target(), BreakAndContinueScope::BREAK, &drop_extra); + stmt->target(), BreakAndContinueScope::BREAK, + &outer_scope, &drop_extra); + HValue* context = environment()->context(); Drop(drop_extra); + int 
context_pop_count = inner_scope->ContextChainLength(outer_scope); + if (context_pop_count > 0) { + while (context_pop_count-- > 0) { + HInstruction* context_instruction = Add<HLoadNamedField>( + context, static_cast<HValue*>(NULL), + HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX)); + context = context_instruction; + } + HInstruction* instr = Add<HStoreFrameContext>(context); + if (instr->HasObservableSideEffects()) { + AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE); + } + environment()->BindContext(context); + } Goto(break_block); set_current_block(NULL); } void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); FunctionState* state = function_state(); AstContext* context = call_context(); if (context == NULL) { @@ -4337,7 +4757,7 @@ CHECK_ALIVE(VisitForEffect(stmt->expression())); Goto(function_return(), state); } else { - ASSERT(context->IsValue()); + DCHECK(context->IsValue()); CHECK_ALIVE(VisitForValue(stmt->expression())); HValue* return_value = Pop(); HValue* receiver = environment()->arguments_environment()->Lookup(0); @@ -4363,7 +4783,7 @@ } else if (context->IsEffect()) { Goto(function_return(), state); } else { - ASSERT(context->IsValue()); + DCHECK(context->IsValue()); HValue* rhs = environment()->arguments_environment()->Lookup(1); AddLeaveInlined(rhs, state); } @@ -4382,7 +4802,7 @@ Pop(); Goto(function_return(), state); } else { - ASSERT(context->IsValue()); + DCHECK(context->IsValue()); CHECK_ALIVE(VisitForValue(stmt->expression())); AddLeaveInlined(Pop(), state); } @@ -4392,17 +4812,17 @@ void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + 
DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); return Bailout(kWithStatement); } void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); // We only optimize switch statements with a bounded number of clauses. const int kCaseClauseLimit = 128; @@ -4463,7 +4883,7 @@ // translating the clause bodies. HBasicBlock* fall_through_block = NULL; - BreakAndContinueInfo break_info(stmt); + BreakAndContinueInfo break_info(stmt, scope()); { BreakAndContinueScope push(&break_info, this); for (int i = 0; i < clause_count; ++i) { CaseClause* clause = clauses->at(i); @@ -4510,27 +4930,28 @@ void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt, - HBasicBlock* loop_entry, - BreakAndContinueInfo* break_info) { - BreakAndContinueScope push(break_info, this); + HBasicBlock* loop_entry) { Add<HSimulate>(stmt->StackCheckId()); HStackCheck* stack_check = HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch)); - ASSERT(loop_entry->IsLoopHeader()); + DCHECK(loop_entry->IsLoopHeader()); loop_entry->loop_information()->set_stack_check(stack_check); CHECK_BAILOUT(Visit(stmt->body())); } void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); - ASSERT(current_block() != NULL); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); + DCHECK(current_block() != NULL); HBasicBlock* loop_entry = BuildLoopEntry(stmt); - BreakAndContinueInfo break_info(stmt); - CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); + BreakAndContinueInfo break_info(stmt, scope()); + { + 
BreakAndContinueScope push(&break_info, this); + CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry)); + } HBasicBlock* body_exit = JoinContinue(stmt, current_block(), break_info.continue_block()); HBasicBlock* loop_successor = NULL; @@ -4538,6 +4959,7 @@ set_current_block(body_exit); loop_successor = graph()->CreateBasicBlock(); if (stmt->cond()->ToBooleanIsFalse()) { + loop_entry->loop_information()->stack_check()->Eliminate(); Goto(loop_successor); body_exit = NULL; } else { @@ -4567,10 +4989,10 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); - ASSERT(current_block() != NULL); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); + DCHECK(current_block() != NULL); HBasicBlock* loop_entry = BuildLoopEntry(stmt); // If the condition is constant true, do not generate a branch. @@ -4590,9 +5012,10 @@ } } - BreakAndContinueInfo break_info(stmt); + BreakAndContinueInfo break_info(stmt, scope()); if (current_block() != NULL) { - CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); + BreakAndContinueScope push(&break_info, this); + CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry)); } HBasicBlock* body_exit = JoinContinue(stmt, current_block(), break_info.continue_block()); @@ -4606,13 +5029,13 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (stmt->init() != NULL) { CHECK_ALIVE(Visit(stmt->init())); } - ASSERT(current_block() != NULL); + DCHECK(current_block() != NULL); HBasicBlock* loop_entry = BuildLoopEntry(stmt); HBasicBlock* loop_successor = NULL; @@ -4631,9 +5054,10 @@ } } - BreakAndContinueInfo break_info(stmt); + 
BreakAndContinueInfo break_info(stmt, scope()); if (current_block() != NULL) { - CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); + BreakAndContinueScope push(&break_info, this); + CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry)); } HBasicBlock* body_exit = JoinContinue(stmt, current_block(), break_info.continue_block()); @@ -4654,9 +5078,9 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (!FLAG_optimize_for_in) { return Bailout(kForInStatementOptimizationIsDisabled); @@ -4732,8 +5156,11 @@ Bind(each_var, key); - BreakAndContinueInfo break_info(stmt, 5); - CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); + BreakAndContinueInfo break_info(stmt, scope(), 5); + { + BreakAndContinueScope push(&break_info, this); + CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry)); + } HBasicBlock* body_exit = JoinContinue(stmt, current_block(), break_info.continue_block()); @@ -4757,34 +5184,34 @@ void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); return Bailout(kForOfStatement); } void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); return Bailout(kTryCatchStatement); } void HOptimizedGraphBuilder::VisitTryFinallyStatement( TryFinallyStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - 
ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); return Bailout(kTryFinallyStatement); } void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); return Bailout(kDebuggerStatement); } @@ -4795,12 +5222,13 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); Handle<SharedFunctionInfo> shared_info = expr->shared_info(); if (shared_info.is_null()) { - shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script()); + shared_info = + Compiler::BuildFunctionInfo(expr, current_info()->script(), top_info()); } // We also have a stack overflow if the recursive compilation did. 
if (HasStackOverflow()) return; @@ -4812,17 +5240,17 @@ void HOptimizedGraphBuilder::VisitNativeFunctionLiteral( NativeFunctionLiteral* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); return Bailout(kNativeFunctionLiteral); } void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); HBasicBlock* cond_true = graph()->CreateBasicBlock(); HBasicBlock* cond_false = graph()->CreateBasicBlock(); CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false)); @@ -4864,7 +5292,7 @@ return kUseGeneric; } Handle<GlobalObject> global(current_info()->global_object()); - global->Lookup(*var->name(), lookup); + global->Lookup(var->name(), lookup); if (!lookup->IsNormal() || (access_type == STORE && lookup->IsReadOnly()) || lookup->holder() != *global) { @@ -4876,9 +5304,9 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); HValue* context = environment()->context(); - int length = current_info()->scope()->ContextChainLength(var->scope()); + int length = scope()->ContextChainLength(var->scope()); while (length-- > 0) { context = Add<HLoadNamedField>( context, static_cast<HValue*>(NULL), @@ -4893,14 +5321,14 @@ current_info()->set_this_has_uses(true); } - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); Variable* variable = expr->var(); switch (variable->location()) { case Variable::UNALLOCATED: { if 
(IsLexicalVariableMode(variable->mode())) { - // TODO(rossberg): should this be an ASSERT? + // TODO(rossberg): should this be an DCHECK? return Bailout(kReferenceToGlobalLexicalVariable); } // Handle known global constants like 'undefined' specially to avoid a @@ -4924,11 +5352,11 @@ Handle<GlobalObject> global(current_info()->global_object()); Handle<PropertyCell> cell(global->GetPropertyCell(&lookup)); if (cell->type()->IsConstant()) { - cell->AddDependentCompilationInfo(top_info()); - Handle<Object> constant_object = cell->type()->AsConstant(); + PropertyCell::AddDependentCompilationInfo(cell, top_info()); + Handle<Object> constant_object = cell->type()->AsConstant()->Value(); if (constant_object->IsConsString()) { constant_object = - FlattenGetString(Handle<String>::cast(constant_object)); + String::Flatten(Handle<String>::cast(constant_object)); } HConstant* constant = New<HConstant>(constant_object); return ast_context()->ReturnInstruction(constant, expr->id()); @@ -4945,6 +5373,13 @@ New<HLoadGlobalGeneric>(global_object, variable->name(), ast_context()->is_for_typeof()); + if (FLAG_vector_ics) { + Handle<SharedFunctionInfo> current_shared = + function_state()->compilation_info()->shared_info(); + instr->SetVectorAndSlot( + handle(current_shared->feedback_vector(), isolate()), + expr->VariableFeedbackSlot()); + } return ast_context()->ReturnInstruction(instr, expr->id()); } } @@ -4953,7 +5388,7 @@ case Variable::LOCAL: { HValue* value = LookupAndMakeLive(variable); if (value == graph()->GetConstantHole()) { - ASSERT(IsDeclaredVariableMode(variable->mode()) && + DCHECK(IsDeclaredVariableMode(variable->mode()) && variable->mode() != VAR); return Bailout(kReferenceToUninitializedVariable); } @@ -4962,7 +5397,21 @@ case Variable::CONTEXT: { HValue* context = BuildContextChainWalk(variable); - HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable); + HLoadContextSlot::Mode mode; + switch (variable->mode()) { + case LET: + case CONST: + mode = 
HLoadContextSlot::kCheckDeoptimize; + break; + case CONST_LEGACY: + mode = HLoadContextSlot::kCheckReturnUndefined; + break; + default: + mode = HLoadContextSlot::kNoCheck; + break; + } + HLoadContextSlot* instr = + new(zone()) HLoadContextSlot(context, variable->index(), mode); return ast_context()->ReturnInstruction(instr, expr->id()); } @@ -4973,18 +5422,18 @@ void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); HConstant* instr = New<HConstant>(expr->value()); return ast_context()->ReturnInstruction(instr, expr->id()); } void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); Handle<JSFunction> closure = function_state()->compilation_info()->closure(); Handle<FixedArray> literals(closure->literals()); HRegExpLiteral* instr = New<HRegExpLiteral>(literals, @@ -4998,7 +5447,7 @@ static bool CanInlinePropertyAccess(Type* type) { if (type->Is(Type::NumberOrString())) return true; if (!type->IsClass()) return false; - Handle<Map> map = type->AsClass(); + Handle<Map> map = type->AsClass()->Map(); return map->IsJSObjectMap() && !map->is_dictionary_map() && !map->has_named_interceptor(); @@ -5011,12 +5460,12 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth, int* max_properties) { - if (boilerplate->map()->is_deprecated()) { - Handle<Object> result = JSObject::TryMigrateInstance(boilerplate); - if (result.is_null()) return false; + if (boilerplate->map()->is_deprecated() && + !JSObject::TryMigrateInstance(boilerplate)) { + return false; } - ASSERT(max_depth >= 0 && *max_properties >= 0); + 
DCHECK(max_depth >= 0 && *max_properties >= 0); if (max_depth == 0) return false; Isolate* isolate = boilerplate->GetIsolate(); @@ -5071,9 +5520,9 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); expr->BuildConstantProperties(isolate()); Handle<JSFunction> closure = function_state()->compilation_info()->closure(); HInstruction* literal; @@ -5107,15 +5556,15 @@ flags |= expr->has_function() ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; - Add<HPushArgument>(Add<HConstant>(closure_literals)); - Add<HPushArgument>(Add<HConstant>(literal_index)); - Add<HPushArgument>(Add<HConstant>(constant_properties)); - Add<HPushArgument>(Add<HConstant>(flags)); + Add<HPushArguments>(Add<HConstant>(closure_literals), + Add<HConstant>(literal_index), + Add<HConstant>(constant_properties), + Add<HConstant>(flags)); // TODO(mvstanton): Add a flag to turn off creation of any // AllocationMementos for this call: we are in crankshaft and should have // learned enough about transition behavior to stop emitting mementos. - Runtime::FunctionId function_id = Runtime::kHiddenCreateObjectLiteral; + Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral; literal = Add<HCallRuntime>(isolate()->factory()->empty_string(), Runtime::FunctionForId(function_id), 4); @@ -5136,7 +5585,7 @@ switch (property->kind()) { case ObjectLiteral::Property::MATERIALIZED_LITERAL: - ASSERT(!CompileTimeValue::IsCompileTimeValue(value)); + DCHECK(!CompileTimeValue::IsCompileTimeValue(value)); // Fall through. case ObjectLiteral::Property::COMPUTED: if (key->value()->IsInternalizedString()) { @@ -5149,18 +5598,18 @@ if (map.is_null()) { // If we don't know the monomorphic type, do a generic store. 
CHECK_ALIVE(store = BuildNamedGeneric( - STORE, literal, name, value)); + STORE, NULL, literal, name, value)); } else { PropertyAccessInfo info(this, STORE, ToType(map), name); if (info.CanAccessMonomorphic()) { - HValue* checked_literal = BuildCheckMap(literal, map); - ASSERT(!info.lookup()->IsPropertyCallbacks()); + HValue* checked_literal = Add<HCheckMaps>(literal, map); + DCHECK(!info.lookup()->IsPropertyCallbacks()); store = BuildMonomorphicAccess( &info, literal, checked_literal, value, BailoutId::None(), BailoutId::None()); } else { CHECK_ALIVE(store = BuildNamedGeneric( - STORE, literal, name, value)); + STORE, NULL, literal, name, value)); } } AddInstruction(store); @@ -5196,9 +5645,9 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); expr->BuildConstantElements(isolate()); ZoneList<Expression*>* subexprs = expr->values(); int length = subexprs->length(); @@ -5212,11 +5661,12 @@ Handle<JSObject> boilerplate_object; if (literals_cell->IsUndefined()) { uninitialized = true; - Handle<Object> raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate( - isolate(), literals, expr->constant_elements()); - if (raw_boilerplate.is_null()) { - return Bailout(kArrayBoilerplateCreationFailed); - } + Handle<Object> raw_boilerplate; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate(), raw_boilerplate, + Runtime::CreateArrayLiteralBoilerplate( + isolate(), literals, expr->constant_elements()), + Bailout(kArrayBoilerplateCreationFailed)); boilerplate_object = Handle<JSObject>::cast(raw_boilerplate); AllocationSiteCreationContext creation_context(isolate()); @@ -5232,14 +5682,14 @@ isolate()->counters()->cow_arrays_created_runtime()->Increment(); } } else { - ASSERT(literals_cell->IsAllocationSite()); + 
DCHECK(literals_cell->IsAllocationSite()); site = Handle<AllocationSite>::cast(literals_cell); boilerplate_object = Handle<JSObject>( JSObject::cast(site->transition_info()), isolate()); } - ASSERT(!boilerplate_object.is_null()); - ASSERT(site->SitePointsToLiteral()); + DCHECK(!boilerplate_object.is_null()); + DCHECK(site->SitePointsToLiteral()); ElementsKind boilerplate_elements_kind = boilerplate_object->GetElementsKind(); @@ -5264,22 +5714,22 @@ : ArrayLiteral::kNoFlags; flags |= ArrayLiteral::kDisableMementos; - Add<HPushArgument>(Add<HConstant>(literals)); - Add<HPushArgument>(Add<HConstant>(literal_index)); - Add<HPushArgument>(Add<HConstant>(constants)); - Add<HPushArgument>(Add<HConstant>(flags)); + Add<HPushArguments>(Add<HConstant>(literals), + Add<HConstant>(literal_index), + Add<HConstant>(constants), + Add<HConstant>(flags)); // TODO(mvstanton): Consider a flag to turn off creation of any // AllocationMementos for this call: we are in crankshaft and should have // learned enough about transition behavior to stop emitting mementos. - Runtime::FunctionId function_id = Runtime::kHiddenCreateArrayLiteral; + Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral; literal = Add<HCallRuntime>(isolate()->factory()->empty_string(), Runtime::FunctionForId(function_id), 4); // De-opt if elements kind changed from boilerplate_elements_kind. 
Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate()); - literal = Add<HCheckMaps>(literal, map, top_info()); + literal = Add<HCheckMaps>(literal, map); } // The array is expected in the bailout environment during computation @@ -5332,25 +5782,51 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) { BuildCheckHeapObject(object); - return Add<HCheckMaps>(object, map, top_info()); + return Add<HCheckMaps>(object, map); } HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField( PropertyAccessInfo* info, HValue* checked_object) { + // See if this is a load for an immutable property + if (checked_object->ActualValue()->IsConstant() && + info->lookup()->IsCacheable() && + info->lookup()->IsReadOnly() && info->lookup()->IsDontDelete()) { + Handle<Object> object( + HConstant::cast(checked_object->ActualValue())->handle(isolate())); + + if (object->IsJSObject()) { + LookupResult lookup(isolate()); + Handle<JSObject>::cast(object)->Lookup(info->name(), &lookup); + Handle<Object> value(lookup.GetLazyValue(), isolate()); + + DCHECK(!value->IsTheHole()); + return New<HConstant>(value); + } + } + HObjectAccess access = info->access(); if (access.representation().IsDouble()) { // Load the heap number. checked_object = Add<HLoadNamedField>( checked_object, static_cast<HValue*>(NULL), access.WithRepresentation(Representation::Tagged())); - checked_object->set_type(HType::HeapNumber()); // Load the double value from it. 
access = HObjectAccess::ForHeapNumberValue(); } + + SmallMapList* map_list = info->field_maps(); + if (map_list->length() == 0) { + return New<HLoadNamedField>(checked_object, checked_object, access); + } + + UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(map_list->length(), zone()); + for (int i = 0; i < map_list->length(); ++i) { + maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone()); + } return New<HLoadNamedField>( - checked_object, static_cast<HValue*>(NULL), access); + checked_object, checked_object, access, maps, info->field_type()); } @@ -5360,8 +5836,7 @@ HValue* value) { bool transition_to_field = info->lookup()->IsTransition(); // TODO(verwaest): Move this logic into PropertyAccessInfo. - HObjectAccess field_access = HObjectAccess::ForField( - info->map(), info->lookup(), info->name()); + HObjectAccess field_access = info->access(); HStoreNamedField *instr; if (field_access.representation().IsDouble()) { @@ -5372,14 +5847,13 @@ NoObservableSideEffectsScope no_side_effects(this); HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize); - PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ? - isolate()->heap()->GetPretenureMode() : NOT_TENURED; - + // TODO(hpayer): Allocation site pretenuring support. HInstruction* heap_number = Add<HAllocate>(heap_number_size, - HType::HeapNumber(), - pretenure_flag, - HEAP_NUMBER_TYPE); - AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map()); + HType::HeapObject(), + NOT_TENURED, + MUTABLE_HEAP_NUMBER_TYPE); + AddStoreMapConstant( + heap_number, isolate()->factory()->mutable_heap_number_map()); Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(), value); instr = New<HStoreNamedField>(checked_object->ActualValue(), @@ -5389,12 +5863,20 @@ // Already holds a HeapNumber; load the box and write its value field. 
HInstruction* heap_number = Add<HLoadNamedField>( checked_object, static_cast<HValue*>(NULL), heap_number_access); - heap_number->set_type(HType::HeapNumber()); instr = New<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(), value, STORE_TO_INITIALIZED_ENTRY); } } else { + if (field_access.representation().IsHeapObject()) { + BuildCheckHeapObject(value); + } + + if (!info->field_maps()->is_empty()) { + DCHECK(field_access.representation().IsHeapObject()); + value = Add<HCheckMaps>(value, info->field_maps()); + } + // This is a normal store. instr = New<HStoreNamedField>( checked_object->ActualValue(), field_access, value, @@ -5402,9 +5884,9 @@ } if (transition_to_field) { - HConstant* transition_constant = Add<HConstant>(info->transition()); - instr->SetTransition(transition_constant, top_info()); - instr->SetChangesFlag(kMaps); + Handle<Map> transition(info->transition()); + DCHECK(!transition->is_deprecated()); + instr->SetTransition(Add<HConstant>(transition)); } return instr; } @@ -5447,7 +5929,7 @@ return constant_.is_identical_to(info->constant_); } - ASSERT(lookup_.IsField()); + DCHECK(lookup_.IsField()); if (!info->lookup_.IsField()) return false; Representation r = access_.representation(); @@ -5458,7 +5940,27 @@ } if (info->access_.offset() != access_.offset()) return false; if (info->access_.IsInobject() != access_.IsInobject()) return false; + if (IsLoad()) { + if (field_maps_.is_empty()) { + info->field_maps_.Clear(); + } else if (!info->field_maps_.is_empty()) { + for (int i = 0; i < field_maps_.length(); ++i) { + info->field_maps_.AddMapIfMissing(field_maps_.at(i), info->zone()); + } + info->field_maps_.Sort(); + } + } else { + // We can only merge stores that agree on their field maps. The comparison + // below is safe, since we keep the field maps sorted. 
+ if (field_maps_.length() != info->field_maps_.length()) return false; + for (int i = 0; i < field_maps_.length(); ++i) { + if (!field_maps_.at(i).is_identical_to(info->field_maps_.at(i))) { + return false; + } + } + } info->GeneralizeRepresentation(r); + info->field_type_ = info->field_type_.Combine(field_type_); return true; } @@ -5477,7 +5979,11 @@ } if (lookup_.IsField()) { + // Construct the object field access. access_ = HObjectAccess::ForField(map, &lookup_, name_); + + // Load field map for heap objects. + LoadFieldMaps(map); } else if (lookup_.IsPropertyCallbacks()) { Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate()); if (!callback->IsAccessorPair()) return false; @@ -5488,16 +5994,11 @@ Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor)); if (accessor->shared()->IsApiFunction()) { CallOptimization call_optimization(accessor); - if (!call_optimization.is_simple_api_call()) return false; - CallOptimization::HolderLookup holder_lookup; - api_holder_ = call_optimization.LookupHolderOfExpectedType( - map, &holder_lookup); - switch (holder_lookup) { - case CallOptimization::kHolderNotFound: - return false; - case CallOptimization::kHolderIsReceiver: - case CallOptimization::kHolderFound: - break; + if (call_optimization.is_simple_api_call()) { + CallOptimization::HolderLookup holder_lookup; + Handle<Map> receiver_map = this->map(); + api_holder_ = call_optimization.LookupHolderOfExpectedType( + receiver_map, &holder_lookup); } } accessor_ = accessor; @@ -5509,6 +6010,44 @@ } +void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps( + Handle<Map> map) { + // Clear any previously collected field maps/type. + field_maps_.Clear(); + field_type_ = HType::Tagged(); + + // Figure out the field type from the accessor map. + Handle<HeapType> field_type(lookup_.GetFieldTypeFromMap(*map), isolate()); + + // Collect the (stable) maps from the field type. 
+ int num_field_maps = field_type->NumClasses(); + if (num_field_maps == 0) return; + DCHECK(access_.representation().IsHeapObject()); + field_maps_.Reserve(num_field_maps, zone()); + HeapType::Iterator<Map> it = field_type->Classes(); + while (!it.Done()) { + Handle<Map> field_map = it.Current(); + if (!field_map->is_stable()) { + field_maps_.Clear(); + return; + } + field_maps_.Add(field_map, zone()); + it.Advance(); + } + field_maps_.Sort(); + DCHECK_EQ(num_field_maps, field_maps_.length()); + + // Determine field HType from field HeapType. + field_type_ = HType::FromType<HeapType>(field_type); + DCHECK(field_type_.IsHeapObject()); + + // Add dependency on the map that introduced the field. + Map::AddDependentCompilationInfo( + handle(lookup_.GetFieldOwnerFromMap(*map), isolate()), + DependentCode::kFieldTypeGroup, top_info()); +} + + bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() { Handle<Map> map = this->map(); @@ -5533,6 +6072,11 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() { if (!CanInlinePropertyAccess(type_)) return false; if (IsJSObjectFieldAccessor()) return IsLoad(); + if (this->map()->function_with_prototype() && + !this->map()->has_non_instance_prototype() && + name_.is_identical_to(isolate()->factory()->prototype_string())) { + return IsLoad(); + } if (!LookupDescriptor()) return false; if (lookup_.IsFound()) { if (IsLoad()) return true; @@ -5545,6 +6089,11 @@ Handle<Map> map = this->map(); map->LookupTransition(NULL, *name_, &lookup_); if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) { + // Construct the object field access. + access_ = HObjectAccess::ForField(map, &lookup_, name_); + + // Load field map for heap objects. 
+ LoadFieldMaps(transition()); return true; } return false; @@ -5553,7 +6102,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic( SmallMapList* types) { - ASSERT(type_->Is(ToType(types->first()))); + DCHECK(type_->Is(ToType(types->first()))); if (!CanAccessMonomorphic()) return false; STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism); if (types->length() > kMaxLoadPolymorphism) return false; @@ -5576,7 +6125,7 @@ if (type_->Is(Type::Number())) return false; // Multiple maps cannot transition to the same target map. - ASSERT(!IsLoad() || !lookup_.IsTransition()); + DCHECK(!IsLoad() || !lookup_.IsTransition()); if (lookup_.IsTransition() && types->length() > 1) return false; for (int i = 1; i < types->length(); ++i) { @@ -5589,6 +6138,14 @@ } +Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() { + JSFunction* ctor = IC::GetRootConstructor( + type_, current_info()->closure()->context()->native_context()); + if (ctor != NULL) return handle(ctor->initial_map()); + return type_->AsClass()->Map(); +} + + static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) { return type->Is(Type::NumberOrString()) && target->shared()->strict_mode() == SLOPPY && @@ -5607,10 +6164,16 @@ HObjectAccess access = HObjectAccess::ForMap(); // bogus default if (info->GetJSObjectFieldAccess(&access)) { - ASSERT(info->IsLoad()); + DCHECK(info->IsLoad()); return New<HLoadNamedField>(object, checked_object, access); } + if (info->name().is_identical_to(isolate()->factory()->prototype_string()) && + info->map()->function_with_prototype()) { + DCHECK(!info->map()->has_non_instance_prototype()); + return New<HLoadFunctionPrototype>(checked_object); + } + HValue* checked_holder = checked_object; if (info->has_holder()) { Handle<JSObject> prototype(JSObject::cast(info->map()->prototype())); @@ -5618,7 +6181,7 @@ } if (!info->lookup()->IsFound()) { - ASSERT(info->IsLoad()); + DCHECK(info->IsLoad()); return graph()->GetConstantUndefined(); } @@ 
-5631,7 +6194,7 @@ } if (info->lookup()->IsTransition()) { - ASSERT(!info->IsLoad()); + DCHECK(!info->IsLoad()); return BuildStoreNamedField(info, checked_object, value); } @@ -5652,14 +6215,14 @@ ? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id) : TryInlineSetter( info->accessor(), info->map(), ast_id, return_id, value); - if (success) return NULL; + if (success || HasStackOverflow()) return NULL; } PushArgumentsFromEnvironment(argument_count); return BuildCallConstantFunction(info->accessor(), argument_count); } - ASSERT(info->lookup()->IsConstant()); + DCHECK(info->lookup()->IsConstant()); if (info->IsLoad()) { return New<HConstant>(info->constant()); } else { @@ -5670,6 +6233,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess( PropertyAccessType access_type, + Expression* expr, BailoutId ast_id, BailoutId return_id, HValue* object, @@ -5783,7 +6347,8 @@ if (count == types->length() && FLAG_deoptimize_uncommon_cases) { FinishExitWithHardDeoptimization("Uknown map in polymorphic access"); } else { - HInstruction* instr = BuildNamedGeneric(access_type, object, name, value); + HInstruction* instr = BuildNamedGeneric(access_type, expr, object, name, + value); AddInstruction(instr); if (!ast_context()->IsEffect()) Push(access_type == LOAD ? 
instr : value); @@ -5796,7 +6361,7 @@ } } - ASSERT(join != NULL); + DCHECK(join != NULL); if (join->HasPredecessor()) { join->SetJoinId(ast_id); set_current_block(join); @@ -5857,7 +6422,7 @@ Literal* key = prop->key()->AsLiteral(); Handle<String> name = Handle<String>::cast(key->value()); - ASSERT(!name.is_null()); + DCHECK(!name.is_null()); HInstruction* instr = BuildNamedAccess(STORE, ast_id, return_id, expr, object, name, value, is_uninitialized); @@ -5875,7 +6440,7 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) { Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); + DCHECK(prop != NULL); CHECK_ALIVE(VisitForValue(prop->obj())); if (!prop->key()->IsPropertyName()) { CHECK_ALIVE(VisitForValue(prop->key())); @@ -5899,7 +6464,7 @@ Handle<GlobalObject> global(current_info()->global_object()); Handle<PropertyCell> cell(global->GetPropertyCell(&lookup)); if (cell->type()->IsConstant()) { - Handle<Object> constant = cell->type()->AsConstant(); + Handle<Object> constant = cell->type()->AsConstant()->Value(); if (value->IsConstant()) { HConstant* c_value = HConstant::cast(value); if (!constant.is_identical_to(c_value->handle(isolate()))) { @@ -5934,7 +6499,7 @@ Add<HStoreNamedGeneric>(global_object, var->name(), value, function_strict_mode()); USE(instr); - ASSERT(instr->HasObservableSideEffects()); + DCHECK(instr->HasObservableSideEffects()); Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); } } @@ -5944,7 +6509,7 @@ Expression* target = expr->target(); VariableProxy* proxy = target->AsVariableProxy(); Property* prop = target->AsProperty(); - ASSERT(proxy == NULL || prop == NULL); + DCHECK(proxy == NULL || prop == NULL); // We have a second position recorded in the FullCodeGenerator to have // type feedback for the binary operation. 
@@ -6023,8 +6588,7 @@ CHECK_ALIVE(VisitForValue(prop->obj())); HValue* object = Top(); HValue* key = NULL; - if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) || - prop->IsStringAccess()) { + if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) { CHECK_ALIVE(VisitForValue(prop->key())); key = Top(); } @@ -6046,12 +6610,12 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); VariableProxy* proxy = expr->target()->AsVariableProxy(); Property* prop = expr->target()->AsProperty(); - ASSERT(proxy == NULL || prop == NULL); + DCHECK(proxy == NULL || prop == NULL); if (expr->is_compound()) { HandleCompoundAssignment(expr); @@ -6147,7 +6711,7 @@ expr->op() == Token::INIT_CONST) { mode = HStoreContextSlot::kNoCheck; } else { - ASSERT(expr->op() == Token::INIT_CONST_LEGACY); + DCHECK(expr->op() == Token::INIT_CONST_LEGACY); mode = HStoreContextSlot::kCheckIgnoreAssignment; } @@ -6177,20 +6741,20 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); // We don't optimize functions with invalid left-hand sides in // assignments, count operations, or for-in. Consequently throw can // currently only occur in an effect context. 
- ASSERT(ast_context()->IsEffect()); + DCHECK(ast_context()->IsEffect()); CHECK_ALIVE(VisitForValue(expr->exception())); HValue* value = environment()->Pop(); if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position()); - Add<HPushArgument>(value); + Add<HPushArguments>(value); Add<HCallRuntime>(isolate()->factory()->empty_string(), - Runtime::FunctionForId(Runtime::kHiddenThrow), 1); + Runtime::FunctionForId(Runtime::kThrow), 1); Add<HSimulate>(expr->id()); // If the throw definitely exits the function, we can finish with a dummy @@ -6230,6 +6794,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric( PropertyAccessType access_type, + Expression* expr, HValue* object, Handle<String> name, HValue* value, @@ -6239,7 +6804,15 @@ Deoptimizer::SOFT); } if (access_type == LOAD) { - return New<HLoadNamedGeneric>(object, name); + HLoadNamedGeneric* result = New<HLoadNamedGeneric>(object, name); + if (FLAG_vector_ics) { + Handle<SharedFunctionInfo> current_shared = + function_state()->compilation_info()->shared_info(); + result->SetVectorAndSlot( + handle(current_shared->feedback_vector(), isolate()), + expr->AsProperty()->PropertyFeedbackSlot()); + } + return result; } else { return New<HStoreNamedGeneric>(object, name, value, function_strict_mode()); } @@ -6249,11 +6822,20 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric( PropertyAccessType access_type, + Expression* expr, HValue* object, HValue* key, HValue* value) { if (access_type == LOAD) { - return New<HLoadKeyedGeneric>(object, key); + HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(object, key); + if (FLAG_vector_ics) { + Handle<SharedFunctionInfo> current_shared = + function_state()->compilation_info()->shared_info(); + result->SetVectorAndSlot( + handle(current_shared->feedback_vector(), isolate()), + expr->AsProperty()->PropertyFeedbackSlot()); + } + return result; } else { return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode()); } @@ -6284,8 +6866,7 @@ 
Handle<Map> map, PropertyAccessType access_type, KeyedAccessStoreMode store_mode) { - HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(), - dependency); + HCheckMaps* checked_object = Add<HCheckMaps>(object, map, dependency); if (dependency) { checked_object->ClearDependsOnFlag(kElementsKind); } @@ -6294,15 +6875,16 @@ // monomorphic stores need a prototype chain check because shape // changes could allow callbacks on elements in the chain that // aren't compatible with monomorphic keyed stores. - Handle<JSObject> prototype(JSObject::cast(map->prototype())); - Object* holder = map->prototype(); - while (holder->GetPrototype(isolate())->IsJSObject()) { - holder = holder->GetPrototype(isolate()); + PrototypeIterator iter(map); + JSObject* holder = NULL; + while (!iter.IsAtEnd()) { + holder = JSObject::cast(*PrototypeIterator::GetCurrent(iter)); + iter.Advance(); } - ASSERT(holder->GetPrototype(isolate())->IsNull()); + DCHECK(holder && holder->IsJSObject()); - BuildCheckPrototypeMaps(prototype, - Handle<JSObject>(JSObject::cast(holder))); + BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())), + Handle<JSObject>(holder)); } LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map); @@ -6381,6 +6963,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( + Expression* expr, HValue* object, HValue* key, HValue* val, @@ -6412,7 +6995,8 @@ possible_transitioned_maps.Add(map); } if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) { - HInstruction* result = BuildKeyedGeneric(access_type, object, key, val); + HInstruction* result = BuildKeyedGeneric(access_type, expr, object, key, + val); *has_side_effects = result->HasObservableSideEffects(); return AddInstruction(result); } @@ -6429,9 +7013,9 @@ HTransitionElementsKind* transition = NULL; for (int i = 0; i < maps->length(); ++i) { Handle<Map> map = maps->at(i); - ASSERT(map->IsMap()); + DCHECK(map->IsMap()); if (!transition_target.at(i).is_null()) { - 
ASSERT(Map::IsValidElementsTransition( + DCHECK(Map::IsValidElementsTransition( map->elements_kind(), transition_target.at(i)->elements_kind())); transition = Add<HTransitionElementsKind>(object, map, @@ -6443,13 +7027,14 @@ // If only one map is left after transitioning, handle this case // monomorphically. - ASSERT(untransitionable_maps.length() >= 1); + DCHECK(untransitionable_maps.length() >= 1); if (untransitionable_maps.length() == 1) { Handle<Map> untransitionable_map = untransitionable_maps[0]; HInstruction* instr = NULL; if (untransitionable_map->has_slow_elements_kind() || !untransitionable_map->IsJSObjectMap()) { - instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val)); + instr = AddInstruction(BuildKeyedGeneric(access_type, expr, object, key, + val)); } else { instr = BuildMonomorphicElementAccess( object, key, val, transition, untransitionable_map, access_type, @@ -6474,9 +7059,10 @@ set_current_block(this_map); HInstruction* access = NULL; if (IsDictionaryElementsKind(elements_kind)) { - access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val)); + access = AddInstruction(BuildKeyedGeneric(access_type, expr, object, key, + val)); } else { - ASSERT(IsFastElementsKind(elements_kind) || + DCHECK(IsFastElementsKind(elements_kind) || IsExternalArrayElementsKind(elements_kind) || IsFixedTypedArrayElementsKind(elements_kind)); LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map); @@ -6503,7 +7089,7 @@ // necessary because FinishExitWithHardDeoptimization does an AbnormalExit // rather than joining the join block. If this becomes an issue, insert a // generic access in the case length() == 0. - ASSERT(join->predecessors()->length() > 0); + DCHECK(join->predecessors()->length() > 0); // Deopt if none of the cases matched. 
NoObservableSideEffectsScope scope(this); FinishExitWithHardDeoptimization("Unknown map in polymorphic element access"); @@ -6519,7 +7105,7 @@ Expression* expr, PropertyAccessType access_type, bool* has_side_effects) { - ASSERT(!expr->IsPropertyName()); + DCHECK(!expr->IsPropertyName()); HInstruction* instr = NULL; SmallMapList* types; @@ -6545,7 +7131,8 @@ if (monomorphic) { Handle<Map> map = types->first(); if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) { - instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val)); + instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key, + val)); } else { BuildCheckHeapObject(obj); instr = BuildMonomorphicElementAccess( @@ -6553,7 +7140,7 @@ } } else if (!force_generic && (types != NULL && !types->is_empty())) { return HandlePolymorphicElementAccess( - obj, key, val, types, access_type, + expr, obj, key, val, types, access_type, expr->GetStoreMode(), has_side_effects); } else { if (access_type == STORE) { @@ -6568,7 +7155,7 @@ Deoptimizer::SOFT); } } - instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val)); + instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key, val)); } *has_side_effects = instr->HasObservableSideEffects(); return instr; @@ -6591,7 +7178,7 @@ HInstruction* insert_after = entry; for (int i = 0; i < arguments_values->length(); i++) { HValue* argument = arguments_values->at(i); - HInstruction* push_argument = New<HPushArgument>(argument); + HInstruction* push_argument = New<HPushArguments>(argument); push_argument->InsertAfter(insert_after); insert_after = push_argument; } @@ -6663,19 +7250,19 @@ bool is_uninitialized) { SmallMapList* types; ComputeReceiverTypes(expr, object, &types, zone()); - ASSERT(types != NULL); + DCHECK(types != NULL); if (types->length() > 0) { PropertyAccessInfo info(this, access, ToType(types->first()), name); if (!info.CanAccessAsMonomorphic(types)) { HandlePolymorphicNamedFieldAccess( - access, ast_id, return_id, 
object, value, types, name); + access, expr, ast_id, return_id, object, value, types, name); return NULL; } HValue* checked_object; // Type::Number() is only supported by polymorphic load/call handling. - ASSERT(!info.type()->Is(Type::Number())); + DCHECK(!info.type()->Is(Type::Number())); BuildCheckHeapObject(object); if (AreStringTypes(types)) { checked_object = @@ -6687,7 +7274,7 @@ &info, object, checked_object, value, ast_id, return_id); } - return BuildNamedGeneric(access, object, name, value, is_uninitialized); + return BuildNamedGeneric(access, expr, object, name, value, is_uninitialized); } @@ -6711,11 +7298,6 @@ AddInstruction(char_code); instr = NewUncasted<HStringCharFromCode>(char_code); - } else if (expr->IsFunctionPrototype()) { - HValue* function = Pop(); - BuildCheckHeapObject(function); - instr = New<HLoadFunctionPrototype>(function); - } else if (expr->key()->IsPropertyName()) { Handle<String> name = expr->key()->AsLiteral()->AsPropertyName(); HValue* object = Pop(); @@ -6748,15 +7330,14 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (TryArgumentsAccess(expr)) return; CHECK_ALIVE(VisitForValue(expr->obj())); - if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) || - expr->IsStringAccess()) { + if (!expr->key()->IsPropertyName() || expr->IsStringAccess()) { CHECK_ALIVE(VisitForValue(expr->key())); } @@ -6764,19 +7345,9 @@ } -HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant, - CompilationInfo* info) { - HConstant* constant_value = New<HConstant>(constant); - - if (constant->map()->CanOmitMapChecks()) { - constant->map()->AddDependentCompilationInfo( - DependentCode::kPrototypeCheckGroup, info); - return constant_value; - } - - AddInstruction(constant_value); - 
HCheckMaps* check = - Add<HCheckMaps>(constant_value, handle(constant->map()), info); +HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) { + HCheckMaps* check = Add<HCheckMaps>( + Add<HConstant>(constant), handle(constant->map())); check->ClearDependsOnFlag(kElementsKind); return check; } @@ -6784,14 +7355,19 @@ HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder) { - while (!prototype.is_identical_to(holder)) { - BuildConstantMapCheck(prototype, top_info()); - prototype = handle(JSObject::cast(prototype->GetPrototype())); + PrototypeIterator iter(isolate(), prototype, + PrototypeIterator::START_AT_RECEIVER); + while (holder.is_null() || + !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) { + BuildConstantMapCheck( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))); + iter.Advance(); + if (iter.IsAtEnd()) { + return NULL; + } } - - HInstruction* checked_object = BuildConstantMapCheck(prototype, top_info()); - if (!checked_object->IsLinked()) AddInstruction(checked_object); - return checked_object; + return BuildConstantMapCheck( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))); } @@ -6819,7 +7395,7 @@ HValue* arity = Add<HConstant>(argument_count - 1); - HValue* op_vals[] = { fun, context, arity, expected_param_count }; + HValue* op_vals[] = { context, fun, arity, expected_param_count }; Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); @@ -6827,7 +7403,7 @@ return New<HCallWithDescriptor>( adaptor_value, argument_count, descriptor, - Vector<HValue*>(op_vals, descriptor->environment_length())); + Vector<HValue*>(op_vals, descriptor->GetEnvironmentLength())); } @@ -6861,13 +7437,36 @@ } +class FunctionSorter { + public: + explicit FunctionSorter(int index = 0, int ticks = 0, int size = 0) + : index_(index), ticks_(ticks), size_(size) {} + + int index() const { return index_; } + int ticks() const { return ticks_; } + int 
size() const { return size_; } + + private: + int index_; + int ticks_; + int size_; +}; + + +inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) { + int diff = lhs.ticks() - rhs.ticks(); + if (diff != 0) return diff > 0; + return lhs.size() < rhs.size(); +} + + void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( Call* expr, HValue* receiver, SmallMapList* types, Handle<String> name) { int argument_count = expr->arguments()->length() + 1; // Includes receiver. - int order[kMaxCallPolymorphism]; + FunctionSorter order[kMaxCallPolymorphism]; bool handle_smi = false; bool handled_string = false; @@ -6889,17 +7488,20 @@ handle_smi = true; } expr->set_target(target); - order[ordered_functions++] = i; + order[ordered_functions++] = FunctionSorter( + i, target->shared()->profiler_ticks(), InliningAstSize(target)); } } + std::sort(order, order + ordered_functions); + HBasicBlock* number_block = NULL; HBasicBlock* join = NULL; handled_string = false; int count = 0; for (int fn = 0; fn < ordered_functions; ++fn) { - int i = order[fn]; + int i = order[fn].index(); PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name); if (info.type()->Is(Type::String())) { if (handled_string) continue; @@ -6995,7 +7597,7 @@ } else { Property* prop = expr->expression()->AsProperty(); HInstruction* function = BuildNamedGeneric( - LOAD, receiver, name, NULL, prop->IsUninitialized()); + LOAD, prop, receiver, name, NULL, prop->IsUninitialized()); AddInstruction(function); Push(function); AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE); @@ -7025,7 +7627,7 @@ // We assume that control flow is always live after an expression. So // even without predecessors to the join block, we set it as the exit // block and continue by adding instructions there. - ASSERT(join != NULL); + DCHECK(join != NULL); if (join->HasPredecessor()) { set_current_block(join); join->SetJoinId(expr->id()); @@ -7071,6 +7673,11 @@ return target_shared->inline_builtin() ? 
0 : kNotInlinable; } + if (target_shared->IsApiFunction()) { + TraceInline(target, caller, "target is api function"); + return kNotInlinable; + } + // Do a quick check on source code length to avoid parsing large // inlining candidates. if (target_shared->SourceSize() > @@ -7084,7 +7691,7 @@ TraceInline(target, caller, "target not inlineable"); return kNotInlinable; } - if (target_shared->dont_inline() || target_shared->dont_optimize()) { + if (target_shared->DisableOptimizationReason() != kNoReason) { TraceInline(target, caller, "target contains unsupported syntax [early]"); return kNotInlinable; } @@ -7144,6 +7751,9 @@ // Parse and allocate variables. CompilationInfo target_info(target, zone()); + // Use the same AstValueFactory for creating strings in the sub-compilation + // step, but don't transfer ownership to target_info. + target_info.SetAstValueFactory(top_info()->ast_value_factory(), false); Handle<SharedFunctionInfo> target_shared(target->shared()); if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info)) { if (target_info.isolate()->has_pending_exception()) { @@ -7168,8 +7778,7 @@ TraceInline(target, caller, "target AST is too large [late]"); return false; } - AstProperties::Flags* flags(function->flags()); - if (flags->Contains(kDontInline) || function->dont_optimize()) { + if (function->dont_optimize()) { TraceInline(target, caller, "target contains unsupported syntax [late]"); return false; } @@ -7219,6 +7828,7 @@ target_shared->set_scope_info(*target_scope_info); } target_shared->EnableDeoptimizationSupport(*target_info.code()); + target_shared->set_feedback_vector(*target_info.feedback_vector()); Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG, &target_info, target_shared); @@ -7229,7 +7839,7 @@ // TryInline should always return true). // Type-check the inlined function. 
- ASSERT(target_shared->has_deoptimization_support()); + DCHECK(target_shared->has_deoptimization_support()); AstTyper::Run(&target_info); int function_id = graph()->TraceInlinedFunction(target_shared, position); @@ -7252,25 +7862,33 @@ HConstant* context = Add<HConstant>(Handle<Context>(target->context())); inner_env->BindContext(context); - Add<HSimulate>(return_id); - current_block()->UpdateEnvironment(inner_env); - HArgumentsObject* arguments_object = NULL; - - // If the function uses arguments object create and bind one, also copy + // Create a dematerialized arguments object for the function, also copy the // current arguments values to use them for materialization. + HEnvironment* arguments_env = inner_env->arguments_environment(); + int parameter_count = arguments_env->parameter_count(); + HArgumentsObject* arguments_object = Add<HArgumentsObject>(parameter_count); + for (int i = 0; i < parameter_count; i++) { + arguments_object->AddArgument(arguments_env->Lookup(i), zone()); + } + + // If the function uses arguments object then bind bind one. if (function->scope()->arguments() != NULL) { - ASSERT(function->scope()->arguments()->IsStackAllocated()); - HEnvironment* arguments_env = inner_env->arguments_environment(); - int arguments_count = arguments_env->parameter_count(); - arguments_object = Add<HArgumentsObject>(arguments_count); + DCHECK(function->scope()->arguments()->IsStackAllocated()); inner_env->Bind(function->scope()->arguments(), arguments_object); - for (int i = 0; i < arguments_count; i++) { - arguments_object->AddArgument(arguments_env->Lookup(i), zone()); - } } + // Capture the state before invoking the inlined function for deopt in the + // inlined function. This simulate has no bailout-id since it's not directly + // reachable for deopt, and is only used to capture the state. If the simulate + // becomes reachable by merging, the ast id of the simulate merged into it is + // adopted. 
+ Add<HSimulate>(BailoutId::None()); + + current_block()->UpdateEnvironment(inner_env); + Scope* saved_scope = scope(); + set_scope(target_info.scope()); HEnterInlined* enter_inlined = - Add<HEnterInlined>(target, arguments_count, function, + Add<HEnterInlined>(return_id, target, arguments_count, function, function_state()->inlining_kind(), function->scope()->arguments(), arguments_object); @@ -7278,6 +7896,7 @@ VisitDeclarations(target_info.scope()->declarations()); VisitStatements(function->body()); + set_scope(saved_scope); if (HasStackOverflow()) { // Bail out if the inline function did, as we cannot residualize a call // instead. @@ -7292,7 +7911,7 @@ inlined_count_ += nodes_added; Handle<Code> unoptimized_code(target_shared->code()); - ASSERT(unoptimized_code->kind() == Code::FUNCTION); + DCHECK(unoptimized_code->kind() == Code::FUNCTION); Handle<TypeFeedbackInfo> type_info( TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info())); graph()->update_type_change_checksum(type_info->own_type_change_checksum()); @@ -7310,7 +7929,7 @@ } else if (call_context()->IsEffect()) { Goto(function_return(), state); } else { - ASSERT(call_context()->IsValue()); + DCHECK(call_context()->IsValue()); AddLeaveInlined(implicit_return_value, state); } } else if (state->inlining_kind() == SETTER_CALL_RETURN) { @@ -7322,7 +7941,7 @@ } else if (call_context()->IsEffect()) { Goto(function_return(), state); } else { - ASSERT(call_context()->IsValue()); + DCHECK(call_context()->IsValue()); AddLeaveInlined(implicit_return_value, state); } } else { @@ -7333,7 +7952,7 @@ } else if (call_context()->IsEffect()) { Goto(function_return(), state); } else { - ASSERT(call_context()->IsValue()); + DCHECK(call_context()->IsValue()); AddLeaveInlined(undefined, state); } } @@ -7347,7 +7966,7 @@ HEnterInlined* entry = function_state()->entry(); // Pop the return test context from the expression context stack. 
- ASSERT(ast_context() == inlined_test_context()); + DCHECK(ast_context() == inlined_test_context()); ClearInlinedTestContext(); delete target_state; @@ -7453,6 +8072,7 @@ if (!FLAG_fast_math) break; // Fall through if FLAG_fast_math. case kMathRound: + case kMathFround: case kMathFloor: case kMathAbs: case kMathSqrt: @@ -7524,6 +8144,7 @@ if (!FLAG_fast_math) break; // Fall through if FLAG_fast_math. case kMathRound: + case kMathFround: case kMathFloor: case kMathAbs: case kMathSqrt: @@ -7554,7 +8175,7 @@ left, kMathPowHalf); // MathPowHalf doesn't have side effects so there's no need for // an environment simulation here. - ASSERT(!sqrt->HasObservableSideEffects()); + DCHECK(!sqrt->HasObservableSideEffects()); result = NewUncasted<HDiv>(one, sqrt); } else if (exponent == 2.0) { result = NewUncasted<HMul>(left, left); @@ -7596,6 +8217,8 @@ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false; ElementsKind elements_kind = receiver_map->elements_kind(); if (!IsFastElementsKind(elements_kind)) return false; + if (receiver_map->is_observed()) return false; + DCHECK(receiver_map->is_extensible()); Drop(expr->arguments()->length()); HValue* result; @@ -7658,32 +8281,202 @@ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false; ElementsKind elements_kind = receiver_map->elements_kind(); if (!IsFastElementsKind(elements_kind)) return false; - - HValue* op_vals[] = { - context(), - // Receiver. - environment()->ExpressionStackAt(expr->arguments()->length()) - }; + if (receiver_map->is_observed()) return false; + if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false; + DCHECK(receiver_map->is_extensible()); + + // If there may be elements accessors in the prototype chain, the fast + // inlined version can't be used. + if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false; + // If there currently can be no elements accessors on the prototype chain, + // it doesn't mean that there won't be any later. 
Install a full prototype + // chain check to trap element accessors being installed on the prototype + // chain, which would cause elements to go to dictionary mode and result + // in a map change. + Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype())); + BuildCheckPrototypeMaps(prototype, Handle<JSObject>()); const int argc = expr->arguments()->length(); - // Includes receiver. - PushArgumentsFromEnvironment(argc + 1); + if (argc != 1) return false; + + HValue* value_to_push = Pop(); + HValue* array = Pop(); + Drop(1); // Drop function. + + HInstruction* new_size = NULL; + HValue* length = NULL; + + { + NoObservableSideEffectsScope scope(this); + + length = Add<HLoadNamedField>(array, static_cast<HValue*>(NULL), + HObjectAccess::ForArrayLength(elements_kind)); + + new_size = AddUncasted<HAdd>(length, graph()->GetConstant1()); + + bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE; + BuildUncheckedMonomorphicElementAccess(array, length, + value_to_push, is_array, + elements_kind, STORE, + NEVER_RETURN_HOLE, + STORE_AND_GROW_NO_TRANSITION); + + if (!ast_context()->IsEffect()) Push(new_size); + Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE); + if (!ast_context()->IsEffect()) Drop(1); + } + + ast_context()->ReturnValue(new_size); + return true; + } + case kArrayShift: { + if (receiver_map.is_null()) return false; + if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false; + ElementsKind kind = receiver_map->elements_kind(); + if (!IsFastElementsKind(kind)) return false; + if (receiver_map->is_observed()) return false; + DCHECK(receiver_map->is_extensible()); + + // If there may be elements accessors in the prototype chain, the fast + // inlined version can't be used. + if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false; + + // If there currently can be no elements accessors on the prototype chain, + // it doesn't mean that there won't be any later. 
Install a full prototype + // chain check to trap element accessors being installed on the prototype + // chain, which would cause elements to go to dictionary mode and result + // in a map change. + BuildCheckPrototypeMaps( + handle(JSObject::cast(receiver_map->prototype()), isolate()), + Handle<JSObject>::null()); + + // Threshold for fast inlined Array.shift(). + HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16)); + + Drop(expr->arguments()->length()); + HValue* receiver = Pop(); + HValue* function = Pop(); + HValue* result; + + { + NoObservableSideEffectsScope scope(this); + + HValue* length = Add<HLoadNamedField>( + receiver, static_cast<HValue*>(NULL), + HObjectAccess::ForArrayLength(kind)); + + IfBuilder if_lengthiszero(this); + HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>( + length, graph()->GetConstant0(), Token::EQ); + if_lengthiszero.Then(); + { + if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined()); + } + if_lengthiszero.Else(); + { + HValue* elements = AddLoadElements(receiver); - CallInterfaceDescriptor* descriptor = - isolate()->call_descriptor(Isolate::CallHandler); + // Check if we can use the fast inlined Array.shift(). + IfBuilder if_inline(this); + if_inline.If<HCompareNumericAndBranch>( + length, inline_threshold, Token::LTE); + if (IsFastSmiOrObjectElementsKind(kind)) { + // We cannot handle copy-on-write backing stores here. + if_inline.AndIf<HCompareMap>( + elements, isolate()->factory()->fixed_array_map()); + } + if_inline.Then(); + { + // Remember the result. + if (!ast_context()->IsEffect()) { + Push(AddElementAccess(elements, graph()->GetConstant0(), NULL, + lengthiszero, kind, LOAD)); + } + + // Compute the new length. + HValue* new_length = AddUncasted<HSub>( + length, graph()->GetConstant1()); + new_length->ClearFlag(HValue::kCanOverflow); + + // Copy the remaining elements. 
+ LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement); + { + HValue* new_key = loop.BeginBody( + graph()->GetConstant0(), new_length, Token::LT); + HValue* key = AddUncasted<HAdd>(new_key, graph()->GetConstant1()); + key->ClearFlag(HValue::kCanOverflow); + HValue* element = AddUncasted<HLoadKeyed>( + elements, key, lengthiszero, kind, ALLOW_RETURN_HOLE); + HStoreKeyed* store = Add<HStoreKeyed>( + elements, new_key, element, kind); + store->SetFlag(HValue::kAllowUndefinedAsNaN); + } + loop.EndBody(); + + // Put a hole at the end. + HValue* hole = IsFastSmiOrObjectElementsKind(kind) + ? Add<HConstant>(isolate()->factory()->the_hole_value()) + : Add<HConstant>(FixedDoubleArray::hole_nan_as_double()); + if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS; + Add<HStoreKeyed>( + elements, new_length, hole, kind, INITIALIZING_STORE); + + // Remember new length. + Add<HStoreNamedField>( + receiver, HObjectAccess::ForArrayLength(kind), + new_length, STORE_TO_INITIALIZED_ENTRY); + } + if_inline.Else(); + { + Add<HPushArguments>(receiver); + result = Add<HCallJSFunction>(function, 1, true); + if (!ast_context()->IsEffect()) Push(result); + } + if_inline.End(); + } + if_lengthiszero.End(); + } + result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top(); + Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE); + if (!ast_context()->IsEffect()) Drop(1); + ast_context()->ReturnValue(result); + return true; + } + case kArrayIndexOf: + case kArrayLastIndexOf: { + if (receiver_map.is_null()) return false; + if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false; + ElementsKind kind = receiver_map->elements_kind(); + if (!IsFastElementsKind(kind)) return false; + if (receiver_map->is_observed()) return false; + if (argument_count != 2) return false; + DCHECK(receiver_map->is_extensible()); + + // If there may be elements accessors in the prototype chain, the fast + // inlined version can't be used. 
+ if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false; + + // If there currently can be no elements accessors on the prototype chain, + // it doesn't mean that there won't be any later. Install a full prototype + // chain check to trap element accessors being installed on the prototype + // chain, which would cause elements to go to dictionary mode and result + // in a map change. + BuildCheckPrototypeMaps( + handle(JSObject::cast(receiver_map->prototype()), isolate()), + Handle<JSObject>::null()); - ArrayPushStub stub(receiver_map->elements_kind(), argc); - Handle<Code> code = stub.GetCode(isolate()); - HConstant* code_value = Add<HConstant>(code); - - ASSERT((sizeof(op_vals) / kPointerSize) == - descriptor->environment_length()); - - HInstruction* call = New<HCallWithDescriptor>( - code_value, argc + 1, descriptor, - Vector<HValue*>(op_vals, descriptor->environment_length())); + HValue* search_element = Pop(); + HValue* receiver = Pop(); Drop(1); // Drop function. - ast_context()->ReturnInstruction(call, expr->id()); + + ArrayIndexOfMode mode = (id == kArrayIndexOf) + ? kFirstIndexOf : kLastIndexOf; + HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode); + + if (!ast_context()->IsEffect()) Push(index); + Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE); + if (!ast_context()->IsEffect()) Drop(1); + ast_context()->ReturnValue(index); return true; } default: @@ -7763,11 +8556,9 @@ if (call_type == kCallApiFunction) { // Cannot embed a direct reference to the global proxy map // as it maybe dropped on deserialization. 
- CHECK(!Serializer::enabled()); - ASSERT_EQ(0, receiver_maps->length()); - receiver_maps->Add(handle( - function->context()->global_object()->global_receiver()->map()), - zone()); + CHECK(!isolate()->serializer_enabled()); + DCHECK_EQ(0, receiver_maps->length()); + receiver_maps->Add(handle(function->global_proxy()->map()), zone()); } CallOptimization::HolderLookup holder_lookup = CallOptimization::kHolderNotFound; @@ -7792,7 +8583,7 @@ if (holder_lookup == CallOptimization::kHolderFound) { AddCheckPrototypeMaps(api_holder, receiver_maps->first()); } else { - ASSERT_EQ(holder_lookup, CallOptimization::kHolderIsReceiver); + DCHECK_EQ(holder_lookup, CallOptimization::kHolderIsReceiver); } // Includes receiver. PushArgumentsFromEnvironment(argc + 1); @@ -7801,23 +8592,22 @@ break; case kCallApiGetter: // Receiver and prototype chain cannot have changed. - ASSERT_EQ(0, argc); - ASSERT_EQ(NULL, receiver); + DCHECK_EQ(0, argc); + DCHECK_EQ(NULL, receiver); // Receiver is on expression stack. receiver = Pop(); - Add<HPushArgument>(receiver); + Add<HPushArguments>(receiver); break; case kCallApiSetter: { is_store = true; // Receiver and prototype chain cannot have changed. - ASSERT_EQ(1, argc); - ASSERT_EQ(NULL, receiver); + DCHECK_EQ(1, argc); + DCHECK_EQ(NULL, receiver); // Receiver and value are on expression stack. 
HValue* value = Pop(); receiver = Pop(); - Add<HPushArgument>(receiver); - Add<HPushArgument>(value); + Add<HPushArguments>(receiver, value); break; } } @@ -7845,26 +8635,26 @@ HValue* api_function_address = Add<HConstant>(ExternalReference(ref)); HValue* op_vals[] = { + context(), Add<HConstant>(function), call_data, holder, - api_function_address, - context() + api_function_address }; CallInterfaceDescriptor* descriptor = isolate()->call_descriptor(Isolate::ApiFunctionCall); - CallApiFunctionStub stub(is_store, call_data_is_undefined, argc); - Handle<Code> code = stub.GetCode(isolate()); + CallApiFunctionStub stub(isolate(), is_store, call_data_is_undefined, argc); + Handle<Code> code = stub.GetCode(); HConstant* code_value = Add<HConstant>(code); - ASSERT((sizeof(op_vals) / kPointerSize) == - descriptor->environment_length()); + DCHECK((sizeof(op_vals) / kPointerSize) == + descriptor->GetEnvironmentLength()); HInstruction* call = New<HCallWithDescriptor>( code_value, argc + 1, descriptor, - Vector<HValue*>(op_vals, descriptor->environment_length())); + Vector<HValue*>(op_vals, descriptor->GetEnvironmentLength())); if (drop_extra) Drop(1); // Drop function. 
ast_context()->ReturnInstruction(call, ast_id); @@ -7873,7 +8663,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) { - ASSERT(expr->expression()->IsProperty()); + DCHECK(expr->expression()->IsProperty()); if (!expr->IsMonomorphic()) { return false; @@ -7901,10 +8691,12 @@ HValue* function = Pop(); // f Drop(1); // apply + HValue* checked_function = AddCheckMap(function, function_map); + if (function_state()->outer() == NULL) { HInstruction* elements = Add<HArgumentsElements>(false); HInstruction* length = Add<HArgumentsLength>(elements); - HValue* wrapped_receiver = BuildWrapReceiver(receiver, function); + HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function); HInstruction* result = New<HApplyArguments>(function, wrapped_receiver, length, @@ -7914,13 +8706,13 @@ } else { // We are inside inlined function and we know exactly what is inside // arguments object. But we need to be able to materialize at deopt. - ASSERT_EQ(environment()->arguments_environment()->parameter_count(), + DCHECK_EQ(environment()->arguments_environment()->parameter_count(), function_state()->entry()->arguments_object()->arguments_count()); HArgumentsObject* args = function_state()->entry()->arguments_object(); const ZoneList<HValue*>* arguments_values = args->arguments_values(); int arguments_count = arguments_values->length(); Push(function); - Push(BuildWrapReceiver(receiver, function)); + Push(BuildWrapReceiver(receiver, checked_function)); for (int i = 1; i < arguments_count; i++) { Push(arguments_values->at(i)); } @@ -7950,19 +8742,217 @@ if (shared->strict_mode() == SLOPPY && !shared->native()) { // Cannot embed a direct reference to the global proxy // as is it dropped on deserialization. 
- CHECK(!Serializer::enabled()); - Handle<JSObject> global_receiver( - target->context()->global_object()->global_receiver()); - return Add<HConstant>(global_receiver); + CHECK(!isolate()->serializer_enabled()); + Handle<JSObject> global_proxy(target->context()->global_proxy()); + return Add<HConstant>(global_proxy); } return graph()->GetConstantUndefined(); } +void HOptimizedGraphBuilder::BuildArrayCall(Expression* expression, + int arguments_count, + HValue* function, + Handle<AllocationSite> site) { + Add<HCheckValue>(function, array_function()); + + if (IsCallArrayInlineable(arguments_count, site)) { + BuildInlinedCallArray(expression, arguments_count, site); + return; + } + + HInstruction* call = PreProcessCall(New<HCallNewArray>( + function, arguments_count + 1, site->GetElementsKind())); + if (expression->IsCall()) { + Drop(1); + } + ast_context()->ReturnInstruction(call, expression->id()); +} + + +HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver, + HValue* search_element, + ElementsKind kind, + ArrayIndexOfMode mode) { + DCHECK(IsFastElementsKind(kind)); + + NoObservableSideEffectsScope no_effects(this); + + HValue* elements = AddLoadElements(receiver); + HValue* length = AddLoadArrayLength(receiver, kind); + + HValue* initial; + HValue* terminating; + Token::Value token; + LoopBuilder::Direction direction; + if (mode == kFirstIndexOf) { + initial = graph()->GetConstant0(); + terminating = length; + token = Token::LT; + direction = LoopBuilder::kPostIncrement; + } else { + DCHECK_EQ(kLastIndexOf, mode); + initial = length; + terminating = graph()->GetConstant0(); + token = Token::GT; + direction = LoopBuilder::kPreDecrement; + } + + Push(graph()->GetConstantMinus1()); + if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) { + LoopBuilder loop(this, context(), direction); + { + HValue* index = loop.BeginBody(initial, terminating, token); + HValue* element = AddUncasted<HLoadKeyed>( + elements, index, 
static_cast<HValue*>(NULL), + kind, ALLOW_RETURN_HOLE); + IfBuilder if_issame(this); + if (IsFastDoubleElementsKind(kind)) { + if_issame.If<HCompareNumericAndBranch>( + element, search_element, Token::EQ_STRICT); + } else { + if_issame.If<HCompareObjectEqAndBranch>(element, search_element); + } + if_issame.Then(); + { + Drop(1); + Push(index); + loop.Break(); + } + if_issame.End(); + } + loop.EndBody(); + } else { + IfBuilder if_isstring(this); + if_isstring.If<HIsStringAndBranch>(search_element); + if_isstring.Then(); + { + LoopBuilder loop(this, context(), direction); + { + HValue* index = loop.BeginBody(initial, terminating, token); + HValue* element = AddUncasted<HLoadKeyed>( + elements, index, static_cast<HValue*>(NULL), + kind, ALLOW_RETURN_HOLE); + IfBuilder if_issame(this); + if_issame.If<HIsStringAndBranch>(element); + if_issame.AndIf<HStringCompareAndBranch>( + element, search_element, Token::EQ_STRICT); + if_issame.Then(); + { + Drop(1); + Push(index); + loop.Break(); + } + if_issame.End(); + } + loop.EndBody(); + } + if_isstring.Else(); + { + IfBuilder if_isnumber(this); + if_isnumber.If<HIsSmiAndBranch>(search_element); + if_isnumber.OrIf<HCompareMap>( + search_element, isolate()->factory()->heap_number_map()); + if_isnumber.Then(); + { + HValue* search_number = + AddUncasted<HForceRepresentation>(search_element, + Representation::Double()); + LoopBuilder loop(this, context(), direction); + { + HValue* index = loop.BeginBody(initial, terminating, token); + HValue* element = AddUncasted<HLoadKeyed>( + elements, index, static_cast<HValue*>(NULL), + kind, ALLOW_RETURN_HOLE); + + IfBuilder if_element_isnumber(this); + if_element_isnumber.If<HIsSmiAndBranch>(element); + if_element_isnumber.OrIf<HCompareMap>( + element, isolate()->factory()->heap_number_map()); + if_element_isnumber.Then(); + { + HValue* number = + AddUncasted<HForceRepresentation>(element, + Representation::Double()); + IfBuilder if_issame(this); + if_issame.If<HCompareNumericAndBranch>( + 
number, search_number, Token::EQ_STRICT); + if_issame.Then(); + { + Drop(1); + Push(index); + loop.Break(); + } + if_issame.End(); + } + if_element_isnumber.End(); + } + loop.EndBody(); + } + if_isnumber.Else(); + { + LoopBuilder loop(this, context(), direction); + { + HValue* index = loop.BeginBody(initial, terminating, token); + HValue* element = AddUncasted<HLoadKeyed>( + elements, index, static_cast<HValue*>(NULL), + kind, ALLOW_RETURN_HOLE); + IfBuilder if_issame(this); + if_issame.If<HCompareObjectEqAndBranch>( + element, search_element); + if_issame.Then(); + { + Drop(1); + Push(index); + loop.Break(); + } + if_issame.End(); + } + loop.EndBody(); + } + if_isnumber.End(); + } + if_isstring.End(); + } + + return Pop(); +} + + +bool HOptimizedGraphBuilder::TryHandleArrayCall(Call* expr, HValue* function) { + if (!array_function().is_identical_to(expr->target())) { + return false; + } + + Handle<AllocationSite> site = expr->allocation_site(); + if (site.is_null()) return false; + + BuildArrayCall(expr, + expr->arguments()->length(), + function, + site); + return true; +} + + +bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr, + HValue* function) { + if (!array_function().is_identical_to(expr->target())) { + return false; + } + + BuildArrayCall(expr, + expr->arguments()->length(), + function, + expr->allocation_site()); + return true; +} + + void HOptimizedGraphBuilder::VisitCall(Call* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); Expression* callee = expr->expression(); int argument_count = expr->arguments()->length() + 1; // Plus receiver. HInstruction* call = NULL; @@ -8053,8 +9043,7 @@ // evaluation of the arguments. 
CHECK_ALIVE(VisitForValue(expr->expression())); HValue* function = Top(); - bool global_call = proxy != NULL && proxy->var()->IsUnallocated(); - if (global_call) { + if (expr->global_call()) { Variable* var = proxy->var(); bool known_global_function = false; // If there is a global property cell for the name at compile time and @@ -8088,6 +9077,7 @@ return; } if (TryInlineApiFunctionCall(expr, receiver)) return; + if (TryHandleArrayCall(expr, function)) return; if (TryInlineCall(expr)) return; PushArgumentsFromEnvironment(argument_count); @@ -8137,20 +9127,21 @@ } -void HOptimizedGraphBuilder::BuildInlinedCallNewArray(CallNew* expr) { +void HOptimizedGraphBuilder::BuildInlinedCallArray( + Expression* expression, + int argument_count, + Handle<AllocationSite> site) { + DCHECK(!site.is_null()); + DCHECK(argument_count >= 0 && argument_count <= 1); NoObservableSideEffectsScope no_effects(this); - int argument_count = expr->arguments()->length(); // We should at least have the constructor on the expression stack. HValue* constructor = environment()->ExpressionStackAt(argument_count); - ElementsKind kind = expr->elements_kind(); - Handle<AllocationSite> site = expr->allocation_site(); - ASSERT(!site.is_null()); - // Register on the site for deoptimization if the transition feedback changes. 
AllocationSite::AddDependentCompilationInfo( site, AllocationSite::TRANSITIONS, top_info()); + ElementsKind kind = site->GetElementsKind(); HInstruction* site_instruction = Add<HConstant>(site); // In the single constant argument case, we may have to adjust elements kind @@ -8159,7 +9150,7 @@ HValue* argument = environment()->Top(); if (argument->IsConstant()) { HConstant* constant_argument = HConstant::cast(argument); - ASSERT(constant_argument->HasSmiValue()); + DCHECK(constant_argument->HasSmiValue()); int constant_array_size = constant_argument->Integer32Value(); if (constant_array_size != 0) { kind = GetHoleyElementsKind(kind); @@ -8173,32 +9164,12 @@ site_instruction, constructor, DISABLE_ALLOCATION_SITES); - HValue* new_object; - if (argument_count == 0) { - new_object = array_builder.AllocateEmptyArray(); - } else if (argument_count == 1) { - HValue* argument = environment()->Top(); - new_object = BuildAllocateArrayFromLength(&array_builder, argument); - } else { - HValue* length = Add<HConstant>(argument_count); - // Smi arrays need to initialize array elements with the hole because - // bailout could occur if the arguments don't fit in a smi. - // - // TODO(mvstanton): If all the arguments are constants in smi range, then - // we could set fill_with_hole to false and save a few instructions. - JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind) - ? JSArrayBuilder::FILL_WITH_HOLE - : JSArrayBuilder::DONT_FILL_WITH_HOLE; - new_object = array_builder.AllocateArray(length, length, fill_mode); - HValue* elements = array_builder.GetElementsLocation(); - for (int i = 0; i < argument_count; i++) { - HValue* value = environment()->ExpressionStackAt(argument_count - i - 1); - HValue* constant_i = Add<HConstant>(i); - Add<HStoreKeyed>(elements, constant_i, value, kind); - } - } + HValue* new_object = argument_count == 0 + ? 
array_builder.AllocateEmptyArray() + : BuildAllocateArrayFromLength(&array_builder, Top()); - Drop(argument_count + 1); // drop constructor and args. + int args_to_drop = argument_count + (expression->IsCall() ? 2 : 1); + Drop(args_to_drop); ast_context()->ReturnValue(new_object); } @@ -8212,15 +9183,14 @@ } -bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) { +bool HOptimizedGraphBuilder::IsCallArrayInlineable( + int argument_count, + Handle<AllocationSite> site) { Handle<JSFunction> caller = current_info()->closure(); - Handle<JSFunction> target(isolate()->native_context()->array_function(), - isolate()); - int argument_count = expr->arguments()->length(); + Handle<JSFunction> target = array_function(); // We should have the function plus array arguments on the environment stack. - ASSERT(environment()->length() >= (argument_count + 1)); - Handle<AllocationSite> site = expr->allocation_site(); - ASSERT(!site.is_null()); + DCHECK(environment()->length() >= (argument_count + 1)); + DCHECK(!site.is_null()); bool inline_ok = false; if (site->CanInlineCall()) { @@ -8229,22 +9199,24 @@ HValue* argument = Top(); if (argument->IsConstant()) { // Do not inline if the constant length argument is not a smi or - // outside the valid range for a fast array. + // outside the valid range for unrolled loop initialization. 
HConstant* constant_argument = HConstant::cast(argument); if (constant_argument->HasSmiValue()) { int value = constant_argument->Integer32Value(); - inline_ok = value >= 0 && - value < JSObject::kInitialMaxFastElementArray; + inline_ok = value >= 0 && value <= kElementLoopUnrollThreshold; if (!inline_ok) { TraceInline(target, caller, - "Length outside of valid array range"); + "Constant length outside of valid inlining range."); } } } else { - inline_ok = true; + TraceInline(target, caller, + "Dont inline [new] Array(n) where n isn't constant."); } - } else { + } else if (argument_count == 0) { inline_ok = true; + } else { + TraceInline(target, caller, "Too many arguments to inline."); } } else { TraceInline(target, caller, "AllocationSite requested no inlining."); @@ -8258,9 +9230,9 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position()); int argument_count = expr->arguments()->length() + 1; // Plus constructor. Factory* factory = isolate()->factory(); @@ -8279,15 +9251,15 @@ // Force completion of inobject slack tracking before generating // allocation code to finalize instance size. - if (constructor->shared()->IsInobjectSlackTrackingInProgress()) { - constructor->shared()->CompleteInobjectSlackTracking(); + if (constructor->IsInobjectSlackTrackingInProgress()) { + constructor->CompleteInobjectSlackTracking(); } // Calculate instance size from initial map of constructor. 
- ASSERT(constructor->has_initial_map()); + DCHECK(constructor->has_initial_map()); Handle<Map> initial_map(constructor->initial_map()); int instance_size = initial_map->instance_size(); - ASSERT(initial_map->InitialPropertiesLength() == 0); + DCHECK(initial_map->InitialPropertiesLength() == 0); // Allocate an instance of the implicit receiver object. HValue* size_in_bytes = Add<HConstant>(instance_size); @@ -8301,31 +9273,19 @@ AllocationSite::AddDependentCompilationInfo(allocation_site, AllocationSite::TENURING, top_info()); - } else { - allocation_mode = HAllocationMode( - isolate()->heap()->GetPretenureMode()); } } - HAllocate* receiver = - BuildAllocate(size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, - allocation_mode); + HAllocate* receiver = BuildAllocate( + size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, allocation_mode); receiver->set_known_initial_map(initial_map); - // Load the initial map from the constructor. - HValue* constructor_value = Add<HConstant>(constructor); - HValue* initial_map_value = - Add<HLoadNamedField>(constructor_value, static_cast<HValue*>(NULL), - HObjectAccess::ForMapAndOffset( - handle(constructor->map()), - JSFunction::kPrototypeOrInitialMapOffset)); - // Initialize map and fields of the newly allocated object. { NoObservableSideEffectsScope no_effects(this); - ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE); + DCHECK(initial_map->instance_type() == JS_OBJECT_TYPE); Add<HStoreNamedField>(receiver, HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset), - initial_map_value); + Add<HConstant>(initial_map)); HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array()); Add<HStoreNamedField>(receiver, HObjectAccess::ForMapAndOffset(initial_map, @@ -8349,24 +9309,28 @@ // Replace the constructor function with a newly allocated receiver using // the index of the receiver from the top of the expression stack. 
const int receiver_index = argument_count - 1; - ASSERT(environment()->ExpressionStackAt(receiver_index) == function); + DCHECK(environment()->ExpressionStackAt(receiver_index) == function); environment()->SetExpressionStackAt(receiver_index, receiver); - if (TryInlineConstruct(expr, receiver)) return; + if (TryInlineConstruct(expr, receiver)) { + // Inlining worked, add a dependency on the initial map to make sure that + // this code is deoptimized whenever the initial map of the constructor + // changes. + Map::AddDependentCompilationInfo( + initial_map, DependentCode::kInitialMapChangedGroup, top_info()); + return; + } // TODO(mstarzinger): For now we remove the previous HAllocate and all - // corresponding instructions and instead add HPushArgument for the + // corresponding instructions and instead add HPushArguments for the // arguments in case inlining failed. What we actually should do is for // inlining to try to build a subgraph without mutating the parent graph. HInstruction* instr = current_block()->last(); - while (instr != initial_map_value) { + do { HInstruction* prev_instr = instr->previous(); instr->DeleteAndReplaceWith(NULL); instr = prev_instr; - } - initial_map_value->DeleteAndReplaceWith(NULL); - receiver->DeleteAndReplaceWith(NULL); - check->DeleteAndReplaceWith(NULL); + } while (instr != check); environment()->SetExpressionStackAt(receiver_index, function); HInstruction* call = PreProcessCall(New<HCallNew>(function, argument_count)); @@ -8374,25 +9338,10 @@ } else { // The constructor function is both an operand to the instruction and an // argument to the construct call. - Handle<JSFunction> array_function( - isolate()->native_context()->array_function(), isolate()); - bool use_call_new_array = expr->target().is_identical_to(array_function); - if (use_call_new_array && IsCallNewArrayInlineable(expr)) { - // Verify we are still calling the array function for our native context. 
- Add<HCheckValue>(function, array_function); - BuildInlinedCallNewArray(expr); - return; - } + if (TryHandleArrayCallNew(expr, function)) return; - HBinaryCall* call; - if (use_call_new_array) { - Add<HCheckValue>(function, array_function); - call = New<HCallNewArray>(function, argument_count, - expr->elements_kind()); - } else { - call = New<HCallNew>(function, argument_count); - } - PreProcessCall(call); + HInstruction* call = + PreProcessCall(New<HCallNew>(function, argument_count)); return ast_context()->ReturnInstruction(call, expr->id()); } } @@ -8466,8 +9415,7 @@ CallRuntime* expr) { ZoneList<Expression*>* arguments = expr->arguments(); - NoObservableSideEffectsScope scope(this); - ASSERT(arguments->length()== 4); + DCHECK(arguments->length()== 4); CHECK_ALIVE(VisitForValue(arguments->at(0))); HValue* obj = Pop(); @@ -8480,8 +9428,11 @@ CHECK_ALIVE(VisitForValue(arguments->at(3))); HValue* byte_length = Pop(); - BuildArrayBufferViewInitialization<JSDataView>( - obj, buffer, byte_offset, byte_length); + { + NoObservableSideEffectsScope scope(this); + BuildArrayBufferViewInitialization<JSDataView>( + obj, buffer, byte_offset, byte_length); + } } @@ -8510,14 +9461,20 @@ HValue* buffer, HValue* byte_offset, HValue* length) { Handle<Map> external_array_map( isolate()->heap()->MapForExternalArrayType(array_type)); + + // The HForceRepresentation is to prevent possible deopt on int-smi + // conversion after allocation but before the new object fields are set. 
+ length = AddUncasted<HForceRepresentation>(length, Representation::Smi()); HValue* elements = Add<HAllocate>( Add<HConstant>(ExternalArray::kAlignedSize), - HType::Tagged(), + HType::HeapObject(), NOT_TENURED, external_array_map->instance_type()); AddStoreMapConstant(elements, external_array_map); + Add<HStoreNamedField>(elements, + HObjectAccess::ForFixedArrayLength(), length); HValue* backing_store = Add<HLoadNamedField>( buffer, static_cast<HValue*>(NULL), @@ -8535,13 +9492,10 @@ typed_array_start = external_pointer; } - Add<HStoreNamedField>(elements, HObjectAccess::ForExternalArrayExternalPointer(), typed_array_start); - Add<HStoreNamedField>(elements, - HObjectAccess::ForFixedArrayLength(), length); return elements; } @@ -8565,17 +9519,20 @@ total_size->ClearFlag(HValue::kCanOverflow); } + // The HForceRepresentation is to prevent possible deopt on int-smi + // conversion after allocation but before the new object fields are set. + length = AddUncasted<HForceRepresentation>(length, Representation::Smi()); Handle<Map> fixed_typed_array_map( isolate()->heap()->MapForFixedTypedArray(array_type)); HValue* elements = - Add<HAllocate>(total_size, HType::Tagged(), - NOT_TENURED, - fixed_typed_array_map->instance_type()); + Add<HAllocate>(total_size, HType::HeapObject(), + NOT_TENURED, fixed_typed_array_map->instance_type()); AddStoreMapConstant(elements, fixed_typed_array_map); Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(), length); + HValue* filler = Add<HConstant>(static_cast<int32_t>(0)); { @@ -8588,8 +9545,6 @@ builder.EndBody(); } - Add<HStoreNamedField>( - elements, HObjectAccess::ForFixedArrayLength(), length); return elements; } @@ -8598,23 +9553,32 @@ CallRuntime* expr) { ZoneList<Expression*>* arguments = expr->arguments(); - NoObservableSideEffectsScope scope(this); static const int kObjectArg = 0; static const int kArrayIdArg = 1; static const int kBufferArg = 2; static const int kByteOffsetArg = 3; static const int 
kByteLengthArg = 4; static const int kArgsLength = 5; - ASSERT(arguments->length() == kArgsLength); + DCHECK(arguments->length() == kArgsLength); CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg))); HValue* obj = Pop(); - ASSERT(arguments->at(kArrayIdArg)->node_type() == AstNode::kLiteral); + if (arguments->at(kArrayIdArg)->IsLiteral()) { + // This should never happen in real use, but can happen when fuzzing. + // Just bail out. + Bailout(kNeedSmiLiteral); + return; + } Handle<Object> value = static_cast<Literal*>(arguments->at(kArrayIdArg))->value(); - ASSERT(value->IsSmi()); + if (!value->IsSmi()) { + // This should never happen in real use, but can happen when fuzzing. + // Just bail out. + Bailout(kNeedSmiLiteral); + return; + } int array_id = Smi::cast(*value)->value(); HValue* buffer; @@ -8628,7 +9592,7 @@ HValue* byte_offset; bool is_zero_byte_offset; - if (arguments->at(kByteOffsetArg)->node_type() == AstNode::kLiteral + if (arguments->at(kByteOffsetArg)->IsLiteral() && Smi::FromInt(0) == *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) { byte_offset = Add<HConstant>(static_cast<int32_t>(0)); @@ -8637,12 +9601,13 @@ CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg))); byte_offset = Pop(); is_zero_byte_offset = false; - ASSERT(buffer != NULL); + DCHECK(buffer != NULL); } CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg))); HValue* byte_length = Pop(); + NoObservableSideEffectsScope scope(this); IfBuilder byte_offset_smi(this); if (!is_zero_byte_offset) { @@ -8684,7 +9649,7 @@ isolate(), array_type, external_elements_kind); AddStoreMapConstant(obj, obj_map); } else { - ASSERT(is_zero_byte_offset); + DCHECK(is_zero_byte_offset); elements = BuildAllocateFixedTypedArray( array_type, element_size, fixed_elements_kind, byte_length, length); @@ -8710,7 +9675,7 @@ void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); HConstant* max_smi = 
New<HConstant>(static_cast<int32_t>(Smi::kMaxValue)); return ast_context()->ReturnInstruction(max_smi, expr->id()); } @@ -8718,40 +9683,92 @@ void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap( CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); HConstant* result = New<HConstant>(static_cast<int32_t>( FLAG_typed_array_max_size_in_heap)); return ast_context()->ReturnInstruction(result, expr->id()); } +void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength( + CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 1); + CHECK_ALIVE(VisitForValue(expr->arguments()->at(0))); + HValue* buffer = Pop(); + HInstruction* result = New<HLoadNamedField>( + buffer, + static_cast<HValue*>(NULL), + HObjectAccess::ForJSArrayBufferByteLength()); + return ast_context()->ReturnInstruction(result, expr->id()); +} + + +void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength( + CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 1); + CHECK_ALIVE(VisitForValue(expr->arguments()->at(0))); + HValue* buffer = Pop(); + HInstruction* result = New<HLoadNamedField>( + buffer, + static_cast<HValue*>(NULL), + HObjectAccess::ForJSArrayBufferViewByteLength()); + return ast_context()->ReturnInstruction(result, expr->id()); +} + + +void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset( + CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 1); + CHECK_ALIVE(VisitForValue(expr->arguments()->at(0))); + HValue* buffer = Pop(); + HInstruction* result = New<HLoadNamedField>( + buffer, + static_cast<HValue*>(NULL), + HObjectAccess::ForJSArrayBufferViewByteOffset()); + return ast_context()->ReturnInstruction(result, expr->id()); +} + + +void HOptimizedGraphBuilder::GenerateTypedArrayGetLength( + CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 1); + CHECK_ALIVE(VisitForValue(expr->arguments()->at(0))); + HValue* buffer = Pop(); + HInstruction* result = New<HLoadNamedField>( + 
buffer, + static_cast<HValue*>(NULL), + HObjectAccess::ForJSTypedArrayLength()); + return ast_context()->ReturnInstruction(result, expr->id()); +} + + void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (expr->is_jsruntime()) { return Bailout(kCallToAJavaScriptRuntimeFunction); } const Runtime::Function* function = expr->function(); - ASSERT(function != NULL); + DCHECK(function != NULL); if (function->intrinsic_type == Runtime::INLINE || function->intrinsic_type == Runtime::INLINE_OPTIMIZED) { - ASSERT(expr->name()->length() > 0); - ASSERT(expr->name()->Get(0) == '_'); + DCHECK(expr->name()->length() > 0); + DCHECK(expr->name()->Get(0) == '_'); // Call to an inline function. int lookup_index = static_cast<int>(function->function_id) - static_cast<int>(Runtime::kFirstInlineFunction); - ASSERT(lookup_index >= 0); - ASSERT(static_cast<size_t>(lookup_index) < + DCHECK(lookup_index >= 0); + DCHECK(static_cast<size_t>(lookup_index) < ARRAY_SIZE(kInlineFunctionGenerators)); InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index]; // Call the inline code generator using the pointer-to-member. 
(this->*generator)(expr); } else { - ASSERT(function->intrinsic_type == Runtime::RUNTIME); + DCHECK(function->intrinsic_type == Runtime::RUNTIME); Handle<String> name = expr->name(); int argument_count = expr->arguments()->length(); CHECK_ALIVE(VisitExpressions(expr->arguments())); @@ -8764,9 +9781,9 @@ void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); switch (expr->op()) { case Token::DELETE: return VisitDelete(expr); case Token::VOID: return VisitVoid(expr); @@ -8786,9 +9803,7 @@ HValue* key = Pop(); HValue* obj = Pop(); HValue* function = AddLoadJSBuiltin(Builtins::DELETE); - Add<HPushArgument>(obj); - Add<HPushArgument>(key); - Add<HPushArgument>(Add<HConstant>(function_strict_mode())); + Add<HPushArguments>(obj, key, Add<HConstant>(function_strict_mode())); // TODO(olivf) InvokeFunction produces a check for the parameter count, // even though we are certain to pass the correct number of arguments here. 
HInstruction* instr = New<HInvokeFunction>(function, 3); @@ -8845,7 +9860,7 @@ return; } - ASSERT(ast_context()->IsValue()); + DCHECK(ast_context()->IsValue()); HBasicBlock* materialize_false = graph()->CreateBasicBlock(); HBasicBlock* materialize_true = graph()->CreateBasicBlock(); CHECK_BAILOUT(VisitForControl(expr->expression(), @@ -8931,9 +9946,9 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position()); Expression* target = expr->expression(); VariableProxy* proxy = target->AsVariableProxy(); @@ -8956,7 +9971,7 @@ return Bailout(kUnsupportedCountOperationWithConst); } // Argument of the count operation is a variable, not a property. - ASSERT(prop == NULL); + DCHECK(prop == NULL); CHECK_ALIVE(VisitForValue(target)); after = BuildIncrement(returns_original_input, expr); @@ -9011,15 +10026,14 @@ } // Argument of the count operation is a property. - ASSERT(prop != NULL); + DCHECK(prop != NULL); if (returns_original_input) Push(graph()->GetConstantUndefined()); CHECK_ALIVE(VisitForValue(prop->obj())); HValue* object = Top(); HValue* key = NULL; - if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) || - prop->IsStringAccess()) { + if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) { CHECK_ALIVE(VisitForValue(prop->key())); key = Top(); } @@ -9053,7 +10067,7 @@ int32_t i = c_index->NumberValueAsInteger32(); Handle<String> s = c_string->StringValue(); if (i < 0 || i >= s->length()) { - return New<HConstant>(OS::nan_value()); + return New<HConstant>(base::OS::nan_value()); } return New<HConstant>(s->Get(i)); } @@ -9162,13 +10176,13 @@ // We expect to get a number. // (We need to check first, since Type::None->Is(Type::Any()) == true. 
if (expected_obj->Is(Type::None())) { - ASSERT(!expected_number->Is(Type::None(zone()))); + DCHECK(!expected_number->Is(Type::None(zone()))); return value; } if (expected_obj->Is(Type::Undefined(zone()))) { // This is already done by HChange. - *expected = Type::Union(expected_number, Type::Float(zone()), zone()); + *expected = Type::Union(expected_number, Type::Number(zone()), zone()); return value; } @@ -9187,15 +10201,10 @@ Maybe<int> fixed_right_arg = expr->fixed_right_arg(); Handle<AllocationSite> allocation_site = expr->allocation_site(); - PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ? - isolate()->heap()->GetPretenureMode() : NOT_TENURED; - - HAllocationMode allocation_mode = - FLAG_allocation_site_pretenuring - ? (allocation_site.is_null() - ? HAllocationMode(NOT_TENURED) - : HAllocationMode(allocation_site)) - : HAllocationMode(pretenure_flag); + HAllocationMode allocation_mode; + if (FLAG_allocation_site_pretenuring && !allocation_site.is_null()) { + allocation_mode = HAllocationMode(allocation_site); + } HValue* result = HGraphBuilder::BuildBinaryOperation( expr->op(), left, right, left_type, right_type, result_type, @@ -9231,7 +10240,9 @@ bool maybe_string_add = op == Token::ADD && (left_type->Maybe(Type::String()) || - right_type->Maybe(Type::String())); + left_type->Maybe(Type::Receiver()) || + right_type->Maybe(Type::String()) || + right_type->Maybe(Type::Receiver())); if (left_type->Is(Type::None())) { Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation", @@ -9268,25 +10279,23 @@ // Convert left argument as necessary. 
if (left_type->Is(Type::Number())) { - ASSERT(right_type->Is(Type::String())); + DCHECK(right_type->Is(Type::String())); left = BuildNumberToString(left, left_type); } else if (!left_type->Is(Type::String())) { - ASSERT(right_type->Is(Type::String())); + DCHECK(right_type->Is(Type::String())); HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT); - Add<HPushArgument>(left); - Add<HPushArgument>(right); + Add<HPushArguments>(left, right); return AddUncasted<HInvokeFunction>(function, 2); } // Convert right argument as necessary. if (right_type->Is(Type::Number())) { - ASSERT(left_type->Is(Type::String())); + DCHECK(left_type->Is(Type::String())); right = BuildNumberToString(right, right_type); } else if (!right_type->Is(Type::String())) { - ASSERT(left_type->Is(Type::String())); + DCHECK(left_type->Is(Type::String())); HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT); - Add<HPushArgument>(left); - Add<HPushArgument>(right); + Add<HPushArguments>(left, right); return AddUncasted<HInvokeFunction>(function, 2); } @@ -9304,7 +10313,7 @@ // Register the dependent code with the allocation site. if (!allocation_mode.feedback_site().is_null()) { - ASSERT(!graph()->info()->IsStub()); + DCHECK(!graph()->info()->IsStub()); Handle<AllocationSite> site(allocation_mode.feedback_site()); AllocationSite::AddDependentCompilationInfo( site, AllocationSite::TENURING, top_info()); @@ -9348,8 +10357,7 @@ // operation in optimized code, which is more expensive, than a stub call. 
if (graph()->info()->IsStub() && is_non_primitive) { HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op)); - Add<HPushArgument>(left); - Add<HPushArgument>(right); + Add<HPushArguments>(left, right); instr = AddUncasted<HInvokeFunction>(function, 2); } else { switch (op) { @@ -9446,15 +10454,15 @@ if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) { return false; } - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); return true; } void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); switch (expr->op()) { case Token::COMMA: return VisitComma(expr); @@ -9501,7 +10509,7 @@ } else if (ast_context()->IsValue()) { CHECK_ALIVE(VisitForValue(expr->left())); - ASSERT(current_block() != NULL); + DCHECK(current_block() != NULL); HValue* left_value = Top(); // Short-circuit left values that always evaluate to the same boolean value. @@ -9536,7 +10544,7 @@ return ast_context()->ReturnValue(Pop()); } else { - ASSERT(ast_context()->IsEffect()); + DCHECK(ast_context()->IsEffect()); // In an effect context, we don't need the value of the left subexpression, // only its control flow and side effects. We need an extra block to // maintain edge-split form. 
@@ -9622,9 +10630,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position()); @@ -9645,7 +10653,7 @@ if (IsClassOfTest(expr)) { CallRuntime* call = expr->left()->AsCallRuntime(); - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); Literal* literal = expr->right()->AsLiteral(); @@ -9686,7 +10694,7 @@ Handle<String> name = proxy->name(); Handle<GlobalObject> global(current_info()->global_object()); LookupResult lookup(isolate()); - global->Lookup(*name, &lookup); + global->Lookup(name, &lookup); if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) { Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue())); // If the function is in new space we assume it's more likely to @@ -9713,8 +10721,7 @@ UNREACHABLE(); } else if (op == Token::IN) { HValue* function = AddLoadJSBuiltin(Builtins::IN); - Add<HPushArgument>(left); - Add<HPushArgument>(right); + Add<HPushArguments>(left, right); // TODO(olivf) InvokeFunction produces a check for the parameter count, // even though we are certain to pass the correct number of arguments here. HInstruction* result = New<HInvokeFunction>(function, 2); @@ -9760,11 +10767,22 @@ if (combined_type->Is(Type::Receiver())) { if (Token::IsEqualityOp(op)) { + // HCompareObjectEqAndBranch can only deal with object, so + // exclude numbers. 
+ if ((left->IsConstant() && + HConstant::cast(left)->HasNumberValue()) || + (right->IsConstant() && + HConstant::cast(right)->HasNumberValue())) { + Add<HDeoptimize>("Type mismatch between feedback and constant", + Deoptimizer::SOFT); + // The caller expects a branch instruction, so make it happy. + return New<HBranch>(graph()->GetConstantTrue()); + } // Can we get away with map check and not instance type check? HValue* operand_to_check = left->block()->block_id() < right->block()->block_id() ? left : right; if (combined_type->IsClass()) { - Handle<Map> map = combined_type->AsClass(); + Handle<Map> map = combined_type->AsClass()->Map(); AddCheckMap(operand_to_check, map); HCompareObjectEqAndBranch* result = New<HCompareObjectEqAndBranch>(left, right); @@ -9787,6 +10805,17 @@ } } else if (combined_type->Is(Type::InternalizedString()) && Token::IsEqualityOp(op)) { + // If we have a constant argument, it should be consistent with the type + // feedback (otherwise we fail assertions in HCompareObjectEqAndBranch). + if ((left->IsConstant() && + !HConstant::cast(left)->HasInternalizedStringValue()) || + (right->IsConstant() && + !HConstant::cast(right)->HasInternalizedStringValue())) { + Add<HDeoptimize>("Type mismatch between feedback and constant", + Deoptimizer::SOFT); + // The caller expects a branch instruction, so make it happy. 
+ return New<HBranch>(graph()->GetConstantTrue()); + } BuildCheckHeapObject(left); Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING); BuildCheckHeapObject(right); @@ -9835,10 +10864,10 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, Expression* sub_expr, NilValue nil) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); - ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); + DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT); if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position()); CHECK_ALIVE(VisitForValue(sub_expr)); HValue* value = Pop(); @@ -9850,7 +10879,7 @@ New<HCompareObjectEqAndBranch>(value, nil_constant); return ast_context()->ReturnControl(instr, expr->id()); } else { - ASSERT_EQ(Token::EQ, expr->op()); + DCHECK_EQ(Token::EQ, expr->op()); Type* type = expr->combined_type()->Is(Type::None()) ? Type::Any(zone()) : expr->combined_type(); HIfContinuation continuation; @@ -9877,14 +10906,14 @@ AllocationSiteUsageContext* site_context) { NoObservableSideEffectsScope no_effects(this); InstanceType instance_type = boilerplate_object->map()->instance_type(); - ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE); + DCHECK(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE); HType type = instance_type == JS_ARRAY_TYPE ? 
HType::JSArray() : HType::JSObject(); HValue* object_size_constant = Add<HConstant>( boilerplate_object->map()->instance_size()); - PretenureFlag pretenure_flag = isolate()->heap()->GetPretenureMode(); + PretenureFlag pretenure_flag = NOT_TENURED; if (FLAG_allocation_site_pretenuring) { pretenure_flag = site_context->current()->GetPretenureMode(); Handle<AllocationSite> site(site_context->current()); @@ -9902,7 +10931,7 @@ HConstant* empty_fixed_array = Add<HConstant>( isolate()->factory()->empty_fixed_array()); Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(), - empty_fixed_array, INITIALIZING_STORE); + empty_fixed_array); BuildEmitObjectHeader(boilerplate_object, object); @@ -9926,13 +10955,11 @@ HInstruction* object_elements = NULL; if (elements_size > 0) { HValue* object_elements_size = Add<HConstant>(elements_size); - if (boilerplate_object->HasFastDoubleElements()) { - object_elements = Add<HAllocate>(object_elements_size, HType::Tagged(), - pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE, site_context->current()); - } else { - object_elements = Add<HAllocate>(object_elements_size, HType::Tagged(), - pretenure_flag, FIXED_ARRAY_TYPE, site_context->current()); - } + InstanceType instance_type = boilerplate_object->HasFastDoubleElements() + ? 
FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE; + object_elements = Add<HAllocate>( + object_elements_size, HType::HeapObject(), + pretenure_flag, instance_type, site_context->current()); } BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements); @@ -9954,14 +10981,14 @@ void HOptimizedGraphBuilder::BuildEmitObjectHeader( Handle<JSObject> boilerplate_object, HInstruction* object) { - ASSERT(boilerplate_object->properties()->length() == 0); + DCHECK(boilerplate_object->properties()->length() == 0); Handle<Map> boilerplate_object_map(boilerplate_object->map()); AddStoreMapConstant(object, boilerplate_object_map); Handle<Object> properties_field = Handle<Object>(boilerplate_object->properties(), isolate()); - ASSERT(*properties_field == isolate()->heap()->empty_fixed_array()); + DCHECK(*properties_field == isolate()->heap()->empty_fixed_array()); HInstruction* properties = Add<HConstant>(properties_field); HObjectAccess access = HObjectAccess::ForPropertiesPointer(); Add<HStoreNamedField>(object, access, properties); @@ -9973,7 +11000,7 @@ Handle<Object>(boilerplate_array->length(), isolate()); HInstruction* length = Add<HConstant>(length_field); - ASSERT(boilerplate_array->length()->IsSmi()); + DCHECK(boilerplate_array->length()->IsSmi()); Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength( boilerplate_array->GetElementsKind()), length); } @@ -9984,7 +11011,7 @@ Handle<JSObject> boilerplate_object, HInstruction* object, HInstruction* object_elements) { - ASSERT(boilerplate_object->properties()->length() == 0); + DCHECK(boilerplate_object->properties()->length() == 0); if (object_elements == NULL) { Handle<Object> elements_field = Handle<Object>(boilerplate_object->elements(), isolate()); @@ -10040,12 +11067,15 @@ // 1) it's a child object of another object with a valid allocation site // 2) we can just use the mode of the parent object for pretenuring HInstruction* double_box = - Add<HAllocate>(heap_number_constant, HType::HeapNumber(), - 
pretenure_flag, HEAP_NUMBER_TYPE); + Add<HAllocate>(heap_number_constant, HType::HeapObject(), + pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE); AddStoreMapConstant(double_box, - isolate()->factory()->heap_number_map()); - Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(), - Add<HConstant>(value)); + isolate()->factory()->mutable_heap_number_map()); + // Unwrap the mutable heap number from the boilerplate. + HValue* double_value = + Add<HConstant>(Handle<HeapNumber>::cast(value)->value()); + Add<HStoreNamedField>( + double_box, HObjectAccess::ForHeapNumberValue(), double_value); value_instruction = double_box; } else if (representation.IsSmi()) { value_instruction = value->IsUninitialized() @@ -10065,7 +11095,7 @@ HInstruction* value_instruction = Add<HConstant>(isolate()->factory()->one_pointer_filler_map()); for (int i = copied_fields; i < inobject_properties; i++) { - ASSERT(boilerplate_object->IsJSObject()); + DCHECK(boilerplate_object->IsJSObject()); int property_offset = boilerplate_object->GetInObjectPropertyOffset(i); HObjectAccess access = HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset); @@ -10145,9 +11175,9 @@ void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) { - ASSERT(!HasStackOverflow()); - ASSERT(current_block() != NULL); - ASSERT(current_block()->HasPredecessor()); + DCHECK(!HasStackOverflow()); + DCHECK(current_block() != NULL); + DCHECK(current_block()->HasPredecessor()); HInstruction* instr = BuildThisFunction(); return ast_context()->ReturnInstruction(instr, expr->id()); } @@ -10155,7 +11185,7 @@ void HOptimizedGraphBuilder::VisitDeclarations( ZoneList<Declaration*>* declarations) { - ASSERT(globals_.is_empty()); + DCHECK(globals_.is_empty()); AstVisitor::VisitDeclarations(declarations); if (!globals_.is_empty()) { Handle<FixedArray> array = @@ -10165,7 +11195,7 @@ DeclareGlobalsNativeFlag::encode(current_info()->is_native()) | DeclareGlobalsStrictMode::encode(current_info()->strict_mode()); 
Add<HDeclareGlobals>(array, flags); - globals_.Clear(); + globals_.Rewind(0); } } @@ -10215,7 +11245,7 @@ case Variable::UNALLOCATED: { globals_.Add(variable->name(), zone()); Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo( - declaration->fun(), current_info()->script()); + declaration->fun(), current_info()->script(), top_info()); // Check for stack-overflow exception. if (function.is_null()) return SetStackOverflow(); globals_.Add(function, zone()); @@ -10291,7 +11321,7 @@ // Generators for inline runtime functions. // Support for types. void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value); @@ -10300,7 +11330,7 @@ void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = @@ -10312,7 +11342,7 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = @@ -10322,7 +11352,7 @@ void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value); @@ -10331,7 +11361,7 @@ void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); 
CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasCachedArrayIndexAndBranch* result = @@ -10341,7 +11371,7 @@ void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = @@ -10351,7 +11381,7 @@ void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = @@ -10361,7 +11391,7 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value); @@ -10375,7 +11405,7 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value); @@ -10391,7 +11421,7 @@ // Support for construct call checks. void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { - ASSERT(call->arguments()->length() == 0); + DCHECK(call->arguments()->length() == 0); if (function_state()->outer() != NULL) { // We are generating graph for inlined function. HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN @@ -10407,30 +11437,42 @@ // Support for arguments.length and arguments[?]. 
void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) { - // Our implementation of arguments (based on this stack frame or an - // adapter below it) does not work for inlined functions. This runtime - // function is blacklisted by AstNode::IsInlineable. - ASSERT(function_state()->outer() == NULL); - ASSERT(call->arguments()->length() == 0); - HInstruction* elements = Add<HArgumentsElements>(false); - HArgumentsLength* result = New<HArgumentsLength>(elements); + DCHECK(call->arguments()->length() == 0); + HInstruction* result = NULL; + if (function_state()->outer() == NULL) { + HInstruction* elements = Add<HArgumentsElements>(false); + result = New<HArgumentsLength>(elements); + } else { + // Number of arguments without receiver. + int argument_count = environment()-> + arguments_environment()->parameter_count() - 1; + result = New<HConstant>(argument_count); + } return ast_context()->ReturnInstruction(result, call->id()); } void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) { - // Our implementation of arguments (based on this stack frame or an - // adapter below it) does not work for inlined functions. This runtime - // function is blacklisted by AstNode::IsInlineable. 
- ASSERT(function_state()->outer() == NULL); - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* index = Pop(); - HInstruction* elements = Add<HArgumentsElements>(false); - HInstruction* length = Add<HArgumentsLength>(elements); - HInstruction* checked_index = Add<HBoundsCheck>(index, length); - HAccessArgumentsAt* result = New<HAccessArgumentsAt>( - elements, length, checked_index); + HInstruction* result = NULL; + if (function_state()->outer() == NULL) { + HInstruction* elements = Add<HArgumentsElements>(false); + HInstruction* length = Add<HArgumentsLength>(elements); + HInstruction* checked_index = Add<HBoundsCheck>(index, length); + result = New<HAccessArgumentsAt>(elements, length, checked_index); + } else { + EnsureArgumentsArePushedForAccess(); + + // Number of arguments without receiver. + HInstruction* elements = function_state()->arguments_elements(); + int argument_count = environment()-> + arguments_environment()->parameter_count() - 1; + HInstruction* length = Add<HConstant>(argument_count); + HInstruction* checked_key = Add<HBoundsCheck>(index, length); + result = New<HAccessArgumentsAt>(elements, length, checked_key); + } return ast_context()->ReturnInstruction(result, call->id()); } @@ -10444,7 +11486,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* object = Pop(); @@ -10472,8 +11514,8 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) { - ASSERT(call->arguments()->length() == 2); - ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral()); + DCHECK(call->arguments()->length() == 2); + DCHECK_NE(NULL, call->arguments()->at(1)->AsLiteral()); Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value())); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); 
HValue* date = Pop(); @@ -10484,7 +11526,7 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar( CallRuntime* call) { - ASSERT(call->arguments()->length() == 3); + DCHECK(call->arguments()->length() == 3); // We need to follow the evaluation order of full codegen. CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); CHECK_ALIVE(VisitForValue(call->arguments()->at(2))); @@ -10501,7 +11543,7 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar( CallRuntime* call) { - ASSERT(call->arguments()->length() == 3); + DCHECK(call->arguments()->length() == 3); // We need to follow the evaluation order of full codegen. CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); CHECK_ALIVE(VisitForValue(call->arguments()->at(2))); @@ -10517,7 +11559,7 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) { - ASSERT(call->arguments()->length() == 2); + DCHECK(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* value = Pop(); @@ -10555,7 +11597,7 @@ // Fast support for charCodeAt(n). void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) { - ASSERT(call->arguments()->length() == 2); + DCHECK(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* index = Pop(); @@ -10567,7 +11609,7 @@ // Fast support for string.charAt(n) and string[n]. void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* char_code = Pop(); HInstruction* result = NewUncasted<HStringCharFromCode>(char_code); @@ -10577,7 +11619,7 @@ // Fast support for string.charAt(n) and string[n]. 
void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) { - ASSERT(call->arguments()->length() == 2); + DCHECK(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* index = Pop(); @@ -10591,7 +11633,7 @@ // Fast support for object equality testing. void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) { - ASSERT(call->arguments()->length() == 2); + DCHECK(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* right = Pop(); @@ -10602,15 +11644,9 @@ } -void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) { - // %_Log is ignored in optimized code. - return ast_context()->ReturnValue(graph()->GetConstantUndefined()); -} - - // Fast support for StringAdd. void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) { - ASSERT_EQ(2, call->arguments()->length()); + DCHECK_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* right = Pop(); @@ -10622,7 +11658,7 @@ // Fast support for SubString. void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) { - ASSERT_EQ(3, call->arguments()->length()); + DCHECK_EQ(3, call->arguments()->length()); CHECK_ALIVE(VisitExpressions(call->arguments())); PushArgumentsFromEnvironment(call->arguments()->length()); HCallStub* result = New<HCallStub>(CodeStub::SubString, 3); @@ -10632,7 +11668,7 @@ // Fast support for StringCompare. 
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) { - ASSERT_EQ(2, call->arguments()->length()); + DCHECK_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitExpressions(call->arguments())); PushArgumentsFromEnvironment(call->arguments()->length()); HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2); @@ -10642,7 +11678,7 @@ // Support for direct calls from JavaScript to native RegExp code. void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) { - ASSERT_EQ(4, call->arguments()->length()); + DCHECK_EQ(4, call->arguments()->length()); CHECK_ALIVE(VisitExpressions(call->arguments())); PushArgumentsFromEnvironment(call->arguments()->length()); HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4); @@ -10651,7 +11687,7 @@ void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) { - ASSERT_EQ(1, call->arguments()->length()); + DCHECK_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW); @@ -10660,7 +11696,7 @@ void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) { - ASSERT_EQ(1, call->arguments()->length()); + DCHECK_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH); @@ -10669,7 +11705,7 @@ void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) { - ASSERT_EQ(2, call->arguments()->length()); + DCHECK_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* lo = Pop(); @@ -10681,7 +11717,7 @@ // Construct a RegExp exec result with two in-object properties. 
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { - ASSERT_EQ(3, call->arguments()->length()); + DCHECK_EQ(3, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); CHECK_ALIVE(VisitForValue(call->arguments()->at(2))); @@ -10701,7 +11737,7 @@ // Fast support for number to string. void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) { - ASSERT_EQ(1, call->arguments()->length()); + DCHECK_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* number = Pop(); HValue* result = BuildNumberToString(number, Type::Any(zone())); @@ -10713,7 +11749,7 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) { // 1 ~ The function to call is not itself an argument to the call. int arg_count = call->arguments()->length() - 1; - ASSERT(arg_count >= 1); // There's always at least a receiver. + DCHECK(arg_count >= 1); // There's always at least a receiver. CHECK_ALIVE(VisitExpressions(call->arguments())); // The function is the last argument @@ -10757,7 +11793,7 @@ // Fast call to math functions. 
void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) { - ASSERT_EQ(2, call->arguments()->length()); + DCHECK_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* right = Pop(); @@ -10767,8 +11803,8 @@ } -void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); +void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) { + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog); @@ -10776,8 +11812,8 @@ } -void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); +void HOptimizedGraphBuilder::GenerateMathSqrtRT(CallRuntime* call) { + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt); @@ -10786,7 +11822,7 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { - ASSERT(call->arguments()->length() == 1); + DCHECK(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value); @@ -10817,6 +11853,16 @@ } +void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) { + DCHECK(call->arguments()->length() == 0); + HValue* ref = + Add<HConstant>(ExternalReference::debug_is_active_address(isolate())); + HValue* value = Add<HLoadNamedField>( + ref, static_cast<HValue*>(NULL), HObjectAccess::ForExternalUInteger8()); + return ast_context()->ReturnValue(value); +} + + #undef CHECK_BAILOUT #undef CHECK_ALIVE @@ -10837,7 +11883,9 @@ push_count_(0), ast_id_(BailoutId::None()), zone_(zone) { - Initialize(scope->num_parameters() + 1, 
scope->num_stack_slots(), 0); + Scope* declaration_scope = scope->DeclarationScope(); + Initialize(declaration_scope->num_parameters() + 1, + declaration_scope->num_stack_slots(), 0); } @@ -10923,8 +11971,8 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) { - ASSERT(!block->IsLoopHeader()); - ASSERT(values_.length() == other->values_.length()); + DCHECK(!block->IsLoopHeader()); + DCHECK(values_.length() == other->values_.length()); int length = values_.length(); for (int i = 0; i < length; ++i) { @@ -10933,12 +11981,12 @@ // There is already a phi for the i'th value. HPhi* phi = HPhi::cast(value); // Assert index is correct and that we haven't missed an incoming edge. - ASSERT(phi->merged_index() == i || !phi->HasMergedIndex()); - ASSERT(phi->OperandCount() == block->predecessors()->length()); + DCHECK(phi->merged_index() == i || !phi->HasMergedIndex()); + DCHECK(phi->OperandCount() == block->predecessors()->length()); phi->AddInput(other->values_[i]); } else if (values_[i] != other->values_[i]) { // There is a fresh value on the incoming edge, a phi is needed. 
- ASSERT(values_[i] != NULL && other->values_[i] != NULL); + DCHECK(values_[i] != NULL && other->values_[i] != NULL); HPhi* phi = block->AddNewPhi(i); HValue* old_value = values_[i]; for (int j = 0; j < block->predecessors()->length(); j++) { @@ -10952,7 +12000,7 @@ void HEnvironment::Bind(int index, HValue* value) { - ASSERT(value != NULL); + DCHECK(value != NULL); assigned_variables_.Add(index, zone()); values_[index] = value; } @@ -10964,7 +12012,7 @@ bool HEnvironment::ExpressionStackIsEmpty() const { - ASSERT(length() >= first_expression_index()); + DCHECK(length() >= first_expression_index()); return length() == first_expression_index(); } @@ -10972,7 +12020,7 @@ void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) { int count = index_from_top + 1; int index = values_.length() - count; - ASSERT(HasExpressionAt(index)); + DCHECK(HasExpressionAt(index)); // The push count must include at least the element in question or else // the new value will not be included in this environment's history. if (push_count_ < count) { @@ -11036,7 +12084,7 @@ FunctionLiteral* function, HConstant* undefined, InliningKind inlining_kind) const { - ASSERT(frame_type() == JS_FUNCTION); + DCHECK(frame_type() == JS_FUNCTION); // Outer environment is a copy of this one without the arguments. 
int arity = function->scope()->num_parameters(); @@ -11083,32 +12131,24 @@ } -void HEnvironment::PrintTo(StringStream* stream) { - for (int i = 0; i < length(); i++) { - if (i == 0) stream->Add("parameters\n"); - if (i == parameter_count()) stream->Add("specials\n"); - if (i == parameter_count() + specials_count()) stream->Add("locals\n"); - if (i == parameter_count() + specials_count() + local_count()) { - stream->Add("expressions\n"); +OStream& operator<<(OStream& os, const HEnvironment& env) { + for (int i = 0; i < env.length(); i++) { + if (i == 0) os << "parameters\n"; + if (i == env.parameter_count()) os << "specials\n"; + if (i == env.parameter_count() + env.specials_count()) os << "locals\n"; + if (i == env.parameter_count() + env.specials_count() + env.local_count()) { + os << "expressions\n"; } - HValue* val = values_.at(i); - stream->Add("%d: ", i); + HValue* val = env.values()->at(i); + os << i << ": "; if (val != NULL) { - val->PrintNameTo(stream); + os << val; } else { - stream->Add("NULL"); + os << "NULL"; } - stream->Add("\n"); + os << "\n"; } - PrintF("\n"); -} - - -void HEnvironment::PrintToStd() { - HeapStringAllocator string_allocator; - StringStream trace(&string_allocator); - PrintTo(&trace); - PrintF("%s", trace.ToCString().get()); + return os << "\n"; } @@ -11126,12 +12166,13 @@ PrintStringProperty("name", CodeStub::MajorName(major_key, false)); PrintStringProperty("method", "stub"); } - PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis())); + PrintLongProperty("date", + static_cast<int64_t>(base::OS::TimeCurrentMillis())); } void HTracer::TraceLithium(const char* name, LChunk* chunk) { - ASSERT(!chunk->isolate()->concurrent_recompilation_enabled()); + DCHECK(!chunk->isolate()->concurrent_recompilation_enabled()); AllowHandleDereference allow_deref; AllowDeferredHandleDereference allow_deferred_deref; Trace(name, chunk->graph(), chunk); @@ -11139,7 +12180,7 @@ void HTracer::TraceHydrogen(const char* name, HGraph* graph) { 
- ASSERT(!graph->isolate()->concurrent_recompilation_enabled()); + DCHECK(!graph->isolate()->concurrent_recompilation_enabled()); AllowHandleDereference allow_deref; AllowDeferredHandleDereference allow_deferred_deref; Trace(name, graph, NULL); @@ -11222,11 +12263,9 @@ for (int j = 0; j < total; ++j) { HPhi* phi = current->phis()->at(j); PrintIndent(); - trace_.Add("%d ", phi->merged_index()); - phi->PrintNameTo(&trace_); - trace_.Add(" "); - phi->PrintTo(&trace_); - trace_.Add("\n"); + OStringStream os; + os << phi->merged_index() << " " << NameOf(phi) << " " << *phi << "\n"; + trace_.Add(os.c_str()); } } @@ -11236,21 +12275,18 @@ HInstruction* instruction = it.Current(); int uses = instruction->UseCount(); PrintIndent(); - trace_.Add("0 %d ", uses); - instruction->PrintNameTo(&trace_); - trace_.Add(" "); - instruction->PrintTo(&trace_); + OStringStream os; + os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction; if (FLAG_hydrogen_track_positions && instruction->has_position() && instruction->position().raw() != 0) { const HSourcePosition pos = instruction->position(); - trace_.Add(" pos:"); - if (pos.inlining_id() != 0) { - trace_.Add("%d_", pos.inlining_id()); - } - trace_.Add("%d", pos.position()); + os << " pos:"; + if (pos.inlining_id() != 0) os << pos.inlining_id() << "_"; + os << pos.position(); } - trace_.Add(" <|@\n"); + os << " <|@\n"; + trace_.Add(os.c_str()); } } @@ -11268,10 +12304,9 @@ trace_.Add("%d ", LifetimePosition::FromInstructionIndex(i).Value()); linstr->PrintTo(&trace_); - trace_.Add(" [hir:"); - linstr->hydrogen_value()->PrintNameTo(&trace_); - trace_.Add("]"); - trace_.Add(" <|@\n"); + OStringStream os; + os << " [hir:" << NameOf(linstr->hydrogen_value()) << "] <|@\n"; + trace_.Add(os.c_str()); } } } @@ -11313,7 +12348,7 @@ trace_.Add(" \"%s\"", DoubleRegister::AllocationIndexToString(assigned_reg)); } else { - ASSERT(op->IsRegister()); + DCHECK(op->IsRegister()); trace_.Add(" \"%s\"", 
Register::AllocationIndexToString(assigned_reg)); } } else if (range->IsSpilled()) { @@ -11321,7 +12356,7 @@ if (op->IsDoubleStackSlot()) { trace_.Add(" \"double_stack:%d\"", op->index()); } else { - ASSERT(op->IsStackSlot()); + DCHECK(op->IsStackSlot()); trace_.Add(" \"stack:%d\"", op->index()); } } @@ -11371,15 +12406,22 @@ } -void HStatistics::Print() { - PrintF("Timing results:\n"); - TimeDelta sum; +void HStatistics::Print(const char* stats_name) { + PrintF( + "\n" + "----------------------------------------" + "----------------------------------------\n" + "--- %s timing results:\n" + "----------------------------------------" + "----------------------------------------\n", + stats_name); + base::TimeDelta sum; for (int i = 0; i < times_.length(); ++i) { sum += times_[i]; } for (int i = 0; i < names_.length(); ++i) { - PrintF("%32s", names_[i]); + PrintF("%33s", names_[i]); double ms = times_[i].InMillisecondsF(); double percent = times_[i].PercentOf(sum); PrintF(" %8.3f ms / %4.1f %% ", ms, percent); @@ -11389,26 +12431,22 @@ PrintF(" %9u bytes / %4.1f %%\n", size, size_percent); } - PrintF("----------------------------------------" - "---------------------------------------\n"); - TimeDelta total = create_graph_ + optimize_graph_ + generate_code_; - PrintF("%32s %8.3f ms / %4.1f %% \n", - "Create graph", - create_graph_.InMillisecondsF(), - create_graph_.PercentOf(total)); - PrintF("%32s %8.3f ms / %4.1f %% \n", - "Optimize graph", - optimize_graph_.InMillisecondsF(), - optimize_graph_.PercentOf(total)); - PrintF("%32s %8.3f ms / %4.1f %% \n", - "Generate and install code", - generate_code_.InMillisecondsF(), - generate_code_.PercentOf(total)); - PrintF("----------------------------------------" - "---------------------------------------\n"); - PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n", - "Total", - total.InMillisecondsF(), + PrintF( + "----------------------------------------" + "----------------------------------------\n"); + 
base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_; + PrintF("%33s %8.3f ms / %4.1f %% \n", "Create graph", + create_graph_.InMillisecondsF(), create_graph_.PercentOf(total)); + PrintF("%33s %8.3f ms / %4.1f %% \n", "Optimize graph", + optimize_graph_.InMillisecondsF(), optimize_graph_.PercentOf(total)); + PrintF("%33s %8.3f ms / %4.1f %% \n", "Generate and install code", + generate_code_.InMillisecondsF(), generate_code_.PercentOf(total)); + PrintF( + "----------------------------------------" + "----------------------------------------\n"); + PrintF("%33s %8.3f ms %9u bytes\n", "Total", + total.InMillisecondsF(), total_size_); + PrintF("%33s (%.1f times slower than full code gen)\n", "", total.TimesOf(full_code_gen_)); double source_size_in_kb = static_cast<double>(source_size_) / 1024; @@ -11418,13 +12456,13 @@ double normalized_size_in_kb = source_size_in_kb > 0 ? total_size_ / 1024 / source_size_in_kb : 0; - PrintF("%32s %8.3f ms %7.3f kB allocated\n", - "Average per kB source", - normalized_time, normalized_size_in_kb); + PrintF("%33s %8.3f ms %7.3f kB allocated\n", + "Average per kB source", normalized_time, normalized_size_in_kb); } -void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) { +void HStatistics::SaveTiming(const char* name, base::TimeDelta time, + unsigned size) { total_size_ += size; for (int i = 0; i < names_.length(); ++i) { if (strcmp(names_[i], name) == 0) { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-check-elimination.cc nodejs-0.11.15/deps/v8/src/hydrogen-check-elimination.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-check-elimination.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-check-elimination.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "hydrogen-check-elimination.h" -#include "hydrogen-alias-analysis.h" -#include "hydrogen-flow-engine.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/hydrogen-check-elimination.h" + +#include "src/hydrogen-alias-analysis.h" +#include "src/hydrogen-flow-engine.h" #define GLOBAL 1 @@ -44,12 +22,50 @@ namespace v8 { namespace internal { -typedef UniqueSet<Map>* MapSet; +typedef const UniqueSet<Map>* MapSet; struct HCheckTableEntry { + enum State { + // We have seen a map check (i.e. an HCheckMaps) for these maps, so we can + // use this information to eliminate further map checks, elements kind + // transitions, etc. + CHECKED, + // Same as CHECKED, but we also know that these maps are stable. + CHECKED_STABLE, + // These maps are stable, but not checked (i.e. we learned this via field + // type tracking or from a constant, or they were initially CHECKED_STABLE, + // but became UNCHECKED_STABLE because of an instruction that changes maps + // or elements kind), and we need a stability check for them in order to use + // this information for check elimination (which turns them back to + // CHECKED_STABLE). + UNCHECKED_STABLE + }; + + static const char* State2String(State state) { + switch (state) { + case CHECKED: return "checked"; + case CHECKED_STABLE: return "checked stable"; + case UNCHECKED_STABLE: return "unchecked stable"; + } + UNREACHABLE(); + return NULL; + } + + static State StateMerge(State state1, State state2) { + if (state1 == state2) return state1; + if ((state1 == CHECKED && state2 == CHECKED_STABLE) || + (state2 == CHECKED && state1 == CHECKED_STABLE)) { + return CHECKED; + } + DCHECK((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) || + (state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE)); + return UNCHECKED_STABLE; + } + HValue* object_; // The object being approximated. NULL => invalid entry. HInstruction* check_; // The last check instruction. MapSet maps_; // The set of known maps for the object. + State state_; // The state of this entry. }; @@ -57,7 +73,7 @@ // set of known maps for each object. 
class HCheckTable : public ZoneObject { public: - static const int kMaxTrackedObjects = 10; + static const int kMaxTrackedObjects = 16; explicit HCheckTable(HCheckEliminationPhase* phase) : phase_(phase), @@ -72,10 +88,6 @@ ReduceCheckMaps(HCheckMaps::cast(instr)); break; } - case HValue::kCheckValue: { - ReduceCheckValue(HCheckValue::cast(instr)); - break; - } case HValue::kLoadNamedField: { ReduceLoadNamedField(HLoadNamedField::cast(instr)); break; @@ -92,28 +104,36 @@ ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr)); break; } + case HValue::kIsStringAndBranch: { + ReduceIsStringAndBranch(HIsStringAndBranch::cast(instr)); + break; + } case HValue::kTransitionElementsKind: { ReduceTransitionElementsKind( HTransitionElementsKind::cast(instr)); break; } - case HValue::kCheckMapValue: { - ReduceCheckMapValue(HCheckMapValue::cast(instr)); - break; - } case HValue::kCheckHeapObject: { ReduceCheckHeapObject(HCheckHeapObject::cast(instr)); break; } + case HValue::kCheckInstanceType: { + ReduceCheckInstanceType(HCheckInstanceType::cast(instr)); + break; + } default: { // If the instruction changes maps uncontrollably, drop everything. - if (instr->CheckChangesFlag(kMaps) || - instr->CheckChangesFlag(kOsrEntries)) { + if (instr->CheckChangesFlag(kOsrEntries)) { Kill(); + break; + } + if (instr->CheckChangesFlag(kElementsKind) || + instr->CheckChangesFlag(kMaps)) { + KillUnstableEntries(); } } // Improvements possible: - // - eliminate redundant HCheckSmi, HCheckInstanceType instructions + // - eliminate redundant HCheckSmi instructions // - track which values have been HCheckHeapObject'd } @@ -154,13 +174,14 @@ private: // Copy state to successor block. 
HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) { - HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_); + HCheckTable* copy = new(zone) HCheckTable(phase_); for (int i = 0; i < size_; i++) { HCheckTableEntry* old_entry = &entries_[i]; - ASSERT(old_entry->maps_->size() > 0); + DCHECK(old_entry->maps_->size() > 0); HCheckTableEntry* new_entry = ©->entries_[i]; new_entry->object_ = old_entry->object_; - new_entry->maps_ = old_entry->maps_->Copy(phase_->zone()); + new_entry->maps_ = old_entry->maps_; + new_entry->state_ = old_entry->state_; // Keep the check if the existing check's block dominates the successor. if (old_entry->check_ != NULL && old_entry->check_->block()->Dominates(succ)) { @@ -186,7 +207,7 @@ HCheckTableEntry* pred_entry = copy->Find(phi_operand); if (pred_entry != NULL) { // Create an entry for a phi in the table. - copy->Insert(phi, NULL, pred_entry->maps_->Copy(phase_->zone())); + copy->Insert(phi, NULL, pred_entry->maps_, pred_entry->state_); } } } @@ -202,19 +223,25 @@ HValue* object = cmp->value()->ActualValue(); HCheckTableEntry* entry = copy->Find(object); if (is_true_branch) { + HCheckTableEntry::State state = cmp->map_is_stable() + ? HCheckTableEntry::CHECKED_STABLE + : HCheckTableEntry::CHECKED; // Learn on the true branch of if(CompareMap(x)). if (entry == NULL) { - copy->Insert(object, cmp, cmp->map()); + copy->Insert(object, cmp, cmp->map(), state); } else { - MapSet list = new(phase_->zone()) UniqueSet<Map>(); - list->Add(cmp->map(), phase_->zone()); - entry->maps_ = list; + entry->maps_ = new(zone) UniqueSet<Map>(cmp->map(), zone); entry->check_ = cmp; + entry->state_ = state; } } else { // Learn on the false branch of if(CompareMap(x)). 
if (entry != NULL) { - entry->maps_->Remove(cmp->map()); + EnsureChecked(entry, object, cmp); + UniqueSet<Map>* maps = entry->maps_->Copy(zone); + maps->Remove(cmp->map()); + entry->maps_ = maps; + DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); } } learned = true; @@ -228,16 +255,42 @@ HCheckTableEntry* re = copy->Find(right); if (le == NULL) { if (re != NULL) { - copy->Insert(left, NULL, re->maps_->Copy(zone)); + copy->Insert(left, NULL, re->maps_, re->state_); } } else if (re == NULL) { - copy->Insert(right, NULL, le->maps_->Copy(zone)); + copy->Insert(right, NULL, le->maps_, le->state_); } else { - MapSet intersect = le->maps_->Intersect(re->maps_, zone); - le->maps_ = intersect; - re->maps_ = intersect->Copy(zone); + EnsureChecked(le, cmp->left(), cmp); + EnsureChecked(re, cmp->right(), cmp); + le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone); + le->state_ = re->state_ = HCheckTableEntry::StateMerge( + le->state_, re->state_); + DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_); + DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_); } learned = true; + } else if (end->IsIsStringAndBranch()) { + HIsStringAndBranch* cmp = HIsStringAndBranch::cast(end); + HValue* object = cmp->value()->ActualValue(); + HCheckTableEntry* entry = copy->Find(object); + if (is_true_branch) { + // Learn on the true branch of if(IsString(x)). + if (entry == NULL) { + copy->Insert(object, NULL, string_maps(), + HCheckTableEntry::CHECKED); + } else { + EnsureChecked(entry, object, cmp); + entry->maps_ = entry->maps_->Intersect(string_maps(), zone); + DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); + } + } else { + // Learn on the false branch of if(IsString(x)). + if (entry != NULL) { + EnsureChecked(entry, object, cmp); + entry->maps_ = entry->maps_->Subtract(string_maps(), zone); + DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); + } + } } // Learning on false branches requires storing negative facts. 
} @@ -276,16 +329,22 @@ that_entry = that->Find(this_entry->object_); } - if (that_entry == NULL) { + if (that_entry == NULL || + (that_entry->state_ == HCheckTableEntry::CHECKED && + this_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) || + (this_entry->state_ == HCheckTableEntry::CHECKED && + that_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE)) { this_entry->object_ = NULL; compact = true; } else { this_entry->maps_ = - this_entry->maps_->Union(that_entry->maps_, phase_->zone()); + this_entry->maps_->Union(that_entry->maps_, zone); + this_entry->state_ = HCheckTableEntry::StateMerge( + this_entry->state_, that_entry->state_); if (this_entry->check_ != that_entry->check_) { this_entry->check_ = NULL; } - ASSERT(this_entry->maps_->size() > 0); + DCHECK(this_entry->maps_->size() > 0); } } if (compact) Compact(); @@ -304,16 +363,23 @@ HCheckTableEntry* entry = Find(object); if (entry != NULL) { // entry found; - MapSet a = entry->maps_; - MapSet i = instr->map_set().Copy(phase_->zone()); - if (a->IsSubset(i)) { + HGraph* graph = instr->block()->graph(); + if (entry->maps_->IsSubset(instr->maps())) { // The first check is more strict; the second is redundant. 
if (entry->check_ != NULL) { + DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_); TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n", instr->id(), instr->block()->block_id(), entry->check_->id())); instr->DeleteAndReplaceWith(entry->check_); INC_STAT(redundant_); - } else { + } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { + DCHECK_EQ(NULL, entry->check_); + TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n", + instr->id(), instr->block()->block_id())); + instr->set_maps(entry->maps_->Copy(graph->zone())); + instr->MarkAsStabilityCheck(); + entry->state_ = HCheckTableEntry::CHECKED_STABLE; + } else if (!instr->IsStabilityCheck()) { TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n", instr->id(), instr->block()->block_id())); // Mark check as dead but leave it in the graph as a checkpoint for @@ -324,27 +390,34 @@ } return; } - MapSet intersection = i->Intersect(a, phase_->zone()); + MapSet intersection = instr->maps()->Intersect( + entry->maps_, graph->zone()); if (intersection->size() == 0) { - // Intersection is empty; probably megamorphic, which is likely to - // deopt anyway, so just leave things as they are. + // Intersection is empty; probably megamorphic. INC_STAT(empty_); + entry->object_ = NULL; + Compact(); } else { // Update set of maps in the entry. entry->maps_ = intersection; - if (intersection->size() != i->size()) { + // Update state of the entry. + if (instr->maps_are_stable() || + entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { + entry->state_ = HCheckTableEntry::CHECKED_STABLE; + } + if (intersection->size() != instr->maps()->size()) { // Narrow set of maps in the second check maps instruction. - HGraph* graph = instr->block()->graph(); if (entry->check_ != NULL && entry->check_->block() == instr->block() && entry->check_->IsCheckMaps()) { // There is a check in the same block so replace it with a more // strict check and eliminate the second check entirely. 
HCheckMaps* check = HCheckMaps::cast(entry->check_); + DCHECK(!check->IsStabilityCheck()); TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(), check->block()->block_id())); // Update map set and ensure that the check is alive. - check->set_map_set(intersection, graph->zone()); + check->set_maps(intersection); check->ClearFlag(HValue::kIsDead); TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n", instr->id(), instr->block()->block_id(), entry->check_->id())); @@ -352,8 +425,8 @@ } else { TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(), instr->block()->block_id())); - instr->set_map_set(intersection, graph->zone()); - entry->check_ = instr; + instr->set_maps(intersection); + entry->check_ = instr->IsStabilityCheck() ? NULL : instr; } if (FLAG_trace_check_elimination) { @@ -364,76 +437,88 @@ } } else { // No entry; insert a new one. - Insert(object, instr, instr->map_set().Copy(phase_->zone())); + HCheckTableEntry::State state = instr->maps_are_stable() + ? HCheckTableEntry::CHECKED_STABLE + : HCheckTableEntry::CHECKED; + HCheckMaps* check = instr->IsStabilityCheck() ? NULL : instr; + Insert(object, check, instr->maps(), state); } } - void ReduceCheckValue(HCheckValue* instr) { - // Canonicalize HCheckValues; they might have their values load-eliminated. 
- HValue* value = instr->Canonicalize(); - if (value == NULL) { - instr->DeleteAndReplaceWith(instr->value()); - INC_STAT(removed_); - } else if (value != instr) { + void ReduceCheckInstanceType(HCheckInstanceType* instr) { + HValue* value = instr->value()->ActualValue(); + HCheckTableEntry* entry = Find(value); + if (entry == NULL) { + if (instr->check() == HCheckInstanceType::IS_STRING) { + Insert(value, NULL, string_maps(), HCheckTableEntry::CHECKED); + } + return; + } + UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>( + entry->maps_->size(), zone()); + for (int i = 0; i < entry->maps_->size(); ++i) { + InstanceType type; + Unique<Map> map = entry->maps_->at(i); + { + // This is safe, because maps don't move and their instance type does + // not change. + AllowHandleDereference allow_deref; + type = map.handle()->instance_type(); + } + if (instr->is_interval_check()) { + InstanceType first_type, last_type; + instr->GetCheckInterval(&first_type, &last_type); + if (first_type <= type && type <= last_type) maps->Add(map, zone()); + } else { + uint8_t mask, tag; + instr->GetCheckMaskAndTag(&mask, &tag); + if ((type & mask) == tag) maps->Add(map, zone()); + } + } + if (maps->size() == entry->maps_->size()) { + TRACE(("Removing redundant CheckInstanceType #%d at B%d\n", + instr->id(), instr->block()->block_id())); + EnsureChecked(entry, value, instr); instr->DeleteAndReplaceWith(value); - INC_STAT(redundant_); + INC_STAT(removed_cit_); + } else if (maps->size() != 0) { + entry->maps_ = maps; + if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { + entry->state_ = HCheckTableEntry::CHECKED_STABLE; + } } } void ReduceLoadNamedField(HLoadNamedField* instr) { // Reduce a load of the map field when it is known to be a constant. - if (!IsMapAccess(instr->access())) return; + if (!instr->access().IsMap()) { + // Check if we introduce field maps here. 
+ MapSet maps = instr->maps(); + if (maps != NULL) { + DCHECK_NE(0, maps->size()); + Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE); + } + return; + } HValue* object = instr->object()->ActualValue(); - MapSet maps = FindMaps(object); - if (maps == NULL || maps->size() != 1) return; // Not a constant. + HCheckTableEntry* entry = Find(object); + if (entry == NULL || entry->maps_->size() != 1) return; // Not a constant. - Unique<Map> map = maps->at(0); + EnsureChecked(entry, object, instr); + Unique<Map> map = entry->maps_->at(0); + bool map_is_stable = (entry->state_ != HCheckTableEntry::CHECKED); HConstant* constant = HConstant::CreateAndInsertBefore( - instr->block()->graph()->zone(), map, true, instr); + instr->block()->graph()->zone(), map, map_is_stable, instr); instr->DeleteAndReplaceWith(constant); INC_STAT(loads_); } - void ReduceCheckMapValue(HCheckMapValue* instr) { - if (!instr->map()->IsConstant()) return; // Nothing to learn. - - HValue* object = instr->value()->ActualValue(); - // Match a HCheckMapValue(object, HConstant(map)) - Unique<Map> map = MapConstant(instr->map()); - - HCheckTableEntry* entry = Find(object); - if (entry != NULL) { - MapSet maps = entry->maps_; - if (maps->Contains(map)) { - if (maps->size() == 1) { - // Object is known to have exactly this map. - if (entry->check_ != NULL) { - instr->DeleteAndReplaceWith(entry->check_); - } else { - // Mark check as dead but leave it in the graph as a checkpoint for - // subsequent checks. - instr->SetFlag(HValue::kIsDead); - entry->check_ = instr; - } - INC_STAT(removed_); - } else { - // Only one map survives the check. - maps->Clear(); - maps->Add(map, phase_->zone()); - entry->check_ = instr; - } - } - } else { - // No prior information. 
- Insert(object, instr, map); - } - } - void ReduceCheckHeapObject(HCheckHeapObject* instr) { - if (FindMaps(instr->value()->ActualValue()) != NULL) { + HValue* value = instr->value()->ActualValue(); + if (Find(value) != NULL) { // If the object has known maps, it's definitely a heap object. - instr->DeleteAndReplaceWith(instr->value()); + instr->DeleteAndReplaceWith(value); INC_STAT(removed_cho_); } } @@ -443,12 +528,20 @@ if (instr->has_transition()) { // This store transitions the object to a new map. Kill(object); - Insert(object, NULL, MapConstant(instr->transition())); - } else if (IsMapAccess(instr->access())) { + HConstant* c_transition = HConstant::cast(instr->transition()); + HCheckTableEntry::State state = c_transition->HasStableMapValue() + ? HCheckTableEntry::CHECKED_STABLE + : HCheckTableEntry::CHECKED; + Insert(object, NULL, c_transition->MapValue(), state); + } else if (instr->access().IsMap()) { // This is a store directly to the map field of the object. Kill(object); if (!instr->value()->IsConstant()) return; - Insert(object, NULL, MapConstant(instr->value())); + HConstant* c_value = HConstant::cast(instr->value()); + HCheckTableEntry::State state = c_value->HasStableMapValue() + ? HCheckTableEntry::CHECKED_STABLE + : HCheckTableEntry::CHECKED; + Insert(object, NULL, c_value->MapValue(), state); } else { // If the instruction changes maps, it should be handled above. 
CHECK(!instr->CheckChangesFlag(kMaps)); @@ -456,12 +549,14 @@ } void ReduceCompareMap(HCompareMap* instr) { - MapSet maps = FindMaps(instr->value()->ActualValue()); - if (maps == NULL) return; + HCheckTableEntry* entry = Find(instr->value()->ActualValue()); + if (entry == NULL) return; + + EnsureChecked(entry, instr->value(), instr); int succ; - if (maps->Contains(instr->map())) { - if (maps->size() != 1) { + if (entry->maps_->Contains(instr->map())) { + if (entry->maps_->size() != 1) { TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: " "ambiguous set of maps\n", instr->id(), instr->value()->id(), instr->block()->block_id())); @@ -484,11 +579,18 @@ } void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) { - MapSet maps_left = FindMaps(instr->left()->ActualValue()); - if (maps_left == NULL) return; - MapSet maps_right = FindMaps(instr->right()->ActualValue()); - if (maps_right == NULL) return; - MapSet intersection = maps_left->Intersect(maps_right, phase_->zone()); + HValue* left = instr->left()->ActualValue(); + HCheckTableEntry* le = Find(left); + if (le == NULL) return; + HValue* right = instr->right()->ActualValue(); + HCheckTableEntry* re = Find(right); + if (re == NULL) return; + + EnsureChecked(le, left, instr); + EnsureChecked(re, right, instr); + + // TODO(bmeurer): Add a predicate here instead of computing the intersection + MapSet intersection = le->maps_->Intersect(re->maps_, zone()); if (intersection->size() > 0) return; TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n", @@ -500,40 +602,96 @@ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ); } + void ReduceIsStringAndBranch(HIsStringAndBranch* instr) { + HValue* value = instr->value()->ActualValue(); + HCheckTableEntry* entry = Find(value); + if (entry == NULL) return; + EnsureChecked(entry, value, instr); + int succ; + if (entry->maps_->IsSubset(string_maps())) { + TRACE(("Marking redundant IsStringAndBranch #%d at B%d as true\n", + 
instr->id(), instr->block()->block_id())); + succ = 0; + } else { + MapSet intersection = entry->maps_->Intersect(string_maps(), zone()); + if (intersection->size() > 0) return; + TRACE(("Marking redundant IsStringAndBranch #%d at B%d as false\n", + instr->id(), instr->block()->block_id())); + succ = 1; + } + instr->set_known_successor_index(succ); + int unreachable_succ = 1 - succ; + instr->block()->MarkSuccEdgeUnreachable(unreachable_succ); + } + void ReduceTransitionElementsKind(HTransitionElementsKind* instr) { - MapSet maps = FindMaps(instr->object()->ActualValue()); + HValue* object = instr->object()->ActualValue(); + HCheckTableEntry* entry = Find(object); // Can only learn more about an object that already has a known set of maps. - if (maps == NULL) return; - if (maps->Contains(instr->original_map())) { + if (entry == NULL) return; + EnsureChecked(entry, object, instr); + if (entry->maps_->Contains(instr->original_map())) { // If the object has the original map, it will be transitioned. + UniqueSet<Map>* maps = entry->maps_->Copy(zone()); maps->Remove(instr->original_map()); - maps->Add(instr->transitioned_map(), phase_->zone()); + maps->Add(instr->transitioned_map(), zone()); + entry->maps_ = maps; } else { // Object does not have the given map, thus the transition is redundant. - instr->DeleteAndReplaceWith(instr->object()); + instr->DeleteAndReplaceWith(object); INC_STAT(transitions_); } } + void EnsureChecked(HCheckTableEntry* entry, + HValue* value, + HInstruction* instr) { + if (entry->state_ != HCheckTableEntry::UNCHECKED_STABLE) return; + HGraph* graph = instr->block()->graph(); + HCheckMaps* check = HCheckMaps::CreateAndInsertBefore( + graph->zone(), value, entry->maps_->Copy(graph->zone()), true, instr); + check->MarkAsStabilityCheck(); + entry->state_ = HCheckTableEntry::CHECKED_STABLE; + entry->check_ = NULL; + } + // Kill everything in the table. void Kill() { size_ = 0; cursor_ = 0; } + // Kill all unstable entries in the table. 
+ void KillUnstableEntries() { + bool compact = false; + for (int i = 0; i < size_; ++i) { + HCheckTableEntry* entry = &entries_[i]; + DCHECK_NOT_NULL(entry->object_); + if (entry->state_ == HCheckTableEntry::CHECKED) { + entry->object_ = NULL; + compact = true; + } else { + // All checked stable entries become unchecked stable. + entry->state_ = HCheckTableEntry::UNCHECKED_STABLE; + entry->check_ = NULL; + } + } + if (compact) Compact(); + } + // Kill everything in the table that may alias {object}. void Kill(HValue* object) { bool compact = false; for (int i = 0; i < size_; i++) { HCheckTableEntry* entry = &entries_[i]; - ASSERT(entry->object_ != NULL); + DCHECK(entry->object_ != NULL); if (phase_->aliasing_->MayAlias(entry->object_, object)) { entry->object_ = NULL; compact = true; } } if (compact) Compact(); - ASSERT(Find(object) == NULL); + DCHECK(Find(object) == NULL); } void Compact() { @@ -548,8 +706,8 @@ size_--; } } - ASSERT(size_ == dest); - ASSERT(cursor_ <= size_); + DCHECK(size_ == dest); + DCHECK(cursor_ <= size_); // Preserve the age of the entries by moving the older entries to the end. if (cursor_ == size_) return; // Cursor already points at end. @@ -560,9 +718,9 @@ int L = cursor_; int R = size_ - cursor_; - OS::MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry)); - OS::MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry)); - OS::MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry)); + MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry)); + MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry)); + MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry)); } cursor_ = size_; // Move cursor to end. @@ -576,14 +734,15 @@ for (int i = 0; i < table->size_; i++) { HCheckTableEntry* entry = &table->entries_[i]; - ASSERT(entry->object_ != NULL); + DCHECK(entry->object_ != NULL); PrintF(" checkmaps-table @%d: %s #%d ", i, entry->object_->IsPhi() ? 
"phi" : "object", entry->object_->id()); if (entry->check_ != NULL) { PrintF("check #%d ", entry->check_->id()); } MapSet list = entry->maps_; - PrintF("%d maps { ", list->size()); + PrintF("%d %s maps { ", list->size(), + HCheckTableEntry::State2String(entry->state_)); for (int j = 0; j < list->size(); j++) { if (j > 0) PrintF(", "); PrintF("%" V8PRIxPTR, list->at(j).Hashcode()); @@ -596,40 +755,36 @@ for (int i = size_ - 1; i >= 0; i--) { // Search from most-recently-inserted to least-recently-inserted. HCheckTableEntry* entry = &entries_[i]; - ASSERT(entry->object_ != NULL); + DCHECK(entry->object_ != NULL); if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry; } return NULL; } - MapSet FindMaps(HValue* object) { - HCheckTableEntry* entry = Find(object); - return entry == NULL ? NULL : entry->maps_; + void Insert(HValue* object, + HInstruction* check, + Unique<Map> map, + HCheckTableEntry::State state) { + Insert(object, check, new(zone()) UniqueSet<Map>(map, zone()), state); } - void Insert(HValue* object, HInstruction* check, Unique<Map> map) { - MapSet list = new(phase_->zone()) UniqueSet<Map>(); - list->Add(map, phase_->zone()); - Insert(object, check, list); - } - - void Insert(HValue* object, HInstruction* check, MapSet maps) { + void Insert(HValue* object, + HInstruction* check, + MapSet maps, + HCheckTableEntry::State state) { + DCHECK(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL); HCheckTableEntry* entry = &entries_[cursor_++]; entry->object_ = object; entry->check_ = check; entry->maps_ = maps; + entry->state_ = state; // If the table becomes full, wrap around and overwrite older entries. 
if (cursor_ == kMaxTrackedObjects) cursor_ = 0; if (size_ < kMaxTrackedObjects) size_++; } - bool IsMapAccess(HObjectAccess access) { - return access.IsInobject() && access.offset() == JSObject::kMapOffset; - } - - Unique<Map> MapConstant(HValue* value) { - return Unique<Map>::cast(HConstant::cast(value)->GetUnique()); - } + Zone* zone() const { return phase_->zone(); } + MapSet string_maps() const { return phase_->string_maps(); } friend class HCheckMapsEffects; friend class HCheckEliminationPhase; @@ -638,7 +793,7 @@ HCheckTableEntry entries_[kMaxTrackedObjects]; int16_t cursor_; // Must be <= kMaxTrackedObjects int16_t size_; // Must be <= kMaxTrackedObjects - // TODO(titzer): STATIC_ASSERT kMaxTrackedObjects < max(cursor_) + STATIC_ASSERT(kMaxTrackedObjects < (1 << 15)); }; @@ -646,60 +801,62 @@ // needed for check elimination. class HCheckMapsEffects : public ZoneObject { public: - explicit HCheckMapsEffects(Zone* zone) - : maps_stored_(false), - stores_(5, zone) { } + explicit HCheckMapsEffects(Zone* zone) : objects_(0, zone) { } - inline bool Disabled() { - return false; // Effects are _not_ disabled. - } + // Effects are _not_ disabled. + inline bool Disabled() const { return false; } // Process a possibly side-effecting instruction. void Process(HInstruction* instr, Zone* zone) { switch (instr->opcode()) { case HValue::kStoreNamedField: { - stores_.Add(HStoreNamedField::cast(instr), zone); + HStoreNamedField* store = HStoreNamedField::cast(instr); + if (store->access().IsMap() || store->has_transition()) { + objects_.Add(store->object(), zone); + } break; } - case HValue::kOsrEntry: { - // Kill everything. Loads must not be hoisted past the OSR entry. 
- maps_stored_ = true; + case HValue::kTransitionElementsKind: { + objects_.Add(HTransitionElementsKind::cast(instr)->object(), zone); + break; } default: { - maps_stored_ |= (instr->CheckChangesFlag(kMaps) | - instr->CheckChangesFlag(kElementsKind)); + flags_.Add(instr->ChangesFlags()); + break; } } } // Apply these effects to the given check elimination table. void Apply(HCheckTable* table) { - if (maps_stored_) { + if (flags_.Contains(kOsrEntries)) { // Uncontrollable map modifications; kill everything. table->Kill(); return; } - // Kill maps for each store contained in these effects. - for (int i = 0; i < stores_.length(); i++) { - HStoreNamedField* s = stores_[i]; - if (table->IsMapAccess(s->access()) || s->has_transition()) { - table->Kill(s->object()->ActualValue()); - } + // Kill all unstable entries. + if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) { + table->KillUnstableEntries(); + } + + // Kill maps for each object contained in these effects. + for (int i = 0; i < objects_.length(); ++i) { + table->Kill(objects_[i]->ActualValue()); } } // Union these effects with the other effects. 
void Union(HCheckMapsEffects* that, Zone* zone) { - maps_stored_ |= that->maps_stored_; - for (int i = 0; i < that->stores_.length(); i++) { - stores_.Add(that->stores_[i], zone); + flags_.Add(that->flags_); + for (int i = 0; i < that->objects_.length(); ++i) { + objects_.Add(that->objects_[i], zone); } } private: - bool maps_stored_ : 1; - ZoneList<HStoreNamedField*> stores_; + ZoneList<HValue*> objects_; + GVNFlagSet flags_; }; @@ -734,6 +891,7 @@ PRINT_STAT(redundant); PRINT_STAT(removed); PRINT_STAT(removed_cho); + PRINT_STAT(removed_cit); PRINT_STAT(narrowed); PRINT_STAT(loads); PRINT_STAT(empty); diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-check-elimination.h nodejs-0.11.15/deps/v8/src/hydrogen-check-elimination.h --- nodejs-0.11.13/deps/v8/src/hydrogen-check-elimination.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-check-elimination.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_ #define V8_HYDROGEN_CHECK_ELIMINATION_H_ -#include "hydrogen.h" -#include "hydrogen-alias-analysis.h" +#include "src/hydrogen.h" +#include "src/hydrogen-alias-analysis.h" namespace v8 { namespace internal { @@ -39,11 +16,20 @@ class HCheckEliminationPhase : public HPhase { public: explicit HCheckEliminationPhase(HGraph* graph) - : HPhase("H_Check Elimination", graph), aliasing_() { + : HPhase("H_Check Elimination", graph), aliasing_(), + string_maps_(kStringMapsSize, zone()) { + // Compute the set of string maps. 
+ #define ADD_STRING_MAP(type, size, name, Name) \ + string_maps_.Add(Unique<Map>::CreateImmovable( \ + graph->isolate()->factory()->name##_map()), zone()); + STRING_TYPE_LIST(ADD_STRING_MAP) + #undef ADD_STRING_MAP + DCHECK_EQ(kStringMapsSize, string_maps_.size()); #ifdef DEBUG redundant_ = 0; removed_ = 0; removed_cho_ = 0; + removed_cit_ = 0; narrowed_ = 0; loads_ = 0; empty_ = 0; @@ -58,13 +44,20 @@ friend class HCheckTable; private: + const UniqueSet<Map>* string_maps() const { return &string_maps_; } + void PrintStats(); HAliasAnalyzer* aliasing_; + #define COUNT(type, size, name, Name) + 1 + static const int kStringMapsSize = 0 STRING_TYPE_LIST(COUNT); + #undef COUNT + UniqueSet<Map> string_maps_; #ifdef DEBUG int redundant_; int removed_; int removed_cho_; + int removed_cit_; int narrowed_; int loads_; int empty_; diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-dce.cc nodejs-0.11.15/deps/v8/src/hydrogen-dce.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-dce.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-dce.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-dce.h" -#include "v8.h" +#include "src/hydrogen-dce.h" +#include "src/v8.h" namespace v8 { namespace internal { @@ -55,16 +32,14 @@ void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) { - HeapStringAllocator allocator; - StringStream stream(&allocator); + OFStream os(stdout); + os << "[MarkLive "; if (ref != NULL) { - ref->PrintTo(&stream); + os << *ref; } else { - stream.Add("root "); + os << "root "; } - stream.Add(" -> "); - instr->PrintTo(&stream); - PrintF("[MarkLive %s]\n", stream.ToCString().get()); + os << " -> " << *instr << "]" << endl; } @@ -84,7 +59,7 @@ } } - ASSERT(worklist.is_empty()); // Should have processed everything. + DCHECK(worklist.is_empty()); // Should have processed everything. } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-dce.h nodejs-0.11.15/deps/v8/src/hydrogen-dce.h --- nodejs-0.11.13/deps/v8/src/hydrogen-dce.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-dce.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_DCE_H_ #define V8_HYDROGEN_DCE_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-dehoist.cc nodejs-0.11.15/deps/v8/src/hydrogen-dehoist.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-dehoist.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-dehoist.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-dehoist.h" +#include "src/hydrogen-dehoist.h" +#include "src/base/safe_math.h" namespace v8 { namespace internal { @@ -51,14 +29,25 @@ if (!constant->HasInteger32Value()) return; int32_t sign = binary_operation->IsSub() ? -1 : 1; int32_t value = constant->Integer32Value() * sign; - // We limit offset values to 30 bits because we want to avoid the risk of - // overflows when the offset is added to the object header size. - if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return; + if (value < 0) return; + + // Multiply value by elements size, bailing out on overflow. + int32_t elements_kind_size = + 1 << ElementsKindToShiftSize(array_operation->elements_kind()); + v8::base::internal::CheckedNumeric<int32_t> multiply_result = value; + multiply_result = multiply_result * elements_kind_size; + if (!multiply_result.IsValid()) return; + value = multiply_result.ValueOrDie(); + + // Ensure that the array operation can add value to existing base offset + // without overflowing. 
+ if (!array_operation->TryIncreaseBaseOffset(value)) return; + array_operation->SetKey(subexpression); if (binary_operation->HasNoUses()) { binary_operation->DeleteAndReplaceWith(NULL); } - array_operation->SetIndexOffset(static_cast<uint32_t>(value)); + array_operation->SetDehoisted(true); } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-dehoist.h nodejs-0.11.15/deps/v8/src/hydrogen-dehoist.h --- nodejs-0.11.13/deps/v8/src/hydrogen-dehoist.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-dehoist.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_DEHOIST_H_ #define V8_HYDROGEN_DEHOIST_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-environment-liveness.cc nodejs-0.11.15/deps/v8/src/hydrogen-environment-liveness.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-environment-liveness.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-environment-liveness.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-environment-liveness.h" +#include "src/hydrogen-environment-liveness.h" namespace v8 { @@ -45,7 +22,7 @@ collect_markers_(true), last_simulate_(NULL), went_live_since_last_simulate_(maximum_environment_size_, zone()) { - ASSERT(maximum_environment_size_ > 0); + DCHECK(maximum_environment_size_ > 0); for (int i = 0; i < block_count_; ++i) { live_at_block_start_.Add( new(zone()) BitVector(maximum_environment_size_, zone()), zone()); @@ -84,8 +61,8 @@ } HSimulate* simulate = first_simulate_.at(successor_id); if (simulate == NULL) continue; - ASSERT(simulate->closure().is_identical_to( - block->last_environment()->closure())); + DCHECK(VerifyClosures(simulate->closure(), + block->last_environment()->closure())); ZapEnvironmentSlot(i, simulate); } } @@ -97,7 +74,7 @@ if (!marker->CheckFlag(HValue::kEndsLiveRange)) return; HSimulate* simulate = marker->next_simulate(); if (simulate != NULL) { - ASSERT(simulate->closure().is_identical_to(marker->closure())); + DCHECK(VerifyClosures(simulate->closure(), marker->closure())); ZapEnvironmentSlot(marker->index(), 
simulate); } } @@ -132,7 +109,7 @@ if (marker->kind() == HEnvironmentMarker::LOOKUP) { live->Add(index); } else { - ASSERT(marker->kind() == HEnvironmentMarker::BIND); + DCHECK(marker->kind() == HEnvironmentMarker::BIND); live->Remove(index); went_live_since_last_simulate_.Add(index); } @@ -147,10 +124,10 @@ live->Clear(); last_simulate_ = NULL; - // The following ASSERTs guard the assumption used in case + // The following DCHECKs guard the assumption used in case // kEnterInlined below: - ASSERT(instr->next()->IsSimulate()); - ASSERT(instr->next()->next()->IsGoto()); + DCHECK(instr->next()->IsSimulate()); + DCHECK(instr->next()->next()->IsGoto()); break; case HValue::kEnterInlined: { @@ -158,7 +135,7 @@ // target block. Here we make use of the fact that the end of an // inline sequence always looks like this: HLeaveInlined, HSimulate, // HGoto (to return_target block), with no environment lookups in - // between (see ASSERTs above). + // between (see DCHECKs above). HEnterInlined* enter = HEnterInlined::cast(instr); live->Clear(); for (int i = 0; i < enter->return_targets()->length(); ++i) { @@ -179,7 +156,7 @@ void HEnvironmentLivenessAnalysisPhase::Run() { - ASSERT(maximum_environment_size_ > 0); + DCHECK(maximum_environment_size_ > 0); // Main iteration. Compute liveness of environment slots, and store it // for each block until it doesn't change any more. 
For efficiency, visit @@ -241,4 +218,14 @@ } } + +#ifdef DEBUG +bool HEnvironmentLivenessAnalysisPhase::VerifyClosures( + Handle<JSFunction> a, Handle<JSFunction> b) { + Heap::RelocationLock for_heap_access(isolate()->heap()); + AllowHandleDereference for_verification; + return a.is_identical_to(b); +} +#endif + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-environment-liveness.h nodejs-0.11.15/deps/v8/src/hydrogen-environment-liveness.h --- nodejs-0.11.13/deps/v8/src/hydrogen-environment-liveness.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-environment-liveness.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ #define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { @@ -55,6 +32,9 @@ void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker); void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live); void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live); +#ifdef DEBUG + bool VerifyClosures(Handle<JSFunction> a, Handle<JSFunction> b); +#endif int block_count_; diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-escape-analysis.cc nodejs-0.11.15/deps/v8/src/hydrogen-escape-analysis.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-escape-analysis.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-escape-analysis.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-escape-analysis.h" +#include "src/hydrogen-escape-analysis.h" namespace v8 { namespace internal { @@ -155,12 +132,29 @@ // TODO(mstarzinger): This will narrow a map check against a set of maps // down to the first element in the set. Revisit and fix this. HCheckValue* check = HCheckValue::New( - zone, NULL, value, mapcheck->first_map(), false); + zone, NULL, value, mapcheck->maps()->at(0), false); check->InsertBefore(mapcheck); return check; } +// Replace a field load with a given value, forcing Smi representation if +// necessary. 
+HValue* HEscapeAnalysisPhase::NewLoadReplacement( + HLoadNamedField* load, HValue* load_value) { + HValue* replacement = load_value; + Representation representation = load->representation(); + if (representation.IsSmiOrInteger32() || representation.IsDouble()) { + Zone* zone = graph()->zone(); + HInstruction* new_instr = + HForceRepresentation::New(zone, NULL, load_value, representation); + new_instr->InsertAfter(load); + replacement = new_instr; + } + return replacement; +} + + // Performs a forward data-flow analysis of all loads and stores on the // given captured allocation. This uses a reverse post-order iteration // over affected basic blocks. All non-escaping instructions are handled @@ -195,11 +189,12 @@ HLoadNamedField* load = HLoadNamedField::cast(instr); int index = load->access().offset() / kPointerSize; if (load->object() != allocate) continue; - ASSERT(load->access().IsInobject()); - HValue* replacement = state->OperandAt(index); + DCHECK(load->access().IsInobject()); + HValue* replacement = + NewLoadReplacement(load, state->OperandAt(index)); load->DeleteAndReplaceWith(replacement); if (FLAG_trace_escape_analysis) { - PrintF("Replacing load #%d with #%d (%s)\n", instr->id(), + PrintF("Replacing load #%d with #%d (%s)\n", load->id(), replacement->id(), replacement->Mnemonic()); } break; @@ -208,7 +203,7 @@ HStoreNamedField* store = HStoreNamedField::cast(instr); int index = store->access().offset() / kPointerSize; if (store->object() != allocate) continue; - ASSERT(store->access().IsInobject()); + DCHECK(store->access().IsInobject()); state = NewStateCopy(store->previous(), state); state->SetOperandAt(index, store->value()); if (store->has_transition()) { @@ -291,7 +286,7 @@ } // All uses have been handled. 
- ASSERT(allocate->HasNoUses()); + DCHECK(allocate->HasNoUses()); allocate->DeleteAndReplaceWith(NULL); } @@ -304,14 +299,14 @@ int size_in_bytes = allocate->size()->GetInteger32Constant(); number_of_values_ = size_in_bytes / kPointerSize; number_of_objects_++; - block_states_.Clear(); + block_states_.Rewind(0); // Perform actual analysis step. AnalyzeDataFlow(allocate); cumulative_values_ += number_of_values_; - ASSERT(allocate->HasNoUses()); - ASSERT(!allocate->IsLinked()); + DCHECK(allocate->HasNoUses()); + DCHECK(!allocate->IsLinked()); } } @@ -325,7 +320,7 @@ CollectCapturedValues(); if (captured_.is_empty()) break; PerformScalarReplacement(); - captured_.Clear(); + captured_.Rewind(0); } } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-escape-analysis.h nodejs-0.11.15/deps/v8/src/hydrogen-escape-analysis.h --- nodejs-0.11.13/deps/v8/src/hydrogen-escape-analysis.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-escape-analysis.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_ESCAPE_ANALYSIS_H_ #define V8_HYDROGEN_ESCAPE_ANALYSIS_H_ -#include "allocation.h" -#include "hydrogen.h" +#include "src/allocation.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { @@ -62,6 +39,8 @@ HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck); + HValue* NewLoadReplacement(HLoadNamedField* load, HValue* load_value); + HCapturedObject* StateAt(HBasicBlock* block) { return block_states_.at(block->block_id()); } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-flow-engine.h nodejs-0.11.15/deps/v8/src/hydrogen-flow-engine.h --- nodejs-0.11.13/deps/v8/src/hydrogen-flow-engine.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-flow-engine.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_FLOW_ENGINE_H_ #define V8_HYDROGEN_FLOW_ENGINE_H_ -#include "hydrogen.h" -#include "hydrogen-instructions.h" -#include "zone.h" +#include "src/hydrogen.h" +#include "src/hydrogen-instructions.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -125,7 +102,7 @@ State* state = State::Finish(StateAt(block), block, zone_); if (block->IsReachable()) { - ASSERT(state != NULL); + DCHECK(state != NULL); if (block->IsLoopHeader()) { // Apply loop effects before analyzing loop body. ComputeLoopEffects(block)->Apply(state); @@ -162,7 +139,7 @@ // Computes and caches the loop effects for the loop which has the given // block as its loop header. Effects* ComputeLoopEffects(HBasicBlock* block) { - ASSERT(block->IsLoopHeader()); + DCHECK(block->IsLoopHeader()); Effects* effects = loop_effects_[block->block_id()]; if (effects != NULL) return effects; // Already analyzed this loop. @@ -177,7 +154,7 @@ HBasicBlock* member = graph_->blocks()->at(i); if (i != block->block_id() && member->IsLoopHeader()) { // Recursively compute and cache the effects of the nested loop. - ASSERT(member->loop_information()->parent_loop() == loop); + DCHECK(member->loop_information()->parent_loop() == loop); Effects* nested = ComputeLoopEffects(member); effects->Union(nested, zone_); // Skip the nested loop's blocks. @@ -185,7 +162,7 @@ } else { // Process all the effects of the block. 
if (member->IsUnreachable()) continue; - ASSERT(member->current_loop() == loop); + DCHECK(member->current_loop() == loop); for (HInstructionIterator it(member); !it.Done(); it.Advance()) { effects->Process(it.Current(), zone_); } @@ -218,7 +195,7 @@ } inline void CheckPredecessorCount(HBasicBlock* block) { - ASSERT(block->predecessors()->length() == pred_counts_[block->block_id()]); + DCHECK(block->predecessors()->length() == pred_counts_[block->block_id()]); } inline void IncrementPredecessorCount(HBasicBlock* block) { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-gvn.cc nodejs-0.11.15/deps/v8/src/hydrogen-gvn.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-gvn.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-gvn.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,10 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "hydrogen.h" -#include "hydrogen-gvn.h" -#include "v8.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/hydrogen.h" +#include "src/hydrogen-gvn.h" +#include "src/v8.h" namespace v8 { namespace internal { @@ -106,8 +83,8 @@ bool IsEmpty() const { return count_ == 0; } inline HInstruction* operator[](int i) const { - ASSERT(0 <= i); - ASSERT(i < kNumberOfTrackedSideEffects); + DCHECK(0 <= i); + DCHECK(i < kNumberOfTrackedSideEffects); return data_[i]; } inline HInstruction* at(int i) const { return operator[](i); } @@ -121,7 +98,7 @@ void TraceGVN(const char* msg, ...) 
{ va_list arguments; va_start(arguments, msg); - OS::VPrint(msg, arguments); + base::OS::VPrint(msg, arguments); va_end(arguments); } @@ -163,10 +140,10 @@ lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)), free_list_head_(other->free_list_head_), side_effects_tracker_(other->side_effects_tracker_) { - OS::MemCopy( - array_, other->array_, array_size_ * sizeof(HInstructionMapListElement)); - OS::MemCopy( - lists_, other->lists_, lists_size_ * sizeof(HInstructionMapListElement)); + MemCopy(array_, other->array_, + array_size_ * sizeof(HInstructionMapListElement)); + MemCopy(lists_, other->lists_, + lists_size_ * sizeof(HInstructionMapListElement)); } @@ -235,7 +212,7 @@ void HInstructionMap::Resize(int new_size, Zone* zone) { - ASSERT(new_size > count_); + DCHECK(new_size > count_); // Hashing the values into the new array has no more collisions than in the // old hash map, so we can use the existing lists_ array, if we are careful. @@ -275,12 +252,12 @@ } } USE(old_count); - ASSERT(count_ == old_count); + DCHECK(count_ == old_count); } void HInstructionMap::ResizeLists(int new_size, Zone* zone) { - ASSERT(new_size > lists_size_); + DCHECK(new_size > lists_size_); HInstructionMapListElement* new_lists = zone->NewArray<HInstructionMapListElement>(new_size); @@ -293,8 +270,7 @@ lists_ = new_lists; if (old_lists != NULL) { - OS::MemCopy( - lists_, old_lists, old_size * sizeof(HInstructionMapListElement)); + MemCopy(lists_, old_lists, old_size * sizeof(HInstructionMapListElement)); } for (int i = old_size; i < lists_size_; ++i) { lists_[i].next = free_list_head_; @@ -304,10 +280,10 @@ void HInstructionMap::Insert(HInstruction* instr, Zone* zone) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); // Resizing when half of the hashtable is filled up. 
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone); - ASSERT(count_ < array_size_); + DCHECK(count_ < array_size_); count_++; uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode())); if (array_[pos].instr == NULL) { @@ -318,11 +294,11 @@ ResizeLists(lists_size_ << 1, zone); } int new_element_pos = free_list_head_; - ASSERT(new_element_pos != kNil); + DCHECK(new_element_pos != kNil); free_list_head_ = lists_[free_list_head_].next; lists_[new_element_pos].instr = instr; lists_[new_element_pos].next = array_[pos].next; - ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL); + DCHECK(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL); array_[pos].next = new_element_pos; } } @@ -338,9 +314,9 @@ } -HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) { +HSideEffectMap& HSideEffectMap::operator=(const HSideEffectMap& other) { if (this != &other) { - OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize); + MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize); } return *this; } @@ -424,20 +400,20 @@ } -void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream, - SideEffects side_effects) const { +OStream& operator<<(OStream& os, const TrackedEffects& te) { + SideEffectsTracker* t = te.tracker; const char* separator = ""; - stream->Add("["); + os << "["; for (int bit = 0; bit < kNumberOfFlags; ++bit) { GVNFlag flag = GVNFlagFromInt(bit); - if (side_effects.ContainsFlag(flag)) { - stream->Add(separator); + if (te.effects.ContainsFlag(flag)) { + os << separator; separator = ", "; switch (flag) { -#define DECLARE_FLAG(Type) \ - case k##Type: \ - stream->Add(#Type); \ - break; +#define DECLARE_FLAG(Type) \ + case k##Type: \ + os << #Type; \ + break; GVN_TRACKED_FLAG_LIST(DECLARE_FLAG) GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG) #undef DECLARE_FLAG @@ -446,21 +422,20 @@ } } } - for (int index = 0; index < num_global_vars_; ++index) { - if 
(side_effects.ContainsSpecial(GlobalVar(index))) { - stream->Add(separator); + for (int index = 0; index < t->num_global_vars_; ++index) { + if (te.effects.ContainsSpecial(t->GlobalVar(index))) { + os << separator << "[" << *t->global_vars_[index].handle() << "]"; separator = ", "; - stream->Add("[%p]", *global_vars_[index].handle()); } } - for (int index = 0; index < num_inobject_fields_; ++index) { - if (side_effects.ContainsSpecial(InobjectField(index))) { - stream->Add(separator); + for (int index = 0; index < t->num_inobject_fields_; ++index) { + if (te.effects.ContainsSpecial(t->InobjectField(index))) { + os << separator << t->inobject_fields_[index]; separator = ", "; - inobject_fields_[index].PrintTo(stream); } } - stream->Add("]"); + os << "]"; + return os; } @@ -473,11 +448,9 @@ } if (num_global_vars_ < kNumberOfGlobalVars) { if (FLAG_trace_gvn) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - stream.Add("Tracking global var [%p] (mapped to index %d)\n", - *cell.handle(), num_global_vars_); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "Tracking global var [" << *cell.handle() << "] " + << "(mapped to index " << num_global_vars_ << ")" << endl; } *index = num_global_vars_; global_vars_[num_global_vars_++] = cell; @@ -497,12 +470,9 @@ } if (num_inobject_fields_ < kNumberOfInobjectFields) { if (FLAG_trace_gvn) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - stream.Add("Tracking inobject field access "); - access.PrintTo(&stream); - stream.Add(" (mapped to index %d)\n", num_inobject_fields_); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "Tracking inobject field access " << access << " (mapped to index " + << num_inobject_fields_ << ")" << endl; } *index = num_inobject_fields_; inobject_fields_[num_inobject_fields_++] = access; @@ -518,7 +488,7 @@ block_side_effects_(graph->blocks()->length(), zone()), loop_side_effects_(graph->blocks()->length(), zone()), 
visited_on_paths_(graph->blocks()->length(), zone()) { - ASSERT(!AllowHandleAllocation::IsAllowed()); + DCHECK(!AllowHandleAllocation::IsAllowed()); block_side_effects_.AddBlock( SideEffects(), graph->blocks()->length(), zone()); loop_side_effects_.AddBlock( @@ -527,7 +497,7 @@ void HGlobalValueNumberingPhase::Run() { - ASSERT(!removed_side_effects_); + DCHECK(!removed_side_effects_); for (int i = FLAG_gvn_iterations; i > 0; --i) { // Compute the side effects. ComputeBlockSideEffects(); @@ -543,8 +513,8 @@ removed_side_effects_ = false; // Clear all side effects. - ASSERT_EQ(block_side_effects_.length(), graph()->blocks()->length()); - ASSERT_EQ(loop_side_effects_.length(), graph()->blocks()->length()); + DCHECK_EQ(block_side_effects_.length(), graph()->blocks()->length()); + DCHECK_EQ(loop_side_effects_.length(), graph()->blocks()->length()); for (int i = 0; i < graph()->blocks()->length(); ++i) { block_side_effects_[i].RemoveAll(); loop_side_effects_[i].RemoveAll(); @@ -595,13 +565,9 @@ if (block->IsLoopHeader()) { SideEffects side_effects = loop_side_effects_[block->block_id()]; if (FLAG_trace_gvn) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - stream.Add("Try loop invariant motion for block B%d changes ", - block->block_id()); - side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects); - stream.Add("\n"); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "Try loop invariant motion for " << *block << " changes " + << Print(side_effects) << endl; } HBasicBlock* last = block->loop_information()->GetLastBackEdge(); for (int j = block->block_id(); j <= last->block_id(); ++j) { @@ -618,13 +584,9 @@ SideEffects loop_kills) { HBasicBlock* pre_header = loop_header->predecessors()->at(0); if (FLAG_trace_gvn) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - stream.Add("Loop invariant code motion for B%d depends on ", - block->block_id()); - side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills); - 
stream.Add("\n"); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "Loop invariant code motion for " << *block << " depends on " + << Print(loop_kills) << endl; } HInstruction* instr = block->first(); while (instr != NULL) { @@ -633,17 +595,11 @@ SideEffects changes = side_effects_tracker_.ComputeChanges(instr); SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr); if (FLAG_trace_gvn) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - stream.Add("Checking instruction i%d (%s) changes ", - instr->id(), instr->Mnemonic()); - side_effects_tracker_.PrintSideEffectsTo(&stream, changes); - stream.Add(", depends on "); - side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on); - stream.Add(". Loop changes "); - side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills); - stream.Add("\n"); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "Checking instruction i" << instr->id() << " (" + << instr->Mnemonic() << ") changes " << Print(changes) + << ", depends on " << Print(depends_on) << ". 
Loop changes " + << Print(loop_kills) << endl; } bool can_hoist = !depends_on.ContainsAnyOf(loop_kills); if (can_hoist && !graph()->use_optimistic_licm()) { @@ -878,19 +834,17 @@ map->Kill(changes); dominators->Store(changes, instr); if (FLAG_trace_gvn) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - stream.Add("Instruction i%d changes ", instr->id()); - side_effects_tracker_.PrintSideEffectsTo(&stream, changes); - stream.Add("\n"); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "Instruction i" << instr->id() << " changes " << Print(changes) + << endl; } } - if (instr->CheckFlag(HValue::kUseGVN)) { - ASSERT(!instr->HasObservableSideEffects()); + if (instr->CheckFlag(HValue::kUseGVN) && + !instr->CheckFlag(HValue::kCantBeReplaced)) { + DCHECK(!instr->HasObservableSideEffects()); HInstruction* other = map->Lookup(instr); if (other != NULL) { - ASSERT(instr->Equals(other) && other->Equals(instr)); + DCHECK(instr->Equals(other) && other->Equals(instr)); TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n", instr->id(), instr->Mnemonic(), diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-gvn.h nodejs-0.11.15/deps/v8/src/hydrogen-gvn.h --- nodejs-0.11.13/deps/v8/src/hydrogen-gvn.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-gvn.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,20 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_GVN_H_ #define V8_HYDROGEN_GVN_H_ -#include "hydrogen.h" -#include "hydrogen-instructions.h" -#include "compiler.h" -#include "zone.h" +#include "src/compiler.h" +#include "src/hydrogen.h" +#include "src/hydrogen-instructions.h" +#include "src/zone.h" namespace v8 { namespace internal { +class OStream; + // This class extends GVNFlagSet with additional "special" dynamic side effects, // which can be used to represent side effects that cannot be expressed using // the GVNFlags of an HInstruction. 
These special side effects are tracked by a @@ -45,7 +24,7 @@ static const int kNumberOfSpecials = 64 - kNumberOfFlags; SideEffects() : bits_(0) { - ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT); + DCHECK(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT); } explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {} bool IsEmpty() const { return bits_ == 0; } @@ -61,15 +40,14 @@ void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); } void RemoveAll() { bits_ = 0; } uint64_t ToIntegral() const { return bits_; } - void PrintTo(StringStream* stream) const; private: uint64_t MaskFlag(GVNFlag flag) const { return static_cast<uint64_t>(1) << static_cast<unsigned>(flag); } uint64_t MaskSpecial(int special) const { - ASSERT(special >= 0); - ASSERT(special < kNumberOfSpecials); + DCHECK(special >= 0); + DCHECK(special < kNumberOfSpecials); return static_cast<uint64_t>(1) << static_cast<unsigned>( special + kNumberOfFlags); } @@ -78,6 +56,8 @@ }; +struct TrackedEffects; + // Tracks global variable and inobject field loads/stores in a fine grained // fashion, and represents them using the "special" dynamic side effects of the // SideEffects class (see above). 
This way unrelated global variable/inobject @@ -88,20 +68,20 @@ SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {} SideEffects ComputeChanges(HInstruction* instr); SideEffects ComputeDependsOn(HInstruction* instr); - void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const; private: + friend OStream& operator<<(OStream& os, const TrackedEffects& f); bool ComputeGlobalVar(Unique<Cell> cell, int* index); bool ComputeInobjectField(HObjectAccess access, int* index); static int GlobalVar(int index) { - ASSERT(index >= 0); - ASSERT(index < kNumberOfGlobalVars); + DCHECK(index >= 0); + DCHECK(index < kNumberOfGlobalVars); return index; } static int InobjectField(int index) { - ASSERT(index >= 0); - ASSERT(index < kNumberOfInobjectFields); + DCHECK(index >= 0); + DCHECK(index < kNumberOfInobjectFields); return index + kNumberOfGlobalVars; } @@ -118,6 +98,18 @@ }; +// Helper class for printing, because the effects don't know their tracker. +struct TrackedEffects { + TrackedEffects(SideEffectsTracker* t, SideEffects e) + : tracker(t), effects(e) {} + SideEffectsTracker* tracker; + SideEffects effects; +}; + + +OStream& operator<<(OStream& os, const TrackedEffects& f); + + // Perform common subexpression elimination and loop-invariant code motion. class HGlobalValueNumberingPhase V8_FINAL : public HPhase { public: @@ -137,6 +129,9 @@ SideEffects loop_kills); bool AllowCodeMotion(); bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header); + TrackedEffects Print(SideEffects side_effects) { + return TrackedEffects(&side_effects_tracker_, side_effects); + } SideEffectsTracker side_effects_tracker_; bool removed_side_effects_; diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen.h nodejs-0.11.15/deps/v8/src/hydrogen.h --- nodejs-0.11.13/deps/v8/src/hydrogen.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_H_ #define V8_HYDROGEN_H_ -#include "v8.h" +#include "src/v8.h" -#include "accessors.h" -#include "allocation.h" -#include "ast.h" -#include "compiler.h" -#include "hydrogen-instructions.h" -#include "zone.h" -#include "scopes.h" +#include "src/accessors.h" +#include "src/allocation.h" +#include "src/ast.h" +#include "src/compiler.h" +#include "src/hydrogen-instructions.h" +#include "src/scopes.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -117,8 +94,8 @@ void SetInitialEnvironment(HEnvironment* env); void ClearEnvironment() { - ASSERT(IsFinished()); - ASSERT(end()->SuccessorCount() == 0); + DCHECK(IsFinished()); + DCHECK(end()->SuccessorCount() == 0); last_environment_ = NULL; } bool HasEnvironment() const { return last_environment_ != NULL; } @@ -126,7 +103,7 @@ HBasicBlock* parent_loop_header() const { return parent_loop_header_; } void set_parent_loop_header(HBasicBlock* block) { - ASSERT(parent_loop_header_ == NULL); + DCHECK(parent_loop_header_ == NULL); parent_loop_header_ = block; } @@ -174,6 +151,9 @@ dominates_loop_successors_ = true; } + bool IsOrdered() const { return is_ordered_; } + void MarkAsOrdered() { is_ordered_ = true; } + void MarkSuccEdgeUnreachable(int succ); inline Zone* zone() const; @@ -230,9 +210,13 @@ bool is_reachable_ : 1; bool dominates_loop_successors_ : 1; bool is_osr_entry_ : 1; + bool is_ordered_ : 1; }; +OStream& operator<<(OStream& os, const HBasicBlock& b); + + class HPredecessorIterator V8_FINAL BASE_EMBEDDED { public: explicit HPredecessorIterator(HBasicBlock* block) @@ -377,7 +361,7 @@ int GetMaximumValueID() const { return values_.length(); } int GetNextBlockID() { return next_block_id_++; } int GetNextValueID(HValue* value) { - ASSERT(!disallow_adding_new_values_); + DCHECK(!disallow_adding_new_values_); values_.Add(value, zone()); return values_.length() - 1; } @@ -438,9 +422,11 @@ void MarkDependsOnEmptyArrayProtoElements() { // Add map dependency if not already added. 
if (depends_on_empty_array_proto_elements_) return; - isolate()->initial_object_prototype()->map()->AddDependentCompilationInfo( + Map::AddDependentCompilationInfo( + handle(isolate()->initial_object_prototype()->map()), DependentCode::kElementsCantBeAddedGroup, info()); - isolate()->initial_array_prototype()->map()->AddDependentCompilationInfo( + Map::AddDependentCompilationInfo( + handle(isolate()->initial_array_prototype()->map()), DependentCode::kElementsCantBeAddedGroup, info()); depends_on_empty_array_proto_elements_ = true; } @@ -450,17 +436,17 @@ } bool has_uint32_instructions() { - ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); + DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); return uint32_instructions_ != NULL; } ZoneList<HInstruction*>* uint32_instructions() { - ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); + DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); return uint32_instructions_; } void RecordUint32Instruction(HInstruction* instr) { - ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); + DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty()); if (uint32_instructions_ == NULL) { uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone()); } @@ -620,7 +606,7 @@ HValue* Lookup(int index) const { HValue* result = values_[index]; - ASSERT(result != NULL); + DCHECK(result != NULL); return result; } @@ -630,13 +616,13 @@ } void Push(HValue* value) { - ASSERT(value != NULL); + DCHECK(value != NULL); ++push_count_; values_.Add(value, zone()); } HValue* Pop() { - ASSERT(!ExpressionStackIsEmpty()); + DCHECK(!ExpressionStackIsEmpty()); if (push_count_ > 0) { --push_count_; } else { @@ -653,7 +639,7 @@ HValue* ExpressionStackAt(int index_from_top) const { int index = length() - index_from_top - 1; - ASSERT(HasExpressionAt(index)); + DCHECK(HasExpressionAt(index)); return values_[index]; } @@ -688,7 
+674,7 @@ } void SetValueAt(int index, HValue* value) { - ASSERT(index < length()); + DCHECK(index < length()); values_[index] = value; } @@ -696,7 +682,7 @@ // by 1 (receiver is parameter index -1 but environment index 0). // Stack-allocated local indices are shifted by the number of parameters. int IndexFor(Variable* variable) const { - ASSERT(variable->IsStackAllocated()); + DCHECK(variable->IsStackAllocated()); int shift = variable->IsParameter() ? 1 : parameter_count_ + specials_count_; @@ -715,9 +701,6 @@ return i >= parameter_count() && i < parameter_count() + specials_count(); } - void PrintTo(StringStream* stream); - void PrintToStd(); - Zone* zone() const { return zone_; } private: @@ -759,6 +742,9 @@ }; +OStream& operator<<(OStream& os, const HEnvironment& env); + + class HOptimizedGraphBuilder; enum ArgumentsAllowedFlag { @@ -886,7 +872,7 @@ BailoutId ast_id) V8_OVERRIDE; static TestContext* cast(AstContext* context) { - ASSERT(context->IsTest()); + DCHECK(context->IsTest()); return reinterpret_cast<TestContext*>(context); } @@ -988,11 +974,11 @@ HBasicBlock* false_branch) : continuation_captured_(true), true_branch_(true_branch), false_branch_(false_branch) {} - ~HIfContinuation() { ASSERT(!continuation_captured_); } + ~HIfContinuation() { DCHECK(!continuation_captured_); } void Capture(HBasicBlock* true_branch, HBasicBlock* false_branch) { - ASSERT(!continuation_captured_); + DCHECK(!continuation_captured_); true_branch_ = true_branch; false_branch_ = false_branch; continuation_captured_ = true; @@ -1000,7 +986,7 @@ void Continue(HBasicBlock** true_branch, HBasicBlock** false_branch) { - ASSERT(continuation_captured_); + DCHECK(continuation_captured_); *true_branch = true_branch_; *false_branch = false_branch_; continuation_captured_ = false; @@ -1059,10 +1045,14 @@ : info_(info), graph_(NULL), current_block_(NULL), + scope_(info->scope()), position_(HSourcePosition::Unknown()), start_position_(0) {} virtual ~HGraphBuilder() {} + Scope* scope() const 
{ return scope_; } + void set_scope(Scope* scope) { scope_ = scope; } + HBasicBlock* current_block() const { return current_block_; } void set_current_block(HBasicBlock* block) { current_block_ = block; } HEnvironment* environment() const { @@ -1137,7 +1127,7 @@ HInstruction* result = AddInstruction(NewUncasted<I>(p1)); // Specializations must have their parameters properly casted // to avoid landing here. - ASSERT(!result->IsReturn() && !result->IsSimulate() && + DCHECK(!result->IsReturn() && !result->IsSimulate() && !result->IsDeoptimize()); return result; } @@ -1147,7 +1137,7 @@ I* result = AddInstructionTyped(New<I>(p1)); // Specializations must have their parameters properly casted // to avoid landing here. - ASSERT(!result->IsReturn() && !result->IsSimulate() && + DCHECK(!result->IsReturn() && !result->IsSimulate() && !result->IsDeoptimize()); return result; } @@ -1167,7 +1157,7 @@ HInstruction* result = AddInstruction(NewUncasted<I>(p1, p2)); // Specializations must have their parameters properly casted // to avoid landing here. - ASSERT(!result->IsSimulate()); + DCHECK(!result->IsSimulate()); return result; } @@ -1176,7 +1166,7 @@ I* result = AddInstructionTyped(New<I>(p1, p2)); // Specializations must have their parameters properly casted // to avoid landing here. - ASSERT(!result->IsSimulate()); + DCHECK(!result->IsSimulate()); return result; } @@ -1312,14 +1302,28 @@ void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE); + // When initializing arrays, we'll unfold the loop if the number of elements + // is known at compile time and is <= kElementLoopUnrollThreshold. 
+ static const int kElementLoopUnrollThreshold = 8; + protected: virtual bool BuildGraph() = 0; HBasicBlock* CreateBasicBlock(HEnvironment* env); HBasicBlock* CreateLoopHeaderBlock(); + template <class BitFieldClass> + HValue* BuildDecodeField(HValue* encoded_field) { + HValue* mask_value = Add<HConstant>(static_cast<int>(BitFieldClass::kMask)); + HValue* masked_field = + AddUncasted<HBitwise>(Token::BIT_AND, encoded_field, mask_value); + return AddUncasted<HShr>(masked_field, + Add<HConstant>(static_cast<int>(BitFieldClass::kShift))); + } + + HValue* BuildGetElementsKind(HValue* object); + HValue* BuildCheckHeapObject(HValue* object); - HValue* BuildCheckMap(HValue* obj, Handle<Map> map); HValue* BuildCheckString(HValue* string); HValue* BuildWrapReceiver(HValue* object, HValue* function); @@ -1345,8 +1349,32 @@ HValue* BuildNumberToString(HValue* object, Type* type); + void BuildJSObjectCheck(HValue* receiver, + int bit_field_mask); + + // Checks a key value that's being used for a keyed element access context. If + // the key is a index, i.e. a smi or a number in a unique string with a cached + // numeric value, the "true" of the continuation is joined. Otherwise, + // if the key is a name or a unique string, the "false" of the continuation is + // joined. Otherwise, a deoptimization is triggered. In both paths of the + // continuation, the key is pushed on the top of the environment. 
+ void BuildKeyedIndexCheck(HValue* key, + HIfContinuation* join_continuation); + + // Checks the properties of an object if they are in dictionary case, in which + // case "true" of continuation is taken, otherwise the "false" + void BuildTestForDictionaryProperties(HValue* object, + HIfContinuation* continuation); + + void BuildNonGlobalObjectCheck(HValue* receiver); + + HValue* BuildKeyedLookupCacheHash(HValue* object, + HValue* key); + HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver, - HValue* key); + HValue* elements, + HValue* key, + HValue* hash); HValue* BuildRegExpConstructResult(HValue* length, HValue* index, @@ -1406,20 +1434,14 @@ HInstruction* AddLoadStringInstanceType(HValue* string); HInstruction* AddLoadStringLength(HValue* string); - HStoreNamedField* AddStoreMapNoWriteBarrier(HValue* object, HValue* map) { - HStoreNamedField* store_map = Add<HStoreNamedField>( - object, HObjectAccess::ForMap(), map); - store_map->SkipWriteBarrier(); - return store_map; - } - HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map); - HStoreNamedField* AddStoreMapConstantNoWriteBarrier(HValue* object, - Handle<Map> map) { - HStoreNamedField* store_map = AddStoreMapConstant(object, map); - store_map->SkipWriteBarrier(); - return store_map; - } - HLoadNamedField* AddLoadElements(HValue* object); + HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map) { + return Add<HStoreNamedField>(object, HObjectAccess::ForMap(), + Add<HConstant>(map)); + } + HLoadNamedField* AddLoadMap(HValue* object, + HValue* dependency = NULL); + HLoadNamedField* AddLoadElements(HValue* object, + HValue* dependency = NULL); bool MatchRotateRight(HValue* left, HValue* right, @@ -1435,7 +1457,12 @@ Maybe<int> fixed_right_arg, HAllocationMode allocation_mode); - HLoadNamedField* AddLoadFixedArrayLength(HValue *object); + HLoadNamedField* AddLoadFixedArrayLength(HValue *object, + HValue *dependency = NULL); + + HLoadNamedField* AddLoadArrayLength(HValue 
*object, + ElementsKind kind, + HValue *dependency = NULL); HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin); @@ -1448,6 +1475,9 @@ class IfBuilder V8_FINAL { public: + // If using this constructor, Initialize() must be called explicitly! + IfBuilder(); + explicit IfBuilder(HGraphBuilder* builder); IfBuilder(HGraphBuilder* builder, HIfContinuation* continuation); @@ -1456,6 +1486,8 @@ if (!finished_) End(); } + void Initialize(HGraphBuilder* builder); + template<class Condition> Condition* If(HValue *p) { Condition* compare = builder()->New<Condition>(p); @@ -1598,9 +1630,14 @@ void Return(HValue* value); private: + void InitializeDontCreateBlocks(HGraphBuilder* builder); + HControlInstruction* AddCompare(HControlInstruction* compare); - HGraphBuilder* builder() const { return builder_; } + HGraphBuilder* builder() const { + DCHECK(builder_ != NULL); // Have you called "Initialize"? + return builder_; + } void AddMergeAtJoinBlock(bool deopt); @@ -1645,9 +1682,11 @@ kPreIncrement, kPostIncrement, kPreDecrement, - kPostDecrement + kPostDecrement, + kWhileTrue }; + explicit LoopBuilder(HGraphBuilder* builder); // while (true) {...} LoopBuilder(HGraphBuilder* builder, HValue* context, Direction direction); @@ -1657,7 +1696,7 @@ HValue* increment_amount); ~LoopBuilder() { - ASSERT(finished_); + DCHECK(finished_); } HValue* BeginBody( @@ -1665,11 +1704,15 @@ HValue* terminating, Token::Value token); + void BeginBody(int drop_count); + void Break(); void EndBody(); private: + void Initialize(HGraphBuilder* builder, HValue* context, + Direction direction, HValue* increment_amount); Zone* zone() { return builder_->zone(); } HGraphBuilder* builder_; @@ -1685,10 +1728,28 @@ bool finished_; }; - HValue* BuildNewElementsCapacity(HValue* old_capacity); + template <class A, class P1> + void DeoptimizeIf(P1 p1, char* const reason) { + IfBuilder builder(this); + builder.If<A>(p1); + builder.ThenDeopt(reason); + } + + template <class A, class P1, class P2> + void 
DeoptimizeIf(P1 p1, P2 p2, const char* reason) { + IfBuilder builder(this); + builder.If<A>(p1, p2); + builder.ThenDeopt(reason); + } + + template <class A, class P1, class P2, class P3> + void DeoptimizeIf(P1 p1, P2 p2, P3 p3, const char* reason) { + IfBuilder builder(this); + builder.If<A>(p1, p2, p3); + builder.ThenDeopt(reason); + } - void BuildNewSpaceArrayCheck(HValue* length, - ElementsKind kind); + HValue* BuildNewElementsCapacity(HValue* old_capacity); class JSArrayBuilder V8_FINAL { public: @@ -1708,10 +1769,24 @@ }; ElementsKind kind() { return kind_; } + HAllocate* elements_location() { return elements_location_; } - HValue* AllocateEmptyArray(); - HValue* AllocateArray(HValue* capacity, HValue* length_field, - FillMode fill_mode = FILL_WITH_HOLE); + HAllocate* AllocateEmptyArray(); + HAllocate* AllocateArray(HValue* capacity, + HValue* length_field, + FillMode fill_mode = FILL_WITH_HOLE); + // Use these allocators when capacity could be unknown at compile time + // but its limit is known. For constant |capacity| the value of + // |capacity_upper_bound| is ignored and the actual |capacity| + // value is used as an upper bound. 
+ HAllocate* AllocateArray(HValue* capacity, + int capacity_upper_bound, + HValue* length_field, + FillMode fill_mode = FILL_WITH_HOLE); + HAllocate* AllocateArray(HValue* capacity, + HConstant* capacity_upper_bound, + HValue* length_field, + FillMode fill_mode = FILL_WITH_HOLE); HValue* GetElementsLocation() { return elements_location_; } HValue* EmitMapCode(); @@ -1728,25 +1803,23 @@ } HValue* EmitInternalMapCode(); - HValue* EstablishEmptyArrayAllocationSize(); - HValue* EstablishAllocationSize(HValue* length_node); - HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity, - HValue* length_field, - FillMode fill_mode = FILL_WITH_HOLE); HGraphBuilder* builder_; ElementsKind kind_; AllocationSiteMode mode_; HValue* allocation_site_payload_; HValue* constructor_function_; - HInnerAllocatedObject* elements_location_; + HAllocate* elements_location_; }; HValue* BuildAllocateArrayFromLength(JSArrayBuilder* array_builder, HValue* length_argument); + HValue* BuildCalculateElementsSize(ElementsKind kind, + HValue* capacity); + HAllocate* AllocateJSArrayObject(AllocationSiteMode mode); + HConstant* EstablishElementsAllocationSize(ElementsKind kind, int capacity); - HValue* BuildAllocateElements(ElementsKind kind, - HValue* capacity); + HAllocate* BuildAllocateElements(ElementsKind kind, HValue* size_in_bytes); void BuildInitializeElementsHeader(HValue* elements, ElementsKind kind, @@ -1755,16 +1828,17 @@ HValue* BuildAllocateElementsAndInitializeElementsHeader(ElementsKind kind, HValue* capacity); - // array must have been allocated with enough room for - // 1) the JSArray, 2) a AllocationMemento if mode requires it, - // 3) a FixedArray or FixedDoubleArray. - // A pointer to the Fixed(Double)Array is returned. 
- HInnerAllocatedObject* BuildJSArrayHeader(HValue* array, - HValue* array_map, - AllocationSiteMode mode, - ElementsKind elements_kind, - HValue* allocation_site_payload, - HValue* length_field); + // |array| must have been allocated with enough room for + // 1) the JSArray and 2) an AllocationMemento if mode requires it. + // If the |elements| value provided is NULL then the array elements storage + // is initialized with empty array. + void BuildJSArrayHeader(HValue* array, + HValue* array_map, + HValue* elements, + AllocationSiteMode mode, + ElementsKind elements_kind, + HValue* allocation_site_payload, + HValue* length_field); HValue* BuildGrowElementsCapacity(HValue* object, HValue* elements, @@ -1773,6 +1847,12 @@ HValue* length, HValue* new_capacity); + void BuildFillElementsWithValue(HValue* elements, + ElementsKind elements_kind, + HValue* from, + HValue* to, + HValue* value); + void BuildFillElementsWithHole(HValue* elements, ElementsKind elements_kind, HValue* from, @@ -1785,11 +1865,19 @@ HValue* length, HValue* capacity); - HValue* BuildCloneShallowArray(HValue* boilerplate, - HValue* allocation_site, - AllocationSiteMode mode, - ElementsKind kind, - int length); + HValue* BuildCloneShallowArrayCow(HValue* boilerplate, + HValue* allocation_site, + AllocationSiteMode mode, + ElementsKind kind); + + HValue* BuildCloneShallowArrayEmpty(HValue* boilerplate, + HValue* allocation_site, + AllocationSiteMode mode); + + HValue* BuildCloneShallowArrayNonEmpty(HValue* boilerplate, + HValue* allocation_site, + AllocationSiteMode mode, + ElementsKind kind); HValue* BuildElementIndexHash(HValue* index); @@ -1802,8 +1890,7 @@ HValue* previous_object_size, HValue* payload); - HInstruction* BuildConstantMapCheck(Handle<JSObject> constant, - CompilationInfo* info); + HInstruction* BuildConstantMapCheck(Handle<JSObject> constant); HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder); @@ -1813,7 +1900,7 @@ protected: void 
SetSourcePosition(int position) { - ASSERT(position != RelocInfo::kNoPosition); + DCHECK(position != RelocInfo::kNoPosition); position_.set_position(position - start_position_); } @@ -1847,13 +1934,6 @@ private: HGraphBuilder(); - HValue* BuildUncheckedDictionaryElementLoadHelper( - HValue* elements, - HValue* key, - HValue* hash, - HValue* mask, - int current_probe); - template <class I> I* AddInstructionTyped(I* instr) { return I::cast(AddInstruction(instr)); @@ -1862,6 +1942,7 @@ CompilationInfo* info_; HGraph* graph_; HBasicBlock* current_block_; + Scope* scope_; HSourcePosition position_; int start_position_; }; @@ -1989,10 +2070,12 @@ class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED { public: explicit BreakAndContinueInfo(BreakableStatement* target, + Scope* scope, int drop_extra = 0) : target_(target), break_block_(NULL), continue_block_(NULL), + scope_(scope), drop_extra_(drop_extra) { } @@ -2001,12 +2084,14 @@ void set_break_block(HBasicBlock* block) { break_block_ = block; } HBasicBlock* continue_block() { return continue_block_; } void set_continue_block(HBasicBlock* block) { continue_block_ = block; } + Scope* scope() { return scope_; } int drop_extra() { return drop_extra_; } private: BreakableStatement* target_; HBasicBlock* break_block_; HBasicBlock* continue_block_; + Scope* scope_; int drop_extra_; }; @@ -2028,7 +2113,8 @@ // Search the break stack for a break or continue target. enum BreakType { BREAK, CONTINUE }; - HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra); + HBasicBlock* Get(BreakableStatement* stmt, BreakType type, + Scope** scope, int* drop_extra); private: BreakAndContinueInfo* info_; @@ -2138,8 +2224,7 @@ bool PreProcessOsrEntry(IterationStatement* statement); void VisitLoopBody(IterationStatement* stmt, - HBasicBlock* loop_entry, - BreakAndContinueInfo* break_info); + HBasicBlock* loop_entry); // Create a back edge in the flow graph. 
body_exit is the predecessor // block and loop_entry is the successor block. loop_successor is the @@ -2252,6 +2337,17 @@ // Try to optimize fun.apply(receiver, arguments) pattern. bool TryCallApply(Call* expr); + bool TryHandleArrayCall(Call* expr, HValue* function); + bool TryHandleArrayCallNew(CallNew* expr, HValue* function); + void BuildArrayCall(Expression* expr, int arguments_count, HValue* function, + Handle<AllocationSite> cell); + + enum ArrayIndexOfMode { kFirstIndexOf, kLastIndexOf }; + HValue* BuildArrayIndexOf(HValue* receiver, + HValue* search_element, + ElementsKind kind, + ArrayIndexOfMode mode); + HValue* ImplicitReceiverFor(HValue* function, Handle<JSFunction> target); @@ -2319,6 +2415,7 @@ void HandlePropertyAssignment(Assignment* expr); void HandleCompoundAssignment(Assignment* expr); void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type, + Expression* expr, BailoutId ast_id, BailoutId return_id, HValue* object, @@ -2335,8 +2432,13 @@ ElementsKind fixed_elements_kind, HValue* byte_length, HValue* length); - bool IsCallNewArrayInlineable(CallNew* expr); - void BuildInlinedCallNewArray(CallNew* expr); + Handle<JSFunction> array_function() { + return handle(isolate()->native_context()->array_function()); + } + + bool IsCallArrayInlineable(int argument_count, Handle<AllocationSite> site); + void BuildInlinedCallArray(Expression* expression, int argument_count, + Handle<AllocationSite> site); class PropertyAccessInfo { public: @@ -2349,6 +2451,7 @@ access_type_(access_type), type_(type), name_(name), + field_type_(HType::Tagged()), access_(HObjectAccess::ForMap()) { } // Checkes whether this PropertyAccessInfo can be handled as a monomorphic @@ -2364,23 +2467,7 @@ // PropertyAccessInfo is built for types->first(). 
bool CanAccessAsMonomorphic(SmallMapList* types); - Handle<Map> map() { - if (type_->Is(Type::Number())) { - Context* context = current_info()->closure()->context(); - context = context->native_context(); - return handle(context->number_function()->initial_map()); - } else if (type_->Is(Type::Boolean())) { - Context* context = current_info()->closure()->context(); - context = context->native_context(); - return handle(context->boolean_function()->initial_map()); - } else if (type_->Is(Type::String())) { - Context* context = current_info()->closure()->context(); - context = context->native_context(); - return handle(context->string_function()->initial_map()); - } else { - return type_->AsClass(); - } - } + Handle<Map> map(); Type* type() const { return type_; } Handle<String> name() const { return name_; } @@ -2393,10 +2480,10 @@ int offset; if (Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset)) { if (type_->Is(Type::String())) { - ASSERT(name_->Equals(isolate()->heap()->length_string())); + DCHECK(String::Equals(isolate()->factory()->length_string(), name_)); *access = HObjectAccess::ForStringLength(); } else if (type_->Is(Type::Array())) { - ASSERT(name_->Equals(isolate()->heap()->length_string())); + DCHECK(String::Equals(isolate()->factory()->length_string(), name_)); *access = HObjectAccess::ForArrayLength(map()->elements_kind()); } else { *access = HObjectAccess::ForMapAndOffset(map(), offset); @@ -2414,14 +2501,19 @@ Handle<JSFunction> accessor() { return accessor_; } Handle<Object> constant() { return constant_; } Handle<Map> transition() { return handle(lookup_.GetTransitionTarget()); } + SmallMapList* field_maps() { return &field_maps_; } + HType field_type() const { return field_type_; } HObjectAccess access() { return access_; } private: Type* ToType(Handle<Map> map) { return builder_->ToType(map); } + Zone* zone() { return builder_->zone(); } Isolate* isolate() { return lookup_.isolate(); } + CompilationInfo* top_info() { return 
builder_->top_info(); } CompilationInfo* current_info() { return builder_->current_info(); } bool LoadResult(Handle<Map> map); + void LoadFieldMaps(Handle<Map> map); bool LookupDescriptor(); bool LookupInPrototypes(); bool IsCompatible(PropertyAccessInfo* other); @@ -2440,6 +2532,8 @@ Handle<JSFunction> accessor_; Handle<JSObject> api_holder_; Handle<Object> constant_; + SmallMapList field_maps_; + HType field_type_; HObjectAccess access_; }; @@ -2499,6 +2593,7 @@ HInstruction* BuildIncrement(bool returns_original_input, CountOperation* expr); HInstruction* BuildKeyedGeneric(PropertyAccessType access_type, + Expression* expr, HValue* object, HValue* key, HValue* value); @@ -2518,7 +2613,8 @@ PropertyAccessType access_type, KeyedAccessStoreMode store_mode); - HValue* HandlePolymorphicElementAccess(HValue* object, + HValue* HandlePolymorphicElementAccess(Expression* expr, + HValue* object, HValue* key, HValue* val, SmallMapList* maps, @@ -2534,6 +2630,7 @@ bool* has_side_effects); HInstruction* BuildNamedGeneric(PropertyAccessType access, + Expression* expr, HValue* object, Handle<String> name, HValue* value, @@ -2656,30 +2753,38 @@ source_size_(0) { } void Initialize(CompilationInfo* info); - void Print(); - void SaveTiming(const char* name, TimeDelta time, unsigned size); + void Print(const char* stats_name); + void SaveTiming(const char* name, base::TimeDelta time, unsigned size); - void IncrementFullCodeGen(TimeDelta full_code_gen) { + void IncrementFullCodeGen(base::TimeDelta full_code_gen) { full_code_gen_ += full_code_gen; } - void IncrementSubtotals(TimeDelta create_graph, - TimeDelta optimize_graph, - TimeDelta generate_code) { - create_graph_ += create_graph; - optimize_graph_ += optimize_graph; - generate_code_ += generate_code; + void IncrementCreateGraph(base::TimeDelta delta) { create_graph_ += delta; } + + void IncrementOptimizeGraph(base::TimeDelta delta) { + optimize_graph_ += delta; + } + + void IncrementGenerateCode(base::TimeDelta delta) { 
generate_code_ += delta; } + + void IncrementSubtotals(base::TimeDelta create_graph, + base::TimeDelta optimize_graph, + base::TimeDelta generate_code) { + IncrementCreateGraph(create_graph); + IncrementOptimizeGraph(optimize_graph); + IncrementGenerateCode(generate_code); } private: - List<TimeDelta> times_; + List<base::TimeDelta> times_; List<const char*> names_; List<unsigned> sizes_; - TimeDelta create_graph_; - TimeDelta optimize_graph_; - TimeDelta generate_code_; + base::TimeDelta create_graph_; + base::TimeDelta optimize_graph_; + base::TimeDelta generate_code_; unsigned total_size_; - TimeDelta full_code_gen_; + base::TimeDelta full_code_gen_; double source_size_; }; @@ -2706,12 +2811,12 @@ explicit HTracer(int isolate_id) : trace_(&string_allocator_), indent_(0) { if (FLAG_trace_hydrogen_file == NULL) { - OS::SNPrintF(filename_, - "hydrogen-%d-%d.cfg", - OS::GetCurrentProcessId(), - isolate_id); + SNPrintF(filename_, + "hydrogen-%d-%d.cfg", + base::OS::GetCurrentProcessId(), + isolate_id); } else { - OS::StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length()); + StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length()); } WriteChars(filename_.start(), "", 0, false); } @@ -2736,7 +2841,7 @@ tracer_->indent_--; tracer_->PrintIndent(); tracer_->trace_.Add("end_%s\n", name_); - ASSERT(tracer_->indent_ >= 0); + DCHECK(tracer_->indent_ >= 0); tracer_->FlushToFile(); } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-infer-representation.cc nodejs-0.11.15/deps/v8/src/hydrogen-infer-representation.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-infer-representation.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-infer-representation.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "hydrogen-infer-representation.h" +#include "src/hydrogen-infer-representation.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-infer-representation.h nodejs-0.11.15/deps/v8/src/hydrogen-infer-representation.h --- nodejs-0.11.13/deps/v8/src/hydrogen-infer-representation.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-infer-representation.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_ #define V8_HYDROGEN_INFER_REPRESENTATION_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-infer-types.cc nodejs-0.11.15/deps/v8/src/hydrogen-infer-types.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-infer-types.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-infer-types.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-infer-types.h" +#include "src/hydrogen-infer-types.h" namespace v8 { namespace internal { @@ -69,7 +46,7 @@ } } } - ASSERT(in_worklist_.IsEmpty()); + DCHECK(in_worklist_.IsEmpty()); } } } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-infer-types.h nodejs-0.11.15/deps/v8/src/hydrogen-infer-types.h --- nodejs-0.11.13/deps/v8/src/hydrogen-infer-types.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-infer-types.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_INFER_TYPES_H_ #define V8_HYDROGEN_INFER_TYPES_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-instructions.cc nodejs-0.11.15/deps/v8/src/hydrogen-instructions.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-instructions.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-instructions.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,51 +1,34 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "double.h" -#include "factory.h" -#include "hydrogen-infer-representation.h" -#include "property-details-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/double.h" +#include "src/factory.h" +#include "src/hydrogen-infer-representation.h" +#include "src/property-details-inl.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-ia32.h" +#include "src/ia32/lithium-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/lithium-x64.h" +#include "src/x64/lithium-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/lithium-arm64.h" +#include "src/arm64/lithium-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/lithium-arm.h" +#include "src/arm/lithium-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/lithium-mips.h" +#include "src/mips/lithium-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/lithium-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/lithium-x87.h" // NOLINT #else #error Unsupported target architecture. #endif +#include "src/base/safe_math.h" + namespace v8 { namespace internal { @@ -58,7 +41,7 @@ Isolate* HValue::isolate() const { - ASSERT(block() != NULL); + DCHECK(block() != NULL); return block()->isolate(); } @@ -74,7 +57,7 @@ void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) { - ASSERT(CheckFlag(kFlexibleRepresentation)); + DCHECK(CheckFlag(kFlexibleRepresentation)); Representation new_rep = RepresentationFromInputs(); UpdateRepresentation(new_rep, h_infer, "inputs"); new_rep = RepresentationFromUses(); @@ -309,7 +292,7 @@ #ifdef DEBUG void Range::Verify() const { - ASSERT(lower_ <= upper_); + DCHECK(lower_ <= upper_); } #endif @@ -329,46 +312,6 @@ } -const char* HType::ToString() { - // Note: The c1visualizer syntax for locals allows only a sequence of the - // following characters: A-Za-z0-9_-|: - switch (type_) { - case kNone: return "none"; - case kTagged: return "tagged"; - case kTaggedPrimitive: return "primitive"; - case kTaggedNumber: return "number"; - case kSmi: return "smi"; - case kHeapNumber: return "heap-number"; - case kString: return "string"; - case kBoolean: return 
"boolean"; - case kNonPrimitive: return "non-primitive"; - case kJSArray: return "array"; - case kJSObject: return "object"; - } - UNREACHABLE(); - return "unreachable"; -} - - -HType HType::TypeFromValue(Handle<Object> value) { - HType result = HType::Tagged(); - if (value->IsSmi()) { - result = HType::Smi(); - } else if (value->IsHeapNumber()) { - result = HType::HeapNumber(); - } else if (value->IsString()) { - result = HType::String(); - } else if (value->IsBoolean()) { - result = HType::Boolean(); - } else if (value->IsJSObject()) { - result = HType::JSObject(); - } else if (value->IsJSArray()) { - result = HType::JSArray(); - } - return result; -} - - bool HValue::IsDefinedAfter(HBasicBlock* other) const { return block()->block_id() > other->block_id(); } @@ -478,7 +421,7 @@ if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false; } bool result = DataEquals(other); - ASSERT(!result || Hashcode() == other->Hashcode()); + DCHECK(!result || Hashcode() == other->Hashcode()); return result; } @@ -509,6 +452,8 @@ !(block()->IsReachable() || IsBlockEntry() || IsControlInstruction() || + IsArgumentsObject() || + IsCapturedObject() || IsSimulate() || IsEnterInlined() || IsLeaveInlined()); @@ -548,7 +493,7 @@ while (use_list_ != NULL) { HUseListNode* list_node = use_list_; HValue* value = list_node->value(); - ASSERT(!value->block()->IsStartBlock()); + DCHECK(!value->block()->IsStartBlock()); value->InternalSetOperandAt(list_node->index(), other); use_list_ = list_node->tail(); list_node->set_tail(other->use_list_); @@ -574,7 +519,7 @@ void HValue::SetBlock(HBasicBlock* block) { - ASSERT(block_ == NULL || block == NULL); + DCHECK(block_ == NULL || block == NULL); block_ = block; if (id_ == kNoNumber && block != NULL) { id_ = block->graph()->GetNextValueID(this); @@ -582,47 +527,36 @@ } -void HValue::PrintTypeTo(StringStream* stream) { - if (!representation().IsTagged() || type().Equals(HType::Tagged())) return; - stream->Add(" type:%s", type().ToString()); -} 
+OStream& operator<<(OStream& os, const HValue& v) { return v.PrintTo(os); } -void HValue::PrintRangeTo(StringStream* stream) { - if (range() == NULL || range()->IsMostGeneric()) return; - // Note: The c1visualizer syntax for locals allows only a sequence of the - // following characters: A-Za-z0-9_-|: - stream->Add(" range:%d_%d%s", - range()->lower(), - range()->upper(), - range()->CanBeMinusZero() ? "_m0" : ""); +OStream& operator<<(OStream& os, const TypeOf& t) { + if (t.value->representation().IsTagged() && + !t.value->type().Equals(HType::Tagged())) + return os; + return os << " type:" << t.value->type(); } -void HValue::PrintChangesTo(StringStream* stream) { - GVNFlagSet changes_flags = ChangesFlags(); - if (changes_flags.IsEmpty()) return; - stream->Add(" changes["); - if (changes_flags == AllSideEffectsFlagSet()) { - stream->Add("*"); +OStream& operator<<(OStream& os, const ChangesOf& c) { + GVNFlagSet changes_flags = c.value->ChangesFlags(); + if (changes_flags.IsEmpty()) return os; + os << " changes["; + if (changes_flags == c.value->AllSideEffectsFlagSet()) { + os << "*"; } else { bool add_comma = false; -#define PRINT_DO(Type) \ - if (changes_flags.Contains(k##Type)) { \ - if (add_comma) stream->Add(","); \ - add_comma = true; \ - stream->Add(#Type); \ - } +#define PRINT_DO(Type) \ + if (changes_flags.Contains(k##Type)) { \ + if (add_comma) os << ","; \ + add_comma = true; \ + os << #Type; \ + } GVN_TRACKED_FLAG_LIST(PRINT_DO); GVN_UNTRACKED_FLAG_LIST(PRINT_DO); #undef PRINT_DO } - stream->Add("]"); -} - - -void HValue::PrintNameTo(StringStream* stream) { - stream->Add("%s%d", representation_.Mnemonic(), id()); + return os << "]"; } @@ -663,75 +597,63 @@ void HValue::AddNewRange(Range* r, Zone* zone) { if (!HasRange()) ComputeInitialRange(zone); if (!HasRange()) range_ = new(zone) Range(); - ASSERT(HasRange()); + DCHECK(HasRange()); r->StackUpon(range_); range_ = r; } void HValue::RemoveLastAddedRange() { - ASSERT(HasRange()); - ASSERT(range_->next() 
!= NULL); + DCHECK(HasRange()); + DCHECK(range_->next() != NULL); range_ = range_->next(); } void HValue::ComputeInitialRange(Zone* zone) { - ASSERT(!HasRange()); + DCHECK(!HasRange()); range_ = InferRange(zone); - ASSERT(HasRange()); + DCHECK(HasRange()); } -void HSourcePosition::PrintTo(FILE* out) { - if (IsUnknown()) { - PrintF(out, "<?>"); +OStream& operator<<(OStream& os, const HSourcePosition& p) { + if (p.IsUnknown()) { + return os << "<?>"; + } else if (FLAG_hydrogen_track_positions) { + return os << "<" << p.inlining_id() << ":" << p.position() << ">"; } else { - if (FLAG_hydrogen_track_positions) { - PrintF(out, "<%d:%d>", inlining_id(), position()); - } else { - PrintF(out, "<0:%d>", raw()); - } + return os << "<0:" << p.raw() << ">"; } } -void HInstruction::PrintTo(StringStream* stream) { - PrintMnemonicTo(stream); - PrintDataTo(stream); - PrintRangeTo(stream); - PrintChangesTo(stream); - PrintTypeTo(stream); - if (CheckFlag(HValue::kHasNoObservableSideEffects)) { - stream->Add(" [noOSE]"); - } - if (CheckFlag(HValue::kIsDead)) { - stream->Add(" [dead]"); - } +OStream& HInstruction::PrintTo(OStream& os) const { // NOLINT + os << Mnemonic() << " "; + PrintDataTo(os) << ChangesOf(this) << TypeOf(this); + if (CheckFlag(HValue::kHasNoObservableSideEffects)) os << " [noOSE]"; + if (CheckFlag(HValue::kIsDead)) os << " [dead]"; + return os; } -void HInstruction::PrintDataTo(StringStream *stream) { +OStream& HInstruction::PrintDataTo(OStream& os) const { // NOLINT for (int i = 0; i < OperandCount(); ++i) { - if (i > 0) stream->Add(" "); - OperandAt(i)->PrintNameTo(stream); + if (i > 0) os << " "; + os << NameOf(OperandAt(i)); } -} - - -void HInstruction::PrintMnemonicTo(StringStream* stream) { - stream->Add("%s ", Mnemonic()); + return os; } void HInstruction::Unlink() { - ASSERT(IsLinked()); - ASSERT(!IsControlInstruction()); // Must never move control instructions. - ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these. 
- ASSERT(previous_ != NULL); + DCHECK(IsLinked()); + DCHECK(!IsControlInstruction()); // Must never move control instructions. + DCHECK(!IsBlockEntry()); // Doesn't make sense to delete these. + DCHECK(previous_ != NULL); previous_->next_ = next_; if (next_ == NULL) { - ASSERT(block()->last() == this); + DCHECK(block()->last() == this); block()->set_last(previous_); } else { next_->previous_ = previous_; @@ -741,11 +663,11 @@ void HInstruction::InsertBefore(HInstruction* next) { - ASSERT(!IsLinked()); - ASSERT(!next->IsBlockEntry()); - ASSERT(!IsControlInstruction()); - ASSERT(!next->block()->IsStartBlock()); - ASSERT(next->previous_ != NULL); + DCHECK(!IsLinked()); + DCHECK(!next->IsBlockEntry()); + DCHECK(!IsControlInstruction()); + DCHECK(!next->block()->IsStartBlock()); + DCHECK(next->previous_ != NULL); HInstruction* prev = next->previous(); prev->next_ = this; next->previous_ = this; @@ -759,14 +681,14 @@ void HInstruction::InsertAfter(HInstruction* previous) { - ASSERT(!IsLinked()); - ASSERT(!previous->IsControlInstruction()); - ASSERT(!IsControlInstruction() || previous->next_ == NULL); + DCHECK(!IsLinked()); + DCHECK(!previous->IsControlInstruction()); + DCHECK(!IsControlInstruction() || previous->next_ == NULL); HBasicBlock* block = previous->block(); // Never insert anything except constants into the start block after finishing // it. if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) { - ASSERT(block->end()->SecondSuccessor() == NULL); + DCHECK(block->end()->SecondSuccessor() == NULL); InsertAfter(block->end()->FirstSuccessor()->first()); return; } @@ -776,7 +698,7 @@ // simulate instruction instead. 
HInstruction* next = previous->next_; if (previous->HasObservableSideEffects() && next != NULL) { - ASSERT(next->IsSimulate()); + DCHECK(next->IsSimulate()); previous = next; next = previous->next_; } @@ -795,6 +717,21 @@ } +bool HInstruction::Dominates(HInstruction* other) { + if (block() != other->block()) { + return block()->Dominates(other->block()); + } + // Both instructions are in the same basic block. This instruction + // should precede the other one in order to dominate it. + for (HInstruction* instr = next(); instr != NULL; instr = instr->next()) { + if (instr == other) { + return true; + } + } + return false; +} + + #ifdef DEBUG void HInstruction::Verify() { // Verify that input operands are defined before use. @@ -811,19 +748,19 @@ cur = cur->previous(); } // Must reach other operand in the same block! - ASSERT(cur == other_operand); + DCHECK(cur == other_operand); } } else { // If the following assert fires, you may have forgotten an // AddInstruction. - ASSERT(other_block->Dominates(cur_block)); + DCHECK(other_block->Dominates(cur_block)); } } // Verify that instructions that may have side-effects are followed // by a simulate instruction. if (HasObservableSideEffects() && !IsOsrEntry()) { - ASSERT(next()->IsSimulate()); + DCHECK(next()->IsSimulate()); } // Verify that instructions that can be eliminated by GVN have overridden @@ -834,135 +771,167 @@ // Verify that all uses are in the graph. for (HUseIterator use = uses(); !use.Done(); use.Advance()) { if (use.value()->IsInstruction()) { - ASSERT(HInstruction::cast(use.value())->IsLinked()); + DCHECK(HInstruction::cast(use.value())->IsLinked()); } } } #endif -static bool HasPrimitiveRepresentation(HValue* instr) { - return instr->representation().IsInteger32() || - instr->representation().IsDouble(); -} - - bool HInstruction::CanDeoptimize() { // TODO(titzer): make this a virtual method? 
switch (opcode()) { + case HValue::kAbnormalExit: case HValue::kAccessArgumentsAt: - case HValue::kApplyArguments: + case HValue::kAllocate: case HValue::kArgumentsElements: case HValue::kArgumentsLength: case HValue::kArgumentsObject: + case HValue::kBlockEntry: case HValue::kBoundsCheckBaseIndexInformation: + case HValue::kCallFunction: + case HValue::kCallNew: + case HValue::kCallNewArray: + case HValue::kCallStub: + case HValue::kCallWithDescriptor: case HValue::kCapturedObject: - case HValue::kClampToUint8: + case HValue::kClassOfTestAndBranch: + case HValue::kCompareGeneric: + case HValue::kCompareHoleAndBranch: + case HValue::kCompareMap: + case HValue::kCompareMinusZeroAndBranch: + case HValue::kCompareNumericAndBranch: + case HValue::kCompareObjectEqAndBranch: case HValue::kConstant: + case HValue::kConstructDouble: case HValue::kContext: - case HValue::kDateField: case HValue::kDebugBreak: case HValue::kDeclareGlobals: - case HValue::kDiv: + case HValue::kDoubleBits: case HValue::kDummyUse: case HValue::kEnterInlined: case HValue::kEnvironmentMarker: - case HValue::kForInCacheArray: - case HValue::kForInPrepareMap: - case HValue::kFunctionLiteral: + case HValue::kForceRepresentation: case HValue::kGetCachedArrayIndex: case HValue::kGoto: + case HValue::kHasCachedArrayIndexAndBranch: + case HValue::kHasInstanceTypeAndBranch: case HValue::kInnerAllocatedObject: case HValue::kInstanceOf: case HValue::kInstanceOfKnownGlobal: - case HValue::kInvokeFunction: + case HValue::kIsConstructCallAndBranch: + case HValue::kIsObjectAndBranch: + case HValue::kIsSmiAndBranch: + case HValue::kIsStringAndBranch: + case HValue::kIsUndetectableAndBranch: case HValue::kLeaveInlined: - case HValue::kLoadContextSlot: case HValue::kLoadFieldByIndex: - case HValue::kLoadFunctionPrototype: - case HValue::kLoadGlobalCell: case HValue::kLoadGlobalGeneric: - case HValue::kLoadKeyed: - case HValue::kLoadKeyedGeneric: case HValue::kLoadNamedField: case HValue::kLoadNamedGeneric: case 
HValue::kLoadRoot: case HValue::kMapEnumLength: - case HValue::kMathFloorOfDiv: case HValue::kMathMinMax: + case HValue::kParameter: + case HValue::kPhi: + case HValue::kPushArguments: + case HValue::kRegExpLiteral: + case HValue::kReturn: + case HValue::kSeqStringGetChar: + case HValue::kStoreCodeEntry: + case HValue::kStoreFrameContext: + case HValue::kStoreKeyed: + case HValue::kStoreNamedField: + case HValue::kStoreNamedGeneric: + case HValue::kStringCharCodeAt: + case HValue::kStringCharFromCode: + case HValue::kThisFunction: + case HValue::kTypeofIsAndBranch: + case HValue::kUnknownOSRValue: + case HValue::kUseConst: + return false; + + case HValue::kAdd: + case HValue::kAllocateBlockContext: + case HValue::kApplyArguments: + case HValue::kBitwise: + case HValue::kBoundsCheck: + case HValue::kBranch: + case HValue::kCallJSFunction: + case HValue::kCallRuntime: + case HValue::kChange: + case HValue::kCheckHeapObject: + case HValue::kCheckInstanceType: + case HValue::kCheckMapValue: + case HValue::kCheckMaps: + case HValue::kCheckSmi: + case HValue::kCheckValue: + case HValue::kClampToUint8: + case HValue::kDateField: + case HValue::kDeoptimize: + case HValue::kDiv: + case HValue::kForInCacheArray: + case HValue::kForInPrepareMap: + case HValue::kFunctionLiteral: + case HValue::kInvokeFunction: + case HValue::kLoadContextSlot: + case HValue::kLoadFunctionPrototype: + case HValue::kLoadGlobalCell: + case HValue::kLoadKeyed: + case HValue::kLoadKeyedGeneric: + case HValue::kMathFloorOfDiv: case HValue::kMod: case HValue::kMul: case HValue::kOsrEntry: - case HValue::kParameter: case HValue::kPower: - case HValue::kPushArgument: case HValue::kRor: case HValue::kSar: - case HValue::kSeqStringGetChar: case HValue::kSeqStringSetChar: case HValue::kShl: case HValue::kShr: case HValue::kSimulate: case HValue::kStackCheck: - case HValue::kStoreCodeEntry: case HValue::kStoreContextSlot: case HValue::kStoreGlobalCell: - case HValue::kStoreKeyed: case 
HValue::kStoreKeyedGeneric: - case HValue::kStoreNamedField: - case HValue::kStoreNamedGeneric: case HValue::kStringAdd: - case HValue::kStringCharCodeAt: - case HValue::kStringCharFromCode: + case HValue::kStringCompareAndBranch: case HValue::kSub: - case HValue::kThisFunction: case HValue::kToFastProperties: case HValue::kTransitionElementsKind: case HValue::kTrapAllocationMemento: case HValue::kTypeof: case HValue::kUnaryMathOperation: - case HValue::kUseConst: case HValue::kWrapReceiver: - return false; - case HValue::kForceRepresentation: - case HValue::kAdd: - case HValue::kBitwise: - case HValue::kChange: - case HValue::kCompareGeneric: - // These instructions might deoptimize if they are not primitive. - if (!HasPrimitiveRepresentation(this)) return true; - for (int i = 0; i < OperandCount(); i++) { - HValue* input = OperandAt(i); - if (!HasPrimitiveRepresentation(input)) return true; - } - return false; - default: return true; } + UNREACHABLE(); + return true; } -void HDummyUse::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); +OStream& operator<<(OStream& os, const NameOf& v) { + return os << v.value->representation().Mnemonic() << v.value->id(); } +OStream& HDummyUse::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()); +} -void HEnvironmentMarker::PrintDataTo(StringStream* stream) { - stream->Add("%s var[%d]", kind() == BIND ? "bind" : "lookup", index()); + +OStream& HEnvironmentMarker::PrintDataTo(OStream& os) const { // NOLINT + return os << (kind() == BIND ? 
"bind" : "lookup") << " var[" << index() + << "]"; } -void HUnaryCall::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" "); - stream->Add("#%d", argument_count()); +OStream& HUnaryCall::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()) << " #" << argument_count(); } -void HCallJSFunction::PrintDataTo(StringStream* stream) { - function()->PrintNameTo(stream); - stream->Add(" "); - stream->Add("#%d", argument_count()); +OStream& HCallJSFunction::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(function()) << " #" << argument_count(); } @@ -988,14 +957,9 @@ } - - -void HBinaryCall::PrintDataTo(StringStream* stream) { - first()->PrintNameTo(stream); - stream->Add(" "); - second()->PrintNameTo(stream); - stream->Add(" "); - stream->Add("#%d", argument_count()); +OStream& HBinaryCall::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(first()) << " " << NameOf(second()) << " #" + << argument_count(); } @@ -1005,7 +969,7 @@ DecompositionResult decomposition; bool index_is_decomposable = index()->TryDecompose(&decomposition); if (index_is_decomposable) { - ASSERT(decomposition.base() == base()); + DCHECK(decomposition.base() == base()); if (decomposition.offset() == offset() && decomposition.scale() == scale()) return; } else { @@ -1049,27 +1013,24 @@ } -void HBoundsCheck::PrintDataTo(StringStream* stream) { - index()->PrintNameTo(stream); - stream->Add(" "); - length()->PrintNameTo(stream); +OStream& HBoundsCheck::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(index()) << " " << NameOf(length()); if (base() != NULL && (offset() != 0 || scale() != 0)) { - stream->Add(" base: (("); + os << " base: (("; if (base() != index()) { - index()->PrintNameTo(stream); + os << NameOf(index()); } else { - stream->Add("index"); + os << "index"; } - stream->Add(" + %d) >> %d)", offset(), scale()); - } - if (skip_check()) { - stream->Add(" [DISABLED]"); + os << " + " << offset() << ") >> " 
<< scale() << ")"; } + if (skip_check()) os << " [DISABLED]"; + return os; } void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) { - ASSERT(CheckFlag(kFlexibleRepresentation)); + DCHECK(CheckFlag(kFlexibleRepresentation)); HValue* actual_index = index()->ActualValue(); HValue* actual_length = length()->ActualValue(); Representation index_rep = actual_index->representation(); @@ -1107,84 +1068,78 @@ } -void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) { - stream->Add("base: "); - base_index()->PrintNameTo(stream); - stream->Add(", check: "); - base_index()->PrintNameTo(stream); +OStream& HBoundsCheckBaseIndexInformation::PrintDataTo( + OStream& os) const { // NOLINT + // TODO(svenpanne) This 2nd base_index() looks wrong... + return os << "base: " << NameOf(base_index()) + << ", check: " << NameOf(base_index()); } -void HCallWithDescriptor::PrintDataTo(StringStream* stream) { +OStream& HCallWithDescriptor::PrintDataTo(OStream& os) const { // NOLINT for (int i = 0; i < OperandCount(); i++) { - OperandAt(i)->PrintNameTo(stream); - stream->Add(" "); + os << NameOf(OperandAt(i)) << " "; } - stream->Add("#%d", argument_count()); + return os << "#" << argument_count(); } -void HCallNewArray::PrintDataTo(StringStream* stream) { - stream->Add(ElementsKindToString(elements_kind())); - stream->Add(" "); - HBinaryCall::PrintDataTo(stream); +OStream& HCallNewArray::PrintDataTo(OStream& os) const { // NOLINT + os << ElementsKindToString(elements_kind()) << " "; + return HBinaryCall::PrintDataTo(os); } -void HCallRuntime::PrintDataTo(StringStream* stream) { - stream->Add("%o ", *name()); - if (save_doubles() == kSaveFPRegs) { - stream->Add("[save doubles] "); - } - stream->Add("#%d", argument_count()); +OStream& HCallRuntime::PrintDataTo(OStream& os) const { // NOLINT + os << name()->ToCString().get() << " "; + if (save_doubles() == kSaveFPRegs) os << "[save doubles] "; + return os << "#" << argument_count(); +} + + +OStream& 
HClassOfTestAndBranch::PrintDataTo(OStream& os) const { // NOLINT + return os << "class_of_test(" << NameOf(value()) << ", \"" + << class_name()->ToCString().get() << "\")"; } -void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("class_of_test("); - value()->PrintNameTo(stream); - stream->Add(", \"%o\")", *class_name()); +OStream& HWrapReceiver::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(receiver()) << " " << NameOf(function()); } -void HWrapReceiver::PrintDataTo(StringStream* stream) { - receiver()->PrintNameTo(stream); - stream->Add(" "); - function()->PrintNameTo(stream); +OStream& HAccessArgumentsAt::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(arguments()) << "[" << NameOf(index()) << "], length " + << NameOf(length()); } -void HAccessArgumentsAt::PrintDataTo(StringStream* stream) { - arguments()->PrintNameTo(stream); - stream->Add("["); - index()->PrintNameTo(stream); - stream->Add("], length "); - length()->PrintNameTo(stream); +OStream& HAllocateBlockContext::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(context()) << " " << NameOf(function()); } -void HControlInstruction::PrintDataTo(StringStream* stream) { - stream->Add(" goto ("); +OStream& HControlInstruction::PrintDataTo(OStream& os) const { // NOLINT + os << " goto ("; bool first_block = true; for (HSuccessorIterator it(this); !it.Done(); it.Advance()) { - stream->Add(first_block ? 
"B%d" : ", B%d", it.Current()->block_id()); + if (!first_block) os << ", "; + os << *it.Current(); first_block = false; } - stream->Add(")"); + return os << ")"; } -void HUnaryControlInstruction::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - HControlInstruction::PrintDataTo(stream); +OStream& HUnaryControlInstruction::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(value()); + return HControlInstruction::PrintDataTo(os); } -void HReturn::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" (pop "); - parameter_count()->PrintNameTo(stream); - stream->Add(" values)"); +OStream& HReturn::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()) << " (pop " << NameOf(parameter_count()) + << " values)"; } @@ -1216,8 +1171,8 @@ bool HBranch::KnownSuccessorBlock(HBasicBlock** block) { HValue* value = this->value(); if (value->EmitAtUses()) { - ASSERT(value->IsConstant()); - ASSERT(!value->representation().IsDouble()); + DCHECK(value->IsConstant()); + DCHECK(!value->representation().IsDouble()); *block = HConstant::cast(value)->BooleanValue() ? 
FirstSuccessor() : SecondSuccessor(); @@ -1228,28 +1183,44 @@ } -void HCompareMap::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" (%p)", *map().handle()); - HControlInstruction::PrintDataTo(stream); +OStream& HBranch::PrintDataTo(OStream& os) const { // NOLINT + return HUnaryControlInstruction::PrintDataTo(os) << " " + << expected_input_types(); +} + + +OStream& HCompareMap::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(value()) << " (" << *map().handle() << ")"; + HControlInstruction::PrintDataTo(os); if (known_successor_index() == 0) { - stream->Add(" [true]"); + os << " [true]"; } else if (known_successor_index() == 1) { - stream->Add(" [false]"); + os << " [false]"; } + return os; } const char* HUnaryMathOperation::OpName() const { switch (op()) { - case kMathFloor: return "floor"; - case kMathRound: return "round"; - case kMathAbs: return "abs"; - case kMathLog: return "log"; - case kMathExp: return "exp"; - case kMathSqrt: return "sqrt"; - case kMathPowHalf: return "pow-half"; - case kMathClz32: return "clz32"; + case kMathFloor: + return "floor"; + case kMathFround: + return "fround"; + case kMathRound: + return "round"; + case kMathAbs: + return "abs"; + case kMathLog: + return "log"; + case kMathExp: + return "exp"; + case kMathSqrt: + return "sqrt"; + case kMathPowHalf: + return "pow-half"; + case kMathClz32: + return "clz32"; default: UNREACHABLE(); return NULL; @@ -1282,43 +1253,41 @@ } -void HUnaryMathOperation::PrintDataTo(StringStream* stream) { - const char* name = OpName(); - stream->Add("%s ", name); - value()->PrintNameTo(stream); +OStream& HUnaryMathOperation::PrintDataTo(OStream& os) const { // NOLINT + return os << OpName() << " " << NameOf(value()); } -void HUnaryOperation::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); +OStream& HUnaryOperation::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()); } -void 
HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); +OStream& HHasInstanceTypeAndBranch::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(value()); switch (from_) { case FIRST_JS_RECEIVER_TYPE: - if (to_ == LAST_TYPE) stream->Add(" spec_object"); + if (to_ == LAST_TYPE) os << " spec_object"; break; case JS_REGEXP_TYPE: - if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp"); + if (to_ == JS_REGEXP_TYPE) os << " reg_exp"; break; case JS_ARRAY_TYPE: - if (to_ == JS_ARRAY_TYPE) stream->Add(" array"); + if (to_ == JS_ARRAY_TYPE) os << " array"; break; case JS_FUNCTION_TYPE: - if (to_ == JS_FUNCTION_TYPE) stream->Add(" function"); + if (to_ == JS_FUNCTION_TYPE) os << " function"; break; default: break; } + return os; } -void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" == %o", *type_literal_.handle()); - HControlInstruction::PrintDataTo(stream); +OStream& HTypeofIsAndBranch::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(value()) << " == " << type_literal()->ToCString().get(); + return HControlInstruction::PrintDataTo(os); } @@ -1335,10 +1304,9 @@ return heap->boolean_string(); } if (unique.IsKnownGlobal(heap->null_value())) { - return FLAG_harmony_typeof ? 
heap->null_string() - : heap->object_string(); + return heap->object_string(); } - ASSERT(unique.IsKnownGlobal(heap->undefined_value())); + DCHECK(unique.IsKnownGlobal(heap->undefined_value())); return heap->undefined_string(); } case SYMBOL_TYPE: @@ -1370,30 +1338,35 @@ } -void HCheckMapValue::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" "); - map()->PrintNameTo(stream); +OStream& HCheckMapValue::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()) << " " << NameOf(map()); } -void HForInPrepareMap::PrintDataTo(StringStream* stream) { - enumerable()->PrintNameTo(stream); +HValue* HCheckMapValue::Canonicalize() { + if (map()->IsConstant()) { + HConstant* c_map = HConstant::cast(map()); + return HCheckMaps::CreateAndInsertAfter( + block()->graph()->zone(), value(), c_map->MapValue(), + c_map->HasStableMapValue(), this); + } + return this; } -void HForInCacheArray::PrintDataTo(StringStream* stream) { - enumerable()->PrintNameTo(stream); - stream->Add(" "); - map()->PrintNameTo(stream); - stream->Add("[%d]", idx_); +OStream& HForInPrepareMap::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(enumerable()); } -void HLoadFieldByIndex::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - stream->Add(" "); - index()->PrintNameTo(stream); +OStream& HForInCacheArray::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(enumerable()) << " " << NameOf(map()) << "[" << idx_ + << "]"; +} + + +OStream& HLoadFieldByIndex::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(object()) << " " << NameOf(index()); } @@ -1529,8 +1502,8 @@ } -void HTypeof::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); +OStream& HTypeof::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()); } @@ -1540,7 +1513,10 @@ HConstant* c = HConstant::cast(value); if (c->HasNumberValue()) { double double_res = c->DoubleValue(); - if 
(representation.CanContainDouble(double_res)) { + if (representation.IsDouble()) { + return HConstant::New(zone, context, double_res); + + } else if (representation.CanContainDouble(double_res)) { return HConstant::New(zone, context, static_cast<int32_t>(double_res), representation); @@ -1551,19 +1527,20 @@ } -void HForceRepresentation::PrintDataTo(StringStream* stream) { - stream->Add("%s ", representation().Mnemonic()); - value()->PrintNameTo(stream); +OStream& HForceRepresentation::PrintDataTo(OStream& os) const { // NOLINT + return os << representation().Mnemonic() << " " << NameOf(value()); } -void HChange::PrintDataTo(StringStream* stream) { - HUnaryOperation::PrintDataTo(stream); - stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic()); +OStream& HChange::PrintDataTo(OStream& os) const { // NOLINT + HUnaryOperation::PrintDataTo(os); + os << " " << from().Mnemonic() << " to " << to().Mnemonic(); - if (CanTruncateToInt32()) stream->Add(" truncating-int32"); - if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?"); - if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan"); + if (CanTruncateToSmi()) os << " truncating-smi"; + if (CanTruncateToInt32()) os << " truncating-int32"; + if (CheckFlag(kBailoutOnMinusZero)) os << " -0?"; + if (CheckFlag(kAllowUndefinedAsNaN)) os << " allow-undefined-as-nan"; + return os; } @@ -1577,7 +1554,7 @@ val, representation(), false, false)); } } - if (op() == kMathFloor && value()->IsDiv() && value()->UseCount() == 1) { + if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) { HDiv* hdiv = HDiv::cast(value()); HValue* left = hdiv->left(); @@ -1618,7 +1595,9 @@ HValue* HCheckInstanceType::Canonicalize() { - if (check_ == IS_STRING && value()->type().IsString()) { + if ((check_ == IS_SPEC_OBJECT && value()->type().IsJSObject()) || + (check_ == IS_JS_ARRAY && value()->type().IsJSArray()) || + (check_ == IS_STRING && value()->type().IsString())) { return value(); } @@ -1633,7 +1612,7 @@ void 
HCheckInstanceType::GetCheckInterval(InstanceType* first, InstanceType* last) { - ASSERT(is_interval_check()); + DCHECK(is_interval_check()); switch (check_) { case IS_SPEC_OBJECT: *first = FIRST_SPEC_OBJECT_TYPE; @@ -1649,7 +1628,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) { - ASSERT(!is_interval_check()); + DCHECK(!is_interval_check()); switch (check_) { case IS_STRING: *mask = kIsNotStringMask; @@ -1665,39 +1644,39 @@ } -bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect, - HValue* dominator) { - ASSERT(side_effect == kMaps); - // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once - // type information is rich enough we should generalize this to any HType - // for which the map is known. - if (HasNoUses() && dominator->IsStoreNamedField()) { - HStoreNamedField* store = HStoreNamedField::cast(dominator); - if (!store->has_transition() || store->object() != value()) return false; - HConstant* transition = HConstant::cast(store->transition()); - if (map_set_.Contains(Unique<Map>::cast(transition->GetUnique()))) { - DeleteAndReplaceWith(NULL); - return true; - } - } - return false; +OStream& HCheckMaps::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(value()) << " [" << *maps()->at(0).handle(); + for (int i = 1; i < maps()->size(); ++i) { + os << "," << *maps()->at(i).handle(); + } + os << "]"; + if (IsStabilityCheck()) os << "(stability-check)"; + return os; } -void HCheckMaps::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" [%p", *map_set_.at(0).handle()); - for (int i = 1; i < map_set_.size(); ++i) { - stream->Add(",%p", *map_set_.at(i).handle()); +HValue* HCheckMaps::Canonicalize() { + if (!IsStabilityCheck() && maps_are_stable() && value()->IsConstant()) { + HConstant* c_value = HConstant::cast(value()); + if (c_value->HasObjectMap()) { + for (int i = 0; i < maps()->size(); ++i) { + if (c_value->ObjectMap() == maps()->at(i)) { + if (maps()->size() > 
1) { + set_maps(new(block()->graph()->zone()) UniqueSet<Map>( + maps()->at(i), block()->graph()->zone())); + } + MarkAsStabilityCheck(); + break; + } + } + } } - stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : ""); + return this; } -void HCheckValue::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add(" "); - object().handle()->ShortPrint(stream); +OStream& HCheckValue::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()) << " " << Brief(*object().handle()); } @@ -1707,7 +1686,7 @@ } -const char* HCheckInstanceType::GetCheckName() { +const char* HCheckInstanceType::GetCheckName() const { switch (check_) { case IS_SPEC_OBJECT: return "object"; case IS_JS_ARRAY: return "array"; @@ -1719,34 +1698,30 @@ } -void HCheckInstanceType::PrintDataTo(StringStream* stream) { - stream->Add("%s ", GetCheckName()); - HUnaryOperation::PrintDataTo(stream); +OStream& HCheckInstanceType::PrintDataTo(OStream& os) const { // NOLINT + os << GetCheckName() << " "; + return HUnaryOperation::PrintDataTo(os); } -void HCallStub::PrintDataTo(StringStream* stream) { - stream->Add("%s ", - CodeStub::MajorName(major_key_, false)); - HUnaryCall::PrintDataTo(stream); +OStream& HCallStub::PrintDataTo(OStream& os) const { // NOLINT + os << CodeStub::MajorName(major_key_, false) << " "; + return HUnaryCall::PrintDataTo(os); } -void HUnknownOSRValue::PrintDataTo(StringStream *stream) { +OStream& HUnknownOSRValue::PrintDataTo(OStream& os) const { // NOLINT const char* type = "expression"; if (environment_->is_local_index(index_)) type = "local"; if (environment_->is_special_index(index_)) type = "special"; if (environment_->is_parameter_index(index_)) type = "parameter"; - stream->Add("%s @ %d", type, index_); + return os << type << " @ " << index_; } -void HInstanceOf::PrintDataTo(StringStream* stream) { - left()->PrintNameTo(stream); - stream->Add(" "); - right()->PrintNameTo(stream); - stream->Add(" "); - context()->PrintNameTo(stream); 
+OStream& HInstanceOf::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(left()) << " " << NameOf(right()) << " " + << NameOf(context()); } @@ -1956,15 +1931,18 @@ } +// Returns the absolute value of its argument minus one, avoiding undefined +// behavior at kMinInt. +static int32_t AbsMinus1(int32_t a) { return a < 0 ? -(a + 1) : (a - 1); } + + Range* HMod::InferRange(Zone* zone) { if (representation().IsInteger32()) { Range* a = left()->range(); Range* b = right()->range(); - // The magnitude of the modulus is bounded by the right operand. Note that - // apart for the cases involving kMinInt, the calculation below is the same - // as Max(Abs(b->lower()), Abs(b->upper())) - 1. - int32_t positive_bound = -(Min(NegAbs(b->lower()), NegAbs(b->upper())) + 1); + // The magnitude of the modulus is bounded by the right operand. + int32_t positive_bound = Max(AbsMinus1(b->lower()), AbsMinus1(b->upper())); // The result of the modulo operation has the sign of its left operand. bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative(); @@ -2077,7 +2055,7 @@ void InductionVariableData::AddCheck(HBoundsCheck* check, int32_t upper_limit) { - ASSERT(limit_validity() != NULL); + DCHECK(limit_validity() != NULL); if (limit_validity() != check->block() && !limit_validity()->Dominates(check->block())) return; if (!phi()->block()->current_loop()->IsNestedInThisLoop( @@ -2115,9 +2093,9 @@ int32_t mask, HValue* index_base, HValue* context) { - ASSERT(first_check_in_block() != NULL); + DCHECK(first_check_in_block() != NULL); HValue* previous_index = first_check_in_block()->index(); - ASSERT(context != NULL); + DCHECK(context != NULL); Zone* zone = index_base->block()->graph()->zone(); set_added_constant(HConstant::New(zone, context, mask)); @@ -2131,18 +2109,18 @@ first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index()); HInstruction* new_index = HBitwise::New(zone, context, token, index_base, added_constant()); - 
ASSERT(new_index->IsBitwise()); + DCHECK(new_index->IsBitwise()); new_index->ClearAllSideEffects(); new_index->AssumeRepresentation(Representation::Integer32()); set_added_index(HBitwise::cast(new_index)); added_index()->InsertBefore(first_check_in_block()); } - ASSERT(added_index()->op() == token); + DCHECK(added_index()->op() == token); added_index()->SetOperandAt(1, index_base); added_index()->SetOperandAt(2, added_constant()); first_check_in_block()->SetOperandAt(0, added_index()); - if (previous_index->UseCount() == 0) { + if (previous_index->HasNoUses()) { previous_index->DeleteAndReplaceWith(NULL); } } @@ -2247,7 +2225,7 @@ */ void InductionVariableData::UpdateAdditionalLimit( InductionVariableLimitUpdate* update) { - ASSERT(update->updated_variable == this); + DCHECK(update->updated_variable == this); if (update->limit_is_upper) { swap(&additional_upper_limit_, &update->limit); swap(&additional_upper_limit_is_included_, &update->limit_is_included); @@ -2378,7 +2356,7 @@ } else { other_target = branch->SuccessorAt(0); token = Token::NegateCompareOp(token); - ASSERT(block == branch->SuccessorAt(1)); + DCHECK(block == branch->SuccessorAt(1)); } InductionVariableData* data; @@ -2443,7 +2421,7 @@ if (operation_ == kMathMax) { res->CombinedMax(b); } else { - ASSERT(operation_ == kMathMin); + DCHECK(operation_ == kMathMin); res->CombinedMin(b); } return res; @@ -2453,23 +2431,23 @@ } -void HPhi::PrintTo(StringStream* stream) { - stream->Add("["); +void HPushArguments::AddInput(HValue* value) { + inputs_.Add(NULL, value->block()->zone()); + SetOperandAt(OperandCount() - 1, value); +} + + +OStream& HPhi::PrintTo(OStream& os) const { // NOLINT + os << "["; for (int i = 0; i < OperandCount(); ++i) { - HValue* value = OperandAt(i); - stream->Add(" "); - value->PrintNameTo(stream); - stream->Add(" "); - } - stream->Add(" uses:%d_%ds_%di_%dd_%dt", - UseCount(), - smi_non_phi_uses() + smi_indirect_uses(), - int32_non_phi_uses() + int32_indirect_uses(), - 
double_non_phi_uses() + double_indirect_uses(), - tagged_non_phi_uses() + tagged_indirect_uses()); - PrintRangeTo(stream); - PrintTypeTo(stream); - stream->Add("]"); + os << " " << NameOf(OperandAt(i)) << " "; + } + return os << " uses:" << UseCount() << "_" + << smi_non_phi_uses() + smi_indirect_uses() << "s_" + << int32_non_phi_uses() + int32_indirect_uses() << "i_" + << double_non_phi_uses() + double_indirect_uses() << "d_" + << tagged_non_phi_uses() + tagged_indirect_uses() << "t" + << TypeOf(this) << "]"; } @@ -2503,15 +2481,15 @@ HValue* current = OperandAt(position++); if (current != this && current != candidate) return NULL; } - ASSERT(candidate != this); + DCHECK(candidate != this); return candidate; } void HPhi::DeleteFromGraph() { - ASSERT(block() != NULL); + DCHECK(block() != NULL); block()->RemovePhi(this); - ASSERT(block() == NULL); + DCHECK(block() == NULL); } @@ -2591,27 +2569,28 @@ } -void HSimulate::PrintDataTo(StringStream* stream) { - stream->Add("id=%d", ast_id().ToInt()); - if (pop_count_ > 0) stream->Add(" pop %d", pop_count_); +OStream& HSimulate::PrintDataTo(OStream& os) const { // NOLINT + os << "id=" << ast_id().ToInt(); + if (pop_count_ > 0) os << " pop " << pop_count_; if (values_.length() > 0) { - if (pop_count_ > 0) stream->Add(" /"); + if (pop_count_ > 0) os << " /"; for (int i = values_.length() - 1; i >= 0; --i) { if (HasAssignedIndexAt(i)) { - stream->Add(" var[%d] = ", GetAssignedIndexAt(i)); + os << " var[" << GetAssignedIndexAt(i) << "] = "; } else { - stream->Add(" push "); + os << " push "; } - values_[i]->PrintNameTo(stream); - if (i > 0) stream->Add(","); + os << NameOf(values_[i]); + if (i > 0) os << ","; } } + return os; } void HSimulate::ReplayEnvironment(HEnvironment* env) { if (done_with_replay_) return; - ASSERT(env != NULL); + DCHECK(env != NULL); env->set_ast_id(ast_id()); env->Drop(pop_count()); for (int i = values()->length() - 1; i >= 0; --i) { @@ -2644,7 +2623,7 @@ // Replay captured objects by replacing all 
captured objects with the // same capture id in the current and all outer environments. void HCapturedObject::ReplayEnvironment(HEnvironment* env) { - ASSERT(env != NULL); + DCHECK(env != NULL); while (env != NULL) { ReplayEnvironmentNested(env->values(), this); env = env->outer(); @@ -2652,22 +2631,22 @@ } -void HCapturedObject::PrintDataTo(StringStream* stream) { - stream->Add("#%d ", capture_id()); - HDematerializedObject::PrintDataTo(stream); +OStream& HCapturedObject::PrintDataTo(OStream& os) const { // NOLINT + os << "#" << capture_id() << " "; + return HDematerializedObject::PrintDataTo(os); } void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target, Zone* zone) { - ASSERT(return_target->IsInlineReturnTarget()); + DCHECK(return_target->IsInlineReturnTarget()); return_targets_.Add(return_target, zone); } -void HEnterInlined::PrintDataTo(StringStream* stream) { - SmartArrayPointer<char> name = function()->debug_name()->ToCString(); - stream->Add("%s, id=%d", name.get(), function()->id().ToInt()); +OStream& HEnterInlined::PrintDataTo(OStream& os) const { // NOLINT + return os << function()->debug_name()->ToCString().get() + << ", id=" << function()->id().ToInt(); } @@ -2677,26 +2656,32 @@ } -HConstant::HConstant(Handle<Object> handle, Representation r) - : HTemplateInstruction<0>(HType::TypeFromValue(handle)), - object_(Unique<Object>::CreateUninitialized(handle)), +HConstant::HConstant(Handle<Object> object, Representation r) + : HTemplateInstruction<0>(HType::FromValue(object)), + object_(Unique<Object>::CreateUninitialized(object)), + object_map_(Handle<Map>::null()), + has_stable_map_value_(false), has_smi_value_(false), has_int32_value_(false), has_double_value_(false), has_external_reference_value_(false), is_not_in_new_space_(true), - boolean_value_(handle->BooleanValue()), + boolean_value_(object->BooleanValue()), is_undetectable_(false), instance_type_(kUnknownInstanceType) { - if (handle->IsHeapObject()) { - Handle<HeapObject> heap_obj = 
Handle<HeapObject>::cast(handle); - Heap* heap = heap_obj->GetHeap(); - is_not_in_new_space_ = !heap->InNewSpace(*handle); - instance_type_ = heap_obj->map()->instance_type(); - is_undetectable_ = heap_obj->map()->is_undetectable(); + if (object->IsHeapObject()) { + Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); + Isolate* isolate = heap_object->GetIsolate(); + Handle<Map> map(heap_object->map(), isolate); + is_not_in_new_space_ = !isolate->heap()->InNewSpace(*object); + instance_type_ = map->instance_type(); + is_undetectable_ = map->is_undetectable(); + if (map->is_stable()) object_map_ = Unique<Map>::CreateImmovable(map); + has_stable_map_value_ = (instance_type_ == MAP_TYPE && + Handle<Map>::cast(heap_object)->is_stable()); } - if (handle->IsNumber()) { - double n = handle->Number(); + if (object->IsNumber()) { + double n = object->Number(); has_int32_value_ = IsInteger32(n); int32_value_ = DoubleToInt32(n); has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_); @@ -2709,7 +2694,9 @@ } -HConstant::HConstant(Unique<Object> unique, +HConstant::HConstant(Unique<Object> object, + Unique<Map> object_map, + bool has_stable_map_value, Representation r, HType type, bool is_not_in_new_space, @@ -2717,7 +2704,9 @@ bool is_undetectable, InstanceType instance_type) : HTemplateInstruction<0>(type), - object_(unique), + object_(object), + object_map_(object_map), + has_stable_map_value_(has_stable_map_value), has_smi_value_(false), has_int32_value_(false), has_double_value_(false), @@ -2726,8 +2715,8 @@ boolean_value_(boolean_value), is_undetectable_(is_undetectable), instance_type_(instance_type) { - ASSERT(!unique.handle().is_null()); - ASSERT(!type.IsTaggedNumber()); + DCHECK(!object.handle().is_null()); + DCHECK(!type.IsTaggedNumber() || type.IsNone()); Initialize(r); } @@ -2737,6 +2726,8 @@ bool is_not_in_new_space, Unique<Object> object) : object_(object), + object_map_(Handle<Map>::null()), + has_stable_map_value_(false), 
has_smi_value_(Smi::IsValid(integer_value)), has_int32_value_(true), has_double_value_(true), @@ -2761,6 +2752,8 @@ bool is_not_in_new_space, Unique<Object> object) : object_(object), + object_map_(Handle<Map>::null()), + has_stable_map_value_(false), has_int32_value_(IsInteger32(double_value)), has_double_value_(true), has_external_reference_value_(false), @@ -2781,8 +2774,10 @@ HConstant::HConstant(ExternalReference reference) - : HTemplateInstruction<0>(HType::None()), + : HTemplateInstruction<0>(HType::Any()), object_(Unique<Object>(Handle<Object>::null())), + object_map_(Handle<Map>::null()), + has_stable_map_value_(false), has_smi_value_(false), has_int32_value_(false), has_double_value_(false), @@ -2837,10 +2832,10 @@ return false; } - ASSERT(!object_.handle().is_null()); + DCHECK(!object_.handle().is_null()); Heap* heap = isolate()->heap(); - ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value())); - ASSERT(!object_.IsKnownGlobal(heap->nan_value())); + DCHECK(!object_.IsKnownGlobal(heap->minus_zero_value())); + DCHECK(!object_.IsKnownGlobal(heap->nan_value())); return #define IMMORTAL_IMMOVABLE_ROOT(name) \ object_.IsKnownGlobal(heap->name()) || @@ -2859,15 +2854,16 @@ bool HConstant::EmitAtUses() { - ASSERT(IsLinked()); + DCHECK(IsLinked()); if (block()->graph()->has_osr() && block()->graph()->IsStandardConstant(this)) { // TODO(titzer): this seems like a hack that should be fixed by custom OSR. 
return true; } - if (UseCount() == 0) return true; + if (HasNoUses()) return true; if (IsCell()) return false; if (representation().IsDouble()) return false; + if (representation().IsExternal()) return false; return true; } @@ -2886,8 +2882,10 @@ if (has_external_reference_value_) { return new(zone) HConstant(external_reference_value_); } - ASSERT(!object_.handle().is_null()); + DCHECK(!object_.handle().is_null()); return new(zone) HConstant(object_, + object_map_, + has_stable_map_value_, r, type_, is_not_in_new_space_, @@ -2921,7 +2919,7 @@ res = handle->BooleanValue() ? new(zone) HConstant(1) : new(zone) HConstant(0); } else if (handle->IsUndefined()) { - res = new(zone) HConstant(OS::nan_value()); + res = new(zone) HConstant(base::OS::nan_value()); } else if (handle->IsNull()) { res = new(zone) HConstant(0); } @@ -2929,34 +2927,35 @@ } -void HConstant::PrintDataTo(StringStream* stream) { +OStream& HConstant::PrintDataTo(OStream& os) const { // NOLINT if (has_int32_value_) { - stream->Add("%d ", int32_value_); + os << int32_value_ << " "; } else if (has_double_value_) { - stream->Add("%f ", FmtElm(double_value_)); + os << double_value_ << " "; } else if (has_external_reference_value_) { - stream->Add("%p ", reinterpret_cast<void*>( - external_reference_value_.address())); + os << reinterpret_cast<void*>(external_reference_value_.address()) << " "; } else { - handle(Isolate::Current())->ShortPrint(stream); - } - if (!is_not_in_new_space_) { - stream->Add("[new space] "); - } + // The handle() method is silently and lazily mutating the object. 
+ Handle<Object> h = const_cast<HConstant*>(this)->handle(Isolate::Current()); + os << Brief(*h) << " "; + if (HasStableMapValue()) os << "[stable-map] "; + if (HasObjectMap()) os << "[map " << *ObjectMap().handle() << "] "; + } + if (!is_not_in_new_space_) os << "[new space] "; + return os; } -void HBinaryOperation::PrintDataTo(StringStream* stream) { - left()->PrintNameTo(stream); - stream->Add(" "); - right()->PrintNameTo(stream); - if (CheckFlag(kCanOverflow)) stream->Add(" !"); - if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?"); +OStream& HBinaryOperation::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(left()) << " " << NameOf(right()); + if (CheckFlag(kCanOverflow)) os << " !"; + if (CheckFlag(kBailoutOnMinusZero)) os << " -0?"; + return os; } void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) { - ASSERT(CheckFlag(kFlexibleRepresentation)); + DCHECK(CheckFlag(kFlexibleRepresentation)); Representation new_rep = RepresentationFromInputs(); UpdateRepresentation(new_rep, h_infer, "inputs"); @@ -3023,7 +3022,7 @@ void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) { - ASSERT(CheckFlag(kFlexibleRepresentation)); + DCHECK(CheckFlag(kFlexibleRepresentation)); Representation new_rep = RepresentationFromInputs(); UpdateRepresentation(new_rep, h_infer, "inputs"); // Do not care about uses. 
@@ -3174,35 +3173,27 @@ } -void HCompareGeneric::PrintDataTo(StringStream* stream) { - stream->Add(Token::Name(token())); - stream->Add(" "); - HBinaryOperation::PrintDataTo(stream); +OStream& HCompareGeneric::PrintDataTo(OStream& os) const { // NOLINT + os << Token::Name(token()) << " "; + return HBinaryOperation::PrintDataTo(os); } -void HStringCompareAndBranch::PrintDataTo(StringStream* stream) { - stream->Add(Token::Name(token())); - stream->Add(" "); - HControlInstruction::PrintDataTo(stream); +OStream& HStringCompareAndBranch::PrintDataTo(OStream& os) const { // NOLINT + os << Token::Name(token()) << " "; + return HControlInstruction::PrintDataTo(os); } -void HCompareNumericAndBranch::PrintDataTo(StringStream* stream) { - stream->Add(Token::Name(token())); - stream->Add(" "); - left()->PrintNameTo(stream); - stream->Add(" "); - right()->PrintNameTo(stream); - HControlInstruction::PrintDataTo(stream); +OStream& HCompareNumericAndBranch::PrintDataTo(OStream& os) const { // NOLINT + os << Token::Name(token()) << " " << NameOf(left()) << " " << NameOf(right()); + return HControlInstruction::PrintDataTo(os); } -void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) { - left()->PrintNameTo(stream); - stream->Add(" "); - right()->PrintNameTo(stream); - HControlInstruction::PrintDataTo(stream); +OStream& HCompareObjectEqAndBranch::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(left()) << " " << NameOf(right()); + return HControlInstruction::PrintDataTo(os); } @@ -3245,11 +3236,27 @@ bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) { + if (known_successor_index() != kNoKnownSuccessorIndex) { + *block = SuccessorAt(known_successor_index()); + return true; + } if (FLAG_fold_constants && value()->IsConstant()) { *block = HConstant::cast(value())->HasStringValue() ? 
FirstSuccessor() : SecondSuccessor(); return true; } + if (value()->type().IsString()) { + *block = FirstSuccessor(); + return true; + } + if (value()->type().IsSmi() || + value()->type().IsNull() || + value()->type().IsBoolean() || + value()->type().IsUndefined() || + value()->type().IsJSObject()) { + *block = SecondSuccessor(); + return true; + } *block = NULL; return false; } @@ -3284,6 +3291,21 @@ } +bool HCompareNumericAndBranch::KnownSuccessorBlock(HBasicBlock** block) { + if (left() == right() && + left()->representation().IsSmiOrInteger32()) { + *block = (token() == Token::EQ || + token() == Token::EQ_STRICT || + token() == Token::LTE || + token() == Token::GTE) + ? FirstSuccessor() : SecondSuccessor(); + return true; + } + *block = NULL; + return false; +} + + bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) { if (FLAG_fold_constants && value()->IsConstant()) { HConstant* constant = HConstant::cast(value()); @@ -3309,9 +3331,8 @@ } - -void HGoto::PrintDataTo(StringStream* stream) { - stream->Add("B%d", SuccessorAt(0)->block_id()); +OStream& HGoto::PrintDataTo(OStream& os) const { // NOLINT + return os << *SuccessorAt(0); } @@ -3354,78 +3375,65 @@ } -void HParameter::PrintDataTo(StringStream* stream) { - stream->Add("%u", index()); +OStream& HParameter::PrintDataTo(OStream& os) const { // NOLINT + return os << index(); } -void HLoadNamedField::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - access_.PrintTo(stream); +OStream& HLoadNamedField::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(object()) << access_; - if (HasDependency()) { - stream->Add(" "); - dependency()->PrintNameTo(stream); - } -} - - -HCheckMaps* HCheckMaps::New(Zone* zone, - HValue* context, - HValue* value, - Handle<Map> map, - CompilationInfo* info, - HValue* typecheck) { - HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck); - check_map->Add(map, zone); - if (map->CanOmitMapChecks() && - value->IsConstant() && - 
HConstant::cast(value)->HasMap(map)) { - // TODO(titzer): collect dependent map checks into a list. - check_map->omit_ = true; - if (map->CanTransition()) { - map->AddDependentCompilationInfo( - DependentCode::kPrototypeCheckGroup, info); + if (maps() != NULL) { + os << " [" << *maps()->at(0).handle(); + for (int i = 1; i < maps()->size(); ++i) { + os << "," << *maps()->at(i).handle(); } + os << "]"; } - return check_map; + + if (HasDependency()) os << " " << NameOf(dependency()); + return os; } -void HLoadNamedGeneric::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - stream->Add("."); - stream->Add(String::cast(*name())->ToCString().get()); +OStream& HLoadNamedGeneric::PrintDataTo(OStream& os) const { // NOLINT + Handle<String> n = Handle<String>::cast(name()); + return os << NameOf(object()) << "." << n->ToCString().get(); } -void HLoadKeyed::PrintDataTo(StringStream* stream) { +OStream& HLoadKeyed::PrintDataTo(OStream& os) const { // NOLINT if (!is_external()) { - elements()->PrintNameTo(stream); + os << NameOf(elements()); } else { - ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && + DCHECK(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND); - elements()->PrintNameTo(stream); - stream->Add("."); - stream->Add(ElementsKindToString(elements_kind())); + os << NameOf(elements()) << "." 
<< ElementsKindToString(elements_kind()); } - stream->Add("["); - key()->PrintNameTo(stream); - if (IsDehoisted()) { - stream->Add(" + %d]", index_offset()); - } else { - stream->Add("]"); - } + os << "[" << NameOf(key()); + if (IsDehoisted()) os << " + " << base_offset(); + os << "]"; + + if (HasDependency()) os << " " << NameOf(dependency()); + if (RequiresHoleCheck()) os << " check_hole"; + return os; +} - if (HasDependency()) { - stream->Add(" "); - dependency()->PrintNameTo(stream); - } - if (RequiresHoleCheck()) { - stream->Add(" check_hole"); - } +bool HLoadKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) { + // The base offset is usually simply the size of the array header, except + // with dehoisting adds an addition offset due to a array index key + // manipulation, in which case it becomes (array header size + + // constant-offset-from-key * kPointerSize) + uint32_t base_offset = BaseOffsetField::decode(bit_field_); + v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset; + addition_result += increase_by_value; + if (!addition_result.IsValid()) return false; + base_offset = addition_result.ValueOrDie(); + if (!BaseOffsetField::is_valid(base_offset)) return false; + bit_field_ = BaseOffsetField::update(bit_field_, base_offset); + return true; } @@ -3482,11 +3490,8 @@ } -void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - stream->Add("["); - key()->PrintNameTo(stream); - stream->Add("]"); +OStream& HLoadKeyedGeneric::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(object()) << "[" << NameOf(key()) << "]"; } @@ -3527,79 +3532,60 @@ } -void HStoreNamedGeneric::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - stream->Add("."); - ASSERT(name()->IsString()); - stream->Add(String::cast(*name())->ToCString().get()); - stream->Add(" = "); - value()->PrintNameTo(stream); +OStream& HStoreNamedGeneric::PrintDataTo(OStream& os) const { // NOLINT + 
Handle<String> n = Handle<String>::cast(name()); + return os << NameOf(object()) << "." << n->ToCString().get() << " = " + << NameOf(value()); } -void HStoreNamedField::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - access_.PrintTo(stream); - stream->Add(" = "); - value()->PrintNameTo(stream); - if (NeedsWriteBarrier()) { - stream->Add(" (write-barrier)"); - } - if (has_transition()) { - stream->Add(" (transition map %p)", *transition_map()); - } +OStream& HStoreNamedField::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(object()) << access_ << " = " << NameOf(value()); + if (NeedsWriteBarrier()) os << " (write-barrier)"; + if (has_transition()) os << " (transition map " << *transition_map() << ")"; + return os; } -void HStoreKeyed::PrintDataTo(StringStream* stream) { +OStream& HStoreKeyed::PrintDataTo(OStream& os) const { // NOLINT if (!is_external()) { - elements()->PrintNameTo(stream); + os << NameOf(elements()); } else { - elements()->PrintNameTo(stream); - stream->Add("."); - stream->Add(ElementsKindToString(elements_kind())); - ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && + DCHECK(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND); + os << NameOf(elements()) << "." 
<< ElementsKindToString(elements_kind()); } - stream->Add("["); - key()->PrintNameTo(stream); - if (IsDehoisted()) { - stream->Add(" + %d] = ", index_offset()); - } else { - stream->Add("] = "); - } - - value()->PrintNameTo(stream); + os << "[" << NameOf(key()); + if (IsDehoisted()) os << " + " << base_offset(); + return os << "] = " << NameOf(value()); } -void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - stream->Add("["); - key()->PrintNameTo(stream); - stream->Add("] = "); - value()->PrintNameTo(stream); +OStream& HStoreKeyedGeneric::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(object()) << "[" << NameOf(key()) + << "] = " << NameOf(value()); } -void HTransitionElementsKind::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); +OStream& HTransitionElementsKind::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(object()); ElementsKind from_kind = original_map().handle()->elements_kind(); ElementsKind to_kind = transitioned_map().handle()->elements_kind(); - stream->Add(" %p [%s] -> %p [%s]", - *original_map().handle(), - ElementsAccessor::ForKind(from_kind)->name(), - *transitioned_map().handle(), - ElementsAccessor::ForKind(to_kind)->name()); - if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)"); + os << " " << *original_map().handle() << " [" + << ElementsAccessor::ForKind(from_kind)->name() << "] -> " + << *transitioned_map().handle() << " [" + << ElementsAccessor::ForKind(to_kind)->name() << "]"; + if (IsSimpleMapChangeTransition(from_kind, to_kind)) os << " (simple)"; + return os; } -void HLoadGlobalCell::PrintDataTo(StringStream* stream) { - stream->Add("[%p]", *cell().handle()); - if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); - if (details_.IsReadOnly()) stream->Add(" (read-only)"); +OStream& HLoadGlobalCell::PrintDataTo(OStream& os) const { // NOLINT + os << "[" << *cell().handle() << "]"; + if (!details_.IsDontDelete()) os << " 
(deleteable)"; + if (details_.IsReadOnly()) os << " (read-only)"; + return os; } @@ -3613,36 +3599,33 @@ } -void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) { - stream->Add("%o ", *name()); +OStream& HLoadGlobalGeneric::PrintDataTo(OStream& os) const { // NOLINT + return os << name()->ToCString().get() << " "; } -void HInnerAllocatedObject::PrintDataTo(StringStream* stream) { - base_object()->PrintNameTo(stream); - stream->Add(" offset "); - offset()->PrintTo(stream); +OStream& HInnerAllocatedObject::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(base_object()) << " offset "; + return offset()->PrintTo(os); } -void HStoreGlobalCell::PrintDataTo(StringStream* stream) { - stream->Add("[%p] = ", *cell().handle()); - value()->PrintNameTo(stream); - if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); - if (details_.IsReadOnly()) stream->Add(" (read-only)"); +OStream& HStoreGlobalCell::PrintDataTo(OStream& os) const { // NOLINT + os << "[" << *cell().handle() << "] = " << NameOf(value()); + if (!details_.IsDontDelete()) os << " (deleteable)"; + if (details_.IsReadOnly()) os << " (read-only)"; + return os; } -void HLoadContextSlot::PrintDataTo(StringStream* stream) { - value()->PrintNameTo(stream); - stream->Add("[%d]", slot_index()); +OStream& HLoadContextSlot::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(value()) << "[" << slot_index() << "]"; } -void HStoreContextSlot::PrintDataTo(StringStream* stream) { - context()->PrintNameTo(stream); - stream->Add("[%d] = ", slot_index()); - value()->PrintNameTo(stream); +OStream& HStoreContextSlot::PrintDataTo(OStream& os) const { // NOLINT + return os << NameOf(context()) << "[" << slot_index() + << "] = " << NameOf(value()); } @@ -3672,6 +3655,12 @@ Representation HUnaryMathOperation::RepresentationFromInputs() { + if (SupportsFlexibleFloorAndRound() && + (op_ == kMathFloor || op_ == kMathRound)) { + // Floor and Round always take a double input. 
The integral result can be + // used as an integer or a double. Infer the representation from the uses. + return Representation::None(); + } Representation rep = representation(); // If any of the actual input representation is more general than what we // have so far but not Tagged, use that representation instead. @@ -3685,7 +3674,7 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect, HValue* dominator) { - ASSERT(side_effect == kNewSpacePromotion); + DCHECK(side_effect == kNewSpacePromotion); Zone* zone = block()->zone(); if (!FLAG_use_allocation_folding) return false; @@ -3712,10 +3701,10 @@ HValue* current_size = size(); // TODO(hpayer): Add support for non-constant allocation in dominator. - if (!current_size->IsInteger32Constant() || - !dominator_size->IsInteger32Constant()) { + if (!dominator_size->IsInteger32Constant()) { if (FLAG_trace_allocation_folding) { - PrintF("#%d (%s) cannot fold into #%d (%s), dynamic allocation size\n", + PrintF("#%d (%s) cannot fold into #%d (%s), " + "dynamic allocation size in dominator\n", id(), Mnemonic(), dominator->id(), dominator->Mnemonic()); } return false; @@ -3726,7 +3715,33 @@ return false; } - ASSERT((IsNewSpaceAllocation() && + if (!has_size_upper_bound()) { + if (FLAG_trace_allocation_folding) { + PrintF("#%d (%s) cannot fold into #%d (%s), " + "can't estimate total allocation size\n", + id(), Mnemonic(), dominator->id(), dominator->Mnemonic()); + } + return false; + } + + if (!current_size->IsInteger32Constant()) { + // If it's not constant then it is a size_in_bytes calculation graph + // like this: (const_header_size + const_element_size * size). 
+ DCHECK(current_size->IsInstruction()); + + HInstruction* current_instr = HInstruction::cast(current_size); + if (!current_instr->Dominates(dominator_allocate)) { + if (FLAG_trace_allocation_folding) { + PrintF("#%d (%s) cannot fold into #%d (%s), dynamic size " + "value does not dominate target allocation\n", + id(), Mnemonic(), dominator_allocate->id(), + dominator_allocate->Mnemonic()); + } + return false; + } + } + + DCHECK((IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) || (IsOldDataSpaceAllocation() && dominator_allocate->IsOldDataSpaceAllocation()) || @@ -3738,20 +3753,16 @@ int32_t original_object_size = HConstant::cast(dominator_size)->GetInteger32Constant(); int32_t dominator_size_constant = original_object_size; - int32_t current_size_constant = - HConstant::cast(current_size)->GetInteger32Constant(); - int32_t new_dominator_size = dominator_size_constant + current_size_constant; if (MustAllocateDoubleAligned()) { - if (!dominator_allocate->MustAllocateDoubleAligned()) { - dominator_allocate->MakeDoubleAligned(); - } if ((dominator_size_constant & kDoubleAlignmentMask) != 0) { dominator_size_constant += kDoubleSize / 2; - new_dominator_size += kDoubleSize / 2; } } + int32_t current_size_max_value = size_upper_bound()->GetInteger32Constant(); + int32_t new_dominator_size = dominator_size_constant + current_size_max_value; + // Since we clear the first word after folded memory, we cannot use the // whole Page::kMaxRegularHeapObjectSize memory. 
if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) { @@ -3763,27 +3774,54 @@ return false; } - HInstruction* new_dominator_size_constant = HConstant::CreateAndInsertBefore( - zone, - context(), - new_dominator_size, - Representation::None(), - dominator_allocate); - dominator_allocate->UpdateSize(new_dominator_size_constant); + HInstruction* new_dominator_size_value; + + if (current_size->IsInteger32Constant()) { + new_dominator_size_value = + HConstant::CreateAndInsertBefore(zone, + context(), + new_dominator_size, + Representation::None(), + dominator_allocate); + } else { + HValue* new_dominator_size_constant = + HConstant::CreateAndInsertBefore(zone, + context(), + dominator_size_constant, + Representation::Integer32(), + dominator_allocate); + + // Add old and new size together and insert. + current_size->ChangeRepresentation(Representation::Integer32()); + + new_dominator_size_value = HAdd::New(zone, context(), + new_dominator_size_constant, current_size); + new_dominator_size_value->ClearFlag(HValue::kCanOverflow); + new_dominator_size_value->ChangeRepresentation(Representation::Integer32()); + + new_dominator_size_value->InsertBefore(dominator_allocate); + } + + dominator_allocate->UpdateSize(new_dominator_size_value); + if (MustAllocateDoubleAligned()) { + if (!dominator_allocate->MustAllocateDoubleAligned()) { + dominator_allocate->MakeDoubleAligned(); + } + } + + bool keep_new_space_iterable = FLAG_log_gc || FLAG_heap_stats; #ifdef VERIFY_HEAP - if (FLAG_verify_heap && dominator_allocate->IsNewSpaceAllocation()) { + keep_new_space_iterable = keep_new_space_iterable || FLAG_verify_heap; +#endif + + if (keep_new_space_iterable && dominator_allocate->IsNewSpaceAllocation()) { dominator_allocate->MakePrefillWithFiller(); } else { // TODO(hpayer): This is a short-term hack to make allocation mementos // work again in new space. 
dominator_allocate->ClearNextMapWord(original_object_size); } -#else - // TODO(hpayer): This is a short-term hack to make allocation mementos - // work again in new space. - dominator_allocate->ClearNextMapWord(original_object_size); -#endif dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord()); @@ -3851,7 +3889,7 @@ return NULL; } - ASSERT((IsOldDataSpaceAllocation() && + DCHECK((IsOldDataSpaceAllocation() && dominator_dominator->IsOldDataSpaceAllocation()) || (IsOldPointerSpaceAllocation() && dominator_dominator->IsOldPointerSpaceAllocation())); @@ -3877,7 +3915,7 @@ void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) { - ASSERT(filler_free_space_size_ != NULL); + DCHECK(filler_free_space_size_ != NULL); Zone* zone = block()->zone(); // We must explicitly force Smi representation here because on x64 we // would otherwise automatically choose int32, but the actual store @@ -3894,18 +3932,15 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) { - ASSERT(filler_free_space_size_ == NULL); + DCHECK(filler_free_space_size_ == NULL); Zone* zone = block()->zone(); HInstruction* free_space_instr = HInnerAllocatedObject::New(zone, context(), dominating_allocate_, dominating_allocate_->size(), type()); free_space_instr->InsertBefore(this); - HConstant* filler_map = HConstant::New( - zone, - context(), - isolate()->factory()->free_space_map()); - filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready - filler_map->InsertAfter(free_space_instr); + HConstant* filler_map = HConstant::CreateAndInsertAfter( + zone, Unique<Map>::CreateImmovable( + isolate()->factory()->free_space_map()), true, free_space_instr); HInstruction* store_map = HStoreNamedField::New(zone, context(), free_space_instr, HObjectAccess::ForMap(), filler_map); store_map->SetFlag(HValue::kHasNoObservableSideEffects); @@ -3943,15 +3978,27 @@ } -void HAllocate::PrintDataTo(StringStream* stream) { - size()->PrintNameTo(stream); - stream->Add(" ("); - if 
(IsNewSpaceAllocation()) stream->Add("N"); - if (IsOldPointerSpaceAllocation()) stream->Add("P"); - if (IsOldDataSpaceAllocation()) stream->Add("D"); - if (MustAllocateDoubleAligned()) stream->Add("A"); - if (MustPrefillWithFiller()) stream->Add("F"); - stream->Add(")"); +OStream& HAllocate::PrintDataTo(OStream& os) const { // NOLINT + os << NameOf(size()) << " ("; + if (IsNewSpaceAllocation()) os << "N"; + if (IsOldPointerSpaceAllocation()) os << "P"; + if (IsOldDataSpaceAllocation()) os << "D"; + if (MustAllocateDoubleAligned()) os << "A"; + if (MustPrefillWithFiller()) os << "F"; + return os << ")"; +} + + +bool HStoreKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) { + // The base offset is usually simply the size of the array header, except + // with dehoisting adds an addition offset due to a array index key + // manipulation, in which case it becomes (array header size + + // constant-offset-from-key * kPointerSize) + v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset_; + addition_result += increase_by_value; + if (!addition_result.IsValid()) return false; + base_offset_ = addition_result.ValueOrDie(); + return true; } @@ -4029,10 +4076,9 @@ Handle<String> right_string = c_right->StringValue(); // Prevent possible exception by invalid string length. 
if (left_string->length() + right_string->length() < String::kMaxLength) { - Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString( + MaybeHandle<String> concat = zone->isolate()->factory()->NewConsString( c_left->StringValue(), c_right->StringValue()); - ASSERT(!concat.is_null()); - return HConstant::New(zone, context, concat); + return HConstant::New(zone, context, concat.ToHandleChecked()); } } } @@ -4041,19 +4087,21 @@ } -void HStringAdd::PrintDataTo(StringStream* stream) { +OStream& HStringAdd::PrintDataTo(OStream& os) const { // NOLINT if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) { - stream->Add("_CheckBoth"); + os << "_CheckBoth"; } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) { - stream->Add("_CheckLeft"); + os << "_CheckLeft"; } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) { - stream->Add("_CheckRight"); + os << "_CheckRight"; } - HBinaryOperation::PrintDataTo(stream); - stream->Add(" ("); - if (pretenure_flag() == NOT_TENURED) stream->Add("N"); - else if (pretenure_flag() == TENURED) stream->Add("D"); - stream->Add(")"); + HBinaryOperation::PrintDataTo(os); + os << " ("; + if (pretenure_flag() == NOT_TENURED) + os << "N"; + else if (pretenure_flag() == TENURED) + os << "D"; + return os << ")"; } @@ -4066,7 +4114,7 @@ if (std::isfinite(c_code->DoubleValue())) { uint32_t code = c_code->NumberValueAsInteger32() & 0xffff; return HConstant::New(zone, context, - LookupSingleCharacterStringFromCode(isolate, code)); + isolate->factory()->LookupSingleCharacterStringFromCode(code)); } return HConstant::New(zone, context, isolate->factory()->empty_string()); } @@ -4084,7 +4132,7 @@ if (!constant->HasNumberValue()) break; double d = constant->DoubleValue(); if (std::isnan(d)) { // NaN poisons everything. - return H_CONSTANT_DOUBLE(OS::nan_value()); + return H_CONSTANT_DOUBLE(base::OS::nan_value()); } if (std::isinf(d)) { // +Infinity and -Infinity. 
switch (op) { @@ -4092,11 +4140,12 @@ return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0); case kMathLog: case kMathSqrt: - return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value()); + return H_CONSTANT_DOUBLE((d > 0.0) ? d : base::OS::nan_value()); case kMathPowHalf: case kMathAbs: return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d); case kMathRound: + case kMathFround: case kMathFloor: return H_CONSTANT_DOUBLE(d); case kMathClz32: @@ -4123,9 +4172,11 @@ // Doubles are represented as Significant * 2 ^ Exponent. If the // Exponent is not negative, the double value is already an integer. if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d); - return H_CONSTANT_DOUBLE(std::floor(d + 0.5)); + return H_CONSTANT_DOUBLE(Floor(d + 0.5)); + case kMathFround: + return H_CONSTANT_DOUBLE(static_cast<double>(static_cast<float>(d))); case kMathFloor: - return H_CONSTANT_DOUBLE(std::floor(d)); + return H_CONSTANT_DOUBLE(Floor(d)); case kMathClz32: { uint32_t i = DoubleToUint32(d); return H_CONSTANT_INT( @@ -4140,6 +4191,43 @@ } +Representation HUnaryMathOperation::RepresentationFromUses() { + if (op_ != kMathFloor && op_ != kMathRound) { + return HValue::RepresentationFromUses(); + } + + // The instruction can have an int32 or double output. Prefer a double + // representation if there are double uses. + bool use_double = false; + + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + HValue* use = it.value(); + int use_index = it.index(); + Representation rep_observed = use->observed_input_representation(use_index); + Representation rep_required = use->RequiredInputRepresentation(use_index); + use_double |= (rep_observed.IsDouble() || rep_required.IsDouble()); + if (use_double && !FLAG_trace_representation) { + // Having seen one double is enough. 
+ break; + } + if (FLAG_trace_representation) { + if (!rep_required.IsDouble() || rep_observed.IsDouble()) { + PrintF("#%d %s is used by #%d %s as %s%s\n", + id(), Mnemonic(), use->id(), + use->Mnemonic(), rep_observed.Mnemonic(), + (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : "")); + } else { + PrintF("#%d %s is required by #%d %s as %s%s\n", + id(), Mnemonic(), use->id(), + use->Mnemonic(), rep_required.Mnemonic(), + (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : "")); + } + } + } + return use_double ? Representation::Double() : Representation::Integer32(); +} + + HInstruction* HPower::New(Zone* zone, HValue* context, HValue* left, @@ -4150,7 +4238,8 @@ if (c_left->HasNumberValue() && c_right->HasNumberValue()) { double result = power_helper(c_left->DoubleValue(), c_right->DoubleValue()); - return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result); + return H_CONSTANT_DOUBLE(std::isnan(result) ? base::OS::nan_value() + : result); } } return new(zone) HPower(left, right); @@ -4183,7 +4272,7 @@ } } // All comparisons failed, must be NaN. 
- return H_CONSTANT_DOUBLE(OS::nan_value()); + return H_CONSTANT_DOUBLE(base::OS::nan_value()); } } return new(zone) HMathMinMax(context, left, right, op); @@ -4321,8 +4410,8 @@ if (c_string->HasStringValue() && c_index->HasInteger32Value()) { Handle<String> s = c_string->StringValue(); int32_t i = c_index->Integer32Value(); - ASSERT_LE(0, i); - ASSERT_LT(i, s->length()); + DCHECK_LE(0, i); + DCHECK_LT(i, s->length()); return H_CONSTANT_INT(s->Get(i)); } } @@ -4334,10 +4423,9 @@ #undef H_CONSTANT_DOUBLE -void HBitwise::PrintDataTo(StringStream* stream) { - stream->Add(Token::Name(op_)); - stream->Add(" "); - HBitwiseBinaryOperation::PrintDataTo(stream); +OStream& HBitwise::PrintDataTo(OStream& os) const { // NOLINT + os << Token::Name(op_) << " "; + return HBitwiseBinaryOperation::PrintDataTo(os); } @@ -4378,7 +4466,7 @@ void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) { - ASSERT(CheckFlag(kFlexibleRepresentation)); + DCHECK(CheckFlag(kFlexibleRepresentation)); Representation new_rep = RepresentationFromInputs(); UpdateRepresentation(new_rep, h_infer, "inputs"); new_rep = RepresentationFromUses(); @@ -4442,12 +4530,12 @@ #ifdef DEBUG void HPhi::Verify() { - ASSERT(OperandCount() == block()->predecessors()->length()); + DCHECK(OperandCount() == block()->predecessors()->length()); for (int i = 0; i < OperandCount(); ++i) { HValue* value = OperandAt(i); HBasicBlock* defining_block = value->block(); HBasicBlock* predecessor_block = block()->predecessors()->at(i); - ASSERT(defining_block == predecessor_block || + DCHECK(defining_block == predecessor_block || defining_block->Dominates(predecessor_block)); } } @@ -4455,27 +4543,27 @@ void HSimulate::Verify() { HInstruction::Verify(); - ASSERT(HasAstId()); + DCHECK(HasAstId() || next()->IsEnterInlined()); } void HCheckHeapObject::Verify() { HInstruction::Verify(); - ASSERT(HasNoUses()); + DCHECK(HasNoUses()); } void HCheckValue::Verify() { HInstruction::Verify(); - ASSERT(HasNoUses()); + 
DCHECK(HasNoUses()); } #endif HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) { - ASSERT(offset >= 0); - ASSERT(offset < FixedArray::kHeaderSize); + DCHECK(offset >= 0); + DCHECK(offset < FixedArray::kHeaderSize); if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength(); return HObjectAccess(kInobject, offset); } @@ -4483,7 +4571,7 @@ HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset, Representation representation) { - ASSERT(offset >= 0); + DCHECK(offset >= 0); Portion portion = kInobject; if (offset == JSObject::kElementsOffset) { @@ -4523,16 +4611,16 @@ HObjectAccess HObjectAccess::ForContextSlot(int index) { - ASSERT(index >= 0); + DCHECK(index >= 0); Portion portion = kInobject; int offset = Context::kHeaderSize + index * kPointerSize; - ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag); + DCHECK_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag); return HObjectAccess(portion, offset, Representation::Tagged()); } HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) { - ASSERT(offset >= 0); + DCHECK(offset >= 0); Portion portion = kInobject; if (offset == JSObject::kElementsOffset) { @@ -4548,7 +4636,7 @@ HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset, Representation representation) { - ASSERT(offset >= 0); + DCHECK(offset >= 0); return HObjectAccess(kBackingStore, offset, representation, Handle<String>::null(), false, false); } @@ -4557,7 +4645,7 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map, LookupResult* lookup, Handle<String> name) { - ASSERT(lookup->IsField() || lookup->IsTransitionToField()); + DCHECK(lookup->IsField() || lookup->IsTransitionToField()); int index; Representation representation; if (lookup->IsField()) { @@ -4666,39 +4754,39 @@ } -void HObjectAccess::PrintTo(StringStream* stream) const { - stream->Add("."); +OStream& operator<<(OStream& os, const HObjectAccess& access) { + os << "."; - switch (portion()) { - case kArrayLengths: - case 
kStringLengths: - stream->Add("%length"); + switch (access.portion()) { + case HObjectAccess::kArrayLengths: + case HObjectAccess::kStringLengths: + os << "%length"; break; - case kElementsPointer: - stream->Add("%elements"); + case HObjectAccess::kElementsPointer: + os << "%elements"; break; - case kMaps: - stream->Add("%map"); + case HObjectAccess::kMaps: + os << "%map"; break; - case kDouble: // fall through - case kInobject: - if (!name_.is_null()) { - stream->Add(String::cast(*name_)->ToCString().get()); + case HObjectAccess::kDouble: // fall through + case HObjectAccess::kInobject: + if (!access.name().is_null()) { + os << Handle<String>::cast(access.name())->ToCString().get(); } - stream->Add("[in-object]"); + os << "[in-object]"; break; - case kBackingStore: - if (!name_.is_null()) { - stream->Add(String::cast(*name_)->ToCString().get()); + case HObjectAccess::kBackingStore: + if (!access.name().is_null()) { + os << Handle<String>::cast(access.name())->ToCString().get(); } - stream->Add("[backing-store]"); + os << "[backing-store]"; break; - case kExternalMemory: - stream->Add("[external-memory]"); + case HObjectAccess::kExternalMemory: + os << "[external-memory]"; break; } - stream->Add("@%d", offset()); + return os << "@" << access.offset(); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-instructions.h nodejs-0.11.15/deps/v8/src/hydrogen-instructions.h --- nodejs-0.11.13/deps/v8/src/hydrogen-instructions.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-instructions.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,50 +1,29 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_INSTRUCTIONS_H_ #define V8_HYDROGEN_INSTRUCTIONS_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "code-stubs.h" -#include "data-flow.h" -#include "deoptimizer.h" -#include "small-pointer-list.h" -#include "string-stream.h" -#include "unique.h" -#include "v8conversions.h" -#include "v8utils.h" -#include "zone.h" +#include "src/allocation.h" +#include "src/code-stubs.h" +#include "src/conversions.h" +#include "src/data-flow.h" +#include "src/deoptimizer.h" +#include "src/feedback-slots.h" +#include "src/hydrogen-types.h" +#include "src/small-pointer-list.h" +#include "src/unique.h" +#include "src/utils.h" +#include "src/zone.h" namespace v8 { namespace internal { // Forward declarations. +struct ChangesOf; class HBasicBlock; class HDiv; class HEnvironment; @@ -55,6 +34,7 @@ class HValue; class LInstruction; class LChunkBuilder; +class OStream; #define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \ V(ArithmeticBinaryOperation) \ @@ -68,6 +48,7 @@ V(AbnormalExit) \ V(AccessArgumentsAt) \ V(Add) \ + V(AllocateBlockContext) \ V(Allocate) \ V(ApplyArguments) \ V(ArgumentsElements) \ @@ -149,7 +130,7 @@ V(OsrEntry) \ V(Parameter) \ V(Power) \ - V(PushArgument) \ + V(PushArguments) \ V(RegExpLiteral) \ V(Return) \ V(Ror) \ @@ -162,6 +143,7 @@ V(StackCheck) \ V(StoreCodeEntry) \ V(StoreContextSlot) \ + V(StoreFrameContext) \ V(StoreGlobalCell) \ V(StoreKeyed) \ V(StoreKeyedGeneric) \ @@ -184,7 +166,6 @@ V(WrapReceiver) #define GVN_TRACKED_FLAG_LIST(V) \ - V(Maps) \ V(NewSpacePromotion) #define GVN_UNTRACKED_FLAG_LIST(V) \ @@ -200,6 +181,7 @@ V(ElementsPointer) \ V(GlobalVars) \ V(InobjectFields) \ + V(Maps) \ V(OsrEntries) \ V(ExternalMemory) \ V(StringChars) \ @@ -209,7 +191,7 @@ #define DECLARE_ABSTRACT_INSTRUCTION(type) \ virtual bool Is##type() const V8_FINAL V8_OVERRIDE { return true; } \ static H##type* cast(HValue* value) { \ - ASSERT(value->Is##type()); \ + DCHECK(value->Is##type()); \ return reinterpret_cast<H##type*>(value); 
\ } @@ -218,7 +200,7 @@ virtual LInstruction* CompileToLithium( \ LChunkBuilder* builder) V8_FINAL V8_OVERRIDE; \ static H##type* cast(HValue* value) { \ - ASSERT(value->Is##type()); \ + DCHECK(value->Is##type()); \ return reinterpret_cast<H##type*>(value); \ } \ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \ @@ -304,124 +286,6 @@ }; -class HType V8_FINAL { - public: - static HType None() { return HType(kNone); } - static HType Tagged() { return HType(kTagged); } - static HType TaggedPrimitive() { return HType(kTaggedPrimitive); } - static HType TaggedNumber() { return HType(kTaggedNumber); } - static HType Smi() { return HType(kSmi); } - static HType HeapNumber() { return HType(kHeapNumber); } - static HType String() { return HType(kString); } - static HType Boolean() { return HType(kBoolean); } - static HType NonPrimitive() { return HType(kNonPrimitive); } - static HType JSArray() { return HType(kJSArray); } - static HType JSObject() { return HType(kJSObject); } - - // Return the weakest (least precise) common type. 
- HType Combine(HType other) { - return HType(static_cast<Type>(type_ & other.type_)); - } - - bool Equals(const HType& other) const { - return type_ == other.type_; - } - - bool IsSubtypeOf(const HType& other) { - return Combine(other).Equals(other); - } - - bool IsTaggedPrimitive() const { - return ((type_ & kTaggedPrimitive) == kTaggedPrimitive); - } - - bool IsTaggedNumber() const { - return ((type_ & kTaggedNumber) == kTaggedNumber); - } - - bool IsSmi() const { - return ((type_ & kSmi) == kSmi); - } - - bool IsHeapNumber() const { - return ((type_ & kHeapNumber) == kHeapNumber); - } - - bool IsString() const { - return ((type_ & kString) == kString); - } - - bool IsNonString() const { - return IsTaggedPrimitive() || IsSmi() || IsHeapNumber() || - IsBoolean() || IsJSArray(); - } - - bool IsBoolean() const { - return ((type_ & kBoolean) == kBoolean); - } - - bool IsNonPrimitive() const { - return ((type_ & kNonPrimitive) == kNonPrimitive); - } - - bool IsJSArray() const { - return ((type_ & kJSArray) == kJSArray); - } - - bool IsJSObject() const { - return ((type_ & kJSObject) == kJSObject); - } - - bool IsHeapObject() const { - return IsHeapNumber() || IsString() || IsBoolean() || IsNonPrimitive(); - } - - bool ToStringOrToNumberCanBeObserved(Representation representation) { - switch (type_) { - case kTaggedPrimitive: // fallthru - case kTaggedNumber: // fallthru - case kSmi: // fallthru - case kHeapNumber: // fallthru - case kString: // fallthru - case kBoolean: - return false; - case kJSArray: // fallthru - case kJSObject: - return true; - case kTagged: - break; - } - return !representation.IsSmiOrInteger32() && !representation.IsDouble(); - } - - static HType TypeFromValue(Handle<Object> value); - - const char* ToString(); - - private: - enum Type { - kNone = 0x0, // 0000 0000 0000 0000 - kTagged = 0x1, // 0000 0000 0000 0001 - kTaggedPrimitive = 0x5, // 0000 0000 0000 0101 - kTaggedNumber = 0xd, // 0000 0000 0000 1101 - kSmi = 0x1d, // 0000 0000 0001 1101 
- kHeapNumber = 0x2d, // 0000 0000 0010 1101 - kString = 0x45, // 0000 0000 0100 0101 - kBoolean = 0x85, // 0000 0000 1000 0101 - kNonPrimitive = 0x101, // 0000 0001 0000 0001 - kJSObject = 0x301, // 0000 0011 0000 0001 - kJSArray = 0x701 // 0000 0111 0000 0001 - }; - - // Make sure type fits in int16. - STATIC_ASSERT(kJSArray < (1 << (2 * kBitsPerByte))); - - explicit HType(Type t) : type_(t) { } - - int16_t type_; -}; - - class HUseListNode: public ZoneObject { public: HUseListNode(HValue* value, int index, HUseListNode* tail) @@ -457,12 +321,12 @@ void Advance(); HValue* value() { - ASSERT(!Done()); + DCHECK(!Done()); return value_; } int index() { - ASSERT(!Done()); + DCHECK(!Done()); return index_; } @@ -494,8 +358,8 @@ static inline GVNFlag GVNFlagFromInt(int i) { - ASSERT(i >= 0); - ASSERT(i < kNumberOfFlags); + DCHECK(i >= 0); + DCHECK(i < kNumberOfFlags); return static_cast<GVNFlag>(i); } @@ -583,13 +447,11 @@ int raw() const { return value_; } - void PrintTo(FILE* f); - private: typedef BitField<int, 0, 9> InliningIdField; // Offset from the start of the inlined function. - typedef BitField<int, 9, 22> PositionField; + typedef BitField<int, 9, 23> PositionField; // On HPositionInfo can use this constructor. explicit HSourcePosition(int value) : value_(value) { } @@ -603,6 +465,9 @@ }; +OStream& operator<<(OStream& os, const HSourcePosition& p); + + class HValue : public ZoneObject { public: static const int kNoNumber = -1; @@ -641,6 +506,10 @@ // flag. kUint32, kHasNoObservableSideEffects, + // Indicates an instruction shouldn't be replaced by optimization, this flag + // is useful to set in cases where recomputing a value is cheaper than + // extending the value's live range and spilling it. + kCantBeReplaced, // Indicates the instruction is live during dead code elimination. 
kIsLive, @@ -678,12 +547,19 @@ HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE) #undef DECLARE_PREDICATE - HValue(HType type = HType::Tagged()) + bool IsBitwiseBinaryShift() { + return IsShl() || IsShr() || IsSar(); + } + + explicit HValue(HType type = HType::Tagged()) : block_(NULL), id_(kNoNumber), type_(type), use_list_(NULL), range_(NULL), +#ifdef DEBUG + range_poisoned_(false), +#endif flags_(0) {} virtual ~HValue() {} @@ -709,8 +585,8 @@ Representation representation() const { return representation_; } void ChangeRepresentation(Representation r) { - ASSERT(CheckFlag(kFlexibleRepresentation)); - ASSERT(!CheckFlag(kCannotBeTagged) || !r.IsTagged()); + DCHECK(CheckFlag(kFlexibleRepresentation)); + DCHECK(!CheckFlag(kCannotBeTagged) || !r.IsTagged()); RepresentationChanged(r); representation_ = r; if (r.IsTagged()) { @@ -734,14 +610,10 @@ HType type() const { return type_; } void set_type(HType new_type) { - ASSERT(new_type.IsSubtypeOf(type_)); + DCHECK(new_type.IsSubtypeOf(type_)); type_ = new_type; } - bool IsHeapObject() { - return representation_.IsHeapObject() || type_.IsHeapObject(); - } - // There are HInstructions that do not really change a value, they // only add pieces of information to it (like bounds checks, map checks, // smi checks...). @@ -787,13 +659,16 @@ bool IsDefinedAfter(HBasicBlock* other) const; // Operands. - virtual int OperandCount() = 0; + virtual int OperandCount() const = 0; virtual HValue* OperandAt(int index) const = 0; void SetOperandAt(int index, HValue* value); void DeleteAndReplaceWith(HValue* other); void ReplaceAllUsesWith(HValue* other); bool HasNoUses() const { return use_list_ == NULL; } + bool HasOneUse() const { + return use_list_ != NULL && use_list_->tail() == NULL; + } bool HasMultipleUses() const { return use_list_ != NULL && use_list_->tail() != NULL; } @@ -854,9 +729,17 @@ return result; } - Range* range() const { return range_; } - // TODO(svenpanne) We should really use the null object pattern here. 
- bool HasRange() const { return range_ != NULL; } + Range* range() const { + DCHECK(!range_poisoned_); + return range_; + } + bool HasRange() const { + DCHECK(!range_poisoned_); + return range_ != NULL; + } +#ifdef DEBUG + void PoisonRange() { range_poisoned_ = true; } +#endif void AddNewRange(Range* r, Zone* zone); void RemoveLastAddedRange(); void ComputeInitialRange(Zone* zone); @@ -885,11 +768,7 @@ virtual void FinalizeUniqueness() { } // Printing support. - virtual void PrintTo(StringStream* stream) = 0; - void PrintNameTo(StringStream* stream); - void PrintTypeTo(StringStream* stream); - void PrintRangeTo(StringStream* stream); - void PrintChangesTo(StringStream* stream); + virtual OStream& PrintTo(OStream& os) const = 0; // NOLINT const char* Mnemonic() const; @@ -937,13 +816,13 @@ // Returns true conservatively if the program might be able to observe a // ToString() operation on this value. bool ToStringCanBeObserved() const { - return type().ToStringOrToNumberCanBeObserved(representation()); + return ToStringOrToNumberCanBeObserved(); } // Returns true conservatively if the program might be able to observe a // ToNumber() operation on this value. 
bool ToNumberCanBeObserved() const { - return type().ToStringOrToNumberCanBeObserved(representation()); + return ToStringOrToNumberCanBeObserved(); } MinusZeroMode GetMinusZeroMode() { @@ -959,10 +838,16 @@ return false; } + bool ToStringOrToNumberCanBeObserved() const { + if (type().IsTaggedPrimitive()) return false; + if (type().IsJSObject()) return true; + return !representation().IsSmiOrInteger32() && !representation().IsDouble(); + } + virtual Representation RepresentationFromInputs() { return representation(); } - Representation RepresentationFromUses(); + virtual Representation RepresentationFromUses(); Representation RepresentationFromUseRequirements(); bool HasNonSmiUse(); virtual void UpdateRepresentation(Representation new_rep, @@ -976,12 +861,12 @@ virtual void DeleteFromGraph() = 0; virtual void InternalSetOperandAt(int index, HValue* value) = 0; void clear_block() { - ASSERT(block_ != NULL); + DCHECK(block_ != NULL); block_ = NULL; } void set_representation(Representation r) { - ASSERT(representation_.IsNone() && !r.IsNone()); + DCHECK(representation_.IsNone() && !r.IsNone()); representation_ = r; } @@ -1000,6 +885,7 @@ result.Remove(kOsrEntries); return result; } + friend OStream& operator<<(OStream& os, const ChangesOf& v); // A flag mask of all side effects that can make observable changes in // an executing program (i.e. are not safe to repeat, move or remove); @@ -1028,6 +914,9 @@ HType type_; HUseListNode* use_list_; Range* range_; +#ifdef DEBUG + bool range_poisoned_; +#endif int flags_; GVNFlagSet changes_flags_; GVNFlagSet depends_on_flags_; @@ -1038,6 +927,30 @@ DISALLOW_COPY_AND_ASSIGN(HValue); }; +// Support for printing various aspects of an HValue. 
+struct NameOf { + explicit NameOf(const HValue* const v) : value(v) {} + const HValue* value; +}; + + +struct TypeOf { + explicit TypeOf(const HValue* const v) : value(v) {} + const HValue* value; +}; + + +struct ChangesOf { + explicit ChangesOf(const HValue* const v) : value(v) {} + const HValue* value; +}; + + +OStream& operator<<(OStream& os, const HValue& v); +OStream& operator<<(OStream& os, const NameOf& v); +OStream& operator<<(OStream& os, const TypeOf& v); +OStream& operator<<(OStream& os, const ChangesOf& v); + #define DECLARE_INSTRUCTION_FACTORY_P0(I) \ static I* New(Zone* zone, HValue* context) { \ @@ -1080,6 +993,18 @@ return new(zone) I(p1, p2, p3, p4, p5); \ } +#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \ + static I* New(Zone* zone, \ + HValue* context, \ + P1 p1, \ + P2 p2, \ + P3 p3, \ + P4 p4, \ + P5 p5, \ + P6 p6) { \ + return new(zone) I(p1, p2, p3, p4, p5, p6); \ + } + #define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \ static I* New(Zone* zone, HValue* context) { \ return new(zone) I(context); \ @@ -1164,7 +1089,7 @@ data_ = reinterpret_cast<intptr_t>(positions); set_position(pos); - ASSERT(has_operand_positions()); + DCHECK(has_operand_positions()); } HSourcePosition operand_position(int idx) const { @@ -1183,7 +1108,7 @@ static const intptr_t kFirstOperandPosIndex = 1; HSourcePosition* operand_position_slot(int idx) const { - ASSERT(has_operand_positions()); + DCHECK(has_operand_positions()); return &(operand_positions()[kFirstOperandPosIndex + idx]); } @@ -1192,7 +1117,7 @@ } HSourcePosition* operand_positions() const { - ASSERT(has_operand_positions()); + DCHECK(has_operand_positions()); return reinterpret_cast<HSourcePosition*>(data_); } @@ -1202,12 +1127,12 @@ return (val & kPositionTag) != 0; } static intptr_t UntagPosition(intptr_t val) { - ASSERT(IsTaggedPosition(val)); + DCHECK(IsTaggedPosition(val)); return val >> kPositionShift; } static intptr_t TagPosition(intptr_t val) { const intptr_t result = (val 
<< kPositionShift) | kPositionTag; - ASSERT(UntagPosition(result) == val); + DCHECK(UntagPosition(result) == val); return result; } @@ -1220,8 +1145,8 @@ HInstruction* next() const { return next_; } HInstruction* previous() const { return previous_; } - virtual void PrintTo(StringStream* stream) V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream); + virtual OStream& PrintTo(OStream& os) const V8_OVERRIDE; // NOLINT + virtual OStream& PrintDataTo(OStream& os) const; // NOLINT bool IsLinked() const { return block() != NULL; } void Unlink(); @@ -1248,8 +1173,8 @@ return !position().IsUnknown(); } void set_position(HSourcePosition position) { - ASSERT(!has_position()); - ASSERT(!position.IsUnknown()); + DCHECK(!has_position()); + DCHECK(!position.IsUnknown()); position_.set_position(position); } @@ -1258,11 +1183,13 @@ return pos.IsUnknown() ? position() : pos; } void set_operand_position(Zone* zone, int index, HSourcePosition pos) { - ASSERT(0 <= index && index < OperandCount()); + DCHECK(0 <= index && index < OperandCount()); position_.ensure_storage_for_operand_positions(zone, OperandCount()); position_.set_operand_position(index, pos); } + bool Dominates(HInstruction* other); + bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); } bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); } virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0; @@ -1278,7 +1205,7 @@ DECLARE_ABSTRACT_INSTRUCTION(Instruction) protected: - HInstruction(HType type = HType::Tagged()) + explicit HInstruction(HType type = HType::Tagged()) : HValue(type), next_(NULL), previous_(NULL), @@ -1290,12 +1217,10 @@ private: void InitializeAsFirst(HBasicBlock* block) { - ASSERT(!IsLinked()); + DCHECK(!IsLinked()); SetBlock(block); } - void PrintMnemonicTo(StringStream* stream); - HInstruction* next_; HInstruction* previous_; HPositionInfo position_; @@ -1307,13 +1232,14 @@ template<int V> class HTemplateInstruction : public HInstruction { public: 
- virtual int OperandCount() V8_FINAL V8_OVERRIDE { return V; } + virtual int OperandCount() const V8_FINAL V8_OVERRIDE { return V; } virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE { return inputs_[i]; } protected: - HTemplateInstruction(HType type = HType::Tagged()) : HInstruction(type) {} + explicit HTemplateInstruction(HType type = HType::Tagged()) + : HInstruction(type) {} virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE { inputs_[i] = value; @@ -1326,11 +1252,11 @@ class HControlInstruction : public HInstruction { public: - virtual HBasicBlock* SuccessorAt(int i) = 0; - virtual int SuccessorCount() = 0; + virtual HBasicBlock* SuccessorAt(int i) const = 0; + virtual int SuccessorCount() const = 0; virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual bool KnownSuccessorBlock(HBasicBlock** block) { *block = NULL; @@ -1356,15 +1282,15 @@ class HSuccessorIterator V8_FINAL BASE_EMBEDDED { public: - explicit HSuccessorIterator(HControlInstruction* instr) - : instr_(instr), current_(0) { } + explicit HSuccessorIterator(const HControlInstruction* instr) + : instr_(instr), current_(0) {} bool Done() { return current_ >= instr_->SuccessorCount(); } HBasicBlock* Current() { return instr_->SuccessorAt(current_); } void Advance() { current_++; } private: - HControlInstruction* instr_; + const HControlInstruction* instr_; int current_; }; @@ -1372,13 +1298,13 @@ template<int S, int V> class HTemplateControlInstruction : public HControlInstruction { public: - int SuccessorCount() V8_OVERRIDE { return S; } - HBasicBlock* SuccessorAt(int i) V8_OVERRIDE { return successors_[i]; } + int SuccessorCount() const V8_OVERRIDE { return S; } + HBasicBlock* SuccessorAt(int i) const V8_OVERRIDE { return successors_[i]; } void SetSuccessorAt(int i, HBasicBlock* block) V8_OVERRIDE { successors_[i] = 
block; } - int OperandCount() V8_OVERRIDE { return V; } + int OperandCount() const V8_OVERRIDE { return V; } HValue* OperandAt(int i) const V8_OVERRIDE { return inputs_[i]; } @@ -1413,14 +1339,14 @@ set_representation(Representation::Tagged()); } - HValue* value() { return OperandAt(0); } + HValue* value() const { return OperandAt(0); } virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(DummyUse); }; @@ -1454,7 +1380,7 @@ return Representation::None(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(Goto) }; @@ -1507,9 +1433,9 @@ SetSuccessorAt(1, false_target); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT - HValue* value() { return OperandAt(0); } + HValue* value() const { return OperandAt(0); } }; @@ -1529,6 +1455,8 @@ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT + ToBooleanStub::Types expected_input_types() const { return expected_input_types_; } @@ -1564,7 +1492,7 @@ return false; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT static const int kNoKnownSuccessorIndex = -1; int known_successor_index() const { return known_successor_index_; } @@ -1573,6 +1501,7 @@ } Unique<Map> map() const { return map_; } + bool map_is_stable() const { return map_is_stable_; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -1589,12 
+1518,14 @@ HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL) : HUnaryControlInstruction(value, true_target, false_target), - known_successor_index_(kNoKnownSuccessorIndex), map_(Unique<Map>(map)) { - ASSERT(!map.is_null()); + known_successor_index_(kNoKnownSuccessorIndex), + map_is_stable_(map->is_stable()), + map_(Unique<Map>::CreateImmovable(map)) { set_representation(Representation::Tagged()); } - int known_successor_index_; + int known_successor_index_ : 31; + bool map_is_stable_ : 1; Unique<Map> map_; }; @@ -1635,11 +1566,11 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT - HValue* value() { return OperandAt(0); } - HValue* context() { return OperandAt(1); } - HValue* parameter_count() { return OperandAt(2); } + HValue* value() const { return OperandAt(0); } + HValue* context() const { return OperandAt(1); } + HValue* parameter_count() const { return OperandAt(2); } DECLARE_CONCRETE_INSTRUCTION(Return) @@ -1668,7 +1599,7 @@ class HUnaryOperation : public HTemplateInstruction<1> { public: - HUnaryOperation(HValue* value, HType type = HType::Tagged()) + explicit HUnaryOperation(HValue* value, HType type = HType::Tagged()) : HTemplateInstruction<1>(type) { SetOperandAt(0, value); } @@ -1678,7 +1609,7 @@ } HValue* value() const { return OperandAt(0); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT }; @@ -1702,13 +1633,13 @@ static HInstruction* New(Zone* zone, HValue* context, HValue* value, Representation required_representation); - HValue* value() { return OperandAt(0); } + HValue* value() const { return OperandAt(0); } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return representation(); // Same as the output representation. 
} - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation) @@ -1727,13 +1658,13 @@ bool is_truncating_to_smi, bool is_truncating_to_int32) : HUnaryOperation(value) { - ASSERT(!value->representation().IsNone()); - ASSERT(!to.IsNone()); - ASSERT(!value->representation().Equals(to)); + DCHECK(!value->representation().IsNone()); + DCHECK(!to.IsNone()); + DCHECK(!value->representation().Equals(to)); set_representation(to); SetFlag(kUseGVN); SetFlag(kCanOverflow); - if (is_truncating_to_smi) { + if (is_truncating_to_smi && to.IsSmi()) { SetFlag(kTruncatingToSmi); SetFlag(kTruncatingToInt32); } @@ -1764,7 +1695,7 @@ virtual Range* InferRange(Zone* zone) V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(Change) @@ -1883,19 +1814,19 @@ done_with_replay_(false) {} ~HSimulate() {} - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT bool HasAstId() const { return !ast_id_.IsNone(); } BailoutId ast_id() const { return ast_id_; } void set_ast_id(BailoutId id) { - ASSERT(!HasAstId()); + DCHECK(!HasAstId()); ast_id_ = id; } int pop_count() const { return pop_count_; } const ZoneList<HValue*>* values() const { return &values_; } int GetAssignedIndexAt(int index) const { - ASSERT(HasAssignedIndexAt(index)); + DCHECK(HasAssignedIndexAt(index)); return assigned_indexes_[index]; } bool HasAssignedIndexAt(int index) const { @@ -1913,7 +1844,7 @@ } return -1; } - virtual int OperandCount() V8_OVERRIDE { return values_.length(); } + virtual int OperandCount() const V8_OVERRIDE { return values_.length(); } virtual HValue* OperandAt(int index) const V8_OVERRIDE { return values_[index]; } @@ -1978,8 +1909,8 @@ DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, 
Kind, int); - Kind kind() { return kind_; } - int index() { return index_; } + Kind kind() const { return kind_; } + int index() const { return index_; } HSimulate* next_simulate() { return next_simulate_; } void set_next_simulate(HSimulate* simulate) { next_simulate_ = simulate; @@ -1989,12 +1920,12 @@ return Representation::None(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT #ifdef DEBUG void set_closure(Handle<JSFunction> closure) { - ASSERT(closure_.is_null()); - ASSERT(!closure.is_null()); + DCHECK(closure_.is_null()); + DCHECK(!closure.is_null()); closure_ = closure; } Handle<JSFunction> closure() const { return closure_; } @@ -2069,21 +2000,22 @@ public: static HEnterInlined* New(Zone* zone, HValue* context, + BailoutId return_id, Handle<JSFunction> closure, int arguments_count, FunctionLiteral* function, InliningKind inlining_kind, Variable* arguments_var, HArgumentsObject* arguments_object) { - return new(zone) HEnterInlined(closure, arguments_count, function, - inlining_kind, arguments_var, + return new(zone) HEnterInlined(return_id, closure, arguments_count, + function, inlining_kind, arguments_var, arguments_object, zone); } void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone); ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT Handle<JSFunction> closure() const { return closure_; } int arguments_count() const { return arguments_count_; } @@ -2091,6 +2023,7 @@ void set_arguments_pushed() { arguments_pushed_ = true; } FunctionLiteral* function() const { return function_; } InliningKind inlining_kind() const { return inlining_kind_; } + BailoutId ReturnId() const { return return_id_; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); @@ 
-2102,14 +2035,16 @@ DECLARE_CONCRETE_INSTRUCTION(EnterInlined) private: - HEnterInlined(Handle<JSFunction> closure, + HEnterInlined(BailoutId return_id, + Handle<JSFunction> closure, int arguments_count, FunctionLiteral* function, InliningKind inlining_kind, Variable* arguments_var, HArgumentsObject* arguments_object, Zone* zone) - : closure_(closure), + : return_id_(return_id), + closure_(closure), arguments_count_(arguments_count), arguments_pushed_(false), function_(function), @@ -2119,6 +2054,7 @@ return_targets_(2, zone) { } + BailoutId return_id_; Handle<JSFunction> closure_; int arguments_count_; bool arguments_pushed_; @@ -2153,23 +2089,71 @@ }; -class HPushArgument V8_FINAL : public HUnaryOperation { +class HPushArguments V8_FINAL : public HInstruction { public: - DECLARE_INSTRUCTION_FACTORY_P1(HPushArgument, HValue*); + static HPushArguments* New(Zone* zone, HValue* context) { + return new(zone) HPushArguments(zone); + } + static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1) { + HPushArguments* instr = new(zone) HPushArguments(zone); + instr->AddInput(arg1); + return instr; + } + static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1, + HValue* arg2) { + HPushArguments* instr = new(zone) HPushArguments(zone); + instr->AddInput(arg1); + instr->AddInput(arg2); + return instr; + } + static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1, + HValue* arg2, HValue* arg3) { + HPushArguments* instr = new(zone) HPushArguments(zone); + instr->AddInput(arg1); + instr->AddInput(arg2); + instr->AddInput(arg3); + return instr; + } + static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1, + HValue* arg2, HValue* arg3, HValue* arg4) { + HPushArguments* instr = new(zone) HPushArguments(zone); + instr->AddInput(arg1); + instr->AddInput(arg2); + instr->AddInput(arg3); + instr->AddInput(arg4); + return instr; + } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return 
Representation::Tagged(); } - virtual int argument_delta() const V8_OVERRIDE { return 1; } - HValue* argument() { return OperandAt(0); } + virtual int argument_delta() const V8_OVERRIDE { return inputs_.length(); } + HValue* argument(int i) { return OperandAt(i); } - DECLARE_CONCRETE_INSTRUCTION(PushArgument) + virtual int OperandCount() const V8_FINAL V8_OVERRIDE { + return inputs_.length(); + } + virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE { + return inputs_[i]; + } + + void AddInput(HValue* value); + + DECLARE_CONCRETE_INSTRUCTION(PushArguments) + + protected: + virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE { + inputs_[i] = value; + } private: - explicit HPushArgument(HValue* value) : HUnaryOperation(value) { + explicit HPushArguments(Zone* zone) + : HInstruction(HType::Tagged()), inputs_(4, zone) { set_representation(Representation::Tagged()); } + + ZoneList<HValue*> inputs_; }; @@ -2266,9 +2250,9 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT - HValue* value() { return OperandAt(0); } + HValue* value() const { return OperandAt(0); } }; @@ -2280,15 +2264,15 @@ SetOperandAt(1, second); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation( int index) V8_FINAL V8_OVERRIDE { return Representation::Tagged(); } - HValue* first() { return OperandAt(0); } - HValue* second() { return OperandAt(1); } + HValue* first() const { return OperandAt(0); } + HValue* second() const { return OperandAt(1); } }; @@ -2300,13 +2284,13 @@ int argument_count, bool pass_argument_count); - HValue* function() { return OperandAt(0); } + HValue* function() const { return OperandAt(0); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& 
os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation( int index) V8_FINAL V8_OVERRIDE { - ASSERT(index == 0); + DCHECK(index == 0); return Representation::Tagged(); } @@ -2340,16 +2324,18 @@ static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target, int argument_count, - const CallInterfaceDescriptor* descriptor, - Vector<HValue*>& operands) { - ASSERT(operands.length() == descriptor->environment_length()); + const InterfaceDescriptor* descriptor, + const Vector<HValue*>& operands) { + DCHECK(operands.length() == descriptor->GetEnvironmentLength()); HCallWithDescriptor* res = new(zone) HCallWithDescriptor(target, argument_count, descriptor, operands, zone); return res; } - virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); } + virtual int OperandCount() const V8_FINAL V8_OVERRIDE { + return values_.length(); + } virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE { return values_[index]; } @@ -2360,7 +2346,7 @@ return Representation::Tagged(); } else { int par_index = index - 1; - ASSERT(par_index < descriptor_->environment_length()); + DCHECK(par_index < descriptor_->GetEnvironmentLength()); return descriptor_->GetParameterRepresentation(par_index); } } @@ -2379,7 +2365,7 @@ return -argument_count_; } - const CallInterfaceDescriptor* descriptor() const { + const InterfaceDescriptor* descriptor() const { return descriptor_; } @@ -2387,17 +2373,17 @@ return OperandAt(0); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT private: // The argument count includes the receiver. 
HCallWithDescriptor(HValue* target, int argument_count, - const CallInterfaceDescriptor* descriptor, - Vector<HValue*>& operands, + const InterfaceDescriptor* descriptor, + const Vector<HValue*>& operands, Zone* zone) : descriptor_(descriptor), - values_(descriptor->environment_length() + 1, zone) { + values_(descriptor->GetEnvironmentLength() + 1, zone) { argument_count_ = argument_count; AddOperand(target, zone); for (int i = 0; i < operands.length(); i++) { @@ -2417,7 +2403,7 @@ values_[index] = value; } - const CallInterfaceDescriptor* descriptor_; + const InterfaceDescriptor* descriptor_; ZoneList<HValue*> values_; int argument_count_; }; @@ -2522,7 +2508,7 @@ HValue* context() { return first(); } HValue* constructor() { return second(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT ElementsKind elements_kind() const { return elements_kind_; } @@ -2545,7 +2531,7 @@ const Runtime::Function*, int); - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT HValue* context() { return OperandAt(0); } const Runtime::Function* function() const { return c_function_; } @@ -2609,10 +2595,10 @@ HValue* value, BuiltinFunctionId op); - HValue* context() { return OperandAt(0); } - HValue* value() { return OperandAt(1); } + HValue* context() const { return OperandAt(0); } + HValue* value() const { return OperandAt(1); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { if (index == 0) { @@ -2621,6 +2607,7 @@ switch (op_) { case kMathFloor: case kMathRound: + case kMathFround: case kMathSqrt: case kMathPowHalf: case kMathLog: @@ -2640,6 +2627,7 @@ virtual Range* InferRange(Zone* zone) V8_OVERRIDE; virtual HValue* Canonicalize() V8_OVERRIDE; + virtual 
Representation RepresentationFromUses() V8_OVERRIDE; virtual Representation RepresentationFromInputs() V8_OVERRIDE; BuiltinFunctionId op() const { return op_; } @@ -2654,6 +2642,15 @@ } private: + // Indicates if we support a double (and int32) output for Math.floor and + // Math.round. + bool SupportsFlexibleFloorAndRound() const { +#ifdef V8_TARGET_ARCH_ARM64 + return true; +#else + return false; +#endif + } HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op) : HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) { SetOperandAt(0, context); @@ -2661,6 +2658,12 @@ switch (op) { case kMathFloor: case kMathRound: + if (SupportsFlexibleFloorAndRound()) { + SetFlag(kFlexibleRepresentation); + } else { + set_representation(Representation::Integer32()); + } + break; case kMathClz32: set_representation(Representation::Integer32()); break; @@ -2671,6 +2674,7 @@ // is tagged, and not when it is an unboxed double or unboxed integer. SetChangesFlag(kNewSpacePromotion); break; + case kMathFround: case kMathLog: case kMathExp: case kMathSqrt: @@ -2713,7 +2717,7 @@ } private: - HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged()) + explicit HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged()) : HTemplateInstruction<0>(type), index_(index) { SetFlag(kUseGVN); // TODO(bmeurer): We'll need kDependsOnRoots once we add the @@ -2730,81 +2734,119 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> { public: static HCheckMaps* New(Zone* zone, HValue* context, HValue* value, - Handle<Map> map, CompilationInfo* info, - HValue* typecheck = NULL); + Handle<Map> map, HValue* typecheck = NULL) { + return new(zone) HCheckMaps(value, new(zone) UniqueSet<Map>( + Unique<Map>::CreateImmovable(map), zone), typecheck); + } static HCheckMaps* New(Zone* zone, HValue* context, - HValue* value, SmallMapList* maps, + HValue* value, SmallMapList* map_list, HValue* typecheck = NULL) { - HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, 
typecheck); - for (int i = 0; i < maps->length(); i++) { - check_map->Add(maps->at(i), zone); - } - return check_map; + UniqueSet<Map>* maps = new(zone) UniqueSet<Map>(map_list->length(), zone); + for (int i = 0; i < map_list->length(); ++i) { + maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone); + } + return new(zone) HCheckMaps(value, maps, typecheck); } - bool CanOmitMapChecks() { return omit_; } + bool IsStabilityCheck() const { return is_stability_check_; } + void MarkAsStabilityCheck() { + maps_are_stable_ = true; + has_migration_target_ = false; + is_stability_check_ = true; + ClearChangesFlag(kNewSpacePromotion); + ClearDependsOnFlag(kElementsKind); + ClearDependsOnFlag(kMaps); + } virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } - virtual bool HandleSideEffectDominator(GVNFlag side_effect, - HValue* dominator) V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - HValue* value() { return OperandAt(0); } - HValue* typecheck() { return OperandAt(1); } + virtual HType CalculateInferredType() V8_OVERRIDE { + if (value()->type().IsHeapObject()) return value()->type(); + return HType::HeapObject(); + } - Unique<Map> first_map() const { return map_set_.at(0); } - UniqueSet<Map> map_set() const { return map_set_; } + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT - void set_map_set(UniqueSet<Map>* maps, Zone *zone) { - map_set_.Clear(); - for (int i = 0; i < maps->size(); i++) { - map_set_.Add(maps->at(i), zone); - } + HValue* value() const { return OperandAt(0); } + HValue* typecheck() const { return OperandAt(1); } + + const UniqueSet<Map>* maps() const { return maps_; } + void set_maps(const UniqueSet<Map>* maps) { maps_ = maps; } + + bool maps_are_stable() const { return maps_are_stable_; } + + bool HasMigrationTarget() const { return has_migration_target_; } + + 
virtual HValue* Canonicalize() V8_OVERRIDE; + + static HCheckMaps* CreateAndInsertAfter(Zone* zone, + HValue* value, + Unique<Map> map, + bool map_is_stable, + HInstruction* instr) { + return instr->Append(new(zone) HCheckMaps( + value, new(zone) UniqueSet<Map>(map, zone), map_is_stable)); } - bool has_migration_target() const { - return has_migration_target_; + static HCheckMaps* CreateAndInsertBefore(Zone* zone, + HValue* value, + const UniqueSet<Map>* maps, + bool maps_are_stable, + HInstruction* instr) { + return instr->Prepend(new(zone) HCheckMaps(value, maps, maps_are_stable)); } DECLARE_CONCRETE_INSTRUCTION(CheckMaps) protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { - return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_); + return this->maps()->Equals(HCheckMaps::cast(other)->maps()); } virtual int RedefinedOperandIndex() { return 0; } private: - void Add(Handle<Map> map, Zone* zone) { - map_set_.Add(Unique<Map>(map), zone); + HCheckMaps(HValue* value, const UniqueSet<Map>* maps, bool maps_are_stable) + : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps), + has_migration_target_(false), is_stability_check_(false), + maps_are_stable_(maps_are_stable) { + DCHECK_NE(0, maps->size()); + SetOperandAt(0, value); + // Use the object value for the dependency. + SetOperandAt(1, value); + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); SetDependsOnFlag(kMaps); SetDependsOnFlag(kElementsKind); - - if (!has_migration_target_ && map->is_migration_target()) { - has_migration_target_ = true; - SetChangesFlag(kNewSpacePromotion); - } } - // Clients should use one of the static New* methods above. 
- HCheckMaps(HValue* value, Zone *zone, HValue* typecheck) - : HTemplateInstruction<2>(value->type()), - omit_(false), has_migration_target_(false) { + HCheckMaps(HValue* value, const UniqueSet<Map>* maps, HValue* typecheck) + : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps), + has_migration_target_(false), is_stability_check_(false), + maps_are_stable_(true) { + DCHECK_NE(0, maps->size()); SetOperandAt(0, value); // Use the object value for the dependency if NULL is passed. - SetOperandAt(1, typecheck != NULL ? typecheck : value); + SetOperandAt(1, typecheck ? typecheck : value); set_representation(Representation::Tagged()); SetFlag(kUseGVN); - SetFlag(kTrackSideEffectDominators); + SetDependsOnFlag(kMaps); + SetDependsOnFlag(kElementsKind); + for (int i = 0; i < maps->size(); ++i) { + Handle<Map> map = maps->at(i).handle(); + if (map->is_migration_target()) has_migration_target_ = true; + if (!map->is_stable()) maps_are_stable_ = false; + } + if (has_migration_target_) SetChangesFlag(kNewSpacePromotion); } - bool omit_; - bool has_migration_target_; - UniqueSet<Map> map_set_; + const UniqueSet<Map>* maps_; + bool has_migration_target_ : 1; + bool is_stability_check_ : 1; + bool maps_are_stable_ : 1; }; @@ -2834,7 +2876,7 @@ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual HValue* Canonicalize() V8_OVERRIDE; @@ -2880,18 +2922,31 @@ DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check); - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } + virtual HType CalculateInferredType() V8_OVERRIDE { + switch (check_) { + case IS_SPEC_OBJECT: return 
HType::JSObject(); + case IS_JS_ARRAY: return HType::JSArray(); + case IS_STRING: return HType::String(); + case IS_INTERNALIZED_STRING: return HType::String(); + } + UNREACHABLE(); + return HType::Tagged(); + } + virtual HValue* Canonicalize() V8_OVERRIDE; bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; } void GetCheckInterval(InstanceType* first, InstanceType* last); void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag); + Check check() const { return check_; } + DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType) protected: @@ -2906,10 +2961,10 @@ virtual int RedefinedOperandIndex() { return 0; } private: - const char* GetCheckName(); + const char* GetCheckName() const; HCheckInstanceType(HValue* value, Check check) - : HUnaryOperation(value), check_(check) { + : HUnaryOperation(value, HType::HeapObject()), check_(check) { set_representation(Representation::Tagged()); SetFlag(kUseGVN); } @@ -2956,6 +3011,11 @@ return Representation::Tagged(); } + virtual HType CalculateInferredType() V8_OVERRIDE { + if (value()->type().IsHeapObject()) return value()->type(); + return HType::HeapObject(); + } + #ifdef DEBUG virtual void Verify() V8_OVERRIDE; #endif @@ -2970,8 +3030,7 @@ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } private: - explicit HCheckHeapObject(HValue* value) - : HUnaryOperation(value, HType::NonPrimitive()) { + explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) { set_representation(Representation::Tagged()); SetFlag(kUseGVN); } @@ -3007,7 +3066,7 @@ InductionVariableCheck* next() { return next_; } bool HasUpperLimit() { return upper_limit_ >= 0; } int32_t upper_limit() { - ASSERT(HasUpperLimit()); + DCHECK(HasUpperLimit()); return upper_limit_; } void set_upper_limit(int32_t upper_limit) { @@ -3210,7 +3269,7 @@ non_phi_uses_[i] = 0; indirect_uses_[i] = 0; } - ASSERT(merged_index >= 0 || merged_index == kInvalidMergedIndex); + DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex); 
SetFlag(kFlexibleRepresentation); SetFlag(kAllowUndefinedAsNaN); } @@ -3227,7 +3286,7 @@ return representation(); } virtual HType CalculateInferredType() V8_OVERRIDE; - virtual int OperandCount() V8_OVERRIDE { return inputs_.length(); } + virtual int OperandCount() const V8_OVERRIDE { return inputs_.length(); } virtual HValue* OperandAt(int index) const V8_OVERRIDE { return inputs_[index]; } @@ -3253,11 +3312,11 @@ induction_variable_data_->limit() != NULL; } void DetectInductionVariable() { - ASSERT(induction_variable_data_ == NULL); + DCHECK(induction_variable_data_ == NULL); induction_variable_data_ = InductionVariableData::ExaminePhi(this); } - virtual void PrintTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintTo(OStream& os) const V8_OVERRIDE; // NOLINT #ifdef DEBUG virtual void Verify() V8_OVERRIDE; @@ -3294,7 +3353,7 @@ int phi_id() { return phi_id_; } static HPhi* cast(HValue* value) { - ASSERT(value->IsPhi()); + DCHECK(value->IsPhi()); return reinterpret_cast<HPhi*>(value); } virtual Opcode opcode() const V8_OVERRIDE { return HValue::kPhi; } @@ -3329,7 +3388,9 @@ public: HDematerializedObject(int count, Zone* zone) : values_(count, zone) {} - virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); } + virtual int OperandCount() const V8_FINAL V8_OVERRIDE { + return values_.length(); + } virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE { return values_[index]; } @@ -3377,8 +3438,6 @@ set_representation(Representation::Tagged()); SetFlag(kIsArguments); } - - virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; } }; @@ -3401,15 +3460,15 @@ HValue* map_value() const { return values()->first(); } void ReuseSideEffectsFromStore(HInstruction* store) { - ASSERT(store->HasObservableSideEffects()); - ASSERT(store->IsStoreNamedField()); + DCHECK(store->HasObservableSideEffects()); + DCHECK(store->IsStoreNamedField()); changes_flags_.Add(store->ChangesFlags()); } // Replay effects of this instruction on 
the given environment. void ReplayEnvironment(HEnvironment* env); - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(CapturedObject) @@ -3450,12 +3509,23 @@ } static HConstant* CreateAndInsertBefore(Zone* zone, - Unique<Object> unique, - bool is_not_in_new_space, + Unique<Map> map, + bool map_is_stable, HInstruction* instruction) { return instruction->Prepend(new(zone) HConstant( - unique, Representation::Tagged(), HType::Tagged(), - is_not_in_new_space, false, false, kUnknownInstanceType)); + map, Unique<Map>(Handle<Map>::null()), map_is_stable, + Representation::Tagged(), HType::HeapObject(), true, + false, false, MAP_TYPE)); + } + + static HConstant* CreateAndInsertAfter(Zone* zone, + Unique<Map> map, + bool map_is_stable, + HInstruction* instruction) { + return instruction->Append(new(zone) HConstant( + map, Unique<Map>(Handle<Map>::null()), map_is_stable, + Representation::Tagged(), HType::HeapObject(), true, + false, false, MAP_TYPE)); } Handle<Object> handle(Isolate* isolate) { @@ -3466,16 +3536,10 @@ isolate->factory()->NewNumber(double_value_, TENURED)); } AllowDeferredHandleDereference smi_check; - ASSERT(has_int32_value_ || !object_.handle()->IsSmi()); + DCHECK(has_int32_value_ || !object_.handle()->IsSmi()); return object_.handle(); } - bool HasMap(Handle<Map> map) { - Handle<Object> constant_object = handle(map->GetIsolate()); - return constant_object->IsHeapObject() && - Handle<HeapObject>::cast(constant_object)->map() == *map; - } - bool IsSpecialDouble() const { return has_double_value_ && (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) || @@ -3493,6 +3557,10 @@ return instance_type_ == CELL_TYPE || instance_type_ == PROPERTY_CELL_TYPE; } + bool IsMap() const { + return instance_type_ == MAP_TYPE; + } + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } @@ -3506,19 
+3574,19 @@ } virtual bool EmitAtUses() V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT HConstant* CopyToRepresentation(Representation r, Zone* zone) const; Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone); Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone); bool HasInteger32Value() const { return has_int32_value_; } int32_t Integer32Value() const { - ASSERT(HasInteger32Value()); + DCHECK(HasInteger32Value()); return int32_value_; } bool HasSmiValue() const { return has_smi_value_; } bool HasDoubleValue() const { return has_double_value_; } double DoubleValue() const { - ASSERT(HasDoubleValue()); + DCHECK(HasDoubleValue()); return double_value_; } bool IsTheHole() const { @@ -3529,7 +3597,7 @@ } bool HasNumberValue() const { return has_double_value_; } int32_t NumberValueAsInteger32() const { - ASSERT(HasNumberValue()); + DCHECK(HasNumberValue()); // Irrespective of whether a numeric HConstant can be safely // represented as an int32, we store the (in some cases lossy) // representation of the number in int32_value_. 
@@ -3537,11 +3605,11 @@ } bool HasStringValue() const { if (has_double_value_ || has_int32_value_) return false; - ASSERT(!object_.handle().is_null()); + DCHECK(!object_.handle().is_null()); return instance_type_ < FIRST_NONSTRING_TYPE; } Handle<String> StringValue() const { - ASSERT(HasStringValue()); + DCHECK(HasStringValue()); return Handle<String>::cast(object_.handle()); } bool HasInternalizedStringValue() const { @@ -3560,6 +3628,22 @@ bool IsUndetectable() const { return is_undetectable_; } InstanceType GetInstanceType() const { return instance_type_; } + bool HasMapValue() const { return instance_type_ == MAP_TYPE; } + Unique<Map> MapValue() const { + DCHECK(HasMapValue()); + return Unique<Map>::cast(GetUnique()); + } + bool HasStableMapValue() const { + DCHECK(HasMapValue() || !has_stable_map_value_); + return has_stable_map_value_; + } + + bool HasObjectMap() const { return !object_map_.IsNull(); } + Unique<Map> ObjectMap() const { + DCHECK(HasObjectMap()); + return object_map_; + } + virtual intptr_t Hashcode() V8_OVERRIDE { if (has_int32_value_) { return static_cast<intptr_t>(int32_value_); @@ -3568,14 +3652,14 @@ } else if (has_external_reference_value_) { return reinterpret_cast<intptr_t>(external_reference_value_.address()); } else { - ASSERT(!object_.handle().is_null()); + DCHECK(!object_.handle().is_null()); return object_.Hashcode(); } } virtual void FinalizeUniqueness() V8_OVERRIDE { if (!has_double_value_ && !has_external_reference_value_) { - ASSERT(!object_.handle().is_null()); + DCHECK(!object_.handle().is_null()); object_ = Unique<Object>(object_.handle()); } } @@ -3607,7 +3691,7 @@ other_constant->has_external_reference_value_) { return false; } - ASSERT(!object_.handle().is_null()); + DCHECK(!object_.handle().is_null()); return other_constant->object_ == object_; } } @@ -3623,7 +3707,8 @@ private: friend class HGraph; - HConstant(Handle<Object> handle, Representation r = Representation::None()); + explicit HConstant(Handle<Object> handle, 
+ Representation r = Representation::None()); HConstant(int32_t value, Representation r = Representation::None(), bool is_not_in_new_space = true, @@ -3632,7 +3717,9 @@ Representation r = Representation::None(), bool is_not_in_new_space = true, Unique<Object> optional = Unique<Object>(Handle<Object>::null())); - HConstant(Unique<Object> unique, + HConstant(Unique<Object> object, + Unique<Map> object_map, + bool has_stable_map_value, Representation r, HType type, bool is_not_in_new_space, @@ -3652,6 +3739,12 @@ // constant HeapObject. Unique<Object> object_; + // If object_ is a heap object, this points to the stable map of the object. + Unique<Map> object_map_; + + // If object_ is a map, this indicates whether the map is stable. + bool has_stable_map_value_ : 1; + // We store the HConstant in the most specific form safely possible. // The two flags, has_int32_value_ and has_double_value_ tell us if // int32_value_ and double_value_ hold valid, safe representations @@ -3679,7 +3772,7 @@ HType type = HType::Tagged()) : HTemplateInstruction<3>(type), observed_output_representation_(Representation::None()) { - ASSERT(left != NULL && right != NULL); + DCHECK(left != NULL && right != NULL); SetOperandAt(0, context); SetOperandAt(1, left); SetOperandAt(2, right); @@ -3703,7 +3796,7 @@ // Otherwise, if there is only one use of the right operand, it would be // better off on the left for platforms that only have 2-arg arithmetic // ops (e.g ia32, x64) that clobber the left operand. 
- return right()->UseCount() == 1; + return right()->HasOneUse(); } HValue* BetterLeftOperand() { @@ -3715,7 +3808,7 @@ } void set_observed_input_representation(int index, Representation rep) { - ASSERT(index >= 1 && index <= 2); + DCHECK(index >= 1 && index <= 2); observed_input_representation_[index - 1] = rep; } @@ -3744,7 +3837,7 @@ virtual bool IsCommutative() const { return false; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { if (index == 0) return Representation::Tagged(); @@ -3761,7 +3854,7 @@ bool RightIsPowerOf2() { if (!right()->IsInteger32Constant()) return false; int32_t value = right()->GetInteger32Constant(); - return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value)); + return IsPowerOf2(value) || IsPowerOf2(-value); } DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation) @@ -3784,12 +3877,12 @@ return Representation::Tagged(); } - HValue* receiver() { return OperandAt(0); } - HValue* function() { return OperandAt(1); } + HValue* receiver() const { return OperandAt(0); } + HValue* function() const { return OperandAt(1); } virtual HValue* Canonicalize() V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT bool known_function() const { return known_function_; } DECLARE_CONCRETE_INSTRUCTION(WrapReceiver) @@ -3898,7 +3991,7 @@ public: DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*); - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { // The arguments elements is considered tagged. 
@@ -3907,9 +4000,9 @@ : Representation::Integer32(); } - HValue* arguments() { return OperandAt(0); } - HValue* length() { return OperandAt(1); } - HValue* index() { return OperandAt(2); } + HValue* arguments() const { return OperandAt(0); } + HValue* length() const { return OperandAt(1); } + HValue* index() const { return OperandAt(2); } DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt) @@ -3936,13 +4029,13 @@ bool skip_check() const { return skip_check_; } void set_skip_check() { skip_check_ = true; } - HValue* base() { return base_; } - int offset() { return offset_; } - int scale() { return scale_; } + HValue* base() const { return base_; } + int offset() const { return offset_; } + int scale() const { return scale_; } void ApplyIndexChange(); bool DetectCompoundIndex() { - ASSERT(base() == NULL); + DCHECK(base() == NULL); DecompositionResult decomposition; if (index()->TryDecompose(&decomposition)) { @@ -3962,13 +4055,13 @@ return representation(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual void InferRepresentation( HInferRepresentationPhase* h_infer) V8_OVERRIDE; - HValue* index() { return OperandAt(0); } - HValue* length() { return OperandAt(1); } - bool allow_equality() { return allow_equality_; } + HValue* index() const { return OperandAt(0); } + HValue* length() const { return OperandAt(1); } + bool allow_equality() const { return allow_equality_; } void set_allow_equality(bool v) { allow_equality_ = v; } virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; } @@ -4024,7 +4117,7 @@ } } - HValue* base_index() { return OperandAt(0); } + HValue* base_index() const { return OperandAt(0); } HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); } DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation) @@ -4033,7 +4126,7 @@ return representation(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& 
PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; } virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE { return true; } @@ -4043,7 +4136,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation { public: HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right, - HType type = HType::Tagged()) + HType type = HType::TaggedNumber()) : HBinaryOperation(context, left, right, type) { SetFlag(kFlexibleRepresentation); SetFlag(kTruncatingToInt32); @@ -4159,7 +4252,7 @@ } Token::Value token() const { return token_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(CompareGeneric) @@ -4170,7 +4263,7 @@ Token::Value token) : HBinaryOperation(context, left, right, HType::Boolean()), token_(token) { - ASSERT(Token::IsCompareOp(token)); + DCHECK(Token::IsCompareOp(token)); set_representation(Representation::Tagged()); SetAllSideEffects(); } @@ -4187,8 +4280,8 @@ HValue*, HValue*, Token::Value, HBasicBlock*, HBasicBlock*); - HValue* left() { return OperandAt(0); } - HValue* right() { return OperandAt(1); } + HValue* left() const { return OperandAt(0); } + HValue* right() const { return OperandAt(1); } Token::Value token() const { return token_; } void set_observed_input_representation(Representation left, @@ -4206,7 +4299,10 @@ virtual Representation observed_input_representation(int index) V8_OVERRIDE { return observed_input_representation_[index]; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE; + + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT void SetOperandPositions(Zone* zone, HSourcePosition left_pos, @@ -4225,7 +4321,7 @@ HBasicBlock* false_target = NULL) : token_(token) { SetFlag(kFlexibleRepresentation); - ASSERT(Token::IsCompareOp(token)); + 
DCHECK(Token::IsCompareOp(token)); SetOperandAt(0, left); SetOperandAt(1, right); SetSuccessorAt(0, true_target); @@ -4299,10 +4395,10 @@ known_successor_index_ = known_successor_index; } - HValue* left() { return OperandAt(0); } - HValue* right() { return OperandAt(1); } + HValue* left() const { return OperandAt(0); } + HValue* right() const { return OperandAt(1); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -4320,12 +4416,6 @@ HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL) : known_successor_index_(kNoKnownSuccessorIndex) { - ASSERT(!left->IsConstant() || - (!HConstant::cast(left)->HasInteger32Value() || - HConstant::cast(left)->HasSmiValue())); - ASSERT(!right->IsConstant() || - (!HConstant::cast(right)->HasInteger32Value() || - HConstant::cast(right)->HasSmiValue())); SetOperandAt(0, left); SetOperandAt(1, right); SetSuccessorAt(0, true_target); @@ -4370,6 +4460,12 @@ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE; + static const int kNoKnownSuccessorIndex = -1; + int known_successor_index() const { return known_successor_index_; } + void set_known_successor_index(int known_successor_index) { + known_successor_index_ = known_successor_index; + } + DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch) protected: @@ -4379,7 +4475,10 @@ HIsStringAndBranch(HValue* value, HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target) {} + : HUnaryControlInstruction(value, true_target, false_target), + known_successor_index_(kNoKnownSuccessorIndex) { } + + int known_successor_index_; }; @@ -4443,7 +4542,7 @@ HValue* right() { return OperandAt(2); } Token::Value token() const { return token_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual 
OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -4461,7 +4560,7 @@ HValue* right, Token::Value token) : token_(token) { - ASSERT(Token::IsCompareOp(token)); + DCHECK(Token::IsCompareOp(token)); SetOperandAt(0, context); SetOperandAt(1, left); SetOperandAt(2, right); @@ -4497,7 +4596,7 @@ InstanceType from() { return from_; } InstanceType to() { return to_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -4512,7 +4611,7 @@ : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { } HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to) : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) { - ASSERT(to == LAST_TYPE); // Others not implemented yet in backend. + DCHECK(to == LAST_TYPE); // Others not implemented yet in backend. 
} InstanceType from_; @@ -4569,7 +4668,7 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT Handle<String> class_name() const { return class_name_; } @@ -4586,8 +4685,8 @@ public: DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>); - Handle<String> type_literal() { return type_literal_.handle(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + Handle<String> type_literal() const { return type_literal_.handle(); } + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch) @@ -4618,7 +4717,7 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(InstanceOf) @@ -4977,7 +5076,7 @@ virtual HValue* Canonicalize() V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(Bitwise) @@ -4993,9 +5092,9 @@ Token::Value op, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right, HType::TaggedNumber()), + : HBitwiseBinaryOperation(context, left, right), op_(op) { - ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR); + DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR); // BIT_AND with a smi-range positive value will always unset the // entire sign-extension of the smi-sign. 
if (op == Token::BIT_AND && @@ -5200,7 +5299,7 @@ unsigned index() const { return index_; } ParameterKind kind() const { return kind_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); @@ -5236,7 +5335,7 @@ HValue* context() { return value(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(CallStub) @@ -5254,7 +5353,7 @@ public: DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int); - virtual void PrintDataTo(StringStream* stream); + virtual OStream& PrintDataTo(OStream& os) const; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); @@ -5294,7 +5393,7 @@ Unique<Cell> cell() const { return cell_; } bool RequiresHoleCheck() const; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual intptr_t Hashcode() V8_OVERRIDE { return cell_.Hashcode(); @@ -5333,14 +5432,25 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> { public: DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*, - Handle<Object>, bool); + Handle<String>, bool); HValue* context() { return OperandAt(0); } HValue* global_object() { return OperandAt(1); } - Handle<Object> name() const { return name_; } + Handle<String> name() const { return name_; } bool for_typeof() const { return for_typeof_; } + int slot() const { + DCHECK(FLAG_vector_ics && + slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot); + return slot_; + } + Handle<FixedArray> feedback_vector() const { return feedback_vector_; } + void SetVectorAndSlot(Handle<FixedArray> vector, int slot) { + DCHECK(FLAG_vector_ics); + feedback_vector_ 
= vector; + slot_ = slot; + } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -5349,20 +5459,20 @@ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric) private: - HLoadGlobalGeneric(HValue* context, - HValue* global_object, - Handle<Object> name, - bool for_typeof) - : name_(name), - for_typeof_(for_typeof) { + HLoadGlobalGeneric(HValue* context, HValue* global_object, + Handle<String> name, bool for_typeof) + : name_(name), for_typeof_(for_typeof), + slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) { SetOperandAt(0, context); SetOperandAt(1, global_object); set_representation(Representation::Tagged()); SetAllSideEffects(); } - Handle<Object> name_; + Handle<String> name_; bool for_typeof_; + Handle<FixedArray> feedback_vector_; + int slot_; }; @@ -5389,8 +5499,15 @@ // Maximum instance size for which allocations will be inlined. 
static const int kMaxInlineSize = 64 * kPointerSize; - HValue* context() { return OperandAt(0); } - HValue* size() { return OperandAt(1); } + HValue* context() const { return OperandAt(0); } + HValue* size() const { return OperandAt(1); } + + bool has_size_upper_bound() { return size_upper_bound_ != NULL; } + HConstant* size_upper_bound() { return size_upper_bound_; } + void set_size_upper_bound(HConstant* value) { + DCHECK(size_upper_bound_ == NULL); + size_upper_bound_ = value; + } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { if (index == 0) { @@ -5443,7 +5560,7 @@ virtual bool HandleSideEffectDominator(GVNFlag side_effect, HValue* dominator) V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(Allocate) @@ -5467,9 +5584,10 @@ : HTemplateInstruction<2>(type), flags_(ComputeFlags(pretenure_flag, instance_type)), dominating_allocate_(NULL), - filler_free_space_size_(NULL) { + filler_free_space_size_(NULL), + size_upper_bound_(NULL) { SetOperandAt(0, context); - SetOperandAt(1, size); + UpdateSize(size); set_representation(Representation::Tagged()); SetFlag(kTrackSideEffectDominators); SetChangesFlag(kNewSpacePromotion); @@ -5516,6 +5634,11 @@ void UpdateSize(HValue* size) { SetOperandAt(1, size); + if (size->IsInteger32Constant()) { + size_upper_bound_ = HConstant::cast(size); + } else { + size_upper_bound_ = NULL; + } } HAllocate* GetFoldableDominator(HAllocate* dominator); @@ -5537,6 +5660,7 @@ Handle<Map> known_initial_map_; HAllocate* dominating_allocate_; HStoreNamedField* filler_free_space_size_; + HConstant* size_upper_bound_; }; @@ -5572,45 +5696,46 @@ HValue* context, HValue* value, HValue* offset, - HType type = HType::Tagged()) { + HType type) { return new(zone) HInnerAllocatedObject(value, offset, type); } - HValue* base_object() { return OperandAt(0); } - HValue* offset() { return OperandAt(1); } + 
HValue* base_object() const { return OperandAt(0); } + HValue* offset() const { return OperandAt(1); } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return index == 0 ? Representation::Tagged() : Representation::Integer32(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject) private: HInnerAllocatedObject(HValue* value, HValue* offset, - HType type = HType::Tagged()) - : HTemplateInstruction<2>(type) { - ASSERT(value->IsAllocate()); + HType type) : HTemplateInstruction<2>(type) { + DCHECK(value->IsAllocate()); + DCHECK(type.IsHeapObject()); SetOperandAt(0, value); SetOperandAt(1, offset); - set_type(type); set_representation(Representation::Tagged()); } }; inline bool StoringValueNeedsWriteBarrier(HValue* value) { - return !value->type().IsBoolean() - && !value->type().IsSmi() + return !value->type().IsSmi() + && !value->type().IsNull() + && !value->type().IsBoolean() + && !value->type().IsUndefined() && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable()); } inline bool ReceiverObjectNeedsWriteBarrier(HValue* object, HValue* value, - HValue* new_space_dominator) { + HValue* dominator) { while (object->IsInnerAllocatedObject()) { object = HInnerAllocatedObject::cast(object)->base_object(); } @@ -5622,24 +5747,46 @@ // Stores to external references require no write barriers return false; } - if (object != new_space_dominator) return true; - if (object->IsAllocate()) { - // Stores to new space allocations require no write barriers if the object - // is the new space dominator. + // We definitely need a write barrier unless the object is the allocation + // dominator. + if (object == dominator && object->IsAllocate()) { + // Stores to new space allocations require no write barriers. 
if (HAllocate::cast(object)->IsNewSpaceAllocation()) { return false; } - // Likewise we don't need a write barrier if we store a value that - // originates from the same allocation (via allocation folding). + // Stores to old space allocations require no write barriers if the value is + // a constant provably not in new space. + if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) { + return false; + } + // Stores to old space allocations require no write barriers if the value is + // an old space allocation. while (value->IsInnerAllocatedObject()) { value = HInnerAllocatedObject::cast(value)->base_object(); } - return object != value; + if (value->IsAllocate() && + !HAllocate::cast(value)->IsNewSpaceAllocation()) { + return false; + } } return true; } +inline PointersToHereCheck PointersToHereCheckForObject(HValue* object, + HValue* dominator) { + while (object->IsInnerAllocatedObject()) { + object = HInnerAllocatedObject::cast(object)->base_object(); + } + if (object == dominator && + object->IsAllocate() && + HAllocate::cast(object)->IsNewSpaceAllocation()) { + return kPointersToHereAreAlwaysInteresting; + } + return kPointersToHereMaybeInteresting; +} + + class HStoreGlobalCell V8_FINAL : public HUnaryOperation { public: DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*, @@ -5660,7 +5807,7 @@ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell) @@ -5694,20 +5841,8 @@ kCheckReturnUndefined }; - HLoadContextSlot(HValue* context, Variable* var) - : HUnaryOperation(context), slot_index_(var->index()) { - ASSERT(var->IsContextSlot()); - switch (var->mode()) { - case LET: - case CONST: - mode_ = kCheckDeoptimize; - break; - case CONST_LEGACY: - mode_ = kCheckReturnUndefined; - break; - default: - mode_ = 
kNoCheck; - } + HLoadContextSlot(HValue* context, int slot_index, Mode mode) + : HUnaryOperation(context), slot_index_(slot_index), mode_(mode) { set_representation(Representation::Tagged()); SetFlag(kUseGVN); SetDependsOnFlag(kContextSlots); @@ -5728,7 +5863,7 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot) @@ -5764,8 +5899,8 @@ DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int, Mode, HValue*); - HValue* context() { return OperandAt(0); } - HValue* value() { return OperandAt(1); } + HValue* context() const { return OperandAt(0); } + HValue* value() const { return OperandAt(1); } int slot_index() const { return slot_index_; } Mode mode() const { return mode_; } @@ -5785,7 +5920,7 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot) @@ -5818,6 +5953,10 @@ return portion() == kStringLengths; } + inline bool IsMap() const { + return portion() == kMaps; + } + inline int offset() const { return OffsetField::decode(value_); } @@ -5951,9 +6090,14 @@ return HObjectAccess(kMaps, JSObject::kMapOffset); } - static HObjectAccess ForMapInstanceSize() { + static HObjectAccess ForMapAsInteger32() { + return HObjectAccess(kMaps, JSObject::kMapOffset, + Representation::Integer32()); + } + + static HObjectAccess ForMapInObjectProperties() { return HObjectAccess(kInobject, - Map::kInstanceSizeOffset, + Map::kInObjectPropertiesOffset, Representation::UInteger8()); } @@ -5963,6 +6107,40 @@ Representation::UInteger8()); } + static HObjectAccess ForMapInstanceSize() { + return HObjectAccess(kInobject, + Map::kInstanceSizeOffset, + Representation::UInteger8()); + } + + static HObjectAccess ForMapBitField() { + return HObjectAccess(kInobject, + 
Map::kBitFieldOffset, + Representation::UInteger8()); + } + + static HObjectAccess ForMapBitField2() { + return HObjectAccess(kInobject, + Map::kBitField2Offset, + Representation::UInteger8()); + } + + static HObjectAccess ForNameHashField() { + return HObjectAccess(kInobject, + Name::kHashFieldOffset, + Representation::Integer32()); + } + + static HObjectAccess ForMapInstanceTypeAndBitField() { + STATIC_ASSERT((Map::kInstanceTypeAndBitFieldOffset & 1) == 0); + // Ensure the two fields share one 16-bit word, endian-independent. + STATIC_ASSERT((Map::kBitFieldOffset & ~1) == + (Map::kInstanceTypeOffset & ~1)); + return HObjectAccess(kInobject, + Map::kInstanceTypeAndBitFieldOffset, + Representation::UInteger16()); + } + static HObjectAccess ForPropertyCellValue() { return HObjectAccess(kInobject, PropertyCell::kValueOffset); } @@ -5980,6 +6158,11 @@ Handle<String>::null(), false, false); } + static HObjectAccess ForExternalUInteger8() { + return HObjectAccess(kExternalMemory, 0, Representation::UInteger8(), + Handle<String>::null(), false, false); + } + // Create an access to an offset in a fixed array header. 
static HObjectAccess ForFixedArrayHeader(int offset); @@ -6025,6 +6208,11 @@ JSArrayBuffer::kBackingStoreOffset, Representation::External()); } + static HObjectAccess ForJSArrayBufferByteLength() { + return HObjectAccess::ForObservableJSObjectOffset( + JSArrayBuffer::kByteLengthOffset, Representation::Tagged()); + } + static HObjectAccess ForExternalArrayExternalPointer() { return HObjectAccess::ForObservableJSObjectOffset( ExternalArray::kExternalPointerOffset, Representation::External()); @@ -6059,8 +6247,6 @@ return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset); } - void PrintTo(StringStream* stream) const; - inline bool Equals(HObjectAccess that) const { return value_ == that.value_; // portion and offset must match } @@ -6096,12 +6282,12 @@ OffsetField::encode(offset)), name_(name) { // assert that the fields decode correctly - ASSERT(this->offset() == offset); - ASSERT(this->portion() == portion); - ASSERT(this->immutable() == immutable); - ASSERT(this->existing_inobject_property() == existing_inobject_property); - ASSERT(RepresentationField::decode(value_) == representation.kind()); - ASSERT(!this->existing_inobject_property() || IsInobject()); + DCHECK(this->offset() == offset); + DCHECK(this->portion() == portion); + DCHECK(this->immutable() == immutable); + DCHECK(this->existing_inobject_property() == existing_inobject_property); + DCHECK(RepresentationField::decode(value_) == representation.kind()); + DCHECK(!this->existing_inobject_property() || IsInobject()); } class PortionField : public BitField<Portion, 0, 3> {}; @@ -6116,6 +6302,7 @@ friend class HLoadNamedField; friend class HStoreNamedField; friend class SideEffectsTracker; + friend OStream& operator<<(OStream& os, const HObjectAccess& access); inline Portion portion() const { return PortionField::decode(value_); @@ -6123,14 +6310,19 @@ }; +OStream& operator<<(OStream& os, const HObjectAccess& access); + + class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> { public: - 
DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, HValue*, - HObjectAccess); + DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, + HValue*, HObjectAccess); + DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*, + HObjectAccess, const UniqueSet<Map>*, HType); - HValue* object() { return OperandAt(0); } - HValue* dependency() { - ASSERT(HasDependency()); + HValue* object() const { return OperandAt(0); } + HValue* dependency() const { + DCHECK(HasDependency()); return OperandAt(1); } bool HasDependency() const { return OperandAt(0) != OperandAt(1); } @@ -6139,6 +6331,8 @@ return access_.representation(); } + const UniqueSet<Map>* maps() const { return maps_; } + virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; } virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE { return !access().IsInobject() || access().offset() >= size; @@ -6151,23 +6345,39 @@ return Representation::Tagged(); } virtual Range* InferRange(Zone* zone) V8_OVERRIDE; - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT + + bool CanBeReplacedWith(HValue* other) const { + if (!CheckFlag(HValue::kCantBeReplaced)) return false; + if (!type().Equals(other->type())) return false; + if (!representation().Equals(other->representation())) return false; + if (!other->IsLoadNamedField()) return true; + HLoadNamedField* that = HLoadNamedField::cast(other); + if (this->maps_ == that->maps_) return true; + if (this->maps_ == NULL || that->maps_ == NULL) return false; + return this->maps_->IsSubset(that->maps_); + } DECLARE_CONCRETE_INSTRUCTION(LoadNamedField) protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { - HLoadNamedField* b = HLoadNamedField::cast(other); - return access_.Equals(b->access_); + HLoadNamedField* that = HLoadNamedField::cast(other); + if (!this->access_.Equals(that->access_)) return false; + if (this->maps_ == that->maps_) return true; + return 
(this->maps_ != NULL && + that->maps_ != NULL && + this->maps_->Equals(that->maps_)); } private: HLoadNamedField(HValue* object, HValue* dependency, - HObjectAccess access) : access_(access) { - ASSERT(object != NULL); + HObjectAccess access) + : access_(access), maps_(NULL) { + DCHECK_NOT_NULL(object); SetOperandAt(0, object); - SetOperandAt(1, dependency != NULL ? dependency : object); + SetOperandAt(1, dependency ? dependency : object); Representation representation = access.representation(); if (representation.IsInteger8() || @@ -6187,7 +6397,7 @@ representation.IsInteger32()) { set_representation(representation); } else if (representation.IsHeapObject()) { - set_type(HType::NonPrimitive()); + set_type(HType::HeapObject()); set_representation(Representation::Tagged()); } else { set_representation(Representation::Tagged()); @@ -6195,9 +6405,30 @@ access.SetGVNFlags(this, LOAD); } + HLoadNamedField(HValue* object, + HValue* dependency, + HObjectAccess access, + const UniqueSet<Map>* maps, + HType type) + : HTemplateInstruction<2>(type), access_(access), maps_(maps) { + DCHECK_NOT_NULL(maps); + DCHECK_NE(0, maps->size()); + + DCHECK_NOT_NULL(object); + SetOperandAt(0, object); + SetOperandAt(1, dependency ? 
dependency : object); + + DCHECK(access.representation().IsHeapObject()); + DCHECK(type.IsHeapObject()); + set_representation(Representation::Tagged()); + + access.SetGVNFlags(this, LOAD); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } HObjectAccess access_; + const UniqueSet<Map>* maps_; }; @@ -6206,21 +6437,34 @@ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*, Handle<Object>); - HValue* context() { return OperandAt(0); } - HValue* object() { return OperandAt(1); } + HValue* context() const { return OperandAt(0); } + HValue* object() const { return OperandAt(1); } Handle<Object> name() const { return name_; } + int slot() const { + DCHECK(FLAG_vector_ics && + slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot); + return slot_; + } + Handle<FixedArray> feedback_vector() const { return feedback_vector_; } + void SetVectorAndSlot(Handle<FixedArray> vector, int slot) { + DCHECK(FLAG_vector_ics); + feedback_vector_ = vector; + slot_ = slot; + } + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric) private: HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name) - : name_(name) { + : name_(name), + slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) { SetOperandAt(0, context); SetOperandAt(1, object); set_representation(Representation::Tagged()); @@ -6228,6 +6472,8 @@ } Handle<Object> name_; + Handle<FixedArray> feedback_vector_; + int slot_; }; @@ -6259,11 +6505,12 @@ public: virtual HValue* GetKey() = 0; virtual void SetKey(HValue* key) = 0; - virtual void SetIndexOffset(uint32_t index_offset) = 0; - virtual int MaxIndexOffsetBits() = 0; - virtual bool IsDehoisted() = 0; + virtual ElementsKind elements_kind() const = 0; + // TryIncreaseBaseOffset returns false if 
overflow would result. + virtual bool TryIncreaseBaseOffset(uint32_t increase_by_value) = 0; + virtual bool IsDehoisted() const = 0; virtual void SetDehoisted(bool is_dehoisted) = 0; - virtual ~ArrayInstructionInterface() { }; + virtual ~ArrayInstructionInterface() { } static Representation KeyedAccessIndexRequirement(Representation r) { return r.IsInteger32() || SmiValuesAre32Bits() @@ -6272,6 +6519,8 @@ }; +static const int kDefaultKeyedHeaderOffsetSentinel = -1; + enum LoadKeyedHoleMode { NEVER_RETURN_HOLE, ALLOW_RETURN_HOLE @@ -6285,6 +6534,8 @@ ElementsKind); DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*, ElementsKind, LoadKeyedHoleMode); + DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*, + ElementsKind, LoadKeyedHoleMode, int); bool is_external() const { return IsExternalArrayElementsKind(elements_kind()); @@ -6295,27 +6546,22 @@ bool is_typed_elements() const { return is_external() || is_fixed_typed_array(); } - HValue* elements() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* dependency() { - ASSERT(HasDependency()); + HValue* elements() const { return OperandAt(0); } + HValue* key() const { return OperandAt(1); } + HValue* dependency() const { + DCHECK(HasDependency()); return OperandAt(2); } bool HasDependency() const { return OperandAt(0) != OperandAt(2); } - uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); } - void SetIndexOffset(uint32_t index_offset) { - bit_field_ = IndexOffsetField::update(bit_field_, index_offset); - } - virtual int MaxIndexOffsetBits() { - return kBitsForIndexOffset; - } + uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); } + bool TryIncreaseBaseOffset(uint32_t increase_by_value); HValue* GetKey() { return key(); } void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); } + bool IsDehoisted() const { return IsDehoistedField::decode(bit_field_); } void 
SetDehoisted(bool is_dehoisted) { bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted); } - ElementsKind elements_kind() const { + virtual ElementsKind elements_kind() const V8_OVERRIDE { return ElementsKindField::decode(bit_field_); } LoadKeyedHoleMode hole_mode() const { @@ -6342,7 +6588,7 @@ return RequiredInputRepresentation(index); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT bool UsesMustHandleHole() const; bool AllUsesCanTreatHoleAsNaN() const; @@ -6357,7 +6603,7 @@ if (!other->IsLoadKeyed()) return false; HLoadKeyed* other_load = HLoadKeyed::cast(other); - if (IsDehoisted() && index_offset() != other_load->index_offset()) + if (IsDehoisted() && base_offset() != other_load->base_offset()) return false; return elements_kind() == other_load->elements_kind(); } @@ -6367,10 +6613,15 @@ HValue* key, HValue* dependency, ElementsKind elements_kind, - LoadKeyedHoleMode mode = NEVER_RETURN_HOLE) + LoadKeyedHoleMode mode = NEVER_RETURN_HOLE, + int offset = kDefaultKeyedHeaderOffsetSentinel) : bit_field_(0) { + offset = offset == kDefaultKeyedHeaderOffsetSentinel + ? GetDefaultHeaderSizeForElementsKind(elements_kind) + : offset; bit_field_ = ElementsKindField::encode(elements_kind) | - HoleModeField::encode(mode); + HoleModeField::encode(mode) | + BaseOffsetField::encode(offset); SetOperandAt(0, obj); SetOperandAt(1, key); @@ -6379,7 +6630,7 @@ if (!is_typed_elements()) { // I can detect the case between storing double (holey and fast) and // smi/object by looking at elements_kind_. 
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) || + DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) || IsFastDoubleElementsKind(elements_kind)); if (IsFastSmiOrObjectElementsKind(elements_kind)) { @@ -6433,16 +6684,16 @@ enum LoadKeyedBits { kBitsForElementsKind = 5, kBitsForHoleMode = 1, - kBitsForIndexOffset = 25, + kBitsForBaseOffset = 25, kBitsForIsDehoisted = 1, kStartElementsKind = 0, kStartHoleMode = kStartElementsKind + kBitsForElementsKind, - kStartIndexOffset = kStartHoleMode + kBitsForHoleMode, - kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset + kStartBaseOffset = kStartHoleMode + kBitsForHoleMode, + kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset }; - STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset + + STATIC_ASSERT((kBitsForElementsKind + kBitsForBaseOffset + kBitsForIsDehoisted) <= sizeof(uint32_t)*8); STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind)); class ElementsKindField: @@ -6451,8 +6702,8 @@ class HoleModeField: public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode> {}; // NOLINT - class IndexOffsetField: - public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset> + class BaseOffsetField: + public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset> {}; // NOLINT class IsDehoistedField: public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted> @@ -6465,11 +6716,22 @@ public: DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*, HValue*); - HValue* object() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* context() { return OperandAt(2); } + HValue* object() const { return OperandAt(0); } + HValue* key() const { return OperandAt(1); } + HValue* context() const { return OperandAt(2); } + int slot() const { + DCHECK(FLAG_vector_ics && + slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot); + return slot_; + } + Handle<FixedArray> feedback_vector() const { return feedback_vector_; } + void 
SetVectorAndSlot(Handle<FixedArray> vector, int slot) { + DCHECK(FLAG_vector_ics); + feedback_vector_ = vector; + slot_ = slot; + } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { // tagged[tagged] @@ -6481,13 +6743,17 @@ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric) private: - HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) { + HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) + : slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) { set_representation(Representation::Tagged()); SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, context); SetAllSideEffects(); } + + Handle<FixedArray> feedback_vector_; + int slot_; }; @@ -6543,24 +6809,19 @@ } virtual bool HandleSideEffectDominator(GVNFlag side_effect, HValue* dominator) V8_OVERRIDE { - ASSERT(side_effect == kNewSpacePromotion); + DCHECK(side_effect == kNewSpacePromotion); if (!FLAG_use_write_barrier_elimination) return false; - new_space_dominator_ = dominator; + dominator_ = dominator; return false; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - - void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; } - bool IsSkipWriteBarrier() const { - return write_barrier_mode_ == SKIP_WRITE_BARRIER; - } + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT HValue* object() const { return OperandAt(0); } HValue* value() const { return OperandAt(1); } HValue* transition() const { return OperandAt(2); } HObjectAccess access() const { return access_; } - HValue* new_space_dominator() const { return new_space_dominator_; } + HValue* dominator() const { return dominator_; } bool has_transition() const { return has_transition_; } StoreFieldOrKeyedMode store_mode() const { return store_mode_; } @@ -6573,32 +6834,36 @@ } } - void SetTransition(HConstant* map_constant, CompilationInfo* 
info) { - ASSERT(!has_transition()); // Only set once. - Handle<Map> map = Handle<Map>::cast(map_constant->handle(info->isolate())); - if (map->CanBeDeprecated()) { - map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info); - } - SetOperandAt(2, map_constant); + void SetTransition(HConstant* transition) { + DCHECK(!has_transition()); // Only set once. + SetOperandAt(2, transition); has_transition_ = true; + SetChangesFlag(kMaps); } - bool NeedsWriteBarrier() { - ASSERT(!field_representation().IsDouble() || !has_transition()); - if (IsSkipWriteBarrier()) return false; + bool NeedsWriteBarrier() const { + DCHECK(!field_representation().IsDouble() || !has_transition()); if (field_representation().IsDouble()) return false; if (field_representation().IsSmi()) return false; if (field_representation().IsInteger32()) return false; if (field_representation().IsExternal()) return false; return StoringValueNeedsWriteBarrier(value()) && - ReceiverObjectNeedsWriteBarrier(object(), value(), - new_space_dominator()); + ReceiverObjectNeedsWriteBarrier(object(), value(), dominator()); } bool NeedsWriteBarrierForMap() { - if (IsSkipWriteBarrier()) return false; return ReceiverObjectNeedsWriteBarrier(object(), transition(), - new_space_dominator()); + dominator()); + } + + SmiCheck SmiCheckForWriteBarrier() const { + if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK; + if (value()->type().IsHeapObject()) return OMIT_SMI_CHECK; + return INLINE_SMI_CHECK; + } + + PointersToHereCheck PointersToHereCheckForValue() const { + return PointersToHereCheckForObject(value(), dominator()); } Representation field_representation() const { @@ -6609,19 +6874,31 @@ SetOperandAt(1, value); } + bool CanBeReplacedWith(HStoreNamedField* that) const { + if (!this->access().Equals(that->access())) return false; + if (SmiValuesAre32Bits() && + this->field_representation().IsSmi() && + this->store_mode() == INITIALIZING_STORE && + that->store_mode() == 
STORE_TO_INITIALIZED_ENTRY) { + // We cannot replace an initializing store to a smi field with a store to + // an initialized entry on 64-bit architectures (with 32-bit smis). + return false; + } + return true; + } + private: HStoreNamedField(HValue* obj, HObjectAccess access, HValue* val, StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE) : access_(access), - new_space_dominator_(NULL), - write_barrier_mode_(UPDATE_WRITE_BARRIER), + dominator_(NULL), has_transition_(false), store_mode_(store_mode) { // Stores to a non existing in-object property are allowed only to the // newly allocated objects (via HAllocate or HInnerAllocatedObject). - ASSERT(!access.IsInobject() || access.existing_inobject_property() || + DCHECK(!access.IsInobject() || access.existing_inobject_property() || obj->IsAllocate() || obj->IsInnerAllocatedObject()); SetOperandAt(0, obj); SetOperandAt(1, val); @@ -6630,8 +6907,7 @@ } HObjectAccess access_; - HValue* new_space_dominator_; - WriteBarrierMode write_barrier_mode_ : 1; + HValue* dominator_; bool has_transition_ : 1; StoreFieldOrKeyedMode store_mode_ : 1; }; @@ -6642,13 +6918,13 @@ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*, Handle<String>, HValue*, StrictMode); - HValue* object() { return OperandAt(0); } - HValue* value() { return OperandAt(1); } - HValue* context() { return OperandAt(2); } - Handle<String> name() { return name_; } - StrictMode strict_mode() { return strict_mode_; } + HValue* object() const { return OperandAt(0); } + HValue* value() const { return OperandAt(1); } + HValue* context() const { return OperandAt(2); } + Handle<String> name() const { return name_; } + StrictMode strict_mode() const { return strict_mode_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -6682,6 +6958,8 @@ 
ElementsKind); DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*, ElementsKind, StoreFieldOrKeyedMode); + DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*, + ElementsKind, StoreFieldOrKeyedMode, int); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { // kind_fast: tagged[int32] = tagged @@ -6697,20 +6975,29 @@ OperandAt(1)->representation()); } - ASSERT_EQ(index, 2); - if (IsDoubleOrFloatElementsKind(elements_kind())) { + DCHECK_EQ(index, 2); + return RequiredValueRepresentation(elements_kind_, store_mode_); + } + + static Representation RequiredValueRepresentation( + ElementsKind kind, StoreFieldOrKeyedMode mode) { + if (IsDoubleOrFloatElementsKind(kind)) { return Representation::Double(); } - if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) { + + if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() && + mode == STORE_TO_INITIALIZED_ENTRY) { return Representation::Integer32(); } - if (IsFastSmiElementsKind(elements_kind())) { + + if (IsFastSmiElementsKind(kind)) { return Representation::Smi(); } - return is_external() || is_fixed_typed_array() - ? Representation::Integer32() - : Representation::Tagged(); + return IsExternalArrayElementsKind(kind) || + IsFixedTypedArrayElementsKind(kind) + ? Representation::Integer32() + : Representation::Tagged(); } bool is_external() const { @@ -6730,38 +7017,25 @@ if (IsUninitialized()) { return Representation::None(); } - if (IsDoubleOrFloatElementsKind(elements_kind())) { - return Representation::Double(); - } - if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) { - return Representation::Integer32(); - } - if (IsFastSmiElementsKind(elements_kind())) { - return Representation::Smi(); - } - if (is_typed_elements()) { - return Representation::Integer32(); - } + Representation r = RequiredValueRepresentation(elements_kind_, store_mode_); // For fast object elements kinds, don't assume anything. 
- return Representation::None(); + if (r.IsTagged()) return Representation::None(); + return r; } - HValue* elements() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* value() { return OperandAt(2); } + HValue* elements() const { return OperandAt(0); } + HValue* key() const { return OperandAt(1); } + HValue* value() const { return OperandAt(2); } bool value_is_smi() const { return IsFastSmiElementsKind(elements_kind_); } StoreFieldOrKeyedMode store_mode() const { return store_mode_; } ElementsKind elements_kind() const { return elements_kind_; } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } - virtual int MaxIndexOffsetBits() { - return 31 - ElementsKindToShiftSize(elements_kind_); - } + uint32_t base_offset() const { return base_offset_; } + bool TryIncreaseBaseOffset(uint32_t increase_by_value); HValue* GetKey() { return key(); } void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } + bool IsDehoisted() const { return is_dehoisted_; } void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } bool IsUninitialized() { return is_uninitialized_; } void SetUninitialized(bool is_uninitialized) { @@ -6774,46 +7048,49 @@ virtual bool HandleSideEffectDominator(GVNFlag side_effect, HValue* dominator) V8_OVERRIDE { - ASSERT(side_effect == kNewSpacePromotion); - new_space_dominator_ = dominator; + DCHECK(side_effect == kNewSpacePromotion); + dominator_ = dominator; return false; } - HValue* new_space_dominator() const { return new_space_dominator_; } + HValue* dominator() const { return dominator_; } bool NeedsWriteBarrier() { if (value_is_smi()) { return false; } else { return StoringValueNeedsWriteBarrier(value()) && - ReceiverObjectNeedsWriteBarrier(elements(), value(), - new_space_dominator()); + ReceiverObjectNeedsWriteBarrier(elements(), value(), dominator()); } } + PointersToHereCheck 
PointersToHereCheckForValue() const { + return PointersToHereCheckForObject(value(), dominator()); + } + bool NeedsCanonicalization(); - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(StoreKeyed) private: HStoreKeyed(HValue* obj, HValue* key, HValue* val, ElementsKind elements_kind, - StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE) + StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE, + int offset = kDefaultKeyedHeaderOffsetSentinel) : elements_kind_(elements_kind), - index_offset_(0), + base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel + ? GetDefaultHeaderSizeForElementsKind(elements_kind) + : offset), is_dehoisted_(false), is_uninitialized_(false), store_mode_(store_mode), - new_space_dominator_(NULL) { + dominator_(NULL) { SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, val); - ASSERT(store_mode != STORE_TO_INITIALIZED_ENTRY || - elements_kind == FAST_SMI_ELEMENTS); - if (IsFastObjectElementsKind(elements_kind)) { SetFlag(kTrackSideEffectDominators); SetDependsOnFlag(kNewSpacePromotion); @@ -6842,11 +7119,11 @@ } ElementsKind elements_kind_; - uint32_t index_offset_; + uint32_t base_offset_; bool is_dehoisted_ : 1; bool is_uninitialized_ : 1; StoreFieldOrKeyedMode store_mode_: 1; - HValue* new_space_dominator_; + HValue* dominator_; }; @@ -6855,18 +7132,18 @@ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*, HValue*, HValue*, StrictMode); - HValue* object() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* value() { return OperandAt(2); } - HValue* context() { return OperandAt(3); } - StrictMode strict_mode() { return strict_mode_; } + HValue* object() const { return OperandAt(0); } + HValue* key() const { return OperandAt(1); } + HValue* value() const { return OperandAt(2); } + HValue* context() const { return OperandAt(3); } + StrictMode strict_mode() const { return 
strict_mode_; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { // tagged[tagged] = tagged return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric) @@ -6903,14 +7180,14 @@ return Representation::Tagged(); } - HValue* object() { return OperandAt(0); } - HValue* context() { return OperandAt(1); } - Unique<Map> original_map() { return original_map_; } - Unique<Map> transitioned_map() { return transitioned_map_; } - ElementsKind from_kind() { return from_kind_; } - ElementsKind to_kind() { return to_kind_; } + HValue* object() const { return OperandAt(0); } + HValue* context() const { return OperandAt(1); } + Unique<Map> original_map() const { return original_map_; } + Unique<Map> transitioned_map() const { return transitioned_map_; } + ElementsKind from_kind() const { return from_kind_; } + ElementsKind to_kind() const { return to_kind_; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind) @@ -6968,7 +7245,7 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT DECLARE_CONCRETE_INSTRUCTION(StringAdd) @@ -7203,10 +7480,10 @@ public: DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*); - HValue* context() { return OperandAt(0); } - HValue* value() { return OperandAt(1); } + HValue* context() const { return OperandAt(0); } + HValue* value() const { return OperandAt(1); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -7262,10 
+7539,10 @@ // This instruction is not marked as kChangesMaps, but does // change the map of the input operand. Use it only when creating // object literals via a runtime call. - ASSERT(value->IsCallRuntime()); + DCHECK(value->IsCallRuntime()); #ifdef DEBUG const Runtime::Function* function = HCallRuntime::cast(value)->function(); - ASSERT(function->function_id == Runtime::kHiddenCreateObjectLiteral); + DCHECK(function->function_id == Runtime::kCreateObjectLiteral); #endif } @@ -7323,7 +7600,7 @@ if (encoding() == String::ONE_BYTE_ENCODING) { return new(zone) Range(0, String::kMaxOneByteCharCode); } else { - ASSERT_EQ(String::TWO_BYTE_ENCODING, encoding()); + DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding()); return new(zone) Range(0, String::kMaxUtf16CodeUnit); } } @@ -7390,14 +7667,17 @@ return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual HType CalculateInferredType() V8_OVERRIDE { - return HType::Tagged(); + if (value()->type().IsHeapObject()) return value()->type(); + return HType::HeapObject(); } - HValue* value() { return OperandAt(0); } - HValue* map() { return OperandAt(1); } + HValue* value() const { return OperandAt(0); } + HValue* map() const { return OperandAt(1); } + + virtual HValue* Canonicalize() V8_OVERRIDE; DECLARE_CONCRETE_INSTRUCTION(CheckMapValue) @@ -7409,8 +7689,8 @@ } private: - HCheckMapValue(HValue* value, - HValue* map) { + HCheckMapValue(HValue* value, HValue* map) + : HTemplateInstruction<2>(HType::HeapObject()) { SetOperandAt(0, value); SetOperandAt(1, map); set_representation(Representation::Tagged()); @@ -7429,10 +7709,10 @@ return Representation::Tagged(); } - HValue* context() { return OperandAt(0); } - HValue* enumerable() { return OperandAt(1); } + HValue* context() const { return OperandAt(0); } + HValue* enumerable() const { return OperandAt(1); } - virtual void PrintDataTo(StringStream* stream) 
V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual HType CalculateInferredType() V8_OVERRIDE { return HType::Tagged(); @@ -7459,9 +7739,9 @@ return Representation::Tagged(); } - HValue* enumerable() { return OperandAt(0); } - HValue* map() { return OperandAt(1); } - int idx() { return idx_; } + HValue* enumerable() const { return OperandAt(0); } + HValue* map() const { return OperandAt(1); } + int idx() const { return idx_; } HForInCacheArray* index_cache() { return index_cache_; @@ -7471,7 +7751,7 @@ index_cache_ = index_cache; } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual HType CalculateInferredType() V8_OVERRIDE { return HType::Tagged(); @@ -7495,21 +7775,28 @@ class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> { public: + DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*); + HLoadFieldByIndex(HValue* object, HValue* index) { SetOperandAt(0, object); SetOperandAt(1, index); + SetChangesFlag(kNewSpacePromotion); set_representation(Representation::Tagged()); } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { - return Representation::Tagged(); + if (index == 1) { + return Representation::Smi(); + } else { + return Representation::Tagged(); + } } - HValue* object() { return OperandAt(0); } - HValue* index() { return OperandAt(1); } + HValue* object() const { return OperandAt(0); } + HValue* index() const { return OperandAt(1); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual OStream& PrintDataTo(OStream& os) const V8_OVERRIDE; // NOLINT virtual HType CalculateInferredType() V8_OVERRIDE { return HType::Tagged(); @@ -7522,6 +7809,57 @@ }; +class HStoreFrameContext: public HUnaryOperation { + public: + DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*); + + HValue* context() { return OperandAt(0); } + + virtual Representation 
RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext) + private: + explicit HStoreFrameContext(HValue* context) + : HUnaryOperation(context) { + set_representation(Representation::Tagged()); + SetChangesFlag(kContextSlots); + } +}; + + +class HAllocateBlockContext: public HTemplateInstruction<2> { + public: + DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*, + HValue*, Handle<ScopeInfo>); + HValue* context() const { return OperandAt(0); } + HValue* function() const { return OperandAt(1); } + Handle<ScopeInfo> scope_info() const { return scope_info_; } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + virtual OStream& PrintDataTo(OStream& os) const; // NOLINT + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext) + + private: + HAllocateBlockContext(HValue* context, + HValue* function, + Handle<ScopeInfo> scope_info) + : scope_info_(scope_info) { + SetOperandAt(0, context); + SetOperandAt(1, function); + set_representation(Representation::Tagged()); + } + + Handle<ScopeInfo> scope_info_; +}; + + + #undef DECLARE_INSTRUCTION #undef DECLARE_CONCRETE_INSTRUCTION diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-load-elimination.cc nodejs-0.11.15/deps/v8/src/hydrogen-load-elimination.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-load-elimination.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-load-elimination.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "hydrogen-alias-analysis.h" -#include "hydrogen-load-elimination.h" -#include "hydrogen-instructions.h" -#include "hydrogen-flow-engine.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/hydrogen-alias-analysis.h" +#include "src/hydrogen-flow-engine.h" +#include "src/hydrogen-instructions.h" +#include "src/hydrogen-load-elimination.h" namespace v8 { namespace internal { @@ -48,11 +25,10 @@ // Recursively copy the entire linked list of field approximations. 
HFieldApproximation* Copy(Zone* zone) { - if (this == NULL) return NULL; HFieldApproximation* copy = new(zone) HFieldApproximation(); copy->object_ = this->object_; copy->last_value_ = this->last_value_; - copy->next_ = this->next_->Copy(zone); + copy->next_ = this->next_ == NULL ? NULL : this->next_->Copy(zone); return copy; } }; @@ -76,9 +52,7 @@ FieldOf(l->access()), l->object()->ActualValue()->id())); HValue* result = load(l); - if (result != instr && - result->type().Equals(instr->type()) && - result->representation().Equals(instr->representation())) { + if (result != instr && l->CanBeReplacedWith(result)) { // The load can be replaced with a previous load or a value. TRACE((" replace L%d -> v%d\n", instr->id(), result->id())); instr->DeleteAndReplaceWith(result); @@ -148,7 +122,7 @@ HLoadEliminationTable* pred_state, HBasicBlock* pred_block, Zone* zone) { - ASSERT(pred_state != NULL); + DCHECK(pred_state != NULL); if (succ_state == NULL) { return pred_state->Copy(succ_block, pred_block, zone); } else { @@ -161,7 +135,7 @@ static HLoadEliminationTable* Finish(HLoadEliminationTable* state, HBasicBlock* block, Zone* zone) { - ASSERT(state != NULL); + DCHECK(state != NULL); return state; } @@ -173,7 +147,7 @@ new(zone) HLoadEliminationTable(zone, aliasing_); copy->EnsureFields(fields_.length()); for (int i = 0; i < fields_.length(); i++) { - copy->fields_[i] = fields_[i]->Copy(zone); + copy->fields_[i] = fields_[i] == NULL ? NULL : fields_[i]->Copy(zone); } if (FLAG_trace_load_elimination) { TRACE((" copy-to B%d\n", succ->block_id())); @@ -226,7 +200,7 @@ // which the load should be replaced. Otherwise, return {instr}. HValue* load(HLoadNamedField* instr) { // There must be no loads from non observable in-object properties. - ASSERT(!instr->access().IsInobject() || + DCHECK(!instr->access().IsInobject() || instr->access().existing_inobject_property()); int field = FieldOf(instr->access()); @@ -408,7 +382,7 @@ // farthest away from the current instruction. 
HFieldApproximation* ReuseLastApproximation(int field) { HFieldApproximation* approx = fields_[field]; - ASSERT(approx != NULL); + DCHECK(approx != NULL); HFieldApproximation* prev = NULL; while (approx->next_ != NULL) { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-load-elimination.h nodejs-0.11.15/deps/v8/src/hydrogen-load-elimination.h --- nodejs-0.11.13/deps/v8/src/hydrogen-load-elimination.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-load-elimination.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_ #define V8_HYDROGEN_LOAD_ELIMINATION_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-mark-deoptimize.cc nodejs-0.11.15/deps/v8/src/hydrogen-mark-deoptimize.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-mark-deoptimize.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-mark-deoptimize.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-mark-deoptimize.h" +#include "src/hydrogen-mark-deoptimize.h" namespace v8 { namespace internal { @@ -43,8 +20,8 @@ void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) { - ASSERT(phi->CheckFlag(HValue::kAllowUndefinedAsNaN)); - ASSERT(worklist_.is_empty()); + DCHECK(phi->CheckFlag(HValue::kAllowUndefinedAsNaN)); + DCHECK(worklist_.is_empty()); // Push the phi onto the worklist phi->ClearFlag(HValue::kAllowUndefinedAsNaN); diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-mark-deoptimize.h nodejs-0.11.15/deps/v8/src/hydrogen-mark-deoptimize.h --- nodejs-0.11.13/deps/v8/src/hydrogen-mark-deoptimize.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-mark-deoptimize.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_MARK_DEOPTIMIZE_H_ #define V8_HYDROGEN_MARK_DEOPTIMIZE_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-mark-unreachable.cc nodejs-0.11.15/deps/v8/src/hydrogen-mark-unreachable.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-mark-unreachable.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-mark-unreachable.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-mark-unreachable.h" +#include "src/hydrogen-mark-unreachable.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-mark-unreachable.h nodejs-0.11.15/deps/v8/src/hydrogen-mark-unreachable.h --- nodejs-0.11.13/deps/v8/src/hydrogen-mark-unreachable.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-mark-unreachable.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_ #define V8_HYDROGEN_MARK_UNREACHABLE_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-osr.cc nodejs-0.11.15/deps/v8/src/hydrogen-osr.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-osr.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-osr.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen.h" -#include "hydrogen-osr.h" +#include "src/hydrogen.h" +#include "src/hydrogen-osr.h" namespace v8 { namespace internal { @@ -38,13 +15,13 @@ HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) { - ASSERT(HasOsrEntryAt(statement)); + DCHECK(HasOsrEntryAt(statement)); Zone* zone = builder_->zone(); HGraph* graph = builder_->graph(); // only one OSR point per compile is allowed. - ASSERT(graph->osr() == NULL); + DCHECK(graph->osr() == NULL); // remember this builder as the one OSR builder in the graph. graph->set_osr(this); diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-osr.h nodejs-0.11.15/deps/v8/src/hydrogen-osr.h --- nodejs-0.11.13/deps/v8/src/hydrogen-osr.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-osr.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_OSR_H_ #define V8_HYDROGEN_OSR_H_ -#include "hydrogen.h" -#include "ast.h" -#include "zone.h" +#include "src/hydrogen.h" +#include "src/ast.h" +#include "src/zone.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-range-analysis.cc nodejs-0.11.15/deps/v8/src/hydrogen-range-analysis.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-range-analysis.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-range-analysis.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-range-analysis.h" +#include "src/hydrogen-range-analysis.h" namespace v8 { namespace internal { @@ -49,7 +26,7 @@ if (FLAG_trace_range) { va_list arguments; va_start(arguments, msg); - OS::VPrint(msg, arguments); + base::OS::VPrint(msg, arguments); va_end(arguments); } } @@ -87,9 +64,9 @@ // Propagate flags for negative zero checks upwards from conversions // int32-to-tagged and int32-to-double. Representation from = instr->value()->representation(); - ASSERT(from.Equals(instr->from())); + DCHECK(from.Equals(instr->from())); if (from.IsSmiOrInteger32()) { - ASSERT(instr->to().IsTagged() || + DCHECK(instr->to().IsTagged() || instr->to().IsDouble() || instr->to().IsSmiOrInteger32()); PropagateMinusZeroChecks(instr->value()); @@ -123,12 +100,28 @@ block = NULL; } } + + // The ranges are not valid anymore due to SSI vs. SSA! 
+ PoisonRanges(); +} + + +void HRangeAnalysisPhase::PoisonRanges() { +#ifdef DEBUG + for (int i = 0; i < graph()->blocks()->length(); ++i) { + HBasicBlock* block = graph()->blocks()->at(i); + for (HInstructionIterator it(block); !it.Done(); it.Advance()) { + HInstruction* instr = it.Current(); + if (instr->HasRange()) instr->PoisonRange(); + } + } +#endif } void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test, HBasicBlock* dest) { - ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest)); + DCHECK((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest)); if (test->representation().IsSmiOrInteger32()) { Token::Value op = test->token(); if (test->SecondSuccessor() == dest) { @@ -177,7 +170,7 @@ void HRangeAnalysisPhase::InferRange(HValue* value) { - ASSERT(!value->HasRange()); + DCHECK(!value->HasRange()); if (!value->representation().IsNone()) { value->ComputeInitialRange(graph()->zone()); Range* range = value->range(); @@ -191,7 +184,7 @@ void HRangeAnalysisPhase::RollBackTo(int index) { - ASSERT(index <= changed_ranges_.length()); + DCHECK(index <= changed_ranges_.length()); for (int i = index; i < changed_ranges_.length(); ++i) { changed_ranges_[i]->RemoveLastAddedRange(); } @@ -220,8 +213,8 @@ void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) { - ASSERT(worklist_.is_empty()); - ASSERT(in_worklist_.IsEmpty()); + DCHECK(worklist_.is_empty()); + DCHECK(in_worklist_.IsEmpty()); AddToWorklist(value); while (!worklist_.is_empty()) { @@ -289,8 +282,8 @@ } in_worklist_.Clear(); - ASSERT(in_worklist_.IsEmpty()); - ASSERT(worklist_.is_empty()); + DCHECK(in_worklist_.IsEmpty()); + DCHECK(worklist_.is_empty()); } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-range-analysis.h nodejs-0.11.15/deps/v8/src/hydrogen-range-analysis.h --- nodejs-0.11.13/deps/v8/src/hydrogen-range-analysis.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-range-analysis.h 2015-01-20 
21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_HYDROGEN_RANGE_ANALYSIS_H_ #define V8_HYDROGEN_RANGE_ANALYSIS_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { @@ -57,6 +34,7 @@ worklist_.Add(value, zone()); } void PropagateMinusZeroChecks(HValue* value); + void PoisonRanges(); ZoneList<HValue*> changed_ranges_; diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-redundant-phi.cc nodejs-0.11.15/deps/v8/src/hydrogen-redundant-phi.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-redundant-phi.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-redundant-phi.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-redundant-phi.h" +#include "src/hydrogen-redundant-phi.h" namespace v8 { namespace internal { @@ -48,7 +25,7 @@ // Make sure that we *really* removed all redundant phis. for (int i = 0; i < blocks->length(); ++i) { for (int j = 0; j < blocks->at(i)->phis()->length(); j++) { - ASSERT(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL); + DCHECK(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL); } } #endif diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-redundant-phi.h nodejs-0.11.15/deps/v8/src/hydrogen-redundant-phi.h --- nodejs-0.11.13/deps/v8/src/hydrogen-redundant-phi.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-redundant-phi.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_REDUNDANT_PHI_H_ #define V8_HYDROGEN_REDUNDANT_PHI_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-removable-simulates.cc nodejs-0.11.15/deps/v8/src/hydrogen-removable-simulates.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-removable-simulates.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-removable-simulates.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,94 +1,180 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-removable-simulates.h" +#include "src/hydrogen-flow-engine.h" +#include "src/hydrogen-instructions.h" +#include "src/hydrogen-removable-simulates.h" namespace v8 { namespace internal { -void HMergeRemovableSimulatesPhase::Run() { - ZoneList<HSimulate*> mergelist(2, zone()); - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - // Make sure the merge list is empty at the start of a block. - ASSERT(mergelist.is_empty()); - // Nasty heuristic: Never remove the first simulate in a block. This - // just so happens to have a beneficial effect on register allocation. 
- bool first = true; - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); - if (current->IsLeaveInlined()) { - // Never fold simulates from inlined environments into simulates - // in the outer environment. - // (Before each HEnterInlined, there is a non-foldable HSimulate - // anyway, so we get the barrier in the other direction for free.) - // Simply remove all accumulated simulates without merging. This - // is safe because simulates after instructions with side effects - // are never added to the merge list. - while (!mergelist.is_empty()) { - mergelist.RemoveLast()->DeleteAndReplaceWith(NULL); - } - continue; - } - if (current->IsReturn()) { - // Drop mergeable simulates in the list. This is safe because - // simulates after instructions with side effects are never added - // to the merge list. - while (!mergelist.is_empty()) { - mergelist.RemoveLast()->DeleteAndReplaceWith(NULL); +class State : public ZoneObject { + public: + explicit State(Zone* zone) + : zone_(zone), mergelist_(2, zone), first_(true), mode_(NORMAL) { } + + State* Process(HInstruction* instr, Zone* zone) { + if (FLAG_trace_removable_simulates) { + PrintF("[%s with state %p in B%d: #%d %s]\n", + mode_ == NORMAL ? "processing" : "collecting", + reinterpret_cast<void*>(this), instr->block()->block_id(), + instr->id(), instr->Mnemonic()); + } + // Forward-merge "trains" of simulates after an instruction with observable + // side effects to keep live ranges short. + if (mode_ == COLLECT_CONSECUTIVE_SIMULATES) { + if (instr->IsSimulate()) { + HSimulate* current_simulate = HSimulate::cast(instr); + if (current_simulate->is_candidate_for_removal() && + !current_simulate->ast_id().IsNone()) { + Remember(current_simulate); + return this; } - continue; - } - // Skip the non-simulates and the first simulate. 
- if (!current->IsSimulate()) continue; - if (first) { - first = false; - continue; - } - HSimulate* current_simulate = HSimulate::cast(current); - if ((current_simulate->previous()->HasObservableSideEffects() && - !current_simulate->next()->IsSimulate()) || - !current_simulate->is_candidate_for_removal()) { - // This simulate is not suitable for folding. - // Fold the ones accumulated so far. - current_simulate->MergeWith(&mergelist); - continue; - } else { - // Accumulate this simulate for folding later on. - mergelist.Add(current_simulate, zone()); } + FlushSimulates(); + mode_ = NORMAL; } - - if (!mergelist.is_empty()) { + // Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid + // folding across HEnterInlined. + DCHECK(!(instr->IsEnterInlined() && + HSimulate::cast(instr->previous())->is_candidate_for_removal())); + if (instr->IsLeaveInlined() || instr->IsReturn()) { + // Never fold simulates from inlined environments into simulates in the + // outer environment. Simply remove all accumulated simulates without + // merging. This is safe because simulates after instructions with side + // effects are never added to the merge list. The same reasoning holds for + // return instructions. + RemoveSimulates(); + return this; + } + if (instr->IsControlInstruction()) { // Merge the accumulated simulates at the end of the block. - HSimulate* last = mergelist.RemoveLast(); - last->MergeWith(&mergelist); + FlushSimulates(); + return this; + } + // Skip the non-simulates and the first simulate. 
+ if (!instr->IsSimulate()) return this; + if (first_) { + first_ = false; + return this; + } + HSimulate* current_simulate = HSimulate::cast(instr); + if (!current_simulate->is_candidate_for_removal()) { + Remember(current_simulate); + FlushSimulates(); + } else if (current_simulate->ast_id().IsNone()) { + DCHECK(current_simulate->next()->IsEnterInlined()); + FlushSimulates(); + } else if (current_simulate->previous()->HasObservableSideEffects()) { + Remember(current_simulate); + mode_ = COLLECT_CONSECUTIVE_SIMULATES; + } else { + Remember(current_simulate); + } + + return this; + } + + static State* Merge(State* succ_state, + HBasicBlock* succ_block, + State* pred_state, + HBasicBlock* pred_block, + Zone* zone) { + return (succ_state == NULL) + ? pred_state->Copy(succ_block, pred_block, zone) + : succ_state->Merge(succ_block, pred_state, pred_block, zone); + } + + static State* Finish(State* state, HBasicBlock* block, Zone* zone) { + if (FLAG_trace_removable_simulates) { + PrintF("[preparing state %p for B%d]\n", reinterpret_cast<void*>(state), + block->block_id()); + } + // For our current local analysis, we should not remember simulates across + // block boundaries. + DCHECK(!state->HasRememberedSimulates()); + // Nasty heuristic: Never remove the first simulate in a block. This + // just so happens to have a beneficial effect on register allocation. 
+ state->first_ = true; + return state; + } + + private: + explicit State(const State& other) + : zone_(other.zone_), + mergelist_(other.mergelist_, other.zone_), + first_(other.first_), + mode_(other.mode_) { } + + enum Mode { NORMAL, COLLECT_CONSECUTIVE_SIMULATES }; + + bool HasRememberedSimulates() const { return !mergelist_.is_empty(); } + + void Remember(HSimulate* sim) { + mergelist_.Add(sim, zone_); + } + + void FlushSimulates() { + if (HasRememberedSimulates()) { + mergelist_.RemoveLast()->MergeWith(&mergelist_); + } + } + + void RemoveSimulates() { + while (HasRememberedSimulates()) { + mergelist_.RemoveLast()->DeleteAndReplaceWith(NULL); + } + } + + State* Copy(HBasicBlock* succ_block, HBasicBlock* pred_block, Zone* zone) { + State* copy = new(zone) State(*this); + if (FLAG_trace_removable_simulates) { + PrintF("[copy state %p from B%d to new state %p for B%d]\n", + reinterpret_cast<void*>(this), pred_block->block_id(), + reinterpret_cast<void*>(copy), succ_block->block_id()); + } + return copy; + } + + State* Merge(HBasicBlock* succ_block, + State* pred_state, + HBasicBlock* pred_block, + Zone* zone) { + // For our current local analysis, we should not remember simulates across + // block boundaries. + DCHECK(!pred_state->HasRememberedSimulates()); + DCHECK(!HasRememberedSimulates()); + if (FLAG_trace_removable_simulates) { + PrintF("[merge state %p from B%d into %p for B%d]\n", + reinterpret_cast<void*>(pred_state), pred_block->block_id(), + reinterpret_cast<void*>(this), succ_block->block_id()); } + return this; } + + Zone* zone_; + ZoneList<HSimulate*> mergelist_; + bool first_; + Mode mode_; +}; + + +// We don't use effects here. 
+class Effects : public ZoneObject { + public: + explicit Effects(Zone* zone) { } + bool Disabled() { return true; } + void Process(HInstruction* instr, Zone* zone) { } + void Apply(State* state) { } + void Union(Effects* that, Zone* zone) { } +}; + + +void HMergeRemovableSimulatesPhase::Run() { + HFlowEngine<State, Effects> engine(graph(), zone()); + State* state = new(zone()) State(zone()); + engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-removable-simulates.h nodejs-0.11.15/deps/v8/src/hydrogen-removable-simulates.h --- nodejs-0.11.13/deps/v8/src/hydrogen-removable-simulates.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-removable-simulates.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_ #define V8_HYDROGEN_REMOVABLE_SIMULATES_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-representation-changes.cc nodejs-0.11.15/deps/v8/src/hydrogen-representation-changes.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-representation-changes.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-representation-changes.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-representation-changes.h" +#include "src/hydrogen-representation-changes.h" namespace v8 { namespace internal { @@ -64,7 +41,7 @@ if (!use_value->operand_position(use_index).IsUnknown()) { new_value->set_position(use_value->operand_position(use_index)); } else { - ASSERT(!FLAG_hydrogen_track_positions || + DCHECK(!FLAG_hydrogen_track_positions || !graph()->info()->IsOptimizing()); } } @@ -74,6 +51,15 @@ } +static bool IsNonDeoptingIntToSmiChange(HChange* change) { + Representation from_rep = change->from(); + Representation to_rep = change->to(); + // Flags indicating Uint32 operations are set in a later Hydrogen phase. 
+ DCHECK(!change->CheckFlag(HValue::kUint32)); + return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits(); +} + + void HRepresentationChangesPhase::InsertRepresentationChangesForValue( HValue* value) { Representation r = value->representation(); @@ -88,17 +74,33 @@ int use_index = it.index(); Representation req = use_value->RequiredInputRepresentation(use_index); if (req.IsNone() || req.Equals(r)) continue; + + // If this is an HForceRepresentation instruction, and an HChange has been + // inserted above it, examine the input representation of the HChange. If + // that's int32, and this HForceRepresentation use is int32, and int32 to + // smi changes can't cause deoptimisation, set the input of the use to the + // input of the HChange. + if (value->IsForceRepresentation()) { + HValue* input = HForceRepresentation::cast(value)->value(); + if (input->IsChange()) { + HChange* change = HChange::cast(input); + if (change->from().Equals(req) && IsNonDeoptingIntToSmiChange(change)) { + use_value->SetOperandAt(use_index, change->value()); + continue; + } + } + } InsertRepresentationChangeForUse(value, use_value, use_index, req); } if (value->HasNoUses()) { - ASSERT(value->IsConstant()); + DCHECK(value->IsConstant() || value->IsForceRepresentation()); value->DeleteAndReplaceWith(NULL); - } - - // The only purpose of a HForceRepresentation is to represent the value - // after the (possible) HChange instruction. We make it disappear. - if (value->IsForceRepresentation()) { - value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value()); + } else { + // The only purpose of a HForceRepresentation is to represent the value + // after the (possible) HChange instruction. We make it disappear. 
+ if (value->IsForceRepresentation()) { + value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value()); + } } } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-representation-changes.h nodejs-0.11.15/deps/v8/src/hydrogen-representation-changes.h --- nodejs-0.11.13/deps/v8/src/hydrogen-representation-changes.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-representation-changes.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_REPRESENTATION_CHANGES_H_ #define V8_HYDROGEN_REPRESENTATION_CHANGES_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-sce.cc nodejs-0.11.15/deps/v8/src/hydrogen-sce.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-sce.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-sce.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-sce.h" -#include "v8.h" +#include "src/hydrogen-sce.h" +#include "src/v8.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-sce.h nodejs-0.11.15/deps/v8/src/hydrogen-sce.h --- nodejs-0.11.13/deps/v8/src/hydrogen-sce.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-sce.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_SCE_H_ #define V8_HYDROGEN_SCE_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-store-elimination.cc nodejs-0.11.15/deps/v8/src/hydrogen-store-elimination.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-store-elimination.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-store-elimination.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "hydrogen-store-elimination.h" -#include "hydrogen-instructions.h" +#include "src/hydrogen-instructions.h" +#include "src/hydrogen-store-elimination.h" namespace v8 { namespace internal { @@ -53,8 +30,10 @@ for (int i = 0; i < graph()->blocks()->length(); i++) { unobserved_.Rewind(0); HBasicBlock* block = graph()->blocks()->at(i); + if (!block->IsReachable()) continue; for (HInstructionIterator it(block); !it.Done(); it.Advance()) { HInstruction* instr = it.Current(); + if (instr->CheckFlag(HValue::kIsDead)) continue; // TODO(titzer): eliminate unobserved HStoreKeyed instructions too. 
switch (instr->opcode()) { @@ -81,7 +60,7 @@ while (i < unobserved_.length()) { HStoreNamedField* prev = unobserved_.at(i); if (aliasing_->MustAlias(object, prev->object()->ActualValue()) && - store->access().Equals(prev->access())) { + prev->CanBeReplacedWith(store)) { // This store is guaranteed to overwrite the previous store. prev->DeleteAndReplaceWith(NULL); TRACE(("++ Unobserved store S%d overwritten by S%d\n", @@ -120,17 +99,20 @@ GVNFlagSet flags) { if (unobserved_.length() == 0) return; // Nothing to do. if (instr->CanDeoptimize()) { - TRACE(("-- Observed stores at I%d (might deoptimize)\n", instr->id())); + TRACE(("-- Observed stores at I%d (%s might deoptimize)\n", + instr->id(), instr->Mnemonic())); unobserved_.Rewind(0); return; } if (instr->CheckChangesFlag(kNewSpacePromotion)) { - TRACE(("-- Observed stores at I%d (might GC)\n", instr->id())); + TRACE(("-- Observed stores at I%d (%s might GC)\n", + instr->id(), instr->Mnemonic())); unobserved_.Rewind(0); return; } - if (instr->ChangesFlags().ContainsAnyOf(flags)) { - TRACE(("-- Observed stores at I%d (GVN flags)\n", instr->id())); + if (instr->DependsOnFlags().ContainsAnyOf(flags)) { + TRACE(("-- Observed stores at I%d (GVN flags of %s)\n", + instr->id(), instr->Mnemonic())); unobserved_.Rewind(0); return; } diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-store-elimination.h nodejs-0.11.15/deps/v8/src/hydrogen-store-elimination.h --- nodejs-0.11.13/deps/v8/src/hydrogen-store-elimination.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-store-elimination.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_STORE_ELIMINATION_H_ #define V8_HYDROGEN_STORE_ELIMINATION_H_ -#include "hydrogen.h" -#include "hydrogen-alias-analysis.h" +#include "src/hydrogen.h" +#include "src/hydrogen-alias-analysis.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-types.cc nodejs-0.11.15/deps/v8/src/hydrogen-types.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-types.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-types.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,70 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/hydrogen-types.h" + +#include "src/ostreams.h" +#include "src/types-inl.h" + + +namespace v8 { +namespace internal { + +// static +template <class T> +HType HType::FromType(typename T::TypeHandle type) { + if (T::Any()->Is(type)) return HType::Any(); + if (type->Is(T::None())) return HType::None(); + if (type->Is(T::SignedSmall())) return HType::Smi(); + if (type->Is(T::Number())) return HType::TaggedNumber(); + if (type->Is(T::Null())) return HType::Null(); + if (type->Is(T::String())) return HType::String(); + if (type->Is(T::Boolean())) return HType::Boolean(); + if (type->Is(T::Undefined())) return HType::Undefined(); + if (type->Is(T::Array())) return HType::JSArray(); + if (type->Is(T::Object())) return HType::JSObject(); + return HType::Tagged(); +} + + +// static +template +HType HType::FromType<Type>(Type* type); + + +// static +template +HType HType::FromType<HeapType>(Handle<HeapType> type); + + +// static +HType HType::FromValue(Handle<Object> value) { + if (value->IsSmi()) return HType::Smi(); + if (value->IsNull()) return HType::Null(); + if (value->IsHeapNumber()) return HType::HeapNumber(); + if (value->IsString()) return HType::String(); + if (value->IsBoolean()) return HType::Boolean(); + if (value->IsUndefined()) return HType::Undefined(); + if (value->IsJSArray()) return HType::JSArray(); + if (value->IsJSObject()) return HType::JSObject(); + DCHECK(value->IsHeapObject()); + return HType::HeapObject(); +} + + +OStream& operator<<(OStream& os, const HType& t) { + // Note: The c1visualizer syntax for locals allows only a sequence of the + // following characters: A-Za-z0-9_-|: + switch (t.kind_) { +#define DEFINE_CASE(Name, mask) \ + case HType::k##Name: \ + return os << #Name; + HTYPE_LIST(DEFINE_CASE) +#undef DEFINE_CASE + } + UNREACHABLE(); + return os; +} + +} } // namespace v8::internal diff -Nru 
nodejs-0.11.13/deps/v8/src/hydrogen-types.h nodejs-0.11.15/deps/v8/src/hydrogen-types.h --- nodejs-0.11.13/deps/v8/src/hydrogen-types.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-types.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,90 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef HYDROGEN_TYPES_H_ +#define HYDROGEN_TYPES_H_ + +#include <climits> + +#include "src/base/macros.h" + +namespace v8 { +namespace internal { + +// Forward declarations. +template <typename T> class Handle; +class Object; +class OStream; + +#define HTYPE_LIST(V) \ + V(Any, 0x0) /* 0000 0000 0000 0000 */ \ + V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \ + V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \ + V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \ + V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \ + V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \ + V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \ + V(Null, 0x27) /* 0000 0000 0010 0111 */ \ + V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \ + V(String, 0x65) /* 0000 0000 0110 0101 */ \ + V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \ + V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \ + V(JSObject, 0x221) /* 0000 0010 0010 0001 */ \ + V(JSArray, 0x621) /* 0000 0110 0010 0001 */ \ + V(None, 0x7ff) /* 0000 0111 1111 1111 */ + +class HType V8_FINAL { + public: + #define DECLARE_CONSTRUCTOR(Name, mask) \ + static HType Name() V8_WARN_UNUSED_RESULT { return HType(k##Name); } + HTYPE_LIST(DECLARE_CONSTRUCTOR) + #undef DECLARE_CONSTRUCTOR + + // Return the weakest (least precise) common type. 
+ HType Combine(HType other) const V8_WARN_UNUSED_RESULT { + return HType(static_cast<Kind>(kind_ & other.kind_)); + } + + bool Equals(HType other) const V8_WARN_UNUSED_RESULT { + return kind_ == other.kind_; + } + + bool IsSubtypeOf(HType other) const V8_WARN_UNUSED_RESULT { + return Combine(other).Equals(other); + } + + #define DECLARE_IS_TYPE(Name, mask) \ + bool Is##Name() const V8_WARN_UNUSED_RESULT { \ + return IsSubtypeOf(HType::Name()); \ + } + HTYPE_LIST(DECLARE_IS_TYPE) + #undef DECLARE_IS_TYPE + + template <class T> + static HType FromType(typename T::TypeHandle type) V8_WARN_UNUSED_RESULT; + static HType FromValue(Handle<Object> value) V8_WARN_UNUSED_RESULT; + + friend OStream& operator<<(OStream& os, const HType& t); + + private: + enum Kind { + #define DECLARE_TYPE(Name, mask) k##Name = mask, + HTYPE_LIST(DECLARE_TYPE) + #undef DECLARE_TYPE + LAST_KIND = kNone + }; + + // Make sure type fits in int16. + STATIC_ASSERT(LAST_KIND < (1 << (CHAR_BIT * sizeof(int16_t)))); + + explicit HType(Kind kind) : kind_(kind) { } + + int16_t kind_; +}; + + +OStream& operator<<(OStream& os, const HType& t); +} } // namespace v8::internal + +#endif // HYDROGEN_TYPES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-uint32-analysis.cc nodejs-0.11.15/deps/v8/src/hydrogen-uint32-analysis.cc --- nodejs-0.11.13/deps/v8/src/hydrogen-uint32-analysis.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-uint32-analysis.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,37 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "hydrogen-uint32-analysis.h" +#include "src/hydrogen-uint32-analysis.h" namespace v8 { namespace internal { +static bool IsUnsignedLoad(HLoadKeyed* instr) { + switch (instr->elements_kind()) { + case EXTERNAL_UINT8_ELEMENTS: + case EXTERNAL_UINT16_ELEMENTS: + case EXTERNAL_UINT32_ELEMENTS: + case EXTERNAL_UINT8_CLAMPED_ELEMENTS: + case UINT8_ELEMENTS: + case UINT16_ELEMENTS: + case UINT32_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + return true; + default: + return false; + } +} + + +static bool IsUint32Operation(HValue* instr) { + return instr->IsShr() || + (instr->IsLoadKeyed() && IsUnsignedLoad(HLoadKeyed::cast(instr))) || + (instr->IsInteger32Constant() && instr->GetInteger32Constant() >= 0); +} + + bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) { // Operations that operate on bits are safe. if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) { @@ -40,10 +41,10 @@ return true; } else if (use->IsChange()) { // Conversions have special support for uint32. - // This ASSERT guards that the conversion in question is actually + // This DCHECK guards that the conversion in question is actually // implemented. Do not extend the whitelist without adding // support to LChunkBuilder::DoChange(). - ASSERT(HChange::cast(use)->to().IsDouble() || + DCHECK(HChange::cast(use)->to().IsDouble() || HChange::cast(use)->to().IsSmi() || HChange::cast(use)->to().IsTagged()); return true; @@ -54,12 +55,15 @@ // operation. if (store->value() == val) { // Clamping or a conversion to double should have beed inserted. 
- ASSERT(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS); - ASSERT(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS); - ASSERT(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS); + DCHECK(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS); + DCHECK(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS); + DCHECK(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS); return true; } } + } else if (use->IsCompareNumericAndBranch()) { + HCompareNumericAndBranch* c = HCompareNumericAndBranch::cast(use); + return IsUint32Operation(c->left()) && IsUint32Operation(c->right()); } return false; diff -Nru nodejs-0.11.13/deps/v8/src/hydrogen-uint32-analysis.h nodejs-0.11.15/deps/v8/src/hydrogen-uint32-analysis.h --- nodejs-0.11.13/deps/v8/src/hydrogen-uint32-analysis.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/hydrogen-uint32-analysis.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_UINT32_ANALYSIS_H_ #define V8_HYDROGEN_UINT32_ANALYSIS_H_ -#include "hydrogen.h" +#include "src/hydrogen.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/i18n.cc nodejs-0.11.15/deps/v8/src/i18n.cc --- nodejs-0.11.13/deps/v8/src/i18n.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/i18n.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // limitations under the License. -#include "i18n.h" +#include "src/i18n.h" #include "unicode/brkiter.h" #include "unicode/calendar.h" @@ -57,12 +34,11 @@ Handle<JSObject> options, const char* key, icu::UnicodeString* setting) { - Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key)); - MaybeObject* maybe_object = options->GetProperty(*str); - Object* object; - if (maybe_object->ToObject(&object) && object->IsString()) { + Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key); + Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked(); + if (object->IsString()) { v8::String::Utf8Value utf8_string( - v8::Utils::ToLocal(Handle<String>(String::cast(object)))); + v8::Utils::ToLocal(Handle<String>::cast(object))); *setting = icu::UnicodeString::fromUTF8(*utf8_string); return true; } @@ -74,10 +50,9 @@ Handle<JSObject> options, const char* key, int32_t* value) { - Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key)); - MaybeObject* maybe_object = options->GetProperty(*str); - Object* object; - if 
(maybe_object->ToObject(&object) && object->IsNumber()) { + Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key); + Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked(); + if (object->IsNumber()) { object->ToInt32(value); return true; } @@ -89,10 +64,9 @@ Handle<JSObject> options, const char* key, bool* value) { - Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key)); - MaybeObject* maybe_object = options->GetProperty(*str); - Object* object; - if (maybe_object->ToObject(&object) && object->IsBoolean()) { + Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key); + Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked(); + if (object->IsBoolean()) { *value = object->BooleanValue(); return true; } @@ -152,28 +126,27 @@ const icu::Locale& icu_locale, icu::SimpleDateFormat* date_format, Handle<JSObject> resolved) { + Factory* factory = isolate->factory(); UErrorCode status = U_ZERO_ERROR; icu::UnicodeString pattern; date_format->toPattern(pattern); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("pattern")), - isolate->factory()->NewStringFromTwoByte( + factory->NewStringFromStaticAscii("pattern"), + factory->NewStringFromTwoByte( Vector<const uint16_t>( reinterpret_cast<const uint16_t*>(pattern.getBuffer()), - pattern.length())), - NONE, - SLOPPY); + pattern.length())).ToHandleChecked(), + SLOPPY).Assert(); // Set time zone and calendar. 
const icu::Calendar* calendar = date_format->getCalendar(); const char* calendar_name = calendar->getType(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("calendar")), - isolate->factory()->NewStringFromAscii(CStrVector(calendar_name)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("calendar"), + factory->NewStringFromAsciiChecked(calendar_name), + SLOPPY).Assert(); const icu::TimeZone& tz = calendar->getTimeZone(); icu::UnicodeString time_zone; @@ -185,21 +158,19 @@ if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("timeZone")), - isolate->factory()->NewStringFromAscii(CStrVector("UTC")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("timeZone"), + factory->NewStringFromStaticAscii("UTC"), + SLOPPY).Assert(); } else { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("timeZone")), - isolate->factory()->NewStringFromTwoByte( + factory->NewStringFromStaticAscii("timeZone"), + factory->NewStringFromTwoByte( Vector<const uint16_t>( reinterpret_cast<const uint16_t*>( canonical_time_zone.getBuffer()), - canonical_time_zone.length())), - NONE, - SLOPPY); + canonical_time_zone.length())).ToHandleChecked(), + SLOPPY).Assert(); } } @@ -213,17 +184,15 @@ const char* ns = numbering_system->getName(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")), - isolate->factory()->NewStringFromAscii(CStrVector(ns)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("numberingSystem"), + factory->NewStringFromAsciiChecked(ns), + SLOPPY).Assert(); } else { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")), - isolate->factory()->undefined_value(), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("numberingSystem"), + factory->undefined_value(), + SLOPPY).Assert(); } delete 
numbering_system; @@ -235,18 +204,16 @@ if (U_SUCCESS(status)) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector(result)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromAsciiChecked(result), + SLOPPY).Assert(); } else { // This would never happen, since we got the locale from ICU. JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector("und")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromStaticAscii("und"), + SLOPPY).Assert(); } } @@ -379,30 +346,29 @@ const icu::Locale& icu_locale, icu::DecimalFormat* number_format, Handle<JSObject> resolved) { + Factory* factory = isolate->factory(); icu::UnicodeString pattern; number_format->toPattern(pattern); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("pattern")), - isolate->factory()->NewStringFromTwoByte( + factory->NewStringFromStaticAscii("pattern"), + factory->NewStringFromTwoByte( Vector<const uint16_t>( reinterpret_cast<const uint16_t*>(pattern.getBuffer()), - pattern.length())), - NONE, - SLOPPY); + pattern.length())).ToHandleChecked(), + SLOPPY).Assert(); // Set resolved currency code in options.currency if not empty. icu::UnicodeString currency(number_format->getCurrency()); if (!currency.isEmpty()) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("currency")), - isolate->factory()->NewStringFromTwoByte( + factory->NewStringFromStaticAscii("currency"), + factory->NewStringFromTwoByte( Vector<const uint16_t>( reinterpret_cast<const uint16_t*>(currency.getBuffer()), - currency.length())), - NONE, - SLOPPY); + currency.length())).ToHandleChecked(), + SLOPPY).Assert(); } // Ugly hack. 
ICU doesn't expose numbering system in any way, so we have @@ -415,78 +381,63 @@ const char* ns = numbering_system->getName(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")), - isolate->factory()->NewStringFromAscii(CStrVector(ns)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("numberingSystem"), + factory->NewStringFromAsciiChecked(ns), + SLOPPY).Assert(); } else { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")), - isolate->factory()->undefined_value(), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("numberingSystem"), + factory->undefined_value(), + SLOPPY).Assert(); } delete numbering_system; JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")), - isolate->factory()->ToBoolean(number_format->isGroupingUsed()), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("useGrouping"), + factory->ToBoolean(number_format->isGroupingUsed()), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii( - CStrVector("minimumIntegerDigits")), - isolate->factory()->NewNumberFromInt( - number_format->getMinimumIntegerDigits()), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("minimumIntegerDigits"), + factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii( - CStrVector("minimumFractionDigits")), - isolate->factory()->NewNumberFromInt( - number_format->getMinimumFractionDigits()), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("minimumFractionDigits"), + factory->NewNumberFromInt(number_format->getMinimumFractionDigits()), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii( - CStrVector("maximumFractionDigits")), - isolate->factory()->NewNumberFromInt( - number_format->getMaximumFractionDigits()), - 
NONE, - SLOPPY); - - Handle<String> key = isolate->factory()->NewStringFromAscii( - CStrVector("minimumSignificantDigits")); - if (JSReceiver::HasLocalProperty(resolved, key)) { + factory->NewStringFromStaticAscii("maximumFractionDigits"), + factory->NewNumberFromInt(number_format->getMaximumFractionDigits()), + SLOPPY).Assert(); + + Handle<String> key = + factory->NewStringFromStaticAscii("minimumSignificantDigits"); + Maybe<bool> maybe = JSReceiver::HasOwnProperty(resolved, key); + CHECK(maybe.has_value); + if (maybe.value) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii( - CStrVector("minimumSignificantDigits")), - isolate->factory()->NewNumberFromInt( - number_format->getMinimumSignificantDigits()), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("minimumSignificantDigits"), + factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()), + SLOPPY).Assert(); } - key = isolate->factory()->NewStringFromAscii( - CStrVector("maximumSignificantDigits")); - if (JSReceiver::HasLocalProperty(resolved, key)) { + key = factory->NewStringFromStaticAscii("maximumSignificantDigits"); + maybe = JSReceiver::HasOwnProperty(resolved, key); + CHECK(maybe.has_value); + if (maybe.value) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii( - CStrVector("maximumSignificantDigits")), - isolate->factory()->NewNumberFromInt( - number_format->getMaximumSignificantDigits()), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("maximumSignificantDigits"), + factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()), + SLOPPY).Assert(); } // Set the locale @@ -497,18 +448,16 @@ if (U_SUCCESS(status)) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector(result)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromAsciiChecked(result), + SLOPPY).Assert(); } else { // This 
would never happen, since we got the locale from ICU. JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector("und")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromStaticAscii("und"), + SLOPPY).Assert(); } } @@ -581,135 +530,120 @@ const icu::Locale& icu_locale, icu::Collator* collator, Handle<JSObject> resolved) { + Factory* factory = isolate->factory(); UErrorCode status = U_ZERO_ERROR; JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("numeric")), - isolate->factory()->ToBoolean( + factory->NewStringFromStaticAscii("numeric"), + factory->ToBoolean( collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON), - NONE, - SLOPPY); + SLOPPY).Assert(); switch (collator->getAttribute(UCOL_CASE_FIRST, status)) { case UCOL_LOWER_FIRST: JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")), - isolate->factory()->NewStringFromAscii(CStrVector("lower")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("caseFirst"), + factory->NewStringFromStaticAscii("lower"), + SLOPPY).Assert(); break; case UCOL_UPPER_FIRST: JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")), - isolate->factory()->NewStringFromAscii(CStrVector("upper")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("caseFirst"), + factory->NewStringFromStaticAscii("upper"), + SLOPPY).Assert(); break; default: JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")), - isolate->factory()->NewStringFromAscii(CStrVector("false")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("caseFirst"), + factory->NewStringFromStaticAscii("false"), + SLOPPY).Assert(); } switch (collator->getAttribute(UCOL_STRENGTH, status)) { case UCOL_PRIMARY: { JSObject::SetProperty( resolved, - 
isolate->factory()->NewStringFromAscii(CStrVector("strength")), - isolate->factory()->NewStringFromAscii(CStrVector("primary")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("strength"), + factory->NewStringFromStaticAscii("primary"), + SLOPPY).Assert(); // case level: true + s1 -> case, s1 -> base. if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")), - isolate->factory()->NewStringFromAscii(CStrVector("case")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("sensitivity"), + factory->NewStringFromStaticAscii("case"), + SLOPPY).Assert(); } else { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")), - isolate->factory()->NewStringFromAscii(CStrVector("base")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("sensitivity"), + factory->NewStringFromStaticAscii("base"), + SLOPPY).Assert(); } break; } case UCOL_SECONDARY: JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("strength")), - isolate->factory()->NewStringFromAscii(CStrVector("secondary")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("strength"), + factory->NewStringFromStaticAscii("secondary"), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")), - isolate->factory()->NewStringFromAscii(CStrVector("accent")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("sensitivity"), + factory->NewStringFromStaticAscii("accent"), + SLOPPY).Assert(); break; case UCOL_TERTIARY: JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("strength")), - isolate->factory()->NewStringFromAscii(CStrVector("tertiary")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("strength"), + factory->NewStringFromStaticAscii("tertiary"), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - 
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")), - isolate->factory()->NewStringFromAscii(CStrVector("variant")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("sensitivity"), + factory->NewStringFromStaticAscii("variant"), + SLOPPY).Assert(); break; case UCOL_QUATERNARY: // We shouldn't get quaternary and identical from ICU, but if we do // put them into variant. JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("strength")), - isolate->factory()->NewStringFromAscii(CStrVector("quaternary")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("strength"), + factory->NewStringFromStaticAscii("quaternary"), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")), - isolate->factory()->NewStringFromAscii(CStrVector("variant")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("sensitivity"), + factory->NewStringFromStaticAscii("variant"), + SLOPPY).Assert(); break; default: JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("strength")), - isolate->factory()->NewStringFromAscii(CStrVector("identical")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("strength"), + factory->NewStringFromStaticAscii("identical"), + SLOPPY).Assert(); JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")), - isolate->factory()->NewStringFromAscii(CStrVector("variant")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("sensitivity"), + factory->NewStringFromStaticAscii("variant"), + SLOPPY).Assert(); } JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("ignorePunctuation")), - isolate->factory()->ToBoolean(collator->getAttribute( + factory->NewStringFromStaticAscii("ignorePunctuation"), + factory->ToBoolean(collator->getAttribute( UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED), - NONE, - SLOPPY); + SLOPPY).Assert(); // Set 
the locale char result[ULOC_FULLNAME_CAPACITY]; @@ -719,18 +653,16 @@ if (U_SUCCESS(status)) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector(result)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromAsciiChecked(result), + SLOPPY).Assert(); } else { // This would never happen, since we got the locale from ICU. JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector("und")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromStaticAscii("und"), + SLOPPY).Assert(); } } @@ -772,6 +704,7 @@ const icu::Locale& icu_locale, icu::BreakIterator* break_iterator, Handle<JSObject> resolved) { + Factory* factory = isolate->factory(); UErrorCode status = U_ZERO_ERROR; // Set the locale @@ -782,18 +715,16 @@ if (U_SUCCESS(status)) { JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector(result)), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromAsciiChecked(result), + SLOPPY).Assert(); } else { // This would never happen, since we got the locale from ICU. 
JSObject::SetProperty( resolved, - isolate->factory()->NewStringFromAscii(CStrVector("locale")), - isolate->factory()->NewStringFromAscii(CStrVector("und")), - NONE, - SLOPPY); + factory->NewStringFromStaticAscii("locale"), + factory->NewStringFromStaticAscii("und"), + SLOPPY).Assert(); } } @@ -855,8 +786,10 @@ Isolate* isolate, Handle<JSObject> obj) { Handle<String> key = - isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")); - if (JSReceiver::HasLocalProperty(obj, key)) { + isolate->factory()->NewStringFromStaticAscii("dateFormat"); + Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key); + CHECK(maybe.has_value); + if (maybe.value) { return reinterpret_cast<icu::SimpleDateFormat*>( obj->GetInternalField(0)); } @@ -929,8 +862,10 @@ Isolate* isolate, Handle<JSObject> obj) { Handle<String> key = - isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")); - if (JSReceiver::HasLocalProperty(obj, key)) { + isolate->factory()->NewStringFromStaticAscii("numberFormat"); + Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key); + CHECK(maybe.has_value); + if (maybe.value) { return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0)); } @@ -984,9 +919,10 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate, Handle<JSObject> obj) { - Handle<String> key = - isolate->factory()->NewStringFromAscii(CStrVector("collator")); - if (JSReceiver::HasLocalProperty(obj, key)) { + Handle<String> key = isolate->factory()->NewStringFromStaticAscii("collator"); + Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key); + CHECK(maybe.has_value); + if (maybe.value) { return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0)); } @@ -1044,8 +980,10 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate, Handle<JSObject> obj) { Handle<String> key = - isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")); - if (JSReceiver::HasLocalProperty(obj, key)) { + 
isolate->factory()->NewStringFromStaticAscii("breakIterator"); + Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key); + CHECK(maybe.has_value); + if (maybe.value) { return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0)); } diff -Nru nodejs-0.11.13/deps/v8/src/i18n.h nodejs-0.11.15/deps/v8/src/i18n.h --- nodejs-0.11.13/deps/v8/src/i18n.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/i18n.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // limitations under the License. #ifndef V8_I18N_H_ #define V8_I18N_H_ +#include "src/v8.h" #include "unicode/uversion.h" -#include "v8.h" namespace U_ICU_NAMESPACE { class BreakIterator; diff -Nru nodejs-0.11.13/deps/v8/src/i18n.js nodejs-0.11.15/deps/v8/src/i18n.js --- nodejs-0.11.13/deps/v8/src/i18n.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/i18n.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,30 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// limitations under the License. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // ECMAScript 402 API implementation. @@ -34,8 +12,6 @@ */ $Object.defineProperty(global, "Intl", { enumerable: false, value: (function() { -'use strict'; - var Intl = {}; var undefined = global.undefined; @@ -232,8 +208,7 @@ */ function addBoundMethod(obj, methodName, implementation, length) { function getter() { - if (!this || typeof this !== 'object' || - this.__initializedIntlObject === undefined) { + if (!%IsInitializedIntlObject(this)) { throw new $TypeError('Method ' + methodName + ' called on a ' + 'non-object or on a wrong type of object.'); } @@ -282,7 +257,7 @@ %FunctionRemovePrototype(getter); %SetNativeFlag(getter); - $Object.defineProperty(obj.prototype, methodName, { + ObjectDefineProperty(obj.prototype, methodName, { get: getter, enumerable: false, configurable: true @@ -303,7 +278,7 @@ if (options === undefined) { options = {}; } else { - options = toObject(options); + options = ToObject(options); } var matcher = options.localeMatcher; @@ -537,18 +512,6 @@ /** - * Converts parameter to an Object if 
possible. - */ -function toObject(value) { - if (IS_NULL_OR_UNDEFINED(value)) { - throw new $TypeError('Value cannot be converted to an Object.'); - } - - return $Object(value); -} - - -/** * Populates internalOptions object with boolean key-value pairs * from extensionMap and options. * Returns filtered extension (number and date format constructors use @@ -617,15 +580,14 @@ */ function freezeArray(array) { array.forEach(function(element, index) { - $Object.defineProperty(array, index, {value: element, + ObjectDefineProperty(array, index, {value: element, configurable: false, writable: false, enumerable: true}); }); - $Object.defineProperty(array, 'length', {value: array.length, - writable: false}); - + ObjectDefineProperty(array, 'length', {value: array.length, + writable: false}); return array; } @@ -686,8 +648,8 @@ * Configurable is false by default. */ function defineWEProperty(object, property, value) { - $Object.defineProperty(object, property, - {value: value, writable: true, enumerable: true}); + ObjectDefineProperty(object, property, + {value: value, writable: true, enumerable: true}); } @@ -706,11 +668,11 @@ * Defines a property and sets writable, enumerable and configurable to true. */ function defineWECProperty(object, property, value) { - $Object.defineProperty(object, property, - {value: value, - writable: true, - enumerable: true, - configurable: true}); + ObjectDefineProperty(object, property, + {value: value, + writable: true, + enumerable: true, + configurable: true}); } @@ -777,7 +739,7 @@ return freezeArray(seen); } - var o = toObject(locales); + var o = ToObject(locales); // Converts it to UInt32 (>>> is shr on 32bit integers). var len = o.length >>> 0; @@ -896,7 +858,7 @@ * Useful for subclassing. 
*/ function initializeCollator(collator, locales, options) { - if (collator.hasOwnProperty('__initializedIntlObject')) { + if (%IsInitializedIntlObject(collator)) { throw new $TypeError('Trying to re-initialize Collator object.'); } @@ -949,8 +911,8 @@ // We define all properties C++ code may produce, to prevent security // problems. If malicious user decides to redefine Object.prototype.locale // we can't just use plain x.locale = 'us' or in C++ Set("locale", "us"). - // Object.defineProperties will either succeed defining or throw an error. - var resolved = $Object.defineProperties({}, { + // ObjectDefineProperties will either succeed defining or throw an error. + var resolved = ObjectDefineProperties({}, { caseFirst: {writable: true}, collation: {value: internalOptions.collation, writable: true}, ignorePunctuation: {writable: true}, @@ -967,10 +929,8 @@ resolved); // Writable, configurable and enumerable are set to false by default. - $Object.defineProperty(collator, 'collator', {value: internalCollator}); - $Object.defineProperty(collator, '__initializedIntlObject', - {value: 'collator'}); - $Object.defineProperty(collator, 'resolved', {value: resolved}); + %MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator); + ObjectDefineProperty(collator, 'resolved', {value: resolved}); return collator; } @@ -982,7 +942,7 @@ * * @constructor */ -%SetProperty(Intl, 'Collator', function() { +%AddNamedProperty(Intl, 'Collator', function() { var locales = %_Arguments(0); var options = %_Arguments(1); @@ -991,7 +951,7 @@ return new Intl.Collator(locales, options); } - return initializeCollator(toObject(this), locales, options); + return initializeCollator(ToObject(this), locales, options); }, DONT_ENUM ); @@ -1000,13 +960,12 @@ /** * Collator resolvedOptions method. 
*/ -%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() { +%AddNamedProperty(Intl.Collator.prototype, 'resolvedOptions', function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - if (!this || typeof this !== 'object' || - this.__initializedIntlObject !== 'collator') { + if (!%IsInitializedIntlObjectOfType(this, 'collator')) { throw new $TypeError('resolvedOptions method called on a non-object ' + 'or on a object that is not Intl.Collator.'); } @@ -1038,7 +997,7 @@ * order in the returned list as in the input list. * Options are optional parameter. */ -%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) { +%AddNamedProperty(Intl.Collator, 'supportedLocalesOf', function(locales) { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } @@ -1063,7 +1022,8 @@ * the sort order, or x comes after y in the sort order, respectively. */ function compare(collator, x, y) { - return %InternalCompare(collator.collator, $String(x), $String(y)); + return %InternalCompare(%GetImplFromInitializedIntlObject(collator), + $String(x), $String(y)); }; @@ -1104,7 +1064,7 @@ * Useful for subclassing. */ function initializeNumberFormat(numberFormat, locales, options) { - if (numberFormat.hasOwnProperty('__initializedIntlObject')) { + if (%IsInitializedIntlObject(numberFormat)) { throw new $TypeError('Trying to re-initialize NumberFormat object.'); } @@ -1167,7 +1127,7 @@ getOption, internalOptions); var requestedLocale = locale.locale + extension; - var resolved = $Object.defineProperties({}, { + var resolved = ObjectDefineProperties({}, { currency: {writable: true}, currencyDisplay: {writable: true}, locale: {writable: true}, @@ -1192,14 +1152,12 @@ // We can't get information about number or currency style from ICU, so we // assume user request was fulfilled. 
if (internalOptions.style === 'currency') { - $Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay, - writable: true}); + ObjectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay, + writable: true}); } - $Object.defineProperty(numberFormat, 'formatter', {value: formatter}); - $Object.defineProperty(numberFormat, 'resolved', {value: resolved}); - $Object.defineProperty(numberFormat, '__initializedIntlObject', - {value: 'numberformat'}); + %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter); + ObjectDefineProperty(numberFormat, 'resolved', {value: resolved}); return numberFormat; } @@ -1211,7 +1169,7 @@ * * @constructor */ -%SetProperty(Intl, 'NumberFormat', function() { +%AddNamedProperty(Intl, 'NumberFormat', function() { var locales = %_Arguments(0); var options = %_Arguments(1); @@ -1220,7 +1178,7 @@ return new Intl.NumberFormat(locales, options); } - return initializeNumberFormat(toObject(this), locales, options); + return initializeNumberFormat(ToObject(this), locales, options); }, DONT_ENUM ); @@ -1229,13 +1187,12 @@ /** * NumberFormat resolvedOptions method. */ -%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() { +%AddNamedProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - if (!this || typeof this !== 'object' || - this.__initializedIntlObject !== 'numberformat') { + if (!%IsInitializedIntlObjectOfType(this, 'numberformat')) { throw new $TypeError('resolvedOptions method called on a non-object' + ' or on a object that is not Intl.NumberFormat.'); } @@ -1286,7 +1243,7 @@ * order in the returned list as in the input list. * Options are optional parameter. 
*/ -%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) { +%AddNamedProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } @@ -1309,7 +1266,8 @@ // Spec treats -0 and +0 as 0. var number = $Number(value) + 0; - return %InternalNumberFormat(formatter.formatter, number); + return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter), + number); } @@ -1317,7 +1275,8 @@ * Returns a Number that represents string value that was passed in. */ function parseNumber(formatter, value) { - return %InternalNumberParse(formatter.formatter, $String(value)); + return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter), + $String(value)); } @@ -1470,13 +1429,11 @@ */ function toDateTimeOptions(options, required, defaults) { if (options === undefined) { - options = null; + options = {}; } else { - options = toObject(options); + options = TO_OBJECT_INLINE(options); } - options = $Object.apply(this, [options]); - var needsDefault = true; if ((required === 'date' || required === 'any') && (options.weekday !== undefined || options.year !== undefined || @@ -1491,30 +1448,30 @@ } if (needsDefault && (defaults === 'date' || defaults === 'all')) { - $Object.defineProperty(options, 'year', {value: 'numeric', - writable: true, - enumerable: true, - configurable: true}); - $Object.defineProperty(options, 'month', {value: 'numeric', - writable: true, - enumerable: true, - configurable: true}); - $Object.defineProperty(options, 'day', {value: 'numeric', + ObjectDefineProperty(options, 'year', {value: 'numeric', + writable: true, + enumerable: true, + configurable: true}); + ObjectDefineProperty(options, 'month', {value: 'numeric', writable: true, enumerable: true, configurable: true}); + ObjectDefineProperty(options, 'day', {value: 'numeric', + writable: true, + enumerable: true, + configurable: true}); } if (needsDefault && (defaults === 
'time' || defaults === 'all')) { - $Object.defineProperty(options, 'hour', {value: 'numeric', + ObjectDefineProperty(options, 'hour', {value: 'numeric', writable: true, enumerable: true, configurable: true}); - $Object.defineProperty(options, 'minute', {value: 'numeric', + ObjectDefineProperty(options, 'minute', {value: 'numeric', writable: true, enumerable: true, configurable: true}); - $Object.defineProperty(options, 'second', {value: 'numeric', + ObjectDefineProperty(options, 'second', {value: 'numeric', writable: true, enumerable: true, configurable: true}); @@ -1530,7 +1487,7 @@ */ function initializeDateTimeFormat(dateFormat, locales, options) { - if (dateFormat.hasOwnProperty('__initializedIntlObject')) { + if (%IsInitializedIntlObject(dateFormat)) { throw new $TypeError('Trying to re-initialize DateTimeFormat object.'); } @@ -1565,7 +1522,7 @@ getOption, internalOptions); var requestedLocale = locale.locale + extension; - var resolved = $Object.defineProperties({}, { + var resolved = ObjectDefineProperties({}, { calendar: {writable: true}, day: {writable: true}, era: {writable: true}, @@ -1592,10 +1549,8 @@ throw new $RangeError('Unsupported time zone specified ' + tz); } - $Object.defineProperty(dateFormat, 'formatter', {value: formatter}); - $Object.defineProperty(dateFormat, 'resolved', {value: resolved}); - $Object.defineProperty(dateFormat, '__initializedIntlObject', - {value: 'dateformat'}); + %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter); + ObjectDefineProperty(dateFormat, 'resolved', {value: resolved}); return dateFormat; } @@ -1607,7 +1562,7 @@ * * @constructor */ -%SetProperty(Intl, 'DateTimeFormat', function() { +%AddNamedProperty(Intl, 'DateTimeFormat', function() { var locales = %_Arguments(0); var options = %_Arguments(1); @@ -1616,7 +1571,7 @@ return new Intl.DateTimeFormat(locales, options); } - return initializeDateTimeFormat(toObject(this), locales, options); + return initializeDateTimeFormat(ToObject(this), 
locales, options); }, DONT_ENUM ); @@ -1625,13 +1580,12 @@ /** * DateTimeFormat resolvedOptions method. */ -%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() { +%AddNamedProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - if (!this || typeof this !== 'object' || - this.__initializedIntlObject !== 'dateformat') { + if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) { throw new $TypeError('resolvedOptions method called on a non-object or ' + 'on a object that is not Intl.DateTimeFormat.'); } @@ -1682,7 +1636,7 @@ * order in the returned list as in the input list. * Options are optional parameter. */ -%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) { +%AddNamedProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } @@ -1713,7 +1667,8 @@ throw new $RangeError('Provided date is not in valid range.'); } - return %InternalDateFormat(formatter.formatter, new $Date(dateMs)); + return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter), + new $Date(dateMs)); } @@ -1724,7 +1679,8 @@ * Returns undefined if date string cannot be parsed. */ function parseDate(formatter, value) { - return %InternalDateParse(formatter.formatter, $String(value)); + return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter), + $String(value)); } @@ -1772,7 +1728,7 @@ * Useful for subclassing. 
*/ function initializeBreakIterator(iterator, locales, options) { - if (iterator.hasOwnProperty('__initializedIntlObject')) { + if (%IsInitializedIntlObject(iterator)) { throw new $TypeError('Trying to re-initialize v8BreakIterator object.'); } @@ -1788,7 +1744,7 @@ 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word')); var locale = resolveLocale('breakiterator', locales, options); - var resolved = $Object.defineProperties({}, { + var resolved = ObjectDefineProperties({}, { requestedLocale: {value: locale.locale, writable: true}, type: {value: internalOptions.type, writable: true}, locale: {writable: true} @@ -1798,10 +1754,9 @@ internalOptions, resolved); - $Object.defineProperty(iterator, 'iterator', {value: internalIterator}); - $Object.defineProperty(iterator, 'resolved', {value: resolved}); - $Object.defineProperty(iterator, '__initializedIntlObject', - {value: 'breakiterator'}); + %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator', + internalIterator); + ObjectDefineProperty(iterator, 'resolved', {value: resolved}); return iterator; } @@ -1813,7 +1768,7 @@ * * @constructor */ -%SetProperty(Intl, 'v8BreakIterator', function() { +%AddNamedProperty(Intl, 'v8BreakIterator', function() { var locales = %_Arguments(0); var options = %_Arguments(1); @@ -1822,7 +1777,7 @@ return new Intl.v8BreakIterator(locales, options); } - return initializeBreakIterator(toObject(this), locales, options); + return initializeBreakIterator(ToObject(this), locales, options); }, DONT_ENUM ); @@ -1831,13 +1786,13 @@ /** * BreakIterator resolvedOptions method. 
*/ -%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() { +%AddNamedProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', + function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - if (!this || typeof this !== 'object' || - this.__initializedIntlObject !== 'breakiterator') { + if (!%IsInitializedIntlObjectOfType(this, 'breakiterator')) { throw new $TypeError('resolvedOptions method called on a non-object or ' + 'on a object that is not Intl.v8BreakIterator.'); } @@ -1865,7 +1820,8 @@ * order in the returned list as in the input list. * Options are optional parameter. */ -%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) { +%AddNamedProperty(Intl.v8BreakIterator, 'supportedLocalesOf', + function(locales) { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } @@ -1884,7 +1840,8 @@ * gets discarded. */ function adoptText(iterator, text) { - %BreakIteratorAdoptText(iterator.iterator, $String(text)); + %BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator), + $String(text)); } @@ -1892,7 +1849,7 @@ * Returns index of the first break in the string and moves current pointer. */ function first(iterator) { - return %BreakIteratorFirst(iterator.iterator); + return %BreakIteratorFirst(%GetImplFromInitializedIntlObject(iterator)); } @@ -1900,7 +1857,7 @@ * Returns the index of the next break and moves the pointer. */ function next(iterator) { - return %BreakIteratorNext(iterator.iterator); + return %BreakIteratorNext(%GetImplFromInitializedIntlObject(iterator)); } @@ -1908,7 +1865,7 @@ * Returns index of the current break. */ function current(iterator) { - return %BreakIteratorCurrent(iterator.iterator); + return %BreakIteratorCurrent(%GetImplFromInitializedIntlObject(iterator)); } @@ -1916,7 +1873,7 @@ * Returns type of the current break. 
*/ function breakType(iterator) { - return %BreakIteratorBreakType(iterator.iterator); + return %BreakIteratorBreakType(%GetImplFromInitializedIntlObject(iterator)); } @@ -1967,7 +1924,7 @@ * Compares this and that, and returns less than 0, 0 or greater than 0 value. * Overrides the built-in method. */ -$Object.defineProperty($String.prototype, 'localeCompare', { +ObjectDefineProperty($String.prototype, 'localeCompare', { value: function(that) { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); @@ -1998,7 +1955,7 @@ * If the form is not one of "NFC", "NFD", "NFKC", or "NFKD", then throw * a RangeError Exception. */ -$Object.defineProperty($String.prototype, 'normalize', { +ObjectDefineProperty($String.prototype, 'normalize', { value: function(that) { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); @@ -2029,7 +1986,7 @@ * Formats a Number object (this) using locale and options values. * If locale or options are omitted, defaults are used. */ -$Object.defineProperty($Number.prototype, 'toLocaleString', { +ObjectDefineProperty($Number.prototype, 'toLocaleString', { value: function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); @@ -2079,7 +2036,7 @@ * If locale or options are omitted, defaults are used - both date and time are * present in the output. */ -$Object.defineProperty($Date.prototype, 'toLocaleString', { +ObjectDefineProperty($Date.prototype, 'toLocaleString', { value: function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); @@ -2104,7 +2061,7 @@ * If locale or options are omitted, defaults are used - only date is present * in the output. 
*/ -$Object.defineProperty($Date.prototype, 'toLocaleDateString', { +ObjectDefineProperty($Date.prototype, 'toLocaleDateString', { value: function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); @@ -2129,7 +2086,7 @@ * If locale or options are omitted, defaults are used - only time is present * in the output. */ -$Object.defineProperty($Date.prototype, 'toLocaleTimeString', { +ObjectDefineProperty($Date.prototype, 'toLocaleTimeString', { value: function() { if (%_IsConstructCall()) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); diff -Nru nodejs-0.11.13/deps/v8/src/ia32/assembler-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/assembler-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/assembler-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/assembler-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -34,13 +34,14 @@ // significantly by Google Inc. // Copyright 2012 the V8 project authors. All rights reserved. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "disassembler.h" -#include "macro-assembler.h" -#include "serialize.h" +#include "src/base/cpu.h" +#include "src/disassembler.h" +#include "src/macro-assembler.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -48,95 +49,35 @@ // ----------------------------------------------------------------------------- // Implementation of CpuFeatures -#ifdef DEBUG -bool CpuFeatures::initialized_ = false; -#endif -uint64_t CpuFeatures::supported_ = 0; -uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; -uint64_t CpuFeatures::cross_compile_ = 0; - - -ExternalReference ExternalReference::cpu_features() { - ASSERT(CpuFeatures::initialized_); - return ExternalReference(&CpuFeatures::supported_); -} +void CpuFeatures::ProbeImpl(bool cross_compile) { + base::CPU cpu; + CHECK(cpu.has_sse2()); // SSE2 support is mandatory. + CHECK(cpu.has_cmov()); // CMOV support is mandatory. 
+ // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; -int IntelDoubleRegister::NumAllocatableRegisters() { - if (CpuFeatures::IsSupported(SSE2)) { - return XMMRegister::kNumAllocatableRegisters; - } else { - return X87Register::kNumAllocatableRegisters; - } + if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1; + if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3; } -int IntelDoubleRegister::NumRegisters() { - if (CpuFeatures::IsSupported(SSE2)) { - return XMMRegister::kNumRegisters; - } else { - return X87Register::kNumRegisters; - } -} - - -const char* IntelDoubleRegister::AllocationIndexToString(int index) { - if (CpuFeatures::IsSupported(SSE2)) { - return XMMRegister::AllocationIndexToString(index); - } else { - return X87Register::AllocationIndexToString(index); - } -} - - -void CpuFeatures::Probe() { - ASSERT(!initialized_); - ASSERT(supported_ == 0); -#ifdef DEBUG - initialized_ = true; -#endif - if (Serializer::enabled()) { - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - return; // No features if we might serialize. - } - - uint64_t probed_features = 0; - CPU cpu; - if (cpu.has_sse41()) { - probed_features |= static_cast<uint64_t>(1) << SSE4_1; - } - if (cpu.has_sse3()) { - probed_features |= static_cast<uint64_t>(1) << SSE3; - } - if (cpu.has_sse2()) { - probed_features |= static_cast<uint64_t>(1) << SSE2; - } - if (cpu.has_cmov()) { - probed_features |= static_cast<uint64_t>(1) << CMOV; - } - - // SAHF must be available in compat/legacy mode. 
- ASSERT(cpu.has_sahf()); - probed_features |= static_cast<uint64_t>(1) << SAHF; - - uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); - supported_ = probed_features | platform_features; - found_by_runtime_probing_only_ = probed_features & ~platform_features; -} +void CpuFeatures::PrintTarget() { } +void CpuFeatures::PrintFeatures() { } // ----------------------------------------------------------------------------- // Implementation of Displacement void Displacement::init(Label* L, Type type) { - ASSERT(!L->is_bound()); + DCHECK(!L->is_bound()); int next = 0; if (L->is_linked()) { next = L->pos(); - ASSERT(next > 0); // Displacements must be at positions > 0 + DCHECK(next > 0); // Displacements must be at positions > 0 } // Ensure that we _never_ overflow the next field. - ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize)); + DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize)); data_ = NextField::encode(next) | TypeField::encode(type); } @@ -172,7 +113,7 @@ } // Indicate that code has changed. - CPU::FlushICache(pc_, instruction_count); + CpuFeatures::FlushICache(pc_, instruction_count); } @@ -196,11 +137,11 @@ patcher.masm()->call(target, RelocInfo::NONE32); // Check that the size of the code generated is as expected. - ASSERT_EQ(kCallCodeSize, + DCHECK_EQ(kCallCodeSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize)); // Add the requested number of int3 instructions after the call. 
- ASSERT_GE(guard_bytes, 0); + DCHECK_GE(guard_bytes, 0); for (int i = 0; i < guard_bytes; i++) { patcher.masm()->int3(); } @@ -235,7 +176,7 @@ ScaleFactor scale, int32_t disp, RelocInfo::Mode rmode) { - ASSERT(!index.is(esp)); // illegal addressing mode + DCHECK(!index.is(esp)); // illegal addressing mode // [base + index*scale + disp/r] if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) { // [base + index*scale] @@ -259,7 +200,7 @@ ScaleFactor scale, int32_t disp, RelocInfo::Mode rmode) { - ASSERT(!index.is(esp)); // illegal addressing mode + DCHECK(!index.is(esp)); // illegal addressing mode // [index*scale + disp/r] set_modrm(0, esp); set_sib(scale, index, ebp); @@ -279,7 +220,7 @@ Register Operand::reg() const { - ASSERT(is_reg_only()); + DCHECK(is_reg_only()); return Register::from_code(buf_[0] & 0x07); } @@ -319,7 +260,7 @@ void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). - ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; @@ -330,7 +271,7 @@ void Assembler::Align(int m) { - ASSERT(IsPowerOf2(m)); + DCHECK(IsPowerOf2(m)); int mask = m - 1; int addr = pc_offset(); Nop((m - (addr & mask)) & mask); @@ -349,15 +290,6 @@ void Assembler::Nop(int bytes) { EnsureSpace ensure_space(this); - if (!CpuFeatures::IsSupported(SSE2)) { - // Older CPUs that do not support SSE2 may not support multibyte NOP - // instructions. 
- for (; bytes > 0; bytes--) { - EMIT(0x90); - } - return; - } - // Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf while (bytes > 0) { switch (bytes) { @@ -489,7 +421,7 @@ void Assembler::pop(Register dst) { - ASSERT(reloc_info_writer.last_pc() != NULL); + DCHECK(reloc_info_writer.last_pc() != NULL); EnsureSpace ensure_space(this); EMIT(0x58 | dst.code()); } @@ -657,7 +589,6 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) { - ASSERT(IsEnabled(CMOV)); EnsureSpace ensure_space(this); // Opcode: 0f 40 + cc /r. EMIT(0x0F); @@ -703,6 +634,13 @@ } +void Assembler::xchg(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x87); + emit_operand(dst, src); +} + + void Assembler::adc(Register dst, int32_t imm32) { EnsureSpace ensure_space(this); emit_arith(2, Operand(dst), Immediate(imm32)); @@ -731,7 +669,7 @@ void Assembler::add(const Operand& dst, const Immediate& x) { - ASSERT(reloc_info_writer.last_pc() != NULL); + DCHECK(reloc_info_writer.last_pc() != NULL); EnsureSpace ensure_space(this); emit_arith(0, dst, x); } @@ -797,7 +735,7 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) { - ASSERT(imm16.is_int16()); + DCHECK(imm16.is_int16()); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x81); @@ -886,10 +824,17 @@ } -void Assembler::idiv(Register src) { +void Assembler::idiv(const Operand& src) { EnsureSpace ensure_space(this); EMIT(0xF7); - EMIT(0xF8 | src.code()); + emit_operand(edi, src); +} + + +void Assembler::div(const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(esi, src); } @@ -909,14 +854,19 @@ void Assembler::imul(Register dst, Register src, int32_t imm32) { + imul(dst, Operand(src), imm32); +} + + +void Assembler::imul(Register dst, const Operand& src, int32_t imm32) { EnsureSpace ensure_space(this); if (is_int8(imm32)) { EMIT(0x6B); - EMIT(0xC0 | dst.code() << 3 | src.code()); + emit_operand(dst, src); EMIT(imm32); } else { EMIT(0x69); - 
EMIT(0xC0 | dst.code() << 3 | src.code()); + emit_operand(dst, src); emit(imm32); } } @@ -956,6 +906,13 @@ } +void Assembler::neg(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(ebx, dst); +} + + void Assembler::not_(Register dst) { EnsureSpace ensure_space(this); EMIT(0xF7); @@ -963,6 +920,13 @@ } +void Assembler::not_(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(edx, dst); +} + + void Assembler::or_(Register dst, int32_t imm32) { EnsureSpace ensure_space(this); emit_arith(1, Operand(dst), Immediate(imm32)); @@ -991,7 +955,7 @@ void Assembler::rcl(Register dst, uint8_t imm8) { EnsureSpace ensure_space(this); - ASSERT(is_uint5(imm8)); // illegal shift count + DCHECK(is_uint5(imm8)); // illegal shift count if (imm8 == 1) { EMIT(0xD1); EMIT(0xD0 | dst.code()); @@ -1005,7 +969,7 @@ void Assembler::rcr(Register dst, uint8_t imm8) { EnsureSpace ensure_space(this); - ASSERT(is_uint5(imm8)); // illegal shift count + DCHECK(is_uint5(imm8)); // illegal shift count if (imm8 == 1) { EMIT(0xD1); EMIT(0xD8 | dst.code()); @@ -1019,7 +983,7 @@ void Assembler::ror(Register dst, uint8_t imm8) { EnsureSpace ensure_space(this); - ASSERT(is_uint5(imm8)); // illegal shift count + DCHECK(is_uint5(imm8)); // illegal shift count if (imm8 == 1) { EMIT(0xD1); EMIT(0xC8 | dst.code()); @@ -1038,24 +1002,24 @@ } -void Assembler::sar(Register dst, uint8_t imm8) { +void Assembler::sar(const Operand& dst, uint8_t imm8) { EnsureSpace ensure_space(this); - ASSERT(is_uint5(imm8)); // illegal shift count + DCHECK(is_uint5(imm8)); // illegal shift count if (imm8 == 1) { EMIT(0xD1); - EMIT(0xF8 | dst.code()); + emit_operand(edi, dst); } else { EMIT(0xC1); - EMIT(0xF8 | dst.code()); + emit_operand(edi, dst); EMIT(imm8); } } -void Assembler::sar_cl(Register dst) { +void Assembler::sar_cl(const Operand& dst) { EnsureSpace ensure_space(this); EMIT(0xD3); - EMIT(0xF8 | dst.code()); + emit_operand(edi, dst); } @@ -1074,24 +1038,24 
@@ } -void Assembler::shl(Register dst, uint8_t imm8) { +void Assembler::shl(const Operand& dst, uint8_t imm8) { EnsureSpace ensure_space(this); - ASSERT(is_uint5(imm8)); // illegal shift count + DCHECK(is_uint5(imm8)); // illegal shift count if (imm8 == 1) { EMIT(0xD1); - EMIT(0xE0 | dst.code()); + emit_operand(esp, dst); } else { EMIT(0xC1); - EMIT(0xE0 | dst.code()); + emit_operand(esp, dst); EMIT(imm8); } } -void Assembler::shl_cl(Register dst) { +void Assembler::shl_cl(const Operand& dst) { EnsureSpace ensure_space(this); EMIT(0xD3); - EMIT(0xE0 | dst.code()); + emit_operand(esp, dst); } @@ -1103,24 +1067,24 @@ } -void Assembler::shr(Register dst, uint8_t imm8) { +void Assembler::shr(const Operand& dst, uint8_t imm8) { EnsureSpace ensure_space(this); - ASSERT(is_uint5(imm8)); // illegal shift count + DCHECK(is_uint5(imm8)); // illegal shift count if (imm8 == 1) { EMIT(0xD1); - EMIT(0xE8 | dst.code()); + emit_operand(ebp, dst); } else { EMIT(0xC1); - EMIT(0xE8 | dst.code()); + emit_operand(ebp, dst); EMIT(imm8); } } -void Assembler::shr_cl(Register dst) { +void Assembler::shr_cl(const Operand& dst) { EnsureSpace ensure_space(this); EMIT(0xD3); - EMIT(0xE8 | dst.code()); + emit_operand(ebp, dst); } @@ -1292,7 +1256,7 @@ void Assembler::ret(int imm16) { EnsureSpace ensure_space(this); - ASSERT(is_uint16(imm16)); + DCHECK(is_uint16(imm16)); if (imm16 == 0) { EMIT(0xC3); } else { @@ -1337,7 +1301,7 @@ void Assembler::bind_to(Label* L, int pos) { EnsureSpace ensure_space(this); - ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position + DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position while (L->is_linked()) { Displacement disp = disp_at(L); int fixup_pos = L->pos(); @@ -1346,7 +1310,7 @@ long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag); } else { if (disp.type() == Displacement::UNCONDITIONAL_JUMP) { - ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected + DCHECK(byte_at(fixup_pos - 1) == 0xE9); // 
jmp expected } // Relative address, relative to point after address. int imm32 = pos - (fixup_pos + sizeof(int32_t)); @@ -1358,7 +1322,7 @@ int fixup_pos = L->near_link_pos(); int offset_to_next = static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos))); - ASSERT(offset_to_next <= 0); + DCHECK(offset_to_next <= 0); // Relative address, relative to point after address. int disp = pos - fixup_pos - sizeof(int8_t); CHECK(0 <= disp && disp <= 127); @@ -1375,7 +1339,7 @@ void Assembler::bind(Label* L) { EnsureSpace ensure_space(this); - ASSERT(!L->is_bound()); // label can only be bound once + DCHECK(!L->is_bound()); // label can only be bound once bind_to(L, pc_offset()); } @@ -1386,7 +1350,7 @@ if (L->is_bound()) { const int long_size = 5; int offs = L->pos() - pc_offset(); - ASSERT(offs <= 0); + DCHECK(offs <= 0); // 1110 1000 #32-bit disp. EMIT(0xE8); emit(offs - long_size); @@ -1401,7 +1365,7 @@ void Assembler::call(byte* entry, RelocInfo::Mode rmode) { positions_recorder()->WriteRecordedPositions(); EnsureSpace ensure_space(this); - ASSERT(!RelocInfo::IsCodeTarget(rmode)); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); EMIT(0xE8); if (RelocInfo::IsRuntimeEntry(rmode)) { emit(reinterpret_cast<uint32_t>(entry), rmode); @@ -1435,7 +1399,7 @@ TypeFeedbackId ast_id) { positions_recorder()->WriteRecordedPositions(); EnsureSpace ensure_space(this); - ASSERT(RelocInfo::IsCodeTarget(rmode) + DCHECK(RelocInfo::IsCodeTarget(rmode) || rmode == RelocInfo::CODE_AGE_SEQUENCE); EMIT(0xE8); emit(code, rmode, ast_id); @@ -1448,7 +1412,7 @@ const int short_size = 2; const int long_size = 5; int offs = L->pos() - pc_offset(); - ASSERT(offs <= 0); + DCHECK(offs <= 0); if (is_int8(offs - short_size)) { // 1110 1011 #8-bit disp. 
EMIT(0xEB); @@ -1471,7 +1435,7 @@ void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); - ASSERT(!RelocInfo::IsCodeTarget(rmode)); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); EMIT(0xE9); if (RelocInfo::IsRuntimeEntry(rmode)) { emit(reinterpret_cast<uint32_t>(entry), rmode); @@ -1490,7 +1454,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); EMIT(0xE9); emit(code, rmode); } @@ -1498,12 +1462,12 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) { EnsureSpace ensure_space(this); - ASSERT(0 <= cc && static_cast<int>(cc) < 16); + DCHECK(0 <= cc && static_cast<int>(cc) < 16); if (L->is_bound()) { const int short_size = 2; const int long_size = 6; int offs = L->pos() - pc_offset(); - ASSERT(offs <= 0); + DCHECK(offs <= 0); if (is_int8(offs - short_size)) { // 0111 tttn #8-bit disp EMIT(0x70 | cc); @@ -1530,7 +1494,7 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); - ASSERT((0 <= cc) && (static_cast<int>(cc) < 16)); + DCHECK((0 <= cc) && (static_cast<int>(cc) < 16)); // 0000 1111 1000 tttn #32-bit disp. 
EMIT(0x0F); EMIT(0x80 | cc); @@ -1657,7 +1621,7 @@ void Assembler::fisttp_s(const Operand& adr) { - ASSERT(IsEnabled(SSE3)); + DCHECK(IsEnabled(SSE3)); EnsureSpace ensure_space(this); EMIT(0xDB); emit_operand(ecx, adr); @@ -1665,7 +1629,7 @@ void Assembler::fisttp_d(const Operand& adr) { - ASSERT(IsEnabled(SSE3)); + DCHECK(IsEnabled(SSE3)); EnsureSpace ensure_space(this); EMIT(0xDD); emit_operand(ecx, adr); @@ -1942,7 +1906,7 @@ void Assembler::setcc(Condition cc, Register reg) { - ASSERT(reg.is_byte_register()); + DCHECK(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x90 | cc); @@ -1951,7 +1915,6 @@ void Assembler::cvttss2si(Register dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -1961,7 +1924,6 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1971,7 +1933,6 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1981,7 +1942,6 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1991,7 +1951,6 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2001,7 +1960,6 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2011,7 +1969,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2021,7 +1978,6 @@ void Assembler::addsd(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2031,7 +1987,6 @@ void Assembler::mulsd(XMMRegister 
dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2041,7 +1996,6 @@ void Assembler::mulsd(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2051,7 +2005,6 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2061,7 +2014,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2071,7 +2023,6 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2081,7 +2032,6 @@ void Assembler::andps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x54); @@ -2090,7 +2040,6 @@ void Assembler::orps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x56); @@ -2099,7 +2048,6 @@ void Assembler::xorps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x57); @@ -2108,7 +2056,6 @@ void Assembler::addps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x58); @@ -2117,7 +2064,6 @@ void Assembler::subps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x5C); @@ -2126,7 +2072,6 @@ void Assembler::mulps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x59); @@ -2135,7 +2080,6 @@ void Assembler::divps(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x5E); @@ -2144,7 +2088,15 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister 
src) { - ASSERT(IsEnabled(SSE2)); + EnsureSpace ensure_space(this); + EMIT(0xF2); + EMIT(0x0F); + EMIT(0x51); + emit_sse_operand(dst, src); +} + + +void Assembler::sqrtsd(XMMRegister dst, const Operand& src) { EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2154,7 +2106,6 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2164,7 +2115,6 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2174,7 +2124,6 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2184,7 +2133,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) { - ASSERT(IsEnabled(SSE4_1)); + DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2197,7 +2146,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2207,7 +2155,6 @@ void Assembler::movmskps(Register dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x50); @@ -2216,7 +2163,6 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2226,7 +2172,6 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2237,7 +2182,6 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x28); @@ -2246,8 +2190,7 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) { - ASSERT(IsEnabled(SSE2)); - ASSERT(is_uint8(imm8)); + DCHECK(is_uint8(imm8)); EnsureSpace ensure_space(this); 
EMIT(0x0F); EMIT(0xC6); @@ -2257,7 +2200,6 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2267,7 +2209,6 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2277,7 +2218,6 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2287,7 +2227,6 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2297,7 +2236,7 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE4_1)); + DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2308,7 +2247,6 @@ void Assembler::movntdq(const Operand& dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2318,7 +2256,7 @@ void Assembler::prefetch(const Operand& src, int level) { - ASSERT(is_uint2(level)); + DCHECK(is_uint2(level)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x18); @@ -2329,7 +2267,6 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); // double EMIT(0x0F); @@ -2339,7 +2276,6 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); // double EMIT(0x0F); @@ -2349,7 +2285,6 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); // float EMIT(0x0F); @@ -2359,7 +2294,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); // float EMIT(0x0F); @@ -2369,7 +2303,6 @@ void 
Assembler::movd(XMMRegister dst, const Operand& src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2379,7 +2312,6 @@ void Assembler::movd(const Operand& dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2389,8 +2321,8 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { - ASSERT(IsEnabled(SSE4_1)); - ASSERT(is_uint8(imm8)); + DCHECK(IsEnabled(SSE4_1)); + DCHECK(is_uint8(imm8)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2402,7 +2334,6 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2412,7 +2343,6 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2422,7 +2352,6 @@ void Assembler::por(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2432,7 +2361,7 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE4_1)); + DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2443,7 +2372,6 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2454,7 +2382,6 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2464,7 +2391,6 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2475,7 +2401,6 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) { - ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2485,7 +2410,6 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) { - 
ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2496,7 +2420,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) { - ASSERT(IsEnabled(SSE4_1)); + DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2508,7 +2432,7 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) { - ASSERT(IsEnabled(SSE4_1)); + DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2568,16 +2492,13 @@ void Assembler::GrowBuffer() { - ASSERT(buffer_overflow()); + DCHECK(buffer_overflow()); if (!own_buffer_) FATAL("external code buffer is too small"); // Compute new buffer size. CodeDesc desc; // the new buffer - if (buffer_size_ < 4*KB) { - desc.buffer_size = 4*KB; - } else { - desc.buffer_size = 2*buffer_size_; - } + desc.buffer_size = 2 * buffer_size_; + // Some internal data structures overflow for very large buffers, // they must ensure that kMaximalBufferSize is not too large. if ((desc.buffer_size > kMaximalBufferSize) || @@ -2599,17 +2520,12 @@ // Copy the data. int pc_delta = desc.buffer - buffer_; int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); - OS::MemMove(desc.buffer, buffer_, desc.instr_size); - OS::MemMove(rc_delta + reloc_info_writer.pos(), - reloc_info_writer.pos(), desc.reloc_size); + MemMove(desc.buffer, buffer_, desc.instr_size); + MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(), + desc.reloc_size); // Switch buffers. 
- if (isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } + DeleteArray(buffer_); buffer_ = desc.buffer; buffer_size_ = desc.buffer_size; pc_ += pc_delta; @@ -2627,14 +2543,14 @@ } } - ASSERT(!buffer_overflow()); + DCHECK(!buffer_overflow()); } void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { - ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode - ASSERT(is_uint8(imm8)); - ASSERT((op1 & 0x01) == 0); // should be 8bit operation + DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode + DCHECK(is_uint8(imm8)); + DCHECK((op1 & 0x01) == 0); // should be 8bit operation EMIT(op1); EMIT(op2 | dst.code()); EMIT(imm8); @@ -2642,7 +2558,7 @@ void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) { - ASSERT((0 <= sel) && (sel <= 7)); + DCHECK((0 <= sel) && (sel <= 7)); Register ireg = { sel }; if (x.is_int8()) { EMIT(0x83); // using a sign-extended 8-bit immediate. @@ -2661,7 +2577,7 @@ void Assembler::emit_operand(Register reg, const Operand& adr) { const unsigned length = adr.len_; - ASSERT(length > 0); + DCHECK(length > 0); // Emit updated ModRM byte containing the given register. pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3); @@ -2680,8 +2596,8 @@ void Assembler::emit_farith(int b1, int b2, int i) { - ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode - ASSERT(0 <= i && i < 8); // illegal stack offset + DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode + DCHECK(0 <= i && i < 8); // illegal stack offset EMIT(b1); EMIT(b2 + i); } @@ -2700,33 +2616,28 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - ASSERT(!RelocInfo::IsNone(rmode)); + DCHECK(!RelocInfo::IsNone(rmode)); // Don't record external references unless the heap will be serialized. 
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - if (!Serializer::enabled() && !emit_debug_code()) { - return; - } + if (rmode == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; } RelocInfo rinfo(pc_, rmode, data, NULL); reloc_info_writer.Write(&rinfo); } -MaybeObject* Assembler::AllocateConstantPool(Heap* heap) { +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { // No out-of-line constant pool support. - UNREACHABLE(); - return NULL; + DCHECK(!FLAG_enable_ool_constant_pool); + return isolate->factory()->empty_constant_pool_array(); } void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { // No out-of-line constant pool support. - UNREACHABLE(); + DCHECK(!FLAG_enable_ool_constant_pool); + return; } diff -Nru nodejs-0.11.13/deps/v8/src/ia32/assembler-ia32.h nodejs-0.11.15/deps/v8/src/ia32/assembler-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/assembler-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/assembler-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -37,8 +37,8 @@ #ifndef V8_IA32_ASSEMBLER_IA32_H_ #define V8_IA32_ASSEMBLER_IA32_H_ -#include "isolate.h" -#include "serialize.h" +#include "src/isolate.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -78,8 +78,8 @@ static inline Register FromAllocationIndex(int index); static Register from_code(int code) { - ASSERT(code >= 0); - ASSERT(code < kNumRegisters); + DCHECK(code >= 0); + DCHECK(code < kNumRegisters); Register r = { code }; return r; } @@ -88,11 +88,11 @@ // eax, ebx, ecx and edx are byte registers, the rest are not. 
bool is_byte_register() const { return code_ <= 3; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } @@ -122,7 +122,7 @@ inline const char* Register::AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); // This is the mapping of allocation indices to registers. const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" }; return kNames[index]; @@ -130,82 +130,52 @@ inline int Register::ToAllocationIndex(Register reg) { - ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp)); + DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp)); return (reg.code() >= 6) ? reg.code() - 2 : reg.code(); } inline Register Register::FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); return (index >= 4) ? 
from_code(index + 2) : from_code(index); } -struct IntelDoubleRegister { - static const int kMaxNumRegisters = 8; +struct XMMRegister { static const int kMaxNumAllocatableRegisters = 7; - static int NumAllocatableRegisters(); - static int NumRegisters(); - static const char* AllocationIndexToString(int index); + static const int kMaxNumRegisters = 8; + static int NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; + } - static int ToAllocationIndex(IntelDoubleRegister reg) { - ASSERT(reg.code() != 0); + static int ToAllocationIndex(XMMRegister reg) { + DCHECK(reg.code() != 0); return reg.code() - 1; } - static IntelDoubleRegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < NumAllocatableRegisters()); + static XMMRegister FromAllocationIndex(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index + 1); } - static IntelDoubleRegister from_code(int code) { - IntelDoubleRegister result = { code }; + static XMMRegister from_code(int code) { + XMMRegister result = { code }; return result; } bool is_valid() const { - return 0 <= code_ && code_ < NumRegisters(); + return 0 <= code_ && code_ < kMaxNumRegisters; } + int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } - int code_; -}; - - -const IntelDoubleRegister double_register_0 = { 0 }; -const IntelDoubleRegister double_register_1 = { 1 }; -const IntelDoubleRegister double_register_2 = { 2 }; -const IntelDoubleRegister double_register_3 = { 3 }; -const IntelDoubleRegister double_register_4 = { 4 }; -const IntelDoubleRegister double_register_5 = { 5 }; -const IntelDoubleRegister double_register_6 = { 6 }; -const IntelDoubleRegister double_register_7 = { 7 }; -const IntelDoubleRegister no_double_reg = { -1 }; - - -struct XMMRegister : IntelDoubleRegister { - static const int kNumAllocatableRegisters = 7; - static const int kNumRegisters = 8; - - static XMMRegister from_code(int code) { - STATIC_ASSERT(sizeof(XMMRegister) 
== sizeof(IntelDoubleRegister)); - XMMRegister result; - result.code_ = code; - return result; - } - bool is(XMMRegister reg) const { return code_ == reg.code_; } - static XMMRegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < NumAllocatableRegisters()); - return from_code(index + 1); - } - static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "xmm1", "xmm2", @@ -217,57 +187,23 @@ }; return names[index]; } -}; - - -#define xmm0 (static_cast<const XMMRegister&>(double_register_0)) -#define xmm1 (static_cast<const XMMRegister&>(double_register_1)) -#define xmm2 (static_cast<const XMMRegister&>(double_register_2)) -#define xmm3 (static_cast<const XMMRegister&>(double_register_3)) -#define xmm4 (static_cast<const XMMRegister&>(double_register_4)) -#define xmm5 (static_cast<const XMMRegister&>(double_register_5)) -#define xmm6 (static_cast<const XMMRegister&>(double_register_6)) -#define xmm7 (static_cast<const XMMRegister&>(double_register_7)) -#define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg)) - - -struct X87Register : IntelDoubleRegister { - static const int kNumAllocatableRegisters = 5; - static const int kNumRegisters = 5; - - bool is(X87Register reg) const { - return code_ == reg.code_; - } - - static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - const char* const names[] = { - "stX_0", "stX_1", "stX_2", "stX_3", "stX_4" - }; - return names[index]; - } - static X87Register FromAllocationIndex(int index) { - STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister)); - ASSERT(index >= 0 && index < NumAllocatableRegisters()); - X87Register result; - result.code_ = index; - return result; - } - - static int ToAllocationIndex(X87Register reg) { - return reg.code_; - } + int code_; }; -#define stX_0 
static_cast<const X87Register&>(double_register_0) -#define stX_1 static_cast<const X87Register&>(double_register_1) -#define stX_2 static_cast<const X87Register&>(double_register_2) -#define stX_3 static_cast<const X87Register&>(double_register_3) -#define stX_4 static_cast<const X87Register&>(double_register_4) +typedef XMMRegister DoubleRegister; -typedef IntelDoubleRegister DoubleRegister; + +const XMMRegister xmm0 = { 0 }; +const XMMRegister xmm1 = { 1 }; +const XMMRegister xmm2 = { 2 }; +const XMMRegister xmm3 = { 3 }; +const XMMRegister xmm4 = { 4 }; +const XMMRegister xmm5 = { 5 }; +const XMMRegister xmm6 = { 6 }; +const XMMRegister xmm7 = { 7 }; +const XMMRegister no_xmm_reg = { -1 }; enum Condition { @@ -310,8 +246,8 @@ } -// Corresponds to transposing the operands of a comparison. -inline Condition ReverseCondition(Condition cc) { +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cc) { switch (cc) { case below: return above; @@ -331,7 +267,7 @@ return greater_equal; default: return cc; - }; + } } @@ -364,6 +300,7 @@ int x_; RelocInfo::Mode rmode_; + friend class Operand; friend class Assembler; friend class MacroAssembler; }; @@ -386,12 +323,17 @@ class Operand BASE_EMBEDDED { public: + // reg + INLINE(explicit Operand(Register reg)); + // XMM reg INLINE(explicit Operand(XMMRegister xmm_reg)); // [disp/r] INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode)); - // disp only must always be relocated + + // [disp/r] + INLINE(explicit Operand(Immediate imm)); // [base + disp/r] explicit Operand(Register base, int32_t disp, @@ -428,6 +370,10 @@ RelocInfo::CELL); } + static Operand ForRegisterPlusImmediate(Register base, Immediate imm) { + return Operand(base, imm.x_, imm.rmode_); + } + // Returns true if this Operand is a wrapper for the specified register. 
bool is_reg(Register reg) const; @@ -439,9 +385,6 @@ Register reg() const; private: - // reg - INLINE(explicit Operand(Register reg)); - // Set the ModRM byte without an encoded 'reg' register. The // register is encoded later as part of the emit_operand operation. inline void set_modrm(int mod, Register rm); @@ -458,7 +401,6 @@ friend class Assembler; friend class MacroAssembler; - friend class LCodeGen; }; @@ -516,77 +458,6 @@ }; - -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a CpuFeatureScope before use. -// Example: -// if (assembler->IsSupported(SSE2)) { -// CpuFeatureScope fscope(assembler, SSE2); -// // Generate SSE2 floating point code. -// } else { -// // Generate standard x87 floating point code. -// } -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Check whether a feature is supported by the target CPU. 
- static bool IsSupported(CpuFeature f) { - ASSERT(initialized_); - if (Check(f, cross_compile_)) return true; - if (f == SSE2 && !FLAG_enable_sse2) return false; - if (f == SSE3 && !FLAG_enable_sse3) return false; - if (f == SSE4_1 && !FLAG_enable_sse4_1) return false; - if (f == CMOV && !FLAG_enable_cmov) return false; - return Check(f, supported_); - } - - static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { - ASSERT(initialized_); - return Check(f, found_by_runtime_probing_only_); - } - - static bool IsSafeForSnapshot(CpuFeature f) { - return Check(f, cross_compile_) || - (IsSupported(f) && - (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); - } - - static bool VerifyCrossCompiling() { - return cross_compile_ == 0; - } - - static bool VerifyCrossCompiling(CpuFeature f) { - uint64_t mask = flag2set(f); - return cross_compile_ == 0 || - (cross_compile_ & mask) == mask; - } - - private: - static bool Check(CpuFeature f, uint64_t set) { - return (set & flag2set(f)) != 0; - } - - static uint64_t flag2set(CpuFeature f) { - return static_cast<uint64_t>(1) << f; - } - -#ifdef DEBUG - static bool initialized_; -#endif - static uint64_t supported_; - static uint64_t found_by_runtime_probing_only_; - - static uint64_t cross_compile_; - - friend class ExternalReference; - friend class PlatformFeatureScope; - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -}; - - class Assembler : public AssemblerBase { private: // We check before assembling an instruction that there is sufficient @@ -628,14 +499,18 @@ ConstantPoolArray* constant_pool); inline static void set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target); + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED); static inline Address target_address_at(Address pc, Code* code) { ConstantPoolArray* constant_pool = code ? 
code->constant_pool() : NULL; return target_address_at(pc, constant_pool); } static inline void set_target_address_at(Address pc, Code* code, - Address target) { + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED) { ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; set_target_address_at(pc, constant_pool, target); } @@ -644,6 +519,9 @@ // of that call in the instruction stream. inline static Address target_address_from_return_address(Address pc); + // Return the code target address of the patch debug break slot + inline static Address break_address_from_return_address(Address pc); + // This sets the branch destination (which is in the instruction on x86). // This is for calls and branches within generated code. inline static void deserialization_set_special_target_at( @@ -778,8 +656,9 @@ void rep_stos(); void stos(); - // Exchange two registers + // Exchange void xchg(Register dst, Register src); + void xchg(Register dst, const Operand& src); // Arithmetics void adc(Register dst, int32_t imm32); @@ -821,13 +700,17 @@ void cdq(); - void idiv(Register src); + void idiv(Register src) { idiv(Operand(src)); } + void idiv(const Operand& src); + void div(Register src) { div(Operand(src)); } + void div(const Operand& src); // Signed multiply instructions. void imul(Register src); // edx:eax = eax * src. void imul(Register dst, Register src) { imul(dst, Operand(src)); } void imul(Register dst, const Operand& src); // dst = dst * src. void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32. + void imul(Register dst, const Operand& src, int32_t imm32); void inc(Register dst); void inc(const Operand& dst); @@ -838,8 +721,10 @@ void mul(Register src); // edx:eax = eax * reg. 
void neg(Register dst); + void neg(const Operand& dst); void not_(Register dst); + void not_(const Operand& dst); void or_(Register dst, int32_t imm32); void or_(Register dst, Register src) { or_(dst, Operand(src)); } @@ -853,22 +738,28 @@ void ror(Register dst, uint8_t imm8); void ror_cl(Register dst); - void sar(Register dst, uint8_t imm8); - void sar_cl(Register dst); + void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); } + void sar(const Operand& dst, uint8_t imm8); + void sar_cl(Register dst) { sar_cl(Operand(dst)); } + void sar_cl(const Operand& dst); void sbb(Register dst, const Operand& src); void shld(Register dst, Register src) { shld(dst, Operand(src)); } void shld(Register dst, const Operand& src); - void shl(Register dst, uint8_t imm8); - void shl_cl(Register dst); + void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); } + void shl(const Operand& dst, uint8_t imm8); + void shl_cl(Register dst) { shl_cl(Operand(dst)); } + void shl_cl(const Operand& dst); void shrd(Register dst, Register src) { shrd(dst, Operand(src)); } void shrd(Register dst, const Operand& src); - void shr(Register dst, uint8_t imm8); - void shr_cl(Register dst); + void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); } + void shr(const Operand& dst, uint8_t imm8); + void shr_cl(Register dst) { shr_cl(Operand(dst)); } + void shr_cl(const Operand& dst); void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); } void sub(const Operand& dst, const Immediate& x); @@ -1052,6 +943,9 @@ cvttss2si(dst, Operand(src)); } void cvttsd2si(Register dst, const Operand& src); + void cvttsd2si(Register dst, XMMRegister src) { + cvttsd2si(dst, Operand(src)); + } void cvtsd2si(Register dst, XMMRegister src); void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); } @@ -1067,6 +961,7 @@ void divsd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src); + void 
sqrtsd(XMMRegister dst, const Operand& src); void andpd(XMMRegister dst, XMMRegister src); void orpd(XMMRegister dst, XMMRegister src); @@ -1192,7 +1087,7 @@ void set_byte_at(int pos, byte value) { buffer_[pos] = value; } // Allocate a constant pool of the correct size for the generated code. - MaybeObject* AllocateConstantPool(Heap* heap); + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); // Generate the constant pool for the generated code. void PopulateConstantPool(ConstantPoolArray* constant_pool); @@ -1283,7 +1178,7 @@ #ifdef DEBUG ~EnsureSpace() { int bytes_generated = space_before_ - assembler_->available_space(); - ASSERT(bytes_generated < assembler_->kGap); + DCHECK(bytes_generated < assembler_->kGap); } #endif diff -Nru nodejs-0.11.13/deps/v8/src/ia32/assembler-ia32-inl.h nodejs-0.11.15/deps/v8/src/ia32/assembler-ia32-inl.h --- nodejs-0.11.13/deps/v8/src/ia32/assembler-ia32-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/assembler-ia32-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -37,60 +37,63 @@ #ifndef V8_IA32_ASSEMBLER_IA32_INL_H_ #define V8_IA32_ASSEMBLER_IA32_INL_H_ -#include "ia32/assembler-ia32.h" +#include "src/ia32/assembler-ia32.h" -#include "cpu.h" -#include "debug.h" +#include "src/assembler.h" +#include "src/debug.h" namespace v8 { namespace internal { +bool CpuFeatures::SupportsCrankshaft() { return true; } + static const byte kCallOpcode = 0xE8; static const int kNoCodeAgeSequenceLength = 5; // The modes possibly affected by apply must be in kApplyMask. -void RelocInfo::apply(intptr_t delta) { +void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { + bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH; if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) { int32_t* p = reinterpret_cast<int32_t*>(pc_); *p -= delta; // Relocate entry. 
- CPU::FlushICache(p, sizeof(uint32_t)); + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); } else if (rmode_ == CODE_AGE_SEQUENCE) { if (*pc_ == kCallOpcode) { int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); *p -= delta; // Relocate entry. - CPU::FlushICache(p, sizeof(uint32_t)); + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); } } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) { // Special handling of js_return when a break point is set (call // instruction has been inserted). int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); *p -= delta; // Relocate entry. - CPU::FlushICache(p, sizeof(uint32_t)); + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) { // Special handling of a debug break slot when a break point is set (call // instruction has been inserted). int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); *p -= delta; // Relocate entry. - CPU::FlushICache(p, sizeof(uint32_t)); + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); } else if (IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. int32_t* p = reinterpret_cast<int32_t*>(pc_); *p += delta; // Relocate entry. 
- CPU::FlushICache(p, sizeof(uint32_t)); + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); } } Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); return reinterpret_cast<Address>(pc_); @@ -108,10 +111,13 @@ } -void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - Assembler::set_target_address_at(pc_, host_, target); - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && + IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( host(), this, HeapObject::cast(target_code)); @@ -120,23 +126,26 @@ Object* RelocInfo::target_object() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Memory::Object_at(pc_); } Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Memory::Object_Handle_at(pc_); } -void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - 
ASSERT(!target->IsConsString()); +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Memory::Object_at(pc_) = target; - CPU::FlushICache(pc_, sizeof(Address)); - if (mode == UPDATE_WRITE_BARRIER && + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { host()->GetHeap()->incremental_marking()->RecordWrite( @@ -146,43 +155,50 @@ Address RelocInfo::target_reference() { - ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE); + DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE); return Memory::Address_at(pc_); } Address RelocInfo::target_runtime_entry(Assembler* origin) { - ASSERT(IsRuntimeEntry(rmode_)); + DCHECK(IsRuntimeEntry(rmode_)); return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_)); } void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode mode) { - ASSERT(IsRuntimeEntry(rmode_)); - if (target_address() != target) set_target_address(target, mode); + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) { + set_target_address(target, write_barrier_mode, icache_flush_mode); + } } Handle<Cell> RelocInfo::target_cell_handle() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); Address address = Memory::Address_at(pc_); return Handle<Cell>(reinterpret_cast<Cell**>(address)); } Cell* RelocInfo::target_cell() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); return Cell::FromValueAddress(Memory::Address_at(pc_)); } -void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { - ASSERT(rmode_ == RelocInfo::CELL); +void RelocInfo::set_target_cell(Cell* cell, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode 
icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CELL); Address address = cell->address() + Cell::kValueOffset; Memory::Address_at(pc_) = address; - CPU::FlushICache(pc_, sizeof(Address)); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL) { + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) { // TODO(1550) We are passing NULL as a slot because cell can never be on // evacuation candidate. host()->GetHeap()->incremental_marking()->RecordWrite( @@ -192,36 +208,38 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - ASSERT(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(*pc_ == kCallOpcode); return Memory::Object_Handle_at(pc_ + 1); } Code* RelocInfo::code_age_stub() { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - ASSERT(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(*pc_ == kCallOpcode); return Code::GetCodeFromTargetAddress( Assembler::target_address_at(pc_ + 1, host_)); } -void RelocInfo::set_code_age_stub(Code* stub) { - ASSERT(*pc_ == kCallOpcode); - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start()); +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(), + icache_flush_mode); } Address RelocInfo::call_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return Assembler::target_address_at(pc_ + 1, host_); } void RelocInfo::set_call_address(Address target) { - ASSERT((IsJSReturn(rmode()) && 
IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Assembler::set_target_address_at(pc_ + 1, host_, target); if (host() != NULL) { @@ -243,7 +261,7 @@ Object** RelocInfo::call_object_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return reinterpret_cast<Object**>(pc_ + 1); } @@ -275,24 +293,22 @@ RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { visitor->VisitEmbeddedPointer(this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::CELL) { visitor->VisitCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeAgeSequence(mode)) { visitor->VisitCodeAgeSequence(this); - #ifdef ENABLE_DEBUGGER_SUPPORT } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence())) && isolate->debug()->has_break_points()) { visitor->VisitDebugTarget(this); -#endif } else if (IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } @@ -304,24 +320,22 @@ RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { StaticVisitor::VisitEmbeddedPointer(heap, this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::CELL) { StaticVisitor::VisitCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { 
StaticVisitor::VisitExternalReference(this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeAgeSequence(mode)) { StaticVisitor::VisitCodeAgeSequence(heap, this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); -#endif } else if (IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } @@ -352,7 +366,7 @@ // Verify all Objects referred by code are NOT in new space. Object* obj = *handle; if (obj->IsHeapObject()) { - ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); + DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); x_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } else { @@ -385,7 +399,7 @@ AllowDeferredHandleDereference heap_object_check; // Verify all Objects referred by code are NOT in new space. 
Object* obj = *handle; - ASSERT(!isolate()->heap()->InNewSpace(obj)); + DCHECK(!isolate()->heap()->InNewSpace(obj)); if (obj->IsHeapObject()) { emit(reinterpret_cast<intptr_t>(handle.location()), RelocInfo::EMBEDDED_OBJECT); @@ -438,7 +452,7 @@ void Assembler::emit_w(const Immediate& x) { - ASSERT(RelocInfo::IsNone(x.rmode_)); + DCHECK(RelocInfo::IsNone(x.rmode_)); uint16_t value = static_cast<uint16_t>(x.x_); reinterpret_cast<uint16_t*>(pc_)[0] = value; pc_ += sizeof(uint16_t); @@ -453,10 +467,13 @@ void Assembler::set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target) { + Address target, + ICacheFlushMode icache_flush_mode) { int32_t* p = reinterpret_cast<int32_t*>(pc); *p = target - (pc + sizeof(int32_t)); - CPU::FlushICache(p, sizeof(int32_t)); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(p, sizeof(int32_t)); + } } @@ -465,6 +482,11 @@ } +Address Assembler::break_address_from_return_address(Address pc) { + return pc - Assembler::kPatchDebugBreakSlotReturnOffset; +} + + Displacement Assembler::disp_at(Label* L) { return Displacement(long_at(L->pos())); } @@ -486,7 +508,7 @@ byte disp = 0x00; if (L->is_near_linked()) { int offset = L->near_link_pos() - pc_offset(); - ASSERT(is_int8(offset)); + DCHECK(is_int8(offset)); disp = static_cast<byte>(offset & 0xFF); } L->link_to(pc_offset(), Label::kNear); @@ -495,30 +517,30 @@ void Operand::set_modrm(int mod, Register rm) { - ASSERT((mod & -4) == 0); + DCHECK((mod & -4) == 0); buf_[0] = mod << 6 | rm.code(); len_ = 1; } void Operand::set_sib(ScaleFactor scale, Register index, Register base) { - ASSERT(len_ == 1); - ASSERT((scale & -4) == 0); + DCHECK(len_ == 1); + DCHECK((scale & -4) == 0); // Use SIB with no index register only for base esp. 
- ASSERT(!index.is(esp) || base.is(esp)); + DCHECK(!index.is(esp) || base.is(esp)); buf_[1] = scale << 6 | index.code() << 3 | base.code(); len_ = 2; } void Operand::set_disp8(int8_t disp) { - ASSERT(len_ == 1 || len_ == 2); + DCHECK(len_ == 1 || len_ == 2); *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp; } void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) { - ASSERT(len_ == 1 || len_ == 2); + DCHECK(len_ == 1 || len_ == 2); int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]); *p = disp; len_ += sizeof(int32_t); @@ -543,6 +565,12 @@ set_dispr(disp, rmode); } + +Operand::Operand(Immediate imm) { + // [disp/r] + set_modrm(0, ebp); + set_dispr(imm.x_, imm.rmode_); +} } } // namespace v8::internal #endif // V8_IA32_ASSEMBLER_IA32_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/ia32/builtins-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/builtins-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/builtins-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/builtins-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -65,7 +42,7 @@ __ push(edi); __ push(scratch); // Restore return address. 
} else { - ASSERT(extra_args == NO_EXTRA_ARGUMENTS); + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); } // JumpToExternalReference expects eax to contain the number of arguments @@ -115,7 +92,7 @@ __ cmp(esp, Operand::StaticVariable(stack_limit)); __ j(above_equal, &ok, Label::kNear); - CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode); + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); GenerateTailCallToReturnedCode(masm); __ bind(&ok); @@ -125,7 +102,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function, - bool count_constructions, bool create_memento) { // ----------- S t a t e ------------- // -- eax: number of arguments @@ -133,14 +109,8 @@ // -- ebx: allocation site or undefined // ----------------------------------- - // Should never count constructions for api objects. - ASSERT(!is_api_function || !count_constructions); - // Should never create mementos for api functions. - ASSERT(!is_api_function || !create_memento); - - // Should never create mementos before slack tracking is finished. - ASSERT(!count_constructions || !create_memento); + DCHECK(!is_api_function || !create_memento); // Enter a construct frame. { @@ -163,12 +133,10 @@ Label rt_call, allocated; if (FLAG_inline_new) { Label undo_allocation; -#ifdef ENABLE_DEBUGGER_SUPPORT ExternalReference debug_step_in_fp = ExternalReference::debug_step_in_fp_address(masm->isolate()); __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0)); __ j(not_equal, &rt_call); -#endif // Verified that the constructor is a JSFunction. // Load the initial map and verify that it is in fact a map. @@ -189,23 +157,32 @@ __ CmpInstanceType(eax, JS_FUNCTION_TYPE); __ j(equal, &rt_call); - if (count_constructions) { + if (!is_api_function) { Label allocate; + // The code below relies on these assumptions. 
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0); + STATIC_ASSERT(Map::ConstructionCount::kShift + + Map::ConstructionCount::kSize == 32); + // Check if slack tracking is enabled. + __ mov(esi, FieldOperand(eax, Map::kBitField3Offset)); + __ shr(esi, Map::ConstructionCount::kShift); + __ j(zero, &allocate); // JSFunction::kNoSlackTracking // Decrease generous allocation count. - __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ dec_b(FieldOperand(ecx, - SharedFunctionInfo::kConstructionCountOffset)); - __ j(not_zero, &allocate); + __ sub(FieldOperand(eax, Map::kBitField3Offset), + Immediate(1 << Map::ConstructionCount::kShift)); + + __ cmp(esi, JSFunction::kFinishSlackTracking); + __ j(not_equal, &allocate); __ push(eax); __ push(edi); __ push(edi); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1); + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); __ pop(edi); __ pop(eax); + __ xor_(esi, esi); // JSFunction::kNoSlackTracking __ bind(&allocate); } @@ -235,9 +212,17 @@ // eax: initial map // ebx: JSObject // edi: start of next object (including memento if create_memento) - __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); + // esi: slack tracking counter (non-API function case) __ mov(edx, factory->undefined_value()); - if (count_constructions) { + __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. + __ cmp(esi, JSFunction::kNoSlackTracking); + __ j(equal, &no_inobject_slack_tracking); + + // Allocate object with a slack. 
__ movzx_b(esi, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); __ lea(esi, @@ -250,16 +235,19 @@ } __ InitializeFieldsWithFiller(ecx, esi, edx); __ mov(edx, factory->one_pointer_filler_map()); - __ InitializeFieldsWithFiller(ecx, edi, edx); - } else if (create_memento) { + // Fill the remaining fields with one pointer filler map. + + __ bind(&no_inobject_slack_tracking); + } + + if (create_memento) { __ lea(esi, Operand(edi, -AllocationMemento::kSize)); __ InitializeFieldsWithFiller(ecx, esi, edx); // Fill in memento fields if necessary. // esi: points to the allocated but uninitialized memento. - Handle<Map> allocation_memento_map = factory->allocation_memento_map(); __ mov(Operand(esi, AllocationMemento::kMapOffset), - allocation_memento_map); + factory->allocation_memento_map()); // Get the cell or undefined. __ mov(edx, Operand(esp, kPointerSize*2)); __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset), @@ -365,14 +353,15 @@ offset = kPointerSize; } - // Must restore edi (constructor) before calling runtime. + // Must restore esi (context) and edi (constructor) before calling runtime. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ mov(edi, Operand(esp, offset)); // edi: function (constructor) __ push(edi); if (create_memento) { - __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2); + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); } else { - __ CallRuntime(Runtime::kHiddenNewObject, 1); + __ CallRuntime(Runtime::kNewObject, 1); } __ mov(ebx, eax); // store result in ebx @@ -438,7 +427,7 @@ } // Store offset of return address for deoptimizer. 
- if (!is_api_function && !count_constructions) { + if (!is_api_function) { masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); } @@ -480,18 +469,13 @@ } -void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, true, false); -} - - void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new); + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); } void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, true, false, false); + Generate_JSConstructStubHelper(masm, true, false); } @@ -540,7 +524,7 @@ if (is_construct) { // No type feedback cell is available __ mov(ebx, masm->isolate()->factory()->undefined_value()); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); __ CallStub(&stub); } else { ParameterCount actual(eax); @@ -567,7 +551,7 @@ void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized); + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); GenerateTailCallToReturnedCode(masm); } @@ -582,7 +566,7 @@ // Whether to compile in a background thread. __ Push(masm->isolate()->factory()->ToBoolean(concurrent)); - __ CallRuntime(Runtime::kHiddenCompileOptimized, 2); + __ CallRuntime(Runtime::kCompileOptimized, 2); // Restore receiver. __ pop(edi); } @@ -686,7 +670,7 @@ // stubs that tail call the runtime on deopts passing their parameters in // registers. __ pushad(); - __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); __ popad(); // Tear down internal frame. 
} @@ -702,12 +686,7 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { - if (Serializer::enabled()) { - PlatformFeatureScope sse2(SSE2); - Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); - } else { - Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); - } + Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); } @@ -718,7 +697,7 @@ // Pass deoptimization type to the runtime system. __ push(Immediate(Smi::FromInt(static_cast<int>(type)))); - __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); // Tear down internal frame. } @@ -786,7 +765,7 @@ // 3a. Patch the first argument if necessary when calling a function. Label shift_arguments; __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION - { Label convert_to_object, use_global_receiver, patch_receiver; + { Label convert_to_object, use_global_proxy, patch_receiver; // Change context eagerly in case we need the global receiver. __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); @@ -808,9 +787,9 @@ // global object if it is null or undefined. 
__ JumpIfSmi(ebx, &convert_to_object); __ cmp(ebx, factory->null_value()); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); __ cmp(ebx, factory->undefined_value()); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); __ j(above_equal, &shift_arguments); @@ -835,10 +814,10 @@ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize)); __ jmp(&patch_receiver); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset)); __ bind(&patch_receiver); __ mov(Operand(esp, eax, times_4, 0), ebx); @@ -949,7 +928,7 @@ // Out of stack space. __ push(Operand(ebp, 4 * kPointerSize)); // push this __ push(eax); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); __ bind(&okay); // End of stack check. @@ -964,7 +943,7 @@ __ mov(ebx, Operand(ebp, kReceiverOffset)); // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver, use_global_receiver; + Label push_receiver, use_global_proxy; __ mov(edi, Operand(ebp, kFunctionOffset)); __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); __ j(not_equal, &push_receiver); @@ -992,9 +971,9 @@ // global object if it is null or undefined. 
__ JumpIfSmi(ebx, &call_to_object); __ cmp(ebx, factory->null_value()); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); __ cmp(ebx, factory->undefined_value()); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); __ j(above_equal, &push_receiver); @@ -1005,10 +984,10 @@ __ mov(ebx, eax); __ jmp(&push_receiver); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset)); // Push the receiver. __ bind(&push_receiver); @@ -1016,12 +995,17 @@ // Copy all arguments from the array to the stack. Label entry, loop; - __ mov(ecx, Operand(ebp, kIndexOffset)); + Register receiver = LoadIC::ReceiverRegister(); + Register key = LoadIC::NameRegister(); + __ mov(key, Operand(ebp, kIndexOffset)); __ jmp(&entry); __ bind(&loop); - __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments + __ mov(receiver, Operand(ebp, kArgumentsOffset)); // load arguments // Use inline caching to speed up access to arguments. + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), Immediate(Smi::FromInt(0))); + } Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); __ call(ic, RelocInfo::CODE_TARGET); // It is important that we do not have a test instruction after the @@ -1032,19 +1016,19 @@ // Push the nth argument. __ push(eax); - // Update the index on the stack and in register eax. - __ mov(ecx, Operand(ebp, kIndexOffset)); - __ add(ecx, Immediate(1 << kSmiTagSize)); - __ mov(Operand(ebp, kIndexOffset), ecx); + // Update the index on the stack and in register key. 
+ __ mov(key, Operand(ebp, kIndexOffset)); + __ add(key, Immediate(1 << kSmiTagSize)); + __ mov(Operand(ebp, kIndexOffset), key); __ bind(&entry); - __ cmp(ecx, Operand(ebp, kLimitOffset)); + __ cmp(key, Operand(ebp, kLimitOffset)); __ j(not_equal, &loop); // Call the function. Label call_proxy; - __ mov(eax, ecx); ParameterCount actual(eax); + __ Move(eax, key); __ SmiUntag(eax); __ mov(edi, Operand(ebp, kFunctionOffset)); __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); @@ -1252,6 +1236,33 @@ } +static void ArgumentsAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- eax : actual number of arguments + // -- ebx : expected number of arguments + // -- edi : function (passed through to callee) + // ----------------------------------- + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + ExternalReference real_stack_limit = + ExternalReference::address_of_real_stack_limit(masm->isolate()); + __ mov(edx, Operand::StaticVariable(real_stack_limit)); + // Make ecx the space we have left. The stack might already be overflowed + // here which will cause ecx to become negative. + __ mov(ecx, esp); + __ sub(ecx, edx); + // Make edx the space we need for the array when it is unrolled onto the + // stack. + __ mov(edx, ebx); + __ shl(edx, kPointerSizeLog2); + // Check if the arguments will overflow the stack. + __ cmp(ecx, edx); + __ j(less_equal, stack_overflow); // Signed comparison. 
+} + + static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ push(ebp); __ mov(ebp, esp); @@ -1296,6 +1307,9 @@ Label invoke, dont_adapt_arguments; __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1); + Label stack_overflow; + ArgumentsAdaptorStackCheck(masm, &stack_overflow); + Label enough, too_few; __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); __ cmp(eax, ebx); @@ -1370,6 +1384,14 @@ // ------------------------------------------- __ bind(&dont_adapt_arguments); __ jmp(edx); + + __ bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ int3(); + } } @@ -1419,7 +1441,7 @@ __ j(above_equal, &ok, Label::kNear); { FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kHiddenStackGuard, 0); + __ CallRuntime(Runtime::kStackGuard, 0); } __ jmp(masm->isolate()->builtins()->OnStackReplacement(), RelocInfo::CODE_TARGET); diff -Nru nodejs-0.11.13/deps/v8/src/ia32/codegen-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/codegen-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/codegen-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/codegen-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "codegen.h" -#include "heap.h" -#include "macro-assembler.h" +#include "src/codegen.h" +#include "src/heap/heap.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -42,14 +19,14 @@ void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); + DCHECK(!masm->has_frame()); masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); + DCHECK(masm->has_frame()); masm->set_has_frame(false); } @@ -58,10 +35,10 @@ UnaryMathFunction CreateExpFunction() { - if (!CpuFeatures::IsSupported(SSE2)) return &std::exp; if (!FLAG_fast_math) return &std::exp; size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::exp; ExternalReference::InitializeMathExpData(); @@ -69,7 +46,6 @@ // esp[1 * kPointerSize]: raw double input // esp[0 * kPointerSize]: return address { - CpuFeatureScope use_sse2(&masm, SSE2); XMMRegister input = xmm1; XMMRegister result = xmm2; __ movsd(input, Operand(esp, 1 * kPointerSize)); @@ -87,10 +63,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<UnaryMathFunction>(buffer); } @@ -98,18 +74,14 @@ UnaryMathFunction CreateSqrtFunction() { size_t actual_size; // Allocate buffer in executable space. 
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, - &actual_size, - true)); - // If SSE2 is not available, we can use libc's implementation to ensure - // consistency since code by fullcodegen's calls into runtime in that case. - if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &std::sqrt; + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &std::sqrt; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); // esp[1 * kPointerSize]: raw double input // esp[0 * kPointerSize]: return address // Move double input into registers. { - CpuFeatureScope use_sse2(&masm, SSE2); __ movsd(xmm0, Operand(esp, 1 * kPointerSize)); __ sqrtsd(xmm0, xmm0); __ movsd(Operand(esp, 1 * kPointerSize), xmm0); @@ -120,10 +92,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<UnaryMathFunction>(buffer); } @@ -214,10 +186,11 @@ }; -OS::MemMoveFunction CreateMemMoveFunction() { +MemMoveFunction CreateMemMoveFunction() { size_t actual_size; // Allocate buffer in executable space. 
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return NULL; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); LabelConverter conv(buffer); @@ -266,325 +239,264 @@ __ cmp(dst, src); __ j(equal, &pop_and_return); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope sse2_scope(&masm, SSE2); - __ prefetch(Operand(src, 0), 1); + __ prefetch(Operand(src, 0), 1); + __ cmp(count, kSmallCopySize); + __ j(below_equal, &small_size); + __ cmp(count, kMediumCopySize); + __ j(below_equal, &medium_size); + __ cmp(dst, src); + __ j(above, &backward); + + { + // |dst| is a lower address than |src|. Copy front-to-back. + Label unaligned_source, move_last_15, skip_last_move; + __ mov(eax, src); + __ sub(eax, dst); + __ cmp(eax, kMinMoveDistance); + __ j(below, &forward_much_overlap); + // Copy first 16 bytes. + __ movdqu(xmm0, Operand(src, 0)); + __ movdqu(Operand(dst, 0), xmm0); + // Determine distance to alignment: 16 - (dst & 0xF). + __ mov(edx, dst); + __ and_(edx, 0xF); + __ neg(edx); + __ add(edx, Immediate(16)); + __ add(dst, edx); + __ add(src, edx); + __ sub(count, edx); + // dst is now aligned. Main copy loop. + __ mov(loop_count, count); + __ shr(loop_count, 6); + // Check if src is also aligned. + __ test(src, Immediate(0xF)); + __ j(not_zero, &unaligned_source); + // Copy loop for aligned source and destination. + MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED); + // At most 15 bytes to copy. Copy 16 bytes at end of string. + __ bind(&move_last_15); + __ and_(count, 0xF); + __ j(zero, &skip_last_move, Label::kNear); + __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); + __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); + __ bind(&skip_last_move); + MemMoveEmitPopAndReturn(&masm); + + // Copy loop for unaligned source and aligned destination. 
+ __ bind(&unaligned_source); + MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED); + __ jmp(&move_last_15); + + // Less than kMinMoveDistance offset between dst and src. + Label loop_until_aligned, last_15_much_overlap; + __ bind(&loop_until_aligned); + __ mov_b(eax, Operand(src, 0)); + __ inc(src); + __ mov_b(Operand(dst, 0), eax); + __ inc(dst); + __ dec(count); + __ bind(&forward_much_overlap); // Entry point into this block. + __ test(dst, Immediate(0xF)); + __ j(not_zero, &loop_until_aligned); + // dst is now aligned, src can't be. Main copy loop. + __ mov(loop_count, count); + __ shr(loop_count, 6); + MemMoveEmitMainLoop(&masm, &last_15_much_overlap, + FORWARD, MOVE_UNALIGNED); + __ bind(&last_15_much_overlap); + __ and_(count, 0xF); + __ j(zero, &pop_and_return); __ cmp(count, kSmallCopySize); __ j(below_equal, &small_size); - __ cmp(count, kMediumCopySize); - __ j(below_equal, &medium_size); - __ cmp(dst, src); - __ j(above, &backward); - - { - // |dst| is a lower address than |src|. Copy front-to-back. - Label unaligned_source, move_last_15, skip_last_move; - __ mov(eax, src); - __ sub(eax, dst); - __ cmp(eax, kMinMoveDistance); - __ j(below, &forward_much_overlap); - // Copy first 16 bytes. - __ movdqu(xmm0, Operand(src, 0)); - __ movdqu(Operand(dst, 0), xmm0); - // Determine distance to alignment: 16 - (dst & 0xF). - __ mov(edx, dst); - __ and_(edx, 0xF); - __ neg(edx); - __ add(edx, Immediate(16)); - __ add(dst, edx); - __ add(src, edx); - __ sub(count, edx); - // dst is now aligned. Main copy loop. - __ mov(loop_count, count); - __ shr(loop_count, 6); - // Check if src is also aligned. - __ test(src, Immediate(0xF)); - __ j(not_zero, &unaligned_source); - // Copy loop for aligned source and destination. - MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED); - // At most 15 bytes to copy. Copy 16 bytes at end of string. 
- __ bind(&move_last_15); - __ and_(count, 0xF); - __ j(zero, &skip_last_move, Label::kNear); - __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); - __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); - __ bind(&skip_last_move); - MemMoveEmitPopAndReturn(&masm); - - // Copy loop for unaligned source and aligned destination. - __ bind(&unaligned_source); - MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED); - __ jmp(&move_last_15); - - // Less than kMinMoveDistance offset between dst and src. - Label loop_until_aligned, last_15_much_overlap; - __ bind(&loop_until_aligned); - __ mov_b(eax, Operand(src, 0)); - __ inc(src); - __ mov_b(Operand(dst, 0), eax); - __ inc(dst); - __ dec(count); - __ bind(&forward_much_overlap); // Entry point into this block. - __ test(dst, Immediate(0xF)); - __ j(not_zero, &loop_until_aligned); - // dst is now aligned, src can't be. Main copy loop. - __ mov(loop_count, count); - __ shr(loop_count, 6); - MemMoveEmitMainLoop(&masm, &last_15_much_overlap, - FORWARD, MOVE_UNALIGNED); - __ bind(&last_15_much_overlap); - __ and_(count, 0xF); - __ j(zero, &pop_and_return); - __ cmp(count, kSmallCopySize); - __ j(below_equal, &small_size); - __ jmp(&medium_size); - } + __ jmp(&medium_size); + } - { - // |dst| is a higher address than |src|. Copy backwards. - Label unaligned_source, move_first_15, skip_last_move; - __ bind(&backward); - // |dst| and |src| always point to the end of what's left to copy. - __ add(dst, count); - __ add(src, count); - __ mov(eax, dst); - __ sub(eax, src); - __ cmp(eax, kMinMoveDistance); - __ j(below, &backward_much_overlap); - // Copy last 16 bytes. - __ movdqu(xmm0, Operand(src, -0x10)); - __ movdqu(Operand(dst, -0x10), xmm0); - // Find distance to alignment: dst & 0xF - __ mov(edx, dst); - __ and_(edx, 0xF); - __ sub(dst, edx); - __ sub(src, edx); - __ sub(count, edx); - // dst is now aligned. Main copy loop. - __ mov(loop_count, count); - __ shr(loop_count, 6); - // Check if src is also aligned. 
- __ test(src, Immediate(0xF)); - __ j(not_zero, &unaligned_source); - // Copy loop for aligned source and destination. - MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED); - // At most 15 bytes to copy. Copy 16 bytes at beginning of string. - __ bind(&move_first_15); - __ and_(count, 0xF); - __ j(zero, &skip_last_move, Label::kNear); - __ sub(src, count); - __ sub(dst, count); - __ movdqu(xmm0, Operand(src, 0)); - __ movdqu(Operand(dst, 0), xmm0); - __ bind(&skip_last_move); - MemMoveEmitPopAndReturn(&masm); - - // Copy loop for unaligned source and aligned destination. - __ bind(&unaligned_source); - MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED); - __ jmp(&move_first_15); - - // Less than kMinMoveDistance offset between dst and src. - Label loop_until_aligned, first_15_much_overlap; - __ bind(&loop_until_aligned); - __ dec(src); - __ dec(dst); - __ mov_b(eax, Operand(src, 0)); - __ mov_b(Operand(dst, 0), eax); - __ dec(count); - __ bind(&backward_much_overlap); // Entry point into this block. - __ test(dst, Immediate(0xF)); - __ j(not_zero, &loop_until_aligned); - // dst is now aligned, src can't be. Main copy loop. - __ mov(loop_count, count); - __ shr(loop_count, 6); - MemMoveEmitMainLoop(&masm, &first_15_much_overlap, - BACKWARD, MOVE_UNALIGNED); - __ bind(&first_15_much_overlap); - __ and_(count, 0xF); - __ j(zero, &pop_and_return); - // Small/medium handlers expect dst/src to point to the beginning. - __ sub(dst, count); - __ sub(src, count); - __ cmp(count, kSmallCopySize); - __ j(below_equal, &small_size); - __ jmp(&medium_size); - } - { - // Special handlers for 9 <= copy_size < 64. No assumptions about - // alignment or move distance, so all reads must be unaligned and - // must happen before any writes. 
- Label medium_handlers, f9_16, f17_32, f33_48, f49_63; - - __ bind(&f9_16); - __ movsd(xmm0, Operand(src, 0)); - __ movsd(xmm1, Operand(src, count, times_1, -8)); - __ movsd(Operand(dst, 0), xmm0); - __ movsd(Operand(dst, count, times_1, -8), xmm1); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f17_32); - __ movdqu(xmm0, Operand(src, 0)); - __ movdqu(xmm1, Operand(src, count, times_1, -0x10)); - __ movdqu(Operand(dst, 0x00), xmm0); - __ movdqu(Operand(dst, count, times_1, -0x10), xmm1); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f33_48); - __ movdqu(xmm0, Operand(src, 0x00)); - __ movdqu(xmm1, Operand(src, 0x10)); - __ movdqu(xmm2, Operand(src, count, times_1, -0x10)); - __ movdqu(Operand(dst, 0x00), xmm0); - __ movdqu(Operand(dst, 0x10), xmm1); - __ movdqu(Operand(dst, count, times_1, -0x10), xmm2); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f49_63); - __ movdqu(xmm0, Operand(src, 0x00)); - __ movdqu(xmm1, Operand(src, 0x10)); - __ movdqu(xmm2, Operand(src, 0x20)); - __ movdqu(xmm3, Operand(src, count, times_1, -0x10)); - __ movdqu(Operand(dst, 0x00), xmm0); - __ movdqu(Operand(dst, 0x10), xmm1); - __ movdqu(Operand(dst, 0x20), xmm2); - __ movdqu(Operand(dst, count, times_1, -0x10), xmm3); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&medium_handlers); - __ dd(conv.address(&f9_16)); - __ dd(conv.address(&f17_32)); - __ dd(conv.address(&f33_48)); - __ dd(conv.address(&f49_63)); - - __ bind(&medium_size); // Entry point into this block. - __ mov(eax, count); - __ dec(eax); - __ shr(eax, 4); - if (FLAG_debug_code) { - Label ok; - __ cmp(eax, 3); - __ j(below_equal, &ok); - __ int3(); - __ bind(&ok); - } - __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers))); - __ jmp(eax); - } - { - // Specialized copiers for copy_size <= 8 bytes. 
- Label small_handlers, f0, f1, f2, f3, f4, f5_8; - __ bind(&f0); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f1); - __ mov_b(eax, Operand(src, 0)); - __ mov_b(Operand(dst, 0), eax); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f2); - __ mov_w(eax, Operand(src, 0)); - __ mov_w(Operand(dst, 0), eax); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f3); - __ mov_w(eax, Operand(src, 0)); - __ mov_b(edx, Operand(src, 2)); - __ mov_w(Operand(dst, 0), eax); - __ mov_b(Operand(dst, 2), edx); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f4); - __ mov(eax, Operand(src, 0)); - __ mov(Operand(dst, 0), eax); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&f5_8); - __ mov(eax, Operand(src, 0)); - __ mov(edx, Operand(src, count, times_1, -4)); - __ mov(Operand(dst, 0), eax); - __ mov(Operand(dst, count, times_1, -4), edx); - MemMoveEmitPopAndReturn(&masm); - - __ bind(&small_handlers); - __ dd(conv.address(&f0)); - __ dd(conv.address(&f1)); - __ dd(conv.address(&f2)); - __ dd(conv.address(&f3)); - __ dd(conv.address(&f4)); - __ dd(conv.address(&f5_8)); - __ dd(conv.address(&f5_8)); - __ dd(conv.address(&f5_8)); - __ dd(conv.address(&f5_8)); - - __ bind(&small_size); // Entry point into this block. - if (FLAG_debug_code) { - Label ok; - __ cmp(count, 8); - __ j(below_equal, &ok); - __ int3(); - __ bind(&ok); - } - __ mov(eax, Operand(count, times_4, conv.address(&small_handlers))); - __ jmp(eax); - } - } else { - // No SSE2. - Label forward; - __ cmp(count, 0); - __ j(equal, &pop_and_return); - __ cmp(dst, src); - __ j(above, &backward); - __ jmp(&forward); - { - // Simple forward copier. - Label forward_loop_1byte, forward_loop_4byte; - __ bind(&forward_loop_4byte); - __ mov(eax, Operand(src, 0)); - __ sub(count, Immediate(4)); - __ add(src, Immediate(4)); - __ mov(Operand(dst, 0), eax); - __ add(dst, Immediate(4)); - __ bind(&forward); // Entry point. 
- __ cmp(count, 3); - __ j(above, &forward_loop_4byte); - __ bind(&forward_loop_1byte); - __ cmp(count, 0); - __ j(below_equal, &pop_and_return); - __ mov_b(eax, Operand(src, 0)); - __ dec(count); - __ inc(src); - __ mov_b(Operand(dst, 0), eax); - __ inc(dst); - __ jmp(&forward_loop_1byte); + { + // |dst| is a higher address than |src|. Copy backwards. + Label unaligned_source, move_first_15, skip_last_move; + __ bind(&backward); + // |dst| and |src| always point to the end of what's left to copy. + __ add(dst, count); + __ add(src, count); + __ mov(eax, dst); + __ sub(eax, src); + __ cmp(eax, kMinMoveDistance); + __ j(below, &backward_much_overlap); + // Copy last 16 bytes. + __ movdqu(xmm0, Operand(src, -0x10)); + __ movdqu(Operand(dst, -0x10), xmm0); + // Find distance to alignment: dst & 0xF + __ mov(edx, dst); + __ and_(edx, 0xF); + __ sub(dst, edx); + __ sub(src, edx); + __ sub(count, edx); + // dst is now aligned. Main copy loop. + __ mov(loop_count, count); + __ shr(loop_count, 6); + // Check if src is also aligned. + __ test(src, Immediate(0xF)); + __ j(not_zero, &unaligned_source); + // Copy loop for aligned source and destination. + MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED); + // At most 15 bytes to copy. Copy 16 bytes at beginning of string. + __ bind(&move_first_15); + __ and_(count, 0xF); + __ j(zero, &skip_last_move, Label::kNear); + __ sub(src, count); + __ sub(dst, count); + __ movdqu(xmm0, Operand(src, 0)); + __ movdqu(Operand(dst, 0), xmm0); + __ bind(&skip_last_move); + MemMoveEmitPopAndReturn(&masm); + + // Copy loop for unaligned source and aligned destination. + __ bind(&unaligned_source); + MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED); + __ jmp(&move_first_15); + + // Less than kMinMoveDistance offset between dst and src. 
+ Label loop_until_aligned, first_15_much_overlap; + __ bind(&loop_until_aligned); + __ dec(src); + __ dec(dst); + __ mov_b(eax, Operand(src, 0)); + __ mov_b(Operand(dst, 0), eax); + __ dec(count); + __ bind(&backward_much_overlap); // Entry point into this block. + __ test(dst, Immediate(0xF)); + __ j(not_zero, &loop_until_aligned); + // dst is now aligned, src can't be. Main copy loop. + __ mov(loop_count, count); + __ shr(loop_count, 6); + MemMoveEmitMainLoop(&masm, &first_15_much_overlap, + BACKWARD, MOVE_UNALIGNED); + __ bind(&first_15_much_overlap); + __ and_(count, 0xF); + __ j(zero, &pop_and_return); + // Small/medium handlers expect dst/src to point to the beginning. + __ sub(dst, count); + __ sub(src, count); + __ cmp(count, kSmallCopySize); + __ j(below_equal, &small_size); + __ jmp(&medium_size); + } + { + // Special handlers for 9 <= copy_size < 64. No assumptions about + // alignment or move distance, so all reads must be unaligned and + // must happen before any writes. + Label medium_handlers, f9_16, f17_32, f33_48, f49_63; + + __ bind(&f9_16); + __ movsd(xmm0, Operand(src, 0)); + __ movsd(xmm1, Operand(src, count, times_1, -8)); + __ movsd(Operand(dst, 0), xmm0); + __ movsd(Operand(dst, count, times_1, -8), xmm1); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f17_32); + __ movdqu(xmm0, Operand(src, 0)); + __ movdqu(xmm1, Operand(src, count, times_1, -0x10)); + __ movdqu(Operand(dst, 0x00), xmm0); + __ movdqu(Operand(dst, count, times_1, -0x10), xmm1); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f33_48); + __ movdqu(xmm0, Operand(src, 0x00)); + __ movdqu(xmm1, Operand(src, 0x10)); + __ movdqu(xmm2, Operand(src, count, times_1, -0x10)); + __ movdqu(Operand(dst, 0x00), xmm0); + __ movdqu(Operand(dst, 0x10), xmm1); + __ movdqu(Operand(dst, count, times_1, -0x10), xmm2); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f49_63); + __ movdqu(xmm0, Operand(src, 0x00)); + __ movdqu(xmm1, Operand(src, 0x10)); + __ movdqu(xmm2, Operand(src, 0x20)); + __ 
movdqu(xmm3, Operand(src, count, times_1, -0x10)); + __ movdqu(Operand(dst, 0x00), xmm0); + __ movdqu(Operand(dst, 0x10), xmm1); + __ movdqu(Operand(dst, 0x20), xmm2); + __ movdqu(Operand(dst, count, times_1, -0x10), xmm3); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&medium_handlers); + __ dd(conv.address(&f9_16)); + __ dd(conv.address(&f17_32)); + __ dd(conv.address(&f33_48)); + __ dd(conv.address(&f49_63)); + + __ bind(&medium_size); // Entry point into this block. + __ mov(eax, count); + __ dec(eax); + __ shr(eax, 4); + if (FLAG_debug_code) { + Label ok; + __ cmp(eax, 3); + __ j(below_equal, &ok); + __ int3(); + __ bind(&ok); } - { - // Simple backward copier. - Label backward_loop_1byte, backward_loop_4byte, entry_shortcut; - __ bind(&backward); - __ add(src, count); - __ add(dst, count); - __ cmp(count, 3); - __ j(below_equal, &entry_shortcut); - - __ bind(&backward_loop_4byte); - __ sub(src, Immediate(4)); - __ sub(count, Immediate(4)); - __ mov(eax, Operand(src, 0)); - __ sub(dst, Immediate(4)); - __ mov(Operand(dst, 0), eax); - __ cmp(count, 3); - __ j(above, &backward_loop_4byte); - __ bind(&backward_loop_1byte); - __ cmp(count, 0); - __ j(below_equal, &pop_and_return); - __ bind(&entry_shortcut); - __ dec(src); - __ dec(count); - __ mov_b(eax, Operand(src, 0)); - __ dec(dst); - __ mov_b(Operand(dst, 0), eax); - __ jmp(&backward_loop_1byte); + __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers))); + __ jmp(eax); + } + { + // Specialized copiers for copy_size <= 8 bytes. 
+ Label small_handlers, f0, f1, f2, f3, f4, f5_8; + __ bind(&f0); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f1); + __ mov_b(eax, Operand(src, 0)); + __ mov_b(Operand(dst, 0), eax); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f2); + __ mov_w(eax, Operand(src, 0)); + __ mov_w(Operand(dst, 0), eax); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f3); + __ mov_w(eax, Operand(src, 0)); + __ mov_b(edx, Operand(src, 2)); + __ mov_w(Operand(dst, 0), eax); + __ mov_b(Operand(dst, 2), edx); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f4); + __ mov(eax, Operand(src, 0)); + __ mov(Operand(dst, 0), eax); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&f5_8); + __ mov(eax, Operand(src, 0)); + __ mov(edx, Operand(src, count, times_1, -4)); + __ mov(Operand(dst, 0), eax); + __ mov(Operand(dst, count, times_1, -4), edx); + MemMoveEmitPopAndReturn(&masm); + + __ bind(&small_handlers); + __ dd(conv.address(&f0)); + __ dd(conv.address(&f1)); + __ dd(conv.address(&f2)); + __ dd(conv.address(&f3)); + __ dd(conv.address(&f4)); + __ dd(conv.address(&f5_8)); + __ dd(conv.address(&f5_8)); + __ dd(conv.address(&f5_8)); + __ dd(conv.address(&f5_8)); + + __ bind(&small_size); // Entry point into this block. + if (FLAG_debug_code) { + Label ok; + __ cmp(count, 8); + __ j(below_equal, &ok); + __ int3(); + __ bind(&ok); } + __ mov(eax, Operand(count, times_4, conv.address(&small_handlers))); + __ jmp(eax); } __ bind(&pop_and_return); @@ -592,12 +504,12 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + DCHECK(!RelocInfo::RequiresRelocation(desc)); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); // TODO(jkummerow): It would be nice to register this code creation event // with the PROFILE / GDBJIT system. 
- return FUNCTION_CAST<OS::MemMoveFunction>(buffer); + return FUNCTION_CAST<MemMoveFunction>(buffer); } @@ -610,26 +522,28 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm, AllocationSiteMode mode, + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, Label* allocation_memento_found) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ebx : target map - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + Register scratch = edi; + DCHECK(!AreAliased(receiver, key, value, target_map, scratch)); + if (mode == TRACK_ALLOCATION_SITE) { - ASSERT(allocation_memento_found != NULL); - __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found); + DCHECK(allocation_memento_found != NULL); + __ JumpIfJSArrayHasAllocationMemento( + receiver, scratch, allocation_memento_found); } // Set transitioned map. - __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, + __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - ebx, - edi, + target_map, + scratch, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -637,14 +551,19 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ebx : target map - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Return address is on the stack. 
+ DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + DCHECK(value.is(eax)); + DCHECK(target_map.is(ebx)); + Label loop, entry, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { @@ -694,11 +613,8 @@ ExternalReference canonical_the_hole_nan_reference = ExternalReference::address_of_the_hole_nan(); XMMRegister the_hole_nan = xmm1; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ movsd(the_hole_nan, - Operand::StaticVariable(canonical_the_hole_nan_reference)); - } + __ movsd(the_hole_nan, + Operand::StaticVariable(canonical_the_hole_nan_reference)); __ jmp(&entry); // Call into runtime if GC is required. @@ -719,17 +635,9 @@ // Normal smi, convert it to double and store. __ SmiUntag(ebx); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope fscope(masm, SSE2); - __ Cvtsi2sd(xmm0, ebx); - __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), - xmm0); - } else { - __ push(ebx); - __ fild_s(Operand(esp, 0)); - __ pop(ebx); - __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); - } + __ Cvtsi2sd(xmm0, ebx); + __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), + xmm0); __ jmp(&entry); // Found hole, store hole_nan_as_double instead. 
@@ -740,14 +648,8 @@ __ Assert(equal, kObjectFoundInSmiOnlyArray); } - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), - the_hole_nan); - } else { - __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference)); - __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); - } + __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), + the_hole_nan); __ bind(&entry); __ sub(edi, Immediate(Smi::FromInt(1))); @@ -775,14 +677,19 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ebx : target map - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Return address is on the stack. + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + DCHECK(value.is(eax)); + DCHECK(target_map.is(ebx)); + Label loop, entry, convert_hole, gc_required, only_change_map, success; if (mode == TRACK_ALLOCATION_SITE) { @@ -849,17 +756,9 @@ // Non-hole double, copy value into a heap number. 
__ AllocateHeapNumber(edx, esi, no_reg, &gc_required); // edx: new heap number - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope fscope(masm, SSE2); - __ movsd(xmm0, - FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); - __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); - } else { - __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); - __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi); - __ mov(esi, FieldOperand(edi, ebx, times_4, offset)); - __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi); - } + __ movsd(xmm0, + FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); + __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx); __ mov(esi, ebx); __ RecordWriteArray(eax, @@ -971,7 +870,7 @@ __ Assert(zero, kExternalStringExpectedButNotFound); } // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ test_b(result, kShortExternalStringMask); __ j(not_zero, call_runtime); // Check encoding. @@ -1025,11 +924,12 @@ XMMRegister double_scratch, Register temp1, Register temp2) { - ASSERT(!input.is(double_scratch)); - ASSERT(!input.is(result)); - ASSERT(!result.is(double_scratch)); - ASSERT(!temp1.is(temp2)); - ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!input.is(double_scratch)); + DCHECK(!input.is(result)); + DCHECK(!result.is(double_scratch)); + DCHECK(!temp1.is(temp2)); + DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!masm->serializer_enabled()); // External references not serializable. 
Label done; @@ -1073,37 +973,33 @@ #undef __ -static byte* GetNoCodeAgeSequence(uint32_t* length) { - static bool initialized = false; - static byte sequence[kNoCodeAgeSequenceLength]; - *length = kNoCodeAgeSequenceLength; - if (!initialized) { - // The sequence of instructions that is patched out for aging code is the - // following boilerplate stack-building prologue that is found both in - // FUNCTION and OPTIMIZED_FUNCTION code: - CodePatcher patcher(sequence, kNoCodeAgeSequenceLength); - patcher.masm()->push(ebp); - patcher.masm()->mov(ebp, esp); - patcher.masm()->push(esi); - patcher.masm()->push(edi); - initialized = true; - } - return sequence; +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + CodePatcher patcher(young_sequence_.start(), young_sequence_.length()); + patcher.masm()->push(ebp); + patcher.masm()->mov(ebp, esp); + patcher.masm()->push(esi); + patcher.masm()->push(edi); +} + + +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return *candidate == kCallOpcode; } +#endif -bool Code::IsYoungSequence(byte* sequence) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); - bool result = (!memcmp(sequence, young_sequence, young_length)); - ASSERT(result || *sequence == kCallOpcode); +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); return result; } -void Code::GetCodeAgeAndParity(byte* sequence, Age* age, +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity) { - if (IsYoungSequence(sequence)) { + if (IsYoungSequence(isolate, sequence)) { *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { @@ -1120,11 +1016,10 @@ byte* sequence, Code::Age age, MarkingParity parity) { - uint32_t young_length; - byte* young_sequence = 
GetNoCodeAgeSequence(&young_length); + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { - CopyBytes(sequence, young_sequence, young_length); - CPU::FlushICache(sequence, young_length); + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); + CpuFeatures::FlushICache(sequence, young_length); } else { Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length); diff -Nru nodejs-0.11.13/deps/v8/src/ia32/codegen-ia32.h nodejs-0.11.15/deps/v8/src/ia32/codegen-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/codegen-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/codegen-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_CODEGEN_IA32_H_ #define V8_IA32_CODEGEN_IA32_H_ -#include "ast.h" -#include "ic-inl.h" +#include "src/ast.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/ia32/code-stubs-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/code-stubs-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/code-stubs-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/code-stubs-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,395 +1,270 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "bootstrapper.h" -#include "code-stubs.h" -#include "isolate.h" -#include "jsregexp.h" -#include "regexp-macro-assembler.h" -#include "runtime.h" -#include "stub-cache.h" -#include "codegen.h" -#include "runtime.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/isolate.h" +#include "src/jsregexp.h" +#include "src/regexp-macro-assembler.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { void FastNewClosureStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { ebx }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry; + Register registers[] = { esi, ebx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + 
Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); } void FastNewContextStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edi }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { esi, edi }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void ToNumberStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + // ToNumberStub invokes a function, and therefore needs a context. + Register registers[] = { esi, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void NumberToStringStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; + Register registers[] = { esi, eax }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); } void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax, ebx, ecx }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId( - Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; + Register registers[] = { esi, eax, ebx, ecx }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + 
Representation::Smi(), + Representation::Tagged() }; + + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); } void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax, ebx, ecx, edx }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry; + Register registers[] = { esi, eax, ebx, ecx, edx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); } void CreateAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { ebx, edx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { esi, ebx, edx }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallFunctionStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, ecx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + Register registers[] = {esi, edi}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallConstructStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, ecx }; - 
descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + // eax : number of arguments + // ebx : feedback vector + // edx : (only if ebx is not the megamorphic symbol) slot in feedback + // vector (Smi) + // edi : constructor function + // TODO(turbofan): So far we don't gather type feedback and hence skip the + // slot parameter, but ArrayConstructStub needs the vector to be undefined. + Register registers[] = {esi, eax, edi, ebx}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void RegExpConstructResultStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { ecx, ebx, eax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry; -} - - -void LoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedLoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void StringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, ecx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, 
CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, ecx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { esi, ecx, ebx, eax }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); } -void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void TransitionElementsKindStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, ecx, eax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); + Register registers[] = { esi, eax, ebx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry); } -void TransitionElementsKindStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax, ebx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; -} +const Register InterfaceDescriptor::ContextRegister() { return esi; } static void InitializeArrayConstructorDescriptor( - Isolate* isolate, + Isolate* isolate, CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state // eax -- number of arguments // edi -- function // ebx -- allocation site with elements kind - static Register registers_variable_args[] = { edi, ebx, eax }; - static Register registers_no_args[] = { edi, ebx }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; if 
(constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_no_args; + Register registers[] = { esi, edi, ebx }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = eax; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { esi, edi, ebx, eax }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry; } static void InitializeInternalArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state // eax -- number of arguments // edi -- constructor function - static Register registers_variable_args[] = { edi, eax }; - static Register registers_no_args[] = { edi }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kInternalArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers_no_args; + Register registers[] = { esi, edi }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, 
NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = eax; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { esi, edi, eax }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry; } void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, -1); } void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); + 
InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void CompareNilICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(CompareNilIC_Miss); + Register registers[] = { esi, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); } void ToBooleanStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ToBooleanIC_Miss); + Register registers[] = { esi, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); -} - - -void StoreGlobalStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] 
= { edx, ecx, eax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(StoreIC_MissFromStubFailure); -} - - -void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { eax, ebx, ecx, edx }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); } void BinaryOpICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, eax }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + Register registers[] = { esi, edx, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); } void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { ecx, edx, eax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite); + Register registers[] = { esi, ecx, edx, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); } void StringAddStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { edx, eax }; - descriptor->register_param_count_ = 
2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry; + Register registers[] = { esi, edx, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); } @@ -397,82 +272,72 @@ { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ArgumentAdaptorCall); - static Register registers[] = { edi, // JSFunction - esi, // context - eax, // actual number of arguments - ebx, // expected number of arguments + Register registers[] = { esi, // context + edi, // JSFunction + eax, // actual number of arguments + ebx, // expected number of arguments }; - static Representation representations[] = { - Representation::Tagged(), // JSFunction + Representation representations[] = { Representation::Tagged(), // context + Representation::Tagged(), // JSFunction Representation::Integer32(), // actual number of arguments Representation::Integer32(), // expected number of arguments }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::KeyedCall); - static Register registers[] = { esi, // context - ecx, // key + Register registers[] = { esi, // context + ecx, // key }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // key }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::NamedCall); - static Register registers[] = { esi, // context 
- ecx, // name + Register registers[] = { esi, // context + ecx, // name }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // name }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::CallHandler); - static Register registers[] = { esi, // context - edx, // receiver + Register registers[] = { esi, // context + edx, // name }; - static Representation representations[] = { - Representation::Tagged(), // context - Representation::Tagged(), // receiver + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // receiver }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ApiFunctionCall); - static Register registers[] = { eax, // callee - ebx, // call_data - ecx, // holder - edx, // api_function_address - esi, // context + Register registers[] = { esi, // context + eax, // callee + ebx, // call_data + ecx, // holder + edx, // api_function_address }; - static Representation representations[] = { + Representation representations[] = { + Representation::Tagged(), // context Representation::Tagged(), // callee Representation::Tagged(), // call_data Representation::Tagged(), // holder Representation::External(), // api_function_address - Representation::Tagged(), // context }; - descriptor->register_param_count_ = 5; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + 
descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } } @@ -482,22 +347,22 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. - Isolate* isolate = masm->isolate(); - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); - CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); - int param_count = descriptor->register_param_count_; + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); { // Call the runtime system in a fresh internal frame. FrameScope scope(masm, StackFrame::INTERNAL); - ASSERT(descriptor->register_param_count_ == 0 || - eax.is(descriptor->register_params_[param_count - 1])); + DCHECK(param_count == 0 || + eax.is(descriptor->GetEnvironmentParameterRegister( + param_count - 1))); // Push arguments for (int i = 0; i < param_count; ++i) { - __ push(descriptor->register_params_[i]); + __ push(descriptor->GetEnvironmentParameterRegister(i)); } ExternalReference miss = descriptor->miss_handler(); - __ CallExternalReference(miss, descriptor->register_param_count_); + __ CallExternalReference(miss, param_count); } __ ret(0); @@ -510,9 +375,8 @@ // restore them. 
__ pushad(); if (save_doubles_ == kSaveFPRegs) { - CpuFeatureScope scope(masm, SSE2); - __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); + for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); __ movsd(Operand(esp, i * kDoubleSize), reg); } @@ -522,17 +386,16 @@ AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, ecx); __ mov(Operand(esp, 0 * kPointerSize), - Immediate(ExternalReference::isolate_address(masm->isolate()))); + Immediate(ExternalReference::isolate_address(isolate()))); __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), + ExternalReference::store_buffer_overflow_function(isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatureScope scope(masm, SSE2); - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); __ movsd(reg, Operand(esp, i * kDoubleSize)); } - __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + __ add(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); } __ popad(); __ ret(0); @@ -570,7 +433,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { Register input_reg = this->source(); Register final_result_reg = this->destination(); - ASSERT(is_truncating()); + DCHECK(is_truncating()); Label check_negative, process_64_bits, done, done_no_stash; @@ -661,15 +524,7 @@ __ shrd(result_reg, scratch1); __ shr_cl(result_reg); __ test(ecx, Immediate(32)); - if (CpuFeatures::IsSupported(CMOV)) { - CpuFeatureScope use_cmov(masm, CMOV); - __ cmov(not_equal, scratch1, result_reg); - } else { - Label skip_mov; - __ j(equal, &skip_mov, Label::kNear); - __ mov(scratch1, result_reg); - __ bind(&skip_mov); - } + __ cmov(not_equal, scratch1, 
result_reg); } // If the double was negative, negate the integer result. @@ -681,15 +536,7 @@ } else { __ cmp(exponent_operand, Immediate(0)); } - if (CpuFeatures::IsSupported(CMOV)) { - CpuFeatureScope use_cmov(masm, CMOV); __ cmov(greater, result_reg, scratch1); - } else { - Label skip_mov; - __ j(less_equal, &skip_mov, Label::kNear); - __ mov(result_reg, scratch1); - __ bind(&skip_mov); - } // Restore registers __ bind(&done); @@ -698,7 +545,7 @@ } __ bind(&done_no_stash); if (!final_result_reg.is(result_reg)) { - ASSERT(final_result_reg.is(ecx)); + DCHECK(final_result_reg.is(ecx)); __ mov(final_result_reg, result_reg); } __ pop(save_reg); @@ -780,8 +627,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatureScope use_sse2(masm, SSE2); - Factory* factory = masm->isolate()->factory(); + Factory* factory = isolate()->factory(); const Register exponent = eax; const Register base = edx; const Register scratch = ecx; @@ -1010,11 +856,11 @@ __ Cvtsi2sd(double_exponent, exponent); // Returning or bailing out. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); if (exponent_type_ == ON_STACK) { // The arguments are still on the stack. __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); // The stub is called from non-optimized code, which expects the result // as heap number in exponent. @@ -1031,7 +877,7 @@ __ movsd(Operand(esp, 0 * kDoubleSize), double_base); __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), 4); + ExternalReference::power_double_double_function(isolate()), 4); } // Return value is in st(0) on ia32. // Store it into the (fixed) result register. 
@@ -1048,22 +894,14 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- Label miss; + Register receiver = LoadIC::ReceiverRegister(); - if (kind() == Code::KEYED_LOAD_IC) { - __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string())); - __ j(not_equal, &miss); - } - - StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss); + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax, + ebx, &miss); __ bind(&miss); - StubCompiler::TailCallBuiltin( - masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); } @@ -1147,13 +985,11 @@ __ mov(Operand(esp, 2 * kPointerSize), edx); __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { - Isolate* isolate = masm->isolate(); - // esp[0] : return address // esp[4] : number of parameters (tagged) // esp[8] : receiver displacement @@ -1184,7 +1020,7 @@ __ mov(Operand(esp, 2 * kPointerSize), edx); // ebx = parameter count (tagged) - // ecx = argument count (tagged) + // ecx = argument count (smi-tagged) // esp[4] = parameter count (tagged) // esp[8] = address of receiver argument // Compute the mapped parameter count = min(ebx, ecx) in ebx. @@ -1217,47 +1053,52 @@ __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT); // eax = address of new object(s) (tagged) - // ecx = argument count (tagged) + // ecx = argument count (smi-tagged) // esp[0] = mapped parameter count (tagged) // esp[8] = parameter count (tagged) // esp[12] = address of receiver argument - // Get the arguments boilerplate from the current native context into edi. 
- Label has_mapped_parameters, copy; + // Get the arguments map from the current native context into edi. + Label has_mapped_parameters, instantiate; __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset)); __ mov(ebx, Operand(esp, 0 * kPointerSize)); __ test(ebx, ebx); __ j(not_zero, &has_mapped_parameters, Label::kNear); - __ mov(edi, Operand(edi, - Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX))); - __ jmp(©, Label::kNear); + __ mov( + edi, + Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX))); + __ jmp(&instantiate, Label::kNear); __ bind(&has_mapped_parameters); - __ mov(edi, Operand(edi, - Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX))); - __ bind(©); + __ mov( + edi, + Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX))); + __ bind(&instantiate); // eax = address of new object (tagged) // ebx = mapped parameter count (tagged) - // ecx = argument count (tagged) - // edi = address of boilerplate object (tagged) + // ecx = argument count (smi-tagged) + // edi = address of arguments map (tagged) // esp[0] = mapped parameter count (tagged) // esp[8] = parameter count (tagged) // esp[12] = address of receiver argument // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ mov(edx, FieldOperand(edi, i)); - __ mov(FieldOperand(eax, i), edx); - } + __ mov(FieldOperand(eax, JSObject::kMapOffset), edi); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), + masm->isolate()->factory()->empty_fixed_array()); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), + masm->isolate()->factory()->empty_fixed_array()); // Set up the callee in-object property. 
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ mov(edx, Operand(esp, 4 * kPointerSize)); + __ AssertNotSmi(edx); __ mov(FieldOperand(eax, JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize), edx); // Use the length (smi tagged) and set that as an in-object property too. + __ AssertSmi(ecx); STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); __ mov(FieldOperand(eax, JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize), @@ -1285,7 +1126,7 @@ __ j(zero, &skip_parameter_map); __ mov(FieldOperand(edi, FixedArray::kMapOffset), - Immediate(isolate->factory()->sloppy_arguments_elements_map())); + Immediate(isolate()->factory()->sloppy_arguments_elements_map())); __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2)))); __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax); __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi); @@ -1306,7 +1147,7 @@ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); __ add(ebx, Operand(esp, 4 * kPointerSize)); __ sub(ebx, eax); - __ mov(ecx, isolate->factory()->the_hole_value()); + __ mov(ecx, isolate()->factory()->the_hole_value()); __ mov(edx, edi); __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize)); // eax = loop variable (tagged) @@ -1341,7 +1182,7 @@ // esp[16] = address of receiver argument // Copy arguments header and remaining slots (if there are any). __ mov(FieldOperand(edi, FixedArray::kMapOffset), - Immediate(isolate->factory()->fixed_array_map())); + Immediate(isolate()->factory()->fixed_array_map())); __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); Label arguments_loop, arguments_test; @@ -1372,13 +1213,11 @@ __ bind(&runtime); __ pop(eax); // Remove saved parameter count. __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count. 
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { - Isolate* isolate = masm->isolate(); - // esp[0] : return address // esp[4] : number of parameters // esp[8] : receiver displacement @@ -1416,22 +1255,22 @@ // Do the allocation of both objects in one go. __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); - // Get the arguments boilerplate from the current native context. + // Get the arguments map from the current native context. __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset)); - const int offset = - Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX); + const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX); __ mov(edi, Operand(edi, offset)); - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ mov(ebx, FieldOperand(edi, i)); - __ mov(FieldOperand(eax, i), ebx); - } + __ mov(FieldOperand(eax, JSObject::kMapOffset), edi); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), + masm->isolate()->factory()->empty_fixed_array()); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), + masm->isolate()->factory()->empty_fixed_array()); // Get the length (smi tagged) and set that as an in-object property too. 
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ AssertSmi(ecx); __ mov(FieldOperand(eax, JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize), ecx); @@ -1449,7 +1288,7 @@ __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize)); __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); __ mov(FieldOperand(edi, FixedArray::kMapOffset), - Immediate(isolate->factory()->fixed_array_map())); + Immediate(isolate()->factory()->fixed_array_map())); __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); // Untag the length for the loop below. @@ -1471,7 +1310,7 @@ // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); } @@ -1480,7 +1319,7 @@ // time or if regexp entry in generated code is turned off runtime switch or // at compilation. #ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. @@ -1496,14 +1335,13 @@ static const int kJSRegExpOffset = 4 * kPointerSize; Label runtime; - Factory* factory = masm->isolate()->factory(); + Factory* factory = isolate()->factory(); // Ensure that a RegExp stack is allocated. ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address( - masm->isolate()); + ExternalReference::address_of_regexp_stack_memory_address(isolate()); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); + ExternalReference::address_of_regexp_stack_memory_size(isolate()); __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); __ test(ebx, ebx); __ j(zero, &runtime); @@ -1620,8 +1458,8 @@ // (5b) Is subject external? If yes, go to (8). 
__ test_b(ebx, kStringRepresentationMask); // The underlying external string is never a short external string. - STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); - STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); __ j(not_zero, &external_string); // Go to (8). // eax: sequential subject string (or look-alike, external string) @@ -1652,7 +1490,7 @@ // edx: code // ecx: encoding of subject string (1 if ASCII, 0 if two_byte); // All checks done. Now push arguments for native regexp code. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->regexp_entry_native(), 1); // Isolates: note we add an additional parameter here (isolate pointer). @@ -1661,7 +1499,7 @@ // Argument 9: Pass current isolate address. __ mov(Operand(esp, 8 * kPointerSize), - Immediate(ExternalReference::isolate_address(masm->isolate()))); + Immediate(ExternalReference::isolate_address(isolate()))); // Argument 8: Indicate that this is a direct call from JavaScript. __ mov(Operand(esp, 7 * kPointerSize), Immediate(1)); @@ -1678,7 +1516,7 @@ // Argument 5: static offsets vector buffer. __ mov(Operand(esp, 4 * kPointerSize), Immediate(ExternalReference::address_of_static_offsets_vector( - masm->isolate()))); + isolate()))); // Argument 2: Previous index. __ SmiUntag(ebx); @@ -1752,8 +1590,8 @@ // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. 
ExternalReference pending_exception(Isolate::kPendingExceptionAddress, - masm->isolate()); - __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); + isolate()); + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); __ mov(eax, Operand::StaticVariable(pending_exception)); __ cmp(edx, eax); __ j(equal, &runtime); @@ -1834,7 +1672,7 @@ // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(masm->isolate()); + ExternalReference::address_of_static_offsets_vector(isolate()); __ mov(ecx, Immediate(address_of_static_offsets_vector)); // ebx: last_match_info backing store (FixedArray) @@ -1864,7 +1702,7 @@ // Do the runtime call to execute the regexp. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); // Deferred code for string handling. // (7) Not a long external string? If yes, go to (10). @@ -1925,8 +1763,8 @@ static int NegativeComparisonResult(Condition cc) { - ASSERT(cc != equal); - ASSERT((cc == less) || (cc == less_equal) + DCHECK(cc != equal); + DCHECK((cc == less) || (cc == less_equal) || (cc == greater) || (cc == greater_equal)); return (cc == greater || cc == greater_equal) ? LESS : GREATER; } @@ -2000,7 +1838,7 @@ // Check for undefined. undefined OP undefined is false even though // undefined == undefined. Label check_for_nan; - __ cmp(edx, masm->isolate()->factory()->undefined_value()); + __ cmp(edx, isolate()->factory()->undefined_value()); __ j(not_equal, &check_for_nan, Label::kNear); __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); __ ret(0); @@ -2010,7 +1848,7 @@ // Test for NaN. Compare heap numbers in a general way, // to hanlde NaNs correctly. 
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), - Immediate(masm->isolate()->factory()->heap_number_map())); + Immediate(isolate()->factory()->heap_number_map())); __ j(equal, &generic_heap_number_comparison, Label::kNear); if (cc != equal) { // Call runtime on identical JSObjects. Otherwise return equal. @@ -2036,7 +1874,7 @@ // If either is a Smi (we know that not both are), then they can only // be equal if the other is a HeapNumber. If so, use the slow case. STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); __ mov(ecx, Immediate(kSmiTagMask)); __ and_(ecx, eax); __ test(ecx, edx); @@ -2055,7 +1893,7 @@ // Check if the non-smi operand is a heap number. __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), - Immediate(masm->isolate()->factory()->heap_number_map())); + Immediate(isolate()->factory()->heap_number_map())); // If heap number, handle it in the slow case. __ j(equal, &slow, Label::kNear); // Return non-equal (ebx is not zero) @@ -2100,53 +1938,23 @@ Label non_number_comparison; Label unordered; __ bind(&generic_heap_number_comparison); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - CpuFeatureScope use_cmov(masm, CMOV); - - FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); - __ ucomisd(xmm0, xmm1); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, Label::kNear); - // Return a result of -1, 0, or 1, based on EFLAGS. - __ mov(eax, 0); // equal - __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, ecx); - __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, ecx); - __ ret(0); - } else { - FloatingPointHelper::CheckFloatOperands( - masm, &non_number_comparison, ebx); - FloatingPointHelper::LoadFloatOperand(masm, eax); - FloatingPointHelper::LoadFloatOperand(masm, edx); - __ FCmp(); - - // Don't base result on EFLAGS when a NaN is involved. 
- __ j(parity_even, &unordered, Label::kNear); - - Label below_label, above_label; - // Return a result of -1, 0, or 1, based on EFLAGS. - __ j(below, &below_label, Label::kNear); - __ j(above, &above_label, Label::kNear); - - __ Move(eax, Immediate(0)); - __ ret(0); - __ bind(&below_label); - __ mov(eax, Immediate(Smi::FromInt(-1))); - __ ret(0); - - __ bind(&above_label); - __ mov(eax, Immediate(Smi::FromInt(1))); - __ ret(0); - } + FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); + __ ucomisd(xmm0, xmm1); + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, Label::kNear); + + __ mov(eax, 0); // equal + __ mov(ecx, Immediate(Smi::FromInt(1))); + __ cmov(above, eax, ecx); + __ mov(ecx, Immediate(Smi::FromInt(-1))); + __ cmov(below, eax, ecx); + __ ret(0); // If one of the numbers was NaN, then the result is always false. // The cc is never not-equal. __ bind(&unordered); - ASSERT(cc != not_equal); + DCHECK(cc != not_equal); if (cc == less || cc == less_equal) { __ mov(eax, Immediate(Smi::FromInt(1))); } else { @@ -2284,8 +2092,7 @@ // If we didn't have a matching function, and we didn't find the megamorph // sentinel, then we have in the slot either some other function or an // AllocationSite. Do a map check on the object in ecx. 
- Handle<Map> allocation_site_map = - masm->isolate()->factory()->allocation_site_map(); + Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map(); __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map)); __ j(not_equal, &miss); @@ -2332,7 +2139,7 @@ __ push(edx); __ push(ebx); - CreateAllocationSiteStub create_stub; + CreateAllocationSiteStub create_stub(isolate); __ CallStub(&create_stub); __ pop(ebx); @@ -2363,52 +2170,90 @@ } -void CallFunctionStub::Generate(MacroAssembler* masm) { - // ebx : feedback vector - // edx : (only if ebx is not the megamorphic symbol) slot in feedback - // vector (Smi) +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + // Do not transform the receiver for strict mode functions. + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, cont); + + // Do not transform the receiver for natives (shared already in ecx). + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, cont); +} + + +static void EmitSlowCase(Isolate* isolate, + MacroAssembler* masm, + int argc, + Label* non_function) { + // Check for function proxy. + __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, non_function); + __ pop(ecx); + __ push(edi); // put proxy as additional argument under return address + __ push(ecx); + __ Move(eax, Immediate(argc + 1)); + __ Move(ebx, Immediate(0)); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); + __ jmp(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). 
+ __ bind(non_function); + __ mov(Operand(esp, (argc + 1) * kPointerSize), edi); + __ Move(eax, Immediate(argc)); + __ Move(ebx, Immediate(0)); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); + __ jmp(adaptor, RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { + // Wrap the receiver and patch it back onto the stack. + { FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ push(edi); + __ push(eax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ pop(edi); + } + __ mov(Operand(esp, (argc + 1) * kPointerSize), eax); + __ jmp(cont); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { // edi : the function to call - Isolate* isolate = masm->isolate(); Label slow, non_function, wrap, cont; - if (NeedsChecks()) { + if (needs_checks) { // Check that the function really is a JavaScript function. __ JumpIfSmi(edi, &non_function); // Goto slow case if we do not have a function. __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); __ j(not_equal, &slow); - - if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); - // Type information was updated. Because we may call Array, which - // expects either undefined or an AllocationSite in ebx we need - // to set ebx to undefined. - __ mov(ebx, Immediate(isolate->factory()->undefined_value())); - } } // Fast-case: Just invoke the function. - ParameterCount actual(argc_); + ParameterCount actual(argc); - if (CallAsMethod()) { - if (NeedsChecks()) { - // Do not transform the receiver for strict mode functions. - __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), - 1 << SharedFunctionInfo::kStrictModeBitWithinByte); - __ j(not_equal, &cont); - - // Do not transform the receiver for natives (shared already in ecx). 
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), - 1 << SharedFunctionInfo::kNativeBitWithinByte); - __ j(not_equal, &cont); + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); } // Load the receiver from the stack. - __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); + __ mov(eax, Operand(esp, (argc + 1) * kPointerSize)); - if (NeedsChecks()) { + if (needs_checks) { __ JumpIfSmi(eax, &wrap); __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); @@ -2422,57 +2267,25 @@ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper()); - if (NeedsChecks()) { + if (needs_checks) { // Slow-case: Non-function called. __ bind(&slow); - if (RecordCallTarget()) { - // If there is a call target cache, mark it megamorphic in the - // non-function case. MegamorphicSentinel is an immortal immovable - // object (megamorphic symbol) so no write barrier is needed. - __ mov(FieldOperand(ebx, edx, times_half_pointer_size, - FixedArray::kHeaderSize), - Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); - } - // Check for function proxy. - __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); - __ j(not_equal, &non_function); - __ pop(ecx); - __ push(edi); // put proxy as additional argument under return address - __ push(ecx); - __ Move(eax, Immediate(argc_ + 1)); - __ Move(ebx, Immediate(0)); - __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); - { - Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); - __ jmp(adaptor, RelocInfo::CODE_TARGET); - } - - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). 
- __ bind(&non_function); - __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi); - __ Move(eax, Immediate(argc_)); - __ Move(ebx, Immediate(0)); - __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); - Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); - __ jmp(adaptor, RelocInfo::CODE_TARGET); + // (non_function is bound in EmitSlowCase) + EmitSlowCase(masm->isolate(), masm, argc, &non_function); } - if (CallAsMethod()) { + if (call_as_method) { __ bind(&wrap); - // Wrap the receiver and patch it back onto the stack. - { FrameScope frame_scope(masm, StackFrame::INTERNAL); - __ push(edi); - __ push(eax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ pop(edi); - } - __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax); - __ jmp(&cont); + EmitWrapCase(masm, argc, &cont); } } +void CallFunctionStub::Generate(MacroAssembler* masm) { + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + void CallConstructStub::Generate(MacroAssembler* masm) { // eax : number of arguments // ebx : feedback vector @@ -2502,10 +2315,10 @@ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize)); Handle<Map> allocation_site_map = - masm->isolate()->factory()->allocation_site_map(); + isolate()->factory()->allocation_site_map(); __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map)); __ j(equal, &feedback_register_initialized); - __ mov(ebx, masm->isolate()->factory()->undefined_value()); + __ mov(ebx, isolate()->factory()->undefined_value()); __ bind(&feedback_register_initialized); } @@ -2536,11 +2349,168 @@ // Set expected number of arguments to zero (not changing eax). 
__ Move(ebx, Immediate(0)); Handle<Code> arguments_adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + isolate()->builtins()->ArgumentsAdaptorTrampoline(); __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET); } +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset)); + __ mov(vector, FieldOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // edi - function + // edx - slot id + Label miss; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, ebx); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx); + __ cmp(edi, ecx); + __ j(not_equal, &miss); + + __ mov(eax, arg_count()); + __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + + // Verify that ecx contains an AllocationSite + Factory* factory = masm->isolate()->factory(); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), + factory->allocation_site_map()); + __ j(not_equal, &miss); + + __ mov(ebx, ecx); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + // Unreachable. + __ int3(); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // edi - function + // edx - slot id + Isolate* isolate = masm->isolate(); + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, ebx); + + // The checks. 
First, does edi match the recorded monomorphic target? + __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ j(not_equal, &extra_checks_or_miss); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + + // Load the receiver from the stack. + __ mov(eax, Operand(esp, (argc + 1) * kPointerSize)); + + __ JumpIfSmi(eax, &wrap); + + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(below, &wrap); + + __ bind(&cont); + } + + __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(isolate, masm, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); + __ j(equal, &slow_start); + __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate))); + __ j(equal, &miss); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. + __ AssertNotSmi(ecx); + __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &miss); + __ mov(FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize), + Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); + __ jmp(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + + // Check that the function really is a JavaScript function. + __ JumpIfSmi(edi, &non_function); + + // Goto slow case if we do not have a function. 
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &slow); + __ jmp(&have_js_function); + + // Unreachable + __ int3(); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ push(ecx); + __ push(edi); + __ push(ebx); + __ push(edx); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to edi and exit the internal frame. + __ mov(edi, eax); + } +} + + bool CEntryStub::NeedsImmovableCode() { return false; } @@ -2553,43 +2523,42 @@ // It is important that the store buffer overflow stubs are generated first. ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); - if (Serializer::enabled()) { - PlatformFeatureScope sse2(SSE2); - BinaryOpICStub::GenerateAheadOfTime(isolate); - BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); - } else { - BinaryOpICStub::GenerateAheadOfTime(isolate); - BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); - } + BinaryOpICStub::GenerateAheadOfTime(isolate); + BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); } void CodeStub::GenerateFPStubs(Isolate* isolate) { - if (CpuFeatures::IsSupported(SSE2)) { - CEntryStub save_doubles(1, kSaveFPRegs); - // Stubs might already be in the snapshot, detect that and don't regenerate, - // which would lead to code stub initialization state being messed up. 
- Code* save_doubles_code; - if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { - save_doubles_code = *(save_doubles.GetCode(isolate)); - } - isolate->set_fp_stubs_generated(true); + CEntryStub save_doubles(isolate, 1, kSaveFPRegs); + // Stubs might already be in the snapshot, detect that and don't regenerate, + // which would lead to code stub initialization state being messed up. + Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code)) { + save_doubles_code = *(save_doubles.GetCode()); } + isolate->set_fp_stubs_generated(true); } void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode(isolate); + CEntryStub stub(isolate, 1, kDontSaveFPRegs); + stub.GetCode(); } -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - bool do_gc, - bool always_allocate_scope) { - // eax: result parameter for PerformGC, if any +void CEntryStub::Generate(MacroAssembler* masm) { + // eax: number of arguments including receiver + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // esi: current context (C callee-saved) + // edi: JS function of the caller (C callee-saved) + + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(save_doubles_ == kSaveFPRegs); + // ebx: pointer to C function (C callee-saved) // ebp: frame pointer (restored after C call) // esp: stack pointer (restored after C call) @@ -2603,62 +2572,37 @@ __ CheckStackAlignment(); } - if (do_gc) { - // Pass failure code returned from last attempt as first argument to - // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the - // stack alignment is known to be correct. 
This function takes one argument - // which is passed on the stack, and we know that the stack has been - // prepared to pass at least one argument. - __ mov(Operand(esp, 1 * kPointerSize), - Immediate(ExternalReference::isolate_address(masm->isolate()))); - __ mov(Operand(esp, 0 * kPointerSize), eax); // Result. - __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); - if (always_allocate_scope) { - __ inc(Operand::StaticVariable(scope_depth)); - } - // Call C function. __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. __ mov(Operand(esp, 2 * kPointerSize), - Immediate(ExternalReference::isolate_address(masm->isolate()))); + Immediate(ExternalReference::isolate_address(isolate()))); __ call(ebx); // Result is in eax or edx:eax - do not destroy these registers! - if (always_allocate_scope) { - __ dec(Operand::StaticVariable(scope_depth)); - } - // Runtime functions should not return 'the hole'. Allowing it to escape may // lead to crashes in the IC code later. if (FLAG_debug_code) { Label okay; - __ cmp(eax, masm->isolate()->factory()->the_hole_value()); + __ cmp(eax, isolate()->factory()->the_hole_value()); __ j(not_equal, &okay, Label::kNear); __ int3(); __ bind(&okay); } - // Check for failure result. - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - __ lea(ecx, Operand(eax, 1)); - // Lower 2 bits of ecx are 0 iff eax has failure tag. - __ test(ecx, Immediate(kFailureTagMask)); - __ j(zero, &failure_returned); + // Check result for exception sentinel. 
+ Label exception_returned; + __ cmp(eax, isolate()->factory()->exception()); + __ j(equal, &exception_returned); ExternalReference pending_exception_address( - Isolate::kPendingExceptionAddress, masm->isolate()); + Isolate::kPendingExceptionAddress, isolate()); // Check that there is no pending exception, otherwise we - // should have returned some failure value. + // should have returned the exception sentinel. if (FLAG_debug_code) { __ push(edx); - __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); Label okay; __ cmp(edx, Operand::StaticVariable(pending_exception_address)); // Cannot use check here as it attempts to generate call into runtime. @@ -2672,96 +2616,27 @@ __ LeaveExitFrame(save_doubles_ == kSaveFPRegs); __ ret(0); - // Handling of failure. - __ bind(&failure_returned); - - Label retry; - // If the returned exception is RETRY_AFTER_GC continue at retry label - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ j(zero, &retry, Label::kNear); + // Handling of exception. + __ bind(&exception_returned); // Retrieve the pending exception. __ mov(eax, Operand::StaticVariable(pending_exception_address)); // Clear the pending exception. - __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); __ mov(Operand::StaticVariable(pending_exception_address), edx); // Special handling of termination exceptions which are uncatchable // by javascript code. - __ cmp(eax, masm->isolate()->factory()->termination_exception()); - __ j(equal, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - // Retry. 
- __ bind(&retry); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // eax: number of arguments including receiver - // ebx: pointer to C function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // esi: current context (C callee-saved) - // edi: JS function of the caller (C callee-saved) - - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - // NOTE: Invocations of builtins may return failure objects instead - // of a proper result. The builtin entry handles this by performing - // a garbage collection and retrying the builtin (twice). - - // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(save_doubles_ == kSaveFPRegs); - - // eax: result parameter for PerformGC, if any (setup below) - // ebx: pointer to builtin function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // edi: number of arguments including receiver (C callee-saved) - // esi: argv pointer (C callee-saved) - - Label throw_normal_exception; Label throw_termination_exception; + __ cmp(eax, isolate()->factory()->termination_exception()); + __ j(equal, &throw_termination_exception); - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure))); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - true); - - { FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(0, eax); - __ CallCFunction( - ExternalReference::out_of_memory_function(masm->isolate()), 0); - } + // Handle normal exception. 
+ __ Throw(eax); __ bind(&throw_termination_exception); __ ThrowUncatchable(eax); - - __ bind(&throw_normal_exception); - __ Throw(eax); } @@ -2785,12 +2660,11 @@ __ push(ebx); // Save copies of the top frame descriptor on the stack. - ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate()); + ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate()); __ push(Operand::StaticVariable(c_entry_fp)); // If this is the outermost JS call, set js_entry_sp value. - ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, - masm->isolate()); + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0)); __ j(not_equal, ¬_outermost_js, Label::kNear); __ mov(Operand::StaticVariable(js_entry_sp), ebp); @@ -2807,9 +2681,9 @@ // Caught exception: Store result (exception) in the pending exception // field in the JSEnv and return a failure sentinel. ExternalReference pending_exception(Isolate::kPendingExceptionAddress, - masm->isolate()); + isolate()); __ mov(Operand::StaticVariable(pending_exception), eax); - __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception())); + __ mov(eax, Immediate(isolate()->factory()->exception())); __ jmp(&exit); // Invoke: Link this frame into the handler chain. There's only one @@ -2818,7 +2692,7 @@ __ PushTryHandler(StackHandler::JS_ENTRY, 0); // Clear any pending exceptions. - __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); __ mov(Operand::StaticVariable(pending_exception), edx); // Fake a receiver (NULL). @@ -2830,11 +2704,10 @@ // builtin stubs may not have been generated yet. 
if (is_construct) { ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, - masm->isolate()); + isolate()); __ mov(edx, Immediate(construct_entry)); } else { - ExternalReference entry(Builtins::kJSEntryTrampoline, - masm->isolate()); + ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); __ mov(edx, Immediate(entry)); } __ mov(edx, Operand(edx, 0)); // deref address @@ -2854,8 +2727,7 @@ // Restore the top frame descriptor from the stack. __ pop(Operand::StaticVariable(ExternalReference( - Isolate::kCEntryFPAddress, - masm->isolate()))); + Isolate::kCEntryFPAddress, isolate()))); // Restore callee-saved registers (C calling conventions). __ pop(ebx); @@ -2887,7 +2759,7 @@ // void InstanceofStub::Generate(MacroAssembler* masm) { // Call site inlining and patching implies arguments in registers. - ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); + DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); // Fixed register usage throughout the stub. Register object = eax; // Object (lhs). @@ -2904,8 +2776,8 @@ static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d); static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8); - ASSERT_EQ(object.code(), InstanceofStub::left().code()); - ASSERT_EQ(function.code(), InstanceofStub::right().code()); + DCHECK_EQ(object.code(), InstanceofStub::left().code()); + DCHECK_EQ(function.code(), InstanceofStub::right().code()); // Get the object and function - they are always both needed. Label slow, not_js_object; @@ -2920,7 +2792,7 @@ // If there is a call site cache don't look in the global cache, but do the // real lookup and update the call site cache. - if (!HasCallSiteInlineCheck()) { + if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) { // Look up the function and the map in the instanceof cache. 
Label miss; __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex); @@ -2947,7 +2819,7 @@ } else { // The constants for the code patching are based on no push instructions // at the call site. - ASSERT(HasArgsInRegisters()); + DCHECK(HasArgsInRegisters()); // Get return address and delta to inlined map check. __ mov(scratch, Operand(esp, 0 * kPointerSize)); __ sub(scratch, Operand(esp, 1 * kPointerSize)); @@ -2968,7 +2840,7 @@ __ bind(&loop); __ cmp(scratch, prototype); __ j(equal, &is_instance, Label::kNear); - Factory* factory = masm->isolate()->factory(); + Factory* factory = isolate()->factory(); __ cmp(scratch, Immediate(factory->null_value())); __ j(equal, &is_not_instance, Label::kNear); __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); @@ -2979,6 +2851,9 @@ if (!HasCallSiteInlineCheck()) { __ mov(eax, Immediate(0)); __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->true_value()); + } } else { // Get return address and delta to inlined map check. __ mov(eax, factory->true_value()); @@ -2999,6 +2874,9 @@ if (!HasCallSiteInlineCheck()) { __ mov(eax, Immediate(Smi::FromInt(1))); __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } } else { // Get return address and delta to inlined map check. __ mov(eax, factory->false_value()); @@ -3026,20 +2904,32 @@ // Null is not instance of anything. __ cmp(object, factory->null_value()); __ j(not_equal, &object_not_null, Label::kNear); - __ Move(eax, Immediate(Smi::FromInt(1))); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } else { + __ Move(eax, Immediate(Smi::FromInt(1))); + } __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); __ bind(&object_not_null); // Smi values is not instance of anything. 
__ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear); - __ Move(eax, Immediate(Smi::FromInt(1))); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } else { + __ Move(eax, Immediate(Smi::FromInt(1))); + } __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); __ bind(&object_not_null_or_smi); // String values is not instance of anything. Condition is_string = masm->IsObjectStringType(object, scratch, scratch); __ j(NegateCondition(is_string), &slow, Label::kNear); - __ Move(eax, Immediate(Smi::FromInt(1))); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } else { + __ Move(eax, Immediate(Smi::FromInt(1))); + } __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); // Slow-case: Go through the JavaScript implementation. @@ -3134,9 +3024,9 @@ if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kHiddenNumberToSmi, 1); + __ CallRuntime(Runtime::kNumberToSmi, 1); } if (!index_.is(eax)) { // Save the conversion result before the pop instructions below @@ -3162,7 +3052,7 @@ __ push(object_); __ SmiTag(index_); __ push(index_); - __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); if (!result_.is(eax)) { __ mov(result_, eax); } @@ -3180,7 +3070,7 @@ // Fast case of Heap::LookupSingleCharacterStringFromCode. 
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); + DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); __ test(code_, Immediate(kSmiTagMask | ((~String::kMaxOneByteCharCode) << kSmiTagSize))); @@ -3220,21 +3110,15 @@ } -void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - // Copy characters using rep movs of doublewords. - // The destination is aligned on a 4 byte boundary because we are - // copying to the beginning of a newly allocated string. - ASSERT(dest.is(edi)); // rep movs destination - ASSERT(src.is(esi)); // rep movs source - ASSERT(count.is(ecx)); // rep movs count - ASSERT(!scratch.is(dest)); - ASSERT(!scratch.is(src)); - ASSERT(!scratch.is(count)); +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding) { + DCHECK(!scratch.is(dest)); + DCHECK(!scratch.is(src)); + DCHECK(!scratch.is(count)); // Nothing to do for zero characters. Label done; @@ -3242,38 +3126,17 @@ __ j(zero, &done); // Make count the number of bytes to copy. - if (!ascii) { + if (encoding == String::TWO_BYTE_ENCODING) { __ shl(count, 1); } - // Don't enter the rep movs if there are less than 4 bytes to copy. - Label last_bytes; - __ test(count, Immediate(~3)); - __ j(zero, &last_bytes, Label::kNear); - - // Copy from edi to esi using rep movs instruction. - __ mov(scratch, count); - __ sar(count, 2); // Number of doublewords to copy. - __ cld(); - __ rep_movs(); - - // Find number of bytes left. - __ mov(count, scratch); - __ and_(count, 3); - - // Check if there are more bytes to copy. - __ bind(&last_bytes); - __ test(count, count); - __ j(zero, &done); - - // Copy remaining characters. 
Label loop; __ bind(&loop); __ mov_b(scratch, Operand(src, 0)); __ mov_b(Operand(dest, 0), scratch); - __ add(src, Immediate(1)); - __ add(dest, Immediate(1)); - __ sub(count, Immediate(1)); + __ inc(src); + __ inc(dest); + __ dec(count); __ j(not_zero, &loop); __ bind(&done); @@ -3285,7 +3148,7 @@ Register character, Register scratch) { // hash = (seed + character) + ((seed + character) << 10); - if (Serializer::enabled()) { + if (masm->serializer_enabled()) { __ LoadRoot(scratch, Heap::kHashSeedRootIndex); __ SmiUntag(scratch); __ add(scratch, character); @@ -3380,7 +3243,7 @@ // Longer than original string's length or negative: unsafe arguments. __ j(above, &runtime); // Return original string. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1); __ ret(3 * kPointerSize); __ bind(¬_original_string); @@ -3402,7 +3265,7 @@ __ test(ebx, Immediate(kIsIndirectStringMask)); __ j(zero, &seq_or_external_string, Label::kNear); - Factory* factory = masm->isolate()->factory(); + Factory* factory = isolate()->factory(); __ test(ebx, Immediate(kSlicedNotConsMask)); __ j(not_zero, &sliced_string, Label::kNear); // Cons string. Check whether it is flat, then fetch first part. @@ -3480,7 +3343,7 @@ // Handle external string. // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ test_b(ebx, kShortExternalStringMask); __ j(not_zero, &runtime); __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset)); @@ -3502,23 +3365,21 @@ // eax: result string // ecx: result string length - __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. 
- __ pop(esi); + __ pop(edx); __ pop(ebx); __ SmiUntag(ebx); - __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize)); + __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize)); // eax: result string // ecx: result length - // edx: original value of esi // edi: first character of result - // esi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true); - __ mov(esi, edx); // Restore esi. + // edx: character of sub string start + StringHelper::GenerateCopyCharacters( + masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING); __ IncrementCounter(counters->sub_string_native(), 1); __ ret(3 * kPointerSize); @@ -3528,27 +3389,25 @@ // eax: result string // ecx: result string length - __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); __ add(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. - __ pop(esi); + __ pop(edx); __ pop(ebx); // As from is a smi it is 2 times the value which matches the size of a two // byte character. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize)); + __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize)); // eax: result string // ecx: result length - // edx: original value of esi // edi: first character of result - // esi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false); - __ mov(esi, edx); // Restore esi. + // edx: character of sub string start + StringHelper::GenerateCopyCharacters( + masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING); __ IncrementCounter(counters->sub_string_native(), 1); __ ret(3 * kPointerSize); @@ -3558,7 +3417,7 @@ // Just jump to runtime to create the sub string. 
__ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubString, 3, 1); __ bind(&single_char); // eax: string @@ -3722,7 +3581,7 @@ STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); __ Move(eax, Immediate(Smi::FromInt(EQUAL))); - __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1); + __ IncrementCounter(isolate()->counters()->string_compare_native(), 1); __ ret(2 * kPointerSize); __ bind(¬_same); @@ -3740,199 +3599,7 @@ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); -} - - -void ArrayPushStub::Generate(MacroAssembler* masm) { - int argc = arguments_count(); - - if (argc == 0) { - // Noop, return the length. - __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); - __ ret((argc + 1) * kPointerSize); - return; - } - - Isolate* isolate = masm->isolate(); - - if (argc != 1) { - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - Label call_builtin, attempt_to_grow_elements, with_write_barrier; - - // Get the elements array of the object. - __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check that the elements are in fast mode and writable. - __ cmp(FieldOperand(edi, HeapObject::kMapOffset), - isolate->factory()->fixed_array_map()); - __ j(not_equal, &call_builtin); - } - - // Get the array's length into eax and calculate new length. - __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - __ add(eax, Immediate(Smi::FromInt(argc))); - - // Get the elements' length into ecx. - __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); - - // Check if we could survive without allocation. 
- __ cmp(eax, ecx); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - __ j(greater, &attempt_to_grow_elements); - - // Check if value is a smi. - __ mov(ecx, Operand(esp, argc * kPointerSize)); - __ JumpIfNotSmi(ecx, &with_write_barrier); - - // Store the value. - __ mov(FieldOperand(edi, eax, times_half_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize), - ecx); - } else { - __ j(greater, &call_builtin); - - __ mov(ecx, Operand(esp, argc * kPointerSize)); - __ StoreNumberToDoubleElements( - ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize); - } - - // Save new length. - __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); - __ ret((argc + 1) * kPointerSize); - - if (IsFastDoubleElementsKind(elements_kind())) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ bind(&with_write_barrier); - - if (IsFastSmiElementsKind(elements_kind())) { - if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); - - __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), - isolate->factory()->heap_number_map()); - __ j(equal, &call_builtin); - - ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) - ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX)); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset)); - __ mov(ebx, ContextOperand(ebx, Context::JS_ARRAY_MAPS_INDEX)); - const int header_size = FixedArrayBase::kHeaderSize; - // Verify that the object can be transitioned in place. 
- const int origin_offset = header_size + elements_kind() * kPointerSize; - __ mov(edi, FieldOperand(ebx, origin_offset)); - __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset)); - __ j(not_equal, &call_builtin); - - const int target_offset = header_size + target_kind * kPointerSize; - __ mov(ebx, FieldOperand(ebx, target_offset)); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - masm, DONT_TRACK_ALLOCATION_SITE, NULL); - // Restore edi used as a scratch register for the write barrier used while - // setting the map. - __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); - } - - // Save new length. - __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); - - // Store the value. - __ lea(edx, FieldOperand(edi, eax, times_half_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize)); - __ mov(Operand(edx, 0), ecx); - - __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - - __ ret((argc + 1) * kPointerSize); - - __ bind(&attempt_to_grow_elements); - if (!FLAG_inline_new) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ mov(ebx, Operand(esp, argc * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case the - // new element is non-Smi. For now, delegate to the builtin. - if (IsFastSmiElementsKind(elements_kind())) { - __ JumpIfNotSmi(ebx, &call_builtin); - } - - // We could be lucky and the elements array could be at the top of new-space. - // In this case we can just grow it in place by moving the allocation pointer - // up. - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); - - const int kAllocationDelta = 4; - ASSERT(kAllocationDelta >= argc); - // Load top. 
- __ mov(ecx, Operand::StaticVariable(new_space_allocation_top)); - - // Check if it's the end of elements. - __ lea(edx, FieldOperand(edi, eax, times_half_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize)); - __ cmp(edx, ecx); - __ j(not_equal, &call_builtin); - __ add(ecx, Immediate(kAllocationDelta * kPointerSize)); - __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit)); - __ j(above, &call_builtin); - - // We fit and could grow elements. - __ mov(Operand::StaticVariable(new_space_allocation_top), ecx); - - // Push the argument... - __ mov(Operand(edx, 0), ebx); - // ... and fill the rest with holes. - for (int i = 1; i < kAllocationDelta; i++) { - __ mov(Operand(edx, i * kPointerSize), - isolate->factory()->the_hole_value()); - } - - if (IsFastObjectElementsKind(elements_kind())) { - // We know the elements array is in new space so we don't need the - // remembered set, but we just pushed a value onto it so we may have to tell - // the incremental marker to rescan the object that we just grew. We don't - // need to worry about the holes because they are in old space and already - // marked black. - __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET); - } - - // Restore receiver to edx as finish sequence assumes it's here. - __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); - - // Increment element's and array's sizes. - __ add(FieldOperand(edi, FixedArray::kLengthOffset), - Immediate(Smi::FromInt(kAllocationDelta))); - - // NOTE: This only happen in new-space, where we don't care about the - // black-byte-count on pages. Otherwise we should update that too if the - // object is black. 
- - __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); - __ ret((argc + 1) * kPointerSize); - - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } @@ -3942,31 +3609,30 @@ // -- eax : right // -- esp[0] : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); // Load ecx with the allocation site. We stick an undefined dummy value here // and replace it with the real allocation site later when we instantiate this // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ mov(ecx, handle(isolate->heap()->undefined_value())); + __ mov(ecx, handle(isolate()->heap()->undefined_value())); // Make sure that we actually patched the allocation site. if (FLAG_debug_code) { __ test(ecx, Immediate(kSmiTagMask)); __ Assert(not_equal, kExpectedAllocationSite); __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), - isolate->factory()->allocation_site_map()); + isolate()->factory()->allocation_site_map()); __ Assert(equal, kExpectedAllocationSite); } // Tail call into the stub that handles binary operations with allocation // sites. - BinaryOpWithAllocationSiteStub stub(state_); + BinaryOpWithAllocationSiteStub stub(isolate(), state_); __ TailCallStub(&stub); } void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMI); + DCHECK(state_ == CompareIC::SMI); Label miss; __ mov(ecx, edx); __ or_(ecx, eax); @@ -3992,7 +3658,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::NUMBER); + DCHECK(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; @@ -4005,74 +3671,56 @@ __ JumpIfNotSmi(eax, &miss); } - // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or SSE2 or CMOV is unsupported. 
- if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { - CpuFeatureScope scope1(masm, SSE2); - CpuFeatureScope scope2(masm, CMOV); - - // Load left and right operand. - Label done, left, left_smi, right_smi; - __ JumpIfSmi(eax, &right_smi, Label::kNear); - __ cmp(FieldOperand(eax, HeapObject::kMapOffset), - masm->isolate()->factory()->heap_number_map()); - __ j(not_equal, &maybe_undefined1, Label::kNear); - __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); - __ jmp(&left, Label::kNear); - __ bind(&right_smi); - __ mov(ecx, eax); // Can't clobber eax because we can still jump away. - __ SmiUntag(ecx); - __ Cvtsi2sd(xmm1, ecx); - - __ bind(&left); - __ JumpIfSmi(edx, &left_smi, Label::kNear); - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), - masm->isolate()->factory()->heap_number_map()); - __ j(not_equal, &maybe_undefined2, Label::kNear); - __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); - __ jmp(&done); - __ bind(&left_smi); - __ mov(ecx, edx); // Can't clobber edx because we can still jump away. - __ SmiUntag(ecx); - __ Cvtsi2sd(xmm0, ecx); + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(eax, &right_smi, Label::kNear); + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined1, Label::kNear); + __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ jmp(&left, Label::kNear); + __ bind(&right_smi); + __ mov(ecx, eax); // Can't clobber eax because we can still jump away. + __ SmiUntag(ecx); + __ Cvtsi2sd(xmm1, ecx); - __ bind(&done); - // Compare operands. 
- __ ucomisd(xmm0, xmm1); + __ bind(&left); + __ JumpIfSmi(edx, &left_smi, Label::kNear); + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined2, Label::kNear); + __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + __ jmp(&done); + __ bind(&left_smi); + __ mov(ecx, edx); // Can't clobber edx because we can still jump away. + __ SmiUntag(ecx); + __ Cvtsi2sd(xmm0, ecx); - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, Label::kNear); + __ bind(&done); + // Compare operands. + __ ucomisd(xmm0, xmm1); - // Return a result of -1, 0, or 1, based on EFLAGS. - // Performing mov, because xor would destroy the flag register. - __ mov(eax, 0); // equal - __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, ecx); - __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, ecx); - __ ret(0); - } else { - __ mov(ecx, edx); - __ and_(ecx, eax); - __ JumpIfSmi(ecx, &generic_stub, Label::kNear); + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, Label::kNear); - __ cmp(FieldOperand(eax, HeapObject::kMapOffset), - masm->isolate()->factory()->heap_number_map()); - __ j(not_equal, &maybe_undefined1, Label::kNear); - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), - masm->isolate()->factory()->heap_number_map()); - __ j(not_equal, &maybe_undefined2, Label::kNear); - } + // Return a result of -1, 0, or 1, based on EFLAGS. + // Performing mov, because xor would destroy the flag register. 
+ __ mov(eax, 0); // equal + __ mov(ecx, Immediate(Smi::FromInt(1))); + __ cmov(above, eax, ecx); + __ mov(ecx, Immediate(Smi::FromInt(-1))); + __ cmov(below, eax, ecx); + __ ret(0); __ bind(&unordered); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, CompareIC::GENERIC); - __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { - __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value())); + __ cmp(eax, Immediate(isolate()->factory()->undefined_value())); __ j(not_equal, &miss); __ JumpIfSmi(edx, &unordered); __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); @@ -4082,7 +3730,7 @@ __ bind(&maybe_undefined2); if (Token::IsOrderedRelationalCompareOp(op_)) { - __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value())); + __ cmp(edx, Immediate(isolate()->factory()->undefined_value())); __ j(equal, &unordered); } @@ -4092,8 +3740,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::INTERNALIZED_STRING); - ASSERT(GetCondition() == equal); + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); + DCHECK(GetCondition() == equal); // Registers containing left and right operands respectively. Register left = edx; @@ -4123,7 +3771,7 @@ __ cmp(left, right); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. 
- ASSERT(right.is(eax)); + DCHECK(right.is(eax)); __ j(not_equal, &done, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -4137,8 +3785,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::UNIQUE_NAME); - ASSERT(GetCondition() == equal); + DCHECK(state_ == CompareIC::UNIQUE_NAME); + DCHECK(GetCondition() == equal); // Registers containing left and right operands respectively. Register left = edx; @@ -4168,7 +3816,7 @@ __ cmp(left, right); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(eax)); + DCHECK(right.is(eax)); __ j(not_equal, &done, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -4182,7 +3830,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + DCHECK(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -4236,7 +3884,7 @@ __ j(not_zero, &do_compare, Label::kNear); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(eax)); + DCHECK(right.is(eax)); __ ret(0); __ bind(&do_compare); } @@ -4263,7 +3911,7 @@ if (equality) { __ TailCallRuntime(Runtime::kStringEquals, 2, 1); } else { - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } __ bind(&miss); @@ -4272,7 +3920,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + DCHECK(state_ == CompareIC::OBJECT); Label miss; __ mov(ecx, edx); __ and_(ecx, eax); @@ -4283,7 +3931,7 @@ __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx); __ j(not_equal, &miss, Label::kNear); - ASSERT(GetCondition() == equal); + DCHECK(GetCondition() == equal); __ sub(eax, edx); __ ret(0); @@ -4317,7 +3965,7 @@ { // Call the runtime system in a fresh internal frame. 
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), - masm->isolate()); + isolate()); FrameScope scope(masm, StackFrame::INTERNAL); __ push(edx); // Preserve edx and eax. __ push(eax); @@ -4347,7 +3995,7 @@ Register properties, Handle<Name> name, Register r0) { - ASSERT(name->IsUniqueName()); + DCHECK(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the @@ -4365,11 +4013,11 @@ NameDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ lea(index, Operand(index, index, times_2, 0)); // index *= 3. Register entity_name = r0; // Having undefined at this place means the name is not contained. - ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); __ mov(entity_name, Operand(properties, index, times_half_pointer_size, kElementsStartOffset - kHeapObjectTag)); __ cmp(entity_name, masm->isolate()->factory()->undefined_value()); @@ -4391,7 +4039,8 @@ __ bind(&good); } - NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0, + NEGATIVE_LOOKUP); __ push(Immediate(Handle<Object>(name))); __ push(Immediate(name->Hash())); __ CallStub(&stub); @@ -4412,10 +4061,10 @@ Register name, Register r0, Register r1) { - ASSERT(!elements.is(r0)); - ASSERT(!elements.is(r1)); - ASSERT(!name.is(r0)); - ASSERT(!name.is(r1)); + DCHECK(!elements.is(r0)); + DCHECK(!elements.is(r1)); + DCHECK(!name.is(r0)); + DCHECK(!name.is(r1)); __ AssertName(name); @@ -4436,7 +4085,7 @@ __ and_(r0, r1); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3 // Check if the key is identical to the name. 
@@ -4447,7 +4096,8 @@ __ j(equal, done); } - NameDictionaryLookupStub stub(elements, r1, r0, POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0, + POSITIVE_LOOKUP); __ push(name); __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); __ shr(r0, Name::kHashShift); @@ -4498,16 +4148,16 @@ __ and_(scratch, Operand(esp, 0)); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. // Having undefined at this place means the name is not contained. - ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); __ mov(scratch, Operand(dictionary_, index_, times_pointer_size, kElementsStartOffset - kHeapObjectTag)); - __ cmp(scratch, masm->isolate()->factory()->undefined_value()); + __ cmp(scratch, isolate()->factory()->undefined_value()); __ j(equal, ¬_in_dictionary); // Stop if found the property. @@ -4550,17 +4200,10 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - StoreBufferOverflowStub stub(kDontSaveFPRegs); - stub.GetCode(isolate); - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); - } -} - - -bool CodeStub::CanUseFPRegisters() { - return CpuFeatures::IsSupported(SSE2); + StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs); + stub.GetCode(); + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); } @@ -4653,12 +4296,11 @@ __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. 
__ mov(Operand(esp, 2 * kPointerSize), - Immediate(ExternalReference::isolate_address(masm->isolate()))); + Immediate(ExternalReference::isolate_address(isolate()))); AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), + ExternalReference::incremental_marking_record_write_function(isolate()), argument_count); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); @@ -4837,16 +4479,15 @@ ecx, edi, xmm0, - &slow_elements_from_double, - false); + &slow_elements_from_double); __ pop(edx); __ ret(0); } void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); - __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1, kSaveFPRegs); + __ call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; __ mov(ebx, MemOperand(ebp, parameter_count_offset)); @@ -4862,7 +4503,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { - ProfileEntryHookStub stub; + ProfileEntryHookStub stub(masm->isolate()); masm->CallStub(&stub); } } @@ -4886,8 +4527,8 @@ __ push(eax); // Call the entry hook. 
- ASSERT(masm->isolate()->function_entry_hook() != NULL); - __ call(FUNCTION_ADDR(masm->isolate()->function_entry_hook()), + DCHECK(isolate()->function_entry_hook() != NULL); + __ call(FUNCTION_ADDR(isolate()->function_entry_hook()), RelocInfo::RUNTIME_ENTRY); __ add(esp, Immediate(2 * kPointerSize)); @@ -4904,7 +4545,8 @@ static void CreateArrayDispatch(MacroAssembler* masm, AllocationSiteOverrideMode mode) { if (mode == DISABLE_ALLOCATION_SITES) { - T stub(GetInitialFastElementsKind(), + T stub(masm->isolate(), + GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -4915,7 +4557,7 @@ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(edx, kind); __ j(not_equal, &next); - T stub(kind); + T stub(masm->isolate(), kind); __ TailCallStub(&stub); __ bind(&next); } @@ -4938,12 +4580,12 @@ // esp[4] - last argument Label normal_sequence; if (mode == DONT_OVERRIDE) { - ASSERT(FAST_SMI_ELEMENTS == 0); - ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - ASSERT(FAST_ELEMENTS == 2); - ASSERT(FAST_HOLEY_ELEMENTS == 3); - ASSERT(FAST_DOUBLE_ELEMENTS == 4); - ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + DCHECK(FAST_SMI_ELEMENTS == 0); + DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); + DCHECK(FAST_ELEMENTS == 2); + DCHECK(FAST_HOLEY_ELEMENTS == 3); + DCHECK(FAST_DOUBLE_ELEMENTS == 4); + DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); // is the low bit set? If so, we are holey and that is good. 
__ test_b(edx, 1); @@ -4959,12 +4601,14 @@ ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); - ArraySingleArgumentConstructorStub stub_holey(holey_initial, + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); __ bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(initial, + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -4994,7 +4638,7 @@ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(edx, kind); __ j(not_equal, &next); - ArraySingleArgumentConstructorStub stub(kind); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); __ TailCallStub(&stub); __ bind(&next); } @@ -5013,11 +4657,11 @@ TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); - stub.GetCode(isolate); + T stub(isolate, kind); + stub.GetCode(); if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { - T stub1(kind, DISABLE_ALLOCATION_SITES); - stub1.GetCode(isolate); + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); } } } @@ -5038,12 +4682,12 @@ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things - InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); - stubh1.GetCode(isolate); - InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); - stubh2.GetCode(isolate); - InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); - stubh3.GetCode(isolate); + InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub stubh3(isolate, 
kinds[i]); + stubh3.GetCode(); } } @@ -5103,7 +4747,7 @@ Label no_info; // If the feedback vector is the undefined value call an array constructor // that doesn't use AllocationSites. - __ cmp(ebx, masm->isolate()->factory()->undefined_value()); + __ cmp(ebx, isolate()->factory()->undefined_value()); __ j(equal, &no_info); // Only look at the lower 16 bits of the transition info. @@ -5125,7 +4769,7 @@ __ test(eax, eax); __ j(not_zero, ¬_zero_case); - InternalArrayNoArgumentConstructorStub stub0(kind); + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); __ TailCallStub(&stub0); __ bind(¬_zero_case); @@ -5140,16 +4784,16 @@ __ j(zero, &normal_sequence); InternalArraySingleArgumentConstructorStub - stub1_holey(GetHoleyElementsKind(kind)); + stub1_holey(isolate(), GetHoleyElementsKind(kind)); __ TailCallStub(&stub1_holey); } __ bind(&normal_sequence); - InternalArraySingleArgumentConstructorStub stub1(kind); + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); __ TailCallStub(&stub1); __ bind(¬_one_case); - InternalArrayNArgumentsConstructorStub stubN(kind); + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); __ TailCallStub(&stubN); } @@ -5182,8 +4826,7 @@ // but the following masking takes care of that anyway. __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. 
- __ and_(ecx, Map::kElementsKindMask); - __ shr(ecx, Map::kElementsKindShift); + __ DecodeField<Map::ElementsKindBits>(ecx); if (FLAG_debug_code) { Label done; @@ -5242,8 +4885,6 @@ STATIC_ASSERT(FCA::kHolderIndex == 0); STATIC_ASSERT(FCA::kArgsLength == 7); - Isolate* isolate = masm->isolate(); - __ pop(return_address); // context save @@ -5260,9 +4901,9 @@ Register scratch = call_data; if (!call_data_undefined) { // return value - __ push(Immediate(isolate->factory()->undefined_value())); + __ push(Immediate(isolate()->factory()->undefined_value())); // return value default - __ push(Immediate(isolate->factory()->undefined_value())); + __ push(Immediate(isolate()->factory()->undefined_value())); } else { // return value __ push(scratch); @@ -5270,7 +4911,7 @@ __ push(scratch); } // isolate - __ push(Immediate(reinterpret_cast<int>(isolate))); + __ push(Immediate(reinterpret_cast<int>(isolate()))); // holder __ push(holder); @@ -5305,7 +4946,8 @@ __ lea(scratch, ApiParameterOperand(2)); __ mov(ApiParameterOperand(0), scratch); - Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); Operand context_restore_operand(ebp, (2 + FCA::kContextSaveIndex) * kPointerSize); @@ -5318,7 +4960,7 @@ } Operand return_value_operand(ebp, return_value_offset * kPointerSize); __ CallApiFunctionAndReturn(api_function_address, - thunk_address, + thunk_ref, ApiParameterOperand(1), argc + FCA::kArgsLength + 1, return_value_operand, @@ -5353,10 +4995,11 @@ __ add(scratch, Immediate(kPointerSize)); __ mov(ApiParameterOperand(1), scratch); // arguments pointer. 
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); __ CallApiFunctionAndReturn(api_function_address, - thunk_address, + thunk_ref, ApiParameterOperand(2), kStackSpace, Operand(ebp, 7 * kPointerSize), diff -Nru nodejs-0.11.13/deps/v8/src/ia32/code-stubs-ia32.h nodejs-0.11.15/deps/v8/src/ia32/code-stubs-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/code-stubs-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/code-stubs-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_CODE_STUBS_IA32_H_ #define V8_IA32_CODE_STUBS_IA32_H_ -#include "macro-assembler.h" -#include "ic-inl.h" +#include "src/ic-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -42,10 +19,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub { public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { - ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || save_fp == kDontSaveFPRegs); - } + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) { } void Generate(MacroAssembler* masm); @@ -55,8 +30,8 @@ private: SaveFPRegsMode save_doubles_; - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } }; @@ -65,12 +40,12 @@ // Generate code for copying characters using the rep movs instruction. // Copies ecx characters from esi to edi. Copying of overlapping regions is // not supported. - static void GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, // Must be edi. - Register src, // Must be esi. - Register count, // Must be ecx. - Register scratch, // Neither of above. 
- bool ascii); + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding); // Generate string hash. static void GenerateHashInit(MacroAssembler* masm, @@ -92,11 +67,11 @@ class SubStringStub: public PlatformCodeStub { public: - SubStringStub() {} + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -104,7 +79,7 @@ class StringCompareStub: public PlatformCodeStub { public: - StringCompareStub() { } + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } // Compares two flat ASCII strings and returns result in eax. static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, @@ -123,8 +98,8 @@ Register scratch2); private: - virtual Major MajorKey() { return StringCompare; } - virtual int MinorKey() { return 0; } + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } virtual void Generate(MacroAssembler* masm); static void GenerateAsciiCharsCompareLoop( @@ -142,11 +117,13 @@ public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - NameDictionaryLookupStub(Register dictionary, + NameDictionaryLookupStub(Isolate* isolate, + Register dictionary, Register result, Register index, LookupMode mode) - : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { } + : PlatformCodeStub(isolate), + dictionary_(dictionary), result_(result), index_(index), mode_(mode) { } void Generate(MacroAssembler* masm); @@ -179,9 +156,9 @@ NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return NameDictionaryLookup; } + Major MajorKey() const { return NameDictionaryLookup; } - int MinorKey() { + int MinorKey() const { 
return DictionaryBits::encode(dictionary_.code()) | ResultBits::encode(result_.code()) | IndexBits::encode(index_.code()) | @@ -202,12 +179,14 @@ class RecordWriteStub: public PlatformCodeStub { public: - RecordWriteStub(Register object, + RecordWriteStub(Isolate* isolate, + Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) - : object_(object), + : PlatformCodeStub(isolate), + object_(object), value_(value), address_(address), remembered_set_action_(remembered_set_action), @@ -215,7 +194,6 @@ regs_(object, // An input reg. address, // An input reg. value) { // One scratch reg. - ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || fp_mode == kDontSaveFPRegs); } enum Mode { @@ -240,13 +218,13 @@ return INCREMENTAL; } - ASSERT(first_instruction == kTwoByteNopInstruction); + DCHECK(first_instruction == kTwoByteNopInstruction); if (second_instruction == kFiveByteJumpInstruction) { return INCREMENTAL_COMPACTION; } - ASSERT(second_instruction == kFiveByteNopInstruction); + DCHECK(second_instruction == kFiveByteNopInstruction); return STORE_BUFFER_ONLY; } @@ -254,23 +232,23 @@ static void Patch(Code* stub, Mode mode) { switch (mode) { case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || + DCHECK(GetMode(stub) == INCREMENTAL || GetMode(stub) == INCREMENTAL_COMPACTION); stub->instruction_start()[0] = kTwoByteNopInstruction; stub->instruction_start()[2] = kFiveByteNopInstruction; break; case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); stub->instruction_start()[0] = kTwoByteJumpInstruction; break; case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); stub->instruction_start()[0] = kTwoByteNopInstruction; stub->instruction_start()[2] = kFiveByteJumpInstruction; break; } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 7); + DCHECK(GetMode(stub) == 
mode); + CpuFeatures::FlushICache(stub->instruction_start(), 7); } private: @@ -288,7 +266,7 @@ object_(object), address_(address), scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); + DCHECK(!AreAliased(scratch0, object, address, no_reg)); scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_); if (scratch0.is(ecx)) { scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_); @@ -299,15 +277,15 @@ if (address.is(ecx)) { address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_); } - ASSERT(!AreAliased(scratch0_, object_, address_, ecx)); + DCHECK(!AreAliased(scratch0_, object_, address_, ecx)); } void Save(MacroAssembler* masm) { - ASSERT(!address_orig_.is(object_)); - ASSERT(object_.is(object_orig_) || address_.is(address_orig_)); - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); - ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); - ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); + DCHECK(!address_orig_.is(object_)); + DCHECK(object_.is(object_orig_) || address_.is(address_orig_)); + DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); // We don't have to save scratch0_orig_ because it was given to us as // a scratch register. But if we had to switch to a different reg then // we should save the new scratch0_. @@ -357,11 +335,10 @@ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); if (mode == kSaveFPRegs) { - CpuFeatureScope scope(masm, SSE2); masm->sub(esp, - Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1))); // Save all XMM registers except XMM0. 
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { + for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg); } @@ -371,14 +348,13 @@ inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - CpuFeatureScope scope(masm, SSE2); // Restore all XMM registers except XMM0. - for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { + for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize)); } masm->add(esp, - Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1))); } if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx); if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax); @@ -429,9 +405,9 @@ Mode mode); void InformIncrementalMarker(MacroAssembler* masm); - Major MajorKey() { return RecordWrite; } + Major MajorKey() const { return RecordWrite; } - int MinorKey() { + int MinorKey() const { return ObjectBits::encode(object_.code()) | ValueBits::encode(value_.code()) | AddressBits::encode(address_.code()) | diff -Nru nodejs-0.11.13/deps/v8/src/ia32/cpu-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/cpu-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/cpu-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/cpu-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,57 +1,24 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // CPU specific code for ia32 independent of OS goes here. 
#ifdef __GNUC__ -#include "third_party/valgrind/valgrind.h" +#include "src/third_party/valgrind/valgrind.h" #endif -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "cpu.h" -#include "macro-assembler.h" +#include "src/assembler.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { -void CPU::SetUp() { - CpuFeatures::Probe(); -} - - -bool CPU::SupportsCrankshaft() { - return CpuFeatures::IsSupported(SSE2); -} - - -void CPU::FlushICache(void* start, size_t size) { +void CpuFeatures::FlushICache(void* start, size_t size) { // No need to flush the instruction cache on Intel. On Intel instruction // cache flushing is only necessary when multiple cores running the same // code simultaneously. V8 (and JavaScript) is single threaded and when code diff -Nru nodejs-0.11.13/deps/v8/src/ia32/debug-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/debug-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/debug-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/debug-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "codegen.h" -#include "debug.h" +#include "src/codegen.h" +#include "src/debug.h" namespace v8 { namespace internal { -#ifdef ENABLE_DEBUGGER_SUPPORT - bool BreakLocationIterator::IsDebugBreakAtReturn() { return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -47,10 +22,10 @@ // CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc // for the precise return instructions sequence. void BreakLocationIterator::SetDebugBreakAtReturn() { - ASSERT(Assembler::kJSReturnSequenceLength >= + DCHECK(Assembler::kJSReturnSequenceLength >= Assembler::kCallInstructionLength); rinfo()->PatchCodeWithCall( - debug_info_->GetIsolate()->debug()->debug_break_return()->entry(), + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(), Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength); } @@ -65,37 +40,33 @@ // A debug break in the frame exit code is identified by the JS frame exit code // having been patched with a call instruction. 
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); return rinfo->IsPatchedReturnSequence(); } bool BreakLocationIterator::IsDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Check whether the debug break slot instructions have been patched. return rinfo()->IsPatchedDebugBreakSlotSequence(); } void BreakLocationIterator::SetDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); Isolate* isolate = debug_info_->GetIsolate(); rinfo()->PatchCodeWithCall( - isolate->debug()->debug_break_slot()->entry(), + isolate->builtins()->Slot_DebugBreak()->entry(), Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength); } void BreakLocationIterator::ClearDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength); } -// All debug break stubs support padding for LiveEdit. -const bool Debug::FramePaddingLayout::kIsSupported = true; - - #define __ ACCESS_MASM(masm) static void Generate_DebugBreakCallHelper(MacroAssembler* masm, @@ -107,18 +78,17 @@ FrameScope scope(masm, StackFrame::INTERNAL); // Load padding words on stack. - for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) { - __ push(Immediate(Smi::FromInt( - Debug::FramePaddingLayout::kPaddingValue))); + for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) { + __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue))); } - __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize))); + __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize))); // Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values // are stored as a smi causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); + DCHECK((object_regs & ~kJSCallerSaved) == 0); + DCHECK((non_object_regs & ~kJSCallerSaved) == 0); + DCHECK((object_regs & non_object_regs) == 0); for (int i = 0; i < kNumJSCallerSaved; i++) { int r = JSCallerSavedCode(i); Register reg = { r }; @@ -141,7 +111,7 @@ __ Move(eax, Immediate(0)); // No arguments. __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); - CEntryStub ceb(1); + CEntryStub ceb(masm->isolate(), 1); __ CallStub(&ceb); // Automatically find register that could be used after register restore. @@ -171,7 +141,7 @@ } } - ASSERT(unused_reg.code() != -1); + DCHECK(unused_reg.code() != -1); // Read current padding counter and skip corresponding number of words. __ pop(unused_reg); @@ -192,56 +162,57 @@ // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. ExternalReference after_break_target = - ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate()); + ExternalReference::debug_after_break_target_address(masm->isolate()); __ jmp(Operand::StaticVariable(after_break_target)); } -void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { - // Register state for IC load call (from ic-ia32.cc). +void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver + // -- edx : type feedback slot (smi) + // -- edi : function // ----------------------------------- - Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false); + Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(), + 0, false); +} + + +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + // Register state for IC load call (from ic-ia32.cc). 
+ Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false); } -void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { // Register state for IC store call (from ic-ia32.cc). - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // ----------------------------------- + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); Generate_DebugBreakCallHelper( - masm, eax.bit() | ecx.bit() | edx.bit(), 0, false); + masm, receiver.bit() | name.bit() | value.bit(), 0, false); } -void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { // Register state for keyed IC load call (from ic-ia32.cc). - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false); + GenerateLoadICDebugBreak(masm); } -void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { - // Register state for keyed IC load call (from ic-ia32.cc). - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // ----------------------------------- +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Register state for keyed IC store call (from ic-ia32.cc). 
+ Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); Generate_DebugBreakCallHelper( - masm, eax.bit() | ecx.bit() | edx.bit(), 0, false); + masm, receiver.bit() | name.bit() | value.bit(), 0, false); } -void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { // Register state for CompareNil IC // ----------- S t a t e ------------- // -- eax : value @@ -250,16 +221,7 @@ } -void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { - // Register state for keyed IC call call (from ic-ia32.cc) - // ----------- S t a t e ------------- - // -- ecx: name - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false); -} - - -void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { // Register state just before return from JS function (from codegen-ia32.cc). // ----------- S t a t e ------------- // -- eax: return value @@ -268,7 +230,7 @@ } -void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { // Register state for CallFunctionStub (from code-stubs-ia32.cc). // ----------- S t a t e ------------- // -- edi: function @@ -277,19 +239,7 @@ } -void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) { - // Register state for CallFunctionStub (from code-stubs-ia32.cc). 
- // ----------- S t a t e ------------- - // -- ebx: feedback array - // -- edx: slot in feedback array - // -- edi: function - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(), - 0, false); -} - - -void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { // Register state for CallConstructStub (from code-stubs-ia32.cc). // eax is the actual number of arguments not encoded as a smi see comment // above IC call. @@ -302,7 +252,8 @@ } -void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { // Register state for CallConstructStub (from code-stubs-ia32.cc). // eax is the actual number of arguments not encoded as a smi see comment // above IC call. @@ -318,33 +269,33 @@ } -void Debug::GenerateSlot(MacroAssembler* masm) { +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { // Generate enough nop's to make space for a call instruction. Label check_codesize; __ bind(&check_codesize); __ RecordDebugBreakSlot(); __ Nop(Assembler::kDebugBreakSlotLength); - ASSERT_EQ(Assembler::kDebugBreakSlotLength, + DCHECK_EQ(Assembler::kDebugBreakSlotLength, masm->SizeOfCodeGeneratedSince(&check_codesize)); } -void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
Generate_DebugBreakCallHelper(masm, 0, 0, true); } -void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { masm->ret(0); } -void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { ExternalReference restarter_frame_function_slot = - ExternalReference(Debug_Address::RestarterFrameFunctionPointer(), - masm->isolate()); + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0)); // We do not know our frame height, but set esp based on ebp. @@ -365,11 +316,10 @@ __ jmp(edx); } -const bool Debug::kFrameDropperSupported = true; -#undef __ +const bool LiveEdit::kFrameDropperSupported = true; -#endif // ENABLE_DEBUGGER_SUPPORT +#undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/ia32/deoptimizer-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/deoptimizer-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/deoptimizer-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/deoptimizer-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "safepoint-table.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" namespace v8 { namespace internal { @@ -58,7 +35,7 @@ for (int i = 0; i < deopt_data->DeoptCount(); i++) { int pc_offset = deopt_data->Pc(i)->value(); if (pc_offset == -1) continue; - ASSERT_GE(pc_offset, prev_pc_offset); + DCHECK_GE(pc_offset, prev_pc_offset); int pc_delta = pc_offset - prev_pc_offset; // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes // if encodable with small pc delta encoding and up to 6 bytes @@ -90,9 +67,8 @@ Factory* factory = isolate->factory(); Handle<ByteArray> new_reloc = factory->NewByteArray(reloc_length + padding, TENURED); - OS::MemCopy(new_reloc->GetDataStartAddress() + padding, - code->relocation_info()->GetDataStartAddress(), - reloc_length); + MemCopy(new_reloc->GetDataStartAddress() + padding, + code->relocation_info()->GetDataStartAddress(), reloc_length); // Create a relocation writer to write the comments in the padding // space. Use position 0 for everything to ensure short encoding. RelocInfoWriter reloc_info_writer( @@ -105,7 +81,7 @@ byte* pos_before = reloc_info_writer.pos(); #endif reloc_info_writer.Write(&rinfo); - ASSERT(RelocInfo::kMinRelocCommentSize == + DCHECK(RelocInfo::kMinRelocCommentSize == pos_before - reloc_info_writer.pos()); } // Replace relocation information on the code object. @@ -152,9 +128,6 @@ // Emit call to lazy deoptimization at all lazy deopt points. 
DeoptimizationInputData* deopt_data = DeoptimizationInputData::cast(code->deoptimization_data()); - SharedFunctionInfo* shared = - SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); - shared->EvictFromOptimizedCodeMap(code, "deoptimized code"); #ifdef DEBUG Address prev_call_address = NULL; #endif @@ -173,11 +146,11 @@ reinterpret_cast<intptr_t>(deopt_entry), NULL); reloc_info_writer.Write(&rinfo); - ASSERT_GE(reloc_info_writer.pos(), + DCHECK_GE(reloc_info_writer.pos(), reloc_info->address() + ByteArray::kHeaderSize); - ASSERT(prev_call_address == NULL || + DCHECK(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); - ASSERT(call_address + patch_size() <= code->instruction_end()); + DCHECK(call_address + patch_size() <= code->instruction_end()); #ifdef DEBUG prev_call_address = call_address; #endif @@ -185,8 +158,7 @@ // Move the relocation info to the beginning of the byte array. int new_reloc_size = reloc_end_address - reloc_info_writer.pos(); - OS::MemMove( - code->relocation_start(), reloc_info_writer.pos(), new_reloc_size); + MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size); // The relocation info is in place, update the size. reloc_info->set_length(new_reloc_size); @@ -194,7 +166,7 @@ // Handle the junk part after the new relocation info. We will create // a non-live object in the extra space at the end of the former reloc info. 
Address junk_address = reloc_info->address() + reloc_info->Size(); - ASSERT(junk_address <= reloc_end_address); + DCHECK(junk_address <= reloc_end_address); isolate->heap()->CreateFillerObjectAt(junk_address, reloc_end_address - junk_address); } @@ -210,7 +182,7 @@ } input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp())); input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp())); - for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { + for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) { input_->SetDoubleRegister(i, 0.0); } @@ -224,7 +196,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { intptr_t handler = - reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); + reinterpret_cast<intptr_t>(descriptor->deoptimization_handler()); int params = descriptor->GetHandlerParameterCount(); output_frame->SetRegister(eax.code(), params); output_frame->SetRegister(ebx.code(), handler); @@ -232,8 +204,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { - if (!CpuFeatures::IsSupported(SSE2)) return; - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) { double double_value = input_->GetDoubleRegister(i); output_frame->SetDoubleRegister(i, double_value); } @@ -247,20 +218,13 @@ input_frame_size - parameter_count * kPointerSize - StandardFrameConstants::kFixedFrameSize - kPointerSize; - ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset == + DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset == JavaScriptFrameConstants::kLocal0Offset); int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset); return (alignment_state == kAlignmentPaddingPushed); } -Code* Deoptimizer::NotifyStubFailureBuiltin() { - Builtins::Name name = CpuFeatures::IsSupported(SSE2) ? 
- Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure; - return isolate_->builtins()->builtin(name); -} - - #define __ masm()-> void Deoptimizer::EntryGenerator::Generate() { @@ -270,15 +234,12 @@ const int kNumberOfRegisters = Register::kNumRegisters; const int kDoubleRegsSize = kDoubleSize * - XMMRegister::kNumAllocatableRegisters; + XMMRegister::kMaxNumAllocatableRegisters; __ sub(esp, Immediate(kDoubleRegsSize)); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { - XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); - int offset = i * kDoubleSize; - __ movsd(Operand(esp, offset), xmm_reg); - } + for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) { + XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); + int offset = i * kDoubleSize; + __ movsd(Operand(esp, offset), xmm_reg); } __ pushad(); @@ -323,15 +284,12 @@ } int double_regs_offset = FrameDescription::double_registers_offset(); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - // Fill in the double input registers. - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize; - __ movsd(xmm0, Operand(esp, src_offset)); - __ movsd(Operand(ebx, dst_offset), xmm0); - } + // Fill in the double input registers. + for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize; + __ movsd(xmm0, Operand(esp, src_offset)); + __ movsd(Operand(ebx, dst_offset), xmm0); } // Clear FPU all exceptions. @@ -410,13 +368,10 @@ __ j(below, &outer_push_loop); // In case of a failed STUB, we have to restore the XMM registers. 
- if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { - XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); - int src_offset = i * kDoubleSize + double_regs_offset; - __ movsd(xmm_reg, Operand(ebx, src_offset)); - } + for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) { + XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); + int src_offset = i * kDoubleSize + double_regs_offset; + __ movsd(xmm_reg, Operand(ebx, src_offset)); } // Push state, pc, and continuation from the last output frame. @@ -447,7 +402,7 @@ USE(start); __ push_imm32(i); __ jmp(&done); - ASSERT(masm()->pc_offset() - start == table_entry_size_); + DCHECK(masm()->pc_offset() - start == table_entry_size_); } __ bind(&done); } diff -Nru nodejs-0.11.13/deps/v8/src/ia32/disasm-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/disasm-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/disasm-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/disasm-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <assert.h> -#include <stdio.h> #include <stdarg.h> +#include <stdio.h> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "disasm.h" +#include "src/disasm.h" namespace disasm { @@ -234,7 +211,7 @@ InstructionDesc* id = &instructions_[bm[i].b]; id->mnem = bm[i].mnem; id->op_order_ = bm[i].op_order_; - ASSERT_EQ(NO_INSTR, id->type); // Information not already entered. + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered. id->type = type; } } @@ -246,7 +223,7 @@ const char* mnem) { for (byte b = start; b <= end; b++) { InstructionDesc* id = &instructions_[b]; - ASSERT_EQ(NO_INSTR, id->type); // Information not already entered. + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered. id->mnem = mnem; id->type = type; } @@ -256,7 +233,7 @@ void InstructionTable::AddJumpConditionalShort() { for (byte b = 0x70; b <= 0x7F; b++) { InstructionDesc* id = &instructions_[b]; - ASSERT_EQ(NO_INSTR, id->type); // Information not already entered. + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered. 
id->mnem = jump_conditional_mnem[b & 0x0F]; id->type = JUMP_CONDITIONAL_SHORT_INSTR; } @@ -380,7 +357,7 @@ v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_; va_list args; va_start(args, format); - int result = v8::internal::OS::VSNPrintF(buf, format, args); + int result = v8::internal::VSNPrintF(buf, format, args); va_end(args); tmp_buffer_pos_ += result; } @@ -551,84 +528,101 @@ // Returns number of bytes used, including *data. int DisassemblerIA32::F7Instruction(byte* data) { - ASSERT_EQ(0xF7, *data); - byte modrm = *(data+1); + DCHECK_EQ(0xF7, *data); + byte modrm = *++data; int mod, regop, rm; get_modrm(modrm, &mod, ®op, &rm); - if (mod == 3 && regop != 0) { - const char* mnem = NULL; - switch (regop) { - case 2: mnem = "not"; break; - case 3: mnem = "neg"; break; - case 4: mnem = "mul"; break; - case 5: mnem = "imul"; break; - case 7: mnem = "idiv"; break; - default: UnimplementedInstruction(); - } - AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm)); - return 2; - } else if (mod == 3 && regop == eax) { - int32_t imm = *reinterpret_cast<int32_t*>(data+2); - AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm); - return 6; - } else if (regop == eax) { - AppendToBuffer("test "); - int count = PrintRightOperand(data+1); - int32_t imm = *reinterpret_cast<int32_t*>(data+1+count); - AppendToBuffer(",0x%x", imm); - return 1+count+4 /*int32_t*/; - } else { - UnimplementedInstruction(); - return 2; + const char* mnem = NULL; + switch (regop) { + case 0: + mnem = "test"; + break; + case 2: + mnem = "not"; + break; + case 3: + mnem = "neg"; + break; + case 4: + mnem = "mul"; + break; + case 5: + mnem = "imul"; + break; + case 6: + mnem = "div"; + break; + case 7: + mnem = "idiv"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data); + if (regop == 0) { + AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count)); + count += 4; } + return 1 + count; } int 
DisassemblerIA32::D1D3C1Instruction(byte* data) { byte op = *data; - ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1); - byte modrm = *(data+1); + DCHECK(op == 0xD1 || op == 0xD3 || op == 0xC1); + byte modrm = *++data; int mod, regop, rm; get_modrm(modrm, &mod, ®op, &rm); int imm8 = -1; - int num_bytes = 2; - if (mod == 3) { - const char* mnem = NULL; - switch (regop) { - case kROL: mnem = "rol"; break; - case kROR: mnem = "ror"; break; - case kRCL: mnem = "rcl"; break; - case kRCR: mnem = "rcr"; break; - case kSHL: mnem = "shl"; break; - case KSHR: mnem = "shr"; break; - case kSAR: mnem = "sar"; break; - default: UnimplementedInstruction(); - } - if (op == 0xD1) { - imm8 = 1; - } else if (op == 0xC1) { - imm8 = *(data+2); - num_bytes = 3; - } else if (op == 0xD3) { - // Shift/rotate by cl. - } - ASSERT_NE(NULL, mnem); - AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm)); - if (imm8 >= 0) { - AppendToBuffer("%d", imm8); - } else { - AppendToBuffer("cl"); - } + const char* mnem = NULL; + switch (regop) { + case kROL: + mnem = "rol"; + break; + case kROR: + mnem = "ror"; + break; + case kRCL: + mnem = "rcl"; + break; + case kRCR: + mnem = "rcr"; + break; + case kSHL: + mnem = "shl"; + break; + case KSHR: + mnem = "shr"; + break; + case kSAR: + mnem = "sar"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data); + if (op == 0xD1) { + imm8 = 1; + } else if (op == 0xC1) { + imm8 = *(data + 1); + count++; + } else if (op == 0xD3) { + // Shift/rotate by cl. + } + if (imm8 >= 0) { + AppendToBuffer(",%d", imm8); } else { - UnimplementedInstruction(); + AppendToBuffer(",cl"); } - return num_bytes; + return 1 + count; } // Returns number of bytes used, including *data. 
int DisassemblerIA32::JumpShort(byte* data) { - ASSERT_EQ(0xEB, *data); + DCHECK_EQ(0xEB, *data); byte b = *(data+1); byte* dest = data + static_cast<int8_t>(b) + 2; AppendToBuffer("jmp %s", NameOfAddress(dest)); @@ -638,7 +632,7 @@ // Returns number of bytes used, including *data. int DisassemblerIA32::JumpConditional(byte* data, const char* comment) { - ASSERT_EQ(0x0F, *data); + DCHECK_EQ(0x0F, *data); byte cond = *(data+1) & 0x0F; byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6; const char* mnem = jump_conditional_mnem[cond]; @@ -666,7 +660,7 @@ // Returns number of bytes used, including *data. int DisassemblerIA32::SetCC(byte* data) { - ASSERT_EQ(0x0F, *data); + DCHECK_EQ(0x0F, *data); byte cond = *(data+1) & 0x0F; const char* mnem = set_conditional_mnem[cond]; AppendToBuffer("%s ", mnem); @@ -677,7 +671,7 @@ // Returns number of bytes used, including *data. int DisassemblerIA32::CMov(byte* data) { - ASSERT_EQ(0x0F, *data); + DCHECK_EQ(0x0F, *data); byte cond = *(data + 1) & 0x0F; const char* mnem = conditional_move_mnem[cond]; int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2); @@ -688,7 +682,7 @@ // Returns number of bytes used, including *data. int DisassemblerIA32::FPUInstruction(byte* data) { byte escape_opcode = *data; - ASSERT_EQ(0xD8, escape_opcode & 0xF8); + DCHECK_EQ(0xD8, escape_opcode & 0xF8); byte modrm_byte = *(data+1); if (modrm_byte >= 0xC0) { @@ -977,17 +971,18 @@ data += 3; break; - case 0x69: // fall through - case 0x6B: - { int mod, regop, rm; - get_modrm(*(data+1), &mod, ®op, &rm); - int32_t imm = - *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2); - AppendToBuffer("imul %s,%s,0x%x", - NameOfCPURegister(regop), - NameOfCPURegister(rm), - imm); - data += 2 + (*data == 0x6B ? 
1 : 4); + case 0x6B: { + data++; + data += PrintOperands("imul", REG_OPER_OP_ORDER, data); + AppendToBuffer(",%d", *data); + data++; + } break; + + case 0x69: { + data++; + data += PrintOperands("imul", REG_OPER_OP_ORDER, data); + AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data)); + data += 4; } break; @@ -1396,7 +1391,7 @@ int mod, regop, rm; get_modrm(*data, &mod, ®op, &rm); int8_t imm8 = static_cast<int8_t>(data[1]); - ASSERT(regop == esi || regop == edx); + DCHECK(regop == esi || regop == edx); AppendToBuffer("%s %s,%d", (regop == esi) ? "psllq" : "psrlq", NameOfXMMRegister(rm), @@ -1663,23 +1658,22 @@ if (instr_len == 0) { printf("%02x", *data); } - ASSERT(instr_len > 0); // Ensure progress. + DCHECK(instr_len > 0); // Ensure progress. int outp = 0; // Instruction bytes. for (byte* bp = instr; bp < data; bp++) { - outp += v8::internal::OS::SNPrintF(out_buffer + outp, - "%02x", - *bp); + outp += v8::internal::SNPrintF(out_buffer + outp, + "%02x", + *bp); } for (int i = 6 - instr_len; i >= 0; i--) { - outp += v8::internal::OS::SNPrintF(out_buffer + outp, - " "); + outp += v8::internal::SNPrintF(out_buffer + outp, " "); } - outp += v8::internal::OS::SNPrintF(out_buffer + outp, - " %s", - tmp_buffer_.start()); + outp += v8::internal::SNPrintF(out_buffer + outp, + " %s", + tmp_buffer_.start()); return instr_len; } // NOLINT (function is too long) @@ -1703,7 +1697,7 @@ const char* NameConverter::NameOfAddress(byte* addr) const { - v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr); + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); return tmp_buffer_.start(); } diff -Nru nodejs-0.11.13/deps/v8/src/ia32/frames-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/frames-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/frames-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/frames-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "assembler.h" -#include "assembler-ia32.h" -#include "assembler-ia32-inl.h" -#include "frames.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/ia32/assembler-ia32-inl.h" +#include "src/ia32/assembler-ia32.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/ia32/frames-ia32.h nodejs-0.11.15/deps/v8/src/ia32/frames-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/frames-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/frames-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_FRAMES_IA32_H_ #define V8_IA32_FRAMES_IA32_H_ @@ -47,8 +24,6 @@ const int kNumJSCallerSaved = 5; -typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved]; - // Number of registers for which space is reserved in safepoints. const int kNumSafepointRegisters = 8; diff -Nru nodejs-0.11.13/deps/v8/src/ia32/full-codegen-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/full-codegen-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/full-codegen-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/full-codegen-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,20 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "code-stubs.h" -#include "codegen.h" -#include "compiler.h" -#include "debug.h" -#include "full-codegen.h" -#include "isolate-inl.h" -#include "parser.h" -#include "scopes.h" -#include "stub-cache.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -54,7 +31,7 @@ } ~JumpPatchSite() { - ASSERT(patch_site_.is_bound() == info_emitted_); + DCHECK(patch_site_.is_bound() == info_emitted_); } void EmitJumpIfNotSmi(Register reg, @@ -74,7 +51,7 @@ void EmitPatchInfo() { if (patch_site_.is_bound()) { int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_); - ASSERT(is_int8(delta_to_patch_site)); + DCHECK(is_uint8(delta_to_patch_site)); __ test(eax, Immediate(delta_to_patch_site)); #ifdef DEBUG info_emitted_ = true; @@ -87,8 +64,8 @@ private: // jc will be patched with jz, jnc will become jnz. 
void EmitJump(Condition cc, Label* target, Label::Distance distance) { - ASSERT(!patch_site_.is_bound() && !info_emitted_); - ASSERT(cc == carry || cc == not_carry); + DCHECK(!patch_site_.is_bound() && !info_emitted_); + DCHECK(cc == carry || cc == not_carry); __ bind(&patch_site_); __ j(cc, target, distance); } @@ -101,25 +78,6 @@ }; -static void EmitStackCheck(MacroAssembler* masm_, - int pointers = 0, - Register scratch = esp) { - Label ok; - Isolate* isolate = masm_->isolate(); - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate); - ASSERT(scratch.is(esp) == (pointers == 0)); - if (pointers != 0) { - __ mov(scratch, esp); - __ sub(scratch, Immediate(pointers * kPointerSize)); - } - __ cmp(scratch, Operand::StaticVariable(stack_limit)); - __ j(above_equal, &ok, Label::kNear); - __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET); - __ bind(&ok); -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right, with the // return address on top of them. 
The actual argument count matches the @@ -138,8 +96,6 @@ handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); - InitializeFeedbackVector(); - profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); @@ -167,7 +123,7 @@ __ j(not_equal, &ok, Label::kNear); __ mov(ecx, GlobalObjectOperand()); - __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset)); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset)); __ mov(Operand(esp, receiver_offset), ecx); @@ -180,18 +136,26 @@ FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - __ Prologue(BUILD_FUNCTION_FRAME); + __ Prologue(info->IsCodePreAgingActive()); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = info->scope()->num_stack_slots(); // Generators allocate locals, if any, in context slots. 
- ASSERT(!info->function()->is_generator() || locals_count == 0); + DCHECK(!info->function()->is_generator() || locals_count == 0); if (locals_count == 1) { __ push(Immediate(isolate()->factory()->undefined_value())); } else if (locals_count > 1) { if (locals_count >= 128) { - EmitStackCheck(masm_, locals_count, ecx); + Label ok; + __ mov(ecx, esp); + __ sub(ecx, Immediate(locals_count * kPointerSize)); + ExternalReference stack_limit = + ExternalReference::address_of_real_stack_limit(isolate()); + __ cmp(ecx, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&ok); } __ mov(eax, Immediate(isolate()->factory()->undefined_value())); const int kMaxPushes = 32; @@ -221,17 +185,20 @@ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is still in edi. if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { __ push(edi); __ Push(info->scope()->GetScopeInfo()); - __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2); + __ CallRuntime(Runtime::kNewGlobalContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ push(edi); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } function_in_register = false; // Context is returned in eax. It replaces the context passed to us. @@ -252,11 +219,18 @@ int context_offset = Context::SlotOffset(var->index()); __ mov(Operand(esi, context_offset), eax); // Update the write barrier. This clobbers eax and ebx. 
- __ RecordWriteContextSlot(esi, - context_offset, - eax, - ebx, - kDontSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx, + kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } } @@ -289,7 +263,7 @@ } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(isolate(), type); __ CallStub(&stub); SetVar(arguments, eax, ebx, edx); @@ -312,9 +286,9 @@ // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { VariableDeclaration* function = scope()->function(); - ASSERT(function->proxy()->var()->mode() == CONST || + DCHECK(function->proxy()->var()->mode() == CONST || function->proxy()->var()->mode() == CONST_LEGACY); - ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); @@ -322,13 +296,19 @@ { Comment cmnt(masm_, "[ Stack check"); PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); - EmitStackCheck(masm_); + Label ok; + ExternalReference stack_limit + = ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET); + __ bind(&ok); } { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); } } @@ -366,7 +346,7 @@ Comment cmnt(masm_, "[ Back edge bookkeeping"); Label ok; - ASSERT(back_edge_target->is_bound()); + DCHECK(back_edge_target->is_bound()); int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); int weight = Min(kMaxBackEdgeWeight, 
Max(1, distance / kCodeSizeMultiplier)); @@ -434,30 +414,28 @@ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize; __ Ret(arguments_bytes, ecx); -#ifdef ENABLE_DEBUGGER_SUPPORT // Check that the size of the code used for returning is large enough // for the debugger's requirements. - ASSERT(Assembler::kJSReturnSequenceLength <= + DCHECK(Assembler::kJSReturnSequenceLength <= masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); -#endif info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); } } void FullCodeGenerator::EffectContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); } void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); } void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); MemOperand operand = codegen()->VarOperand(var, result_register()); // Memory operands can be pushed directly. __ push(operand); @@ -522,7 +500,7 @@ true, true_label_, false_label_); - ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. 
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { if (false_label_ != fall_through_) __ jmp(false_label_); } else if (lit->IsTrue() || lit->IsJSObject()) { @@ -549,7 +527,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); } @@ -557,7 +535,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); __ Move(result_register(), reg); } @@ -565,7 +543,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); if (count > 1) __ Drop(count - 1); __ mov(Operand(esp, 0), reg); } @@ -573,7 +551,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); // For simplicity we always test the accumulator register. __ Drop(count); __ Move(result_register(), reg); @@ -584,7 +562,7 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == materialize_false); + DCHECK(materialize_true == materialize_false); __ bind(materialize_true); } @@ -617,8 +595,8 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == true_label_); - ASSERT(materialize_false == false_label_); + DCHECK(materialize_true == true_label_); + DCHECK(materialize_false == false_label_); } @@ -683,7 +661,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) { - ASSERT(var->IsStackAllocated()); + DCHECK(var->IsStackAllocated()); // Offset is negative because higher indexes are at lower addresses. int offset = -var->index() * kPointerSize; // Adjust by a (parameter or local) base offset. 
@@ -697,7 +675,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); if (var->IsContextSlot()) { int context_chain_length = scope()->ContextChainLength(var->scope()); __ LoadContext(scratch, context_chain_length); @@ -709,7 +687,7 @@ void FullCodeGenerator::GetVar(Register dest, Variable* var) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); MemOperand location = VarOperand(var, dest); __ mov(dest, location); } @@ -719,17 +697,17 @@ Register src, Register scratch0, Register scratch1) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); - ASSERT(!scratch0.is(src)); - ASSERT(!scratch0.is(scratch1)); - ASSERT(!scratch1.is(src)); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!scratch0.is(src)); + DCHECK(!scratch0.is(scratch1)); + DCHECK(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ mov(location, src); // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { int offset = Context::SlotOffset(var->index()); - ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); + DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); } } @@ -757,7 +735,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { // The variable in the declaration always resides in the current context. - ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. 
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset)); @@ -811,7 +789,7 @@ __ push(esi); __ push(Immediate(variable->name())); // VariableDeclaration nodes are always introduced in one of four modes. - ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY : NONE; __ push(Immediate(Smi::FromInt(attr))); @@ -824,7 +802,7 @@ } else { __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value. } - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -839,7 +817,7 @@ case Variable::UNALLOCATED: { globals_->Add(variable->name(), zone()); Handle<SharedFunctionInfo> function = - Compiler::BuildFunctionInfo(declaration->fun(), script()); + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); // Check for stack-overflow exception. if (function.is_null()) return SetStackOverflow(); globals_->Add(function, zone()); @@ -877,7 +855,7 @@ __ push(Immediate(variable->name())); __ push(Immediate(Smi::FromInt(NONE))); VisitForStackValue(declaration->fun()); - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -886,8 +864,8 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { Variable* variable = declaration->proxy()->var(); - ASSERT(variable->location() == Variable::CONTEXT); - ASSERT(variable->interface()->IsFrozen()); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); Comment cmnt(masm_, "[ ModuleDeclaration"); EmitDebugCheckDeclarationContext(variable); @@ -947,7 +925,7 @@ __ push(esi); // The context is the first argument. __ Push(pairs); __ Push(Smi::FromInt(DeclareGlobalsFlags())); - __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3); + __ CallRuntime(Runtime::kDeclareGlobals, 3); // Return value is ignored. 
} @@ -955,7 +933,7 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { // Call the runtime to declare the modules. __ Push(descriptions); - __ CallRuntime(Runtime::kHiddenDeclareModules, 1); + __ CallRuntime(Runtime::kDeclareModules, 1); // Return value is ignored. } @@ -1135,15 +1113,10 @@ Label non_proxy; __ bind(&fixed_array); - Handle<Object> feedback = Handle<Object>( - Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), - isolate()); - StoreFeedbackVectorSlot(slot, feedback); - // No need for a write barrier, we are storing a Smi in the feedback vector. __ LoadHeapObject(ebx, FeedbackVector()); __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)), - Immediate(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); + Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object @@ -1182,7 +1155,7 @@ // For proxies, no filtering is done. // TODO(rossberg): What if only a prototype is a proxy? Not specified yet. - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); __ test(edx, edx); __ j(zero, &update_each); @@ -1234,24 +1207,8 @@ Iteration loop_statement(this, stmt); increment_loop_depth(); - // var iterator = iterable[@@iterator]() - VisitForAccumulatorValue(stmt->assign_iterator()); - - // As with for-in, skip the loop if the iterator is null or undefined. - __ CompareRoot(eax, Heap::kUndefinedValueRootIndex); - __ j(equal, loop_statement.break_label()); - __ CompareRoot(eax, Heap::kNullValueRootIndex); - __ j(equal, loop_statement.break_label()); - - // Convert the iterator to a JS object. 
- Label convert, done_convert; - __ JumpIfSmi(eax, &convert); - __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); - __ j(above_equal, &done_convert); - __ bind(&convert); - __ push(eax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ bind(&done_convert); + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); // Loop entry. __ bind(loop_statement.continue_label()); @@ -1298,7 +1255,9 @@ !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode(), info->is_generator()); + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); __ mov(ebx, Immediate(info)); __ CallStub(&stub); } else { @@ -1307,7 +1266,7 @@ __ push(Immediate(pretenure ? isolate()->factory()->true_value() : isolate()->factory()->false_value())); - __ CallRuntime(Runtime::kHiddenNewClosure, 3); + __ CallRuntime(Runtime::kNewClosure, 3); } context()->Plug(eax); } @@ -1319,7 +1278,7 @@ } -void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { Register context = esi; @@ -1369,8 +1328,13 @@ // All extension objects were empty and it is safe to use a global // load IC call. - __ mov(edx, GlobalObjectOperand()); - __ mov(ecx, var->name()); + __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), proxy->var()->name()); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? 
NOT_CONTEXTUAL : CONTEXTUAL; @@ -1381,7 +1345,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, Label* slow) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); Register context = esi; Register temp = ebx; @@ -1409,7 +1373,7 @@ } -void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofState typeof_state, Label* slow, Label* done) { @@ -1418,8 +1382,9 @@ // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. + Variable* var = proxy->var(); if (var->mode() == DYNAMIC_GLOBAL) { - EmitLoadGlobalCheckExtensions(var, typeof_state, slow); + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); __ jmp(done); } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); @@ -1432,7 +1397,7 @@ __ mov(eax, isolate()->factory()->undefined_value()); } else { // LET || CONST __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } } __ jmp(done); @@ -1450,10 +1415,12 @@ switch (var->location()) { case Variable::UNALLOCATED: { Comment cmnt(masm_, "[ Global variable"); - // Use inline caching. Variable name is passed in ecx and the global - // object in eax. - __ mov(edx, GlobalObjectOperand()); - __ mov(ecx, var->name()); + __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), var->name()); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } CallLoadIC(CONTEXTUAL); context()->Plug(eax); break; @@ -1470,7 +1437,7 @@ // always looked up dynamically, i.e. in that case // var->location() == LOOKUP. // always holds. - ASSERT(var->scope() != NULL); + DCHECK(var->scope() != NULL); // Check if the binding really needs an initialization check. 
The check // can be skipped in the following situation: we have a LET or CONST @@ -1493,8 +1460,8 @@ skip_init_check = false; } else { // Check that we always have valid source position. - ASSERT(var->initializer_position() != RelocInfo::kNoPosition); - ASSERT(proxy->position() != RelocInfo::kNoPosition); + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); skip_init_check = var->mode() != CONST_LEGACY && var->initializer_position() < proxy->position(); } @@ -1509,10 +1476,10 @@ // Throw a reference error when using an uninitialized let/const // binding in harmony mode. __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } else { // Uninitalized const bindings outside of harmony mode are unholed. - ASSERT(var->mode() == CONST_LEGACY); + DCHECK(var->mode() == CONST_LEGACY); __ mov(eax, isolate()->factory()->undefined_value()); } __ bind(&done); @@ -1529,11 +1496,11 @@ Label done, slow; // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ push(esi); // Context. 
__ push(Immediate(var->name())); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ bind(&done); context()->Plug(eax); break; @@ -1564,7 +1531,7 @@ __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(expr->pattern())); __ push(Immediate(expr->flags())); - __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); __ mov(ebx, eax); __ bind(&materialized); @@ -1576,7 +1543,7 @@ __ bind(&runtime_allocate); __ push(ebx); __ push(Immediate(Smi::FromInt(size))); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ pop(ebx); __ bind(&allocated); @@ -1617,7 +1584,8 @@ ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; int properties_count = constant_properties->length() / 2; - if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() || + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); @@ -1625,14 +1593,14 @@ __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(constant_properties)); __ push(Immediate(Smi::FromInt(flags))); - __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset)); __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index()))); __ mov(ecx, Immediate(constant_properties)); __ mov(edx, Immediate(Smi::FromInt(flags))); - FastCloneShallowObjectStub stub(properties_count); + FastCloneShallowObjectStub stub(isolate(), properties_count); __ CallStub(&stub); } @@ -1660,14 +1628,15 @@ case ObjectLiteral::Property::CONSTANT: 
UNREACHABLE(); case ObjectLiteral::Property::MATERIALIZED_LITERAL: - ASSERT(!CompileTimeValue::IsCompileTimeValue(value)); + DCHECK(!CompileTimeValue::IsCompileTimeValue(value)); // Fall through. case ObjectLiteral::Property::COMPUTED: if (key->value()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); - __ mov(ecx, Immediate(key->value())); - __ mov(edx, Operand(esp, 0)); + DCHECK(StoreIC::ValueRegister().is(eax)); + __ mov(StoreIC::NameRegister(), Immediate(key->value())); + __ mov(StoreIC::ReceiverRegister(), Operand(esp, 0)); CallStoreIC(key->LiteralFeedbackId()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { @@ -1679,7 +1648,7 @@ VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes + __ push(Immediate(Smi::FromInt(SLOPPY))); // Strict mode __ CallRuntime(Runtime::kSetProperty, 4); } else { __ Drop(3); @@ -1713,11 +1682,11 @@ EmitAccessor(it->second->getter); EmitAccessor(it->second->setter); __ push(Immediate(Smi::FromInt(NONE))); - __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); } if (expr->has_function()) { - ASSERT(result_saved); + DCHECK(result_saved); __ push(Operand(esp, 0)); __ CallRuntime(Runtime::kToFastProperties, 1); } @@ -1741,7 +1710,7 @@ ZoneList<Expression*>* subexprs = expr->values(); int length = subexprs->length(); Handle<FixedArray> constant_elements = expr->constant_elements(); - ASSERT_EQ(2, constant_elements->length()); + DCHECK_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); bool has_constant_fast_elements = @@ -1756,46 +1725,19 @@ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; } - Heap* heap = isolate()->heap(); - if (has_constant_fast_elements && - constant_elements_values->map() == heap->fixed_cow_array_map()) { - 
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot - // change, so it's possible to specialize the stub in advance. - __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1); - __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset)); - __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index()))); - __ mov(ecx, Immediate(constant_elements)); - FastCloneShallowArrayStub stub( - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, - allocation_site_mode, - length); - __ CallStub(&stub); - } else if (expr->depth() > 1 || Serializer::enabled() || - length > FastCloneShallowArrayStub::kMaximumClonedLength) { + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(constant_elements)); __ push(Immediate(Smi::FromInt(flags))); - __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); } else { - ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || - FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = - FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - - // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot - // change, so it's possible to specialize the stub in advance. 
- if (has_constant_fast_elements) { - mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; - } - __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset)); __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index()))); __ mov(ecx, Immediate(constant_elements)); - FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); __ CallStub(&stub); } @@ -1832,7 +1774,7 @@ } else { // Store the subexpression value in the array's elements. __ mov(ecx, Immediate(Smi::FromInt(i))); - StoreArrayLiteralElementStub stub; + StoreArrayLiteralElementStub stub(isolate()); __ CallStub(&stub); } @@ -1849,7 +1791,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { - ASSERT(expr->target()->IsValidLeftHandSide()); + DCHECK(expr->target()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ Assignment"); @@ -1871,9 +1813,9 @@ break; case NAMED_PROPERTY: if (expr->is_compound()) { - // We need the receiver both on the stack and in edx. + // We need the receiver both on the stack and in the register. VisitForStackValue(property->obj()); - __ mov(edx, Operand(esp, 0)); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); } else { VisitForStackValue(property->obj()); } @@ -1882,8 +1824,8 @@ if (expr->is_compound()) { VisitForStackValue(property->obj()); VisitForStackValue(property->key()); - __ mov(edx, Operand(esp, kPointerSize)); // Object. - __ mov(ecx, Operand(esp, 0)); // Key. 
+ __ mov(LoadIC::ReceiverRegister(), Operand(esp, kPointerSize)); + __ mov(LoadIC::NameRegister(), Operand(esp, 0)); } else { VisitForStackValue(property->obj()); VisitForStackValue(property->key()); @@ -1980,7 +1922,7 @@ __ bind(&suspend); VisitForAccumulatorValue(expr->generator_object()); - ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); + DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset), Immediate(Smi::FromInt(continuation.pos()))); __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi); @@ -1991,7 +1933,7 @@ __ cmp(esp, ebx); __ j(equal, &post_runtime); __ push(eax); // generator object - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ mov(context_register(), Operand(ebp, StandardFrameConstants::kContextOffset)); __ bind(&post_runtime); @@ -2024,6 +1966,9 @@ Label l_catch, l_try, l_suspend, l_continuation, l_resume; Label l_next, l_call, l_loop; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + // Initial send value is undefined. 
__ mov(eax, isolate()->factory()->undefined_value()); __ jmp(&l_next); @@ -2031,10 +1976,10 @@ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } __ bind(&l_catch); handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); - __ mov(ecx, isolate()->factory()->throw_string()); // "throw" - __ push(ecx); // "throw" - __ push(Operand(esp, 2 * kPointerSize)); // iter - __ push(eax); // exception + __ mov(load_name, isolate()->factory()->throw_string()); // "throw" + __ push(load_name); // "throw" + __ push(Operand(esp, 2 * kPointerSize)); // iter + __ push(eax); // exception __ jmp(&l_call); // try { received = %yield result } @@ -2052,14 +1997,14 @@ const int generator_object_depth = kPointerSize + handler_size; __ mov(eax, Operand(esp, generator_object_depth)); __ push(eax); // g - ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); + DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset), Immediate(Smi::FromInt(l_continuation.pos()))); __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi); __ mov(ecx, esi); __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx, kDontSaveFPRegs); - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ mov(context_register(), Operand(ebp, StandardFrameConstants::kContextOffset)); __ pop(eax); // result @@ -2069,19 +2014,24 @@ // receiver = iter; f = iter.next; arg = received; __ bind(&l_next); - __ mov(ecx, isolate()->factory()->next_string()); // "next" - __ push(ecx); - __ push(Operand(esp, 2 * kPointerSize)); // iter - __ push(eax); // received + + __ mov(load_name, isolate()->factory()->next_string()); + __ push(load_name); // "next" + __ push(Operand(esp, 2 * kPointerSize)); // iter + __ push(eax); // received // result = receiver[f](arg); __ bind(&l_call); - __ mov(edx, Operand(esp, kPointerSize)); + __ 
mov(load_receiver, Operand(esp, kPointerSize)); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->KeyedLoadFeedbackSlot()))); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallIC(ic, TypeFeedbackId::None()); __ mov(edi, eax); __ mov(Operand(esp, 2 * kPointerSize), edi); - CallFunctionStub stub(1, CALL_AS_METHOD); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); __ CallStub(&stub); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -2090,8 +2040,13 @@ // if (!result.done) goto l_try; __ bind(&l_loop); __ push(eax); // save result - __ mov(edx, eax); // result - __ mov(ecx, isolate()->factory()->done_string()); // "done" + __ Move(load_receiver, eax); // result + __ mov(load_name, + isolate()->factory()->done_string()); // "done" + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->DoneFeedbackSlot()))); + } CallLoadIC(NOT_CONTEXTUAL); // result.done in eax Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); CallIC(bool_ic); @@ -2099,8 +2054,13 @@ __ j(zero, &l_try); // result.value - __ pop(edx); // result - __ mov(ecx, isolate()->factory()->value_string()); // "value" + __ pop(load_receiver); // result + __ mov(load_name, + isolate()->factory()->value_string()); // "value" + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->ValueFeedbackSlot()))); + } CallLoadIC(NOT_CONTEXTUAL); // result.value in eax context()->DropAndPlug(2, eax); // drop iter and g break; @@ -2113,7 +2073,7 @@ Expression *value, JSGeneratorObject::ResumeMode resume_mode) { // The value stays in eax, and is ultimately read by the resumed generator, as - // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it // is read to throw the value when the resumed generator is already closed. 
// ebx will hold the generator object until the activation has been resumed. VisitForStackValue(generator); @@ -2193,7 +2153,7 @@ __ push(ebx); __ push(result_register()); __ Push(Smi::FromInt(resume_mode)); - __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); // Not reached: the runtime call returns elsewhere. __ Abort(kGeneratorFailedToResume); @@ -2207,14 +2167,14 @@ } else { // Throw the provided value. __ push(eax); - __ CallRuntime(Runtime::kHiddenThrow, 1); + __ CallRuntime(Runtime::kThrow, 1); } __ jmp(&done); // Throw error if we attempt to operate on a running generator. __ bind(&wrong_state); __ push(ebx); - __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); __ bind(&done); context()->Plug(result_register()); @@ -2225,14 +2185,14 @@ Label gc_required; Label allocated; - Handle<Map> map(isolate()->native_context()->generator_result_map()); + Handle<Map> map(isolate()->native_context()->iterator_result_map()); __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT); __ jmp(&allocated); __ bind(&gc_required); __ Push(Smi::FromInt(map->instance_size())); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ mov(context_register(), Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -2240,7 +2200,7 @@ __ mov(ebx, map); __ pop(ecx); __ mov(edx, isolate()->factory()->ToBoolean(done)); - ASSERT_EQ(map->instance_size(), 5 * kPointerSize); + DCHECK_EQ(map->instance_size(), 5 * kPointerSize); __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx); __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), isolate()->factory()->empty_fixed_array()); @@ -2259,16 +2219,28 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - ASSERT(!key->value()->IsSmi()); - __ mov(ecx, 
Immediate(key->value())); - CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + DCHECK(!key->value()->IsSmi()); + __ mov(LoadIC::NameRegister(), Immediate(key->value())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } } void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - CallIC(ic, prop->PropertyFeedbackId()); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallIC(ic); + } else { + CallIC(ic, prop->PropertyFeedbackId()); + } } @@ -2288,8 +2260,8 @@ __ bind(&stub_call); __ mov(eax, ecx); - BinaryOpICStub stub(op, mode); - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + BinaryOpICStub stub(isolate(), op, mode); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done, Label::kNear); @@ -2371,16 +2343,16 @@ Token::Value op, OverwriteMode mode) { __ pop(edx); - BinaryOpICStub stub(op, mode); + BinaryOpICStub stub(isolate(), op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(eax); } void FullCodeGenerator::EmitAssignment(Expression* expr) { - ASSERT(expr->IsValidLeftHandSide()); + DCHECK(expr->IsValidReferenceExpression()); // Left-hand side can only be a property, a global or a (parameter or local) // slot. @@ -2403,9 +2375,9 @@ case NAMED_PROPERTY: { __ push(eax); // Preserve value. VisitForAccumulatorValue(prop->obj()); - __ mov(edx, eax); - __ pop(eax); // Restore value. 
- __ mov(ecx, prop->key()->AsLiteral()->value()); + __ Move(StoreIC::ReceiverRegister(), eax); + __ pop(StoreIC::ValueRegister()); // Restore value. + __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); CallStoreIC(); break; } @@ -2413,9 +2385,9 @@ __ push(eax); // Preserve value. VisitForStackValue(prop->obj()); VisitForAccumulatorValue(prop->key()); - __ mov(ecx, eax); - __ pop(edx); // Receiver. - __ pop(eax); // Restore value. + __ Move(KeyedStoreIC::NameRegister(), eax); + __ pop(KeyedStoreIC::ReceiverRegister()); // Receiver. + __ pop(KeyedStoreIC::ValueRegister()); // Restore value. Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -2438,34 +2410,24 @@ } -void FullCodeGenerator::EmitCallStoreContextSlot( - Handle<String> name, StrictMode strict_mode) { - __ push(eax); // Value. - __ push(esi); // Context. - __ push(Immediate(name)); - __ push(Immediate(Smi::FromInt(strict_mode))); - __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4); -} - - void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { if (var->IsUnallocated()) { // Global var, const, or let. - __ mov(ecx, var->name()); - __ mov(edx, GlobalObjectOperand()); + __ mov(StoreIC::NameRegister(), var->name()); + __ mov(StoreIC::ReceiverRegister(), GlobalObjectOperand()); CallStoreIC(); } else if (op == Token::INIT_CONST_LEGACY) { // Const initializers need a write barrier. - ASSERT(!var->IsParameter()); // No const parameters. + DCHECK(!var->IsParameter()); // No const parameters. 
if (var->IsLookupSlot()) { __ push(eax); __ push(esi); __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3); + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); } else { - ASSERT(var->IsStackLocal() || var->IsContextSlot()); + DCHECK(var->IsStackLocal() || var->IsContextSlot()); Label skip; MemOperand location = VarOperand(var, ecx); __ mov(edx, location); @@ -2477,28 +2439,30 @@ } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. - if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); - } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); - Label assign; - MemOperand location = VarOperand(var, ecx); - __ mov(edx, location); - __ cmp(edx, isolate()->factory()->the_hole_value()); - __ j(not_equal, &assign, Label::kNear); - __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); - __ bind(&assign); - EmitStoreToStackLocalOrContextSlot(var, location); - } + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, ecx); + __ mov(edx, location); + __ cmp(edx, isolate()->factory()->the_hole_value()); + __ j(not_equal, &assign, Label::kNear); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + __ bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); } else if (!var->is_const_mode() || op == Token::INIT_CONST) { - // Assignment to var or initializing assignment to let/const - // in harmony mode. if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); + // Assignment to var. + __ push(eax); // Value. + __ push(esi); // Context. 
+ __ push(Immediate(var->name())); + __ push(Immediate(Smi::FromInt(strict_mode()))); + __ CallRuntime(Runtime::kStoreLookupSlot, 4); } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); MemOperand location = VarOperand(var, ecx); if (generate_debug_code_ && op == Token::INIT_LET) { // Check for an uninitialized let binding. @@ -2519,13 +2483,13 @@ // esp[0] : receiver Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); - ASSERT(prop->key()->AsLiteral() != NULL); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); // Record source code position before IC call. SetSourcePosition(expr->position()); - __ mov(ecx, prop->key()->AsLiteral()->value()); - __ pop(edx); + __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + __ pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->AssignmentFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); context()->Plug(eax); @@ -2538,8 +2502,9 @@ // esp[0] : key // esp[kPointerSize] : receiver - __ pop(ecx); // Key. - __ pop(edx); + __ pop(KeyedStoreIC::NameRegister()); // Key. + __ pop(KeyedStoreIC::ReceiverRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(eax)); // Record source code position before IC call. SetSourcePosition(expr->position()); Handle<Code> ic = strict_mode() == SLOPPY @@ -2558,15 +2523,15 @@ if (key->IsPropertyName()) { VisitForAccumulatorValue(expr->obj()); - __ mov(edx, result_register()); + __ Move(LoadIC::ReceiverRegister(), result_register()); EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(eax); } else { VisitForStackValue(expr->obj()); VisitForAccumulatorValue(expr->key()); - __ pop(edx); // Object. - __ mov(ecx, result_register()); // Key. + __ pop(LoadIC::ReceiverRegister()); // Object. 
+ __ Move(LoadIC::NameRegister(), result_register()); // Key. EmitKeyedPropertyLoad(expr); context()->Plug(eax); } @@ -2580,17 +2545,15 @@ } - - // Code common for calls using the IC. -void FullCodeGenerator::EmitCallWithIC(Call* expr) { +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); - CallFunctionFlags flags; + CallIC::CallType call_type = callee->IsVariableProxy() + ? CallIC::FUNCTION + : CallIC::METHOD; // Get the target function. - if (callee->IsVariableProxy()) { + if (call_type == CallIC::FUNCTION) { { StackValueContext context(this); EmitVariableLoad(callee->AsVariableProxy()); PrepareForBailout(callee, NO_REGISTERS); @@ -2598,55 +2561,33 @@ // Push undefined as receiver. This is patched in the method prologue if it // is a sloppy mode method. __ push(Immediate(isolate()->factory()->undefined_value())); - flags = NO_CALL_FUNCTION_FLAGS; } else { // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ mov(edx, Operand(esp, 0)); + DCHECK(callee->IsProperty()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); // Push the target function under the receiver. __ push(Operand(esp, 0)); __ mov(Operand(esp, kPointerSize), eax); - flags = CALL_AS_METHOD; - } - - // Load the arguments. - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } } - // Record source position of the IC call. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, flags); - __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - RecordJSReturnSite(expr); - - // Restore context register. 
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, eax); + EmitCall(expr, call_type); } // Code common for calls using the IC. -void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, - Expression* key) { +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { // Load the key. VisitForAccumulatorValue(key); Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ mov(edx, Operand(esp, 0)); - // Move the key into the right register for the keyed load IC. - __ mov(ecx, eax); + DCHECK(callee->IsProperty()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + __ mov(LoadIC::NameRegister(), eax); EmitKeyedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2654,29 +2595,12 @@ __ push(Operand(esp, 0)); __ mov(Operand(esp, kPointerSize), eax); - // Load the arguments. - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - - // Record source position of the IC call. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, CALL_AS_METHOD); - __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - RecordJSReturnSite(expr); - - // Restore context register. - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, eax); + EmitCall(expr, CallIC::METHOD); } -void FullCodeGenerator::EmitCallWithStub(Call* expr) { - // Code common for calls using the call stub. +void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. 
ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); { PreservePositionScope scope(masm()->positions_recorder()); @@ -2684,23 +2608,22 @@ VisitForStackValue(args->at(i)); } } - // Record source position for debugger. - SetSourcePosition(expr->position()); - - Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); - __ LoadHeapObject(ebx, FeedbackVector()); - __ mov(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot()))); - // Record call targets in unoptimized code. - CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); + // Record source position of the IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); + __ Move(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot()))); __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); RecordJSReturnSite(expr); + // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + context()->DropAndPlug(1, eax); } @@ -2722,7 +2645,7 @@ __ push(Immediate(Smi::FromInt(scope()->start_position()))); // Do the runtime call. - __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); } @@ -2764,7 +2687,7 @@ } // Record source position for debugger. 
SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); RecordJSReturnSite(expr); @@ -2773,7 +2696,7 @@ context()->DropAndPlug(1, eax); } else if (call_type == Call::GLOBAL_CALL) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else if (call_type == Call::LOOKUP_SLOT_CALL) { // Call to a lookup slot (dynamically introduced variable). @@ -2782,14 +2705,14 @@ { PreservePositionScope scope(masm()->positions_recorder()); // Generate code for loading from variables potentially shadowed by // eval-introduced variables. - EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); } __ bind(&slow); // Call the runtime to find the function to call (returned in eax) and // the object holding it (returned in edx). __ push(context_register()); __ push(Immediate(proxy->name())); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ push(eax); // Function. __ push(edx); // Receiver. @@ -2809,7 +2732,7 @@ // The receiver is either the global receiver or an object found by // LoadContextSlot. - EmitCallWithStub(expr); + EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); @@ -2817,25 +2740,25 @@ VisitForStackValue(property->obj()); } if (property->key()->IsPropertyName()) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else { - EmitKeyedCallWithIC(expr, property->key()); + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { - ASSERT(call_type == Call::OTHER_CALL); + DCHECK(call_type == Call::OTHER_CALL); // Call to an arbitrary expression not handled specially above. 
{ PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(callee); } __ push(Immediate(isolate()->factory()->undefined_value())); // Emit function call. - EmitCallWithStub(expr); + EmitCall(expr); } #ifdef DEBUG // RecordJSReturnSite should have been called. - ASSERT(expr->return_is_recorded_); + DCHECK(expr->return_is_recorded_); #endif } @@ -2867,21 +2790,17 @@ __ mov(edi, Operand(esp, arg_count * kPointerSize)); // Record call targets in unoptimized code. - Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); if (FLAG_pretenuring_call_new) { - StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(), - isolate()->factory()->NewAllocationSite()); - ASSERT(expr->AllocationSiteFeedbackSlot() == + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == expr->CallNewFeedbackSlot() + 1); } __ LoadHeapObject(ebx, FeedbackVector()); __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot()))); - CallConstructStub stub(RECORD_CALL_TARGET); - __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(eax); } @@ -2889,7 +2808,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2910,7 +2829,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2931,7 +2850,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - 
ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2963,7 +2882,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2985,7 +2904,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3010,7 +2929,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3054,7 +2973,7 @@ STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kPointerSize == 4); __ imul(ecx, ecx, DescriptorArray::kDescriptorSize); - __ lea(ecx, Operand(ebx, ecx, times_2, DescriptorArray::kFirstOffset)); + __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset)); // Calculate location of the first key name. __ add(ebx, Immediate(DescriptorArray::kFirstOffset)); // Loop through all the keys in the descriptor array. 
If one of these is the @@ -3100,7 +3019,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3122,7 +3041,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3150,7 +3069,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3172,7 +3091,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3194,7 +3113,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label materialize_true, materialize_false; Label* if_true = NULL; @@ -3226,7 +3145,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); // Load the two objects into registers and perform the comparison. VisitForStackValue(args->at(0)); @@ -3250,21 +3169,21 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); // ArgumentsAccessStub expects the key in edx and the formal // parameter count in eax. 
VisitForAccumulatorValue(args->at(0)); __ mov(edx, eax); __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters()))); - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); __ CallStub(&stub); context()->Plug(eax); } void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label exit; // Get the number of formal parameters. @@ -3288,7 +3207,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); Label done, null, function, non_function_constructor; VisitForAccumulatorValue(args->at(0)); @@ -3347,32 +3266,11 @@ } -void FullCodeGenerator::EmitLog(CallRuntime* expr) { - // Conditionally generate a log call. - // Args: - // 0 (literal string): The type of logging (corresponds to the flags). - // This is used to determine whether or not to generate the log call. - // 1 (string): Format string. Access the string at argument index 2 - // with '%2s' (see Logger::LogRuntime for all the formats). - // 2 (array): Arguments to the format string. - ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 3); - if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) { - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - __ CallRuntime(Runtime::kHiddenLog, 2); - } - // Finally, we're expected to leave a value on the top of the stack. - __ mov(eax, isolate()->factory()->undefined_value()); - context()->Plug(eax); -} - - void FullCodeGenerator::EmitSubString(CallRuntime* expr) { // Load the arguments on the stack and call the stub. 
- SubStringStub stub; + SubStringStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3383,9 +3281,9 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - RegExpExecStub stub; + RegExpExecStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 4); + DCHECK(args->length() == 4); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3397,7 +3295,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -3416,8 +3314,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); - ASSERT_NE(NULL, args->at(1)->AsLiteral()); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -3453,7 +3351,7 @@ } __ bind(¬_date_object); - __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0); + __ CallRuntime(Runtime::kThrowNotDateError, 0); __ bind(&done); context()->Plug(result); } @@ -3461,7 +3359,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = eax; Register index = ebx; @@ -3497,7 +3395,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = eax; Register index = ebx; @@ -3531,23 +3429,19 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - if (CpuFeatures::IsSupported(SSE2)) { - MathPowStub stub(MathPowStub::ON_STACK); - __ CallStub(&stub); - } else { - __ CallRuntime(Runtime::kMath_pow, 2); - } + MathPowStub stub(isolate(), MathPowStub::ON_STACK); + __ CallStub(&stub); context()->Plug(eax); } void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); // Load the object. VisitForAccumulatorValue(args->at(1)); // Load the value. @@ -3576,12 +3470,12 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 1); + DCHECK_EQ(args->length(), 1); // Load the argument into eax and call the stub. 
VisitForAccumulatorValue(args->at(0)); - NumberToStringStub stub; + NumberToStringStub stub(isolate()); __ CallStub(&stub); context()->Plug(eax); } @@ -3589,7 +3483,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3608,7 +3502,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3654,7 +3548,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3702,12 +3596,12 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); __ pop(edx); - StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED); + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); __ CallStub(&stub); context()->Plug(eax); } @@ -3715,40 +3609,20 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - StringCompareStub stub; + StringCompareStub stub(isolate()); __ CallStub(&stub); context()->Plug(eax); } -void FullCodeGenerator::EmitMathLog(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); - context()->Plug(eax); -} - - -void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. - ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); - context()->Plug(eax); -} - - void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() >= 2); + DCHECK(args->length() >= 2); int arg_count = args->length() - 2; // 2 ~ receiver and function. for (int i = 0; i < arg_count + 1; ++i) { @@ -3780,9 +3654,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - RegExpConstructResultStub stub; + RegExpConstructResultStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(2)); @@ -3795,9 +3669,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); - ASSERT_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle<FixedArray> jsfunction_result_caches( @@ -3835,7 +3709,7 @@ // Call runtime to perform the lookup. 
__ push(cache); __ push(key); - __ CallRuntime(Runtime::kHiddenGetFromCache, 2); + __ CallRuntime(Runtime::kGetFromCache, 2); __ bind(&done); context()->Plug(eax); @@ -3844,7 +3718,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3868,7 +3742,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); __ AssertString(eax); @@ -3886,7 +3760,7 @@ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry; ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); // We will leave the separator on the stack until the end of the function. VisitForStackValue(args->at(1)); // Load this to eax (= array) @@ -4144,6 +4018,16 @@ } +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ movzx_b(eax, Operand::StaticVariable(debug_is_active)); + __ SmiTag(eax); + context()->Plug(eax); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->function() != NULL && expr->function()->intrinsic_type == Runtime::INLINE) { @@ -4161,9 +4045,15 @@ __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset)); // Load the function from the receiver. 
- __ mov(edx, Operand(esp, 0)); - __ mov(ecx, Immediate(expr->name())); - CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + __ mov(LoadIC::NameRegister(), Immediate(expr->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->CallRuntimeFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } // Push the target function under the receiver. __ push(Operand(esp, 0)); @@ -4178,7 +4068,7 @@ // Record source position of the IC call. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); // Restore context register. @@ -4217,7 +4107,7 @@ Variable* var = proxy->var(); // Delete of an unqualified identifier is disallowed in strict mode // but "delete this" is allowed. - ASSERT(strict_mode() == SLOPPY || var->is_this()); + DCHECK(strict_mode() == SLOPPY || var->is_this()); if (var->IsUnallocated()) { __ push(GlobalObjectOperand()); __ push(Immediate(var->name())); @@ -4234,7 +4124,7 @@ // context where the variable was introduced. __ push(context_register()); __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); context()->Plug(eax); } } else { @@ -4272,7 +4162,7 @@ // for control and plugging the control flow into the context, // because we need to prepare a pair of extra administrative AST ids // for the optimizing compiler. 
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue()); + DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); Label materialize_true, materialize_false, done; VisitForControl(expr->expression(), &materialize_false, @@ -4315,7 +4205,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { - ASSERT(expr->expression()->IsValidLeftHandSide()); + DCHECK(expr->expression()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ CountOperation"); SetSourcePosition(expr->position()); @@ -4334,7 +4224,7 @@ // Evaluate expression and get value. if (assign_type == VARIABLE) { - ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { @@ -4343,16 +4233,16 @@ __ push(Immediate(Smi::FromInt(0))); } if (assign_type == NAMED_PROPERTY) { - // Put the object both on the stack and in edx. - VisitForAccumulatorValue(prop->obj()); - __ push(eax); - __ mov(edx, eax); + // Put the object both on the stack and in the register. + VisitForStackValue(prop->obj()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); EmitNamedPropertyLoad(prop); } else { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); - __ mov(edx, Operand(esp, kPointerSize)); // Object. - __ mov(ecx, Operand(esp, 0)); // Key. + __ mov(LoadIC::ReceiverRegister(), + Operand(esp, kPointerSize)); // Object. + __ mov(LoadIC::NameRegister(), Operand(esp, 0)); // Key. EmitKeyedPropertyLoad(prop); } } @@ -4407,7 +4297,7 @@ __ jmp(&stub_call, Label::kNear); __ bind(&slow); } - ToNumberStub convert_stub; + ToNumberStub convert_stub(isolate()); __ CallStub(&convert_stub); // Save result for postfix expressions. 
@@ -4437,8 +4327,8 @@ __ bind(&stub_call); __ mov(edx, eax); __ mov(eax, Immediate(Smi::FromInt(1))); - BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE); - CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); + BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4467,8 +4357,8 @@ } break; case NAMED_PROPERTY: { - __ mov(ecx, prop->key()->AsLiteral()->value()); - __ pop(edx); + __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + __ pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->CountStoreFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4481,8 +4371,8 @@ break; } case KEYED_PROPERTY: { - __ pop(ecx); - __ pop(edx); + __ pop(KeyedStoreIC::NameRegister()); + __ pop(KeyedStoreIC::ReceiverRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -4504,13 +4394,17 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { VariableProxy* proxy = expr->AsVariableProxy(); - ASSERT(!context()->IsEffect()); - ASSERT(!context()->IsTest()); + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); if (proxy != NULL && proxy->var()->IsUnallocated()) { Comment cmnt(masm_, "[ Global variable"); - __ mov(edx, GlobalObjectOperand()); - __ mov(ecx, Immediate(proxy->name())); + __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), Immediate(proxy->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } // Use a regular load, not a contextual load, to avoid a reference // error. CallLoadIC(NOT_CONTEXTUAL); @@ -4522,12 +4416,12 @@ // Generate code for loading from variables potentially shadowed // by eval-introduced variables. 
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ push(esi); __ push(Immediate(proxy->name())); - __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); PrepareForBailout(expr, TOS_REG); __ bind(&done); @@ -4554,12 +4448,13 @@ } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { __ JumpIfSmi(eax, if_true); __ cmp(FieldOperand(eax, HeapObject::kMapOffset), isolate()->factory()->heap_number_map()); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_string())) { + } else if (String::Equals(check, factory->string_string())) { __ JumpIfSmi(eax, if_false); __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx); __ j(above_equal, if_false); @@ -4567,20 +4462,16 @@ __ test_b(FieldOperand(edx, Map::kBitFieldOffset), 1 << Map::kIsUndetectable); Split(zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->symbol_string())) { + } else if (String::Equals(check, factory->symbol_string())) { __ JumpIfSmi(eax, if_false); __ CmpObjectType(eax, SYMBOL_TYPE, edx); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_string())) { + } else if (String::Equals(check, factory->boolean_string())) { __ cmp(eax, isolate()->factory()->true_value()); __ j(equal, if_true); __ cmp(eax, isolate()->factory()->false_value()); Split(equal, if_true, if_false, fall_through); - } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_string())) { - __ cmp(eax, isolate()->factory()->null_value()); - Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_string())) { + } else 
if (String::Equals(check, factory->undefined_string())) { __ cmp(eax, isolate()->factory()->undefined_value()); __ j(equal, if_true); __ JumpIfSmi(eax, if_false); @@ -4589,19 +4480,17 @@ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); __ test(ecx, Immediate(1 << Map::kIsUndetectable)); Split(not_zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_string())) { + } else if (String::Equals(check, factory->function_string())) { __ JumpIfSmi(eax, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); __ j(equal, if_true); __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_string())) { + } else if (String::Equals(check, factory->object_string())) { __ JumpIfSmi(eax, if_false); - if (!FLAG_harmony_typeof) { - __ cmp(eax, isolate()->factory()->null_value()); - __ j(equal, if_true); - } + __ cmp(eax, isolate()->factory()->null_value()); + __ j(equal, if_true); __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx); __ j(below, if_false); __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); @@ -4647,7 +4536,7 @@ case Token::INSTANCEOF: { VisitForStackValue(expr->right()); - InstanceofStub stub(InstanceofStub::kNoFlags); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); __ CallStub(&stub); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); __ test(eax, eax); @@ -4737,7 +4626,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { - ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); + DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); __ mov(Operand(ebp, frame_offset), value); } @@ -4762,7 +4651,7 @@ // Fetch it from the context. 
__ push(ContextOperand(esi, Context::CLOSURE_INDEX)); } else { - ASSERT(declaration_scope->is_function_scope()); + DCHECK(declaration_scope->is_function_scope()); __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); } } @@ -4773,7 +4662,7 @@ void FullCodeGenerator::EnterFinallyBlock() { // Cook return address on top of stack (smi encoded Code* delta) - ASSERT(!result_register().is(edx)); + DCHECK(!result_register().is(edx)); __ pop(edx); __ sub(edx, Immediate(masm_->CodeObject())); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); @@ -4804,7 +4693,7 @@ void FullCodeGenerator::ExitFinallyBlock() { - ASSERT(!result_register().is(edx)); + DCHECK(!result_register().is(edx)); // Restore pending message from stack. __ pop(edx); ExternalReference pending_message_script = @@ -4915,25 +4804,25 @@ Address pc) { Address call_target_address = pc - kIntSize; Address jns_instr_address = call_target_address - 3; - ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); + DCHECK_EQ(kCallInstruction, *(call_target_address - 1)); if (*jns_instr_address == kJnsInstruction) { - ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); - ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(), + DCHECK_EQ(kJnsOffset, *(call_target_address - 2)); + DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(), Assembler::target_address_at(call_target_address, unoptimized_code)); return INTERRUPT; } - ASSERT_EQ(kNopByteOne, *jns_instr_address); - ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); + DCHECK_EQ(kNopByteOne, *jns_instr_address); + DCHECK_EQ(kNopByteTwo, *(call_target_address - 2)); if (Assembler::target_address_at(call_target_address, unoptimized_code) == isolate->builtins()->OnStackReplacement()->entry()) { return ON_STACK_REPLACEMENT; } - ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), + DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), Assembler::target_address_at(call_target_address, unoptimized_code)); return OSR_AFTER_STACK_CHECK; diff -Nru 
nodejs-0.11.13/deps/v8/src/ia32/ic-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/ic-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/ic-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/ic-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "codegen.h" -#include "ic-inl.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -58,45 +35,6 @@ } -// Generated code falls through if the receiver is a regular non-global -// JS object with slow properties and no interceptors. -static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register r0, - Register r1, - Label* miss) { - // Register usage: - // receiver: holds the receiver on entry and is unchanged. - // r0: used to hold receiver instance type. - // Holds the property dictionary on fall through. - // r1: used to hold receivers map. - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the receiver is a valid JS object. - __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset)); - __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset)); - __ cmp(r0, FIRST_SPEC_OBJECT_TYPE); - __ j(below, miss); - - // If this assert fails, we have to check upper bound too. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); - - GenerateGlobalInstanceTypeCheck(masm, r0, miss); - - // Check for non-global object that requires access check. - __ test_b(FieldOperand(r1, Map::kBitFieldOffset), - (1 << Map::kIsAccessCheckNeeded) | - (1 << Map::kHasNamedInterceptor)); - __ j(not_zero, miss); - - __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset)); - __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss, - DONT_DO_SMI_CHECK); -} - - // Helper function used to load a property from a dictionary backing // storage. 
This function may fail to load a property even though it is // in the dictionary, so code at miss_label must always call a backup @@ -243,7 +181,7 @@ // In the case that the object is a value-wrapper object, // we enter the runtime system to make sure that indexing // into string objects works as intended. - ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); + DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); __ CmpInstanceType(map, JS_OBJECT_TYPE); __ j(below, slow); @@ -406,41 +344,40 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // The return address is on the stack. Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + // Check that the key is a smi. - __ JumpIfNotSmi(ecx, &check_name); + __ JumpIfNotSmi(key, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from // where a numeric string is converted to a smi. GenerateKeyedLoadReceiverCheck( - masm, edx, eax, Map::kHasIndexedInterceptor, &slow); + masm, receiver, eax, Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. 
__ CheckFastElements(eax, &check_number_dictionary); - GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow); + GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow); Isolate* isolate = masm->isolate(); Counters* counters = isolate->counters(); __ IncrementCounter(counters->keyed_load_generic_smi(), 1); __ ret(0); __ bind(&check_number_dictionary); - __ mov(ebx, ecx); + __ mov(ebx, key); __ SmiUntag(ebx); - __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset)); + __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset)); // Check whether the elements is a number dictionary. - // edx: receiver // ebx: untagged index - // ecx: key // eax: elements __ CheckMap(eax, isolate->factory()->hash_table_map(), @@ -449,32 +386,30 @@ Label slow_pop_receiver; // Push receiver on the stack to free up a register for the dictionary // probing. - __ push(edx); - __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax); + __ push(receiver); + __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax); // Pop receiver before returning. - __ pop(edx); + __ pop(receiver); __ ret(0); __ bind(&slow_pop_receiver); // Pop the receiver from the stack and jump to runtime. - __ pop(edx); + __ pop(receiver); __ bind(&slow); // Slow case: jump to runtime. - // edx: receiver - // ecx: key __ IncrementCounter(counters->keyed_load_generic_slow(), 1); GenerateRuntimeGetProperty(masm); __ bind(&check_name); - GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow); + GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow); GenerateKeyedLoadReceiverCheck( - masm, edx, eax, Map::kHasNamedInterceptor, &slow); + masm, receiver, eax, Map::kHasNamedInterceptor, &slow); // If the receiver is a fast-case object, check the keyed lookup // cache. Otherwise probe the dictionary. 
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset)); + __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset)); __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), Immediate(isolate->factory()->hash_table_map())); __ j(equal, &probe_dictionary); @@ -482,12 +417,12 @@ // The receiver's map is still in eax, compute the keyed lookup cache hash // based on 32 bits of the map pointer and the string hash. if (FLAG_debug_code) { - __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset)); + __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset)); __ Check(equal, kMapIsNoLongerInEax); } __ mov(ebx, eax); // Keep the map around for later. __ shr(eax, KeyedLookupCache::kMapHashShift); - __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset)); + __ mov(edi, FieldOperand(key, String::kHashFieldOffset)); __ shr(edi, String::kHashShift); __ xor_(eax, edi); __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); @@ -510,7 +445,7 @@ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &try_next_entry); __ add(edi, Immediate(kPointerSize)); - __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys)); + __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys)); __ j(equal, &hit_on_nth_entry[i]); __ bind(&try_next_entry); } @@ -521,14 +456,12 @@ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); __ add(edi, Immediate(kPointerSize)); - __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys)); + __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); // Get field offset. 
- // edx : receiver - // ebx : receiver's map - // ecx : key - // eax : lookup cache index + // ebx : receiver's map + // eax : lookup cache index ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate()); @@ -552,13 +485,13 @@ __ bind(&load_in_object_property); __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset)); __ add(eax, edi); - __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0)); + __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); // Load property array property. __ bind(&property_array_property); - __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset)); + __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset)); __ mov(eax, FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); @@ -568,33 +501,31 @@ // exists. __ bind(&probe_dictionary); - __ mov(eax, FieldOperand(edx, JSObject::kMapOffset)); + __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset)); __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset)); GenerateGlobalInstanceTypeCheck(masm, eax, &slow); - GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax); + GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax); __ IncrementCounter(counters->keyed_load_generic_symbol(), 1); __ ret(0); __ bind(&index_name); - __ IndexFromHash(ebx, ecx); + __ IndexFromHash(ebx, key); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); } void KeyedLoadIC::GenerateString(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key (index) - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // Return address is on the stack. 
Label miss; - Register receiver = edx; - Register index = ecx; + Register receiver = ReceiverRegister(); + Register index = NameRegister(); Register scratch = ebx; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); Register result = eax; + DCHECK(!result.is(scratch)); StringCharAtGenerator char_at_generator(receiver, index, @@ -616,40 +547,40 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // Return address is on the stack. Label slow; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch = eax; + DCHECK(!scratch.is(receiver) && !scratch.is(key)); + // Check that the receiver isn't a smi. - __ JumpIfSmi(edx, &slow); + __ JumpIfSmi(receiver, &slow); // Check that the key is an array index, that is Uint32. - __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask)); + __ test(key, Immediate(kSmiTagMask | kSmiSignMask)); __ j(not_zero, &slow); // Get the map of the receiver. - __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset)); + __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); // Check that it has indexed interceptor and access checks // are not enabled for this object. - __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset)); - __ and_(eax, Immediate(kSlowCaseBitFieldMask)); - __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor)); + __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset)); + __ and_(scratch, Immediate(kSlowCaseBitFieldMask)); + __ cmp(scratch, Immediate(1 << Map::kHasIndexedInterceptor)); __ j(not_zero, &slow); // Everything is fine, call runtime. - __ pop(eax); - __ push(edx); // receiver - __ push(ecx); // key - __ push(eax); // return address + __ pop(scratch); + __ push(receiver); // receiver + __ push(key); // key + __ push(scratch); // return address // Perform tail call to the entry. 
- ExternalReference ref = - ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor), - masm->isolate()); + ExternalReference ref = ExternalReference( + IC_Utility(kLoadElementWithInterceptor), masm->isolate()); __ TailCallExternalReference(ref, 2, 1); __ bind(&slow); @@ -658,21 +589,23 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // The return address is on the stack. + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + Label slow, notin; Factory* factory = masm->isolate()->factory(); Operand mapped_location = - GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, ¬in, &slow); + GenerateMappedArgumentsLookup( + masm, receiver, key, ebx, eax, ¬in, &slow); __ mov(eax, mapped_location); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in ebx. Operand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow); + GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow); __ cmp(unmapped_location, factory->the_hole_value()); __ j(equal, &slow); __ mov(eax, unmapped_location); @@ -683,27 +616,30 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // Return address is on the stack. 
Label slow, notin; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + DCHECK(receiver.is(edx)); + DCHECK(name.is(ecx)); + DCHECK(value.is(eax)); + Operand mapped_location = - GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, ¬in, &slow); - __ mov(mapped_location, eax); + GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, ¬in, + &slow); + __ mov(mapped_location, value); __ lea(ecx, mapped_location); - __ mov(edx, eax); + __ mov(edx, value); __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in ebx. Operand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow); - __ mov(unmapped_location, eax); + GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow); + __ mov(unmapped_location, value); __ lea(edi, unmapped_location); - __ mov(edx, eax); + __ mov(edx, value); __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs); __ Ret(); __ bind(&slow); @@ -721,9 +657,13 @@ Label transition_smi_elements; Label finish_object_store, non_double_value, transition_double_elements; Label fast_double_without_map_check; - // eax: value - // ecx: key (a smi) - // edx: receiver + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register key = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + DCHECK(value.is(eax)); + // key is a smi. // ebx: FixedArray receiver->elements // edi: receiver map // Fast case: Do the store, could either Object or double. 
@@ -738,43 +678,43 @@ // We have to go to the runtime if the current value is the hole because // there may be a callback on the element Label holecheck_passed1; - __ cmp(FixedArrayElementOperand(ebx, ecx), + __ cmp(FixedArrayElementOperand(ebx, key), masm->isolate()->factory()->the_hole_value()); __ j(not_equal, &holecheck_passed1); - __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow); - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ bind(&holecheck_passed1); // Smi stores don't require further checks. Label non_smi_value; - __ JumpIfNotSmi(eax, &non_smi_value); + __ JumpIfNotSmi(value, &non_smi_value); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ add(FieldOperand(edx, JSArray::kLengthOffset), + __ add(FieldOperand(receiver, JSArray::kLengthOffset), Immediate(Smi::FromInt(1))); } // It's irrelevant whether array is smi-only or not when writing a smi. - __ mov(FixedArrayElementOperand(ebx, ecx), eax); + __ mov(FixedArrayElementOperand(ebx, key), value); __ ret(0); __ bind(&non_smi_value); // Escape to elements kind transition case. - __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset)); __ CheckFastObjectElements(edi, &transition_smi_elements); // Fast elements array, store the value to the elements backing store. __ bind(&finish_object_store); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ add(FieldOperand(edx, JSArray::kLengthOffset), + __ add(FieldOperand(receiver, JSArray::kLengthOffset), Immediate(Smi::FromInt(1))); } - __ mov(FixedArrayElementOperand(ebx, ecx), eax); + __ mov(FixedArrayElementOperand(ebx, key), value); // Update write barrier for the elements array address. - __ mov(edx, eax); // Preserve the value which is returned. 
+ __ mov(edx, value); // Preserve the value which is returned. __ RecordWriteArray( - ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ ret(0); __ bind(fast_double); @@ -791,26 +731,26 @@ // We have to see if the double version of the hole is present. If so // go to the runtime. uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32)); + __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32)); __ j(not_equal, &fast_double_without_map_check); - __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow); - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0, - &transition_double_elements, false); + __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0, + &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ add(FieldOperand(edx, JSArray::kLengthOffset), + __ add(FieldOperand(receiver, JSArray::kLengthOffset), Immediate(Smi::FromInt(1))); } __ ret(0); __ bind(&transition_smi_elements); - __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); + __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset)); // Transition the array appropriately depending on the value type. 
- __ CheckMap(eax, + __ CheckMap(value, masm->isolate()->factory()->heap_number_map(), &non_double_value, DONT_DO_SMI_CHECK); @@ -824,8 +764,9 @@ slow); AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, ebx, mode, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); @@ -836,51 +777,51 @@ edi, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, - slow); - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, ebx, mode, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); __ bind(&transition_double_elements); // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a // HeapNumber. 
Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS - __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); + __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset)); __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, ebx, edi, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + ElementsTransitionGenerator::GenerateDoubleToObject( + masm, receiver, key, value, ebx, mode, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // Return address is on the stack. Label slow, fast_object, fast_object_grow; Label fast_double, fast_double_grow; Label array, extra, check_if_double_array; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); // Check that the object isn't a smi. - __ JumpIfSmi(edx, &slow); + __ JumpIfSmi(receiver, &slow); // Get the map from the receiver. - __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks and is not observed. // The generic stub does not perform map checks or handle observed objects. __ test_b(FieldOperand(edi, Map::kBitFieldOffset), 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved); __ j(not_zero, &slow); // Check that the key is a smi. 
- __ JumpIfNotSmi(ecx, &slow); + __ JumpIfNotSmi(key, &slow); __ CmpInstanceType(edi, JS_ARRAY_TYPE); __ j(equal, &array); // Check that the object is some kind of JSObject. @@ -888,13 +829,11 @@ __ j(below, &slow); // Object case: Check key against length in the elements array. - // eax: value - // edx: JSObject - // ecx: key (a smi) + // Key is a smi. // edi: receiver map - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); // Check array bounds. Both the key and the length of FixedArray are smis. - __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset)); __ j(below, &fast_object); // Slow case: call runtime. @@ -905,15 +844,14 @@ // perform the store and update the length. Used for adding one // element to the array by writing to array[array.length]. __ bind(&extra); - // eax: value - // edx: receiver, a JSArray - // ecx: key, a smi. + // receiver is a JSArray. + // key is a smi. // ebx: receiver->elements, a FixedArray // edi: receiver map - // flags: compare (ecx, edx.length()) + // flags: compare (key, receiver.length()) // do not leave holes in the array: __ j(not_equal, &slow); - __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset)); __ j(above_equal, &slow); __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); @@ -929,15 +867,14 @@ // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. __ bind(&array); - // eax: value - // edx: receiver, a JSArray - // ecx: key, a smi. + // receiver is a JSArray. + // key is a smi. 
// edi: receiver map - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); // Check the key against the length in the array and fall through to the // common store code. - __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis. + __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis. __ j(above_equal, &extra); KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, @@ -948,16 +885,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // The return address is on the stack. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(name.is(ecx)); // Probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, edx, ecx, ebx, eax); + masm, flags, receiver, name, ebx, eax); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -965,39 +903,41 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; + Register dictionary = eax; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); - GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss); + Label slow; - // eax: elements - // Search the dictionary placing the result in eax. 
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax); + __ mov(dictionary, + FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), edi, ebx, + eax); __ ret(0); - // Cache miss: Jump to runtime. - __ bind(&miss); - GenerateMiss(masm); + // Dictionary load failed, go slow (but don't miss). + __ bind(&slow); + GenerateRuntimeGetProperty(masm); } -void LoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- +static void LoadIC_PushArgs(MacroAssembler* masm) { + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + DCHECK(!ebx.is(receiver) && !ebx.is(name)); + __ pop(ebx); + __ push(receiver); + __ push(name); + __ push(ebx); +} + + +void LoadIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1); - __ pop(ebx); - __ push(edx); // receiver - __ push(ecx); // name - __ push(ebx); // return address + LoadIC_PushArgs(masm); // Perform tail call to the entry. ExternalReference ref = @@ -1007,16 +947,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - __ pop(ebx); - __ push(edx); // receiver - __ push(ecx); // name - __ push(ebx); // return address + // Return address is on the stack. + LoadIC_PushArgs(masm); // Perform tail call to the entry. 
__ TailCallRuntime(Runtime::kGetProperty, 2, 1); @@ -1024,18 +956,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - + // Return address is on the stack. __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1); - __ pop(ebx); - __ push(edx); // receiver - __ push(ecx); // name - __ push(ebx); // return address + LoadIC_PushArgs(masm); // Perform tail call to the entry. ExternalReference ref = @@ -1044,17 +968,36 @@ } -void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- +// IC register specifications +const Register LoadIC::ReceiverRegister() { return edx; } +const Register LoadIC::NameRegister() { return ecx; } - __ pop(ebx); - __ push(edx); // receiver - __ push(ecx); // name - __ push(ebx); // return address + +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return eax; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return ebx; +} + + +const Register StoreIC::ReceiverRegister() { return edx; } +const Register StoreIC::NameRegister() { return ecx; } +const Register StoreIC::ValueRegister() { return eax; } + + +const Register KeyedStoreIC::MapRegister() { + return ebx; +} + + +void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // Return address is on the stack. + LoadIC_PushArgs(masm); // Perform tail call to the entry. 
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); @@ -1062,34 +1005,36 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC); + // Return address is on the stack. + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, edx, ecx, ebx, no_reg); + masm, flags, ReceiverRegister(), NameRegister(), + ebx, no_reg); // Cache miss: Jump to runtime. GenerateMiss(masm); } -void StoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- +static void StoreIC_PushArgs(MacroAssembler* masm) { + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + + DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); __ pop(ebx); - __ push(edx); - __ push(ecx); - __ push(eax); + __ push(receiver); + __ push(name); + __ push(value); __ push(ebx); +} + + +void StoreIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. + StoreIC_PushArgs(masm); // Perform tail call to the entry. 
ExternalReference ref = @@ -1099,31 +1044,27 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + Label restore_miss; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + Register dictionary = ebx; - Label miss, restore_miss; - - GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss); + __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); // A lot of registers are needed for storing to slow case // objects. Push and restore receiver but rely on // GenerateDictionaryStore preserving the value and name. - __ push(edx); - GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi); + __ push(receiver); + GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value, + receiver, edi); __ Drop(1); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1); __ ret(0); __ bind(&restore_miss); - __ pop(edx); - - __ bind(&miss); + __ pop(receiver); __ IncrementCounter(counters->store_normal_miss(), 1); GenerateMiss(masm); } @@ -1131,60 +1072,41 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + // Return address is on the stack. 
+ DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) && + !ebx.is(ValueRegister())); __ pop(ebx); - __ push(edx); - __ push(ecx); - __ push(eax); - __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes + __ push(ReceiverRegister()); + __ push(NameRegister()); + __ push(ValueRegister()); __ push(Immediate(Smi::FromInt(strict_mode))); __ push(ebx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - + // Return address is on the stack. + DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) && + !ebx.is(ValueRegister())); __ pop(ebx); - __ push(edx); - __ push(ecx); - __ push(eax); - __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes - __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode. - __ push(ebx); // return address + __ push(ReceiverRegister()); + __ push(NameRegister()); + __ push(ValueRegister()); + __ push(Immediate(Smi::FromInt(strict_mode))); + __ push(ebx); // return address // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - __ pop(ebx); - __ push(edx); - __ push(ecx); - __ push(eax); - __ push(ebx); + // Return address is on the stack. + StoreIC_PushArgs(masm); // Do tail-call to runtime routine. 
ExternalReference ref = @@ -1194,18 +1116,8 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - __ pop(ebx); - __ push(edx); - __ push(ecx); - __ push(eax); - __ push(ebx); // return address + // Return address is on the stack. + StoreIC_PushArgs(masm); // Do tail-call to runtime routine. ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate()); @@ -1214,18 +1126,8 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - __ pop(ebx); - __ push(edx); - __ push(ecx); - __ push(eax); - __ push(ebx); // return address + // Return address is on the stack. + StoreIC_PushArgs(masm); // Do tail-call to runtime routine. ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); @@ -1275,14 +1177,14 @@ // If the instruction following the call is not a test al, nothing // was inlined. if (*test_instruction_address != Assembler::kTestAlByte) { - ASSERT(*test_instruction_address == Assembler::kNopByte); + DCHECK(*test_instruction_address == Assembler::kNopByte); return; } Address delta_address = test_instruction_address + 1; // The delta to the start of the map check instruction and the // condition code uses at the patched jump. - int8_t delta = *reinterpret_cast<int8_t*>(delta_address); + uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address); if (FLAG_trace_ic) { PrintF("[ patching ic at %p, test=%p, delta=%d\n", address, test_instruction_address, delta); @@ -1292,7 +1194,7 @@ // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the // reverse operation of that. 
Address jmp_address = test_instruction_address - delta; - ASSERT((check == ENABLE_INLINED_SMI_CHECK) + DCHECK((check == ENABLE_INLINED_SMI_CHECK) ? (*jmp_address == Assembler::kJncShortOpcode || *jmp_address == Assembler::kJcShortOpcode) : (*jmp_address == Assembler::kJnzShortOpcode || diff -Nru nodejs-0.11.13/deps/v8/src/ia32/lithium-codegen-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/lithium-codegen-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/lithium-codegen-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/lithium-codegen-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,52 +1,22 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-codegen-ia32.h" -#include "ic.h" -#include "code-stubs.h" -#include "deoptimizer.h" -#include "stub-cache.h" -#include "codegen.h" -#include "hydrogen-osr.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/hydrogen-osr.h" +#include "src/ia32/lithium-codegen-ia32.h" +#include "src/ic.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { - -static SaveFPRegsMode GetSaveFPRegsMode() { - // We don't need to save floating point regs when generating the snapshot - return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs; -} - - // When invoking builtins, we need to record the safepoint in the middle of // the invoke instruction sequence generated by the macro assembler. class SafepointGenerator V8_FINAL : public CallWrapper { @@ -76,7 +46,7 @@ bool LCodeGen::GenerateCode() { LPhase phase("Z_Code generation", chunk()); - ASSERT(is_unused()); + DCHECK(is_unused()); status_ = GENERATING; // Open a frame scope to indicate that there is a frame on the stack. 
The @@ -100,7 +70,7 @@ void LCodeGen::FinishCode(Handle<Code> code) { - ASSERT(is_done()); + DCHECK(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); @@ -108,13 +78,6 @@ if (!info()->IsStub()) { Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); } - info()->CommitDependencies(code); -} - - -void LCodeGen::Abort(BailoutReason reason) { - info()->set_bailout_reason(reason); - status_ = ABORTED; } @@ -129,10 +92,9 @@ void LCodeGen::SaveCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Save clobbered callee double registers"); - CpuFeatureScope scope(masm(), SSE2); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -146,10 +108,9 @@ void LCodeGen::RestoreCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Restore clobbered callee double registers"); - CpuFeatureScope scope(masm(), SSE2); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); int count = 0; @@ -163,7 +124,7 @@ bool LCodeGen::GeneratePrologue() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (info()->IsOptimizing()) { ProfileEntryHookStub::MaybeCallEntryHook(masm_); @@ -190,7 +151,7 @@ __ j(not_equal, &ok, Label::kNear); __ mov(ecx, GlobalObjectOperand()); - __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset)); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset)); __ mov(Operand(esp, receiver_offset), ecx); @@ -225,9 +186,13 @@ info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - ASSERT(!frame_is_built_); + DCHECK(!frame_is_built_); 
frame_is_built_ = true; - __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } info()->AddNoFrameRange(0, masm_->pc_offset()); } @@ -240,7 +205,7 @@ // Reserve space for the stack slots needed by the code. int slots = GetStackSlotCount(); - ASSERT(slots != 0 || !info()->IsOptimizing()); + DCHECK(slots != 0 || !info()->IsOptimizing()); if (slots > 0) { if (slots == 1) { if (dynamic_frame_alignment_) { @@ -282,22 +247,23 @@ } } - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { - SaveCallerDoubles(); - } + if (info()->saves_caller_doubles()) SaveCallerDoubles(); } // Possibly allocate a local context. int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is still in edi. if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ push(edi); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } RecordSafepoint(Safepoint::kNoLazyDeopt); // Context is returned in eax. It replaces the context passed to us. @@ -318,11 +284,18 @@ int context_offset = Context::SlotOffset(var->index()); __ mov(Operand(esi, context_offset), eax); // Update the write barrier. This clobbers eax and ebx. 
- __ RecordWriteContextSlot(esi, - context_offset, - eax, - ebx, - kDontSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx, + kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } Comment(";;; End allocate local context"); @@ -384,7 +357,7 @@ // Adjust the frame size, subsuming the unoptimized frame into the // optimized frame. int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); - ASSERT(slots >= 1); + DCHECK(slots >= 1); __ sub(esp, Immediate((slots - 1) * kPointerSize)); } @@ -396,27 +369,10 @@ if (!instr->IsLazyBailout() && !instr->IsGap()) { safepoints_.BumpLastLazySafepointIndex(); } - if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); } -void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { - if (!CpuFeatures::IsSupported(SSE2)) { - if (instr->IsGoto()) { - x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); - } else if (FLAG_debug_code && FLAG_enable_slow_asserts && - !instr->IsGap() && !instr->IsReturn()) { - if (instr->ClobbersDoubleRegisters()) { - if (instr->HasDoubleRegisterResult()) { - ASSERT_EQ(1, x87_stack_.depth()); - } else { - ASSERT_EQ(0, x87_stack_.depth()); - } - } - __ VerifyX87StackDepth(x87_stack_.depth()); - } - } -} +void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { } bool LCodeGen::GenerateJumpTable() { @@ -435,7 +391,7 @@ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); } if (jump_table_[i].needs_frame) { - ASSERT(!info()->saves_caller_doubles()); + DCHECK(!info()->saves_caller_doubles()); __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); if (needs_frame.is_bound()) { __ jmp(&needs_frame); @@ -445,7 +401,7 @@ // This variant of deopt can only be used with stubs. 
Since we don't // have a function pointer to install in the stack frame that we're // building, install a special marker there instead. - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); __ push(Immediate(Smi::FromInt(StackFrame::STUB))); // Push a PC inside the function so that the deopt code can find where // the deopt comes from. It doesn't have to be the precise return @@ -462,9 +418,7 @@ __ ret(0); // Call the continuation without clobbering registers. } } else { - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { - RestoreCallerDoubles(); - } + if (info()->saves_caller_doubles()) RestoreCallerDoubles(); __ call(entry, RelocInfo::RUNTIME_ENTRY); } } @@ -473,12 +427,10 @@ bool LCodeGen::GenerateDeferredCode() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; - X87Stack copy(code->x87_stack()); - x87_stack_ = copy; HValue* value = instructions_->at(code->instruction_index())->hydrogen_value(); @@ -493,8 +445,8 @@ __ bind(code->entry()); if (NeedsDeferredFrame()) { Comment(";;; Build frame"); - ASSERT(!frame_is_built_); - ASSERT(info()->IsStub()); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); frame_is_built_ = true; // Build the frame in such a way that esi isn't trashed. __ push(ebp); // Caller's frame pointer. @@ -507,7 +459,7 @@ if (NeedsDeferredFrame()) { __ bind(code->done()); Comment(";;; Destroy frame"); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); frame_is_built_ = false; __ mov(esp, ebp); __ pop(ebp); @@ -524,7 +476,7 @@ bool LCodeGen::GenerateSafepointTable() { - ASSERT(is_done()); + DCHECK(is_done()); if (!info()->IsStub()) { // For lazy deoptimization we need space to patch a call after every call. 
// Ensure there is always space for such patching, even if the code ends @@ -544,234 +496,19 @@ } -X87Register LCodeGen::ToX87Register(int index) const { - return X87Register::FromAllocationIndex(index); -} - - XMMRegister LCodeGen::ToDoubleRegister(int index) const { return XMMRegister::FromAllocationIndex(index); } -void LCodeGen::X87LoadForUsage(X87Register reg) { - ASSERT(x87_stack_.Contains(reg)); - x87_stack_.Fxch(reg); - x87_stack_.pop(); -} - - -void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { - ASSERT(x87_stack_.Contains(reg1)); - ASSERT(x87_stack_.Contains(reg2)); - x87_stack_.Fxch(reg1, 1); - x87_stack_.Fxch(reg2); - x87_stack_.pop(); - x87_stack_.pop(); -} - - -void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { - ASSERT(is_mutable_); - ASSERT(Contains(reg) && stack_depth_ > other_slot); - int i = ArrayIndex(reg); - int st = st2idx(i); - if (st != other_slot) { - int other_i = st2idx(other_slot); - X87Register other = stack_[other_i]; - stack_[other_i] = reg; - stack_[i] = other; - if (st == 0) { - __ fxch(other_slot); - } else if (other_slot == 0) { - __ fxch(st); - } else { - __ fxch(st); - __ fxch(other_slot); - __ fxch(st); - } - } -} - - -int LCodeGen::X87Stack::st2idx(int pos) { - return stack_depth_ - pos - 1; -} - - -int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { - for (int i = 0; i < stack_depth_; i++) { - if (stack_[i].is(reg)) return i; - } - UNREACHABLE(); - return -1; -} - - -bool LCodeGen::X87Stack::Contains(X87Register reg) { - for (int i = 0; i < stack_depth_; i++) { - if (stack_[i].is(reg)) return true; - } - return false; -} - - -void LCodeGen::X87Stack::Free(X87Register reg) { - ASSERT(is_mutable_); - ASSERT(Contains(reg)); - int i = ArrayIndex(reg); - int st = st2idx(i); - if (st > 0) { - // keep track of how fstp(i) changes the order of elements - int tos_i = st2idx(0); - stack_[i] = stack_[tos_i]; - } - pop(); - __ fstp(st); -} - - -void LCodeGen::X87Mov(X87Register dst, Operand src, 
X87OperandType opts) { - if (x87_stack_.Contains(dst)) { - x87_stack_.Fxch(dst); - __ fstp(0); - } else { - x87_stack_.push(dst); - } - X87Fld(src, opts); -} - - -void LCodeGen::X87Fld(Operand src, X87OperandType opts) { - ASSERT(!src.is_reg_only()); - switch (opts) { - case kX87DoubleOperand: - __ fld_d(src); - break; - case kX87FloatOperand: - __ fld_s(src); - break; - case kX87IntOperand: - __ fild_s(src); - break; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { - ASSERT(!dst.is_reg_only()); - x87_stack_.Fxch(src); - switch (opts) { - case kX87DoubleOperand: - __ fst_d(dst); - break; - case kX87IntOperand: - __ fist_s(dst); - break; - default: - UNREACHABLE(); - } -} - - -void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { - ASSERT(is_mutable_); - if (Contains(reg)) { - Free(reg); - } - // Mark this register as the next register to write to - stack_[stack_depth_] = reg; -} - - -void LCodeGen::X87Stack::CommitWrite(X87Register reg) { - ASSERT(is_mutable_); - // Assert the reg is prepared to write, but not on the virtual stack yet - ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && - stack_depth_ < X87Register::kNumAllocatableRegisters); - stack_depth_++; -} - - -void LCodeGen::X87PrepareBinaryOp( - X87Register left, X87Register right, X87Register result) { - // You need to use DefineSameAsFirst for x87 instructions - ASSERT(result.is(left)); - x87_stack_.Fxch(right, 1); - x87_stack_.Fxch(left); -} - - -void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { - if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) { - bool double_inputs = instr->HasDoubleRegisterInput(); - - // Flush stack from tos down, since FreeX87() will mess with tos - for (int i = stack_depth_-1; i >= 0; i--) { - X87Register reg = stack_[i]; - // Skip registers which contain the inputs for the next instruction - // when flushing the stack - if (double_inputs && 
instr->IsDoubleInput(reg, cgen)) { - continue; - } - Free(reg); - if (i < stack_depth_-1) i++; - } - } - if (instr->IsReturn()) { - while (stack_depth_ > 0) { - __ fstp(0); - stack_depth_--; - } - if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); - } -} - - -void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) { - ASSERT(stack_depth_ <= 1); - // If ever used for new stubs producing two pairs of doubles joined into two - // phis this assert hits. That situation is not handled, since the two stacks - // might have st0 and st1 swapped. - if (current_block_id + 1 != goto_instr->block_id()) { - // If we have a value on the x87 stack on leaving a block, it must be a - // phi input. If the next block we compile is not the join block, we have - // to discard the stack state. - stack_depth_ = 0; - } -} - - -void LCodeGen::EmitFlushX87ForDeopt() { - // The deoptimizer does not support X87 Registers. But as long as we - // deopt from a stub its not a problem, since we will re-materialize the - // original stub inputs, which can't be double registers. 
- ASSERT(info()->IsStub()); - if (FLAG_debug_code && FLAG_enable_slow_asserts) { - __ pushfd(); - __ VerifyX87StackDepth(x87_stack_.depth()); - __ popfd(); - } - for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); -} - - Register LCodeGen::ToRegister(LOperand* op) const { - ASSERT(op->IsRegister()); + DCHECK(op->IsRegister()); return ToRegister(op->index()); } -X87Register LCodeGen::ToX87Register(LOperand* op) const { - ASSERT(op->IsDoubleRegister()); - return ToX87Register(op->index()); -} - - XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - ASSERT(op->IsDoubleRegister()); + DCHECK(op->IsDoubleRegister()); return ToDoubleRegister(op->index()); } @@ -786,28 +523,28 @@ HConstant* constant = chunk_->LookupConstant(op); int32_t value = constant->Integer32Value(); if (r.IsInteger32()) return value; - ASSERT(r.IsSmiOrTagged()); + DCHECK(r.IsSmiOrTagged()); return reinterpret_cast<int32_t>(Smi::FromInt(value)); } Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); return constant->handle(isolate()); } double LCodeGen::ToDouble(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasDoubleValue()); + DCHECK(constant->HasDoubleValue()); return constant->DoubleValue(); } ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasExternalReferenceValue()); + DCHECK(constant->HasExternalReferenceValue()); return constant->ExternalReferenceValue(); } @@ -823,7 +560,7 @@ static int ArgumentsOffsetWithoutFrame(int index) { - ASSERT(index < 0); + DCHECK(index < 0); return -(index + 1) * kPointerSize + kPCOnStackSize; } @@ -831,7 +568,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const { if (op->IsRegister()) return 
Operand(ToRegister(op)); if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op)); - ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return Operand(ebp, StackSlotOffset(op->index())); } else { @@ -843,7 +580,7 @@ Operand LCodeGen::HighOperand(LOperand* op) { - ASSERT(op->IsDoubleStackSlot()); + DCHECK(op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize); } else { @@ -878,13 +615,13 @@ translation->BeginConstructStubFrame(closure_id, translation_size); break; case JS_GETTER: - ASSERT(translation_size == 1); - ASSERT(height == 0); + DCHECK(translation_size == 1); + DCHECK(height == 0); translation->BeginGetterStubFrame(closure_id); break; case JS_SETTER: - ASSERT(translation_size == 2); - ASSERT(height == 0); + DCHECK(translation_size == 2); + DCHECK(height == 0); translation->BeginSetterStubFrame(closure_id); break; case ARGUMENTS_ADAPTOR: @@ -984,7 +721,7 @@ RelocInfo::Mode mode, LInstruction* instr, SafepointMode safepoint_mode) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); __ call(code, mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode); @@ -1008,14 +745,14 @@ int argc, LInstruction* instr, SaveFPRegsMode save_doubles) { - ASSERT(instr != NULL); - ASSERT(instr->HasPointerMap()); + DCHECK(instr != NULL); + DCHECK(instr->HasPointerMap()); __ CallRuntime(fun, argc, save_doubles); RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); - ASSERT(info()->is_calling()); + DCHECK(info()->is_calling()); } @@ -1045,12 +782,13 @@ RecordSafepointWithRegisters( instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); - ASSERT(info()->is_calling()); + DCHECK(info()->is_calling()); } void LCodeGen::RegisterEnvironmentForDeoptimization( LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); if (!environment->HasBeenRegistered()) { // Physical stack frame layout: // -x 
............. -4 0 ..................................... y @@ -1089,9 +827,9 @@ LEnvironment* environment, Deoptimizer::BailoutType bailout_type) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - ASSERT(environment->HasBeenRegistered()); + DCHECK(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - ASSERT(info()->IsOptimizing() || info()->IsStub()); + DCHECK(info()->IsOptimizing() || info()->IsStub()); Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { @@ -1112,7 +850,7 @@ __ mov(Operand::StaticVariable(count), eax); __ pop(eax); __ popfd(); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); __ call(entry, RelocInfo::RUNTIME_ENTRY); __ bind(&no_deopt); __ mov(Operand::StaticVariable(count), eax); @@ -1120,17 +858,6 @@ __ popfd(); } - // Before Instructions which can deopt, we normally flush the x87 stack. But - // we can have inputs or outputs of the current instruction on the stack, - // thus we need to flush them here from the physical stack to leave it in a - // consistent state. 
- if (x87_stack_.depth() > 0) { - Label done; - if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); - EmitFlushX87ForDeopt(); - __ bind(&done); - } - if (info()->ShouldTrapOnDeopt()) { Label done; if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); @@ -1138,7 +865,7 @@ __ bind(&done); } - ASSERT(info()->IsStub() || frame_is_built_); + DCHECK(info()->IsStub() || frame_is_built_); if (cc == no_condition && frame_is_built_) { __ call(entry, RelocInfo::RUNTIME_ENTRY); } else { @@ -1175,7 +902,7 @@ int length = deoptimizations_.length(); if (length == 0) return; Handle<DeoptimizationInputData> data = - factory()->NewDeoptimizationInputData(length, TENURED); + DeoptimizationInputData::New(isolate(), length, 0, TENURED); Handle<ByteArray> translations = translations_.CreateByteArray(isolate()->factory()); @@ -1226,7 +953,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { - ASSERT(deoptimization_literals_.length() == 0); + DCHECK(deoptimization_literals_.length() == 0); const ZoneList<Handle<JSFunction> >* inlined_closures = chunk()->inlined_closures(); @@ -1246,7 +973,7 @@ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); } else { - ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kLazyDeopt); } @@ -1258,7 +985,7 @@ Safepoint::Kind kind, int arguments, Safepoint::DeoptMode deopt_mode) { - ASSERT(kind == expected_safepoint_kind_); + DCHECK(kind == expected_safepoint_kind_); const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); @@ -1345,22 +1072,22 @@ void LCodeGen::DoCallStub(LCallStub* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - 
ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->result()).is(eax)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpExec: { - RegExpExecStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + RegExpExecStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { - SubStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { - StringCompareStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } default: @@ -1377,7 +1104,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister(instr->result()))); + DCHECK(dividend.is(ToRegister(instr->result()))); // Theoretically, a variation of the branch-free code for integer division by // a power of 2 (calculating the remainder via an additional multiplication @@ -1410,7 +1137,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->result()).is(eax)); if (divisor == 0) { DeoptimizeIf(no_condition, instr->environment()); @@ -1438,12 +1165,12 @@ HMod* hmod = instr->hydrogen(); Register left_reg = ToRegister(instr->left()); - ASSERT(left_reg.is(eax)); + DCHECK(left_reg.is(eax)); Register right_reg = ToRegister(instr->right()); - ASSERT(!right_reg.is(eax)); - ASSERT(!right_reg.is(edx)); + DCHECK(!right_reg.is(eax)); + DCHECK(!right_reg.is(edx)); Register result_reg = ToRegister(instr->result()); - 
ASSERT(result_reg.is(edx)); + DCHECK(result_reg.is(edx)); Label done; // Check for x % 0, idiv would signal a divide error. We have to @@ -1493,8 +1220,8 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); - ASSERT(!result.is(dividend)); + DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); // Check for (0 / -x) that will produce negative zero. HDiv* hdiv = instr->hydrogen(); @@ -1530,7 +1257,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(ToRegister(instr->result()).is(edx)); + DCHECK(ToRegister(instr->result()).is(edx)); if (divisor == 0) { DeoptimizeIf(no_condition, instr->environment()); @@ -1556,17 +1283,17 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. void LCodeGen::DoDivI(LDivI* instr) { HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->left()); - Register divisor = ToRegister(instr->right()); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); Register remainder = ToRegister(instr->temp()); - Register result = ToRegister(instr->result()); - ASSERT(dividend.is(eax)); - ASSERT(remainder.is(edx)); - ASSERT(result.is(eax)); - ASSERT(!divisor.is(eax)); - ASSERT(!divisor.is(edx)); + DCHECK(dividend.is(eax)); + DCHECK(remainder.is(edx)); + DCHECK(ToRegister(instr->result()).is(eax)); + DCHECK(!divisor.is(eax)); + DCHECK(!divisor.is(edx)); // Check for x / 0. 
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { @@ -1598,15 +1325,7 @@ __ cdq(); __ idiv(divisor); - if (hdiv->IsMathFloorOfDiv()) { - Label done; - __ test(remainder, remainder); - __ j(zero, &done, Label::kNear); - __ xor_(remainder, divisor); - __ sar(remainder, 31); - __ add(result, remainder); - __ bind(&done); - } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { // Deoptimize if remainder is not 0. __ test(remainder, remainder); DeoptimizeIf(not_zero, instr->environment()); @@ -1617,7 +1336,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister(instr->result()))); + DCHECK(dividend.is(ToRegister(instr->result()))); // If the divisor is positive, things are easy: There can be no deopts and we // can simply do an arithmetic right shift. @@ -1629,22 +1348,29 @@ } // If the divisor is negative, we have to negate and handle edge cases. - Label not_kmin_int, done; __ neg(dividend); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(zero, instr->environment()); } - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - // Note that we could emit branch-free code, but that would need one more - // register. - if (divisor == -1) { + + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { DeoptimizeIf(overflow, instr->environment()); - } else { - __ j(no_overflow, ¬_kmin_int, Label::kNear); - __ mov(dividend, Immediate(kMinInt / divisor)); - __ jmp(&done, Label::kNear); } + return; + } + + // If the negation could not overflow, simply shifting is OK. 
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ sar(dividend, shift); + return; } + + Label not_kmin_int, done; + __ j(no_overflow, ¬_kmin_int, Label::kNear); + __ mov(dividend, Immediate(kMinInt / divisor)); + __ jmp(&done, Label::kNear); __ bind(¬_kmin_int); __ sar(dividend, shift); __ bind(&done); @@ -1654,7 +1380,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(ToRegister(instr->result()).is(edx)); + DCHECK(ToRegister(instr->result()).is(edx)); if (divisor == 0) { DeoptimizeIf(no_condition, instr->environment()); @@ -1680,7 +1406,7 @@ // In the general case we may need to adjust before and after the truncating // division to get a flooring division. Register temp = ToRegister(instr->temp3()); - ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx)); + DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx)); Label needs_adjustment, done; __ cmp(dividend, Immediate(0)); __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear); @@ -1696,6 +1422,59 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. +void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + Register remainder = ToRegister(instr->temp()); + Register result = ToRegister(instr->result()); + DCHECK(dividend.is(eax)); + DCHECK(remainder.is(edx)); + DCHECK(result.is(eax)); + DCHECK(!divisor.is(eax)); + DCHECK(!divisor.is(edx)); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + __ test(divisor, divisor); + DeoptimizeIf(zero, instr->environment()); + } + + // Check for (0 / -x) that will produce negative zero. 
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label dividend_not_zero; + __ test(dividend, dividend); + __ j(not_zero, ÷nd_not_zero, Label::kNear); + __ test(divisor, divisor); + DeoptimizeIf(sign, instr->environment()); + __ bind(÷nd_not_zero); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow)) { + Label dividend_not_min_int; + __ cmp(dividend, kMinInt); + __ j(not_zero, ÷nd_not_min_int, Label::kNear); + __ cmp(divisor, -1); + DeoptimizeIf(zero, instr->environment()); + __ bind(÷nd_not_min_int); + } + + // Sign extend to edx (= remainder). + __ cdq(); + __ idiv(divisor); + + Label done; + __ test(remainder, remainder); + __ j(zero, &done, Label::kNear); + __ xor_(remainder, divisor); + __ sar(remainder, 31); + __ add(result, remainder); + __ bind(&done); +} + + void LCodeGen::DoMulI(LMulI* instr) { Register left = ToRegister(instr->left()); LOperand* right = instr->right(); @@ -1784,8 +1563,8 @@ void LCodeGen::DoBitI(LBitI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); - ASSERT(left->IsRegister()); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); if (right->IsConstantOperand()) { int32_t right_operand = @@ -1831,10 +1610,10 @@ void LCodeGen::DoShiftI(LShiftI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); - ASSERT(left->IsRegister()); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); if (right->IsRegister()) { - ASSERT(ToRegister(right).is(ecx)); + DCHECK(ToRegister(right).is(ecx)); switch (instr->op()) { case Token::ROR: @@ -1879,11 +1658,11 @@ } break; case Token::SHR: - if (shift_count == 0 && instr->can_deopt()) { + if (shift_count != 0) { + __ shr(ToRegister(left), shift_count); + } else if (instr->can_deopt()) { __ test(ToRegister(left), ToRegister(left)); DeoptimizeIf(sign, instr->environment()); - } else { - __ shr(ToRegister(left), 
shift_count); } break; case Token::SHL: @@ -1911,7 +1690,7 @@ void LCodeGen::DoSubI(LSubI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); + DCHECK(left->Equals(instr->result())); if (right->IsConstantOperand()) { __ sub(ToOperand(left), @@ -1940,43 +1719,34 @@ uint64_t int_val = BitCast<uint64_t, double>(v); int32_t lower = static_cast<int32_t>(int_val); int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); - ASSERT(instr->result()->IsDoubleRegister()); + DCHECK(instr->result()->IsDoubleRegister()); - if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { - __ push(Immediate(upper)); - __ push(Immediate(lower)); - X87Register reg = ToX87Register(instr->result()); - X87Mov(reg, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); - } else { - CpuFeatureScope scope1(masm(), SSE2); - XMMRegister res = ToDoubleRegister(instr->result()); - if (int_val == 0) { - __ xorps(res, res); - } else { - Register temp = ToRegister(instr->temp()); - if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatureScope scope2(masm(), SSE4_1); - if (lower != 0) { - __ Move(temp, Immediate(lower)); - __ movd(res, Operand(temp)); - __ Move(temp, Immediate(upper)); - __ pinsrd(res, Operand(temp), 1); - } else { - __ xorps(res, res); - __ Move(temp, Immediate(upper)); - __ pinsrd(res, Operand(temp), 1); - } + XMMRegister res = ToDoubleRegister(instr->result()); + if (int_val == 0) { + __ xorps(res, res); + } else { + Register temp = ToRegister(instr->temp()); + if (CpuFeatures::IsSupported(SSE4_1)) { + CpuFeatureScope scope2(masm(), SSE4_1); + if (lower != 0) { + __ Move(temp, Immediate(lower)); + __ movd(res, Operand(temp)); + __ Move(temp, Immediate(upper)); + __ pinsrd(res, Operand(temp), 1); } else { + __ xorps(res, res); __ Move(temp, Immediate(upper)); - __ movd(res, Operand(temp)); - __ psllq(res, 32); - if (lower != 0) { - XMMRegister xmm_scratch = double_scratch0(); - __ Move(temp, Immediate(lower)); - __ 
movd(xmm_scratch, Operand(temp)); - __ orps(res, xmm_scratch); - } + __ pinsrd(res, Operand(temp), 1); + } + } else { + __ Move(temp, Immediate(upper)); + __ movd(res, Operand(temp)); + __ psllq(res, 32); + if (lower != 0) { + XMMRegister xmm_scratch = double_scratch0(); + __ Move(temp, Immediate(lower)); + __ movd(xmm_scratch, Operand(temp)); + __ orps(res, xmm_scratch); } } } @@ -1990,9 +1760,9 @@ void LCodeGen::DoConstantT(LConstantT* instr) { Register reg = ToRegister(instr->result()); - Handle<Object> handle = instr->value(isolate()); + Handle<Object> object = instr->value(isolate()); AllowDeferredHandleDereference smi_check; - __ LoadObject(reg, handle); + __ LoadObject(reg, object); } @@ -2009,8 +1779,8 @@ Register scratch = ToRegister(instr->temp()); Smi* index = instr->index(); Label runtime, done; - ASSERT(object.is(result)); - ASSERT(object.is(eax)); + DCHECK(object.is(result)); + DCHECK(object.is(eax)); __ test(object, Immediate(kSmiTagMask)); DeoptimizeIf(zero, instr->environment()); @@ -2105,12 +1875,12 @@ if (instr->value()->IsConstantOperand()) { int value = ToRepresentation(LConstantOperand::cast(instr->value()), Representation::Integer32()); - ASSERT_LE(0, value); + DCHECK_LE(0, value); if (encoding == String::ONE_BYTE_ENCODING) { - ASSERT_LE(value, String::kMaxOneByteCharCode); + DCHECK_LE(value, String::kMaxOneByteCharCode); __ mov_b(operand, static_cast<int8_t>(value)); } else { - ASSERT_LE(value, String::kMaxUtf16CodeUnit); + DCHECK_LE(value, String::kMaxUtf16CodeUnit); __ mov_w(operand, static_cast<int16_t>(value)); } } else { @@ -2152,10 +1922,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - CpuFeatureScope scope(masm(), SSE2); LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); + DCHECK(left->Equals(instr->result())); HMathMinMax::Operation operation = instr->hydrogen()->operation(); if (instr->hydrogen()->representation().IsSmiOrInteger32()) { Label return_left; @@ -2178,7 
+1947,7 @@ } __ bind(&return_left); } else { - ASSERT(instr->hydrogen()->representation().IsDouble()); + DCHECK(instr->hydrogen()->representation().IsDouble()); Label check_nan_left, check_zero, return_left, return_right; Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; XMMRegister left_reg = ToDoubleRegister(left); @@ -2215,100 +1984,57 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister left = ToDoubleRegister(instr->left()); - XMMRegister right = ToDoubleRegister(instr->right()); - XMMRegister result = ToDoubleRegister(instr->result()); - switch (instr->op()) { - case Token::ADD: - __ addsd(left, right); - break; - case Token::SUB: - __ subsd(left, right); - break; - case Token::MUL: - __ mulsd(left, right); - break; - case Token::DIV: - __ divsd(left, right); - // Don't delete this mov. It may improve performance on some CPUs, - // when there is a mulsd depending on the result - __ movaps(left, left); - break; - case Token::MOD: { - // Pass two doubles as arguments on the stack. - __ PrepareCallCFunction(4, eax); - __ movsd(Operand(esp, 0 * kDoubleSize), left); - __ movsd(Operand(esp, 1 * kDoubleSize), right); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 4); - - // Return value is in st(0) on ia32. - // Store it into the result register. 
- __ sub(Operand(esp), Immediate(kDoubleSize)); - __ fstp_d(Operand(esp, 0)); - __ movsd(result, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); - break; - } - default: - UNREACHABLE(); - break; - } - } else { - X87Register left = ToX87Register(instr->left()); - X87Register right = ToX87Register(instr->right()); - X87Register result = ToX87Register(instr->result()); - if (instr->op() != Token::MOD) { - X87PrepareBinaryOp(left, right, result); - } - switch (instr->op()) { - case Token::ADD: - __ fadd_i(1); - break; - case Token::SUB: - __ fsub_i(1); - break; - case Token::MUL: - __ fmul_i(1); - break; - case Token::DIV: - __ fdiv_i(1); - break; - case Token::MOD: { - // Pass two doubles as arguments on the stack. - __ PrepareCallCFunction(4, eax); - X87Mov(Operand(esp, 1 * kDoubleSize), right); - X87Mov(Operand(esp, 0), left); - X87Free(right); - ASSERT(left.is(result)); - X87PrepareToWrite(result); - __ CallCFunction( - ExternalReference::mod_two_doubles_operation(isolate()), - 4); - - // Return value is in st(0) on ia32. - X87CommitWrite(result); - break; - } - default: - UNREACHABLE(); - break; + XMMRegister left = ToDoubleRegister(instr->left()); + XMMRegister right = ToDoubleRegister(instr->right()); + XMMRegister result = ToDoubleRegister(instr->result()); + switch (instr->op()) { + case Token::ADD: + __ addsd(left, right); + break; + case Token::SUB: + __ subsd(left, right); + break; + case Token::MUL: + __ mulsd(left, right); + break; + case Token::DIV: + __ divsd(left, right); + // Don't delete this mov. It may improve performance on some CPUs, + // when there is a mulsd depending on the result + __ movaps(left, left); + break; + case Token::MOD: { + // Pass two doubles as arguments on the stack. 
+ __ PrepareCallCFunction(4, eax); + __ movsd(Operand(esp, 0 * kDoubleSize), left); + __ movsd(Operand(esp, 1 * kDoubleSize), right); + __ CallCFunction( + ExternalReference::mod_two_doubles_operation(isolate()), + 4); + + // Return value is in st(0) on ia32. + // Store it into the result register. + __ sub(Operand(esp), Immediate(kDoubleSize)); + __ fstp_d(Operand(esp, 0)); + __ movsd(result, Operand(esp, 0)); + __ add(Operand(esp), Immediate(kDoubleSize)); + break; } + default: + UNREACHABLE(); + break; } } void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->left()).is(edx)); - ASSERT(ToRegister(instr->right()).is(eax)); - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + DCHECK(ToRegister(instr->result()).is(eax)); - BinaryOpICStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -2350,37 +2076,35 @@ __ test(reg, Operand(reg)); EmitBranch(instr, not_zero); } else if (r.IsDouble()) { - ASSERT(!info()->IsStub()); - CpuFeatureScope scope(masm(), SSE2); + DCHECK(!info()->IsStub()); XMMRegister reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(reg, xmm_scratch); EmitBranch(instr, not_equal); } else { - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ cmp(reg, factory()->true_value()); EmitBranch(instr, equal); } else if (type.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ test(reg, Operand(reg)); 
EmitBranch(instr, not_equal); } else if (type.IsJSArray()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitBranch(instr, no_condition); } else if (type.IsHeapNumber()) { - ASSERT(!info()->IsStub()); - CpuFeatureScope scope(masm(), SSE2); + DCHECK(!info()->IsStub()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); EmitBranch(instr, not_equal); } else if (type.IsString()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); EmitBranch(instr, not_equal); } else { @@ -2420,7 +2144,7 @@ Register map = no_reg; // Keep the compiler happy. if (expected.NeedsMap()) { map = ToRegister(instr->temp()); - ASSERT(!map.is(reg)); + DCHECK(!map.is(reg)); __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); if (expected.CanBeUndetectable()) { @@ -2460,16 +2184,9 @@ __ cmp(FieldOperand(reg, HeapObject::kMapOffset), factory()->heap_number_map()); __ j(not_equal, ¬_heap_number, Label::kNear); - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister xmm_scratch = double_scratch0(); - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); - } else { - __ fldz(); - __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); - __ FCmp(); - } + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); __ j(zero, instr->FalseLabel(chunk_)); __ jmp(instr->TrueLabel(chunk_)); __ bind(¬_heap_number); @@ -2492,10 +2209,6 @@ } -void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { -} - - void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } @@ -2536,7 +2249,11 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - 
Condition cc = TokenToCondition(instr->op(), instr->is_double()); + bool is_unsigned = + instr->is_double() || + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cc = TokenToCondition(instr->op(), is_unsigned); if (left->IsConstantOperand() && right->IsConstantOperand()) { // We can statically evaluate the comparison. @@ -2547,13 +2264,7 @@ EmitGoto(next_block); } else { if (instr->is_double()) { - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); - } else { - X87LoadForUsage(ToX87Register(right), ToX87Register(left)); - __ FCmp(); - } + __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); // Don't base result on EFLAGS when a NaN is involved. Instead // jump to the false block. __ j(parity_even, instr->FalseLabel(chunk_)); @@ -2564,8 +2275,8 @@ } else if (left->IsConstantOperand()) { __ cmp(ToOperand(right), ToImmediate(left, instr->hydrogen()->representation())); - // We transposed the operands. Reverse the condition. - cc = ReverseCondition(cc); + // We commuted the operands, so commute the condition. 
+ cc = CommuteCondition(cc); } else { __ cmp(ToRegister(left), ToOperand(right)); } @@ -2597,35 +2308,12 @@ return; } - bool use_sse2 = CpuFeatures::IsSupported(SSE2); - if (use_sse2) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister input_reg = ToDoubleRegister(instr->object()); - __ ucomisd(input_reg, input_reg); - EmitFalseBranch(instr, parity_odd); - } else { - // Put the value to the top of stack - X87Register src = ToX87Register(instr->object()); - X87LoadForUsage(src); - __ fld(0); - __ fld(0); - __ FCmp(); - Label ok; - __ j(parity_even, &ok, Label::kNear); - __ fstp(0); - EmitFalseBranch(instr, no_condition); - __ bind(&ok); - } - + XMMRegister input_reg = ToDoubleRegister(instr->object()); + __ ucomisd(input_reg, input_reg); + EmitFalseBranch(instr, parity_odd); __ sub(esp, Immediate(kDoubleSize)); - if (use_sse2) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister input_reg = ToDoubleRegister(instr->object()); - __ movsd(MemOperand(esp, 0), input_reg); - } else { - __ fstp_d(MemOperand(esp, 0)); - } + __ movsd(MemOperand(esp, 0), input_reg); __ add(esp, Immediate(kDoubleSize)); int offset = sizeof(kHoleNanUpper32); @@ -2636,11 +2324,10 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { Representation rep = instr->hydrogen()->value()->representation(); - ASSERT(!rep.IsInteger32()); + DCHECK(!rep.IsInteger32()); Register scratch = ToRegister(instr->temp()); if (rep.IsDouble()) { - CpuFeatureScope use_sse2(masm(), SSE2); XMMRegister value = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); @@ -2716,7 +2403,7 @@ Register temp = ToRegister(instr->temp()); SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; Condition true_cond = EmitIsString( @@ -2738,7 +2425,7 @@ Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } @@ -2786,7 +2473,7 @@ InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; - ASSERT(from == to || to == LAST_TYPE); + DCHECK(from == to || to == LAST_TYPE); return from; } @@ -2806,7 +2493,7 @@ Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } @@ -2844,9 +2531,9 @@ Register input, Register temp, Register temp2) { - ASSERT(!input.is(temp)); - ASSERT(!input.is(temp2)); - ASSERT(!temp.is(temp2)); + DCHECK(!input.is(temp)); + DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); __ JumpIfSmi(input, is_false); if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { @@ -2924,9 +2611,9 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { // Object and function are in fixed registers defined by the stub. 
- ASSERT(ToRegister(instr->context()).is(esi)); - InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + DCHECK(ToRegister(instr->context()).is(esi)); + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); Label true_value, done; __ test(eax, Operand(eax)); @@ -2943,9 +2630,8 @@ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { public: DeferredInstanceOfKnownGlobal(LCodeGen* codegen, - LInstanceOfKnownGlobal* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + LInstanceOfKnownGlobal* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); } @@ -2957,7 +2643,7 @@ }; DeferredInstanceOfKnownGlobal* deferred; - deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_); + deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); Label done, false_result; Register object = ToRegister(instr->value()); @@ -3014,20 +2700,20 @@ flags | InstanceofStub::kCallSiteInlineCheck); flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kReturnTrueFalseObject); - InstanceofStub stub(flags); + InstanceofStub stub(isolate(), flags); // Get the temp register reserved by the instruction. This needs to be a // register which is pushed last by PushSafepointRegisters as top of the // stack is used to pass the offset to the location of the map check to // the stub. 
Register temp = ToRegister(instr->temp()); - ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0); + DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0); __ LoadHeapObject(InstanceofStub::right(), instr->function()); static const int kAdditionalDelta = 13; int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; __ mov(temp, Immediate(delta)); __ StoreToSafepointRegisterSlot(temp, temp); - CallCodeGeneric(stub.GetCode(isolate()), + CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -3077,7 +2763,7 @@ __ SmiUntag(reg); Register return_addr_reg = reg.is(ecx) ? ebx : ecx; if (dynamic_frame_alignment && FLAG_debug_code) { - ASSERT(extra_value_count == 2); + DCHECK(extra_value_count == 2); __ cmp(Operand(esp, reg, times_pointer_size, extra_value_count * kPointerSize), Immediate(kAlignmentZapValue)); @@ -3106,9 +2792,7 @@ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ CallRuntime(Runtime::kTraceExit, 1); } - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { - RestoreCallerDoubles(); - } + if (info()->saves_caller_doubles()) RestoreCallerDoubles(); if (dynamic_frame_alignment_) { // Fetch the state of the dynamic frame alignment. 
__ mov(edx, Operand(ebp, @@ -3147,11 +2831,20 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->global_object()).is(edx)); - ASSERT(ToRegister(instr->result()).is(eax)); - - __ mov(ecx, instr->name()); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(LoadIC::NameRegister(), instr->name()); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(eax)); + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(instr->hydrogen()->slot()))); + } ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -3215,7 +2908,7 @@ __ mov(target, value); if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; Register temp = ToRegister(instr->temp()); int offset = Context::SlotOffset(instr->slot_index()); @@ -3223,7 +2916,7 @@ offset, value, temp, - GetSaveFPRegsMode(), + kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed); } @@ -3248,13 +2941,8 @@ Register object = ToRegister(instr->object()); if (instr->hydrogen()->representation().IsDouble()) { - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister result = ToDoubleRegister(instr->result()); - __ movsd(result, FieldOperand(object, offset)); - } else { - X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); - } + XMMRegister result = ToDoubleRegister(instr->result()); + __ movsd(result, FieldOperand(object, offset)); return; } @@ -3268,7 +2956,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { - ASSERT(!operand->IsDoubleRegister()); + DCHECK(!operand->IsDoubleRegister()); if (operand->IsConstantOperand()) { Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); AllowDeferredHandleDereference smi_check; @@ -3286,11 +2974,20 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->object()).is(edx)); - ASSERT(ToRegister(instr->result()).is(eax)); - - __ mov(ecx, instr->name()); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(LoadIC::NameRegister(), instr->name()); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. 
+ DCHECK(LoadIC::SlotRegister().is(eax)); + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(instr->hydrogen()->slot()))); + } Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -3301,16 +2998,6 @@ Register temp = ToRegister(instr->temp()); Register result = ToRegister(instr->result()); - // Check that the function really is a function. - __ CmpObjectType(function, JS_FUNCTION_TYPE, result); - DeoptimizeIf(not_equal, instr->environment()); - - // Check whether the function has an instance prototype. - Label non_instance; - __ test_b(FieldOperand(result, Map::kBitFieldOffset), - 1 << Map::kHasNonInstancePrototype); - __ j(not_zero, &non_instance, Label::kNear); - // Get the prototype or initial map from the function. __ mov(result, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); @@ -3326,12 +3013,6 @@ // Get the prototype from the initial map. __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); - __ jmp(&done, Label::kNear); - - // Non-instance prototype: Fetch prototype from constructor field - // in the function's map. - __ bind(&non_instance); - __ mov(result, FieldOperand(result, Map::kConstructorOffset)); // All done. 
__ bind(&done); @@ -3377,26 +3058,15 @@ key, instr->hydrogen()->key()->representation(), elements_kind, - 0, - instr->additional_index())); + instr->base_offset())); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS) { - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister result(ToDoubleRegister(instr->result())); - __ movss(result, operand); - __ cvtss2sd(result, result); - } else { - X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); - } + XMMRegister result(ToDoubleRegister(instr->result())); + __ movss(result, operand); + __ cvtss2sd(result, result); } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - __ movsd(ToDoubleRegister(instr->result()), operand); - } else { - X87Mov(ToX87Register(instr->result()), operand); - } + __ movsd(ToDoubleRegister(instr->result()), operand); } else { Register result(ToRegister(instr->result())); switch (elements_kind) { @@ -3451,14 +3121,11 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { if (instr->hydrogen()->RequiresHoleCheck()) { - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); Operand hole_check_operand = BuildFastArrayOperand( instr->elements(), instr->key(), instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, - offset, - instr->additional_index()); + instr->base_offset() + sizeof(kHoleNanLower32)); __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); DeoptimizeIf(equal, instr->environment()); } @@ -3468,15 +3135,9 @@ instr->key(), instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister result = ToDoubleRegister(instr->result()); - __ movsd(result, 
double_load_operand); - } else { - X87Mov(ToX87Register(instr->result()), double_load_operand); - } + instr->base_offset()); + XMMRegister result = ToDoubleRegister(instr->result()); + __ movsd(result, double_load_operand); } @@ -3489,8 +3150,7 @@ instr->key(), instr->hydrogen()->key()->representation(), FAST_ELEMENTS, - FixedArray::kHeaderSize - kHeapObjectTag, - instr->additional_index())); + instr->base_offset())); // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { @@ -3521,13 +3181,9 @@ LOperand* key, Representation key_representation, ElementsKind elements_kind, - uint32_t offset, - uint32_t additional_index) { + uint32_t base_offset) { Register elements_pointer_reg = ToRegister(elements_pointer); int element_shift_size = ElementsKindToShiftSize(elements_kind); - if (IsFixedTypedArrayElementsKind(elements_kind)) { - offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag; - } int shift_size = element_shift_size; if (key->IsConstantOperand()) { int constant_value = ToInteger32(LConstantOperand::cast(key)); @@ -3535,8 +3191,8 @@ Abort(kArrayIndexConstantValueTooBig); } return Operand(elements_pointer_reg, - ((constant_value + additional_index) << shift_size) - + offset); + ((constant_value) << shift_size) + + base_offset); } else { // Take the tag bit into account while computing the shift size. 
if (key_representation.IsSmi() && (shift_size >= 1)) { @@ -3546,15 +3202,25 @@ return Operand(elements_pointer_reg, ToRegister(key), scale_factor, - offset + (additional_index << element_shift_size)); + base_offset); } } void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->object()).is(edx)); - ASSERT(ToRegister(instr->key()).is(ecx)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(eax)); + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(instr->hydrogen()->slot()))); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -3655,8 +3321,8 @@ __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); __ mov(receiver, Operand(receiver, global_offset)); - const int receiver_offset = GlobalObject::kGlobalReceiverOffset; - __ mov(receiver, FieldOperand(receiver, receiver_offset)); + const int proxy_offset = GlobalObject::kGlobalProxyOffset; + __ mov(receiver, FieldOperand(receiver, proxy_offset)); __ bind(&receiver_ok); } @@ -3666,9 +3332,9 @@ Register function = ToRegister(instr->function()); Register length = ToRegister(instr->length()); Register elements = ToRegister(instr->elements()); - ASSERT(receiver.is(eax)); // Used for parameter count. - ASSERT(function.is(edi)); // Required by InvokeFunction. - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(receiver.is(eax)); // Used for parameter count. 
+ DCHECK(function.is(edi)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).is(eax)); // Copy the arguments to this function possibly from the // adaptor frame below it. @@ -3692,7 +3358,7 @@ // Invoke the function. __ bind(&invoke); - ASSERT(instr->HasPointerMap()); + DCHECK(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); @@ -3729,17 +3395,17 @@ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); } else { // If there is no frame, the context must be in esi. - ASSERT(result.is(esi)); + DCHECK(result.is(esi)); } } void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); __ push(esi); // The context is the first argument. __ push(Immediate(instr->hydrogen()->pairs())); __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); - CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); } @@ -3787,7 +3453,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->result()).is(eax)); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -3798,7 +3464,7 @@ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); __ call(code, RelocInfo::CODE_TARGET); } else { - ASSERT(instr->target()->IsRegister()); + DCHECK(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(Operand(target))); __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); @@ -3809,8 +3475,8 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { - ASSERT(ToRegister(instr->function()).is(edi)); - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->function()).is(edi)); + 
DCHECK(ToRegister(instr->result()).is(eax)); if (instr->hydrogen()->pass_argument_count()) { __ mov(eax, instr->arity()); @@ -3863,7 +3529,7 @@ // Slow case: Call the runtime system to do the number allocation. __ bind(&slow); - CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, instr->context()); // Set the pointer to the new heap number in tmp. if (!tmp.is(eax)) __ mov(tmp, eax); @@ -3898,9 +3564,8 @@ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { public: DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, - LMathAbs* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + LMathAbs* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } @@ -3909,10 +3574,9 @@ LMathAbs* instr_; }; - ASSERT(instr->value()->Equals(instr->result())); + DCHECK(instr->value()->Equals(instr->result())); Representation r = instr->hydrogen()->value()->representation(); - CpuFeatureScope scope(masm(), SSE2); if (r.IsDouble()) { XMMRegister scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -3923,7 +3587,7 @@ EmitIntegerMathAbs(instr); } else { // Tagged case. DeferredMathAbsTaggedHeapNumber* deferred = - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); + new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); Register input_reg = ToRegister(instr->value()); // Smi check. 
__ JumpIfNotSmi(input_reg, deferred->entry()); @@ -3934,7 +3598,6 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { - CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = double_scratch0(); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -4000,7 +3663,6 @@ void LCodeGen::DoMathRound(LMathRound* instr) { - CpuFeatureScope scope(masm(), SSE2); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); @@ -4062,20 +3724,26 @@ } -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { - CpuFeatureScope scope(masm(), SSE2); +void LCodeGen::DoMathFround(LMathFround* instr) { XMMRegister input_reg = ToDoubleRegister(instr->value()); - ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); - __ sqrtsd(input_reg, input_reg); + XMMRegister output_reg = ToDoubleRegister(instr->result()); + __ cvtsd2ss(output_reg, input_reg); + __ cvtss2sd(output_reg, output_reg); +} + + +void LCodeGen::DoMathSqrt(LMathSqrt* instr) { + Operand input = ToOperand(instr->value()); + XMMRegister output = ToDoubleRegister(instr->result()); + __ sqrtsd(output, input); } void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = ToRegister(instr->temp()); - ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); + DCHECK(ToDoubleRegister(instr->result()).is(input_reg)); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity @@ -4109,15 +3777,15 @@ Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
- ASSERT(!instr->right()->IsDoubleRegister() || + DCHECK(!instr->right()->IsDoubleRegister() || ToDoubleRegister(instr->right()).is(xmm1)); - ASSERT(!instr->right()->IsRegister() || + DCHECK(!instr->right()->IsRegister() || ToRegister(instr->right()).is(eax)); - ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); - ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); + DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); + DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); if (exponent_type.IsSmi()) { - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; @@ -4125,22 +3793,21 @@ __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); DeoptimizeIf(not_equal, instr->environment()); __ bind(&no_deopt); - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { - MathPowStub stub(MathPowStub::INTEGER); + MathPowStub stub(isolate(), MathPowStub::INTEGER); __ CallStub(&stub); } else { - ASSERT(exponent_type.IsDouble()); - MathPowStub stub(MathPowStub::DOUBLE); + DCHECK(exponent_type.IsDouble()); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStub(&stub); } } void LCodeGen::DoMathLog(LMathLog* instr) { - CpuFeatureScope scope(masm(), SSE2); - ASSERT(instr->value()->Equals(instr->result())); + DCHECK(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); Label positive, done, zero; @@ -4171,7 +3838,6 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) { - CpuFeatureScope scope(masm(), SSE2); Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); Label not_zero_input; @@ -4186,7 +3852,6 @@ void LCodeGen::DoMathExp(LMathExp* instr) { - CpuFeatureScope scope(masm(), SSE2); XMMRegister input = ToDoubleRegister(instr->value()); XMMRegister result = 
ToDoubleRegister(instr->result()); XMMRegister temp0 = double_scratch0(); @@ -4198,9 +3863,9 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->function()).is(edi)); - ASSERT(instr->HasPointerMap()); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { @@ -4220,33 +3885,33 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->function()).is(edi)); - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); - CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->constructor()).is(edi)); - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->constructor()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); // No cell in ebx for construct type feedback in optimized code __ mov(ebx, isolate()->factory()->undefined_value()); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); __ Move(eax, Immediate(instr->arity())); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - 
ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->constructor()).is(edi)); - ASSERT(ToRegister(instr->result()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->constructor()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); __ Move(eax, Immediate(instr->arity())); __ mov(ebx, isolate()->factory()->undefined_value()); @@ -4257,8 +3922,8 @@ : DONT_OVERRIDE; if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { @@ -4270,24 +3935,26 @@ __ j(zero, &packed_case, Label::kNear); ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ jmp(&done, Label::kNear); __ bind(&packed_case); } - ArraySingleArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ bind(&done); } else { - ArrayNArgumentsConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } } void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); 
CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); } @@ -4314,13 +3981,13 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { - Representation representation = instr->representation(); + Representation representation = instr->hydrogen()->field_representation(); HObjectAccess access = instr->hydrogen()->access(); int offset = access.offset(); if (access.IsExternalMemory()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); MemOperand operand = instr->object()->IsConstantOperand() ? MemOperand::StaticVariable( ToExternalReference(LConstantOperand::cast(instr->object()))) @@ -4336,61 +4003,29 @@ } Register object = ToRegister(instr->object()); - Handle<Map> transition = instr->transition(); - SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - ASSERT(!(representation.IsSmi() && - instr->value()->IsConstantOperand() && - !IsSmi(LConstantOperand::cast(instr->value())))); - if (representation.IsHeapObject()) { - if (instr->value()->IsConstantOperand()) { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (chunk_->LookupConstant(operand_value)->HasSmiValue()) { - DeoptimizeIf(no_condition, instr->environment()); - } - } else { - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - Register value = ToRegister(instr->value()); - __ test(value, Immediate(kSmiTagMask)); - DeoptimizeIf(zero, instr->environment()); + __ AssertNotSmi(object); - // We know that value is a smi now, so we can omit the check below. 
- check_needed = OMIT_SMI_CHECK; - } - } - } else if (representation.IsDouble()) { - ASSERT(transition.is_null()); - ASSERT(access.IsInobject()); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister value = ToDoubleRegister(instr->value()); - __ movsd(FieldOperand(object, offset), value); - } else { - X87Register value = ToX87Register(instr->value()); - X87Mov(FieldOperand(object, offset), value); - } + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsSmi(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + XMMRegister value = ToDoubleRegister(instr->value()); + __ movsd(FieldOperand(object, offset), value); return; } - if (!transition.is_null()) { - if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { - __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); - } else { + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); + __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); + if (instr->hydrogen()->NeedsWriteBarrierForMap()) { Register temp = ToRegister(instr->temp()); Register temp_map = ToRegister(instr->temp_map()); - __ mov(temp_map, transition); - __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); // Update the write barrier for the map field. 
- __ RecordWriteField(object, - HeapObject::kMapOffset, - temp_map, - temp, - GetSaveFPRegsMode(), - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs); } } @@ -4409,11 +4044,11 @@ __ Store(value, operand, representation); } else if (representation.IsInteger32()) { Immediate immediate = ToImmediate(operand_value, representation); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); __ mov(operand, immediate); } else { Handle<Object> handle_value = ToHandle(operand_value); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); __ mov(operand, handle_value); } } else { @@ -4429,52 +4064,46 @@ offset, value, temp, - GetSaveFPRegsMode(), + kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); } } void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->object()).is(edx)); - ASSERT(ToRegister(instr->value()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); - __ mov(ecx, instr->name()); + __ mov(StoreIC::NameRegister(), instr->name()); Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); CallCode(ic, RelocInfo::CODE_TARGET, instr); } -void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { - if (FLAG_debug_code && check->hydrogen()->skip_check()) { +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? 
above : above_equal; + if (instr->index()->IsConstantOperand()) { + __ cmp(ToOperand(instr->length()), + ToImmediate(LConstantOperand::cast(instr->index()), + instr->hydrogen()->length()->representation())); + cc = CommuteCondition(cc); + } else if (instr->length()->IsConstantOperand()) { + __ cmp(ToOperand(instr->index()), + ToImmediate(LConstantOperand::cast(instr->length()), + instr->hydrogen()->index()->representation())); + } else { + __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { Label done; __ j(NegateCondition(cc), &done, Label::kNear); __ int3(); __ bind(&done); } else { - DeoptimizeIf(cc, check->environment()); - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return; - - if (instr->index()->IsConstantOperand()) { - Immediate immediate = - ToImmediate(LConstantOperand::cast(instr->index()), - instr->hydrogen()->length()->representation()); - __ cmp(ToOperand(instr->length()), immediate); - Condition condition = - instr->hydrogen()->allow_equality() ? below : below_equal; - ApplyCheckIf(condition, instr); - } else { - __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); - Condition condition = - instr->hydrogen()->allow_equality() ? 
above : above_equal; - ApplyCheckIf(condition, instr); + DeoptimizeIf(cc, instr->environment()); } } @@ -4492,27 +4121,15 @@ key, instr->hydrogen()->key()->representation(), elements_kind, - 0, - instr->additional_index())); + instr->base_offset())); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS) { - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister xmm_scratch = double_scratch0(); - __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); - __ movss(operand, xmm_scratch); - } else { - __ fld(0); - __ fstp_s(operand); - } + XMMRegister xmm_scratch = double_scratch0(); + __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); + __ movss(operand, xmm_scratch); } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - __ movsd(operand, ToDoubleRegister(instr->value())); - } else { - X87Mov(operand, ToX87Register(instr->value())); - } + __ movsd(operand, ToDoubleRegister(instr->value())); } else { Register value = ToRegister(instr->value()); switch (elements_kind) { @@ -4563,71 +4180,21 @@ instr->key(), instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); - - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister value = ToDoubleRegister(instr->value()); + instr->base_offset()); - if (instr->NeedsCanonicalization()) { - Label have_value; + XMMRegister value = ToDoubleRegister(instr->value()); - __ ucomisd(value, value); - __ j(parity_odd, &have_value, Label::kNear); // NaN. 
+ if (instr->NeedsCanonicalization()) { + Label have_value; - __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); - __ bind(&have_value); - } - - __ movsd(double_store_operand, value); - } else { - // Can't use SSE2 in the serializer - if (instr->hydrogen()->IsConstantHoleStore()) { - // This means we should store the (double) hole. No floating point - // registers required. - double nan_double = FixedDoubleArray::hole_nan_as_double(); - uint64_t int_val = BitCast<uint64_t, double>(nan_double); - int32_t lower = static_cast<int32_t>(int_val); - int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); - - __ mov(double_store_operand, Immediate(lower)); - Operand double_store_operand2 = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize, - instr->additional_index()); - __ mov(double_store_operand2, Immediate(upper)); - } else { - Label no_special_nan_handling; - X87Register value = ToX87Register(instr->value()); - X87Fxch(value); - - if (instr->NeedsCanonicalization()) { - __ fld(0); - __ fld(0); - __ FCmp(); - - __ j(parity_odd, &no_special_nan_handling, Label::kNear); - __ sub(esp, Immediate(kDoubleSize)); - __ fst_d(MemOperand(esp, 0)); - __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), - Immediate(kHoleNanUpper32)); - __ add(esp, Immediate(kDoubleSize)); - Label canonicalize; - __ j(not_equal, &canonicalize, Label::kNear); - __ jmp(&no_special_nan_handling, Label::kNear); - __ bind(&canonicalize); - __ fstp(0); - __ fld_d(Operand::StaticVariable(canonical_nan_reference)); - } + __ ucomisd(value, value); + __ j(parity_odd, &have_value, Label::kNear); // NaN. 
- __ bind(&no_special_nan_handling); - __ fst_d(double_store_operand); - } + __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); + __ bind(&have_value); } + + __ movsd(double_store_operand, value); } @@ -4640,8 +4207,7 @@ instr->key(), instr->hydrogen()->key()->representation(), FAST_ELEMENTS, - FixedArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); + instr->base_offset()); if (instr->value()->IsRegister()) { __ mov(operand, ToRegister(instr->value())); } else { @@ -4650,27 +4216,28 @@ Immediate immediate = ToImmediate(operand_value, Representation::Smi()); __ mov(operand, immediate); } else { - ASSERT(!IsInteger32(operand_value)); + DCHECK(!IsInteger32(operand_value)); Handle<Object> handle_value = ToHandle(operand_value); __ mov(operand, handle_value); } } if (instr->hydrogen()->NeedsWriteBarrier()) { - ASSERT(instr->value()->IsRegister()); + DCHECK(instr->value()->IsRegister()); Register value = ToRegister(instr->value()); - ASSERT(!instr->key()->IsConstantOperand()); + DCHECK(!instr->key()->IsConstantOperand()); SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. 
__ lea(key, operand); __ RecordWrite(elements, key, value, - GetSaveFPRegsMode(), + kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); } } @@ -4688,10 +4255,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->object()).is(edx)); - ASSERT(ToRegister(instr->key()).is(ecx)); - ASSERT(ToRegister(instr->value()).is(eax)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); Handle<Code> ic = instr->strict_mode() == STRICT ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() @@ -4730,22 +4297,20 @@ __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), Immediate(to_map)); // Write barrier. - ASSERT_NE(instr->temp(), NULL); + DCHECK_NE(instr->temp(), NULL); __ RecordWriteForMap(object_reg, to_map, new_map_reg, ToRegister(instr->temp()), kDontSaveFPRegs); } else { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(object_reg.is(eax)); PushSafepointRegistersScope scope(this); - if (!object_reg.is(eax)) { - __ mov(eax, object_reg); - } __ mov(ebx, to_map); bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; - TransitionElementsKindStub stub(from_kind, to_kind, is_js_array); + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); __ CallStub(&stub); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + RecordSafepointWithLazyDeopt(instr, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); } __ bind(¬_applicable); } @@ -4755,9 +4320,8 @@ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { public: DeferredStringCharCodeAt(LCodeGen* codegen, - LStringCharCodeAt* instr, - 
const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + LStringCharCodeAt* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); } @@ -4767,7 +4331,7 @@ }; DeferredStringCharCodeAt* deferred = - new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); + new(zone()) DeferredStringCharCodeAt(this, instr); StringCharLoadGenerator::Generate(masm(), factory(), @@ -4802,7 +4366,7 @@ __ SmiTag(index); __ push(index); } - CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, instr->context()); __ AssertSmi(eax); __ SmiUntag(eax); @@ -4814,9 +4378,8 @@ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { public: DeferredStringCharFromCode(LCodeGen* codegen, - LStringCharFromCode* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + LStringCharFromCode* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredStringCharFromCode(instr_); } @@ -4826,12 +4389,12 @@ }; DeferredStringCharFromCode* deferred = - new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); + new(zone()) DeferredStringCharFromCode(this, instr); - ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); Register result = ToRegister(instr->result()); - ASSERT(!char_code.is(result)); + DCHECK(!char_code.is(result)); __ cmp(char_code, String::kMaxOneByteCharCode); __ j(above, deferred->entry()); @@ -4863,50 +4426,29 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); - ASSERT(ToRegister(instr->left()).is(edx)); - ASSERT(ToRegister(instr->right()).is(eax)); - StringAddStub stub(instr->hydrogen()->flags(), + 
DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* input = instr->value(); LOperand* output = instr->result(); - ASSERT(input->IsRegister() || input->IsStackSlot()); - ASSERT(output->IsDoubleRegister()); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); - } else if (input->IsRegister()) { - Register input_reg = ToRegister(input); - __ push(input_reg); - X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); - __ pop(input_reg); - } else { - X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); - } + DCHECK(input->IsRegister() || input->IsStackSlot()); + DCHECK(output->IsDoubleRegister()); + __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); } void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { LOperand* input = instr->value(); LOperand* output = instr->result(); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - LOperand* temp = instr->temp(); - - __ LoadUint32(ToDoubleRegister(output), - ToRegister(input), - ToDoubleRegister(temp)); - } else { - X87Register res = ToX87Register(output); - X87PrepareToWrite(res); - __ LoadUint32NoSSE2(ToRegister(input)); - X87CommitWrite(res); - } + __ LoadUint32(ToDoubleRegister(output), ToRegister(input)); } @@ -4914,12 +4456,11 @@ class DeferredNumberTagI V8_FINAL : public LDeferredCode { public: DeferredNumberTagI(LCodeGen* codegen, - LNumberTagI* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + LNumberTagI* instr) + : LDeferredCode(codegen), 
instr_(instr) { } virtual void Generate() V8_OVERRIDE { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), - NULL, SIGNED_INT32); + codegen()->DoDeferredNumberTagIU( + instr_, instr_->value(), instr_->temp(), SIGNED_INT32); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: @@ -4927,11 +4468,11 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister() && input->Equals(instr->result())); + DCHECK(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); DeferredNumberTagI* deferred = - new(zone()) DeferredNumberTagI(this, instr, x87_stack_); + new(zone()) DeferredNumberTagI(this, instr); __ SmiTag(reg); __ j(overflow, deferred->entry()); __ bind(deferred->exit()); @@ -4941,13 +4482,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { class DeferredNumberTagU V8_FINAL : public LDeferredCode { public: - DeferredNumberTagU(LCodeGen* codegen, - LNumberTagU* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), - instr_->temp2(), UNSIGNED_INT32); + codegen()->DoDeferredNumberTagIU( + instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: @@ -4955,11 +4494,11 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister() && input->Equals(instr->result())); + DCHECK(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); DeferredNumberTagU* deferred = - new(zone()) DeferredNumberTagU(this, instr, x87_stack_); + new(zone()) DeferredNumberTagU(this, instr); __ cmp(reg, Immediate(Smi::kMaxValue)); __ j(above, deferred->entry()); __ SmiTag(reg); @@ -4969,12 +4508,11 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* 
instr, LOperand* value, - LOperand* temp1, - LOperand* temp2, + LOperand* temp, IntegerSignedness signedness) { Label done, slow; Register reg = ToRegister(value); - Register tmp = ToRegister(temp1); + Register tmp = ToRegister(temp); XMMRegister xmm_scratch = double_scratch0(); if (signedness == SIGNED_INT32) { @@ -4983,27 +4521,9 @@ // the value in there. If that fails, call the runtime system. __ SmiUntag(reg); __ xor_(reg, 0x80000000); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope feature_scope(masm(), SSE2); - __ Cvtsi2sd(xmm_scratch, Operand(reg)); - } else { - __ push(reg); - __ fild_s(Operand(esp, 0)); - __ pop(reg); - } + __ Cvtsi2sd(xmm_scratch, Operand(reg)); } else { - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope feature_scope(masm(), SSE2); - __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2)); - } else { - // There's no fild variant for unsigned values, so zero-extend to a 64-bit - // int manually. - __ push(Immediate(0)); - __ push(reg); - __ fild_d(Operand(esp, 0)); - __ pop(reg); - __ pop(reg); - } + __ LoadUint32(xmm_scratch, reg); } if (FLAG_inline_new) { @@ -5024,11 +4544,11 @@ // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(reg, eax); @@ -5037,22 +4557,15 @@ // Done. Put the value in xmm_scratch into the value of the allocated heap // number. 
__ bind(&done); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope feature_scope(masm(), SSE2); - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); - } else { - __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); - } + __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); } void LCodeGen::DoNumberTagD(LNumberTagD* instr) { class DeferredNumberTagD V8_FINAL : public LDeferredCode { public: - DeferredNumberTagD(LCodeGen* codegen, - LNumberTagD* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); } @@ -5063,15 +4576,8 @@ Register reg = ToRegister(instr->result()); - bool use_sse2 = CpuFeatures::IsSupported(SSE2); - if (!use_sse2) { - // Put the value to the top of stack - X87Register src = ToX87Register(instr->value()); - X87LoadForUsage(src); - } - DeferredNumberTagD* deferred = - new(zone()) DeferredNumberTagD(this, instr, x87_stack_); + new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { Register tmp = ToRegister(instr->temp()); __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); @@ -5079,13 +4585,8 @@ __ jmp(deferred->entry()); } __ bind(deferred->exit()); - if (use_sse2) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister input_reg = ToDoubleRegister(instr->value()); - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); - } else { - __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); - } + XMMRegister input_reg = ToDoubleRegister(instr->value()); + __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); } @@ -5099,11 +4600,11 @@ PushSafepointRegistersScope scope(this); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. 
- // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(reg, eax); @@ -5129,7 +4630,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { LOperand* input = instr->value(); Register result = ToRegister(input); - ASSERT(input->IsRegister() && input->Equals(instr->result())); + DCHECK(input->IsRegister() && input->Equals(instr->result())); if (instr->needs_check()) { __ test(result, Immediate(kSmiTagMask)); DeoptimizeIf(not_zero, instr->environment()); @@ -5140,76 +4641,6 @@ } -void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, - Register temp_reg, - X87Register res_reg, - bool can_convert_undefined_to_nan, - bool deoptimize_on_minus_zero, - LEnvironment* env, - NumberUntagDMode mode) { - Label load_smi, done; - - X87PrepareToWrite(res_reg); - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { - // Smi check. - __ JumpIfSmi(input_reg, &load_smi, Label::kNear); - - // Heap number map check. - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - if (!can_convert_undefined_to_nan) { - DeoptimizeIf(not_equal, env); - } else { - Label heap_number, convert; - __ j(equal, &heap_number, Label::kNear); - - // Convert undefined (or hole) to NaN. - __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, env); - - __ bind(&convert); - ExternalReference nan = - ExternalReference::address_of_canonical_non_hole_nan(); - __ fld_d(Operand::StaticVariable(nan)); - __ jmp(&done, Label::kNear); - - __ bind(&heap_number); - } - // Heap number to x87 conversion. 
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ fldz(); - __ FCmp(); - __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ j(not_zero, &done, Label::kNear); - - // Use general purpose registers to check if we have -0.0 - __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - __ test(temp_reg, Immediate(HeapNumber::kSignMask)); - __ j(zero, &done, Label::kNear); - - // Pop FPU stack before deoptimizing. - __ fstp(0); - DeoptimizeIf(not_zero, env); - } - __ jmp(&done, Label::kNear); - } else { - ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); - } - - __ bind(&load_smi); - // Clobbering a temp is faster than re-tagging the - // input register since we avoid dependencies. - __ mov(temp_reg, input_reg); - __ SmiUntag(temp_reg); // Untag smi before converting to float. - __ push(temp_reg); - __ fild_s(Operand(esp, 0)); - __ add(esp, Immediate(kPointerSize)); - __ bind(&done); - X87CommitWrite(res_reg); -} - - void LCodeGen::EmitNumberUntagD(Register input_reg, Register temp_reg, XMMRegister result_reg, @@ -5259,7 +4690,7 @@ __ jmp(&done, Label::kNear); } } else { - ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); } __ bind(&load_smi); @@ -5325,10 +4756,8 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { class DeferredTaggedToI V8_FINAL : public LDeferredCode { public: - DeferredTaggedToI(LCodeGen* codegen, - LTaggedToI* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); } @@ -5338,15 +4767,15 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); Register input_reg = ToRegister(input); - ASSERT(input_reg.is(ToRegister(instr->result()))); + 
DCHECK(input_reg.is(ToRegister(instr->result()))); if (instr->hydrogen()->value()->representation().IsSmi()) { __ SmiUntag(input_reg); } else { DeferredTaggedToI* deferred = - new(zone()) DeferredTaggedToI(this, instr, x87_stack_); + new(zone()) DeferredTaggedToI(this, instr); // Optimistically untag the input. // If the input is a HeapObject, SmiUntag will set the carry flag. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); @@ -5361,11 +4790,11 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); LOperand* temp = instr->temp(); - ASSERT(temp->IsRegister()); + DCHECK(temp->IsRegister()); LOperand* result = instr->result(); - ASSERT(result->IsDoubleRegister()); + DCHECK(result->IsDoubleRegister()); Register input_reg = ToRegister(input); bool deoptimize_on_minus_zero = @@ -5376,59 +4805,33 @@ NumberUntagDMode mode = value->representation().IsSmi() ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister result_reg = ToDoubleRegister(result); - EmitNumberUntagD(input_reg, - temp_reg, - result_reg, - instr->hydrogen()->can_convert_undefined_to_nan(), - deoptimize_on_minus_zero, - instr->environment(), - mode); - } else { - EmitNumberUntagDNoSSE2(input_reg, - temp_reg, - ToX87Register(instr->result()), - instr->hydrogen()->can_convert_undefined_to_nan(), - deoptimize_on_minus_zero, - instr->environment(), - mode); - } + XMMRegister result_reg = ToDoubleRegister(result); + EmitNumberUntagD(input_reg, + temp_reg, + result_reg, + instr->hydrogen()->can_convert_undefined_to_nan(), + deoptimize_on_minus_zero, + instr->environment(), + mode); } void LCodeGen::DoDoubleToI(LDoubleToI* instr) { LOperand* input = instr->value(); - ASSERT(input->IsDoubleRegister()); + DCHECK(input->IsDoubleRegister()); LOperand* result = instr->result(); - ASSERT(result->IsRegister()); + 
DCHECK(result->IsRegister()); Register result_reg = ToRegister(result); if (instr->truncating()) { - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister input_reg = ToDoubleRegister(input); - __ TruncateDoubleToI(result_reg, input_reg); - } else { - X87Register input_reg = ToX87Register(input); - X87Fxch(input_reg); - __ TruncateX87TOSToI(result_reg); - } + XMMRegister input_reg = ToDoubleRegister(input); + __ TruncateDoubleToI(result_reg, input_reg); } else { Label bailout, done; - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister input_reg = ToDoubleRegister(input); - XMMRegister xmm_scratch = double_scratch0(); - __ DoubleToI(result_reg, input_reg, xmm_scratch, - instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); - } else { - X87Register input_reg = ToX87Register(input); - X87Fxch(input_reg); - __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), - &bailout, Label::kNear); - } + XMMRegister input_reg = ToDoubleRegister(input); + XMMRegister xmm_scratch = double_scratch0(); + __ DoubleToI(result_reg, input_reg, xmm_scratch, + instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); __ jmp(&done, Label::kNear); __ bind(&bailout); DeoptimizeIf(no_condition, instr->environment()); @@ -5439,24 +4842,16 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { LOperand* input = instr->value(); - ASSERT(input->IsDoubleRegister()); + DCHECK(input->IsDoubleRegister()); LOperand* result = instr->result(); - ASSERT(result->IsRegister()); + DCHECK(result->IsRegister()); Register result_reg = ToRegister(result); Label bailout, done; - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - CpuFeatureScope scope(masm(), SSE2); - XMMRegister input_reg = ToDoubleRegister(input); - XMMRegister xmm_scratch = double_scratch0(); - __ DoubleToI(result_reg, input_reg, xmm_scratch, - instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); - } else { - X87Register 
input_reg = ToX87Register(input); - X87Fxch(input_reg); - __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), - &bailout, Label::kNear); - } + XMMRegister input_reg = ToDoubleRegister(input); + XMMRegister xmm_scratch = double_scratch0(); + __ DoubleToI(result_reg, input_reg, xmm_scratch, + instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); __ jmp(&done, Label::kNear); __ bind(&bailout); DeoptimizeIf(no_condition, instr->environment()); @@ -5475,7 +4870,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { LOperand* input = instr->value(); __ test(ToOperand(input), Immediate(kSmiTagMask)); DeoptimizeIf(zero, instr->environment()); @@ -5515,7 +4910,7 @@ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); if (IsPowerOf2(mask)) { - ASSERT(tag == 0 || IsPowerOf2(tag)); + DCHECK(tag == 0 || IsPowerOf2(tag)); __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask); DeoptimizeIf(tag == 0 ? 
not_zero : zero, instr->environment()); } else { @@ -5560,11 +4955,8 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { class DeferredCheckMaps V8_FINAL : public LDeferredCode { public: - DeferredCheckMaps(LCodeGen* codegen, - LCheckMaps* instr, - Register object, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { + DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) + : LDeferredCode(codegen), instr_(instr), object_(object) { SetExit(check_maps()); } virtual void Generate() V8_OVERRIDE { @@ -5578,29 +4970,35 @@ Register object_; }; - if (instr->hydrogen()->CanOmitMapChecks()) return; + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); Register reg = ToRegister(input); DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->has_migration_target()) { - deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); + if (instr->hydrogen()->HasMigrationTarget()) { + deferred = new(zone()) DeferredCheckMaps(this, instr, reg); __ bind(deferred->check_maps()); } - UniqueSet<Map> map_set = instr->hydrogen()->map_set(); + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); Label success; - for (int i = 0; i < map_set.size() - 1; i++) { - Handle<Map> map = map_set.at(i).handle(); + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); __ CompareMap(reg, map); __ j(equal, &success, Label::kNear); } - Handle<Map> map = map_set.at(map_set.size() - 1).handle(); + Handle<Map> map = maps->at(maps->size() - 1).handle(); __ CompareMap(reg, map); - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { __ j(not_equal, deferred->entry()); } else { 
DeoptimizeIf(not_equal, instr->environment()); @@ -5611,7 +5009,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - CpuFeatureScope scope(masm(), SSE2); XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); XMMRegister xmm_scratch = double_scratch0(); Register result_reg = ToRegister(instr->result()); @@ -5620,16 +5017,14 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - ASSERT(instr->unclamped()->Equals(instr->result())); + DCHECK(instr->unclamped()->Equals(instr->result())); Register value_reg = ToRegister(instr->result()); __ ClampUint8(value_reg); } void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - CpuFeatureScope scope(masm(), SSE2); - - ASSERT(instr->unclamped()->Equals(instr->result())); + DCHECK(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); XMMRegister xmm_scratch = double_scratch0(); @@ -5663,130 +5058,7 @@ } -void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - Register scratch = ToRegister(instr->scratch()); - Register scratch2 = ToRegister(instr->scratch2()); - Register scratch3 = ToRegister(instr->scratch3()); - Label is_smi, done, heap_number, valid_exponent, - largest_value, zero_result, maybe_nan_or_infinity; - - __ JumpIfSmi(input_reg, &is_smi); - - // Check for heap number - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - __ j(equal, &heap_number, Label::kNear); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. 
- __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, instr->environment()); - __ jmp(&zero_result, Label::kNear); - - // Heap number - __ bind(&heap_number); - - // Surprisingly, all of the hand-crafted bit-manipulations below are much - // faster than the x86 FPU built-in instruction, especially since "banker's - // rounding" would be additionally very expensive - - // Get exponent word. - __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); - __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); - - // Test for negative values --> clamp to zero - __ test(scratch, scratch); - __ j(negative, &zero_result, Label::kNear); - - // Get exponent alone in scratch2. - __ mov(scratch2, scratch); - __ and_(scratch2, HeapNumber::kExponentMask); - __ shr(scratch2, HeapNumber::kExponentShift); - __ j(zero, &zero_result, Label::kNear); - __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); - __ j(negative, &zero_result, Label::kNear); - - const uint32_t non_int8_exponent = 7; - __ cmp(scratch2, Immediate(non_int8_exponent + 1)); - // If the exponent is too big, check for special values. - __ j(greater, &maybe_nan_or_infinity, Label::kNear); - - __ bind(&valid_exponent); - // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent - // < 7. The shift bias is the number of bits to shift the mantissa such that - // with an exponent of 7 such the that top-most one is in bit 30, allowing - // detection the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to - // 1). - int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; - __ lea(result_reg, MemOperand(scratch2, shift_bias)); - // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the - // top bits of the mantissa. 
- __ and_(scratch, HeapNumber::kMantissaMask); - // Put back the implicit 1 of the mantissa - __ or_(scratch, 1 << HeapNumber::kExponentShift); - // Shift up to round - __ shl_cl(scratch); - // Use "banker's rounding" to spec: If fractional part of number is 0.5, then - // use the bit in the "ones" place and add it to the "halves" place, which has - // the effect of rounding to even. - __ mov(scratch2, scratch); - const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; - const uint32_t one_bit_shift = one_half_bit_shift + 1; - __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); - __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); - Label no_round; - __ j(less, &no_round, Label::kNear); - Label round_up; - __ mov(scratch2, Immediate(1 << one_half_bit_shift)); - __ j(greater, &round_up, Label::kNear); - __ test(scratch3, scratch3); - __ j(not_zero, &round_up, Label::kNear); - __ mov(scratch2, scratch); - __ and_(scratch2, Immediate(1 << one_bit_shift)); - __ shr(scratch2, 1); - __ bind(&round_up); - __ add(scratch, scratch2); - __ j(overflow, &largest_value, Label::kNear); - __ bind(&no_round); - __ shr(scratch, 23); - __ mov(result_reg, scratch); - __ jmp(&done, Label::kNear); - - __ bind(&maybe_nan_or_infinity); - // Check for NaN/Infinity, all other values map to 255 - __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); - __ j(not_equal, &largest_value, Label::kNear); - - // Check for NaN, which differs from Infinity in that at least one mantissa - // bit is set. - __ and_(scratch, HeapNumber::kMantissaMask); - __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); - __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN - // Infinity -> Fall through to map to 255. 
- - __ bind(&largest_value); - __ mov(result_reg, Immediate(255)); - __ jmp(&done, Label::kNear); - - __ bind(&zero_result); - __ xor_(result_reg, result_reg); - __ jmp(&done, Label::kNear); - - // smi - __ bind(&is_smi); - if (!input_reg.is(result_reg)) { - __ mov(result_reg, input_reg); - } - __ SmiUntag(result_reg); - __ ClampUint8(result_reg); - __ bind(&done); -} - - void LCodeGen::DoDoubleBits(LDoubleBits* instr) { - CpuFeatureScope scope(masm(), SSE2); XMMRegister value_reg = ToDoubleRegister(instr->value()); Register result_reg = ToRegister(instr->result()); if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { @@ -5808,7 +5080,6 @@ Register hi_reg = ToRegister(instr->hi()); Register lo_reg = ToRegister(instr->lo()); XMMRegister result_reg = ToDoubleRegister(instr->result()); - CpuFeatureScope scope(masm(), SSE2); if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope scope2(masm(), SSE4_1); @@ -5827,10 +5098,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) { class DeferredAllocate V8_FINAL : public LDeferredCode { public: - DeferredAllocate(LCodeGen* codegen, - LAllocate* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + DeferredAllocate(LCodeGen* codegen, LAllocate* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredAllocate(instr_); } @@ -5839,8 +5108,7 @@ LAllocate* instr_; }; - DeferredAllocate* deferred = - new(zone()) DeferredAllocate(this, instr, x87_stack_); + DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); Register result = ToRegister(instr->result()); Register temp = ToRegister(instr->temp()); @@ -5851,11 +5119,11 @@ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); } if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + 
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); } @@ -5903,22 +5171,28 @@ PushSafepointRegistersScope scope(this); if (instr->size()->IsRegister()) { Register size = ToRegister(instr->size()); - ASSERT(!size.is(result)); + DCHECK(!size.is(result)); __ SmiTag(ToRegister(instr->size())); __ push(size); } else { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ push(Immediate(Smi::FromInt(size))); + if (size >= 0 && size <= Smi::kMaxValue) { + __ push(Immediate(Smi::FromInt(size))); + } else { + // We should never get here at runtime => abort + __ int3(); + return; + } } int flags = AllocateDoubleAlignFlag::encode( instr->hydrogen()->MustAllocateDoubleAligned()); if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); } else { flags = AllocateTargetSpace::update(flags, NEW_SPACE); @@ -5926,20 +5200,20 @@ __ push(Immediate(Smi::FromInt(flags))); CallRuntimeFromDeferred( - Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context()); + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); __ StoreToSafepointRegisterSlot(result, eax); } void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - 
ASSERT(ToRegister(instr->value()).is(eax)); + DCHECK(ToRegister(instr->value()).is(eax)); __ push(eax); CallRuntime(Runtime::kToFastProperties, 1, instr); } void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); Label materialized; // Registers will be used as follows: // ecx = literals array. @@ -5959,7 +5233,7 @@ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); __ push(Immediate(instr->hydrogen()->pattern())); __ push(Immediate(instr->hydrogen()->flags())); - CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); __ mov(ebx, eax); __ bind(&materialized); @@ -5971,7 +5245,7 @@ __ bind(&runtime_allocate); __ push(ebx); __ push(Immediate(Smi::FromInt(size))); - CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); __ pop(ebx); __ bind(&allocated); @@ -5991,27 +5265,28 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && instr->hydrogen()->has_no_literals()) { - FastNewClosureStub stub(instr->hydrogen()->strict_mode(), + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), instr->hydrogen()->is_generator()); __ mov(ebx, Immediate(instr->hydrogen()->shared_info())); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ push(esi); __ push(Immediate(instr->hydrogen()->shared_info())); __ push(Immediate(pretenure ? 
factory()->true_value() : factory()->false_value())); - CallRuntime(Runtime::kHiddenNewClosure, 3, instr); + CallRuntime(Runtime::kNewClosure, 3, instr); } } void LCodeGen::DoTypeof(LTypeof* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); LOperand* input = instr->value(); EmitPushTaggedOperand(input); CallRuntime(Runtime::kTypeof, 1, instr); @@ -6040,13 +5315,13 @@ Label::Distance false_distance = right_block == next_block ? Label::kNear : Label::kFar; Condition final_branch_condition = no_condition; - if (type_name->Equals(heap()->number_string())) { + if (String::Equals(type_name, factory()->number_string())) { __ JumpIfSmi(input, true_label, true_distance); __ cmp(FieldOperand(input, HeapObject::kMapOffset), factory()->heap_number_map()); final_branch_condition = equal; - } else if (type_name->Equals(heap()->string_string())) { + } else if (String::Equals(type_name, factory()->string_string())) { __ JumpIfSmi(input, false_label, false_distance); __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); __ j(above_equal, false_label, false_distance); @@ -6054,22 +5329,18 @@ 1 << Map::kIsUndetectable); final_branch_condition = zero; - } else if (type_name->Equals(heap()->symbol_string())) { + } else if (String::Equals(type_name, factory()->symbol_string())) { __ JumpIfSmi(input, false_label, false_distance); __ CmpObjectType(input, SYMBOL_TYPE, input); final_branch_condition = equal; - } else if (type_name->Equals(heap()->boolean_string())) { + } else if (String::Equals(type_name, factory()->boolean_string())) { __ cmp(input, factory()->true_value()); __ j(equal, true_label, true_distance); __ cmp(input, factory()->false_value()); final_branch_condition = equal; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { - __ cmp(input, factory()->null_value()); - final_branch_condition = equal; - - } else if (type_name->Equals(heap()->undefined_string())) { + } else if 
(String::Equals(type_name, factory()->undefined_string())) { __ cmp(input, factory()->undefined_value()); __ j(equal, true_label, true_distance); __ JumpIfSmi(input, false_label, false_distance); @@ -6079,7 +5350,7 @@ 1 << Map::kIsUndetectable); final_branch_condition = not_zero; - } else if (type_name->Equals(heap()->function_string())) { + } else if (String::Equals(type_name, factory()->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label, false_distance); __ CmpObjectType(input, JS_FUNCTION_TYPE, input); @@ -6087,12 +5358,10 @@ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); final_branch_condition = equal; - } else if (type_name->Equals(heap()->object_string())) { + } else if (String::Equals(type_name, factory()->object_string())) { __ JumpIfSmi(input, false_label, false_distance); - if (!FLAG_harmony_typeof) { - __ cmp(input, factory()->null_value()); - __ j(equal, true_label, true_distance); - } + __ cmp(input, factory()->null_value()); + __ j(equal, true_label, true_distance); __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); __ j(below, false_label, false_distance); __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); @@ -6151,7 +5420,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { last_lazy_deopt_pc_ = masm()->pc_offset(); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); @@ -6185,10 +5454,10 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { PushSafepointRegistersScope scope(this); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); RecordSafepointWithLazyDeopt( instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - 
ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); } @@ -6197,10 +5466,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { class DeferredStackCheck V8_FINAL : public LDeferredCode { public: - DeferredStackCheck(LCodeGen* codegen, - LStackCheck* instr, - const X87Stack& x87_stack) - : LDeferredCode(codegen, x87_stack), instr_(instr) { } + DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) + : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredStackCheck(instr_); } @@ -6209,7 +5476,7 @@ LStackCheck* instr_; }; - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); // There is no LLazyBailout instruction for stack-checks. We have to // prepare for lazy deoptimization explicitly here. @@ -6221,17 +5488,17 @@ __ cmp(esp, Operand::StaticVariable(stack_limit)); __ j(above_equal, &done, Label::kNear); - ASSERT(instr->context()->IsRegister()); - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(esi)); CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, instr); __ bind(&done); } else { - ASSERT(instr->hydrogen()->is_backwards_branch()); + DCHECK(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping. DeferredStackCheck* deferred_stack_check = - new(zone()) DeferredStackCheck(this, instr, x87_stack_); + new(zone()) DeferredStackCheck(this, instr); ExternalReference stack_limit = ExternalReference::address_of_stack_limit(isolate()); __ cmp(esp, Operand::StaticVariable(stack_limit)); @@ -6255,7 +5522,7 @@ // If the environment were already registered, we would have no way of // backpatching it with the spill slot operands. 
- ASSERT(!environment->HasBeenRegistered()); + DCHECK(!environment->HasBeenRegistered()); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); GenerateOsrPrologue(); @@ -6263,7 +5530,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - ASSERT(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->context()).is(esi)); __ cmp(eax, isolate()->factory()->undefined_value()); DeoptimizeIf(equal, instr->environment()); @@ -6325,11 +5592,55 @@ } +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ push(object); + __ push(index); + __ xor_(esi, esi); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(object, eax); +} + + void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register object_; + Register index_; + }; + Register object = ToRegister(instr->object()); Register index = ToRegister(instr->index()); + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, object, index); + Label out_of_object, done; + __ test(index, Immediate(Smi::FromInt(1))); + __ j(not_zero, deferred->entry()); + + __ sar(index, 1); + __ cmp(index, Immediate(0)); __ j(less, &out_of_object, Label::kNear); __ mov(object, FieldOperand(object, @@ -6346,10 +5657,26 @@ index, 
times_half_pointer_size, FixedArray::kHeaderSize - kPointerSize)); + __ bind(deferred->exit()); __ bind(&done); } +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ Push(scope_info); + __ push(ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + #undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/ia32/lithium-codegen-ia32.h nodejs-0.11.15/deps/v8/src/ia32/lithium-codegen-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/lithium-codegen-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/lithium-codegen-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_ #define V8_IA32_LITHIUM_CODEGEN_IA32_H_ -#include "ia32/lithium-ia32.h" +#include "src/ia32/lithium-ia32.h" -#include "checks.h" -#include "deoptimizer.h" -#include "ia32/lithium-gap-resolver-ia32.h" -#include "lithium-codegen.h" -#include "safepoint-table.h" -#include "scopes.h" -#include "v8utils.h" +#include "src/base/logging.h" +#include "src/deoptimizer.h" +#include "src/ia32/lithium-gap-resolver-ia32.h" +#include "src/lithium-codegen.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -61,7 +38,6 @@ support_aligned_spilled_doubles_(false), osr_pc_offset_(-1), frame_is_built_(false), - x87_stack_(assembler), safepoints_(info->zone()), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { @@ -90,7 +66,6 @@ Operand ToOperand(LOperand* op) const; Register ToRegister(LOperand* op) const; XMMRegister ToDoubleRegister(LOperand* op) const; - X87Register ToX87Register(LOperand* op) const; bool IsInteger32(LConstantOperand* op) const; bool 
IsSmi(LConstantOperand* op) const; @@ -99,36 +74,6 @@ } double ToDouble(LConstantOperand* op) const; - // Support for non-sse2 (x87) floating point stack handling. - // These functions maintain the mapping of physical stack registers to our - // virtual registers between instructions. - enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand }; - - void X87Mov(X87Register reg, Operand src, - X87OperandType operand = kX87DoubleOperand); - void X87Mov(Operand src, X87Register reg, - X87OperandType operand = kX87DoubleOperand); - - void X87PrepareBinaryOp( - X87Register left, X87Register right, X87Register result); - - void X87LoadForUsage(X87Register reg); - void X87LoadForUsage(X87Register reg1, X87Register reg2); - void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); } - void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); } - - void X87Fxch(X87Register reg, int other_slot = 0) { - x87_stack_.Fxch(reg, other_slot); - } - void X87Free(X87Register reg) { - x87_stack_.Free(reg); - } - - - bool X87StackEmpty() { - return x87_stack_.depth() == 0; - } - Handle<Object> ToHandle(LConstantOperand* op) const; // The operand denoting the second word (the one with a higher address) of @@ -150,8 +95,7 @@ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, - LOperand* temp1, - LOperand* temp2, + LOperand* temp, IntegerSignedness signedness); void DoDeferredTaggedToI(LTaggedToI* instr, Label* done); @@ -163,6 +107,9 @@ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index); // Parallel move support. 
void DoParallelMove(LParallelMove* move); @@ -194,8 +141,6 @@ int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - void Abort(BailoutReason reason); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } void SaveCallerDoubles(); @@ -268,7 +213,6 @@ LEnvironment* environment, Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition cc, LEnvironment* environment); - void ApplyCheckIf(Condition cc, LBoundsCheck* check); bool DeoptEveryNTimes() { return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); @@ -288,7 +232,6 @@ Register ToRegister(int index) const; XMMRegister ToDoubleRegister(int index) const; - X87Register ToX87Register(int index) const; int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; int32_t ToInteger32(LConstantOperand* op) const; ExternalReference ToExternalReference(LConstantOperand* op) const; @@ -297,8 +240,7 @@ LOperand* key, Representation key_representation, ElementsKind elements_kind, - uint32_t offset, - uint32_t additional_index = 0); + uint32_t base_offset); Operand BuildSeqStringOperand(Register string, LOperand* index, @@ -336,15 +278,6 @@ LEnvironment* env, NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED); - void EmitNumberUntagDNoSSE2( - Register input, - Register temp, - X87Register res_reg, - bool allow_undefined_as_nan, - bool deoptimize_on_minus_zero, - LEnvironment* env, - NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED); - // Emits optimized code for typeof x == "y". Modifies input register. // Returns the condition on which a final split to // true and false label should be made, to optimize fallthrough. @@ -392,12 +325,6 @@ // register, or a stack slot operand. 
void EmitPushTaggedOperand(LOperand* operand); - void X87Fld(Operand src, X87OperandType opts); - - void EmitFlushX87ForDeopt(); - void FlushX87StackIfNecessary(LInstruction* instr) { - x87_stack_.FlushIfNecessary(instr, this); - } friend class LGapResolver; #ifdef _MSC_VER @@ -420,55 +347,6 @@ int osr_pc_offset_; bool frame_is_built_; - class X87Stack { - public: - explicit X87Stack(MacroAssembler* masm) - : stack_depth_(0), is_mutable_(true), masm_(masm) { } - explicit X87Stack(const X87Stack& other) - : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(masm()) { - for (int i = 0; i < stack_depth_; i++) { - stack_[i] = other.stack_[i]; - } - } - bool operator==(const X87Stack& other) const { - if (stack_depth_ != other.stack_depth_) return false; - for (int i = 0; i < stack_depth_; i++) { - if (!stack_[i].is(other.stack_[i])) return false; - } - return true; - } - bool Contains(X87Register reg); - void Fxch(X87Register reg, int other_slot = 0); - void Free(X87Register reg); - void PrepareToWrite(X87Register reg); - void CommitWrite(X87Register reg); - void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen); - void LeavingBlock(int current_block_id, LGoto* goto_instr); - int depth() const { return stack_depth_; } - void pop() { - ASSERT(is_mutable_); - stack_depth_--; - } - void push(X87Register reg) { - ASSERT(is_mutable_); - ASSERT(stack_depth_ < X87Register::kNumAllocatableRegisters); - stack_[stack_depth_] = reg; - stack_depth_++; - } - - MacroAssembler* masm() const { return masm_; } - - private: - int ArrayIndex(X87Register reg); - int st2idx(int pos); - - X87Register stack_[X87Register::kNumAllocatableRegisters]; - int stack_depth_; - bool is_mutable_; - MacroAssembler* masm_; - }; - X87Stack x87_stack_; - // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. 
SafepointTableBuilder safepoints_; @@ -482,14 +360,14 @@ public: explicit PushSafepointRegistersScope(LCodeGen* codegen) : codegen_(codegen) { - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->masm_->PushSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; - ASSERT(codegen_->info()->is_calling()); + DCHECK(codegen_->info()->is_calling()); } ~PushSafepointRegistersScope() { - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); codegen_->masm_->PopSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kSimple; } @@ -507,11 +385,10 @@ class LDeferredCode : public ZoneObject { public: - explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack) + explicit LDeferredCode(LCodeGen* codegen) : codegen_(codegen), external_exit_(NULL), - instruction_index_(codegen->current_instruction_), - x87_stack_(x87_stack) { + instruction_index_(codegen->current_instruction_) { codegen->AddDeferredCode(this); } @@ -524,7 +401,6 @@ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } Label* done() { return codegen_->NeedsDeferredFrame() ? 
&done_ : exit(); } int instruction_index() const { return instruction_index_; } - const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -537,7 +413,6 @@ Label* external_exit_; Label done_; int instruction_index_; - LCodeGen::X87Stack x87_stack_; }; } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-gap-resolver-ia32.h" -#include "ia32/lithium-codegen-ia32.h" +#include "src/ia32/lithium-codegen-ia32.h" +#include "src/ia32/lithium-gap-resolver-ia32.h" namespace v8 { namespace internal { @@ -44,7 +21,7 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) { - ASSERT(HasBeenReset()); + DCHECK(HasBeenReset()); // Build up a worklist of moves. BuildInitialMoveList(parallel_move); @@ -61,13 +38,13 @@ // Perform the moves with constant sources. for (int i = 0; i < moves_.length(); ++i) { if (!moves_[i].IsEliminated()) { - ASSERT(moves_[i].source()->IsConstantOperand()); + DCHECK(moves_[i].source()->IsConstantOperand()); EmitMove(i); } } Finish(); - ASSERT(HasBeenReset()); + DCHECK(HasBeenReset()); } @@ -93,12 +70,12 @@ // which means that a call to PerformMove could change any source operand // in the move graph. - ASSERT(!moves_[index].IsPending()); - ASSERT(!moves_[index].IsRedundant()); + DCHECK(!moves_[index].IsPending()); + DCHECK(!moves_[index].IsRedundant()); // Clear this move's destination to indicate a pending move. The actual // destination is saved on the side. - ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. 
LOperand* destination = moves_[index].destination(); moves_[index].set_destination(NULL); @@ -139,7 +116,7 @@ for (int i = 0; i < moves_.length(); ++i) { LMoveOperands other_move = moves_[i]; if (other_move.Blocks(destination)) { - ASSERT(other_move.IsPending()); + DCHECK(other_move.IsPending()); EmitSwap(index); return; } @@ -165,13 +142,13 @@ LOperand* source = moves_[index].source(); if (source->IsRegister()) { --source_uses_[source->index()]; - ASSERT(source_uses_[source->index()] >= 0); + DCHECK(source_uses_[source->index()] >= 0); } LOperand* destination = moves_[index].destination(); if (destination->IsRegister()) { --destination_uses_[destination->index()]; - ASSERT(destination_uses_[destination->index()] >= 0); + DCHECK(destination_uses_[destination->index()] >= 0); } moves_[index].Eliminate(); @@ -213,12 +190,12 @@ void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS // No operand should be the destination for more than one move. for (int i = 0; i < moves_.length(); ++i) { LOperand* destination = moves_[i].destination(); for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); } } #endif @@ -282,13 +259,13 @@ // Dispatch on the source and destination operand kinds. Not all // combinations are possible. 
if (source->IsRegister()) { - ASSERT(destination->IsRegister() || destination->IsStackSlot()); + DCHECK(destination->IsRegister() || destination->IsStackSlot()); Register src = cgen_->ToRegister(source); Operand dst = cgen_->ToOperand(destination); __ mov(dst, src); } else if (source->IsStackSlot()) { - ASSERT(destination->IsRegister() || destination->IsStackSlot()); + DCHECK(destination->IsRegister() || destination->IsStackSlot()); Operand src = cgen_->ToOperand(source); if (destination->IsRegister()) { Register dst = cgen_->ToRegister(destination); @@ -318,26 +295,17 @@ uint64_t int_val = BitCast<uint64_t, double>(v); int32_t lower = static_cast<int32_t>(int_val); int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(cgen_->masm(), SSE2); - XMMRegister dst = cgen_->ToDoubleRegister(destination); - if (int_val == 0) { - __ xorps(dst, dst); - } else { - __ push(Immediate(upper)); - __ push(Immediate(lower)); - __ movsd(dst, Operand(esp, 0)); - __ add(esp, Immediate(kDoubleSize)); - } + XMMRegister dst = cgen_->ToDoubleRegister(destination); + if (int_val == 0) { + __ xorps(dst, dst); } else { __ push(Immediate(upper)); __ push(Immediate(lower)); - X87Register dst = cgen_->ToX87Register(destination); - cgen_->X87Mov(dst, MemOperand(esp, 0)); + __ movsd(dst, Operand(esp, 0)); __ add(esp, Immediate(kDoubleSize)); } } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); Operand dst = cgen_->ToOperand(destination); Representation r = cgen_->IsSmi(constant_source) ? 
Representation::Smi() : Representation::Integer32(); @@ -351,59 +319,27 @@ } } else if (source->IsDoubleRegister()) { - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(cgen_->masm(), SSE2); - XMMRegister src = cgen_->ToDoubleRegister(source); - if (destination->IsDoubleRegister()) { - XMMRegister dst = cgen_->ToDoubleRegister(destination); - __ movaps(dst, src); - } else { - ASSERT(destination->IsDoubleStackSlot()); - Operand dst = cgen_->ToOperand(destination); - __ movsd(dst, src); - } + XMMRegister src = cgen_->ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + XMMRegister dst = cgen_->ToDoubleRegister(destination); + __ movaps(dst, src); } else { - // load from the register onto the stack, store in destination, which must - // be a double stack slot in the non-SSE2 case. - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); Operand dst = cgen_->ToOperand(destination); - X87Register src = cgen_->ToX87Register(source); - cgen_->X87Mov(dst, src); + __ movsd(dst, src); } } else if (source->IsDoubleStackSlot()) { - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(cgen_->masm(), SSE2); - ASSERT(destination->IsDoubleRegister() || - destination->IsDoubleStackSlot()); - Operand src = cgen_->ToOperand(source); - if (destination->IsDoubleRegister()) { - XMMRegister dst = cgen_->ToDoubleRegister(destination); - __ movsd(dst, src); - } else { - // We rely on having xmm0 available as a fixed scratch register. - Operand dst = cgen_->ToOperand(destination); - __ movsd(xmm0, src); - __ movsd(dst, xmm0); - } + DCHECK(destination->IsDoubleRegister() || + destination->IsDoubleStackSlot()); + Operand src = cgen_->ToOperand(source); + if (destination->IsDoubleRegister()) { + XMMRegister dst = cgen_->ToDoubleRegister(destination); + __ movsd(dst, src); } else { - // load from the stack slot on top of the floating point stack, and then - // store in destination. 
If destination is a double register, then it - // represents the top of the stack and nothing needs to be done. - if (destination->IsDoubleStackSlot()) { - Register tmp = EnsureTempRegister(); - Operand src0 = cgen_->ToOperand(source); - Operand src1 = cgen_->HighOperand(source); - Operand dst0 = cgen_->ToOperand(destination); - Operand dst1 = cgen_->HighOperand(destination); - __ mov(tmp, src0); // Then use tmp to copy source to destination. - __ mov(dst0, tmp); - __ mov(tmp, src1); - __ mov(dst1, tmp); - } else { - Operand src = cgen_->ToOperand(source); - X87Register dst = cgen_->ToX87Register(destination); - cgen_->X87Mov(dst, src); - } + // We rely on having xmm0 available as a fixed scratch register. + Operand dst = cgen_->ToOperand(destination); + __ movsd(xmm0, src); + __ movsd(dst, xmm0); } } else { UNREACHABLE(); @@ -468,7 +404,6 @@ __ mov(src, tmp0); } } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), SSE2); // XMM register-register swap. We rely on having xmm0 // available as a fixed scratch register. XMMRegister src = cgen_->ToDoubleRegister(source); @@ -477,10 +412,9 @@ __ movaps(src, dst); __ movaps(dst, xmm0); } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), SSE2); // XMM register-memory swap. We rely on having xmm0 // available as a fixed scratch register. - ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); + DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister() ? source : destination); @@ -490,7 +424,6 @@ __ movsd(other, reg); __ movaps(reg, xmm0); } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), SSE2); // Double-width memory-to-memory. 
Spill on demand to use a general // purpose temporary register and also rely on having xmm0 available as // a fixed scratch register. diff -Nru nodejs-0.11.13/deps/v8/src/ia32/lithium-gap-resolver-ia32.h nodejs-0.11.15/deps/v8/src/ia32/lithium-gap-resolver-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/lithium-gap-resolver-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/lithium-gap-resolver-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_ #define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_ -#include "v8.h" +#include "src/v8.h" -#include "lithium.h" +#include "src/lithium.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/ia32/lithium-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/lithium-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/lithium-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/lithium-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "lithium-allocator-inl.h" -#include "ia32/lithium-ia32.h" -#include "ia32/lithium-codegen-ia32.h" -#include "hydrogen-osr.h" +#include "src/hydrogen-osr.h" +#include "src/ia32/lithium-codegen-ia32.h" +#include "src/lithium-inl.h" namespace v8 { namespace internal { @@ -51,17 +27,17 @@ // outputs because all registers are blocked by the calling convention. // Inputs operands must use a fixed register or use-at-start policy or // a non-register policy. 
- ASSERT(Output() == NULL || + DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || !LUnallocated::cast(Output())->HasRegisterPolicy()); for (UseIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() || + DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); } for (TempIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); } } #endif @@ -83,17 +59,6 @@ } -bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) { - for (int i = 0; i < InputCount(); i++) { - LOperand* op = InputAt(i); - if (op != NULL && op->IsDoubleRegister()) { - if (cgen->ToX87Register(op).is(reg)) return true; - } - } - return false; -} - - void LInstruction::PrintTo(StringStream* stream) { stream->Add("%s ", this->Mnemonic()); @@ -392,7 +357,7 @@ if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { - ASSERT(kind == GENERAL_REGISTERS); + DCHECK(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } @@ -400,8 +365,9 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); - hydrogen()->access().PrintTo(stream); - stream->Add(" <- "); + OStringStream os; + os << hydrogen()->access() << " <- "; + stream->Add(os.c_str()); value()->PrintTo(stream); } @@ -420,7 +386,7 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", additional_index()); + stream->Add(" + %d]", base_offset()); } else { stream->Add("]"); } @@ -432,13 +398,13 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", additional_index()); + stream->Add(" + %d] <-", base_offset()); } else { stream->Add("] <- "); } if (value() == NULL) { - 
ASSERT(hydrogen()->IsConstantHoleStore() && + DCHECK(hydrogen()->IsConstantHoleStore() && hydrogen()->value()->representation().IsDouble()); stream->Add("<the hole(nan)>"); } else { @@ -463,7 +429,7 @@ LPlatformChunk* LChunkBuilder::Build() { - ASSERT(is_unused()); + DCHECK(is_unused()); chunk_ = new(zone()) LPlatformChunk(info(), graph()); LPhase phase("L_Building chunk", chunk_); status_ = BUILDING; @@ -471,7 +437,7 @@ // Reserve the first spill slot for the state of dynamic alignment. if (info()->IsOptimizing()) { int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS); - ASSERT_EQ(alignment_state_index, 0); + DCHECK_EQ(alignment_state_index, 0); USE(alignment_state_index); } @@ -688,6 +654,8 @@ !hinstr->HasObservableSideEffects(); if (needs_environment && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. + instr->environment()->set_has_been_used(); } return instr; @@ -695,7 +663,7 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); + DCHECK(!instr->HasPointerMap()); instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -716,14 +684,14 @@ LOperand* LChunkBuilder::FixedTemp(Register reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } @@ -752,8 +720,8 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + 
DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); HValue* right_value = instr->right(); @@ -794,9 +762,9 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); if (op == Token::MOD) { LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); @@ -815,8 +783,8 @@ HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); - ASSERT(left->representation().IsTagged()); - ASSERT(right->representation().IsTagged()); + DCHECK(left->representation().IsTagged()); + DCHECK(right->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), esi); LOperand* left_operand = UseFixed(left, edx); LOperand* right_operand = UseFixed(right, eax); @@ -827,7 +795,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - ASSERT(is_building()); + DCHECK(is_building()); current_block_ = block; next_block_ = next_block; if (block->IsStartBlock()) { @@ -836,13 +804,13 @@ } else if (block->predecessors()->length() == 1) { // We have a single predecessor => copy environment and outgoing // argument count from the predecessor. - ASSERT(block->phis()->length() == 0); + DCHECK(block->phis()->length() == 0); HBasicBlock* pred = block->predecessors()->at(0); HEnvironment* last_environment = pred->last_environment(); - ASSERT(last_environment != NULL); + DCHECK(last_environment != NULL); // Only copy the environment, if it is later used again. 
if (pred->end()->SecondSuccessor() == NULL) { - ASSERT(pred->end()->FirstSuccessor() == block); + DCHECK(pred->end()->FirstSuccessor() == block); } else { if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || pred->end()->SecondSuccessor()->block_id() > block->block_id()) { @@ -850,7 +818,7 @@ } } block->UpdateEnvironment(last_environment); - ASSERT(pred->argument_count() >= 0); + DCHECK(pred->argument_count() >= 0); argument_count_ = pred->argument_count(); } else { // We are at a state join => process phis. @@ -902,7 +870,7 @@ if (current->OperandCount() == 0) { instr = DefineAsRegister(new(zone()) LDummy()); } else { - ASSERT(!current->OperandAt(0)->IsControlInstruction()); + DCHECK(!current->OperandAt(0)->IsControlInstruction()); instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(current->OperandAt(0)))); } @@ -914,85 +882,90 @@ chunk_->AddInstruction(dummy, current_block_); } } else { - instr = current->CompileToLithium(this); + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } } argument_count_ += current->argument_delta(); - ASSERT(argument_count_ >= 0); + DCHECK(argument_count_ >= 0); if (instr != NULL) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(current); + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
+ instr->set_hydrogen_value(hydrogen_val); #if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. - // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - ASSERT(fixed == 0 || used_at_start == 0); + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. 
In this case, The register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; } + DCHECK(fixed == 0 || used_at_start == 0); + } #endif - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - if (!CpuFeatures::IsSafeForSnapshot(SSE2) && instr->IsGoto() && - LGoto::cast(instr)->jumps_to_join()) { - // TODO(olivf) Since phis of spilled values are joined as registers - // (not in the stack slot), we need to allow the goto gaps to keep one - // x87 register alive. To ensure all other values are still spilled, we - // insert a fpu register barrier right before. 
- LClobberDoubles* clobber = new(zone()) LClobberDoubles(); - clobber->set_hydrogen_value(current); - chunk_->AddInstruction(clobber, current_block_); - } - chunk_->AddInstruction(instr, current_block_); - - if (instr->IsCall()) { - HValue* hydrogen_value_for_lazy_bailout = current; - LInstruction* instruction_needing_environment = NULL; - if (current->HasObservableSideEffects()) { - HSimulate* sim = HSimulate::cast(current->next()); - instruction_needing_environment = instr; - sim->ReplayEnvironment(current_block_->last_environment()); - hydrogen_value_for_lazy_bailout = sim; - } - LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); - bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); - chunk_->AddInstruction(bailout, current_block_); - if (instruction_needing_environment != NULL) { - // Store the lazy deopt environment with the instruction if needed. - // Right now it is only used for LInstanceOfKnownGlobal. - instruction_needing_environment-> - SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); - } + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + chunk_->AddInstruction(instr, current_block_); + + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the 
instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. + instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); } } - current_instruction_ = old_current; } @@ -1002,9 +975,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - HValue* value = instr->value(); Representation r = value->representation(); HType type = value->type(); @@ -1030,10 +1000,7 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LCmpMapAndBranch(value); } @@ -1094,9 +1061,13 @@ } -LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - LOperand* argument = UseAny(instr->argument()); - return new(zone()) LPushArgument(argument); +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + for (int i = 0; i < argc; ++i) { + LOperand* argument = UseAny(instr->argument(i)); + AddInstruction(new(zone()) LPushArgument(argument), instr); + } + return NULL; } @@ -1153,8 +1124,7 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor( HCallWithDescriptor* instr) { - const CallInterfaceDescriptor* descriptor = instr->descriptor(); - + const InterfaceDescriptor* descriptor = instr->descriptor(); LOperand* target = UseRegisterOrConstantAtStart(instr->target()); ZoneList<LOperand*> ops(instr->OperandCount(), zone()); ops.Add(target, zone()); @@ -1180,14 +1150,24 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { switch (instr->op()) { - case kMathFloor: return DoMathFloor(instr); - case kMathRound: return 
DoMathRound(instr); - case kMathAbs: return DoMathAbs(instr); - case kMathLog: return DoMathLog(instr); - case kMathExp: return DoMathExp(instr); - case kMathSqrt: return DoMathSqrt(instr); - case kMathPowHalf: return DoMathPowHalf(instr); - case kMathClz32: return DoMathClz32(instr); + case kMathFloor: + return DoMathFloor(instr); + case kMathRound: + return DoMathRound(instr); + case kMathFround: + return DoMathFround(instr); + case kMathAbs: + return DoMathAbs(instr); + case kMathLog: + return DoMathLog(instr); + case kMathExp: + return DoMathExp(instr); + case kMathSqrt: + return DoMathSqrt(instr); + case kMathPowHalf: + return DoMathPowHalf(instr); + case kMathClz32: + return DoMathClz32(instr); default: UNREACHABLE(); return NULL; @@ -1210,6 +1190,13 @@ } +LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathFround* result = new (zone()) LMathFround(input); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { LOperand* context = UseAny(instr->context()); // Deferred use. 
LOperand* input = UseRegisterAtStart(instr->value()); @@ -1223,8 +1210,8 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegisterAtStart(instr->value()); return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr); } @@ -1238,8 +1225,8 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* value = UseTempRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); @@ -1249,9 +1236,8 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = UseRegisterAtStart(instr->value()); - LMathSqrt* result = new(zone()) LMathSqrt(input); - return DefineSameAsFirst(result); + LOperand* input = UseAtStart(instr->value()); + return DefineAsRegister(new(zone()) LMathSqrt(input)); } @@ -1315,9 +1301,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); @@ -1329,9 +1315,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - 
ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( @@ -1347,9 +1333,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp1 = FixedTemp(eax); @@ -1365,10 +1351,10 @@ } -LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); +LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseFixed(instr->left(), eax); LOperand* divisor = UseRegister(instr->right()); LOperand* temp = FixedTemp(edx); @@ -1377,8 +1363,7 @@ if (instr->CheckFlag(HValue::kCanBeDivByZero) || 
instr->CheckFlag(HValue::kBailoutOnMinusZero) || instr->CheckFlag(HValue::kCanOverflow) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { result = AssignEnvironment(result); } return result; @@ -1416,9 +1401,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp1 = FixedTemp(eax); @@ -1442,26 +1427,45 @@ } +LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseFixed(instr->left(), eax); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = FixedTemp(edx); + LInstruction* result = DefineFixed(new(zone()) LFlooringDivI( + dividend, divisor, temp), eax); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; +} + + LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { if (instr->RightIsPowerOf2()) { return DoFlooringDivByPowerOf2I(instr); } else if (instr->right()->IsConstant()) { return DoFlooringDivByConstI(instr); } else { - return DoDivI(instr); + return DoFlooringDivI(instr); } } LInstruction* 
LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegisterAtStart(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( dividend, divisor)); - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { result = AssignEnvironment(result); } return result; @@ -1469,9 +1473,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp1 = FixedTemp(eax); @@ -1486,9 +1490,9 @@ LInstruction* LChunkBuilder::DoModI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* 
dividend = UseFixed(instr->left(), eax); LOperand* divisor = UseRegister(instr->right()); LOperand* temp = FixedTemp(edx); @@ -1521,8 +1525,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstant(instr->BetterRightOperand()); LOperand* temp = NULL; @@ -1545,8 +1549,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new(zone()) LSubI(left, right); @@ -1565,8 +1569,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); // Check to see if it would be advantageous to use an lea instruction rather // than an add. 
This is the case when no overflow check is needed and there // are multiple uses of the add's inputs, so using a 3-register add will @@ -1589,9 +1593,9 @@ } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::ADD, instr); } else if (instr->representation().IsExternal()) { - ASSERT(instr->left()->representation().IsExternal()); - ASSERT(instr->right()->representation().IsInteger32()); - ASSERT(!instr->CheckFlag(HValue::kCanOverflow)); + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); bool use_lea = LAddI::UseLea(instr); LOperand* left = UseRegisterAtStart(instr->left()); HValue* right_candidate = instr->right(); @@ -1613,14 +1617,14 @@ LOperand* left = NULL; LOperand* right = NULL; if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); left = UseRegisterAtStart(instr->BetterLeftOperand()); right = UseOrConstantAtStart(instr->BetterRightOperand()); } else { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); } @@ -1630,11 +1634,11 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { - ASSERT(instr->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); // We call a C function for double power. It can't trigger a GC. // We need to use fixed result register for the call. 
Representation exponent_type = instr->right()->representation(); - ASSERT(instr->left()->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); LOperand* left = UseFixedDouble(instr->left(), xmm2); LOperand* right = exponent_type.IsDouble() ? UseFixedDouble(instr->right(), xmm1) : @@ -1646,8 +1650,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - ASSERT(instr->left()->representation().IsSmiOrTagged()); - ASSERT(instr->right()->representation().IsSmiOrTagged()); + DCHECK(instr->left()->representation().IsSmiOrTagged()); + DCHECK(instr->right()->representation().IsSmiOrTagged()); LOperand* context = UseFixed(instr->context(), esi); LOperand* left = UseFixed(instr->left(), edx); LOperand* right = UseFixed(instr->right(), eax); @@ -1660,15 +1664,15 @@ HCompareNumericAndBranch* instr) { Representation r = instr->representation(); if (r.IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(r)); - ASSERT(instr->right()->representation().Equals(r)); + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); LOperand* left = UseRegisterOrConstantAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); } else { - ASSERT(r.IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); LOperand* left; LOperand* right; if (CanBeImmediateConstant(instr->left()) && @@ -1688,8 +1692,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); 
return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1705,8 +1707,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( HCompareMinusZeroAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* value = UseRegister(instr->value()); LOperand* scratch = TempRegister(); return new(zone()) LCompareMinusZeroAndBranch(value, scratch); @@ -1714,28 +1714,28 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { - ASSERT(instr->value()->representation().IsSmiOrTagged()); + DCHECK(instr->value()->representation().IsSmiOrTagged()); LOperand* temp = TempRegister(); return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp); } LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* temp = TempRegister(); return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp); } LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsSmiAndBranch(Use(instr->value())); } LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( HIsUndetectableAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsUndetectableAndBranch( UseRegisterAtStart(instr->value()), TempRegister()); } @@ -1743,8 +1743,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch( HStringCompareAndBranch* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), esi); LOperand* left = 
UseFixed(instr->left(), edx); LOperand* right = UseFixed(instr->right(), eax); @@ -1758,7 +1758,7 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( HHasInstanceTypeAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LHasInstanceTypeAndBranch( UseRegisterAtStart(instr->value()), TempRegister()); @@ -1767,7 +1767,7 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex( HGetCachedArrayIndex* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); @@ -1776,7 +1776,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( HHasCachedArrayIndexAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LHasCachedArrayIndexAndBranch( UseRegisterAtStart(instr->value())); } @@ -1784,7 +1784,7 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister(), TempRegister()); @@ -1846,9 +1846,16 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - return AssignEnvironment(new(zone()) LBoundsCheck( - UseRegisterOrConstantAtStart(instr->index()), - UseAtStart(instr->length()))); + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? 
UseOrConstantAtStart(instr->length()) + : UseAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } + return result; } @@ -1882,74 +1889,60 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + HValue* val = instr->value(); if (from.IsSmi()) { if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LDummyUse(value)); } from = Representation::Tagged(); } - // Only mark conversions that might need to allocate as calling rather than - // all changes. This makes simple, non-allocating conversion not have to force - // building a stack frame. if (from.IsTagged()) { if (to.IsDouble()) { - LOperand* value = UseRegister(instr->value()); - // Temp register only necessary for minus zero check. + LOperand* value = UseRegister(val); LOperand* temp = TempRegister(); - LInstruction* result = DefineAsRegister( - new(zone()) LNumberUntagD(value, temp)); - if (!instr->value()->representation().IsSmi()) { - result = AssignEnvironment(result); - } + LInstruction* result = + DefineAsRegister(new(zone()) LNumberUntagD(value, temp)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); return result; } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); if (val->type().IsSmi()) { return DefineSameAsFirst(new(zone()) LDummyUse(value)); } return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); } else { - ASSERT(to.IsInteger32()); - HValue* val = instr->value(); + DCHECK(to.IsInteger32()); if (val->type().IsSmi() || val->representation().IsSmi()) { LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LSmiUntag(value, false)); } else { + LOperand* value = UseRegister(val); bool truncating = 
instr->CanTruncateToInt32(); - LOperand* xmm_temp = - (CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating) - ? FixedTemp(xmm1) : NULL; - LInstruction* result = DefineSameAsFirst( - new(zone()) LTaggedToI(UseRegister(val), xmm_temp)); - if (!instr->value()->representation().IsSmi()) { - // Note: Only deopts in deferred code. - result = AssignEnvironment(result); - } + LOperand* xmm_temp = !truncating ? FixedTemp(xmm1) : NULL; + LInstruction* result = + DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); return result; } } } else if (from.IsDouble()) { if (to.IsTagged()) { info()->MarkAsDeferredCalling(); - LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* value = UseRegisterAtStart(val); LOperand* temp = FLAG_inline_new ? TempRegister() : NULL; - - // Make sure that temp and result_temp are different registers. LUnallocated* result_temp = TempRegister(); LNumberTagD* result = new(zone()) LNumberTagD(value, temp); return AssignPointerMap(Define(result, result_temp)); } else if (to.IsSmi()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return AssignEnvironment( DefineAsRegister(new(zone()) LDoubleToSmi(value))); } else { - ASSERT(to.IsInteger32()); + DCHECK(to.IsInteger32()); bool truncating = instr->CanTruncateToInt32(); - bool needs_temp = CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating; - LOperand* value = needs_temp ? - UseTempRegister(instr->value()) : UseRegister(instr->value()); + bool needs_temp = !truncating; + LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val); LOperand* temp = needs_temp ? 
TempRegister() : NULL; LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value, temp)); @@ -1959,15 +1952,12 @@ } else if (from.IsInteger32()) { info()->MarkAsDeferredCalling(); if (to.IsTagged()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); if (!instr->CheckFlag(HValue::kCanOverflow)) { return DefineSameAsFirst(new(zone()) LSmiTag(value)); } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* temp1 = TempRegister(); - LOperand* temp2 = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1) - : NULL; - LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); + LOperand* temp = TempRegister(); + LNumberTagU* result = new(zone()) LNumberTagU(value, temp); return AssignPointerMap(DefineSameAsFirst(result)); } else { LOperand* temp = TempRegister(); @@ -1975,7 +1965,6 @@ return AssignPointerMap(DefineSameAsFirst(result)); } } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value)); if (instr->CheckFlag(HValue::kCanOverflow)) { @@ -1983,14 +1972,11 @@ } return result; } else { - ASSERT(to.IsDouble()); - if (instr->value()->CheckFlag(HInstruction::kUint32)) { - LOperand* temp = FixedTemp(xmm1); - return DefineAsRegister( - new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp)); + DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { + return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); } else { - return DefineAsRegister( - new(zone()) LInteger32ToDouble(Use(instr->value()))); + return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); } } } @@ -2001,7 +1987,11 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { LOperand* value = UseAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckNonSmi(value)); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = 
AssignEnvironment(result); + } + return result; } @@ -2031,16 +2021,12 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - LOperand* value = NULL; - if (!instr->CanOmitMapChecks()) { - value = UseRegisterAtStart(instr->value()); - if (instr->has_migration_target()) info()->MarkAsDeferredCalling(); - } - LCheckMaps* result = new(zone()) LCheckMaps(value); - if (!instr->CanOmitMapChecks()) { - // Note: Only deopts in deferred code. - AssignEnvironment(result); - if (instr->has_migration_target()) return AssignPointerMap(result); + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); } return result; } @@ -2056,28 +2042,20 @@ LOperand* reg = UseFixed(value, eax); return DefineFixed(new(zone()) LClampIToUint8(reg), eax); } else { - ASSERT(input_rep.IsSmiOrTagged()); - if (CpuFeatures::IsSupported(SSE2)) { - LOperand* reg = UseFixed(value, eax); - // Register allocator doesn't (yet) support allocation of double - // temps. Reserve xmm1 explicitly. - LOperand* temp = FixedTemp(xmm1); - LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp); - return AssignEnvironment(DefineFixed(result, eax)); - } else { - LOperand* value = UseRegister(instr->value()); - LClampTToUint8NoSSE2* res = - new(zone()) LClampTToUint8NoSSE2(value, TempRegister(), - TempRegister(), TempRegister()); - return AssignEnvironment(DefineFixed(res, ecx)); - } + DCHECK(input_rep.IsSmiOrTagged()); + LOperand* reg = UseFixed(value, eax); + // Register allocator doesn't (yet) support allocation of double + // temps. Reserve xmm1 explicitly. 
+ LOperand* temp = FixedTemp(xmm1); + LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp); + return AssignEnvironment(DefineFixed(result, eax)); } } LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { HValue* value = instr->value(); - ASSERT(value->representation().IsDouble()); + DCHECK(value->representation().IsDouble()); return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); } @@ -2129,9 +2107,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); - LOperand* global_object = UseFixed(instr->global_object(), edx); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadGlobalGeneric* result = - new(zone()) LLoadGlobalGeneric(context, global_object); + new(zone()) LLoadGlobalGeneric(context, global_object, vector); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -2184,8 +2168,13 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = UseFixed(instr->object(), edx); - LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric( + context, object, vector); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -2204,7 +2193,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - ASSERT(instr->key()->representation().IsSmiOrInteger32()); + DCHECK(instr->key()->representation().IsSmiOrInteger32()); ElementsKind elements_kind = instr->elements_kind(); bool clobbers_key = ExternalArrayOpRequiresTemp( instr->key()->representation(), 
elements_kind); @@ -2217,7 +2206,7 @@ LOperand* obj = UseRegisterAtStart(instr->elements()); result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key)); } else { - ASSERT( + DCHECK( (instr->representation().IsInteger32() && !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) || (instr->representation().IsDouble() && @@ -2242,11 +2231,14 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = UseFixed(instr->object(), edx); - LOperand* key = UseFixed(instr->key(), ecx); - + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LLoadKeyedGeneric* result = - new(zone()) LLoadKeyedGeneric(context, object, key); + new(zone()) LLoadKeyedGeneric(context, object, key, vector); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -2266,19 +2258,14 @@ return UseFixed(instr->value(), eax); } - if (!CpuFeatures::IsSafeForSnapshot(SSE2) && - IsDoubleOrFloatElementsKind(elements_kind)) { - return UseRegisterAtStart(instr->value()); - } - return UseRegister(instr->value()); } LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { if (!instr->is_typed_elements()) { - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || + DCHECK(instr->elements()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsInteger32() || instr->key()->representation().IsSmi()); if (instr->value()->representation().IsDouble()) { @@ -2288,7 +2275,7 @@ LOperand* key = UseRegisterOrConstantAtStart(instr->key()); return new(zone()) LStoreKeyed(object, key, val); } else { - ASSERT(instr->value()->representation().IsSmiOrTagged()); + DCHECK(instr->value()->representation().IsSmiOrTagged()); bool needs_write_barrier = 
instr->NeedsWriteBarrier(); LOperand* obj = UseRegister(instr->elements()); @@ -2306,12 +2293,12 @@ } ElementsKind elements_kind = instr->elements_kind(); - ASSERT( + DCHECK( (instr->value()->representation().IsInteger32() && !IsDoubleOrFloatElementsKind(elements_kind)) || (instr->value()->representation().IsDouble() && IsDoubleOrFloatElementsKind(elements_kind))); - ASSERT((instr->is_fixed_typed_array() && + DCHECK((instr->is_fixed_typed_array() && instr->elements()->representation().IsTagged()) || (instr->is_external() && instr->elements()->representation().IsExternal())); @@ -2329,13 +2316,14 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = UseFixed(instr->object(), edx); - LOperand* key = UseFixed(instr->key(), ecx); - LOperand* value = UseFixed(instr->value(), eax); - - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsTagged()); + LOperand* object = UseFixed(instr->object(), + KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LStoreKeyedGeneric* result = new(zone()) LStoreKeyedGeneric(context, object, key, value); @@ -2345,7 +2333,6 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - LOperand* object = UseRegister(instr->object()); if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); @@ -2355,10 +2342,11 @@ new_map_reg, temp_reg); return result; } else { + LOperand* object = UseFixed(instr->object(), eax); 
LOperand* context = UseFixed(instr->context(), esi); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, context, NULL, NULL); - return AssignPointerMap(result); + return MarkAsCall(result, instr); } } @@ -2387,9 +2375,9 @@ ? UseRegister(instr->object()) : UseTempRegister(instr->object()); } else if (is_external_location) { - ASSERT(!is_in_object); - ASSERT(!needs_write_barrier); - ASSERT(!needs_write_barrier_for_map); + DCHECK(!is_in_object); + DCHECK(!needs_write_barrier); + DCHECK(!needs_write_barrier_for_map); obj = UseRegisterOrConstant(instr->object()); } else { obj = needs_write_barrier_for_map @@ -2427,23 +2415,14 @@ // We need a temporary register for write barrier of the map field. LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL; - LInstruction* result = - new(zone()) LStoreNamedField(obj, val, temp, temp_map); - if (!instr->access().IsExternalMemory() && - instr->field_representation().IsHeapObject() && - (val->IsConstantOperand() - ? 
HConstant::cast(instr->value())->HasSmiValue() - : !instr->value()->type().IsHeapObject())) { - result = AssignEnvironment(result); - } - return result; + return new(zone()) LStoreNamedField(obj, val, temp, temp_map); } LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); - LOperand* object = UseFixed(instr->object(), edx); - LOperand* value = UseFixed(instr->value(), eax); + LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister()); LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(context, object, value); @@ -2506,7 +2485,7 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - ASSERT(argument_count_ == 0); + DCHECK(argument_count_ == 0); allocator_->MarkAsOsrEntry(); current_block_->last_environment()->set_ast_id(instr->ast_id()); return AssignEnvironment(new(zone()) LOsrEntry); @@ -2519,11 +2498,11 @@ int spill_index = chunk()->GetParameterStackSlot(instr->index()); return DefineAsSpilled(result, spill_index); } else { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = - info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + info()->code_stub()->GetInterfaceDescriptor(); int index = static_cast<int>(instr->index()); - Register reg = descriptor->GetParameterRegister(index); + Register reg = descriptor->GetEnvironmentParameterRegister(index); return DefineFixed(result, reg); } } @@ -2608,8 +2587,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); } @@ -2632,7 +2609,7 @@ LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall(new(zone()) LStackCheck(context), instr); } else { - 
ASSERT(instr->is_backwards_branch()); + DCHECK(instr->is_backwards_branch()); LOperand* context = UseAny(instr->context()); return AssignEnvironment( AssignPointerMap(new(zone()) LStackCheck(context))); @@ -2642,6 +2619,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), instr->arguments_count(), @@ -2667,7 +2645,7 @@ if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - ASSERT(instr->argument_delta() == -argument_count); + DCHECK(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> @@ -2702,7 +2680,25 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { LOperand* object = UseRegister(instr->object()); LOperand* index = UseTempRegister(instr->index()); - return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); + LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); +} + + +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, esi), instr); } diff -Nru nodejs-0.11.13/deps/v8/src/ia32/lithium-ia32.h nodejs-0.11.15/deps/v8/src/ia32/lithium-ia32.h --- 
nodejs-0.11.13/deps/v8/src/ia32/lithium-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/lithium-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,185 +1,168 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_IA32_LITHIUM_IA32_H_ #define V8_IA32_LITHIUM_IA32_H_ -#include "hydrogen.h" -#include "lithium-allocator.h" -#include "lithium.h" -#include "safepoint-table.h" -#include "utils.h" +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" namespace v8 { namespace internal { +namespace compiler { +class RCodeVisualizer; +} + // Forward declarations. class LCodeGen; -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallJSFunction) \ - V(CallWithDescriptor) \ - V(CallFunction) \ - V(CallNew) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CallStub) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClampTToUint8NoSSE2) \ - V(ClassOfTestAndBranch) \ - V(ClobberDoubles) \ - V(CompareMinusZeroAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(ConstructDouble) \ - V(Context) \ - V(DateField) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleBits) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(FunctionLiteral) \ - V(GetCachedArrayIndex) \ - V(Goto) \ - V(HasCachedArrayIndexAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstanceOf) \ - V(InstanceOfKnownGlobal) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - 
V(IsConstructCallAndBranch) \ - V(IsObjectAndBranch) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ - V(LoadGlobalGeneric) \ - V(LoadKeyed) \ - V(LoadKeyedGeneric) \ - V(LoadNamedField) \ - V(LoadNamedGeneric) \ - V(LoadRoot) \ - V(MapEnumLength) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathExp) \ - V(MathFloor) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(PushArgument) \ - V(RegExpLiteral) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreGlobalCell) \ - V(StoreKeyed) \ - V(StoreKeyedGeneric) \ - V(StoreNamedField) \ - V(StoreNamedGeneric) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(ToFastProperties) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddI) \ + V(AllocateBlockContext) \ + V(Allocate) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallJSFunction) \ + V(CallWithDescriptor) \ + V(CallFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CheckInstanceType) \ + V(CheckMaps) \ + V(CheckMapValue) \ + V(CheckNonSmi) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8) \ + 
V(ClassOfTestAndBranch) \ + V(CompareMinusZeroAndBranch) \ + V(CompareNumericAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpHoleAndBranch) \ + V(CmpMapAndBranch) \ + V(CmpT) \ + V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleBits) \ + V(DoubleToI) \ + V(DoubleToSmi) \ + V(Drop) \ + V(Dummy) \ + V(DummyUse) \ + V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsStringAndBranch) \ + V(IsSmiAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyed) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(LoadRoot) \ + V(MapEnumLength) \ + V(MathAbs) \ + V(MathClz32) \ + V(MathExp) \ + V(MathFloor) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulI) \ + V(NumberTagD) \ + V(NumberTagI) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PushArgument) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyed) \ + V(StoreKeyedGeneric) \ + 
V(StoreNamedField) \ + V(StoreNamedGeneric) \ + V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ V(WrapReceiver) @@ -192,7 +175,7 @@ return mnemonic; \ } \ static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ + DCHECK(instr->Is##type()); \ return reinterpret_cast<L##type*>(instr); \ } @@ -222,7 +205,7 @@ enum Opcode { // Declare a unique enum value for each instruction. #define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter, kNumberOfInstructions #undef DECLARE_OPCODE }; @@ -241,6 +224,9 @@ virtual bool IsControl() const { return false; } + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + void set_environment(LEnvironment* env) { environment_ = env; } LEnvironment* environment() const { return environment_; } bool HasEnvironment() const { return environment_ != NULL; } @@ -260,11 +246,8 @@ // Interface to the register allocator and iterators. bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters() const { - return IsCall() || - // We only have rudimentary X87Stack tracking, thus in general - // cannot handle phi-nodes. - (!CpuFeatures::IsSafeForSnapshot(SSE2) && IsControl()); + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall(); } virtual bool HasResult() const = 0; @@ -272,7 +255,6 @@ bool HasDoubleRegisterResult(); bool HasDoubleRegisterInput(); - bool IsDoubleInput(X87Register reg, LCodeGen* cgen); LOperand* FirstInput() { return InputAt(0); } LOperand* Output() { return HasResult() ? 
result() : NULL; } @@ -283,11 +265,12 @@ void VerifyCall(); #endif + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + private: // Iterator support. friend class InputIterator; - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; friend class TempIterator; virtual int TempCount() = 0; @@ -351,7 +334,7 @@ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; static LGap* cast(LInstruction* instr) { - ASSERT(instr->IsGap()); + DCHECK(instr->IsGap()); return reinterpret_cast<LGap*>(instr); } @@ -397,16 +380,6 @@ }; -class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> { - public: - LClobberDoubles() { ASSERT(!CpuFeatures::IsSafeForSnapshot(SSE2)); } - - virtual bool ClobbersDoubleRegisters() const { return true; } - - DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d") -}; - - class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: explicit LGoto(HBasicBlock* block) : block_(block) { } @@ -417,7 +390,9 @@ virtual bool IsControl() const V8_OVERRIDE { return true; } int block_id() const { return block_->block_id(); } - virtual bool ClobbersDoubleRegisters() const { return false; } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { + return false; + } bool jumps_to_join() const { return block_->predecessors()->length() > 1; } @@ -434,7 +409,7 @@ class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { public: - explicit LDummy() { } + LDummy() {} DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") }; @@ -450,6 +425,7 @@ class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: + virtual bool IsControl() const V8_OVERRIDE { return true; } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") DECLARE_HYDROGEN_ACCESSOR(Deoptimize) }; @@ -744,14 +720,14 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LDivI(LOperand* left, LOperand* right, LOperand* temp) { - 
inputs_[0] = left; - inputs_[1] = right; + LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; temps_[0] = temp; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") @@ -806,6 +782,23 @@ }; +class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: LMulI(LOperand* left, LOperand* right, LOperand* temp) { @@ -866,14 +859,24 @@ temps_[0] = temp; } - LOperand* value() { return inputs_[0]; } LOperand* temp() { return temps_[0]; } + LOperand* value() { return inputs_[0]; } DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) }; +class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFround(LOperand* value) { inputs_[0] = value; } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") +}; + + class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LMathAbs(LOperand* context, LOperand* value) { @@ -1571,7 +1574,7 @@ return parameter_count()->IsConstantOperand(); } LConstantOperand* constant_parameter_count() { - ASSERT(has_constant_parameter_count()); + DCHECK(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } LOperand* parameter_count() { return 
inputs_[2]; } @@ -1594,15 +1597,17 @@ }; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadNamedGeneric(LOperand* context, LOperand* object) { + LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) { inputs_[0] = context; inputs_[1] = object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) @@ -1660,7 +1665,7 @@ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } bool key_is_smi() { return hydrogen()->key()->representation().IsTagged(); } @@ -1683,19 +1688,23 @@ } -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { public: - LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) { + LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key, + LOperand* vector) { inputs_[0] = context; inputs_[1] = obj; inputs_[2] = key; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) }; @@ -1706,15 +1715,18 @@ }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + LLoadGlobalGeneric(LOperand* context, 
LOperand* global_object, + LOperand* vector) { inputs_[0] = context; inputs_[1] = global_object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) @@ -1800,15 +1812,15 @@ }; -class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> { +class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> { public: LStoreCodeEntry(LOperand* function, LOperand* code_object) { inputs_[0] = function; - temps_[0] = code_object; + inputs_[1] = code_object; } LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return temps_[0]; } + LOperand* code_object() { return inputs_[1]; } virtual void PrintDataTo(StringStream* stream); @@ -1879,11 +1891,11 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { public: - LCallWithDescriptor(const CallInterfaceDescriptor* descriptor, - ZoneList<LOperand*>& operands, + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, Zone* zone) - : inputs_(descriptor->environment_length() + 1, zone) { - ASSERT(descriptor->environment_length() + 1 == operands.length()); + : inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); inputs_.AddAll(operands, zone); } @@ -1993,7 +2005,7 @@ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { return save_doubles() == kDontSaveFPRegs; } @@ -2015,15 +2027,13 @@ }; -class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> { +class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: - explicit 
LUint32ToDouble(LOperand* value, LOperand* temp) { + explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") }; @@ -2043,17 +2053,15 @@ }; -class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> { +class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> { public: - LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { + LNumberTagU(LOperand* value, LOperand* temp) { inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; + temps_[0] = temp; } LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") }; @@ -2190,11 +2198,6 @@ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - - Handle<Map> transition() const { return hydrogen()->transition_map(); } - Representation representation() const { - return hydrogen()->field_representation(); - } }; @@ -2245,7 +2248,7 @@ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; @@ -2403,7 +2406,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMaps(LOperand* value) { + explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; } @@ -2464,30 +2467,6 @@ }; -// Truncating conversion from a tagged value to an int32. 
-class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> { - public: - LClampTToUint8NoSSE2(LOperand* unclamped, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { - inputs_[0] = unclamped; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; - } - - LOperand* unclamped() { return inputs_[0]; } - LOperand* scratch() { return temps_[0]; } - LOperand* scratch2() { return temps_[1]; } - LOperand* scratch3() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2, - "clamp-t-to-uint8-nosse2") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) -}; - - class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: explicit LCheckNonSmi(LOperand* value) { @@ -2700,6 +2679,35 @@ }; +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + class LChunkBuilder; class LPlatformChunk V8_FINAL : public LChunk { public: @@ -2730,11 +2738,11 @@ next_block_(NULL), allocator_(allocator) { } + Isolate* isolate() const { return graph_->isolate(); } + // Build the sequence for the graph. LPlatformChunk* Build(); - LInstruction* CheckElideControlInstruction(HControlInstruction* instr); - // Declare methods that deal with the individual node types. 
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2742,6 +2750,7 @@ LInstruction* DoMathFloor(HUnaryMathOperation* instr); LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathFround(HUnaryMathOperation* instr); LInstruction* DoMathAbs(HUnaryMathOperation* instr); LInstruction* DoMathLog(HUnaryMathOperation* instr); LInstruction* DoMathExp(HUnaryMathOperation* instr); @@ -2750,12 +2759,13 @@ LInstruction* DoMathClz32(HUnaryMathOperation* instr); LInstruction* DoDivByPowerOf2I(HDiv* instr); LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HBinaryOperation* instr); + LInstruction* DoDivI(HDiv* instr); LInstruction* DoModByPowerOf2I(HMod* instr); LInstruction* DoModByConstI(HMod* instr); LInstruction* DoModI(HMod* instr); LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); private: enum Status { @@ -2779,7 +2789,6 @@ // Methods for getting operands for Use / Define / Temp. LUnallocated* ToUnallocated(Register reg); LUnallocated* ToUnallocated(XMMRegister reg); - LUnallocated* ToUnallocated(X87Register reg); // Methods for setting up define-use relationships. MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); @@ -2841,7 +2850,6 @@ Register reg); LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, XMMRegister reg); - LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr); // Assigns an environment to an instruction. An instruction which can // deoptimize must have an environment. 
LInstruction* AssignEnvironment(LInstruction* instr); @@ -2862,6 +2870,7 @@ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* current); void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); diff -Nru nodejs-0.11.13/deps/v8/src/ia32/macro-assembler-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/macro-assembler-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/macro-assembler-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/macro-assembler-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "bootstrapper.h" -#include "codegen.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "isolate-inl.h" -#include "runtime.h" -#include "serialize.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -56,7 +33,7 @@ void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8()) { movsx_b(dst, src); } else if (r.IsUInteger8()) { @@ -72,12 +49,17 @@ void MacroAssembler::Store(Register src, const Operand& dst, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { mov_b(dst, src); } else if (r.IsInteger16() || r.IsUInteger16()) { mov_w(dst, src); } else { + if (r.IsHeapObject()) { + AssertNotSmi(src); + } else if (r.IsSmi()) { + AssertSmi(src); + } mov(dst, src); } } @@ -101,7 +83,7 @@ void MacroAssembler::StoreRoot(Register source, Register scratch, Heap::RootListIndex index) { - ASSERT(Heap::RootCanBeWrittenAfterInitialization(index)); + DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); ExternalReference roots_array_start = 
ExternalReference::roots_array_start(isolate()); mov(scratch, Immediate(index)); @@ -123,7 +105,7 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { - ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index)); + DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index)); Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); cmp(with, value); } @@ -131,7 +113,7 @@ void MacroAssembler::CompareRoot(const Operand& with, Heap::RootListIndex index) { - ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index)); + DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index)); Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); cmp(with, value); } @@ -143,7 +125,7 @@ Condition cc, Label* condition_met, Label::Distance condition_met_distance) { - ASSERT(cc == equal || cc == not_equal); + DCHECK(cc == equal || cc == not_equal); if (scratch.is(object)) { and_(scratch, Immediate(~Page::kPageAlignmentMask)); } else { @@ -151,8 +133,8 @@ and_(scratch, object); } // Check that we can use a test_b. - ASSERT(MemoryChunk::IN_FROM_SPACE < 8); - ASSERT(MemoryChunk::IN_TO_SPACE < 8); + DCHECK(MemoryChunk::IN_FROM_SPACE < 8); + DCHECK(MemoryChunk::IN_TO_SPACE < 8); int mask = (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE); // If non-zero, the page belongs to new-space. 
@@ -194,16 +176,16 @@ ret(0); bind(&buffer_overflowed); } else { - ASSERT(and_then == kFallThroughAtEnd); + DCHECK(and_then == kFallThroughAtEnd); j(equal, &done, Label::kNear); } StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(save_fp); + StoreBufferOverflowStub(isolate(), save_fp); CallStub(&store_buffer_overflow); if (and_then == kReturnAtEnd) { ret(0); } else { - ASSERT(and_then == kFallThroughAtEnd); + DCHECK(and_then == kFallThroughAtEnd); bind(&done); } } @@ -247,8 +229,8 @@ void MacroAssembler::SlowTruncateToI(Register result_reg, Register input_reg, int offset) { - DoubleToIStub stub(input_reg, result_reg, offset, true); - call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); + DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); + call(stub.GetCode(), RelocInfo::CODE_TARGET); } @@ -267,49 +249,13 @@ } -void MacroAssembler::TruncateX87TOSToI(Register result_reg) { - sub(esp, Immediate(kDoubleSize)); - fst_d(MemOperand(esp, 0)); - SlowTruncateToI(result_reg, esp, 0); - add(esp, Immediate(kDoubleSize)); -} - - -void MacroAssembler::X87TOSToI(Register result_reg, - MinusZeroMode minus_zero_mode, - Label* conversion_failed, - Label::Distance dst) { - Label done; - sub(esp, Immediate(kPointerSize)); - fld(0); - fist_s(MemOperand(esp, 0)); - fild_s(MemOperand(esp, 0)); - pop(result_reg); - FCmp(); - j(not_equal, conversion_failed, dst); - j(parity_even, conversion_failed, dst); - if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { - test(result_reg, Operand(result_reg)); - j(not_zero, &done, Label::kNear); - // To check for minus zero, we load the value again as float, and check - // if that is still 0. 
- sub(esp, Immediate(kPointerSize)); - fst_s(MemOperand(esp, 0)); - pop(result_reg); - test(result_reg, Operand(result_reg)); - j(not_zero, conversion_failed, dst); - } - bind(&done); -} - - void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, XMMRegister scratch, MinusZeroMode minus_zero_mode, Label* conversion_failed, Label::Distance dst) { - ASSERT(!input_reg.is(scratch)); + DCHECK(!input_reg.is(scratch)); cvttsd2si(result_reg, Operand(input_reg)); Cvtsi2sd(scratch, Operand(result_reg)); ucomisd(scratch, input_reg); @@ -370,8 +316,7 @@ fstp(0); SlowTruncateToI(result_reg, input_reg); } - } else if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope scope(this, SSE2); + } else { movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); cvttsd2si(result_reg, Operand(xmm0)); cmp(result_reg, 0x1); @@ -395,8 +340,6 @@ } else { SlowTruncateToI(result_reg, input_reg); } - } else { - SlowTruncateToI(result_reg, input_reg); } bind(&done); } @@ -408,114 +351,62 @@ MinusZeroMode minus_zero_mode, Label* lost_precision) { Label done; - ASSERT(!temp.is(xmm0)); + DCHECK(!temp.is(xmm0)); cmp(FieldOperand(input_reg, HeapObject::kMapOffset), isolate()->factory()->heap_number_map()); j(not_equal, lost_precision, Label::kNear); - if (CpuFeatures::IsSafeForSnapshot(SSE2)) { - ASSERT(!temp.is(no_xmm_reg)); - CpuFeatureScope scope(this, SSE2); + DCHECK(!temp.is(no_xmm_reg)); - movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); - cvttsd2si(result_reg, Operand(xmm0)); - Cvtsi2sd(temp, Operand(result_reg)); - ucomisd(xmm0, temp); - RecordComment("Deferred TaggedToI: lost precision"); - j(not_equal, lost_precision, Label::kNear); - RecordComment("Deferred TaggedToI: NaN"); - j(parity_even, lost_precision, Label::kNear); - if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { - test(result_reg, Operand(result_reg)); - j(not_zero, &done, Label::kNear); - movmskpd(result_reg, xmm0); - and_(result_reg, 1); - RecordComment("Deferred TaggedToI: minus zero"); - 
j(not_zero, lost_precision, Label::kNear); - } - } else { - // TODO(olivf) Converting a number on the fpu is actually quite slow. We - // should first try a fast conversion and then bailout to this slow case. - Label lost_precision_pop, zero_check; - Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO) - ? &lost_precision_pop : lost_precision; - sub(esp, Immediate(kPointerSize)); - fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); - if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0); - fist_s(MemOperand(esp, 0)); - fild_s(MemOperand(esp, 0)); - FCmp(); - pop(result_reg); - j(not_equal, lost_precision_int, Label::kNear); - j(parity_even, lost_precision_int, Label::kNear); // NaN. - if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { - test(result_reg, Operand(result_reg)); - j(zero, &zero_check, Label::kNear); - fstp(0); - jmp(&done, Label::kNear); - bind(&zero_check); - // To check for minus zero, we load the value again as float, and check - // if that is still 0. - sub(esp, Immediate(kPointerSize)); - fstp_s(Operand(esp, 0)); - pop(result_reg); - test(result_reg, Operand(result_reg)); - j(zero, &done, Label::kNear); - jmp(lost_precision, Label::kNear); - - bind(&lost_precision_pop); - fstp(0); - jmp(lost_precision, Label::kNear); - } + movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); + cvttsd2si(result_reg, Operand(xmm0)); + Cvtsi2sd(temp, Operand(result_reg)); + ucomisd(xmm0, temp); + RecordComment("Deferred TaggedToI: lost precision"); + j(not_equal, lost_precision, Label::kNear); + RecordComment("Deferred TaggedToI: NaN"); + j(parity_even, lost_precision, Label::kNear); + if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { + test(result_reg, Operand(result_reg)); + j(not_zero, &done, Label::kNear); + movmskpd(result_reg, xmm0); + and_(result_reg, 1); + RecordComment("Deferred TaggedToI: minus zero"); + j(not_zero, lost_precision, Label::kNear); } bind(&done); } void MacroAssembler::LoadUint32(XMMRegister dst, - Register src, - 
XMMRegister scratch) { + Register src) { Label done; cmp(src, Immediate(0)); ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias(); - movsd(scratch, Operand::StaticVariable(uint32_bias)); Cvtsi2sd(dst, src); j(not_sign, &done, Label::kNear); - addsd(dst, scratch); + addsd(dst, Operand::StaticVariable(uint32_bias)); bind(&done); } -void MacroAssembler::LoadUint32NoSSE2(Register src) { - Label done; - push(src); - fild_s(Operand(esp, 0)); - cmp(src, Immediate(0)); - j(not_sign, &done, Label::kNear); - ExternalReference uint32_bias = - ExternalReference::address_of_uint32_bias(); - fld_d(Operand::StaticVariable(uint32_bias)); - faddp(1); - bind(&done); - add(esp, Immediate(kPointerSize)); -} - - -void MacroAssembler::RecordWriteArray(Register object, - Register value, - Register index, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { +void MacroAssembler::RecordWriteArray( + Register object, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; // Skip barrier if writing a smi. 
if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); test(value, Immediate(kSmiTagMask)); j(zero, &done); } @@ -527,8 +418,8 @@ lea(dst, Operand(object, index, times_half_pointer_size, FixedArray::kHeaderSize - kHeapObjectTag)); - RecordWrite( - object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + RecordWrite(object, dst, value, save_fp, remembered_set_action, + OMIT_SMI_CHECK, pointers_to_here_check_for_value); bind(&done); @@ -548,7 +439,8 @@ Register dst, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; @@ -560,7 +452,7 @@ // Although the object register is tagged, the offset is relative to the start // of the object, so so offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); + DCHECK(IsAligned(offset, kPointerSize)); lea(dst, FieldOperand(object, offset)); if (emit_debug_code()) { @@ -571,8 +463,8 @@ bind(&ok); } - RecordWrite( - object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + RecordWrite(object, dst, value, save_fp, remembered_set_action, + OMIT_SMI_CHECK, pointers_to_here_check_for_value); bind(&done); @@ -604,41 +496,39 @@ bind(&ok); } - ASSERT(!object.is(value)); - ASSERT(!object.is(address)); - ASSERT(!value.is(address)); + DCHECK(!object.is(value)); + DCHECK(!object.is(address)); + DCHECK(!value.is(address)); AssertNotSmi(object); if (!FLAG_incremental_marking) { return; } - // Count number of write barriers in generated code. - isolate()->counters()->write_barriers_static()->Increment(); - IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + // Compute the address. 
+ lea(address, FieldOperand(object, HeapObject::kMapOffset)); // A single check of the map's pages interesting flag suffices, since it is // only set during incremental collection, and then it's also guaranteed that // the from object's page's interesting flag is also set. This optimization // relies on the fact that maps can never be in new space. - ASSERT(!isolate()->heap()->InNewSpace(*map)); + DCHECK(!isolate()->heap()->InNewSpace(*map)); CheckPageFlagForMap(map, MemoryChunk::kPointersToHereAreInterestingMask, zero, &done, Label::kNear); - // Delay the initialization of |address| and |value| for the stub until it's - // known that the will be needed. Up until this point their values are not - // needed since they are embedded in the operands of instructions that need - // them. - lea(address, FieldOperand(object, HeapObject::kMapOffset)); - mov(value, Immediate(map)); - RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp); + RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET, + save_fp); CallStub(&stub); bind(&done); + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. 
if (emit_debug_code()) { @@ -649,15 +539,17 @@ } -void MacroAssembler::RecordWrite(Register object, - Register address, - Register value, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!object.is(value)); - ASSERT(!object.is(address)); - ASSERT(!value.is(address)); +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!object.is(value)); + DCHECK(!object.is(address)); + DCHECK(!value.is(address)); AssertNotSmi(object); if (remembered_set_action == OMIT_REMEMBERED_SET && @@ -673,10 +565,6 @@ bind(&ok); } - // Count number of write barriers in generated code. - isolate()->counters()->write_barriers_static()->Increment(); - IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); - // First, check if a write barrier is even needed. The tests below // catch stores of Smis and stores into young gen. Label done; @@ -686,12 +574,14 @@ JumpIfSmi(value, &done, Label::kNear); } - CheckPageFlag(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - zero, - &done, - Label::kNear); + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + } CheckPageFlag(object, value, // Used as scratch. MemoryChunk::kPointersFromHereAreInterestingMask, @@ -699,11 +589,16 @@ &done, Label::kNear); - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); CallStub(&stub); bind(&done); + // Count number of write barriers in generated code. 
+ isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { @@ -713,14 +608,12 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { Move(eax, Immediate(0)); mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate()))); - CEntryStub ces(1); - call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); + CEntryStub ces(isolate(), 1); + call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } -#endif void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) { @@ -817,7 +710,6 @@ Register scratch1, XMMRegister scratch2, Label* fail, - bool specialize_for_processor, int elements_offset) { Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; JumpIfSmi(maybe_number, &smi_value, Label::kNear); @@ -836,19 +728,11 @@ bind(¬_nan); ExternalReference canonical_nan_reference = ExternalReference::address_of_canonical_non_hole_nan(); - if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatureScope use_sse2(this, SSE2); - movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); - bind(&have_double_value); - movsd(FieldOperand(elements, key, times_4, - FixedDoubleArray::kHeaderSize - elements_offset), - scratch2); - } else { - fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); - bind(&have_double_value); - fstp_d(FieldOperand(elements, key, times_4, - FixedDoubleArray::kHeaderSize - elements_offset)); - } + movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + movsd(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset), + scratch2); jmp(&done); bind(&maybe_nan); @@ -858,12 +742,7 @@ cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); j(zero, ¬_nan); bind(&is_nan); - if 
(CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatureScope use_sse2(this, SSE2); - movsd(scratch2, Operand::StaticVariable(canonical_nan_reference)); - } else { - fld_d(Operand::StaticVariable(canonical_nan_reference)); - } + movsd(scratch2, Operand::StaticVariable(canonical_nan_reference)); jmp(&have_double_value, Label::kNear); bind(&smi_value); @@ -871,19 +750,10 @@ // Preserve original value. mov(scratch1, maybe_number); SmiUntag(scratch1); - if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatureScope fscope(this, SSE2); - Cvtsi2sd(scratch2, scratch1); - movsd(FieldOperand(elements, key, times_4, - FixedDoubleArray::kHeaderSize - elements_offset), - scratch2); - } else { - push(scratch1); - fild_s(Operand(esp, 0)); - pop(scratch1); - fstp_d(FieldOperand(elements, key, times_4, - FixedDoubleArray::kHeaderSize - elements_offset)); - } + Cvtsi2sd(scratch2, scratch1); + movsd(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset), + scratch2); bind(&done); } @@ -964,16 +834,8 @@ void MacroAssembler::FCmp() { - if (CpuFeatures::IsSupported(CMOV)) { - fucomip(); - fstp(0); - } else { - fucompp(); - push(eax); - fnstsw_ax(); - sahf(); - pop(eax); - } + fucomip(); + fstp(0); } @@ -1045,26 +907,27 @@ } -void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { - if (frame_mode == BUILD_STUB_FRAME) { +void MacroAssembler::StubPrologue() { + push(ebp); // Caller's frame pointer. + mov(ebp, esp); + push(esi); // Callee's context. + push(Immediate(Smi::FromInt(StackFrame::STUB))); +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + PredictableCodeSizeScope predictible_code_size_scope(this, + kNoCodeAgeSequenceLength); + if (code_pre_aging) { + // Pre-age the code. + call(isolate()->builtins()->MarkCodeAsExecutedOnce(), + RelocInfo::CODE_AGE_SEQUENCE); + Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength); + } else { push(ebp); // Caller's frame pointer. 
mov(ebp, esp); push(esi); // Callee's context. - push(Immediate(Smi::FromInt(StackFrame::STUB))); - } else { - PredictableCodeSizeScope predictible_code_size_scope(this, - kNoCodeAgeSequenceLength); - if (isolate()->IsCodePreAgingActive()) { - // Pre-age the code. - call(isolate()->builtins()->MarkCodeAsExecutedOnce(), - RelocInfo::CODE_AGE_SEQUENCE); - Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength); - } else { - push(ebp); // Caller's frame pointer. - mov(ebp, esp); - push(esi); // Callee's context. - push(edi); // Callee's JS function. - } + push(edi); // Callee's JS function. } } @@ -1094,14 +957,14 @@ void MacroAssembler::EnterExitFramePrologue() { // Set up the frame structure on the stack. - ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); - ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); - ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); + DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); + DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); + DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); push(ebp); mov(ebp, esp); // Reserve room for entry stack pointer and push the code object. - ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); + DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize); push(Immediate(0)); // Saved entry sp, patched before call. push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot. @@ -1116,11 +979,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { // Optionally save all XMM registers. 
if (save_doubles) { - CpuFeatureScope scope(this, SSE2); - int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; + int space = XMMRegister::kMaxNumRegisters * kDoubleSize + + argc * kPointerSize; sub(esp, Immediate(space)); const int offset = -2 * kPointerSize; - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); } @@ -1129,9 +992,9 @@ } // Get the required frame alignment for the OS. - const int kFrameAlignment = OS::ActivationFrameAlignment(); + const int kFrameAlignment = base::OS::ActivationFrameAlignment(); if (kFrameAlignment > 0) { - ASSERT(IsPowerOf2(kFrameAlignment)); + DCHECK(IsPowerOf2(kFrameAlignment)); and_(esp, -kFrameAlignment); } @@ -1162,9 +1025,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { // Optionally restore all XMM registers. if (save_doubles) { - CpuFeatureScope scope(this, SSE2); const int offset = -2 * kPointerSize; - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); } @@ -1357,9 +1219,9 @@ Label* miss) { Label same_contexts; - ASSERT(!holder_reg.is(scratch1)); - ASSERT(!holder_reg.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); + DCHECK(!holder_reg.is(scratch1)); + DCHECK(!holder_reg.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); // Load current lexical context from the stack frame. mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -1418,13 +1280,13 @@ // Compute the hash code from the untagged key. 
This must be kept in sync with -// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in // code-stub-hydrogen.cc // // Note: r0 will contain hash code void MacroAssembler::GetNumberHash(Register r0, Register scratch) { // Xor original key with a seed. - if (Serializer::enabled()) { + if (serializer_enabled()) { ExternalReference roots_array_start = ExternalReference::roots_array_start(isolate()); mov(scratch, Immediate(Heap::kHashSeedRootIndex)); @@ -1505,7 +1367,7 @@ and_(r2, r1); // Scale the index by multiplying by the entry size. - ASSERT(SeededNumberDictionary::kEntrySize == 3); + DCHECK(SeededNumberDictionary::kEntrySize == 3); lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 // Check if the key matches. @@ -1524,7 +1386,7 @@ // Check that the value is a normal propety. const int kDetailsOffset = SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; - ASSERT_EQ(NORMAL, 0); + DCHECK_EQ(NORMAL, 0); test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); j(not_zero, miss); @@ -1545,7 +1407,7 @@ // Just return if allocation top is already known. if ((flags & RESULT_CONTAINS_TOP) != 0) { // No use of scratch if allocation top is provided. - ASSERT(scratch.is(no_reg)); + DCHECK(scratch.is(no_reg)); #ifdef DEBUG // Assert that result actually contains top on entry. cmp(result, Operand::StaticVariable(allocation_top)); @@ -1590,8 +1452,8 @@ Register scratch, Label* gc_required, AllocationFlags flags) { - ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); + DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. 
@@ -1606,7 +1468,7 @@ jmp(gc_required); return; } - ASSERT(!result.is(result_end)); + DCHECK(!result.is(result_end)); // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); @@ -1617,8 +1479,8 @@ // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. if ((flags & DOUBLE_ALIGNMENT) != 0) { - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); Label aligned; test(result, Immediate(kDoubleAlignmentMask)); j(zero, &aligned, Label::kNear); @@ -1654,7 +1516,7 @@ sub(result, Immediate(object_size)); } } else if (tag_result) { - ASSERT(kHeapObjectTag == 1); + DCHECK(kHeapObjectTag == 1); inc(result); } } @@ -1669,7 +1531,7 @@ Register scratch, Label* gc_required, AllocationFlags flags) { - ASSERT((flags & SIZE_IN_WORDS) == 0); + DCHECK((flags & SIZE_IN_WORDS) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1683,7 +1545,7 @@ jmp(gc_required); return; } - ASSERT(!result.is(result_end)); + DCHECK(!result.is(result_end)); // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); @@ -1694,8 +1556,8 @@ // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. 
if ((flags & DOUBLE_ALIGNMENT) != 0) { - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); Label aligned; test(result, Immediate(kDoubleAlignmentMask)); j(zero, &aligned, Label::kNear); @@ -1716,11 +1578,11 @@ STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1); STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2); STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4); - ASSERT(element_size >= times_2); - ASSERT(kSmiTagSize == 1); + DCHECK(element_size >= times_2); + DCHECK(kSmiTagSize == 1); element_size = static_cast<ScaleFactor>(element_size - 1); } else { - ASSERT(element_count_type == REGISTER_VALUE_IS_INT32); + DCHECK(element_count_type == REGISTER_VALUE_IS_INT32); } lea(result_end, Operand(element_count, element_size, header_size)); add(result_end, result); @@ -1729,7 +1591,7 @@ j(above, gc_required); if ((flags & TAG_OBJECT) != 0) { - ASSERT(kHeapObjectTag == 1); + DCHECK(kHeapObjectTag == 1); inc(result); } @@ -1744,7 +1606,7 @@ Register scratch, Label* gc_required, AllocationFlags flags) { - ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1758,7 +1620,7 @@ jmp(gc_required); return; } - ASSERT(!result.is(result_end)); + DCHECK(!result.is(result_end)); // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); @@ -1769,8 +1631,8 @@ // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. 
if ((flags & DOUBLE_ALIGNMENT) != 0) { - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); Label aligned; test(result, Immediate(kDoubleAlignmentMask)); j(zero, &aligned, Label::kNear); @@ -1795,7 +1657,7 @@ // Tag result if requested. if ((flags & TAG_OBJECT) != 0) { - ASSERT(kHeapObjectTag == 1); + DCHECK(kHeapObjectTag == 1); inc(result); } @@ -1821,14 +1683,18 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1, Register scratch2, - Label* gc_required) { + Label* gc_required, + MutableMode mode) { // Allocate heap number in new space. Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); + Handle<Map> map = mode == MUTABLE + ? isolate()->factory()->mutable_heap_number_map() + : isolate()->factory()->heap_number_map(); + // Set the map. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->heap_number_map())); + mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map)); } @@ -1840,8 +1706,8 @@ Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - ASSERT(kShortSize == 2); + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK(kShortSize == 2); // scratch1 = length * 2 + kObjectAlignmentMask. lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); and_(scratch1, Immediate(~kObjectAlignmentMask)); @@ -1876,9 +1742,9 @@ Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. 
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); mov(scratch1, length); - ASSERT(kCharSize == 1); + DCHECK(kCharSize == 1); add(scratch1, Immediate(kObjectAlignmentMask)); and_(scratch1, Immediate(~kObjectAlignmentMask)); @@ -1909,7 +1775,7 @@ Register scratch1, Register scratch2, Label* gc_required) { - ASSERT(length > 0); + DCHECK(length > 0); // Allocate ASCII string in new space. Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, @@ -1943,32 +1809,13 @@ Register scratch1, Register scratch2, Label* gc_required) { - Label allocate_new_space, install_map; - AllocationFlags flags = TAG_OBJECT; - - ExternalReference high_promotion_mode = ExternalReference:: - new_space_high_promotion_mode_active_address(isolate()); - - test(Operand::StaticVariable(high_promotion_mode), Immediate(1)); - j(zero, &allocate_new_space); - Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); - jmp(&install_map); - - bind(&allocate_new_space); - Allocate(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - flags); + TAG_OBJECT); - bind(&install_map); // Set the map. The other fields are left uninitialized. 
mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(isolate()->factory()->cons_ascii_string_map())); @@ -2016,9 +1863,9 @@ Register length, Register scratch) { Label short_loop, len4, len8, len12, done, short_string; - ASSERT(source.is(esi)); - ASSERT(destination.is(edi)); - ASSERT(length.is(ecx)); + DCHECK(source.is(esi)); + DCHECK(destination.is(edi)); + DCHECK(length.is(ecx)); cmp(length, Immediate(4)); j(below, &short_string, Label::kNear); @@ -2088,7 +1935,7 @@ int field_offset, int bit_index) { bit_index += kSmiTagSize + kSmiShiftSize; - ASSERT(IsPowerOf2(kBitsPerByte)); + DCHECK(IsPowerOf2(kBitsPerByte)); int byte_index = bit_index / kBitsPerByte; int byte_bit_index = bit_index & (kBitsPerByte - 1); test_b(FieldOperand(object, field_offset + byte_index), @@ -2129,27 +1976,27 @@ Register scratch, Label* miss, bool miss_on_bound_function) { - // Check that the receiver isn't a smi. - JumpIfSmi(function, miss); + Label non_instance; + if (miss_on_bound_function) { + // Check that the receiver isn't a smi. + JumpIfSmi(function, miss); - // Check that the function really is a function. - CmpObjectType(function, JS_FUNCTION_TYPE, result); - j(not_equal, miss); + // Check that the function really is a function. + CmpObjectType(function, JS_FUNCTION_TYPE, result); + j(not_equal, miss); - if (miss_on_bound_function) { // If a bound function, go to miss label. mov(scratch, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset, SharedFunctionInfo::kBoundFunction); j(not_zero, miss); - } - // Make sure that the function has an instance prototype. - Label non_instance; - movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset)); - test(scratch, Immediate(1 << Map::kHasNonInstancePrototype)); - j(not_zero, &non_instance); + // Make sure that the function has an instance prototype. 
+ movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset)); + test(scratch, Immediate(1 << Map::kHasNonInstancePrototype)); + j(not_zero, &non_instance); + } // Get the prototype or initial map from the function. mov(result, @@ -2168,12 +2015,15 @@ // Get the prototype from the initial map. mov(result, FieldOperand(result, Map::kPrototypeOffset)); - jmp(&done); - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. - bind(&non_instance); - mov(result, FieldOperand(result, Map::kConstructorOffset)); + if (miss_on_bound_function) { + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + mov(result, FieldOperand(result, Map::kConstructorOffset)); + } // All done. bind(&done); @@ -2181,18 +2031,18 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { - ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. - call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); + DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. + call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } void MacroAssembler::TailCallStub(CodeStub* stub) { - jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); + jmp(stub->GetCode(), RelocInfo::CODE_TARGET); } void MacroAssembler::StubReturn(int argc) { - ASSERT(argc >= 1 && generating_stub()); + DCHECK(argc >= 1 && generating_stub()); ret((argc - 1) * kPointerSize); } @@ -2202,30 +2052,16 @@ } -void MacroAssembler::IllegalOperation(int num_arguments) { - if (num_arguments > 0) { - add(esp, Immediate(num_arguments * kPointerSize)); - } - mov(eax, Immediate(isolate()->factory()->undefined_value())); -} - - void MacroAssembler::IndexFromHash(Register hash, Register index) { // The assert checks that the constants for the maximum number of digits // for an array index cached in the hash field and the number of bits // reserved for it does not conflict. 
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. - and_(hash, String::kArrayIndexValueMask); - STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0); - if (String::kHashShift > kSmiTagSize) { - shr(hash, String::kHashShift - kSmiTagSize); - } if (!index.is(hash)) { mov(index, hash); } + DecodeFieldToSmi<String::ArrayIndexValueBits>(index); } @@ -2235,10 +2071,7 @@ // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. - if (f->nargs >= 0 && f->nargs != num_arguments) { - IllegalOperation(num_arguments); - return; - } + CHECK(f->nargs < 0 || f->nargs == num_arguments); // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we @@ -2246,8 +2079,7 @@ // smarter. Move(eax, Immediate(num_arguments)); mov(ebx, Immediate(ExternalReference(f, isolate()))); - CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles - : kDontSaveFPRegs); + CEntryStub ces(isolate(), 1, save_doubles); CallStub(&ces); } @@ -2257,7 +2089,7 @@ mov(eax, Immediate(num_arguments)); mov(ebx, Immediate(ref)); - CEntryStub stub(1); + CEntryStub stub(isolate(), 1); CallStub(&stub); } @@ -2298,7 +2130,7 @@ void MacroAssembler::CallApiFunctionAndReturn( Register function_address, - Address thunk_address, + ExternalReference thunk_ref, Operand thunk_last_arg, int stack_space, Operand return_value_operand, @@ -2310,7 +2142,7 @@ ExternalReference level_address = ExternalReference::handle_scope_level_address(isolate()); - ASSERT(edx.is(function_address)); + DCHECK(edx.is(function_address)); // Allocate HandleScope in callee-save registers. 
mov(ebx, Operand::StaticVariable(next_address)); mov(edi, Operand::StaticVariable(limit_address)); @@ -2329,17 +2161,15 @@ Label profiler_disabled; Label end_profiler_check; - bool* is_profiling_flag = - isolate()->cpu_profiler()->is_profiling_address(); - STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); - mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag))); + mov(eax, Immediate(ExternalReference::is_profiling_address(isolate()))); cmpb(Operand(eax, 0), 0); j(zero, &profiler_disabled); // Additional parameter is the address of the actual getter function. mov(thunk_last_arg, function_address); // Call the api function. - call(thunk_address, RelocInfo::RUNTIME_ENTRY); + mov(eax, Immediate(thunk_ref)); + call(eax); jmp(&end_profiler_check); bind(&profiler_disabled); @@ -2429,7 +2259,7 @@ bind(&promote_scheduled_exception); { FrameScope frame(this, StackFrame::INTERNAL); - CallRuntime(Runtime::kHiddenPromoteScheduledException, 0); + CallRuntime(Runtime::kPromoteScheduledException, 0); } jmp(&exception_handled); @@ -2451,8 +2281,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) { // Set the entry point and jump to the C entry runtime stub. mov(ebx, Immediate(ext)); - CEntryStub ces(1); - jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1); + jmp(ces.GetCode(), RelocInfo::CODE_TARGET); } @@ -2469,7 +2299,7 @@ *definitely_mismatches = false; Label invoke; if (expected.is_immediate()) { - ASSERT(actual.is_immediate()); + DCHECK(actual.is_immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; } else { @@ -2493,15 +2323,15 @@ // IC mechanism. cmp(expected.reg(), actual.immediate()); j(equal, &invoke); - ASSERT(expected.reg().is(ebx)); + DCHECK(expected.reg().is(ebx)); mov(eax, actual.immediate()); } else if (!expected.reg().is(actual.reg())) { // Both expected and actual are in (different) registers. This // is the case when we invoke functions using call and apply. 
cmp(expected.reg(), actual.reg()); j(equal, &invoke); - ASSERT(actual.reg().is(eax)); - ASSERT(expected.reg().is(ebx)); + DCHECK(actual.reg().is(eax)); + DCHECK(expected.reg().is(ebx)); } } @@ -2536,7 +2366,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); Label done; bool definitely_mismatches = false; @@ -2549,7 +2379,7 @@ call(code); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); jmp(code); } bind(&done); @@ -2562,9 +2392,9 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); - ASSERT(fun.is(edi)); + DCHECK(fun.is(edi)); mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -2582,9 +2412,9 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); - ASSERT(fun.is(edi)); + DCHECK(fun.is(edi)); mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), @@ -2606,7 +2436,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Rely on the assertion to check that the number of provided // arguments match the expected number of arguments. 
Fake a @@ -2629,7 +2459,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(edi)); + DCHECK(!target.is(edi)); // Load the JavaScript builtin function from the builtins object. GetBuiltinFunction(edi, id); // Load the code entry point from the function into the target register. @@ -2742,7 +2572,7 @@ // The registers are pushed starting with the lowest encoding, // which means that lowest encodings are furthest away from // the stack pointer. - ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); + DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters); return kNumSafepointRegisters - reg_code - 1; } @@ -2798,27 +2628,6 @@ } -void MacroAssembler::VerifyX87StackDepth(uint32_t depth) { - // Make sure the floating point stack is either empty or has depth items. - ASSERT(depth <= 7); - // This is very expensive. - ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts); - - // The top-of-stack (tos) is 7 if there is one item pushed. - int tos = (8 - depth) % 8; - const int kTopMask = 0x3800; - push(eax); - fwait(); - fnstsw_ax(); - and_(eax, kTopMask); - shr(eax, 11); - cmp(eax, Immediate(tos)); - Check(equal, kUnexpectedFPUStackDepthAfterInstruction); - fnclex(); - pop(eax); -} - - void MacroAssembler::Drop(int stack_elements) { if (stack_elements > 0) { add(esp, Immediate(stack_elements * kPointerSize)); @@ -2849,7 +2658,6 @@ void MacroAssembler::Move(XMMRegister dst, double val) { // TODO(titzer): recognize double constants with ExternalReferences. 
- CpuFeatureScope scope(this, SSE2); uint64_t int_val = BitCast<uint64_t, double>(val); if (int_val == 0) { xorps(dst, dst); @@ -2872,7 +2680,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { Operand operand = Operand::StaticVariable(ExternalReference(counter)); if (value == 1) { @@ -2885,7 +2693,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { Operand operand = Operand::StaticVariable(ExternalReference(counter)); if (value == 1) { @@ -2900,7 +2708,7 @@ void MacroAssembler::IncrementCounter(Condition cc, StatsCounter* counter, int value) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { Label skip; j(NegateCondition(cc), &skip); @@ -2915,7 +2723,7 @@ void MacroAssembler::DecrementCounter(Condition cc, StatsCounter* counter, int value) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { Label skip; j(NegateCondition(cc), &skip); @@ -2961,10 +2769,10 @@ void MacroAssembler::CheckStackAlignment() { - int frame_alignment = OS::ActivationFrameAlignment(); + int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); Label alignment_as_expected; test(esp, Immediate(frame_alignment_mask)); j(zero, &alignment_as_expected); @@ -2989,7 +2797,6 @@ } #endif - push(eax); push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason)))); // Disable stub call restrictions to always allow calls to abort. 
if (!has_frame_) { @@ -3005,40 +2812,6 @@ } -void MacroAssembler::Throw(BailoutReason reason) { -#ifdef DEBUG - const char* msg = GetBailoutReason(reason); - if (msg != NULL) { - RecordComment("Throw message: "); - RecordComment(msg); - } -#endif - - push(eax); - push(Immediate(Smi::FromInt(reason))); - // Disable stub call restrictions to always allow calls to throw. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } else { - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } - // will not return here - int3(); -} - - -void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) { - Label L; - j(NegateCondition(cc), &L); - Throw(reason); - // will not return here - bind(&L); -} - - void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset)); @@ -3054,7 +2827,7 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst, Register scratch, int power) { - ASSERT(is_uintn(power + HeapNumber::kExponentBias, + DCHECK(is_uintn(power + HeapNumber::kExponentBias, HeapNumber::kExponentBits)); mov(scratch, Immediate(power + HeapNumber::kExponentBias)); movd(dst, scratch); @@ -3109,15 +2882,8 @@ times_twice_pointer_size, FixedArray::kHeaderSize)); JumpIfSmi(probe, not_found); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope fscope(this, SSE2); - movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); - ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); - } else { - fld_d(FieldOperand(object, HeapNumber::kValueOffset)); - fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); - FCmp(); - } + movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); + ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); j(parity_even, not_found); // Bail out if NaN is involved. 
j(not_equal, not_found); // The cache did not contain this value. jmp(&load_result_from_cache, Label::kNear); @@ -3181,7 +2947,7 @@ const int kFlatAsciiStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; // Interleave bits from both instance types and compare them in one check. - ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); + DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); and_(scratch1, kFlatAsciiStringMask); and_(scratch2, kFlatAsciiStringMask); lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); @@ -3240,13 +3006,13 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { - int frame_alignment = OS::ActivationFrameAlignment(); + int frame_alignment = base::OS::ActivationFrameAlignment(); if (frame_alignment != 0) { // Make stack end at alignment and make room for num_arguments words // and the original value of esp. mov(scratch, esp); sub(esp, Immediate((num_arguments + 1) * kPointerSize)); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); and_(esp, -frame_alignment); mov(Operand(esp, num_arguments * kPointerSize), scratch); } else { @@ -3265,14 +3031,14 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) { - ASSERT(has_frame()); + DCHECK(has_frame()); // Check stack alignment. 
if (emit_debug_code()) { CheckStackAlignment(); } call(function); - if (OS::ActivationFrameAlignment() != 0) { + if (base::OS::ActivationFrameAlignment() != 0) { mov(esp, Operand(esp, num_arguments * kPointerSize)); } else { add(esp, Immediate(num_arguments * kPointerSize)); @@ -3280,15 +3046,33 @@ } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6, + Register reg7, + Register reg8) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + if (reg7.is_valid()) regs |= reg7.bit(); + if (reg8.is_valid()) regs |= reg8.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; } +#endif CodePatcher::CodePatcher(byte* address, int size) @@ -3298,17 +3082,17 @@ // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } CodePatcher::~CodePatcher() { // Indicate that code has changed. 
- CPU::FlushICache(address_, size_); + CpuFeatures::FlushICache(address_, size_); // Check that the code was patched as expected. - ASSERT(masm_.pc_ == address_ + size_); - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.pc_ == address_ + size_); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } @@ -3319,7 +3103,7 @@ Condition cc, Label* condition_met, Label::Distance condition_met_distance) { - ASSERT(cc == zero || cc == not_zero); + DCHECK(cc == zero || cc == not_zero); if (scratch.is(object)) { and_(scratch, Immediate(~Page::kPageAlignmentMask)); } else { @@ -3342,12 +3126,13 @@ Condition cc, Label* condition_met, Label::Distance condition_met_distance) { - ASSERT(cc == zero || cc == not_zero); + DCHECK(cc == zero || cc == not_zero); Page* page = Page::FromAddress(map->address()); + DCHECK(!serializer_enabled()); // Serializer cannot match page_flags. ExternalReference reference(ExternalReference::page_flags(page)); // The inlined static address check of the page's flags relies // on maps never being compacted. - ASSERT(!isolate()->heap()->mark_compact_collector()-> + DCHECK(!isolate()->heap()->mark_compact_collector()-> IsOnEvacuationCandidate(*map)); if (mask < (1 << kBitsPerByte)) { test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask)); @@ -3364,7 +3149,7 @@ if (map->CanBeDeprecated()) { mov(scratch, map); mov(scratch, FieldOperand(scratch, Map::kBitField3Offset)); - and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask))); + and_(scratch, Immediate(Map::Deprecated::kMask)); j(not_zero, if_deprecated); } } @@ -3378,7 +3163,7 @@ HasColor(object, scratch0, scratch1, on_black, on_black_near, 1, 0); // kBlackBitPattern. 
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); } @@ -3389,7 +3174,7 @@ Label::Distance has_color_distance, int first_bit, int second_bit) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); GetMarkBits(object, bitmap_scratch, mask_scratch); @@ -3413,7 +3198,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg) { - ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); + DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); and_(bitmap_reg, addr_reg); mov(ecx, addr_reg); @@ -3438,14 +3223,14 @@ Register mask_scratch, Label* value_is_white_and_not_data, Label::Distance distance) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); Label done; @@ -3483,8 +3268,8 @@ bind(¬_heap_number); // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. 
Register instance_type = ecx; @@ -3497,8 +3282,8 @@ Label not_external; // External strings are the only ones with the kExternalStringTag bit // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); test_b(instance_type, kExternalStringTag); j(zero, ¬_external, Label::kNear); mov(length, Immediate(ExternalString::kSize)); @@ -3506,15 +3291,15 @@ bind(¬_external); // Sequential string, either ASCII or UC16. - ASSERT(kOneByteStringTag == 0x04); + DCHECK(kOneByteStringTag == 0x04); and_(length, Immediate(kStringEncodingMask)); xor_(length, Immediate(kStringEncodingMask)); add(length, Immediate(0x04)); // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted // by 2. If we multiply the string length as smi by this, it still // won't overflow a 32-bit value. - ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); - ASSERT(SeqOneByteString::kMaxSize <= + DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); + DCHECK(SeqOneByteString::kMaxSize <= static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); imul(length, FieldOperand(value, String::kLengthOffset)); shr(length, 2 + kSmiTagSize + kSmiShiftSize); @@ -3542,7 +3327,8 @@ void MacroAssembler::EnumLength(Register dst, Register map) { STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); mov(dst, FieldOperand(map, Map::kBitField3Offset)); - and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask))); + and_(dst, Immediate(Map::EnumLengthBits::kMask)); + SmiTag(dst); } @@ -3613,7 +3399,7 @@ Register scratch0, Register scratch1, Label* found) { - ASSERT(!scratch1.is(scratch0)); + DCHECK(!scratch1.is(scratch0)); Factory* factory = isolate()->factory(); Register current = scratch0; Label loop_again; @@ -3625,8 +3411,7 @@ bind(&loop_again); mov(current, FieldOperand(current, HeapObject::kMapOffset)); mov(scratch1, 
FieldOperand(current, Map::kBitField2Offset)); - and_(scratch1, Map::kElementsKindMask); - shr(scratch1, Map::kElementsKindShift); + DecodeField<Map::ElementsKindBits>(scratch1); cmp(scratch1, Immediate(DICTIONARY_ELEMENTS)); j(equal, found); mov(current, FieldOperand(current, Map::kPrototypeOffset)); @@ -3636,8 +3421,8 @@ void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { - ASSERT(!dividend.is(eax)); - ASSERT(!dividend.is(edx)); + DCHECK(!dividend.is(eax)); + DCHECK(!dividend.is(edx)); MultiplierAndShift ms(divisor); mov(eax, Immediate(ms.multiplier())); imul(dividend); diff -Nru nodejs-0.11.13/deps/v8/src/ia32/macro-assembler-ia32.h nodejs-0.11.15/deps/v8/src/ia32/macro-assembler-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/macro-assembler-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/macro-assembler-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_ #define V8_IA32_MACRO_ASSEMBLER_IA32_H_ -#include "assembler.h" -#include "frames.h" -#include "v8globals.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/globals.h" namespace v8 { namespace internal { @@ -41,6 +18,10 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; enum RegisterValueType { @@ -49,7 +30,16 @@ }; -bool AreAliased(Register r1, Register r2, Register r3, Register r4); +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg, + Register reg7 = no_reg, + Register reg8 = no_reg); +#endif // MacroAssembler implements a collection of frequently used macros. @@ -163,7 +153,9 @@ Register scratch, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // As above, but the offset has the tag presubtracted. For use with // Operand(reg, off). 
@@ -174,14 +166,17 @@ Register scratch, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp, remembered_set_action, - smi_check); + smi_check, + pointers_to_here_check_for_value); } // Notify the garbage collector that we wrote a pointer into a fixed array. @@ -196,7 +191,9 @@ Register index, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // For page containing |object| mark region covering |address| // dirty. |object| is the object being stored into, |value| is the @@ -209,7 +206,9 @@ Register value, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // For page containing |object| mark the region covering the object's map // dirty. |object| is the object being stored into, |map| is the Map object @@ -221,15 +220,14 @@ Register scratch2, SaveFPRegsMode save_fp); -#ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support void DebugBreak(); -#endif // Generates function and stub prologue code. - void Prologue(PrologueFrameMode frame_mode); + void StubPrologue(); + void Prologue(bool code_pre_aging); // Enter specific kind of exit frame. 
Expects the number of // arguments in register eax and sets up the number of arguments in @@ -395,7 +393,6 @@ Register scratch1, XMMRegister scratch2, Label* fail, - bool specialize_for_processor, int offset = 0); // Compare an object's map with the specified map. @@ -464,13 +461,10 @@ void TruncateHeapNumberToI(Register result_reg, Register input_reg); void TruncateDoubleToI(Register result_reg, XMMRegister input_reg); - void TruncateX87TOSToI(Register result_reg); void DoubleToI(Register result_reg, XMMRegister input_reg, XMMRegister scratch, MinusZeroMode minus_zero_mode, Label* conversion_failed, Label::Distance dst = Label::kFar); - void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode, - Label* conversion_failed, Label::Distance dst = Label::kFar); void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp, MinusZeroMode minus_zero_mode, Label* lost_precision); @@ -493,8 +487,7 @@ j(not_carry, is_smi); } - void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch); - void LoadUint32NoSSE2(Register src); + void LoadUint32(XMMRegister dst, Register src); // Jump the register contains a smi. 
inline void JumpIfSmi(Register value, @@ -525,10 +518,27 @@ template<typename Field> void DecodeField(Register reg) { static const int shift = Field::kShift; + static const int mask = Field::kMask >> Field::kShift; + if (shift != 0) { + sar(reg, shift); + } + and_(reg, Immediate(mask)); + } + + template<typename Field> + void DecodeFieldToSmi(Register reg) { + static const int shift = Field::kShift; static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize; - sar(reg, shift); + STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); + STATIC_ASSERT(kSmiTag == 0); + if (shift < kSmiTagSize) { + shl(reg, kSmiTagSize - shift); + } else if (shift > kSmiTagSize) { + sar(reg, shift - kSmiTagSize); + } and_(reg, Immediate(mask)); } + void LoadPowerOf2(XMMRegister dst, Register scratch, int power); // Abort execution if argument is not a number, enabled via --debug-code. @@ -565,12 +575,6 @@ // Throw past all JS frames to the top JS entry frame. void ThrowUncatchable(Register value); - // Throw a message string as an exception. - void Throw(BailoutReason reason); - - // Throw a message string as an exception if a condition is not true. - void ThrowIf(Condition cc, BailoutReason reason); - // --------------------------------------------------------------------------- // Inline caching support @@ -643,7 +647,8 @@ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, - Label* gc_required); + Label* gc_required, + MutableMode mode = IMMUTABLE); // Allocate a sequential string. All the header fields of the string object // are initialized. @@ -727,10 +732,6 @@ Label* miss, bool miss_on_bound_function = false); - // Generates code for reporting that an illegal operation has - // occurred. - void IllegalOperation(int num_arguments); - // Picks out an array index from the hash field. // Register use: // hash - holds the index's hash. Clobbered. @@ -809,7 +810,7 @@ // caller-save registers. Restores context. 
On return removes // stack_space * kPointerSize (GCed). void CallApiFunctionAndReturn(Register function_address, - Address thunk_address, + ExternalReference thunk_ref, Operand thunk_last_arg, int stack_space, Operand return_value_operand, @@ -856,13 +857,10 @@ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } Handle<Object> CodeObject() { - ASSERT(!code_object_.is_null()); + DCHECK(!code_object_.is_null()); return code_object_; } - // Insert code to verify that the x87 stack has the specified depth (0-7) - void VerifyX87StackDepth(uint32_t depth); - // Emit code for a truncating division by a constant. The dividend register is // unchanged, the result is in edx, and eax gets clobbered. void TruncatingDiv(Register dividend, int32_t divisor); @@ -1012,13 +1010,6 @@ Register scratch, AllocationFlags flags); - // Helper for PopHandleScope. Allowed to perform a GC and returns - // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and - // possibly returns a failure object indicating an allocation failure. - MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved, - Register scratch, - bool gc_allowed); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. void InNewSpace(Register object, Register scratch, diff -Nru nodejs-0.11.13/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "cpu-profiler.h" -#include "unicode.h" -#include "log.h" -#include "regexp-stack.h" -#include "macro-assembler.h" -#include "regexp-macro-assembler.h" -#include "ia32/regexp-macro-assembler-ia32.h" +#include "src/cpu-profiler.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/unicode.h" + +#include "src/ia32/regexp-macro-assembler-ia32.h" namespace v8 { namespace internal { @@ -114,7 +92,7 @@ success_label_(), backtrack_label_(), exit_label_() { - ASSERT_EQ(0, registers_to_save % 2); + DCHECK_EQ(0, registers_to_save % 2); __ jmp(&entry_label_); // We'll write the entry code later. __ bind(&start_label_); // And then continue from here. } @@ -146,8 +124,8 @@ void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) { - ASSERT(reg >= 0); - ASSERT(reg < num_registers_); + DCHECK(reg >= 0); + DCHECK(reg < num_registers_); if (by != 0) { __ add(register_location(reg), Immediate(by)); } @@ -304,7 +282,7 @@ // Compute new value of character position after the matched part. __ sub(edi, esi); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); // Save registers before calling C function. __ push(esi); __ push(edi); @@ -392,7 +370,7 @@ __ movzx_b(eax, Operand(edx, 0)); __ cmpb_al(Operand(ebx, 0)); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); __ movzx_w(eax, Operand(edx, 0)); __ cmpw_ax(Operand(ebx, 0)); } @@ -461,7 +439,7 @@ uc16 minus, uc16 mask, Label* on_not_equal) { - ASSERT(minus < String::kMaxUtf16CodeUnit); + DCHECK(minus < String::kMaxUtf16CodeUnit); __ lea(eax, Operand(current_character(), -minus)); if (c == 0) { __ test(eax, Immediate(mask)); @@ -570,7 +548,7 @@ __ cmp(current_character(), Immediate('z')); BranchOrBacktrack(above, on_no_match); } - ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. 
+ DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char. ExternalReference word_map = ExternalReference::re_word_character_map(); __ test_b(current_character(), Operand::StaticArray(current_character(), times_1, word_map)); @@ -584,7 +562,7 @@ __ cmp(current_character(), Immediate('z')); __ j(above, &done); } - ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. + DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char. ExternalReference word_map = ExternalReference::re_word_character_map(); __ test_b(current_character(), Operand::StaticArray(current_character(), times_1, word_map)); @@ -611,7 +589,7 @@ } else { Label done; BranchOrBacktrack(below_equal, &done); - ASSERT_EQ(UC16, mode_); + DCHECK_EQ(UC16, mode_); // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. @@ -969,8 +947,8 @@ Label* on_end_of_input, bool check_bounds, int characters) { - ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. - ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works) + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works) if (check_bounds) { CheckPosition(cp_offset + characters - 1, on_end_of_input); } @@ -1032,7 +1010,7 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) { - ASSERT(register_index >= num_saved_registers_); // Reserved for positions! + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! 
__ mov(register_location(register_index), Immediate(to)); } @@ -1055,7 +1033,7 @@ void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) { - ASSERT(reg_from <= reg_to); + DCHECK(reg_from <= reg_to); __ mov(eax, Operand(ebp, kInputStartMinusOne)); for (int reg = reg_from; reg <= reg_to; reg++) { __ mov(register_location(reg), eax); @@ -1099,7 +1077,8 @@ Code* re_code, Address re_frame) { Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); - if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { isolate->StackOverflow(); return EXCEPTION; } @@ -1122,11 +1101,11 @@ // Current string. bool is_ascii = subject->IsOneByteRepresentationUnderneath(); - ASSERT(re_code->instruction_start() <= *return_address); - ASSERT(*return_address <= + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= re_code->instruction_start() + re_code->instruction_size()); - MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate); + Object* result = isolate->stack_guard()->HandleInterrupts(); if (*code_handle != re_code) { // Return address no longer valid int delta = code_handle->address() - re_code->address(); @@ -1162,7 +1141,7 @@ // be a sequential or external string with the same content. // Update the start and end pointers in the stack frame to the current // location (whether it has actually moved or not). - ASSERT(StringShape(*subject_tmp).IsSequential() || + DCHECK(StringShape(*subject_tmp).IsSequential() || StringShape(*subject_tmp).IsExternal()); // The original start address of the characters to match. 
@@ -1194,7 +1173,7 @@ Operand RegExpMacroAssemblerIA32::register_location(int register_index) { - ASSERT(register_index < (1<<30)); + DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1248,7 +1227,7 @@ void RegExpMacroAssemblerIA32::Push(Register source) { - ASSERT(!source.is(backtrack_stackpointer())); + DCHECK(!source.is(backtrack_stackpointer())); // Notice: This updates flags, unlike normal Push. __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), source); @@ -1263,7 +1242,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) { - ASSERT(!target.is(backtrack_stackpointer())); + DCHECK(!target.is(backtrack_stackpointer())); __ mov(target, Operand(backtrack_stackpointer(), 0)); // Notice: This updates flags, unlike normal Pop. __ add(backtrack_stackpointer(), Immediate(kPointerSize)); @@ -1305,16 +1284,16 @@ } else if (characters == 2) { __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset)); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset)); } } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); if (characters == 2) { __ mov(current_character(), Operand(esi, edi, times_1, cp_offset * sizeof(uc16))); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset * sizeof(uc16))); } diff -Nru nodejs-0.11.13/deps/v8/src/ia32/regexp-macro-assembler-ia32.h nodejs-0.11.15/deps/v8/src/ia32/regexp-macro-assembler-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/regexp-macro-assembler-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/regexp-macro-assembler-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_ #define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_ -#include "ia32/assembler-ia32.h" -#include "ia32/assembler-ia32-inl.h" -#include "macro-assembler.h" +#include "src/ia32/assembler-ia32.h" +#include "src/ia32/assembler-ia32-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/ia32/simulator-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/simulator-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/simulator-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/simulator-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Since there is no simulator for the ia32 architecture this file is empty. diff -Nru nodejs-0.11.13/deps/v8/src/ia32/simulator-ia32.h nodejs-0.11.15/deps/v8/src/ia32/simulator-ia32.h --- nodejs-0.11.13/deps/v8/src/ia32/simulator-ia32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/simulator-ia32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IA32_SIMULATOR_IA32_H_ #define V8_IA32_SIMULATOR_IA32_H_ -#include "allocation.h" +#include "src/allocation.h" namespace v8 { namespace internal { @@ -48,9 +25,6 @@ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8)) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - (reinterpret_cast<TryCatch*>(try_catch_address)) - // The stack limit beyond which we will throw stack overflow errors in // generated code. Because generated code on ia32 uses the C stack, we // just use the C stack limit. diff -Nru nodejs-0.11.13/deps/v8/src/ia32/stub-cache-ia32.cc nodejs-0.11.15/deps/v8/src/ia32/stub-cache-ia32.cc --- nodejs-0.11.13/deps/v8/src/ia32/stub-cache-ia32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ia32/stub-cache-ia32.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_IA32 -#include "ic-inl.h" -#include "codegen.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -137,14 +114,11 @@ } -void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, - Label* miss_label, - Register receiver, - Handle<Name> name, - Register scratch0, - Register scratch1) { - ASSERT(name->IsUniqueName()); - ASSERT(!receiver.is(scratch0)); +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(name->IsUniqueName()); + DCHECK(!receiver.is(scratch0)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1); __ IncrementCounter(counters->negative_lookups_miss(), 1); @@ -196,22 +170,22 @@ // Assert that code is valid. The multiplying code relies on the entry size // being 12. - ASSERT(sizeof(Entry) == 12); + DCHECK(sizeof(Entry) == 12); // Assert the flags do not name a specific type. - ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); // Assert that there are no register conflicts. - ASSERT(!scratch.is(receiver)); - ASSERT(!scratch.is(name)); - ASSERT(!extra.is(receiver)); - ASSERT(!extra.is(name)); - ASSERT(!extra.is(scratch)); + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); // Assert scratch and extra registers are valid, and extra2/3 are unused. 
- ASSERT(!scratch.is(no_reg)); - ASSERT(extra2.is(no_reg)); - ASSERT(extra3.is(no_reg)); + DCHECK(!scratch.is(no_reg)); + DCHECK(extra2.is(no_reg)); + DCHECK(extra3.is(no_reg)); Register offset = scratch; scratch = no_reg; @@ -228,10 +202,10 @@ __ xor_(offset, flags); // We mask out the last two bits because they are not part of the hash and // they are always 01 for maps. Also in the two 'and' instructions below. - __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize); + __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); // ProbeTable expects the offset to be pointer scaled, which it is, because // the heap object tag size is 2 and the pointer size log 2 is also 2. - ASSERT(kHeapObjectTagSize == kPointerSizeLog2); + DCHECK(kCacheIndexShift == kPointerSizeLog2); // Probe the primary table. ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra); @@ -240,10 +214,10 @@ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(offset, flags); - __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize); + __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); __ sub(offset, name); __ add(offset, Immediate(flags)); - __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize); + __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift); // Probe the secondary table. ProbeTable( @@ -256,21 +230,8 @@ } -void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype) { - __ LoadGlobalFunction(index, prototype); - __ LoadGlobalFunctionInitialMap(prototype, prototype); - // Load the prototype from the initial map. 
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset)); -} - - -void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( - MacroAssembler* masm, - int index, - Register prototype, - Label* miss) { +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { // Get the global function with the given index. Handle<JSFunction> function( JSFunction::cast(masm->isolate()->native_context()->get(index))); @@ -289,65 +250,28 @@ } -void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, - Register receiver, - Register scratch, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss_label); - - // Check that the object is a JS array. - __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); - __ j(not_equal, miss_label); - - // Load length directly from the JS array. - __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset)); - __ ret(0); -} - - -void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, - Register receiver, - Register scratch1, - Register scratch2, - Label* miss_label) { +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register scratch1, + Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); __ mov(eax, scratch1); __ ret(0); } -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - bool inobject, - int index, - Representation representation) { - ASSERT(!representation.IsDouble()); - int offset = index * kPointerSize; - if (!inobject) { - // Calculate the offset into the properties array. 
- offset = offset + FixedArray::kHeaderSize; - __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset)); - src = dst; - } - __ mov(dst, FieldOperand(src, offset)); -} - - static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj) { - STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); - STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); - STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); - STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); - STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); __ push(name); Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); - ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); Register scratch = name; __ mov(scratch, Immediate(interceptor)); __ push(scratch); @@ -364,9 +288,8 @@ Handle<JSObject> holder_obj, IC::UtilityId id) { PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallExternalReference( - ExternalReference(IC_Utility(id), masm->isolate()), - StubCache::kInterceptorArgsLength); + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); } @@ -374,14 +297,10 @@ // This function uses push() to generate smaller, faster code than // the version above. It is an optimization that should will be removed // when api call ICs are generated in hydrogen. 
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - Handle<Map> receiver_map, - Register receiver, - Register scratch_in, - bool is_store, - int argc, - Register* values) { +void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch_in, + bool is_store, int argc, Register* values) { // Copy return value. __ pop(scratch_in); // receiver @@ -389,13 +308,13 @@ // Write the arguments to stack frame. for (int i = 0; i < argc; i++) { Register arg = values[argc-1-i]; - ASSERT(!receiver.is(arg)); - ASSERT(!scratch_in.is(arg)); + DCHECK(!receiver.is(arg)); + DCHECK(!scratch_in.is(arg)); __ push(arg); } __ push(scratch_in); // Stack now matches JSFunction abi. - ASSERT(optimization.is_simple_api_call()); + DCHECK(optimization.is_simple_api_call()); // Abi for CallApiFunctionStub. Register callee = eax; @@ -446,34 +365,22 @@ __ mov(api_function_address, Immediate(function_address)); // Jump to stub. - CallApiFunctionStub stub(is_store, call_data_undefined, argc); + CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); __ TailCallStub(&stub); } -void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { - if (!label->is_unused()) { - __ bind(label); - __ mov(this->name(), Immediate(name)); - } -} - - // Generate code to check that a global property cell is empty. Create // the property cell at compilation time if no cell exists for the // property. 
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, - Handle<JSGlobalObject> global, - Handle<Name> name, - Register scratch, - Label* miss) { +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name); - ASSERT(cell->value()->IsTheHole()); + DCHECK(cell->value()->IsTheHole()); Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value(); - if (Serializer::enabled()) { + if (masm->serializer_enabled()) { __ mov(scratch, Immediate(cell)); __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset), Immediate(the_hole)); @@ -484,94 +391,85 @@ } -void StoreStubCompiler::GenerateNegativeHolderLookup( - MacroAssembler* masm, - Handle<JSObject> holder, - Register holder_reg, - Handle<Name> name, - Label* miss) { - if (holder->IsJSGlobalObject()) { - GenerateCheckPropertyCell( - masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); - } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { - GenerateDictionaryNegativeLookup( - masm, miss, holder_reg, name, scratch1(), scratch2()); +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ jmp(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ mov(this->name(), Immediate(name)); } } // Receiver_reg is preserved on jumps to miss_label, but may be destroyed if // store is successful. 
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register unused, - Label* miss_label, - Label* slow) { +void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register unused, Label* miss_label, Label* slow) { int descriptor = transition->LastAdded(); DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); Representation representation = details.representation(); - ASSERT(!representation.IsNone()); + DCHECK(!representation.IsNone()); if (details.type() == CONSTANT) { - Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); + Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); __ CmpObject(value_reg, constant); __ j(not_equal, miss_label); } else if (representation.IsSmi()) { __ JumpIfNotSmi(value_reg, miss_label); } else if (representation.IsHeapObject()) { __ JumpIfSmi(value_reg, miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + if (!it.Done()) { + Label do_store; + while (true) { + __ CompareMap(value_reg, it.Current()); + it.Advance(); + if (it.Done()) { + __ j(not_equal, miss_label); + break; + } + __ j(equal, &do_store, Label::kNear); + } + __ bind(&do_store); + } } else if (representation.IsDouble()) { Label do_store, heap_number; - __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow); + __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE); __ JumpIfNotSmi(value_reg, &heap_number); __ SmiUntag(value_reg); - if (CpuFeatures::IsSupported(SSE2)) { - 
CpuFeatureScope use_sse2(masm, SSE2); - __ Cvtsi2sd(xmm0, value_reg); - } else { - __ push(value_reg); - __ fild_s(Operand(esp, 0)); - __ pop(value_reg); - } + __ Cvtsi2sd(xmm0, value_reg); __ SmiTag(value_reg); __ jmp(&do_store); __ bind(&heap_number); - __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(), - miss_label, DONT_DO_SMI_CHECK); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); - } else { - __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset)); - } + __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label, + DONT_DO_SMI_CHECK); + __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); __ bind(&do_store); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0); - } else { - __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset)); - } + __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0); } - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + // Stub never generated for objects that require access checks. + DCHECK(!transition->is_access_check_needed()); // Perform map transition for the receiver if necessary. if (details.type() == FIELD && - object->map()->unused_property_fields() == 0) { + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { // The properties must be extended before we can store the value. // We jump to a runtime call that extends the properties array. __ pop(scratch1); // Return address. 
@@ -581,9 +479,8 @@ __ push(scratch1); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), - masm->isolate()), - 3, - 1); + isolate()), + 3, 1); return; } @@ -601,7 +498,7 @@ OMIT_SMI_CHECK); if (details.type() == CONSTANT) { - ASSERT(value_reg.is(eax)); + DCHECK(value_reg.is(eax)); __ ret(0); return; } @@ -612,14 +509,14 @@ // Adjust for the number of properties stored in the object. Even in the // face of a transition we can use the old map here because the size of the // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); + index -= transition->inobject_properties(); SmiCheck smi_check = representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; // TODO(verwaest): Share this code as a code stub. if (index < 0) { // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); + int offset = transition->instance_size() + (index * kPointerSize); if (representation.IsDouble()) { __ mov(FieldOperand(receiver_reg, offset), storage_reg); } else { @@ -666,157 +563,44 @@ } // Return the value (register eax). - ASSERT(value_reg.is(eax)); + DCHECK(value_reg.is(eax)); __ ret(0); } -// Both name_reg and receiver_reg are preserved on jumps to miss_label, -// but may be destroyed if store is successful. -void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - int index = lookup->GetFieldIndex().field_index(); - - // Adjust for the number of properties stored in the object. 
Even in the - // face of a transition we can use the old map here because the size of the - // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); - - Representation representation = lookup->representation(); - ASSERT(!representation.IsNone()); - if (representation.IsSmi()) { - __ JumpIfNotSmi(value_reg, miss_label); - } else if (representation.IsHeapObject()) { - __ JumpIfSmi(value_reg, miss_label); - } else if (representation.IsDouble()) { - // Load the double storage. - if (index < 0) { - int offset = object->map()->instance_size() + (index * kPointerSize); - __ mov(scratch1, FieldOperand(receiver_reg, offset)); - } else { - __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); - int offset = index * kPointerSize + FixedArray::kHeaderSize; - __ mov(scratch1, FieldOperand(scratch1, offset)); - } - - // Store the value into the storage. - Label do_store, heap_number; - __ JumpIfNotSmi(value_reg, &heap_number); - __ SmiUntag(value_reg); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ Cvtsi2sd(xmm0, value_reg); - } else { - __ push(value_reg); - __ fild_s(Operand(esp, 0)); - __ pop(value_reg); - } - __ SmiTag(value_reg); - __ jmp(&do_store); - __ bind(&heap_number); - __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(), - miss_label, DONT_DO_SMI_CHECK); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); - } else { - __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset)); - } - __ bind(&do_store); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0); - } else { - __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset)); - } - // Return the value (register eax). 
- ASSERT(value_reg.is(eax)); - __ ret(0); - return; - } - - ASSERT(!representation.IsDouble()); - // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; - if (index < 0) { - // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); - __ mov(FieldOperand(receiver_reg, offset), value_reg); - - if (!representation.IsSmi()) { - // Update the write barrier for the array address. - // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, value_reg); - __ RecordWriteField(receiver_reg, - offset, - name_reg, - scratch1, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } else { - // Write to the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - // Get the properties array (optimistically). - __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ mov(FieldOperand(scratch1, offset), value_reg); - - if (!representation.IsSmi()) { - // Update the write barrier for the array address. - // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, value_reg); - __ RecordWriteField(scratch1, - offset, - name_reg, - receiver_reg, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + Label do_store; + while (true) { + __ CompareMap(value_reg, it.Current()); + it.Advance(); + if (it.Done()) { + __ j(not_equal, miss_label); + break; } + __ j(equal, &do_store, Label::kNear); } + __ bind(&do_store); - // Return the value (register eax). 
- ASSERT(value_reg.is(eax)); - __ ret(0); -} - - -void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { - __ jmp(code, RelocInfo::CODE_TARGET); + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); } -#undef __ -#define __ ACCESS_MASM(masm()) - - -Register StubCompiler::CheckPrototypes(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Register holder_reg, - Register scratch1, - Register scratch2, - Handle<Name> name, - Label* miss, - PrototypeCheckType check) { - Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); // Make sure there's no overlap between holder and object registers. - ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && !scratch2.is(scratch1)); // Keep track of the current object in register reg. @@ -824,10 +608,11 @@ int depth = 0; Handle<JSObject> current = Handle<JSObject>::null(); - if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant()); + if (type()->IsConstant()) + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); Handle<JSObject> prototype = Handle<JSObject>::null(); Handle<Map> current_map = receiver_map; - Handle<Map> holder_map(holder->map()); + Handle<Map> holder_map(holder()->map()); // Traverse the prototype chain and check the maps in the prototype chain for // fast and global objects or do negative lookup for normal objects. 
while (!current_map.is_identical_to(holder_map)) { @@ -835,19 +620,19 @@ // Only global objects and objects that do not require access // checks are allowed in stubs. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); prototype = handle(JSObject::cast(current_map->prototype())); if (current_map->is_dictionary_map() && - !current_map->IsJSGlobalObjectMap() && - !current_map->IsJSGlobalProxyMap()) { + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. if (!name->IsUniqueName()) { - ASSERT(name->IsString()); + DCHECK(name->IsString()); name = factory()->InternalizeString(Handle<String>::cast(name)); } - ASSERT(current.is_null() || - current->property_dictionary()->FindEntry(*name) == + DCHECK(current.is_null() || + current->property_dictionary()->FindEntry(name) == NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, @@ -858,6 +643,11 @@ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); } else { bool in_new_space = heap()->InNewSpace(*prototype); + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. + // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = in_new_space || depth == 1; if (depth != 1 || check == CHECK_ALL_MAPS) { __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK); } @@ -865,6 +655,9 @@ // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). 
if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss); } else if (current_map->IsJSGlobalObjectMap()) { @@ -873,19 +666,16 @@ scratch2, miss); } - if (in_new_space) { + if (load_prototype_from_map) { // Save the map in scratch1 for later. __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); } reg = holder_reg; // From now on the object will be in holder_reg. - if (in_new_space) { - // The prototype is in new space; we cannot store a reference to it - // in the code. Load it from the map. + if (load_prototype_from_map) { __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); } else { - // The prototype is in old space; load it directly. __ mov(reg, prototype); } } @@ -904,7 +694,7 @@ } // Perform security check for access to the global object. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss); @@ -915,7 +705,7 @@ } -void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ jmp(&success); @@ -926,100 +716,21 @@ } -void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ jmp(&success); - GenerateRestoreName(masm(), miss, name); + GenerateRestoreName(miss, name); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } } -Register LoadStubCompiler::CallbackHandlerFrontend( - Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> callback) { - Label miss; - - Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); - - if (!holder->HasFastProperties() && 
!holder->IsJSGlobalObject()) { - ASSERT(!reg.is(scratch2())); - ASSERT(!reg.is(scratch3())); - Register dictionary = scratch1(); - bool must_preserve_dictionary_reg = reg.is(dictionary); - - // Load the properties dictionary. - if (must_preserve_dictionary_reg) { - __ push(dictionary); - } - __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done, pop_and_miss; - NameDictionaryLookupStub::GeneratePositiveLookup(masm(), - &pop_and_miss, - &probe_done, - dictionary, - this->name(), - scratch2(), - scratch3()); - __ bind(&pop_and_miss); - if (must_preserve_dictionary_reg) { - __ pop(dictionary); - } - __ jmp(&miss); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch2 contains the - // index into the dictionary. Check that the value is the callback. - Register index = scratch2(); - const int kElementsStartOffset = - NameDictionary::kHeaderSize + - NameDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ mov(scratch3(), - Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag)); - if (must_preserve_dictionary_reg) { - __ pop(dictionary); - } - __ cmp(scratch3(), callback); - __ j(not_equal, &miss); - } - - HandlerFrontendFooter(name, &miss); - return reg; -} - - -void LoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - if (!reg.is(receiver())) __ mov(receiver(), reg); - if (kind() == Code::LOAD_IC) { - LoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } else { - KeyedLoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } -} - - -void LoadStubCompiler::GenerateLoadCallback( - Register reg, - Handle<ExecutableAccessorInfo> callback) { 
+void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { // Insert additional parameters into the stack frame above return address. - ASSERT(!scratch3().is(reg)); + DCHECK(!scratch3().is(reg)); __ pop(scratch3()); // Get return address to place it below. STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); @@ -1031,7 +742,7 @@ __ push(receiver()); // receiver // Push data from ExecutableAccessorInfo. if (isolate()->heap()->InNewSpace(callback->data())) { - ASSERT(!scratch2().is(reg)); + DCHECK(!scratch2().is(reg)); __ mov(scratch2(), Immediate(callback)); __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset)); } else { @@ -1056,26 +767,23 @@ Address function_address = v8::ToCData<Address>(callback->getter()); __ mov(getter_address, Immediate(function_address)); - CallApiGetterStub stub; + CallApiGetterStub stub(isolate()); __ TailCallStub(&stub); } -void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. 
__ LoadObject(eax, value); __ ret(0); } -void LoadStubCompiler::GenerateLoadInterceptor( - Register holder_reg, - Handle<Object> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Handle<Name> name) { - ASSERT(interceptor_holder->HasNamedInterceptor()); - ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added @@ -1086,10 +794,12 @@ compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { - ExecutableAccessorInfo* callback = - ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); - compile_followup_inline = callback->getter() != NULL && - callback->IsCompatibleReceiver(*object); + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); } } @@ -1097,13 +807,13 @@ // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. 
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); + bool must_perfrom_prototype_check = *holder() != lookup->holder(); bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); @@ -1122,7 +832,7 @@ // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor( - masm(), receiver(), holder_reg, this->name(), interceptor_holder, + masm(), receiver(), holder_reg, this->name(), holder(), IC::kLoadPropertyWithInterceptorOnly); // Check if interceptor provided a value for property. If it's @@ -1150,41 +860,28 @@ // Leave the internal frame. } - GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); + GenerateLoadPostInterceptor(holder_reg, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. __ pop(scratch2()); // save old return address - PushInterceptorArguments(masm(), receiver(), holder_reg, - this->name(), interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); __ push(scratch2()); // restore old return address ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor), isolate()); - __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); } } -void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { - Label success; - // Check that the object is a boolean. 
- __ cmp(object, factory()->true_value()); - __ j(equal, &success); - __ cmp(object, factory()->false_value()); - __ j(not_equal, miss); - __ bind(&success); -} - - -Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, Handle<ExecutableAccessorInfo> callback) { - Register holder_reg = HandlerFrontend( - IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); + Register holder_reg = Frontend(receiver(), name); __ pop(scratch1()); // remove the return address __ push(receiver()); @@ -1208,10 +905,8 @@ #define __ ACCESS_MASM(masm) -void StoreStubCompiler::GenerateStoreViaSetter( - MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, Handle<JSFunction> setter) { // ----------- S t a t e ------------- // -- esp[0] : return address @@ -1227,7 +922,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. __ mov(receiver, - FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ push(receiver); __ push(value()); @@ -1255,8 +950,7 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> object, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( Handle<Name> name) { __ pop(scratch1()); // remove the return address __ push(receiver()); @@ -1265,8 +959,8 @@ __ push(scratch1()); // restore return address // Do tail-call to the runtime system. 
- ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); __ TailCallExternalReference(store_ic_property, 3, 1); // Return the generated code. @@ -1274,23 +968,8 @@ } -void StoreStubCompiler::GenerateStoreArrayLength() { - // Prepare tail call to StoreIC_ArrayLength. - __ pop(scratch1()); // remove the return address - __ push(receiver()); - __ push(value()); - __ push(scratch1()); // restore return address - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), - masm()->isolate()); - __ TailCallExternalReference(ref, 2, 1); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { Label miss; __ JumpIfSmi(receiver(), &miss, Label::kNear); @@ -1311,67 +990,39 @@ TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetICCode( - kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name) { - NonexistentHandlerFrontend(type, last, name); - - // Return undefined if maps of the full prototype chain are still the - // same and no global property with this name contains a value. - __ mov(eax, isolate()->factory()->undefined_value()); - __ ret(0); - - // Return the generated code. - return GetCode(kind(), Code::FAST, name); -} - - -Register* LoadStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3, scratch4. 
- static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg }; - return registers; -} - - -Register* KeyedLoadStubCompiler::registers() { +Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4. - static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg }; + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, ebx, eax, edi, no_reg }; return registers; } -Register StoreStubCompiler::value() { - return eax; -} - - -Register* StoreStubCompiler::registers() { +Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { edx, ecx, ebx, edi, no_reg }; + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + DCHECK(ebx.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, ebx, edi, no_reg }; return registers; } -Register* KeyedStoreStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { edx, ecx, ebx, edi, no_reg }; - return registers; -} +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } #undef __ #define __ ACCESS_MASM(masm) -void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, - Handle<JSFunction> getter) { +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1380,7 +1031,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. 
__ mov(receiver, - FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ push(receiver); ParameterCount actual(0); @@ -1404,29 +1055,26 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadGlobal( - Handle<HeapType> type, - Handle<GlobalObject> global, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete) { +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { Label miss; - HandlerFrontendHeader(type, receiver(), global, name, &miss); + FrontendHeader(receiver(), name, &miss); // Get the value from the cell. - if (Serializer::enabled()) { - __ mov(eax, Immediate(cell)); - __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset)); + Register result = StoreIC::ValueRegister(); + if (masm()->serializer_enabled()) { + __ mov(result, Immediate(cell)); + __ mov(result, FieldOperand(result, PropertyCell::kValueOffset)); } else { - __ mov(eax, Operand::ForCell(cell)); + __ mov(result, Operand::ForCell(cell)); } // Check for deleted property if property can actually be deleted. - if (!is_dont_delete) { - __ cmp(eax, factory()->the_hole_value()); + if (is_configurable) { + __ cmp(result, factory()->the_hole_value()); __ j(equal, &miss); } else if (FLAG_debug_code) { - __ cmp(eax, factory()->the_hole_value()); + __ cmp(result, factory()->the_hole_value()); __ Check(not_equal, kDontDeleteCellsCannotContainTheHole); } @@ -1435,32 +1083,40 @@ // The code above already loads the result into the return register. __ ret(0); - HandlerFrontendFooter(name, &miss); + FrontendFooter(name, &miss); // Return the generated code. 
return GetCode(kind(), Code::NORMAL, name); } -Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( - TypeHandleList* types, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; if (check == PROPERTY && (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - __ cmp(this->name(), Immediate(name)); - __ j(not_equal, &miss); + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ cmp(this->name(), Immediate(name)); + __ j(not_equal, &miss); + } } Label number_case; Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; __ JumpIfSmi(receiver(), smi_target); + // Polymorphic keyed stores may use the map register Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); int receiver_count = types->length(); int number_of_handled_maps = 0; @@ -1471,13 +1127,13 @@ number_of_handled_maps++; __ cmp(map_reg, map); if (type->Is(HeapType::Number())) { - ASSERT(!number_case.is_unused()); + DCHECK(!number_case.is_unused()); __ bind(&number_case); } __ j(equal, handlers->at(current)); } } - ASSERT(number_of_handled_maps != 0); + DCHECK(number_of_handled_maps != 0); __ bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); @@ -1485,7 +1141,7 @@ // Return the generated code. InlineCacheState state = number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetICCode(kind(), type, name, state); + return GetCode(kind(), type, name, state); } @@ -1493,13 +1149,15 @@ #define __ ACCESS_MASM(masm) -void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( +void ElementHandlerCompiler::GenerateLoadDictionaryElement( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- ecx : key // -- edx : receiver // -- esp[0] : return address // ----------------------------------- + DCHECK(edx.is(LoadIC::ReceiverRegister())); + DCHECK(ecx.is(LoadIC::NameRegister())); Label slow, miss; // This stub is meant to be tail-jumped to, the receiver must already diff -Nru nodejs-0.11.13/deps/v8/src/ic.cc nodejs-0.11.15/deps/v8/src/ic.cc --- nodejs-0.11.13/deps/v8/src/ic.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ic.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,52 +1,30 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "accessors.h" -#include "api.h" -#include "arguments.h" -#include "codegen.h" -#include "execution.h" -#include "ic-inl.h" -#include "runtime.h" -#include "stub-cache.h" -#include "v8conversions.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/accessors.h" +#include "src/api.h" +#include "src/arguments.h" +#include "src/codegen.h" +#include "src/conversions.h" +#include "src/execution.h" +#include "src/ic-inl.h" +#include "src/prototype.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { -#ifdef DEBUG char IC::TransitionMarkFromState(IC::State state) { switch (state) { case UNINITIALIZED: return '0'; case PREMONOMORPHIC: return '.'; case MONOMORPHIC: return '1'; - case MONOMORPHIC_PROTOTYPE_FAILURE: return '^'; + case PROTOTYPE_FAILURE: + return '^'; case POLYMORPHIC: return 'P'; case MEGAMORPHIC: return 'N'; case GENERIC: return 'G'; @@ -55,6 +33,9 @@ // computed from the original code - not the patched code. Let // these cases fall through to the unreachable code below. case DEBUG_STUB: break; + // Type-vector-based ICs resolve state to one of the above. 
+ case DEFAULT: + break; } UNREACHABLE(); return 0; @@ -71,57 +52,79 @@ } -void IC::TraceIC(const char* type, - Handle<Object> name) { +#ifdef DEBUG + +#define TRACE_GENERIC_IC(isolate, type, reason) \ + do { \ + if (FLAG_trace_ic) { \ + PrintF("[%s patching generic stub in ", type); \ + JavaScriptFrame::PrintTop(isolate, stdout, false, true); \ + PrintF(" (%s)]\n", reason); \ + } \ + } while (false) + +#else + +#define TRACE_GENERIC_IC(isolate, type, reason) + +#endif // DEBUG + + +void IC::TraceIC(const char* type, Handle<Object> name) { if (FLAG_trace_ic) { Code* new_target = raw_target(); State new_state = new_target->ic_state(); + TraceIC(type, name, state(), new_state); + } +} + + +void IC::TraceIC(const char* type, Handle<Object> name, State old_state, + State new_state) { + if (FLAG_trace_ic) { + Code* new_target = raw_target(); PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type); - StackFrameIterator it(isolate()); - while (it.frame()->fp() != this->fp()) it.Advance(); - StackFrame* raw_frame = it.frame(); - if (raw_frame->is_internal()) { - Code* apply_builtin = isolate()->builtins()->builtin( - Builtins::kFunctionApply); - if (raw_frame->unchecked_code() == apply_builtin) { - PrintF("apply from "); - it.Advance(); - raw_frame = it.frame(); - } + + // TODO(jkummerow): Add support for "apply". 
The logic is roughly: + // marker = [fp_ + kMarkerOffset]; + // if marker is smi and marker.value == INTERNAL and + // the frame's code == builtin(Builtins::kFunctionApply): + // then print "apply from" and advance one frame + + Object* maybe_function = + Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset); + if (maybe_function->IsJSFunction()) { + JSFunction* function = JSFunction::cast(maybe_function); + JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(), + stdout, true); } - JavaScriptFrame::PrintTop(isolate(), stdout, false, true); + ExtraICState extra_state = new_target->extra_ic_state(); - const char* modifier = - GetTransitionMarkModifier( - KeyedStoreIC::GetKeyedAccessStoreMode(extra_state)); - PrintF(" (%c->%c%s)", - TransitionMarkFromState(state()), - TransitionMarkFromState(new_state), - modifier); - name->Print(); + const char* modifier = ""; + if (new_target->kind() == Code::KEYED_STORE_IC) { + modifier = GetTransitionMarkModifier( + KeyedStoreIC::GetKeyedAccessStoreMode(extra_state)); + } + PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state), + TransitionMarkFromState(new_state), modifier); +#ifdef OBJECT_PRINT + OFStream os(stdout); + name->Print(os); +#else + name->ShortPrint(stdout); +#endif PrintF("]\n"); } } -#define TRACE_GENERIC_IC(isolate, type, reason) \ - do { \ - if (FLAG_trace_ic) { \ - PrintF("[%s patching generic stub in ", type); \ - JavaScriptFrame::PrintTop(isolate, stdout, false, true); \ - PrintF(" (%s)]\n", reason); \ - } \ - } while (false) - -#else -#define TRACE_GENERIC_IC(isolate, type, reason) -#endif // DEBUG - -#define TRACE_IC(type, name) \ - ASSERT((TraceIC(type, name), true)) +#define TRACE_IC(type, name) TraceIC(type, name) +#define TRACE_VECTOR_IC(type, name, old_state, new_state) \ + TraceIC(type, name, old_state, new_state) IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate), - target_set_(false) { + target_set_(false), + target_maps_set_(false) { // To improve the 
performance of the (much used) IC code, we unfold a few // levels of the stack frame iteration code. This yields a ~35% speedup when // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag. @@ -151,7 +154,7 @@ StackFrameIterator it(isolate); for (int i = 0; i < depth + 1; i++) it.Advance(); StackFrame* frame = it.frame(); - ASSERT(fp == frame->fp() && pc_address == frame->pc_address()); + DCHECK(fp == frame->fp() && pc_address == frame->pc_address()); #endif fp_ = fp; if (FLAG_enable_ool_constant_pool) { @@ -162,11 +165,11 @@ pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address); target_ = handle(raw_target(), isolate); state_ = target_->ic_state(); + kind_ = target_->kind(); extra_ic_state_ = target_->extra_ic_state(); } -#ifdef ENABLE_DEBUGGER_SUPPORT SharedFunctionInfo* IC::GetSharedFunctionInfo() const { // Compute the JavaScript frame for the frame pointer of this IC // structure. We need this to be able to find the function @@ -192,17 +195,11 @@ Code* IC::GetOriginalCode() const { HandleScope scope(isolate()); Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate()); - ASSERT(Debug::HasDebugInfo(shared)); + DCHECK(Debug::HasDebugInfo(shared)); Code* original_code = Debug::GetDebugInfo(shared)->original_code(); - ASSERT(original_code->IsCode()); + DCHECK(original_code->IsCode()); return original_code; } -#endif - - -static bool HasInterceptorGetter(JSObject* object) { - return !object->GetNamedInterceptor()->getter()->IsUndefined(); -} static bool HasInterceptorSetter(JSObject* object) { @@ -210,150 +207,114 @@ } -static void LookupForRead(Handle<Object> object, - Handle<String> name, - LookupResult* lookup) { - // Skip all the objects with named interceptors, but - // without actual getter. 
- while (true) { - object->Lookup(*name, lookup); - // Besides normal conditions (property not found or it's not - // an interceptor), bail out if lookup is not cacheable: we won't - // be able to IC it anyway and regular lookup should work fine. - if (!lookup->IsInterceptor() || !lookup->IsCacheable()) { - return; - } - - Handle<JSObject> holder(lookup->holder(), lookup->isolate()); - if (HasInterceptorGetter(*holder)) { - return; - } - - holder->LocalLookupRealNamedProperty(*name, lookup); - if (lookup->IsFound()) { - ASSERT(!lookup->IsInterceptor()); - return; - } - - Handle<Object> proto(holder->GetPrototype(), lookup->isolate()); - if (proto->IsNull()) { - ASSERT(!lookup->IsFound()); - return; +static void LookupForRead(LookupIterator* it) { + for (; it->IsFound(); it->Next()) { + switch (it->state()) { + case LookupIterator::NOT_FOUND: + UNREACHABLE(); + case LookupIterator::JSPROXY: + return; + case LookupIterator::INTERCEPTOR: { + // If there is a getter, return; otherwise loop to perform the lookup. + Handle<JSObject> holder = it->GetHolder<JSObject>(); + if (!holder->GetNamedInterceptor()->getter()->IsUndefined()) { + return; + } + break; + } + case LookupIterator::ACCESS_CHECK: + // PropertyHandlerCompiler::CheckPrototypes() knows how to emit + // access checks for global proxies. + if (it->GetHolder<JSObject>()->IsJSGlobalProxy() && + it->HasAccess(v8::ACCESS_GET)) { + break; + } + return; + case LookupIterator::PROPERTY: + if (it->HasProperty()) return; // Yay! + break; } - - object = proto; } } bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver, Handle<String> name) { - if (target()->is_keyed_stub()) { - // Determine whether the failure is due to a name failure. 
- if (!name->IsName()) return false; - Name* stub_name = target()->FindFirstName(); - if (*name != stub_name) return false; - } + if (!IsNameCompatibleWithPrototypeFailure(name)) return false; + Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate()); + maybe_handler_ = target()->FindHandlerForMap(*receiver_map); + + // The current map wasn't handled yet. There's no reason to stay monomorphic, + // *unless* we're moving from a deprecated map to its replacement, or + // to a more general elements kind. + // TODO(verwaest): Check if the current map is actually what the old map + // would transition to. + if (maybe_handler_.is_null()) { + if (!receiver_map->IsJSObjectMap()) return false; + Map* first_map = FirstTargetMap(); + if (first_map == NULL) return false; + Handle<Map> old_map(first_map); + if (old_map->is_deprecated()) return true; + if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(), + receiver_map->elements_kind())) { + return true; + } + return false; + } + + CacheHolderFlag flag; + Handle<Map> ic_holder_map( + GetICCacheHolder(*receiver_type(), isolate(), &flag)); + + DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject()); + DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver()); + DCHECK(flag != kCacheOnPrototypeReceiverIsDictionary); - InlineCacheHolderFlag cache_holder = - Code::ExtractCacheHolderFromFlags(target()->flags()); - - switch (cache_holder) { - case OWN_MAP: - // The stub was generated for JSObject but called for non-JSObject. - // IC::GetCodeCacheHolder is not applicable. - if (!receiver->IsJSObject()) return false; - break; - case PROTOTYPE_MAP: - // IC::GetCodeCacheHolder is not applicable. - if (receiver->GetPrototype(isolate())->IsNull()) return false; - break; - } - - Handle<Map> map( - IC::GetCodeCacheHolder(isolate(), *receiver, cache_holder)->map()); - - // Decide whether the inline cache failed because of changes to the - // receiver itself or changes to one of its prototypes. 
- // - // If there are changes to the receiver itself, the map of the - // receiver will have changed and the current target will not be in - // the receiver map's code cache. Therefore, if the current target - // is in the receiver map's code cache, the inline cache failed due - // to prototype check failure. - int index = map->IndexInCodeCache(*name, *target()); - if (index >= 0) { - map->RemoveFromCodeCache(*name, *target(), index); - // Handlers are stored in addition to the ICs on the map. Remove those, too. - TryRemoveInvalidHandlers(map, name); - return true; - } - - // The stub is not in the cache. We've ruled out all other kinds of failure - // except for proptotype chain changes, a deprecated map, a map that's - // different from the one that the stub expects, elements kind changes, or a - // constant global property that will become mutable. Threat all those - // situations as prototype failures (stay monomorphic if possible). - - // If the IC is shared between multiple receivers (slow dictionary mode), then - // the map cannot be deprecated and the stub invalidated. 
- if (cache_holder == OWN_MAP) { - Map* old_map = target()->FindFirstMap(); - if (old_map == *map) return true; - if (old_map != NULL) { - if (old_map->is_deprecated()) return true; - if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(), - map->elements_kind())) { - return true; - } + if (state() == MONOMORPHIC) { + int index = ic_holder_map->IndexInCodeCache(*name, *target()); + if (index >= 0) { + ic_holder_map->RemoveFromCodeCache(*name, *target(), index); } } if (receiver->IsGlobalObject()) { LookupResult lookup(isolate()); GlobalObject* global = GlobalObject::cast(*receiver); - global->LocalLookupRealNamedProperty(*name, &lookup); + global->LookupOwnRealNamedProperty(name, &lookup); if (!lookup.IsFound()) return false; PropertyCell* cell = global->GetPropertyCell(&lookup); return cell->type()->IsConstant(); } - return false; + return true; } -void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) { - CodeHandleList handlers; - target()->FindHandlers(&handlers); - for (int i = 0; i < handlers.length(); i++) { - Handle<Code> handler = handlers.at(i); - int index = map->IndexInCodeCache(*name, *handler); - if (index >= 0) { - map->RemoveFromCodeCache(*name, *handler, index); - return; - } +bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) { + if (target()->is_keyed_stub()) { + // Determine whether the failure is due to a name failure. 
+ if (!name->IsName()) return false; + Name* stub_name = target()->FindFirstName(); + if (*name != stub_name) return false; } + + return true; } void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) { + receiver_type_ = CurrentTypeOf(receiver, isolate()); if (!name->IsString()) return; - if (state() != MONOMORPHIC) { - if (state() == POLYMORPHIC && receiver->IsHeapObject()) { - TryRemoveInvalidHandlers( - handle(Handle<HeapObject>::cast(receiver)->map()), - Handle<String>::cast(name)); - } - return; - } + if (state() != MONOMORPHIC && state() != POLYMORPHIC) return; if (receiver->IsUndefined() || receiver->IsNull()) return; // Remove the target from the code cache if it became invalid // because of changes in the prototype chain to avoid hitting it // again. - if (TryRemoveInvalidPrototypeDependentStub( - receiver, Handle<String>::cast(name))) { - return MarkMonomorphicPrototypeFailure(); + if (TryRemoveInvalidPrototypeDependentStub(receiver, + Handle<String>::cast(name))) { + MarkPrototypeFailure(name); + return; } // The builtins object is special. 
It only changes when JavaScript @@ -366,52 +327,79 @@ } -Failure* IC::TypeError(const char* type, - Handle<Object> object, - Handle<Object> key) { +MaybeHandle<Object> IC::TypeError(const char* type, + Handle<Object> object, + Handle<Object> key) { HandleScope scope(isolate()); Handle<Object> args[2] = { key, object }; Handle<Object> error = isolate()->factory()->NewTypeError( type, HandleVector(args, 2)); - return isolate()->Throw(*error); + return isolate()->Throw<Object>(error); } -Failure* IC::ReferenceError(const char* type, Handle<String> name) { +MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<Name> name) { HandleScope scope(isolate()); Handle<Object> error = isolate()->factory()->NewReferenceError( type, HandleVector(&name, 1)); - return isolate()->Throw(*error); + return isolate()->Throw<Object>(error); } -static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) { - bool was_uninitialized = - old_state == UNINITIALIZED || old_state == PREMONOMORPHIC; - bool is_uninitialized = - new_state == UNINITIALIZED || new_state == PREMONOMORPHIC; - return (was_uninitialized && !is_uninitialized) ? 1 : - (!was_uninitialized && is_uninitialized) ? 
-1 : 0; +static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state, + int* polymorphic_delta, + int* generic_delta) { + switch (old_state) { + case UNINITIALIZED: + case PREMONOMORPHIC: + if (new_state == UNINITIALIZED || new_state == PREMONOMORPHIC) break; + if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) { + *polymorphic_delta = 1; + } else if (new_state == MEGAMORPHIC || new_state == GENERIC) { + *generic_delta = 1; + } + break; + case MONOMORPHIC: + case POLYMORPHIC: + if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) break; + *polymorphic_delta = -1; + if (new_state == MEGAMORPHIC || new_state == GENERIC) { + *generic_delta = 1; + } + break; + case MEGAMORPHIC: + case GENERIC: + if (new_state == MEGAMORPHIC || new_state == GENERIC) break; + *generic_delta = -1; + if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) { + *polymorphic_delta = 1; + } + break; + case PROTOTYPE_FAILURE: + case DEBUG_STUB: + case DEFAULT: + UNREACHABLE(); + } } -void IC::PostPatching(Address address, Code* target, Code* old_target) { - Isolate* isolate = target->GetHeap()->isolate(); +void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address, + State old_state, State new_state, + bool target_remains_ic_stub) { Code* host = isolate-> inner_pointer_to_code_cache()->GetCacheEntry(address)->code; if (host->kind() != Code::FUNCTION) return; - if (FLAG_type_info_threshold > 0 && - old_target->is_inline_cache_stub() && - target->is_inline_cache_stub()) { - int delta = ComputeTypeInfoCountDelta(old_target->ic_state(), - target->ic_state()); - // Not all Code objects have TypeFeedbackInfo. - if (host->type_feedback_info()->IsTypeFeedbackInfo() && delta != 0) { - TypeFeedbackInfo* info = - TypeFeedbackInfo::cast(host->type_feedback_info()); - info->change_ic_with_type_info_count(delta); - } + if (FLAG_type_info_threshold > 0 && target_remains_ic_stub && + // Not all Code objects have TypeFeedbackInfo. 
+ host->type_feedback_info()->IsTypeFeedbackInfo()) { + int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic. + int generic_delta = 0; // "Generic" here includes megamorphic. + ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta, + &generic_delta); + TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info()); + info->change_ic_with_type_info_count(polymorphic_delta); + info->change_ic_generic_count(generic_delta); } if (host->type_feedback_info()->IsTypeFeedbackInfo()) { TypeFeedbackInfo* info = @@ -426,6 +414,62 @@ } +void IC::PostPatching(Address address, Code* target, Code* old_target) { + // Type vector based ICs update these statistics at a different time because + // they don't always patch on state change. + if (target->kind() == Code::CALL_IC) return; + + Isolate* isolate = target->GetHeap()->isolate(); + State old_state = UNINITIALIZED; + State new_state = UNINITIALIZED; + bool target_remains_ic_stub = false; + if (old_target->is_inline_cache_stub() && target->is_inline_cache_stub()) { + old_state = old_target->ic_state(); + new_state = target->ic_state(); + target_remains_ic_stub = true; + } + + OnTypeFeedbackChanged(isolate, address, old_state, new_state, + target_remains_ic_stub); +} + + +void IC::RegisterWeakMapDependency(Handle<Code> stub) { + if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic && + stub->CanBeWeakStub()) { + DCHECK(!stub->is_weak_stub()); + MapHandleList maps; + stub->FindAllMaps(&maps); + if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) { + Map::AddDependentIC(maps.at(0), stub); + stub->mark_as_weak_stub(); + if (FLAG_enable_ool_constant_pool) { + stub->constant_pool()->set_weak_object_state( + ConstantPoolArray::WEAK_OBJECTS_IN_IC); + } + } + } +} + + +void IC::InvalidateMaps(Code* stub) { + DCHECK(stub->is_weak_stub()); + stub->mark_as_invalidated_weak_stub(); + Isolate* isolate = stub->GetIsolate(); + Heap* heap = isolate->heap(); + Object* undefined = 
heap->undefined_value(); + int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) { + RelocInfo::Mode mode = it.rinfo()->rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT && + it.rinfo()->target_object()->IsMap()) { + it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER); + } + } + CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size()); +} + + void IC::Clear(Isolate* isolate, Address address, ConstantPoolArray* constant_pool) { Code* target = GetTargetAtAddress(address, constant_pool); @@ -442,6 +486,8 @@ return StoreIC::Clear(isolate, address, target, constant_pool); case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(isolate, address, target, constant_pool); + case Code::CALL_IC: + return CallIC::Clear(isolate, address, target, constant_pool); case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target, constant_pool); case Code::COMPARE_NIL_IC: @@ -468,13 +514,21 @@ } +void CallIC::Clear(Isolate* isolate, + Address address, + Code* target, + ConstantPoolArray* constant_pool) { + // Currently, CallIC doesn't have state changes. 
+} + + void LoadIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; - Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC( - Code::LOAD_IC, target->extra_ic_state()); + Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC, + target->extra_ic_state()); SetTargetAtAddress(address, code, constant_pool); } @@ -484,8 +538,8 @@ Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; - Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC( - Code::STORE_IC, target->extra_ic_state()); + Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC, + target->extra_ic_state()); SetTargetAtAddress(address, code, constant_pool); } @@ -506,11 +560,10 @@ Address address, Code* target, ConstantPoolArray* constant_pool) { - ASSERT(target->major_key() == CodeStub::CompareIC); + DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC); CompareIC::State handler_state; Token::Value op; - ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL, - &handler_state, &op); + ICCompareStub::DecodeKey(target->stub_key(), NULL, NULL, &handler_state, &op); // Only clear CompareICs that can retain objects. 
if (handler_state != KNOWN_OBJECT) return; SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool); @@ -518,6 +571,16 @@ } +// static +Handle<Code> KeyedLoadIC::generic_stub(Isolate* isolate) { + if (FLAG_compiled_keyed_generic_loads) { + return KeyedLoadGenericStub(isolate).GetCode(); + } else { + return isolate->builtins()->KeyedLoadIC_Generic(); + } +} + + static bool MigrateDeprecated(Handle<Object> object) { if (!object->IsJSObject()) return false; Handle<JSObject> receiver = Handle<JSObject>::cast(object); @@ -527,57 +590,40 @@ } -MaybeObject* LoadIC::Load(Handle<Object> object, - Handle<String> name) { +MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) { // If the object is undefined or null it's illegal to try to get any // of its properties; throw a TypeError in that case. if (object->IsUndefined() || object->IsNull()) { return TypeError("non_object_property_load", object, name); } - if (FLAG_use_ic) { - // Use specialized code for getting prototype of functions. - if (object->IsJSFunction() && - name->Equals(isolate()->heap()->prototype_string()) && - Handle<JSFunction>::cast(object)->should_have_prototype()) { - Handle<Code> stub; - if (state() == UNINITIALIZED) { - stub = pre_monomorphic_stub(); - } else if (state() == PREMONOMORPHIC) { - FunctionPrototypeStub function_prototype_stub(kind()); - stub = function_prototype_stub.GetCode(isolate()); - } else if (state() != MEGAMORPHIC) { - ASSERT(state() != GENERIC); - stub = megamorphic_stub(); - } - if (!stub.is_null()) { - set_target(*stub); - if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n"); - } - return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object)); - } - } - // Check if the name is trivially convertible to an index and get // the element or char if so. uint32_t index; if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) { // Rewrite to the generic keyed load stub. 
- if (FLAG_use_ic) set_target(*generic_stub()); - Handle<Object> result = - Runtime::GetElementOrCharAt(isolate(), object, index); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *result; + if (FLAG_use_ic) { + set_target(*KeyedLoadIC::generic_stub(isolate())); + TRACE_IC("LoadIC", name); + TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index"); + } + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + Runtime::GetElementOrCharAt(isolate(), object, index), + Object); + return result; } bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic; // Named lookup in the object. - LookupResult lookup(isolate()); - LookupForRead(object, name, &lookup); + LookupIterator it(object, name); + LookupForRead(&it); // If we did not find a property, check if we need to throw an exception. - if (!lookup.IsFound()) { + if (!it.IsFound()) { if (IsUndeclaredGlobal(object)) { return ReferenceError("not_defined", name); } @@ -585,27 +631,24 @@ } // Update inline cache and stub cache. - if (use_ic) UpdateCaches(&lookup, object, name); + if (use_ic) UpdateCaches(&it, object, name); - PropertyAttributes attr; // Get the property. - Handle<Object> result = - Object::GetProperty(object, object, &lookup, name, &attr); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - // If the property is not present, check if we need to throw an - // exception. - if ((lookup.IsInterceptor() || lookup.IsHandler()) && - attr == ABSENT && IsUndeclaredGlobal(object)) { + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), result, Object::GetProperty(&it), Object); + // If the property is not present, check if we need to throw an exception. 
+ if (!it.IsFound() && IsUndeclaredGlobal(object)) { return ReferenceError("not_defined", name); } - return *result; + return result; } static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps, Handle<Map> new_receiver_map) { - ASSERT(!new_receiver_map.is_null()); + DCHECK(!new_receiver_map.is_null()); for (int current = 0; current < receiver_maps->length(); ++current) { if (!receiver_maps->at(current).is_null() && receiver_maps->at(current).is_identical_to(new_receiver_map)) { @@ -617,24 +660,25 @@ } -bool IC::UpdatePolymorphicIC(Handle<HeapType> type, - Handle<String> name, - Handle<Code> code) { +bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) { if (!code->is_handler()) return false; + if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false; + Handle<HeapType> type = receiver_type(); TypeHandleList types; CodeHandleList handlers; - target()->FindAllTypes(&types); + TargetTypes(&types); int number_of_types = types.length(); int deprecated_types = 0; int handler_to_overwrite = -1; for (int i = 0; i < number_of_types; i++) { Handle<HeapType> current_type = types.at(i); - if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) { + if (current_type->IsClass() && + current_type->AsClass()->Map()->is_deprecated()) { // Filter out deprecated maps to ensure their instances get migrated. ++deprecated_types; - } else if (type->IsCurrently(current_type)) { + } else if (type->NowIs(current_type)) { // If the receiver type is already in the polymorphic IC, this indicates // there was a prototoype chain failure. In that case, just overwrite the // handler. 
@@ -642,8 +686,8 @@ } else if (handler_to_overwrite == -1 && current_type->IsClass() && type->IsClass() && - IsTransitionOfMonomorphicTarget(*current_type->AsClass(), - *type->AsClass())) { + IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(), + *type->AsClass()->Map())) { handler_to_overwrite = i; } } @@ -656,18 +700,25 @@ if (!target()->FindHandlers(&handlers, types.length())) return false; number_of_valid_types++; - if (handler_to_overwrite >= 0) { - handlers.Set(handler_to_overwrite, code); - if (!type->IsCurrently(types.at(handler_to_overwrite))) { - types.Set(handler_to_overwrite, type); - } + if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false; + Handle<Code> ic; + if (number_of_valid_types == 1) { + ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code, + extra_ic_state()); } else { - types.Add(type); - handlers.Add(code); + if (handler_to_overwrite >= 0) { + handlers.Set(handler_to_overwrite, code); + if (!type->NowIs(types.at(handler_to_overwrite))) { + types.Set(handler_to_overwrite, type); + } + } else { + types.Add(type); + handlers.Add(code); + } + ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers, + number_of_valid_types, name, + extra_ic_state()); } - - Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC( - kind(), &types, &handlers, number_of_valid_types, name, extra_ic_state()); set_target(*ic); return true; } @@ -676,19 +727,20 @@ Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) { return object->IsJSGlobalObject() ? 
HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate) - : HeapType::OfCurrently(object, isolate); + : HeapType::NowOf(object, isolate); } Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) { if (type->Is(HeapType::Number())) return isolate->factory()->heap_number_map(); - if (type->Is(HeapType::Boolean())) return isolate->factory()->oddball_map(); + if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map(); if (type->IsConstant()) { - return handle(Handle<JSGlobalObject>::cast(type->AsConstant())->map()); + return handle( + Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map()); } - ASSERT(type->IsClass()); - return type->AsClass(); + DCHECK(type->IsClass()); + return type->AsClass()->Map(); } @@ -714,20 +766,18 @@ Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map, Isolate* region); -void IC::UpdateMonomorphicIC(Handle<HeapType> type, - Handle<Code> handler, - Handle<String> name) { - if (!handler->is_handler()) return set_target(*handler); - Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC( - kind(), name, type, handler, extra_ic_state()); +void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) { + DCHECK(handler->is_handler()); + Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic( + kind(), name, receiver_type(), handler, extra_ic_state()); set_target(*ic); } -void IC::CopyICToMegamorphicCache(Handle<String> name) { +void IC::CopyICToMegamorphicCache(Handle<Name> name) { TypeHandleList types; CodeHandleList handlers; - target()->FindAllTypes(&types); + TargetTypes(&types); if (!target()->FindHandlers(&handlers, types.length())) return; for (int i = 0; i < types.length(); i++) { UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i)); @@ -750,28 +800,27 @@ } -void IC::PatchCache(Handle<HeapType> type, - Handle<String> name, - Handle<Code> code) { +void IC::PatchCache(Handle<Name> name, Handle<Code> code) { switch (state()) { case UNINITIALIZED: case PREMONOMORPHIC: - 
case MONOMORPHIC_PROTOTYPE_FAILURE: - UpdateMonomorphicIC(type, code, name); + UpdateMonomorphicIC(code, name); break; - case MONOMORPHIC: // Fall through. + case PROTOTYPE_FAILURE: + case MONOMORPHIC: case POLYMORPHIC: - if (!target()->is_keyed_stub()) { - if (UpdatePolymorphicIC(type, name, code)) break; + if (!target()->is_keyed_stub() || state() == PROTOTYPE_FAILURE) { + if (UpdatePolymorphicIC(name, code)) break; CopyICToMegamorphicCache(name); } set_target(*megamorphic_stub()); // Fall through. case MEGAMORPHIC: - UpdateMegamorphicCache(*type, *name, *code); + UpdateMegamorphicCache(*receiver_type(), *name, *code); break; case DEBUG_STUB: break; + case DEFAULT: case GENERIC: UNREACHABLE(); break; @@ -781,37 +830,50 @@ Handle<Code> LoadIC::initialize_stub(Isolate* isolate, ExtraICState extra_state) { - return isolate->stub_cache()->ComputeLoad(UNINITIALIZED, extra_state); + return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state); +} + + +Handle<Code> LoadIC::megamorphic_stub() { + if (kind() == Code::LOAD_IC) { + return PropertyICCompiler::ComputeLoad(isolate(), MEGAMORPHIC, + extra_ic_state()); + } else { + DCHECK_EQ(Code::KEYED_LOAD_IC, kind()); + return KeyedLoadIC::generic_stub(isolate()); + } } Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate, ExtraICState extra_state) { - return isolate->stub_cache()->ComputeLoad(PREMONOMORPHIC, extra_state); + return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state); } -Handle<Code> LoadIC::megamorphic_stub() { - return isolate()->stub_cache()->ComputeLoad(MEGAMORPHIC, extra_ic_state()); +Handle<Code> KeyedLoadIC::pre_monomorphic_stub(Isolate* isolate) { + return isolate->builtins()->KeyedLoadIC_PreMonomorphic(); } -Handle<Code> LoadIC::SimpleFieldLoad(int offset, - bool inobject, - Representation representation) { +Handle<Code> LoadIC::pre_monomorphic_stub() const { if (kind() == Code::LOAD_IC) { - LoadFieldStub stub(inobject, offset, representation); - return 
stub.GetCode(isolate()); + return LoadIC::pre_monomorphic_stub(isolate(), extra_ic_state()); } else { - KeyedLoadFieldStub stub(inobject, offset, representation); - return stub.GetCode(isolate()); + DCHECK_EQ(Code::KEYED_LOAD_IC, kind()); + return KeyedLoadIC::pre_monomorphic_stub(isolate()); } } -void LoadIC::UpdateCaches(LookupResult* lookup, - Handle<Object> object, - Handle<String> name) { +Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) { + LoadFieldStub stub(isolate(), index); + return stub.GetCode(); +} + + +void LoadIC::UpdateCaches(LookupIterator* lookup, Handle<Object> object, + Handle<Name> name) { if (state() == UNINITIALIZED) { // This is the first time we execute this inline cache. // Set the target to the pre monomorphic stub to delay @@ -821,14 +883,16 @@ return; } - Handle<HeapType> type = CurrentTypeOf(object, isolate()); Handle<Code> code; - if (!lookup->IsCacheable()) { - // Bail out if the result is not cacheable. + if (lookup->state() == LookupIterator::JSPROXY || + lookup->state() == LookupIterator::ACCESS_CHECK) { code = slow_stub(); - } else if (!lookup->IsProperty()) { + } else if (!lookup->IsFound()) { if (kind() == Code::LOAD_IC) { - code = isolate()->stub_cache()->ComputeLoadNonexistent(name, type); + code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(name, + receiver_type()); + // TODO(jkummerow/verwaest): Introduce a builtin that handles this case. + if (code.is_null()) code = slow_stub(); } else { code = slow_stub(); } @@ -836,164 +900,245 @@ code = ComputeHandler(lookup, object, name); } - PatchCache(type, name, code); + PatchCache(name, code); TRACE_IC("LoadIC", name); } void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) { - // Cache code holding map should be consistent with - // GenerateMonomorphicCacheProbe. 
+ if (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC) return; Map* map = *TypeToMap(type, isolate()); isolate()->stub_cache()->Set(name, map, code); } -Handle<Code> IC::ComputeHandler(LookupResult* lookup, - Handle<Object> object, - Handle<String> name, - Handle<Object> value) { - InlineCacheHolderFlag cache_holder = GetCodeCacheForObject(*object); - Handle<HeapObject> stub_holder(GetCodeCacheHolder( - isolate(), *object, cache_holder)); - - Handle<Code> code = isolate()->stub_cache()->FindHandler( - name, handle(stub_holder->map()), kind(), cache_holder, - lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL); +Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> object, + Handle<Name> name, Handle<Object> value) { + bool receiver_is_holder = + object.is_identical_to(lookup->GetHolder<JSObject>()); + CacheHolderFlag flag; + Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder( + *receiver_type(), receiver_is_holder, isolate(), &flag); + + Handle<Code> code = PropertyHandlerCompiler::Find( + name, stub_holder_map, kind(), flag, + lookup->holder_map()->is_dictionary_map() ? Code::NORMAL : Code::FAST); + // Use the cached value if it exists, and if it is different from the + // handler that just missed. if (!code.is_null()) { - return code; + if (!maybe_handler_.is_null() && + !maybe_handler_.ToHandleChecked().is_identical_to(code)) { + return code; + } + if (maybe_handler_.is_null()) { + // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs. + // In MEGAMORPHIC case, check if the handler in the megamorphic stub + // cache (which just missed) is different from the cached handler. 
+ if (state() == MEGAMORPHIC && object->IsHeapObject()) { + Map* map = Handle<HeapObject>::cast(object)->map(); + Code* megamorphic_cached_code = + isolate()->stub_cache()->Get(*name, map, code->flags()); + if (megamorphic_cached_code != *code) return code; + } else { + return code; + } + } } - code = CompileHandler(lookup, object, name, value, cache_holder); - ASSERT(code->is_handler()); + code = CompileHandler(lookup, object, name, value, flag); + DCHECK(code->is_handler()); if (code->type() != Code::NORMAL) { - HeapObject::UpdateMapCodeCache(stub_holder, name, code); + Map::UpdateCodeCache(stub_holder_map, name, code); } return code; } -Handle<Code> LoadIC::CompileHandler(LookupResult* lookup, - Handle<Object> object, - Handle<String> name, - Handle<Object> unused, - InlineCacheHolderFlag cache_holder) { - if (object->IsString() && name->Equals(isolate()->heap()->length_string())) { - int length_index = String::kLengthOffset / kPointerSize; - return SimpleFieldLoad(length_index); - } +Handle<Code> IC::ComputeStoreHandler(LookupResult* lookup, + Handle<Object> object, Handle<Name> name, + Handle<Object> value) { + bool receiver_is_holder = lookup->ReceiverIsHolder(object); + CacheHolderFlag flag; + Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder( + *receiver_type(), receiver_is_holder, isolate(), &flag); - if (object->IsStringWrapper() && - name->Equals(isolate()->heap()->length_string())) { - if (kind() == Code::LOAD_IC) { - StringLengthStub string_length_stub; - return string_length_stub.GetCode(isolate()); - } else { - KeyedStringLengthStub string_length_stub; - return string_length_stub.GetCode(isolate()); + Handle<Code> code = PropertyHandlerCompiler::Find( + name, stub_holder_map, handler_kind(), flag, + lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL); + // Use the cached value if it exists, and if it is different from the + // handler that just missed. 
+ if (!code.is_null()) { + if (!maybe_handler_.is_null() && + !maybe_handler_.ToHandleChecked().is_identical_to(code)) { + return code; + } + if (maybe_handler_.is_null()) { + // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs. + // In MEGAMORPHIC case, check if the handler in the megamorphic stub + // cache (which just missed) is different from the cached handler. + if (state() == MEGAMORPHIC && object->IsHeapObject()) { + Map* map = Handle<HeapObject>::cast(object)->map(); + Code* megamorphic_cached_code = + isolate()->stub_cache()->Get(*name, map, code->flags()); + if (megamorphic_cached_code != *code) return code; + } else { + return code; + } } } - Handle<HeapType> type = CurrentTypeOf(object, isolate()); - Handle<JSObject> holder(lookup->holder()); - LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind()); + code = CompileStoreHandler(lookup, object, name, value, flag); + DCHECK(code->is_handler()); - switch (lookup->type()) { - case FIELD: { - PropertyIndex field = lookup->GetFieldIndex(); - if (object.is_identical_to(holder)) { - return SimpleFieldLoad(field.translate(holder), - field.is_inobject(holder), - lookup->representation()); - } - return compiler.CompileLoadField( - type, holder, name, field, lookup->representation()); - } - case CONSTANT: { - Handle<Object> constant(lookup->GetConstant(), isolate()); - // TODO(2803): Don't compute a stub for cons strings because they cannot - // be embedded into code. 
- if (constant->IsConsString()) break; - return compiler.CompileLoadConstant(type, holder, name, constant); - } - case NORMAL: - if (kind() != Code::LOAD_IC) break; - if (holder->IsGlobalObject()) { - Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder); - Handle<PropertyCell> cell( - global->GetPropertyCell(lookup), isolate()); - Handle<Code> code = compiler.CompileLoadGlobal( - type, global, cell, name, lookup->IsDontDelete()); - // TODO(verwaest): Move caching of these NORMAL stubs outside as well. - Handle<HeapObject> stub_holder(GetCodeCacheHolder( - isolate(), *object, cache_holder)); - HeapObject::UpdateMapCodeCache(stub_holder, name, code); - return code; - } - // There is only one shared stub for loading normalized - // properties. It does not traverse the prototype chain, so the - // property must be found in the object for the stub to be - // applicable. - if (!object.is_identical_to(holder)) break; - return isolate()->builtins()->LoadIC_Normal(); - case CALLBACKS: { - // Use simple field loads for some well-known callback properties. 
- if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - Handle<HeapType> type = IC::MapToType<HeapType>( - handle(receiver->map()), isolate()); - int object_offset; - if (Accessors::IsJSObjectFieldAccessor<HeapType>( - type, name, &object_offset)) { - return SimpleFieldLoad(object_offset / kPointerSize); - } - } + if (code->type() != Code::NORMAL) { + Map::UpdateCodeCache(stub_holder_map, name, code); + } - Handle<Object> callback(lookup->GetCallbackObject(), isolate()); - if (callback->IsExecutableAccessorInfo()) { - Handle<ExecutableAccessorInfo> info = - Handle<ExecutableAccessorInfo>::cast(callback); - if (v8::ToCData<Address>(info->getter()) == 0) break; - if (!info->IsCompatibleReceiver(*object)) break; - return compiler.CompileLoadCallback(type, holder, name, info); - } else if (callback->IsAccessorPair()) { - Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(), - isolate()); - if (!getter->IsJSFunction()) break; - if (holder->IsGlobalObject()) break; - if (!holder->HasFastProperties()) break; - Handle<JSFunction> function = Handle<JSFunction>::cast(getter); - if (!object->IsJSObject() && - !function->IsBuiltin() && - function->shared()->strict_mode() == SLOPPY) { - // Calling sloppy non-builtins with a value as the receiver - // requires boxing. - break; - } - CallOptimization call_optimization(function); - if (call_optimization.is_simple_api_call() && - call_optimization.IsCompatibleReceiver(object, holder)) { - return compiler.CompileLoadCallback( - type, holder, name, call_optimization); - } - return compiler.CompileLoadViaGetter(type, holder, name, function); - } - // TODO(dcarney): Handle correctly. - if (callback->IsDeclaredAccessorInfo()) break; - ASSERT(callback->IsForeign()); - // No IC support for old-style native accessors. 
- break; - } - case INTERCEPTOR: - ASSERT(HasInterceptorGetter(*holder)); - return compiler.CompileLoadInterceptor(type, holder, name); - default: - break; + return code; +} + + +Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup, + Handle<Object> object, Handle<Name> name, + Handle<Object> unused, + CacheHolderFlag cache_holder) { + if (object->IsString() && + Name::Equals(isolate()->factory()->length_string(), name)) { + FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset); + return SimpleFieldLoad(index); } - return slow_stub(); + if (object->IsStringWrapper() && + Name::Equals(isolate()->factory()->length_string(), name)) { + StringLengthStub string_length_stub(isolate()); + return string_length_stub.GetCode(); + } + + // Use specialized code for getting prototype of functions. + if (object->IsJSFunction() && + Name::Equals(isolate()->factory()->prototype_string(), name) && + Handle<JSFunction>::cast(object)->should_have_prototype() && + !Handle<JSFunction>::cast(object)->map()->has_non_instance_prototype()) { + Handle<Code> stub; + FunctionPrototypeStub function_prototype_stub(isolate()); + return function_prototype_stub.GetCode(); + } + + Handle<HeapType> type = receiver_type(); + Handle<JSObject> holder = lookup->GetHolder<JSObject>(); + bool receiver_is_holder = object.is_identical_to(holder); + // -------------- Interceptors -------------- + if (lookup->state() == LookupIterator::INTERCEPTOR) { + DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined()); + NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder, + cache_holder); + return compiler.CompileLoadInterceptor(name); + } + DCHECK(lookup->state() == LookupIterator::PROPERTY); + + // -------------- Accessors -------------- + if (lookup->property_kind() == LookupIterator::ACCESSOR) { + // Use simple field loads for some well-known callback properties. 
+ if (receiver_is_holder) { + DCHECK(object->IsJSObject()); + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + int object_offset; + if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, name, + &object_offset)) { + FieldIndex index = + FieldIndex::ForInObjectOffset(object_offset, receiver->map()); + return SimpleFieldLoad(index); + } + } + + Handle<Object> accessors = lookup->GetAccessors(); + if (accessors->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> info = + Handle<ExecutableAccessorInfo>::cast(accessors); + if (v8::ToCData<Address>(info->getter()) == 0) return slow_stub(); + if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info, + type)) { + return slow_stub(); + } + if (!holder->HasFastProperties()) return slow_stub(); + NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder, + cache_holder); + return compiler.CompileLoadCallback(name, info); + } + if (accessors->IsAccessorPair()) { + Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(), + isolate()); + if (!getter->IsJSFunction()) return slow_stub(); + if (!holder->HasFastProperties()) return slow_stub(); + Handle<JSFunction> function = Handle<JSFunction>::cast(getter); + if (!object->IsJSObject() && !function->IsBuiltin() && + function->shared()->strict_mode() == SLOPPY) { + // Calling sloppy non-builtins with a value as the receiver + // requires boxing. + return slow_stub(); + } + CallOptimization call_optimization(function); + NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder, + cache_holder); + if (call_optimization.is_simple_api_call() && + call_optimization.IsCompatibleReceiver(object, holder)) { + return compiler.CompileLoadCallback(name, call_optimization); + } + return compiler.CompileLoadViaGetter(name, function); + } + // TODO(dcarney): Handle correctly. 
+ DCHECK(accessors->IsDeclaredAccessorInfo()); + return slow_stub(); + } + + // -------------- Dictionary properties -------------- + DCHECK(lookup->property_kind() == LookupIterator::DATA); + if (lookup->property_encoding() == LookupIterator::DICTIONARY) { + if (kind() != Code::LOAD_IC) return slow_stub(); + if (holder->IsGlobalObject()) { + NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder, + cache_holder); + Handle<PropertyCell> cell = lookup->GetPropertyCell(); + Handle<Code> code = + compiler.CompileLoadGlobal(cell, name, lookup->IsConfigurable()); + // TODO(verwaest): Move caching of these NORMAL stubs outside as well. + CacheHolderFlag flag; + Handle<Map> stub_holder_map = + GetHandlerCacheHolder(*type, receiver_is_holder, isolate(), &flag); + Map::UpdateCodeCache(stub_holder_map, name, code); + return code; + } + // There is only one shared stub for loading normalized + // properties. It does not traverse the prototype chain, so the + // property must be found in the object for the stub to be + // applicable. 
+ if (!receiver_is_holder) return slow_stub(); + return isolate()->builtins()->LoadIC_Normal(); + } + + // -------------- Fields -------------- + DCHECK(lookup->property_encoding() == LookupIterator::DESCRIPTOR); + if (lookup->property_details().type() == FIELD) { + FieldIndex field = lookup->GetFieldIndex(); + if (receiver_is_holder) { + return SimpleFieldLoad(field); + } + NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder, + cache_holder); + return compiler.CompileLoadField(name, field); + } + + // -------------- Constant properties -------------- + DCHECK(lookup->property_details().type() == CONSTANT); + if (receiver_is_holder) { + LoadConstantStub stub(isolate(), lookup->GetConstantIndex()); + return stub.GetCode(); + } + NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder, + cache_holder); + return compiler.CompileLoadConstant(name, lookup->GetConstantIndex()); } @@ -1028,19 +1173,13 @@ Handle<Map> receiver_map(receiver->map(), isolate()); MapHandleList target_receiver_maps; - if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) { - // Optimistically assume that ICs that haven't reached the MONOMORPHIC state - // yet will do so and stay there. 
- return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); - } - if (target().is_identical_to(string_stub())) { target_receiver_maps.Add(isolate()->factory()->string_map()); } else { - target()->FindAllMaps(&target_receiver_maps); - if (target_receiver_maps.length() == 0) { - return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); - } + TargetMaps(&target_receiver_maps); + } + if (target_receiver_maps.length() == 0) { + return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map); } // The first time a receiver is seen that is a transitioned version of the @@ -1054,10 +1193,10 @@ IsMoreGeneralElementsKindTransition( target_receiver_maps.at(0)->elements_kind(), receiver->GetElementsKind())) { - return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); + return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map); } - ASSERT(state() != GENERIC); + DCHECK(state() != GENERIC); // Determine the list of receiver maps that this call site has seen, // adding the map that was just encountered. @@ -1075,26 +1214,35 @@ return generic_stub(); } - return isolate()->stub_cache()->ComputeLoadElementPolymorphic( - &target_receiver_maps); + return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps); } -MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) { +MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object, + Handle<Object> key) { if (MigrateDeprecated(object)) { - return Runtime::GetObjectPropertyOrFail(isolate(), object, key); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + Runtime::GetObjectProperty(isolate(), object, key), + Object); + return result; } - MaybeObject* maybe_object = NULL; + Handle<Object> load_handle; Handle<Code> stub = generic_stub(); // Check for non-string values that can be converted into an // internalized string directly or is representable as a smi. 
key = TryConvertKey(key, isolate()); - if (key->IsInternalizedString()) { - maybe_object = LoadIC::Load(object, Handle<String>::cast(key)); - if (maybe_object->IsFailure()) return maybe_object; + if (key->IsInternalizedString() || key->IsSymbol()) { + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + load_handle, + LoadIC::Load(object, Handle<Name>::cast(key)), + Object); } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) { if (object->IsString() && key->IsNumber()) { if (state() == UNINITIALIZED) stub = string_stub(); @@ -1105,7 +1253,7 @@ stub = sloppy_arguments_stub(); } else if (receiver->HasIndexedInterceptor()) { stub = indexed_interceptor_stub(); - } else if (!key->ToSmi()->IsFailure() && + } else if (!Object::ToSmi(isolate(), key).is_null() && (!target().is_identical_to(sloppy_arguments_stub()))) { stub = LoadElementStub(receiver); } @@ -1113,28 +1261,36 @@ } if (!is_target_set()) { - if (*stub == *generic_stub()) { + Code* generic = *generic_stub(); + if (*stub == generic) { TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic"); } set_target(*stub); TRACE_IC("LoadIC", key); } - if (maybe_object != NULL) return maybe_object; - return Runtime::GetObjectPropertyOrFail(isolate(), object, key); + if (!load_handle.is_null()) return load_handle; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + Runtime::GetObjectProperty(isolate(), object, key), + Object); + return result; } -static bool LookupForWrite(Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value, - LookupResult* lookup, - IC* ic) { +static bool LookupForWrite(Handle<Object> object, Handle<Name> name, + Handle<Object> value, LookupResult* lookup, IC* ic) { + // Disable ICs for non-JSObjects for now. 
+ if (!object->IsJSObject()) return false; + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + Handle<JSObject> holder = receiver; - receiver->Lookup(*name, lookup); + receiver->Lookup(name, lookup); if (lookup->IsFound()) { if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) { - receiver->LocalLookupRealNamedProperty(*name, lookup); + receiver->LookupOwnRealNamedProperty(name, lookup); if (!lookup->IsFound()) return false; } @@ -1145,7 +1301,8 @@ // goes into the runtime if access checks are needed, so this is always // safe. if (receiver->IsJSGlobalProxy()) { - return lookup->holder() == receiver->GetPrototype(); + PrototypeIterator iter(lookup->isolate(), receiver); + return lookup->holder() == *PrototypeIterator::GetCurrent(iter); } // Currently normal holders in the prototype chain are not supported. They // would require a runtime positive lookup and verification that the details @@ -1160,9 +1317,7 @@ // chain check. This avoids a double lookup, but requires us to pass in the // receiver when trying to fetch extra information from the transition. receiver->map()->LookupTransition(*holder, *name, lookup); - if (!lookup->IsTransition()) return false; - PropertyDetails target_details = lookup->GetTransitionDetails(); - if (target_details.IsReadOnly()) return false; + if (!lookup->IsTransition() || lookup->IsReadOnly()) return false; // If the value that's being stored does not fit in the field that the // instance would transition to, create a new transition that fits the value. @@ -1170,31 +1325,40 @@ // transition target. // Ensure the instance and its map were migrated before trying to update the // transition target. 
- ASSERT(!receiver->map()->is_deprecated()); - if (!value->FitsRepresentation(target_details.representation())) { + DCHECK(!receiver->map()->is_deprecated()); + if (!lookup->CanHoldValue(value)) { Handle<Map> target(lookup->GetTransitionTarget()); + Representation field_representation = value->OptimalRepresentation(); + Handle<HeapType> field_type = value->OptimalType( + lookup->isolate(), field_representation); Map::GeneralizeRepresentation( target, target->LastAdded(), - value->OptimalRepresentation(), FORCE_FIELD); + field_representation, field_type, FORCE_FIELD); // Lookup the transition again since the transition tree may have changed // entirely by the migration above. receiver->map()->LookupTransition(*holder, *name, lookup); if (!lookup->IsTransition()) return false; - ic->MarkMonomorphicPrototypeFailure(); + if (!ic->IsNameCompatibleWithPrototypeFailure(name)) return false; + ic->MarkPrototypeFailure(name); + return true; } + return true; } -MaybeObject* StoreIC::Store(Handle<Object> object, - Handle<String> name, - Handle<Object> value, - JSReceiver::StoreFromKeyed store_mode) { +MaybeHandle<Object> StoreIC::Store(Handle<Object> object, + Handle<Name> name, + Handle<Object> value, + JSReceiver::StoreFromKeyed store_mode) { + // TODO(verwaest): Let SetProperty do the migration, since storing a property + // might deprecate the current map again, if value does not fit. 
if (MigrateDeprecated(object) || object->IsJSProxy()) { - Handle<Object> result = JSReceiver::SetProperty( - Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode()); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *result; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), result, + Object::SetProperty(object, name, value, strict_mode()), Object); + return result; } // If the object is undefined or null it's illegal to try to set any @@ -1203,37 +1367,36 @@ return TypeError("non_object_property_store", object, name); } - // The length property of string values is read-only. Throw in strict mode. - if (strict_mode() == STRICT && object->IsString() && - name->Equals(isolate()->heap()->length_string())) { - return TypeError("strict_read_only_property", object, name); - } - - // Ignore other stores where the receiver is not a JSObject. - // TODO(1475): Must check prototype chains of object wrappers. - if (!object->IsJSObject()) return *value; - - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - // Check if the given name is an array index. uint32_t index; if (name->AsArrayIndex(&index)) { - Handle<Object> result = - JSObject::SetElement(receiver, index, value, NONE, strict_mode()); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *value; + // Ignore other stores where the receiver is not a JSObject. + // TODO(1475): Must check prototype chains of object wrappers. + if (!object->IsJSObject()) return value; + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + JSObject::SetElement(receiver, index, value, NONE, strict_mode()), + Object); + return value; } // Observed objects are always modified through the runtime. 
- if (receiver->map()->is_observed()) { - Handle<Object> result = JSReceiver::SetProperty( - receiver, name, value, NONE, strict_mode(), store_mode); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *result; + if (object->IsHeapObject() && + Handle<HeapObject>::cast(object)->map()->is_observed()) { + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), result, + Object::SetProperty(object, name, value, strict_mode(), store_mode), + Object); + return result; } LookupResult lookup(isolate()); - bool can_store = LookupForWrite(receiver, name, value, &lookup, this); + bool can_store = LookupForWrite(object, name, value, &lookup, this); if (!can_store && strict_mode() == STRICT && !(lookup.IsProperty() && lookup.IsReadOnly()) && @@ -1247,9 +1410,8 @@ set_target(*stub); TRACE_IC("StoreIC", name); } else if (can_store) { - UpdateCaches(&lookup, receiver, name, value); - } else if (!name->IsCacheable(isolate()) || - lookup.IsNormal() || + UpdateCaches(&lookup, Handle<JSObject>::cast(object), name, value); + } else if (lookup.IsNormal() || (lookup.IsField() && lookup.CanHoldValue(value))) { Handle<Code> stub = generic_stub(); set_target(*stub); @@ -1257,155 +1419,186 @@ } // Set the property. - Handle<Object> result = JSReceiver::SetProperty( - receiver, name, value, NONE, strict_mode(), store_mode); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *result; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), result, + Object::SetProperty(object, name, value, strict_mode(), store_mode), + Object); + return result; +} + + +OStream& operator<<(OStream& os, const CallIC::State& s) { + return os << "(args(" << s.arg_count() << "), " + << (s.call_type() == CallIC::METHOD ? 
"METHOD" : "FUNCTION") + << ", "; +} + + +Handle<Code> CallIC::initialize_stub(Isolate* isolate, + int argc, + CallType call_type) { + CallICStub stub(isolate, State(argc, call_type)); + Handle<Code> code = stub.GetCode(); + return code; } Handle<Code> StoreIC::initialize_stub(Isolate* isolate, StrictMode strict_mode) { ExtraICState extra_state = ComputeExtraICState(strict_mode); - Handle<Code> ic = isolate->stub_cache()->ComputeStore( - UNINITIALIZED, extra_state); + Handle<Code> ic = + PropertyICCompiler::ComputeStore(isolate, UNINITIALIZED, extra_state); return ic; } Handle<Code> StoreIC::megamorphic_stub() { - return isolate()->stub_cache()->ComputeStore(MEGAMORPHIC, extra_ic_state()); + return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC, + extra_ic_state()); } Handle<Code> StoreIC::generic_stub() const { - return isolate()->stub_cache()->ComputeStore(GENERIC, extra_ic_state()); + return PropertyICCompiler::ComputeStore(isolate(), GENERIC, extra_ic_state()); } Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate, StrictMode strict_mode) { ExtraICState state = ComputeExtraICState(strict_mode); - return isolate->stub_cache()->ComputeStore(PREMONOMORPHIC, state); + return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state); } void StoreIC::UpdateCaches(LookupResult* lookup, Handle<JSObject> receiver, - Handle<String> name, + Handle<Name> name, Handle<Object> value) { - ASSERT(lookup->IsFound()); + DCHECK(lookup->IsFound()); // These are not cacheable, so we never see such LookupResults here. 
- ASSERT(!lookup->IsHandler()); + DCHECK(!lookup->IsHandler()); - Handle<Code> code = ComputeHandler(lookup, receiver, name, value); + Handle<Code> code = ComputeStoreHandler(lookup, receiver, name, value); - PatchCache(CurrentTypeOf(receiver, isolate()), name, code); + PatchCache(name, code); TRACE_IC("StoreIC", name); } -Handle<Code> StoreIC::CompileHandler(LookupResult* lookup, - Handle<Object> object, - Handle<String> name, - Handle<Object> value, - InlineCacheHolderFlag cache_holder) { +Handle<Code> StoreIC::CompileStoreHandler(LookupResult* lookup, + Handle<Object> object, + Handle<Name> name, + Handle<Object> value, + CacheHolderFlag cache_holder) { if (object->IsAccessCheckNeeded()) return slow_stub(); - ASSERT(cache_holder == OWN_MAP); + DCHECK(cache_holder == kCacheOnReceiver || lookup->type() == CALLBACKS || + (object->IsJSGlobalProxy() && lookup->holder()->IsJSGlobalObject())); // This is currently guaranteed by checks in StoreIC::Store. Handle<JSObject> receiver = Handle<JSObject>::cast(object); Handle<JSObject> holder(lookup->holder()); - // Handlers do not use strict mode. - StoreStubCompiler compiler(isolate(), SLOPPY, kind()); - switch (lookup->type()) { - case FIELD: - return compiler.CompileStoreField(receiver, lookup, name); - case TRANSITION: { - // Explicitly pass in the receiver map since LookupForWrite may have - // stored something else than the receiver in the holder. - Handle<Map> transition(lookup->GetTransitionTarget()); - PropertyDetails details = transition->GetLastDescriptorDetails(); - - if (details.type() == CALLBACKS || details.attributes() != NONE) break; - - return compiler.CompileStoreTransition( - receiver, lookup, transition, name); - } - case NORMAL: - if (kind() == Code::KEYED_STORE_IC) break; - if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) { - // The stub generated for the global object picks the value directly - // from the property cell. So the property must be directly on the - // global object. 
- Handle<GlobalObject> global = receiver->IsJSGlobalProxy() - ? handle(GlobalObject::cast(receiver->GetPrototype())) - : Handle<GlobalObject>::cast(receiver); - Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate()); - Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value); - StoreGlobalStub stub( - union_type->IsConstant(), receiver->IsJSGlobalProxy()); - Handle<Code> code = stub.GetCodeCopyFromTemplate( - isolate(), global, cell); - // TODO(verwaest): Move caching of these NORMAL stubs outside as well. - HeapObject::UpdateMapCodeCache(receiver, name, code); - return code; - } - ASSERT(holder.is_identical_to(receiver)); - return isolate()->builtins()->StoreIC_Normal(); - case CALLBACKS: { - Handle<Object> callback(lookup->GetCallbackObject(), isolate()); - if (callback->IsExecutableAccessorInfo()) { - Handle<ExecutableAccessorInfo> info = - Handle<ExecutableAccessorInfo>::cast(callback); - if (v8::ToCData<Address>(info->setter()) == 0) break; - if (!holder->HasFastProperties()) break; - if (!info->IsCompatibleReceiver(*receiver)) break; - return compiler.CompileStoreCallback(receiver, holder, name, info); - } else if (callback->IsAccessorPair()) { - Handle<Object> setter( - Handle<AccessorPair>::cast(callback)->setter(), isolate()); - if (!setter->IsJSFunction()) break; - if (holder->IsGlobalObject()) break; - if (!holder->HasFastProperties()) break; - Handle<JSFunction> function = Handle<JSFunction>::cast(setter); - CallOptimization call_optimization(function); - if (call_optimization.is_simple_api_call() && - call_optimization.IsCompatibleReceiver(receiver, holder)) { - return compiler.CompileStoreCallback( - receiver, holder, name, call_optimization); + + if (lookup->IsTransition()) { + // Explicitly pass in the receiver map since LookupForWrite may have + // stored something else than the receiver in the holder. 
+ Handle<Map> transition(lookup->GetTransitionTarget()); + PropertyDetails details = lookup->GetPropertyDetails(); + + if (details.type() != CALLBACKS && details.attributes() == NONE && + holder->HasFastProperties()) { + NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder); + return compiler.CompileStoreTransition(transition, name); + } + } else { + switch (lookup->type()) { + case FIELD: { + bool use_stub = true; + if (lookup->representation().IsHeapObject()) { + // Only use a generic stub if no types need to be tracked. + HeapType* field_type = lookup->GetFieldType(); + HeapType::Iterator<Map> it = field_type->Classes(); + use_stub = it.Done(); + } + if (use_stub) { + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + return stub.GetCode(); } - return compiler.CompileStoreViaSetter( - receiver, holder, name, Handle<JSFunction>::cast(setter)); + NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder); + return compiler.CompileStoreField(lookup, name); } - // TODO(dcarney): Handle correctly. - if (callback->IsDeclaredAccessorInfo()) break; - ASSERT(callback->IsForeign()); - - // Use specialized code for setting the length of arrays with fast - // properties. Slow properties might indicate redefinition of the length - // property. - if (receiver->IsJSArray() && - name->Equals(isolate()->heap()->length_string()) && - Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() && - receiver->HasFastProperties()) { - return compiler.CompileStoreArrayLength(receiver, lookup, name); - } - - // No IC support for old-style native accessors. 
- break; - } - case INTERCEPTOR: - if (kind() == Code::KEYED_STORE_IC) break; - ASSERT(HasInterceptorSetter(*holder)); - return compiler.CompileStoreInterceptor(receiver, name); - case CONSTANT: - break; - case NONEXISTENT: - case HANDLER: - UNREACHABLE(); - break; + case NORMAL: + if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) { + // The stub generated for the global object picks the value directly + // from the property cell. So the property must be directly on the + // global object. + PrototypeIterator iter(isolate(), receiver); + Handle<GlobalObject> global = + receiver->IsJSGlobalProxy() + ? Handle<GlobalObject>::cast( + PrototypeIterator::GetCurrent(iter)) + : Handle<GlobalObject>::cast(receiver); + Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate()); + Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value); + StoreGlobalStub stub( + isolate(), union_type->IsConstant(), receiver->IsJSGlobalProxy()); + Handle<Code> code = stub.GetCodeCopyFromTemplate(global, cell); + // TODO(verwaest): Move caching of these NORMAL stubs outside as well. 
+ HeapObject::UpdateMapCodeCache(receiver, name, code); + return code; + } + DCHECK(holder.is_identical_to(receiver)); + return isolate()->builtins()->StoreIC_Normal(); + case CALLBACKS: { + Handle<Object> callback(lookup->GetCallbackObject(), isolate()); + if (callback->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> info = + Handle<ExecutableAccessorInfo>::cast(callback); + if (v8::ToCData<Address>(info->setter()) == 0) break; + if (!holder->HasFastProperties()) break; + if (!ExecutableAccessorInfo::IsCompatibleReceiverType( + isolate(), info, receiver_type())) { + break; + } + NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), + holder); + return compiler.CompileStoreCallback(receiver, name, info); + } else if (callback->IsAccessorPair()) { + Handle<Object> setter( + Handle<AccessorPair>::cast(callback)->setter(), isolate()); + if (!setter->IsJSFunction()) break; + if (holder->IsGlobalObject()) break; + if (!holder->HasFastProperties()) break; + Handle<JSFunction> function = Handle<JSFunction>::cast(setter); + CallOptimization call_optimization(function); + NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), + holder); + if (call_optimization.is_simple_api_call() && + call_optimization.IsCompatibleReceiver(receiver, holder)) { + return compiler.CompileStoreCallback(receiver, name, + call_optimization); + } + return compiler.CompileStoreViaSetter( + receiver, name, Handle<JSFunction>::cast(setter)); + } + // TODO(dcarney): Handle correctly. 
+ DCHECK(callback->IsDeclaredAccessorInfo()); + break; + } + case INTERCEPTOR: { + DCHECK(HasInterceptorSetter(*holder)); + NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder); + return compiler.CompileStoreInterceptor(name); + } + case CONSTANT: + break; + case NONEXISTENT: + case HANDLER: + UNREACHABLE(); + break; + } } return slow_stub(); } @@ -1422,22 +1615,14 @@ } Handle<Map> receiver_map(receiver->map(), isolate()); - if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) { - // Optimistically assume that ICs that haven't reached the MONOMORPHIC state - // yet will do so and stay there. - Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode); - store_mode = GetNonTransitioningStoreMode(store_mode); - return isolate()->stub_cache()->ComputeKeyedStoreElement( - monomorphic_map, strict_mode(), store_mode); - } - MapHandleList target_receiver_maps; - target()->FindAllMaps(&target_receiver_maps); + TargetMaps(&target_receiver_maps); if (target_receiver_maps.length() == 0) { - // In the case that there is a non-map-specific IC is installed (e.g. keyed - // stores into properties in dictionary mode), then there will be not - // receiver maps in the target. 
- return generic_stub(); + Handle<Map> monomorphic_map = + ComputeTransitionedMap(receiver_map, store_mode); + store_mode = GetNonTransitioningStoreMode(store_mode); + return PropertyICCompiler::ComputeKeyedStoreMonomorphic( + monomorphic_map, strict_mode(), store_mode); } // There are several special cases where an IC that is MONOMORPHIC can still @@ -1450,7 +1635,8 @@ if (state() == MONOMORPHIC) { Handle<Map> transitioned_receiver_map = receiver_map; if (IsTransitionStoreMode(store_mode)) { - transitioned_receiver_map = ComputeTransitionedMap(receiver, store_mode); + transitioned_receiver_map = + ComputeTransitionedMap(receiver_map, store_mode); } if ((receiver_map.is_identical_to(previous_receiver_map) && IsTransitionStoreMode(store_mode)) || @@ -1460,7 +1646,7 @@ // if they at least come from the same origin for a transitioning store, // stay MONOMORPHIC and use the map for the most generic ElementsKind. store_mode = GetNonTransitioningStoreMode(store_mode); - return isolate()->stub_cache()->ComputeKeyedStoreElement( + return PropertyICCompiler::ComputeKeyedStoreMonomorphic( transitioned_receiver_map, strict_mode(), store_mode); } else if (*previous_receiver_map == receiver->map() && old_store_mode == STANDARD_STORE && @@ -1470,19 +1656,19 @@ // A "normal" IC that handles stores can switch to a version that can // grow at the end of the array, handle OOB accesses or copy COW arrays // and still stay MONOMORPHIC. 
- return isolate()->stub_cache()->ComputeKeyedStoreElement( + return PropertyICCompiler::ComputeKeyedStoreMonomorphic( receiver_map, strict_mode(), store_mode); } } - ASSERT(state() != GENERIC); + DCHECK(state() != GENERIC); bool map_added = AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map); if (IsTransitionStoreMode(store_mode)) { Handle<Map> transitioned_receiver_map = - ComputeTransitionedMap(receiver, store_mode); + ComputeTransitionedMap(receiver_map, store_mode); map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, transitioned_receiver_map); } @@ -1532,42 +1718,41 @@ } } - return isolate()->stub_cache()->ComputeStoreElementPolymorphic( + return PropertyICCompiler::ComputeKeyedStorePolymorphic( &target_receiver_maps, store_mode, strict_mode()); } Handle<Map> KeyedStoreIC::ComputeTransitionedMap( - Handle<JSObject> receiver, + Handle<Map> map, KeyedAccessStoreMode store_mode) { switch (store_mode) { case STORE_TRANSITION_SMI_TO_OBJECT: case STORE_TRANSITION_DOUBLE_TO_OBJECT: case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT: case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT: - return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS); + return Map::TransitionElementsTo(map, FAST_ELEMENTS); case STORE_TRANSITION_SMI_TO_DOUBLE: case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE: - return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS); + return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS); case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT: case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT: case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: - return JSObject::GetElementsTransitionMap(receiver, - FAST_HOLEY_ELEMENTS); + return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS); case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE: case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE: - return JSObject::GetElementsTransitionMap(receiver, - FAST_HOLEY_DOUBLE_ELEMENTS); + return 
Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS); case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS: - ASSERT(receiver->map()->has_external_array_elements()); + DCHECK(map->has_external_array_elements()); // Fall through case STORE_NO_TRANSITION_HANDLE_COW: case STANDARD_STORE: case STORE_AND_GROW_NO_TRANSITION: - return Handle<Map>(receiver->map(), isolate()); + return map; } - return Handle<Map>::null(); + UNREACHABLE(); + return MaybeHandle<Map>().ToHandleChecked(); } @@ -1584,9 +1769,7 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver, Handle<Object> key, Handle<Object> value) { - ASSERT(!key->ToSmi()->IsFailure()); - Smi* smi_key = NULL; - key->ToSmi()->To(&smi_key); + Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked(); int index = smi_key->value(); bool oob_access = IsOutOfBoundsAccess(receiver, index); // Don't consider this a growing store if the store would send the receiver to @@ -1659,89 +1842,223 @@ } -MaybeObject* KeyedStoreIC::Store(Handle<Object> object, - Handle<Object> key, - Handle<Object> value) { +MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, + Handle<Object> key, + Handle<Object> value) { + // TODO(verwaest): Let SetProperty do the migration, since storing a property + // might deprecate the current map again, if value does not fit. if (MigrateDeprecated(object)) { - Handle<Object> result = Runtime::SetObjectProperty(isolate(), object, - key, - value, - NONE, - strict_mode()); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *result; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + Runtime::SetObjectProperty( + isolate(), object, key, value, strict_mode()), + Object); + return result; } // Check for non-string values that can be converted into an // internalized string directly or is representable as a smi. 
key = TryConvertKey(key, isolate()); - MaybeObject* maybe_object = NULL; + Handle<Object> store_handle; Handle<Code> stub = generic_stub(); if (key->IsInternalizedString()) { - maybe_object = StoreIC::Store(object, - Handle<String>::cast(key), - value, - JSReceiver::MAY_BE_STORE_FROM_KEYED); - if (maybe_object->IsFailure()) return maybe_object; - } else { - bool use_ic = FLAG_use_ic && - !object->IsAccessCheckNeeded() && - !object->IsJSGlobalProxy() && - !(object->IsJSObject() && - JSObject::cast(*object)->map()->is_observed()); - if (use_ic && !object->IsSmi()) { - // Don't use ICs for maps of the objects in Array's prototype chain. We - // expect to be able to trap element sets to objects with those maps in - // the runtime to enable optimization of element hole access. - Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); - if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false; - } - - if (use_ic) { - ASSERT(!object->IsAccessCheckNeeded()); - - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure(); - if (receiver->elements()->map() == - isolate()->heap()->sloppy_arguments_elements_map()) { - if (strict_mode() == SLOPPY) { - stub = sloppy_arguments_stub(); - } - } else if (key_is_smi_like && - !(target().is_identical_to(sloppy_arguments_stub()))) { - // We should go generic if receiver isn't a dictionary, but our - // prototype chain does have dictionary elements. This ensures that - // other non-dictionary receivers in the polymorphic case benefit - // from fast path keyed stores. 
- if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) { - KeyedAccessStoreMode store_mode = - GetStoreMode(receiver, key, value); - stub = StoreElementStub(receiver, store_mode); - } + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + store_handle, + StoreIC::Store(object, + Handle<String>::cast(key), + value, + JSReceiver::MAY_BE_STORE_FROM_KEYED), + Object); + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); + set_target(*stub); + return store_handle; + } + + bool use_ic = + FLAG_use_ic && !object->IsStringWrapper() && + !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy() && + !(object->IsJSObject() && JSObject::cast(*object)->map()->is_observed()); + if (use_ic && !object->IsSmi()) { + // Don't use ICs for maps of the objects in Array's prototype chain. We + // expect to be able to trap element sets to objects with those maps in + // the runtime to enable optimization of element hole access. + Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); + if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false; + } + + if (use_ic) { + DCHECK(!object->IsAccessCheckNeeded()); + + if (object->IsJSObject()) { + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null(); + if (receiver->elements()->map() == + isolate()->heap()->sloppy_arguments_elements_map()) { + if (strict_mode() == SLOPPY) { + stub = sloppy_arguments_stub(); + } + } else if (key_is_smi_like && + !(target().is_identical_to(sloppy_arguments_stub()))) { + // We should go generic if receiver isn't a dictionary, but our + // prototype chain does have dictionary elements. This ensures that + // other non-dictionary receivers in the polymorphic case benefit + // from fast path keyed stores. 
+ if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) { + KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value); + stub = StoreElementStub(receiver, store_mode); } } } } - if (!is_target_set()) { - if (*stub == *generic_stub()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); + if (store_handle.is_null()) { + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + store_handle, + Runtime::SetObjectProperty( + isolate(), object, key, value, strict_mode()), + Object); + } + + DCHECK(!is_target_set()); + Code* generic = *generic_stub(); + if (*stub == generic) { + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); + } + DCHECK(!stub.is_null()); + set_target(*stub); + TRACE_IC("StoreIC", key); + + return store_handle; +} + + +CallIC::State::State(ExtraICState extra_ic_state) + : argc_(ArgcBits::decode(extra_ic_state)), + call_type_(CallTypeBits::decode(extra_ic_state)) { +} + + +ExtraICState CallIC::State::GetExtraICState() const { + ExtraICState extra_ic_state = + ArgcBits::encode(argc_) | + CallTypeBits::encode(call_type_); + return extra_ic_state; +} + + +bool CallIC::DoCustomHandler(Handle<Object> receiver, + Handle<Object> function, + Handle<FixedArray> vector, + Handle<Smi> slot, + const State& state) { + DCHECK(FLAG_use_ic && function->IsJSFunction()); + + // Are we the array function? + Handle<JSFunction> array_function = Handle<JSFunction>( + isolate()->native_context()->array_function()); + if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) { + // Alter the slot. 
+ IC::State old_state = FeedbackToState(vector, slot); + Object* feedback = vector->get(slot->value()); + if (!feedback->IsAllocationSite()) { + Handle<AllocationSite> new_site = + isolate()->factory()->NewAllocationSite(); + vector->set(slot->value(), *new_site); + } + + CallIC_ArrayStub stub(isolate(), state); + set_target(*stub.GetCode()); + Handle<String> name; + if (array_function->shared()->name()->IsString()) { + name = Handle<String>(String::cast(array_function->shared()->name()), + isolate()); + } + + IC::State new_state = FeedbackToState(vector, slot); + OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true); + TRACE_VECTOR_IC("CallIC (custom handler)", name, old_state, new_state); + return true; + } + return false; +} + + +void CallIC::PatchMegamorphic(Handle<Object> function, + Handle<FixedArray> vector, Handle<Smi> slot) { + State state(target()->extra_ic_state()); + IC::State old_state = FeedbackToState(vector, slot); + + // We are going generic. + vector->set(slot->value(), + *TypeFeedbackInfo::MegamorphicSentinel(isolate()), + SKIP_WRITE_BARRIER); + + CallICStub stub(isolate(), state); + Handle<Code> code = stub.GetCode(); + set_target(*code); + + Handle<Object> name = isolate()->factory()->empty_string(); + if (function->IsJSFunction()) { + Handle<JSFunction> js_function = Handle<JSFunction>::cast(function); + name = handle(js_function->shared()->name(), isolate()); + } + + IC::State new_state = FeedbackToState(vector, slot); + OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true); + TRACE_VECTOR_IC("CallIC", name, old_state, new_state); +} + + +void CallIC::HandleMiss(Handle<Object> receiver, + Handle<Object> function, + Handle<FixedArray> vector, + Handle<Smi> slot) { + State state(target()->extra_ic_state()); + IC::State old_state = FeedbackToState(vector, slot); + Handle<Object> name = isolate()->factory()->empty_string(); + Object* feedback = vector->get(slot->value()); + + // Hand-coded MISS handling is 
easier if CallIC slots don't contain smis. + DCHECK(!feedback->IsSmi()); + + if (feedback->IsJSFunction() || !function->IsJSFunction()) { + // We are going generic. + vector->set(slot->value(), + *TypeFeedbackInfo::MegamorphicSentinel(isolate()), + SKIP_WRITE_BARRIER); + } else { + // The feedback is either uninitialized or an allocation site. + // It might be an allocation site because if we re-compile the full code + // to add deoptimization support, we call with the default call-ic, and + // merely need to patch the target to match the feedback. + // TODO(mvstanton): the better approach is to dispense with patching + // altogether, which is in progress. + DCHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()) || + feedback->IsAllocationSite()); + + // Do we want to install a custom handler? + if (FLAG_use_ic && + DoCustomHandler(receiver, function, vector, slot, state)) { + return; } - ASSERT(!stub.is_null()); - set_target(*stub); - TRACE_IC("StoreIC", key); + + vector->set(slot->value(), *function); } - if (maybe_object) return maybe_object; - Handle<Object> result = Runtime::SetObjectProperty(isolate(), object, key, - value, - NONE, - strict_mode()); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *result; + if (function->IsJSFunction()) { + Handle<JSFunction> js_function = Handle<JSFunction>::cast(function); + name = handle(js_function->shared()->name(), isolate()); + } + + IC::State new_state = FeedbackToState(vector, slot); + OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true); + TRACE_VECTOR_IC("CallIC", name, old_state, new_state); } @@ -1753,194 +2070,210 @@ // // Used from ic-<arch>.cc. 
+RUNTIME_FUNCTION(CallIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); + HandleScope scope(isolate); + DCHECK(args.length() == 4); + CallIC ic(isolate); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> function = args.at<Object>(1); + Handle<FixedArray> vector = args.at<FixedArray>(2); + Handle<Smi> slot = args.at<Smi>(3); + ic.HandleMiss(receiver, function, vector, slot); + return *function; +} + + +RUNTIME_FUNCTION(CallIC_Customization_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); + HandleScope scope(isolate); + DCHECK(args.length() == 4); + // A miss on a custom call ic always results in going megamorphic. + CallIC ic(isolate); + Handle<Object> function = args.at<Object>(1); + Handle<FixedArray> vector = args.at<FixedArray>(2); + Handle<Smi> slot = args.at<Smi>(3); + ic.PatchMegamorphic(function, vector, slot); + return *function; +} + + // Used from ic-<arch>.cc. -RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) { +RUNTIME_FUNCTION(LoadIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); LoadIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<String> key = args.at<String>(1); ic.UpdateState(receiver, key); - return ic.Load(receiver, key); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key)); + return *result; } // Used from ic-<arch>.cc -RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) { +RUNTIME_FUNCTION(KeyedLoadIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); ic.UpdateState(receiver, key); - return ic.Load(receiver, key); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, 
ic.Load(receiver, key)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) { +RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); ic.UpdateState(receiver, key); - return ic.Load(receiver, key); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key)); + return *result; } // Used from ic-<arch>.cc. -RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) { +RUNTIME_FUNCTION(StoreIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); StoreIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<String> key = args.at<String>(1); ic.UpdateState(receiver, key); - return ic.Store(receiver, key, args.at<Object>(2)); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, + result, + ic.Store(receiver, key, args.at<Object>(2))); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) { +RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); StoreIC ic(IC::EXTRA_CALL_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<String> key = args.at<String>(1); ic.UpdateState(receiver, key); - return ic.Store(receiver, key, args.at<Object>(2)); -} - - -RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) { - HandleScope scope(isolate); - - ASSERT(args.length() == 2); - Handle<JSArray> receiver = args.at<JSArray>(0); - Handle<Object> len = args.at<Object>(1); - - // The generated code should filter out non-Smis before we get here. 
- ASSERT(len->IsSmi()); - -#ifdef DEBUG - // The length property has to be a writable callback property. - LookupResult debug_lookup(isolate); - receiver->LocalLookup(isolate->heap()->length_string(), &debug_lookup); - ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly()); -#endif - - RETURN_IF_EMPTY_HANDLE(isolate, - JSArray::SetElementsLength(receiver, len)); - return *len; + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, + result, + ic.Store(receiver, key, args.at<Object>(2))); + return *result; } // Extend storage is called in a store inline cache when // it is necessary to extend the properties array of a // JSObject. -RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 3); +RUNTIME_FUNCTION(SharedStoreIC_ExtendStorage) { + TimerEventScope<TimerEventIcMiss> timer(isolate); + HandleScope shs(isolate); + DCHECK(args.length() == 3); // Convert the parameters - JSObject* object = JSObject::cast(args[0]); - Map* transition = Map::cast(args[1]); - Object* value = args[2]; + Handle<JSObject> object = args.at<JSObject>(0); + Handle<Map> transition = args.at<Map>(1); + Handle<Object> value = args.at<Object>(2); // Check the object has run out out property space. - ASSERT(object->HasFastProperties()); - ASSERT(object->map()->unused_property_fields() == 0); + DCHECK(object->HasFastProperties()); + DCHECK(object->map()->unused_property_fields() == 0); - // Expand the properties array. 
- FixedArray* old_storage = object->properties(); - int new_unused = transition->unused_property_fields(); - int new_size = old_storage->length() + new_unused + 1; - Object* result; - MaybeObject* maybe_result = old_storage->CopySize(new_size); - if (!maybe_result->ToObject(&result)) return maybe_result; - - FixedArray* new_storage = FixedArray::cast(result); - - Object* to_store = value; - - DescriptorArray* descriptors = transition->instance_descriptors(); - PropertyDetails details = descriptors->GetDetails(transition->LastAdded()); - if (details.representation().IsDouble()) { - MaybeObject* maybe_storage = - isolate->heap()->AllocateHeapNumber(value->Number()); - if (!maybe_storage->To(&to_store)) return maybe_storage; - } - - new_storage->set(old_storage->length(), to_store); - - // Set the new property value and do the map transition. - object->set_properties(new_storage); - object->set_map(transition); + JSObject::MigrateToNewProperty(object, transition, value); // Return the stored value. - return value; + return *value; } // Used from ic-<arch>.cc. 
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) { +RUNTIME_FUNCTION(KeyedStoreIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); ic.UpdateState(receiver, key); - return ic.Store(receiver, key, args.at<Object>(2)); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, + result, + ic.Store(receiver, key, args.at<Object>(2))); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) { +RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate); Handle<Object> receiver = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); ic.UpdateState(receiver, key); - return ic.Store(receiver, key, args.at<Object>(2)); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, + result, + ic.Store(receiver, key, args.at<Object>(2))); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) { +RUNTIME_FUNCTION(StoreIC_Slow) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); StoreIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); StrictMode strict_mode = ic.strict_mode(); - Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key, - value, - NONE, - strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::SetObjectProperty( + isolate, object, key, value, strict_mode)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) { 
+RUNTIME_FUNCTION(KeyedStoreIC_Slow) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); StrictMode strict_mode = ic.strict_mode(); - Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key, - value, - NONE, - strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::SetObjectProperty( + isolate, object, key, value, strict_mode)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) { +RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate); Handle<Object> value = args.at<Object>(0); Handle<Map> map = args.at<Map>(1); @@ -1951,19 +2284,17 @@ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), map->elements_kind()); } - Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key, - value, - NONE, - strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::SetObjectProperty( + isolate, object, key, value, strict_mode)); return *result; } -BinaryOpIC::State::State(ExtraICState extra_ic_state) { - // We don't deserialize the SSE2 Field, since this is only used to be able - // to include SSE2 as well as non-SSE2 versions in the snapshot. For code - // generation we always want it to reflect the current state. 
+BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state) + : isolate_(isolate) { op_ = static_cast<Token::Value>( FIRST_TOKEN + OpField::decode(extra_ic_state)); mode_ = OverwriteModeField::decode(extra_ic_state); @@ -1977,16 +2308,13 @@ right_kind_ = RightKindField::decode(extra_ic_state); } result_kind_ = ResultKindField::decode(extra_ic_state); - ASSERT_LE(FIRST_TOKEN, op_); - ASSERT_LE(op_, LAST_TOKEN); + DCHECK_LE(FIRST_TOKEN, op_); + DCHECK_LE(op_, LAST_TOKEN); } ExtraICState BinaryOpIC::State::GetExtraICState() const { - bool sse2 = (Max(result_kind_, Max(left_kind_, right_kind_)) > SMI && - CpuFeatures::IsSafeForSnapshot(SSE2)); ExtraICState extra_ic_state = - SSE2Field::encode(sse2) | OpField::encode(op_ - FIRST_TOKEN) | OverwriteModeField::encode(mode_) | LeftKindField::encode(left_kind_) | @@ -2011,7 +2339,7 @@ // Generated list of commonly used stubs #define GENERATE(op, left_kind, right_kind, result_kind, mode) \ do { \ - State state(op, mode); \ + State state(isolate, op, mode); \ state.left_kind_ = left_kind; \ state.fixed_right_arg_.has_value = false; \ state.right_kind_ = right_kind; \ @@ -2206,7 +2534,7 @@ #undef GENERATE #define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \ do { \ - State state(op, mode); \ + State state(isolate, op, mode); \ state.left_kind_ = left_kind; \ state.fixed_right_arg_.has_value = true; \ state.fixed_right_arg_.value = fixed_right_arg_value; \ @@ -2234,23 +2562,25 @@ } else if (result_kind == NUMBER && op_ == Token::SHR) { return Type::Unsigned32(zone); } - ASSERT_NE(GENERIC, result_kind); + DCHECK_NE(GENERIC, result_kind); return KindToType(result_kind, zone); } -void BinaryOpIC::State::Print(StringStream* stream) const { - stream->Add("(%s", Token::Name(op_)); - if (mode_ == OVERWRITE_LEFT) stream->Add("_ReuseLeft"); - else if (mode_ == OVERWRITE_RIGHT) stream->Add("_ReuseRight"); - if (CouldCreateAllocationMementos()) stream->Add("_CreateAllocationMementos"); - 
stream->Add(":%s*", KindToString(left_kind_)); - if (fixed_right_arg_.has_value) { - stream->Add("%d", fixed_right_arg_.value); +OStream& operator<<(OStream& os, const BinaryOpIC::State& s) { + os << "(" << Token::Name(s.op_); + if (s.mode_ == OVERWRITE_LEFT) + os << "_ReuseLeft"; + else if (s.mode_ == OVERWRITE_RIGHT) + os << "_ReuseRight"; + if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos"; + os << ":" << BinaryOpIC::State::KindToString(s.left_kind_) << "*"; + if (s.fixed_right_arg_.has_value) { + os << s.fixed_right_arg_.value; } else { - stream->Add("%s", KindToString(right_kind_)); + os << BinaryOpIC::State::KindToString(s.right_kind_); } - stream->Add("->%s)", KindToString(result_kind_)); + return os << "->" << BinaryOpIC::State::KindToString(s.result_kind_) << ")"; } @@ -2286,12 +2616,12 @@ // We don't want to distinguish INT32 and NUMBER for string add (because // NumberToString can't make use of this anyway). if (left_kind_ == STRING && right_kind_ == INT32) { - ASSERT_EQ(STRING, result_kind_); - ASSERT_EQ(Token::ADD, op_); + DCHECK_EQ(STRING, result_kind_); + DCHECK_EQ(Token::ADD, op_); right_kind_ = NUMBER; } else if (right_kind_ == STRING && left_kind_ == INT32) { - ASSERT_EQ(STRING, result_kind_); - ASSERT_EQ(Token::ADD, op_); + DCHECK_EQ(STRING, result_kind_); + DCHECK_EQ(Token::ADD, op_); left_kind_ = NUMBER; } @@ -2307,14 +2637,9 @@ // Tagged operations can lead to non-truncating HChanges if (left->IsUndefined() || left->IsBoolean()) { left_kind_ = GENERIC; - } else if (right->IsUndefined() || right->IsBoolean()) { - right_kind_ = GENERIC; } else { - // Since the X87 is too precise, we might bail out on numbers which - // actually would truncate with 64 bit precision. 
- ASSERT(!CpuFeatures::IsSupported(SSE2)); - ASSERT(result_kind_ < NUMBER); - result_kind_ = NUMBER; + DCHECK(right->IsUndefined() || right->IsBoolean()); + right_kind_ = GENERIC; } } } @@ -2380,25 +2705,28 @@ } -MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site, - Handle<Object> left, - Handle<Object> right) { - State state(target()->extra_ic_state()); +MaybeHandle<Object> BinaryOpIC::Transition( + Handle<AllocationSite> allocation_site, + Handle<Object> left, + Handle<Object> right) { + State state(isolate(), target()->extra_ic_state()); // Compute the actual result using the builtin for the binary operation. Object* builtin = isolate()->js_builtins_object()->javascript_builtin( TokenToJSBuiltin(state.op())); Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate()); - bool caught_exception; - Handle<Object> result = Execution::Call( - isolate(), function, left, 1, &right, &caught_exception); - if (caught_exception) return Failure::Exception(); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate(), + result, + Execution::Call(isolate(), function, left, 1, &right), + Object); // Execution::Call can execute arbitrary JavaScript, hence potentially // update the state of this very IC, so we must update the stored state. UpdateTarget(); // Compute the new state. - State old_state(target()->extra_ic_state()); + State old_state(isolate(), target()->extra_ic_state()); state.Update(left, right, result); // Check if we have a string operation here. @@ -2410,37 +2738,30 @@ } // Install the stub with an allocation site. - BinaryOpICWithAllocationSiteStub stub(state); - target = stub.GetCodeCopyFromTemplate(isolate(), allocation_site); + BinaryOpICWithAllocationSiteStub stub(isolate(), state); + target = stub.GetCodeCopyFromTemplate(allocation_site); // Sanity check the trampoline stub. 
- ASSERT_EQ(*allocation_site, target->FindFirstAllocationSite()); + DCHECK_EQ(*allocation_site, target->FindFirstAllocationSite()); } else { // Install the generic stub. - BinaryOpICStub stub(state); - target = stub.GetCode(isolate()); + BinaryOpICStub stub(isolate(), state); + target = stub.GetCode(); // Sanity check the generic stub. - ASSERT_EQ(NULL, target->FindFirstAllocationSite()); + DCHECK_EQ(NULL, target->FindFirstAllocationSite()); } set_target(*target); if (FLAG_trace_ic) { - char buffer[150]; - NoAllocationStringAllocator allocator( - buffer, static_cast<unsigned>(sizeof(buffer))); - StringStream stream(&allocator); - stream.Add("[BinaryOpIC"); - old_state.Print(&stream); - stream.Add(" => "); - state.Print(&stream); - stream.Add(" @ %p <- ", static_cast<void*>(*target)); - stream.OutputToStdOut(); + OFStream os(stdout); + os << "[BinaryOpIC" << old_state << " => " << state << " @ " + << static_cast<void*>(*target) << " <- "; JavaScriptFrame::PrintTop(isolate(), stdout, false, true); if (!allocation_site.is_null()) { - PrintF(" using allocation site %p", static_cast<void*>(*allocation_site)); + os << " using allocation site " << static_cast<void*>(*allocation_site); } - PrintF("]\n"); + os << "]" << endl; } // Patch the inlined smi code as necessary. 
@@ -2450,23 +2771,30 @@ PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK); } - return *result; + return result; } -RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) { +RUNTIME_FUNCTION(BinaryOpIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT_EQ(2, args.length()); + DCHECK_EQ(2, args.length()); Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft); Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight); BinaryOpIC ic(isolate); - return ic.Transition(Handle<AllocationSite>::null(), left, right); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, + result, + ic.Transition(Handle<AllocationSite>::null(), left, right)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_MissWithAllocationSite) { +RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT_EQ(3, args.length()); + DCHECK_EQ(3, args.length()); Handle<AllocationSite> allocation_site = args.at<AllocationSite>( BinaryOpWithAllocationSiteStub::kAllocationSite); Handle<Object> left = args.at<Object>( @@ -2474,21 +2802,26 @@ Handle<Object> right = args.at<Object>( BinaryOpWithAllocationSiteStub::kRight); BinaryOpIC ic(isolate); - return ic.Transition(allocation_site, left, right); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, + result, + ic.Transition(allocation_site, left, right)); + return *result; } Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) { - ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED); + ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED); Code* code = NULL; - CHECK(stub.FindCodeInCache(&code, isolate)); + CHECK(stub.FindCodeInCache(&code)); return code; } Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) { - ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED); - return 
stub.GetCode(isolate); + ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED); + return stub.GetCode(); } @@ -2530,15 +2863,12 @@ } -void CompareIC::StubInfoToType(int stub_minor_key, - Type** left_type, - Type** right_type, - Type** overall_type, - Handle<Map> map, - Zone* zone) { +void CompareIC::StubInfoToType(uint32_t stub_key, Type** left_type, + Type** right_type, Type** overall_type, + Handle<Map> map, Zone* zone) { State left_state, right_state, handler_state; - ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state, - &handler_state, NULL); + ICCompareStub::DecodeKey(stub_key, &left_state, &right_state, &handler_state, + NULL); *left_type = StateToType(zone, left_state); *right_type = StateToType(zone, right_state); *overall_type = StateToType(zone, handler_state, map); @@ -2625,7 +2955,7 @@ case SMI: return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC; case INTERNALIZED_STRING: - ASSERT(Token::IsEqualityOp(op_)); + DCHECK(Token::IsEqualityOp(op_)); if (x->IsString() && y->IsString()) return STRING; if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME; return GENERIC; @@ -2637,7 +2967,7 @@ if (old_right == SMI && y->IsHeapNumber()) return NUMBER; return GENERIC; case KNOWN_OBJECT: - ASSERT(Token::IsEqualityOp(op_)); + DCHECK(Token::IsEqualityOp(op_)); if (x->IsJSObject() && y->IsJSObject()) return OBJECT; return GENERIC; case STRING: @@ -2654,18 +2984,18 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { HandleScope scope(isolate()); State previous_left, previous_right, previous_state; - ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left, - &previous_right, &previous_state, NULL); + ICCompareStub::DecodeKey(target()->stub_key(), &previous_left, + &previous_right, &previous_state, NULL); State new_left = NewInputState(previous_left, x); State new_right = NewInputState(previous_right, y); State state = TargetState(previous_state, previous_left, previous_right, 
HasInlinedSmiCode(address()), x, y); - ICCompareStub stub(op_, new_left, new_right, state); + ICCompareStub stub(isolate(), op_, new_left, new_right, state); if (state == KNOWN_OBJECT) { stub.set_known_map( Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate())); } - Handle<Code> new_target = stub.GetCode(isolate()); + Handle<Code> new_target = stub.GetCode(); set_target(*new_target); if (FLAG_trace_ic) { @@ -2679,7 +3009,7 @@ GetStateName(new_right), GetStateName(state), Token::Name(op_), - static_cast<void*>(*stub.GetCode(isolate()))); + static_cast<void*>(*stub.GetCode())); } // Activate inlined smi code. @@ -2692,9 +3022,10 @@ // Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc. -RUNTIME_FUNCTION(Code*, CompareIC_Miss) { +RUNTIME_FUNCTION(CompareIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2))); return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1)); } @@ -2706,29 +3037,32 @@ if (IsCleared(target)) return; ExtraICState state = target->extra_ic_state(); - CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED); + CompareNilICStub stub(target->GetIsolate(), + state, + HydrogenCodeStub::UNINITIALIZED); stub.ClearState(); Code* code = NULL; - CHECK(stub.FindCodeInCache(&code, target->GetIsolate())); + CHECK(stub.FindCodeInCache(&code)); SetTargetAtAddress(address, code, constant_pool); } -MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil, - Handle<Object> object) { +Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, + NilValue nil, + Handle<Object> object) { if (object->IsNull() || object->IsUndefined()) { - return Smi::FromInt(true); + return handle(Smi::FromInt(true), isolate); } - return Smi::FromInt(object->IsUndetectableObject()); + return handle(Smi::FromInt(object->IsUndetectableObject()), isolate); } -MaybeObject* 
CompareNilIC::CompareNil(Handle<Object> object) { +Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) { ExtraICState extra_ic_state = target()->extra_ic_state(); - CompareNilICStub stub(extra_ic_state); + CompareNilICStub stub(isolate(), extra_ic_state); // Extract the current supported types from the patched IC and calculate what // types must be supported as a result of the miss. @@ -2741,27 +3075,28 @@ // Find or create the specialized stub to support the new set of types. Handle<Code> code; if (stub.IsMonomorphic()) { - Handle<Map> monomorphic_map(already_monomorphic - ? target()->FindFirstMap() + Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL + ? FirstTargetMap() : HeapObject::cast(*object)->map()); - code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, stub); + code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub); } else { - code = stub.GetCode(isolate()); + code = stub.GetCode(); } set_target(*code); - return DoCompareNilSlow(nil, object); + return DoCompareNilSlow(isolate(), nil, object); } -RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss) { +RUNTIME_FUNCTION(CompareNilIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); Handle<Object> object = args.at<Object>(0); CompareNilIC ic(isolate); - return ic.CompareNil(object); + return *ic.CompareNil(object); } -RUNTIME_FUNCTION(MaybeObject*, Unreachable) { +RUNTIME_FUNCTION(Unreachable) { UNREACHABLE(); CHECK(false); return isolate->heap()->undefined_value(); @@ -2809,21 +3144,22 @@ } -MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) { - ToBooleanStub stub(target()->extra_ic_state()); +Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) { + ToBooleanStub stub(isolate(), target()->extra_ic_state()); bool to_boolean_value = stub.UpdateStatus(object); - Handle<Code> code = stub.GetCode(isolate()); + Handle<Code> code = stub.GetCode(); set_target(*code); - return 
Smi::FromInt(to_boolean_value ? 1 : 0); + return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate()); } -RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) { - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(ToBooleanIC_Miss) { + TimerEventScope<TimerEventIcMiss> timer(isolate); + DCHECK(args.length() == 1); HandleScope scope(isolate); Handle<Object> object = args.at<Object>(0); ToBooleanIC ic(isolate); - return ic.ToBoolean(object); + return *ic.ToBoolean(object); } diff -Nru nodejs-0.11.13/deps/v8/src/ic.h nodejs-0.11.15/deps/v8/src/ic.h --- nodejs-0.11.13/deps/v8/src/ic.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ic.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IC_H_ #define V8_IC_H_ -#include "macro-assembler.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -39,26 +16,26 @@ // IC_UTIL_LIST defines all utility functions called from generated // inline caching code. The argument for the macro, ICU, is the function name. -#define IC_UTIL_LIST(ICU) \ - ICU(LoadIC_Miss) \ - ICU(KeyedLoadIC_Miss) \ - ICU(StoreIC_Miss) \ - ICU(StoreIC_ArrayLength) \ - ICU(StoreIC_Slow) \ - ICU(SharedStoreIC_ExtendStorage) \ - ICU(KeyedStoreIC_Miss) \ - ICU(KeyedStoreIC_Slow) \ - /* Utilities for IC stubs. */ \ - ICU(StoreCallbackProperty) \ - ICU(LoadPropertyWithInterceptorOnly) \ - ICU(LoadPropertyWithInterceptorForLoad) \ - ICU(LoadPropertyWithInterceptorForCall) \ - ICU(KeyedLoadPropertyWithInterceptor) \ - ICU(StoreInterceptorProperty) \ - ICU(CompareIC_Miss) \ - ICU(BinaryOpIC_Miss) \ - ICU(CompareNilIC_Miss) \ - ICU(Unreachable) \ +#define IC_UTIL_LIST(ICU) \ + ICU(LoadIC_Miss) \ + ICU(KeyedLoadIC_Miss) \ + ICU(CallIC_Miss) \ + ICU(CallIC_Customization_Miss) \ + ICU(StoreIC_Miss) \ + ICU(StoreIC_Slow) \ + ICU(SharedStoreIC_ExtendStorage) \ + ICU(KeyedStoreIC_Miss) \ + ICU(KeyedStoreIC_Slow) \ + /* Utilities for IC stubs. 
*/ \ + ICU(StoreCallbackProperty) \ + ICU(LoadPropertyWithInterceptorOnly) \ + ICU(LoadPropertyWithInterceptor) \ + ICU(LoadElementWithInterceptor) \ + ICU(StorePropertyWithInterceptor) \ + ICU(CompareIC_Miss) \ + ICU(BinaryOpIC_Miss) \ + ICU(CompareNilIC_Miss) \ + ICU(Unreachable) \ ICU(ToBooleanIC_Miss) // // IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC. @@ -96,10 +73,21 @@ // Compute the current IC state based on the target stub, receiver and name. void UpdateState(Handle<Object> receiver, Handle<Object> name); - void MarkMonomorphicPrototypeFailure() { - state_ = MONOMORPHIC_PROTOTYPE_FAILURE; + + bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name); + void MarkPrototypeFailure(Handle<Object> name) { + DCHECK(IsNameCompatibleWithPrototypeFailure(name)); + state_ = PROTOTYPE_FAILURE; } + // If the stub contains weak maps then this function adds the stub to + // the dependent code array of each weak map. + static void RegisterWeakMapDependency(Handle<Code> stub); + + // This function is called when a weak map in the stub is dying, + // invalidates the stub by setting maps in it to undefined. + static void InvalidateMaps(Code* stub); + // Clear the inline cache to initial state. static void Clear(Isolate* isolate, Address address, @@ -113,22 +101,21 @@ bool IsStoreStub() const { return target()->is_store_stub() || target()->is_keyed_store_stub(); } + + bool IsCallStub() const { + return target()->is_call_stub(); + } #endif - // Determines which map must be used for keeping the code stub. - // These methods should not be called with undefined or null. - static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object); - // TODO(verwaest): This currently returns a HeapObject rather than JSObject* - // since loading the IC for loading the length from strings are stored on - // the string map directly, rather than on the JSObject-typed prototype. 
- static inline HeapObject* GetCodeCacheHolder(Isolate* isolate, - Object* object, - InlineCacheHolderFlag holder); - - static inline InlineCacheHolderFlag GetCodeCacheFlag(HeapType* type); - static inline Handle<Map> GetCodeCacheHolder(InlineCacheHolderFlag flag, - HeapType* type, - Isolate* isolate); + template <class TypeClass> + static JSFunction* GetRootConstructor(TypeClass* type, + Context* native_context); + static inline Handle<Map> GetHandlerCacheHolder(HeapType* type, + bool receiver_is_holder, + Isolate* isolate, + CacheHolderFlag* flag); + static inline Handle<Map> GetICCacheHolder(HeapType* type, Isolate* isolate, + CacheHolderFlag* flag); static bool IsCleared(Code* code) { InlineCacheState state = code->ic_state(); @@ -156,33 +143,33 @@ Address pc() const { return *pc_address_; } Isolate* isolate() const { return isolate_; } -#ifdef ENABLE_DEBUGGER_SUPPORT // Get the shared function info of the caller. SharedFunctionInfo* GetSharedFunctionInfo() const; // Get the code object of the caller. Code* GetCode() const; // Get the original (non-breakpointed) code object of the caller. Code* GetOriginalCode() const; -#endif // Set the call-site target. void set_target(Code* code) { +#ifdef VERIFY_HEAP + code->VerifyEmbeddedObjectsDependency(); +#endif SetTargetAtAddress(address(), code, constant_pool()); target_set_ = true; } bool is_target_set() { return target_set_; } -#ifdef DEBUG char TransitionMarkFromState(IC::State state); - void TraceIC(const char* type, Handle<Object> name); -#endif + void TraceIC(const char* type, Handle<Object> name, State old_state, + State new_state); - Failure* TypeError(const char* type, - Handle<Object> object, - Handle<Object> key); - Failure* ReferenceError(const char* type, Handle<String> name); + MaybeHandle<Object> TypeError(const char* type, + Handle<Object> object, + Handle<Object> key); + MaybeHandle<Object> ReferenceError(const char* type, Handle<Name> name); // Access the target code for the given IC address. 
static inline Code* GetTargetAtAddress(Address address, @@ -190,63 +177,84 @@ static inline void SetTargetAtAddress(Address address, Code* target, ConstantPoolArray* constant_pool); + static void OnTypeFeedbackChanged(Isolate* isolate, Address address, + State old_state, State new_state, + bool target_remains_ic_stub); static void PostPatching(Address address, Code* target, Code* old_target); // Compute the handler either by compiling or by retrieving a cached version. - Handle<Code> ComputeHandler(LookupResult* lookup, - Handle<Object> object, - Handle<String> name, + Handle<Code> ComputeHandler(LookupIterator* lookup, Handle<Object> object, + Handle<Name> name, Handle<Object> value = Handle<Code>::null()); - virtual Handle<Code> CompileHandler(LookupResult* lookup, + virtual Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> object, - Handle<String> name, - Handle<Object> value, - InlineCacheHolderFlag cache_holder) { + Handle<Name> name, Handle<Object> value, + CacheHolderFlag cache_holder) { + UNREACHABLE(); + return Handle<Code>::null(); + } + // Temporary copy of the above, but using a LookupResult. + // TODO(jkummerow): Migrate callers to LookupIterator and delete these. 
+ Handle<Code> ComputeStoreHandler(LookupResult* lookup, Handle<Object> object, + Handle<Name> name, + Handle<Object> value = Handle<Code>::null()); + virtual Handle<Code> CompileStoreHandler(LookupResult* lookup, + Handle<Object> object, + Handle<Name> name, + Handle<Object> value, + CacheHolderFlag cache_holder) { UNREACHABLE(); return Handle<Code>::null(); } - void UpdateMonomorphicIC(Handle<HeapType> type, - Handle<Code> handler, - Handle<String> name); - - bool UpdatePolymorphicIC(Handle<HeapType> type, - Handle<String> name, - Handle<Code> code); - - virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code); + void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name); + bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code); + void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code); - void CopyICToMegamorphicCache(Handle<String> name); + void CopyICToMegamorphicCache(Handle<Name> name); bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map); - void PatchCache(Handle<HeapType> type, - Handle<String> name, - Handle<Code> code); - virtual Code::Kind kind() const { - UNREACHABLE(); - return Code::STUB; - } - virtual Handle<Code> slow_stub() const { - UNREACHABLE(); - return Handle<Code>::null(); + void PatchCache(Handle<Name> name, Handle<Code> code); + Code::Kind kind() const { return kind_; } + Code::Kind handler_kind() const { + if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC; + DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC || + kind_ == Code::KEYED_STORE_IC); + return kind_; } virtual Handle<Code> megamorphic_stub() { UNREACHABLE(); return Handle<Code>::null(); } - virtual Handle<Code> generic_stub() const { - UNREACHABLE(); - return Handle<Code>::null(); - } bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver, Handle<String> name); - void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name); ExtraICState extra_ic_state() const { return extra_ic_state_; } 
void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; } + Handle<HeapType> receiver_type() { return receiver_type_; } + + void TargetMaps(MapHandleList* list) { + FindTargetMaps(); + for (int i = 0; i < target_maps_.length(); i++) { + list->Add(target_maps_.at(i)); + } + } + + void TargetTypes(TypeHandleList* list) { + FindTargetMaps(); + for (int i = 0; i < target_maps_.length(); i++) { + list->Add(IC::MapToType<HeapType>(target_maps_.at(i), isolate_)); + } + } + + Map* FirstTargetMap() { + FindTargetMaps(); + return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL; + } + protected: void UpdateTarget() { target_ = handle(raw_target(), isolate_); @@ -259,6 +267,17 @@ inline ConstantPoolArray* constant_pool() const; inline ConstantPoolArray* raw_constant_pool() const; + void FindTargetMaps() { + if (target_maps_set_) return; + target_maps_set_ = true; + if (state_ == MONOMORPHIC) { + Map* map = target_->FindFirstMap(); + if (map != NULL) target_maps_.Add(handle(map)); + } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) { + target_->FindAllMaps(&target_maps_); + } + } + // Frame pointer for the frame that uses (calls) the IC. Address fp_; @@ -276,10 +295,15 @@ // The original code target that missed. 
Handle<Code> target_; - State state_; bool target_set_; + State state_; + Code::Kind kind_; + Handle<HeapType> receiver_type_; + MaybeHandle<Code> maybe_handler_; ExtraICState extra_ic_state_; + MapHandleList target_maps_; + bool target_maps_set_; DISALLOW_IMPLICIT_CONSTRUCTORS(IC); }; @@ -301,27 +325,123 @@ }; +class CallIC: public IC { + public: + enum CallType { METHOD, FUNCTION }; + + class State V8_FINAL BASE_EMBEDDED { + public: + explicit State(ExtraICState extra_ic_state); + + State(int argc, CallType call_type) + : argc_(argc), call_type_(call_type) { + } + + ExtraICState GetExtraICState() const; + + static void GenerateAheadOfTime( + Isolate*, void (*Generate)(Isolate*, const State&)); + + int arg_count() const { return argc_; } + CallType call_type() const { return call_type_; } + + bool CallAsMethod() const { return call_type_ == METHOD; } + + private: + class ArgcBits: public BitField<int, 0, Code::kArgumentsBits> {}; + class CallTypeBits: public BitField<CallType, Code::kArgumentsBits, 1> {}; + + const int argc_; + const CallType call_type_; + }; + + explicit CallIC(Isolate* isolate) + : IC(EXTRA_CALL_FRAME, isolate) { + } + + void PatchMegamorphic(Handle<Object> function, Handle<FixedArray> vector, + Handle<Smi> slot); + + void HandleMiss(Handle<Object> receiver, + Handle<Object> function, + Handle<FixedArray> vector, + Handle<Smi> slot); + + // Returns true if a custom handler was installed. + bool DoCustomHandler(Handle<Object> receiver, + Handle<Object> function, + Handle<FixedArray> vector, + Handle<Smi> slot, + const State& state); + + // Code generator routines. 
+ static Handle<Code> initialize_stub(Isolate* isolate, + int argc, + CallType call_type); + + static void Clear(Isolate* isolate, Address address, Code* target, + ConstantPoolArray* constant_pool); + + private: + inline IC::State FeedbackToState(Handle<FixedArray> vector, + Handle<Smi> slot) const; +}; + + +OStream& operator<<(OStream& os, const CallIC::State& s); + + class LoadIC: public IC { public: - // ExtraICState bits - class ContextualModeBits: public BitField<ContextualMode, 0, 1> {}; - STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0); + enum ParameterIndices { + kReceiverIndex, + kNameIndex, + kParameterCount + }; + static const Register ReceiverRegister(); + static const Register NameRegister(); + + // With flag vector-ics, there is an additional argument. And for calls from + // crankshaft, yet another. + static const Register SlotRegister(); + static const Register VectorRegister(); + + class State V8_FINAL BASE_EMBEDDED { + public: + explicit State(ExtraICState extra_ic_state) + : state_(extra_ic_state) {} + + explicit State(ContextualMode mode) + : state_(ContextualModeBits::encode(mode)) {} + + ExtraICState GetExtraICState() const { return state_; } + + ContextualMode contextual_mode() const { + return ContextualModeBits::decode(state_); + } + + private: + class ContextualModeBits: public BitField<ContextualMode, 0, 1> {}; + STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0); + + const ExtraICState state_; + }; static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) { - return ContextualModeBits::encode(contextual_mode); + return State(contextual_mode).GetExtraICState(); } static ContextualMode GetContextualMode(ExtraICState state) { - return ContextualModeBits::decode(state); + return State(state).contextual_mode(); } ContextualMode contextual_mode() const { - return ContextualModeBits::decode(extra_ic_state()); + return GetContextualMode(extra_ic_state()); } explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, 
isolate) { - ASSERT(IsLoadStub()); + DCHECK(IsLoadStub()); } // Returns if this IC is for contextual (no explicit receiver) @@ -330,7 +450,7 @@ if (receiver->IsGlobalObject()) { return contextual_mode() == CONTEXTUAL; } else { - ASSERT(contextual_mode() != CONTEXTUAL); + DCHECK(contextual_mode() != CONTEXTUAL); return false; } } @@ -348,51 +468,46 @@ static Handle<Code> initialize_stub(Isolate* isolate, ExtraICState extra_state); - MUST_USE_RESULT MaybeObject* Load(Handle<Object> object, - Handle<String> name); + MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object, + Handle<Name> name); protected: - virtual Code::Kind kind() const { return Code::LOAD_IC; } - void set_target(Code* code) { // The contextual mode must be preserved across IC patching. - ASSERT(GetContextualMode(code->extra_ic_state()) == + DCHECK(GetContextualMode(code->extra_ic_state()) == GetContextualMode(target()->extra_ic_state())); IC::set_target(code); } - virtual Handle<Code> slow_stub() const { - return isolate()->builtins()->LoadIC_Slow(); + Handle<Code> slow_stub() const { + if (kind() == Code::LOAD_IC) { + return isolate()->builtins()->LoadIC_Slow(); + } else { + DCHECK_EQ(Code::KEYED_LOAD_IC, kind()); + return isolate()->builtins()->KeyedLoadIC_Slow(); + } } virtual Handle<Code> megamorphic_stub(); // Update the inline cache and the global stub cache based on the // lookup result. - void UpdateCaches(LookupResult* lookup, - Handle<Object> object, - Handle<String> name); + void UpdateCaches(LookupIterator* lookup, Handle<Object> object, + Handle<Name> name); - virtual Handle<Code> CompileHandler(LookupResult* lookup, + virtual Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> object, - Handle<String> name, + Handle<Name> name, Handle<Object> unused, - InlineCacheHolderFlag cache_holder); + CacheHolderFlag cache_holder); private: - // Stub accessors. 
+ virtual Handle<Code> pre_monomorphic_stub() const; static Handle<Code> pre_monomorphic_stub(Isolate* isolate, - ExtraICState exstra_state); - - virtual Handle<Code> pre_monomorphic_stub() { - return pre_monomorphic_stub(isolate(), extra_ic_state()); - } + ExtraICState extra_state); - Handle<Code> SimpleFieldLoad(int offset, - bool inobject = true, - Representation representation = - Representation::Tagged()); + Handle<Code> SimpleFieldLoad(FieldIndex index); static void Clear(Isolate* isolate, Address address, @@ -407,11 +522,11 @@ public: explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate) : LoadIC(depth, isolate) { - ASSERT(target()->is_keyed_load_stub()); + DCHECK(target()->is_keyed_load_stub()); } - MUST_USE_RESULT MaybeObject* Load(Handle<Object> object, - Handle<Object> key); + MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object, + Handle<Object> key); // Code generator routines. static void GenerateMiss(MacroAssembler* masm); @@ -432,31 +547,17 @@ static const int kSlowCaseBitFieldMask = (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor); - protected: - virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; } + static Handle<Code> generic_stub(Isolate* isolate); + static Handle<Code> pre_monomorphic_stub(Isolate* isolate); + protected: Handle<Code> LoadElementStub(Handle<JSObject> receiver); - - virtual Handle<Code> megamorphic_stub() { - return isolate()->builtins()->KeyedLoadIC_Generic(); - } - virtual Handle<Code> generic_stub() const { - return isolate()->builtins()->KeyedLoadIC_Generic(); - } - virtual Handle<Code> slow_stub() const { - return isolate()->builtins()->KeyedLoadIC_Slow(); + virtual Handle<Code> pre_monomorphic_stub() const { + return pre_monomorphic_stub(isolate()); } - virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {} - private: - // Stub accessors. 
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate) { - return isolate->builtins()->KeyedLoadIC_PreMonomorphic(); - } - virtual Handle<Code> pre_monomorphic_stub() { - return pre_monomorphic_stub(isolate()); - } + Handle<Code> generic_stub() const { return generic_stub(isolate()); } Handle<Code> indexed_interceptor_stub() { return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor(); } @@ -491,9 +592,19 @@ static const ExtraICState kStrictModeState = 1 << StrictModeState::kShift; + enum ParameterIndices { + kReceiverIndex, + kNameIndex, + kValueIndex, + kParameterCount + }; + static const Register ReceiverRegister(); + static const Register NameRegister(); + static const Register ValueRegister(); + StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) { - ASSERT(IsStoreStub()); + DCHECK(IsStoreStub()); } StrictMode strict_mode() const { @@ -515,15 +626,14 @@ static Handle<Code> initialize_stub(Isolate* isolate, StrictMode strict_mode); - MUST_USE_RESULT MaybeObject* Store( + MUST_USE_RESULT MaybeHandle<Object> Store( Handle<Object> object, - Handle<String> name, + Handle<Name> name, Handle<Object> value, JSReceiver::StoreFromKeyed store_mode = JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED); protected: - virtual Code::Kind kind() const { return Code::STORE_IC; } virtual Handle<Code> megamorphic_stub(); // Stub accessors. @@ -533,7 +643,7 @@ return isolate()->builtins()->StoreIC_Slow(); } - virtual Handle<Code> pre_monomorphic_stub() { + virtual Handle<Code> pre_monomorphic_stub() const { return pre_monomorphic_stub(isolate(), strict_mode()); } @@ -544,18 +654,18 @@ // lookup result. 
void UpdateCaches(LookupResult* lookup, Handle<JSObject> receiver, - Handle<String> name, + Handle<Name> name, Handle<Object> value); - virtual Handle<Code> CompileHandler(LookupResult* lookup, - Handle<Object> object, - Handle<String> name, - Handle<Object> value, - InlineCacheHolderFlag cache_holder); + virtual Handle<Code> CompileStoreHandler(LookupResult* lookup, + Handle<Object> object, + Handle<Name> name, + Handle<Object> value, + CacheHolderFlag cache_holder); private: void set_target(Code* code) { // Strict mode must be preserved across IC patching. - ASSERT(GetStrictMode(code->extra_ic_state()) == + DCHECK(GetStrictMode(code->extra_ic_state()) == GetStrictMode(target()->extra_ic_state())); IC::set_target(code); } @@ -599,14 +709,19 @@ return ExtraICStateKeyedAccessStoreMode::decode(extra_state); } + // The map register isn't part of the normal call specification, but + // ElementsTransitionAndStoreStub, used in polymorphic keyed store + // stub implementations requires it to be initialized. + static const Register MapRegister(); + KeyedStoreIC(FrameDepth depth, Isolate* isolate) : StoreIC(depth, isolate) { - ASSERT(target()->is_keyed_store_stub()); + DCHECK(target()->is_keyed_store_stub()); } - MUST_USE_RESULT MaybeObject* Store(Handle<Object> object, - Handle<Object> name, - Handle<Object> value); + MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object, + Handle<Object> name, + Handle<Object> value); // Code generators for stub routines. Only called once at startup. 
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); } @@ -621,11 +736,7 @@ static void GenerateSloppyArguments(MacroAssembler* masm); protected: - virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; } - - virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {} - - virtual Handle<Code> pre_monomorphic_stub() { + virtual Handle<Code> pre_monomorphic_stub() const { return pre_monomorphic_stub(isolate(), strict_mode()); } static Handle<Code> pre_monomorphic_stub(Isolate* isolate, @@ -653,7 +764,7 @@ private: void set_target(Code* code) { // Strict mode must be preserved across IC patching. - ASSERT(GetStrictMode(code->extra_ic_state()) == strict_mode()); + DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode()); IC::set_target(code); } @@ -679,7 +790,7 @@ Handle<Object> key, Handle<Object> value); - Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver, + Handle<Map> ComputeTransitionedMap(Handle<Map> map, KeyedAccessStoreMode store_mode); friend class IC; @@ -694,13 +805,13 @@ public: class State V8_FINAL BASE_EMBEDDED { public: - explicit State(ExtraICState extra_ic_state); + State(Isolate* isolate, ExtraICState extra_ic_state); - State(Token::Value op, OverwriteMode mode) + State(Isolate* isolate, Token::Value op, OverwriteMode mode) : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE), - result_kind_(NONE) { - ASSERT_LE(FIRST_TOKEN, op); - ASSERT_LE(op, LAST_TOKEN); + result_kind_(NONE), isolate_(isolate) { + DCHECK_LE(FIRST_TOKEN, op); + DCHECK_LE(op, LAST_TOKEN); } InlineCacheState GetICState() const { @@ -732,7 +843,7 @@ // Returns true if the IC _could_ create allocation mementos. 
bool CouldCreateAllocationMementos() const { if (left_kind_ == STRING || right_kind_ == STRING) { - ASSERT_EQ(Token::ADD, op_); + DCHECK_EQ(Token::ADD, op_); return true; } return false; @@ -769,13 +880,15 @@ } Type* GetResultType(Zone* zone) const; - void Print(StringStream* stream) const; - void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result); + Isolate* isolate() const { return isolate_; } + private: + friend OStream& operator<<(OStream& os, const BinaryOpIC::State& s); + enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC }; Kind UpdateKind(Handle<Object> object, Kind kind) const; @@ -790,14 +903,13 @@ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4)); class OpField: public BitField<int, 0, 4> {}; class OverwriteModeField: public BitField<OverwriteMode, 4, 2> {}; - class SSE2Field: public BitField<bool, 6, 1> {}; - class ResultKindField: public BitField<Kind, 7, 3> {}; - class LeftKindField: public BitField<Kind, 10, 3> {}; + class ResultKindField: public BitField<Kind, 6, 3> {}; + class LeftKindField: public BitField<Kind, 9, 3> {}; // When fixed right arg is set, we don't need to store the right kind. // Thus the two fields can overlap. 
- class HasFixedRightArgField: public BitField<bool, 13, 1> {}; - class FixedRightArgValueField: public BitField<int, 14, 4> {}; - class RightKindField: public BitField<Kind, 14, 3> {}; + class HasFixedRightArgField: public BitField<bool, 12, 1> {}; + class FixedRightArgValueField: public BitField<int, 13, 4> {}; + class RightKindField: public BitField<Kind, 13, 3> {}; Token::Value op_; OverwriteMode mode_; @@ -805,18 +917,22 @@ Kind right_kind_; Kind result_kind_; Maybe<int> fixed_right_arg_; + Isolate* isolate_; }; explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { } static Builtins::JavaScript TokenToJSBuiltin(Token::Value op); - MaybeObject* Transition(Handle<AllocationSite> allocation_site, - Handle<Object> left, - Handle<Object> right) V8_WARN_UNUSED_RESULT; + MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site, + Handle<Object> left, + Handle<Object> right) V8_WARN_UNUSED_RESULT; }; +OStream& operator<<(OStream& os, const BinaryOpIC::State& s); + + class CompareIC: public IC { public: // The type/state lattice is defined by the following inequations: @@ -843,12 +959,9 @@ State state, Handle<Map> map = Handle<Map>()); - static void StubInfoToType(int stub_minor_key, - Type** left_type, - Type** right_type, - Type** overall_type, - Handle<Map> map, - Zone* zone); + static void StubInfoToType(uint32_t stub_key, Type** left_type, + Type** right_type, Type** overall_type, + Handle<Map> map, Zone* zone); CompareIC(Isolate* isolate, Token::Value op) : IC(EXTRA_CALL_FRAME, isolate), op_(op) { } @@ -895,7 +1008,7 @@ public: explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {} - MUST_USE_RESULT MaybeObject* CompareNil(Handle<Object> object); + Handle<Object> CompareNil(Handle<Object> object); static Handle<Code> GetUninitialized(); @@ -903,8 +1016,8 @@ Code* target, ConstantPoolArray* constant_pool); - static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil, - Handle<Object> object); + static 
Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil, + Handle<Object> object); }; @@ -912,7 +1025,7 @@ public: explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { } - MaybeObject* ToBoolean(Handle<Object> object); + Handle<Object> ToBoolean(Handle<Object> object); }; @@ -920,15 +1033,15 @@ enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK }; void PatchInlinedSmiCode(Address address, InlinedSmiCheck check); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_MissWithAllocationSite); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss); +DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure); +DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure); +DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss); +DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure); +DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss); +DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss); +DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite); +DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss); +DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss); } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/ic-inl.h nodejs-0.11.15/deps/v8/src/ic-inl.h --- nodejs-0.11.13/deps/v8/src/ic-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ic-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,16 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_IC_INL_H_ #define V8_IC_INL_H_ -#include "ic.h" +#include "src/ic.h" -#include "compiler.h" -#include "debug.h" -#include "macro-assembler.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/macro-assembler.h" +#include "src/prototype.h" namespace v8 { namespace internal { @@ -42,7 +20,6 @@ // Get the address of the call. 
Address result = Assembler::target_address_from_return_address(pc()); -#ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate()->debug(); // First check if any break points are active if not just return the address // of the call. @@ -68,9 +45,6 @@ // No break point here just return the address of the call. return result; } -#else - return result; -#endif } @@ -79,7 +53,6 @@ return NULL; } else { Handle<ConstantPoolArray> result = raw_constant_pool_; -#ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate()->debug(); // First check if any break points are active if not just return the // original constant pool. @@ -94,7 +67,6 @@ // constant pool for the original code instead of the breakpointed code. return GetOriginalCode()->constant_pool(); } -#endif return *result; } } @@ -116,7 +88,7 @@ // Convert target address to the code object. Code::GetCodeFromTargetAddress // is safe for use during GC where the map might be marked. Code* result = Code::GetCodeFromTargetAddress(target); - ASSERT(result->is_inline_cache_stub()); + DCHECK(result->is_inline_cache_stub()); return result; } @@ -124,7 +96,7 @@ void IC::SetTargetAtAddress(Address address, Code* target, ConstantPoolArray* constant_pool) { - ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub()); + DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub()); Heap* heap = target->GetHeap(); Code* old_target = GetTargetAtAddress(address, constant_pool); #ifdef DEBUG @@ -132,7 +104,7 @@ // ICs as strict mode. The strict-ness of the IC must be preserved. 
if (old_target->kind() == Code::STORE_IC || old_target->kind() == Code::KEYED_STORE_IC) { - ASSERT(StoreIC::GetStrictMode(old_target->extra_ic_state()) == + DCHECK(StoreIC::GetStrictMode(old_target->extra_ic_state()) == StoreIC::GetStrictMode(target->extra_ic_state())); } #endif @@ -147,59 +119,71 @@ } -InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object) { - if (object->IsJSObject()) return OWN_MAP; - - // If the object is a value, we use the prototype map for the cache. - ASSERT(object->IsString() || object->IsSymbol() || - object->IsNumber() || object->IsBoolean()); - return PROTOTYPE_MAP; -} - - -HeapObject* IC::GetCodeCacheHolder(Isolate* isolate, - Object* object, - InlineCacheHolderFlag holder) { - if (object->IsSmi()) holder = PROTOTYPE_MAP; - Object* map_owner = holder == OWN_MAP - ? object : object->GetPrototype(isolate); - return HeapObject::cast(map_owner); +template <class TypeClass> +JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) { + if (type->Is(TypeClass::Boolean())) { + return native_context->boolean_function(); + } else if (type->Is(TypeClass::Number())) { + return native_context->number_function(); + } else if (type->Is(TypeClass::String())) { + return native_context->string_function(); + } else if (type->Is(TypeClass::Symbol())) { + return native_context->symbol_function(); + } else { + return NULL; + } } -InlineCacheHolderFlag IC::GetCodeCacheFlag(HeapType* type) { - if (type->Is(HeapType::Boolean()) || - type->Is(HeapType::Number()) || - type->Is(HeapType::String()) || - type->Is(HeapType::Symbol())) { - return PROTOTYPE_MAP; - } - return OWN_MAP; +Handle<Map> IC::GetHandlerCacheHolder(HeapType* type, bool receiver_is_holder, + Isolate* isolate, CacheHolderFlag* flag) { + Handle<Map> receiver_map = TypeToMap(type, isolate); + if (receiver_is_holder) { + *flag = kCacheOnReceiver; + return receiver_map; + } + Context* native_context = *isolate->native_context(); + JSFunction* builtin_ctor = 
GetRootConstructor(type, native_context); + if (builtin_ctor != NULL) { + *flag = kCacheOnPrototypeReceiverIsPrimitive; + return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map()); + } + *flag = receiver_map->is_dictionary_map() + ? kCacheOnPrototypeReceiverIsDictionary + : kCacheOnPrototype; + // Callers must ensure that the prototype is non-null. + return handle(JSObject::cast(receiver_map->prototype())->map()); } -Handle<Map> IC::GetCodeCacheHolder(InlineCacheHolderFlag flag, - HeapType* type, - Isolate* isolate) { - if (flag == PROTOTYPE_MAP) { - Context* context = isolate->context()->native_context(); - JSFunction* constructor; - if (type->Is(HeapType::Boolean())) { - constructor = context->boolean_function(); - } else if (type->Is(HeapType::Number())) { - constructor = context->number_function(); - } else if (type->Is(HeapType::String())) { - constructor = context->string_function(); - } else { - ASSERT(type->Is(HeapType::Symbol())); - constructor = context->symbol_function(); - } - return handle(JSObject::cast(constructor->instance_prototype())->map()); +Handle<Map> IC::GetICCacheHolder(HeapType* type, Isolate* isolate, + CacheHolderFlag* flag) { + Context* native_context = *isolate->native_context(); + JSFunction* builtin_ctor = GetRootConstructor(type, native_context); + if (builtin_ctor != NULL) { + *flag = kCacheOnPrototype; + return handle(builtin_ctor->initial_map()); } + *flag = kCacheOnReceiver; return TypeToMap(type, isolate); } +IC::State CallIC::FeedbackToState(Handle<FixedArray> vector, + Handle<Smi> slot) const { + IC::State state = UNINITIALIZED; + Object* feedback = vector->get(slot->value()); + + if (feedback == *TypeFeedbackInfo::MegamorphicSentinel(isolate())) { + state = GENERIC; + } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) { + state = MONOMORPHIC; + } else { + CHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate())); + } + + return state; +} } } // namespace v8::internal #endif // 
V8_IC_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/icu_util.cc nodejs-0.11.15/deps/v8/src/icu_util.cc --- nodejs-0.11.13/deps/v8/src/icu_util.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/icu_util.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "icu_util.h" +#include "src/icu_util.h" #if defined(_WIN32) #include <windows.h> diff -Nru nodejs-0.11.13/deps/v8/src/icu_util.h nodejs-0.11.15/deps/v8/src/icu_util.h --- nodejs-0.11.13/deps/v8/src/icu_util.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/icu_util.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ICU_UTIL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/incremental-marking.cc nodejs-0.11.15/deps/v8/src/incremental-marking.cc --- nodejs-0.11.13/deps/v8/src/incremental-marking.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/incremental-marking.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,1020 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "incremental-marking.h" - -#include "code-stubs.h" -#include "compilation-cache.h" -#include "objects-visiting.h" -#include "objects-visiting-inl.h" -#include "v8conversions.h" - -namespace v8 { -namespace internal { - - -IncrementalMarking::IncrementalMarking(Heap* heap) - : heap_(heap), - state_(STOPPED), - marking_deque_memory_(NULL), - marking_deque_memory_committed_(false), - steps_count_(0), - steps_took_(0), - longest_step_(0.0), - old_generation_space_available_at_start_of_incremental_(0), - old_generation_space_used_at_start_of_incremental_(0), - steps_count_since_last_gc_(0), - steps_took_since_last_gc_(0), - should_hurry_(false), - marking_speed_(0), - allocated_(0), - no_marking_scope_depth_(0), - unscanned_bytes_of_large_object_(0) { -} - - -void IncrementalMarking::TearDown() { - delete marking_deque_memory_; -} - - -void IncrementalMarking::RecordWriteSlow(HeapObject* obj, - Object** slot, - Object* value) { - if (BaseRecordWrite(obj, slot, value) && slot != NULL) { - MarkBit obj_bit = Marking::MarkBitFrom(obj); - if (Marking::IsBlack(obj_bit)) { - // Object is not going to be rescanned we need to record the slot. 
- heap_->mark_compact_collector()->RecordSlot( - HeapObject::RawField(obj, 0), slot, value); - } - } -} - - -void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, - Object** slot, - Isolate* isolate) { - ASSERT(obj->IsHeapObject()); - IncrementalMarking* marking = isolate->heap()->incremental_marking(); - - MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); - int counter = chunk->write_barrier_counter(); - if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { - marking->write_barriers_invoked_since_last_step_ += - MemoryChunk::kWriteBarrierCounterGranularity - - chunk->write_barrier_counter(); - chunk->set_write_barrier_counter( - MemoryChunk::kWriteBarrierCounterGranularity); - } - - marking->RecordWrite(obj, slot, *slot); -} - - -void IncrementalMarking::RecordCodeTargetPatch(Code* host, - Address pc, - HeapObject* value) { - if (IsMarking()) { - RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); - RecordWriteIntoCode(host, &rinfo, value); - } -} - - -void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { - if (IsMarking()) { - Code* host = heap_->isolate()->inner_pointer_to_code_cache()-> - GcSafeFindCodeForInnerPointer(pc); - RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); - RecordWriteIntoCode(host, &rinfo, value); - } -} - - -void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host, - Object** slot, - Code* value) { - if (BaseRecordWrite(host, slot, value)) { - ASSERT(slot != NULL); - heap_->mark_compact_collector()-> - RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value); - } -} - - -void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj, - RelocInfo* rinfo, - Object* value) { - MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); - if (Marking::IsWhite(value_bit)) { - MarkBit obj_bit = Marking::MarkBitFrom(obj); - if (Marking::IsBlack(obj_bit)) { - BlackToGreyAndUnshift(obj, obj_bit); - RestartIfNotMarking(); - } - // Object is either grey or 
white. It will be scanned if survives. - return; - } - - if (is_compacting_) { - MarkBit obj_bit = Marking::MarkBitFrom(obj); - if (Marking::IsBlack(obj_bit)) { - // Object is not going to be rescanned. We need to record the slot. - heap_->mark_compact_collector()->RecordRelocSlot(rinfo, - Code::cast(value)); - } - } -} - - -static void MarkObjectGreyDoNotEnqueue(Object* obj) { - if (obj->IsHeapObject()) { - HeapObject* heap_obj = HeapObject::cast(obj); - MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); - if (Marking::IsBlack(mark_bit)) { - MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), - -heap_obj->Size()); - } - Marking::AnyToGrey(mark_bit); - } -} - - -static inline void MarkBlackOrKeepGrey(HeapObject* heap_object, - MarkBit mark_bit, - int size) { - ASSERT(!Marking::IsImpossible(mark_bit)); - if (mark_bit.Get()) return; - mark_bit.Set(); - MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); - ASSERT(Marking::IsBlack(mark_bit)); -} - - -static inline void MarkBlackOrKeepBlack(HeapObject* heap_object, - MarkBit mark_bit, - int size) { - ASSERT(!Marking::IsImpossible(mark_bit)); - if (Marking::IsBlack(mark_bit)) return; - Marking::MarkBlack(mark_bit); - MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); - ASSERT(Marking::IsBlack(mark_bit)); -} - - -class IncrementalMarkingMarkingVisitor - : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> { - public: - static void Initialize() { - StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize(); - table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental); - table_.Register(kVisitNativeContext, &VisitNativeContextIncremental); - table_.Register(kVisitJSRegExp, &VisitJSRegExp); - } - - static const int kProgressBarScanningChunk = 32 * 1024; - - static void VisitFixedArrayIncremental(Map* map, HeapObject* object) { - MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); - // TODO(mstarzinger): Move setting of the flag to 
the allocation site of - // the array. The visitor should just check the flag. - if (FLAG_use_marking_progress_bar && - chunk->owner()->identity() == LO_SPACE) { - chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR); - } - if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { - Heap* heap = map->GetHeap(); - // When using a progress bar for large fixed arrays, scan only a chunk of - // the array and try to push it onto the marking deque again until it is - // fully scanned. Fall back to scanning it through to the end in case this - // fails because of a full deque. - int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); - int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset, - chunk->progress_bar()); - int end_offset = Min(object_size, - start_offset + kProgressBarScanningChunk); - int already_scanned_offset = start_offset; - bool scan_until_end = false; - do { - VisitPointersWithAnchor(heap, - HeapObject::RawField(object, 0), - HeapObject::RawField(object, start_offset), - HeapObject::RawField(object, end_offset)); - start_offset = end_offset; - end_offset = Min(object_size, end_offset + kProgressBarScanningChunk); - scan_until_end = heap->incremental_marking()->marking_deque()->IsFull(); - } while (scan_until_end && start_offset < object_size); - chunk->set_progress_bar(start_offset); - if (start_offset < object_size) { - heap->incremental_marking()->marking_deque()->UnshiftGrey(object); - heap->incremental_marking()->NotifyIncompleteScanOfObject( - object_size - (start_offset - already_scanned_offset)); - } - } else { - FixedArrayVisitor::Visit(map, object); - } - } - - static void VisitNativeContextIncremental(Map* map, HeapObject* object) { - Context* context = Context::cast(object); - - // We will mark cache black with a separate pass - // when we finish marking. 
- MarkObjectGreyDoNotEnqueue(context->normalized_map_cache()); - VisitNativeContext(map, context); - } - - static void VisitWeakCollection(Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - VisitPointers(heap, - HeapObject::RawField(object, - JSWeakCollection::kPropertiesOffset), - HeapObject::RawField(object, JSWeakCollection::kSize)); - } - - static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {} - - INLINE(static void VisitPointer(Heap* heap, Object** p)) { - Object* obj = *p; - if (obj->NonFailureIsHeapObject()) { - heap->mark_compact_collector()->RecordSlot(p, p, obj); - MarkObject(heap, obj); - } - } - - INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { - for (Object** p = start; p < end; p++) { - Object* obj = *p; - if (obj->NonFailureIsHeapObject()) { - heap->mark_compact_collector()->RecordSlot(start, p, obj); - MarkObject(heap, obj); - } - } - } - - INLINE(static void VisitPointersWithAnchor(Heap* heap, - Object** anchor, - Object** start, - Object** end)) { - for (Object** p = start; p < end; p++) { - Object* obj = *p; - if (obj->NonFailureIsHeapObject()) { - heap->mark_compact_collector()->RecordSlot(anchor, p, obj); - MarkObject(heap, obj); - } - } - } - - // Marks the object grey and pushes it on the marking stack. - INLINE(static void MarkObject(Heap* heap, Object* obj)) { - HeapObject* heap_object = HeapObject::cast(obj); - MarkBit mark_bit = Marking::MarkBitFrom(heap_object); - if (mark_bit.data_only()) { - MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); - } else if (Marking::IsWhite(mark_bit)) { - heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit); - } - } - - // Marks the object black without pushing it on the marking stack. - // Returns true if object needed marking and false otherwise. 
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) { - HeapObject* heap_object = HeapObject::cast(obj); - MarkBit mark_bit = Marking::MarkBitFrom(heap_object); - if (Marking::IsWhite(mark_bit)) { - mark_bit.Set(); - MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), - heap_object->Size()); - return true; - } - return false; - } -}; - - -class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { - public: - explicit IncrementalMarkingRootMarkingVisitor( - IncrementalMarking* incremental_marking) - : incremental_marking_(incremental_marking) { - } - - void VisitPointer(Object** p) { - MarkObjectByPointer(p); - } - - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) MarkObjectByPointer(p); - } - - private: - void MarkObjectByPointer(Object** p) { - Object* obj = *p; - if (!obj->IsHeapObject()) return; - - HeapObject* heap_object = HeapObject::cast(obj); - MarkBit mark_bit = Marking::MarkBitFrom(heap_object); - if (mark_bit.data_only()) { - MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); - } else { - if (Marking::IsWhite(mark_bit)) { - incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); - } - } - } - - IncrementalMarking* incremental_marking_; -}; - - -void IncrementalMarking::Initialize() { - IncrementalMarkingMarkingVisitor::Initialize(); -} - - -void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, - bool is_marking, - bool is_compacting) { - if (is_marking) { - chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); - chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); - - // It's difficult to filter out slots recorded for large objects. 
- if (chunk->owner()->identity() == LO_SPACE && - chunk->size() > static_cast<size_t>(Page::kPageSize) && - is_compacting) { - chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION); - } - } else if (chunk->owner()->identity() == CELL_SPACE || - chunk->owner()->identity() == PROPERTY_CELL_SPACE || - chunk->scan_on_scavenge()) { - chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); - chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); - } else { - chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); - chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); - } -} - - -void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk, - bool is_marking) { - chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); - if (is_marking) { - chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); - } else { - chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); - } - chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE); -} - - -void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace( - PagedSpace* space) { - PageIterator it(space); - while (it.has_next()) { - Page* p = it.next(); - SetOldSpacePageFlags(p, false, false); - } -} - - -void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace( - NewSpace* space) { - NewSpacePageIterator it(space); - while (it.has_next()) { - NewSpacePage* p = it.next(); - SetNewSpacePageFlags(p, false); - } -} - - -void IncrementalMarking::DeactivateIncrementalWriteBarrier() { - DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space()); - DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space()); - DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space()); - DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space()); - DeactivateIncrementalWriteBarrierForSpace(heap_->map_space()); - DeactivateIncrementalWriteBarrierForSpace(heap_->code_space()); - DeactivateIncrementalWriteBarrierForSpace(heap_->new_space()); - - 
LargePage* lop = heap_->lo_space()->first_page(); - while (lop->is_valid()) { - SetOldSpacePageFlags(lop, false, false); - lop = lop->next_page(); - } -} - - -void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) { - PageIterator it(space); - while (it.has_next()) { - Page* p = it.next(); - SetOldSpacePageFlags(p, true, is_compacting_); - } -} - - -void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) { - NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); - while (it.has_next()) { - NewSpacePage* p = it.next(); - SetNewSpacePageFlags(p, true); - } -} - - -void IncrementalMarking::ActivateIncrementalWriteBarrier() { - ActivateIncrementalWriteBarrier(heap_->old_pointer_space()); - ActivateIncrementalWriteBarrier(heap_->old_data_space()); - ActivateIncrementalWriteBarrier(heap_->cell_space()); - ActivateIncrementalWriteBarrier(heap_->property_cell_space()); - ActivateIncrementalWriteBarrier(heap_->map_space()); - ActivateIncrementalWriteBarrier(heap_->code_space()); - ActivateIncrementalWriteBarrier(heap_->new_space()); - - LargePage* lop = heap_->lo_space()->first_page(); - while (lop->is_valid()) { - SetOldSpacePageFlags(lop, true, is_compacting_); - lop = lop->next_page(); - } -} - - -bool IncrementalMarking::WorthActivating() { -#ifndef DEBUG - static const intptr_t kActivationThreshold = 8 * MB; -#else - // TODO(gc) consider setting this to some low level so that some - // debug tests run with incremental marking and some without. - static const intptr_t kActivationThreshold = 0; -#endif - // Only start incremental marking in a safe state: 1) when incremental - // marking is turned on, 2) when we are currently not in a GC, and - // 3) when we are currently not serializing or deserializing the heap. 
- return FLAG_incremental_marking && - FLAG_incremental_marking_steps && - heap_->gc_state() == Heap::NOT_IN_GC && - !Serializer::enabled() && - heap_->isolate()->IsInitialized() && - heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold; -} - - -void IncrementalMarking::ActivateGeneratedStub(Code* stub) { - ASSERT(RecordWriteStub::GetMode(stub) == - RecordWriteStub::STORE_BUFFER_ONLY); - - if (!IsMarking()) { - // Initially stub is generated in STORE_BUFFER_ONLY mode thus - // we don't need to do anything if incremental marking is - // not active. - } else if (IsCompacting()) { - RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); - } else { - RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); - } -} - - -static void PatchIncrementalMarkingRecordWriteStubs( - Heap* heap, RecordWriteStub::Mode mode) { - UnseededNumberDictionary* stubs = heap->code_stubs(); - - int capacity = stubs->Capacity(); - for (int i = 0; i < capacity; i++) { - Object* k = stubs->KeyAt(i); - if (stubs->IsKey(k)) { - uint32_t key = NumberToUint32(k); - - if (CodeStub::MajorKeyFromKey(key) == - CodeStub::RecordWrite) { - Object* e = stubs->ValueAt(i); - if (e->IsCode()) { - RecordWriteStub::Patch(Code::cast(e), mode); - } - } - } - } -} - - -void IncrementalMarking::EnsureMarkingDequeIsCommitted() { - if (marking_deque_memory_ == NULL) { - marking_deque_memory_ = new VirtualMemory(4 * MB); - } - if (!marking_deque_memory_committed_) { - bool success = marking_deque_memory_->Commit( - reinterpret_cast<Address>(marking_deque_memory_->address()), - marking_deque_memory_->size(), - false); // Not executable. 
- CHECK(success); - marking_deque_memory_committed_ = true; - } -} - - -void IncrementalMarking::UncommitMarkingDeque() { - if (state_ == STOPPED && marking_deque_memory_committed_) { - bool success = marking_deque_memory_->Uncommit( - reinterpret_cast<Address>(marking_deque_memory_->address()), - marking_deque_memory_->size()); - CHECK(success); - marking_deque_memory_committed_ = false; - } -} - - -void IncrementalMarking::Start(CompactionFlag flag) { - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Start\n"); - } - ASSERT(FLAG_incremental_marking); - ASSERT(FLAG_incremental_marking_steps); - ASSERT(state_ == STOPPED); - ASSERT(heap_->gc_state() == Heap::NOT_IN_GC); - ASSERT(!Serializer::enabled()); - ASSERT(heap_->isolate()->IsInitialized()); - - ResetStepCounters(); - - if (heap_->IsSweepingComplete()) { - StartMarking(flag); - } else { - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Start sweeping.\n"); - } - state_ = SWEEPING; - } - - heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold); -} - - -void IncrementalMarking::StartMarking(CompactionFlag flag) { - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Start marking\n"); - } - - is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) && - heap_->mark_compact_collector()->StartCompaction( - MarkCompactCollector::INCREMENTAL_COMPACTION); - - state_ = MARKING; - - RecordWriteStub::Mode mode = is_compacting_ ? - RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL; - - PatchIncrementalMarkingRecordWriteStubs(heap_, mode); - - EnsureMarkingDequeIsCommitted(); - - // Initialize marking stack. 
- Address addr = static_cast<Address>(marking_deque_memory_->address()); - size_t size = marking_deque_memory_->size(); - if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; - marking_deque_.Initialize(addr, addr + size); - - ActivateIncrementalWriteBarrier(); - - // Marking bits are cleared by the sweeper. -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); - } -#endif - - heap_->CompletelyClearInstanceofCache(); - heap_->isolate()->compilation_cache()->MarkCompactPrologue(); - - if (FLAG_cleanup_code_caches_at_gc) { - // We will mark cache black with a separate pass - // when we finish marking. - MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache()); - } - - // Mark strong roots grey. - IncrementalMarkingRootMarkingVisitor visitor(this); - heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); - - heap_->mark_compact_collector()->MarkWeakObjectToCodeTable(); - - // Ready to start incremental marking. - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Running\n"); - } -} - - -void IncrementalMarking::PrepareForScavenge() { - if (!IsMarking()) return; - NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(), - heap_->new_space()->FromSpaceEnd()); - while (it.has_next()) { - Bitmap::Clear(it.next()); - } -} - - -void IncrementalMarking::UpdateMarkingDequeAfterScavenge() { - if (!IsMarking()) return; - - int current = marking_deque_.bottom(); - int mask = marking_deque_.mask(); - int limit = marking_deque_.top(); - HeapObject** array = marking_deque_.array(); - int new_top = current; - - Map* filler_map = heap_->one_pointer_filler_map(); - - while (current != limit) { - HeapObject* obj = array[current]; - ASSERT(obj->IsHeapObject()); - current = ((current + 1) & mask); - if (heap_->InNewSpace(obj)) { - MapWord map_word = obj->map_word(); - if (map_word.IsForwardingAddress()) { - HeapObject* dest = map_word.ToForwardingAddress(); - array[new_top] = dest; - new_top = 
((new_top + 1) & mask); - ASSERT(new_top != marking_deque_.bottom()); -#ifdef DEBUG - MarkBit mark_bit = Marking::MarkBitFrom(obj); - ASSERT(Marking::IsGrey(mark_bit) || - (obj->IsFiller() && Marking::IsWhite(mark_bit))); -#endif - } - } else if (obj->map() != filler_map) { - // Skip one word filler objects that appear on the - // stack when we perform in place array shift. - array[new_top] = obj; - new_top = ((new_top + 1) & mask); - ASSERT(new_top != marking_deque_.bottom()); -#ifdef DEBUG - MarkBit mark_bit = Marking::MarkBitFrom(obj); - MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); - ASSERT(Marking::IsGrey(mark_bit) || - (obj->IsFiller() && Marking::IsWhite(mark_bit)) || - (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && - Marking::IsBlack(mark_bit))); -#endif - } - } - marking_deque_.set_top(new_top); - - steps_took_since_last_gc_ = 0; - steps_count_since_last_gc_ = 0; - longest_step_ = 0.0; -} - - -void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) { - MarkBit map_mark_bit = Marking::MarkBitFrom(map); - if (Marking::IsWhite(map_mark_bit)) { - WhiteToGreyAndPush(map, map_mark_bit); - } - - IncrementalMarkingMarkingVisitor::IterateBody(map, obj); - - MarkBit mark_bit = Marking::MarkBitFrom(obj); -#if ENABLE_SLOW_ASSERTS - MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); - SLOW_ASSERT(Marking::IsGrey(mark_bit) || - (obj->IsFiller() && Marking::IsWhite(mark_bit)) || - (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && - Marking::IsBlack(mark_bit))); -#endif - MarkBlackOrKeepBlack(obj, mark_bit, size); -} - - -void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) { - Map* filler_map = heap_->one_pointer_filler_map(); - while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { - HeapObject* obj = marking_deque_.Pop(); - - // Explicitly skip one word fillers. Incremental markbit patterns are - // correct only for objects that occupy at least two words. 
- Map* map = obj->map(); - if (map == filler_map) continue; - - int size = obj->SizeFromMap(map); - unscanned_bytes_of_large_object_ = 0; - VisitObject(map, obj, size); - bytes_to_process -= (size - unscanned_bytes_of_large_object_); - } -} - - -void IncrementalMarking::ProcessMarkingDeque() { - Map* filler_map = heap_->one_pointer_filler_map(); - while (!marking_deque_.IsEmpty()) { - HeapObject* obj = marking_deque_.Pop(); - - // Explicitly skip one word fillers. Incremental markbit patterns are - // correct only for objects that occupy at least two words. - Map* map = obj->map(); - if (map == filler_map) continue; - - VisitObject(map, obj, obj->SizeFromMap(map)); - } -} - - -void IncrementalMarking::Hurry() { - if (state() == MARKING) { - double start = 0.0; - if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) { - start = OS::TimeCurrentMillis(); - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Hurry\n"); - } - } - // TODO(gc) hurry can mark objects it encounters black as mutator - // was stopped. - ProcessMarkingDeque(); - state_ = COMPLETE; - if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) { - double end = OS::TimeCurrentMillis(); - double delta = end - start; - heap_->AddMarkingTime(delta); - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n", - static_cast<int>(delta)); - } - } - } - - if (FLAG_cleanup_code_caches_at_gc) { - PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache(); - Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache)); - MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(), - PolymorphicCodeCache::kSize); - } - - Object* context = heap_->native_contexts_list(); - while (!context->IsUndefined()) { - // GC can happen when the context is not fully initialized, - // so the cache can be undefined. 
- HeapObject* cache = HeapObject::cast( - Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX)); - if (!cache->IsUndefined()) { - MarkBit mark_bit = Marking::MarkBitFrom(cache); - if (Marking::IsGrey(mark_bit)) { - Marking::GreyToBlack(mark_bit); - MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size()); - } - } - context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); - } -} - - -void IncrementalMarking::Abort() { - if (IsStopped()) return; - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Aborting.\n"); - } - heap_->new_space()->LowerInlineAllocationLimit(0); - IncrementalMarking::set_should_hurry(false); - ResetStepCounters(); - if (IsMarking()) { - PatchIncrementalMarkingRecordWriteStubs(heap_, - RecordWriteStub::STORE_BUFFER_ONLY); - DeactivateIncrementalWriteBarrier(); - - if (is_compacting_) { - LargeObjectIterator it(heap_->lo_space()); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - Page* p = Page::FromAddress(obj->address()); - if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { - p->ClearFlag(Page::RESCAN_ON_EVACUATION); - } - } - } - } - heap_->isolate()->stack_guard()->Continue(GC_REQUEST); - state_ = STOPPED; - is_compacting_ = false; -} - - -void IncrementalMarking::Finalize() { - Hurry(); - state_ = STOPPED; - is_compacting_ = false; - heap_->new_space()->LowerInlineAllocationLimit(0); - IncrementalMarking::set_should_hurry(false); - ResetStepCounters(); - PatchIncrementalMarkingRecordWriteStubs(heap_, - RecordWriteStub::STORE_BUFFER_ONLY); - DeactivateIncrementalWriteBarrier(); - ASSERT(marking_deque_.IsEmpty()); - heap_->isolate()->stack_guard()->Continue(GC_REQUEST); -} - - -void IncrementalMarking::MarkingComplete(CompletionAction action) { - state_ = COMPLETE; - // We will set the stack guard to request a GC now. This will mean the rest - // of the GC gets performed as soon as possible (we can't do a GC here in a - // record-write context). 
If a few things get allocated between now and then - // that shouldn't make us do a scavenge and keep being incremental, so we set - // the should-hurry flag to indicate that there can't be much work left to do. - set_should_hurry(true); - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Complete (normal).\n"); - } - if (action == GC_VIA_STACK_GUARD) { - heap_->isolate()->stack_guard()->RequestGC(); - } -} - - -void IncrementalMarking::OldSpaceStep(intptr_t allocated) { - if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) { - // TODO(hpayer): Let's play safe for now, but compaction should be - // in principle possible. - Start(PREVENT_COMPACTION); - } else { - Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD); - } -} - - -void IncrementalMarking::Step(intptr_t allocated_bytes, - CompletionAction action) { - if (heap_->gc_state() != Heap::NOT_IN_GC || - !FLAG_incremental_marking || - !FLAG_incremental_marking_steps || - (state_ != SWEEPING && state_ != MARKING)) { - return; - } - - allocated_ += allocated_bytes; - - if (allocated_ < kAllocatedThreshold && - write_barriers_invoked_since_last_step_ < - kWriteBarriersInvokedThreshold) { - return; - } - - if (state_ == MARKING && no_marking_scope_depth_ > 0) return; - - // The marking speed is driven either by the allocation rate or by the rate - // at which we are having to check the color of objects in the write barrier. - // It is possible for a tight non-allocating loop to run a lot of write - // barriers before we get here and check them (marking can only take place on - // allocation), so to reduce the lumpiness we don't use the write barriers - // invoked since last step directly to determine the amount of work to do. 
- intptr_t bytes_to_process = - marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_); - allocated_ = 0; - write_barriers_invoked_since_last_step_ = 0; - - bytes_scanned_ += bytes_to_process; - - double start = 0; - - if (FLAG_trace_incremental_marking || FLAG_trace_gc || - FLAG_print_cumulative_gc_stat) { - start = OS::TimeCurrentMillis(); - } - - if (state_ == SWEEPING) { - if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) { - bytes_scanned_ = 0; - StartMarking(PREVENT_COMPACTION); - } - } else if (state_ == MARKING) { - ProcessMarkingDeque(bytes_to_process); - if (marking_deque_.IsEmpty()) MarkingComplete(action); - } - - steps_count_++; - steps_count_since_last_gc_++; - - bool speed_up = false; - - if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { - if (FLAG_trace_gc) { - PrintPID("Speed up marking after %d steps\n", - static_cast<int>(kMarkingSpeedAccellerationInterval)); - } - speed_up = true; - } - - bool space_left_is_very_small = - (old_generation_space_available_at_start_of_incremental_ < 10 * MB); - - bool only_1_nth_of_space_that_was_available_still_left = - (SpaceLeftInOldSpace() * (marking_speed_ + 1) < - old_generation_space_available_at_start_of_incremental_); - - if (space_left_is_very_small || - only_1_nth_of_space_that_was_available_still_left) { - if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n"); - speed_up = true; - } - - bool size_of_old_space_multiplied_by_n_during_marking = - (heap_->PromotedTotalSize() > - (marking_speed_ + 1) * - old_generation_space_used_at_start_of_incremental_); - if (size_of_old_space_multiplied_by_n_during_marking) { - speed_up = true; - if (FLAG_trace_gc) { - PrintPID("Speed up marking because of heap size increase\n"); - } - } - - int64_t promoted_during_marking = heap_->PromotedTotalSize() - - old_generation_space_used_at_start_of_incremental_; - intptr_t delay = marking_speed_ * MB; - intptr_t scavenge_slack = 
heap_->MaxSemiSpaceSize(); - - // We try to scan at at least twice the speed that we are allocating. - if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { - if (FLAG_trace_gc) { - PrintPID("Speed up marking because marker was not keeping up\n"); - } - speed_up = true; - } - - if (speed_up) { - if (state_ != MARKING) { - if (FLAG_trace_gc) { - PrintPID("Postponing speeding up marking until marking starts\n"); - } - } else { - marking_speed_ += kMarkingSpeedAccelleration; - marking_speed_ = static_cast<int>( - Min(kMaxMarkingSpeed, - static_cast<intptr_t>(marking_speed_ * 1.3))); - if (FLAG_trace_gc) { - PrintPID("Marking speed increased to %d\n", marking_speed_); - } - } - } - - if (FLAG_trace_incremental_marking || FLAG_trace_gc || - FLAG_print_cumulative_gc_stat) { - double end = OS::TimeCurrentMillis(); - double delta = (end - start); - longest_step_ = Max(longest_step_, delta); - steps_took_ += delta; - steps_took_since_last_gc_ += delta; - heap_->AddMarkingTime(delta); - } -} - - -void IncrementalMarking::ResetStepCounters() { - steps_count_ = 0; - steps_took_ = 0; - longest_step_ = 0.0; - old_generation_space_available_at_start_of_incremental_ = - SpaceLeftInOldSpace(); - old_generation_space_used_at_start_of_incremental_ = - heap_->PromotedTotalSize(); - steps_count_since_last_gc_ = 0; - steps_took_since_last_gc_ = 0; - bytes_rescanned_ = 0; - marking_speed_ = kInitialMarkingSpeed; - bytes_scanned_ = 0; - write_barriers_invoked_since_last_step_ = 0; -} - - -int64_t IncrementalMarking::SpaceLeftInOldSpace() { - return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/incremental-marking.h nodejs-0.11.15/deps/v8/src/incremental-marking.h --- nodejs-0.11.13/deps/v8/src/incremental-marking.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/incremental-marking.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,284 +0,0 @@ -// Copyright 
2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_INCREMENTAL_MARKING_H_ -#define V8_INCREMENTAL_MARKING_H_ - - -#include "execution.h" -#include "mark-compact.h" -#include "objects.h" - -namespace v8 { -namespace internal { - - -class IncrementalMarking { - public: - enum State { - STOPPED, - SWEEPING, - MARKING, - COMPLETE - }; - - enum CompletionAction { - GC_VIA_STACK_GUARD, - NO_GC_VIA_STACK_GUARD - }; - - explicit IncrementalMarking(Heap* heap); - - static void Initialize(); - - void TearDown(); - - State state() { - ASSERT(state_ == STOPPED || FLAG_incremental_marking); - return state_; - } - - bool should_hurry() { return should_hurry_; } - void set_should_hurry(bool val) { should_hurry_ = val; } - - inline bool IsStopped() { return state() == STOPPED; } - - INLINE(bool IsMarking()) { return state() >= MARKING; } - - inline bool IsMarkingIncomplete() { return state() == MARKING; } - - inline bool IsComplete() { return state() == COMPLETE; } - - bool WorthActivating(); - - enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION }; - - void Start(CompactionFlag flag = ALLOW_COMPACTION); - - void Stop(); - - void PrepareForScavenge(); - - void UpdateMarkingDequeAfterScavenge(); - - void Hurry(); - - void Finalize(); - - void Abort(); - - void MarkingComplete(CompletionAction action); - - // It's hard to know how much work the incremental marker should do to make - // progress in the face of the mutator creating new work for it. We start - // of at a moderate rate of work and gradually increase the speed of the - // incremental marker until it completes. - // Do some marking every time this much memory has been allocated or that many - // heavy (color-checking) write barriers have been invoked. - static const intptr_t kAllocatedThreshold = 65536; - static const intptr_t kWriteBarriersInvokedThreshold = 32768; - // Start off by marking this many times more memory than has been allocated. 
- static const intptr_t kInitialMarkingSpeed = 1; - // But if we are promoting a lot of data we need to mark faster to keep up - // with the data that is entering the old space through promotion. - static const intptr_t kFastMarking = 3; - // After this many steps we increase the marking/allocating factor. - static const intptr_t kMarkingSpeedAccellerationInterval = 1024; - // This is how much we increase the marking/allocating factor by. - static const intptr_t kMarkingSpeedAccelleration = 2; - static const intptr_t kMaxMarkingSpeed = 1000; - - void OldSpaceStep(intptr_t allocated); - - void Step(intptr_t allocated, CompletionAction action); - - inline void RestartIfNotMarking() { - if (state_ == COMPLETE) { - state_ = MARKING; - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Restarting (new grey objects)\n"); - } - } - } - - static void RecordWriteFromCode(HeapObject* obj, - Object** slot, - Isolate* isolate); - - // Record a slot for compaction. Returns false for objects that are - // guaranteed to be rescanned or not guaranteed to survive. - // - // No slots in white objects should be recorded, as some slots are typed and - // cannot be interpreted correctly if the underlying object does not survive - // the incremental cycle (stays white). 
- INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value)); - INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value)); - INLINE(void RecordWriteIntoCode(HeapObject* obj, - RelocInfo* rinfo, - Object* value)); - INLINE(void RecordWriteOfCodeEntry(JSFunction* host, - Object** slot, - Code* value)); - - - void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value); - void RecordWriteIntoCodeSlow(HeapObject* obj, - RelocInfo* rinfo, - Object* value); - void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value); - void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value); - void RecordCodeTargetPatch(Address pc, HeapObject* value); - - inline void RecordWrites(HeapObject* obj); - - inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit); - - inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit); - - inline int steps_count() { - return steps_count_; - } - - inline double steps_took() { - return steps_took_; - } - - inline double longest_step() { - return longest_step_; - } - - inline int steps_count_since_last_gc() { - return steps_count_since_last_gc_; - } - - inline double steps_took_since_last_gc() { - return steps_took_since_last_gc_; - } - - inline void SetOldSpacePageFlags(MemoryChunk* chunk) { - SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting()); - } - - inline void SetNewSpacePageFlags(NewSpacePage* chunk) { - SetNewSpacePageFlags(chunk, IsMarking()); - } - - MarkingDeque* marking_deque() { return &marking_deque_; } - - bool IsCompacting() { return IsMarking() && is_compacting_; } - - void ActivateGeneratedStub(Code* stub); - - void NotifyOfHighPromotionRate() { - if (IsMarking()) { - if (marking_speed_ < kFastMarking) { - if (FLAG_trace_gc) { - PrintPID("Increasing marking speed to %d " - "due to high promotion rate\n", - static_cast<int>(kFastMarking)); - } - marking_speed_ = kFastMarking; - } - } - } - - void EnterNoMarkingScope() { - 
no_marking_scope_depth_++; - } - - void LeaveNoMarkingScope() { - no_marking_scope_depth_--; - } - - void UncommitMarkingDeque(); - - void NotifyIncompleteScanOfObject(int unscanned_bytes) { - unscanned_bytes_of_large_object_ = unscanned_bytes; - } - - private: - int64_t SpaceLeftInOldSpace(); - - void ResetStepCounters(); - - void StartMarking(CompactionFlag flag); - - void ActivateIncrementalWriteBarrier(PagedSpace* space); - static void ActivateIncrementalWriteBarrier(NewSpace* space); - void ActivateIncrementalWriteBarrier(); - - static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space); - static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space); - void DeactivateIncrementalWriteBarrier(); - - static void SetOldSpacePageFlags(MemoryChunk* chunk, - bool is_marking, - bool is_compacting); - - static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking); - - void EnsureMarkingDequeIsCommitted(); - - INLINE(void ProcessMarkingDeque()); - - INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process)); - - INLINE(void VisitObject(Map* map, HeapObject* obj, int size)); - - Heap* heap_; - - State state_; - bool is_compacting_; - - VirtualMemory* marking_deque_memory_; - bool marking_deque_memory_committed_; - MarkingDeque marking_deque_; - - int steps_count_; - double steps_took_; - double longest_step_; - int64_t old_generation_space_available_at_start_of_incremental_; - int64_t old_generation_space_used_at_start_of_incremental_; - int steps_count_since_last_gc_; - double steps_took_since_last_gc_; - int64_t bytes_rescanned_; - bool should_hurry_; - int marking_speed_; - intptr_t bytes_scanned_; - intptr_t allocated_; - intptr_t write_barriers_invoked_since_last_step_; - - int no_marking_scope_depth_; - - int unscanned_bytes_of_large_object_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking); -}; - -} } // namespace v8::internal - -#endif // V8_INCREMENTAL_MARKING_H_ diff -Nru 
nodejs-0.11.13/deps/v8/src/incremental-marking-inl.h nodejs-0.11.15/deps/v8/src/incremental-marking-inl.h --- nodejs-0.11.13/deps/v8/src/incremental-marking-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/incremental-marking-inl.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,145 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_INCREMENTAL_MARKING_INL_H_ -#define V8_INCREMENTAL_MARKING_INL_H_ - -#include "incremental-marking.h" - -namespace v8 { -namespace internal { - - -bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, - Object** slot, - Object* value) { - HeapObject* value_heap_obj = HeapObject::cast(value); - MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj); - if (Marking::IsWhite(value_bit)) { - MarkBit obj_bit = Marking::MarkBitFrom(obj); - if (Marking::IsBlack(obj_bit)) { - MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); - if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { - if (chunk->IsLeftOfProgressBar(slot)) { - WhiteToGreyAndPush(value_heap_obj, value_bit); - RestartIfNotMarking(); - } else { - return false; - } - } else { - BlackToGreyAndUnshift(obj, obj_bit); - RestartIfNotMarking(); - return false; - } - } else { - return false; - } - } - if (!is_compacting_) return false; - MarkBit obj_bit = Marking::MarkBitFrom(obj); - return Marking::IsBlack(obj_bit); -} - - -void IncrementalMarking::RecordWrite(HeapObject* obj, - Object** slot, - Object* value) { - if (IsMarking() && value->NonFailureIsHeapObject()) { - RecordWriteSlow(obj, slot, value); - } -} - - -void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, - Object** slot, - Code* value) { - if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value); -} - - -void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, - RelocInfo* rinfo, - Object* value) { - if (IsMarking() && value->NonFailureIsHeapObject()) { - RecordWriteIntoCodeSlow(obj, rinfo, value); - } -} - - -void IncrementalMarking::RecordWrites(HeapObject* obj) { - if (IsMarking()) { - MarkBit obj_bit = Marking::MarkBitFrom(obj); - if (Marking::IsBlack(obj_bit)) { - MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); - if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { - chunk->set_progress_bar(0); - } - BlackToGreyAndUnshift(obj, obj_bit); - RestartIfNotMarking(); - } - } -} - - 
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj, - MarkBit mark_bit) { - ASSERT(Marking::MarkBitFrom(obj) == mark_bit); - ASSERT(obj->Size() >= 2*kPointerSize); - ASSERT(IsMarking()); - Marking::BlackToGrey(mark_bit); - int obj_size = obj->Size(); - MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size); - bytes_scanned_ -= obj_size; - int64_t old_bytes_rescanned = bytes_rescanned_; - bytes_rescanned_ = old_bytes_rescanned + obj_size; - if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) { - if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) { - // If we have queued twice the heap size for rescanning then we are - // going around in circles, scanning the same objects again and again - // as the program mutates the heap faster than we can incrementally - // trace it. In this case we switch to non-incremental marking in - // order to finish off this marking phase. - if (FLAG_trace_gc) { - PrintPID("Hurrying incremental marking because of lack of progress\n"); - } - marking_speed_ = kMaxMarkingSpeed; - } - } - - marking_deque_.UnshiftGrey(obj); -} - - -void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) { - Marking::WhiteToGrey(mark_bit); - marking_deque_.PushGrey(obj); -} - - -} } // namespace v8::internal - -#endif // V8_INCREMENTAL_MARKING_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/interface.cc nodejs-0.11.15/deps/v8/src/interface.cc --- nodejs-0.11.13/deps/v8/src/interface.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/interface.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,56 +1,24 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" -#include "interface.h" +#include "src/interface.h" namespace v8 { namespace internal { -static bool Match(void* key1, void* key2) { - String* name1 = *static_cast<String**>(key1); - String* name2 = *static_cast<String**>(key2); - ASSERT(name1->IsInternalizedString()); - ASSERT(name2->IsInternalizedString()); - return name1 == name2; -} - - Interface* Interface::Lookup(Handle<String> name, Zone* zone) { - ASSERT(IsModule()); + DCHECK(IsModule()); ZoneHashMap* map = Chase()->exports_; if (map == NULL) return NULL; ZoneAllocationPolicy allocator(zone); ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false, allocator); if (p == NULL) return NULL; - ASSERT(*static_cast<String**>(p->key) == *name); - ASSERT(p->value != NULL); + DCHECK(*static_cast<String**>(p->key) == *name); + DCHECK(p->value != NULL); return static_cast<Interface*>(p->value); } @@ -70,8 +38,8 @@ #endif -void Interface::DoAdd( - void* name, uint32_t hash, Interface* interface, Zone* zone, bool* ok) { +void Interface::DoAdd(const void* name, uint32_t hash, Interface* interface, + Zone* zone, bool* ok) { MakeModule(ok); if (!*ok) return; @@ -80,8 +48,9 @@ PrintF("%*s# Adding...\n", Nesting::current(), ""); PrintF("%*sthis = ", Nesting::current(), ""); this->Print(Nesting::current()); - PrintF("%*s%s : ", Nesting::current(), "", - (*static_cast<String**>(name))->ToAsciiArray()); + const AstRawString* symbol = static_cast<const AstRawString*>(name); + PrintF("%*s%.*s : ", Nesting::current(), "", symbol->length(), + symbol->raw_data()); interface->Print(Nesting::current()); } #endif @@ -91,10 +60,12 @@ if (*map == NULL) { *map = new(zone->New(sizeof(ZoneHashMap))) - ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity, allocator); + ZoneHashMap(ZoneHashMap::PointersMatch, + ZoneHashMap::kDefaultHashMapCapacity, allocator); } - ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator); + ZoneHashMap::Entry* p = + 
(*map)->Lookup(const_cast<void*>(name), hash, !IsFrozen(), allocator); if (p == NULL) { // This didn't have name but was frozen already, that's an error. *ok = false; @@ -120,8 +91,8 @@ void Interface::Unify(Interface* that, Zone* zone, bool* ok) { if (this->forward_) return this->Chase()->Unify(that, zone, ok); if (that->forward_) return this->Unify(that->Chase(), zone, ok); - ASSERT(this->forward_ == NULL); - ASSERT(that->forward_ == NULL); + DCHECK(this->forward_ == NULL); + DCHECK(that->forward_ == NULL); *ok = true; if (this == that) return; @@ -167,13 +138,13 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) { - ASSERT(this->forward_ == NULL); - ASSERT(that->forward_ == NULL); - ASSERT(!this->IsValue()); - ASSERT(!that->IsValue()); - ASSERT(this->index_ == -1); - ASSERT(that->index_ == -1); - ASSERT(*ok); + DCHECK(this->forward_ == NULL); + DCHECK(that->forward_ == NULL); + DCHECK(!this->IsValue()); + DCHECK(!that->IsValue()); + DCHECK(this->index_ == -1); + DCHECK(that->index_ == -1); + DCHECK(*ok); #ifdef DEBUG Nesting nested; diff -Nru nodejs-0.11.13/deps/v8/src/interface.h nodejs-0.11.15/deps/v8/src/interface.h --- nodejs-0.11.13/deps/v8/src/interface.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/interface.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_INTERFACE_H_ #define V8_INTERFACE_H_ -#include "zone-inl.h" // For operator new. +#include "src/ast-value-factory.h" +#include "src/zone-inl.h" // For operator new. namespace v8 { namespace internal { @@ -82,8 +60,9 @@ // Add a name to the list of exports. If it already exists, unify with // interface, otherwise insert unless this is closed. - void Add(Handle<String> name, Interface* interface, Zone* zone, bool* ok) { - DoAdd(name.location(), name->Hash(), interface, zone, ok); + void Add(const AstRawString* name, Interface* interface, Zone* zone, + bool* ok) { + DoAdd(name, name->hash(), interface, zone, ok); } // Unify with another interface. If successful, both interface objects will @@ -116,7 +95,7 @@ // Assign an index. 
void Allocate(int index) { - ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1); + DCHECK(IsModule() && IsFrozen() && Chase()->index_ == -1); Chase()->index_ = index; } @@ -145,14 +124,14 @@ } int Length() { - ASSERT(IsModule() && IsFrozen()); + DCHECK(IsModule() && IsFrozen()); ZoneHashMap* exports = Chase()->exports_; return exports ? exports->occupancy() : 0; } // The context slot in the hosting global context pointing to this module. int Index() { - ASSERT(IsModule() && IsFrozen()); + DCHECK(IsModule() && IsFrozen()); return Chase()->index_; } @@ -169,12 +148,12 @@ class Iterator { public: bool done() const { return entry_ == NULL; } - Handle<String> name() const { - ASSERT(!done()); - return Handle<String>(*static_cast<String**>(entry_->key)); + const AstRawString* name() const { + DCHECK(!done()); + return static_cast<const AstRawString*>(entry_->key); } Interface* interface() const { - ASSERT(!done()); + DCHECK(!done()); return static_cast<Interface*>(entry_->value); } void Advance() { entry_ = exports_->Next(entry_); } @@ -230,7 +209,7 @@ return result; } - void DoAdd(void* name, uint32_t hash, Interface* interface, Zone* zone, + void DoAdd(const void* name, uint32_t hash, Interface* interface, Zone* zone, bool* ok); void DoUnify(Interface* that, bool* ok, Zone* zone); }; diff -Nru nodejs-0.11.13/deps/v8/src/interpreter-irregexp.cc nodejs-0.11.15/deps/v8/src/interpreter-irregexp.cc --- nodejs-0.11.13/deps/v8/src/interpreter-irregexp.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/interpreter-irregexp.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,19 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // A simple interpreter for the Irregexp byte code. 
-#include "v8.h" -#include "unicode.h" -#include "utils.h" -#include "ast.h" -#include "bytecodes-irregexp.h" -#include "interpreter-irregexp.h" -#include "jsregexp.h" -#include "regexp-macro-assembler.h" +#include "src/v8.h" + +#include "src/ast.h" +#include "src/bytecodes-irregexp.h" +#include "src/interpreter-irregexp.h" +#include "src/jsregexp.h" +#include "src/regexp-macro-assembler.h" +#include "src/unicode.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -141,13 +119,13 @@ static int32_t Load32Aligned(const byte* pc) { - ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0); + DCHECK((reinterpret_cast<intptr_t>(pc) & 3) == 0); return *reinterpret_cast<const int32_t *>(pc); } static int32_t Load16Aligned(const byte* pc) { - ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0); + DCHECK((reinterpret_cast<intptr_t>(pc) & 1) == 0); return *reinterpret_cast<const uint16_t *>(pc); } @@ -158,9 +136,7 @@ // matching terminates. class BacktrackStack { public: - explicit BacktrackStack() { - data_ = NewArray<int>(kBacktrackStackSize); - } + BacktrackStack() { data_ = NewArray<int>(kBacktrackStackSize); } ~BacktrackStack() { DeleteArray(data_); @@ -330,7 +306,7 @@ break; } BYTECODE(LOAD_4_CURRENT_CHARS) { - ASSERT(sizeof(Char) == 1); + DCHECK(sizeof(Char) == 1); int pos = current + (insn >> BYTECODE_SHIFT); if (pos + 4 > subject.length()) { pc = code_base + Load32Aligned(pc + 4); @@ -347,7 +323,7 @@ break; } BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) { - ASSERT(sizeof(Char) == 1); + DCHECK(sizeof(Char) == 1); int pos = current + (insn >> BYTECODE_SHIFT); Char next1 = subject[pos + 1]; Char next2 = subject[pos + 2]; @@ -602,7 +578,7 @@ Handle<String> subject, int* registers, int start_position) { - ASSERT(subject->IsFlat()); + DCHECK(subject->IsFlat()); DisallowHeapAllocation no_gc; const byte* code_base = code_array->GetDataStartAddress(); @@ -618,7 +594,7 @@ start_position, previous_char); } else { - ASSERT(subject_content.IsTwoByte()); + 
DCHECK(subject_content.IsTwoByte()); Vector<const uc16> subject_vector = subject_content.ToUC16Vector(); if (start_position != 0) previous_char = subject_vector[start_position - 1]; return RawMatch(isolate, diff -Nru nodejs-0.11.13/deps/v8/src/interpreter-irregexp.h nodejs-0.11.15/deps/v8/src/interpreter-irregexp.h --- nodejs-0.11.13/deps/v8/src/interpreter-irregexp.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/interpreter-irregexp.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // A simple interpreter for the Irregexp byte code. diff -Nru nodejs-0.11.13/deps/v8/src/isolate.cc nodejs-0.11.15/deps/v8/src/isolate.cc --- nodejs-0.11.13/deps/v8/src/isolate.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/isolate.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,78 +1,57 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <stdlib.h> -#include "v8.h" +#include "src/v8.h" -#include "ast.h" -#include "bootstrapper.h" -#include "codegen.h" -#include "compilation-cache.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "heap-profiler.h" -#include "hydrogen.h" -#include "isolate-inl.h" -#include "lithium-allocator.h" -#include "log.h" -#include "messages.h" -#include "platform.h" -#include "regexp-stack.h" -#include "runtime-profiler.h" -#include "sampler.h" -#include "scopeinfo.h" -#include "serialize.h" -#include "simulator.h" -#include "spaces.h" -#include "stub-cache.h" -#include "sweeper-thread.h" -#include "utils/random-number-generator.h" -#include "version.h" -#include "vm-state-inl.h" +#include "src/ast.h" +#include "src/base/platform/platform.h" +#include "src/base/utils/random-number-generator.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/compilation-cache.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/heap/spaces.h" +#include "src/heap/sweeper-thread.h" +#include "src/heap-profiler.h" +#include "src/hydrogen.h" +#include "src/isolate-inl.h" +#include "src/lithium-allocator.h" +#include "src/log.h" +#include "src/messages.h" +#include "src/prototype.h" +#include "src/regexp-stack.h" +#include 
"src/runtime-profiler.h" +#include "src/sampler.h" +#include "src/scopeinfo.h" +#include "src/serialize.h" +#include "src/simulator.h" +#include "src/stub-cache.h" +#include "src/version.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { -Atomic32 ThreadId::highest_thread_id_ = 0; +base::Atomic32 ThreadId::highest_thread_id_ = 0; int ThreadId::AllocateThreadId() { - int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1); + int new_id = base::NoBarrier_AtomicIncrement(&highest_thread_id_, 1); return new_id; } int ThreadId::GetCurrentThreadId() { - int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_); + Isolate::EnsureInitialized(); + int thread_id = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_); if (thread_id == 0) { thread_id = AllocateThreadId(); - Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id); + base::Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id); } return thread_id; } @@ -92,7 +71,7 @@ js_entry_sp_ = NULL; external_callback_scope_ = NULL; current_vm_state_ = EXTERNAL; - try_catch_handler_address_ = NULL; + try_catch_handler_ = NULL; context_ = NULL; thread_id_ = ThreadId::Invalid(); external_caught_exception_ = false; @@ -121,43 +100,30 @@ } -v8::TryCatch* ThreadLocalTop::TryCatchHandler() { - return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address()); -} - - -Isolate* Isolate::default_isolate_ = NULL; -Thread::LocalStorageKey Isolate::isolate_key_; -Thread::LocalStorageKey Isolate::thread_id_key_; -Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_; +base::Thread::LocalStorageKey Isolate::isolate_key_; +base::Thread::LocalStorageKey Isolate::thread_id_key_; +base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_; #ifdef DEBUG -Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key; +base::Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key; #endif // DEBUG -Mutex Isolate::process_wide_mutex_; -// TODO(dcarney): Remove with 
default isolate. -enum DefaultIsolateStatus { - kDefaultIsolateUninitialized, - kDefaultIsolateInitialized, - kDefaultIsolateCrashIfInitialized -}; -static DefaultIsolateStatus default_isolate_status_ - = kDefaultIsolateUninitialized; +base::LazyMutex Isolate::process_wide_mutex_ = LAZY_MUTEX_INITIALIZER; Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL; -Atomic32 Isolate::isolate_counter_ = 0; +base::Atomic32 Isolate::isolate_counter_ = 0; Isolate::PerIsolateThreadData* Isolate::FindOrAllocatePerThreadDataForThisThread() { + EnsureInitialized(); ThreadId thread_id = ThreadId::Current(); PerIsolateThreadData* per_thread = NULL; { - LockGuard<Mutex> lock_guard(&process_wide_mutex_); + base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer()); per_thread = thread_data_table_->Lookup(this, thread_id); if (per_thread == NULL) { per_thread = new PerIsolateThreadData(this, thread_id); thread_data_table_->Insert(per_thread); } + DCHECK(thread_data_table_->Lookup(this, thread_id) == per_thread); } - ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread); return per_thread; } @@ -170,79 +136,30 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread( ThreadId thread_id) { + EnsureInitialized(); PerIsolateThreadData* per_thread = NULL; { - LockGuard<Mutex> lock_guard(&process_wide_mutex_); + base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer()); per_thread = thread_data_table_->Lookup(this, thread_id); } return per_thread; } -void Isolate::SetCrashIfDefaultIsolateInitialized() { - LockGuard<Mutex> lock_guard(&process_wide_mutex_); - CHECK(default_isolate_status_ != kDefaultIsolateInitialized); - default_isolate_status_ = kDefaultIsolateCrashIfInitialized; -} - - -void Isolate::EnsureDefaultIsolate() { - LockGuard<Mutex> lock_guard(&process_wide_mutex_); - CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized); - if (default_isolate_ == NULL) { - isolate_key_ = Thread::CreateThreadLocalKey(); - 
thread_id_key_ = Thread::CreateThreadLocalKey(); - per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey(); +void Isolate::EnsureInitialized() { + base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer()); + if (thread_data_table_ == NULL) { + isolate_key_ = base::Thread::CreateThreadLocalKey(); + thread_id_key_ = base::Thread::CreateThreadLocalKey(); + per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey(); #ifdef DEBUG - PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey(); + PerThreadAssertScopeBase::thread_local_key = + base::Thread::CreateThreadLocalKey(); #endif // DEBUG thread_data_table_ = new Isolate::ThreadDataTable(); - default_isolate_ = new Isolate(); - } - // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here - // because a non-null thread data may be already set. - if (Thread::GetThreadLocal(isolate_key_) == NULL) { - Thread::SetThreadLocal(isolate_key_, default_isolate_); } } -struct StaticInitializer { - StaticInitializer() { - Isolate::EnsureDefaultIsolate(); - } -} static_initializer; - -#ifdef ENABLE_DEBUGGER_SUPPORT -Debugger* Isolate::GetDefaultIsolateDebugger() { - EnsureDefaultIsolate(); - return default_isolate_->debugger(); -} -#endif - - -StackGuard* Isolate::GetDefaultIsolateStackGuard() { - EnsureDefaultIsolate(); - return default_isolate_->stack_guard(); -} - - -void Isolate::EnterDefaultIsolate() { - EnsureDefaultIsolate(); - ASSERT(default_isolate_ != NULL); - - PerIsolateThreadData* data = CurrentPerIsolateThreadData(); - // If not yet in default isolate - enter it. 
- if (data == NULL || data->isolate() != default_isolate_) { - default_isolate_->Enter(); - } -} - - -v8::Isolate* Isolate::GetDefaultIsolateForLocking() { - EnsureDefaultIsolate(); - return reinterpret_cast<v8::Isolate*>(default_isolate_); -} - Address Isolate::get_address_from_id(Isolate::AddressId id) { return isolate_addresses_[id]; @@ -264,25 +181,15 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) { // Visit the roots from the top for a given thread. - Object* pending; - // The pending exception can sometimes be a failure. We can't show - // that to the GC, which only understands objects. - if (thread->pending_exception_->ToObject(&pending)) { - v->VisitPointer(&pending); - thread->pending_exception_ = pending; // In case GC updated it. - } + v->VisitPointer(&thread->pending_exception_); v->VisitPointer(&(thread->pending_message_obj_)); v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_))); v->VisitPointer(BitCast<Object**>(&(thread->context_))); - Object* scheduled; - if (thread->scheduled_exception_->ToObject(&scheduled)) { - v->VisitPointer(&scheduled); - thread->scheduled_exception_ = scheduled; - } + v->VisitPointer(&thread->scheduled_exception_); - for (v8::TryCatch* block = thread->TryCatchHandler(); + for (v8::TryCatch* block = thread->try_catch_handler(); block != NULL; - block = TRY_CATCH_FROM_ADDRESS(block->next_)) { + block = block->next_) { v->VisitPointer(BitCast<Object**>(&(block->exception_))); v->VisitPointer(BitCast<Object**>(&(block->message_obj_))); v->VisitPointer(BitCast<Object**>(&(block->message_script_))); @@ -337,23 +244,14 @@ void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) { - // The ARM simulator has a separate JS stack. We therefore register - // the C++ try catch handler with the simulator and get back an - // address that can be used for comparisons with addresses into the - // JS stack. 
When running without the simulator, the address - // returned will be the address of the C++ try catch handler itself. - Address address = reinterpret_cast<Address>( - SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that))); - thread_local_top()->set_try_catch_handler_address(address); + thread_local_top()->set_try_catch_handler(that); } void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) { - ASSERT(thread_local_top()->TryCatchHandler() == that); - thread_local_top()->set_try_catch_handler_address( - reinterpret_cast<Address>(that->next_)); + DCHECK(thread_local_top()->try_catch_handler() == that); + thread_local_top()->set_try_catch_handler(that->next_); thread_local_top()->catcher_ = NULL; - SimulatorStack::UnregisterCTryCatch(); } @@ -371,14 +269,14 @@ return stack_trace; } else if (stack_trace_nesting_level_ == 1) { stack_trace_nesting_level_++; - OS::PrintError( + base::OS::PrintError( "\n\nAttempt to print stack while printing stack (double fault)\n"); - OS::PrintError( + base::OS::PrintError( "If you are lucky you may find a partial stack dump on stdout.\n\n"); incomplete_message_->OutputToStdOut(); return factory()->empty_string(); } else { - OS::Abort(); + base::OS::Abort(); // Unreachable return factory()->empty_string(); } @@ -396,11 +294,10 @@ String::WriteToFlat(*trace, buffer, 0, length); buffer[length] = '\0'; // TODO(dcarney): convert buffer to utf8? - OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", - magic, magic2, - static_cast<void*>(object), static_cast<void*>(map), - reinterpret_cast<char*>(buffer)); - OS::Abort(); + base::OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2, + static_cast<void*>(object), static_cast<void*>(map), + reinterpret_cast<char*>(buffer)); + base::OS::Abort(); } @@ -410,13 +307,10 @@ // call to this function is encountered it is skipped. The seen_caller // in/out parameter is used to remember if the caller has been seen // yet. 
-static bool IsVisibleInStackTrace(StackFrame* raw_frame, +static bool IsVisibleInStackTrace(JSFunction* fun, Object* caller, + Object* receiver, bool* seen_caller) { - // Only display JS frames. - if (!raw_frame->is_java_script()) return false; - JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); - JSFunction* fun = frame->function(); if ((fun == caller) && !(*seen_caller)) { *seen_caller = true; return false; @@ -429,8 +323,10 @@ // The --builtins-in-stack-traces command line flag allows including // internal call sites in the stack trace for debugging purposes. if (!FLAG_builtins_in_stack_traces) { - if (frame->receiver()->IsJSBuiltinsObject() || - (fun->IsBuiltin() && !fun->shared()->native())) { + if (receiver->IsJSBuiltinsObject()) return false; + if (fun->IsBuiltin()) { + return fun->shared()->native(); + } else if (fun->IsFromNativeScript() || fun->IsFromExtensionScript()) { return false; } } @@ -438,10 +334,23 @@ } -Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object, - Handle<Object> caller, - int limit) { +Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object, + Handle<Object> caller) { + // Get stack trace limit. + Handle<Object> error = Object::GetProperty( + this, js_builtins_object(), "$Error").ToHandleChecked(); + if (!error->IsJSObject()) return factory()->undefined_value(); + + Handle<String> stackTraceLimit = + factory()->InternalizeUtf8String("stackTraceLimit"); + DCHECK(!stackTraceLimit.is_null()); + Handle<Object> stack_trace_limit = + JSObject::GetDataProperty(Handle<JSObject>::cast(error), + stackTraceLimit); + if (!stack_trace_limit->IsNumber()) return factory()->undefined_value(); + int limit = FastD2IChecked(stack_trace_limit->Number()); limit = Max(limit, 0); // Ensure that limit is not negative. 
+ int initial_size = Min(limit, 10); Handle<FixedArray> elements = factory()->NewFixedArrayWithHoles(initial_size * 4 + 1); @@ -454,49 +363,51 @@ int frames_seen = 0; int sloppy_frames = 0; bool encountered_strict_function = false; - for (StackFrameIterator iter(this); + for (JavaScriptFrameIterator iter(this); !iter.done() && frames_seen < limit; iter.Advance()) { - StackFrame* raw_frame = iter.frame(); - if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) { - frames_seen++; - JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); - // Set initial size to the maximum inlining level + 1 for the outermost - // function. - List<FrameSummary> frames(FLAG_max_inlining_levels + 1); - frame->Summarize(&frames); - for (int i = frames.length() - 1; i >= 0; i--) { - if (cursor + 4 > elements->length()) { - int new_capacity = JSObject::NewElementsCapacity(elements->length()); - Handle<FixedArray> new_elements = - factory()->NewFixedArrayWithHoles(new_capacity); - for (int i = 0; i < cursor; i++) { - new_elements->set(i, elements->get(i)); - } - elements = new_elements; + JavaScriptFrame* frame = iter.frame(); + // Set initial size to the maximum inlining level + 1 for the outermost + // function. + List<FrameSummary> frames(FLAG_max_inlining_levels + 1); + frame->Summarize(&frames); + for (int i = frames.length() - 1; i >= 0; i--) { + Handle<JSFunction> fun = frames[i].function(); + Handle<Object> recv = frames[i].receiver(); + // Filter out internal frames that we do not want to show. + if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) continue; + // Filter out frames from other security contexts. 
+ if (!this->context()->HasSameSecurityTokenAs(fun->context())) continue; + if (cursor + 4 > elements->length()) { + int new_capacity = JSObject::NewElementsCapacity(elements->length()); + Handle<FixedArray> new_elements = + factory()->NewFixedArrayWithHoles(new_capacity); + for (int i = 0; i < cursor; i++) { + new_elements->set(i, elements->get(i)); } - ASSERT(cursor + 4 <= elements->length()); + elements = new_elements; + } + DCHECK(cursor + 4 <= elements->length()); - Handle<Object> recv = frames[i].receiver(); - Handle<JSFunction> fun = frames[i].function(); - Handle<Code> code = frames[i].code(); - Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this); - // The stack trace API should not expose receivers and function - // objects on frames deeper than the top-most one with a strict - // mode function. The number of sloppy frames is stored as - // first element in the result array. - if (!encountered_strict_function) { - if (fun->shared()->strict_mode() == STRICT) { - encountered_strict_function = true; - } else { - sloppy_frames++; - } + + Handle<Code> code = frames[i].code(); + Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this); + // The stack trace API should not expose receivers and function + // objects on frames deeper than the top-most one with a strict + // mode function. The number of sloppy frames is stored as + // first element in the result array. 
+ if (!encountered_strict_function) { + if (fun->shared()->strict_mode() == STRICT) { + encountered_strict_function = true; + } else { + sloppy_frames++; } - elements->set(cursor++, *recv); - elements->set(cursor++, *fun); - elements->set(cursor++, *code); - elements->set(cursor++, *offset); } + elements->set(cursor++, *recv); + elements->set(cursor++, *fun); + elements->set(cursor++, *code); + elements->set(cursor++, *offset); + frames_seen++; } } elements->set(0, Smi::FromInt(sloppy_frames)); @@ -509,15 +420,24 @@ void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) { if (capture_stack_trace_for_uncaught_exceptions_) { // Capture stack trace for a detailed exception message. - Handle<String> key = factory()->hidden_stack_trace_string(); + Handle<Name> key = factory()->detailed_stack_trace_symbol(); Handle<JSArray> stack_trace = CaptureCurrentStackTrace( stack_trace_for_uncaught_exceptions_frame_limit_, stack_trace_for_uncaught_exceptions_options_); - JSObject::SetHiddenProperty(error_object, key, stack_trace); + JSObject::SetProperty(error_object, key, stack_trace, STRICT).Assert(); } } +void Isolate::CaptureAndSetSimpleStackTrace(Handle<JSObject> error_object, + Handle<Object> caller) { + // Capture stack trace for simple stack trace string formatting. + Handle<Name> key = factory()->stack_trace_symbol(); + Handle<Object> stack_trace = CaptureSimpleStackTrace(error_object, caller); + JSObject::SetProperty(error_object, key, stack_trace, STRICT).Assert(); +} + + Handle<JSArray> Isolate::CaptureCurrentStackTrace( int frame_limit, StackTrace::StackTraceOptions options) { // Ensure no negative values. @@ -551,16 +471,20 @@ List<FrameSummary> frames(FLAG_max_inlining_levels + 1); frame->Summarize(&frames); for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) { + Handle<JSFunction> fun = frames[i].function(); + // Filter frames from other security contexts. 
+ if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) && + !this->context()->HasSameSecurityTokenAs(fun->context())) continue; + // Create a JSObject to hold the information for the StackFrame. Handle<JSObject> stack_frame = factory()->NewJSObject(object_function()); - Handle<JSFunction> fun = frames[i].function(); Handle<Script> script(Script::cast(fun->shared()->script())); if (options & StackTrace::kLineNumber) { int script_line_offset = script->line_offset()->value(); int position = frames[i].code()->SourcePosition(frames[i].pc()); - int line_number = GetScriptLineNumber(script, position); + int line_number = Script::GetLineNumber(script, position); // line_number is already shifted by the script_line_offset. int relative_line_number = line_number - script_line_offset; if (options & StackTrace::kColumnOffset && relative_line_number >= 0) { @@ -573,69 +497,48 @@ // tag. column_offset += script->column_offset()->value(); } - CHECK_NOT_EMPTY_HANDLE( - this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, column_key, - Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE)); + JSObject::AddProperty( + stack_frame, column_key, + handle(Smi::FromInt(column_offset + 1), this), NONE); } - CHECK_NOT_EMPTY_HANDLE( - this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, line_key, - Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE)); + JSObject::AddProperty( + stack_frame, line_key, + handle(Smi::FromInt(line_number + 1), this), NONE); } if (options & StackTrace::kScriptId) { - Handle<Smi> script_id(script->id(), this); - CHECK_NOT_EMPTY_HANDLE(this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, script_id_key, script_id, - NONE)); + JSObject::AddProperty( + stack_frame, script_id_key, handle(script->id(), this), NONE); } if (options & StackTrace::kScriptName) { - Handle<Object> script_name(script->name(), this); - CHECK_NOT_EMPTY_HANDLE(this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, 
script_name_key, script_name, - NONE)); + JSObject::AddProperty( + stack_frame, script_name_key, handle(script->name(), this), NONE); } if (options & StackTrace::kScriptNameOrSourceURL) { - Handle<Object> result = GetScriptNameOrSourceURL(script); - CHECK_NOT_EMPTY_HANDLE(this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, script_name_or_source_url_key, - result, NONE)); + Handle<Object> result = Script::GetNameOrSourceURL(script); + JSObject::AddProperty( + stack_frame, script_name_or_source_url_key, result, NONE); } if (options & StackTrace::kFunctionName) { - Handle<Object> fun_name(fun->shared()->name(), this); - if (!fun_name->BooleanValue()) { - fun_name = Handle<Object>(fun->shared()->inferred_name(), this); - } - CHECK_NOT_EMPTY_HANDLE(this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, function_key, fun_name, NONE)); + Handle<Object> fun_name(fun->shared()->DebugName(), this); + JSObject::AddProperty(stack_frame, function_key, fun_name, NONE); } if (options & StackTrace::kIsEval) { Handle<Object> is_eval = script->compilation_type() == Script::COMPILATION_TYPE_EVAL ? factory()->true_value() : factory()->false_value(); - CHECK_NOT_EMPTY_HANDLE(this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, eval_key, is_eval, NONE)); + JSObject::AddProperty(stack_frame, eval_key, is_eval, NONE); } if (options & StackTrace::kIsConstructor) { Handle<Object> is_constructor = (frames[i].is_constructor()) ? 
factory()->true_value() : factory()->false_value(); - CHECK_NOT_EMPTY_HANDLE(this, - JSObject::SetLocalPropertyIgnoreAttributes( - stack_frame, constructor_key, - is_constructor, NONE)); + JSObject::AddProperty( + stack_frame, constructor_key, is_constructor, NONE); } FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame); @@ -664,9 +567,9 @@ stack_trace_nesting_level_ = 0; } else if (stack_trace_nesting_level_ == 1) { stack_trace_nesting_level_++; - OS::PrintError( + base::OS::PrintError( "\n\nAttempt to print stack while printing stack (double fault)\n"); - OS::PrintError( + base::OS::PrintError( "If you are lucky you may find a partial stack dump on stdout.\n\n"); incomplete_message_->OutputToFile(out); } @@ -693,7 +596,7 @@ } // The MentionedObjectCache is not GC-proof at the moment. DisallowHeapAllocation no_gc; - ASSERT(StringStream::IsMentionedObjectCacheClear(this)); + DCHECK(StringStream::IsMentionedObjectCacheClear(this)); // Avoid printing anything if there are no frames. if (c_entry_fp(thread_local_top()) == 0) return; @@ -717,28 +620,45 @@ } -void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) { - if (!thread_local_top()->failed_access_check_callback_) return; - - ASSERT(receiver->IsAccessCheckNeeded()); - ASSERT(context()); - - // Get the data object from access check info. 
+static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate, + Handle<JSObject> receiver) { JSFunction* constructor = JSFunction::cast(receiver->map()->constructor()); - if (!constructor->shared()->IsApiFunction()) return; + if (!constructor->shared()->IsApiFunction()) return NULL; + Object* data_obj = - constructor->shared()->get_api_func_data()->access_check_info(); - if (data_obj == heap_.undefined_value()) return; + constructor->shared()->get_api_func_data()->access_check_info(); + if (data_obj == isolate->heap()->undefined_value()) return NULL; + return AccessCheckInfo::cast(data_obj); +} + + +void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver, + v8::AccessType type) { + if (!thread_local_top()->failed_access_check_callback_) { + Handle<String> message = factory()->InternalizeUtf8String("no access"); + ScheduleThrow(*factory()->NewTypeError(message)); + return; + } + + DCHECK(receiver->IsAccessCheckNeeded()); + DCHECK(context()); + + // Get the data object from access check info. HandleScope scope(this); - Handle<JSObject> receiver_handle(receiver); - Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this); - { VMState<EXTERNAL> state(this); - thread_local_top()->failed_access_check_callback_( - v8::Utils::ToLocal(receiver_handle), + Handle<Object> data; + { DisallowHeapAllocation no_gc; + AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver); + if (!access_check_info) return; + data = handle(access_check_info->data(), this); + } + + // Leaving JavaScript. + VMState<EXTERNAL> state(this); + thread_local_top()->failed_access_check_callback_( + v8::Utils::ToLocal(receiver), type, v8::Utils::ToLocal(data)); - } } @@ -748,13 +668,14 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate, - JSObject* receiver, + Handle<JSObject> receiver, v8::AccessType type) { + DisallowHeapAllocation no_gc; // During bootstrapping, callback functions are not enabled yet. 
if (isolate->bootstrapper()->IsActive()) return YES; if (receiver->IsJSGlobalProxy()) { - Object* receiver_context = JSGlobalProxy::cast(receiver)->native_context(); + Object* receiver_context = JSGlobalProxy::cast(*receiver)->native_context(); if (!receiver_context->IsContext()) return NO; // Get the native context of current top context. @@ -772,95 +693,75 @@ } -bool Isolate::MayNamedAccess(JSObject* receiver, Object* key, +bool Isolate::MayNamedAccess(Handle<JSObject> receiver, + Handle<Object> key, v8::AccessType type) { - ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded()); - - // The callers of this method are not expecting a GC. - DisallowHeapAllocation no_gc; + DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded()); // Skip checks for hidden properties access. Note, we do not // require existence of a context in this case. - if (key == heap_.hidden_string()) return true; + if (key.is_identical_to(factory()->hidden_string())) return true; // Check for compatibility between the security tokens in the // current lexical context and the accessed object. 
- ASSERT(context()); + DCHECK(context()); MayAccessDecision decision = MayAccessPreCheck(this, receiver, type); if (decision != UNKNOWN) return decision == YES; - // Get named access check callback - JSFunction* constructor = JSFunction::cast(receiver->map()->constructor()); - if (!constructor->shared()->IsApiFunction()) return false; - - Object* data_obj = - constructor->shared()->get_api_func_data()->access_check_info(); - if (data_obj == heap_.undefined_value()) return false; - - Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback(); - v8::NamedSecurityCallback callback = - v8::ToCData<v8::NamedSecurityCallback>(fun_obj); - - if (!callback) return false; - HandleScope scope(this); - Handle<JSObject> receiver_handle(receiver, this); - Handle<Object> key_handle(key, this); - Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this); - LOG(this, ApiNamedSecurityCheck(key)); - bool result = false; - { - // Leaving JavaScript. - VMState<EXTERNAL> state(this); - result = callback(v8::Utils::ToLocal(receiver_handle), - v8::Utils::ToLocal(key_handle), - type, - v8::Utils::ToLocal(data)); + Handle<Object> data; + v8::NamedSecurityCallback callback; + { DisallowHeapAllocation no_gc; + AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver); + if (!access_check_info) return false; + Object* fun_obj = access_check_info->named_callback(); + callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj); + if (!callback) return false; + data = handle(access_check_info->data(), this); } - return result; + + LOG(this, ApiNamedSecurityCheck(*key)); + + // Leaving JavaScript. 
+ VMState<EXTERNAL> state(this); + return callback(v8::Utils::ToLocal(receiver), + v8::Utils::ToLocal(key), + type, + v8::Utils::ToLocal(data)); } -bool Isolate::MayIndexedAccess(JSObject* receiver, +bool Isolate::MayIndexedAccess(Handle<JSObject> receiver, uint32_t index, v8::AccessType type) { - ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded()); + DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded()); // Check for compatibility between the security tokens in the // current lexical context and the accessed object. - ASSERT(context()); + DCHECK(context()); MayAccessDecision decision = MayAccessPreCheck(this, receiver, type); if (decision != UNKNOWN) return decision == YES; - // Get indexed access check callback - JSFunction* constructor = JSFunction::cast(receiver->map()->constructor()); - if (!constructor->shared()->IsApiFunction()) return false; - - Object* data_obj = - constructor->shared()->get_api_func_data()->access_check_info(); - if (data_obj == heap_.undefined_value()) return false; - - Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback(); - v8::IndexedSecurityCallback callback = - v8::ToCData<v8::IndexedSecurityCallback>(fun_obj); - - if (!callback) return false; - HandleScope scope(this); - Handle<JSObject> receiver_handle(receiver, this); - Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this); - LOG(this, ApiIndexedSecurityCheck(index)); - bool result = false; - { - // Leaving JavaScript. 
- VMState<EXTERNAL> state(this); - result = callback(v8::Utils::ToLocal(receiver_handle), - index, - type, - v8::Utils::ToLocal(data)); + Handle<Object> data; + v8::IndexedSecurityCallback callback; + { DisallowHeapAllocation no_gc; + // Get named access check callback + AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver); + if (!access_check_info) return false; + Object* fun_obj = access_check_info->indexed_callback(); + callback = v8::ToCData<v8::IndexedSecurityCallback>(fun_obj); + if (!callback) return false; + data = handle(access_check_info->data(), this); } - return result; + + LOG(this, ApiIndexedSecurityCheck(index)); + + // Leaving JavaScript. + VMState<EXTERNAL> state(this); + return callback( + v8::Utils::ToLocal(receiver), index, type, v8::Utils::ToLocal(data)); } @@ -868,38 +769,25 @@ "Uncaught RangeError: Maximum call stack size exceeded"; -Failure* Isolate::StackOverflow() { +Object* Isolate::StackOverflow() { HandleScope scope(this); // At this point we cannot create an Error object using its javascript // constructor. Instead, we copy the pre-constructed boilerplate and // attach the stack trace as a hidden property. Handle<String> key = factory()->stack_overflow_string(); - Handle<JSObject> boilerplate = - Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key)); - Handle<JSObject> exception = JSObject::Copy(boilerplate); + Handle<JSObject> boilerplate = Handle<JSObject>::cast( + Object::GetProperty(js_builtins_object(), key).ToHandleChecked()); + Handle<JSObject> exception = factory()->CopyJSObject(boilerplate); DoThrow(*exception, NULL); - // Get stack trace limit. 
- Handle<Object> error = GetProperty(js_builtins_object(), "$Error"); - if (!error->IsJSObject()) return Failure::Exception(); - Handle<Object> stack_trace_limit = - GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit"); - if (!stack_trace_limit->IsNumber()) return Failure::Exception(); - double dlimit = stack_trace_limit->Number(); - int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit); - - Handle<JSArray> stack_trace = CaptureSimpleStackTrace( - exception, factory()->undefined_value(), limit); - JSObject::SetHiddenProperty(exception, - factory()->hidden_stack_trace_string(), - stack_trace); - return Failure::Exception(); + CaptureAndSetSimpleStackTrace(exception, factory()->undefined_value()); + return heap()->exception(); } -Failure* Isolate::TerminateExecution() { +Object* Isolate::TerminateExecution() { DoThrow(heap_.termination_exception(), NULL); - return Failure::Exception(); + return heap()->exception(); } @@ -920,13 +808,33 @@ } -Failure* Isolate::Throw(Object* exception, MessageLocation* location) { +void Isolate::InvokeApiInterruptCallback() { + // Note: callback below should be called outside of execution access lock. 
+ InterruptCallback callback = NULL; + void* data = NULL; + { + ExecutionAccess access(this); + callback = api_interrupt_callback_; + data = api_interrupt_callback_data_; + api_interrupt_callback_ = NULL; + api_interrupt_callback_data_ = NULL; + } + + if (callback != NULL) { + VMState<EXTERNAL> state(this); + HandleScope handle_scope(this); + callback(reinterpret_cast<v8::Isolate*>(this), data); + } +} + + +Object* Isolate::Throw(Object* exception, MessageLocation* location) { DoThrow(exception, location); - return Failure::Exception(); + return heap()->exception(); } -Failure* Isolate::ReThrow(MaybeObject* exception) { +Object* Isolate::ReThrow(Object* exception) { bool can_be_caught_externally = false; bool catchable_by_javascript = is_catchable_by_javascript(exception); ShouldReportException(&can_be_caught_externally, catchable_by_javascript); @@ -936,18 +844,17 @@ // Set the exception being re-thrown. set_pending_exception(exception); - if (exception->IsFailure()) return exception->ToFailureUnchecked(); - return Failure::Exception(); + return heap()->exception(); } -Failure* Isolate::ThrowIllegalOperation() { +Object* Isolate::ThrowIllegalOperation() { if (FLAG_stack_trace_on_illegal) PrintStack(stdout); return Throw(heap_.illegal_access_string()); } -Failure* Isolate::ThrowInvalidStringLength() { +Object* Isolate::ThrowInvalidStringLength() { return Throw(*factory()->NewRangeError( "invalid_string_length", HandleVector<Object>(NULL, 0))); } @@ -967,14 +874,14 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) { - ASSERT(handler == try_catch_handler()); - ASSERT(handler->HasCaught()); - ASSERT(handler->rethrow_); - ASSERT(handler->capture_message_); + DCHECK(handler == try_catch_handler()); + DCHECK(handler->HasCaught()); + DCHECK(handler->rethrow_); + DCHECK(handler->capture_message_); Object* message = reinterpret_cast<Object*>(handler->message_obj_); Object* script = reinterpret_cast<Object*>(handler->message_script_); - 
ASSERT(message->IsJSMessageObject() || message->IsTheHole()); - ASSERT(script->IsScript() || script->IsTheHole()); + DCHECK(message->IsJSMessageObject() || message->IsTheHole()); + DCHECK(script->IsScript() || script->IsTheHole()); thread_local_top()->pending_message_obj_ = message; thread_local_top()->pending_message_script_ = script; thread_local_top()->pending_message_start_pos_ = handler->message_start_pos_; @@ -982,8 +889,17 @@ } -Failure* Isolate::PromoteScheduledException() { - MaybeObject* thrown = scheduled_exception(); +void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) { + DCHECK(has_scheduled_exception()); + if (scheduled_exception() == handler->exception_) { + DCHECK(scheduled_exception() != heap()->termination_exception()); + clear_scheduled_exception(); + } +} + + +Object* Isolate::PromoteScheduledException() { + Object* thrown = scheduled_exception(); clear_scheduled_exception(); // Re-throw the exception to avoid getting repeated error reporting. return ReThrow(thrown); @@ -1070,15 +986,17 @@ bool Isolate::IsErrorObject(Handle<Object> obj) { if (!obj->IsJSObject()) return false; - String* error_key = - *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error"))); - Object* error_constructor = - js_builtins_object()->GetPropertyNoExceptionThrown(error_key); - - for (Object* prototype = *obj; !prototype->IsNull(); - prototype = prototype->GetPrototype(this)) { - if (!prototype->IsJSObject()) return false; - if (JSObject::cast(prototype)->map()->constructor() == error_constructor) { + Handle<String> error_key = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error")); + Handle<Object> error_constructor = Object::GetProperty( + js_builtins_object(), error_key).ToHandleChecked(); + + DisallowHeapAllocation no_gc; + for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (iter.GetCurrent()->IsJSProxy()) return false; + if 
(JSObject::cast(iter.GetCurrent())->map()->constructor() == + *error_constructor) { return true; } } @@ -1088,7 +1006,7 @@ static int fatal_exception_depth = 0; void Isolate::DoThrow(Object* exception, MessageLocation* location) { - ASSERT(!has_pending_exception()); + DCHECK(!has_pending_exception()); HandleScope scope(this); Handle<Object> exception_handle(exception, this); @@ -1106,12 +1024,10 @@ thread_local_top()->rethrowing_message_ = false; -#ifdef ENABLE_DEBUGGER_SUPPORT // Notify debugger of exception. if (catchable_by_javascript) { - debugger_->OnException(exception_handle, report_exception); + debug()->OnThrow(exception_handle, report_exception); } -#endif // Generate the message if required. if (report_exception || try_catch_needs_message) { @@ -1129,13 +1045,16 @@ if (capture_stack_trace_for_uncaught_exceptions_) { if (IsErrorObject(exception_handle)) { // We fetch the stack trace that corresponds to this error object. - String* key = heap()->hidden_stack_trace_string(); - Object* stack_property = - JSObject::cast(*exception_handle)->GetHiddenProperty(key); - // Property lookup may have failed. In this case it's probably not - // a valid Error object. - if (stack_property->IsJSArray()) { - stack_trace_object = Handle<JSArray>(JSArray::cast(stack_property)); + Handle<Name> key = factory()->detailed_stack_trace_symbol(); + // Look up as own property. If the lookup fails, the exception is + // probably not a valid Error object. In that case, we fall through + // and capture the stack trace at this throw site. + LookupIterator lookup( + exception_handle, key, LookupIterator::CHECK_OWN_REAL); + Handle<Object> stack_trace_property; + if (Object::GetProperty(&lookup).ToHandle(&stack_trace_property) && + stack_trace_property->IsJSArray()) { + stack_trace_object = Handle<JSArray>::cast(stack_trace_property); } } if (stack_trace_object.is_null()) { @@ -1151,10 +1070,9 @@ // before throwing as uncaught exception. 
Note that the pending // exception object to be set later must not be turned into a string. if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) { - bool failed = false; - exception_arg = - Execution::ToDetailString(this, exception_arg, &failed); - if (failed) { + MaybeHandle<Object> maybe_exception = + Execution::ToDetailString(this, exception_arg); + if (!maybe_exception.ToHandle(&exception_arg)) { exception_arg = factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("exception")); } @@ -1172,10 +1090,9 @@ thread_local_top()->pending_message_end_pos_ = location->end_pos(); } - // If the abort-on-uncaught-exception flag is specified, abort on any - // exception not caught by JavaScript, even when an external handler is - // present. This flag is intended for use by JavaScript developers, so - // print a user-friendly stack trace (not an internal one). + // If the abort-on-uncaught-exception flag is specified, and if the + // exception is not caught by JavaScript (even when an external handler is + // present). if (fatal_exception_depth == 0 && FLAG_abort_on_uncaught_exception && (report_exception || can_be_caught_externally)) { @@ -1184,7 +1101,7 @@ "%s\n\nFROM\n", MessageHandler::GetLocalizedMessage(this, message_obj).get()); PrintCurrentStackTrace(stderr); - OS::Abort(); + base::OS::Abort(); } } else if (location != NULL && !location->script().is_null()) { // We are bootstrapping and caught an error where the location is set @@ -1192,22 +1109,40 @@ // In this case we could have an extension (or an internal error // somewhere) and we print out the line number at which the error occured // to the console for easier debugging. 
- int line_number = GetScriptLineNumberSafe(location->script(), - location->start_pos()); + int line_number = + location->script()->GetLineNumber(location->start_pos()) + 1; if (exception->IsString() && location->script()->name()->IsString()) { - OS::PrintError( + base::OS::PrintError( "Extension or internal compilation error: %s in %s at line %d.\n", String::cast(exception)->ToCString().get(), String::cast(location->script()->name())->ToCString().get(), - line_number + 1); + line_number); } else if (location->script()->name()->IsString()) { - OS::PrintError( + base::OS::PrintError( "Extension or internal compilation error in %s at line %d.\n", String::cast(location->script()->name())->ToCString().get(), - line_number + 1); + line_number); } else { - OS::PrintError("Extension or internal compilation error.\n"); + base::OS::PrintError("Extension or internal compilation error.\n"); + } +#ifdef OBJECT_PRINT + // Since comments and empty lines have been stripped from the source of + // builtins, print the actual source here so that line numbers match. + if (location->script()->source()->IsString()) { + Handle<String> src(String::cast(location->script()->source())); + PrintF("Failing script:\n"); + int len = src->length(); + int line_number = 1; + PrintF("%5d: ", line_number); + for (int i = 0; i < len; i++) { + uint16_t character = src->Get(i); + PrintF("%c", character); + if (character == '\n' && i < len - 2) { + PrintF("%5d: ", ++line_number); + } + } } +#endif } } @@ -1223,25 +1158,20 @@ } -bool Isolate::IsExternallyCaught() { - ASSERT(has_pending_exception()); +bool Isolate::HasExternalTryCatch() { + DCHECK(has_pending_exception()); - if ((thread_local_top()->catcher_ == NULL) || - (try_catch_handler() != thread_local_top()->catcher_)) { - // When throwing the exception, we found no v8::TryCatch - // which should care about this exception. 
- return false; - } + return (thread_local_top()->catcher_ != NULL) && + (try_catch_handler() == thread_local_top()->catcher_); +} - if (!is_catchable_by_javascript(pending_exception())) { - return true; - } +bool Isolate::IsFinallyOnTop() { // Get the address of the external handler so we can compare the address to // determine which one is closer to the top of the stack. Address external_handler_address = thread_local_top()->try_catch_handler_address(); - ASSERT(external_handler_address != NULL); + DCHECK(external_handler_address != NULL); // The exception has been externally caught if and only if there is // an external handler which is on top of the top-most try-finally @@ -1255,23 +1185,22 @@ StackHandler* handler = StackHandler::FromAddress(Isolate::handler(thread_local_top())); while (handler != NULL && handler->address() < external_handler_address) { - ASSERT(!handler->is_catch()); - if (handler->is_finally()) return false; + DCHECK(!handler->is_catch()); + if (handler->is_finally()) return true; handler = handler->next(); } - return true; + return false; } void Isolate::ReportPendingMessages() { - ASSERT(has_pending_exception()); - PropagatePendingExceptionToExternalTryCatch(); + DCHECK(has_pending_exception()); + bool can_clear_message = PropagatePendingExceptionToExternalTryCatch(); HandleScope scope(this); - if (thread_local_top_.pending_exception_ == - heap()->termination_exception()) { + if (thread_local_top_.pending_exception_ == heap()->termination_exception()) { // Do nothing: if needed, the exception has been already propagated to // v8::TryCatch. 
} else { @@ -1294,12 +1223,12 @@ } } } - clear_pending_message(); + if (can_clear_message) clear_pending_message(); } MessageLocation Isolate::GetMessageLocation() { - ASSERT(has_pending_exception()); + DCHECK(has_pending_exception()); if (thread_local_top_.pending_exception_ != heap()->termination_exception() && thread_local_top_.has_pending_message_ && @@ -1317,7 +1246,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) { - ASSERT(has_pending_exception()); + DCHECK(has_pending_exception()); PropagatePendingExceptionToExternalTryCatch(); bool is_termination_exception = @@ -1336,7 +1265,7 @@ // If the exception is externally caught, clear it if there are no // JavaScript frames on the way to the C++ frame that has the // external handler. - ASSERT(thread_local_top()->try_catch_handler_address() != NULL); + DCHECK(thread_local_top()->try_catch_handler_address() != NULL); Address external_handler_address = thread_local_top()->try_catch_handler_address(); JavaScriptFrameIterator it(this); @@ -1368,21 +1297,19 @@ stack_trace_for_uncaught_exceptions_options_ = options; } - Handle<Context> Isolate::native_context() { - return Handle<Context>(context()->global_object()->native_context()); + return handle(context()->native_context()); } Handle<Context> Isolate::global_context() { - return Handle<Context>(context()->global_object()->global_context()); + return handle(context()->global_object()->global_context()); } Handle<Context> Isolate::GetCallingNativeContext() { JavaScriptFrameIterator it(this); -#ifdef ENABLE_DEBUGGER_SUPPORT - if (debug_->InDebugger()) { + if (debug_->in_debug_scope()) { while (!it.done()) { JavaScriptFrame* frame = it.frame(); Context* context = Context::cast(frame->context()); @@ -1393,7 +1320,6 @@ } } } -#endif // ENABLE_DEBUGGER_SUPPORT if (it.done()) return Handle<Context>::null(); JavaScriptFrame* frame = it.frame(); Context* context = Context::cast(frame->context()); @@ -1402,8 +1328,8 @@ char* Isolate::ArchiveThread(char* to) 
{ - OS::MemCopy(to, reinterpret_cast<char*>(thread_local_top()), - sizeof(ThreadLocalTop)); + MemCopy(to, reinterpret_cast<char*>(thread_local_top()), + sizeof(ThreadLocalTop)); InitializeThreadLocal(); clear_pending_exception(); clear_pending_message(); @@ -1413,14 +1339,14 @@ char* Isolate::RestoreThread(char* from) { - OS::MemCopy(reinterpret_cast<char*>(thread_local_top()), from, - sizeof(ThreadLocalTop)); - // This might be just paranoia, but it seems to be needed in case a - // thread_local_top_ is restored on a separate OS thread. + MemCopy(reinterpret_cast<char*>(thread_local_top()), from, + sizeof(ThreadLocalTop)); +// This might be just paranoia, but it seems to be needed in case a +// thread_local_top_ is restored on a separate OS thread. #ifdef USE_SIMULATOR thread_local_top()->simulator_ = Simulator::current(this); #endif - ASSERT(context() == NULL || context()->IsContext()); + DCHECK(context() == NULL || context()->IsContext()); return from + sizeof(ThreadLocalTop); } @@ -1434,7 +1360,7 @@ // TODO(svenpanne) The assertion below would fire if an embedder does not // cleanly dispose all Isolates before disposing v8, so we are conservative // and leave it out for now. - // ASSERT_EQ(NULL, list_); + // DCHECK_EQ(NULL, list_); } @@ -1504,10 +1430,10 @@ compilation_cache_(NULL), counters_(NULL), code_range_(NULL), - debugger_initialized_(false), logger_(NULL), stats_table_(NULL), stub_cache_(NULL), + code_aging_helper_(NULL), deoptimizer_data_(NULL), materialized_object_store_(NULL), capture_stack_trace_for_uncaught_exceptions_(false), @@ -1534,8 +1460,8 @@ // TODO(bmeurer) Initialized lazily because it depends on flags; can // be fixed once the default isolate cleanup is done. 
random_number_generator_(NULL), + serializer_enabled_(false), has_fatal_error_(false), - use_crankshaft_(true), initialized_from_snapshot_(false), cpu_profiler_(NULL), heap_profiler_(NULL), @@ -1545,8 +1471,9 @@ sweeper_thread_(NULL), num_sweeper_threads_(0), stress_deopt_count_(0), - next_optimization_id_(0) { - id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1); + next_optimization_id_(0), + use_counter_callback_(NULL) { + id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1); TRACE_ISOLATE(constructor); memset(isolate_addresses_, 0, @@ -1565,11 +1492,6 @@ memset(&js_spill_information_, 0, sizeof(js_spill_information_)); #endif -#ifdef ENABLE_DEBUGGER_SUPPORT - debug_ = NULL; - debugger_ = NULL; -#endif - handle_scope_data_.Initialize(); #define ISOLATE_INIT_EXECUTE(type, name, initial_value) \ @@ -1581,6 +1503,9 @@ memset(name##_, 0, sizeof(type) * length); ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE) #undef ISOLATE_INIT_ARRAY_EXECUTE + + InitializeLoggingAndCounters(); + debug_ = new Debug(this); } @@ -1597,7 +1522,8 @@ Deinit(); - { LockGuard<Mutex> lock_guard(&process_wide_mutex_); + { + base::LockGuard<base::Mutex> lock_guard(process_wide_mutex_.Pointer()); thread_data_table_->RemoveAllThreads(this); } @@ -1606,9 +1532,7 @@ serialize_partial_snapshot_cache_ = NULL; } - if (!IsDefaultIsolate()) { - delete this; - } + delete this; // Restore the previous current isolate. 
SetIsolateThreadLocals(saved_isolate, saved_data); @@ -1624,9 +1548,7 @@ if (state_ == INITIALIZED) { TRACE_ISOLATE(deinit); -#ifdef ENABLE_DEBUGGER_SUPPORT - debugger()->UnloadDebugger(); -#endif + debug()->Unload(); if (concurrent_recompilation_enabled()) { optimizing_compiler_thread_->Stop(); @@ -1643,11 +1565,12 @@ sweeper_thread_ = NULL; if (FLAG_job_based_sweeping && - heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) { - heap_.mark_compact_collector()->WaitUntilSweepingCompleted(); + heap_.mark_compact_collector()->sweeping_in_progress()) { + heap_.mark_compact_collector()->EnsureSweepingCompleted(); } - if (FLAG_hydrogen_stats) GetHStatistics()->Print(); + if (FLAG_turbo_stats) GetTStatistics()->Print("TurboFan"); + if (FLAG_hydrogen_stats) GetHStatistics()->Print("Hydrogen"); if (FLAG_print_deopt_stress) { PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_); @@ -1702,8 +1625,9 @@ void Isolate::SetIsolateThreadLocals(Isolate* isolate, PerIsolateThreadData* data) { - Thread::SetThreadLocal(isolate_key_, isolate); - Thread::SetThreadLocal(per_isolate_thread_data_key_, data); + EnsureInitialized(); + base::Thread::SetThreadLocal(isolate_key_, isolate); + base::Thread::SetThreadLocal(per_isolate_thread_data_key_, data); } @@ -1713,18 +1637,12 @@ // Has to be called while counters_ are still alive runtime_zone_.DeleteKeptSegment(); - // The entry stack must be empty when we get here, - // except for the default isolate, where it can - // still contain up to one entry stack item - ASSERT(entry_stack_ == NULL || this == default_isolate_); - ASSERT(entry_stack_ == NULL || entry_stack_->previous_item == NULL); + // The entry stack must be empty when we get here. 
+ DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL); delete entry_stack_; entry_stack_ = NULL; - delete[] assembler_spare_buffer_; - assembler_spare_buffer_ = NULL; - delete unicode_cache_; unicode_cache_ = NULL; @@ -1749,6 +1667,8 @@ delete stub_cache_; stub_cache_ = NULL; + delete code_aging_helper_; + code_aging_helper_ = NULL; delete stats_table_; stats_table_ = NULL; @@ -1797,12 +1717,8 @@ delete random_number_generator_; random_number_generator_ = NULL; -#ifdef ENABLE_DEBUGGER_SUPPORT - delete debugger_; - debugger_ = NULL; delete debug_; debug_ = NULL; -#endif } @@ -1812,39 +1728,44 @@ } -void Isolate::PropagatePendingExceptionToExternalTryCatch() { - ASSERT(has_pending_exception()); +bool Isolate::PropagatePendingExceptionToExternalTryCatch() { + DCHECK(has_pending_exception()); - bool external_caught = IsExternallyCaught(); - thread_local_top_.external_caught_exception_ = external_caught; + bool has_external_try_catch = HasExternalTryCatch(); + if (!has_external_try_catch) { + thread_local_top_.external_caught_exception_ = false; + return true; + } - if (!external_caught) return; + bool catchable_by_js = is_catchable_by_javascript(pending_exception()); + if (catchable_by_js && IsFinallyOnTop()) { + thread_local_top_.external_caught_exception_ = false; + return false; + } - if (thread_local_top_.pending_exception_ == - heap()->termination_exception()) { + thread_local_top_.external_caught_exception_ = true; + if (thread_local_top_.pending_exception_ == heap()->termination_exception()) { try_catch_handler()->can_continue_ = false; try_catch_handler()->has_terminated_ = true; try_catch_handler()->exception_ = heap()->null_value(); } else { v8::TryCatch* handler = try_catch_handler(); - // At this point all non-object (failure) exceptions have - // been dealt with so this shouldn't fail. 
- ASSERT(!pending_exception()->IsFailure()); - ASSERT(thread_local_top_.pending_message_obj_->IsJSMessageObject() || + DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() || thread_local_top_.pending_message_obj_->IsTheHole()); - ASSERT(thread_local_top_.pending_message_script_->IsScript() || + DCHECK(thread_local_top_.pending_message_script_->IsScript() || thread_local_top_.pending_message_script_->IsTheHole()); handler->can_continue_ = true; handler->has_terminated_ = false; handler->exception_ = pending_exception(); // Propagate to the external try-catch only if we got an actual message. - if (thread_local_top_.pending_message_obj_->IsTheHole()) return; + if (thread_local_top_.pending_message_obj_->IsTheHole()) return true; handler->message_obj_ = thread_local_top_.pending_message_obj_; handler->message_script_ = thread_local_top_.pending_message_script_; handler->message_start_pos_ = thread_local_top_.pending_message_start_pos_; handler->message_end_pos_ = thread_local_top_.pending_message_end_pos_; } + return true; } @@ -1858,45 +1779,25 @@ } -void Isolate::InitializeDebugger() { -#ifdef ENABLE_DEBUGGER_SUPPORT - LockGuard<RecursiveMutex> lock_guard(debugger_access()); - if (NoBarrier_Load(&debugger_initialized_)) return; - InitializeLoggingAndCounters(); - debug_ = new Debug(this); - debugger_ = new Debugger(this); - Release_Store(&debugger_initialized_, true); -#endif -} - - bool Isolate::Init(Deserializer* des) { - ASSERT(state_ != INITIALIZED); + DCHECK(state_ != INITIALIZED); TRACE_ISOLATE(init); stress_deopt_count_ = FLAG_deopt_every_n_times; has_fatal_error_ = false; - use_crankshaft_ = FLAG_crankshaft - && !Serializer::enabled() - && CPU::SupportsCrankshaft(); - if (function_entry_hook() != NULL) { // When function entry hooking is in effect, we have to create the code // stubs from scratch to get entry hooks, rather than loading the previously // generated stubs from disk. // If this assert fires, the initialization path has regressed. 
- ASSERT(des == NULL); + DCHECK(des == NULL); } // The initialization process does not handle memory exhaustion. DisallowAllocationFailure disallow_allocation_failure(this); - InitializeLoggingAndCounters(); - - InitializeDebugger(); - memory_allocator_ = new MemoryAllocator(this); code_range_ = new CodeRange(this); @@ -1939,11 +1840,14 @@ // Initialize other runtime facilities #if defined(USE_SIMULATOR) -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS +#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \ + V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 Simulator::Initialize(this); #endif #endif + code_aging_helper_ = new CodeAgingHelper(); + { // NOLINT // Ensure that the thread has a valid stack guard. The v8::Locker object // will ensure this too, but we don't have to use lockers if we are only @@ -1953,7 +1857,7 @@ } // SetUp the object heap. - ASSERT(!heap_.HasBeenSetUp()); + DCHECK(!heap_.HasBeenSetUp()); if (!heap_.SetUp()) { V8::FatalProcessOutOfMemory("heap setup"); return false; @@ -1978,9 +1882,9 @@ builtins_.SetUp(this, create_heap_objects); if (FLAG_log_internal_timer_events) { - set_event_logger(Logger::LogInternalEvents); + set_event_logger(Logger::DefaultTimerEventsLogger); } else { - set_event_logger(Logger::EmptyLogInternalEvents); + set_event_logger(Logger::EmptyTimerEventsLogger); } // Set default value if not yet set. @@ -1988,7 +1892,8 @@ // once ResourceConstraints becomes an argument to the Isolate constructor. if (max_available_threads_ < 1) { // Choose the default between 1 and 4. - max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1); + max_available_threads_ = + Max(Min(base::OS::NumberOfProcessorsOnline(), 4), 1); } if (!FLAG_job_based_sweeping) { @@ -2011,10 +1916,6 @@ } } -#ifdef ENABLE_DEBUGGER_SUPPORT - debug_->SetUp(create_heap_objects); -#endif - // If we are deserializing, read the state into the now-empty heap. 
if (!create_heap_objects) { des->Deserialize(this); @@ -2048,19 +1949,20 @@ LOG(this, LogCompiledFunctions()); } - // If we are profiling with the Linux perf tool, we need to disable - // code relocation. - if (FLAG_perf_jit_prof || FLAG_perf_basic_prof) { - FLAG_compact_code_space = false; - } - CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)), Internals::kIsolateEmbedderDataOffset); CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)), Internals::kIsolateRootsOffset); + CHECK_EQ(static_cast<int>( + OFFSET_OF(Isolate, heap_.amount_of_external_allocated_memory_)), + Internals::kAmountOfExternalAllocatedMemoryOffset); + CHECK_EQ(static_cast<int>(OFFSET_OF( + Isolate, + heap_.amount_of_external_allocated_memory_at_last_global_gc_)), + Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset); state_ = INITIALIZED; - time_millis_at_init_ = OS::TimeCurrentMillis(); + time_millis_at_init_ = base::OS::TimeCurrentMillis(); if (!create_heap_objects) { // Now that the heap is consistent, it's OK to generate the code for the @@ -2073,7 +1975,7 @@ kDeoptTableSerializeEntryCount - 1); } - if (!Serializer::enabled()) { + if (!serializer_enabled()) { // Ensure that all stubs which need to be generated ahead of time, but // cannot be serialized into the snapshot have been generated. 
HandleScope scope(this); @@ -2095,6 +1997,8 @@ NumberToStringStub::InstallDescriptors(this); StringAddStub::InstallDescriptors(this); RegExpConstructResultStub::InstallDescriptors(this); + KeyedLoadGenericStub::InstallDescriptors(this); + StoreFieldStub::InstallDescriptors(this); } CallDescriptors::InitializeForIsolate(this); @@ -2120,11 +2024,11 @@ PerIsolateThreadData* current_data = CurrentPerIsolateThreadData(); if (current_data != NULL) { current_isolate = current_data->isolate_; - ASSERT(current_isolate != NULL); + DCHECK(current_isolate != NULL); if (current_isolate == this) { - ASSERT(Current() == this); - ASSERT(entry_stack_ != NULL); - ASSERT(entry_stack_->previous_thread_data == NULL || + DCHECK(Current() == this); + DCHECK(entry_stack_ != NULL); + DCHECK(entry_stack_->previous_thread_data == NULL || entry_stack_->previous_thread_data->thread_id().Equals( ThreadId::Current())); // Same thread re-enters the isolate, no need to re-init anything. @@ -2133,19 +2037,9 @@ } } - // Threads can have default isolate set into TLS as Current but not yet have - // PerIsolateThreadData for it, as it requires more advanced phase of the - // initialization. For example, a thread might be the one that system used for - // static initializers - in this case the default isolate is set in TLS but - // the thread did not yet Enter the isolate. If PerisolateThreadData is not - // there, use the isolate set in TLS. 
- if (current_isolate == NULL) { - current_isolate = Isolate::UncheckedCurrent(); - } - PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread(); - ASSERT(data != NULL); - ASSERT(data->isolate_ == this); + DCHECK(data != NULL); + DCHECK(data->isolate_ == this); EntryStackItem* item = new EntryStackItem(current_data, current_isolate, @@ -2160,15 +2054,15 @@ void Isolate::Exit() { - ASSERT(entry_stack_ != NULL); - ASSERT(entry_stack_->previous_thread_data == NULL || + DCHECK(entry_stack_ != NULL); + DCHECK(entry_stack_->previous_thread_data == NULL || entry_stack_->previous_thread_data->thread_id().Equals( ThreadId::Current())); if (--entry_stack_->entry_count > 0) return; - ASSERT(CurrentPerIsolateThreadData() != NULL); - ASSERT(CurrentPerIsolateThreadData()->isolate_ == this); + DCHECK(CurrentPerIsolateThreadData() != NULL); + DCHECK(CurrentPerIsolateThreadData()->isolate_ == this); // Pop the stack. EntryStackItem* item = entry_stack_; @@ -2200,7 +2094,7 @@ while (deferred_iterator->previous_ != NULL) { deferred_iterator = deferred_iterator->previous_; } - ASSERT(deferred_handles_head_ == deferred_iterator); + DCHECK(deferred_handles_head_ == deferred_iterator); #endif if (deferred_handles_head_ == deferred) { deferred_handles_head_ = deferred_handles_head_->next_; @@ -2220,6 +2114,12 @@ } +HStatistics* Isolate::GetTStatistics() { + if (tstatistics() == NULL) set_tstatistics(new HStatistics()); + return tstatistics(); +} + + HTracer* Isolate::GetHTracer() { if (htracer() == NULL) set_htracer(new HTracer(id())); return htracer(); @@ -2246,10 +2146,17 @@ } +bool Isolate::use_crankshaft() const { + return FLAG_crankshaft && + !serializer_enabled_ && + CpuFeatures::SupportsCrankshaft(); +} + + bool Isolate::IsFastArrayConstructorPrototypeChainIntact() { Map* root_array_map = get_initial_js_array_map(GetInitialFastElementsKind()); - ASSERT(root_array_map != NULL); + DCHECK(root_array_map != NULL); JSObject* initial_array_proto = 
JSObject::cast(*initial_array_prototype()); // Check that the array prototype hasn't been altered WRT empty elements. @@ -2260,13 +2167,16 @@ // Check that the object prototype hasn't been altered WRT empty elements. JSObject* initial_object_proto = JSObject::cast(*initial_object_prototype()); - Object* root_array_map_proto = initial_array_proto->GetPrototype(); - if (root_array_map_proto != initial_object_proto) return false; + PrototypeIterator iter(this, initial_array_proto); + if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) { + return false; + } if (initial_object_proto->elements() != heap()->empty_fixed_array()) { return false; } - return initial_object_proto->GetPrototype()->IsNull(); + iter.Advance(); + return iter.IsAtEnd(); } @@ -2278,7 +2188,7 @@ CallInterfaceDescriptor* Isolate::call_descriptor(CallDescriptorKey index) { - ASSERT(0 <= index && index < NUMBER_OF_CALL_DESCRIPTORS); + DCHECK(0 <= index && index < NUMBER_OF_CALL_DESCRIPTORS); return &call_descriptors_[index]; } @@ -2310,11 +2220,152 @@ Handle<String> name = factory()->InternalizeUtf8String(nested[i]); Handle<JSObject> obj = factory()->NewJSObjectFromMap(map); JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8); - JSObject::SetProperty(registry, name, obj, NONE, STRICT); + JSObject::SetProperty(registry, name, obj, STRICT).Assert(); } } return Handle<JSObject>::cast(factory()->symbol_registry()); } +void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) { + for (int i = 0; i < call_completed_callbacks_.length(); i++) { + if (callback == call_completed_callbacks_.at(i)) return; + } + call_completed_callbacks_.Add(callback); +} + + +void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) { + for (int i = 0; i < call_completed_callbacks_.length(); i++) { + if (callback == call_completed_callbacks_.at(i)) { + call_completed_callbacks_.Remove(i); + } + } +} + + +void Isolate::FireCallCompletedCallback() { + bool 
has_call_completed_callbacks = !call_completed_callbacks_.is_empty(); + bool run_microtasks = autorun_microtasks() && pending_microtask_count(); + if (!has_call_completed_callbacks && !run_microtasks) return; + + if (!handle_scope_implementer()->CallDepthIsZero()) return; + if (run_microtasks) RunMicrotasks(); + // Fire callbacks. Increase call depth to prevent recursive callbacks. + v8::Isolate::SuppressMicrotaskExecutionScope suppress( + reinterpret_cast<v8::Isolate*>(this)); + for (int i = 0; i < call_completed_callbacks_.length(); i++) { + call_completed_callbacks_.at(i)(); + } +} + + +void Isolate::EnqueueMicrotask(Handle<Object> microtask) { + DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo()); + Handle<FixedArray> queue(heap()->microtask_queue(), this); + int num_tasks = pending_microtask_count(); + DCHECK(num_tasks <= queue->length()); + if (num_tasks == 0) { + queue = factory()->NewFixedArray(8); + heap()->set_microtask_queue(*queue); + } else if (num_tasks == queue->length()) { + queue = FixedArray::CopySize(queue, num_tasks * 2); + heap()->set_microtask_queue(*queue); + } + DCHECK(queue->get(num_tasks)->IsUndefined()); + queue->set(num_tasks, *microtask); + set_pending_microtask_count(num_tasks + 1); +} + + +void Isolate::RunMicrotasks() { + // %RunMicrotasks may be called in mjsunit tests, which violates + // this assertion, hence the check for --allow-natives-syntax. + // TODO(adamk): However, this also fails some layout tests. + // + // DCHECK(FLAG_allow_natives_syntax || + // handle_scope_implementer()->CallDepthIsZero()); + + // Increase call depth to prevent recursive callbacks. 
+ v8::Isolate::SuppressMicrotaskExecutionScope suppress( + reinterpret_cast<v8::Isolate*>(this)); + + while (pending_microtask_count() > 0) { + HandleScope scope(this); + int num_tasks = pending_microtask_count(); + Handle<FixedArray> queue(heap()->microtask_queue(), this); + DCHECK(num_tasks <= queue->length()); + set_pending_microtask_count(0); + heap()->set_microtask_queue(heap()->empty_fixed_array()); + + for (int i = 0; i < num_tasks; i++) { + HandleScope scope(this); + Handle<Object> microtask(queue->get(i), this); + if (microtask->IsJSFunction()) { + Handle<JSFunction> microtask_function = + Handle<JSFunction>::cast(microtask); + SaveContext save(this); + set_context(microtask_function->context()->native_context()); + Handle<Object> exception; + MaybeHandle<Object> result = Execution::TryCall( + microtask_function, factory()->undefined_value(), + 0, NULL, &exception); + // If execution is terminating, just bail out. + if (result.is_null() && + !exception.is_null() && + *exception == heap()->termination_exception()) { + // Clear out any remaining callbacks in the queue. 
+ heap()->set_microtask_queue(heap()->empty_fixed_array()); + set_pending_microtask_count(0); + return; + } + } else { + Handle<CallHandlerInfo> callback_info = + Handle<CallHandlerInfo>::cast(microtask); + v8::MicrotaskCallback callback = + v8::ToCData<v8::MicrotaskCallback>(callback_info->callback()); + void* data = v8::ToCData<void*>(callback_info->data()); + callback(data); + } + } + } +} + + +void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) { + DCHECK(!use_counter_callback_); + use_counter_callback_ = callback; +} + + +void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) { + if (use_counter_callback_) { + use_counter_callback_(reinterpret_cast<v8::Isolate*>(this), feature); + } +} + + +bool StackLimitCheck::JsHasOverflowed() const { + StackGuard* stack_guard = isolate_->stack_guard(); +#ifdef USE_SIMULATOR + // The simulator uses a separate JS stack. + Address jssp_address = Simulator::current(isolate_)->get_sp(); + uintptr_t jssp = reinterpret_cast<uintptr_t>(jssp_address); + if (jssp < stack_guard->real_jslimit()) return true; +#endif // USE_SIMULATOR + return GetCurrentStackPosition() < stack_guard->real_climit(); +} + + +bool PostponeInterruptsScope::Intercept(StackGuard::InterruptFlag flag) { + // First check whether the previous scope intercepts. + if (prev_ && prev_->Intercept(flag)) return true; + // Then check whether this scope intercepts. + if ((flag & intercept_mask_)) { + intercepted_flags_ |= flag; + return true; + } + return false; +} + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/isolate.h nodejs-0.11.15/deps/v8/src/isolate.h --- nodejs-0.11.13/deps/v8/src/isolate.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/isolate.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,60 +1,42 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ISOLATE_H_ #define V8_ISOLATE_H_ -#include "../include/v8-debug.h" -#include "allocation.h" -#include "assert-scope.h" -#include "atomicops.h" -#include "builtins.h" -#include "contexts.h" -#include "execution.h" -#include "frames.h" -#include "date.h" -#include "global-handles.h" -#include "handles.h" -#include "hashmap.h" -#include "heap.h" -#include "optimizing-compiler-thread.h" -#include "regexp-stack.h" -#include "runtime-profiler.h" -#include "runtime.h" -#include "zone.h" +#include "include/v8-debug.h" +#include "src/allocation.h" +#include "src/assert-scope.h" +#include "src/base/atomicops.h" +#include "src/builtins.h" +#include "src/contexts.h" +#include "src/date.h" +#include "src/execution.h" +#include "src/frames.h" +#include "src/global-handles.h" +#include "src/handles.h" +#include "src/hashmap.h" +#include "src/heap/heap.h" +#include "src/optimizing-compiler-thread.h" +#include "src/regexp-stack.h" +#include "src/runtime.h" +#include "src/runtime-profiler.h" +#include "src/zone.h" namespace v8 { + +namespace base { +class RandomNumberGenerator; +} + namespace internal { class Bootstrapper; -struct CallInterfaceDescriptor; +class CallInterfaceDescriptor; class CodeGenerator; class CodeRange; -struct CodeStubInterfaceDescriptor; +class CodeStubInterfaceDescriptor; class CodeTracer; class CompilationCache; class ConsStringIteratorOp; @@ -76,8 +58,7 @@ class InlineRuntimeFunctionsTable; class InnerPointerToCodeCache; class MaterializedObjectStore; -class NoAllocationStringAllocator; -class RandomNumberGenerator; +class CodeAgingHelper; class RegExpStack; class SaveContext; class StringTracker; @@ -95,15 +76,13 @@ typedef void* ExternalReferenceRedirectorPointer(); -#ifdef ENABLE_DEBUGGER_SUPPORT class Debug; class Debugger; -class DebuggerAgent; -#endif #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ 
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64 class Redirection; class Simulator; #endif @@ -117,7 +96,7 @@ // of handles to the actual constants. typedef ZoneList<Handle<Object> > ZoneObjectList; -#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \ +#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \ do { \ Isolate* __isolate__ = (isolate); \ if (__isolate__->has_scheduled_exception()) { \ @@ -125,31 +104,49 @@ } \ } while (false) -#define RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, T) \ - do { \ - Isolate* __isolate__ = (isolate); \ - if (__isolate__->has_scheduled_exception()) { \ - __isolate__->PromoteScheduledException(); \ - return Handle<T>::null(); \ - } \ +// Macros for MaybeHandle. + +#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \ + do { \ + Isolate* __isolate__ = (isolate); \ + if (__isolate__->has_scheduled_exception()) { \ + __isolate__->PromoteScheduledException(); \ + return value; \ + } \ } while (false) -#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \ - do { \ - if ((call).is_null()) { \ - ASSERT((isolate)->has_pending_exception()); \ - return (value); \ - } \ +#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \ + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>()) + +#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \ + do { \ + if (!(call).ToHandle(&dst)) { \ + DCHECK((isolate)->has_pending_exception()); \ + return value; \ + } \ } while (false) -#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \ - do { \ - ASSERT(!(isolate)->has_pending_exception()); \ - CHECK(!(call).is_null()); \ +#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \ + ASSIGN_RETURN_ON_EXCEPTION_VALUE( \ + isolate, dst, call, isolate->heap()->exception()) + +#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \ + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>()) + +#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \ + do { \ + if ((call).is_null()) { \ + 
DCHECK((isolate)->has_pending_exception()); \ + return value; \ + } \ } while (false) -#define RETURN_IF_EMPTY_HANDLE(isolate, call) \ - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception()) +#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \ + RETURN_ON_EXCEPTION_VALUE(isolate, call, isolate->heap()->exception()) + +#define RETURN_ON_EXCEPTION(isolate, call, T) \ + RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>()) + #define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \ C(Handler, handler) \ @@ -201,7 +198,7 @@ int id_; - static Atomic32 highest_thread_id_; + static base::Atomic32 highest_thread_id_; friend class Isolate; }; @@ -223,10 +220,10 @@ // Get the top C++ try catch handler or NULL if none are registered. // - // This method is not guarenteed to return an address that can be + // This method is not guaranteed to return an address that can be // used for comparison with addresses into the JS stack. If such an // address is needed, use try_catch_handler_address. - v8::TryCatch* TryCatchHandler(); + FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler) // Get the address of the top C++ try catch handler or NULL if // none are registered. @@ -238,12 +235,15 @@ // stack, try_catch_handler_address returns a JS stack address that // corresponds to the place on the JS stack where the C++ handler // would have been if the stack were not separate. - FIELD_ACCESSOR(Address, try_catch_handler_address) + Address try_catch_handler_address() { + return reinterpret_cast<Address>( + v8::TryCatch::JSStackComparableAddress(try_catch_handler())); + } void Free() { - ASSERT(!has_pending_message_); - ASSERT(!external_caught_exception_); - ASSERT(try_catch_handler_address_ == NULL); + DCHECK(!has_pending_message_); + DCHECK(!external_caught_exception_); + DCHECK(try_catch_handler_ == NULL); } Isolate* isolate_; @@ -251,7 +251,7 @@ // lookups. 
Context* context_; ThreadId thread_id_; - MaybeObject* pending_exception_; + Object* pending_exception_; bool has_pending_message_; bool rethrowing_message_; Object* pending_message_obj_; @@ -261,7 +261,7 @@ // Use a separate value for scheduled exceptions to preserve the // invariants that hold about pending_exception. We may want to // unify them later. - MaybeObject* scheduled_exception_; + Object* scheduled_exception_; bool external_caught_exception_; SaveContext* save_context_; v8::TryCatch* catcher_; @@ -291,24 +291,14 @@ private: void InitializeInternal(); - Address try_catch_handler_address_; + v8::TryCatch* try_catch_handler_; }; -#ifdef ENABLE_DEBUGGER_SUPPORT - -#define ISOLATE_DEBUGGER_INIT_LIST(V) \ - V(DebuggerAgent*, debugger_agent_instance, NULL) -#else - -#define ISOLATE_DEBUGGER_INIT_LIST(V) - -#endif - - #if V8_TARGET_ARCH_ARM && !defined(__arm__) || \ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \ - V8_TARGET_ARCH_MIPS && !defined(__mips__) + V8_TARGET_ARCH_MIPS && !defined(__mips__) || \ + V8_TARGET_ARCH_MIPS64 && !defined(__mips__) #define ISOLATE_INIT_SIMULATOR_LIST(V) \ V(bool, simulator_initialized, false) \ @@ -350,8 +340,6 @@ V(int, serialize_partial_snapshot_cache_capacity, 0) \ V(Object**, serialize_partial_snapshot_cache, NULL) \ /* Assembler state. */ \ - /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \ - V(byte*, assembler_spare_buffer, NULL) \ V(FatalErrorCallback, exception_behavior, NULL) \ V(LogEventCallback, event_logger, NULL) \ V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \ @@ -370,16 +358,18 @@ /* AstNode state. 
*/ \ V(int, ast_node_id, 0) \ V(unsigned, ast_node_count, 0) \ - V(bool, microtask_pending, false) \ + V(int, pending_microtask_count, 0) \ V(bool, autorun_microtasks, true) \ V(HStatistics*, hstatistics, NULL) \ + V(HStatistics*, tstatistics, NULL) \ V(HTracer*, htracer, NULL) \ V(CodeTracer*, code_tracer, NULL) \ V(bool, fp_stubs_generated, false) \ V(int, max_available_threads, 0) \ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \ - ISOLATE_INIT_SIMULATOR_LIST(V) \ - ISOLATE_DEBUGGER_INIT_LIST(V) + V(InterruptCallback, api_interrupt_callback, NULL) \ + V(void*, api_interrupt_callback_data, NULL) \ + ISOLATE_INIT_SIMULATOR_LIST(V) #define THREAD_LOCAL_TOP_ACCESSOR(type, name) \ inline void set_##name(type v) { thread_local_top_.name##_ = v; } \ @@ -406,7 +396,8 @@ thread_state_(NULL), #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS64 simulator_(NULL), #endif next_(NULL), @@ -420,7 +411,8 @@ #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS64 FIELD_ACCESSOR(Simulator*, simulator) #endif @@ -436,7 +428,8 @@ #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS64 Simulator* simulator_; #endif @@ -461,20 +454,31 @@ // Returns the PerIsolateThreadData for the current thread (or NULL if one is // not currently set). 
static PerIsolateThreadData* CurrentPerIsolateThreadData() { + EnsureInitialized(); return reinterpret_cast<PerIsolateThreadData*>( - Thread::GetThreadLocal(per_isolate_thread_data_key_)); + base::Thread::GetThreadLocal(per_isolate_thread_data_key_)); } // Returns the isolate inside which the current thread is running. INLINE(static Isolate* Current()) { + EnsureInitialized(); Isolate* isolate = reinterpret_cast<Isolate*>( - Thread::GetExistingThreadLocal(isolate_key_)); - ASSERT(isolate != NULL); + base::Thread::GetExistingThreadLocal(isolate_key_)); + DCHECK(isolate != NULL); return isolate; } INLINE(static Isolate* UncheckedCurrent()) { - return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_)); + EnsureInitialized(); + return reinterpret_cast<Isolate*>( + base::Thread::GetThreadLocal(isolate_key_)); + } + + // Like UncheckedCurrent, but skips the check that |isolate_key_| was + // initialized. Callers have to ensure that themselves. + INLINE(static Isolate* UnsafeCurrent()) { + return reinterpret_cast<Isolate*>( + base::Thread::GetThreadLocal(isolate_key_)); } // Usually called by Init(), but can be called early e.g. to allow @@ -498,15 +502,6 @@ static void GlobalTearDown(); - bool IsDefaultIsolate() const { return this == default_isolate_; } - - static void SetCrashIfDefaultIsolateInitialized(); - // Ensures that process-wide resources and the default isolate have been - // allocated. It is only necessary to call this method in rare cases, for - // example if you are using V8 from within the body of a static initializer. - // Safe to call multiple times. - static void EnsureDefaultIsolate(); - // Find the PerThread for this particular (isolate, thread) combination // If one does not yet exist, return null. PerIsolateThreadData* FindPerThreadDataForThisThread(); @@ -515,48 +510,31 @@ // If one does not yet exist, return null. 
PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id); -#ifdef ENABLE_DEBUGGER_SUPPORT - // Get the debugger from the default isolate. Preinitializes the - // default isolate if needed. - static Debugger* GetDefaultIsolateDebugger(); -#endif - - // Get the stack guard from the default isolate. Preinitializes the - // default isolate if needed. - static StackGuard* GetDefaultIsolateStackGuard(); - // Returns the key used to store the pointer to the current isolate. // Used internally for V8 threads that do not execute JavaScript but still // are part of the domain of an isolate (like the context switcher). - static Thread::LocalStorageKey isolate_key() { + static base::Thread::LocalStorageKey isolate_key() { + EnsureInitialized(); return isolate_key_; } // Returns the key used to store process-wide thread IDs. - static Thread::LocalStorageKey thread_id_key() { + static base::Thread::LocalStorageKey thread_id_key() { + EnsureInitialized(); return thread_id_key_; } - static Thread::LocalStorageKey per_isolate_thread_data_key(); - - // If a client attempts to create a Locker without specifying an isolate, - // we assume that the client is using legacy behavior. Set up the current - // thread to be inside the implicit isolate (or fail a check if we have - // switched to non-legacy behavior). - static void EnterDefaultIsolate(); + static base::Thread::LocalStorageKey per_isolate_thread_data_key(); // Mutex for serializing access to break control structures. - RecursiveMutex* break_access() { return &break_access_; } - - // Mutex for serializing access to debugger. - RecursiveMutex* debugger_access() { return &debugger_access_; } + base::RecursiveMutex* break_access() { return &break_access_; } Address get_address_from_id(AddressId id); // Access to top context (where the current function object was created). 
Context* context() { return thread_local_top_.context_; } void set_context(Context* context) { - ASSERT(context == NULL || context->IsContext()); + DCHECK(context == NULL || context->IsContext()); thread_local_top_.context_ = context; } Context** context_address() { return &thread_local_top_.context_; } @@ -567,24 +545,28 @@ THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) // Interface to pending exception. - MaybeObject* pending_exception() { - ASSERT(has_pending_exception()); + Object* pending_exception() { + DCHECK(has_pending_exception()); + DCHECK(!thread_local_top_.pending_exception_->IsException()); return thread_local_top_.pending_exception_; } - void set_pending_exception(MaybeObject* exception) { - thread_local_top_.pending_exception_ = exception; + void set_pending_exception(Object* exception_obj) { + DCHECK(!exception_obj->IsException()); + thread_local_top_.pending_exception_ = exception_obj; } void clear_pending_exception() { + DCHECK(!thread_local_top_.pending_exception_->IsException()); thread_local_top_.pending_exception_ = heap_.the_hole_value(); } - MaybeObject** pending_exception_address() { + Object** pending_exception_address() { return &thread_local_top_.pending_exception_; } bool has_pending_exception() { + DCHECK(!thread_local_top_.pending_exception_->IsException()); return !thread_local_top_.pending_exception_->IsTheHole(); } @@ -596,7 +578,7 @@ thread_local_top_.pending_message_script_ = heap_.the_hole_value(); } v8::TryCatch* try_catch_handler() { - return thread_local_top_.TryCatchHandler(); + return thread_local_top_.try_catch_handler(); } Address try_catch_handler_address() { return thread_local_top_.try_catch_handler_address(); @@ -607,7 +589,7 @@ THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher) - MaybeObject** scheduled_exception_address() { + Object** scheduled_exception_address() { return &thread_local_top_.scheduled_exception_; } @@ -624,20 +606,24 @@ &thread_local_top_.pending_message_script_); } - MaybeObject* 
scheduled_exception() { - ASSERT(has_scheduled_exception()); + Object* scheduled_exception() { + DCHECK(has_scheduled_exception()); + DCHECK(!thread_local_top_.scheduled_exception_->IsException()); return thread_local_top_.scheduled_exception_; } bool has_scheduled_exception() { + DCHECK(!thread_local_top_.scheduled_exception_->IsException()); return thread_local_top_.scheduled_exception_ != heap_.the_hole_value(); } void clear_scheduled_exception() { + DCHECK(!thread_local_top_.scheduled_exception_->IsException()); thread_local_top_.scheduled_exception_ = heap_.the_hole_value(); } - bool IsExternallyCaught(); + bool HasExternalTryCatch(); + bool IsFinallyOnTop(); - bool is_catchable_by_javascript(MaybeObject* exception) { + bool is_catchable_by_javascript(Object* exception) { return exception != heap()->termination_exception(); } @@ -673,7 +659,7 @@ } // Returns the global proxy object of the current context. - Object* global_proxy() { + JSObject* global_proxy() { return context()->global_proxy(); } @@ -693,11 +679,10 @@ class ExceptionScope { public: explicit ExceptionScope(Isolate* isolate) : - // Scope currently can only be used for regular exceptions, not - // failures like OOM or termination exception. + // Scope currently can only be used for regular exceptions, + // not termination exception. 
isolate_(isolate), - pending_exception_(isolate_->pending_exception()->ToObjectUnchecked(), - isolate_), + pending_exception_(isolate_->pending_exception(), isolate_), catcher_(isolate_->catcher()) { } @@ -728,61 +713,55 @@ Handle<JSArray> CaptureCurrentStackTrace( int frame_limit, StackTrace::StackTraceOptions options); - - Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object, - Handle<Object> caller, - int limit); + Handle<Object> CaptureSimpleStackTrace(Handle<JSObject> error_object, + Handle<Object> caller); void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object); + void CaptureAndSetSimpleStackTrace(Handle<JSObject> error_object, + Handle<Object> caller); // Returns if the top context may access the given global object. If // the result is false, the pending exception is guaranteed to be // set. - // TODO(yangguo): temporary wrappers - bool MayNamedAccessWrapper(Handle<JSObject> receiver, - Handle<Object> key, - v8::AccessType type) { - return MayNamedAccess(*receiver, *key, type); - } - bool MayIndexedAccessWrapper(Handle<JSObject> receiver, - uint32_t index, - v8::AccessType type) { - return MayIndexedAccess(*receiver, index, type); - } - void ReportFailedAccessCheckWrapper(Handle<JSObject> receiver, - v8::AccessType type) { - ReportFailedAccessCheck(*receiver, type); - } - - bool MayNamedAccess(JSObject* receiver, - Object* key, + bool MayNamedAccess(Handle<JSObject> receiver, + Handle<Object> key, v8::AccessType type); - bool MayIndexedAccess(JSObject* receiver, + bool MayIndexedAccess(Handle<JSObject> receiver, uint32_t index, v8::AccessType type); void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback); - void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type); + void ReportFailedAccessCheck(Handle<JSObject> receiver, v8::AccessType type); // Exception throwing support. The caller should use the result // of Throw() as its return value. 
- Failure* Throw(Object* exception, MessageLocation* location = NULL); + Object* Throw(Object* exception, MessageLocation* location = NULL); + + template <typename T> + MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception, + MessageLocation* location = NULL) { + Throw(*exception, location); + return MaybeHandle<T>(); + } + // Re-throw an exception. This involves no error reporting since // error reporting was handled when the exception was thrown // originally. - Failure* ReThrow(MaybeObject* exception); + Object* ReThrow(Object* exception); void ScheduleThrow(Object* exception); // Re-set pending message, script and positions reported to the TryCatch // back to the TLS for re-use when rethrowing. void RestorePendingMessageFromTryCatch(v8::TryCatch* handler); + // Un-schedule an exception that was caught by a TryCatch handler. + void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler); void ReportPendingMessages(); // Return pending location if any or unfilled structure. MessageLocation GetMessageLocation(); - Failure* ThrowIllegalOperation(); - Failure* ThrowInvalidStringLength(); + Object* ThrowIllegalOperation(); + Object* ThrowInvalidStringLength(); // Promote a scheduled exception to pending. Asserts has_scheduled_exception. - Failure* PromoteScheduledException(); + Object* PromoteScheduledException(); void DoThrow(Object* exception, MessageLocation* location); // Checks if exception should be reported and finds out if it's // caught externally. @@ -794,10 +773,12 @@ void ComputeLocation(MessageLocation* target); // Out of resource exception helpers. - Failure* StackOverflow(); - Failure* TerminateExecution(); + Object* StackOverflow(); + Object* TerminateExecution(); void CancelTerminateExecution(); + void InvokeApiInterruptCallback(); + // Administration void Iterate(ObjectVisitor* v); void Iterate(ObjectVisitor* v, ThreadLocalTop* t); @@ -827,11 +808,11 @@ // Accessors. 
#define GLOBAL_ACCESSOR(type, name, initialvalue) \ inline type name() const { \ - ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \ + DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \ return name##_; \ } \ inline void set_##name(type value) { \ - ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \ + DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \ name##_ = value; \ } ISOLATE_INIT_LIST(GLOBAL_ACCESSOR) @@ -839,7 +820,7 @@ #define GLOBAL_ARRAY_ACCESSOR(type, name, length) \ inline type* name() { \ - ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \ + DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \ return &(name##_)[0]; \ } ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR) @@ -847,10 +828,10 @@ #define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \ Handle<type> name() { \ - return Handle<type>(context()->native_context()->name(), this); \ + return Handle<type>(native_context()->name(), this); \ } \ bool is_##name(type* value) { \ - return context()->native_context()->is_##name(value); \ + return native_context()->is_##name(value); \ } NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR) #undef NATIVE_CONTEXT_FIELD_ACCESSOR @@ -859,7 +840,7 @@ Counters* counters() { // Call InitializeLoggingAndCounters() if logging is needed before // the isolate is fully initialized. - ASSERT(counters_ != NULL); + DCHECK(counters_ != NULL); return counters_; } CodeRange* code_range() { return code_range_; } @@ -868,13 +849,14 @@ Logger* logger() { // Call InitializeLoggingAndCounters() if logging is needed before // the isolate is fully initialized. 
- ASSERT(logger_ != NULL); + DCHECK(logger_ != NULL); return logger_; } StackGuard* stack_guard() { return &stack_guard_; } Heap* heap() { return &heap_; } StatsTable* stats_table(); StubCache* stub_cache() { return stub_cache_; } + CodeAgingHelper* code_aging_helper() { return code_aging_helper_; } DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; } ThreadLocalTop* thread_local_top() { return &thread_local_top_; } MaterializedObjectStore* materialized_object_store() { @@ -900,7 +882,7 @@ HandleScopeData* handle_scope_data() { return &handle_scope_data_; } HandleScopeImplementer* handle_scope_implementer() { - ASSERT(handle_scope_implementer_); + DCHECK(handle_scope_implementer_); return handle_scope_implementer_; } Zone* runtime_zone() { return &runtime_zone_; } @@ -965,20 +947,8 @@ return &interp_canonicalize_mapping_; } - inline bool IsCodePreAgingActive(); - -#ifdef ENABLE_DEBUGGER_SUPPORT - Debugger* debugger() { - if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger(); - return debugger_; - } - Debug* debug() { - if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger(); - return debug_; - } -#endif + Debug* debug() { return debug_; } - inline bool IsDebuggerActive(); inline bool DebuggerHasBreakPoints(); CpuProfiler* cpu_profiler() const { return cpu_profiler_; } @@ -1001,25 +971,33 @@ THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state) void SetData(uint32_t slot, void* data) { - ASSERT(slot < Internals::kNumIsolateDataSlots); + DCHECK(slot < Internals::kNumIsolateDataSlots); embedder_data_[slot] = data; } void* GetData(uint32_t slot) { - ASSERT(slot < Internals::kNumIsolateDataSlots); + DCHECK(slot < Internals::kNumIsolateDataSlots); return embedder_data_[slot]; } THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result) + void enable_serializer() { + // The serializer can only be enabled before the isolate init. 
+ DCHECK(state_ != INITIALIZED); + serializer_enabled_ = true; + } + + bool serializer_enabled() const { return serializer_enabled_; } + bool IsDead() { return has_fatal_error_; } void SignalFatalError() { has_fatal_error_ = true; } - bool use_crankshaft() const { return use_crankshaft_; } + bool use_crankshaft() const; bool initialized_from_snapshot() { return initialized_from_snapshot_; } double time_millis_since_init() { - return OS::TimeCurrentMillis() - time_millis_at_init_; + return base::OS::TimeCurrentMillis() - time_millis_at_init_; } DateCache* date_cache() { @@ -1061,14 +1039,14 @@ bool concurrent_recompilation_enabled() { // Thread is only available with flag enabled. - ASSERT(optimizing_compiler_thread_ == NULL || + DCHECK(optimizing_compiler_thread_ == NULL || FLAG_concurrent_recompilation); return optimizing_compiler_thread_ != NULL; } bool concurrent_osr_enabled() const { // Thread is only available with flag enabled. - ASSERT(optimizing_compiler_thread_ == NULL || + DCHECK(optimizing_compiler_thread_ == NULL || FLAG_concurrent_recompilation); return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr; } @@ -1085,14 +1063,10 @@ return sweeper_thread_; } - // PreInits and returns a default isolate. Needed when a new thread tries - // to create a Locker for the first time (the lock itself is in the isolate). - // TODO(svenpanne) This method is on death row... - static v8::Isolate* GetDefaultIsolateForLocking(); - int id() const { return static_cast<int>(id_); } HStatistics* GetHStatistics(); + HStatistics* GetTStatistics(); HTracer* GetHTracer(); CodeTracer* GetCodeTracer(); @@ -1103,7 +1077,7 @@ void* stress_deopt_count_address() { return &stress_deopt_count_; } - inline RandomNumberGenerator* random_number_generator(); + inline base::RandomNumberGenerator* random_number_generator(); // Given an address occupied by a live code object, return that object. 
Object* FindCodeObject(Address a); @@ -1119,7 +1093,19 @@ // Get (and lazily initialize) the registry for per-isolate symbols. Handle<JSObject> GetSymbolRegistry(); + void AddCallCompletedCallback(CallCompletedCallback callback); + void RemoveCallCompletedCallback(CallCompletedCallback callback); + void FireCallCompletedCallback(); + + void EnqueueMicrotask(Handle<Object> microtask); + void RunMicrotasks(); + + void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback); + void CountUsage(v8::Isolate::UseCounterFeature feature); + private: + static void EnsureInitialized(); + Isolate(); friend struct GlobalState; @@ -1178,18 +1164,16 @@ DISALLOW_COPY_AND_ASSIGN(EntryStackItem); }; - // This mutex protects highest_thread_id_, thread_data_table_ and - // default_isolate_. - static Mutex process_wide_mutex_; - - static Thread::LocalStorageKey per_isolate_thread_data_key_; - static Thread::LocalStorageKey isolate_key_; - static Thread::LocalStorageKey thread_id_key_; - static Isolate* default_isolate_; + // This mutex protects highest_thread_id_ and thread_data_table_. + static base::LazyMutex process_wide_mutex_; + + static base::Thread::LocalStorageKey per_isolate_thread_data_key_; + static base::Thread::LocalStorageKey isolate_key_; + static base::Thread::LocalStorageKey thread_id_key_; static ThreadDataTable* thread_data_table_; // A global counter for all generated Isolates, might overflow. - static Atomic32 isolate_counter_; + static base::Atomic32 isolate_counter_; void Deinit(); @@ -1220,15 +1204,16 @@ void FillCache(); - void PropagatePendingExceptionToExternalTryCatch(); - - void InitializeDebugger(); + // Propagate pending exception message to the v8::TryCatch. + // If there is no external try-catch or message was successfully propagated, + // then return true. + bool PropagatePendingExceptionToExternalTryCatch(); // Traverse prototype chain to find out whether the object is derived from // the Error object. 
bool IsErrorObject(Handle<Object> obj); - Atomic32 id_; + base::Atomic32 id_; EntryStackItem* entry_stack_; int stack_trace_nesting_level_; StringStream* incomplete_message_; @@ -1238,13 +1223,13 @@ CompilationCache* compilation_cache_; Counters* counters_; CodeRange* code_range_; - RecursiveMutex break_access_; - Atomic32 debugger_initialized_; - RecursiveMutex debugger_access_; + base::RecursiveMutex break_access_; + base::Atomic32 debugger_initialized_; Logger* logger_; StackGuard stack_guard_; StatsTable* stats_table_; StubCache* stub_cache_; + CodeAgingHelper* code_aging_helper_; DeoptimizerData* deoptimizer_data_; MaterializedObjectStore* materialized_object_store_; ThreadLocalTop thread_local_top_; @@ -1280,14 +1265,14 @@ unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_; CodeStubInterfaceDescriptor* code_stub_interface_descriptors_; CallInterfaceDescriptor* call_descriptors_; - RandomNumberGenerator* random_number_generator_; + base::RandomNumberGenerator* random_number_generator_; + + // Whether the isolate has been created for snapshotting. + bool serializer_enabled_; // True if fatal error has been signaled for this isolate. bool has_fatal_error_; - // True if we are using the Crankshaft optimizing compiler. - bool use_crankshaft_; - // True if this isolate was initialized from a snapshot. bool initialized_from_snapshot_; @@ -1300,10 +1285,7 @@ JSObject::SpillInformation js_spill_information_; #endif -#ifdef ENABLE_DEBUGGER_SUPPORT - Debugger* debugger_; Debug* debug_; -#endif CpuProfiler* cpu_profiler_; HeapProfiler* heap_profiler_; FunctionEntryHook function_entry_hook_; @@ -1339,6 +1321,12 @@ int next_optimization_id_; + // List of callbacks when a Call completes. 
+ List<CallCompletedCallback> call_completed_callbacks_; + + v8::Isolate::UseCounterCallback use_counter_callback_; + + friend class ExecutionAccess; friend class HandleScopeImplementer; friend class IsolateInitializer; @@ -1397,7 +1385,7 @@ : isolate_(isolate), context_(isolate->context(), isolate) { } ~AssertNoContextChange() { - ASSERT(isolate_->context() == *context_); + DCHECK(isolate_->context() == *context_); } private: @@ -1429,15 +1417,20 @@ }; -// Support for checking for stack-overflows in C++ code. +// Support for checking for stack-overflows. class StackLimitCheck BASE_EMBEDDED { public: explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { } - bool HasOverflowed() const { + // Use this to check for stack-overflows in C++ code. + inline bool HasOverflowed() const { StackGuard* stack_guard = isolate_->stack_guard(); - return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit()); + return GetCurrentStackPosition() < stack_guard->real_climit(); } + + // Use this to check for stack-overflow when entering runtime from JS code. + bool JsHasOverflowed() const; + private: Isolate* isolate_; }; @@ -1449,22 +1442,29 @@ // account. 
class PostponeInterruptsScope BASE_EMBEDDED { public: - explicit PostponeInterruptsScope(Isolate* isolate) - : stack_guard_(isolate->stack_guard()), isolate_(isolate) { - ExecutionAccess access(isolate_); - stack_guard_->thread_local_.postpone_interrupts_nesting_++; - stack_guard_->DisableInterrupts(); + PostponeInterruptsScope(Isolate* isolate, + int intercept_mask = StackGuard::ALL_INTERRUPTS) + : stack_guard_(isolate->stack_guard()), + intercept_mask_(intercept_mask), + intercepted_flags_(0) { + stack_guard_->PushPostponeInterruptsScope(this); } ~PostponeInterruptsScope() { - ExecutionAccess access(isolate_); - if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) { - stack_guard_->EnableInterrupts(); - } + stack_guard_->PopPostponeInterruptsScope(); } + + // Find the bottom-most scope that intercepts this interrupt. + // Return whether the interrupt has been intercepted. + bool Intercept(StackGuard::InterruptFlag flag); + private: StackGuard* stack_guard_; - Isolate* isolate_; + int intercept_mask_; + int intercepted_flags_; + PostponeInterruptsScope* prev_; + + friend class StackGuard; }; @@ -1479,12 +1479,12 @@ } if (FLAG_redirect_code_traces_to == NULL) { - OS::SNPrintF(filename_, - "code-%d-%d.asm", - OS::GetCurrentProcessId(), - isolate_id); + SNPrintF(filename_, + "code-%d-%d.asm", + base::OS::GetCurrentProcessId(), + isolate_id); } else { - OS::StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length()); + StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length()); } WriteChars(filename_.start(), "", 0, false); @@ -1507,7 +1507,7 @@ } if (file_ == NULL) { - file_ = OS::FOpen(filename_.start(), "a"); + file_ = base::OS::FOpen(filename_.start(), "a"); } scope_depth_++; diff -Nru nodejs-0.11.13/deps/v8/src/isolate-inl.h nodejs-0.11.15/deps/v8/src/isolate-inl.h --- nodejs-0.11.13/deps/v8/src/isolate-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/isolate-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ 
-1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ISOLATE_INL_H_ #define V8_ISOLATE_INL_H_ -#include "debug.h" -#include "isolate.h" -#include "utils/random-number-generator.h" +#include "src/base/utils/random-number-generator.h" +#include "src/debug.h" +#include "src/isolate.h" namespace v8 { namespace internal { @@ -48,33 +25,19 @@ } -bool Isolate::IsCodePreAgingActive() { - return FLAG_optimize_for_size && FLAG_age_code && !IsDebuggerActive(); -} - - -bool Isolate::IsDebuggerActive() { -#ifdef ENABLE_DEBUGGER_SUPPORT - if (!NoBarrier_Load(&debugger_initialized_)) return false; - return debugger()->IsDebuggerActive(); -#else - return false; -#endif -} - - bool Isolate::DebuggerHasBreakPoints() { -#ifdef ENABLE_DEBUGGER_SUPPORT return debug()->has_break_points(); -#else - return false; -#endif } -RandomNumberGenerator* Isolate::random_number_generator() { +base::RandomNumberGenerator* Isolate::random_number_generator() { if (random_number_generator_ == NULL) { - random_number_generator_ = new RandomNumberGenerator; + if (FLAG_random_seed != 0) { + random_number_generator_ = + new base::RandomNumberGenerator(FLAG_random_seed); + } else { + random_number_generator_ = new base::RandomNumberGenerator(); + } } return random_number_generator_; } diff -Nru nodejs-0.11.13/deps/v8/src/json.js nodejs-0.11.15/deps/v8/src/json.js --- nodejs-0.11.13/deps/v8/src/json.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/json.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,8 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // This file relies on the fact that the following declarations have been made // in runtime.js: diff -Nru nodejs-0.11.13/deps/v8/src/json-parser.h nodejs-0.11.15/deps/v8/src/json-parser.h --- nodejs-0.11.13/deps/v8/src/json-parser.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/json-parser.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_JSON_PARSER_H_ #define V8_JSON_PARSER_H_ -#include "v8.h" +#include "src/v8.h" -#include "char-predicates-inl.h" -#include "v8conversions.h" -#include "messages.h" -#include "spaces-inl.h" -#include "token.h" +#include "src/char-predicates-inl.h" +#include "src/conversions.h" +#include "src/heap/spaces-inl.h" +#include "src/messages.h" +#include "src/token.h" namespace v8 { namespace internal { @@ -43,7 +20,7 @@ template <bool seq_ascii> class JsonParser BASE_EMBEDDED { public: - static Handle<Object> Parse(Handle<String> source) { + MUST_USE_RESULT static MaybeHandle<Object> Parse(Handle<String> source) { return JsonParser(source).ParseJson(); } @@ -59,7 +36,7 @@ object_constructor_(isolate_->native_context()->object_function(), isolate_), position_(-1) { - FlattenString(source_); + source_ = String::Flatten(source_); pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED; // Optimized fast case where we only have ASCII characters. @@ -69,7 +46,7 @@ } // Parse a string containing a single JSON value. 
- Handle<Object> ParseJson(); + MaybeHandle<Object> ParseJson(); inline void Advance() { position_++; @@ -127,7 +104,7 @@ DisallowHeapAllocation no_gc; String::FlatContent content = expected->GetFlatContent(); if (content.IsAscii()) { - ASSERT_EQ('"', c0_); + DCHECK_EQ('"', c0_); const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1; const uint8_t* expected_chars = content.ToOneByteVector().start(); for (int i = 0; i < length; i++) { @@ -219,7 +196,7 @@ }; template <bool seq_ascii> -Handle<Object> JsonParser<seq_ascii>::ParseJson() { +MaybeHandle<Object> JsonParser<seq_ascii>::ParseJson() { // Advance to the first character (possibly EOS) AdvanceSkipWhitespace(); Handle<Object> result = ParseJsonValue(); @@ -257,8 +234,7 @@ break; default: message = "unexpected_token"; - Handle<Object> name = - LookupSingleCharacterStringFromCode(isolate_, c0_); + Handle<Object> name = factory->LookupSingleCharacterStringFromCode(c0_); Handle<FixedArray> element = factory->NewFixedArray(1); element->set(0, *name); array = factory->NewJSArrayWithElements(element); @@ -268,9 +244,8 @@ MessageLocation location(factory->NewScript(source_), position_, position_ + 1); - Handle<Object> result = factory->NewSyntaxError(message, array); - isolate()->Throw(*result, &location); - return Handle<Object>::null(); + Handle<Object> error = factory->NewSyntaxError(message, array); + return isolate()->template Throw<Object>(error, &location); } return result; } @@ -325,7 +300,7 @@ factory()->NewJSObject(object_constructor(), pretenure_); Handle<Map> map(json_object->map()); ZoneList<Handle<Object> > properties(8, zone()); - ASSERT_EQ(c0_, '{'); + DCHECK_EQ(c0_, '{'); bool transitioning = true; @@ -361,7 +336,7 @@ Handle<Object> value = ParseJsonValue(); if (value.is_null()) return ReportUnexpectedCharacter(); - JSObject::SetOwnElement(json_object, index, value, SLOPPY); + JSObject::SetOwnElement(json_object, index, value, SLOPPY).Assert(); continue; } // Not an index, fallback to the 
slow path. @@ -383,19 +358,19 @@ bool follow_expected = false; Handle<Map> target; if (seq_ascii) { - key = JSObject::ExpectedTransitionKey(map); + key = Map::ExpectedTransitionKey(map); follow_expected = !key.is_null() && ParseJsonString(key); } // If the expected transition hits, follow it. if (follow_expected) { - target = JSObject::ExpectedTransitionTarget(map); + target = Map::ExpectedTransitionTarget(map); } else { // If the expected transition failed, parse an internalized string and // try to find a matching transition. key = ParseJsonInternalizedString(); if (key.is_null()) return ReportUnexpectedCharacter(); - target = JSObject::FindTransitionToField(map, key); + target = Map::FindTransitionToField(map, key); // If a transition was found, follow it and continue. transitioning = !target.is_null(); } @@ -412,12 +387,18 @@ Representation expected_representation = details.representation(); if (value->FitsRepresentation(expected_representation)) { - // If the target representation is double and the value is already - // double, use the existing box. 
- if (value->IsSmi() && expected_representation.IsDouble()) { - value = factory()->NewHeapNumber( - Handle<Smi>::cast(value)->value()); + if (expected_representation.IsDouble()) { + value = Object::NewStorageFor(isolate(), value, + expected_representation); + } else if (expected_representation.IsHeapObject() && + !target->instance_descriptors()->GetFieldType( + descriptor)->NowContains(value)) { + Handle<HeapType> value_type(value->OptimalType( + isolate(), expected_representation)); + Map::GeneralizeFieldType(target, descriptor, value_type); } + DCHECK(target->instance_descriptors()->GetFieldType( + descriptor)->NowContains(value)); properties.Add(value, zone()); map = target; continue; @@ -431,7 +412,8 @@ int length = properties.length(); for (int i = 0; i < length; i++) { Handle<Object> value = properties[i]; - json_object->FastPropertyAtPut(i, *value); + FieldIndex index = FieldIndex::ForPropertyIndex(*map, i); + json_object->FastPropertyAtPut(index, *value); } } else { key = ParseJsonInternalizedString(); @@ -442,8 +424,8 @@ if (value.is_null()) return ReportUnexpectedCharacter(); } - JSObject::SetLocalPropertyIgnoreAttributes( - json_object, key, value, NONE); + JSObject::SetOwnPropertyIgnoreAttributes( + json_object, key, value, NONE).Assert(); } while (MatchSkipWhiteSpace(',')); if (c0_ != '}') { return ReportUnexpectedCharacter(); @@ -455,7 +437,8 @@ int length = properties.length(); for (int i = 0; i < length; i++) { Handle<Object> value = properties[i]; - json_object->FastPropertyAtPut(i, *value); + FieldIndex index = FieldIndex::ForPropertyIndex(*map, i); + json_object->FastPropertyAtPut(index, *value); } } } @@ -468,7 +451,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() { HandleScope scope(isolate()); ZoneList<Handle<Object> > elements(4, zone()); - ASSERT_EQ(c0_, '['); + DCHECK_EQ(c0_, '['); AdvanceSkipWhitespace(); if (c0_ != ']') { @@ -541,17 +524,16 @@ if (seq_ascii) { Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, 
length); number = StringToDouble(isolate()->unicode_cache(), - Vector<const char>::cast(chars), - NO_FLAGS, // Hex, octal or trailing junk. - OS::nan_value()); + chars, + NO_FLAGS, // Hex, octal or trailing junk. + base::OS::nan_value()); } else { Vector<uint8_t> buffer = Vector<uint8_t>::New(length); String::WriteToFlat(*source_, buffer.start(), beg_pos, position_); Vector<const uint8_t> result = Vector<const uint8_t>(buffer.start(), length); number = StringToDouble(isolate()->unicode_cache(), - // TODO(dcarney): Convert StringToDouble to uint_t. - Vector<const char>::cast(result), + result, NO_FLAGS, // Hex, octal or trailing junk. 0.0); buffer.Dispose(); @@ -583,14 +565,14 @@ inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length, PretenureFlag pretenure) { - return factory->NewRawTwoByteString(length, pretenure); + return factory->NewRawTwoByteString(length, pretenure).ToHandleChecked(); } template <> inline Handle<SeqOneByteString> NewRawString(Factory* factory, int length, PretenureFlag pretenure) { - return factory->NewRawOneByteString(length, pretenure); + return factory->NewRawOneByteString(length, pretenure).ToHandleChecked(); } @@ -606,7 +588,6 @@ int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count)); Handle<StringType> seq_string = NewRawString<StringType>(factory(), length, pretenure_); - ASSERT(!seq_string.is_null()); // Copy prefix into seq_str. SinkChar* dest = seq_string->GetChars(); String::WriteToFlat(*prefix, dest, start, end); @@ -685,7 +666,7 @@ } } - ASSERT_EQ('"', c0_); + DCHECK_EQ('"', c0_); // Advance past the last '"'. 
AdvanceSkipWhitespace(); @@ -697,7 +678,7 @@ template <bool seq_ascii> template <bool is_internalized> Handle<String> JsonParser<seq_ascii>::ScanJsonString() { - ASSERT_EQ('"', c0_); + DCHECK_EQ('"', c0_); Advance(); if (c0_ == '"') { AdvanceSkipWhitespace(); @@ -738,7 +719,8 @@ } while (c0 != '"'); int length = position - position_; uint32_t hash = (length <= String::kMaxHashCalcLength) - ? StringHasher::GetHashCore(running_hash) : length; + ? StringHasher::GetHashCore(running_hash) + : static_cast<uint32_t>(length); Vector<const uint8_t> string_vector( seq_source_->GetChars() + position_, length); StringTable* string_table = isolate()->heap()->string_table(); @@ -760,7 +742,7 @@ #ifdef DEBUG uint32_t hash_field = (hash << String::kHashShift) | String::kIsNotArrayIndexMask; - ASSERT_EQ(static_cast<int>(result->Hash()), + DCHECK_EQ(static_cast<int>(result->Hash()), static_cast<int>(hash_field >> String::kHashShift)); #endif break; @@ -793,12 +775,12 @@ } } while (c0_ != '"'); int length = position_ - beg_pos; - Handle<String> result = factory()->NewRawOneByteString(length, pretenure_); - ASSERT(!result.is_null()); + Handle<String> result = + factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked(); uint8_t* dest = SeqOneByteString::cast(*result)->GetChars(); String::WriteToFlat(*source_, dest, beg_pos, position_); - ASSERT_EQ('"', c0_); + DCHECK_EQ('"', c0_); // Advance past the last '"'. AdvanceSkipWhitespace(); return result; diff -Nru nodejs-0.11.13/deps/v8/src/json-stringifier.h nodejs-0.11.15/deps/v8/src/json-stringifier.h --- nodejs-0.11.13/deps/v8/src/json-stringifier.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/json-stringifier.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_JSON_STRINGIFIER_H_ #define V8_JSON_STRINGIFIER_H_ -#include "v8.h" -#include "v8utils.h" -#include "v8conversions.h" +#include "src/v8.h" + +#include "src/conversions.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -39,17 +17,18 @@ public: explicit BasicJsonStringifier(Isolate* isolate); - MaybeObject* Stringify(Handle<Object> object); + MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object); - INLINE(static MaybeObject* StringifyString(Isolate* isolate, - Handle<String> object)); + MUST_USE_RESULT INLINE(static MaybeHandle<Object> StringifyString( + Isolate* isolate, + Handle<String> object)); private: static const int kInitialPartLength = 32; static const int kMaxPartLength = 16 * 1024; static const int kPartLengthGrowthFactor = 2; - enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW }; + enum Result { UNCHANGED, SUCCESS, EXCEPTION }; void Accumulate(); @@ -81,8 +60,9 @@ } } - Handle<Object> ApplyToJsonFunction(Handle<Object> object, - Handle<Object> key); + MUST_USE_RESULT MaybeHandle<Object> ApplyToJsonFunction( + Handle<Object> object, + Handle<Object> key); Result SerializeGeneric(Handle<Object> object, Handle<Object> key, @@ -90,9 +70,9 @@ bool deferred_key); template <typename ResultType, typename Char> - INLINE(static MaybeObject* StringifyString_(Isolate* isolate, - Vector<Char> vector, - Handle<String> result)); + INLINE(static Handle<String> StringifyString_(Isolate* isolate, + Vector<Char> vector, + Handle<String> result)); // Entry point to serialize the object. 
INLINE(Result SerializeObject(Handle<Object> obj)) { @@ -115,7 +95,7 @@ INLINE(Result SerializeProperty(Handle<Object> object, bool deferred_comma, Handle<String> deferred_key)) { - ASSERT(!deferred_key.is_null()); + DCHECK(!deferred_key.is_null()); return Serialize_<true>(object, deferred_comma, deferred_key); } @@ -140,7 +120,7 @@ INLINE(Result SerializeJSArray(Handle<JSArray> object)); INLINE(Result SerializeJSObject(Handle<JSObject> object)); - Result SerializeJSArraySlow(Handle<JSArray> object, int length); + Result SerializeJSArraySlow(Handle<JSArray> object, uint32_t length); void SerializeString(Handle<String> object); @@ -263,38 +243,33 @@ overflowed_(false) { factory_ = isolate_->factory(); accumulator_store_ = Handle<JSValue>::cast( - factory_->ToObject(factory_->empty_string())); + Object::ToObject(isolate, factory_->empty_string()).ToHandleChecked()); part_length_ = kInitialPartLength; - current_part_ = factory_->NewRawOneByteString(part_length_); - ASSERT(!current_part_.is_null()); + current_part_ = factory_->NewRawOneByteString(part_length_).ToHandleChecked(); tojson_string_ = factory_->toJSON_string(); stack_ = factory_->NewJSArray(8); } -MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) { - switch (SerializeObject(object)) { - case UNCHANGED: - return isolate_->heap()->undefined_value(); - case SUCCESS: { - ShrinkCurrentPart(); - Accumulate(); - if (overflowed_) return isolate_->ThrowInvalidStringLength(); - return *accumulator(); - } - case CIRCULAR: - return isolate_->Throw(*factory_->NewTypeError( - "circular_structure", HandleVector<Object>(NULL, 0))); - case STACK_OVERFLOW: - return isolate_->StackOverflow(); - default: - return Failure::Exception(); +MaybeHandle<Object> BasicJsonStringifier::Stringify(Handle<Object> object) { + Result result = SerializeObject(object); + if (result == UNCHANGED) return isolate_->factory()->undefined_value(); + if (result == SUCCESS) { + ShrinkCurrentPart(); + Accumulate(); + if (overflowed_) 
{ + return isolate_->Throw<Object>( + isolate_->factory()->NewInvalidStringLengthError()); + } + return accumulator(); } + DCHECK(result == EXCEPTION); + return MaybeHandle<Object>(); } -MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate, - Handle<String> object) { +MaybeHandle<Object> BasicJsonStringifier::StringifyString( + Isolate* isolate, Handle<String> object) { static const int kJsonQuoteWorstCaseBlowup = 6; static const int kSpaceForQuotes = 2; int worst_case_length = @@ -305,21 +280,19 @@ return stringifier.Stringify(object); } - FlattenString(object); - ASSERT(object->IsFlat()); + object = String::Flatten(object); + DCHECK(object->IsFlat()); if (object->IsOneByteRepresentationUnderneath()) { - Handle<String> result = - isolate->factory()->NewRawOneByteString(worst_case_length); - ASSERT(!result.is_null()); + Handle<String> result = isolate->factory()->NewRawOneByteString( + worst_case_length).ToHandleChecked(); DisallowHeapAllocation no_gc; return StringifyString_<SeqOneByteString>( isolate, object->GetFlatContent().ToOneByteVector(), result); } else { - Handle<String> result = - isolate->factory()->NewRawTwoByteString(worst_case_length); - ASSERT(!result.is_null()); + Handle<String> result = isolate->factory()->NewRawTwoByteString( + worst_case_length).ToHandleChecked(); DisallowHeapAllocation no_gc; return StringifyString_<SeqTwoByteString>( isolate, @@ -330,9 +303,9 @@ template <typename ResultType, typename Char> -MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate, - Vector<Char> vector, - Handle<String> result) { +Handle<String> BasicJsonStringifier::StringifyString_(Isolate* isolate, + Vector<Char> vector, + Handle<String> result) { DisallowHeapAllocation no_gc; int final_size = 0; ResultType* dest = ResultType::cast(*result); @@ -341,7 +314,7 @@ dest->GetChars() + 1, vector.length()); dest->Set(final_size++, '\"'); - return *SeqString::Truncate(Handle<SeqString>::cast(result), final_size); + return 
SeqString::Truncate(Handle<SeqString>::cast(result), final_size); } @@ -364,25 +337,21 @@ } -Handle<Object> BasicJsonStringifier::ApplyToJsonFunction( +MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction( Handle<Object> object, Handle<Object> key) { - LookupResult lookup(isolate_); - JSObject::cast(*object)->LookupRealNamedProperty(*tojson_string_, &lookup); - if (!lookup.IsProperty()) return object; - PropertyAttributes attr; - Handle<Object> fun = - Object::GetProperty(object, object, &lookup, tojson_string_, &attr); - if (fun.is_null()) return Handle<Object>::null(); + LookupIterator it(object, tojson_string_, LookupIterator::SKIP_INTERCEPTOR); + Handle<Object> fun; + ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object); if (!fun->IsJSFunction()) return object; // Call toJSON function. if (key->IsSmi()) key = factory_->NumberToString(key); Handle<Object> argv[] = { key }; - bool has_exception = false; HandleScope scope(isolate_); - object = Execution::Call(isolate_, fun, object, 1, argv, &has_exception); - // Return empty handle to signal an exception. 
- if (has_exception) return Handle<Object>::null(); + ASSIGN_RETURN_ON_EXCEPTION( + isolate_, object, + Execution::Call(isolate_, fun, object, 1, argv), + Object); return scope.CloseAndEscape(object); } @@ -390,7 +359,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush( Handle<Object> object) { StackLimitCheck check(isolate_); - if (check.HasOverflowed()) return STACK_OVERFLOW; + if (check.HasOverflowed()) { + isolate_->StackOverflow(); + return EXCEPTION; + } int length = Smi::cast(stack_->length())->value(); { @@ -398,7 +370,10 @@ FixedArray* elements = FixedArray::cast(stack_->elements()); for (int i = 0; i < length; i++) { if (elements->get(i) == *object) { - return CIRCULAR; + AllowHeapAllocation allow_to_return_error; + isolate_->Throw(*factory_->NewTypeError( + "circular_structure", HandleVector<Object>(NULL, 0))); + return EXCEPTION; } } } @@ -419,8 +394,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_( Handle<Object> object, bool comma, Handle<Object> key) { if (object->IsJSObject()) { - object = ApplyToJsonFunction(object, key); - if (object.is_null()) return EXCEPTION; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, object, + ApplyToJsonFunction(object, key), + EXCEPTION); } if (object->IsSmi()) { @@ -430,6 +407,7 @@ switch (HeapObject::cast(*object)->map()->instance_type()) { case HEAP_NUMBER_TYPE: + case MUTABLE_HEAP_NUMBER_TYPE: if (deferred_string_key) SerializeDeferredKey(comma, key); return SerializeHeapNumber(Handle<HeapNumber>::cast(object)); case ODDBALL_TYPE: @@ -464,7 +442,8 @@ SerializeString(Handle<String>::cast(object)); return SUCCESS; } else if (object->IsJSObject()) { - if (object->IsAccessCheckNeeded()) break; + // Go to slow path for global proxy and objects requiring access checks. 
+ if (object->IsAccessCheckNeeded() || object->IsJSGlobalProxy()) break; if (deferred_string_key) SerializeDeferredKey(comma, key); return SerializeJSObject(Handle<JSObject>::cast(object)); } @@ -479,15 +458,16 @@ Handle<Object> key, bool deferred_comma, bool deferred_key) { - Handle<JSObject> builtins(isolate_->native_context()->builtins()); - Handle<JSFunction> builtin = - Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter")); + Handle<JSObject> builtins(isolate_->native_context()->builtins(), isolate_); + Handle<JSFunction> builtin = Handle<JSFunction>::cast(Object::GetProperty( + isolate_, builtins, "JSONSerializeAdapter").ToHandleChecked()); Handle<Object> argv[] = { key, object }; - bool has_exception = false; - Handle<Object> result = - Execution::Call(isolate_, builtin, object, 2, argv, &has_exception); - if (has_exception) return EXCEPTION; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, result, + Execution::Call(isolate_, builtin, object, 2, argv), + EXCEPTION); if (result->IsUndefined()) return UNCHANGED; if (deferred_key) { if (key->IsSmi()) key = factory_->NumberToString(key); @@ -501,8 +481,11 @@ part_length_ = kInitialPartLength; // Allocate conservatively. Extend(); // Attach current part and allocate new part. // Attach result string to the accumulator. 
- Handle<String> cons = factory_->NewConsString(accumulator(), result_string); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, cons, EXCEPTION); + Handle<String> cons; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, cons, + factory_->NewConsString(accumulator(), result_string), + EXCEPTION); set_accumulator(cons); return SUCCESS; } @@ -510,23 +493,22 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue( Handle<JSValue> object) { - bool has_exception = false; String* class_name = object->class_name(); if (class_name == isolate_->heap()->String_string()) { - Handle<Object> value = - Execution::ToString(isolate_, object, &has_exception); - if (has_exception) return EXCEPTION; + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, value, Execution::ToString(isolate_, object), EXCEPTION); SerializeString(Handle<String>::cast(value)); } else if (class_name == isolate_->heap()->Number_string()) { - Handle<Object> value = - Execution::ToNumber(isolate_, object, &has_exception); - if (has_exception) return EXCEPTION; + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, value, Execution::ToNumber(isolate_, object), EXCEPTION); if (value->IsSmi()) return SerializeSmi(Smi::cast(*value)); SerializeHeapNumber(Handle<HeapNumber>::cast(value)); } else { - ASSERT(class_name == isolate_->heap()->Boolean_string()); + DCHECK(class_name == isolate_->heap()->Boolean_string()); Object* value = JSValue::cast(*object)->value(); - ASSERT(value->IsBoolean()); + DCHECK(value->IsBoolean()); AppendAscii(value->IsTrue() ? 
"true" : "false"); } return SUCCESS; @@ -561,22 +543,25 @@ HandleScope handle_scope(isolate_); Result stack_push = StackPush(object); if (stack_push != SUCCESS) return stack_push; - int length = Smi::cast(object->length())->value(); + uint32_t length = 0; + CHECK(object->length()->ToArrayIndex(&length)); Append('['); switch (object->GetElementsKind()) { case FAST_SMI_ELEMENTS: { Handle<FixedArray> elements( FixedArray::cast(object->elements()), isolate_); - for (int i = 0; i < length; i++) { + for (uint32_t i = 0; i < length; i++) { if (i > 0) Append(','); SerializeSmi(Smi::cast(elements->get(i))); } break; } case FAST_DOUBLE_ELEMENTS: { + // Empty array is FixedArray but not FixedDoubleArray. + if (length == 0) break; Handle<FixedDoubleArray> elements( FixedDoubleArray::cast(object->elements()), isolate_); - for (int i = 0; i < length; i++) { + for (uint32_t i = 0; i < length; i++) { if (i > 0) Append(','); SerializeDouble(elements->get_scalar(i)); } @@ -585,7 +570,7 @@ case FAST_ELEMENTS: { Handle<FixedArray> elements( FixedArray::cast(object->elements()), isolate_); - for (int i = 0; i < length; i++) { + for (uint32_t i = 0; i < length; i++) { if (i > 0) Append(','); Result result = SerializeElement(isolate_, @@ -617,11 +602,14 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow( - Handle<JSArray> object, int length) { - for (int i = 0; i < length; i++) { + Handle<JSArray> object, uint32_t length) { + for (uint32_t i = 0; i < length; i++) { if (i > 0) Append(','); - Handle<Object> element = Object::GetElement(isolate_, object, i); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, element, EXCEPTION); + Handle<Object> element; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, element, + Object::GetElement(isolate_, object, i), + EXCEPTION); if (element->IsUndefined()) { AppendAscii("null"); } else { @@ -643,11 +631,7 @@ HandleScope handle_scope(isolate_); Result stack_push = StackPush(object); if (stack_push != SUCCESS) return stack_push; - if 
(object->IsJSGlobalProxy()) { - object = Handle<JSObject>( - JSObject::cast(object->GetPrototype()), isolate_); - ASSERT(object->IsGlobalObject()); - } + DCHECK(!object->IsJSGlobalProxy() && !object->IsGlobalObject()); Append('{'); bool comma = false; @@ -666,48 +650,51 @@ if (details.IsDontEnum()) continue; Handle<Object> property; if (details.type() == FIELD && *map == object->map()) { - property = Handle<Object>( - object->RawFastPropertyAt( - map->instance_descriptors()->GetFieldIndex(i)), - isolate_); + property = Handle<Object>(object->RawFastPropertyAt( + FieldIndex::ForDescriptor(*map, i)), isolate_); } else { - property = GetProperty(isolate_, object, key); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, property, EXCEPTION); + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, property, + Object::GetPropertyOrElement(object, key), + EXCEPTION); } Result result = SerializeProperty(property, comma, key); if (!comma && result == SUCCESS) comma = true; - if (result >= EXCEPTION) return result; + if (result == EXCEPTION) return result; } } else { - bool has_exception = false; - Handle<FixedArray> contents = - GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception); - if (has_exception) return EXCEPTION; + Handle<FixedArray> contents; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, contents, + JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY), + EXCEPTION); for (int i = 0; i < contents->length(); i++) { Object* key = contents->get(i); Handle<String> key_handle; - Handle<Object> property; + MaybeHandle<Object> maybe_property; if (key->IsString()) { key_handle = Handle<String>(String::cast(key), isolate_); - property = GetProperty(isolate_, object, key_handle); + maybe_property = Object::GetPropertyOrElement(object, key_handle); } else { - ASSERT(key->IsNumber()); + DCHECK(key->IsNumber()); key_handle = factory_->NumberToString(Handle<Object>(key, isolate_)); uint32_t index; if (key->IsSmi()) { - property = Object::GetElement( + maybe_property = Object::GetElement( 
isolate_, object, Smi::cast(key)->value()); } else if (key_handle->AsArrayIndex(&index)) { - property = Object::GetElement(isolate_, object, index); + maybe_property = Object::GetElement(isolate_, object, index); } else { - property = GetProperty(isolate_, object, key_handle); + maybe_property = Object::GetPropertyOrElement(object, key_handle); } } - RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, property, EXCEPTION); + Handle<Object> property; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate_, property, maybe_property, EXCEPTION); Result result = SerializeProperty(property, comma, key_handle); if (!comma && result == SUCCESS) comma = true; - if (result >= EXCEPTION) return result; + if (result == EXCEPTION) return result; } } @@ -719,7 +706,7 @@ void BasicJsonStringifier::ShrinkCurrentPart() { - ASSERT(current_index_ < part_length_); + DCHECK(current_index_ < part_length_); current_part_ = SeqString::Truncate(Handle<SeqString>::cast(current_part_), current_index_); } @@ -731,7 +718,8 @@ set_accumulator(factory_->empty_string()); overflowed_ = true; } else { - set_accumulator(factory_->NewConsString(accumulator(), current_part_)); + set_accumulator(factory_->NewConsString(accumulator(), + current_part_).ToHandleChecked()); } } @@ -742,11 +730,13 @@ part_length_ *= kPartLengthGrowthFactor; } if (is_ascii_) { - current_part_ = factory_->NewRawOneByteString(part_length_); + current_part_ = + factory_->NewRawOneByteString(part_length_).ToHandleChecked(); } else { - current_part_ = factory_->NewRawTwoByteString(part_length_); + current_part_ = + factory_->NewRawTwoByteString(part_length_).ToHandleChecked(); } - ASSERT(!current_part_.is_null()); + DCHECK(!current_part_.is_null()); current_index_ = 0; } @@ -754,8 +744,9 @@ void BasicJsonStringifier::ChangeEncoding() { ShrinkCurrentPart(); Accumulate(); - current_part_ = factory_->NewRawTwoByteString(part_length_); - ASSERT(!current_part_.is_null()); + current_part_ = + factory_->NewRawTwoByteString(part_length_).ToHandleChecked(); + 
DCHECK(!current_part_.is_null()); current_index_ = 0; is_ascii_ = false; } @@ -769,7 +760,7 @@ // Assert that uc16 character is not truncated down to 8 bit. // The <uc16, char> version of this method must not be called. - ASSERT(sizeof(*dest) >= sizeof(*src)); + DCHECK(sizeof(*dest) >= sizeof(*src)); for (int i = 0; i < length; i++) { SrcChar c = src[i]; @@ -850,7 +841,7 @@ Vector<const uint8_t> BasicJsonStringifier::GetCharVector( Handle<String> string) { String::FlatContent flat = string->GetFlatContent(); - ASSERT(flat.IsAscii()); + DCHECK(flat.IsAscii()); return flat.ToOneByteVector(); } @@ -858,13 +849,13 @@ template <> Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) { String::FlatContent flat = string->GetFlatContent(); - ASSERT(flat.IsTwoByte()); + DCHECK(flat.IsTwoByte()); return flat.ToUC16Vector(); } void BasicJsonStringifier::SerializeString(Handle<String> object) { - object = FlattenGetString(object); + object = String::Flatten(object); if (is_ascii_) { if (object->IsOneByteRepresentationUnderneath()) { SerializeString_<true, uint8_t>(object); diff -Nru nodejs-0.11.13/deps/v8/src/jsregexp.cc nodejs-0.11.15/deps/v8/src/jsregexp.cc --- nodejs-0.11.13/deps/v8/src/jsregexp.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/jsregexp.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,79 +1,59 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "ast.h" -#include "compiler.h" -#include "execution.h" -#include "factory.h" -#include "jsregexp.h" -#include "jsregexp-inl.h" -#include "platform.h" -#include "string-search.h" -#include "runtime.h" -#include "compilation-cache.h" -#include "string-stream.h" -#include "parser.h" -#include "regexp-macro-assembler.h" -#include "regexp-macro-assembler-tracer.h" -#include "regexp-macro-assembler-irregexp.h" -#include "regexp-stack.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/ast.h" +#include "src/base/platform/platform.h" +#include "src/compilation-cache.h" +#include "src/compiler.h" +#include "src/execution.h" +#include "src/factory.h" +#include "src/jsregexp-inl.h" +#include "src/jsregexp.h" +#include "src/ostreams.h" +#include "src/parser.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-macro-assembler-irregexp.h" +#include "src/regexp-macro-assembler-tracer.h" +#include "src/regexp-stack.h" +#include "src/runtime.h" +#include "src/string-search.h" #ifndef V8_INTERPRETED_REGEXP #if V8_TARGET_ARCH_IA32 -#include "ia32/regexp-macro-assembler-ia32.h" +#include "src/ia32/regexp-macro-assembler-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/regexp-macro-assembler-x64.h" +#include "src/x64/regexp-macro-assembler-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/regexp-macro-assembler-arm64.h" +#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/regexp-macro-assembler-arm.h" +#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/regexp-macro-assembler-mips.h" +#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT #else #error Unsupported target architecture. #endif #endif -#include "interpreter-irregexp.h" +#include "src/interpreter-irregexp.h" namespace v8 { namespace internal { -Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor, - Handle<String> pattern, - Handle<String> flags, - bool* has_pending_exception) { +MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral( + Handle<JSFunction> constructor, + Handle<String> pattern, + Handle<String> flags) { // Call the construct code with 2 arguments. 
Handle<Object> argv[] = { pattern, flags }; - return Execution::New(constructor, ARRAY_SIZE(argv), argv, - has_pending_exception); + return Execution::New(constructor, ARRAY_SIZE(argv), argv); } @@ -96,10 +76,12 @@ } -static inline void ThrowRegExpException(Handle<JSRegExp> re, - Handle<String> pattern, - Handle<String> error_text, - const char* message) { +MUST_USE_RESULT +static inline MaybeHandle<Object> ThrowRegExpException( + Handle<JSRegExp> re, + Handle<String> pattern, + Handle<String> error_text, + const char* message) { Isolate* isolate = re->GetIsolate(); Factory* factory = isolate->factory(); Handle<FixedArray> elements = factory->NewFixedArray(2); @@ -107,7 +89,7 @@ elements->set(1, *error_text); Handle<JSArray> array = factory->NewJSArrayWithElements(elements); Handle<Object> regexp_err = factory->NewSyntaxError(message, array); - isolate->Throw(*regexp_err); + return isolate->Throw<Object>(regexp_err); } @@ -115,8 +97,8 @@ const int* ranges, int ranges_length, Interval new_range) { - ASSERT((ranges_length & 1) == 1); - ASSERT(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1); + DCHECK((ranges_length & 1) == 1); + DCHECK(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1); if (containment == kLatticeUnknown) return containment; bool inside = false; int last = 0; @@ -168,15 +150,17 @@ // Generic RegExp methods. Dispatches to implementation specific methods. 
-Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re, - Handle<String> pattern, - Handle<String> flag_str) { +MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re, + Handle<String> pattern, + Handle<String> flag_str) { Isolate* isolate = re->GetIsolate(); Zone zone(isolate); JSRegExp::Flags flags = RegExpFlagsFromString(flag_str); CompilationCache* compilation_cache = isolate->compilation_cache(); - Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags); - bool in_cache = !cached.is_null(); + MaybeHandle<FixedArray> maybe_cached = + compilation_cache->LookupRegExp(pattern, flags); + Handle<FixedArray> cached; + bool in_cache = maybe_cached.ToHandle(&cached); LOG(isolate, RegExpCompileEvent(re, in_cache)); Handle<Object> result; @@ -184,18 +168,17 @@ re->set_data(*cached); return re; } - pattern = FlattenGetString(pattern); + pattern = String::Flatten(pattern); PostponeInterruptsScope postpone(isolate); RegExpCompileData parse_result; FlatStringReader reader(isolate, pattern); if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(), &parse_result, &zone)) { // Throw an exception if we fail to parse the pattern. 
- ThrowRegExpException(re, - pattern, - parse_result.error, - "malformed_regexp"); - return Handle<Object>::null(); + return ThrowRegExpException(re, + pattern, + parse_result.error, + "malformed_regexp"); } bool has_been_compiled = false; @@ -211,8 +194,11 @@ parse_result.capture_count == 0) { RegExpAtom* atom = parse_result.tree->AsAtom(); Vector<const uc16> atom_pattern = atom->data(); - Handle<String> atom_string = - isolate->factory()->NewStringFromTwoByte(atom_pattern); + Handle<String> atom_string; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, atom_string, + isolate->factory()->NewStringFromTwoByte(atom_pattern), + Object); if (!HasFewDifferentCharacters(atom_string)) { AtomCompile(re, pattern, flags, atom_string); has_been_compiled = true; @@ -221,7 +207,7 @@ if (!has_been_compiled) { IrregexpInitialize(re, pattern, flags, parse_result.capture_count); } - ASSERT(re->data()->IsFixedArray()); + DCHECK(re->data()->IsFixedArray()); // Compilation succeeded so the data is set on the regexp // and we can store it in the cache. 
Handle<FixedArray> data(FixedArray::cast(re->data())); @@ -231,23 +217,19 @@ } -Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp, - Handle<String> subject, - int index, - Handle<JSArray> last_match_info) { +MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp, + Handle<String> subject, + int index, + Handle<JSArray> last_match_info) { switch (regexp->TypeTag()) { case JSRegExp::ATOM: return AtomExec(regexp, subject, index, last_match_info); case JSRegExp::IRREGEXP: { - Handle<Object> result = - IrregexpExec(regexp, subject, index, last_match_info); - ASSERT(!result.is_null() || - regexp->GetIsolate()->has_pending_exception()); - return result; + return IrregexpExec(regexp, subject, index, last_match_info); } default: UNREACHABLE(); - return Handle<Object>::null(); + return MaybeHandle<Object>(); } } @@ -287,16 +269,16 @@ int output_size) { Isolate* isolate = regexp->GetIsolate(); - ASSERT(0 <= index); - ASSERT(index <= subject->length()); + DCHECK(0 <= index); + DCHECK(index <= subject->length()); - if (!subject->IsFlat()) FlattenString(subject); + subject = String::Flatten(subject); DisallowHeapAllocation no_gc; // ensure vectors stay valid String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)); int needle_len = needle->length(); - ASSERT(needle->IsFlat()); - ASSERT_LT(0, needle_len); + DCHECK(needle->IsFlat()); + DCHECK_LT(0, needle_len); if (index + needle_len > subject->length()) { return RegExpImpl::RE_FAILURE; @@ -305,8 +287,8 @@ for (int i = 0; i < output_size; i += 2) { String::FlatContent needle_content = needle->GetFlatContent(); String::FlatContent subject_content = subject->GetFlatContent(); - ASSERT(needle_content.IsFlat()); - ASSERT(subject_content.IsFlat()); + DCHECK(needle_content.IsFlat()); + DCHECK(subject_content.IsFlat()); // dispatch on type of strings index = (needle_content.IsAscii() ? 
(subject_content.IsAscii() @@ -353,7 +335,7 @@ if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value(); - ASSERT_EQ(res, RegExpImpl::RE_SUCCESS); + DCHECK_EQ(res, RegExpImpl::RE_SUCCESS); SealHandleScope shs(isolate); FixedArray* array = FixedArray::cast(last_match_info->elements()); SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]); @@ -383,7 +365,7 @@ if (saved_code->IsCode()) { // Reinstate the code in the original place. re->SetDataAt(JSRegExp::code_index(is_ascii), saved_code); - ASSERT(compiled_code->IsSmi()); + DCHECK(compiled_code->IsSmi()); return true; } return CompileIrregexp(re, sample_subject, is_ascii); @@ -419,9 +401,9 @@ // When arriving here entry can only be a smi, either representing an // uncompiled regexp, a previous compilation error, or code that has // been flushed. - ASSERT(entry->IsSmi()); + DCHECK(entry->IsSmi()); int entry_value = Smi::cast(entry)->value(); - ASSERT(entry_value == JSRegExp::kUninitializedValue || + DCHECK(entry_value == JSRegExp::kUninitializedValue || entry_value == JSRegExp::kCompilationErrorValue || (entry_value < JSRegExp::kCodeAgeMask && entry_value >= 0)); @@ -430,7 +412,7 @@ // the saved code index (we store the error message, not the actual // error). Recreate the error object and throw it. 
Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_ascii)); - ASSERT(error_string->IsString()); + DCHECK(error_string->IsString()); Handle<String> error_message(String::cast(error_string)); CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate); return false; @@ -439,7 +421,7 @@ JSRegExp::Flags flags = re->GetFlags(); Handle<String> pattern(re->Pattern()); - if (!pattern->IsFlat()) FlattenString(pattern); + pattern = String::Flatten(pattern); RegExpCompileData compile_data; FlatStringReader reader(isolate, pattern); if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(), @@ -447,10 +429,10 @@ &zone)) { // Throw an exception if we fail to parse the pattern. // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once. - ThrowRegExpException(re, - pattern, - compile_data.error, - "malformed_regexp"); + USE(ThrowRegExpException(re, + pattern, + compile_data.error, + "malformed_regexp")); return false; } RegExpEngine::CompilationResult result = @@ -464,9 +446,8 @@ &zone); if (result.error_message != NULL) { // Unable to compile regexp. - Handle<String> error_message = - isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message)); - ASSERT(!error_message.is_null()); + Handle<String> error_message = isolate->factory()->NewStringFromUtf8( + CStrVector(result.error_message)).ToHandleChecked(); CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate); return false; } @@ -528,7 +509,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp, Handle<String> subject) { - if (!subject->IsFlat()) FlattenString(subject); + subject = String::Flatten(subject); // Check the asciiness of the underlying storage. 
bool is_ascii = subject->IsOneByteRepresentationUnderneath(); @@ -558,14 +539,14 @@ Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate); - ASSERT(index >= 0); - ASSERT(index <= subject->length()); - ASSERT(subject->IsFlat()); + DCHECK(index >= 0); + DCHECK(index <= subject->length()); + DCHECK(subject->IsFlat()); bool is_ascii = subject->IsOneByteRepresentationUnderneath(); #ifndef V8_INTERPRETED_REGEXP - ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2); + DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2); do { EnsureCompiledIrregexp(regexp, subject, is_ascii); Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate); @@ -581,7 +562,7 @@ index, isolate); if (res != NativeRegExpMacroAssembler::RETRY) { - ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION || + DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION || isolate->has_pending_exception()); STATIC_ASSERT( static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS); @@ -604,7 +585,7 @@ return RE_EXCEPTION; #else // V8_INTERPRETED_REGEXP - ASSERT(output_size >= IrregexpNumberOfRegisters(*irregexp)); + DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp)); // We must have done EnsureCompiledIrregexp, so we can get the number of // registers. int number_of_capture_registers = @@ -625,11 +606,10 @@ index); if (result == RE_SUCCESS) { // Copy capture results to the start of the registers array. 
- OS::MemCopy( - output, raw_output, number_of_capture_registers * sizeof(int32_t)); + MemCopy(output, raw_output, number_of_capture_registers * sizeof(int32_t)); } if (result == RE_EXCEPTION) { - ASSERT(!isolate->has_pending_exception()); + DCHECK(!isolate->has_pending_exception()); isolate->StackOverflow(); } return result; @@ -637,12 +617,12 @@ } -Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp, - Handle<String> subject, - int previous_index, - Handle<JSArray> last_match_info) { +MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp, + Handle<String> subject, + int previous_index, + Handle<JSArray> last_match_info) { Isolate* isolate = regexp->GetIsolate(); - ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP); + DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP); // Prepare space for the return values. #if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG) @@ -655,8 +635,8 @@ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject); if (required_registers < 0) { // Compiling failed with an exception. - ASSERT(isolate->has_pending_exception()); - return Handle<Object>::null(); + DCHECK(isolate->has_pending_exception()); + return MaybeHandle<Object>(); } int32_t* output_registers = NULL; @@ -677,10 +657,10 @@ last_match_info, subject, capture_count, output_registers); } if (res == RE_EXCEPTION) { - ASSERT(isolate->has_pending_exception()); - return Handle<Object>::null(); + DCHECK(isolate->has_pending_exception()); + return MaybeHandle<Object>(); } - ASSERT(res == RE_FAILURE); + DCHECK(res == RE_FAILURE); return isolate->factory()->null_value(); } @@ -689,7 +669,7 @@ Handle<String> subject, int capture_count, int32_t* match) { - ASSERT(last_match_info->HasFastObjectElements()); + DCHECK(last_match_info->HasFastObjectElements()); int capture_register_count = (capture_count + 1) * 2; JSArray::EnsureSize(last_match_info, capture_register_count + kLastMatchOverhead); @@ -756,8 +736,8 @@ // to the compiled regexp. 
current_match_index_ = max_matches_ - 1; num_matches_ = max_matches_; - ASSERT(registers_per_match_ >= 2); // Each match has at least one capture. - ASSERT_GE(register_array_size_, registers_per_match_); + DCHECK(registers_per_match_ >= 2); // Each match has at least one capture. + DCHECK_GE(register_array_size_, registers_per_match_); int32_t* last_match = ®ister_array_[current_match_index_ * registers_per_match_]; last_match[0] = -1; @@ -986,7 +966,7 @@ // Does not measure in percent, but rather per-128 (the table size from the // regexp macro assembler). int Frequency(int in_character) { - ASSERT((in_character & RegExpMacroAssembler::kTableMask) == in_character); + DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character); if (total_samples_ < 1) return 1; // Division by zero. int freq_in_per128 = (frequencies_[in_character].counter() * 128) / total_samples_; @@ -1108,7 +1088,7 @@ frequency_collator_(), zone_(zone) { accept_ = new(zone) EndNode(EndNode::ACCEPT, zone); - ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister); + DCHECK(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister); } @@ -1155,8 +1135,8 @@ #ifdef DEBUG if (FLAG_print_code) { CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer()); - Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), - trace_scope.file()); + OFStream os(trace_scope.file()); + Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os); } if (FLAG_trace_regexp_assembler) { delete macro_assembler_; @@ -1188,7 +1168,7 @@ bool Trace::GetStoredPosition(int reg, int* cp_offset) { - ASSERT_EQ(0, *cp_offset); + DCHECK_EQ(0, *cp_offset); for (DeferredAction* action = actions_; action != NULL; action = action->next()) { @@ -1227,11 +1207,12 @@ void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler, int max_register, - OutSet& registers_to_pop, - OutSet& registers_to_clear) { + const OutSet& registers_to_pop, + const OutSet& registers_to_clear) { for (int reg 
= max_register; reg >= 0; reg--) { - if (registers_to_pop.Get(reg)) assembler->PopRegister(reg); - else if (registers_to_clear.Get(reg)) { + if (registers_to_pop.Get(reg)) { + assembler->PopRegister(reg); + } else if (registers_to_clear.Get(reg)) { int clear_to = reg; while (reg > 0 && registers_to_clear.Get(reg - 1)) { reg--; @@ -1244,7 +1225,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler, int max_register, - OutSet& affected_registers, + const OutSet& affected_registers, OutSet* registers_to_pop, OutSet* registers_to_clear, Zone* zone) { @@ -1289,16 +1270,16 @@ // we can set undo_action to IGNORE if we know there is no value to // restore. undo_action = RESTORE; - ASSERT_EQ(store_position, -1); - ASSERT(!clear); + DCHECK_EQ(store_position, -1); + DCHECK(!clear); break; } case ActionNode::INCREMENT_REGISTER: if (!absolute) { value++; } - ASSERT_EQ(store_position, -1); - ASSERT(!clear); + DCHECK_EQ(store_position, -1); + DCHECK(!clear); undo_action = RESTORE; break; case ActionNode::STORE_POSITION: { @@ -1320,8 +1301,8 @@ } else { undo_action = pc->is_capture() ? 
CLEAR : RESTORE; } - ASSERT(!absolute); - ASSERT_EQ(value, 0); + DCHECK(!absolute); + DCHECK_EQ(value, 0); break; } case ActionNode::CLEAR_CAPTURES: { @@ -1332,8 +1313,8 @@ clear = true; } undo_action = RESTORE; - ASSERT(!absolute); - ASSERT_EQ(value, 0); + DCHECK(!absolute); + DCHECK_EQ(value, 0); break; } default: @@ -1378,7 +1359,7 @@ void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) { RegExpMacroAssembler* assembler = compiler->macro_assembler(); - ASSERT(!is_trivial()); + DCHECK(!is_trivial()); if (actions_ == NULL && backtrack() == NULL) { // Here we just have some deferred cp advances to fix and we are back to @@ -1595,13 +1576,13 @@ Trace* trace) { switch (guard->op()) { case Guard::LT: - ASSERT(!trace->mentions_reg(guard->reg())); + DCHECK(!trace->mentions_reg(guard->reg())); macro_assembler->IfRegisterGE(guard->reg(), guard->value(), trace->backtrack()); break; case Guard::GEQ: - ASSERT(!trace->mentions_reg(guard->reg())); + DCHECK(!trace->mentions_reg(guard->reg())); macro_assembler->IfRegisterLT(guard->reg(), guard->value(), trace->backtrack()); @@ -1705,12 +1686,12 @@ if (((exor - 1) & exor) == 0) { // If c1 and c2 differ only by one bit. // Ecma262UnCanonicalize always gives the highest number last. - ASSERT(c2 > c1); + DCHECK(c2 > c1); uc16 mask = char_mask ^ exor; macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure); return true; } - ASSERT(c2 > c1); + DCHECK(c2 > c1); uc16 diff = c2 - c1; if (((diff - 1) & diff) == 0 && c1 >= diff) { // If the characters differ by 2^n but don't differ by one bit then @@ -1756,7 +1737,7 @@ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check); } Label ok; - ASSERT(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4); + DCHECK(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4); switch (length) { case 2: { if (ShortCutEmitCharacterPair(macro_assembler, @@ -1844,9 +1825,9 @@ // Assert that everything is on one kTableSize page. 
for (int i = start_index; i <= end_index; i++) { - ASSERT_EQ(ranges->at(i) & ~kMask, base); + DCHECK_EQ(ranges->at(i) & ~kMask, base); } - ASSERT(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base); + DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base); char templ[kSize]; Label* on_bit_set; @@ -1902,7 +1883,7 @@ &dummy, in_range_label, &dummy); - ASSERT(!dummy.is_linked()); + DCHECK(!dummy.is_linked()); // Cut out the single range by rewriting the array. This creates a new // range that is a merger of the two ranges on either side of the one we // are cutting out. The oddity of the labels is preserved. @@ -1969,7 +1950,7 @@ } } - ASSERT(*new_start_index > start_index); + DCHECK(*new_start_index > start_index); *new_end_index = *new_start_index - 1; if (ranges->at(*new_end_index) == *border) { (*new_end_index)--; @@ -2000,7 +1981,7 @@ int first = ranges->at(start_index); int last = ranges->at(end_index) - 1; - ASSERT_LT(min_char, first); + DCHECK_LT(min_char, first); // Just need to test if the character is before or on-or-after // a particular character. @@ -2033,7 +2014,7 @@ if (cut == kNoCutIndex) cut = start_index; CutOutRange( masm, ranges, start_index, end_index, cut, even_label, odd_label); - ASSERT_GE(end_index - start_index, 2); + DCHECK_GE(end_index - start_index, 2); GenerateBranches(masm, ranges, start_index + 1, @@ -2093,25 +2074,25 @@ // We didn't find any section that started after the limit, so everything // above the border is one of the terminal labels. above = (end_index & 1) != (start_index & 1) ? 
odd_label : even_label; - ASSERT(new_end_index == end_index - 1); + DCHECK(new_end_index == end_index - 1); } - ASSERT_LE(start_index, new_end_index); - ASSERT_LE(new_start_index, end_index); - ASSERT_LT(start_index, new_start_index); - ASSERT_LT(new_end_index, end_index); - ASSERT(new_end_index + 1 == new_start_index || + DCHECK_LE(start_index, new_end_index); + DCHECK_LE(new_start_index, end_index); + DCHECK_LT(start_index, new_start_index); + DCHECK_LT(new_end_index, end_index); + DCHECK(new_end_index + 1 == new_start_index || (new_end_index + 2 == new_start_index && border == ranges->at(new_end_index + 1))); - ASSERT_LT(min_char, border - 1); - ASSERT_LT(border, max_char); - ASSERT_LT(ranges->at(new_end_index), border); - ASSERT(border < ranges->at(new_start_index) || + DCHECK_LT(min_char, border - 1); + DCHECK_LT(border, max_char); + DCHECK_LT(ranges->at(new_end_index), border); + DCHECK(border < ranges->at(new_start_index) || (border == ranges->at(new_start_index) && new_start_index == end_index && new_end_index == end_index - 1 && border == last + 1)); - ASSERT(new_start_index == 0 || border >= ranges->at(new_start_index - 1)); + DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1)); masm->CheckCharacterGT(border - 1, above); Label dummy; @@ -2228,7 +2209,7 @@ for (int i = 0; i <= last_valid_range; i++) { CharacterRange& range = ranges->at(i); if (range.from() == 0) { - ASSERT_EQ(i, 0); + DCHECK_EQ(i, 0); zeroth_entry_is_failure = !zeroth_entry_is_failure; } else { range_boundaries->Add(range.from(), zone); @@ -2485,7 +2466,7 @@ details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE); if (details->cannot_match()) return false; if (!details->Rationalize(compiler->ascii())) return false; - ASSERT(details->characters() == 1 || + DCHECK(details->characters() == 1 || compiler->macro_assembler()->CanReadUnaligned()); uint32_t mask = details->mask(); uint32_t value = details->value(); @@ -2555,7 +2536,7 @@ int characters_filled_in, bool 
not_at_start) { Isolate* isolate = compiler->macro_assembler()->zone()->isolate(); - ASSERT(characters_filled_in < details->characters()); + DCHECK(characters_filled_in < details->characters()); int characters = details->characters(); int char_mask; if (compiler->ascii()) { @@ -2584,7 +2565,7 @@ unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(), chars); - ASSERT(length != 0); // Can only happen if c > char_mask (see above). + DCHECK(length != 0); // Can only happen if c > char_mask (see above). if (length == 1) { // This letter has no case equivalents, so it's nice and simple // and the mask-compare will determine definitely whether we have @@ -2620,7 +2601,7 @@ pos->determines_perfectly = true; } characters_filled_in++; - ASSERT(characters_filled_in <= details->characters()); + DCHECK(characters_filled_in <= details->characters()); if (characters_filled_in == details->characters()) { return; } @@ -2686,13 +2667,13 @@ pos->value = bits; } characters_filled_in++; - ASSERT(characters_filled_in <= details->characters()); + DCHECK(characters_filled_in <= details->characters()); if (characters_filled_in == details->characters()) { return; } } } - ASSERT(characters_filled_in != details->characters()); + DCHECK(characters_filled_in != details->characters()); if (!details->cannot_match()) { on_success()-> GetQuickCheckDetails(details, compiler, @@ -2713,7 +2694,7 @@ void QuickCheckDetails::Advance(int by, bool ascii) { - ASSERT(by >= 0); + DCHECK(by >= 0); if (by >= characters_) { Clear(); return; @@ -2734,7 +2715,7 @@ void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) { - ASSERT(characters_ == other->characters_); + DCHECK(characters_ == other->characters_); if (other->cannot_match_) { return; } @@ -2765,7 +2746,7 @@ class VisitMarker { public: explicit VisitMarker(NodeInfo* info) : info_(info) { - ASSERT(!info->visited); + DCHECK(!info->visited); info->visited = true; 
} ~VisitMarker() { @@ -2779,7 +2760,7 @@ RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; - ASSERT(!info()->visited); + DCHECK(!info()->visited); VisitMarker marker(info()); return FilterSuccessor(depth - 1, ignore_case); } @@ -2813,7 +2794,7 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; - ASSERT(!info()->visited); + DCHECK(!info()->visited); VisitMarker marker(info()); int element_count = elms_->length(); for (int i = 0; i < element_count; i++) { @@ -2834,7 +2815,7 @@ copy[j] = converted; } } else { - ASSERT(elm.text_type() == TextElement::CHAR_CLASS); + DCHECK(elm.text_type() == TextElement::CHAR_CLASS); RegExpCharacterClass* cc = elm.char_class(); ZoneList<CharacterRange>* ranges = cc->ranges(zone()); if (!CharacterRange::IsCanonical(ranges)) { @@ -2903,7 +2884,7 @@ GuardedAlternative alternative = alternatives_->at(i); RegExpNode* replacement = alternative.node()->FilterASCII(depth - 1, ignore_case); - ASSERT(replacement != this); // No missing EMPTY_MATCH_CHECK. + DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK. if (replacement != NULL) { alternatives_->at(i).set_node(replacement); surviving++; @@ -2989,7 +2970,7 @@ bool not_at_start) { not_at_start = (not_at_start || not_at_start_); int choice_count = alternatives_->length(); - ASSERT(choice_count > 0); + DCHECK(choice_count > 0); alternatives_->at(0).node()->GetQuickCheckDetails(details, compiler, characters_filled_in, @@ -3113,7 +3094,7 @@ } else if (next_is_word_character == Trace::TRUE_VALUE) { BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord); } else { - ASSERT(next_is_word_character == Trace::FALSE_VALUE); + DCHECK(next_is_word_character == Trace::FALSE_VALUE); BacktrackIfPrevious(compiler, trace, at_boundary ? 
kIsNonWord : kIsWord); } } @@ -3269,7 +3250,7 @@ EmitCharacterFunction* emit_function = NULL; switch (pass) { case NON_ASCII_MATCH: - ASSERT(ascii); + DCHECK(ascii); if (quarks[j] > String::kMaxOneByteCharCode) { assembler->GoTo(backtrack); return; @@ -3299,7 +3280,7 @@ } } } else { - ASSERT_EQ(TextElement::CHAR_CLASS, elm.text_type()); + DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type()); if (pass == CHARACTER_CLASS_MATCH) { if (first_element_checked && i == 0) continue; if (DeterminedAlready(quick_check, elm.cp_offset())) continue; @@ -3321,7 +3302,7 @@ int TextNode::Length() { TextElement elm = elms_->last(); - ASSERT(elm.cp_offset() >= 0); + DCHECK(elm.cp_offset() >= 0); return elm.cp_offset() + elm.length(); } @@ -3345,7 +3326,7 @@ void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) { LimitResult limit_result = LimitVersions(compiler, trace); if (limit_result == DONE) return; - ASSERT(limit_result == CONTINUE); + DCHECK(limit_result == CONTINUE); if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) { compiler->SetRegExpTooBig(); @@ -3402,7 +3383,7 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) { - ASSERT(by > 0); + DCHECK(by > 0); // We don't have an instruction for shifting the current character register // down or for using a shifted value for anything so lets just forget that // we preloaded any characters into it. 
@@ -3497,14 +3478,14 @@ void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) { - ASSERT_EQ(loop_node_, NULL); + DCHECK_EQ(loop_node_, NULL); AddAlternative(alt); loop_node_ = alt.node(); } void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) { - ASSERT_EQ(continue_node_, NULL); + DCHECK_EQ(continue_node_, NULL); AddAlternative(alt); continue_node_ = alt.node(); } @@ -3515,15 +3496,15 @@ if (trace->stop_node() == this) { int text_length = GreedyLoopTextLengthForAlternative(&(alternatives_->at(0))); - ASSERT(text_length != kNodeIsTooComplexForGreedyLoops); + DCHECK(text_length != kNodeIsTooComplexForGreedyLoops); // Update the counter-based backtracking info on the stack. This is an // optimization for greedy loops (see below). - ASSERT(trace->cp_offset() == text_length); + DCHECK(trace->cp_offset() == text_length); macro_assembler->AdvanceCurrentPosition(text_length); macro_assembler->GoTo(trace->loop_label()); return; } - ASSERT(trace->stop_node() == NULL); + DCHECK(trace->stop_node() == NULL); if (!trace->is_trivial()) { trace->Flush(compiler, this); return; @@ -3833,7 +3814,7 @@ Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED); int skip_distance = GetSkipTable( min_lookahead, max_lookahead, boolean_skip_table); - ASSERT(skip_distance != 0); + DCHECK(skip_distance != 0); Label cont, again; masm->Bind(&again); @@ -3934,14 +3915,14 @@ ZoneList<Guard*>* guards = alternative.guards(); int guard_count = (guards == NULL) ? 
0 : guards->length(); for (int j = 0; j < guard_count; j++) { - ASSERT(!trace->mentions_reg(guards->at(j)->reg())); + DCHECK(!trace->mentions_reg(guards->at(j)->reg())); } } #endif LimitResult limit_result = LimitVersions(compiler, trace); if (limit_result == DONE) return; - ASSERT(limit_result == CONTINUE); + DCHECK(limit_result == CONTINUE); int new_flush_budget = trace->flush_budget() / choice_count; if (trace->flush_budget() == 0 && trace->actions() != NULL) { @@ -3969,7 +3950,7 @@ // information for each iteration of the loop, which could take up a lot of // space. greedy_loop = true; - ASSERT(trace->stop_node() == NULL); + DCHECK(trace->stop_node() == NULL); macro_assembler->PushCurrentPosition(); current_trace = &counter_backtrack_trace; Label greedy_match_failed; @@ -4008,7 +3989,7 @@ // and step forwards 3 if the character is not one of abc. Abc need // not be atoms, they can be any reasonably limited character class or // small alternation. - ASSERT(trace->is_trivial()); // This is the case on LoopChoiceNodes. + DCHECK(trace->is_trivial()); // This is the case on LoopChoiceNodes. 
BoyerMooreLookahead* lookahead = bm_info(not_at_start); if (lookahead == NULL) { eats_at_least = Min(kMaxLookaheadForBoyerMoore, @@ -4193,7 +4174,7 @@ RegExpMacroAssembler* assembler = compiler->macro_assembler(); LimitResult limit_result = LimitVersions(compiler, trace); if (limit_result == DONE) return; - ASSERT(limit_result == CONTINUE); + DCHECK(limit_result == CONTINUE); RecursionCheck rc(compiler); @@ -4301,7 +4282,7 @@ int clear_registers_to = clear_registers_from + clear_register_count - 1; assembler->ClearRegisters(clear_registers_from, clear_registers_to); - ASSERT(trace->backtrack() == NULL); + DCHECK(trace->backtrack() == NULL); assembler->Backtrack(); return; } @@ -4320,11 +4301,11 @@ LimitResult limit_result = LimitVersions(compiler, trace); if (limit_result == DONE) return; - ASSERT(limit_result == CONTINUE); + DCHECK(limit_result == CONTINUE); RecursionCheck rc(compiler); - ASSERT_EQ(start_reg_ + 1, end_reg_); + DCHECK_EQ(start_reg_ + 1, end_reg_); if (compiler->ignore_case()) { assembler->CheckNotBackReferenceIgnoreCase(start_reg_, trace->backtrack()); @@ -4344,44 +4325,41 @@ class DotPrinter: public NodeVisitor { public: - explicit DotPrinter(bool ignore_case) - : ignore_case_(ignore_case), - stream_(&alloc_) { } + DotPrinter(OStream& os, bool ignore_case) // NOLINT + : os_(os), + ignore_case_(ignore_case) {} void PrintNode(const char* label, RegExpNode* node); void Visit(RegExpNode* node); void PrintAttributes(RegExpNode* from); - StringStream* stream() { return &stream_; } void PrintOnFailure(RegExpNode* from, RegExpNode* to); #define DECLARE_VISIT(Type) \ virtual void Visit##Type(Type##Node* that); FOR_EACH_NODE_TYPE(DECLARE_VISIT) #undef DECLARE_VISIT private: + OStream& os_; bool ignore_case_; - HeapStringAllocator alloc_; - StringStream stream_; }; void DotPrinter::PrintNode(const char* label, RegExpNode* node) { - stream()->Add("digraph G {\n graph [label=\""); + os_ << "digraph G {\n graph [label=\""; for (int i = 0; label[i]; i++) { 
switch (label[i]) { case '\\': - stream()->Add("\\\\"); + os_ << "\\\\"; break; case '"': - stream()->Add("\""); + os_ << "\""; break; default: - stream()->Put(label[i]); + os_ << label[i]; break; } } - stream()->Add("\"];\n"); + os_ << "\"];\n"; Visit(node); - stream()->Add("}\n"); - printf("%s", stream()->ToCString().get()); + os_ << "}" << endl; } @@ -4393,97 +4371,95 @@ void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) { - stream()->Add(" n%p -> n%p [style=dotted];\n", from, on_failure); + os_ << " n" << from << " -> n" << on_failure << " [style=dotted];\n"; Visit(on_failure); } class TableEntryBodyPrinter { public: - TableEntryBodyPrinter(StringStream* stream, ChoiceNode* choice) - : stream_(stream), choice_(choice) { } + TableEntryBodyPrinter(OStream& os, ChoiceNode* choice) // NOLINT + : os_(os), + choice_(choice) {} void Call(uc16 from, DispatchTable::Entry entry) { OutSet* out_set = entry.out_set(); for (unsigned i = 0; i < OutSet::kFirstLimit; i++) { if (out_set->Get(i)) { - stream()->Add(" n%p:s%io%i -> n%p;\n", - choice(), - from, - i, - choice()->alternatives()->at(i).node()); + os_ << " n" << choice() << ":s" << from << "o" << i << " -> n" + << choice()->alternatives()->at(i).node() << ";\n"; } } } private: - StringStream* stream() { return stream_; } ChoiceNode* choice() { return choice_; } - StringStream* stream_; + OStream& os_; ChoiceNode* choice_; }; class TableEntryHeaderPrinter { public: - explicit TableEntryHeaderPrinter(StringStream* stream) - : first_(true), stream_(stream) { } + explicit TableEntryHeaderPrinter(OStream& os) // NOLINT + : first_(true), + os_(os) {} void Call(uc16 from, DispatchTable::Entry entry) { if (first_) { first_ = false; } else { - stream()->Add("|"); + os_ << "|"; } - stream()->Add("{\\%k-\\%k|{", from, entry.to()); + os_ << "{\\" << AsUC16(from) << "-\\" << AsUC16(entry.to()) << "|{"; OutSet* out_set = entry.out_set(); int priority = 0; for (unsigned i = 0; i < OutSet::kFirstLimit; i++) { if 
(out_set->Get(i)) { - if (priority > 0) stream()->Add("|"); - stream()->Add("<s%io%i> %i", from, i, priority); + if (priority > 0) os_ << "|"; + os_ << "<s" << from << "o" << i << "> " << priority; priority++; } } - stream()->Add("}}"); + os_ << "}}"; } private: bool first_; - StringStream* stream() { return stream_; } - StringStream* stream_; + OStream& os_; }; class AttributePrinter { public: - explicit AttributePrinter(DotPrinter* out) - : out_(out), first_(true) { } + explicit AttributePrinter(OStream& os) // NOLINT + : os_(os), + first_(true) {} void PrintSeparator() { if (first_) { first_ = false; } else { - out_->stream()->Add("|"); + os_ << "|"; } } void PrintBit(const char* name, bool value) { if (!value) return; PrintSeparator(); - out_->stream()->Add("{%s}", name); + os_ << "{" << name << "}"; } void PrintPositive(const char* name, int value) { if (value < 0) return; PrintSeparator(); - out_->stream()->Add("{%s|%x}", name, value); + os_ << "{" << name << "|" << value << "}"; } + private: - DotPrinter* out_; + OStream& os_; bool first_; }; void DotPrinter::PrintAttributes(RegExpNode* that) { - stream()->Add(" a%p [shape=Mrecord, color=grey, fontcolor=grey, " - "margin=0.1, fontsize=10, label=\"{", - that); - AttributePrinter printer(this); + os_ << " a" << that << " [shape=Mrecord, color=grey, fontcolor=grey, " + << "margin=0.1, fontsize=10, label=\"{"; + AttributePrinter printer(os_); NodeInfo* info = that->info(); printer.PrintBit("NI", info->follows_newline_interest); printer.PrintBit("WI", info->follows_word_interest); @@ -4491,27 +4467,27 @@ Label* label = that->label(); if (label->is_bound()) printer.PrintPositive("@", label->pos()); - stream()->Add("}\"];\n"); - stream()->Add(" a%p -> n%p [style=dashed, color=grey, " - "arrowhead=none];\n", that, that); + os_ << "}\"];\n" + << " a" << that << " -> n" << that + << " [style=dashed, color=grey, arrowhead=none];\n"; } static const bool kPrintDispatchTable = false; void 
DotPrinter::VisitChoice(ChoiceNode* that) { if (kPrintDispatchTable) { - stream()->Add(" n%p [shape=Mrecord, label=\"", that); - TableEntryHeaderPrinter header_printer(stream()); + os_ << " n" << that << " [shape=Mrecord, label=\""; + TableEntryHeaderPrinter header_printer(os_); that->GetTable(ignore_case_)->ForEach(&header_printer); - stream()->Add("\"]\n", that); + os_ << "\"]\n"; PrintAttributes(that); - TableEntryBodyPrinter body_printer(stream(), that); + TableEntryBodyPrinter body_printer(os_, that); that->GetTable(ignore_case_)->ForEach(&body_printer); } else { - stream()->Add(" n%p [shape=Mrecord, label=\"?\"];\n", that); + os_ << " n" << that << " [shape=Mrecord, label=\"?\"];\n"; for (int i = 0; i < that->alternatives()->length(); i++) { GuardedAlternative alt = that->alternatives()->at(i); - stream()->Add(" n%p -> n%p;\n", that, alt.node()); + os_ << " n" << that << " -> n" << alt.node(); } } for (int i = 0; i < that->alternatives()->length(); i++) { @@ -4523,138 +4499,136 @@ void DotPrinter::VisitText(TextNode* that) { Zone* zone = that->zone(); - stream()->Add(" n%p [label=\"", that); + os_ << " n" << that << " [label=\""; for (int i = 0; i < that->elements()->length(); i++) { - if (i > 0) stream()->Add(" "); + if (i > 0) os_ << " "; TextElement elm = that->elements()->at(i); switch (elm.text_type()) { case TextElement::ATOM: { - stream()->Add("'%w'", elm.atom()->data()); + Vector<const uc16> data = elm.atom()->data(); + for (int i = 0; i < data.length(); i++) { + os_ << static_cast<char>(data[i]); + } break; } case TextElement::CHAR_CLASS: { RegExpCharacterClass* node = elm.char_class(); - stream()->Add("["); - if (node->is_negated()) - stream()->Add("^"); + os_ << "["; + if (node->is_negated()) os_ << "^"; for (int j = 0; j < node->ranges(zone)->length(); j++) { CharacterRange range = node->ranges(zone)->at(j); - stream()->Add("%k-%k", range.from(), range.to()); + os_ << AsUC16(range.from()) << "-" << AsUC16(range.to()); } - stream()->Add("]"); + os_ 
<< "]"; break; } default: UNREACHABLE(); } } - stream()->Add("\", shape=box, peripheries=2];\n"); + os_ << "\", shape=box, peripheries=2];\n"; PrintAttributes(that); - stream()->Add(" n%p -> n%p;\n", that, that->on_success()); + os_ << " n" << that << " -> n" << that->on_success() << ";\n"; Visit(that->on_success()); } void DotPrinter::VisitBackReference(BackReferenceNode* that) { - stream()->Add(" n%p [label=\"$%i..$%i\", shape=doubleoctagon];\n", - that, - that->start_register(), - that->end_register()); + os_ << " n" << that << " [label=\"$" << that->start_register() << "..$" + << that->end_register() << "\", shape=doubleoctagon];\n"; PrintAttributes(that); - stream()->Add(" n%p -> n%p;\n", that, that->on_success()); + os_ << " n" << that << " -> n" << that->on_success() << ";\n"; Visit(that->on_success()); } void DotPrinter::VisitEnd(EndNode* that) { - stream()->Add(" n%p [style=bold, shape=point];\n", that); + os_ << " n" << that << " [style=bold, shape=point];\n"; PrintAttributes(that); } void DotPrinter::VisitAssertion(AssertionNode* that) { - stream()->Add(" n%p [", that); + os_ << " n" << that << " ["; switch (that->assertion_type()) { case AssertionNode::AT_END: - stream()->Add("label=\"$\", shape=septagon"); + os_ << "label=\"$\", shape=septagon"; break; case AssertionNode::AT_START: - stream()->Add("label=\"^\", shape=septagon"); + os_ << "label=\"^\", shape=septagon"; break; case AssertionNode::AT_BOUNDARY: - stream()->Add("label=\"\\b\", shape=septagon"); + os_ << "label=\"\\b\", shape=septagon"; break; case AssertionNode::AT_NON_BOUNDARY: - stream()->Add("label=\"\\B\", shape=septagon"); + os_ << "label=\"\\B\", shape=septagon"; break; case AssertionNode::AFTER_NEWLINE: - stream()->Add("label=\"(?<=\\n)\", shape=septagon"); + os_ << "label=\"(?<=\\n)\", shape=septagon"; break; } - stream()->Add("];\n"); + os_ << "];\n"; PrintAttributes(that); RegExpNode* successor = that->on_success(); - stream()->Add(" n%p -> n%p;\n", that, successor); + os_ << " n" 
<< that << " -> n" << successor << ";\n"; Visit(successor); } void DotPrinter::VisitAction(ActionNode* that) { - stream()->Add(" n%p [", that); + os_ << " n" << that << " ["; switch (that->action_type_) { case ActionNode::SET_REGISTER: - stream()->Add("label=\"$%i:=%i\", shape=octagon", - that->data_.u_store_register.reg, - that->data_.u_store_register.value); + os_ << "label=\"$" << that->data_.u_store_register.reg + << ":=" << that->data_.u_store_register.value << "\", shape=octagon"; break; case ActionNode::INCREMENT_REGISTER: - stream()->Add("label=\"$%i++\", shape=octagon", - that->data_.u_increment_register.reg); + os_ << "label=\"$" << that->data_.u_increment_register.reg + << "++\", shape=octagon"; break; case ActionNode::STORE_POSITION: - stream()->Add("label=\"$%i:=$pos\", shape=octagon", - that->data_.u_position_register.reg); + os_ << "label=\"$" << that->data_.u_position_register.reg + << ":=$pos\", shape=octagon"; break; case ActionNode::BEGIN_SUBMATCH: - stream()->Add("label=\"$%i:=$pos,begin\", shape=septagon", - that->data_.u_submatch.current_position_register); + os_ << "label=\"$" << that->data_.u_submatch.current_position_register + << ":=$pos,begin\", shape=septagon"; break; case ActionNode::POSITIVE_SUBMATCH_SUCCESS: - stream()->Add("label=\"escape\", shape=septagon"); + os_ << "label=\"escape\", shape=septagon"; break; case ActionNode::EMPTY_MATCH_CHECK: - stream()->Add("label=\"$%i=$pos?,$%i<%i?\", shape=septagon", - that->data_.u_empty_match_check.start_register, - that->data_.u_empty_match_check.repetition_register, - that->data_.u_empty_match_check.repetition_limit); + os_ << "label=\"$" << that->data_.u_empty_match_check.start_register + << "=$pos?,$" << that->data_.u_empty_match_check.repetition_register + << "<" << that->data_.u_empty_match_check.repetition_limit + << "?\", shape=septagon"; break; case ActionNode::CLEAR_CAPTURES: { - stream()->Add("label=\"clear $%i to $%i\", shape=septagon", - that->data_.u_clear_captures.range_from, 
- that->data_.u_clear_captures.range_to); + os_ << "label=\"clear $" << that->data_.u_clear_captures.range_from + << " to $" << that->data_.u_clear_captures.range_to + << "\", shape=septagon"; break; } } - stream()->Add("];\n"); + os_ << "];\n"; PrintAttributes(that); RegExpNode* successor = that->on_success(); - stream()->Add(" n%p -> n%p;\n", that, successor); + os_ << " n" << that << " -> n" << successor << ";\n"; Visit(successor); } class DispatchTableDumper { public: - explicit DispatchTableDumper(StringStream* stream) : stream_(stream) { } + explicit DispatchTableDumper(OStream& os) : os_(os) {} void Call(uc16 key, DispatchTable::Entry entry); - StringStream* stream() { return stream_; } private: - StringStream* stream_; + OStream& os_; }; void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) { - stream()->Add("[%k-%k]: {", key, entry.to()); + os_ << "[" << AsUC16(key) << "-" << AsUC16(entry.to()) << "]: {"; OutSet* set = entry.out_set(); bool first = true; for (unsigned i = 0; i < OutSet::kFirstLimit; i++) { @@ -4662,28 +4636,27 @@ if (first) { first = false; } else { - stream()->Add(", "); + os_ << ", "; } - stream()->Add("%i", i); + os_ << i; } } - stream()->Add("}\n"); + os_ << "}\n"; } void DispatchTable::Dump() { - HeapStringAllocator alloc; - StringStream stream(&alloc); - DispatchTableDumper dumper(&stream); + OFStream os(stderr); + DispatchTableDumper dumper(os); tree()->ForEach(&dumper); - OS::PrintError("%s", stream.ToCString().get()); } void RegExpEngine::DotPrint(const char* label, RegExpNode* node, bool ignore_case) { - DotPrinter printer(ignore_case); + OFStream os(stdout); + DotPrinter printer(os, ignore_case); printer.PrintNode(label, node); } @@ -4713,10 +4686,10 @@ const int* special_class, int length) { length--; // Remove final 0x10000. 
- ASSERT(special_class[length] == 0x10000); - ASSERT(ranges->length() != 0); - ASSERT(length != 0); - ASSERT(special_class[0] != 0); + DCHECK(special_class[length] == 0x10000); + DCHECK(ranges->length() != 0); + DCHECK(length != 0); + DCHECK(special_class[0] != 0); if (ranges->length() != (length >> 1) + 1) { return false; } @@ -4744,7 +4717,7 @@ const int* special_class, int length) { length--; // Remove final 0x10000. - ASSERT(special_class[length] == 0x10000); + DCHECK(special_class[length] == 0x10000); if (ranges->length() * 2 != length) { return false; } @@ -4841,7 +4814,7 @@ : compiler_(compiler), saved_expansion_factor_(compiler->current_expansion_factor()), ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) { - ASSERT(factor > 0); + DCHECK(factor > 0); if (ok_to_expand_) { if (factor > kMaxExpansionFactor) { // Avoid integer overflow of the current expansion factor. @@ -4930,7 +4903,7 @@ } } if (max <= kMaxUnrolledMaxMatches && min == 0) { - ASSERT(max > 0); // Due to the 'if' above. + DCHECK(max > 0); // Due to the 'if' above. RegExpExpansionLimiter limiter(compiler, max); if (limiter.ok_to_expand()) { // Unroll the optional matches up to max. 
@@ -5169,9 +5142,9 @@ ZoneList<CharacterRange>* ranges, Zone* zone) { elmc--; - ASSERT(elmv[elmc] == 0x10000); + DCHECK(elmv[elmc] == 0x10000); for (int i = 0; i < elmc; i += 2) { - ASSERT(elmv[i] < elmv[i + 1]); + DCHECK(elmv[i] < elmv[i + 1]); ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone); } } @@ -5182,13 +5155,13 @@ ZoneList<CharacterRange>* ranges, Zone* zone) { elmc--; - ASSERT(elmv[elmc] == 0x10000); - ASSERT(elmv[0] != 0x0000); - ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit); + DCHECK(elmv[elmc] == 0x10000); + DCHECK(elmv[0] != 0x0000); + DCHECK(elmv[elmc-1] != String::kMaxUtf16CodeUnit); uc16 last = 0x0000; for (int i = 0; i < elmc; i += 2) { - ASSERT(last <= elmv[i] - 1); - ASSERT(elmv[i] < elmv[i + 1]); + DCHECK(last <= elmv[i] - 1); + DCHECK(elmv[i] < elmv[i + 1]); ranges->Add(CharacterRange(last, elmv[i] - 1), zone); last = elmv[i + 1]; } @@ -5284,8 +5257,8 @@ ZoneList<CharacterRange>** included, ZoneList<CharacterRange>** excluded, Zone* zone) { - ASSERT_EQ(NULL, *included); - ASSERT_EQ(NULL, *excluded); + DCHECK_EQ(NULL, *included); + DCHECK_EQ(NULL, *excluded); DispatchTable table(zone); for (int i = 0; i < base->length(); i++) table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone); @@ -5345,7 +5318,7 @@ if (length == 0) { block_end = pos; } else { - ASSERT_EQ(1, length); + DCHECK_EQ(1, length); block_end = range[0]; } int end = (block_end > top) ? 
top : block_end; @@ -5365,7 +5338,7 @@ bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) { - ASSERT_NOT_NULL(ranges); + DCHECK_NOT_NULL(ranges); int n = ranges->length(); if (n <= 1) return true; int max = ranges->at(0).to(); @@ -5505,15 +5478,15 @@ } while (read < n); character_ranges->Rewind(num_canonical); - ASSERT(CharacterRange::IsCanonical(character_ranges)); + DCHECK(CharacterRange::IsCanonical(character_ranges)); } void CharacterRange::Negate(ZoneList<CharacterRange>* ranges, ZoneList<CharacterRange>* negated_ranges, Zone* zone) { - ASSERT(CharacterRange::IsCanonical(ranges)); - ASSERT_EQ(0, negated_ranges->length()); + DCHECK(CharacterRange::IsCanonical(ranges)); + DCHECK_EQ(0, negated_ranges->length()); int range_count = ranges->length(); uc16 from = 0; int i = 0; @@ -5569,7 +5542,7 @@ } -bool OutSet::Get(unsigned value) { +bool OutSet::Get(unsigned value) const { if (value < kFirstLimit) { return (first_ & (1 << value)) != 0; } else if (remaining_ == NULL) { @@ -5589,7 +5562,7 @@ if (tree()->is_empty()) { // If this is the first range we just insert into the table. ZoneSplayTree<Config>::Locator loc; - ASSERT_RESULT(tree()->Insert(current.from(), &loc)); + DCHECK_RESULT(tree()->Insert(current.from(), &loc)); loc.set_value(Entry(current.from(), current.to(), empty()->Extend(value, zone))); return; @@ -5615,7 +5588,7 @@ // to the map and let the next step deal with merging it with // the range we're adding. ZoneSplayTree<Config>::Locator loc; - ASSERT_RESULT(tree()->Insert(right.from(), &loc)); + DCHECK_RESULT(tree()->Insert(right.from(), &loc)); loc.set_value(Entry(right.from(), right.to(), entry->out_set())); @@ -5631,24 +5604,24 @@ // then we have to add a range covering just that space. 
if (current.from() < entry->from()) { ZoneSplayTree<Config>::Locator ins; - ASSERT_RESULT(tree()->Insert(current.from(), &ins)); + DCHECK_RESULT(tree()->Insert(current.from(), &ins)); ins.set_value(Entry(current.from(), entry->from() - 1, empty()->Extend(value, zone))); current.set_from(entry->from()); } - ASSERT_EQ(current.from(), entry->from()); + DCHECK_EQ(current.from(), entry->from()); // If the overlapping range extends beyond the one we want to add // we have to snap the right part off and add it separately. if (entry->to() > current.to()) { ZoneSplayTree<Config>::Locator ins; - ASSERT_RESULT(tree()->Insert(current.to() + 1, &ins)); + DCHECK_RESULT(tree()->Insert(current.to() + 1, &ins)); ins.set_value(Entry(current.to() + 1, entry->to(), entry->out_set())); entry->set_to(current.to()); } - ASSERT(entry->to() <= current.to()); + DCHECK(entry->to() <= current.to()); // The overlapping range is now completely contained by the range // we're adding so we can just update it and move the start point // of the range we're adding just past it. @@ -5657,12 +5630,12 @@ // adding 1 will wrap around to 0. if (entry->to() == String::kMaxUtf16CodeUnit) break; - ASSERT(entry->to() + 1 > current.from()); + DCHECK(entry->to() + 1 > current.from()); current.set_from(entry->to() + 1); } else { // There is no overlap so we can just add the range ZoneSplayTree<Config>::Locator ins; - ASSERT_RESULT(tree()->Insert(current.from(), &ins)); + DCHECK_RESULT(tree()->Insert(current.from(), &ins)); ins.set_value(Entry(current.from(), current.to(), empty()->Extend(value, zone))); @@ -5855,7 +5828,7 @@ } } } else { - ASSERT_EQ(TextElement::CHAR_CLASS, text.text_type()); + DCHECK_EQ(TextElement::CHAR_CLASS, text.text_type()); RegExpCharacterClass* char_class = text.char_class(); ZoneList<CharacterRange>* ranges = char_class->ranges(zone()); if (char_class->is_negated()) { @@ -6015,7 +5988,7 @@ // Sample some characters from the middle of the string. 
static const int kSampleSize = 128; - FlattenString(sample_subject); + sample_subject = String::Flatten(sample_subject); int chars_sampled = 0; int half_way = (sample_subject->length() - kSampleSize) / 2; for (int i = Max(0, half_way); @@ -6098,6 +6071,12 @@ #elif V8_TARGET_ARCH_MIPS RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2, zone); +#elif V8_TARGET_ARCH_MIPS64 + RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2, + zone); +#elif V8_TARGET_ARCH_X87 + RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2, + zone); #else #error "Unsupported architecture" #endif diff -Nru nodejs-0.11.13/deps/v8/src/jsregexp.h nodejs-0.11.15/deps/v8/src/jsregexp.h --- nodejs-0.11.13/deps/v8/src/jsregexp.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/jsregexp.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_JSREGEXP_H_ #define V8_JSREGEXP_H_ -#include "allocation.h" -#include "assembler.h" -#include "zone-inl.h" +#include "src/allocation.h" +#include "src/assembler.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { @@ -55,10 +32,10 @@ // Creates a regular expression literal in the old space. // This function calls the garbage collector if necessary. - static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor, - Handle<String> pattern, - Handle<String> flags, - bool* has_pending_exception); + MUST_USE_RESULT static MaybeHandle<Object> CreateRegExpLiteral( + Handle<JSFunction> constructor, + Handle<String> pattern, + Handle<String> flags); // Returns a string representation of a regular expression. // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4. @@ -69,16 +46,18 @@ // generic data and choice of implementation - as well as what // the implementation wants to store in the data field. // Returns false if compilation fails. - static Handle<Object> Compile(Handle<JSRegExp> re, - Handle<String> pattern, - Handle<String> flags); + MUST_USE_RESULT static MaybeHandle<Object> Compile( + Handle<JSRegExp> re, + Handle<String> pattern, + Handle<String> flags); // See ECMA-262 section 15.10.6.2. // This function calls the garbage collector if necessary. 
- static Handle<Object> Exec(Handle<JSRegExp> regexp, - Handle<String> subject, - int index, - Handle<JSArray> lastMatchInfo); + MUST_USE_RESULT static MaybeHandle<Object> Exec( + Handle<JSRegExp> regexp, + Handle<String> subject, + int index, + Handle<JSArray> lastMatchInfo); // Prepares a JSRegExp object with Irregexp-specific data. static void IrregexpInitialize(Handle<JSRegExp> re, @@ -133,10 +112,11 @@ // On a successful match, the result is a JSArray containing // captured positions. On a failure, the result is the null value. // Returns an empty handle in case of an exception. - static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp, - Handle<String> subject, - int index, - Handle<JSArray> lastMatchInfo); + MUST_USE_RESULT static MaybeHandle<Object> IrregexpExec( + Handle<JSRegExp> regexp, + Handle<String> subject, + int index, + Handle<JSArray> lastMatchInfo); // Set last match info. If match is NULL, then setting captures is omitted. static Handle<JSArray> SetLastMatchInfo(Handle<JSArray> last_match_info, @@ -259,7 +239,7 @@ public: CharacterRange() : from_(0), to_(0) { } // For compatibility with the CHECK_OK macro - CharacterRange(void* null) { ASSERT_EQ(NULL, null); } //NOLINT + CharacterRange(void* null) { DCHECK_EQ(NULL, null); } //NOLINT CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { } static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges, Zone* zone); @@ -268,7 +248,7 @@ return CharacterRange(value, value); } static inline CharacterRange Range(uc16 from, uc16 to) { - ASSERT(from <= to); + DCHECK(from <= to); return CharacterRange(from, to); } static inline CharacterRange Everything() { @@ -316,7 +296,7 @@ public: OutSet() : first_(0), remaining_(NULL), successors_(NULL) { } OutSet* Extend(unsigned value, Zone* zone); - bool Get(unsigned value); + bool Get(unsigned value) const; static const unsigned kFirstLimit = 32; private: @@ -445,12 +425,12 @@ RegExpTree* tree() const { return tree_; } RegExpAtom* atom() const { 
- ASSERT(text_type() == ATOM); + DCHECK(text_type() == ATOM); return reinterpret_cast<RegExpAtom*>(tree()); } RegExpCharacterClass* char_class() const { - ASSERT(text_type() == CHAR_CLASS); + DCHECK(text_type() == CHAR_CLASS); return reinterpret_cast<RegExpCharacterClass*>(tree()); } @@ -561,8 +541,8 @@ int characters() { return characters_; } void set_characters(int characters) { characters_ = characters; } Position* positions(int index) { - ASSERT(index >= 0); - ASSERT(index < characters_); + DCHECK(index >= 0); + DCHECK(index < characters_); return positions_ + index; } uint32_t mask() { return mask_; } @@ -648,7 +628,7 @@ virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; } // Helper for FilterASCII. RegExpNode* replacement() { - ASSERT(info()->replacement_calculated); + DCHECK(info()->replacement_calculated); return replacement_; } RegExpNode* set_replacement(RegExpNode* replacement) { @@ -1465,7 +1445,7 @@ // These set methods and AdvanceCurrentPositionInTrace should be used only on // new traces - the intention is that traces are immutable after creation. 
void add_action(DeferredAction* new_action) { - ASSERT(new_action->next_ == NULL); + DCHECK(new_action->next_ == NULL); new_action->next_ = actions_; actions_ = new_action; } @@ -1485,14 +1465,14 @@ int FindAffectedRegisters(OutSet* affected_registers, Zone* zone); void PerformDeferredActions(RegExpMacroAssembler* macro, int max_register, - OutSet& affected_registers, + const OutSet& affected_registers, OutSet* registers_to_pop, OutSet* registers_to_clear, Zone* zone); void RestoreAffectedRegisters(RegExpMacroAssembler* macro, int max_register, - OutSet& registers_to_pop, - OutSet& registers_to_clear); + const OutSet& registers_to_pop, + const OutSet& registers_to_clear); int cp_offset_; DeferredAction* actions_; Label* backtrack_; @@ -1580,7 +1560,7 @@ bool has_failed() { return error_message_ != NULL; } const char* error_message() { - ASSERT(error_message_ != NULL); + DCHECK(error_message_ != NULL); return error_message_; } void fail(const char* error_message) { diff -Nru nodejs-0.11.13/deps/v8/src/jsregexp-inl.h nodejs-0.11.15/deps/v8/src/jsregexp-inl.h --- nodejs-0.11.13/deps/v8/src/jsregexp-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/jsregexp-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_JSREGEXP_INL_H_ #define V8_JSREGEXP_INL_H_ -#include "allocation.h" -#include "handles.h" -#include "heap.h" -#include "jsregexp.h" -#include "objects.h" +#include "src/allocation.h" +#include "src/handles.h" +#include "src/heap/heap.h" +#include "src/jsregexp.h" +#include "src/objects.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/lazy-instance.h nodejs-0.11.15/deps/v8/src/lazy-instance.h --- nodejs-0.11.13/deps/v8/src/lazy-instance.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lazy-instance.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,260 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The LazyInstance<Type, Traits> class manages a single instance of Type, -// which will be lazily created on the first time it's accessed. This class is -// useful for places you would normally use a function-level static, but you -// need to have guaranteed thread-safety. The Type constructor will only ever -// be called once, even if two threads are racing to create the object. Get() -// and Pointer() will always return the same, completely initialized instance. 
-// -// LazyInstance is completely thread safe, assuming that you create it safely. -// The class was designed to be POD initialized, so it shouldn't require a -// static constructor. It really only makes sense to declare a LazyInstance as -// a global variable using the LAZY_INSTANCE_INITIALIZER initializer. -// -// LazyInstance is similar to Singleton, except it does not have the singleton -// property. You can have multiple LazyInstance's of the same type, and each -// will manage a unique instance. It also preallocates the space for Type, as -// to avoid allocating the Type instance on the heap. This may help with the -// performance of creating the instance, and reducing heap fragmentation. This -// requires that Type be a complete type so we can determine the size. See -// notes for advanced users below for more explanations. -// -// Example usage: -// static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER; -// void SomeMethod() { -// my_instance.Get().SomeMethod(); // MyClass::SomeMethod() -// -// MyClass* ptr = my_instance.Pointer(); -// ptr->DoDoDo(); // MyClass::DoDoDo -// } -// -// Additionally you can override the way your instance is constructed by -// providing your own trait: -// Example usage: -// struct MyCreateTrait { -// static void Construct(MyClass* allocated_ptr) { -// new (allocated_ptr) MyClass(/* extra parameters... */); -// } -// }; -// static LazyInstance<MyClass, MyCreateTrait>::type my_instance = -// LAZY_INSTANCE_INITIALIZER; -// -// WARNINGS: -// - This implementation of LazyInstance is NOT THREAD-SAFE by default. See -// ThreadSafeInitOnceTrait declared below for that. -// - Lazy initialization comes with a cost. Make sure that you don't use it on -// critical path. Consider adding your initialization code to a function -// which is explicitly called once. 
-// -// Notes for advanced users: -// LazyInstance can actually be used in two different ways: -// -// - "Static mode" which is the default mode since it is the most efficient -// (no extra heap allocation). In this mode, the instance is statically -// allocated (stored in the global data section at compile time). -// The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER) -// must be used to initialize static lazy instances. -// -// - "Dynamic mode". In this mode, the instance is dynamically allocated and -// constructed (using new) by default. This mode is useful if you have to -// deal with some code already allocating the instance for you (e.g. -// OS::Mutex() which returns a new private OS-dependent subclass of Mutex). -// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize -// dynamic lazy instances. - -#ifndef V8_LAZY_INSTANCE_H_ -#define V8_LAZY_INSTANCE_H_ - -#include "checks.h" -#include "once.h" - -namespace v8 { -namespace internal { - -#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } } -#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 } - -// Default to static mode. -#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER - - -template <typename T> -struct LeakyInstanceTrait { - static void Destroy(T* /* instance */) {} -}; - - -// Traits that define how an instance is allocated and accessed. - - -template <typename T> -struct StaticallyAllocatedInstanceTrait { - // 16-byte alignment fallback to be on the safe side here. 
- struct V8_ALIGNAS(T, 16) StorageType { - char x[sizeof(T)]; - }; - - STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T)); - - static T* MutableInstance(StorageType* storage) { - return reinterpret_cast<T*>(storage); - } - - template <typename ConstructTrait> - static void InitStorageUsingTrait(StorageType* storage) { - ConstructTrait::Construct(MutableInstance(storage)); - } -}; - - -template <typename T> -struct DynamicallyAllocatedInstanceTrait { - typedef T* StorageType; - - static T* MutableInstance(StorageType* storage) { - return *storage; - } - - template <typename CreateTrait> - static void InitStorageUsingTrait(StorageType* storage) { - *storage = CreateTrait::Create(); - } -}; - - -template <typename T> -struct DefaultConstructTrait { - // Constructs the provided object which was already allocated. - static void Construct(T* allocated_ptr) { - new(allocated_ptr) T(); - } -}; - - -template <typename T> -struct DefaultCreateTrait { - static T* Create() { - return new T(); - } -}; - - -struct ThreadSafeInitOnceTrait { - template <typename Function, typename Storage> - static void Init(OnceType* once, Function function, Storage storage) { - CallOnce(once, function, storage); - } -}; - - -// Initialization trait for users who don't care about thread-safety. -struct SingleThreadInitOnceTrait { - template <typename Function, typename Storage> - static void Init(OnceType* once, Function function, Storage storage) { - if (*once == ONCE_STATE_UNINITIALIZED) { - function(storage); - *once = ONCE_STATE_DONE; - } - } -}; - - -// TODO(pliard): Handle instances destruction (using global destructors). -template <typename T, typename AllocationTrait, typename CreateTrait, - typename InitOnceTrait, typename DestroyTrait /* not used yet. 
*/> -struct LazyInstanceImpl { - public: - typedef typename AllocationTrait::StorageType StorageType; - - private: - static void InitInstance(StorageType* storage) { - AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage); - } - - void Init() const { - InitOnceTrait::Init( - &once_, - // Casts to void* are needed here to avoid breaking strict aliasing - // rules. - reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT - reinterpret_cast<void*>(&storage_)); - } - - public: - T* Pointer() { - Init(); - return AllocationTrait::MutableInstance(&storage_); - } - - const T& Get() const { - Init(); - return *AllocationTrait::MutableInstance(&storage_); - } - - mutable OnceType once_; - // Note that the previous field, OnceType, is an AtomicWord which guarantees - // 4-byte alignment of the storage field below. If compiling with GCC (>4.2), - // the LAZY_ALIGN macro above will guarantee correctness for any alignment. - mutable StorageType storage_; -}; - - -template <typename T, - typename CreateTrait = DefaultConstructTrait<T>, - typename InitOnceTrait = SingleThreadInitOnceTrait, - typename DestroyTrait = LeakyInstanceTrait<T> > -struct LazyStaticInstance { - typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>, - CreateTrait, InitOnceTrait, DestroyTrait> type; -}; - - -template <typename T, - typename CreateTrait = DefaultConstructTrait<T>, - typename InitOnceTrait = SingleThreadInitOnceTrait, - typename DestroyTrait = LeakyInstanceTrait<T> > -struct LazyInstance { - // A LazyInstance is a LazyStaticInstance. 
- typedef typename LazyStaticInstance<T, CreateTrait, InitOnceTrait, - DestroyTrait>::type type; -}; - - -template <typename T, - typename CreateTrait = DefaultCreateTrait<T>, - typename InitOnceTrait = SingleThreadInitOnceTrait, - typename DestroyTrait = LeakyInstanceTrait<T> > -struct LazyDynamicInstance { - typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>, - CreateTrait, InitOnceTrait, DestroyTrait> type; -}; - -} } // namespace v8::internal - -#endif // V8_LAZY_INSTANCE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/default-platform.cc nodejs-0.11.15/deps/v8/src/libplatform/default-platform.cc --- nodejs-0.11.13/deps/v8/src/libplatform/default-platform.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/default-platform.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,34 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "default-platform.h" +#include "src/libplatform/default-platform.h" +#include <algorithm> #include <queue> -// TODO(jochen): We should have our own version of checks.h. -#include "../checks.h" -// TODO(jochen): Why is cpu.h not in platform/? -#include "../cpu.h" -#include "worker-thread.h" +#include "src/base/logging.h" +#include "src/base/platform/platform.h" +#include "src/libplatform/worker-thread.h" namespace v8 { -namespace internal { +namespace platform { + + +v8::Platform* CreateDefaultPlatform(int thread_pool_size) { + DefaultPlatform* platform = new DefaultPlatform(); + platform->SetThreadPoolSize(thread_pool_size); + platform->EnsureInitialized(); + return platform; +} + + +bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate) { + return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate); +} + + +const int DefaultPlatform::kMaxThreadPoolSize = 4; DefaultPlatform::DefaultPlatform() @@ -44,7 +36,7 @@ DefaultPlatform::~DefaultPlatform() { - LockGuard<Mutex> guard(&lock_); + base::LockGuard<base::Mutex> guard(&lock_); queue_.Terminate(); if (initialized_) { for (std::vector<WorkerThread*>::iterator i = thread_pool_.begin(); @@ -52,20 +44,29 @@ delete *i; } } + for (std::map<v8::Isolate*, std::queue<Task*> >::iterator i = + main_thread_queue_.begin(); + i != 
main_thread_queue_.end(); ++i) { + while (!i->second.empty()) { + delete i->second.front(); + i->second.pop(); + } + } } void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) { - LockGuard<Mutex> guard(&lock_); - ASSERT(thread_pool_size >= 0); + base::LockGuard<base::Mutex> guard(&lock_); + DCHECK(thread_pool_size >= 0); if (thread_pool_size < 1) - thread_pool_size = CPU::NumberOfProcessorsOnline(); - thread_pool_size_ = Max(Min(thread_pool_size, kMaxThreadPoolSize), 1); + thread_pool_size = base::OS::NumberOfProcessorsOnline(); + thread_pool_size_ = + std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1); } void DefaultPlatform::EnsureInitialized() { - LockGuard<Mutex> guard(&lock_); + base::LockGuard<base::Mutex> guard(&lock_); if (initialized_) return; initialized_ = true; @@ -73,6 +74,24 @@ thread_pool_.push_back(new WorkerThread(&queue_)); } + +bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) { + Task* task = NULL; + { + base::LockGuard<base::Mutex> guard(&lock_); + std::map<v8::Isolate*, std::queue<Task*> >::iterator it = + main_thread_queue_.find(isolate); + if (it == main_thread_queue_.end() || it->second.empty()) { + return false; + } + task = it->second.front(); + it->second.pop(); + } + task->Run(); + delete task; + return true; +} + void DefaultPlatform::CallOnBackgroundThread(Task *task, ExpectedRuntime expected_runtime) { EnsureInitialized(); @@ -81,9 +100,8 @@ void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) { - // TODO(jochen): implement. 
- task->Run(); - delete task; + base::LockGuard<base::Mutex> guard(&lock_); + main_thread_queue_[isolate].push(task); } -} } // namespace v8::internal +} } // namespace v8::platform diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/default-platform.h nodejs-0.11.15/deps/v8/src/libplatform/default-platform.h --- nodejs-0.11.13/deps/v8/src/libplatform/default-platform.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/default-platform.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,21 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LIBPLATFORM_DEFAULT_PLATFORM_H_ #define V8_LIBPLATFORM_DEFAULT_PLATFORM_H_ +#include <map> +#include <queue> #include <vector> -#include "../../include/v8-platform.h" -// TODO(jochen): We should have our own version of globals.h. -#include "../globals.h" -#include "../platform/mutex.h" -#include "task-queue.h" +#include "include/v8-platform.h" +#include "src/base/macros.h" +#include "src/base/platform/mutex.h" +#include "src/libplatform/task-queue.h" namespace v8 { -namespace internal { +namespace platform { class TaskQueue; class Thread; @@ -52,26 +30,29 @@ void EnsureInitialized(); + bool PumpMessageLoop(v8::Isolate* isolate); + // v8::Platform implementation. 
virtual void CallOnBackgroundThread( - Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE; - virtual void CallOnForegroundThread(v8::Isolate *isolate, - Task *task) V8_OVERRIDE; + Task* task, ExpectedRuntime expected_runtime) V8_OVERRIDE; + virtual void CallOnForegroundThread(v8::Isolate* isolate, + Task* task) V8_OVERRIDE; private: - static const int kMaxThreadPoolSize = 4; + static const int kMaxThreadPoolSize; - Mutex lock_; + base::Mutex lock_; bool initialized_; int thread_pool_size_; std::vector<WorkerThread*> thread_pool_; TaskQueue queue_; + std::map<v8::Isolate*, std::queue<Task*> > main_thread_queue_; DISALLOW_COPY_AND_ASSIGN(DefaultPlatform); }; -} } // namespace v8::internal +} } // namespace v8::platform #endif // V8_LIBPLATFORM_DEFAULT_PLATFORM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/DEPS nodejs-0.11.15/deps/v8/src/libplatform/DEPS --- nodejs-0.11.13/deps/v8/src/libplatform/DEPS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/DEPS 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +include_rules = [ + "-include", + "+include/libplatform", + "+include/v8-platform.h", + "-src", + "+src/base", + "+src/libplatform", +] diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/task-queue.cc nodejs-0.11.15/deps/v8/src/libplatform/task-queue.cc --- nodejs-0.11.13/deps/v8/src/libplatform/task-queue.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/task-queue.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,51 +1,27 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "task-queue.h" +#include "src/libplatform/task-queue.h" -// TODO(jochen): We should have our own version of checks.h. 
-#include "../checks.h" +#include "src/base/logging.h" namespace v8 { -namespace internal { +namespace platform { TaskQueue::TaskQueue() : process_queue_semaphore_(0), terminated_(false) {} TaskQueue::~TaskQueue() { - LockGuard<Mutex> guard(&lock_); - ASSERT(terminated_); - ASSERT(task_queue_.empty()); + base::LockGuard<base::Mutex> guard(&lock_); + DCHECK(terminated_); + DCHECK(task_queue_.empty()); } void TaskQueue::Append(Task* task) { - LockGuard<Mutex> guard(&lock_); - ASSERT(!terminated_); + base::LockGuard<base::Mutex> guard(&lock_); + DCHECK(!terminated_); task_queue_.push(task); process_queue_semaphore_.Signal(); } @@ -54,7 +30,7 @@ Task* TaskQueue::GetNext() { for (;;) { { - LockGuard<Mutex> guard(&lock_); + base::LockGuard<base::Mutex> guard(&lock_); if (!task_queue_.empty()) { Task* result = task_queue_.front(); task_queue_.pop(); @@ -71,10 +47,10 @@ void TaskQueue::Terminate() { - LockGuard<Mutex> guard(&lock_); - ASSERT(!terminated_); + base::LockGuard<base::Mutex> guard(&lock_); + DCHECK(!terminated_); terminated_ = true; process_queue_semaphore_.Signal(); } -} } // namespace v8::internal +} } // namespace v8::platform diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/task-queue.h nodejs-0.11.15/deps/v8/src/libplatform/task-queue.h --- nodejs-0.11.13/deps/v8/src/libplatform/task-queue.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/task-queue.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,21 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LIBPLATFORM_TASK_QUEUE_H_ #define V8_LIBPLATFORM_TASK_QUEUE_H_ #include <queue> -// TODO(jochen): We should have our own version of globals.h. 
-#include "../globals.h" -#include "../platform/mutex.h" -#include "../platform/semaphore.h" +#include "src/base/macros.h" +#include "src/base/platform/mutex.h" +#include "src/base/platform/semaphore.h" namespace v8 { class Task; -namespace internal { +namespace platform { class TaskQueue { public: @@ -57,15 +33,15 @@ void Terminate(); private: - Mutex lock_; - Semaphore process_queue_semaphore_; + base::Mutex lock_; + base::Semaphore process_queue_semaphore_; std::queue<Task*> task_queue_; bool terminated_; DISALLOW_COPY_AND_ASSIGN(TaskQueue); }; -} } // namespace v8::internal +} } // namespace v8::platform #endif // V8_LIBPLATFORM_TASK_QUEUE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/worker-thread.cc nodejs-0.11.15/deps/v8/src/libplatform/worker-thread.cc --- nodejs-0.11.13/deps/v8/src/libplatform/worker-thread.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/worker-thread.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,17 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "worker-thread.h" - -// TODO(jochen): We should have our own version of checks.h. -#include "../checks.h" -#include "../../include/v8-platform.h" -#include "task-queue.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/libplatform/worker-thread.h" + +#include "include/v8-platform.h" +#include "src/libplatform/task-queue.h" namespace v8 { -namespace internal { +namespace platform { WorkerThread::WorkerThread(TaskQueue* queue) - : Thread("V8 WorkerThread"), queue_(queue) { + : Thread(Options("V8 WorkerThread")), queue_(queue) { Start(); } @@ -53,4 +28,4 @@ } } -} } // namespace v8::internal +} } // namespace v8::platform diff -Nru nodejs-0.11.13/deps/v8/src/libplatform/worker-thread.h nodejs-0.11.15/deps/v8/src/libplatform/worker-thread.h --- nodejs-0.11.13/deps/v8/src/libplatform/worker-thread.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/libplatform/worker-thread.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,22 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LIBPLATFORM_WORKER_THREAD_H_ #define V8_LIBPLATFORM_WORKER_THREAD_H_ #include <queue> -// TODO(jochen): We should have our own version of globals.h. 
-#include "../globals.h" -#include "../platform.h" +#include "src/base/macros.h" +#include "src/base/platform/platform.h" namespace v8 { -namespace internal { +namespace platform { class TaskQueue; -class WorkerThread : public Thread { +class WorkerThread : public base::Thread { public: explicit WorkerThread(TaskQueue* queue); virtual ~WorkerThread(); @@ -56,7 +32,7 @@ DISALLOW_COPY_AND_ASSIGN(WorkerThread); }; -} } // namespace v8::internal +} } // namespace v8::platform #endif // V8_LIBPLATFORM_WORKER_THREAD_H_ diff -Nru nodejs-0.11.13/deps/v8/src/list.h nodejs-0.11.15/deps/v8/src/list.h --- nodejs-0.11.13/deps/v8/src/list.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/list.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,17 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LIST_H_ #define V8_LIST_H_ -#include "utils.h" +#include "src/checks.h" +#include "src/utils.h" namespace v8 { namespace internal { +template<typename T> class Vector; // ---------------------------------------------------------------------------- // The list is a template for very light-weight lists. We are not @@ -83,8 +62,8 @@ // not safe to use after operations that can change the list's // backing store (e.g. Add). inline T& operator[](int i) const { - ASSERT(0 <= i); - SLOW_ASSERT(i < length_); + DCHECK(0 <= i); + SLOW_DCHECK(i < length_); return data_[i]; } inline T& at(int i) const { return operator[](i); } diff -Nru nodejs-0.11.13/deps/v8/src/list-inl.h nodejs-0.11.15/deps/v8/src/list-inl.h --- nodejs-0.11.13/deps/v8/src/list-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/list-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,13 @@ // Copyright 2006-2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LIST_INL_H_ #define V8_LIST_INL_H_ -#include "list.h" -#include "platform.h" +#include "src/list.h" + +#include "src/base/platform/platform.h" namespace v8 { namespace internal { @@ -72,7 +50,7 @@ template<typename T, class P> void List<T, P>::ResizeAddInternal(const T& element, P alloc) { - ASSERT(length_ >= capacity_); + DCHECK(length_ >= capacity_); // Grow the list capacity by 100%, but make sure to let it grow // even when the capacity is zero (possible initial case). 
int new_capacity = 1 + 2 * capacity_; @@ -86,9 +64,9 @@ template<typename T, class P> void List<T, P>::Resize(int new_capacity, P alloc) { - ASSERT_LE(length_, new_capacity); + DCHECK_LE(length_, new_capacity); T* new_data = NewData(new_capacity, alloc); - OS::MemCopy(new_data, data_, length_ * sizeof(T)); + MemCopy(new_data, data_, length_ * sizeof(T)); List<T, P>::DeleteData(data_); data_ = new_data; capacity_ = new_capacity; @@ -105,14 +83,14 @@ template<typename T, class P> void List<T, P>::Set(int index, const T& elm) { - ASSERT(index >= 0 && index <= length_); + DCHECK(index >= 0 && index <= length_); data_[index] = elm; } template<typename T, class P> void List<T, P>::InsertAt(int index, const T& elm, P alloc) { - ASSERT(index >= 0 && index <= length_); + DCHECK(index >= 0 && index <= length_); Add(elm, alloc); for (int i = length_ - 1; i > index; --i) { data_[i] = data_[i - 1]; @@ -166,7 +144,7 @@ template<typename T, class P> void List<T, P>::Rewind(int pos) { - ASSERT(0 <= pos && pos <= length_); + DCHECK(0 <= pos && pos <= length_); length_ = pos; } @@ -217,7 +195,7 @@ ToVector().Sort(cmp); #ifdef DEBUG for (int i = 1; i < length_; i++) - ASSERT(cmp(&data_[i - 1], &data_[i]) <= 0); + DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0); #endif } @@ -230,7 +208,7 @@ template<typename T, class P> void List<T, P>::Initialize(int capacity, P allocator) { - ASSERT(capacity >= 0); + DCHECK(capacity >= 0); data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL; capacity_ = capacity; length_ = 0; diff -Nru nodejs-0.11.13/deps/v8/src/lithium-allocator.cc nodejs-0.11.15/deps/v8/src/lithium-allocator.cc --- nodejs-0.11.13/deps/v8/src/lithium-allocator.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium-allocator.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" -#include "lithium-allocator-inl.h" - -#include "hydrogen.h" -#include "string-stream.h" - -#if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-ia32.h" -#elif V8_TARGET_ARCH_X64 -#include "x64/lithium-x64.h" -#elif V8_TARGET_ARCH_ARM64 -#include "arm64/lithium-arm64.h" -#elif V8_TARGET_ARCH_ARM -#include "arm/lithium-arm.h" -#elif V8_TARGET_ARCH_MIPS -#include "mips/lithium-mips.h" -#else -#error "Unknown architecture." 
-#endif +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/hydrogen.h" +#include "src/lithium-inl.h" +#include "src/lithium-allocator-inl.h" +#include "src/string-stream.h" namespace v8 { namespace internal { @@ -69,10 +33,11 @@ register_beneficial_(true) { if (operand_ != NULL && operand_->IsUnallocated()) { LUnallocated* unalloc = LUnallocated::cast(operand_); - requires_reg_ = unalloc->HasRegisterPolicy(); + requires_reg_ = unalloc->HasRegisterPolicy() || + unalloc->HasDoubleRegisterPolicy(); register_beneficial_ = !unalloc->HasAnyPolicy(); } - ASSERT(pos_.IsValid()); + DCHECK(pos_.IsValid()); } @@ -92,7 +57,7 @@ void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) { - ASSERT(Contains(pos) && pos.Value() != start().Value()); + DCHECK(Contains(pos) && pos.Value() != start().Value()); UseInterval* after = new(zone) UseInterval(pos, end_); after->next_ = next_; next_ = after; @@ -106,7 +71,7 @@ void LiveRange::Verify() const { UsePosition* cur = first_pos_; while (cur != NULL) { - ASSERT(Start().Value() <= cur->pos().Value() && + DCHECK(Start().Value() <= cur->pos().Value() && cur->pos().Value() <= End().Value()); cur = cur->next(); } @@ -148,15 +113,15 @@ void LiveRange::set_assigned_register(int reg, Zone* zone) { - ASSERT(!HasRegisterAssigned() && !IsSpilled()); + DCHECK(!HasRegisterAssigned() && !IsSpilled()); assigned_register_ = reg; ConvertOperands(zone); } void LiveRange::MakeSpilled(Zone* zone) { - ASSERT(!IsSpilled()); - ASSERT(TopLevel()->HasAllocatedSpillOperand()); + DCHECK(!IsSpilled()); + DCHECK(TopLevel()->HasAllocatedSpillOperand()); spilled_ = true; assigned_register_ = kInvalidAssignment; ConvertOperands(zone); @@ -164,15 +129,15 @@ bool LiveRange::HasAllocatedSpillOperand() const { - ASSERT(spill_operand_ != NULL); + DCHECK(spill_operand_ != NULL); return !spill_operand_->IsIgnored(); } void LiveRange::SetSpillOperand(LOperand* operand) { - 
ASSERT(!operand->IsUnallocated()); - ASSERT(spill_operand_ != NULL); - ASSERT(spill_operand_->IsIgnored()); + DCHECK(!operand->IsUnallocated()); + DCHECK(spill_operand_ != NULL); + DCHECK(spill_operand_->IsIgnored()); spill_operand_->ConvertTo(operand->kind(), operand->index()); } @@ -232,7 +197,7 @@ LOperand* LiveRange::CreateAssignedOperand(Zone* zone) { LOperand* op = NULL; if (HasRegisterAssigned()) { - ASSERT(!IsSpilled()); + DCHECK(!IsSpilled()); switch (Kind()) { case GENERAL_REGISTERS: op = LRegister::Create(assigned_register(), zone); @@ -244,9 +209,9 @@ UNREACHABLE(); } } else if (IsSpilled()) { - ASSERT(!HasRegisterAssigned()); + DCHECK(!HasRegisterAssigned()); op = TopLevel()->GetSpillOperand(); - ASSERT(!op->IsUnallocated()); + DCHECK(!op->IsUnallocated()); } else { LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE); unalloc->set_virtual_register(id_); @@ -283,8 +248,8 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result, Zone* zone) { - ASSERT(Start().Value() < position.Value()); - ASSERT(result->IsEmpty()); + DCHECK(Start().Value() < position.Value()); + DCHECK(result->IsEmpty()); // Find the last interval that ends before the position. If the // position is contained in one of the intervals in the chain, we // split that interval and use the first part. 
@@ -388,9 +353,9 @@ void LiveRange::ShortenTo(LifetimePosition start) { LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value()); - ASSERT(first_interval_ != NULL); - ASSERT(first_interval_->start().Value() <= start.Value()); - ASSERT(start.Value() < first_interval_->end().Value()); + DCHECK(first_interval_ != NULL); + DCHECK(first_interval_->start().Value() <= start.Value()); + DCHECK(start.Value() < first_interval_->end().Value()); first_interval_->set_start(start); } @@ -442,7 +407,7 @@ // Order of instruction's processing (see ProcessInstructions) guarantees // that each new use interval either precedes or intersects with // last added interval. - ASSERT(start.Value() < first_interval_->end().Value()); + DCHECK(start.Value() < first_interval_->end().Value()); first_interval_->start_ = Min(start, first_interval_->start_); first_interval_->end_ = Max(end, first_interval_->end_); } @@ -485,11 +450,11 @@ LOperand* op = CreateAssignedOperand(zone); UsePosition* use_pos = first_pos(); while (use_pos != NULL) { - ASSERT(Start().Value() <= use_pos->pos().Value() && + DCHECK(Start().Value() <= use_pos->pos().Value() && use_pos->pos().Value() <= End().Value()); if (use_pos->HasOperand()) { - ASSERT(op->IsRegister() || op->IsDoubleRegister() || + DCHECK(op->IsRegister() || op->IsDoubleRegister() || !use_pos->RequiresRegister()); use_pos->operand()->ConvertTo(op->kind(), op->index()); } @@ -511,7 +476,7 @@ for (UseInterval* interval = start_search; interval != NULL; interval = interval->next()) { - ASSERT(interval->next() == NULL || + DCHECK(interval->next() == NULL || interval->next()->start().Value() >= interval->start().Value()); AdvanceLastProcessedMarker(interval, position); if (interval->Contains(position)) return true; @@ -629,7 +594,7 @@ int pos, bool is_tagged) { TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register()); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); if 
(operand->HasFixedSlotPolicy()) { operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index()); } else if (operand->HasFixedRegisterPolicy()) { @@ -653,11 +618,11 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) { - ASSERT(index < Register::kMaxNumAllocatableRegisters); + DCHECK(index < Register::kMaxNumAllocatableRegisters); LiveRange* result = fixed_live_ranges_[index]; if (result == NULL) { result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone()); - ASSERT(result->IsFixed()); + DCHECK(result->IsFixed()); result->kind_ = GENERAL_REGISTERS; SetLiveRangeAssignedRegister(result, index); fixed_live_ranges_[index] = result; @@ -667,12 +632,12 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) { - ASSERT(index < DoubleRegister::NumAllocatableRegisters()); + DCHECK(index < DoubleRegister::NumAllocatableRegisters()); LiveRange* result = fixed_double_live_ranges_[index]; if (result == NULL) { result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index), chunk()->zone()); - ASSERT(result->IsFixed()); + DCHECK(result->IsFixed()); result->kind_ = DOUBLE_REGISTERS; SetLiveRangeAssignedRegister(result, index); fixed_double_live_ranges_[index] = result; @@ -862,7 +827,7 @@ } else if (cur_input->HasWritableRegisterPolicy()) { // The live range of writable input registers always goes until the end // of the instruction. 
- ASSERT(!cur_input->IsUsedAtStart()); + DCHECK(!cur_input->IsUsedAtStart()); LUnallocated* input_copy = cur_input->CopyUnconstrained( chunk()->zone()); @@ -962,7 +927,7 @@ } } } else { - ASSERT(!IsGapAt(index)); + DCHECK(!IsGapAt(index)); LInstruction* instr = InstructionAt(index); if (instr != NULL) { @@ -986,7 +951,7 @@ } } - if (instr->ClobbersDoubleRegisters()) { + if (instr->ClobbersDoubleRegisters(isolate())) { for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { if (output == NULL || !output->IsDoubleRegister() || output->index() != i) { @@ -1028,6 +993,15 @@ } Use(block_start_position, curr_position.InstructionEnd(), temp, NULL); Define(curr_position, temp, NULL); + + if (temp->IsUnallocated()) { + LUnallocated* temp_unalloc = LUnallocated::cast(temp); + if (temp_unalloc->HasDoubleRegisterPolicy()) { + double_artificial_registers_.Add( + temp_unalloc->virtual_register() - first_artificial_register_, + zone()); + } + } } } } @@ -1051,7 +1025,7 @@ HConstant* constant = HConstant::cast(op); operand = chunk_->DefineConstantOperand(constant); } else { - ASSERT(!op->EmitAtUses()); + DCHECK(!op->EmitAtUses()); LUnallocated* unalloc = new(chunk()->zone()) LUnallocated(LUnallocated::ANY); unalloc->set_virtual_register(op->id()); @@ -1093,7 +1067,7 @@ bool LAllocator::Allocate(LChunk* chunk) { - ASSERT(chunk_ == NULL); + DCHECK(chunk_ == NULL); chunk_ = static_cast<LPlatformChunk*>(chunk); assigned_registers_ = new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(), @@ -1118,7 +1092,6 @@ void LAllocator::MeetRegisterConstraints() { LAllocatorPhase phase("L_Register constraints", this); - first_artificial_register_ = next_virtual_register_; const ZoneList<HBasicBlock*>* blocks = graph_->blocks(); for (int i = 0; i < blocks->length(); ++i) { HBasicBlock* block = blocks->at(i); @@ -1152,18 +1125,18 @@ LiveRange* cur_range = range; while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) { if (cur_range->CanCover(cur_start)) { - 
ASSERT(cur_cover == NULL); + DCHECK(cur_cover == NULL); cur_cover = cur_range; } if (cur_range->CanCover(pred_end)) { - ASSERT(pred_cover == NULL); + DCHECK(pred_cover == NULL); pred_cover = cur_range; } cur_range = cur_range->next(); } if (cur_cover->IsSpilled()) return; - ASSERT(pred_cover != NULL && cur_cover != NULL); + DCHECK(pred_cover != NULL && cur_cover != NULL); if (pred_cover != cur_cover) { LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone()); LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone()); @@ -1172,7 +1145,7 @@ if (block->predecessors()->length() == 1) { gap = GapAt(block->first_instruction_index()); } else { - ASSERT(pred->end()->SecondSuccessor() == NULL); + DCHECK(pred->end()->SecondSuccessor() == NULL); gap = GetLastGap(pred); // We are going to insert a move before the branch instruction. @@ -1321,7 +1294,7 @@ break; } } - ASSERT(hint != NULL); + DCHECK(hint != NULL); LifetimePosition block_start = LifetimePosition::FromInstructionIndex( block->first_instruction_index()); @@ -1368,7 +1341,7 @@ CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey(); PrintF("Function: %s\n", CodeStub::MajorName(major_key, false)); } else { - ASSERT(chunk_->info()->IsOptimizing()); + DCHECK(chunk_->info()->IsOptimizing()); AllowHandleDereference allow_deref; PrintF("Function: %s\n", chunk_->info()->function()->debug_name()->ToCString().get()); @@ -1378,7 +1351,7 @@ PrintF("First use is at %d\n", range->first_pos()->pos().Value()); iterator.Advance(); } - ASSERT(!found); + DCHECK(!found); } #endif } @@ -1407,7 +1380,7 @@ LAllocatorPhase phase("L_Populate pointer maps", this); const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps(); - ASSERT(SafePointsAreInOrder()); + DCHECK(SafePointsAreInOrder()); // Iterate over all safe point positions and record a pointer // for all spilled live ranges at this point. 
@@ -1429,7 +1402,7 @@ for (LiveRange* cur = range; cur != NULL; cur = cur->next()) { LifetimePosition this_end = cur->End(); if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex(); - ASSERT(cur->Start().InstructionIndex() >= start); + DCHECK(cur->Start().InstructionIndex() >= start); } // Most of the ranges are in order, but not all. Keep an eye on when @@ -1483,7 +1456,7 @@ "at safe point %d\n", cur->id(), cur->Start().Value(), safe_point); LOperand* operand = cur->CreateAssignedOperand(chunk()->zone()); - ASSERT(!operand->IsStackSlot()); + DCHECK(!operand->IsStackSlot()); map->RecordPointer(operand, chunk()->zone()); } } @@ -1508,7 +1481,7 @@ void LAllocator::AllocateRegisters() { - ASSERT(unhandled_live_ranges_.is_empty()); + DCHECK(unhandled_live_ranges_.is_empty()); for (int i = 0; i < live_ranges_.length(); ++i) { if (live_ranges_[i] != NULL) { @@ -1518,11 +1491,11 @@ } } SortUnhandled(); - ASSERT(UnhandledIsSorted()); + DCHECK(UnhandledIsSorted()); - ASSERT(reusable_slots_.is_empty()); - ASSERT(active_live_ranges_.is_empty()); - ASSERT(inactive_live_ranges_.is_empty()); + DCHECK(reusable_slots_.is_empty()); + DCHECK(active_live_ranges_.is_empty()); + DCHECK(inactive_live_ranges_.is_empty()); if (mode_ == DOUBLE_REGISTERS) { for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { @@ -1532,7 +1505,7 @@ } } } else { - ASSERT(mode_ == GENERAL_REGISTERS); + DCHECK(mode_ == GENERAL_REGISTERS); for (int i = 0; i < fixed_live_ranges_.length(); ++i) { LiveRange* current = fixed_live_ranges_.at(i); if (current != NULL) { @@ -1542,9 +1515,9 @@ } while (!unhandled_live_ranges_.is_empty()) { - ASSERT(UnhandledIsSorted()); + DCHECK(UnhandledIsSorted()); LiveRange* current = unhandled_live_ranges_.RemoveLast(); - ASSERT(UnhandledIsSorted()); + DCHECK(UnhandledIsSorted()); LifetimePosition position = current->Start(); #ifdef DEBUG allocation_finger_ = position; @@ -1571,7 +1544,7 @@ // the register is too close to the start of live range. 
SpillBetween(current, current->Start(), pos->pos()); if (!AllocationOk()) return; - ASSERT(UnhandledIsSorted()); + DCHECK(UnhandledIsSorted()); continue; } } @@ -1598,7 +1571,7 @@ } } - ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled()); + DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled()); bool result = TryAllocateFreeReg(current); if (!AllocationOk()) return; @@ -1630,7 +1603,7 @@ if (FLAG_trace_alloc) { va_list arguments; va_start(arguments, msg); - OS::VPrint(msg, arguments); + base::OS::VPrint(msg, arguments); va_end(arguments); } } @@ -1672,33 +1645,33 @@ void LAllocator::AddToUnhandledSorted(LiveRange* range) { if (range == NULL || range->IsEmpty()) return; - ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled()); - ASSERT(allocation_finger_.Value() <= range->Start().Value()); + DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); + DCHECK(allocation_finger_.Value() <= range->Start().Value()); for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) { LiveRange* cur_range = unhandled_live_ranges_.at(i); if (range->ShouldBeAllocatedBefore(cur_range)) { TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1); unhandled_live_ranges_.InsertAt(i + 1, range, zone()); - ASSERT(UnhandledIsSorted()); + DCHECK(UnhandledIsSorted()); return; } } TraceAlloc("Add live range %d to unhandled at start\n", range->id()); unhandled_live_ranges_.InsertAt(0, range, zone()); - ASSERT(UnhandledIsSorted()); + DCHECK(UnhandledIsSorted()); } void LAllocator::AddToUnhandledUnsorted(LiveRange* range) { if (range == NULL || range->IsEmpty()) return; - ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled()); + DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id()); unhandled_live_ranges_.Add(range, zone()); } static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) { - ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) || + 
DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) || !(*b)->ShouldBeAllocatedBefore(*a)); if ((*a)->ShouldBeAllocatedBefore(*b)) return 1; if ((*b)->ShouldBeAllocatedBefore(*a)) return -1; @@ -1752,7 +1725,7 @@ void LAllocator::ActiveToHandled(LiveRange* range) { - ASSERT(active_live_ranges_.Contains(range)); + DCHECK(active_live_ranges_.Contains(range)); active_live_ranges_.RemoveElement(range); TraceAlloc("Moving live range %d from active to handled\n", range->id()); FreeSpillSlot(range); @@ -1760,7 +1733,7 @@ void LAllocator::ActiveToInactive(LiveRange* range) { - ASSERT(active_live_ranges_.Contains(range)); + DCHECK(active_live_ranges_.Contains(range)); active_live_ranges_.RemoveElement(range); inactive_live_ranges_.Add(range, zone()); TraceAlloc("Moving live range %d from active to inactive\n", range->id()); @@ -1768,7 +1741,7 @@ void LAllocator::InactiveToHandled(LiveRange* range) { - ASSERT(inactive_live_ranges_.Contains(range)); + DCHECK(inactive_live_ranges_.Contains(range)); inactive_live_ranges_.RemoveElement(range); TraceAlloc("Moving live range %d from inactive to handled\n", range->id()); FreeSpillSlot(range); @@ -1776,7 +1749,7 @@ void LAllocator::InactiveToActive(LiveRange* range) { - ASSERT(inactive_live_ranges_.Contains(range)); + DCHECK(inactive_live_ranges_.Contains(range)); inactive_live_ranges_.RemoveElement(range); active_live_ranges_.Add(range, zone()); TraceAlloc("Moving live range %d from inactive to active\n", range->id()); @@ -1804,7 +1777,7 @@ for (int i = 0; i < inactive_live_ranges_.length(); ++i) { LiveRange* cur_inactive = inactive_live_ranges_.at(i); - ASSERT(cur_inactive->End().Value() > current->Start().Value()); + DCHECK(cur_inactive->End().Value() > current->Start().Value()); LifetimePosition next_intersection = cur_inactive->FirstIntersection(current); if (!next_intersection.IsValid()) continue; @@ -1858,7 +1831,7 @@ // Register reg is available at the range start and is free until // the range end. 
- ASSERT(pos.Value() >= current->End().Value()); + DCHECK(pos.Value() >= current->End().Value()); TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg), current->id()); @@ -1904,7 +1877,7 @@ for (int i = 0; i < inactive_live_ranges_.length(); ++i) { LiveRange* range = inactive_live_ranges_.at(i); - ASSERT(range->End().Value() > current->Start().Value()); + DCHECK(range->End().Value() > current->Start().Value()); LifetimePosition next_intersection = range->FirstIntersection(current); if (!next_intersection.IsValid()) continue; int cur_reg = range->assigned_register(); @@ -1943,7 +1916,7 @@ } // Register reg is not blocked for the whole range. - ASSERT(block_pos[reg].Value() >= current->End().Value()); + DCHECK(block_pos[reg].Value() >= current->End().Value()); TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg), current->id()); @@ -1990,7 +1963,7 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { - ASSERT(current->HasRegisterAssigned()); + DCHECK(current->HasRegisterAssigned()); int reg = current->assigned_register(); LifetimePosition split_pos = current->Start(); for (int i = 0; i < active_live_ranges_.length(); ++i) { @@ -2019,7 +1992,7 @@ for (int i = 0; i < inactive_live_ranges_.length(); ++i) { LiveRange* range = inactive_live_ranges_[i]; - ASSERT(range->End().Value() > current->Start().Value()); + DCHECK(range->End().Value() > current->Start().Value()); if (range->assigned_register() == reg && !range->IsFixed()) { LifetimePosition next_intersection = range->FirstIntersection(current); if (next_intersection.IsValid()) { @@ -2046,14 +2019,14 @@ LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) { - ASSERT(!range->IsFixed()); + DCHECK(!range->IsFixed()); TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value()); if (pos.Value() <= range->Start().Value()) return range; // We can't properly connect liveranges if split occured at the end // of control instruction. 
- ASSERT(pos.IsInstructionStart() || + DCHECK(pos.IsInstructionStart() || !chunk_->instructions()->at(pos.InstructionIndex())->IsControl()); int vreg = GetVirtualRegister(); @@ -2067,14 +2040,14 @@ LiveRange* LAllocator::SplitBetween(LiveRange* range, LifetimePosition start, LifetimePosition end) { - ASSERT(!range->IsFixed()); + DCHECK(!range->IsFixed()); TraceAlloc("Splitting live range %d in position between [%d, %d]\n", range->id(), start.Value(), end.Value()); LifetimePosition split_pos = FindOptimalSplitPos(start, end); - ASSERT(split_pos.Value() >= start.Value()); + DCHECK(split_pos.Value() >= start.Value()); return SplitRangeAt(range, split_pos); } @@ -2083,7 +2056,7 @@ LifetimePosition end) { int start_instr = start.InstructionIndex(); int end_instr = end.InstructionIndex(); - ASSERT(start_instr <= end_instr); + DCHECK(start_instr <= end_instr); // We have no choice if (start_instr == end_instr) return end; @@ -2145,7 +2118,7 @@ end.PrevInstruction().InstructionEnd()); if (!AllocationOk()) return; - ASSERT(third_part != second_part); + DCHECK(third_part != second_part); Spill(second_part); AddToUnhandledSorted(third_part); @@ -2158,7 +2131,7 @@ void LAllocator::Spill(LiveRange* range) { - ASSERT(!range->IsSpilled()); + DCHECK(!range->IsSpilled()); TraceAlloc("Spilling live range %d\n", range->id()); LiveRange* first = range->TopLevel(); @@ -2204,7 +2177,7 @@ if (FLAG_hydrogen_stats) { unsigned size = allocator_->zone()->allocation_size() - allocator_zone_start_allocation_size_; - isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size); + isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size); } if (ShouldProduceTraceOutput()) { diff -Nru nodejs-0.11.13/deps/v8/src/lithium-allocator.h nodejs-0.11.15/deps/v8/src/lithium-allocator.h --- nodejs-0.11.13/deps/v8/src/lithium-allocator.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium-allocator.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // 
Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_LITHIUM_ALLOCATOR_H_ #define V8_LITHIUM_ALLOCATOR_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "lithium.h" -#include "zone.h" +#include "src/allocation.h" +#include "src/lithium.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -40,7 +17,6 @@ // Forward declarations. class HBasicBlock; class HGraph; -class HInstruction; class HPhi; class HTracer; class HValue; @@ -75,7 +51,7 @@ // Returns the index of the instruction to which this lifetime position // corresponds. int InstructionIndex() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return value_ / kStep; } @@ -88,28 +64,28 @@ // Returns the lifetime position for the start of the instruction which // corresponds to this lifetime position. LifetimePosition InstructionStart() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return LifetimePosition(value_ & ~(kStep - 1)); } // Returns the lifetime position for the end of the instruction which // corresponds to this lifetime position. LifetimePosition InstructionEnd() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return LifetimePosition(InstructionStart().Value() + kStep/2); } // Returns the lifetime position for the beginning of the next instruction. LifetimePosition NextInstruction() const { - ASSERT(IsValid()); + DCHECK(IsValid()); return LifetimePosition(InstructionStart().Value() + kStep); } // Returns the lifetime position for the beginning of the previous // instruction. LifetimePosition PrevInstruction() const { - ASSERT(IsValid()); - ASSERT(value_ > 1); + DCHECK(IsValid()); + DCHECK(value_ > 1); return LifetimePosition(InstructionStart().Value() - kStep); } @@ -141,70 +117,12 @@ }; -enum RegisterKind { - UNALLOCATED_REGISTERS, - GENERAL_REGISTERS, - DOUBLE_REGISTERS -}; - - -// A register-allocator view of a Lithium instruction. It contains the id of -// the output operand and a list of input operand uses. - -class LInstruction; -class LEnvironment; - -// Iterator for non-null temp operands. 
-class TempIterator BASE_EMBEDDED { - public: - inline explicit TempIterator(LInstruction* instr); - inline bool Done(); - inline LOperand* Current(); - inline void Advance(); - - private: - inline void SkipUninteresting(); - LInstruction* instr_; - int limit_; - int current_; -}; - - -// Iterator for non-constant input operands. -class InputIterator BASE_EMBEDDED { - public: - inline explicit InputIterator(LInstruction* instr); - inline bool Done(); - inline LOperand* Current(); - inline void Advance(); - - private: - inline void SkipUninteresting(); - LInstruction* instr_; - int limit_; - int current_; -}; - - -class UseIterator BASE_EMBEDDED { - public: - inline explicit UseIterator(LInstruction* instr); - inline bool Done(); - inline LOperand* Current(); - inline void Advance(); - - private: - InputIterator input_iterator_; - DeepIterator env_iterator_; -}; - - // Representation of the non-empty interval [start,end[. class UseInterval: public ZoneObject { public: UseInterval(LifetimePosition start, LifetimePosition end) : start_(start), end_(end), next_(NULL) { - ASSERT(start.Value() < end.Value()); + DCHECK(start.Value() < end.Value()); } LifetimePosition start() const { return start_; } @@ -325,7 +243,7 @@ bool IsSpilled() const { return spilled_; } LOperand* current_hint_operand() const { - ASSERT(current_hint_operand_ == FirstHint()); + DCHECK(current_hint_operand_ == FirstHint()); return current_hint_operand_; } LOperand* FirstHint() const { @@ -336,12 +254,12 @@ } LifetimePosition Start() const { - ASSERT(!IsEmpty()); + DCHECK(!IsEmpty()); return first_interval()->start(); } LifetimePosition End() const { - ASSERT(!IsEmpty()); + DCHECK(!IsEmpty()); return last_interval_->end(); } @@ -446,7 +364,7 @@ void MarkAsOsrEntry() { // There can be only one. - ASSERT(!has_osr_entry_); + DCHECK(!has_osr_entry_); // Simply set a flag to find and process instruction later. 
has_osr_entry_ = true; } diff -Nru nodejs-0.11.13/deps/v8/src/lithium-allocator-inl.h nodejs-0.11.15/deps/v8/src/lithium-allocator-inl.h --- nodejs-0.11.13/deps/v8/src/lithium-allocator-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium-allocator-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,26 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LITHIUM_ALLOCATOR_INL_H_ #define V8_LITHIUM_ALLOCATOR_INL_H_ -#include "lithium-allocator.h" +#include "src/lithium-allocator.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-ia32.h" +#include "src/ia32/lithium-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/lithium-x64.h" +#include "src/x64/lithium-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/lithium-arm64.h" +#include "src/arm64/lithium-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/lithium-arm.h" +#include "src/arm/lithium-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/lithium-mips.h" +#include "src/mips/lithium-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/lithium-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/lithium-x87.h" // NOLINT #else #error "Unknown architecture." #endif @@ -60,98 +41,11 @@ } -TempIterator::TempIterator(LInstruction* instr) - : instr_(instr), - limit_(instr->TempCount()), - current_(0) { - SkipUninteresting(); -} - - -bool TempIterator::Done() { return current_ >= limit_; } - - -LOperand* TempIterator::Current() { - ASSERT(!Done()); - return instr_->TempAt(current_); -} - - -void TempIterator::SkipUninteresting() { - while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_; -} - - -void TempIterator::Advance() { - ++current_; - SkipUninteresting(); -} - - -InputIterator::InputIterator(LInstruction* instr) - : instr_(instr), - limit_(instr->InputCount()), - current_(0) { - SkipUninteresting(); -} - - -bool InputIterator::Done() { return current_ >= limit_; } - - -LOperand* InputIterator::Current() { - ASSERT(!Done()); - ASSERT(instr_->InputAt(current_) != NULL); - return instr_->InputAt(current_); -} - - -void InputIterator::Advance() { - ++current_; - SkipUninteresting(); -} - - -void InputIterator::SkipUninteresting() { - while (current_ < limit_) { - LOperand* 
current = instr_->InputAt(current_); - if (current != NULL && !current->IsConstantOperand()) break; - ++current_; - } -} - - -UseIterator::UseIterator(LInstruction* instr) - : input_iterator_(instr), env_iterator_(instr->environment()) { } - - -bool UseIterator::Done() { - return input_iterator_.Done() && env_iterator_.Done(); -} - - -LOperand* UseIterator::Current() { - ASSERT(!Done()); - LOperand* result = input_iterator_.Done() - ? env_iterator_.Current() - : input_iterator_.Current(); - ASSERT(result != NULL); - return result; -} - - -void UseIterator::Advance() { - input_iterator_.Done() - ? env_iterator_.Advance() - : input_iterator_.Advance(); -} - - void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) { if (range->Kind() == DOUBLE_REGISTERS) { assigned_double_registers_->Add(reg); } else { - ASSERT(range->Kind() == GENERAL_REGISTERS); + DCHECK(range->Kind() == GENERAL_REGISTERS); assigned_registers_->Add(reg); } range->set_assigned_register(reg, chunk()->zone()); diff -Nru nodejs-0.11.13/deps/v8/src/lithium.cc nodejs-0.11.15/deps/v8/src/lithium.cc --- nodejs-0.11.13/deps/v8/src/lithium.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,34 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" -#include "lithium.h" -#include "scopes.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/lithium.h" +#include "src/scopes.h" +#include "src/serialize.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-ia32.h" -#include "ia32/lithium-codegen-ia32.h" +#include "src/ia32/lithium-ia32.h" // NOLINT +#include "src/ia32/lithium-codegen-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/lithium-x64.h" -#include "x64/lithium-codegen-x64.h" +#include "src/x64/lithium-x64.h" // NOLINT +#include "src/x64/lithium-codegen-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/lithium-arm.h" -#include "arm/lithium-codegen-arm.h" +#include "src/arm/lithium-arm.h" // NOLINT +#include "src/arm/lithium-codegen-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/lithium-mips.h" -#include "mips/lithium-codegen-mips.h" +#include "src/mips/lithium-mips.h" // NOLINT +#include "src/mips/lithium-codegen-mips.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/lithium-arm64.h" -#include "arm64/lithium-codegen-arm64.h" +#include "src/arm64/lithium-arm64.h" // NOLINT +#include "src/arm64/lithium-codegen-arm64.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/lithium-mips64.h" // NOLINT +#include "src/mips64/lithium-codegen-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/lithium-x87.h" // NOLINT +#include "src/x87/lithium-codegen-x87.h" // NOLINT #else #error "Unknown architecture." 
#endif @@ -70,21 +55,34 @@ break; case LUnallocated::FIXED_REGISTER: { int reg_index = unalloc->fixed_register_index(); - const char* register_name = - Register::AllocationIndexToString(reg_index); - stream->Add("(=%s)", register_name); + if (reg_index < 0 || + reg_index >= Register::kMaxNumAllocatableRegisters) { + stream->Add("(=invalid_reg#%d)", reg_index); + } else { + const char* register_name = + Register::AllocationIndexToString(reg_index); + stream->Add("(=%s)", register_name); + } break; } case LUnallocated::FIXED_DOUBLE_REGISTER: { int reg_index = unalloc->fixed_register_index(); - const char* double_register_name = - DoubleRegister::AllocationIndexToString(reg_index); - stream->Add("(=%s)", double_register_name); + if (reg_index < 0 || + reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) { + stream->Add("(=invalid_double_reg#%d)", reg_index); + } else { + const char* double_register_name = + DoubleRegister::AllocationIndexToString(reg_index); + stream->Add("(=%s)", double_register_name); + } break; } case LUnallocated::MUST_HAVE_REGISTER: stream->Add("(R)"); break; + case LUnallocated::MUST_HAVE_DOUBLE_REGISTER: + stream->Add("(D)"); + break; case LUnallocated::WRITABLE_REGISTER: stream->Add("(WR)"); break; @@ -105,12 +103,26 @@ case DOUBLE_STACK_SLOT: stream->Add("[double_stack:%d]", index()); break; - case REGISTER: - stream->Add("[%s|R]", Register::AllocationIndexToString(index())); + case REGISTER: { + int reg_index = index(); + if (reg_index < 0 || reg_index >= Register::kMaxNumAllocatableRegisters) { + stream->Add("(=invalid_reg#%d|R)", reg_index); + } else { + stream->Add("[%s|R]", Register::AllocationIndexToString(reg_index)); + } break; - case DOUBLE_REGISTER: - stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index())); + } + case DOUBLE_REGISTER: { + int reg_index = index(); + if (reg_index < 0 || + reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) { + stream->Add("(=invalid_double_reg#%d|R)", reg_index); + } else { 
+ stream->Add("[%s|R]", + DoubleRegister::AllocationIndexToString(reg_index)); + } break; + } } } @@ -201,7 +213,7 @@ void LPointerMap::RecordPointer(LOperand* op, Zone* zone) { // Do not record arguments as pointers. if (op->IsStackSlot() && op->index() < 0) return; - ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); pointer_operands_.Add(op, zone); } @@ -209,7 +221,7 @@ void LPointerMap::RemovePointer(LOperand* op) { // Do not record arguments as pointers. if (op->IsStackSlot() && op->index() < 0) return; - ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); for (int i = 0; i < pointer_operands_.length(); ++i) { if (pointer_operands_[i]->Equals(op)) { pointer_operands_.Remove(i); @@ -222,7 +234,7 @@ void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) { // Do not record arguments as pointers. if (op->IsStackSlot() && op->index() < 0) return; - ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); untagged_operands_.Add(op, zone); } @@ -254,10 +266,11 @@ : spill_slot_count_(0), info_(info), graph_(graph), - instructions_(32, graph->zone()), - pointer_maps_(8, graph->zone()), - inlined_closures_(1, graph->zone()) { -} + instructions_(32, info->zone()), + pointer_maps_(8, info->zone()), + inlined_closures_(1, info->zone()), + deprecation_dependencies_(MapLess(), MapAllocator(info->zone())), + stability_dependencies_(MapLess(), MapAllocator(info->zone())) {} LLabel* LChunk::GetLabel(int block_id) const { @@ -277,7 +290,7 @@ Label* LChunk::GetAssemblyLabel(int block_id) const { LLabel* label = GetLabel(block_id); - ASSERT(!label->HasReplacement()); + DCHECK(!label->HasReplacement()); return label->label(); } @@ -318,7 +331,7 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { - LInstructionGap* gap = new(graph_->zone()) 
LInstructionGap(block); + LInstructionGap* gap = new (zone()) LInstructionGap(block); gap->set_hydrogen_value(instr->hydrogen_value()); int index = -1; if (instr->IsControl()) { @@ -349,14 +362,14 @@ // spill slots. int result = index - info()->num_parameters() - 1; - ASSERT(result < 0); + DCHECK(result < 0); return result; } // A parameter relative to ebp in the arguments stub. int LChunk::ParameterAt(int index) { - ASSERT(-1 <= index); // -1 is the receiver. + DCHECK(-1 <= index); // -1 is the receiver. return (1 + info()->scope()->num_parameters() - index) * kPointerSize; } @@ -395,6 +408,27 @@ } +void LChunk::CommitDependencies(Handle<Code> code) const { + for (MapSet::const_iterator it = deprecation_dependencies_.begin(), + iend = deprecation_dependencies_.end(); it != iend; ++it) { + Handle<Map> map = *it; + DCHECK(!map->is_deprecated()); + DCHECK(map->CanBeDeprecated()); + Map::AddDependentCode(map, DependentCode::kTransitionGroup, code); + } + + for (MapSet::const_iterator it = stability_dependencies_.begin(), + iend = stability_dependencies_.end(); it != iend; ++it) { + Handle<Map> map = *it; + DCHECK(map->is_stable()); + DCHECK(map->CanTransition()); + Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code); + } + + info_->CommitDependencies(code); +} + + LChunk* LChunk::NewChunk(HGraph* graph) { DisallowHandleAllocation no_handles; DisallowHeapAllocation no_gc; @@ -427,16 +461,20 @@ LOG_CODE_EVENT(info()->isolate(), CodeStartLinePosInfoRecordEvent( assembler.positions_recorder())); + // TODO(yangguo) remove this once the code serializer handles code stubs. 
+ if (info()->will_serialize()) assembler.enable_serializer(); LCodeGen generator(this, &assembler, info()); MarkEmptyBlocks(); if (generator.GenerateCode()) { + generator.CheckEnvironmentUsage(); CodeGenerator::MakeCodePrologue(info(), "optimized"); Code::Flags flags = info()->flags(); Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&assembler, flags, info()); generator.FinishCode(code); + CommitDependencies(code); code->set_is_crankshafted(true); void* jit_handler_data = assembler.positions_recorder()->DetachJITHandlerData(); @@ -444,6 +482,9 @@ CodeEndLinePosInfoRecordEvent(*code, jit_handler_data)); CodeGenerator::PrintCode(code, info()); + DCHECK(!(info()->isolate()->serializer_enabled() && + info()->GetMustNotHaveEagerFrame() && + generator.NeedsEagerFrame())); return code; } assembler.AbortedCodeGeneration(); @@ -478,7 +519,7 @@ argument_index_accumulator, objects_to_materialize); BailoutId ast_id = hydrogen_env->ast_id(); - ASSERT(!ast_id.IsNone() || + DCHECK(!ast_id.IsNone() || hydrogen_env->frame_type() != JS_FUNCTION); int value_count = hydrogen_env->length() - hydrogen_env->specials_count(); LEnvironment* result = @@ -500,7 +541,7 @@ LOperand* op; HValue* value = hydrogen_env->values()->at(i); - CHECK(!value->IsPushArgument()); // Do not deopt outgoing arguments + CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments if (value->IsArgumentsObject() || value->IsCapturedObject()) { op = LEnvironment::materialization_marker(); } else { @@ -581,7 +622,7 @@ // Insert a hole for nested objects op = LEnvironment::materialization_marker(); } else { - ASSERT(!arg_value->IsPushArgument()); + DCHECK(!arg_value->IsPushArguments()); // For ordinary values, tell the register allocator we need the value // to be alive here op = UseAny(arg_value); @@ -600,14 +641,6 @@ } -LInstruction* LChunkBuilder::CheckElideControlInstruction( - HControlInstruction* instr) { - HBasicBlock* successor; - if (!instr->KnownSuccessorBlock(&successor)) return NULL; - 
return new(zone()) LGoto(successor); -} - - LPhase::~LPhase() { if (ShouldProduceTraceOutput()) { isolate()->GetHTracer()->TraceLithium(name(), chunk_); diff -Nru nodejs-0.11.13/deps/v8/src/lithium-codegen.cc nodejs-0.11.15/deps/v8/src/lithium-codegen.cc --- nodejs-0.11.13/deps/v8/src/lithium-codegen.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium-codegen.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,32 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "lithium-codegen.h" +#include "src/lithium-codegen.h" #if V8_TARGET_ARCH_IA32 -#include "ia32/lithium-ia32.h" -#include "ia32/lithium-codegen-ia32.h" +#include "src/ia32/lithium-ia32.h" // NOLINT +#include "src/ia32/lithium-codegen-ia32.h" // NOLINT #elif V8_TARGET_ARCH_X64 -#include "x64/lithium-x64.h" -#include "x64/lithium-codegen-x64.h" +#include "src/x64/lithium-x64.h" // NOLINT +#include "src/x64/lithium-codegen-x64.h" // NOLINT #elif V8_TARGET_ARCH_ARM -#include "arm/lithium-arm.h" -#include "arm/lithium-codegen-arm.h" +#include "src/arm/lithium-arm.h" // NOLINT +#include "src/arm/lithium-codegen-arm.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 -#include "arm64/lithium-arm64.h" -#include "arm64/lithium-codegen-arm64.h" +#include "src/arm64/lithium-arm64.h" // NOLINT +#include "src/arm64/lithium-codegen-arm64.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/lithium-mips.h" -#include "mips/lithium-codegen-mips.h" +#include "src/mips/lithium-mips.h" // NOLINT +#include "src/mips/lithium-codegen-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/lithium-mips64.h" // NOLINT +#include "src/mips64/lithium-codegen-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/lithium-x87.h" // NOLINT +#include "src/x87/lithium-codegen-x87.h" // NOLINT #else 
#error Unsupported target architecture. #endif @@ -73,7 +56,7 @@ bool LCodeGenBase::GenerateBody() { - ASSERT(is_generating()); + DCHECK(is_generating()); bool emit_instructions = true; LCodeGen* codegen = static_cast<LCodeGen*>(this); for (current_instruction_ = 0; @@ -122,6 +105,30 @@ } +void LCodeGenBase::CheckEnvironmentUsage() { +#ifdef DEBUG + bool dead_block = false; + for (int i = 0; i < instructions_->length(); i++) { + LInstruction* instr = instructions_->at(i); + HValue* hval = instr->hydrogen_value(); + if (instr->IsLabel()) dead_block = LLabel::cast(instr)->HasReplacement(); + if (dead_block || !hval->block()->IsReachable()) continue; + + HInstruction* hinstr = HInstruction::cast(hval); + if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) { + V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)", + hinstr->Mnemonic(), instr->Mnemonic()); + } + + if (instr->HasEnvironment() && !instr->environment()->has_been_used()) { + V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)", + hinstr->Mnemonic(), instr->Mnemonic()); + } + } +#endif +} + + void LCodeGenBase::Comment(const char* format, ...) { if (!FLAG_code_comments) return; char buffer[4 * KB]; @@ -135,7 +142,7 @@ // issues when the stack allocated buffer goes out of scope. 
size_t length = builder.position(); Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1); - OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); + MemCopy(copy.start(), builder.Finalize(), copy.length()); masm()->RecordComment(copy.start()); } @@ -149,8 +156,19 @@ } +static void AddWeakObjectToCodeDependency(Isolate* isolate, + Handle<Object> object, + Handle<Code> code) { + Heap* heap = isolate->heap(); + heap->EnsureWeakObjectToCodeTable(); + Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object)); + dep = DependentCode::Insert(dep, DependentCode::kWeakCodeGroup, code); + heap->AddWeakObjectToCodeDependency(object, dep); +} + + void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) { - ASSERT(code->is_optimized_code()); + DCHECK(code->is_optimized_code()); ZoneList<Handle<Map> > maps(1, zone()); ZoneList<Handle<JSObject> > objects(1, zone()); ZoneList<Handle<Cell> > cells(1, zone()); @@ -176,6 +194,10 @@ } } } + if (FLAG_enable_ool_constant_pool) { + code->constant_pool()->set_weak_object_state( + ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE); + } #ifdef VERIFY_HEAP // This disables verification of weak embedded objects after full GC. 
// AddDependentCode can cause a GC, which would observe the state where @@ -183,15 +205,32 @@ NoWeakObjectVerificationScope disable_verification_of_embedded_objects; #endif for (int i = 0; i < maps.length(); i++) { - maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); + Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code); } for (int i = 0; i < objects.length(); i++) { - AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code); + AddWeakObjectToCodeDependency(isolate(), objects.at(i), code); } for (int i = 0; i < cells.length(); i++) { - AddWeakObjectToCodeDependency(isolate()->heap(), cells.at(i), code); + AddWeakObjectToCodeDependency(isolate(), cells.at(i), code); } } +void LCodeGenBase::Abort(BailoutReason reason) { + info()->set_bailout_reason(reason); + status_ = ABORTED; +} + + +void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) { + if (map->is_deprecated()) return Abort(kMapBecameDeprecated); + chunk_->AddDeprecationDependency(map); +} + + +void LCodeGenBase::AddStabilityDependency(Handle<Map> map) { + if (!map->is_stable()) return Abort(kMapBecameUnstable); + chunk_->AddStabilityDependency(map); +} + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/lithium-codegen.h nodejs-0.11.15/deps/v8/src/lithium-codegen.h --- nodejs-0.11.13/deps/v8/src/lithium-codegen.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium-codegen.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LITHIUM_CODEGEN_H_ #define V8_LITHIUM_CODEGEN_H_ -#include "v8.h" +#include "src/v8.h" -#include "compiler.h" +#include "src/compiler.h" namespace v8 { namespace internal { @@ -68,6 +45,11 @@ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code); + // Check that an environment assigned via AssignEnvironment is actually being + // used. Redundant assignments keep things alive longer than necessary, and + // consequently lead to worse code, so it's important to minimize this. 
+ void CheckEnvironmentUsage(); + protected: enum Status { UNUSED, @@ -90,6 +72,12 @@ bool is_generating() const { return status_ == GENERATING; } bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + + void Abort(BailoutReason reason); + + // Methods for code dependencies. + void AddDeprecationDependency(Handle<Map> map); + void AddStabilityDependency(Handle<Map> map); }; diff -Nru nodejs-0.11.13/deps/v8/src/lithium.h nodejs-0.11.15/deps/v8/src/lithium.h --- nodejs-0.11.13/deps/v8/src/lithium.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,16 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LITHIUM_H_ #define V8_LITHIUM_H_ -#include "allocation.h" -#include "hydrogen.h" -#include "safepoint-table.h" +#include <set> + +#include "src/allocation.h" +#include "src/hydrogen.h" +#include "src/safepoint-table.h" +#include "src/zone-allocator.h" namespace v8 { namespace internal { @@ -42,7 +22,6 @@ V(Register, REGISTER, 16) \ V(DoubleRegister, DOUBLE_REGISTER, 16) - class LOperand : public ZoneObject { public: enum Kind { @@ -69,9 +48,10 @@ void PrintTo(StringStream* stream); void ConvertTo(Kind kind, int index) { + if (kind == REGISTER) DCHECK(index >= 0); value_ = KindField::encode(kind); value_ |= index << kKindFieldWidth; - ASSERT(this->index() == index); + DCHECK(this->index() == index); } // Calls SetUpCache()/TearDownCache() for each subclass. 
@@ -101,6 +81,7 @@ FIXED_REGISTER, FIXED_DOUBLE_REGISTER, MUST_HAVE_REGISTER, + MUST_HAVE_DOUBLE_REGISTER, WRITABLE_REGISTER, SAME_AS_FIRST_INPUT }; @@ -126,14 +107,14 @@ } LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) { - ASSERT(policy == FIXED_SLOT); + DCHECK(policy == FIXED_SLOT); value_ |= BasicPolicyField::encode(policy); value_ |= index << FixedSlotIndexField::kShift; - ASSERT(this->fixed_slot_index() == index); + DCHECK(this->fixed_slot_index() == index); } LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) { - ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER); + DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER); value_ |= BasicPolicyField::encode(EXTENDED_POLICY); value_ |= ExtendedPolicyField::encode(policy); value_ |= LifetimeField::encode(USED_AT_END); @@ -154,7 +135,7 @@ } static LUnallocated* cast(LOperand* op) { - ASSERT(op->IsUnallocated()); + DCHECK(op->IsUnallocated()); return reinterpret_cast<LUnallocated*>(op); } @@ -210,6 +191,10 @@ extended_policy() == WRITABLE_REGISTER || extended_policy() == MUST_HAVE_REGISTER); } + bool HasDoubleRegisterPolicy() const { + return basic_policy() == EXTENDED_POLICY && + extended_policy() == MUST_HAVE_DOUBLE_REGISTER; + } bool HasSameAsInputPolicy() const { return basic_policy() == EXTENDED_POLICY && extended_policy() == SAME_AS_FIRST_INPUT; @@ -237,19 +222,19 @@ // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy. ExtendedPolicy extended_policy() const { - ASSERT(basic_policy() == EXTENDED_POLICY); + DCHECK(basic_policy() == EXTENDED_POLICY); return ExtendedPolicyField::decode(value_); } // [fixed_slot_index]: Only for FIXED_SLOT. int fixed_slot_index() const { - ASSERT(HasFixedSlotPolicy()); + DCHECK(HasFixedSlotPolicy()); return static_cast<int>(value_) >> FixedSlotIndexField::kShift; } // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER. 
int fixed_register_index() const { - ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy()); + DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy()); return FixedRegisterField::decode(value_); } @@ -263,7 +248,7 @@ // [lifetime]: Only for non-FIXED_SLOT. bool IsUsedAtStart() { - ASSERT(basic_policy() == EXTENDED_POLICY); + DCHECK(basic_policy() == EXTENDED_POLICY); return LifetimeField::decode(value_) == USED_AT_START; } }; @@ -293,9 +278,10 @@ } // A move is redundant if it's been eliminated, if its source and - // destination are the same, or if its destination is unneeded. + // destination are the same, or if its destination is unneeded or constant. bool IsRedundant() const { - return IsEliminated() || source_->Equals(destination_) || IsIgnored(); + return IsEliminated() || source_->Equals(destination_) || IsIgnored() || + (destination_ != NULL && destination_->IsConstantOperand()); } bool IsIgnored() const { @@ -305,7 +291,7 @@ // We clear both operands to indicate move that's been eliminated. 
void Eliminate() { source_ = destination_ = NULL; } bool IsEliminated() const { - ASSERT(source_ != NULL || destination_ == NULL); + DCHECK(source_ != NULL || destination_ == NULL); return source_ == NULL; } @@ -319,13 +305,13 @@ class LSubKindOperand V8_FINAL : public LOperand { public: static LSubKindOperand* Create(int index, Zone* zone) { - ASSERT(index >= 0); + DCHECK(index >= 0); if (index < kNumCachedOperands) return &cache[index]; return new(zone) LSubKindOperand(index); } static LSubKindOperand* cast(LOperand* op) { - ASSERT(op->kind() == kOperandKind); + DCHECK(op->kind() == kOperandKind); return reinterpret_cast<LSubKindOperand*>(op); } @@ -356,9 +342,7 @@ bool IsRedundant() const; - const ZoneList<LMoveOperands>* move_operands() const { - return &move_operands_; - } + ZoneList<LMoveOperands>* move_operands() { return &move_operands_; } void PrintDataTo(StringStream* stream) const; @@ -384,7 +368,7 @@ int lithium_position() const { return lithium_position_; } void set_lithium_position(int pos) { - ASSERT(lithium_position_ == -1); + DCHECK(lithium_position_ == -1); lithium_position_ = pos; } @@ -426,7 +410,8 @@ object_mapping_(0, zone), outer_(outer), entry_(entry), - zone_(zone) { } + zone_(zone), + has_been_used_(false) { } Handle<JSFunction> closure() const { return closure_; } FrameType frame_type() const { return frame_type_; } @@ -442,12 +427,15 @@ HEnterInlined* entry() { return entry_; } Zone* zone() const { return zone_; } + bool has_been_used() const { return has_been_used_; } + void set_has_been_used() { has_been_used_ = true; } + void AddValue(LOperand* operand, Representation representation, bool is_uint32) { values_.Add(operand, zone()); if (representation.IsSmiOrTagged()) { - ASSERT(!is_uint32); + DCHECK(!is_uint32); is_tagged_.Add(values_.length() - 1, zone()); } @@ -478,17 +466,17 @@ } int ObjectDuplicateOfAt(int index) { - ASSERT(ObjectIsDuplicateAt(index)); + DCHECK(ObjectIsDuplicateAt(index)); return 
LengthOrDupeField::decode(object_mapping_[index]); } int ObjectLengthAt(int index) { - ASSERT(!ObjectIsDuplicateAt(index)); + DCHECK(!ObjectIsDuplicateAt(index)); return LengthOrDupeField::decode(object_mapping_[index]); } bool ObjectIsArgumentsAt(int index) { - ASSERT(!ObjectIsDuplicateAt(index)); + DCHECK(!ObjectIsDuplicateAt(index)); return IsArgumentsField::decode(object_mapping_[index]); } @@ -499,7 +487,7 @@ void Register(int deoptimization_index, int translation_index, int pc_offset) { - ASSERT(!HasBeenRegistered()); + DCHECK(!HasBeenRegistered()); deoptimization_index_ = deoptimization_index; translation_index_ = translation_index; pc_offset_ = pc_offset; @@ -541,6 +529,7 @@ LEnvironment* outer_; HEnterInlined* entry_; Zone* zone_; + bool has_been_used_; }; @@ -557,13 +546,13 @@ bool Done() { return current_ >= limit_; } LOperand* Current() { - ASSERT(!Done()); - ASSERT(env_->values()->at(current_) != NULL); + DCHECK(!Done()); + DCHECK(env_->values()->at(current_) != NULL); return env_->values()->at(current_); } void Advance() { - ASSERT(!Done()); + DCHECK(!Done()); ++current_; SkipUninteresting(); } @@ -599,8 +588,8 @@ bool Done() { return current_iterator_.Done(); } LOperand* Current() { - ASSERT(!current_iterator_.Done()); - ASSERT(current_iterator_.Current() != NULL); + DCHECK(!current_iterator_.Done()); + DCHECK(current_iterator_.Current() != NULL); return current_iterator_.Current(); } @@ -660,6 +649,20 @@ inlined_closures_.Add(closure, zone()); } + void AddDeprecationDependency(Handle<Map> map) { + DCHECK(!map->is_deprecated()); + if (!map->CanBeDeprecated()) return; + DCHECK(!info_->IsStub()); + deprecation_dependencies_.insert(map); + } + + void AddStabilityDependency(Handle<Map> map) { + DCHECK(map->is_stable()); + if (!map->CanTransition()) return; + DCHECK(!info_->IsStub()); + stability_dependencies_.insert(map); + } + Zone* zone() const { return info_->zone(); } Handle<Code> Codegen(); @@ -675,12 +678,20 @@ int spill_slot_count_; private: + 
typedef std::less<Handle<Map> > MapLess; + typedef zone_allocator<Handle<Map> > MapAllocator; + typedef std::set<Handle<Map>, MapLess, MapAllocator> MapSet; + + void CommitDependencies(Handle<Code> code) const; + CompilationInfo* info_; HGraph* const graph_; BitVector* allocated_double_registers_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; ZoneList<Handle<JSFunction> > inlined_closures_; + MapSet deprecation_dependencies_; + MapSet stability_dependencies_; }; @@ -735,6 +746,61 @@ }; +// A register-allocator view of a Lithium instruction. It contains the id of +// the output operand and a list of input operand uses. + +enum RegisterKind { + UNALLOCATED_REGISTERS, + GENERAL_REGISTERS, + DOUBLE_REGISTERS +}; + +// Iterator for non-null temp operands. +class TempIterator BASE_EMBEDDED { + public: + inline explicit TempIterator(LInstruction* instr); + inline bool Done(); + inline LOperand* Current(); + inline void Advance(); + + private: + inline void SkipUninteresting(); + LInstruction* instr_; + int limit_; + int current_; +}; + + +// Iterator for non-constant input operands. 
+class InputIterator BASE_EMBEDDED { + public: + inline explicit InputIterator(LInstruction* instr); + inline bool Done(); + inline LOperand* Current(); + inline void Advance(); + + private: + inline void SkipUninteresting(); + LInstruction* instr_; + int limit_; + int current_; +}; + + +class UseIterator BASE_EMBEDDED { + public: + inline explicit UseIterator(LInstruction* instr); + inline bool Done(); + inline LOperand* Current(); + inline void Advance(); + + private: + InputIterator input_iterator_; + DeepIterator env_iterator_; +}; + +class LInstruction; +class LCodeGen; } } // namespace v8::internal #endif // V8_LITHIUM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/lithium-inl.h nodejs-0.11.15/deps/v8/src/lithium-inl.h --- nodejs-0.11.13/deps/v8/src/lithium-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lithium-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,112 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_LITHIUM_INL_H_ +#define V8_LITHIUM_INL_H_ + +#include "src/lithium.h" + +#if V8_TARGET_ARCH_IA32 +#include "src/ia32/lithium-ia32.h" // NOLINT +#elif V8_TARGET_ARCH_X64 +#include "src/x64/lithium-x64.h" // NOLINT +#elif V8_TARGET_ARCH_ARM64 +#include "src/arm64/lithium-arm64.h" // NOLINT +#elif V8_TARGET_ARCH_ARM +#include "src/arm/lithium-arm.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS +#include "src/mips/lithium-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/lithium-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_X87 +#include "src/x87/lithium-x87.h" // NOLINT +#else +#error "Unknown architecture." 
+#endif + +namespace v8 { +namespace internal { + +TempIterator::TempIterator(LInstruction* instr) + : instr_(instr), limit_(instr->TempCount()), current_(0) { + SkipUninteresting(); +} + + +bool TempIterator::Done() { return current_ >= limit_; } + + +LOperand* TempIterator::Current() { + DCHECK(!Done()); + return instr_->TempAt(current_); +} + + +void TempIterator::SkipUninteresting() { + while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_; +} + + +void TempIterator::Advance() { + ++current_; + SkipUninteresting(); +} + + +InputIterator::InputIterator(LInstruction* instr) + : instr_(instr), limit_(instr->InputCount()), current_(0) { + SkipUninteresting(); +} + + +bool InputIterator::Done() { return current_ >= limit_; } + + +LOperand* InputIterator::Current() { + DCHECK(!Done()); + DCHECK(instr_->InputAt(current_) != NULL); + return instr_->InputAt(current_); +} + + +void InputIterator::Advance() { + ++current_; + SkipUninteresting(); +} + + +void InputIterator::SkipUninteresting() { + while (current_ < limit_) { + LOperand* current = instr_->InputAt(current_); + if (current != NULL && !current->IsConstantOperand()) break; + ++current_; + } +} + + +UseIterator::UseIterator(LInstruction* instr) + : input_iterator_(instr), env_iterator_(instr->environment()) {} + + +bool UseIterator::Done() { + return input_iterator_.Done() && env_iterator_.Done(); +} + + +LOperand* UseIterator::Current() { + DCHECK(!Done()); + LOperand* result = input_iterator_.Done() ? env_iterator_.Current() + : input_iterator_.Current(); + DCHECK(result != NULL); + return result; +} + + +void UseIterator::Advance() { + input_iterator_.Done() ? 
env_iterator_.Advance() : input_iterator_.Advance(); +} +} +} // namespace v8::internal + +#endif // V8_LITHIUM_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/liveedit.cc nodejs-0.11.15/deps/v8/src/liveedit.cc --- nodejs-0.11.13/deps/v8/src/liveedit.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/liveedit.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,53 +1,26 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -#include "v8.h" - -#include "liveedit.h" - -#include "code-stubs.h" -#include "compilation-cache.h" -#include "compiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "global-handles.h" -#include "messages.h" -#include "parser.h" -#include "scopeinfo.h" -#include "scopes.h" -#include "v8memory.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -namespace v8 { -namespace internal { +#include "src/v8.h" + +#include "src/liveedit.h" -#ifdef ENABLE_DEBUGGER_SUPPORT +#include "src/code-stubs.h" +#include "src/compilation-cache.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/global-handles.h" +#include "src/messages.h" +#include "src/parser.h" +#include "src/scopeinfo.h" +#include "src/scopes.h" +#include "src/v8memory.h" +namespace v8 { +namespace internal { void SetElementSloppy(Handle<JSObject> object, uint32_t index, @@ -55,10 +28,7 @@ // Ignore return value from SetElement. It can only be a failure if there // are element setters causing exceptions and the debugger context has none // of these. - Handle<Object> no_failure = - JSObject::SetElement(object, index, value, NONE, SLOPPY); - ASSERT(!no_failure.is_null()); - USE(no_failure); + JSObject::SetElement(object, index, value, NONE, SLOPPY).Assert(); } @@ -191,7 +161,7 @@ // Each cell keeps a value plus direction. Value is multiplied by 4. void set_value4_and_dir(int i1, int i2, int value4, Direction dir) { - ASSERT((value4 & kDirectionMask) == 0); + DCHECK((value4 & kDirectionMask) == 0); get_cell(i1, i2) = value4 | dir; } @@ -204,7 +174,7 @@ static const int kDirectionSizeBits = 2; static const int kDirectionMask = (1 << kDirectionSizeBits) - 1; - static const int kEmptyCellValue = -1 << kDirectionSizeBits; + static const int kEmptyCellValue = ~0u << kDirectionSizeBits; // This method only holds static assert statement (unfortunately you cannot // place one in class scope). 
@@ -434,7 +404,7 @@ class LineEndsWrapper { public: explicit LineEndsWrapper(Handle<String> string) - : ends_array_(CalculateLineEnds(string, false)), + : ends_array_(String::CalculateLineEnds(string, false)), string_len_(string->length()) { } int length() { @@ -585,8 +555,8 @@ Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1, Handle<String> s2) { - s1 = FlattenGetString(s1); - s2 = FlattenGetString(s2); + s1 = String::Flatten(s1); + s2 = String::Flatten(s2); LineEndsWrapper line_ends1(s1); LineEndsWrapper line_ends2(s2); @@ -635,168 +605,94 @@ } -// Simple helper class that creates more or less typed structures over -// JSArray object. This is an adhoc method of passing structures from C++ -// to JavaScript. -template<typename S> -class JSArrayBasedStruct { - public: - static S Create(Isolate* isolate) { - Factory* factory = isolate->factory(); - Handle<JSArray> array = factory->NewJSArray(S::kSize_); - return S(array); - } - static S cast(Object* object) { - JSArray* array = JSArray::cast(object); - Handle<JSArray> array_handle(array); - return S(array_handle); - } - explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) { - } - Handle<JSArray> GetJSArray() { - return array_; - } - Isolate* isolate() const { - return array_->GetIsolate(); - } - - protected: - void SetField(int field_position, Handle<Object> value) { - SetElementSloppy(array_, field_position, value); - } - void SetSmiValueField(int field_position, int value) { - SetElementSloppy(array_, - field_position, - Handle<Smi>(Smi::FromInt(value), isolate())); - } - Handle<Object> GetField(int field_position) { - return Object::GetElementNoExceptionThrown( - isolate(), array_, field_position); - } - int GetSmiValueField(int field_position) { - Handle<Object> res = GetField(field_position); - return Handle<Smi>::cast(res)->value(); - } +void FunctionInfoWrapper::SetInitialProperties(Handle<String> name, + int start_position, + int end_position, + int param_num, + int literal_count, + 
int slot_count, + int parent_index) { + HandleScope scope(isolate()); + this->SetField(kFunctionNameOffset_, name); + this->SetSmiValueField(kStartPositionOffset_, start_position); + this->SetSmiValueField(kEndPositionOffset_, end_position); + this->SetSmiValueField(kParamNumOffset_, param_num); + this->SetSmiValueField(kLiteralNumOffset_, literal_count); + this->SetSmiValueField(kSlotNumOffset_, slot_count); + this->SetSmiValueField(kParentIndexOffset_, parent_index); +} - private: - Handle<JSArray> array_; -}; +void FunctionInfoWrapper::SetFunctionCode(Handle<Code> function_code, + Handle<HeapObject> code_scope_info) { + Handle<JSValue> code_wrapper = WrapInJSValue(function_code); + this->SetField(kCodeOffset_, code_wrapper); -// Represents some function compilation details. This structure will be used -// from JavaScript. It contains Code object, which is kept wrapped -// into a BlindReference for sanitizing reasons. -class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { - public: - explicit FunctionInfoWrapper(Handle<JSArray> array) - : JSArrayBasedStruct<FunctionInfoWrapper>(array) { - } - void SetInitialProperties(Handle<String> name, int start_position, - int end_position, int param_num, - int literal_count, int parent_index) { - HandleScope scope(isolate()); - this->SetField(kFunctionNameOffset_, name); - this->SetSmiValueField(kStartPositionOffset_, start_position); - this->SetSmiValueField(kEndPositionOffset_, end_position); - this->SetSmiValueField(kParamNumOffset_, param_num); - this->SetSmiValueField(kLiteralNumOffset_, literal_count); - this->SetSmiValueField(kParentIndexOffset_, parent_index); - } - void SetFunctionCode(Handle<Code> function_code, - Handle<HeapObject> code_scope_info) { - Handle<JSValue> code_wrapper = WrapInJSValue(function_code); - this->SetField(kCodeOffset_, code_wrapper); - - Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info); - this->SetField(kCodeScopeInfoOffset_, scope_wrapper); - } - void 
SetFunctionScopeInfo(Handle<Object> scope_info_array) { - this->SetField(kFunctionScopeInfoOffset_, scope_info_array); - } - void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) { - Handle<JSValue> info_holder = WrapInJSValue(info); - this->SetField(kSharedFunctionInfoOffset_, info_holder); - } - int GetLiteralCount() { - return this->GetSmiValueField(kLiteralNumOffset_); - } - int GetParentIndex() { - return this->GetSmiValueField(kParentIndexOffset_); - } - Handle<Code> GetFunctionCode() { - Handle<Object> element = this->GetField(kCodeOffset_); - Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element); - Handle<Object> raw_result = UnwrapJSValue(value_wrapper); - CHECK(raw_result->IsCode()); - return Handle<Code>::cast(raw_result); - } - Handle<Object> GetCodeScopeInfo() { - Handle<Object> element = this->GetField(kCodeScopeInfoOffset_); - return UnwrapJSValue(Handle<JSValue>::cast(element)); - } - int GetStartPosition() { - return this->GetSmiValueField(kStartPositionOffset_); - } - int GetEndPosition() { - return this->GetSmiValueField(kEndPositionOffset_); - } + Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info); + this->SetField(kCodeScopeInfoOffset_, scope_wrapper); +} - private: - static const int kFunctionNameOffset_ = 0; - static const int kStartPositionOffset_ = 1; - static const int kEndPositionOffset_ = 2; - static const int kParamNumOffset_ = 3; - static const int kCodeOffset_ = 4; - static const int kCodeScopeInfoOffset_ = 5; - static const int kFunctionScopeInfoOffset_ = 6; - static const int kParentIndexOffset_ = 7; - static const int kSharedFunctionInfoOffset_ = 8; - static const int kLiteralNumOffset_ = 9; - static const int kSize_ = 10; - friend class JSArrayBasedStruct<FunctionInfoWrapper>; -}; +void FunctionInfoWrapper::SetSharedFunctionInfo( + Handle<SharedFunctionInfo> info) { + Handle<JSValue> info_holder = WrapInJSValue(info); + this->SetField(kSharedFunctionInfoOffset_, info_holder); +} -// Wraps 
SharedFunctionInfo along with some of its fields for passing it -// back to JavaScript. SharedFunctionInfo object itself is additionally -// wrapped into BlindReference for sanitizing reasons. -class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> { - public: - static bool IsInstance(Handle<JSArray> array) { - return array->length() == Smi::FromInt(kSize_) && - Object::GetElementNoExceptionThrown( - array->GetIsolate(), array, kSharedInfoOffset_)->IsJSValue(); - } +Handle<Code> FunctionInfoWrapper::GetFunctionCode() { + Handle<Object> element = this->GetField(kCodeOffset_); + Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element); + Handle<Object> raw_result = UnwrapJSValue(value_wrapper); + CHECK(raw_result->IsCode()); + return Handle<Code>::cast(raw_result); +} - explicit SharedInfoWrapper(Handle<JSArray> array) - : JSArrayBasedStruct<SharedInfoWrapper>(array) { - } - void SetProperties(Handle<String> name, int start_position, int end_position, - Handle<SharedFunctionInfo> info) { - HandleScope scope(isolate()); - this->SetField(kFunctionNameOffset_, name); - Handle<JSValue> info_holder = WrapInJSValue(info); - this->SetField(kSharedInfoOffset_, info_holder); - this->SetSmiValueField(kStartPositionOffset_, start_position); - this->SetSmiValueField(kEndPositionOffset_, end_position); - } - Handle<SharedFunctionInfo> GetInfo() { - Handle<Object> element = this->GetField(kSharedInfoOffset_); +Handle<FixedArray> FunctionInfoWrapper::GetFeedbackVector() { + Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_); + Handle<FixedArray> result; + if (element->IsJSValue()) { Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element); - return UnwrapSharedFunctionInfoFromJSValue(value_wrapper); + Handle<Object> raw_result = UnwrapJSValue(value_wrapper); + Handle<SharedFunctionInfo> shared = + Handle<SharedFunctionInfo>::cast(raw_result); + result = Handle<FixedArray>(shared->feedback_vector(), isolate()); + 
CHECK_EQ(result->length(), GetSlotCount()); + } else { + // Scripts may never have a SharedFunctionInfo created, so + // create a type feedback vector here. + int slot_count = GetSlotCount(); + result = isolate()->factory()->NewTypeFeedbackVector(slot_count); } + return result; +} - private: - static const int kFunctionNameOffset_ = 0; - static const int kStartPositionOffset_ = 1; - static const int kEndPositionOffset_ = 2; - static const int kSharedInfoOffset_ = 3; - static const int kSize_ = 4; - friend class JSArrayBasedStruct<SharedInfoWrapper>; -}; +Handle<Object> FunctionInfoWrapper::GetCodeScopeInfo() { + Handle<Object> element = this->GetField(kCodeScopeInfoOffset_); + return UnwrapJSValue(Handle<JSValue>::cast(element)); +} + + +void SharedInfoWrapper::SetProperties(Handle<String> name, + int start_position, + int end_position, + Handle<SharedFunctionInfo> info) { + HandleScope scope(isolate()); + this->SetField(kFunctionNameOffset_, name); + Handle<JSValue> info_holder = WrapInJSValue(info); + this->SetField(kSharedInfoOffset_, info_holder); + this->SetSmiValueField(kStartPositionOffset_, start_position); + this->SetSmiValueField(kEndPositionOffset_, end_position); +} + + +Handle<SharedFunctionInfo> SharedInfoWrapper::GetInfo() { + Handle<Object> element = this->GetField(kSharedInfoOffset_); + Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element); + return UnwrapSharedFunctionInfoFromJSValue(value_wrapper); +} class FunctionInfoListener { @@ -813,6 +709,7 @@ info.SetInitialProperties(fun->name(), fun->start_position(), fun->end_position(), fun->parameter_count(), fun->materialized_literal_count(), + fun->slot_count(), current_parent_index_); current_parent_index_ = len_; SetElementSloppy(result_, len_, info.GetJSArray()); @@ -823,8 +720,8 @@ HandleScope scope(isolate()); FunctionInfoWrapper info = FunctionInfoWrapper::cast( - *Object::GetElementNoExceptionThrown( - isolate(), result_, current_parent_index_)); + *Object::GetElement( + isolate(), 
result_, current_parent_index_).ToHandleChecked()); current_parent_index_ = info.GetParentIndex(); } @@ -833,8 +730,8 @@ void FunctionCode(Handle<Code> function_code) { FunctionInfoWrapper info = FunctionInfoWrapper::cast( - *Object::GetElementNoExceptionThrown( - isolate(), result_, current_parent_index_)); + *Object::GetElement( + isolate(), result_, current_parent_index_).ToHandleChecked()); info.SetFunctionCode(function_code, Handle<HeapObject>(isolate()->heap()->null_value())); } @@ -848,14 +745,13 @@ } FunctionInfoWrapper info = FunctionInfoWrapper::cast( - *Object::GetElementNoExceptionThrown( - isolate(), result_, current_parent_index_)); + *Object::GetElement( + isolate(), result_, current_parent_index_).ToHandleChecked()); info.SetFunctionCode(Handle<Code>(shared->code()), Handle<HeapObject>(shared->scope_info())); info.SetSharedFunctionInfo(shared); - Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone), - isolate()); + Handle<Object> scope_info_list = SerializeFunctionScope(scope, zone); info.SetFunctionScopeInfo(scope_info_list); } @@ -864,9 +760,7 @@ private: Isolate* isolate() const { return result_->GetIsolate(); } - Object* SerializeFunctionScope(Scope* scope, Zone* zone) { - HandleScope handle_scope(isolate()); - + Handle<Object> SerializeFunctionScope(Scope* scope, Zone* zone) { Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10); int scope_info_length = 0; @@ -875,6 +769,7 @@ // scopes of this chain. 
Scope* current_scope = scope; while (current_scope != NULL) { + HandleScope handle_scope(isolate()); ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone); ZoneList<Variable*> context_list( current_scope->ContextLocalCount(), zone); @@ -901,7 +796,7 @@ current_scope = current_scope->outer_scope(); } - return *scope_info_list; + return scope_info_list; } Handle<JSArray> result_; @@ -910,8 +805,43 @@ }; -JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script, - Handle<String> source) { +void LiveEdit::InitializeThreadLocal(Debug* debug) { + debug->thread_local_.frame_drop_mode_ = LiveEdit::FRAMES_UNTOUCHED; +} + + +bool LiveEdit::SetAfterBreakTarget(Debug* debug) { + Code* code = NULL; + Isolate* isolate = debug->isolate_; + switch (debug->thread_local_.frame_drop_mode_) { + case FRAMES_UNTOUCHED: + return false; + case FRAME_DROPPED_IN_IC_CALL: + // We must have been calling IC stub. Do not go there anymore. + code = isolate->builtins()->builtin(Builtins::kPlainReturn_LiveEdit); + break; + case FRAME_DROPPED_IN_DEBUG_SLOT_CALL: + // Debug break slot stub does not return normally, instead it manually + // cleans the stack and jumps. We should patch the jump address. + code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit); + break; + case FRAME_DROPPED_IN_DIRECT_CALL: + // Nothing to do, after_break_target is not used here. + return true; + case FRAME_DROPPED_IN_RETURN_CALL: + code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit); + break; + case CURRENTLY_SET_MODE: + UNREACHABLE(); + break; + } + debug->after_break_target_ = code->entry(); + return true; +} + + +MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script, + Handle<String> source) { Isolate* isolate = script->GetIsolate(); FunctionInfoListener listener(isolate); @@ -933,8 +863,7 @@ // A logical 'catch' section. 
Handle<JSObject> rethrow_exception; if (isolate->has_pending_exception()) { - Handle<Object> exception(isolate->pending_exception()->ToObjectChecked(), - isolate); + Handle<Object> exception(isolate->pending_exception(), isolate); MessageLocation message_location = isolate->GetMessageLocation(); isolate->clear_pending_message(); @@ -954,13 +883,14 @@ Handle<Smi> start_pos( Smi::FromInt(message_location.start_pos()), isolate); Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate); - Handle<JSValue> script_obj = GetScriptWrapper(message_location.script()); - JSReceiver::SetProperty( - rethrow_exception, start_pos_key, start_pos, NONE, SLOPPY); - JSReceiver::SetProperty( - rethrow_exception, end_pos_key, end_pos, NONE, SLOPPY); - JSReceiver::SetProperty( - rethrow_exception, script_obj_key, script_obj, NONE, SLOPPY); + Handle<JSObject> script_obj = + Script::GetWrapper(message_location.script()); + Object::SetProperty(rethrow_exception, start_pos_key, start_pos, SLOPPY) + .Assert(); + Object::SetProperty(rethrow_exception, end_pos_key, end_pos, SLOPPY) + .Assert(); + Object::SetProperty(rethrow_exception, script_obj_key, script_obj, SLOPPY) + .Assert(); } } @@ -969,10 +899,9 @@ script->set_source(*original_source); if (rethrow_exception.is_null()) { - return *(listener.GetResult()); + return listener.GetResult(); } else { - isolate->Throw(*rethrow_exception); - return 0; + return isolate->Throw<JSArray>(rethrow_exception); } } @@ -984,7 +913,7 @@ for (int i = 0; i < len; i++) { Handle<SharedFunctionInfo> info( SharedFunctionInfo::cast( - *Object::GetElementNoExceptionThrown(isolate, array, i))); + *Object::GetElement(isolate, array, i).ToHandleChecked())); SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate); Handle<String> name_handle(String::cast(info->name())); info_wrapper.SetProperties(name_handle, info->start_position(), @@ -1045,12 +974,9 @@ // to code objects (that are never in new space) without worrying about // write 
barriers. Heap* heap = original->GetHeap(); - heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "liveedit.cc ReplaceCodeObject"); - - ASSERT(!heap->InNewSpace(*substitution)); + HeapIterator iterator(heap); - DisallowHeapAllocation no_allocation; + DCHECK(!heap->InNewSpace(*substitution)); ReplacingVisitor visitor(*original, *substitution); @@ -1061,7 +987,6 @@ // Now iterate over all pointers of all objects, including code_target // implicit pointers. - HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { obj->Iterate(&visitor); } @@ -1088,7 +1013,7 @@ // If literal count didn't change, simply go over all functions // and clear literal arrays. ClearValuesVisitor visitor; - IterateJSFunctions(*shared_info, &visitor); + IterateJSFunctions(shared_info, &visitor); } else { // When literal count changes, we have to create new array instances. // Since we cannot create instances when iterating heap, we should first @@ -1123,16 +1048,14 @@ // Iterates all function instances in the HEAP that refers to the // provided shared_info. 
template<typename Visitor> - static void IterateJSFunctions(SharedFunctionInfo* shared_info, + static void IterateJSFunctions(Handle<SharedFunctionInfo> shared_info, Visitor* visitor) { - DisallowHeapAllocation no_allocation; - HeapIterator iterator(shared_info->GetHeap()); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsJSFunction()) { JSFunction* function = JSFunction::cast(obj); - if (function->shared() == shared_info) { + if (function->shared() == *shared_info) { visitor->visit(function); } } @@ -1145,13 +1068,13 @@ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) { CountVisitor count_visitor; count_visitor.count = 0; - IterateJSFunctions(*shared_info, &count_visitor); + IterateJSFunctions(shared_info, &count_visitor); int size = count_visitor.count; Handle<FixedArray> result = isolate->factory()->NewFixedArray(size); if (size > 0) { CollectVisitor collect_visitor(result); - IterateJSFunctions(*shared_info, &collect_visitor); + IterateJSFunctions(shared_info, &collect_visitor); } return result; } @@ -1237,7 +1160,7 @@ virtual void LeaveContext(Context* context) { } // Don't care. virtual void VisitFunction(JSFunction* function) { // It should be guaranteed by the iterator that everything is optimized. - ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION); if (shared_info_ == function->shared() || IsInlined(function, shared_info_)) { // Mark the code for deoptimization. 
@@ -1261,23 +1184,16 @@ } -MaybeObject* LiveEdit::ReplaceFunctionCode( +void LiveEdit::ReplaceFunctionCode( Handle<JSArray> new_compile_info_array, Handle<JSArray> shared_info_array) { Isolate* isolate = new_compile_info_array->GetIsolate(); - HandleScope scope(isolate); - - if (!SharedInfoWrapper::IsInstance(shared_info_array)) { - return isolate->ThrowIllegalOperation(); - } FunctionInfoWrapper compile_info_wrapper(new_compile_info_array); SharedInfoWrapper shared_info_wrapper(shared_info_array); Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo(); - isolate->heap()->EnsureHeapIsIterable(); - if (IsJSFunctionCode(shared_info->code())) { Handle<Code> code = compile_info_wrapper.GetFunctionCode(); ReplaceCodeObject(Handle<Code>(shared_info->code()), code); @@ -1286,6 +1202,10 @@ shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info)); } shared_info->DisableOptimization(kLiveEdit); + // Update the type feedback vector + Handle<FixedArray> feedback_vector = + compile_info_wrapper.GetFeedbackVector(); + shared_info->set_feedback_vector(*feedback_vector); } if (shared_info->debug_info()->IsDebugInfo()) { @@ -1307,27 +1227,15 @@ DeoptimizeDependentFunctions(*shared_info); isolate->compilation_cache()->Remove(shared_info); - - return isolate->heap()->undefined_value(); } -MaybeObject* LiveEdit::FunctionSourceUpdated( - Handle<JSArray> shared_info_array) { - Isolate* isolate = shared_info_array->GetIsolate(); - HandleScope scope(isolate); - - if (!SharedInfoWrapper::IsInstance(shared_info_array)) { - return isolate->ThrowIllegalOperation(); - } - +void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array) { SharedInfoWrapper shared_info_wrapper(shared_info_array); Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo(); DeoptimizeDependentFunctions(*shared_info); - isolate->compilation_cache()->Remove(shared_info); - - return isolate->heap()->undefined_value(); + 
shared_info_array->GetIsolate()->compilation_cache()->Remove(shared_info); } @@ -1359,21 +1267,21 @@ // TODO(635): binary search may be used here for (int i = 0; i < array_len; i += 3) { HandleScope scope(isolate); - Handle<Object> element = Object::GetElementNoExceptionThrown( - isolate, position_change_array, i); + Handle<Object> element = Object::GetElement( + isolate, position_change_array, i).ToHandleChecked(); CHECK(element->IsSmi()); int chunk_start = Handle<Smi>::cast(element)->value(); if (original_position < chunk_start) { break; } - element = Object::GetElementNoExceptionThrown( - isolate, position_change_array, i + 1); + element = Object::GetElement( + isolate, position_change_array, i + 1).ToHandleChecked(); CHECK(element->IsSmi()); int chunk_end = Handle<Smi>::cast(element)->value(); // Position mustn't be inside a chunk. - ASSERT(original_position >= chunk_end); - element = Object::GetElementNoExceptionThrown( - isolate, position_change_array, i + 2); + DCHECK(original_position >= chunk_end); + element = Object::GetElement( + isolate, position_change_array, i + 2).ToHandleChecked(); CHECK(element->IsSmi()); int chunk_changed_end = Handle<Smi>::cast(element)->value(); position_diff = chunk_changed_end - chunk_end; @@ -1438,8 +1346,8 @@ // Copy the data. int curently_used_size = static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos()); - OS::MemMove(new_buffer + new_buffer_size - curently_used_size, - reloc_info_writer_.pos(), curently_used_size); + MemMove(new_buffer + new_buffer_size - curently_used_size, + reloc_info_writer_.pos(), curently_used_size); reloc_info_writer_.Reposition( new_buffer + new_buffer_size - curently_used_size, @@ -1492,7 +1400,7 @@ if (buffer.length() == code->relocation_size()) { // Simply patch relocation area of code. 
- OS::MemCopy(code->relocation_start(), buffer.start(), buffer.length()); + MemCopy(code->relocation_start(), buffer.start(), buffer.length()); return code; } else { // Relocation info section now has different size. We cannot simply @@ -1504,12 +1412,8 @@ } -MaybeObject* LiveEdit::PatchFunctionPositions( - Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) { - if (!SharedInfoWrapper::IsInstance(shared_info_array)) { - return shared_info_array->GetIsolate()->ThrowIllegalOperation(); - } - +void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array, + Handle<JSArray> position_change_array) { SharedInfoWrapper shared_info_wrapper(shared_info_array); Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo(); @@ -1525,8 +1429,6 @@ info->set_end_position(new_function_end); info->set_function_token_position(new_function_token_pos); - info->GetIsolate()->heap()->EnsureHeapIsIterable(); - if (IsJSFunctionCode(info->code())) { // Patch relocation info section of the code. 
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()), @@ -1540,8 +1442,6 @@ ReplaceCodeObject(Handle<Code>(info->code()), patched_code); } } - - return info->GetIsolate()->heap()->undefined_value(); } @@ -1568,17 +1468,16 @@ } -Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script, - Handle<String> new_source, - Handle<Object> old_script_name) { +Handle<Object> LiveEdit::ChangeScriptSource(Handle<Script> original_script, + Handle<String> new_source, + Handle<Object> old_script_name) { Isolate* isolate = original_script->GetIsolate(); Handle<Object> old_script_object; if (old_script_name->IsString()) { Handle<Script> old_script = CreateScriptCopy(original_script); old_script->set_name(String::cast(*old_script_name)); old_script_object = old_script; - isolate->debugger()->OnAfterCompile( - old_script, Debugger::SEND_WHEN_DEBUGGING); + isolate->debug()->OnAfterCompile(old_script); } else { old_script_object = isolate->factory()->null_value(); } @@ -1588,7 +1487,7 @@ // Drop line ends so that they will be recalculated. original_script->set_line_ends(isolate->heap()->undefined_value()); - return *old_script_object; + return old_script_object; } @@ -1630,7 +1529,7 @@ for (int i = 0; i < len; i++) { HandleScope scope(isolate); Handle<Object> element = - Object::GetElementNoExceptionThrown(isolate, shared_info_array, i); + Object::GetElement(isolate, shared_info_array, i).ToHandleChecked(); Handle<JSValue> jsvalue = Handle<JSValue>::cast(element); Handle<SharedFunctionInfo> shared = UnwrapSharedFunctionInfoFromJSValue(jsvalue); @@ -1665,6 +1564,38 @@ } +// Initializes an artificial stack frame. The data it contains is used for: +// a. successful work of frame dropper code which eventually gets control, +// b. being compatible with regular stack structure for various stack +// iterators. +// Returns address of stack allocated pointer to restarted function, +// the value that is called 'restarter_frame_function_pointer'. 
The value +// at this address (possibly updated by GC) may be used later when preparing +// 'step in' operation. +// Frame structure (conforms InternalFrame structure): +// -- code +// -- SMI maker +// -- function (slot is called "context") +// -- frame base +static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame, + Handle<Code> code) { + DCHECK(bottom_js_frame->is_java_script()); + + Address fp = bottom_js_frame->fp(); + + // Move function pointer into "context" slot. + Memory::Object_at(fp + StandardFrameConstants::kContextOffset) = + Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset); + + Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code; + Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) = + Smi::FromInt(StackFrame::INTERNAL); + + return reinterpret_cast<Object**>(&Memory::Object_at( + fp + StandardFrameConstants::kContextOffset)); +} + + // Removes specified range of frames from stack. There may be 1 or more // frames in range. Anyway the bottom frame is restarted rather than dropped, // and therefore has to be a JavaScript frame. @@ -1672,9 +1603,9 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index, int bottom_js_frame_index, - Debug::FrameDropMode* mode, + LiveEdit::FrameDropMode* mode, Object*** restarter_frame_function_pointer) { - if (!Debug::kFrameDropperSupported) { + if (!LiveEdit::kFrameDropperSupported) { return "Stack manipulations are not supported in this architecture."; } @@ -1682,39 +1613,35 @@ StackFrame* top_frame = frames[top_frame_index]; StackFrame* bottom_js_frame = frames[bottom_js_frame_index]; - ASSERT(bottom_js_frame->is_java_script()); + DCHECK(bottom_js_frame->is_java_script()); // Check the nature of the top frame. 
Isolate* isolate = bottom_js_frame->isolate(); Code* pre_top_frame_code = pre_top_frame->LookupCode(); - bool frame_has_padding; + bool frame_has_padding = true; if (pre_top_frame_code->is_inline_cache_stub() && pre_top_frame_code->is_debug_stub()) { // OK, we can drop inline cache calls. - *mode = Debug::FRAME_DROPPED_IN_IC_CALL; - frame_has_padding = Debug::FramePaddingLayout::kIsSupported; + *mode = LiveEdit::FRAME_DROPPED_IN_IC_CALL; } else if (pre_top_frame_code == - isolate->debug()->debug_break_slot()) { + isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) { // OK, we can drop debug break slot. - *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL; - frame_has_padding = Debug::FramePaddingLayout::kIsSupported; + *mode = LiveEdit::FRAME_DROPPED_IN_DEBUG_SLOT_CALL; } else if (pre_top_frame_code == - isolate->builtins()->builtin( - Builtins::kFrameDropper_LiveEdit)) { + isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)) { // OK, we can drop our own code. pre_top_frame = frames[top_frame_index - 2]; top_frame = frames[top_frame_index - 1]; - *mode = Debug::CURRENTLY_SET_MODE; + *mode = LiveEdit::CURRENTLY_SET_MODE; frame_has_padding = false; } else if (pre_top_frame_code == - isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) { - *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL; - frame_has_padding = Debug::FramePaddingLayout::kIsSupported; + isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) { + *mode = LiveEdit::FRAME_DROPPED_IN_RETURN_CALL; } else if (pre_top_frame_code->kind() == Code::STUB && - pre_top_frame_code->major_key() == CodeStub::CEntry) { + CodeStub::GetMajorKey(pre_top_frame_code) == CodeStub::CEntry) { // Entry from our unit tests on 'debugger' statement. // It's fine, we support this case. - *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL; + *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL; // We don't have a padding from 'debugger' statement call. // Here the stub is CEntry, it's not debug-only and can't be padded. 
// If anyone would complain, a proxy padded stub could be added. @@ -1722,25 +1649,25 @@ } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) { // This must be adaptor that remain from the frame dropping that // is still on stack. A frame dropper frame must be above it. - ASSERT(frames[top_frame_index - 2]->LookupCode() == - isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)); + DCHECK(frames[top_frame_index - 2]->LookupCode() == + isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)); pre_top_frame = frames[top_frame_index - 3]; top_frame = frames[top_frame_index - 2]; - *mode = Debug::CURRENTLY_SET_MODE; + *mode = LiveEdit::CURRENTLY_SET_MODE; frame_has_padding = false; } else { return "Unknown structure of stack above changing function"; } Address unused_stack_top = top_frame->sp(); + int new_frame_size = LiveEdit::kFrameDropperFrameSize * kPointerSize; Address unused_stack_bottom = bottom_js_frame->fp() - - Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame. - + kPointerSize; // Bigger address end is exclusive. + - new_frame_size + kPointerSize; // Bigger address end is exclusive. Address* top_frame_pc_address = top_frame->pc_address(); // top_frame may be damaged below this point. Do not used it. 
- ASSERT(!(top_frame = NULL)); + DCHECK(!(top_frame = NULL)); if (unused_stack_top > unused_stack_bottom) { if (frame_has_padding) { @@ -1748,11 +1675,10 @@ static_cast<int>(unused_stack_top - unused_stack_bottom); Address padding_start = pre_top_frame->fp() - - Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize; + LiveEdit::kFrameDropperFrameSize * kPointerSize; Address padding_pointer = padding_start; - Smi* padding_object = - Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue); + Smi* padding_object = Smi::FromInt(LiveEdit::kFramePaddingValue); while (Memory::Object_at(padding_pointer) == padding_object) { padding_pointer -= kPointerSize; } @@ -1767,9 +1693,9 @@ StackFrame* pre_pre_frame = frames[top_frame_index - 2]; - OS::MemMove(padding_start + kPointerSize - shortage_bytes, - padding_start + kPointerSize, - Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize); + MemMove(padding_start + kPointerSize - shortage_bytes, + padding_start + kPointerSize, + LiveEdit::kFrameDropperFrameSize * kPointerSize); pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes); pre_pre_frame->SetCallerFp(pre_top_frame->fp()); @@ -1786,16 +1712,16 @@ FixTryCatchHandler(pre_top_frame, bottom_js_frame); // Make sure FixTryCatchHandler is idempotent. 
- ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame)); + DCHECK(!FixTryCatchHandler(pre_top_frame, bottom_js_frame)); Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit(); *top_frame_pc_address = code->entry(); pre_top_frame->SetCallerFp(bottom_js_frame->fp()); *restarter_frame_function_pointer = - Debug::SetUpFrameDropperFrame(bottom_js_frame, code); + SetUpFrameDropperFrame(bottom_js_frame, code); - ASSERT((**restarter_frame_function_pointer)->IsJSFunction()); + DCHECK((**restarter_frame_function_pointer)->IsJSFunction()); for (Address a = unused_stack_top; a < unused_stack_bottom; @@ -1807,11 +1733,6 @@ } -static bool IsDropableFrame(StackFrame* frame) { - return !frame->is_exit(); -} - - // Describes a set of call frames that execute any of listed functions. // Finding no such frames does not mean error. class MultipleFunctionTarget { @@ -1824,7 +1745,7 @@ LiveEdit::FunctionPatchabilityStatus status) { return CheckActivation(m_shared_info_array, m_result, frame, status); } - const char* GetNotFoundMessage() { + const char* GetNotFoundMessage() const { return NULL; } private: @@ -1836,7 +1757,9 @@ // Drops all call frame matched by target and all frames above them. 
template<typename TARGET> static const char* DropActivationsInActiveThreadImpl( - Isolate* isolate, TARGET& target, bool do_drop) { + Isolate* isolate, + TARGET& target, // NOLINT + bool do_drop) { Debug* debug = isolate->debug(); Zone zone(isolate); Vector<StackFrame*> frames = CreateStackMap(isolate, &zone); @@ -1865,12 +1788,20 @@ bool target_frame_found = false; int bottom_js_frame_index = top_frame_index; - bool c_code_found = false; + bool non_droppable_frame_found = false; + LiveEdit::FunctionPatchabilityStatus non_droppable_reason; for (; frame_index < frames.length(); frame_index++) { StackFrame* frame = frames[frame_index]; - if (!IsDropableFrame(frame)) { - c_code_found = true; + if (frame->is_exit()) { + non_droppable_frame_found = true; + non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE; + break; + } + if (frame->is_java_script() && + JavaScriptFrame::cast(frame)->function()->shared()->is_generator()) { + non_droppable_frame_found = true; + non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR; break; } if (target.MatchActivation( @@ -1880,15 +1811,15 @@ } } - if (c_code_found) { - // There is a C frames on stack. Check that there are no target frames - // below them. + if (non_droppable_frame_found) { + // There is a C or generator frame on stack. We can't drop C frames, and we + // can't restart generators. Check that there are no target frames below + // them. for (; frame_index < frames.length(); frame_index++) { StackFrame* frame = frames[frame_index]; if (frame->is_java_script()) { - if (target.MatchActivation( - frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) { - // Cannot drop frame under C frames. + if (target.MatchActivation(frame, non_droppable_reason)) { + // Fail. 
return NULL; } } @@ -1905,7 +1836,7 @@ return target.GetNotFoundMessage(); } - Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED; + LiveEdit::FrameDropMode drop_mode = LiveEdit::FRAMES_UNTOUCHED; Object** restarter_frame_function_pointer = NULL; const char* error_message = DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode, @@ -1923,8 +1854,8 @@ break; } } - debug->FramesHaveBeenDropped(new_id, drop_mode, - restarter_frame_function_pointer); + debug->FramesHaveBeenDropped( + new_id, drop_mode, restarter_frame_function_pointer); return NULL; } @@ -1947,7 +1878,7 @@ // Replace "blocked on active" with "replaced on active" status. for (int i = 0; i < array_len; i++) { Handle<Object> obj = - Object::GetElementNoExceptionThrown(isolate, result, i); + Object::GetElement(isolate, result, i).ToHandleChecked(); if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) { Handle<Object> replaced( Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate); @@ -1958,6 +1889,44 @@ } +bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array, + Handle<FixedArray> result, + int len) { + Isolate* isolate = shared_info_array->GetIsolate(); + bool found_suspended_activations = false; + + DCHECK_LE(len, result->length()); + + FunctionPatchabilityStatus active = FUNCTION_BLOCKED_ACTIVE_GENERATOR; + + Heap* heap = isolate->heap(); + HeapIterator iterator(heap); + HeapObject* obj = NULL; + while ((obj = iterator.next()) != NULL) { + if (!obj->IsJSGeneratorObject()) continue; + + JSGeneratorObject* gen = JSGeneratorObject::cast(obj); + if (gen->is_closed()) continue; + + HandleScope scope(isolate); + + for (int i = 0; i < len; i++) { + Handle<JSValue> jsvalue = + Handle<JSValue>::cast(FixedArray::get(shared_info_array, i)); + Handle<SharedFunctionInfo> shared = + UnwrapSharedFunctionInfoFromJSValue(jsvalue); + + if (gen->function()->shared() == *shared) { + result->set(i, Smi::FromInt(active)); + found_suspended_activations = 
true; + } + } + } + + return found_suspended_activations; +} + + class InactiveThreadActivationsChecker : public ThreadVisitor { public: InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array, @@ -1988,18 +1957,29 @@ Isolate* isolate = shared_info_array->GetIsolate(); int len = GetArrayLength(shared_info_array); + DCHECK(shared_info_array->HasFastElements()); + Handle<FixedArray> shared_info_array_elements( + FixedArray::cast(shared_info_array->elements())); + Handle<JSArray> result = isolate->factory()->NewJSArray(len); + Handle<FixedArray> result_elements = + JSObject::EnsureWritableFastElements(result); // Fill the default values. for (int i = 0; i < len; i++) { - SetElementSloppy( - result, - i, - Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate)); + FunctionPatchabilityStatus status = FUNCTION_AVAILABLE_FOR_PATCH; + result_elements->set(i, Smi::FromInt(status)); } + // Scan the heap for active generators -- those that are either currently + // running (as we wouldn't want to restart them, because we don't know where + // to restart them from) or suspended. Fail if any one corresponds to the set + // of functions being edited. + if (FindActiveGenerators(shared_info_array_elements, result_elements, len)) { + return result; + } - // First check inactive threads. Fail if some functions are blocked there. + // Check inactive threads. Fail if some functions are blocked there. InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array, result); isolate->thread_manager()->IterateArchivedThreads( @@ -2013,8 +1993,8 @@ DropActivationsInActiveThread(shared_info_array, result, do_drop); if (error_message != NULL) { // Add error message as an array extra element. 
- Handle<String> str = isolate->factory()->NewStringFromAscii( - CStrVector(error_message)); + Handle<String> str = + isolate->factory()->NewStringFromAsciiChecked(error_message); SetElementSloppy(result, len, str); } return result; @@ -2037,7 +2017,7 @@ } return false; } - const char* GetNotFoundMessage() { + const char* GetNotFoundMessage() const { return "Failed to found requested frame"; } LiveEdit::FunctionPatchabilityStatus saved_status() { @@ -2062,6 +2042,9 @@ if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) { return "Function is blocked under native code"; } + if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR) { + return "Function is blocked under a generator activation"; + } return NULL; } @@ -2101,36 +2084,4 @@ return isolate->active_function_info_listener() != NULL; } - -#else // ENABLE_DEBUGGER_SUPPORT - -// This ifdef-else-endif section provides working or stub implementation of -// LiveEditFunctionTracker. -LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate, - FunctionLiteral* fun) { -} - - -LiveEditFunctionTracker::~LiveEditFunctionTracker() { -} - - -void LiveEditFunctionTracker::RecordFunctionInfo( - Handle<SharedFunctionInfo> info, FunctionLiteral* lit, - Zone* zone) { -} - - -void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) { -} - - -bool LiveEditFunctionTracker::IsActive(Isolate* isolate) { - return false; -} - -#endif // ENABLE_DEBUGGER_SUPPORT - - - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/liveedit-debugger.js nodejs-0.11.15/deps/v8/src/liveedit-debugger.js --- nodejs-0.11.13/deps/v8/src/liveedit-debugger.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/liveedit-debugger.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // LiveEdit feature implementation. The script should be executed after // debug-debugger.js. 
@@ -969,7 +946,9 @@ BLOCKED_ON_ACTIVE_STACK: 2, BLOCKED_ON_OTHER_STACK: 3, BLOCKED_UNDER_NATIVE_CODE: 4, - REPLACED_ON_ACTIVE_STACK: 5 + REPLACED_ON_ACTIVE_STACK: 5, + BLOCKED_UNDER_GENERATOR: 6, + BLOCKED_ACTIVE_GENERATOR: 7 }; FunctionPatchabilityStatus.SymbolName = function(code) { diff -Nru nodejs-0.11.13/deps/v8/src/liveedit.h nodejs-0.11.15/deps/v8/src/liveedit.h --- nodejs-0.11.13/deps/v8/src/liveedit.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/liveedit.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LIVEEDIT_H_ #define V8_LIVEEDIT_H_ @@ -49,8 +26,8 @@ // instantiate newly compiled functions. -#include "allocation.h" -#include "compiler.h" +#include "src/allocation.h" +#include "src/compiler.h" namespace v8 { namespace internal { @@ -75,39 +52,56 @@ static bool IsActive(Isolate* isolate); private: -#ifdef ENABLE_DEBUGGER_SUPPORT Isolate* isolate_; -#endif }; -#ifdef ENABLE_DEBUGGER_SUPPORT class LiveEdit : AllStatic { public: - static JSArray* GatherCompileInfo(Handle<Script> script, - Handle<String> source); + // Describes how exactly a frame has been dropped from stack. + enum FrameDropMode { + // No frame has been dropped. + FRAMES_UNTOUCHED, + // The top JS frame had been calling IC stub. IC stub mustn't be called now. + FRAME_DROPPED_IN_IC_CALL, + // The top JS frame had been calling debug break slot stub. Patch the + // address this stub jumps to in the end. + FRAME_DROPPED_IN_DEBUG_SLOT_CALL, + // The top JS frame had been calling some C++ function. The return address + // gets patched automatically. 
+ FRAME_DROPPED_IN_DIRECT_CALL, + FRAME_DROPPED_IN_RETURN_CALL, + CURRENTLY_SET_MODE + }; + + static void InitializeThreadLocal(Debug* debug); + + static bool SetAfterBreakTarget(Debug* debug); + + MUST_USE_RESULT static MaybeHandle<JSArray> GatherCompileInfo( + Handle<Script> script, + Handle<String> source); static void WrapSharedFunctionInfos(Handle<JSArray> array); - MUST_USE_RESULT static MaybeObject* ReplaceFunctionCode( - Handle<JSArray> new_compile_info_array, - Handle<JSArray> shared_info_array); + static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array, + Handle<JSArray> shared_info_array); - static MaybeObject* FunctionSourceUpdated(Handle<JSArray> shared_info_array); + static void FunctionSourceUpdated(Handle<JSArray> shared_info_array); // Updates script field in FunctionSharedInfo. static void SetFunctionScript(Handle<JSValue> function_wrapper, Handle<Object> script_handle); - MUST_USE_RESULT static MaybeObject* PatchFunctionPositions( - Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array); + static void PatchFunctionPositions(Handle<JSArray> shared_info_array, + Handle<JSArray> position_change_array); // For a script updates its source field. If old_script_name is provided // (i.e. is a String), also creates a copy of the script with its original // source and sends notification to debugger. - static Object* ChangeScriptSource(Handle<Script> original_script, - Handle<String> new_source, - Handle<Object> old_script_name); + static Handle<Object> ChangeScriptSource(Handle<Script> original_script, + Handle<String> new_source, + Handle<Object> old_script_name); // In a code of a parent function replaces original function as embedded // object with a substitution one. @@ -115,6 +109,11 @@ Handle<JSValue> orig_function_shared, Handle<JSValue> subst_function_shared); + // Find open generator activations, and set corresponding "result" elements to + // FUNCTION_BLOCKED_ACTIVE_GENERATOR. 
+ static bool FindActiveGenerators(Handle<FixedArray> shared_info_array, + Handle<FixedArray> result, int len); + // Checks listed functions on stack and return array with corresponding // FunctionPatchabilityStatus statuses; extra array element may // contain general error message. Modifies the current stack and @@ -133,7 +132,9 @@ FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2, FUNCTION_BLOCKED_ON_OTHER_STACK = 3, FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4, - FUNCTION_REPLACED_ON_ACTIVE_STACK = 5 + FUNCTION_REPLACED_ON_ACTIVE_STACK = 5, + FUNCTION_BLOCKED_UNDER_GENERATOR = 6, + FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7 }; // Compares 2 strings line-by-line, then token-wise and returns diff in form @@ -141,6 +142,46 @@ // of diff chunks. static Handle<JSArray> CompareStrings(Handle<String> s1, Handle<String> s2); + + // Architecture-specific constant. + static const bool kFrameDropperSupported; + + /** + * Defines layout of a stack frame that supports padding. This is a regular + * internal frame that has a flexible stack structure. LiveEdit can shift + * its lower part up the stack, taking up the 'padding' space when additional + * stack memory is required. + * Such frame is expected immediately above the topmost JavaScript frame. + * + * Stack Layout: + * --- Top + * LiveEdit routine frames + * --- + * C frames of debug handler + * --- + * ... + * --- + * An internal frame that has n padding words: + * - any number of words as needed by code -- upper part of frame + * - padding size: a Smi storing n -- current size of padding + * - padding: n words filled with kPaddingValue in form of Smi + * - 3 context/type words of a regular InternalFrame + * - fp + * --- + * Topmost JavaScript frame + * --- + * ... + * --- Bottom + */ + // A size of frame base including fp. Padding words starts right above + // the base. + static const int kFrameDropperFrameSize = 4; + // A number of words that should be reserved on stack for the LiveEdit use. + // Stored on stack in form of Smi. 
+ static const int kFramePaddingInitialSize = 1; + // A value that padding words are filled with (in form of Smi). Going + // bottom-top, the first word not having this value is a counter word. + static const int kFramePaddingValue = kFramePaddingInitialSize + 1; }; @@ -175,9 +216,165 @@ Output* result_writer); }; -#endif // ENABLE_DEBUGGER_SUPPORT +// Simple helper class that creates more or less typed structures over +// JSArray object. This is an adhoc method of passing structures from C++ +// to JavaScript. +template<typename S> +class JSArrayBasedStruct { + public: + static S Create(Isolate* isolate) { + Factory* factory = isolate->factory(); + Handle<JSArray> array = factory->NewJSArray(S::kSize_); + return S(array); + } + + static S cast(Object* object) { + JSArray* array = JSArray::cast(object); + Handle<JSArray> array_handle(array); + return S(array_handle); + } + + explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) { + } + + Handle<JSArray> GetJSArray() { + return array_; + } + + Isolate* isolate() const { + return array_->GetIsolate(); + } + + protected: + void SetField(int field_position, Handle<Object> value) { + JSObject::SetElement(array_, field_position, value, NONE, SLOPPY).Assert(); + } + + void SetSmiValueField(int field_position, int value) { + SetField(field_position, Handle<Smi>(Smi::FromInt(value), isolate())); + } + + Handle<Object> GetField(int field_position) { + return Object::GetElement( + isolate(), array_, field_position).ToHandleChecked(); + } + + int GetSmiValueField(int field_position) { + Handle<Object> res = GetField(field_position); + return Handle<Smi>::cast(res)->value(); + } + + private: + Handle<JSArray> array_; +}; + + +// Represents some function compilation details. This structure will be used +// from JavaScript. It contains Code object, which is kept wrapped +// into a BlindReference for sanitizing reasons. 
+class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { + public: + explicit FunctionInfoWrapper(Handle<JSArray> array) + : JSArrayBasedStruct<FunctionInfoWrapper>(array) { + } + + void SetInitialProperties(Handle<String> name, + int start_position, + int end_position, + int param_num, + int literal_count, + int slot_count, + int parent_index); + + void SetFunctionCode(Handle<Code> function_code, + Handle<HeapObject> code_scope_info); + + void SetFunctionScopeInfo(Handle<Object> scope_info_array) { + this->SetField(kFunctionScopeInfoOffset_, scope_info_array); + } + + void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info); + + int GetLiteralCount() { + return this->GetSmiValueField(kLiteralNumOffset_); + } + + int GetParentIndex() { + return this->GetSmiValueField(kParentIndexOffset_); + } + + Handle<Code> GetFunctionCode(); + + Handle<FixedArray> GetFeedbackVector(); + + Handle<Object> GetCodeScopeInfo(); + + int GetStartPosition() { + return this->GetSmiValueField(kStartPositionOffset_); + } + + int GetEndPosition() { return this->GetSmiValueField(kEndPositionOffset_); } + + int GetSlotCount() { + return this->GetSmiValueField(kSlotNumOffset_); + } + + private: + static const int kFunctionNameOffset_ = 0; + static const int kStartPositionOffset_ = 1; + static const int kEndPositionOffset_ = 2; + static const int kParamNumOffset_ = 3; + static const int kCodeOffset_ = 4; + static const int kCodeScopeInfoOffset_ = 5; + static const int kFunctionScopeInfoOffset_ = 6; + static const int kParentIndexOffset_ = 7; + static const int kSharedFunctionInfoOffset_ = 8; + static const int kLiteralNumOffset_ = 9; + static const int kSlotNumOffset_ = 10; + static const int kSize_ = 11; + + friend class JSArrayBasedStruct<FunctionInfoWrapper>; +}; + + +// Wraps SharedFunctionInfo along with some of its fields for passing it +// back to JavaScript. SharedFunctionInfo object itself is additionally +// wrapped into BlindReference for sanitizing reasons. 
+class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> { + public: + static bool IsInstance(Handle<JSArray> array) { + if (array->length() != Smi::FromInt(kSize_)) return false; + Handle<Object> element( + Object::GetElement(array->GetIsolate(), + array, + kSharedInfoOffset_).ToHandleChecked()); + if (!element->IsJSValue()) return false; + return Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo(); + } + + explicit SharedInfoWrapper(Handle<JSArray> array) + : JSArrayBasedStruct<SharedInfoWrapper>(array) { + } + + void SetProperties(Handle<String> name, + int start_position, + int end_position, + Handle<SharedFunctionInfo> info); + + Handle<SharedFunctionInfo> GetInfo(); + + private: + static const int kFunctionNameOffset_ = 0; + static const int kStartPositionOffset_ = 1; + static const int kEndPositionOffset_ = 2; + static const int kSharedInfoOffset_ = 3; + static const int kSize_ = 4; + + friend class JSArrayBasedStruct<SharedInfoWrapper>; +}; + } } // namespace v8::internal #endif /* V*_LIVEEDIT_H_ */ diff -Nru nodejs-0.11.13/deps/v8/src/log.cc nodejs-0.11.15/deps/v8/src/log.cc --- nodejs-0.11.13/deps/v8/src/log.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/log.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,47 +1,25 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <stdarg.h> -#include "v8.h" +#include "src/v8.h" -#include "bootstrapper.h" -#include "code-stubs.h" -#include "cpu-profiler.h" -#include "deoptimizer.h" -#include "global-handles.h" -#include "log.h" -#include "log-utils.h" -#include "macro-assembler.h" -#include "platform.h" -#include "runtime-profiler.h" -#include "serialize.h" -#include "string-stream.h" -#include "vm-state-inl.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/cpu-profiler.h" +#include "src/deoptimizer.h" +#include "src/global-handles.h" +#include "src/log.h" +#include "src/log-utils.h" +#include "src/macro-assembler.h" +#include "src/perf-jit.h" +#include "src/runtime-profiler.h" +#include "src/serialize.h" +#include "src/string-stream.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { @@ -129,7 +107,7 @@ void AppendBytes(const char* bytes, int size) { size = Min(size, kUtf8BufferSize - utf8_pos_); - OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size); + MemCopy(utf8_buffer_ + utf8_pos_, bytes, size); utf8_pos_ += size; } @@ -145,7 +123,7 @@ void AppendInt(int n) { Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_); - int size = OS::SNPrintF(buffer, "%d", n); + int size = SNPrintF(buffer, "%d", n); if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) { utf8_pos_ += size; } @@ -154,7 +132,7 @@ void AppendHex(uint32_t n) { Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_); - int size = OS::SNPrintF(buffer, "%x", n); + int size = SNPrintF(buffer, "%x", n); if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) { utf8_pos_ += size; } @@ -253,6 +231,7 @@ virtual ~PerfBasicLogger(); virtual void CodeMoveEvent(Address from, Address to) { } + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { } virtual void CodeDeleteEvent(Address from) { } private: @@ -281,12 +260,13 @@ // Open the perf JIT dump file. 
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding; ScopedVector<char> perf_dump_name(bufferSize); - int size = OS::SNPrintF( + int size = SNPrintF( perf_dump_name, kFilenameFormatString, - OS::GetCurrentProcessId()); + base::OS::GetCurrentProcessId()); CHECK_NE(size, -1); - perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode); + perf_output_handle_ = + base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode); CHECK_NE(perf_output_handle_, NULL); setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); } @@ -302,172 +282,11 @@ SharedFunctionInfo*, const char* name, int length) { - ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize); - - OS::FPrint(perf_output_handle_, "%llx %x %.*s\n", - reinterpret_cast<uint64_t>(code->instruction_start()), - code->instruction_size(), - length, name); -} - - -// Linux perf tool logging support -class PerfJitLogger : public CodeEventLogger { - public: - PerfJitLogger(); - virtual ~PerfJitLogger(); - - virtual void CodeMoveEvent(Address from, Address to) { } - virtual void CodeDeleteEvent(Address from) { } - - private: - virtual void LogRecordedBuffer(Code* code, - SharedFunctionInfo* shared, - const char* name, - int length); - - // Extension added to V8 log file name to get the low-level log name. - static const char kFilenameFormatString[]; - static const int kFilenameBufferPadding; - - // File buffer size of the low-level log. We don't use the default to - // minimize the associated overhead. 
- static const int kLogBufferSize = 2 * MB; - - void LogWriteBytes(const char* bytes, int size); - void LogWriteHeader(); - - static const uint32_t kJitHeaderMagic = 0x4F74496A; - static const uint32_t kJitHeaderVersion = 0x2; - static const uint32_t kElfMachIA32 = 3; - static const uint32_t kElfMachX64 = 62; - static const uint32_t kElfMachARM = 40; - static const uint32_t kElfMachMIPS = 10; - - struct jitheader { - uint32_t magic; - uint32_t version; - uint32_t total_size; - uint32_t elf_mach; - uint32_t pad1; - uint32_t pid; - uint64_t timestamp; - }; - - enum jit_record_type { - JIT_CODE_LOAD = 0 - // JIT_CODE_UNLOAD = 1, - // JIT_CODE_CLOSE = 2, - // JIT_CODE_DEBUG_INFO = 3, - // JIT_CODE_PAGE_MAP = 4, - // JIT_CODE_MAX = 5 - }; - - struct jr_code_load { - uint32_t id; - uint32_t total_size; - uint64_t timestamp; - uint64_t vma; - uint64_t code_addr; - uint32_t code_size; - uint32_t align; - }; - - uint32_t GetElfMach() { -#if V8_TARGET_ARCH_IA32 - return kElfMachIA32; -#elif V8_TARGET_ARCH_X64 - return kElfMachX64; -#elif V8_TARGET_ARCH_ARM - return kElfMachARM; -#elif V8_TARGET_ARCH_MIPS - return kElfMachMIPS; -#else - UNIMPLEMENTED(); - return 0; -#endif - } - - FILE* perf_output_handle_; -}; - -const char PerfJitLogger::kFilenameFormatString[] = "/tmp/jit-%d.dump"; - -// Extra padding for the PID in the filename -const int PerfJitLogger::kFilenameBufferPadding = 16; - -PerfJitLogger::PerfJitLogger() - : perf_output_handle_(NULL) { - // Open the perf JIT dump file. 
- int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding; - ScopedVector<char> perf_dump_name(bufferSize); - int size = OS::SNPrintF( - perf_dump_name, - kFilenameFormatString, - OS::GetCurrentProcessId()); - CHECK_NE(size, -1); - perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode); - CHECK_NE(perf_output_handle_, NULL); - setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); - - LogWriteHeader(); -} - + DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize); -PerfJitLogger::~PerfJitLogger() { - fclose(perf_output_handle_); - perf_output_handle_ = NULL; -} - - -void PerfJitLogger::LogRecordedBuffer(Code* code, - SharedFunctionInfo*, - const char* name, - int length) { - ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize); - ASSERT(perf_output_handle_ != NULL); - - const char* code_name = name; - uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start()); - uint32_t code_size = code->instruction_size(); - - static const char string_terminator[] = "\0"; - - jr_code_load code_load; - code_load.id = JIT_CODE_LOAD; - code_load.total_size = sizeof(code_load) + length + 1 + code_size; - code_load.timestamp = - static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0); - code_load.vma = 0x0; // Our addresses are absolute. 
- code_load.code_addr = reinterpret_cast<uint64_t>(code->instruction_start()); - code_load.code_size = code_size; - code_load.align = 0; - - LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load)); - LogWriteBytes(code_name, length); - LogWriteBytes(string_terminator, 1); - LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size); -} - - -void PerfJitLogger::LogWriteBytes(const char* bytes, int size) { - size_t rv = fwrite(bytes, 1, size, perf_output_handle_); - ASSERT(static_cast<size_t>(size) == rv); - USE(rv); -} - - -void PerfJitLogger::LogWriteHeader() { - ASSERT(perf_output_handle_ != NULL); - jitheader header; - header.magic = kJitHeaderMagic; - header.version = kJitHeaderVersion; - header.total_size = sizeof(jitheader); - header.pad1 = 0xdeadbeef; - header.elf_mach = GetElfMach(); - header.pid = OS::GetCurrentProcessId(); - header.timestamp = static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0); - LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header)); + base::OS::FPrint(perf_output_handle_, "%llx %x %.*s\n", + reinterpret_cast<uint64_t>(code->instruction_start()), + code->instruction_size(), length, name); } @@ -480,6 +299,7 @@ virtual ~LowLevelLogger(); virtual void CodeMoveEvent(Address from, Address to); + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { } virtual void CodeDeleteEvent(Address from); virtual void SnapshotPositionEvent(Address addr, int pos); virtual void CodeMovingGCEvent(); @@ -553,9 +373,10 @@ // Open the low-level log file. 
size_t len = strlen(name); ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt))); - OS::MemCopy(ll_name.start(), name, len); - OS::MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt)); - ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode); + MemCopy(ll_name.start(), name, len); + MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt)); + ll_output_handle_ = + base::OS::FOpen(ll_name.start(), base::OS::LogFileOpenMode); setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize); LogCodeInfo(); @@ -571,12 +392,18 @@ void LowLevelLogger::LogCodeInfo() { #if V8_TARGET_ARCH_IA32 const char arch[] = "ia32"; -#elif V8_TARGET_ARCH_X64 +#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT const char arch[] = "x64"; +#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT + const char arch[] = "x32"; #elif V8_TARGET_ARCH_ARM const char arch[] = "arm"; #elif V8_TARGET_ARCH_MIPS const char arch[] = "mips"; +#elif V8_TARGET_ARCH_X87 + const char arch[] = "x87"; +#elif V8_TARGET_ARCH_ARM64 + const char arch[] = "arm64"; #else const char arch[] = "unknown"; #endif @@ -591,7 +418,7 @@ CodeCreateStruct event; event.name_size = length; event.code_address = code->instruction_start(); - ASSERT(event.code_address == code->address() + Code::kHeaderSize); + DCHECK(event.code_address == code->address() + Code::kHeaderSize); event.code_size = code->instruction_size(); LogWriteStruct(event); LogWriteBytes(name, length); @@ -626,7 +453,7 @@ void LowLevelLogger::LogWriteBytes(const char* bytes, int size) { size_t rv = fwrite(bytes, 1, size, ll_output_handle_); - ASSERT(static_cast<size_t>(size) == rv); + DCHECK(static_cast<size_t>(size) == rv); USE(rv); } @@ -646,6 +473,7 @@ explicit JitLogger(JitCodeEventHandler code_event_handler); virtual void CodeMoveEvent(Address from, Address to); + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { } virtual void CodeDeleteEvent(Address from); virtual void AddCodeLinePosInfoEvent( void* jit_handler_data, 
@@ -680,11 +508,11 @@ event.type = JitCodeEvent::CODE_ADDED; event.code_start = code->instruction_start(); event.code_len = code->instruction_size(); - Handle<Script> script_handle; + Handle<SharedFunctionInfo> shared_function_handle; if (shared && shared->script()->IsScript()) { - script_handle = Handle<Script>(Script::cast(shared->script())); + shared_function_handle = Handle<SharedFunctionInfo>(shared); } - event.script = ToApiHandle<v8::Script>(script_handle); + event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle); event.name.str = name; event.name.len = length; code_event_handler_(&event); @@ -765,7 +593,7 @@ // An independent thread removes data and writes it to the log. // This design minimizes the time spent in the sampler. // -class Profiler: public Thread { +class Profiler: public base::Thread { public: explicit Profiler(Isolate* isolate); void Engage(); @@ -814,7 +642,7 @@ int tail_; // Index to the buffer tail. bool overflow_; // Tell whether a buffer overflow has occurred. // Sempahore used for buffer synchronization. - Semaphore buffer_semaphore_; + base::Semaphore buffer_semaphore_; // Tells whether profiler is engaged, that is, processing thread is stated. bool engaged_; @@ -844,7 +672,7 @@ } void SetProfiler(Profiler* profiler) { - ASSERT(profiler_ == NULL); + DCHECK(profiler_ == NULL); profiler_ = profiler; IncreaseProfilingDepth(); if (!IsActive()) Start(); @@ -865,7 +693,7 @@ // Profiler implementation. 
// Profiler::Profiler(Isolate* isolate) - : Thread("v8:Profiler"), + : base::Thread(Options("v8:Profiler")), isolate_(isolate), head_(0), tail_(0), @@ -873,15 +701,19 @@ buffer_semaphore_(0), engaged_(false), running_(false), - paused_(false) { -} + paused_(false) {} void Profiler::Engage() { if (engaged_) return; engaged_ = true; - OS::LogSharedLibraryAddresses(isolate_); + std::vector<base::OS::SharedLibraryAddress> addresses = + base::OS::GetSharedLibraryAddresses(); + for (size_t i = 0; i < addresses.size(); ++i) { + LOG(isolate_, SharedLibraryEvent( + addresses[i].library_path, addresses[i].start, addresses[i].end)); + } // Start thread processing the profiler buffer. running_ = true; @@ -951,13 +783,13 @@ void Logger::addCodeEventListener(CodeEventListener* listener) { - ASSERT(!hasCodeEventListener(listener)); + DCHECK(!hasCodeEventListener(listener)); listeners_.Add(listener); } void Logger::removeCodeEventListener(CodeEventListener* listener) { - ASSERT(hasCodeEventListener(listener)); + DCHECK(hasCodeEventListener(listener)); listeners_.RemoveElement(listener); } @@ -970,7 +802,7 @@ void Logger::ProfilerBeginEvent() { if (!log_->IsEnabled()) return; Log::MessageBuilder msg(log_); - msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs); + msg.Append("profiler,\"begin\",%d", kSamplingIntervalMs); msg.WriteToLogFile(); } @@ -983,7 +815,7 @@ void Logger::UncheckedStringEvent(const char* name, const char* value) { if (!log_->IsEnabled()) return; Log::MessageBuilder msg(log_); - msg.Append("%s,\"%s\"\n", name, value); + msg.Append("%s,\"%s\"", name, value); msg.WriteToLogFile(); } @@ -1001,7 +833,7 @@ void Logger::UncheckedIntEvent(const char* name, int value) { if (!log_->IsEnabled()) return; Log::MessageBuilder msg(log_); - msg.Append("%s,%d\n", name, value); + msg.Append("%s,%d", name, value); msg.WriteToLogFile(); } @@ -1009,7 +841,7 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) { if (!log_->IsEnabled()) return; 
Log::MessageBuilder msg(log_); - msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value); + msg.Append("%s,%" V8_PTR_PREFIX "d", name, value); msg.WriteToLogFile(); } @@ -1017,7 +849,7 @@ void Logger::HandleEvent(const char* name, Object** location) { if (!log_->IsEnabled() || !FLAG_log_handles) return; Log::MessageBuilder msg(log_); - msg.Append("%s,0x%" V8PRIxPTR "\n", name, location); + msg.Append("%s,0x%" V8PRIxPTR, name, location); msg.WriteToLogFile(); } @@ -1026,7 +858,7 @@ // caller's responsibility to ensure that log is enabled and that // FLAG_log_api is true. void Logger::ApiEvent(const char* format, ...) { - ASSERT(log_->IsEnabled() && FLAG_log_api); + DCHECK(log_->IsEnabled() && FLAG_log_api); Log::MessageBuilder msg(log_); va_list ap; va_start(ap, format); @@ -1041,108 +873,103 @@ if (key->IsString()) { SmartArrayPointer<char> str = String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,check-security,\"%s\"\n", str.get()); + ApiEvent("api,check-security,\"%s\"", str.get()); } else if (key->IsSymbol()) { Symbol* symbol = Symbol::cast(key); if (symbol->name()->IsUndefined()) { - ApiEvent("api,check-security,symbol(hash %x)\n", - Symbol::cast(key)->Hash()); + ApiEvent("api,check-security,symbol(hash %x)", Symbol::cast(key)->Hash()); } else { SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString( DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n", - str.get(), + ApiEvent("api,check-security,symbol(\"%s\" hash %x)", str.get(), Symbol::cast(key)->Hash()); } } else if (key->IsUndefined()) { - ApiEvent("api,check-security,undefined\n"); + ApiEvent("api,check-security,undefined"); } else { - ApiEvent("api,check-security,['no-name']\n"); + ApiEvent("api,check-security,['no-name']"); } } -void Logger::SharedLibraryEvent(const char* library_path, +void Logger::SharedLibraryEvent(const std::string& library_path, uintptr_t start, uintptr_t end) { if (!log_->IsEnabled() || 
!FLAG_prof) return; Log::MessageBuilder msg(log_); - msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n", - library_path, - start, - end); + msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR, + library_path.c_str(), start, end); msg.WriteToLogFile(); } -void Logger::SharedLibraryEvent(const wchar_t* library_path, - uintptr_t start, - uintptr_t end) { - if (!log_->IsEnabled() || !FLAG_prof) return; +void Logger::CodeDeoptEvent(Code* code) { + if (!log_->IsEnabled()) return; + DCHECK(FLAG_log_internal_timer_events); Log::MessageBuilder msg(log_); - msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n", - library_path, - start, - end); + int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds()); + msg.Append("code-deopt,%ld,%d", since_epoch, code->CodeSize()); msg.WriteToLogFile(); } -void Logger::CodeDeoptEvent(Code* code) { +void Logger::CurrentTimeEvent() { if (!log_->IsEnabled()) return; - ASSERT(FLAG_log_internal_timer_events); + DCHECK(FLAG_log_internal_timer_events); Log::MessageBuilder msg(log_); int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds()); - msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize()); + msg.Append("current-time,%ld", since_epoch); msg.WriteToLogFile(); } -void Logger::TimerEvent(StartEnd se, const char* name) { +void Logger::TimerEvent(Logger::StartEnd se, const char* name) { if (!log_->IsEnabled()) return; - ASSERT(FLAG_log_internal_timer_events); + DCHECK(FLAG_log_internal_timer_events); Log::MessageBuilder msg(log_); int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds()); - const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n" - : "timer-event-end,\"%s\",%ld\n"; + const char* format = (se == START) ? 
"timer-event-start,\"%s\",%ld" + : "timer-event-end,\"%s\",%ld"; msg.Append(format, name, since_epoch); msg.WriteToLogFile(); } void Logger::EnterExternal(Isolate* isolate) { - LOG(isolate, TimerEvent(START, TimerEventScope::v8_external)); - ASSERT(isolate->current_vm_state() == JS); + LOG(isolate, TimerEvent(START, TimerEventExternal::name())); + DCHECK(isolate->current_vm_state() == JS); isolate->set_current_vm_state(EXTERNAL); } void Logger::LeaveExternal(Isolate* isolate) { - LOG(isolate, TimerEvent(END, TimerEventScope::v8_external)); - ASSERT(isolate->current_vm_state() == EXTERNAL); + LOG(isolate, TimerEvent(END, TimerEventExternal::name())); + DCHECK(isolate->current_vm_state() == EXTERNAL); isolate->set_current_vm_state(JS); } -void Logger::LogInternalEvents(const char* name, int se) { +void Logger::DefaultTimerEventsLogger(const char* name, int se) { Isolate* isolate = Isolate::Current(); LOG(isolate, TimerEvent(static_cast<StartEnd>(se), name)); } -void Logger::TimerEventScope::LogTimerEvent(StartEnd se) { - isolate_->event_logger()(name_, se); +template <class TimerEvent> +void TimerEventScope<TimerEvent>::LogTimerEvent(Logger::StartEnd se) { + if (TimerEvent::expose_to_api() || + isolate_->event_logger() == Logger::DefaultTimerEventsLogger) { + isolate_->event_logger()(TimerEvent::name(), se); + } } -const char* Logger::TimerEventScope::v8_recompile_synchronous = - "V8.RecompileSynchronous"; -const char* Logger::TimerEventScope::v8_recompile_concurrent = - "V8.RecompileConcurrent"; -const char* Logger::TimerEventScope::v8_compile_full_code = - "V8.CompileFullCode"; -const char* Logger::TimerEventScope::v8_execute = "V8.Execute"; -const char* Logger::TimerEventScope::v8_external = "V8.External"; +// Instantiate template methods. 
+#define V(TimerName, expose) \ + template void TimerEventScope<TimerEvent##TimerName>::LogTimerEvent( \ + Logger::StartEnd se); +TIMER_EVENTS_LIST(V) +#undef V void Logger::LogRegExpSource(Handle<JSRegExp> regexp) { @@ -1150,7 +977,8 @@ // (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"") Log::MessageBuilder msg(log_); - Handle<Object> source = GetProperty(regexp, "source"); + Handle<Object> source = Object::GetProperty( + isolate_, regexp, "source").ToHandleChecked(); if (!source->IsString()) { msg.Append("no source"); return; @@ -1168,17 +996,20 @@ msg.Append('/'); // global flag - Handle<Object> global = GetProperty(regexp, "global"); + Handle<Object> global = Object::GetProperty( + isolate_, regexp, "global").ToHandleChecked(); if (global->IsTrue()) { msg.Append('g'); } // ignorecase flag - Handle<Object> ignorecase = GetProperty(regexp, "ignoreCase"); + Handle<Object> ignorecase = Object::GetProperty( + isolate_, regexp, "ignoreCase").ToHandleChecked(); if (ignorecase->IsTrue()) { msg.Append('i'); } // multiline flag - Handle<Object> multiline = GetProperty(regexp, "multiline"); + Handle<Object> multiline = Object::GetProperty( + isolate_, regexp, "multiline").ToHandleChecked(); if (multiline->IsTrue()) { msg.Append('m'); } @@ -1192,62 +1023,21 @@ Log::MessageBuilder msg(log_); msg.Append("regexp-compile,"); LogRegExpSource(regexp); - msg.Append(in_cache ? ",hit\n" : ",miss\n"); - msg.WriteToLogFile(); -} - - -void Logger::LogRuntime(Vector<const char> format, - Handle<JSArray> args) { - if (!log_->IsEnabled() || !FLAG_log_runtime) return; - Log::MessageBuilder msg(log_); - for (int i = 0; i < format.length(); i++) { - char c = format[i]; - if (c == '%' && i <= format.length() - 2) { - i++; - ASSERT('0' <= format[i] && format[i] <= '9'); - // No exception expected when getting an element from an array literal. 
- Handle<Object> obj = - Object::GetElementNoExceptionThrown(isolate_, args, format[i] - '0'); - i++; - switch (format[i]) { - case 's': - msg.AppendDetailed(String::cast(*obj), false); - break; - case 'S': - msg.AppendDetailed(String::cast(*obj), true); - break; - case 'r': - Logger::LogRegExpSource(Handle<JSRegExp>::cast(obj)); - break; - case 'x': - msg.Append("0x%x", Smi::cast(*obj)->value()); - break; - case 'i': - msg.Append("%i", Smi::cast(*obj)->value()); - break; - default: - UNREACHABLE(); - } - } else { - msg.Append(c); - } - } - msg.Append('\n'); + msg.Append(in_cache ? ",hit" : ",miss"); msg.WriteToLogFile(); } void Logger::ApiIndexedSecurityCheck(uint32_t index) { if (!log_->IsEnabled() || !FLAG_log_api) return; - ApiEvent("api,check-security,%u\n", index); + ApiEvent("api,check-security,%u", index); } void Logger::ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name) { - ASSERT(name->IsName()); + DCHECK(name->IsName()); if (!log_->IsEnabled() || !FLAG_log_api) return; String* class_name_obj = holder->class_name(); SmartArrayPointer<char> class_name = @@ -1255,18 +1045,18 @@ if (name->IsString()) { SmartArrayPointer<char> property_name = String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, class_name.get(), + ApiEvent("api,%s,\"%s\",\"%s\"", tag, class_name.get(), property_name.get()); } else { Symbol* symbol = Symbol::cast(name); uint32_t hash = symbol->Hash(); if (symbol->name()->IsUndefined()) { - ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, class_name.get(), hash); + ApiEvent("api,%s,\"%s\",symbol(hash %x)", tag, class_name.get(), hash); } else { SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString( DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n", - tag, class_name.get(), str.get(), hash); + ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)", tag, class_name.get(), + str.get(), hash); } } } @@ -1278,7 
+1068,7 @@ String* class_name_obj = holder->class_name(); SmartArrayPointer<char> class_name = class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,%s,\"%s\",%u\n", tag, class_name.get(), index); + ApiEvent("api,%s,\"%s\",%u", tag, class_name.get(), index); } @@ -1287,20 +1077,20 @@ String* class_name_obj = object->class_name(); SmartArrayPointer<char> class_name = class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,%s,\"%s\"\n", tag, class_name.get()); + ApiEvent("api,%s,\"%s\"", tag, class_name.get()); } void Logger::ApiEntryCall(const char* name) { if (!log_->IsEnabled() || !FLAG_log_api) return; - ApiEvent("api,%s\n", name); + ApiEvent("api,%s", name); } void Logger::NewEvent(const char* name, void* object, size_t size) { if (!log_->IsEnabled() || !FLAG_log) return; Log::MessageBuilder msg(log_); - msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object, + msg.Append("new,%s,0x%" V8PRIxPTR ",%u", name, object, static_cast<unsigned int>(size)); msg.WriteToLogFile(); } @@ -1309,7 +1099,7 @@ void Logger::DeleteEvent(const char* name, void* object) { if (!log_->IsEnabled() || !FLAG_log) return; Log::MessageBuilder msg(log_); - msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object); + msg.Append("delete,%s,0x%" V8PRIxPTR, name, object); msg.WriteToLogFile(); } @@ -1347,7 +1137,6 @@ symbol->Hash()); } } - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1373,7 +1162,7 @@ static void AppendCodeCreateHeader(Log::MessageBuilder* msg, Logger::LogEventsAndTags tag, Code* code) { - ASSERT(msg); + DCHECK(msg); msg->Append("%s,%s,%d,", kLogEventsNames[Logger::CODE_CREATION_EVENT], kLogEventsNames[tag], @@ -1395,7 +1184,6 @@ Log::MessageBuilder msg(log_); AppendCodeCreateHeader(&msg, tag, code); msg.AppendDoubleQuotedString(comment); - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1418,7 +1206,6 @@ } else { msg.AppendSymbolName(Symbol::cast(name)); } - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1449,7 +1236,6 @@ 
msg.Append(','); msg.AppendAddress(shared->address()); msg.Append(",%s", ComputeMarker(code)); - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1484,7 +1270,6 @@ msg.Append(":%d:%d\",", line, column); msg.AppendAddress(shared->address()); msg.Append(",%s", ComputeMarker(code)); - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1501,7 +1286,24 @@ Log::MessageBuilder msg(log_); AppendCodeCreateHeader(&msg, tag, code); msg.Append("\"args_count: %d\"", args_count); - msg.Append('\n'); + msg.WriteToLogFile(); +} + + +void Logger::CodeDisableOptEvent(Code* code, + SharedFunctionInfo* shared) { + PROFILER_LOG(CodeDisableOptEvent(code, shared)); + + if (!is_logging_code_events()) return; + CALL_LISTENERS(CodeDisableOptEvent(code, shared)); + + if (!FLAG_log_code || !log_->IsEnabled()) return; + Log::MessageBuilder msg(log_); + msg.Append("%s,", kLogEventsNames[CODE_DISABLE_OPT_EVENT]); + SmartArrayPointer<char> name = + shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + msg.Append("\"%s\",", name.get()); + msg.Append("\"%s\"", GetBailoutReason(shared->DisableOptimizationReason())); msg.WriteToLogFile(); } @@ -1512,7 +1314,7 @@ if (!is_logging_code_events()) return; if (!log_->IsEnabled() || !FLAG_ll_prof) return; CALL_LISTENERS(CodeMovingGCEvent()); - OS::SignalCodeMovingGC(); + base::OS::SignalCodeMovingGC(); } @@ -1528,7 +1330,6 @@ msg.Append('"'); msg.AppendDetailed(source, false); msg.Append('"'); - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1552,7 +1353,6 @@ Log::MessageBuilder msg(log_); msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]); msg.AppendAddress(from); - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1595,7 +1395,6 @@ Log::MessageBuilder msg(log_); msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos); msg.AppendDoubleQuotedString(code_name); - msg.Append("\n"); msg.WriteToLogFile(); } @@ -1608,7 +1407,6 @@ msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]); msg.AppendAddress(addr); msg.Append(",%d", pos); 
- msg.Append('\n'); msg.WriteToLogFile(); } @@ -1630,7 +1428,6 @@ msg.AppendAddress(from); msg.Append(','); msg.AppendAddress(to); - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1641,12 +1438,10 @@ msg.Append("%s,%s,", name, tag); uint32_t sec, usec; - if (OS::GetUserTime(&sec, &usec) != -1) { + if (base::OS::GetUserTime(&sec, &usec) != -1) { msg.Append("%d,%d,", sec, usec); } - msg.Append("%.0f", OS::TimeCurrentMillis()); - - msg.Append('\n'); + msg.Append("%.0f", base::OS::TimeCurrentMillis()); msg.WriteToLogFile(); } @@ -1667,7 +1462,6 @@ } else { msg.AppendSymbolName(Symbol::cast(name)); } - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1677,8 +1471,8 @@ Log::MessageBuilder msg(log_); // Using non-relative system time in order to be able to synchronize with // external memory profiling events (e.g. DOM memory size). - msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n", - space, kind, OS::TimeCurrentMillis()); + msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f", space, kind, + base::OS::TimeCurrentMillis()); msg.WriteToLogFile(); } @@ -1686,7 +1480,7 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) { if (!log_->IsEnabled() || !FLAG_log_gc) return; Log::MessageBuilder msg(log_); - msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind); + msg.Append("heap-sample-end,\"%s\",\"%s\"", space, kind); msg.WriteToLogFile(); } @@ -1694,7 +1488,7 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) { if (!log_->IsEnabled() || !FLAG_log_gc) return; Log::MessageBuilder msg(log_); - msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes); + msg.Append("heap-sample-item,%s,%d,%d", type, number, bytes); msg.WriteToLogFile(); } @@ -1702,7 +1496,7 @@ void Logger::DebugTag(const char* call_site_tag) { if (!log_->IsEnabled() || !FLAG_log) return; Log::MessageBuilder msg(log_); - msg.Append("debug-tag,%s\n", call_site_tag); + msg.Append("debug-tag,%s", call_site_tag); msg.WriteToLogFile(); } @@ -1715,10 +1509,8 @@ } char* 
parameter_string = s.Finalize(); Log::MessageBuilder msg(log_); - msg.Append("debug-queue-event,%s,%15.3f,%s\n", - event_type, - OS::TimeCurrentMillis(), - parameter_string); + msg.Append("debug-queue-event,%s,%15.3f,%s", event_type, + base::OS::TimeCurrentMillis(), parameter_string); DeleteArray(parameter_string); msg.WriteToLogFile(); } @@ -1741,11 +1533,10 @@ if (overflow) { msg.Append(",overflow"); } - for (int i = 0; i < sample->frames_count; ++i) { + for (unsigned i = 0; i < sample->frames_count; ++i) { msg.Append(','); msg.AppendAddress(sample->stack[i]); } - msg.Append('\n'); msg.WriteToLogFile(); } @@ -1785,7 +1576,7 @@ sfis_[*count_] = Handle<SharedFunctionInfo>(sfi); } if (code_objects_ != NULL) { - ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION); code_objects_[*count_] = Handle<Code>(function->code()); } *count_ = *count_ + 1; @@ -1872,6 +1663,10 @@ description = "A load IC from the snapshot"; tag = Logger::LOAD_IC_TAG; break; + case Code::CALL_IC: + description = "A call IC from the snapshot"; + tag = Logger::CALL_IC_TAG; + break; case Code::STORE_IC: description = "A store IC from the snapshot"; tag = Logger::STORE_IC_TAG; @@ -1904,9 +1699,9 @@ Handle<String> func_name(shared->DebugName()); if (shared->script()->IsScript()) { Handle<Script> script(Script::cast(shared->script())); - int line_num = GetScriptLineNumber(script, shared->start_position()) + 1; + int line_num = Script::GetLineNumber(script, shared->start_position()) + 1; int column_num = - GetScriptColumnNumber(script, shared->start_position()) + 1; + Script::GetColumnNumber(script, shared->start_position()) + 1; if (script->name()->IsString()) { Handle<String> script_name(String::cast(script->name())); if (line_num > 0) { @@ -1991,59 +1786,44 @@ } -static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) { - if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return; - 
stream->Add("isolate-%p-", isolate); +static void AddIsolateIdIfNeeded(OStream& os, // NOLINT + Isolate* isolate) { + if (FLAG_logfile_per_isolate) os << "isolate-" << isolate << "-"; } -static SmartArrayPointer<const char> PrepareLogFileName( - Isolate* isolate, const char* file_name) { - if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) { - // If there's a '%' in the log file name we have to expand - // placeholders. - HeapStringAllocator allocator; - StringStream stream(&allocator); - AddIsolateIdIfNeeded(isolate, &stream); - for (const char* p = file_name; *p; p++) { - if (*p == '%') { - p++; - switch (*p) { - case '\0': - // If there's a % at the end of the string we back up - // one character so we can escape the loop properly. - p--; - break; - case 'p': - stream.Add("%d", OS::GetCurrentProcessId()); - break; - case 't': { - // %t expands to the current time in milliseconds. - double time = OS::TimeCurrentMillis(); - stream.Add("%.0f", FmtElm(time)); - break; - } - case '%': - // %% expands (contracts really) to %. - stream.Put('%'); - break; - default: - // All other %'s expand to themselves. - stream.Put('%'); - stream.Put(*p); - break; - } - } else { - stream.Put(*p); +static void PrepareLogFileName(OStream& os, // NOLINT + Isolate* isolate, const char* file_name) { + AddIsolateIdIfNeeded(os, isolate); + for (const char* p = file_name; *p; p++) { + if (*p == '%') { + p++; + switch (*p) { + case '\0': + // If there's a % at the end of the string we back up + // one character so we can escape the loop properly. + p--; + break; + case 'p': + os << base::OS::GetCurrentProcessId(); + break; + case 't': + // %t expands to the current time in milliseconds. + os << static_cast<int64_t>(base::OS::TimeCurrentMillis()); + break; + case '%': + // %% expands (contracts really) to %. + os << '%'; + break; + default: + // All other %'s expand to themselves. 
+ os << '%' << *p; + break; } + } else { + os << *p; } - return SmartArrayPointer<const char>(stream.ToCString()); } - int length = StrLength(file_name); - char* str = NewArray<char>(length + 1); - OS::MemCopy(str, file_name, length); - str[length] = '\0'; - return SmartArrayPointer<const char>(str); } @@ -2057,9 +1837,9 @@ FLAG_log_snapshot_positions = true; } - SmartArrayPointer<const char> log_file_name = - PrepareLogFileName(isolate, FLAG_logfile); - log_->Initialize(log_file_name.get()); + OStringStream log_file_name; + PrepareLogFileName(log_file_name, isolate, FLAG_logfile); + log_->Initialize(log_file_name.c_str()); if (FLAG_perf_basic_prof) { @@ -2073,7 +1853,7 @@ } if (FLAG_ll_prof) { - ll_logger_ = new LowLevelLogger(log_file_name.get()); + ll_logger_ = new LowLevelLogger(log_file_name.c_str()); addCodeEventListener(ll_logger_); } diff -Nru nodejs-0.11.13/deps/v8/src/log.h nodejs-0.11.15/deps/v8/src/log.h --- nodejs-0.11.13/deps/v8/src/log.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/log.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,23 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LOG_H_ #define V8_LOG_H_ -#include "allocation.h" -#include "objects.h" -#include "platform.h" -#include "platform/elapsed-timer.h" +#include <string> + +#include "src/allocation.h" +#include "src/base/platform/elapsed-timer.h" +#include "src/base/platform/platform.h" +#include "src/objects.h" namespace v8 { + +namespace base { +class Semaphore; +} + namespace internal { // Logger is used for collecting logging information from V8 during @@ -78,7 +62,6 @@ class Log; class PositionsRecorder; class Profiler; -class Semaphore; class Ticker; struct TickSample; @@ -102,6 +85,7 @@ #define LOG_EVENTS_AND_TAGS_LIST(V) \ V(CODE_CREATION_EVENT, "code-creation") \ + V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \ V(CODE_MOVE_EVENT, "code-move") \ V(CODE_DELETE_EVENT, "code-delete") \ V(CODE_MOVING_GC, "code-moving-gc") \ @@ -144,6 +128,7 @@ V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \ V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \ V(LAZY_COMPILE_TAG, "LazyCompile") \ + V(CALL_IC_TAG, "CallIC") \ V(LOAD_IC_TAG, "LoadIC") \ 
V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \ V(REG_EXP_TAG, "RegExp") \ @@ -166,6 +151,8 @@ class Logger { public: + enum StartEnd { START = 0, END = 1 }; + #define DECLARE_ENUM(enum_item, ignore) enum_item, enum LogEventsAndTags { LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM) @@ -259,6 +246,8 @@ CompilationInfo* info, Name* source, int line, int column); void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count); + // Emits a code deoptimization event. + void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared); void CodeMovingGCEvent(); // Emits a code create event for a RegExp. void RegExpCodeCreateEvent(Code* code, String* source); @@ -299,58 +288,26 @@ void HeapSampleStats(const char* space, const char* kind, intptr_t capacity, intptr_t used); - void SharedLibraryEvent(const char* library_path, - uintptr_t start, - uintptr_t end); - void SharedLibraryEvent(const wchar_t* library_path, + void SharedLibraryEvent(const std::string& library_path, uintptr_t start, uintptr_t end); - // ==== Events logged by --log-timer-events. 
==== - enum StartEnd { START, END }; - void CodeDeoptEvent(Code* code); + void CurrentTimeEvent(); void TimerEvent(StartEnd se, const char* name); static void EnterExternal(Isolate* isolate); static void LeaveExternal(Isolate* isolate); - static void EmptyLogInternalEvents(const char* name, int se) { } - static void LogInternalEvents(const char* name, int se); - - class TimerEventScope { - public: - TimerEventScope(Isolate* isolate, const char* name) - : isolate_(isolate), name_(name) { - LogTimerEvent(START); - } - - ~TimerEventScope() { - LogTimerEvent(END); - } - - void LogTimerEvent(StartEnd se); - - static const char* v8_recompile_synchronous; - static const char* v8_recompile_concurrent; - static const char* v8_compile_full_code; - static const char* v8_execute; - static const char* v8_external; - - private: - Isolate* isolate_; - const char* name_; - }; + static void EmptyTimerEventsLogger(const char* name, int se) {} + static void DefaultTimerEventsLogger(const char* name, int se); // ==== Events logged by --log-regexp ==== // Regexp compilation and execution events. void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache); - // Log an event reported from generated code - void LogRuntime(Vector<const char> format, Handle<JSArray> args); - bool is_logging() { return is_logging_; } @@ -457,12 +414,46 @@ // 'true' between SetUp() and TearDown(). bool is_initialized_; - ElapsedTimer timer_; + base::ElapsedTimer timer_; friend class CpuProfiler; }; +#define TIMER_EVENTS_LIST(V) \ + V(RecompileSynchronous, true) \ + V(RecompileConcurrent, true) \ + V(CompileFullCode, true) \ + V(Execute, true) \ + V(External, true) \ + V(IcMiss, false) + +#define V(TimerName, expose) \ + class TimerEvent##TimerName : public AllStatic { \ + public: \ + static const char* name(void* unused = NULL) { return "V8." 
#TimerName; } \ + static bool expose_to_api() { return expose; } \ + }; +TIMER_EVENTS_LIST(V) +#undef V + + +template <class TimerEvent> +class TimerEventScope { + public: + explicit TimerEventScope(Isolate* isolate) : isolate_(isolate) { + LogTimerEvent(Logger::START); + } + + ~TimerEventScope() { LogTimerEvent(Logger::END); } + + void LogTimerEvent(Logger::StartEnd se); + + private: + Isolate* isolate_; +}; + + class CodeEventListener { public: virtual ~CodeEventListener() {} @@ -495,6 +486,7 @@ virtual void CodeDeleteEvent(Address from) = 0; virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0; virtual void CodeMovingGCEvent() = 0; + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) = 0; }; diff -Nru nodejs-0.11.13/deps/v8/src/log-inl.h nodejs-0.11.15/deps/v8/src/log-inl.h --- nodejs-0.11.13/deps/v8/src/log-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/log-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2006-2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_LOG_INL_H_ #define V8_LOG_INL_H_ -#include "log.h" +#include "src/log.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/log-utils.cc nodejs-0.11.15/deps/v8/src/log-utils.cc --- nodejs-0.11.13/deps/v8/src/log-utils.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/log-utils.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "log-utils.h" -#include "string-stream.h" +#include "src/log-utils.h" +#include "src/string-stream.h" namespace v8 { namespace internal { @@ -51,7 +28,6 @@ // --log-all enables all the log flags. 
if (FLAG_log_all) { - FLAG_log_runtime = true; FLAG_log_api = true; FLAG_log_code = true; FLAG_log_gc = true; @@ -78,20 +54,20 @@ void Log::OpenStdout() { - ASSERT(!IsEnabled()); + DCHECK(!IsEnabled()); output_handle_ = stdout; } void Log::OpenTemporaryFile() { - ASSERT(!IsEnabled()); - output_handle_ = i::OS::OpenTemporaryFile(); + DCHECK(!IsEnabled()); + output_handle_ = base::OS::OpenTemporaryFile(); } void Log::OpenFile(const char* name) { - ASSERT(!IsEnabled()); - output_handle_ = OS::FOpen(name, OS::LogFileOpenMode); + DCHECK(!IsEnabled()); + output_handle_ = base::OS::FOpen(name, base::OS::LogFileOpenMode); } @@ -118,7 +94,7 @@ : log_(log), lock_guard_(&log_->mutex_), pos_(0) { - ASSERT(log_->message_buffer_ != NULL); + DCHECK(log_->message_buffer_ != NULL); } @@ -129,14 +105,14 @@ va_start(args, format); AppendVA(format, args); va_end(args); - ASSERT(pos_ <= Log::kMessageBufferSize); + DCHECK(pos_ <= Log::kMessageBufferSize); } void Log::MessageBuilder::AppendVA(const char* format, va_list args) { Vector<char> buf(log_->message_buffer_ + pos_, Log::kMessageBufferSize - pos_); - int result = v8::internal::OS::VSNPrintF(buf, format, args); + int result = v8::internal::VSNPrintF(buf, format, args); // Result is -1 if output was truncated. 
if (result >= 0) { @@ -144,7 +120,7 @@ } else { pos_ = Log::kMessageBufferSize; } - ASSERT(pos_ <= Log::kMessageBufferSize); + DCHECK(pos_ <= Log::kMessageBufferSize); } @@ -152,7 +128,7 @@ if (pos_ < Log::kMessageBufferSize) { log_->message_buffer_[pos_++] = c; } - ASSERT(pos_ <= Log::kMessageBufferSize); + DCHECK(pos_ <= Log::kMessageBufferSize); } @@ -183,7 +159,7 @@ void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) { - ASSERT(symbol); + DCHECK(symbol); Append("symbol("); if (!symbol->name()->IsUndefined()) { Append("\""); @@ -230,19 +206,23 @@ void Log::MessageBuilder::AppendStringPart(const char* str, int len) { if (pos_ + len > Log::kMessageBufferSize) { len = Log::kMessageBufferSize - pos_; - ASSERT(len >= 0); + DCHECK(len >= 0); if (len == 0) return; } Vector<char> buf(log_->message_buffer_ + pos_, Log::kMessageBufferSize - pos_); - OS::StrNCpy(buf, str, len); + StrNCpy(buf, str, len); pos_ += len; - ASSERT(pos_ <= Log::kMessageBufferSize); + DCHECK(pos_ <= Log::kMessageBufferSize); } void Log::MessageBuilder::WriteToLogFile() { - ASSERT(pos_ <= Log::kMessageBufferSize); + DCHECK(pos_ <= Log::kMessageBufferSize); + // Assert that we do not already have a new line at the end. + DCHECK(pos_ == 0 || log_->message_buffer_[pos_ - 1] != '\n'); + if (pos_ == Log::kMessageBufferSize) pos_--; + log_->message_buffer_[pos_++] = '\n'; const int written = log_->WriteToFile(log_->message_buffer_, pos_); if (written != pos_) { log_->stop(); diff -Nru nodejs-0.11.13/deps/v8/src/log-utils.h nodejs-0.11.15/deps/v8/src/log-utils.h --- nodejs-0.11.13/deps/v8/src/log-utils.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/log-utils.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2006-2009 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_LOG_UTILS_H_ #define V8_LOG_UTILS_H_ -#include "allocation.h" +#include "src/allocation.h" namespace v8 { namespace internal { @@ -45,10 +22,10 @@ void stop() { is_stopped_ = true; } static bool InitLogAtStart() { - return FLAG_log || FLAG_log_runtime || FLAG_log_api - || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect - || FLAG_log_regexp || FLAG_ll_prof || FLAG_perf_basic_prof - || FLAG_perf_jit_prof || FLAG_log_internal_timer_events; + return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc + || FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp + || FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_jit_prof + || FLAG_log_internal_timer_events; } // Frees all resources acquired in Initialize and Open... functions. @@ -108,7 +85,7 @@ private: Log* log_; - LockGuard<Mutex> lock_guard_; + base::LockGuard<base::Mutex> lock_guard_; int pos_; }; @@ -126,9 +103,9 @@ // Implementation of writing to a log file. int WriteToFile(const char* msg, int length) { - ASSERT(output_handle_ != NULL); + DCHECK(output_handle_ != NULL); size_t rv = fwrite(msg, 1, length, output_handle_); - ASSERT(static_cast<size_t>(length) == rv); + DCHECK(static_cast<size_t>(length) == rv); USE(rv); fflush(output_handle_); return length; @@ -143,7 +120,7 @@ // mutex_ is a Mutex used for enforcing exclusive // access to the formatting buffer and the log file or log memory buffer. - Mutex mutex_; + base::Mutex mutex_; // Buffer used for formatting log messages. This is a singleton buffer and // mutex_ should be acquired before using it. diff -Nru nodejs-0.11.13/deps/v8/src/lookup.cc nodejs-0.11.15/deps/v8/src/lookup.cc --- nodejs-0.11.13/deps/v8/src/lookup.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lookup.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,275 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/bootstrapper.h" +#include "src/lookup.h" +#include "src/lookup-inl.h" + +namespace v8 { +namespace internal { + + +void LookupIterator::Next() { + DisallowHeapAllocation no_gc; + has_property_ = false; + + JSReceiver* holder = NULL; + Map* map = *holder_map_; + + // Perform lookup on current holder. + state_ = LookupInHolder(map); + + // Continue lookup if lookup on current holder failed. + while (!IsFound()) { + JSReceiver* maybe_holder = NextHolder(map); + if (maybe_holder == NULL) break; + holder = maybe_holder; + map = holder->map(); + state_ = LookupInHolder(map); + } + + // Either was found in the receiver, or the receiver has no prototype. + if (holder == NULL) return; + + maybe_holder_ = handle(holder); + holder_map_ = handle(map); +} + + +Handle<JSReceiver> LookupIterator::GetRoot() const { + Handle<Object> receiver = GetReceiver(); + if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver); + Handle<Object> root = + handle(receiver->GetRootMap(isolate_)->prototype(), isolate_); + CHECK(!root->IsNull()); + return Handle<JSReceiver>::cast(root); +} + + +Handle<Map> LookupIterator::GetReceiverMap() const { + Handle<Object> receiver = GetReceiver(); + if (receiver->IsNumber()) return isolate_->factory()->heap_number_map(); + return handle(Handle<HeapObject>::cast(receiver)->map()); +} + + +bool LookupIterator::IsBootstrapping() const { + return isolate_->bootstrapper()->IsActive(); +} + + +bool LookupIterator::HasAccess(v8::AccessType access_type) const { + DCHECK_EQ(ACCESS_CHECK, state_); + DCHECK(is_guaranteed_to_have_holder()); + return isolate_->MayNamedAccess(GetHolder<JSObject>(), name_, access_type); +} + + +bool LookupIterator::HasProperty() { + DCHECK_EQ(PROPERTY, state_); + DCHECK(is_guaranteed_to_have_holder()); + + if (property_encoding_ == DICTIONARY) { + Handle<JSObject> holder = GetHolder<JSObject>(); + number_ = holder->property_dictionary()->FindEntry(name_); + if (number_ == 
NameDictionary::kNotFound) return false; + + property_details_ = holder->property_dictionary()->DetailsAt(number_); + // Holes in dictionary cells are absent values. + if (holder->IsGlobalObject() && + (property_details_.IsDeleted() || FetchValue()->IsTheHole())) { + return false; + } + } else { + // Can't use descriptor_number() yet because has_property_ is still false. + property_details_ = + holder_map_->instance_descriptors()->GetDetails(number_); + } + + switch (property_details_.type()) { + case v8::internal::FIELD: + case v8::internal::NORMAL: + case v8::internal::CONSTANT: + property_kind_ = DATA; + break; + case v8::internal::CALLBACKS: + property_kind_ = ACCESSOR; + break; + case v8::internal::HANDLER: + case v8::internal::NONEXISTENT: + case v8::internal::INTERCEPTOR: + UNREACHABLE(); + } + + has_property_ = true; + return true; +} + + +void LookupIterator::PrepareForDataProperty(Handle<Object> value) { + DCHECK(has_property_); + DCHECK(HolderIsReceiverOrHiddenPrototype()); + if (property_encoding_ == DICTIONARY) return; + holder_map_ = + Map::PrepareForDataProperty(holder_map_, descriptor_number(), value); + JSObject::MigrateToMap(GetHolder<JSObject>(), holder_map_); + // Reload property information. + if (holder_map_->is_dictionary_map()) { + property_encoding_ = DICTIONARY; + } else { + property_encoding_ = DESCRIPTOR; + } + CHECK(HasProperty()); +} + + +void LookupIterator::TransitionToDataProperty( + Handle<Object> value, PropertyAttributes attributes, + Object::StoreFromKeyed store_mode) { + DCHECK(!has_property_ || !HolderIsReceiverOrHiddenPrototype()); + + // Can only be called when the receiver is a JSObject. JSProxy has to be + // handled via a trap. Adding properties to primitive values is not + // observable. + Handle<JSObject> receiver = Handle<JSObject>::cast(GetReceiver()); + + // Properties have to be added to context extension objects through + // SetOwnPropertyIgnoreAttributes. 
+ DCHECK(!receiver->IsJSContextExtensionObject()); + + if (receiver->IsJSGlobalProxy()) { + PrototypeIterator iter(isolate(), receiver); + receiver = + Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter)); + } + + maybe_holder_ = receiver; + holder_map_ = Map::TransitionToDataProperty(handle(receiver->map()), name_, + value, attributes, store_mode); + JSObject::MigrateToMap(receiver, holder_map_); + + // Reload the information. + state_ = NOT_FOUND; + configuration_ = CHECK_OWN_REAL; + state_ = LookupInHolder(*holder_map_); + DCHECK(IsFound()); + HasProperty(); +} + + +bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const { + DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY); + DisallowHeapAllocation no_gc; + Handle<Object> receiver = GetReceiver(); + if (!receiver->IsJSReceiver()) return false; + Object* current = *receiver; + JSReceiver* holder = *maybe_holder_.ToHandleChecked(); + // JSProxy do not occur as hidden prototypes. + if (current->IsJSProxy()) { + return JSReceiver::cast(current) == holder; + } + PrototypeIterator iter(isolate(), current, + PrototypeIterator::START_AT_RECEIVER); + do { + if (JSReceiver::cast(iter.GetCurrent()) == holder) return true; + DCHECK(!current->IsJSProxy()); + iter.Advance(); + } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)); + return false; +} + + +Handle<Object> LookupIterator::FetchValue() const { + Object* result = NULL; + Handle<JSObject> holder = GetHolder<JSObject>(); + switch (property_encoding_) { + case DICTIONARY: + result = holder->property_dictionary()->ValueAt(number_); + if (holder->IsGlobalObject()) { + result = PropertyCell::cast(result)->value(); + } + break; + case DESCRIPTOR: + if (property_details_.type() == v8::internal::FIELD) { + FieldIndex field_index = + FieldIndex::ForDescriptor(*holder_map_, number_); + return JSObject::FastPropertyAt( + holder, property_details_.representation(), field_index); + } + result = 
holder_map_->instance_descriptors()->GetValue(number_); + } + return handle(result, isolate_); +} + + +int LookupIterator::GetConstantIndex() const { + DCHECK(has_property_); + DCHECK_EQ(DESCRIPTOR, property_encoding_); + DCHECK_EQ(v8::internal::CONSTANT, property_details_.type()); + return descriptor_number(); +} + + +FieldIndex LookupIterator::GetFieldIndex() const { + DCHECK(has_property_); + DCHECK_EQ(DESCRIPTOR, property_encoding_); + DCHECK_EQ(v8::internal::FIELD, property_details_.type()); + int index = + holder_map()->instance_descriptors()->GetFieldIndex(descriptor_number()); + bool is_double = representation().IsDouble(); + return FieldIndex::ForPropertyIndex(*holder_map(), index, is_double); +} + + +Handle<PropertyCell> LookupIterator::GetPropertyCell() const { + Handle<JSObject> holder = GetHolder<JSObject>(); + Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder); + Object* value = global->property_dictionary()->ValueAt(dictionary_entry()); + return Handle<PropertyCell>(PropertyCell::cast(value)); +} + + +Handle<Object> LookupIterator::GetAccessors() const { + DCHECK(has_property_); + DCHECK_EQ(ACCESSOR, property_kind_); + return FetchValue(); +} + + +Handle<Object> LookupIterator::GetDataValue() const { + DCHECK(has_property_); + DCHECK_EQ(DATA, property_kind_); + Handle<Object> value = FetchValue(); + return value; +} + + +void LookupIterator::WriteDataValue(Handle<Object> value) { + DCHECK(is_guaranteed_to_have_holder()); + DCHECK(has_property_); + Handle<JSObject> holder = GetHolder<JSObject>(); + if (property_encoding_ == DICTIONARY) { + NameDictionary* property_dictionary = holder->property_dictionary(); + if (holder->IsGlobalObject()) { + Handle<PropertyCell> cell( + PropertyCell::cast(property_dictionary->ValueAt(dictionary_entry()))); + PropertyCell::SetValueInferType(cell, value); + } else { + property_dictionary->ValueAtPut(dictionary_entry(), *value); + } + } else if (property_details_.type() == v8::internal::FIELD) { + 
holder->WriteToField(descriptor_number(), *value); + } else { + DCHECK_EQ(v8::internal::CONSTANT, property_details_.type()); + } +} + + +void LookupIterator::InternalizeName() { + if (name_->IsUniqueName()) return; + name_ = factory()->InternalizeString(Handle<String>::cast(name_)); +} +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/lookup.h nodejs-0.11.15/deps/v8/src/lookup.h --- nodejs-0.11.13/deps/v8/src/lookup.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lookup.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,217 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_LOOKUP_H_ +#define V8_LOOKUP_H_ + +#include "src/factory.h" +#include "src/isolate.h" +#include "src/objects.h" + +namespace v8 { +namespace internal { + +class LookupIterator V8_FINAL BASE_EMBEDDED { + public: + enum Configuration { + CHECK_OWN_REAL = 0, + CHECK_HIDDEN = 1 << 0, + CHECK_DERIVED = 1 << 1, + CHECK_INTERCEPTOR = 1 << 2, + CHECK_ACCESS_CHECK = 1 << 3, + CHECK_ALL = CHECK_HIDDEN | CHECK_DERIVED | + CHECK_INTERCEPTOR | CHECK_ACCESS_CHECK, + SKIP_INTERCEPTOR = CHECK_ALL ^ CHECK_INTERCEPTOR, + CHECK_OWN = CHECK_ALL ^ CHECK_DERIVED + }; + + enum State { + NOT_FOUND, + PROPERTY, + INTERCEPTOR, + ACCESS_CHECK, + JSPROXY + }; + + enum PropertyKind { + DATA, + ACCESSOR + }; + + enum PropertyEncoding { + DICTIONARY, + DESCRIPTOR + }; + + LookupIterator(Handle<Object> receiver, + Handle<Name> name, + Configuration configuration = CHECK_ALL) + : configuration_(ComputeConfiguration(configuration, name)), + state_(NOT_FOUND), + property_kind_(DATA), + property_encoding_(DESCRIPTOR), + property_details_(NONE, NONEXISTENT, Representation::None()), + isolate_(name->GetIsolate()), + name_(name), + maybe_receiver_(receiver), + number_(DescriptorArray::kNotFound) { + Handle<JSReceiver> root = GetRoot(); + holder_map_ = 
handle(root->map()); + maybe_holder_ = root; + Next(); + } + + LookupIterator(Handle<Object> receiver, + Handle<Name> name, + Handle<JSReceiver> holder, + Configuration configuration = CHECK_ALL) + : configuration_(ComputeConfiguration(configuration, name)), + state_(NOT_FOUND), + property_kind_(DATA), + property_encoding_(DESCRIPTOR), + property_details_(NONE, NONEXISTENT, Representation::None()), + isolate_(name->GetIsolate()), + name_(name), + holder_map_(holder->map()), + maybe_receiver_(receiver), + maybe_holder_(holder), + number_(DescriptorArray::kNotFound) { + Next(); + } + + Isolate* isolate() const { return isolate_; } + State state() const { return state_; } + Handle<Name> name() const { return name_; } + + bool IsFound() const { return state_ != NOT_FOUND; } + void Next(); + + Heap* heap() const { return isolate_->heap(); } + Factory* factory() const { return isolate_->factory(); } + Handle<Object> GetReceiver() const { + return Handle<Object>::cast(maybe_receiver_.ToHandleChecked()); + } + Handle<Map> holder_map() const { return holder_map_; } + template <class T> + Handle<T> GetHolder() const { + DCHECK(IsFound()); + return Handle<T>::cast(maybe_holder_.ToHandleChecked()); + } + Handle<JSReceiver> GetRoot() const; + bool HolderIsReceiverOrHiddenPrototype() const; + + /* Dynamically reduce the trapped types. */ + void skip_interceptor() { + configuration_ = static_cast<Configuration>( + configuration_ & ~CHECK_INTERCEPTOR); + } + void skip_access_check() { + configuration_ = static_cast<Configuration>( + configuration_ & ~CHECK_ACCESS_CHECK); + } + + /* ACCESS_CHECK */ + bool HasAccess(v8::AccessType access_type) const; + + /* PROPERTY */ + // HasProperty needs to be called before any of the other PROPERTY methods + // below can be used. It ensures that we are able to provide a definite + // answer, and loads extra information about the property. 
+ bool HasProperty(); + void PrepareForDataProperty(Handle<Object> value); + void TransitionToDataProperty(Handle<Object> value, + PropertyAttributes attributes, + Object::StoreFromKeyed store_mode); + PropertyKind property_kind() const { + DCHECK(has_property_); + return property_kind_; + } + PropertyEncoding property_encoding() const { + DCHECK(has_property_); + return property_encoding_; + } + PropertyDetails property_details() const { + DCHECK(has_property_); + return property_details_; + } + bool IsConfigurable() const { return !property_details().IsDontDelete(); } + Representation representation() const { + return property_details().representation(); + } + FieldIndex GetFieldIndex() const; + int GetConstantIndex() const; + Handle<PropertyCell> GetPropertyCell() const; + Handle<Object> GetAccessors() const; + Handle<Object> GetDataValue() const; + void WriteDataValue(Handle<Object> value); + + void InternalizeName(); + + private: + Handle<Map> GetReceiverMap() const; + + MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map); + inline State LookupInHolder(Map* map); + Handle<Object> FetchValue() const; + + bool IsBootstrapping() const; + + // Methods that fetch data from the holder ensure they always have a holder. + // This means the receiver needs to be present as opposed to just the receiver + // map. Other objects in the prototype chain are transitively guaranteed to be + // present via the receiver map. 
+ bool is_guaranteed_to_have_holder() const { + return !maybe_receiver_.is_null(); + } + bool check_interceptor() const { + return !IsBootstrapping() && (configuration_ & CHECK_INTERCEPTOR) != 0; + } + bool check_derived() const { + return (configuration_ & CHECK_DERIVED) != 0; + } + bool check_hidden() const { + return (configuration_ & CHECK_HIDDEN) != 0; + } + bool check_access_check() const { + return (configuration_ & CHECK_ACCESS_CHECK) != 0; + } + int descriptor_number() const { + DCHECK(has_property_); + DCHECK_EQ(DESCRIPTOR, property_encoding_); + return number_; + } + int dictionary_entry() const { + DCHECK(has_property_); + DCHECK_EQ(DICTIONARY, property_encoding_); + return number_; + } + + static Configuration ComputeConfiguration( + Configuration configuration, Handle<Name> name) { + if (name->IsOwn()) { + return static_cast<Configuration>(configuration & CHECK_OWN); + } else { + return configuration; + } + } + + Configuration configuration_; + State state_; + bool has_property_; + PropertyKind property_kind_; + PropertyEncoding property_encoding_; + PropertyDetails property_details_; + Isolate* isolate_; + Handle<Name> name_; + Handle<Map> holder_map_; + MaybeHandle<Object> maybe_receiver_; + MaybeHandle<JSReceiver> maybe_holder_; + + int number_; +}; + + +} } // namespace v8::internal + +#endif // V8_LOOKUP_H_ diff -Nru nodejs-0.11.13/deps/v8/src/lookup-inl.h nodejs-0.11.15/deps/v8/src/lookup-inl.h --- nodejs-0.11.13/deps/v8/src/lookup-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/lookup-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_LOOKUP_INL_H_ +#define V8_LOOKUP_INL_H_ + +#include "src/lookup.h" + +namespace v8 { +namespace internal { + + +JSReceiver* LookupIterator::NextHolder(Map* map) { + DisallowHeapAllocation no_gc; + if (map->prototype()->IsNull()) return NULL; + + JSReceiver* next = JSReceiver::cast(map->prototype()); + DCHECK(!next->map()->IsGlobalObjectMap() || + next->map()->is_hidden_prototype()); + + if (!check_derived() && + !(check_hidden() && next->map()->is_hidden_prototype())) { + return NULL; + } + + return next; +} + + +LookupIterator::State LookupIterator::LookupInHolder(Map* map) { + DisallowHeapAllocation no_gc; + switch (state_) { + case NOT_FOUND: + if (map->IsJSProxyMap()) { + return JSPROXY; + } + if (check_access_check() && map->is_access_check_needed()) { + return ACCESS_CHECK; + } + // Fall through. + case ACCESS_CHECK: + if (check_interceptor() && map->has_named_interceptor()) { + return INTERCEPTOR; + } + // Fall through. + case INTERCEPTOR: + if (map->is_dictionary_map()) { + property_encoding_ = DICTIONARY; + } else { + DescriptorArray* descriptors = map->instance_descriptors(); + number_ = descriptors->SearchWithCache(*name_, map); + if (number_ == DescriptorArray::kNotFound) return NOT_FOUND; + property_encoding_ = DESCRIPTOR; + } + return PROPERTY; + case PROPERTY: + return NOT_FOUND; + case JSPROXY: + UNREACHABLE(); + } + UNREACHABLE(); + return state_; +} +} +} // namespace v8::internal + +#endif // V8_LOOKUP_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/macro-assembler.h nodejs-0.11.15/deps/v8/src/macro-assembler.h --- nodejs-0.11.13/deps/v8/src/macro-assembler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/macro-assembler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_MACRO_ASSEMBLER_H_ #define V8_MACRO_ASSEMBLER_H_ @@ -61,39 +38,52 @@ const int kInvalidProtoDepth = -1; #if V8_TARGET_ARCH_IA32 -#include "assembler.h" -#include "ia32/assembler-ia32.h" -#include "ia32/assembler-ia32-inl.h" -#include "code.h" // must be after assembler_*.h -#include "ia32/macro-assembler-ia32.h" +#include "src/assembler.h" +#include "src/ia32/assembler-ia32.h" +#include "src/ia32/assembler-ia32-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/ia32/macro-assembler-ia32.h" #elif V8_TARGET_ARCH_X64 -#include "assembler.h" -#include "x64/assembler-x64.h" -#include "x64/assembler-x64-inl.h" -#include "code.h" // must be after assembler_*.h -#include "x64/macro-assembler-x64.h" +#include "src/assembler.h" +#include "src/x64/assembler-x64.h" +#include "src/x64/assembler-x64-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/x64/macro-assembler-x64.h" #elif V8_TARGET_ARCH_ARM64 -#include "arm64/constants-arm64.h" -#include "assembler.h" -#include "arm64/assembler-arm64.h" -#include "arm64/assembler-arm64-inl.h" -#include "code.h" // must be after assembler_*.h -#include "arm64/macro-assembler-arm64.h" -#include "arm64/macro-assembler-arm64-inl.h" +#include "src/arm64/constants-arm64.h" +#include "src/assembler.h" +#include "src/arm64/assembler-arm64.h" // NOLINT +#include "src/arm64/assembler-arm64-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/arm64/macro-assembler-arm64.h" // NOLINT +#include "src/arm64/macro-assembler-arm64-inl.h" #elif V8_TARGET_ARCH_ARM -#include "arm/constants-arm.h" -#include "assembler.h" -#include "arm/assembler-arm.h" -#include "arm/assembler-arm-inl.h" -#include "code.h" // must be after assembler_*.h -#include "arm/macro-assembler-arm.h" +#include "src/arm/constants-arm.h" +#include "src/assembler.h" +#include "src/arm/assembler-arm.h" // NOLINT +#include "src/arm/assembler-arm-inl.h" +#include "src/code.h" // 
NOLINT, must be after assembler_*.h +#include "src/arm/macro-assembler-arm.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/constants-mips.h" -#include "assembler.h" -#include "mips/assembler-mips.h" -#include "mips/assembler-mips-inl.h" -#include "code.h" // must be after assembler_*.h -#include "mips/macro-assembler-mips.h" +#include "src/mips/constants-mips.h" +#include "src/assembler.h" // NOLINT +#include "src/mips/assembler-mips.h" // NOLINT +#include "src/mips/assembler-mips-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/mips/macro-assembler-mips.h" +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/constants-mips64.h" +#include "src/assembler.h" // NOLINT +#include "src/mips64/assembler-mips64.h" // NOLINT +#include "src/mips64/assembler-mips64-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/mips64/macro-assembler-mips64.h" +#elif V8_TARGET_ARCH_X87 +#include "src/assembler.h" +#include "src/x87/assembler-x87.h" +#include "src/x87/assembler-x87-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/x87/macro-assembler-x87.h" #else #error Unsupported target architecture. #endif @@ -124,7 +114,7 @@ // scope, the MacroAssembler is still marked as being in a frame scope, and // the code will be generated again when it goes out of scope. void GenerateLeaveFrame() { - ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); + DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); masm_->LeaveFrame(type_); } diff -Nru nodejs-0.11.13/deps/v8/src/macros.py nodejs-0.11.15/deps/v8/src/macros.py --- nodejs-0.11.13/deps/v8/src/macros.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/macros.py 2015-01-20 21:22:17.000000000 +0000 @@ -87,6 +87,10 @@ const kMinMonth = -10000000; const kMaxMonth = 10000000; +# Strict mode flags for passing to %SetProperty +const kSloppyMode = 0; +const kStrictMode = 1; + # Native cache ids. 
const STRING_TO_REGEXP_CACHE_ID = 0; @@ -97,7 +101,7 @@ # values of 'bar'. macro IS_NULL(arg) = (arg === null); macro IS_NULL_OR_UNDEFINED(arg) = (arg == null); -macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined'); +macro IS_UNDEFINED(arg) = (arg === (void 0)); macro IS_NUMBER(arg) = (typeof(arg) === 'number'); macro IS_STRING(arg) = (typeof(arg) === 'string'); macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean'); @@ -122,6 +126,8 @@ macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer'); macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView'); macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator'); +macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator'); +macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator'); macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg)); macro FLOOR(arg) = $floor(arg); @@ -162,10 +168,12 @@ macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null"); # Private names. +# GET_PRIVATE should only be used if the property is known to exists on obj +# itself (it should really use %GetOwnProperty, but that would be way slower). macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name)); macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name)); macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym)); -macro HAS_PRIVATE(obj, sym) = (sym in obj); +macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym)); macro GET_PRIVATE(obj, sym) = (obj[sym]); macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val); macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]); @@ -272,3 +280,11 @@ const PROPERTY_ATTRIBUTES_STRING = 8; const PROPERTY_ATTRIBUTES_SYMBOLIC = 16; const PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL = 32; + +# Use for keys, values and entries iterators. +const ITERATOR_KIND_KEYS = 1; +const ITERATOR_KIND_VALUES = 2; +const ITERATOR_KIND_ENTRIES = 3; + +# Check whether debug is active. 
+const DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0); diff -Nru nodejs-0.11.13/deps/v8/src/mark-compact.cc nodejs-0.11.15/deps/v8/src/mark-compact.cc --- nodejs-0.11.13/deps/v8/src/mark-compact.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mark-compact.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,4533 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "v8.h" - -#include "code-stubs.h" -#include "compilation-cache.h" -#include "cpu-profiler.h" -#include "deoptimizer.h" -#include "execution.h" -#include "gdb-jit.h" -#include "global-handles.h" -#include "heap-profiler.h" -#include "ic-inl.h" -#include "incremental-marking.h" -#include "mark-compact.h" -#include "objects-visiting.h" -#include "objects-visiting-inl.h" -#include "stub-cache.h" -#include "sweeper-thread.h" - -namespace v8 { -namespace internal { - - -const char* Marking::kWhiteBitPattern = "00"; -const char* Marking::kBlackBitPattern = "10"; -const char* Marking::kGreyBitPattern = "11"; -const char* Marking::kImpossibleBitPattern = "01"; - - -// ------------------------------------------------------------------------- -// MarkCompactCollector - -MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT -#ifdef DEBUG - state_(IDLE), -#endif - sweep_precisely_(false), - reduce_memory_footprint_(false), - abort_incremental_marking_(false), - marking_parity_(ODD_MARKING_PARITY), - compacting_(false), - was_marked_incrementally_(false), - sweeping_pending_(false), - pending_sweeper_jobs_semaphore_(0), - sequential_sweeping_(false), - tracer_(NULL), - migration_slots_buffer_(NULL), - heap_(heap), - code_flusher_(NULL), - encountered_weak_collections_(NULL), - have_code_to_deoptimize_(false) { } - -#ifdef VERIFY_HEAP -class VerifyMarkingVisitor: public ObjectVisitor { - public: - explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} - - void VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - HeapObject* object = HeapObject::cast(*current); - CHECK(heap_->mark_compact_collector()->IsMarked(object)); - } - } - } - - void VisitEmbeddedPointer(RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); - if (!rinfo->host()->IsWeakObject(rinfo->target_object())) { - Object* p = rinfo->target_object(); - VisitPointer(&p); - } - } 
- - void VisitCell(RelocInfo* rinfo) { - Code* code = rinfo->host(); - ASSERT(rinfo->rmode() == RelocInfo::CELL); - if (!code->IsWeakObject(rinfo->target_cell())) { - ObjectVisitor::VisitCell(rinfo); - } - } - - private: - Heap* heap_; -}; - - -static void VerifyMarking(Heap* heap, Address bottom, Address top) { - VerifyMarkingVisitor visitor(heap); - HeapObject* object; - Address next_object_must_be_here_or_later = bottom; - - for (Address current = bottom; - current < top; - current += kPointerSize) { - object = HeapObject::FromAddress(current); - if (MarkCompactCollector::IsMarked(object)) { - CHECK(current >= next_object_must_be_here_or_later); - object->Iterate(&visitor); - next_object_must_be_here_or_later = current + object->Size(); - } - } -} - - -static void VerifyMarking(NewSpace* space) { - Address end = space->top(); - NewSpacePageIterator it(space->bottom(), end); - // The bottom position is at the start of its page. Allows us to use - // page->area_start() as start of range on all pages. - CHECK_EQ(space->bottom(), - NewSpacePage::FromAddress(space->bottom())->area_start()); - while (it.has_next()) { - NewSpacePage* page = it.next(); - Address limit = it.has_next() ? 
page->area_end() : end; - CHECK(limit == end || !page->Contains(end)); - VerifyMarking(space->heap(), page->area_start(), limit); - } -} - - -static void VerifyMarking(PagedSpace* space) { - PageIterator it(space); - - while (it.has_next()) { - Page* p = it.next(); - VerifyMarking(space->heap(), p->area_start(), p->area_end()); - } -} - - -static void VerifyMarking(Heap* heap) { - VerifyMarking(heap->old_pointer_space()); - VerifyMarking(heap->old_data_space()); - VerifyMarking(heap->code_space()); - VerifyMarking(heap->cell_space()); - VerifyMarking(heap->property_cell_space()); - VerifyMarking(heap->map_space()); - VerifyMarking(heap->new_space()); - - VerifyMarkingVisitor visitor(heap); - - LargeObjectIterator it(heap->lo_space()); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - if (MarkCompactCollector::IsMarked(obj)) { - obj->Iterate(&visitor); - } - } - - heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); -} - - -class VerifyEvacuationVisitor: public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - HeapObject* object = HeapObject::cast(*current); - CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); - } - } - } -}; - - -static void VerifyEvacuation(Address bottom, Address top) { - VerifyEvacuationVisitor visitor; - HeapObject* object; - Address next_object_must_be_here_or_later = bottom; - - for (Address current = bottom; - current < top; - current += kPointerSize) { - object = HeapObject::FromAddress(current); - if (MarkCompactCollector::IsMarked(object)) { - CHECK(current >= next_object_must_be_here_or_later); - object->Iterate(&visitor); - next_object_must_be_here_or_later = current + object->Size(); - } - } -} - - -static void VerifyEvacuation(NewSpace* space) { - NewSpacePageIterator it(space->bottom(), space->top()); - VerifyEvacuationVisitor visitor; - - while (it.has_next()) { - 
NewSpacePage* page = it.next(); - Address current = page->area_start(); - Address limit = it.has_next() ? page->area_end() : space->top(); - CHECK(limit == space->top() || !page->Contains(space->top())); - while (current < limit) { - HeapObject* object = HeapObject::FromAddress(current); - object->Iterate(&visitor); - current += object->Size(); - } - } -} - - -static void VerifyEvacuation(PagedSpace* space) { - // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently - // swept pages. - if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) && - space->was_swept_conservatively()) return; - PageIterator it(space); - - while (it.has_next()) { - Page* p = it.next(); - if (p->IsEvacuationCandidate()) continue; - VerifyEvacuation(p->area_start(), p->area_end()); - } -} - - -static void VerifyEvacuation(Heap* heap) { - VerifyEvacuation(heap->old_pointer_space()); - VerifyEvacuation(heap->old_data_space()); - VerifyEvacuation(heap->code_space()); - VerifyEvacuation(heap->cell_space()); - VerifyEvacuation(heap->property_cell_space()); - VerifyEvacuation(heap->map_space()); - VerifyEvacuation(heap->new_space()); - - VerifyEvacuationVisitor visitor; - heap->IterateStrongRoots(&visitor, VISIT_ALL); -} -#endif // VERIFY_HEAP - - -#ifdef DEBUG -class VerifyNativeContextSeparationVisitor: public ObjectVisitor { - public: - VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {} - - void VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - HeapObject* object = HeapObject::cast(*current); - if (object->IsString()) continue; - switch (object->map()->instance_type()) { - case JS_FUNCTION_TYPE: - CheckContext(JSFunction::cast(object)->context()); - break; - case JS_GLOBAL_PROXY_TYPE: - CheckContext(JSGlobalProxy::cast(object)->native_context()); - break; - case JS_GLOBAL_OBJECT_TYPE: - case JS_BUILTINS_OBJECT_TYPE: - 
CheckContext(GlobalObject::cast(object)->native_context()); - break; - case JS_ARRAY_TYPE: - case JS_DATE_TYPE: - case JS_OBJECT_TYPE: - case JS_REGEXP_TYPE: - VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset)); - break; - case MAP_TYPE: - VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset)); - VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset)); - break; - case FIXED_ARRAY_TYPE: - if (object->IsContext()) { - CheckContext(object); - } else { - FixedArray* array = FixedArray::cast(object); - int length = array->length(); - // Set array length to zero to prevent cycles while iterating - // over array bodies, this is easier than intrusive marking. - array->set_length(0); - array->IterateBody( - FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this); - array->set_length(length); - } - break; - case CELL_TYPE: - case JS_PROXY_TYPE: - case JS_VALUE_TYPE: - case TYPE_FEEDBACK_INFO_TYPE: - object->Iterate(this); - break; - case DECLARED_ACCESSOR_INFO_TYPE: - case EXECUTABLE_ACCESSOR_INFO_TYPE: - case BYTE_ARRAY_TYPE: - case CALL_HANDLER_INFO_TYPE: - case CODE_TYPE: - case FIXED_DOUBLE_ARRAY_TYPE: - case HEAP_NUMBER_TYPE: - case INTERCEPTOR_INFO_TYPE: - case ODDBALL_TYPE: - case SCRIPT_TYPE: - case SHARED_FUNCTION_INFO_TYPE: - break; - default: - UNREACHABLE(); - } - } - } - } - - private: - void CheckContext(Object* context) { - if (!context->IsContext()) return; - Context* native_context = Context::cast(context)->native_context(); - if (current_native_context_ == NULL) { - current_native_context_ = native_context; - } else { - CHECK_EQ(current_native_context_, native_context); - } - } - - Context* current_native_context_; -}; - - -static void VerifyNativeContextSeparation(Heap* heap) { - HeapObjectIterator it(heap->code_space()); - - for (Object* object = it.Next(); object != NULL; object = it.Next()) { - VerifyNativeContextSeparationVisitor visitor; - Code::cast(object)->CodeIterateBody(&visitor); - } -} -#endif - - -void 
MarkCompactCollector::SetUp() { - free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space())); - free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space())); -} - - -void MarkCompactCollector::TearDown() { - AbortCompaction(); -} - - -void MarkCompactCollector::AddEvacuationCandidate(Page* p) { - p->MarkEvacuationCandidate(); - evacuation_candidates_.Add(p); -} - - -static void TraceFragmentation(PagedSpace* space) { - int number_of_pages = space->CountTotalPages(); - intptr_t reserved = (number_of_pages * space->AreaSize()); - intptr_t free = reserved - space->SizeOfObjects(); - PrintF("[%s]: %d pages, %d (%.1f%%) free\n", - AllocationSpaceName(space->identity()), - number_of_pages, - static_cast<int>(free), - static_cast<double>(free) * 100 / reserved); -} - - -bool MarkCompactCollector::StartCompaction(CompactionMode mode) { - if (!compacting_) { - ASSERT(evacuation_candidates_.length() == 0); - -#ifdef ENABLE_GDB_JIT_INTERFACE - // If GDBJIT interface is active disable compaction. 
- if (FLAG_gdbjit) return false; -#endif - - CollectEvacuationCandidates(heap()->old_pointer_space()); - CollectEvacuationCandidates(heap()->old_data_space()); - - if (FLAG_compact_code_space && - (mode == NON_INCREMENTAL_COMPACTION || - FLAG_incremental_code_compaction)) { - CollectEvacuationCandidates(heap()->code_space()); - } else if (FLAG_trace_fragmentation) { - TraceFragmentation(heap()->code_space()); - } - - if (FLAG_trace_fragmentation) { - TraceFragmentation(heap()->map_space()); - TraceFragmentation(heap()->cell_space()); - TraceFragmentation(heap()->property_cell_space()); - } - - heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); - heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); - heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); - - compacting_ = evacuation_candidates_.length() > 0; - } - - return compacting_; -} - - -void MarkCompactCollector::CollectGarbage() { - // Make sure that Prepare() has been called. The individual steps below will - // update the state as they proceed. 
- ASSERT(state_ == PREPARE_GC); - ASSERT(encountered_weak_collections_ == Smi::FromInt(0)); - - MarkLiveObjects(); - ASSERT(heap_->incremental_marking()->IsStopped()); - - if (FLAG_collect_maps) ClearNonLiveReferences(); - - ClearWeakCollections(); - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - VerifyMarking(heap_); - } -#endif - - SweepSpaces(); - - if (!FLAG_collect_maps) ReattachInitialMaps(); - -#ifdef DEBUG - if (FLAG_verify_native_context_separation) { - VerifyNativeContextSeparation(heap_); - } -#endif - -#ifdef VERIFY_HEAP - if (heap()->weak_embedded_objects_verification_enabled()) { - VerifyWeakEmbeddedObjectsInOptimizedCode(); - } - if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { - VerifyOmittedMapChecks(); - } -#endif - - Finish(); - - if (marking_parity_ == EVEN_MARKING_PARITY) { - marking_parity_ = ODD_MARKING_PARITY; - } else { - ASSERT(marking_parity_ == ODD_MARKING_PARITY); - marking_parity_ = EVEN_MARKING_PARITY; - } - - tracer_ = NULL; -} - - -#ifdef VERIFY_HEAP -void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { - PageIterator it(space); - - while (it.has_next()) { - Page* p = it.next(); - CHECK(p->markbits()->IsClean()); - CHECK_EQ(0, p->LiveBytes()); - } -} - - -void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { - NewSpacePageIterator it(space->bottom(), space->top()); - - while (it.has_next()) { - NewSpacePage* p = it.next(); - CHECK(p->markbits()->IsClean()); - CHECK_EQ(0, p->LiveBytes()); - } -} - - -void MarkCompactCollector::VerifyMarkbitsAreClean() { - VerifyMarkbitsAreClean(heap_->old_pointer_space()); - VerifyMarkbitsAreClean(heap_->old_data_space()); - VerifyMarkbitsAreClean(heap_->code_space()); - VerifyMarkbitsAreClean(heap_->cell_space()); - VerifyMarkbitsAreClean(heap_->property_cell_space()); - VerifyMarkbitsAreClean(heap_->map_space()); - VerifyMarkbitsAreClean(heap_->new_space()); - - LargeObjectIterator it(heap_->lo_space()); - for (HeapObject* obj = it.Next(); obj != 
NULL; obj = it.Next()) { - MarkBit mark_bit = Marking::MarkBitFrom(obj); - CHECK(Marking::IsWhite(mark_bit)); - CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes()); - } -} - - -void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() { - HeapObjectIterator code_iterator(heap()->code_space()); - for (HeapObject* obj = code_iterator.Next(); - obj != NULL; - obj = code_iterator.Next()) { - Code* code = Code::cast(obj); - if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; - if (WillBeDeoptimized(code)) continue; - code->VerifyEmbeddedObjectsDependency(); - } -} - - -void MarkCompactCollector::VerifyOmittedMapChecks() { - HeapObjectIterator iterator(heap()->map_space()); - for (HeapObject* obj = iterator.Next(); - obj != NULL; - obj = iterator.Next()) { - Map* map = Map::cast(obj); - map->VerifyOmittedMapChecks(); - } -} -#endif // VERIFY_HEAP - - -static void ClearMarkbitsInPagedSpace(PagedSpace* space) { - PageIterator it(space); - - while (it.has_next()) { - Bitmap::Clear(it.next()); - } -} - - -static void ClearMarkbitsInNewSpace(NewSpace* space) { - NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); - - while (it.has_next()) { - Bitmap::Clear(it.next()); - } -} - - -void MarkCompactCollector::ClearMarkbits() { - ClearMarkbitsInPagedSpace(heap_->code_space()); - ClearMarkbitsInPagedSpace(heap_->map_space()); - ClearMarkbitsInPagedSpace(heap_->old_pointer_space()); - ClearMarkbitsInPagedSpace(heap_->old_data_space()); - ClearMarkbitsInPagedSpace(heap_->cell_space()); - ClearMarkbitsInPagedSpace(heap_->property_cell_space()); - ClearMarkbitsInNewSpace(heap_->new_space()); - - LargeObjectIterator it(heap_->lo_space()); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - MarkBit mark_bit = Marking::MarkBitFrom(obj); - mark_bit.Clear(); - mark_bit.Next().Clear(); - Page::FromAddress(obj->address())->ResetProgressBar(); - Page::FromAddress(obj->address())->ResetLiveBytes(); - } -} - - -class 
MarkCompactCollector::SweeperTask : public v8::Task { - public: - SweeperTask(Heap* heap, PagedSpace* space) - : heap_(heap), space_(space) {} - - virtual ~SweeperTask() {} - - private: - // v8::Task overrides. - virtual void Run() V8_OVERRIDE { - heap_->mark_compact_collector()->SweepInParallel(space_); - heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal(); - } - - Heap* heap_; - PagedSpace* space_; - - DISALLOW_COPY_AND_ASSIGN(SweeperTask); -}; - - -void MarkCompactCollector::StartSweeperThreads() { - // TODO(hpayer): This check is just used for debugging purpose and - // should be removed or turned into an assert after investigating the - // crash in concurrent sweeping. - CHECK(free_list_old_pointer_space_.get()->IsEmpty()); - CHECK(free_list_old_data_space_.get()->IsEmpty()); - sweeping_pending_ = true; - for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { - isolate()->sweeper_threads()[i]->StartSweeping(); - } - if (FLAG_job_based_sweeping) { - V8::GetCurrentPlatform()->CallOnBackgroundThread( - new SweeperTask(heap(), heap()->old_data_space()), - v8::Platform::kShortRunningTask); - V8::GetCurrentPlatform()->CallOnBackgroundThread( - new SweeperTask(heap(), heap()->old_pointer_space()), - v8::Platform::kShortRunningTask); - } -} - - -void MarkCompactCollector::WaitUntilSweepingCompleted() { - ASSERT(sweeping_pending_ == true); - for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { - isolate()->sweeper_threads()[i]->WaitForSweeperThread(); - } - if (FLAG_job_based_sweeping) { - // Wait twice for both jobs. 
- pending_sweeper_jobs_semaphore_.Wait(); - pending_sweeper_jobs_semaphore_.Wait(); - } - ParallelSweepSpacesComplete(); - sweeping_pending_ = false; - RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE)); - RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE)); - heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); - heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); -} - - -intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) { - FreeList* free_list; - - if (space == heap()->old_pointer_space()) { - free_list = free_list_old_pointer_space_.get(); - } else if (space == heap()->old_data_space()) { - free_list = free_list_old_data_space_.get(); - } else { - // Any PagedSpace might invoke RefillFreeLists, so we need to make sure - // to only refill them for old data and pointer spaces. - return 0; - } - - intptr_t freed_bytes = space->free_list()->Concatenate(free_list); - space->AddToAccountingStats(freed_bytes); - space->DecrementUnsweptFreeBytes(freed_bytes); - return freed_bytes; -} - - -bool MarkCompactCollector::AreSweeperThreadsActivated() { - return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping; -} - - -bool MarkCompactCollector::IsConcurrentSweepingInProgress() { - return sweeping_pending_; -} - - -void Marking::TransferMark(Address old_start, Address new_start) { - // This is only used when resizing an object. - ASSERT(MemoryChunk::FromAddress(old_start) == - MemoryChunk::FromAddress(new_start)); - - if (!heap_->incremental_marking()->IsMarking()) return; - - // If the mark doesn't move, we don't check the color of the object. - // It doesn't matter whether the object is black, since it hasn't changed - // size, so the adjustment to the live data count will be zero anyway. 
- if (old_start == new_start) return; - - MarkBit new_mark_bit = MarkBitFrom(new_start); - MarkBit old_mark_bit = MarkBitFrom(old_start); - -#ifdef DEBUG - ObjectColor old_color = Color(old_mark_bit); -#endif - - if (Marking::IsBlack(old_mark_bit)) { - old_mark_bit.Clear(); - ASSERT(IsWhite(old_mark_bit)); - Marking::MarkBlack(new_mark_bit); - return; - } else if (Marking::IsGrey(old_mark_bit)) { - old_mark_bit.Clear(); - old_mark_bit.Next().Clear(); - ASSERT(IsWhite(old_mark_bit)); - heap_->incremental_marking()->WhiteToGreyAndPush( - HeapObject::FromAddress(new_start), new_mark_bit); - heap_->incremental_marking()->RestartIfNotMarking(); - } - -#ifdef DEBUG - ObjectColor new_color = Color(new_mark_bit); - ASSERT(new_color == old_color); -#endif -} - - -const char* AllocationSpaceName(AllocationSpace space) { - switch (space) { - case NEW_SPACE: return "NEW_SPACE"; - case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; - case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; - case CODE_SPACE: return "CODE_SPACE"; - case MAP_SPACE: return "MAP_SPACE"; - case CELL_SPACE: return "CELL_SPACE"; - case PROPERTY_CELL_SPACE: - return "PROPERTY_CELL_SPACE"; - case LO_SPACE: return "LO_SPACE"; - default: - UNREACHABLE(); - } - - return NULL; -} - - -// Returns zero for pages that have so little fragmentation that it is not -// worth defragmenting them. Otherwise a positive integer that gives an -// estimate of fragmentation on an arbitrary scale. -static int FreeListFragmentation(PagedSpace* space, Page* p) { - // If page was not swept then there are no free list items on it. 
- if (!p->WasSwept()) { - if (FLAG_trace_fragmentation) { - PrintF("%p [%s]: %d bytes live (unswept)\n", - reinterpret_cast<void*>(p), - AllocationSpaceName(space->identity()), - p->LiveBytes()); - } - return 0; - } - - PagedSpace::SizeStats sizes; - space->ObtainFreeListStatistics(p, &sizes); - - intptr_t ratio; - intptr_t ratio_threshold; - intptr_t area_size = space->AreaSize(); - if (space->identity() == CODE_SPACE) { - ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / - area_size; - ratio_threshold = 10; - } else { - ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / - area_size; - ratio_threshold = 15; - } - - if (FLAG_trace_fragmentation) { - PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", - reinterpret_cast<void*>(p), - AllocationSpaceName(space->identity()), - static_cast<int>(sizes.small_size_), - static_cast<double>(sizes.small_size_ * 100) / - area_size, - static_cast<int>(sizes.medium_size_), - static_cast<double>(sizes.medium_size_ * 100) / - area_size, - static_cast<int>(sizes.large_size_), - static_cast<double>(sizes.large_size_ * 100) / - area_size, - static_cast<int>(sizes.huge_size_), - static_cast<double>(sizes.huge_size_ * 100) / - area_size, - (ratio > ratio_threshold) ? "[fragmented]" : ""); - } - - if (FLAG_always_compact && sizes.Total() != area_size) { - return 1; - } - - if (ratio <= ratio_threshold) return 0; // Not fragmented. 
- - return static_cast<int>(ratio - ratio_threshold); -} - - -void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { - ASSERT(space->identity() == OLD_POINTER_SPACE || - space->identity() == OLD_DATA_SPACE || - space->identity() == CODE_SPACE); - - static const int kMaxMaxEvacuationCandidates = 1000; - int number_of_pages = space->CountTotalPages(); - int max_evacuation_candidates = - static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1); - - if (FLAG_stress_compaction || FLAG_always_compact) { - max_evacuation_candidates = kMaxMaxEvacuationCandidates; - } - - class Candidate { - public: - Candidate() : fragmentation_(0), page_(NULL) { } - Candidate(int f, Page* p) : fragmentation_(f), page_(p) { } - - int fragmentation() { return fragmentation_; } - Page* page() { return page_; } - - private: - int fragmentation_; - Page* page_; - }; - - enum CompactionMode { - COMPACT_FREE_LISTS, - REDUCE_MEMORY_FOOTPRINT - }; - - CompactionMode mode = COMPACT_FREE_LISTS; - - intptr_t reserved = number_of_pages * space->AreaSize(); - intptr_t over_reserved = reserved - space->SizeOfObjects(); - static const intptr_t kFreenessThreshold = 50; - - if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) { - // If reduction of memory footprint was requested, we are aggressive - // about choosing pages to free. We expect that half-empty pages - // are easier to compact so slightly bump the limit. - mode = REDUCE_MEMORY_FOOTPRINT; - max_evacuation_candidates += 2; - } - - - if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { - // If over-usage is very high (more than a third of the space), we - // try to free all mostly empty pages. We expect that almost empty - // pages are even easier to compact so bump the limit even more. 
- mode = REDUCE_MEMORY_FOOTPRINT; - max_evacuation_candidates *= 2; - } - - if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { - PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " - "evacuation candidate limit: %d\n", - static_cast<double>(over_reserved) / MB, - static_cast<double>(reserved) / MB, - static_cast<int>(kFreenessThreshold), - max_evacuation_candidates); - } - - intptr_t estimated_release = 0; - - Candidate candidates[kMaxMaxEvacuationCandidates]; - - max_evacuation_candidates = - Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); - - int count = 0; - int fragmentation = 0; - Candidate* least = NULL; - - PageIterator it(space); - if (it.has_next()) it.next(); // Never compact the first page. - - while (it.has_next()) { - Page* p = it.next(); - p->ClearEvacuationCandidate(); - - if (FLAG_stress_compaction) { - unsigned int counter = space->heap()->ms_count(); - uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; - if ((counter & 1) == (page_number & 1)) fragmentation = 1; - } else if (mode == REDUCE_MEMORY_FOOTPRINT) { - // Don't try to release too many pages. - if (estimated_release >= over_reserved) { - continue; - } - - intptr_t free_bytes = 0; - - if (!p->WasSwept()) { - free_bytes = (p->area_size() - p->LiveBytes()); - } else { - PagedSpace::SizeStats sizes; - space->ObtainFreeListStatistics(p, &sizes); - free_bytes = sizes.Total(); - } - - int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); - - if (free_pct >= kFreenessThreshold) { - estimated_release += free_bytes; - fragmentation = free_pct; - } else { - fragmentation = 0; - } - - if (FLAG_trace_fragmentation) { - PrintF("%p [%s]: %d (%.2f%%) free %s\n", - reinterpret_cast<void*>(p), - AllocationSpaceName(space->identity()), - static_cast<int>(free_bytes), - static_cast<double>(free_bytes * 100) / p->area_size(), - (fragmentation > 0) ? 
"[fragmented]" : ""); - } - } else { - fragmentation = FreeListFragmentation(space, p); - } - - if (fragmentation != 0) { - if (count < max_evacuation_candidates) { - candidates[count++] = Candidate(fragmentation, p); - } else { - if (least == NULL) { - for (int i = 0; i < max_evacuation_candidates; i++) { - if (least == NULL || - candidates[i].fragmentation() < least->fragmentation()) { - least = candidates + i; - } - } - } - if (least->fragmentation() < fragmentation) { - *least = Candidate(fragmentation, p); - least = NULL; - } - } - } - } - - for (int i = 0; i < count; i++) { - AddEvacuationCandidate(candidates[i].page()); - } - - if (count > 0 && FLAG_trace_fragmentation) { - PrintF("Collected %d evacuation candidates for space %s\n", - count, - AllocationSpaceName(space->identity())); - } -} - - -void MarkCompactCollector::AbortCompaction() { - if (compacting_) { - int npages = evacuation_candidates_.length(); - for (int i = 0; i < npages; i++) { - Page* p = evacuation_candidates_[i]; - slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); - p->ClearEvacuationCandidate(); - p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); - } - compacting_ = false; - evacuation_candidates_.Rewind(0); - invalidated_code_.Rewind(0); - } - ASSERT_EQ(0, evacuation_candidates_.length()); -} - - -void MarkCompactCollector::Prepare(GCTracer* tracer) { - was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); - - // Rather than passing the tracer around we stash it in a static member - // variable. - tracer_ = tracer; - -#ifdef DEBUG - ASSERT(state_ == IDLE); - state_ = PREPARE_GC; -#endif - - ASSERT(!FLAG_never_compact || !FLAG_always_compact); - - if (IsConcurrentSweepingInProgress()) { - // Instead of waiting we could also abort the sweeper threads here. - WaitUntilSweepingCompleted(); - } - - // Clear marking bits if incremental marking is aborted. 
- if (was_marked_incrementally_ && abort_incremental_marking_) { - heap()->incremental_marking()->Abort(); - ClearMarkbits(); - AbortCompaction(); - was_marked_incrementally_ = false; - } - - // Don't start compaction if we are in the middle of incremental - // marking cycle. We did not collect any slots. - if (!FLAG_never_compact && !was_marked_incrementally_) { - StartCompaction(NON_INCREMENTAL_COMPACTION); - } - - PagedSpaces spaces(heap()); - for (PagedSpace* space = spaces.next(); - space != NULL; - space = spaces.next()) { - space->PrepareForMarkCompact(); - } - -#ifdef VERIFY_HEAP - if (!was_marked_incrementally_ && FLAG_verify_heap) { - VerifyMarkbitsAreClean(); - } -#endif -} - - -void MarkCompactCollector::Finish() { -#ifdef DEBUG - ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); - state_ = IDLE; -#endif - // The stub cache is not traversed during GC; clear the cache to - // force lazy re-initialization of it. This must be done after the - // GC, because it relies on the new address of certain old space - // objects (empty string, illegal builtin). - isolate()->stub_cache()->Clear(); - - if (have_code_to_deoptimize_) { - // Some code objects were marked for deoptimization during the GC. - Deoptimizer::DeoptimizeMarkedCode(isolate()); - have_code_to_deoptimize_ = false; - } -} - - -// ------------------------------------------------------------------------- -// Phase 1: tracing and marking live objects. -// before: all objects are in normal state. -// after: a live object's map pointer is marked as '00'. - -// Marking all live objects in the heap as part of mark-sweep or mark-compact -// collection. Before marking, all objects are in their normal state. After -// marking, live objects' map pointers are marked indicating that the object -// has been found reachable. -// -// The marking algorithm is a (mostly) depth-first (because of possible stack -// overflow) traversal of the graph of objects reachable from the roots. 
It -// uses an explicit stack of pointers rather than recursion. The young -// generation's inactive ('from') space is used as a marking stack. The -// objects in the marking stack are the ones that have been reached and marked -// but their children have not yet been visited. -// -// The marking stack can overflow during traversal. In that case, we set an -// overflow flag. When the overflow flag is set, we continue marking objects -// reachable from the objects on the marking stack, but no longer push them on -// the marking stack. Instead, we mark them as both marked and overflowed. -// When the stack is in the overflowed state, objects marked as overflowed -// have been reached and marked but their children have not been visited yet. -// After emptying the marking stack, we clear the overflow flag and traverse -// the heap looking for objects marked as overflowed, push them on the stack, -// and continue with marking. This process repeats until all reachable -// objects have been marked. 
- -void CodeFlusher::ProcessJSFunctionCandidates() { - Code* lazy_compile = - isolate_->builtins()->builtin(Builtins::kCompileUnoptimized); - Object* undefined = isolate_->heap()->undefined_value(); - - JSFunction* candidate = jsfunction_candidates_head_; - JSFunction* next_candidate; - while (candidate != NULL) { - next_candidate = GetNextCandidate(candidate); - ClearNextCandidate(candidate, undefined); - - SharedFunctionInfo* shared = candidate->shared(); - - Code* code = shared->code(); - MarkBit code_mark = Marking::MarkBitFrom(code); - if (!code_mark.Get()) { - if (FLAG_trace_code_flushing && shared->is_compiled()) { - PrintF("[code-flushing clears: "); - shared->ShortPrint(); - PrintF(" - age: %d]\n", code->GetAge()); - } - shared->set_code(lazy_compile); - candidate->set_code(lazy_compile); - } else { - candidate->set_code(code); - } - - // We are in the middle of a GC cycle so the write barrier in the code - // setter did not record the slot update and we have to do that manually. 
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset; - Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot)); - isolate_->heap()->mark_compact_collector()-> - RecordCodeEntrySlot(slot, target); - - Object** shared_code_slot = - HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset); - isolate_->heap()->mark_compact_collector()-> - RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot); - - candidate = next_candidate; - } - - jsfunction_candidates_head_ = NULL; -} - - -void CodeFlusher::ProcessSharedFunctionInfoCandidates() { - Code* lazy_compile = - isolate_->builtins()->builtin(Builtins::kCompileUnoptimized); - - SharedFunctionInfo* candidate = shared_function_info_candidates_head_; - SharedFunctionInfo* next_candidate; - while (candidate != NULL) { - next_candidate = GetNextCandidate(candidate); - ClearNextCandidate(candidate); - - Code* code = candidate->code(); - MarkBit code_mark = Marking::MarkBitFrom(code); - if (!code_mark.Get()) { - if (FLAG_trace_code_flushing && candidate->is_compiled()) { - PrintF("[code-flushing clears: "); - candidate->ShortPrint(); - PrintF(" - age: %d]\n", code->GetAge()); - } - candidate->set_code(lazy_compile); - } - - Object** code_slot = - HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset); - isolate_->heap()->mark_compact_collector()-> - RecordSlot(code_slot, code_slot, *code_slot); - - candidate = next_candidate; - } - - shared_function_info_candidates_head_ = NULL; -} - - -void CodeFlusher::ProcessOptimizedCodeMaps() { - STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4); - - SharedFunctionInfo* holder = optimized_code_map_holder_head_; - SharedFunctionInfo* next_holder; - - while (holder != NULL) { - next_holder = GetNextCodeMap(holder); - ClearNextCodeMap(holder); - - FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); - int new_length = SharedFunctionInfo::kEntriesStart; - int old_length = code_map->length(); - for (int i = 
SharedFunctionInfo::kEntriesStart; - i < old_length; - i += SharedFunctionInfo::kEntryLength) { - Code* code = - Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset)); - if (!Marking::MarkBitFrom(code).Get()) continue; - - // Move every slot in the entry. - for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) { - int dst_index = new_length++; - Object** slot = code_map->RawFieldOfElementAt(dst_index); - Object* object = code_map->get(i + j); - code_map->set(dst_index, object); - if (j == SharedFunctionInfo::kOsrAstIdOffset) { - ASSERT(object->IsSmi()); - } else { - ASSERT(Marking::IsBlack( - Marking::MarkBitFrom(HeapObject::cast(*slot)))); - isolate_->heap()->mark_compact_collector()-> - RecordSlot(slot, slot, *slot); - } - } - } - - // Trim the optimized code map if entries have been removed. - if (new_length < old_length) { - holder->TrimOptimizedCodeMap(old_length - new_length); - } - - holder = next_holder; - } - - optimized_code_map_holder_head_ = NULL; -} - - -void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) { - // Make sure previous flushing decisions are revisited. 
- isolate_->heap()->incremental_marking()->RecordWrites(shared_info); - - if (FLAG_trace_code_flushing) { - PrintF("[code-flushing abandons function-info: "); - shared_info->ShortPrint(); - PrintF("]\n"); - } - - SharedFunctionInfo* candidate = shared_function_info_candidates_head_; - SharedFunctionInfo* next_candidate; - if (candidate == shared_info) { - next_candidate = GetNextCandidate(shared_info); - shared_function_info_candidates_head_ = next_candidate; - ClearNextCandidate(shared_info); - } else { - while (candidate != NULL) { - next_candidate = GetNextCandidate(candidate); - - if (next_candidate == shared_info) { - next_candidate = GetNextCandidate(shared_info); - SetNextCandidate(candidate, next_candidate); - ClearNextCandidate(shared_info); - break; - } - - candidate = next_candidate; - } - } -} - - -void CodeFlusher::EvictCandidate(JSFunction* function) { - ASSERT(!function->next_function_link()->IsUndefined()); - Object* undefined = isolate_->heap()->undefined_value(); - - // Make sure previous flushing decisions are revisited. 
- isolate_->heap()->incremental_marking()->RecordWrites(function); - isolate_->heap()->incremental_marking()->RecordWrites(function->shared()); - - if (FLAG_trace_code_flushing) { - PrintF("[code-flushing abandons closure: "); - function->shared()->ShortPrint(); - PrintF("]\n"); - } - - JSFunction* candidate = jsfunction_candidates_head_; - JSFunction* next_candidate; - if (candidate == function) { - next_candidate = GetNextCandidate(function); - jsfunction_candidates_head_ = next_candidate; - ClearNextCandidate(function, undefined); - } else { - while (candidate != NULL) { - next_candidate = GetNextCandidate(candidate); - - if (next_candidate == function) { - next_candidate = GetNextCandidate(function); - SetNextCandidate(candidate, next_candidate); - ClearNextCandidate(function, undefined); - break; - } - - candidate = next_candidate; - } - } -} - - -void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) { - ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())-> - get(SharedFunctionInfo::kNextMapIndex)->IsUndefined()); - - // Make sure previous flushing decisions are revisited. 
- isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder); - - if (FLAG_trace_code_flushing) { - PrintF("[code-flushing abandons code-map: "); - code_map_holder->ShortPrint(); - PrintF("]\n"); - } - - SharedFunctionInfo* holder = optimized_code_map_holder_head_; - SharedFunctionInfo* next_holder; - if (holder == code_map_holder) { - next_holder = GetNextCodeMap(code_map_holder); - optimized_code_map_holder_head_ = next_holder; - ClearNextCodeMap(code_map_holder); - } else { - while (holder != NULL) { - next_holder = GetNextCodeMap(holder); - - if (next_holder == code_map_holder) { - next_holder = GetNextCodeMap(code_map_holder); - SetNextCodeMap(holder, next_holder); - ClearNextCodeMap(code_map_holder); - break; - } - - holder = next_holder; - } - } -} - - -void CodeFlusher::EvictJSFunctionCandidates() { - JSFunction* candidate = jsfunction_candidates_head_; - JSFunction* next_candidate; - while (candidate != NULL) { - next_candidate = GetNextCandidate(candidate); - EvictCandidate(candidate); - candidate = next_candidate; - } - ASSERT(jsfunction_candidates_head_ == NULL); -} - - -void CodeFlusher::EvictSharedFunctionInfoCandidates() { - SharedFunctionInfo* candidate = shared_function_info_candidates_head_; - SharedFunctionInfo* next_candidate; - while (candidate != NULL) { - next_candidate = GetNextCandidate(candidate); - EvictCandidate(candidate); - candidate = next_candidate; - } - ASSERT(shared_function_info_candidates_head_ == NULL); -} - - -void CodeFlusher::EvictOptimizedCodeMaps() { - SharedFunctionInfo* holder = optimized_code_map_holder_head_; - SharedFunctionInfo* next_holder; - while (holder != NULL) { - next_holder = GetNextCodeMap(holder); - EvictOptimizedCodeMap(holder); - holder = next_holder; - } - ASSERT(optimized_code_map_holder_head_ == NULL); -} - - -void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) { - Heap* heap = isolate_->heap(); - - JSFunction** slot = &jsfunction_candidates_head_; - JSFunction* candidate = 
jsfunction_candidates_head_; - while (candidate != NULL) { - if (heap->InFromSpace(candidate)) { - v->VisitPointer(reinterpret_cast<Object**>(slot)); - } - candidate = GetNextCandidate(*slot); - slot = GetNextCandidateSlot(*slot); - } -} - - -MarkCompactCollector::~MarkCompactCollector() { - if (code_flusher_ != NULL) { - delete code_flusher_; - code_flusher_ = NULL; - } -} - - -static inline HeapObject* ShortCircuitConsString(Object** p) { - // Optimization: If the heap object pointed to by p is a non-internalized - // cons string whose right substring is HEAP->empty_string, update - // it in place to its left substring. Return the updated value. - // - // Here we assume that if we change *p, we replace it with a heap object - // (i.e., the left substring of a cons string is always a heap object). - // - // The check performed is: - // object->IsConsString() && !object->IsInternalizedString() && - // (ConsString::cast(object)->second() == HEAP->empty_string()) - // except the maps for the object and its possible substrings might be - // marked. - HeapObject* object = HeapObject::cast(*p); - if (!FLAG_clever_optimizations) return object; - Map* map = object->map(); - InstanceType type = map->instance_type(); - if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; - - Object* second = reinterpret_cast<ConsString*>(object)->second(); - Heap* heap = map->GetHeap(); - if (second != heap->empty_string()) { - return object; - } - - // Since we don't have the object's start, it is impossible to update the - // page dirty marks. Therefore, we only replace the string with its left - // substring when page dirty marks do not change. 
- Object* first = reinterpret_cast<ConsString*>(object)->first(); - if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object; - - *p = first; - return HeapObject::cast(first); -} - - -class MarkCompactMarkingVisitor - : public StaticMarkingVisitor<MarkCompactMarkingVisitor> { - public: - static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, - Map* map, HeapObject* obj); - - static void ObjectStatsCountFixedArray( - FixedArrayBase* fixed_array, - FixedArraySubInstanceType fast_type, - FixedArraySubInstanceType dictionary_type); - - template<MarkCompactMarkingVisitor::VisitorId id> - class ObjectStatsTracker { - public: - static inline void Visit(Map* map, HeapObject* obj); - }; - - static void Initialize(); - - INLINE(static void VisitPointer(Heap* heap, Object** p)) { - MarkObjectByPointer(heap->mark_compact_collector(), p, p); - } - - INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { - // Mark all objects pointed to in [start, end). - const int kMinRangeForMarkingRecursion = 64; - if (end - start >= kMinRangeForMarkingRecursion) { - if (VisitUnmarkedObjects(heap, start, end)) return; - // We are close to a stack overflow, so just mark the objects. - } - MarkCompactCollector* collector = heap->mark_compact_collector(); - for (Object** p = start; p < end; p++) { - MarkObjectByPointer(collector, start, p); - } - } - - // Marks the object black and pushes it on the marking stack. - INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { - MarkBit mark = Marking::MarkBitFrom(object); - heap->mark_compact_collector()->MarkObject(object, mark); - } - - // Marks the object black without pushing it on the marking stack. - // Returns true if object needed marking and false otherwise. 
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { - MarkBit mark_bit = Marking::MarkBitFrom(object); - if (!mark_bit.Get()) { - heap->mark_compact_collector()->SetMark(object, mark_bit); - return true; - } - return false; - } - - // Mark object pointed to by p. - INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, - Object** anchor_slot, - Object** p)) { - if (!(*p)->IsHeapObject()) return; - HeapObject* object = ShortCircuitConsString(p); - collector->RecordSlot(anchor_slot, p, object); - MarkBit mark = Marking::MarkBitFrom(object); - collector->MarkObject(object, mark); - } - - - // Visit an unmarked object. - INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, - HeapObject* obj)) { -#ifdef DEBUG - ASSERT(collector->heap()->Contains(obj)); - ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj)); -#endif - Map* map = obj->map(); - Heap* heap = obj->GetHeap(); - MarkBit mark = Marking::MarkBitFrom(obj); - heap->mark_compact_collector()->SetMark(obj, mark); - // Mark the map pointer and the body. - MarkBit map_mark = Marking::MarkBitFrom(map); - heap->mark_compact_collector()->MarkObject(map, map_mark); - IterateBody(map, obj); - } - - // Visit all unmarked objects pointed to by [start, end). - // Returns false if the operation fails (lack of stack space). - INLINE(static bool VisitUnmarkedObjects(Heap* heap, - Object** start, - Object** end)) { - // Return false is we are close to the stack limit. - StackLimitCheck check(heap->isolate()); - if (check.HasOverflowed()) return false; - - MarkCompactCollector* collector = heap->mark_compact_collector(); - // Visit the unmarked objects. 
- for (Object** p = start; p < end; p++) { - Object* o = *p; - if (!o->IsHeapObject()) continue; - collector->RecordSlot(start, p, o); - HeapObject* obj = HeapObject::cast(o); - MarkBit mark = Marking::MarkBitFrom(obj); - if (mark.Get()) continue; - VisitUnmarkedObject(collector, obj); - } - return true; - } - - INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) { - SharedFunctionInfo* shared = SharedFunctionInfo::cast(object); - shared->BeforeVisitingPointers(); - } - - static void VisitWeakCollection(Map* map, HeapObject* object) { - MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); - JSWeakCollection* weak_collection = - reinterpret_cast<JSWeakCollection*>(object); - - // Enqueue weak map in linked list of encountered weak maps. - if (weak_collection->next() == Smi::FromInt(0)) { - weak_collection->set_next(collector->encountered_weak_collections()); - collector->set_encountered_weak_collections(weak_collection); - } - - // Skip visiting the backing hash table containing the mappings. - int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object); - BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers( - map->GetHeap(), - object, - JSWeakCollection::BodyDescriptor::kStartOffset, - JSWeakCollection::kTableOffset); - BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers( - map->GetHeap(), - object, - JSWeakCollection::kTableOffset + kPointerSize, - object_size); - - // Mark the backing hash table without pushing it on the marking stack. 
- Object* table_object = weak_collection->table(); - if (!table_object->IsHashTable()) return; - WeakHashTable* table = WeakHashTable::cast(table_object); - Object** table_slot = - HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset); - MarkBit table_mark = Marking::MarkBitFrom(table); - collector->RecordSlot(table_slot, table_slot, table); - if (!table_mark.Get()) collector->SetMark(table, table_mark); - // Recording the map slot can be skipped, because maps are not compacted. - collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map())); - ASSERT(MarkCompactCollector::IsMarked(table->map())); - } - - private: - template<int id> - static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj); - - // Code flushing support. - - static const int kRegExpCodeThreshold = 5; - - static void UpdateRegExpCodeAgeAndFlush(Heap* heap, - JSRegExp* re, - bool is_ascii) { - // Make sure that the fixed array is in fact initialized on the RegExp. - // We could potentially trigger a GC when initializing the RegExp. - if (HeapObject::cast(re->data())->map()->instance_type() != - FIXED_ARRAY_TYPE) return; - - // Make sure this is a RegExp that actually contains code. - if (re->TypeTag() != JSRegExp::IRREGEXP) return; - - Object* code = re->DataAt(JSRegExp::code_index(is_ascii)); - if (!code->IsSmi() && - HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { - // Save a copy that can be reinstated if we need the code again. - re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code); - - // Saving a copy might create a pointer into compaction candidate - // that was not observed by marker. This might happen if JSRegExp data - // was marked through the compilation cache before marker reached JSRegExp - // object. 
- FixedArray* data = FixedArray::cast(re->data()); - Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii); - heap->mark_compact_collector()-> - RecordSlot(slot, slot, code); - - // Set a number in the 0-255 range to guarantee no smi overflow. - re->SetDataAt(JSRegExp::code_index(is_ascii), - Smi::FromInt(heap->sweep_generation() & 0xff)); - } else if (code->IsSmi()) { - int value = Smi::cast(code)->value(); - // The regexp has not been compiled yet or there was a compilation error. - if (value == JSRegExp::kUninitializedValue || - value == JSRegExp::kCompilationErrorValue) { - return; - } - - // Check if we should flush now. - if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) { - re->SetDataAt(JSRegExp::code_index(is_ascii), - Smi::FromInt(JSRegExp::kUninitializedValue)); - re->SetDataAt(JSRegExp::saved_code_index(is_ascii), - Smi::FromInt(JSRegExp::kUninitializedValue)); - } - } - } - - - // Works by setting the current sweep_generation (as a smi) in the - // code object place in the data array of the RegExp and keeps a copy - // around that can be reinstated if we reuse the RegExp before flushing. - // If we did not use the code for kRegExpCodeThreshold mark sweep GCs - // we flush the code. - static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - MarkCompactCollector* collector = heap->mark_compact_collector(); - if (!collector->is_code_flushing_enabled()) { - VisitJSRegExp(map, object); - return; - } - JSRegExp* re = reinterpret_cast<JSRegExp*>(object); - // Flush code or set age on both ASCII and two byte code. - UpdateRegExpCodeAgeAndFlush(heap, re, true); - UpdateRegExpCodeAgeAndFlush(heap, re, false); - // Visit the fields of the RegExp, including the updated FixedArray. 
- VisitJSRegExp(map, object); - } - - static VisitorDispatchTable<Callback> non_count_table_; -}; - - -void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( - FixedArrayBase* fixed_array, - FixedArraySubInstanceType fast_type, - FixedArraySubInstanceType dictionary_type) { - Heap* heap = fixed_array->map()->GetHeap(); - if (fixed_array->map() != heap->fixed_cow_array_map() && - fixed_array->map() != heap->fixed_double_array_map() && - fixed_array != heap->empty_fixed_array()) { - if (fixed_array->IsDictionary()) { - heap->RecordFixedArraySubTypeStats(dictionary_type, - fixed_array->Size()); - } else { - heap->RecordFixedArraySubTypeStats(fast_type, - fixed_array->Size()); - } - } -} - - -void MarkCompactMarkingVisitor::ObjectStatsVisitBase( - MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) { - Heap* heap = map->GetHeap(); - int object_size = obj->Size(); - heap->RecordObjectStats(map->instance_type(), object_size); - non_count_table_.GetVisitorById(id)(map, obj); - if (obj->IsJSObject()) { - JSObject* object = JSObject::cast(obj); - ObjectStatsCountFixedArray(object->elements(), - DICTIONARY_ELEMENTS_SUB_TYPE, - FAST_ELEMENTS_SUB_TYPE); - ObjectStatsCountFixedArray(object->properties(), - DICTIONARY_PROPERTIES_SUB_TYPE, - FAST_PROPERTIES_SUB_TYPE); - } -} - - -template<MarkCompactMarkingVisitor::VisitorId id> -void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit( - Map* map, HeapObject* obj) { - ObjectStatsVisitBase(id, map, obj); -} - - -template<> -class MarkCompactMarkingVisitor::ObjectStatsTracker< - MarkCompactMarkingVisitor::kVisitMap> { - public: - static inline void Visit(Map* map, HeapObject* obj) { - Heap* heap = map->GetHeap(); - Map* map_obj = Map::cast(obj); - ASSERT(map->instance_type() == MAP_TYPE); - DescriptorArray* array = map_obj->instance_descriptors(); - if (map_obj->owns_descriptors() && - array != heap->empty_descriptor_array()) { - int fixed_array_size = array->Size(); - 
heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE, - fixed_array_size); - } - if (map_obj->HasTransitionArray()) { - int fixed_array_size = map_obj->transitions()->Size(); - heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE, - fixed_array_size); - } - if (map_obj->has_code_cache()) { - CodeCache* cache = CodeCache::cast(map_obj->code_cache()); - heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE, - cache->default_cache()->Size()); - if (!cache->normal_type_cache()->IsUndefined()) { - heap->RecordFixedArraySubTypeStats( - MAP_CODE_CACHE_SUB_TYPE, - FixedArray::cast(cache->normal_type_cache())->Size()); - } - } - ObjectStatsVisitBase(kVisitMap, map, obj); - } -}; - - -template<> -class MarkCompactMarkingVisitor::ObjectStatsTracker< - MarkCompactMarkingVisitor::kVisitCode> { - public: - static inline void Visit(Map* map, HeapObject* obj) { - Heap* heap = map->GetHeap(); - int object_size = obj->Size(); - ASSERT(map->instance_type() == CODE_TYPE); - Code* code_obj = Code::cast(obj); - heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(), - object_size); - ObjectStatsVisitBase(kVisitCode, map, obj); - } -}; - - -template<> -class MarkCompactMarkingVisitor::ObjectStatsTracker< - MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> { - public: - static inline void Visit(Map* map, HeapObject* obj) { - Heap* heap = map->GetHeap(); - SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj); - if (sfi->scope_info() != heap->empty_fixed_array()) { - heap->RecordFixedArraySubTypeStats( - SCOPE_INFO_SUB_TYPE, - FixedArray::cast(sfi->scope_info())->Size()); - } - ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj); - } -}; - - -template<> -class MarkCompactMarkingVisitor::ObjectStatsTracker< - MarkCompactMarkingVisitor::kVisitFixedArray> { - public: - static inline void Visit(Map* map, HeapObject* obj) { - Heap* heap = map->GetHeap(); - FixedArray* fixed_array = FixedArray::cast(obj); - if (fixed_array == heap->string_table()) { - 
heap->RecordFixedArraySubTypeStats( - STRING_TABLE_SUB_TYPE, - fixed_array->Size()); - } - ObjectStatsVisitBase(kVisitFixedArray, map, obj); - } -}; - - -void MarkCompactMarkingVisitor::Initialize() { - StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize(); - - table_.Register(kVisitJSRegExp, - &VisitRegExpAndFlushCode); - - if (FLAG_track_gc_object_stats) { - // Copy the visitor table to make call-through possible. - non_count_table_.CopyFrom(&table_); -#define VISITOR_ID_COUNT_FUNCTION(id) \ - table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); - VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) -#undef VISITOR_ID_COUNT_FUNCTION - } -} - - -VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> - MarkCompactMarkingVisitor::non_count_table_; - - -class CodeMarkingVisitor : public ThreadVisitor { - public: - explicit CodeMarkingVisitor(MarkCompactCollector* collector) - : collector_(collector) {} - - void VisitThread(Isolate* isolate, ThreadLocalTop* top) { - collector_->PrepareThreadForCodeFlushing(isolate, top); - } - - private: - MarkCompactCollector* collector_; -}; - - -class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { - public: - explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) - : collector_(collector) {} - - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) VisitPointer(p); - } - - void VisitPointer(Object** slot) { - Object* obj = *slot; - if (obj->IsSharedFunctionInfo()) { - SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); - MarkBit shared_mark = Marking::MarkBitFrom(shared); - MarkBit code_mark = Marking::MarkBitFrom(shared->code()); - collector_->MarkObject(shared->code(), code_mark); - collector_->MarkObject(shared, shared_mark); - } - } - - private: - MarkCompactCollector* collector_; -}; - - -void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate, - ThreadLocalTop* top) { - for (StackFrameIterator it(isolate, 
top); !it.done(); it.Advance()) { - // Note: for the frame that has a pending lazy deoptimization - // StackFrame::unchecked_code will return a non-optimized code object for - // the outermost function and StackFrame::LookupCode will return - // actual optimized code object. - StackFrame* frame = it.frame(); - Code* code = frame->unchecked_code(); - MarkBit code_mark = Marking::MarkBitFrom(code); - MarkObject(code, code_mark); - if (frame->is_optimized()) { - MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(), - frame->LookupCode()); - } - } -} - - -void MarkCompactCollector::PrepareForCodeFlushing() { - // Enable code flushing for non-incremental cycles. - if (FLAG_flush_code && !FLAG_flush_code_incrementally) { - EnableCodeFlushing(!was_marked_incrementally_); - } - - // If code flushing is disabled, there is no need to prepare for it. - if (!is_code_flushing_enabled()) return; - - // Ensure that empty descriptor array is marked. Method MarkDescriptorArray - // relies on it being marked before any other descriptor array. - HeapObject* descriptor_array = heap()->empty_descriptor_array(); - MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); - MarkObject(descriptor_array, descriptor_array_mark); - - // Make sure we are not referencing the code from the stack. - ASSERT(this == heap()->mark_compact_collector()); - PrepareThreadForCodeFlushing(heap()->isolate(), - heap()->isolate()->thread_local_top()); - - // Iterate the archived stacks in all threads to check if - // the code is referenced. - CodeMarkingVisitor code_marking_visitor(this); - heap()->isolate()->thread_manager()->IterateArchivedThreads( - &code_marking_visitor); - - SharedFunctionInfoMarkingVisitor visitor(this); - heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); - heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); - - ProcessMarkingDeque(); -} - - -// Visitor class for marking heap roots. 
-class RootMarkingVisitor : public ObjectVisitor { - public: - explicit RootMarkingVisitor(Heap* heap) - : collector_(heap->mark_compact_collector()) { } - - void VisitPointer(Object** p) { - MarkObjectByPointer(p); - } - - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) MarkObjectByPointer(p); - } - - // Skip the weak next code link in a code object, which is visited in - // ProcessTopOptimizedFrame. - void VisitNextCodeLink(Object** p) { } - - private: - void MarkObjectByPointer(Object** p) { - if (!(*p)->IsHeapObject()) return; - - // Replace flat cons strings in place. - HeapObject* object = ShortCircuitConsString(p); - MarkBit mark_bit = Marking::MarkBitFrom(object); - if (mark_bit.Get()) return; - - Map* map = object->map(); - // Mark the object. - collector_->SetMark(object, mark_bit); - - // Mark the map pointer and body, and push them on the marking stack. - MarkBit map_mark = Marking::MarkBitFrom(map); - collector_->MarkObject(map, map_mark); - MarkCompactMarkingVisitor::IterateBody(map, object); - - // Mark all the objects reachable from the map and body. May leave - // overflowed objects in the heap. - collector_->EmptyMarkingDeque(); - } - - MarkCompactCollector* collector_; -}; - - -// Helper class for pruning the string table. -template<bool finalize_external_strings> -class StringTableCleaner : public ObjectVisitor { - public: - explicit StringTableCleaner(Heap* heap) - : heap_(heap), pointers_removed_(0) { } - - virtual void VisitPointers(Object** start, Object** end) { - // Visit all HeapObject pointers in [start, end). - for (Object** p = start; p < end; p++) { - Object* o = *p; - if (o->IsHeapObject() && - !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { - if (finalize_external_strings) { - ASSERT(o->IsExternalString()); - heap_->FinalizeExternalString(String::cast(*p)); - } else { - pointers_removed_++; - } - // Set the entry to the_hole_value (as deleted). 
- *p = heap_->the_hole_value(); - } - } - } - - int PointersRemoved() { - ASSERT(!finalize_external_strings); - return pointers_removed_; - } - - private: - Heap* heap_; - int pointers_removed_; -}; - - -typedef StringTableCleaner<false> InternalizedStringTableCleaner; -typedef StringTableCleaner<true> ExternalStringTableCleaner; - - -// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects -// are retained. -class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { - public: - virtual Object* RetainAs(Object* object) { - if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { - return object; - } else if (object->IsAllocationSite() && - !(AllocationSite::cast(object)->IsZombie())) { - // "dead" AllocationSites need to live long enough for a traversal of new - // space. These sites get a one-time reprieve. - AllocationSite* site = AllocationSite::cast(object); - site->MarkZombie(); - site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site); - return object; - } else { - return NULL; - } - } -}; - - -// Fill the marking stack with overflowed objects returned by the given -// iterator. Stop when the marking stack is filled or the end of the space -// is reached, whichever comes first. -template<class T> -static void DiscoverGreyObjectsWithIterator(Heap* heap, - MarkingDeque* marking_deque, - T* it) { - // The caller should ensure that the marking stack is initially not full, - // so that we don't waste effort pointlessly scanning for objects. 
- ASSERT(!marking_deque->IsFull()); - - Map* filler_map = heap->one_pointer_filler_map(); - for (HeapObject* object = it->Next(); - object != NULL; - object = it->Next()) { - MarkBit markbit = Marking::MarkBitFrom(object); - if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { - Marking::GreyToBlack(markbit); - MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); - marking_deque->PushBlack(object); - if (marking_deque->IsFull()) return; - } - } -} - - -static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); - - -static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, - MemoryChunk* p) { - ASSERT(!marking_deque->IsFull()); - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); - - for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { - Address cell_base = it.CurrentCellBase(); - MarkBit::CellType* cell = it.CurrentCell(); - - const MarkBit::CellType current_cell = *cell; - if (current_cell == 0) continue; - - MarkBit::CellType grey_objects; - if (it.HasNext()) { - const MarkBit::CellType next_cell = *(cell+1); - grey_objects = current_cell & - ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1))); - } else { - grey_objects = current_cell & (current_cell >> 1); - } - - int offset = 0; - while (grey_objects != 0) { - int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); - grey_objects >>= trailing_zeros; - offset += trailing_zeros; - MarkBit markbit(cell, 1 << offset, false); - ASSERT(Marking::IsGrey(markbit)); - Marking::GreyToBlack(markbit); - Address addr = cell_base + offset * kPointerSize; - HeapObject* object = HeapObject::FromAddress(addr); - MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); - marking_deque->PushBlack(object); - if (marking_deque->IsFull()) return; - 
offset += 2; - grey_objects >>= 2; - } - - grey_objects >>= (Bitmap::kBitsPerCell - 1); - } -} - - -int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage( - NewSpace* new_space, - NewSpacePage* p) { - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); - - MarkBit::CellType* cells = p->markbits()->cells(); - int survivors_size = 0; - - for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { - Address cell_base = it.CurrentCellBase(); - MarkBit::CellType* cell = it.CurrentCell(); - - MarkBit::CellType current_cell = *cell; - if (current_cell == 0) continue; - - int offset = 0; - while (current_cell != 0) { - int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell); - current_cell >>= trailing_zeros; - offset += trailing_zeros; - Address address = cell_base + offset * kPointerSize; - HeapObject* object = HeapObject::FromAddress(address); - - int size = object->Size(); - survivors_size += size; - - Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); - - offset++; - current_cell >>= 1; - // Aggressively promote young survivors to the old space. - if (TryPromoteObject(object, size)) { - continue; - } - - // Promotion failed. Just migrate object to another semispace. - MaybeObject* allocation = new_space->AllocateRaw(size); - if (allocation->IsFailure()) { - if (!new_space->AddFreshPage()) { - // Shouldn't happen. We are sweeping linearly, and to-space - // has the same number of pages as from-space, so there is - // always room. 
- UNREACHABLE(); - } - allocation = new_space->AllocateRaw(size); - ASSERT(!allocation->IsFailure()); - } - Object* target = allocation->ToObjectUnchecked(); - - MigrateObject(HeapObject::cast(target), - object, - size, - NEW_SPACE); - } - *cells = 0; - } - return survivors_size; -} - - -static void DiscoverGreyObjectsInSpace(Heap* heap, - MarkingDeque* marking_deque, - PagedSpace* space) { - if (!space->was_swept_conservatively()) { - HeapObjectIterator it(space); - DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); - } else { - PageIterator it(space); - while (it.has_next()) { - Page* p = it.next(); - DiscoverGreyObjectsOnPage(marking_deque, p); - if (marking_deque->IsFull()) return; - } - } -} - - -static void DiscoverGreyObjectsInNewSpace(Heap* heap, - MarkingDeque* marking_deque) { - NewSpace* space = heap->new_space(); - NewSpacePageIterator it(space->bottom(), space->top()); - while (it.has_next()) { - NewSpacePage* page = it.next(); - DiscoverGreyObjectsOnPage(marking_deque, page); - if (marking_deque->IsFull()) return; - } -} - - -bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { - Object* o = *p; - if (!o->IsHeapObject()) return false; - HeapObject* heap_object = HeapObject::cast(o); - MarkBit mark = Marking::MarkBitFrom(heap_object); - return !mark.Get(); -} - - -bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap, - Object** p) { - Object* o = *p; - ASSERT(o->IsHeapObject()); - HeapObject* heap_object = HeapObject::cast(o); - MarkBit mark = Marking::MarkBitFrom(heap_object); - return !mark.Get(); -} - - -void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) { - StringTable* string_table = heap()->string_table(); - // Mark the string table itself. - MarkBit string_table_mark = Marking::MarkBitFrom(string_table); - SetMark(string_table, string_table_mark); - // Explicitly mark the prefix. 
- string_table->IteratePrefix(visitor); - ProcessMarkingDeque(); -} - - -void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) { - MarkBit mark_bit = Marking::MarkBitFrom(site); - SetMark(site, mark_bit); -} - - -void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { - // Mark the heap roots including global variables, stack variables, - // etc., and all objects reachable from them. - heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); - - // Handle the string table specially. - MarkStringTable(visitor); - - MarkWeakObjectToCodeTable(); - - // There may be overflowed objects in the heap. Visit them now. - while (marking_deque_.overflowed()) { - RefillMarkingDeque(); - EmptyMarkingDeque(); - } -} - - -void MarkCompactCollector::MarkImplicitRefGroups() { - List<ImplicitRefGroup*>* ref_groups = - isolate()->global_handles()->implicit_ref_groups(); - - int last = 0; - for (int i = 0; i < ref_groups->length(); i++) { - ImplicitRefGroup* entry = ref_groups->at(i); - ASSERT(entry != NULL); - - if (!IsMarked(*entry->parent)) { - (*ref_groups)[last++] = entry; - continue; - } - - Object*** children = entry->children; - // A parent object is marked, so mark all child heap objects. - for (size_t j = 0; j < entry->length; ++j) { - if ((*children[j])->IsHeapObject()) { - HeapObject* child = HeapObject::cast(*children[j]); - MarkBit mark = Marking::MarkBitFrom(child); - MarkObject(child, mark); - } - } - - // Once the entire group has been marked, dispose it because it's - // not needed anymore. - delete entry; - } - ref_groups->Rewind(last); -} - - -void MarkCompactCollector::MarkWeakObjectToCodeTable() { - HeapObject* weak_object_to_code_table = - HeapObject::cast(heap()->weak_object_to_code_table()); - if (!IsMarked(weak_object_to_code_table)) { - MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table); - SetMark(weak_object_to_code_table, mark); - } -} - - -// Mark all objects reachable from the objects on the marking stack. 
-// Before: the marking stack contains zero or more heap object pointers. -// After: the marking stack is empty, and all objects reachable from the -// marking stack have been marked, or are overflowed in the heap. -void MarkCompactCollector::EmptyMarkingDeque() { - while (!marking_deque_.IsEmpty()) { - HeapObject* object = marking_deque_.Pop(); - ASSERT(object->IsHeapObject()); - ASSERT(heap()->Contains(object)); - ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); - - Map* map = object->map(); - MarkBit map_mark = Marking::MarkBitFrom(map); - MarkObject(map, map_mark); - - MarkCompactMarkingVisitor::IterateBody(map, object); - } -} - - -// Sweep the heap for overflowed objects, clear their overflow bits, and -// push them on the marking stack. Stop early if the marking stack fills -// before sweeping completes. If sweeping completes, there are no remaining -// overflowed objects in the heap so the overflow flag on the markings stack -// is cleared. -void MarkCompactCollector::RefillMarkingDeque() { - ASSERT(marking_deque_.overflowed()); - - DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_); - if (marking_deque_.IsFull()) return; - - DiscoverGreyObjectsInSpace(heap(), - &marking_deque_, - heap()->old_pointer_space()); - if (marking_deque_.IsFull()) return; - - DiscoverGreyObjectsInSpace(heap(), - &marking_deque_, - heap()->old_data_space()); - if (marking_deque_.IsFull()) return; - - DiscoverGreyObjectsInSpace(heap(), - &marking_deque_, - heap()->code_space()); - if (marking_deque_.IsFull()) return; - - DiscoverGreyObjectsInSpace(heap(), - &marking_deque_, - heap()->map_space()); - if (marking_deque_.IsFull()) return; - - DiscoverGreyObjectsInSpace(heap(), - &marking_deque_, - heap()->cell_space()); - if (marking_deque_.IsFull()) return; - - DiscoverGreyObjectsInSpace(heap(), - &marking_deque_, - heap()->property_cell_space()); - if (marking_deque_.IsFull()) return; - - LargeObjectIterator lo_it(heap()->lo_space()); - 
DiscoverGreyObjectsWithIterator(heap(), - &marking_deque_, - &lo_it); - if (marking_deque_.IsFull()) return; - - marking_deque_.ClearOverflowed(); -} - - -// Mark all objects reachable (transitively) from objects on the marking -// stack. Before: the marking stack contains zero or more heap object -// pointers. After: the marking stack is empty and there are no overflowed -// objects in the heap. -void MarkCompactCollector::ProcessMarkingDeque() { - EmptyMarkingDeque(); - while (marking_deque_.overflowed()) { - RefillMarkingDeque(); - EmptyMarkingDeque(); - } -} - - -// Mark all objects reachable (transitively) from objects on the marking -// stack including references only considered in the atomic marking pause. -void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) { - bool work_to_do = true; - ASSERT(marking_deque_.IsEmpty()); - while (work_to_do) { - isolate()->global_handles()->IterateObjectGroups( - visitor, &IsUnmarkedHeapObjectWithHeap); - MarkImplicitRefGroups(); - ProcessWeakCollections(); - work_to_do = !marking_deque_.IsEmpty(); - ProcessMarkingDeque(); - } -} - - -void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { - for (StackFrameIterator it(isolate(), isolate()->thread_local_top()); - !it.done(); it.Advance()) { - if (it.frame()->type() == StackFrame::JAVA_SCRIPT) { - return; - } - if (it.frame()->type() == StackFrame::OPTIMIZED) { - Code* code = it.frame()->LookupCode(); - if (!code->CanDeoptAt(it.frame()->pc())) { - code->CodeIterateBody(visitor); - } - ProcessMarkingDeque(); - return; - } - } -} - - -void MarkCompactCollector::MarkLiveObjects() { - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); - // The recursive GC marker detects when it is nearing stack overflow, - // and switches to a different marking system. JS interrupts interfere - // with the C stack limit check. 
- PostponeInterruptsScope postpone(isolate()); - - bool incremental_marking_overflowed = false; - IncrementalMarking* incremental_marking = heap_->incremental_marking(); - if (was_marked_incrementally_) { - // Finalize the incremental marking and check whether we had an overflow. - // Both markers use grey color to mark overflowed objects so - // non-incremental marker can deal with them as if overflow - // occured during normal marking. - // But incremental marker uses a separate marking deque - // so we have to explicitly copy its overflow state. - incremental_marking->Finalize(); - incremental_marking_overflowed = - incremental_marking->marking_deque()->overflowed(); - incremental_marking->marking_deque()->ClearOverflowed(); - } else { - // Abort any pending incremental activities e.g. incremental sweeping. - incremental_marking->Abort(); - } - -#ifdef DEBUG - ASSERT(state_ == PREPARE_GC); - state_ = MARK_LIVE_OBJECTS; -#endif - // The to space contains live objects, a page in from space is used as a - // marking stack. - Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); - Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); - if (FLAG_force_marking_deque_overflows) { - marking_deque_end = marking_deque_start + 64 * kPointerSize; - } - marking_deque_.Initialize(marking_deque_start, - marking_deque_end); - ASSERT(!marking_deque_.overflowed()); - - if (incremental_marking_overflowed) { - // There are overflowed objects left in the heap after incremental marking. - marking_deque_.SetOverflowed(); - } - - PrepareForCodeFlushing(); - - if (was_marked_incrementally_) { - // There is no write barrier on cells so we have to scan them now at the end - // of the incremental marking. 
- { - HeapObjectIterator cell_iterator(heap()->cell_space()); - HeapObject* cell; - while ((cell = cell_iterator.Next()) != NULL) { - ASSERT(cell->IsCell()); - if (IsMarked(cell)) { - int offset = Cell::kValueOffset; - MarkCompactMarkingVisitor::VisitPointer( - heap(), - reinterpret_cast<Object**>(cell->address() + offset)); - } - } - } - { - HeapObjectIterator js_global_property_cell_iterator( - heap()->property_cell_space()); - HeapObject* cell; - while ((cell = js_global_property_cell_iterator.Next()) != NULL) { - ASSERT(cell->IsPropertyCell()); - if (IsMarked(cell)) { - MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell); - } - } - } - } - - RootMarkingVisitor root_visitor(heap()); - MarkRoots(&root_visitor); - - ProcessTopOptimizedFrame(&root_visitor); - - // The objects reachable from the roots are marked, yet unreachable - // objects are unmarked. Mark objects reachable due to host - // application specific logic or through Harmony weak maps. - ProcessEphemeralMarking(&root_visitor); - - // The objects reachable from the roots, weak maps or object groups - // are marked, yet unreachable objects are unmarked. Mark objects - // reachable only from weak global handles. - // - // First we identify nonlive weak handles and mark them as pending - // destruction. - heap()->isolate()->global_handles()->IdentifyWeakHandles( - &IsUnmarkedHeapObject); - // Then we mark the objects and process the transitive closure. - heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); - while (marking_deque_.overflowed()) { - RefillMarkingDeque(); - EmptyMarkingDeque(); - } - - // Repeat host application specific and Harmony weak maps marking to - // mark unmarked objects reachable from the weak roots. - ProcessEphemeralMarking(&root_visitor); - - AfterMarking(); -} - - -void MarkCompactCollector::AfterMarking() { - // Object literal map caches reference strings (cache keys) and maps - // (cache values). 
At this point still useful maps have already been - // marked. Mark the keys for the alive values before we process the - // string table. - ProcessMapCaches(); - - // Prune the string table removing all strings only pointed to by the - // string table. Cannot use string_table() here because the string - // table is marked. - StringTable* string_table = heap()->string_table(); - InternalizedStringTableCleaner internalized_visitor(heap()); - string_table->IterateElements(&internalized_visitor); - string_table->ElementsRemoved(internalized_visitor.PointersRemoved()); - - ExternalStringTableCleaner external_visitor(heap()); - heap()->external_string_table_.Iterate(&external_visitor); - heap()->external_string_table_.CleanUp(); - - // Process the weak references. - MarkCompactWeakObjectRetainer mark_compact_object_retainer; - heap()->ProcessWeakReferences(&mark_compact_object_retainer); - - // Remove object groups after marking phase. - heap()->isolate()->global_handles()->RemoveObjectGroups(); - heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); - - // Flush code from collected candidates. - if (is_code_flushing_enabled()) { - code_flusher_->ProcessCandidates(); - // If incremental marker does not support code flushing, we need to - // disable it before incremental marking steps for next cycle. - if (FLAG_flush_code && !FLAG_flush_code_incrementally) { - EnableCodeFlushing(false); - } - } - - if (FLAG_track_gc_object_stats) { - heap()->CheckpointObjectStats(); - } -} - - -void MarkCompactCollector::ProcessMapCaches() { - Object* raw_context = heap()->native_contexts_list_; - while (raw_context != heap()->undefined_value()) { - Context* context = reinterpret_cast<Context*>(raw_context); - if (IsMarked(context)) { - HeapObject* raw_map_cache = - HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); - // A map cache may be reachable from the stack. In this case - // it's already transitively marked and it's too late to clean - // up its parts. 
- if (!IsMarked(raw_map_cache) && - raw_map_cache != heap()->undefined_value()) { - MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); - int existing_elements = map_cache->NumberOfElements(); - int used_elements = 0; - for (int i = MapCache::kElementsStartIndex; - i < map_cache->length(); - i += MapCache::kEntrySize) { - Object* raw_key = map_cache->get(i); - if (raw_key == heap()->undefined_value() || - raw_key == heap()->the_hole_value()) continue; - STATIC_ASSERT(MapCache::kEntrySize == 2); - Object* raw_map = map_cache->get(i + 1); - if (raw_map->IsHeapObject() && IsMarked(raw_map)) { - ++used_elements; - } else { - // Delete useless entries with unmarked maps. - ASSERT(raw_map->IsMap()); - map_cache->set_the_hole(i); - map_cache->set_the_hole(i + 1); - } - } - if (used_elements == 0) { - context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); - } else { - // Note: we don't actually shrink the cache here to avoid - // extra complexity during GC. We rely on subsequent cache - // usages (EnsureCapacity) to do this. - map_cache->ElementsRemoved(existing_elements - used_elements); - MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache); - MarkObject(map_cache, map_cache_markbit); - } - } - } - // Move to next element in the list. 
- raw_context = context->get(Context::NEXT_CONTEXT_LINK); - } - ProcessMarkingDeque(); -} - - -void MarkCompactCollector::ReattachInitialMaps() { - HeapObjectIterator map_iterator(heap()->map_space()); - for (HeapObject* obj = map_iterator.Next(); - obj != NULL; - obj = map_iterator.Next()) { - Map* map = Map::cast(obj); - - STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); - if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; - - if (map->attached_to_shared_function_info()) { - JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); - } - } -} - - -void MarkCompactCollector::ClearNonLiveReferences() { - // Iterate over the map space, setting map transitions that go from - // a marked map to an unmarked map to null transitions. This action - // is carried out only on maps of JSObjects and related subtypes. - HeapObjectIterator map_iterator(heap()->map_space()); - for (HeapObject* obj = map_iterator.Next(); - obj != NULL; - obj = map_iterator.Next()) { - Map* map = Map::cast(obj); - - if (!map->CanTransition()) continue; - - MarkBit map_mark = Marking::MarkBitFrom(map); - if (map_mark.Get() && map->attached_to_shared_function_info()) { - // This map is used for inobject slack tracking and has been detached - // from SharedFunctionInfo during the mark phase. - // Since it survived the GC, reattach it now. - JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); - } - - ClearNonLivePrototypeTransitions(map); - ClearNonLiveMapTransitions(map, map_mark); - - if (map_mark.Get()) { - ClearNonLiveDependentCode(map->dependent_code()); - } else { - ClearAndDeoptimizeDependentCode(map->dependent_code()); - map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); - } - } - - // Iterate over property cell space, removing dependent code that is not - // otherwise kept alive by strong references. 
- HeapObjectIterator cell_iterator(heap_->property_cell_space()); - for (HeapObject* cell = cell_iterator.Next(); - cell != NULL; - cell = cell_iterator.Next()) { - if (IsMarked(cell)) { - ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code()); - } - } - - // Iterate over allocation sites, removing dependent code that is not - // otherwise kept alive by strong references. - Object* undefined = heap()->undefined_value(); - for (Object* site = heap()->allocation_sites_list(); - site != undefined; - site = AllocationSite::cast(site)->weak_next()) { - if (IsMarked(site)) { - ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code()); - } - } - - if (heap_->weak_object_to_code_table()->IsHashTable()) { - WeakHashTable* table = - WeakHashTable::cast(heap_->weak_object_to_code_table()); - uint32_t capacity = table->Capacity(); - for (uint32_t i = 0; i < capacity; i++) { - uint32_t key_index = table->EntryToIndex(i); - Object* key = table->get(key_index); - if (!table->IsKey(key)) continue; - uint32_t value_index = table->EntryToValueIndex(i); - Object* value = table->get(value_index); - if (key->IsCell() && !IsMarked(key)) { - Cell* cell = Cell::cast(key); - Object* object = cell->value(); - if (IsMarked(object)) { - MarkBit mark = Marking::MarkBitFrom(cell); - SetMark(cell, mark); - Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset); - RecordSlot(value_slot, value_slot, *value_slot); - } - } - if (IsMarked(key)) { - if (!IsMarked(value)) { - HeapObject* obj = HeapObject::cast(value); - MarkBit mark = Marking::MarkBitFrom(obj); - SetMark(obj, mark); - } - ClearNonLiveDependentCode(DependentCode::cast(value)); - } else { - ClearAndDeoptimizeDependentCode(DependentCode::cast(value)); - table->set(key_index, heap_->the_hole_value()); - table->set(value_index, heap_->the_hole_value()); - } - } - } -} - - -void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { - int number_of_transitions = 
map->NumberOfProtoTransitions(); - FixedArray* prototype_transitions = map->GetPrototypeTransitions(); - - int new_number_of_transitions = 0; - const int header = Map::kProtoTransitionHeaderSize; - const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; - const int map_offset = header + Map::kProtoTransitionMapOffset; - const int step = Map::kProtoTransitionElementsPerEntry; - for (int i = 0; i < number_of_transitions; i++) { - Object* prototype = prototype_transitions->get(proto_offset + i * step); - Object* cached_map = prototype_transitions->get(map_offset + i * step); - if (IsMarked(prototype) && IsMarked(cached_map)) { - ASSERT(!prototype->IsUndefined()); - int proto_index = proto_offset + new_number_of_transitions * step; - int map_index = map_offset + new_number_of_transitions * step; - if (new_number_of_transitions != i) { - prototype_transitions->set( - proto_index, - prototype, - UPDATE_WRITE_BARRIER); - prototype_transitions->set( - map_index, - cached_map, - SKIP_WRITE_BARRIER); - } - Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index); - RecordSlot(slot, slot, prototype); - new_number_of_transitions++; - } - } - - if (new_number_of_transitions != number_of_transitions) { - map->SetNumberOfProtoTransitions(new_number_of_transitions); - } - - // Fill slots that became free with undefined value. - for (int i = new_number_of_transitions * step; - i < number_of_transitions * step; - i++) { - prototype_transitions->set_undefined(header + i); - } -} - - -void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, - MarkBit map_mark) { - Object* potential_parent = map->GetBackPointer(); - if (!potential_parent->IsMap()) return; - Map* parent = Map::cast(potential_parent); - - // Follow back pointer, check whether we are dealing with a map transition - // from a live map to a dead path and in case clear transitions of parent. 
- bool current_is_alive = map_mark.Get(); - bool parent_is_alive = Marking::MarkBitFrom(parent).Get(); - if (!current_is_alive && parent_is_alive) { - parent->ClearNonLiveTransitions(heap()); - } -} - - -void MarkCompactCollector::ClearAndDeoptimizeDependentCode( - DependentCode* entries) { - DisallowHeapAllocation no_allocation; - DependentCode::GroupStartIndexes starts(entries); - int number_of_entries = starts.number_of_entries(); - if (number_of_entries == 0) return; - for (int i = 0; i < number_of_entries; i++) { - // If the entry is compilation info then the map must be alive, - // and ClearAndDeoptimizeDependentCode shouldn't be called. - ASSERT(entries->is_code_at(i)); - Code* code = entries->code_at(i); - - if (IsMarked(code) && !code->marked_for_deoptimization()) { - code->set_marked_for_deoptimization(true); - code->InvalidateEmbeddedObjects(); - have_code_to_deoptimize_ = true; - } - entries->clear_at(i); - } -} - - -void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { - DisallowHeapAllocation no_allocation; - DependentCode::GroupStartIndexes starts(entries); - int number_of_entries = starts.number_of_entries(); - if (number_of_entries == 0) return; - int new_number_of_entries = 0; - // Go through all groups, remove dead codes and compact. 
- for (int g = 0; g < DependentCode::kGroupCount; g++) { - int group_number_of_entries = 0; - for (int i = starts.at(g); i < starts.at(g + 1); i++) { - Object* obj = entries->object_at(i); - ASSERT(obj->IsCode() || IsMarked(obj)); - if (IsMarked(obj) && - (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) { - if (new_number_of_entries + group_number_of_entries != i) { - entries->set_object_at( - new_number_of_entries + group_number_of_entries, obj); - } - Object** slot = entries->slot_at(new_number_of_entries + - group_number_of_entries); - RecordSlot(slot, slot, obj); - group_number_of_entries++; - } - } - entries->set_number_of_entries( - static_cast<DependentCode::DependencyGroup>(g), - group_number_of_entries); - new_number_of_entries += group_number_of_entries; - } - for (int i = new_number_of_entries; i < number_of_entries; i++) { - entries->clear_at(i); - } -} - - -void MarkCompactCollector::ProcessWeakCollections() { - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS); - Object* weak_collection_obj = encountered_weak_collections(); - while (weak_collection_obj != Smi::FromInt(0)) { - ASSERT(MarkCompactCollector::IsMarked( - HeapObject::cast(weak_collection_obj))); - JSWeakCollection* weak_collection = - reinterpret_cast<JSWeakCollection*>(weak_collection_obj); - ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table()); - Object** anchor = reinterpret_cast<Object**>(table->address()); - for (int i = 0; i < table->Capacity(); i++) { - if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { - Object** key_slot = - table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i)); - RecordSlot(anchor, key_slot, *key_slot); - Object** value_slot = - table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i)); - MarkCompactMarkingVisitor::MarkObjectByPointer( - this, anchor, value_slot); - } - } - weak_collection_obj = weak_collection->next(); - } -} - - -void 
MarkCompactCollector::ClearWeakCollections() { - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR); - Object* weak_collection_obj = encountered_weak_collections(); - while (weak_collection_obj != Smi::FromInt(0)) { - ASSERT(MarkCompactCollector::IsMarked( - HeapObject::cast(weak_collection_obj))); - JSWeakCollection* weak_collection = - reinterpret_cast<JSWeakCollection*>(weak_collection_obj); - ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table()); - for (int i = 0; i < table->Capacity(); i++) { - if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { - table->RemoveEntry(i); - } - } - weak_collection_obj = weak_collection->next(); - weak_collection->set_next(Smi::FromInt(0)); - } - set_encountered_weak_collections(Smi::FromInt(0)); -} - - -// We scavange new space simultaneously with sweeping. This is done in two -// passes. -// -// The first pass migrates all alive objects from one semispace to another or -// promotes them to old space. Forwarding address is written directly into -// first word of object without any encoding. If object is dead we write -// NULL as a forwarding address. -// -// The second pass updates pointers to new space in all spaces. It is possible -// to encounter pointers to dead new space objects during traversal of pointers -// to new space. We should clear them to avoid encountering them during next -// pointer iteration. This is an issue if the store buffer overflows and we -// have to scan the entire old space, including dead objects, looking for -// pointers to new space. 
-void MarkCompactCollector::MigrateObject(HeapObject* dst, - HeapObject* src, - int size, - AllocationSpace dest) { - Address dst_addr = dst->address(); - Address src_addr = src->address(); - HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler(); - if (heap_profiler->is_tracking_object_moves()) { - heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size); - } - ASSERT(heap()->AllowedToBeMigrated(src, dest)); - ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize); - if (dest == OLD_POINTER_SPACE) { - Address src_slot = src_addr; - Address dst_slot = dst_addr; - ASSERT(IsAligned(size, kPointerSize)); - - for (int remaining = size / kPointerSize; remaining > 0; remaining--) { - Object* value = Memory::Object_at(src_slot); - - Memory::Object_at(dst_slot) = value; - - if (heap_->InNewSpace(value)) { - heap_->store_buffer()->Mark(dst_slot); - } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { - SlotsBuffer::AddTo(&slots_buffer_allocator_, - &migration_slots_buffer_, - reinterpret_cast<Object**>(dst_slot), - SlotsBuffer::IGNORE_OVERFLOW); - } - - src_slot += kPointerSize; - dst_slot += kPointerSize; - } - - if (compacting_ && dst->IsJSFunction()) { - Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset; - Address code_entry = Memory::Address_at(code_entry_slot); - - if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { - SlotsBuffer::AddTo(&slots_buffer_allocator_, - &migration_slots_buffer_, - SlotsBuffer::CODE_ENTRY_SLOT, - code_entry_slot, - SlotsBuffer::IGNORE_OVERFLOW); - } - } else if (compacting_ && dst->IsConstantPoolArray()) { - ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst); - for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) { - Address code_entry_slot = - dst_addr + constant_pool->OffsetOfElementAt(i); - Address code_entry = Memory::Address_at(code_entry_slot); - - if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { - 
SlotsBuffer::AddTo(&slots_buffer_allocator_, - &migration_slots_buffer_, - SlotsBuffer::CODE_ENTRY_SLOT, - code_entry_slot, - SlotsBuffer::IGNORE_OVERFLOW); - } - } - } - } else if (dest == CODE_SPACE) { - PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); - heap()->MoveBlock(dst_addr, src_addr, size); - SlotsBuffer::AddTo(&slots_buffer_allocator_, - &migration_slots_buffer_, - SlotsBuffer::RELOCATED_CODE_OBJECT, - dst_addr, - SlotsBuffer::IGNORE_OVERFLOW); - Code::cast(dst)->Relocate(dst_addr - src_addr); - } else { - ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); - heap()->MoveBlock(dst_addr, src_addr, size); - } - Memory::Address_at(src_addr) = dst_addr; -} - - -// Visitor for updating pointers from live objects in old spaces to new space. -// It does not expect to encounter pointers to dead objects. -class PointersUpdatingVisitor: public ObjectVisitor { - public: - explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } - - void VisitPointer(Object** p) { - UpdatePointer(p); - } - - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) UpdatePointer(p); - } - - void VisitEmbeddedPointer(RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); - Object* target = rinfo->target_object(); - Object* old_target = target; - VisitPointer(&target); - // Avoid unnecessary changes that might unnecessary flush the instruction - // cache. 
- if (target != old_target) { - rinfo->set_target_object(target); - } - } - - void VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); - Object* old_target = target; - VisitPointer(&target); - if (target != old_target) { - rinfo->set_target_address(Code::cast(target)->instruction_start()); - } - } - - void VisitCodeAgeSequence(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); - Object* stub = rinfo->code_age_stub(); - ASSERT(stub != NULL); - VisitPointer(&stub); - if (stub != rinfo->code_age_stub()) { - rinfo->set_code_age_stub(Code::cast(stub)); - } - } - - void VisitDebugTarget(RelocInfo* rinfo) { - ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && - rinfo->IsPatchedReturnSequence()) || - (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && - rinfo->IsPatchedDebugBreakSlotSequence())); - Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); - VisitPointer(&target); - rinfo->set_call_address(Code::cast(target)->instruction_start()); - } - - static inline void UpdateSlot(Heap* heap, Object** slot) { - Object* obj = *slot; - - if (!obj->IsHeapObject()) return; - - HeapObject* heap_obj = HeapObject::cast(obj); - - MapWord map_word = heap_obj->map_word(); - if (map_word.IsForwardingAddress()) { - ASSERT(heap->InFromSpace(heap_obj) || - MarkCompactCollector::IsOnEvacuationCandidate(heap_obj)); - HeapObject* target = map_word.ToForwardingAddress(); - *slot = target; - ASSERT(!heap->InFromSpace(target) && - !MarkCompactCollector::IsOnEvacuationCandidate(target)); - } - } - - private: - inline void UpdatePointer(Object** p) { - UpdateSlot(heap_, p); - } - - Heap* heap_; -}; - - -static void UpdatePointer(HeapObject** p, HeapObject* object) { - ASSERT(*p == object); - - Address old_addr = object->address(); - - Address new_addr = Memory::Address_at(old_addr); - - // The new space sweep will overwrite the map word of dead 
objects - // with NULL. In this case we do not need to transfer this entry to - // the store buffer which we are rebuilding. - if (new_addr != NULL) { - *p = HeapObject::FromAddress(new_addr); - } else { - // We have to zap this pointer, because the store buffer may overflow later, - // and then we have to scan the entire heap and we don't want to find - // spurious newspace pointers in the old space. - // TODO(mstarzinger): This was changed to a sentinel value to track down - // rare crashes, change it back to Smi::FromInt(0) later. - *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood - } -} - - -static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, - Object** p) { - MapWord map_word = HeapObject::cast(*p)->map_word(); - - if (map_word.IsForwardingAddress()) { - return String::cast(map_word.ToForwardingAddress()); - } - - return String::cast(*p); -} - - -bool MarkCompactCollector::TryPromoteObject(HeapObject* object, - int object_size) { - // TODO(hpayer): Replace that check with an assert. - CHECK(object_size <= Page::kMaxRegularHeapObjectSize); - - OldSpace* target_space = heap()->TargetSpace(object); - - ASSERT(target_space == heap()->old_pointer_space() || - target_space == heap()->old_data_space()); - Object* result; - MaybeObject* maybe_result = target_space->AllocateRaw(object_size); - if (maybe_result->ToObject(&result)) { - HeapObject* target = HeapObject::cast(result); - MigrateObject(target, - object, - object_size, - target_space->identity()); - heap()->mark_compact_collector()->tracer()-> - increment_promoted_objects_size(object_size); - return true; - } - - return false; -} - - -void MarkCompactCollector::EvacuateNewSpace() { - // There are soft limits in the allocation code, designed trigger a mark - // sweep collection by failing allocations. But since we are already in - // a mark-sweep allocation, there is no sense in trying to trigger one. 
- AlwaysAllocateScope scope(isolate()); - heap()->CheckNewSpaceExpansionCriteria(); - - NewSpace* new_space = heap()->new_space(); - - // Store allocation range before flipping semispaces. - Address from_bottom = new_space->bottom(); - Address from_top = new_space->top(); - - // Flip the semispaces. After flipping, to space is empty, from space has - // live objects. - new_space->Flip(); - new_space->ResetAllocationInfo(); - - int survivors_size = 0; - - // First pass: traverse all objects in inactive semispace, remove marks, - // migrate live objects and write forwarding addresses. This stage puts - // new entries in the store buffer and may cause some pages to be marked - // scan-on-scavenge. - NewSpacePageIterator it(from_bottom, from_top); - while (it.has_next()) { - NewSpacePage* p = it.next(); - survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p); - } - - heap_->IncrementYoungSurvivorsCounter(survivors_size); - new_space->set_age_mark(new_space->top()); -} - - -void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { - AlwaysAllocateScope always_allocate(isolate()); - PagedSpace* space = static_cast<PagedSpace*>(p->owner()); - ASSERT(p->IsEvacuationCandidate() && !p->WasSwept()); - p->MarkSweptPrecisely(); - - int offsets[16]; - - for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { - Address cell_base = it.CurrentCellBase(); - MarkBit::CellType* cell = it.CurrentCell(); - - if (*cell == 0) continue; - - int live_objects = MarkWordToObjectStarts(*cell, offsets); - for (int i = 0; i < live_objects; i++) { - Address object_addr = cell_base + offsets[i] * kPointerSize; - HeapObject* object = HeapObject::FromAddress(object_addr); - ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); - - int size = object->Size(); - - MaybeObject* target = space->AllocateRaw(size); - if (target->IsFailure()) { - // OS refused to give us memory. 
- V8::FatalProcessOutOfMemory("Evacuation"); - return; - } - - Object* target_object = target->ToObjectUnchecked(); - - MigrateObject(HeapObject::cast(target_object), - object, - size, - space->identity()); - ASSERT(object->map_word().IsForwardingAddress()); - } - - // Clear marking bits for current cell. - *cell = 0; - } - p->ResetLiveBytes(); -} - - -void MarkCompactCollector::EvacuatePages() { - int npages = evacuation_candidates_.length(); - for (int i = 0; i < npages; i++) { - Page* p = evacuation_candidates_[i]; - // TODO(hpayer): This check is just used for debugging purpose and - // should be removed or turned into an assert after investigating the - // crash in concurrent sweeping. - CHECK(p->IsEvacuationCandidate() || - p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); - CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0); - if (p->IsEvacuationCandidate()) { - // During compaction we might have to request a new page. - // Check that space still have room for that. - if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { - EvacuateLiveObjectsFromPage(p); - } else { - // Without room for expansion evacuation is not guaranteed to succeed. - // Pessimistically abandon unevacuated pages. 
- for (int j = i; j < npages; j++) { - Page* page = evacuation_candidates_[j]; - slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); - page->ClearEvacuationCandidate(); - page->SetFlag(Page::RESCAN_ON_EVACUATION); - page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor()); - } - return; - } - } - } -} - - -class EvacuationWeakObjectRetainer : public WeakObjectRetainer { - public: - virtual Object* RetainAs(Object* object) { - if (object->IsHeapObject()) { - HeapObject* heap_object = HeapObject::cast(object); - MapWord map_word = heap_object->map_word(); - if (map_word.IsForwardingAddress()) { - return map_word.ToForwardingAddress(); - } - } - return object; - } -}; - - -static inline void UpdateSlot(Isolate* isolate, - ObjectVisitor* v, - SlotsBuffer::SlotType slot_type, - Address addr) { - switch (slot_type) { - case SlotsBuffer::CODE_TARGET_SLOT: { - RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); - rinfo.Visit(isolate, v); - break; - } - case SlotsBuffer::CODE_ENTRY_SLOT: { - v->VisitCodeEntry(addr); - break; - } - case SlotsBuffer::RELOCATED_CODE_OBJECT: { - HeapObject* obj = HeapObject::FromAddress(addr); - Code::cast(obj)->CodeIterateBody(v); - break; - } - case SlotsBuffer::DEBUG_TARGET_SLOT: { - RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL); - if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v); - break; - } - case SlotsBuffer::JS_RETURN_SLOT: { - RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL); - if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v); - break; - } - case SlotsBuffer::EMBEDDED_OBJECT_SLOT: { - RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL); - rinfo.Visit(isolate, v); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -enum SweepingMode { - SWEEP_ONLY, - SWEEP_AND_VISIT_LIVE_OBJECTS -}; - - -enum SkipListRebuildingMode { - REBUILD_SKIP_LIST, - IGNORE_SKIP_LIST -}; - - -enum FreeSpaceTreatmentMode { - IGNORE_FREE_SPACE, - ZAP_FREE_SPACE -}; 
- - -// Sweep a space precisely. After this has been done the space can -// be iterated precisely, hitting only the live objects. Code space -// is always swept precisely because we want to be able to iterate -// over it. Map space is swept precisely, because it is not compacted. -// Slots in live objects pointing into evacuation candidates are updated -// if requested. -template<SweepingMode sweeping_mode, - SkipListRebuildingMode skip_list_mode, - FreeSpaceTreatmentMode free_space_mode> -static void SweepPrecisely(PagedSpace* space, - Page* p, - ObjectVisitor* v) { - ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); - ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, - space->identity() == CODE_SPACE); - ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); - - double start_time = 0.0; - if (FLAG_print_cumulative_gc_stat) { - start_time = OS::TimeCurrentMillis(); - } - - p->MarkSweptPrecisely(); - - Address free_start = p->area_start(); - ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); - int offsets[16]; - - SkipList* skip_list = p->skip_list(); - int curr_region = -1; - if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { - skip_list->Clear(); - } - - for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { - Address cell_base = it.CurrentCellBase(); - MarkBit::CellType* cell = it.CurrentCell(); - int live_objects = MarkWordToObjectStarts(*cell, offsets); - int live_index = 0; - for ( ; live_objects != 0; live_objects--) { - Address free_end = cell_base + offsets[live_index++] * kPointerSize; - if (free_end != free_start) { - if (free_space_mode == ZAP_FREE_SPACE) { - memset(free_start, 0xcc, static_cast<int>(free_end - free_start)); - } - space->Free(free_start, static_cast<int>(free_end - free_start)); -#ifdef ENABLE_GDB_JIT_INTERFACE - if (FLAG_gdbjit && space->identity() == CODE_SPACE) { - GDBJITInterface::RemoveCodeRange(free_start, free_end); - } -#endif - } - HeapObject* live_object = 
HeapObject::FromAddress(free_end); - ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); - Map* map = live_object->map(); - int size = live_object->SizeFromMap(map); - if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { - live_object->IterateBody(map->instance_type(), size, v); - } - if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { - int new_region_start = - SkipList::RegionNumber(free_end); - int new_region_end = - SkipList::RegionNumber(free_end + size - kPointerSize); - if (new_region_start != curr_region || - new_region_end != curr_region) { - skip_list->AddObject(free_end, size); - curr_region = new_region_end; - } - } - free_start = free_end + size; - } - // Clear marking bits for current cell. - *cell = 0; - } - if (free_start != p->area_end()) { - if (free_space_mode == ZAP_FREE_SPACE) { - memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start)); - } - space->Free(free_start, static_cast<int>(p->area_end() - free_start)); -#ifdef ENABLE_GDB_JIT_INTERFACE - if (FLAG_gdbjit && space->identity() == CODE_SPACE) { - GDBJITInterface::RemoveCodeRange(free_start, p->area_end()); - } -#endif - } - p->ResetLiveBytes(); - if (FLAG_print_cumulative_gc_stat) { - space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time); - } -} - - -static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { - Page* p = Page::FromAddress(code->address()); - - if (p->IsEvacuationCandidate() || - p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { - return false; - } - - Address code_start = code->address(); - Address code_end = code_start + code->Size(); - - uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); - uint32_t end_index = - MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); - - Bitmap* b = p->markbits(); - - MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); - MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); - - MarkBit::CellType* start_cell = start_mark_bit.cell(); - 
MarkBit::CellType* end_cell = end_mark_bit.cell(); - - if (value) { - MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); - MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; - - if (start_cell == end_cell) { - *start_cell |= start_mask & end_mask; - } else { - *start_cell |= start_mask; - for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { - *cell = ~0; - } - *end_cell |= end_mask; - } - } else { - for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) { - *cell = 0; - } - } - - return true; -} - - -static bool IsOnInvalidatedCodeObject(Address addr) { - // We did not record any slots in large objects thus - // we can safely go to the page from the slot address. - Page* p = Page::FromAddress(addr); - - // First check owner's identity because old pointer and old data spaces - // are swept lazily and might still have non-zero mark-bits on some - // pages. - if (p->owner()->identity() != CODE_SPACE) return false; - - // In code space only bits on evacuation candidates (but we don't record - // any slots on them) and under invalidated code objects are non-zero. - MarkBit mark_bit = - p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); - - return mark_bit.Get(); -} - - -void MarkCompactCollector::InvalidateCode(Code* code) { - if (heap_->incremental_marking()->IsCompacting() && - !ShouldSkipEvacuationSlotRecording(code)) { - ASSERT(compacting_); - - // If the object is white than no slots were recorded on it yet. - MarkBit mark_bit = Marking::MarkBitFrom(code); - if (Marking::IsWhite(mark_bit)) return; - - invalidated_code_.Add(code); - } -} - - -// Return true if the given code is deoptimized or will be deoptimized. 
-bool MarkCompactCollector::WillBeDeoptimized(Code* code) { - return code->marked_for_deoptimization(); -} - - -bool MarkCompactCollector::MarkInvalidatedCode() { - bool code_marked = false; - - int length = invalidated_code_.length(); - for (int i = 0; i < length; i++) { - Code* code = invalidated_code_[i]; - - if (SetMarkBitsUnderInvalidatedCode(code, true)) { - code_marked = true; - } - } - - return code_marked; -} - - -void MarkCompactCollector::RemoveDeadInvalidatedCode() { - int length = invalidated_code_.length(); - for (int i = 0; i < length; i++) { - if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; - } -} - - -void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { - int length = invalidated_code_.length(); - for (int i = 0; i < length; i++) { - Code* code = invalidated_code_[i]; - if (code != NULL) { - code->Iterate(visitor); - SetMarkBitsUnderInvalidatedCode(code, false); - } - } - invalidated_code_.Rewind(0); -} - - -void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { - Heap::RelocationLock relocation_lock(heap()); - - bool code_slots_filtering_required; - { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); - code_slots_filtering_required = MarkInvalidatedCode(); - EvacuateNewSpace(); - } - - { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES); - EvacuatePages(); - } - - // Second pass: find pointers to new space and update them. - PointersUpdatingVisitor updating_visitor(heap()); - - { GCTracer::Scope gc_scope(tracer_, - GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); - // Update pointers in to space. 
- SemiSpaceIterator to_it(heap()->new_space()->bottom(), - heap()->new_space()->top()); - for (HeapObject* object = to_it.Next(); - object != NULL; - object = to_it.Next()) { - Map* map = object->map(); - object->IterateBody(map->instance_type(), - object->SizeFromMap(map), - &updating_visitor); - } - } - - { GCTracer::Scope gc_scope(tracer_, - GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS); - // Update roots. - heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); - } - - { GCTracer::Scope gc_scope(tracer_, - GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS); - StoreBufferRebuildScope scope(heap_, - heap_->store_buffer(), - &Heap::ScavengeStoreBufferCallback); - heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps( - &UpdatePointer); - } - - { GCTracer::Scope gc_scope(tracer_, - GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); - SlotsBuffer::UpdateSlotsRecordedIn(heap_, - migration_slots_buffer_, - code_slots_filtering_required); - if (FLAG_trace_fragmentation) { - PrintF(" migration slots buffer: %d\n", - SlotsBuffer::SizeOfChain(migration_slots_buffer_)); - } - - if (compacting_ && was_marked_incrementally_) { - // It's difficult to filter out slots recorded for large objects. - LargeObjectIterator it(heap_->lo_space()); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - // LargeObjectSpace is not swept yet thus we have to skip - // dead objects explicitly. 
- if (!IsMarked(obj)) continue; - - Page* p = Page::FromAddress(obj->address()); - if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { - obj->Iterate(&updating_visitor); - p->ClearFlag(Page::RESCAN_ON_EVACUATION); - } - } - } - } - - int npages = evacuation_candidates_.length(); - { GCTracer::Scope gc_scope( - tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); - for (int i = 0; i < npages; i++) { - Page* p = evacuation_candidates_[i]; - ASSERT(p->IsEvacuationCandidate() || - p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); - - if (p->IsEvacuationCandidate()) { - SlotsBuffer::UpdateSlotsRecordedIn(heap_, - p->slots_buffer(), - code_slots_filtering_required); - if (FLAG_trace_fragmentation) { - PrintF(" page %p slots buffer: %d\n", - reinterpret_cast<void*>(p), - SlotsBuffer::SizeOfChain(p->slots_buffer())); - } - - // Important: skip list should be cleared only after roots were updated - // because root iteration traverses the stack and might have to find - // code objects from non-updated pc pointing into evacuation candidate. 
- SkipList* list = p->skip_list(); - if (list != NULL) list->Clear(); - } else { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", - reinterpret_cast<intptr_t>(p)); - } - PagedSpace* space = static_cast<PagedSpace*>(p->owner()); - p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); - - switch (space->identity()) { - case OLD_DATA_SPACE: - SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); - break; - case OLD_POINTER_SPACE: - SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, - IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>( - space, p, &updating_visitor); - break; - case CODE_SPACE: - if (FLAG_zap_code_space) { - SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, - REBUILD_SKIP_LIST, - ZAP_FREE_SPACE>( - space, p, &updating_visitor); - } else { - SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, - REBUILD_SKIP_LIST, - IGNORE_FREE_SPACE>( - space, p, &updating_visitor); - } - break; - default: - UNREACHABLE(); - break; - } - } - } - } - - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS); - - // Update pointers from cells. - HeapObjectIterator cell_iterator(heap_->cell_space()); - for (HeapObject* cell = cell_iterator.Next(); - cell != NULL; - cell = cell_iterator.Next()) { - if (cell->IsCell()) { - Cell::BodyDescriptor::IterateBody(cell, &updating_visitor); - } - } - - HeapObjectIterator js_global_property_cell_iterator( - heap_->property_cell_space()); - for (HeapObject* cell = js_global_property_cell_iterator.Next(); - cell != NULL; - cell = js_global_property_cell_iterator.Next()) { - if (cell->IsPropertyCell()) { - PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); - } - } - - // Update the head of the native contexts list in the heap. 
- updating_visitor.VisitPointer(heap_->native_contexts_list_address()); - - heap_->string_table()->Iterate(&updating_visitor); - updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address()); - if (heap_->weak_object_to_code_table()->IsHashTable()) { - WeakHashTable* table = - WeakHashTable::cast(heap_->weak_object_to_code_table()); - table->Iterate(&updating_visitor); - table->Rehash(heap_->undefined_value()); - } - - // Update pointers from external string table. - heap_->UpdateReferencesInExternalStringTable( - &UpdateReferenceInExternalStringTableEntry); - - EvacuationWeakObjectRetainer evacuation_object_retainer; - heap()->ProcessWeakReferences(&evacuation_object_retainer); - - // Visit invalidated code (we ignored all slots on it) and clear mark-bits - // under it. - ProcessInvalidatedCode(&updating_visitor); - - heap_->isolate()->inner_pointer_to_code_cache()->Flush(); - -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - VerifyEvacuation(heap_); - } -#endif - - slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); - ASSERT(migration_slots_buffer_ == NULL); -} - - -void MarkCompactCollector::UnlinkEvacuationCandidates() { - int npages = evacuation_candidates_.length(); - for (int i = 0; i < npages; i++) { - Page* p = evacuation_candidates_[i]; - if (!p->IsEvacuationCandidate()) continue; - p->Unlink(); - p->ClearSweptPrecisely(); - p->ClearSweptConservatively(); - } -} - - -void MarkCompactCollector::ReleaseEvacuationCandidates() { - int npages = evacuation_candidates_.length(); - for (int i = 0; i < npages; i++) { - Page* p = evacuation_candidates_[i]; - if (!p->IsEvacuationCandidate()) continue; - PagedSpace* space = static_cast<PagedSpace*>(p->owner()); - space->Free(p->area_start(), p->area_size()); - p->set_scan_on_scavenge(false); - slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); - p->ResetLiveBytes(); - space->ReleasePage(p, false); - } - evacuation_candidates_.Rewind(0); - compacting_ = false; - 
heap()->FreeQueuedChunks(); -} - - -static const int kStartTableEntriesPerLine = 5; -static const int kStartTableLines = 171; -static const int kStartTableInvalidLine = 127; -static const int kStartTableUnusedEntry = 126; - -#define _ kStartTableUnusedEntry -#define X kStartTableInvalidLine -// Mark-bit to object start offset table. -// -// The line is indexed by the mark bits in a byte. The first number on -// the line describes the number of live object starts for the line and the -// other numbers on the line describe the offsets (in words) of the object -// starts. -// -// Since objects are at least 2 words large we don't have entries for two -// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits. -char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { - 0, _, _, _, _, // 0 - 1, 0, _, _, _, // 1 - 1, 1, _, _, _, // 2 - X, _, _, _, _, // 3 - 1, 2, _, _, _, // 4 - 2, 0, 2, _, _, // 5 - X, _, _, _, _, // 6 - X, _, _, _, _, // 7 - 1, 3, _, _, _, // 8 - 2, 0, 3, _, _, // 9 - 2, 1, 3, _, _, // 10 - X, _, _, _, _, // 11 - X, _, _, _, _, // 12 - X, _, _, _, _, // 13 - X, _, _, _, _, // 14 - X, _, _, _, _, // 15 - 1, 4, _, _, _, // 16 - 2, 0, 4, _, _, // 17 - 2, 1, 4, _, _, // 18 - X, _, _, _, _, // 19 - 2, 2, 4, _, _, // 20 - 3, 0, 2, 4, _, // 21 - X, _, _, _, _, // 22 - X, _, _, _, _, // 23 - X, _, _, _, _, // 24 - X, _, _, _, _, // 25 - X, _, _, _, _, // 26 - X, _, _, _, _, // 27 - X, _, _, _, _, // 28 - X, _, _, _, _, // 29 - X, _, _, _, _, // 30 - X, _, _, _, _, // 31 - 1, 5, _, _, _, // 32 - 2, 0, 5, _, _, // 33 - 2, 1, 5, _, _, // 34 - X, _, _, _, _, // 35 - 2, 2, 5, _, _, // 36 - 3, 0, 2, 5, _, // 37 - X, _, _, _, _, // 38 - X, _, _, _, _, // 39 - 2, 3, 5, _, _, // 40 - 3, 0, 3, 5, _, // 41 - 3, 1, 3, 5, _, // 42 - X, _, _, _, _, // 43 - X, _, _, _, _, // 44 - X, _, _, _, _, // 45 - X, _, _, _, _, // 46 - X, _, _, _, _, // 47 - X, _, _, _, _, // 48 - X, _, _, _, _, // 49 - X, _, _, _, _, // 50 - X, _, _, _, _, // 51 - X, _, _, 
_, _, // 52 - X, _, _, _, _, // 53 - X, _, _, _, _, // 54 - X, _, _, _, _, // 55 - X, _, _, _, _, // 56 - X, _, _, _, _, // 57 - X, _, _, _, _, // 58 - X, _, _, _, _, // 59 - X, _, _, _, _, // 60 - X, _, _, _, _, // 61 - X, _, _, _, _, // 62 - X, _, _, _, _, // 63 - 1, 6, _, _, _, // 64 - 2, 0, 6, _, _, // 65 - 2, 1, 6, _, _, // 66 - X, _, _, _, _, // 67 - 2, 2, 6, _, _, // 68 - 3, 0, 2, 6, _, // 69 - X, _, _, _, _, // 70 - X, _, _, _, _, // 71 - 2, 3, 6, _, _, // 72 - 3, 0, 3, 6, _, // 73 - 3, 1, 3, 6, _, // 74 - X, _, _, _, _, // 75 - X, _, _, _, _, // 76 - X, _, _, _, _, // 77 - X, _, _, _, _, // 78 - X, _, _, _, _, // 79 - 2, 4, 6, _, _, // 80 - 3, 0, 4, 6, _, // 81 - 3, 1, 4, 6, _, // 82 - X, _, _, _, _, // 83 - 3, 2, 4, 6, _, // 84 - 4, 0, 2, 4, 6, // 85 - X, _, _, _, _, // 86 - X, _, _, _, _, // 87 - X, _, _, _, _, // 88 - X, _, _, _, _, // 89 - X, _, _, _, _, // 90 - X, _, _, _, _, // 91 - X, _, _, _, _, // 92 - X, _, _, _, _, // 93 - X, _, _, _, _, // 94 - X, _, _, _, _, // 95 - X, _, _, _, _, // 96 - X, _, _, _, _, // 97 - X, _, _, _, _, // 98 - X, _, _, _, _, // 99 - X, _, _, _, _, // 100 - X, _, _, _, _, // 101 - X, _, _, _, _, // 102 - X, _, _, _, _, // 103 - X, _, _, _, _, // 104 - X, _, _, _, _, // 105 - X, _, _, _, _, // 106 - X, _, _, _, _, // 107 - X, _, _, _, _, // 108 - X, _, _, _, _, // 109 - X, _, _, _, _, // 110 - X, _, _, _, _, // 111 - X, _, _, _, _, // 112 - X, _, _, _, _, // 113 - X, _, _, _, _, // 114 - X, _, _, _, _, // 115 - X, _, _, _, _, // 116 - X, _, _, _, _, // 117 - X, _, _, _, _, // 118 - X, _, _, _, _, // 119 - X, _, _, _, _, // 120 - X, _, _, _, _, // 121 - X, _, _, _, _, // 122 - X, _, _, _, _, // 123 - X, _, _, _, _, // 124 - X, _, _, _, _, // 125 - X, _, _, _, _, // 126 - X, _, _, _, _, // 127 - 1, 7, _, _, _, // 128 - 2, 0, 7, _, _, // 129 - 2, 1, 7, _, _, // 130 - X, _, _, _, _, // 131 - 2, 2, 7, _, _, // 132 - 3, 0, 2, 7, _, // 133 - X, _, _, _, _, // 134 - X, _, _, _, _, // 135 - 2, 3, 7, _, _, // 136 - 3, 0, 3, 7, _, 
// 137 - 3, 1, 3, 7, _, // 138 - X, _, _, _, _, // 139 - X, _, _, _, _, // 140 - X, _, _, _, _, // 141 - X, _, _, _, _, // 142 - X, _, _, _, _, // 143 - 2, 4, 7, _, _, // 144 - 3, 0, 4, 7, _, // 145 - 3, 1, 4, 7, _, // 146 - X, _, _, _, _, // 147 - 3, 2, 4, 7, _, // 148 - 4, 0, 2, 4, 7, // 149 - X, _, _, _, _, // 150 - X, _, _, _, _, // 151 - X, _, _, _, _, // 152 - X, _, _, _, _, // 153 - X, _, _, _, _, // 154 - X, _, _, _, _, // 155 - X, _, _, _, _, // 156 - X, _, _, _, _, // 157 - X, _, _, _, _, // 158 - X, _, _, _, _, // 159 - 2, 5, 7, _, _, // 160 - 3, 0, 5, 7, _, // 161 - 3, 1, 5, 7, _, // 162 - X, _, _, _, _, // 163 - 3, 2, 5, 7, _, // 164 - 4, 0, 2, 5, 7, // 165 - X, _, _, _, _, // 166 - X, _, _, _, _, // 167 - 3, 3, 5, 7, _, // 168 - 4, 0, 3, 5, 7, // 169 - 4, 1, 3, 5, 7 // 170 -}; -#undef _ -#undef X - - -// Takes a word of mark bits. Returns the number of objects that start in the -// range. Puts the offsets of the words in the supplied array. -static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { - int objects = 0; - int offset = 0; - - // No consecutive 1 bits. - ASSERT((mark_bits & 0x180) != 0x180); - ASSERT((mark_bits & 0x18000) != 0x18000); - ASSERT((mark_bits & 0x1800000) != 0x1800000); - - while (mark_bits != 0) { - int byte = (mark_bits & 0xff); - mark_bits >>= 8; - if (byte != 0) { - ASSERT(byte < kStartTableLines); // No consecutive 1 bits. - char* table = kStartTable + byte * kStartTableEntriesPerLine; - int objects_in_these_8_words = table[0]; - ASSERT(objects_in_these_8_words != kStartTableInvalidLine); - ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); - for (int i = 0; i < objects_in_these_8_words; i++) { - starts[objects++] = offset + table[1 + i]; - } - } - offset += 8; - } - return objects; -} - - -static inline Address DigestFreeStart(Address approximate_free_start, - uint32_t free_start_cell) { - ASSERT(free_start_cell != 0); - - // No consecutive 1 bits. 
- ASSERT((free_start_cell & (free_start_cell << 1)) == 0); - - int offsets[16]; - uint32_t cell = free_start_cell; - int offset_of_last_live; - if ((cell & 0x80000000u) != 0) { - // This case would overflow below. - offset_of_last_live = 31; - } else { - // Remove all but one bit, the most significant. This is an optimization - // that may or may not be worthwhile. - cell |= cell >> 16; - cell |= cell >> 8; - cell |= cell >> 4; - cell |= cell >> 2; - cell |= cell >> 1; - cell = (cell + 1) >> 1; - int live_objects = MarkWordToObjectStarts(cell, offsets); - ASSERT(live_objects == 1); - offset_of_last_live = offsets[live_objects - 1]; - } - Address last_live_start = - approximate_free_start + offset_of_last_live * kPointerSize; - HeapObject* last_live = HeapObject::FromAddress(last_live_start); - Address free_start = last_live_start + last_live->Size(); - return free_start; -} - - -static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { - ASSERT(cell != 0); - - // No consecutive 1 bits. - ASSERT((cell & (cell << 1)) == 0); - - int offsets[16]; - if (cell == 0x80000000u) { // Avoid overflow below. - return block_address + 31 * kPointerSize; - } - uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; - ASSERT((first_set_bit & cell) == first_set_bit); - int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); - ASSERT(live_objects == 1); - USE(live_objects); - return block_address + offsets[0] * kPointerSize; -} - - -template<MarkCompactCollector::SweepingParallelism mode> -static intptr_t Free(PagedSpace* space, - FreeList* free_list, - Address start, - int size) { - if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { - return space->Free(start, size); - } else { - return size - free_list->Free(start, size); - } -} - - -// Force instantiation of templatized SweepConservatively method for -// SWEEP_SEQUENTIALLY mode. 
-template intptr_t MarkCompactCollector:: - SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>( - PagedSpace*, FreeList*, Page*); - - -// Force instantiation of templatized SweepConservatively method for -// SWEEP_IN_PARALLEL mode. -template intptr_t MarkCompactCollector:: - SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>( - PagedSpace*, FreeList*, Page*); - - -// Sweeps a space conservatively. After this has been done the larger free -// spaces have been put on the free list and the smaller ones have been -// ignored and left untouched. A free space is always either ignored or put -// on the free list, never split up into two parts. This is important -// because it means that any FreeSpace maps left actually describe a region of -// memory that can be ignored when scanning. Dead objects other than free -// spaces will not contain the free space map. -template<MarkCompactCollector::SweepingParallelism mode> -intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, - FreeList* free_list, - Page* p) { - // TODO(hpayer): This check is just used for debugging purpose and - // should be removed or turned into an assert after investigating the - // crash in concurrent sweeping. - CHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); - ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && - free_list != NULL) || - (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY && - free_list == NULL)); - - // When parallel sweeping is active, the page will be marked after - // sweeping by the main thread. - if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) { - p->MarkSweptConservatively(); - } - - intptr_t freed_bytes = 0; - size_t size = 0; - - // Skip over all the dead objects at the start of the page and mark them free. 
- Address cell_base = 0; - MarkBit::CellType* cell = NULL; - MarkBitCellIterator it(p); - for (; !it.Done(); it.Advance()) { - cell_base = it.CurrentCellBase(); - cell = it.CurrentCell(); - if (*cell != 0) break; - } - - if (it.Done()) { - size = p->area_end() - p->area_start(); - freed_bytes += Free<mode>(space, free_list, p->area_start(), - static_cast<int>(size)); - ASSERT_EQ(0, p->LiveBytes()); - return freed_bytes; - } - - // Grow the size of the start-of-page free space a little to get up to the - // first live object. - Address free_end = StartOfLiveObject(cell_base, *cell); - // Free the first free space. - size = free_end - p->area_start(); - freed_bytes += Free<mode>(space, free_list, p->area_start(), - static_cast<int>(size)); - - // The start of the current free area is represented in undigested form by - // the address of the last 32-word section that contained a live object and - // the marking bitmap for that cell, which describes where the live object - // started. Unless we find a large free space in the bitmap we will not - // digest this pair into a real address. We start the iteration here at the - // first word in the marking bit map that indicates a live object. - Address free_start = cell_base; - MarkBit::CellType free_start_cell = *cell; - - for (; !it.Done(); it.Advance()) { - cell_base = it.CurrentCellBase(); - cell = it.CurrentCell(); - if (*cell != 0) { - // We have a live object. Check approximately whether it is more than 32 - // words since the last live object. - if (cell_base - free_start > 32 * kPointerSize) { - free_start = DigestFreeStart(free_start, free_start_cell); - if (cell_base - free_start > 32 * kPointerSize) { - // Now that we know the exact start of the free space it still looks - // like we have a large enough free space to be worth bothering with. - // so now we need to find the start of the first live object at the - // end of the free space. 
- free_end = StartOfLiveObject(cell_base, *cell); - freed_bytes += Free<mode>(space, free_list, free_start, - static_cast<int>(free_end - free_start)); - } - } - // Update our undigested record of where the current free area started. - free_start = cell_base; - free_start_cell = *cell; - // Clear marking bits for current cell. - *cell = 0; - } - } - - // Handle the free space at the end of the page. - if (cell_base - free_start > 32 * kPointerSize) { - free_start = DigestFreeStart(free_start, free_start_cell); - freed_bytes += Free<mode>(space, free_list, free_start, - static_cast<int>(p->area_end() - free_start)); - } - - p->ResetLiveBytes(); - return freed_bytes; -} - - -void MarkCompactCollector::SweepInParallel(PagedSpace* space) { - PageIterator it(space); - FreeList* free_list = space == heap()->old_pointer_space() - ? free_list_old_pointer_space_.get() - : free_list_old_data_space_.get(); - FreeList private_free_list(space); - while (it.has_next()) { - Page* p = it.next(); - - if (p->TryParallelSweeping()) { - SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p); - free_list->Concatenate(&private_free_list); - p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE); - } - } -} - - -void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { - space->set_was_swept_conservatively(sweeper == CONSERVATIVE || - sweeper == LAZY_CONSERVATIVE || - sweeper == PARALLEL_CONSERVATIVE || - sweeper == CONCURRENT_CONSERVATIVE); - space->ClearStats(); - - PageIterator it(space); - - int pages_swept = 0; - bool lazy_sweeping_active = false; - bool unused_page_present = false; - bool parallel_sweeping_active = false; - - while (it.has_next()) { - Page* p = it.next(); - - ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE); - ASSERT(!p->IsEvacuationCandidate()); - - // Clear sweeping flags indicating that marking bits are still intact. 
- p->ClearSweptPrecisely(); - p->ClearSweptConservatively(); - - if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { - // Will be processed in EvacuateNewSpaceAndCandidates. - ASSERT(evacuation_candidates_.length() > 0); - continue; - } - - // One unused page is kept, all further are released before sweeping them. - if (p->LiveBytes() == 0) { - if (unused_page_present) { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", - reinterpret_cast<intptr_t>(p)); - } - // Adjust unswept free bytes because releasing a page expects said - // counter to be accurate for unswept pages. - space->IncreaseUnsweptFreeBytes(p); - space->ReleasePage(p, true); - continue; - } - unused_page_present = true; - } - - switch (sweeper) { - case CONSERVATIVE: { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", - reinterpret_cast<intptr_t>(p)); - } - SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); - pages_swept++; - break; - } - case LAZY_CONSERVATIVE: { - if (lazy_sweeping_active) { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", - reinterpret_cast<intptr_t>(p)); - } - space->IncreaseUnsweptFreeBytes(p); - } else { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", - reinterpret_cast<intptr_t>(p)); - } - SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); - pages_swept++; - space->SetPagesToSweep(p->next_page()); - lazy_sweeping_active = true; - } - break; - } - case CONCURRENT_CONSERVATIVE: - case PARALLEL_CONSERVATIVE: { - if (!parallel_sweeping_active) { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", - reinterpret_cast<intptr_t>(p)); - } - SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); - pages_swept++; - parallel_sweeping_active = true; - } else { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", - reinterpret_cast<intptr_t>(p)); - } - 
p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING); - space->IncreaseUnsweptFreeBytes(p); - } - break; - } - case PRECISE: { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", - reinterpret_cast<intptr_t>(p)); - } - if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { - SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( - space, p, NULL); - } else if (space->identity() == CODE_SPACE) { - SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>( - space, p, NULL); - } else { - SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( - space, p, NULL); - } - pages_swept++; - break; - } - default: { - UNREACHABLE(); - } - } - } - - if (FLAG_gc_verbose) { - PrintF("SweepSpace: %s (%d pages swept)\n", - AllocationSpaceName(space->identity()), - pages_swept); - } - - // Give pages that are queued to be freed back to the OS. - heap()->FreeQueuedChunks(); -} - - -void MarkCompactCollector::SweepSpaces() { - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); -#ifdef DEBUG - state_ = SWEEP_SPACES; -#endif - SweeperType how_to_sweep = - FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; - if (AreSweeperThreadsActivated()) { - if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; - if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; - } - if (sweep_precisely_) how_to_sweep = PRECISE; - - // Unlink evacuation candidates before sweeper threads access the list of - // pages to avoid race condition. - UnlinkEvacuationCandidates(); - - // Noncompacting collections simply sweep the spaces to clear the mark - // bits and free the nonlive blocks (for old and map spaces). We sweep - // the map space last because freeing non-live maps overwrites them and - // the other spaces rely on possibly non-live maps to get the sizes for - // non-live objects. 
- { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE); - { SequentialSweepingScope scope(this); - SweepSpace(heap()->old_pointer_space(), how_to_sweep); - SweepSpace(heap()->old_data_space(), how_to_sweep); - } - - if (how_to_sweep == PARALLEL_CONSERVATIVE || - how_to_sweep == CONCURRENT_CONSERVATIVE) { - // TODO(hpayer): fix race with concurrent sweeper - StartSweeperThreads(); - } - - if (how_to_sweep == PARALLEL_CONSERVATIVE) { - WaitUntilSweepingCompleted(); - } - } - RemoveDeadInvalidatedCode(); - SweepSpace(heap()->code_space(), PRECISE); - - SweepSpace(heap()->cell_space(), PRECISE); - SweepSpace(heap()->property_cell_space(), PRECISE); - - EvacuateNewSpaceAndCandidates(); - - // ClearNonLiveTransitions depends on precise sweeping of map space to - // detect whether unmarked map became dead in this collection or in one - // of the previous ones. - SweepSpace(heap()->map_space(), PRECISE); - - // Deallocate unmarked objects and clear marked bits for marked objects. - heap_->lo_space()->FreeUnmarkedObjects(); - - // Deallocate evacuated candidate pages. 
- ReleaseEvacuationCandidates(); -} - - -void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { - PageIterator it(space); - while (it.has_next()) { - Page* p = it.next(); - if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) { - p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE); - p->MarkSweptConservatively(); - } - ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE); - } -} - - -void MarkCompactCollector::ParallelSweepSpacesComplete() { - ParallelSweepSpaceComplete(heap()->old_pointer_space()); - ParallelSweepSpaceComplete(heap()->old_data_space()); -} - - -void MarkCompactCollector::EnableCodeFlushing(bool enable) { -#ifdef ENABLE_DEBUGGER_SUPPORT - if (isolate()->debug()->IsLoaded() || - isolate()->debug()->has_break_points()) { - enable = false; - } -#endif - - if (enable) { - if (code_flusher_ != NULL) return; - code_flusher_ = new CodeFlusher(isolate()); - } else { - if (code_flusher_ == NULL) return; - code_flusher_->EvictAllCandidates(); - delete code_flusher_; - code_flusher_ = NULL; - } - - if (FLAG_trace_code_flushing) { - PrintF("[code-flushing is now %s]\n", enable ? "on" : "off"); - } -} - - -// TODO(1466) ReportDeleteIfNeeded is not called currently. -// Our profiling tools do not expect intersections between -// code objects. We should either reenable it or change our tools. 
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, - Isolate* isolate) { -#ifdef ENABLE_GDB_JIT_INTERFACE - if (obj->IsCode()) { - GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); - } -#endif - if (obj->IsCode()) { - PROFILE(isolate, CodeDeleteEvent(obj->address())); - } -} - - -Isolate* MarkCompactCollector::isolate() const { - return heap_->isolate(); -} - - -void MarkCompactCollector::Initialize() { - MarkCompactMarkingVisitor::Initialize(); - IncrementalMarking::Initialize(); -} - - -bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { - return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; -} - - -bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, - SlotsBuffer** buffer_address, - SlotType type, - Address addr, - AdditionMode mode) { - SlotsBuffer* buffer = *buffer_address; - if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { - if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { - allocator->DeallocateChain(buffer_address); - return false; - } - buffer = allocator->AllocateBuffer(buffer); - *buffer_address = buffer; - } - ASSERT(buffer->HasSpaceForTypedSlot()); - buffer->Add(reinterpret_cast<ObjectSlot>(type)); - buffer->Add(reinterpret_cast<ObjectSlot>(addr)); - return true; -} - - -static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { - if (RelocInfo::IsCodeTarget(rmode)) { - return SlotsBuffer::CODE_TARGET_SLOT; - } else if (RelocInfo::IsEmbeddedObject(rmode)) { - return SlotsBuffer::EMBEDDED_OBJECT_SLOT; - } else if (RelocInfo::IsDebugBreakSlot(rmode)) { - return SlotsBuffer::DEBUG_TARGET_SLOT; - } else if (RelocInfo::IsJSReturn(rmode)) { - return SlotsBuffer::JS_RETURN_SLOT; - } - UNREACHABLE(); - return SlotsBuffer::NUMBER_OF_SLOT_TYPES; -} - - -void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) { - Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); - RelocInfo::Mode rmode = rinfo->rmode(); - if 
(target_page->IsEvacuationCandidate() && - (rinfo->host() == NULL || - !ShouldSkipEvacuationSlotRecording(rinfo->host()))) { - bool success; - if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) { - // This doesn't need to be typed since it is just a normal heap pointer. - Object** target_pointer = - reinterpret_cast<Object**>(rinfo->constant_pool_entry_address()); - success = SlotsBuffer::AddTo(&slots_buffer_allocator_, - target_page->slots_buffer_address(), - target_pointer, - SlotsBuffer::FAIL_ON_OVERFLOW); - } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) { - success = SlotsBuffer::AddTo(&slots_buffer_allocator_, - target_page->slots_buffer_address(), - SlotsBuffer::CODE_ENTRY_SLOT, - rinfo->constant_pool_entry_address(), - SlotsBuffer::FAIL_ON_OVERFLOW); - } else { - success = SlotsBuffer::AddTo(&slots_buffer_allocator_, - target_page->slots_buffer_address(), - SlotTypeForRMode(rmode), - rinfo->pc(), - SlotsBuffer::FAIL_ON_OVERFLOW); - } - if (!success) { - EvictEvacuationCandidate(target_page); - } - } -} - - -void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { - Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); - if (target_page->IsEvacuationCandidate() && - !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) { - if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, - target_page->slots_buffer_address(), - SlotsBuffer::CODE_ENTRY_SLOT, - slot, - SlotsBuffer::FAIL_ON_OVERFLOW)) { - EvictEvacuationCandidate(target_page); - } - } -} - - -void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { - ASSERT(heap()->gc_state() == Heap::MARK_COMPACT); - if (is_compacting()) { - Code* host = isolate()->inner_pointer_to_code_cache()-> - GcSafeFindCodeForInnerPointer(pc); - MarkBit mark_bit = Marking::MarkBitFrom(host); - if (Marking::IsBlack(mark_bit)) { - RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); - RecordRelocSlot(&rinfo, target); - } 
- } -} - - -static inline SlotsBuffer::SlotType DecodeSlotType( - SlotsBuffer::ObjectSlot slot) { - return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); -} - - -void SlotsBuffer::UpdateSlots(Heap* heap) { - PointersUpdatingVisitor v(heap); - - for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { - ObjectSlot slot = slots_[slot_idx]; - if (!IsTypedSlot(slot)) { - PointersUpdatingVisitor::UpdateSlot(heap, slot); - } else { - ++slot_idx; - ASSERT(slot_idx < idx_); - UpdateSlot(heap->isolate(), - &v, - DecodeSlotType(slot), - reinterpret_cast<Address>(slots_[slot_idx])); - } - } -} - - -void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { - PointersUpdatingVisitor v(heap); - - for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { - ObjectSlot slot = slots_[slot_idx]; - if (!IsTypedSlot(slot)) { - if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { - PointersUpdatingVisitor::UpdateSlot(heap, slot); - } - } else { - ++slot_idx; - ASSERT(slot_idx < idx_); - Address pc = reinterpret_cast<Address>(slots_[slot_idx]); - if (!IsOnInvalidatedCodeObject(pc)) { - UpdateSlot(heap->isolate(), - &v, - DecodeSlotType(slot), - reinterpret_cast<Address>(slots_[slot_idx])); - } - } - } -} - - -SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { - return new SlotsBuffer(next_buffer); -} - - -void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { - delete buffer; -} - - -void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { - SlotsBuffer* buffer = *buffer_address; - while (buffer != NULL) { - SlotsBuffer* next_buffer = buffer->next(); - DeallocateBuffer(buffer); - buffer = next_buffer; - } - *buffer_address = NULL; -} - - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/mark-compact.h nodejs-0.11.15/deps/v8/src/mark-compact.h --- nodejs-0.11.13/deps/v8/src/mark-compact.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mark-compact.h 1970-01-01 
00:00:00.000000000 +0000 @@ -1,1044 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_MARK_COMPACT_H_ -#define V8_MARK_COMPACT_H_ - -#include "compiler-intrinsics.h" -#include "spaces.h" - -namespace v8 { -namespace internal { - -// Callback function, returns whether an object is alive. The heap size -// of the object is returned in size. It optionally updates the offset -// to the first live object in the page (only used for old and map objects). 
-typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset); - -// Forward declarations. -class CodeFlusher; -class GCTracer; -class MarkCompactCollector; -class MarkingVisitor; -class RootMarkingVisitor; - - -class Marking { - public: - explicit Marking(Heap* heap) - : heap_(heap) { - } - - INLINE(static MarkBit MarkBitFrom(Address addr)); - - INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) { - return MarkBitFrom(reinterpret_cast<Address>(obj)); - } - - // Impossible markbits: 01 - static const char* kImpossibleBitPattern; - INLINE(static bool IsImpossible(MarkBit mark_bit)) { - return !mark_bit.Get() && mark_bit.Next().Get(); - } - - // Black markbits: 10 - this is required by the sweeper. - static const char* kBlackBitPattern; - INLINE(static bool IsBlack(MarkBit mark_bit)) { - return mark_bit.Get() && !mark_bit.Next().Get(); - } - - // White markbits: 00 - this is required by the mark bit clearer. - static const char* kWhiteBitPattern; - INLINE(static bool IsWhite(MarkBit mark_bit)) { - return !mark_bit.Get(); - } - - // Grey markbits: 11 - static const char* kGreyBitPattern; - INLINE(static bool IsGrey(MarkBit mark_bit)) { - return mark_bit.Get() && mark_bit.Next().Get(); - } - - INLINE(static void MarkBlack(MarkBit mark_bit)) { - mark_bit.Set(); - mark_bit.Next().Clear(); - } - - INLINE(static void BlackToGrey(MarkBit markbit)) { - markbit.Next().Set(); - } - - INLINE(static void WhiteToGrey(MarkBit markbit)) { - markbit.Set(); - markbit.Next().Set(); - } - - INLINE(static void GreyToBlack(MarkBit markbit)) { - markbit.Next().Clear(); - } - - INLINE(static void BlackToGrey(HeapObject* obj)) { - BlackToGrey(MarkBitFrom(obj)); - } - - INLINE(static void AnyToGrey(MarkBit markbit)) { - markbit.Set(); - markbit.Next().Set(); - } - - void TransferMark(Address old_start, Address new_start); - -#ifdef DEBUG - enum ObjectColor { - BLACK_OBJECT, - WHITE_OBJECT, - GREY_OBJECT, - IMPOSSIBLE_COLOR - }; - - static const char* ColorName(ObjectColor color) 
{ - switch (color) { - case BLACK_OBJECT: return "black"; - case WHITE_OBJECT: return "white"; - case GREY_OBJECT: return "grey"; - case IMPOSSIBLE_COLOR: return "impossible"; - } - return "error"; - } - - static ObjectColor Color(HeapObject* obj) { - return Color(Marking::MarkBitFrom(obj)); - } - - static ObjectColor Color(MarkBit mark_bit) { - if (IsBlack(mark_bit)) return BLACK_OBJECT; - if (IsWhite(mark_bit)) return WHITE_OBJECT; - if (IsGrey(mark_bit)) return GREY_OBJECT; - UNREACHABLE(); - return IMPOSSIBLE_COLOR; - } -#endif - - // Returns true if the transferred color is black. - INLINE(static bool TransferColor(HeapObject* from, - HeapObject* to)) { - MarkBit from_mark_bit = MarkBitFrom(from); - MarkBit to_mark_bit = MarkBitFrom(to); - bool is_black = false; - if (from_mark_bit.Get()) { - to_mark_bit.Set(); - is_black = true; // Looks black so far. - } - if (from_mark_bit.Next().Get()) { - to_mark_bit.Next().Set(); - is_black = false; // Was actually gray. - } - return is_black; - } - - private: - Heap* heap_; -}; - -// ---------------------------------------------------------------------------- -// Marking deque for tracing live objects. 
-class MarkingDeque { - public: - MarkingDeque() - : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { } - - void Initialize(Address low, Address high) { - HeapObject** obj_low = reinterpret_cast<HeapObject**>(low); - HeapObject** obj_high = reinterpret_cast<HeapObject**>(high); - array_ = obj_low; - mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1; - top_ = bottom_ = 0; - overflowed_ = false; - } - - inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; } - - inline bool IsEmpty() { return top_ == bottom_; } - - bool overflowed() const { return overflowed_; } - - void ClearOverflowed() { overflowed_ = false; } - - void SetOverflowed() { overflowed_ = true; } - - // Push the (marked) object on the marking stack if there is room, - // otherwise mark the object as overflowed and wait for a rescan of the - // heap. - INLINE(void PushBlack(HeapObject* object)) { - ASSERT(object->IsHeapObject()); - if (IsFull()) { - Marking::BlackToGrey(object); - MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size()); - SetOverflowed(); - } else { - array_[top_] = object; - top_ = ((top_ + 1) & mask_); - } - } - - INLINE(void PushGrey(HeapObject* object)) { - ASSERT(object->IsHeapObject()); - if (IsFull()) { - SetOverflowed(); - } else { - array_[top_] = object; - top_ = ((top_ + 1) & mask_); - } - } - - INLINE(HeapObject* Pop()) { - ASSERT(!IsEmpty()); - top_ = ((top_ - 1) & mask_); - HeapObject* object = array_[top_]; - ASSERT(object->IsHeapObject()); - return object; - } - - INLINE(void UnshiftGrey(HeapObject* object)) { - ASSERT(object->IsHeapObject()); - if (IsFull()) { - SetOverflowed(); - } else { - bottom_ = ((bottom_ - 1) & mask_); - array_[bottom_] = object; - } - } - - HeapObject** array() { return array_; } - int bottom() { return bottom_; } - int top() { return top_; } - int mask() { return mask_; } - void set_top(int top) { top_ = top; } - - private: - HeapObject** array_; - // array_[(top - 1) & mask_] is 
the top element in the deque. The Deque is - // empty when top_ == bottom_. It is full when top_ + 1 == bottom - // (mod mask + 1). - int top_; - int bottom_; - int mask_; - bool overflowed_; - - DISALLOW_COPY_AND_ASSIGN(MarkingDeque); -}; - - -class SlotsBufferAllocator { - public: - SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer); - void DeallocateBuffer(SlotsBuffer* buffer); - - void DeallocateChain(SlotsBuffer** buffer_address); -}; - - -// SlotsBuffer records a sequence of slots that has to be updated -// after live objects were relocated from evacuation candidates. -// All slots are either untyped or typed: -// - Untyped slots are expected to contain a tagged object pointer. -// They are recorded by an address. -// - Typed slots are expected to contain an encoded pointer to a heap -// object where the way of encoding depends on the type of the slot. -// They are recorded as a pair (SlotType, slot address). -// We assume that zero-page is never mapped this allows us to distinguish -// untyped slots from typed slots during iteration by a simple comparison: -// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it -// is the first element of typed slot's pair. 
-class SlotsBuffer { - public: - typedef Object** ObjectSlot; - - explicit SlotsBuffer(SlotsBuffer* next_buffer) - : idx_(0), chain_length_(1), next_(next_buffer) { - if (next_ != NULL) { - chain_length_ = next_->chain_length_ + 1; - } - } - - ~SlotsBuffer() { - } - - void Add(ObjectSlot slot) { - ASSERT(0 <= idx_ && idx_ < kNumberOfElements); - slots_[idx_++] = slot; - } - - enum SlotType { - EMBEDDED_OBJECT_SLOT, - RELOCATED_CODE_OBJECT, - CODE_TARGET_SLOT, - CODE_ENTRY_SLOT, - DEBUG_TARGET_SLOT, - JS_RETURN_SLOT, - NUMBER_OF_SLOT_TYPES - }; - - static const char* SlotTypeToString(SlotType type) { - switch (type) { - case EMBEDDED_OBJECT_SLOT: - return "EMBEDDED_OBJECT_SLOT"; - case RELOCATED_CODE_OBJECT: - return "RELOCATED_CODE_OBJECT"; - case CODE_TARGET_SLOT: - return "CODE_TARGET_SLOT"; - case CODE_ENTRY_SLOT: - return "CODE_ENTRY_SLOT"; - case DEBUG_TARGET_SLOT: - return "DEBUG_TARGET_SLOT"; - case JS_RETURN_SLOT: - return "JS_RETURN_SLOT"; - case NUMBER_OF_SLOT_TYPES: - return "NUMBER_OF_SLOT_TYPES"; - } - return "UNKNOWN SlotType"; - } - - void UpdateSlots(Heap* heap); - - void UpdateSlotsWithFilter(Heap* heap); - - SlotsBuffer* next() { return next_; } - - static int SizeOfChain(SlotsBuffer* buffer) { - if (buffer == NULL) return 0; - return static_cast<int>(buffer->idx_ + - (buffer->chain_length_ - 1) * kNumberOfElements); - } - - inline bool IsFull() { - return idx_ == kNumberOfElements; - } - - inline bool HasSpaceForTypedSlot() { - return idx_ < kNumberOfElements - 1; - } - - static void UpdateSlotsRecordedIn(Heap* heap, - SlotsBuffer* buffer, - bool code_slots_filtering_required) { - while (buffer != NULL) { - if (code_slots_filtering_required) { - buffer->UpdateSlotsWithFilter(heap); - } else { - buffer->UpdateSlots(heap); - } - buffer = buffer->next(); - } - } - - enum AdditionMode { - FAIL_ON_OVERFLOW, - IGNORE_OVERFLOW - }; - - static bool ChainLengthThresholdReached(SlotsBuffer* buffer) { - return buffer != NULL && buffer->chain_length_ >= 
kChainLengthThreshold; - } - - INLINE(static bool AddTo(SlotsBufferAllocator* allocator, - SlotsBuffer** buffer_address, - ObjectSlot slot, - AdditionMode mode)) { - SlotsBuffer* buffer = *buffer_address; - if (buffer == NULL || buffer->IsFull()) { - if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { - allocator->DeallocateChain(buffer_address); - return false; - } - buffer = allocator->AllocateBuffer(buffer); - *buffer_address = buffer; - } - buffer->Add(slot); - return true; - } - - static bool IsTypedSlot(ObjectSlot slot); - - static bool AddTo(SlotsBufferAllocator* allocator, - SlotsBuffer** buffer_address, - SlotType type, - Address addr, - AdditionMode mode); - - static const int kNumberOfElements = 1021; - - private: - static const int kChainLengthThreshold = 15; - - intptr_t idx_; - intptr_t chain_length_; - SlotsBuffer* next_; - ObjectSlot slots_[kNumberOfElements]; -}; - - -// CodeFlusher collects candidates for code flushing during marking and -// processes those candidates after marking has completed in order to -// reset those functions referencing code objects that would otherwise -// be unreachable. Code objects can be referenced in three ways: -// - SharedFunctionInfo references unoptimized code. -// - JSFunction references either unoptimized or optimized code. -// - OptimizedCodeMap references optimized code. -// We are not allowed to flush unoptimized code for functions that got -// optimized or inlined into optimized code, because we might bailout -// into the unoptimized code again during deoptimization. 
-class CodeFlusher { - public: - explicit CodeFlusher(Isolate* isolate) - : isolate_(isolate), - jsfunction_candidates_head_(NULL), - shared_function_info_candidates_head_(NULL), - optimized_code_map_holder_head_(NULL) {} - - void AddCandidate(SharedFunctionInfo* shared_info) { - if (GetNextCandidate(shared_info) == NULL) { - SetNextCandidate(shared_info, shared_function_info_candidates_head_); - shared_function_info_candidates_head_ = shared_info; - } - } - - void AddCandidate(JSFunction* function) { - ASSERT(function->code() == function->shared()->code()); - if (GetNextCandidate(function)->IsUndefined()) { - SetNextCandidate(function, jsfunction_candidates_head_); - jsfunction_candidates_head_ = function; - } - } - - void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) { - if (GetNextCodeMap(code_map_holder)->IsUndefined()) { - SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_); - optimized_code_map_holder_head_ = code_map_holder; - } - } - - void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder); - void EvictCandidate(SharedFunctionInfo* shared_info); - void EvictCandidate(JSFunction* function); - - void ProcessCandidates() { - ProcessOptimizedCodeMaps(); - ProcessSharedFunctionInfoCandidates(); - ProcessJSFunctionCandidates(); - } - - void EvictAllCandidates() { - EvictOptimizedCodeMaps(); - EvictJSFunctionCandidates(); - EvictSharedFunctionInfoCandidates(); - } - - void IteratePointersToFromSpace(ObjectVisitor* v); - - private: - void ProcessOptimizedCodeMaps(); - void ProcessJSFunctionCandidates(); - void ProcessSharedFunctionInfoCandidates(); - void EvictOptimizedCodeMaps(); - void EvictJSFunctionCandidates(); - void EvictSharedFunctionInfoCandidates(); - - static JSFunction** GetNextCandidateSlot(JSFunction* candidate) { - return reinterpret_cast<JSFunction**>( - HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset)); - } - - static JSFunction* GetNextCandidate(JSFunction* candidate) { - Object* next_candidate 
= candidate->next_function_link(); - return reinterpret_cast<JSFunction*>(next_candidate); - } - - static void SetNextCandidate(JSFunction* candidate, - JSFunction* next_candidate) { - candidate->set_next_function_link(next_candidate); - } - - static void ClearNextCandidate(JSFunction* candidate, Object* undefined) { - ASSERT(undefined->IsUndefined()); - candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER); - } - - static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) { - Object* next_candidate = candidate->code()->gc_metadata(); - return reinterpret_cast<SharedFunctionInfo*>(next_candidate); - } - - static void SetNextCandidate(SharedFunctionInfo* candidate, - SharedFunctionInfo* next_candidate) { - candidate->code()->set_gc_metadata(next_candidate); - } - - static void ClearNextCandidate(SharedFunctionInfo* candidate) { - candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER); - } - - static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) { - FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); - Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex); - return reinterpret_cast<SharedFunctionInfo*>(next_map); - } - - static void SetNextCodeMap(SharedFunctionInfo* holder, - SharedFunctionInfo* next_holder) { - FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); - code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder); - } - - static void ClearNextCodeMap(SharedFunctionInfo* holder) { - FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); - code_map->set_undefined(SharedFunctionInfo::kNextMapIndex); - } - - Isolate* isolate_; - JSFunction* jsfunction_candidates_head_; - SharedFunctionInfo* shared_function_info_candidates_head_; - SharedFunctionInfo* optimized_code_map_holder_head_; - - DISALLOW_COPY_AND_ASSIGN(CodeFlusher); -}; - - -// Defined in isolate.h. 
-class ThreadLocalTop; - - -// ------------------------------------------------------------------------- -// Mark-Compact collector -class MarkCompactCollector { - public: - // Type of functions to compute forwarding addresses of objects in - // compacted spaces. Given an object and its size, return a (non-failure) - // Object* that will be the object after forwarding. There is a separate - // allocation function for each (compactable) space based on the location - // of the object before compaction. - typedef MaybeObject* (*AllocationFunction)(Heap* heap, - HeapObject* object, - int object_size); - - // Type of functions to encode the forwarding address for an object. - // Given the object, its size, and the new (non-failure) object it will be - // forwarded to, encode the forwarding address. For paged spaces, the - // 'offset' input/output parameter contains the offset of the forwarded - // object from the forwarding address of the previous live object in the - // page as input, and is updated to contain the offset to be used for the - // next live object in the same page. For spaces using a different - // encoding (i.e., contiguous spaces), the offset parameter is ignored. - typedef void (*EncodingFunction)(Heap* heap, - HeapObject* old_object, - int object_size, - Object* new_object, - int* offset); - - // Type of functions to process non-live objects. - typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate); - - // Pointer to member function, used in IterateLiveObjects. - typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj); - - // Set the global flags, it must be called before Prepare to take effect. - inline void SetFlags(int flags); - - static void Initialize(); - - void SetUp(); - - void TearDown(); - - void CollectEvacuationCandidates(PagedSpace* space); - - void AddEvacuationCandidate(Page* p); - - // Prepares for GC by resetting relocation info in old and map spaces and - // choosing spaces to compact. 
- void Prepare(GCTracer* tracer); - - // Performs a global garbage collection. - void CollectGarbage(); - - enum CompactionMode { - INCREMENTAL_COMPACTION, - NON_INCREMENTAL_COMPACTION - }; - - bool StartCompaction(CompactionMode mode); - - void AbortCompaction(); - - // During a full GC, there is a stack-allocated GCTracer that is used for - // bookkeeping information. Return a pointer to that tracer. - GCTracer* tracer() { return tracer_; } - -#ifdef DEBUG - // Checks whether performing mark-compact collection. - bool in_use() { return state_ > PREPARE_GC; } - bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; } -#endif - - // Determine type of object and emit deletion log event. - static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate); - - // Distinguishable invalid map encodings (for single word and multiple words) - // that indicate free regions. - static const uint32_t kSingleFreeEncoding = 0; - static const uint32_t kMultiFreeEncoding = 1; - - static inline bool IsMarked(Object* obj); - - inline Heap* heap() const { return heap_; } - inline Isolate* isolate() const; - - CodeFlusher* code_flusher() { return code_flusher_; } - inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; } - void EnableCodeFlushing(bool enable); - - enum SweeperType { - CONSERVATIVE, - LAZY_CONSERVATIVE, - PARALLEL_CONSERVATIVE, - CONCURRENT_CONSERVATIVE, - PRECISE - }; - - enum SweepingParallelism { - SWEEP_SEQUENTIALLY, - SWEEP_IN_PARALLEL - }; - -#ifdef VERIFY_HEAP - void VerifyMarkbitsAreClean(); - static void VerifyMarkbitsAreClean(PagedSpace* space); - static void VerifyMarkbitsAreClean(NewSpace* space); - void VerifyWeakEmbeddedObjectsInOptimizedCode(); - void VerifyOmittedMapChecks(); -#endif - - // Sweep a single page from the given space conservatively. - // Return a number of reclaimed bytes. 
- template<SweepingParallelism type> - static intptr_t SweepConservatively(PagedSpace* space, - FreeList* free_list, - Page* p); - - INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { - return Page::FromAddress(reinterpret_cast<Address>(anchor))-> - ShouldSkipEvacuationSlotRecording(); - } - - INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) { - return Page::FromAddress(reinterpret_cast<Address>(host))-> - ShouldSkipEvacuationSlotRecording(); - } - - INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { - return Page::FromAddress(reinterpret_cast<Address>(obj))-> - IsEvacuationCandidate(); - } - - INLINE(void EvictEvacuationCandidate(Page* page)) { - if (FLAG_trace_fragmentation) { - PrintF("Page %p is too popular. Disabling evacuation.\n", - reinterpret_cast<void*>(page)); - } - - // TODO(gc) If all evacuation candidates are too popular we - // should stop slots recording entirely. - page->ClearEvacuationCandidate(); - - // We were not collecting slots on this page that point - // to other evacuation candidates thus we have to - // rescan the page after evacuation to discover and update all - // pointers to evacuated objects. 
- if (page->owner()->identity() == OLD_DATA_SPACE) { - evacuation_candidates_.RemoveElement(page); - } else { - page->SetFlag(Page::RESCAN_ON_EVACUATION); - } - } - - void RecordRelocSlot(RelocInfo* rinfo, Object* target); - void RecordCodeEntrySlot(Address slot, Code* target); - void RecordCodeTargetPatch(Address pc, Code* target); - - INLINE(void RecordSlot(Object** anchor_slot, - Object** slot, - Object* object, - SlotsBuffer::AdditionMode mode = - SlotsBuffer::FAIL_ON_OVERFLOW)); - - void MigrateObject(HeapObject* dst, - HeapObject* src, - int size, - AllocationSpace to_old_space); - - bool TryPromoteObject(HeapObject* object, int object_size); - - inline Object* encountered_weak_collections() { - return encountered_weak_collections_; - } - inline void set_encountered_weak_collections(Object* weak_collection) { - encountered_weak_collections_ = weak_collection; - } - - void InvalidateCode(Code* code); - - void ClearMarkbits(); - - bool abort_incremental_marking() const { return abort_incremental_marking_; } - - bool is_compacting() const { return compacting_; } - - MarkingParity marking_parity() { return marking_parity_; } - - // Concurrent and parallel sweeping support. - void SweepInParallel(PagedSpace* space); - - void WaitUntilSweepingCompleted(); - - intptr_t RefillFreeLists(PagedSpace* space); - - bool AreSweeperThreadsActivated(); - - bool IsConcurrentSweepingInProgress(); - - void set_sequential_sweeping(bool sequential_sweeping) { - sequential_sweeping_ = sequential_sweeping; - } - - bool sequential_sweeping() const { - return sequential_sweeping_; - } - - // Mark the global table which maps weak objects to dependent code without - // marking its contents. - void MarkWeakObjectToCodeTable(); - - // Special case for processing weak references in a full collection. We need - // to artifically keep AllocationSites alive for a time. 
- void MarkAllocationSite(AllocationSite* site); - - private: - class SweeperTask; - - explicit MarkCompactCollector(Heap* heap); - ~MarkCompactCollector(); - - bool MarkInvalidatedCode(); - bool WillBeDeoptimized(Code* code); - void RemoveDeadInvalidatedCode(); - void ProcessInvalidatedCode(ObjectVisitor* visitor); - - void UnlinkEvacuationCandidates(); - void ReleaseEvacuationCandidates(); - - void StartSweeperThreads(); - -#ifdef DEBUG - enum CollectorState { - IDLE, - PREPARE_GC, - MARK_LIVE_OBJECTS, - SWEEP_SPACES, - ENCODE_FORWARDING_ADDRESSES, - UPDATE_POINTERS, - RELOCATE_OBJECTS - }; - - // The current stage of the collector. - CollectorState state_; -#endif - - // Global flag that forces sweeping to be precise, so we can traverse the - // heap. - bool sweep_precisely_; - - bool reduce_memory_footprint_; - - bool abort_incremental_marking_; - - MarkingParity marking_parity_; - - // True if we are collecting slots to perform evacuation from evacuation - // candidates. - bool compacting_; - - bool was_marked_incrementally_; - - // True if concurrent or parallel sweeping is currently in progress. - bool sweeping_pending_; - - Semaphore pending_sweeper_jobs_semaphore_; - - bool sequential_sweeping_; - - // A pointer to the current stack-allocated GC tracer object during a full - // collection (NULL before and after). - GCTracer* tracer_; - - SlotsBufferAllocator slots_buffer_allocator_; - - SlotsBuffer* migration_slots_buffer_; - - // Finishes GC, performs heap verification if enabled. - void Finish(); - - // ----------------------------------------------------------------------- - // Phase 1: Marking live objects. - // - // Before: The heap has been prepared for garbage collection by - // MarkCompactCollector::Prepare() and is otherwise in its - // normal state. - // - // After: Live objects are marked and non-live objects are unmarked. 
- - friend class RootMarkingVisitor; - friend class MarkingVisitor; - friend class MarkCompactMarkingVisitor; - friend class CodeMarkingVisitor; - friend class SharedFunctionInfoMarkingVisitor; - - // Mark code objects that are active on the stack to prevent them - // from being flushed. - void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top); - - void PrepareForCodeFlushing(); - - // Marking operations for objects reachable from roots. - void MarkLiveObjects(); - - void AfterMarking(); - - // Marks the object black and pushes it on the marking stack. - // This is for non-incremental marking only. - INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit)); - - // Marks the object black assuming that it is not yet marked. - // This is for non-incremental marking only. - INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit)); - - // Mark the heap roots and all objects reachable from them. - void MarkRoots(RootMarkingVisitor* visitor); - - // Mark the string table specially. References to internalized strings from - // the string table are weak. - void MarkStringTable(RootMarkingVisitor* visitor); - - // Mark objects in implicit references groups if their parent object - // is marked. - void MarkImplicitRefGroups(); - - // Mark objects reachable (transitively) from objects in the marking stack - // or overflowed in the heap. - void ProcessMarkingDeque(); - - // Mark objects reachable (transitively) from objects in the marking stack - // or overflowed in the heap. This respects references only considered in - // the final atomic marking pause including the following: - // - Processing of objects reachable through Harmony WeakMaps. - // - Objects reachable due to host application logic like object groups - // or implicit references' groups. 
- void ProcessEphemeralMarking(ObjectVisitor* visitor); - - // If the call-site of the top optimized code was not prepared for - // deoptimization, then treat the maps in the code as strong pointers, - // otherwise a map can die and deoptimize the code. - void ProcessTopOptimizedFrame(ObjectVisitor* visitor); - - // Mark objects reachable (transitively) from objects in the marking - // stack. This function empties the marking stack, but may leave - // overflowed objects in the heap, in which case the marking stack's - // overflow flag will be set. - void EmptyMarkingDeque(); - - // Refill the marking stack with overflowed objects from the heap. This - // function either leaves the marking stack full or clears the overflow - // flag on the marking stack. - void RefillMarkingDeque(); - - // After reachable maps have been marked process per context object - // literal map caches removing unmarked entries. - void ProcessMapCaches(); - - // Callback function for telling whether the object *p is an unmarked - // heap object. - static bool IsUnmarkedHeapObject(Object** p); - static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p); - - // Map transitions from a live map to a dead map must be killed. - // We replace them with a null descriptor, with the same key. - void ClearNonLiveReferences(); - void ClearNonLivePrototypeTransitions(Map* map); - void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark); - - void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code); - void ClearNonLiveDependentCode(DependentCode* dependent_code); - - // Marking detaches initial maps from SharedFunctionInfo objects - // to make this reference weak. We need to reattach initial maps - // back after collection. This is either done during - // ClearNonLiveTransitions pass or by calling this function. - void ReattachInitialMaps(); - - // Mark all values associated with reachable keys in weak collections - // encountered so far. 
This might push new object or even new weak maps onto - // the marking stack. - void ProcessWeakCollections(); - - // After all reachable objects have been marked those weak map entries - // with an unreachable key are removed from all encountered weak maps. - // The linked list of all encountered weak maps is destroyed. - void ClearWeakCollections(); - - // ----------------------------------------------------------------------- - // Phase 2: Sweeping to clear mark bits and free non-live objects for - // a non-compacting collection. - // - // Before: Live objects are marked and non-live objects are unmarked. - // - // After: Live objects are unmarked, non-live regions have been added to - // their space's free list. Active eden semispace is compacted by - // evacuation. - // - - // If we are not compacting the heap, we simply sweep the spaces except - // for the large object space, clearing mark bits and adding unmarked - // regions to each space's free list. - void SweepSpaces(); - - int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space, - NewSpacePage* p); - - void EvacuateNewSpace(); - - void EvacuateLiveObjectsFromPage(Page* p); - - void EvacuatePages(); - - void EvacuateNewSpaceAndCandidates(); - - void SweepSpace(PagedSpace* space, SweeperType sweeper); - - // Finalizes the parallel sweeping phase. Marks all the pages that were - // swept in parallel. 
- void ParallelSweepSpacesComplete(); - - void ParallelSweepSpaceComplete(PagedSpace* space); - -#ifdef DEBUG - friend class MarkObjectVisitor; - static void VisitObject(HeapObject* obj); - - friend class UnmarkObjectVisitor; - static void UnmarkObject(HeapObject* obj); -#endif - - Heap* heap_; - MarkingDeque marking_deque_; - CodeFlusher* code_flusher_; - Object* encountered_weak_collections_; - bool have_code_to_deoptimize_; - - List<Page*> evacuation_candidates_; - List<Code*> invalidated_code_; - - SmartPointer<FreeList> free_list_old_data_space_; - SmartPointer<FreeList> free_list_old_pointer_space_; - - friend class Heap; -}; - - -class MarkBitCellIterator BASE_EMBEDDED { - public: - explicit MarkBitCellIterator(MemoryChunk* chunk) - : chunk_(chunk) { - last_cell_index_ = Bitmap::IndexToCell( - Bitmap::CellAlignIndex( - chunk_->AddressToMarkbitIndex(chunk_->area_end()))); - cell_base_ = chunk_->area_start(); - cell_index_ = Bitmap::IndexToCell( - Bitmap::CellAlignIndex( - chunk_->AddressToMarkbitIndex(cell_base_))); - cells_ = chunk_->markbits()->cells(); - } - - inline bool Done() { return cell_index_ == last_cell_index_; } - - inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; } - - inline MarkBit::CellType* CurrentCell() { - ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex( - chunk_->AddressToMarkbitIndex(cell_base_)))); - return &cells_[cell_index_]; - } - - inline Address CurrentCellBase() { - ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex( - chunk_->AddressToMarkbitIndex(cell_base_)))); - return cell_base_; - } - - inline void Advance() { - cell_index_++; - cell_base_ += 32 * kPointerSize; - } - - private: - MemoryChunk* chunk_; - MarkBit::CellType* cells_; - unsigned int last_cell_index_; - unsigned int cell_index_; - Address cell_base_; -}; - - -class SequentialSweepingScope BASE_EMBEDDED { - public: - explicit SequentialSweepingScope(MarkCompactCollector *collector) : - collector_(collector) { - 
collector_->set_sequential_sweeping(true); - } - - ~SequentialSweepingScope() { - collector_->set_sequential_sweeping(false); - } - - private: - MarkCompactCollector* collector_; -}; - - -const char* AllocationSpaceName(AllocationSpace space); - -} } // namespace v8::internal - -#endif // V8_MARK_COMPACT_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mark-compact-inl.h nodejs-0.11.15/deps/v8/src/mark-compact-inl.h --- nodejs-0.11.13/deps/v8/src/mark-compact-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mark-compact-inl.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_MARK_COMPACT_INL_H_ -#define V8_MARK_COMPACT_INL_H_ - -#include "isolate.h" -#include "memory.h" -#include "mark-compact.h" - - -namespace v8 { -namespace internal { - - -MarkBit Marking::MarkBitFrom(Address addr) { - MemoryChunk* p = MemoryChunk::FromAddress(addr); - return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr), - p->ContainsOnlyData()); -} - - -void MarkCompactCollector::SetFlags(int flags) { - sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0); - reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0); - abort_incremental_marking_ = - ((flags & Heap::kAbortIncrementalMarkingMask) != 0); -} - - -void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { - ASSERT(Marking::MarkBitFrom(obj) == mark_bit); - if (!mark_bit.Get()) { - mark_bit.Set(); - MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); - ASSERT(IsMarked(obj)); - ASSERT(obj->GetIsolate()->heap()->Contains(obj)); - marking_deque_.PushBlack(obj); - } -} - - -void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) { - ASSERT(!mark_bit.Get()); - ASSERT(Marking::MarkBitFrom(obj) == mark_bit); - mark_bit.Set(); - MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); -} - - -bool MarkCompactCollector::IsMarked(Object* obj) { - ASSERT(obj->IsHeapObject()); - HeapObject* heap_object = HeapObject::cast(obj); - return 
Marking::MarkBitFrom(heap_object).Get(); -} - - -void MarkCompactCollector::RecordSlot(Object** anchor_slot, - Object** slot, - Object* object, - SlotsBuffer::AdditionMode mode) { - Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); - if (object_page->IsEvacuationCandidate() && - !ShouldSkipEvacuationSlotRecording(anchor_slot)) { - if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, - object_page->slots_buffer_address(), - slot, - mode)) { - EvictEvacuationCandidate(object_page); - } - } -} - - -} } // namespace v8::internal - -#endif // V8_MARK_COMPACT_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/math.js nodejs-0.11.15/deps/v8/src/math.js --- nodejs-0.11.13/deps/v8/src/math.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/math.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,8 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // This file relies on the fact that the following declarations have been made // in runtime.js: @@ -51,25 +30,25 @@ } // ECMA 262 - 15.8.2.2 -function MathAcos(x) { - return %Math_acos(TO_NUMBER_INLINE(x)); +function MathAcosJS(x) { + return %MathAcos(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.3 -function MathAsin(x) { - return %Math_asin(TO_NUMBER_INLINE(x)); +function MathAsinJS(x) { + return %MathAsin(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.4 -function MathAtan(x) { - return %Math_atan(TO_NUMBER_INLINE(x)); +function MathAtanJS(x) { + return %MathAtan(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.5 // The naming of y and x matches the spec, as does the order in which // ToNumber (valueOf) is called. -function MathAtan2(y, x) { - return %Math_atan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x)); +function MathAtan2JS(y, x) { + return %MathAtan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.6 @@ -77,15 +56,9 @@ return -MathFloor(-x); } -// ECMA 262 - 15.8.2.7 -function MathCos(x) { - x = MathAbs(x); // Convert to number and get rid of -0. 
- return TrigonometricInterpolation(x, 1); -} - // ECMA 262 - 15.8.2.8 function MathExp(x) { - return %Math_exp(TO_NUMBER_INLINE(x)); + return %MathExpRT(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.9 @@ -100,13 +73,13 @@ // has to be -0, which wouldn't be the case with the shift. return TO_UINT32(x); } else { - return %Math_floor(x); + return %MathFloorRT(x); } } // ECMA 262 - 15.8.2.10 function MathLog(x) { - return %_MathLog(TO_NUMBER_INLINE(x)); + return %_MathLogRT(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.11 @@ -185,21 +158,9 @@ return %RoundNumber(TO_NUMBER_INLINE(x)); } -// ECMA 262 - 15.8.2.16 -function MathSin(x) { - x = x * 1; // Convert to number and deal with -0. - if (%_IsMinusZero(x)) return x; - return TrigonometricInterpolation(x, 0); -} - // ECMA 262 - 15.8.2.17 function MathSqrt(x) { - return %_MathSqrt(TO_NUMBER_INLINE(x)); -} - -// ECMA 262 - 15.8.2.18 -function MathTan(x) { - return MathSin(x) / MathCos(x); + return %_MathSqrtRT(TO_NUMBER_INLINE(x)); } // Non-standard extension. @@ -207,72 +168,183 @@ return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y)); } +// ES6 draft 09-27-13, section 20.2.2.28. +function MathSign(x) { + x = TO_NUMBER_INLINE(x); + if (x > 0) return 1; + if (x < 0) return -1; + if (x === 0) return x; + return NAN; +} -var kInversePiHalf = 0.636619772367581343; // 2 / pi -var kInversePiHalfS26 = 9.48637384723993156e-9; // 2 / pi / (2^26) -var kS26 = 1 << 26; -var kTwoStepThreshold = 1 << 27; -// pi / 2 rounded up -var kPiHalf = 1.570796326794896780; // 0x192d4454fb21f93f -// We use two parts for pi/2 to emulate a higher precision. -// pi_half_1 only has 26 significant bits for mantissa. -// Note that pi_half > pi_half_1 + pi_half_2 -var kPiHalf1 = 1.570796325802803040; // 0x00000054fb21f93f -var kPiHalf2 = 9.920935796805404252e-10; // 0x3326a611460b113e - -var kSamples; // Initialized to a number during genesis. -var kIndexConvert; // Initialized to kSamples / (pi/2) during genesis. 
-var kSinTable; // Initialized to a Float64Array during genesis. -var kCosXIntervalTable; // Initialized to a Float64Array during genesis. - -// This implements sine using the following algorithm. -// 1) Multiplication takes care of to-number conversion. -// 2) Reduce x to the first quadrant [0, pi/2]. -// Conveniently enough, in case of +/-Infinity, we get NaN. -// Note that we try to use only 26 instead of 52 significant bits for -// mantissa to avoid rounding errors when multiplying. For very large -// input we therefore have additional steps. -// 3) Replace x by (pi/2-x) if x was in the 2nd or 4th quadrant. -// 4) Do a table lookup for the closest samples to the left and right of x. -// 5) Find the derivatives at those sampling points by table lookup: -// dsin(x)/dx = cos(x) = sin(pi/2-x) for x in [0, pi/2]. -// 6) Use cubic spline interpolation to approximate sin(x). -// 7) Negate the result if x was in the 3rd or 4th quadrant. -// 8) Get rid of -0 by adding 0. -function TrigonometricInterpolation(x, phase) { - if (x < 0 || x > kPiHalf) { - var multiple; - while (x < -kTwoStepThreshold || x > kTwoStepThreshold) { - // Let's assume this loop does not terminate. - // All numbers x in each loop forms a set S. - // (1) abs(x) > 2^27 for all x in S. - // (2) abs(multiple) != 0 since (2^27 * inverse_pi_half_s26) > 1 - // (3) multiple is rounded down in 2^26 steps, so the rounding error is - // at most max(ulp, 2^26). - // (4) so for x > 2^27, we subtract at most (1+pi/4)x and at least - // (1-pi/4)x - // (5) The subtraction results in x' so that abs(x') <= abs(x)*pi/4. - // Note that this difference cannot be simply rounded off. - // Set S cannot exist since (5) violates (1). Loop must terminate. - multiple = MathFloor(x * kInversePiHalfS26) * kS26; - x = x - multiple * kPiHalf1 - multiple * kPiHalf2; - } - multiple = MathFloor(x * kInversePiHalf); - x = x - multiple * kPiHalf1 - multiple * kPiHalf2; - phase += multiple; +// ES6 draft 09-27-13, section 20.2.2.34. 
+function MathTrunc(x) { + x = TO_NUMBER_INLINE(x); + if (x > 0) return MathFloor(x); + if (x < 0) return MathCeil(x); + if (x === 0) return x; + return NAN; +} + +// ES6 draft 09-27-13, section 20.2.2.30. +function MathSinh(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + // Idempotent for NaN, +/-0 and +/-Infinity. + if (x === 0 || !NUMBER_IS_FINITE(x)) return x; + return (MathExp(x) - MathExp(-x)) / 2; +} + +// ES6 draft 09-27-13, section 20.2.2.12. +function MathCosh(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + if (!NUMBER_IS_FINITE(x)) return MathAbs(x); + return (MathExp(x) + MathExp(-x)) / 2; +} + +// ES6 draft 09-27-13, section 20.2.2.33. +function MathTanh(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + // Idempotent for +/-0. + if (x === 0) return x; + // Returns +/-1 for +/-Infinity. + if (!NUMBER_IS_FINITE(x)) return MathSign(x); + var exp1 = MathExp(x); + var exp2 = MathExp(-x); + return (exp1 - exp2) / (exp1 + exp2); +} + +// ES6 draft 09-27-13, section 20.2.2.5. +function MathAsinh(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + // Idempotent for NaN, +/-0 and +/-Infinity. + if (x === 0 || !NUMBER_IS_FINITE(x)) return x; + if (x > 0) return MathLog(x + MathSqrt(x * x + 1)); + // This is to prevent numerical errors caused by large negative x. + return -MathLog(-x + MathSqrt(x * x + 1)); +} + +// ES6 draft 09-27-13, section 20.2.2.3. +function MathAcosh(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + if (x < 1) return NAN; + // Idempotent for NaN and +Infinity. + if (!NUMBER_IS_FINITE(x)) return x; + return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1)); +} + +// ES6 draft 09-27-13, section 20.2.2.7. +function MathAtanh(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + // Idempotent for +/-0. + if (x === 0) return x; + // Returns NaN for NaN and +/- Infinity. + if (!NUMBER_IS_FINITE(x)) return NAN; + return 0.5 * MathLog((1 + x) / (1 - x)); +} + +// ES6 draft 09-27-13, section 20.2.2.21. 
+function MathLog10(x) { + return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10). +} + + +// ES6 draft 09-27-13, section 20.2.2.22. +function MathLog2(x) { + return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2). +} + +// ES6 draft 09-27-13, section 20.2.2.17. +function MathHypot(x, y) { // Function length is 2. + // We may want to introduce fast paths for two arguments and when + // normalization to avoid overflow is not necessary. For now, we + // simply assume the general case. + var length = %_ArgumentsLength(); + var args = new InternalArray(length); + var max = 0; + for (var i = 0; i < length; i++) { + var n = %_Arguments(i); + if (!IS_NUMBER(n)) n = NonNumberToNumber(n); + if (n === INFINITY || n === -INFINITY) return INFINITY; + n = MathAbs(n); + if (n > max) max = n; + args[i] = n; + } + + // Kahan summation to avoid rounding errors. + // Normalize the numbers to the largest one to avoid overflow. + if (max === 0) max = 1; + var sum = 0; + var compensation = 0; + for (var i = 0; i < length; i++) { + var n = args[i] / max; + var summand = n * n - compensation; + var preliminary = sum + summand; + compensation = (preliminary - sum) - summand; + sum = preliminary; + } + return MathSqrt(sum) * max; +} + +// ES6 draft 09-27-13, section 20.2.2.16. +function MathFroundJS(x) { + return %MathFround(TO_NUMBER_INLINE(x)); +} + +// ES6 draft 07-18-14, section 20.2.2.11 +function MathClz32(x) { + x = ToUint32(TO_NUMBER_INLINE(x)); + if (x == 0) return 32; + var result = 0; + // Binary search. + if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; }; + if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; }; + if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; }; + if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; }; + if ((x & 0x80000000) === 0) { x <<= 1; result += 1; }; + return result; +} + +// ES6 draft 09-27-13, section 20.2.2.9. 
+// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm +// Using initial approximation adapted from Kahan's cbrt and 4 iterations +// of Newton's method. +function MathCbrt(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + if (x == 0 || !NUMBER_IS_FINITE(x)) return x; + return x >= 0 ? CubeRoot(x) : -CubeRoot(-x); +} + +macro NEWTON_ITERATION_CBRT(x, approx) + (1.0 / 3.0) * (x / (approx * approx) + 2 * approx); +endmacro + +function CubeRoot(x) { + var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893; + var approx = %_ConstructDouble(approx_hi, 0); + approx = NEWTON_ITERATION_CBRT(x, approx); + approx = NEWTON_ITERATION_CBRT(x, approx); + approx = NEWTON_ITERATION_CBRT(x, approx); + return NEWTON_ITERATION_CBRT(x, approx); +} + +// ES6 draft 09-27-13, section 20.2.2.14. +// Use Taylor series to approximate. +// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ... +// == x/1! + x^2/2! + x^3/3! + ... +// The closer x is to 0, the fewer terms are required. +function MathExpm1(x) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + var xabs = MathAbs(x); + if (xabs < 2E-7) { + return x * (1 + x * (1/2)); + } else if (xabs < 6E-5) { + return x * (1 + x * (1/2 + x * (1/6))); + } else if (xabs < 2E-2) { + return x * (1 + x * (1/2 + x * (1/6 + + x * (1/24 + x * (1/120 + x * (1/720)))))); + } else { // Use regular exp if not close enough to 0. 
+ return MathExp(x) - 1; } - var double_index = x * kIndexConvert; - if (phase & 1) double_index = kSamples - double_index; - var index = double_index | 0; - var t1 = double_index - index; - var t2 = 1 - t1; - var y1 = kSinTable[index]; - var y2 = kSinTable[index + 1]; - var dy = y2 - y1; - return (t2 * y1 + t1 * y2 + - t1 * t2 * ((kCosXIntervalTable[index] - dy) * t2 + - (dy - kCosXIntervalTable[index + 1]) * t1)) - * (1 - (phase & 2)) + 0; } // ------------------------------------------------------------------- @@ -280,8 +352,8 @@ function SetUpMath() { %CheckIsBootstrapping(); - %SetPrototype($Math, $Object.prototype); - %SetProperty(global, "Math", $Math, DONT_ENUM); + %InternalSetPrototype($Math, $Object.prototype); + %AddNamedProperty(global, "Math", $Math, DONT_ENUM); %FunctionSetInstanceClassName(MathConstructor, 'Math'); // Set up math constants. @@ -305,31 +377,45 @@ InstallFunctions($Math, DONT_ENUM, $Array( "random", MathRandom, "abs", MathAbs, - "acos", MathAcos, - "asin", MathAsin, - "atan", MathAtan, + "acos", MathAcosJS, + "asin", MathAsinJS, + "atan", MathAtanJS, "ceil", MathCeil, - "cos", MathCos, + "cos", MathCos, // implemented by third_party/fdlibm "exp", MathExp, "floor", MathFloor, "log", MathLog, "round", MathRound, - "sin", MathSin, + "sin", MathSin, // implemented by third_party/fdlibm "sqrt", MathSqrt, - "tan", MathTan, - "atan2", MathAtan2, + "tan", MathTan, // implemented by third_party/fdlibm + "atan2", MathAtan2JS, "pow", MathPow, "max", MathMax, "min", MathMin, - "imul", MathImul + "imul", MathImul, + "sign", MathSign, + "trunc", MathTrunc, + "sinh", MathSinh, + "cosh", MathCosh, + "tanh", MathTanh, + "asinh", MathAsinh, + "acosh", MathAcosh, + "atanh", MathAtanh, + "log10", MathLog10, + "log2", MathLog2, + "hypot", MathHypot, + "fround", MathFroundJS, + "clz32", MathClz32, + "cbrt", MathCbrt, + "log1p", MathLog1p, // implemented by third_party/fdlibm + "expm1", MathExpm1 )); %SetInlineBuiltinFlag(MathCeil); 
%SetInlineBuiltinFlag(MathRandom); %SetInlineBuiltinFlag(MathSin); %SetInlineBuiltinFlag(MathCos); - %SetInlineBuiltinFlag(MathTan); - %SetInlineBuiltinFlag(TrigonometricInterpolation); } SetUpMath(); diff -Nru nodejs-0.11.13/deps/v8/src/messages.cc nodejs-0.11.15/deps/v8/src/messages.cc --- nodejs-0.11.13/deps/v8/src/messages.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/messages.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "api.h" -#include "execution.h" -#include "messages.h" -#include "spaces-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/api.h" +#include "src/execution.h" +#include "src/heap/spaces-inl.h" +#include "src/messages.h" namespace v8 { namespace internal { @@ -78,7 +55,7 @@ if (loc) { start = loc->start_pos(); end = loc->end_pos(); - script_handle = GetScriptWrapper(loc->script()); + script_handle = Script::GetWrapper(loc->script()); } Handle<Object> stack_frames_handle = stack_frames.is_null() @@ -107,7 +84,7 @@ // We pass the exception object into the message handler callback though. 
Object* exception_object = isolate->heap()->undefined_value(); if (isolate->has_pending_exception()) { - isolate->pending_exception()->ToObject(&exception_object); + exception_object = isolate->pending_exception(); } Handle<Object> exception_handle(exception_object, isolate); @@ -154,24 +131,16 @@ Factory* factory = isolate->factory(); Handle<String> fmt_str = factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage")); - Handle<JSFunction> fun = - Handle<JSFunction>( - JSFunction::cast( - isolate->js_builtins_object()-> - GetPropertyNoExceptionThrown(*fmt_str))); + Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty( + isolate->js_builtins_object(), fmt_str).ToHandleChecked()); Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data); Handle<Object> argv[] = { Handle<Object>(message->type(), isolate), Handle<Object>(message->arguments(), isolate) }; - bool caught_exception; - Handle<Object> result = - Execution::TryCall(fun, - isolate->js_builtins_object(), - ARRAY_SIZE(argv), - argv, - &caught_exception); - - if (caught_exception || !result->IsString()) { + MaybeHandle<Object> maybe_result = Execution::TryCall( + fun, isolate->js_builtins_object(), ARRAY_SIZE(argv), argv); + Handle<Object> result; + if (!maybe_result.ToHandle(&result) || !result->IsString()) { return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>")); } Handle<String> result_string = Handle<String>::cast(result); @@ -180,7 +149,7 @@ // here to improve the efficiency of converting it to a C string and // other operations that are likely to take place (see GetLocalizedMessage // for example). 
- FlattenString(result_string); + result_string = String::Flatten(result_string); return result_string; } diff -Nru nodejs-0.11.13/deps/v8/src/messages.h nodejs-0.11.15/deps/v8/src/messages.h --- nodejs-0.11.13/deps/v8/src/messages.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/messages.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // The infrastructure used for (localized) message reporting in V8. // @@ -33,7 +10,7 @@ #ifndef V8_MESSAGES_H_ #define V8_MESSAGES_H_ -#include "handles-inl.h" +#include "src/handles-inl.h" // Forward declaration of MessageLocation. namespace v8 { diff -Nru nodejs-0.11.13/deps/v8/src/messages.js nodejs-0.11.15/deps/v8/src/messages.js --- nodejs-0.11.13/deps/v8/src/messages.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/messages.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // ------------------------------------------------------------------- @@ -47,7 +24,9 @@ incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"], multiple_defaults_in_switch: ["More than one default clause in switch statement"], newline_after_throw: ["Illegal newline after throw"], - redeclaration: ["%0", " '", "%1", "' has already been declared"], + label_redeclaration: ["Label '", "%0", "' has already been declared"], + var_redeclaration: ["Identifier '", "%0", "' has already been declared"], + duplicate_template_property: ["Object template has duplicate property '", "%0", "'"], no_catch_or_finally: ["Missing catch or finally after try"], unknown_label: ["Undefined label '", "%0", "'"], uncaught_exception: ["Uncaught ", "%0"], @@ -99,6 +78,7 @@ observe_perform_non_string: ["Invalid non-string changeType"], observe_perform_non_function: ["Cannot perform non-function"], observe_notify_non_notifier: ["notify called on non-notifier object"], + observe_global_proxy: ["%0", " cannot be called on the global proxy object"], not_typed_array: ["this is not a typed array."], invalid_argument: ["invalid_argument"], data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"], @@ -110,6 +90,10 @@ array_functions_on_frozen: ["Cannot modify frozen array elements"], 
array_functions_change_sealed: ["Cannot add/remove sealed array elements"], first_argument_not_regexp: ["First argument to ", "%0", " must not be a regular expression"], + not_iterable: ["%0", " is not iterable"], + not_an_iterator: ["%0", " is not an iterator"], + iterator_result_not_an_object: ["Iterator result ", "%0", " is not an object"], + iterator_value_not_an_object: ["Iterator value ", "%0", " is not an entry object"], // RangeError invalid_array_length: ["Invalid array length"], invalid_array_buffer_length: ["Invalid array buffer length"], @@ -129,6 +113,7 @@ stack_overflow: ["Maximum call stack size exceeded"], invalid_time_value: ["Invalid time value"], invalid_count_value: ["Invalid count value"], + invalid_code_point: ["Invalid code point ", "%0"], // ReferenceError invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"], invalid_lhs_in_for: ["Invalid left-hand side in for-loop"], @@ -143,7 +128,6 @@ illegal_break: ["Illegal break statement"], illegal_continue: ["Illegal continue statement"], illegal_return: ["Illegal return statement"], - illegal_let: ["Illegal let declaration outside extended mode"], error_loading_debugger: ["Error loading debugger"], no_input_to_regexp: ["No input to ", "%0"], invalid_json: ["String '", "%0", "' is not valid JSON"], @@ -153,7 +137,6 @@ array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"], object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"], illegal_access: ["Illegal access"], - invalid_preparser_data: ["Invalid preparser data for function ", "%0"], strict_mode_with: ["Strict mode code may not include a with statement"], strict_eval_arguments: ["Unexpected eval or arguments in strict mode"], too_many_arguments: ["Too many arguments in function call (only 65535 allowed)"], @@ -172,6 +155,8 @@ strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"], strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed 
on strict mode functions or the arguments objects for calls to them"], strict_caller: ["Illegal access to a strict mode caller function."], + malformed_arrow_function_parameter_list: ["Malformed arrow function parameter list"], + generator_poison_pill: ["'caller' and 'arguments' properties may not be accessed on generator functions."], unprotected_let: ["Illegal let declaration in unprotected statement context."], unprotected_const: ["Illegal const declaration in unprotected statement context."], cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"], @@ -179,6 +164,7 @@ harmony_const_assign: ["Assignment to constant variable."], symbol_to_string: ["Cannot convert a Symbol value to a string"], symbol_to_primitive: ["Cannot convert a Symbol wrapper object to a primitive value"], + symbol_to_number: ["Cannot convert a Symbol value to a number"], invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"], module_type_error: ["Module '", "%0", "' used improperly"], module_export_undefined: ["Export '", "%0", "' is not defined in module"] @@ -197,10 +183,6 @@ // str is one of %0, %1, %2 or %3. try { str = NoSideEffectToString(args[arg_num]); - if (str.length > 256) { - str = %SubString(str, 0, 239) + "...<omitted>..." + - %SubString(str, str.length - 2, str.length); - } } catch (e) { if (%IsJSModule(args[arg_num])) str = "module"; @@ -220,10 +202,18 @@ function NoSideEffectToString(obj) { if (IS_STRING(obj)) return obj; if (IS_NUMBER(obj)) return %_NumberToString(obj); - if (IS_BOOLEAN(obj)) return x ? 'true' : 'false'; + if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false'; if (IS_UNDEFINED(obj)) return 'undefined'; if (IS_NULL(obj)) return 'null'; - if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString); + if (IS_FUNCTION(obj)) { + var str = %_CallFunction(obj, FunctionToString); + if (str.length > 128) { + str = %_SubString(str, 0, 111) + "...<omitted>..." 
+ + %_SubString(str, str.length - 2, str.length); + } + return str; + } + if (IS_SYMBOL(obj)) return %_CallFunction(obj, SymbolToString); if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) { var constructor = %GetDataProperty(obj, "constructor"); if (typeof constructor == "function") { @@ -298,8 +288,8 @@ * Set up the Script function and constructor. */ %FunctionSetInstanceClassName(Script, 'Script'); -%SetProperty(Script.prototype, 'constructor', Script, - DONT_ENUM | DONT_DELETE | READ_ONLY); +%AddNamedProperty(Script.prototype, 'constructor', Script, + DONT_ENUM | DONT_DELETE | READ_ONLY); %SetCode(Script, function(x) { // Script objects can only be created by the VM. throw new $Error("Not supported"); @@ -571,44 +561,16 @@ if (this.line_offset > 0 || this.column_offset > 0) { return this.name; } - - // The result is cached as on long scripts it takes noticable time to search - // for the sourceURL. - if (this.hasCachedNameOrSourceURL) { - return this.cachedNameOrSourceURL; - } - this.hasCachedNameOrSourceURL = true; - - // TODO(608): the spaces in a regexp below had to be escaped as \040 - // because this file is being processed by js2c whose handling of spaces - // in regexps is broken. Also, ['"] are excluded from allowed URLs to - // avoid matches against sources that invoke evals with sourceURL. - // A better solution would be to detect these special comments in - // the scanner/parser. - var source = ToString(this.source); - var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0); - this.cachedNameOrSourceURL = this.name; - if (sourceUrlPos > 4) { - var sourceUrlPattern = - /\/\/[#@][\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm; - // Don't reuse lastMatchInfo here, so we create a new array with room - // for four captures (array with length one longer than the index - // of the fourth capture, where the numbering is zero-based). 
- var matchInfo = new InternalArray(CAPTURE(3) + 1); - var match = - %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo); - if (match) { - this.cachedNameOrSourceURL = - %_SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]); - } + if (this.source_url) { + return this.source_url; } - return this.cachedNameOrSourceURL; + return this.name; } SetUpLockedPrototype(Script, - $Array("source", "name", "line_ends", "line_offset", "column_offset", - "cachedNameOrSourceURL", "hasCachedNameOrSourceURL" ), + $Array("source", "name", "source_url", "source_mapping_url", "line_ends", + "line_offset", "column_offset"), $Array( "lineFromPosition", ScriptLineFromPosition, "locationFromPosition", ScriptLocationFromPosition, @@ -974,12 +936,12 @@ var methodName = this.getMethodName(); if (functionName) { if (typeName && - %_CallFunction(functionName, typeName, StringIndexOf) != 0) { + %_CallFunction(functionName, typeName, StringIndexOfJS) != 0) { line += typeName + "."; } line += functionName; if (methodName && - (%_CallFunction(functionName, "." + methodName, StringIndexOf) != + (%_CallFunction(functionName, "." 
+ methodName, StringIndexOfJS) != functionName.length - methodName.length - 1)) { line += " [as " + methodName + "]"; } @@ -1092,7 +1054,8 @@ var formatting_custom_stack_trace = false; -function FormatStackTrace(obj, error_string, frames) { +function FormatStackTrace(obj, raw_stack) { + var frames = GetStackFrames(raw_stack); if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) { var array = []; %MoveArrayContents(frames, array); @@ -1109,7 +1072,7 @@ } var lines = new InternalArray(); - lines.push(error_string); + lines.push(FormatErrorString(obj)); for (var i = 0; i < frames.length; i++) { var frame = frames[i]; var line; @@ -1144,43 +1107,48 @@ } -function captureStackTrace(obj, cons_opt) { - var stackTraceLimit = $Error.stackTraceLimit; - if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return; - if (stackTraceLimit < 0 || stackTraceLimit > 10000) { - stackTraceLimit = 10000; - } - var stack = %CollectStackTrace(obj, - cons_opt ? cons_opt : captureStackTrace, - stackTraceLimit); - - var error_string = FormatErrorString(obj); - // The holder of this getter ('obj') may not be the receiver ('this'). - // When this getter is called the first time, we use the context values to - // format a stack trace string and turn this accessor pair into a data - // property (on the holder). - var getter = function() { - // Stack is still a raw array awaiting to be formatted. - var result = FormatStackTrace(obj, error_string, GetStackFrames(stack)); - // Turn this accessor into a data property. - %DefineOrRedefineDataProperty(obj, 'stack', result, NONE); - // Release context values. - stack = error_string = UNDEFINED; - return result; - }; +var stack_trace_symbol; // Set during bootstrapping. +var formatted_stack_trace_symbol = NEW_PRIVATE("formatted stack trace"); + - // Set the 'stack' property on the receiver. If the receiver is the same as - // holder of this setter, the accessor pair is turned into a data property. 
- var setter = function(v) { - // Set data property on the receiver (not necessarily holder). - %DefineOrRedefineDataProperty(this, 'stack', v, NONE); - if (this === obj) { - // Release context values if holder is the same as the receiver. - stack = error_string = UNDEFINED; +// Format the stack trace if not yet done, and return it. +// Cache the formatted stack trace on the holder. +var StackTraceGetter = function() { + var formatted_stack_trace = GET_PRIVATE(this, formatted_stack_trace_symbol); + if (IS_UNDEFINED(formatted_stack_trace)) { + var holder = this; + while (!HAS_PRIVATE(holder, stack_trace_symbol)) { + holder = %GetPrototype(holder); + if (!holder) return UNDEFINED; } - }; + var stack_trace = GET_PRIVATE(holder, stack_trace_symbol); + if (IS_UNDEFINED(stack_trace)) return UNDEFINED; + formatted_stack_trace = FormatStackTrace(holder, stack_trace); + SET_PRIVATE(holder, stack_trace_symbol, UNDEFINED); + SET_PRIVATE(holder, formatted_stack_trace_symbol, formatted_stack_trace); + } + return formatted_stack_trace; +}; + - %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM); +// If the receiver equals the holder, set the formatted stack trace that the +// getter returns. +var StackTraceSetter = function(v) { + if (HAS_PRIVATE(this, stack_trace_symbol)) { + SET_PRIVATE(this, stack_trace_symbol, UNDEFINED); + SET_PRIVATE(this, formatted_stack_trace_symbol, v); + } +}; + + +// Use a dummy function since we do not actually want to capture a stack trace +// when constructing the initial Error prototytpes. +var captureStackTrace = function captureStackTrace(obj, cons_opt) { + // Define accessors first, as this may fail and throw. + ObjectDefineProperty(obj, 'stack', { get: StackTraceGetter, + set: StackTraceSetter, + configurable: true }); + %CollectStackTrace(obj, cons_opt ? cons_opt : captureStackTrace); } @@ -1195,8 +1163,9 @@ // effects when overwriting the error functions from // user code. 
var name = f.name; - %SetProperty(global, name, f, DONT_ENUM); - %SetProperty(builtins, '$' + name, f, DONT_ENUM | DONT_DELETE | READ_ONLY); + %AddNamedProperty(global, name, f, DONT_ENUM); + %AddNamedProperty(builtins, '$' + name, f, + DONT_ENUM | DONT_DELETE | READ_ONLY); // Configure the error function. if (name == 'Error') { // The prototype of the Error object must itself be an error. @@ -1211,19 +1180,18 @@ %FunctionSetPrototype(f, new $Error()); } %FunctionSetInstanceClassName(f, 'Error'); - %SetProperty(f.prototype, 'constructor', f, DONT_ENUM); - %SetProperty(f.prototype, "name", name, DONT_ENUM); + %AddNamedProperty(f.prototype, 'constructor', f, DONT_ENUM); + %AddNamedProperty(f.prototype, "name", name, DONT_ENUM); %SetCode(f, function(m) { if (%_IsConstructCall()) { // Define all the expected properties directly on the error // object. This avoids going through getters and setters defined // on prototype objects. - %IgnoreAttributesAndSetProperty(this, 'stack', UNDEFINED, DONT_ENUM); + %AddNamedProperty(this, 'stack', UNDEFINED, DONT_ENUM); if (!IS_UNDEFINED(m)) { - %IgnoreAttributesAndSetProperty( - this, 'message', ToString(m), DONT_ENUM); + %AddNamedProperty(this, 'message', ToString(m), DONT_ENUM); } - captureStackTrace(this, f); + try { captureStackTrace(this, f); } catch (e) { } } else { return new f(m); } @@ -1244,7 +1212,7 @@ $Error.captureStackTrace = captureStackTrace; -%SetProperty($Error.prototype, 'message', '', DONT_ENUM); +%AddNamedProperty($Error.prototype, 'message', '', DONT_ENUM); // Global list of error objects visited during ErrorToString. This is // used to detect cycles in error toString formatting. @@ -1254,7 +1222,7 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) { var current = error; // Climb the prototype chain until we find the holder. 
- while (current && !%HasLocalProperty(current, name)) { + while (current && !%HasOwnProperty(current, name)) { current = %GetPrototype(current); } if (IS_NULL(current)) return UNDEFINED; @@ -1316,39 +1284,8 @@ function SetUpStackOverflowBoilerplate() { var boilerplate = MakeRangeError('stack_overflow', []); - var error_string = boilerplate.name + ": " + boilerplate.message; - - // The raw stack trace is stored as a hidden property on the holder of this - // getter, which may not be the same as the receiver. Find the holder to - // retrieve the raw stack trace and then turn this accessor pair into a - // data property. - var getter = function() { - var holder = this; - while (!IS_ERROR(holder)) { - holder = %GetPrototype(holder); - if (IS_NULL(holder)) return MakeSyntaxError('illegal_access', []); - } - var stack = %GetAndClearOverflowedStackTrace(holder); - // We may not have captured any stack trace. - if (IS_UNDEFINED(stack)) return stack; - - var result = FormatStackTrace(holder, error_string, GetStackFrames(stack)); - // Replace this accessor with a data property. - %DefineOrRedefineDataProperty(holder, 'stack', result, NONE); - return result; - }; - - // Set the 'stack' property on the receiver. If the receiver is the same as - // holder of this setter, the accessor pair is turned into a data property. - var setter = function(v) { - %DefineOrRedefineDataProperty(this, 'stack', v, NONE); - // Tentatively clear the hidden property. If the receiver is the same as - // holder, we release the raw stack trace this way. 
- %GetAndClearOverflowedStackTrace(this); - }; - - %DefineOrRedefineAccessorProperty( - boilerplate, 'stack', getter, setter, DONT_ENUM); + %DefineAccessorPropertyUnchecked( + boilerplate, 'stack', StackTraceGetter, StackTraceSetter, DONT_ENUM); return boilerplate; } diff -Nru nodejs-0.11.13/deps/v8/src/mips/assembler-mips.cc nodejs-0.11.15/deps/v8/src/mips/assembler-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/assembler-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/assembler-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -33,55 +33,40 @@ // Copyright 2012 the V8 project authors. All rights reserved. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "mips/assembler-mips-inl.h" -#include "serialize.h" +#include "src/base/cpu.h" +#include "src/mips/assembler-mips-inl.h" +#include "src/serialize.h" namespace v8 { namespace internal { -#ifdef DEBUG -bool CpuFeatures::initialized_ = false; -#endif -unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; -unsigned CpuFeatures::cross_compile_ = 0; - - -ExternalReference ExternalReference::cpu_features() { - ASSERT(CpuFeatures::initialized_); - return ExternalReference(&CpuFeatures::supported_); -} - - // Get the CPU features enabled by the build. For cross compilation the // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS // can be defined to enable FPU instructions when building the // snapshot. -static uint64_t CpuFeaturesImpliedByCompiler() { - uint64_t answer = 0; +static unsigned CpuFeaturesImpliedByCompiler() { + unsigned answer = 0; #ifdef CAN_USE_FPU_INSTRUCTIONS - answer |= static_cast<uint64_t>(1) << FPU; + answer |= 1u << FPU; #endif // def CAN_USE_FPU_INSTRUCTIONS -#ifdef __mips__ // If the compiler is allowed to use FPU then we can use FPU too in our code // generation even when generating snapshots. This won't work for cross // compilation. 
-#if(defined(__mips_hard_float) && __mips_hard_float != 0) - answer |= static_cast<uint64_t>(1) << FPU; -#endif // defined(__mips_hard_float) && __mips_hard_float != 0 -#endif // def __mips__ +#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 + answer |= 1u << FPU; +#endif return answer; } const char* DoubleRegister::AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "f0", "f2", @@ -102,44 +87,31 @@ } -void CpuFeatures::Probe() { - unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | - CpuFeaturesImpliedByCompiler()); - ASSERT(supported_ == 0 || supported_ == standard_features); -#ifdef DEBUG - initialized_ = true; -#endif - - // Get the features implied by the OS and the compiler settings. This is the - // minimal set of features which is also allowed for generated code in the - // snapshot. - supported_ |= standard_features; +void CpuFeatures::ProbeImpl(bool cross_compile) { + supported_ |= CpuFeaturesImpliedByCompiler(); - if (Serializer::enabled()) { - // No probing for features if we might serialize (generate snapshot). - return; - } + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; // If the compiler is allowed to use fpu then we can use fpu too in our // code generation. -#if !defined(__mips__) +#ifndef __mips__ // For the simulator build, use FPU. - supported_ |= static_cast<uint64_t>(1) << FPU; + supported_ |= 1u << FPU; #else - // Probe for additional features not already known to be available. - CPU cpu; - if (cpu.has_fpu()) { - // This implementation also sets the FPU flags if - // runtime detection of FPU returns true. - supported_ |= static_cast<uint64_t>(1) << FPU; - found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU; - } + // Probe for additional features at runtime. 
+ base::CPU cpu; + if (cpu.has_fpu()) supported_ |= 1u << FPU; #endif } +void CpuFeatures::PrintTarget() { } +void CpuFeatures::PrintFeatures() { } + + int ToNumber(Register reg) { - ASSERT(reg.is_valid()); + DCHECK(reg.is_valid()); const int kNumbers[] = { 0, // zero_reg 1, // at @@ -179,7 +151,7 @@ Register ToRegister(int num) { - ASSERT(num >= 0 && num < kNumRegisters); + DCHECK(num >= 0 && num < kNumRegisters); const Register kRegisters[] = { zero_reg, at, @@ -227,7 +199,7 @@ } // Indicate that code has changed. - CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); + CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); } @@ -249,7 +221,7 @@ // Verify all Objects referred by code are NOT in new space. Object* obj = *handle; if (obj->IsHeapObject()) { - ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); + DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); imm32_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } else { @@ -278,28 +250,30 @@ // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) // operations as post-increment of sp. const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift) - | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask); + | (kRegister_sp_Code << kRtShift) + | (kPointerSize & kImm16Mask); // NOLINT // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. 
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift) - | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask); + | (kRegister_sp_Code << kRtShift) + | (-kPointerSize & kImm16Mask); // NOLINT // sw(r, MemOperand(sp, 0)) const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift) - | (0 & kImm16Mask); + | (0 & kImm16Mask); // NOLINT // lw(r, MemOperand(sp, 0)) const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift) - | (0 & kImm16Mask); + | (0 & kImm16Mask); // NOLINT const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift) - | (0 & kImm16Mask); + | (0 & kImm16Mask); // NOLINT const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift) - | (0 & kImm16Mask); + | (0 & kImm16Mask); // NOLINT const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift) - | (kNegOffset & kImm16Mask); + | (kNegOffset & kImm16Mask); // NOLINT const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift) - | (kNegOffset & kImm16Mask); + | (kNegOffset & kImm16Mask); // NOLINT // A mask for the Rt register for push, pop, lw, sw instructions. const Instr kRtMask = kRtFieldMask; const Instr kLwSwInstrTypeMask = 0xffe00000; @@ -332,7 +306,7 @@ void Assembler::GetCode(CodeDesc* desc) { - ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. // Set up code descriptor. desc->buffer = buffer_; desc->buffer_size = buffer_size_; @@ -343,7 +317,7 @@ void Assembler::Align(int m) { - ASSERT(m >= 4 && IsPowerOf2(m)); + DCHECK(m >= 4 && IsPowerOf2(m)); while ((pc_offset() & (m - 1)) != 0) { nop(); } @@ -580,7 +554,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) { // See Assembler::nop(type). 
- ASSERT(type < 32); + DCHECK(type < 32); uint32_t opcode = GetOpcodeField(instr); uint32_t function = GetFunctionField(instr); uint32_t rt = GetRt(instr); @@ -603,7 +577,7 @@ int32_t Assembler::GetBranchOffset(Instr instr) { - ASSERT(IsBranch(instr)); + DCHECK(IsBranch(instr)); return (static_cast<int16_t>(instr & kImm16Mask)) << 2; } @@ -614,13 +588,13 @@ int16_t Assembler::GetLwOffset(Instr instr) { - ASSERT(IsLw(instr)); + DCHECK(IsLw(instr)); return ((instr & kImm16Mask)); } Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { - ASSERT(IsLw(instr)); + DCHECK(IsLw(instr)); // We actually create a new lw instruction based on the original one. Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) @@ -636,7 +610,7 @@ Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { - ASSERT(IsSw(instr)); + DCHECK(IsSw(instr)); return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); } @@ -647,7 +621,7 @@ Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { - ASSERT(IsAddImmediate(instr)); + DCHECK(IsAddImmediate(instr)); return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); } @@ -669,7 +643,7 @@ } } // Check we have a branch or jump instruction. - ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); + DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr)); // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming // the compiler uses arithmectic shifts for signed integers. 
if (IsBranch(instr)) { @@ -684,7 +658,7 @@ } else if (IsLui(instr)) { Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); - ASSERT(IsOri(instr_ori)); + DCHECK(IsOri(instr_ori)); int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); @@ -694,7 +668,7 @@ } else { uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos); int32_t delta = instr_address - imm; - ASSERT(pos > delta); + DCHECK(pos > delta); return pos - delta; } } else { @@ -706,7 +680,7 @@ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos); instr_address &= kImm28Mask; int32_t delta = instr_address - imm28; - ASSERT(pos > delta); + DCHECK(pos > delta); return pos - delta; } } @@ -716,29 +690,29 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) { Instr instr = instr_at(pos); if ((instr & ~kImm16Mask) == 0) { - ASSERT(target_pos == kEndOfChain || target_pos >= 0); + DCHECK(target_pos == kEndOfChain || target_pos >= 0); // Emitted label constant, not part of a branch. // Make label relative to Code* of generated Code object. 
instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); return; } - ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); + DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr)); if (IsBranch(instr)) { int32_t imm18 = target_pos - (pos + kBranchPCOffset); - ASSERT((imm18 & 3) == 0); + DCHECK((imm18 & 3) == 0); instr &= ~kImm16Mask; int32_t imm16 = imm18 >> 2; - ASSERT(is_int16(imm16)); + DCHECK(is_int16(imm16)); instr_at_put(pos, instr | (imm16 & kImm16Mask)); } else if (IsLui(instr)) { Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); - ASSERT(IsOri(instr_ori)); + DCHECK(IsOri(instr_ori)); uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; - ASSERT((imm & 3) == 0); + DCHECK((imm & 3) == 0); instr_lui &= ~kImm16Mask; instr_ori &= ~kImm16Mask; @@ -750,11 +724,11 @@ } else { uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos; imm28 &= kImm28Mask; - ASSERT((imm28 & 3) == 0); + DCHECK((imm28 & 3) == 0); instr &= ~kImm26Mask; uint32_t imm26 = imm28 >> 2; - ASSERT(is_uint26(imm26)); + DCHECK(is_uint26(imm26)); instr_at_put(pos, instr | (imm26 & kImm26Mask)); } @@ -786,7 +760,7 @@ void Assembler::bind_to(Label* L, int pos) { - ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. + DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. 
int32_t trampoline_pos = kInvalidSlotPos; if (L->is_linked() && !trampoline_emitted_) { unbound_labels_count_--; @@ -804,14 +778,14 @@ trampoline_pos = get_trampoline_entry(fixup_pos); CHECK(trampoline_pos != kInvalidSlotPos); } - ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); + DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); target_at_put(fixup_pos, trampoline_pos); fixup_pos = trampoline_pos; dist = pos - fixup_pos; } target_at_put(fixup_pos, pos); } else { - ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr)); + DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr)); target_at_put(fixup_pos, pos); } } @@ -825,18 +799,18 @@ void Assembler::bind(Label* L) { - ASSERT(!L->is_bound()); // Label can only be bound once. + DCHECK(!L->is_bound()); // Label can only be bound once. bind_to(L, pc_offset()); } void Assembler::next(Label* L) { - ASSERT(L->is_linked()); + DCHECK(L->is_linked()); int link = target_at(L->pos()); if (link == kEndOfChain) { L->Unuse(); } else { - ASSERT(link >= 0); + DCHECK(link >= 0); L->link_to(link); } } @@ -864,7 +838,7 @@ Register rd, uint16_t sa, SecondaryField func) { - ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); + DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | (rd.code() << kRdShift) | (sa << kSaShift) | func; emit(instr); @@ -877,7 +851,7 @@ uint16_t msb, uint16_t lsb, SecondaryField func) { - ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); + DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | (msb << kRdShift) | (lsb << kSaShift) | func; emit(instr); @@ -890,7 +864,7 @@ FPURegister fs, FPURegister fd, SecondaryField func) { - ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); + DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid()); Instr 
instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -903,7 +877,7 @@ FPURegister fs, FPURegister fd, SecondaryField func) { - ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); + DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -916,7 +890,7 @@ FPURegister fs, FPURegister fd, SecondaryField func) { - ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); + DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid()); Instr instr = opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -928,7 +902,7 @@ Register rt, FPUControlRegister fs, SecondaryField func) { - ASSERT(fs.is_valid() && rt.is_valid()); + DCHECK(fs.is_valid() && rt.is_valid()); Instr instr = opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; emit(instr); @@ -941,7 +915,7 @@ Register rs, Register rt, int32_t j) { - ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); + DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | (j & kImm16Mask); emit(instr); @@ -952,7 +926,7 @@ Register rs, SecondaryField SF, int32_t j) { - ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); + DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j))); Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); emit(instr); } @@ -962,7 +936,7 @@ Register rs, FPURegister ft, int32_t j) { - ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); + DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | (j & kImm16Mask); emit(instr); @@ -972,7 +946,7 @@ 
void Assembler::GenInstrJump(Opcode opcode, uint32_t address) { BlockTrampolinePoolScope block_trampoline_pool(this); - ASSERT(is_uint26(address)); + DCHECK(is_uint26(address)); Instr instr = opcode | address; emit(instr); BlockTrampolinePoolFor(1); // For associated delay slot. @@ -1012,7 +986,7 @@ } uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; - ASSERT((imm & 3) == 0); + DCHECK((imm & 3) == 0); return imm; } @@ -1038,8 +1012,8 @@ } int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); - ASSERT((offset & 3) == 0); - ASSERT(is_int16(offset >> 2)); + DCHECK((offset & 3) == 0); + DCHECK(is_int16(offset >> 2)); return offset; } @@ -1054,9 +1028,9 @@ if (L->is_linked()) { target_pos = L->pos(); // L's link. int32_t imm18 = target_pos - at_offset; - ASSERT((imm18 & 3) == 0); + DCHECK((imm18 & 3) == 0); int32_t imm16 = imm18 >> 2; - ASSERT(is_int16(imm16)); + DCHECK(is_int16(imm16)); instr_at_put(at_offset, (imm16 & kImm16Mask)); } else { target_pos = kEndOfChain; @@ -1148,7 +1122,7 @@ uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); bool in_range = (ipc ^ static_cast<uint32_t>(target) >> (kImm26Bits + kImmFieldShift)) == 0; - ASSERT(in_range && ((target & 3) == 0)); + DCHECK(in_range && ((target & 3) == 0)); #endif GenInstrJump(J, target >> 2); } @@ -1170,7 +1144,7 @@ uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); bool in_range = (ipc ^ static_cast<uint32_t>(target) >> (kImm26Bits + kImmFieldShift)) == 0; - ASSERT(in_range && ((target & 3) == 0)); + DCHECK(in_range && ((target & 3) == 0)); #endif positions_recorder()->WriteRecordedPositions(); GenInstrJump(JAL, target >> 2); @@ -1211,7 +1185,7 @@ } -//-------Data-processing-instructions--------- +// -------Data-processing-instructions--------- // Arithmetic. 
@@ -1263,7 +1237,7 @@ void Assembler::andi(Register rt, Register rs, int32_t j) { - ASSERT(is_uint16(j)); + DCHECK(is_uint16(j)); GenInstrImmediate(ANDI, rs, rt, j); } @@ -1274,7 +1248,7 @@ void Assembler::ori(Register rt, Register rs, int32_t j) { - ASSERT(is_uint16(j)); + DCHECK(is_uint16(j)); GenInstrImmediate(ORI, rs, rt, j); } @@ -1285,7 +1259,7 @@ void Assembler::xori(Register rt, Register rs, int32_t j) { - ASSERT(is_uint16(j)); + DCHECK(is_uint16(j)); GenInstrImmediate(XORI, rs, rt, j); } @@ -1304,7 +1278,7 @@ // generated using the sll instruction. They must be generated using // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo // instructions. - ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg))); + DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg))); GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL); } @@ -1336,8 +1310,8 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) { // Should be called via MacroAssembler::Ror. - ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa)); - ASSERT(kArchVariant == kMips32r2); + DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa)); + DCHECK(kArchVariant == kMips32r2); Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) | (rd.code() << kRdShift) | (sa << kSaShift) | SRL; emit(instr); @@ -1346,19 +1320,19 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) { // Should be called via MacroAssembler::Ror. - ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() ); - ASSERT(kArchVariant == kMips32r2); + DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() ); + DCHECK(kArchVariant == kMips32r2); Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV; emit(instr); } -//------------Memory-instructions------------- +// ------------Memory-instructions------------- // Helper for base-reg + offset, when offset is larger than int16. 
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { - ASSERT(!src.rm().is(at)); + DCHECK(!src.rm().is(at)); lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. addu(at, at, src.rm()); // Add base register. @@ -1466,20 +1440,20 @@ void Assembler::lui(Register rd, int32_t j) { - ASSERT(is_uint16(j)); + DCHECK(is_uint16(j)); GenInstrImmediate(LUI, zero_reg, rd, j); } -//-------------Misc-instructions-------------- +// -------------Misc-instructions-------------- // Break / Trap instructions. void Assembler::break_(uint32_t code, bool break_as_stop) { - ASSERT((code & ~0xfffff) == 0); + DCHECK((code & ~0xfffff) == 0); // We need to invalidate breaks that could be stops as well because the // simulator expects a char pointer after the stop instruction. // See constants-mips.h for explanation. - ASSERT((break_as_stop && + DCHECK((break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) || (!break_as_stop && @@ -1491,8 +1465,8 @@ void Assembler::stop(const char* msg, uint32_t code) { - ASSERT(code > kMaxWatchpointCode); - ASSERT(code <= kMaxStopCode); + DCHECK(code > kMaxWatchpointCode); + DCHECK(code <= kMaxStopCode); #if V8_HOST_ARCH_MIPS break_(0x54321); #else // V8_HOST_ARCH_MIPS @@ -1506,7 +1480,7 @@ void Assembler::tge(Register rs, Register rt, uint16_t code) { - ASSERT(is_uint10(code)); + DCHECK(is_uint10(code)); Instr instr = SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; emit(instr); @@ -1514,7 +1488,7 @@ void Assembler::tgeu(Register rs, Register rt, uint16_t code) { - ASSERT(is_uint10(code)); + DCHECK(is_uint10(code)); Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; emit(instr); @@ -1522,7 +1496,7 @@ void Assembler::tlt(Register rs, Register rt, uint16_t code) { - ASSERT(is_uint10(code)); + DCHECK(is_uint10(code)); Instr instr = SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; 
emit(instr); @@ -1530,7 +1504,7 @@ void Assembler::tltu(Register rs, Register rt, uint16_t code) { - ASSERT(is_uint10(code)); + DCHECK(is_uint10(code)); Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; @@ -1539,7 +1513,7 @@ void Assembler::teq(Register rs, Register rt, uint16_t code) { - ASSERT(is_uint10(code)); + DCHECK(is_uint10(code)); Instr instr = SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; emit(instr); @@ -1547,7 +1521,7 @@ void Assembler::tne(Register rs, Register rt, uint16_t code) { - ASSERT(is_uint10(code)); + DCHECK(is_uint10(code)); Instr instr = SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; emit(instr); @@ -1622,7 +1596,7 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { // Should be called via MacroAssembler::Ins. // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); } @@ -1630,21 +1604,21 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { // Should be called via MacroAssembler::Ext. // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); } void Assembler::pref(int32_t hint, const MemOperand& rs) { - ASSERT(kArchVariant != kLoongson); - ASSERT(is_uint5(hint) && is_uint16(rs.offset_)); + DCHECK(kArchVariant != kLoongson); + DCHECK(is_uint5(hint) && is_uint16(rs.offset_)); Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_); emit(instr); } -//--------Coprocessor-instructions---------------- +// --------Coprocessor-instructions---------------- // Load, store, move. 
void Assembler::lwc1(FPURegister fd, const MemOperand& src) { @@ -1655,10 +1629,12 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) { // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit // load to two 32-bit loads. - GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); + GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ + + Register::kMantissaOffset); FPURegister nextfpreg; nextfpreg.setcode(fd.code() + 1); - GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4); + GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + + Register::kExponentOffset); } @@ -1670,10 +1646,12 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) { // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit // store to two 32-bit stores. - GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); + GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ + + Register::kMantissaOffset); FPURegister nextfpreg; nextfpreg.setcode(fd.code() + 1); - GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4); + GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + + Register::kExponentOffset); } @@ -1699,7 +1677,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { uint64_t i; - OS::MemCopy(&i, &d, 8); + memcpy(&i, &d, 8); *lo = i & 0xffffffff; *hi = i >> 32; @@ -1807,25 +1785,25 @@ void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); } void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); } void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); } void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { - ASSERT(kArchVariant == 
kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); } @@ -1866,7 +1844,7 @@ void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); } @@ -1882,7 +1860,7 @@ void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { - ASSERT(kArchVariant == kMips32r2); + DCHECK(kArchVariant == kMips32r2); GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); } @@ -1895,8 +1873,8 @@ // Conditions. void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs, FPURegister ft, uint16_t cc) { - ASSERT(is_uint3(cc)); - ASSERT((fmt & ~(31 << kRsShift)) == 0); + DCHECK(is_uint3(cc)); + DCHECK((fmt & ~(31 << kRsShift)) == 0); Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | cc << 8 | 3 << 4 | cond; emit(instr); @@ -1905,7 +1883,7 @@ void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) { - ASSERT(src2 == 0.0); + DCHECK(src2 == 0.0); mtc1(zero_reg, f14); cvt_d_w(f14, f14); c(cond, D, src1, f14, 0); @@ -1913,14 +1891,14 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) { - ASSERT(is_uint3(cc)); + DCHECK(is_uint3(cc)); Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); emit(instr); } void Assembler::bc1t(int16_t offset, uint16_t cc) { - ASSERT(is_uint3(cc)); + DCHECK(is_uint3(cc)); Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); emit(instr); } @@ -1951,18 +1929,18 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { Instr instr = instr_at(pc); - ASSERT(IsJ(instr) || IsLui(instr)); + DCHECK(IsJ(instr) || IsLui(instr)); if (IsLui(instr)) { Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); - ASSERT(IsOri(instr_ori)); + DCHECK(IsOri(instr_ori)); int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; imm |= (instr_ori & 
static_cast<int32_t>(kImm16Mask)); if (imm == kEndOfJumpChain) { return 0; // Number of instructions patched. } imm += pc_delta; - ASSERT((imm & 3) == 0); + DCHECK((imm & 3) == 0); instr_lui &= ~kImm16Mask; instr_ori &= ~kImm16Mask; @@ -1979,11 +1957,11 @@ } imm28 += pc_delta; imm28 &= kImm28Mask; - ASSERT((imm28 & 3) == 0); + DCHECK((imm28 & 3) == 0); instr &= ~kImm26Mask; uint32_t imm26 = imm28 >> 2; - ASSERT(is_uint26(imm26)); + DCHECK(is_uint26(imm26)); instr_at_put(pc, instr | (imm26 & kImm26Mask)); return 1; // Number of instructions patched. @@ -1996,9 +1974,7 @@ // Compute new buffer size. CodeDesc desc; // The new buffer. - if (buffer_size_ < 4*KB) { - desc.buffer_size = 4*KB; - } else if (buffer_size_ < 1*MB) { + if (buffer_size_ < 1 * MB) { desc.buffer_size = 2*buffer_size_; } else { desc.buffer_size = buffer_size_ + 1*MB; @@ -2014,9 +1990,9 @@ // Copy the data. int pc_delta = desc.buffer - buffer_; int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); - OS::MemMove(desc.buffer, buffer_, desc.instr_size); - OS::MemMove(reloc_info_writer.pos() + rc_delta, - reloc_info_writer.pos(), desc.reloc_size); + MemMove(desc.buffer, buffer_, desc.instr_size); + MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), + desc.reloc_size); // Switch buffers. DeleteArray(buffer_); @@ -2035,7 +2011,7 @@ } } - ASSERT(!overflow()); + DCHECK(!overflow()); } @@ -2066,7 +2042,7 @@ RelocInfo rinfo(pc_, rmode, data, NULL); if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { // Adjust code for new modes. - ASSERT(RelocInfo::IsDebugBreakSlot(rmode) + DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsJSReturn(rmode) || RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode)); @@ -2074,17 +2050,11 @@ } if (!RelocInfo::IsNone(rinfo.rmode())) { // Don't record external references unless the heap will be serialized. 
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - if (!Serializer::enabled() && !emit_debug_code()) { - return; - } + if (rmode == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; } - ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. + DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { RelocInfo reloc_info_with_ast_id(pc_, rmode, @@ -2122,8 +2092,8 @@ return; } - ASSERT(!trampoline_emitted_); - ASSERT(unbound_labels_count_ >= 0); + DCHECK(!trampoline_emitted_); + DCHECK(unbound_labels_count_ >= 0); if (unbound_labels_count_ > 0) { // First we emit jump (2 instructions), then we emit trampoline pool. { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -2185,7 +2155,7 @@ // snapshot generated on ia32, the resulting MIPS sNaN must be quieted. // OS::nan_value() returns a qNaN. void Assembler::QuietNaN(HeapObject* object) { - HeapNumber::cast(object)->set_value(OS::nan_value()); + HeapNumber::cast(object)->set_value(base::OS::nan_value()); } @@ -2196,7 +2166,9 @@ // There is an optimization below, which emits a nop when the address // fits in just 16 bits. This is unlikely to help, and should be benchmarked, // and possibly removed. -void Assembler::set_target_address_at(Address pc, Address target) { +void Assembler::set_target_address_at(Address pc, + Address target, + ICacheFlushMode icache_flush_mode) { Instr instr2 = instr_at(pc + kInstrSize); uint32_t rt_code = GetRtField(instr2); uint32_t* p = reinterpret_cast<uint32_t*>(pc); @@ -2290,7 +2262,9 @@ patched_jump = true; } - CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t)); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc, (patched_jump ? 
3 : 2) * sizeof(int32_t)); + } } @@ -2306,16 +2280,16 @@ bool patched = false; if (IsJal(instr3)) { - ASSERT(GetOpcodeField(instr1) == LUI); - ASSERT(GetOpcodeField(instr2) == ORI); + DCHECK(GetOpcodeField(instr1) == LUI); + DCHECK(GetOpcodeField(instr2) == ORI); uint32_t rs_field = GetRt(instr2) << kRsShift; uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. *(p+2) = SPECIAL | rs_field | rd_field | JALR; patched = true; } else if (IsJ(instr3)) { - ASSERT(GetOpcodeField(instr1) == LUI); - ASSERT(GetOpcodeField(instr2) == ORI); + DCHECK(GetOpcodeField(instr1) == LUI); + DCHECK(GetOpcodeField(instr2) == ORI); uint32_t rs_field = GetRt(instr2) << kRsShift; *(p+2) = SPECIAL | rs_field | JR; @@ -2323,21 +2297,22 @@ } if (patched) { - CPU::FlushICache(pc+2, sizeof(Address)); + CpuFeatures::FlushICache(pc+2, sizeof(Address)); } } -MaybeObject* Assembler::AllocateConstantPool(Heap* heap) { +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { // No out-of-line constant pool support. - UNREACHABLE(); - return NULL; + DCHECK(!FLAG_enable_ool_constant_pool); + return isolate->factory()->empty_constant_pool_array(); } void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { // No out-of-line constant pool support. 
- UNREACHABLE(); + DCHECK(!FLAG_enable_ool_constant_pool); + return; } diff -Nru nodejs-0.11.13/deps/v8/src/mips/assembler-mips.h nodejs-0.11.15/deps/v8/src/mips/assembler-mips.h --- nodejs-0.11.13/deps/v8/src/mips/assembler-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/assembler-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -38,9 +38,9 @@ #include <stdio.h> -#include "assembler.h" -#include "constants-mips.h" -#include "serialize.h" +#include "src/assembler.h" +#include "src/mips/constants-mips.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -77,10 +77,20 @@ static const int kSizeInBytes = 4; static const int kCpRegister = 23; // cp (s7) is the 23rd register. +#if defined(V8_TARGET_LITTLE_ENDIAN) + static const int kMantissaOffset = 0; + static const int kExponentOffset = 4; +#elif defined(V8_TARGET_BIG_ENDIAN) + static const int kMantissaOffset = 4; + static const int kExponentOffset = 0; +#else +#error Unknown endianness +#endif + inline static int NumAllocatableRegisters(); static int ToAllocationIndex(Register reg) { - ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || + DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || reg.is(from_code(kCpRegister))); return reg.is(from_code(kCpRegister)) ? kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. @@ -88,14 +98,14 @@ } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); return index == kMaxNumAllocatableRegisters - 1 ? from_code(kCpRegister) : // Last index is always the 'cp' register. from_code(index + 2); // zero_reg and 'at' are skipped. 
} static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "v0", "v1", @@ -123,11 +133,11 @@ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } bool is(Register reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } @@ -216,7 +226,7 @@ static const char* AllocationIndexToString(int index); static FPURegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index * 2); } @@ -229,32 +239,32 @@ bool is(FPURegister creg) const { return code_ == creg.code_; } FPURegister low() const { // Find low reg of a Double-reg pair, which is the reg itself. - ASSERT(code_ % 2 == 0); // Specified Double reg must be even. + DCHECK(code_ % 2 == 0); // Specified Double reg must be even. FPURegister reg; reg.code_ = code_; - ASSERT(reg.is_valid()); + DCHECK(reg.is_valid()); return reg; } FPURegister high() const { // Find high reg of a Doubel-reg pair, which is reg + 1. - ASSERT(code_ % 2 == 0); // Specified Double reg must be even. + DCHECK(code_ % 2 == 0); // Specified Double reg must be even. FPURegister reg; reg.code_ = code_ + 1; - ASSERT(reg.is_valid()); + DCHECK(reg.is_valid()); return reg; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } void setcode(int f) { code_ = f; - ASSERT(is_valid()); + DCHECK(is_valid()); } // Unfortunately we can't make this private in a struct. 
int code_; @@ -325,16 +335,16 @@ bool is_valid() const { return code_ == kFCSRRegister; } bool is(FPUControlRegister creg) const { return code_ == creg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return 1 << code_; } void setcode(int f) { code_ = f; - ASSERT(is_valid()); + DCHECK(is_valid()); } // Unfortunately we can't make this private in a struct. int code_; @@ -367,7 +377,7 @@ INLINE(bool is_reg() const); inline int32_t immediate() const { - ASSERT(!is_reg()); + DCHECK(!is_reg()); return imm32_; } @@ -409,64 +419,6 @@ }; -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a CpuFeatureScope before use. -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Check whether a feature is supported by the target CPU. 
- static bool IsSupported(CpuFeature f) { - ASSERT(initialized_); - return Check(f, supported_); - } - - static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { - ASSERT(initialized_); - return Check(f, found_by_runtime_probing_only_); - } - - static bool IsSafeForSnapshot(CpuFeature f) { - return Check(f, cross_compile_) || - (IsSupported(f) && - (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); - } - - static bool VerifyCrossCompiling() { - return cross_compile_ == 0; - } - - static bool VerifyCrossCompiling(CpuFeature f) { - unsigned mask = flag2set(f); - return cross_compile_ == 0 || - (cross_compile_ & mask) == mask; - } - - private: - static bool Check(CpuFeature f, unsigned set) { - return (set & flag2set(f)) != 0; - } - - static unsigned flag2set(CpuFeature f) { - return 1u << f; - } - -#ifdef DEBUG - static bool initialized_; -#endif - static unsigned supported_; - static unsigned found_by_runtime_probing_only_; - - static unsigned cross_compile_; - - friend class ExternalReference; - friend class PlatformFeatureScope; - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -}; - - class Assembler : public AssemblerBase { public: // Create an assembler. Instructions and relocation information are emitted @@ -515,7 +467,7 @@ int32_t branch_offset(Label* L, bool jump_elimination_allowed); int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { int32_t o = branch_offset(L, jump_elimination_allowed); - ASSERT((o & 3) == 0); // Assert the offset is aligned. + DCHECK((o & 3) == 0); // Assert the offset is aligned. return o >> 2; } uint32_t jump_address(Label* L); @@ -526,7 +478,10 @@ // Read/Modify the code target address in the branch/call instruction at pc. 
static Address target_address_at(Address pc); - static void set_target_address_at(Address pc, Address target); + static void set_target_address_at(Address pc, + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED); // On MIPS there is no Constant Pool so we skip that parameter. INLINE(static Address target_address_at(Address pc, ConstantPoolArray* constant_pool)) { @@ -534,8 +489,10 @@ } INLINE(static void set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target)) { - set_target_address_at(pc, target); + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)) { + set_target_address_at(pc, target, icache_flush_mode); } INLINE(static Address target_address_at(Address pc, Code* code)) { ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; @@ -543,15 +500,20 @@ } INLINE(static void set_target_address_at(Address pc, Code* code, - Address target)) { + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)) { ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; - set_target_address_at(pc, constant_pool, target); + set_target_address_at(pc, constant_pool, target, icache_flush_mode); } // Return the code target address at a call site from the return address // of that call in the instruction stream. inline static Address target_address_from_return_address(Address pc); + // Return the code target address of the patch debug break slot + inline static Address break_address_from_return_address(Address pc); + static void JumpLabelToJumpRegister(Address pc); static void QuietNaN(HeapObject* nan); @@ -647,7 +609,7 @@ // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero // marking, to avoid conflict with ssnop and ehb instructions. void nop(unsigned int type = 0) { - ASSERT(type < 32); + DCHECK(type < 32); Register nop_rt_reg = (type == 0) ? 
zero_reg : at; sll(zero_reg, nop_rt_reg, type, true); } @@ -687,7 +649,7 @@ void jal_or_jalr(int32_t target, Register rs); - //-------Data-processing-instructions--------- + // -------Data-processing-instructions--------- // Arithmetic. void addu(Register rd, Register rs, Register rt); @@ -725,7 +687,7 @@ void rotrv(Register rd, Register rt, Register rs); - //------------Memory-instructions------------- + // ------------Memory-instructions------------- void lb(Register rd, const MemOperand& rs); void lbu(Register rd, const MemOperand& rs); @@ -741,12 +703,12 @@ void swr(Register rd, const MemOperand& rs); - //----------------Prefetch-------------------- + // ----------------Prefetch-------------------- void pref(int32_t hint, const MemOperand& rs); - //-------------Misc-instructions-------------- + // -------------Misc-instructions-------------- // Break / Trap instructions. void break_(uint32_t code, bool break_as_stop = false); @@ -779,7 +741,7 @@ void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); void ext_(Register rt, Register rs, uint16_t pos, uint16_t size); - //--------Coprocessor-instructions---------------- + // --------Coprocessor-instructions---------------- // Load, store, and move. void lwc1(FPURegister fd, const MemOperand& src); @@ -885,10 +847,10 @@ assem_->EndBlockGrowBuffer(); } - private: - Assembler* assem_; + private: + Assembler* assem_; - DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); }; // Debugging. @@ -902,12 +864,12 @@ // Record the AST id of the CallIC being compiled, so that it can be placed // in the relocation information. 
void SetRecordedAstId(TypeFeedbackId ast_id) { - ASSERT(recorded_ast_id_.IsNone()); + DCHECK(recorded_ast_id_.IsNone()); recorded_ast_id_ = ast_id; } TypeFeedbackId RecordedAstId() { - ASSERT(!recorded_ast_id_.IsNone()); + DCHECK(!recorded_ast_id_.IsNone()); return recorded_ast_id_; } @@ -1007,7 +969,7 @@ void CheckTrampolinePool(); // Allocate a constant pool of the correct size for the generated code. - MaybeObject* AllocateConstantPool(Heap* heap); + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); // Generate the constant pool for the generated code. void PopulateConstantPool(ConstantPoolArray* constant_pool); @@ -1062,12 +1024,12 @@ // Temporarily block automatic assembly buffer growth. void StartBlockGrowBuffer() { - ASSERT(!block_buffer_growth_); + DCHECK(!block_buffer_growth_); block_buffer_growth_ = true; } void EndBlockGrowBuffer() { - ASSERT(block_buffer_growth_); + DCHECK(block_buffer_growth_); block_buffer_growth_ = false; } @@ -1229,7 +1191,7 @@ // We have run out of space on trampolines. // Make sure we fail in debug mode, so we become aware of each case // when this happens. - ASSERT(0); + DCHECK(0); // Internal exception will be caught. 
} else { trampoline_slot = next_slot_; diff -Nru nodejs-0.11.13/deps/v8/src/mips/assembler-mips-inl.h nodejs-0.11.15/deps/v8/src/mips/assembler-mips-inl.h --- nodejs-0.11.13/deps/v8/src/mips/assembler-mips-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/assembler-mips-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -37,15 +37,19 @@ #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_ #define V8_MIPS_ASSEMBLER_MIPS_INL_H_ -#include "mips/assembler-mips.h" +#include "src/mips/assembler-mips.h" -#include "cpu.h" -#include "debug.h" +#include "src/assembler.h" +#include "src/debug.h" namespace v8 { namespace internal { + +bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); } + + // ----------------------------------------------------------------------------- // Operand and MemOperand. @@ -96,11 +100,11 @@ int FPURegister::ToAllocationIndex(FPURegister reg) { - ASSERT(reg.code() % 2 == 0); - ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters); - ASSERT(reg.is_valid()); - ASSERT(!reg.is(kDoubleRegZero)); - ASSERT(!reg.is(kLithiumScratchDouble)); + DCHECK(reg.code() % 2 == 0); + DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters); + DCHECK(reg.is_valid()); + DCHECK(!reg.is(kDoubleRegZero)); + DCHECK(!reg.is(kLithiumScratchDouble)); return (reg.code() / 2); } @@ -108,7 +112,7 @@ // ----------------------------------------------------------------------------- // RelocInfo. -void RelocInfo::apply(intptr_t delta) { +void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { if (IsCodeTarget(rmode_)) { uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask; uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask; @@ -121,19 +125,19 @@ // Absolute code pointer inside code object moves with the code object. 
byte* p = reinterpret_cast<byte*>(pc_); int count = Assembler::RelocateInternalReference(p, delta); - CPU::FlushICache(p, count * sizeof(uint32_t)); + CpuFeatures::FlushICache(p, count * sizeof(uint32_t)); } } Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); @@ -167,10 +171,13 @@ } -void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); - Assembler::set_target_address_at(pc_, host_, target); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && + host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( host(), this, HeapObject::cast(target_code)); @@ -183,25 +190,32 @@ } +Address Assembler::break_address_from_return_address(Address pc) { + return pc - Assembler::kPatchDebugBreakSlotReturnOffset; +} + + Object* RelocInfo::target_object() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)); } Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || 
rmode_ == EMBEDDED_OBJECT); return Handle<Object>(reinterpret_cast<Object**>( Assembler::target_address_at(pc_, host_))); } -void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - ASSERT(!target->IsConsString()); +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, host_, - reinterpret_cast<Address>(target)); - if (mode == UPDATE_WRITE_BARRIER && + reinterpret_cast<Address>(target), + icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { host()->GetHeap()->incremental_marking()->RecordWrite( @@ -211,42 +225,46 @@ Address RelocInfo::target_reference() { - ASSERT(rmode_ == EXTERNAL_REFERENCE); + DCHECK(rmode_ == EXTERNAL_REFERENCE); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_runtime_entry(Assembler* origin) { - ASSERT(IsRuntimeEntry(rmode_)); + DCHECK(IsRuntimeEntry(rmode_)); return target_address(); } void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode mode) { - ASSERT(IsRuntimeEntry(rmode_)); - if (target_address() != target) set_target_address(target, mode); + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) + set_target_address(target, write_barrier_mode, icache_flush_mode); } Handle<Cell> RelocInfo::target_cell_handle() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); Address address = Memory::Address_at(pc_); return Handle<Cell>(reinterpret_cast<Cell**>(address)); } Cell* RelocInfo::target_cell() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); return Cell::FromValueAddress(Memory::Address_at(pc_)); } -void RelocInfo::set_target_cell(Cell* 
cell, WriteBarrierMode mode) { - ASSERT(rmode_ == RelocInfo::CELL); +void RelocInfo::set_target_cell(Cell* cell, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CELL); Address address = cell->address() + Cell::kValueOffset; Memory::Address_at(pc_) = address; - if (mode == UPDATE_WRITE_BARRIER && host() != NULL) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) { // TODO(1550) We are passing NULL as a slot because cell can never be on // evacuation candidate. host()->GetHeap()->incremental_marking()->RecordWrite( @@ -255,7 +273,7 @@ } -static const int kNoCodeAgeSequenceLength = 7; +static const int kNoCodeAgeSequenceLength = 7 * Assembler::kInstrSize; Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { @@ -265,14 +283,15 @@ Code* RelocInfo::code_age_stub() { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); return Code::GetCodeFromTargetAddress( Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_)); } -void RelocInfo::set_code_age_stub(Code* stub) { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); Assembler::set_target_address_at(pc_ + Assembler::kInstrSize, host_, stub->instruction_start()); @@ -280,7 +299,7 @@ Address RelocInfo::call_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); // The pc_ offset of 0 assumes mips patched return sequence per // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or @@ -290,7 +309,7 @@ void RelocInfo::set_call_address(Address target) { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || 
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); // The pc_ offset of 0 assumes mips patched return sequence per // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or @@ -310,7 +329,7 @@ Object** RelocInfo::call_object_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize); } @@ -322,7 +341,7 @@ void RelocInfo::WipeOut() { - ASSERT(IsEmbeddedObject(rmode_) || + DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsExternalReference(rmode_)); @@ -361,14 +380,12 @@ visitor->VisitExternalReference(this); } else if (RelocInfo::IsCodeAgeSequence(mode)) { visitor->VisitCodeAgeSequence(this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence())) && isolate->debug()->has_break_points()) { visitor->VisitDebugTarget(this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } @@ -388,14 +405,12 @@ StaticVisitor::VisitExternalReference(this); } else if (RelocInfo::IsCodeAgeSequence(mode)) { StaticVisitor::VisitCodeAgeSequence(heap, this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } diff -Nru nodejs-0.11.13/deps/v8/src/mips/builtins-mips.cc nodejs-0.11.15/deps/v8/src/mips/builtins-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/builtins-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/src/mips/builtins-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "codegen.h" -#include "debug.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -65,7 +42,7 @@ num_extra_args = 1; __ push(a1); } else { - ASSERT(extra_args == NO_EXTRA_ARGUMENTS); + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); } // JumpToExternalReference expects s0 to contain the number of arguments @@ -332,7 +309,7 @@ __ LoadRoot(t0, Heap::kStackLimitRootIndex); __ Branch(&ok, hs, sp, Operand(t0)); - CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode); + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); GenerateTailCallToReturnedCode(masm); __ bind(&ok); @@ -342,7 +319,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function, - bool count_constructions, bool create_memento) { // ----------- S t a t e ------------- // -- a0 : number of arguments @@ -352,14 +328,8 @@ // -- sp[...]: constructor arguments // ----------------------------------- - // Should never count constructions for api objects. - ASSERT(!is_api_function || !count_constructions); - // Should never create mementos for api functions. - ASSERT(!is_api_function || !create_memento); - - // Should never create mementos before slack tracking is finished. - ASSERT(!count_constructions || !create_memento); + DCHECK(!is_api_function || !create_memento); Isolate* isolate = masm->isolate(); @@ -383,21 +353,16 @@ __ sll(a0, a0, kSmiTagSize); // Tag arguments count. __ MultiPushReversed(a0.bit() | a1.bit()); - // Use t7 to hold undefined, which is used in several places below. - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); - Label rt_call, allocated; // Try to allocate the object without transitioning into C code. 
If any of // the preconditions is not met, the code bails out to the runtime call. if (FLAG_inline_new) { Label undo_allocation; -#ifdef ENABLE_DEBUGGER_SUPPORT ExternalReference debug_step_in_fp = ExternalReference::debug_step_in_fp_address(isolate); __ li(a2, Operand(debug_step_in_fp)); __ lw(a2, MemOperand(a2)); __ Branch(&rt_call, ne, a2, Operand(zero_reg)); -#endif // Load the initial map and verify that it is in fact a map. // a1: constructor function @@ -414,22 +379,26 @@ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE)); - if (count_constructions) { + if (!is_api_function) { Label allocate; + MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset); + // Check if slack tracking is enabled. + __ lw(t0, bit_field3); + __ DecodeField<Map::ConstructionCount>(t2, t0); + __ Branch(&allocate, eq, t2, Operand(JSFunction::kNoSlackTracking)); // Decrease generous allocation count. - __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset); - __ lbu(t0, constructor_count); - __ Subu(t0, t0, Operand(1)); - __ sb(t0, constructor_count); - __ Branch(&allocate, ne, t0, Operand(zero_reg)); + __ Subu(t0, t0, Operand(1 << Map::ConstructionCount::kShift)); + __ Branch(USE_DELAY_SLOT, + &allocate, ne, t2, Operand(JSFunction::kFinishSlackTracking)); + __ sw(t0, bit_field3); // In delay slot. __ Push(a1, a2, a1); // a1 = Constructor. - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1); + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); __ Pop(a1, a2); + // Slack tracking counter is kNoSlackTracking after runtime call. 
+ DCHECK(JSFunction::kNoSlackTracking == 0); + __ mov(t2, zero_reg); __ bind(&allocate); } @@ -456,9 +425,9 @@ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset)); __ sw(t6, MemOperand(t5, JSObject::kElementsOffset)); __ Addu(t5, t5, Operand(3*kPointerSize)); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset); + DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset); // Fill all the in-object properties with appropriate filler. // a1: constructor function @@ -466,46 +435,56 @@ // a3: object size (in words, including memento if create_memento) // t4: JSObject (not tagged) // t5: First in-object property of JSObject (not tagged) - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + // t2: slack tracking counter (non-API function case) + DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize); + + // Use t7 to hold undefined, which is used in several places below. + __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); - if (count_constructions) { - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); - __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); - __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, - kBitsPerByte); + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. + __ Branch(&no_inobject_slack_tracking, + eq, t2, Operand(JSFunction::kNoSlackTracking)); + + // Allocate object with a slack. + __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset)); __ sll(at, a0, kPointerSizeLog2); __ addu(a0, t5, at); - __ sll(at, a3, kPointerSizeLog2); - __ Addu(t6, t4, Operand(at)); // End of object. // a0: offset of first field after pre-allocated fields if (FLAG_debug_code) { + __ sll(at, a3, kPointerSizeLog2); + __ Addu(t6, t4, Operand(at)); // End of object. 
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, a0, Operand(t6)); } __ InitializeFieldsWithFiller(t5, a0, t7); // To allow for truncation. __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex); - __ InitializeFieldsWithFiller(t5, t6, t7); - } else if (create_memento) { - __ Subu(t7, a3, Operand(AllocationMemento::kSize / kPointerSize)); - __ sll(at, t7, kPointerSizeLog2); - __ Addu(a0, t4, Operand(at)); // End of object. - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); + // Fill the remaining fields with one pointer filler map. + + __ bind(&no_inobject_slack_tracking); + } + + if (create_memento) { + __ Subu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize)); + __ sll(a0, a0, kPointerSizeLog2); + __ Addu(a0, t4, Operand(a0)); // End of object. __ InitializeFieldsWithFiller(t5, a0, t7); // Fill in memento fields. // t5: points to the allocated but uninitialized memento. __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex); - ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); + DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); __ sw(t7, MemOperand(t5)); __ Addu(t5, t5, kPointerSize); // Load the AllocationSite. __ lw(t7, MemOperand(sp, 2 * kPointerSize)); - ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); + DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); __ sw(t7, MemOperand(t5)); __ Addu(t5, t5, kPointerSize); } else { - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); __ sll(at, a3, kPointerSizeLog2); __ Addu(a0, t4, Operand(at)); // End of object. __ InitializeFieldsWithFiller(t5, a0, t7); @@ -525,12 +504,9 @@ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); // The field instance sizes contains both pre-allocated property fields // and in-object properties. 
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); - __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, - kBitsPerByte); + __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset)); __ Addu(a3, a3, Operand(t6)); - __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte, - kBitsPerByte); + __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); __ subu(a3, a3, t6); // Done if no extra properties are to be allocated. @@ -565,8 +541,8 @@ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset)); __ Addu(a2, a2, Operand(2 * kPointerSize)); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset); + DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset); // Initialize the fields to undefined. // a1: constructor @@ -576,13 +552,13 @@ // t5: FixedArray (not tagged) __ sll(t3, a3, kPointerSizeLog2); __ addu(t6, a2, t3); // End of object. - ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize); { Label loop, entry; - if (count_constructions) { + if (!is_api_function || create_memento) { __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); } else if (FLAG_debug_code) { - __ LoadRoot(t8, Heap::kUndefinedValueRootIndex); - __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t8)); + __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); + __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2)); } __ jmp(&entry); __ bind(&loop); @@ -624,9 +600,9 @@ __ push(a1); // Argument for Runtime_NewObject. if (create_memento) { - __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2); + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); } else { - __ CallRuntime(Runtime::kHiddenNewObject, 1); + __ CallRuntime(Runtime::kNewObject, 1); } __ mov(t4, v0); @@ -640,6 +616,7 @@ // Receiver for constructor call allocated. 
// t4: JSObject + __ bind(&allocated); if (create_memento) { __ lw(a2, MemOperand(sp, kPointerSize * 2)); @@ -655,7 +632,6 @@ __ bind(&count_incremented); } - __ bind(&allocated); __ Push(t4, t4); // Reload the number of arguments from the stack. @@ -706,7 +682,7 @@ } // Store offset of return address for deoptimizer. - if (!is_api_function && !count_constructions) { + if (!is_api_function) { masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); } @@ -755,18 +731,13 @@ } -void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, true, false); -} - - void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new); + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); } void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, true, false, false); + Generate_JSConstructStubHelper(masm, true, false); } @@ -829,7 +800,7 @@ if (is_construct) { // No type feedback cell is available __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); __ CallStub(&stub); } else { ParameterCount actual(a0); @@ -854,7 +825,7 @@ void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized); + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); GenerateTailCallToReturnedCode(masm); } @@ -867,7 +838,7 @@ // Whether to compile in a background thread. __ Push(masm->isolate()->factory()->ToBoolean(concurrent)); - __ CallRuntime(Runtime::kHiddenCompileOptimized, 2); + __ CallRuntime(Runtime::kCompileOptimized, 2); // Restore receiver. __ Pop(a1); } @@ -895,7 +866,7 @@ // Set a0 to point to the head of the PlatformCodeAge sequence. 
__ Subu(a0, a0, - Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize)); + Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); // The following registers must be saved and restored when calling through to // the runtime: @@ -934,7 +905,7 @@ // Set a0 to point to the head of the PlatformCodeAge sequence. __ Subu(a0, a0, - Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize)); + Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); // The following registers must be saved and restored when calling through to // the runtime: @@ -956,7 +927,7 @@ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); // Jump to point after the code-age stub. - __ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize)); + __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength)); __ Jump(a0); } @@ -976,7 +947,7 @@ // registers. __ MultiPush(kJSCallerSaved | kCalleeSaved); // Pass the function and deoptimization type to the runtime system. - __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); __ MultiPop(kJSCallerSaved | kCalleeSaved); } @@ -1002,7 +973,7 @@ // Pass the function and deoptimization type to the runtime system. __ li(a0, Operand(Smi::FromInt(static_cast<int>(type)))); __ push(a0); - __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); } // Get the full codegen state from the stack and untag it -> t2. @@ -1084,7 +1055,7 @@ __ Branch(&ok, hs, sp, Operand(at)); { FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kHiddenStackGuard, 0); + __ CallRuntime(Runtime::kStackGuard, 0); } __ Jump(masm->isolate()->builtins()->OnStackReplacement(), RelocInfo::CODE_TARGET); @@ -1121,7 +1092,7 @@ // a1: function Label shift_arguments; __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION. 
- { Label convert_to_object, use_global_receiver, patch_receiver; + { Label convert_to_object, use_global_proxy, patch_receiver; // Change context eagerly in case we need the global receiver. __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); @@ -1147,9 +1118,9 @@ __ JumpIfSmi(a2, &convert_to_object, t2); __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); - __ Branch(&use_global_receiver, eq, a2, Operand(a3)); + __ Branch(&use_global_proxy, eq, a2, Operand(a3)); __ LoadRoot(a3, Heap::kNullValueRootIndex); - __ Branch(&use_global_receiver, eq, a2, Operand(a3)); + __ Branch(&use_global_proxy, eq, a2, Operand(a3)); STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); __ GetObjectType(a2, a3, a3); @@ -1168,16 +1139,17 @@ __ sra(a0, a0, kSmiTagSize); // Un-tag. // Leave internal frame. } + // Restore the function to a1, and the flag to t0. __ sll(at, a0, kPointerSizeLog2); __ addu(at, sp, at); __ lw(a1, MemOperand(at)); - __ li(t0, Operand(0, RelocInfo::NONE32)); - __ Branch(&patch_receiver); + __ Branch(USE_DELAY_SLOT, &patch_receiver); + __ li(t0, Operand(0, RelocInfo::NONE32)); // In delay slot. - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); + __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); __ bind(&patch_receiver); __ sll(at, a0, kPointerSizeLog2); @@ -1305,7 +1277,7 @@ // Out of stack space. __ lw(a1, MemOperand(fp, kFunctionOffset)); __ Push(a1, v0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); // End of stack check. // Push current limit and index. @@ -1329,7 +1301,7 @@ // Compute the receiver. // Do not transform the receiver for strict mode functions. 
- Label call_to_object, use_global_receiver; + Label call_to_object, use_global_proxy; __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset)); __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); @@ -1342,9 +1314,9 @@ // Compute the receiver in sloppy mode. __ JumpIfSmi(a0, &call_to_object); __ LoadRoot(a1, Heap::kNullValueRootIndex); - __ Branch(&use_global_receiver, eq, a0, Operand(a1)); + __ Branch(&use_global_proxy, eq, a0, Operand(a1)); __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - __ Branch(&use_global_receiver, eq, a0, Operand(a2)); + __ Branch(&use_global_proxy, eq, a0, Operand(a2)); // Check if the receiver is already a JavaScript object. // a0: receiver @@ -1360,9 +1332,9 @@ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. __ Branch(&push_receiver); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset)); + __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset)); // Push the receiver. // a0: receiver @@ -1426,6 +1398,27 @@ } +static void ArgumentAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- a0 : actual number of arguments + // -- a1 : function (passed through to callee) + // -- a2 : expected number of arguments + // ----------------------------------- + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + __ LoadRoot(t1, Heap::kRealStackLimitRootIndex); + // Make t1 the space we have left. The stack might already be overflowed + // here which will cause t1 to become negative. + __ subu(t1, sp, t1); + // Check if the arguments will overflow the stack. + __ sll(at, a2, kPointerSizeLog2); + // Signed comparison. 
+ __ Branch(stack_overflow, le, t1, Operand(at)); +} + + static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ sll(a0, a0, kSmiTagSize); __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); @@ -1460,6 +1453,8 @@ // -- a2: expected arguments count // ----------------------------------- + Label stack_overflow; + ArgumentAdaptorStackCheck(masm, &stack_overflow); Label invoke, dont_adapt_arguments; Label enough, too_few; @@ -1568,6 +1563,14 @@ // ------------------------------------------- __ bind(&dont_adapt_arguments); __ Jump(a3); + + __ bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ break_(0xCC); + } } diff -Nru nodejs-0.11.13/deps/v8/src/mips/codegen-mips.cc nodejs-0.11.15/deps/v8/src/mips/codegen-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/codegen-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/codegen-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "codegen.h" -#include "macro-assembler.h" -#include "simulator-mips.h" +#include "src/codegen.h" +#include "src/macro-assembler.h" +#include "src/mips/simulator-mips.h" namespace v8 { namespace internal { @@ -52,7 +29,8 @@ UnaryMathFunction CreateExpFunction() { if (!FLAG_fast_math) return &std::exp; size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::exp; ExternalReference::InitializeMathExpData(); @@ -67,30 +45,22 @@ Register temp2 = t1; Register temp3 = t2; - if (!IsMipsSoftFloatABI) { - // Input value is in f12 anyway, nothing to do. 
- } else { - __ Move(input, a0, a1); - } + __ MovFromFloatParameter(input); __ Push(temp3, temp2, temp1); MathExpGenerator::EmitMathExp( &masm, input, result, double_scratch1, double_scratch2, temp1, temp2, temp3); __ Pop(temp3, temp2, temp1); - if (!IsMipsSoftFloatABI) { - // Result is already in f0, nothing to do. - } else { - __ Move(v0, v1, result); - } + __ MovToFloatResult(result); __ Ret(); } CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); #if !defined(USE_SIMULATOR) return FUNCTION_CAST<UnaryMathFunction>(buffer); @@ -102,17 +72,13 @@ #if defined(V8_HOST_ARCH_MIPS) -OS::MemCopyUint8Function CreateMemCopyUint8Function( - OS::MemCopyUint8Function stub) { +MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else - if (Serializer::enabled()) { - return stub; - } - size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true)); if (buffer == NULL) return stub; // This code assumes that cache lines are 32 bytes and if the cache line is @@ -132,7 +98,7 @@ // the kPrefHintPrepareForStore hint is used, the code will not work // correctly. uint32_t max_pref_size = 128; - ASSERT(pref_chunk < max_pref_size); + DCHECK(pref_chunk < max_pref_size); // pref_limit is set based on the fact that we never use an offset // greater then 5 on a store pref and that a single pref can @@ -145,7 +111,7 @@ // The initial prefetches may fetch bytes that are before the buffer being // copied. Start copies with an offset of 4 so avoid this situation when // using kPrefHintPrepareForStore. 
- ASSERT(pref_hint_store != kPrefHintPrepareForStore || + DCHECK(pref_hint_store != kPrefHintPrepareForStore || pref_chunk * 4 >= max_pref_size); // If the size is less than 8, go to lastb. Regardless of size, @@ -167,11 +133,17 @@ __ beq(a3, zero_reg, &aligned); // Already aligned. __ subu(a2, a2, a3); // In delay slot. a2 is the remining bytes count. - __ lwr(t8, MemOperand(a1)); - __ addu(a1, a1, a3); - __ swr(t8, MemOperand(a0)); - __ addu(a0, a0, a3); - + if (kArchEndian == kLittle) { + __ lwr(t8, MemOperand(a1)); + __ addu(a1, a1, a3); + __ swr(t8, MemOperand(a0)); + __ addu(a0, a0, a3); + } else { + __ lwl(t8, MemOperand(a1)); + __ addu(a1, a1, a3); + __ swl(t8, MemOperand(a0)); + __ addu(a0, a0, a3); + } // Now dst/src are both aligned to (word) aligned addresses. Set a2 to // count how many bytes we have to copy after all the 64 byte chunks are // copied and a3 to the dst pointer after all the 64 byte chunks have been @@ -323,12 +295,21 @@ __ beq(a3, zero_reg, &ua_chk16w); __ subu(a2, a2, a3); // In delay slot. - __ lwr(v1, MemOperand(a1)); - __ lwl(v1, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ addu(a1, a1, a3); - __ swr(v1, MemOperand(a0)); - __ addu(a0, a0, a3); + if (kArchEndian == kLittle) { + __ lwr(v1, MemOperand(a1)); + __ lwl(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ addu(a1, a1, a3); + __ swr(v1, MemOperand(a0)); + __ addu(a0, a0, a3); + } else { + __ lwl(v1, MemOperand(a1)); + __ lwr(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ addu(a1, a1, a3); + __ swl(v1, MemOperand(a0)); + __ addu(a0, a0, a3); + } // Now the dst (but not the source) is aligned. 
Set a2 to count how many // bytes we have to copy after all the 64 byte chunks are copied and a3 to @@ -357,40 +338,77 @@ __ bind(&ua_loop16w); __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); - __ lwr(t0, MemOperand(a1)); - __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); - - if (pref_hint_store == kPrefHintPrepareForStore) { - __ sltu(v1, t9, a0); - __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); + if (kArchEndian == kLittle) { + __ lwr(t0, MemOperand(a1)); + __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); + __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); + } + __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. + + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&ua_skip_pref); + __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwl(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(t0, MemOperand(a1)); + __ lwl(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwl(t2, MemOperand(a1, 2, 
loadstore_chunk)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); + __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); + } + __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. + + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&ua_skip_pref); + __ lwl(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwl(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwl(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwl(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwr(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); } - __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. 
- - __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); - - __ bind(&ua_skip_pref); - __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); - __ lwl(t0, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t1, - MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t2, - MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t3, - MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t4, - MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t5, - MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t6, - MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t7, - MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); __ sw(t0, MemOperand(a0)); __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); @@ -400,30 +418,57 @@ __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); - __ lwr(t0, MemOperand(a1, 8, loadstore_chunk)); - __ lwr(t1, MemOperand(a1, 9, loadstore_chunk)); - __ lwr(t2, MemOperand(a1, 10, loadstore_chunk)); - __ lwr(t3, MemOperand(a1, 11, loadstore_chunk)); - __ lwr(t4, MemOperand(a1, 12, loadstore_chunk)); - __ lwr(t5, MemOperand(a1, 13, loadstore_chunk)); - __ lwr(t6, MemOperand(a1, 14, loadstore_chunk)); - __ lwr(t7, MemOperand(a1, 15, loadstore_chunk)); - __ lwl(t0, - MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t1, - MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t2, - MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t3, - MemOperand(a1, 12, 
loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t4, - MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t5, - MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t6, - MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t7, - MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); + if (kArchEndian == kLittle) { + __ lwr(t0, MemOperand(a1, 8, loadstore_chunk)); + __ lwr(t1, MemOperand(a1, 9, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 10, loadstore_chunk)); + __ lwr(t3, MemOperand(a1, 11, loadstore_chunk)); + __ lwr(t4, MemOperand(a1, 12, loadstore_chunk)); + __ lwr(t5, MemOperand(a1, 13, loadstore_chunk)); + __ lwr(t6, MemOperand(a1, 14, loadstore_chunk)); + __ lwr(t7, MemOperand(a1, 15, loadstore_chunk)); + __ lwl(t0, + MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t4, + MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t5, + MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t6, + MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t7, + MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(t0, MemOperand(a1, 8, loadstore_chunk)); + __ lwl(t1, MemOperand(a1, 9, loadstore_chunk)); + __ lwl(t2, MemOperand(a1, 10, loadstore_chunk)); + __ lwl(t3, MemOperand(a1, 11, loadstore_chunk)); + __ lwl(t4, MemOperand(a1, 12, loadstore_chunk)); + __ lwl(t5, MemOperand(a1, 13, loadstore_chunk)); + __ lwl(t6, MemOperand(a1, 14, loadstore_chunk)); + __ lwl(t7, MemOperand(a1, 15, loadstore_chunk)); + __ lwr(t0, + MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t1, + 
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t2, + MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t3, + MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t4, + MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t5, + MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t6, + MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t7, + MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); + } __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); __ sw(t0, MemOperand(a0, 8, loadstore_chunk)); __ sw(t1, MemOperand(a0, 9, loadstore_chunk)); @@ -447,30 +492,57 @@ __ beq(a2, t8, &ua_chk1w); __ nop(); // In delay slot. - __ lwr(t0, MemOperand(a1)); - __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); - __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); - __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); - __ lwl(t0, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t1, - MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t2, - MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t3, - MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t4, - MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t5, - MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t6, - MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t7, - MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + if (kArchEndian == kLittle) { + __ lwr(t0, MemOperand(a1)); + __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); + 
__ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); + __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwl(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(t0, MemOperand(a1)); + __ lwl(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwl(t2, MemOperand(a1, 2, loadstore_chunk)); + __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); + __ lwl(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwl(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwl(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwl(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwr(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } __ addiu(a1, a1, 8 * loadstore_chunk); __ 
sw(t0, MemOperand(a0)); __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); @@ -491,9 +563,15 @@ __ addu(a3, a0, a3); __ bind(&ua_wordCopy_loop); - __ lwr(v1, MemOperand(a1)); - __ lwl(v1, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + if (kArchEndian == kLittle) { + __ lwr(v1, MemOperand(a1)); + __ lwl(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(v1, MemOperand(a1)); + __ lwr(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + } __ addiu(a0, a0, loadstore_chunk); __ addiu(a1, a1, loadstore_chunk); __ bne(a0, a3, &ua_wordCopy_loop); @@ -516,11 +594,11 @@ } CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); - return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); + return FUNCTION_CAST<MemCopyUint8Function>(buffer); #endif } #endif @@ -530,7 +608,8 @@ return &std::sqrt; #else size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::sqrt; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); @@ -542,10 +621,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<UnaryMathFunction>(buffer); #endif } @@ -558,14 +637,14 @@ void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); + 
DCHECK(!masm->has_frame()); masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); + DCHECK(masm->has_frame()); masm->set_has_frame(false); } @@ -576,26 +655,28 @@ #define __ ACCESS_MASM(masm) void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm, AllocationSiteMode mode, + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, Label* allocation_memento_found) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // -- a3 : target map, scratch for subsequent call - // -- t0 : scratch (elements) - // ----------------------------------- + Register scratch_elements = t0; + DCHECK(!AreAliased(receiver, key, value, target_map, + scratch_elements)); + if (mode == TRACK_ALLOCATION_SITE) { - ASSERT(allocation_memento_found != NULL); - __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found); + DCHECK(allocation_memento_found != NULL); + __ JumpIfJSArrayHasAllocationMemento( + receiver, scratch_elements, allocation_memento_found); } // Set transitioned map. 
- __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); - __ RecordWriteField(a2, + __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - a3, + target_map, t5, kRAHasNotBeenSaved, kDontSaveFPRegs, @@ -605,62 +686,74 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // -- a3 : target map, scratch for subsequent call - // -- t0 : scratch (elements) - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Register ra contains the return address. Label loop, entry, convert_hole, gc_required, only_change_map, done; + Register elements = t0; + Register length = t1; + Register array = t2; + Register array_end = array; + + // target_map parameter can be clobbered. + Register scratch1 = target_map; + Register scratch2 = t5; + Register scratch3 = t3; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, length, array, scratch2)); Register scratch = t6; if (mode == TRACK_ALLOCATION_SITE) { - __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail); + __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); } // Check for empty arrays, which only require a map transition and no changes // to the backing store. 
- __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); + __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); - __ Branch(&only_change_map, eq, at, Operand(t0)); + __ Branch(&only_change_map, eq, at, Operand(elements)); __ push(ra); - __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); - // t0: source FixedArray - // t1: number of elements (smi-tagged) + __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); + // elements: source FixedArray + // length: number of elements (smi-tagged) // Allocate new FixedDoubleArray. - __ sll(scratch, t1, 2); + __ sll(scratch, length, 2); __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize); - __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT); - // t2: destination FixedDoubleArray, not tagged as heap object + __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT); + // array: destination FixedDoubleArray, not tagged as heap object // Set destination FixedDoubleArray's length and map. - __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex); - __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset)); - __ sw(t5, MemOperand(t2, HeapObject::kMapOffset)); + __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex); + __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); // Update receiver's map. + __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset)); - __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); - __ RecordWriteField(a2, + __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - a3, - t5, + target_map, + scratch2, kRAHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Replace receiver's backing store with newly created FixedDoubleArray. 
- __ Addu(a3, t2, Operand(kHeapObjectTag)); - __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset)); - __ RecordWriteField(a2, + __ Addu(scratch1, array, Operand(kHeapObjectTag)); + __ sw(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset)); + __ RecordWriteField(receiver, JSObject::kElementsOffset, - a3, - t5, + scratch1, + scratch2, kRAHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, @@ -668,26 +761,32 @@ // Prepare for conversion loop. - __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize)); - __ sll(t2, t1, 2); - __ Addu(t2, t2, t3); - __ li(t0, Operand(kHoleNanLower32)); - __ li(t1, Operand(kHoleNanUpper32)); - // t0: kHoleNanLower32 - // t1: kHoleNanUpper32 - // t2: end of destination FixedDoubleArray, not tagged - // t3: begin of FixedDoubleArray element fields, not tagged - - __ Branch(&entry); + __ Addu(scratch1, elements, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize)); + __ sll(at, length, 2); + __ Addu(array_end, scratch3, at); + + // Repurpose registers no longer in use. + Register hole_lower = elements; + Register hole_upper = length; + + __ li(hole_lower, Operand(kHoleNanLower32)); + // scratch1: begin of source FixedArray element fields, not tagged + // hole_lower: kHoleNanLower32 + // hole_upper: kHoleNanUpper32 + // array_end: end of destination FixedDoubleArray, not tagged + // scratch3: begin of FixedDoubleArray element fields, not tagged + __ Branch(USE_DELAY_SLOT, &entry); + __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot. 
__ bind(&only_change_map); - __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); - __ RecordWriteField(a2, + __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - a3, - t5, - kRAHasNotBeenSaved, + target_map, + scratch2, + kRAHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -695,125 +794,154 @@ // Call into runtime if GC is required. __ bind(&gc_required); - __ pop(ra); - __ Branch(fail); + __ lw(ra, MemOperand(sp, 0)); + __ Branch(USE_DELAY_SLOT, fail); + __ addiu(sp, sp, kPointerSize); // In delay slot. // Convert and copy elements. __ bind(&loop); - __ lw(t5, MemOperand(a3)); - __ Addu(a3, a3, kIntSize); - // t5: current element - __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole); + __ lw(scratch2, MemOperand(scratch1)); + __ Addu(scratch1, scratch1, kIntSize); + // scratch2: current element + __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole); // Normal smi, convert to double and store. - __ mtc1(t5, f0); + __ mtc1(scratch2, f0); __ cvt_d_w(f0, f0); - __ sdc1(f0, MemOperand(t3)); - __ Addu(t3, t3, kDoubleSize); - - __ Branch(&entry); + __ sdc1(f0, MemOperand(scratch3)); + __ Branch(USE_DELAY_SLOT, &entry); + __ addiu(scratch3, scratch3, kDoubleSize); // In delay slot. // Hole found, store the-hole NaN. __ bind(&convert_hole); if (FLAG_debug_code) { // Restore a "smi-untagged" heap object. 
- __ SmiTag(t5); - __ Or(t5, t5, Operand(1)); + __ SmiTag(scratch2); + __ Or(scratch2, scratch2, Operand(1)); __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5)); + __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2)); } - __ sw(t0, MemOperand(t3)); // mantissa - __ sw(t1, MemOperand(t3, kIntSize)); // exponent - __ Addu(t3, t3, kDoubleSize); - + // mantissa + __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset)); + // exponent + __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset)); __ bind(&entry); - __ Branch(&loop, lt, t3, Operand(t2)); + __ addiu(scratch3, scratch3, kDoubleSize); + + __ Branch(&loop, lt, scratch3, Operand(array_end)); - __ pop(ra); __ bind(&done); + __ pop(ra); } void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // -- a3 : target map, scratch for subsequent call - // -- t0 : scratch (elements) - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Register ra contains the return address. Label entry, loop, convert_hole, gc_required, only_change_map; + Register elements = t0; + Register array = t2; + Register length = t1; + Register scratch = t5; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, array, length, scratch)); if (mode == TRACK_ALLOCATION_SITE) { - __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail); + __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); } // Check for empty arrays, which only require a map transition and no changes // to the backing store. 
- __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); + __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); - __ Branch(&only_change_map, eq, at, Operand(t0)); + __ Branch(&only_change_map, eq, at, Operand(elements)); - __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); + __ MultiPush( + value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit()); - __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); - // t0: source FixedArray - // t1: number of elements (smi-tagged) + __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); + // elements: source FixedArray + // length: number of elements (smi-tagged) // Allocate new FixedArray. - __ sll(a0, t1, 1); - __ Addu(a0, a0, FixedDoubleArray::kHeaderSize); - __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS); - // t2: destination FixedArray, not tagged as heap object + // Re-use value and target_map registers, as they have been saved on the + // stack. + Register array_size = value; + Register allocate_scratch = target_map; + __ sll(array_size, length, 1); + __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize); + __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required, + NO_ALLOCATION_FLAGS); + // array: destination FixedArray, not tagged as heap object // Set destination FixedDoubleArray's length and map. - __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex); - __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset)); - __ sw(t5, MemOperand(t2, HeapObject::kMapOffset)); + __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); + __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); + __ sw(scratch, MemOperand(array, HeapObject::kMapOffset)); // Prepare for conversion loop. 
- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); - __ Addu(a3, t2, Operand(FixedArray::kHeaderSize)); - __ Addu(t2, t2, Operand(kHeapObjectTag)); - __ sll(t1, t1, 1); - __ Addu(t1, a3, t1); - __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); - __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex); + Register src_elements = elements; + Register dst_elements = target_map; + Register dst_end = length; + Register heap_number_map = scratch; + __ Addu(src_elements, src_elements, Operand( + FixedDoubleArray::kHeaderSize - kHeapObjectTag + + Register::kExponentOffset)); + __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize)); + __ Addu(array, array, Operand(kHeapObjectTag)); + __ sll(dst_end, dst_end, 1); + __ Addu(dst_end, dst_elements, dst_end); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); // Using offsetted addresses. - // a3: begin of destination FixedArray element fields, not tagged - // t0: begin of source FixedDoubleArray element fields, not tagged, +4 - // t1: end of destination FixedArray, not tagged - // t2: destination FixedArray - // t3: the-hole pointer - // t5: heap number map + // dst_elements: begin of destination FixedArray element fields, not tagged + // src_elements: begin of source FixedDoubleArray element fields, not tagged, + // points to the exponent + // dst_end: end of destination FixedArray, not tagged + // array: destination FixedArray + // heap_number_map: heap number map __ Branch(&entry); // Call into runtime if GC is required. 
__ bind(&gc_required); - __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); + __ MultiPop( + value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit()); __ Branch(fail); __ bind(&loop); - __ lw(a1, MemOperand(t0)); - __ Addu(t0, t0, kDoubleSize); - // a1: current element's upper 32 bit - // t0: address of next element's upper 32 bit + Register upper_bits = key; + __ lw(upper_bits, MemOperand(src_elements)); + __ Addu(src_elements, src_elements, kDoubleSize); + // upper_bits: current element's upper 32 bit + // src_elements: address of next element's upper 32 bit __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32)); // Non-hole double, copy value into a heap number. - __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required); - // a2: new heap number - __ lw(a0, MemOperand(t0, -12)); - __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset)); - __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset)); - __ mov(a0, a3); - __ sw(a2, MemOperand(a3)); - __ Addu(a3, a3, kIntSize); - __ RecordWrite(t2, - a0, - a2, + Register heap_number = receiver; + Register scratch2 = value; + Register scratch3 = t6; + __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map, + &gc_required); + // heap_number: new heap number + // Load mantissa of current element, src_elements + // point to exponent of next element. + __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset + - Register::kExponentOffset - kDoubleSize))); + __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset)); + __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset)); + __ mov(scratch2, dst_elements); + __ sw(heap_number, MemOperand(dst_elements)); + __ Addu(dst_elements, dst_elements, kIntSize); + __ RecordWrite(array, + scratch2, + heap_number, kRAHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, @@ -822,19 +950,20 @@ // Replace the-hole NaN with the-hole pointer. 
__ bind(&convert_hole); - __ sw(t3, MemOperand(a3)); - __ Addu(a3, a3, kIntSize); + __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); + __ sw(scratch2, MemOperand(dst_elements)); + __ Addu(dst_elements, dst_elements, kIntSize); __ bind(&entry); - __ Branch(&loop, lt, a3, Operand(t1)); + __ Branch(&loop, lt, dst_elements, Operand(dst_end)); - __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit()); + __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit()); // Replace receiver's backing store with newly created and filled FixedArray. - __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset)); - __ RecordWriteField(a2, + __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ RecordWriteField(receiver, JSObject::kElementsOffset, - t2, - t5, + array, + scratch, kRAHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, @@ -843,11 +972,11 @@ __ bind(&only_change_map); // Update receiver's map. - __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); - __ RecordWriteField(a2, + __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - a3, - t5, + target_map, + scratch, kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, @@ -924,7 +1053,7 @@ at, Operand(zero_reg)); } // Rule out short external strings. 
- STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ And(at, result, Operand(kShortExternalStringMask)); __ Branch(call_runtime, ne, at, Operand(zero_reg)); __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); @@ -960,16 +1089,17 @@ Register temp1, Register temp2, Register temp3) { - ASSERT(!input.is(result)); - ASSERT(!input.is(double_scratch1)); - ASSERT(!input.is(double_scratch2)); - ASSERT(!result.is(double_scratch1)); - ASSERT(!result.is(double_scratch2)); - ASSERT(!double_scratch1.is(double_scratch2)); - ASSERT(!temp1.is(temp2)); - ASSERT(!temp1.is(temp3)); - ASSERT(!temp2.is(temp3)); - ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!input.is(result)); + DCHECK(!input.is(double_scratch1)); + DCHECK(!input.is(double_scratch2)); + DCHECK(!result.is(double_scratch1)); + DCHECK(!result.is(double_scratch2)); + DCHECK(!double_scratch1.is(double_scratch2)); + DCHECK(!temp1.is(temp2)); + DCHECK(!temp1.is(temp3)); + DCHECK(!temp2.is(temp3)); + DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!masm->serializer_enabled()); // External references not serializable. Label zero, infinity, done; @@ -998,7 +1128,7 @@ __ mul_d(result, result, double_scratch2); __ sub_d(result, result, double_scratch1); // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1. 
- ASSERT(*reinterpret_cast<double*> + DCHECK(*reinterpret_cast<double*> (ExternalReference::math_exp_constants(8).address()) == 1); __ Move(double_scratch2, 1); __ add_d(result, result, double_scratch2); @@ -1010,8 +1140,8 @@ __ li(temp3, Operand(ExternalReference::math_exp_log_table())); __ sll(at, temp2, 3); __ Addu(temp3, temp3, Operand(at)); - __ lw(temp2, MemOperand(temp3, 0)); - __ lw(temp3, MemOperand(temp3, kPointerSize)); + __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset)); + __ lw(temp3, MemOperand(temp3, Register::kExponentOffset)); // The first word is loaded is the lower number register. if (temp2.code() < temp3.code()) { __ sll(at, temp1, 20); @@ -1040,42 +1170,42 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; #endif -static byte* GetNoCodeAgeSequence(uint32_t* length) { - // The sequence of instructions that is patched out for aging code is the - // following boilerplate stack-building prologue that is found in FUNCTIONS - static bool initialized = false; - static uint32_t sequence[kNoCodeAgeSequenceLength]; - byte* byte_sequence = reinterpret_cast<byte*>(sequence); - *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize; - if (!initialized) { - // Since patcher is a large object, allocate it dynamically when needed, - // to avoid overloading the stack in stress conditions. - SmartPointer<CodePatcher> - patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength)); - PredictableCodeSizeScope scope(patcher->masm(), *length); - patcher->masm()->Push(ra, fp, cp, a1); - patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); - patcher->masm()->Addu( - fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - initialized = true; - } - return byte_sequence; + +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + // Since patcher is a large object, allocate it dynamically when needed, + // to avoid overloading the stack in stress conditions. 
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in + // the process, before MIPS simulator ICache is setup. + SmartPointer<CodePatcher> patcher( + new CodePatcher(young_sequence_.start(), + young_sequence_.length() / Assembler::kInstrSize, + CodePatcher::DONT_FLUSH)); + PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); + patcher->masm()->Push(ra, fp, cp, a1); + patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); + patcher->masm()->Addu( + fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); +} + + +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; } +#endif -bool Code::IsYoungSequence(byte* sequence) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); - bool result = !memcmp(sequence, young_sequence, young_length); - ASSERT(result || - Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction); +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); return result; } -void Code::GetCodeAgeAndParity(byte* sequence, Age* age, +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity) { - if (IsYoungSequence(sequence)) { + if (IsYoungSequence(isolate, sequence)) { *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { @@ -1091,11 +1221,10 @@ byte* sequence, Code::Age age, MarkingParity parity) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { - CopyBytes(sequence, young_sequence, young_length); - CPU::FlushICache(sequence, young_length); + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); + CpuFeatures::FlushICache(sequence, young_length); 
} else { Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); diff -Nru nodejs-0.11.13/deps/v8/src/mips/codegen-mips.h nodejs-0.11.15/deps/v8/src/mips/codegen-mips.h --- nodejs-0.11.13/deps/v8/src/mips/codegen-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/codegen-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MIPS_CODEGEN_MIPS_H_ #define V8_MIPS_CODEGEN_MIPS_H_ -#include "ast.h" -#include "ic-inl.h" +#include "src/ast.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/mips/code-stubs-mips.cc nodejs-0.11.15/deps/v8/src/mips/code-stubs-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/code-stubs-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/code-stubs-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,393 +1,259 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "bootstrapper.h" -#include "code-stubs.h" -#include "codegen.h" -#include "regexp-macro-assembler.h" -#include "stub-cache.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/regexp-macro-assembler.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { void FastNewClosureStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a2 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry; + Register registers[] = { cp, a2 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); } void FastNewContextStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, a1 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void 
ToNumberStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void NumberToStringStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; + Register registers[] = { cp, a0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); } void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a3, a2, a1 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId( - Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; + Register registers[] = { cp, a3, a2, a1 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Smi(), + Representation::Tagged() }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); } void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a3, a2, a1, a0 }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - 
Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry; + Register registers[] = { cp, a3, a2, a1, a0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); } void CreateAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a2, a3 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { cp, a2, a3 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallFunctionStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1, a0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + UNIMPLEMENTED(); } -void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallConstructStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = {a1, a0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + UNIMPLEMENTED(); } void RegExpConstructResultStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a2, a1, a0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry; -} - - -void LoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - 
CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedLoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void StringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0, a2 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1, a0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a2, a1, a0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); + Register registers[] = { cp, a2, a1, a0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); } void TransitionElementsKindStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0, a1 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; + Register registers[] = { cp, a0, a1 }; Address entry = 
Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(entry)); } void CompareNilICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(CompareNilIC_Miss); + Register registers[] = { cp, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); } +const Register InterfaceDescriptor::ContextRegister() { return cp; } + + static void InitializeArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state + // cp -- context // a0 -- number of arguments // a1 -- function // a2 -- allocation site with elements kind - static Register registers_variable_args[] = { a1, a2, a0 }; - static Register registers_no_args[] = { a1, a2 }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_no_args; + Register registers[] = { cp, a1, a2 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = a0; - 
descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { cp, a1, a2, a0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry; } static void InitializeInternalArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state + // cp -- context // a0 -- number of arguments // a1 -- constructor function - static Register registers_variable_args[] = { a1, a0 }; - static Register registers_no_args[] = { a1 }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kInternalArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers_no_args; + Register registers[] = { cp, a1 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = a0; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { cp, a1, a0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + 
Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry; } void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void ToBooleanStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0 }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ToBooleanIC_Miss); + Register registers[] = { cp, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); } void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, 
CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); -} - - -void StoreGlobalStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1, a2, a0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(StoreIC_MissFromStubFailure); -} - - -void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a0, a3, a1, a2 }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void BinaryOpICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1, a0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + Register registers[] = { cp, a1, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); descriptor->SetMissHandler( - 
ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); } void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a2, a1, a0 }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite); + Register registers[] = { cp, a2, a1, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); } void StringAddStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { a1, a0 }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry; + Register registers[] = { cp, a1, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); } @@ -395,82 +261,72 @@ { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ArgumentAdaptorCall); - static Register registers[] = { a1, // JSFunction - cp, // context - a0, // actual number of arguments - a2, // expected number of arguments + Register registers[] = { cp, // context, + a1, // JSFunction + a0, // actual number of arguments + a2, // expected number of arguments }; - static Representation representations[] = { - Representation::Tagged(), // JSFunction + Representation representations[] = { Representation::Tagged(), // context + Representation::Tagged(), // JSFunction Representation::Integer32(), // actual number of arguments Representation::Integer32(), // expected number of arguments }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - 
descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::KeyedCall); - static Register registers[] = { cp, // context - a2, // key + Register registers[] = { cp, // context + a2, // key }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // key }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::NamedCall); - static Register registers[] = { cp, // context - a2, // name + Register registers[] = { cp, // context + a2, // name }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // name }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::CallHandler); - static Register registers[] = { cp, // context - a0, // receiver + Register registers[] = { cp, // context + a0, // receiver }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // receiver }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ApiFunctionCall); - 
static Register registers[] = { a0, // callee - t0, // call_data - a2, // holder - a1, // api_function_address - cp, // context + Register registers[] = { cp, // context + a0, // callee + t0, // call_data + a2, // holder + a1, // api_function_address }; - static Representation representations[] = { + Representation representations[] = { + Representation::Tagged(), // context Representation::Tagged(), // callee Representation::Tagged(), // call_data Representation::Tagged(), // holder Representation::External(), // api_function_address - Representation::Tagged(), // context }; - descriptor->register_param_count_ = 5; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } } @@ -494,25 +350,25 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. - Isolate* isolate = masm->isolate(); - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); - CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); - int param_count = descriptor->register_param_count_; + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); { // Call the runtime system in a fresh internal frame. FrameScope scope(masm, StackFrame::INTERNAL); - ASSERT(descriptor->register_param_count_ == 0 || - a0.is(descriptor->register_params_[param_count - 1])); + DCHECK(param_count == 0 || + a0.is(descriptor->GetEnvironmentParameterRegister( + param_count - 1))); // Push arguments, adjust sp. __ Subu(sp, sp, Operand(param_count * kPointerSize)); for (int i = 0; i < param_count; ++i) { // Store argument to stack. 
- __ sw(descriptor->register_params_[i], + __ sw(descriptor->GetEnvironmentParameterRegister(i), MemOperand(sp, (param_count-1-i) * kPointerSize)); } ExternalReference miss = descriptor->miss_handler(); - __ CallExternalReference(miss, descriptor->register_param_count_); + __ CallExternalReference(miss, param_count); } __ Ret(); @@ -526,11 +382,13 @@ // stub so you don't have to set up the frame. class ConvertToDoubleStub : public PlatformCodeStub { public: - ConvertToDoubleStub(Register result_reg_1, + ConvertToDoubleStub(Isolate* isolate, + Register result_reg_1, Register result_reg_2, Register source_reg, Register scratch_reg) - : result1_(result_reg_1), + : PlatformCodeStub(isolate), + result1_(result_reg_1), result2_(result_reg_2), source_(source_reg), zeros_(scratch_reg) { } @@ -545,8 +403,8 @@ class ModeBits: public BitField<OverwriteMode, 0, 2> {}; class OpBits: public BitField<Token::Value, 2, 14> {}; - Major MajorKey() { return ConvertToDouble; } - int MinorKey() { + Major MajorKey() const { return ConvertToDouble; } + int MinorKey() const { // Encode the parameters in a unique 16 bit value. return result1_.code() + (result2_.code() << 4) + @@ -559,13 +417,14 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { -#ifndef BIG_ENDIAN_FLOATING_POINT - Register exponent = result1_; - Register mantissa = result2_; -#else - Register exponent = result2_; - Register mantissa = result1_; -#endif + Register exponent, mantissa; + if (kArchEndian == kLittle) { + exponent = result1_; + mantissa = result2_; + } else { + exponent = result2_; + mantissa = result1_; + } Label not_special; // Convert from Smi to integer. 
__ sra(source_, source_, kSmiTagSize); @@ -671,8 +530,10 @@ Register input_high = scratch2; Register input_low = scratch3; - __ lw(input_low, MemOperand(input_reg, double_offset)); - __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize)); + __ lw(input_low, + MemOperand(input_reg, double_offset + Register::kMantissaOffset)); + __ lw(input_high, + MemOperand(input_reg, double_offset + Register::kExponentOffset)); Label normal_exponent, restore_sign; // Extract the biased exponent in result. @@ -758,10 +619,10 @@ void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3); - WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0); - stub1.GetCode(isolate); - stub2.GetCode(isolate); + WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3); + WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0); + stub1.GetCode(); + stub2.GetCode(); } @@ -789,7 +650,7 @@ // but it just ends up combining harmlessly with the last digit of the // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get // the most significant 1 to hit the last bit of the 12 bit sign and exponent. - ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); + DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; __ srl(at, the_int_, shift_distance); __ or_(scratch_, scratch_, at); @@ -850,7 +711,7 @@ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); __ Branch(&return_equal, ne, a0, Operand(t2)); - ASSERT(is_int16(GREATER) && is_int16(LESS)); + DCHECK(is_int16(GREATER) && is_int16(LESS)); __ Ret(USE_DELAY_SLOT); if (cc == le) { // undefined <= undefined should fail. 
@@ -864,7 +725,7 @@ } __ bind(&return_equal); - ASSERT(is_int16(GREATER) && is_int16(LESS)); + DCHECK(is_int16(GREATER) && is_int16(LESS)); __ Ret(USE_DELAY_SLOT); if (cc == less) { __ li(v0, Operand(GREATER)); // Things aren't less than themselves. @@ -903,7 +764,7 @@ if (cc != eq) { // All-zero means Infinity means equal. __ Ret(eq, v0, Operand(zero_reg)); - ASSERT(is_int16(GREATER) && is_int16(LESS)); + DCHECK(is_int16(GREATER) && is_int16(LESS)); __ Ret(USE_DELAY_SLOT); if (cc == le) { __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. @@ -924,7 +785,7 @@ Label* both_loaded_as_doubles, Label* slow, bool strict) { - ASSERT((lhs.is(a0) && rhs.is(a1)) || + DCHECK((lhs.is(a0) && rhs.is(a1)) || (lhs.is(a1) && rhs.is(a0))); Label lhs_is_smi; @@ -1042,7 +903,7 @@ Register rhs, Label* possible_strings, Label* not_both_strings) { - ASSERT((lhs.is(a0) && rhs.is(a1)) || + DCHECK((lhs.is(a0) && rhs.is(a1)) || (lhs.is(a1) && rhs.is(a0))); // a2 is object type of rhs. @@ -1133,7 +994,7 @@ // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); __ And(t2, lhs, Operand(rhs)); __ JumpIfNotSmi(t2, ¬_smis, t0); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: @@ -1151,8 +1012,6 @@ // f12, f14 are the double representations of the left hand side // and the right hand side if we have FPU. Otherwise a2, a3 represent // left hand side and a0, a1 represent right hand side. - - Isolate* isolate = masm->isolate(); Label nan; __ li(t0, Operand(LESS)); __ li(t1, Operand(GREATER)); @@ -1179,7 +1038,7 @@ __ bind(&nan); // NaN comparisons always fail. // Load whatever we need in v0 to make the comparison fail. 
- ASSERT(is_int16(GREATER) && is_int16(LESS)); + DCHECK(is_int16(GREATER) && is_int16(LESS)); __ Ret(USE_DELAY_SLOT); if (cc == lt || cc == le) { __ li(v0, Operand(GREATER)); @@ -1227,7 +1086,8 @@ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow); - __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3); + __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2, + a3); if (cc == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, @@ -1260,7 +1120,7 @@ if (cc == lt || cc == le) { ncr = GREATER; } else { - ASSERT(cc == gt || cc == ge); // Remaining cases. + DCHECK(cc == gt || cc == ge); // Remaining cases. ncr = LESS; } __ li(a0, Operand(Smi::FromInt(ncr))); @@ -1279,11 +1139,7 @@ void StoreRegistersStateStub::Generate(MacroAssembler* masm) { __ mov(t9, ra); __ pop(ra); - if (save_doubles_ == kSaveFPRegs) { - __ PushSafepointRegistersAndDoubles(); - } else { - __ PushSafepointRegisters(); - } + __ PushSafepointRegisters(); __ Jump(t9); } @@ -1291,12 +1147,7 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { __ mov(t9, ra); __ pop(ra); - __ StoreToSafepointRegisterSlot(t9, t9); - if (save_doubles_ == kSaveFPRegs) { - __ PopSafepointRegistersAndDoubles(); - } else { - __ PopSafepointRegisters(); - } + __ PopSafepointRegisters(); __ Jump(t9); } @@ -1315,9 +1166,9 @@ AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); - __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), + ExternalReference::store_buffer_overflow_function(isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { __ MultiPopFPU(kCallerSavedFPU); @@ -1446,7 +1297,7 @@ __ PrepareCallCFunction(0, 2, scratch2); __ MovToFloatParameters(double_base, double_exponent); __ 
CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), + ExternalReference::power_double_double_function(isolate()), 0, 2); } __ pop(ra); @@ -1507,11 +1358,11 @@ __ cvt_d_w(double_exponent, single_scratch); // Returning or bailing out. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); if (exponent_type_ == ON_STACK) { // The arguments are still on the stack. __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); // The stub is called from non-optimized code, which expects the result // as heap number in exponent. @@ -1520,7 +1371,7 @@ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); __ sdc1(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); - ASSERT(heapnumber.is(v0)); + DCHECK(heapnumber.is(v0)); __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); __ DropAndRet(2); } else { @@ -1530,7 +1381,7 @@ __ PrepareCallCFunction(0, 2, scratch); __ MovToFloatParameters(double_base, double_exponent); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), + ExternalReference::power_double_double_function(isolate()), 0, 2); } __ pop(ra); @@ -1562,79 +1413,69 @@ } -void StoreRegistersStateStub::GenerateAheadOfTime( - Isolate* isolate) { - StoreRegistersStateStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); - // Hydrogen code stubs need stub2 at snapshot time. - StoreRegistersStateStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); +void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { + StoreRegistersStateStub stub(isolate); + stub.GetCode(); } -void RestoreRegistersStateStub::GenerateAheadOfTime( - Isolate* isolate) { - RestoreRegistersStateStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); - // Hydrogen code stubs need stub2 at snapshot time. 
- RestoreRegistersStateStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); +void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { + RestoreRegistersStateStub stub(isolate); + stub.GetCode(); } void CodeStub::GenerateFPStubs(Isolate* isolate) { SaveFPRegsMode mode = kSaveFPRegs; - CEntryStub save_doubles(1, mode); - StoreBufferOverflowStub stub(mode); + CEntryStub save_doubles(isolate, 1, mode); + StoreBufferOverflowStub stub(isolate, mode); // These stubs might already be in the snapshot, detect that and don't // regenerate, which would lead to code stub initialization state being messed // up. Code* save_doubles_code; - if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { - save_doubles_code = *save_doubles.GetCode(isolate); + if (!save_doubles.FindCodeInCache(&save_doubles_code)) { + save_doubles_code = *save_doubles.GetCode(); } Code* store_buffer_overflow_code; - if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) { - store_buffer_overflow_code = *stub.GetCode(isolate); + if (!stub.FindCodeInCache(&store_buffer_overflow_code)) { + store_buffer_overflow_code = *stub.GetCode(); } isolate->set_fp_stubs_generated(true); } void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode(isolate); + CEntryStub stub(isolate, 1, kDontSaveFPRegs); + stub.GetCode(); } -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - bool do_gc, - bool always_allocate) { - // v0: result parameter for PerformGC, if any - // s0: number of arguments including receiver (C callee-saved) - // s1: pointer to the first argument (C callee-saved) - // s2: pointer to builtin function (C callee-saved) +void CEntryStub::Generate(MacroAssembler* masm) { + // Called from JavaScript; parameters are on stack as if calling JS function + // s0: number of arguments including receiver + // s1: size of arguments excluding receiver + // s2: pointer 
to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) - Isolate* isolate = masm->isolate(); + ProfileEntryHookStub::MaybeCallEntryHook(masm); - if (do_gc) { - // Move result passed in v0 into a0 to call PerformGC. - __ mov(a0, v0); - __ PrepareCallCFunction(2, 0, a1); - __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); - __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0); - } + // NOTE: s0-s2 hold the arguments of this function instead of a0-a2. + // The reason for this is that these arguments would need to be saved anyway + // so it's faster to set them up directly. + // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(isolate); - if (always_allocate) { - __ li(a0, Operand(scope_depth)); - __ lw(a1, MemOperand(a0)); - __ Addu(a1, a1, Operand(1)); - __ sw(a1, MemOperand(a0)); - } + // Compute the argv pointer in a callee-saved register. + __ Addu(s1, sp, s1); + + // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(save_doubles_); + + // s0: number of arguments including receiver (C callee-saved) + // s1: pointer to first argument (C callee-saved) + // s2: pointer to builtin function (C callee-saved) // Prepare arguments for C routine. // a0 = argc @@ -1646,7 +1487,7 @@ __ AssertStackIsAligned(); - __ li(a2, Operand(ExternalReference::isolate_address(isolate))); + __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); // To let the GC traverse the return address of the exit frames, we need to // know where the return address is. The CEntryStub is unmovable, so @@ -1677,134 +1518,71 @@ // Set up sp in the delay slot. masm->addiu(sp, sp, -kCArgsSlotsSize); // Make sure the stored 'ra' points to this position. 
- ASSERT_EQ(kNumInstructionsToJump, + DCHECK_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra)); } - if (always_allocate) { - // It's okay to clobber a2 and a3 here. v0 & v1 contain result. - __ li(a2, Operand(scope_depth)); - __ lw(a3, MemOperand(a2)); - __ Subu(a3, a3, Operand(1)); - __ sw(a3, MemOperand(a2)); - } - - // Check for failure result. - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - __ addiu(a2, v0, 1); - __ andi(t0, a2, kFailureTagMask); - __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg)); - // Restore stack (remove arg slots) in branch delay slot. - __ addiu(sp, sp, kCArgsSlotsSize); + // Runtime functions should not return 'the hole'. Allowing it to escape may + // lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); + __ Branch(&okay, ne, v0, Operand(t0)); + __ stop("The hole escaped"); + __ bind(&okay); + } + + // Check result for exception sentinel. + Label exception_returned; + __ LoadRoot(t0, Heap::kExceptionRootIndex); + __ Branch(&exception_returned, eq, t0, Operand(v0)); + + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + Label okay; + __ li(a2, Operand(pending_exception_address)); + __ lw(a2, MemOperand(a2)); + __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); + // Cannot use check here as it attempts to generate call into runtime. + __ Branch(&okay, eq, t0, Operand(a2)); + __ stop("Unexpected pending exception"); + __ bind(&okay); + } // Exit C frame and return. // v0:v1: result // sp: stack pointer // fp: frame pointer + // s0: still holds argc (callee-saved). __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN); - // Check if we should retry or throw exception. 
- Label retry; - __ bind(&failure_returned); - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize); - __ Branch(&retry, eq, t0, Operand(zero_reg)); + // Handling of exception. + __ bind(&exception_returned); // Retrieve the pending exception. - __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); - __ lw(v0, MemOperand(t0)); + __ li(a2, Operand(pending_exception_address)); + __ lw(v0, MemOperand(a2)); // Clear the pending exception. - __ li(a3, Operand(isolate->factory()->the_hole_value())); - __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); - __ sw(a3, MemOperand(t0)); + __ li(a3, Operand(isolate()->factory()->the_hole_value())); + __ sw(a3, MemOperand(a2)); // Special handling of termination exceptions which are uncatchable // by javascript code. + Label throw_termination_exception; __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex); - __ Branch(throw_termination_exception, eq, v0, Operand(t0)); + __ Branch(&throw_termination_exception, eq, v0, Operand(t0)); // Handle normal exception. - __ jmp(throw_normal_exception); - - __ bind(&retry); - // Last failure (v0) will be moved to (a0) for parameter when retrying. -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // Called from JavaScript; parameters are on stack as if calling JS function - // s0: number of arguments including receiver - // s1: size of arguments excluding receiver - // s2: pointer to builtin function - // fp: frame pointer (restored after C call) - // sp: stack pointer (restored as callee's sp after C call) - // cp: current context (C callee-saved) - - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - // NOTE: Invocations of builtins may return failure objects - // instead of a proper result. The builtin entry handles - // this by performing a garbage collection and retrying the - // builtin once. 
- - // NOTE: s0-s2 hold the arguments of this function instead of a0-a2. - // The reason for this is that these arguments would need to be saved anyway - // so it's faster to set them up directly. - // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. - - // Compute the argv pointer in a callee-saved register. - __ Addu(s1, sp, s1); - - // Enter the exit frame that transitions from JavaScript to C++. - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(save_doubles_); - - // s0: number of arguments (C callee-saved) - // s1: pointer to first argument (C callee-saved) - // s2: pointer to builtin function (C callee-saved) - - Label throw_normal_exception; - Label throw_termination_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ li(v0, Operand(reinterpret_cast<int32_t>(failure))); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - true); - - { FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(0, v0); - __ CallCFunction( - ExternalReference::out_of_memory_function(masm->isolate()), 0); - } + __ Throw(v0); __ bind(&throw_termination_exception); __ ThrowUncatchable(v0); - - __ bind(&throw_normal_exception); - __ Throw(v0); } @@ -1896,7 +1674,7 @@ __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. - __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); + __ LoadRoot(v0, Heap::kExceptionRootIndex); __ b(&exit); // b exposes branch delay slot. __ nop(); // Branch delay slot nop. 
@@ -1991,9 +1769,9 @@ // in the safepoint slot for register t0. void InstanceofStub::Generate(MacroAssembler* masm) { // Call site inlining and patching implies arguments in registers. - ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); + DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); // ReturnTrueFalse is only implemented for inlined call sites. - ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); + DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); // Fixed register usage throughout the stub: const Register object = a0; // Object (lhs). @@ -2043,7 +1821,7 @@ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); } else { - ASSERT(HasArgsInRegisters()); + DCHECK(HasArgsInRegisters()); // Patch the (relocated) inlined map check. // The offset was stored in t0 safepoint slot. @@ -2073,7 +1851,7 @@ __ Branch(&loop); __ bind(&is_instance); - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); if (!HasCallSiteInlineCheck()) { __ mov(v0, zero_reg); __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); @@ -2085,7 +1863,7 @@ __ PatchRelocatedValue(inline_site, scratch, v0); if (!ReturnTrueFalseObject()) { - ASSERT_EQ(Smi::FromInt(0), 0); + DCHECK_EQ(Smi::FromInt(0), 0); __ mov(v0, zero_reg); } } @@ -2121,7 +1899,7 @@ __ Branch(&object_not_null, ne, scratch, - Operand(masm->isolate()->factory()->null_value())); + Operand(isolate()->factory()->null_value())); __ li(v0, Operand(Smi::FromInt(1))); __ DropAndRet(HasArgsInRegisters() ? 
0 : 2); @@ -2161,31 +1939,12 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { Label miss; - Register receiver; - if (kind() == Code::KEYED_LOAD_IC) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - __ Branch(&miss, ne, a0, - Operand(masm->isolate()->factory()->prototype_string())); - receiver = a1; - } else { - ASSERT(kind() == Code::LOAD_IC); - // ----------- S t a t e ------------- - // -- a2 : name - // -- ra : return address - // -- a0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - receiver = a0; - } - - StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss); + Register receiver = LoadIC::ReceiverRegister(); + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, + t0, &miss); __ bind(&miss); - StubCompiler::TailCallBuiltin( - masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); } @@ -2270,7 +2029,7 @@ __ sw(a3, MemOperand(sp, 1 * kPointerSize)); __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -2325,7 +2084,7 @@ FixedArray::kHeaderSize + 2 * kPointerSize; // If there are no mapped parameters, we do not need the parameter_map. Label param_map_size; - ASSERT_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg)); __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0. __ sll(t5, a1, 1); @@ -2344,12 +2103,12 @@ __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT); // v0 = address of new object(s) (tagged) - // a2 = argument count (tagged) + // a2 = argument count (smi-tagged) // Get the arguments boilerplate from the current native context into t0. 
const int kNormalOffset = - Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX); + Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); const int kAliasedOffset = - Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); + Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX); __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset)); @@ -2364,22 +2123,23 @@ // v0 = address of new object (tagged) // a1 = mapped parameter count (tagged) - // a2 = argument count (tagged) - // t0 = address of boilerplate object (tagged) - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ lw(a3, FieldMemOperand(t0, i)); - __ sw(a3, FieldMemOperand(v0, i)); - } + // a2 = argument count (smi-tagged) + // t0 = address of arguments map (tagged) + __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset)); + __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); + __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ lw(a3, MemOperand(sp, 2 * kPointerSize)); + __ AssertNotSmi(a3); const int kCalleeOffset = JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize; __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); // Use the length (smi tagged) and set that as an in-object property too. + __ AssertSmi(a2); STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); const int kLengthOffset = JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize; @@ -2489,7 +2249,7 @@ // a2 = argument count (tagged) __ bind(&runtime); __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. 
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -2538,15 +2298,18 @@ // Get the arguments boilerplate from the current native context. __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset)); - __ lw(t0, MemOperand(t0, Context::SlotOffset( - Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX))); + __ lw(t0, MemOperand( + t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX))); - // Copy the JS object part. - __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize); + __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset)); + __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); + __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); // Get the length (smi tagged) and set that as an in-object property too. STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); __ lw(a1, MemOperand(sp, 0 * kPointerSize)); + __ AssertSmi(a1); __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize)); @@ -2587,7 +2350,7 @@ // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); } @@ -2596,7 +2359,7 @@ // time or if regexp entry in generated code is turned off runtime switch or // at compilation. #ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. @@ -2610,8 +2373,6 @@ const int kSubjectOffset = 2 * kPointerSize; const int kJSRegExpOffset = 3 * kPointerSize; - Isolate* isolate = masm->isolate(); - Label runtime; // Allocation of registers for this function. 
These are in callee save // registers and will be preserved by the call to the native RegExp code, as @@ -2626,9 +2387,9 @@ // Ensure that a RegExp stack is allocated. ExternalReference address_of_regexp_stack_memory_address = ExternalReference::address_of_regexp_stack_memory_address( - isolate); + isolate()); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(isolate); + ExternalReference::address_of_regexp_stack_memory_size(isolate()); __ li(a0, Operand(address_of_regexp_stack_memory_size)); __ lw(a0, MemOperand(a0, 0)); __ Branch(&runtime, eq, a0, Operand(zero_reg)); @@ -2736,8 +2497,8 @@ STATIC_ASSERT(kSeqStringTag == 0); __ And(at, a0, Operand(kStringRepresentationMask)); // The underlying external string is never a short external string. - STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); - STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7). // (5) Sequential string. Load regexp code according to encoding. @@ -2775,7 +2536,7 @@ // subject: Subject string // regexp_data: RegExp data (FixedArray) // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(isolate->counters()->regexp_entry_native(), + __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, a0, a2); // Isolates: note we add an additional parameter here (isolate pointer). @@ -2799,7 +2560,7 @@ // Argument 9: Pass current isolate address. // CFunctionArgumentOperand handles MIPS stack argument slots. 
- __ li(a0, Operand(ExternalReference::isolate_address(isolate))); + __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); __ sw(a0, MemOperand(sp, 5 * kPointerSize)); // Argument 8: Indicate that this is a direct call from JavaScript. @@ -2821,7 +2582,7 @@ // Argument 5: static offsets vector buffer. __ li(a0, Operand( - ExternalReference::address_of_static_offsets_vector(isolate))); + ExternalReference::address_of_static_offsets_vector(isolate()))); __ sw(a0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data @@ -2854,7 +2615,7 @@ // Locate the code entry and call it. __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag)); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm, t9); __ LeaveExitFrame(false, no_reg, true); @@ -2876,9 +2637,9 @@ // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ li(a1, Operand(isolate->factory()->the_hole_value())); + __ li(a1, Operand(isolate()->factory()->the_hole_value())); __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + isolate()))); __ lw(v0, MemOperand(a2, 0)); __ Branch(&runtime, eq, v0, Operand(a1)); @@ -2896,7 +2657,7 @@ __ bind(&failure); // For failure and exception return null. - __ li(v0, Operand(isolate->factory()->null_value())); + __ li(v0, Operand(isolate()->factory()->null_value())); __ DropAndRet(4); // Process the result from the native regexp code. @@ -2957,7 +2718,7 @@ // Get the static offsets vector filled by the native regexp code. 
ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(isolate); + ExternalReference::address_of_static_offsets_vector(isolate()); __ li(a2, Operand(address_of_static_offsets_vector)); // a1: number of capture registers @@ -2988,7 +2749,7 @@ // Do the runtime call to execute the regexp. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); // Deferred code for string handling. // (6) Not a long external string? If yes, go to (8). @@ -3044,9 +2805,9 @@ // a3 : slot in feedback vector (Smi) Label initialize, done, miss, megamorphic, not_array_function; - ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), + DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), masm->isolate()->heap()->megamorphic_symbol()); - ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), + DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), masm->isolate()->heap()->uninitialized_symbol()); // Load the cache state into t0. @@ -3110,7 +2871,7 @@ __ SmiTag(a0); __ MultiPush(kSavedRegs); - CreateAllocationSiteStub create_stub; + CreateAllocationSiteStub create_stub(masm->isolate()); __ CallStub(&create_stub); __ MultiPop(kSavedRegs); @@ -3135,14 +2896,66 @@ } -void CallFunctionStub::Generate(MacroAssembler* masm) { +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset)); + + // Do not transform the receiver for strict mode functions. + int32_t strict_mode_function_mask = + 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); + // Do not transform the receiver for native (Compilerhints already in a3). 
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize); + __ And(at, t0, Operand(strict_mode_function_mask | native_mask)); + __ Branch(cont, ne, at, Operand(zero_reg)); +} + + +static void EmitSlowCase(MacroAssembler* masm, + int argc, + Label* non_function) { + // Check for function proxy. + __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE)); + __ push(a1); // put proxy as additional argument + __ li(a0, Operand(argc + 1, RelocInfo::NONE32)); + __ mov(a2, zero_reg); + __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ bind(non_function); + __ sw(a1, MemOperand(sp, argc * kPointerSize)); + __ li(a0, Operand(argc)); // Set up the number of arguments. + __ mov(a2, zero_reg); + __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { + // Wrap the receiver and patch it back onto the stack. + { FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ Push(a1, a3); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ pop(a1); + } + __ Branch(USE_DELAY_SLOT, cont); + __ sw(v0, MemOperand(sp, argc * kPointerSize)); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { // a1 : the function to call - // a2 : feedback vector - // a3 : (only if a2 is not the megamorphic symbol) slot in feedback - // vector (Smi) Label slow, non_function, wrap, cont; - if (NeedsChecks()) { + if (needs_checks) { // Check that the function is really a JavaScript function. 
// a1: pushed function (to be verified) __ JumpIfSmi(a1, &non_function); @@ -3150,36 +2963,21 @@ // Goto slow case if we do not have a function. __ GetObjectType(a1, t0, t0); __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE)); - - if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); - // Type information was updated. Because we may call Array, which - // expects either undefined or an AllocationSite in a2 we need - // to set a2 to undefined. - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - } } // Fast-case: Invoke the function now. // a1: pushed function - ParameterCount actual(argc_); + ParameterCount actual(argc); - if (CallAsMethod()) { - if (NeedsChecks()) { - // Do not transform the receiver for strict mode functions and natives. - __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset)); - int32_t strict_mode_function_mask = - 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); - int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize); - __ And(at, t0, Operand(strict_mode_function_mask | native_mask)); - __ Branch(&cont, ne, at, Operand(zero_reg)); + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); } // Compute the receiver in sloppy mode. - __ lw(a3, MemOperand(sp, argc_ * kPointerSize)); + __ lw(a3, MemOperand(sp, argc * kPointerSize)); - if (NeedsChecks()) { + if (needs_checks) { __ JumpIfSmi(a3, &wrap); __ GetObjectType(a3, t0, t0); __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); @@ -3189,60 +2987,28 @@ __ bind(&cont); } + __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); - if (NeedsChecks()) { + if (needs_checks) { // Slow-case: Non-function called. __ bind(&slow); - if (RecordCallTarget()) { - // If there is a call target cache, mark it megamorphic in the - // non-function case. 
MegamorphicSentinel is an immortal immovable - // object (megamorphic symbol) so no write barrier is needed. - ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), - masm->isolate()->heap()->megamorphic_symbol()); - __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t1, a2, Operand(t1)); - __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); - __ sw(at, FieldMemOperand(t1, FixedArray::kHeaderSize)); - } - // Check for function proxy. - __ Branch(&non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE)); - __ push(a1); // Put proxy as additional argument. - __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32)); - __ li(a2, Operand(0, RelocInfo::NONE32)); - __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); - { - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); - __ Jump(adaptor, RelocInfo::CODE_TARGET); - } - - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). - __ bind(&non_function); - __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); - __ li(a0, Operand(argc_)); // Set up the number of arguments. - __ li(a2, Operand(0, RelocInfo::NONE32)); - __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + EmitSlowCase(masm, argc, &non_function); } - if (CallAsMethod()) { + if (call_as_method) { __ bind(&wrap); // Wrap the receiver and patch it back onto the stack. 
- { FrameScope frame_scope(masm, StackFrame::INTERNAL); - __ Push(a1, a3); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ pop(a1); - } - __ mov(a0, v0); - __ sw(a0, MemOperand(sp, argc_ * kPointerSize)); - __ jmp(&cont); + EmitWrapCase(masm, argc, &cont); } } +void CallFunctionStub::Generate(MacroAssembler* masm) { + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + void CallConstructStub::Generate(MacroAssembler* masm) { // a0 : number of arguments // a1 : the function to call @@ -3303,7 +3069,153 @@ // Set expected number of arguments to zero (not changing r0). __ li(a2, Operand(0, RelocInfo::NONE32)); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + RelocInfo::CODE_TARGET); +} + + +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ lw(vector, FieldMemOperand(vector, + JSFunction::kSharedFunctionInfoOffset)); + __ lw(vector, FieldMemOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // a1 - function + // a3 - slot id + Label miss; + + EmitLoadTypeFeedbackVector(masm, a2); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at); + __ Branch(&miss, ne, a1, Operand(at)); + + __ li(a0, Operand(arg_count())); + __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); + __ Addu(at, a2, Operand(at)); + __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize)); + + // Verify that t0 contains an AllocationSite + __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&miss, ne, t1, Operand(at)); + + __ mov(a2, t0); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after 
a miss. + CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + // Unreachable. + __ stop("Unexpected code address"); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // r1 - function + // r3 - slot id (Smi) + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, a2); + + // The checks. First, does r1 match the recorded monomorphic target? + __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); + __ Addu(t0, a2, Operand(t0)); + __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); + __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0)); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + // Compute the receiver in sloppy mode. + __ lw(a3, MemOperand(sp, argc * kPointerSize)); + + __ JumpIfSmi(a3, &wrap); + __ GetObjectType(a3, t0, t0); + __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); + + __ bind(&cont); + } + + __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(masm, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); + __ Branch(&slow_start, eq, t0, Operand(at)); + __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); + __ Branch(&miss, eq, t0, Operand(at)); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. 
+ __ AssertNotSmi(t0); + __ GetObjectType(t0, t1, t1); + __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE)); + __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); + __ Addu(t0, a2, Operand(t0)); + __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); + __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); + __ Branch(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + // Check that the function is really a JavaScript function. + // r1: pushed function (to be verified) + __ JumpIfSmi(a1, &non_function); + + // Goto slow case if we do not have a function. + __ GetObjectType(a1, t0, t0); + __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE)); + __ Branch(&have_js_function); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ Push(t0, a1, a2, a3); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to a1 and exit the internal frame. + __ mov(a1, v0); + } } @@ -3314,9 +3226,9 @@ Label got_char_code; Label sliced_string; - ASSERT(!t0.is(index_)); - ASSERT(!t0.is(result_)); - ASSERT(!t0.is(object_)); + DCHECK(!t0.is(index_)); + DCHECK(!t0.is(result_)); + DCHECK(!t0.is(object_)); // If the receiver is a smi trigger the non-string case. 
__ JumpIfSmi(object_, receiver_not_string_); @@ -3369,9 +3281,9 @@ if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kHiddenNumberToSmi, 1); + __ CallRuntime(Runtime::kNumberToSmi, 1); } // Save the conversion result before the pop instructions below @@ -3395,7 +3307,7 @@ call_helper.BeforeCall(masm); __ sll(index_, index_, kSmiTagSize); __ Push(object_, index_); - __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); __ Move(result_, v0); @@ -3412,12 +3324,12 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { // Fast case of Heap::LookupSingleCharacterStringFromCode. - ASSERT(!t0.is(result_)); - ASSERT(!t0.is(code_)); + DCHECK(!t0.is(result_)); + DCHECK(!t0.is(code_)); STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); + DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); __ And(t0, code_, Operand(kSmiTagMask | @@ -3460,113 +3372,42 @@ }; -void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags) { - bool ascii = (flags & COPY_ASCII) != 0; - bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; - - if (dest_always_aligned && FLAG_debug_code) { - // Check that destination is actually word aligned if the flag says - // that it is. 
- __ And(scratch4, dest, Operand(kPointerAlignmentMask)); +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding) { + if (FLAG_debug_code) { + // Check that destination is word aligned. + __ And(scratch, dest, Operand(kPointerAlignmentMask)); __ Check(eq, kDestinationOfCopyNotAligned, - scratch4, + scratch, Operand(zero_reg)); } - const int kReadAlignment = 4; - const int kReadAlignmentMask = kReadAlignment - 1; - // Ensure that reading an entire aligned word containing the last character - // of a string will not read outside the allocated area (because we pad up - // to kObjectAlignment). - STATIC_ASSERT(kObjectAlignment >= kReadAlignment); // Assumes word reads and writes are little endian. // Nothing to do for zero characters. Label done; - if (!ascii) { - __ addu(count, count, count); + if (encoding == String::TWO_BYTE_ENCODING) { + __ Addu(count, count, count); } - __ Branch(&done, eq, count, Operand(zero_reg)); - Label byte_loop; - // Must copy at least eight bytes, otherwise just do it one byte at a time. - __ Subu(scratch1, count, Operand(8)); - __ Addu(count, dest, Operand(count)); - Register limit = count; // Read until src equals this. - __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg)); - - if (!dest_always_aligned) { - // Align dest by byte copying. Copies between zero and three bytes. 
- __ And(scratch4, dest, Operand(kReadAlignmentMask)); - Label dest_aligned; - __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg)); - Label aligned_loop; - __ bind(&aligned_loop); - __ lbu(scratch1, MemOperand(src)); - __ addiu(src, src, 1); - __ sb(scratch1, MemOperand(dest)); - __ addiu(dest, dest, 1); - __ addiu(scratch4, scratch4, 1); - __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask)); - __ bind(&dest_aligned); - } - - Label simple_loop; - - __ And(scratch4, src, Operand(kReadAlignmentMask)); - __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg)); - - // Loop for src/dst that are not aligned the same way. - // This loop uses lwl and lwr instructions. These instructions - // depend on the endianness, and the implementation assumes little-endian. - { - Label loop; - __ bind(&loop); - __ lwr(scratch1, MemOperand(src)); - __ Addu(src, src, Operand(kReadAlignment)); - __ lwl(scratch1, MemOperand(src, -1)); - __ sw(scratch1, MemOperand(dest)); - __ Addu(dest, dest, Operand(kReadAlignment)); - __ Subu(scratch2, limit, dest); - __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); - } - - __ Branch(&byte_loop); - - // Simple loop. - // Copy words from src to dest, until less than four bytes left. - // Both src and dest are word aligned. - __ bind(&simple_loop); - { - Label loop; - __ bind(&loop); - __ lw(scratch1, MemOperand(src)); - __ Addu(src, src, Operand(kReadAlignment)); - __ sw(scratch1, MemOperand(dest)); - __ Addu(dest, dest, Operand(kReadAlignment)); - __ Subu(scratch2, limit, dest); - __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); - } + Register limit = count; // Read until dest equals this. + __ Addu(limit, dest, Operand(count)); + Label loop_entry, loop; // Copy bytes from src to dest until dest hits limit. - __ bind(&byte_loop); - // Test if dest has already reached the limit. 
- __ Branch(&done, ge, dest, Operand(limit)); - __ lbu(scratch1, MemOperand(src)); - __ addiu(src, src, 1); - __ sb(scratch1, MemOperand(dest)); - __ addiu(dest, dest, 1); - __ Branch(&byte_loop); + __ Branch(&loop_entry); + __ bind(&loop); + __ lbu(scratch, MemOperand(src)); + __ Addu(src, src, Operand(1)); + __ sb(scratch, MemOperand(dest)); + __ Addu(dest, dest, Operand(1)); + __ bind(&loop_entry); + __ Branch(&loop, lt, dest, Operand(limit)); __ bind(&done); } @@ -3768,7 +3609,7 @@ // Handle external string. // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ And(t0, a1, Operand(kShortExternalStringTag)); __ Branch(&runtime, ne, t0, Operand(zero_reg)); __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset)); @@ -3800,8 +3641,8 @@ // a2: result string length // t1: first character of substring to copy STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong( - masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharacters( + masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING); __ jmp(&return_v0); // Allocate and copy the resulting two-byte string. @@ -3820,17 +3661,17 @@ // a2: result length. // t1: first character of substring to copy. STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong( - masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharacters( + masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING); __ bind(&return_v0); - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1, a3, t0); __ DropAndRet(3); // Just jump to runtime to create the sub string. 
__ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubString, 3, 1); __ bind(&single_char); // v0: original string @@ -3860,7 +3701,7 @@ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); __ Branch(&check_zero_length, eq, length, Operand(scratch2)); __ bind(&strings_not_equal); - ASSERT(is_int16(NOT_EQUAL)); + DCHECK(is_int16(NOT_EQUAL)); __ Ret(USE_DELAY_SLOT); __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); @@ -3869,7 +3710,7 @@ __ bind(&check_zero_length); STATIC_ASSERT(kSmiTag == 0); __ Branch(&compare_chars, ne, length, Operand(zero_reg)); - ASSERT(is_int16(EQUAL)); + DCHECK(is_int16(EQUAL)); __ Ret(USE_DELAY_SLOT); __ li(v0, Operand(Smi::FromInt(EQUAL))); @@ -3912,7 +3753,7 @@ // Compare lengths - strings up to min-length are equal. __ bind(&compare_lengths); - ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); + DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); // Use length_delta as result if it's zero. __ mov(scratch2, length_delta); __ mov(scratch4, zero_reg); @@ -3968,7 +3809,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { Label runtime; - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); // Stack frame on entry. // sp[0]: right string @@ -3995,207 +3836,7 @@ GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); -} - - -void ArrayPushStub::Generate(MacroAssembler* masm) { - Register receiver = a0; - Register scratch = a1; - - int argc = arguments_count(); - - if (argc == 0) { - // Nothing to do, just return the length. 
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ DropAndRet(argc + 1); - return; - } - - Isolate* isolate = masm->isolate(); - - if (argc != 1) { - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - Label call_builtin, attempt_to_grow_elements, with_write_barrier; - - Register elements = t2; - Register end_elements = t1; - // Get the elements array of the object. - __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check that the elements are in fast mode and writable. - __ CheckMap(elements, - scratch, - Heap::kFixedArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); - } - - // Get the array's length into scratch and calculate new length. - __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Addu(scratch, scratch, Operand(Smi::FromInt(argc))); - - // Get the elements' length. - __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - const int kEndElementsOffset = - FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check if we could survive without allocation. - __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0)); - - // Check if value is a smi. - __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); - __ JumpIfNotSmi(t0, &with_write_barrier); - - // Store the value. - // We may need a register containing the address end_elements below, - // so write back the value in end_elements. - __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); - __ Addu(end_elements, elements, end_elements); - __ Addu(end_elements, end_elements, kEndElementsOffset); - __ sw(t0, MemOperand(end_elements)); - } else { - // Check if we could survive without allocation. 
- __ Branch(&call_builtin, gt, scratch, Operand(t0)); - - __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); - __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2, - &call_builtin, argc * kDoubleSize); - } - - // Save new length. - __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ mov(v0, scratch); - __ DropAndRet(argc + 1); - - if (IsFastDoubleElementsKind(elements_kind())) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ bind(&with_write_barrier); - - if (IsFastSmiElementsKind(elements_kind())) { - if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); - - __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(&call_builtin, eq, t3, Operand(at)); - - ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) - ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset)); - __ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX)); - const int header_size = FixedArrayBase::kHeaderSize; - // Verify that the object can be transitioned in place. - const int origin_offset = header_size + elements_kind() * kPointerSize; - __ lw(a2, FieldMemOperand(receiver, origin_offset)); - __ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset)); - __ Branch(&call_builtin, ne, a2, Operand(at)); - - - const int target_offset = header_size + target_kind * kPointerSize; - __ lw(a3, FieldMemOperand(a3, target_offset)); - __ mov(a2, receiver); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - masm, DONT_TRACK_ALLOCATION_SITE, NULL); - } - - // Save new length. - __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Store the value. 
- // We may need a register containing the address end_elements below, so write - // back the value in end_elements. - __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); - __ Addu(end_elements, elements, end_elements); - __ Addu(end_elements, end_elements, kEndElementsOffset); - __ sw(t0, MemOperand(end_elements)); - - __ RecordWrite(elements, - end_elements, - t0, - kRAHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ mov(v0, scratch); - __ DropAndRet(argc + 1); - - __ bind(&attempt_to_grow_elements); - // scratch: array's length + 1. - - if (!FLAG_inline_new) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case the - // new element is non-Smi. For now, delegate to the builtin. - if (IsFastSmiElementsKind(elements_kind())) { - __ JumpIfNotSmi(a2, &call_builtin); - } - - // We could be lucky and the elements array could be at the top of new-space. - // In this case we can just grow it in place by moving the allocation pointer - // up. - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); - - const int kAllocationDelta = 4; - ASSERT(kAllocationDelta >= argc); - // Load top and check if it is the end of elements. 
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); - __ Addu(end_elements, elements, end_elements); - __ Addu(end_elements, end_elements, Operand(kEndElementsOffset)); - __ li(t0, Operand(new_space_allocation_top)); - __ lw(a3, MemOperand(t0)); - __ Branch(&call_builtin, ne, a3, Operand(end_elements)); - - __ li(t3, Operand(new_space_allocation_limit)); - __ lw(t3, MemOperand(t3)); - __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize)); - __ Branch(&call_builtin, hi, a3, Operand(t3)); - - // We fit and could grow elements. - // Update new_space_allocation_top. - __ sw(a3, MemOperand(t0)); - // Push the argument. - __ sw(a2, MemOperand(end_elements)); - // Fill the rest with holes. - __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); - for (int i = 1; i < kAllocationDelta; i++) { - __ sw(a3, MemOperand(end_elements, i * kPointerSize)); - } - - // Update elements' and array's sizes. - __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); - __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta))); - __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Elements are in new space, so write barrier is not required. - __ mov(v0, scratch); - __ DropAndRet(argc + 1); - - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } @@ -4205,12 +3846,11 @@ // -- a0 : right // -- ra : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); // Load a2 with the allocation site. We stick an undefined dummy value here // and replace it with the real allocation site later when we instantiate this // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). 
- __ li(a2, handle(isolate->heap()->undefined_value())); + __ li(a2, handle(isolate()->heap()->undefined_value())); // Make sure that we actually patched the allocation site. if (FLAG_debug_code) { @@ -4223,13 +3863,13 @@ // Tail call into the stub that handles binary operations with allocation // sites. - BinaryOpWithAllocationSiteStub stub(state_); + BinaryOpWithAllocationSiteStub stub(isolate(), state_); __ TailCallStub(&stub); } void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMI); + DCHECK(state_ == CompareIC::SMI); Label miss; __ Or(a2, a1, a0); __ JumpIfNotSmi(a2, &miss); @@ -4252,7 +3892,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::NUMBER); + DCHECK(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; @@ -4305,7 +3945,7 @@ __ BranchF(&fpu_lt, NULL, lt, f0, f2); // Otherwise it's greater, so just fall thru, and return. - ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); + DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); __ Ret(USE_DELAY_SLOT); __ li(v0, Operand(GREATER)); @@ -4319,9 +3959,9 @@ __ bind(&unordered); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, CompareIC::GENERIC); - __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { @@ -4345,7 +3985,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::INTERNALIZED_STRING); + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); Label miss; // Registers containing left and right operands respectively. @@ -4369,13 +4009,13 @@ // Make sure a0 is non-zero. At this point input operands are // guaranteed to be non-zero. 
- ASSERT(right.is(a0)); + DCHECK(right.is(a0)); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); __ mov(v0, right); // Internalized strings are compared by identity. __ Ret(ne, left, Operand(right)); - ASSERT(is_int16(EQUAL)); + DCHECK(is_int16(EQUAL)); __ Ret(USE_DELAY_SLOT); __ li(v0, Operand(Smi::FromInt(EQUAL))); @@ -4385,8 +4025,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::UNIQUE_NAME); - ASSERT(GetCondition() == eq); + DCHECK(state_ == CompareIC::UNIQUE_NAME); + DCHECK(GetCondition() == eq); Label miss; // Registers containing left and right operands respectively. @@ -4416,7 +4056,7 @@ __ Branch(&done, ne, left, Operand(right)); // Make sure a0 is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(a0)); + DCHECK(right.is(a0)); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); __ li(v0, Operand(Smi::FromInt(EQUAL))); @@ -4429,7 +4069,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + DCHECK(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -4472,7 +4112,7 @@ // because we already know they are not identical. We know they are both // strings. if (equality) { - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); STATIC_ASSERT(kInternalizedTag == 0); __ Or(tmp3, tmp1, Operand(tmp2)); __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); @@ -4480,7 +4120,7 @@ __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); // Make sure a0 is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(a0)); + DCHECK(right.is(a0)); __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); // In the delay slot. 
__ bind(&is_symbol); @@ -4506,7 +4146,7 @@ if (equality) { __ TailCallRuntime(Runtime::kStringEquals, 2, 1); } else { - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } __ bind(&miss); @@ -4515,7 +4155,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + DCHECK(state_ == CompareIC::OBJECT); Label miss; __ And(a2, a1, Operand(a0)); __ JumpIfSmi(a2, &miss); @@ -4525,7 +4165,7 @@ __ GetObjectType(a1, a2, a2); __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); - ASSERT(GetCondition() == eq); + DCHECK(GetCondition() == eq); __ Ret(USE_DELAY_SLOT); __ subu(v0, a0, a1); @@ -4555,7 +4195,7 @@ { // Call the runtime system in a fresh internal frame. ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); FrameScope scope(masm, StackFrame::INTERNAL); __ Push(a1, a0); __ Push(ra, a1, a0); @@ -4600,7 +4240,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { intptr_t loc = - reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); + reinterpret_cast<intptr_t>(GetCode().location()); __ Move(t9, target); __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE); __ Call(ra); @@ -4614,7 +4254,7 @@ Register properties, Handle<Name> name, Register scratch0) { - ASSERT(name->IsUniqueName()); + DCHECK(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -4631,19 +4271,19 @@ Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. 
- ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ sll(at, index, 1); __ Addu(index, index, at); Register entity_name = scratch0; // Having undefined at this place means the name is not contained. - ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); Register tmp = properties; __ sll(scratch0, index, 1); __ Addu(tmp, properties, scratch0); __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); - ASSERT(!tmp.is(entity_name)); + DCHECK(!tmp.is(entity_name)); __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); __ Branch(done, eq, entity_name, Operand(tmp)); @@ -4675,7 +4315,7 @@ __ MultiPush(spill_mask); __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ li(a1, Operand(Handle<Name>(name))); - NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); __ CallStub(&stub); __ mov(at, v0); __ MultiPop(spill_mask); @@ -4696,10 +4336,10 @@ Register name, Register scratch1, Register scratch2) { - ASSERT(!elements.is(scratch1)); - ASSERT(!elements.is(scratch2)); - ASSERT(!name.is(scratch1)); - ASSERT(!name.is(scratch2)); + DCHECK(!elements.is(scratch1)); + DCHECK(!elements.is(scratch2)); + DCHECK(!name.is(scratch1)); + DCHECK(!name.is(scratch2)); __ AssertName(name); @@ -4718,7 +4358,7 @@ // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(NameDictionary::GetProbeOffset(i) < + DCHECK(NameDictionary::GetProbeOffset(i) < 1 << (32 - Name::kHashFieldOffset)); __ Addu(scratch2, scratch2, Operand( NameDictionary::GetProbeOffset(i) << Name::kHashShift)); @@ -4727,7 +4367,7 @@ __ And(scratch2, scratch1, scratch2); // Scale the index by multiplying by the element size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); // scratch2 = scratch2 * 3. 
__ sll(at, scratch2, 1); @@ -4747,14 +4387,14 @@ __ MultiPush(spill_mask); if (name.is(a0)) { - ASSERT(!elements.is(a1)); + DCHECK(!elements.is(a1)); __ Move(a1, name); __ Move(a0, elements); } else { __ Move(a0, elements); __ Move(a1, name); } - NameDictionaryLookupStub stub(POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); __ CallStub(&stub); __ mov(scratch2, a2); __ mov(at, v0); @@ -4803,7 +4443,7 @@ // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(NameDictionary::GetProbeOffset(i) < + DCHECK(NameDictionary::GetProbeOffset(i) < 1 << (32 - Name::kHashFieldOffset)); __ Addu(index, hash, Operand( NameDictionary::GetProbeOffset(i) << Name::kHashShift)); @@ -4814,14 +4454,14 @@ __ And(index, mask, index); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); // index *= 3. __ mov(at, index); __ sll(index, index, 1); __ Addu(index, index, at); - ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); __ sll(index, index, 2); __ Addu(index, index, dictionary); __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); @@ -4862,16 +4502,11 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); + StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); + stub1.GetCode(); // Hydrogen code stubs need stub2 at snapshot time. - StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); -} - - -bool CodeStub::CanUseFPRegisters() { - return true; // FPU is a base requirement for V8. + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); } @@ -4963,17 +4598,16 @@ __ PrepareCallCFunction(argument_count, regs_.scratch0()); Register address = a0.is(regs_.address()) ? 
regs_.scratch0() : regs_.address(); - ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(a0)); + DCHECK(!address.is(regs_.object())); + DCHECK(!address.is(a0)); __ Move(address, regs_.address()); __ Move(a0, regs_.object()); __ Move(a1, address); - __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), + ExternalReference::incremental_marking_record_write_function(isolate()), argument_count); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); } @@ -5133,8 +4767,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); - __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1, kSaveFPRegs); + __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; __ lw(a1, MemOperand(fp, parameter_count_offset)); @@ -5150,7 +4784,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { - ProfileEntryHookStub stub; + ProfileEntryHookStub stub(masm->isolate()); __ push(ra); __ CallStub(&stub); __ pop(ra); @@ -5186,24 +4820,24 @@ int frame_alignment = masm->ActivationFrameAlignment(); if (frame_alignment > kPointerSize) { __ mov(s5, sp); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); __ And(sp, sp, Operand(-frame_alignment)); } __ Subu(sp, sp, kCArgsSlotsSize); #if defined(V8_HOST_ARCH_MIPS) int32_t entry_hook = - reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook()); + reinterpret_cast<int32_t>(isolate()->function_entry_hook()); __ li(t9, Operand(entry_hook)); #else // Under the simulator we need to indirect the entry hook 
through a // trampoline function at a known address. // It additionally takes an isolate as a third parameter. - __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); __ li(t9, Operand(ExternalReference(&dispatcher, ExternalReference::BUILTIN_CALL, - masm->isolate()))); + isolate()))); #endif // Call C function through t9 to conform ABI for PIC. __ Call(t9); @@ -5225,14 +4859,14 @@ static void CreateArrayDispatch(MacroAssembler* masm, AllocationSiteOverrideMode mode) { if (mode == DISABLE_ALLOCATION_SITES) { - T stub(GetInitialFastElementsKind(), mode); + T stub(masm->isolate(), GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { int last_index = GetSequenceIndexFromFastElementsKind( TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); + T stub(masm->isolate(), kind); __ TailCallStub(&stub, eq, a3, Operand(kind)); } @@ -5253,12 +4887,12 @@ // sp[0] - last argument Label normal_sequence; if (mode == DONT_OVERRIDE) { - ASSERT(FAST_SMI_ELEMENTS == 0); - ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - ASSERT(FAST_ELEMENTS == 2); - ASSERT(FAST_HOLEY_ELEMENTS == 3); - ASSERT(FAST_DOUBLE_ELEMENTS == 4); - ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + DCHECK(FAST_SMI_ELEMENTS == 0); + DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); + DCHECK(FAST_ELEMENTS == 2); + DCHECK(FAST_HOLEY_ELEMENTS == 3); + DCHECK(FAST_DOUBLE_ELEMENTS == 4); + DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); // is the low bit set? If so, we are holey and that is good. 
__ And(at, a3, Operand(1)); @@ -5273,12 +4907,14 @@ ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); - ArraySingleArgumentConstructorStub stub_holey(holey_initial, + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); __ bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(initial, + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -5306,7 +4942,7 @@ TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - ArraySingleArgumentConstructorStub stub(kind); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); __ TailCallStub(&stub, eq, a3, Operand(kind)); } @@ -5324,11 +4960,11 @@ TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); - stub.GetCode(isolate); + T stub(isolate, kind); + stub.GetCode(); if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { - T stub1(kind, DISABLE_ALLOCATION_SITES); - stub1.GetCode(isolate); + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); } } } @@ -5349,12 +4985,12 @@ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things. 
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); - stubh1.GetCode(isolate); - InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); - stubh2.GetCode(isolate); - InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); - stubh3.GetCode(isolate); + InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]); + stubh3.GetCode(); } } @@ -5432,10 +5068,10 @@ void InternalArrayConstructorStub::GenerateCase( MacroAssembler* masm, ElementsKind kind) { - InternalArrayNoArgumentConstructorStub stub0(kind); + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); __ TailCallStub(&stub0, lo, a0, Operand(1)); - InternalArrayNArgumentsConstructorStub stubN(kind); + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); __ TailCallStub(&stubN, hi, a0, Operand(1)); if (IsFastPackedElementsKind(kind)) { @@ -5444,11 +5080,11 @@ __ lw(at, MemOperand(sp, 0)); InternalArraySingleArgumentConstructorStub - stub1_holey(GetHoleyElementsKind(kind)); + stub1_holey(isolate(), GetHoleyElementsKind(kind)); __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); } - InternalArraySingleArgumentConstructorStub stub1(kind); + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); __ TailCallStub(&stub1); } @@ -5483,7 +5119,7 @@ // but the following bit field extraction takes care of that anyway. __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. - __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount); + __ DecodeField<Map::ElementsKindBits>(a3); if (FLAG_debug_code) { Label done; @@ -5538,8 +5174,6 @@ STATIC_ASSERT(FCA::kHolderIndex == 0); STATIC_ASSERT(FCA::kArgsLength == 7); - Isolate* isolate = masm->isolate(); - // Save context, callee and call data. 
__ Push(context, callee, call_data); // Load context from callee. @@ -5552,7 +5186,7 @@ // Push return value and default return value. __ Push(scratch, scratch); __ li(scratch, - Operand(ExternalReference::isolate_address(isolate))); + Operand(ExternalReference::isolate_address(isolate()))); // Push isolate and holder. __ Push(scratch, holder); @@ -5566,7 +5200,7 @@ FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - ASSERT(!api_function_address.is(a0) && !scratch.is(a0)); + DCHECK(!api_function_address.is(a0) && !scratch.is(a0)); // a0 = FunctionCallbackInfo& // Arguments is after the return address. __ Addu(a0, sp, Operand(1 * kPointerSize)); @@ -5582,11 +5216,8 @@ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize)); const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); - ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); AllowExternalCallThatCantCauseGC scope(masm); MemOperand context_restore_operand( @@ -5632,12 +5263,8 @@ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); - ExternalReference::Type thunk_type = - ExternalReference::PROFILING_GETTER_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); __ CallApiFunctionAndReturn(api_function_address, thunk_ref, kStackUnwindSpace, diff -Nru nodejs-0.11.13/deps/v8/src/mips/code-stubs-mips.h nodejs-0.11.15/deps/v8/src/mips/code-stubs-mips.h --- 
nodejs-0.11.13/deps/v8/src/mips/code-stubs-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/code-stubs-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_MIPS_CODE_STUBS_ARM_H_ #define V8_MIPS_CODE_STUBS_ARM_H_ -#include "ic-inl.h" +#include "src/ic-inl.h" namespace v8 { @@ -40,8 +17,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub { public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) {} + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) {} void Generate(MacroAssembler* masm); @@ -51,8 +28,8 @@ private: SaveFPRegsMode save_doubles_; - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } }; @@ -62,16 +39,12 @@ // is allowed to spend extra time setting up conditions to make copying // faster. Copying of overlapping regions is not supported. // Dest register ends at the position after the last character written. - static void GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags); + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding); // Generate string hash. 
@@ -93,46 +66,45 @@ class SubStringStub: public PlatformCodeStub { public: - SubStringStub() {} + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; + class StoreRegistersStateStub: public PlatformCodeStub { public: - explicit StoreRegistersStateStub(SaveFPRegsMode with_fp) - : save_doubles_(with_fp) {} + explicit StoreRegistersStateStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} static void GenerateAheadOfTime(Isolate* isolate); private: - Major MajorKey() { return StoreRegistersState; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } - SaveFPRegsMode save_doubles_; + Major MajorKey() const { return StoreRegistersState; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; class RestoreRegistersStateStub: public PlatformCodeStub { public: - explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp) - : save_doubles_(with_fp) {} + explicit RestoreRegistersStateStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} static void GenerateAheadOfTime(Isolate* isolate); private: - Major MajorKey() { return RestoreRegistersState; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } - SaveFPRegsMode save_doubles_; + Major MajorKey() const { return RestoreRegistersState; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; class StringCompareStub: public PlatformCodeStub { public: - StringCompareStub() { } + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } // Compare two flat ASCII strings and returns result in v0. 
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, @@ -153,8 +125,8 @@ Register scratch3); private: - virtual Major MajorKey() { return StringCompare; } - virtual int MinorKey() { return 0; } + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } virtual void Generate(MacroAssembler* masm); static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, @@ -173,18 +145,20 @@ // so you don't have to set up the frame. class WriteInt32ToHeapNumberStub : public PlatformCodeStub { public: - WriteInt32ToHeapNumberStub(Register the_int, + WriteInt32ToHeapNumberStub(Isolate* isolate, + Register the_int, Register the_heap_number, Register scratch, Register scratch2) - : the_int_(the_int), + : PlatformCodeStub(isolate), + the_int_(the_int), the_heap_number_(the_heap_number), scratch_(scratch), sign_(scratch2) { - ASSERT(IntRegisterBits::is_valid(the_int_.code())); - ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code())); - ASSERT(ScratchRegisterBits::is_valid(scratch_.code())); - ASSERT(SignRegisterBits::is_valid(sign_.code())); + DCHECK(IntRegisterBits::is_valid(the_int_.code())); + DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number_.code())); + DCHECK(ScratchRegisterBits::is_valid(scratch_.code())); + DCHECK(SignRegisterBits::is_valid(sign_.code())); } static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); @@ -201,8 +175,8 @@ class ScratchRegisterBits: public BitField<int, 8, 4> {}; class SignRegisterBits: public BitField<int, 12, 4> {}; - Major MajorKey() { return WriteInt32ToHeapNumber; } - int MinorKey() { + Major MajorKey() const { return WriteInt32ToHeapNumber; } + int MinorKey() const { // Encode the parameters in a unique 16 bit value. 
return IntRegisterBits::encode(the_int_.code()) | HeapNumberRegisterBits::encode(the_heap_number_.code()) @@ -216,12 +190,14 @@ class RecordWriteStub: public PlatformCodeStub { public: - RecordWriteStub(Register object, + RecordWriteStub(Isolate* isolate, + Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) - : object_(object), + : PlatformCodeStub(isolate), + object_(object), value_(value), address_(address), remembered_set_action_(remembered_set_action), @@ -243,14 +219,14 @@ const unsigned offset = masm->instr_at(pos) & kImm16Mask; masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) | (zero_reg.code() << kRtShift) | (offset & kImm16Mask)); - ASSERT(Assembler::IsBne(masm->instr_at(pos))); + DCHECK(Assembler::IsBne(masm->instr_at(pos))); } static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { const unsigned offset = masm->instr_at(pos) & kImm16Mask; masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) | (zero_reg.code() << kRtShift) | (offset & kImm16Mask)); - ASSERT(Assembler::IsBeq(masm->instr_at(pos))); + DCHECK(Assembler::IsBeq(masm->instr_at(pos))); } static Mode GetMode(Code* stub) { @@ -262,13 +238,13 @@ return INCREMENTAL; } - ASSERT(Assembler::IsBne(first_instruction)); + DCHECK(Assembler::IsBne(first_instruction)); if (Assembler::IsBeq(second_instruction)) { return INCREMENTAL_COMPACTION; } - ASSERT(Assembler::IsBne(second_instruction)); + DCHECK(Assembler::IsBne(second_instruction)); return STORE_BUFFER_ONLY; } @@ -279,22 +255,23 @@ stub->instruction_size()); switch (mode) { case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || + DCHECK(GetMode(stub) == INCREMENTAL || GetMode(stub) == INCREMENTAL_COMPACTION); PatchBranchIntoNop(&masm, 0); PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize); break; case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); PatchNopIntoBranch(&masm, 0); break; case 
INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize); break; } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize); + DCHECK(GetMode(stub) == mode); + CpuFeatures::FlushICache(stub->instruction_start(), + 4 * Assembler::kInstrSize); } private: @@ -309,12 +286,12 @@ : object_(object), address_(address), scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); + DCHECK(!AreAliased(scratch0, object, address, no_reg)); scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_); } void Save(MacroAssembler* masm) { - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); // We don't have to save scratch0_ because it was given to us as // a scratch register. masm->push(scratch1_); @@ -369,9 +346,9 @@ Mode mode); void InformIncrementalMarker(MacroAssembler* masm); - Major MajorKey() { return RecordWrite; } + Major MajorKey() const { return RecordWrite; } - int MinorKey() { + int MinorKey() const { return ObjectBits::encode(object_.code()) | ValueBits::encode(value_.code()) | AddressBits::encode(address_.code()) | @@ -406,13 +383,13 @@ // moved by GC class DirectCEntryStub: public PlatformCodeStub { public: - DirectCEntryStub() {} + explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} void Generate(MacroAssembler* masm); void GenerateCall(MacroAssembler* masm, Register target); private: - Major MajorKey() { return DirectCEntry; } - int MinorKey() { return 0; } + Major MajorKey() const { return DirectCEntry; } + int MinorKey() const { return 0; } bool NeedsImmovableCode() { return true; } }; @@ -422,7 +399,8 @@ public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + NameDictionaryLookupStub(Isolate* isolate, 
LookupMode mode) + : PlatformCodeStub(isolate), mode_(mode) { } void Generate(MacroAssembler* masm); @@ -456,11 +434,9 @@ NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return NameDictionaryLookup; } + Major MajorKey() const { return NameDictionaryLookup; } - int MinorKey() { - return LookupModeBits::encode(mode_); - } + int MinorKey() const { return LookupModeBits::encode(mode_); } class LookupModeBits: public BitField<LookupMode, 0, 1> {}; diff -Nru nodejs-0.11.13/deps/v8/src/mips/constants-mips.cc nodejs-0.11.15/deps/v8/src/mips/constants-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/constants-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/constants-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "constants-mips.h" +#include "src/mips/constants-mips.h" namespace v8 { namespace internal { @@ -174,7 +151,7 @@ return true; default: return false; - }; + } break; case SPECIAL: switch (FunctionFieldRaw()) { @@ -183,11 +160,11 @@ return true; default: return false; - }; + } break; default: return false; - }; + } } @@ -203,17 +180,17 @@ return true; default: return false; - }; + } case SPECIAL: switch (FunctionFieldRaw()) { case JALR: return true; default: return false; - }; + } default: return false; - }; + } } @@ -232,7 +209,7 @@ return true; default: return false; - }; + } } } @@ -278,7 +255,7 @@ return kRegisterType; default: return kUnsupported; - }; + } break; case SPECIAL2: switch (FunctionFieldRaw()) { @@ -287,7 +264,7 @@ return kRegisterType; default: return kUnsupported; - }; + } break; case SPECIAL3: switch (FunctionFieldRaw()) { @@ -296,7 +273,7 @@ return kRegisterType; default: return kUnsupported; - }; + } break; case COP1: // Coprocessor instructions. 
switch (RsFieldRawNoAssert()) { @@ -304,7 +281,7 @@ return kImmediateType; default: return kRegisterType; - }; + } break; case COP1X: return kRegisterType; @@ -349,7 +326,7 @@ return kJumpType; default: return kUnsupported; - }; + } return kUnsupported; } diff -Nru nodejs-0.11.13/deps/v8/src/mips/constants-mips.h nodejs-0.11.15/deps/v8/src/mips/constants-mips.h --- nodejs-0.11.13/deps/v8/src/mips/constants-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/constants-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MIPS_CONSTANTS_H_ #define V8_MIPS_CONSTANTS_H_ @@ -55,6 +32,18 @@ static const ArchVariants kArchVariant = kMips32r1; #endif +enum Endianness { + kLittle, + kBig +}; + +#if defined(V8_TARGET_LITTLE_ENDIAN) + static const Endianness kArchEndian = kLittle; +#elif defined(V8_TARGET_BIG_ENDIAN) + static const Endianness kArchEndian = kBig; +#else +#error Unknown endianness +#endif #if(defined(__mips_hard_float) && __mips_hard_float != 0) // Use floating-point coprocessor instructions. This flag is raised when @@ -69,6 +58,15 @@ const bool IsMipsSoftFloatABI = true; #endif +#if defined(V8_TARGET_LITTLE_ENDIAN) +const uint32_t kHoleNanUpper32Offset = 4; +const uint32_t kHoleNanLower32Offset = 0; +#elif defined(V8_TARGET_BIG_ENDIAN) +const uint32_t kHoleNanUpper32Offset = 0; +const uint32_t kHoleNanLower32Offset = 4; +#else +#error Unknown endianness +#endif // Defines constants and accessor classes to assemble, disassemble and // simulate MIPS32 instructions. @@ -501,12 +499,13 @@ // no_condition value (-2). As long as tests for no_condition check // for condition < 0, this will work as expected. 
inline Condition NegateCondition(Condition cc) { - ASSERT(cc != cc_always); + DCHECK(cc != cc_always); return static_cast<Condition>(cc ^ 1); } -inline Condition ReverseCondition(Condition cc) { +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cc) { switch (cc) { case Uless: return Ugreater; @@ -526,7 +525,7 @@ return greater_equal; default: return cc; - }; + } } @@ -661,29 +660,29 @@ } inline int RsValue() const { - ASSERT(InstructionType() == kRegisterType || + DCHECK(InstructionType() == kRegisterType || InstructionType() == kImmediateType); return Bits(kRsShift + kRsBits - 1, kRsShift); } inline int RtValue() const { - ASSERT(InstructionType() == kRegisterType || + DCHECK(InstructionType() == kRegisterType || InstructionType() == kImmediateType); return Bits(kRtShift + kRtBits - 1, kRtShift); } inline int RdValue() const { - ASSERT(InstructionType() == kRegisterType); + DCHECK(InstructionType() == kRegisterType); return Bits(kRdShift + kRdBits - 1, kRdShift); } inline int SaValue() const { - ASSERT(InstructionType() == kRegisterType); + DCHECK(InstructionType() == kRegisterType); return Bits(kSaShift + kSaBits - 1, kSaShift); } inline int FunctionValue() const { - ASSERT(InstructionType() == kRegisterType || + DCHECK(InstructionType() == kRegisterType || InstructionType() == kImmediateType); return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift); } @@ -725,7 +724,7 @@ } inline int RsFieldRaw() const { - ASSERT(InstructionType() == kRegisterType || + DCHECK(InstructionType() == kRegisterType || InstructionType() == kImmediateType); return InstructionBits() & kRsFieldMask; } @@ -736,18 +735,18 @@ } inline int RtFieldRaw() const { - ASSERT(InstructionType() == kRegisterType || + DCHECK(InstructionType() == kRegisterType || InstructionType() == kImmediateType); return InstructionBits() & kRtFieldMask; } inline int RdFieldRaw() const { - ASSERT(InstructionType() == kRegisterType); + 
DCHECK(InstructionType() == kRegisterType); return InstructionBits() & kRdFieldMask; } inline int SaFieldRaw() const { - ASSERT(InstructionType() == kRegisterType); + DCHECK(InstructionType() == kRegisterType); return InstructionBits() & kSaFieldMask; } @@ -772,12 +771,12 @@ } inline int32_t Imm16Value() const { - ASSERT(InstructionType() == kImmediateType); + DCHECK(InstructionType() == kImmediateType); return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); } inline int32_t Imm26Value() const { - ASSERT(InstructionType() == kJumpType); + DCHECK(InstructionType() == kJumpType); return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); } diff -Nru nodejs-0.11.13/deps/v8/src/mips/cpu-mips.cc nodejs-0.11.15/deps/v8/src/mips/cpu-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/cpu-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/cpu-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // CPU specific code for arm independent of OS goes here. @@ -34,30 +11,20 @@ #include <asm/cachectl.h> #endif // #ifdef __mips -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "cpu.h" -#include "macro-assembler.h" +#include "src/assembler.h" +#include "src/macro-assembler.h" -#include "simulator.h" // For cache flushing. +#include "src/simulator.h" // For cache flushing. namespace v8 { namespace internal { -void CPU::SetUp() { - CpuFeatures::Probe(); -} - - -bool CPU::SupportsCrankshaft() { - return CpuFeatures::IsSupported(FPU); -} - - -void CPU::FlushICache(void* start, size_t size) { +void CpuFeatures::FlushICache(void* start, size_t size) { // Nothing to do, flushing no instructions. if (size == 0) { return; diff -Nru nodejs-0.11.13/deps/v8/src/mips/debug-mips.cc nodejs-0.11.15/deps/v8/src/mips/debug-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/debug-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/debug-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "codegen.h" -#include "debug.h" +#include "src/codegen.h" +#include "src/debug.h" namespace v8 { namespace internal { -#ifdef ENABLE_DEBUGGER_SUPPORT - bool BreakLocationIterator::IsDebugBreakAtReturn() { return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -55,12 +30,11 @@ // nop (in branch delay slot) // Make sure this constant matches the number if instrucntions we emit. - ASSERT(Assembler::kJSReturnSequenceInstructions == 7); + DCHECK(Assembler::kJSReturnSequenceInstructions == 7); CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions); // li and Call pseudo-instructions emit two instructions each. - patcher.masm()->li(v8::internal::t9, - Operand(reinterpret_cast<int32_t>( - debug_info_->GetIsolate()->debug()->debug_break_return()->entry()))); + patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>( + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry()))); patcher.masm()->Call(v8::internal::t9); patcher.masm()->nop(); patcher.masm()->nop(); @@ -81,20 +55,20 @@ // A debug break in the exit code is identified by the JS frame exit code // having been patched with li/call psuedo-instrunction (liu/ori/jalr). bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); return rinfo->IsPatchedReturnSequence(); } bool BreakLocationIterator::IsDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Check whether the debug break slot instructions have been patched. 
return rinfo()->IsPatchedDebugBreakSlotSequence(); } void BreakLocationIterator::SetDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Patch the code changing the debug break slot code from: // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1) // nop(DEBUG_BREAK_NOP) @@ -105,19 +79,17 @@ // call t9 (jalr t9 / nop instruction pair) CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions); patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>( - debug_info_->GetIsolate()->debug()->debug_break_slot()->entry()))); + debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry()))); patcher.masm()->Call(v8::internal::t9); } void BreakLocationIterator::ClearDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotInstructions); } -const bool Debug::FramePaddingLayout::kIsSupported = false; - #define __ ACCESS_MASM(masm) @@ -129,12 +101,22 @@ { FrameScope scope(masm, StackFrame::INTERNAL); + // Load padding words on stack. + __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue))); + __ Subu(sp, sp, + Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize)); + for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) { + __ sw(at, MemOperand(sp, kPointerSize * i)); + } + __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize))); + __ push(at); + // Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values // are stored as a smi causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); + DCHECK((object_regs & ~kJSCallerSaved) == 0); + DCHECK((non_object_regs & ~kJSCallerSaved) == 0); + DCHECK((object_regs & non_object_regs) == 0); if ((object_regs | non_object_regs) != 0) { for (int i = 0; i < kNumJSCallerSaved; i++) { int r = JSCallerSavedCode(i); @@ -156,7 +138,7 @@ __ PrepareCEntryArgs(0); // No arguments. __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate())); - CEntryStub ceb(1); + CEntryStub ceb(masm->isolate(), 1); __ CallStub(&ceb); // Restore the register values from the expression stack. @@ -175,67 +157,67 @@ } } + // Don't bother removing padding bytes pushed on the stack + // as the frame is going to be restored right away. + // Leave the internal frame. } // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. - __ li(t9, Operand( - ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate()))); + ExternalReference after_break_target = + ExternalReference::debug_after_break_target_address(masm->isolate()); + __ li(t9, Operand(after_break_target)); __ lw(t9, MemOperand(t9)); __ Jump(t9); } -void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC load (from ic-mips.cc). +void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub // ----------- S t a t e ------------- - // -- a2 : name - // -- ra : return address - // -- a0 : receiver - // -- [sp] : receiver + // -- a1 : function + // -- a3 : slot in feedback array (smi) // ----------------------------------- - // Registers a0 and a2 contain objects that need to be pushed on the - // expression stack of the fake JS frame. 
- Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0); + Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0); } -void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0); +} + + +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { // Calling convention for IC store (from ic-mips.cc). - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - // Registers a0, a1, and a2 contain objects that need to be pushed on the - // expression stack of the fake JS frame. - Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0); + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0); } -void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0); +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { + // Calling convention for keyed IC load (from ic-mips.cc). + GenerateLoadICDebugBreak(masm); } -void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0); +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC keyed store call (from ic-mips.cc). 
+ Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0); } -void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { // Register state for CompareNil IC // ----------- S t a t e ------------- // -- a0 : value @@ -244,16 +226,7 @@ } -void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC call (from ic-mips.cc). - // ----------- S t a t e ------------- - // -- a2: name - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, a2.bit(), 0); -} - - -void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { // In places other than IC call sites it is expected that v0 is TOS which // is an object - this is not generally the case so this should be used with // care. @@ -261,7 +234,7 @@ } -void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { // Register state for CallFunctionStub (from code-stubs-mips.cc). // ----------- S t a t e ------------- // -- a1 : function @@ -270,18 +243,7 @@ } -void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) { - // Register state for CallFunctionStub (from code-stubs-mips.cc). 
- // ----------- S t a t e ------------- - // -- a1 : function - // -- a2 : feedback array - // -- a3 : slot in feedback array - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), 0); -} - - -void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-mips.cc). // ----------- S t a t e ------------- // -- a0 : number of arguments (not smi) @@ -291,7 +253,8 @@ } -void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-mips.cc). // ----------- S t a t e ------------- // -- a0 : number of arguments (not smi) @@ -303,7 +266,7 @@ } -void Debug::GenerateSlot(MacroAssembler* masm) { +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { // Generate enough nop's to make space for a call instruction. Avoid emitting // the trampoline pool in the debug break slot code. Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); @@ -313,34 +276,51 @@ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) { __ nop(MacroAssembler::DEBUG_BREAK_NOP); } - ASSERT_EQ(Assembler::kDebugBreakSlotInstructions, + DCHECK_EQ(Assembler::kDebugBreakSlotInstructions, masm->InstructionsGeneratedSince(&check_codesize)); } -void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
Generate_DebugBreakCallHelper(masm, 0, 0); } -void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { - masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips); +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + __ Ret(); } -void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { - masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips); -} +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + ExternalReference restarter_frame_function_slot = + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); + __ li(at, Operand(restarter_frame_function_slot)); + __ sw(zero_reg, MemOperand(at, 0)); + // We do not know our frame height, but set sp based on fp. + __ Subu(sp, fp, Operand(kPointerSize)); -const bool Debug::kFrameDropperSupported = false; + __ Pop(ra, fp, a1); // Return address, Frame, Function. -#undef __ + // Load context from the function. + __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Get function code. + __ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset)); + __ Addu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Re-run JSFunction, a1 is function, cp is context. + __ Jump(t9); +} -#endif // ENABLE_DEBUGGER_SUPPORT +const bool LiveEdit::kFrameDropperSupported = true; + +#undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/mips/deoptimizer-mips.cc nodejs-0.11.15/deps/v8/src/mips/deoptimizer-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/deoptimizer-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/deoptimizer-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "safepoint-table.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" namespace v8 { namespace internal { @@ -53,7 +30,7 @@ // Fail hard and early if we enter this code object again. byte* pointer = code->FindCodeAgeSequence(); if (pointer != NULL) { - pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize; + pointer += kNoCodeAgeSequenceLength; } else { pointer = code->instruction_start(); } @@ -71,9 +48,6 @@ DeoptimizationInputData* deopt_data = DeoptimizationInputData::cast(code->deoptimization_data()); - SharedFunctionInfo* shared = - SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); - shared->EvictFromOptimizedCodeMap(code, "deoptimized code"); #ifdef DEBUG Address prev_call_address = NULL; #endif @@ -86,13 +60,13 @@ int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry, RelocInfo::NONE32); int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; - ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); - ASSERT(call_size_in_bytes <= patch_size()); + DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); + DCHECK(call_size_in_bytes <= patch_size()); CodePatcher patcher(call_address, call_size_in_words); patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); - ASSERT(prev_call_address == NULL || + DCHECK(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); - ASSERT(call_address + patch_size() <= code->instruction_end()); + DCHECK(call_address + patch_size() <= code->instruction_end()); #ifdef DEBUG prev_call_address = call_address; @@ -124,7 +98,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { - ApiFunction function(descriptor->deoptimization_handler_); + ApiFunction function(descriptor->deoptimization_handler()); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); intptr_t handler = 
reinterpret_cast<intptr_t>(xref.address()); int params = descriptor->GetHandlerParameterCount(); @@ -148,11 +122,6 @@ } -Code* Deoptimizer::NotifyStubFailureBuiltin() { - return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles); -} - - #define __ masm()-> @@ -226,7 +195,7 @@ __ lw(a1, MemOperand(v0, Deoptimizer::input_offset())); // Copy core registers into FrameDescription::registers_[kNumRegisters]. - ASSERT(Register::kNumRegisters == kNumberOfRegisters); + DCHECK(Register::kNumRegisters == kNumberOfRegisters); for (int i = 0; i < kNumberOfRegisters; i++) { int offset = (i * kPointerSize) + FrameDescription::registers_offset(); if ((saved_regs & (1 << i)) != 0) { @@ -328,7 +297,7 @@ // Technically restoring 'at' should work unless zero_reg is also restored // but it's safer to check for this. - ASSERT(!(at.bit() & restored_regs)); + DCHECK(!(at.bit() & restored_regs)); // Restore the registers from the last output frame. __ mov(at, a2); for (int i = kNumberOfRegisters - 1; i >= 0; i--) { @@ -348,39 +317,29 @@ // Maximum size of a table entry generated below. -const int Deoptimizer::table_entry_size_ = 7 * Assembler::kInstrSize; +const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize; void Deoptimizer::TableEntryGenerator::GeneratePrologue() { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); // Create a sequence of deoptimization entries. // Note that registers are still live when jumping to an entry. - Label table_start; + Label table_start, done; __ bind(&table_start); for (int i = 0; i < count(); i++) { Label start; __ bind(&start); - __ addiu(sp, sp, -1 * kPointerSize); - // Jump over the remaining deopt entries (including this one). - // This code is always reached by calling Jump, which puts the target (label - // start) into t9. - const int remaining_entries = (count() - i) * table_entry_size_; - __ Addu(t9, t9, remaining_entries); - // 'at' was clobbered so we can only load the current entry value here. 
- __ li(at, i); - __ jr(t9); // Expose delay slot. - __ sw(at, MemOperand(sp, 0 * kPointerSize)); // In the delay slot. - - // Pad the rest of the code. - while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) { - __ nop(); - } + DCHECK(is_int16(i)); + __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot. + __ li(at, i); // In the delay slot. - ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); + DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); } - ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), + DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), count() * table_entry_size_); + __ bind(&done); + __ Push(at); } diff -Nru nodejs-0.11.13/deps/v8/src/mips/disasm-mips.cc nodejs-0.11.15/deps/v8/src/mips/disasm-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/disasm-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/disasm-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // A Disassembler object is used to disassemble a block of code instruction by // instruction. The default implementation of the NameConverter object can be @@ -47,18 +24,18 @@ #include <assert.h> -#include <stdio.h> #include <stdarg.h> +#include <stdio.h> #include <string.h> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "mips/constants-mips.h" -#include "disasm.h" -#include "macro-assembler.h" -#include "platform.h" +#include "src/base/platform/platform.h" +#include "src/disasm.h" +#include "src/macro-assembler.h" +#include "src/mips/constants-mips.h" namespace v8 { namespace internal { @@ -207,21 +184,21 @@ // Print the integer value of the sa field. void Decoder::PrintSa(Instruction* instr) { int sa = instr->SaValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); } // Print the integer value of the rd field, when it is not used as reg. 
void Decoder::PrintSd(Instruction* instr) { int sd = instr->RdValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd); } // Print the integer value of the rd field, when used as 'ext' size. void Decoder::PrintSs1(Instruction* instr) { int ss = instr->RdValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1); } @@ -230,49 +207,49 @@ int ss = instr->RdValue(); int pos = instr->SaValue(); out_buffer_pos_ += - OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1); + SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1); } // Print the integer value of the cc field for the bc1t/f instructions. void Decoder::PrintBc(Instruction* instr) { int cc = instr->FBccValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc); } // Print the integer value of the cc field for the FP compare instructions. void Decoder::PrintCc(Instruction* instr) { int cc = instr->FCccValue(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc); } // Print 16-bit unsigned immediate value. void Decoder::PrintUImm16(Instruction* instr) { int32_t imm = instr->Imm16Value(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); } // Print 16-bit signed immediate value. void Decoder::PrintSImm16(Instruction* instr) { int32_t imm = ((instr->Imm16Value()) << 16) >> 16; - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); } // Print 16-bit hexa immediate value. 
void Decoder::PrintXImm16(Instruction* instr) { int32_t imm = instr->Imm16Value(); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); } // Print 26-bit immediate value. void Decoder::PrintXImm26(Instruction* instr) { uint32_t imm = instr->Imm26Value() << kImmFieldShift; - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); } @@ -283,8 +260,8 @@ switch (instr->FunctionFieldRaw()) { case BREAK: { int32_t code = instr->Bits(25, 6); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "0x%05x (%d)", code, code); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "0x%05x (%d)", code, code); break; } case TGE: @@ -295,12 +272,12 @@ case TNE: { int32_t code = instr->Bits(15, 6); out_buffer_pos_ += - OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code); + SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code); break; } default: // Not a break or trap instruction. break; - }; + } } @@ -312,7 +289,7 @@ // Handle all register based formatting in this function to reduce the // complexity of FormatOption. int Decoder::FormatRegister(Instruction* instr, const char* format) { - ASSERT(format[0] == 'r'); + DCHECK(format[0] == 'r'); if (format[1] == 's') { // 'rs: Rs register. int reg = instr->RsValue(); PrintRegister(reg); @@ -334,7 +311,7 @@ // Handle all FPUregister based formatting in this function to reduce the // complexity of FormatOption. int Decoder::FormatFPURegister(Instruction* instr, const char* format) { - ASSERT(format[0] == 'f'); + DCHECK(format[0] == 'f'); if (format[1] == 's') { // 'fs: fs register. int reg = instr->FsValue(); PrintFPURegister(reg); @@ -365,26 +342,26 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { switch (format[0]) { case 'c': { // 'code for break or trap instructions. 
- ASSERT(STRING_STARTS_WITH(format, "code")); + DCHECK(STRING_STARTS_WITH(format, "code")); PrintCode(instr); return 4; } case 'i': { // 'imm16u or 'imm26. if (format[3] == '1') { - ASSERT(STRING_STARTS_WITH(format, "imm16")); + DCHECK(STRING_STARTS_WITH(format, "imm16")); if (format[5] == 's') { - ASSERT(STRING_STARTS_WITH(format, "imm16s")); + DCHECK(STRING_STARTS_WITH(format, "imm16s")); PrintSImm16(instr); } else if (format[5] == 'u') { - ASSERT(STRING_STARTS_WITH(format, "imm16u")); + DCHECK(STRING_STARTS_WITH(format, "imm16u")); PrintSImm16(instr); } else { - ASSERT(STRING_STARTS_WITH(format, "imm16x")); + DCHECK(STRING_STARTS_WITH(format, "imm16x")); PrintXImm16(instr); } return 6; } else { - ASSERT(STRING_STARTS_WITH(format, "imm26x")); + DCHECK(STRING_STARTS_WITH(format, "imm26x")); PrintXImm26(instr); return 6; } @@ -398,22 +375,22 @@ case 's': { // 'sa. switch (format[1]) { case 'a': { - ASSERT(STRING_STARTS_WITH(format, "sa")); + DCHECK(STRING_STARTS_WITH(format, "sa")); PrintSa(instr); return 2; } case 'd': { - ASSERT(STRING_STARTS_WITH(format, "sd")); + DCHECK(STRING_STARTS_WITH(format, "sd")); PrintSd(instr); return 2; } case 's': { if (format[2] == '1') { - ASSERT(STRING_STARTS_WITH(format, "ss1")); /* ext size */ + DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */ PrintSs1(instr); return 3; } else { - ASSERT(STRING_STARTS_WITH(format, "ss2")); /* ins size */ + DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */ PrintSs2(instr); return 3; } @@ -421,16 +398,16 @@ } } case 'b': { // 'bc - Special for bc1 cc field. - ASSERT(STRING_STARTS_WITH(format, "bc")); + DCHECK(STRING_STARTS_WITH(format, "bc")); PrintBc(instr); return 2; } case 'C': { // 'Cc - Special for c.xx.d cc field. 
- ASSERT(STRING_STARTS_WITH(format, "Cc")); + DCHECK(STRING_STARTS_WITH(format, "Cc")); PrintCc(instr); return 2; } - }; + } UNREACHABLE(); return -1; } @@ -626,7 +603,7 @@ break; default: UNREACHABLE(); - }; + } break; case SPECIAL: switch (instr->FunctionFieldRaw()) { @@ -819,7 +796,7 @@ break; default: UNREACHABLE(); - }; + } break; // Case COP1. case REGIMM: switch (instr->RtFieldRaw()) { @@ -932,7 +909,7 @@ default: UNREACHABLE(); break; - }; + } } @@ -954,9 +931,9 @@ int Decoder::InstructionDecode(byte* instr_ptr) { Instruction* instr = Instruction::At(instr_ptr); // Print raw instruction bytes. - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "%08x ", - instr->InstructionBits()); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%08x ", + instr->InstructionBits()); switch (instr->InstructionType()) { case Instruction::kRegisterType: { DecodeTypeRegister(instr); @@ -988,7 +965,7 @@ namespace disasm { const char* NameConverter::NameOfAddress(byte* addr) const { - v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr); + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); return tmp_buffer_.start(); } diff -Nru nodejs-0.11.13/deps/v8/src/mips/frames-mips.cc nodejs-0.11.15/deps/v8/src/mips/frames-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/frames-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/frames-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "assembler.h" -#include "assembler-mips.h" -#include "assembler-mips-inl.h" -#include "frames.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/mips/assembler-mips-inl.h" +#include "src/mips/assembler-mips.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/mips/frames-mips.h nodejs-0.11.15/deps/v8/src/mips/frames-mips.h --- nodejs-0.11.13/deps/v8/src/mips/frames-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/frames-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. 
All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. @@ -110,8 +87,6 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved; -typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved]; - const int kUndefIndex = -1; // Map with indexes on stack that corresponds to codes of saved registers. 
const int kSafepointRegisterStackIndexMap[kNumRegs] = { diff -Nru nodejs-0.11.13/deps/v8/src/mips/full-codegen-mips.cc nodejs-0.11.15/deps/v8/src/mips/full-codegen-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/full-codegen-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/full-codegen-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS @@ -37,18 +14,18 @@ // places where we have to move a previous result in v0 to a0 for the // next call: mov(a0, v0). This is not needed on the other architectures. -#include "code-stubs.h" -#include "codegen.h" -#include "compiler.h" -#include "debug.h" -#include "full-codegen.h" -#include "isolate-inl.h" -#include "parser.h" -#include "scopes.h" -#include "stub-cache.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" -#include "mips/code-stubs-mips.h" -#include "mips/macro-assembler-mips.h" +#include "src/mips/code-stubs-mips.h" +#include "src/mips/macro-assembler-mips.h" namespace v8 { namespace internal { @@ -73,13 +50,13 @@ } ~JumpPatchSite() { - ASSERT(patch_site_.is_bound() == info_emitted_); + DCHECK(patch_site_.is_bound() == info_emitted_); } // When initially emitting this ensure that a jump is always generated to skip // the inlined smi code. void EmitJumpIfNotSmi(Register reg, Label* target) { - ASSERT(!patch_site_.is_bound() && !info_emitted_); + DCHECK(!patch_site_.is_bound() && !info_emitted_); Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); __ bind(&patch_site_); __ andi(at, reg, 0); @@ -91,7 +68,7 @@ // the inlined smi code. void EmitJumpIfSmi(Register reg, Label* target) { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - ASSERT(!patch_site_.is_bound() && !info_emitted_); + DCHECK(!patch_site_.is_bound() && !info_emitted_); __ bind(&patch_site_); __ andi(at, reg, 0); // Never taken before patched. 
@@ -120,24 +97,6 @@ }; -static void EmitStackCheck(MacroAssembler* masm_, - Register stack_limit_scratch, - int pointers = 0, - Register scratch = sp) { - Isolate* isolate = masm_->isolate(); - Label ok; - ASSERT(scratch.is(sp) == (pointers == 0)); - if (pointers != 0) { - __ Subu(scratch, sp, Operand(pointers * kPointerSize)); - } - __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex); - __ Branch(&ok, hs, scratch, Operand(stack_limit_scratch)); - PredictableCodeSizeScope predictable(masm_, 4 * Assembler::kInstrSize); - __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET); - __ bind(&ok); -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right. The actual // argument count matches the formal parameter count expected by the @@ -157,8 +116,6 @@ handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); - InitializeFeedbackVector(); - profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); @@ -184,7 +141,7 @@ __ Branch(&ok, ne, a2, Operand(at)); __ lw(a2, GlobalObjectOperand()); - __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); + __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); __ sw(a2, MemOperand(sp, receiver_offset)); @@ -197,16 +154,21 @@ FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - __ Prologue(BUILD_FUNCTION_FRAME); + __ Prologue(info->IsCodePreAgingActive()); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = info->scope()->num_stack_slots(); // Generators allocate locals, if any, in context slots. 
- ASSERT(!info->function()->is_generator() || locals_count == 0); + DCHECK(!info->function()->is_generator() || locals_count == 0); if (locals_count > 0) { if (locals_count >= 128) { - EmitStackCheck(masm_, a2, locals_count, t5); + Label ok; + __ Subu(t5, sp, Operand(locals_count * kPointerSize)); + __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + __ Branch(&ok, hs, t5, Operand(a2)); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&ok); } __ LoadRoot(t5, Heap::kUndefinedValueRootIndex); int kMaxPushes = FLAG_optimize_for_size ? 4 : 32; @@ -240,16 +202,19 @@ if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate context"); // Argument to NewContext is the function, which is still in a1. + bool need_write_barrier = true; if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { __ push(a1); __ Push(info->scope()->GetScopeInfo()); - __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2); + __ CallRuntime(Runtime::kNewGlobalContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ push(a1); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } function_in_register = false; // Context is returned in v0. It replaces the context passed to us. @@ -270,8 +235,15 @@ __ sw(a0, target); // Update the write barrier. 
- __ RecordWriteContextSlot( - cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, a0, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } } @@ -306,7 +278,7 @@ } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(isolate(), type); __ CallStub(&stub); SetVar(arguments, v0, a1, a2); @@ -329,9 +301,9 @@ // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { VariableDeclaration* function = scope()->function(); - ASSERT(function->proxy()->var()->mode() == CONST || + DCHECK(function->proxy()->var()->mode() == CONST || function->proxy()->var()->mode() == CONST_LEGACY); - ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); @@ -339,13 +311,20 @@ { Comment cmnt(masm_, "[ Stack check"); PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); - EmitStackCheck(masm_, at); + Label ok; + __ LoadRoot(at, Heap::kStackLimitRootIndex); + __ Branch(&ok, hs, sp, Operand(at)); + Handle<Code> stack_check = isolate()->builtins()->StackCheck(); + PredictableCodeSizeScope predictable(masm_, + masm_->CallSize(stack_check, RelocInfo::CODE_TARGET)); + __ Call(stack_check, RelocInfo::CODE_TARGET); + __ bind(&ok); } { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); } } @@ -359,7 +338,7 @@ void FullCodeGenerator::ClearAccumulator() { - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); __ mov(v0, zero_reg); } @@ -374,7 +353,7 @@ void 
FullCodeGenerator::EmitProfilingCounterReset() { int reset_value = FLAG_interrupt_budget; - if (isolate()->IsDebuggerActive()) { + if (info_->is_debug()) { // Detect debug break requests as soon as possible. reset_value = FLAG_interrupt_budget >> 4; } @@ -394,7 +373,7 @@ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); Comment cmnt(masm_, "[ Back edge bookkeeping"); Label ok; - ASSERT(back_edge_target->is_bound()); + DCHECK(back_edge_target->is_bound()); int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier)); @@ -473,7 +452,7 @@ #ifdef DEBUG // Check that the size of the code used for returning is large enough // for the debugger's requirements. - ASSERT(Assembler::kJSReturnSequenceInstructions <= + DCHECK(Assembler::kJSReturnSequenceInstructions <= masm_->InstructionsGeneratedSince(&check_exit_codesize)); #endif } @@ -481,18 +460,18 @@ void FullCodeGenerator::EffectContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); } void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); } void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); __ push(result_register()); } @@ -563,7 +542,7 @@ true, true_label_, false_label_); - ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. 
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { if (false_label_ != fall_through_) __ Branch(false_label_); } else if (lit->IsTrue() || lit->IsJSObject()) { @@ -590,7 +569,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); } @@ -598,7 +577,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); __ Move(result_register(), reg); } @@ -606,7 +585,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); if (count > 1) __ Drop(count - 1); __ sw(reg, MemOperand(sp, 0)); } @@ -614,7 +593,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); // For simplicity we always test the accumulator register. __ Drop(count); __ Move(result_register(), reg); @@ -625,7 +604,7 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == materialize_false); + DCHECK(materialize_true == materialize_false); __ bind(materialize_true); } @@ -661,8 +640,8 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == true_label_); - ASSERT(materialize_false == false_label_); + DCHECK(materialize_true == true_label_); + DCHECK(materialize_false == false_label_); } @@ -728,7 +707,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) { - ASSERT(var->IsStackAllocated()); + DCHECK(var->IsStackAllocated()); // Offset is negative because higher indexes are at lower addresses. int offset = -var->index() * kPointerSize; // Adjust by a (parameter or local) base offset. 
@@ -742,7 +721,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); if (var->IsContextSlot()) { int context_chain_length = scope()->ContextChainLength(var->scope()); __ LoadContext(scratch, context_chain_length); @@ -764,10 +743,10 @@ Register src, Register scratch0, Register scratch1) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); - ASSERT(!scratch0.is(src)); - ASSERT(!scratch0.is(scratch1)); - ASSERT(!scratch1.is(src)); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!scratch0.is(src)); + DCHECK(!scratch0.is(scratch1)); + DCHECK(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ sw(src, location); // Emit the write barrier code if the location is in the heap. @@ -805,7 +784,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { // The variable in the declaration always resides in the current function // context. - ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset)); @@ -861,7 +840,7 @@ Comment cmnt(masm_, "[ VariableDeclaration"); __ li(a2, Operand(variable->name())); // Declaration nodes are always introduced in one of four modes. - ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY : NONE; __ li(a1, Operand(Smi::FromInt(attr))); @@ -873,11 +852,11 @@ __ LoadRoot(a0, Heap::kTheHoleValueRootIndex); __ Push(cp, a2, a1, a0); } else { - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); __ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value. 
__ Push(cp, a2, a1, a0); } - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -892,7 +871,7 @@ case Variable::UNALLOCATED: { globals_->Add(variable->name(), zone()); Handle<SharedFunctionInfo> function = - Compiler::BuildFunctionInfo(declaration->fun(), script()); + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); // Check for stack-overflow exception. if (function.is_null()) return SetStackOverflow(); globals_->Add(function, zone()); @@ -933,7 +912,7 @@ __ Push(cp, a2, a1); // Push initial value for function declaration. VisitForStackValue(declaration->fun()); - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -942,8 +921,8 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { Variable* variable = declaration->proxy()->var(); - ASSERT(variable->location() == Variable::CONTEXT); - ASSERT(variable->interface()->IsFrozen()); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); Comment cmnt(masm_, "[ ModuleDeclaration"); EmitDebugCheckDeclarationContext(variable); @@ -1005,7 +984,7 @@ __ li(a1, Operand(pairs)); __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags()))); __ Push(cp, a1, a0); - __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3); + __ CallRuntime(Runtime::kDeclareGlobals, 3); // Return value is ignored. } @@ -1013,7 +992,7 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { // Call the runtime to declare the modules. __ Push(descriptions); - __ CallRuntime(Runtime::kHiddenDeclareModules, 1); + __ CallRuntime(Runtime::kDeclareModules, 1); // Return value is ignored. 
} @@ -1198,12 +1177,8 @@ Label non_proxy; __ bind(&fixed_array); - Handle<Object> feedback = Handle<Object>( - Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), - isolate()); - StoreFeedbackVectorSlot(slot, feedback); __ li(a1, FeedbackVector()); - __ li(a2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); + __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot))); __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check @@ -1246,7 +1221,7 @@ // For proxies, no filtering is done. // TODO(rossberg): What if only a prototype is a proxy? Not specified yet. - ASSERT_EQ(Smi::FromInt(0), 0); + DCHECK_EQ(Smi::FromInt(0), 0); __ Branch(&update_each, eq, a2, Operand(zero_reg)); // Convert the entry to a string or (smi) 0 if it isn't a property @@ -1297,27 +1272,8 @@ Iteration loop_statement(this, stmt); increment_loop_depth(); - // var iterator = iterable[@@iterator]() - VisitForAccumulatorValue(stmt->assign_iterator()); - __ mov(a0, v0); - - // As with for-in, skip the loop if the iterator is null or undefined. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - __ Branch(loop_statement.break_label(), eq, a0, Operand(at)); - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(loop_statement.break_label(), eq, a0, Operand(at)); - - // Convert the iterator to a JS object. - Label convert, done_convert; - __ JumpIfSmi(a0, &convert); - __ GetObjectType(a0, a1, a1); - __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); - __ bind(&convert); - __ push(a0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(a0, v0); - __ bind(&done_convert); - __ push(a0); + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); // Loop entry. 
__ bind(loop_statement.continue_label()); @@ -1364,7 +1320,9 @@ !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode(), info->is_generator()); + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); __ li(a2, Operand(info)); __ CallStub(&stub); } else { @@ -1372,7 +1330,7 @@ __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex); __ Push(cp, a0, a1); - __ CallRuntime(Runtime::kHiddenNewClosure, 3); + __ CallRuntime(Runtime::kNewClosure, 3); } context()->Plug(v0); } @@ -1384,7 +1342,7 @@ } -void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { Register current = cp; @@ -1429,8 +1387,13 @@ __ bind(&fast); } - __ lw(a0, GlobalObjectOperand()); - __ li(a2, Operand(var->name())); + __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ li(LoadIC::NameRegister(), Operand(proxy->var()->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL : CONTEXTUAL; @@ -1440,7 +1403,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, Label* slow) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); Register context = cp; Register next = a3; Register temp = t0; @@ -1468,7 +1431,7 @@ } -void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofState typeof_state, Label* slow, Label* done) { @@ -1477,8 +1440,9 @@ // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. 
+ Variable* var = proxy->var(); if (var->mode() == DYNAMIC_GLOBAL) { - EmitLoadGlobalCheckExtensions(var, typeof_state, slow); + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); __ Branch(done); } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); @@ -1494,7 +1458,7 @@ __ Branch(done, ne, at, Operand(zero_reg)); __ li(a0, Operand(var->name())); __ push(a0); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } } __ Branch(done); @@ -1512,10 +1476,12 @@ switch (var->location()) { case Variable::UNALLOCATED: { Comment cmnt(masm_, "[ Global variable"); - // Use inline caching. Variable name is passed in a2 and the global - // object (receiver) in a0. - __ lw(a0, GlobalObjectOperand()); - __ li(a2, Operand(var->name())); + __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ li(LoadIC::NameRegister(), Operand(var->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } CallLoadIC(CONTEXTUAL); context()->Plug(v0); break; @@ -1532,7 +1498,7 @@ // always looked up dynamically, i.e. in that case // var->location() == LOOKUP. // always holds. - ASSERT(var->scope() != NULL); + DCHECK(var->scope() != NULL); // Check if the binding really needs an initialization check. The check // can be skipped in the following situation: we have a LET or CONST @@ -1555,8 +1521,8 @@ skip_init_check = false; } else { // Check that we always have valid source position. 
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition); - ASSERT(proxy->position() != RelocInfo::kNoPosition); + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); skip_init_check = var->mode() != CONST_LEGACY && var->initializer_position() < proxy->position(); } @@ -1573,11 +1539,11 @@ __ Branch(&done, ne, at, Operand(zero_reg)); __ li(a0, Operand(var->name())); __ push(a0); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); __ bind(&done); } else { // Uninitalized const bindings outside of harmony mode are unholed. - ASSERT(var->mode() == CONST_LEGACY); + DCHECK(var->mode() == CONST_LEGACY); __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole. } @@ -1594,11 +1560,11 @@ Label done, slow; // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ li(a1, Operand(var->name())); __ Push(cp, a1); // Context and name. 
- __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ bind(&done); context()->Plug(v0); } @@ -1630,7 +1596,7 @@ __ li(a2, Operand(expr->pattern())); __ li(a1, Operand(expr->flags())); __ Push(t0, a3, a2, a1); - __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); __ mov(t1, v0); __ bind(&materialized); @@ -1642,7 +1608,7 @@ __ bind(&runtime_allocate); __ li(a0, Operand(Smi::FromInt(size))); __ Push(t1, a0); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ pop(t1); __ bind(&allocated); @@ -1683,13 +1649,13 @@ : ObjectLiteral::kNoFlags; __ li(a0, Operand(Smi::FromInt(flags))); int properties_count = constant_properties->length() / 2; - if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() || - flags != ObjectLiteral::kFastElements || + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ Push(a3, a2, a1, a0); - __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { - FastCloneShallowObjectStub stub(properties_count); + FastCloneShallowObjectStub stub(isolate(), properties_count); __ CallStub(&stub); } @@ -1717,15 +1683,16 @@ case ObjectLiteral::Property::CONSTANT: UNREACHABLE(); case ObjectLiteral::Property::MATERIALIZED_LITERAL: - ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value())); + DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value())); // Fall through. 
case ObjectLiteral::Property::COMPUTED: if (key->value()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); - __ mov(a0, result_register()); - __ li(a2, Operand(key->value())); - __ lw(a1, MemOperand(sp)); + __ mov(StoreIC::ValueRegister(), result_register()); + DCHECK(StoreIC::ValueRegister().is(a0)); + __ li(StoreIC::NameRegister(), Operand(key->value())); + __ lw(StoreIC::ReceiverRegister(), MemOperand(sp)); CallStoreIC(key->LiteralFeedbackId()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { @@ -1739,7 +1706,7 @@ VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ li(a0, Operand(Smi::FromInt(NONE))); // PropertyAttributes. + __ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes. __ push(a0); __ CallRuntime(Runtime::kSetProperty, 4); } else { @@ -1778,11 +1745,11 @@ EmitAccessor(it->second->setter); __ li(a0, Operand(Smi::FromInt(NONE))); __ push(a0); - __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); } if (expr->has_function()) { - ASSERT(result_saved); + DCHECK(result_saved); __ lw(a0, MemOperand(sp)); __ push(a0); __ CallRuntime(Runtime::kToFastProperties, 1); @@ -1808,7 +1775,7 @@ int length = subexprs->length(); Handle<FixedArray> constant_elements = expr->constant_elements(); - ASSERT_EQ(2, constant_elements->length()); + DCHECK_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); bool has_fast_elements = @@ -1828,31 +1795,12 @@ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); __ li(a2, Operand(Smi::FromInt(expr->literal_index()))); __ li(a1, Operand(constant_elements)); - if (has_fast_elements && constant_elements_values->map() == - isolate()->heap()->fixed_cow_array_map()) { - FastCloneShallowArrayStub stub( - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, - 
allocation_site_mode, - length); - __ CallStub(&stub); - __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), - 1, a1, a2); - } else if (expr->depth() > 1 || Serializer::enabled() || - length > FastCloneShallowArrayStub::kMaximumClonedLength) { + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { __ li(a0, Operand(Smi::FromInt(flags))); __ Push(a3, a2, a1, a0); - __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); } else { - ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || - FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = - FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - - if (has_fast_elements) { - mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; - } - - FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); __ CallStub(&stub); } @@ -1886,7 +1834,7 @@ } else { __ li(a3, Operand(Smi::FromInt(i))); __ mov(a0, result_register()); - StoreArrayLiteralElementStub stub; + StoreArrayLiteralElementStub stub(isolate()); __ CallStub(&stub); } @@ -1902,7 +1850,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { - ASSERT(expr->target()->IsValidLeftHandSide()); + DCHECK(expr->target()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ Assignment"); @@ -1924,9 +1872,9 @@ break; case NAMED_PROPERTY: if (expr->is_compound()) { - // We need the receiver both on the stack and in the accumulator. - VisitForAccumulatorValue(property->obj()); - __ push(result_register()); + // We need the receiver both on the stack and in the register. + VisitForStackValue(property->obj()); + __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); } else { VisitForStackValue(property->obj()); } @@ -1935,9 +1883,9 @@ // We need the key and receiver on both the stack and in v0 and a1. 
if (expr->is_compound()) { VisitForStackValue(property->obj()); - VisitForAccumulatorValue(property->key()); - __ lw(a1, MemOperand(sp, 0)); - __ push(v0); + VisitForStackValue(property->key()); + __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); + __ lw(LoadIC::NameRegister(), MemOperand(sp, 0)); } else { VisitForStackValue(property->obj()); VisitForStackValue(property->key()); @@ -2033,7 +1981,7 @@ __ bind(&suspend); VisitForAccumulatorValue(expr->generator_object()); - ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); + DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); __ li(a1, Operand(Smi::FromInt(continuation.pos()))); __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset)); __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset)); @@ -2043,7 +1991,7 @@ __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset)); __ Branch(&post_runtime, eq, sp, Operand(a1)); __ push(v0); // generator object - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&post_runtime); __ pop(result_register()); @@ -2074,7 +2022,10 @@ // [sp + 0 * kPointerSize] g Label l_catch, l_try, l_suspend, l_continuation, l_resume; - Label l_next, l_call, l_loop; + Label l_next, l_call; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + // Initial send value is undefined. 
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex); __ Branch(&l_next); @@ -2083,9 +2034,9 @@ __ bind(&l_catch); __ mov(a0, v0); handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); - __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw" - __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter - __ Push(a2, a3, a0); // "throw", iter, except + __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw" + __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter + __ Push(load_name, a3, a0); // "throw", iter, except __ jmp(&l_call); // try { received = %yield result } @@ -2104,14 +2055,14 @@ const int generator_object_depth = kPointerSize + handler_size; __ lw(a0, MemOperand(sp, generator_object_depth)); __ push(a0); // g - ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); + DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); __ li(a1, Operand(Smi::FromInt(l_continuation.pos()))); __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset)); __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset)); __ mov(a1, cp); __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2, kRAHasBeenSaved, kDontSaveFPRegs); - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ pop(v0); // result EmitReturnSequence(); @@ -2121,41 +2072,54 @@ // receiver = iter; f = 'next'; arg = received; __ bind(&l_next); - __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next" - __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter - __ Push(a2, a3, a0); // "next", iter, received + + __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next" + __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter + __ Push(load_name, a3, a0); // "next", iter, received // result = receiver[f](arg); __ bind(&l_call); - __ lw(a1, MemOperand(sp, kPointerSize)); - __ lw(a0, MemOperand(sp, 2 * 
kPointerSize)); + __ lw(load_receiver, MemOperand(sp, kPointerSize)); + __ lw(load_name, MemOperand(sp, 2 * kPointerSize)); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot()))); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallIC(ic, TypeFeedbackId::None()); __ mov(a0, v0); __ mov(a1, a0); __ sw(a1, MemOperand(sp, 2 * kPointerSize)); - CallFunctionStub stub(1, CALL_AS_METHOD); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); __ CallStub(&stub); __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ Drop(1); // The function is still on the stack; drop it. // if (!result.done) goto l_try; - __ bind(&l_loop); - __ mov(a0, v0); - __ push(a0); // save result - __ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done" - CallLoadIC(NOT_CONTEXTUAL); // result.done in v0 + __ Move(load_receiver, v0); + + __ push(load_receiver); // save result + __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->DoneFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // v0=result.done __ mov(a0, v0); Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); CallIC(bool_ic); __ Branch(&l_try, eq, v0, Operand(zero_reg)); // result.value - __ pop(a0); // result - __ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value" - CallLoadIC(NOT_CONTEXTUAL); // result.value in v0 - context()->DropAndPlug(2, v0); // drop iter and g + __ pop(load_receiver); // result + __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->ValueFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // v0=result.value + context()->DropAndPlug(2, v0); // drop iter and g break; } } @@ -2166,7 +2130,7 @@ Expression *value, JSGeneratorObject::ResumeMode resume_mode) { // The value stays in a0, and is ultimately read by the 
resumed generator, as - // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it // is read to throw the value when the resumed generator is already closed. // a1 will hold the generator object until the activation has been resumed. VisitForStackValue(generator); @@ -2245,10 +2209,10 @@ __ push(a2); __ Branch(&push_operand_holes); __ bind(&call_resume); - ASSERT(!result_register().is(a1)); + DCHECK(!result_register().is(a1)); __ Push(a1, result_register()); __ Push(Smi::FromInt(resume_mode)); - __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); // Not reached: the runtime call returns elsewhere. __ stop("not-reached"); @@ -2263,14 +2227,14 @@ } else { // Throw the provided value. __ push(a0); - __ CallRuntime(Runtime::kHiddenThrow, 1); + __ CallRuntime(Runtime::kThrow, 1); } __ jmp(&done); // Throw error if we attempt to operate on a running generator. 
__ bind(&wrong_state); __ push(a1); - __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); __ bind(&done); context()->Plug(result_register()); @@ -2281,14 +2245,14 @@ Label gc_required; Label allocated; - Handle<Map> map(isolate()->native_context()->generator_result_map()); + Handle<Map> map(isolate()->native_context()->iterator_result_map()); __ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT); __ jmp(&allocated); __ bind(&gc_required); __ Push(Smi::FromInt(map->instance_size())); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ lw(context_register(), MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2297,7 +2261,7 @@ __ pop(a2); __ li(a3, Operand(isolate()->factory()->ToBoolean(done))); __ li(t0, Operand(isolate()->factory()->empty_fixed_array())); - ASSERT_EQ(map->instance_size(), 5 * kPointerSize); + DCHECK_EQ(map->instance_size(), 5 * kPointerSize); __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset)); __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); @@ -2316,19 +2280,27 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - __ mov(a0, result_register()); - __ li(a2, Operand(key->value())); - // Call load IC. It has arguments receiver and property name a0 and a2. 
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + __ li(LoadIC::NameRegister(), Operand(key->value())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } } void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); - __ mov(a0, result_register()); - // Call keyed load IC. It has arguments key and receiver in a0 and a1. Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - CallIC(ic, prop->PropertyFeedbackId()); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallIC(ic); + } else { + CallIC(ic, prop->PropertyFeedbackId()); + } } @@ -2355,8 +2327,8 @@ patch_site.EmitJumpIfSmi(scratch1, &smi_case); __ bind(&stub_call); - BinaryOpICStub stub(op, mode); - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + BinaryOpICStub stub(isolate(), op, mode); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done); @@ -2406,7 +2378,7 @@ __ Branch(&done, ne, v0, Operand(zero_reg)); __ Addu(scratch2, right, left); __ Branch(&stub_call, lt, scratch2, Operand(zero_reg)); - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); __ mov(v0, zero_reg); break; } @@ -2433,16 +2405,16 @@ OverwriteMode mode) { __ mov(a0, result_register()); __ pop(a1); - BinaryOpICStub stub(op, mode); + BinaryOpICStub stub(isolate(), op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. 
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(v0); } void FullCodeGenerator::EmitAssignment(Expression* expr) { - ASSERT(expr->IsValidLeftHandSide()); + DCHECK(expr->IsValidReferenceExpression()); // Left-hand side can only be a property, a global or a (parameter or local) // slot. @@ -2465,9 +2437,10 @@ case NAMED_PROPERTY: { __ push(result_register()); // Preserve value. VisitForAccumulatorValue(prop->obj()); - __ mov(a1, result_register()); - __ pop(a0); // Restore value. - __ li(a2, Operand(prop->key()->AsLiteral()->value())); + __ mov(StoreIC::ReceiverRegister(), result_register()); + __ pop(StoreIC::ValueRegister()); // Restore value. + __ li(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); CallStoreIC(); break; } @@ -2475,8 +2448,8 @@ __ push(result_register()); // Preserve value. VisitForStackValue(prop->obj()); VisitForAccumulatorValue(prop->key()); - __ mov(a1, result_register()); - __ Pop(a0, a2); // a0 = restored value. + __ mov(KeyedStoreIC::NameRegister(), result_register()); + __ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -2501,32 +2474,23 @@ } -void FullCodeGenerator::EmitCallStoreContextSlot( - Handle<String> name, StrictMode strict_mode) { - __ li(a1, Operand(name)); - __ li(a0, Operand(Smi::FromInt(strict_mode))); - __ Push(v0, cp, a1, a0); // Value, context, name, strict mode. - __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4); -} - - void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { if (var->IsUnallocated()) { // Global var, const, or let. 
- __ mov(a0, result_register()); - __ li(a2, Operand(var->name())); - __ lw(a1, GlobalObjectOperand()); + __ mov(StoreIC::ValueRegister(), result_register()); + __ li(StoreIC::NameRegister(), Operand(var->name())); + __ lw(StoreIC::ReceiverRegister(), GlobalObjectOperand()); CallStoreIC(); } else if (op == Token::INIT_CONST_LEGACY) { // Const initializers need a write barrier. - ASSERT(!var->IsParameter()); // No const parameters. + DCHECK(!var->IsParameter()); // No const parameters. if (var->IsLookupSlot()) { __ li(a0, Operand(var->name())); __ Push(v0, cp, a0); // Context and name. - __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3); + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); Label skip; MemOperand location = VarOperand(var, a1); __ lw(a2, location); @@ -2538,30 +2502,31 @@ } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. - if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); - } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); - Label assign; - MemOperand location = VarOperand(var, a1); - __ lw(a3, location); - __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); - __ Branch(&assign, ne, a3, Operand(t0)); - __ li(a3, Operand(var->name())); - __ push(a3); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); - // Perform the assignment. 
- __ bind(&assign); - EmitStoreToStackLocalOrContextSlot(var, location); - } + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, a1); + __ lw(a3, location); + __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); + __ Branch(&assign, ne, a3, Operand(t0)); + __ li(a3, Operand(var->name())); + __ push(a3); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + // Perform the assignment. + __ bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); } else if (!var->is_const_mode() || op == Token::INIT_CONST) { - // Assignment to var or initializing assignment to let/const - // in harmony mode. if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); + // Assignment to var. + __ li(a1, Operand(var->name())); + __ li(a0, Operand(Smi::FromInt(strict_mode()))); + __ Push(v0, cp, a1, a0); // Value, context, name, strict mode. + __ CallRuntime(Runtime::kStoreLookupSlot, 4); } else { - ASSERT((var->IsStackAllocated() || var->IsContextSlot())); + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK((var->IsStackAllocated() || var->IsContextSlot())); MemOperand location = VarOperand(var, a1); if (generate_debug_code_ && op == Token::INIT_LET) { // Check for an uninitialized let binding. @@ -2579,15 +2544,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { // Assignment to a property, using a named store IC. Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); - ASSERT(prop->key()->AsLiteral() != NULL); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); // Record source code position before IC call. SetSourcePosition(expr->position()); - __ mov(a0, result_register()); // Load the value. 
- __ li(a2, Operand(prop->key()->AsLiteral()->value())); - __ pop(a1); - + __ mov(StoreIC::ValueRegister(), result_register()); + __ li(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value())); + __ pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->AssignmentFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); @@ -2605,8 +2569,9 @@ // - a0 is the value, // - a1 is the key, // - a2 is the receiver. - __ mov(a0, result_register()); - __ Pop(a2, a1); // a1 = key. + __ mov(KeyedStoreIC::ValueRegister(), result_register()); + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(a0)); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() @@ -2624,13 +2589,15 @@ if (key->IsPropertyName()) { VisitForAccumulatorValue(expr->obj()); + __ Move(LoadIC::ReceiverRegister(), v0); EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(v0); } else { VisitForStackValue(expr->obj()); VisitForAccumulatorValue(expr->key()); - __ pop(a1); + __ Move(LoadIC::NameRegister(), v0); + __ pop(LoadIC::ReceiverRegister()); EmitKeyedPropertyLoad(expr); context()->Plug(v0); } @@ -2645,14 +2612,15 @@ // Code common for calls using the IC. -void FullCodeGenerator::EmitCallWithIC(Call* expr) { +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); - CallFunctionFlags flags; + CallIC::CallType call_type = callee->IsVariableProxy() + ? CallIC::FUNCTION + : CallIC::METHOD; + // Get the target function. - if (callee->IsVariableProxy()) { + if (call_type == CallIC::FUNCTION) { { StackValueContext context(this); EmitVariableLoad(callee->AsVariableProxy()); PrepareForBailout(callee, NO_REGISTERS); @@ -2660,54 +2628,34 @@ // Push undefined as receiver. 
This is patched in the method prologue if it // is a sloppy mode method. __ Push(isolate()->factory()->undefined_value()); - flags = NO_CALL_FUNCTION_FLAGS; } else { // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ lw(v0, MemOperand(sp, 0)); + DCHECK(callee->IsProperty()); + __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); // Push the target function under the receiver. __ lw(at, MemOperand(sp, 0)); __ push(at); __ sw(v0, MemOperand(sp, kPointerSize)); - flags = CALL_AS_METHOD; } - // Load the arguments. - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, flags); - __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - - // Restore context register. - __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, v0); + EmitCall(expr, call_type); } // Code common for calls using the IC. -void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, - Expression* key) { +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { // Load the key. VisitForAccumulatorValue(key); Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); // Load the function from the receiver. 
- ASSERT(callee->IsProperty()); - __ lw(a1, MemOperand(sp, 0)); + DCHECK(callee->IsProperty()); + __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); + __ Move(LoadIC::NameRegister(), v0); EmitKeyedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2716,28 +2664,12 @@ __ push(at); __ sw(v0, MemOperand(sp, kPointerSize)); - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, CALL_AS_METHOD); - __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - // Restore context register. - __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, v0); + EmitCall(expr, CallIC::METHOD); } -void FullCodeGenerator::EmitCallWithStub(Call* expr) { - // Code common for calls using the call stub. +void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); { PreservePositionScope scope(masm()->positions_recorder()); @@ -2745,19 +2677,17 @@ VisitForStackValue(args->at(i)); } } - // Record source position for debugger. - SetSourcePosition(expr->position()); - Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); - __ li(a2, FeedbackVector()); + // Record source position of the IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); - - // Record call targets in unoptimized code. 
- CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); + RecordJSReturnSite(expr); // Restore context register. __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2785,7 +2715,7 @@ // Do the runtime call. __ Push(t2, t1, t0, a1); - __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); } @@ -2831,7 +2761,7 @@ } // Record source position for debugger. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); RecordJSReturnSite(expr); @@ -2839,7 +2769,7 @@ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, v0); } else if (call_type == Call::GLOBAL_CALL) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else if (call_type == Call::LOOKUP_SLOT_CALL) { // Call to a lookup slot (dynamically introduced variable). VariableProxy* proxy = callee->AsVariableProxy(); @@ -2848,16 +2778,16 @@ { PreservePositionScope scope(masm()->positions_recorder()); // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); } __ bind(&slow); // Call the runtime to find the function to call (returned in v0) // and the object holding it (returned in v1). 
- ASSERT(!context_register().is(a2)); + DCHECK(!context_register().is(a2)); __ li(a2, Operand(proxy->name())); __ Push(context_register(), a2); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ Push(v0, v1); // Function, receiver. // If fast case code has been generated, emit code to push the @@ -2878,19 +2808,19 @@ // The receiver is either the global receiver or an object found // by LoadContextSlot. - EmitCallWithStub(expr); + EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(property->obj()); } if (property->key()->IsPropertyName()) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else { - EmitKeyedCallWithIC(expr, property->key()); + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { - ASSERT(call_type == Call::OTHER_CALL); + DCHECK(call_type == Call::OTHER_CALL); // Call to an arbitrary expression not handled specially above. { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(callee); @@ -2898,12 +2828,12 @@ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); __ push(a1); // Emit function call. - EmitCallWithStub(expr); + EmitCall(expr); } #ifdef DEBUG // RecordJSReturnSite should have been called. - ASSERT(expr->return_is_recorded_); + DCHECK(expr->return_is_recorded_); #endif } @@ -2935,21 +2865,17 @@ __ lw(a1, MemOperand(sp, arg_count * kPointerSize)); // Record call targets in unoptimized code. 
- Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); if (FLAG_pretenuring_call_new) { - StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(), - isolate()->factory()->NewAllocationSite()); - ASSERT(expr->AllocationSiteFeedbackSlot() == + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == expr->CallNewFeedbackSlot() + 1); } __ li(a2, FeedbackVector()); __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot()))); - CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(v0); } @@ -2957,7 +2883,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2978,7 +2904,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2999,7 +2925,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3030,7 +2956,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3053,7 +2979,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + 
DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3078,7 +3004,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3123,7 +3049,7 @@ __ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); // Calculate the end of the descriptor array. __ mov(a2, t0); - __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize); + __ sll(t1, a3, kPointerSizeLog2); __ Addu(a2, a2, t1); // Loop through all the keys in the descriptor array. If one of these is the @@ -3165,7 +3091,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3188,7 +3114,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3218,7 +3144,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3241,7 +3167,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3262,7 +3188,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label materialize_true, materialize_false; Label* if_true = NULL; @@ -3294,7 +3220,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + 
DCHECK(args->length() == 2); // Load the two objects into registers and perform the comparison. VisitForStackValue(args->at(0)); @@ -3317,21 +3243,21 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); // ArgumentsAccessStub expects the key in a1 and the formal // parameter count in a0. VisitForAccumulatorValue(args->at(0)); __ mov(a1, v0); __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); __ CallStub(&stub); context()->Plug(v0); } void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label exit; // Get the number of formal parameters. __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); @@ -3353,7 +3279,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); Label done, null, function, non_function_constructor; VisitForAccumulatorValue(args->at(0)); @@ -3411,33 +3337,11 @@ } -void FullCodeGenerator::EmitLog(CallRuntime* expr) { - // Conditionally generate a log call. - // Args: - // 0 (literal string): The type of logging (corresponds to the flags). - // This is used to determine whether or not to generate the log call. - // 1 (string): Format string. Access the string at argument index 2 - // with '%2s' (see Logger::LogRuntime for all the formats). - // 2 (array): Arguments to the format string. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 3); - if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) { - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - __ CallRuntime(Runtime::kHiddenLog, 2); - } - - // Finally, we're expected to leave a value on the top of the stack. - __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); - context()->Plug(v0); -} - - void FullCodeGenerator::EmitSubString(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - SubStringStub stub; + SubStringStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3448,9 +3352,9 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - RegExpExecStub stub; + RegExpExecStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 4); + DCHECK(args->length() == 4); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3462,7 +3366,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -3482,8 +3386,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); - ASSERT_NE(NULL, args->at(1)->AsLiteral()); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -3521,7 +3425,7 @@ } __ bind(¬_date_object); - __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0); + __ CallRuntime(Runtime::kThrowNotDateError, 0); __ bind(&done); context()->Plug(v0); } @@ -3529,7 +3433,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = v0; Register index = a1; @@ -3566,7 +3470,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = v0; Register index = a1; @@ -3604,10 +3508,10 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - MathPowStub stub(MathPowStub::ON_STACK); + MathPowStub stub(isolate(), MathPowStub::ON_STACK); __ CallStub(&stub); context()->Plug(v0); } @@ -3615,7 +3519,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); // Load the object. VisitForAccumulatorValue(args->at(1)); // Load the value. @@ -3644,13 +3548,13 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 1); + DCHECK_EQ(args->length(), 1); // Load the argument into a0 and call the stub. 
VisitForAccumulatorValue(args->at(0)); __ mov(a0, result_register()); - NumberToStringStub stub; + NumberToStringStub stub(isolate()); __ CallStub(&stub); context()->Plug(v0); } @@ -3658,7 +3562,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3677,7 +3581,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3724,7 +3628,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3773,13 +3677,13 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); __ pop(a1); __ mov(a0, result_register()); // StringAddStub requires args in a0, a1. - StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED); + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); __ CallStub(&stub); context()->Plug(v0); } @@ -3787,40 +3691,20 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - StringCompareStub stub; + StringCompareStub stub(isolate()); __ CallStub(&stub); context()->Plug(v0); } -void FullCodeGenerator::EmitMathLog(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); - context()->Plug(v0); -} - - -void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. - ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); - context()->Plug(v0); -} - - void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() >= 2); + DCHECK(args->length() >= 2); int arg_count = args->length() - 2; // 2 ~ receiver and function. for (int i = 0; i < arg_count + 1; i++) { @@ -3851,9 +3735,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { - RegExpConstructResultStub stub; + RegExpConstructResultStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(2)); @@ -3867,9 +3751,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); - ASSERT_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle<FixedArray> jsfunction_result_caches( @@ -3912,7 +3796,7 @@ __ bind(¬_found); // Call runtime to perform the lookup. 
__ Push(cache, key); - __ CallRuntime(Runtime::kHiddenGetFromCache, 2); + __ CallRuntime(Runtime::kGetFromCache, 2); __ bind(&done); context()->Plug(v0); @@ -3942,7 +3826,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); __ AssertString(v0); @@ -3960,7 +3844,7 @@ empty_separator_loop, one_char_separator_loop, one_char_separator_loop_entry, long_separator_loop; ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(0)); @@ -4121,7 +4005,7 @@ __ CopyBytes(string, result_pos, string_length, scratch1); // End while (element < elements_end). __ Branch(&empty_separator_loop, lt, element, Operand(elements_end)); - ASSERT(result.is(v0)); + DCHECK(result.is(v0)); __ Branch(&done); // One-character separator case. @@ -4153,7 +4037,7 @@ __ CopyBytes(string, result_pos, string_length, scratch1); // End while (element < elements_end). __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end)); - ASSERT(result.is(v0)); + DCHECK(result.is(v0)); __ Branch(&done); // Long separator case (separator is more than one character). Entry is at the @@ -4182,7 +4066,7 @@ __ CopyBytes(string, result_pos, string_length, scratch1); // End while (element < elements_end). 
__ Branch(&long_separator_loop, lt, element, Operand(elements_end)); - ASSERT(result.is(v0)); + DCHECK(result.is(v0)); __ Branch(&done); __ bind(&bailout); @@ -4192,6 +4076,17 @@ } +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ li(at, Operand(debug_is_active)); + __ lb(v0, MemOperand(at)); + __ SmiTag(v0); + context()->Plug(v0); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->function() != NULL && expr->function()->intrinsic_type == Runtime::INLINE) { @@ -4206,12 +4101,20 @@ if (expr->is_jsruntime()) { // Push the builtins object as the receiver. - __ lw(a0, GlobalObjectOperand()); - __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset)); - __ push(a0); + Register receiver = LoadIC::ReceiverRegister(); + __ lw(receiver, GlobalObjectOperand()); + __ lw(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset)); + __ push(receiver); + // Load the function from the receiver. - __ li(a2, Operand(expr->name())); - CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + __ li(LoadIC::NameRegister(), Operand(expr->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } // Push the target function under the receiver. __ lw(at, MemOperand(sp, 0)); @@ -4226,7 +4129,7 @@ // Record source position of the IC call. 
SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); @@ -4265,7 +4168,7 @@ Variable* var = proxy->var(); // Delete of an unqualified identifier is disallowed in strict mode // but "delete this" is allowed. - ASSERT(strict_mode() == SLOPPY || var->is_this()); + DCHECK(strict_mode() == SLOPPY || var->is_this()); if (var->IsUnallocated()) { __ lw(a2, GlobalObjectOperand()); __ li(a1, Operand(var->name())); @@ -4280,10 +4183,10 @@ } else { // Non-global variable. Call the runtime to try to delete from the // context where the variable was introduced. - ASSERT(!context_register().is(a2)); + DCHECK(!context_register().is(a2)); __ li(a2, Operand(var->name())); __ Push(context_register(), a2); - __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); context()->Plug(v0); } } else { @@ -4321,7 +4224,7 @@ // for control and plugging the control flow into the context, // because we need to prepare a pair of extra administrative AST ids // for the optimizing compiler. - ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue()); + DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); Label materialize_true, materialize_false, done; VisitForControl(expr->expression(), &materialize_false, @@ -4358,7 +4261,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { - ASSERT(expr->expression()->IsValidLeftHandSide()); + DCHECK(expr->expression()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ CountOperation"); SetSourcePosition(expr->position()); @@ -4377,7 +4280,7 @@ // Evaluate expression and get value. 
if (assign_type == VARIABLE) { - ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { @@ -4387,15 +4290,15 @@ __ push(at); } if (assign_type == NAMED_PROPERTY) { - // Put the object both on the stack and in the accumulator. - VisitForAccumulatorValue(prop->obj()); - __ push(v0); + // Put the object both on the stack and in the register. + VisitForStackValue(prop->obj()); + __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); EmitNamedPropertyLoad(prop); } else { VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ lw(a1, MemOperand(sp, 0)); - __ push(v0); + VisitForStackValue(prop->key()); + __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); + __ lw(LoadIC::NameRegister(), MemOperand(sp, 0)); EmitKeyedPropertyLoad(prop); } } @@ -4448,7 +4351,7 @@ __ jmp(&stub_call); __ bind(&slow); } - ToNumberStub convert_stub; + ToNumberStub convert_stub(isolate()); __ CallStub(&convert_stub); // Save result for postfix expressions. @@ -4478,8 +4381,8 @@ // Record position before stub call. SetSourcePosition(expr->position()); - BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); - CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); + BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4506,9 +4409,10 @@ } break; case NAMED_PROPERTY: { - __ mov(a0, result_register()); // Value. - __ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name. - __ pop(a1); // Receiver. 
+ __ mov(StoreIC::ValueRegister(), result_register()); + __ li(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); + __ pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->CountStoreFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4521,8 +4425,8 @@ break; } case KEYED_PROPERTY: { - __ mov(a0, result_register()); // Value. - __ Pop(a2, a1); // a1 = key, a2 = receiver. + __ mov(KeyedStoreIC::ValueRegister(), result_register()); + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -4542,13 +4446,17 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { - ASSERT(!context()->IsEffect()); - ASSERT(!context()->IsTest()); + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); VariableProxy* proxy = expr->AsVariableProxy(); if (proxy != NULL && proxy->var()->IsUnallocated()) { Comment cmnt(masm_, "[ Global variable"); - __ lw(a0, GlobalObjectOperand()); - __ li(a2, Operand(proxy->name())); + __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ li(LoadIC::NameRegister(), Operand(proxy->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } // Use a regular load, not a contextual load, to avoid a reference // error. CallLoadIC(NOT_CONTEXTUAL); @@ -4560,12 +4468,12 @@ // Generate code for loading from variables potentially shadowed // by eval-introduced variables. 
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ li(a0, Operand(proxy->name())); __ Push(cp, a0); - __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); PrepareForBailout(expr, TOS_REG); __ bind(&done); @@ -4591,12 +4499,13 @@ } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { __ JumpIfSmi(v0, if_true); __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); Split(eq, v0, Operand(at), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_string())) { + } else if (String::Equals(check, factory->string_string())) { __ JumpIfSmi(v0, if_false); // Check for undetectable objects => false. 
__ GetObjectType(v0, v0, a1); @@ -4605,20 +4514,16 @@ __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->symbol_string())) { + } else if (String::Equals(check, factory->symbol_string())) { __ JumpIfSmi(v0, if_false); __ GetObjectType(v0, v0, a1); Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_string())) { + } else if (String::Equals(check, factory->boolean_string())) { __ LoadRoot(at, Heap::kTrueValueRootIndex); __ Branch(if_true, eq, v0, Operand(at)); __ LoadRoot(at, Heap::kFalseValueRootIndex); Split(eq, v0, Operand(at), if_true, if_false, fall_through); - } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_string())) { - __ LoadRoot(at, Heap::kNullValueRootIndex); - Split(eq, v0, Operand(at), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_string())) { + } else if (String::Equals(check, factory->undefined_string())) { __ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ Branch(if_true, eq, v0, Operand(at)); __ JumpIfSmi(v0, if_false); @@ -4627,19 +4532,17 @@ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset)); __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_string())) { + } else if (String::Equals(check, factory->function_string())) { __ JumpIfSmi(v0, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ GetObjectType(v0, v0, a1); __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE)); Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_string())) { + } else if (String::Equals(check, factory->object_string())) { __ JumpIfSmi(v0, if_false); - if (!FLAG_harmony_typeof) { - __ LoadRoot(at, 
Heap::kNullValueRootIndex); - __ Branch(if_true, eq, v0, Operand(at)); - } + __ LoadRoot(at, Heap::kNullValueRootIndex); + __ Branch(if_true, eq, v0, Operand(at)); // Check for JS objects => true. __ GetObjectType(v0, v0, a1); __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); @@ -4686,7 +4589,7 @@ case Token::INSTANCEOF: { VisitForStackValue(expr->right()); - InstanceofStub stub(InstanceofStub::kNoFlags); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); __ CallStub(&stub); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); // The stub returns 0 for true. @@ -4770,7 +4673,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { - ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); + DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); __ sw(value, MemOperand(fp, frame_offset)); } @@ -4795,7 +4698,7 @@ // code. Fetch it from the context. __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX)); } else { - ASSERT(declaration_scope->is_function_scope()); + DCHECK(declaration_scope->is_function_scope()); __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); } __ push(at); @@ -4806,12 +4709,12 @@ // Non-local control flow support. void FullCodeGenerator::EnterFinallyBlock() { - ASSERT(!result_register().is(a1)); + DCHECK(!result_register().is(a1)); // Store result register while executing finally block. __ push(result_register()); // Cook return address in link register to stack (smi encoded Code* delta). __ Subu(a1, ra, Operand(masm_->CodeObject())); - ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); + DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(0 == kSmiTag); __ Addu(a1, a1, Operand(a1)); // Convert to smi. @@ -4841,7 +4744,7 @@ void FullCodeGenerator::ExitFinallyBlock() { - ASSERT(!result_register().is(a1)); + DCHECK(!result_register().is(a1)); // Restore pending message from stack. 
__ pop(a1); ExternalReference pending_message_script = @@ -4867,7 +4770,7 @@ // Uncook return address and return. __ pop(result_register()); - ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); + DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize); __ sra(a1, a1, 1); // Un-smi-tag value. __ Addu(at, a1, Operand(masm_->CodeObject())); __ Jump(at); @@ -4955,16 +4858,16 @@ Address branch_address = pc - 6 * kInstrSize; Address pc_immediate_load_address = pc - 4 * kInstrSize; - ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize))); + DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize))); if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) { - ASSERT(reinterpret_cast<uint32_t>( + DCHECK(reinterpret_cast<uint32_t>( Assembler::target_address_at(pc_immediate_load_address)) == reinterpret_cast<uint32_t>( isolate->builtins()->InterruptCheck()->entry())); return INTERRUPT; } - ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address))); + DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address))); if (reinterpret_cast<uint32_t>( Assembler::target_address_at(pc_immediate_load_address)) == @@ -4973,7 +4876,7 @@ return ON_STACK_REPLACEMENT; } - ASSERT(reinterpret_cast<uint32_t>( + DCHECK(reinterpret_cast<uint32_t>( Assembler::target_address_at(pc_immediate_load_address)) == reinterpret_cast<uint32_t>( isolate->builtins()->OsrAfterStackCheck()->entry())); diff -Nru nodejs-0.11.13/deps/v8/src/mips/ic-mips.cc nodejs-0.11.15/deps/v8/src/mips/ic-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/ic-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/ic-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "codegen.h" -#include "code-stubs.h" -#include "ic-inl.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -59,47 +36,6 @@ } -// Generated code falls through if the receiver is a regular non-global -// JS object with slow properties and no interceptors. -static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register elements, - Register scratch0, - Register scratch1, - Label* miss) { - // Register usage: - // receiver: holds the receiver on entry and is unchanged. - // elements: holds the property dictionary on fall through. - // Scratch registers: - // scratch0: used to holds the receiver map. - // scratch1: used to holds the receiver instance type, receiver bit mask - // and elements map. - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the receiver is a valid JS object. - __ GetObjectType(receiver, scratch0, scratch1); - __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE)); - - // If this assert fails, we have to check upper bound too. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); - - GenerateGlobalInstanceTypeCheck(masm, scratch1, miss); - - // Check that the global object does not require access checks. 
- __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset)); - __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) | - (1 << Map::kHasNamedInterceptor))); - __ Branch(miss, ne, scratch1, Operand(zero_reg)); - - __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex); - __ Branch(miss, ne, scratch1, Operand(scratch0)); -} - - // Helper function used from LoadIC GenerateNormal. // // elements: Property dictionary. It is not clobbered if a jump to the miss @@ -236,7 +172,7 @@ // In the case that the object is a value-wrapper object, // we enter the runtime system to make sure that indexing into string // objects work as intended. - ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); + DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE)); } @@ -340,16 +276,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a2 : name - // -- ra : return address - // -- a0 : receiver - // ----------------------------------- + // The return address is in lr. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(name.is(a2)); // Probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, a0, a2, a3, t0, t1, t2); + masm, flags, receiver, name, a3, t0, t1, t2); // Cache miss: Jump to runtime. 
GenerateMiss(masm); @@ -357,37 +294,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a2 : name - // -- lr : return address - // -- a0 : receiver - // ----------------------------------- - Label miss; + Register dictionary = a0; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); - GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss); + Label slow; - // a1: elements - GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0); + __ lw(dictionary, + FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), v0, a3, t0); __ Ret(); - // Cache miss: Jump to runtime. - __ bind(&miss); - GenerateMiss(masm); + // Dictionary load failed, go slow (but don't miss). + __ bind(&slow); + GenerateRuntimeGetProperty(masm); } +// A register that isn't one of the parameters to the load ic. +static const Register LoadIC_TempRegister() { return a3; } + + void LoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a2 : name - // -- ra : return address - // -- a0 : receiver - // ----------------------------------- + // The return address is in ra. Isolate* isolate = masm->isolate(); __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0); - __ mov(a3, a0); - __ Push(a3, a2); + __ mov(LoadIC_TempRegister(), ReceiverRegister()); + __ Push(LoadIC_TempRegister(), NameRegister()); // Perform tail call to the entry. ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); @@ -396,14 +331,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- a2 : name - // -- ra : return address - // -- a0 : receiver - // ----------------------------------- + // The return address is in ra. 
- __ mov(a3, a0); - __ Push(a3, a2); + __ mov(LoadIC_TempRegister(), ReceiverRegister()); + __ Push(LoadIC_TempRegister(), NameRegister()); __ TailCallRuntime(Runtime::kGetProperty, 2, 1); } @@ -500,56 +431,57 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + // The return address is in ra. + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(key.is(a2)); + Label slow, notin; MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, ¬in, &slow); + GenerateMappedArgumentsLookup( + masm, receiver, key, a0, a3, t0, ¬in, &slow); __ Ret(USE_DELAY_SLOT); __ lw(v0, mapped_location); __ bind(¬in); - // The unmapped lookup expects that the parameter map is in a2. + // The unmapped lookup expects that the parameter map is in a0. MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow); - __ lw(a2, unmapped_location); + GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow); + __ lw(a0, unmapped_location); __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); - __ Branch(&slow, eq, a2, Operand(a3)); + __ Branch(&slow, eq, a0, Operand(a3)); __ Ret(USE_DELAY_SLOT); - __ mov(v0, a2); + __ mov(v0, a0); __ bind(&slow); GenerateMiss(masm); } void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- lr : return address - // ----------------------------------- + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register value = ValueRegister(); + DCHECK(value.is(a0)); + Label slow, notin; // Store address is returned in register (of MemOperand) mapped_location. 
- MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, ¬in, &slow); - __ sw(a0, mapped_location); - __ mov(t5, a0); - ASSERT_EQ(mapped_location.offset(), 0); + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, a3, t0, t1, ¬in, &slow); + __ sw(value, mapped_location); + __ mov(t5, value); + DCHECK_EQ(mapped_location.offset(), 0); __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved, kDontSaveFPRegs); __ Ret(USE_DELAY_SLOT); - __ mov(v0, a0); // (In delay slot) return the value stored in v0. + __ mov(v0, value); // (In delay slot) return the value stored in v0. __ bind(¬in); // The unmapped lookup expects that the parameter map is in a3. // Store address is returned in register (of MemOperand) unmapped_location. MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow); - __ sw(a0, unmapped_location); - __ mov(t5, a0); - ASSERT_EQ(unmapped_location.offset(), 0); + GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow); + __ sw(value, unmapped_location); + __ mov(t5, value); + DCHECK_EQ(unmapped_location.offset(), 0); __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved, kDontSaveFPRegs); __ Ret(USE_DELAY_SLOT); @@ -560,16 +492,12 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + // The return address is in ra. Isolate* isolate = masm->isolate(); __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0); - __ Push(a1, a0); + __ Push(ReceiverRegister(), NameRegister()); // Perform tail call to the entry. 
ExternalReference ref = @@ -579,30 +507,51 @@ } +// IC register specifications +const Register LoadIC::ReceiverRegister() { return a1; } +const Register LoadIC::NameRegister() { return a2; } + + +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return a0; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return a3; +} + + +const Register StoreIC::ReceiverRegister() { return a1; } +const Register StoreIC::NameRegister() { return a2; } +const Register StoreIC::ValueRegister() { return a0; } + + +const Register KeyedStoreIC::MapRegister() { + return a3; +} + + void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + // The return address is in ra. - __ Push(a1, a0); + __ Push(ReceiverRegister(), NameRegister()); __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); } void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + // The return address is in ra. Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; - Register key = a0; - Register receiver = a1; + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(key.is(a2)); + DCHECK(receiver.is(a1)); Isolate* isolate = masm->isolate(); @@ -613,15 +562,14 @@ // where a numeric string is converted to a smi. GenerateKeyedLoadReceiverCheck( - masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow); + masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. 
- __ CheckFastElements(a2, a3, &check_number_dictionary); + __ CheckFastElements(a0, a3, &check_number_dictionary); GenerateFastArrayLoad( - masm, receiver, key, t0, a3, a2, v0, NULL, &slow); - - __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3); + masm, receiver, key, a0, a3, t0, v0, NULL, &slow); + __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3); __ Ret(); __ bind(&check_number_dictionary); @@ -629,42 +577,41 @@ __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset)); // Check whether the elements is a number dictionary. - // a0: key // a3: elements map // t0: elements __ LoadRoot(at, Heap::kHashTableMapRootIndex); __ Branch(&slow, ne, a3, Operand(at)); - __ sra(a2, a0, kSmiTagSize); - __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1); + __ sra(a0, key, kSmiTagSize); + __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1); __ Ret(); - // Slow case, key and receiver still in a0 and a1. + // Slow case, key and receiver still in a2 and a1. __ bind(&slow); __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, - a2, + t0, a3); GenerateRuntimeGetProperty(masm); __ bind(&check_name); - GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow); + GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow); GenerateKeyedLoadReceiverCheck( - masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow); + masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow); // If the receiver is a fast-case object, check the keyed lookup // cache. Otherwise probe the dictionary. - __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset)); + __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHashTableMapRootIndex); __ Branch(&probe_dictionary, eq, t0, Operand(at)); // Load the map of the receiver, compute the keyed lookup cache hash // based on 32 bits of the map pointer and the name hash. 
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ sra(a3, a2, KeyedLookupCache::kMapHashShift); - __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset)); + __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ sra(a3, a0, KeyedLookupCache::kMapHashShift); + __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset)); __ sra(at, t0, Name::kHashShift); __ xor_(a3, a3, at); int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; @@ -684,21 +631,19 @@ for (int i = 0; i < kEntriesPerBucket - 1; i++) { Label try_next_entry; __ lw(t1, MemOperand(t0, kPointerSize * i * 2)); - __ Branch(&try_next_entry, ne, a2, Operand(t1)); + __ Branch(&try_next_entry, ne, a0, Operand(t1)); __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1))); - __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1)); + __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1)); __ bind(&try_next_entry); } __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2)); - __ Branch(&slow, ne, a2, Operand(t1)); - __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1))); __ Branch(&slow, ne, a0, Operand(t1)); + __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1))); + __ Branch(&slow, ne, key, Operand(t1)); // Get field offset. - // a0 : key - // a1 : receiver - // a2 : receiver's map + // a0 : receiver's map // a3 : lookup cache index ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(isolate); @@ -710,7 +655,7 @@ __ sll(at, a3, kPointerSizeLog2); __ addu(at, t0, at); __ lw(t1, MemOperand(at, kPointerSize * i)); - __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); + __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset)); __ Subu(t1, t1, t2); __ Branch(&property_array_property, ge, t1, Operand(zero_reg)); if (i != 0) { @@ -720,28 +665,28 @@ // Load in-object property. 
__ bind(&load_in_object_property); - __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset)); + __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset)); __ addu(t2, t2, t1); // Index from start of object. - __ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag. + __ Subu(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. __ sll(at, t2, kPointerSizeLog2); - __ addu(at, a1, at); + __ addu(at, receiver, at); __ lw(v0, MemOperand(at)); __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, - a2, + t0, a3); __ Ret(); // Load property array property. __ bind(&property_array_property); - __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset)); - __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag); - __ sll(t0, t1, kPointerSizeLog2); - __ Addu(t0, t0, a1); - __ lw(v0, MemOperand(t0)); + __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag); + __ sll(v0, t1, kPointerSizeLog2); + __ Addu(v0, v0, receiver); + __ lw(v0, MemOperand(v0)); __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, - a2, + t0, a3); __ Ret(); @@ -749,17 +694,15 @@ // Do a quick inline probe of the receiver's dictionary, if it // exists. __ bind(&probe_dictionary); - // a1: receiver - // a0: key // a3: elements - __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset)); - GenerateGlobalInstanceTypeCheck(masm, a2, &slow); + __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); + GenerateGlobalInstanceTypeCheck(masm, a0, &slow); // Load the property to v0. 
- GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0); + GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0); __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, - a2, + t0, a3); __ Ret(); @@ -771,17 +714,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key (index) - // -- a1 : receiver - // ----------------------------------- + // Return address is in ra. Label miss; - Register receiver = a1; - Register index = a0; + Register receiver = ReceiverRegister(); + Register index = NameRegister(); Register scratch = a3; Register result = v0; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); StringCharAtGenerator char_at_generator(receiver, index, @@ -804,20 +744,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ---------- S t a t e -------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(a2, a1, a0); - __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes. + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode. - __ Push(a1, a0); + __ Push(a0); - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } @@ -918,7 +850,7 @@ // We have to see if the double version of the hole is present. If so // go to the runtime. 
__ Addu(address, elements, - Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) + Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset - kHeapObjectTag)); __ sll(at, key, kPointerSizeLog2); __ addu(address, address, at); @@ -956,10 +888,10 @@ receiver_map, t0, slow); - ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, receiver_map, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -970,10 +902,9 @@ receiver_map, t0, slow); - ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, - slow); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, receiver_map, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -986,9 +917,9 @@ receiver_map, t0, slow); - ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); + ElementsTransitionGenerator::GenerateDoubleToObject( + masm, receiver, key, value, receiver_map, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -1007,9 +938,10 @@ Label array, extra, check_if_double_array; // Register usage. 
- Register value = a0; - Register key = a1; - Register receiver = a2; + Register value = ValueRegister(); + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(value.is(a0)); Register receiver_map = a3; Register elements_map = t2; Register elements = t3; // Elements array of the receiver. @@ -1090,34 +1022,37 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + // Return address is in ra. Label slow; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch1 = a3; + Register scratch2 = t0; + DCHECK(!scratch1.is(receiver) && !scratch1.is(key)); + DCHECK(!scratch2.is(receiver) && !scratch2.is(key)); + // Check that the receiver isn't a smi. - __ JumpIfSmi(a1, &slow); + __ JumpIfSmi(receiver, &slow); // Check that the key is an array index, that is Uint32. - __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask)); + __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask)); __ Branch(&slow, ne, t0, Operand(zero_reg)); // Get the map of the receiver. - __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that it has indexed interceptor and access checks // are not enabled for this object. - __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset)); - __ And(a3, a3, Operand(kSlowCaseBitFieldMask)); - __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor)); + __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); + __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask)); + __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor)); // Everything is fine, call runtime. - __ Push(a1, a0); // Receiver, key. + __ Push(receiver, key); // Receiver, key. // Perform tail call to the entry. 
__ TailCallExternalReference(ExternalReference( - IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1); + IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1); __ bind(&slow); GenerateMiss(masm); @@ -1125,15 +1060,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(a2, a1, a0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -1142,15 +1070,8 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- a0 : value - // -- a2 : key - // -- a1 : receiver - // -- ra : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. - __ Push(a1, a2, a0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. @@ -1161,16 +1082,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // ----------------------------------- - // Push receiver, key and value for runtime call. // We can't use MultiPush as the order of the registers is important. - __ Push(a2, a1, a0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. 
@@ -1182,17 +1096,17 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(name.is(a2)); + DCHECK(ValueRegister().is(a0)); // Get the receiver from the stack and probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, a1, a2, a3, t0, t1, t2); + masm, flags, receiver, name, a3, t0, t1, t2); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1200,14 +1114,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - - __ Push(a1, a2, a0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); // Perform tail call to the entry. 
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); @@ -1216,17 +1123,18 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- Label miss; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + Register dictionary = a3; + DCHECK(receiver.is(a1)); + DCHECK(name.is(a2)); + DCHECK(value.is(a0)); - GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss); + __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1); + GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1); __ Ret(); @@ -1239,21 +1147,13 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - - __ Push(a1, a2, a0); + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); - __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes. __ li(a0, Operand(Smi::FromInt(strict_mode))); - __ Push(a1, a0); + __ Push(a0); // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } @@ -1336,19 +1236,19 @@ CodePatcher patcher(patch_address, 2); Register reg = Register::from_code(Assembler::GetRs(instr_at_patch)); if (check == ENABLE_INLINED_SMI_CHECK) { - ASSERT(Assembler::IsAndImmediate(instr_at_patch)); - ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch)); + DCHECK(Assembler::IsAndImmediate(instr_at_patch)); + DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch)); patcher.masm()->andi(at, reg, kSmiTagMask); } else { - ASSERT(check == DISABLE_INLINED_SMI_CHECK); - ASSERT(Assembler::IsAndImmediate(instr_at_patch)); + DCHECK(check == DISABLE_INLINED_SMI_CHECK); + DCHECK(Assembler::IsAndImmediate(instr_at_patch)); patcher.masm()->andi(at, reg, 0); } - ASSERT(Assembler::IsBranch(branch_instr)); + DCHECK(Assembler::IsBranch(branch_instr)); if (Assembler::IsBeq(branch_instr)) { patcher.ChangeBranchCondition(ne); } else { - ASSERT(Assembler::IsBne(branch_instr)); + DCHECK(Assembler::IsBne(branch_instr)); patcher.ChangeBranchCondition(eq); } } diff -Nru nodejs-0.11.13/deps/v8/src/mips/lithium-codegen-mips.cc nodejs-0.11.15/deps/v8/src/mips/lithium-codegen-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/lithium-codegen-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/lithium-codegen-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -25,13 +25,13 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#include "v8.h" +#include "src/v8.h" -#include "mips/lithium-codegen-mips.h" -#include "mips/lithium-gap-resolver-mips.h" -#include "code-stubs.h" -#include "stub-cache.h" -#include "hydrogen-osr.h" +#include "src/code-stubs.h" +#include "src/hydrogen-osr.h" +#include "src/mips/lithium-codegen-mips.h" +#include "src/mips/lithium-gap-resolver-mips.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -64,7 +64,7 @@ bool LCodeGen::GenerateCode() { LPhase phase("Z_Code generation", chunk()); - ASSERT(is_unused()); + DCHECK(is_unused()); status_ = GENERATING; // Open a frame scope to indicate that there is a frame on the stack. The @@ -81,24 +81,17 @@ void LCodeGen::FinishCode(Handle<Code> code) { - ASSERT(is_done()); + DCHECK(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); PopulateDeoptimizationData(code); - info()->CommitDependencies(code); -} - - -void LChunkBuilder::Abort(BailoutReason reason) { - info()->set_bailout_reason(reason); - status_ = ABORTED; } void LCodeGen::SaveCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -113,8 +106,8 @@ void LCodeGen::RestoreCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Restore clobbered callee double registers"); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -129,7 +122,7 @@ bool LCodeGen::GeneratePrologue() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (info()->IsOptimizing()) { ProfileEntryHookStub::MaybeCallEntryHook(masm_); @@ 
-159,7 +152,7 @@ __ Branch(&ok, ne, a2, Operand(at)); __ lw(a2, GlobalObjectOperand()); - __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); + __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); __ sw(a2, MemOperand(sp, receiver_offset)); @@ -169,7 +162,11 @@ info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } frame_is_built_ = true; info_->AddNoFrameRange(0, masm_->pc_offset()); } @@ -201,13 +198,16 @@ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is in a1. if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ push(a1); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } RecordSafepoint(Safepoint::kNoLazyDeopt); // Context is returned in both v0. It replaces the context passed to us. @@ -227,8 +227,15 @@ MemOperand target = ContextOperand(cp, var->index()); __ sw(a0, target); // Update the write barrier. This clobbers a3 and a0. 
- __ RecordWriteContextSlot( - cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, a0, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } Comment(";;; End allocate local context"); @@ -254,7 +261,7 @@ // Adjust the frame size, subsuming the unoptimized frame into the // optimized frame. int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); - ASSERT(slots >= 0); + DCHECK(slots >= 0); __ Subu(sp, sp, Operand(slots * kPointerSize)); } @@ -270,7 +277,7 @@ bool LCodeGen::GenerateDeferredCode() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; @@ -288,8 +295,8 @@ __ bind(code->entry()); if (NeedsDeferredFrame()) { Comment(";;; Build frame"); - ASSERT(!frame_is_built_); - ASSERT(info()->IsStub()); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); frame_is_built_ = true; __ MultiPush(cp.bit() | fp.bit() | ra.bit()); __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); @@ -300,7 +307,7 @@ code->Generate(); if (NeedsDeferredFrame()) { Comment(";;; Destroy frame"); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); __ pop(at); __ MultiPop(cp.bit() | fp.bit() | ra.bit()); frame_is_built_ = false; @@ -317,45 +324,74 @@ bool LCodeGen::GenerateDeoptJumpTable() { if (deopt_jump_table_.length() > 0) { + Label needs_frame, call_deopt_entry; + Comment(";;; -------------------- Jump table --------------------"); - } - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - Label table_start; - __ bind(&table_start); - Label needs_frame; - for (int i = 0; i < deopt_jump_table_.length(); i++) { - __ bind(&deopt_jump_table_[i].label); - Address entry = deopt_jump_table_[i].address; - 
Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; - int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); - if (id == Deoptimizer::kNotDeoptimizationEntry) { - Comment(";;; jump table entry %d.", i); - } else { + Address base = deopt_jump_table_[0].address; + + Register entry_offset = t9; + + int length = deopt_jump_table_.length(); + for (int i = 0; i < length; i++) { + __ bind(&deopt_jump_table_[i].label); + + Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; + DCHECK(type == deopt_jump_table_[0].bailout_type); + Address entry = deopt_jump_table_[i].address; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + DCHECK(id != Deoptimizer::kNotDeoptimizationEntry); Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); - } - __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); - if (deopt_jump_table_[i].needs_frame) { - ASSERT(!info()->saves_caller_doubles()); - if (needs_frame.is_bound()) { - __ Branch(&needs_frame); + + // Second-level deopt table entries are contiguous and small, so instead + // of loading the full, absolute address of each one, load an immediate + // offset which will be added to the base address later. + __ li(entry_offset, Operand(entry - base)); + + if (deopt_jump_table_[i].needs_frame) { + DCHECK(!info()->saves_caller_doubles()); + if (needs_frame.is_bound()) { + __ Branch(&needs_frame); + } else { + __ bind(&needs_frame); + Comment(";;; call deopt with frame"); + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. 
+ DCHECK(info()->IsStub()); + __ li(at, Operand(Smi::FromInt(StackFrame::STUB))); + __ push(at); + __ Addu(fp, sp, + Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + __ bind(&call_deopt_entry); + // Add the base address to the offset previously loaded in + // entry_offset. + __ Addu(entry_offset, entry_offset, + Operand(ExternalReference::ForDeoptEntry(base))); + __ Call(entry_offset); + } } else { - __ bind(&needs_frame); - __ MultiPush(cp.bit() | fp.bit() | ra.bit()); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - ASSERT(info()->IsStub()); - __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); - __ push(scratch0()); - __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - __ Call(t9); + // The last entry can fall through into `call_deopt_entry`, avoiding a + // branch. + bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); + + if (need_branch) __ Branch(&call_deopt_entry); } - } else { + } + + if (!call_deopt_entry.is_bound()) { + Comment(";;; call deopt"); + __ bind(&call_deopt_entry); + if (info()->saves_caller_doubles()) { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); RestoreCallerDoubles(); } - __ Call(t9); + + // Add the base address to the offset previously loaded in entry_offset. 
+ __ Addu(entry_offset, entry_offset, + Operand(ExternalReference::ForDeoptEntry(base))); + __ Call(entry_offset); } } __ RecordComment("]"); @@ -368,7 +404,7 @@ bool LCodeGen::GenerateSafepointTable() { - ASSERT(is_done()); + DCHECK(is_done()); safepoints_.Emit(masm(), GetStackSlotCount()); return !is_aborted(); } @@ -385,7 +421,7 @@ Register LCodeGen::ToRegister(LOperand* op) const { - ASSERT(op->IsRegister()); + DCHECK(op->IsRegister()); return ToRegister(op->index()); } @@ -399,15 +435,15 @@ Handle<Object> literal = constant->handle(isolate()); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); + DCHECK(literal->IsNumber()); __ li(scratch, Operand(static_cast<int32_t>(literal->Number()))); } else if (r.IsSmi()) { - ASSERT(constant->HasSmiValue()); + DCHECK(constant->HasSmiValue()); __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); } else if (r.IsDouble()) { Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); } else { - ASSERT(r.IsSmiOrTagged()); + DCHECK(r.IsSmiOrTagged()); __ li(scratch, literal); } return scratch; @@ -421,7 +457,7 @@ DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - ASSERT(op->IsDoubleRegister()); + DCHECK(op->IsDoubleRegister()); return ToDoubleRegister(op->index()); } @@ -437,7 +473,7 @@ Handle<Object> literal = constant->handle(isolate()); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); + DCHECK(literal->IsNumber()); __ li(at, Operand(static_cast<int32_t>(literal->Number()))); __ mtc1(at, flt_scratch); __ cvt_d_w(dbl_scratch, flt_scratch); @@ -459,7 +495,7 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); return constant->handle(isolate()); } @@ -484,7 +520,7 @@ 
HConstant* constant = chunk_->LookupConstant(op); int32_t value = constant->Integer32Value(); if (r.IsInteger32()) return value; - ASSERT(r.IsSmiOrTagged()); + DCHECK(r.IsSmiOrTagged()); return reinterpret_cast<int32_t>(Smi::FromInt(value)); } @@ -497,7 +533,7 @@ double LCodeGen::ToDouble(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasDoubleValue()); + DCHECK(constant->HasDoubleValue()); return constant->DoubleValue(); } @@ -508,15 +544,15 @@ HConstant* constant = chunk()->LookupConstant(const_op); Representation r = chunk_->LookupLiteralRepresentation(const_op); if (r.IsSmi()) { - ASSERT(constant->HasSmiValue()); + DCHECK(constant->HasSmiValue()); return Operand(Smi::FromInt(constant->Integer32Value())); } else if (r.IsInteger32()) { - ASSERT(constant->HasInteger32Value()); + DCHECK(constant->HasInteger32Value()); return Operand(constant->Integer32Value()); } else if (r.IsDouble()) { Abort(kToOperandUnsupportedDoubleImmediate); } - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); return Operand(constant->handle(isolate())); } else if (op->IsRegister()) { return Operand(ToRegister(op)); @@ -531,15 +567,15 @@ static int ArgumentsOffsetWithoutFrame(int index) { - ASSERT(index < 0); + DCHECK(index < 0); return -(index + 1) * kPointerSize; } MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - ASSERT(!op->IsRegister()); - ASSERT(!op->IsDoubleRegister()); - ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); + DCHECK(!op->IsRegister()); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return MemOperand(fp, StackSlotOffset(op->index())); } else { @@ -551,7 +587,7 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { - ASSERT(op->IsDoubleStackSlot()); + DCHECK(op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); } else { @@ -587,13 +623,13 @@ 
translation->BeginConstructStubFrame(closure_id, translation_size); break; case JS_GETTER: - ASSERT(translation_size == 1); - ASSERT(height == 0); + DCHECK(translation_size == 1); + DCHECK(height == 0); translation->BeginGetterStubFrame(closure_id); break; case JS_SETTER: - ASSERT(translation_size == 2); - ASSERT(height == 0); + DCHECK(translation_size == 2); + DCHECK(height == 0); translation->BeginSetterStubFrame(closure_id); break; case STUB: @@ -698,7 +734,7 @@ RelocInfo::Mode mode, LInstruction* instr, SafepointMode safepoint_mode) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); __ Call(code, mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode); } @@ -708,7 +744,7 @@ int num_arguments, LInstruction* instr, SaveFPRegsMode save_doubles) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); __ CallRuntime(function, num_arguments, save_doubles); @@ -744,6 +780,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); if (!environment->HasBeenRegistered()) { // Physical stack frame layout: // -x ............. -4 0 ..................................... y @@ -784,9 +821,9 @@ Register src1, const Operand& src2) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - ASSERT(environment->HasBeenRegistered()); + DCHECK(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - ASSERT(info()->IsOptimizing() || info()->IsStub()); + DCHECK(info()->IsOptimizing() || info()->IsStub()); Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { @@ -822,7 +859,7 @@ __ bind(&skip); } - ASSERT(info()->IsStub() || frame_is_built_); + DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. 
if (condition == al && frame_is_built_ && @@ -860,7 +897,7 @@ int length = deoptimizations_.length(); if (length == 0) return; Handle<DeoptimizationInputData> data = - factory()->NewDeoptimizationInputData(length, TENURED); + DeoptimizationInputData::New(isolate(), length, 0, TENURED); Handle<ByteArray> translations = translations_.CreateByteArray(isolate()->factory()); @@ -911,7 +948,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { - ASSERT(deoptimization_literals_.length() == 0); + DCHECK(deoptimization_literals_.length() == 0); const ZoneList<Handle<JSFunction> >* inlined_closures = chunk()->inlined_closures(); @@ -931,7 +968,7 @@ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); } else { - ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kLazyDeopt); } @@ -943,7 +980,7 @@ Safepoint::Kind kind, int arguments, Safepoint::DeoptMode deopt_mode) { - ASSERT(expected_safepoint_kind_ == kind); + DCHECK(expected_safepoint_kind_ == kind); const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), @@ -979,15 +1016,6 @@ } -void LCodeGen::RecordSafepointWithRegistersAndDoubles( - LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode deopt_mode) { - RecordSafepoint( - pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode); -} - - void LCodeGen::RecordAndWritePosition(int position) { if (position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); @@ -1041,22 +1069,22 @@ void LCodeGen::DoCallStub(LCallStub* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->context()).is(cp)); + 
DCHECK(ToRegister(instr->result()).is(v0)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpExec: { - RegExpExecStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + RegExpExecStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { - SubStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { - StringCompareStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } default: @@ -1073,7 +1101,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister(instr->result()))); + DCHECK(dividend.is(ToRegister(instr->result()))); // Theoretically, a variation of the branch-free code for integer division by // a power of 2 (calculating the remainder via an additional multiplication @@ -1107,7 +1135,7 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(!dividend.is(result)); + DCHECK(!dividend.is(result)); if (divisor == 0) { DeoptimizeIf(al, instr->environment()); @@ -1174,8 +1202,8 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); - ASSERT(!result.is(dividend)); + DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); // Check for (0 / -x) that will produce negative zero. 
HDiv* hdiv = instr->hydrogen(); @@ -1218,7 +1246,7 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(!dividend.is(result)); + DCHECK(!dividend.is(result)); if (divisor == 0) { DeoptimizeIf(al, instr->environment()); @@ -1242,26 +1270,27 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. void LCodeGen::DoDivI(LDivI* instr) { HBinaryOperation* hdiv = instr->hydrogen(); - const Register left = ToRegister(instr->left()); - const Register right = ToRegister(instr->right()); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); const Register result = ToRegister(instr->result()); // On MIPS div is asynchronous - it will run in the background while we // check for special cases. - __ div(left, right); + __ div(dividend, divisor); // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { - DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg)); + DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg)); } // Check for (0 / -x) that will produce negative zero. 
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { Label left_not_zero; - __ Branch(&left_not_zero, ne, left, Operand(zero_reg)); - DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg)); + __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); + DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg)); __ bind(&left_not_zero); } @@ -1269,23 +1298,12 @@ if (hdiv->CheckFlag(HValue::kCanOverflow) && !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { Label left_not_min_int; - __ Branch(&left_not_min_int, ne, left, Operand(kMinInt)); - DeoptimizeIf(eq, instr->environment(), right, Operand(-1)); + __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); + DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1)); __ bind(&left_not_min_int); } - if (hdiv->IsMathFloorOfDiv()) { - // We performed a truncating division. Correct the result if necessary. - Label done; - Register remainder = scratch0(); - __ mfhi(remainder); - __ mflo(result); - __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); - __ Xor(remainder, remainder, Operand(right)); - __ Branch(&done, ge, remainder, Operand(zero_reg)); - __ Subu(result, result, Operand(1)); - __ bind(&done); - } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { __ mfhi(result); DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg)); __ mflo(result); @@ -1301,7 +1319,7 @@ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); // This is computed in-place. - ASSERT(addend.is(ToDoubleRegister(instr->result()))); + DCHECK(addend.is(ToDoubleRegister(instr->result()))); __ madd_d(addend, addend, multiplier, multiplicand); } @@ -1311,12 +1329,17 @@ Register dividend = ToRegister(instr->dividend()); Register result = ToRegister(instr->result()); int32_t divisor = instr->divisor(); - Register scratch = scratch0(); - ASSERT(!scratch.is(dividend)); + Register scratch = result.is(dividend) ? 
scratch0() : dividend; + DCHECK(!result.is(dividend) || !scratch.is(dividend)); + + // If the divisor is 1, return the dividend. + if (divisor == 1) { + __ Move(result, dividend); + return; + } // If the divisor is positive, things are easy: There can be no deopts and we // can simply do an arithmetic right shift. - if (divisor == 1) return; uint16_t shift = WhichPowerOf2Abs(divisor); if (divisor > 1) { __ sra(result, dividend, shift); @@ -1324,32 +1347,38 @@ } // If the divisor is negative, we have to negate and handle edge cases. - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - __ Move(scratch, dividend); - } + + // dividend can be the same register as result so save the value of it + // for checking overflow. + __ Move(scratch, dividend); + __ Subu(result, zero_reg, dividend); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); } - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - // Note that we could emit branch-free code, but that would need one more - // register. - __ Xor(at, scratch, result); - if (divisor == -1) { - DeoptimizeIf(ge, instr->environment(), at, Operand(zero_reg)); - __ sra(result, dividend, shift); - } else { - Label no_overflow, done; - __ Branch(&no_overflow, lt, at, Operand(zero_reg)); - __ li(result, Operand(kMinInt / divisor)); - __ Branch(&done); - __ bind(&no_overflow); - __ sra(result, dividend, shift); - __ bind(&done); + + // Dividing by -1 is basically negation, unless we overflow. + __ Xor(scratch, scratch, result); + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg)); } - } else { - __ sra(result, dividend, shift); + return; + } + + // If the negation could not overflow, simply shifting is OK. 
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ sra(result, result, shift); + return; } + + Label no_overflow, done; + __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); + __ li(result, Operand(kMinInt / divisor)); + __ Branch(&done); + __ bind(&no_overflow); + __ sra(result, result, shift); + __ bind(&done); } @@ -1357,7 +1386,7 @@ Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(!dividend.is(result)); + DCHECK(!dividend.is(result)); if (divisor == 0) { DeoptimizeIf(al, instr->environment()); @@ -1382,7 +1411,7 @@ // In the general case we may need to adjust before and after the truncating // division to get a flooring division. Register temp = ToRegister(instr->temp()); - ASSERT(!temp.is(dividend) && !temp.is(result)); + DCHECK(!temp.is(dividend) && !temp.is(result)); Label needs_adjustment, done; __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, dividend, Operand(zero_reg)); @@ -1398,6 +1427,52 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. +void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + const Register result = ToRegister(instr->result()); + + // On MIPS div is asynchronous - it will run in the background while we + // check for special cases. + __ div(dividend, divisor); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg)); + } + + // Check for (0 / -x) that will produce negative zero. + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label left_not_zero; + __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); + DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg)); + __ bind(&left_not_zero); + } + + // Check for (kMinInt / -1). 
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && + !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + Label left_not_min_int; + __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); + DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1)); + __ bind(&left_not_min_int); + } + + // We performed a truncating division. Correct the result if necessary. + Label done; + Register remainder = scratch0(); + __ mfhi(remainder); + __ mflo(result); + __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); + __ Xor(remainder, remainder, Operand(divisor)); + __ Branch(&done, ge, remainder, Operand(zero_reg)); + __ Subu(result, result, Operand(1)); + __ bind(&done); +} + + void LCodeGen::DoMulI(LMulI* instr) { Register scratch = scratch0(); Register result = ToRegister(instr->result()); @@ -1471,7 +1546,7 @@ } } else { - ASSERT(right_op->IsRegister()); + DCHECK(right_op->IsRegister()); Register right = ToRegister(right_op); if (overflow) { @@ -1515,7 +1590,7 @@ void LCodeGen::DoBitI(LBitI* instr) { LOperand* left_op = instr->left(); LOperand* right_op = instr->right(); - ASSERT(left_op->IsRegister()); + DCHECK(left_op->IsRegister()); Register left = ToRegister(left_op); Register result = ToRegister(instr->result()); Operand right(no_reg); @@ -1523,7 +1598,7 @@ if (right_op->IsStackSlot()) { right = Operand(EmitLoadRegister(right_op, at)); } else { - ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); + DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); right = ToOperand(right_op); } @@ -1646,7 +1721,7 @@ Register right_reg = EmitLoadRegister(right, at); __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg)); } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); + DCHECK(right->IsRegister() || right->IsConstantOperand()); __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); } } else { // can_overflow. @@ -1659,7 +1734,7 @@ right_reg, overflow); // Reg at also used as scratch. 
} else { - ASSERT(right->IsRegister()); + DCHECK(right->IsRegister()); // Due to overflow check macros not supporting constant operands, // handling the IsConstantOperand case was moved to prev if clause. __ SubuAndCheckForOverflow(ToRegister(result), @@ -1683,7 +1758,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { - ASSERT(instr->result()->IsDoubleRegister()); + DCHECK(instr->result()->IsDoubleRegister()); DoubleRegister result = ToDoubleRegister(instr->result()); double v = instr->value(); __ Move(result, v); @@ -1696,9 +1771,9 @@ void LCodeGen::DoConstantT(LConstantT* instr) { - Handle<Object> value = instr->value(isolate()); + Handle<Object> object = instr->value(isolate()); AllowDeferredHandleDereference smi_check; - __ li(ToRegister(instr->result()), value); + __ li(ToRegister(instr->result()), object); } @@ -1715,10 +1790,10 @@ Register scratch = ToRegister(instr->temp()); Smi* index = instr->index(); Label runtime, done; - ASSERT(object.is(a0)); - ASSERT(result.is(v0)); - ASSERT(!scratch.is(scratch0())); - ASSERT(!scratch.is(object)); + DCHECK(object.is(a0)); + DCHECK(result.is(v0)); + DCHECK(!scratch.is(scratch0())); + DCHECK(!scratch.is(object)); __ SmiTst(object, at); DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); @@ -1759,8 +1834,8 @@ return FieldMemOperand(string, SeqString::kHeaderSize + offset); } Register scratch = scratch0(); - ASSERT(!scratch.is(string)); - ASSERT(!scratch.is(ToRegister(index))); + DCHECK(!scratch.is(string)); + DCHECK(!scratch.is(ToRegister(index))); if (encoding == String::ONE_BYTE_ENCODING) { __ Addu(scratch, string, ToRegister(index)); } else { @@ -1836,7 +1911,7 @@ Register right_reg = EmitLoadRegister(right, at); __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg)); } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); + DCHECK(right->IsRegister() || right->IsConstantOperand()); __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); } } else { // can_overflow. 
@@ -1850,7 +1925,7 @@ right_reg, overflow); // Reg at also used as scratch. } else { - ASSERT(right->IsRegister()); + DCHECK(right->IsRegister()); // Due to overflow check macros not supporting constant operands, // handling the IsConstantOperand case was moved to prev if clause. __ AdduAndCheckForOverflow(ToRegister(result), @@ -1870,22 +1945,21 @@ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; if (instr->hydrogen()->representation().IsSmiOrInteger32()) { Register left_reg = ToRegister(left); - Operand right_op = (right->IsRegister() || right->IsConstantOperand()) - ? ToOperand(right) - : Operand(EmitLoadRegister(right, at)); + Register right_reg = EmitLoadRegister(right, scratch0()); Register result_reg = ToRegister(instr->result()); Label return_right, done; - if (!result_reg.is(left_reg)) { - __ Branch(&return_right, NegateCondition(condition), left_reg, right_op); - __ mov(result_reg, left_reg); - __ Branch(&done); + Register scratch = scratch1(); + __ Slt(scratch, left_reg, Operand(right_reg)); + if (condition == ge) { + __ Movz(result_reg, left_reg, scratch); + __ Movn(result_reg, right_reg, scratch); + } else { + DCHECK(condition == le); + __ Movn(result_reg, left_reg, scratch); + __ Movz(result_reg, right_reg, scratch); } - __ Branch(&done, condition, left_reg, right_op); - __ bind(&return_right); - __ Addu(result_reg, zero_reg, right_op); - __ bind(&done); } else { - ASSERT(instr->hydrogen()->representation().IsDouble()); + DCHECK(instr->hydrogen()->representation().IsDouble()); FPURegister left_reg = ToDoubleRegister(left); FPURegister right_reg = ToDoubleRegister(right); FPURegister result_reg = ToDoubleRegister(instr->result()); @@ -1967,13 +2041,13 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).is(a1)); - ASSERT(ToRegister(instr->right()).is(a0)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->context()).is(cp)); + 
DCHECK(ToRegister(instr->left()).is(a1)); + DCHECK(ToRegister(instr->right()).is(a0)); + DCHECK(ToRegister(instr->result()).is(v0)); - BinaryOpICStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); // Other arch use a nop here, to signal that there is no inlined // patchable code. Mips does not need the nop, since our marker // instruction (andi zero_reg) will never be used in normal code. @@ -2057,36 +2131,36 @@ void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32() || r.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); Register reg = ToRegister(instr->value()); EmitBranch(instr, ne, reg, Operand(zero_reg)); } else if (r.IsDouble()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); DoubleRegister reg = ToDoubleRegister(instr->value()); // Test the double value. Zero and NaN are false. EmitBranchF(instr, nue, reg, kDoubleRegZero); } else { - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ LoadRoot(at, Heap::kTrueValueRootIndex); EmitBranch(instr, eq, reg, Operand(at)); } else if (type.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitBranch(instr, ne, reg, Operand(zero_reg)); } else if (type.IsJSArray()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitBranch(instr, al, zero_reg, Operand(zero_reg)); } else if (type.IsHeapNumber()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); DoubleRegister dbl_scratch = double_scratch0(); __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); // Test the double value. Zero and NaN are false. 
EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); } else if (type.IsString()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); EmitBranch(instr, ne, at, Operand(zero_reg)); } else { @@ -2229,7 +2303,10 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - Condition cond = TokenToCondition(instr->op(), false); + bool is_unsigned = + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cond = TokenToCondition(instr->op(), is_unsigned); if (left->IsConstantOperand() && right->IsConstantOperand()) { // We can statically evaluate the comparison. @@ -2273,8 +2350,8 @@ cmp_left = ToRegister(right); cmp_right = Operand(value); } - // We transposed the operands. Reverse the condition. - cond = ReverseCondition(cond); + // We commuted the operands, so commute the condition. + cond = CommuteCondition(cond); } else { cmp_left = ToRegister(left); cmp_right = Operand(ToRegister(right)); @@ -2313,7 +2390,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { Representation rep = instr->hydrogen()->value()->representation(); - ASSERT(!rep.IsInteger32()); + DCHECK(!rep.IsInteger32()); Register scratch = ToRegister(instr->temp()); if (rep.IsDouble()) { @@ -2395,7 +2472,7 @@ Register temp1 = ToRegister(instr->temp()); SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; Condition true_cond = EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); @@ -2416,7 +2493,7 @@ Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset)); @@ -2447,7 +2524,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2463,7 +2540,7 @@ InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; - ASSERT(from == to || to == LAST_TYPE); + DCHECK(from == to || to == LAST_TYPE); return from; } @@ -2483,7 +2560,7 @@ Register scratch = scratch0(); Register input = ToRegister(instr->value()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } @@ -2526,9 +2603,9 @@ Register input, Register temp, Register temp2) { - ASSERT(!input.is(temp)); - ASSERT(!input.is(temp2)); - ASSERT(!temp.is(temp2)); + DCHECK(!input.is(temp)); + DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); __ JumpIfSmi(input, is_false); @@ -2607,15 +2684,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Label true_label, done; - ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0. - ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1. + DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0. + DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1. 
Register result = ToRegister(instr->result()); - ASSERT(result.is(v0)); + DCHECK(result.is(v0)); - InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Branch(&true_label, eq, result, Operand(zero_reg)); __ li(result, Operand(factory()->false_value())); @@ -2651,8 +2728,8 @@ Register temp = ToRegister(instr->temp()); Register result = ToRegister(instr->result()); - ASSERT(object.is(a0)); - ASSERT(result.is(v0)); + DCHECK(object.is(a0)); + DCHECK(result.is(v0)); // A Smi is not instance of anything. __ JumpIfSmi(object, &false_result); @@ -2706,7 +2783,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check) { Register result = ToRegister(instr->result()); - ASSERT(result.is(v0)); + DCHECK(result.is(v0)); InstanceofStub::Flags flags = InstanceofStub::kNoFlags; flags = static_cast<InstanceofStub::Flags>( @@ -2715,16 +2792,16 @@ flags | InstanceofStub::kCallSiteInlineCheck); flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kReturnTrueFalseObject); - InstanceofStub stub(flags); + InstanceofStub stub(isolate(), flags); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); LoadContextFromDeferred(instr->context()); // Get the temp register reserved by the instruction. This needs to be t0 as // its slot of the pushing of safepoint registers is used to communicate the // offset to the location of the map check. 
Register temp = ToRegister(instr->temp()); - ASSERT(temp.is(t0)); + DCHECK(temp.is(t0)); __ li(InstanceofStub::right(), instr->function()); static const int kAdditionalDelta = 7; int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; @@ -2735,7 +2812,7 @@ __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE); __ StoreToSafepointRegisterSlot(temp, temp); } - CallCodeGeneric(stub.GetCode(isolate()), + CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -2748,7 +2825,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2763,7 +2840,7 @@ __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); __ bind(&check); __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); - ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check)); + DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check)); __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); __ bind(&done); } @@ -2822,11 +2899,20 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->global_object()).is(a0)); - ASSERT(ToRegister(instr->result()).is(v0)); - - __ li(a2, Operand(instr->name())); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(v0)); + + __ li(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ li(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. 
+ DCHECK(LoadIC::SlotRegister().is(a0)); + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -2901,7 +2987,7 @@ __ sw(value, target); if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; __ RecordWriteContextSlot(context, target.offset(), @@ -2946,12 +3032,21 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(a0)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(v0)); // Name is always in a2. - __ li(a2, Operand(instr->name())); + __ li(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ li(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(a0)); + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2962,17 +3057,6 @@ Register function = ToRegister(instr->function()); Register result = ToRegister(instr->result()); - // Check that the function really is a function. Load map into the - // result register. - __ GetObjectType(function, result, scratch); - DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE)); - - // Make sure that the function has an instance prototype. 
- Label non_instance; - __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); - __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); - __ Branch(&non_instance, ne, scratch, Operand(zero_reg)); - // Get the prototype or initial map from the function. __ lw(result, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); @@ -2988,12 +3072,6 @@ // Get the prototype from the initial map. __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); - __ Branch(&done); - - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. - __ bind(&non_instance); - __ lw(result, FieldMemOperand(result, Map::kConstructorOffset)); // All done. __ bind(&done); @@ -3068,16 +3146,13 @@ int element_size_shift = ElementsKindToShiftSize(elements_kind); int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) - ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; + int base_offset = instr->base_offset(); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS || elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - int base_offset = - (instr->additional_index() << element_size_shift) + additional_offset; + int base_offset = instr->base_offset(); FPURegister result = ToDoubleRegister(instr->result()); if (key_is_constant) { __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); @@ -3089,15 +3164,14 @@ elements_kind == FLOAT32_ELEMENTS) { __ lwc1(result, MemOperand(scratch0(), base_offset)); __ cvt_d_s(result, result); - } else { // loading doubles, not floats. + } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS __ ldc1(result, MemOperand(scratch0(), base_offset)); } } else { Register result = ToRegister(instr->result()); MemOperand mem_operand = PrepareKeyedOperand( key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - instr->additional_index(), additional_offset); + element_size_shift, shift_size, base_offset); switch (elements_kind) { case EXTERNAL_INT8_ELEMENTS: case INT8_ELEMENTS: @@ -3157,15 +3231,13 @@ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int base_offset = - FixedDoubleArray::kHeaderSize - kHeapObjectTag + - (instr->additional_index() << element_size_shift); + int base_offset = instr->base_offset(); if (key_is_constant) { int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { Abort(kArrayIndexConstantValueTooBig); } - base_offset += constant_key << element_size_shift; + base_offset += constant_key * kDoubleSize; } __ Addu(scratch, elements, Operand(base_offset)); @@ -3180,7 +3252,7 @@ __ ldc1(result, MemOperand(scratch)); if (instr->hydrogen()->RequiresHoleCheck()) { - __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); + __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); } } @@ -3191,12 +3263,11 @@ Register result = ToRegister(instr->result()); Register scratch = scratch0(); Register store_base = scratch; - int offset = 0; + int offset = instr->base_offset(); if (instr->key()->IsConstantOperand()) { LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); + offset += ToInteger32(const_operand) * kPointerSize; store_base = elements; } else { Register key = ToRegister(instr->key()); @@ -3211,9 +3282,8 @@ __ sll(scratch, key, kPointerSizeLog2); __ addu(scratch, elements, scratch); } - offset = 
FixedArray::OffsetOfElementAt(instr->additional_index()); } - __ lw(result, FieldMemOperand(store_base, offset)); + __ lw(result, MemOperand(store_base, offset)); // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { @@ -3245,39 +3315,18 @@ int constant_key, int element_size, int shift_size, - int additional_index, - int additional_offset) { - int base_offset = (additional_index << element_size) + additional_offset; + int base_offset) { if (key_is_constant) { - return MemOperand(base, - base_offset + (constant_key << element_size)); - } - - if (additional_offset != 0) { - if (shift_size >= 0) { - __ sll(scratch0(), key, shift_size); - __ Addu(scratch0(), scratch0(), Operand(base_offset)); - } else { - ASSERT_EQ(-1, shift_size); - __ srl(scratch0(), key, 1); - __ Addu(scratch0(), scratch0(), Operand(base_offset)); - } - __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); - } - - if (additional_index != 0) { - additional_index *= 1 << (element_size - shift_size); - __ Addu(scratch0(), key, Operand(additional_index)); + return MemOperand(base, (constant_key << element_size) + base_offset); } - if (additional_index == 0) { + if (base_offset == 0) { if (shift_size >= 0) { __ sll(scratch0(), key, shift_size); __ Addu(scratch0(), base, scratch0()); return MemOperand(scratch0()); } else { - ASSERT_EQ(-1, shift_size); + DCHECK_EQ(-1, shift_size); __ srl(scratch0(), key, 1); __ Addu(scratch0(), base, scratch0()); return MemOperand(scratch0()); @@ -3285,22 +3334,32 @@ } if (shift_size >= 0) { - __ sll(scratch0(), scratch0(), shift_size); + __ sll(scratch0(), key, shift_size); __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); + return MemOperand(scratch0(), base_offset); } else { - ASSERT_EQ(-1, shift_size); - __ srl(scratch0(), scratch0(), 1); + DCHECK_EQ(-1, shift_size); + __ sra(scratch0(), key, 1); __ Addu(scratch0(), base, scratch0()); - return MemOperand(scratch0()); + return MemOperand(scratch0(), 
base_offset); } } void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(a1)); - ASSERT(ToRegister(instr->key()).is(a0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ li(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(a0)); + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -3397,7 +3456,7 @@ __ lw(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); __ lw(result, - FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); if (result.is(receiver)) { __ bind(&result_in_receiver); @@ -3417,9 +3476,9 @@ Register length = ToRegister(instr->length()); Register elements = ToRegister(instr->elements()); Register scratch = scratch0(); - ASSERT(receiver.is(a0)); // Used for parameter count. - ASSERT(function.is(a1)); // Required by InvokeFunction. - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(receiver.is(a0)); // Used for parameter count. + DCHECK(function.is(a1)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).is(v0)); // Copy the arguments to this function possibly from the // adaptor frame below it. 
@@ -3448,7 +3507,7 @@ __ sll(scratch, length, 2); __ bind(&invoke); - ASSERT(instr->HasPointerMap()); + DCHECK(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); @@ -3488,18 +3547,18 @@ __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { // If there is no frame, the context must be in cp. - ASSERT(result.is(cp)); + DCHECK(result.is(cp)); } } void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); __ li(scratch0(), instr->hydrogen()->pairs()); __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); // The context is the first argument. __ Push(cp, scratch0(), scratch1()); - CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); } @@ -3545,8 +3604,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { - ASSERT(instr->context() != NULL); - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(instr->context() != NULL); + DCHECK(ToRegister(instr->context()).is(cp)); Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3569,7 +3628,7 @@ // Input is negative. Reverse its sign. // Preserve the value of all registers. { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // Registers were saved at the safepoint, so we can use // many scratch registers. @@ -3588,7 +3647,7 @@ // Slow case: Call the runtime system to do the number allocation. __ bind(&slow); - CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr, + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, instr->context()); // Set the pointer to the new heap number in tmp. 
if (!tmp1.is(v0)) @@ -3765,6 +3824,14 @@ } +void LCodeGen::DoMathFround(LMathFround* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + __ cvt_s_d(result.low(), input); + __ cvt_d_s(result, result.low()); +} + + void LCodeGen::DoMathSqrt(LMathSqrt* instr) { DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); @@ -3777,7 +3844,7 @@ DoubleRegister result = ToDoubleRegister(instr->result()); DoubleRegister temp = ToDoubleRegister(instr->temp()); - ASSERT(!input.is(result)); + DCHECK(!input.is(result)); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity @@ -3800,15 +3867,15 @@ Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. - ASSERT(!instr->right()->IsDoubleRegister() || + DCHECK(!instr->right()->IsDoubleRegister() || ToDoubleRegister(instr->right()).is(f4)); - ASSERT(!instr->right()->IsRegister() || + DCHECK(!instr->right()->IsRegister() || ToRegister(instr->right()).is(a2)); - ASSERT(ToDoubleRegister(instr->left()).is(f2)); - ASSERT(ToDoubleRegister(instr->result()).is(f0)); + DCHECK(ToDoubleRegister(instr->left()).is(f2)); + DCHECK(ToDoubleRegister(instr->result()).is(f0)); if (exponent_type.IsSmi()) { - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; @@ -3817,14 +3884,14 @@ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); DeoptimizeIf(ne, instr->environment(), t3, Operand(at)); __ bind(&no_deopt); - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { - MathPowStub stub(MathPowStub::INTEGER); + MathPowStub 
stub(isolate(), MathPowStub::INTEGER); __ CallStub(&stub); } else { - ASSERT(exponent_type.IsDouble()); - MathPowStub stub(MathPowStub::DOUBLE); + DCHECK(exponent_type.IsDouble()); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStub(&stub); } } @@ -3861,9 +3928,9 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->function()).is(a1)); - ASSERT(instr->HasPointerMap()); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->function()).is(a1)); + DCHECK(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { @@ -3882,7 +3949,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->result()).is(v0)); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -3893,7 +3960,7 @@ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); __ Call(code, RelocInfo::CODE_TARGET); } else { - ASSERT(instr->target()->IsRegister()); + DCHECK(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(target)); __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); @@ -3904,8 +3971,8 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { - ASSERT(ToRegister(instr->function()).is(a1)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->function()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); if (instr->hydrogen()->pass_argument_count()) { __ li(a0, Operand(instr->arity())); @@ -3923,33 +3990,33 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->function()).is(a1)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->context()).is(cp)); + 
DCHECK(ToRegister(instr->function()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); int arity = instr->arity(); - CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->constructor()).is(a1)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); __ li(a0, Operand(instr->arity())); // No cell in a2 for construct type feedback in optimized code __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->constructor()).is(a1)); - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); __ li(a0, Operand(instr->arity())); __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); @@ -3960,8 +4027,8 @@ : DONT_OVERRIDE; if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { @@ -3972,18 +4039,20 @@ __ 
Branch(&packed_case, eq, t1, Operand(zero_reg)); ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ jmp(&done); __ bind(&packed_case); } - ArraySingleArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ bind(&done); } else { - ArrayNArgumentsConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } } @@ -4031,46 +4100,33 @@ return; } - Handle<Map> transition = instr->transition(); - SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + __ AssertNotSmi(object); - ASSERT(!(representation.IsSmi() && - instr->value()->IsConstantOperand() && - !IsSmi(LConstantOperand::cast(instr->value())))); - if (representation.IsHeapObject()) { - Register value = ToRegister(instr->value()); - if (!instr->hydrogen()->value()->type().IsHeapObject()) { - __ SmiTst(value, scratch); - DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); - - // We know that value is a smi now, so we can omit the check below. 
- check_needed = OMIT_SMI_CHECK; - } - } else if (representation.IsDouble()) { - ASSERT(transition.is_null()); - ASSERT(access.IsInobject()); - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsSmi(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); DoubleRegister value = ToDoubleRegister(instr->value()); __ sdc1(value, FieldMemOperand(object, offset)); return; } - if (!transition.is_null()) { + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); __ li(scratch, Operand(transition)); __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); if (instr->hydrogen()->NeedsWriteBarrierForMap()) { Register temp = ToRegister(instr->temp()); // Update the write barrier for the map field. 
- __ RecordWriteField(object, - HeapObject::kMapOffset, - scratch, - temp, - GetRAState(), - kSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteForMap(object, + scratch, + temp, + GetRAState(), + kSaveFPRegs); } } @@ -4088,7 +4144,8 @@ GetRAState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); } } else { __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -4104,60 +4161,43 @@ GetRAState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); } } } void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(a1)); - ASSERT(ToRegister(instr->value()).is(a0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); - // Name is always in a2. - __ li(a2, Operand(instr->name())); + __ li(StoreIC::NameRegister(), Operand(instr->name())); Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); CallCode(ic, RelocInfo::CODE_TARGET, instr); } -void LCodeGen::ApplyCheckIf(Condition condition, - LBoundsCheck* check, - Register src1, - const Operand& src2) { - if (FLAG_debug_code && check->hydrogen()->skip_check()) { +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? 
hi : hs; + Operand operand(0); + Register reg; + if (instr->index()->IsConstantOperand()) { + operand = ToOperand(instr->index()); + reg = ToRegister(instr->length()); + cc = CommuteCondition(cc); + } else { + reg = ToRegister(instr->index()); + operand = ToOperand(instr->length()); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { Label done; - __ Branch(&done, NegateCondition(condition), src1, src2); + __ Branch(&done, NegateCondition(cc), reg, operand); __ stop("eliminated bounds check failed"); __ bind(&done); } else { - DeoptimizeIf(condition, check->environment(), src1, src2); - } -} - - -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - if (instr->hydrogen()->skip_check()) return; - - Condition condition = instr->hydrogen()->allow_equality() ? hi : hs; - if (instr->index()->IsConstantOperand()) { - int constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->hydrogen()->length()->representation().IsSmi()) { - __ li(at, Operand(Smi::FromInt(constant_index))); - } else { - __ li(at, Operand(constant_index)); - } - ApplyCheckIf(condition, - instr, - at, - Operand(ToRegister(instr->length()))); - } else { - ApplyCheckIf(condition, - instr, - ToRegister(instr->index()), - Operand(ToRegister(instr->length()))); + DeoptimizeIf(cc, instr->environment(), reg, operand); } } @@ -4179,16 +4219,12 @@ int element_size_shift = ElementsKindToShiftSize(elements_kind); int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) - ? 
FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; + int base_offset = instr->base_offset(); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS || elements_kind == EXTERNAL_FLOAT64_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { - int base_offset = - (instr->additional_index() << element_size_shift) + additional_offset; Register address = scratch0(); FPURegister value(ToDoubleRegister(instr->value())); if (key_is_constant) { @@ -4215,7 +4251,7 @@ MemOperand mem_operand = PrepareKeyedOperand( key, external_pointer, key_is_constant, constant_key, element_size_shift, shift_size, - instr->additional_index(), additional_offset); + base_offset); switch (elements_kind) { case EXTERNAL_UINT8_CLAMPED_ELEMENTS: case EXTERNAL_INT8_ELEMENTS: @@ -4262,6 +4298,7 @@ Register scratch = scratch0(); DoubleRegister double_scratch = double_scratch0(); bool key_is_constant = instr->key()->IsConstantOperand(); + int base_offset = instr->base_offset(); Label not_nan, done; // Calculate the effective address of the slot in the array to store the @@ -4273,13 +4310,11 @@ Abort(kArrayIndexConstantValueTooBig); } __ Addu(scratch, elements, - Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand((constant_key << element_size_shift) + base_offset)); } else { int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? 
(element_size_shift - kSmiTagSize) : element_size_shift; - __ Addu(scratch, elements, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ Addu(scratch, elements, Operand(base_offset)); __ sll(at, ToRegister(instr->key()), shift_size); __ Addu(scratch, scratch, at); } @@ -4294,14 +4329,12 @@ __ bind(&is_nan); __ LoadRoot(at, Heap::kNanValueRootIndex); __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset)); - __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() << - element_size_shift)); + __ sdc1(double_scratch, MemOperand(scratch, 0)); __ Branch(&done); } __ bind(¬_nan); - __ sdc1(value, MemOperand(scratch, instr->additional_index() << - element_size_shift)); + __ sdc1(value, MemOperand(scratch, 0)); __ bind(&done); } @@ -4313,14 +4346,13 @@ : no_reg; Register scratch = scratch0(); Register store_base = scratch; - int offset = 0; + int offset = instr->base_offset(); // Do the store. if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); + offset += ToInteger32(const_operand) * kPointerSize; store_base = elements; } else { // Even though the HLoadKeyed instruction forces the input @@ -4334,23 +4366,23 @@ __ sll(scratch, key, kPointerSizeLog2); __ addu(scratch, elements, scratch); } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); } - __ sw(value, FieldMemOperand(store_base, offset)); + __ sw(value, MemOperand(store_base, offset)); if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. 
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag)); + __ Addu(key, store_base, Operand(offset)); __ RecordWrite(elements, key, value, GetRAState(), kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); } } @@ -4368,10 +4400,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->object()).is(a2)); - ASSERT(ToRegister(instr->key()).is(a1)); - ASSERT(ToRegister(instr->value()).is(a0)); + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); Handle<Code> ic = (instr->strict_mode() == STRICT) ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() @@ -4398,19 +4430,21 @@ __ li(new_map_reg, Operand(to_map)); __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. 
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, - scratch, GetRAState(), kDontSaveFPRegs); - } else { - ASSERT(ToRegister(instr->context()).is(cp)); - PushSafepointRegistersScope scope( - this, Safepoint::kWithRegistersAndDoubles); - __ mov(a0, object_reg); + __ RecordWriteForMap(object_reg, + new_map_reg, + scratch, + GetRAState(), + kDontSaveFPRegs); + } else { + DCHECK(object_reg.is(a0)); + DCHECK(ToRegister(instr->context()).is(cp)); + PushSafepointRegistersScope scope(this); __ li(a1, Operand(to_map)); bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; - TransitionElementsKindStub stub(from_kind, to_kind, is_js_array); + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); __ CallStub(&stub); - RecordSafepointWithRegistersAndDoubles( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); } __ bind(¬_applicable); } @@ -4428,12 +4462,13 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); - ASSERT(ToRegister(instr->left()).is(a1)); - ASSERT(ToRegister(instr->right()).is(a0)); - StringAddStub stub(instr->hydrogen()->flags(), + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(a1)); + DCHECK(ToRegister(instr->right()).is(a0)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -4471,7 +4506,7 @@ // contained in the register pointer map. __ mov(result, zero_reg); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ push(string); // Push the index as a smi. This is safe because of the checks in // DoStringCharCodeAt above. 
@@ -4484,7 +4519,7 @@ __ SmiTag(index); __ push(index); } - CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr, + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, instr->context()); __ AssertSmi(v0); __ SmiUntag(v0); @@ -4508,11 +4543,11 @@ DeferredStringCharFromCode* deferred = new(zone()) DeferredStringCharFromCode(this, instr); - ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); - ASSERT(!char_code.is(result)); + DCHECK(!char_code.is(result)); __ Branch(deferred->entry(), hi, char_code, Operand(String::kMaxOneByteCharCode)); @@ -4535,7 +4570,7 @@ // contained in the register pointer map. __ mov(result, zero_reg); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ SmiTag(char_code); __ push(char_code); CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); @@ -4545,9 +4580,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister() || input->IsStackSlot()); + DCHECK(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); - ASSERT(output->IsDoubleRegister()); + DCHECK(output->IsDoubleRegister()); FPURegister single_scratch = double_scratch0().low(); if (input->IsStackSlot()) { Register scratch = scratch0(); @@ -4668,15 +4703,15 @@ __ mov(dst, zero_reg); // Preserve the value of all registers. - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. 
// The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ Subu(v0, v0, kHeapObjectTag); @@ -4734,14 +4769,14 @@ Register reg = ToRegister(instr->result()); __ mov(reg, zero_reg); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ Subu(v0, v0, kHeapObjectTag); @@ -4823,7 +4858,7 @@ } } else { __ SmiUntag(scratch, input_reg); - ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); } // Smi to double register conversion __ bind(&load_smi); @@ -4841,8 +4876,8 @@ DoubleRegister double_scratch = double_scratch0(); DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); - ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); + DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); + DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); Label done; @@ -4868,7 +4903,7 @@ __ bind(&no_heap_number); __ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ 
Branch(&check_bools, ne, input_reg, Operand(at)); - ASSERT(ToRegister(instr->result()).is(input_reg)); + DCHECK(ToRegister(instr->result()).is(input_reg)); __ Branch(USE_DELAY_SLOT, &done); __ mov(input_reg, zero_reg); // In delay slot. @@ -4929,8 +4964,8 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister()); - ASSERT(input->Equals(instr->result())); + DCHECK(input->IsRegister()); + DCHECK(input->Equals(instr->result())); Register input_reg = ToRegister(input); @@ -4951,9 +4986,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); LOperand* result = instr->result(); - ASSERT(result->IsDoubleRegister()); + DCHECK(result->IsDoubleRegister()); Register input_reg = ToRegister(input); DoubleRegister result_reg = ToDoubleRegister(result); @@ -5046,7 +5081,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { LOperand* input = instr->value(); __ SmiTst(ToRegister(input), at); DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); @@ -5081,7 +5116,7 @@ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); if (IsPowerOf2(mask)) { - ASSERT(tag == 0 || IsPowerOf2(tag)); + DCHECK(tag == 0 || IsPowerOf2(tag)); __ And(at, scratch, mask); DeoptimizeIf(tag == 0 ? 
ne : eq, instr->environment(), at, Operand(zero_reg)); @@ -5113,7 +5148,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); __ push(object); __ mov(cp, zero_reg); __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); @@ -5144,28 +5179,35 @@ Register object_; }; - if (instr->hydrogen()->CanOmitMapChecks()) return; + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } + Register map_reg = scratch0(); LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); Register reg = ToRegister(input); __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg); __ bind(deferred->check_maps()); } - UniqueSet<Map> map_set = instr->hydrogen()->map_set(); + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); Label success; - for (int i = 0; i < map_set.size() - 1; i++) { - Handle<Map> map = map_set.at(i).handle(); + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); __ CompareMapAndBranch(map_reg, map, &success, eq, &success); } - Handle<Map> map = map_set.at(map_set.size() - 1).handle(); + Handle<Map> map = maps->at(maps->size() - 1).handle(); // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). 
- if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { __ Branch(deferred->entry(), ne, map_reg, Operand(map)); } else { DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map)); @@ -5270,11 +5312,11 @@ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); } if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); } if (instr->size()->IsConstantOperand()) { @@ -5286,33 +5328,26 @@ } } else { Register size = ToRegister(instr->size()); - __ Allocate(size, - result, - scratch, - scratch2, - deferred->entry(), - flags); + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); } __ bind(deferred->exit()); if (instr->hydrogen()->MustPrefillWithFiller()) { + STATIC_ASSERT(kHeapObjectTag == 1); if (instr->size()->IsConstantOperand()) { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ li(scratch, Operand(size)); + __ li(scratch, Operand(size - kHeapObjectTag)); } else { - scratch = ToRegister(instr->size()); + __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); } - __ Subu(scratch, scratch, Operand(kPointerSize)); - __ Subu(result, result, Operand(kHeapObjectTag)); + __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); Label loop; __ bind(&loop); - __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); + __ Subu(scratch, scratch, Operand(kPointerSize)); __ Addu(at, result, Operand(scratch)); 
__ sw(scratch2, MemOperand(at)); - __ Subu(scratch, scratch, Operand(kPointerSize)); __ Branch(&loop, ge, scratch, Operand(zero_reg)); - __ Addu(result, result, Operand(kHeapObjectTag)); } } @@ -5325,25 +5360,31 @@ // contained in the register pointer map. __ mov(result, zero_reg); - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); if (instr->size()->IsRegister()) { Register size = ToRegister(instr->size()); - ASSERT(!size.is(result)); + DCHECK(!size.is(result)); __ SmiTag(size); __ push(size); } else { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ Push(Smi::FromInt(size)); + if (size >= 0 && size <= Smi::kMaxValue) { + __ Push(Smi::FromInt(size)); + } else { + // We should never get here at runtime => abort + __ stop("invalid allocation size"); + return; + } } int flags = AllocateDoubleAlignFlag::encode( instr->hydrogen()->MustAllocateDoubleAligned()); if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); } else { flags = AllocateTargetSpace::update(flags, NEW_SPACE); @@ -5351,21 +5392,21 @@ __ Push(Smi::FromInt(flags)); CallRuntimeFromDeferred( - Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context()); + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); __ StoreToSafepointRegisterSlot(v0, result); } void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - ASSERT(ToRegister(instr->value()).is(a0)); - 
ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->value()).is(a0)); + DCHECK(ToRegister(instr->result()).is(v0)); __ push(a0); CallRuntime(Runtime::kToFastProperties, 1, instr); } void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); Label materialized; // Registers will be used as follows: // t3 = literals array. @@ -5385,7 +5426,7 @@ __ li(t1, Operand(instr->hydrogen()->pattern())); __ li(t0, Operand(instr->hydrogen()->flags())); __ Push(t3, t2, t1, t0); - CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); __ mov(a1, v0); __ bind(&materialized); @@ -5398,7 +5439,7 @@ __ bind(&runtime_allocate); __ li(a0, Operand(Smi::FromInt(size))); __ Push(a1, a0); - CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); __ pop(a1); __ bind(&allocated); @@ -5418,27 +5459,28 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->context()).is(cp)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && instr->hydrogen()->has_no_literals()) { - FastNewClosureStub stub(instr->hydrogen()->strict_mode(), + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), instr->hydrogen()->is_generator()); __ li(a2, Operand(instr->hydrogen()->shared_info())); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ li(a2, Operand(instr->hydrogen()->shared_info())); __ li(a1, Operand(pretenure ? 
factory()->true_value() : factory()->false_value())); __ Push(cp, a2, a1); - CallRuntime(Runtime::kHiddenNewClosure, 3, instr); + CallRuntime(Runtime::kNewClosure, 3, instr); } } void LCodeGen::DoTypeof(LTypeof* instr) { - ASSERT(ToRegister(instr->result()).is(v0)); + DCHECK(ToRegister(instr->result()).is(v0)); Register input = ToRegister(instr->value()); __ push(input); CallRuntime(Runtime::kTypeof, 1, instr); @@ -5455,11 +5497,11 @@ instr->FalseLabel(chunk_), input, instr->type_literal(), - cmp1, - cmp2); + &cmp1, + &cmp2); - ASSERT(cmp1.is_valid()); - ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid()); + DCHECK(cmp1.is_valid()); + DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid()); if (final_branch_condition != kNoCondition) { EmitBranch(instr, final_branch_condition, cmp1, cmp2); @@ -5471,22 +5513,23 @@ Label* false_label, Register input, Handle<String> type_name, - Register& cmp1, - Operand& cmp2) { + Register* cmp1, + Operand* cmp2) { // This function utilizes the delay slot heavily. This is used to load // values that are always usable without depending on the type of the input // register. Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); - if (type_name->Equals(heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(type_name, factory->number_string())) { __ JumpIfSmi(input, true_label); __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - cmp1 = input; - cmp2 = Operand(at); + *cmp1 = input; + *cmp2 = Operand(at); final_branch_condition = eq; - } else if (type_name->Equals(heap()->string_string())) { + } else if (String::Equals(type_name, factory->string_string())) { __ JumpIfSmi(input, false_label); __ GetObjectType(input, input, scratch); __ Branch(USE_DELAY_SLOT, false_label, @@ -5495,32 +5538,26 @@ // other branch. 
__ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); __ And(at, at, 1 << Map::kIsUndetectable); - cmp1 = at; - cmp2 = Operand(zero_reg); + *cmp1 = at; + *cmp2 = Operand(zero_reg); final_branch_condition = eq; - } else if (type_name->Equals(heap()->symbol_string())) { + } else if (String::Equals(type_name, factory->symbol_string())) { __ JumpIfSmi(input, false_label); __ GetObjectType(input, input, scratch); - cmp1 = scratch; - cmp2 = Operand(SYMBOL_TYPE); + *cmp1 = scratch; + *cmp2 = Operand(SYMBOL_TYPE); final_branch_condition = eq; - } else if (type_name->Equals(heap()->boolean_string())) { + } else if (String::Equals(type_name, factory->boolean_string())) { __ LoadRoot(at, Heap::kTrueValueRootIndex); __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); __ LoadRoot(at, Heap::kFalseValueRootIndex); - cmp1 = at; - cmp2 = Operand(input); - final_branch_condition = eq; - - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { - __ LoadRoot(at, Heap::kNullValueRootIndex); - cmp1 = at; - cmp2 = Operand(input); + *cmp1 = at; + *cmp2 = Operand(input); final_branch_condition = eq; - } else if (type_name->Equals(heap()->undefined_string())) { + } else if (String::Equals(type_name, factory->undefined_string())) { __ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); // The first instruction of JumpIfSmi is an And - it is safe in the delay @@ -5530,25 +5567,23 @@ __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); __ And(at, at, 1 << Map::kIsUndetectable); - cmp1 = at; - cmp2 = Operand(zero_reg); + *cmp1 = at; + *cmp2 = Operand(zero_reg); final_branch_condition = ne; - } else if (type_name->Equals(heap()->function_string())) { + } else if (String::Equals(type_name, factory->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); __ GetObjectType(input, 
scratch, input); __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE)); - cmp1 = input; - cmp2 = Operand(JS_FUNCTION_PROXY_TYPE); + *cmp1 = input; + *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE); final_branch_condition = eq; - } else if (type_name->Equals(heap()->object_string())) { + } else if (String::Equals(type_name, factory->object_string())) { __ JumpIfSmi(input, false_label); - if (!FLAG_harmony_typeof) { - __ LoadRoot(at, Heap::kNullValueRootIndex); - __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); - } + __ LoadRoot(at, Heap::kNullValueRootIndex); + __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); Register map = input; __ GetObjectType(input, map, scratch); __ Branch(false_label, @@ -5559,13 +5594,13 @@ // Check for undetectable objects => false. __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); __ And(at, at, 1 << Map::kIsUndetectable); - cmp1 = at; - cmp2 = Operand(zero_reg); + *cmp1 = at; + *cmp2 = Operand(zero_reg); final_branch_condition = eq; } else { - cmp1 = at; - cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion. + *cmp1 = at; + *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion. __ Branch(false_label); } @@ -5584,7 +5619,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { - ASSERT(!temp1.is(temp2)); + DCHECK(!temp1.is(temp2)); // Get the frame pointer for the calling frame. 
__ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); @@ -5608,7 +5643,7 @@ int current_pc = masm()->pc_offset(); if (current_pc < last_lazy_deopt_pc_ + space_needed) { int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; - ASSERT_EQ(0, padding_size % Assembler::kInstrSize); + DCHECK_EQ(0, padding_size % Assembler::kInstrSize); while (padding_size > 0) { __ nop(); padding_size -= Assembler::kInstrSize; @@ -5621,7 +5656,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { last_lazy_deopt_pc_ = masm()->pc_offset(); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); @@ -5654,12 +5689,12 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + PushSafepointRegistersScope scope(this); LoadContextFromDeferred(instr->context()); - __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); RecordSafepointWithLazyDeopt( instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); } @@ -5678,7 +5713,7 @@ LStackCheck* instr_; }; - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); // There is no LLazyBailout instruction for stack-checks. We have to // prepare for lazy deoptimization explicitly here. 
@@ -5687,14 +5722,14 @@ Label done; __ LoadRoot(at, Heap::kStackLimitRootIndex); __ Branch(&done, hs, sp, Operand(at)); - ASSERT(instr->context()->IsRegister()); - ASSERT(ToRegister(instr->context()).is(cp)); + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(cp)); CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, instr); __ bind(&done); } else { - ASSERT(instr->hydrogen()->is_backwards_branch()); + DCHECK(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping. DeferredStackCheck* deferred_stack_check = new(zone()) DeferredStackCheck(this, instr); @@ -5719,7 +5754,7 @@ // If the environment were already registered, we would have no way of // backpatching it with the spill slot operands. - ASSERT(!environment->HasBeenRegistered()); + DCHECK(!environment->HasBeenRegistered()); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); GenerateOsrPrologue(); @@ -5744,7 +5779,7 @@ DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE)); Label use_cache, call_runtime; - ASSERT(object.is(a0)); + DCHECK(object.is(a0)); __ CheckEnumCache(null_value, &call_runtime); __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset)); @@ -5756,7 +5791,7 @@ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); - ASSERT(result.is(v0)); + DCHECK(result.is(v0)); __ LoadRoot(at, Heap::kMetaMapRootIndex); DeoptimizeIf(ne, instr->environment(), a1, Operand(at)); __ bind(&use_cache); @@ -5792,13 +5827,60 @@ } +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ Push(object, index); + __ mov(cp, zero_reg); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ 
StoreToSafepointRegisterSlot(v0, result); +} + + void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + result_(result), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register result_; + Register object_; + Register index_; + }; + Register object = ToRegister(instr->object()); Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, result, object, index); + Label out_of_object, done; + + __ And(scratch, index, Operand(Smi::FromInt(1))); + __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg)); + __ sra(index, index, 1); + __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg)); __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot. 
@@ -5814,10 +5896,26 @@ __ Subu(scratch, result, scratch); __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); + __ bind(deferred->exit()); __ bind(&done); } +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ li(at, scope_info); + __ Push(at, ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + #undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/mips/lithium-codegen-mips.h nodejs-0.11.15/deps/v8/src/mips/lithium-codegen-mips.h --- nodejs-0.11.13/deps/v8/src/mips/lithium-codegen-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/lithium-codegen-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ -#include "deoptimizer.h" -#include "mips/lithium-gap-resolver-mips.h" -#include "mips/lithium-mips.h" -#include "lithium-codegen.h" -#include "safepoint-table.h" -#include "scopes.h" -#include "v8utils.h" +#include "src/deoptimizer.h" +#include "src/lithium-codegen.h" +#include "src/mips/lithium-gap-resolver-mips.h" +#include "src/mips/lithium-mips.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -140,6 +117,10 @@ Label* map_check); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -151,8 +132,7 @@ int constant_key, int element_size, int shift_size, - int additional_index, - int additional_offset); + int base_offset); // Emit frame translation commands for an environment. 
void WriteTranslation(LEnvironment* environment, Translation* translation); @@ -182,8 +162,6 @@ int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - void Abort(BailoutReason reason); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } void SaveCallerDoubles(); @@ -259,10 +237,6 @@ LEnvironment* environment, Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg)); - void ApplyCheckIf(Condition condition, - LBoundsCheck* check, - Register src1 = zero_reg, - const Operand& src2 = Operand(zero_reg)); void AddToTranslation(LEnvironment* environment, Translation* translation, @@ -295,9 +269,6 @@ void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, Safepoint::DeoptMode mode); - void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, - int arguments, - Safepoint::DeoptMode mode); void RecordAndWritePosition(int position) V8_OVERRIDE; @@ -342,8 +313,8 @@ Label* false_label, Register input, Handle<String> type_name, - Register& cmp1, - Operand& cmp2); + Register* cmp1, + Operand* cmp2); // Emits optimized code for %_IsObject(x). Preserves input register. 
// Returns the condition on which a final split to @@ -412,52 +383,24 @@ Safepoint::Kind expected_safepoint_kind_; - class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { + class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { public: - PushSafepointRegistersScope(LCodeGen* codegen, - Safepoint::Kind kind) + explicit PushSafepointRegistersScope(LCodeGen* codegen) : codegen_(codegen) { - ASSERT(codegen_->info()->is_calling()); - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); - codegen_->expected_safepoint_kind_ = kind; - - switch (codegen_->expected_safepoint_kind_) { - case Safepoint::kWithRegisters: { - StoreRegistersStateStub stub1(kDontSaveFPRegs); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub1); - break; - } - case Safepoint::kWithRegistersAndDoubles: { - StoreRegistersStateStub stub2(kSaveFPRegs); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub2); - break; - } - default: - UNREACHABLE(); - } + DCHECK(codegen_->info()->is_calling()); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; + + StoreRegistersStateStub stub(codegen_->isolate()); + codegen_->masm_->push(ra); + codegen_->masm_->CallStub(&stub); } ~PushSafepointRegistersScope() { - Safepoint::Kind kind = codegen_->expected_safepoint_kind_; - ASSERT((kind & Safepoint::kWithRegisters) != 0); - switch (kind) { - case Safepoint::kWithRegisters: { - RestoreRegistersStateStub stub1(kDontSaveFPRegs); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub1); - break; - } - case Safepoint::kWithRegistersAndDoubles: { - RestoreRegistersStateStub stub2(kSaveFPRegs); - codegen_->masm_->push(ra); - codegen_->masm_->CallStub(&stub2); - break; - } - default: - UNREACHABLE(); - } + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + RestoreRegistersStateStub stub(codegen_->isolate()); + codegen_->masm_->push(ra); + codegen_->masm_->CallStub(&stub); 
codegen_->expected_safepoint_kind_ = Safepoint::kSimple; } diff -Nru nodejs-0.11.13/deps/v8/src/mips/lithium-gap-resolver-mips.cc nodejs-0.11.15/deps/v8/src/mips/lithium-gap-resolver-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/lithium-gap-resolver-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/lithium-gap-resolver-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "mips/lithium-gap-resolver-mips.h" -#include "mips/lithium-codegen-mips.h" +#include "src/mips/lithium-codegen-mips.h" +#include "src/mips/lithium-gap-resolver-mips.h" namespace v8 { namespace internal { @@ -42,7 +19,7 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) { - ASSERT(moves_.is_empty()); + DCHECK(moves_.is_empty()); // Build up a worklist of moves. BuildInitialMoveList(parallel_move); @@ -63,7 +40,7 @@ // Perform the moves with constant sources. for (int i = 0; i < moves_.length(); ++i) { if (!moves_[i].IsEliminated()) { - ASSERT(moves_[i].source()->IsConstantOperand()); + DCHECK(moves_[i].source()->IsConstantOperand()); EmitMove(i); } } @@ -101,13 +78,13 @@ // An additional complication is that moves to MemOperands with large // offsets (more than 1K or 4K) require us to spill this spilled value to // the stack, to free up the register. - ASSERT(!moves_[index].IsPending()); - ASSERT(!moves_[index].IsRedundant()); + DCHECK(!moves_[index].IsPending()); + DCHECK(!moves_[index].IsRedundant()); // Clear this move's destination to indicate a pending move. The actual // destination is saved in a stack allocated local. Multiple moves can // be pending because this function is recursive. - ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. LOperand* destination = moves_[index].destination(); moves_[index].set_destination(NULL); @@ -134,7 +111,7 @@ // a scratch register to break it. 
LMoveOperands other_move = moves_[root_index_]; if (other_move.Blocks(destination)) { - ASSERT(other_move.IsPending()); + DCHECK(other_move.IsPending()); BreakCycle(index); return; } @@ -145,12 +122,12 @@ void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS // No operand should be the destination for more than one move. for (int i = 0; i < moves_.length(); ++i) { LOperand* destination = moves_[i].destination(); for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); } } #endif @@ -162,8 +139,8 @@ // We save in a register the value that should end up in the source of // moves_[root_index]. After performing all moves in the tree rooted // in that move, we save the value to that source. - ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); - ASSERT(!in_cycle_); + DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); + DCHECK(!in_cycle_); in_cycle_ = true; LOperand* source = moves_[index].source(); saved_destination_ = moves_[index].destination(); @@ -184,8 +161,8 @@ void LGapResolver::RestoreValue() { - ASSERT(in_cycle_); - ASSERT(saved_destination_ != NULL); + DCHECK(in_cycle_); + DCHECK(saved_destination_ != NULL); // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble. 
if (saved_destination_->IsRegister()) { @@ -219,7 +196,7 @@ if (destination->IsRegister()) { __ mov(cgen_->ToRegister(destination), source_register); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); __ sw(source_register, cgen_->ToMemOperand(destination)); } } else if (source->IsStackSlot()) { @@ -227,7 +204,7 @@ if (destination->IsRegister()) { __ lw(cgen_->ToRegister(destination), source_operand); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsInt16Encodable()) { @@ -263,8 +240,8 @@ double v = cgen_->ToDouble(constant_source); __ Move(result, v); } else { - ASSERT(destination->IsStackSlot()); - ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. + DCHECK(destination->IsStackSlot()); + DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone. Representation r = cgen_->IsSmi(constant_source) ? 
Representation::Smi() : Representation::Integer32(); if (cgen_->IsInteger32(constant_source)) { @@ -281,7 +258,7 @@ if (destination->IsDoubleRegister()) { __ mov_d(cgen_->ToDoubleRegister(destination), source_register); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); __ sdc1(source_register, destination_operand); } @@ -291,7 +268,7 @@ if (destination->IsDoubleRegister()) { __ ldc1(cgen_->ToDoubleRegister(destination), source_operand); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { // kLithiumScratchDouble was used to break the cycle, diff -Nru nodejs-0.11.13/deps/v8/src/mips/lithium-gap-resolver-mips.h nodejs-0.11.15/deps/v8/src/mips/lithium-gap-resolver-mips.h --- nodejs-0.11.13/deps/v8/src/mips/lithium-gap-resolver-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/lithium-gap-resolver-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ #define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ -#include "v8.h" +#include "src/v8.h" -#include "lithium.h" +#include "src/lithium.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/mips/lithium-mips.cc nodejs-0.11.15/deps/v8/src/mips/lithium-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/lithium-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/lithium-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "lithium-allocator-inl.h" -#include "mips/lithium-mips.h" -#include "mips/lithium-codegen-mips.h" -#include "hydrogen-osr.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS + +#include "src/hydrogen-osr.h" +#include "src/lithium-inl.h" +#include "src/mips/lithium-codegen-mips.h" namespace v8 { namespace internal { @@ -48,17 +26,17 @@ // outputs because all registers are blocked by the calling convention. // Inputs operands must use a fixed register or use-at-start policy or // a non-register policy. 
- ASSERT(Output() == NULL || + DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || !LUnallocated::cast(Output())->HasRegisterPolicy()); for (UseIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() || + DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); } for (TempIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); } } #endif @@ -345,8 +323,9 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); - hydrogen()->access().PrintTo(stream); - stream->Add(" <- "); + OStringStream os; + os << hydrogen()->access() << " <- "; + stream->Add(os.c_str()); value()->PrintTo(stream); } @@ -365,7 +344,7 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", additional_index()); + stream->Add(" + %d]", base_offset()); } else { stream->Add("]"); } @@ -377,13 +356,13 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", additional_index()); + stream->Add(" + %d] <-", base_offset()); } else { stream->Add("] <- "); } if (value() == NULL) { - ASSERT(hydrogen()->IsConstantHoleStore() && + DCHECK(hydrogen()->IsConstantHoleStore() && hydrogen()->value()->representation().IsDouble()); stream->Add("<the hole(nan)>"); } else { @@ -419,14 +398,14 @@ if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { - ASSERT(kind == GENERAL_REGISTERS); + DCHECK(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } LPlatformChunk* LChunkBuilder::Build() { - ASSERT(is_unused()); + DCHECK(is_unused()); chunk_ = new(zone()) LPlatformChunk(info(), graph()); LPhase phase("L_Building chunk", chunk_); status_ = BUILDING; @@ 
-451,7 +430,7 @@ } -void LCodeGen::Abort(BailoutReason reason) { +void LChunkBuilder::Abort(BailoutReason reason) { info()->set_bailout_reason(reason); status_ = ABORTED; } @@ -628,6 +607,8 @@ !hinstr->HasObservableSideEffects(); if (needs_environment && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. + instr->environment()->set_has_been_used(); } return instr; @@ -635,7 +616,7 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); + DCHECK(!instr->HasPointerMap()); instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -654,16 +635,29 @@ } +LUnallocated* LChunkBuilder::TempDoubleRegister() { + LUnallocated* operand = + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); + vreg = 0; + } + operand->set_virtual_register(vreg); + return operand; +} + + LOperand* LChunkBuilder::FixedTemp(Register reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } @@ -692,8 +686,8 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); HValue* right_value = 
instr->right(); @@ -734,9 +728,9 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); if (op == Token::MOD) { LOperand* left = UseFixedDouble(instr->left(), f2); LOperand* right = UseFixedDouble(instr->right(), f4); @@ -758,8 +752,8 @@ HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); - ASSERT(left->representation().IsTagged()); - ASSERT(right->representation().IsTagged()); + DCHECK(left->representation().IsTagged()); + DCHECK(right->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left_operand = UseFixed(left, a1); LOperand* right_operand = UseFixed(right, a0); @@ -770,7 +764,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - ASSERT(is_building()); + DCHECK(is_building()); current_block_ = block; next_block_ = next_block; if (block->IsStartBlock()) { @@ -779,13 +773,13 @@ } else if (block->predecessors()->length() == 1) { // We have a single predecessor => copy environment and outgoing // argument count from the predecessor. - ASSERT(block->phis()->length() == 0); + DCHECK(block->phis()->length() == 0); HBasicBlock* pred = block->predecessors()->at(0); HEnvironment* last_environment = pred->last_environment(); - ASSERT(last_environment != NULL); + DCHECK(last_environment != NULL); // Only copy the environment, if it is later used again. 
if (pred->end()->SecondSuccessor() == NULL) { - ASSERT(pred->end()->FirstSuccessor() == block); + DCHECK(pred->end()->FirstSuccessor() == block); } else { if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || pred->end()->SecondSuccessor()->block_id() > block->block_id()) { @@ -793,7 +787,7 @@ } } block->UpdateEnvironment(last_environment); - ASSERT(pred->argument_count() >= 0); + DCHECK(pred->argument_count() >= 0); argument_count_ = pred->argument_count(); } else { // We are at a state join => process phis. @@ -845,7 +839,7 @@ if (current->OperandCount() == 0) { instr = DefineAsRegister(new(zone()) LDummy()); } else { - ASSERT(!current->OperandAt(0)->IsControlInstruction()); + DCHECK(!current->OperandAt(0)->IsControlInstruction()); instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(current->OperandAt(0)))); } @@ -857,75 +851,90 @@ chunk_->AddInstruction(dummy, current_block_); } } else { - instr = current->CompileToLithium(this); + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } } argument_count_ += current->argument_delta(); - ASSERT(argument_count_ >= 0); + DCHECK(argument_count_ >= 0); if (instr != NULL) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(current); + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { +// Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
+ instr->set_hydrogen_value(hydrogen_val); #if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. - // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - ASSERT(fixed == 0 || used_at_start == 0); + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. 
In this case, The register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; } + DCHECK(fixed == 0 || used_at_start == 0); + } #endif - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + chunk_->AddInstruction(instr, current_block_); - if (instr->IsCall()) { - HValue* hydrogen_value_for_lazy_bailout = current; - LInstruction* instruction_needing_environment = NULL; - if (current->HasObservableSideEffects()) { - HSimulate* sim = HSimulate::cast(current->next()); - instruction_needing_environment = instr; - sim->ReplayEnvironment(current_block_->last_environment()); - hydrogen_value_for_lazy_bailout = sim; - } - LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); - bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); - 
chunk_->AddInstruction(bailout, current_block_); - if (instruction_needing_environment != NULL) { - // Store the lazy deopt environment with the instruction if needed. - // Right now it is only used for LInstanceOfKnownGlobal. - instruction_needing_environment-> - SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); - } + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. + instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); } } - current_instruction_ = old_current; } @@ -935,30 +944,26 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - HValue* value = instr->value(); - LBranch* result = new(zone()) LBranch(UseRegister(value)); - // Tagged values that are not known smis or booleans require a - // deoptimization environment. If the instruction is generic no - // environment is needed since all cases are handled. 
- Representation rep = value->representation(); + Representation r = value->representation(); HType type = value->type(); ToBooleanStub::Types expected = instr->expected_input_types(); - if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() && - !expected.IsGeneric()) { - return AssignEnvironment(result); + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || + type.IsJSArray() || type.IsHeapNumber() || type.IsString(); + LInstruction* branch = new(zone()) LBranch(UseRegister(value)); + if (!easy_case && + ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) || + !expected.IsGeneric())) { + branch = AssignEnvironment(branch); } - return result; + return branch; } LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LCmpMapAndBranch(value, temp); @@ -1019,9 +1024,13 @@ } -LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - LOperand* argument = Use(instr->argument()); - return new(zone()) LPushArgument(argument); +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + for (int i = 0; i < argc; ++i) { + LOperand* argument = Use(instr->argument(i)); + AddInstruction(new(zone()) LPushArgument(argument), instr); + } + return NULL; } @@ -1078,7 +1087,7 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor( HCallWithDescriptor* instr) { - const CallInterfaceDescriptor* descriptor = instr->descriptor(); + const InterfaceDescriptor* descriptor = instr->descriptor(); LOperand* target = UseRegisterOrConstantAtStart(instr->target()); ZoneList<LOperand*> 
ops(instr->OperandCount(), zone()); @@ -1105,14 +1114,24 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { switch (instr->op()) { - case kMathFloor: return DoMathFloor(instr); - case kMathRound: return DoMathRound(instr); - case kMathAbs: return DoMathAbs(instr); - case kMathLog: return DoMathLog(instr); - case kMathExp: return DoMathExp(instr); - case kMathSqrt: return DoMathSqrt(instr); - case kMathPowHalf: return DoMathPowHalf(instr); - case kMathClz32: return DoMathClz32(instr); + case kMathFloor: + return DoMathFloor(instr); + case kMathRound: + return DoMathRound(instr); + case kMathFround: + return DoMathFround(instr); + case kMathAbs: + return DoMathAbs(instr); + case kMathLog: + return DoMathLog(instr); + case kMathExp: + return DoMathExp(instr); + case kMathSqrt: + return DoMathSqrt(instr); + case kMathPowHalf: + return DoMathPowHalf(instr); + case kMathClz32: + return DoMathClz32(instr); default: UNREACHABLE(); return NULL; @@ -1121,8 +1140,8 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseFixedDouble(instr->value(), f4); return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr); } @@ -1136,12 +1155,12 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll. 
+ LOperand* double_temp = TempDoubleRegister(); LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); return DefineAsRegister(result); } @@ -1150,20 +1169,30 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf. LOperand* input = UseFixedDouble(instr->value(), f8); - LOperand* temp = FixedTemp(f6); + LOperand* temp = TempDoubleRegister(); LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); return DefineFixedDouble(result, f4); } +LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathFround* result = new (zone()) LMathFround(input); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { Representation r = instr->value()->representation(); LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) ? NULL : UseFixed(instr->context(), cp); LOperand* input = UseRegister(instr->value()); - LMathAbs* result = new(zone()) LMathAbs(context, input); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + LInstruction* result = + DefineAsRegister(new(zone()) LMathAbs(context, input)); + if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); + if (!r.IsDouble()) result = AssignEnvironment(result); + return result; } @@ -1184,7 +1213,7 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { LOperand* input = UseRegister(instr->value()); - LOperand* temp = FixedTemp(f6); + LOperand* temp = TempDoubleRegister(); LMathRound* result = new(zone()) LMathRound(input, temp); return AssignEnvironment(DefineAsRegister(result)); } @@ -1242,9 +1271,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - 
ASSERT(instr->right()->representation().Equals(instr->representation())); - ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); @@ -1256,9 +1285,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( @@ -1274,9 +1303,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( @@ -1290,14 +1319,23 @@ } -LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - 
ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); +LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); - LDivI* div = new(zone()) LDivI(dividend, divisor); - return AssignEnvironment(DefineAsRegister(div)); + LInstruction* result = + DefineAsRegister(new(zone()) LDivI(dividend, divisor)); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + (instr->CheckFlag(HValue::kCanOverflow) && + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) || + (!instr->IsMathFloorOfDiv() && + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + result = AssignEnvironment(result); + } + return result; } @@ -1332,9 +1370,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp = @@ -1351,26 +1389,38 @@ } +LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = 
UseRegister(instr->left()); + LOperand* divisor = UseRegister(instr->right()); + LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor); + return AssignEnvironment(DefineAsRegister(div)); +} + + LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { if (instr->RightIsPowerOf2()) { return DoFlooringDivByPowerOf2I(instr); } else if (instr->right()->IsConstant()) { return DoFlooringDivByConstI(instr); } else { - return DoDivI(instr); + return DoFlooringDivI(instr); } } LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegisterAtStart(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( dividend, divisor)); - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { result = AssignEnvironment(result); } return result; @@ -1378,9 +1428,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* 
result = DefineAsRegister(new(zone()) LModByConstI( @@ -1393,9 +1443,9 @@ LInstruction* LChunkBuilder::DoModI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); LInstruction* result = DefineAsRegister(new(zone()) LModI( @@ -1421,8 +1471,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); HValue* left = instr->BetterLeftOperand(); HValue* right = instr->BetterRightOperand(); LOperand* left_op; @@ -1462,7 +1512,7 @@ } else if (instr->representation().IsDouble()) { if (kArchVariant == kMips32r2) { - if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) { + if (instr->HasOneUse() && instr->uses().value()->IsAdd()) { HAdd* add = HAdd::cast(instr->uses().value()); if (instr == add->left()) { // This mul is the lhs of an add. 
The add and mul will be folded @@ -1485,8 +1535,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new(zone()) LSubI(left, right); @@ -1514,8 +1564,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); LAddI* add = new(zone()) LAddI(left, right); @@ -1525,9 +1575,9 @@ } return result; } else if (instr->representation().IsExternal()) { - ASSERT(instr->left()->representation().IsExternal()); - ASSERT(instr->right()->representation().IsInteger32()); - ASSERT(!instr->CheckFlag(HValue::kCanOverflow)); + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LAddI* add = new(zone()) LAddI(left, right); @@ -1539,7 +1589,7 @@ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); if (instr->right()->IsMul()) { - ASSERT(!instr->left()->IsMul()); + DCHECK(!instr->left()->IsMul()); return 
DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); } } @@ -1554,14 +1604,14 @@ LOperand* left = NULL; LOperand* right = NULL; if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); left = UseRegisterAtStart(instr->BetterLeftOperand()); right = UseOrConstantAtStart(instr->BetterRightOperand()); } else { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); } @@ -1570,11 +1620,11 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { - ASSERT(instr->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); // We call a C function for double power. It can't trigger a GC. // We need to use fixed result register for the call. Representation exponent_type = instr->right()->representation(); - ASSERT(instr->left()->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); LOperand* left = UseFixedDouble(instr->left(), f2); LOperand* right = exponent_type.IsDouble() ? 
UseFixedDouble(instr->right(), f4) : @@ -1587,8 +1637,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), a1); LOperand* right = UseFixed(instr->right(), a0); @@ -1601,15 +1651,15 @@ HCompareNumericAndBranch* instr) { Representation r = instr->representation(); if (r.IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(r)); - ASSERT(instr->right()->representation().Equals(r)); + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); LOperand* left = UseRegisterOrConstantAtStart(instr->left()); LOperand* right = UseRegisterOrConstantAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); } else { - ASSERT(r.IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); @@ -1619,8 +1669,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1636,8 +1684,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( HCompareMinusZeroAndBranch* instr) { - LInstruction* goto_instr = 
CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* value = UseRegister(instr->value()); LOperand* scratch = TempRegister(); return new(zone()) LCompareMinusZeroAndBranch(value, scratch); @@ -1645,7 +1691,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* temp = TempRegister(); return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp); @@ -1653,7 +1699,7 @@ LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* temp = TempRegister(); return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp); @@ -1661,14 +1707,14 @@ LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsSmiAndBranch(Use(instr->value())); } LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( HIsUndetectableAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsUndetectableAndBranch( UseRegisterAtStart(instr->value()), TempRegister()); } @@ -1676,8 +1722,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch( HStringCompareAndBranch* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), a1); LOperand* right = UseFixed(instr->right(), a0); @@ -1689,7 +1735,7 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( 
HHasInstanceTypeAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LHasInstanceTypeAndBranch(value); } @@ -1697,7 +1743,7 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex( HGetCachedArrayIndex* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); @@ -1706,7 +1752,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( HHasCachedArrayIndexAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LHasCachedArrayIndexAndBranch( UseRegisterAtStart(instr->value())); } @@ -1714,7 +1760,7 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister()); } @@ -1753,9 +1799,16 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - LOperand* value = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = UseRegister(instr->length()); - return AssignEnvironment(new(zone()) LBoundsCheck(value, length)); + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? 
UseRegisterOrConstantAtStart(instr->length()) + : UseRegisterAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } + return result; } @@ -1789,87 +1842,81 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + HValue* val = instr->value(); if (from.IsSmi()) { if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LDummyUse(value)); } from = Representation::Tagged(); } if (from.IsTagged()) { if (to.IsDouble()) { - LOperand* value = UseRegister(instr->value()); - LNumberUntagD* res = new(zone()) LNumberUntagD(value); - return AssignEnvironment(DefineAsRegister(res)); + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); if (val->type().IsSmi()) { return DefineSameAsFirst(new(zone()) LDummyUse(value)); } return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); } else { - ASSERT(to.IsInteger32()); - LOperand* value = NULL; - LInstruction* res = NULL; - HValue* val = instr->value(); + DCHECK(to.IsInteger32()); if (val->type().IsSmi() || val->representation().IsSmi()) { - value = UseRegisterAtStart(val); - res = DefineAsRegister(new(zone()) LSmiUntag(value, false)); + LOperand* value = UseRegisterAtStart(val); + return DefineAsRegister(new(zone()) LSmiUntag(value, false)); } else { - value = UseRegister(val); + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); - LOperand* temp2 = FixedTemp(f22); - res = DefineSameAsFirst(new(zone()) LTaggedToI(value, - temp1, - temp2)); - res = AssignEnvironment(res); + 
LOperand* temp2 = TempDoubleRegister(); + LInstruction* result = + DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } - return res; } } else if (from.IsDouble()) { if (to.IsTagged()) { info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - - // Make sure that the temp and result_temp registers are - // different. LUnallocated* result_temp = TempRegister(); LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); - Define(result, result_temp); - return AssignPointerMap(result); + return AssignPointerMap(Define(result, result_temp)); } else if (to.IsSmi()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return AssignEnvironment( DefineAsRegister(new(zone()) LDoubleToSmi(value))); } else { - ASSERT(to.IsInteger32()); - LOperand* value = UseRegister(instr->value()); - LDoubleToI* res = new(zone()) LDoubleToI(value); - return AssignEnvironment(DefineAsRegister(res)); + DCHECK(to.IsInteger32()); + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); + if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); + return result; } } else if (from.IsInteger32()) { info()->MarkAsDeferredCalling(); if (to.IsTagged()) { - HValue* val = instr->value(); - LOperand* value = UseRegisterAtStart(val); if (!instr->CheckFlag(HValue::kCanOverflow)) { + LOperand* value = UseRegisterAtStart(val); return DefineAsRegister(new(zone()) LSmiTag(value)); } else if (val->CheckFlag(HInstruction::kUint32)) { + LOperand* value = UseRegisterAtStart(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); - return 
AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } else { + LOperand* value = UseRegisterAtStart(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); if (instr->CheckFlag(HValue::kCanOverflow)) { @@ -1877,13 +1924,11 @@ } return result; } else { - ASSERT(to.IsDouble()); - if (instr->value()->CheckFlag(HInstruction::kUint32)) { - return DefineAsRegister( - new(zone()) LUint32ToDouble(UseRegister(instr->value()))); + DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { + return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); } else { - return DefineAsRegister( - new(zone()) LInteger32ToDouble(Use(instr->value()))); + return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); } } } @@ -1894,7 +1939,11 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckNonSmi(value)); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = AssignEnvironment(result); + } + return result; } @@ -1918,15 +1967,12 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - LOperand* value = NULL; - if (!instr->CanOmitMapChecks()) { - value = UseRegisterAtStart(instr->value()); - if (instr->has_migration_target()) info()->MarkAsDeferredCalling(); - } - LCheckMaps* result = new(zone()) LCheckMaps(value); - if (!instr->CanOmitMapChecks()) { - AssignEnvironment(result); - if (instr->has_migration_target()) return 
AssignPointerMap(result); + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); } return result; } @@ -1938,14 +1984,14 @@ LOperand* reg = UseRegister(value); if (input_rep.IsDouble()) { // Revisit this decision, here and 8 lines below. - return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(f22))); + return DefineAsRegister(new(zone()) LClampDToUint8(reg, + TempDoubleRegister())); } else if (input_rep.IsInteger32()) { return DefineAsRegister(new(zone()) LClampIToUint8(reg)); } else { - ASSERT(input_rep.IsSmiOrTagged()); - // Register allocator doesn't (yet) support allocation of double - // temps. Reserve f22 explicitly. - LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22)); + DCHECK(input_rep.IsSmiOrTagged()); + LClampTToUint8* result = + new(zone()) LClampTToUint8(reg, TempDoubleRegister()); return AssignEnvironment(DefineAsRegister(result)); } } @@ -1953,7 +1999,7 @@ LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { HValue* value = instr->value(); - ASSERT(value->representation().IsDouble()); + DCHECK(value->representation().IsDouble()); return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); } @@ -2004,9 +2050,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* global_object = UseFixed(instr->global_object(), a0); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LLoadGlobalGeneric* result = - new(zone()) LLoadGlobalGeneric(context, global_object); + new(zone()) LLoadGlobalGeneric(context, global_object, vector); 
return MarkAsCall(DefineFixed(result, v0), instr); } @@ -2025,7 +2076,10 @@ LOperand* context = UseRegisterAtStart(instr->value()); LInstruction* result = DefineAsRegister(new(zone()) LLoadContextSlot(context)); - return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result; + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; } @@ -2040,7 +2094,10 @@ value = UseRegister(instr->value()); } LInstruction* result = new(zone()) LStoreContextSlot(context, value); - return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result; + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; } @@ -2052,9 +2109,14 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), a0); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LInstruction* result = - DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0); + DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0); return MarkAsCall(result, instr); } @@ -2072,54 +2134,63 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - ASSERT(instr->key()->representation().IsSmiOrInteger32()); + DCHECK(instr->key()->representation().IsSmiOrInteger32()); ElementsKind elements_kind = instr->elements_kind(); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyed* result = NULL; + LInstruction* result = NULL; if (!instr->is_typed_elements()) { LOperand* obj = NULL; if (instr->representation().IsDouble()) { obj = UseRegister(instr->elements()); } else { - ASSERT(instr->representation().IsSmiOrTagged()); + DCHECK(instr->representation().IsSmiOrTagged()); obj = 
UseRegisterAtStart(instr->elements()); } - result = new(zone()) LLoadKeyed(obj, key); + result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key)); } else { - ASSERT( + DCHECK( (instr->representation().IsInteger32() && - !IsDoubleOrFloatElementsKind(instr->elements_kind())) || + !IsDoubleOrFloatElementsKind(elements_kind)) || (instr->representation().IsDouble() && - IsDoubleOrFloatElementsKind(instr->elements_kind()))); + IsDoubleOrFloatElementsKind(elements_kind))); LOperand* backing_store = UseRegister(instr->elements()); - result = new(zone()) LLoadKeyed(backing_store, key); + result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key)); } - DefineAsRegister(result); - // An unsigned int array load might overflow and cause a deopt, make sure it - // has an environment. - bool can_deoptimize = instr->RequiresHoleCheck() || - elements_kind == EXTERNAL_UINT32_ELEMENTS || - elements_kind == UINT32_ELEMENTS; - return can_deoptimize ? AssignEnvironment(result) : result; + if ((instr->is_external() || instr->is_fixed_typed_array()) ? 
+ // see LCodeGen::DoLoadKeyedExternalArray + ((elements_kind == EXTERNAL_UINT32_ELEMENTS || + elements_kind == UINT32_ELEMENTS) && + !instr->CheckFlag(HInstruction::kUint32)) : + // see LCodeGen::DoLoadKeyedFixedDoubleArray and + // LCodeGen::DoLoadKeyedFixedArray + instr->RequiresHoleCheck()) { + result = AssignEnvironment(result); + } + return result; } LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* object = UseFixed(instr->object(), a1); - LOperand* key = UseFixed(instr->key(), a0); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LInstruction* result = - DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0); + DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector), + v0); return MarkAsCall(result, instr); } LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { if (!instr->is_typed_elements()) { - ASSERT(instr->elements()->representation().IsTagged()); + DCHECK(instr->elements()->representation().IsTagged()); bool needs_write_barrier = instr->NeedsWriteBarrier(); LOperand* object = NULL; LOperand* val = NULL; @@ -2130,7 +2201,7 @@ key = UseRegisterOrConstantAtStart(instr->key()); val = UseRegister(instr->value()); } else { - ASSERT(instr->value()->representation().IsSmiOrTagged()); + DCHECK(instr->value()->representation().IsSmiOrTagged()); if (needs_write_barrier) { object = UseTempRegister(instr->elements()); val = UseTempRegister(instr->value()); @@ -2145,12 +2216,12 @@ return new(zone()) LStoreKeyed(object, key, val); } - ASSERT( + DCHECK( (instr->value()->representation().IsInteger32() && !IsDoubleOrFloatElementsKind(instr->elements_kind())) || (instr->value()->representation().IsDouble() && 
IsDoubleOrFloatElementsKind(instr->elements_kind()))); - ASSERT((instr->is_fixed_typed_array() && + DCHECK((instr->is_fixed_typed_array() && instr->elements()->representation().IsTagged()) || (instr->is_external() && instr->elements()->representation().IsExternal())); @@ -2163,13 +2234,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* obj = UseFixed(instr->object(), a2); - LOperand* key = UseFixed(instr->key(), a1); - LOperand* val = UseFixed(instr->value(), a0); - - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsTagged()); + LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return MarkAsCall( new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr); @@ -2178,17 +2249,18 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - LOperand* object = UseRegister(instr->object()); if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { + LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); return result; } else { + LOperand* object = UseFixed(instr->object(), a0); LOperand* context = UseFixed(instr->context(), cp); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, context, NULL); - return AssignPointerMap(result); + return MarkAsCall(result, instr); } } @@ -2232,20 +2304,14 @@ // We need a temporary register for write 
barrier of the map field. LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; - LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp); - if (instr->field_representation().IsHeapObject()) { - if (!instr->value()->type().IsHeapObject()) { - return AssignEnvironment(result); - } - } - return result; + return new(zone()) LStoreNamedField(obj, val, temp); } LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); - LOperand* obj = UseFixed(instr->object(), a1); - LOperand* val = UseFixed(instr->value(), a0); + LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister()); LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val); return MarkAsCall(result, instr); @@ -2268,7 +2334,7 @@ LOperand* context = UseAny(instr->context()); LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(context, string, index); - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + return AssignPointerMap(DefineAsRegister(result)); } @@ -2284,9 +2350,7 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { info()->MarkAsDeferredCalling(); LOperand* context = UseAny(instr->context()); - LOperand* size = instr->size()->IsConstant() - ? 
UseConstant(instr->size()) - : UseTempRegister(instr->size()); + LOperand* size = UseRegisterOrConstant(instr->size()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2); @@ -2309,7 +2373,7 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - ASSERT(argument_count_ == 0); + DCHECK(argument_count_ == 0); allocator_->MarkAsOsrEntry(); current_block_->last_environment()->set_ast_id(instr->ast_id()); return AssignEnvironment(new(zone()) LOsrEntry); @@ -2322,11 +2386,11 @@ int spill_index = chunk()->GetParameterStackSlot(instr->index()); return DefineAsSpilled(result, spill_index); } else { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = - info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + info()->code_stub()->GetInterfaceDescriptor(); int index = static_cast<int>(instr->index()); - Register reg = descriptor->GetParameterRegister(index); + Register reg = descriptor->GetEnvironmentParameterRegister(index); return DefineFixed(result, reg); } } @@ -2397,9 +2461,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); } @@ -2421,7 +2482,7 @@ LOperand* context = UseFixed(instr->context(), cp); return MarkAsCall(new(zone()) LStackCheck(context), instr); } else { - ASSERT(instr->is_backwards_branch()); + DCHECK(instr->is_backwards_branch()); LOperand* context = UseAny(instr->context()); return AssignEnvironment( AssignPointerMap(new(zone()) LStackCheck(context))); @@ -2431,6 +2492,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); HConstant* undefined = graph()->GetConstantUndefined(); 
HEnvironment* inner = outer->CopyForInlining(instr->closure(), instr->arguments_count(), @@ -2456,7 +2518,7 @@ if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - ASSERT(instr->argument_delta() == -argument_count); + DCHECK(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> @@ -2490,9 +2552,29 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { LOperand* object = UseRegister(instr->object()); - LOperand* index = UseRegister(instr->index()); - return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index)); + LOperand* index = UseTempRegister(instr->index()); + LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); } + +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, cp), instr); +} + } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS diff -Nru nodejs-0.11.13/deps/v8/src/mips/lithium-mips.h nodejs-0.11.15/deps/v8/src/mips/lithium-mips.h --- nodejs-0.11.13/deps/v8/src/mips/lithium-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/lithium-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_MIPS_LITHIUM_MIPS_H_ #define V8_MIPS_LITHIUM_MIPS_H_ -#include "hydrogen.h" -#include "lithium-allocator.h" -#include "lithium.h" -#include "safepoint-table.h" -#include "utils.h" +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -40,145 +17,149 @@ // Forward declarations. class LCodeGen; -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallJSFunction) \ - V(CallWithDescriptor) \ - V(CallFunction) \ - V(CallNew) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CallStub) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareMinusZeroAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(ConstructDouble) \ - V(Context) \ - V(DateField) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleToI) \ - V(DoubleBits) \ - V(DoubleToSmi) \ - V(Drop) \ - V(Dummy) \ - V(DummyUse) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(FunctionLiteral) \ - V(GetCachedArrayIndex) \ - V(Goto) \ - V(HasCachedArrayIndexAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstanceOf) \ - V(InstanceOfKnownGlobal) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsConstructCallAndBranch) \ - V(IsObjectAndBranch) \ - V(IsStringAndBranch) \ - 
V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ - V(LoadGlobalGeneric) \ - V(LoadKeyed) \ - V(LoadKeyedGeneric) \ - V(LoadNamedField) \ - V(LoadNamedGeneric) \ - V(MapEnumLength) \ - V(MathAbs) \ - V(MathExp) \ - V(MathClz32) \ - V(MathFloor) \ - V(MathLog) \ - V(MathMinMax) \ - V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(MultiplyAddD) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(PushArgument) \ - V(RegExpLiteral) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreGlobalCell) \ - V(StoreKeyed) \ - V(StoreKeyedGeneric) \ - V(StoreNamedField) \ - V(StoreNamedGeneric) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(ToFastProperties) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddI) \ + V(Allocate) \ + V(AllocateBlockContext) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallJSFunction) \ + V(CallWithDescriptor) \ + V(CallFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CheckInstanceType) \ + V(CheckMaps) \ + V(CheckMapValue) \ + V(CheckNonSmi) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8) \ + V(ClassOfTestAndBranch) \ + V(CompareMinusZeroAndBranch) \ + 
V(CompareNumericAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpHoleAndBranch) \ + V(CmpMapAndBranch) \ + V(CmpT) \ + V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleToI) \ + V(DoubleBits) \ + V(DoubleToSmi) \ + V(Drop) \ + V(Dummy) \ + V(DummyUse) \ + V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsStringAndBranch) \ + V(IsSmiAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadRoot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyed) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(MapEnumLength) \ + V(MathAbs) \ + V(MathExp) \ + V(MathClz32) \ + V(MathFloor) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulI) \ + V(MultiplyAddD) \ + V(NumberTagD) \ + V(NumberTagI) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PushArgument) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyed) \ + V(StoreKeyedGeneric) \ + V(StoreNamedField) \ + V(StoreNamedGeneric) \ + 
V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ V(WrapReceiver) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -190,7 +171,7 @@ return mnemonic; \ } \ static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ + DCHECK(instr->Is##type()); \ return reinterpret_cast<L##type*>(instr); \ } @@ -239,6 +220,9 @@ virtual bool IsControl() const { return false; } + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + void set_environment(LEnvironment* env) { environment_ = env; } LEnvironment* environment() const { return environment_; } bool HasEnvironment() const { return environment_ != NULL; } @@ -258,7 +242,9 @@ // Interface to the register allocator and iterators. bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall(); + } // Interface to the register allocator and iterators. bool IsMarkedAsCall() const { return IsCall(); } @@ -275,11 +261,12 @@ void VerifyCall(); #endif + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + private: // Iterator interface. 
friend class InputIterator; - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; friend class TempIterator; virtual int TempCount() = 0; @@ -344,7 +331,7 @@ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; static LGap* cast(LInstruction* instr) { - ASSERT(instr->IsGap()); + DCHECK(instr->IsGap()); return reinterpret_cast<LGap*>(instr); } @@ -424,7 +411,7 @@ class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { public: - explicit LDummy() { } + LDummy() {} DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") }; @@ -440,6 +427,7 @@ class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: + virtual bool IsControl() const V8_OVERRIDE { return true; } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") DECLARE_HYDROGEN_ACCESSOR(Deoptimize) }; @@ -709,13 +697,13 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - LDivI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LDivI(LOperand* dividend, LOperand* divisor) { + inputs_[0] = dividend; + inputs_[1] = divisor; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) @@ -761,6 +749,21 @@ }; +class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LFlooringDivI(LOperand* dividend, LOperand* divisor) { + inputs_[0] = dividend; + inputs_[1] = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LMulI(LOperand* left, LOperand* right) { @@ -853,6 +856,16 @@ }; +class LMathFround V8_FINAL : 
public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFround(LOperand* value) { inputs_[0] = value; } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") +}; + + class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LMathAbs(LOperand* context, LOperand* value) { @@ -1526,7 +1539,7 @@ return parameter_count()->IsConstantOperand(); } LConstantOperand* constant_parameter_count() { - ASSERT(has_constant_parameter_count()); + DCHECK(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } LOperand* parameter_count() { return inputs_[2]; } @@ -1548,15 +1561,17 @@ }; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadNamedGeneric(LOperand* context, LOperand* object) { + LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) { inputs_[0] = context; inputs_[1] = object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) @@ -1613,23 +1628,27 @@ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) virtual void PrintDataTo(StringStream* stream); - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } }; -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { public: - LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) { + LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key, + LOperand* vector) { inputs_[0] = context; inputs_[1] = object; inputs_[2] = key; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } 
LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) }; @@ -1640,15 +1659,18 @@ }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + LLoadGlobalGeneric(LOperand* context, LOperand* global_object, + LOperand* vector) { inputs_[0] = context; inputs_[1] = global_object; + temps_[0] = vector; } LOperand* context() { return inputs_[0]; } LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) @@ -1734,15 +1756,15 @@ }; -class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> { +class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> { public: LStoreCodeEntry(LOperand* function, LOperand* code_object) { inputs_[0] = function; - temps_[0] = code_object; + inputs_[1] = code_object; } LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return temps_[0]; } + LOperand* code_object() { return inputs_[1]; } virtual void PrintDataTo(StringStream* stream); @@ -1813,18 +1835,18 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { public: - LCallWithDescriptor(const CallInterfaceDescriptor* descriptor, - ZoneList<LOperand*>& operands, + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, Zone* zone) : descriptor_(descriptor), - inputs_(descriptor->environment_length() + 1, zone) { - ASSERT(descriptor->environment_length() + 1 == operands.length()); + inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + 
DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); inputs_.AddAll(operands, zone); } LOperand* target() const { return inputs_[0]; } - const CallInterfaceDescriptor* descriptor() { return descriptor_; } + const InterfaceDescriptor* descriptor() { return descriptor_; } private: DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") @@ -1834,7 +1856,7 @@ int arity() const { return hydrogen()->argument_count() - 1; } - const CallInterfaceDescriptor* descriptor_; + const InterfaceDescriptor* descriptor_; ZoneList<LOperand*> inputs_; // Iterator support. @@ -1931,7 +1953,7 @@ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { return save_doubles() == kDontSaveFPRegs; } @@ -2127,7 +2149,6 @@ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> transition() const { return hydrogen()->transition_map(); } Representation representation() const { return hydrogen()->field_representation(); } @@ -2183,7 +2204,7 @@ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } }; @@ -2336,7 +2357,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMaps(LOperand* value) { + explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; } @@ -2631,6 +2652,35 @@ }; +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: 
public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + class LChunkBuilder; class LPlatformChunk V8_FINAL : public LChunk { public: @@ -2655,11 +2705,11 @@ next_block_(NULL), allocator_(allocator) { } + Isolate* isolate() const { return graph_->isolate(); } + // Build the sequence for the graph. LPlatformChunk* Build(); - LInstruction* CheckElideControlInstruction(HControlInstruction* instr); - // Declare methods that deal with the individual node types. #define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2671,6 +2721,7 @@ LInstruction* DoMathFloor(HUnaryMathOperation* instr); LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathFround(HUnaryMathOperation* instr); LInstruction* DoMathAbs(HUnaryMathOperation* instr); LInstruction* DoMathLog(HUnaryMathOperation* instr); LInstruction* DoMathExp(HUnaryMathOperation* instr); @@ -2679,12 +2730,13 @@ LInstruction* DoMathClz32(HUnaryMathOperation* instr); LInstruction* DoDivByPowerOf2I(HDiv* instr); LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HBinaryOperation* instr); + LInstruction* DoDivI(HDiv* instr); LInstruction* DoModByPowerOf2I(HMod* instr); LInstruction* DoModByConstI(HMod* instr); LInstruction* DoModI(HMod* instr); LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); private: enum Status { @@ -2750,6 +2802,7 @@ // Temporary operand that must be in a register. 
MUST_USE_RESULT LUnallocated* TempRegister(); + MUST_USE_RESULT LUnallocated* TempDoubleRegister(); MUST_USE_RESULT LOperand* FixedTemp(Register reg); MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); @@ -2779,6 +2832,7 @@ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* current); void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr); diff -Nru nodejs-0.11.13/deps/v8/src/mips/macro-assembler-mips.cc nodejs-0.11.15/deps/v8/src/mips/macro-assembler-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/macro-assembler-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/macro-assembler-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <limits.h> // For LONG_MIN, LONG_MAX. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "bootstrapper.h" -#include "codegen.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "isolate-inl.h" -#include "runtime.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" namespace v8 { namespace internal { @@ -55,7 +32,7 @@ void MacroAssembler::Load(Register dst, const MemOperand& src, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8()) { lb(dst, src); } else if (r.IsUInteger8()) { @@ -73,12 +50,17 @@ void MacroAssembler::Store(Register src, const MemOperand& dst, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { sb(src, dst); } else if (r.IsInteger16() || r.IsUInteger16()) { sh(src, dst); } else { + if (r.IsHeapObject()) { + AssertNotSmi(src); + } else if (r.IsSmi()) { + AssertSmi(src); + } sw(src, dst); } } @@ -119,7 +101,7 @@ // Safepoints expect a block of kNumSafepointRegisters values on the // stack, so adjust the stack for unsaved registers. 
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; - ASSERT(num_unsaved >= 0); + DCHECK(num_unsaved >= 0); if (num_unsaved > 0) { Subu(sp, sp, Operand(num_unsaved * kPointerSize)); } @@ -136,32 +118,6 @@ } -void MacroAssembler::PushSafepointRegistersAndDoubles() { - PushSafepointRegisters(); - Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); - for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { - FPURegister reg = FPURegister::FromAllocationIndex(i); - sdc1(reg, MemOperand(sp, i * kDoubleSize)); - } -} - - -void MacroAssembler::PopSafepointRegistersAndDoubles() { - for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { - FPURegister reg = FPURegister::FromAllocationIndex(i); - ldc1(reg, MemOperand(sp, i * kDoubleSize)); - } - Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); - PopSafepointRegisters(); -} - - -void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, - Register dst) { - sw(src, SafepointRegistersAndDoublesSlot(dst)); -} - - void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { sw(src, SafepointRegisterSlot(dst)); } @@ -197,7 +153,7 @@ Register scratch, Condition cc, Label* branch) { - ASSERT(cc == eq || cc == ne); + DCHECK(cc == eq || cc == ne); And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); Branch(branch, cc, scratch, Operand(ExternalReference::new_space_start(isolate()))); @@ -212,8 +168,9 @@ RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!AreAliased(value, dst, t8, object)); + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!AreAliased(value, dst, t8, object)); // First, check if a write barrier is even needed. The tests below // catch stores of Smis. 
Label done; @@ -225,7 +182,7 @@ // Although the object register is tagged, the offset is relative to the start // of the object, so so offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); + DCHECK(IsAligned(offset, kPointerSize)); Addu(dst, object, Operand(offset - kHeapObjectTag)); if (emit_debug_code()) { @@ -242,7 +199,8 @@ ra_status, save_fp, remembered_set_action, - OMIT_SMI_CHECK); + OMIT_SMI_CHECK, + pointers_to_here_check_for_value); bind(&done); @@ -255,18 +213,95 @@ } +// Will clobber 4 registers: object, map, dst, ip. The +// register 'object' contains a heap object pointer. +void MacroAssembler::RecordWriteForMap(Register object, + Register map, + Register dst, + RAStatus ra_status, + SaveFPRegsMode fp_mode) { + if (emit_debug_code()) { + DCHECK(!dst.is(at)); + lw(dst, FieldMemOperand(map, HeapObject::kMapOffset)); + Check(eq, + kWrongAddressOrValuePassedToRecordWrite, + dst, + Operand(isolate()->factory()->meta_map())); + } + + if (!FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + lw(at, FieldMemOperand(object, HeapObject::kMapOffset)); + Check(eq, + kWrongAddressOrValuePassedToRecordWrite, + map, + Operand(at)); + } + + Label done; + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. This optimization + // relies on the fact that maps can never be in new space. + CheckPageFlag(map, + map, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + + Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + And(at, dst, Operand((1 << kPointerSizeLog2) - 1)); + Branch(&ok, eq, at, Operand(zero_reg)); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + // Record the actual write. 
+ if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, + fp_mode); + CallStub(&stub); + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } + + bind(&done); + + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + li(dst, Operand(BitCast<int32_t>(kZapValue + 12))); + li(map, Operand(BitCast<int32_t>(kZapValue + 16))); + } +} + + // Will clobber 4 registers: object, address, scratch, ip. The // register 'object' contains a heap object pointer. The heap object // tag is shifted away. -void MacroAssembler::RecordWrite(Register object, - Register address, - Register value, - RAStatus ra_status, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!AreAliased(object, address, value, t8)); - ASSERT(!AreAliased(object, address, value, t9)); +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + RAStatus ra_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!AreAliased(object, address, value, t8)); + DCHECK(!AreAliased(object, address, value, t9)); if (emit_debug_code()) { lw(at, MemOperand(address)); @@ -274,24 +309,27 @@ eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); } - // Count number of write barriers in generated code. - isolate()->counters()->write_barriers_static()->Increment(); - // TODO(mstarzinger): Dynamic counter missing. + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } // First, check if a write barrier is even needed. 
The tests below // catch stores of smis and stores into the young generation. Label done; if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); JumpIfSmi(value, &done); } - CheckPageFlag(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - eq, - &done); + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + } CheckPageFlag(object, value, // Used as scratch. MemoryChunk::kPointersFromHereAreInterestingMask, @@ -302,7 +340,8 @@ if (ra_status == kRAHasNotBeenSaved) { push(ra); } - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); CallStub(&stub); if (ra_status == kRAHasNotBeenSaved) { pop(ra); @@ -310,6 +349,11 @@ bind(&done); + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, + value); + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. 
if (emit_debug_code()) { @@ -347,12 +391,12 @@ if (and_then == kFallThroughAtEnd) { Branch(&done, eq, t8, Operand(zero_reg)); } else { - ASSERT(and_then == kReturnAtEnd); + DCHECK(and_then == kReturnAtEnd); Ret(eq, t8, Operand(zero_reg)); } push(ra); StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(fp_mode); + StoreBufferOverflowStub(isolate(), fp_mode); CallStub(&store_buffer_overflow); pop(ra); bind(&done); @@ -371,9 +415,9 @@ Label* miss) { Label same_contexts; - ASSERT(!holder_reg.is(scratch)); - ASSERT(!holder_reg.is(at)); - ASSERT(!scratch.is(at)); + DCHECK(!holder_reg.is(scratch)); + DCHECK(!holder_reg.is(at)); + DCHECK(!scratch.is(at)); // Load current lexical context from the stack frame. lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -436,6 +480,9 @@ } +// Compute the hash code from the untagged key. This must be kept in sync with +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in +// code-stub-hydrogen.cc void MacroAssembler::GetNumberHash(Register reg0, Register scratch) { // First of all we assign the hash seed to scratch. LoadRoot(scratch, Heap::kHashSeedRootIndex); @@ -525,7 +572,7 @@ and_(reg2, reg2, reg1); // Scale the index by multiplying by the element size. - ASSERT(SeededNumberDictionary::kEntrySize == 3); + DCHECK(SeededNumberDictionary::kEntrySize == 3); sll(at, reg2, 1); // 2x. addu(reg2, reg2, at); // reg2 = reg2 * 3. @@ -568,7 +615,7 @@ addiu(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); addu(rd, rs, at); } @@ -584,7 +631,7 @@ addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm). } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); subu(rd, rs, at); } @@ -602,7 +649,7 @@ } } else { // li handles the relocation. 
- ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); if (kArchVariant == kLoongson) { mult(rs, at); @@ -619,7 +666,7 @@ mult(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); mult(rs, at); } @@ -631,7 +678,7 @@ multu(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); multu(rs, at); } @@ -643,7 +690,7 @@ div(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); div(rs, at); } @@ -655,7 +702,7 @@ divu(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); divu(rs, at); } @@ -670,7 +717,7 @@ andi(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); and_(rd, rs, at); } @@ -686,7 +733,7 @@ ori(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); or_(rd, rs, at); } @@ -702,7 +749,7 @@ xori(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); xor_(rd, rs, at); } @@ -715,7 +762,7 @@ nor(rd, rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); nor(rd, rs, at); } @@ -723,9 +770,9 @@ void MacroAssembler::Neg(Register rs, const Operand& rt) { - ASSERT(rt.is_reg()); - ASSERT(!at.is(rs)); - ASSERT(!at.is(rt.rm())); + DCHECK(rt.is_reg()); + DCHECK(!at.is(rs)); + DCHECK(!at.is(rt.rm())); li(at, -1); xor_(rs, rt.rm(), at); } @@ -739,7 +786,7 @@ slti(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); slt(rd, rs, at); } @@ -755,7 +802,7 @@ sltiu(rd, rs, rt.imm32_); } else { // li handles the relocation. 
- ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); sltu(rd, rs, at); } @@ -798,7 +845,7 @@ } -//------------Pseudo-instructions------------- +// ------------Pseudo-instructions------------- void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { lwr(rd, rs); @@ -817,7 +864,7 @@ if (value->IsSmi()) { li(dst, Operand(value), mode); } else { - ASSERT(value->IsHeapObject()); + DCHECK(value->IsHeapObject()); if (isolate()->heap()->InNewSpace(*value)) { Handle<Cell> cell = isolate()->factory()->NewCell(value); li(dst, Operand(cell)); @@ -830,7 +877,7 @@ void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { - ASSERT(!j.is_reg()); + DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { // Normal load of an immediate value which does not need Relocation Info. @@ -983,8 +1030,8 @@ Register rs, uint16_t pos, uint16_t size) { - ASSERT(pos < 32); - ASSERT(pos + size < 33); + DCHECK(pos < 32); + DCHECK(pos + size < 33); if (kArchVariant == kMips32r2) { ext_(rt, rs, pos, size); @@ -1006,14 +1053,14 @@ Register rs, uint16_t pos, uint16_t size) { - ASSERT(pos < 32); - ASSERT(pos + size <= 32); - ASSERT(size != 0); + DCHECK(pos < 32); + DCHECK(pos + size <= 32); + DCHECK(size != 0); if (kArchVariant == kMips32r2) { ins_(rt, rs, pos, size); } else { - ASSERT(!rt.is(t8) && !rs.is(t8)); + DCHECK(!rt.is(t8) && !rs.is(t8)); Subu(at, zero_reg, Operand(1)); srl(at, at, 32 - size); and_(t8, rs, at); @@ -1042,9 +1089,9 @@ // We do this by converting rs minus the MSB to avoid sign conversion, // then adding 2^31 to the result (if needed). - ASSERT(!fd.is(scratch)); - ASSERT(!rs.is(t9)); - ASSERT(!rs.is(at)); + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(t9)); + DCHECK(!rs.is(at)); // Save rs's MSB to t9. 
Ext(t9, rs, 31, 1); @@ -1128,8 +1175,8 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch) { - ASSERT(!fd.is(scratch)); - ASSERT(!rs.is(at)); + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(at)); // Load 2^31 into scratch as its float representation. li(at, 0x41E00000); @@ -1170,7 +1217,7 @@ return; } - ASSERT(nan || target); + DCHECK(nan || target); // Check for unordered (NaN) cases. if (nan) { c(UN, D, cmp1, cmp2); @@ -1216,7 +1263,7 @@ break; default: CHECK(0); - }; + } } if (bd == PROTECT) { @@ -1286,8 +1333,8 @@ if (kArchVariant == kLoongson) { // Tests an FP condition code and then conditionally move rs to rd. // We do not currently use any FPU cc bit other than bit 0. - ASSERT(cc == 0); - ASSERT(!(rs.is(t8) || rd.is(t8))); + DCHECK(cc == 0); + DCHECK(!(rs.is(t8) || rd.is(t8))); Label done; Register scratch = t8; // For testing purposes we need to fetch content of the FCSR register and @@ -1312,8 +1359,8 @@ if (kArchVariant == kLoongson) { // Tests an FP condition code and then conditionally move rs to rd. // We do not currently use any FPU cc bit other than bit 0. 
- ASSERT(cc == 0); - ASSERT(!(rs.is(t8) || rd.is(t8))); + DCHECK(cc == 0); + DCHECK(!(rs.is(t8) || rd.is(t8))); Label done; Register scratch = t8; // For testing purposes we need to fetch content of the FCSR register and @@ -1336,7 +1383,7 @@ void MacroAssembler::Clz(Register rd, Register rs) { if (kArchVariant == kLoongson) { - ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); + DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); Register mask = t8; Register scratch = t9; Label loop, end; @@ -1363,9 +1410,9 @@ DoubleRegister double_scratch, Register except_flag, CheckForInexactConversion check_inexact) { - ASSERT(!result.is(scratch)); - ASSERT(!double_input.is(double_scratch)); - ASSERT(!except_flag.is(scratch)); + DCHECK(!result.is(scratch)); + DCHECK(!double_input.is(double_scratch)); + DCHECK(!except_flag.is(scratch)); Label done; @@ -1456,7 +1503,7 @@ Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. sdc1(double_input, MemOperand(sp, 0)); - DoubleToIStub stub(sp, result, 0, true, true); + DoubleToIStub stub(isolate(), sp, result, 0, true, true); CallStub(&stub); Addu(sp, sp, Operand(kDoubleSize)); @@ -1469,7 +1516,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { Label done; DoubleRegister double_scratch = f12; - ASSERT(!result.is(object)); + DCHECK(!result.is(object)); ldc1(double_scratch, MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); @@ -1477,7 +1524,8 @@ // If we fell through then inline version didn't succeed - call stub instead. 
push(ra); - DoubleToIStub stub(object, + DoubleToIStub stub(isolate(), + object, result, HeapNumber::kValueOffset - kHeapObjectTag, true, @@ -1495,7 +1543,7 @@ Register scratch, Label* not_number) { Label done; - ASSERT(!result.is(object)); + DCHECK(!result.is(object)); UntagAndJumpIfSmi(result, object, &done); JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); @@ -1522,7 +1570,7 @@ // Emulated condtional branches do not emit a nop in the branch delay slot. // // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. -#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ +#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) @@ -1614,7 +1662,7 @@ const Operand& rt, BranchDelaySlot bdslot) { BRANCH_ARGS_CHECK(cond, rs, rt); - ASSERT(!rs.is(zero_reg)); + DCHECK(!rs.is(zero_reg)); Register r2 = no_reg; Register scratch = at; @@ -1714,14 +1762,14 @@ break; case eq: // We don't want any other register but scratch clobbered. - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); beq(rs, r2, offset); break; case ne: // We don't want any other register but scratch clobbered. 
- ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); bne(rs, r2, offset); @@ -1966,14 +2014,14 @@ b(offset); break; case eq: - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); offset = shifted_branch_offset(L, false); beq(rs, r2, offset); break; case ne: - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); offset = shifted_branch_offset(L, false); @@ -1985,7 +2033,7 @@ offset = shifted_branch_offset(L, false); bgtz(rs, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, r2, rs); @@ -2002,7 +2050,7 @@ offset = shifted_branch_offset(L, false); beq(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, rs, r2); @@ -2019,7 +2067,7 @@ offset = shifted_branch_offset(L, false); bne(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, rs, r2); @@ -2032,7 +2080,7 @@ offset = shifted_branch_offset(L, false); blez(rs, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, r2, rs); @@ -2044,9 +2092,9 @@ case Ugreater: if (rt.imm32_ == 0) { offset = shifted_branch_offset(L, false); - bgtz(rs, offset); + bne(rs, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, r2, rs); @@ -2063,7 +2111,7 @@ offset = shifted_branch_offset(L, false); beq(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, rs, r2); @@ -2080,7 +2128,7 @@ offset = shifted_branch_offset(L, false); bne(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, rs, r2); @@ -2091,9 +2139,9 @@ case Uless_equal: if (rt.imm32_ == 0) { offset = 
shifted_branch_offset(L, false); - b(offset); + beq(rs, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, r2, rs); @@ -2106,7 +2154,7 @@ } } // Check that offset could actually hold on an int16_t. - ASSERT(is_int16(offset)); + DCHECK(is_int16(offset)); // Emit a nop in the branch delay slot if required. if (bdslot == PROTECT) nop(); @@ -2368,7 +2416,7 @@ } } // Check that offset could actually hold on an int16_t. - ASSERT(is_int16(offset)); + DCHECK(is_int16(offset)); // Emit a nop in the branch delay slot if required. if (bdslot == PROTECT) @@ -2419,7 +2467,7 @@ Register rs, const Operand& rt, BranchDelaySlot bd) { - ASSERT(!RelocInfo::IsCodeTarget(rmode)); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd); } @@ -2430,7 +2478,7 @@ Register rs, const Operand& rt, BranchDelaySlot bd) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); AllowDeferredHandleDereference embedding_raw_address; Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); } @@ -2476,7 +2524,7 @@ if (bd == PROTECT) nop(); - ASSERT_EQ(CallSize(target, cond, rs, rt, bd), + DCHECK_EQ(CallSize(target, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); } @@ -2507,7 +2555,7 @@ positions_recorder()->WriteRecordedPositions(); li(t9, Operand(target_int, rmode), CONSTANT_SIZE); Call(t9, cond, rs, rt, bd); - ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd), + DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); } @@ -2535,14 +2583,14 @@ BlockTrampolinePoolScope block_trampoline_pool(this); Label start; bind(&start); - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { SetRecordedAstId(ast_id); rmode = RelocInfo::CODE_TARGET_WITH_ID; } AllowDeferredHandleDereference embedding_raw_address; 
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); - ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), + DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); } @@ -2686,18 +2734,14 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT - void MacroAssembler::DebugBreak() { PrepareCEntryArgs(0); PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); - CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); + CEntryStub ces(isolate(), 1); + DCHECK(AllowThisStubCall(&ces)); + Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } -#endif // ENABLE_DEBUGGER_SUPPORT - // --------------------------------------------------------------------------- // Exception handling. @@ -2724,7 +2768,7 @@ // Push the frame pointer, context, state, and code object. if (kind == StackHandler::JS_ENTRY) { - ASSERT_EQ(Smi::FromInt(0), 0); + DCHECK_EQ(Smi::FromInt(0), 0); // The second zero_reg indicates no context. // The first zero_reg is the NULL frame pointer. // The operands are reversed to match the order of MultiPush/Pop. @@ -2852,7 +2896,7 @@ Register scratch2, Label* gc_required, AllocationFlags flags) { - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -2864,18 +2908,18 @@ return; } - ASSERT(!result.is(scratch1)); - ASSERT(!result.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!scratch1.is(t9)); - ASSERT(!scratch2.is(t9)); - ASSERT(!result.is(t9)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!scratch1.is(t9)); + DCHECK(!scratch2.is(t9)); + DCHECK(!result.is(t9)); // Make object size into bytes. 
if ((flags & SIZE_IN_WORDS) != 0) { object_size *= kPointerSize; } - ASSERT_EQ(0, object_size & kObjectAlignmentMask); + DCHECK_EQ(0, object_size & kObjectAlignmentMask); // Check relative positions of allocation top and limit addresses. // ARM adds additional checks to make sure the ldm instruction can be @@ -2889,7 +2933,7 @@ reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); + DCHECK((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register topaddr = scratch1; @@ -2915,8 +2959,8 @@ if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); And(scratch2, result, Operand(kDoubleAlignmentMask)); Label aligned; Branch(&aligned, eq, scratch2, Operand(zero_reg)); @@ -2959,11 +3003,11 @@ return; } - ASSERT(!result.is(scratch1)); - ASSERT(!result.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!object_size.is(t9)); - ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!object_size.is(t9)); + DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); // Check relative positions of allocation top and limit addresses. 
// ARM adds additional checks to make sure the ldm instruction can be @@ -2976,7 +3020,7 @@ reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); + DCHECK((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register topaddr = scratch1; @@ -3002,8 +3046,8 @@ if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); And(scratch2, result, Operand(kDoubleAlignmentMask)); Label aligned; Branch(&aligned, eq, scratch2, Operand(zero_reg)); @@ -3069,7 +3113,7 @@ Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); sll(scratch1, length, 1); // Length in bytes, not chars. addiu(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); @@ -3100,8 +3144,8 @@ Label* gc_required) { // Calculate the number of bytes needed for the characters in the string // while observing object alignment. 
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - ASSERT(kCharSize == 1); + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK(kCharSize == 1); addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); @@ -3142,33 +3186,12 @@ Register scratch1, Register scratch2, Label* gc_required) { - Label allocate_new_space, install_map; - AllocationFlags flags = TAG_OBJECT; - - ExternalReference high_promotion_mode = ExternalReference:: - new_space_high_promotion_mode_active_address(isolate()); - li(scratch1, Operand(high_promotion_mode)); - lw(scratch1, MemOperand(scratch1, 0)); - Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg)); - Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); - - jmp(&install_map); - - bind(&allocate_new_space); - Allocate(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - flags); - - bind(&install_map); + TAG_OBJECT); InitializeNewString(result, length, @@ -3229,14 +3252,19 @@ Register scratch2, Register heap_number_map, Label* need_gc, - TaggingMode tagging_mode) { + TaggingMode tagging_mode, + MutableMode mode) { // Allocate an object in the heap for the heap number and tag it as a heap // object. Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); + Heap::RootListIndex map_index = mode == MUTABLE + ? Heap::kMutableHeapNumberMapRootIndex + : Heap::kHeapNumberMapRootIndex; + AssertIsRoot(heap_number_map, map_index); + // Store heap number map in the allocated object. 
- AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); if (tagging_mode == TAG_RESULT) { sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); } else { @@ -3261,8 +3289,8 @@ Register src, RegList temps, int field_count) { - ASSERT((temps & dst.bit()) == 0); - ASSERT((temps & src.bit()) == 0); + DCHECK((temps & dst.bit()) == 0); + DCHECK((temps & src.bit()) == 0); // Primitive implementation using only one temporary register. Register tmp = no_reg; @@ -3273,7 +3301,7 @@ break; } } - ASSERT(!tmp.is(no_reg)); + DCHECK(!tmp.is(no_reg)); for (int i = 0; i < field_count; i++) { lw(tmp, FieldMemOperand(src, i * kPointerSize)); @@ -3313,13 +3341,24 @@ // TODO(kalmard) check if this can be optimized to use sw in most cases. // Can't use unaligned access - copy byte by byte. - sb(scratch, MemOperand(dst, 0)); - srl(scratch, scratch, 8); - sb(scratch, MemOperand(dst, 1)); - srl(scratch, scratch, 8); - sb(scratch, MemOperand(dst, 2)); - srl(scratch, scratch, 8); - sb(scratch, MemOperand(dst, 3)); + if (kArchEndian == kLittle) { + sb(scratch, MemOperand(dst, 0)); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 1)); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 2)); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 3)); + } else { + sb(scratch, MemOperand(dst, 3)); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 2)); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 1)); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 0)); + } + Addu(dst, dst, 4); Subu(length, length, Operand(kPointerSize)); @@ -3424,11 +3463,12 @@ bind(&have_double_value); sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize); Addu(scratch1, scratch1, elements_reg); - sw(mantissa_reg, FieldMemOperand( - scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); - uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + - sizeof(kHoleNanLower32); - sw(exponent_reg, FieldMemOperand(scratch1, offset)); + 
sw(mantissa_reg, + FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset + + kHoleNanLower32Offset)); + sw(exponent_reg, + FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset + + kHoleNanUpper32Offset)); jmp(&done); bind(&maybe_nan); @@ -3526,7 +3566,11 @@ void MacroAssembler::MovFromFloatResult(DoubleRegister dst) { if (IsMipsSoftFloatABI) { - Move(dst, v0, v1); + if (kArchEndian == kLittle) { + Move(dst, v0, v1); + } else { + Move(dst, v1, v0); + } } else { Move(dst, f0); // Reg f0 is o32 ABI FP return value. } @@ -3535,7 +3579,11 @@ void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) { if (IsMipsSoftFloatABI) { - Move(dst, a0, a1); + if (kArchEndian == kLittle) { + Move(dst, a0, a1); + } else { + Move(dst, a1, a0); + } } else { Move(dst, f12); // Reg f12 is o32 ABI FP first argument value. } @@ -3546,7 +3594,11 @@ if (!IsMipsSoftFloatABI) { Move(f12, src); } else { - Move(a0, a1, src); + if (kArchEndian == kLittle) { + Move(a0, a1, src); + } else { + Move(a1, a0, src); + } } } @@ -3555,7 +3607,11 @@ if (!IsMipsSoftFloatABI) { Move(f0, src); } else { - Move(v0, v1, src); + if (kArchEndian == kLittle) { + Move(v0, v1, src); + } else { + Move(v1, v0, src); + } } } @@ -3564,7 +3620,7 @@ DoubleRegister src2) { if (!IsMipsSoftFloatABI) { if (src2.is(f12)) { - ASSERT(!src1.is(f14)); + DCHECK(!src1.is(f14)); Move(f14, src2); Move(f12, src1); } else { @@ -3572,8 +3628,13 @@ Move(f14, src2); } } else { - Move(a0, a1, src1); - Move(a2, a3, src2); + if (kArchEndian == kLittle) { + Move(a0, a1, src1); + Move(a2, a3, src2); + } else { + Move(a1, a0, src1); + Move(a3, a2, src2); + } } } @@ -3602,12 +3663,12 @@ // The code below is made a lot easier because the calling code already sets // up actual and expected registers according to the contract if values are // passed in registers. 
- ASSERT(actual.is_immediate() || actual.reg().is(a0)); - ASSERT(expected.is_immediate() || expected.reg().is(a2)); - ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); + DCHECK(actual.is_immediate() || actual.reg().is(a0)); + DCHECK(expected.is_immediate() || expected.reg().is(a2)); + DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); if (expected.is_immediate()) { - ASSERT(actual.is_immediate()); + DCHECK(actual.is_immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; } else { @@ -3660,7 +3721,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); Label done; @@ -3674,7 +3735,7 @@ Call(code); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(code); } // Continue here if InvokePrologue does handle the invocation due to @@ -3689,10 +3750,10 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in a1. - ASSERT(function.is(a1)); + DCHECK(function.is(a1)); Register expected_reg = a2; Register code_reg = a3; @@ -3715,10 +3776,10 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in a1. - ASSERT(function.is(a1)); + DCHECK(function.is(a1)); // Get the function and setup the context. 
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); @@ -3762,7 +3823,7 @@ void MacroAssembler::IsObjectJSStringType(Register object, Register scratch, Label* fail) { - ASSERT(kNotStringTag != 0); + DCHECK(kNotStringTag != 0); lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); @@ -3789,14 +3850,15 @@ Register scratch, Label* miss, bool miss_on_bound_function) { - // Check that the receiver isn't a smi. - JumpIfSmi(function, miss); + Label non_instance; + if (miss_on_bound_function) { + // Check that the receiver isn't a smi. + JumpIfSmi(function, miss); - // Check that the function really is a function. Load map into result reg. - GetObjectType(function, result, scratch); - Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); + // Check that the function really is a function. Load map into result reg. + GetObjectType(function, result, scratch); + Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); - if (miss_on_bound_function) { lw(scratch, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); lw(scratch, @@ -3804,13 +3866,12 @@ And(scratch, scratch, Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); Branch(miss, ne, scratch, Operand(zero_reg)); - } - // Make sure that the function has an instance prototype. - Label non_instance; - lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); - And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); - Branch(&non_instance, ne, scratch, Operand(zero_reg)); + // Make sure that the function has an instance prototype. + lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); + And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); + Branch(&non_instance, ne, scratch, Operand(zero_reg)); + } // Get the prototype or initial map from the function. lw(result, @@ -3829,12 +3890,15 @@ // Get the prototype from the initial map. 
lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); - jmp(&done); - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. - bind(&non_instance); - lw(result, FieldMemOperand(result, Map::kConstructorOffset)); + if (miss_on_bound_function) { + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + lw(result, FieldMemOperand(result, Map::kConstructorOffset)); + } // All done. bind(&done); @@ -3858,8 +3922,8 @@ Register r1, const Operand& r2, BranchDelaySlot bd) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond, r1, r2, bd); } @@ -3869,7 +3933,7 @@ Register r1, const Operand& r2, BranchDelaySlot bd) { - Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond, r1, r2, bd); + Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd); } @@ -3894,14 +3958,11 @@ ExternalReference::handle_scope_level_address(isolate()), next_address); - ASSERT(function_address.is(a1) || function_address.is(a2)); + DCHECK(function_address.is(a1) || function_address.is(a2)); Label profiler_disabled; Label end_profiler_check; - bool* is_profiling_flag = - isolate()->cpu_profiler()->is_profiling_address(); - STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); - li(t9, reinterpret_cast<int32_t>(is_profiling_flag)); + li(t9, Operand(ExternalReference::is_profiling_address(isolate()))); lb(t9, MemOperand(t9, 0)); Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); @@ -3933,7 +3994,7 @@ // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). // DirectCEntry stub itself is generated early and never moves. 
- DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(this, t9); if (FLAG_log_timer_events) { @@ -3986,7 +4047,7 @@ { FrameScope frame(this, StackFrame::INTERNAL); CallExternalReference( - ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()), + ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0); } jmp(&exception_handled); @@ -4010,27 +4071,14 @@ } -void MacroAssembler::IllegalOperation(int num_arguments) { - if (num_arguments > 0) { - addiu(sp, sp, num_arguments * kPointerSize); - } - LoadRoot(v0, Heap::kUndefinedValueRootIndex); -} - - -void MacroAssembler::IndexFromHash(Register hash, - Register index) { +void MacroAssembler::IndexFromHash(Register hash, Register index) { // If the hash field contains an array index pick it out. The assert checks // that the constants for the maximum number of digits for an array index // cached in the hash field and the number of bits reserved for it does not // conflict. - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. 
- STATIC_ASSERT(kSmiTag == 0); - Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits); - sll(index, hash, kSmiTagSize); + DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); } @@ -4085,18 +4133,18 @@ Register right, Register overflow_dst, Register scratch) { - ASSERT(!dst.is(overflow_dst)); - ASSERT(!dst.is(scratch)); - ASSERT(!overflow_dst.is(scratch)); - ASSERT(!overflow_dst.is(left)); - ASSERT(!overflow_dst.is(right)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!dst.is(scratch)); + DCHECK(!overflow_dst.is(scratch)); + DCHECK(!overflow_dst.is(left)); + DCHECK(!overflow_dst.is(right)); if (left.is(right) && dst.is(left)) { - ASSERT(!dst.is(t9)); - ASSERT(!scratch.is(t9)); - ASSERT(!left.is(t9)); - ASSERT(!right.is(t9)); - ASSERT(!overflow_dst.is(t9)); + DCHECK(!dst.is(t9)); + DCHECK(!scratch.is(t9)); + DCHECK(!left.is(t9)); + DCHECK(!right.is(t9)); + DCHECK(!overflow_dst.is(t9)); mov(t9, right); right = t9; } @@ -4127,13 +4175,13 @@ Register right, Register overflow_dst, Register scratch) { - ASSERT(!dst.is(overflow_dst)); - ASSERT(!dst.is(scratch)); - ASSERT(!overflow_dst.is(scratch)); - ASSERT(!overflow_dst.is(left)); - ASSERT(!overflow_dst.is(right)); - ASSERT(!scratch.is(left)); - ASSERT(!scratch.is(right)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!dst.is(scratch)); + DCHECK(!overflow_dst.is(scratch)); + DCHECK(!overflow_dst.is(left)); + DCHECK(!overflow_dst.is(right)); + DCHECK(!scratch.is(left)); + DCHECK(!scratch.is(right)); // This happens with some crankshaft code. Since Subu works fine if // left == right, let's not make that restriction here. @@ -4172,10 +4220,7 @@ // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. 
- if (f->nargs >= 0 && f->nargs != num_arguments) { - IllegalOperation(num_arguments); - return; - } + CHECK(f->nargs < 0 || f->nargs == num_arguments); // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we @@ -4183,7 +4228,7 @@ // smarter. PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ExternalReference(f, isolate())); - CEntryStub stub(1, save_doubles); + CEntryStub stub(isolate(), 1, save_doubles); CallStub(&stub); } @@ -4194,7 +4239,7 @@ PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ext); - CEntryStub stub(1); + CEntryStub stub(isolate(), 1); CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd); } @@ -4223,8 +4268,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, BranchDelaySlot bd) { PrepareCEntryFunction(builtin); - CEntryStub stub(1); - Jump(stub.GetCode(isolate()), + CEntryStub stub(isolate(), 1); + Jump(stub.GetCode(), RelocInfo::CODE_TARGET, al, zero_reg, @@ -4237,7 +4282,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); GetBuiltinEntry(t9, id); if (flag == CALL_FUNCTION) { @@ -4245,7 +4290,7 @@ Call(t9); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(t9); } } @@ -4263,7 +4308,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(a1)); + DCHECK(!target.is(a1)); GetBuiltinFunction(a1, id); // Load the code entry point from the builtins object. 
lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); @@ -4282,7 +4327,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); lw(scratch1, MemOperand(scratch2)); @@ -4294,7 +4339,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); lw(scratch1, MemOperand(scratch2)); @@ -4316,7 +4361,7 @@ void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { - ASSERT(!elements.is(at)); + DCHECK(!elements.is(at)); Label ok; push(elements); lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); @@ -4379,7 +4424,7 @@ // generated instructions is 10, so we use this as a maximum value. static const int kExpectedAbortInstructions = 10; int abort_instructions = InstructionsGeneratedSince(&abort_start); - ASSERT(abort_instructions <= kExpectedAbortInstructions); + DCHECK(abort_instructions <= kExpectedAbortInstructions); while (abort_instructions++ < kExpectedAbortInstructions) { nop(); } @@ -4458,36 +4503,37 @@ } -void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { - if (frame_mode == BUILD_STUB_FRAME) { +void MacroAssembler::StubPrologue() { Push(ra, fp, cp); Push(Smi::FromInt(StackFrame::STUB)); // Adjust FP to point to saved FP. Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - } else { - PredictableCodeSizeScope predictible_code_size_scope( - this, kNoCodeAgeSequenceLength * Assembler::kInstrSize); - // The following three instructions must remain together and unmodified - // for code aging to work properly. - if (isolate()->IsCodePreAgingActive()) { - // Pre-age the code. 
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); - nop(Assembler::CODE_AGE_MARKER_NOP); - // Load the stub address to t9 and call it, - // GetCodeAgeAndParity() extracts the stub address from this instruction. - li(t9, - Operand(reinterpret_cast<uint32_t>(stub->instruction_start())), - CONSTANT_SIZE); - nop(); // Prevent jalr to jal optimization. - jalr(t9, a0); - nop(); // Branch delay slot nop. - nop(); // Pad the empty space. - } else { - Push(ra, fp, cp, a1); - nop(Assembler::CODE_AGE_SEQUENCE_NOP); - // Adjust fp to point to caller's fp. - Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - } +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + PredictableCodeSizeScope predictible_code_size_scope( + this, kNoCodeAgeSequenceLength); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + if (code_pre_aging) { + // Pre-age the code. + Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); + nop(Assembler::CODE_AGE_MARKER_NOP); + // Load the stub address to t9 and call it, + // GetCodeAgeAndParity() extracts the stub address from this instruction. + li(t9, + Operand(reinterpret_cast<uint32_t>(stub->instruction_start())), + CONSTANT_SIZE); + nop(); // Prevent jalr to jal optimization. + jalr(t9, a0); + nop(); // Branch delay slot nop. + nop(); // Pad the empty space. + } else { + Push(ra, fp, cp, a1); + nop(Assembler::CODE_AGE_SEQUENCE_NOP); + // Adjust fp to point to caller's fp. + Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } } @@ -4554,9 +4600,9 @@ const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (save_doubles) { // The stack must be allign to 0 modulo 8 for stores with sdc1. - ASSERT(kDoubleSize == frame_alignment); + DCHECK(kDoubleSize == frame_alignment); if (frame_alignment > 0) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); // Align stack. 
} int space = FPURegister::kMaxNumRegisters * kDoubleSize; @@ -4571,10 +4617,10 @@ // Reserve place for the return address, stack space and an optional slot // (used by the DirectCEntryStub to hold the return value if a struct is // returned) and align the frame preparing for calling the runtime function. - ASSERT(stack_space >= 0); + DCHECK(stack_space >= 0); Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); if (frame_alignment > 0) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); // Align stack. } @@ -4651,7 +4697,7 @@ // environment. // Note: This will break if we ever start generating snapshots on one Mips // platform for another Mips platform with a different alignment. - return OS::ActivationFrameAlignment(); + return base::OS::ActivationFrameAlignment(); #else // V8_HOST_ARCH_MIPS // If we are using the simulator then we should always align to the expected // alignment. As the simulator is used to generate snapshots we do not know @@ -4669,7 +4715,7 @@ if (frame_alignment > kPointerSize) { Label alignment_as_expected; - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); andi(at, sp, frame_alignment_mask); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); // Don't use Check here, as it will call Runtime_Abort re-entering here. @@ -4693,7 +4739,7 @@ void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { - ASSERT(!reg.is(overflow)); + DCHECK(!reg.is(overflow)); mov(overflow, reg); // Save original value. SmiTag(reg); xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. @@ -4707,9 +4753,9 @@ // Fall back to slower case. SmiTagCheckOverflow(dst, overflow); } else { - ASSERT(!dst.is(src)); - ASSERT(!dst.is(overflow)); - ASSERT(!src.is(overflow)); + DCHECK(!dst.is(src)); + DCHECK(!dst.is(overflow)); + DCHECK(!src.is(overflow)); SmiTag(dst, src); xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. 
} @@ -4735,7 +4781,7 @@ Label* smi_label, Register scratch, BranchDelaySlot bd) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); } @@ -4744,7 +4790,7 @@ Label* not_smi_label, Register scratch, BranchDelaySlot bd) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); } @@ -4754,7 +4800,7 @@ Register reg2, Label* on_not_both_smi) { STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(1, kSmiTagMask); + DCHECK_EQ(1, kSmiTagMask); or_(at, reg1, reg2); JumpIfNotSmi(at, on_not_both_smi); } @@ -4764,7 +4810,7 @@ Register reg2, Label* on_either_smi) { STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(1, kSmiTagMask); + DCHECK_EQ(1, kSmiTagMask); // Both Smi tags must be 1 (not Smi). and_(at, reg1, reg2); JumpIfSmi(at, on_either_smi); @@ -4836,7 +4882,7 @@ void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { if (emit_debug_code()) { - ASSERT(!reg.is(at)); + DCHECK(!reg.is(at)); LoadRoot(at, index); Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); } @@ -4981,7 +5027,7 @@ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatAsciiStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; - ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. + DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. andi(scratch1, first, kFlatAsciiStringMask); Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag)); andi(scratch2, second, kFlatAsciiStringMask); @@ -5046,7 +5092,7 @@ lw(at, FieldMemOperand(string, String::kLengthOffset)); Check(lt, kIndexIsTooLarge, index, Operand(at)); - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); Check(ge, kIndexIsNegative, index, Operand(zero_reg)); SmiUntag(index, index); @@ -5070,7 +5116,7 @@ // and the original value of sp. 
mov(scratch, sp); Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { @@ -5115,7 +5161,7 @@ void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { - ASSERT(has_frame()); + DCHECK(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -5124,10 +5170,10 @@ #if V8_HOST_ARCH_MIPS if (emit_debug_code()) { - int frame_alignment = OS::ActivationFrameAlignment(); + int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); Label alignment_as_expected; And(at, sp, Operand(frame_alignment_mask)); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); @@ -5153,7 +5199,7 @@ int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); - if (OS::ActivationFrameAlignment() > kPointerSize) { + if (base::OS::ActivationFrameAlignment() > kPointerSize) { lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); @@ -5242,7 +5288,7 @@ if (map->CanBeDeprecated()) { li(scratch, Operand(map)); lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); - And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask))); + And(scratch, scratch, Operand(Map::Deprecated::kMask)); Branch(if_deprecated, ne, scratch, Operand(zero_reg)); } } @@ -5253,7 +5299,7 @@ Register scratch1, Label* on_black) { HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); } @@ -5263,8 +5309,8 @@ Label* has_color, int first_bit, int second_bit) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); GetMarkBits(object, bitmap_scratch, mask_scratch); @@ -5293,13 +5339,13 @@ void MacroAssembler::JumpIfDataObject(Register value, Register scratch, Label* not_data_object) { - ASSERT(!AreAliased(value, scratch, t8, no_reg)); + DCHECK(!AreAliased(value, scratch, t8, no_reg)); Label is_data_object; lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); LoadRoot(t8, Heap::kHeapNumberMapRootIndex); Branch(&is_data_object, eq, t8, Operand(scratch)); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. 
lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); @@ -5312,7 +5358,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; @@ -5330,14 +5376,14 @@ Register mask_scratch, Register load_scratch, Label* value_is_white_and_not_data) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); Label done; @@ -5376,8 +5422,8 @@ } // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. Register instance_type = load_scratch; @@ -5389,8 +5435,8 @@ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). // External strings are the only ones with the kExternalStringTag bit // set. 
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); And(t8, instance_type, Operand(kExternalStringTag)); { Label skip; @@ -5404,8 +5450,8 @@ // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. - ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); - ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); + DCHECK(kSmiTag == 0 && kSmiTagSize == 1); lw(t9, FieldMemOperand(value, String::kLengthOffset)); And(t8, instance_type, Operand(kStringEncodingMask)); { @@ -5433,57 +5479,6 @@ } -void MacroAssembler::Throw(BailoutReason reason) { - Label throw_start; - bind(&throw_start); -#ifdef DEBUG - const char* msg = GetBailoutReason(reason); - if (msg != NULL) { - RecordComment("Throw message: "); - RecordComment(msg); - } -#endif - - li(a0, Operand(Smi::FromInt(reason))); - push(a0); - // Disable stub call restrictions to always allow calls to throw. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } else { - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } - // will not return here - if (is_trampoline_pool_blocked()) { - // If the calling code cares throw the exact number of - // instructions generated, we insert padding here to keep the size - // of the ThrowMessage macro constant. - // Currently in debug mode with debug_code enabled the number of - // generated instructions is 14, so we use this as a maximum value. 
- static const int kExpectedThrowMessageInstructions = 14; - int throw_instructions = InstructionsGeneratedSince(&throw_start); - ASSERT(throw_instructions <= kExpectedThrowMessageInstructions); - while (throw_instructions++ < kExpectedThrowMessageInstructions) { - nop(); - } - } -} - - -void MacroAssembler::ThrowIf(Condition cc, - BailoutReason reason, - Register rs, - Operand rt) { - Label L; - Branch(&L, NegateCondition(cc), rs, rt); - Throw(reason); - // will not return here - bind(&L); -} - - void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); @@ -5499,7 +5494,8 @@ void MacroAssembler::EnumLength(Register dst, Register map) { STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); lw(dst, FieldMemOperand(map, Map::kBitField3Offset)); - And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask))); + And(dst, dst, Operand(Map::EnumLengthBits::kMask)); + SmiTag(dst); } @@ -5545,7 +5541,7 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { - ASSERT(!output_reg.is(input_reg)); + DCHECK(!output_reg.is(input_reg)); Label done; li(output_reg, Operand(255)); // Normal branch: nop in delay slot. 
@@ -5640,7 +5636,7 @@ Register scratch0, Register scratch1, Label* found) { - ASSERT(!scratch1.is(scratch0)); + DCHECK(!scratch1.is(scratch0)); Factory* factory = isolate()->factory(); Register current = scratch0; Label loop_again; @@ -5652,42 +5648,63 @@ bind(&loop_again); lw(current, FieldMemOperand(current, HeapObject::kMapOffset)); lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); - Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount); + DecodeField<Map::ElementsKindBits>(scratch1); Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS)); lw(current, FieldMemOperand(current, Map::kPrototypeOffset)); Branch(&loop_again, ne, current, Operand(factory->null_value())); } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6, + Register reg7, + Register reg8) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + if (reg7.is_valid()) regs |= reg7.bit(); + if (reg8.is_valid()) regs |= reg8.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; } -CodePatcher::CodePatcher(byte* address, int instructions) +CodePatcher::CodePatcher(byte* address, + int instructions, + FlushICache flush_cache) : address_(address), size_(instructions * Assembler::kInstrSize), - 
masm_(NULL, address, size_ + Assembler::kGap) { + masm_(NULL, address, size_ + Assembler::kGap), + flush_cache_(flush_cache) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } CodePatcher::~CodePatcher() { // Indicate that code has changed. - CPU::FlushICache(address_, size_); + if (flush_cache_ == FLUSH) { + CpuFeatures::FlushICache(address_, size_); + } // Check that the code was patched as expected. - ASSERT(masm_.pc_ == address_ + size_); - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.pc_ == address_ + size_); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } @@ -5703,13 +5720,13 @@ void CodePatcher::ChangeBranchCondition(Condition cond) { Instr instr = Assembler::instr_at(masm_.pc_); - ASSERT(Assembler::IsBranch(instr)); + DCHECK(Assembler::IsBranch(instr)); uint32_t opcode = Assembler::GetOpcodeField(instr); // Currently only the 'eq' and 'ne' cond values are supported and the simple // branch instructions (with opcode being the branch type). // There are some special cases (see Assembler::IsBranch()) so extending this // would be tricky. 
- ASSERT(opcode == BEQ || + DCHECK(opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ || @@ -5726,9 +5743,9 @@ void MacroAssembler::TruncatingDiv(Register result, Register dividend, int32_t divisor) { - ASSERT(!dividend.is(result)); - ASSERT(!dividend.is(at)); - ASSERT(!result.is(at)); + DCHECK(!dividend.is(result)); + DCHECK(!dividend.is(at)); + DCHECK(!result.is(at)); MultiplierAndShift ms(divisor); li(at, Operand(ms.multiplier())); Mult(dividend, Operand(at)); diff -Nru nodejs-0.11.13/deps/v8/src/mips/macro-assembler-mips.h nodejs-0.11.15/deps/v8/src/mips/macro-assembler-mips.h --- nodejs-0.11.13/deps/v8/src/mips/macro-assembler-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/macro-assembler-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ -#include "assembler.h" -#include "mips/assembler-mips.h" -#include "v8globals.h" +#include "src/assembler.h" +#include "src/globals.h" +#include "src/mips/assembler-mips.h" namespace v8 { namespace internal { @@ -94,6 +71,10 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; Register GetRegisterThatIsNotOneOf(Register reg1, @@ -103,7 +84,14 @@ Register reg5 = no_reg, Register reg6 = no_reg); -bool AreAliased(Register r1, Register r2, Register r3, Register r4); +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg, + Register reg7 = no_reg, + Register reg8 = no_reg); // ----------------------------------------------------------------------------- @@ -128,7 +116,7 @@ // Generate a MemOperand for storing arguments 5..N on the stack // when calling CallCFunction(). 
inline MemOperand CFunctionArgumentOperand(int index) { - ASSERT(index > kCArgSlotCount); + DCHECK(index > kCArgSlotCount); // Argument 5 takes the slot just past the four Arg-slots. int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; return MemOperand(sp, offset); @@ -388,7 +376,9 @@ RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // As above, but the offset has the tag presubtracted. For use with // MemOperand(reg, off). @@ -400,7 +390,9 @@ RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { RecordWriteField(context, offset + kHeapObjectTag, value, @@ -408,9 +400,17 @@ ra_status, save_fp, remembered_set_action, - smi_check); + smi_check, + pointers_to_here_check_for_value); } + void RecordWriteForMap( + Register object, + Register map, + Register dst, + RAStatus ra_status, + SaveFPRegsMode save_fp); + // For a given |object| notify the garbage collector that the slot |address| // has been written. |value| is the object being stored. The value and // address registers are clobbered by the operation. @@ -421,7 +421,9 @@ RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // --------------------------------------------------------------------------- @@ -454,7 +456,7 @@ // nop(type)). 
These instructions are generated to mark special location in // the code, like some special IC code. static inline bool IsMarkedCode(Instr instr, int type) { - ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); + DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); return IsNop(instr, type); } @@ -472,7 +474,7 @@ rs == static_cast<uint32_t>(ToNumber(zero_reg))); int type = (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1; - ASSERT((type == -1) || + DCHECK((type == -1) || ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER))); return type; } @@ -551,7 +553,8 @@ Register scratch2, Register heap_number_map, Label* gc_required, - TaggingMode tagging_mode = TAG_RESULT); + TaggingMode tagging_mode = TAG_RESULT, + MutableMode mode = IMMUTABLE); void AllocateHeapNumberWithValue(Register result, FPURegister value, Register scratch1, @@ -686,7 +689,7 @@ // Pop two registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2) { - ASSERT(!src1.is(src2)); + DCHECK(!src1.is(src2)); lw(src2, MemOperand(sp, 0 * kPointerSize)); lw(src1, MemOperand(sp, 1 * kPointerSize)); Addu(sp, sp, 2 * kPointerSize); @@ -708,17 +711,15 @@ // RegList constant kSafepointSavedRegisters. void PushSafepointRegisters(); void PopSafepointRegisters(); - void PushSafepointRegistersAndDoubles(); - void PopSafepointRegistersAndDoubles(); // Store value in register src in the safepoint stack slot for // register dst. void StoreToSafepointRegisterSlot(Register src, Register dst); - void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); // Load the value of the src register from its safepoint stack slot // into register dst. void LoadFromSafepointRegisterSlot(Register dst, Register src); - // Flush the I-cache from asm code. You should use CPU::FlushICache from C. + // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache + // from C. // Does not handle errors. 
void FlushICache(Register address, unsigned instructions); @@ -757,7 +758,7 @@ FPURegister cmp1, FPURegister cmp2) { BranchF(target, nan, cc, cmp1, cmp2, bd); - }; + } // Truncates a double using a specific rounding mode, and writes the value // to the result register. @@ -932,13 +933,10 @@ Register scratch, Label* fail); -#ifdef ENABLE_DEBUGGER_SUPPORT // ------------------------------------------------------------------------- // Debugger Support. void DebugBreak(); -#endif - // ------------------------------------------------------------------------- // Exception handling. @@ -957,12 +955,6 @@ // handler chain. void ThrowUncatchable(Register value); - // Throw a message string as an exception. - void Throw(BailoutReason reason); - - // Throw a message string as an exception if a condition is not true. - void ThrowIf(Condition cc, BailoutReason reason, Register rs, Operand rt); - // Copies a fixed number of fields of heap objects from src to dst. void CopyFields(Register dst, Register src, RegList temps, int field_count); @@ -1074,10 +1066,6 @@ Handle<Code> success, SmiCheckType smi_check_type); - // Generates code for reporting that an illegal operation has - // occurred. - void IllegalOperation(int num_arguments); - // Load and check the instance type of an object for being a string. // Loads the type into the second argument register. 
@@ -1088,7 +1076,7 @@ lw(type, FieldMemOperand(obj, HeapObject::kMapOffset)); lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); And(type, type, Operand(kIsNotStringMask)); - ASSERT_EQ(0, kStringTag); + DCHECK_EQ(0, kStringTag); return eq; } @@ -1300,7 +1288,7 @@ }; Handle<Object> CodeObject() { - ASSERT(!code_object_.is_null()); + DCHECK(!code_object_.is_null()); return code_object_; } @@ -1514,15 +1502,40 @@ void NumberOfOwnDescriptors(Register dst, Register map); template<typename Field> + void DecodeField(Register dst, Register src) { + Ext(dst, src, Field::kShift, Field::kSize); + } + + template<typename Field> void DecodeField(Register reg) { + DecodeField<Field>(reg, reg); + } + + template<typename Field> + void DecodeFieldToSmi(Register dst, Register src) { static const int shift = Field::kShift; - static const int mask = (Field::kMask >> shift) << kSmiTagSize; - srl(reg, reg, shift); - And(reg, reg, Operand(mask)); + static const int mask = Field::kMask >> shift << kSmiTagSize; + STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); + STATIC_ASSERT(kSmiTag == 0); + if (shift < kSmiTagSize) { + sll(dst, src, kSmiTagSize - shift); + And(dst, dst, Operand(mask)); + } else if (shift > kSmiTagSize) { + srl(dst, src, shift - kSmiTagSize); + And(dst, dst, Operand(mask)); + } else { + And(dst, src, Operand(mask)); + } + } + + template<typename Field> + void DecodeFieldToSmi(Register reg) { + DecodeField<Field>(reg, reg); } // Generates function and stub prologue code. - void Prologue(PrologueFrameMode frame_mode); + void StubPrologue(); + void Prologue(bool code_pre_aging); // Activation support. void EnterFrame(StackFrame::Type type); @@ -1644,7 +1657,14 @@ // an assertion to fail. 
class CodePatcher { public: - CodePatcher(byte* address, int instructions); + enum FlushICache { + FLUSH, + DONT_FLUSH + }; + + CodePatcher(byte* address, + int instructions, + FlushICache flush_cache = FLUSH); virtual ~CodePatcher(); // Macro assembler to emit code. @@ -1664,6 +1684,7 @@ byte* address_; // The address of the code being patched. int size_; // Number of bytes of the expected patch size. MacroAssembler masm_; // Macro assembler used to generate the code. + FlushICache flush_cache_; // Whether to flush the I cache after patching. }; diff -Nru nodejs-0.11.13/deps/v8/src/mips/OWNERS nodejs-0.11.15/deps/v8/src/mips/OWNERS --- nodejs-0.11.13/deps/v8/src/mips/OWNERS 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/OWNERS 2015-01-20 21:22:17.000000000 +0000 @@ -1,2 +1,10 @@ plind44@gmail.com +paul.lind@imgtec.com gergely@homejinni.com +gergely.kis@imgtec.com +palfia@homejinni.com +akos.palfi@imgtec.com +kilvadyb@homejinni.com +balazs.kilvady@imgtec.com +Dusan.Milosavljevic@rt-rk.com +dusan.milosavljevic@imgtec.com diff -Nru nodejs-0.11.13/deps/v8/src/mips/regexp-macro-assembler-mips.cc nodejs-0.11.15/deps/v8/src/mips/regexp-macro-assembler-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/regexp-macro-assembler-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/regexp-macro-assembler-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "unicode.h" -#include "log.h" -#include "code-stubs.h" -#include "regexp-stack.h" -#include "macro-assembler.h" -#include "regexp-macro-assembler.h" -#include "mips/regexp-macro-assembler-mips.h" +#include "src/code-stubs.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/unicode.h" + +#include "src/mips/regexp-macro-assembler-mips.h" namespace v8 { namespace internal { @@ -132,7 +110,7 @@ backtrack_label_(), exit_label_(), internal_failure_label_() { - ASSERT_EQ(0, registers_to_save % 2); + DCHECK_EQ(0, registers_to_save % 2); __ jmp(&entry_label_); // We'll write the entry code later. // If the code gets too big or corrupted, an internal exception will be // raised, and we will exit right away. 
@@ -171,8 +149,8 @@ void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) { - ASSERT(reg >= 0); - ASSERT(reg < num_registers_); + DCHECK(reg >= 0); + DCHECK(reg < num_registers_); if (by != 0) { __ lw(a0, register_location(reg)); __ Addu(a0, a0, Operand(by)); @@ -311,7 +289,7 @@ // Compute new value of character position after the matched part. __ Subu(current_input_offset(), a2, end_of_input_address()); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); // Put regexp engine registers on stack. RegList regexp_registers_to_retain = current_input_offset().bit() | current_character().bit() | backtrack_stackpointer().bit(); @@ -393,7 +371,7 @@ __ lbu(t0, MemOperand(a2, 0)); __ addiu(a2, a2, char_size()); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); __ lhu(a3, MemOperand(a0, 0)); __ addiu(a0, a0, char_size()); __ lhu(t0, MemOperand(a2, 0)); @@ -437,7 +415,7 @@ uc16 minus, uc16 mask, Label* on_not_equal) { - ASSERT(minus < String::kMaxUtf16CodeUnit); + DCHECK(minus < String::kMaxUtf16CodeUnit); __ Subu(a0, current_character(), Operand(minus)); __ And(a0, a0, Operand(mask)); BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); @@ -728,7 +706,7 @@ __ Addu(a1, a1, Operand(a2)); // a1 is length of string in characters. - ASSERT_EQ(0, num_saved_registers_ % 2); + DCHECK_EQ(0, num_saved_registers_ % 2); // Always an even number of capture registers. This allows us to // unroll the loop once to add an operation between a load of a register // and the following use of that register. @@ -930,8 +908,8 @@ Label* on_end_of_input, bool check_bounds, int characters) { - ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. - ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works). + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works). 
if (check_bounds) { CheckPosition(cp_offset + characters - 1, on_end_of_input); } @@ -1016,7 +994,7 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) { - ASSERT(register_index >= num_saved_registers_); // Reserved for positions! + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! __ li(a0, Operand(to)); __ sw(a0, register_location(register_index)); } @@ -1040,7 +1018,7 @@ void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) { - ASSERT(reg_from <= reg_to); + DCHECK(reg_from <= reg_to); __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne)); for (int reg = reg_from; reg <= reg_to; reg++) { __ sw(a0, register_location(reg)); @@ -1063,12 +1041,12 @@ // Private methods: void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) { - int stack_alignment = OS::ActivationFrameAlignment(); + int stack_alignment = base::OS::ActivationFrameAlignment(); // Align the stack pointer and save the original sp value on the stack. __ mov(scratch, sp); __ Subu(sp, sp, Operand(kPointerSize)); - ASSERT(IsPowerOf2(stack_alignment)); + DCHECK(IsPowerOf2(stack_alignment)); __ And(sp, sp, Operand(-stack_alignment)); __ sw(scratch, MemOperand(sp)); @@ -1077,7 +1055,7 @@ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE); // We need to make room for the return address on the stack. - ASSERT(IsAligned(stack_alignment, kPointerSize)); + DCHECK(IsAligned(stack_alignment, kPointerSize)); __ Subu(sp, sp, Operand(stack_alignment)); // Stack pointer now points to cell where return address is to be written. 
@@ -1096,7 +1074,7 @@ ExternalReference stack_guard_check = ExternalReference::re_check_stack_guard_state(masm_->isolate()); __ li(t9, Operand(stack_guard_check)); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm_, t9); // DirectCEntryStub allocated space for the C argument slots so we have to @@ -1127,7 +1105,8 @@ Code* re_code, Address re_frame) { Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); - if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { isolate->StackOverflow(); return EXCEPTION; } @@ -1149,11 +1128,11 @@ // Current string. bool is_ascii = subject->IsOneByteRepresentationUnderneath(); - ASSERT(re_code->instruction_start() <= *return_address); - ASSERT(*return_address <= + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= re_code->instruction_start() + re_code->instruction_size()); - MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate); + Object* result = isolate->stack_guard()->HandleInterrupts(); if (*code_handle != re_code) { // Return address no longer valid. int delta = code_handle->address() - re_code->address(); @@ -1189,7 +1168,7 @@ // be a sequential or external string with the same content. // Update the start and end pointers in the stack frame to the current // location (whether it has actually moved or not). - ASSERT(StringShape(*subject_tmp).IsSequential() || + DCHECK(StringShape(*subject_tmp).IsSequential() || StringShape(*subject_tmp).IsExternal()); // The original start address of the characters to match. 
@@ -1221,7 +1200,7 @@ MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) { - ASSERT(register_index < (1<<30)); + DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1282,7 +1261,7 @@ void RegExpMacroAssemblerMIPS::Push(Register source) { - ASSERT(!source.is(backtrack_stackpointer())); + DCHECK(!source.is(backtrack_stackpointer())); __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(-kPointerSize)); @@ -1291,7 +1270,7 @@ void RegExpMacroAssemblerMIPS::Pop(Register target) { - ASSERT(!target.is(backtrack_stackpointer())); + DCHECK(!target.is(backtrack_stackpointer())); __ lw(target, MemOperand(backtrack_stackpointer())); __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize); } @@ -1327,12 +1306,12 @@ } // We assume that we cannot do unaligned loads on MIPS, so this function // must only be used to load a single character at a time. - ASSERT(characters == 1); + DCHECK(characters == 1); __ Addu(t5, end_of_input_address(), Operand(offset)); if (mode_ == ASCII) { __ lbu(current_character(), MemOperand(t5, 0)); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); __ lhu(current_character(), MemOperand(t5, 0)); } } diff -Nru nodejs-0.11.13/deps/v8/src/mips/regexp-macro-assembler-mips.h nodejs-0.11.15/deps/v8/src/mips/regexp-macro-assembler-mips.h --- nodejs-0.11.13/deps/v8/src/mips/regexp-macro-assembler-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/regexp-macro-assembler-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,15 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ #define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ -#include "mips/assembler-mips.h" -#include "mips/assembler-mips-inl.h" -#include "macro-assembler.h" -#include "code.h" -#include "mips/macro-assembler-mips.h" +#include "src/macro-assembler.h" +#include "src/mips/assembler-mips-inl.h" +#include "src/mips/assembler-mips.h" +#include "src/mips/macro-assembler-mips.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/mips/simulator-mips.cc nodejs-0.11.15/deps/v8/src/mips/simulator-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/simulator-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/simulator-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,22 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <limits.h> #include <stdarg.h> #include <stdlib.h> #include <cmath> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "cpu.h" -#include "disasm.h" -#include "assembler.h" -#include "globals.h" // Need the BitCast. -#include "mips/constants-mips.h" -#include "mips/simulator-mips.h" +#include "src/assembler.h" +#include "src/disasm.h" +#include "src/globals.h" // Need the BitCast. +#include "src/mips/constants-mips.h" +#include "src/mips/simulator-mips.h" +#include "src/ostreams.h" // Only build the simulator if not compiling for real MIPS hardware. @@ -130,7 +107,7 @@ char** msg_address = reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize); char* msg = *msg_address; - ASSERT(msg != NULL); + DCHECK(msg != NULL); // Update this stop description. 
if (!watched_stops_[code].desc) { @@ -479,17 +456,18 @@ || (strcmp(cmd, "printobject") == 0)) { if (argc == 2) { int32_t value; + OFStream os(stdout); if (GetValue(arg1, &value)) { Object* obj = reinterpret_cast<Object*>(value); - PrintF("%s: \n", arg1); + os << arg1 << ": \n"; #ifdef DEBUG - obj->PrintLn(); + obj->Print(os); + os << "\n"; #else - obj->ShortPrint(); - PrintF("\n"); + os << Brief(obj) << "\n"; #endif } else { - PrintF("%s unrecognized\n", arg1); + os << arg1 << " unrecognized\n"; } } else { PrintF("printobject <value>\n"); @@ -590,7 +568,7 @@ } } else if (strcmp(cmd, "gdb") == 0) { PrintF("relinquishing control to gdb\n"); - v8::internal::OS::DebugBreak(); + v8::base::OS::DebugBreak(); PrintF("regaining control from gdb\n"); } else if (strcmp(cmd, "break") == 0) { if (argc == 2) { @@ -776,8 +754,8 @@ static bool ICacheMatch(void* one, void* two) { - ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0); - ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0); + DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0); + DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0); return one == two; } @@ -814,7 +792,7 @@ FlushOnePage(i_cache, start, bytes_to_flush); start += bytes_to_flush; size -= bytes_to_flush; - ASSERT_EQ(0, start & CachePage::kPageMask); + DCHECK_EQ(0, start & CachePage::kPageMask); offset = 0; } if (size != 0) { @@ -839,10 +817,10 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start, int size) { - ASSERT(size <= CachePage::kPageSize); - ASSERT(AllOnOnePage(start, size - 1)); - ASSERT((start & CachePage::kLineMask) == 0); - ASSERT((size & CachePage::kLineMask) == 0); + DCHECK(size <= CachePage::kPageSize); + DCHECK(AllOnOnePage(start, size - 1)); + DCHECK((start & CachePage::kLineMask) == 0); + DCHECK((size & CachePage::kLineMask) == 0); void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask)); int offset = (start & 
CachePage::kPageMask); CachePage* cache_page = GetCachePage(i_cache, page); @@ -863,12 +841,12 @@ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); if (cache_hit) { // Check that the data in memory matches the contents of the I-cache. - CHECK(memcmp(reinterpret_cast<void*>(instr), - cache_page->CachedData(offset), - Instruction::kInstrSize) == 0); + CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr), + cache_page->CachedData(offset), + Instruction::kInstrSize)); } else { // Cache miss. Load memory into the cache. - OS::MemCopy(cached_line, line, CachePage::kLineLength); + memcpy(cached_line, line, CachePage::kLineLength); *cache_valid_byte = CachePage::LINE_VALID; } } @@ -1001,8 +979,8 @@ Simulator* Simulator::current(Isolate* isolate) { v8::internal::Isolate::PerIsolateThreadData* isolate_data = isolate->FindOrAllocatePerThreadDataForThisThread(); - ASSERT(isolate_data != NULL); - ASSERT(isolate_data != NULL); + DCHECK(isolate_data != NULL); + DCHECK(isolate_data != NULL); Simulator* sim = isolate_data->simulator(); if (sim == NULL) { @@ -1017,7 +995,7 @@ // Sets the register in the architecture state. It will also deal with updating // Simulator internal state for special registers such as PC. 
void Simulator::set_register(int reg, int32_t value) { - ASSERT((reg >= 0) && (reg < kNumSimuRegisters)); + DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); if (reg == pc) { pc_modified_ = true; } @@ -1028,26 +1006,26 @@ void Simulator::set_dw_register(int reg, const int* dbl) { - ASSERT((reg >= 0) && (reg < kNumSimuRegisters)); + DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); registers_[reg] = dbl[0]; registers_[reg + 1] = dbl[1]; } void Simulator::set_fpu_register(int fpureg, int32_t value) { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); FPUregisters_[fpureg] = value; } void Simulator::set_fpu_register_float(int fpureg, float value) { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); *BitCast<float*>(&FPUregisters_[fpureg]) = value; } void Simulator::set_fpu_register_double(int fpureg, double value) { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); *BitCast<double*>(&FPUregisters_[fpureg]) = value; } @@ -1055,7 +1033,7 @@ // Get the register from the architecture state. This function does handle // the special case of accessing the PC register. int32_t Simulator::get_register(int reg) const { - ASSERT((reg >= 0) && (reg < kNumSimuRegisters)); + DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); if (reg == 0) return 0; else @@ -1064,40 +1042,40 @@ double Simulator::get_double_from_register_pair(int reg) { - ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0)); + DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0)); double dm_val = 0.0; // Read the bits from the unsigned integer register_[] array // into the double precision floating point value and return it. 
char buffer[2 * sizeof(registers_[0])]; - OS::MemCopy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); - OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0])); + memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); + memcpy(&dm_val, buffer, 2 * sizeof(registers_[0])); return(dm_val); } int32_t Simulator::get_fpu_register(int fpureg) const { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); return FPUregisters_[fpureg]; } int64_t Simulator::get_fpu_register_long(int fpureg) const { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); return *BitCast<int64_t*>( const_cast<int32_t*>(&FPUregisters_[fpureg])); } float Simulator::get_fpu_register_float(int fpureg) const { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); return *BitCast<float*>( const_cast<int32_t*>(&FPUregisters_[fpureg])); } double Simulator::get_fpu_register_double(int fpureg) const { - ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg])); } @@ -1119,14 +1097,14 @@ // Registers a0 and a1 -> x. reg_buffer[0] = get_register(a0); reg_buffer[1] = get_register(a1); - OS::MemCopy(x, buffer, sizeof(buffer)); + memcpy(x, buffer, sizeof(buffer)); // Registers a2 and a3 -> y. reg_buffer[0] = get_register(a2); reg_buffer[1] = get_register(a3); - OS::MemCopy(y, buffer, sizeof(buffer)); + memcpy(y, buffer, sizeof(buffer)); // Register 2 -> z. 
reg_buffer[0] = get_register(a2); - OS::MemCopy(z, buffer, sizeof(*z)); + memcpy(z, buffer, sizeof(*z)); } } @@ -1138,7 +1116,7 @@ } else { char buffer[2 * sizeof(registers_[0])]; int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer); - OS::MemCopy(buffer, &result, sizeof(buffer)); + memcpy(buffer, &result, sizeof(buffer)); // Copy result to v0 and v1. set_register(v0, reg_buffer[0]); set_register(v1, reg_buffer[1]); @@ -1267,7 +1245,7 @@ PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); - OS::Abort(); + base::OS::Abort(); return 0; } @@ -1281,7 +1259,7 @@ PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); - OS::Abort(); + base::OS::Abort(); } @@ -1293,7 +1271,7 @@ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); - OS::Abort(); + base::OS::Abort(); return 0; } @@ -1306,7 +1284,7 @@ PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); - OS::Abort(); + base::OS::Abort(); return 0; } @@ -1320,7 +1298,7 @@ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); - OS::Abort(); + base::OS::Abort(); } @@ -1333,7 +1311,7 @@ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); - OS::Abort(); + base::OS::Abort(); } @@ -1660,8 +1638,8 @@ bool Simulator::IsEnabledStop(uint32_t code) { - ASSERT(code <= kMaxStopCode); - ASSERT(code > kMaxWatchpointCode); + DCHECK(code <= kMaxStopCode); + DCHECK(code > kMaxWatchpointCode); return !(watched_stops_[code].count & kStopDisabledBit); } @@ -1681,7 +1659,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) { - ASSERT(code <= kMaxStopCode); + DCHECK(code <= kMaxStopCode); if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) { PrintF("Stop counter 
for code %i has overflowed.\n" "Enabling this code and reseting the counter to 0.\n", code); @@ -1729,12 +1707,12 @@ // Handle execution based on instruction types. void Simulator::ConfigureTypeRegister(Instruction* instr, - int32_t& alu_out, - int64_t& i64hilo, - uint64_t& u64hilo, - int32_t& next_pc, - int32_t& return_addr_reg, - bool& do_interrupt) { + int32_t* alu_out, + int64_t* i64hilo, + uint64_t* u64hilo, + int32_t* next_pc, + int32_t* return_addr_reg, + bool* do_interrupt) { // Every local variable declared here needs to be const. // This is to make sure that changed values are sent back to // DecodeTypeRegister correctly. @@ -1762,11 +1740,11 @@ break; case CFC1: // At the moment only FCSR is supported. - ASSERT(fs_reg == kFCSRRegister); - alu_out = FCSR_; + DCHECK(fs_reg == kFCSRRegister); + *alu_out = FCSR_; break; case MFC1: - alu_out = get_fpu_register(fs_reg); + *alu_out = get_fpu_register(fs_reg); break; case MFHC1: UNIMPLEMENTED_MIPS(); @@ -1785,7 +1763,7 @@ break; default: UNIMPLEMENTED_MIPS(); - }; + } break; case COP1X: break; @@ -1793,56 +1771,56 @@ switch (instr->FunctionFieldRaw()) { case JR: case JALR: - next_pc = get_register(instr->RsValue()); - return_addr_reg = instr->RdValue(); + *next_pc = get_register(instr->RsValue()); + *return_addr_reg = instr->RdValue(); break; case SLL: - alu_out = rt << sa; + *alu_out = rt << sa; break; case SRL: if (rs_reg == 0) { // Regular logical right shift of a word by a fixed number of // bits instruction. RS field is always equal to 0. - alu_out = rt_u >> sa; + *alu_out = rt_u >> sa; } else { // Logical right-rotate of a word by a fixed number of bits. This // is special case of SRL instruction, added in MIPS32 Release 2. // RS field is equal to 00001. 
- alu_out = (rt_u >> sa) | (rt_u << (32 - sa)); + *alu_out = (rt_u >> sa) | (rt_u << (32 - sa)); } break; case SRA: - alu_out = rt >> sa; + *alu_out = rt >> sa; break; case SLLV: - alu_out = rt << rs; + *alu_out = rt << rs; break; case SRLV: if (sa == 0) { // Regular logical right-shift of a word by a variable number of // bits instruction. SA field is always equal to 0. - alu_out = rt_u >> rs; + *alu_out = rt_u >> rs; } else { // Logical right-rotate of a word by a variable number of bits. // This is special case od SRLV instruction, added in MIPS32 // Release 2. SA field is equal to 00001. - alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u)); + *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u)); } break; case SRAV: - alu_out = rt >> rs; + *alu_out = rt >> rs; break; case MFHI: - alu_out = get_register(HI); + *alu_out = get_register(HI); break; case MFLO: - alu_out = get_register(LO); + *alu_out = get_register(LO); break; case MULT: - i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt); + *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt); break; case MULTU: - u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u); + *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u); break; case ADD: if (HaveSameSign(rs, rt)) { @@ -1852,10 +1830,10 @@ exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt); } } - alu_out = rs + rt; + *alu_out = rs + rt; break; case ADDU: - alu_out = rs + rt; + *alu_out = rs + rt; break; case SUB: if (!HaveSameSign(rs, rt)) { @@ -1865,51 +1843,50 @@ exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt); } } - alu_out = rs - rt; + *alu_out = rs - rt; break; case SUBU: - alu_out = rs - rt; + *alu_out = rs - rt; break; case AND: - alu_out = rs & rt; + *alu_out = rs & rt; break; case OR: - alu_out = rs | rt; + *alu_out = rs | rt; break; case XOR: - alu_out = rs ^ rt; + *alu_out = rs ^ rt; break; case NOR: - alu_out = ~(rs | rt); + *alu_out = ~(rs | rt); break; case SLT: - alu_out = 
rs < rt ? 1 : 0; + *alu_out = rs < rt ? 1 : 0; break; case SLTU: - alu_out = rs_u < rt_u ? 1 : 0; + *alu_out = rs_u < rt_u ? 1 : 0; break; // Break and trap instructions. case BREAK: - - do_interrupt = true; + *do_interrupt = true; break; case TGE: - do_interrupt = rs >= rt; + *do_interrupt = rs >= rt; break; case TGEU: - do_interrupt = rs_u >= rt_u; + *do_interrupt = rs_u >= rt_u; break; case TLT: - do_interrupt = rs < rt; + *do_interrupt = rs < rt; break; case TLTU: - do_interrupt = rs_u < rt_u; + *do_interrupt = rs_u < rt_u; break; case TEQ: - do_interrupt = rs == rt; + *do_interrupt = rs == rt; break; case TNE: - do_interrupt = rs != rt; + *do_interrupt = rs != rt; break; case MOVN: case MOVZ: @@ -1922,23 +1899,23 @@ break; default: UNREACHABLE(); - }; + } break; case SPECIAL2: switch (instr->FunctionFieldRaw()) { case MUL: - alu_out = rs_u * rt_u; // Only the lower 32 bits are kept. + *alu_out = rs_u * rt_u; // Only the lower 32 bits are kept. break; case CLZ: // MIPS32 spec: If no bits were set in GPR rs, the result written to // GPR rd is 32. // GCC __builtin_clz: If input is 0, the result is undefined. - alu_out = + *alu_out = rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u); break; default: UNREACHABLE(); - }; + } break; case SPECIAL3: switch (instr->FunctionFieldRaw()) { @@ -1949,7 +1926,7 @@ uint16_t lsb = sa; uint16_t size = msb - lsb + 1; uint32_t mask = (1 << size) - 1; - alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb); + *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb); break; } case EXT: { // Mips32r2 instruction. @@ -1959,16 +1936,16 @@ uint16_t lsb = sa; uint16_t size = msb + 1; uint32_t mask = (1 << size) - 1; - alu_out = (rs_u & (mask << lsb)) >> lsb; + *alu_out = (rs_u & (mask << lsb)) >> lsb; break; } default: UNREACHABLE(); - }; + } break; default: UNREACHABLE(); - }; + } } @@ -2007,12 +1984,12 @@ // Set up the variables if needed before executing the instruction. 
ConfigureTypeRegister(instr, - alu_out, - i64hilo, - u64hilo, - next_pc, - return_addr_reg, - do_interrupt); + &alu_out, + &i64hilo, + &u64hilo, + &next_pc, + &return_addr_reg, + &do_interrupt); // ---------- Raise exceptions triggered. SignalExceptions(); @@ -2034,7 +2011,7 @@ break; case CTC1: // At the moment only FCSR is supported. - ASSERT(fs_reg == kFCSRRegister); + DCHECK(fs_reg == kFCSRRegister); FCSR_ = registers_[rt_reg]; break; case MTC1: @@ -2126,7 +2103,7 @@ break; case CVT_W_D: // Convert double to word. // Rounding modes are not yet supported. - ASSERT((FCSR_ & 3) == 0); + DCHECK((FCSR_ & 3) == 0); // In rounding mode 0 it should behave like ROUND. case ROUND_W_D: // Round double to word (round half to even). { @@ -2227,7 +2204,7 @@ break; default: UNREACHABLE(); - }; + } break; case L: switch (instr->FunctionFieldRaw()) { @@ -2249,7 +2226,7 @@ break; default: UNREACHABLE(); - }; + } break; case COP1X: switch (instr->FunctionFieldRaw()) { @@ -2262,7 +2239,7 @@ break; default: UNREACHABLE(); - }; + } break; case SPECIAL: switch (instr->FunctionFieldRaw()) { @@ -2343,7 +2320,7 @@ break; default: // For other special opcodes we do the default operation. set_register(rd_reg, alu_out); - }; + } break; case SPECIAL2: switch (instr->FunctionFieldRaw()) { @@ -2369,14 +2346,14 @@ break; default: UNREACHABLE(); - }; + } break; // Unimplemented opcodes raised an error in the configuration step before, // so we can use the default here to set the destination register in common // cases. default: set_register(rd_reg, alu_out); - }; + } } @@ -2437,7 +2414,7 @@ break; default: UNREACHABLE(); - }; + } break; // ------------- REGIMM class. case REGIMM: @@ -2456,7 +2433,7 @@ break; default: UNREACHABLE(); - }; + } switch (instr->RtFieldRaw()) { case BLTZ: case BLTZAL: @@ -2475,7 +2452,7 @@ } default: break; - }; + } break; // case REGIMM. // ------------- Branch instructions. 
// When comparing to zero, the encoding of rt field is always 0, so we don't @@ -2608,7 +2585,7 @@ break; default: UNREACHABLE(); - }; + } // ---------- Raise exceptions triggered. SignalExceptions(); @@ -2684,7 +2661,7 @@ break; default: break; - }; + } if (execute_branch_delay_instruction) { @@ -2870,7 +2847,7 @@ // Set up arguments. // First four arguments passed in registers. - ASSERT(argument_count >= 4); + DCHECK(argument_count >= 4); set_register(a0, va_arg(parameters, int32_t)); set_register(a1, va_arg(parameters, int32_t)); set_register(a2, va_arg(parameters, int32_t)); @@ -2881,8 +2858,8 @@ // Compute position of stack on entry to generated code. int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t) - kCArgsSlotsSize); - if (OS::ActivationFrameAlignment() != 0) { - entry_stack &= -OS::ActivationFrameAlignment(); + if (base::OS::ActivationFrameAlignment() != 0) { + entry_stack &= -base::OS::ActivationFrameAlignment(); } // Store remaining arguments on stack, from low to high memory. intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); @@ -2909,10 +2886,10 @@ set_fpu_register_double(f14, d1); } else { int buffer[2]; - ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); - OS::MemCopy(buffer, &d0, sizeof(d0)); + DCHECK(sizeof(buffer[0]) * 2 == sizeof(d0)); + memcpy(buffer, &d0, sizeof(d0)); set_dw_register(a0, buffer); - OS::MemCopy(buffer, &d1, sizeof(d1)); + memcpy(buffer, &d1, sizeof(d1)); set_dw_register(a2, buffer); } CallInternal(entry); diff -Nru nodejs-0.11.13/deps/v8/src/mips/simulator-mips.h nodejs-0.11.15/deps/v8/src/mips/simulator-mips.h --- nodejs-0.11.13/deps/v8/src/mips/simulator-mips.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/simulator-mips.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Declares a Simulator for MIPS instructions if we are not generating a native @@ -36,8 +13,8 @@ #ifndef V8_MIPS_SIMULATOR_MIPS_H_ #define V8_MIPS_SIMULATOR_MIPS_H_ -#include "allocation.h" -#include "constants-mips.h" +#include "src/allocation.h" +#include "src/mips/constants-mips.h" #if !defined(USE_SIMULATOR) // Running without a simulator on a native mips platform. 
@@ -61,9 +38,6 @@ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \ p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - reinterpret_cast<TryCatch*>(try_catch_address) - // The stack limit beyond which we will throw stack overflow errors in // generated code. Because generated code on mips uses the C stack, we // just use the C stack limit. @@ -96,8 +70,8 @@ #else // !defined(USE_SIMULATOR) // Running with a simulator. -#include "hashmap.h" -#include "assembler.h" +#include "src/assembler.h" +#include "src/hashmap.h" namespace v8 { namespace internal { @@ -289,12 +263,12 @@ // Helper function for DecodeTypeRegister. void ConfigureTypeRegister(Instruction* instr, - int32_t& alu_out, - int64_t& i64hilo, - uint64_t& u64hilo, - int32_t& next_pc, - int32_t& return_addr_reg, - bool& do_interrupt); + int32_t* alu_out, + int64_t* i64hilo, + uint64_t* u64hilo, + int32_t* next_pc, + int32_t* return_addr_reg, + bool* do_interrupt); void DecodeTypeImmediate(Instruction* instr); void DecodeTypeJump(Instruction* instr); @@ -413,10 +387,6 @@ Simulator::current(Isolate::Current())->Call( \ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - try_catch_address == NULL ? \ - NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)) - // The simulator has its own stack. Thus it has a different stack limit from // the C-based native code. Setting the c_limit to indicate a very small diff -Nru nodejs-0.11.13/deps/v8/src/mips/stub-cache-mips.cc nodejs-0.11.15/deps/v8/src/mips/stub-cache-mips.cc --- nodejs-0.11.13/deps/v8/src/mips/stub-cache-mips.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips/stub-cache-mips.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "ic-inl.h" -#include "codegen.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -59,12 +36,12 @@ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); // Check the relative positions of the address fields. - ASSERT(value_off_addr > key_off_addr); - ASSERT((value_off_addr - key_off_addr) % 4 == 0); - ASSERT((value_off_addr - key_off_addr) < (256 * 4)); - ASSERT(map_off_addr > key_off_addr); - ASSERT((map_off_addr - key_off_addr) % 4 == 0); - ASSERT((map_off_addr - key_off_addr) < (256 * 4)); + DCHECK(value_off_addr > key_off_addr); + DCHECK((value_off_addr - key_off_addr) % 4 == 0); + DCHECK((value_off_addr - key_off_addr) < (256 * 4)); + DCHECK(map_off_addr > key_off_addr); + DCHECK((map_off_addr - key_off_addr) % 4 == 0); + DCHECK((map_off_addr - key_off_addr) < (256 * 4)); Label miss; Register base_addr = scratch; @@ -117,14 +94,11 @@ } -void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, - Label* miss_label, - Register receiver, - Handle<Name> name, - Register scratch0, - Register scratch1) { - ASSERT(name->IsUniqueName()); - ASSERT(!receiver.is(scratch0)); +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(name->IsUniqueName()); + DCHECK(!receiver.is(scratch0)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); @@ -183,27 +157,27 @@ // Make sure that code is valid. The multiplying code relies on the // entry size being 12. - ASSERT(sizeof(Entry) == 12); + DCHECK(sizeof(Entry) == 12); // Make sure the flags does not name a specific type. 
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); // Make sure that there are no register conflicts. - ASSERT(!scratch.is(receiver)); - ASSERT(!scratch.is(name)); - ASSERT(!extra.is(receiver)); - ASSERT(!extra.is(name)); - ASSERT(!extra.is(scratch)); - ASSERT(!extra2.is(receiver)); - ASSERT(!extra2.is(name)); - ASSERT(!extra2.is(scratch)); - ASSERT(!extra2.is(extra)); + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); + DCHECK(!extra2.is(receiver)); + DCHECK(!extra2.is(name)); + DCHECK(!extra2.is(scratch)); + DCHECK(!extra2.is(extra)); // Check register validity. - ASSERT(!scratch.is(no_reg)); - ASSERT(!extra.is(no_reg)); - ASSERT(!extra2.is(no_reg)); - ASSERT(!extra3.is(no_reg)); + DCHECK(!scratch.is(no_reg)); + DCHECK(!extra.is(no_reg)); + DCHECK(!extra2.is(no_reg)); + DCHECK(!extra3.is(no_reg)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, @@ -219,8 +193,8 @@ uint32_t mask = kPrimaryTableSize - 1; // We shift out the last two bits because they are not part of the hash and // they are always 01 for maps. - __ srl(scratch, scratch, kHeapObjectTagSize); - __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); + __ srl(scratch, scratch, kCacheIndexShift); + __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); __ And(scratch, scratch, Operand(mask)); // Probe the primary table. @@ -236,10 +210,10 @@ extra3); // Primary miss: Compute hash for secondary probe. 
- __ srl(at, name, kHeapObjectTagSize); + __ srl(at, name, kCacheIndexShift); __ Subu(scratch, scratch, at); uint32_t mask2 = kSecondaryTableSize - 1; - __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); + __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); __ And(scratch, scratch, Operand(mask2)); // Probe the secondary table. @@ -262,30 +236,8 @@ } -void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype) { - // Load the global or builtins object from the current context. - __ lw(prototype, - MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - // Load the native context from the global or builtins object. - __ lw(prototype, - FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); - // Load the function from the native context. - __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index))); - // Load the initial map. The global functions all have initial maps. - __ lw(prototype, - FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); - // Load the prototype from the initial map. - __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); -} - - -void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( - MacroAssembler* masm, - int index, - Register prototype, - Label* miss) { +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); // Get the global function with the given index. Handle<JSFunction> function( @@ -307,59 +259,20 @@ } -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - bool inobject, - int index, - Representation representation) { - ASSERT(!representation.IsDouble()); - int offset = index * kPointerSize; - if (!inobject) { - // Calculate the offset into the properties array. 
- offset = offset + FixedArray::kHeaderSize; - __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - src = dst; - } - __ lw(dst, FieldMemOperand(src, offset)); -} - - -void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, - Register receiver, - Register scratch, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss_label); - - // Check that the object is a JS array. - __ GetObjectType(receiver, scratch, scratch); - __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE)); - - // Load length directly from the JS array. - __ Ret(USE_DELAY_SLOT); - __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); -} - - -void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, - Register receiver, - Register scratch1, - Register scratch2, - Label* miss_label) { +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register scratch1, + Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); __ Ret(USE_DELAY_SLOT); __ mov(v0, scratch1); } -void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, - Handle<JSGlobalObject> global, - Handle<Name> name, - Register scratch, - Label* miss) { +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); - ASSERT(cell->value()->IsTheHole()); + DCHECK(cell->value()->IsTheHole()); __ li(scratch, Operand(cell)); __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); __ LoadRoot(at, Heap::kTheHoleValueRootIndex); @@ -367,18 +280,126 @@ } -void StoreStubCompiler::GenerateNegativeHolderLookup( +static void PushInterceptorArguments(MacroAssembler* masm, + Register receiver, + Register holder, + Register name, + Handle<JSObject> holder_obj) { + 
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); + __ push(name); + Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); + Register scratch = name; + __ li(scratch, Operand(interceptor)); + __ Push(scratch, receiver, holder); +} + + +static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, - Handle<JSObject> holder, - Register holder_reg, - Handle<Name> name, - Label* miss) { - if (holder->IsJSGlobalObject()) { - GenerateCheckPropertyCell( - masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); - } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { - GenerateDictionaryNegativeLookup( - masm, miss, holder_reg, name, scratch1(), scratch2()); + Register receiver, + Register holder, + Register name, + Handle<JSObject> holder_obj, + IC::UtilityId id) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); +} + + +// Generate call to api function. +void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch_in, + bool is_store, int argc, Register* values) { + DCHECK(!receiver.is(scratch_in)); + // Preparing to push, adjust sp. + __ Subu(sp, sp, Operand((argc + 1) * kPointerSize)); + __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver. + // Write the arguments to stack frame. 
+ for (int i = 0; i < argc; i++) { + Register arg = values[argc-1-i]; + DCHECK(!receiver.is(arg)); + DCHECK(!scratch_in.is(arg)); + __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg. + } + DCHECK(optimization.is_simple_api_call()); + + // Abi for CallApiFunctionStub. + Register callee = a0; + Register call_data = t0; + Register holder = a2; + Register api_function_address = a1; + + // Put holder in place. + CallOptimization::HolderLookup holder_lookup; + Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( + receiver_map, + &holder_lookup); + switch (holder_lookup) { + case CallOptimization::kHolderIsReceiver: + __ Move(holder, receiver); + break; + case CallOptimization::kHolderFound: + __ li(holder, api_holder); + break; + case CallOptimization::kHolderNotFound: + UNREACHABLE(); + break; + } + + Isolate* isolate = masm->isolate(); + Handle<JSFunction> function = optimization.constant_function(); + Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); + Handle<Object> call_data_obj(api_call_info->data(), isolate); + + // Put callee in place. + __ li(callee, function); + + bool call_data_undefined = false; + // Put call_data in place. + if (isolate->heap()->InNewSpace(*call_data_obj)) { + __ li(call_data, api_call_info); + __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); + } else if (call_data_obj->IsUndefined()) { + call_data_undefined = true; + __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); + } else { + __ li(call_data, call_data_obj); + } + // Put api_function_address in place. + Address function_address = v8::ToCData<Address>(api_call_info->callback()); + ApiFunction fun(function_address); + ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; + ExternalReference ref = ExternalReference(&fun, type, masm->isolate()); + __ li(api_function_address, Operand(ref)); + + // Jump to stub. 
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); + __ TailCallStub(&stub); +} + + +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ Jump(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ li(this->name(), Operand(name)); } } @@ -386,19 +407,10 @@ // Generate StoreTransition code, value is passed in a0 register. // After executing generated code, the receiver_reg and name_reg // may be clobbered. -void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss_label, - Label* slow) { +void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register scratch3, Label* miss_label, Label* slow) { // a0 : value. 
Label exit; @@ -406,20 +418,39 @@ DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); Representation representation = details.representation(); - ASSERT(!representation.IsNone()); + DCHECK(!representation.IsNone()); if (details.type() == CONSTANT) { - Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); + Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); __ li(scratch1, constant); __ Branch(miss_label, ne, value_reg, Operand(scratch1)); } else if (representation.IsSmi()) { __ JumpIfNotSmi(value_reg, miss_label); } else if (representation.IsHeapObject()) { __ JumpIfSmi(value_reg, miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + Handle<Map> current; + if (!it.Done()) { + __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + while (true) { + // Do the CompareMap() directly within the Branch() functions. + current = it.Current(); + it.Advance(); + if (it.Done()) { + __ Branch(miss_label, ne, scratch1, Operand(current)); + break; + } + __ Branch(&do_store, eq, scratch1, Operand(current)); + } + __ bind(&do_store); + } } else if (representation.IsDouble()) { Label do_store, heap_number; - __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow); + __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex); + __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow, + TAG_RESULT, MUTABLE); __ JumpIfNotSmi(value_reg, &heap_number); __ SmiUntag(scratch1, value_reg); @@ -436,13 +467,12 @@ __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); } - // Stub never generated for non-global objects that require access - // checks. 
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + // Stub never generated for objects that require access checks. + DCHECK(!transition->is_access_check_needed()); // Perform map transition for the receiver if necessary. if (details.type() == FIELD && - object->map()->unused_property_fields() == 0) { + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { // The properties must be extended before we can store the value. // We jump to a runtime call that extends the properties array. __ push(receiver_reg); @@ -450,7 +480,7 @@ __ Push(a2, a0); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), - masm->isolate()), + isolate()), 3, 1); return; } @@ -470,7 +500,7 @@ OMIT_SMI_CHECK); if (details.type() == CONSTANT) { - ASSERT(value_reg.is(a0)); + DCHECK(value_reg.is(a0)); __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); return; @@ -482,14 +512,14 @@ // Adjust for the number of properties stored in the object. Even in the // face of a transition we can use the old map here because the size of the // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); + index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. SmiCheck smi_check = representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); + int offset = transition->instance_size() + (index * kPointerSize); if (representation.IsDouble()) { __ sw(storage_reg, FieldMemOperand(receiver_reg, offset)); } else { @@ -539,284 +569,49 @@ } // Return the value (register v0). - ASSERT(value_reg.is(a0)); - __ bind(&exit); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, a0); -} - - -// Generate StoreField code, value is passed in a0 register. 
-// When leaving generated code after success, the receiver_reg and name_reg -// may be clobbered. Upon branch to miss_label, the receiver and name -// registers have their original values. -void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { - // a0 : value - Label exit; - - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - int index = lookup->GetFieldIndex().field_index(); - - // Adjust for the number of properties stored in the object. Even in the - // face of a transition we can use the old map here because the size of the - // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); - - Representation representation = lookup->representation(); - ASSERT(!representation.IsNone()); - if (representation.IsSmi()) { - __ JumpIfNotSmi(value_reg, miss_label); - } else if (representation.IsHeapObject()) { - __ JumpIfSmi(value_reg, miss_label); - } else if (representation.IsDouble()) { - // Load the double storage. - if (index < 0) { - int offset = object->map()->instance_size() + (index * kPointerSize); - __ lw(scratch1, FieldMemOperand(receiver_reg, offset)); - } else { - __ lw(scratch1, - FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - int offset = index * kPointerSize + FixedArray::kHeaderSize; - __ lw(scratch1, FieldMemOperand(scratch1, offset)); - } - - // Store the value into the storage. 
- Label do_store, heap_number; - __ JumpIfNotSmi(value_reg, &heap_number); - __ SmiUntag(scratch2, value_reg); - __ mtc1(scratch2, f6); - __ cvt_d_w(f4, f6); - __ jmp(&do_store); - - __ bind(&heap_number); - __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, - miss_label, DONT_DO_SMI_CHECK); - __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); - - __ bind(&do_store); - __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); - // Return the value (register v0). - ASSERT(value_reg.is(a0)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, a0); - return; - } - - // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; - if (index < 0) { - // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); - __ sw(value_reg, FieldMemOperand(receiver_reg, offset)); - - if (!representation.IsSmi()) { - // Skip updating write barrier if storing a smi. - __ JumpIfSmi(value_reg, &exit); - - // Update the write barrier for the array address. - // Pass the now unused name_reg as a scratch register. - __ mov(name_reg, value_reg); - __ RecordWriteField(receiver_reg, - offset, - name_reg, - scratch1, - kRAHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } else { - // Write to the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - // Get the properties array. - __ lw(scratch1, - FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ sw(value_reg, FieldMemOperand(scratch1, offset)); - - if (!representation.IsSmi()) { - // Skip updating write barrier if storing a smi. - __ JumpIfSmi(value_reg, &exit); - - // Update the write barrier for the array address. - // Ok to clobber receiver_reg and name_reg, since we return. 
- __ mov(name_reg, value_reg); - __ RecordWriteField(scratch1, - offset, - name_reg, - receiver_reg, - kRAHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); - } - } - - // Return the value (register v0). - ASSERT(value_reg.is(a0)); + DCHECK(value_reg.is(a0)); __ bind(&exit); __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); } -void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { - if (!label->is_unused()) { - __ bind(label); - __ li(this->name(), Operand(name)); - } -} - - -static void PushInterceptorArguments(MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj) { - STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); - STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); - STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); - STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); - STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); - __ push(name); - Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); - ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); - Register scratch = name; - __ li(scratch, Operand(interceptor)); - __ Push(scratch, receiver, holder); -} - - -static void CompileCallLoadPropertyWithInterceptor( - MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj, - IC::UtilityId id) { - PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallExternalReference( - ExternalReference(IC_Utility(id), masm->isolate()), - StubCache::kInterceptorArgsLength); -} - - -// Generate call to api function. -void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - Handle<Map> receiver_map, - Register receiver, - Register scratch_in, - bool is_store, - int argc, - Register* values) { - ASSERT(!receiver.is(scratch_in)); - // Preparing to push, adjust sp. 
- __ Subu(sp, sp, Operand((argc + 1) * kPointerSize)); - __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver. - // Write the arguments to stack frame. - for (int i = 0; i < argc; i++) { - Register arg = values[argc-1-i]; - ASSERT(!receiver.is(arg)); - ASSERT(!scratch_in.is(arg)); - __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg. - } - ASSERT(optimization.is_simple_api_call()); - - // Abi for CallApiFunctionStub. - Register callee = a0; - Register call_data = t0; - Register holder = a2; - Register api_function_address = a1; - - // Put holder in place. - CallOptimization::HolderLookup holder_lookup; - Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( - receiver_map, - &holder_lookup); - switch (holder_lookup) { - case CallOptimization::kHolderIsReceiver: - __ Move(holder, receiver); +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + __ lw(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + Handle<Map> current; + while (true) { + // Do the CompareMap() directly within the Branch() functions. + current = it.Current(); + it.Advance(); + if (it.Done()) { + __ Branch(miss_label, ne, scratch1(), Operand(current)); break; - case CallOptimization::kHolderFound: - __ li(holder, api_holder); - break; - case CallOptimization::kHolderNotFound: - UNREACHABLE(); - break; - } - - Isolate* isolate = masm->isolate(); - Handle<JSFunction> function = optimization.constant_function(); - Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data_obj(api_call_info->data(), isolate); - - // Put callee in place. - __ li(callee, function); - - bool call_data_undefined = false; - // Put call_data in place. 
- if (isolate->heap()->InNewSpace(*call_data_obj)) { - __ li(call_data, api_call_info); - __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); - } else if (call_data_obj->IsUndefined()) { - call_data_undefined = true; - __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); - } else { - __ li(call_data, call_data_obj); + } + __ Branch(&do_store, eq, scratch1(), Operand(current)); } - // Put api_function_address in place. - Address function_address = v8::ToCData<Address>(api_call_info->callback()); - ApiFunction fun(function_address); - ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; - ExternalReference ref = - ExternalReference(&fun, - type, - masm->isolate()); - __ li(api_function_address, Operand(ref)); + __ bind(&do_store); - // Jump to stub. - CallApiFunctionStub stub(is_store, call_data_undefined, argc); - __ TailCallStub(&stub); + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); } -void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { - __ Jump(code, RelocInfo::CODE_TARGET); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - - -Register StubCompiler::CheckPrototypes(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Register holder_reg, - Register scratch1, - Register scratch2, - Handle<Name> name, - Label* miss, - PrototypeCheckType check) { - Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); // Make sure there's no overlap between holder and object registers. 
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && !scratch2.is(scratch1)); // Keep track of the current object in register reg. @@ -824,10 +619,12 @@ int depth = 0; Handle<JSObject> current = Handle<JSObject>::null(); - if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant()); + if (type()->IsConstant()) { + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); + } Handle<JSObject> prototype = Handle<JSObject>::null(); Handle<Map> current_map = receiver_map; - Handle<Map> holder_map(holder->map()); + Handle<Map> holder_map(holder()->map()); // Traverse the prototype chain and check the maps in the prototype chain for // fast and global objects or do negative lookup for normal objects. while (!current_map.is_identical_to(holder_map)) { @@ -835,19 +632,19 @@ // Only global objects and objects that do not require access // checks are allowed in stubs. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); prototype = handle(JSObject::cast(current_map->prototype())); if (current_map->is_dictionary_map() && - !current_map->IsJSGlobalObjectMap() && - !current_map->IsJSGlobalProxyMap()) { + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. if (!name->IsUniqueName()) { - ASSERT(name->IsString()); + DCHECK(name->IsString()); name = factory()->InternalizeString(Handle<String>::cast(name)); } - ASSERT(current.is_null() || - current->property_dictionary()->FindEntry(*name) == + DCHECK(current.is_null() || + current->property_dictionary()->FindEntry(name) == NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, @@ -868,6 +665,9 @@ // Check access rights to the global object. 
This has to happen after // the map check so that we know that the object is actually a global // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch2, miss); } else if (current_map->IsJSGlobalObjectMap()) { @@ -878,12 +678,15 @@ reg = holder_reg; // From now on the object will be in holder_reg. - if (heap()->InNewSpace(*prototype)) { - // The prototype is in new space; we cannot store a reference to it - // in the code. Load it from the map. + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. + // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = + heap()->InNewSpace(*prototype) || depth == 1; + if (load_prototype_from_map) { __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); } else { - // The prototype is in old space; load it directly. __ li(reg, Operand(prototype)); } } @@ -902,7 +705,7 @@ } // Perform security check for access to the global object. 
- ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, miss); @@ -913,7 +716,7 @@ } -void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ Branch(&success); @@ -924,91 +727,26 @@ } -void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ Branch(&success); - GenerateRestoreName(masm(), miss, name); + GenerateRestoreName(miss, name); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } } -Register LoadStubCompiler::CallbackHandlerFrontend( - Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> callback) { - Label miss; - - Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); - - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - ASSERT(!reg.is(scratch2())); - ASSERT(!reg.is(scratch3())); - ASSERT(!reg.is(scratch4())); - - // Load the properties dictionary. - Register dictionary = scratch4(); - __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - NameDictionaryLookupStub::GeneratePositiveLookup(masm(), - &miss, - &probe_done, - dictionary, - this->name(), - scratch2(), - scratch3()); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // pointer into the dictionary. Check that the value is the callback. 
- Register pointer = scratch3(); - const int kElementsStartOffset = NameDictionary::kHeaderSize + - NameDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset)); - __ Branch(&miss, ne, scratch2(), Operand(callback)); - } - - HandlerFrontendFooter(name, &miss); - return reg; -} - - -void LoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - if (!reg.is(receiver())) __ mov(receiver(), reg); - if (kind() == Code::LOAD_IC) { - LoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } else { - KeyedLoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } -} - - -void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. __ li(v0, value); __ Ret(); } -void LoadStubCompiler::GenerateLoadCallback( - Register reg, - Handle<ExecutableAccessorInfo> callback) { +void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { // Build AccessorInfo::args_ list on the stack and push property name below // the exit frame to make GC aware of them and store pointers to them. 
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); @@ -1018,9 +756,9 @@ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); - ASSERT(!scratch2().is(reg)); - ASSERT(!scratch3().is(reg)); - ASSERT(!scratch4().is(reg)); + DCHECK(!scratch2().is(reg)); + DCHECK(!scratch3().is(reg)); + DCHECK(!scratch4().is(reg)); __ push(receiver()); if (heap()->InNewSpace(callback->data())) { __ li(scratch3(), callback); @@ -1051,19 +789,16 @@ ExternalReference ref = ExternalReference(&fun, type, isolate()); __ li(getter_address_reg, Operand(ref)); - CallApiGetterStub stub; + CallApiGetterStub stub(isolate()); __ TailCallStub(&stub); } -void LoadStubCompiler::GenerateLoadInterceptor( - Register holder_reg, - Handle<Object> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Handle<Name> name) { - ASSERT(interceptor_holder->HasNamedInterceptor()); - ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added @@ -1074,10 +809,12 @@ compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { - ExecutableAccessorInfo* callback = - ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); - compile_followup_inline = callback->getter() != NULL && - callback->IsCompatibleReceiver(*object); + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + 
ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); } } @@ -1085,13 +822,13 @@ // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. - bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); + bool must_perfrom_prototype_check = *holder() != lookup->holder(); bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); @@ -1108,7 +845,7 @@ // interceptor's holder has been compiled before (see a caller // of this method). CompileCallLoadPropertyWithInterceptor( - masm(), receiver(), holder_reg, this->name(), interceptor_holder, + masm(), receiver(), holder_reg, this->name(), holder(), IC::kLoadPropertyWithInterceptorOnly); // Check if interceptor provided a value for property. If it's @@ -1127,42 +864,25 @@ } // Leave the internal frame. } - GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); + GenerateLoadPostInterceptor(holder_reg, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. 
- PushInterceptorArguments(masm(), receiver(), holder_reg, - this->name(), interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); ExternalReference ref = ExternalReference( - IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate()); - __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); + IC_Utility(IC::kLoadPropertyWithInterceptor), isolate()); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); } } -void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { - Label success; - // Check that the object is a boolean. - __ LoadRoot(at, Heap::kTrueValueRootIndex); - __ Branch(&success, eq, object, Operand(at)); - __ LoadRoot(at, Heap::kFalseValueRootIndex); - __ Branch(miss, ne, object, Operand(at)); - __ bind(&success); -} - - -Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, Handle<ExecutableAccessorInfo> callback) { - Register holder_reg = HandlerFrontend( - IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); - - // Stub never generated for non-global objects that require access - // checks. - ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); + Register holder_reg = Frontend(receiver(), name); __ Push(receiver(), holder_reg); // Receiver. __ li(at, Operand(callback)); // Callback info. 
@@ -1184,10 +904,8 @@ #define __ ACCESS_MASM(masm) -void StoreStubCompiler::GenerateStoreViaSetter( - MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, Handle<JSFunction> setter) { // ----------- S t a t e ------------- // -- ra : return address @@ -1203,8 +921,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. __ lw(receiver, - FieldMemOperand( - receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ Push(receiver, value()); ParameterCount actual(1); @@ -1231,14 +948,13 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> object, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( Handle<Name> name) { __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); __ TailCallExternalReference(store_ic_property, 3, 1); // Return the generated code. @@ -1246,61 +962,35 @@ } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name) { - NonexistentHandlerFrontend(type, last, name); - - // Return undefined if maps of the full prototype chain is still the same. - __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); - __ Ret(); - - // Return the generated code. - return GetCode(kind(), Code::FAST, name); -} - - -Register* LoadStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3, scratch4. 
- static Register registers[] = { a0, a2, a3, a1, t0, t1 }; - return registers; -} - - -Register* KeyedLoadStubCompiler::registers() { +Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4. - static Register registers[] = { a1, a0, a2, a3, t0, t1 }; + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, a3, a0, t0, t1 }; return registers; } -Register StoreStubCompiler::value() { - return a0; -} - - -Register* StoreStubCompiler::registers() { +Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { a1, a2, a3, t0, t1 }; + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + DCHECK(a3.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, a3, t0, t1 }; return registers; } -Register* KeyedStoreStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { a2, a1, a3, t0, t1 }; - return registers; -} +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } #undef __ #define __ ACCESS_MASM(masm) -void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, - Handle<JSFunction> getter) { +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { // ----------- S t a t e ------------- // -- a0 : receiver // -- a2 : name @@ -1314,8 +1004,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. 
__ lw(receiver, - FieldMemOperand( - receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ push(receiver); ParameterCount actual(0); @@ -1339,57 +1028,62 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadGlobal( - Handle<HeapType> type, - Handle<GlobalObject> global, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete) { +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { Label miss; - HandlerFrontendHeader(type, receiver(), global, name, &miss); + FrontendHeader(receiver(), name, &miss); // Get the value from the cell. - __ li(a3, Operand(cell)); - __ lw(t0, FieldMemOperand(a3, Cell::kValueOffset)); + Register result = StoreIC::ValueRegister(); + __ li(result, Operand(cell)); + __ lw(result, FieldMemOperand(result, Cell::kValueOffset)); // Check for deleted property if property can actually be deleted. - if (!is_dont_delete) { + if (is_configurable) { __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - __ Branch(&miss, eq, t0, Operand(at)); + __ Branch(&miss, eq, result, Operand(at)); } Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3); __ Ret(USE_DELAY_SLOT); - __ mov(v0, t0); + __ mov(v0, result); - HandlerFrontendFooter(name, &miss); + FrontendFooter(name, &miss); // Return the generated code. 
return GetCode(kind(), Code::NORMAL, name); } -Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( - TypeHandleList* types, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; if (check == PROPERTY && (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - __ Branch(&miss, ne, this->name(), Operand(name)); + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ Branch(&miss, ne, this->name(), Operand(name)); + } } Label number_case; - Register match = scratch1(); + Register match = scratch2(); Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi. - Register map_reg = scratch2(); + // Polymorphic keyed stores may use the map register + Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); int receiver_count = types->length(); int number_of_handled_maps = 0; @@ -1403,14 +1097,14 @@ // Separate compare from branch, to provide path for above JumpIfSmi(). __ Subu(match, map_reg, Operand(map)); if (type->Is(HeapType::Number())) { - ASSERT(!number_case.is_unused()); + DCHECK(!number_case.is_unused()); __ bind(&number_case); } __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match, Operand(zero_reg)); } } - ASSERT(number_of_handled_maps != 0); + DCHECK(number_of_handled_maps != 0); __ bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); @@ -1418,24 +1112,12 @@ // Return the generated code. InlineCacheState state = number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetICCode(kind(), type, name, state); -} - - -void StoreStubCompiler::GenerateStoreArrayLength() { - // Prepare tail call to StoreIC_ArrayLength. - __ Push(receiver(), value()); - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), - masm()->isolate()); - __ TailCallExternalReference(ref, 2, 1); + return GetCode(kind(), type, name, state); } -Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { Label miss; __ JumpIfSmi(receiver(), &miss); @@ -1459,8 +1141,7 @@ TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetICCode( - kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } @@ -1468,45 +1149,32 @@ #define __ ACCESS_MASM(masm) -void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( +void ElementHandlerCompiler::GenerateLoadDictionaryElement( MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + // The return address is in ra. Label slow, miss; - Register key = a0; - Register receiver = a1; + Register key = LoadIC::NameRegister(); + Register receiver = LoadIC::ReceiverRegister(); + DCHECK(receiver.is(a1)); + DCHECK(key.is(a2)); - __ JumpIfNotSmi(key, &miss); + __ UntagAndJumpIfNotSmi(t2, key, &miss); __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ sra(a2, a0, kSmiTagSize); - __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1); + __ LoadFromNumberDictionary(&slow, t0, key, v0, t2, a3, t1); __ Ret(); - // Slow case, key and receiver still in a0 and a1. 
+ // Slow case, key and receiver still unmodified. __ bind(&slow); __ IncrementCounter( masm->isolate()->counters()->keyed_load_external_array_slow(), 1, a2, a3); - // Entry registers are intact. - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); // Miss case, call the runtime. __ bind(&miss); - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); } diff -Nru nodejs-0.11.13/deps/v8/src/mips64/assembler-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/assembler-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/assembler-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/assembler-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2933 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/base/cpu.h" +#include "src/mips64/assembler-mips64-inl.h" +#include "src/serialize.h" + +namespace v8 { +namespace internal { + + +// Get the CPU features enabled by the build. For cross compilation the +// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS +// can be defined to enable FPU instructions when building the +// snapshot. +static unsigned CpuFeaturesImpliedByCompiler() { + unsigned answer = 0; +#ifdef CAN_USE_FPU_INSTRUCTIONS + answer |= 1u << FPU; +#endif // def CAN_USE_FPU_INSTRUCTIONS + + // If the compiler is allowed to use FPU then we can use FPU too in our code + // generation even when generating snapshots. This won't work for cross + // compilation. 
+#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 + answer |= 1u << FPU; +#endif + + return answer; +} + + +const char* DoubleRegister::AllocationIndexToString(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + const char* const names[] = { + "f0", + "f2", + "f4", + "f6", + "f8", + "f10", + "f12", + "f14", + "f16", + "f18", + "f20", + "f22", + "f24", + "f26" + }; + return names[index]; +} + + +void CpuFeatures::ProbeImpl(bool cross_compile) { + supported_ |= CpuFeaturesImpliedByCompiler(); + + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; + + // If the compiler is allowed to use fpu then we can use fpu too in our + // code generation. +#ifndef __mips__ + // For the simulator build, use FPU. + supported_ |= 1u << FPU; +#else + // Probe for additional features at runtime. + base::CPU cpu; + if (cpu.has_fpu()) supported_ |= 1u << FPU; +#endif +} + + +void CpuFeatures::PrintTarget() { } +void CpuFeatures::PrintFeatures() { } + + +int ToNumber(Register reg) { + DCHECK(reg.is_valid()); + const int kNumbers[] = { + 0, // zero_reg + 1, // at + 2, // v0 + 3, // v1 + 4, // a0 + 5, // a1 + 6, // a2 + 7, // a3 + 8, // a4 + 9, // a5 + 10, // a6 + 11, // a7 + 12, // t0 + 13, // t1 + 14, // t2 + 15, // t3 + 16, // s0 + 17, // s1 + 18, // s2 + 19, // s3 + 20, // s4 + 21, // s5 + 22, // s6 + 23, // s7 + 24, // t8 + 25, // t9 + 26, // k0 + 27, // k1 + 28, // gp + 29, // sp + 30, // fp + 31, // ra + }; + return kNumbers[reg.code()]; +} + + +Register ToRegister(int num) { + DCHECK(num >= 0 && num < kNumRegisters); + const Register kRegisters[] = { + zero_reg, + at, + v0, v1, + a0, a1, a2, a3, a4, a5, a6, a7, + t0, t1, t2, t3, + s0, s1, s2, s3, s4, s5, s6, s7, + t8, t9, + k0, k1, + gp, + sp, + fp, + ra + }; + return kRegisters[num]; +} + + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo. 
+ +const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | + 1 << RelocInfo::INTERNAL_REFERENCE; + + +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially coded. Being + // specially coded on MIPS means that it is a lui/ori instruction, and that is + // always the case inside code objects. + return true; +} + + +bool RelocInfo::IsInConstantPool() { + return false; +} + + +// Patch the code at the current address with the supplied instructions. +void RelocInfo::PatchCode(byte* instructions, int instruction_count) { + Instr* pc = reinterpret_cast<Instr*>(pc_); + Instr* instr = reinterpret_cast<Instr*>(instructions); + for (int i = 0; i < instruction_count; i++) { + *(pc + i) = *(instr + i); + } + + // Indicate that code has changed. + CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); +} + + +// Patch the code at the current PC with a call to the target address. +// Additional guard instructions can be added if required. +void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { + // Patch the code at the current address with a call to the target. + UNIMPLEMENTED_MIPS(); +} + + +// ----------------------------------------------------------------------------- +// Implementation of Operand and MemOperand. +// See assembler-mips-inl.h for inlined constructors. + +Operand::Operand(Handle<Object> handle) { + AllowDeferredHandleDereference using_raw_address; + rm_ = no_reg; + // Verify all Objects referred by code are NOT in new space. + Object* obj = *handle; + if (obj->IsHeapObject()) { + DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); + imm64_ = reinterpret_cast<intptr_t>(handle.location()); + rmode_ = RelocInfo::EMBEDDED_OBJECT; + } else { + // No relocation needed. 
+ imm64_ = reinterpret_cast<intptr_t>(obj); + rmode_ = RelocInfo::NONE64; + } +} + + +MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) { + offset_ = offset; +} + + +MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier, + OffsetAddend offset_addend) : Operand(rm) { + offset_ = unit * multiplier + offset_addend; +} + + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. + +static const int kNegOffset = 0x00008000; +// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r) +// operations as post-increment of sp. +const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift) + | (kRegister_sp_Code << kRtShift) + | (kPointerSize & kImm16Mask); // NOLINT +// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. +const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift) + | (kRegister_sp_Code << kRtShift) + | (-kPointerSize & kImm16Mask); // NOLINT +// sd(r, MemOperand(sp, 0)) +const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift) + | (0 & kImm16Mask); // NOLINT +// ld(r, MemOperand(sp, 0)) +const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift) + | (0 & kImm16Mask); // NOLINT + +const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift) + | (0 & kImm16Mask); // NOLINT + +const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift) + | (0 & kImm16Mask); // NOLINT + +const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift) + | (kNegOffset & kImm16Mask); // NOLINT + +const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift) + | (kNegOffset & kImm16Mask); // NOLINT +// A mask for the Rt register for push, pop, lw, sw instructions. 
+const Instr kRtMask = kRtFieldMask; +const Instr kLwSwInstrTypeMask = 0xffe00000; +const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; +const Instr kLwSwOffsetMask = kImm16Mask; + + +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), + recorded_ast_id_(TypeFeedbackId::None()), + positions_recorder_(this) { + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); + + last_trampoline_pool_end_ = 0; + no_trampoline_pool_before_ = 0; + trampoline_pool_blocked_nesting_ = 0; + // We leave space (16 * kTrampolineSlotsSize) + // for BlockTrampolinePoolScope buffer. + next_buffer_check_ = FLAG_force_long_branches + ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16; + internal_trampoline_exception_ = false; + last_bound_pos_ = 0; + + trampoline_emitted_ = FLAG_force_long_branches; + unbound_labels_count_ = 0; + block_buffer_growth_ = false; + + ClearRecordedAstId(); +} + + +void Assembler::GetCode(CodeDesc* desc) { + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. + // Set up code descriptor. + desc->buffer = buffer_; + desc->buffer_size = buffer_size_; + desc->instr_size = pc_offset(); + desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); + desc->origin = this; +} + + +void Assembler::Align(int m) { + DCHECK(m >= 4 && IsPowerOf2(m)); + while ((pc_offset() & (m - 1)) != 0) { + nop(); + } +} + + +void Assembler::CodeTargetAlign() { + // No advantage to aligning branch/call targets to more than + // single instruction, that I am aware of. 
+ Align(4); +} + + +Register Assembler::GetRtReg(Instr instr) { + Register rt; + rt.code_ = (instr & kRtFieldMask) >> kRtShift; + return rt; +} + + +Register Assembler::GetRsReg(Instr instr) { + Register rs; + rs.code_ = (instr & kRsFieldMask) >> kRsShift; + return rs; +} + + +Register Assembler::GetRdReg(Instr instr) { + Register rd; + rd.code_ = (instr & kRdFieldMask) >> kRdShift; + return rd; +} + + +uint32_t Assembler::GetRt(Instr instr) { + return (instr & kRtFieldMask) >> kRtShift; +} + + +uint32_t Assembler::GetRtField(Instr instr) { + return instr & kRtFieldMask; +} + + +uint32_t Assembler::GetRs(Instr instr) { + return (instr & kRsFieldMask) >> kRsShift; +} + + +uint32_t Assembler::GetRsField(Instr instr) { + return instr & kRsFieldMask; +} + + +uint32_t Assembler::GetRd(Instr instr) { + return (instr & kRdFieldMask) >> kRdShift; +} + + +uint32_t Assembler::GetRdField(Instr instr) { + return instr & kRdFieldMask; +} + + +uint32_t Assembler::GetSa(Instr instr) { + return (instr & kSaFieldMask) >> kSaShift; +} + + +uint32_t Assembler::GetSaField(Instr instr) { + return instr & kSaFieldMask; +} + + +uint32_t Assembler::GetOpcodeField(Instr instr) { + return instr & kOpcodeMask; +} + + +uint32_t Assembler::GetFunction(Instr instr) { + return (instr & kFunctionFieldMask) >> kFunctionShift; +} + + +uint32_t Assembler::GetFunctionField(Instr instr) { + return instr & kFunctionFieldMask; +} + + +uint32_t Assembler::GetImmediate16(Instr instr) { + return instr & kImm16Mask; +} + + +uint32_t Assembler::GetLabelConst(Instr instr) { + return instr & ~kImm16Mask; +} + + +bool Assembler::IsPop(Instr instr) { + return (instr & ~kRtMask) == kPopRegPattern; +} + + +bool Assembler::IsPush(Instr instr) { + return (instr & ~kRtMask) == kPushRegPattern; +} + + +bool Assembler::IsSwRegFpOffset(Instr instr) { + return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern); +} + + +bool Assembler::IsLwRegFpOffset(Instr instr) { + return ((instr & kLwSwInstrTypeMask) == 
kLwRegFpOffsetPattern); +} + + +bool Assembler::IsSwRegFpNegOffset(Instr instr) { + return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == + kSwRegFpNegOffsetPattern); +} + + +bool Assembler::IsLwRegFpNegOffset(Instr instr) { + return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == + kLwRegFpNegOffsetPattern); +} + + +// Labels refer to positions in the (to be) generated code. +// There are bound, linked, and unused labels. +// +// Bound labels refer to known positions in the already +// generated code. pos() is the position the label refers to. +// +// Linked labels refer to unknown positions in the code +// to be generated; pos() is the position of the last +// instruction using the label. + +// The link chain is terminated by a value in the instruction of -1, +// which is an otherwise illegal value (branch -1 is inf loop). +// The instruction 16-bit offset field addresses 32-bit words, but in +// code is conv to an 18-bit value addressing bytes, hence the -4 value. + +const int kEndOfChain = -4; +// Determines the end of the Jump chain (a subset of the label link chain). +const int kEndOfJumpChain = 0; + + +bool Assembler::IsBranch(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rt_field = GetRtField(instr); + uint32_t rs_field = GetRsField(instr); + // Checks if the instruction is a branch. + return opcode == BEQ || + opcode == BNE || + opcode == BLEZ || + opcode == BGTZ || + opcode == BEQL || + opcode == BNEL || + opcode == BLEZL || + opcode == BGTZL || + (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || + rt_field == BLTZAL || rt_field == BGEZAL)) || + (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. + (opcode == COP1 && rs_field == BC1EQZ) || + (opcode == COP1 && rs_field == BC1NEZ); +} + + +bool Assembler::IsEmittedConstant(Instr instr) { + uint32_t label_constant = GetLabelConst(instr); + return label_constant == 0; // Emitted label const in reg-exp engine. 
+} + + +bool Assembler::IsBeq(Instr instr) { + return GetOpcodeField(instr) == BEQ; +} + + +bool Assembler::IsBne(Instr instr) { + return GetOpcodeField(instr) == BNE; +} + + +bool Assembler::IsJump(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rt_field = GetRtField(instr); + uint32_t rd_field = GetRdField(instr); + uint32_t function_field = GetFunctionField(instr); + // Checks if the instruction is a jump. + return opcode == J || opcode == JAL || + (opcode == SPECIAL && rt_field == 0 && + ((function_field == JALR) || (rd_field == 0 && (function_field == JR)))); +} + + +bool Assembler::IsJ(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a jump. + return opcode == J; +} + + +bool Assembler::IsJal(Instr instr) { + return GetOpcodeField(instr) == JAL; +} + + +bool Assembler::IsJr(Instr instr) { + return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR; +} + + +bool Assembler::IsJalr(Instr instr) { + return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR; +} + + +bool Assembler::IsLui(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a load upper immediate. + return opcode == LUI; +} + + +bool Assembler::IsOri(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a load upper immediate. + return opcode == ORI; +} + + +bool Assembler::IsNop(Instr instr, unsigned int type) { + // See Assembler::nop(type). + DCHECK(type < 32); + uint32_t opcode = GetOpcodeField(instr); + uint32_t function = GetFunctionField(instr); + uint32_t rt = GetRt(instr); + uint32_t rd = GetRd(instr); + uint32_t sa = GetSa(instr); + + // Traditional mips nop == sll(zero_reg, zero_reg, 0) + // When marking non-zero type, use sll(zero_reg, at, type) + // to avoid use of mips ssnop and ehb special encodings + // of the sll instruction. + + Register nop_rt_reg = (type == 0) ? 
zero_reg : at; + bool ret = (opcode == SPECIAL && function == SLL && + rd == static_cast<uint32_t>(ToNumber(zero_reg)) && + rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && + sa == type); + + return ret; +} + + +int32_t Assembler::GetBranchOffset(Instr instr) { + DCHECK(IsBranch(instr)); + return (static_cast<int16_t>(instr & kImm16Mask)) << 2; +} + + +bool Assembler::IsLw(Instr instr) { + return ((instr & kOpcodeMask) == LW); +} + + +int16_t Assembler::GetLwOffset(Instr instr) { + DCHECK(IsLw(instr)); + return ((instr & kImm16Mask)); +} + + +Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { + DCHECK(IsLw(instr)); + + // We actually create a new lw instruction based on the original one. + Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) + | (offset & kImm16Mask); + + return temp_instr; +} + + +bool Assembler::IsSw(Instr instr) { + return ((instr & kOpcodeMask) == SW); +} + + +Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { + DCHECK(IsSw(instr)); + return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); +} + + +bool Assembler::IsAddImmediate(Instr instr) { + return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU); +} + + +Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { + DCHECK(IsAddImmediate(instr)); + return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); +} + + +bool Assembler::IsAndImmediate(Instr instr) { + return GetOpcodeField(instr) == ANDI; +} + + +int64_t Assembler::target_at(int64_t pos) { + Instr instr = instr_at(pos); + if ((instr & ~kImm16Mask) == 0) { + // Emitted label constant, not part of a branch. + if (instr == 0) { + return kEndOfChain; + } else { + int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; + return (imm18 + pos); + } + } + // Check we have a branch or jump instruction. + DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr)); + // Do NOT change this to <<2. 
We rely on arithmetic shifts here, assuming + // the compiler uses arithmetic shifts for signed integers. + if (IsBranch(instr)) { + int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; + if (imm18 == kEndOfChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + return pos + kBranchPCOffset + imm18; + } + } else if (IsLui(instr)) { + Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); + Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); + Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize); + DCHECK(IsOri(instr_ori)); + DCHECK(IsOri(instr_ori2)); + + // TODO(plind) create named constants for shift values. + int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48; + imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32; + imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16; + // Sign extend address; + imm >>= 16; + + if (imm == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos); + int64_t delta = instr_address - imm; + DCHECK(pos > delta); + return pos - delta; + } + } else { + int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; + if (imm28 == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos); + instr_address &= kImm28Mask; + int64_t delta = instr_address - imm28; + DCHECK(pos > delta); + return pos - delta; + } + } +} + + +void Assembler::target_at_put(int64_t pos, int64_t target_pos) { + Instr instr = instr_at(pos); + if ((instr & ~kImm16Mask) == 0) { + DCHECK(target_pos == kEndOfChain || target_pos >= 0); + // Emitted label constant, not part of a branch. + // Make label relative to Code* of generated Code object. 
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + return; + } + + DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr)); + if (IsBranch(instr)) { + int32_t imm18 = target_pos - (pos + kBranchPCOffset); + DCHECK((imm18 & 3) == 0); + + instr &= ~kImm16Mask; + int32_t imm16 = imm18 >> 2; + DCHECK(is_int16(imm16)); + + instr_at_put(pos, instr | (imm16 & kImm16Mask)); + } else if (IsLui(instr)) { + Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); + Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); + Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize); + DCHECK(IsOri(instr_ori)); + DCHECK(IsOri(instr_ori2)); + + uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos; + DCHECK((imm & 3) == 0); + + instr_lui &= ~kImm16Mask; + instr_ori &= ~kImm16Mask; + instr_ori2 &= ~kImm16Mask; + + instr_at_put(pos + 0 * Assembler::kInstrSize, + instr_lui | ((imm >> 32) & kImm16Mask)); + instr_at_put(pos + 1 * Assembler::kInstrSize, + instr_ori | ((imm >> 16) & kImm16Mask)); + instr_at_put(pos + 3 * Assembler::kInstrSize, + instr_ori2 | (imm & kImm16Mask)); + } else { + uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos; + imm28 &= kImm28Mask; + DCHECK((imm28 & 3) == 0); + + instr &= ~kImm26Mask; + uint32_t imm26 = imm28 >> 2; + DCHECK(is_uint26(imm26)); + + instr_at_put(pos, instr | (imm26 & kImm26Mask)); + } +} + + +void Assembler::print(Label* L) { + if (L->is_unused()) { + PrintF("unused label\n"); + } else if (L->is_bound()) { + PrintF("bound label to %d\n", L->pos()); + } else if (L->is_linked()) { + Label l = *L; + PrintF("unbound label"); + while (l.is_linked()) { + PrintF("@ %d ", l.pos()); + Instr instr = instr_at(l.pos()); + if ((instr & ~kImm16Mask) == 0) { + PrintF("value\n"); + } else { + PrintF("%d\n", instr); + } + next(&l); + } + } else { + PrintF("label in inconsistent state (pos = %d)\n", L->pos_); + } +} + + +void Assembler::bind_to(Label* L, int pos) { + DCHECK(0 <= pos && pos <= 
pc_offset()); // Must have valid binding position. + int32_t trampoline_pos = kInvalidSlotPos; + if (L->is_linked() && !trampoline_emitted_) { + unbound_labels_count_--; + next_buffer_check_ += kTrampolineSlotsSize; + } + + while (L->is_linked()) { + int32_t fixup_pos = L->pos(); + int32_t dist = pos - fixup_pos; + next(L); // Call next before overwriting link with target at fixup_pos. + Instr instr = instr_at(fixup_pos); + if (IsBranch(instr)) { + if (dist > kMaxBranchOffset) { + if (trampoline_pos == kInvalidSlotPos) { + trampoline_pos = get_trampoline_entry(fixup_pos); + CHECK(trampoline_pos != kInvalidSlotPos); + } + DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); + target_at_put(fixup_pos, trampoline_pos); + fixup_pos = trampoline_pos; + dist = pos - fixup_pos; + } + target_at_put(fixup_pos, pos); + } else { + DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr)); + target_at_put(fixup_pos, pos); + } + } + L->bind_to(pos); + + // Keep track of the last bound label so we don't eliminate any instructions + // before a bound label. + if (pos > last_bound_pos_) + last_bound_pos_ = pos; +} + + +void Assembler::bind(Label* L) { + DCHECK(!L->is_bound()); // Label can only be bound once. + bind_to(L, pc_offset()); +} + + +void Assembler::next(Label* L) { + DCHECK(L->is_linked()); + int link = target_at(L->pos()); + if (link == kEndOfChain) { + L->Unuse(); + } else { + DCHECK(link >= 0); + L->link_to(link); + } +} + + +bool Assembler::is_near(Label* L) { + if (L->is_bound()) { + return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize); + } + return false; +} + + +// We have to use a temporary register for things that can be relocated even +// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction +// space. There is no guarantee that the relocated location can be similarly +// encoded. 
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) { + return !RelocInfo::IsNone(rmode); +} + +void Assembler::GenInstrRegister(Opcode opcode, + Register rs, + Register rt, + Register rd, + uint16_t sa, + SecondaryField func) { + DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (rd.code() << kRdShift) | (sa << kSaShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + Register rs, + Register rt, + uint16_t msb, + uint16_t lsb, + SecondaryField func) { + DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (msb << kRdShift) | (lsb << kSaShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + SecondaryField fmt, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func) { + DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid()); + Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) + | (fd.code() << kFdShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + FPURegister fr, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func) { + DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); + Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) + | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + SecondaryField fmt, + Register rt, + FPURegister fs, + FPURegister fd, + SecondaryField func) { + DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid()); + Instr instr = opcode | fmt | (rt.code() << kRtShift) + | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, + SecondaryField fmt, + Register rt, + FPUControlRegister fs, 
+ SecondaryField func) { + DCHECK(fs.is_valid() && rt.is_valid()); + Instr instr = + opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; + emit(instr); +} + + +// Instructions with immediate value. +// Registers are in the order of the instruction encoding, from left to right. +void Assembler::GenInstrImmediate(Opcode opcode, + Register rs, + Register rt, + int32_t j) { + DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (j & kImm16Mask); + emit(instr); +} + + +void Assembler::GenInstrImmediate(Opcode opcode, + Register rs, + SecondaryField SF, + int32_t j) { + DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); + emit(instr); +} + + +void Assembler::GenInstrImmediate(Opcode opcode, + Register rs, + FPURegister ft, + int32_t j) { + DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) + | (j & kImm16Mask); + emit(instr); +} + + +void Assembler::GenInstrJump(Opcode opcode, + uint32_t address) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(is_uint26(address)); + Instr instr = opcode | address; + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +// Returns the next free trampoline entry. +int32_t Assembler::get_trampoline_entry(int32_t pos) { + int32_t trampoline_entry = kInvalidSlotPos; + if (!internal_trampoline_exception_) { + if (trampoline_.start() > pos) { + trampoline_entry = trampoline_.take_slot(); + } + + if (kInvalidSlotPos == trampoline_entry) { + internal_trampoline_exception_ = true; + } + } + return trampoline_entry; +} + + +uint64_t Assembler::jump_address(Label* L) { + int64_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. 
+ L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + return kEndOfJumpChain; + } + } + + uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos; + DCHECK((imm & 3) == 0); + + return imm; +} + + +int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); + DCHECK((offset & 3) == 0); + DCHECK(is_int16(offset >> 2)); + + return offset; +} + + +int32_t Assembler::branch_offset_compact(Label* L, + bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - pc_offset(); + DCHECK((offset & 3) == 0); + DCHECK(is_int16(offset >> 2)); + + return offset; +} + + +int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); + DCHECK((offset & 3) == 0); + DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width. 
+ + return offset; +} + + +int32_t Assembler::branch_offset21_compact(Label* L, + bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - pc_offset(); + DCHECK((offset & 3) == 0); + DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width. + + return offset; +} + + +void Assembler::label_at_put(Label* L, int at_offset) { + int target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. + int32_t imm18 = target_pos - at_offset; + DCHECK((imm18 & 3) == 0); + int32_t imm16 = imm18 >> 2; + DCHECK(is_int16(imm16)); + instr_at_put(at_offset, (imm16 & kImm16Mask)); + } else { + target_pos = kEndOfChain; + instr_at_put(at_offset, 0); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + } + L->link_to(at_offset); + } +} + + +//------- Branch and jump instructions -------- + +void Assembler::b(int16_t offset) { + beq(zero_reg, zero_reg, offset); +} + + +void Assembler::bal(int16_t offset) { + positions_recorder()->WriteRecordedPositions(); + bgezal(zero_reg, offset); +} + + +void Assembler::beq(Register rs, Register rt, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BEQ, rs, rt, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +void Assembler::bgez(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BGEZ, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. 
+} + + +void Assembler::bgezc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZL, rt, rt, offset); +} + + +void Assembler::bgeuc(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + DCHECK(!(rt.is(zero_reg))); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BLEZ, rs, rt, offset); +} + + +void Assembler::bgec(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + DCHECK(!(rt.is(zero_reg))); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BLEZL, rs, rt, offset); +} + + +void Assembler::bgezal(Register rs, int16_t offset) { + DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg)); + BlockTrampolinePoolScope block_trampoline_pool(this); + positions_recorder()->WriteRecordedPositions(); + GenInstrImmediate(REGIMM, rs, BGEZAL, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +void Assembler::bgtz(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BGTZ, rs, zero_reg, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +void Assembler::bgtzc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZL, zero_reg, rt, offset); +} + + +void Assembler::blez(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BLEZ, rs, zero_reg, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. 
+} + + +void Assembler::blezc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZL, zero_reg, rt, offset); +} + + +void Assembler::bltzc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZL, rt, rt, offset); +} + + +void Assembler::bltuc(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + DCHECK(!(rt.is(zero_reg))); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BGTZ, rs, rt, offset); +} + + +void Assembler::bltc(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + DCHECK(!(rt.is(zero_reg))); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BGTZL, rs, rt, offset); +} + + +void Assembler::bltz(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BLTZ, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +void Assembler::bltzal(Register rs, int16_t offset) { + DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg)); + BlockTrampolinePoolScope block_trampoline_pool(this); + positions_recorder()->WriteRecordedPositions(); + GenInstrImmediate(REGIMM, rs, BLTZAL, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +void Assembler::bne(Register rs, Register rt, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BNE, rs, rt, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. 
+} + + +void Assembler::bovc(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + DCHECK(rs.code() >= rt.code()); + GenInstrImmediate(ADDI, rs, rt, offset); +} + + +void Assembler::bnvc(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + DCHECK(rs.code() >= rt.code()); + GenInstrImmediate(DADDI, rs, rt, offset); +} + + +void Assembler::blezalc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZ, zero_reg, rt, offset); +} + + +void Assembler::bgezalc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZ, rt, rt, offset); +} + + +void Assembler::bgezall(Register rs, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + GenInstrImmediate(REGIMM, rs, BGEZALL, offset); +} + + +void Assembler::bltzalc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZ, rt, rt, offset); +} + + +void Assembler::bgtzalc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZ, zero_reg, rt, offset); +} + + +void Assembler::beqzalc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(ADDI, zero_reg, rt, offset); +} + + +void Assembler::bnezalc(Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rt.is(zero_reg))); + GenInstrImmediate(DADDI, zero_reg, rt, offset); +} + + +void Assembler::beqc(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(rs.code() < rt.code()); + GenInstrImmediate(ADDI, rs, rt, offset); +} + + +void Assembler::beqzc(Register rs, int32_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + Instr 
instr = BEQZC | (rs.code() << kRsShift) | offset; + emit(instr); +} + + +void Assembler::bnec(Register rs, Register rt, int16_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(rs.code() < rt.code()); + GenInstrImmediate(DADDI, rs, rt, offset); +} + + +void Assembler::bnezc(Register rs, int32_t offset) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(!(rs.is(zero_reg))); + Instr instr = BNEZC | (rs.code() << kRsShift) | offset; + emit(instr); +} + + +void Assembler::j(int64_t target) { +#if DEBUG + // Get pc of delay slot. + uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); + bool in_range = (ipc ^ static_cast<uint64_t>(target) >> + (kImm26Bits + kImmFieldShift)) == 0; + DCHECK(in_range && ((target & 3) == 0)); +#endif + GenInstrJump(J, target >> 2); +} + + +void Assembler::jr(Register rs) { + if (kArchVariant != kMips64r6) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (rs.is(ra)) { + positions_recorder()->WriteRecordedPositions(); + } + GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); + BlockTrampolinePoolFor(1); // For associated delay slot. + } else { + jalr(rs, zero_reg); + } +} + + +void Assembler::jal(int64_t target) { +#ifdef DEBUG + // Get pc of delay slot. + uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); + bool in_range = (ipc ^ static_cast<uint64_t>(target) >> + (kImm26Bits + kImmFieldShift)) == 0; + DCHECK(in_range && ((target & 3) == 0)); +#endif + positions_recorder()->WriteRecordedPositions(); + GenInstrJump(JAL, target >> 2); +} + + +void Assembler::jalr(Register rs, Register rd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + positions_recorder()->WriteRecordedPositions(); + GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + + +void Assembler::j_or_jr(int64_t target, Register rs) { + // Get pc of delay slot. 
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); + bool in_range = (ipc ^ static_cast<uint64_t>(target) >> + (kImm26Bits + kImmFieldShift)) == 0; + if (in_range) { + j(target); + } else { + jr(t9); + } +} + + +void Assembler::jal_or_jalr(int64_t target, Register rs) { + // Get pc of delay slot. + uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); + bool in_range = (ipc ^ static_cast<uint64_t>(target) >> + (kImm26Bits+kImmFieldShift)) == 0; + if (in_range) { + jal(target); + } else { + jalr(t9); + } +} + + +// -------Data-processing-instructions--------- + +// Arithmetic. + +void Assembler::addu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); +} + + +void Assembler::addiu(Register rd, Register rs, int32_t j) { + GenInstrImmediate(ADDIU, rs, rd, j); +} + + +void Assembler::subu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); +} + + +void Assembler::mul(Register rd, Register rs, Register rt) { + if (kArchVariant == kMips64r6) { + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH); + } else { + GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); + } +} + + +void Assembler::muh(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH); +} + + +void Assembler::mulu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U); +} + + +void Assembler::muhu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U); +} + + +void Assembler::dmul(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH); +} + + +void Assembler::dmuh(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, 
D_MUL_MUH); +} + + +void Assembler::dmulu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U); +} + + +void Assembler::dmuhu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U); +} + + +void Assembler::mult(Register rs, Register rt) { + DCHECK(kArchVariant != kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); +} + + +void Assembler::multu(Register rs, Register rt) { + DCHECK(kArchVariant != kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); +} + + +void Assembler::daddiu(Register rd, Register rs, int32_t j) { + GenInstrImmediate(DADDIU, rs, rd, j); +} + + +void Assembler::div(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV); +} + + +void Assembler::div(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD); +} + + +void Assembler::mod(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD); +} + + +void Assembler::divu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); +} + + +void Assembler::divu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U); +} + + +void Assembler::modu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U); +} + + +void Assembler::daddu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU); +} + + +void Assembler::dsubu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU); +} + + +void Assembler::dmult(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 
0, DMULT); +} + + +void Assembler::dmultu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU); +} + + +void Assembler::ddiv(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV); +} + + +void Assembler::ddiv(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD); +} + + +void Assembler::dmod(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD); +} + + +void Assembler::ddivu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU); +} + + +void Assembler::ddivu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U); +} + + +void Assembler::dmodu(Register rd, Register rs, Register rt) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U); +} + + +// Logical. + +void Assembler::and_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND); +} + + +void Assembler::andi(Register rt, Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(ANDI, rs, rt, j); +} + + +void Assembler::or_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR); +} + + +void Assembler::ori(Register rt, Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(ORI, rs, rt, j); +} + + +void Assembler::xor_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR); +} + + +void Assembler::xori(Register rt, Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(XORI, rs, rt, j); +} + + +void Assembler::nor(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR); +} + + +// Shifts. 
+void Assembler::sll(Register rd, + Register rt, + uint16_t sa, + bool coming_from_nop) { + // Don't allow nop instructions in the form sll zero_reg, zero_reg to be + // generated using the sll instruction. They must be generated using + // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo + // instructions. + DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg))); + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL); +} + + +void Assembler::sllv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV); +} + + +void Assembler::srl(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL); +} + + +void Assembler::srlv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV); +} + + +void Assembler::sra(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA); +} + + +void Assembler::srav(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); +} + + +void Assembler::rotr(Register rd, Register rt, uint16_t sa) { + // Should be called via MacroAssembler::Ror. + DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa)); + DCHECK(kArchVariant == kMips64r2); + Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) + | (rd.code() << kRdShift) | (sa << kSaShift) | SRL; + emit(instr); +} + + +void Assembler::rotrv(Register rd, Register rt, Register rs) { + // Should be called via MacroAssembler::Ror. 
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() ); + DCHECK(kArchVariant == kMips64r2); + Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV; + emit(instr); +} + + +void Assembler::dsll(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL); +} + + +void Assembler::dsllv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV); +} + + +void Assembler::dsrl(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL); +} + + +void Assembler::dsrlv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV); +} + + +void Assembler::drotr(Register rd, Register rt, uint16_t sa) { + DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa)); + Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) + | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL; + emit(instr); +} + + +void Assembler::drotrv(Register rd, Register rt, Register rs) { + DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() ); + Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) + | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV; + emit(instr); +} + + +void Assembler::dsra(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA); +} + + +void Assembler::dsrav(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV); +} + + +void Assembler::dsll32(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32); +} + + +void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32); +} + + +void Assembler::dsra32(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32); +} + + +// ------------Memory-instructions------------- + +// Helper for 
base-reg + offset, when offset is larger than int16. +void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { + DCHECK(!src.rm().is(at)); + DCHECK(is_int32(src.offset_)); + daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask); + dsll(at, at, kLuiShift); + ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. + daddu(at, at, src.rm()); // Add base register. +} + + +void Assembler::lb(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0)); + } +} + + +void Assembler::lbu(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0)); + } +} + + +void Assembler::lh(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0)); + } +} + + +void Assembler::lhu(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0)); + } +} + + +void Assembler::lw(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. 
+ LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); + } +} + + +void Assembler::lwu(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LWU, at, rd, 0); // Equiv to lwu(rd, MemOperand(at, 0)); + } +} + + +void Assembler::lwl(Register rd, const MemOperand& rs) { + GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); +} + + +void Assembler::lwr(Register rd, const MemOperand& rs) { + GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); +} + + +void Assembler::sb(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to store. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0)); + } +} + + +void Assembler::sh(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to store. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0)); + } +} + + +void Assembler::sw(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to store. 
+ LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); + } +} + + +void Assembler::swl(Register rd, const MemOperand& rs) { + GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); +} + + +void Assembler::swr(Register rd, const MemOperand& rs) { + GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); +} + + +void Assembler::lui(Register rd, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(LUI, zero_reg, rd, j); +} + + +void Assembler::aui(Register rs, Register rt, int32_t j) { + // This instruction uses same opcode as 'lui'. The difference in encoding is + // 'lui' has zero reg. for rs field. + DCHECK(is_uint16(j)); + GenInstrImmediate(LUI, rs, rt, j); +} + + +void Assembler::daui(Register rs, Register rt, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(DAUI, rs, rt, j); +} + + +void Assembler::dahi(Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(REGIMM, rs, DAHI, j); +} + + +void Assembler::dati(Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(REGIMM, rs, DATI, j); +} + + +void Assembler::ldl(Register rd, const MemOperand& rs) { + GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_); +} + + +void Assembler::ldr(Register rd, const MemOperand& rs) { + GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_); +} + + +void Assembler::sdl(Register rd, const MemOperand& rs) { + GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_); +} + + +void Assembler::sdr(Register rd, const MemOperand& rs) { + GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_); +} + + +void Assembler::ld(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(LD, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to load. 
+ LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(LD, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); + } +} + + +void Assembler::sd(Register rd, const MemOperand& rs) { + if (is_int16(rs.offset_)) { + GenInstrImmediate(SD, rs.rm(), rd, rs.offset_); + } else { // Offset > 16 bits, use multiple instructions to store. + LoadRegPlusOffsetToAt(rs); + GenInstrImmediate(SD, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); + } +} + + +// -------------Misc-instructions-------------- + +// Break / Trap instructions. +void Assembler::break_(uint32_t code, bool break_as_stop) { + DCHECK((code & ~0xfffff) == 0); + // We need to invalidate breaks that could be stops as well because the + // simulator expects a char pointer after the stop instruction. + // See constants-mips.h for explanation. + DCHECK((break_as_stop && + code <= kMaxStopCode && + code > kMaxWatchpointCode) || + (!break_as_stop && + (code > kMaxStopCode || + code <= kMaxWatchpointCode))); + Instr break_instr = SPECIAL | BREAK | (code << 6); + emit(break_instr); +} + + +void Assembler::stop(const char* msg, uint32_t code) { + DCHECK(code > kMaxWatchpointCode); + DCHECK(code <= kMaxStopCode); +#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) + break_(0x54321); +#else // V8_HOST_ARCH_MIPS + BlockTrampolinePoolFor(3); + // The Simulator will handle the stop instruction and get the message address. + // On MIPS stop() is just a special kind of break_(). 
+ break_(code, true); + emit(reinterpret_cast<uint64_t>(msg)); +#endif +} + + +void Assembler::tge(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = SPECIAL | TGE | rs.code() << kRsShift + | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tgeu(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = SPECIAL | TGEU | rs.code() << kRsShift + | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tlt(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tltu(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TLTU | rs.code() << kRsShift + | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::teq(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +void Assembler::tne(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + + +// Move from HI/LO register. + +void Assembler::mfhi(Register rd) { + GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI); +} + + +void Assembler::mflo(Register rd) { + GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO); +} + + +// Set on less than instructions. 
+void Assembler::slt(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT); +} + + +void Assembler::sltu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU); +} + + +void Assembler::slti(Register rt, Register rs, int32_t j) { + GenInstrImmediate(SLTI, rs, rt, j); +} + + +void Assembler::sltiu(Register rt, Register rs, int32_t j) { + GenInstrImmediate(SLTIU, rs, rt, j); +} + + +// Conditional move. +void Assembler::movz(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ); +} + + +void Assembler::movn(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); +} + + +void Assembler::movt(Register rd, Register rs, uint16_t cc) { + Register rt; + rt.code_ = (cc & 0x0007) << 2 | 1; + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); +} + + +void Assembler::movf(Register rd, Register rs, uint16_t cc) { + Register rt; + rt.code_ = (cc & 0x0007) << 2 | 0; + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); +} + + +void Assembler::sel(SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs, uint8_t sel) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(fmt == D); + DCHECK(fmt == S); + + Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | SEL; + emit(instr); +} + + +// GPR. +void Assembler::seleqz(Register rs, Register rt, Register rd) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S); +} + + +// FPR. +void Assembler::seleqz(SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(fmt == D); + DCHECK(fmt == S); + + Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C; + emit(instr); +} + + +// GPR. 
+void Assembler::selnez(Register rs, Register rt, Register rd) { + DCHECK(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S); +} + + +// FPR. +void Assembler::selnez(SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs) { + DCHECK(kArchVariant == kMips64r6); + DCHECK(fmt == D); + DCHECK(fmt == S); + + Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C; + emit(instr); +} + + +// Bit twiddling. +void Assembler::clz(Register rd, Register rs) { + if (kArchVariant != kMips64r6) { + // Clz instr requires same GPR number in 'rd' and 'rt' fields. + GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); + } else { + GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6); + } +} + + +void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { + // Should be called via MacroAssembler::Ins. + // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. + DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6)); + GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); +} + + +void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { + // Should be called via MacroAssembler::Ext. + // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. + DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); +} + + +void Assembler::pref(int32_t hint, const MemOperand& rs) { + DCHECK(is_uint5(hint) && is_uint16(rs.offset_)); + Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) + | (rs.offset_); + emit(instr); +} + + +// --------Coprocessor-instructions---------------- + +// Load, store, move. 
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); +} + + +void Assembler::ldc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); +} + + +void Assembler::swc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); +} + + +void Assembler::sdc1(FPURegister fd, const MemOperand& src) { + GenInstrImmediate(SDC1, src.rm(), fd, src.offset_); +} + + +void Assembler::mtc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MTC1, rt, fs, f0); +} + + +void Assembler::mthc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MTHC1, rt, fs, f0); +} + + +void Assembler::dmtc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, DMTC1, rt, fs, f0); +} + + +void Assembler::mfc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MFC1, rt, fs, f0); +} + + +void Assembler::mfhc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MFHC1, rt, fs, f0); +} + + +void Assembler::dmfc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, DMFC1, rt, fs, f0); +} + + +void Assembler::ctc1(Register rt, FPUControlRegister fs) { + GenInstrRegister(COP1, CTC1, rt, fs); +} + + +void Assembler::cfc1(Register rt, FPUControlRegister fs) { + GenInstrRegister(COP1, CFC1, rt, fs); +} + + +void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { + uint64_t i; + memcpy(&i, &d, 8); + + *lo = i & 0xffffffff; + *hi = i >> 32; +} + + +// Arithmetic. 
+ +void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, ADD_D); +} + + +void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, SUB_D); +} + + +void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, MUL_D); +} + + +void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); +} + + +void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, DIV_D); +} + + +void Assembler::abs_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, ABS_D); +} + + +void Assembler::mov_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, MOV_D); +} + + +void Assembler::neg_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, NEG_D); +} + + +void Assembler::sqrt_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D); +} + + +// Conversions. 
+ +void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S); +} + + +void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D); +} + + +void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S); +} + + +void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D); +} + + +void Assembler::round_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S); +} + + +void Assembler::round_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D); +} + + +void Assembler::floor_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S); +} + + +void Assembler::floor_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D); +} + + +void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); +} + + +void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); +} + + +void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { + DCHECK(kArchVariant == kMips64r2); + GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); +} + + +void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { + DCHECK(kArchVariant == kMips64r2); + GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); +} + + +void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { + DCHECK(kArchVariant == kMips64r2); + GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); +} + + +void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { + DCHECK(kArchVariant == kMips64r2); + GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); +} + + +void Assembler::round_l_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); +} + + +void Assembler::round_l_d(FPURegister fd, FPURegister fs) { + 
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D); +} + + +void Assembler::floor_l_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S); +} + + +void Assembler::floor_l_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D); +} + + +void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S); +} + + +void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); +} + + +void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + DCHECK(kArchVariant == kMips64r6); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MIN); +} + + +void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + DCHECK(kArchVariant == kMips64r6); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MINA); +} + + +void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + DCHECK(kArchVariant == kMips64r6); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MAX); +} + + +void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + DCHECK(kArchVariant == kMips64r6); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA); +} + + +void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); +} + + +void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { + DCHECK(kArchVariant == kMips64r2); + GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); +} + + +void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); +} + + +void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); +} + + +void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { + 
DCHECK(kArchVariant == kMips64r2); + GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); +} + + +void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); +} + + +// Conditions for >= MIPSr6. +void Assembler::cmp(FPUCondition cond, SecondaryField fmt, + FPURegister fd, FPURegister fs, FPURegister ft) { + DCHECK(kArchVariant == kMips64r6); + DCHECK((fmt & ~(31 << kRsShift)) == 0); + Instr instr = COP1 | fmt | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond; + emit(instr); +} + + +void Assembler::bc1eqz(int16_t offset, FPURegister ft) { + DCHECK(kArchVariant == kMips64r6); + Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask); + emit(instr); +} + + +void Assembler::bc1nez(int16_t offset, FPURegister ft) { + DCHECK(kArchVariant == kMips64r6); + Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask); + emit(instr); +} + + +// Conditions for < MIPSr6. +void Assembler::c(FPUCondition cond, SecondaryField fmt, + FPURegister fs, FPURegister ft, uint16_t cc) { + DCHECK(kArchVariant != kMips64r6); + DCHECK(is_uint3(cc)); + DCHECK((fmt & ~(31 << kRsShift)) == 0); + Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift + | cc << 8 | 3 << 4 | cond; + emit(instr); +} + + +void Assembler::fcmp(FPURegister src1, const double src2, + FPUCondition cond) { + DCHECK(src2 == 0.0); + mtc1(zero_reg, f14); + cvt_d_w(f14, f14); + c(cond, D, src1, f14, 0); +} + + +void Assembler::bc1f(int16_t offset, uint16_t cc) { + DCHECK(is_uint3(cc)); + Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); + emit(instr); +} + + +void Assembler::bc1t(int16_t offset, uint16_t cc) { + DCHECK(is_uint3(cc)); + Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); + emit(instr); +} + + +// Debugging. 
+void Assembler::RecordJSReturn() { + positions_recorder()->WriteRecordedPositions(); + CheckBuffer(); + RecordRelocInfo(RelocInfo::JS_RETURN); +} + + +void Assembler::RecordDebugBreakSlot() { + positions_recorder()->WriteRecordedPositions(); + CheckBuffer(); + RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); +} + + +void Assembler::RecordComment(const char* msg) { + if (FLAG_code_comments) { + CheckBuffer(); + RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); + } +} + + +int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { + Instr instr = instr_at(pc); + DCHECK(IsJ(instr) || IsLui(instr)); + if (IsLui(instr)) { + Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); + Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); + Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize); + DCHECK(IsOri(instr_ori)); + DCHECK(IsOri(instr_ori2)); + // TODO(plind): symbolic names for the shifts. + int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48; + imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32; + imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16; + // Sign extend address. + imm >>= 16; + + if (imm == kEndOfJumpChain) { + return 0; // Number of instructions patched. + } + imm += pc_delta; + DCHECK((imm & 3) == 0); + + instr_lui &= ~kImm16Mask; + instr_ori &= ~kImm16Mask; + instr_ori2 &= ~kImm16Mask; + + instr_at_put(pc + 0 * Assembler::kInstrSize, + instr_lui | ((imm >> 32) & kImm16Mask)); + instr_at_put(pc + 1 * Assembler::kInstrSize, + instr_ori | (imm >> 16 & kImm16Mask)); + instr_at_put(pc + 3 * Assembler::kInstrSize, + instr_ori2 | (imm & kImm16Mask)); + return 4; // Number of instructions patched. + } else { + uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; + if (static_cast<int32_t>(imm28) == kEndOfJumpChain) { + return 0; // Number of instructions patched. 
+ } + + imm28 += pc_delta; + imm28 &= kImm28Mask; + DCHECK((imm28 & 3) == 0); + + instr &= ~kImm26Mask; + uint32_t imm26 = imm28 >> 2; + DCHECK(is_uint26(imm26)); + + instr_at_put(pc, instr | (imm26 & kImm26Mask)); + return 1; // Number of instructions patched. + } +} + + +void Assembler::GrowBuffer() { + if (!own_buffer_) FATAL("external code buffer is too small"); + + // Compute new buffer size. + CodeDesc desc; // The new buffer. + if (buffer_size_ < 1 * MB) { + desc.buffer_size = 2*buffer_size_; + } else { + desc.buffer_size = buffer_size_ + 1*MB; + } + CHECK_GT(desc.buffer_size, 0); // No overflow. + + // Set up new buffer. + desc.buffer = NewArray<byte>(desc.buffer_size); + + desc.instr_size = pc_offset(); + desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); + + // Copy the data. + intptr_t pc_delta = desc.buffer - buffer_; + intptr_t rc_delta = (desc.buffer + desc.buffer_size) - + (buffer_ + buffer_size_); + MemMove(desc.buffer, buffer_, desc.instr_size); + MemMove(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.pos(), desc.reloc_size); + + // Switch buffers. + DeleteArray(buffer_); + buffer_ = desc.buffer; + buffer_size_ = desc.buffer_size; + pc_ += pc_delta; + reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.last_pc() + pc_delta); + + // Relocate runtime entries. 
+ for (RelocIterator it(desc); !it.done(); it.next()) { + RelocInfo::Mode rmode = it.rinfo()->rmode(); + if (rmode == RelocInfo::INTERNAL_REFERENCE) { + byte* p = reinterpret_cast<byte*>(it.rinfo()->pc()); + RelocateInternalReference(p, pc_delta); + } + } + + DCHECK(!overflow()); +} + + +void Assembler::db(uint8_t data) { + CheckBuffer(); + *reinterpret_cast<uint8_t*>(pc_) = data; + pc_ += sizeof(uint8_t); +} + + +void Assembler::dd(uint32_t data) { + CheckBuffer(); + *reinterpret_cast<uint32_t*>(pc_) = data; + pc_ += sizeof(uint32_t); +} + + +void Assembler::emit_code_stub_address(Code* stub) { + CheckBuffer(); + *reinterpret_cast<uint64_t*>(pc_) = + reinterpret_cast<uint64_t>(stub->instruction_start()); + pc_ += sizeof(uint64_t); +} + + +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { + // We do not try to reuse pool constants. + RelocInfo rinfo(pc_, rmode, data, NULL); + if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { + // Adjust code for new modes. + DCHECK(RelocInfo::IsDebugBreakSlot(rmode) + || RelocInfo::IsJSReturn(rmode) + || RelocInfo::IsComment(rmode) + || RelocInfo::IsPosition(rmode)); + // These modes do not need an entry in the constant pool. + } + if (!RelocInfo::IsNone(rinfo.rmode())) { + // Don't record external references unless the heap will be serialized. + if (rmode == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; + } + DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. 
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { + RelocInfo reloc_info_with_ast_id(pc_, + rmode, + RecordedAstId().ToInt(), + NULL); + ClearRecordedAstId(); + reloc_info_writer.Write(&reloc_info_with_ast_id); + } else { + reloc_info_writer.Write(&rinfo); + } + } +} + + +void Assembler::BlockTrampolinePoolFor(int instructions) { + BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); +} + + +void Assembler::CheckTrampolinePool() { + // Some small sequences of instructions must not be broken up by the + // insertion of a trampoline pool; such sequences are protected by setting + // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, + // which are both checked here. Also, recursive calls to CheckTrampolinePool + // are blocked by trampoline_pool_blocked_nesting_. + if ((trampoline_pool_blocked_nesting_ > 0) || + (pc_offset() < no_trampoline_pool_before_)) { + // Emission is currently blocked; make sure we try again as soon as + // possible. + if (trampoline_pool_blocked_nesting_ > 0) { + next_buffer_check_ = pc_offset() + kInstrSize; + } else { + next_buffer_check_ = no_trampoline_pool_before_; + } + return; + } + + DCHECK(!trampoline_emitted_); + DCHECK(unbound_labels_count_ >= 0); + if (unbound_labels_count_ > 0) { + // First we emit jump (2 instructions), then we emit trampoline pool. + { BlockTrampolinePoolScope block_trampoline_pool(this); + Label after_pool; + b(&after_pool); + nop(); + + int pool_start = pc_offset(); + for (int i = 0; i < unbound_labels_count_; i++) { + uint64_t imm64; + imm64 = jump_address(&after_pool); + { BlockGrowBufferScope block_buf_growth(this); + // Buffer growth (and relocation) must be blocked for internal + // references until associated instructions are emitted and available + // to be patched. + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); + // TODO(plind): Verify this, presume I cannot use macro-assembler + // here. 
+ lui(at, (imm64 >> 32) & kImm16Mask); + ori(at, at, (imm64 >> 16) & kImm16Mask); + dsll(at, at, 16); + ori(at, at, imm64 & kImm16Mask); + } + jr(at); + nop(); + } + bind(&after_pool); + trampoline_ = Trampoline(pool_start, unbound_labels_count_); + + trampoline_emitted_ = true; + // As we are only going to emit trampoline once, we need to prevent any + // further emission. + next_buffer_check_ = kMaxInt; + } + } else { + // Number of branches to unbound label at this point is zero, so we can + // move next buffer check to maximum. + next_buffer_check_ = pc_offset() + + kMaxBranchOffset - kTrampolineSlotsSize * 16; + } + return; +} + + +Address Assembler::target_address_at(Address pc) { + Instr instr0 = instr_at(pc); + Instr instr1 = instr_at(pc + 1 * kInstrSize); + Instr instr3 = instr_at(pc + 3 * kInstrSize); + + // Interpret 4 instructions for address generated by li: See listing in + // Assembler::set_target_address_at() just below. + if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) && + (GetOpcodeField(instr3) == ORI)) { + // Assemble the 48 bit value. + int64_t addr = static_cast<int64_t>( + ((uint64_t)(GetImmediate16(instr0)) << 32) | + ((uint64_t)(GetImmediate16(instr1)) << 16) | + ((uint64_t)(GetImmediate16(instr3)))); + + // Sign extend to get canonical address. + addr = (addr << 16) >> 16; + return reinterpret_cast<Address>(addr); + } + // We should never get here, force a bad address if we do. + UNREACHABLE(); + return (Address)0x0; +} + + +// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32 +// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap +// snapshot generated on ia32, the resulting MIPS sNaN must be quieted. +// OS::nan_value() returns a qNaN. 
+void Assembler::QuietNaN(HeapObject* object) { + HeapNumber::cast(object)->set_value(base::OS::nan_value()); +} + + +// On Mips64, a target address is stored in a 4-instruction sequence: +// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask); +// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); +// 2: dsll(rd, rd, 16); +// 3: ori(rd, rd, j.imm32_ & kImm16Mask); +// +// Patching the address must replace all the lui & ori instructions, +// and flush the i-cache. +// +// There is an optimization below, which emits a nop when the address +// fits in just 16 bits. This is unlikely to help, and should be benchmarked, +// and possibly removed. +void Assembler::set_target_address_at(Address pc, + Address target, + ICacheFlushMode icache_flush_mode) { +// There is an optimization where only 4 instructions are used to load address +// in code on MIP64 because only 48-bits of address is effectively used. +// It relies on fact the upper [63:48] bits are not used for virtual address +// translation and they have to be set according to value of bit 47 in order +// get canonical address. + Instr instr1 = instr_at(pc + kInstrSize); + uint32_t rt_code = GetRt(instr1); + uint32_t* p = reinterpret_cast<uint32_t*>(pc); + uint64_t itarget = reinterpret_cast<uint64_t>(target); + +#ifdef DEBUG + // Check we have the result from a li macro-instruction. + Instr instr0 = instr_at(pc); + Instr instr3 = instr_at(pc + kInstrSize * 3); + CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI && + GetOpcodeField(instr3) == ORI)); +#endif + + // Must use 4 instructions to insure patchable code. + // lui rt, upper-16. + // ori rt, rt, lower-16. + // dsll rt, rt, 16. + // ori rt rt, lower-16. 
+ *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask); + *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) + | ((itarget >> 16) & kImm16Mask); + *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) + | (itarget & kImm16Mask); + + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize); + } +} + + +void Assembler::JumpLabelToJumpRegister(Address pc) { + // Address pc points to lui/ori instructions. + // Jump to label may follow at pc + 2 * kInstrSize. + uint32_t* p = reinterpret_cast<uint32_t*>(pc); +#ifdef DEBUG + Instr instr1 = instr_at(pc); +#endif + Instr instr2 = instr_at(pc + 1 * kInstrSize); + Instr instr3 = instr_at(pc + 6 * kInstrSize); + bool patched = false; + + if (IsJal(instr3)) { + DCHECK(GetOpcodeField(instr1) == LUI); + DCHECK(GetOpcodeField(instr2) == ORI); + + uint32_t rs_field = GetRt(instr2) << kRsShift; + uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. + *(p+6) = SPECIAL | rs_field | rd_field | JALR; + patched = true; + } else if (IsJ(instr3)) { + DCHECK(GetOpcodeField(instr1) == LUI); + DCHECK(GetOpcodeField(instr2) == ORI); + + uint32_t rs_field = GetRt(instr2) << kRsShift; + *(p+6) = SPECIAL | rs_field | JR; + patched = true; + } + + if (patched) { + CpuFeatures::FlushICache(pc+6, sizeof(int32_t)); + } +} + + +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { + // No out-of-line constant pool support. + DCHECK(!FLAG_enable_ool_constant_pool); + return isolate->factory()->empty_constant_pool_array(); +} + + +void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { + // No out-of-line constant pool support. 
+ DCHECK(!FLAG_enable_ool_constant_pool); + return; +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/assembler-mips64.h nodejs-0.11.15/deps/v8/src/mips64/assembler-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/assembler-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/assembler-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1416 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + + +#ifndef V8_MIPS_ASSEMBLER_MIPS_H_ +#define V8_MIPS_ASSEMBLER_MIPS_H_ + +#include <stdio.h> +#include "src/assembler.h" +#include "src/mips64/constants-mips64.h" +#include "src/serialize.h" + +namespace v8 { +namespace internal { + +// CPU Registers. +// +// 1) We would prefer to use an enum, but enum values are assignment- +// compatible with int, which has caused code-generation bugs. +// +// 2) We would prefer to use a class instead of a struct but we don't like +// the register initialization to depend on the particular initialization +// order (which appears to be different on OS X, Linux, and Windows for the +// installed versions of C++ we tried). Using a struct permits C-style +// "initialization". Also, the Register objects cannot be const as this +// forces initialization stubs in MSVC, making us dependent on initialization +// order. +// +// 3) By not using an enum, we are possibly preventing the compiler from +// doing certain constant folds, which may significantly reduce the +// code generated for some assembly instructions (because they boil down +// to a few constants). If this is a problem, we could change the code +// such that we use an enum in optimized mode, and the struct in debug +// mode. 
This way we get the compile-time error checking in debug mode +// and best performance in optimized code. + + +// ----------------------------------------------------------------------------- +// Implementation of Register and FPURegister. + +// Core register. +struct Register { + static const int kNumRegisters = v8::internal::kNumRegisters; + static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp. + static const int kSizeInBytes = 8; + static const int kCpRegister = 23; // cp (s7) is the 23rd register. + + inline static int NumAllocatableRegisters(); + + static int ToAllocationIndex(Register reg) { + DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || + reg.is(from_code(kCpRegister))); + return reg.is(from_code(kCpRegister)) ? + kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. + reg.code() - 2; // zero_reg and 'at' are skipped. + } + + static Register FromAllocationIndex(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + return index == kMaxNumAllocatableRegisters - 1 ? + from_code(kCpRegister) : // Last index is always the 'cp' register. + from_code(index + 2); // zero_reg and 'at' are skipped. + } + + static const char* AllocationIndexToString(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + const char* const names[] = { + "v0", + "v1", + "a0", + "a1", + "a2", + "a3", + "a4", + "a5", + "a6", + "a7", + "t0", + "t1", + "t2", + "s7", + }; + return names[index]; + } + + static Register from_code(int code) { + Register r = { code }; + return r; + } + + bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is(Register reg) const { return code_ == reg.code_; } + int code() const { + DCHECK(is_valid()); + return code_; + } + int bit() const { + DCHECK(is_valid()); + return 1 << code_; + } + + // Unfortunately we can't make this private in a struct. 
+ int code_; +}; + +#define REGISTER(N, C) \ + const int kRegister_ ## N ## _Code = C; \ + const Register N = { C } + +REGISTER(no_reg, -1); +// Always zero. +REGISTER(zero_reg, 0); +// at: Reserved for synthetic instructions. +REGISTER(at, 1); +// v0, v1: Used when returning multiple values from subroutines. +REGISTER(v0, 2); +REGISTER(v1, 3); +// a0 - a4: Used to pass non-FP parameters. +REGISTER(a0, 4); +REGISTER(a1, 5); +REGISTER(a2, 6); +REGISTER(a3, 7); +// a4 - a7 t0 - t3: Can be used without reservation, act as temporary registers +// and are allowed to be destroyed by subroutines. +REGISTER(a4, 8); +REGISTER(a5, 9); +REGISTER(a6, 10); +REGISTER(a7, 11); +REGISTER(t0, 12); +REGISTER(t1, 13); +REGISTER(t2, 14); +REGISTER(t3, 15); +// s0 - s7: Subroutine register variables. Subroutines that write to these +// registers must restore their values before exiting so that the caller can +// expect the values to be preserved. +REGISTER(s0, 16); +REGISTER(s1, 17); +REGISTER(s2, 18); +REGISTER(s3, 19); +REGISTER(s4, 20); +REGISTER(s5, 21); +REGISTER(s6, 22); +REGISTER(s7, 23); +REGISTER(t8, 24); +REGISTER(t9, 25); +// k0, k1: Reserved for system calls and interrupt handlers. +REGISTER(k0, 26); +REGISTER(k1, 27); +// gp: Reserved. +REGISTER(gp, 28); +// sp: Stack pointer. +REGISTER(sp, 29); +// fp: Frame pointer. +REGISTER(fp, 30); +// ra: Return address pointer. +REGISTER(ra, 31); + +#undef REGISTER + + +int ToNumber(Register reg); + +Register ToRegister(int num); + +// Coprocessor register. +struct FPURegister { + static const int kMaxNumRegisters = v8::internal::kNumFPURegisters; + + // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers + // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to + // number of Double regs (64-bit regs, or FPU-reg-pairs). + + // A few double registers are reserved: one as a scratch register and one to + // hold 0.0. + // f28: 0.0 + // f30: scratch register. 
+ static const int kNumReservedRegisters = 2; + static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 - + kNumReservedRegisters; + + inline static int NumRegisters(); + inline static int NumAllocatableRegisters(); + inline static int ToAllocationIndex(FPURegister reg); + static const char* AllocationIndexToString(int index); + + static FPURegister FromAllocationIndex(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + return from_code(index * 2); + } + + static FPURegister from_code(int code) { + FPURegister r = { code }; + return r; + } + + bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; } + bool is(FPURegister creg) const { return code_ == creg.code_; } + FPURegister low() const { + // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1. + // Find low reg of a Double-reg pair, which is the reg itself. + DCHECK(code_ % 2 == 0); // Specified Double reg must be even. + FPURegister reg; + reg.code_ = code_; + DCHECK(reg.is_valid()); + return reg; + } + FPURegister high() const { + // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1. + // Find high reg of a Doubel-reg pair, which is reg + 1. + DCHECK(code_ % 2 == 0); // Specified Double reg must be even. + FPURegister reg; + reg.code_ = code_ + 1; + DCHECK(reg.is_valid()); + return reg; + } + + int code() const { + DCHECK(is_valid()); + return code_; + } + int bit() const { + DCHECK(is_valid()); + return 1 << code_; + } + void setcode(int f) { + code_ = f; + DCHECK(is_valid()); + } + // Unfortunately we can't make this private in a struct. + int code_; +}; + +// V8 now supports the O32 ABI, and the FPU Registers are organized as 32 +// 32-bit registers, f0 through f31. When used as 'double' they are used +// in pairs, starting with the even numbered register. So a double operation +// on f0 really uses f0 and f1. 
+// (Modern mips hardware also supports 32 64-bit registers, via setting +// (privileged) Status Register FR bit to 1. This is used by the N32 ABI, +// but it is not in common use. Someday we will want to support this in v8.) + +// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers. +typedef FPURegister DoubleRegister; +typedef FPURegister FloatRegister; + +const FPURegister no_freg = { -1 }; + +const FPURegister f0 = { 0 }; // Return value in hard float mode. +const FPURegister f1 = { 1 }; +const FPURegister f2 = { 2 }; +const FPURegister f3 = { 3 }; +const FPURegister f4 = { 4 }; +const FPURegister f5 = { 5 }; +const FPURegister f6 = { 6 }; +const FPURegister f7 = { 7 }; +const FPURegister f8 = { 8 }; +const FPURegister f9 = { 9 }; +const FPURegister f10 = { 10 }; +const FPURegister f11 = { 11 }; +const FPURegister f12 = { 12 }; // Arg 0 in hard float mode. +const FPURegister f13 = { 13 }; +const FPURegister f14 = { 14 }; // Arg 1 in hard float mode. +const FPURegister f15 = { 15 }; +const FPURegister f16 = { 16 }; +const FPURegister f17 = { 17 }; +const FPURegister f18 = { 18 }; +const FPURegister f19 = { 19 }; +const FPURegister f20 = { 20 }; +const FPURegister f21 = { 21 }; +const FPURegister f22 = { 22 }; +const FPURegister f23 = { 23 }; +const FPURegister f24 = { 24 }; +const FPURegister f25 = { 25 }; +const FPURegister f26 = { 26 }; +const FPURegister f27 = { 27 }; +const FPURegister f28 = { 28 }; +const FPURegister f29 = { 29 }; +const FPURegister f30 = { 30 }; +const FPURegister f31 = { 31 }; + +// Register aliases. +// cp is assumed to be a callee saved register. +// Defined using #define instead of "static const Register&" because Clang +// complains otherwise when a compilation unit that includes this header +// doesn't use the variables. 
+#define kRootRegister s6 +#define cp s7 +#define kLithiumScratchReg s3 +#define kLithiumScratchReg2 s4 +#define kLithiumScratchDouble f30 +#define kDoubleRegZero f28 + +// FPU (coprocessor 1) control registers. +// Currently only FCSR (#31) is implemented. +struct FPUControlRegister { + bool is_valid() const { return code_ == kFCSRRegister; } + bool is(FPUControlRegister creg) const { return code_ == creg.code_; } + int code() const { + DCHECK(is_valid()); + return code_; + } + int bit() const { + DCHECK(is_valid()); + return 1 << code_; + } + void setcode(int f) { + code_ = f; + DCHECK(is_valid()); + } + // Unfortunately we can't make this private in a struct. + int code_; +}; + +const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; +const FPUControlRegister FCSR = { kFCSRRegister }; + + +// ----------------------------------------------------------------------------- +// Machine instruction Operands. +const int kSmiShift = kSmiTagSize + kSmiShiftSize; +const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1; +// Class Operand represents a shifter operand in data processing instructions. +class Operand BASE_EMBEDDED { + public: + // Immediate. + INLINE(explicit Operand(int64_t immediate, + RelocInfo::Mode rmode = RelocInfo::NONE64)); + INLINE(explicit Operand(const ExternalReference& f)); + INLINE(explicit Operand(const char* s)); + INLINE(explicit Operand(Object** opp)); + INLINE(explicit Operand(Context** cpp)); + explicit Operand(Handle<Object> handle); + INLINE(explicit Operand(Smi* value)); + + // Register. + INLINE(explicit Operand(Register rm)); + + // Return true if this is a register operand. + INLINE(bool is_reg() const); + + inline int64_t immediate() const { + DCHECK(!is_reg()); + return imm64_; + } + + Register rm() const { return rm_; } + + private: + Register rm_; + int64_t imm64_; // Valid if rm_ == no_reg. 
+ RelocInfo::Mode rmode_; + + friend class Assembler; + friend class MacroAssembler; +}; + + +// On MIPS we have only one adressing mode with base_reg + offset. +// Class MemOperand represents a memory operand in load and store instructions. +class MemOperand : public Operand { + public: + // Immediate value attached to offset. + enum OffsetAddend { + offset_minus_one = -1, + offset_zero = 0 + }; + + explicit MemOperand(Register rn, int64_t offset = 0); + explicit MemOperand(Register rn, int64_t unit, int64_t multiplier, + OffsetAddend offset_addend = offset_zero); + int32_t offset() const { return offset_; } + + bool OffsetIsInt16Encodable() const { + return is_int16(offset_); + } + + private: + int32_t offset_; + + friend class Assembler; +}; + + +class Assembler : public AssemblerBase { + public: + // Create an assembler. Instructions and relocation information are emitted + // into a buffer, with the instructions starting from the beginning and the + // relocation information starting from the end of the buffer. See CodeDesc + // for a detailed comment on the layout (globals.h). + // + // If the provided buffer is NULL, the assembler allocates and grows its own + // buffer, and buffer_size determines the initial buffer size. The buffer is + // owned by the assembler and deallocated upon destruction of the assembler. + // + // If the provided buffer is not NULL, the assembler uses the provided buffer + // for code generation and assumes its size to be buffer_size. If the buffer + // is too small, a fatal error occurs. No deallocation of the buffer is done + // upon destruction of the assembler. + Assembler(Isolate* isolate, void* buffer, int buffer_size); + virtual ~Assembler() { } + + // GetCode emits any pending (non-emitted) code and fills the descriptor + // desc. GetCode() is idempotent; it returns the same result if no other + // Assembler functions are invoked in between GetCode() calls. 
+ void GetCode(CodeDesc* desc); + + // Label operations & relative jumps (PPUM Appendix D). + // + // Takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. Usage: + // + // Label L; // unbound label + // j(cc, &L); // forward branch to unbound label + // bind(&L); // bind label to the current pc + // j(cc, &L); // backward branch to bound label + // bind(&L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + void bind(Label* L); // Binds an unbound label L to current code position. + // Determines if Label is bound and near enough so that branch instruction + // can be used to reach it, instead of jump instruction. + bool is_near(Label* L); + + // Returns the branch offset to the given label from the current code + // position. Links the label to the current position if it is still unbound. + // Manages the jump elimination optimization if the second parameter is true. + int32_t branch_offset(Label* L, bool jump_elimination_allowed); + int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed); + int32_t branch_offset21(Label* L, bool jump_elimination_allowed); + int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed); + int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { + int32_t o = branch_offset(L, jump_elimination_allowed); + DCHECK((o & 3) == 0); // Assert the offset is aligned. + return o >> 2; + } + int32_t shifted_branch_offset_compact(Label* L, + bool jump_elimination_allowed) { + int32_t o = branch_offset_compact(L, jump_elimination_allowed); + DCHECK((o & 3) == 0); // Assert the offset is aligned. + return o >> 2; + } + uint64_t jump_address(Label* L); + + // Puts a labels target address at the given position. + // The high 8 bits are set to zero. 
+ void label_at_put(Label* L, int at_offset); + + // Read/Modify the code target address in the branch/call instruction at pc. + static Address target_address_at(Address pc); + static void set_target_address_at(Address pc, + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED); + // On MIPS there is no Constant Pool so we skip that parameter. + INLINE(static Address target_address_at(Address pc, + ConstantPoolArray* constant_pool)) { + return target_address_at(pc); + } + INLINE(static void set_target_address_at(Address pc, + ConstantPoolArray* constant_pool, + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)) { + set_target_address_at(pc, target, icache_flush_mode); + } + INLINE(static Address target_address_at(Address pc, Code* code)) { + ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; + return target_address_at(pc, constant_pool); + } + INLINE(static void set_target_address_at(Address pc, + Code* code, + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED)) { + ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; + set_target_address_at(pc, constant_pool, target, icache_flush_mode); + } + + // Return the code target address at a call site from the return address + // of that call in the instruction stream. + inline static Address target_address_from_return_address(Address pc); + + // Return the code target address of the patch debug break slot + inline static Address break_address_from_return_address(Address pc); + + static void JumpLabelToJumpRegister(Address pc); + + static void QuietNaN(HeapObject* nan); + + // This sets the branch destination (which gets loaded at the call address). + // This is for calls and branches within generated code. The serializer + // has already deserialized the lui/ori instructions etc. 
+ inline static void deserialization_set_special_target_at( + Address instruction_payload, Code* code, Address target) { + set_target_address_at( + instruction_payload - kInstructionsFor64BitConstant * kInstrSize, + code, + target); + } + + // Size of an instruction. + static const int kInstrSize = sizeof(Instr); + + // Difference between address of current opcode and target address offset. + static const int kBranchPCOffset = 4; + + // Here we are patching the address in the LUI/ORI instruction pair. + // These values are used in the serialization process and must be zero for + // MIPS platform, as Code, Embedded Object or External-reference pointers + // are split across two consecutive instructions and don't exist separately + // in the code, so the serializer should not step forwards in memory after + // a target is resolved and written. + static const int kSpecialTargetSize = 0; + + // Number of consecutive instructions used to store 32bit/64bit constant. + // Before jump-optimizations, this constant was used in + // RelocInfo::target_address_address() function to tell serializer address of + // the instruction that follows LUI/ORI instruction pair. Now, with new jump + // optimization, where jump-through-register instruction that usually + // follows LUI/ORI pair is substituted with J/JAL, this constant equals + // to 3 instructions (LUI+ORI+J/JAL/JR/JALR). + static const int kInstructionsFor32BitConstant = 3; + static const int kInstructionsFor64BitConstant = 5; + + // Distance between the instruction referring to the address of the call + // target and the return address. + static const int kCallTargetAddressOffset = 6 * kInstrSize; + + // Distance between start of patched return sequence and the emitted address + // to jump to. + static const int kPatchReturnSequenceAddressOffset = 0; + + // Distance between start of patched debug break slot and the emitted address + // to jump to. 
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; + + // Difference between address of current opcode and value read from pc + // register. + static const int kPcLoadDelta = 4; + + static const int kPatchDebugBreakSlotReturnOffset = 6 * kInstrSize; + + // Number of instructions used for the JS return sequence. The constant is + // used by the debugger to patch the JS return sequence. + static const int kJSReturnSequenceInstructions = 7; + static const int kDebugBreakSlotInstructions = 6; + static const int kDebugBreakSlotLength = + kDebugBreakSlotInstructions * kInstrSize; + + + // --------------------------------------------------------------------------- + // Code generation. + + // Insert the smallest number of nop instructions + // possible to align the pc offset to a multiple + // of m. m must be a power of 2 (>= 4). + void Align(int m); + // Aligns code to something that's optimal for a jump target for the platform. + void CodeTargetAlign(); + + // Different nop operations are used by the code generator to detect certain + // states of the generated code. + enum NopMarkerTypes { + NON_MARKING_NOP = 0, + DEBUG_BREAK_NOP, + // IC markers. + PROPERTY_ACCESS_INLINED, + PROPERTY_ACCESS_INLINED_CONTEXT, + PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, + // Helper values. + LAST_CODE_MARKER, + FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, + // Code aging + CODE_AGE_MARKER_NOP = 6, + CODE_AGE_SEQUENCE_NOP + }; + + // Type == 0 is the default non-marking nop. For mips this is a + // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero + // marking, to avoid conflict with ssnop and ehb instructions. + void nop(unsigned int type = 0) { + DCHECK(type < 32); + Register nop_rt_reg = (type == 0) ? zero_reg : at; + sll(zero_reg, nop_rt_reg, type, true); + } + + + // --------Branch-and-jump-instructions---------- + // We don't use likely variant of instructions. 
+ void b(int16_t offset); + void b(Label* L) { b(branch_offset(L, false)>>2); } + void bal(int16_t offset); + void bal(Label* L) { bal(branch_offset(L, false)>>2); } + + void beq(Register rs, Register rt, int16_t offset); + void beq(Register rs, Register rt, Label* L) { + beq(rs, rt, branch_offset(L, false) >> 2); + } + void bgez(Register rs, int16_t offset); + void bgezc(Register rt, int16_t offset); + void bgezc(Register rt, Label* L) { + bgezc(rt, branch_offset_compact(L, false)>>2); + } + void bgeuc(Register rs, Register rt, int16_t offset); + void bgeuc(Register rs, Register rt, Label* L) { + bgeuc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bgec(Register rs, Register rt, int16_t offset); + void bgec(Register rs, Register rt, Label* L) { + bgec(rs, rt, branch_offset_compact(L, false)>>2); + } + void bgezal(Register rs, int16_t offset); + void bgezalc(Register rt, int16_t offset); + void bgezalc(Register rt, Label* L) { + bgezalc(rt, branch_offset_compact(L, false)>>2); + } + void bgezall(Register rs, int16_t offset); + void bgezall(Register rs, Label* L) { + bgezall(rs, branch_offset(L, false)>>2); + } + void bgtz(Register rs, int16_t offset); + void bgtzc(Register rt, int16_t offset); + void bgtzc(Register rt, Label* L) { + bgtzc(rt, branch_offset_compact(L, false)>>2); + } + void blez(Register rs, int16_t offset); + void blezc(Register rt, int16_t offset); + void blezc(Register rt, Label* L) { + blezc(rt, branch_offset_compact(L, false)>>2); + } + void bltz(Register rs, int16_t offset); + void bltzc(Register rt, int16_t offset); + void bltzc(Register rt, Label* L) { + bltzc(rt, branch_offset_compact(L, false)>>2); + } + void bltuc(Register rs, Register rt, int16_t offset); + void bltuc(Register rs, Register rt, Label* L) { + bltuc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bltc(Register rs, Register rt, int16_t offset); + void bltc(Register rs, Register rt, Label* L) { + bltc(rs, rt, branch_offset_compact(L, false)>>2); + } + + void 
bltzal(Register rs, int16_t offset); + void blezalc(Register rt, int16_t offset); + void blezalc(Register rt, Label* L) { + blezalc(rt, branch_offset_compact(L, false)>>2); + } + void bltzalc(Register rt, int16_t offset); + void bltzalc(Register rt, Label* L) { + bltzalc(rt, branch_offset_compact(L, false)>>2); + } + void bgtzalc(Register rt, int16_t offset); + void bgtzalc(Register rt, Label* L) { + bgtzalc(rt, branch_offset_compact(L, false)>>2); + } + void beqzalc(Register rt, int16_t offset); + void beqzalc(Register rt, Label* L) { + beqzalc(rt, branch_offset_compact(L, false)>>2); + } + void beqc(Register rs, Register rt, int16_t offset); + void beqc(Register rs, Register rt, Label* L) { + beqc(rs, rt, branch_offset_compact(L, false)>>2); + } + void beqzc(Register rs, int32_t offset); + void beqzc(Register rs, Label* L) { + beqzc(rs, branch_offset21_compact(L, false)>>2); + } + void bnezalc(Register rt, int16_t offset); + void bnezalc(Register rt, Label* L) { + bnezalc(rt, branch_offset_compact(L, false)>>2); + } + void bnec(Register rs, Register rt, int16_t offset); + void bnec(Register rs, Register rt, Label* L) { + bnec(rs, rt, branch_offset_compact(L, false)>>2); + } + void bnezc(Register rt, int32_t offset); + void bnezc(Register rt, Label* L) { + bnezc(rt, branch_offset21_compact(L, false)>>2); + } + void bne(Register rs, Register rt, int16_t offset); + void bne(Register rs, Register rt, Label* L) { + bne(rs, rt, branch_offset(L, false)>>2); + } + void bovc(Register rs, Register rt, int16_t offset); + void bovc(Register rs, Register rt, Label* L) { + bovc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bnvc(Register rs, Register rt, int16_t offset); + void bnvc(Register rs, Register rt, Label* L) { + bnvc(rs, rt, branch_offset_compact(L, false)>>2); + } + + // Never use the int16_t b(l)cond version with a branch offset + // instead of using the Label* version. + + // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits. 
+ void j(int64_t target); + void jal(int64_t target); + void jalr(Register rs, Register rd = ra); + void jr(Register target); + void j_or_jr(int64_t target, Register rs); + void jal_or_jalr(int64_t target, Register rs); + + + // -------Data-processing-instructions--------- + + // Arithmetic. + void addu(Register rd, Register rs, Register rt); + void subu(Register rd, Register rs, Register rt); + + void div(Register rs, Register rt); + void divu(Register rs, Register rt); + void ddiv(Register rs, Register rt); + void ddivu(Register rs, Register rt); + void div(Register rd, Register rs, Register rt); + void divu(Register rd, Register rs, Register rt); + void ddiv(Register rd, Register rs, Register rt); + void ddivu(Register rd, Register rs, Register rt); + void mod(Register rd, Register rs, Register rt); + void modu(Register rd, Register rs, Register rt); + void dmod(Register rd, Register rs, Register rt); + void dmodu(Register rd, Register rs, Register rt); + + void mul(Register rd, Register rs, Register rt); + void muh(Register rd, Register rs, Register rt); + void mulu(Register rd, Register rs, Register rt); + void muhu(Register rd, Register rs, Register rt); + void mult(Register rs, Register rt); + void multu(Register rs, Register rt); + void dmul(Register rd, Register rs, Register rt); + void dmuh(Register rd, Register rs, Register rt); + void dmulu(Register rd, Register rs, Register rt); + void dmuhu(Register rd, Register rs, Register rt); + void daddu(Register rd, Register rs, Register rt); + void dsubu(Register rd, Register rs, Register rt); + void dmult(Register rs, Register rt); + void dmultu(Register rs, Register rt); + + void addiu(Register rd, Register rs, int32_t j); + void daddiu(Register rd, Register rs, int32_t j); + + // Logical. 
+ void and_(Register rd, Register rs, Register rt); + void or_(Register rd, Register rs, Register rt); + void xor_(Register rd, Register rs, Register rt); + void nor(Register rd, Register rs, Register rt); + + void andi(Register rd, Register rs, int32_t j); + void ori(Register rd, Register rs, int32_t j); + void xori(Register rd, Register rs, int32_t j); + void lui(Register rd, int32_t j); + void aui(Register rs, Register rt, int32_t j); + void daui(Register rs, Register rt, int32_t j); + void dahi(Register rs, int32_t j); + void dati(Register rs, int32_t j); + + // Shifts. + // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop + // and may cause problems in normal code. coming_from_nop makes sure this + // doesn't happen. + void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false); + void sllv(Register rd, Register rt, Register rs); + void srl(Register rd, Register rt, uint16_t sa); + void srlv(Register rd, Register rt, Register rs); + void sra(Register rt, Register rd, uint16_t sa); + void srav(Register rt, Register rd, Register rs); + void rotr(Register rd, Register rt, uint16_t sa); + void rotrv(Register rd, Register rt, Register rs); + void dsll(Register rd, Register rt, uint16_t sa); + void dsllv(Register rd, Register rt, Register rs); + void dsrl(Register rd, Register rt, uint16_t sa); + void dsrlv(Register rd, Register rt, Register rs); + void drotr(Register rd, Register rt, uint16_t sa); + void drotrv(Register rd, Register rt, Register rs); + void dsra(Register rt, Register rd, uint16_t sa); + void dsrav(Register rd, Register rt, Register rs); + void dsll32(Register rt, Register rd, uint16_t sa); + void dsrl32(Register rt, Register rd, uint16_t sa); + void dsra32(Register rt, Register rd, uint16_t sa); + + + // ------------Memory-instructions------------- + + void lb(Register rd, const MemOperand& rs); + void lbu(Register rd, const MemOperand& rs); + void lh(Register rd, const MemOperand& rs); + void lhu(Register 
rd, const MemOperand& rs); + void lw(Register rd, const MemOperand& rs); + void lwu(Register rd, const MemOperand& rs); + void lwl(Register rd, const MemOperand& rs); + void lwr(Register rd, const MemOperand& rs); + void sb(Register rd, const MemOperand& rs); + void sh(Register rd, const MemOperand& rs); + void sw(Register rd, const MemOperand& rs); + void swl(Register rd, const MemOperand& rs); + void swr(Register rd, const MemOperand& rs); + void ldl(Register rd, const MemOperand& rs); + void ldr(Register rd, const MemOperand& rs); + void sdl(Register rd, const MemOperand& rs); + void sdr(Register rd, const MemOperand& rs); + void ld(Register rd, const MemOperand& rs); + void sd(Register rd, const MemOperand& rs); + + + // ----------------Prefetch-------------------- + + void pref(int32_t hint, const MemOperand& rs); + + + // -------------Misc-instructions-------------- + + // Break / Trap instructions. + void break_(uint32_t code, bool break_as_stop = false); + void stop(const char* msg, uint32_t code = kMaxStopCode); + void tge(Register rs, Register rt, uint16_t code); + void tgeu(Register rs, Register rt, uint16_t code); + void tlt(Register rs, Register rt, uint16_t code); + void tltu(Register rs, Register rt, uint16_t code); + void teq(Register rs, Register rt, uint16_t code); + void tne(Register rs, Register rt, uint16_t code); + + // Move from HI/LO register. + void mfhi(Register rd); + void mflo(Register rd); + + // Set on less than. + void slt(Register rd, Register rs, Register rt); + void sltu(Register rd, Register rs, Register rt); + void slti(Register rd, Register rs, int32_t j); + void sltiu(Register rd, Register rs, int32_t j); + + // Conditional move. 
+ void movz(Register rd, Register rs, Register rt); + void movn(Register rd, Register rs, Register rt); + void movt(Register rd, Register rs, uint16_t cc = 0); + void movf(Register rd, Register rs, uint16_t cc = 0); + + void sel(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs, uint8_t sel); + void seleqz(Register rs, Register rt, Register rd); + void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs); + void selnez(Register rs, Register rt, Register rd); + void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs); + + // Bit twiddling. + void clz(Register rd, Register rs); + void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); + void ext_(Register rt, Register rs, uint16_t pos, uint16_t size); + + // --------Coprocessor-instructions---------------- + + // Load, store, and move. + void lwc1(FPURegister fd, const MemOperand& src); + void ldc1(FPURegister fd, const MemOperand& src); + + void swc1(FPURegister fs, const MemOperand& dst); + void sdc1(FPURegister fs, const MemOperand& dst); + + void mtc1(Register rt, FPURegister fs); + void mthc1(Register rt, FPURegister fs); + void dmtc1(Register rt, FPURegister fs); + + void mfc1(Register rt, FPURegister fs); + void mfhc1(Register rt, FPURegister fs); + void dmfc1(Register rt, FPURegister fs); + + void ctc1(Register rt, FPUControlRegister fs); + void cfc1(Register rt, FPUControlRegister fs); + + // Arithmetic. + void add_d(FPURegister fd, FPURegister fs, FPURegister ft); + void sub_d(FPURegister fd, FPURegister fs, FPURegister ft); + void mul_d(FPURegister fd, FPURegister fs, FPURegister ft); + void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void div_d(FPURegister fd, FPURegister fs, FPURegister ft); + void abs_d(FPURegister fd, FPURegister fs); + void mov_d(FPURegister fd, FPURegister fs); + void neg_d(FPURegister fd, FPURegister fs); + void sqrt_d(FPURegister fd, FPURegister fs); + + // Conversion. 
+ void cvt_w_s(FPURegister fd, FPURegister fs); + void cvt_w_d(FPURegister fd, FPURegister fs); + void trunc_w_s(FPURegister fd, FPURegister fs); + void trunc_w_d(FPURegister fd, FPURegister fs); + void round_w_s(FPURegister fd, FPURegister fs); + void round_w_d(FPURegister fd, FPURegister fs); + void floor_w_s(FPURegister fd, FPURegister fs); + void floor_w_d(FPURegister fd, FPURegister fs); + void ceil_w_s(FPURegister fd, FPURegister fs); + void ceil_w_d(FPURegister fd, FPURegister fs); + + void cvt_l_s(FPURegister fd, FPURegister fs); + void cvt_l_d(FPURegister fd, FPURegister fs); + void trunc_l_s(FPURegister fd, FPURegister fs); + void trunc_l_d(FPURegister fd, FPURegister fs); + void round_l_s(FPURegister fd, FPURegister fs); + void round_l_d(FPURegister fd, FPURegister fs); + void floor_l_s(FPURegister fd, FPURegister fs); + void floor_l_d(FPURegister fd, FPURegister fs); + void ceil_l_s(FPURegister fd, FPURegister fs); + void ceil_l_d(FPURegister fd, FPURegister fs); + + void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + + void cvt_s_w(FPURegister fd, FPURegister fs); + void cvt_s_l(FPURegister fd, FPURegister fs); + void cvt_s_d(FPURegister fd, FPURegister fs); + + void cvt_d_w(FPURegister fd, FPURegister fs); + void cvt_d_l(FPURegister fd, FPURegister fs); + void cvt_d_s(FPURegister fd, FPURegister fs); + + // Conditions and branches for MIPSr6. 
+ void cmp(FPUCondition cond, SecondaryField fmt, + FPURegister fd, FPURegister ft, FPURegister fs); + + void bc1eqz(int16_t offset, FPURegister ft); + void bc1eqz(Label* L, FPURegister ft) { + bc1eqz(branch_offset(L, false)>>2, ft); + } + void bc1nez(int16_t offset, FPURegister ft); + void bc1nez(Label* L, FPURegister ft) { + bc1nez(branch_offset(L, false)>>2, ft); + } + + // Conditions and branches for non MIPSr6. + void c(FPUCondition cond, SecondaryField fmt, + FPURegister ft, FPURegister fs, uint16_t cc = 0); + + void bc1f(int16_t offset, uint16_t cc = 0); + void bc1f(Label* L, uint16_t cc = 0) { + bc1f(branch_offset(L, false)>>2, cc); + } + void bc1t(int16_t offset, uint16_t cc = 0); + void bc1t(Label* L, uint16_t cc = 0) { + bc1t(branch_offset(L, false)>>2, cc); + } + void fcmp(FPURegister src1, const double src2, FPUCondition cond); + + // Check the code size generated from label to here. + int SizeOfCodeGeneratedSince(Label* label) { + return pc_offset() - label->pos(); + } + + // Check the number of instructions generated from label to here. + int InstructionsGeneratedSince(Label* label) { + return SizeOfCodeGeneratedSince(label) / kInstrSize; + } + + // Class for scoping postponing the trampoline pool generation. + class BlockTrampolinePoolScope { + public: + explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { + assem_->StartBlockTrampolinePool(); + } + ~BlockTrampolinePoolScope() { + assem_->EndBlockTrampolinePool(); + } + + private: + Assembler* assem_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); + }; + + // Class for postponing the assembly buffer growth. Typically used for + // sequences of instructions that must be emitted as a unit, before + // buffer growth (and relocation) can occur. + // This blocking scope is not nestable. 
+ class BlockGrowBufferScope { + public: + explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { + assem_->StartBlockGrowBuffer(); + } + ~BlockGrowBufferScope() { + assem_->EndBlockGrowBuffer(); + } + + private: + Assembler* assem_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); + }; + + // Debugging. + + // Mark address of the ExitJSFrame code. + void RecordJSReturn(); + + // Mark address of a debug break slot. + void RecordDebugBreakSlot(); + + // Record the AST id of the CallIC being compiled, so that it can be placed + // in the relocation information. + void SetRecordedAstId(TypeFeedbackId ast_id) { + DCHECK(recorded_ast_id_.IsNone()); + recorded_ast_id_ = ast_id; + } + + TypeFeedbackId RecordedAstId() { + DCHECK(!recorded_ast_id_.IsNone()); + return recorded_ast_id_; + } + + void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } + + // Record a comment relocation entry that can be used by a disassembler. + // Use --code-comments to enable. + void RecordComment(const char* msg); + + static int RelocateInternalReference(byte* pc, intptr_t pc_delta); + + // Writes a single byte or word of data in the code stream. Used for + // inline tables, e.g., jump-tables. + void db(uint8_t data); + void dd(uint32_t data); + + // Emits the address of the code stub's first instruction. + void emit_code_stub_address(Code* stub); + + PositionsRecorder* positions_recorder() { return &positions_recorder_; } + + // Postpone the generation of the trampoline pool for the specified number of + // instructions. + void BlockTrampolinePoolFor(int instructions); + + // Check if there is less than kGap bytes available in the buffer. + // If this is the case, we need to grow the buffer before emitting + // an instruction or relocation information. + inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } + + // Get the number of bytes available in the buffer. 
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; } + + // Read/patch instructions. + static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } + static void instr_at_put(byte* pc, Instr instr) { + *reinterpret_cast<Instr*>(pc) = instr; + } + Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast<Instr*>(buffer_ + pos) = instr; + } + + // Check if an instruction is a branch of some kind. + static bool IsBranch(Instr instr); + static bool IsBeq(Instr instr); + static bool IsBne(Instr instr); + + static bool IsJump(Instr instr); + static bool IsJ(Instr instr); + static bool IsLui(Instr instr); + static bool IsOri(Instr instr); + + static bool IsJal(Instr instr); + static bool IsJr(Instr instr); + static bool IsJalr(Instr instr); + + static bool IsNop(Instr instr, unsigned int type); + static bool IsPop(Instr instr); + static bool IsPush(Instr instr); + static bool IsLwRegFpOffset(Instr instr); + static bool IsSwRegFpOffset(Instr instr); + static bool IsLwRegFpNegOffset(Instr instr); + static bool IsSwRegFpNegOffset(Instr instr); + + static Register GetRtReg(Instr instr); + static Register GetRsReg(Instr instr); + static Register GetRdReg(Instr instr); + + static uint32_t GetRt(Instr instr); + static uint32_t GetRtField(Instr instr); + static uint32_t GetRs(Instr instr); + static uint32_t GetRsField(Instr instr); + static uint32_t GetRd(Instr instr); + static uint32_t GetRdField(Instr instr); + static uint32_t GetSa(Instr instr); + static uint32_t GetSaField(Instr instr); + static uint32_t GetOpcodeField(Instr instr); + static uint32_t GetFunction(Instr instr); + static uint32_t GetFunctionField(Instr instr); + static uint32_t GetImmediate16(Instr instr); + static uint32_t GetLabelConst(Instr instr); + + static int32_t GetBranchOffset(Instr instr); + static bool IsLw(Instr instr); + static int16_t GetLwOffset(Instr instr); + static Instr 
SetLwOffset(Instr instr, int16_t offset); + + static bool IsSw(Instr instr); + static Instr SetSwOffset(Instr instr, int16_t offset); + static bool IsAddImmediate(Instr instr); + static Instr SetAddImmediateOffset(Instr instr, int16_t offset); + + static bool IsAndImmediate(Instr instr); + static bool IsEmittedConstant(Instr instr); + + void CheckTrampolinePool(); + + // Allocate a constant pool of the correct size for the generated code. + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); + + // Generate the constant pool for the generated code. + void PopulateConstantPool(ConstantPoolArray* constant_pool); + + protected: + // Relocation for a type-recording IC has the AST id added to it. This + // member variable is a way to pass the information from the call site to + // the relocation info. + TypeFeedbackId recorded_ast_id_; + + int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } + + // Decode branch instruction at pos and return branch target pos. + int64_t target_at(int64_t pos); + + // Patch branch instruction at pos to branch to given branch target pos. + void target_at_put(int64_t pos, int64_t target_pos); + + // Say if we need to relocate with this mode. + bool MustUseReg(RelocInfo::Mode rmode); + + // Record reloc info for current pc_. + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + + // Block the emission of the trampoline pool before pc_offset. 
+ void BlockTrampolinePoolBefore(int pc_offset) { + if (no_trampoline_pool_before_ < pc_offset) + no_trampoline_pool_before_ = pc_offset; + } + + void StartBlockTrampolinePool() { + trampoline_pool_blocked_nesting_++; + } + + void EndBlockTrampolinePool() { + trampoline_pool_blocked_nesting_--; + } + + bool is_trampoline_pool_blocked() const { + return trampoline_pool_blocked_nesting_ > 0; + } + + bool has_exception() const { + return internal_trampoline_exception_; + } + + void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi); + + bool is_trampoline_emitted() const { + return trampoline_emitted_; + } + + // Temporarily block automatic assembly buffer growth. + void StartBlockGrowBuffer() { + DCHECK(!block_buffer_growth_); + block_buffer_growth_ = true; + } + + void EndBlockGrowBuffer() { + DCHECK(block_buffer_growth_); + block_buffer_growth_ = false; + } + + bool is_buffer_growth_blocked() const { + return block_buffer_growth_; + } + + private: + // Buffer size and constant pool distance are checked together at regular + // intervals of kBufferCheckInterval emitted bytes. + static const int kBufferCheckInterval = 1*KB/2; + + // Code generation. + // The relocation writer's position is at least kGap bytes below the end of + // the generated instructions. This is so that multi-instruction sequences do + // not have to check for overflow. The same is true for writes of large + // relocation info entries. + static const int kGap = 32; + + + // Repeated checking whether the trampoline pool should be emitted is rather + // expensive. By default we only check again once a number of instructions + // has been generated. + static const int kCheckConstIntervalInst = 32; + static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize; + + int next_buffer_check_; // pc offset of next buffer check. + + // Emission of the trampoline pool may be blocked in some code sequences. + int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. 
+ int no_trampoline_pool_before_; // Block emission before this pc offset. + + // Keep track of the last emitted pool to guarantee a maximal distance. + int last_trampoline_pool_end_; // pc offset of the end of the last pool. + + // Automatic growth of the assembly buffer may be blocked for some sequences. + bool block_buffer_growth_; // Block growth when true. + + // Relocation information generation. + // Each relocation is encoded as a variable size value. + static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; + RelocInfoWriter reloc_info_writer; + + // The bound position, before this we cannot do instruction elimination. + int last_bound_pos_; + + // Code emission. + inline void CheckBuffer(); + void GrowBuffer(); + inline void emit(Instr x); + inline void emit(uint64_t x); + inline void CheckTrampolinePoolQuick(); + + // Instruction generation. + // We have 3 different kind of encoding layout on MIPS. + // However due to many different types of objects encoded in the same fields + // we have quite a few aliases for each mode. + // Using the same structure to refer to Register and FPURegister would spare a + // few aliases, but mixing both does not look clean to me. + // Anyway we could surely implement this differently. 
+ + void GenInstrRegister(Opcode opcode, + Register rs, + Register rt, + Register rd, + uint16_t sa = 0, + SecondaryField func = NULLSF); + + void GenInstrRegister(Opcode opcode, + Register rs, + Register rt, + uint16_t msb, + uint16_t lsb, + SecondaryField func); + + void GenInstrRegister(Opcode opcode, + SecondaryField fmt, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func = NULLSF); + + void GenInstrRegister(Opcode opcode, + FPURegister fr, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func = NULLSF); + + void GenInstrRegister(Opcode opcode, + SecondaryField fmt, + Register rt, + FPURegister fs, + FPURegister fd, + SecondaryField func = NULLSF); + + void GenInstrRegister(Opcode opcode, + SecondaryField fmt, + Register rt, + FPUControlRegister fs, + SecondaryField func = NULLSF); + + + void GenInstrImmediate(Opcode opcode, + Register rs, + Register rt, + int32_t j); + void GenInstrImmediate(Opcode opcode, + Register rs, + SecondaryField SF, + int32_t j); + void GenInstrImmediate(Opcode opcode, + Register r1, + FPURegister r2, + int32_t j); + + + void GenInstrJump(Opcode opcode, + uint32_t address); + + // Helpers. + void LoadRegPlusOffsetToAt(const MemOperand& src); + + // Labels. + void print(Label* L); + void bind_to(Label* L, int pos); + void next(Label* L); + + // One trampoline consists of: + // - space for trampoline slots, + // - space for labels. + // + // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. + // Space for trampoline slots preceeds space for labels. Each label is of one + // instruction size, so total amount for labels is equal to + // label_count * kInstrSize. 
+ class Trampoline { + public: + Trampoline() { + start_ = 0; + next_slot_ = 0; + free_slot_count_ = 0; + end_ = 0; + } + Trampoline(int start, int slot_count) { + start_ = start; + next_slot_ = start; + free_slot_count_ = slot_count; + end_ = start + slot_count * kTrampolineSlotsSize; + } + int start() { + return start_; + } + int end() { + return end_; + } + int take_slot() { + int trampoline_slot = kInvalidSlotPos; + if (free_slot_count_ <= 0) { + // We have run out of space on trampolines. + // Make sure we fail in debug mode, so we become aware of each case + // when this happens. + DCHECK(0); + // Internal exception will be caught. + } else { + trampoline_slot = next_slot_; + free_slot_count_--; + next_slot_ += kTrampolineSlotsSize; + } + return trampoline_slot; + } + + private: + int start_; + int end_; + int next_slot_; + int free_slot_count_; + }; + + int32_t get_trampoline_entry(int32_t pos); + int unbound_labels_count_; + // If trampoline is emitted, generated code is becoming large. As this is + // already a slow case which can possibly break our code generation for the + // extreme case, we use this information to trigger different mode of + // branch instruction generation, where we use jump instructions rather + // than regular branch instructions. 
+ bool trampoline_emitted_; + static const int kTrampolineSlotsSize = 6 * kInstrSize; + static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; + static const int kInvalidSlotPos = -1; + + Trampoline trampoline_; + bool internal_trampoline_exception_; + + friend class RegExpMacroAssemblerMIPS; + friend class RelocInfo; + friend class CodePatcher; + friend class BlockTrampolinePoolScope; + + PositionsRecorder positions_recorder_; + friend class PositionsRecorder; + friend class EnsureSpace; +}; + + +class EnsureSpace BASE_EMBEDDED { + public: + explicit EnsureSpace(Assembler* assembler) { + assembler->CheckBuffer(); + } +}; + +} } // namespace v8::internal + +#endif // V8_ARM_ASSEMBLER_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/assembler-mips64-inl.h nodejs-0.11.15/deps/v8/src/mips64/assembler-mips64-inl.h --- nodejs-0.11.13/deps/v8/src/mips64/assembler-mips64-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/assembler-mips64-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,457 @@ + +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + + +#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_ +#define V8_MIPS_ASSEMBLER_MIPS_INL_H_ + +#include "src/mips64/assembler-mips64.h" + +#include "src/assembler.h" +#include "src/debug.h" + + +namespace v8 { +namespace internal { + + +bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); } + + +// ----------------------------------------------------------------------------- +// Operand and MemOperand. 
+ +Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) { + rm_ = no_reg; + imm64_ = immediate; + rmode_ = rmode; +} + + +Operand::Operand(const ExternalReference& f) { + rm_ = no_reg; + imm64_ = reinterpret_cast<int64_t>(f.address()); + rmode_ = RelocInfo::EXTERNAL_REFERENCE; +} + + +Operand::Operand(Smi* value) { + rm_ = no_reg; + imm64_ = reinterpret_cast<intptr_t>(value); + rmode_ = RelocInfo::NONE32; +} + + +Operand::Operand(Register rm) { + rm_ = rm; +} + + +bool Operand::is_reg() const { + return rm_.is_valid(); +} + + +int Register::NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; +} + + +int DoubleRegister::NumRegisters() { + return FPURegister::kMaxNumRegisters; +} + + +int DoubleRegister::NumAllocatableRegisters() { + return FPURegister::kMaxNumAllocatableRegisters; +} + + +int FPURegister::ToAllocationIndex(FPURegister reg) { + DCHECK(reg.code() % 2 == 0); + DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters); + DCHECK(reg.is_valid()); + DCHECK(!reg.is(kDoubleRegZero)); + DCHECK(!reg.is(kLithiumScratchDouble)); + return (reg.code() / 2); +} + + +// ----------------------------------------------------------------------------- +// RelocInfo. + +void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { + if (IsInternalReference(rmode_)) { + // Absolute code pointer inside code object moves with the code object. + byte* p = reinterpret_cast<byte*>(pc_); + int count = Assembler::RelocateInternalReference(p, delta); + CpuFeatures::FlushICache(p, count * sizeof(uint32_t)); + } +} + + +Address RelocInfo::target_address() { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + return Assembler::target_address_at(pc_, host_); +} + + +Address RelocInfo::target_address_address() { + DCHECK(IsCodeTarget(rmode_) || + IsRuntimeEntry(rmode_) || + rmode_ == EMBEDDED_OBJECT || + rmode_ == EXTERNAL_REFERENCE); + // Read the address of the word containing the target_address in an + // instruction stream. 
+ // The only architecture-independent user of this function is the serializer. + // The serializer uses it to find out how many raw bytes of instruction to + // output before the next target. + // For an instruction like LUI/ORI where the target bits are mixed into the + // instruction bits, the size of the target will be zero, indicating that the + // serializer should not step forward in memory after a target is resolved + // and written. In this case the target_address_address function should + // return the end of the instructions to be patched, allowing the + // deserializer to deserialize the instructions as raw bytes and put them in + // place, ready to be patched with the target. After jump optimization, + // that is the address of the instruction that follows J/JAL/JR/JALR + // instruction. + // return reinterpret_cast<Address>( + // pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize); + return reinterpret_cast<Address>( + pc_ + Assembler::kInstructionsFor64BitConstant * Assembler::kInstrSize); +} + + +Address RelocInfo::constant_pool_entry_address() { + UNREACHABLE(); + return NULL; +} + + +int RelocInfo::target_address_size() { + return Assembler::kSpecialTargetSize; +} + + +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && + host() != NULL && IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } +} + + +Address Assembler::target_address_from_return_address(Address pc) { + return pc - kCallTargetAddressOffset; +} + + +Address Assembler::break_address_from_return_address(Address pc) { + return pc - 
Assembler::kPatchDebugBreakSlotReturnOffset; +} + + +Object* RelocInfo::target_object() { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)); +} + + +Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + return Handle<Object>(reinterpret_cast<Object**>( + Assembler::target_address_at(pc_, host_))); +} + + +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + Assembler::set_target_address_at(pc_, host_, + reinterpret_cast<Address>(target), + icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && + host() != NULL && + target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } +} + + +Address RelocInfo::target_reference() { + DCHECK(rmode_ == EXTERNAL_REFERENCE); + return Assembler::target_address_at(pc_, host_); +} + + +Address RelocInfo::target_runtime_entry(Assembler* origin) { + DCHECK(IsRuntimeEntry(rmode_)); + return target_address(); +} + + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) + set_target_address(target, write_barrier_mode, icache_flush_mode); +} + + +Handle<Cell> RelocInfo::target_cell_handle() { + DCHECK(rmode_ == RelocInfo::CELL); + Address address = Memory::Address_at(pc_); + return Handle<Cell>(reinterpret_cast<Cell**>(address)); +} + + +Cell* RelocInfo::target_cell() { + DCHECK(rmode_ == RelocInfo::CELL); + return Cell::FromValueAddress(Memory::Address_at(pc_)); +} + + +void RelocInfo::set_target_cell(Cell* cell, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { 
+ DCHECK(rmode_ == RelocInfo::CELL); + Address address = cell->address() + Cell::kValueOffset; + Memory::Address_at(pc_) = address; + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } +} + + +static const int kNoCodeAgeSequenceLength = 9 * Assembler::kInstrSize; + + +Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { + UNREACHABLE(); // This should never be reached on Arm. + return Handle<Object>(); +} + + +Code* RelocInfo::code_age_stub() { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + return Code::GetCodeFromTargetAddress( + Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_)); +} + + +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Assembler::set_target_address_at(pc_ + Assembler::kInstrSize, + host_, + stub->instruction_start()); +} + + +Address RelocInfo::call_address() { + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + // The pc_ offset of 0 assumes mips patched return sequence per + // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or + // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot(). + return Assembler::target_address_at(pc_, host_); +} + + +void RelocInfo::set_call_address(Address target) { + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + // The pc_ offset of 0 assumes mips patched return sequence per + // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or + // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot(). 
+ Assembler::set_target_address_at(pc_, host_, target); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } +} + + +Object* RelocInfo::call_object() { + return *call_object_address(); +} + + +Object** RelocInfo::call_object_address() { + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + return reinterpret_cast<Object**>(pc_ + 6 * Assembler::kInstrSize); +} + + +void RelocInfo::set_call_object(Object* target) { + *call_object_address() = target; +} + + +void RelocInfo::WipeOut() { + DCHECK(IsEmbeddedObject(rmode_) || + IsCodeTarget(rmode_) || + IsRuntimeEntry(rmode_) || + IsExternalReference(rmode_)); + Assembler::set_target_address_at(pc_, host_, NULL); +} + + +bool RelocInfo::IsPatchedReturnSequence() { + Instr instr0 = Assembler::instr_at(pc_); // lui. + Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); // ori. + Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize); // dsll. + Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize); // ori. + Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); // jalr. 
+ + bool patched_return = ((instr0 & kOpcodeMask) == LUI && + (instr1 & kOpcodeMask) == ORI && + (instr2 & kFunctionFieldMask) == DSLL && + (instr3 & kOpcodeMask) == ORI && + (instr4 & kFunctionFieldMask) == JALR); + return patched_return; +} + + +bool RelocInfo::IsPatchedDebugBreakSlotSequence() { + Instr current_instr = Assembler::instr_at(pc_); + return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP); +} + + +void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + visitor->VisitEmbeddedPointer(this); + } else if (RelocInfo::IsCodeTarget(mode)) { + visitor->VisitCodeTarget(this); + } else if (mode == RelocInfo::CELL) { + visitor->VisitCell(this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + visitor->VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); + } else if (((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence())) && + isolate->debug()->has_break_points()) { + visitor->VisitDebugTarget(this); + } else if (RelocInfo::IsRuntimeEntry(mode)) { + visitor->VisitRuntimeEntry(this); + } +} + + +template<typename StaticVisitor> +void RelocInfo::Visit(Heap* heap) { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + StaticVisitor::VisitEmbeddedPointer(heap, this); + } else if (RelocInfo::IsCodeTarget(mode)) { + StaticVisitor::VisitCodeTarget(heap, this); + } else if (mode == RelocInfo::CELL) { + StaticVisitor::VisitCell(heap, this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + StaticVisitor::VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); + } else if (heap->isolate()->debug()->has_break_points() && + ((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + 
(RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence()))) { + StaticVisitor::VisitDebugTarget(heap, this); + } else if (RelocInfo::IsRuntimeEntry(mode)) { + StaticVisitor::VisitRuntimeEntry(this); + } +} + + +// ----------------------------------------------------------------------------- +// Assembler. + + +void Assembler::CheckBuffer() { + if (buffer_space() <= kGap) { + GrowBuffer(); + } +} + + +void Assembler::CheckTrampolinePoolQuick() { + if (pc_offset() >= next_buffer_check_) { + CheckTrampolinePool(); + } +} + + +void Assembler::emit(Instr x) { + if (!is_buffer_growth_blocked()) { + CheckBuffer(); + } + *reinterpret_cast<Instr*>(pc_) = x; + pc_ += kInstrSize; + CheckTrampolinePoolQuick(); +} + + +void Assembler::emit(uint64_t x) { + if (!is_buffer_growth_blocked()) { + CheckBuffer(); + } + *reinterpret_cast<uint64_t*>(pc_) = x; + pc_ += kInstrSize * 2; + CheckTrampolinePoolQuick(); +} + + +} } // namespace v8::internal + +#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/builtins-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/builtins-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/builtins-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/builtins-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1597 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ + + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/codegen.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/runtime.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + + +void Builtins::Generate_Adaptor(MacroAssembler* masm, + CFunctionId id, + BuiltinExtraArguments extra_args) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments excluding receiver + // -- a1 : called function (only guaranteed when + // -- extra_args requires it) + // -- cp : context + // -- sp[0] : last argument + // -- ... + // -- sp[8 * (argc - 1)] : first argument + // -- sp[8 * agrc] : receiver + // ----------------------------------- + + // Insert extra arguments. + int num_extra_args = 0; + if (extra_args == NEEDS_CALLED_FUNCTION) { + num_extra_args = 1; + __ push(a1); + } else { + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); + } + + // JumpToExternalReference expects s0 to contain the number of arguments + // including the receiver and the extra arguments. + __ Daddu(s0, a0, num_extra_args + 1); + __ dsll(s1, s0, kPointerSizeLog2); + __ Dsubu(s1, s1, kPointerSize); + __ JumpToExternalReference(ExternalReference(id, masm->isolate())); +} + + +// Load the built-in InternalArray function from the current context. +static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, + Register result) { + // Load the native context. + + __ ld(result, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ ld(result, + FieldMemOperand(result, GlobalObject::kNativeContextOffset)); + // Load the InternalArray function from the native context. + __ ld(result, + MemOperand(result, + Context::SlotOffset( + Context::INTERNAL_ARRAY_FUNCTION_INDEX))); +} + + +// Load the built-in Array function from the current context. +static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { + // Load the native context. 
+ + __ ld(result, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ ld(result, + FieldMemOperand(result, GlobalObject::kNativeContextOffset)); + // Load the Array function from the native context. + __ ld(result, + MemOperand(result, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + +void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- ra : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_array_code, one_or_more_arguments, two_or_more_arguments; + + // Get the InternalArray function. + GenerateLoadInternalArrayFunction(masm, a1); + + if (FLAG_debug_code) { + // Initial map for the builtin InternalArray functions should be maps. + __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + __ SmiTst(a2, a4); + __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, + a4, Operand(zero_reg)); + __ GetObjectType(a2, a3, a4); + __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, + a4, Operand(MAP_TYPE)); + } + + // Run the native code for the InternalArray function called as a normal + // function. + // Tail call a stub. + InternalArrayConstructorStub stub(masm->isolate()); + __ TailCallStub(&stub); +} + + +void Builtins::Generate_ArrayCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- ra : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_array_code; + + // Get the Array function. + GenerateLoadArrayFunction(masm, a1); + + if (FLAG_debug_code) { + // Initial map for the builtin Array functions should be maps. 
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + __ SmiTst(a2, a4); + __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, + a4, Operand(zero_reg)); + __ GetObjectType(a2, a3, a4); + __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, + a4, Operand(MAP_TYPE)); + } + + // Run the native code for the Array function called as a normal function. + // Tail call a stub. + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + ArrayConstructorStub stub(masm->isolate()); + __ TailCallStub(&stub); +} + + +void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- a1 : constructor function + // -- ra : return address + // -- sp[(argc - n - 1) * 8] : arg[n] (zero based) + // -- sp[argc * 8] : receiver + // ----------------------------------- + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3); + + Register function = a1; + if (FLAG_debug_code) { + __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2); + __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2)); + } + + // Load the first arguments in a0 and get rid of the rest. + Label no_arguments; + __ Branch(&no_arguments, eq, a0, Operand(zero_reg)); + // First args = sp[(argc - 1) * 8]. + __ Dsubu(a0, a0, Operand(1)); + __ dsll(a0, a0, kPointerSizeLog2); + __ Daddu(sp, a0, sp); + __ ld(a0, MemOperand(sp)); + // sp now point to args[0], drop args[0] + receiver. + __ Drop(2); + + Register argument = a2; + Label not_cached, argument_is_string; + __ LookupNumberStringCache(a0, // Input. + argument, // Result. + a3, // Scratch. + a4, // Scratch. + a5, // Scratch. 
+ ¬_cached); + __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, a4); + __ bind(&argument_is_string); + + // ----------- S t a t e ------------- + // -- a2 : argument converted to string + // -- a1 : constructor function + // -- ra : return address + // ----------------------------------- + + Label gc_required; + __ Allocate(JSValue::kSize, + v0, // Result. + a3, // Scratch. + a4, // Scratch. + &gc_required, + TAG_OBJECT); + + // Initialising the String Object. + Register map = a3; + __ LoadGlobalFunctionInitialMap(function, map, a4); + if (FLAG_debug_code) { + __ lbu(a4, FieldMemOperand(map, Map::kInstanceSizeOffset)); + __ Assert(eq, kUnexpectedStringWrapperInstanceSize, + a4, Operand(JSValue::kSize >> kPointerSizeLog2)); + __ lbu(a4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); + __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper, + a4, Operand(zero_reg)); + } + __ sd(map, FieldMemOperand(v0, HeapObject::kMapOffset)); + + __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); + __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); + + __ sd(argument, FieldMemOperand(v0, JSValue::kValueOffset)); + + // Ensure the object is fully initialized. + STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); + + __ Ret(); + + // The argument was not found in the number to string cache. Check + // if it's a string already before calling the conversion builtin. + Label convert_argument; + __ bind(¬_cached); + __ JumpIfSmi(a0, &convert_argument); + + // Is it a String? 
+ __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); + __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kNotStringTag != 0); + __ And(a4, a3, Operand(kIsNotStringMask)); + __ Branch(&convert_argument, ne, a4, Operand(zero_reg)); + __ mov(argument, a0); + __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4); + __ Branch(&argument_is_string); + + // Invoke the conversion builtin and put the result into a2. + __ bind(&convert_argument); + __ push(function); // Preserve the function. + __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(a0); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + } + __ pop(function); + __ mov(argument, v0); + __ Branch(&argument_is_string); + + // Load the empty string into a2, remove the receiver from the + // stack, and jump back to the case where the argument is a string. + __ bind(&no_arguments); + __ LoadRoot(argument, Heap::kempty_stringRootIndex); + __ Drop(1); + __ Branch(&argument_is_string); + + // At this point the argument is already a string. Call runtime to + // create a string wrapper. + __ bind(&gc_required); + __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, a4); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } + __ Ret(); +} + + +static void CallRuntimePassFunction( + MacroAssembler* masm, Runtime::FunctionId function_id) { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the function onto the stack. + // Push call kind information and function as parameter to the runtime call. + __ Push(a1, a1); + + __ CallRuntime(function_id, 1); + // Restore call kind information and receiver. 
+ __ Pop(a1); +} + + +static void GenerateTailCallToSharedCode(MacroAssembler* masm) { + __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset)); + __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(at); +} + + +static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { + __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(at); +} + + +void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { + // Checking whether the queued function is ready for install is optional, + // since we come across interrupts and stack checks elsewhere. However, + // not checking may delay installing ready functions, and always checking + // would be quite expensive. A good compromise is to first check against + // stack limit as a cue for an interrupt signal. + Label ok; + __ LoadRoot(a4, Heap::kStackLimitRootIndex); + __ Branch(&ok, hs, sp, Operand(a4)); + + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); + GenerateTailCallToReturnedCode(masm); + + __ bind(&ok); + GenerateTailCallToSharedCode(masm); +} + + +static void Generate_JSConstructStubHelper(MacroAssembler* masm, + bool is_api_function, + bool create_memento) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- a1 : constructor function + // -- a2 : allocation site or undefined + // -- ra : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + + // Should never create mementos for api functions. + DCHECK(!is_api_function || !create_memento); + + Isolate* isolate = masm->isolate(); + + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- a1 : constructor function + // -- ra : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + + // Enter a construct frame. 
+ { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + if (create_memento) { + __ AssertUndefinedOrAllocationSite(a2, a3); + __ push(a2); + } + + // Preserve the two incoming parameters on the stack. + // Tag arguments count. + __ dsll32(a0, a0, 0); + __ MultiPushReversed(a0.bit() | a1.bit()); + + Label rt_call, allocated; + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. + if (FLAG_inline_new) { + Label undo_allocation; + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ li(a2, Operand(debug_step_in_fp)); + __ ld(a2, MemOperand(a2)); + __ Branch(&rt_call, ne, a2, Operand(zero_reg)); + + // Load the initial map and verify that it is in fact a map. + // a1: constructor function + __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + __ JumpIfSmi(a2, &rt_call); + __ GetObjectType(a2, a3, t0); + __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE)); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // a1: constructor function + // a2: initial map + __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); + __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE)); + + if (!is_api_function) { + Label allocate; + MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset); + // Check if slack tracking is enabled. + __ lwu(a4, bit_field3); + __ DecodeField<Map::ConstructionCount>(a6, a4); + __ Branch(&allocate, + eq, + a6, + Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking))); + // Decrease generous allocation count. + __ Dsubu(a4, a4, Operand(1 << Map::ConstructionCount::kShift)); + __ Branch(USE_DELAY_SLOT, + &allocate, ne, a6, Operand(JSFunction::kFinishSlackTracking)); + __ sw(a4, bit_field3); // In delay slot. 
+ + __ Push(a1, a2, a1); // a1 = Constructor. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ Pop(a1, a2); + // Slack tracking counter is kNoSlackTracking after runtime call. + DCHECK(JSFunction::kNoSlackTracking == 0); + __ mov(a6, zero_reg); + + __ bind(&allocate); + } + + // Now allocate the JSObject on the heap. + // a1: constructor function + // a2: initial map + __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset)); + if (create_memento) { + __ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize)); + } + + __ Allocate(a3, t0, t1, t2, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to + // initial map and properties and elements are set to empty fixed array. + // a1: constructor function + // a2: initial map + // a3: object size (not including memento if create_memento) + // t0: JSObject (not tagged) + __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex); + __ mov(t1, t0); + __ sd(a2, MemOperand(t1, JSObject::kMapOffset)); + __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset)); + __ sd(t2, MemOperand(t1, JSObject::kElementsOffset)); + __ Daddu(t1, t1, Operand(3*kPointerSize)); + DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset); + DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset); + + // Fill all the in-object properties with appropriate filler. + // a1: constructor function + // a2: initial map + // a3: object size (in words, including memento if create_memento) + // t0: JSObject (not tagged) + // t1: First in-object property of JSObject (not tagged) + // a6: slack tracking counter (non-API function case) + DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize); + + // Use t3 to hold undefined, which is used in several places below. + __ LoadRoot(t3, Heap::kUndefinedValueRootIndex); + + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. 
+ __ Branch(&no_inobject_slack_tracking, + eq, + a6, + Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking))); + + // Allocate object with a slack. + __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); + __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(a0, t1, at); + // a0: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ dsll(at, a3, kPointerSizeLog2); + __ Daddu(t2, t0, Operand(at)); // End of object. + __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, + a0, Operand(t2)); + } + __ InitializeFieldsWithFiller(t1, a0, t3); + // To allow for truncation. + __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex); + // Fill the remaining fields with one pointer filler map. + + __ bind(&no_inobject_slack_tracking); + } + + if (create_memento) { + __ Dsubu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize)); + __ dsll(a0, a0, kPointerSizeLog2); + __ Daddu(a0, t0, Operand(a0)); // End of object. + __ InitializeFieldsWithFiller(t1, a0, t3); + + // Fill in memento fields. + // t1: points to the allocated but uninitialized memento. + __ LoadRoot(t3, Heap::kAllocationMementoMapRootIndex); + DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); + __ sd(t3, MemOperand(t1)); + __ Daddu(t1, t1, kPointerSize); + // Load the AllocationSite. + __ ld(t3, MemOperand(sp, 2 * kPointerSize)); + DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); + __ sd(t3, MemOperand(t1)); + __ Daddu(t1, t1, kPointerSize); + } else { + __ dsll(at, a3, kPointerSizeLog2); + __ Daddu(a0, t0, Operand(at)); // End of object. + __ InitializeFieldsWithFiller(t1, a0, t3); + } + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. 
+ __ Daddu(t0, t0, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with + // allocated object if not fall through to runtime call if it is. + // a1: constructor function + // t0: JSObject + // t1: start of next object (not tagged) + __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields + // and in-object properties. + __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); + __ Ext(t2, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ Daddu(a3, a3, Operand(t2)); + __ Ext(t2, a0, Map::kInObjectPropertiesByte * kBitsPerByte, + kBitsPerByte); + __ dsubu(a3, a3, t2); + + // Done if no extra properties are to be allocated. + __ Branch(&allocated, eq, a3, Operand(zero_reg)); + __ Assert(greater_equal, kPropertyAllocationCountFailed, + a3, Operand(zero_reg)); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // a1: constructor + // a3: number of elements in properties array + // t0: JSObject + // t1: start of next object + __ Daddu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ Allocate( + a0, + t1, + t2, + a2, + &undo_allocation, + static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. + // a1: constructor + // a3: number of elements in properties array (untagged) + // t0: JSObject + // t1: start of next object + __ LoadRoot(t2, Heap::kFixedArrayMapRootIndex); + __ mov(a2, t1); + __ sd(t2, MemOperand(a2, JSObject::kMapOffset)); + // Tag number of elements. + __ dsll32(a0, a3, 0); + __ sd(a0, MemOperand(a2, FixedArray::kLengthOffset)); + __ Daddu(a2, a2, Operand(2 * kPointerSize)); + + DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset); + DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + + // Initialize the fields to undefined. 
+ // a1: constructor + // a2: First element of FixedArray (not tagged) + // a3: number of elements in properties array + // t0: JSObject + // t1: FixedArray (not tagged) + __ dsll(a7, a3, kPointerSizeLog2); + __ daddu(t2, a2, a7); // End of object. + DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (!is_api_function || create_memento) { + __ LoadRoot(t3, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); + __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6)); + } + __ jmp(&entry); + __ bind(&loop); + __ sd(t3, MemOperand(a2)); + __ daddiu(a2, a2, kPointerSize); + __ bind(&entry); + __ Branch(&loop, less, a2, Operand(t2)); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject. + // a1: constructor function + // t0: JSObject + // t1: FixedArray (not tagged) + __ Daddu(t1, t1, Operand(kHeapObjectTag)); // Add the heap tag. + __ sd(t1, FieldMemOperand(t0, JSObject::kPropertiesOffset)); + + // Continue with JSObject being successfully allocated. + // a1: constructor function + // a4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // t0: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(t0, t1); + } + + // Allocate the new receiver object using the runtime call. + // a1: constructor function + __ bind(&rt_call); + if (create_memento) { + // Get the cell or allocation site. + __ ld(a2, MemOperand(sp, 2 * kPointerSize)); + __ push(a2); + } + + __ push(a1); // Argument for Runtime_NewObject. 
+ if (create_memento) { + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); + } else { + __ CallRuntime(Runtime::kNewObject, 1); + } + __ mov(t0, v0); + + // If we ended up using the runtime, and we want a memento, then the + // runtime call made it for us, and we shouldn't do create count + // increment. + Label count_incremented; + if (create_memento) { + __ jmp(&count_incremented); + } + + // Receiver for constructor call allocated. + // t0: JSObject + __ bind(&allocated); + + if (create_memento) { + __ ld(a2, MemOperand(sp, kPointerSize * 2)); + __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); + __ Branch(&count_incremented, eq, a2, Operand(t1)); + // a2 is an AllocationSite. We are creating a memento from it, so we + // need to increment the memento create count. + __ ld(a3, FieldMemOperand(a2, + AllocationSite::kPretenureCreateCountOffset)); + __ Daddu(a3, a3, Operand(Smi::FromInt(1))); + __ sd(a3, FieldMemOperand(a2, + AllocationSite::kPretenureCreateCountOffset)); + __ bind(&count_incremented); + } + + __ Push(t0, t0); + + // Reload the number of arguments from the stack. + // sp[0]: receiver + // sp[1]: receiver + // sp[2]: constructor function + // sp[3]: number of arguments (smi-tagged) + __ ld(a1, MemOperand(sp, 2 * kPointerSize)); + __ ld(a3, MemOperand(sp, 3 * kPointerSize)); + + // Set up pointer to last argument. + __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Set up number of arguments for function call below. + __ SmiUntag(a0, a3); + + // Copy arguments and receiver to the expression stack. 
+ // a0: number of arguments + // a1: constructor function + // a2: address of last argument (caller sp) + // a3: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: receiver + // sp[2]: constructor function + // sp[3]: number of arguments (smi-tagged) + Label loop, entry; + __ SmiUntag(a3); + __ jmp(&entry); + __ bind(&loop); + __ dsll(a4, a3, kPointerSizeLog2); + __ Daddu(a4, a2, Operand(a4)); + __ ld(a5, MemOperand(a4)); + __ push(a5); + __ bind(&entry); + __ Daddu(a3, a3, Operand(-1)); + __ Branch(&loop, greater_equal, a3, Operand(zero_reg)); + + // Call the function. + // a0: number of arguments + // a1: constructor function + if (is_api_function) { + __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + __ Call(code, RelocInfo::CODE_TARGET); + } else { + ParameterCount actual(a0); + __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); + } + + // Store offset of return address for deoptimizer. + if (!is_api_function) { + masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); + } + + // Restore context from the frame. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + // v0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ JumpIfSmi(v0, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. 
+ __ GetObjectType(v0, a1, a3); + __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ ld(v0, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // v0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ld(a1, MemOperand(sp, 2 * kPointerSize)); + + // Leave construct frame. + } + + __ SmiScale(a4, a1, kPointerSizeLog2); + __ Daddu(sp, sp, a4); + __ Daddu(sp, sp, kPointerSize); + __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2); + __ Ret(); +} + + +void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); +} + + +void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, true, false); +} + + +static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, + bool is_construct) { + // Called from JSEntryStub::GenerateBody + + // ----------- S t a t e ------------- + // -- a0: code entry + // -- a1: function + // -- a2: receiver_pointer + // -- a3: argc + // -- s0: argv + // ----------------------------------- + ProfileEntryHookStub::MaybeCallEntryHook(masm); + // Clear the context before we push it when entering the JS frame. + __ mov(cp, zero_reg); + + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Set up the context from the function argument. + __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Push the function and the receiver onto the stack. + __ Push(a1, a2); + + // Copy arguments to the stack in a loop. + // a3: argc + // s0: argv, i.e. 
points to first arg + Label loop, entry; + // TODO(plind): At least on simulator, argc in a3 is an int32_t with junk + // in upper bits. Should fix the root cause, rather than use below + // workaround to clear upper bits. + __ dsll32(a3, a3, 0); // int32_t -> int64_t. + __ dsrl32(a3, a3, 0); + __ dsll(a4, a3, kPointerSizeLog2); + __ daddu(a6, s0, a4); + __ b(&entry); + __ nop(); // Branch delay slot nop. + // a6 points past last arg. + __ bind(&loop); + __ ld(a4, MemOperand(s0)); // Read next parameter. + __ daddiu(s0, s0, kPointerSize); + __ ld(a4, MemOperand(a4)); // Dereference handle. + __ push(a4); // Push parameter. + __ bind(&entry); + __ Branch(&loop, ne, s0, Operand(a6)); + + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); + __ mov(s1, a4); + __ mov(s2, a4); + __ mov(s3, a4); + __ mov(s4, a4); + __ mov(s5, a4); + // s6 holds the root address. Do not clobber. + // s7 is cp. Do not init. + + // Invoke the code and pass argc as a0. + __ mov(a0, a3); + if (is_construct) { + // No type feedback cell is available + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + __ CallStub(&stub); + } else { + ParameterCount actual(a0); + __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); + } + + // Leave internal frame. 
+ } + __ Jump(ra); +} + + +void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, false); +} + + +void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, true); +} + + +void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); + GenerateTailCallToReturnedCode(masm); +} + + +static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the function onto the stack. + // Push function as parameter to the runtime call. + __ Push(a1, a1); + // Whether to compile in a background thread. + __ Push(masm->isolate()->factory()->ToBoolean(concurrent)); + + __ CallRuntime(Runtime::kCompileOptimized, 2); + // Restore receiver. + __ Pop(a1); +} + + +void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { + CallCompileOptimized(masm, false); + GenerateTailCallToReturnedCode(masm); +} + + +void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { + CallCompileOptimized(masm, true); + GenerateTailCallToReturnedCode(masm); +} + + +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. + + // Set a0 to point to the head of the PlatformCodeAge sequence. 
+ __ Dsubu(a0, a0, + Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); + + // The following registers must be saved and restored when calling through to + // the runtime: + // a0 - contains return address (beginning of patch sequence) + // a1 - isolate + RegList saved_regs = + (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); + FrameScope scope(masm, StackFrame::MANUAL); + __ MultiPush(saved_regs); + __ PrepareCallCFunction(2, 0, a2); + __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 2); + __ MultiPop(saved_regs); + __ Jump(a0); +} + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact + // that make_code_young doesn't do any garbage collection which allows us to + // save/restore the registers without worrying about which of them contain + // pointers. + + // Set a0 to point to the head of the PlatformCodeAge sequence. 
+ __ Dsubu(a0, a0, + Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); + + // The following registers must be saved and restored when calling through to + // the runtime: + // a0 - contains return address (beginning of patch sequence) + // a1 - isolate + RegList saved_regs = + (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); + FrameScope scope(masm, StackFrame::MANUAL); + __ MultiPush(saved_regs); + __ PrepareCallCFunction(2, 0, a2); + __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ CallCFunction( + ExternalReference::get_mark_code_as_executed_function(masm->isolate()), + 2); + __ MultiPop(saved_regs); + + // Perform prologue operations usually performed by the young code stub. + __ Push(ra, fp, cp, a1); + __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + + // Jump to point after the code-age stub. + __ Daddu(a0, a0, Operand((kNoCodeAgeSequenceLength))); + __ Jump(a0); +} + + +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { + GenerateMakeCodeYoungAgainCommon(masm); +} + + +static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, + SaveFPRegsMode save_doubles) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ MultiPush(kJSCallerSaved | kCalleeSaved); + // Pass the function and deoptimization type to the runtime system. 
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); + __ MultiPop(kJSCallerSaved | kCalleeSaved); + } + + __ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state + __ Jump(ra); // Jump to miss handler +} + + +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { + Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs); +} + + +void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { + Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); +} + + +static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, + Deoptimizer::BailoutType type) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Pass the function and deoptimization type to the runtime system. + __ li(a0, Operand(Smi::FromInt(static_cast<int>(type)))); + __ push(a0); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + } + + // Get the full codegen state from the stack and untag it -> a6. + __ ld(a6, MemOperand(sp, 0 * kPointerSize)); + __ SmiUntag(a6); + // Switch on the state. + Label with_tos_register, unknown_state; + __ Branch(&with_tos_register, + ne, a6, Operand(FullCodeGenerator::NO_REGISTERS)); + __ Ret(USE_DELAY_SLOT); + // Safe to fill delay slot Addu will emit one instruction. + __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state. + + __ bind(&with_tos_register); + __ ld(v0, MemOperand(sp, 1 * kPointerSize)); + __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG)); + + __ Ret(USE_DELAY_SLOT); + // Safe to fill delay slot Addu will emit one instruction. + __ Daddu(sp, sp, Operand(2 * kPointerSize)); // Remove state. 
+ + __ bind(&unknown_state); + __ stop("no cases left"); +} + + +void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); +} + + +void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); +} + + +void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); +} + + +void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { + // Lookup the function in the JavaScript frame. + __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Pass function as argument. + __ push(a0); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } + + // If the code object is null, just return to the unoptimized code. + __ Ret(eq, v0, Operand(Smi::FromInt(0))); + + // Load deoptimization data from the code object. + // <deopt_data> = <code>[#deoptimization_data_offset] + __ Uld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); + + // Load the OSR entrypoint offset from the deoptimization data. + // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] + __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt( + DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); + __ SmiUntag(a1); + + // Compute the target address = code_obj + header_size + osr_offset + // <entry_addr> = <code_obj> + #header_size + <osr_offset> + __ daddu(v0, v0, a1); + __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag); + + // And "return" to the OSR entry point of the function. + __ Ret(); +} + + +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { + // We check the stack limit as indicator that recompilation might be done. 
+ Label ok; + __ LoadRoot(at, Heap::kStackLimitRootIndex); + __ Branch(&ok, hs, sp, Operand(at)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kStackGuard, 0); + } + __ Jump(masm->isolate()->builtins()->OnStackReplacement(), + RelocInfo::CODE_TARGET); + + __ bind(&ok); + __ Ret(); +} + + +void Builtins::Generate_FunctionCall(MacroAssembler* masm) { + // 1. Make sure we have at least one argument. + // a0: actual number of arguments + { Label done; + __ Branch(&done, ne, a0, Operand(zero_reg)); + __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); + __ push(a6); + __ Daddu(a0, a0, Operand(1)); + __ bind(&done); + } + + // 2. Get the function to call (passed as receiver) from the stack, check + // if it is a function. + // a0: actual number of arguments + Label slow, non_function; + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(at, sp, at); + __ ld(a1, MemOperand(at)); + __ JumpIfSmi(a1, &non_function); + __ GetObjectType(a1, a2, a2); + __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); + + // 3a. Patch the first argument if necessary when calling a function. + // a0: actual number of arguments + // a1: function + Label shift_arguments; + __ li(a4, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION. + { Label convert_to_object, use_global_proxy, patch_receiver; + // Change context eagerly in case we need the global receiver. + __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Do not transform the receiver for strict mode functions. + __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset)); + __ And(a7, a3, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); + __ Branch(&shift_arguments, ne, a7, Operand(zero_reg)); + + // Do not transform the receiver for native (Compilerhints already in a3). 
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset)); + __ And(a7, a3, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte)); + __ Branch(&shift_arguments, ne, a7, Operand(zero_reg)); + + // Compute the receiver in sloppy mode. + // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2). + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(a2, sp, at); + __ ld(a2, MemOperand(a2, -kPointerSize)); + // a0: actual number of arguments + // a1: function + // a2: first argument + __ JumpIfSmi(a2, &convert_to_object, a6); + + __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); + __ Branch(&use_global_proxy, eq, a2, Operand(a3)); + __ LoadRoot(a3, Heap::kNullValueRootIndex); + __ Branch(&use_global_proxy, eq, a2, Operand(a3)); + + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ GetObjectType(a2, a3, a3); + __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + + __ bind(&convert_to_object); + // Enter an internal frame in order to preserve argument count. + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(a0); + __ Push(a0, a2); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(a2, v0); + + __ pop(a0); + __ SmiUntag(a0); + // Leave internal frame. + } + // Restore the function to a1, and the flag to a4. + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(at, sp, at); + __ ld(a1, MemOperand(at)); + __ Branch(USE_DELAY_SLOT, &patch_receiver); + __ li(a4, Operand(0, RelocInfo::NONE32)); + + __ bind(&use_global_proxy); + __ ld(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); + + __ bind(&patch_receiver); + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(a3, sp, at); + __ sd(a2, MemOperand(a3, -kPointerSize)); + + __ Branch(&shift_arguments); + } + + // 3b. Check for function proxy. + __ bind(&slow); + __ li(a4, Operand(1, RelocInfo::NONE32)); // Indicate function proxy. 
+ __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE)); + + __ bind(&non_function); + __ li(a4, Operand(2, RelocInfo::NONE32)); // Indicate non-function. + + // 3c. Patch the first argument when calling a non-function. The + // CALL_NON_FUNCTION builtin expects the non-function callee as + // receiver, so overwrite the first argument which will ultimately + // become the receiver. + // a0: actual number of arguments + // a1: function + // a4: call type (0: JS function, 1: function proxy, 2: non-function) + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(a2, sp, at); + __ sd(a1, MemOperand(a2, -kPointerSize)); + + // 4. Shift arguments and return address one slot down on the stack + // (overwriting the original receiver). Adjust argument count to make + // the original first argument the new receiver. + // a0: actual number of arguments + // a1: function + // a4: call type (0: JS function, 1: function proxy, 2: non-function) + __ bind(&shift_arguments); + { Label loop; + // Calculate the copy start address (destination). Copy end address is sp. + __ dsll(at, a0, kPointerSizeLog2); + __ daddu(a2, sp, at); + + __ bind(&loop); + __ ld(at, MemOperand(a2, -kPointerSize)); + __ sd(at, MemOperand(a2)); + __ Dsubu(a2, a2, Operand(kPointerSize)); + __ Branch(&loop, ne, a2, Operand(sp)); + // Adjust the actual number of arguments and remove the top element + // (which is a copy of the last argument). + __ Dsubu(a0, a0, Operand(1)); + __ Pop(); + } + + // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, + // or a function proxy via CALL_FUNCTION_PROXY. + // a0: actual number of arguments + // a1: function + // a4: call type (0: JS function, 1: function proxy, 2: non-function) + { Label function, non_proxy; + __ Branch(&function, eq, a4, Operand(zero_reg)); + // Expected number of arguments is 0 for CALL_NON_FUNCTION. 
+ __ mov(a2, zero_reg); + __ Branch(&non_proxy, ne, a4, Operand(1)); + + __ push(a1); // Re-add proxy object as additional argument. + __ Daddu(a0, a0, Operand(1)); + __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + __ bind(&non_proxy); + __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + __ bind(&function); + } + + // 5b. Get the code to call from the function and check that the number of + // expected arguments matches what we're providing. If so, jump + // (tail-call) to the code in register edx without checking arguments. + // a0: actual number of arguments + // a1: function + __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + // The argument count is stored as int32_t on 64-bit platforms. + // TODO(plind): Smi on 32-bit platforms. + __ lw(a2, + FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + // Check formal and actual parameter counts. + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET, ne, a2, Operand(a0)); + + __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + ParameterCount expected(0); + __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper()); +} + + +void Builtins::Generate_FunctionApply(MacroAssembler* masm) { + const int kIndexOffset = + StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize); + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize); + const int kArgsOffset = 2 * kPointerSize; + const int kRecvOffset = 3 * kPointerSize; + const int kFunctionOffset = 4 * kPointerSize; + + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function. 
+ __ push(a0); + __ ld(a0, MemOperand(fp, kArgsOffset)); // Get the args array. + __ push(a0); + // Returns (in v0) number of arguments to copy to stack as Smi. + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + // Make a2 the space we have left. The stack might already be overflowed + // here which will cause a2 to become negative. + __ dsubu(a2, sp, a2); + // Check if the arguments will overflow the stack. + __ SmiScale(a7, v0, kPointerSizeLog2); + __ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison. + + // Out of stack space. + __ ld(a1, MemOperand(fp, kFunctionOffset)); + __ Push(a1, v0); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + // End of stack check. + + // Push current limit and index. + __ bind(&okay); + __ mov(a1, zero_reg); + __ Push(v0, a1); // Limit and initial index. + + // Get the receiver. + __ ld(a0, MemOperand(fp, kRecvOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ ld(a1, MemOperand(fp, kFunctionOffset)); + __ GetObjectType(a1, a2, a2); + __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE)); + + // Change context eagerly to get the right global object if necessary. + __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in a1. + __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + + // Compute the receiver. + // Do not transform the receiver for strict mode functions. 
+ Label call_to_object, use_global_proxy; + __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset)); + __ And(a7, a7, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); + __ Branch(&push_receiver, ne, a7, Operand(zero_reg)); + + // Do not transform the receiver for native (Compilerhints already in a2). + __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset)); + __ And(a7, a7, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte)); + __ Branch(&push_receiver, ne, a7, Operand(zero_reg)); + + // Compute the receiver in sloppy mode. + __ JumpIfSmi(a0, &call_to_object); + __ LoadRoot(a1, Heap::kNullValueRootIndex); + __ Branch(&use_global_proxy, eq, a0, Operand(a1)); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ Branch(&use_global_proxy, eq, a0, Operand(a2)); + + // Check if the receiver is already a JavaScript object. + // a0: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ GetObjectType(a0, a1, a1); + __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Convert the receiver to a regular object. + // a0: receiver + __ bind(&call_to_object); + __ push(a0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. + __ Branch(&push_receiver); + + __ bind(&use_global_proxy); + __ ld(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ ld(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset)); + + // Push the receiver. + // a0: receiver + __ bind(&push_receiver); + __ push(a0); + + // Copy all arguments from the array to the stack. + Label entry, loop; + __ ld(a0, MemOperand(fp, kIndexOffset)); + __ Branch(&entry); + + // Load the current argument from the arguments array and push it to the + // stack. + // a0: current argument index + __ bind(&loop); + __ ld(a1, MemOperand(fp, kArgsOffset)); + __ Push(a1, a0); + + // Call the runtime to access the property in the arguments array. 
+ __ CallRuntime(Runtime::kGetProperty, 2); + __ push(v0); + + // Use inline caching to access the arguments. + __ ld(a0, MemOperand(fp, kIndexOffset)); + __ Daddu(a0, a0, Operand(Smi::FromInt(1))); + __ sd(a0, MemOperand(fp, kIndexOffset)); + + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ ld(a1, MemOperand(fp, kLimitOffset)); + __ Branch(&loop, ne, a0, Operand(a1)); + + // Call the function. + Label call_proxy; + ParameterCount actual(a0); + __ SmiUntag(a0); + __ ld(a1, MemOperand(fp, kFunctionOffset)); + __ GetObjectType(a1, a2, a2); + __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE)); + + __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); + + frame_scope.GenerateLeaveFrame(); + __ Ret(USE_DELAY_SLOT); + __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. + + // Call the function proxy. + __ bind(&call_proxy); + __ push(a1); // Add function proxy as last argument. + __ Daddu(a0, a0, Operand(1)); + __ li(a2, Operand(0, RelocInfo::NONE32)); + __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + // Tear down the internal frame and remove function, receiver and args. + } + + __ Ret(USE_DELAY_SLOT); + __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. +} + + +static void ArgumentAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- a0 : actual number of arguments + // -- a1 : function (passed through to callee) + // -- a2 : expected number of arguments + // ----------------------------------- + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + __ LoadRoot(a5, Heap::kRealStackLimitRootIndex); + // Make a5 the space we have left. 
The stack might already be overflowed + // here which will cause a5 to become negative. + __ dsubu(a5, sp, a5); + // Check if the arguments will overflow the stack. + __ dsll(at, a2, kPointerSizeLog2); + // Signed comparison. + __ Branch(stack_overflow, le, a5, Operand(at)); +} + + +static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { + // __ sll(a0, a0, kSmiTagSize); + __ dsll32(a0, a0, 0); + __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit()); + __ Daddu(fp, sp, + Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); +} + + +static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- v0 : result being passed through + // ----------------------------------- + // Get the number of arguments passed (as a smi), tear down the frame and + // then tear down the parameters. + __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp + + kPointerSize))); + __ mov(sp, fp); + __ MultiPop(fp.bit() | ra.bit()); + __ SmiScale(a4, a1, kPointerSizeLog2); + __ Daddu(sp, sp, a4); + // Adjust for the receiver. + __ Daddu(sp, sp, Operand(kPointerSize)); +} + + +void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { + // State setup as expected by MacroAssembler::InvokePrologue. + // ----------- S t a t e ------------- + // -- a0: actual arguments count + // -- a1: function (passed through to callee) + // -- a2: expected arguments count + // ----------------------------------- + + Label stack_overflow; + ArgumentAdaptorStackCheck(masm, &stack_overflow); + Label invoke, dont_adapt_arguments; + + Label enough, too_few; + __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + __ Branch(&dont_adapt_arguments, eq, + a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); + // We use Uless as the number of argument should always be greater than 0. 
+ __ Branch(&too_few, Uless, a0, Operand(a2)); + + { // Enough parameters: actual >= expected. + // a0: actual number of arguments as a smi + // a1: function + // a2: expected number of arguments + // a3: code entry to call + __ bind(&enough); + EnterArgumentsAdaptorFrame(masm); + + // Calculate copy start address into a0 and copy end address into a2. + __ SmiScale(a0, a0, kPointerSizeLog2); + __ Daddu(a0, fp, a0); + // Adjust for return address and receiver. + __ Daddu(a0, a0, Operand(2 * kPointerSize)); + // Compute copy end address. + __ dsll(a2, a2, kPointerSizeLog2); + __ dsubu(a2, a0, a2); + + // Copy the arguments (including the receiver) to the new stack frame. + // a0: copy start address + // a1: function + // a2: copy end address + // a3: code entry to call + + Label copy; + __ bind(©); + __ ld(a4, MemOperand(a0)); + __ push(a4); + __ Branch(USE_DELAY_SLOT, ©, ne, a0, Operand(a2)); + __ daddiu(a0, a0, -kPointerSize); // In delay slot. + + __ jmp(&invoke); + } + + { // Too few parameters: Actual < expected. + __ bind(&too_few); + EnterArgumentsAdaptorFrame(masm); + + // Calculate copy start address into a0 and copy end address is fp. + // a0: actual number of arguments as a smi + // a1: function + // a2: expected number of arguments + // a3: code entry to call + __ SmiScale(a0, a0, kPointerSizeLog2); + __ Daddu(a0, fp, a0); + // Adjust for return address and receiver. + __ Daddu(a0, a0, Operand(2 * kPointerSize)); + // Compute copy end address. Also adjust for return address. + __ Daddu(a7, fp, kPointerSize); + + // Copy the arguments (including the receiver) to the new stack frame. + // a0: copy start address + // a1: function + // a2: expected number of arguments + // a3: code entry to call + // a7: copy end address + Label copy; + __ bind(©); + __ ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver. 
+ __ Dsubu(sp, sp, kPointerSize); + __ Dsubu(a0, a0, kPointerSize); + __ Branch(USE_DELAY_SLOT, ©, ne, a0, Operand(a7)); + __ sd(a4, MemOperand(sp)); // In the delay slot. + + // Fill the remaining expected arguments with undefined. + // a1: function + // a2: expected number of arguments + // a3: code entry to call + __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); + __ dsll(a6, a2, kPointerSizeLog2); + __ Dsubu(a2, fp, Operand(a6)); + // Adjust for frame. + __ Dsubu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + + 2 * kPointerSize)); + + Label fill; + __ bind(&fill); + __ Dsubu(sp, sp, kPointerSize); + __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2)); + __ sd(a4, MemOperand(sp)); + } + + // Call the entry point. + __ bind(&invoke); + + __ Call(a3); + + // Store offset of return address for deoptimizer. + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); + + // Exit frame and return. + LeaveArgumentsAdaptorFrame(masm); + __ Ret(); + + + // ------------------------------------------- + // Don't adapt arguments. + // ------------------------------------------- + __ bind(&dont_adapt_arguments); + __ Jump(a3); + + __ bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ break_(0xCC); + } +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/codegen-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/codegen-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/codegen-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/codegen-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1142 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/codegen.h" +#include "src/macro-assembler.h" +#include "src/mips64/simulator-mips64.h" + +namespace v8 { +namespace internal { + + +#define __ masm. + + +#if defined(USE_SIMULATOR) +byte* fast_exp_mips_machine_code = NULL; +double fast_exp_simulator(double x) { + return Simulator::current(Isolate::Current())->CallFP( + fast_exp_mips_machine_code, x, 0); +} +#endif + + +UnaryMathFunction CreateExpFunction() { + if (!FLAG_fast_math) return &std::exp; + size_t actual_size; + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &std::exp; + ExternalReference::InitializeMathExpData(); + + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + + { + DoubleRegister input = f12; + DoubleRegister result = f0; + DoubleRegister double_scratch1 = f4; + DoubleRegister double_scratch2 = f6; + Register temp1 = a4; + Register temp2 = a5; + Register temp3 = a6; + + if (!IsMipsSoftFloatABI) { + // Input value is in f12 anyway, nothing to do. + } else { + __ Move(input, a0, a1); + } + __ Push(temp3, temp2, temp1); + MathExpGenerator::EmitMathExp( + &masm, input, result, double_scratch1, double_scratch2, + temp1, temp2, temp3); + __ Pop(temp3, temp2, temp1); + if (!IsMipsSoftFloatABI) { + // Result is already in f0, nothing to do. 
+ } else { + __ Move(v0, v1, result); + } + __ Ret(); + } + + CodeDesc desc; + masm.GetCode(&desc); + DCHECK(!RelocInfo::RequiresRelocation(desc)); + + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); + +#if !defined(USE_SIMULATOR) + return FUNCTION_CAST<UnaryMathFunction>(buffer); +#else + fast_exp_mips_machine_code = buffer; + return &fast_exp_simulator; +#endif +} + + +#if defined(V8_HOST_ARCH_MIPS) +MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { +#if defined(USE_SIMULATOR) + return stub; +#else + + size_t actual_size; + byte* buffer = + static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true)); + if (buffer == NULL) return stub; + + // This code assumes that cache lines are 32 bytes and if the cache line is + // larger it will not work correctly. + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + + { + Label lastb, unaligned, aligned, chkw, + loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop, + leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw, + ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop; + + // The size of each prefetch. + uint32_t pref_chunk = 32; + // The maximum size of a prefetch, it must not be less then pref_chunk. + // If the real size of a prefetch is greater then max_pref_size and + // the kPrefHintPrepareForStore hint is used, the code will not work + // correctly. + uint32_t max_pref_size = 128; + DCHECK(pref_chunk < max_pref_size); + + // pref_limit is set based on the fact that we never use an offset + // greater then 5 on a store pref and that a single pref can + // never be larger then max_pref_size. + uint32_t pref_limit = (5 * pref_chunk) + max_pref_size; + int32_t pref_hint_load = kPrefHintLoadStreamed; + int32_t pref_hint_store = kPrefHintPrepareForStore; + uint32_t loadstore_chunk = 4; + + // The initial prefetches may fetch bytes that are before the buffer being + // copied. 
Start copies with an offset of 4 so avoid this situation when + // using kPrefHintPrepareForStore. + DCHECK(pref_hint_store != kPrefHintPrepareForStore || + pref_chunk * 4 >= max_pref_size); + // If the size is less than 8, go to lastb. Regardless of size, + // copy dst pointer to v0 for the retuen value. + __ slti(a6, a2, 2 * loadstore_chunk); + __ bne(a6, zero_reg, &lastb); + __ mov(v0, a0); // In delay slot. + + // If src and dst have different alignments, go to unaligned, if they + // have the same alignment (but are not actually aligned) do a partial + // load/store to make them aligned. If they are both already aligned + // we can start copying at aligned. + __ xor_(t8, a1, a0); + __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement. + __ bne(t8, zero_reg, &unaligned); + __ subu(a3, zero_reg, a0); // In delay slot. + + __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. + __ beq(a3, zero_reg, &aligned); // Already aligned. + __ subu(a2, a2, a3); // In delay slot. a2 is the remining bytes count. + + __ lwr(t8, MemOperand(a1)); + __ addu(a1, a1, a3); + __ swr(t8, MemOperand(a0)); + __ addu(a0, a0, a3); + + // Now dst/src are both aligned to (word) aligned addresses. Set a2 to + // count how many bytes we have to copy after all the 64 byte chunks are + // copied and a3 to the dst pointer after all the 64 byte chunks have been + // copied. We will loop, incrementing a0 and a1 until a0 equals a3. + __ bind(&aligned); + __ andi(t8, a2, 0x3f); + __ beq(a2, t8, &chkw); // Less than 64? + __ subu(a3, a2, t8); // In delay slot. + __ addu(a3, a0, a3); // Now a3 is the final dst after loop. + + // When in the loop we prefetch with kPrefHintPrepareForStore hint, + // in this case the a0+x should be past the "a4-32" address. This means: + // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for + // x=64 the last "safe" a0 address is "a4-96". 
In the current version we + // will use "pref hint, 128(a0)", so "a4-160" is the limit. + if (pref_hint_store == kPrefHintPrepareForStore) { + __ addu(a4, a0, a2); // a4 is the "past the end" address. + __ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address. + } + + __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); + + if (pref_hint_store != kPrefHintPrepareForStore) { + __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); + } + __ bind(&loop16w); + __ lw(a4, MemOperand(a1)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch. + __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg)); + } + __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot. 
+ + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&skip_pref); + __ lw(a6, MemOperand(a1, 2, loadstore_chunk)); + __ lw(a7, MemOperand(a1, 3, loadstore_chunk)); + __ lw(t0, MemOperand(a1, 4, loadstore_chunk)); + __ lw(t1, MemOperand(a1, 5, loadstore_chunk)); + __ lw(t2, MemOperand(a1, 6, loadstore_chunk)); + __ lw(t3, MemOperand(a1, 7, loadstore_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); + + __ sw(a4, MemOperand(a0)); + __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); + __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); + __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); + + __ lw(a4, MemOperand(a1, 8, loadstore_chunk)); + __ lw(a5, MemOperand(a1, 9, loadstore_chunk)); + __ lw(a6, MemOperand(a1, 10, loadstore_chunk)); + __ lw(a7, MemOperand(a1, 11, loadstore_chunk)); + __ lw(t0, MemOperand(a1, 12, loadstore_chunk)); + __ lw(t1, MemOperand(a1, 13, loadstore_chunk)); + __ lw(t2, MemOperand(a1, 14, loadstore_chunk)); + __ lw(t3, MemOperand(a1, 15, loadstore_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); + + __ sw(a4, MemOperand(a0, 8, loadstore_chunk)); + __ sw(a5, MemOperand(a0, 9, loadstore_chunk)); + __ sw(a6, MemOperand(a0, 10, loadstore_chunk)); + __ sw(a7, MemOperand(a0, 11, loadstore_chunk)); + __ sw(t0, MemOperand(a0, 12, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 13, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 14, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 15, loadstore_chunk)); + __ addiu(a0, a0, 16 * loadstore_chunk); + __ bne(a0, a3, &loop16w); + __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. + __ mov(a2, t8); + + // Here we have src and dest word-aligned but less than 64-bytes to go. 
+ // Check for a 32 bytes chunk and copy if there is one. Otherwise jump + // down to chk1w to handle the tail end of the copy. + __ bind(&chkw); + __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); + __ andi(t8, a2, 0x1f); + __ beq(a2, t8, &chk1w); // Less than 32? + __ nop(); // In delay slot. + __ lw(a4, MemOperand(a1)); + __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); + __ lw(a6, MemOperand(a1, 2, loadstore_chunk)); + __ lw(a7, MemOperand(a1, 3, loadstore_chunk)); + __ lw(t0, MemOperand(a1, 4, loadstore_chunk)); + __ lw(t1, MemOperand(a1, 5, loadstore_chunk)); + __ lw(t2, MemOperand(a1, 6, loadstore_chunk)); + __ lw(t3, MemOperand(a1, 7, loadstore_chunk)); + __ addiu(a1, a1, 8 * loadstore_chunk); + __ sw(a4, MemOperand(a0)); + __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); + __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); + __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); + __ addiu(a0, a0, 8 * loadstore_chunk); + + // Here we have less than 32 bytes to copy. Set up for a loop to copy + // one word at a time. Set a2 to count how many bytes we have to copy + // after all the word chunks are copied and a3 to the dst pointer after + // all the word chunks have been copied. We will loop, incrementing a0 + // and a1 untill a0 equals a3. + __ bind(&chk1w); + __ andi(a2, t8, loadstore_chunk - 1); + __ beq(a2, t8, &lastb); + __ subu(a3, t8, a2); // In delay slot. + __ addu(a3, a0, a3); + + __ bind(&wordCopy_loop); + __ lw(a7, MemOperand(a1)); + __ addiu(a0, a0, loadstore_chunk); + __ addiu(a1, a1, loadstore_chunk); + __ bne(a0, a3, &wordCopy_loop); + __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. 
+ + __ bind(&lastb); + __ Branch(&leave, le, a2, Operand(zero_reg)); + __ addu(a3, a0, a2); + + __ bind(&lastbloop); + __ lb(v1, MemOperand(a1)); + __ addiu(a0, a0, 1); + __ addiu(a1, a1, 1); + __ bne(a0, a3, &lastbloop); + __ sb(v1, MemOperand(a0, -1)); // In delay slot. + + __ bind(&leave); + __ jr(ra); + __ nop(); + + // Unaligned case. Only the dst gets aligned so we need to do partial + // loads of the source followed by normal stores to the dst (once we + // have aligned the destination). + __ bind(&unaligned); + __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. + __ beq(a3, zero_reg, &ua_chk16w); + __ subu(a2, a2, a3); // In delay slot. + + __ lwr(v1, MemOperand(a1)); + __ lwl(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ addu(a1, a1, a3); + __ swr(v1, MemOperand(a0)); + __ addu(a0, a0, a3); + + // Now the dst (but not the source) is aligned. Set a2 to count how many + // bytes we have to copy after all the 64 byte chunks are copied and a3 to + // the dst pointer after all the 64 byte chunks have been copied. We will + // loop, incrementing a0 and a1 until a0 equals a3. + __ bind(&ua_chk16w); + __ andi(t8, a2, 0x3f); + __ beq(a2, t8, &ua_chkw); + __ subu(a3, a2, t8); // In delay slot. 
+ __ addu(a3, a0, a3); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ addu(a4, a0, a2); + __ Subu(t9, a4, pref_limit); + } + + __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); + + if (pref_hint_store != kPrefHintPrepareForStore) { + __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); + } + + __ bind(&ua_loop16w); + __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); + __ lwr(a4, MemOperand(a1)); + __ lwr(a5, MemOperand(a1, 1, loadstore_chunk)); + __ lwr(a6, MemOperand(a1, 2, loadstore_chunk)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); + __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); + } + __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. + + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&ua_skip_pref); + __ lwr(t0, MemOperand(a1, 4, loadstore_chunk)); + __ lwr(t1, MemOperand(a1, 5, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 6, loadstore_chunk)); + __ lwr(t3, MemOperand(a1, 7, loadstore_chunk)); + __ lwl(a4, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a5, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a6, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a7, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t0, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 8, loadstore_chunk, 
MemOperand::offset_minus_one)); + __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); + __ sw(a4, MemOperand(a0)); + __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); + __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); + __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); + __ lwr(a4, MemOperand(a1, 8, loadstore_chunk)); + __ lwr(a5, MemOperand(a1, 9, loadstore_chunk)); + __ lwr(a6, MemOperand(a1, 10, loadstore_chunk)); + __ lwr(a7, MemOperand(a1, 11, loadstore_chunk)); + __ lwr(t0, MemOperand(a1, 12, loadstore_chunk)); + __ lwr(t1, MemOperand(a1, 13, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 14, loadstore_chunk)); + __ lwr(t3, MemOperand(a1, 15, loadstore_chunk)); + __ lwl(a4, + MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a5, + MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a6, + MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a7, + MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t0, + MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); + __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); + __ sw(a4, MemOperand(a0, 8, loadstore_chunk)); + __ sw(a5, MemOperand(a0, 9, loadstore_chunk)); + __ sw(a6, MemOperand(a0, 10, loadstore_chunk)); + __ sw(a7, MemOperand(a0, 11, loadstore_chunk)); + __ sw(t0, MemOperand(a0, 12, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 13, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 14, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 15, loadstore_chunk)); + __ 
addiu(a0, a0, 16 * loadstore_chunk); + __ bne(a0, a3, &ua_loop16w); + __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. + __ mov(a2, t8); + + // Here less than 64-bytes. Check for + // a 32 byte chunk and copy if there is one. Otherwise jump down to + // ua_chk1w to handle the tail end of the copy. + __ bind(&ua_chkw); + __ Pref(pref_hint_load, MemOperand(a1)); + __ andi(t8, a2, 0x1f); + + __ beq(a2, t8, &ua_chk1w); + __ nop(); // In delay slot. + __ lwr(a4, MemOperand(a1)); + __ lwr(a5, MemOperand(a1, 1, loadstore_chunk)); + __ lwr(a6, MemOperand(a1, 2, loadstore_chunk)); + __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); + __ lwr(t0, MemOperand(a1, 4, loadstore_chunk)); + __ lwr(t1, MemOperand(a1, 5, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 6, loadstore_chunk)); + __ lwr(t3, MemOperand(a1, 7, loadstore_chunk)); + __ lwl(a4, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a5, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a6, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(a7, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t0, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + __ addiu(a1, a1, 8 * loadstore_chunk); + __ sw(a4, MemOperand(a0)); + __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); + __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); + __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); + __ addiu(a0, a0, 8 * loadstore_chunk); + + // Less than 32 bytes to copy. 
Set up for a loop to + // copy one word at a time. + __ bind(&ua_chk1w); + __ andi(a2, t8, loadstore_chunk - 1); + __ beq(a2, t8, &ua_smallCopy); + __ subu(a3, t8, a2); // In delay slot. + __ addu(a3, a0, a3); + + __ bind(&ua_wordCopy_loop); + __ lwr(v1, MemOperand(a1)); + __ lwl(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ addiu(a0, a0, loadstore_chunk); + __ addiu(a1, a1, loadstore_chunk); + __ bne(a0, a3, &ua_wordCopy_loop); + __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. + + // Copy the last 8 bytes. + __ bind(&ua_smallCopy); + __ beq(a2, zero_reg, &leave); + __ addu(a3, a0, a2); // In delay slot. + + __ bind(&ua_smallCopy_loop); + __ lb(v1, MemOperand(a1)); + __ addiu(a0, a0, 1); + __ addiu(a1, a1, 1); + __ bne(a0, a3, &ua_smallCopy_loop); + __ sb(v1, MemOperand(a0, -1)); // In delay slot. + + __ jr(ra); + __ nop(); + } + CodeDesc desc; + masm.GetCode(&desc); + DCHECK(!RelocInfo::RequiresRelocation(desc)); + + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); + return FUNCTION_CAST<MemCopyUint8Function>(buffer); +#endif +} +#endif + +UnaryMathFunction CreateSqrtFunction() { +#if defined(USE_SIMULATOR) + return &std::sqrt; +#else + size_t actual_size; + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &std::sqrt; + + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + + __ MovFromFloatParameter(f12); + __ sqrt_d(f0, f12); + __ MovToFloatResult(f0); + __ Ret(); + + CodeDesc desc; + masm.GetCode(&desc); + DCHECK(!RelocInfo::RequiresRelocation(desc)); + + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); + return FUNCTION_CAST<UnaryMathFunction>(buffer); +#endif +} + +#undef __ + + +// ------------------------------------------------------------------------- +// Platform-specific RuntimeCallHelper functions. 
+ +void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { + masm->EnterFrame(StackFrame::INTERNAL); + DCHECK(!masm->has_frame()); + masm->set_has_frame(true); +} + + +void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { + masm->LeaveFrame(StackFrame::INTERNAL); + DCHECK(masm->has_frame()); + masm->set_has_frame(false); +} + + +// ------------------------------------------------------------------------- +// Code generators + +#define __ ACCESS_MASM(masm) + +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* allocation_memento_found) { + Register scratch_elements = a4; + DCHECK(!AreAliased(receiver, key, value, target_map, + scratch_elements)); + + if (mode == TRACK_ALLOCATION_SITE) { + __ JumpIfJSArrayHasAllocationMemento( + receiver, scratch_elements, allocation_memento_found); + } + + // Set transitioned map. + __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, + HeapObject::kMapOffset, + target_map, + t1, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); +} + + +void ElementsTransitionGenerator::GenerateSmiToDouble( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Register ra contains the return address. + Label loop, entry, convert_hole, gc_required, only_change_map, done; + Register elements = a4; + Register length = a5; + Register array = a6; + Register array_end = array; + + // target_map parameter can be clobbered. + Register scratch1 = target_map; + Register scratch2 = t1; + Register scratch3 = a7; + + // Verify input registers don't conflict with locals. 
+ DCHECK(!AreAliased(receiver, key, value, target_map, + elements, length, array, scratch2)); + + Register scratch = t2; + if (mode == TRACK_ALLOCATION_SITE) { + __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); + } + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); + __ Branch(&only_change_map, eq, at, Operand(elements)); + + __ push(ra); + __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); + // elements: source FixedArray + // length: number of elements (smi-tagged) + + // Allocate new FixedDoubleArray. + __ SmiScale(scratch, length, kDoubleSizeLog2); + __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize); + __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT); + // array: destination FixedDoubleArray, not tagged as heap object + + // Set destination FixedDoubleArray's length and map. + __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex); + __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); + // Update receiver's map. + __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset)); + + __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, + HeapObject::kMapOffset, + target_map, + scratch2, + kRAHasBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + // Replace receiver's backing store with newly created FixedDoubleArray. + __ Daddu(scratch1, array, Operand(kHeapObjectTag)); + __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset)); + __ RecordWriteField(receiver, + JSObject::kElementsOffset, + scratch1, + scratch2, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + + // Prepare for conversion loop. 
+ __ Daddu(scratch1, elements, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize)); + __ SmiScale(array_end, length, kDoubleSizeLog2); + __ Daddu(array_end, array_end, scratch3); + + // Repurpose registers no longer in use. + Register hole_lower = elements; + Register hole_upper = length; + __ li(hole_lower, Operand(kHoleNanLower32)); + // scratch1: begin of source FixedArray element fields, not tagged + // hole_lower: kHoleNanLower32 + // hole_upper: kHoleNanUpper32 + // array_end: end of destination FixedDoubleArray, not tagged + // scratch3: begin of FixedDoubleArray element fields, not tagged + __ Branch(USE_DELAY_SLOT, &entry); + __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot. + + __ bind(&only_change_map); + __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, + HeapObject::kMapOffset, + target_map, + scratch2, + kRAHasBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Branch(&done); + + // Call into runtime if GC is required. + __ bind(&gc_required); + __ ld(ra, MemOperand(sp, 0)); + __ Branch(USE_DELAY_SLOT, fail); + __ daddiu(sp, sp, kPointerSize); // In delay slot. + + // Convert and copy elements. + __ bind(&loop); + __ ld(scratch2, MemOperand(scratch1)); + __ Daddu(scratch1, scratch1, kIntSize); + // scratch2: current element + __ JumpIfNotSmi(scratch2, &convert_hole); + __ SmiUntag(scratch2); + + // Normal smi, convert to double and store. + __ mtc1(scratch2, f0); + __ cvt_d_w(f0, f0); + __ sdc1(f0, MemOperand(scratch3)); + __ Branch(USE_DELAY_SLOT, &entry); + __ daddiu(scratch3, scratch3, kDoubleSize); // In delay slot. + + // Hole found, store the-hole NaN. + __ bind(&convert_hole); + if (FLAG_debug_code) { + // Restore a "smi-untagged" heap object. 
+ __ Or(scratch2, scratch2, Operand(1)); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2)); + } + // mantissa + __ sw(hole_lower, MemOperand(scratch3)); + // exponent + __ sw(hole_upper, MemOperand(scratch3, kIntSize)); + __ Daddu(scratch3, scratch3, kDoubleSize); + + __ bind(&entry); + __ Branch(&loop, lt, scratch3, Operand(array_end)); + + __ bind(&done); + __ pop(ra); +} + + +void ElementsTransitionGenerator::GenerateDoubleToObject( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Register ra contains the return address. + Label entry, loop, convert_hole, gc_required, only_change_map; + Register elements = a4; + Register array = a6; + Register length = a5; + Register scratch = t1; + + // Verify input registers don't conflict with locals. + DCHECK(!AreAliased(receiver, key, value, target_map, + elements, array, length, scratch)); + if (mode == TRACK_ALLOCATION_SITE) { + __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); + } + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); + __ Branch(&only_change_map, eq, at, Operand(elements)); + + __ MultiPush( + value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit()); + + __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); + // elements: source FixedArray + // length: number of elements (smi-tagged) + + // Allocate new FixedArray. + // Re-use value and target_map registers, as they have been saved on the + // stack. 
+ Register array_size = value; + Register allocate_scratch = target_map; + __ SmiScale(array_size, length, kPointerSizeLog2); + __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize); + __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required, + NO_ALLOCATION_FLAGS); + // array: destination FixedArray, not tagged as heap object + // Set destination FixedDoubleArray's length and map. + __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); + __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); + __ sd(scratch, MemOperand(array, HeapObject::kMapOffset)); + + // Prepare for conversion loop. + Register src_elements = elements; + Register dst_elements = target_map; + Register dst_end = length; + Register heap_number_map = scratch; + __ Daddu(src_elements, src_elements, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); + __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize)); + __ Daddu(array, array, Operand(kHeapObjectTag)); + __ SmiScale(dst_end, dst_end, kPointerSizeLog2); + __ Daddu(dst_end, dst_elements, dst_end); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + // Using offsetted addresses. + // dst_elements: begin of destination FixedArray element fields, not tagged + // src_elements: begin of source FixedDoubleArray element fields, not tagged, + // points to the exponent + // dst_end: end of destination FixedArray, not tagged + // array: destination FixedArray + // heap_number_map: heap number map + __ Branch(&entry); + + // Call into runtime if GC is required. 
+ __ bind(&gc_required); + __ MultiPop( + value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit()); + + __ Branch(fail); + + __ bind(&loop); + Register upper_bits = key; + __ lw(upper_bits, MemOperand(src_elements)); + __ Daddu(src_elements, src_elements, kDoubleSize); + // upper_bits: current element's upper 32 bit + // src_elements: address of next element's upper 32 bit + __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32)); + + // Non-hole double, copy value into a heap number. + Register heap_number = receiver; + Register scratch2 = value; + Register scratch3 = t2; + __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map, + &gc_required); + // heap_number: new heap number + // Load mantissa of current element, src_elements + // point to exponent of next element. + __ lw(scratch2, MemOperand(heap_number, -12)); + __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset)); + __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset)); + __ mov(scratch2, dst_elements); + __ sd(heap_number, MemOperand(dst_elements)); + __ Daddu(dst_elements, dst_elements, kPointerSize); + __ RecordWrite(array, + scratch2, + heap_number, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Branch(&entry); + + // Replace the-hole NaN with the-hole pointer. + __ bind(&convert_hole); + __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); + __ sd(scratch2, MemOperand(dst_elements)); + __ Daddu(dst_elements, dst_elements, kPointerSize); + + __ bind(&entry); + __ Branch(&loop, lt, dst_elements, Operand(dst_end)); + + __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit()); + // Replace receiver's backing store with newly created and filled FixedArray. 
+ __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ RecordWriteField(receiver, + JSObject::kElementsOffset, + array, + scratch, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ pop(ra); + + __ bind(&only_change_map); + // Update receiver's map. + __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ RecordWriteField(receiver, + HeapObject::kMapOffset, + target_map, + scratch, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); +} + + +void StringCharLoadGenerator::Generate(MacroAssembler* masm, + Register string, + Register index, + Register result, + Label* call_runtime) { + // Fetch the instance type of the receiver into result register. + __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for indirect strings. + Label check_sequential; + __ And(at, result, Operand(kIsIndirectStringMask)); + __ Branch(&check_sequential, eq, at, Operand(zero_reg)); + + // Dispatch on the indirect string shape: slice or cons. + Label cons_string; + __ And(at, result, Operand(kSlicedNotConsMask)); + __ Branch(&cons_string, eq, at, Operand(zero_reg)); + + // Handle slices. + Label indirect_string_loaded; + __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); + __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset)); + __ dsra32(at, result, 0); + __ Daddu(index, index, at); + __ jmp(&indirect_string_loaded); + + // Handle cons strings. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. 
+ __ bind(&cons_string); + __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset)); + __ LoadRoot(at, Heap::kempty_stringRootIndex); + __ Branch(call_runtime, ne, result, Operand(at)); + // Get the first of the two strings and load its instance type. + __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset)); + + __ bind(&indirect_string_loaded); + __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + + // Distinguish sequential and external strings. Only these two string + // representations can reach here (slices and flat cons strings have been + // reduced to the underlying sequential or external string). + Label external_string, check_encoding; + __ bind(&check_sequential); + STATIC_ASSERT(kSeqStringTag == 0); + __ And(at, result, Operand(kStringRepresentationMask)); + __ Branch(&external_string, ne, at, Operand(zero_reg)); + + // Prepare sequential strings + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ Daddu(string, + string, + SeqTwoByteString::kHeaderSize - kHeapObjectTag); + __ jmp(&check_encoding); + + // Handle external strings. + __ bind(&external_string); + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. + __ And(at, result, Operand(kIsIndirectStringMask)); + __ Assert(eq, kExternalStringExpectedButNotFound, + at, Operand(zero_reg)); + } + // Rule out short external strings. + STATIC_ASSERT(kShortExternalStringTag != 0); + __ And(at, result, Operand(kShortExternalStringMask)); + __ Branch(call_runtime, ne, at, Operand(zero_reg)); + __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); + + Label ascii, done; + __ bind(&check_encoding); + STATIC_ASSERT(kTwoByteStringTag == 0); + __ And(at, result, Operand(kStringEncodingMask)); + __ Branch(&ascii, ne, at, Operand(zero_reg)); + // Two-byte string. 
+ __ dsll(at, index, 1); + __ Daddu(at, string, at); + __ lhu(result, MemOperand(at)); + __ jmp(&done); + __ bind(&ascii); + // Ascii string. + __ Daddu(at, string, index); + __ lbu(result, MemOperand(at)); + __ bind(&done); +} + + +static MemOperand ExpConstant(int index, Register base) { + return MemOperand(base, index * kDoubleSize); +} + + +void MathExpGenerator::EmitMathExp(MacroAssembler* masm, + DoubleRegister input, + DoubleRegister result, + DoubleRegister double_scratch1, + DoubleRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3) { + DCHECK(!input.is(result)); + DCHECK(!input.is(double_scratch1)); + DCHECK(!input.is(double_scratch2)); + DCHECK(!result.is(double_scratch1)); + DCHECK(!result.is(double_scratch2)); + DCHECK(!double_scratch1.is(double_scratch2)); + DCHECK(!temp1.is(temp2)); + DCHECK(!temp1.is(temp3)); + DCHECK(!temp2.is(temp3)); + DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!masm->serializer_enabled()); // External references not serializable. 
+ + Label zero, infinity, done; + __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); + + __ ldc1(double_scratch1, ExpConstant(0, temp3)); + __ BranchF(&zero, NULL, ge, double_scratch1, input); + + __ ldc1(double_scratch2, ExpConstant(1, temp3)); + __ BranchF(&infinity, NULL, ge, input, double_scratch2); + + __ ldc1(double_scratch1, ExpConstant(3, temp3)); + __ ldc1(result, ExpConstant(4, temp3)); + __ mul_d(double_scratch1, double_scratch1, input); + __ add_d(double_scratch1, double_scratch1, result); + __ FmoveLow(temp2, double_scratch1); + __ sub_d(double_scratch1, double_scratch1, result); + __ ldc1(result, ExpConstant(6, temp3)); + __ ldc1(double_scratch2, ExpConstant(5, temp3)); + __ mul_d(double_scratch1, double_scratch1, double_scratch2); + __ sub_d(double_scratch1, double_scratch1, input); + __ sub_d(result, result, double_scratch1); + __ mul_d(double_scratch2, double_scratch1, double_scratch1); + __ mul_d(result, result, double_scratch2); + __ ldc1(double_scratch2, ExpConstant(7, temp3)); + __ mul_d(result, result, double_scratch2); + __ sub_d(result, result, double_scratch1); + // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1. + DCHECK(*reinterpret_cast<double*> + (ExternalReference::math_exp_constants(8).address()) == 1); + __ Move(double_scratch2, 1); + __ add_d(result, result, double_scratch2); + __ dsrl(temp1, temp2, 11); + __ Ext(temp2, temp2, 0, 11); + __ Daddu(temp1, temp1, Operand(0x3ff)); + + // Must not call ExpConstant() after overwriting temp3! + __ li(temp3, Operand(ExternalReference::math_exp_log_table())); + __ dsll(at, temp2, 3); + __ Daddu(temp3, temp3, Operand(at)); + __ lwu(temp2, MemOperand(temp3, 0)); + __ lwu(temp3, MemOperand(temp3, kIntSize)); + // The first word is loaded is the lower number register. 
+ if (temp2.code() < temp3.code()) { + __ dsll(at, temp1, 20); + __ Or(temp1, temp3, at); + __ Move(double_scratch1, temp2, temp1); + } else { + __ dsll(at, temp1, 20); + __ Or(temp1, temp2, at); + __ Move(double_scratch1, temp3, temp1); + } + __ mul_d(result, result, double_scratch1); + __ BranchShort(&done); + + __ bind(&zero); + __ Move(result, kDoubleRegZero); + __ BranchShort(&done); + + __ bind(&infinity); + __ ldc1(result, ExpConstant(2, temp3)); + + __ bind(&done); +} + +#ifdef DEBUG +// nop(CODE_AGE_MARKER_NOP) +static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; +#endif + + +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + // Since patcher is a large object, allocate it dynamically when needed, + // to avoid overloading the stack in stress conditions. + // DONT_FLUSH is used because the CodeAgingHelper is initialized early in + // the process, before MIPS simulator ICache is setup. + SmartPointer<CodePatcher> patcher( + new CodePatcher(young_sequence_.start(), + young_sequence_.length() / Assembler::kInstrSize, + CodePatcher::DONT_FLUSH)); + PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); + patcher->masm()->Push(ra, fp, cp, a1); + patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); + patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); + patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); + patcher->masm()->Daddu( + fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); +} + + +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; +} +#endif + + +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); + return result; +} + + +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, + MarkingParity* 
parity) { + if (IsYoungSequence(isolate, sequence)) { + *age = kNoAgeCodeAge; + *parity = NO_MARKING_PARITY; + } else { + Address target_address = Assembler::target_address_at( + sequence + Assembler::kInstrSize); + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(Isolate* isolate, + byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); + if (age == kNoAgeCodeAge) { + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); + CpuFeatures::FlushICache(sequence, young_length); + } else { + Code* stub = GetCodeAgeStub(isolate, age, parity); + CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); + // Mark this code sequence for FindPlatformCodeAgeSequence(). + patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP); + // Load the stub address to t9 and call it, + // GetCodeAgeAndParity() extracts the stub address from this instruction. + patcher.masm()->li( + t9, + Operand(reinterpret_cast<uint64_t>(stub->instruction_start())), + ADDRESS_LOAD); + patcher.masm()->nop(); // Prevent jalr to jal optimization. + patcher.masm()->jalr(t9, a0); + patcher.masm()->nop(); // Branch delay slot nop. + patcher.masm()->nop(); // Pad the empty space. + } +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/codegen-mips64.h nodejs-0.11.15/deps/v8/src/mips64/codegen-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/codegen-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/codegen-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,54 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ + +#ifndef V8_MIPS_CODEGEN_MIPS_H_ +#define V8_MIPS_CODEGEN_MIPS_H_ + + +#include "src/ast.h" +#include "src/ic-inl.h" + +namespace v8 { +namespace internal { + + +enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; + + +class StringCharLoadGenerator : public AllStatic { + public: + // Generates the code for handling different string types and loading the + // indexed character into |result|. We expect |index| as untagged input and + // |result| as untagged output. + static void Generate(MacroAssembler* masm, + Register string, + Register index, + Register result, + Label* call_runtime); + + private: + DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); +}; + + +class MathExpGenerator : public AllStatic { + public: + // Register input isn't modified. All other registers are clobbered. + static void EmitMathExp(MacroAssembler* masm, + DoubleRegister input, + DoubleRegister result, + DoubleRegister double_scratch1, + DoubleRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3); + + private: + DISALLOW_COPY_AND_ASSIGN(MathExpGenerator); +}; + +} } // namespace v8::internal + +#endif // V8_MIPS_CODEGEN_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/code-stubs-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/code-stubs-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/code-stubs-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/code-stubs-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5329 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/regexp-macro-assembler.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +void FastNewClosureStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a2 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); +} + + +void FastNewContextStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a1 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void ToNumberStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void NumberToStringStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); +} + + +void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a3, a2, a1 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Smi(), + Representation::Tagged() }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); +} + + +void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a3, a2, a1, a0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + 
Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); +} + + +void CallFunctionStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + UNIMPLEMENTED(); +} + + +void CallConstructStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + UNIMPLEMENTED(); +} + + +void CreateAllocationSiteStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a2, a3 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void RegExpConstructResultStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a2, a1, a0 }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); +} + + +void TransitionElementsKindStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a0, a1 }; + Address entry = + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(entry)); +} + + +void CompareNilICStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); +} + + +const Register InterfaceDescriptor::ContextRegister() { return cp; } + + +static void InitializeArrayConstructorDescriptor( + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { + // register state + // cp -- context + // a0 -- number of arguments + // a1 -- function + // a2 -- allocation site with elements kind + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; + 
+ if (constant_stack_parameter_count == 0) { + Register registers[] = { cp, a1, a2 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); + } else { + // stack param count needs (constructor pointer, and single argument) + Register registers[] = { cp, a1, a2, a0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); + } +} + + +static void InitializeInternalArrayConstructorDescriptor( + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { + // register state + // cp -- context + // a0 -- number of arguments + // a1 -- constructor function + Address deopt_handler = Runtime::FunctionForId( + Runtime::kInternalArrayConstructor)->entry; + + if (constant_stack_parameter_count == 0) { + Register registers[] = { cp, a1 }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); + } else { + // stack param count needs (constructor pointer, and single argument) + Register registers[] = { cp, a1, a0 }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); + } +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0); +} + + +void 
ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1); +} + + +void ToBooleanStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); +} + + +void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); +} + + +void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); +} + + +void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); +} + + +void BinaryOpICStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a1, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); +} + + +void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a2, a1, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); +} + + +void 
StringAddStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { cp, a1, a0 }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); +} + + +void CallDescriptors::InitializeForIsolate(Isolate* isolate) { + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::ArgumentAdaptorCall); + Register registers[] = { cp, // context + a1, // JSFunction + a0, // actual number of arguments + a2, // expected number of arguments + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // JSFunction + Representation::Integer32(), // actual number of arguments + Representation::Integer32(), // expected number of arguments + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::KeyedCall); + Register registers[] = { cp, // context + a2, // key + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // key + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::NamedCall); + Register registers[] = { cp, // context + a2, // name + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // name + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::CallHandler); + Register registers[] = { cp, // context + a0, // receiver + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // receiver + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* 
descriptor = + isolate->call_descriptor(Isolate::ApiFunctionCall); + Register registers[] = { cp, // context + a0, // callee + a4, // call_data + a2, // holder + a1, // api_function_address + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // callee + Representation::Tagged(), // call_data + Representation::Tagged(), // holder + Representation::External(), // api_function_address + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } +} + + +#define __ ACCESS_MASM(masm) + + +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc); +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* rhs_not_nan, + Label* slow, + bool strict); +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs); + + +void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { + // Update the static counter each time a new code stub is generated. + isolate()->counters()->code_stubs()->Increment(); + + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); + { + // Call the runtime system in a fresh internal frame. + FrameScope scope(masm, StackFrame::INTERNAL); + DCHECK((param_count == 0) || + a0.is(descriptor->GetEnvironmentParameterRegister(param_count - 1))); + // Push arguments, adjust sp. + __ Dsubu(sp, sp, Operand(param_count * kPointerSize)); + for (int i = 0; i < param_count; ++i) { + // Store argument to stack. + __ sd(descriptor->GetEnvironmentParameterRegister(i), + MemOperand(sp, (param_count-1-i) * kPointerSize)); + } + ExternalReference miss = descriptor->miss_handler(); + __ CallExternalReference(miss, param_count); + } + + __ Ret(); +} + + +// Takes a Smi and converts to an IEEE 64 bit floating point value in two +// registers. 
The format is 1 sign bit, 11 exponent bits (biased 1023) and +// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a +// scratch register. Destroys the source register. No GC occurs during this +// stub so you don't have to set up the frame. +class ConvertToDoubleStub : public PlatformCodeStub { + public: + ConvertToDoubleStub(Isolate* isolate, + Register result_reg_1, + Register result_reg_2, + Register source_reg, + Register scratch_reg) + : PlatformCodeStub(isolate), + result1_(result_reg_1), + result2_(result_reg_2), + source_(source_reg), + zeros_(scratch_reg) { } + + private: + Register result1_; + Register result2_; + Register source_; + Register zeros_; + + // Minor key encoding in 16 bits. + class ModeBits: public BitField<OverwriteMode, 0, 2> {}; + class OpBits: public BitField<Token::Value, 2, 14> {}; + + Major MajorKey() const { return ConvertToDouble; } + int MinorKey() const { + // Encode the parameters in a unique 16 bit value. + return result1_.code() + + (result2_.code() << 4) + + (source_.code() << 8) + + (zeros_.code() << 12); + } + + void Generate(MacroAssembler* masm); +}; + + +void ConvertToDoubleStub::Generate(MacroAssembler* masm) { +#ifndef BIG_ENDIAN_FLOATING_POINT + Register exponent = result1_; + Register mantissa = result2_; +#else + Register exponent = result2_; + Register mantissa = result1_; +#endif + Label not_special; + // Convert from Smi to integer. + __ SmiUntag(source_); + // Move sign bit from source to destination. This works because the sign bit + // in the exponent word of the double has the same position and polarity as + // the 2's complement sign bit in a Smi. + STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); + __ And(exponent, source_, Operand(HeapNumber::kSignMask)); + // Subtract from 0 if source was negative. + __ subu(at, zero_reg, source_); + __ Movn(source_, at, exponent); + + // We have -1, 0 or 1, which we treat specially. 
Register source_ contains + // absolute value: it is either equal to 1 (special case of -1 and 1), + // greater than 1 (not a special case) or less than 1 (special case of 0). + __ Branch(¬_special, gt, source_, Operand(1)); + + // For 1 or -1 we need to or in the 0 exponent (biased to 1023). + const uint32_t exponent_word_for_1 = + HeapNumber::kExponentBias << HeapNumber::kExponentShift; + // Safe to use 'at' as dest reg here. + __ Or(at, exponent, Operand(exponent_word_for_1)); + __ Movn(exponent, at, source_); // Write exp when source not 0. + // 1, 0 and -1 all have 0 for the second word. + __ Ret(USE_DELAY_SLOT); + __ mov(mantissa, zero_reg); + + __ bind(¬_special); + // Count leading zeros. + // Gets the wrong answer for 0, but we already checked for that case above. + __ Clz(zeros_, source_); + // Compute exponent and or it into the exponent register. + // We use mantissa as a scratch register here. + __ li(mantissa, Operand(31 + HeapNumber::kExponentBias)); + __ subu(mantissa, mantissa, zeros_); + __ sll(mantissa, mantissa, HeapNumber::kExponentShift); + __ Or(exponent, exponent, mantissa); + + // Shift up the source chopping the top bit off. + __ Addu(zeros_, zeros_, Operand(1)); + // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. + __ sllv(source_, source_, zeros_); + // Compute lower part of fraction (last 12 bits). + __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord); + // And the top (top 20 bits). + __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord); + + __ Ret(USE_DELAY_SLOT); + __ or_(exponent, exponent, source_); +} + + +void DoubleToIStub::Generate(MacroAssembler* masm) { + Label out_of_range, only_low, negate, done; + Register input_reg = source(); + Register result_reg = destination(); + + int double_offset = offset(); + // Account for saved regs if input is sp. 
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize; + + Register scratch = + GetRegisterThatIsNotOneOf(input_reg, result_reg); + Register scratch2 = + GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); + Register scratch3 = + GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2); + DoubleRegister double_scratch = kLithiumScratchDouble; + + __ Push(scratch, scratch2, scratch3); + if (!skip_fastpath()) { + // Load double input. + __ ldc1(double_scratch, MemOperand(input_reg, double_offset)); + + // Clear cumulative exception flags and save the FCSR. + __ cfc1(scratch2, FCSR); + __ ctc1(zero_reg, FCSR); + + // Try a conversion to a signed integer. + __ Trunc_w_d(double_scratch, double_scratch); + // Move the converted value into the result register. + __ mfc1(scratch3, double_scratch); + + // Retrieve and restore the FCSR. + __ cfc1(scratch, FCSR); + __ ctc1(scratch2, FCSR); + + // Check for overflow and NaNs. + __ And( + scratch, scratch, + kFCSROverflowFlagMask | kFCSRUnderflowFlagMask + | kFCSRInvalidOpFlagMask); + // If we had no exceptions then set result_reg and we are done. + Label error; + __ Branch(&error, ne, scratch, Operand(zero_reg)); + __ Move(result_reg, scratch3); + __ Branch(&done); + __ bind(&error); + } + + // Load the double value and perform a manual truncation. + Register input_high = scratch2; + Register input_low = scratch3; + + __ lw(input_low, MemOperand(input_reg, double_offset)); + __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize)); + + Label normal_exponent, restore_sign; + // Extract the biased exponent in result. + __ Ext(result_reg, + input_high, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + + // Check for Infinity and NaNs, which should return 0. + __ Subu(scratch, result_reg, HeapNumber::kExponentMask); + __ Movz(result_reg, zero_reg, scratch); + __ Branch(&done, eq, scratch, Operand(zero_reg)); + + // Express exponent as delta to (number of mantissa bits + 31). 
+ __ Subu(result_reg, + result_reg, + Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); + + // If the delta is strictly positive, all bits would be shifted away, + // which means that we can return 0. + __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg)); + __ mov(result_reg, zero_reg); + __ Branch(&done); + + __ bind(&normal_exponent); + const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; + // Calculate shift. + __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits)); + + // Save the sign. + Register sign = result_reg; + result_reg = no_reg; + __ And(sign, input_high, Operand(HeapNumber::kSignMask)); + + // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need + // to check for this specific case. + Label high_shift_needed, high_shift_done; + __ Branch(&high_shift_needed, lt, scratch, Operand(32)); + __ mov(input_high, zero_reg); + __ Branch(&high_shift_done); + __ bind(&high_shift_needed); + + // Set the implicit 1 before the mantissa part in input_high. + __ Or(input_high, + input_high, + Operand(1 << HeapNumber::kMantissaBitsInTopWord)); + // Shift the mantissa bits to the correct position. + // We don't need to clear non-mantissa bits as they will be shifted away. + // If they weren't, it would mean that the answer is in the 32bit range. + __ sllv(input_high, input_high, scratch); + + __ bind(&high_shift_done); + + // Replace the shifted bits with bits from the lower mantissa word. + Label pos_shift, shift_done; + __ li(at, 32); + __ subu(scratch, at, scratch); + __ Branch(&pos_shift, ge, scratch, Operand(zero_reg)); + + // Negate scratch. + __ Subu(scratch, zero_reg, scratch); + __ sllv(input_low, input_low, scratch); + __ Branch(&shift_done); + + __ bind(&pos_shift); + __ srlv(input_low, input_low, scratch); + + __ bind(&shift_done); + __ Or(input_high, input_high, Operand(input_low)); + // Restore sign if necessary. 
+ __ mov(scratch, sign); + result_reg = sign; + sign = no_reg; + __ Subu(result_reg, zero_reg, input_high); + __ Movz(result_reg, input_high, scratch); + + __ bind(&done); + + __ Pop(scratch, scratch2, scratch3); + __ Ret(); +} + + +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { + WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3); + WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0); + stub1.GetCode(); + stub2.GetCode(); +} + + +// See comment for class, this does NOT work for int32's that are in Smi range. +void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { + Label max_negative_int; + // the_int_ has the answer which is a signed int32 but not a Smi. + // We test for the special value that has a different exponent. + STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); + // Test sign, and save for later conditionals. + __ And(sign_, the_int_, Operand(0x80000000u)); + __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u)); + + // Set up the correct exponent in scratch_. All non-Smi int32s have the same. + // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). + uint32_t non_smi_exponent = + (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; + __ li(scratch_, Operand(non_smi_exponent)); + // Set the sign bit in scratch_ if the value was negative. + __ or_(scratch_, scratch_, sign_); + // Subtract from 0 if the value was negative. + __ subu(at, zero_reg, the_int_); + __ Movn(the_int_, at, sign_); + // We should be masking the implict first digit of the mantissa away here, + // but it just ends up combining harmlessly with the last digit of the + // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get + // the most significant 1 to hit the last bit of the 12 bit sign and exponent. 
+ DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ srl(at, the_int_, shift_distance); + __ or_(scratch_, scratch_, at); + __ sw(scratch_, FieldMemOperand(the_heap_number_, + HeapNumber::kExponentOffset)); + __ sll(scratch_, the_int_, 32 - shift_distance); + __ Ret(USE_DELAY_SLOT); + __ sw(scratch_, FieldMemOperand(the_heap_number_, + HeapNumber::kMantissaOffset)); + + __ bind(&max_negative_int); + // The max negative int32 is stored as a positive number in the mantissa of + // a double because it uses a sign bit instead of using two's complement. + // The actual mantissa bits stored are all 0 because the implicit most + // significant 1 bit is not stored. + non_smi_exponent += 1 << HeapNumber::kExponentShift; + __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent)); + __ sw(scratch_, + FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); + __ mov(scratch_, zero_reg); + __ Ret(USE_DELAY_SLOT); + __ sw(scratch_, + FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); +} + + +// Handle the case where the lhs and rhs are the same object. +// Equality is almost reflexive (everything but NaN), so this is a test +// for "identity and not NaN". +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc) { + Label not_identical; + Label heap_number, return_equal; + Register exp_mask_reg = t1; + + __ Branch(¬_identical, ne, a0, Operand(a1)); + + __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); + + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. 
+ if (cc == less || cc == greater) { + __ GetObjectType(a0, t0, t0); + __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); + } else { + __ GetObjectType(a0, t0, t0); + __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE)); + // Comparing JS objects with <=, >= is complicated. + if (cc != eq) { + __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cc == less_equal || cc == greater_equal) { + __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE)); + __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); + __ Branch(&return_equal, ne, a0, Operand(a6)); + DCHECK(is_int16(GREATER) && is_int16(LESS)); + __ Ret(USE_DELAY_SLOT); + if (cc == le) { + // undefined <= undefined should fail. + __ li(v0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ li(v0, Operand(LESS)); + } + } + } + } + + __ bind(&return_equal); + DCHECK(is_int16(GREATER) && is_int16(LESS)); + __ Ret(USE_DELAY_SLOT); + if (cc == less) { + __ li(v0, Operand(GREATER)); // Things aren't less than themselves. + } else if (cc == greater) { + __ li(v0, Operand(LESS)); // Things aren't greater than themselves. + } else { + __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. + } + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cc != lt && cc != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). 
+ __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ And(a7, a6, Operand(exp_mask_reg)); + // If all bits not set (ne cond), then not a NaN, objects are equal. + __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg)); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord); + // Or with all low-bits of mantissa. + __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); + __ Or(v0, a7, Operand(a6)); + // For equal we already have the right value in v0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load v0 with the failing + // value if it's a NaN. + if (cc != eq) { + // All-zero means Infinity means equal. + __ Ret(eq, v0, Operand(zero_reg)); + DCHECK(is_int16(GREATER) && is_int16(LESS)); + __ Ret(USE_DELAY_SLOT); + if (cc == le) { + __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ li(v0, Operand(LESS)); // NaN >= NaN should fail. + } + } + } + // No fall through here. + + __ bind(¬_identical); +} + + +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* both_loaded_as_doubles, + Label* slow, + bool strict) { + DCHECK((lhs.is(a0) && rhs.is(a1)) || + (lhs.is(a1) && rhs.is(a0))); + + Label lhs_is_smi; + __ JumpIfSmi(lhs, &lhs_is_smi); + // Rhs is a Smi. + // Check whether the non-smi is a heap number. + __ GetObjectType(lhs, t0, t0); + if (strict) { + // If lhs was not a number and rhs was a Smi then strict equality cannot + // succeed. Return non-equal (lhs is already not zero). + __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE)); + __ mov(v0, lhs); + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); + } + // Rhs is a smi, lhs is a number. 
+ // Convert smi rhs to double. + __ SmiUntag(at, rhs); + __ mtc1(at, f14); + __ cvt_d_w(f14, f14); + __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + + // We now have both loaded as doubles. + __ jmp(both_loaded_as_doubles); + + __ bind(&lhs_is_smi); + // Lhs is a Smi. Check whether the non-smi is a heap number. + __ GetObjectType(rhs, t0, t0); + if (strict) { + // If lhs was not a number and rhs was a Smi then strict equality cannot + // succeed. Return non-equal. + __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE)); + __ li(v0, Operand(1)); + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); + } + + // Lhs is a smi, rhs is a number. + // Convert smi lhs to double. + __ SmiUntag(at, lhs); + __ mtc1(at, f12); + __ cvt_d_w(f12, f12); + __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + // Fall through to both_loaded_as_doubles. +} + + +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs) { + // If either operand is a JS object or an oddball value, then they are + // not equal since their pointers are different. + // There is no test for undetectability in strict equality. + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + Label first_non_object; + // Get the type of the first operand into a2 and compare it with + // FIRST_SPEC_OBJECT_TYPE. + __ GetObjectType(lhs, a2, a2); + __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Return non-zero. + Label return_not_equal; + __ bind(&return_not_equal); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(1)); + + __ bind(&first_non_object); + // Check for oddballs: true, false, null, undefined. 
+ __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE)); + + __ GetObjectType(rhs, a3, a3); + __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Check for oddballs: true, false, null, undefined. + __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE)); + + // Now that we have the types we might as well check for + // internalized-internalized. + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ Or(a2, a2, Operand(a3)); + __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask)); + __ Branch(&return_not_equal, eq, at, Operand(zero_reg)); +} + + +static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* both_loaded_as_doubles, + Label* not_heap_numbers, + Label* slow) { + __ GetObjectType(lhs, a3, a2); + __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); + __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); + // If first was a heap number & second wasn't, go to slow case. + __ Branch(slow, ne, a3, Operand(a2)); + + // Both are heap numbers. Load them up then jump to the code we have + // for that. + __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + + __ jmp(both_loaded_as_doubles); +} + + +// Fast negative check for internalized-to-internalized equality. +static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* possible_strings, + Label* not_both_strings) { + DCHECK((lhs.is(a0) && rhs.is(a1)) || + (lhs.is(a1) && rhs.is(a0))); + + // a2 is object type of rhs. 
+ Label object_test; + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ And(at, a2, Operand(kIsNotStringMask)); + __ Branch(&object_test, ne, at, Operand(zero_reg)); + __ And(at, a2, Operand(kIsNotInternalizedMask)); + __ Branch(possible_strings, ne, at, Operand(zero_reg)); + __ GetObjectType(rhs, a3, a3); + __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE)); + __ And(at, a3, Operand(kIsNotInternalizedMask)); + __ Branch(possible_strings, ne, at, Operand(zero_reg)); + + // Both are internalized strings. We already checked they weren't the same + // pointer so they are not equal. + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(1)); // Non-zero indicates not equal. + + __ bind(&object_test); + __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ GetObjectType(rhs, a2, a3); + __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // If both objects are undetectable, they are equal. Otherwise, they + // are not equal, since they are different objects and an object is not + // equal to undefined. + __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset)); + __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset)); + __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset)); + __ and_(a0, a2, a3); + __ And(a0, a0, Operand(1 << Map::kIsUndetectable)); + __ Ret(USE_DELAY_SLOT); + __ xori(v0, a0, 1 << Map::kIsUndetectable); +} + + +static void ICCompareStub_CheckInputType(MacroAssembler* masm, + Register input, + Register scratch, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::NUMBER) { + __ JumpIfSmi(input, &ok); + __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, + DONT_DO_SMI_CHECK); + } + // We could be strict about internalized/string here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. 
+ __ bind(&ok); +} + + +// On entry a1 and a2 are the values to be compared. +// On exit a0 is 0, positive or negative to indicate the result of +// the comparison. +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { + Register lhs = a1; + Register rhs = a0; + Condition cc = GetCondition(); + + Label miss; + ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss); + ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss); + + Label slow; // Call builtin. + Label not_smis, both_loaded_as_doubles; + + Label not_two_smis, smi_done; + __ Or(a2, a1, a0); + __ JumpIfNotSmi(a2, ¬_two_smis); + __ SmiUntag(a1); + __ SmiUntag(a0); + + __ Ret(USE_DELAY_SLOT); + __ dsubu(v0, a1, a0); + __ bind(¬_two_smis); + + // NOTICE! This code is only reached after a smi-fast-case check, so + // it is certain that at least one operand isn't a smi. + + // Handle the case where the objects are identical. Either returns the answer + // or goes to slow. Only falls through if the objects were not identical. + EmitIdenticalObjectComparison(masm, &slow, cc); + + // If either is a Smi (we know that not both are), then they can only + // be strictly equal if the other is a HeapNumber. + STATIC_ASSERT(kSmiTag == 0); + DCHECK_EQ(0, Smi::FromInt(0)); + __ And(a6, lhs, Operand(rhs)); + __ JumpIfNotSmi(a6, ¬_smis, a4); + // One operand is a smi. EmitSmiNonsmiComparison generates code that can: + // 1) Return the answer. + // 2) Go to slow. + // 3) Fall through to both_loaded_as_doubles. + // 4) Jump to rhs_not_nan. + // In cases 3 and 4 we have found out we were dealing with a number-number + // comparison and the numbers have been loaded into f12 and f14 as doubles, + // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. + EmitSmiNonsmiComparison(masm, lhs, rhs, + &both_loaded_as_doubles, &slow, strict()); + + __ bind(&both_loaded_as_doubles); + // f12, f14 are the double representations of the left hand side + // and the right hand side if we have FPU. 
Otherwise a2, a3 represent + // left hand side and a0, a1 represent right hand side. + + Label nan; + __ li(a4, Operand(LESS)); + __ li(a5, Operand(GREATER)); + __ li(a6, Operand(EQUAL)); + + // Check if either rhs or lhs is NaN. + __ BranchF(NULL, &nan, eq, f12, f14); + + // Check if LESS condition is satisfied. If true, move conditionally + // result to v0. + if (kArchVariant != kMips64r6) { + __ c(OLT, D, f12, f14); + __ Movt(v0, a4); + // Use previous check to store conditionally to v0 oposite condition + // (GREATER). If rhs is equal to lhs, this will be corrected in next + // check. + __ Movf(v0, a5); + // Check if EQUAL condition is satisfied. If true, move conditionally + // result to v0. + __ c(EQ, D, f12, f14); + __ Movt(v0, a6); + } else { + Label skip; + __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14); + __ mov(v0, a4); // Return LESS as result. + + __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14); + __ mov(v0, a6); // Return EQUAL as result. + + __ mov(v0, a5); // Return GREATER as result. + __ bind(&skip); + } + __ Ret(); + + __ bind(&nan); + // NaN comparisons always fail. + // Load whatever we need in v0 to make the comparison fail. + DCHECK(is_int16(GREATER) && is_int16(LESS)); + __ Ret(USE_DELAY_SLOT); + if (cc == lt || cc == le) { + __ li(v0, Operand(GREATER)); + } else { + __ li(v0, Operand(LESS)); + } + + + __ bind(¬_smis); + // At this point we know we are dealing with two different objects, + // and neither of them is a Smi. The objects are in lhs_ and rhs_. + if (strict()) { + // This returns non-equal for some object types, or falls through if it + // was not lucky. + EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); + } + + Label check_for_internalized_strings; + Label flat_string_check; + // Check for heap-number-heap-number comparison. Can jump to slow case, + // or load both doubles and jump to the code that handles + // that case. If the inputs are not doubles then jumps to + // check_for_internalized_strings. 
+ // In this case a2 will contain the type of lhs_. + EmitCheckForTwoHeapNumbers(masm, + lhs, + rhs, + &both_loaded_as_doubles, + &check_for_internalized_strings, + &flat_string_check); + + __ bind(&check_for_internalized_strings); + if (cc == eq && !strict()) { + // Returns an answer for two internalized strings or two + // detectable objects. + // Otherwise jumps to string case or not both strings case. + // Assumes that a2 is the type of lhs_ on entry. + EmitCheckForInternalizedStringsOrObjects( + masm, lhs, rhs, &flat_string_check, &slow); + } + + // Check for both being sequential ASCII strings, and inline if that is the + // case. + __ bind(&flat_string_check); + + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow); + + __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2, + a3); + if (cc == eq) { + StringCompareStub::GenerateFlatAsciiStringEquals(masm, + lhs, + rhs, + a2, + a3, + a4); + } else { + StringCompareStub::GenerateCompareFlatAsciiStrings(masm, + lhs, + rhs, + a2, + a3, + a4, + a5); + } + // Never falls through to here. + + __ bind(&slow); + // Prepare for call to builtin. Push object pointers, a0 (lhs) first, + // a1 (rhs) second. + __ Push(lhs, rhs); + // Figure out which native to call and setup the arguments. + Builtins::JavaScript native; + if (cc == eq) { + native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + } else { + native = Builtins::COMPARE; + int ncr; // NaN compare result. + if (cc == lt || cc == le) { + ncr = GREATER; + } else { + DCHECK(cc == gt || cc == ge); // Remaining cases. + ncr = LESS; + } + __ li(a0, Operand(Smi::FromInt(ncr))); + __ push(a0); + } + + // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. 
+ __ InvokeBuiltin(native, JUMP_FUNCTION); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void StoreRegistersStateStub::Generate(MacroAssembler* masm) { + __ mov(t9, ra); + __ pop(ra); + __ PushSafepointRegisters(); + __ Jump(t9); +} + + +void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { + __ mov(t9, ra); + __ pop(ra); + __ PopSafepointRegisters(); + __ Jump(t9); +} + + +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ MultiPush(kJSCallerSaved | ra.bit()); + if (save_doubles_ == kSaveFPRegs) { + __ MultiPushFPU(kCallerSavedFPU); + } + const int argument_count = 1; + const int fp_argument_count = 0; + const Register scratch = a1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + __ MultiPopFPU(kCallerSavedFPU); + } + + __ MultiPop(kJSCallerSaved | ra.bit()); + __ Ret(); +} + + +void MathPowStub::Generate(MacroAssembler* masm) { + const Register base = a1; + const Register exponent = a2; + const Register heapnumbermap = a5; + const Register heapnumber = v0; + const DoubleRegister double_base = f2; + const DoubleRegister double_exponent = f4; + const DoubleRegister double_result = f0; + const DoubleRegister double_scratch = f6; + const FPURegister single_scratch = f8; + const Register scratch = t1; + const Register scratch2 = a7; + + Label call_runtime, done, int_exponent; + if (exponent_type_ == ON_STACK) { + Label base_is_smi, unpack_exponent; + // The exponent and base are supplied as arguments on the stack. + // This can only happen if the stub is called from non-optimized code. 
+ // Load input parameters from stack to double registers. + __ ld(base, MemOperand(sp, 1 * kPointerSize)); + __ ld(exponent, MemOperand(sp, 0 * kPointerSize)); + + __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); + + __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); + __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset)); + __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); + + __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); + __ jmp(&unpack_exponent); + + __ bind(&base_is_smi); + __ mtc1(scratch, single_scratch); + __ cvt_d_w(double_base, single_scratch); + __ bind(&unpack_exponent); + + __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); + + __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); + __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); + __ ldc1(double_exponent, + FieldMemOperand(exponent, HeapNumber::kValueOffset)); + } else if (exponent_type_ == TAGGED) { + // Base is already in double_base. + __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); + + __ ldc1(double_exponent, + FieldMemOperand(exponent, HeapNumber::kValueOffset)); + } + + if (exponent_type_ != INTEGER) { + Label int_exponent_convert; + // Detect integer exponents stored as double. + __ EmitFPUTruncate(kRoundToMinusInf, + scratch, + double_exponent, + at, + double_scratch, + scratch2, + kCheckForInexactConversion); + // scratch2 == 0 means there was no conversion error. + __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg)); + + if (exponent_type_ == ON_STACK) { + // Detect square root case. Crankshaft detects constant +/-0.5 at + // compile time and uses DoMathPowHalf instead. We then skip this check + // for non-constant cases of +/-0.5 as these hardly occur. + Label not_plus_half; + + // Test for 0.5. + __ Move(double_scratch, 0.5); + __ BranchF(USE_DELAY_SLOT, + ¬_plus_half, + NULL, + ne, + double_exponent, + double_scratch); + // double_scratch can be overwritten in the delay slot. 
+ // Calculates square root of base. Check for the special case of + // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). + __ Move(double_scratch, -V8_INFINITY); + __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch); + __ neg_d(double_result, double_scratch); + + // Add +0 to convert -0 to +0. + __ add_d(double_scratch, double_base, kDoubleRegZero); + __ sqrt_d(double_result, double_scratch); + __ jmp(&done); + + __ bind(¬_plus_half); + __ Move(double_scratch, -0.5); + __ BranchF(USE_DELAY_SLOT, + &call_runtime, + NULL, + ne, + double_exponent, + double_scratch); + // double_scratch can be overwritten in the delay slot. + // Calculates square root of base. Check for the special case of + // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). + __ Move(double_scratch, -V8_INFINITY); + __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch); + __ Move(double_result, kDoubleRegZero); + + // Add +0 to convert -0 to +0. + __ add_d(double_scratch, double_base, kDoubleRegZero); + __ Move(double_result, 1); + __ sqrt_d(double_scratch, double_scratch); + __ div_d(double_result, double_result, double_scratch); + __ jmp(&done); + } + + __ push(ra); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(0, 2, scratch2); + __ MovToFloatParameters(double_base, double_exponent); + __ CallCFunction( + ExternalReference::power_double_double_function(isolate()), + 0, 2); + } + __ pop(ra); + __ MovFromFloatResult(double_result); + __ jmp(&done); + + __ bind(&int_exponent_convert); + } + + // Calculate power with integer exponent. + __ bind(&int_exponent); + + // Get two copies of exponent in the registers scratch and exponent. + if (exponent_type_ == INTEGER) { + __ mov(scratch, exponent); + } else { + // Exponent has previously been stored into scratch as untagged integer. + __ mov(exponent, scratch); + } + + __ mov_d(double_scratch, double_base); // Back up base. 
+ __ Move(double_result, 1.0); + + // Get absolute value of exponent. + Label positive_exponent; + __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg)); + __ Dsubu(scratch, zero_reg, scratch); + __ bind(&positive_exponent); + + Label while_true, no_carry, loop_end; + __ bind(&while_true); + + __ And(scratch2, scratch, 1); + + __ Branch(&no_carry, eq, scratch2, Operand(zero_reg)); + __ mul_d(double_result, double_result, double_scratch); + __ bind(&no_carry); + + __ dsra(scratch, scratch, 1); + + __ Branch(&loop_end, eq, scratch, Operand(zero_reg)); + __ mul_d(double_scratch, double_scratch, double_scratch); + + __ Branch(&while_true); + + __ bind(&loop_end); + + __ Branch(&done, ge, exponent, Operand(zero_reg)); + __ Move(double_scratch, 1.0); + __ div_d(double_result, double_scratch, double_result); + // Test whether result is zero. Bail out to check for subnormal result. + // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. + __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero); + + // double_exponent may not contain the exponent value if the input was a + // smi. We set it with exponent value before bailing out. + __ mtc1(exponent, single_scratch); + __ cvt_d_w(double_exponent, single_scratch); + + // Returning or bailing out. + Counters* counters = isolate()->counters(); + if (exponent_type_ == ON_STACK) { + // The arguments are still on the stack. + __ bind(&call_runtime); + __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); + + // The stub is called from non-optimized code, which expects the result + // as heap number in exponent. 
+ __ bind(&done); + __ AllocateHeapNumber( + heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); + __ sdc1(double_result, + FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); + DCHECK(heapnumber.is(v0)); + __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); + __ DropAndRet(2); + } else { + __ push(ra); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(0, 2, scratch); + __ MovToFloatParameters(double_base, double_exponent); + __ CallCFunction( + ExternalReference::power_double_double_function(isolate()), + 0, 2); + } + __ pop(ra); + __ MovFromFloatResult(double_result); + + __ bind(&done); + __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); + __ Ret(); + } +} + + +bool CEntryStub::NeedsImmovableCode() { + return true; +} + + +void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { + CEntryStub::GenerateAheadOfTime(isolate); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); + StubFailureTrampolineStub::GenerateAheadOfTime(isolate); + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + CreateAllocationSiteStub::GenerateAheadOfTime(isolate); + BinaryOpICStub::GenerateAheadOfTime(isolate); + StoreRegistersStateStub::GenerateAheadOfTime(isolate); + RestoreRegistersStateStub::GenerateAheadOfTime(isolate); + BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); +} + + +void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { + StoreRegistersStateStub stub(isolate); + stub.GetCode(); +} + + +void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { + RestoreRegistersStateStub stub(isolate); + stub.GetCode(); +} + + +void CodeStub::GenerateFPStubs(Isolate* isolate) { + SaveFPRegsMode mode = kSaveFPRegs; + CEntryStub save_doubles(isolate, 1, mode); + StoreBufferOverflowStub stub(isolate, mode); + // These stubs might already be in the snapshot, detect that and 
don't + // regenerate, which would lead to code stub initialization state being messed + // up. + Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code)) { + save_doubles_code = *save_doubles.GetCode(); + } + Code* store_buffer_overflow_code; + if (!stub.FindCodeInCache(&store_buffer_overflow_code)) { + store_buffer_overflow_code = *stub.GetCode(); + } + isolate->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { + CEntryStub stub(isolate, 1, kDontSaveFPRegs); + stub.GetCode(); +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // Called from JavaScript; parameters are on stack as if calling JS function + // s0: number of arguments including receiver + // s1: size of arguments excluding receiver + // s2: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) + + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // NOTE: s0-s2 hold the arguments of this function instead of a0-a2. + // The reason for this is that these arguments would need to be saved anyway + // so it's faster to set them up directly. + // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. + + // Compute the argv pointer in a callee-saved register. + __ Daddu(s1, sp, s1); + + // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(save_doubles_); + + // s0: number of arguments including receiver (C callee-saved) + // s1: pointer to first argument (C callee-saved) + // s2: pointer to builtin function (C callee-saved) + + // Prepare arguments for C routine. + // a0 = argc + __ mov(a0, s0); + // a1 = argv (set in the delay slot after find_ra below). + + // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We + // also need to reserve the 4 argument slots on the stack. 
+ + __ AssertStackIsAligned(); + + __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); + + // To let the GC traverse the return address of the exit frames, we need to + // know where the return address is. The CEntryStub is unmovable, so + // we can store the address on the stack to be able to find it again and + // we never have to restore it, because it will not change. + { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); + // This branch-and-link sequence is needed to find the current PC on mips, + // saved to the ra register. + // Use masm-> here instead of the double-underscore macro since extra + // coverage code can interfere with the proper calculation of ra. + Label find_ra; + masm->bal(&find_ra); // bal exposes branch delay slot. + masm->mov(a1, s1); + masm->bind(&find_ra); + + // Adjust the value in ra to point to the correct return location, 2nd + // instruction past the real call into C code (the jalr(t9)), and push it. + // This is the return address of the exit frame. + const int kNumInstructionsToJump = 5; + masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size); + masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. + // Stack space reservation moved to the branch delay slot below. + // Stack is still aligned. + + // Call the C routine. + masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. + masm->jalr(t9); + // Set up sp in the delay slot. + masm->daddiu(sp, sp, -kCArgsSlotsSize); + // Make sure the stored 'ra' points to this position. + DCHECK_EQ(kNumInstructionsToJump, + masm->InstructionsGeneratedSince(&find_ra)); + } + + // Runtime functions should not return 'the hole'. Allowing it to escape may + // lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); + __ Branch(&okay, ne, v0, Operand(a4)); + __ stop("The hole escaped"); + __ bind(&okay); + } + + // Check result for exception sentinel. 
+ Label exception_returned; + __ LoadRoot(a4, Heap::kExceptionRootIndex); + __ Branch(&exception_returned, eq, a4, Operand(v0)); + + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + Label okay; + __ li(a2, Operand(pending_exception_address)); + __ ld(a2, MemOperand(a2)); + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); + // Cannot use check here as it attempts to generate call into runtime. + __ Branch(&okay, eq, a4, Operand(a2)); + __ stop("Unexpected pending exception"); + __ bind(&okay); + } + + // Exit C frame and return. + // v0:v1: result + // sp: stack pointer + // fp: frame pointer + // s0: still holds argc (callee-saved). + __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN); + + // Handling of exception. + __ bind(&exception_returned); + + // Retrieve the pending exception. + __ li(a2, Operand(pending_exception_address)); + __ ld(v0, MemOperand(a2)); + + // Clear the pending exception. + __ li(a3, Operand(isolate()->factory()->the_hole_value())); + __ sd(a3, MemOperand(a2)); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + Label throw_termination_exception; + __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex); + __ Branch(&throw_termination_exception, eq, v0, Operand(a4)); + + // Handle normal exception. + __ Throw(v0); + + __ bind(&throw_termination_exception); + __ ThrowUncatchable(v0); +} + + +void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { + Label invoke, handler_entry, exit; + Isolate* isolate = masm->isolate(); + + // TODO(plind): unify the ABI description here. 
+ // Registers: + // a0: entry address + // a1: function + // a2: receiver + // a3: argc + // a4 (a4): on mips64 + + // Stack: + // 0 arg slots on mips64 (4 args slots on mips) + // args -- in a4/a4 on mips64, on stack on mips + + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Save callee saved registers on the stack. + __ MultiPush(kCalleeSaved | ra.bit()); + + // Save callee-saved FPU registers. + __ MultiPushFPU(kCalleeSavedFPU); + // Set up the reserved register for 0.0. + __ Move(kDoubleRegZero, 0.0); + + // Load argv in s0 register. + if (kMipsAbi == kN64) { + __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register. + } else { // Abi O32. + // 5th parameter on stack for O32 abi. + int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; + offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; + __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); + } + + __ InitializeRootRegister(); + + // We build an EntryFrame. + __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used. + int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ li(a6, Operand(Smi::FromInt(marker))); + __ li(a5, Operand(Smi::FromInt(marker))); + ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate); + __ li(a4, Operand(c_entry_fp)); + __ ld(a4, MemOperand(a4)); + __ Push(a7, a6, a5, a4); + // Set up frame pointer for the frame to be pushed. + __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); + + // Registers: + // a0: entry_address + // a1: function + // a2: receiver_pointer + // a3: argc + // s0: argv + // + // Stack: + // caller fp | + // function slot | entry frame + // context slot | + // bad fp (0xff...f) | + // callee saved registers + ra + // [ O32: 4 args slots] + // args + + // If this is the outermost JS call, set js_entry_sp value. 
+ Label non_outermost_js; + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); + __ li(a5, Operand(ExternalReference(js_entry_sp))); + __ ld(a6, MemOperand(a5)); + __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg)); + __ sd(fp, MemOperand(a5)); + __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + Label cont; + __ b(&cont); + __ nop(); // Branch delay slot nop. + __ bind(&non_outermost_js); + __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); + __ bind(&cont); + __ push(a4); + + // Jump to a faked try block that does the invoke, with a faked catch + // block that sets the pending exception. + __ jmp(&invoke); + __ bind(&handler_entry); + handler_offset_ = handler_entry.pos(); + // Caught exception: Store result (exception) in the pending exception + // field in the JSEnv and return a failure sentinel. Coming in here the + // fp will be invalid because the PushTryHandler below sets it to 0 to + // signal the existence of the JSEntry frame. + __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0. + __ LoadRoot(v0, Heap::kExceptionRootIndex); + __ b(&exit); // b exposes branch delay slot. + __ nop(); // Branch delay slot nop. + + // Invoke: Link this frame into the handler chain. There's only one + // handler block in this code object, so its index is 0. + __ bind(&invoke); + __ PushTryHandler(StackHandler::JS_ENTRY, 0); + // If an exception not caught by another handler occurs, this handler + // returns control to the code after the bal(&invoke) above, which + // restores all kCalleeSaved registers (including cp and fp) to their + // saved values before returning a failure to C. + + // Clear any pending exceptions. 
+ __ LoadRoot(a5, Heap::kTheHoleValueRootIndex); + __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + __ sd(a5, MemOperand(a4)); + + // Invoke the function by calling through JS entry trampoline builtin. + // Notice that we cannot store a reference to the trampoline code directly in + // this stub, because runtime stubs are not traversed when doing GC. + + // Registers: + // a0: entry_address + // a1: function + // a2: receiver_pointer + // a3: argc + // s0: argv + // + // Stack: + // handler frame + // entry frame + // callee saved registers + ra + // [ O32: 4 args slots] + // args + + if (is_construct) { + ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, + isolate); + __ li(a4, Operand(construct_entry)); + } else { + ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); + __ li(a4, Operand(entry)); + } + __ ld(t9, MemOperand(a4)); // Deref address. + // Call JSEntryTrampoline. + __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); + __ Call(t9); + + // Unlink this frame from the handler chain. + __ PopTryHandler(); + + __ bind(&exit); // v0 holds result + // Check if the current stack frame is marked as the outermost JS frame. + Label non_outermost_js_2; + __ pop(a5); + __ Branch(&non_outermost_js_2, + ne, + a5, + Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + __ li(a5, Operand(ExternalReference(js_entry_sp))); + __ sd(zero_reg, MemOperand(a5)); + __ bind(&non_outermost_js_2); + + // Restore the top frame descriptors from the stack. + __ pop(a5); + __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress, + isolate))); + __ sd(a5, MemOperand(a4)); + + // Reset the stack to the callee saved registers. + __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); + + // Restore callee-saved fpu registers. + __ MultiPopFPU(kCalleeSavedFPU); + + // Restore callee saved registers from the stack. + __ MultiPop(kCalleeSaved | ra.bit()); + // Return. 
+ __ Jump(ra); +} + + +// Uses registers a0 to a4. +// Expected input (depending on whether args are in registers or on the stack): +// * object: a0 or at sp + 1 * kPointerSize. +// * function: a1 or at sp. +// +// An inlined call site may have been generated before calling this stub. +// In this case the offset to the inline site to patch is passed on the stack, +// in the safepoint slot for register a4. +void InstanceofStub::Generate(MacroAssembler* masm) { + // Call site inlining and patching implies arguments in registers. + DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); + // ReturnTrueFalse is only implemented for inlined call sites. + DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); + + // Fixed register usage throughout the stub: + const Register object = a0; // Object (lhs). + Register map = a3; // Map of the object. + const Register function = a1; // Function (rhs). + const Register prototype = a4; // Prototype of the function. + const Register inline_site = t1; + const Register scratch = a2; + + const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize; + + Label slow, loop, is_instance, is_not_instance, not_js_object; + + if (!HasArgsInRegisters()) { + __ ld(object, MemOperand(sp, 1 * kPointerSize)); + __ ld(function, MemOperand(sp, 0)); + } + + // Check that the left hand is a JS object and load map. + __ JumpIfSmi(object, ¬_js_object); + __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); + + // If there is a call site cache don't look in the global cache, but do the + // real lookup and update the call site cache. + if (!HasCallSiteInlineCheck()) { + Label miss; + __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex); + __ Branch(&miss, ne, function, Operand(at)); + __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex); + __ Branch(&miss, ne, map, Operand(at)); + __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); + __ DropAndRet(HasArgsInRegisters() ? 
0 : 2); + + __ bind(&miss); + } + + // Get the prototype of the function. + __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true); + + // Check that the function prototype is a JS object. + __ JumpIfSmi(prototype, &slow); + __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); + + // Update the global instanceof or call site inlined cache with the current + // map and function. The cached answer will be set when it is known below. + if (!HasCallSiteInlineCheck()) { + __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); + } else { + DCHECK(HasArgsInRegisters()); + // Patch the (relocated) inlined map check. + + // The offset was stored in a4 safepoint slot. + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). + __ LoadFromSafepointRegisterSlot(scratch, a4); + __ Dsubu(inline_site, ra, scratch); + // Get the map location in scratch and patch it. + __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. + __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset)); + } + + // Register mapping: a3 is object map and a4 is function prototype. + // Get prototype of object into a2. + __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); + + // We don't need map any more. Use it as a scratch register. + Register scratch2 = map; + map = no_reg; + + // Loop through the prototype chain looking for the function prototype. 
+ __ LoadRoot(scratch2, Heap::kNullValueRootIndex); + __ bind(&loop); + __ Branch(&is_instance, eq, scratch, Operand(prototype)); + __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); + __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); + __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); + __ Branch(&loop); + + __ bind(&is_instance); + DCHECK(Smi::FromInt(0) == 0); + if (!HasCallSiteInlineCheck()) { + __ mov(v0, zero_reg); + __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); + } else { + // Patch the call site to return true. + __ LoadRoot(v0, Heap::kTrueValueRootIndex); + __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ PatchRelocatedValue(inline_site, scratch, v0); + + if (!ReturnTrueFalseObject()) { + DCHECK_EQ(Smi::FromInt(0), 0); + __ mov(v0, zero_reg); + } + } + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); + + __ bind(&is_not_instance); + if (!HasCallSiteInlineCheck()) { + __ li(v0, Operand(Smi::FromInt(1))); + __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); + } else { + // Patch the call site to return false. + __ LoadRoot(v0, Heap::kFalseValueRootIndex); + __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ PatchRelocatedValue(inline_site, scratch, v0); + + if (!ReturnTrueFalseObject()) { + __ li(v0, Operand(Smi::FromInt(1))); + } + } + + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); + + Label object_not_null, object_not_null_or_smi; + __ bind(¬_js_object); + // Before null, smi and string value checks, check that the rhs is a function + // as for a non-function rhs an exception needs to be thrown. + __ JumpIfSmi(function, &slow); + __ GetObjectType(function, scratch2, scratch); + __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); + + // Null is not instance of anything. 
+ __ Branch(&object_not_null, + ne, + scratch, + Operand(isolate()->factory()->null_value())); + __ li(v0, Operand(Smi::FromInt(1))); + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); + + __ bind(&object_not_null); + // Smi values are not instances of anything. + __ JumpIfNotSmi(object, &object_not_null_or_smi); + __ li(v0, Operand(Smi::FromInt(1))); + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); + + __ bind(&object_not_null_or_smi); + // String values are not instances of anything. + __ IsObjectJSStringType(object, scratch, &slow); + __ li(v0, Operand(Smi::FromInt(1))); + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); + + // Slow-case. Tail call builtin. + __ bind(&slow); + if (!ReturnTrueFalseObject()) { + if (HasArgsInRegisters()) { + __ Push(a0, a1); + } + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); + } else { + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a0, a1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } + __ mov(a0, v0); + __ LoadRoot(v0, Heap::kTrueValueRootIndex); + __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg)); + __ LoadRoot(v0, Heap::kFalseValueRootIndex); + __ DropAndRet(HasArgsInRegisters() ? 0 : 2); + } +} + + +void FunctionPrototypeStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver = LoadIC::ReceiverRegister(); + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, + a4, &miss); + __ bind(&miss); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); +} + + +Register InstanceofStub::left() { return a0; } + + +Register InstanceofStub::right() { return a1; } + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The displacement is the offset of the last parameter (if any) + // relative to the frame pointer. + const int kDisplacement = + StandardFrameConstants::kCallerSPOffset - kPointerSize; + + // Check that the key is a smiGenerateReadElement. 
+ Label slow; + __ JumpIfNotSmi(a1, &slow); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); + __ Branch(&adaptor, + eq, + a3, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Check index (a1) against formal parameters count limit passed in + // through register a0. Use unsigned comparison to get negative + // check for free. + __ Branch(&slow, hs, a1, Operand(a0)); + + // Read the argument from the stack and return it. + __ dsubu(a3, a0, a1); + __ SmiScale(a7, a3, kPointerSizeLog2); + __ Daddu(a3, fp, Operand(a7)); + __ Ret(USE_DELAY_SLOT); + __ ld(v0, MemOperand(a3, kDisplacement)); + + // Arguments adaptor case: Check index (a1) against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); + + // Read the argument from the adaptor frame and return it. + __ dsubu(a3, a0, a1); + __ SmiScale(a7, a3, kPointerSizeLog2); + __ Daddu(a3, a2, Operand(a7)); + __ Ret(USE_DELAY_SLOT); + __ ld(v0, MemOperand(a3, kDisplacement)); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ push(a1); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { + // sp[0] : number of parameters + // sp[4] : receiver displacement + // sp[8] : function + // Check if the calling frame is an arguments adaptor frame. 
+ Label runtime; + __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); + __ Branch(&runtime, + ne, + a2, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Patch the arguments.length and the parameters pointer in the current frame. + __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ sd(a2, MemOperand(sp, 0 * kPointerSize)); + __ SmiScale(a7, a2, kPointerSizeLog2); + __ Daddu(a3, a3, Operand(a7)); + __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset); + __ sd(a3, MemOperand(sp, 1 * kPointerSize)); + + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); +} + + +void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { + // Stack layout: + // sp[0] : number of parameters (tagged) + // sp[4] : address of receiver argument + // sp[8] : function + // Registers used over whole function: + // a6 : allocated object (tagged) + // t1 : mapped parameter count (tagged) + + __ ld(a1, MemOperand(sp, 0 * kPointerSize)); + // a1 = parameter count (tagged) + + // Check if the calling frame is an arguments adaptor frame. + Label runtime; + Label adaptor_frame, try_allocate; + __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); + __ Branch(&adaptor_frame, + eq, + a2, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // No adaptor, parameter count = argument count. + __ mov(a2, a1); + __ Branch(&try_allocate); + + // We have an adaptor frame. Patch the parameters pointer. 
+ __ bind(&adaptor_frame); + __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiScale(t2, a2, kPointerSizeLog2); + __ Daddu(a3, a3, Operand(t2)); + __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); + __ sd(a3, MemOperand(sp, 1 * kPointerSize)); + + // a1 = parameter count (tagged) + // a2 = argument count (tagged) + // Compute the mapped parameter count = min(a1, a2) in a1. + Label skip_min; + __ Branch(&skip_min, lt, a1, Operand(a2)); + __ mov(a1, a2); + __ bind(&skip_min); + + __ bind(&try_allocate); + + // Compute the sizes of backing store, parameter map, and arguments object. + // 1. Parameter map, has 2 extra words containing context and backing store. + const int kParameterMapHeaderSize = + FixedArray::kHeaderSize + 2 * kPointerSize; + // If there are no mapped parameters, we do not need the parameter_map. + Label param_map_size; + DCHECK_EQ(0, Smi::FromInt(0)); + __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg)); + __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0. + __ SmiScale(t1, a1, kPointerSizeLog2); + __ daddiu(t1, t1, kParameterMapHeaderSize); + __ bind(¶m_map_size); + + // 2. Backing store. + __ SmiScale(t2, a2, kPointerSizeLog2); + __ Daddu(t1, t1, Operand(t2)); + __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize)); + + // 3. Arguments object. + __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize)); + + // Do the allocation of all three objects in one go. + __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT); + + // v0 = address of new object(s) (tagged) + // a2 = argument count (smi-tagged) + // Get the arguments boilerplate from the current native context into a4. 
+ const int kNormalOffset = + Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); + const int kAliasedOffset = + Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX); + + __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset)); + Label skip2_ne, skip2_eq; + __ Branch(&skip2_ne, ne, a1, Operand(zero_reg)); + __ ld(a4, MemOperand(a4, kNormalOffset)); + __ bind(&skip2_ne); + + __ Branch(&skip2_eq, eq, a1, Operand(zero_reg)); + __ ld(a4, MemOperand(a4, kAliasedOffset)); + __ bind(&skip2_eq); + + // v0 = address of new object (tagged) + // a1 = mapped parameter count (tagged) + // a2 = argument count (smi-tagged) + // a4 = address of arguments map (tagged) + __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset)); + __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); + __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); + + // Set up the callee in-object property. + STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); + __ ld(a3, MemOperand(sp, 2 * kPointerSize)); + __ AssertNotSmi(a3); + const int kCalleeOffset = JSObject::kHeaderSize + + Heap::kArgumentsCalleeIndex * kPointerSize; + __ sd(a3, FieldMemOperand(v0, kCalleeOffset)); + + // Use the length (smi tagged) and set that as an in-object property too. + STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); + const int kLengthOffset = JSObject::kHeaderSize + + Heap::kArgumentsLengthIndex * kPointerSize; + __ sd(a2, FieldMemOperand(v0, kLengthOffset)); + + // Set up the elements pointer in the allocated arguments object. + // If we allocated a parameter map, a4 will point there, otherwise + // it will point to the backing store. 
+ __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize)); + __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset)); + + // v0 = address of new object (tagged) + // a1 = mapped parameter count (tagged) + // a2 = argument count (tagged) + // a4 = address of parameter map or backing store (tagged) + // Initialize parameter map. If there are no mapped arguments, we're done. + Label skip_parameter_map; + Label skip3; + __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0))); + // Move backing store address to a3, because it is + // expected there when filling in the unmapped arguments. + __ mov(a3, a4); + __ bind(&skip3); + + __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0))); + + __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex); + __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset)); + __ Daddu(a6, a1, Operand(Smi::FromInt(2))); + __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset)); + __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize)); + __ SmiScale(t2, a1, kPointerSizeLog2); + __ Daddu(a6, a4, Operand(t2)); + __ Daddu(a6, a6, Operand(kParameterMapHeaderSize)); + __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize)); + + // Copy the parameter slots and the holes in the arguments. + // We need to fill in mapped_parameter_count slots. They index the context, + // where parameters are stored in reverse order, at + // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 + // The mapped parameter thus need to get indices + // MIN_CONTEXT_SLOTS+parameter_count-1 .. + // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count + // We loop from right to left. 
+ Label parameters_loop, parameters_test; + __ mov(a6, a1); + __ ld(t1, MemOperand(sp, 0 * kPointerSize)); + __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); + __ Dsubu(t1, t1, Operand(a1)); + __ LoadRoot(a7, Heap::kTheHoleValueRootIndex); + __ SmiScale(t2, a6, kPointerSizeLog2); + __ Daddu(a3, a4, Operand(t2)); + __ Daddu(a3, a3, Operand(kParameterMapHeaderSize)); + + // a6 = loop variable (tagged) + // a1 = mapping index (tagged) + // a3 = address of backing store (tagged) + // a4 = address of parameter map (tagged) + // a5 = temporary scratch (a.o., for address calculation) + // a7 = the hole value + __ jmp(¶meters_test); + + __ bind(¶meters_loop); + + __ Dsubu(a6, a6, Operand(Smi::FromInt(1))); + __ SmiScale(a5, a6, kPointerSizeLog2); + __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); + __ Daddu(t2, a4, a5); + __ sd(t1, MemOperand(t2)); + __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); + __ Daddu(t2, a3, a5); + __ sd(a7, MemOperand(t2)); + __ Daddu(t1, t1, Operand(Smi::FromInt(1))); + __ bind(¶meters_test); + __ Branch(¶meters_loop, ne, a6, Operand(Smi::FromInt(0))); + + __ bind(&skip_parameter_map); + // a2 = argument count (tagged) + // a3 = address of backing store (tagged) + // a5 = scratch + // Copy arguments header and remaining slots (if there are any). 
+ __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex); + __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset)); + __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset)); + + Label arguments_loop, arguments_test; + __ mov(t1, a1); + __ ld(a4, MemOperand(sp, 1 * kPointerSize)); + __ SmiScale(t2, t1, kPointerSizeLog2); + __ Dsubu(a4, a4, Operand(t2)); + __ jmp(&arguments_test); + + __ bind(&arguments_loop); + __ Dsubu(a4, a4, Operand(kPointerSize)); + __ ld(a6, MemOperand(a4, 0)); + __ SmiScale(t2, t1, kPointerSizeLog2); + __ Daddu(a5, a3, Operand(t2)); + __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize)); + __ Daddu(t1, t1, Operand(Smi::FromInt(1))); + + __ bind(&arguments_test); + __ Branch(&arguments_loop, lt, t1, Operand(a2)); + + // Return and remove the on-stack parameters. + __ DropAndRet(3); + + // Do the runtime call to allocate the arguments object. + // a2 = argument count (tagged) + __ bind(&runtime); + __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); +} + + +void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { + // sp[0] : number of parameters + // sp[4] : receiver displacement + // sp[8] : function + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); + __ Branch(&adaptor_frame, + eq, + a3, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Get the length from the frame. + __ ld(a1, MemOperand(sp, 0)); + __ Branch(&try_allocate); + + // Patch the arguments.length and the parameters pointer. 
+ __ bind(&adaptor_frame); + __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ sd(a1, MemOperand(sp, 0)); + __ SmiScale(at, a1, kPointerSizeLog2); + + __ Daddu(a3, a2, Operand(at)); + + __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); + __ sd(a3, MemOperand(sp, 1 * kPointerSize)); + + // Try the new space allocation. Start out with computing the size + // of the arguments object and the elements array in words. + Label add_arguments_object; + __ bind(&try_allocate); + __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); + __ SmiUntag(a1); + + __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ bind(&add_arguments_object); + __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize)); + + // Do the allocation of both objects in one go. + __ Allocate(a1, v0, a2, a3, &runtime, + static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); + + // Get the arguments boilerplate from the current native context. + __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset)); + __ ld(a4, MemOperand(a4, Context::SlotOffset( + Context::STRICT_ARGUMENTS_MAP_INDEX))); + + __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset)); + __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); + __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); + + // Get the length (smi tagged) and set that as an in-object property too. + STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); + __ ld(a1, MemOperand(sp, 0 * kPointerSize)); + __ AssertSmi(a1); + __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize + + Heap::kArgumentsLengthIndex * kPointerSize)); + + Label done; + __ Branch(&done, eq, a1, Operand(zero_reg)); + + // Get the parameters pointer from the stack. 
+ __ ld(a2, MemOperand(sp, 1 * kPointerSize)); + + // Set up the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize)); + __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset)); + __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); + __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset)); + __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset)); + // Untag the length for the loop. + __ SmiUntag(a1); + + + // Copy the fixed array slots. + Label loop; + // Set up a4 to point to the first array slot. + __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + // Pre-decrement a2 with kPointerSize on each iteration. + // Pre-decrement in order to skip receiver. + __ Daddu(a2, a2, Operand(-kPointerSize)); + __ ld(a3, MemOperand(a2)); + // Post-increment a4 with kPointerSize on each iteration. + __ sd(a3, MemOperand(a4)); + __ Daddu(a4, a4, Operand(kPointerSize)); + __ Dsubu(a1, a1, Operand(1)); + __ Branch(&loop, ne, a1, Operand(zero_reg)); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ DropAndRet(3); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); +#else // V8_INTERPRETED_REGEXP + + // Stack frame on entry. 
+ // sp[0]: last_match_info (expected JSArray) + // sp[4]: previous index + // sp[8]: subject string + // sp[12]: JSRegExp object + + const int kLastMatchInfoOffset = 0 * kPointerSize; + const int kPreviousIndexOffset = 1 * kPointerSize; + const int kSubjectOffset = 2 * kPointerSize; + const int kJSRegExpOffset = 3 * kPointerSize; + + Label runtime; + // Allocation of registers for this function. These are in callee save + // registers and will be preserved by the call to the native RegExp code, as + // this code is called using the normal C calling convention. When calling + // directly from generated code the native RegExp code will not do a GC and + // therefore the content of these registers are safe to use after the call. + // MIPS - using s0..s2, since we are not using CEntry Stub. + Register subject = s0; + Register regexp_data = s1; + Register last_match_info_elements = s2; + + // Ensure that a RegExp stack is allocated. + ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address( + isolate()); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(isolate()); + __ li(a0, Operand(address_of_regexp_stack_memory_size)); + __ ld(a0, MemOperand(a0, 0)); + __ Branch(&runtime, eq, a0, Operand(zero_reg)); + + // Check that the first argument is a JSRegExp object. + __ ld(a0, MemOperand(sp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(a0, &runtime); + __ GetObjectType(a0, a1, a1); + __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE)); + + // Check that the RegExp has been compiled (data contains a fixed array). 
+ __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + __ SmiTst(regexp_data, a4); + __ Check(nz, + kUnexpectedTypeForRegExpDataFixedArrayExpected, + a4, + Operand(zero_reg)); + __ GetObjectType(regexp_data, a0, a0); + __ Check(eq, + kUnexpectedTypeForRegExpDataFixedArrayExpected, + a0, + Operand(FIXED_ARRAY_TYPE)); + } + + // regexp_data: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); + __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); + + // regexp_data: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ ld(a2, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Check (number_of_captures + 1) * 2 <= offsets vector size + // Or number_of_captures * 2 <= offsets vector size - 2 + // Or number_of_captures <= offsets vector size / 2 - 1 + // Multiplying by 2 comes for free since a2 is smi-tagged. + STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); + int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1; + __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp))); + + // Reset offset for possibly sliced string. + __ mov(t0, zero_reg); + __ ld(subject, MemOperand(sp, kSubjectOffset)); + __ JumpIfSmi(subject, &runtime); + __ mov(a3, subject); // Make a copy of the original subject string. + __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); + // subject: subject string + // a3: subject string + // a0: subject string instance type + // regexp_data: RegExp data (FixedArray) + // Handle subject string according to its encoding and representation: + // (1) Sequential string? If yes, go to (5). + // (2) Anything but sequential or cons? If yes, go to (6). + // (3) Cons string. 
If the string is flat, replace subject with first string. + // Otherwise bailout. + // (4) Is subject external? If yes, go to (7). + // (5) Sequential string. Load regexp code according to encoding. + // (E) Carry on. + /// [...] + + // Deferred code at the end of the stub: + // (6) Not a long external string? If yes, go to (8). + // (7) External string. Make it, offset-wise, look like a sequential string. + // Go to (5). + // (8) Short external string or not a string? If yes, bail out to runtime. + // (9) Sliced string. Replace subject with parent. Go to (4). + + Label check_underlying; // (4) + Label seq_string; // (5) + Label not_seq_nor_cons; // (6) + Label external_string; // (7) + Label not_long_external; // (8) + + // (1) Sequential string? If yes, go to (5). + __ And(a1, + a0, + Operand(kIsNotStringMask | + kStringRepresentationMask | + kShortExternalStringMask)); + STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); + __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5). + + // (2) Anything but sequential or cons? If yes, go to (6). + STATIC_ASSERT(kConsStringTag < kExternalStringTag); + STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); + STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); + STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); + // Go to (6). + __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag)); + + // (3) Cons string. Check that it's flat. + // Replace subject with first string and reload instance type. + __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset)); + __ LoadRoot(a1, Heap::kempty_stringRootIndex); + __ Branch(&runtime, ne, a0, Operand(a1)); + __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); + + // (4) Is subject external? If yes, go to (7). 
+ __ bind(&check_underlying); + __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kSeqStringTag == 0); + __ And(at, a0, Operand(kStringRepresentationMask)); + // The underlying external string is never a short external string. + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); + __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7). + + // (5) Sequential string. Load regexp code according to encoding. + __ bind(&seq_string); + // subject: sequential subject string (or look-alike, external string) + // a3: original subject string + // Load previous index and check range before a3 is overwritten. We have to + // use a3 instead of subject here because subject might have been only made + // to look like a sequential string when it actually is an external string. + __ ld(a1, MemOperand(sp, kPreviousIndexOffset)); + __ JumpIfNotSmi(a1, &runtime); + __ ld(a3, FieldMemOperand(a3, String::kLengthOffset)); + __ Branch(&runtime, ls, a3, Operand(a1)); + __ SmiUntag(a1); + + STATIC_ASSERT(kStringEncodingMask == 4); + STATIC_ASSERT(kOneByteStringTag == 4); + STATIC_ASSERT(kTwoByteStringTag == 0); + __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII. + __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); + __ dsra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below). + __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); + __ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. + + // (E) Carry on. String handling is done. + // t9: irregexp code + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // a smi (code flushing support). 
+ __ JumpIfSmi(t9, &runtime); + + // a1: previous index + // a3: encoding of subject string (1 if ASCII, 0 if two_byte); + // t9: code + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // All checks done. Now push arguments for native regexp code. + __ IncrementCounter(isolate()->counters()->regexp_entry_native(), + 1, a0, a2); + + // Isolates: note we add an additional parameter here (isolate pointer). + const int kRegExpExecuteArguments = 9; + const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4; + __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); + + // Stack pointer now points to cell where return address is to be written. + // Arguments are before that on the stack or in registers, meaning we + // treat the return address as argument 5. Thus every argument after that + // needs to be shifted back by 1. Since DirectCEntryStub will handle + // allocating space for the c argument slots, we don't need to calculate + // that into the argument positions on the stack. This is how the stack will + // look (sp meaning the value of sp at this moment): + // Abi n64: + // [sp + 1] - Argument 9 + // [sp + 0] - saved ra + // Abi O32: + // [sp + 5] - Argument 9 + // [sp + 4] - Argument 8 + // [sp + 3] - Argument 7 + // [sp + 2] - Argument 6 + // [sp + 1] - Argument 5 + // [sp + 0] - saved ra + + if (kMipsAbi == kN64) { + // Argument 9: Pass current isolate address. + __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); + __ sd(a0, MemOperand(sp, 1 * kPointerSize)); + + // Argument 8: Indicate that this is a direct call from JavaScript. + __ li(a7, Operand(1)); + + // Argument 7: Start (high end) of backtracking stack memory area. 
+ __ li(a0, Operand(address_of_regexp_stack_memory_address)); + __ ld(a0, MemOperand(a0, 0)); + __ li(a2, Operand(address_of_regexp_stack_memory_size)); + __ ld(a2, MemOperand(a2, 0)); + __ daddu(a6, a0, a2); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(a5, zero_reg); + + // Argument 5: static offsets vector buffer. + __ li(a4, Operand( + ExternalReference::address_of_static_offsets_vector(isolate()))); + } else { // O32. + DCHECK(kMipsAbi == kO32); + + // Argument 9: Pass current isolate address. + // CFunctionArgumentOperand handles MIPS stack argument slots. + __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); + __ sd(a0, MemOperand(sp, 5 * kPointerSize)); + + // Argument 8: Indicate that this is a direct call from JavaScript. + __ li(a0, Operand(1)); + __ sd(a0, MemOperand(sp, 4 * kPointerSize)); + + // Argument 7: Start (high end) of backtracking stack memory area. + __ li(a0, Operand(address_of_regexp_stack_memory_address)); + __ ld(a0, MemOperand(a0, 0)); + __ li(a2, Operand(address_of_regexp_stack_memory_size)); + __ ld(a2, MemOperand(a2, 0)); + __ daddu(a0, a0, a2); + __ sd(a0, MemOperand(sp, 3 * kPointerSize)); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(a0, zero_reg); + __ sd(a0, MemOperand(sp, 2 * kPointerSize)); + + // Argument 5: static offsets vector buffer. + __ li(a0, Operand( + ExternalReference::address_of_static_offsets_vector(isolate()))); + __ sd(a0, MemOperand(sp, 1 * kPointerSize)); + } + + // For arguments 4 and 3 get string length, calculate start of string data + // and calculate the shift of the index (0 for ASCII and 1 for two byte). + __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); + __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. 
+ // Load the length from the original subject string from the previous stack + // frame. Therefore we have to use fp, which points exactly to two pointer + // sizes below the previous sp. (Because creating a new stack frame pushes + // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) + __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); + // If slice offset is not 0, load the length from the original sliced string. + // Argument 4, a3: End of string data + // Argument 3, a2: Start of string data + // Prepare start and end index of the input. + __ dsllv(t1, t0, a3); + __ daddu(t0, t2, t1); + __ dsllv(t1, a1, a3); + __ daddu(a2, t0, t1); + + __ ld(t2, FieldMemOperand(subject, String::kLengthOffset)); + + __ SmiUntag(t2); + __ dsllv(t1, t2, a3); + __ daddu(a3, t0, t1); + // Argument 2 (a1): Previous index. + // Already there + + // Argument 1 (a0): Subject string. + __ mov(a0, subject); + + // Locate the code entry and call it. + __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag)); + DirectCEntryStub stub(isolate()); + stub.GenerateCall(masm, t9); + + __ LeaveExitFrame(false, no_reg, true); + + // v0: result + // subject: subject string (callee saved) + // regexp_data: RegExp data (callee saved) + // last_match_info_elements: Last match info elements (callee saved) + // Check the result. + Label success; + __ Branch(&success, eq, v0, Operand(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global. + Label failure; + __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE)); + // If not exception it can only be retry. Handle that in the runtime system. + __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. 
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception. + __ li(a1, Operand(isolate()->factory()->the_hole_value())); + __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate()))); + __ ld(v0, MemOperand(a2, 0)); + __ Branch(&runtime, eq, v0, Operand(a1)); + + __ sd(a1, MemOperand(a2, 0)); // Clear pending exception. + + // Check if the exception is a termination. If so, throw as uncatchable. + __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex); + Label termination_exception; + __ Branch(&termination_exception, eq, v0, Operand(a0)); + + __ Throw(v0); + + __ bind(&termination_exception); + __ ThrowUncatchable(v0); + + __ bind(&failure); + // For failure and exception return null. + __ li(v0, Operand(isolate()->factory()->null_value())); + __ DropAndRet(4); + + // Process the result from the native regexp code. + __ bind(&success); + + __ lw(a1, UntagSmiFieldMemOperand( + regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + __ Daddu(a1, a1, Operand(1)); + __ dsll(a1, a1, 1); // Multiply by 2. + + __ ld(a0, MemOperand(sp, kLastMatchInfoOffset)); + __ JumpIfSmi(a0, &runtime); + __ GetObjectType(a0, a2, a2); + __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE)); + // Check that the JSArray is in fast case. + __ ld(last_match_info_elements, + FieldMemOperand(a0, JSArray::kElementsOffset)); + __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); + __ Branch(&runtime, ne, a0, Operand(at)); + // Check that the last match info has space for the capture registers and the + // additional information. 
+ __ ld(a0, + FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); + __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead)); + + __ SmiUntag(at, a0); + __ Branch(&runtime, gt, a2, Operand(at)); + + // a1: number of capture registers + // subject: subject string + // Store the capture count. + __ SmiTag(a2, a1); // To smi. + __ sd(a2, FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastCaptureCountOffset)); + // Store last subject and last input. + __ sd(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastSubjectOffset)); + __ mov(a2, subject); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastSubjectOffset, + subject, + a7, + kRAHasNotBeenSaved, + kDontSaveFPRegs); + __ mov(subject, a2); + __ sd(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastInputOffset)); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastInputOffset, + subject, + a7, + kRAHasNotBeenSaved, + kDontSaveFPRegs); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(isolate()); + __ li(a2, Operand(address_of_static_offsets_vector)); + + // a1: number of capture registers + // a2: offsets vector + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wrapping after zero. + __ Daddu(a0, + last_match_info_elements, + Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); + __ bind(&next_capture); + __ Dsubu(a1, a1, Operand(1)); + __ Branch(&done, lt, a1, Operand(zero_reg)); + // Read the value from the static offsets vector buffer. + __ lw(a3, MemOperand(a2, 0)); + __ daddiu(a2, a2, kIntSize); + // Store the smi value in the last match info. + __ SmiTag(a3); + __ sd(a3, MemOperand(a0, 0)); + __ Branch(&next_capture, USE_DELAY_SLOT); + __ daddiu(a0, a0, kPointerSize); // In branch delay slot. 
+ + __ bind(&done); + + // Return last match info. + __ ld(v0, MemOperand(sp, kLastMatchInfoOffset)); + __ DropAndRet(4); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); + + // Deferred code for string handling. + // (6) Not a long external string? If yes, go to (8). + __ bind(¬_seq_nor_cons); + // Go to (8). + __ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag)); + + // (7) External string. Make it, offset-wise, look like a sequential string. + __ bind(&external_string); + __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. + __ And(at, a0, Operand(kIsIndirectStringMask)); + __ Assert(eq, + kExternalStringExpectedButNotFound, + at, + Operand(zero_reg)); + } + __ ld(subject, + FieldMemOperand(subject, ExternalString::kResourceDataOffset)); + // Move the pointer so that offset-wise, it looks like a sequential string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ Dsubu(subject, + subject, + SeqTwoByteString::kHeaderSize - kHeapObjectTag); + __ jmp(&seq_string); // Go to (5). + + // (8) Short external string or not a string? If yes, bail out to runtime. + __ bind(¬_long_external); + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask)); + __ Branch(&runtime, ne, at, Operand(zero_reg)); + + // (9) Sliced string. Replace subject with parent. Go to (4). + // Load offset into t0 and replace subject string with parent. + __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset)); + __ SmiUntag(t0); + __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); + __ jmp(&check_underlying); // Go to (4). 
+#endif // V8_INTERPRETED_REGEXP +} + + +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a feedback vector slot. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // a0 : number of arguments to the construct function + // a1 : the function to call + // a2 : Feedback vector + // a3 : slot in feedback vector (Smi) + Label initialize, done, miss, megamorphic, not_array_function; + + DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->megamorphic_symbol()); + DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), + masm->isolate()->heap()->uninitialized_symbol()); + + // Load the cache state into a4. + __ dsrl(a4, a3, 32 - kPointerSizeLog2); + __ Daddu(a4, a2, Operand(a4)); + __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ Branch(&done, eq, a4, Operand(a1)); + + if (!FLAG_pretenuring_call_new) { + // If we came here, we need to see if we are the array function. + // If we didn't have a matching function, and we didn't find the megamorph + // sentinel, then we have in the slot either some other function or an + // AllocationSite. Do a map check on the object in a3. + __ ld(a5, FieldMemOperand(a4, 0)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&miss, ne, a5, Operand(at)); + + // Make sure the function is the Array() function + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); + __ Branch(&megamorphic, ne, a1, Operand(a4)); + __ jmp(&done); + } + + __ bind(&miss); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); + __ Branch(&initialize, eq, a4, Operand(at)); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. 
+ __ bind(&megamorphic); + __ dsrl(a4, a3, 32- kPointerSizeLog2); + __ Daddu(a4, a2, Operand(a4)); + __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); + __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize)); + __ jmp(&done); + + // An uninitialized cache is patched with the function. + __ bind(&initialize); + if (!FLAG_pretenuring_call_new) { + // Make sure the function is the Array() function. + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); + __ Branch(¬_array_function, ne, a1, Operand(a4)); + + // The target function is the Array constructor, + // Create an AllocationSite if we don't already have it, store it in the + // slot. + { + FrameScope scope(masm, StackFrame::INTERNAL); + const RegList kSavedRegs = + 1 << 4 | // a0 + 1 << 5 | // a1 + 1 << 6 | // a2 + 1 << 7; // a3 + + // Arguments register must be smi-tagged to call out. + __ SmiTag(a0); + __ MultiPush(kSavedRegs); + + CreateAllocationSiteStub create_stub(masm->isolate()); + __ CallStub(&create_stub); + + __ MultiPop(kSavedRegs); + __ SmiUntag(a0); + } + __ Branch(&done); + + __ bind(¬_array_function); + } + + __ dsrl(a4, a3, 32 - kPointerSizeLog2); + __ Daddu(a4, a2, Operand(a4)); + __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ sd(a1, MemOperand(a4, 0)); + + __ Push(a4, a2, a1); + __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ Pop(a4, a2, a1); + + __ bind(&done); +} + + +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + + // Do not transform the receiver for strict mode functions. + int32_t strict_mode_function_mask = + 1 << SharedFunctionInfo::kStrictModeBitWithinByte ; + // Do not transform the receiver for native (Compilerhints already in a3). 
+ int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte; + + __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset)); + __ And(at, a4, Operand(strict_mode_function_mask)); + __ Branch(cont, ne, at, Operand(zero_reg)); + __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset)); + __ And(at, a4, Operand(native_mask)); + __ Branch(cont, ne, at, Operand(zero_reg)); +} + + +static void EmitSlowCase(MacroAssembler* masm, + int argc, + Label* non_function) { + // Check for function proxy. + __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE)); + __ push(a1); // put proxy as additional argument + __ li(a0, Operand(argc + 1, RelocInfo::NONE32)); + __ mov(a2, zero_reg); + __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ bind(non_function); + __ sd(a1, MemOperand(sp, argc * kPointerSize)); + __ li(a0, Operand(argc)); // Set up the number of arguments. + __ mov(a2, zero_reg); + __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { + // Wrap the receiver and patch it back onto the stack. 
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ Push(a1, a3); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ pop(a1); + } + __ Branch(USE_DELAY_SLOT, cont); + __ sd(v0, MemOperand(sp, argc * kPointerSize)); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { + // a1 : the function to call + Label slow, non_function, wrap, cont; + + if (needs_checks) { + // Check that the function is really a JavaScript function. + // a1: pushed function (to be verified) + __ JumpIfSmi(a1, &non_function); + + // Goto slow case if we do not have a function. + __ GetObjectType(a1, a4, a4); + __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE)); + } + + // Fast-case: Invoke the function now. + // a1: pushed function + ParameterCount actual(argc); + + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); + } + + // Compute the receiver in sloppy mode. + __ ld(a3, MemOperand(sp, argc * kPointerSize)); + + if (needs_checks) { + __ JumpIfSmi(a3, &wrap); + __ GetObjectType(a3, a4, a4); + __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE)); + } else { + __ jmp(&wrap); + } + + __ bind(&cont); + } + __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); + + if (needs_checks) { + // Slow-case: Non-function called. + __ bind(&slow); + EmitSlowCase(masm, argc, &non_function); + } + + if (call_as_method) { + __ bind(&wrap); + // Wrap the receiver and patch it back onto the stack. + EmitWrapCase(masm, argc, &cont); + } +} + + +void CallFunctionStub::Generate(MacroAssembler* masm) { + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + +void CallConstructStub::Generate(MacroAssembler* masm) { + // a0 : number of arguments + // a1 : the function to call + // a2 : feedback vector + // a3 : (only if a2 is not undefined) slot in feedback vector (Smi) + Label slow, non_function_call; + // Check that the function is not a smi. 
+ __ JumpIfSmi(a1, &non_function_call); + // Check that the function is a JSFunction. + __ GetObjectType(a1, a4, a4); + __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE)); + + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + + __ dsrl(at, a3, 32 - kPointerSizeLog2); + __ Daddu(a5, a2, at); + if (FLAG_pretenuring_call_new) { + // Put the AllocationSite from the feedback vector into a2. + // By adding kPointerSize we encode that we know the AllocationSite + // entry is at the feedback vector slot given by a3 + 1. + __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize)); + } else { + Label feedback_register_initialized; + // Put the AllocationSite from the feedback vector into a2, or undefined. + __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize)); + __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&feedback_register_initialized, eq, a5, Operand(at)); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ bind(&feedback_register_initialized); + } + + __ AssertUndefinedOrAllocationSite(a2, a5); + } + + // Jump to the function-specific construct stub. + Register jmp_reg = a4; + __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ ld(jmp_reg, FieldMemOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(at); + + // a0: number of arguments + // a1: called object + // a4: object type + Label do_call; + __ bind(&slow); + __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE)); + __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + + __ bind(&non_function_call); + __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); + // Set expected number of arguments to zero (not changing r0). 
+ __ li(a2, Operand(0, RelocInfo::NONE32)); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +// StringCharCodeAtGenerator. +void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { + Label flat_string; + Label ascii_string; + Label got_char_code; + Label sliced_string; + + DCHECK(!a4.is(index_)); + DCHECK(!a4.is(result_)); + DCHECK(!a4.is(object_)); + + // If the receiver is a smi trigger the non-string case. + __ JumpIfSmi(object_, receiver_not_string_); + + // Fetch the instance type of the receiver into result register. + __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + // If the receiver is not a string trigger the non-string case. + __ And(a4, result_, Operand(kIsNotStringMask)); + __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg)); + + // If the index is non-smi trigger the non-smi case. + __ JumpIfNotSmi(index_, &index_not_smi_); + + __ bind(&got_smi_index_); + + // Check for index out of range. 
+ __ ld(a4, FieldMemOperand(object_, String::kLengthOffset)); + __ Branch(index_out_of_range_, ls, a4, Operand(index_)); + + __ SmiUntag(index_); + + StringCharLoadGenerator::Generate(masm, + object_, + index_, + result_, + &call_runtime_); + + __ SmiTag(result_); + __ bind(&exit_); +} + + +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ld(vector, FieldMemOperand(vector, + JSFunction::kSharedFunctionInfoOffset)); + __ ld(vector, FieldMemOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // a1 - function + // a3 - slot id + Label miss; + + EmitLoadTypeFeedbackVector(masm, a2); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at); + __ Branch(&miss, ne, a1, Operand(at)); + + __ li(a0, Operand(arg_count())); + __ dsrl(at, a3, 32 - kPointerSizeLog2); + __ Daddu(at, a2, Operand(at)); + __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize)); + + // Verify that a4 contains an AllocationSite + __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&miss, ne, a5, Operand(at)); + + __ mov(a2, a4); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + // Unreachable. + __ stop("Unexpected code address"); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // a1 - function + // a3 - slot id (Smi) + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, a2); + + // The checks. 
First, does r1 match the recorded monomorphic target? + __ dsrl(a4, a3, 32 - kPointerSizeLog2); + __ Daddu(a4, a2, Operand(a4)); + __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize)); + __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4)); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + // Compute the receiver in sloppy mode. + __ ld(a3, MemOperand(sp, argc * kPointerSize)); + + __ JumpIfSmi(a3, &wrap); + __ GetObjectType(a3, a4, a4); + __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE)); + + __ bind(&cont); + } + + __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(masm, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); + __ Branch(&slow_start, eq, a4, Operand(at)); + __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); + __ Branch(&miss, eq, a4, Operand(at)); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. + __ AssertNotSmi(a4); + __ GetObjectType(a4, a5, a5); + __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE)); + __ dsrl(a4, a3, 32 - kPointerSizeLog2); + __ Daddu(a4, a2, Operand(a4)); + __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); + __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize)); + __ Branch(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + // Check that the function is really a JavaScript function. + // r1: pushed function (to be verified) + __ JumpIfSmi(a1, &non_function); + + // Goto slow case if we do not have a function. 
+ __ GetObjectType(a1, a4, a4); + __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE)); + __ Branch(&have_js_function); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ ld(a4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ Push(a4, a1, a2, a3); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to a1 and exit the internal frame. + __ mov(a1, v0); + } +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, + result_, + Heap::kHeapNumberMapRootIndex, + index_not_number_, + DONT_DO_SMI_CHECK); + call_helper.BeforeCall(masm); + // Consumed by runtime conversion function: + __ Push(object_, index_); + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + + __ Move(index_, v0); + __ pop(object_); + // Reload the instance type. + __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + __ JumpIfNotSmi(index_, index_out_of_range_); + // Otherwise, return to the fast path. 
+ __ Branch(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). + __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ SmiTag(index_); + __ Push(object_, index_); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); + + __ Move(result_, v0); + + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + + DCHECK(!a4.is(result_)); + DCHECK(!a4.is(code_)); + + STATIC_ASSERT(kSmiTag == 0); + DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); + __ And(a4, + code_, + Operand(kSmiTagMask | + ((~String::kMaxOneByteCharCode) << kSmiTagSize))); + __ Branch(&slow_case_, ne, a4, Operand(zero_reg)); + + + __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); + // At this point code register contains smi tagged ASCII char code. 
+ STATIC_ASSERT(kSmiTag == 0); + __ SmiScale(a4, code_, kPointerSizeLog2); + __ Daddu(result_, result_, a4); + __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); + __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); + __ Branch(&slow_case_, eq, result_, Operand(a4)); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + __ Move(result_, v0); + + call_helper.AfterCall(masm); + __ Branch(&exit_); + + __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); +} + + +enum CopyCharactersFlags { + COPY_ASCII = 1, + DEST_ALWAYS_ALIGNED = 2 +}; + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding) { + if (FLAG_debug_code) { + // Check that destination is word aligned. + __ And(scratch, dest, Operand(kPointerAlignmentMask)); + __ Check(eq, + kDestinationOfCopyNotAligned, + scratch, + Operand(zero_reg)); + } + + // Assumes word reads and writes are little endian. + // Nothing to do for zero characters. + Label done; + + if (encoding == String::TWO_BYTE_ENCODING) { + __ Daddu(count, count, count); + } + + Register limit = count; // Read until dest equals this. + __ Daddu(limit, dest, Operand(count)); + + Label loop_entry, loop; + // Copy bytes from src to dest until dest hits limit. 
+ __ Branch(&loop_entry); + __ bind(&loop); + __ lbu(scratch, MemOperand(src)); + __ daddiu(src, src, 1); + __ sb(scratch, MemOperand(dest)); + __ daddiu(dest, dest, 1); + __ bind(&loop_entry); + __ Branch(&loop, lt, dest, Operand(limit)); + + __ bind(&done); +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character) { + // hash = seed + character + ((seed + character) << 10); + __ LoadRoot(hash, Heap::kHashSeedRootIndex); + // Untag smi seed and add the character. + __ SmiUntag(hash); + __ addu(hash, hash, character); + __ sll(at, hash, 10); + __ addu(hash, hash, at); + // hash ^= hash >> 6; + __ srl(at, hash, 6); + __ xor_(hash, hash, at); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character) { + // hash += character; + __ addu(hash, hash, character); + // hash += hash << 10; + __ sll(at, hash, 10); + __ addu(hash, hash, at); + // hash ^= hash >> 6; + __ srl(at, hash, 6); + __ xor_(hash, hash, at); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash) { + // hash += hash << 3; + __ sll(at, hash, 3); + __ addu(hash, hash, at); + // hash ^= hash >> 11; + __ srl(at, hash, 11); + __ xor_(hash, hash, at); + // hash += hash << 15; + __ sll(at, hash, 15); + __ addu(hash, hash, at); + + __ li(at, Operand(String::kHashBitMask)); + __ and_(hash, hash, at); + + // if (hash == 0) hash = 27; + __ ori(at, zero_reg, StringHasher::kZeroHash); + __ Movz(hash, at, hash); +} + + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + // Stack frame on entry. + // ra: return address + // sp[0]: to + // sp[4]: from + // sp[8]: string + + // This stub is called from the native-call %_SubString(...), so + // nothing can be assumed about the arguments. It is tested that: + // "string" is a sequential string, + // both "from" and "to" are smis, and + // 0 <= from <= to <= string.length. 
+ // If any of these assumptions fail, we call the runtime system. + + const int kToOffset = 0 * kPointerSize; + const int kFromOffset = 1 * kPointerSize; + const int kStringOffset = 2 * kPointerSize; + + __ ld(a2, MemOperand(sp, kToOffset)); + __ ld(a3, MemOperand(sp, kFromOffset)); +// Does not needed? +// STATIC_ASSERT(kFromOffset == kToOffset + 4); + STATIC_ASSERT(kSmiTag == 0); +// Does not needed? +// STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + + // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is + // safe in this case. + __ JumpIfNotSmi(a2, &runtime); + __ JumpIfNotSmi(a3, &runtime); + // Both a2 and a3 are untagged integers. + + __ SmiUntag(a2, a2); + __ SmiUntag(a3, a3); + __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. + + __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to. + __ Dsubu(a2, a2, a3); + + // Make sure first argument is a string. + __ ld(v0, MemOperand(sp, kStringOffset)); + __ JumpIfSmi(v0, &runtime); + __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); + __ And(a4, a1, Operand(kIsNotStringMask)); + + __ Branch(&runtime, ne, a4, Operand(zero_reg)); + + Label single_char; + __ Branch(&single_char, eq, a2, Operand(1)); + + // Short-cut for the case of trivial substring. + Label return_v0; + // v0: original string + // a2: result string length + __ ld(a4, FieldMemOperand(v0, String::kLengthOffset)); + __ SmiUntag(a4); + // Return original string. + __ Branch(&return_v0, eq, a2, Operand(a4)); + // Longer than original string's length or negative: unsafe arguments. + __ Branch(&runtime, hi, a2, Operand(a4)); + // Shorter than original string's length: an actual substring. + + // Deal with different string types: update the index if necessary + // and put the underlying string into a5. 
+ // v0: original string + // a1: instance type + // a2: length + // a3: from index (untagged) + Label underlying_unpacked, sliced_string, seq_or_external_string; + // If the string is not indirect, it can only be sequential or external. + STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); + STATIC_ASSERT(kIsIndirectStringMask != 0); + __ And(a4, a1, Operand(kIsIndirectStringMask)); + __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg)); + // a4 is used as a scratch register and can be overwritten in either case. + __ And(a4, a1, Operand(kSlicedNotConsMask)); + __ Branch(&sliced_string, ne, a4, Operand(zero_reg)); + // Cons string. Check whether it is flat, then fetch first part. + __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset)); + __ LoadRoot(a4, Heap::kempty_stringRootIndex); + __ Branch(&runtime, ne, a5, Operand(a4)); + __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset)); + // Update instance type. + __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset)); + __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); + __ jmp(&underlying_unpacked); + + __ bind(&sliced_string); + // Sliced string. Fetch parent and correct start index by offset. + __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset)); + __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset)); + __ SmiUntag(a4); // Add offset to index. + __ Daddu(a3, a3, a4); + // Update instance type. + __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset)); + __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); + __ jmp(&underlying_unpacked); + + __ bind(&seq_or_external_string); + // Sequential or external string. Just move string to the expected register. + __ mov(a5, v0); + + __ bind(&underlying_unpacked); + + if (FLAG_string_slices) { + Label copy_routine; + // a5: underlying subject string + // a1: instance type of underlying subject string + // a2: length + // a3: adjusted start index (untagged) + // Short slice. 
Copy instead of slicing. + __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength)); + // Allocate new sliced string. At this point we do not reload the instance + // type including the string encoding because we simply rely on the info + // provided by the original string. It does not matter if the original + // string's encoding is wrong because we always have to recheck encoding of + // the newly created string's parent anyways due to externalized strings. + Label two_byte_slice, set_slice_header; + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); + __ And(a4, a1, Operand(kStringEncodingMask)); + __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg)); + __ AllocateAsciiSlicedString(v0, a2, a6, a7, &runtime); + __ jmp(&set_slice_header); + __ bind(&two_byte_slice); + __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime); + __ bind(&set_slice_header); + __ SmiTag(a3); + __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset)); + __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset)); + __ jmp(&return_v0); + + __ bind(©_routine); + } + + // a5: underlying subject string + // a1: instance type of underlying subject string + // a2: length + // a3: adjusted start index (untagged) + Label two_byte_sequential, sequential_string, allocate_result; + STATIC_ASSERT(kExternalStringTag != 0); + STATIC_ASSERT(kSeqStringTag == 0); + __ And(a4, a1, Operand(kExternalStringTag)); + __ Branch(&sequential_string, eq, a4, Operand(zero_reg)); + + // Handle external string. + // Rule out short external strings. + STATIC_ASSERT(kShortExternalStringTag != 0); + __ And(a4, a1, Operand(kShortExternalStringTag)); + __ Branch(&runtime, ne, a4, Operand(zero_reg)); + __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset)); + // a5 already points to the first character of underlying string. 
+ __ jmp(&allocate_result); + + __ bind(&sequential_string); + // Locate first character of underlying subject string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + + __ bind(&allocate_result); + // Sequential acii string. Allocate the result. + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); + __ And(a4, a1, Operand(kStringEncodingMask)); + __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg)); + + // Allocate and copy the resulting ASCII string. + __ AllocateAsciiString(v0, a2, a4, a6, a7, &runtime); + + // Locate first character of substring to copy. + __ Daddu(a5, a5, a3); + + // Locate first character of result. + __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + + // v0: result string + // a1: first character of result string + // a2: result string length + // a5: first character of substring to copy + STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharacters( + masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING); + __ jmp(&return_v0); + + // Allocate and copy the resulting two-byte string. + __ bind(&two_byte_sequential); + __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime); + + // Locate first character of substring to copy. + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ dsll(a4, a3, 1); + __ Daddu(a5, a5, a4); + // Locate first character of result. + __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + // v0: result string. + // a1: first character of result. + // a2: result length. + // a5: first character of substring to copy. 
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharacters( + masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING); + + __ bind(&return_v0); + Counters* counters = isolate()->counters(); + __ IncrementCounter(counters->sub_string_native(), 1, a3, a4); + __ DropAndRet(3); + + // Just jump to runtime to create the sub string. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kSubString, 3, 1); + + __ bind(&single_char); + // v0: original string + // a1: instance type + // a2: length + // a3: from index (untagged) + StringCharAtGenerator generator( + v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ DropAndRet(3); + generator.SkipSlow(masm, &runtime); +} + + +void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3) { + Register length = scratch1; + + // Compare lengths. + Label strings_not_equal, check_zero_length; + __ ld(length, FieldMemOperand(left, String::kLengthOffset)); + __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); + __ Branch(&check_zero_length, eq, length, Operand(scratch2)); + __ bind(&strings_not_equal); + // Can not put li in delayslot, it has multi instructions. + __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); + __ Ret(); + + // Check if the length is zero. + Label compare_chars; + __ bind(&check_zero_length); + STATIC_ASSERT(kSmiTag == 0); + __ Branch(&compare_chars, ne, length, Operand(zero_reg)); + DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL))); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(Smi::FromInt(EQUAL))); + + // Compare characters. + __ bind(&compare_chars); + + GenerateAsciiCharsCompareLoop(masm, + left, right, length, scratch2, scratch3, v0, + &strings_not_equal); + + // Characters are equal. 
+ __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(Smi::FromInt(EQUAL))); +} + + +void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4) { + Label result_not_equal, compare_lengths; + // Find minimum length and length difference. + __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset)); + __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); + __ Dsubu(scratch3, scratch1, Operand(scratch2)); + Register length_delta = scratch3; + __ slt(scratch4, scratch2, scratch1); + __ Movn(scratch1, scratch2, scratch4); + Register min_length = scratch1; + STATIC_ASSERT(kSmiTag == 0); + __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); + + // Compare loop. + GenerateAsciiCharsCompareLoop(masm, + left, right, min_length, scratch2, scratch4, v0, + &result_not_equal); + + // Compare lengths - strings up to min-length are equal. + __ bind(&compare_lengths); + DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); + // Use length_delta as result if it's zero. + __ mov(scratch2, length_delta); + __ mov(scratch4, zero_reg); + __ mov(v0, zero_reg); + + __ bind(&result_not_equal); + // Conditionally update the result based either on length_delta or + // the last comparion performed in the loop above. + Label ret; + __ Branch(&ret, eq, scratch2, Operand(scratch4)); + __ li(v0, Operand(Smi::FromInt(GREATER))); + __ Branch(&ret, gt, scratch2, Operand(scratch4)); + __ li(v0, Operand(Smi::FromInt(LESS))); + __ bind(&ret); + __ Ret(); +} + + +void StringCompareStub::GenerateAsciiCharsCompareLoop( + MacroAssembler* masm, + Register left, + Register right, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* chars_not_equal) { + // Change index to run from -length to -1 by adding length to string + // start. 
This means that loop ends when index reaches zero, which + // doesn't need an additional compare. + __ SmiUntag(length); + __ Daddu(scratch1, length, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ Daddu(left, left, Operand(scratch1)); + __ Daddu(right, right, Operand(scratch1)); + __ Dsubu(length, zero_reg, length); + Register index = length; // index = -length; + + + // Compare loop. + Label loop; + __ bind(&loop); + __ Daddu(scratch3, left, index); + __ lbu(scratch1, MemOperand(scratch3)); + __ Daddu(scratch3, right, index); + __ lbu(scratch2, MemOperand(scratch3)); + __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2)); + __ Daddu(index, index, 1); + __ Branch(&loop, ne, index, Operand(zero_reg)); +} + + +void StringCompareStub::Generate(MacroAssembler* masm) { + Label runtime; + + Counters* counters = isolate()->counters(); + + // Stack frame on entry. + // sp[0]: right string + // sp[4]: left string + __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left. + __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right. + + Label not_same; + __ Branch(¬_same, ne, a0, Operand(a1)); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ li(v0, Operand(Smi::FromInt(EQUAL))); + __ IncrementCounter(counters->string_compare_native(), 1, a1, a2); + __ DropAndRet(2); + + __ bind(¬_same); + + // Check that both objects are sequential ASCII strings. + __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime); + + // Compare flat ASCII strings natively. Remove arguments from stack first. 
+ __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); + __ Daddu(sp, sp, Operand(2 * kPointerSize)); + GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, a4, a5); + + __ bind(&runtime); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); +} + + +void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a1 : left + // -- a0 : right + // -- ra : return address + // ----------------------------------- + + // Load a2 with the allocation site. We stick an undefined dummy value here + // and replace it with the real allocation site later when we instantiate this + // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). + __ li(a2, handle(isolate()->heap()->undefined_value())); + + // Make sure that we actually patched the allocation site. + if (FLAG_debug_code) { + __ And(at, a2, Operand(kSmiTagMask)); + __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg)); + __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Assert(eq, kExpectedAllocationSite, a4, Operand(at)); + } + + // Tail call into the stub that handles binary operations with allocation + // sites. + BinaryOpWithAllocationSiteStub stub(isolate(), state_); + __ TailCallStub(&stub); +} + + +void ICCompareStub::GenerateSmis(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::SMI); + Label miss; + __ Or(a2, a1, a0); + __ JumpIfNotSmi(a2, &miss); + + if (GetCondition() == eq) { + // For equality we do not care about the sign of the result. + __ Ret(USE_DELAY_SLOT); + __ Dsubu(v0, a0, a1); + } else { + // Untag before subtracting to avoid handling overflow. 
+ __ SmiUntag(a1); + __ SmiUntag(a0); + __ Ret(USE_DELAY_SLOT); + __ Dsubu(v0, a1, a0); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::NUMBER); + + Label generic_stub; + Label unordered, maybe_undefined1, maybe_undefined2; + Label miss; + + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(a1, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(a0, &miss); + } + + // Inlining the double comparison and falling back to the general compare + // stub if NaN is involved. + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(a0, &right_smi); + __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, + DONT_DO_SMI_CHECK); + __ Dsubu(a2, a0, Operand(kHeapObjectTag)); + __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); + __ Branch(&left); + __ bind(&right_smi); + __ SmiUntag(a2, a0); // Can't clobber a0 yet. + FPURegister single_scratch = f6; + __ mtc1(a2, single_scratch); + __ cvt_d_w(f2, single_scratch); + + __ bind(&left); + __ JumpIfSmi(a1, &left_smi); + __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, + DONT_DO_SMI_CHECK); + __ Dsubu(a2, a1, Operand(kHeapObjectTag)); + __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); + __ Branch(&done); + __ bind(&left_smi); + __ SmiUntag(a2, a1); // Can't clobber a1 yet. + single_scratch = f8; + __ mtc1(a2, single_scratch); + __ cvt_d_w(f0, single_scratch); + + __ bind(&done); + + // Return a result of -1, 0, or 1, or use CompareStub for NaNs. + Label fpu_eq, fpu_lt; + // Test if equal, and also handle the unordered/NaN case. + __ BranchF(&fpu_eq, &unordered, eq, f0, f2); + + // Test if less (unordered case is already handled). + __ BranchF(&fpu_lt, NULL, lt, f0, f2); + + // Otherwise it's greater, so just fall thru, and return. 
+ DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(GREATER)); + + __ bind(&fpu_eq); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(EQUAL)); + + __ bind(&fpu_lt); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(LESS)); + + __ bind(&unordered); + __ bind(&generic_stub); + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + + __ bind(&maybe_undefined1); + if (Token::IsOrderedRelationalCompareOp(op_)) { + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&miss, ne, a0, Operand(at)); + __ JumpIfSmi(a1, &unordered); + __ GetObjectType(a1, a2, a2); + __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE)); + __ jmp(&unordered); + } + + __ bind(&maybe_undefined2); + if (Token::IsOrderedRelationalCompareOp(op_)) { + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&unordered, eq, a1, Operand(at)); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); + Label miss; + + // Registers containing left and right operands respectively. + Register left = a1; + Register right = a0; + Register tmp1 = a2; + Register tmp2 = a3; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are internalized strings. + __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ Or(tmp1, tmp1, Operand(tmp2)); + __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); + __ Branch(&miss, ne, at, Operand(zero_reg)); + + // Make sure a0 is non-zero. 
At this point input operands are + // guaranteed to be non-zero. + DCHECK(right.is(a0)); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ mov(v0, right); + // Internalized strings are compared by identity. + __ Ret(ne, left, Operand(right)); + DCHECK(is_int16(EQUAL)); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(Smi::FromInt(EQUAL))); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::UNIQUE_NAME); + DCHECK(GetCondition() == eq); + Label miss; + + // Registers containing left and right operands respectively. + Register left = a1; + Register right = a0; + Register tmp1 = a2; + Register tmp2 = a3; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are unique names. This leaves the instance + // types loaded in tmp1 and tmp2. + __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + + __ JumpIfNotUniqueName(tmp1, &miss); + __ JumpIfNotUniqueName(tmp2, &miss); + + // Use a0 as result + __ mov(v0, a0); + + // Unique names are compared by identity. + Label done; + __ Branch(&done, ne, left, Operand(right)); + // Make sure a0 is non-zero. At this point input operands are + // guaranteed to be non-zero. + DCHECK(right.is(a0)); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ li(v0, Operand(Smi::FromInt(EQUAL))); + __ bind(&done); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateStrings(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::STRING); + Label miss; + + bool equality = Token::IsEqualityOp(op_); + + // Registers containing left and right operands respectively. 
+ Register left = a1; + Register right = a0; + Register tmp1 = a2; + Register tmp2 = a3; + Register tmp3 = a4; + Register tmp4 = a5; + Register tmp5 = a6; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are strings. This leaves the instance + // types loaded in tmp1 and tmp2. + __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kNotStringTag != 0); + __ Or(tmp3, tmp1, tmp2); + __ And(tmp5, tmp3, Operand(kIsNotStringMask)); + __ Branch(&miss, ne, tmp5, Operand(zero_reg)); + + // Fast check for identical strings. + Label left_ne_right; + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Branch(&left_ne_right, ne, left, Operand(right)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, zero_reg); // In the delay slot. + __ bind(&left_ne_right); + + // Handle not identical strings. + + // Check that both strings are internalized strings. If they are, we're done + // because we already know they are not identical. We know they are both + // strings. + if (equality) { + DCHECK(GetCondition() == eq); + STATIC_ASSERT(kInternalizedTag == 0); + __ Or(tmp3, tmp1, Operand(tmp2)); + __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); + Label is_symbol; + __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); + // Make sure a0 is non-zero. At this point input operands are + // guaranteed to be non-zero. + DCHECK(right.is(a0)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); // In the delay slot. + __ bind(&is_symbol); + } + + // Check that both strings are sequential ASCII. + Label runtime; + __ JumpIfBothInstanceTypesAreNotSequentialAscii( + tmp1, tmp2, tmp3, tmp4, &runtime); + + // Compare flat ASCII strings. Returns when done. 
+ if (equality) { + StringCompareStub::GenerateFlatAsciiStringEquals( + masm, left, right, tmp1, tmp2, tmp3); + } else { + StringCompareStub::GenerateCompareFlatAsciiStrings( + masm, left, right, tmp1, tmp2, tmp3, tmp4); + } + + // Handle more complex cases in runtime. + __ bind(&runtime); + __ Push(left, right); + if (equality) { + __ TailCallRuntime(Runtime::kStringEquals, 2, 1); + } else { + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateObjects(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::OBJECT); + Label miss; + __ And(a2, a1, Operand(a0)); + __ JumpIfSmi(a2, &miss); + + __ GetObjectType(a0, a2, a2); + __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); + __ GetObjectType(a1, a2, a2); + __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); + + DCHECK(GetCondition() == eq); + __ Ret(USE_DELAY_SLOT); + __ dsubu(v0, a0, a1); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { + Label miss; + __ And(a2, a1, a0); + __ JumpIfSmi(a2, &miss); + __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); + __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ Branch(&miss, ne, a2, Operand(known_map_)); + __ Branch(&miss, ne, a3, Operand(known_map_)); + + __ Ret(USE_DELAY_SLOT); + __ dsubu(v0, a0, a1); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateMiss(MacroAssembler* masm) { + { + // Call the runtime system in a fresh internal frame. + ExternalReference miss = + ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1, a0); + __ Push(ra, a1, a0); + __ li(a4, Operand(Smi::FromInt(op_))); + __ daddiu(sp, sp, -kPointerSize); + __ CallExternalReference(miss, 3, USE_DELAY_SLOT); + __ sd(a4, MemOperand(sp)); // In the delay slot. + // Compute the entry point of the rewritten stub. 
+ __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Restore registers. + __ Pop(a1, a0, ra); + } + __ Jump(a2); +} + + +void DirectCEntryStub::Generate(MacroAssembler* masm) { + // Make place for arguments to fit C calling convention. Most of the callers + // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame + // so they handle stack restoring and we don't have to do that here. + // Any caller of DirectCEntryStub::GenerateCall must take care of dropping + // kCArgsSlotsSize stack space after the call. + __ daddiu(sp, sp, -kCArgsSlotsSize); + // Place the return address on the stack, making the call + // GC safe. The RegExp backend also relies on this. + __ sd(ra, MemOperand(sp, kCArgsSlotsSize)); + __ Call(t9); // Call the C++ function. + __ ld(t9, MemOperand(sp, kCArgsSlotsSize)); + + if (FLAG_debug_code && FLAG_enable_slow_asserts) { + // In case of an error the return address may point to a memory area + // filled with kZapValue by the GC. + // Dereference the address and check for this. + __ Uld(a4, MemOperand(t9)); + __ Assert(ne, kReceivedInvalidReturnAddress, a4, + Operand(reinterpret_cast<uint64_t>(kZapValue))); + } + __ Jump(t9); +} + + +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + Register target) { + intptr_t loc = + reinterpret_cast<intptr_t>(GetCode().location()); + __ Move(t9, target); + __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE); + __ Call(ra); +} + + +void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle<Name> name, + Register scratch0) { + DCHECK(name->IsUniqueName()); + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. 
It's true even if some slots represent deleted properties + // (their names are the hole value). + for (int i = 0; i < kInlinedProbes; i++) { + // scratch0 points to properties hash. + // Compute the masked index: (hash + i + i * i) & mask. + Register index = scratch0; + // Capacity is smi 2^n. + __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset)); + __ Dsubu(index, index, Operand(1)); + __ And(index, index, + Operand(name->Hash() + NameDictionary::GetProbeOffset(i))); + + // Scale the index by multiplying by the entry size. + DCHECK(NameDictionary::kEntrySize == 3); + __ dsll(at, index, 1); + __ Daddu(index, index, at); // index *= 3. + + Register entity_name = scratch0; + // Having undefined at this place means the name is not contained. + DCHECK_EQ(kSmiTagSize, 1); + Register tmp = properties; + + __ dsll(scratch0, index, kPointerSizeLog2); + __ Daddu(tmp, properties, scratch0); + __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); + + DCHECK(!tmp.is(entity_name)); + __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); + __ Branch(done, eq, entity_name, Operand(tmp)); + + // Load the hole ready for use below: + __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); + + // Stop if found the property. + __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); + + Label good; + __ Branch(&good, eq, entity_name, Operand(tmp)); + + // Check if the entry name is not a unique name. + __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); + __ lbu(entity_name, + FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); + __ JumpIfNotUniqueName(entity_name, miss); + __ bind(&good); + + // Restore the properties. 
+ __ ld(properties, + FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + } + + const int spill_mask = + (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() | + a2.bit() | a1.bit() | a0.bit() | v0.bit()); + + __ MultiPush(spill_mask); + __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ li(a1, Operand(Handle<Name>(name))); + NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); + __ CallStub(&stub); + __ mov(at, v0); + __ MultiPop(spill_mask); + + __ Branch(done, eq, at, Operand(zero_reg)); + __ Branch(miss, ne, at, Operand(zero_reg)); +} + + +// Probe the name dictionary in the |elements| register. Jump to the +// |done| label if a property with the given name is found. Jump to +// the |miss| label otherwise. +// If lookup was successful |scratch2| will be equal to elements + 4 * index. +void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register scratch1, + Register scratch2) { + DCHECK(!elements.is(scratch1)); + DCHECK(!elements.is(scratch2)); + DCHECK(!name.is(scratch1)); + DCHECK(!name.is(scratch2)); + + __ AssertName(name); + + // Compute the capacity mask. + __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset)); + __ SmiUntag(scratch1); + __ Dsubu(scratch1, scratch1, Operand(1)); + + // Generate an unrolled loop that performs a few probes before + // giving up. Measurements done on Gmail indicate that 2 probes + // cover ~93% of loads from dictionaries. + for (int i = 0; i < kInlinedProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); + if (i > 0) { + // Add the probe offset (i + i * i) left shifted to avoid right shifting + // the hash in a separate instruction. The value hash + i + i * i is right + // shifted in the following and instruction. 
+ DCHECK(NameDictionary::GetProbeOffset(i) < + 1 << (32 - Name::kHashFieldOffset)); + __ Daddu(scratch2, scratch2, Operand( + NameDictionary::GetProbeOffset(i) << Name::kHashShift)); + } + __ dsrl(scratch2, scratch2, Name::kHashShift); + __ And(scratch2, scratch1, scratch2); + + // Scale the index by multiplying by the element size. + DCHECK(NameDictionary::kEntrySize == 3); + // scratch2 = scratch2 * 3. + + __ dsll(at, scratch2, 1); + __ Daddu(scratch2, scratch2, at); + + // Check if the key is identical to the name. + __ dsll(at, scratch2, kPointerSizeLog2); + __ Daddu(scratch2, elements, at); + __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset)); + __ Branch(done, eq, name, Operand(at)); + } + + const int spill_mask = + (ra.bit() | a6.bit() | a5.bit() | a4.bit() | + a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & + ~(scratch1.bit() | scratch2.bit()); + + __ MultiPush(spill_mask); + if (name.is(a0)) { + DCHECK(!elements.is(a1)); + __ Move(a1, name); + __ Move(a0, elements); + } else { + __ Move(a0, elements); + __ Move(a1, name); + } + NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); + __ CallStub(&stub); + __ mov(scratch2, a2); + __ mov(at, v0); + __ MultiPop(spill_mask); + + __ Branch(done, ne, at, Operand(zero_reg)); + __ Branch(miss, eq, at, Operand(zero_reg)); +} + + +void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. + // Registers: + // result: NameDictionary to probe + // a1: key + // dictionary: NameDictionary to probe. + // index: will hold an index of entry if lookup is successful. + // might alias with result_. + // Returns: + // result_ is zero if lookup failed, non zero otherwise. 
+ + Register result = v0; + Register dictionary = a0; + Register key = a1; + Register index = a2; + Register mask = a3; + Register hash = a4; + Register undefined = a5; + Register entry_key = a6; + + Label in_dictionary, maybe_in_dictionary, not_in_dictionary; + + __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset)); + __ SmiUntag(mask); + __ Dsubu(mask, mask, Operand(1)); + + __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset)); + + __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); + + for (int i = kInlinedProbes; i < kTotalProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + // Capacity is smi 2^n. + if (i > 0) { + // Add the probe offset (i + i * i) left shifted to avoid right shifting + // the hash in a separate instruction. The value hash + i + i * i is right + // shifted in the following and instruction. + DCHECK(NameDictionary::GetProbeOffset(i) < + 1 << (32 - Name::kHashFieldOffset)); + __ Daddu(index, hash, Operand( + NameDictionary::GetProbeOffset(i) << Name::kHashShift)); + } else { + __ mov(index, hash); + } + __ dsrl(index, index, Name::kHashShift); + __ And(index, mask, index); + + // Scale the index by multiplying by the entry size. + DCHECK(NameDictionary::kEntrySize == 3); + // index *= 3. + __ mov(at, index); + __ dsll(index, index, 1); + __ Daddu(index, index, at); + + + DCHECK_EQ(kSmiTagSize, 1); + __ dsll(index, index, kPointerSizeLog2); + __ Daddu(index, index, dictionary); + __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset)); + + // Having undefined at this place means the name is not contained. + __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); + + // Stop if found the property. + __ Branch(&in_dictionary, eq, entry_key, Operand(key)); + + if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { + // Check if the entry name is not a unique name. 
+ __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); + __ lbu(entry_key, + FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); + __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); + } + } + + __ bind(&maybe_in_dictionary); + // If we are doing negative lookup then probing failure should be + // treated as a lookup success. For positive lookup probing failure + // should be treated as lookup failure. + if (mode_ == POSITIVE_LOOKUP) { + __ Ret(USE_DELAY_SLOT); + __ mov(result, zero_reg); + } + + __ bind(&in_dictionary); + __ Ret(USE_DELAY_SLOT); + __ li(result, 1); + + __ bind(¬_in_dictionary); + __ Ret(USE_DELAY_SLOT); + __ mov(result, zero_reg); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { + StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); + stub1.GetCode(); + // Hydrogen code stubs need stub2 at snapshot time. + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two branch+nop instructions are generated with labels so as to + // get the offset fixed up correctly by the bind(Label*) call. We patch it + // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this + // position) and the "beq zero_reg, zero_reg, ..." when we start and stop + // incremental heap marking. + // See RecordWriteStub::Patch for details. 
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting); + __ nop(); + __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting); + __ nop(); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } + __ Ret(); + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + + PatchBranchIntoNop(masm, 0); + PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. 
+ CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm); + regs_.Restore(masm); + __ Ret(); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + Register address = + a0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); + DCHECK(!address.is(regs_.object())); + DCHECK(!address.is(a0)); + __ Move(address, regs_.address()); + __ Move(a0, regs_.object()); + __ Move(a1, address); + __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); + + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function(isolate()), + argument_count); + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_scratch; + + __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); + __ ld(regs_.scratch1(), + MemOperand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1)); + __ sd(regs_.scratch1(), + MemOperand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg)); + + // Let's look at the color of the object: If it is not black we don't have + // to inform the 
incremental marker. + __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&on_black); + + // Get the value from the slot. + __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + eq, + &ensure_not_white); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + eq, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need extra registers for this, so we push the object and the address + // register temporarily. + __ Push(regs_.object(), regs_.address()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. + &need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. 
+} + + +void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : element value to store + // -- a3 : element index as smi + // -- sp[0] : array literal index in function as smi + // -- sp[4] : array literal + // clobbers a1, a2, a4 + // ----------------------------------- + + Label element_done; + Label double_elements; + Label smi_element; + Label slow_elements; + Label fast_elements; + + // Get array literal index, array literal and its map. + __ ld(a4, MemOperand(sp, 0 * kPointerSize)); + __ ld(a1, MemOperand(sp, 1 * kPointerSize)); + __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset)); + + __ CheckFastElements(a2, a5, &double_elements); + // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements + __ JumpIfSmi(a0, &smi_element); + __ CheckFastSmiElements(a2, a5, &fast_elements); + + // Store into the array literal requires a elements transition. Call into + // the runtime. + __ bind(&slow_elements); + // call. + __ Push(a1, a3, a0); + __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset)); + __ Push(a5, a4); + __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); + + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. + __ bind(&fast_elements); + __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset)); + __ SmiScale(a6, a3, kPointerSizeLog2); + __ Daddu(a6, a5, a6); + __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ sd(a0, MemOperand(a6, 0)); + // Update the write barrier for the array store. + __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); + + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. 
+ __ bind(&smi_element); + __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset)); + __ SmiScale(a6, a3, kPointerSizeLog2); + __ Daddu(a6, a5, a6); + __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); + + // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. + __ bind(&double_elements); + __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset)); + __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); +} + + +void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { + CEntryStub ces(isolate(), 1, kSaveFPRegs); + __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); + int parameter_count_offset = + StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; + __ ld(a1, MemOperand(fp, parameter_count_offset)); + if (function_mode_ == JS_FUNCTION_STUB_MODE) { + __ Daddu(a1, a1, Operand(1)); + } + masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); + __ dsll(a1, a1, kPointerSizeLog2); + __ Ret(USE_DELAY_SLOT); + __ Daddu(sp, sp, a1); +} + + +void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { + if (masm->isolate()->function_entry_hook() != NULL) { + ProfileEntryHookStub stub(masm->isolate()); + __ push(ra); + __ CallStub(&stub); + __ pop(ra); + } +} + + +void ProfileEntryHookStub::Generate(MacroAssembler* masm) { + // The entry hook is a "push ra" instruction, followed by a call. + // Note: on MIPS "push" is 2 instruction + const int32_t kReturnAddressDistanceFromFunctionStart = + Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize); + + // This should contain all kJSCallerSaved registers. + const RegList kSavedRegs = + kJSCallerSaved | // Caller saved registers. + s5.bit(); // Saved stack pointer. + + // We also save ra, so the count here is one higher than the mask indicates. + const int32_t kNumSavedRegs = kNumJSCallerSaved + 2; + + // Save all caller-save registers as this may be called from anywhere. 
+ __ MultiPush(kSavedRegs | ra.bit()); + + // Compute the function's address for the first argument. + __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); + + // The caller's return address is above the saved temporaries. + // Grab that for the second argument to the hook. + __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); + + // Align the stack if necessary. + int frame_alignment = masm->ActivationFrameAlignment(); + if (frame_alignment > kPointerSize) { + __ mov(s5, sp); + DCHECK(IsPowerOf2(frame_alignment)); + __ And(sp, sp, Operand(-frame_alignment)); + } + + __ Dsubu(sp, sp, kCArgsSlotsSize); +#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) + int64_t entry_hook = + reinterpret_cast<int64_t>(isolate()->function_entry_hook()); + __ li(t9, Operand(entry_hook)); +#else + // Under the simulator we need to indirect the entry hook through a + // trampoline function at a known address. + // It additionally takes an isolate as a third parameter. + __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); + + ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); + __ li(t9, Operand(ExternalReference(&dispatcher, + ExternalReference::BUILTIN_CALL, + isolate()))); +#endif + // Call C function through t9 to conform ABI for PIC. + __ Call(t9); + + // Restore the stack pointer if needed. + if (frame_alignment > kPointerSize) { + __ mov(sp, s5); + } else { + __ Daddu(sp, sp, kCArgsSlotsSize); + } + + // Also pop ra to get Ret(0). 
+ __ MultiPop(kSavedRegs | ra.bit()); + __ Ret(); +} + + +template<class T> +static void CreateArrayDispatch(MacroAssembler* masm, + AllocationSiteOverrideMode mode) { + if (mode == DISABLE_ALLOCATION_SITES) { + T stub(masm->isolate(), GetInitialFastElementsKind(), mode); + __ TailCallStub(&stub); + } else if (mode == DONT_OVERRIDE) { + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + T stub(masm->isolate(), kind); + __ TailCallStub(&stub, eq, a3, Operand(kind)); + } + + // If we reached this point there is a problem. + __ Abort(kUnexpectedElementsKindInArrayConstructor); + } else { + UNREACHABLE(); + } +} + + +static void CreateArrayDispatchOneArgument(MacroAssembler* masm, + AllocationSiteOverrideMode mode) { + // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) + // a3 - kind (if mode != DISABLE_ALLOCATION_SITES) + // a0 - number of arguments + // a1 - constructor? + // sp[0] - last argument + Label normal_sequence; + if (mode == DONT_OVERRIDE) { + DCHECK(FAST_SMI_ELEMENTS == 0); + DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); + DCHECK(FAST_ELEMENTS == 2); + DCHECK(FAST_HOLEY_ELEMENTS == 3); + DCHECK(FAST_DOUBLE_ELEMENTS == 4); + DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + + // is the low bit set? If so, we are holey and that is good. 
+ __ And(at, a3, Operand(1)); + __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); + } + // look at the first argument + __ ld(a5, MemOperand(sp, 0)); + __ Branch(&normal_sequence, eq, a5, Operand(zero_reg)); + + if (mode == DISABLE_ALLOCATION_SITES) { + ElementsKind initial = GetInitialFastElementsKind(); + ElementsKind holey_initial = GetHoleyElementsKind(initial); + + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, + DISABLE_ALLOCATION_SITES); + __ TailCallStub(&stub_holey); + + __ bind(&normal_sequence); + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, + DISABLE_ALLOCATION_SITES); + __ TailCallStub(&stub); + } else if (mode == DONT_OVERRIDE) { + // We are going to create a holey array, but our kind is non-holey. + // Fix kind and retry (only if we have an allocation site in the slot). + __ Daddu(a3, a3, Operand(1)); + + if (FLAG_debug_code) { + __ ld(a5, FieldMemOperand(a2, 0)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Assert(eq, kExpectedAllocationSite, a5, Operand(at)); + } + + // Save the resulting elements kind in type info. We can't just store a3 + // in the AllocationSite::transition_info field because elements kind is + // restricted to a portion of the field...upper bits need to be left alone. + STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); + __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); + __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); + __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); + + + __ bind(&normal_sequence); + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); + __ TailCallStub(&stub, eq, a3, Operand(kind)); + } + + // If we reached this point there is a problem. 
+ __ Abort(kUnexpectedElementsKindInArrayConstructor); + } else { + UNREACHABLE(); + } +} + + +template<class T> +static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { + int to_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= to_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + T stub(isolate, kind); + stub.GetCode(); + if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); + } + } +} + + +void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { + ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( + isolate); +} + + +void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( + Isolate* isolate) { + ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; + for (int i = 0; i < 2; i++) { + // For internal arrays we only need a few things. 
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]); + stubh3.GetCode(); + } +} + + +void ArrayConstructorStub::GenerateDispatchToArrayStub( + MacroAssembler* masm, + AllocationSiteOverrideMode mode) { + if (argument_count_ == ANY) { + Label not_zero_case, not_one_case; + __ And(at, a0, a0); + __ Branch(¬_zero_case, ne, at, Operand(zero_reg)); + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); + + __ bind(¬_zero_case); + __ Branch(¬_one_case, gt, a0, Operand(1)); + CreateArrayDispatchOneArgument(masm, mode); + + __ bind(¬_one_case); + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); + } else if (argument_count_ == NONE) { + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); + } else if (argument_count_ == ONE) { + CreateArrayDispatchOneArgument(masm, mode); + } else if (argument_count_ == MORE_THAN_ONE) { + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); + } else { + UNREACHABLE(); + } +} + + +void ArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc (only if argument_count_ == ANY) + // -- a1 : constructor + // -- a2 : AllocationSite or undefined + // -- sp[0] : return address + // -- sp[4] : last argument + // ----------------------------------- + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. 
+ __ SmiTst(a4, at); + __ Assert(ne, kUnexpectedInitialMapForArrayFunction, + at, Operand(zero_reg)); + __ GetObjectType(a4, a4, a5); + __ Assert(eq, kUnexpectedInitialMapForArrayFunction, + a5, Operand(MAP_TYPE)); + + // We should either have undefined in a2 or a valid AllocationSite + __ AssertUndefinedOrAllocationSite(a2, a4); + } + + Label no_info; + // Get the elements kind and case on that. + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&no_info, eq, a2, Operand(at)); + + __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); + __ SmiUntag(a3); + STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); + __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask)); + GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); + + __ bind(&no_info); + GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); +} + + +void InternalArrayConstructorStub::GenerateCase( + MacroAssembler* masm, ElementsKind kind) { + + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); + __ TailCallStub(&stub0, lo, a0, Operand(1)); + + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); + __ TailCallStub(&stubN, hi, a0, Operand(1)); + + if (IsFastPackedElementsKind(kind)) { + // We might need to create a holey array + // look at the first argument. 
+ __ ld(at, MemOperand(sp, 0)); + + InternalArraySingleArgumentConstructorStub + stub1_holey(isolate(), GetHoleyElementsKind(kind)); + __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); + } + + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); + __ TailCallStub(&stub1); +} + + +void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- a1 : constructor + // -- sp[0] : return address + // -- sp[4] : last argument + // ----------------------------------- + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ SmiTst(a3, at); + __ Assert(ne, kUnexpectedInitialMapForArrayFunction, + at, Operand(zero_reg)); + __ GetObjectType(a3, a3, a4); + __ Assert(eq, kUnexpectedInitialMapForArrayFunction, + a4, Operand(MAP_TYPE)); + } + + // Figure out the right elements kind. + __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + + // Load the map's "bit field 2" into a3. We only need the first byte, + // but the following bit field extraction takes care of that anyway. + __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset)); + // Retrieve elements_kind from bit field 2. 
+ __ DecodeField<Map::ElementsKindBits>(a3); + + if (FLAG_debug_code) { + Label done; + __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS)); + __ Assert( + eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, + a3, Operand(FAST_HOLEY_ELEMENTS)); + __ bind(&done); + } + + Label fast_elements_case; + __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS)); + GenerateCase(masm, FAST_HOLEY_ELEMENTS); + + __ bind(&fast_elements_case); + GenerateCase(masm, FAST_ELEMENTS); +} + + +void CallApiFunctionStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : callee + // -- a4 : call_data + // -- a2 : holder + // -- a1 : api_function_address + // -- cp : context + // -- + // -- sp[0] : last argument + // -- ... + // -- sp[(argc - 1)* 4] : first argument + // -- sp[argc * 4] : receiver + // ----------------------------------- + + Register callee = a0; + Register call_data = a4; + Register holder = a2; + Register api_function_address = a1; + Register context = cp; + + int argc = ArgumentBits::decode(bit_field_); + bool is_store = IsStoreBits::decode(bit_field_); + bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_); + + typedef FunctionCallbackArguments FCA; + + STATIC_ASSERT(FCA::kContextSaveIndex == 6); + STATIC_ASSERT(FCA::kCalleeIndex == 5); + STATIC_ASSERT(FCA::kDataIndex == 4); + STATIC_ASSERT(FCA::kReturnValueOffset == 3); + STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(FCA::kIsolateIndex == 1); + STATIC_ASSERT(FCA::kHolderIndex == 0); + STATIC_ASSERT(FCA::kArgsLength == 7); + + // Save context, callee and call data. + __ Push(context, callee, call_data); + // Load context from callee. + __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset)); + + Register scratch = call_data; + if (!call_data_undefined) { + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + } + // Push return value and default return value. 
+ __ Push(scratch, scratch); + __ li(scratch, + Operand(ExternalReference::isolate_address(isolate()))); + // Push isolate and holder. + __ Push(scratch, holder); + + // Prepare arguments. + __ mov(scratch, sp); + + // Allocate the v8::Arguments structure in the arguments' space since + // it's not controlled by GC. + const int kApiStackSpace = 4; + + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(false, kApiStackSpace); + + DCHECK(!api_function_address.is(a0) && !scratch.is(a0)); + // a0 = FunctionCallbackInfo& + // Arguments is after the return address. + __ Daddu(a0, sp, Operand(1 * kPointerSize)); + // FunctionCallbackInfo::implicit_args_ + __ sd(scratch, MemOperand(a0, 0 * kPointerSize)); + // FunctionCallbackInfo::values_ + __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); + __ sd(at, MemOperand(a0, 1 * kPointerSize)); + // FunctionCallbackInfo::length_ = argc + __ li(at, Operand(argc)); + __ sd(at, MemOperand(a0, 2 * kPointerSize)); + // FunctionCallbackInfo::is_construct_call = 0 + __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize)); + + const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); + + AllowExternalCallThatCantCauseGC scope(masm); + MemOperand context_restore_operand( + fp, (2 + FCA::kContextSaveIndex) * kPointerSize); + // Stores return the first js argument. 
+ int return_value_offset = 0; + if (is_store) { + return_value_offset = 2 + FCA::kArgsLength; + } else { + return_value_offset = 2 + FCA::kReturnValueOffset; + } + MemOperand return_value_operand(fp, return_value_offset * kPointerSize); + + __ CallApiFunctionAndReturn(api_function_address, + thunk_ref, + kStackUnwindSpace, + return_value_operand, + &context_restore_operand); +} + + +void CallApiGetterStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- sp[0] : name + // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object + // -- ... + // -- a2 : api_function_address + // ----------------------------------- + + Register api_function_address = a2; + + __ mov(a0, sp); // a0 = Handle<Name> + __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA + + const int kApiStackSpace = 1; + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(false, kApiStackSpace); + + // Create PropertyAccessorInfo instance on the stack above the exit frame with + // a1 (internal::Object** args_) as the data. + __ sd(a1, MemOperand(sp, 1 * kPointerSize)); + __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo& + + const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; + + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); + __ CallApiFunctionAndReturn(api_function_address, + thunk_ref, + kStackUnwindSpace, + MemOperand(fp, 6 * kPointerSize), + NULL); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/code-stubs-mips64.h nodejs-0.11.15/deps/v8/src/mips64/code-stubs-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/code-stubs-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/code-stubs-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,448 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_MIPS_CODE_STUBS_ARM_H_ +#define V8_MIPS_CODE_STUBS_ARM_H_ + +#include "src/ic-inl.h" + + +namespace v8 { +namespace internal { + + +void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); + + +class StoreBufferOverflowStub: public PlatformCodeStub { + public: + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) {} + + void Generate(MacroAssembler* masm); + + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + +class StringHelper : public AllStatic { + public: + // Generate code for copying a large number of characters. This function + // is allowed to spend extra time setting up conditions to make copying + // faster. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding); + + // Generate string hash. 
+ static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +class SubStringStub: public PlatformCodeStub { + public: + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} + + private: + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class StoreRegistersStateStub: public PlatformCodeStub { + public: + explicit StoreRegistersStateStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} + + static void GenerateAheadOfTime(Isolate* isolate); + private: + Major MajorKey() const { return StoreRegistersState; } + int MinorKey() const { return 0; } + + void Generate(MacroAssembler* masm); +}; + +class RestoreRegistersStateStub: public PlatformCodeStub { + public: + explicit RestoreRegistersStateStub(Isolate* isolate) + : PlatformCodeStub(isolate) {} + + static void GenerateAheadOfTime(Isolate* isolate); + private: + Major MajorKey() const { return RestoreRegistersState; } + int MinorKey() const { return 0; } + + void Generate(MacroAssembler* masm); +}; + +class StringCompareStub: public PlatformCodeStub { + public: + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } + + // Compare two flat ASCII strings and returns result in v0. + static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4); + + // Compares two flat ASCII strings for equality and returns result + // in v0. 
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3); + + private: + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } + virtual void Generate(MacroAssembler* masm); + + static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, + Register left, + Register right, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* chars_not_equal); +}; + + +// This stub can convert a signed int32 to a heap number (double). It does +// not work for int32s that are in Smi range! No GC occurs during this stub +// so you don't have to set up the frame. +class WriteInt32ToHeapNumberStub : public PlatformCodeStub { + public: + WriteInt32ToHeapNumberStub(Isolate* isolate, + Register the_int, + Register the_heap_number, + Register scratch, + Register scratch2) + : PlatformCodeStub(isolate), + the_int_(the_int), + the_heap_number_(the_heap_number), + scratch_(scratch), + sign_(scratch2) { + DCHECK(IntRegisterBits::is_valid(the_int_.code())); + DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number_.code())); + DCHECK(ScratchRegisterBits::is_valid(scratch_.code())); + DCHECK(SignRegisterBits::is_valid(sign_.code())); + } + + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); + + private: + Register the_int_; + Register the_heap_number_; + Register scratch_; + Register sign_; + + // Minor key encoding in 16 bits. + class IntRegisterBits: public BitField<int, 0, 4> {}; + class HeapNumberRegisterBits: public BitField<int, 4, 4> {}; + class ScratchRegisterBits: public BitField<int, 8, 4> {}; + class SignRegisterBits: public BitField<int, 12, 4> {}; + + Major MajorKey() const { return WriteInt32ToHeapNumber; } + int MinorKey() const { + // Encode the parameters in a unique 16 bit value. 
+ return IntRegisterBits::encode(the_int_.code()) + | HeapNumberRegisterBits::encode(the_heap_number_.code()) + | ScratchRegisterBits::encode(scratch_.code()) + | SignRegisterBits::encode(sign_.code()); + } + + void Generate(MacroAssembler* masm); +}; + + +class RecordWriteStub: public PlatformCodeStub { + public: + RecordWriteStub(Isolate* isolate, + Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : PlatformCodeStub(isolate), + object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. + } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool SometimesSetsUpAFrame() { return false; } + + static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { + const unsigned offset = masm->instr_at(pos) & kImm16Mask; + masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) | + (zero_reg.code() << kRtShift) | (offset & kImm16Mask)); + DCHECK(Assembler::IsBne(masm->instr_at(pos))); + } + + static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { + const unsigned offset = masm->instr_at(pos) & kImm16Mask; + masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) | + (zero_reg.code() << kRtShift) | (offset & kImm16Mask)); + DCHECK(Assembler::IsBeq(masm->instr_at(pos))); + } + + static Mode GetMode(Code* stub) { + Instr first_instruction = Assembler::instr_at(stub->instruction_start()); + Instr second_instruction = Assembler::instr_at(stub->instruction_start() + + 2 * Assembler::kInstrSize); + + if (Assembler::IsBeq(first_instruction)) { + return INCREMENTAL; + } + + DCHECK(Assembler::IsBne(first_instruction)); + + if (Assembler::IsBeq(second_instruction)) { + return INCREMENTAL_COMPACTION; + } + + DCHECK(Assembler::IsBne(second_instruction)); + + return STORE_BUFFER_ONLY; 
+ } + + static void Patch(Code* stub, Mode mode) { + MacroAssembler masm(NULL, + stub->instruction_start(), + stub->instruction_size()); + switch (mode) { + case STORE_BUFFER_ONLY: + DCHECK(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + PatchBranchIntoNop(&masm, 0); + PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize); + break; + case INCREMENTAL: + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, 0); + break; + case INCREMENTAL_COMPACTION: + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize); + break; + } + DCHECK(GetMode(stub) == mode); + CpuFeatures::FlushICache(stub->instruction_start(), + 4 * Assembler::kInstrSize); + } + + private: + // This is a helper class for freeing up 3 scratch registers. The input is + // two registers that must be preserved and one scratch register provided by + // the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_(object), + address_(address), + scratch0_(scratch0) { + DCHECK(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_); + } + + void Save(MacroAssembler* masm) { + DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); + // We don't have to save scratch0_ because it was given to us as + // a scratch register. + masm->push(scratch1_); + } + + void Restore(MacroAssembler* masm) { + masm->pop(scratch1_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The scratch registers + // will be restored by other means so we don't bother pushing them here. 
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); + if (mode == kSaveFPRegs) { + masm->MultiPushFPU(kCallerSavedFPU); + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + masm->MultiPopFPU(kCallerSavedFPU); + } + masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + }; + + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm); + + Major MajorKey() const { return RecordWrite; } + + int MinorKey() const { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 5> {}; + class ValueBits: public BitField<int, 5, 5> {}; + class AddressBits: public BitField<int, 10, 5> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {}; + class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {}; + + Register object_; + Register value_; + 
Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + Label slow_; + RegisterAllocation regs_; +}; + + +// Trampoline stub to call into native code. To call safely into native code +// in the presence of compacting GC (which can move code objects) we need to +// keep the code which called into native pinned in the memory. Currently the +// simplest approach is to generate such stub early enough so it can never be +// moved by GC +class DirectCEntryStub: public PlatformCodeStub { + public: + explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} + void Generate(MacroAssembler* masm); + void GenerateCall(MacroAssembler* masm, Register target); + + private: + Major MajorKey() const { return DirectCEntry; } + int MinorKey() const { return 0; } + + bool NeedsImmovableCode() { return true; } +}; + + +class NameDictionaryLookupStub: public PlatformCodeStub { + public: + enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; + + NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) + : PlatformCodeStub(isolate), mode_(mode) { } + + void Generate(MacroAssembler* masm); + + static void GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle<Name> name, + Register scratch0); + + static void GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register r0, + Register r1); + + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + static const int kInlinedProbes = 4; + static const int kTotalProbes = 20; + + static const int kCapacityOffset = + NameDictionary::kHeaderSize + + NameDictionary::kCapacityIndex * kPointerSize; + + static const int kElementsStartOffset = + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + + Major MajorKey() const { return NameDictionaryLookup; } + + int MinorKey() const { return 
LookupModeBits::encode(mode_); } + + class LookupModeBits: public BitField<LookupMode, 0, 1> {}; + + LookupMode mode_; +}; + + +} } // namespace v8::internal + +#endif // V8_MIPS_CODE_STUBS_ARM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/constants-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/constants-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/constants-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/constants-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,362 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/mips64/constants-mips64.h" + +namespace v8 { +namespace internal { + + +// ----------------------------------------------------------------------------- +// Registers. + + +// These register names are defined in a way to match the native disassembler +// formatting. See for example the command "objdump -d <binary file>". +const char* Registers::names_[kNumSimuRegisters] = { + "zero_reg", + "at", + "v0", "v1", + "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", + "t0", "t1", "t2", "t3", + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "t8", "t9", + "k0", "k1", + "gp", + "sp", + "fp", + "ra", + "LO", "HI", + "pc" +}; + + +// List of alias names which can be used when referring to MIPS registers. +const Registers::RegisterAlias Registers::aliases_[] = { + {0, "zero"}, + {23, "cp"}, + {30, "s8"}, + {30, "s8_fp"}, + {kInvalidRegister, NULL} +}; + + +const char* Registers::Name(int reg) { + const char* result; + if ((0 <= reg) && (reg < kNumSimuRegisters)) { + result = names_[reg]; + } else { + result = "noreg"; + } + return result; +} + + +int Registers::Number(const char* name) { + // Look through the canonical names. 
+ for (int i = 0; i < kNumSimuRegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].reg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].reg; + } + i++; + } + + // No register with the reguested name found. + return kInvalidRegister; +} + + +const char* FPURegisters::names_[kNumFPURegisters] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", + "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", + "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" +}; + + +// List of alias names which can be used when referring to MIPS registers. +const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { + {kInvalidRegister, NULL} +}; + + +const char* FPURegisters::Name(int creg) { + const char* result; + if ((0 <= creg) && (creg < kNumFPURegisters)) { + result = names_[creg]; + } else { + result = "nocreg"; + } + return result; +} + + +int FPURegisters::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumFPURegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].creg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].creg; + } + i++; + } + + // No Cregister with the reguested name found. + return kInvalidFPURegister; +} + + +// ----------------------------------------------------------------------------- +// Instructions. 
+ +bool Instruction::IsForbiddenInBranchDelay() const { + const int op = OpcodeFieldRaw(); + switch (op) { + case J: + case JAL: + case BEQ: + case BNE: + case BLEZ: + case BGTZ: + case BEQL: + case BNEL: + case BLEZL: + case BGTZL: + return true; + case REGIMM: + switch (RtFieldRaw()) { + case BLTZ: + case BGEZ: + case BLTZAL: + case BGEZAL: + return true; + default: + return false; + } + break; + case SPECIAL: + switch (FunctionFieldRaw()) { + case JR: + case JALR: + return true; + default: + return false; + } + break; + default: + return false; + } +} + + +bool Instruction::IsLinkingInstruction() const { + const int op = OpcodeFieldRaw(); + switch (op) { + case JAL: + return true; + case REGIMM: + switch (RtFieldRaw()) { + case BGEZAL: + case BLTZAL: + return true; + default: + return false; + } + case SPECIAL: + switch (FunctionFieldRaw()) { + case JALR: + return true; + default: + return false; + } + default: + return false; + } +} + + +bool Instruction::IsTrap() const { + if (OpcodeFieldRaw() != SPECIAL) { + return false; + } else { + switch (FunctionFieldRaw()) { + case BREAK: + case TGE: + case TGEU: + case TLT: + case TLTU: + case TEQ: + case TNE: + return true; + default: + return false; + } + } +} + + +Instruction::Type Instruction::InstructionType() const { + switch (OpcodeFieldRaw()) { + case SPECIAL: + switch (FunctionFieldRaw()) { + case JR: + case JALR: + case BREAK: + case SLL: + case DSLL: + case DSLL32: + case SRL: + case DSRL: + case DSRL32: + case SRA: + case DSRA: + case DSRA32: + case SLLV: + case DSLLV: + case SRLV: + case DSRLV: + case SRAV: + case DSRAV: + case MFHI: + case MFLO: + case MULT: + case DMULT: + case MULTU: + case DMULTU: + case DIV: + case DDIV: + case DIVU: + case DDIVU: + case ADD: + case DADD: + case ADDU: + case DADDU: + case SUB: + case DSUB: + case SUBU: + case DSUBU: + case AND: + case OR: + case XOR: + case NOR: + case SLT: + case SLTU: + case TGE: + case TGEU: + case TLT: + case TLTU: + case TEQ: + case TNE: + case 
MOVZ: + case MOVN: + case MOVCI: + return kRegisterType; + default: + return kUnsupported; + } + break; + case SPECIAL2: + switch (FunctionFieldRaw()) { + case MUL: + case CLZ: + return kRegisterType; + default: + return kUnsupported; + } + break; + case SPECIAL3: + switch (FunctionFieldRaw()) { + case INS: + case EXT: + return kRegisterType; + default: + return kUnsupported; + } + break; + case COP1: // Coprocessor instructions. + switch (RsFieldRawNoAssert()) { + case BC1: // Branch on coprocessor condition. + case BC1EQZ: + case BC1NEZ: + return kImmediateType; + default: + return kRegisterType; + } + break; + case COP1X: + return kRegisterType; + // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16. + case REGIMM: + case BEQ: + case BNE: + case BLEZ: + case BGTZ: + case ADDI: + case DADDI: + case ADDIU: + case DADDIU: + case SLTI: + case SLTIU: + case ANDI: + case ORI: + case XORI: + case LUI: + case BEQL: + case BNEL: + case BLEZL: + case BGTZL: + case BEQZC: + case BNEZC: + case LB: + case LH: + case LWL: + case LW: + case LWU: + case LD: + case LBU: + case LHU: + case LWR: + case SB: + case SH: + case SWL: + case SW: + case SD: + case SWR: + case LWC1: + case LDC1: + case SWC1: + case SDC1: + return kImmediateType; + // 26 bits immediate type instructions. e.g.: j imm26. + case J: + case JAL: + return kJumpType; + default: + return kUnsupported; + } + return kUnsupported; +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/constants-mips64.h nodejs-0.11.15/deps/v8/src/mips64/constants-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/constants-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/constants-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,952 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_MIPS_CONSTANTS_H_ +#define V8_MIPS_CONSTANTS_H_ + +// UNIMPLEMENTED_ macro for MIPS. +#ifdef DEBUG +#define UNIMPLEMENTED_MIPS() \ + v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \ + __FILE__, __LINE__, __func__) +#else +#define UNIMPLEMENTED_MIPS() +#endif + +#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n") + +enum ArchVariants { + kMips64r2, + kMips64r6 +}; + + +#ifdef _MIPS_ARCH_MIPS64R2 + static const ArchVariants kArchVariant = kMips64r2; +#elif _MIPS_ARCH_MIPS64R6 + static const ArchVariants kArchVariant = kMips64r6; +#else + static const ArchVariants kArchVariant = kMips64r2; +#endif + + +// TODO(plind): consider deriving ABI from compiler flags or build system. + +// ABI-dependent definitions are made with #define in simulator-mips64.h, +// so the ABI choice must be available to the pre-processor. However, in all +// other cases, we should use the enum AbiVariants with normal if statements. + +#define MIPS_ABI_N64 1 +// #define MIPS_ABI_O32 1 + +// The only supported Abi's are O32, and n64. +enum AbiVariants { + kO32, + kN64 // Use upper case N for 'n64' ABI to conform to style standard. +}; + +#ifdef MIPS_ABI_N64 +static const AbiVariants kMipsAbi = kN64; +#else +static const AbiVariants kMipsAbi = kO32; +#endif + + +// TODO(plind): consider renaming these ... +#if(defined(__mips_hard_float) && __mips_hard_float != 0) +// Use floating-point coprocessor instructions. This flag is raised when +// -mhard-float is passed to the compiler. +const bool IsMipsSoftFloatABI = false; +#elif(defined(__mips_soft_float) && __mips_soft_float != 0) +// This flag is raised when -msoft-float is passed to the compiler. +// Although FPU is a base requirement for v8, soft-float ABI is used +// on soft-float systems with FPU kernel emulation. 
+const bool IsMipsSoftFloatABI = true; +#else +const bool IsMipsSoftFloatABI = true; +#endif + + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include <inttypes.h> + + +// Defines constants and accessor classes to assemble, disassemble and +// simulate MIPS32 instructions. +// +// See: MIPS32 Architecture For Programmers +// Volume II: The MIPS32 Instruction Set +// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf. + +namespace v8 { +namespace internal { + +// ----------------------------------------------------------------------------- +// Registers and FPURegisters. + +// Number of general purpose registers. +const int kNumRegisters = 32; +const int kInvalidRegister = -1; + +// Number of registers with HI, LO, and pc. +const int kNumSimuRegisters = 35; + +// In the simulator, the PC register is simulated as the 34th register. +const int kPCRegister = 34; + +// Number coprocessor registers. +const int kNumFPURegisters = 32; +const int kInvalidFPURegister = -1; + +// FPU (coprocessor 1) control registers. Currently only FCSR is implemented. +const int kFCSRRegister = 31; +const int kInvalidFPUControlRegister = -1; +const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1; +const uint64_t kFPU64InvalidResult = + static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1; + +// FCSR constants. 
+const uint32_t kFCSRInexactFlagBit = 2; +const uint32_t kFCSRUnderflowFlagBit = 3; +const uint32_t kFCSROverflowFlagBit = 4; +const uint32_t kFCSRDivideByZeroFlagBit = 5; +const uint32_t kFCSRInvalidOpFlagBit = 6; + +const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit; +const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit; +const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit; +const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit; +const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit; + +const uint32_t kFCSRFlagMask = + kFCSRInexactFlagMask | + kFCSRUnderflowFlagMask | + kFCSROverflowFlagMask | + kFCSRDivideByZeroFlagMask | + kFCSRInvalidOpFlagMask; + +const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask; + +// 'pref' instruction hints +const int32_t kPrefHintLoad = 0; +const int32_t kPrefHintStore = 1; +const int32_t kPrefHintLoadStreamed = 4; +const int32_t kPrefHintStoreStreamed = 5; +const int32_t kPrefHintLoadRetained = 6; +const int32_t kPrefHintStoreRetained = 7; +const int32_t kPrefHintWritebackInvalidate = 25; +const int32_t kPrefHintPrepareForStore = 30; + +// Helper functions for converting between register numbers and names. +class Registers { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int reg; + const char* name; + }; + + static const int64_t kMaxValue = 0x7fffffffffffffffl; + static const int64_t kMinValue = 0x8000000000000000l; + + private: + static const char* names_[kNumSimuRegisters]; + static const RegisterAlias aliases_[]; +}; + +// Helper functions for converting between register numbers and names. +class FPURegisters { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. 
+ static int Number(const char* name); + + struct RegisterAlias { + int creg; + const char* name; + }; + + private: + static const char* names_[kNumFPURegisters]; + static const RegisterAlias aliases_[]; +}; + + +// ----------------------------------------------------------------------------- +// Instructions encoding constants. + +// On MIPS all instructions are 32 bits. +typedef int32_t Instr; + +// Special Software Interrupt codes when used in the presence of the MIPS +// simulator. +enum SoftwareInterruptCodes { + // Transition to C code. + call_rt_redirected = 0xfffff +}; + +// On MIPS Simulator breakpoints can have different codes: +// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, +// the simulator will run through them and print the registers. +// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() +// instructions (see Assembler::stop()). +// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the +// debugger. +const uint32_t kMaxWatchpointCode = 31; +const uint32_t kMaxStopCode = 127; +STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); + + +// ----- Fields offset and length. +const int kOpcodeShift = 26; +const int kOpcodeBits = 6; +const int kRsShift = 21; +const int kRsBits = 5; +const int kRtShift = 16; +const int kRtBits = 5; +const int kRdShift = 11; +const int kRdBits = 5; +const int kSaShift = 6; +const int kSaBits = 5; +const int kFunctionShift = 0; +const int kFunctionBits = 6; +const int kLuiShift = 16; + +const int kImm16Shift = 0; +const int kImm16Bits = 16; +const int kImm21Shift = 0; +const int kImm21Bits = 21; +const int kImm26Shift = 0; +const int kImm26Bits = 26; +const int kImm28Shift = 0; +const int kImm28Bits = 28; +const int kImm32Shift = 0; +const int kImm32Bits = 32; + +// In branches and jumps immediate fields point to words, not bytes, +// and are therefore shifted by 2. 
+const int kImmFieldShift = 2; + +const int kFrBits = 5; +const int kFrShift = 21; +const int kFsShift = 11; +const int kFsBits = 5; +const int kFtShift = 16; +const int kFtBits = 5; +const int kFdShift = 6; +const int kFdBits = 5; +const int kFCccShift = 8; +const int kFCccBits = 3; +const int kFBccShift = 18; +const int kFBccBits = 3; +const int kFBtrueShift = 16; +const int kFBtrueBits = 1; + +// ----- Miscellaneous useful masks. +// Instruction bit masks. +const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift; +const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; +const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; +const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; +const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift; +const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift; +const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; +const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift; +const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift; +// Misc masks. +const int kHiMask = 0xffff << 16; +const int kLoMask = 0xffff; +const int kSignMask = 0x80000000; +const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1; +const int64_t kHi16MaskOf64 = (int64_t)0xffff << 48; +const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32; +const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16; + +// ----- MIPS Opcodes and Function Fields. +// We use this presentation to stay close to the table representation in +// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set. 
+enum Opcode { + SPECIAL = 0 << kOpcodeShift, + REGIMM = 1 << kOpcodeShift, + + J = ((0 << 3) + 2) << kOpcodeShift, + JAL = ((0 << 3) + 3) << kOpcodeShift, + BEQ = ((0 << 3) + 4) << kOpcodeShift, + BNE = ((0 << 3) + 5) << kOpcodeShift, + BLEZ = ((0 << 3) + 6) << kOpcodeShift, + BGTZ = ((0 << 3) + 7) << kOpcodeShift, + + ADDI = ((1 << 3) + 0) << kOpcodeShift, + ADDIU = ((1 << 3) + 1) << kOpcodeShift, + SLTI = ((1 << 3) + 2) << kOpcodeShift, + SLTIU = ((1 << 3) + 3) << kOpcodeShift, + ANDI = ((1 << 3) + 4) << kOpcodeShift, + ORI = ((1 << 3) + 5) << kOpcodeShift, + XORI = ((1 << 3) + 6) << kOpcodeShift, + LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family. + DAUI = ((3 << 3) + 5) << kOpcodeShift, + + BEQC = ((2 << 3) + 0) << kOpcodeShift, + COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class. + BEQL = ((2 << 3) + 4) << kOpcodeShift, + BNEL = ((2 << 3) + 5) << kOpcodeShift, + BLEZL = ((2 << 3) + 6) << kOpcodeShift, + BGTZL = ((2 << 3) + 7) << kOpcodeShift, + + DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC. 
+ DADDIU = ((3 << 3) + 1) << kOpcodeShift, + LDL = ((3 << 3) + 2) << kOpcodeShift, + LDR = ((3 << 3) + 3) << kOpcodeShift, + SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift, + SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift, + + LB = ((4 << 3) + 0) << kOpcodeShift, + LH = ((4 << 3) + 1) << kOpcodeShift, + LWL = ((4 << 3) + 2) << kOpcodeShift, + LW = ((4 << 3) + 3) << kOpcodeShift, + LBU = ((4 << 3) + 4) << kOpcodeShift, + LHU = ((4 << 3) + 5) << kOpcodeShift, + LWR = ((4 << 3) + 6) << kOpcodeShift, + LWU = ((4 << 3) + 7) << kOpcodeShift, + + SB = ((5 << 3) + 0) << kOpcodeShift, + SH = ((5 << 3) + 1) << kOpcodeShift, + SWL = ((5 << 3) + 2) << kOpcodeShift, + SW = ((5 << 3) + 3) << kOpcodeShift, + SDL = ((5 << 3) + 4) << kOpcodeShift, + SDR = ((5 << 3) + 5) << kOpcodeShift, + SWR = ((5 << 3) + 6) << kOpcodeShift, + + LWC1 = ((6 << 3) + 1) << kOpcodeShift, + LLD = ((6 << 3) + 4) << kOpcodeShift, + LDC1 = ((6 << 3) + 5) << kOpcodeShift, + BEQZC = ((6 << 3) + 6) << kOpcodeShift, + LD = ((6 << 3) + 7) << kOpcodeShift, + + PREF = ((6 << 3) + 3) << kOpcodeShift, + + SWC1 = ((7 << 3) + 1) << kOpcodeShift, + SCD = ((7 << 3) + 4) << kOpcodeShift, + SDC1 = ((7 << 3) + 5) << kOpcodeShift, + BNEZC = ((7 << 3) + 6) << kOpcodeShift, + SD = ((7 << 3) + 7) << kOpcodeShift, + + COP1X = ((1 << 4) + 3) << kOpcodeShift +}; + +enum SecondaryField { + // SPECIAL Encoding of Function Field. 
+ SLL = ((0 << 3) + 0), + MOVCI = ((0 << 3) + 1), + SRL = ((0 << 3) + 2), + SRA = ((0 << 3) + 3), + SLLV = ((0 << 3) + 4), + SRLV = ((0 << 3) + 6), + SRAV = ((0 << 3) + 7), + + JR = ((1 << 3) + 0), + JALR = ((1 << 3) + 1), + MOVZ = ((1 << 3) + 2), + MOVN = ((1 << 3) + 3), + BREAK = ((1 << 3) + 5), + + MFHI = ((2 << 3) + 0), + CLZ_R6 = ((2 << 3) + 0), + CLO_R6 = ((2 << 3) + 1), + MFLO = ((2 << 3) + 2), + DSLLV = ((2 << 3) + 4), + DSRLV = ((2 << 3) + 6), + DSRAV = ((2 << 3) + 7), + + MULT = ((3 << 3) + 0), + MULTU = ((3 << 3) + 1), + DIV = ((3 << 3) + 2), + DIVU = ((3 << 3) + 3), + DMULT = ((3 << 3) + 4), + DMULTU = ((3 << 3) + 5), + DDIV = ((3 << 3) + 6), + DDIVU = ((3 << 3) + 7), + + ADD = ((4 << 3) + 0), + ADDU = ((4 << 3) + 1), + SUB = ((4 << 3) + 2), + SUBU = ((4 << 3) + 3), + AND = ((4 << 3) + 4), + OR = ((4 << 3) + 5), + XOR = ((4 << 3) + 6), + NOR = ((4 << 3) + 7), + + SLT = ((5 << 3) + 2), + SLTU = ((5 << 3) + 3), + DADD = ((5 << 3) + 4), + DADDU = ((5 << 3) + 5), + DSUB = ((5 << 3) + 6), + DSUBU = ((5 << 3) + 7), + + TGE = ((6 << 3) + 0), + TGEU = ((6 << 3) + 1), + TLT = ((6 << 3) + 2), + TLTU = ((6 << 3) + 3), + TEQ = ((6 << 3) + 4), + SELEQZ_S = ((6 << 3) + 5), + TNE = ((6 << 3) + 6), + SELNEZ_S = ((6 << 3) + 7), + + DSLL = ((7 << 3) + 0), + DSRL = ((7 << 3) + 2), + DSRA = ((7 << 3) + 3), + DSLL32 = ((7 << 3) + 4), + DSRL32 = ((7 << 3) + 6), + DSRA32 = ((7 << 3) + 7), + + // Multiply integers in r6. + MUL_MUH = ((3 << 3) + 0), // MUL, MUH. + MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U. + D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH. + D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U. + + MUL_OP = ((0 << 3) + 2), + MUH_OP = ((0 << 3) + 3), + DIV_OP = ((0 << 3) + 2), + MOD_OP = ((0 << 3) + 3), + + DIV_MOD = ((3 << 3) + 2), + DIV_MOD_U = ((3 << 3) + 3), + D_DIV_MOD = ((3 << 3) + 6), + D_DIV_MOD_U = ((3 << 3) + 7), + + // drotr in special4? + + // SPECIAL2 Encoding of Function Field. 
+ MUL = ((0 << 3) + 2), + CLZ = ((4 << 3) + 0), + CLO = ((4 << 3) + 1), + + // SPECIAL3 Encoding of Function Field. + EXT = ((0 << 3) + 0), + DEXTM = ((0 << 3) + 1), + DEXTU = ((0 << 3) + 2), + DEXT = ((0 << 3) + 3), + INS = ((0 << 3) + 4), + DINSM = ((0 << 3) + 5), + DINSU = ((0 << 3) + 6), + DINS = ((0 << 3) + 7), + + DSBH = ((4 << 3) + 4), + + // REGIMM encoding of rt Field. + BLTZ = ((0 << 3) + 0) << 16, + BGEZ = ((0 << 3) + 1) << 16, + BLTZAL = ((2 << 3) + 0) << 16, + BGEZAL = ((2 << 3) + 1) << 16, + BGEZALL = ((2 << 3) + 3) << 16, + DAHI = ((0 << 3) + 6) << 16, + DATI = ((3 << 3) + 6) << 16, + + // COP1 Encoding of rs Field. + MFC1 = ((0 << 3) + 0) << 21, + DMFC1 = ((0 << 3) + 1) << 21, + CFC1 = ((0 << 3) + 2) << 21, + MFHC1 = ((0 << 3) + 3) << 21, + MTC1 = ((0 << 3) + 4) << 21, + DMTC1 = ((0 << 3) + 5) << 21, + CTC1 = ((0 << 3) + 6) << 21, + MTHC1 = ((0 << 3) + 7) << 21, + BC1 = ((1 << 3) + 0) << 21, + S = ((2 << 3) + 0) << 21, + D = ((2 << 3) + 1) << 21, + W = ((2 << 3) + 4) << 21, + L = ((2 << 3) + 5) << 21, + PS = ((2 << 3) + 6) << 21, + // COP1 Encoding of Function Field When rs=S. + ROUND_L_S = ((1 << 3) + 0), + TRUNC_L_S = ((1 << 3) + 1), + CEIL_L_S = ((1 << 3) + 2), + FLOOR_L_S = ((1 << 3) + 3), + ROUND_W_S = ((1 << 3) + 4), + TRUNC_W_S = ((1 << 3) + 5), + CEIL_W_S = ((1 << 3) + 6), + FLOOR_W_S = ((1 << 3) + 7), + CVT_D_S = ((4 << 3) + 1), + CVT_W_S = ((4 << 3) + 4), + CVT_L_S = ((4 << 3) + 5), + CVT_PS_S = ((4 << 3) + 6), + // COP1 Encoding of Function Field When rs=D. 
+ ADD_D = ((0 << 3) + 0), + SUB_D = ((0 << 3) + 1), + MUL_D = ((0 << 3) + 2), + DIV_D = ((0 << 3) + 3), + SQRT_D = ((0 << 3) + 4), + ABS_D = ((0 << 3) + 5), + MOV_D = ((0 << 3) + 6), + NEG_D = ((0 << 3) + 7), + ROUND_L_D = ((1 << 3) + 0), + TRUNC_L_D = ((1 << 3) + 1), + CEIL_L_D = ((1 << 3) + 2), + FLOOR_L_D = ((1 << 3) + 3), + ROUND_W_D = ((1 << 3) + 4), + TRUNC_W_D = ((1 << 3) + 5), + CEIL_W_D = ((1 << 3) + 6), + FLOOR_W_D = ((1 << 3) + 7), + MIN = ((3 << 3) + 4), + MINA = ((3 << 3) + 5), + MAX = ((3 << 3) + 6), + MAXA = ((3 << 3) + 7), + CVT_S_D = ((4 << 3) + 0), + CVT_W_D = ((4 << 3) + 4), + CVT_L_D = ((4 << 3) + 5), + C_F_D = ((6 << 3) + 0), + C_UN_D = ((6 << 3) + 1), + C_EQ_D = ((6 << 3) + 2), + C_UEQ_D = ((6 << 3) + 3), + C_OLT_D = ((6 << 3) + 4), + C_ULT_D = ((6 << 3) + 5), + C_OLE_D = ((6 << 3) + 6), + C_ULE_D = ((6 << 3) + 7), + // COP1 Encoding of Function Field When rs=W or L. + CVT_S_W = ((4 << 3) + 0), + CVT_D_W = ((4 << 3) + 1), + CVT_S_L = ((4 << 3) + 0), + CVT_D_L = ((4 << 3) + 1), + BC1EQZ = ((2 << 2) + 1) << 21, + BC1NEZ = ((3 << 2) + 1) << 21, + // COP1 CMP positive predicates Bit 5..4 = 00. + CMP_AF = ((0 << 3) + 0), + CMP_UN = ((0 << 3) + 1), + CMP_EQ = ((0 << 3) + 2), + CMP_UEQ = ((0 << 3) + 3), + CMP_LT = ((0 << 3) + 4), + CMP_ULT = ((0 << 3) + 5), + CMP_LE = ((0 << 3) + 6), + CMP_ULE = ((0 << 3) + 7), + CMP_SAF = ((1 << 3) + 0), + CMP_SUN = ((1 << 3) + 1), + CMP_SEQ = ((1 << 3) + 2), + CMP_SUEQ = ((1 << 3) + 3), + CMP_SSLT = ((1 << 3) + 4), + CMP_SSULT = ((1 << 3) + 5), + CMP_SLE = ((1 << 3) + 6), + CMP_SULE = ((1 << 3) + 7), + // COP1 CMP negative predicates Bit 5..4 = 01. + CMP_AT = ((2 << 3) + 0), // Reserved, not implemented. + CMP_OR = ((2 << 3) + 1), + CMP_UNE = ((2 << 3) + 2), + CMP_NE = ((2 << 3) + 3), + CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented. + CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented. + CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented. + CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented. 
+ CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented. + CMP_SOR = ((3 << 3) + 1), + CMP_SUNE = ((3 << 3) + 2), + CMP_SNE = ((3 << 3) + 3), + CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented. + CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented. + CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented. + CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented. + + SEL = ((2 << 3) + 0), + SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers. + SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers. + + // COP1 Encoding of Function Field When rs=PS. + // COP1X Encoding of Function Field. + MADD_D = ((4 << 3) + 1), + + NULLSF = 0 +}; + + +// ----- Emulated conditions. +// On MIPS we use this enum to abstract from conditional branch instructions. +// The 'U' prefix is used to specify unsigned comparisons. +// Opposite conditions must be paired as odd/even numbers +// because 'NegateCondition' function flips LSB to negate condition. +enum Condition { + // Any value < 0 is considered no_condition. + kNoCondition = -1, + + overflow = 0, + no_overflow = 1, + Uless = 2, + Ugreater_equal= 3, + equal = 4, + not_equal = 5, + Uless_equal = 6, + Ugreater = 7, + negative = 8, + positive = 9, + parity_even = 10, + parity_odd = 11, + less = 12, + greater_equal = 13, + less_equal = 14, + greater = 15, + ueq = 16, // Unordered or Equal. + nue = 17, // Not (Unordered or Equal). + + cc_always = 18, + + // Aliases. + carry = Uless, + not_carry = Ugreater_equal, + zero = equal, + eq = equal, + not_zero = not_equal, + ne = not_equal, + nz = not_equal, + sign = negative, + not_sign = positive, + mi = negative, + pl = positive, + hi = Ugreater, + ls = Uless_equal, + ge = greater_equal, + lt = less, + gt = greater, + le = less_equal, + hs = Ugreater_equal, + lo = Uless, + al = cc_always, + + cc_default = kNoCondition +}; + + +// Returns the equivalent of !cc. +// Negation of the default kNoCondition (-1) results in a non-default +// no_condition value (-2). 
As long as tests for no_condition check +// for condition < 0, this will work as expected. +inline Condition NegateCondition(Condition cc) { + DCHECK(cc != cc_always); + return static_cast<Condition>(cc ^ 1); +} + + +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cc) { + switch (cc) { + case Uless: + return Ugreater; + case Ugreater: + return Uless; + case Ugreater_equal: + return Uless_equal; + case Uless_equal: + return Ugreater_equal; + case less: + return greater; + case greater: + return less; + case greater_equal: + return less_equal; + case less_equal: + return greater_equal; + default: + return cc; + } +} + + +// ----- Coprocessor conditions. +enum FPUCondition { + kNoFPUCondition = -1, + + F = 0, // False. + UN = 1, // Unordered. + EQ = 2, // Equal. + UEQ = 3, // Unordered or Equal. + OLT = 4, // Ordered or Less Than. + ULT = 5, // Unordered or Less Than. + OLE = 6, // Ordered or Less Than or Equal. + ULE = 7 // Unordered or Less Than or Equal. +}; + + +// FPU rounding modes. +enum FPURoundingMode { + RN = 0 << 0, // Round to Nearest. + RZ = 1 << 0, // Round towards zero. + RP = 2 << 0, // Round towards Plus Infinity. + RM = 3 << 0, // Round towards Minus Infinity. + + // Aliases. + kRoundToNearest = RN, + kRoundToZero = RZ, + kRoundToPlusInf = RP, + kRoundToMinusInf = RM +}; + +const uint32_t kFPURoundingModeMask = 3 << 0; + +enum CheckForInexactConversion { + kCheckForInexactConversion, + kDontCheckForInexactConversion +}; + + +// ----------------------------------------------------------------------------- +// Hints. + +// Branch hints are not used on the MIPS. They are defined so that they can +// appear in shared function signatures, but will be ignored in MIPS +// implementations. 
+enum Hint { + no_hint = 0 +}; + + +inline Hint NegateHint(Hint hint) { + return no_hint; +} + + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. +// These constants are declared in assembler-mips.cc, as they use named +// registers and other constants. + +// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) +// operations as post-increment of sp. +extern const Instr kPopInstruction; +// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. +extern const Instr kPushInstruction; +// sw(r, MemOperand(sp, 0)) +extern const Instr kPushRegPattern; +// lw(r, MemOperand(sp, 0)) +extern const Instr kPopRegPattern; +extern const Instr kLwRegFpOffsetPattern; +extern const Instr kSwRegFpOffsetPattern; +extern const Instr kLwRegFpNegOffsetPattern; +extern const Instr kSwRegFpNegOffsetPattern; +// A mask for the Rt register for push, pop, lw, sw instructions. +extern const Instr kRtMask; +extern const Instr kLwSwInstrTypeMask; +extern const Instr kLwSwInstrArgumentMask; +extern const Instr kLwSwOffsetMask; + +// Break 0xfffff, reserved for redirected real time call. +const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6; +// A nop instruction. (Encoding of sll 0 0 0). +const Instr nopInstr = 0; + +class Instruction { + public: + enum { + kInstrSize = 4, + kInstrSizeLog2 = 2, + // On MIPS PC cannot actually be directly accessed. We behave as if PC was + // always the value of the current instruction being executed. + kPCReadOffset = 0 + }; + + // Get the raw instruction bits. + inline Instr InstructionBits() const { + return *reinterpret_cast<const Instr*>(this); + } + + // Set the raw instruction bits to value. + inline void SetInstructionBits(Instr value) { + *reinterpret_cast<Instr*>(this) = value; + } + + // Read one particular bit out of the instruction bits. 
+ inline int Bit(int nr) const { + return (InstructionBits() >> nr) & 1; + } + + // Read a bit field out of the instruction bits. + inline int Bits(int hi, int lo) const { + return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1); + } + + // Instruction type. + enum Type { + kRegisterType, + kImmediateType, + kJumpType, + kUnsupported = -1 + }; + + // Get the encoding type of the instruction. + Type InstructionType() const; + + + // Accessors for the different named fields used in the MIPS encoding. + inline Opcode OpcodeValue() const { + return static_cast<Opcode>( + Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift)); + } + + inline int RsValue() const { + DCHECK(InstructionType() == kRegisterType || + InstructionType() == kImmediateType); + return Bits(kRsShift + kRsBits - 1, kRsShift); + } + + inline int RtValue() const { + DCHECK(InstructionType() == kRegisterType || + InstructionType() == kImmediateType); + return Bits(kRtShift + kRtBits - 1, kRtShift); + } + + inline int RdValue() const { + DCHECK(InstructionType() == kRegisterType); + return Bits(kRdShift + kRdBits - 1, kRdShift); + } + + inline int SaValue() const { + DCHECK(InstructionType() == kRegisterType); + return Bits(kSaShift + kSaBits - 1, kSaShift); + } + + inline int FunctionValue() const { + DCHECK(InstructionType() == kRegisterType || + InstructionType() == kImmediateType); + return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift); + } + + inline int FdValue() const { + return Bits(kFdShift + kFdBits - 1, kFdShift); + } + + inline int FsValue() const { + return Bits(kFsShift + kFsBits - 1, kFsShift); + } + + inline int FtValue() const { + return Bits(kFtShift + kFtBits - 1, kFtShift); + } + + inline int FrValue() const { + return Bits(kFrShift + kFrBits -1, kFrShift); + } + + // Float Compare condition code instruction bits. + inline int FCccValue() const { + return Bits(kFCccShift + kFCccBits - 1, kFCccShift); + } + + // Float Branch condition code instruction bits. 
+ inline int FBccValue() const { + return Bits(kFBccShift + kFBccBits - 1, kFBccShift); + } + + // Float Branch true/false instruction bit. + inline int FBtrueValue() const { + return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift); + } + + // Return the fields at their original place in the instruction encoding. + inline Opcode OpcodeFieldRaw() const { + return static_cast<Opcode>(InstructionBits() & kOpcodeMask); + } + + inline int RsFieldRaw() const { + DCHECK(InstructionType() == kRegisterType || + InstructionType() == kImmediateType); + return InstructionBits() & kRsFieldMask; + } + + // Same as above function, but safe to call within InstructionType(). + inline int RsFieldRawNoAssert() const { + return InstructionBits() & kRsFieldMask; + } + + inline int RtFieldRaw() const { + DCHECK(InstructionType() == kRegisterType || + InstructionType() == kImmediateType); + return InstructionBits() & kRtFieldMask; + } + + inline int RdFieldRaw() const { + DCHECK(InstructionType() == kRegisterType); + return InstructionBits() & kRdFieldMask; + } + + inline int SaFieldRaw() const { + DCHECK(InstructionType() == kRegisterType); + return InstructionBits() & kSaFieldMask; + } + + inline int FunctionFieldRaw() const { + return InstructionBits() & kFunctionFieldMask; + } + + // Get the secondary field according to the opcode. 
+ inline int SecondaryValue() const { + Opcode op = OpcodeFieldRaw(); + switch (op) { + case SPECIAL: + case SPECIAL2: + return FunctionValue(); + case COP1: + return RsValue(); + case REGIMM: + return RtValue(); + default: + return NULLSF; + } + } + + inline int32_t Imm16Value() const { + DCHECK(InstructionType() == kImmediateType); + return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); + } + + inline int32_t Imm21Value() const { + DCHECK(InstructionType() == kImmediateType); + return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift); + } + + inline int32_t Imm26Value() const { + DCHECK(InstructionType() == kJumpType); + return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); + } + + // Say if the instruction should not be used in a branch delay slot. + bool IsForbiddenInBranchDelay() const; + // Say if the instruction 'links'. e.g. jal, bal. + bool IsLinkingInstruction() const; + // Say if the instruction is a break or a trap. + bool IsTrap() const; + + // Instructions are read of out a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instruction. + // Use the At(pc) function to create references to Instruction. + static Instruction* At(byte* pc) { + return reinterpret_cast<Instruction*>(pc); + } + + private: + // We need to prevent the creation of instances of class Instruction. + DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); +}; + + +// ----------------------------------------------------------------------------- +// MIPS assembly various constants. + +// C/C++ argument slots size. +const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4; + +// TODO(plind): below should be based on kPointerSize +// TODO(plind): find all usages and remove the needless instructions for n64. 
+const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2; + +const int kBranchReturnOffset = 2 * Instruction::kInstrSize; + +} } // namespace v8::internal + +#endif // #ifndef V8_MIPS_CONSTANTS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/cpu-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/cpu-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/cpu-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/cpu-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,59 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// CPU specific code for arm independent of OS goes here. + +#include <sys/syscall.h> +#include <unistd.h> + +#ifdef __mips +#include <asm/cachectl.h> +#endif // #ifdef __mips + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/assembler.h" +#include "src/macro-assembler.h" + +#include "src/simulator.h" // For cache flushing. + +namespace v8 { +namespace internal { + + +void CpuFeatures::FlushICache(void* start, size_t size) { + // Nothing to do, flushing no instructions. + if (size == 0) { + return; + } + +#if !defined (USE_SIMULATOR) +#if defined(ANDROID) && !defined(__LP64__) + // Bionic cacheflush can typically run in userland, avoiding kernel call. + char *end = reinterpret_cast<char *>(start) + size; + cacheflush( + reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0); +#else // ANDROID + int res; + // See http://www.linux-mips.org/wiki/Cacheflush_Syscall. + res = syscall(__NR_cacheflush, start, size, ICACHE); + if (res) { + V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache"); + } +#endif // ANDROID +#else // USE_SIMULATOR. + // Not generating mips instructions for C-code. This means that we are + // building a mips emulator based target. We should notify the simulator + // that the Icache was flushed. 
+ // None of this code ends up in the snapshot so there are no issues + // around whether or not to generate the code when building snapshots. + Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size); +#endif // USE_SIMULATOR. +} + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/debug-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/debug-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/debug-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/debug-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,330 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/codegen.h" +#include "src/debug.h" + +namespace v8 { +namespace internal { + +bool BreakLocationIterator::IsDebugBreakAtReturn() { + return Debug::IsDebugBreakAtReturn(rinfo()); +} + + +void BreakLocationIterator::SetDebugBreakAtReturn() { + // Mips return sequence: + // mov sp, fp + // lw fp, sp(0) + // lw ra, sp(4) + // addiu sp, sp, 8 + // addiu sp, sp, N + // jr ra + // nop (in branch delay slot) + + // Make sure this constant matches the number if instructions we emit. + DCHECK(Assembler::kJSReturnSequenceInstructions == 7); + CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions); + // li and Call pseudo-instructions emit 6 + 2 instructions. + patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int64_t>( + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())), + ADDRESS_LOAD); + patcher.masm()->Call(v8::internal::t9); + // Place nop to match return sequence size. + patcher.masm()->nop(); + // TODO(mips): Open issue about using breakpoint instruction instead of nops. + // patcher.masm()->bkpt(0); +} + + +// Restore the JS frame exit code. 
+void BreakLocationIterator::ClearDebugBreakAtReturn() { + rinfo()->PatchCode(original_rinfo()->pc(), + Assembler::kJSReturnSequenceInstructions); +} + + +// A debug break in the exit code is identified by the JS frame exit code +// having been patched with li/call psuedo-instrunction (liu/ori/jalr). +bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); + return rinfo->IsPatchedReturnSequence(); +} + + +bool BreakLocationIterator::IsDebugBreakAtSlot() { + DCHECK(IsDebugBreakSlot()); + // Check whether the debug break slot instructions have been patched. + return rinfo()->IsPatchedDebugBreakSlotSequence(); +} + + +void BreakLocationIterator::SetDebugBreakAtSlot() { + DCHECK(IsDebugBreakSlot()); + // Patch the code changing the debug break slot code from: + // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1) + // nop(DEBUG_BREAK_NOP) + // nop(DEBUG_BREAK_NOP) + // nop(DEBUG_BREAK_NOP) + // nop(DEBUG_BREAK_NOP) + // nop(DEBUG_BREAK_NOP) + // to a call to the debug break slot code. + // li t9, address (4-instruction sequence on mips64) + // call t9 (jalr t9 / nop instruction pair) + CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions); + patcher.masm()->li(v8::internal::t9, + Operand(reinterpret_cast<int64_t>( + debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())), + ADDRESS_LOAD); + patcher.masm()->Call(v8::internal::t9); +} + + +void BreakLocationIterator::ClearDebugBreakAtSlot() { + DCHECK(IsDebugBreakSlot()); + rinfo()->PatchCode(original_rinfo()->pc(), + Assembler::kDebugBreakSlotInstructions); +} + + +#define __ ACCESS_MASM(masm) + + + +static void Generate_DebugBreakCallHelper(MacroAssembler* masm, + RegList object_regs, + RegList non_object_regs) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Load padding words on stack. 
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue))); + __ Dsubu(sp, sp, + Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize)); + for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) { + __ sd(at, MemOperand(sp, kPointerSize * i)); + } + __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize))); + __ push(at); + + + // TODO(plind): This needs to be revised to store pairs of smi's per + // the other 64-bit arch's. + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + DCHECK((object_regs & ~kJSCallerSaved) == 0); + DCHECK((non_object_regs & ~kJSCallerSaved) == 0); + DCHECK((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((object_regs & (1 << r)) != 0) { + __ push(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + __ PushRegisterAsTwoSmis(reg); + } + } + +#ifdef DEBUG + __ RecordComment("// Calling from debug break to runtime - come in - over"); +#endif + __ PrepareCEntryArgs(0); // No arguments. + __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate())); + + CEntryStub ceb(masm->isolate(), 1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ PopRegisterAsTwoSmis(reg, at); + } + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ li(reg, kDebugZapValue); + } + } + + // Don't bother removing padding bytes pushed on the stack + // as the frame is going to be restored right away. + + // Leave the internal frame. 
+ } + + // Now that the break point has been handled, resume normal execution by + // jumping to the target address intended by the caller and that was + // overwritten by the address of DebugBreakXXX. + ExternalReference after_break_target = + ExternalReference::debug_after_break_target_address(masm->isolate()); + __ li(t9, Operand(after_break_target)); + __ ld(t9, MemOperand(t9)); + __ Jump(t9); +} + + +void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub + // ----------- S t a t e ------------- + // -- a1 : function + // -- a3 : slot in feedback array (smi) + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0); +} + + +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0); +} + + +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0); +} + + +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { + // Calling convention for keyed IC load (from ic-mips64.cc). + GenerateLoadICDebugBreak(masm); +} + + +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Calling convention for IC keyed store call (from ic-mips64.cc). 
+ Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0); +} + + +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { + // Register state for CompareNil IC + // ----------- S t a t e ------------- + // -- a0 : value + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, a0.bit(), 0); +} + + +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { + // In places other than IC call sites it is expected that v0 is TOS which + // is an object - this is not generally the case so this should be used with + // care. + Generate_DebugBreakCallHelper(masm, v0.bit(), 0); +} + + +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { + // Register state for CallFunctionStub (from code-stubs-mips.cc). + // ----------- S t a t e ------------- + // -- a1 : function + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, a1.bit(), 0); +} + + +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { + // Calling convention for CallConstructStub (from code-stubs-mips.cc). + // ----------- S t a t e ------------- + // -- a0 : number of arguments (not smi) + // -- a1 : constructor function + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit()); +} + + + +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { + // Calling convention for CallConstructStub (from code-stubs-mips.cc). 
+ // ----------- S t a t e ------------- + // -- a0 : number of arguments (not smi) + // -- a1 : constructor function + // -- a2 : feedback array + // -- a3 : feedback slot (smi) + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit()); +} + + +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { + // Generate enough nop's to make space for a call instruction. Avoid emitting + // the trampoline pool in the debug break slot code. + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); + Label check_codesize; + __ bind(&check_codesize); + __ RecordDebugBreakSlot(); + for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) { + __ nop(MacroAssembler::DEBUG_BREAK_NOP); + } + DCHECK_EQ(Assembler::kDebugBreakSlotInstructions, + masm->InstructionsGeneratedSince(&check_codesize)); +} + + +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { + // In the places where a debug break slot is inserted no registers can contain + // object pointers. + Generate_DebugBreakCallHelper(masm, 0, 0); +} + + +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + __ Ret(); +} + + +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + ExternalReference restarter_frame_function_slot = + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); + __ li(at, Operand(restarter_frame_function_slot)); + __ sw(zero_reg, MemOperand(at, 0)); + + // We do not know our frame height, but set sp based on fp. + __ Dsubu(sp, fp, Operand(kPointerSize)); + + __ Pop(ra, fp, a1); // Return address, Frame, Function. + + // Load context from the function. + __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Get function code. 
+ __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset)); + __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Re-run JSFunction, a1 is function, cp is context. + __ Jump(t9); +} + + +const bool LiveEdit::kFrameDropperSupported = true; + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/deoptimizer-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/deoptimizer-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/deoptimizer-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/deoptimizer-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,379 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" + +namespace v8 { +namespace internal { + + +int Deoptimizer::patch_size() { + const int kCallInstructionSizeInWords = 6; + return kCallInstructionSizeInWords * Assembler::kInstrSize; +} + + +void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { + Address code_start_address = code->instruction_start(); + // Invalidate the relocation information, as it will become invalid by the + // code patching below, and is not needed any more. + code->InvalidateRelocation(); + + if (FLAG_zap_code_space) { + // Fail hard and early if we enter this code object again. 
+ byte* pointer = code->FindCodeAgeSequence(); + if (pointer != NULL) { + pointer += kNoCodeAgeSequenceLength; + } else { + pointer = code->instruction_start(); + } + CodePatcher patcher(pointer, 1); + patcher.masm()->break_(0xCC); + + DeoptimizationInputData* data = + DeoptimizationInputData::cast(code->deoptimization_data()); + int osr_offset = data->OsrPcOffset()->value(); + if (osr_offset > 0) { + CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1); + osr_patcher.masm()->break_(0xCC); + } + } + + DeoptimizationInputData* deopt_data = + DeoptimizationInputData::cast(code->deoptimization_data()); +#ifdef DEBUG + Address prev_call_address = NULL; +#endif + // For each LLazyBailout instruction insert a call to the corresponding + // deoptimization entry. + for (int i = 0; i < deopt_data->DeoptCount(); i++) { + if (deopt_data->Pc(i)->value() == -1) continue; + Address call_address = code_start_address + deopt_data->Pc(i)->value(); + Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); + int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry, + RelocInfo::NONE32); + int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; + DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); + DCHECK(call_size_in_bytes <= patch_size()); + CodePatcher patcher(call_address, call_size_in_words); + patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); + DCHECK(prev_call_address == NULL || + call_address >= prev_call_address + patch_size()); + DCHECK(call_address + patch_size() <= code->instruction_end()); + +#ifdef DEBUG + prev_call_address = call_address; +#endif + } +} + + +void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { + // Set the register values. The values are not important as there are no + // callee saved registers in JavaScript frames, so all registers are + // spilled. Registers fp and sp are set to the correct values though. 
+ + for (int i = 0; i < Register::kNumRegisters; i++) { + input_->SetRegister(i, i * 4); + } + input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); + input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { + input_->SetDoubleRegister(i, 0.0); + } + + // Fill the frame content from the actual data on the frame. + for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { + input_->SetFrameSlot(i, Memory::uint64_at(tos + i)); + } +} + + +void Deoptimizer::SetPlatformCompiledStubRegisters( + FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { + ApiFunction function(descriptor->deoptimization_handler()); + ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); + intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); + int params = descriptor->GetHandlerParameterCount(); + output_frame->SetRegister(s0.code(), params); + output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize); + output_frame->SetRegister(s2.code(), handler); +} + + +void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) { + double double_value = input_->GetDoubleRegister(i); + output_frame->SetDoubleRegister(i, double_value); + } +} + + +bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { + // There is no dynamic alignment padding on MIPS in the input frame. + return false; +} + + +#define __ masm()-> + + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Deoptimizer::EntryGenerator::Generate() { + GeneratePrologue(); + + // Unlike on ARM we don't save all the registers, just the useful ones. + // For the rest, there are gaps on the stack, so the offsets remain the same. 
+ const int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + RegList saved_regs = restored_regs | sp.bit() | ra.bit(); + + const int kDoubleRegsSize = + kDoubleSize * FPURegister::kMaxNumAllocatableRegisters; + + // Save all FPU registers before messing with them. + __ Dsubu(sp, sp, Operand(kDoubleRegsSize)); + for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { + FPURegister fpu_reg = FPURegister::FromAllocationIndex(i); + int offset = i * kDoubleSize; + __ sdc1(fpu_reg, MemOperand(sp, offset)); + } + + // Push saved_regs (needed to populate FrameDescription::registers_). + // Leave gaps for other registers. + __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize); + for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { + if ((saved_regs & (1 << i)) != 0) { + __ sd(ToRegister(i), MemOperand(sp, kPointerSize * i)); + } + } + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; + + // Get the bailout id from the stack. + __ ld(a2, MemOperand(sp, kSavedRegistersAreaSize)); + + // Get the address of the location in the code object (a3) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register a4. + __ mov(a3, ra); + // Correct one word for bailout id. + __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); + + __ Dsubu(a4, fp, a4); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(6, a5); + // Pass six arguments, according to O32 or n64 ABI. a0..a3 are same for both. + __ li(a1, Operand(type())); // bailout type, + __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + // a2: bailout id already loaded. + // a3: code address or 0 already loaded. + if (kMipsAbi == kN64) { + // a4: already has fp-to-sp delta. + __ li(a5, Operand(ExternalReference::isolate_address(isolate()))); + } else { // O32 abi. 
+ // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack. + __ sd(a4, CFunctionArgumentOperand(5)); // Fp-to-sp delta. + __ li(a5, Operand(ExternalReference::isolate_address(isolate()))); + __ sd(a5, CFunctionArgumentOperand(6)); // Isolate. + } + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); + } + + // Preserve "deoptimizer" object in register v0 and get the input + // frame descriptor pointer to a1 (deoptimizer->input_); + // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. + __ mov(a0, v0); + __ ld(a1, MemOperand(v0, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. + DCHECK(Register::kNumRegisters == kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((saved_regs & (1 << i)) != 0) { + __ ld(a2, MemOperand(sp, i * kPointerSize)); + __ sd(a2, MemOperand(a1, offset)); + } else if (FLAG_debug_code) { + __ li(a2, kDebugZapValue); + __ sd(a2, MemOperand(a1, offset)); + } + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy FPU registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ ldc1(f0, MemOperand(sp, src_offset)); + __ sdc1(f0, MemOperand(a1, dst_offset)); + } + + // Remove the bailout id and the saved registers from the stack. + __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); + + // Compute a pointer to the unwinding limit in register a2; that is + // the first stack slot not part of the input frame. 
+ __ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset())); + __ Daddu(a2, a2, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ BranchShort(&pop_loop_header); + __ bind(&pop_loop); + __ pop(a4); + __ sd(a4, MemOperand(a3, 0)); + __ daddiu(a3, a3, sizeof(uint64_t)); + __ bind(&pop_loop_header); + __ BranchShort(&pop_loop, ne, a2, Operand(sp)); + // Compute the output frame in the deoptimizer. + __ push(a0); // Preserve deoptimizer object across call. + // a0: deoptimizer object; a1: scratch. + __ PrepareCallCFunction(1, a1); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate()), 1); + } + __ pop(a0); // Restore deoptimizer object (class Deoptimizer). + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; + // Outer loop state: a4 = current "FrameDescription** output_", + // a1 = one past the last FrameDescription**. + __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); + __ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. + __ dsll(a1, a1, kPointerSizeLog2); // Count to offset. + __ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**. + __ jmp(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
+ __ ld(a2, MemOperand(a4, 0)); // output_[ix] + __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); + __ bind(&inner_push_loop); + __ Dsubu(a3, a3, Operand(sizeof(uint64_t))); + __ Daddu(a6, a2, Operand(a3)); + __ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset())); + __ push(a7); + __ bind(&inner_loop_header); + __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); + + __ Daddu(a4, a4, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ BranchShort(&outer_push_loop, lt, a4, Operand(a1)); + + __ ld(a1, MemOperand(a0, Deoptimizer::input_offset())); + for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { + const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i); + int src_offset = i * kDoubleSize + double_regs_offset; + __ ldc1(fpu_reg, MemOperand(a1, src_offset)); + } + + // Push state, pc, and continuation from the last output frame. + __ ld(a6, MemOperand(a2, FrameDescription::state_offset())); + __ push(a6); + + __ ld(a6, MemOperand(a2, FrameDescription::pc_offset())); + __ push(a6); + __ ld(a6, MemOperand(a2, FrameDescription::continuation_offset())); + __ push(a6); + + + // Technically restoring 'at' should work unless zero_reg is also restored + // but it's safer to check for this. + DCHECK(!(at.bit() & restored_regs)); + // Restore the registers from the last output frame. + __ mov(at, a2); + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs & (1 << i)) != 0) { + __ ld(ToRegister(i), MemOperand(at, offset)); + } + } + + __ InitializeRootRegister(); + + __ pop(at); // Get continuation, leave pc on stack. + __ pop(ra); + __ Jump(at); + __ stop("Unreachable."); +} + + +// Maximum size of a table entry generated below. 
+const int Deoptimizer::table_entry_size_ = 11 * Assembler::kInstrSize; + +void Deoptimizer::TableEntryGenerator::GeneratePrologue() { + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); + + // Create a sequence of deoptimization entries. + // Note that registers are still live when jumping to an entry. + Label table_start; + __ bind(&table_start); + for (int i = 0; i < count(); i++) { + Label start; + __ bind(&start); + __ daddiu(sp, sp, -1 * kPointerSize); + // Jump over the remaining deopt entries (including this one). + // This code is always reached by calling Jump, which puts the target (label + // start) into t9. + const int remaining_entries = (count() - i) * table_entry_size_; + __ Daddu(t9, t9, remaining_entries); + // 'at' was clobbered so we can only load the current entry value here. + __ li(t8, i); + __ jr(t9); // Expose delay slot. + __ sd(t8, MemOperand(sp, 0 * kPointerSize)); // In the delay slot. + + // Pad the rest of the code. + while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) { + __ nop(); + } + + DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); + } + + DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), + count() * table_entry_size_); +} + + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No out-of-line constant pool support. 
+ UNREACHABLE(); +} + + +#undef __ + + +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/mips64/disasm-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/disasm-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/disasm-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/disasm-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1504 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// A Disassembler object is used to disassemble a block of code instruction by +// instruction. The default implementation of the NameConverter object can be +// overriden to modify register names or to do symbol lookup on addresses. +// +// The example below will disassemble a block of code and print it to stdout. +// +// NameConverter converter; +// Disassembler d(converter); +// for (byte* pc = begin; pc < end;) { +// v8::internal::EmbeddedVector<char, 256> buffer; +// byte* prev_pc = pc; +// pc += d.InstructionDecode(buffer, pc); +// printf("%p %08x %s\n", +// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer); +// } +// +// The Disassembler class also has a convenience method to disassemble a block +// of code into a FILE*, meaning that the above functionality could also be +// achieved by just calling Disassembler::Disassemble(stdout, begin, end); + + +#include <assert.h> +#include <stdarg.h> +#include <stdio.h> +#include <string.h> + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/base/platform/platform.h" +#include "src/disasm.h" +#include "src/macro-assembler.h" +#include "src/mips64/constants-mips64.h" + +namespace v8 { +namespace internal { + +//------------------------------------------------------------------------------ + +// Decoder decodes and disassembles instructions into an output buffer. 
+// It uses the converter to convert register names and call destinations into +// more informative description. +class Decoder { + public: + Decoder(const disasm::NameConverter& converter, + v8::internal::Vector<char> out_buffer) + : converter_(converter), + out_buffer_(out_buffer), + out_buffer_pos_(0) { + out_buffer_[out_buffer_pos_] = '\0'; + } + + ~Decoder() {} + + // Writes one disassembled instruction into 'buffer' (0-terminated). + // Returns the length of the disassembled machine instruction in bytes. + int InstructionDecode(byte* instruction); + + private: + // Bottleneck functions to print into the out_buffer. + void PrintChar(const char ch); + void Print(const char* str); + + // Printing of common values. + void PrintRegister(int reg); + void PrintFPURegister(int freg); + void PrintRs(Instruction* instr); + void PrintRt(Instruction* instr); + void PrintRd(Instruction* instr); + void PrintFs(Instruction* instr); + void PrintFt(Instruction* instr); + void PrintFd(Instruction* instr); + void PrintSa(Instruction* instr); + void PrintSd(Instruction* instr); + void PrintSs1(Instruction* instr); + void PrintSs2(Instruction* instr); + void PrintBc(Instruction* instr); + void PrintCc(Instruction* instr); + void PrintFunction(Instruction* instr); + void PrintSecondaryField(Instruction* instr); + void PrintUImm16(Instruction* instr); + void PrintSImm16(Instruction* instr); + void PrintXImm16(Instruction* instr); + void PrintXImm21(Instruction* instr); + void PrintXImm26(Instruction* instr); + void PrintCode(Instruction* instr); // For break and trap instructions. + // Printing of instruction name. + void PrintInstructionName(Instruction* instr); + + // Handle formatting of instructions and their options. 
+ int FormatRegister(Instruction* instr, const char* option); + int FormatFPURegister(Instruction* instr, const char* option); + int FormatOption(Instruction* instr, const char* option); + void Format(Instruction* instr, const char* format); + void Unknown(Instruction* instr); + int DecodeBreakInstr(Instruction* instr); + + // Each of these functions decodes one particular instruction type. + int DecodeTypeRegister(Instruction* instr); + void DecodeTypeImmediate(Instruction* instr); + void DecodeTypeJump(Instruction* instr); + + const disasm::NameConverter& converter_; + v8::internal::Vector<char> out_buffer_; + int out_buffer_pos_; + + DISALLOW_COPY_AND_ASSIGN(Decoder); +}; + + +// Support for assertions in the Decoder formatting functions. +#define STRING_STARTS_WITH(string, compare_string) \ + (strncmp(string, compare_string, strlen(compare_string)) == 0) + + +// Append the ch to the output buffer. +void Decoder::PrintChar(const char ch) { + out_buffer_[out_buffer_pos_++] = ch; +} + + +// Append the str to the output buffer. +void Decoder::Print(const char* str) { + char cur = *str++; + while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) { + PrintChar(cur); + cur = *str++; + } + out_buffer_[out_buffer_pos_] = 0; +} + + +// Print the register name according to the active name converter. +void Decoder::PrintRegister(int reg) { + Print(converter_.NameOfCPURegister(reg)); +} + + +void Decoder::PrintRs(Instruction* instr) { + int reg = instr->RsValue(); + PrintRegister(reg); +} + + +void Decoder::PrintRt(Instruction* instr) { + int reg = instr->RtValue(); + PrintRegister(reg); +} + + +void Decoder::PrintRd(Instruction* instr) { + int reg = instr->RdValue(); + PrintRegister(reg); +} + + +// Print the FPUregister name according to the active name converter. 
+void Decoder::PrintFPURegister(int freg) { + Print(converter_.NameOfXMMRegister(freg)); +} + + +void Decoder::PrintFs(Instruction* instr) { + int freg = instr->RsValue(); + PrintFPURegister(freg); +} + + +void Decoder::PrintFt(Instruction* instr) { + int freg = instr->RtValue(); + PrintFPURegister(freg); +} + + +void Decoder::PrintFd(Instruction* instr) { + int freg = instr->RdValue(); + PrintFPURegister(freg); +} + + +// Print the integer value of the sa field. +void Decoder::PrintSa(Instruction* instr) { + int sa = instr->SaValue(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); +} + + +// Print the integer value of the rd field, when it is not used as reg. +void Decoder::PrintSd(Instruction* instr) { + int sd = instr->RdValue(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd); +} + + +// Print the integer value of the rd field, when used as 'ext' size. +void Decoder::PrintSs1(Instruction* instr) { + int ss = instr->RdValue(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1); +} + + +// Print the integer value of the rd field, when used as 'ins' size. +void Decoder::PrintSs2(Instruction* instr) { + int ss = instr->RdValue(); + int pos = instr->SaValue(); + out_buffer_pos_ += + SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1); +} + + +// Print the integer value of the cc field for the bc1t/f instructions. +void Decoder::PrintBc(Instruction* instr) { + int cc = instr->FBccValue(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc); +} + + +// Print the integer value of the cc field for the FP compare instructions. +void Decoder::PrintCc(Instruction* instr) { + int cc = instr->FCccValue(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc); +} + + +// Print 16-bit unsigned immediate value. 
+void Decoder::PrintUImm16(Instruction* instr) { + int32_t imm = instr->Imm16Value(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); +} + + +// Print 16-bit signed immediate value. +void Decoder::PrintSImm16(Instruction* instr) { + int32_t imm = ((instr->Imm16Value()) << 16) >> 16; + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); +} + + +// Print 16-bit hexa immediate value. +void Decoder::PrintXImm16(Instruction* instr) { + int32_t imm = instr->Imm16Value(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); +} + + +// Print 21-bit immediate value. +void Decoder::PrintXImm21(Instruction* instr) { + uint32_t imm = instr->Imm21Value(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); +} + + +// Print 26-bit immediate value. +void Decoder::PrintXImm26(Instruction* instr) { + uint32_t imm = instr->Imm26Value() << kImmFieldShift; + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); +} + + +// Print 26-bit immediate value. +void Decoder::PrintCode(Instruction* instr) { + if (instr->OpcodeFieldRaw() != SPECIAL) + return; // Not a break or trap instruction. + switch (instr->FunctionFieldRaw()) { + case BREAK: { + int32_t code = instr->Bits(25, 6); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "0x%05x (%d)", code, code); + break; + } + case TGE: + case TGEU: + case TLT: + case TLTU: + case TEQ: + case TNE: { + int32_t code = instr->Bits(15, 6); + out_buffer_pos_ += + SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code); + break; + } + default: // Not a break or trap instruction. + break; + } +} + + +// Printing of instruction name. +void Decoder::PrintInstructionName(Instruction* instr) { +} + + +// Handle all register based formatting in this function to reduce the +// complexity of FormatOption. 
+int Decoder::FormatRegister(Instruction* instr, const char* format) { + DCHECK(format[0] == 'r'); + if (format[1] == 's') { // 'rs: Rs register. + int reg = instr->RsValue(); + PrintRegister(reg); + return 2; + } else if (format[1] == 't') { // 'rt: rt register. + int reg = instr->RtValue(); + PrintRegister(reg); + return 2; + } else if (format[1] == 'd') { // 'rd: rd register. + int reg = instr->RdValue(); + PrintRegister(reg); + return 2; + } + UNREACHABLE(); + return -1; +} + + +// Handle all FPUregister based formatting in this function to reduce the +// complexity of FormatOption. +int Decoder::FormatFPURegister(Instruction* instr, const char* format) { + DCHECK(format[0] == 'f'); + if (format[1] == 's') { // 'fs: fs register. + int reg = instr->FsValue(); + PrintFPURegister(reg); + return 2; + } else if (format[1] == 't') { // 'ft: ft register. + int reg = instr->FtValue(); + PrintFPURegister(reg); + return 2; + } else if (format[1] == 'd') { // 'fd: fd register. + int reg = instr->FdValue(); + PrintFPURegister(reg); + return 2; + } else if (format[1] == 'r') { // 'fr: fr register. + int reg = instr->FrValue(); + PrintFPURegister(reg); + return 2; + } + UNREACHABLE(); + return -1; +} + + +// FormatOption takes a formatting string and interprets it based on +// the current instructions. The format string points to the first +// character of the option string (the option escape has already been +// consumed by the caller.) FormatOption returns the number of +// characters that were consumed from the formatting string. +int Decoder::FormatOption(Instruction* instr, const char* format) { + switch (format[0]) { + case 'c': { // 'code for break or trap instructions. + DCHECK(STRING_STARTS_WITH(format, "code")); + PrintCode(instr); + return 4; + } + case 'i': { // 'imm16u or 'imm26. 
+ if (format[3] == '1') { + DCHECK(STRING_STARTS_WITH(format, "imm16")); + if (format[5] == 's') { + DCHECK(STRING_STARTS_WITH(format, "imm16s")); + PrintSImm16(instr); + } else if (format[5] == 'u') { + DCHECK(STRING_STARTS_WITH(format, "imm16u")); + PrintSImm16(instr); + } else { + DCHECK(STRING_STARTS_WITH(format, "imm16x")); + PrintXImm16(instr); + } + return 6; + } else if (format[3] == '2' && format[4] == '1') { + DCHECK(STRING_STARTS_WITH(format, "imm21x")); + PrintXImm21(instr); + return 6; + } else if (format[3] == '2' && format[4] == '6') { + DCHECK(STRING_STARTS_WITH(format, "imm26x")); + PrintXImm26(instr); + return 6; + } + } + case 'r': { // 'r: registers. + return FormatRegister(instr, format); + } + case 'f': { // 'f: FPUregisters. + return FormatFPURegister(instr, format); + } + case 's': { // 'sa. + switch (format[1]) { + case 'a': { + DCHECK(STRING_STARTS_WITH(format, "sa")); + PrintSa(instr); + return 2; + } + case 'd': { + DCHECK(STRING_STARTS_WITH(format, "sd")); + PrintSd(instr); + return 2; + } + case 's': { + if (format[2] == '1') { + DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */ + PrintSs1(instr); + return 3; + } else { + DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */ + PrintSs2(instr); + return 3; + } + } + } + } + case 'b': { // 'bc - Special for bc1 cc field. + DCHECK(STRING_STARTS_WITH(format, "bc")); + PrintBc(instr); + return 2; + } + case 'C': { // 'Cc - Special for c.xx.d cc field. + DCHECK(STRING_STARTS_WITH(format, "Cc")); + PrintCc(instr); + return 2; + } + } + UNREACHABLE(); + return -1; +} + + +// Format takes a formatting string for a whole instruction and prints it into +// the output buffer. All escaped options are handed to FormatOption to be +// parsed further. +void Decoder::Format(Instruction* instr, const char* format) { + char cur = *format++; + while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) { + if (cur == '\'') { // Single quote is used as the formatting escape. 
+ format += FormatOption(instr, format); + } else { + out_buffer_[out_buffer_pos_++] = cur; + } + cur = *format++; + } + out_buffer_[out_buffer_pos_] = '\0'; +} + + +// For currently unimplemented decodings the disassembler calls Unknown(instr) +// which will just print "unknown" of the instruction bits. +void Decoder::Unknown(Instruction* instr) { + Format(instr, "unknown"); +} + + +int Decoder::DecodeBreakInstr(Instruction* instr) { + // This is already known to be BREAK instr, just extract the code. + if (instr->Bits(25, 6) == static_cast<int>(kMaxStopCode)) { + // This is stop(msg). + Format(instr, "break, code: 'code"); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "\n%p %08lx stop msg: %s", + static_cast<void*> + (reinterpret_cast<int32_t*>(instr + + Instruction::kInstrSize)), + reinterpret_cast<uint64_t> + (*reinterpret_cast<char**>(instr + + Instruction::kInstrSize)), + *reinterpret_cast<char**>(instr + + Instruction::kInstrSize)); + // Size 3: the break_ instr, plus embedded 64-bit char pointer. + return 3 * Instruction::kInstrSize; + } else { + Format(instr, "break, code: 'code"); + return Instruction::kInstrSize; + } +} + + +int Decoder::DecodeTypeRegister(Instruction* instr) { + switch (instr->OpcodeFieldRaw()) { + case COP1: // Coprocessor instructions. + switch (instr->RsFieldRaw()) { + case MFC1: + Format(instr, "mfc1 'rt, 'fs"); + break; + case DMFC1: + Format(instr, "dmfc1 'rt, 'fs"); + break; + case MFHC1: + Format(instr, "mfhc1 'rt, 'fs"); + break; + case MTC1: + Format(instr, "mtc1 'rt, 'fs"); + break; + case DMTC1: + Format(instr, "dmtc1 'rt, 'fs"); + break; + // These are called "fs" too, although they are not FPU registers. 
+ case CTC1: + Format(instr, "ctc1 'rt, 'fs"); + break; + case CFC1: + Format(instr, "cfc1 'rt, 'fs"); + break; + case MTHC1: + Format(instr, "mthc1 'rt, 'fs"); + break; + case D: + switch (instr->FunctionFieldRaw()) { + case ADD_D: + Format(instr, "add.d 'fd, 'fs, 'ft"); + break; + case SUB_D: + Format(instr, "sub.d 'fd, 'fs, 'ft"); + break; + case MUL_D: + Format(instr, "mul.d 'fd, 'fs, 'ft"); + break; + case DIV_D: + Format(instr, "div.d 'fd, 'fs, 'ft"); + break; + case ABS_D: + Format(instr, "abs.d 'fd, 'fs"); + break; + case MOV_D: + Format(instr, "mov.d 'fd, 'fs"); + break; + case NEG_D: + Format(instr, "neg.d 'fd, 'fs"); + break; + case SQRT_D: + Format(instr, "sqrt.d 'fd, 'fs"); + break; + case CVT_W_D: + Format(instr, "cvt.w.d 'fd, 'fs"); + break; + case CVT_L_D: + Format(instr, "cvt.l.d 'fd, 'fs"); + break; + case TRUNC_W_D: + Format(instr, "trunc.w.d 'fd, 'fs"); + break; + case TRUNC_L_D: + Format(instr, "trunc.l.d 'fd, 'fs"); + break; + case ROUND_W_D: + Format(instr, "round.w.d 'fd, 'fs"); + break; + case ROUND_L_D: + Format(instr, "round.l.d 'fd, 'fs"); + break; + case FLOOR_W_D: + Format(instr, "floor.w.d 'fd, 'fs"); + break; + case FLOOR_L_D: + Format(instr, "floor.l.d 'fd, 'fs"); + break; + case CEIL_W_D: + Format(instr, "ceil.w.d 'fd, 'fs"); + break; + case CEIL_L_D: + Format(instr, "ceil.l.d 'fd, 'fs"); + break; + case CVT_S_D: + Format(instr, "cvt.s.d 'fd, 'fs"); + break; + case C_F_D: + Format(instr, "c.f.d 'fs, 'ft, 'Cc"); + break; + case C_UN_D: + Format(instr, "c.un.d 'fs, 'ft, 'Cc"); + break; + case C_EQ_D: + Format(instr, "c.eq.d 'fs, 'ft, 'Cc"); + break; + case C_UEQ_D: + Format(instr, "c.ueq.d 'fs, 'ft, 'Cc"); + break; + case C_OLT_D: + Format(instr, "c.olt.d 'fs, 'ft, 'Cc"); + break; + case C_ULT_D: + Format(instr, "c.ult.d 'fs, 'ft, 'Cc"); + break; + case C_OLE_D: + Format(instr, "c.ole.d 'fs, 'ft, 'Cc"); + break; + case C_ULE_D: + Format(instr, "c.ule.d 'fs, 'ft, 'Cc"); + break; + default: + Format(instr, "unknown.cop1.d"); + break; + 
} + break; + case W: + switch (instr->FunctionFieldRaw()) { + case CVT_D_W: // Convert word to double. + Format(instr, "cvt.d.w 'fd, 'fs"); + break; + default: + UNREACHABLE(); + } + break; + case L: + switch (instr->FunctionFieldRaw()) { + case CVT_D_L: + Format(instr, "cvt.d.l 'fd, 'fs"); + break; + case CVT_S_L: + Format(instr, "cvt.s.l 'fd, 'fs"); + break; + case CMP_UN: + Format(instr, "cmp.un.d 'fd, 'fs, 'ft"); + break; + case CMP_EQ: + Format(instr, "cmp.eq.d 'fd, 'fs, 'ft"); + break; + case CMP_UEQ: + Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft"); + break; + case CMP_LT: + Format(instr, "cmp.lt.d 'fd, 'fs, 'ft"); + break; + case CMP_ULT: + Format(instr, "cmp.ult.d 'fd, 'fs, 'ft"); + break; + case CMP_LE: + Format(instr, "cmp.le.d 'fd, 'fs, 'ft"); + break; + case CMP_ULE: + Format(instr, "cmp.ule.d 'fd, 'fs, 'ft"); + break; + case CMP_OR: + Format(instr, "cmp.or.d 'fd, 'fs, 'ft"); + break; + case CMP_UNE: + Format(instr, "cmp.une.d 'fd, 'fs, 'ft"); + break; + case CMP_NE: + Format(instr, "cmp.ne.d 'fd, 'fs, 'ft"); + break; + default: + UNREACHABLE(); + } + break; + default: + UNREACHABLE(); + } + break; + case COP1X: + switch (instr->FunctionFieldRaw()) { + case MADD_D: + Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft"); + break; + default: + UNREACHABLE(); + } + break; + case SPECIAL: + switch (instr->FunctionFieldRaw()) { + case JR: + Format(instr, "jr 'rs"); + break; + case JALR: + Format(instr, "jalr 'rs"); + break; + case SLL: + if (0x0 == static_cast<int>(instr->InstructionBits())) + Format(instr, "nop"); + else + Format(instr, "sll 'rd, 'rt, 'sa"); + break; + case DSLL: + Format(instr, "dsll 'rd, 'rt, 'sa"); + break; + case D_MUL_MUH: // Equals to DMUL. 
+ if (kArchVariant != kMips64r6) { + Format(instr, "dmult 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "dmul 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmuh 'rd, 'rs, 'rt"); + } + } + break; + case DSLL32: + Format(instr, "dsll32 'rd, 'rt, 'sa"); + break; + case SRL: + if (instr->RsValue() == 0) { + Format(instr, "srl 'rd, 'rt, 'sa"); + } else { + if (kArchVariant == kMips64r2) { + Format(instr, "rotr 'rd, 'rt, 'sa"); + } else { + Unknown(instr); + } + } + break; + case DSRL: + if (instr->RsValue() == 0) { + Format(instr, "dsrl 'rd, 'rt, 'sa"); + } else { + if (kArchVariant == kMips64r2) { + Format(instr, "drotr 'rd, 'rt, 'sa"); + } else { + Unknown(instr); + } + } + break; + case DSRL32: + Format(instr, "dsrl32 'rd, 'rt, 'sa"); + break; + case SRA: + Format(instr, "sra 'rd, 'rt, 'sa"); + break; + case DSRA: + Format(instr, "dsra 'rd, 'rt, 'sa"); + break; + case DSRA32: + Format(instr, "dsra32 'rd, 'rt, 'sa"); + break; + case SLLV: + Format(instr, "sllv 'rd, 'rt, 'rs"); + break; + case DSLLV: + Format(instr, "dsllv 'rd, 'rt, 'rs"); + break; + case SRLV: + if (instr->SaValue() == 0) { + Format(instr, "srlv 'rd, 'rt, 'rs"); + } else { + if (kArchVariant == kMips64r2) { + Format(instr, "rotrv 'rd, 'rt, 'rs"); + } else { + Unknown(instr); + } + } + break; + case DSRLV: + if (instr->SaValue() == 0) { + Format(instr, "dsrlv 'rd, 'rt, 'rs"); + } else { + if (kArchVariant == kMips64r2) { + Format(instr, "drotrv 'rd, 'rt, 'rs"); + } else { + Unknown(instr); + } + } + break; + case SRAV: + Format(instr, "srav 'rd, 'rt, 'rs"); + break; + case DSRAV: + Format(instr, "dsrav 'rd, 'rt, 'rs"); + break; + case MFHI: + if (instr->Bits(25, 16) == 0) { + Format(instr, "mfhi 'rd"); + } else { + if ((instr->FunctionFieldRaw() == CLZ_R6) + && (instr->FdValue() == 1)) { + Format(instr, "clz 'rd, 'rs"); + } else if ((instr->FunctionFieldRaw() == CLO_R6) + && (instr->FdValue() == 1)) { + Format(instr, "clo 'rd, 'rs"); + } + } + break; + case MFLO: + 
Format(instr, "mflo 'rd"); + break; + case D_MUL_MUH_U: // Equals to DMULTU. + if (kArchVariant != kMips64r6) { + Format(instr, "dmultu 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "dmulu 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmuhu 'rd, 'rs, 'rt"); + } + } + break; + case MULT: // @Mips64r6 == MUL_MUH. + if (kArchVariant != kMips64r6) { + Format(instr, "mult 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "mul 'rd, 'rs, 'rt"); + } else { + Format(instr, "muh 'rd, 'rs, 'rt"); + } + } + break; + case MULTU: // @Mips64r6 == MUL_MUH_U. + if (kArchVariant != kMips64r6) { + Format(instr, "multu 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "mulu 'rd, 'rs, 'rt"); + } else { + Format(instr, "muhu 'rd, 'rs, 'rt"); + } + } + + break; + case DIV: // @Mips64r6 == DIV_MOD. + if (kArchVariant != kMips64r6) { + Format(instr, "div 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "div 'rd, 'rs, 'rt"); + } else { + Format(instr, "mod 'rd, 'rs, 'rt"); + } + } + break; + case DDIV: // @Mips64r6 == D_DIV_MOD. + if (kArchVariant != kMips64r6) { + Format(instr, "ddiv 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "ddiv 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmod 'rd, 'rs, 'rt"); + } + } + break; + case DIVU: // @Mips64r6 == DIV_MOD_U. + if (kArchVariant != kMips64r6) { + Format(instr, "divu 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "divu 'rd, 'rs, 'rt"); + } else { + Format(instr, "modu 'rd, 'rs, 'rt"); + } + } + break; + case DDIVU: // @Mips64r6 == D_DIV_MOD_U. 
+ if (kArchVariant != kMips64r6) { + Format(instr, "ddivu 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "ddivu 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmodu 'rd, 'rs, 'rt"); + } + } + break; + case ADD: + Format(instr, "add 'rd, 'rs, 'rt"); + break; + case DADD: + Format(instr, "dadd 'rd, 'rs, 'rt"); + break; + case ADDU: + Format(instr, "addu 'rd, 'rs, 'rt"); + break; + case DADDU: + Format(instr, "daddu 'rd, 'rs, 'rt"); + break; + case SUB: + Format(instr, "sub 'rd, 'rs, 'rt"); + break; + case DSUB: + Format(instr, "dsub 'rd, 'rs, 'rt"); + break; + case SUBU: + Format(instr, "subu 'rd, 'rs, 'rt"); + break; + case DSUBU: + Format(instr, "dsubu 'rd, 'rs, 'rt"); + break; + case AND: + Format(instr, "and 'rd, 'rs, 'rt"); + break; + case OR: + if (0 == instr->RsValue()) { + Format(instr, "mov 'rd, 'rt"); + } else if (0 == instr->RtValue()) { + Format(instr, "mov 'rd, 'rs"); + } else { + Format(instr, "or 'rd, 'rs, 'rt"); + } + break; + case XOR: + Format(instr, "xor 'rd, 'rs, 'rt"); + break; + case NOR: + Format(instr, "nor 'rd, 'rs, 'rt"); + break; + case SLT: + Format(instr, "slt 'rd, 'rs, 'rt"); + break; + case SLTU: + Format(instr, "sltu 'rd, 'rs, 'rt"); + break; + case BREAK: + return DecodeBreakInstr(instr); + case TGE: + Format(instr, "tge 'rs, 'rt, code: 'code"); + break; + case TGEU: + Format(instr, "tgeu 'rs, 'rt, code: 'code"); + break; + case TLT: + Format(instr, "tlt 'rs, 'rt, code: 'code"); + break; + case TLTU: + Format(instr, "tltu 'rs, 'rt, code: 'code"); + break; + case TEQ: + Format(instr, "teq 'rs, 'rt, code: 'code"); + break; + case TNE: + Format(instr, "tne 'rs, 'rt, code: 'code"); + break; + case MOVZ: + Format(instr, "movz 'rd, 'rs, 'rt"); + break; + case MOVN: + Format(instr, "movn 'rd, 'rs, 'rt"); + break; + case MOVCI: + if (instr->Bit(16)) { + Format(instr, "movt 'rd, 'rs, 'bc"); + } else { + Format(instr, "movf 'rd, 'rs, 'bc"); + } + break; + case SELEQZ_S: + Format(instr, "seleqz 'rd, 'rs, 'rt"); + break; 
+ case SELNEZ_S: + Format(instr, "selnez 'rd, 'rs, 'rt"); + break; + default: + UNREACHABLE(); + } + break; + case SPECIAL2: + switch (instr->FunctionFieldRaw()) { + case MUL: + Format(instr, "mul 'rd, 'rs, 'rt"); + break; + case CLZ: + if (kArchVariant != kMips64r6) { + Format(instr, "clz 'rd, 'rs"); + } + break; + default: + UNREACHABLE(); + } + break; + case SPECIAL3: + switch (instr->FunctionFieldRaw()) { + case INS: { + Format(instr, "ins 'rt, 'rs, 'sa, 'ss2"); + break; + } + case EXT: { + Format(instr, "ext 'rt, 'rs, 'sa, 'ss1"); + break; + } + default: + UNREACHABLE(); + } + break; + default: + UNREACHABLE(); + } + return Instruction::kInstrSize; +} + + +void Decoder::DecodeTypeImmediate(Instruction* instr) { + switch (instr->OpcodeFieldRaw()) { + case COP1: + switch (instr->RsFieldRaw()) { + case BC1: + if (instr->FBtrueValue()) { + Format(instr, "bc1t 'bc, 'imm16u"); + } else { + Format(instr, "bc1f 'bc, 'imm16u"); + } + break; + case BC1EQZ: + Format(instr, "bc1eqz 'ft, 'imm16u"); + break; + case BC1NEZ: + Format(instr, "bc1nez 'ft, 'imm16u"); + break; + case W: // CMP.S instruction. + switch (instr->FunctionValue()) { + case CMP_AF: + Format(instr, "cmp.af.S 'ft, 'fs, 'fd"); + break; + case CMP_UN: + Format(instr, "cmp.un.S 'ft, 'fs, 'fd"); + break; + case CMP_EQ: + Format(instr, "cmp.eq.S 'ft, 'fs, 'fd"); + break; + case CMP_UEQ: + Format(instr, "cmp.ueq.S 'ft, 'fs, 'fd"); + break; + case CMP_LT: + Format(instr, "cmp.lt.S 'ft, 'fs, 'fd"); + break; + case CMP_ULT: + Format(instr, "cmp.ult.S 'ft, 'fs, 'fd"); + break; + case CMP_LE: + Format(instr, "cmp.le.S 'ft, 'fs, 'fd"); + break; + case CMP_ULE: + Format(instr, "cmp.ule.S 'ft, 'fs, 'fd"); + break; + case CMP_OR: + Format(instr, "cmp.or.S 'ft, 'fs, 'fd"); + break; + case CMP_UNE: + Format(instr, "cmp.une.S 'ft, 'fs, 'fd"); + break; + case CMP_NE: + Format(instr, "cmp.ne.S 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + case L: // CMP.D instruction. 
+ switch (instr->FunctionValue()) { + case CMP_AF: + Format(instr, "cmp.af.D 'ft, 'fs, 'fd"); + break; + case CMP_UN: + Format(instr, "cmp.un.D 'ft, 'fs, 'fd"); + break; + case CMP_EQ: + Format(instr, "cmp.eq.D 'ft, 'fs, 'fd"); + break; + case CMP_UEQ: + Format(instr, "cmp.ueq.D 'ft, 'fs, 'fd"); + break; + case CMP_LT: + Format(instr, "cmp.lt.D 'ft, 'fs, 'fd"); + break; + case CMP_ULT: + Format(instr, "cmp.ult.D 'ft, 'fs, 'fd"); + break; + case CMP_LE: + Format(instr, "cmp.le.D 'ft, 'fs, 'fd"); + break; + case CMP_ULE: + Format(instr, "cmp.ule.D 'ft, 'fs, 'fd"); + break; + case CMP_OR: + Format(instr, "cmp.or.D 'ft, 'fs, 'fd"); + break; + case CMP_UNE: + Format(instr, "cmp.une.D 'ft, 'fs, 'fd"); + break; + case CMP_NE: + Format(instr, "cmp.ne.D 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + case S: + switch (instr->FunctionValue()) { + case SEL: + Format(instr, "sel.S 'ft, 'fs, 'fd"); + break; + case SELEQZ_C: + Format(instr, "seleqz.S 'ft, 'fs, 'fd"); + break; + case SELNEZ_C: + Format(instr, "selnez.S 'ft, 'fs, 'fd"); + break; + case MIN: + Format(instr, "min.S 'ft, 'fs, 'fd"); + break; + case MINA: + Format(instr, "mina.S 'ft, 'fs, 'fd"); + break; + case MAX: + Format(instr, "max.S 'ft, 'fs, 'fd"); + break; + case MAXA: + Format(instr, "maxa.S 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + case D: + switch (instr->FunctionValue()) { + case SEL: + Format(instr, "sel.D 'ft, 'fs, 'fd"); + break; + case SELEQZ_C: + Format(instr, "seleqz.D 'ft, 'fs, 'fd"); + break; + case SELNEZ_C: + Format(instr, "selnez.D 'ft, 'fs, 'fd"); + break; + case MIN: + Format(instr, "min.D 'ft, 'fs, 'fd"); + break; + case MINA: + Format(instr, "mina.D 'ft, 'fs, 'fd"); + break; + case MAX: + Format(instr, "max.D 'ft, 'fs, 'fd"); + break; + case MAXA: + Format(instr, "maxa.D 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + default: + UNREACHABLE(); + } + + break; // Case COP1. + // ------------- REGIMM class. 
+ case REGIMM: + switch (instr->RtFieldRaw()) { + case BLTZ: + Format(instr, "bltz 'rs, 'imm16u"); + break; + case BLTZAL: + Format(instr, "bltzal 'rs, 'imm16u"); + break; + case BGEZ: + Format(instr, "bgez 'rs, 'imm16u"); + break; + case BGEZAL: + Format(instr, "bgezal 'rs, 'imm16u"); + break; + case BGEZALL: + Format(instr, "bgezall 'rs, 'imm16u"); + break; + case DAHI: + Format(instr, "dahi 'rs, 'imm16u"); + break; + case DATI: + Format(instr, "dati 'rs, 'imm16u"); + break; + default: + UNREACHABLE(); + } + break; // Case REGIMM. + // ------------- Branch instructions. + case BEQ: + Format(instr, "beq 'rs, 'rt, 'imm16u"); + break; + case BNE: + Format(instr, "bne 'rs, 'rt, 'imm16u"); + break; + case BLEZ: + if ((instr->RtFieldRaw() == 0) + && (instr->RsFieldRaw() != 0)) { + Format(instr, "blez 'rs, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgeuc 'rs, 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgezalc 'rs, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "blezalc 'rs, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BGTZ: + if ((instr->RtFieldRaw() == 0) + && (instr->RsFieldRaw() != 0)) { + Format(instr, "bgtz 'rs, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltuc 'rs, 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltzalc 'rt, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgtzalc 'rt, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BLEZL: + if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgezc 'rt, 'imm16u"); + 
} else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgec 'rs, 'rt, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "blezc 'rt, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BGTZL: + if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltzc 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltc 'rs, 'rt, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgtzc 'rt, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BEQZC: + if (instr->RsFieldRaw() != 0) { + Format(instr, "beqzc 'rs, 'imm21x"); + } + break; + case BNEZC: + if (instr->RsFieldRaw() != 0) { + Format(instr, "bnezc 'rs, 'imm21x"); + } + break; + // ------------- Arithmetic instructions. + case ADDI: + if (kArchVariant != kMips64r6) { + Format(instr, "addi 'rt, 'rs, 'imm16s"); + } else { + // Check if BOVC or BEQC instruction. + if (instr->RsFieldRaw() >= instr->RtFieldRaw()) { + Format(instr, "bovc 'rs, 'rt, 'imm16s"); + } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) { + Format(instr, "beqc 'rs, 'rt, 'imm16s"); + } else { + UNREACHABLE(); + } + } + break; + case DADDI: + if (kArchVariant != kMips64r6) { + Format(instr, "daddi 'rt, 'rs, 'imm16s"); + } else { + // Check if BNVC or BNEC instruction. 
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) { + Format(instr, "bnvc 'rs, 'rt, 'imm16s"); + } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) { + Format(instr, "bnec 'rs, 'rt, 'imm16s"); + } else { + UNREACHABLE(); + } + } + break; + case ADDIU: + Format(instr, "addiu 'rt, 'rs, 'imm16s"); + break; + case DADDIU: + Format(instr, "daddiu 'rt, 'rs, 'imm16s"); + break; + case SLTI: + Format(instr, "slti 'rt, 'rs, 'imm16s"); + break; + case SLTIU: + Format(instr, "sltiu 'rt, 'rs, 'imm16u"); + break; + case ANDI: + Format(instr, "andi 'rt, 'rs, 'imm16x"); + break; + case ORI: + Format(instr, "ori 'rt, 'rs, 'imm16x"); + break; + case XORI: + Format(instr, "xori 'rt, 'rs, 'imm16x"); + break; + case LUI: + if (kArchVariant != kMips64r6) { + Format(instr, "lui 'rt, 'imm16x"); + } else { + if (instr->RsValue() != 0) { + Format(instr, "aui 'rt, 'imm16x"); + } else { + Format(instr, "lui 'rt, 'imm16x"); + } + } + break; + case DAUI: + Format(instr, "daui 'rt, 'imm16x"); + break; + // ------------- Memory instructions. 
+ case LB: + Format(instr, "lb 'rt, 'imm16s('rs)"); + break; + case LH: + Format(instr, "lh 'rt, 'imm16s('rs)"); + break; + case LWL: + Format(instr, "lwl 'rt, 'imm16s('rs)"); + break; + case LDL: + Format(instr, "ldl 'rt, 'imm16s('rs)"); + break; + case LW: + Format(instr, "lw 'rt, 'imm16s('rs)"); + break; + case LWU: + Format(instr, "lwu 'rt, 'imm16s('rs)"); + break; + case LD: + Format(instr, "ld 'rt, 'imm16s('rs)"); + break; + case LBU: + Format(instr, "lbu 'rt, 'imm16s('rs)"); + break; + case LHU: + Format(instr, "lhu 'rt, 'imm16s('rs)"); + break; + case LWR: + Format(instr, "lwr 'rt, 'imm16s('rs)"); + break; + case LDR: + Format(instr, "ldr 'rt, 'imm16s('rs)"); + break; + case PREF: + Format(instr, "pref 'rt, 'imm16s('rs)"); + break; + case SB: + Format(instr, "sb 'rt, 'imm16s('rs)"); + break; + case SH: + Format(instr, "sh 'rt, 'imm16s('rs)"); + break; + case SWL: + Format(instr, "swl 'rt, 'imm16s('rs)"); + break; + case SW: + Format(instr, "sw 'rt, 'imm16s('rs)"); + break; + case SD: + Format(instr, "sd 'rt, 'imm16s('rs)"); + break; + case SWR: + Format(instr, "swr 'rt, 'imm16s('rs)"); + break; + case LWC1: + Format(instr, "lwc1 'ft, 'imm16s('rs)"); + break; + case LDC1: + Format(instr, "ldc1 'ft, 'imm16s('rs)"); + break; + case SWC1: + Format(instr, "swc1 'ft, 'imm16s('rs)"); + break; + case SDC1: + Format(instr, "sdc1 'ft, 'imm16s('rs)"); + break; + default: + printf("a 0x%x \n", instr->OpcodeFieldRaw()); + UNREACHABLE(); + break; + } +} + + +void Decoder::DecodeTypeJump(Instruction* instr) { + switch (instr->OpcodeFieldRaw()) { + case J: + Format(instr, "j 'imm26x"); + break; + case JAL: + Format(instr, "jal 'imm26x"); + break; + default: + UNREACHABLE(); + } +} + + +// Disassemble the instruction at *instr_ptr into the output buffer. +// All instructions are one word long, except for the simulator +// psuedo-instruction stop(msg). For that one special case, we return +// size larger than one kInstrSize. 
+int Decoder::InstructionDecode(byte* instr_ptr) { + Instruction* instr = Instruction::At(instr_ptr); + // Print raw instruction bytes. + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, + "%08x ", + instr->InstructionBits()); + switch (instr->InstructionType()) { + case Instruction::kRegisterType: { + return DecodeTypeRegister(instr); + } + case Instruction::kImmediateType: { + DecodeTypeImmediate(instr); + break; + } + case Instruction::kJumpType: { + DecodeTypeJump(instr); + break; + } + default: { + Format(instr, "UNSUPPORTED"); + UNSUPPORTED_MIPS(); + } + } + return Instruction::kInstrSize; +} + + +} } // namespace v8::internal + + + +//------------------------------------------------------------------------------ + +namespace disasm { + +const char* NameConverter::NameOfAddress(byte* addr) const { + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); + return tmp_buffer_.start(); +} + + +const char* NameConverter::NameOfConstant(byte* addr) const { + return NameOfAddress(addr); +} + + +const char* NameConverter::NameOfCPURegister(int reg) const { + return v8::internal::Registers::Name(reg); +} + + +const char* NameConverter::NameOfXMMRegister(int reg) const { + return v8::internal::FPURegisters::Name(reg); +} + + +const char* NameConverter::NameOfByteCPURegister(int reg) const { + UNREACHABLE(); // MIPS does not have the concept of a byte register. + return "nobytereg"; +} + + +const char* NameConverter::NameInCode(byte* addr) const { + // The default name converter is called for unknown code. So we will not try + // to access any memory. 
+ return ""; +} + + +//------------------------------------------------------------------------------ + +Disassembler::Disassembler(const NameConverter& converter) + : converter_(converter) {} + + +Disassembler::~Disassembler() {} + + +int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer, + byte* instruction) { + v8::internal::Decoder d(converter_, buffer); + return d.InstructionDecode(instruction); +} + + +// The MIPS assembler does not currently use constant pools. +int Disassembler::ConstantPoolSizeAt(byte* instruction) { + return -1; +} + + +void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) { + NameConverter converter; + Disassembler d(converter); + for (byte* pc = begin; pc < end;) { + v8::internal::EmbeddedVector<char, 128> buffer; + buffer[0] = '\0'; + byte* prev_pc = pc; + pc += d.InstructionDecode(buffer, pc); + v8::internal::PrintF(f, "%p %08x %s\n", + prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start()); + } +} + + +#undef UNSUPPORTED + +} // namespace disasm + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/frames-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/frames-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/frames-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/frames-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/assembler.h" +#include "src/frames.h" +#include "src/mips64/assembler-mips64-inl.h" +#include "src/mips64/assembler-mips64.h" + +namespace v8 { +namespace internal { + + +Register JavaScriptFrame::fp_register() { return v8::internal::fp; } +Register JavaScriptFrame::context_register() { return cp; } +Register JavaScriptFrame::constant_pool_pointer_register() { + UNREACHABLE(); + return no_reg; +} + + +Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } +Register StubFailureTrampolineFrame::context_register() { return cp; } +Register StubFailureTrampolineFrame::constant_pool_pointer_register() { + UNREACHABLE(); + return no_reg; +} + + +Object*& ExitFrame::constant_pool_slot() const { + UNREACHABLE(); + return Memory::Object_at(NULL); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/frames-mips64.h nodejs-0.11.15/deps/v8/src/mips64/frames-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/frames-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/frames-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,215 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + + +#ifndef V8_MIPS_FRAMES_MIPS_H_ +#define V8_MIPS_FRAMES_MIPS_H_ + +namespace v8 { +namespace internal { + +// Register lists. +// Note that the bit values must match those used in actual instruction +// encoding. 
+const int kNumRegs = 32; + +const RegList kJSCallerSaved = + 1 << 2 | // v0 + 1 << 3 | // v1 + 1 << 4 | // a0 + 1 << 5 | // a1 + 1 << 6 | // a2 + 1 << 7 | // a3 + 1 << 8 | // a4 + 1 << 9 | // a5 + 1 << 10 | // a6 + 1 << 11 | // a7 + 1 << 12 | // t0 + 1 << 13 | // t1 + 1 << 14 | // t2 + 1 << 15; // t3 + +const int kNumJSCallerSaved = 14; + + +// Return the code of the n-th caller-saved register available to JavaScript +// e.g. JSCallerSavedReg(0) returns a0.code() == 4. +int JSCallerSavedCode(int n); + + +// Callee-saved registers preserved when switching from C to JavaScript. +const RegList kCalleeSaved = + 1 << 16 | // s0 + 1 << 17 | // s1 + 1 << 18 | // s2 + 1 << 19 | // s3 + 1 << 20 | // s4 + 1 << 21 | // s5 + 1 << 22 | // s6 (roots in Javascript code) + 1 << 23 | // s7 (cp in Javascript code) + 1 << 30; // fp/s8 + +const int kNumCalleeSaved = 9; + +const RegList kCalleeSavedFPU = + 1 << 20 | // f20 + 1 << 22 | // f22 + 1 << 24 | // f24 + 1 << 26 | // f26 + 1 << 28 | // f28 + 1 << 30; // f30 + +const int kNumCalleeSavedFPU = 6; + +const RegList kCallerSavedFPU = + 1 << 0 | // f0 + 1 << 2 | // f2 + 1 << 4 | // f4 + 1 << 6 | // f6 + 1 << 8 | // f8 + 1 << 10 | // f10 + 1 << 12 | // f12 + 1 << 14 | // f14 + 1 << 16 | // f16 + 1 << 18; // f18 + + +// Number of registers for which space is reserved in safepoints. Must be a +// multiple of 8. +const int kNumSafepointRegisters = 24; + +// Define the list of registers actually saved at safepoints. +// Note that the number of saved registers may be smaller than the reserved +// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters. +const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved; +const int kNumSafepointSavedRegisters = + kNumJSCallerSaved + kNumCalleeSaved; + +const int kUndefIndex = -1; +// Map with indexes on stack that corresponds to codes of saved registers. 
+const int kSafepointRegisterStackIndexMap[kNumRegs] = { + kUndefIndex, // zero_reg + kUndefIndex, // at + 0, // v0 + 1, // v1 + 2, // a0 + 3, // a1 + 4, // a2 + 5, // a3 + 6, // a4 + 7, // a5 + 8, // a6 + 9, // a7 + 10, // t0 + 11, // t1 + 12, // t2 + 13, // t3 + 14, // s0 + 15, // s1 + 16, // s2 + 17, // s3 + 18, // s4 + 19, // s5 + 20, // s6 + 21, // s7 + kUndefIndex, // t8 + kUndefIndex, // t9 + kUndefIndex, // k0 + kUndefIndex, // k1 + kUndefIndex, // gp + kUndefIndex, // sp + 22, // fp + kUndefIndex +}; + + +// ---------------------------------------------------- + +class EntryFrameConstants : public AllStatic { + public: + static const int kCallerFPOffset = + -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); +}; + + +class ExitFrameConstants : public AllStatic { + public: + static const int kFrameSize = 2 * kPointerSize; + + static const int kCodeOffset = -2 * kPointerSize; + static const int kSPOffset = -1 * kPointerSize; + + // The caller fields are below the frame pointer on the stack. + static const int kCallerFPOffset = +0 * kPointerSize; + // The calling JS function is between FP and PC. + static const int kCallerPCOffset = +1 * kPointerSize; + + // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations. + static const int kCallerSPOffset = +2 * kPointerSize; + + // FP-relative displacement of the caller's SP. + static const int kCallerSPDisplacement = +2 * kPointerSize; + + static const int kConstantPoolOffset = 0; // Not used. +}; + + +class JavaScriptFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset; + static const int kLastParameterOffset = +2 * kPointerSize; + static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; + + // Caller SP-relative. 
+ static const int kParam0Offset = -2 * kPointerSize; + static const int kReceiverOffset = -1 * kPointerSize; +}; + + +class ArgumentsAdaptorFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + kPointerSize; +}; + + +class ConstructFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kImplicitReceiverOffset = -6 * kPointerSize; + static const int kConstructorOffset = -5 * kPointerSize; + static const int kLengthOffset = -4 * kPointerSize; + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize; +}; + + +class InternalFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; +}; + + +inline Object* JavaScriptFrame::function_slot_object() const { + const int offset = JavaScriptFrameConstants::kFunctionOffset; + return Memory::Object_at(fp() + offset); +} + + +inline void StackHandler::SetFp(Address slot, Address fp) { + Memory::Address_at(slot) = fp; +} + + +} } // namespace v8::internal + +#endif diff -Nru nodejs-0.11.13/deps/v8/src/mips64/full-codegen-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/full-codegen-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/full-codegen-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/full-codegen-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4888 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +// Note on Mips implementation: +// +// The result_register() for mips is the 'v0' register, which is defined +// by the ABI to contain function return values. However, the first +// parameter to a function is defined to be 'a0'. So there are many +// places where we have to move a previous result in v0 to a0 for the +// next call: mov(a0, v0). This is not needed on the other architectures. + +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" + +#include "src/mips64/code-stubs-mips64.h" +#include "src/mips64/macro-assembler-mips64.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm_) + + +// A patch site is a location in the code which it is possible to patch. This +// class has a number of methods to emit the code which is patchable and the +// method EmitPatchInfo to record a marker back to the patchable code. This +// marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy +// (raw 16 bit immediate value is used) is the delta from the pc to the first +// instruction of the patchable code. +// The marker instruction is effectively a NOP (dest is zero_reg) and will +// never be emitted by normal code. +class JumpPatchSite BASE_EMBEDDED { + public: + explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) { +#ifdef DEBUG + info_emitted_ = false; +#endif + } + + ~JumpPatchSite() { + DCHECK(patch_site_.is_bound() == info_emitted_); + } + + // When initially emitting this ensure that a jump is always generated to skip + // the inlined smi code. 
+ void EmitJumpIfNotSmi(Register reg, Label* target) { + DCHECK(!patch_site_.is_bound() && !info_emitted_); + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + __ bind(&patch_site_); + __ andi(at, reg, 0); + // Always taken before patched. + __ BranchShort(target, eq, at, Operand(zero_reg)); + } + + // When initially emitting this ensure that a jump is never generated to skip + // the inlined smi code. + void EmitJumpIfSmi(Register reg, Label* target) { + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + DCHECK(!patch_site_.is_bound() && !info_emitted_); + __ bind(&patch_site_); + __ andi(at, reg, 0); + // Never taken before patched. + __ BranchShort(target, ne, at, Operand(zero_reg)); + } + + void EmitPatchInfo() { + if (patch_site_.is_bound()) { + int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); + Register reg = Register::from_code(delta_to_patch_site / kImm16Mask); + __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask); +#ifdef DEBUG + info_emitted_ = true; +#endif + } else { + __ nop(); // Signals no inlined code. + } + } + + private: + MacroAssembler* masm_; + Label patch_site_; +#ifdef DEBUG + bool info_emitted_; +#endif +}; + + +// Generate code for a JS function. On entry to the function the receiver +// and arguments have been pushed on the stack left to right. The actual +// argument count matches the formal parameter count expected by the +// function. +// +// The live registers are: +// o a1: the JS function object being called (i.e. ourselves) +// o cp: our context +// o fp: our caller's frame pointer +// o sp: stack pointer +// o ra: return address +// +// The function builds a JS frame. Please see JavaScriptFrameConstants in +// frames-mips.h for its layout. 
+void FullCodeGenerator::Generate() { + CompilationInfo* info = info_; + handler_table_ = + isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); + + profiling_counter_ = isolate()->factory()->NewCell( + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); + SetFunctionPosition(function()); + Comment cmnt(masm_, "[ function compiled by full code generator"); + + ProfileEntryHookStub::MaybeCallEntryHook(masm_); + +#ifdef DEBUG + if (strlen(FLAG_stop_at) > 0 && + info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ stop("stop-at"); + } +#endif + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + if (info->strict_mode() == SLOPPY && !info->is_native()) { + Label ok; + int receiver_offset = info->scope()->num_parameters() * kPointerSize; + __ ld(at, MemOperand(sp, receiver_offset)); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ Branch(&ok, ne, a2, Operand(at)); + + __ ld(a2, GlobalObjectOperand()); + __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); + + __ sd(a2, MemOperand(sp, receiver_offset)); + __ bind(&ok); + } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + info->set_prologue_offset(masm_->pc_offset()); + __ Prologue(info->IsCodePreAgingActive()); + info->AddNoFrameRange(0, masm_->pc_offset()); + + { Comment cmnt(masm_, "[ Allocate locals"); + int locals_count = info->scope()->num_stack_slots(); + // Generators allocate locals, if any, in context slots. 
+ DCHECK(!info->function()->is_generator() || locals_count == 0); + if (locals_count > 0) { + if (locals_count >= 128) { + Label ok; + __ Dsubu(t1, sp, Operand(locals_count * kPointerSize)); + __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + __ Branch(&ok, hs, t1, Operand(a2)); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&ok); + } + __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); + int kMaxPushes = FLAG_optimize_for_size ? 4 : 32; + if (locals_count >= kMaxPushes) { + int loop_iterations = locals_count / kMaxPushes; + __ li(a2, Operand(loop_iterations)); + Label loop_header; + __ bind(&loop_header); + // Do pushes. + __ Dsubu(sp, sp, Operand(kMaxPushes * kPointerSize)); + for (int i = 0; i < kMaxPushes; i++) { + __ sd(t1, MemOperand(sp, i * kPointerSize)); + } + // Continue loop if not done. + __ Dsubu(a2, a2, Operand(1)); + __ Branch(&loop_header, ne, a2, Operand(zero_reg)); + } + int remaining = locals_count % kMaxPushes; + // Emit the remaining pushes. + __ Dsubu(sp, sp, Operand(remaining * kPointerSize)); + for (int i = 0; i < remaining; i++) { + __ sd(t1, MemOperand(sp, i * kPointerSize)); + } + } + } + + bool function_in_register = true; + + // Possibly allocate a local context. + int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment cmnt(masm_, "[ Allocate context"); + // Argument to NewContext is the function, which is still in a1. + bool need_write_barrier = true; + if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { + __ push(a1); + __ Push(info->scope()->GetScopeInfo()); + __ CallRuntime(Runtime::kNewGlobalContext, 2); + } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(isolate(), heap_slots); + __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. 
+ need_write_barrier = false; + } else { + __ push(a1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); + } + function_in_register = false; + // Context is returned in v0. It replaces the context passed to us. + // It's saved in the stack and kept live in cp. + __ mov(cp, v0); + __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Copy any necessary parameters into the context. + int num_parameters = info->scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Variable* var = scope()->parameter(i); + if (var->IsContextSlot()) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ ld(a0, MemOperand(fp, parameter_offset)); + // Store it in the context. + MemOperand target = ContextOperand(cp, var->index()); + __ sd(a0, target); + + // Update the write barrier. + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, a0, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } + } + } + } + Variable* arguments = scope()->arguments(); + if (arguments != NULL) { + // Function uses arguments object. + Comment cmnt(masm_, "[ Allocate arguments object"); + if (!function_in_register) { + // Load this again, if it's used by the local context below. + __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + } else { + __ mov(a3, a1); + } + // Receiver is just before the parameters on the caller's stack. + int num_parameters = info->scope()->num_parameters(); + int offset = num_parameters * kPointerSize; + __ Daddu(a2, fp, + Operand(StandardFrameConstants::kCallerSPOffset + offset)); + __ li(a1, Operand(Smi::FromInt(num_parameters))); + __ Push(a3, a2, a1); + + // Arguments to ArgumentsAccessStub: + // function, receiver address, parameter count. 
+ // The stub will rewrite receiever and parameter count if the previous + // stack frame was an arguments adapter frame. + ArgumentsAccessStub::Type type; + if (strict_mode() == STRICT) { + type = ArgumentsAccessStub::NEW_STRICT; + } else if (function()->has_duplicate_parameters()) { + type = ArgumentsAccessStub::NEW_SLOPPY_SLOW; + } else { + type = ArgumentsAccessStub::NEW_SLOPPY_FAST; + } + ArgumentsAccessStub stub(isolate(), type); + __ CallStub(&stub); + + SetVar(arguments, v0, a1, a2); + } + + if (FLAG_trace) { + __ CallRuntime(Runtime::kTraceEnter, 0); + } + // Visit the declarations and body unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); + + } else { + PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS); + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. + if (scope()->is_function_scope() && scope()->function() != NULL) { + VariableDeclaration* function = scope()->function(); + DCHECK(function->proxy()->var()->mode() == CONST || + function->proxy()->var()->mode() == CONST_LEGACY); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); + VisitVariableDeclaration(function); + } + VisitDeclarations(scope()->declarations()); + } + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); + Label ok; + __ LoadRoot(at, Heap::kStackLimitRootIndex); + __ Branch(&ok, hs, sp, Operand(at)); + Handle<Code> stack_check = isolate()->builtins()->StackCheck(); + PredictableCodeSizeScope predictable(masm_, + masm_->CallSize(stack_check, RelocInfo::CODE_TARGET)); + __ Call(stack_check, RelocInfo::CODE_TARGET); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + DCHECK(loop_depth() == 0); + + VisitStatements(function()->body()); + + DCHECK(loop_depth() == 0); + } + } + + // Always emit a 
'return undefined' in case control fell off the end of + // the body. + { Comment cmnt(masm_, "[ return <undefined>;"); + __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); + } + EmitReturnSequence(); +} + + +void FullCodeGenerator::ClearAccumulator() { + DCHECK(Smi::FromInt(0) == 0); + __ mov(v0, zero_reg); +} + + +void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) { + __ li(a2, Operand(profiling_counter_)); + __ ld(a3, FieldMemOperand(a2, Cell::kValueOffset)); + __ Dsubu(a3, a3, Operand(Smi::FromInt(delta))); + __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset)); +} + + +void FullCodeGenerator::EmitProfilingCounterReset() { + int reset_value = FLAG_interrupt_budget; + if (info_->is_debug()) { + // Detect debug break requests as soon as possible. + reset_value = FLAG_interrupt_budget >> 4; + } + __ li(a2, Operand(profiling_counter_)); + __ li(a3, Operand(Smi::FromInt(reset_value))); + __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset)); +} + + +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { + // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need + // to make sure it is constant. Branch may emit a skip-or-jump sequence + // instead of the normal Branch. It seems that the "skip" part of that + // sequence is about as long as this Branch would be so it is safe to ignore + // that. + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + Comment cmnt(masm_, "[ Back edge bookkeeping"); + Label ok; + DCHECK(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + int weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kCodeSizeMultiplier)); + EmitProfilingCounterDecrement(weight); + __ slt(at, a3, zero_reg); + __ beq(at, zero_reg, &ok); + // Call will emit a li t9 first, so it is safe to use the delay slot. 
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET); + // Record a mapping of this PC offset to the OSR id. This is used to find + // the AST id from the unoptimized code in order to use it as a key into + // the deoptimization input data found in the optimized code. + RecordBackEdge(stmt->OsrEntryId()); + EmitProfilingCounterReset(); + + __ bind(&ok); + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); + // Record a mapping of the OSR id to this PC. This is used if the OSR + // entry becomes the target of a bailout. We don't expect it to be, but + // we want it to work if it is. + PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); +} + + +void FullCodeGenerator::EmitReturnSequence() { + Comment cmnt(masm_, "[ Return sequence"); + if (return_label_.is_bound()) { + __ Branch(&return_label_); + } else { + __ bind(&return_label_); + if (FLAG_trace) { + // Push the return value on the stack as the parameter. + // Runtime::TraceExit returns its parameter in v0. + __ push(v0); + __ CallRuntime(Runtime::kTraceExit, 1); + } + // Pretend that the exit is a backwards jump to the entry. + int weight = 1; + if (info_->ShouldSelfOptimize()) { + weight = FLAG_interrupt_budget / FLAG_self_opt_count; + } else { + int distance = masm_->pc_offset(); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kCodeSizeMultiplier)); + } + EmitProfilingCounterDecrement(weight); + Label ok; + __ Branch(&ok, ge, a3, Operand(zero_reg)); + __ push(v0); + __ Call(isolate()->builtins()->InterruptCheck(), + RelocInfo::CODE_TARGET); + __ pop(v0); + EmitProfilingCounterReset(); + __ bind(&ok); + +#ifdef DEBUG + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); +#endif + // Make sure that the constant pool is not emitted inside of the return + // sequence. 
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + // Here we use masm_-> instead of the __ macro to avoid the code coverage + // tool from instrumenting as we rely on the code size here. + int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize; + CodeGenerator::RecordPositions(masm_, function()->end_position() - 1); + __ RecordJSReturn(); + masm_->mov(sp, fp); + int no_frame_start = masm_->pc_offset(); + masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit())); + masm_->Daddu(sp, sp, Operand(sp_delta)); + masm_->Jump(ra); + info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); + } + +#ifdef DEBUG + // Check that the size of the code used for returning is large enough + // for the debugger's requirements. + DCHECK(Assembler::kJSReturnSequenceInstructions <= + masm_->InstructionsGeneratedSince(&check_exit_codesize)); +#endif + } +} + + +void FullCodeGenerator::EffectContext::Plug(Variable* var) const { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + codegen()->GetVar(result_register(), var); +} + + +void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + codegen()->GetVar(result_register(), var); + __ push(result_register()); +} + + +void FullCodeGenerator::TestContext::Plug(Variable* var) const { + // For simplicity we always test the accumulator register. 
+ codegen()->GetVar(result_register(), var); + codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); + codegen()->DoTest(this); +} + + +void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const { +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug( + Heap::RootListIndex index) const { + __ LoadRoot(result_register(), index); +} + + +void FullCodeGenerator::StackValueContext::Plug( + Heap::RootListIndex index) const { + __ LoadRoot(result_register(), index); + __ push(result_register()); +} + + +void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const { + codegen()->PrepareForBailoutBeforeSplit(condition(), + true, + true_label_, + false_label_); + if (index == Heap::kUndefinedValueRootIndex || + index == Heap::kNullValueRootIndex || + index == Heap::kFalseValueRootIndex) { + if (false_label_ != fall_through_) __ Branch(false_label_); + } else if (index == Heap::kTrueValueRootIndex) { + if (true_label_ != fall_through_) __ Branch(true_label_); + } else { + __ LoadRoot(result_register(), index); + codegen()->DoTest(this); + } +} + + +void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const { +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug( + Handle<Object> lit) const { + __ li(result_register(), Operand(lit)); +} + + +void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const { + // Immediates cannot be pushed directly. + __ li(result_register(), Operand(lit)); + __ push(result_register()); +} + + +void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const { + codegen()->PrepareForBailoutBeforeSplit(condition(), + true, + true_label_, + false_label_); + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. 
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { + if (false_label_ != fall_through_) __ Branch(false_label_); + } else if (lit->IsTrue() || lit->IsJSObject()) { + if (true_label_ != fall_through_) __ Branch(true_label_); + } else if (lit->IsString()) { + if (String::cast(*lit)->length() == 0) { + if (false_label_ != fall_through_) __ Branch(false_label_); + } else { + if (true_label_ != fall_through_) __ Branch(true_label_); + } + } else if (lit->IsSmi()) { + if (Smi::cast(*lit)->value() == 0) { + if (false_label_ != fall_through_) __ Branch(false_label_); + } else { + if (true_label_ != fall_through_) __ Branch(true_label_); + } + } else { + // For simplicity we always test the accumulator register. + __ li(result_register(), Operand(lit)); + codegen()->DoTest(this); + } +} + + +void FullCodeGenerator::EffectContext::DropAndPlug(int count, + Register reg) const { + DCHECK(count > 0); + __ Drop(count); +} + + +void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( + int count, + Register reg) const { + DCHECK(count > 0); + __ Drop(count); + __ Move(result_register(), reg); +} + + +void FullCodeGenerator::StackValueContext::DropAndPlug(int count, + Register reg) const { + DCHECK(count > 0); + if (count > 1) __ Drop(count - 1); + __ sd(reg, MemOperand(sp, 0)); +} + + +void FullCodeGenerator::TestContext::DropAndPlug(int count, + Register reg) const { + DCHECK(count > 0); + // For simplicity we always test the accumulator register. 
+ __ Drop(count); + __ Move(result_register(), reg); + codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); + codegen()->DoTest(this); +} + + +void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, + Label* materialize_false) const { + DCHECK(materialize_true == materialize_false); + __ bind(materialize_true); +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug( + Label* materialize_true, + Label* materialize_false) const { + Label done; + __ bind(materialize_true); + __ LoadRoot(result_register(), Heap::kTrueValueRootIndex); + __ Branch(&done); + __ bind(materialize_false); + __ LoadRoot(result_register(), Heap::kFalseValueRootIndex); + __ bind(&done); +} + + +void FullCodeGenerator::StackValueContext::Plug( + Label* materialize_true, + Label* materialize_false) const { + Label done; + __ bind(materialize_true); + __ LoadRoot(at, Heap::kTrueValueRootIndex); + // Push the value as the following branch can clobber at in long branch mode. + __ push(at); + __ Branch(&done); + __ bind(materialize_false); + __ LoadRoot(at, Heap::kFalseValueRootIndex); + __ push(at); + __ bind(&done); +} + + +void FullCodeGenerator::TestContext::Plug(Label* materialize_true, + Label* materialize_false) const { + DCHECK(materialize_true == true_label_); + DCHECK(materialize_false == false_label_); +} + + +void FullCodeGenerator::EffectContext::Plug(bool flag) const { +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const { + Heap::RootListIndex value_root_index = + flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex; + __ LoadRoot(result_register(), value_root_index); +} + + +void FullCodeGenerator::StackValueContext::Plug(bool flag) const { + Heap::RootListIndex value_root_index = + flag ? 
Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex; + __ LoadRoot(at, value_root_index); + __ push(at); +} + + +void FullCodeGenerator::TestContext::Plug(bool flag) const { + codegen()->PrepareForBailoutBeforeSplit(condition(), + true, + true_label_, + false_label_); + if (flag) { + if (true_label_ != fall_through_) __ Branch(true_label_); + } else { + if (false_label_ != fall_through_) __ Branch(false_label_); + } +} + + +void FullCodeGenerator::DoTest(Expression* condition, + Label* if_true, + Label* if_false, + Label* fall_through) { + __ mov(a0, result_register()); + Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate()); + CallIC(ic, condition->test_id()); + __ mov(at, zero_reg); + Split(ne, v0, Operand(at), if_true, if_false, fall_through); +} + + +void FullCodeGenerator::Split(Condition cc, + Register lhs, + const Operand& rhs, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (if_false == fall_through) { + __ Branch(if_true, cc, lhs, rhs); + } else if (if_true == fall_through) { + __ Branch(if_false, NegateCondition(cc), lhs, rhs); + } else { + __ Branch(if_true, cc, lhs, rhs); + __ Branch(if_false); + } +} + + +MemOperand FullCodeGenerator::StackOperand(Variable* var) { + DCHECK(var->IsStackAllocated()); + // Offset is negative because higher indexes are at lower addresses. + int offset = -var->index() * kPointerSize; + // Adjust by a (parameter or local) base offset. 
+ if (var->IsParameter()) { + offset += (info_->scope()->num_parameters() + 1) * kPointerSize; + } else { + offset += JavaScriptFrameConstants::kLocal0Offset; + } + return MemOperand(fp, offset); +} + + +MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + if (var->IsContextSlot()) { + int context_chain_length = scope()->ContextChainLength(var->scope()); + __ LoadContext(scratch, context_chain_length); + return ContextOperand(scratch, var->index()); + } else { + return StackOperand(var); + } +} + + +void FullCodeGenerator::GetVar(Register dest, Variable* var) { + // Use destination as scratch. + MemOperand location = VarOperand(var, dest); + __ ld(dest, location); +} + + +void FullCodeGenerator::SetVar(Variable* var, + Register src, + Register scratch0, + Register scratch1) { + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!scratch0.is(src)); + DCHECK(!scratch0.is(scratch1)); + DCHECK(!scratch1.is(src)); + MemOperand location = VarOperand(var, scratch0); + __ sd(src, location); + // Emit the write barrier code if the location is in the heap. + if (var->IsContextSlot()) { + __ RecordWriteContextSlot(scratch0, + location.offset(), + src, + scratch1, + kRAHasBeenSaved, + kDontSaveFPRegs); + } +} + + +void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, + bool should_normalize, + Label* if_true, + Label* if_false) { + // Only prepare for bailouts before splits if we're in a test + // context. Otherwise, we let the Visit function deal with the + // preparation to avoid preparing with the same AST id twice. 
+ if (!context()->IsTest() || !info_->IsOptimizable()) return; + + Label skip; + if (should_normalize) __ Branch(&skip); + PrepareForBailout(expr, TOS_REG); + if (should_normalize) { + __ LoadRoot(a4, Heap::kTrueValueRootIndex); + Split(eq, a0, Operand(a4), if_true, if_false, NULL); + __ bind(&skip); + } +} + + +void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { + // The variable in the declaration always resides in the current function + // context. + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); + if (generate_debug_code_) { + // Check that we're not inside a with or catch context. + __ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset)); + __ LoadRoot(a4, Heap::kWithContextMapRootIndex); + __ Check(ne, kDeclarationInWithContext, + a1, Operand(a4)); + __ LoadRoot(a4, Heap::kCatchContextMapRootIndex); + __ Check(ne, kDeclarationInCatchContext, + a1, Operand(a4)); + } +} + + +void FullCodeGenerator::VisitVariableDeclaration( + VariableDeclaration* declaration) { + // If it was not possible to allocate the variable at compile time, we + // need to "declare" it at runtime to make sure it actually exists in the + // local context. + VariableProxy* proxy = declaration->proxy(); + VariableMode mode = declaration->mode(); + Variable* variable = proxy->var(); + bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY; + switch (variable->location()) { + case Variable::UNALLOCATED: + globals_->Add(variable->name(), zone()); + globals_->Add(variable->binding_needs_init() + ? 
isolate()->factory()->the_hole_value() + : isolate()->factory()->undefined_value(), + zone()); + break; + + case Variable::PARAMETER: + case Variable::LOCAL: + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); + __ sd(a4, StackOperand(variable)); + } + break; + + case Variable::CONTEXT: + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); + EmitDebugCheckDeclarationContext(variable); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ sd(at, ContextOperand(cp, variable->index())); + // No write barrier since the_hole_value is in old space. + PrepareForBailoutForId(proxy->id(), NO_REGISTERS); + } + break; + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ VariableDeclaration"); + __ li(a2, Operand(variable->name())); + // Declaration nodes are always introduced in one of four modes. + DCHECK(IsDeclaredVariableMode(mode)); + PropertyAttributes attr = + IsImmutableVariableMode(mode) ? READ_ONLY : NONE; + __ li(a1, Operand(Smi::FromInt(attr))); + // Push initial value, if any. + // Note: For variables we must not push an initial value (such as + // 'undefined') because we may have a (legal) redeclaration and we + // must not destroy the current value. + if (hole_init) { + __ LoadRoot(a0, Heap::kTheHoleValueRootIndex); + __ Push(cp, a2, a1, a0); + } else { + DCHECK(Smi::FromInt(0) == 0); + __ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value. + __ Push(cp, a2, a1, a0); + } + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); + break; + } + } +} + + +void FullCodeGenerator::VisitFunctionDeclaration( + FunctionDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + globals_->Add(variable->name(), zone()); + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); + // Check for stack-overflow exception. 
+ if (function.is_null()) return SetStackOverflow(); + globals_->Add(function, zone()); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + VisitForAccumulatorValue(declaration->fun()); + __ sd(result_register(), StackOperand(variable)); + break; + } + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + EmitDebugCheckDeclarationContext(variable); + VisitForAccumulatorValue(declaration->fun()); + __ sd(result_register(), ContextOperand(cp, variable->index())); + int offset = Context::SlotOffset(variable->index()); + // We know that we have written a function, which is not a smi. + __ RecordWriteContextSlot(cp, + offset, + result_register(), + a2, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(proxy->id(), NO_REGISTERS); + break; + } + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + __ li(a2, Operand(variable->name())); + __ li(a1, Operand(Smi::FromInt(NONE))); + __ Push(cp, a2, a1); + // Push initial value for function declaration. + VisitForStackValue(declaration->fun()); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); + break; + } + } +} + + +void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { + Variable* variable = declaration->proxy()->var(); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); + + // Load instance object. + __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope())); + __ ld(a1, ContextOperand(a1, variable->interface()->Index())); + __ ld(a1, ContextOperand(a1, Context::EXTENSION_INDEX)); + + // Assign it. + __ sd(a1, ContextOperand(cp, variable->index())); + // We know that we have written a module, which is not a smi. 
+ __ RecordWriteContextSlot(cp, + Context::SlotOffset(variable->index()), + a1, + a3, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); +} + + +void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: + // TODO(rossberg) + break; + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ ImportDeclaration"); + EmitDebugCheckDeclarationContext(variable); + // TODO(rossberg) + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::LOOKUP: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) { + // TODO(rossberg) +} + + +void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { + // Call the runtime to declare the globals. + // The context is the first argument. + __ li(a1, Operand(pairs)); + __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags()))); + __ Push(cp, a1, a0); + __ CallRuntime(Runtime::kDeclareGlobals, 3); + // Return value is ignored. +} + + +void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. +} + + +void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + Comment cmnt(masm_, "[ SwitchStatement"); + Breakable nested_statement(this, stmt); + SetStatementPosition(stmt); + + // Keep the switch value on the stack until a case matches. + VisitForStackValue(stmt->tag()); + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); + + ZoneList<CaseClause*>* clauses = stmt->cases(); + CaseClause* default_clause = NULL; // Can occur anywhere in the list. 
+ + Label next_test; // Recycled for each test. + // Compile all the tests with branches to their bodies. + for (int i = 0; i < clauses->length(); i++) { + CaseClause* clause = clauses->at(i); + clause->body_target()->Unuse(); + + // The default is not a test, but remember it as final fall through. + if (clause->is_default()) { + default_clause = clause; + continue; + } + + Comment cmnt(masm_, "[ Case comparison"); + __ bind(&next_test); + next_test.Unuse(); + + // Compile the label expression. + VisitForAccumulatorValue(clause->label()); + __ mov(a0, result_register()); // CompareStub requires args in a0, a1. + + // Perform the comparison as if via '==='. + __ ld(a1, MemOperand(sp, 0)); // Switch value. + bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); + JumpPatchSite patch_site(masm_); + if (inline_smi_code) { + Label slow_case; + __ or_(a2, a1, a0); + patch_site.EmitJumpIfNotSmi(a2, &slow_case); + + __ Branch(&next_test, ne, a1, Operand(a0)); + __ Drop(1); // Switch value is no longer needed. + __ Branch(clause->body_target()); + + __ bind(&slow_case); + } + + // Record position before stub call for type feedback. + SetSourcePosition(clause->position()); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); + CallIC(ic, clause->CompareId()); + patch_site.EmitPatchInfo(); + + Label skip; + __ Branch(&skip); + PrepareForBailout(clause, TOS_REG); + __ LoadRoot(at, Heap::kTrueValueRootIndex); + __ Branch(&next_test, ne, v0, Operand(at)); + __ Drop(1); + __ Branch(clause->body_target()); + __ bind(&skip); + + __ Branch(&next_test, ne, v0, Operand(zero_reg)); + __ Drop(1); // Switch value is no longer needed. + __ Branch(clause->body_target()); + } + + // Discard the test value and jump to the default if present, otherwise to + // the end of the statement. + __ bind(&next_test); + __ Drop(1); // Switch value is no longer needed. 
+ if (default_clause == NULL) { + __ Branch(nested_statement.break_label()); + } else { + __ Branch(default_clause->body_target()); + } + + // Compile all the case bodies. + for (int i = 0; i < clauses->length(); i++) { + Comment cmnt(masm_, "[ Case body"); + CaseClause* clause = clauses->at(i); + __ bind(clause->body_target()); + PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS); + VisitStatements(clause->statements()); + } + + __ bind(nested_statement.break_label()); + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); +} + + +void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { + Comment cmnt(masm_, "[ ForInStatement"); + int slot = stmt->ForInFeedbackSlot(); + SetStatementPosition(stmt); + + Label loop, exit; + ForIn loop_statement(this, stmt); + increment_loop_depth(); + + // Get the object to enumerate over. If the object is null or undefined, skip + // over the loop. See ECMA-262 version 5, section 12.6.4. + VisitForAccumulatorValue(stmt->enumerable()); + __ mov(a0, result_register()); // Result as param to InvokeBuiltin below. + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&exit, eq, a0, Operand(at)); + Register null_value = a5; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ Branch(&exit, eq, a0, Operand(null_value)); + PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); + __ mov(a0, v0); + // Convert the object to a JS object. + Label convert, done_convert; + __ JumpIfSmi(a0, &convert); + __ GetObjectType(a0, a1, a1); + __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ bind(&convert); + __ push(a0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(a0, v0); + __ bind(&done_convert); + __ push(a0); + + // Check for proxies. + Label call_runtime; + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ GetObjectType(a0, a1, a1); + __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE)); + + // Check cache validity in generated code. 
This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. + __ CheckEnumCache(null_value, &call_runtime); + + // The enum cache is valid. Load the map of the object being + // iterated over and use the cache for the iteration. + Label use_cache; + __ ld(v0, FieldMemOperand(a0, HeapObject::kMapOffset)); + __ Branch(&use_cache); + + // Get the set of properties to enumerate. + __ bind(&call_runtime); + __ push(a0); // Duplicate the enumerable object on the stack. + __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); + + // If we got a map from the runtime call, we can do a fast + // modification check. Otherwise, we got a fixed array, and we have + // to do a slow check. + Label fixed_array; + __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kMetaMapRootIndex); + __ Branch(&fixed_array, ne, a2, Operand(at)); + + // We got a map in register v0. Get the enumeration cache from it. + Label no_descriptors; + __ bind(&use_cache); + + __ EnumLength(a1, v0); + __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0))); + + __ LoadInstanceDescriptors(v0, a2); + __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset)); + __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset)); + + // Set up the four remaining stack slots. + __ li(a0, Operand(Smi::FromInt(0))); + // Push map, enumeration cache, enumeration cache length (as smi) and zero. + __ Push(v0, a2, a1, a0); + __ jmp(&loop); + + __ bind(&no_descriptors); + __ Drop(1); + __ jmp(&exit); + + // We got a fixed array in register v0. Iterate through that. 
+ Label non_proxy; + __ bind(&fixed_array); + + __ li(a1, FeedbackVector()); + __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); + __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot))); + + __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check + __ ld(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ GetObjectType(a2, a3, a3); + __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE)); + __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy + __ bind(&non_proxy); + __ Push(a1, v0); // Smi and array + __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset)); + __ li(a0, Operand(Smi::FromInt(0))); + __ Push(a1, a0); // Fixed array length (as smi) and initial index. + + // Generate code for doing the condition check. + PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); + __ bind(&loop); + // Load the current count to a0, load the length to a1. + __ ld(a0, MemOperand(sp, 0 * kPointerSize)); + __ ld(a1, MemOperand(sp, 1 * kPointerSize)); + __ Branch(loop_statement.break_label(), hs, a0, Operand(a1)); + + // Get the current entry of the array into register a3. + __ ld(a2, MemOperand(sp, 2 * kPointerSize)); + __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ SmiScale(a4, a0, kPointerSizeLog2); + __ daddu(a4, a2, a4); // Array base + scaled (smi) index. + __ ld(a3, MemOperand(a4)); // Current entry. + + // Get the expected map from the stack or a smi in the + // permanent slow case into register a2. + __ ld(a2, MemOperand(sp, 3 * kPointerSize)); + + // Check if the expected map still matches that of the enumerable. + // If not, we may have to filter the key. + Label update_each; + __ ld(a1, MemOperand(sp, 4 * kPointerSize)); + __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ Branch(&update_each, eq, a4, Operand(a2)); + + // For proxies, no filtering is done. 
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet. + DCHECK_EQ(Smi::FromInt(0), 0); + __ Branch(&update_each, eq, a2, Operand(zero_reg)); + + // Convert the entry to a string or (smi) 0 if it isn't a property + // any more. If the property has been removed while iterating, we + // just skip it. + __ Push(a1, a3); // Enumerable and current entry. + __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); + __ mov(a3, result_register()); + __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg)); + + // Update the 'each' property or variable from the possibly filtered + // entry in register a3. + __ bind(&update_each); + __ mov(result_register(), a3); + // Perform the assignment as if via '='. + { EffectContext context(this); + EmitAssignment(stmt->each()); + } + + // Generate code for the body of the loop. + Visit(stmt->body()); + + // Generate code for the going to the next element by incrementing + // the index (smi) stored on top of the stack. + __ bind(loop_statement.continue_label()); + __ pop(a0); + __ Daddu(a0, a0, Operand(Smi::FromInt(1))); + __ push(a0); + + EmitBackEdgeBookkeeping(stmt, &loop); + __ Branch(&loop); + + // Remove the pointers stored on the stack. + __ bind(loop_statement.break_label()); + __ Drop(5); + + // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); + __ bind(&exit); + decrement_loop_depth(); +} + + +void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { + Comment cmnt(masm_, "[ ForOfStatement"); + SetStatementPosition(stmt); + + Iteration loop_statement(this, stmt); + increment_loop_depth(); + + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); + + // Loop entry. 
+ __ bind(loop_statement.continue_label()); + + // result = iterator.next() + VisitForEffect(stmt->next_result()); + + // if (result.done) break; + Label result_not_done; + VisitForControl(stmt->result_done(), + loop_statement.break_label(), + &result_not_done, + &result_not_done); + __ bind(&result_not_done); + + // each = result.value + VisitForEffect(stmt->assign_each()); + + // Generate code for the body of the loop. + Visit(stmt->body()); + + // Check stack before looping. + PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS); + EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label()); + __ jmp(loop_statement.continue_label()); + + // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); + __ bind(loop_statement.break_label()); + decrement_loop_depth(); +} + + +void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, + bool pretenure) { + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. If + // we're running with the --always-opt or the --prepare-always-opt + // flag, we need to use the runtime function so that the new function + // we are creating here gets a chance to have its code optimized and + // doesn't just get a copy of the existing unoptimized code. + if (!FLAG_always_opt && + !FLAG_prepare_always_opt && + !pretenure && + scope()->is_function_scope() && + info->num_literals() == 0) { + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); + __ li(a2, Operand(info)); + __ CallStub(&stub); + } else { + __ li(a0, Operand(info)); + __ LoadRoot(a1, pretenure ? 
Heap::kTrueValueRootIndex + : Heap::kFalseValueRootIndex); + __ Push(cp, a0, a1); + __ CallRuntime(Runtime::kNewClosure, 3); + } + context()->Plug(v0); +} + + +void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { + Comment cmnt(masm_, "[ VariableProxy"); + EmitVariableLoad(expr); +} + + +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, + TypeofState typeof_state, + Label* slow) { + Register current = cp; + Register next = a1; + Register temp = a2; + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_sloppy_eval()) { + // Check that extension is NULL. + __ ld(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ Branch(slow, ne, temp, Operand(zero_reg)); + } + // Load next context in chain. + __ ld(next, ContextOperand(current, Context::PREVIOUS_INDEX)); + // Walk the rest of the chain without clobbering cp. + current = next; + } + // If no outer scope calls eval, we do not need to check more + // context extensions. + if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s->is_eval_scope()) { + Label loop, fast; + if (!current.is(next)) { + __ Move(next, current); + } + __ bind(&loop); + // Terminate at native context. + __ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset)); + __ LoadRoot(a4, Heap::kNativeContextMapRootIndex); + __ Branch(&fast, eq, temp, Operand(a4)); + // Check that extension is NULL. + __ ld(temp, ContextOperand(next, Context::EXTENSION_INDEX)); + __ Branch(slow, ne, temp, Operand(zero_reg)); + // Load next context in chain. 
+ __ ld(next, ContextOperand(next, Context::PREVIOUS_INDEX)); + __ Branch(&loop); + __ bind(&fast); + } + + __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ li(LoadIC::NameRegister(), Operand(proxy->var()->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) + ? NOT_CONTEXTUAL + : CONTEXTUAL; + CallLoadIC(mode); +} + + +MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, + Label* slow) { + DCHECK(var->IsContextSlot()); + Register context = cp; + Register next = a3; + Register temp = a4; + + for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_sloppy_eval()) { + // Check that extension is NULL. + __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX)); + __ Branch(slow, ne, temp, Operand(zero_reg)); + } + __ ld(next, ContextOperand(context, Context::PREVIOUS_INDEX)); + // Walk the rest of the chain without clobbering cp. + context = next; + } + } + // Check that last extension is NULL. + __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX)); + __ Branch(slow, ne, temp, Operand(zero_reg)); + + // This function is used only for loads, not stores, so it's safe to + // return an cp-based operand (the write barrier cannot be allowed to + // destroy the cp register). + return ContextOperand(context, var->index()); +} + + +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, + TypeofState typeof_state, + Label* slow, + Label* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. 
+ Variable* var = proxy->var(); + if (var->mode() == DYNAMIC_GLOBAL) { + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); + __ Branch(done); + } else if (var->mode() == DYNAMIC_LOCAL) { + Variable* local = var->local_if_not_shadowed(); + __ ld(v0, ContextSlotOperandCheckExtensions(local, slow)); + if (local->mode() == LET || local->mode() == CONST || + local->mode() == CONST_LEGACY) { + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq. + if (local->mode() == CONST_LEGACY) { + __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); + __ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole. + } else { // LET || CONST + __ Branch(done, ne, at, Operand(zero_reg)); + __ li(a0, Operand(var->name())); + __ push(a0); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } + } + __ Branch(done); + } +} + + +void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { + // Record position before possible IC call. + SetSourcePosition(proxy->position()); + Variable* var = proxy->var(); + + // Three cases: global variables, lookup variables, and all other types of + // variables. + switch (var->location()) { + case Variable::UNALLOCATED: { + Comment cmnt(masm_, "[ Global variable"); + // Use inline caching. Variable name is passed in a2 and the global + // object (receiver) in a0. + __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ li(LoadIC::NameRegister(), Operand(var->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + CallLoadIC(CONTEXTUAL); + context()->Plug(v0); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::CONTEXT: { + Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable" + : "[ Stack variable"); + if (var->binding_needs_init()) { + // var->scope() may be NULL when the proxy is located in eval code and + // refers to a potential outside binding. 
Currently those bindings are + // always looked up dynamically, i.e. in that case + // var->location() == LOOKUP. + // always holds. + DCHECK(var->scope() != NULL); + + // Check if the binding really needs an initialization check. The check + // can be skipped in the following situation: we have a LET or CONST + // binding in harmony mode, both the Variable and the VariableProxy have + // the same declaration scope (i.e. they are both in global code, in the + // same function or in the same eval code) and the VariableProxy is in + // the source physically located after the initializer of the variable. + // + // We cannot skip any initialization checks for CONST in non-harmony + // mode because const variables may be declared but never initialized: + // if (false) { const x; }; var y = x; + // + // The condition on the declaration scopes is a conservative check for + // nested functions that access a binding and are called before the + // binding is initialized: + // function() { f(); let x = 1; function f() { x = 2; } } + // + bool skip_init_check; + if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) { + skip_init_check = false; + } else { + // Check that we always have valid source position. + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); + skip_init_check = var->mode() != CONST_LEGACY && + var->initializer_position() < proxy->position(); + } + + if (!skip_init_check) { + // Let and const need a read barrier. + GetVar(v0, var); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq. + if (var->mode() == LET || var->mode() == CONST) { + // Throw a reference error when using an uninitialized let/const + // binding in harmony mode. 
+ Label done; + __ Branch(&done, ne, at, Operand(zero_reg)); + __ li(a0, Operand(var->name())); + __ push(a0); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + __ bind(&done); + } else { + // Uninitalized const bindings outside of harmony mode are unholed. + DCHECK(var->mode() == CONST_LEGACY); + __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); + __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole. + } + context()->Plug(v0); + break; + } + } + context()->Plug(var); + break; + } + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ Lookup variable"); + Label done, slow; + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); + __ bind(&slow); + __ li(a1, Operand(var->name())); + __ Push(cp, a1); // Context and name. + __ CallRuntime(Runtime::kLoadLookupSlot, 2); + __ bind(&done); + context()->Plug(v0); + } + } +} + + +void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { + Comment cmnt(masm_, "[ RegExpLiteral"); + Label materialized; + // Registers will be used as follows: + // a5 = materialized value (RegExp literal) + // a4 = JS function, literals array + // a3 = literal index + // a2 = RegExp pattern + // a1 = RegExp flags + // a0 = RegExp literal clone + __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset)); + int literal_offset = + FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; + __ ld(a5, FieldMemOperand(a4, literal_offset)); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&materialized, ne, a5, Operand(at)); + + // Create regexp literal using runtime function. + // Result will be in v0. 
+ __ li(a3, Operand(Smi::FromInt(expr->literal_index()))); + __ li(a2, Operand(expr->pattern())); + __ li(a1, Operand(expr->flags())); + __ Push(a4, a3, a2, a1); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); + __ mov(a5, v0); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ li(a0, Operand(Smi::FromInt(size))); + __ Push(a5, a0); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + __ pop(a5); + + __ bind(&allocated); + + // After this, registers are used as follows: + // v0: Newly allocated regexp. + // a5: Materialized regexp. + // a2: temp. + __ CopyFields(v0, a5, a2.bit(), size / kPointerSize); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitAccessor(Expression* expression) { + if (expression == NULL) { + __ LoadRoot(a1, Heap::kNullValueRootIndex); + __ push(a1); + } else { + VisitForStackValue(expression); + } +} + + +void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { + Comment cmnt(masm_, "[ ObjectLiteral"); + + expr->BuildConstantProperties(isolate()); + Handle<FixedArray> constant_properties = expr->constant_properties(); + __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); + __ li(a2, Operand(Smi::FromInt(expr->literal_index()))); + __ li(a1, Operand(constant_properties)); + int flags = expr->fast_elements() + ? ObjectLiteral::kFastElements + : ObjectLiteral::kNoFlags; + flags |= expr->has_function() + ? 
ObjectLiteral::kHasFunction + : ObjectLiteral::kNoFlags; + __ li(a0, Operand(Smi::FromInt(flags))); + int properties_count = constant_properties->length() / 2; + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements || + properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ Push(a3, a2, a1, a0); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); + } else { + FastCloneShallowObjectStub stub(isolate(), properties_count); + __ CallStub(&stub); + } + + // If result_saved is true the result is on top of the stack. If + // result_saved is false the result is in v0. + bool result_saved = false; + + // Mark all computed expressions that are bound to a key that + // is shadowed by a later occurrence of the same key. For the + // marked expressions, no store code is emitted. + expr->CalculateEmitStore(zone()); + + AccessorTable accessor_table(zone()); + for (int i = 0; i < expr->properties()->length(); i++) { + ObjectLiteral::Property* property = expr->properties()->at(i); + if (property->IsCompileTimeValue()) continue; + + Literal* key = property->key(); + Expression* value = property->value(); + if (!result_saved) { + __ push(v0); // Save result on stack. + result_saved = true; + } + switch (property->kind()) { + case ObjectLiteral::Property::CONSTANT: + UNREACHABLE(); + case ObjectLiteral::Property::MATERIALIZED_LITERAL: + DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value())); + // Fall through. 
+ case ObjectLiteral::Property::COMPUTED: + if (key->value()->IsInternalizedString()) { + if (property->emit_store()) { + VisitForAccumulatorValue(value); + __ mov(StoreIC::ValueRegister(), result_register()); + DCHECK(StoreIC::ValueRegister().is(a0)); + __ li(StoreIC::NameRegister(), Operand(key->value())); + __ ld(StoreIC::ReceiverRegister(), MemOperand(sp)); + CallStoreIC(key->LiteralFeedbackId()); + PrepareForBailoutForId(key->id(), NO_REGISTERS); + } else { + VisitForEffect(value); + } + break; + } + // Duplicate receiver on stack. + __ ld(a0, MemOperand(sp)); + __ push(a0); + VisitForStackValue(key); + VisitForStackValue(value); + if (property->emit_store()) { + __ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes. + __ push(a0); + __ CallRuntime(Runtime::kSetProperty, 4); + } else { + __ Drop(3); + } + break; + case ObjectLiteral::Property::PROTOTYPE: + // Duplicate receiver on stack. + __ ld(a0, MemOperand(sp)); + __ push(a0); + VisitForStackValue(value); + if (property->emit_store()) { + __ CallRuntime(Runtime::kSetPrototype, 2); + } else { + __ Drop(2); + } + break; + case ObjectLiteral::Property::GETTER: + accessor_table.lookup(key)->second->getter = value; + break; + case ObjectLiteral::Property::SETTER: + accessor_table.lookup(key)->second->setter = value; + break; + } + } + + // Emit code to define accessors, using only a single call to the runtime for + // each pair of corresponding getters and setters. + for (AccessorTable::Iterator it = accessor_table.begin(); + it != accessor_table.end(); + ++it) { + __ ld(a0, MemOperand(sp)); // Duplicate receiver. 
+ __ push(a0); + VisitForStackValue(it->first); + EmitAccessor(it->second->getter); + EmitAccessor(it->second->setter); + __ li(a0, Operand(Smi::FromInt(NONE))); + __ push(a0); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); + } + + if (expr->has_function()) { + DCHECK(result_saved); + __ ld(a0, MemOperand(sp)); + __ push(a0); + __ CallRuntime(Runtime::kToFastProperties, 1); + } + + if (result_saved) { + context()->PlugTOS(); + } else { + context()->Plug(v0); + } +} + + +void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { + Comment cmnt(masm_, "[ ArrayLiteral"); + + expr->BuildConstantElements(isolate()); + int flags = expr->depth() == 1 + ? ArrayLiteral::kShallowElements + : ArrayLiteral::kNoFlags; + + ZoneList<Expression*>* subexprs = expr->values(); + int length = subexprs->length(); + + Handle<FixedArray> constant_elements = expr->constant_elements(); + DCHECK_EQ(2, constant_elements->length()); + ElementsKind constant_elements_kind = + static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); + bool has_fast_elements = + IsFastObjectElementsKind(constant_elements_kind); + Handle<FixedArrayBase> constant_elements_values( + FixedArrayBase::cast(constant_elements->get(1))); + + AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE; + if (has_fast_elements && !FLAG_allocation_site_pretenuring) { + // If the only customer of allocation sites is transitioning, then + // we can turn it off if we don't have anywhere else to transition to. 
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; + } + + __ mov(a0, result_register()); + __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); + __ li(a2, Operand(Smi::FromInt(expr->literal_index()))); + __ li(a1, Operand(constant_elements)); + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { + __ li(a0, Operand(Smi::FromInt(flags))); + __ Push(a3, a2, a1, a0); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); + } else { + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); + __ CallStub(&stub); + } + + bool result_saved = false; // Is the result saved to the stack? + + // Emit code to evaluate all the non-constant subexpressions and to store + // them into the newly cloned array. + for (int i = 0; i < length; i++) { + Expression* subexpr = subexprs->at(i); + // If the subexpression is a literal or a simple materialized literal it + // is already set in the cloned array. + if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; + + if (!result_saved) { + __ push(v0); // array literal + __ Push(Smi::FromInt(expr->literal_index())); + result_saved = true; + } + + VisitForAccumulatorValue(subexpr); + + if (IsFastObjectElementsKind(constant_elements_kind)) { + int offset = FixedArray::kHeaderSize + (i * kPointerSize); + __ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal. + __ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset)); + __ sd(result_register(), FieldMemOperand(a1, offset)); + // Update the write barrier for the array store. 
+ __ RecordWriteField(a1, offset, result_register(), a2, + kRAHasBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); + } else { + __ li(a3, Operand(Smi::FromInt(i))); + __ mov(a0, result_register()); + StoreArrayLiteralElementStub stub(isolate()); + __ CallStub(&stub); + } + + PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); + } + if (result_saved) { + __ Pop(); // literal index + context()->PlugTOS(); + } else { + context()->Plug(v0); + } +} + + +void FullCodeGenerator::VisitAssignment(Assignment* expr) { + DCHECK(expr->target()->IsValidReferenceExpression()); + + Comment cmnt(masm_, "[ Assignment"); + + // Left-hand side can only be a property, a global or a (parameter or local) + // slot. + enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* property = expr->target()->AsProperty(); + if (property != NULL) { + assign_type = (property->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; + } + + // Evaluate LHS expression. + switch (assign_type) { + case VARIABLE: + // Nothing to do here. + break; + case NAMED_PROPERTY: + if (expr->is_compound()) { + // We need the receiver both on the stack and in the register. + VisitForStackValue(property->obj()); + __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); + } else { + VisitForStackValue(property->obj()); + } + break; + case KEYED_PROPERTY: + // We need the key and receiver on both the stack and in v0 and a1. + if (expr->is_compound()) { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); + __ ld(LoadIC::NameRegister(), MemOperand(sp, 0)); + } else { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + } + break; + } + + // For compound assignments we need another deoptimization point after the + // variable/property load. 
+ if (expr->is_compound()) { + { AccumulatorValueContext context(this); + switch (assign_type) { + case VARIABLE: + EmitVariableLoad(expr->target()->AsVariableProxy()); + PrepareForBailout(expr->target(), TOS_REG); + break; + case NAMED_PROPERTY: + EmitNamedPropertyLoad(property); + PrepareForBailoutForId(property->LoadId(), TOS_REG); + break; + case KEYED_PROPERTY: + EmitKeyedPropertyLoad(property); + PrepareForBailoutForId(property->LoadId(), TOS_REG); + break; + } + } + + Token::Value op = expr->binary_op(); + __ push(v0); // Left operand goes on the stack. + VisitForAccumulatorValue(expr->value()); + + OverwriteMode mode = expr->value()->ResultOverwriteAllowed() + ? OVERWRITE_RIGHT + : NO_OVERWRITE; + SetSourcePosition(expr->position() + 1); + AccumulatorValueContext context(this); + if (ShouldInlineSmiCase(op)) { + EmitInlineSmiBinaryOp(expr->binary_operation(), + op, + mode, + expr->target(), + expr->value()); + } else { + EmitBinaryOp(expr->binary_operation(), op, mode); + } + + // Deoptimization point in case the binary operation may have side effects. + PrepareForBailout(expr->binary_operation(), TOS_REG); + } else { + VisitForAccumulatorValue(expr->value()); + } + + // Record source position before possible IC call. + SetSourcePosition(expr->position()); + + // Store the value. + switch (assign_type) { + case VARIABLE: + EmitVariableAssignment(expr->target()->AsVariableProxy()->var(), + expr->op()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(v0); + break; + case NAMED_PROPERTY: + EmitNamedPropertyAssignment(expr); + break; + case KEYED_PROPERTY: + EmitKeyedPropertyAssignment(expr); + break; + } +} + + +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. 
+ VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::SUSPEND: + // Pop value from top-of-stack slot; box result into result register. + EmitCreateIteratorResult(false); + __ push(result_register()); + // Fall through. + case Yield::INITIAL: { + Label suspend, continuation, post_runtime, resume; + + __ jmp(&suspend); + + __ bind(&continuation); + __ jmp(&resume); + + __ bind(&suspend); + VisitForAccumulatorValue(expr->generator_object()); + DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); + __ li(a1, Operand(Smi::FromInt(continuation.pos()))); + __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset)); + __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset)); + __ mov(a1, cp); + __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2, + kRAHasBeenSaved, kDontSaveFPRegs); + __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset)); + __ Branch(&post_runtime, eq, sp, Operand(a1)); + __ push(v0); // generator object + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ bind(&post_runtime); + __ pop(result_register()); + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); + __ sd(a1, FieldMemOperand(result_register(), + JSGeneratorObject::kContinuationOffset)); + // Pop value from top-of-stack slot, box result into result register. 
+ EmitCreateIteratorResult(true); + EmitUnwindBeforeReturn(); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: { + VisitForStackValue(expr->generator_object()); + + // Initial stack layout is as follows: + // [sp + 1 * kPointerSize] iter + // [sp + 0 * kPointerSize] g + + Label l_catch, l_try, l_suspend, l_continuation, l_resume; + Label l_next, l_call; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + // Initial send value is undefined. + __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); + __ Branch(&l_next); + + // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } + __ bind(&l_catch); + __ mov(a0, v0); + handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); + __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw" + __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter + __ Push(a2, a3, a0); // "throw", iter, except + __ jmp(&l_call); + + // try { received = %yield result } + // Shuffle the received result above a try handler and yield it without + // re-boxing. 
+ __ bind(&l_try); + __ pop(a0); // result + __ PushTryHandler(StackHandler::CATCH, expr->index()); + const int handler_size = StackHandlerConstants::kSize; + __ push(a0); // result + __ jmp(&l_suspend); + __ bind(&l_continuation); + __ mov(a0, v0); + __ jmp(&l_resume); + __ bind(&l_suspend); + const int generator_object_depth = kPointerSize + handler_size; + __ ld(a0, MemOperand(sp, generator_object_depth)); + __ push(a0); // g + DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); + __ li(a1, Operand(Smi::FromInt(l_continuation.pos()))); + __ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset)); + __ sd(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset)); + __ mov(a1, cp); + __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2, + kRAHasBeenSaved, kDontSaveFPRegs); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ pop(v0); // result + EmitReturnSequence(); + __ mov(a0, v0); + __ bind(&l_resume); // received in a0 + __ PopTryHandler(); + + // receiver = iter; f = 'next'; arg = received; + __ bind(&l_next); + __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next" + __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter + __ Push(load_name, a3, a0); // "next", iter, received + + // result = receiver[f](arg); + __ bind(&l_call); + __ ld(load_receiver, MemOperand(sp, kPointerSize)); + __ ld(load_name, MemOperand(sp, 2 * kPointerSize)); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot()))); + } + Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + CallIC(ic, TypeFeedbackId::None()); + __ mov(a0, v0); + __ mov(a1, a0); + __ sd(a1, MemOperand(sp, 2 * kPointerSize)); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); + __ CallStub(&stub); + + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ Drop(1); // The function is still on the 
stack; drop it. + + // if (!result.done) goto l_try; + __ Move(load_receiver, v0); + + __ push(load_receiver); // save result + __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->DoneFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // v0=result.done + __ mov(a0, v0); + Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); + CallIC(bool_ic); + __ Branch(&l_try, eq, v0, Operand(zero_reg)); + + // result.value + __ pop(load_receiver); // result + __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->ValueFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // v0=result.value + context()->DropAndPlug(2, v0); // drop iter and g + break; + } + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in a0, and is ultimately read by the resumed generator, as + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it + // is read to throw the value when the resumed generator is already closed. + // a1 will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(a1); + + // Check generator state. + Label wrong_state, closed_state, done; + __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0); + __ Branch(&closed_state, eq, a3, Operand(zero_reg)); + __ Branch(&wrong_state, lt, a3, Operand(zero_reg)); + + // Load suspended function and context. 
+ __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset)); + __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + + // Load receiver and store as the first argument. + __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); + __ push(a2); + + // Push holes for the rest of the arguments to the generator function. + __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); + // The argument count is stored as int32_t on 64-bit platforms. + // TODO(plind): Smi on 32-bit platforms. + __ lw(a3, + FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadRoot(a2, Heap::kTheHoleValueRootIndex); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ Dsubu(a3, a3, Operand(1)); + __ Branch(&push_frame, lt, a3, Operand(zero_reg)); + __ push(a2); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. + Label resume_frame; + __ bind(&push_frame); + __ Call(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + // ra = return address. + // fp = caller's frame pointer. + // cp = callee's context, + // a4 = callee's JS function. + __ Push(ra, fp, cp, a4); + // Adjust FP to point to saved FP. + __ Daddu(fp, sp, 2 * kPointerSize); + + // Load the operand stack size. + __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset)); + __ ld(a3, FieldMemOperand(a3, FixedArray::kLengthOffset)); + __ SmiUntag(a3); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. 
+ if (resume_mode == JSGeneratorObject::NEXT) { + Label slow_resume; + __ Branch(&slow_resume, ne, a3, Operand(zero_reg)); + __ ld(a3, FieldMemOperand(a4, JSFunction::kCodeEntryOffset)); + __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); + __ SmiUntag(a2); + __ Daddu(a3, a3, Operand(a2)); + __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); + __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); + __ Jump(a3); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. + Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ Dsubu(a3, a3, Operand(1)); + __ Branch(&call_resume, lt, a3, Operand(zero_reg)); + __ push(a2); + __ Branch(&push_operand_holes); + __ bind(&call_resume); + DCHECK(!result_register().is(a1)); + __ Push(a1, result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ stop("not-reached"); + + // Reach here when generator is closed. + __ bind(&closed_state); + if (resume_mode == JSGeneratorObject::NEXT) { + // Return completed iterator result when generator is closed. + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ push(a2); + // Pop value from top-of-stack slot; box result into result register. + EmitCreateIteratorResult(true); + } else { + // Throw the provided value. + __ push(a0); + __ CallRuntime(Runtime::kThrow, 1); + } + __ jmp(&done); + + // Throw error if we attempt to operate on a running generator. 
+ __ bind(&wrong_state); + __ push(a1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + +void FullCodeGenerator::EmitCreateIteratorResult(bool done) { + Label gc_required; + Label allocated; + + Handle<Map> map(isolate()->native_context()->iterator_result_map()); + + __ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&gc_required); + __ Push(Smi::FromInt(map->instance_size())); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + __ ld(context_register(), + MemOperand(fp, StandardFrameConstants::kContextOffset)); + + __ bind(&allocated); + __ li(a1, Operand(map)); + __ pop(a2); + __ li(a3, Operand(isolate()->factory()->ToBoolean(done))); + __ li(a4, Operand(isolate()->factory()->empty_fixed_array())); + DCHECK_EQ(map->instance_size(), 5 * kPointerSize); + __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset)); + __ sd(a2, + FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset)); + __ sd(a3, + FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset)); + + // Only the value field needs a write barrier, as the other values are in the + // root set. 
+ __ RecordWriteField(v0, JSGeneratorObject::kResultValuePropertyOffset, + a2, a3, kRAHasBeenSaved, kDontSaveFPRegs); +} + + +void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { + SetSourcePosition(prop->position()); + Literal* key = prop->key()->AsLiteral(); + __ li(LoadIC::NameRegister(), Operand(key->value())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } +} + + +void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { + SetSourcePosition(prop->position()); + // Call keyed load IC. It has register arguments receiver and key. + Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallIC(ic); + } else { + CallIC(ic, prop->PropertyFeedbackId()); + } +} + + +void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, + Token::Value op, + OverwriteMode mode, + Expression* left_expr, + Expression* right_expr) { + Label done, smi_case, stub_call; + + Register scratch1 = a2; + Register scratch2 = a3; + + // Get the arguments. + Register left = a1; + Register right = a0; + __ pop(left); + __ mov(a0, result_register()); + + // Perform combined smi check on both operands. + __ Or(scratch1, left, Operand(right)); + STATIC_ASSERT(kSmiTag == 0); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(scratch1, &smi_case); + + __ bind(&stub_call); + BinaryOpICStub stub(isolate(), op, mode); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); + patch_site.EmitPatchInfo(); + __ jmp(&done); + + __ bind(&smi_case); + // Smi case. 
This code works the same way as the smi-smi case in the type + // recording binary operation stub, see + switch (op) { + case Token::SAR: + __ GetLeastBitsFromSmi(scratch1, right, 5); + __ dsrav(right, left, scratch1); + __ And(v0, right, Operand(0xffffffff00000000L)); + break; + case Token::SHL: { + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ dsllv(scratch1, scratch1, scratch2); + __ SmiTag(v0, scratch1); + break; + } + case Token::SHR: { + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ dsrlv(scratch1, scratch1, scratch2); + __ And(scratch2, scratch1, 0x80000000); + __ Branch(&stub_call, ne, scratch2, Operand(zero_reg)); + __ SmiTag(v0, scratch1); + break; + } + case Token::ADD: + __ AdduAndCheckForOverflow(v0, left, right, scratch1); + __ BranchOnOverflow(&stub_call, scratch1); + break; + case Token::SUB: + __ SubuAndCheckForOverflow(v0, left, right, scratch1); + __ BranchOnOverflow(&stub_call, scratch1); + break; + case Token::MUL: { + __ Dmulh(v0, left, right); + __ dsra32(scratch2, v0, 0); + __ sra(scratch1, v0, 31); + __ Branch(USE_DELAY_SLOT, &stub_call, ne, scratch2, Operand(scratch1)); + __ SmiTag(v0); + __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg)); + __ Daddu(scratch2, right, left); + __ Branch(&stub_call, lt, scratch2, Operand(zero_reg)); + DCHECK(Smi::FromInt(0) == 0); + __ mov(v0, zero_reg); + break; + } + case Token::BIT_OR: + __ Or(v0, left, Operand(right)); + break; + case Token::BIT_AND: + __ And(v0, left, Operand(right)); + break; + case Token::BIT_XOR: + __ Xor(v0, left, Operand(right)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, + Token::Value op, + OverwriteMode mode) { + __ mov(a0, result_register()); + __ pop(a1); + BinaryOpICStub stub(isolate(), op, mode); + JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. 
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); + patch_site.EmitPatchInfo(); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitAssignment(Expression* expr) { + DCHECK(expr->IsValidReferenceExpression()); + + // Left-hand side can only be a property, a global or a (parameter or local) + // slot. + enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* prop = expr->AsProperty(); + if (prop != NULL) { + assign_type = (prop->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; + } + + switch (assign_type) { + case VARIABLE: { + Variable* var = expr->AsVariableProxy()->var(); + EffectContext context(this); + EmitVariableAssignment(var, Token::ASSIGN); + break; + } + case NAMED_PROPERTY: { + __ push(result_register()); // Preserve value. + VisitForAccumulatorValue(prop->obj()); + __ mov(StoreIC::ReceiverRegister(), result_register()); + __ pop(StoreIC::ValueRegister()); // Restore value. + __ li(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); + CallStoreIC(); + break; + } + case KEYED_PROPERTY: { + __ push(result_register()); // Preserve value. + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ Move(KeyedStoreIC::NameRegister(), result_register()); + __ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister()); + Handle<Code> ic = strict_mode() == SLOPPY + ? isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + CallIC(ic); + break; + } + } + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot( + Variable* var, MemOperand location) { + __ sd(result_register(), location); + if (var->IsContextSlot()) { + // RecordWrite may destroy all its register arguments. 
+ __ Move(a3, result_register()); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot( + a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs); + } +} + + +void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { + if (var->IsUnallocated()) { + // Global var, const, or let. + __ mov(StoreIC::ValueRegister(), result_register()); + __ li(StoreIC::NameRegister(), Operand(var->name())); + __ ld(StoreIC::ReceiverRegister(), GlobalObjectOperand()); + CallStoreIC(); + } else if (op == Token::INIT_CONST_LEGACY) { + // Const initializers need a write barrier. + DCHECK(!var->IsParameter()); // No const parameters. + if (var->IsLookupSlot()) { + __ li(a0, Operand(var->name())); + __ Push(v0, cp, a0); // Context and name. + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); + } else { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label skip; + MemOperand location = VarOperand(var, a1); + __ ld(a2, location); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ Branch(&skip, ne, a2, Operand(at)); + EmitStoreToStackLocalOrContextSlot(var, location); + __ bind(&skip); + } + + } else if (var->mode() == LET && op != Token::INIT_LET) { + // Non-initializing assignment to let variable needs a write barrier. + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, a1); + __ ld(a3, location); + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); + __ Branch(&assign, ne, a3, Operand(a4)); + __ li(a3, Operand(var->name())); + __ push(a3); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + // Perform the assignment. + __ bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); + + } else if (!var->is_const_mode() || op == Token::INIT_CONST) { + if (var->IsLookupSlot()) { + // Assignment to var. + __ li(a4, Operand(var->name())); + __ li(a3, Operand(Smi::FromInt(strict_mode()))); + // jssp[0] : mode. 
+ // jssp[8] : name. + // jssp[16] : context. + // jssp[24] : value. + __ Push(v0, cp, a4, a3); + __ CallRuntime(Runtime::kStoreLookupSlot, 4); + } else { + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK((var->IsStackAllocated() || var->IsContextSlot())); + MemOperand location = VarOperand(var, a1); + if (generate_debug_code_ && op == Token::INIT_LET) { + // Check for an uninitialized let binding. + __ ld(a2, location); + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); + __ Check(eq, kLetBindingReInitialization, a2, Operand(a4)); + } + EmitStoreToStackLocalOrContextSlot(var, location); + } + } + // Non-initializing assignments to consts are ignored. +} + + +void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a named store IC. + Property* prop = expr->target()->AsProperty(); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); + + // Record source code position before IC call. + SetSourcePosition(expr->position()); + __ mov(StoreIC::ValueRegister(), result_register()); + __ li(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value())); + __ pop(StoreIC::ReceiverRegister()); + CallStoreIC(expr->AssignmentFeedbackId()); + + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a keyed store IC. + + // Record source code position before IC call. + SetSourcePosition(expr->position()); + // Call keyed store IC. + // The arguments are: + // - a0 is the value, + // - a1 is the key, + // - a2 is the receiver. + __ mov(KeyedStoreIC::ValueRegister(), result_register()); + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(a0)); + + Handle<Code> ic = strict_mode() == SLOPPY + ? 
isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + CallIC(ic, expr->AssignmentFeedbackId()); + + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(v0); +} + + +void FullCodeGenerator::VisitProperty(Property* expr) { + Comment cmnt(masm_, "[ Property"); + Expression* key = expr->key(); + + if (key->IsPropertyName()) { + VisitForAccumulatorValue(expr->obj()); + __ Move(LoadIC::ReceiverRegister(), v0); + EmitNamedPropertyLoad(expr); + PrepareForBailoutForId(expr->LoadId(), TOS_REG); + context()->Plug(v0); + } else { + VisitForStackValue(expr->obj()); + VisitForAccumulatorValue(expr->key()); + __ Move(LoadIC::NameRegister(), v0); + __ pop(LoadIC::ReceiverRegister()); + EmitKeyedPropertyLoad(expr); + context()->Plug(v0); + } +} + + +void FullCodeGenerator::CallIC(Handle<Code> code, + TypeFeedbackId id) { + ic_total_count_++; + __ Call(code, RelocInfo::CODE_TARGET, id); +} + + +// Code common for calls using the IC. +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { + Expression* callee = expr->expression(); + + CallIC::CallType call_type = callee->IsVariableProxy() + ? CallIC::FUNCTION + : CallIC::METHOD; + + // Get the target function. + if (call_type == CallIC::FUNCTION) { + { StackValueContext context(this); + EmitVariableLoad(callee->AsVariableProxy()); + PrepareForBailout(callee, NO_REGISTERS); + } + // Push undefined as receiver. This is patched in the method prologue if it + // is a sloppy mode method. + __ Push(isolate()->factory()->undefined_value()); + } else { + // Load the function from the receiver. + DCHECK(callee->IsProperty()); + __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); + EmitNamedPropertyLoad(callee->AsProperty()); + PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); + // Push the target function under the receiver. 
+ __ ld(at, MemOperand(sp, 0)); + __ push(at); + __ sd(v0, MemOperand(sp, kPointerSize)); + } + + EmitCall(expr, call_type); +} + + +// Code common for calls using the IC. +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { + // Load the key. + VisitForAccumulatorValue(key); + + Expression* callee = expr->expression(); + + // Load the function from the receiver. + DCHECK(callee->IsProperty()); + __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); + __ Move(LoadIC::NameRegister(), v0); + EmitKeyedPropertyLoad(callee->AsProperty()); + PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); + + // Push the target function under the receiver. + __ ld(at, MemOperand(sp, 0)); + __ push(at); + __ sd(v0, MemOperand(sp, kPointerSize)); + + EmitCall(expr, CallIC::METHOD); +} + + +void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + { PreservePositionScope scope(masm()->positions_recorder()); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + } + + // Record source position of the IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); + __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); + __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); + RecordJSReturnSite(expr); + // Restore context register. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + context()->DropAndPlug(1, v0); +} + + +void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) { + // a6: copy of the first argument or undefined if it doesn't exist. 
+ if (arg_count > 0) { + __ ld(a6, MemOperand(sp, arg_count * kPointerSize)); + } else { + __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); + } + + // a5: the receiver of the enclosing function. + int receiver_offset = 2 + info_->scope()->num_parameters(); + __ ld(a5, MemOperand(fp, receiver_offset * kPointerSize)); + + // a4: the strict mode. + __ li(a4, Operand(Smi::FromInt(strict_mode()))); + + // a1: the start position of the scope the calls resides in. + __ li(a1, Operand(Smi::FromInt(scope()->start_position()))); + + // Do the runtime call. + __ Push(a6, a5, a4, a1); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); +} + + +void FullCodeGenerator::VisitCall(Call* expr) { +#ifdef DEBUG + // We want to verify that RecordJSReturnSite gets called on all paths + // through this function. Avoid early returns. + expr->return_is_recorded_ = false; +#endif + + Comment cmnt(masm_, "[ Call"); + Expression* callee = expr->expression(); + Call::CallType call_type = expr->GetCallType(isolate()); + + if (call_type == Call::POSSIBLY_EVAL_CALL) { + // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval + // to resolve the function we need to call and the receiver of the + // call. Then we call the resolved function using the given + // arguments. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + + { PreservePositionScope pos_scope(masm()->positions_recorder()); + VisitForStackValue(callee); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ push(a2); // Reserved receiver slot. + + // Push the arguments. + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Push a copy of the function (found below the arguments) and + // resolve eval. + __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ push(a1); + EmitResolvePossiblyDirectEval(arg_count); + + // The runtime call returns a pair of values in v0 (function) and + // v1 (receiver). 
Touch up the stack with the right values. + __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ sd(v1, MemOperand(sp, arg_count * kPointerSize)); + } + // Record source position for debugger. + SetSourcePosition(expr->position()); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); + __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ CallStub(&stub); + RecordJSReturnSite(expr); + // Restore context register. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + context()->DropAndPlug(1, v0); + } else if (call_type == Call::GLOBAL_CALL) { + EmitCallWithLoadIC(expr); + } else if (call_type == Call::LOOKUP_SLOT_CALL) { + // Call to a lookup slot (dynamically introduced variable). + VariableProxy* proxy = callee->AsVariableProxy(); + Label slow, done; + + { PreservePositionScope scope(masm()->positions_recorder()); + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); + } + + __ bind(&slow); + // Call the runtime to find the function to call (returned in v0) + // and the object holding it (returned in v1). + DCHECK(!context_register().is(a2)); + __ li(a2, Operand(proxy->name())); + __ Push(context_register(), a2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); + __ Push(v0, v1); // Function, receiver. + + // If fast case code has been generated, emit code to push the + // function and receiver and have the slow path jump around this + // code. + if (done.is_linked()) { + Label call; + __ Branch(&call); + __ bind(&done); + // Push function. + __ push(v0); + // The receiver is implicitly the global receiver. Indicate this + // by passing the hole to the call function stub. + __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); + __ push(a1); + __ bind(&call); + } + + // The receiver is either the global receiver or an object found + // by LoadContextSlot. 
+ EmitCall(expr); + } else if (call_type == Call::PROPERTY_CALL) { + Property* property = callee->AsProperty(); + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(property->obj()); + } + if (property->key()->IsPropertyName()) { + EmitCallWithLoadIC(expr); + } else { + EmitKeyedCallWithLoadIC(expr, property->key()); + } + } else { + DCHECK(call_type == Call::OTHER_CALL); + // Call to an arbitrary expression not handled specially above. + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(callee); + } + __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); + __ push(a1); + // Emit function call. + EmitCall(expr); + } + +#ifdef DEBUG + // RecordJSReturnSite should have been called. + DCHECK(expr->return_is_recorded_); +#endif +} + + +void FullCodeGenerator::VisitCallNew(CallNew* expr) { + Comment cmnt(masm_, "[ CallNew"); + // According to ECMA-262, section 11.2.2, page 44, the function + // expression in new calls must be evaluated before the + // arguments. + + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. + VisitForStackValue(expr->expression()); + + // Push the arguments ("left-to-right") on the stack. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + // Call the construct call builtin that handles allocation and + // constructor invocation. + SetSourcePosition(expr->position()); + + // Load function and argument count into a1 and a0. + __ li(a0, Operand(arg_count)); + __ ld(a1, MemOperand(sp, arg_count * kPointerSize)); + + // Record call targets in unoptimized code. 
+ if (FLAG_pretenuring_call_new) { + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == + expr->CallNewFeedbackSlot() + 1); + } + + __ li(a2, FeedbackVector()); + __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot()))); + + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + PrepareForBailoutForId(expr->ReturnId(), TOS_REG); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ SmiTst(v0, a4); + Split(eq, a4, Operand(zero_reg), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ NonNegativeSmiTst(v0, at); + Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = 
NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ LoadRoot(at, Heap::kNullValueRootIndex); + __ Branch(if_true, eq, v0, Operand(at)); + __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined when tested with typeof. + __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset)); + __ And(at, a1, Operand(1 << Map::kIsUndetectable)); + __ Branch(if_false, ne, at, Operand(zero_reg)); + __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset)); + __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE), + if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ GetObjectType(v0, a1, a1); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE), + if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, 
&if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ And(at, a1, Operand(1 << Map::kIsUndetectable)); + Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( + CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false, skip_lookup; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ AssertNotSmi(v0); + + __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ lbu(a4, FieldMemOperand(a1, Map::kBitField2Offset)); + __ And(a4, a4, 1 << Map::kStringWrapperSafeForDefaultValueOf); + __ Branch(&skip_lookup, ne, a4, Operand(zero_reg)); + + // Check for fast case object. Generate false result for slow case object. + __ ld(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset)); + __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ LoadRoot(a4, Heap::kHashTableMapRootIndex); + __ Branch(if_false, eq, a2, Operand(a4)); + + // Look for valueOf name in the descriptor array, and indicate false if + // found. Since we omit an enumeration index check, if it is added via a + // transition that shares its descriptor array, this is a false positive. + Label entry, loop, done; + + // Skip loop if no descriptors are valid. + __ NumberOfOwnDescriptors(a3, a1); + __ Branch(&done, eq, a3, Operand(zero_reg)); + + __ LoadInstanceDescriptors(a1, a4); + // a4: descriptor array. + // a3: valid entries in the descriptor array. 
+ STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); +// Not needed on MIPS64 (kPointerSize is 8, not 4): +// STATIC_ASSERT(kPointerSize == 4); + __ li(at, Operand(DescriptorArray::kDescriptorSize)); + __ Dmul(a3, a3, at); + // Calculate location of the first key name. + __ Daddu(a4, a4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); + // Calculate the end of the descriptor array. + __ mov(a2, a4); + __ dsll(a5, a3, kPointerSizeLog2); + __ Daddu(a2, a2, a5); + + // Loop through all the keys in the descriptor array. If one of these is the + // string "valueOf" the result is false. + // The use of a6 to store the valueOf string assumes that it is not otherwise + // used in the loop below. + __ li(a6, Operand(isolate()->factory()->value_of_string())); + __ jmp(&entry); + __ bind(&loop); + __ ld(a3, MemOperand(a4, 0)); + __ Branch(if_false, eq, a3, Operand(a6)); + __ Daddu(a4, a4, Operand(DescriptorArray::kDescriptorSize * kPointerSize)); + __ bind(&entry); + __ Branch(&loop, ne, a4, Operand(a2)); + + __ bind(&done); + + // Set the bit in the map to indicate that there is no local valueOf field. + __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset)); + __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset)); + + __ bind(&skip_lookup); + + // If a valueOf property is not found on the object check that its + // prototype is the un-modified String prototype. If not, the result is false.
+ __ ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); + __ JumpIfSmi(a2, if_false); + __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ ld(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ ld(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset)); + __ ld(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a2, Operand(a3), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ GetObjectType(v0, a1, a2); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE)); + __ Branch(if_false); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK); + __ lwu(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset)); + __ lwu(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); + __ li(a4, 0x80000000); + Label not_nan; + __ Branch(&not_nan, ne, a2, Operand(a4)); + __ mov(a4, zero_reg); + __ mov(a2, a1); + __ bind(&not_nan); + +
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a2, Operand(a4), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ GetObjectType(v0, a1, a1); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a1, Operand(JS_ARRAY_TYPE), + if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ GetObjectType(v0, a1, a1); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + // Get the frame pointer for the calling frame. 
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset)); + __ Branch(&check_frame_marker, ne, + a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)), + if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + // Load the two objects into registers and perform the comparison. + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ pop(a1); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, v0, Operand(a1), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitArguments(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + // ArgumentsAccessStub expects the key in a1 and the formal + // parameter count in a0. 
+ VisitForAccumulatorValue(args->at(0)); + __ mov(a1, v0); + __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + Label exit; + // Get the number of formal parameters. + __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); + + // Check if the calling frame is an arguments adaptor frame. + __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); + __ Branch(&exit, ne, a3, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. + __ ld(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + + __ bind(&exit); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + Label done, null, function, non_function_constructor; + + VisitForAccumulatorValue(args->at(0)); + + // If the object is a smi, we return null. + __ JumpIfSmi(v0, &null); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ GetObjectType(v0, v0, a1); // Map is now in v0. 
+ __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); + + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); + + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE)); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. + __ ld(v0, FieldMemOperand(v0, Map::kConstructorOffset)); + __ GetObjectType(v0, a1, a1); + __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE)); + + // v0 now contains the constructor function. Grab the + // instance class name from there. + __ ld(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset)); + __ ld(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset)); + __ Branch(&done); + + // Functions have class 'Function'. + __ bind(&function); + __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex); + __ jmp(&done); + + // Objects with a non-function constructor have class 'Object'. + __ bind(&non_function_constructor); + __ LoadRoot(v0, Heap::kObject_stringRootIndex); + __ jmp(&done); + + // Non-JS objects have class null. + __ bind(&null); + __ LoadRoot(v0, Heap::kNullValueRootIndex); + + // All done. + __ bind(&done); + + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitSubString(CallRuntime* expr) { + // Load the arguments on the stack and call the stub. + SubStringStub stub(isolate()); + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 3); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + VisitForStackValue(args->at(2)); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { + // Load the arguments on the stack and call the stub. 
+ RegExpExecStub stub(isolate()); + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 4); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + VisitForStackValue(args->at(2)); + VisitForStackValue(args->at(3)); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); // Load the object. + + Label done; + // If the object is a smi return the object. + __ JumpIfSmi(v0, &done); + // If the object is not a value type, return the object. + __ GetObjectType(v0, a1, a1); + __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE)); + + __ ld(v0, FieldMemOperand(v0, JSValue::kValueOffset)); + + __ bind(&done); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitDateField(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); + Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); + + VisitForAccumulatorValue(args->at(0)); // Load the object. 
+ + Label runtime, done, not_date_object; + Register object = v0; + Register result = v0; + Register scratch0 = t1; + Register scratch1 = a1; + + __ JumpIfSmi(object, &not_date_object); + __ GetObjectType(object, scratch1, scratch1); + __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE)); + + if (index->value() == 0) { + __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); + __ jmp(&done); + } else { + if (index->value() < JSDate::kFirstUncachedField) { + ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); + __ li(scratch1, Operand(stamp)); + __ ld(scratch1, MemOperand(scratch1)); + __ ld(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset)); + __ Branch(&runtime, ne, scratch1, Operand(scratch0)); + __ ld(result, FieldMemOperand(object, JSDate::kValueOffset + + kPointerSize * index->value())); + __ jmp(&done); + } + __ bind(&runtime); + __ PrepareCallCFunction(2, scratch1); + __ li(a1, Operand(index)); + __ Move(a0, object); + __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); + __ jmp(&done); + } + + __ bind(&not_date_object); + __ CallRuntime(Runtime::kThrowNotDateError, 0); + __ bind(&done); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(3, args->length()); + + Register string = v0; + Register index = a1; + Register value = a2; + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + VisitForAccumulatorValue(args->at(0)); // string + __ Pop(index, value); + + if (FLAG_debug_code) { + __ SmiTst(value, at); + __ Check(eq, kNonSmiValue, at, Operand(zero_reg)); + __ SmiTst(index, at); + __ Check(eq, kNonSmiIndex, at, Operand(zero_reg)); + __ SmiUntag(index, index); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + Register scratch = t1; + __ EmitSeqStringSetCharCheck( + string, index, value, scratch,
one_byte_seq_type); + __ SmiTag(index, index); + } + + __ SmiUntag(value, value); + __ Daddu(at, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ SmiUntag(index); + __ Daddu(at, at, index); + __ sb(value, MemOperand(at)); + context()->Plug(string); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(3, args->length()); + + Register string = v0; + Register index = a1; + Register value = a2; + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + VisitForAccumulatorValue(args->at(0)); // string + __ Pop(index, value); + + if (FLAG_debug_code) { + __ SmiTst(value, at); + __ Check(eq, kNonSmiValue, at, Operand(zero_reg)); + __ SmiTst(index, at); + __ Check(eq, kNonSmiIndex, at, Operand(zero_reg)); + __ SmiUntag(index, index); + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + Register scratch = t1; + __ EmitSeqStringSetCharCheck( + string, index, value, scratch, two_byte_seq_type); + __ SmiTag(index, index); + } + + __ SmiUntag(value, value); + __ Daddu(at, + string, + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ dsra(index, index, 32 - 1); + __ Daddu(at, at, index); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ sh(value, MemOperand(at)); + context()->Plug(string); +} + + +void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { + // Load the arguments on the stack and call the runtime function. + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + MathPowStub stub(isolate(), MathPowStub::ON_STACK); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + VisitForStackValue(args->at(0)); // Load the object. 
+ VisitForAccumulatorValue(args->at(1)); // Load the value. + __ pop(a1); // v0 = value. a1 = object. + + Label done; + // If the object is a smi, return the value. + __ JumpIfSmi(a1, &done); + + // If the object is not a value type, return the value. + __ GetObjectType(a1, a2, a2); + __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE)); + + // Store the value. + __ sd(v0, FieldMemOperand(a1, JSValue::kValueOffset)); + // Update the write barrier. Save the value as it will be + // overwritten by the write barrier code and is needed afterward. + __ mov(a2, v0); + __ RecordWriteField( + a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs); + + __ bind(&done); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(args->length(), 1); + + // Load the argument into a0 and call the stub. + VisitForAccumulatorValue(args->at(0)); + __ mov(a0, result_register()); + + NumberToStringStub stub(isolate()); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label done; + StringCharFromCodeGenerator generator(v0, a1); + generator.GenerateFast(masm_); + __ jmp(&done); + + NopRuntimeCallHelper call_helper; + generator.GenerateSlow(masm_, call_helper); + + __ bind(&done); + context()->Plug(a1); +} + + +void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + __ mov(a0, result_register()); + + Register object = a1; + Register index = a0; + Register result = v0; + + __ pop(object); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharCodeAtGenerator generator(object, + index, + 
result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm_); + __ jmp(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // NaN. + __ LoadRoot(result, Heap::kNanValueRootIndex); + __ jmp(&done); + + __ bind(&need_conversion); + // Load the undefined value into the result register, which will + // trigger conversion. + __ LoadRoot(result, Heap::kUndefinedValueRootIndex); + __ jmp(&done); + + NopRuntimeCallHelper call_helper; + generator.GenerateSlow(masm_, call_helper); + + __ bind(&done); + context()->Plug(result); +} + + +void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + __ mov(a0, result_register()); + + Register object = a1; + Register index = a0; + Register scratch = a3; + Register result = v0; + + __ pop(object); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharAtGenerator generator(object, + index, + scratch, + result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm_); + __ jmp(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // the empty string. + __ LoadRoot(result, Heap::kempty_stringRootIndex); + __ jmp(&done); + + __ bind(&need_conversion); + // Move smi zero into the result register, which will trigger + // conversion. 
+ __ li(result, Operand(Smi::FromInt(0))); + __ jmp(&done); + + NopRuntimeCallHelper call_helper; + generator.GenerateSlow(masm_, call_helper); + + __ bind(&done); + context()->Plug(result); +} + + +void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(2, args->length()); + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + + __ pop(a1); + __ mov(a0, result_register()); // StringAddStub requires args in a0, a1. + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(2, args->length()); + + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + + StringCompareStub stub(isolate()); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() >= 2); + + int arg_count = args->length() - 2; // 2 ~ receiver and function. + for (int i = 0; i < arg_count + 1; i++) { + VisitForStackValue(args->at(i)); + } + VisitForAccumulatorValue(args->last()); // Function. + + Label runtime, done; + // Check for non-function argument (including proxy). + __ JumpIfSmi(v0, &runtime); + __ GetObjectType(v0, a1, a1); + __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE)); + + // InvokeFunction requires the function in a1. Move it in there. 
+ __ mov(a1, result_register()); + ParameterCount count(arg_count); + __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper()); + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ jmp(&done); + + __ bind(&runtime); + __ push(v0); + __ CallRuntime(Runtime::kCall, args->length()); + __ bind(&done); + + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { + RegExpConstructResultStub stub(isolate()); + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 3); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + VisitForAccumulatorValue(args->at(2)); + __ mov(a0, result_register()); + __ pop(a1); + __ pop(a2); + __ CallStub(&stub); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(2, args->length()); + + DCHECK_NE(NULL, args->at(0)->AsLiteral()); + int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); + + Handle<FixedArray> jsfunction_result_caches( + isolate()->native_context()->jsfunction_result_caches()); + if (jsfunction_result_caches->length() <= cache_id) { + __ Abort(kAttemptToUseUndefinedCache); + __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); + context()->Plug(v0); + return; + } + + VisitForAccumulatorValue(args->at(1)); + + Register key = v0; + Register cache = a1; + __ ld(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ ld(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset)); + __ ld(cache, + ContextOperand( + cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); + __ ld(cache, + FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); + + + Label done, not_found; + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ ld(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset)); + // a2 now holds finger offset as a smi. 
+ __ Daddu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + // a3 now points to the start of fixed array elements. + __ SmiScale(at, a2, kPointerSizeLog2); + __ daddu(a3, a3, at); + // a3 now points to key of indexed element of cache. + __ ld(a2, MemOperand(a3)); + __ Branch(&not_found, ne, key, Operand(a2)); + + __ ld(v0, MemOperand(a3, kPointerSize)); + __ Branch(&done); + + __ bind(&not_found); + // Call runtime to perform the lookup. + __ Push(cache, key); + __ CallRuntime(Runtime::kGetFromCache, 2); + + __ bind(&done); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ lwu(a0, FieldMemOperand(v0, String::kHashFieldOffset)); + __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask)); + + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + VisitForAccumulatorValue(args->at(0)); + + __ AssertString(v0); + + __ lwu(v0, FieldMemOperand(v0, String::kHashFieldOffset)); + __ IndexFromHash(v0, v0); + + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { + Label bailout, done, one_char_separator, long_separator, + non_trivial_array, not_size_one_array, loop, + empty_separator_loop, one_char_separator_loop, + one_char_separator_loop_entry, long_separator_loop; + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); +
VisitForStackValue(args->at(1)); + VisitForAccumulatorValue(args->at(0)); + + // All aliases of the same register have disjoint lifetimes. + Register array = v0; + Register elements = no_reg; // Will be v0. + Register result = no_reg; // Will be v0. + Register separator = a1; + Register array_length = a2; + Register result_pos = no_reg; // Will be a2. + Register string_length = a3; + Register string = a4; + Register element = a5; + Register elements_end = a6; + Register scratch1 = a7; + Register scratch2 = t1; + Register scratch3 = t0; + + // Separator operand is on the stack. + __ pop(separator); + + // Check that the array is a JSArray. + __ JumpIfSmi(array, &bailout); + __ GetObjectType(array, scratch1, scratch2); + __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE)); + + // Check that the array has fast elements. + __ CheckFastElements(scratch1, scratch2, &bailout); + + // If the array has length zero, return the empty string. + __ ld(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); + __ SmiUntag(array_length); + __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg)); + __ LoadRoot(v0, Heap::kempty_stringRootIndex); + __ Branch(&done); + + __ bind(&non_trivial_array); + + // Get the FixedArray containing array's elements. + elements = array; + __ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset)); + array = no_reg; // End of array's live range. + + // Check that all array elements are sequential ASCII strings, and + // accumulate the sum of their lengths, as a smi-encoded value. + __ mov(string_length, zero_reg); + __ Daddu(element, + elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ dsll(elements_end, array_length, kPointerSizeLog2); + __ Daddu(elements_end, element, elements_end); + // Loop condition: while (element < elements_end). + // Live values in registers: + // elements: Fixed array of strings. 
+ // array_length: Length of the fixed array of strings (not smi) + // separator: Separator string + // string_length: Accumulated sum of string lengths (smi). + // element: Current array element. + // elements_end: Array end. + if (generate_debug_code_) { + __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin, + array_length, Operand(zero_reg)); + } + __ bind(&loop); + __ ld(string, MemOperand(element)); + __ Daddu(element, element, kPointerSize); + __ JumpIfSmi(string, &bailout); + __ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); + __ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); + __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3); + __ BranchOnOverflow(&bailout, scratch3); + __ Branch(&loop, lt, element, Operand(elements_end)); + + // If array_length is 1, return elements[0], a string. + __ Branch(&not_size_one_array, ne, array_length, Operand(1)); + __ ld(v0, FieldMemOperand(elements, FixedArray::kHeaderSize)); + __ Branch(&done); + + __ bind(&not_size_one_array); + + // Live values in registers: + // separator: Separator string + // array_length: Length of the array. + // string_length: Sum of string lengths (smi). + // elements: FixedArray of strings. + + // Check that the separator is a flat ASCII string. + __ JumpIfSmi(separator, &bailout); + __ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); + __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); + + // Add (separator length times array_length) - separator length to the + // string_length to get the length of the result string. array_length is not + // smi but the other values are, so the result is a smi.
+ __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); + __ Dsubu(string_length, string_length, Operand(scratch1)); + __ SmiUntag(scratch1); + __ Dmul(scratch2, array_length, scratch1); + // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are + // zero. + __ dsra32(scratch1, scratch2, 0); + __ Branch(&bailout, ne, scratch2, Operand(zero_reg)); + __ SmiUntag(string_length); + __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3); + __ BranchOnOverflow(&bailout, scratch3); + + // Get first element in the array to free up the elements register to be used + // for the result. + __ Daddu(element, + elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + result = elements; // End of live range for elements. + elements = no_reg; + // Live values in registers: + // element: First array element + // separator: Separator string + // string_length: Length of result string (not smi) + // array_length: Length of the array. + __ AllocateAsciiString(result, + string_length, + scratch1, + scratch2, + elements_end, + &bailout); + // Prepare for looping. Set up elements_end to end of the array. Set + // result_pos to the position of the result where to write the first + // character. + __ dsll(elements_end, array_length, kPointerSizeLog2); + __ Daddu(elements_end, element, elements_end); + result_pos = array_length; // End of live range for array_length. + array_length = no_reg; + __ Daddu(result_pos, + result, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + + // Check the length of the separator. + __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); + __ li(at, Operand(Smi::FromInt(1))); + __ Branch(&one_char_separator, eq, scratch1, Operand(at)); + __ Branch(&long_separator, gt, scratch1, Operand(at)); + + // Empty separator case. 
+ __ bind(&empty_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + + // Copy next array element to the result. + __ ld(string, MemOperand(element)); + __ Daddu(element, element, kPointerSize); + __ ld(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); + __ CopyBytes(string, result_pos, string_length, scratch1); + // End while (element < elements_end). + __ Branch(&empty_separator_loop, lt, element, Operand(elements_end)); + DCHECK(result.is(v0)); + __ Branch(&done); + + // One-character separator case. + __ bind(&one_char_separator); + // Replace separator with its ASCII character value. + __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); + // Jump into the loop after the code that copies the separator, so the first + // element is not preceded by a separator. + __ jmp(&one_char_separator_loop_entry); + + __ bind(&one_char_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + // separator: Single separator ASCII char (in lower byte). + + // Copy the separator character to the result. + __ sb(separator, MemOperand(result_pos)); + __ Daddu(result_pos, result_pos, 1); + + // Copy next array element to the result. + __ bind(&one_char_separator_loop_entry); + __ ld(string, MemOperand(element)); + __ Daddu(element, element, kPointerSize); + __ ld(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); + __ CopyBytes(string, result_pos, string_length, scratch1); + // End while (element < elements_end). 
+ __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end)); + DCHECK(result.is(v0)); + __ Branch(&done); + + // Long separator case (separator is more than one character). Entry is at the + // label long_separator below. + __ bind(&long_separator_loop); + // Live values in registers: + // result_pos: the position to which we are currently copying characters. + // element: Current array element. + // elements_end: Array end. + // separator: Separator string. + + // Copy the separator to the result. + __ ld(string_length, FieldMemOperand(separator, String::kLengthOffset)); + __ SmiUntag(string_length); + __ Daddu(string, + separator, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ CopyBytes(string, result_pos, string_length, scratch1); + + __ bind(&long_separator); + __ ld(string, MemOperand(element)); + __ Daddu(element, element, kPointerSize); + __ ld(string_length, FieldMemOperand(string, String::kLengthOffset)); + __ SmiUntag(string_length); + __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); + __ CopyBytes(string, result_pos, string_length, scratch1); + // End while (element < elements_end). 
+ __ Branch(&long_separator_loop, lt, element, Operand(elements_end)); + DCHECK(result.is(v0)); + __ Branch(&done); + + __ bind(&bailout); + __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); + __ bind(&done); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ li(at, Operand(debug_is_active)); + __ lbu(v0, MemOperand(at)); + __ SmiTag(v0); + context()->Plug(v0); +} + + +void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { + if (expr->function() != NULL && + expr->function()->intrinsic_type == Runtime::INLINE) { + Comment cmnt(masm_, "[ InlineRuntimeCall"); + EmitInlineRuntimeCall(expr); + return; + } + + Comment cmnt(masm_, "[ CallRuntime"); + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + + if (expr->is_jsruntime()) { + // Push the builtins object as the receiver. + Register receiver = LoadIC::ReceiverRegister(); + __ ld(receiver, GlobalObjectOperand()); + __ ld(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset)); + __ push(receiver); + + // Load the function from the receiver. + __ li(LoadIC::NameRegister(), Operand(expr->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } + + // Push the target function under the receiver. + __ ld(at, MemOperand(sp, 0)); + __ push(at); + __ sd(v0, MemOperand(sp, kPointerSize)); + + // Push the arguments ("left-to-right"). + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Record source position of the IC call. 
+ SetSourcePosition(expr->position()); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); + __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ CallStub(&stub); + + // Restore context register. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + context()->DropAndPlug(1, v0); + } else { + // Push the arguments ("left-to-right"). + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Call the C runtime function. + __ CallRuntime(expr->function(), arg_count); + context()->Plug(v0); + } +} + + +void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { + switch (expr->op()) { + case Token::DELETE: { + Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); + Property* property = expr->expression()->AsProperty(); + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + + if (property != NULL) { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + __ li(a1, Operand(Smi::FromInt(strict_mode()))); + __ push(a1); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + context()->Plug(v0); + } else if (proxy != NULL) { + Variable* var = proxy->var(); + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is allowed. + DCHECK(strict_mode() == SLOPPY || var->is_this()); + if (var->IsUnallocated()) { + __ ld(a2, GlobalObjectOperand()); + __ li(a1, Operand(var->name())); + __ li(a0, Operand(Smi::FromInt(SLOPPY))); + __ Push(a2, a1, a0); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + context()->Plug(v0); + } else if (var->IsStackAllocated() || var->IsContextSlot()) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + context()->Plug(var->is_this()); + } else { + // Non-global variable. Call the runtime to try to delete from the + // context where the variable was introduced. 
+ DCHECK(!context_register().is(a2)); + __ li(a2, Operand(var->name())); + __ Push(context_register(), a2); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); + context()->Plug(v0); + } + } else { + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + context()->Plug(true); + } + break; + } + + case Token::VOID: { + Comment cmnt(masm_, "[ UnaryOperation (VOID)"); + VisitForEffect(expr->expression()); + context()->Plug(Heap::kUndefinedValueRootIndex); + break; + } + + case Token::NOT: { + Comment cmnt(masm_, "[ UnaryOperation (NOT)"); + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else if (context()->IsTest()) { + const TestContext* test = TestContext::cast(context()); + // The labels are swapped for the recursive call. + VisitForControl(expr->expression(), + test->false_label(), + test->true_label(), + test->fall_through()); + context()->Plug(test->true_label(), test->false_label()); + } else { + // We handle value contexts explicitly rather than simply visiting + // for control and plugging the control flow into the context, + // because we need to prepare a pair of extra administrative AST ids + // for the optimizing compiler. 
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); + Label materialize_true, materialize_false, done; + VisitForControl(expr->expression(), + &materialize_false, + &materialize_true, + &materialize_true); + __ bind(&materialize_true); + PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS); + __ LoadRoot(v0, Heap::kTrueValueRootIndex); + if (context()->IsStackValue()) __ push(v0); + __ jmp(&done); + __ bind(&materialize_false); + PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS); + __ LoadRoot(v0, Heap::kFalseValueRootIndex); + if (context()->IsStackValue()) __ push(v0); + __ bind(&done); + } + break; + } + + case Token::TYPEOF: { + Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); + { StackValueContext context(this); + VisitForTypeofValue(expr->expression()); + } + __ CallRuntime(Runtime::kTypeof, 1); + context()->Plug(v0); + break; + } + + default: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { + DCHECK(expr->expression()->IsValidReferenceExpression()); + + Comment cmnt(masm_, "[ CountOperation"); + SetSourcePosition(expr->position()); + + // Expression can only be a property, a global or a (parameter or local) + // slot. + enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* prop = expr->expression()->AsProperty(); + // In case of a property we use the uninitialized expression context + // of the key to detect a named property. + if (prop != NULL) { + assign_type = + (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY; + } + + // Evaluate expression and get value. + if (assign_type == VARIABLE) { + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); + AccumulatorValueContext context(this); + EmitVariableLoad(expr->expression()->AsVariableProxy()); + } else { + // Reserve space for result of postfix operation. 
+ if (expr->is_postfix() && !context()->IsEffect()) { + __ li(at, Operand(Smi::FromInt(0))); + __ push(at); + } + if (assign_type == NAMED_PROPERTY) { + // Put the object both on the stack and in the register. + VisitForStackValue(prop->obj()); + __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); + EmitNamedPropertyLoad(prop); + } else { + VisitForStackValue(prop->obj()); + VisitForStackValue(prop->key()); + __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); + __ ld(LoadIC::NameRegister(), MemOperand(sp, 0)); + EmitKeyedPropertyLoad(prop); + } + } + + // We need a second deoptimization point after loading the value + // in case evaluating the property load my have a side effect. + if (assign_type == VARIABLE) { + PrepareForBailout(expr->expression(), TOS_REG); + } else { + PrepareForBailoutForId(prop->LoadId(), TOS_REG); + } + + // Inline smi case if we are in a loop. + Label stub_call, done; + JumpPatchSite patch_site(masm_); + + int count_value = expr->op() == Token::INC ? 1 : -1; + __ mov(a0, v0); + if (ShouldInlineSmiCase(expr->op())) { + Label slow; + patch_site.EmitJumpIfNotSmi(v0, &slow); + + // Save result for postfix expressions. + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + // Save the result on the stack. If we have a named or keyed property + // we store the result under the receiver that is currently on top + // of the stack. + switch (assign_type) { + case VARIABLE: + __ push(v0); + break; + case NAMED_PROPERTY: + __ sd(v0, MemOperand(sp, kPointerSize)); + break; + case KEYED_PROPERTY: + __ sd(v0, MemOperand(sp, 2 * kPointerSize)); + break; + } + } + } + + Register scratch1 = a1; + Register scratch2 = a4; + __ li(scratch1, Operand(Smi::FromInt(count_value))); + __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2); + __ BranchOnNoOverflow(&done, scratch2); + // Call stub. Undo operation first. 
+ __ Move(v0, a0); + __ jmp(&stub_call); + __ bind(&slow); + } + ToNumberStub convert_stub(isolate()); + __ CallStub(&convert_stub); + + // Save result for postfix expressions. + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + // Save the result on the stack. If we have a named or keyed property + // we store the result under the receiver that is currently on top + // of the stack. + switch (assign_type) { + case VARIABLE: + __ push(v0); + break; + case NAMED_PROPERTY: + __ sd(v0, MemOperand(sp, kPointerSize)); + break; + case KEYED_PROPERTY: + __ sd(v0, MemOperand(sp, 2 * kPointerSize)); + break; + } + } + } + + __ bind(&stub_call); + __ mov(a1, v0); + __ li(a0, Operand(Smi::FromInt(count_value))); + + // Record position before stub call. + SetSourcePosition(expr->position()); + + BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); + patch_site.EmitPatchInfo(); + __ bind(&done); + + // Store the value returned in v0. + switch (assign_type) { + case VARIABLE: + if (expr->is_postfix()) { + { EffectContext context(this); + EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context.Plug(v0); + } + // For all contexts except EffectConstant we have the result on + // top of the stack. 
+ if (!context()->IsEffect()) { + context()->PlugTOS(); + } + } else { + EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(v0); + } + break; + case NAMED_PROPERTY: { + __ mov(StoreIC::ValueRegister(), result_register()); + __ li(StoreIC::NameRegister(), + Operand(prop->key()->AsLiteral()->value())); + __ pop(StoreIC::ReceiverRegister()); + CallStoreIC(expr->CountStoreFeedbackId()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + context()->PlugTOS(); + } + } else { + context()->Plug(v0); + } + break; + } + case KEYED_PROPERTY: { + __ mov(KeyedStoreIC::ValueRegister(), result_register()); + __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); + Handle<Code> ic = strict_mode() == SLOPPY + ? isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + CallIC(ic, expr->CountStoreFeedbackId()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + context()->PlugTOS(); + } + } else { + context()->Plug(v0); + } + break; + } + } +} + + +void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); + VariableProxy* proxy = expr->AsVariableProxy(); + if (proxy != NULL && proxy->var()->IsUnallocated()) { + Comment cmnt(masm_, "[ Global variable"); + __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ li(LoadIC::NameRegister(), Operand(proxy->name())); + if (FLAG_vector_ics) { + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + // Use a regular load, not a contextual load, to avoid a reference + // error. 
+ CallLoadIC(NOT_CONTEXTUAL); + PrepareForBailout(expr, TOS_REG); + context()->Plug(v0); + } else if (proxy != NULL && proxy->var()->IsLookupSlot()) { + Comment cmnt(masm_, "[ Lookup slot"); + Label done, slow; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); + + __ bind(&slow); + __ li(a0, Operand(proxy->name())); + __ Push(cp, a0); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); + PrepareForBailout(expr, TOS_REG); + __ bind(&done); + + context()->Plug(v0); + } else { + // This expression cannot throw a reference error at the top level. + VisitInDuplicateContext(expr); + } +} + +void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, + Expression* sub_expr, + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + { AccumulatorValueContext context(this); + VisitForTypeofValue(sub_expr); + } + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { + __ JumpIfSmi(v0, if_true); + __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + Split(eq, v0, Operand(at), if_true, if_false, fall_through); + } else if (String::Equals(check, factory->string_string())) { + __ JumpIfSmi(v0, if_false); + // Check for undetectable objects => false. 
+ __ GetObjectType(v0, v0, a1); + __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE)); + __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset)); + __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); + Split(eq, a1, Operand(zero_reg), + if_true, if_false, fall_through); + } else if (String::Equals(check, factory->symbol_string())) { + __ JumpIfSmi(v0, if_false); + __ GetObjectType(v0, v0, a1); + Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through); + } else if (String::Equals(check, factory->boolean_string())) { + __ LoadRoot(at, Heap::kTrueValueRootIndex); + __ Branch(if_true, eq, v0, Operand(at)); + __ LoadRoot(at, Heap::kFalseValueRootIndex); + Split(eq, v0, Operand(at), if_true, if_false, fall_through); + } else if (String::Equals(check, factory->undefined_string())) { + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(if_true, eq, v0, Operand(at)); + __ JumpIfSmi(v0, if_false); + // Check for undetectable objects => true. + __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset)); + __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); + Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through); + } else if (String::Equals(check, factory->function_string())) { + __ JumpIfSmi(v0, if_false); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ GetObjectType(v0, v0, a1); + __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE)); + Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE), + if_true, if_false, fall_through); + } else if (String::Equals(check, factory->object_string())) { + __ JumpIfSmi(v0, if_false); + __ LoadRoot(at, Heap::kNullValueRootIndex); + __ Branch(if_true, eq, v0, Operand(at)); + // Check for JS objects => true. 
+ __ GetObjectType(v0, v0, a1); + __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset)); + __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); + // Check for undetectable objects => false. + __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset)); + __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); + Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through); + } else { + if (if_false != fall_through) __ jmp(if_false); + } + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { + Comment cmnt(masm_, "[ CompareOperation"); + SetSourcePosition(expr->position()); + + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + + // Always perform the comparison for its control flow. Pack the result + // into the expression's context after the comparison is performed. + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + Token::Value op = expr->op(); + VisitForStackValue(expr->left()); + switch (op) { + case Token::IN: + VisitForStackValue(expr->right()); + __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); + PrepareForBailoutBeforeSplit(expr, false, NULL, NULL); + __ LoadRoot(a4, Heap::kTrueValueRootIndex); + Split(eq, v0, Operand(a4), if_true, if_false, fall_through); + break; + + case Token::INSTANCEOF: { + VisitForStackValue(expr->right()); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); + __ CallStub(&stub); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + // The stub returns 0 for true. 
+ Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through); + break; + } + + default: { + VisitForAccumulatorValue(expr->right()); + Condition cc = CompareIC::ComputeCondition(op); + __ mov(a0, result_register()); + __ pop(a1); + + bool inline_smi_code = ShouldInlineSmiCase(op); + JumpPatchSite patch_site(masm_); + if (inline_smi_code) { + Label slow_case; + __ Or(a2, a0, Operand(a1)); + patch_site.EmitJumpIfNotSmi(a2, &slow_case); + Split(cc, a1, Operand(a0), if_true, if_false, NULL); + __ bind(&slow_case); + } + // Record position and call the compare IC. + SetSourcePosition(expr->position()); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); + CallIC(ic, expr->CompareOperationFeedbackId()); + patch_site.EmitPatchInfo(); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through); + } + } + + // Convert the result of the comparison into one expected for this + // expression's context. + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + VisitForAccumulatorValue(sub_expr); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ mov(a0, result_register()); + if (expr->op() == Token::EQ_STRICT) { + Heap::RootListIndex nil_value = nil == kNullValue ? 
+ Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(a1, nil_value); + Split(eq, a0, Operand(a1), if_true, if_false, fall_through); + } else { + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil); + CallIC(ic, expr->CompareOperationFeedbackId()); + Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through); + } + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) { + __ ld(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + context()->Plug(v0); +} + + +Register FullCodeGenerator::result_register() { + return v0; +} + + +Register FullCodeGenerator::context_register() { + return cp; +} + + +void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { + // DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); + DCHECK(IsAligned(frame_offset, kPointerSize)); + // __ sw(value, MemOperand(fp, frame_offset)); + __ sd(value, MemOperand(fp, frame_offset)); +} + + +void FullCodeGenerator::LoadContextField(Register dst, int context_index) { + __ ld(dst, ContextOperand(cp, context_index)); +} + + +void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { + Scope* declaration_scope = scope()->DeclarationScope(); + if (declaration_scope->is_global_scope() || + declaration_scope->is_module_scope()) { + // Contexts nested in the native context have a canonical empty function + // as their closure, not the anonymous closure containing the global + // code. Pass a smi sentinel and let the runtime look up the empty + // function. + __ li(at, Operand(Smi::FromInt(0))); + } else if (declaration_scope->is_eval_scope()) { + // Contexts created by a call to eval have the same closure as the + // context calling eval, not the anonymous closure containing the eval + // code. Fetch it from the context. 
+ __ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX)); + } else { + DCHECK(declaration_scope->is_function_scope()); + __ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + } + __ push(at); +} + + +// ---------------------------------------------------------------------------- +// Non-local control flow support. + +void FullCodeGenerator::EnterFinallyBlock() { + DCHECK(!result_register().is(a1)); + // Store result register while executing finally block. + __ push(result_register()); + // Cook return address in link register to stack (smi encoded Code* delta). + __ Dsubu(a1, ra, Operand(masm_->CodeObject())); + __ SmiTag(a1); + + // Store result register while executing finally block. + __ push(a1); + + // Store pending message while executing finally block. + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ li(at, Operand(pending_message_obj)); + __ ld(a1, MemOperand(at)); + __ push(a1); + + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ li(at, Operand(has_pending_message)); + __ ld(a1, MemOperand(at)); + __ SmiTag(a1); + __ push(a1); + + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ li(at, Operand(pending_message_script)); + __ ld(a1, MemOperand(at)); + __ push(a1); +} + + +void FullCodeGenerator::ExitFinallyBlock() { + DCHECK(!result_register().is(a1)); + // Restore pending message from stack. 
+ __ pop(a1); + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ li(at, Operand(pending_message_script)); + __ sd(a1, MemOperand(at)); + + __ pop(a1); + __ SmiUntag(a1); + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ li(at, Operand(has_pending_message)); + __ sd(a1, MemOperand(at)); + + __ pop(a1); + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ li(at, Operand(pending_message_obj)); + __ sd(a1, MemOperand(at)); + + // Restore result register from stack. + __ pop(a1); + + // Uncook return address and return. + __ pop(result_register()); + + __ SmiUntag(a1); + __ Daddu(at, a1, Operand(masm_->CodeObject())); + __ Jump(at); +} + + +#undef __ + +#define __ ACCESS_MASM(masm()) + +FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( + int* stack_depth, + int* context_length) { + // The macros used here must preserve the result register. + + // Because the handler block contains the context of the finally + // code, we can restore it directly from there for the finally code + // rather than iteratively unwinding contexts via their previous + // links. + __ Drop(*stack_depth); // Down to the handler block. + if (*context_length > 0) { + // Restore the context to its dedicated register and the stack. 
+ __ ld(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); + __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ PopTryHandler(); + __ Call(finally_entry_); + + *stack_depth = 0; + *context_length = 0; + return previous_; +} + + +#undef __ + + +void BackEdgeTable::PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code) { + static const int kInstrSize = Assembler::kInstrSize; + Address branch_address = pc - 8 * kInstrSize; + CodePatcher patcher(branch_address, 1); + + switch (target_state) { + case INTERRUPT: + // slt at, a3, zero_reg (in case of count based interrupts) + // beq at, zero_reg, ok + // lui t9, <interrupt stub address> upper + // ori t9, <interrupt stub address> u-middle + // dsll t9, t9, 16 + // ori t9, <interrupt stub address> lower + // jalr t9 + // nop + // ok-label ----- pc_after points here + patcher.masm()->slt(at, a3, zero_reg); + break; + case ON_STACK_REPLACEMENT: + case OSR_AFTER_STACK_CHECK: + // addiu at, zero_reg, 1 + // beq at, zero_reg, ok ;; Not changed + // lui t9, <on-stack replacement address> upper + // ori t9, <on-stack replacement address> middle + // dsll t9, t9, 16 + // ori t9, <on-stack replacement address> lower + // jalr t9 ;; Not changed + // nop ;; Not changed + // ok-label ----- pc_after points here + patcher.masm()->daddiu(at, zero_reg, 1); + break; + } + Address pc_immediate_load_address = pc - 6 * kInstrSize; + // Replace the stack check address in the load-immediate (6-instr sequence) + // with the entry address of the replacement code. 
+ Assembler::set_target_address_at(pc_immediate_load_address, + replacement_code->entry()); + + unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, pc_immediate_load_address, replacement_code); +} + + +BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( + Isolate* isolate, + Code* unoptimized_code, + Address pc) { + static const int kInstrSize = Assembler::kInstrSize; + Address branch_address = pc - 8 * kInstrSize; + Address pc_immediate_load_address = pc - 6 * kInstrSize; + + DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 7 * kInstrSize))); + if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) { + DCHECK(reinterpret_cast<uint64_t>( + Assembler::target_address_at(pc_immediate_load_address)) == + reinterpret_cast<uint64_t>( + isolate->builtins()->InterruptCheck()->entry())); + return INTERRUPT; + } + + DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address))); + + if (reinterpret_cast<uint64_t>( + Assembler::target_address_at(pc_immediate_load_address)) == + reinterpret_cast<uint64_t>( + isolate->builtins()->OnStackReplacement()->entry())) { + return ON_STACK_REPLACEMENT; + } + + DCHECK(reinterpret_cast<uint64_t>( + Assembler::target_address_at(pc_immediate_load_address)) == + reinterpret_cast<uint64_t>( + isolate->builtins()->OsrAfterStackCheck()->entry())); + return OSR_AFTER_STACK_CHECK; +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/ic-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/ic-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/ic-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/ic-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1266 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ + + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +// ---------------------------------------------------------------------------- +// Static IC stub generators. +// + +#define __ ACCESS_MASM(masm) + + +static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, + Register type, + Label* global_object) { + // Register usage: + // type: holds the receiver instance type on entry. + __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE)); + __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE)); + __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE)); +} + + +// Helper function used from LoadIC GenerateNormal. +// +// elements: Property dictionary. It is not clobbered if a jump to the miss +// label is done. +// name: Property name. It is not clobbered if a jump to the miss label is +// done +// result: Register for the result. It is only updated if a jump to the miss +// label is not done. Can be the same as elements or name clobbering +// one of these in the case of not jumping to the miss label. +// The two scratch registers need to be different from elements, name and +// result. +// The generated code assumes that the receiver has slow properties, +// is not a global object and does not have interceptors. +// The address returned from GenerateStringDictionaryProbes() in scratch2 +// is used. +static void GenerateDictionaryLoad(MacroAssembler* masm, + Label* miss, + Register elements, + Register name, + Register result, + Register scratch1, + Register scratch2) { + // Main use of the scratch registers. + // scratch1: Used as temporary and to hold the capacity of the property + // dictionary. + // scratch2: Used as temporary. + Label done; + + // Probe the dictionary. 
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); + + // If probing finds an entry check that the value is a normal + // property. + __ bind(&done); // scratch2 == elements + 4 * index. + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; + __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); + __ And(at, + scratch1, + Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); + __ Branch(miss, ne, at, Operand(zero_reg)); + + // Get the value at the masked, scaled index and return. + __ ld(result, + FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); +} + + +// Helper function used from StoreIC::GenerateNormal. +// +// elements: Property dictionary. It is not clobbered if a jump to the miss +// label is done. +// name: Property name. It is not clobbered if a jump to the miss label is +// done +// value: The value to store. +// The two scratch registers need to be different from elements, name and +// result. +// The generated code assumes that the receiver has slow properties, +// is not a global object and does not have interceptors. +// The address returned from GenerateStringDictionaryProbes() in scratch2 +// is used. +static void GenerateDictionaryStore(MacroAssembler* masm, + Label* miss, + Register elements, + Register name, + Register value, + Register scratch1, + Register scratch2) { + // Main use of the scratch registers. + // scratch1: Used as temporary and to hold the capacity of the property + // dictionary. + // scratch2: Used as temporary. + Label done; + + // Probe the dictionary. + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); + + // If probing finds an entry in the dictionary check that the value + // is a normal property that is not read only. 
+ __ bind(&done); // scratch2 == elements + 4 * index. + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; + const int kTypeAndReadOnlyMask = + (PropertyDetails::TypeField::kMask | + PropertyDetails::AttributesField::encode(READ_ONLY)); + __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); + __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask))); + __ Branch(miss, ne, at, Operand(zero_reg)); + + // Store the value at the masked, scaled index and return. + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); + __ sd(value, MemOperand(scratch2)); + + // Update the write barrier. Make sure not to clobber the value. + __ mov(scratch1, value); + __ RecordWrite( + elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs); +} + + +// Checks the receiver for special cases (value type, slow case bits). +// Falls through for regular JS object. +static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, + Register receiver, + Register map, + Register scratch, + int interceptor_bit, + Label* slow) { + // Check that the object isn't a smi. + __ JumpIfSmi(receiver, slow); + // Get the map of the receiver. + __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + // Check bit field. + __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(at, scratch, + Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); + __ Branch(slow, ne, at, Operand(zero_reg)); + // Check that the object is some kind of JS object EXCEPT JS Value type. + // In the case that the object is a value-wrapper object, + // we enter the runtime system to make sure that indexing into string + // objects work as intended. 
+ DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); + __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE)); +} + + +// Loads an indexed element from a fast case array. +// If not_fast_array is NULL, doesn't perform the elements map check. +static void GenerateFastArrayLoad(MacroAssembler* masm, + Register receiver, + Register key, + Register elements, + Register scratch1, + Register scratch2, + Register result, + Label* not_fast_array, + Label* out_of_range) { + // Register use: + // + // receiver - holds the receiver on entry. + // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // elements - holds the elements of the receiver on exit. + // + // result - holds the result on exit if the load succeeded. + // Allowed to be the the same as 'receiver' or 'key'. + // Unchanged on bailout so 'receiver' and 'key' can be safely + // used by further computation. + // + // Scratch registers: + // + // scratch1 - used to hold elements map and elements length. + // Holds the elements map if not_fast_array branch is taken. + // + // scratch2 - used to hold the loaded value. + + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + if (not_fast_array != NULL) { + // Check that the object is in fast mode (not dictionary). + __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); + __ Branch(not_fast_array, ne, scratch1, Operand(at)); + } else { + __ AssertFastElements(elements); + } + + // Check that the key (index) is within bounds. + __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); + __ Branch(out_of_range, hs, key, Operand(scratch1)); + + // Fast case: Do the load. + __ Daddu(scratch1, elements, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + // The key is a smi. 
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ SmiScale(at, key, kPointerSizeLog2); + __ daddu(at, at, scratch1); + __ ld(scratch2, MemOperand(at)); + + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + // In case the loaded value is the_hole we have to consult GetProperty + // to ensure the prototype chain is searched. + __ Branch(out_of_range, eq, scratch2, Operand(at)); + __ mov(result, scratch2); +} + + +// Checks whether a key is an array index string or a unique name. +// Falls through if a key is a unique name. +static void GenerateKeyNameCheck(MacroAssembler* masm, + Register key, + Register map, + Register hash, + Label* index_string, + Label* not_unique) { + // The key is not a smi. + Label unique; + // Is it a name? + __ GetObjectType(key, map, hash); + __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE)); + STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE)); + + // Is the string an array index, with cached numeric value? + __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset)); + __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask)); + __ Branch(index_string, eq, at, Operand(zero_reg)); + + // Is the string internalized? We know it's a string, so a single + // bit test is enough. + // map: key map + __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kInternalizedTag == 0); + __ And(at, hash, Operand(kIsNotInternalizedMask)); + __ Branch(not_unique, ne, at, Operand(zero_reg)); + + __ bind(&unique); +} + + +void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { + // The return address is in lr. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(name.is(a2)); + + // Probe the stub cache. 
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); + masm->isolate()->stub_cache()->GenerateProbe( + masm, flags, receiver, name, a3, a4, a5, a6); + + // Cache miss: Jump to runtime. + GenerateMiss(masm); +} + + +void LoadIC::GenerateNormal(MacroAssembler* masm) { + Register dictionary = a0; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); + Label slow; + + __ ld(dictionary, + FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), v0, a3, a4); + __ Ret(); + + // Dictionary load failed, go slow (but don't miss). + __ bind(&slow); + GenerateRuntimeGetProperty(masm); +} + + +// A register that isn't one of the parameters to the load ic. +static const Register LoadIC_TempRegister() { return a3; } + + +void LoadIC::GenerateMiss(MacroAssembler* masm) { + // The return address is on the stack. + Isolate* isolate = masm->isolate(); + + __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4); + + __ mov(LoadIC_TempRegister(), ReceiverRegister()); + __ Push(LoadIC_TempRegister(), NameRegister()); + + // Perform tail call to the entry. + ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); + __ TailCallExternalReference(ref, 2, 1); +} + + +void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // The return address is in ra. + + __ mov(LoadIC_TempRegister(), ReceiverRegister()); + __ Push(LoadIC_TempRegister(), NameRegister()); + + __ TailCallRuntime(Runtime::kGetProperty, 2, 1); +} + + +static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, + Register object, + Register key, + Register scratch1, + Register scratch2, + Register scratch3, + Label* unmapped_case, + Label* slow_case) { + Heap* heap = masm->isolate()->heap(); + + // Check that the receiver is a JSObject. 
Because of the map check + // later, we do not need to check for interceptors or whether it + // requires access checks. + __ JumpIfSmi(object, slow_case); + // Check that the object is some kind of JSObject. + __ GetObjectType(object, scratch1, scratch2); + __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE)); + + // Check that the key is a positive smi. + __ NonNegativeSmiTst(key, scratch1); + __ Branch(slow_case, ne, scratch1, Operand(zero_reg)); + + // Load the elements into scratch1 and check its map. + Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); + __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); + __ CheckMap(scratch1, + scratch2, + arguments_map, + slow_case, + DONT_DO_SMI_CHECK); + // Check if element is in the range of mapped arguments. If not, jump + // to the unmapped lookup with the parameter map in scratch1. + __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); + __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2))); + __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2)); + + // Load element index and check whether it is the hole. + const int kOffset = + FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; + + __ SmiUntag(scratch3, key); + __ dsll(scratch3, scratch3, kPointerSizeLog2); + __ Daddu(scratch3, scratch3, Operand(kOffset)); + + __ Daddu(scratch2, scratch1, scratch3); + __ ld(scratch2, MemOperand(scratch2)); + __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); + __ Branch(unmapped_case, eq, scratch2, Operand(scratch3)); + + // Load value from context and return it. We can reuse scratch1 because + // we do not jump to the unmapped lookup (which requires the parameter + // map in scratch1). 
+ __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + __ SmiUntag(scratch3, scratch2); + __ dsll(scratch3, scratch3, kPointerSizeLog2); + __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); + __ Daddu(scratch2, scratch1, scratch3); + return MemOperand(scratch2); +} + + +static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, + Register key, + Register parameter_map, + Register scratch, + Label* slow_case) { + // Element is in arguments backing store, which is referenced by the + // second element of the parameter_map. The parameter_map register + // must be loaded with the parameter map of the arguments object and is + // overwritten. + const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; + Register backing_store = parameter_map; + __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); + __ CheckMap(backing_store, + scratch, + Heap::kFixedArrayMapRootIndex, + slow_case, + DONT_DO_SMI_CHECK); + __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); + __ Branch(slow_case, Ugreater_equal, key, Operand(scratch)); + __ SmiUntag(scratch, key); + __ dsll(scratch, scratch, kPointerSizeLog2); + __ Daddu(scratch, + scratch, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ Daddu(scratch, backing_store, scratch); + return MemOperand(scratch); +} + + +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is in ra. + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(key.is(a2)); + + Label slow, notin; + MemOperand mapped_location = + GenerateMappedArgumentsLookup( + masm, receiver, key, a0, a3, a4, ¬in, &slow); + __ Ret(USE_DELAY_SLOT); + __ ld(v0, mapped_location); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in a2. 
+ MemOperand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow); + __ ld(a0, unmapped_location); + __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); + __ Branch(&slow, eq, a0, Operand(a3)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); + __ bind(&slow); + GenerateMiss(masm); +} + + +void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register value = ValueRegister(); + DCHECK(value.is(a0)); + + Label slow, notin; + // Store address is returned in register (of MemOperand) mapped_location. + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, a3, a4, a5, ¬in, &slow); + __ sd(value, mapped_location); + __ mov(t1, value); + DCHECK_EQ(mapped_location.offset(), 0); + __ RecordWrite(a3, mapped_location.rm(), t1, + kRAHasNotBeenSaved, kDontSaveFPRegs); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, value); // (In delay slot) return the value stored in v0. + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in a3. + // Store address is returned in register (of MemOperand) unmapped_location. + MemOperand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, a3, a4, &slow); + __ sd(value, unmapped_location); + __ mov(t1, value); + DCHECK_EQ(unmapped_location.offset(), 0); + __ RecordWrite(a3, unmapped_location.rm(), t1, + kRAHasNotBeenSaved, kDontSaveFPRegs); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); // (In delay slot) return the value stored in v0. + __ bind(&slow); + GenerateMiss(masm); +} + + +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { + // The return address is in ra. + Isolate* isolate = masm->isolate(); + + __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4); + + __ Push(ReceiverRegister(), NameRegister()); + + // Perform tail call to the entry. 
+ ExternalReference ref = + ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); + + __ TailCallExternalReference(ref, 2, 1); +} + + +// IC register specifications +const Register LoadIC::ReceiverRegister() { return a1; } +const Register LoadIC::NameRegister() { return a2; } + + +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return a0; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return a3; +} + + +const Register StoreIC::ReceiverRegister() { return a1; } +const Register StoreIC::NameRegister() { return a2; } +const Register StoreIC::ValueRegister() { return a0; } + + +const Register KeyedStoreIC::MapRegister() { + return a3; +} + + +void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // The return address is in ra. + + __ Push(ReceiverRegister(), NameRegister()); + + __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); +} + + +void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { + // The return address is in ra. + Label slow, check_name, index_smi, index_name, property_array_property; + Label probe_dictionary, check_number_dictionary; + + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(key.is(a2)); + DCHECK(receiver.is(a1)); + + Isolate* isolate = masm->isolate(); + + // Check that the key is a smi. + __ JumpIfNotSmi(key, &check_name); + __ bind(&index_smi); + // Now the key is known to be a smi. This place is also jumped to from below + // where a numeric string is converted to a smi. + + GenerateKeyedLoadReceiverCheck( + masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow); + + // Check the receiver's map to see if it has fast elements. 
+ __ CheckFastElements(a0, a3, &check_number_dictionary); + + GenerateFastArrayLoad( + masm, receiver, key, a0, a3, a4, v0, NULL, &slow); + __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3); + __ Ret(); + + __ bind(&check_number_dictionary); + __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset)); + + // Check whether the elements is a number dictionary. + // a3: elements map + // a4: elements + __ LoadRoot(at, Heap::kHashTableMapRootIndex); + __ Branch(&slow, ne, a3, Operand(at)); + __ dsra32(a0, key, 0); + __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5); + __ Ret(); + + // Slow case, key and receiver still in a2 and a1. + __ bind(&slow); + __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), + 1, + a4, + a3); + GenerateRuntimeGetProperty(masm); + + __ bind(&check_name); + GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow); + + GenerateKeyedLoadReceiverCheck( + masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow); + + + // If the receiver is a fast-case object, check the keyed lookup + // cache. Otherwise probe the dictionary. + __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHashTableMapRootIndex); + __ Branch(&probe_dictionary, eq, a4, Operand(at)); + + // Load the map of the receiver, compute the keyed lookup cache hash + // based on 32 bits of the map pointer and the name hash. 
+ __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ dsll32(a3, a0, 0); + __ dsrl32(a3, a3, 0); + __ dsra(a3, a3, KeyedLookupCache::kMapHashShift); + __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset)); + __ dsra(at, a4, Name::kHashShift); + __ xor_(a3, a3, at); + int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; + __ And(a3, a3, Operand(mask)); + + // Load the key (consisting of map and unique name) from the cache and + // check for match. + Label load_in_object_property; + static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; + Label hit_on_nth_entry[kEntriesPerBucket]; + ExternalReference cache_keys = + ExternalReference::keyed_lookup_cache_keys(isolate); + __ li(a4, Operand(cache_keys)); + __ dsll(at, a3, kPointerSizeLog2 + 1); + __ daddu(a4, a4, at); + + for (int i = 0; i < kEntriesPerBucket - 1; i++) { + Label try_next_entry; + __ ld(a5, MemOperand(a4, kPointerSize * i * 2)); + __ Branch(&try_next_entry, ne, a0, Operand(a5)); + __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1))); + __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5)); + __ bind(&try_next_entry); + } + + __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2)); + __ Branch(&slow, ne, a0, Operand(a5)); + __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1))); + __ Branch(&slow, ne, key, Operand(a5)); + + // Get field offset. + // a0 : receiver's map + // a3 : lookup cache index + ExternalReference cache_field_offsets = + ExternalReference::keyed_lookup_cache_field_offsets(isolate); + + // Hit on nth entry. + for (int i = kEntriesPerBucket - 1; i >= 0; i--) { + __ bind(&hit_on_nth_entry[i]); + __ li(a4, Operand(cache_field_offsets)); + + // TODO(yy) This data structure does NOT follow natural pointer size. 
+ __ dsll(at, a3, kPointerSizeLog2 - 1); + __ daddu(at, a4, at); + __ lwu(a5, MemOperand(at, kPointerSize / 2 * i)); + + __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset)); + __ Dsubu(a5, a5, a6); + __ Branch(&property_array_property, ge, a5, Operand(zero_reg)); + if (i != 0) { + __ Branch(&load_in_object_property); + } + } + + // Load in-object property. + __ bind(&load_in_object_property); + __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset)); + // Index from start of object. + __ daddu(a6, a6, a5); + // Remove the heap tag. + __ Dsubu(receiver, receiver, Operand(kHeapObjectTag)); + __ dsll(at, a6, kPointerSizeLog2); + __ daddu(at, receiver, at); + __ ld(v0, MemOperand(at)); + __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), + 1, + a4, + a3); + __ Ret(); + + // Load property array property. + __ bind(&property_array_property); + __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag); + __ dsll(v0, a5, kPointerSizeLog2); + __ Daddu(v0, v0, a1); + __ ld(v0, MemOperand(v0)); + __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), + 1, + a4, + a3); + __ Ret(); + + + // Do a quick inline probe of the receiver's dictionary, if it + // exists. + __ bind(&probe_dictionary); + // a3: elements + __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); + GenerateGlobalInstanceTypeCheck(masm, a0, &slow); + // Load the property to v0. + GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4); + __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), + 1, + a4, + a3); + __ Ret(); + + __ bind(&index_name); + __ IndexFromHash(a3, key); + // Now jump to the place where smi keys are handled. + __ Branch(&index_smi); +} + + +void KeyedLoadIC::GenerateString(MacroAssembler* masm) { + // Return address is in ra. 
+ Label miss; + + Register receiver = ReceiverRegister(); + Register index = NameRegister(); + Register scratch = a3; + Register result = v0; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); + + StringCharAtGenerator char_at_generator(receiver, + index, + scratch, + result, + &miss, // When not a string. + &miss, // When not a number. + &miss, // When index out of range. + STRING_INDEX_IS_ARRAY_INDEX); + char_at_generator.GenerateFast(masm); + __ Ret(); + + StubRuntimeCallHelper call_helper; + char_at_generator.GenerateSlow(masm, call_helper); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictMode strict_mode) { + // Push receiver, key and value for runtime call. + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); + + __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode. + __ Push(a0); + + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); +} + + +static void KeyedStoreGenerateGenericHelper( + MacroAssembler* masm, + Label* fast_object, + Label* fast_double, + Label* slow, + KeyedStoreCheckMap check_map, + KeyedStoreIncrementLength increment_length, + Register value, + Register key, + Register receiver, + Register receiver_map, + Register elements_map, + Register elements) { + Label transition_smi_elements; + Label finish_object_store, non_double_value, transition_double_elements; + Label fast_double_without_map_check; + + // Fast case: Do the store, could be either Object or double. + __ bind(fast_object); + Register scratch_value = a4; + Register address = a5; + if (check_map == kCheckMap) { + __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ Branch(fast_double, ne, elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + } + + // HOLECHECK: guards "A[i] = V" + // We have to go to the runtime if the current value is the hole because + // there may be a callback on the element. 
+ Label holecheck_passed1; + __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag); + __ SmiScale(at, key, kPointerSizeLog2); + __ daddu(address, address, at); + __ ld(scratch_value, MemOperand(address)); + + __ Branch(&holecheck_passed1, ne, scratch_value, + Operand(masm->isolate()->factory()->the_hole_value())); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, + slow); + + __ bind(&holecheck_passed1); + + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(value, &non_smi_value); + + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); + __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + } + // It's irrelevant whether array is smi-only or not when writing a smi. + __ Daddu(address, elements, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ SmiScale(scratch_value, key, kPointerSizeLog2); + __ Daddu(address, address, scratch_value); + __ sd(value, MemOperand(address)); + __ Ret(); + + __ bind(&non_smi_value); + // Escape to elements kind transition case. + __ CheckFastObjectElements(receiver_map, scratch_value, + &transition_smi_elements); + + // Fast elements array, store the value to the elements backing store. + __ bind(&finish_object_store); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); + __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + } + __ Daddu(address, elements, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ SmiScale(scratch_value, key, kPointerSizeLog2); + __ Daddu(address, address, scratch_value); + __ sd(value, MemOperand(address)); + // Update write barrier for the elements array address. + __ mov(scratch_value, value); // Preserve the value which is returned. 
+ __ RecordWrite(elements, + address, + scratch_value, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Ret(); + + __ bind(fast_double); + if (check_map == kCheckMap) { + // Check for fast double array case. If this fails, call through to the + // runtime. + __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); + __ Branch(slow, ne, elements_map, Operand(at)); + } + + // HOLECHECK: guards "A[i] double hole?" + // We have to see if the double version of the hole is present. If so + // go to the runtime. + __ Daddu(address, elements, + Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) + - kHeapObjectTag)); + __ SmiScale(at, key, kPointerSizeLog2); + __ daddu(address, address, at); + __ lw(scratch_value, MemOperand(address)); + __ Branch(&fast_double_without_map_check, ne, scratch_value, + Operand(kHoleNanUpper32)); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, + slow); + + __ bind(&fast_double_without_map_check); + __ StoreNumberToDoubleElements(value, + key, + elements, // Overwritten. + a3, // Scratch regs... + a4, + a5, + &transition_double_elements); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); + __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + } + __ Ret(); + + __ bind(&transition_smi_elements); + // Transition the array appropriately depending on the value type. + __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + __ Branch(&non_double_value, ne, a4, Operand(at)); + + // Value is a double. Transition FAST_SMI_ELEMENTS -> + // FAST_DOUBLE_ELEMENTS and complete the store. 
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS, + receiver_map, + a4, + slow); + AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, receiver_map, mode, slow); + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&fast_double_without_map_check); + + __ bind(&non_double_value); + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_ELEMENTS, + receiver_map, + a4, + slow); + mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, receiver_map, mode, slow); + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&finish_object_store); + + __ bind(&transition_double_elements); + // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a + // HeapNumber. 
Make sure that the receiver is a Array with FAST_ELEMENTS and + // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS, + receiver_map, + a4, + slow); + mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject( + masm, receiver, key, value, receiver_map, mode, slow); + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&finish_object_store); +} + + +void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, + StrictMode strict_mode) { + // ---------- S t a t e -------------- + // -- a0 : value + // -- a1 : key + // -- a2 : receiver + // -- ra : return address + // ----------------------------------- + Label slow, fast_object, fast_object_grow; + Label fast_double, fast_double_grow; + Label array, extra, check_if_double_array; + + // Register usage. + Register value = ValueRegister(); + Register key = NameRegister(); + Register receiver = ReceiverRegister(); + DCHECK(value.is(a0)); + Register receiver_map = a3; + Register elements_map = a6; + Register elements = a7; // Elements array of the receiver. + // a4 and a5 are used as general scratch registers. + + // Check that the key is a smi. + __ JumpIfNotSmi(key, &slow); + // Check that the object isn't a smi. + __ JumpIfSmi(receiver, &slow); + // Get the map of the object. + __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + // Check that the receiver does not require access checks and is not observed. + // The generic stub does not perform map checks or handle observed objects. + __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); + __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded | + 1 << Map::kIsObserved)); + __ Branch(&slow, ne, a4, Operand(zero_reg)); + // Check if the object is a JS array or not. 
+ __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); + __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE)); + // Check that the object is some kind of JSObject. + __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE)); + + // Object case: Check key against length in the elements array. + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + // Check array bounds. Both the key and the length of FixedArray are smis. + __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset)); + __ Branch(&fast_object, lo, key, Operand(a4)); + + // Slow case, handle jump to runtime. + __ bind(&slow); + // Entry registers are intact. + // a0: value. + // a1: key. + // a2: receiver. + GenerateRuntimeSetProperty(masm, strict_mode); + + // Extra capacity case: Check if there is extra capacity to + // perform the store and update the length. Used for adding one + // element to the array by writing to array[array.length]. + __ bind(&extra); + // Condition code from comparing key and array length is still available. + // Only support writing to array[array.length]. + __ Branch(&slow, ne, key, Operand(a4)); + // Check for room in the elements backing store. + // Both the key and the length of FixedArray are smis. + __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset)); + __ Branch(&slow, hs, key, Operand(a4)); + __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ Branch( + &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex); + + __ jmp(&fast_object_grow); + + __ bind(&check_if_double_array); + __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex); + __ jmp(&fast_double_grow); + + // Array case: Get the length and the elements array from the JS + // array. Check that the array is in fast mode (and writable); if it + // is the length is always a smi. 
+ __ bind(&array); + __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + + // Check the key against the length in the array. + __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Branch(&extra, hs, key, Operand(a4)); + + KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, + &slow, kCheckMap, kDontIncrementLength, + value, key, receiver, receiver_map, + elements_map, elements); + KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, + &slow, kDontCheckMap, kIncrementLength, + value, key, receiver, receiver_map, + elements_map, elements); +} + + +void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { + // Return address is in ra. + Label slow; + + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch1 = a3; + Register scratch2 = a4; + DCHECK(!scratch1.is(receiver) && !scratch1.is(key)); + DCHECK(!scratch2.is(receiver) && !scratch2.is(key)); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &slow); + + // Check that the key is an array index, that is Uint32. + __ And(a4, key, Operand(kSmiTagMask | kSmiSignMask)); + __ Branch(&slow, ne, a4, Operand(zero_reg)); + + // Get the map of the receiver. + __ ld(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); + + // Check that it has indexed interceptor and access checks + // are not enabled for this object. + __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); + __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask)); + __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor)); + // Everything is fine, call runtime. + __ Push(receiver, key); // Receiver, key. + + // Perform tail call to the entry. 
+ __ TailCallExternalReference(ExternalReference( + IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1); + + __ bind(&slow); + GenerateMiss(masm); +} + + +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { + // Push receiver, key and value for runtime call. + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); + + ExternalReference ref = + ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +void StoreIC::GenerateSlow(MacroAssembler* masm) { + // Push receiver, key and value for runtime call. + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); + + // The slow case calls into the runtime to complete the store without causing + // an IC miss that would otherwise cause a transition to the generic stub. + ExternalReference ref = + ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { + // Push receiver, key and value for runtime call. + // We can't use MultiPush as the order of the registers is important. + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); + // The slow case calls into the runtime to complete the store without causing + // an IC miss that would otherwise cause a transition to the generic stub. + ExternalReference ref = + ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); + + __ TailCallExternalReference(ref, 3, 1); +} + + +void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(a1)); + DCHECK(name.is(a2)); + DCHECK(ValueRegister().is(a0)); + + // Get the receiver from the stack and probe the stub cache. 
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); + masm->isolate()->stub_cache()->GenerateProbe( + masm, flags, receiver, name, a3, a4, a5, a6); + + // Cache miss: Jump to runtime. + GenerateMiss(masm); +} + + +void StoreIC::GenerateMiss(MacroAssembler* masm) { + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); + // Perform tail call to the entry. + ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss), + masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +void StoreIC::GenerateNormal(MacroAssembler* masm) { + Label miss; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + Register dictionary = a3; + DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5)); + + __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + + GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5); + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5); + __ Ret(); + + __ bind(&miss); + __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5); + GenerateMiss(masm); +} + + +void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictMode strict_mode) { + __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); + + __ li(a0, Operand(Smi::FromInt(strict_mode))); + __ Push(a0); + + // Do tail-call to runtime routine. + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); +} + + +#undef __ + + +Condition CompareIC::ComputeCondition(Token::Value op) { + switch (op) { + case Token::EQ_STRICT: + case Token::EQ: + return eq; + case Token::LT: + return lt; + case Token::GT: + return gt; + case Token::LTE: + return le; + case Token::GTE: + return ge; + default: + UNREACHABLE(); + return kNoCondition; + } +} + + +bool CompareIC::HasInlinedSmiCode(Address address) { + // The address of the instruction following the call. 
+ Address andi_instruction_address = + address + Assembler::kCallTargetAddressOffset; + + // If the instruction following the call is not a andi at, rx, #yyy, nothing + // was inlined. + Instr instr = Assembler::instr_at(andi_instruction_address); + return Assembler::IsAndImmediate(instr) && + Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()); +} + + +void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { + Address andi_instruction_address = + address + Assembler::kCallTargetAddressOffset; + + // If the instruction following the call is not a andi at, rx, #yyy, nothing + // was inlined. + Instr instr = Assembler::instr_at(andi_instruction_address); + if (!(Assembler::IsAndImmediate(instr) && + Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) { + return; + } + + // The delta to the start of the map check instruction and the + // condition code uses at the patched jump. + int delta = Assembler::GetImmediate16(instr); + delta += Assembler::GetRs(instr) * kImm16Mask; + // If the delta is 0 the instruction is andi at, zero_reg, #0 which also + // signals that nothing was inlined. + if (delta == 0) { + return; + } + + if (FLAG_trace_ic) { + PrintF("[ patching ic at %p, andi=%p, delta=%d\n", + address, andi_instruction_address, delta); + } + + Address patch_address = + andi_instruction_address - delta * Instruction::kInstrSize; + Instr instr_at_patch = Assembler::instr_at(patch_address); + Instr branch_instr = + Assembler::instr_at(patch_address + Instruction::kInstrSize); + // This is patching a conditional "jump if not smi/jump if smi" site. + // Enabling by changing from + // andi at, rx, 0 + // Branch <target>, eq, at, Operand(zero_reg) + // to: + // andi at, rx, #kSmiTagMask + // Branch <target>, ne, at, Operand(zero_reg) + // and vice-versa to be disabled again. 
+ CodePatcher patcher(patch_address, 2); + Register reg = Register::from_code(Assembler::GetRs(instr_at_patch)); + if (check == ENABLE_INLINED_SMI_CHECK) { + DCHECK(Assembler::IsAndImmediate(instr_at_patch)); + DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch)); + patcher.masm()->andi(at, reg, kSmiTagMask); + } else { + DCHECK(check == DISABLE_INLINED_SMI_CHECK); + DCHECK(Assembler::IsAndImmediate(instr_at_patch)); + patcher.masm()->andi(at, reg, 0); + } + DCHECK(Assembler::IsBranch(branch_instr)); + if (Assembler::IsBeq(branch_instr)) { + patcher.ChangeBranchCondition(ne); + } else { + DCHECK(Assembler::IsBne(branch_instr)); + patcher.ChangeBranchCondition(eq); + } +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/lithium-codegen-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/lithium-codegen-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/lithium-codegen-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/lithium-codegen-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5950 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/code-stubs.h" +#include "src/hydrogen-osr.h" +#include "src/mips64/lithium-codegen-mips64.h" +#include "src/mips64/lithium-gap-resolver-mips64.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +class SafepointGenerator V8_FINAL : public CallWrapper { + public: + SafepointGenerator(LCodeGen* codegen, + LPointerMap* pointers, + Safepoint::DeoptMode mode) + : codegen_(codegen), + pointers_(pointers), + deopt_mode_(mode) { } + virtual ~SafepointGenerator() {} + + virtual void BeforeCall(int call_size) const V8_OVERRIDE {} + + virtual void AfterCall() const V8_OVERRIDE { + codegen_->RecordSafepoint(pointers_, deopt_mode_); + } + + private: + LCodeGen* codegen_; + LPointerMap* pointers_; + Safepoint::DeoptMode deopt_mode_; +}; + + +#define __ masm()-> + +bool LCodeGen::GenerateCode() { + LPhase phase("Z_Code generation", chunk()); + DCHECK(is_unused()); + status_ = GENERATING; + + // Open a frame scope to indicate that there is a frame on the stack. The + // NONE indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). 
+ FrameScope frame_scope(masm_, StackFrame::NONE); + + return GeneratePrologue() && + GenerateBody() && + GenerateDeferredCode() && + GenerateDeoptJumpTable() && + GenerateSafepointTable(); +} + + +void LCodeGen::FinishCode(Handle<Code> code) { + DCHECK(is_done()); + code->set_stack_slots(GetStackSlotCount()); + code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); + PopulateDeoptimizationData(code); +} + + +void LCodeGen::SaveCallerDoubles() { + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); + Comment(";;; Save clobbered callee double registers"); + int count = 0; + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + while (!save_iterator.Done()) { + __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(sp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } +} + + +void LCodeGen::RestoreCallerDoubles() { + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); + Comment(";;; Restore clobbered callee double registers"); + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + int count = 0; + while (!save_iterator.Done()) { + __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(sp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } +} + + +bool LCodeGen::GeneratePrologue() { + DCHECK(is_generating()); + + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); + +#ifdef DEBUG + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ stop("stop_at"); + } +#endif + + // a1: Callee's JS function. + // cp: Callee's context. + // fp: Caller's frame pointer. + // lr: Caller's pc. 
+ + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + if (info_->this_has_uses() && + info_->strict_mode() == SLOPPY && + !info_->is_native()) { + Label ok; + int receiver_offset = info_->scope()->num_parameters() * kPointerSize; + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ ld(a2, MemOperand(sp, receiver_offset)); + __ Branch(&ok, ne, a2, Operand(at)); + + __ ld(a2, GlobalObjectOperand()); + __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); + + __ sd(a2, MemOperand(sp, receiver_offset)); + + __ bind(&ok); + } + } + + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } + frame_is_built_ = true; + info_->AddNoFrameRange(0, masm_->pc_offset()); + } + + // Reserve space for the stack slots needed by the code. + int slots = GetStackSlotCount(); + if (slots > 0) { + if (FLAG_debug_code) { + __ Dsubu(sp, sp, Operand(slots * kPointerSize)); + __ Push(a0, a1); + __ Daddu(a0, sp, Operand(slots * kPointerSize)); + __ li(a1, Operand(kSlotsZapValue)); + Label loop; + __ bind(&loop); + __ Dsubu(a0, a0, Operand(kPointerSize)); + __ sd(a1, MemOperand(a0, 2 * kPointerSize)); + __ Branch(&loop, ne, a0, Operand(sp)); + __ Pop(a0, a1); + } else { + __ Dsubu(sp, sp, Operand(slots * kPointerSize)); + } + } + + if (info()->saves_caller_doubles()) { + SaveCallerDoubles(); + } + + // Possibly allocate a local context. + int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + bool need_write_barrier = true; + // Argument to NewContext is the function, which is in a1. 
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(isolate(), heap_slots); + __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; + } else { + __ push(a1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); + } + RecordSafepoint(Safepoint::kNoLazyDeopt); + // Context is returned in v0. It replaces the context passed to us. + // It's saved in the stack and kept live in cp. + __ mov(cp, v0); + __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Copy any necessary parameters into the context. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Variable* var = scope()->parameter(i); + if (var->IsContextSlot()) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ ld(a0, MemOperand(fp, parameter_offset)); + // Store it in the context. + MemOperand target = ContextOperand(cp, var->index()); + __ sd(a0, target); + // Update the write barrier. This clobbers a3 and a0. + if (need_write_barrier) { + __ RecordWriteContextSlot( + cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(cp, a0, &done); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } + } + } + Comment(";;; End allocate local context"); + } + + // Trace the call. + if (FLAG_trace && info()->IsOptimizing()) { + // We have not executed any compiled code yet, so cp still holds the + // incoming context. + __ CallRuntime(Runtime::kTraceEnter, 0); + } + return !is_aborted(); +} + + +void LCodeGen::GenerateOsrPrologue() { + // Generate the OSR entry prologue at the first unknown OSR value, or if there + // are none, at the OSR entrypoint instruction. 
+ if (osr_pc_offset_ >= 0) return; + + osr_pc_offset_ = masm()->pc_offset(); + + // Adjust the frame size, subsuming the unoptimized frame into the + // optimized frame. + int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); + DCHECK(slots >= 0); + __ Dsubu(sp, sp, Operand(slots * kPointerSize)); +} + + +void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { + if (instr->IsCall()) { + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + } + if (!instr->IsLazyBailout() && !instr->IsGap()) { + safepoints_.BumpLastLazySafepointIndex(); + } +} + + +bool LCodeGen::GenerateDeferredCode() { + DCHECK(is_generating()); + if (deferred_.length() > 0) { + for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { + LDeferredCode* code = deferred_[i]; + + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition( + chunk()->graph()->SourcePositionToScriptPosition(value->position())); + + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); + __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Build frame"); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); + frame_is_built_ = true; + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ Daddu(fp, sp, + Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + Comment(";;; Deferred code"); + } + code->Generate(); + if (NeedsDeferredFrame()) { + Comment(";;; Destroy frame"); + DCHECK(frame_is_built_); + __ pop(at); + __ MultiPop(cp.bit() | fp.bit() | ra.bit()); + frame_is_built_ = false; + } + __ jmp(code->exit()); + } + } + // Deferred code is the last part of the instruction sequence. Mark + // the generated code as done unless we bailed out. 
+ if (!is_aborted()) status_ = DONE; + return !is_aborted(); +} + + +bool LCodeGen::GenerateDeoptJumpTable() { + if (deopt_jump_table_.length() > 0) { + Comment(";;; -------------------- Jump table --------------------"); + } + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + Label table_start; + __ bind(&table_start); + Label needs_frame; + for (int i = 0; i < deopt_jump_table_.length(); i++) { + __ bind(&deopt_jump_table_[i].label); + Address entry = deopt_jump_table_[i].address; + Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); + } else { + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + } + __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); + if (deopt_jump_table_[i].needs_frame) { + DCHECK(!info()->saves_caller_doubles()); + if (needs_frame.is_bound()) { + __ Branch(&needs_frame); + } else { + __ bind(&needs_frame); + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + DCHECK(info()->IsStub()); + __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ Daddu(fp, sp, + Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + __ Call(t9); + } + } else { + if (info()->saves_caller_doubles()) { + DCHECK(info()->IsStub()); + RestoreCallerDoubles(); + } + __ Call(t9); + } + } + __ RecordComment("]"); + + // The deoptimization jump table is the last part of the instruction + // sequence. Mark the generated code as done unless we bailed out. 
+ if (!is_aborted()) status_ = DONE; + return !is_aborted(); +} + + +bool LCodeGen::GenerateSafepointTable() { + DCHECK(is_done()); + safepoints_.Emit(masm(), GetStackSlotCount()); + return !is_aborted(); +} + + +Register LCodeGen::ToRegister(int index) const { + return Register::FromAllocationIndex(index); +} + + +DoubleRegister LCodeGen::ToDoubleRegister(int index) const { + return DoubleRegister::FromAllocationIndex(index); +} + + +Register LCodeGen::ToRegister(LOperand* op) const { + DCHECK(op->IsRegister()); + return ToRegister(op->index()); +} + + +Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { + if (op->IsRegister()) { + return ToRegister(op->index()); + } else if (op->IsConstantOperand()) { + LConstantOperand* const_op = LConstantOperand::cast(op); + HConstant* constant = chunk_->LookupConstant(const_op); + Handle<Object> literal = constant->handle(isolate()); + Representation r = chunk_->LookupLiteralRepresentation(const_op); + if (r.IsInteger32()) { + DCHECK(literal->IsNumber()); + __ li(scratch, Operand(static_cast<int32_t>(literal->Number()))); + } else if (r.IsSmi()) { + DCHECK(constant->HasSmiValue()); + __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); + } else if (r.IsDouble()) { + Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); + } else { + DCHECK(r.IsSmiOrTagged()); + __ li(scratch, literal); + } + return scratch; + } else if (op->IsStackSlot()) { + __ ld(scratch, ToMemOperand(op)); + return scratch; + } + UNREACHABLE(); + return scratch; +} + + +DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { + DCHECK(op->IsDoubleRegister()); + return ToDoubleRegister(op->index()); +} + + +DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, + FloatRegister flt_scratch, + DoubleRegister dbl_scratch) { + if (op->IsDoubleRegister()) { + return ToDoubleRegister(op->index()); + } else if (op->IsConstantOperand()) { + LConstantOperand* const_op = LConstantOperand::cast(op); + HConstant* constant = 
chunk_->LookupConstant(const_op); + Handle<Object> literal = constant->handle(isolate()); + Representation r = chunk_->LookupLiteralRepresentation(const_op); + if (r.IsInteger32()) { + DCHECK(literal->IsNumber()); + __ li(at, Operand(static_cast<int32_t>(literal->Number()))); + __ mtc1(at, flt_scratch); + __ cvt_d_w(dbl_scratch, flt_scratch); + return dbl_scratch; + } else if (r.IsDouble()) { + Abort(kUnsupportedDoubleImmediate); + } else if (r.IsTagged()) { + Abort(kUnsupportedTaggedImmediate); + } + } else if (op->IsStackSlot()) { + MemOperand mem_op = ToMemOperand(op); + __ ldc1(dbl_scratch, mem_op); + return dbl_scratch; + } + UNREACHABLE(); + return dbl_scratch; +} + + +Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + return constant->handle(isolate()); +} + + +bool LCodeGen::IsInteger32(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); +} + + +bool LCodeGen::IsSmi(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmi(); +} + + +int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { + // return ToRepresentation(op, Representation::Integer32()); + HConstant* constant = chunk_->LookupConstant(op); + return constant->Integer32Value(); +} + + +int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op, + const Representation& r) const { + HConstant* constant = chunk_->LookupConstant(op); + int32_t value = constant->Integer32Value(); + if (r.IsInteger32()) return value; + DCHECK(r.IsSmiOrTagged()); + return reinterpret_cast<int64_t>(Smi::FromInt(value)); +} + + +Smi* LCodeGen::ToSmi(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + return Smi::FromInt(constant->Integer32Value()); +} + + +double LCodeGen::ToDouble(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + 
DCHECK(constant->HasDoubleValue()); + return constant->DoubleValue(); +} + + +Operand LCodeGen::ToOperand(LOperand* op) { + if (op->IsConstantOperand()) { + LConstantOperand* const_op = LConstantOperand::cast(op); + HConstant* constant = chunk()->LookupConstant(const_op); + Representation r = chunk_->LookupLiteralRepresentation(const_op); + if (r.IsSmi()) { + DCHECK(constant->HasSmiValue()); + return Operand(Smi::FromInt(constant->Integer32Value())); + } else if (r.IsInteger32()) { + DCHECK(constant->HasInteger32Value()); + return Operand(constant->Integer32Value()); + } else if (r.IsDouble()) { + Abort(kToOperandUnsupportedDoubleImmediate); + } + DCHECK(r.IsTagged()); + return Operand(constant->handle(isolate())); + } else if (op->IsRegister()) { + return Operand(ToRegister(op)); + } else if (op->IsDoubleRegister()) { + Abort(kToOperandIsDoubleRegisterUnimplemented); + return Operand((int64_t)0); + } + // Stack slots not implemented, use ToMemOperand instead. + UNREACHABLE(); + return Operand((int64_t)0); +} + + +static int ArgumentsOffsetWithoutFrame(int index) { + DCHECK(index < 0); + return -(index + 1) * kPointerSize; +} + + +MemOperand LCodeGen::ToMemOperand(LOperand* op) const { + DCHECK(!op->IsRegister()); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + if (NeedsEagerFrame()) { + return MemOperand(fp, StackSlotOffset(op->index())); + } else { + // Retrieve parameter without eager stack-frame relative to the + // stack-pointer. + return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); + } +} + + +MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { + DCHECK(op->IsDoubleStackSlot()); + if (NeedsEagerFrame()) { + // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); + return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize); + } else { + // Retrieve parameter without eager stack-frame relative to the + // stack-pointer. 
+ // return MemOperand( + // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); + return MemOperand( + sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize); + } +} + + +void LCodeGen::WriteTranslation(LEnvironment* environment, + Translation* translation) { + if (environment == NULL) return; + + // The translation includes one command per value in the environment. + int translation_size = environment->translation_size(); + // The output frame height does not include the parameters. + int height = translation_size - environment->parameter_count(); + + WriteTranslation(environment->outer(), translation); + bool has_closure_id = !info()->closure().is_null() && + !info()->closure().is_identical_to(environment->closure()); + int closure_id = has_closure_id + ? DefineDeoptimizationLiteral(environment->closure()) + : Translation::kSelfLiteralId; + + switch (environment->frame_type()) { + case JS_FUNCTION: + translation->BeginJSFrame(environment->ast_id(), closure_id, height); + break; + case JS_CONSTRUCT: + translation->BeginConstructStubFrame(closure_id, translation_size); + break; + case JS_GETTER: + DCHECK(translation_size == 1); + DCHECK(height == 0); + translation->BeginGetterStubFrame(closure_id); + break; + case JS_SETTER: + DCHECK(translation_size == 2); + DCHECK(height == 0); + translation->BeginSetterStubFrame(closure_id); + break; + case STUB: + translation->BeginCompiledStubFrame(); + break; + case ARGUMENTS_ADAPTOR: + translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); + break; + } + + int object_index = 0; + int dematerialized_index = 0; + for (int i = 0; i < translation_size; ++i) { + LOperand* value = environment->values()->at(i); + AddToTranslation(environment, + translation, + value, + environment->HasTaggedValueAt(i), + environment->HasUint32ValueAt(i), + &object_index, + &dematerialized_index); + } +} + + +void LCodeGen::AddToTranslation(LEnvironment* environment, + Translation* translation, + LOperand* op, + bool 
is_tagged, + bool is_uint32, + int* object_index_pointer, + int* dematerialized_index_pointer) { + if (op == LEnvironment::materialization_marker()) { + int object_index = (*object_index_pointer)++; + if (environment->ObjectIsDuplicateAt(object_index)) { + int dupe_of = environment->ObjectDuplicateOfAt(object_index); + translation->DuplicateObject(dupe_of); + return; + } + int object_length = environment->ObjectLengthAt(object_index); + if (environment->ObjectIsArgumentsAt(object_index)) { + translation->BeginArgumentsObject(object_length); + } else { + translation->BeginCapturedObject(object_length); + } + int dematerialized_index = *dematerialized_index_pointer; + int env_offset = environment->translation_size() + dematerialized_index; + *dematerialized_index_pointer += object_length; + for (int i = 0; i < object_length; ++i) { + LOperand* value = environment->values()->at(env_offset + i); + AddToTranslation(environment, + translation, + value, + environment->HasTaggedValueAt(env_offset + i), + environment->HasUint32ValueAt(env_offset + i), + object_index_pointer, + dematerialized_index_pointer); + } + return; + } + + if (op->IsStackSlot()) { + if (is_tagged) { + translation->StoreStackSlot(op->index()); + } else if (is_uint32) { + translation->StoreUint32StackSlot(op->index()); + } else { + translation->StoreInt32StackSlot(op->index()); + } + } else if (op->IsDoubleStackSlot()) { + translation->StoreDoubleStackSlot(op->index()); + } else if (op->IsRegister()) { + Register reg = ToRegister(op); + if (is_tagged) { + translation->StoreRegister(reg); + } else if (is_uint32) { + translation->StoreUint32Register(reg); + } else { + translation->StoreInt32Register(reg); + } + } else if (op->IsDoubleRegister()) { + DoubleRegister reg = ToDoubleRegister(op); + translation->StoreDoubleRegister(reg); + } else if (op->IsConstantOperand()) { + HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); + int src_index = 
DefineDeoptimizationLiteral(constant->handle(isolate())); + translation->StoreLiteral(src_index); + } else { + UNREACHABLE(); + } +} + + +void LCodeGen::CallCode(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr) { + CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::CallCodeGeneric(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr, + SafepointMode safepoint_mode) { + DCHECK(instr != NULL); + __ Call(code, mode); + RecordSafepointWithLazyDeopt(instr, safepoint_mode); +} + + +void LCodeGen::CallRuntime(const Runtime::Function* function, + int num_arguments, + LInstruction* instr, + SaveFPRegsMode save_doubles) { + DCHECK(instr != NULL); + + __ CallRuntime(function, num_arguments, save_doubles); + + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::LoadContextFromDeferred(LOperand* context) { + if (context->IsRegister()) { + __ Move(cp, ToRegister(context)); + } else if (context->IsStackSlot()) { + __ ld(cp, ToMemOperand(context)); + } else if (context->IsConstantOperand()) { + HConstant* constant = + chunk_->LookupConstant(LConstantOperand::cast(context)); + __ li(cp, Handle<Object>::cast(constant->handle(isolate()))); + } else { + UNREACHABLE(); + } +} + + +void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, + int argc, + LInstruction* instr, + LOperand* context) { + LoadContextFromDeferred(context); + __ CallRuntimeSaveDoubles(id); + RecordSafepointWithRegisters( + instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); +} + + +void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, + Safepoint::DeoptMode mode) { + environment->set_has_been_used(); + if (!environment->HasBeenRegistered()) { + // Physical stack frame layout: + // -x ............. -4 0 ..................................... 
y + // [incoming arguments] [spill slots] [pushed outgoing arguments] + + // Layout of the environment: + // 0 ..................................................... size-1 + // [parameters] [locals] [expression stack including arguments] + + // Layout of the translation: + // 0 ........................................................ size - 1 + 4 + // [expression stack including arguments] [locals] [4 words] [parameters] + // |>------------ translation_size ------------<| + + int frame_count = 0; + int jsframe_count = 0; + for (LEnvironment* e = environment; e != NULL; e = e->outer()) { + ++frame_count; + if (e->frame_type() == JS_FUNCTION) { + ++jsframe_count; + } + } + Translation translation(&translations_, frame_count, jsframe_count, zone()); + WriteTranslation(environment, &translation); + int deoptimization_index = deoptimizations_.length(); + int pc_offset = masm()->pc_offset(); + environment->Register(deoptimization_index, + translation.index(), + (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); + deoptimizations_.Add(environment, zone()); + } +} + + +void LCodeGen::DeoptimizeIf(Condition condition, + LEnvironment* environment, + Deoptimizer::BailoutType bailout_type, + Register src1, + const Operand& src2) { + RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); + DCHECK(environment->HasBeenRegistered()); + int id = environment->deoptimization_index(); + DCHECK(info()->IsOptimizing() || info()->IsStub()); + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); + if (entry == NULL) { + Abort(kBailoutWasNotPrepared); + return; + } + + if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { + Register scratch = scratch0(); + ExternalReference count = ExternalReference::stress_deopt_count(isolate()); + Label no_deopt; + __ Push(a1, scratch); + __ li(scratch, Operand(count)); + __ lw(a1, MemOperand(scratch)); + __ Subu(a1, a1, Operand(1)); + __ Branch(&no_deopt, ne, a1, Operand(zero_reg)); + __ li(a1, Operand(FLAG_deopt_every_n_times)); + __ sw(a1, MemOperand(scratch)); + __ Pop(a1, scratch); + + __ Call(entry, RelocInfo::RUNTIME_ENTRY); + __ bind(&no_deopt); + __ sw(a1, MemOperand(scratch)); + __ Pop(a1, scratch); + } + + if (info()->ShouldTrapOnDeopt()) { + Label skip; + if (condition != al) { + __ Branch(&skip, NegateCondition(condition), src1, src2); + } + __ stop("trap_on_deopt"); + __ bind(&skip); + } + + DCHECK(info()->IsStub() || frame_is_built_); + // Go through jump table if we need to handle condition, build frame, or + // restore caller doubles. + if (condition == al && frame_is_built_ && + !info()->saves_caller_doubles()) { + __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); + } else { + // We often have several deopts to the same entry, reuse the last + // jump entry if this is the case. 
+ if (deopt_jump_table_.is_empty() || + (deopt_jump_table_.last().address != entry) || + (deopt_jump_table_.last().bailout_type != bailout_type) || + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { + Deoptimizer::JumpTableEntry table_entry(entry, + bailout_type, + !frame_is_built_); + deopt_jump_table_.Add(table_entry, zone()); + } + __ Branch(&deopt_jump_table_.last().label, condition, src1, src2); + } +} + + +void LCodeGen::DeoptimizeIf(Condition condition, + LEnvironment* environment, + Register src1, + const Operand& src2) { + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? Deoptimizer::LAZY + : Deoptimizer::EAGER; + DeoptimizeIf(condition, environment, bailout_type, src1, src2); +} + + +void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { + int length = deoptimizations_.length(); + if (length == 0) return; + Handle<DeoptimizationInputData> data = + DeoptimizationInputData::New(isolate(), length, 0, TENURED); + + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); + data->SetTranslationByteArray(*translations); + data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); + data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); + if (info_->IsOptimizing()) { + // Reference to shared function info does not change between phases. + AllowDeferredHandleDereference allow_handle_dereference; + data->SetSharedFunctionInfo(*info_->shared_info()); + } else { + data->SetSharedFunctionInfo(Smi::FromInt(0)); + } + + Handle<FixedArray> literals = + factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); + { AllowDeferredHandleDereference copy_handles; + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); + } + + data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); + data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); + + // Populate the deoptimization entries. 
+ for (int i = 0; i < length; i++) { + LEnvironment* env = deoptimizations_[i]; + data->SetAstId(i, env->ast_id()); + data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); + data->SetArgumentsStackHeight(i, + Smi::FromInt(env->arguments_stack_height())); + data->SetPc(i, Smi::FromInt(env->pc_offset())); + } + code->set_deoptimization_data(*data); +} + + +int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { + int result = deoptimization_literals_.length(); + for (int i = 0; i < deoptimization_literals_.length(); ++i) { + if (deoptimization_literals_[i].is_identical_to(literal)) return i; + } + deoptimization_literals_.Add(literal, zone()); + return result; +} + + +void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { + DCHECK(deoptimization_literals_.length() == 0); + + const ZoneList<Handle<JSFunction> >* inlined_closures = + chunk()->inlined_closures(); + + for (int i = 0, length = inlined_closures->length(); + i < length; + i++) { + DefineDeoptimizationLiteral(inlined_closures->at(i)); + } + + inlined_function_count_ = deoptimization_literals_.length(); +} + + +void LCodeGen::RecordSafepointWithLazyDeopt( + LInstruction* instr, SafepointMode safepoint_mode) { + if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { + RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); + } else { + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); + } +} + + +void LCodeGen::RecordSafepoint( + LPointerMap* pointers, + Safepoint::Kind kind, + int arguments, + Safepoint::DeoptMode deopt_mode) { + DCHECK(expected_safepoint_kind_ == kind); + + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); + Safepoint safepoint = safepoints_.DefineSafepoint(masm(), + kind, arguments, deopt_mode); + for (int i = 0; i < operands->length(); i++) { + LOperand* pointer = operands->at(i); + if (pointer->IsStackSlot()) { + 
safepoint.DefinePointerSlot(pointer->index(), zone()); + } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { + safepoint.DefinePointerRegister(ToRegister(pointer), zone()); + } + } +} + + +void LCodeGen::RecordSafepoint(LPointerMap* pointers, + Safepoint::DeoptMode deopt_mode) { + RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); +} + + +void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { + LPointerMap empty_pointers(zone()); + RecordSafepoint(&empty_pointers, deopt_mode); +} + + +void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, + int arguments, + Safepoint::DeoptMode deopt_mode) { + RecordSafepoint( + pointers, Safepoint::kWithRegisters, arguments, deopt_mode); +} + + +void LCodeGen::RecordAndWritePosition(int position) { + if (position == RelocInfo::kNoPosition) return; + masm()->positions_recorder()->RecordPosition(position); + masm()->positions_recorder()->WriteRecordedPositions(); +} + + +static const char* LabelType(LLabel* label) { + if (label->is_loop_header()) return " (loop header)"; + if (label->is_osr_entry()) return " (OSR entry)"; + return ""; +} + + +void LCodeGen::DoLabel(LLabel* label) { + Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", + current_instruction_, + label->hydrogen_value()->id(), + label->block_id(), + LabelType(label)); + __ bind(label->label()); + current_block_ = label->block_id(); + DoGap(label); +} + + +void LCodeGen::DoParallelMove(LParallelMove* move) { + resolver_.Resolve(move); +} + + +void LCodeGen::DoGap(LGap* gap) { + for (int i = LGap::FIRST_INNER_POSITION; + i <= LGap::LAST_INNER_POSITION; + i++) { + LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); + LParallelMove* move = gap->GetParallelMove(inner_pos); + if (move != NULL) DoParallelMove(move); + } +} + + +void LCodeGen::DoInstructionGap(LInstructionGap* instr) { + DoGap(instr); +} + + +void LCodeGen::DoParameter(LParameter* instr) { + // Nothing to do. 
+} + + +void LCodeGen::DoCallStub(LCallStub* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->result()).is(v0)); + switch (instr->hydrogen()->major_key()) { + case CodeStub::RegExpExec: { + RegExpExecStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + case CodeStub::SubString: { + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + case CodeStub::StringCompare: { + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + default: + UNREACHABLE(); + } +} + + +void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { + GenerateOsrPrologue(); +} + + +void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + DCHECK(dividend.is(ToRegister(instr->result()))); + + // Theoretically, a variation of the branch-free code for integer division by + // a power of 2 (calculating the remainder via an additional multiplication + // (which gets simplified to an 'and') and subtraction) should be faster, and + // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to + // indicate that positive dividends are heavily favored, so the branching + // version performs better. + HMod* hmod = instr->hydrogen(); + int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); + Label dividend_is_not_negative, done; + + if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { + __ Branch(÷nd_is_not_negative, ge, dividend, Operand(zero_reg)); + // Note: The code below even works when right contains kMinInt. 
+ __ dsubu(dividend, zero_reg, dividend); + __ And(dividend, dividend, Operand(mask)); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); + } + __ Branch(USE_DELAY_SLOT, &done); + __ dsubu(dividend, zero_reg, dividend); + } + + __ bind(÷nd_is_not_negative); + __ And(dividend, dividend, Operand(mask)); + __ bind(&done); +} + + +void LCodeGen::DoModByConstI(LModByConstI* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + Register result = ToRegister(instr->result()); + DCHECK(!dividend.is(result)); + + if (divisor == 0) { + DeoptimizeIf(al, instr->environment()); + return; + } + + __ TruncatingDiv(result, dividend, Abs(divisor)); + __ Dmul(result, result, Operand(Abs(divisor))); + __ Dsubu(result, dividend, Operand(result)); + + // Check for negative zero. + HMod* hmod = instr->hydrogen(); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label remainder_not_zero; + __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); + DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg)); + __ bind(&remainder_not_zero); + } +} + + +void LCodeGen::DoModI(LModI* instr) { + HMod* hmod = instr->hydrogen(); + const Register left_reg = ToRegister(instr->left()); + const Register right_reg = ToRegister(instr->right()); + const Register result_reg = ToRegister(instr->result()); + + // div runs in the background while we check for special cases. + __ Dmod(result_reg, left_reg, right_reg); + + Label done; + // Check for x % 0, we have to deopt in this case because we can't return a + // NaN. + if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { + DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg)); + } + + // Check for kMinInt % -1, div will return kMinInt, which is not what we + // want. We have to deopt if we care about -0, because we can't return that. 
+ if (hmod->CheckFlag(HValue::kCanOverflow)) { + Label no_overflow_possible; + __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1)); + } else { + __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); + __ Branch(USE_DELAY_SLOT, &done); + __ mov(result_reg, zero_reg); + } + __ bind(&no_overflow_possible); + } + + // If we care about -0, test if the dividend is <0 and the result is 0. + __ Branch(&done, ge, left_reg, Operand(zero_reg)); + + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); + } + __ bind(&done); +} + + +void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + Register result = ToRegister(instr->result()); + DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); + + // Check for (0 / -x) that will produce negative zero. + HDiv* hdiv = instr->hydrogen(); + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { + DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); + } + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { + DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt)); + } + // Deoptimize if remainder will not be 0. + if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && + divisor != 1 && divisor != -1) { + int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); + __ And(at, dividend, Operand(mask)); + DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); + } + + if (divisor == -1) { // Nice shortcut, not needed for correctness. 
+ __ Dsubu(result, zero_reg, dividend); + return; + } + uint16_t shift = WhichPowerOf2Abs(divisor); + if (shift == 0) { + __ Move(result, dividend); + } else if (shift == 1) { + __ dsrl32(result, dividend, 31); + __ Daddu(result, dividend, Operand(result)); + } else { + __ dsra32(result, dividend, 31); + __ dsrl32(result, result, 32 - shift); + __ Daddu(result, dividend, Operand(result)); + } + if (shift > 0) __ dsra(result, result, shift); + if (divisor < 0) __ Dsubu(result, zero_reg, result); +} + + +void LCodeGen::DoDivByConstI(LDivByConstI* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + Register result = ToRegister(instr->result()); + DCHECK(!dividend.is(result)); + + if (divisor == 0) { + DeoptimizeIf(al, instr->environment()); + return; + } + + // Check for (0 / -x) that will produce negative zero. + HDiv* hdiv = instr->hydrogen(); + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { + DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); + } + + __ TruncatingDiv(result, dividend, Abs(divisor)); + if (divisor < 0) __ Subu(result, zero_reg, result); + + if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { + __ Dmul(scratch0(), result, Operand(divisor)); + __ Dsubu(scratch0(), scratch0(), dividend); + DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg)); + } +} + + +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. +void LCodeGen::DoDivI(LDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + const Register result = ToRegister(instr->result()); + + // On MIPS div is asynchronous - it will run in the background while we + // check for special cases. + __ Ddiv(result, dividend, divisor); + + // Check for x / 0. 
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg)); + } + + // Check for (0 / -x) that will produce negative zero. + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label left_not_zero; + __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); + DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg)); + __ bind(&left_not_zero); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow) && + !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + Label left_not_min_int; + __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); + DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1)); + __ bind(&left_not_min_int); + } + + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + // Calculate remainder. + Register remainder = ToRegister(instr->temp()); + if (kArchVariant != kMips64r6) { + __ mfhi(remainder); + } else { + __ dmod(remainder, dividend, divisor); + } + DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg)); + } +} + + +void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { + DoubleRegister addend = ToDoubleRegister(instr->addend()); + DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); + DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); + + // This is computed in-place. + DCHECK(addend.is(ToDoubleRegister(instr->result()))); + + __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0()); +} + + +void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { + Register dividend = ToRegister(instr->dividend()); + Register result = ToRegister(instr->result()); + int32_t divisor = instr->divisor(); + Register scratch = result.is(dividend) ? scratch0() : dividend; + DCHECK(!result.is(dividend) || !scratch.is(dividend)); + + // If the divisor is 1, return the dividend. 
+ if (divisor == 1) { + __ Move(result, dividend); + return; + } + + // If the divisor is positive, things are easy: There can be no deopts and we + // can simply do an arithmetic right shift. + uint16_t shift = WhichPowerOf2Abs(divisor); + if (divisor > 1) { + __ dsra(result, dividend, shift); + return; + } + + // If the divisor is negative, we have to negate and handle edge cases. + // Dividend can be the same register as result so save the value of it + // for checking overflow. + __ Move(scratch, dividend); + + __ Dsubu(result, zero_reg, dividend); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); + } + + __ Xor(scratch, scratch, result); + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt)); + } + return; + } + + // If the negation could not overflow, simply shifting is OK. + if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ dsra(result, result, shift); + return; + } + + Label no_overflow, done; + __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); + __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); + __ Branch(&done); + __ bind(&no_overflow); + __ dsra(result, result, shift); + __ bind(&done); +} + + +void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + Register result = ToRegister(instr->result()); + DCHECK(!dividend.is(result)); + + if (divisor == 0) { + DeoptimizeIf(al, instr->environment()); + return; + } + + // Check for (0 / -x) that will produce negative zero. 
+ HMathFloorOfDiv* hdiv = instr->hydrogen(); + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { + DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); + } + + // Easy case: We need no dynamic check for the dividend and the flooring + // division is the same as the truncating division. + if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || + (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { + __ TruncatingDiv(result, dividend, Abs(divisor)); + if (divisor < 0) __ Dsubu(result, zero_reg, result); + return; + } + + // In the general case we may need to adjust before and after the truncating + // division to get a flooring division. + Register temp = ToRegister(instr->temp()); + DCHECK(!temp.is(dividend) && !temp.is(result)); + Label needs_adjustment, done; + __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, + dividend, Operand(zero_reg)); + __ TruncatingDiv(result, dividend, Abs(divisor)); + if (divisor < 0) __ Dsubu(result, zero_reg, result); + __ jmp(&done); + __ bind(&needs_adjustment); + __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1)); + __ TruncatingDiv(result, temp, Abs(divisor)); + if (divisor < 0) __ Dsubu(result, zero_reg, result); + __ Dsubu(result, result, Operand(1)); + __ bind(&done); +} + + +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. +void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + const Register result = ToRegister(instr->result()); + + // On MIPS div is asynchronous - it will run in the background while we + // check for special cases. + __ Ddiv(result, dividend, divisor); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg)); + } + + // Check for (0 / -x) that will produce negative zero. 
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label left_not_zero; + __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); + DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg)); + __ bind(&left_not_zero); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow) && + !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + Label left_not_min_int; + __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); + DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1)); + __ bind(&left_not_min_int); + } + + // We performed a truncating division. Correct the result if necessary. + Label done; + Register remainder = scratch0(); + if (kArchVariant != kMips64r6) { + __ mfhi(remainder); + } else { + __ dmod(remainder, dividend, divisor); + } + __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); + __ Xor(remainder, remainder, Operand(divisor)); + __ Branch(&done, ge, remainder, Operand(zero_reg)); + __ Dsubu(result, result, Operand(1)); + __ bind(&done); +} + + +void LCodeGen::DoMulI(LMulI* instr) { + Register scratch = scratch0(); + Register result = ToRegister(instr->result()); + // Note that result may alias left. + Register left = ToRegister(instr->left()); + LOperand* right_op = instr->right(); + + bool bailout_on_minus_zero = + instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); + bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); + + if (right_op->IsConstantOperand()) { + int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); + + if (bailout_on_minus_zero && (constant < 0)) { + // The case of a null constant will be handled separately. + // If constant is negative and left is null, the result should be -0. 
+ DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg)); + } + + switch (constant) { + case -1: + if (overflow) { + __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); + DeoptimizeIf(gt, instr->environment(), scratch, Operand(kMaxInt)); + } else { + __ Dsubu(result, zero_reg, left); + } + break; + case 0: + if (bailout_on_minus_zero) { + // If left is strictly negative and the constant is null, the + // result is -0. Deoptimize if required, otherwise return 0. + DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg)); + } + __ mov(result, zero_reg); + break; + case 1: + // Nothing to do. + __ Move(result, left); + break; + default: + // Multiplying by powers of two and powers of two plus or minus + // one can be done faster with shifted operands. + // For other constants we emit standard code. + int32_t mask = constant >> 31; + uint32_t constant_abs = (constant + mask) ^ mask; + + if (IsPowerOf2(constant_abs)) { + int32_t shift = WhichPowerOf2(constant_abs); + __ dsll(result, left, shift); + // Correct the sign of the result if the constant is negative. + if (constant < 0) __ Dsubu(result, zero_reg, result); + } else if (IsPowerOf2(constant_abs - 1)) { + int32_t shift = WhichPowerOf2(constant_abs - 1); + __ dsll(scratch, left, shift); + __ Daddu(result, scratch, left); + // Correct the sign of the result if the constant is negative. + if (constant < 0) __ Dsubu(result, zero_reg, result); + } else if (IsPowerOf2(constant_abs + 1)) { + int32_t shift = WhichPowerOf2(constant_abs + 1); + __ dsll(scratch, left, shift); + __ Dsubu(result, scratch, left); + // Correct the sign of the result if the constant is negative. + if (constant < 0) __ Dsubu(result, zero_reg, result); + } else { + // Generate standard code. + __ li(at, constant); + __ Dmul(result, left, at); + } + } + + } else { + DCHECK(right_op->IsRegister()); + Register right = ToRegister(right_op); + + if (overflow) { + // hi:lo = left * right. 
+ if (instr->hydrogen()->representation().IsSmi()) { + __ Dmulh(result, left, right); + } else { + __ Dmul(result, left, right); + } + __ dsra32(scratch, result, 0); + __ sra(at, result, 31); + if (instr->hydrogen()->representation().IsSmi()) { + __ SmiTag(result); + } + DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); + } else { + if (instr->hydrogen()->representation().IsSmi()) { + __ SmiUntag(result, left); + __ Dmul(result, result, right); + } else { + __ Dmul(result, left, right); + } + } + + if (bailout_on_minus_zero) { + Label done; + __ Xor(at, left, right); + __ Branch(&done, ge, at, Operand(zero_reg)); + // Bail out if the result is minus zero. + DeoptimizeIf(eq, + instr->environment(), + result, + Operand(zero_reg)); + __ bind(&done); + } + } +} + + +void LCodeGen::DoBitI(LBitI* instr) { + LOperand* left_op = instr->left(); + LOperand* right_op = instr->right(); + DCHECK(left_op->IsRegister()); + Register left = ToRegister(left_op); + Register result = ToRegister(instr->result()); + Operand right(no_reg); + + if (right_op->IsStackSlot()) { + right = Operand(EmitLoadRegister(right_op, at)); + } else { + DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); + right = ToOperand(right_op); + } + + switch (instr->op()) { + case Token::BIT_AND: + __ And(result, left, right); + break; + case Token::BIT_OR: + __ Or(result, left, right); + break; + case Token::BIT_XOR: + if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { + __ Nor(result, zero_reg, left); + } else { + __ Xor(result, left, right); + } + break; + default: + UNREACHABLE(); + break; + } +} + + +void LCodeGen::DoShiftI(LShiftI* instr) { + // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so + // result may alias either of them. 
+ LOperand* right_op = instr->right(); + Register left = ToRegister(instr->left()); + Register result = ToRegister(instr->result()); + + if (right_op->IsRegister()) { + // No need to mask the right operand on MIPS, it is built into the variable + // shift instructions. + switch (instr->op()) { + case Token::ROR: + __ Ror(result, left, Operand(ToRegister(right_op))); + break; + case Token::SAR: + __ srav(result, left, ToRegister(right_op)); + break; + case Token::SHR: + __ srlv(result, left, ToRegister(right_op)); + if (instr->can_deopt()) { + // TODO(yy): (-1) >>> 0. anything else? + DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); + DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt)); + } + break; + case Token::SHL: + __ sllv(result, left, ToRegister(right_op)); + break; + default: + UNREACHABLE(); + break; + } + } else { + // Mask the right_op operand. + int value = ToInteger32(LConstantOperand::cast(right_op)); + uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); + switch (instr->op()) { + case Token::ROR: + if (shift_count != 0) { + __ Ror(result, left, Operand(shift_count)); + } else { + __ Move(result, left); + } + break; + case Token::SAR: + if (shift_count != 0) { + __ sra(result, left, shift_count); + } else { + __ Move(result, left); + } + break; + case Token::SHR: + if (shift_count != 0) { + __ srl(result, left, shift_count); + } else { + if (instr->can_deopt()) { + __ And(at, left, Operand(0x80000000)); + DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); + } + __ Move(result, left); + } + break; + case Token::SHL: + if (shift_count != 0) { + if (instr->hydrogen_value()->representation().IsSmi()) { + __ dsll(result, left, shift_count); + } else { + __ sll(result, left, shift_count); + } + } else { + __ Move(result, left); + } + break; + default: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoSubI(LSubI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + LOperand* 
result = instr->result(); + bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); + + if (!can_overflow) { + if (right->IsStackSlot()) { + Register right_reg = EmitLoadRegister(right, at); + __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg)); + } else { + DCHECK(right->IsRegister() || right->IsConstantOperand()); + __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right)); + } + } else { // can_overflow. + Register overflow = scratch0(); + Register scratch = scratch1(); + if (right->IsStackSlot() || right->IsConstantOperand()) { + Register right_reg = EmitLoadRegister(right, scratch); + __ SubuAndCheckForOverflow(ToRegister(result), + ToRegister(left), + right_reg, + overflow); // Reg at also used as scratch. + } else { + DCHECK(right->IsRegister()); + // Due to overflow check macros not supporting constant operands, + // handling the IsConstantOperand case was moved to prev if clause. + __ SubuAndCheckForOverflow(ToRegister(result), + ToRegister(left), + ToRegister(right), + overflow); // Reg at also used as scratch. 
+ } + DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); + if (!instr->hydrogen()->representation().IsSmi()) { + DeoptimizeIf(gt, instr->environment(), + ToRegister(result), Operand(kMaxInt)); + DeoptimizeIf(lt, instr->environment(), + ToRegister(result), Operand(kMinInt)); + } + } +} + + +void LCodeGen::DoConstantI(LConstantI* instr) { + __ li(ToRegister(instr->result()), Operand(instr->value())); +} + + +void LCodeGen::DoConstantS(LConstantS* instr) { + __ li(ToRegister(instr->result()), Operand(instr->value())); +} + + +void LCodeGen::DoConstantD(LConstantD* instr) { + DCHECK(instr->result()->IsDoubleRegister()); + DoubleRegister result = ToDoubleRegister(instr->result()); + double v = instr->value(); + __ Move(result, v); +} + + +void LCodeGen::DoConstantE(LConstantE* instr) { + __ li(ToRegister(instr->result()), Operand(instr->value())); +} + + +void LCodeGen::DoConstantT(LConstantT* instr) { + Handle<Object> object = instr->value(isolate()); + AllowDeferredHandleDereference smi_check; + __ li(ToRegister(instr->result()), object); +} + + +void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { + Register result = ToRegister(instr->result()); + Register map = ToRegister(instr->value()); + __ EnumLength(result, map); +} + + +void LCodeGen::DoDateField(LDateField* instr) { + Register object = ToRegister(instr->date()); + Register result = ToRegister(instr->result()); + Register scratch = ToRegister(instr->temp()); + Smi* index = instr->index(); + Label runtime, done; + DCHECK(object.is(a0)); + DCHECK(result.is(v0)); + DCHECK(!scratch.is(scratch0())); + DCHECK(!scratch.is(object)); + + __ SmiTst(object, at); + DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); + __ GetObjectType(object, scratch, scratch); + DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE)); + + if (index->value() == 0) { + __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); + } else { + if (index->value() < JSDate::kFirstUncachedField) { 
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); + __ li(scratch, Operand(stamp)); + __ ld(scratch, MemOperand(scratch)); + __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); + __ Branch(&runtime, ne, scratch, Operand(scratch0())); + __ ld(result, FieldMemOperand(object, JSDate::kValueOffset + + kPointerSize * index->value())); + __ jmp(&done); + } + __ bind(&runtime); + __ PrepareCallCFunction(2, scratch); + __ li(a1, Operand(index)); + __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); + __ bind(&done); + } +} + + +MemOperand LCodeGen::BuildSeqStringOperand(Register string, + LOperand* index, + String::Encoding encoding) { + if (index->IsConstantOperand()) { + int offset = ToInteger32(LConstantOperand::cast(index)); + if (encoding == String::TWO_BYTE_ENCODING) { + offset *= kUC16Size; + } + STATIC_ASSERT(kCharSize == 1); + return FieldMemOperand(string, SeqString::kHeaderSize + offset); + } + Register scratch = scratch0(); + DCHECK(!scratch.is(string)); + DCHECK(!scratch.is(ToRegister(index))); + if (encoding == String::ONE_BYTE_ENCODING) { + __ Daddu(scratch, string, ToRegister(index)); + } else { + STATIC_ASSERT(kUC16Size == 2); + __ dsll(scratch, ToRegister(index), 1); + __ Daddu(scratch, string, scratch); + } + return FieldMemOperand(scratch, SeqString::kHeaderSize); +} + + +void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { + String::Encoding encoding = instr->hydrogen()->encoding(); + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + + if (FLAG_debug_code) { + Register scratch = scratch0(); + __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + + __ And(scratch, scratch, + Operand(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t 
two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); + __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg)); + } + + MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); + if (encoding == String::ONE_BYTE_ENCODING) { + __ lbu(result, operand); + } else { + __ lhu(result, operand); + } +} + + +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + String::Encoding encoding = instr->hydrogen()->encoding(); + Register string = ToRegister(instr->string()); + Register value = ToRegister(instr->value()); + + if (FLAG_debug_code) { + Register scratch = scratch0(); + Register index = ToRegister(instr->index()); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + int encoding_mask = + instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type; + __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask); + } + + MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); + if (encoding == String::ONE_BYTE_ENCODING) { + __ sb(value, operand); + } else { + __ sh(value, operand); + } +} + + +void LCodeGen::DoAddI(LAddI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + LOperand* result = instr->result(); + bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); + + if (!can_overflow) { + if (right->IsStackSlot()) { + Register right_reg = EmitLoadRegister(right, at); + __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg)); + } else { + DCHECK(right->IsRegister() || right->IsConstantOperand()); + __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); + } + } else { // can_overflow. 
+ Register overflow = scratch0(); + Register scratch = scratch1(); + if (right->IsStackSlot() || + right->IsConstantOperand()) { + Register right_reg = EmitLoadRegister(right, scratch); + __ AdduAndCheckForOverflow(ToRegister(result), + ToRegister(left), + right_reg, + overflow); // Reg at also used as scratch. + } else { + DCHECK(right->IsRegister()); + // Due to overflow check macros not supporting constant operands, + // handling the IsConstantOperand case was moved to prev if clause. + __ AdduAndCheckForOverflow(ToRegister(result), + ToRegister(left), + ToRegister(right), + overflow); // Reg at also used as scratch. + } + DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); + // if not smi, it must int32. + if (!instr->hydrogen()->representation().IsSmi()) { + DeoptimizeIf(gt, instr->environment(), + ToRegister(result), Operand(kMaxInt)); + DeoptimizeIf(lt, instr->environment(), + ToRegister(result), Operand(kMinInt)); + } + } +} + + +void LCodeGen::DoMathMinMax(LMathMinMax* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + HMathMinMax::Operation operation = instr->hydrogen()->operation(); + Condition condition = (operation == HMathMinMax::kMathMin) ? 
le : ge; + if (instr->hydrogen()->representation().IsSmiOrInteger32()) { + Register left_reg = ToRegister(left); + Register right_reg = EmitLoadRegister(right, scratch0()); + Register result_reg = ToRegister(instr->result()); + Label return_right, done; + Register scratch = scratch1(); + __ Slt(scratch, left_reg, Operand(right_reg)); + if (condition == ge) { + __ Movz(result_reg, left_reg, scratch); + __ Movn(result_reg, right_reg, scratch); + } else { + DCHECK(condition == le); + __ Movn(result_reg, left_reg, scratch); + __ Movz(result_reg, right_reg, scratch); + } + } else { + DCHECK(instr->hydrogen()->representation().IsDouble()); + FPURegister left_reg = ToDoubleRegister(left); + FPURegister right_reg = ToDoubleRegister(right); + FPURegister result_reg = ToDoubleRegister(instr->result()); + Label check_nan_left, check_zero, return_left, return_right, done; + __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg); + __ BranchF(&return_left, NULL, condition, left_reg, right_reg); + __ Branch(&return_right); + + __ bind(&check_zero); + // left == right != 0. + __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero); + // At this point, both left and right are either 0 or -0. + if (operation == HMathMinMax::kMathMin) { + __ neg_d(left_reg, left_reg); + __ sub_d(result_reg, left_reg, right_reg); + __ neg_d(result_reg, result_reg); + } else { + __ add_d(result_reg, left_reg, right_reg); + } + __ Branch(&done); + + __ bind(&check_nan_left); + // left == NaN. 
+ __ BranchF(NULL, &return_left, eq, left_reg, left_reg); + __ bind(&return_right); + if (!right_reg.is(result_reg)) { + __ mov_d(result_reg, right_reg); + } + __ Branch(&done); + + __ bind(&return_left); + if (!left_reg.is(result_reg)) { + __ mov_d(result_reg, left_reg); + } + __ bind(&done); + } +} + + +void LCodeGen::DoArithmeticD(LArithmeticD* instr) { + DoubleRegister left = ToDoubleRegister(instr->left()); + DoubleRegister right = ToDoubleRegister(instr->right()); + DoubleRegister result = ToDoubleRegister(instr->result()); + switch (instr->op()) { + case Token::ADD: + __ add_d(result, left, right); + break; + case Token::SUB: + __ sub_d(result, left, right); + break; + case Token::MUL: + __ mul_d(result, left, right); + break; + case Token::DIV: + __ div_d(result, left, right); + break; + case Token::MOD: { + // Save a0-a3 on the stack. + RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit(); + __ MultiPush(saved_regs); + + __ PrepareCallCFunction(0, 2, scratch0()); + __ MovToFloatParameters(left, right); + __ CallCFunction( + ExternalReference::mod_two_doubles_operation(isolate()), + 0, 2); + // Move the result in the double result register. + __ MovFromFloatResult(result); + + // Restore saved register. + __ MultiPop(saved_regs); + break; + } + default: + UNREACHABLE(); + break; + } +} + + +void LCodeGen::DoArithmeticT(LArithmeticT* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(a1)); + DCHECK(ToRegister(instr->right()).is(a0)); + DCHECK(ToRegister(instr->result()).is(v0)); + + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + // Other arch use a nop here, to signal that there is no inlined + // patchable code. Mips does not need the nop, since our marker + // instruction (andi zero_reg) will never be used in normal code. 
+} + + +template<class InstrType> +void LCodeGen::EmitBranch(InstrType instr, + Condition condition, + Register src1, + const Operand& src2) { + int left_block = instr->TrueDestination(chunk_); + int right_block = instr->FalseDestination(chunk_); + + int next_block = GetNextEmittedBlock(); + if (right_block == left_block || condition == al) { + EmitGoto(left_block); + } else if (left_block == next_block) { + __ Branch(chunk_->GetAssemblyLabel(right_block), + NegateCondition(condition), src1, src2); + } else if (right_block == next_block) { + __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); + } else { + __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); + __ Branch(chunk_->GetAssemblyLabel(right_block)); + } +} + + +template<class InstrType> +void LCodeGen::EmitBranchF(InstrType instr, + Condition condition, + FPURegister src1, + FPURegister src2) { + int right_block = instr->FalseDestination(chunk_); + int left_block = instr->TrueDestination(chunk_); + + int next_block = GetNextEmittedBlock(); + if (right_block == left_block) { + EmitGoto(left_block); + } else if (left_block == next_block) { + __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, + NegateCondition(condition), src1, src2); + } else if (right_block == next_block) { + __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, + condition, src1, src2); + } else { + __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, + condition, src1, src2); + __ Branch(chunk_->GetAssemblyLabel(right_block)); + } +} + + +template<class InstrType> +void LCodeGen::EmitFalseBranch(InstrType instr, + Condition condition, + Register src1, + const Operand& src2) { + int false_block = instr->FalseDestination(chunk_); + __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2); +} + + +template<class InstrType> +void LCodeGen::EmitFalseBranchF(InstrType instr, + Condition condition, + FPURegister src1, + FPURegister src2) { + int false_block = 
instr->FalseDestination(chunk_); + __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL, + condition, src1, src2); +} + + +void LCodeGen::DoDebugBreak(LDebugBreak* instr) { + __ stop("LDebugBreak"); +} + + +void LCodeGen::DoBranch(LBranch* instr) { + Representation r = instr->hydrogen()->value()->representation(); + if (r.IsInteger32() || r.IsSmi()) { + DCHECK(!info()->IsStub()); + Register reg = ToRegister(instr->value()); + EmitBranch(instr, ne, reg, Operand(zero_reg)); + } else if (r.IsDouble()) { + DCHECK(!info()->IsStub()); + DoubleRegister reg = ToDoubleRegister(instr->value()); + // Test the double value. Zero and NaN are false. + EmitBranchF(instr, nue, reg, kDoubleRegZero); + } else { + DCHECK(r.IsTagged()); + Register reg = ToRegister(instr->value()); + HType type = instr->hydrogen()->value()->type(); + if (type.IsBoolean()) { + DCHECK(!info()->IsStub()); + __ LoadRoot(at, Heap::kTrueValueRootIndex); + EmitBranch(instr, eq, reg, Operand(at)); + } else if (type.IsSmi()) { + DCHECK(!info()->IsStub()); + EmitBranch(instr, ne, reg, Operand(zero_reg)); + } else if (type.IsJSArray()) { + DCHECK(!info()->IsStub()); + EmitBranch(instr, al, zero_reg, Operand(zero_reg)); + } else if (type.IsHeapNumber()) { + DCHECK(!info()->IsStub()); + DoubleRegister dbl_scratch = double_scratch0(); + __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); + // Test the double value. Zero and NaN are false. + EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); + } else if (type.IsString()) { + DCHECK(!info()->IsStub()); + __ ld(at, FieldMemOperand(reg, String::kLengthOffset)); + EmitBranch(instr, ne, at, Operand(zero_reg)); + } else { + ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); + // Avoid deopts in the case where we've never executed this path before. + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + if (expected.Contains(ToBooleanStub::UNDEFINED)) { + // undefined -> false. 
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); + } + if (expected.Contains(ToBooleanStub::BOOLEAN)) { + // Boolean -> its value. + __ LoadRoot(at, Heap::kTrueValueRootIndex); + __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at)); + __ LoadRoot(at, Heap::kFalseValueRootIndex); + __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); + } + if (expected.Contains(ToBooleanStub::NULL_TYPE)) { + // 'null' -> false. + __ LoadRoot(at, Heap::kNullValueRootIndex); + __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); + } + + if (expected.Contains(ToBooleanStub::SMI)) { + // Smis: 0 -> false, all other -> true. + __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); + __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); + } else if (expected.NeedsMap()) { + // If we need a map later and have a Smi -> deopt. + __ SmiTst(reg, at); + DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); + } + + const Register map = scratch0(); + if (expected.NeedsMap()) { + __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); + if (expected.CanBeUndetectable()) { + // Undetectable -> false. + __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(at, at, Operand(1 << Map::kIsUndetectable)); + __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); + } + } + + if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { + // spec object -> true. + __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(instr->TrueLabel(chunk_), + ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); + } + + if (expected.Contains(ToBooleanStub::STRING)) { + // String value -> false iff empty. 
+ Label not_string; + __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(¬_string, ge , at, Operand(FIRST_NONSTRING_TYPE)); + __ ld(at, FieldMemOperand(reg, String::kLengthOffset)); + __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg)); + __ Branch(instr->FalseLabel(chunk_)); + __ bind(¬_string); + } + + if (expected.Contains(ToBooleanStub::SYMBOL)) { + // Symbol value -> true. + const Register scratch = scratch1(); + __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE)); + } + + if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { + // heap number -> false iff +0, -0, or NaN. + DoubleRegister dbl_scratch = double_scratch0(); + Label not_heap_number; + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + __ Branch(¬_heap_number, ne, map, Operand(at)); + __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); + __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), + ne, dbl_scratch, kDoubleRegZero); + // Falls through if dbl_scratch == 0. + __ Branch(instr->FalseLabel(chunk_)); + __ bind(¬_heap_number); + } + + if (!expected.IsGeneric()) { + // We've seen something for the first time -> deopt. + // This can only happen if we are not generic already. + DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg)); + } + } + } +} + + +void LCodeGen::EmitGoto(int block) { + if (!IsNextEmittedBlock(block)) { + __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); + } +} + + +void LCodeGen::DoGoto(LGoto* instr) { + EmitGoto(instr->block_id()); +} + + +Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { + Condition cond = kNoCondition; + switch (op) { + case Token::EQ: + case Token::EQ_STRICT: + cond = eq; + break; + case Token::NE: + case Token::NE_STRICT: + cond = ne; + break; + case Token::LT: + cond = is_unsigned ? lo : lt; + break; + case Token::GT: + cond = is_unsigned ? 
hi : gt; + break; + case Token::LTE: + cond = is_unsigned ? ls : le; + break; + case Token::GTE: + cond = is_unsigned ? hs : ge; + break; + case Token::IN: + case Token::INSTANCEOF: + default: + UNREACHABLE(); + } + return cond; +} + + +void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + bool is_unsigned = + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cond = TokenToCondition(instr->op(), is_unsigned); + + if (left->IsConstantOperand() && right->IsConstantOperand()) { + // We can statically evaluate the comparison. + double left_val = ToDouble(LConstantOperand::cast(left)); + double right_val = ToDouble(LConstantOperand::cast(right)); + int next_block = EvalComparison(instr->op(), left_val, right_val) ? + instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); + EmitGoto(next_block); + } else { + if (instr->is_double()) { + // Compare left and right as doubles and load the + // resulting flags into the normal status register. + FPURegister left_reg = ToDoubleRegister(left); + FPURegister right_reg = ToDoubleRegister(right); + + // If a NaN is involved, i.e. the result is unordered, + // jump to false block label. 
+ __ BranchF(NULL, instr->FalseLabel(chunk_), eq, + left_reg, right_reg); + + EmitBranchF(instr, cond, left_reg, right_reg); + } else { + Register cmp_left; + Operand cmp_right = Operand((int64_t)0); + if (right->IsConstantOperand()) { + int32_t value = ToInteger32(LConstantOperand::cast(right)); + if (instr->hydrogen_value()->representation().IsSmi()) { + cmp_left = ToRegister(left); + cmp_right = Operand(Smi::FromInt(value)); + } else { + cmp_left = ToRegister(left); + cmp_right = Operand(value); + } + } else if (left->IsConstantOperand()) { + int32_t value = ToInteger32(LConstantOperand::cast(left)); + if (instr->hydrogen_value()->representation().IsSmi()) { + cmp_left = ToRegister(right); + cmp_right = Operand(Smi::FromInt(value)); + } else { + cmp_left = ToRegister(right); + cmp_right = Operand(value); + } + // We commuted the operands, so commute the condition. + cond = CommuteCondition(cond); + } else { + cmp_left = ToRegister(left); + cmp_right = Operand(ToRegister(right)); + } + + EmitBranch(instr, cond, cmp_left, cmp_right); + } + } +} + + +void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { + Register left = ToRegister(instr->left()); + Register right = ToRegister(instr->right()); + + EmitBranch(instr, eq, left, Operand(right)); +} + + +void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { + if (instr->hydrogen()->representation().IsTagged()) { + Register input_reg = ToRegister(instr->object()); + __ li(at, Operand(factory()->the_hole_value())); + EmitBranch(instr, eq, input_reg, Operand(at)); + return; + } + + DoubleRegister input_reg = ToDoubleRegister(instr->object()); + EmitFalseBranchF(instr, eq, input_reg, input_reg); + + Register scratch = scratch0(); + __ FmoveHigh(scratch, input_reg); + EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32)); +} + + +void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { + Representation rep = instr->hydrogen()->value()->representation(); + 
DCHECK(!rep.IsInteger32()); + Register scratch = ToRegister(instr->temp()); + + if (rep.IsDouble()) { + DoubleRegister value = ToDoubleRegister(instr->value()); + EmitFalseBranchF(instr, ne, value, kDoubleRegZero); + __ FmoveHigh(scratch, value); + // Only use low 32-bits of value. + __ dsll32(scratch, scratch, 0); + __ dsrl32(scratch, scratch, 0); + __ li(at, 0x80000000); + } else { + Register value = ToRegister(instr->value()); + __ CheckMap(value, + scratch, + Heap::kHeapNumberMapRootIndex, + instr->FalseLabel(chunk()), + DO_SMI_CHECK); + __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); + EmitFalseBranch(instr, ne, scratch, Operand(0x80000000)); + __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset)); + __ mov(at, zero_reg); + } + EmitBranch(instr, eq, scratch, Operand(at)); +} + + +Condition LCodeGen::EmitIsObject(Register input, + Register temp1, + Register temp2, + Label* is_not_object, + Label* is_object) { + __ JumpIfSmi(input, is_not_object); + + __ LoadRoot(temp2, Heap::kNullValueRootIndex); + __ Branch(is_object, eq, input, Operand(temp2)); + + // Load map. + __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined. + __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); + __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable)); + __ Branch(is_not_object, ne, temp2, Operand(zero_reg)); + + // Load instance type and check that it is in object type range. 
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); + __ Branch(is_not_object, + lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + + return le; +} + + +void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { + Register reg = ToRegister(instr->value()); + Register temp1 = ToRegister(instr->temp()); + Register temp2 = scratch0(); + + Condition true_cond = + EmitIsObject(reg, temp1, temp2, + instr->FalseLabel(chunk_), instr->TrueLabel(chunk_)); + + EmitBranch(instr, true_cond, temp2, + Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); +} + + +Condition LCodeGen::EmitIsString(Register input, + Register temp1, + Label* is_not_string, + SmiCheck check_needed = INLINE_SMI_CHECK) { + if (check_needed == INLINE_SMI_CHECK) { + __ JumpIfSmi(input, is_not_string); + } + __ GetObjectType(input, temp1, temp1); + + return lt; +} + + +void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { + Register reg = ToRegister(instr->value()); + Register temp1 = ToRegister(instr->temp()); + + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; + Condition true_cond = + EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); + + EmitBranch(instr, true_cond, temp1, + Operand(FIRST_NONSTRING_TYPE)); +} + + +void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { + Register input_reg = EmitLoadRegister(instr->value(), at); + __ And(at, input_reg, kSmiTagMask); + EmitBranch(instr, eq, at, Operand(zero_reg)); +} + + +void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + __ JumpIfSmi(input, instr->FalseLabel(chunk_)); + } + __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset)); + __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); + __ And(at, temp, Operand(1 << Map::kIsUndetectable)); + EmitBranch(instr, ne, at, Operand(zero_reg)); +} + + +static Condition ComputeCompareCondition(Token::Value op) { + switch (op) { + case Token::EQ_STRICT: + case Token::EQ: + return eq; + case Token::LT: + return lt; + case Token::GT: + return gt; + case Token::LTE: + return le; + case Token::GTE: + return ge; + default: + UNREACHABLE(); + return kNoCondition; + } +} + + +void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + Token::Value op = instr->op(); + + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + Condition condition = ComputeCompareCondition(op); + + EmitBranch(instr, condition, v0, Operand(zero_reg)); +} + + +static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == FIRST_TYPE) return to; + DCHECK(from == to || to == LAST_TYPE); + return from; +} + + +static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { + InstanceType from = instr->from(); + 
InstanceType to = instr->to(); + if (from == to) return eq; + if (to == LAST_TYPE) return hs; + if (from == FIRST_TYPE) return ls; + UNREACHABLE(); + return eq; +} + + +void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { + Register scratch = scratch0(); + Register input = ToRegister(instr->value()); + + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + __ JumpIfSmi(input, instr->FalseLabel(chunk_)); + } + + __ GetObjectType(input, scratch, scratch); + EmitBranch(instr, + BranchCondition(instr->hydrogen()), + scratch, + Operand(TestType(instr->hydrogen()))); +} + + +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + + __ AssertString(input); + + __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(result, result); +} + + +void LCodeGen::DoHasCachedArrayIndexAndBranch( + LHasCachedArrayIndexAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register scratch = scratch0(); + + __ lwu(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask)); + EmitBranch(instr, eq, at, Operand(zero_reg)); +} + + +// Branches to a label or falls through with the answer in flags. Trashes +// the temp registers, but not the input. +void LCodeGen::EmitClassOfTest(Label* is_true, + Label* is_false, + Handle<String>class_name, + Register input, + Register temp, + Register temp2) { + DCHECK(!input.is(temp)); + DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); + + __ JumpIfSmi(input, is_false); + + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. 
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + + __ GetObjectType(input, temp, temp2); + __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE)); + } else { + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ GetObjectType(input, temp, temp2); + __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + } + + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. + // Check if the constructor in the map is a function. + __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset)); + + // Objects with a non-function constructor have class 'Object'. + __ GetObjectType(temp, temp2, temp2); + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { + __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE)); + } else { + __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE)); + } + + // temp now contains the constructor function. Grab the + // instance class name from there. + __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); + __ ld(temp, FieldMemOperand(temp, + SharedFunctionInfo::kInstanceClassNameOffset)); + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. 
This routine isn't expected to work for random API-created + // classes and it doesn't have to because you can't access it with natives + // syntax. Since both sides are internalized it is sufficient to use an + // identity comparison. + + // End with the address of this class_name instance in temp register. + // On MIPS, the caller must do the comparison with Handle<String>class_name. +} + + +void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = scratch0(); + Register temp2 = ToRegister(instr->temp()); + Handle<String> class_name = instr->hydrogen()->class_name(); + + EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), + class_name, input, temp, temp2); + + EmitBranch(instr, eq, temp, Operand(class_name)); +} + + +void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { + Register reg = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); + EmitBranch(instr, eq, temp, Operand(instr->map())); +} + + +void LCodeGen::DoInstanceOf(LInstanceOf* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + Label true_label, done; + DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0. + DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1. 
+ Register result = ToRegister(instr->result()); + DCHECK(result.is(v0)); + + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + + __ Branch(&true_label, eq, result, Operand(zero_reg)); + __ li(result, Operand(factory()->false_value())); + __ Branch(&done); + __ bind(&true_label); + __ li(result, Operand(factory()->true_value())); + __ bind(&done); +} + + +void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { + class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { + public: + DeferredInstanceOfKnownGlobal(LCodeGen* codegen, + LInstanceOfKnownGlobal* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + Label* map_check() { return &map_check_; } + + private: + LInstanceOfKnownGlobal* instr_; + Label map_check_; + }; + + DeferredInstanceOfKnownGlobal* deferred; + deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); + + Label done, false_result; + Register object = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + Register result = ToRegister(instr->result()); + + DCHECK(object.is(a0)); + DCHECK(result.is(v0)); + + // A Smi is not instance of anything. + __ JumpIfSmi(object, &false_result); + + // This is the inlined call site instanceof cache. The two occurences of the + // hole value will be patched to the last map/result pair generated by the + // instanceof stub. + Label cache_miss; + Register map = temp; + __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset)); + + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + __ bind(deferred->map_check()); // Label for calculating code patching. 
+ // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch with + // the cached map. + Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); + __ li(at, Operand(Handle<Object>(cell))); + __ ld(at, FieldMemOperand(at, PropertyCell::kValueOffset)); + __ BranchShort(&cache_miss, ne, map, Operand(at)); + // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch + // with true or false. The distance from map check has to be constant. + __ li(result, Operand(factory()->the_hole_value())); + __ Branch(&done); + + // The inlined call site cache did not match. Check null and string before + // calling the deferred code. + __ bind(&cache_miss); + // Null is not instance of anything. + __ LoadRoot(temp, Heap::kNullValueRootIndex); + __ Branch(&false_result, eq, object, Operand(temp)); + + // String values is not instance of anything. + Condition cc = __ IsObjectStringType(object, temp, temp); + __ Branch(&false_result, cc, temp, Operand(zero_reg)); + + // Go to the deferred code. + __ Branch(deferred->entry()); + + __ bind(&false_result); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + + // Here result has either true or false. Deferred code also produces true or + // false object. 
+ __ bind(deferred->exit()); + __ bind(&done); +} + + +void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, + Label* map_check) { + Register result = ToRegister(instr->result()); + DCHECK(result.is(v0)); + + InstanceofStub::Flags flags = InstanceofStub::kNoFlags; + flags = static_cast<InstanceofStub::Flags>( + flags | InstanceofStub::kArgsInRegisters); + flags = static_cast<InstanceofStub::Flags>( + flags | InstanceofStub::kCallSiteInlineCheck); + flags = static_cast<InstanceofStub::Flags>( + flags | InstanceofStub::kReturnTrueFalseObject); + InstanceofStub stub(isolate(), flags); + + PushSafepointRegistersScope scope(this); + LoadContextFromDeferred(instr->context()); + + // Get the temp register reserved by the instruction. This needs to be a4 as + // its slot of the pushing of safepoint registers is used to communicate the + // offset to the location of the map check. + Register temp = ToRegister(instr->temp()); + DCHECK(temp.is(a4)); + __ li(InstanceofStub::right(), instr->function()); + static const int kAdditionalDelta = 13; + int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; + Label before_push_delta; + __ bind(&before_push_delta); + { + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE); + __ StoreToSafepointRegisterSlot(temp, temp); + } + CallCodeGeneric(stub.GetCode(), + RelocInfo::CODE_TARGET, + instr, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); + // Put the result value into the result register slot and + // restore all registers. 
+ __ StoreToSafepointRegisterSlot(result, result); +} + + +void LCodeGen::DoCmpT(LCmpT* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + Token::Value op = instr->op(); + + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + // On MIPS there is no need for a "no inlined smi code" marker (nop). + + Condition condition = ComputeCompareCondition(op); + // A minor optimization that relies on LoadRoot always emitting one + // instruction. + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); + Label done, check; + __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); + __ bind(&check); + __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); + DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check)); + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); + __ bind(&done); +} + + +void LCodeGen::DoReturn(LReturn* instr) { + if (FLAG_trace && info()->IsOptimizing()) { + // Push the return value on the stack as the parameter. + // Runtime::TraceExit returns its parameter in v0. We're leaving the code + // managed by the register allocator and tearing down the frame, it's + // safe to write to the context register. 
+ __ push(v0); + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kTraceExit, 1); + } + if (info()->saves_caller_doubles()) { + RestoreCallerDoubles(); + } + int no_frame_start = -1; + if (NeedsEagerFrame()) { + __ mov(sp, fp); + no_frame_start = masm_->pc_offset(); + __ Pop(ra, fp); + } + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + int32_t sp_delta = (parameter_count + 1) * kPointerSize; + if (sp_delta != 0) { + __ Daddu(sp, sp, Operand(sp_delta)); + } + } else { + Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiUntag(reg); + __ dsll(at, reg, kPointerSizeLog2); + __ Daddu(sp, sp, at); + } + + __ Jump(ra); + + if (no_frame_start != -1) { + info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); + } +} + + +void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { + Register result = ToRegister(instr->result()); + __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); + __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + DeoptimizeIf(eq, instr->environment(), result, Operand(at)); + } +} + + +void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(v0)); + + __ li(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ li(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(a0)); + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } + ContextualMode mode = instr->for_typeof() ? 
NOT_CONTEXTUAL : CONTEXTUAL; + Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { + Register value = ToRegister(instr->value()); + Register cell = scratch0(); + + // Load the cell. + __ li(cell, Operand(instr->hydrogen()->cell().handle())); + + // If the cell we are storing to contains the hole it could have + // been deleted from the property dictionary. In that case, we need + // to update the property details in the property dictionary to mark + // it as no longer deleted. + if (instr->hydrogen()->RequiresHoleCheck()) { + // We use a temp to check the payload. + Register payload = ToRegister(instr->temp()); + __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset)); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + DeoptimizeIf(eq, instr->environment(), payload, Operand(at)); + } + + // Store the value. + __ sd(value, FieldMemOperand(cell, Cell::kValueOffset)); + // Cells are always rescanned, so no write barrier here. 
+} + + +void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + + __ ld(result, ContextOperand(context, instr->slot_index())); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + + if (instr->hydrogen()->DeoptimizesOnHole()) { + DeoptimizeIf(eq, instr->environment(), result, Operand(at)); + } else { + Label is_not_hole; + __ Branch(&is_not_hole, ne, result, Operand(at)); + __ LoadRoot(result, Heap::kUndefinedValueRootIndex); + __ bind(&is_not_hole); + } + } +} + + +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register value = ToRegister(instr->value()); + Register scratch = scratch0(); + MemOperand target = ContextOperand(context, instr->slot_index()); + + Label skip_assignment; + + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ld(scratch, target); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + + if (instr->hydrogen()->DeoptimizesOnHole()) { + DeoptimizeIf(eq, instr->environment(), scratch, Operand(at)); + } else { + __ Branch(&skip_assignment, ne, scratch, Operand(at)); + } + } + + __ sd(value, target); + if (instr->hydrogen()->NeedsWriteBarrier()) { + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; + __ RecordWriteContextSlot(context, + target.offset(), + value, + scratch0(), + GetRAState(), + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed); + } + + __ bind(&skip_assignment); +} + + +void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); + Register object = ToRegister(instr->object()); + if (access.IsExternalMemory()) { + Register result = ToRegister(instr->result()); + MemOperand operand = MemOperand(object, offset); + __ Load(result, operand, access.representation()); + return; + } + + if (instr->hydrogen()->representation().IsDouble()) { + DoubleRegister result = ToDoubleRegister(instr->result()); + __ ldc1(result, FieldMemOperand(object, offset)); + return; + } + + Register result = ToRegister(instr->result()); + if (!access.IsInobject()) { + __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); + object = result; + } + + Representation representation = access.representation(); + if (representation.IsSmi() && SmiValuesAre32Bits() && + instr->hydrogen()->representation().IsInteger32()) { + if (FLAG_debug_code) { + // Verify this is really an Smi. + Register scratch = scratch0(); + __ Load(scratch, FieldMemOperand(object, offset), representation); + __ AssertSmi(scratch); + } + + // Read int value directly from upper half of the smi. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + offset += kPointerSize / 2; + representation = Representation::Integer32(); + } + __ Load(result, FieldMemOperand(object, offset), representation); +} + + +void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(v0)); + + // Name is always in a2. 
+ __ li(LoadIC::NameRegister(), Operand(instr->name())); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ li(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(a0)); + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } + Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { + Register scratch = scratch0(); + Register function = ToRegister(instr->function()); + Register result = ToRegister(instr->result()); + + // Get the prototype or initial map from the function. + __ ld(result, + FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + + // Check that the function has a prototype or an initial map. + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + DeoptimizeIf(eq, instr->environment(), result, Operand(at)); + + // If the function does not have an initial map, we're done. + Label done; + __ GetObjectType(result, scratch, scratch); + __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); + + // Get the prototype from the initial map. + __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); + + // All done. + __ bind(&done); +} + + +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + +void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { + Register arguments = ToRegister(instr->arguments()); + Register result = ToRegister(instr->result()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them add one more. 
+ if (instr->length()->IsConstantOperand()) { + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + if (instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int index = (const_length - const_index) + 1; + __ ld(result, MemOperand(arguments, index * kPointerSize)); + } else { + Register index = ToRegister(instr->index()); + __ li(at, Operand(const_length + 1)); + __ Dsubu(result, at, index); + __ dsll(at, result, kPointerSizeLog2); + __ Daddu(at, arguments, at); + __ ld(result, MemOperand(at)); + } + } else if (instr->index()->IsConstantOperand()) { + Register length = ToRegister(instr->length()); + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int loc = const_index - 1; + if (loc != 0) { + __ Dsubu(result, length, Operand(loc)); + __ dsll(at, result, kPointerSizeLog2); + __ Daddu(at, arguments, at); + __ ld(result, MemOperand(at)); + } else { + __ dsll(at, length, kPointerSizeLog2); + __ Daddu(at, arguments, at); + __ ld(result, MemOperand(at)); + } + } else { + Register length = ToRegister(instr->length()); + Register index = ToRegister(instr->index()); + __ Dsubu(result, length, index); + __ Daddu(result, result, 1); + __ dsll(at, result, kPointerSizeLog2); + __ Daddu(at, arguments, at); + __ ld(result, MemOperand(at)); + } +} + + +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + Register external_pointer = ToRegister(instr->elements()); + Register key = no_reg; + ElementsKind elements_kind = instr->elements_kind(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort(kArrayIndexConstantValueTooBig); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(elements_kind); + int shift_size = 
(instr->hydrogen()->key()->representation().IsSmi()) + ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) + : element_size_shift; + int base_offset = instr->base_offset(); + + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || + elements_kind == FLOAT32_ELEMENTS || + elements_kind == EXTERNAL_FLOAT64_ELEMENTS || + elements_kind == FLOAT64_ELEMENTS) { + int base_offset = instr->base_offset(); + FPURegister result = ToDoubleRegister(instr->result()); + if (key_is_constant) { + __ Daddu(scratch0(), external_pointer, + constant_key << element_size_shift); + } else { + if (shift_size < 0) { + if (shift_size == -32) { + __ dsra32(scratch0(), key, 0); + } else { + __ dsra(scratch0(), key, -shift_size); + } + } else { + __ dsll(scratch0(), key, shift_size); + } + __ Daddu(scratch0(), scratch0(), external_pointer); + } + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || + elements_kind == FLOAT32_ELEMENTS) { + __ lwc1(result, MemOperand(scratch0(), base_offset)); + __ cvt_d_s(result, result); + } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ ldc1(result, MemOperand(scratch0(), base_offset)); + } + } else { + Register result = ToRegister(instr->result()); + MemOperand mem_operand = PrepareKeyedOperand( + key, external_pointer, key_is_constant, constant_key, + element_size_shift, shift_size, base_offset); + switch (elements_kind) { + case EXTERNAL_INT8_ELEMENTS: + case INT8_ELEMENTS: + __ lb(result, mem_operand); + break; + case EXTERNAL_UINT8_CLAMPED_ELEMENTS: + case EXTERNAL_UINT8_ELEMENTS: + case UINT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + __ lbu(result, mem_operand); + break; + case EXTERNAL_INT16_ELEMENTS: + case INT16_ELEMENTS: + __ lh(result, mem_operand); + break; + case EXTERNAL_UINT16_ELEMENTS: + case UINT16_ELEMENTS: + __ lhu(result, mem_operand); + break; + case EXTERNAL_INT32_ELEMENTS: + case INT32_ELEMENTS: + __ lw(result, mem_operand); + break; + case EXTERNAL_UINT32_ELEMENTS: + case UINT32_ELEMENTS: + __ lw(result, mem_operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + DeoptimizeIf(Ugreater_equal, instr->environment(), + result, Operand(0x80000000)); + } + break; + case FLOAT32_ELEMENTS: + case FLOAT64_ELEMENTS: + case EXTERNAL_FLOAT32_ELEMENTS: + case EXTERNAL_FLOAT64_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case DICTIONARY_ELEMENTS: + case SLOPPY_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { + Register elements = ToRegister(instr->elements()); + bool key_is_constant = instr->key()->IsConstantOperand(); + Register key = no_reg; + DoubleRegister result = ToDoubleRegister(instr->result()); + Register scratch = scratch0(); + + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + + int base_offset = instr->base_offset(); + if (key_is_constant) { + int constant_key = 
ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort(kArrayIndexConstantValueTooBig); + } + base_offset += constant_key * kDoubleSize; + } + __ Daddu(scratch, elements, Operand(base_offset)); + + if (!key_is_constant) { + key = ToRegister(instr->key()); + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) + ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) + : element_size_shift; + if (shift_size > 0) { + __ dsll(at, key, shift_size); + } else if (shift_size == -32) { + __ dsra32(at, key, 0); + } else { + __ dsra(at, key, -shift_size); + } + __ Daddu(scratch, scratch, at); + } + + __ ldc1(result, MemOperand(scratch)); + + if (instr->hydrogen()->RequiresHoleCheck()) { + __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); + DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); + } +} + + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { + HLoadKeyed* hinstr = instr->hydrogen(); + Register elements = ToRegister(instr->elements()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + Register store_base = scratch; + int offset = instr->base_offset(); + + if (instr->key()->IsConstantOperand()) { + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset += ToInteger32(const_operand) * kPointerSize; + store_base = elements; + } else { + Register key = ToRegister(instr->key()); + // Even though the HLoadKeyed instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. 
+ if (instr->hydrogen()->key()->representation().IsSmi()) { + __ SmiScale(scratch, key, kPointerSizeLog2); + __ daddu(scratch, elements, scratch); + } else { + __ dsll(scratch, key, kPointerSizeLog2); + __ daddu(scratch, elements, scratch); + } + } + + Representation representation = hinstr->representation(); + if (representation.IsInteger32() && SmiValuesAre32Bits() && + hinstr->elements_kind() == FAST_SMI_ELEMENTS) { + DCHECK(!hinstr->RequiresHoleCheck()); + if (FLAG_debug_code) { + Register temp = scratch1(); + __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); + __ AssertSmi(temp); + } + + // Read int value directly from upper half of the smi. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + offset += kPointerSize / 2; + } + + __ Load(result, MemOperand(store_base, offset), representation); + + // Check for the hole value. + if (hinstr->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ SmiTst(result, scratch); + DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); + } else { + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + DeoptimizeIf(eq, instr->environment(), result, Operand(scratch)); + } + } +} + + +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_typed_elements()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); + } else { + DoLoadKeyedFixedArray(instr); + } +} + + +MemOperand LCodeGen::PrepareKeyedOperand(Register key, + Register base, + bool key_is_constant, + int constant_key, + int element_size, + int shift_size, + int base_offset) { + if (key_is_constant) { + return MemOperand(base, (constant_key << element_size) + base_offset); + } + + if (base_offset == 0) { + if (shift_size >= 0) { + __ dsll(scratch0(), key, shift_size); + __ Daddu(scratch0(), base, scratch0()); + return MemOperand(scratch0()); + } else { + if (shift_size == 
-32) { + __ dsra32(scratch0(), key, 0); + } else { + __ dsra(scratch0(), key, -shift_size); + } + __ Daddu(scratch0(), base, scratch0()); + return MemOperand(scratch0()); + } + } + + if (shift_size >= 0) { + __ dsll(scratch0(), key, shift_size); + __ Daddu(scratch0(), base, scratch0()); + return MemOperand(scratch0(), base_offset); + } else { + if (shift_size == -32) { + __ dsra32(scratch0(), key, 0); + } else { + __ dsra(scratch0(), key, -shift_size); + } + __ Daddu(scratch0(), base, scratch0()); + return MemOperand(scratch0(), base_offset); + } +} + + +void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ li(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(a0)); + __ li(LoadIC::SlotRegister(), + Operand(Smi::FromInt(instr->hydrogen()->slot()))); + } + + Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { + Register scratch = scratch0(); + Register temp = scratch1(); + Register result = ToRegister(instr->result()); + + if (instr->hydrogen()->from_inlined()) { + __ Dsubu(result, sp, 2 * kPointerSize); + } else { + // Check if the calling frame is an arguments adaptor frame. 
+ Label done, adapted; + __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); + __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Result is the frame pointer for the frame if not adapted and for the real + // frame below the adaptor frame if adapted. + __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne). + __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq). + } +} + + +void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { + Register elem = ToRegister(instr->elements()); + Register result = ToRegister(instr->result()); + + Label done; + + // If no arguments adaptor frame the number of arguments is fixed. + __ Daddu(result, zero_reg, Operand(scope()->num_parameters())); + __ Branch(&done, eq, fp, Operand(elem)); + + // Arguments adaptor frame present. Get argument length from there. + __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ld(result, + MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiUntag(result); + + // Argument length is in result register. + __ bind(&done); +} + + +void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + // If the receiver is null or undefined, we have to pass the global + // object as a receiver to normal functions. Values have to be + // passed unchanged to builtins and strict-mode functions. + Label global_object, result_in_receiver; + + if (!instr->hydrogen()->known_function()) { + // Do not transform the receiver to object for strict mode functions. + __ ld(scratch, + FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + + // Do not transform the receiver to object for builtins. 
+ int32_t strict_mode_function_mask = + 1 << SharedFunctionInfo::kStrictModeBitWithinByte; + int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte; + + __ lbu(at, + FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset)); + __ And(at, at, Operand(strict_mode_function_mask)); + __ Branch(&result_in_receiver, ne, at, Operand(zero_reg)); + __ lbu(at, + FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset)); + __ And(at, at, Operand(native_mask)); + __ Branch(&result_in_receiver, ne, at, Operand(zero_reg)); + } + + // Normal function. Replace undefined or null with global receiver. + __ LoadRoot(scratch, Heap::kNullValueRootIndex); + __ Branch(&global_object, eq, receiver, Operand(scratch)); + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ Branch(&global_object, eq, receiver, Operand(scratch)); + + // Deoptimize if the receiver is not a JS object. + __ SmiTst(receiver, scratch); + DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); + + __ GetObjectType(receiver, scratch, scratch); + DeoptimizeIf(lt, instr->environment(), + scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(&result_in_receiver); + + __ bind(&global_object); + __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); + __ ld(result, + ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); + __ ld(result, + FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); + + if (result.is(receiver)) { + __ bind(&result_in_receiver); + } else { + Label result_ok; + __ Branch(&result_ok); + __ bind(&result_in_receiver); + __ mov(result, receiver); + __ bind(&result_ok); + } +} + + +void LCodeGen::DoApplyArguments(LApplyArguments* instr) { + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + Register length = ToRegister(instr->length()); + Register elements = ToRegister(instr->elements()); + Register scratch = scratch0(); + DCHECK(receiver.is(a0)); // Used for parameter count. 
+ DCHECK(function.is(a1)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).is(v0)); + + // Copy the arguments to this function possibly from the + // adaptor frame below it. + const uint32_t kArgumentsLimit = 1 * KB; + DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit)); + + // Push the receiver and use the register to keep the original + // number of arguments. + __ push(receiver); + __ Move(receiver, length); + // The arguments are at a one pointer size offset from elements. + __ Daddu(elements, elements, Operand(1 * kPointerSize)); + + // Loop through the arguments pushing them onto the execution + // stack. + Label invoke, loop; + // length is a small non-negative integer, due to the test above. + __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); + __ dsll(scratch, length, kPointerSizeLog2); + __ bind(&loop); + __ Daddu(scratch, elements, scratch); + __ ld(scratch, MemOperand(scratch)); + __ push(scratch); + __ Dsubu(length, length, Operand(1)); + __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); + __ dsll(scratch, length, kPointerSizeLog2); + + __ bind(&invoke); + DCHECK(instr->HasPointerMap()); + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator safepoint_generator( + this, pointers, Safepoint::kLazyDeopt); + // The number of arguments is stored in receiver which is a0, as expected + // by InvokeFunction. 
+ ParameterCount actual(receiver); + __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); +} + + +void LCodeGen::DoPushArgument(LPushArgument* instr) { + LOperand* argument = instr->value(); + if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { + Abort(kDoPushArgumentNotImplementedForDoubleType); + } else { + Register argument_reg = EmitLoadRegister(argument, at); + __ push(argument_reg); + } +} + + +void LCodeGen::DoDrop(LDrop* instr) { + __ Drop(instr->count()); +} + + +void LCodeGen::DoThisFunction(LThisFunction* instr) { + Register result = ToRegister(instr->result()); + __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); +} + + +void LCodeGen::DoContext(LContext* instr) { + // If there is a non-return use, the context must be moved to a register. + Register result = ToRegister(instr->result()); + if (info()->IsOptimizing()) { + __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } else { + // If there is no frame, the context must be in cp. + DCHECK(result.is(cp)); + } +} + + +void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + __ li(scratch0(), instr->hydrogen()->pairs()); + __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); + // The context is the first argument. + __ Push(cp, scratch0(), scratch1()); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); +} + + +void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, + int arity, + LInstruction* instr, + A1State a1_state) { + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; + + LPointerMap* pointers = instr->pointer_map(); + + if (can_invoke_directly) { + if (a1_state == A1_UNINITIALIZED) { + __ li(a1, function); + } + + // Change context. 
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Set r0 to arguments count if adaption is not needed. Assumes that r0 + // is available to write to at this point. + if (dont_adapt_arguments) { + __ li(a0, Operand(arity)); + } + + // Invoke function. + __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + __ Call(at); + + // Set up deoptimization. + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + } else { + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(arity); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); + } +} + + +void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { + DCHECK(instr->context() != NULL); + DCHECK(ToRegister(instr->context()).is(cp)); + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + // Deoptimize if not a heap number. + __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); + + Label done; + Register exponent = scratch0(); + scratch = no_reg; + __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); + // Check the sign of the argument. If the argument is positive, just + // return it. + __ Move(result, input); + __ And(at, exponent, Operand(HeapNumber::kSignMask)); + __ Branch(&done, eq, at, Operand(zero_reg)); + + // Input is negative. Reverse its sign. + // Preserve the value of all registers. + { + PushSafepointRegistersScope scope(this); + + // Registers were saved at the safepoint, so we can use + // many scratch registers. + Register tmp1 = input.is(a1) ? a0 : a1; + Register tmp2 = input.is(a2) ? a0 : a2; + Register tmp3 = input.is(a3) ? a0 : a3; + Register tmp4 = input.is(a4) ? a0 : a4; + + // exponent: floating point exponent value. 
+ + Label allocated, slow; + __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); + __ Branch(&allocated); + + // Slow case: Call the runtime system to do the number allocation. + __ bind(&slow); + + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, + instr->context()); + // Set the pointer to the new heap number in tmp. + if (!tmp1.is(v0)) + __ mov(tmp1, v0); + // Restore input_reg after call to runtime. + __ LoadFromSafepointRegisterSlot(input, input); + __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); + + __ bind(&allocated); + // exponent: floating point exponent value. + // tmp1: allocated heap number. + __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); + __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); + __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); + __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); + + __ StoreToSafepointRegisterSlot(tmp1, result); + } + + __ bind(&done); +} + + +void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + Label done; + __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); + __ mov(result, input); + __ dsubu(result, zero_reg, input); + // Overflow if result is still negative, i.e. 0x80000000. + DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); + __ bind(&done); +} + + +void LCodeGen::DoMathAbs(LMathAbs* instr) { + // Class for deferred case. 
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { + public: + DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LMathAbs* instr_; + }; + + Representation r = instr->hydrogen()->value()->representation(); + if (r.IsDouble()) { + FPURegister input = ToDoubleRegister(instr->value()); + FPURegister result = ToDoubleRegister(instr->result()); + __ abs_d(result, input); + } else if (r.IsSmiOrInteger32()) { + EmitIntegerMathAbs(instr); + } else { + // Representation is tagged. + DeferredMathAbsTaggedHeapNumber* deferred = + new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); + Register input = ToRegister(instr->value()); + // Smi check. + __ JumpIfNotSmi(input, deferred->entry()); + // If smi, handle it directly. + EmitIntegerMathAbs(instr); + __ bind(deferred->exit()); + } +} + + +void LCodeGen::DoMathFloor(LMathFloor* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + Register result = ToRegister(instr->result()); + Register scratch1 = scratch0(); + Register except_flag = ToRegister(instr->temp()); + + __ EmitFPUTruncate(kRoundToMinusInf, + result, + input, + scratch1, + double_scratch0(), + except_flag); + + // Deopt if the operation did not succeed. + DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + // Test for -0. + Label done; + __ Branch(&done, ne, result, Operand(zero_reg)); + __ mfhc1(scratch1, input); // Get exponent/sign bits. 
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); + __ bind(&done); + } +} + + +void LCodeGen::DoMathRound(LMathRound* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + Register result = ToRegister(instr->result()); + DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); + Register scratch = scratch0(); + Label done, check_sign_on_zero; + + // Extract exponent bits. + __ mfhc1(result, input); + __ Ext(scratch, + result, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + + // If the number is in ]-0.5, +0.5[, the result is +/- 0. + Label skip1; + __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); + __ mov(result, zero_reg); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ Branch(&check_sign_on_zero); + } else { + __ Branch(&done); + } + __ bind(&skip1); + + // The following conversion will not work with numbers + // outside of ]-2^32, 2^32[. + DeoptimizeIf(ge, instr->environment(), scratch, + Operand(HeapNumber::kExponentBias + 32)); + + // Save the original sign for later comparison. + __ And(scratch, result, Operand(HeapNumber::kSignMask)); + + __ Move(double_scratch0(), 0.5); + __ add_d(double_scratch0(), input, double_scratch0()); + + // Check sign of the result: if the sign changed, the input + // value was in ]0.5, 0[ and the result should be -0. + __ mfhc1(result, double_scratch0()); + // mfhc1 sign-extends, clear the upper bits. 
+ __ dsll32(result, result, 0); + __ dsrl32(result, result, 0); + __ Xor(result, result, Operand(scratch)); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + // ARM uses 'mi' here, which is 'lt' + DeoptimizeIf(lt, instr->environment(), result, + Operand(zero_reg)); + } else { + Label skip2; + // ARM uses 'mi' here, which is 'lt' + // Negating it results in 'ge' + __ Branch(&skip2, ge, result, Operand(zero_reg)); + __ mov(result, zero_reg); + __ Branch(&done); + __ bind(&skip2); + } + + Register except_flag = scratch; + __ EmitFPUTruncate(kRoundToMinusInf, + result, + double_scratch0(), + at, + double_scratch1, + except_flag); + + DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + // Test for -0. + __ Branch(&done, ne, result, Operand(zero_reg)); + __ bind(&check_sign_on_zero); + __ mfhc1(scratch, input); // Get exponent/sign bits. + __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); + } + __ bind(&done); +} + + +void LCodeGen::DoMathFround(LMathFround* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + __ cvt_s_d(result, input); + __ cvt_d_s(result, result); +} + + +void LCodeGen::DoMathSqrt(LMathSqrt* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + __ sqrt_d(result, input); +} + + +void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + DoubleRegister temp = ToDoubleRegister(instr->temp()); + + DCHECK(!input.is(result)); + + // Note that according to ECMA-262 15.8.2.13: + // Math.pow(-Infinity, 0.5) == Infinity + // Math.sqrt(-Infinity) == NaN + Label done; + __ Move(temp, -V8_INFINITY); + __ 
BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input); + // Set up Infinity in the delay slot. + // result is overwritten if the branch is not taken. + __ neg_d(result, temp); + + // Add +0 to convert -0 to +0. + __ add_d(result, input, kDoubleRegZero); + __ sqrt_d(result, result); + __ bind(&done); +} + + +void LCodeGen::DoPower(LPower* instr) { + Representation exponent_type = instr->hydrogen()->right()->representation(); + // Having marked this as a call, we can use any registers. + // Just make sure that the input/output registers are the expected ones. + DCHECK(!instr->right()->IsDoubleRegister() || + ToDoubleRegister(instr->right()).is(f4)); + DCHECK(!instr->right()->IsRegister() || + ToRegister(instr->right()).is(a2)); + DCHECK(ToDoubleRegister(instr->left()).is(f2)); + DCHECK(ToDoubleRegister(instr->result()).is(f0)); + + if (exponent_type.IsSmi()) { + MathPowStub stub(isolate(), MathPowStub::TAGGED); + __ CallStub(&stub); + } else if (exponent_type.IsTagged()) { + Label no_deopt; + __ JumpIfSmi(a2, &no_deopt); + __ ld(a7, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + DeoptimizeIf(ne, instr->environment(), a7, Operand(at)); + __ bind(&no_deopt); + MathPowStub stub(isolate(), MathPowStub::TAGGED); + __ CallStub(&stub); + } else if (exponent_type.IsInteger32()) { + MathPowStub stub(isolate(), MathPowStub::INTEGER); + __ CallStub(&stub); + } else { + DCHECK(exponent_type.IsDouble()); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); + __ CallStub(&stub); + } +} + + +void LCodeGen::DoMathExp(LMathExp* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); + DoubleRegister double_scratch2 = double_scratch0(); + Register temp1 = ToRegister(instr->temp1()); + Register temp2 = ToRegister(instr->temp2()); + + MathExpGenerator::EmitMathExp( + masm(), input, result, 
double_scratch1, double_scratch2, + temp1, temp2, scratch0()); +} + + +void LCodeGen::DoMathLog(LMathLog* instr) { + __ PrepareCallCFunction(0, 1, scratch0()); + __ MovToFloatParameter(ToDoubleRegister(instr->value())); + __ CallCFunction(ExternalReference::math_log_double_function(isolate()), + 0, 1); + __ MovFromFloatResult(ToDoubleRegister(instr->result())); +} + + +void LCodeGen::DoMathClz32(LMathClz32* instr) { + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + __ Clz(result, input); +} + + +void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->function()).is(a1)); + DCHECK(instr->HasPointerMap()); + + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(instr->arity()); + __ InvokeFunction(a1, count, CALL_FUNCTION, generator); + } else { + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), + instr->arity(), + instr, + A1_CONTAINS_TARGET); + } +} + + +void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { + DCHECK(ToRegister(instr->result()).is(v0)); + + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + + if (instr->target()->IsConstantOperand()) { + LConstantOperand* target = LConstantOperand::cast(instr->target()); + Handle<Code> code = Handle<Code>::cast(ToHandle(target)); + generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); + __ Call(code, RelocInfo::CODE_TARGET); + } else { + DCHECK(instr->target()->IsRegister()); + Register target = ToRegister(instr->target()); + generator.BeforeCall(__ CallSize(target)); + __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Call(target); + } + 
generator.AfterCall(); +} + + +void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { + DCHECK(ToRegister(instr->function()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); + + if (instr->hydrogen()->pass_argument_count()) { + __ li(a0, Operand(instr->arity())); + } + + // Change context. + __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Load the code entry address + __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + __ Call(at); + + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::DoCallFunction(LCallFunction* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->function()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); + + int arity = instr->arity(); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoCallNew(LCallNew* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); + + __ li(a0, Operand(instr->arity())); + // No cell in a2 for construct type feedback in optimized code + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->constructor()).is(a1)); + DCHECK(ToRegister(instr->result()).is(v0)); + + __ li(a0, Operand(instr->arity())); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + ElementsKind kind = instr->hydrogen()->elements_kind(); + AllocationSiteOverrideMode override_mode = + (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) + ? 
DISABLE_ALLOCATION_SITES + : DONT_OVERRIDE; + + if (instr->arity() == 0) { + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + } else if (instr->arity() == 1) { + Label done; + if (IsFastPackedElementsKind(kind)) { + Label packed_case; + // We might need a change here, + // look at the first argument. + __ ld(a5, MemOperand(sp, 0)); + __ Branch(&packed_case, eq, a5, Operand(zero_reg)); + + ElementsKind holey_kind = GetHoleyElementsKind(kind); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ jmp(&done); + __ bind(&packed_case); + } + + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ bind(&done); + } else { + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + } +} + + +void LCodeGen::DoCallRuntime(LCallRuntime* instr) { + CallRuntime(instr->function(), instr->arity(), instr); +} + + +void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { + Register function = ToRegister(instr->function()); + Register code_object = ToRegister(instr->code_object()); + __ Daddu(code_object, code_object, + Operand(Code::kHeaderSize - kHeapObjectTag)); + __ sd(code_object, + FieldMemOperand(function, JSFunction::kCodeEntryOffset)); +} + + +void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { + Register result = ToRegister(instr->result()); + Register base = ToRegister(instr->base_object()); + if (instr->offset()->IsConstantOperand()) { + LConstantOperand* offset = LConstantOperand::cast(instr->offset()); + __ Daddu(result, base, Operand(ToInteger32(offset))); + } else { + Register offset = ToRegister(instr->offset()); + __ Daddu(result, base, offset); + } +} + + +void LCodeGen::DoStoreNamedField(LStoreNamedField* 
instr) { + Representation representation = instr->representation(); + + Register object = ToRegister(instr->object()); + Register scratch2 = scratch1(); + Register scratch1 = scratch0(); + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); + if (access.IsExternalMemory()) { + Register value = ToRegister(instr->value()); + MemOperand operand = MemOperand(object, offset); + __ Store(value, operand, representation); + return; + } + + __ AssertNotSmi(object); + + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsSmi(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + DoubleRegister value = ToDoubleRegister(instr->value()); + __ sdc1(value, FieldMemOperand(object, offset)); + return; + } + + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); + __ li(scratch1, Operand(transition)); + __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); + if (instr->hydrogen()->NeedsWriteBarrierForMap()) { + Register temp = ToRegister(instr->temp()); + // Update the write barrier for the map field. + __ RecordWriteForMap(object, + scratch1, + temp, + GetRAState(), + kSaveFPRegs); + } + } + + // Do the store. 
+ Register destination = object; + if (!access.IsInobject()) { + destination = scratch1; + __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); + } + Register value = ToRegister(instr->value()); + if (representation.IsSmi() && SmiValuesAre32Bits() && + instr->hydrogen()->value()->representation().IsInteger32()) { + DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); + if (FLAG_debug_code) { + __ Load(scratch2, FieldMemOperand(destination, offset), representation); + __ AssertSmi(scratch2); + } + + // Store int value directly to upper half of the smi. + offset += kPointerSize / 2; + representation = Representation::Integer32(); + } + + MemOperand operand = FieldMemOperand(destination, offset); + __ Store(value, operand, representation); + if (instr->hydrogen()->NeedsWriteBarrier()) { + // Update the write barrier for the object for in-object properties. + __ RecordWriteField(destination, + offset, + value, + scratch2, + GetRAState(), + kSaveFPRegs, + EMIT_REMEMBERED_SET, + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); + } +} + + +void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); + + __ li(StoreIC::NameRegister(), Operand(instr->name())); + Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? 
hi : hs; + Operand operand((int64_t)0); + Register reg; + if (instr->index()->IsConstantOperand()) { + operand = ToOperand(instr->index()); + reg = ToRegister(instr->length()); + cc = CommuteCondition(cc); + } else { + reg = ToRegister(instr->index()); + operand = ToOperand(instr->length()); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { + Label done; + __ Branch(&done, NegateCondition(cc), reg, operand); + __ stop("eliminated bounds check failed"); + __ bind(&done); + } else { + DeoptimizeIf(cc, instr->environment(), reg, operand); + } +} + + +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + Register external_pointer = ToRegister(instr->elements()); + Register key = no_reg; + ElementsKind elements_kind = instr->elements_kind(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort(kArrayIndexConstantValueTooBig); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(elements_kind); + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) + ? 
(element_size_shift - (kSmiTagSize + kSmiShiftSize)) + : element_size_shift; + int base_offset = instr->base_offset(); + + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || + elements_kind == FLOAT32_ELEMENTS || + elements_kind == EXTERNAL_FLOAT64_ELEMENTS || + elements_kind == FLOAT64_ELEMENTS) { + Register address = scratch0(); + FPURegister value(ToDoubleRegister(instr->value())); + if (key_is_constant) { + if (constant_key != 0) { + __ Daddu(address, external_pointer, + Operand(constant_key << element_size_shift)); + } else { + address = external_pointer; + } + } else { + if (shift_size < 0) { + if (shift_size == -32) { + __ dsra32(address, key, 0); + } else { + __ dsra(address, key, -shift_size); + } + } else { + __ dsll(address, key, shift_size); + } + __ Daddu(address, external_pointer, address); + } + + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || + elements_kind == FLOAT32_ELEMENTS) { + __ cvt_s_d(double_scratch0(), value); + __ swc1(double_scratch0(), MemOperand(address, base_offset)); + } else { // Storing doubles, not floats. 
+ __ sdc1(value, MemOperand(address, base_offset)); + } + } else { + Register value(ToRegister(instr->value())); + MemOperand mem_operand = PrepareKeyedOperand( + key, external_pointer, key_is_constant, constant_key, + element_size_shift, shift_size, + base_offset); + switch (elements_kind) { + case EXTERNAL_UINT8_CLAMPED_ELEMENTS: + case EXTERNAL_INT8_ELEMENTS: + case EXTERNAL_UINT8_ELEMENTS: + case UINT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + case INT8_ELEMENTS: + __ sb(value, mem_operand); + break; + case EXTERNAL_INT16_ELEMENTS: + case EXTERNAL_UINT16_ELEMENTS: + case INT16_ELEMENTS: + case UINT16_ELEMENTS: + __ sh(value, mem_operand); + break; + case EXTERNAL_INT32_ELEMENTS: + case EXTERNAL_UINT32_ELEMENTS: + case INT32_ELEMENTS: + case UINT32_ELEMENTS: + __ sw(value, mem_operand); + break; + case FLOAT32_ELEMENTS: + case FLOAT64_ELEMENTS: + case EXTERNAL_FLOAT32_ELEMENTS: + case EXTERNAL_FLOAT64_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case DICTIONARY_ELEMENTS: + case SLOPPY_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + DoubleRegister value = ToDoubleRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register scratch = scratch0(); + DoubleRegister double_scratch = double_scratch0(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int base_offset = instr->base_offset(); + Label not_nan, done; + + // Calculate the effective address of the slot in the array to store the + // double value. 
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + if (key_is_constant) { + int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort(kArrayIndexConstantValueTooBig); + } + __ Daddu(scratch, elements, + Operand((constant_key << element_size_shift) + base_offset)); + } else { + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) + ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) + : element_size_shift; + __ Daddu(scratch, elements, Operand(base_offset)); + DCHECK((shift_size == 3) || (shift_size == -29)); + if (shift_size == 3) { + __ dsll(at, ToRegister(instr->key()), 3); + } else if (shift_size == -29) { + __ dsra(at, ToRegister(instr->key()), 29); + } + __ Daddu(scratch, scratch, at); + } + + if (instr->NeedsCanonicalization()) { + Label is_nan; + // Check for NaN. All NaNs must be canonicalized. + __ BranchF(NULL, &is_nan, eq, value, value); + __ Branch(¬_nan); + + // Only load canonical NaN if the comparison above set the overflow. + __ bind(&is_nan); + __ LoadRoot(at, Heap::kNanValueRootIndex); + __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset)); + __ sdc1(double_scratch, MemOperand(scratch, 0)); + __ Branch(&done); + } + + __ bind(¬_nan); + __ sdc1(value, MemOperand(scratch, 0)); + __ bind(&done); +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { + Register value = ToRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) + : no_reg; + Register scratch = scratch0(); + Register store_base = scratch; + int offset = instr->base_offset(); + + // Do the store. 
+ if (instr->key()->IsConstantOperand()) { + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset += ToInteger32(const_operand) * kPointerSize; + store_base = elements; + } else { + // Even though the HLoadKeyed instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. + if (instr->hydrogen()->key()->representation().IsSmi()) { + __ SmiScale(scratch, key, kPointerSizeLog2); + __ daddu(store_base, elements, scratch); + } else { + __ dsll(scratch, key, kPointerSizeLog2); + __ daddu(store_base, elements, scratch); + } + } + + Representation representation = instr->hydrogen()->value()->representation(); + if (representation.IsInteger32() && SmiValuesAre32Bits()) { + DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); + DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); + if (FLAG_debug_code) { + Register temp = scratch1(); + __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); + __ AssertSmi(temp); + } + + // Store int value directly to upper half of the smi. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + offset += kPointerSize / 2; + representation = Representation::Integer32(); + } + + __ Store(value, MemOperand(store_base, offset), representation); + + if (instr->hydrogen()->NeedsWriteBarrier()) { + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. 
+ __ Daddu(key, store_base, Operand(offset)); + __ RecordWrite(elements, + key, + value, + GetRAState(), + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); + } +} + + +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases: external, fast double + if (instr->is_typed_elements()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); + } +} + + +void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); + + Handle<Code> ic = (instr->strict_mode() == STRICT) + ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() + : isolate()->builtins()->KeyedStoreIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { + Register object_reg = ToRegister(instr->object()); + Register scratch = scratch0(); + + Handle<Map> from_map = instr->original_map(); + Handle<Map> to_map = instr->transitioned_map(); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); + + Label not_applicable; + __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); + __ Branch(¬_applicable, ne, scratch, Operand(from_map)); + + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + Register new_map_reg = ToRegister(instr->new_map_temp()); + __ li(new_map_reg, Operand(to_map)); + __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); + // Write barrier. 
+ __ RecordWriteForMap(object_reg, + new_map_reg, + scratch, + GetRAState(), + kDontSaveFPRegs); + } else { + DCHECK(object_reg.is(a0)); + DCHECK(ToRegister(instr->context()).is(cp)); + PushSafepointRegistersScope scope(this); + __ li(a1, Operand(to_map)); + bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); + __ CallStub(&stub); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); + } + __ bind(¬_applicable); +} + + +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + Label no_memento_found; + __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, + ne, &no_memento_found); + DeoptimizeIf(al, instr->environment()); + __ bind(&no_memento_found); +} + + +void LCodeGen::DoStringAdd(LStringAdd* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + DCHECK(ToRegister(instr->left()).is(a1)); + DCHECK(ToRegister(instr->right()).is(a0)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), + instr->hydrogen()->pretenure_flag()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { + class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { + public: + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredStringCharCodeAt(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LStringCharCodeAt* instr_; + }; + + DeferredStringCharCodeAt* deferred = + new(zone()) DeferredStringCharCodeAt(this, instr); + StringCharLoadGenerator::Generate(masm(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->result()), + deferred->entry()); + __ 
bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ mov(result, zero_reg); + + PushSafepointRegistersScope scope(this); + __ push(string); + // Push the index as a smi. This is safe because of the checks in + // DoStringCharCodeAt above. + if (instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index))); + __ push(scratch); + } else { + Register index = ToRegister(instr->index()); + __ SmiTag(index); + __ push(index); + } + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, + instr->context()); + __ AssertSmi(v0); + __ SmiUntag(v0); + __ StoreToSafepointRegisterSlot(v0, result); +} + + +void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { + class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { + public: + DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredStringCharFromCode(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LStringCharFromCode* instr_; + }; + + DeferredStringCharFromCode* deferred = + new(zone()) DeferredStringCharFromCode(this, instr); + + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); + Register char_code = ToRegister(instr->char_code()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + DCHECK(!char_code.is(result)); + + __ Branch(deferred->entry(), hi, + char_code, Operand(String::kMaxOneByteCharCode)); + __ 
LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); + __ dsll(scratch, char_code, kPointerSizeLog2); + __ Daddu(result, result, scratch); + __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize)); + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ Branch(deferred->entry(), eq, result, Operand(scratch)); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { + Register char_code = ToRegister(instr->char_code()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ mov(result, zero_reg); + + PushSafepointRegistersScope scope(this); + __ SmiTag(char_code); + __ push(char_code); + CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); + __ StoreToSafepointRegisterSlot(v0, result); +} + + +void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsRegister() || input->IsStackSlot()); + LOperand* output = instr->result(); + DCHECK(output->IsDoubleRegister()); + FPURegister single_scratch = double_scratch0().low(); + if (input->IsStackSlot()) { + Register scratch = scratch0(); + __ ld(scratch, ToMemOperand(input)); + __ mtc1(scratch, single_scratch); + } else { + __ mtc1(ToRegister(input), single_scratch); + } + __ cvt_d_w(ToDoubleRegister(output), single_scratch); +} + + +void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + LOperand* input = instr->value(); + LOperand* output = instr->result(); + + FPURegister dbl_scratch = double_scratch0(); + __ mtc1(ToRegister(input), dbl_scratch); + __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22? 
+} + + +void LCodeGen::DoNumberTagU(LNumberTagU* instr) { + class DeferredNumberTagU V8_FINAL : public LDeferredCode { + public: + DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredNumberTagIU(instr_, + instr_->value(), + instr_->temp1(), + instr_->temp2(), + UNSIGNED_INT32); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LNumberTagU* instr_; + }; + + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + + DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); + __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue)); + __ SmiTag(result, input); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp1, + LOperand* temp2, + IntegerSignedness signedness) { + Label done, slow; + Register src = ToRegister(value); + Register dst = ToRegister(instr->result()); + Register tmp1 = scratch0(); + Register tmp2 = ToRegister(temp1); + Register tmp3 = ToRegister(temp2); + DoubleRegister dbl_scratch = double_scratch0(); + + if (signedness == SIGNED_INT32) { + // There was overflow, so bits 30 and 31 of the original integer + // disagree. Try to allocate a heap number in new space and store + // the value in there. If that fails, call the runtime system. + if (dst.is(src)) { + __ SmiUntag(src, dst); + __ Xor(src, src, Operand(0x80000000)); + } + __ mtc1(src, dbl_scratch); + __ cvt_d_w(dbl_scratch, dbl_scratch); + } else { + __ mtc1(src, dbl_scratch); + __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); + } + + if (FLAG_inline_new) { + __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT); + __ Branch(&done); + } + + // Slow case: Call the runtime system to do the number allocation. 
+ __ bind(&slow); + { + // TODO(3095996): Put a valid pointer value in the stack slot where the + // result register is stored, as this register is in the pointer map, but + // contains an integer value. + __ mov(dst, zero_reg); + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(v0, dst); + } + + // Done. Put the value in dbl_scratch into the value of the allocated heap + // number. + __ bind(&done); + __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); +} + + +void LCodeGen::DoNumberTagD(LNumberTagD* instr) { + class DeferredNumberTagD V8_FINAL : public LDeferredCode { + public: + DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredNumberTagD(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LNumberTagD* instr_; + }; + + DoubleRegister input_reg = ToDoubleRegister(instr->value()); + Register scratch = scratch0(); + Register reg = ToRegister(instr->result()); + Register temp1 = ToRegister(instr->temp()); + Register temp2 = ToRegister(instr->temp2()); + + DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); + if (FLAG_inline_new) { + __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); + // We want the untagged address first for performance + __ AllocateHeapNumber(reg, temp1, temp2, scratch, 
deferred->entry(), + DONT_TAG_RESULT); + } else { + __ Branch(deferred->entry()); + } + __ bind(deferred->exit()); + __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset)); + // Now that we have finished with the object's real address tag it + __ Daddu(reg, reg, kHeapObjectTag); +} + + +void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + Register reg = ToRegister(instr->result()); + __ mov(reg, zero_reg); + + PushSafepointRegistersScope scope(this); + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + __ Dsubu(v0, v0, kHeapObjectTag); + __ StoreToSafepointRegisterSlot(v0, reg); +} + + +void LCodeGen::DoSmiTag(LSmiTag* instr) { + HChange* hchange = instr->hydrogen(); + Register input = ToRegister(instr->value()); + Register output = ToRegister(instr->result()); + if (hchange->CheckFlag(HValue::kCanOverflow) && + hchange->value()->CheckFlag(HValue::kUint32)) { + __ And(at, input, Operand(0x80000000)); + DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); + } + if (hchange->CheckFlag(HValue::kCanOverflow) && + !hchange->value()->CheckFlag(HValue::kUint32)) { + __ SmiTagCheckOverflow(output, input, at); + DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg)); + } else { + __ SmiTag(output, input); + } +} + + +void LCodeGen::DoSmiUntag(LSmiUntag* instr) { + Register scratch = scratch0(); + Register input 
= ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + if (instr->needs_check()) { + STATIC_ASSERT(kHeapObjectTag == 1); + // If the input is a HeapObject, value of scratch won't be zero. + __ And(scratch, input, Operand(kHeapObjectTag)); + __ SmiUntag(result, input); + DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); + } else { + __ SmiUntag(result, input); + } +} + + +void LCodeGen::EmitNumberUntagD(Register input_reg, + DoubleRegister result_reg, + bool can_convert_undefined_to_nan, + bool deoptimize_on_minus_zero, + LEnvironment* env, + NumberUntagDMode mode) { + Register scratch = scratch0(); + Label convert, load_smi, done; + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); + // Heap number map check. + __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + if (can_convert_undefined_to_nan) { + __ Branch(&convert, ne, scratch, Operand(at)); + } else { + DeoptimizeIf(ne, env, scratch, Operand(at)); + } + // Load heap number. + __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + __ mfc1(at, result_reg); + __ Branch(&done, ne, at, Operand(zero_reg)); + __ mfhc1(scratch, result_reg); // Get exponent/sign bits. + DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask)); + } + __ Branch(&done); + if (can_convert_undefined_to_nan) { + __ bind(&convert); + // Convert undefined (and hole) to NaN. 
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + DeoptimizeIf(ne, env, input_reg, Operand(at)); + __ LoadRoot(scratch, Heap::kNanValueRootIndex); + __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); + __ Branch(&done); + } + } else { + __ SmiUntag(scratch, input_reg); + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); + } + // Smi to double register conversion + __ bind(&load_smi); + // scratch: untagged value of input_reg + __ mtc1(scratch, result_reg); + __ cvt_d_w(result_reg, result_reg); + __ bind(&done); +} + + +void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { + Register input_reg = ToRegister(instr->value()); + Register scratch1 = scratch0(); + Register scratch2 = ToRegister(instr->temp()); + DoubleRegister double_scratch = double_scratch0(); + DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); + + DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); + DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); + + Label done; + + // The input is a tagged HeapObject. + // Heap number map check. + __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + // This 'at' value and scratch1 map value are used for tests in both clauses + // of the if. + + if (instr->truncating()) { + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. + Label no_heap_number, check_bools, check_false; + // Check HeapNumber map. + __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at)); + __ mov(scratch2, input_reg); // In delay slot. + __ TruncateHeapNumberToI(input_reg, scratch2); + __ Branch(&done); + + // Check for Oddballs. Undefined/False is converted to zero and True to one + // for truncating conversions. 
+ __ bind(&no_heap_number); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&check_bools, ne, input_reg, Operand(at)); + DCHECK(ToRegister(instr->result()).is(input_reg)); + __ Branch(USE_DELAY_SLOT, &done); + __ mov(input_reg, zero_reg); // In delay slot. + + __ bind(&check_bools); + __ LoadRoot(at, Heap::kTrueValueRootIndex); + __ Branch(&check_false, ne, scratch2, Operand(at)); + __ Branch(USE_DELAY_SLOT, &done); + __ li(input_reg, Operand(1)); // In delay slot. + + __ bind(&check_false); + __ LoadRoot(at, Heap::kFalseValueRootIndex); + DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at)); + __ Branch(USE_DELAY_SLOT, &done); + __ mov(input_reg, zero_reg); // In delay slot. + } else { + // Deoptimize if we don't have a heap number. + DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at)); + + // Load the double value. + __ ldc1(double_scratch, + FieldMemOperand(input_reg, HeapNumber::kValueOffset)); + + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + input_reg, + double_scratch, + scratch1, + double_scratch2, + except_flag, + kCheckForInexactConversion); + + // Deopt if the operation did not succeed. + DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ Branch(&done, ne, input_reg, Operand(zero_reg)); + + __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. 
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); + } + } + __ bind(&done); +} + + +void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI V8_FINAL : public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredTaggedToI(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LTaggedToI* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + DCHECK(input->Equals(instr->result())); + + Register input_reg = ToRegister(input); + + if (instr->hydrogen()->value()->representation().IsSmi()) { + __ SmiUntag(input_reg); + } else { + DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); + + // Let the deferred code handle the HeapObject case. + __ JumpIfNotSmi(input_reg, deferred->entry()); + + // Smi to int32 conversion. + __ SmiUntag(input_reg); + __ bind(deferred->exit()); + } +} + + +void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsDoubleRegister()); + + Register input_reg = ToRegister(input); + DoubleRegister result_reg = ToDoubleRegister(result); + + HValue* value = instr->hydrogen()->value(); + NumberUntagDMode mode = value->representation().IsSmi() + ? 
NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; + + EmitNumberUntagD(input_reg, result_reg, + instr->hydrogen()->can_convert_undefined_to_nan(), + instr->hydrogen()->deoptimize_on_minus_zero(), + instr->environment(), + mode); +} + + +void LCodeGen::DoDoubleToI(LDoubleToI* instr) { + Register result_reg = ToRegister(instr->result()); + Register scratch1 = scratch0(); + DoubleRegister double_input = ToDoubleRegister(instr->value()); + + if (instr->truncating()) { + __ TruncateDoubleToI(result_reg, double_input); + } else { + Register except_flag = LCodeGen::scratch1(); + + __ EmitFPUTruncate(kRoundToMinusInf, + result_reg, + double_input, + scratch1, + double_scratch0(), + except_flag, + kCheckForInexactConversion); + + // Deopt if the operation did not succeed (except_flag != 0). + DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ Branch(&done, ne, result_reg, Operand(zero_reg)); + __ mfhc1(scratch1, double_input); // Get exponent/sign bits. + __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); + __ bind(&done); + } + } +} + + +void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { + Register result_reg = ToRegister(instr->result()); + Register scratch1 = LCodeGen::scratch0(); + DoubleRegister double_input = ToDoubleRegister(instr->value()); + + if (instr->truncating()) { + __ TruncateDoubleToI(result_reg, double_input); + } else { + Register except_flag = LCodeGen::scratch1(); + + __ EmitFPUTruncate(kRoundToMinusInf, + result_reg, + double_input, + scratch1, + double_scratch0(), + except_flag, + kCheckForInexactConversion); + + // Deopt if the operation did not succeed (except_flag != 0). 
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ Branch(&done, ne, result_reg, Operand(zero_reg)); + __ mfhc1(scratch1, double_input); // Get exponent/sign bits. + __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); + __ bind(&done); + } + } + __ SmiTag(result_reg, result_reg); +} + + +void LCodeGen::DoCheckSmi(LCheckSmi* instr) { + LOperand* input = instr->value(); + __ SmiTst(ToRegister(input), at); + DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); +} + + +void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + LOperand* input = instr->value(); + __ SmiTst(ToRegister(input), at); + DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); + } +} + + +void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { + Register input = ToRegister(instr->value()); + Register scratch = scratch0(); + + __ GetObjectType(input, scratch, scratch); + + if (instr->hydrogen()->is_interval_check()) { + InstanceType first; + InstanceType last; + instr->hydrogen()->GetCheckInterval(&first, &last); + + // If there is only one type in the interval check for equality. + if (first == last) { + DeoptimizeIf(ne, instr->environment(), scratch, Operand(first)); + } else { + DeoptimizeIf(lo, instr->environment(), scratch, Operand(first)); + // Omit check for the last type. + if (last != LAST_TYPE) { + DeoptimizeIf(hi, instr->environment(), scratch, Operand(last)); + } + } + } else { + uint8_t mask; + uint8_t tag; + instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); + + if (IsPowerOf2(mask)) { + DCHECK(tag == 0 || IsPowerOf2(tag)); + __ And(at, scratch, mask); + DeoptimizeIf(tag == 0 ? 
ne : eq, instr->environment(), + at, Operand(zero_reg)); + } else { + __ And(scratch, scratch, Operand(mask)); + DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag)); + } + } +} + + +void LCodeGen::DoCheckValue(LCheckValue* instr) { + Register reg = ToRegister(instr->value()); + Handle<HeapObject> object = instr->hydrogen()->object().handle(); + AllowDeferredHandleDereference smi_check; + if (isolate()->heap()->InNewSpace(*object)) { + Register reg = ToRegister(instr->value()); + Handle<Cell> cell = isolate()->factory()->NewCell(object); + __ li(at, Operand(Handle<Object>(cell))); + __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); + DeoptimizeIf(ne, instr->environment(), reg, + Operand(at)); + } else { + DeoptimizeIf(ne, instr->environment(), reg, + Operand(object)); + } +} + + +void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { + { + PushSafepointRegistersScope scope(this); + __ push(object); + __ mov(cp, zero_reg); + __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); + RecordSafepointWithRegisters( + instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(v0, scratch0()); + } + __ SmiTst(scratch0(), at); + DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); +} + + +void LCodeGen::DoCheckMaps(LCheckMaps* instr) { + class DeferredCheckMaps V8_FINAL : public LDeferredCode { + public: + DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) + : LDeferredCode(codegen), instr_(instr), object_(object) { + SetExit(check_maps()); + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredInstanceMigration(instr_, object_); + } + Label* check_maps() { return &check_maps_; } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LCheckMaps* instr_; + Label check_maps_; + Register object_; + }; + + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { 
+ AddStabilityDependency(maps->at(i).handle()); + } + return; + } + + Register map_reg = scratch0(); + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + Register reg = ToRegister(input); + __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); + + DeferredCheckMaps* deferred = NULL; + if (instr->hydrogen()->HasMigrationTarget()) { + deferred = new(zone()) DeferredCheckMaps(this, instr, reg); + __ bind(deferred->check_maps()); + } + + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + Label success; + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); + __ CompareMapAndBranch(map_reg, map, &success, eq, &success); + } + Handle<Map> map = maps->at(maps->size() - 1).handle(); + // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). + if (instr->hydrogen()->HasMigrationTarget()) { + __ Branch(deferred->entry(), ne, map_reg, Operand(map)); + } else { + DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map)); + } + + __ bind(&success); +} + + +void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { + DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); + Register result_reg = ToRegister(instr->result()); + DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); + __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); +} + + +void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { + Register unclamped_reg = ToRegister(instr->unclamped()); + Register result_reg = ToRegister(instr->result()); + __ ClampUint8(result_reg, unclamped_reg); +} + + +void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { + Register scratch = scratch0(); + Register input_reg = ToRegister(instr->unclamped()); + Register result_reg = ToRegister(instr->result()); + DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); + Label is_smi, done, heap_number; + + // Both smi and heap number cases are handled. 
+ __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); + + // Check for heap number + __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); + __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); + + // Check for undefined. Undefined is converted to zero for clamping + // conversions. + DeoptimizeIf(ne, instr->environment(), input_reg, + Operand(factory()->undefined_value())); + __ mov(result_reg, zero_reg); + __ jmp(&done); + + // Heap number + __ bind(&heap_number); + __ ldc1(double_scratch0(), FieldMemOperand(input_reg, + HeapNumber::kValueOffset)); + __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); + __ jmp(&done); + + __ bind(&is_smi); + __ ClampUint8(result_reg, scratch); + + __ bind(&done); +} + + +void LCodeGen::DoDoubleBits(LDoubleBits* instr) { + DoubleRegister value_reg = ToDoubleRegister(instr->value()); + Register result_reg = ToRegister(instr->result()); + if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { + __ FmoveHigh(result_reg, value_reg); + } else { + __ FmoveLow(result_reg, value_reg); + } +} + + +void LCodeGen::DoConstructDouble(LConstructDouble* instr) { + Register hi_reg = ToRegister(instr->hi()); + Register lo_reg = ToRegister(instr->lo()); + DoubleRegister result_reg = ToDoubleRegister(instr->result()); + __ Move(result_reg, lo_reg, hi_reg); +} + + +void LCodeGen::DoAllocate(LAllocate* instr) { + class DeferredAllocate V8_FINAL : public LDeferredCode { + public: + DeferredAllocate(LCodeGen* codegen, LAllocate* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredAllocate(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LAllocate* instr_; + }; + + DeferredAllocate* deferred = + new(zone()) DeferredAllocate(this, instr); + + Register result = ToRegister(instr->result()); + Register scratch = ToRegister(instr->temp1()); + Register scratch2 = ToRegister(instr->temp2()); + + // Allocate 
memory for the object. + AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); + } + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (size <= Page::kMaxRegularHeapObjectSize) { + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); + } else { + __ jmp(deferred->entry()); + } + } else { + Register size = ToRegister(instr->size()); + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); + } + + __ bind(deferred->exit()); + + if (instr->hydrogen()->MustPrefillWithFiller()) { + STATIC_ASSERT(kHeapObjectTag == 1); + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + __ li(scratch, Operand(size - kHeapObjectTag)); + } else { + __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); + } + __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); + Label loop; + __ bind(&loop); + __ Dsubu(scratch, scratch, Operand(kPointerSize)); + __ Daddu(at, result, Operand(scratch)); + __ sd(scratch2, MemOperand(at)); + __ Branch(&loop, ge, scratch, Operand(zero_reg)); + } +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. 
+ __ mov(result, zero_reg); + + PushSafepointRegistersScope scope(this); + if (instr->size()->IsRegister()) { + Register size = ToRegister(instr->size()); + DCHECK(!size.is(result)); + __ SmiTag(size); + __ push(size); + } else { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (size >= 0 && size <= Smi::kMaxValue) { + __ li(v0, Operand(Smi::FromInt(size))); + __ Push(v0); + } else { + // We should never get here at runtime => abort + __ stop("invalid allocation size"); + return; + } + } + + int flags = AllocateDoubleAlignFlag::encode( + instr->hydrogen()->MustAllocateDoubleAligned()); + if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); + } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); + } else { + flags = AllocateTargetSpace::update(flags, NEW_SPACE); + } + __ li(v0, Operand(Smi::FromInt(flags))); + __ Push(v0); + + CallRuntimeFromDeferred( + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); + __ StoreToSafepointRegisterSlot(v0, result); +} + + +void LCodeGen::DoToFastProperties(LToFastProperties* instr) { + DCHECK(ToRegister(instr->value()).is(a0)); + DCHECK(ToRegister(instr->result()).is(v0)); + __ push(a0); + CallRuntime(Runtime::kToFastProperties, 1, instr); +} + + +void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + Label materialized; + // Registers will be used as follows: + // a7 = literals array. + // a1 = regexp literal. + // a0 = regexp literal clone. + // a2 and a4-a6 are used as temporaries. 
+ int literal_offset = + FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); + __ li(a7, instr->hydrogen()->literals()); + __ ld(a1, FieldMemOperand(a7, literal_offset)); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&materialized, ne, a1, Operand(at)); + + // Create regexp literal using runtime function + // Result will be in v0. + __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ li(a5, Operand(instr->hydrogen()->pattern())); + __ li(a4, Operand(instr->hydrogen()->flags())); + __ Push(a7, a6, a5, a4); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); + __ mov(a1, v0); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + + __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ li(a0, Operand(Smi::FromInt(size))); + __ Push(a1, a0); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); + __ pop(a1); + + __ bind(&allocated); + // Copy the content into the newly allocated memory. + // (Unroll copy loop once for better throughput). + for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { + __ ld(a3, FieldMemOperand(a1, i)); + __ ld(a2, FieldMemOperand(a1, i + kPointerSize)); + __ sd(a3, FieldMemOperand(v0, i)); + __ sd(a2, FieldMemOperand(v0, i + kPointerSize)); + } + if ((size % (2 * kPointerSize)) != 0) { + __ ld(a3, FieldMemOperand(a1, size - kPointerSize)); + __ sd(a3, FieldMemOperand(v0, size - kPointerSize)); + } +} + + +void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { + DCHECK(ToRegister(instr->context()).is(cp)); + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. 
+ bool pretenure = instr->hydrogen()->pretenure(); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), + instr->hydrogen()->is_generator()); + __ li(a2, Operand(instr->hydrogen()->shared_info())); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + } else { + __ li(a2, Operand(instr->hydrogen()->shared_info())); + __ li(a1, Operand(pretenure ? factory()->true_value() + : factory()->false_value())); + __ Push(cp, a2, a1); + CallRuntime(Runtime::kNewClosure, 3, instr); + } +} + + +void LCodeGen::DoTypeof(LTypeof* instr) { + DCHECK(ToRegister(instr->result()).is(v0)); + Register input = ToRegister(instr->value()); + __ push(input); + CallRuntime(Runtime::kTypeof, 1, instr); +} + + +void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { + Register input = ToRegister(instr->value()); + + Register cmp1 = no_reg; + Operand cmp2 = Operand(no_reg); + + Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), + instr->FalseLabel(chunk_), + input, + instr->type_literal(), + &cmp1, + &cmp2); + + DCHECK(cmp1.is_valid()); + DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid()); + + if (final_branch_condition != kNoCondition) { + EmitBranch(instr, final_branch_condition, cmp1, cmp2); + } +} + + +Condition LCodeGen::EmitTypeofIs(Label* true_label, + Label* false_label, + Register input, + Handle<String> type_name, + Register* cmp1, + Operand* cmp2) { + // This function utilizes the delay slot heavily. This is used to load + // values that are always usable without depending on the type of the input + // register. 
+ Condition final_branch_condition = kNoCondition; + Register scratch = scratch0(); + Factory* factory = isolate()->factory(); + if (String::Equals(type_name, factory->number_string())) { + __ JumpIfSmi(input, true_label); + __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + *cmp1 = input; + *cmp2 = Operand(at); + final_branch_condition = eq; + + } else if (String::Equals(type_name, factory->string_string())) { + __ JumpIfSmi(input, false_label); + __ GetObjectType(input, input, scratch); + __ Branch(USE_DELAY_SLOT, false_label, + ge, scratch, Operand(FIRST_NONSTRING_TYPE)); + // input is an object so we can load the BitFieldOffset even if we take the + // other branch. + __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); + __ And(at, at, 1 << Map::kIsUndetectable); + *cmp1 = at; + *cmp2 = Operand(zero_reg); + final_branch_condition = eq; + + } else if (String::Equals(type_name, factory->symbol_string())) { + __ JumpIfSmi(input, false_label); + __ GetObjectType(input, input, scratch); + *cmp1 = scratch; + *cmp2 = Operand(SYMBOL_TYPE); + final_branch_condition = eq; + + } else if (String::Equals(type_name, factory->boolean_string())) { + __ LoadRoot(at, Heap::kTrueValueRootIndex); + __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); + __ LoadRoot(at, Heap::kFalseValueRootIndex); + *cmp1 = at; + *cmp2 = Operand(input); + final_branch_condition = eq; + + } else if (String::Equals(type_name, factory->undefined_string())) { + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); + // The first instruction of JumpIfSmi is an And - it is safe in the delay + // slot. + __ JumpIfSmi(input, false_label); + // Check for undetectable objects => true. 
+ __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); + __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); + __ And(at, at, 1 << Map::kIsUndetectable); + *cmp1 = at; + *cmp2 = Operand(zero_reg); + final_branch_condition = ne; + + } else if (String::Equals(type_name, factory->function_string())) { + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ JumpIfSmi(input, false_label); + __ GetObjectType(input, scratch, input); + __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE)); + *cmp1 = input; + *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE); + final_branch_condition = eq; + + } else if (String::Equals(type_name, factory->object_string())) { + __ JumpIfSmi(input, false_label); + __ LoadRoot(at, Heap::kNullValueRootIndex); + __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); + Register map = input; + __ GetObjectType(input, map, scratch); + __ Branch(false_label, + lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ Branch(USE_DELAY_SLOT, false_label, + gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); + // map is still valid, so the BitField can be loaded in delay slot. + // Check for undetectable objects => false. + __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(at, at, 1 << Map::kIsUndetectable); + *cmp1 = at; + *cmp2 = Operand(zero_reg); + final_branch_condition = eq; + + } else { + *cmp1 = at; + *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion. + __ Branch(false_label); + } + + return final_branch_condition; +} + + +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { + Register temp1 = ToRegister(instr->temp()); + + EmitIsConstructCall(temp1, scratch0()); + + EmitBranch(instr, eq, temp1, + Operand(Smi::FromInt(StackFrame::CONSTRUCT))); +} + + +void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { + DCHECK(!temp1.is(temp2)); + // Get the frame pointer for the calling frame. 
+ __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); + __ Branch(&check_frame_marker, ne, temp2, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); +} + + +void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { + if (!info()->IsStub()) { + // Ensure that we have enough space after the previous lazy-bailout + // instruction for patching the code here. + int current_pc = masm()->pc_offset(); + if (current_pc < last_lazy_deopt_pc_ + space_needed) { + int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; + DCHECK_EQ(0, padding_size % Assembler::kInstrSize); + while (padding_size > 0) { + __ nop(); + padding_size -= Assembler::kInstrSize; + } + } + } + last_lazy_deopt_pc_ = masm()->pc_offset(); +} + + +void LCodeGen::DoLazyBailout(LLazyBailout* instr) { + last_lazy_deopt_pc_ = masm()->pc_offset(); + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); +} + + +void LCodeGen::DoDeoptimize(LDeoptimize* instr) { + Deoptimizer::BailoutType type = instr->hydrogen()->type(); + // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the + // needed return address), even though the implementation of LAZY and EAGER is + // now identical. When LAZY is eventually completely folded into EAGER, remove + // the special case below. 
+ if (info()->IsStub() && type == Deoptimizer::EAGER) { + type = Deoptimizer::LAZY; + } + + Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); + DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg)); +} + + +void LCodeGen::DoDummy(LDummy* instr) { + // Nothing to see here, move on! +} + + +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! +} + + +void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { + PushSafepointRegistersScope scope(this); + LoadContextFromDeferred(instr->context()); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); + RecordSafepointWithLazyDeopt( + instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); +} + + +void LCodeGen::DoStackCheck(LStackCheck* instr) { + class DeferredStackCheck V8_FINAL : public LDeferredCode { + public: + DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredStackCheck(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LStackCheck* instr_; + }; + + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + // There is no LLazyBailout instruction for stack-checks. We have to + // prepare for lazy deoptimization explicitly here. + if (instr->hydrogen()->is_function_entry()) { + // Perform stack overflow check. + Label done; + __ LoadRoot(at, Heap::kStackLimitRootIndex); + __ Branch(&done, hs, sp, Operand(at)); + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(cp)); + CallCode(isolate()->builtins()->StackCheck(), + RelocInfo::CODE_TARGET, + instr); + __ bind(&done); + } else { + DCHECK(instr->hydrogen()->is_backwards_branch()); + // Perform stack overflow check if this goto needs it before jumping. 
+ DeferredStackCheck* deferred_stack_check = + new(zone()) DeferredStackCheck(this, instr); + __ LoadRoot(at, Heap::kStackLimitRootIndex); + __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + __ bind(instr->done_label()); + deferred_stack_check->SetExit(instr->done_label()); + RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); + // Don't record a deoptimization index for the safepoint here. + // This will be done explicitly when emitting call and the safepoint in + // the deferred code. + } +} + + +void LCodeGen::DoOsrEntry(LOsrEntry* instr) { + // This is a pseudo-instruction that ensures that the environment here is + // properly registered for deoptimization and records the assembler's PC + // offset. + LEnvironment* environment = instr->environment(); + + // If the environment were already registered, we would have no way of + // backpatching it with the spill slot operands. + DCHECK(!environment->HasBeenRegistered()); + RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); + + GenerateOsrPrologue(); +} + + +void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { + Register result = ToRegister(instr->result()); + Register object = ToRegister(instr->object()); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + DeoptimizeIf(eq, instr->environment(), object, Operand(at)); + + Register null_value = a5; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + DeoptimizeIf(eq, instr->environment(), object, Operand(null_value)); + + __ And(at, object, kSmiTagMask); + DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); + + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ GetObjectType(object, a1, a1); + DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE)); + + Label use_cache, call_runtime; + DCHECK(object.is(a0)); + __ CheckEnumCache(null_value, &call_runtime); + + __ ld(result, FieldMemOperand(object, 
HeapObject::kMapOffset)); + __ Branch(&use_cache); + + // Get the set of properties to enumerate. + __ bind(&call_runtime); + __ push(object); + CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); + + __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); + DCHECK(result.is(v0)); + __ LoadRoot(at, Heap::kMetaMapRootIndex); + DeoptimizeIf(ne, instr->environment(), a1, Operand(at)); + __ bind(&use_cache); +} + + +void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { + Register map = ToRegister(instr->map()); + Register result = ToRegister(instr->result()); + Label load_cache, done; + __ EnumLength(result, map); + __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); + __ li(result, Operand(isolate()->factory()->empty_fixed_array())); + __ jmp(&done); + + __ bind(&load_cache); + __ LoadInstanceDescriptors(map, result); + __ ld(result, + FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); + __ ld(result, + FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); + DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); + + __ bind(&done); +} + + +void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { + Register object = ToRegister(instr->value()); + Register map = ToRegister(instr->map()); + __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); + DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0())); +} + + +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ Push(object, index); + __ mov(cp, zero_reg); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(v0, result); +} + + +void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, 
+ LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + result_(result), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register result_; + Register object_; + Register index_; + }; + + Register object = ToRegister(instr->object()); + Register index = ToRegister(instr->index()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, result, object, index); + + Label out_of_object, done; + + __ And(scratch, index, Operand(Smi::FromInt(1))); + __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg)); + __ dsra(index, index, 1); + + __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg)); + __ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot. + __ Daddu(scratch, object, scratch); + __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); + + __ Branch(&done); + + __ bind(&out_of_object); + __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); + // Index is equal to negated out of object property index plus 1. 
+ __ Dsubu(scratch, result, scratch); + __ ld(result, FieldMemOperand(scratch, + FixedArray::kHeaderSize - kPointerSize)); + __ bind(deferred->exit()); + __ bind(&done); +} + + +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ li(at, scope_info); + __ Push(at, ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + +#undef __ + +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/mips64/lithium-codegen-mips64.h nodejs-0.11.15/deps/v8/src/mips64/lithium-codegen-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/lithium-codegen-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/lithium-codegen-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,451 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ +#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ + +#include "src/deoptimizer.h" +#include "src/lithium-codegen.h" +#include "src/mips64/lithium-gap-resolver-mips64.h" +#include "src/mips64/lithium-mips64.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { + +// Forward declarations. 
+class LDeferredCode; +class SafepointGenerator; + +class LCodeGen: public LCodeGenBase { + public: + LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) + : LCodeGenBase(chunk, assembler, info), + deoptimizations_(4, info->zone()), + deopt_jump_table_(4, info->zone()), + deoptimization_literals_(8, info->zone()), + inlined_function_count_(0), + scope_(info->scope()), + translations_(info->zone()), + deferred_(8, info->zone()), + osr_pc_offset_(-1), + frame_is_built_(false), + safepoints_(info->zone()), + resolver_(this), + expected_safepoint_kind_(Safepoint::kSimple) { + PopulateDeoptimizationLiteralsWithInlinedFunctions(); + } + + + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub() || + info()->requires_frame(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + + RAStatus GetRAState() const { + return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved; + } + + // Support for converting LOperands to assembler types. + // LOperand must be a register. + Register ToRegister(LOperand* op) const; + + // LOperand is loaded into scratch, unless already a register. + Register EmitLoadRegister(LOperand* op, Register scratch); + + // LOperand must be a double register. + DoubleRegister ToDoubleRegister(LOperand* op) const; + + // LOperand is loaded into dbl_scratch, unless already a double register. 
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op, + FloatRegister flt_scratch, + DoubleRegister dbl_scratch); + int32_t ToRepresentation_donotuse(LConstantOperand* op, + const Representation& r) const; + int32_t ToInteger32(LConstantOperand* op) const; + Smi* ToSmi(LConstantOperand* op) const; + double ToDouble(LConstantOperand* op) const; + Operand ToOperand(LOperand* op); + MemOperand ToMemOperand(LOperand* op) const; + // Returns a MemOperand pointing to the high word of a DoubleStackSlot. + MemOperand ToHighMemOperand(LOperand* op) const; + + bool IsInteger32(LConstantOperand* op) const; + bool IsSmi(LConstantOperand* op) const; + Handle<Object> ToHandle(LConstantOperand* op) const; + + // Try to generate code for the entire chunk, but it may fail if the + // chunk contains constructs we cannot handle. Returns true if the + // code generation attempt succeeded. + bool GenerateCode(); + + // Finish the code by setting stack height, safepoint, and bailout + // information on it. + void FinishCode(Handle<Code> code); + + void DoDeferredNumberTagD(LNumberTagD* instr); + + enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; + void DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp1, + LOperand* temp2, + IntegerSignedness signedness); + + void DoDeferredTaggedToI(LTaggedToI* instr); + void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); + void DoDeferredStackCheck(LStackCheck* instr); + void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); + void DoDeferredStringCharFromCode(LStringCharFromCode* instr); + void DoDeferredAllocate(LAllocate* instr); + void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, + Label* map_check); + + void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index); + + // Parallel move support. 
+ void DoParallelMove(LParallelMove* move); + void DoGap(LGap* instr); + + MemOperand PrepareKeyedOperand(Register key, + Register base, + bool key_is_constant, + int constant_key, + int element_size, + int shift_size, + int base_offset); + + // Emit frame translation commands for an environment. + void WriteTranslation(LEnvironment* environment, Translation* translation); + + // Declare methods that deal with the individual node types. +#define DECLARE_DO(type) void Do##type(L##type* node); + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) +#undef DECLARE_DO + + private: + StrictMode strict_mode() const { return info()->strict_mode(); } + + Scope* scope() const { return scope_; } + + Register scratch0() { return kLithiumScratchReg; } + Register scratch1() { return kLithiumScratchReg2; } + DoubleRegister double_scratch0() { return kLithiumScratchDouble; } + + LInstruction* GetNextInstruction(); + + void EmitClassOfTest(Label* if_true, + Label* if_false, + Handle<String> class_name, + Register input, + Register temporary, + Register temporary2); + + int GetStackSlotCount() const { return chunk()->spill_slot_count(); } + + void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } + + void SaveCallerDoubles(); + void RestoreCallerDoubles(); + + // Code generation passes. Returns true if code generation should + // continue. + void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE; + bool GeneratePrologue(); + bool GenerateDeferredCode(); + bool GenerateDeoptJumpTable(); + bool GenerateSafepointTable(); + + // Generates the custom OSR entrypoint and sets the osr_pc_offset. 
+ void GenerateOsrPrologue(); + + enum SafepointMode { + RECORD_SIMPLE_SAFEPOINT, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS + }; + + void CallCode(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr); + + void CallCodeGeneric(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr, + SafepointMode safepoint_mode); + + void CallRuntime(const Runtime::Function* function, + int num_arguments, + LInstruction* instr, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + + void CallRuntime(Runtime::FunctionId id, + int num_arguments, + LInstruction* instr) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, num_arguments, instr); + } + + void LoadContextFromDeferred(LOperand* context); + void CallRuntimeFromDeferred(Runtime::FunctionId id, + int argc, + LInstruction* instr, + LOperand* context); + + enum A1State { + A1_UNINITIALIZED, + A1_CONTAINS_TARGET + }; + + // Generate a direct call to a known function. Expects the function + // to be in a1. 
+ void CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, + int arity, + LInstruction* instr, + A1State a1_state); + + void RecordSafepointWithLazyDeopt(LInstruction* instr, + SafepointMode safepoint_mode); + + void RegisterEnvironmentForDeoptimization(LEnvironment* environment, + Safepoint::DeoptMode mode); + void DeoptimizeIf(Condition condition, + LEnvironment* environment, + Deoptimizer::BailoutType bailout_type, + Register src1 = zero_reg, + const Operand& src2 = Operand(zero_reg)); + void DeoptimizeIf(Condition condition, + LEnvironment* environment, + Register src1 = zero_reg, + const Operand& src2 = Operand(zero_reg)); + + void AddToTranslation(LEnvironment* environment, + Translation* translation, + LOperand* op, + bool is_tagged, + bool is_uint32, + int* object_index_pointer, + int* dematerialized_index_pointer); + void PopulateDeoptimizationData(Handle<Code> code); + int DefineDeoptimizationLiteral(Handle<Object> literal); + + void PopulateDeoptimizationLiteralsWithInlinedFunctions(); + + Register ToRegister(int index) const; + DoubleRegister ToDoubleRegister(int index) const; + + MemOperand BuildSeqStringOperand(Register string, + LOperand* index, + String::Encoding encoding); + + void EmitIntegerMathAbs(LMathAbs* instr); + + // Support for recording safepoint and position information. + void RecordSafepoint(LPointerMap* pointers, + Safepoint::Kind kind, + int arguments, + Safepoint::DeoptMode mode); + void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); + void RecordSafepoint(Safepoint::DeoptMode mode); + void RecordSafepointWithRegisters(LPointerMap* pointers, + int arguments, + Safepoint::DeoptMode mode); + + void RecordAndWritePosition(int position) V8_OVERRIDE; + + static Condition TokenToCondition(Token::Value op, bool is_unsigned); + void EmitGoto(int block); + + // EmitBranch expects to be the last instruction of a block. 
+ template<class InstrType> + void EmitBranch(InstrType instr, + Condition condition, + Register src1, + const Operand& src2); + template<class InstrType> + void EmitBranchF(InstrType instr, + Condition condition, + FPURegister src1, + FPURegister src2); + template<class InstrType> + void EmitFalseBranch(InstrType instr, + Condition condition, + Register src1, + const Operand& src2); + template<class InstrType> + void EmitFalseBranchF(InstrType instr, + Condition condition, + FPURegister src1, + FPURegister src2); + void EmitCmpI(LOperand* left, LOperand* right); + void EmitNumberUntagD(Register input, + DoubleRegister result, + bool allow_undefined_as_nan, + bool deoptimize_on_minus_zero, + LEnvironment* env, + NumberUntagDMode mode); + + // Emits optimized code for typeof x == "y". Modifies input register. + // Returns the condition on which a final split to + // true and false label should be made, to optimize fallthrough. + // Returns two registers in cmp1 and cmp2 that can be used in the + // Branch instruction after EmitTypeofIs. + Condition EmitTypeofIs(Label* true_label, + Label* false_label, + Register input, + Handle<String> type_name, + Register* cmp1, + Operand* cmp2); + + // Emits optimized code for %_IsObject(x). Preserves input register. + // Returns the condition on which a final split to + // true and false label should be made, to optimize fallthrough. + Condition EmitIsObject(Register input, + Register temp1, + Register temp2, + Label* is_not_object, + Label* is_object); + + // Emits optimized code for %_IsString(x). Preserves input register. + // Returns the condition on which a final split to + // true and false label should be made, to optimize fallthrough. + Condition EmitIsString(Register input, + Register temp1, + Label* is_not_string, + SmiCheck check_needed); + + // Emits optimized code for %_IsConstructCall(). + // Caller should branch on equal condition. 
+ void EmitIsConstructCall(Register temp1, Register temp2); + + // Emits optimized code to deep-copy the contents of statically known + // object graphs (e.g. object literal boilerplate). + void EmitDeepCopy(Handle<JSObject> object, + Register result, + Register source, + int* offset, + AllocationSiteMode mode); + // Emit optimized code for integer division. + // Inputs are signed. + // All registers are clobbered. + // If 'remainder' is no_reg, it is not computed. + void EmitSignedIntegerDivisionByConstant(Register result, + Register dividend, + int32_t divisor, + Register remainder, + Register scratch, + LEnvironment* environment); + + + void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); + + ZoneList<LEnvironment*> deoptimizations_; + ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; + ZoneList<Handle<Object> > deoptimization_literals_; + int inlined_function_count_; + Scope* const scope_; + TranslationBuffer translations_; + ZoneList<LDeferredCode*> deferred_; + int osr_pc_offset_; + bool frame_is_built_; + + // Builder that keeps track of safepoints in the code. The table + // itself is emitted at the end of the generated code. + SafepointTableBuilder safepoints_; + + // Compiler from a set of parallel moves to a sequential list of moves. 
+ LGapResolver resolver_; + + Safepoint::Kind expected_safepoint_kind_; + + class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { + public: + explicit PushSafepointRegistersScope(LCodeGen* codegen) + : codegen_(codegen) { + DCHECK(codegen_->info()->is_calling()); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; + + StoreRegistersStateStub stub(codegen_->isolate()); + codegen_->masm_->push(ra); + codegen_->masm_->CallStub(&stub); + } + + ~PushSafepointRegistersScope() { + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + RestoreRegistersStateStub stub(codegen_->isolate()); + codegen_->masm_->push(ra); + codegen_->masm_->CallStub(&stub); + codegen_->expected_safepoint_kind_ = Safepoint::kSimple; + } + + private: + LCodeGen* codegen_; + }; + + friend class LDeferredCode; + friend class LEnvironment; + friend class SafepointGenerator; + DISALLOW_COPY_AND_ASSIGN(LCodeGen); +}; + + +class LDeferredCode : public ZoneObject { + public: + explicit LDeferredCode(LCodeGen* codegen) + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_) { + codegen->AddDeferredCode(this); + } + + virtual ~LDeferredCode() {} + virtual void Generate() = 0; + virtual LInstruction* instr() = 0; + + void SetExit(Label* exit) { external_exit_ = exit; } + Label* entry() { return &entry_; } + Label* exit() { return external_exit_ != NULL ? 
external_exit_ : &exit_; } + int instruction_index() const { return instruction_index_; } + + protected: + LCodeGen* codegen() const { return codegen_; } + MacroAssembler* masm() const { return codegen_->masm(); } + + private: + LCodeGen* codegen_; + Label entry_; + Label exit_; + Label* external_exit_; + int instruction_index_; +}; + +} } // namespace v8::internal + +#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,300 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/mips64/lithium-codegen-mips64.h" +#include "src/mips64/lithium-gap-resolver-mips64.h" + +namespace v8 { +namespace internal { + +LGapResolver::LGapResolver(LCodeGen* owner) + : cgen_(owner), + moves_(32, owner->zone()), + root_index_(0), + in_cycle_(false), + saved_destination_(NULL) {} + + +void LGapResolver::Resolve(LParallelMove* parallel_move) { + DCHECK(moves_.is_empty()); + // Build up a worklist of moves. + BuildInitialMoveList(parallel_move); + + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands move = moves_[i]; + // Skip constants to perform them last. They don't block other moves + // and skipping such moves with register destinations keeps those + // registers free for the whole algorithm. + if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { + root_index_ = i; // Any cycle is found when by reaching this move again. + PerformMove(i); + if (in_cycle_) { + RestoreValue(); + } + } + } + + // Perform the moves with constant sources. 
+ for (int i = 0; i < moves_.length(); ++i) { + if (!moves_[i].IsEliminated()) { + DCHECK(moves_[i].source()->IsConstantOperand()); + EmitMove(i); + } + } + + moves_.Rewind(0); +} + + +void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { + // Perform a linear sweep of the moves to add them to the initial list of + // moves to perform, ignoring any move that is redundant (the source is + // the same as the destination, the destination is ignored and + // unallocated, or the move was already eliminated). + const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); + for (int i = 0; i < moves->length(); ++i) { + LMoveOperands move = moves->at(i); + if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); + } + Verify(); +} + + +void LGapResolver::PerformMove(int index) { + // Each call to this function performs a move and deletes it from the move + // graph. We first recursively perform any move blocking this one. We + // mark a move as "pending" on entry to PerformMove in order to detect + // cycles in the move graph. + + // We can only find a cycle, when doing a depth-first traversal of moves, + // be encountering the starting move again. So by spilling the source of + // the starting move, we break the cycle. All moves are then unblocked, + // and the starting move is completed by writing the spilled value to + // its destination. All other moves from the spilled source have been + // completed prior to breaking the cycle. + // An additional complication is that moves to MemOperands with large + // offsets (more than 1K or 4K) require us to spill this spilled value to + // the stack, to free up the register. + DCHECK(!moves_[index].IsPending()); + DCHECK(!moves_[index].IsRedundant()); + + // Clear this move's destination to indicate a pending move. The actual + // destination is saved in a stack allocated local. Multiple moves can + // be pending because this function is recursive. 
+ DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. + LOperand* destination = moves_[index].destination(); + moves_[index].set_destination(NULL); + + // Perform a depth-first traversal of the move graph to resolve + // dependencies. Any unperformed, unpending move with a source the same + // as this one's destination blocks this one so recursively perform all + // such moves. + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(destination) && !other_move.IsPending()) { + PerformMove(i); + // If there is a blocking, pending move it must be moves_[root_index_] + // and all other moves with the same source as moves_[root_index_] are + // sucessfully executed (because they are cycle-free) by this loop. + } + } + + // We are about to resolve this move and don't need it marked as + // pending, so restore its destination. + moves_[index].set_destination(destination); + + // The move may be blocked on a pending move, which must be the starting move. + // In this case, we have a cycle, and we save the source of this move to + // a scratch register to break it. + LMoveOperands other_move = moves_[root_index_]; + if (other_move.Blocks(destination)) { + DCHECK(other_move.IsPending()); + BreakCycle(index); + return; + } + + // This move is no longer blocked. + EmitMove(index); +} + + +void LGapResolver::Verify() { +#ifdef ENABLE_SLOW_DCHECKS + // No operand should be the destination for more than one move. + for (int i = 0; i < moves_.length(); ++i) { + LOperand* destination = moves_[i].destination(); + for (int j = i + 1; j < moves_.length(); ++j) { + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); + } + } +#endif +} + +#define __ ACCESS_MASM(cgen_->masm()) + +void LGapResolver::BreakCycle(int index) { + // We save in a register the value that should end up in the source of + // moves_[root_index]. 
After performing all moves in the tree rooted + // in that move, we save the value to that source. + DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source())); + DCHECK(!in_cycle_); + in_cycle_ = true; + LOperand* source = moves_[index].source(); + saved_destination_ = moves_[index].destination(); + if (source->IsRegister()) { + __ mov(kLithiumScratchReg, cgen_->ToRegister(source)); + } else if (source->IsStackSlot()) { + __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source)); + } else if (source->IsDoubleRegister()) { + __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source)); + } else if (source->IsDoubleStackSlot()) { + __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source)); + } else { + UNREACHABLE(); + } + // This move will be done by restoring the saved value to the destination. + moves_[index].Eliminate(); +} + + +void LGapResolver::RestoreValue() { + DCHECK(in_cycle_); + DCHECK(saved_destination_ != NULL); + + // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble. + if (saved_destination_->IsRegister()) { + __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg); + } else if (saved_destination_->IsStackSlot()) { + __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_)); + } else if (saved_destination_->IsDoubleRegister()) { + __ mov_d(cgen_->ToDoubleRegister(saved_destination_), + kLithiumScratchDouble); + } else if (saved_destination_->IsDoubleStackSlot()) { + __ sdc1(kLithiumScratchDouble, + cgen_->ToMemOperand(saved_destination_)); + } else { + UNREACHABLE(); + } + + in_cycle_ = false; + saved_destination_ = NULL; +} + + +void LGapResolver::EmitMove(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. 
+ + if (source->IsRegister()) { + Register source_register = cgen_->ToRegister(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_register); + } else { + DCHECK(destination->IsStackSlot()); + __ sd(source_register, cgen_->ToMemOperand(destination)); + } + } else if (source->IsStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsRegister()) { + __ ld(cgen_->ToRegister(destination), source_operand); + } else { + DCHECK(destination->IsStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + if (!destination_operand.OffsetIsInt16Encodable()) { + // 'at' is overwritten while saving the value to the destination. + // Therefore we can't use 'at'. It is OK if the read from the source + // destroys 'at', since that happens before the value is read. + // This uses only a single reg of the double reg-pair. + __ ldc1(kLithiumScratchDouble, source_operand); + __ sdc1(kLithiumScratchDouble, destination_operand); + } else { + __ ld(at, source_operand); + __ sd(at, destination_operand); + } + } else { + __ ld(kLithiumScratchReg, source_operand); + __ sd(kLithiumScratchReg, destination_operand); + } + } + + } else if (source->IsConstantOperand()) { + LConstantOperand* constant_source = LConstantOperand::cast(source); + if (destination->IsRegister()) { + Register dst = cgen_->ToRegister(destination); + if (cgen_->IsSmi(constant_source)) { + __ li(dst, Operand(cgen_->ToSmi(constant_source))); + } else if (cgen_->IsInteger32(constant_source)) { + __ li(dst, Operand(cgen_->ToInteger32(constant_source))); + } else { + __ li(dst, cgen_->ToHandle(constant_source)); + } + } else if (destination->IsDoubleRegister()) { + DoubleRegister result = cgen_->ToDoubleRegister(destination); + double v = cgen_->ToDouble(constant_source); + __ Move(result, v); + } else { + DCHECK(destination->IsStackSlot()); + DCHECK(!in_cycle_); // Constant moves happen after all cycles are 
gone. + if (cgen_->IsSmi(constant_source)) { + __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source))); + __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination)); + } else if (cgen_->IsInteger32(constant_source)) { + __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source))); + __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination)); + } else { + __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source)); + __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination)); + } + } + + } else if (source->IsDoubleRegister()) { + DoubleRegister source_register = cgen_->ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + __ mov_d(cgen_->ToDoubleRegister(destination), source_register); + } else { + DCHECK(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ sdc1(source_register, destination_operand); + } + + } else if (source->IsDoubleStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsDoubleRegister()) { + __ ldc1(cgen_->ToDoubleRegister(destination), source_operand); + } else { + DCHECK(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + // kLithiumScratchDouble was used to break the cycle, + // but kLithiumScratchReg is free. 
+ MemOperand source_high_operand = + cgen_->ToHighMemOperand(source); + MemOperand destination_high_operand = + cgen_->ToHighMemOperand(destination); + __ lw(kLithiumScratchReg, source_operand); + __ sw(kLithiumScratchReg, destination_operand); + __ lw(kLithiumScratchReg, source_high_operand); + __ sw(kLithiumScratchReg, destination_high_operand); + } else { + __ ldc1(kLithiumScratchDouble, source_operand); + __ sdc1(kLithiumScratchDouble, destination_operand); + } + } + } else { + UNREACHABLE(); + } + + moves_[index].Eliminate(); +} + + +#undef __ + +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/mips64/lithium-gap-resolver-mips64.h nodejs-0.11.15/deps/v8/src/mips64/lithium-gap-resolver-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/lithium-gap-resolver-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/lithium-gap-resolver-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,60 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ +#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ + +#include "src/v8.h" + +#include "src/lithium.h" + +namespace v8 { +namespace internal { + +class LCodeGen; +class LGapResolver; + +class LGapResolver V8_FINAL BASE_EMBEDDED { + public: + explicit LGapResolver(LCodeGen* owner); + + // Resolve a set of parallel moves, emitting assembler instructions. + void Resolve(LParallelMove* parallel_move); + + private: + // Build the initial list of moves. + void BuildInitialMoveList(LParallelMove* parallel_move); + + // Perform the move at the moves_ index in question (possibly requiring + // other moves to satisfy dependencies). + void PerformMove(int index); + + // If a cycle is found in the series of moves, save the blocking value to + // a scratch register. 
The cycle must be found by hitting the root of the + // depth-first search. + void BreakCycle(int index); + + // After a cycle has been resolved, restore the value from the scratch + // register to its proper destination. + void RestoreValue(); + + // Emit a move and remove it from the move graph. + void EmitMove(int index); + + // Verify the move list before performing moves. + void Verify(); + + LCodeGen* cgen_; + + // List of moves not yet resolved. + ZoneList<LMoveOperands> moves_; + + int root_index_; + bool in_cycle_; + LOperand* saved_destination_; +}; + +} } // namespace v8::internal + +#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/lithium-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/lithium-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/lithium-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/lithium-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2581 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/hydrogen-osr.h" +#include "src/lithium-inl.h" +#include "src/mips64/lithium-codegen-mips64.h" + +namespace v8 { +namespace internal { + +#define DEFINE_COMPILE(type) \ + void L##type::CompileToNative(LCodeGen* generator) { \ + generator->Do##type(this); \ + } +LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) +#undef DEFINE_COMPILE + +#ifdef DEBUG +void LInstruction::VerifyCall() { + // Call instructions can use only fixed registers as temporaries and + // outputs because all registers are blocked by the calling convention. + // Inputs operands must use a fixed register or use-at-start policy or + // a non-register policy. 
+ DCHECK(Output() == NULL || + LUnallocated::cast(Output())->HasFixedPolicy() || + !LUnallocated::cast(Output())->HasRegisterPolicy()); + for (UseIterator it(this); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + DCHECK(operand->HasFixedPolicy() || + operand->IsUsedAtStart()); + } + for (TempIterator it(this); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + } +} +#endif + + +void LInstruction::PrintTo(StringStream* stream) { + stream->Add("%s ", this->Mnemonic()); + + PrintOutputOperandTo(stream); + + PrintDataTo(stream); + + if (HasEnvironment()) { + stream->Add(" "); + environment()->PrintTo(stream); + } + + if (HasPointerMap()) { + stream->Add(" "); + pointer_map()->PrintTo(stream); + } +} + + +void LInstruction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + for (int i = 0; i < InputCount(); i++) { + if (i > 0) stream->Add(" "); + if (InputAt(i) == NULL) { + stream->Add("NULL"); + } else { + InputAt(i)->PrintTo(stream); + } + } +} + + +void LInstruction::PrintOutputOperandTo(StringStream* stream) { + if (HasResult()) result()->PrintTo(stream); +} + + +void LLabel::PrintDataTo(StringStream* stream) { + LGap::PrintDataTo(stream); + LLabel* rep = replacement(); + if (rep != NULL) { + stream->Add(" Dead block replaced with B%d", rep->block_id()); + } +} + + +bool LGap::IsRedundant() const { + for (int i = 0; i < 4; i++) { + if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { + return false; + } + } + + return true; +} + + +void LGap::PrintDataTo(StringStream* stream) { + for (int i = 0; i < 4; i++) { + stream->Add("("); + if (parallel_moves_[i] != NULL) { + parallel_moves_[i]->PrintDataTo(stream); + } + stream->Add(") "); + } +} + + +const char* LArithmeticD::Mnemonic() const { + switch (op()) { + case Token::ADD: return "add-d"; + case Token::SUB: return "sub-d"; + case Token::MUL: 
return "mul-d"; + case Token::DIV: return "div-d"; + case Token::MOD: return "mod-d"; + default: + UNREACHABLE(); + return NULL; + } +} + + +const char* LArithmeticT::Mnemonic() const { + switch (op()) { + case Token::ADD: return "add-t"; + case Token::SUB: return "sub-t"; + case Token::MUL: return "mul-t"; + case Token::MOD: return "mod-t"; + case Token::DIV: return "div-t"; + case Token::BIT_AND: return "bit-and-t"; + case Token::BIT_OR: return "bit-or-t"; + case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; + case Token::SHL: return "sll-t"; + case Token::SAR: return "sra-t"; + case Token::SHR: return "srl-t"; + default: + UNREACHABLE(); + return NULL; + } +} + + +bool LGoto::HasInterestingComment(LCodeGen* gen) const { + return !gen->IsNextEmittedBlock(block_id()); +} + + +void LGoto::PrintDataTo(StringStream* stream) { + stream->Add("B%d", block_id()); +} + + +void LBranch::PrintDataTo(StringStream* stream) { + stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); + value()->PrintTo(stream); +} + + +LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { + return new(zone()) LDebugBreak(); +} + + +void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if "); + left()->PrintTo(stream); + stream->Add(" %s ", Token::String(op())); + right()->PrintTo(stream); + stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsObjectAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_object("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsStringAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_string("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_smi("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", 
true_block_id(), false_block_id()); +} + + +void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_undetectable("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if string_compare("); + left()->PrintTo(stream); + right()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if has_instance_type("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if has_cached_array_index("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if class_of_test("); + value()->PrintTo(stream); + stream->Add(", \"%o\") then B%d else B%d", + *hydrogen()->class_name(), + true_block_id(), + false_block_id()); +} + + +void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if typeof "); + value()->PrintTo(stream); + stream->Add(" == \"%s\" then B%d else B%d", + hydrogen()->type_literal()->ToCString().get(), + true_block_id(), false_block_id()); +} + + +void LStoreCodeEntry::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + function()->PrintTo(stream); + stream->Add(".code_entry = "); + code_object()->PrintTo(stream); +} + + +void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + base_object()->PrintTo(stream); + stream->Add(" + "); + offset()->PrintTo(stream); +} + + +void LCallJSFunction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + function()->PrintTo(stream); + stream->Add("#%d / ", arity()); +} + + +void 
LCallWithDescriptor::PrintDataTo(StringStream* stream) { + for (int i = 0; i < InputCount(); i++) { + InputAt(i)->PrintTo(stream); + stream->Add(" "); + } + stream->Add("#%d / ", arity()); +} + + +void LLoadContextSlot::PrintDataTo(StringStream* stream) { + context()->PrintTo(stream); + stream->Add("[%d]", slot_index()); +} + + +void LStoreContextSlot::PrintDataTo(StringStream* stream) { + context()->PrintTo(stream); + stream->Add("[%d] <- ", slot_index()); + value()->PrintTo(stream); +} + + +void LInvokeFunction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + function()->PrintTo(stream); + stream->Add(" #%d / ", arity()); +} + + +void LCallNew::PrintDataTo(StringStream* stream) { + stream->Add("= "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); +} + + +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ElementsKind kind = hydrogen()->elements_kind(); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + +void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { + arguments()->PrintTo(stream); + stream->Add(" length "); + length()->PrintTo(stream); + stream->Add(" index "); + index()->PrintTo(stream); +} + + +void LStoreNamedField::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + OStringStream os; + os << hydrogen()->access() << " <- "; + stream->Add(os.c_str()); + value()->PrintTo(stream); +} + + +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(String::cast(*name())->ToCString().get()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", base_offset()); + } else { + stream->Add("]"); + } +} + + +void LStoreKeyed::PrintDataTo(StringStream* 
stream) { + elements()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", base_offset()); + } else { + stream->Add("] <- "); + } + + if (value() == NULL) { + DCHECK(hydrogen()->IsConstantHoleStore() && + hydrogen()->value()->representation().IsDouble()); + stream->Add("<the hole(nan)>"); + } else { + value()->PrintTo(stream); + } +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +void LTransitionElementsKind::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add(" %p -> %p", *original_map(), *transitioned_map()); +} + + +int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { + // Skip a slot if for a double-width slot. + if (kind == DOUBLE_REGISTERS) spill_slot_count_++; + return spill_slot_count_++; +} + + +LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { + int index = GetNextSpillIndex(kind); + if (kind == DOUBLE_REGISTERS) { + return LDoubleStackSlot::Create(index, zone()); + } else { + DCHECK(kind == GENERAL_REGISTERS); + return LStackSlot::Create(index, zone()); + } +} + + +LPlatformChunk* LChunkBuilder::Build() { + DCHECK(is_unused()); + chunk_ = new(zone()) LPlatformChunk(info(), graph()); + LPhase phase("L_Building chunk", chunk_); + status_ = BUILDING; + + // If compiling for OSR, reserve space for the unoptimized frame, + // which will be subsumed into this frame. 
+ if (graph()->has_osr()) { + for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { + chunk_->GetNextSpillIndex(GENERAL_REGISTERS); + } + } + + const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); + for (int i = 0; i < blocks->length(); i++) { + HBasicBlock* next = NULL; + if (i < blocks->length() - 1) next = blocks->at(i + 1); + DoBasicBlock(blocks->at(i), next); + if (is_aborted()) return NULL; + } + status_ = DONE; + return chunk_; +} + + +void LChunkBuilder::Abort(BailoutReason reason) { + info()->set_bailout_reason(reason); + status_ = ABORTED; +} + + +LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { + return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER, + Register::ToAllocationIndex(reg)); +} + + +LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) { + return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, + DoubleRegister::ToAllocationIndex(reg)); +} + + +LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { + return Use(value, ToUnallocated(fixed_register)); +} + + +LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) { + return Use(value, ToUnallocated(reg)); +} + + +LOperand* LChunkBuilder::UseRegister(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); +} + + +LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { + return Use(value, + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, + LUnallocated::USED_AT_START)); +} + + +LOperand* LChunkBuilder::UseTempRegister(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); +} + + +LOperand* LChunkBuilder::Use(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); +} + + +LOperand* LChunkBuilder::UseAtStart(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, + LUnallocated::USED_AT_START)); +} + + +LOperand* LChunkBuilder::UseOrConstant(HValue* 
value) { + return value->IsConstant() + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : Use(value); +} + + +LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { + return value->IsConstant() + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseAtStart(value); +} + + +LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { + return value->IsConstant() + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseRegister(value); +} + + +LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { + return value->IsConstant() + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseRegisterAtStart(value); +} + + +LOperand* LChunkBuilder::UseConstant(HValue* value) { + return chunk_->DefineConstantOperand(HConstant::cast(value)); +} + + +LOperand* LChunkBuilder::UseAny(HValue* value) { + return value->IsConstant() + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); +} + + +LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { + if (value->EmitAtUses()) { + HInstruction* instr = HInstruction::cast(value); + VisitInstruction(instr); + } + operand->set_virtual_register(value->id()); + return operand; +} + + +LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, + LUnallocated* result) { + result->set_virtual_register(current_instruction_->id()); + instr->set_result(result); + return instr; +} + + +LInstruction* LChunkBuilder::DefineAsRegister( + LTemplateResultInstruction<1>* instr) { + return Define(instr, + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); +} + + +LInstruction* LChunkBuilder::DefineAsSpilled( + LTemplateResultInstruction<1>* instr, int index) { + return Define(instr, + new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); +} + + +LInstruction* LChunkBuilder::DefineSameAsFirst( + LTemplateResultInstruction<1>* instr) { + return Define(instr, + new(zone()) 
LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); +} + + +LInstruction* LChunkBuilder::DefineFixed( + LTemplateResultInstruction<1>* instr, Register reg) { + return Define(instr, ToUnallocated(reg)); +} + + +LInstruction* LChunkBuilder::DefineFixedDouble( + LTemplateResultInstruction<1>* instr, DoubleRegister reg) { + return Define(instr, ToUnallocated(reg)); +} + + +LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { + HEnvironment* hydrogen_env = current_block_->last_environment(); + int argument_index_accumulator = 0; + ZoneList<HValue*> objects_to_materialize(0, zone()); + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator, + &objects_to_materialize)); + return instr; +} + + +LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, + HInstruction* hinstr, + CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); +#ifdef DEBUG + instr->VerifyCall(); +#endif + instr->MarkAsCall(); + instr = AssignPointerMap(instr); + + // If instruction does not have side-effects lazy deoptimization + // after the call will try to deoptimize to the point before the call. + // Thus we still need to attach environment to this call even if + // call sequence can not deoptimize eagerly. + bool needs_environment = + (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || + !hinstr->HasObservableSideEffects(); + if (needs_environment && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. 
+ instr->environment()->set_has_been_used(); + } + + return instr; +} + + +LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { + DCHECK(!instr->HasPointerMap()); + instr->set_pointer_map(new(zone()) LPointerMap(zone())); + return instr; +} + + +LUnallocated* LChunkBuilder::TempRegister() { + LUnallocated* operand = + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); + vreg = 0; + } + operand->set_virtual_register(vreg); + return operand; +} + + +LUnallocated* LChunkBuilder::TempDoubleRegister() { + LUnallocated* operand = + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); + vreg = 0; + } + operand->set_virtual_register(vreg); + return operand; +} + + +LOperand* LChunkBuilder::FixedTemp(Register reg) { + LUnallocated* operand = ToUnallocated(reg); + DCHECK(operand->HasFixedPolicy()); + return operand; +} + + +LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) { + LUnallocated* operand = ToUnallocated(reg); + DCHECK(operand->HasFixedPolicy()); + return operand; +} + + +LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { + return new(zone()) LLabel(instr->block()); +} + + +LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { + return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); +} + + +LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { + return AssignEnvironment(new(zone()) LDeoptimize); +} + + +LInstruction* LChunkBuilder::DoShift(Token::Value op, + HBitwiseBinaryOperation* instr) { + if (instr->representation().IsSmiOrInteger32()) { + 
DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); + + HValue* right_value = instr->right(); + LOperand* right = NULL; + int constant_value = 0; + bool does_deopt = false; + if (right_value->IsConstant()) { + HConstant* constant = HConstant::cast(right_value); + right = chunk_->DefineConstantOperand(constant); + constant_value = constant->Integer32Value() & 0x1f; + // Left shifts can deoptimize if we shift by > 0 and the result cannot be + // truncated to smi. + if (instr->representation().IsSmi() && constant_value > 0) { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + } + } else { + right = UseRegisterAtStart(right_value); + } + + // Shift operations can only deoptimize if we do a logical shift + // by 0 and the result cannot be truncated to int32. + if (op == Token::SHR && constant_value == 0) { + if (FLAG_opt_safe_uint32_operations) { + does_deopt = !instr->CheckFlag(HInstruction::kUint32); + } else { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + } + } + + LInstruction* result = + DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); + return does_deopt ? AssignEnvironment(result) : result; + } else { + return DoArithmeticT(op, instr); + } +} + + +LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, + HArithmeticBinaryOperation* instr) { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + if (op == Token::MOD) { + LOperand* left = UseFixedDouble(instr->left(), f2); + LOperand* right = UseFixedDouble(instr->right(), f4); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + // We call a C function for double modulo. It can't trigger a GC. We need + // to use fixed result register for the call. 
+ // TODO(fschneider): Allow any register as input registers. + return MarkAsCall(DefineFixedDouble(result, f2), instr); + } else { + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseRegisterAtStart(instr->right()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return DefineAsRegister(result); + } +} + + +LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, + HBinaryOperation* instr) { + HValue* left = instr->left(); + HValue* right = instr->right(); + DCHECK(left->representation().IsTagged()); + DCHECK(right->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), cp); + LOperand* left_operand = UseFixed(left, a1); + LOperand* right_operand = UseFixed(right, a0); + LArithmeticT* result = + new(zone()) LArithmeticT(op, context, left_operand, right_operand); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { + DCHECK(is_building()); + current_block_ = block; + next_block_ = next_block; + if (block->IsStartBlock()) { + block->UpdateEnvironment(graph_->start_environment()); + argument_count_ = 0; + } else if (block->predecessors()->length() == 1) { + // We have a single predecessor => copy environment and outgoing + // argument count from the predecessor. + DCHECK(block->phis()->length() == 0); + HBasicBlock* pred = block->predecessors()->at(0); + HEnvironment* last_environment = pred->last_environment(); + DCHECK(last_environment != NULL); + // Only copy the environment, if it is later used again. 
+ if (pred->end()->SecondSuccessor() == NULL) { + DCHECK(pred->end()->FirstSuccessor() == block); + } else { + if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || + pred->end()->SecondSuccessor()->block_id() > block->block_id()) { + last_environment = last_environment->Copy(); + } + } + block->UpdateEnvironment(last_environment); + DCHECK(pred->argument_count() >= 0); + argument_count_ = pred->argument_count(); + } else { + // We are at a state join => process phis. + HBasicBlock* pred = block->predecessors()->at(0); + // No need to copy the environment, it cannot be used later. + HEnvironment* last_environment = pred->last_environment(); + for (int i = 0; i < block->phis()->length(); ++i) { + HPhi* phi = block->phis()->at(i); + if (phi->HasMergedIndex()) { + last_environment->SetValueAt(phi->merged_index(), phi); + } + } + for (int i = 0; i < block->deleted_phis()->length(); ++i) { + if (block->deleted_phis()->at(i) < last_environment->length()) { + last_environment->SetValueAt(block->deleted_phis()->at(i), + graph_->GetConstantUndefined()); + } + } + block->UpdateEnvironment(last_environment); + // Pick up the outgoing argument count of one of the predecessors. + argument_count_ = pred->argument_count(); + } + HInstruction* current = block->first(); + int start = chunk_->instructions()->length(); + while (current != NULL && !is_aborted()) { + // Code for constants in registers is generated lazily. 
+ if (!current->EmitAtUses()) { + VisitInstruction(current); + } + current = current->next(); + } + int end = chunk_->instructions()->length() - 1; + if (end >= start) { + block->set_first_instruction_index(start); + block->set_last_instruction_index(end); + } + block->set_argument_count(argument_count_); + next_block_ = NULL; + current_block_ = NULL; +} + + +void LChunkBuilder::VisitInstruction(HInstruction* current) { + HInstruction* old_current = current_instruction_; + current_instruction_ = current; + + LInstruction* instr = NULL; + if (current->CanReplaceWithDummyUses()) { + if (current->OperandCount() == 0) { + instr = DefineAsRegister(new(zone()) LDummy()); + } else { + DCHECK(!current->OperandAt(0)->IsControlInstruction()); + instr = DefineAsRegister(new(zone()) + LDummyUse(UseAny(current->OperandAt(0)))); + } + for (int i = 1; i < current->OperandCount(); ++i) { + if (current->OperandAt(i)->IsControlInstruction()) continue; + LInstruction* dummy = + new(zone()) LDummyUse(UseAny(current->OperandAt(i))); + dummy->set_hydrogen_value(current); + chunk_->AddInstruction(dummy, current_block_); + } + } else { + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } + } + + argument_count_ += current->argument_delta(); + DCHECK(argument_count_ >= 0); + + if (instr != NULL) { + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { +// Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
+ instr->set_hydrogen_value(hydrogen_val); + +#if DEBUG + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. In this case, The register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; + } + DCHECK(fixed == 0 || used_at_start == 0); + } +#endif + + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + chunk_->AddInstruction(instr, current_block_); + + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + 
sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. + instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); + } + } +} + + +LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { + return new(zone()) LGoto(instr->FirstSuccessor()); +} + + +LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { + HValue* value = instr->value(); + Representation r = value->representation(); + HType type = value->type(); + ToBooleanStub::Types expected = instr->expected_input_types(); + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || + type.IsJSArray() || type.IsHeapNumber() || type.IsString(); + LInstruction* branch = new(zone()) LBranch(UseRegister(value)); + if (!easy_case && + ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) || + !expected.IsGeneric())) { + branch = AssignEnvironment(branch); + } + return branch; +} + + +LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* temp = TempRegister(); + return new(zone()) LCmpMapAndBranch(value, temp); +} + + +LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { + info()->MarkAsRequiresFrame(); + return DefineAsRegister( + new(zone()) LArgumentsLength(UseRegister(length->value()))); +} + + +LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { + info()->MarkAsRequiresFrame(); + return 
DefineAsRegister(new(zone()) LArgumentsElements); +} + + +LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LInstanceOf* result = + new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0), + UseFixed(instr->right(), a1)); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( + HInstanceOfKnownGlobal* instr) { + LInstanceOfKnownGlobal* result = + new(zone()) LInstanceOfKnownGlobal( + UseFixed(instr->context(), cp), + UseFixed(instr->left(), a0), + FixedTemp(a4)); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { + LOperand* receiver = UseRegisterAtStart(instr->receiver()); + LOperand* function = UseRegisterAtStart(instr->function()); + LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function); + return AssignEnvironment(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { + LOperand* function = UseFixed(instr->function(), a1); + LOperand* receiver = UseFixed(instr->receiver(), a0); + LOperand* length = UseFixed(instr->length(), a2); + LOperand* elements = UseFixed(instr->elements(), a3); + LApplyArguments* result = new(zone()) LApplyArguments(function, + receiver, + length, + elements); + return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + for (int i = 0; i < argc; ++i) { + LOperand* argument = Use(instr->argument(i)); + AddInstruction(new(zone()) LPushArgument(argument), instr); + } + return NULL; +} + + +LInstruction* LChunkBuilder::DoStoreCodeEntry( + HStoreCodeEntry* store_code_entry) { + LOperand* function = UseRegister(store_code_entry->function()); + LOperand* code_object = UseTempRegister(store_code_entry->code_object()); + return new(zone()) 
LStoreCodeEntry(function, code_object); +} + + +LInstruction* LChunkBuilder::DoInnerAllocatedObject( + HInnerAllocatedObject* instr) { + LOperand* base_object = UseRegisterAtStart(instr->base_object()); + LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); + return DefineAsRegister( + new(zone()) LInnerAllocatedObject(base_object, offset)); +} + + +LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { + return instr->HasNoUses() + ? NULL + : DefineAsRegister(new(zone()) LThisFunction); +} + + +LInstruction* LChunkBuilder::DoContext(HContext* instr) { + if (instr->HasNoUses()) return NULL; + + if (info()->IsStub()) { + return DefineFixed(new(zone()) LContext, cp); + } + + return DefineAsRegister(new(zone()) LContext); +} + + +LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); +} + + +LInstruction* LChunkBuilder::DoCallJSFunction( + HCallJSFunction* instr) { + LOperand* function = UseFixed(instr->function(), a1); + + LCallJSFunction* result = new(zone()) LCallJSFunction(function); + + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoCallWithDescriptor( + HCallWithDescriptor* instr) { + const InterfaceDescriptor* descriptor = instr->descriptor(); + + LOperand* target = UseRegisterOrConstantAtStart(instr->target()); + ZoneList<LOperand*> ops(instr->OperandCount(), zone()); + ops.Add(target, zone()); + for (int i = 1; i < instr->OperandCount(); i++) { + LOperand* op = UseFixed(instr->OperandAt(i), + descriptor->GetParameterRegister(i - 1)); + ops.Add(op, zone()); + } + + LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( + descriptor, ops, zone()); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* function 
= UseFixed(instr->function(), a1); + LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); + return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { + switch (instr->op()) { + case kMathFloor: + return DoMathFloor(instr); + case kMathRound: + return DoMathRound(instr); + case kMathFround: + return DoMathFround(instr); + case kMathAbs: + return DoMathAbs(instr); + case kMathLog: + return DoMathLog(instr); + case kMathExp: + return DoMathExp(instr); + case kMathSqrt: + return DoMathSqrt(instr); + case kMathPowHalf: + return DoMathPowHalf(instr); + case kMathClz32: + return DoMathClz32(instr); + default: + UNREACHABLE(); + return NULL; + } +} + + +LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); + LOperand* input = UseFixedDouble(instr->value(), f4); + return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr); +} + + +LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + LMathClz32* result = new(zone()) LMathClz32(input); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); + LOperand* input = UseRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LOperand* double_temp = TempDoubleRegister(); + LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { + // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf. 
+ LOperand* input = UseFixedDouble(instr->value(), f8); + LOperand* temp = TempDoubleRegister(); + LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); + return DefineFixedDouble(result, f4); +} + + +LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathFround* result = new (zone()) LMathFround(input); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { + Representation r = instr->value()->representation(); + LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) + ? NULL + : UseFixed(instr->context(), cp); + LOperand* input = UseRegister(instr->value()); + LInstruction* result = + DefineAsRegister(new(zone()) LMathAbs(context, input)); + if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); + if (!r.IsDouble()) result = AssignEnvironment(result); + return result; +} + + +LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LOperand* temp = TempRegister(); + LMathFloor* result = new(zone()) LMathFloor(input, temp); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); +} + + +LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathSqrt* result = new(zone()) LMathSqrt(input); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LOperand* temp = TempDoubleRegister(); + LMathRound* result = new(zone()) LMathRound(input, temp); + return AssignEnvironment(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* constructor = UseFixed(instr->constructor(), a1); + LCallNew* result = new(zone()) LCallNew(context, constructor); + return 
MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* constructor = UseFixed(instr->constructor(), a1); + LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* function = UseFixed(instr->function(), a1); + LCallFunction* call = new(zone()) LCallFunction(context, function); + return MarkAsCall(DefineFixed(call, v0), instr); +} + + +LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr); +} + + +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + +LInstruction* LChunkBuilder::DoShr(HShr* instr) { + return DoShift(Token::SHR, instr); +} + + +LInstruction* LChunkBuilder::DoSar(HSar* instr) { + return DoShift(Token::SAR, instr); +} + + +LInstruction* LChunkBuilder::DoShl(HShl* instr) { + return DoShift(Token::SHL, instr); +} + + +LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); + + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); + return DefineAsRegister(new(zone()) LBitI(left, right)); + } else { + return DoArithmeticT(instr->op(), instr); + } +} + + +LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + 
DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( + dividend, divisor)); + if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || + (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || + (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && + divisor != 1 && divisor != -1)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI( + dividend, divisor)); + if (divisor == 0 || + (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || + !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) + ? 
NULL : TempRegister(); + LInstruction* result = + DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + (instr->CheckFlag(HValue::kCanOverflow) && + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) || + (!instr->IsMathFloorOfDiv() && + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { + if (instr->representation().IsSmiOrInteger32()) { + if (instr->RightIsPowerOf2()) { + return DoDivByPowerOf2I(instr); + } else if (instr->right()->IsConstant()) { + return DoDivByConstI(instr); + } else { + return DoDivI(instr); + } + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::DIV, instr); + } else { + return DoArithmeticT(Token::DIV, instr); + } +} + + +LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { + LOperand* dividend = UseRegisterAtStart(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I( + dividend, divisor)); + if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || + (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LOperand* temp = + ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || + (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? 
+ NULL : TempRegister(); + LInstruction* result = DefineAsRegister( + new(zone()) LFlooringDivByConstI(dividend, divisor, temp)); + if (divisor == 0 || + (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegister(instr->right()); + LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor); + return AssignEnvironment(DefineAsRegister(div)); +} + + +LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { + if (instr->RightIsPowerOf2()) { + return DoFlooringDivByPowerOf2I(instr); + } else if (instr->right()->IsConstant()) { + return DoFlooringDivByConstI(instr); + } else { + return DoFlooringDivI(instr); + } +} + + +LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegisterAtStart(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( + dividend, divisor)); + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + 
LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineAsRegister(new(zone()) LModByConstI( + dividend, divisor)); + if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoModI(HMod* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegister(instr->right()); + LInstruction* result = DefineAsRegister(new(zone()) LModI( + dividend, divisor)); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoMod(HMod* instr) { + if (instr->representation().IsSmiOrInteger32()) { + return instr->RightIsPowerOf2() ? 
DoModByPowerOf2I(instr) : DoModI(instr); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::MOD, instr); + } else { + return DoArithmeticT(Token::MOD, instr); + } +} + + +LInstruction* LChunkBuilder::DoMul(HMul* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + HValue* left = instr->BetterLeftOperand(); + HValue* right = instr->BetterRightOperand(); + LOperand* left_op; + LOperand* right_op; + bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); + bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero); + + if (right->IsConstant()) { + HConstant* constant = HConstant::cast(right); + int32_t constant_value = constant->Integer32Value(); + // Constants -1, 0 and 1 can be optimized if the result can overflow. + // For other constants, it can be optimized only without overflow. + if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) { + left_op = UseRegisterAtStart(left); + right_op = UseConstant(right); + } else { + if (bailout_on_minus_zero) { + left_op = UseRegister(left); + } else { + left_op = UseRegisterAtStart(left); + } + right_op = UseRegister(right); + } + } else { + if (bailout_on_minus_zero) { + left_op = UseRegister(left); + } else { + left_op = UseRegisterAtStart(left); + } + right_op = UseRegister(right); + } + LMulI* mul = new(zone()) LMulI(left_op, right_op); + if (can_overflow || bailout_on_minus_zero) { + AssignEnvironment(mul); + } + return DefineAsRegister(mul); + + } else if (instr->representation().IsDouble()) { + if (kArchVariant == kMips64r2) { + if (instr->HasOneUse() && instr->uses().value()->IsAdd()) { + HAdd* add = HAdd::cast(instr->uses().value()); + if (instr == add->left()) { + // This mul is the lhs of an add. The add and mul will be folded + // into a multiply-add. 
+ return NULL; + } + if (instr == add->right() && !add->left()->IsMul()) { + // This mul is the rhs of an add, where the lhs is not another mul. + // The add and mul will be folded into a multiply-add. + return NULL; + } + } + } + return DoArithmeticD(Token::MUL, instr); + } else { + return DoArithmeticT(Token::MUL, instr); + } +} + + +LInstruction* LChunkBuilder::DoSub(HSub* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseOrConstantAtStart(instr->right()); + LSubI* sub = new(zone()) LSubI(left, right); + LInstruction* result = DefineAsRegister(sub); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::SUB, instr); + } else { + return DoArithmeticT(Token::SUB, instr); + } +} + + +LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { + LOperand* multiplier_op = UseRegisterAtStart(mul->left()); + LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); + LOperand* addend_op = UseRegisterAtStart(addend); + return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, + multiplicand_op)); +} + + +LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); + LAddI* add = new(zone()) LAddI(left, right); + LInstruction* result = DefineAsRegister(add); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + 
return result; + } else if (instr->representation().IsExternal()) { + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseOrConstantAtStart(instr->right()); + LAddI* add = new(zone()) LAddI(left, right); + LInstruction* result = DefineAsRegister(add); + return result; + } else if (instr->representation().IsDouble()) { + if (kArchVariant == kMips64r2) { + if (instr->left()->IsMul()) + return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); + + if (instr->right()->IsMul()) { + DCHECK(!instr->left()->IsMul()); + return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); + } + } + return DoArithmeticD(Token::ADD, instr); + } else { + return DoArithmeticT(Token::ADD, instr); + } +} + + +LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { + LOperand* left = NULL; + LOperand* right = NULL; + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + left = UseRegisterAtStart(instr->BetterLeftOperand()); + right = UseOrConstantAtStart(instr->BetterRightOperand()); + } else { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + left = UseRegisterAtStart(instr->left()); + right = UseRegisterAtStart(instr->right()); + } + return DefineAsRegister(new(zone()) LMathMinMax(left, right)); +} + + +LInstruction* LChunkBuilder::DoPower(HPower* instr) { + DCHECK(instr->representation().IsDouble()); + // We call a C function for double power. It can't trigger a GC. + // We need to use fixed result register for the call. 
+ Representation exponent_type = instr->right()->representation(); + DCHECK(instr->left()->representation().IsDouble()); + LOperand* left = UseFixedDouble(instr->left(), f2); + LOperand* right = exponent_type.IsDouble() ? + UseFixedDouble(instr->right(), f4) : + UseFixed(instr->right(), a2); + LPower* result = new(zone()) LPower(left, right); + return MarkAsCall(DefineFixedDouble(result, f0), + instr, + CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), cp); + LOperand* left = UseFixed(instr->left(), a1); + LOperand* right = UseFixed(instr->right(), a0); + LCmpT* result = new(zone()) LCmpT(context, left, right); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoCompareNumericAndBranch( + HCompareNumericAndBranch* instr) { + Representation r = instr->representation(); + if (r.IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); + LOperand* left = UseRegisterOrConstantAtStart(instr->left()); + LOperand* right = UseRegisterOrConstantAtStart(instr->right()); + return new(zone()) LCompareNumericAndBranch(left, right); + } else { + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseRegisterAtStart(instr->right()); + return new(zone()) LCompareNumericAndBranch(left, right); + } +} + + +LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( + HCompareObjectEqAndBranch* instr) { + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseRegisterAtStart(instr->right()); + return new(zone()) LCmpObjectEqAndBranch(left, right); +} + + +LInstruction* 
LChunkBuilder::DoCompareHoleAndBranch( + HCompareHoleAndBranch* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LCmpHoleAndBranch(value); +} + + +LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( + HCompareMinusZeroAndBranch* instr) { + LOperand* value = UseRegister(instr->value()); + LOperand* scratch = TempRegister(); + return new(zone()) LCompareMinusZeroAndBranch(value, scratch); +} + + +LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* temp = TempRegister(); + return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()), + temp); +} + + +LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* temp = TempRegister(); + return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()), + temp); +} + + +LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LIsSmiAndBranch(Use(instr->value())); +} + + +LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( + HIsUndetectableAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LIsUndetectableAndBranch( + UseRegisterAtStart(instr->value()), TempRegister()); +} + + +LInstruction* LChunkBuilder::DoStringCompareAndBranch( + HStringCompareAndBranch* instr) { + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), cp); + LOperand* left = UseFixed(instr->left(), a1); + LOperand* right = UseFixed(instr->right(), a0); + LStringCompareAndBranch* result = + new(zone()) LStringCompareAndBranch(context, left, right); + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( + HHasInstanceTypeAndBranch* 
instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LHasInstanceTypeAndBranch(value); +} + + +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); +} + + +LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( + HHasCachedArrayIndexAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LHasCachedArrayIndexAndBranch( + UseRegisterAtStart(instr->value())); +} + + +LInstruction* LChunkBuilder::DoClassOfTestAndBranch( + HClassOfTestAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()), + TempRegister()); +} + + +LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) { + LOperand* map = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new(zone()) LMapEnumLength(map)); +} + + +LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { + LOperand* object = UseFixed(instr->value(), a0); + LDateField* result = + new(zone()) LDateField(object, FixedTemp(a1), instr->index()); + return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { + LOperand* string = UseRegisterAtStart(instr->string()); + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); +} + + +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegisterAtStart(instr->string()); + LOperand* index = FLAG_debug_code + ? 
UseRegisterAtStart(instr->index()) + : UseRegisterOrConstantAtStart(instr->index()); + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL; + return new(zone()) LSeqStringSetChar(context, string, index, value); +} + + +LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? UseRegisterOrConstantAtStart(instr->length()) + : UseRegisterAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } +return result; +} + + +LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( + HBoundsCheckBaseIndexInformation* instr) { + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { + // The control instruction marking the end of a block that completed + // abruptly (e.g., threw an exception). There is nothing specific to do. + return NULL; +} + + +LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { + return NULL; +} + + +LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { + // All HForceRepresentation instructions should be eliminated in the + // representation change phase of Hydrogen. 
+ UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoChange(HChange* instr) { + Representation from = instr->from(); + Representation to = instr->to(); + HValue* val = instr->value(); + if (from.IsSmi()) { + if (to.IsTagged()) { + LOperand* value = UseRegister(val); + return DefineSameAsFirst(new(zone()) LDummyUse(value)); + } + from = Representation::Tagged(); + } + if (from.IsTagged()) { + if (to.IsDouble()) { + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; + } else if (to.IsSmi()) { + LOperand* value = UseRegister(val); + if (val->type().IsSmi()) { + return DefineSameAsFirst(new(zone()) LDummyUse(value)); + } + return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); + } else { + DCHECK(to.IsInteger32()); + if (val->type().IsSmi() || val->representation().IsSmi()) { + LOperand* value = UseRegisterAtStart(val); + return DefineAsRegister(new(zone()) LSmiUntag(value, false)); + } else { + LOperand* value = UseRegister(val); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempDoubleRegister(); + LInstruction* result = + DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; + } + } + } else if (from.IsDouble()) { + if (to.IsTagged()) { + info()->MarkAsDeferredCalling(); + LOperand* value = UseRegister(val); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + + LUnallocated* result_temp = TempRegister(); + LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); + return AssignPointerMap(Define(result, result_temp)); + } else if (to.IsSmi()) { + LOperand* value = UseRegister(val); + return AssignEnvironment( + DefineAsRegister(new(zone()) LDoubleToSmi(value))); + } else { + DCHECK(to.IsInteger32()); + LOperand* value = UseRegister(val); + 
LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); + if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); + return result; + } + } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); + if (to.IsTagged()) { + if (val->CheckFlag(HInstruction::kUint32)) { + LOperand* value = UseRegisterAtStart(val); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); + return AssignPointerMap(DefineAsRegister(result)); + } else { + STATIC_ASSERT((kMinInt == Smi::kMinValue) && + (kMaxInt == Smi::kMaxValue)); + LOperand* value = UseRegisterAtStart(val); + return DefineAsRegister(new(zone()) LSmiTag(value)); + } + } else if (to.IsSmi()) { + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; + } else { + DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { + return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); + } else { + return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); + } + } + } + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckSmi(value)); +} + + +LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = new(zone()) LCheckInstanceType(value); + return AssignEnvironment(result); +} + + 
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckValue(value)); +} + + +LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { + HValue* value = instr->value(); + Representation input_rep = value->representation(); + LOperand* reg = UseRegister(value); + if (input_rep.IsDouble()) { + // Revisit this decision, here and 8 lines below. + return DefineAsRegister(new(zone()) LClampDToUint8(reg, + TempDoubleRegister())); + } else if (input_rep.IsInteger32()) { + return DefineAsRegister(new(zone()) LClampIToUint8(reg)); + } else { + DCHECK(input_rep.IsSmiOrTagged()); + LClampTToUint8* result = + new(zone()) LClampTToUint8(reg, TempDoubleRegister()); + return AssignEnvironment(DefineAsRegister(result)); + } +} + + +LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { + HValue* value = instr->value(); + DCHECK(value->representation().IsDouble()); + return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); +} + + +LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) { + LOperand* lo = UseRegister(instr->lo()); + LOperand* hi = UseRegister(instr->hi()); + return DefineAsRegister(new(zone()) LConstructDouble(hi, lo)); +} + + +LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { + LOperand* context = info()->IsStub() + ? 
UseFixed(instr->context(), cp) + : NULL; + LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); + return new(zone()) LReturn(UseFixed(instr->value(), v0), context, + parameter_count); +} + + +LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { + Representation r = instr->representation(); + if (r.IsSmi()) { + return DefineAsRegister(new(zone()) LConstantS); + } else if (r.IsInteger32()) { + return DefineAsRegister(new(zone()) LConstantI); + } else if (r.IsDouble()) { + return DefineAsRegister(new(zone()) LConstantD); + } else if (r.IsExternal()) { + return DefineAsRegister(new(zone()) LConstantE); + } else if (r.IsTagged()) { + return DefineAsRegister(new(zone()) LConstantT); + } else { + UNREACHABLE(); + return NULL; + } +} + + +LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { + LLoadGlobalCell* result = new(zone()) LLoadGlobalCell; + return instr->RequiresHoleCheck() + ? AssignEnvironment(DefineAsRegister(result)) + : DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadGlobalGeneric* result = + new(zone()) LLoadGlobalGeneric(context, global_object, vector); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { + LOperand* value = UseRegister(instr->value()); + // Use a temp to check the value in the cell in the case where we perform + // a hole check. + return instr->RequiresHoleCheck() + ? 
AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister())) + : new(zone()) LStoreGlobalCell(value, NULL); +} + + +LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { + LOperand* context = UseRegisterAtStart(instr->value()); + LInstruction* result = + DefineAsRegister(new(zone()) LLoadContextSlot(context)); + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { + LOperand* context; + LOperand* value; + if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); + value = UseTempRegister(instr->value()); + } else { + context = UseRegister(instr->context()); + value = UseRegister(instr->value()); + } + LInstruction* result = new(zone()) LStoreContextSlot(context, value); + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { + LOperand* obj = UseRegisterAtStart(instr->object()); + return DefineAsRegister(new(zone()) LLoadNamedField(obj)); +} + + +LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + + LInstruction* result = + DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0); + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoLoadFunctionPrototype( + HLoadFunctionPrototype* instr) { + return AssignEnvironment(DefineAsRegister( + new(zone()) LLoadFunctionPrototype(UseRegister(instr->function())))); +} + + +LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { + return DefineAsRegister(new(zone()) LLoadRoot); +} + 
+ +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { + DCHECK(instr->key()->representation().IsSmiOrInteger32()); + ElementsKind elements_kind = instr->elements_kind(); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LInstruction* result = NULL; + + if (!instr->is_typed_elements()) { + LOperand* obj = NULL; + if (instr->representation().IsDouble()) { + obj = UseRegister(instr->elements()); + } else { + DCHECK(instr->representation().IsSmiOrTagged() || + instr->representation().IsInteger32()); + obj = UseRegisterAtStart(instr->elements()); + } + result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key)); + } else { + DCHECK( + (instr->representation().IsInteger32() && + !IsDoubleOrFloatElementsKind(elements_kind)) || + (instr->representation().IsDouble() && + IsDoubleOrFloatElementsKind(elements_kind))); + LOperand* backing_store = UseRegister(instr->elements()); + result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key)); + } + + if ((instr->is_external() || instr->is_fixed_typed_array()) ? 
+ // see LCodeGen::DoLoadKeyedExternalArray + ((elements_kind == EXTERNAL_UINT32_ELEMENTS || + elements_kind == UINT32_ELEMENTS) && + !instr->CheckFlag(HInstruction::kUint32)) : + // see LCodeGen::DoLoadKeyedFixedDoubleArray and + // LCodeGen::DoLoadKeyedFixedArray + instr->RequiresHoleCheck()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + + LInstruction* result = + DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector), + v0); + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + if (!instr->is_typed_elements()) { + DCHECK(instr->elements()->representation().IsTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + LOperand* object = NULL; + LOperand* val = NULL; + LOperand* key = NULL; + + if (instr->value()->representation().IsDouble()) { + object = UseRegisterAtStart(instr->elements()); + key = UseRegisterOrConstantAtStart(instr->key()); + val = UseRegister(instr->value()); + } else { + DCHECK(instr->value()->representation().IsSmiOrTagged() || + instr->value()->representation().IsInteger32()); + if (needs_write_barrier) { + object = UseTempRegister(instr->elements()); + val = UseTempRegister(instr->value()); + key = UseTempRegister(instr->key()); + } else { + object = UseRegisterAtStart(instr->elements()); + val = UseRegisterAtStart(instr->value()); + key = UseRegisterOrConstantAtStart(instr->key()); + } + } + + return new(zone()) LStoreKeyed(object, key, val); + } + + DCHECK( + (instr->value()->representation().IsInteger32() && + 
!IsDoubleOrFloatElementsKind(instr->elements_kind())) || + (instr->value()->representation().IsDouble() && + IsDoubleOrFloatElementsKind(instr->elements_kind()))); + DCHECK((instr->is_fixed_typed_array() && + instr->elements()->representation().IsTagged()) || + (instr->is_external() && + instr->elements()->representation().IsExternal())); + LOperand* val = UseRegister(instr->value()); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* backing_store = UseRegister(instr->elements()); + return new(zone()) LStoreKeyed(backing_store, key, val); +} + + +LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); + + return MarkAsCall( + new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr); +} + + +LInstruction* LChunkBuilder::DoTransitionElementsKind( + HTransitionElementsKind* instr) { + if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { + LOperand* object = UseRegister(instr->object()); + LOperand* new_map_reg = TempRegister(); + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); + return result; + } else { + LOperand* object = UseFixed(instr->object(), a0); + LOperand* context = UseFixed(instr->context(), cp); + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, context, NULL); + return MarkAsCall(result, instr); + } +} + + +LInstruction* LChunkBuilder::DoTrapAllocationMemento( + HTrapAllocationMemento* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* temp = 
TempRegister(); + LTrapAllocationMemento* result = + new(zone()) LTrapAllocationMemento(object, temp); + return AssignEnvironment(result); +} + + +LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { + bool is_in_object = instr->access().IsInobject(); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + bool needs_write_barrier_for_map = instr->has_transition() && + instr->NeedsWriteBarrierForMap(); + + LOperand* obj; + if (needs_write_barrier) { + obj = is_in_object + ? UseRegister(instr->object()) + : UseTempRegister(instr->object()); + } else { + obj = needs_write_barrier_for_map + ? UseRegister(instr->object()) + : UseRegisterAtStart(instr->object()); + } + + LOperand* val; + if (needs_write_barrier || instr->field_representation().IsSmi()) { + val = UseTempRegister(instr->value()); + } else if (instr->field_representation().IsDouble()) { + val = UseRegisterAtStart(instr->value()); + } else { + val = UseRegister(instr->value()); + } + + // We need a temporary register for write barrier of the map field. + LOperand* temp = needs_write_barrier_for_map ? 
TempRegister() : NULL; + + return new(zone()) LStoreNamedField(obj, val, temp); +} + + +LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister()); + + LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val); + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* left = UseFixed(instr->left(), a1); + LOperand* right = UseFixed(instr->right(), a0); + return MarkAsCall( + DefineFixed(new(zone()) LStringAdd(context, left, right), v0), + instr); +} + + +LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { + LOperand* string = UseTempRegister(instr->string()); + LOperand* index = UseTempRegister(instr->index()); + LOperand* context = UseAny(instr->context()); + LStringCharCodeAt* result = + new(zone()) LStringCharCodeAt(context, string, index); + return AssignPointerMap(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { + LOperand* char_code = UseRegister(instr->value()); + LOperand* context = UseAny(instr->context()); + LStringCharFromCode* result = + new(zone()) LStringCharFromCode(context, char_code); + return AssignPointerMap(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { + info()->MarkAsDeferredCalling(); + LOperand* context = UseAny(instr->context()); + LOperand* size = UseRegisterOrConstant(instr->size()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2); + return AssignPointerMap(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { + 
LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall( + DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr); +} + + +LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall( + DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr); +} + + +LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { + DCHECK(argument_count_ == 0); + allocator_->MarkAsOsrEntry(); + current_block_->last_environment()->set_ast_id(instr->ast_id()); + return AssignEnvironment(new(zone()) LOsrEntry); +} + + +LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { + LParameter* result = new(zone()) LParameter; + if (instr->kind() == HParameter::STACK_PARAMETER) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + DCHECK(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + info()->code_stub()->GetInterfaceDescriptor(); + int index = static_cast<int>(instr->index()); + Register reg = descriptor->GetEnvironmentParameterRegister(index); + return DefineFixed(result, reg); + } +} + + +LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { + // Use an index that corresponds to the location in the unoptimized frame, + // which the optimized frame will subsume. 
+ int env_index = instr->index(); + int spill_index = 0; + if (instr->environment()->is_parameter_index(env_index)) { + spill_index = chunk()->GetParameterStackSlot(env_index); + } else { + spill_index = env_index - instr->environment()->first_local_index(); + if (spill_index > LUnallocated::kMaxFixedSlotIndex) { + Abort(kTooManySpillSlotsNeededForOSR); + spill_index = 0; + } + } + return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); +} + + +LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr); +} + + +LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. + return NULL; +} + + +LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { + instr->ReplayEnvironment(current_block_->last_environment()); + + // There are no real uses of a captured object. 
+ return NULL; +} + + +LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { + info()->MarkAsRequiresFrame(); + LOperand* args = UseRegister(instr->arguments()); + LOperand* length = UseRegisterOrConstantAtStart(instr->length()); + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); +} + + +LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) { + LOperand* object = UseFixed(instr->value(), a0); + LToFastProperties* result = new(zone()) LToFastProperties(object); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0)); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + +LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { + return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); +} + + +LInstruction* LChunkBuilder::DoIsConstructCallAndBranch( + HIsConstructCallAndBranch* instr) { + return new(zone()) LIsConstructCallAndBranch(TempRegister()); +} + + +LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { + instr->ReplayEnvironment(current_block_->last_environment()); + return NULL; +} + + +LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { + if (instr->is_function_entry()) { + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(new(zone()) LStackCheck(context), instr); + } else { + DCHECK(instr->is_backwards_branch()); + LOperand* context = UseAny(instr->context()); + return AssignEnvironment( + AssignPointerMap(new(zone()) LStackCheck(context))); + } +} + + +LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { + HEnvironment* outer = current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); + HConstant* undefined = 
graph()->GetConstantUndefined(); + HEnvironment* inner = outer->CopyForInlining(instr->closure(), + instr->arguments_count(), + instr->function(), + undefined, + instr->inlining_kind()); + // Only replay binding of arguments object if it wasn't removed from graph. + if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { + inner->Bind(instr->arguments_var(), instr->arguments_object()); + } + inner->set_entry(instr); + current_block_->UpdateEnvironment(inner); + chunk_->AddInlinedClosure(instr->closure()); + return NULL; +} + + +LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { + LInstruction* pop = NULL; + + HEnvironment* env = current_block_->last_environment(); + + if (env->entry()->arguments_pushed()) { + int argument_count = env->arguments_environment()->parameter_count(); + pop = new(zone()) LDrop(argument_count); + DCHECK(instr->argument_delta() == -argument_count); + } + + HEnvironment* outer = current_block_->last_environment()-> + DiscardInlined(false); + current_block_->UpdateEnvironment(outer); + + return pop; +} + + +LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* object = UseFixed(instr->enumerable(), a0); + LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); + return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { + LOperand* map = UseRegister(instr->map()); + return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map))); +} + + +LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* map = UseRegisterAtStart(instr->map()); + return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); +} + + +LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { + LOperand* object = 
UseRegister(instr->object()); + LOperand* index = UseTempRegister(instr->index()); + LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); +} + + +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), cp); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, cp), instr); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/lithium-mips64.h nodejs-0.11.15/deps/v8/src/mips64/lithium-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/lithium-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/lithium-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2848 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_MIPS_LITHIUM_MIPS_H_ +#define V8_MIPS_LITHIUM_MIPS_H_ + +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { + +// Forward declarations. 
+class LCodeGen; + +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddI) \ + V(Allocate) \ + V(AllocateBlockContext) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallJSFunction) \ + V(CallWithDescriptor) \ + V(CallFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CheckInstanceType) \ + V(CheckMaps) \ + V(CheckMapValue) \ + V(CheckNonSmi) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8) \ + V(ClassOfTestAndBranch) \ + V(CompareMinusZeroAndBranch) \ + V(CompareNumericAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpHoleAndBranch) \ + V(CmpMapAndBranch) \ + V(CmpT) \ + V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleToI) \ + V(DoubleBits) \ + V(DoubleToSmi) \ + V(Drop) \ + V(Dummy) \ + V(DummyUse) \ + V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsStringAndBranch) \ + V(IsSmiAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadRoot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyed) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(MapEnumLength) \ + V(MathAbs) \ + V(MathExp) \ + V(MathClz32) 
\ + V(MathFloor) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulI) \ + V(MultiplyAddD) \ + V(NumberTagD) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PushArgument) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyed) \ + V(StoreKeyedGeneric) \ + V(StoreNamedField) \ + V(StoreNamedGeneric) \ + V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ + V(WrapReceiver) + +#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ + virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \ + return LInstruction::k##type; \ + } \ + virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \ + virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \ + return mnemonic; \ + } \ + static L##type* cast(LInstruction* instr) { \ + DCHECK(instr->Is##type()); \ + return reinterpret_cast<L##type*>(instr); \ + } + + +#define DECLARE_HYDROGEN_ACCESSOR(type) \ + H##type* hydrogen() const { \ + return H##type::cast(hydrogen_value()); \ + } + + +class LInstruction : public ZoneObject { + public: + LInstruction() + : environment_(NULL), + hydrogen_value_(NULL), + bit_field_(IsCallBits::encode(false)) { + } + + virtual ~LInstruction() {} + + virtual void CompileToNative(LCodeGen* generator) = 0; + virtual const char* Mnemonic() const = 0; + virtual void PrintTo(StringStream* stream); + virtual void PrintDataTo(StringStream* stream); + virtual void 
PrintOutputOperandTo(StringStream* stream); + + enum Opcode { + // Declare a unique enum value for each instruction. +#define DECLARE_OPCODE(type) k##type, + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) + kNumberOfInstructions +#undef DECLARE_OPCODE + }; + + virtual Opcode opcode() const = 0; + + // Declare non-virtual type testers for all leaf IR classes. +#define DECLARE_PREDICATE(type) \ + bool Is##type() const { return opcode() == k##type; } + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) +#undef DECLARE_PREDICATE + + // Declare virtual predicates for instructions that don't have + // an opcode. + virtual bool IsGap() const { return false; } + + virtual bool IsControl() const { return false; } + + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + + void set_environment(LEnvironment* env) { environment_ = env; } + LEnvironment* environment() const { return environment_; } + bool HasEnvironment() const { return environment_ != NULL; } + + void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } + LPointerMap* pointer_map() const { return pointer_map_.get(); } + bool HasPointerMap() const { return pointer_map_.is_set(); } + + void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } + HValue* hydrogen_value() const { return hydrogen_value_; } + + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } + + void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } + bool IsCall() const { return IsCallBits::decode(bit_field_); } + + // Interface to the register allocator and iterators. + bool ClobbersTemps() const { return IsCall(); } + bool ClobbersRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall(); + } + + // Interface to the register allocator and iterators. 
+ bool IsMarkedAsCall() const { return IsCall(); } + + virtual bool HasResult() const = 0; + virtual LOperand* result() const = 0; + + LOperand* FirstInput() { return InputAt(0); } + LOperand* Output() { return HasResult() ? result() : NULL; } + + virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } + +#ifdef DEBUG + void VerifyCall(); +#endif + + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + + private: + // Iterator interface. + friend class InputIterator; + + friend class TempIterator; + virtual int TempCount() = 0; + virtual LOperand* TempAt(int i) = 0; + + class IsCallBits: public BitField<bool, 0, 1> {}; + + LEnvironment* environment_; + SetOncePointer<LPointerMap> pointer_map_; + HValue* hydrogen_value_; + int bit_field_; +}; + + +// R = number of result operands (0 or 1). +template<int R> +class LTemplateResultInstruction : public LInstruction { + public: + // Allow 0 or 1 output operands. + STATIC_ASSERT(R == 0 || R == 1); + virtual bool HasResult() const V8_FINAL V8_OVERRIDE { + return R != 0 && result() != NULL; + } + void set_result(LOperand* operand) { results_[0] = operand; } + LOperand* result() const { return results_[0]; } + + protected: + EmbeddedContainer<LOperand*, R> results_; +}; + + +// R = number of result operands (0 or 1). +// I = number of input operands. +// T = number of temporary operands. +template<int R, int I, int T> +class LTemplateInstruction : public LTemplateResultInstruction<R> { + protected: + EmbeddedContainer<LOperand*, I> inputs_; + EmbeddedContainer<LOperand*, T> temps_; + + private: + // Iterator support. 
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; } + virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; } + + virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; } + virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; } +}; + + +class LGap : public LTemplateInstruction<0, 0, 0> { + public: + explicit LGap(HBasicBlock* block) + : block_(block) { + parallel_moves_[BEFORE] = NULL; + parallel_moves_[START] = NULL; + parallel_moves_[END] = NULL; + parallel_moves_[AFTER] = NULL; + } + + // Can't use the DECLARE-macro here because of sub-classes. + virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; } + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + static LGap* cast(LInstruction* instr) { + DCHECK(instr->IsGap()); + return reinterpret_cast<LGap*>(instr); + } + + bool IsRedundant() const; + + HBasicBlock* block() const { return block_; } + + enum InnerPosition { + BEFORE, + START, + END, + AFTER, + FIRST_INNER_POSITION = BEFORE, + LAST_INNER_POSITION = AFTER + }; + + LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { + if (parallel_moves_[pos] == NULL) { + parallel_moves_[pos] = new(zone) LParallelMove(zone); + } + return parallel_moves_[pos]; + } + + LParallelMove* GetParallelMove(InnerPosition pos) { + return parallel_moves_[pos]; + } + + private: + LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; + HBasicBlock* block_; +}; + + +class LInstructionGap V8_FINAL : public LGap { + public: + explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return !IsRedundant(); + } + + DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") +}; + + +class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + explicit LGoto(HBasicBlock* block) : block_(block) { } + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE; + DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") 
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual bool IsControl() const V8_OVERRIDE { return true; } + + int block_id() const { return block_->block_id(); } + + private: + HBasicBlock* block_; +}; + + +class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + LLazyBailout() : gap_instructions_size_(0) { } + + DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") + + void set_gap_instructions_size(int gap_instructions_size) { + gap_instructions_size_ = gap_instructions_size; + } + int gap_instructions_size() { return gap_instructions_size_; } + + private: + int gap_instructions_size_; +}; + + +class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + LDummy() {} + DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") +}; + + +class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDummyUse(LOperand* value) { + inputs_[0] = value; + } + DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") +}; + + +class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + virtual bool IsControl() const V8_OVERRIDE { return true; } + DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") + DECLARE_HYDROGEN_ACCESSOR(Deoptimize) +}; + + +class LLabel V8_FINAL : public LGap { + public: + explicit LLabel(HBasicBlock* block) + : LGap(block), replacement_(NULL) { } + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(Label, "label") + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int block_id() const { return block()->block_id(); } + bool is_loop_header() const { return block()->IsLoopHeader(); } + bool is_osr_entry() const { return block()->is_osr_entry(); } + Label* label() { return &label_; } + LLabel* replacement() const { return replacement_; } + void set_replacement(LLabel* label) { replacement_ = label; } + bool HasReplacement() const { return replacement_ != NULL; } + + private: + 
Label label_; + LLabel* replacement_; +}; + + +class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") +}; + + +class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallStub(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub") + DECLARE_HYDROGEN_ACCESSOR(CallStub) +}; + + +class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") +}; + + +template<int I, int T> +class LControlInstruction : public LTemplateInstruction<0, I, T> { + public: + LControlInstruction() : false_label_(NULL), true_label_(NULL) { } + + virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; } + + int SuccessorCount() { return hydrogen()->SuccessorCount(); } + HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } + + int TrueDestination(LChunk* chunk) { + return chunk->LookupDestination(true_block_id()); + } + int FalseDestination(LChunk* chunk) { + return chunk->LookupDestination(false_block_id()); + } + + Label* TrueLabel(LChunk* chunk) { + if (true_label_ == NULL) { + true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); + } + return true_label_; + } + Label* FalseLabel(LChunk* chunk) { + if (false_label_ == NULL) { + false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); + } + return false_label_; + } + + protected: + int true_block_id() { return SuccessorAt(0)->block_id(); } + int false_block_id() { return SuccessorAt(1)->block_id(); } + + private: + HControlInstruction* hydrogen() { + return HControlInstruction::cast(this->hydrogen_value()); + } + + Label* 
false_label_; + Label* true_label_; +}; + + +class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LWrapReceiver(LOperand* receiver, LOperand* function) { + inputs_[0] = receiver; + inputs_[1] = function; + } + + DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") + DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) + + LOperand* receiver() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } +}; + + +class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> { + public: + LApplyArguments(LOperand* function, + LOperand* receiver, + LOperand* length, + LOperand* elements) { + inputs_[0] = function; + inputs_[1] = receiver; + inputs_[2] = length; + inputs_[3] = elements; + } + + DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") + + LOperand* function() { return inputs_[0]; } + LOperand* receiver() { return inputs_[1]; } + LOperand* length() { return inputs_[2]; } + LOperand* elements() { return inputs_[3]; } +}; + + +class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { + inputs_[0] = arguments; + inputs_[1] = length; + inputs_[2] = index; + } + + DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") + + LOperand* arguments() { return inputs_[0]; } + LOperand* length() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LArgumentsLength(LOperand* elements) { + inputs_[0] = elements; + } + + LOperand* elements() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") +}; + + +class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") + 
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) +}; + + +class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LModByPowerOf2I(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") + DECLARE_HYDROGEN_ACCESSOR(Mod) + + private: + int32_t divisor_; +}; + + +class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LModByConstI(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") + DECLARE_HYDROGEN_ACCESSOR(Mod) + + private: + int32_t divisor_; +}; + + +class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> { + public: + LModI(LOperand* left, + LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") + DECLARE_HYDROGEN_ACCESSOR(Mod) +}; + + +class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") + DECLARE_HYDROGEN_ACCESSOR(Div) + + private: + int32_t divisor_; +}; + + +class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LDivByConstI(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + 
DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") + DECLARE_HYDROGEN_ACCESSOR(Div) + + private: + int32_t divisor_; +}; + + +class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") + DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) +}; + + +class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, + "flooring-div-by-power-of-2-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) + + private: + int32_t divisor_; +}; + + +class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) { + inputs_[0] = dividend; + divisor_ = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) + + private: + int32_t divisor_; +}; + + +class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LFlooringDivI(LOperand* dividend, LOperand* divisor) { + inputs_[0] = dividend; + inputs_[1] = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + +class 
LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LMulI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") + DECLARE_HYDROGEN_ACCESSOR(Mul) +}; + + +// Instruction for computing multiplier * multiplicand + addend. +class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LMultiplyAddD(LOperand* addend, LOperand* multiplier, + LOperand* multiplicand) { + inputs_[0] = addend; + inputs_[1] = multiplier; + inputs_[2] = multiplicand; + } + + LOperand* addend() { return inputs_[0]; } + LOperand* multiplier() { return inputs_[1]; } + LOperand* multiplicand() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") +}; + + +class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") +}; + + +class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> { + public: + LCompareNumericAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, + "compare-numeric-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) + + Token::Value op() const { return hydrogen()->token(); } + bool is_double() const { + return hydrogen()->representation().IsDouble(); + } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LMathFloor(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") + 
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LMathRound(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFround(LOperand* value) { inputs_[0] = value; } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") +}; + + +class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LMathAbs(LOperand* context, LOperand* value) { + inputs_[1] = context; + inputs_[0] = value; + } + + LOperand* context() { return inputs_[1]; } + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathLog(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") +}; + + +class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathClz32(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") +}; + + +class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> { + public: + LMathExp(LOperand* value, + LOperand* double_temp, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + temps_[2] = double_temp; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } 
+ LOperand* double_temp() { return temps_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") +}; + + +class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathSqrt(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") +}; + + +class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LMathPowHalf(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") +}; + + +class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> { + public: + LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) +}; + + +class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LCmpHoleAndBranch(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) +}; + + +class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch, + "cmp-minus-zero-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch) +}; + + +class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + 
LIsObjectAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch) + + virtual void PrintDataTo(StringStream* stream); +}; + + +class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LIsStringAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LIsSmiAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, + "is-undetectable-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> { + public: + LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return 
inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, + "string-compare-and-branch") + DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) + + Token::Value op() const { return hydrogen()->token(); } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LHasInstanceTypeAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, + "has-instance-type-and-branch") + DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + +class LHasCachedArrayIndexAndBranch V8_FINAL + : public LControlInstruction<1, 0> { + public: + explicit LHasCachedArrayIndexAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch, + "has-cached-array-index-and-branch") + DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LClassOfTestAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, + "class-of-test-and-branch") + 
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LCmpT(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") + DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) + + Token::Value op() const { return hydrogen()->token(); } +}; + + +class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LInstanceOf(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of") +}; + + +class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = value; + temps_[0] = temp; + } + + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, + "instance-of-known-global") + DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal) + + Handle<JSFunction> function() const { return hydrogen()->function(); } + LEnvironment* GetDeferredLazyDeoptimizationEnvironment() { + return lazy_deopt_env_; + } + virtual void SetDeferredLazyDeoptimizationEnvironment( + LEnvironment* env) V8_OVERRIDE { + lazy_deopt_env_ = env; + } + + private: + LEnvironment* lazy_deopt_env_; +}; + + +class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> { + public: + LBoundsCheck(LOperand* index, 
LOperand* length) { + inputs_[0] = index; + inputs_[1] = length; + } + + LOperand* index() { return inputs_[0]; } + LOperand* length() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") + DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) +}; + + +class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LBitI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + Token::Value op() const { return hydrogen()->op(); } + + DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") + DECLARE_HYDROGEN_ACCESSOR(Bitwise) +}; + + +class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) + : op_(op), can_deopt_(can_deopt) { + inputs_[0] = left; + inputs_[1] = right; + } + + Token::Value op() const { return op_; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + bool can_deopt() const { return can_deopt_; } + + DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") + + private: + Token::Value op_; + bool can_deopt_; +}; + + +class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LSubI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") + DECLARE_HYDROGEN_ACCESSOR(Sub) +}; + + +class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + int32_t value() const { return hydrogen()->Integer32Value(); } +}; + + +class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + Smi* value() const { return 
Smi::FromInt(hydrogen()->Integer32Value()); } +}; + + +class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + double value() const { return hydrogen()->DoubleValue(); } +}; + + +class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + ExternalReference value() const { + return hydrogen()->ExternalReferenceValue(); + } +}; + + +class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + Handle<Object> value(Isolate* isolate) const { + return hydrogen()->handle(isolate); + } +}; + + +class LBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") + DECLARE_HYDROGEN_ACCESSOR(Branch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LCmpMapAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareMap) + + Handle<Map> map() const { return hydrogen()->map().handle(); } +}; + + +class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMapEnumLength(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length") +}; + + +class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + 
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) { + inputs_[0] = date; + temps_[0] = temp; + } + + LOperand* date() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + Smi* index() const { return index_; } + + DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field") + DECLARE_HYDROGEN_ACCESSOR(DateField) + + private: + Smi* index_; +}; + + +class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LSeqStringGetChar(LOperand* string, LOperand* index) { + inputs_[0] = string; + inputs_[1] = index; + } + + LOperand* string() const { return inputs_[0]; } + LOperand* index() const { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) +}; + + +class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> { + public: + LSeqStringSetChar(LOperand* context, + LOperand* string, + LOperand* index, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = string; + inputs_[2] = index; + inputs_[3] = value; + } + + LOperand* string() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } + LOperand* value() { return inputs_[3]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) +}; + + +class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LAddI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") + DECLARE_HYDROGEN_ACCESSOR(Add) +}; + + +class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LMathMinMax(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") + 
DECLARE_HYDROGEN_ACCESSOR(MathMinMax) +}; + + +class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LPower(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Power, "power") + DECLARE_HYDROGEN_ACCESSOR(Power) +}; + + +class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LArithmeticD(Token::Value op, LOperand* left, LOperand* right) + : op_(op) { + inputs_[0] = left; + inputs_[1] = right; + } + + Token::Value op() const { return op_; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + virtual Opcode opcode() const V8_OVERRIDE { + return LInstruction::kArithmeticD; + } + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE; + virtual const char* Mnemonic() const V8_OVERRIDE; + + private: + Token::Value op_; +}; + + +class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LArithmeticT(Token::Value op, + LOperand* context, + LOperand* left, + LOperand* right) + : op_(op) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + Token::Value op() const { return op_; } + + virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; } + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE; + virtual const char* Mnemonic() const V8_OVERRIDE; + + private: + Token::Value op_; +}; + + +class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> { + public: + LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { + inputs_[0] = value; + inputs_[1] = context; + inputs_[2] = parameter_count; + } + + LOperand* value() { return inputs_[0]; } + + bool has_constant_parameter_count() { + return 
parameter_count()->IsConstantOperand(); + } + LConstantOperand* constant_parameter_count() { + DCHECK(has_constant_parameter_count()); + return LConstantOperand::cast(parameter_count()); + } + LOperand* parameter_count() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(Return, "return") +}; + + +class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LLoadNamedField(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") + DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) +}; + + +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) { + inputs_[0] = context; + inputs_[1] = object; + temps_[0] = vector; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) + + Handle<Object> name() const { return hydrogen()->name(); } +}; + + +class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LLoadFunctionPrototype(LOperand* function) { + inputs_[0] = function; + } + + LOperand* function() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") + DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) +}; + + +class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") + DECLARE_HYDROGEN_ACCESSOR(LoadRoot) + + Heap::RootListIndex index() const { return hydrogen()->index(); } +}; + + +class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LLoadKeyed(LOperand* elements, LOperand* key) { + inputs_[0] = elements; + inputs_[1] = key; + } + + LOperand* 
elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } + bool is_external() const { + return hydrogen()->is_external(); + } + bool is_fixed_typed_array() const { + return hydrogen()->is_fixed_typed_array(); + } + bool is_typed_elements() const { + return is_external() || is_fixed_typed_array(); + } + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) + + virtual void PrintDataTo(StringStream* stream); + uint32_t base_offset() const { return hydrogen()->base_offset(); } +}; + + +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { + public: + LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key, + LOperand* vector) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = key; + temps_[0] = vector; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) +}; + + +class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell") + DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell) +}; + + +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LLoadGlobalGeneric(LOperand* context, LOperand* global_object, + LOperand* vector) { + inputs_[0] = context; + inputs_[1] = global_object; + temps_[0] = vector; + } + + LOperand* context() { return inputs_[0]; } + LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) + + Handle<Object> name() const { return hydrogen()->name(); } + bool 
for_typeof() const { return hydrogen()->for_typeof(); } +}; + + +class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> { + public: + LStoreGlobalCell(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") + DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell) +}; + + +class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LLoadContextSlot(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") + DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) + + int slot_index() { return hydrogen()->slot_index(); } + + virtual void PrintDataTo(StringStream* stream); +}; + + +class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> { + public: + LStoreContextSlot(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; + } + + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") + DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) + + int slot_index() { return hydrogen()->slot_index(); } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LPushArgument(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") +}; + + +class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + explicit LDrop(int count) : count_(count) { } + + int count() const { return count_; } + + DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") + + private: + int count_; +}; + + +class LStoreCodeEntry V8_FINAL: public 
LTemplateInstruction<0, 2, 0> { + public: + LStoreCodeEntry(LOperand* function, LOperand* code_object) { + inputs_[0] = function; + inputs_[1] = code_object; + } + + LOperand* function() { return inputs_[0]; } + LOperand* code_object() { return inputs_[1]; } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") + DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) +}; + + +class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> { + public: + LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { + inputs_[0] = base_object; + inputs_[1] = offset; + } + + LOperand* base_object() const { return inputs_[0]; } + LOperand* offset() const { return inputs_[1]; } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") +}; + + +class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") + DECLARE_HYDROGEN_ACCESSOR(ThisFunction) +}; + + +class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(Context, "context") + DECLARE_HYDROGEN_ACCESSOR(Context) +}; + + +class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LDeclareGlobals(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") + DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) +}; + + +class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallJSFunction(LOperand* function) { + inputs_[0] = function; + } + + LOperand* function() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function") + DECLARE_HYDROGEN_ACCESSOR(CallJSFunction) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() 
const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { + public: + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, + Zone* zone) + : descriptor_(descriptor), + inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); + inputs_.AddAll(operands, zone); + } + + LOperand* target() const { return inputs_[0]; } + + const InterfaceDescriptor* descriptor() { return descriptor_; } + + private: + DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") + DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } + + const InterfaceDescriptor* descriptor_; + ZoneList<LOperand*> inputs_; + + // Iterator support. + virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); } + virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; } + + virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; } + virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; } +}; + + + +class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LInvokeFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") + DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LCallFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* 
context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") + DECLARE_HYDROGEN_ACCESSOR(CallFunction) + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LCallNew(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; + } + + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") + DECLARE_HYDROGEN_ACCESSOR(CallNew) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LCallNewArray(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; + } + + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallRuntime(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") + DECLARE_HYDROGEN_ACCESSOR(CallRuntime) + + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { + return save_doubles() == kDontSaveFPRegs; + } + + const Runtime::Function* function() const { return hydrogen()->function(); } + int arity() const { return hydrogen()->argument_count(); } + SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } +}; + + +class LInteger32ToDouble 
V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LInteger32ToDouble(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") +}; + + +class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LUint32ToDouble(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") +}; + + +class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") +}; + + +class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp; + temps_[1] = temp2; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + +class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDoubleToSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") + DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + + bool truncating() { return hydrogen()->CanTruncateToInt32(); } +}; + + +// Sometimes truncating conversion from a tagged value to an int32. 
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDoubleToI(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") + DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + + bool truncating() { return hydrogen()->CanTruncateToInt32(); } +}; + + +// Truncating conversion from a tagged value to an int32. +class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LTaggedToI(LOperand* value, + LOperand* temp, + LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp; + temps_[1] = temp2; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") + DECLARE_HYDROGEN_ACCESSOR(Change) + + bool truncating() { return hydrogen()->CanTruncateToInt32(); } +}; + + +class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LSmiTag(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + +class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LNumberUntagD(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + +class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LSmiUntag(LOperand* value, bool needs_check) + : needs_check_(needs_check) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + bool needs_check() const { return needs_check_; } + + DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") + + private: + bool needs_check_; +}; + + +class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> { + public: + 
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { + inputs_[0] = object; + inputs_[1] = value; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + Representation representation() const { + return hydrogen()->field_representation(); + } +}; + + +class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { + public: + LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = value; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + Handle<Object> name() const { return hydrogen()->name(); } + StrictMode strict_mode() { return hydrogen()->strict_mode(); } +}; + + +class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> { + public: + LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) { + inputs_[0] = object; + inputs_[1] = key; + inputs_[2] = value; + } + + bool is_external() const { return hydrogen()->is_external(); } + bool is_fixed_typed_array() const { + return hydrogen()->is_fixed_typed_array(); + } + bool is_typed_elements() const { + return is_external() || is_fixed_typed_array(); + } + LOperand* elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } + + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + 
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } +}; + + +class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> { + public: + LStoreKeyedGeneric(LOperand* context, + LOperand* obj, + LOperand* key, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = obj; + inputs_[2] = key; + inputs_[3] = value; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } + LOperand* value() { return inputs_[3]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + StrictMode strict_mode() { return hydrogen()->strict_mode(); } +}; + + +class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> { + public: + LTransitionElementsKind(LOperand* object, + LOperand* context, + LOperand* new_map_temp) { + inputs_[0] = object; + inputs_[1] = context; + temps_[0] = new_map_temp; + } + + LOperand* context() { return inputs_[1]; } + LOperand* object() { return inputs_[0]; } + LOperand* new_map_temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, + "transition-elements-kind") + DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + Handle<Map> original_map() { return hydrogen()->original_map().handle(); } + Handle<Map> transitioned_map() { + return hydrogen()->transitioned_map().handle(); + } + ElementsKind from_kind() { return hydrogen()->from_kind(); } + ElementsKind to_kind() { return hydrogen()->to_kind(); } +}; + + +class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> { + public: + LTrapAllocationMemento(LOperand* 
object, + LOperand* temp) { + inputs_[0] = object; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, + "trap-allocation-memento") +}; + + +class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LStringAdd(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") + DECLARE_HYDROGEN_ACCESSOR(StringAdd) +}; + + + +class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { + inputs_[0] = context; + inputs_[1] = string; + inputs_[2] = index; + } + + LOperand* context() { return inputs_[0]; } + LOperand* string() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) +}; + + +class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { + inputs_[0] = context; + inputs_[1] = char_code; + } + + LOperand* context() { return inputs_[0]; } + LOperand* char_code() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") + DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) +}; + + +class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckValue(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") + DECLARE_HYDROGEN_ACCESSOR(CheckValue) +}; + + +class LCheckInstanceType V8_FINAL : public 
LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckInstanceType(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") + DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) +}; + + +class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckMaps(LOperand* value = NULL) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") + DECLARE_HYDROGEN_ACCESSOR(CheckMaps) +}; + + +class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCheckSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") +}; + + +class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckNonSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") + DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) +}; + + +class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LClampDToUint8(LOperand* unclamped, LOperand* temp) { + inputs_[0] = unclamped; + temps_[0] = temp; + } + + LOperand* unclamped() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") +}; + + +class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LClampIToUint8(LOperand* unclamped) { + inputs_[0] = unclamped; + } + + LOperand* unclamped() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") +}; + + +class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LClampTToUint8(LOperand* unclamped, LOperand* temp) { + inputs_[0] = unclamped; + temps_[0] = temp; + } + + LOperand* 
unclamped() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") +}; + + +class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDoubleBits(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits") + DECLARE_HYDROGEN_ACCESSOR(DoubleBits) +}; + + +class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LConstructDouble(LOperand* hi, LOperand* lo) { + inputs_[0] = hi; + inputs_[1] = lo; + } + + LOperand* hi() { return inputs_[0]; } + LOperand* lo() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double") +}; + + +class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> { + public: + LAllocate(LOperand* context, + LOperand* size, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = context; + inputs_[1] = size; + temps_[0] = temp1; + temps_[1] = temp2; + } + + LOperand* context() { return inputs_[0]; } + LOperand* size() { return inputs_[1]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") + DECLARE_HYDROGEN_ACCESSOR(Allocate) +}; + + +class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LRegExpLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") + DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral) +}; + + +class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LFunctionLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") + DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) +}; + + +class LToFastProperties 
V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LToFastProperties(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties") + DECLARE_HYDROGEN_ACCESSOR(ToFastProperties) +}; + + +class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LTypeof(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; + } + + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") +}; + + +class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LTypeofIsAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") + DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) + + Handle<String> type_literal() { return hydrogen()->type_literal(); } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> { + public: + explicit LIsConstructCallAndBranch(LOperand* temp) { + temps_[0] = temp; + } + + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch, + "is-construct-call-and-branch") +}; + + +class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + LOsrEntry() {} + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") +}; + + +class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LStackCheck(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") + DECLARE_HYDROGEN_ACCESSOR(StackCheck) + + Label* done_label() { return 
&done_label_; } + + private: + Label done_label_; +}; + + +class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LForInPrepareMap(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") +}; + + +class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInCacheArray(LOperand* map) { + inputs_[0] = map; + } + + LOperand* map() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") + + int idx() { + return HForInCacheArray::cast(this->hydrogen_value())->idx(); + } +}; + + +class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> { + public: + LCheckMapValue(LOperand* value, LOperand* map) { + inputs_[0] = value; + inputs_[1] = map; + } + + LOperand* value() { return inputs_[0]; } + LOperand* map() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") +}; + + +class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LLoadFieldByIndex(LOperand* object, LOperand* index) { + inputs_[0] = object; + inputs_[1] = index; + } + + LOperand* object() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") +}; + + +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return 
inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + +class LChunkBuilder; +class LPlatformChunk V8_FINAL : public LChunk { + public: + LPlatformChunk(CompilationInfo* info, HGraph* graph) + : LChunk(info, graph) { } + + int GetNextSpillIndex(RegisterKind kind); + LOperand* GetNextSpillSlot(RegisterKind kind); +}; + + +class LChunkBuilder V8_FINAL : public LChunkBuilderBase { + public: + LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) + : LChunkBuilderBase(graph->zone()), + chunk_(NULL), + info_(info), + graph_(graph), + status_(UNUSED), + current_instruction_(NULL), + current_block_(NULL), + next_block_(NULL), + allocator_(allocator) { } + + Isolate* isolate() const { return graph_->isolate(); } + + // Build the sequence for the graph. + LPlatformChunk* Build(); + + // Declare methods that deal with the individual node types. 
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); + HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) +#undef DECLARE_DO + + LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); + + static bool HasMagicNumberForDivisor(int32_t divisor); + + LInstruction* DoMathFloor(HUnaryMathOperation* instr); + LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathFround(HUnaryMathOperation* instr); + LInstruction* DoMathAbs(HUnaryMathOperation* instr); + LInstruction* DoMathLog(HUnaryMathOperation* instr); + LInstruction* DoMathExp(HUnaryMathOperation* instr); + LInstruction* DoMathSqrt(HUnaryMathOperation* instr); + LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); + LInstruction* DoMathClz32(HUnaryMathOperation* instr); + LInstruction* DoDivByPowerOf2I(HDiv* instr); + LInstruction* DoDivByConstI(HDiv* instr); + LInstruction* DoDivI(HDiv* instr); + LInstruction* DoModByPowerOf2I(HMod* instr); + LInstruction* DoModByConstI(HMod* instr); + LInstruction* DoModI(HMod* instr); + LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); + + private: + enum Status { + UNUSED, + BUILDING, + DONE, + ABORTED + }; + + LPlatformChunk* chunk() const { return chunk_; } + CompilationInfo* info() const { return info_; } + HGraph* graph() const { return graph_; } + + bool is_unused() const { return status_ == UNUSED; } + bool is_building() const { return status_ == BUILDING; } + bool is_done() const { return status_ == DONE; } + bool is_aborted() const { return status_ == ABORTED; } + + void Abort(BailoutReason reason); + + // Methods for getting operands for Use / Define / Temp. + LUnallocated* ToUnallocated(Register reg); + LUnallocated* ToUnallocated(DoubleRegister reg); + + // Methods for setting up define-use relationships. 
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); + MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); + MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value, + DoubleRegister fixed_register); + + // A value that is guaranteed to be allocated to a register. + // Operand created by UseRegister is guaranteed to be live until the end of + // instruction. This means that register allocator will not reuse it's + // register for any other operand inside instruction. + // Operand created by UseRegisterAtStart is guaranteed to be live only at + // instruction start. Register allocator is free to assign the same register + // to some other operand used inside instruction (i.e. temporary or + // output). + MUST_USE_RESULT LOperand* UseRegister(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); + + // An input operand in a register that may be trashed. + MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); + + // An input operand in a register or stack slot. + MUST_USE_RESULT LOperand* Use(HValue* value); + MUST_USE_RESULT LOperand* UseAtStart(HValue* value); + + // An input operand in a register, stack slot or a constant operand. + MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); + MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); + + // An input operand in a register or a constant operand. + MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); + + // An input operand in a constant operand. + MUST_USE_RESULT LOperand* UseConstant(HValue* value); + + // An input operand in register, stack slot or a constant operand. + // Will not be moved to a register even if one is freely available. + virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE; + + // Temporary operand that must be in a register. 
+ MUST_USE_RESULT LUnallocated* TempRegister(); + MUST_USE_RESULT LUnallocated* TempDoubleRegister(); + MUST_USE_RESULT LOperand* FixedTemp(Register reg); + MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg); + + // Methods for setting up define-use relationships. + // Return the same instruction that they are passed. + LInstruction* Define(LTemplateResultInstruction<1>* instr, + LUnallocated* result); + LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); + LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, + int index); + LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); + LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, + Register reg); + LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr, + DoubleRegister reg); + LInstruction* AssignEnvironment(LInstruction* instr); + LInstruction* AssignPointerMap(LInstruction* instr); + + enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; + + // By default we assume that instruction sequences generated for calls + // cannot deoptimize eagerly and we do not attach environment to this + // instruction. 
+ LInstruction* MarkAsCall( + LInstruction* instr, + HInstruction* hinstr, + CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); + + void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* current); + + void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); + LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr); + LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); + LInstruction* DoArithmeticD(Token::Value op, + HArithmeticBinaryOperation* instr); + LInstruction* DoArithmeticT(Token::Value op, + HBinaryOperation* instr); + + LPlatformChunk* chunk_; + CompilationInfo* info_; + HGraph* const graph_; + Status status_; + HInstruction* current_instruction_; + HBasicBlock* current_block_; + HBasicBlock* next_block_; + LAllocator* allocator_; + + DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); +}; + +#undef DECLARE_HYDROGEN_ACCESSOR +#undef DECLARE_CONCRETE_INSTRUCTION + +} } // namespace v8::internal + +#endif // V8_MIPS_LITHIUM_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/macro-assembler-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/macro-assembler-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/macro-assembler-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/macro-assembler-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6111 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <limits.h> // For LONG_MIN, LONG_MAX. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" + +namespace v8 { +namespace internal { + +MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) + : Assembler(arg_isolate, buffer, size), + generating_stub_(false), + has_frame_(false) { + if (isolate() != NULL) { + code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), + isolate()); + } +} + + +void MacroAssembler::Load(Register dst, + const MemOperand& src, + Representation r) { + DCHECK(!r.IsDouble()); + if (r.IsInteger8()) { + lb(dst, src); + } else if (r.IsUInteger8()) { + lbu(dst, src); + } else if (r.IsInteger16()) { + lh(dst, src); + } else if (r.IsUInteger16()) { + lhu(dst, src); + } else if (r.IsInteger32()) { + lw(dst, src); + } else { + ld(dst, src); + } +} + + +void MacroAssembler::Store(Register src, + const MemOperand& dst, + Representation r) { + DCHECK(!r.IsDouble()); + if (r.IsInteger8() || r.IsUInteger8()) { + sb(src, dst); + } else if (r.IsInteger16() || r.IsUInteger16()) { + sh(src, dst); + } else if (r.IsInteger32()) { + sw(src, dst); + } else { + if (r.IsHeapObject()) { + AssertNotSmi(src); + } else if (r.IsSmi()) { + AssertSmi(src); + } + sd(src, dst); + } +} + + +void MacroAssembler::LoadRoot(Register destination, + Heap::RootListIndex index) { + ld(destination, MemOperand(s6, index << kPointerSizeLog2)); +} + + +void MacroAssembler::LoadRoot(Register destination, + Heap::RootListIndex index, + Condition cond, + Register src1, const Operand& src2) { + Branch(2, NegateCondition(cond), src1, src2); + ld(destination, MemOperand(s6, index << kPointerSizeLog2)); +} + + +void MacroAssembler::StoreRoot(Register source, + Heap::RootListIndex index) { + sd(source, MemOperand(s6, index << kPointerSizeLog2)); +} + + +void MacroAssembler::StoreRoot(Register source, + Heap::RootListIndex index, + 
Condition cond, + Register src1, const Operand& src2) { + Branch(2, NegateCondition(cond), src1, src2); + sd(source, MemOperand(s6, index << kPointerSizeLog2)); +} + + +// Push and pop all registers that can hold pointers. +void MacroAssembler::PushSafepointRegisters() { + // Safepoints expect a block of kNumSafepointRegisters values on the + // stack, so adjust the stack for unsaved registers. + const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; + DCHECK(num_unsaved >= 0); + if (num_unsaved > 0) { + Dsubu(sp, sp, Operand(num_unsaved * kPointerSize)); + } + MultiPush(kSafepointSavedRegisters); +} + + +void MacroAssembler::PopSafepointRegisters() { + const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; + MultiPop(kSafepointSavedRegisters); + if (num_unsaved > 0) { + Daddu(sp, sp, Operand(num_unsaved * kPointerSize)); + } +} + + +void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { + sd(src, SafepointRegisterSlot(dst)); +} + + +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + ld(dst, SafepointRegisterSlot(src)); +} + + +int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { + // The registers are pushed starting with the highest encoding, + // which means that lowest encodings are closest to the stack pointer. + return kSafepointRegisterStackIndexMap[reg_code]; +} + + +MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { + return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + +MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { + UNIMPLEMENTED_MIPS(); + // General purpose registers are pushed last on the stack. 
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize; + int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; + return MemOperand(sp, doubles_size + register_offset); +} + + +void MacroAssembler::InNewSpace(Register object, + Register scratch, + Condition cc, + Label* branch) { + DCHECK(cc == eq || cc == ne); + And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); + Branch(branch, cc, scratch, + Operand(ExternalReference::new_space_start(isolate()))); +} + + +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!AreAliased(value, dst, t8, object)); + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. + Label done; + + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done); + } + + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + DCHECK(IsAligned(offset, kPointerSize)); + + Daddu(dst, object, Operand(offset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + And(t8, dst, Operand((1 << kPointerSizeLog2) - 1)); + Branch(&ok, eq, t8, Operand(zero_reg)); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + RecordWrite(object, + dst, + value, + ra_status, + save_fp, + remembered_set_action, + OMIT_SMI_CHECK, + pointers_to_here_check_for_value); + + bind(&done); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + li(value, Operand(BitCast<int64_t>(kZapValue + 4))); + li(dst, Operand(BitCast<int64_t>(kZapValue + 8))); + } +} + + +// Will clobber 4 registers: object, map, dst, ip. 
The +// register 'object' contains a heap object pointer. +void MacroAssembler::RecordWriteForMap(Register object, + Register map, + Register dst, + RAStatus ra_status, + SaveFPRegsMode fp_mode) { + if (emit_debug_code()) { + DCHECK(!dst.is(at)); + ld(dst, FieldMemOperand(map, HeapObject::kMapOffset)); + Check(eq, + kWrongAddressOrValuePassedToRecordWrite, + dst, + Operand(isolate()->factory()->meta_map())); + } + + if (!FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + ld(at, FieldMemOperand(object, HeapObject::kMapOffset)); + Check(eq, + kWrongAddressOrValuePassedToRecordWrite, + map, + Operand(at)); + } + + Label done; + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. This optimization + // relies on the fact that maps can never be in new space. + CheckPageFlag(map, + map, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + + Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + And(at, dst, Operand((1 << kPointerSizeLog2) - 1)); + Branch(&ok, eq, at, Operand(zero_reg)); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + // Record the actual write. + if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, + fp_mode); + CallStub(&stub); + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } + + bind(&done); + + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. 
+ if (emit_debug_code()) { + li(dst, Operand(BitCast<int64_t>(kZapValue + 12))); + li(map, Operand(BitCast<int64_t>(kZapValue + 16))); + } +} + + +// Will clobber 4 registers: object, address, scratch, ip. The +// register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + RAStatus ra_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!AreAliased(object, address, value, t8)); + DCHECK(!AreAliased(object, address, value, t9)); + + if (emit_debug_code()) { + ld(at, MemOperand(address)); + Assert( + eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); + } + + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } + + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. + Label done; + + if (smi_check == INLINE_SMI_CHECK) { + DCHECK_EQ(0, kSmiTag); + JumpIfSmi(value, &done); + } + + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + } + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + eq, + &done); + + // Record the actual write. + if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); + CallStub(&stub); + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } + + bind(&done); + + // Count number of write barriers in generated code. 
+ isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, + value); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + li(address, Operand(BitCast<int64_t>(kZapValue + 12))); + li(value, Operand(BitCast<int64_t>(kZapValue + 16))); + } +} + + +void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. + Register address, + Register scratch, + SaveFPRegsMode fp_mode, + RememberedSetFinalAction and_then) { + Label done; + if (emit_debug_code()) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok); + stop("Remembered set pointer is in new space"); + bind(&ok); + } + // Load store buffer top. + ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + li(t8, Operand(store_buffer)); + ld(scratch, MemOperand(t8)); + // Store pointer to buffer and increment buffer top. + sd(address, MemOperand(scratch)); + Daddu(scratch, scratch, kPointerSize); + // Write back new top of buffer. + sd(scratch, MemOperand(t8)); + // Call stub on end of buffer. + // Check for end of buffer. + And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); + DCHECK(!scratch.is(t8)); + if (and_then == kFallThroughAtEnd) { + Branch(&done, eq, t8, Operand(zero_reg)); + } else { + DCHECK(and_then == kReturnAtEnd); + Ret(eq, t8, Operand(zero_reg)); + } + push(ra); + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(isolate(), fp_mode); + CallStub(&store_buffer_overflow); + pop(ra); + bind(&done); + if (and_then == kReturnAtEnd) { + Ret(); + } +} + + +// ----------------------------------------------------------------------------- +// Allocation support. 
+ + +void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, + Register scratch, + Label* miss) { + Label same_contexts; + + DCHECK(!holder_reg.is(scratch)); + DCHECK(!holder_reg.is(at)); + DCHECK(!scratch.is(at)); + + // Load current lexical context from the stack frame. + ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // In debug mode, make sure the lexical context is set. +#ifdef DEBUG + Check(ne, kWeShouldNotHaveAnEmptyLexicalContext, + scratch, Operand(zero_reg)); +#endif + + // Load the native context of the current context. + int offset = + Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; + ld(scratch, FieldMemOperand(scratch, offset)); + ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); + + // Check the context is a native context. + if (emit_debug_code()) { + push(holder_reg); // Temporarily save holder on the stack. + // Read the first word and compare to the native_context_map. + ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); + LoadRoot(at, Heap::kNativeContextMapRootIndex); + Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext, + holder_reg, Operand(at)); + pop(holder_reg); // Restore holder. + } + + // Check if both contexts are the same. + ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); + Branch(&same_contexts, eq, scratch, Operand(at)); + + // Check the context is a native context. + if (emit_debug_code()) { + push(holder_reg); // Temporarily save holder on the stack. + mov(holder_reg, at); // Move at to its holding place. + LoadRoot(at, Heap::kNullValueRootIndex); + Check(ne, kJSGlobalProxyContextShouldNotBeNull, + holder_reg, Operand(at)); + + ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); + LoadRoot(at, Heap::kNativeContextMapRootIndex); + Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext, + holder_reg, Operand(at)); + // Restore at is not needed. at is reloaded below. 
+ pop(holder_reg); // Restore holder. + // Restore at to holder's context. + ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); + } + + // Check that the security token in the calling global object is + // compatible with the security token in the receiving global + // object. + int token_offset = Context::kHeaderSize + + Context::SECURITY_TOKEN_INDEX * kPointerSize; + + ld(scratch, FieldMemOperand(scratch, token_offset)); + ld(at, FieldMemOperand(at, token_offset)); + Branch(miss, ne, scratch, Operand(at)); + + bind(&same_contexts); +} + + +// Compute the hash code from the untagged key. This must be kept in sync with +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in +// code-stub-hydrogen.cc +void MacroAssembler::GetNumberHash(Register reg0, Register scratch) { + // First of all we assign the hash seed to scratch. + LoadRoot(scratch, Heap::kHashSeedRootIndex); + SmiUntag(scratch); + + // Xor original key with a seed. + xor_(reg0, reg0, scratch); + + // Compute the hash code from the untagged key. This must be kept in sync + // with ComputeIntegerHash in utils.h. + // + // hash = ~hash + (hash << 15); + // The algorithm uses 32-bit integer values. + nor(scratch, reg0, zero_reg); + sll(at, reg0, 15); + addu(reg0, scratch, at); + + // hash = hash ^ (hash >> 12); + srl(at, reg0, 12); + xor_(reg0, reg0, at); + + // hash = hash + (hash << 2); + sll(at, reg0, 2); + addu(reg0, reg0, at); + + // hash = hash ^ (hash >> 4); + srl(at, reg0, 4); + xor_(reg0, reg0, at); + + // hash = hash * 2057; + sll(scratch, reg0, 11); + sll(at, reg0, 3); + addu(reg0, reg0, at); + addu(reg0, reg0, scratch); + + // hash = hash ^ (hash >> 16); + srl(at, reg0, 16); + xor_(reg0, reg0, at); +} + + +void MacroAssembler::LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register result, + Register reg0, + Register reg1, + Register reg2) { + // Register use: + // + // elements - holds the slow-case elements of the receiver on entry. 
+ // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // + // result - holds the result on exit if the load succeeded. + // Allowed to be the same as 'key' or 'result'. + // Unchanged on bailout so 'key' or 'result' can be used + // in further computation. + // + // Scratch registers: + // + // reg0 - holds the untagged key on entry and holds the hash once computed. + // + // reg1 - Used to hold the capacity mask of the dictionary. + // + // reg2 - Used for the index into the dictionary. + // at - Temporary (avoid MacroAssembler instructions also using 'at'). + Label done; + + GetNumberHash(reg0, reg1); + + // Compute the capacity mask. + ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); + SmiUntag(reg1, reg1); + Dsubu(reg1, reg1, Operand(1)); + + // Generate an unrolled loop that performs a few probes before giving up. + for (int i = 0; i < kNumberDictionaryProbes; i++) { + // Use reg2 for index calculations and keep the hash intact in reg0. + mov(reg2, reg0); + // Compute the masked index: (hash + i + i * i) & mask. + if (i > 0) { + Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i))); + } + and_(reg2, reg2, reg1); + + // Scale the index by multiplying by the element size. + DCHECK(SeededNumberDictionary::kEntrySize == 3); + dsll(at, reg2, 1); // 2x. + daddu(reg2, reg2, at); // reg2 = reg2 * 3. + + // Check if the key is identical to the name. + dsll(at, reg2, kPointerSizeLog2); + daddu(reg2, elements, at); + + ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset)); + if (i != kNumberDictionaryProbes - 1) { + Branch(&done, eq, key, Operand(at)); + } else { + Branch(miss, ne, key, Operand(at)); + } + } + + bind(&done); + // Check that the value is a normal property. + // reg2: elements + (index * kPointerSize). 
+ const int kDetailsOffset = + SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; + ld(reg1, FieldMemOperand(reg2, kDetailsOffset)); + And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); + Branch(miss, ne, at, Operand(zero_reg)); + + // Get the value at the masked, scaled index and return. + const int kValueOffset = + SeededNumberDictionary::kElementsStartOffset + kPointerSize; + ld(result, FieldMemOperand(reg2, kValueOffset)); +} + + +// --------------------------------------------------------------------------- +// Instruction macros. + +void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + addu(rd, rs, rt.rm()); + } else { + if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + addiu(rd, rs, rt.imm64_); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + addu(rd, rs, at); + } + } +} + + +void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + daddu(rd, rs, rt.rm()); + } else { + if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + daddiu(rd, rs, rt.imm64_); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + daddu(rd, rs, at); + } + } +} + + +void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + subu(rd, rs, rt.rm()); + } else { + if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm). + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + subu(rd, rs, at); + } + } +} + + +void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + dsubu(rd, rs, rt.rm()); + } else { + if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + daddiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm). + } else { + // li handles the relocation. 
+ DCHECK(!rs.is(at)); + li(at, rt); + dsubu(rd, rs, at); + } + } +} + + +void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + mul(rd, rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + mul(rd, rs, at); + } +} + + +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (kArchVariant != kMips64r6) { + mult(rs, rt.rm()); + mfhi(rd); + } else { + muh(rd, rs, rt.rm()); + } + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + if (kArchVariant != kMips64r6) { + mult(rs, at); + mfhi(rd); + } else { + muh(rd, rs, at); + } + } +} + + +void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (kArchVariant == kMips64r6) { + dmul(rd, rs, rt.rm()); + } else { + dmult(rs, rt.rm()); + mflo(rd); + } + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + if (kArchVariant == kMips64r6) { + dmul(rd, rs, at); + } else { + dmult(rs, at); + mflo(rd); + } + } +} + + +void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (kArchVariant == kMips64r6) { + dmuh(rd, rs, rt.rm()); + } else { + dmult(rs, rt.rm()); + mfhi(rd); + } + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + if (kArchVariant == kMips64r6) { + dmuh(rd, rs, at); + } else { + dmult(rs, at); + mfhi(rd); + } + } +} + + +void MacroAssembler::Mult(Register rs, const Operand& rt) { + if (rt.is_reg()) { + mult(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + mult(rs, at); + } +} + + +void MacroAssembler::Dmult(Register rs, const Operand& rt) { + if (rt.is_reg()) { + dmult(rs, rt.rm()); + } else { + // li handles the relocation. 
+ DCHECK(!rs.is(at)); + li(at, rt); + dmult(rs, at); + } +} + + +void MacroAssembler::Multu(Register rs, const Operand& rt) { + if (rt.is_reg()) { + multu(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + multu(rs, at); + } +} + + +void MacroAssembler::Dmultu(Register rs, const Operand& rt) { + if (rt.is_reg()) { + dmultu(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + dmultu(rs, at); + } +} + + +void MacroAssembler::Div(Register rs, const Operand& rt) { + if (rt.is_reg()) { + div(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + div(rs, at); + } +} + + +void MacroAssembler::Ddiv(Register rs, const Operand& rt) { + if (rt.is_reg()) { + ddiv(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + ddiv(rs, at); + } +} + + +void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { + if (kArchVariant != kMips64r6) { + if (rt.is_reg()) { + ddiv(rs, rt.rm()); + mflo(rd); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + ddiv(rs, at); + mflo(rd); + } + } else { + if (rt.is_reg()) { + ddiv(rd, rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + ddiv(rd, rs, at); + } + } +} + + +void MacroAssembler::Divu(Register rs, const Operand& rt) { + if (rt.is_reg()) { + divu(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + divu(rs, at); + } +} + + +void MacroAssembler::Ddivu(Register rs, const Operand& rt) { + if (rt.is_reg()) { + ddivu(rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + ddivu(rs, at); + } +} + + +void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { + if (kArchVariant != kMips64r6) { + if (rt.is_reg()) { + ddiv(rs, rt.rm()); + mfhi(rd); + } else { + // li handles the relocation. 
+ DCHECK(!rs.is(at)); + li(at, rt); + ddiv(rs, at); + mfhi(rd); + } + } else { + if (rt.is_reg()) { + dmod(rd, rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + dmod(rd, rs, at); + } + } +} + + +void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + and_(rd, rs, rt.rm()); + } else { + if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + andi(rd, rs, rt.imm64_); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + and_(rd, rs, at); + } + } +} + + +void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + or_(rd, rs, rt.rm()); + } else { + if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + ori(rd, rs, rt.imm64_); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + or_(rd, rs, at); + } + } +} + + +void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + xor_(rd, rs, rt.rm()); + } else { + if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + xori(rd, rs, rt.imm64_); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + xor_(rd, rs, at); + } + } +} + + +void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + nor(rd, rs, rt.rm()); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + nor(rd, rs, at); + } +} + + +void MacroAssembler::Neg(Register rs, const Operand& rt) { + DCHECK(rt.is_reg()); + DCHECK(!at.is(rs)); + DCHECK(!at.is(rt.rm())); + li(at, -1); + xor_(rs, rt.rm(), at); +} + + +void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rs, rt.rm()); + } else { + if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + slti(rd, rs, rt.imm64_); + } else { + // li handles the relocation. 
+ DCHECK(!rs.is(at)); + li(at, rt); + slt(rd, rs, at); + } + } +} + + +void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rs, rt.rm()); + } else { + if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { + sltiu(rd, rs, rt.imm64_); + } else { + // li handles the relocation. + DCHECK(!rs.is(at)); + li(at, rt); + sltu(rd, rs, at); + } + } +} + + +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { + if (kArchVariant == kMips64r2) { + if (rt.is_reg()) { + rotrv(rd, rs, rt.rm()); + } else { + rotr(rd, rs, rt.imm64_); + } + } else { + if (rt.is_reg()) { + subu(at, zero_reg, rt.rm()); + sllv(at, rs, at); + srlv(rd, rs, rt.rm()); + or_(rd, rd, at); + } else { + if (rt.imm64_ == 0) { + srl(rd, rs, 0); + } else { + srl(at, rs, rt.imm64_); + sll(rd, rs, (0x20 - rt.imm64_) & 0x1f); + or_(rd, rd, at); + } + } + } +} + + +void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + drotrv(rd, rs, rt.rm()); + } else { + drotr(rd, rs, rt.imm64_); + } +} + + +void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { + pref(hint, rs); +} + + +// ------------Pseudo-instructions------------- + +void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { + lwr(rd, rs); + lwl(rd, MemOperand(rs.rm(), rs.offset() + 3)); +} + + +void MacroAssembler::Usw(Register rd, const MemOperand& rs) { + swr(rd, rs); + swl(rd, MemOperand(rs.rm(), rs.offset() + 3)); +} + + +// Do 64-bit load from unaligned address. Note this only handles +// the specific case of 32-bit aligned, but not 64-bit aligned. +void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) { + // Assert fail if the offset from start of object IS actually aligned. + // ONLY use with known misalignment, since there is performance cost. + DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1)); + // TODO(plind): endian dependency. 
+ lwu(rd, rs); + lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); + dsll32(scratch, scratch, 0); + Daddu(rd, rd, scratch); +} + + +// Do 64-bit store to unaligned address. Note this only handles +// the specific case of 32-bit aligned, but not 64-bit aligned. +void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) { + // Assert fail if the offset from start of object IS actually aligned. + // ONLY use with known misalignment, since there is performance cost. + DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1)); + // TODO(plind): endian dependency. + sw(rd, rs); + dsrl32(scratch, rd, 0); + sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); +} + + +void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { + AllowDeferredHandleDereference smi_check; + if (value->IsSmi()) { + li(dst, Operand(value), mode); + } else { + DCHECK(value->IsHeapObject()); + if (isolate()->heap()->InNewSpace(*value)) { + Handle<Cell> cell = isolate()->factory()->NewCell(value); + li(dst, Operand(cell)); + ld(dst, FieldMemOperand(dst, Cell::kValueOffset)); + } else { + li(dst, Operand(value)); + } + } +} + + +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { + DCHECK(!j.is_reg()); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { + // Normal load of an immediate value which does not need Relocation Info. 
+ if (is_int32(j.imm64_)) { + if (is_int16(j.imm64_)) { + daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); + } else if (!(j.imm64_ & kHiMask)) { + ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); + } else if (!(j.imm64_ & kImm16Mask)) { + lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); + } else { + lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); + ori(rd, rd, (j.imm64_ & kImm16Mask)); + } + } else { + lui(rd, (j.imm64_ >> 48) & kImm16Mask); + ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); + dsll(rd, rd, 16); + ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); + dsll(rd, rd, 16); + ori(rd, rd, j.imm64_ & kImm16Mask); + } + } else if (MustUseReg(j.rmode_)) { + RecordRelocInfo(j.rmode_, j.imm64_); + lui(rd, (j.imm64_ >> 32) & kImm16Mask); + ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); + dsll(rd, rd, 16); + ori(rd, rd, j.imm64_ & kImm16Mask); + } else if (mode == ADDRESS_LOAD) { + // We always need the same number of instructions as we may need to patch + // this code to load another value which may need all 4 instructions. 
+ lui(rd, (j.imm64_ >> 32) & kImm16Mask); + ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); + dsll(rd, rd, 16); + ori(rd, rd, j.imm64_ & kImm16Mask); + } else { + lui(rd, (j.imm64_ >> 48) & kImm16Mask); + ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); + dsll(rd, rd, 16); + ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); + dsll(rd, rd, 16); + ori(rd, rd, j.imm64_ & kImm16Mask); + } +} + + +void MacroAssembler::MultiPush(RegList regs) { + int16_t num_to_push = NumberOfBitsSet(regs); + int16_t stack_offset = num_to_push * kPointerSize; + + Dsubu(sp, sp, Operand(stack_offset)); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs & (1 << i)) != 0) { + stack_offset -= kPointerSize; + sd(ToRegister(i), MemOperand(sp, stack_offset)); + } + } +} + + +void MacroAssembler::MultiPushReversed(RegList regs) { + int16_t num_to_push = NumberOfBitsSet(regs); + int16_t stack_offset = num_to_push * kPointerSize; + + Dsubu(sp, sp, Operand(stack_offset)); + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs & (1 << i)) != 0) { + stack_offset -= kPointerSize; + sd(ToRegister(i), MemOperand(sp, stack_offset)); + } + } +} + + +void MacroAssembler::MultiPop(RegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs & (1 << i)) != 0) { + ld(ToRegister(i), MemOperand(sp, stack_offset)); + stack_offset += kPointerSize; + } + } + daddiu(sp, sp, stack_offset); +} + + +void MacroAssembler::MultiPopReversed(RegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs & (1 << i)) != 0) { + ld(ToRegister(i), MemOperand(sp, stack_offset)); + stack_offset += kPointerSize; + } + } + daddiu(sp, sp, stack_offset); +} + + +void MacroAssembler::MultiPushFPU(RegList regs) { + int16_t num_to_push = NumberOfBitsSet(regs); + int16_t stack_offset = num_to_push * kDoubleSize; + + Dsubu(sp, sp, Operand(stack_offset)); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs & (1 << i)) != 0) { + 
stack_offset -= kDoubleSize; + sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + } + } +} + + +void MacroAssembler::MultiPushReversedFPU(RegList regs) { + int16_t num_to_push = NumberOfBitsSet(regs); + int16_t stack_offset = num_to_push * kDoubleSize; + + Dsubu(sp, sp, Operand(stack_offset)); + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs & (1 << i)) != 0) { + stack_offset -= kDoubleSize; + sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + } + } +} + + +void MacroAssembler::MultiPopFPU(RegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs & (1 << i)) != 0) { + ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + stack_offset += kDoubleSize; + } + } + daddiu(sp, sp, stack_offset); +} + + +void MacroAssembler::MultiPopReversedFPU(RegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs & (1 << i)) != 0) { + ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + stack_offset += kDoubleSize; + } + } + daddiu(sp, sp, stack_offset); +} + + +void MacroAssembler::FlushICache(Register address, unsigned instructions) { + RegList saved_regs = kJSCallerSaved | ra.bit(); + MultiPush(saved_regs); + AllowExternalCallThatCantCauseGC scope(this); + + // Save to a0 in case address == a4. 
+ Move(a0, address); + PrepareCallCFunction(2, a4); + + li(a1, instructions * kInstrSize); + CallCFunction(ExternalReference::flush_icache_function(isolate()), 2); + MultiPop(saved_regs); +} + + +void MacroAssembler::Ext(Register rt, + Register rs, + uint16_t pos, + uint16_t size) { + DCHECK(pos < 32); + DCHECK(pos + size < 33); + ext_(rt, rs, pos, size); +} + + +void MacroAssembler::Ins(Register rt, + Register rs, + uint16_t pos, + uint16_t size) { + DCHECK(pos < 32); + DCHECK(pos + size <= 32); + DCHECK(size != 0); + ins_(rt, rs, pos, size); +} + + +void MacroAssembler::Cvt_d_uw(FPURegister fd, + FPURegister fs, + FPURegister scratch) { + // Move the data from fs to t8. + mfc1(t8, fs); + Cvt_d_uw(fd, t8, scratch); +} + + +void MacroAssembler::Cvt_d_uw(FPURegister fd, + Register rs, + FPURegister scratch) { + // Convert rs to a FP value in fd (and fd + 1). + // We do this by converting rs minus the MSB to avoid sign conversion, + // then adding 2^31 to the result (if needed). + + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(t9)); + DCHECK(!rs.is(at)); + + // Save rs's MSB to t9. + Ext(t9, rs, 31, 1); + // Remove rs's MSB. + Ext(at, rs, 0, 31); + // Move the result to fd. + mtc1(at, fd); + mthc1(zero_reg, fd); + + // Convert fd to a real FP value. + cvt_d_w(fd, fd); + + Label conversion_done; + + // If rs's MSB was 0, it's done. + // Otherwise we need to add that to the FP register. + Branch(&conversion_done, eq, t9, Operand(zero_reg)); + + // Load 2^31 into f20 as its float representation. + li(at, 0x41E00000); + mtc1(zero_reg, scratch); + mthc1(at, scratch); + // Add it to fd. 
+ add_d(fd, fd, scratch); + + bind(&conversion_done); +} + + +void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) { + round_l_d(fd, fs); +} + + +void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) { + floor_l_d(fd, fs); +} + + +void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) { + ceil_l_d(fd, fs); +} + + +void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) { + trunc_l_d(fd, fs); +} + + +void MacroAssembler::Trunc_l_ud(FPURegister fd, + FPURegister fs, + FPURegister scratch) { + // Load to GPR. + dmfc1(t8, fs); + // Reset sign bit. + li(at, 0x7fffffffffffffff); + and_(t8, t8, at); + dmtc1(t8, fs); + trunc_l_d(fd, fs); +} + + +void MacroAssembler::Trunc_uw_d(FPURegister fd, + FPURegister fs, + FPURegister scratch) { + Trunc_uw_d(fs, t8, scratch); + mtc1(t8, fd); +} + + +void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { + trunc_w_d(fd, fs); +} + + +void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { + round_w_d(fd, fs); +} + + +void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { + floor_w_d(fd, fs); +} + + +void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { + ceil_w_d(fd, fs); +} + + +void MacroAssembler::Trunc_uw_d(FPURegister fd, + Register rs, + FPURegister scratch) { + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(at)); + + // Load 2^31 into scratch as its float representation. + li(at, 0x41E00000); + mtc1(zero_reg, scratch); + mthc1(at, scratch); + // Test if scratch > fd. + // If fd < 2^31 we can convert it normally. + Label simple_convert; + BranchF(&simple_convert, NULL, lt, fd, scratch); + + // First we subtract 2^31 from fd, then trunc it to rs + // and add 2^31 to rs. + sub_d(scratch, fd, scratch); + trunc_w_d(scratch, scratch); + mfc1(rs, scratch); + Or(rs, rs, 1 << 31); + + Label done; + Branch(&done); + // Simple conversion. 
+ bind(&simple_convert); + trunc_w_d(scratch, fd); + mfc1(rs, scratch); + + bind(&done); +} + + +void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft, FPURegister scratch) { + if (0) { // TODO(plind): find reasonable arch-variant symbol names. + madd_d(fd, fr, fs, ft); + } else { + // Can not change source regs's value. + DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch)); + mul_d(scratch, fs, ft); + add_d(fd, fr, scratch); + } +} + + +void MacroAssembler::BranchF(Label* target, + Label* nan, + Condition cc, + FPURegister cmp1, + FPURegister cmp2, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cc == al) { + Branch(bd, target); + return; + } + + DCHECK(nan || target); + // Check for unordered (NaN) cases. + if (nan) { + if (kArchVariant != kMips64r6) { + c(UN, D, cmp1, cmp2); + bc1t(nan); + } else { + // Use f31 for comparison result. It has to be unavailable to lithium + // register allocator. + DCHECK(!cmp1.is(f31) && !cmp2.is(f31)); + cmp(UN, L, f31, cmp1, cmp2); + bc1nez(nan, f31); + } + } + + if (kArchVariant != kMips64r6) { + if (target) { + // Here NaN cases were either handled by this function or are assumed to + // have been handled by the caller. + switch (cc) { + case lt: + c(OLT, D, cmp1, cmp2); + bc1t(target); + break; + case gt: + c(ULE, D, cmp1, cmp2); + bc1f(target); + break; + case ge: + c(ULT, D, cmp1, cmp2); + bc1f(target); + break; + case le: + c(OLE, D, cmp1, cmp2); + bc1t(target); + break; + case eq: + c(EQ, D, cmp1, cmp2); + bc1t(target); + break; + case ueq: + c(UEQ, D, cmp1, cmp2); + bc1t(target); + break; + case ne: + c(EQ, D, cmp1, cmp2); + bc1f(target); + break; + case nue: + c(UEQ, D, cmp1, cmp2); + bc1f(target); + break; + default: + CHECK(0); + } + } + } else { + if (target) { + // Here NaN cases were either handled by this function or are assumed to + // have been handled by the caller. 
+ // Unsigned conditions are treated as their signed counterpart. + // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode. + DCHECK(!cmp1.is(f31) && !cmp2.is(f31)); + switch (cc) { + case lt: + cmp(OLT, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case gt: + cmp(ULE, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + case ge: + cmp(ULT, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + case le: + cmp(OLE, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case eq: + cmp(EQ, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case ueq: + cmp(UEQ, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case ne: + cmp(EQ, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + case nue: + cmp(UEQ, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + default: + CHECK(0); + } + } + } + + if (bd == PROTECT) { + nop(); + } +} + + +void MacroAssembler::Move(FPURegister dst, double imm) { + static const DoubleRepresentation minus_zero(-0.0); + static const DoubleRepresentation zero(0.0); + DoubleRepresentation value_rep(imm); + // Handle special values first. + bool force_load = dst.is(kDoubleRegZero); + if (value_rep == zero && !force_load) { + mov_d(dst, kDoubleRegZero); + } else if (value_rep == minus_zero && !force_load) { + neg_d(dst, kDoubleRegZero); + } else { + uint32_t lo, hi; + DoubleAsTwoUInt32(imm, &lo, &hi); + // Move the low part of the double into the lower bits of the corresponding + // FPU register. + if (lo != 0) { + li(at, Operand(lo)); + mtc1(at, dst); + } else { + mtc1(zero_reg, dst); + } + // Move the high part of the double into the high bits of the corresponding + // FPU register. 
+ if (hi != 0) { + li(at, Operand(hi)); + mthc1(at, dst); + } else { + mthc1(zero_reg, dst); + } + } +} + + +void MacroAssembler::Movz(Register rd, Register rs, Register rt) { + if (kArchVariant == kMips64r6) { + Label done; + Branch(&done, ne, rt, Operand(zero_reg)); + mov(rd, rs); + bind(&done); + } else { + movz(rd, rs, rt); + } +} + + +void MacroAssembler::Movn(Register rd, Register rs, Register rt) { + if (kArchVariant == kMips64r6) { + Label done; + Branch(&done, eq, rt, Operand(zero_reg)); + mov(rd, rs); + bind(&done); + } else { + movn(rd, rs, rt); + } +} + + +void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { + movt(rd, rs, cc); +} + + +void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { + movf(rd, rs, cc); +} + + +void MacroAssembler::Clz(Register rd, Register rs) { + clz(rd, rs); +} + + +void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, + Register result, + DoubleRegister double_input, + Register scratch, + DoubleRegister double_scratch, + Register except_flag, + CheckForInexactConversion check_inexact) { + DCHECK(!result.is(scratch)); + DCHECK(!double_input.is(double_scratch)); + DCHECK(!except_flag.is(scratch)); + + Label done; + + // Clear the except flag (0 = no exception) + mov(except_flag, zero_reg); + + // Test for values that can be exactly represented as a signed 32-bit integer. + cvt_w_d(double_scratch, double_input); + mfc1(result, double_scratch); + cvt_d_w(double_scratch, double_scratch); + BranchF(&done, NULL, eq, double_input, double_scratch); + + int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions. + + if (check_inexact == kDontCheckForInexactConversion) { + // Ignore inexact exceptions. + except_mask &= ~kFCSRInexactFlagMask; + } + + // Save FCSR. + cfc1(scratch, FCSR); + // Disable FPU exceptions. + ctc1(zero_reg, FCSR); + + // Do operation based on rounding mode. 
+ switch (rounding_mode) { + case kRoundToNearest: + Round_w_d(double_scratch, double_input); + break; + case kRoundToZero: + Trunc_w_d(double_scratch, double_input); + break; + case kRoundToPlusInf: + Ceil_w_d(double_scratch, double_input); + break; + case kRoundToMinusInf: + Floor_w_d(double_scratch, double_input); + break; + } // End of switch-statement. + + // Retrieve FCSR. + cfc1(except_flag, FCSR); + // Restore FCSR. + ctc1(scratch, FCSR); + // Move the converted value into the result register. + mfc1(result, double_scratch); + + // Check for fpu exceptions. + And(except_flag, except_flag, Operand(except_mask)); + + bind(&done); +} + + +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, + DoubleRegister double_input, + Label* done) { + DoubleRegister single_scratch = kLithiumScratchDouble.low(); + Register scratch = at; + Register scratch2 = t9; + + // Clear cumulative exception flags and save the FCSR. + cfc1(scratch2, FCSR); + ctc1(zero_reg, FCSR); + // Try a conversion to a signed integer. + trunc_w_d(single_scratch, double_input); + mfc1(result, single_scratch); + // Retrieve and restore the FCSR. + cfc1(scratch, FCSR); + ctc1(scratch2, FCSR); + // Check for overflow and NaNs. + And(scratch, + scratch, + kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask); + // If we had no exceptions we are done. + Branch(done, eq, scratch, Operand(zero_reg)); +} + + +void MacroAssembler::TruncateDoubleToI(Register result, + DoubleRegister double_input) { + Label done; + + TryInlineTruncateDoubleToI(result, double_input, &done); + + // If we fell through then inline version didn't succeed - call stub instead. + push(ra); + Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack. 
+ sdc1(double_input, MemOperand(sp, 0)); + + DoubleToIStub stub(isolate(), sp, result, 0, true, true); + CallStub(&stub); + + Daddu(sp, sp, Operand(kDoubleSize)); + pop(ra); + + bind(&done); +} + + +void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { + Label done; + DoubleRegister double_scratch = f12; + DCHECK(!result.is(object)); + + ldc1(double_scratch, + MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); + TryInlineTruncateDoubleToI(result, double_scratch, &done); + + // If we fell through then inline version didn't succeed - call stub instead. + push(ra); + DoubleToIStub stub(isolate(), + object, + result, + HeapNumber::kValueOffset - kHeapObjectTag, + true, + true); + CallStub(&stub); + pop(ra); + + bind(&done); +} + + +void MacroAssembler::TruncateNumberToI(Register object, + Register result, + Register heap_number_map, + Register scratch, + Label* not_number) { + Label done; + DCHECK(!result.is(object)); + + UntagAndJumpIfSmi(result, object, &done); + JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); + TruncateHeapNumberToI(result, object); + + bind(&done); +} + + +void MacroAssembler::GetLeastBitsFromSmi(Register dst, + Register src, + int num_least_bits) { + // Ext(dst, src, kSmiTagSize, num_least_bits); + SmiUntag(dst, src); + And(dst, dst, Operand((1 << num_least_bits) - 1)); +} + + +void MacroAssembler::GetLeastBitsFromInt32(Register dst, + Register src, + int num_least_bits) { + DCHECK(!src.is(dst)); + And(dst, src, Operand((1 << num_least_bits) - 1)); +} + + +// Emulated condtional branches do not emit a nop in the branch delay slot. +// +// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 
+#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \ + (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ + (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) + + +void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { + BranchShort(offset, bdslot); +} + + +void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + BranchShort(offset, cond, rs, rt, bdslot); +} + + +void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (is_near(L)) { + BranchShort(L, bdslot); + } else { + Jr(L, bdslot); + } + } else { + if (is_trampoline_emitted()) { + Jr(L, bdslot); + } else { + BranchShort(L, bdslot); + } + } +} + + +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (is_near(L)) { + BranchShort(L, cond, rs, rt, bdslot); + } else { + if (cond != cc_always) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jr(L, bdslot); + bind(&skip); + } else { + Jr(L, bdslot); + } + } + } else { + if (is_trampoline_emitted()) { + if (cond != cc_always) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jr(L, bdslot); + bind(&skip); + } else { + Jr(L, bdslot); + } + } else { + BranchShort(L, cond, rs, rt, bdslot); + } + } +} + + +void MacroAssembler::Branch(Label* L, + Condition cond, + Register rs, + Heap::RootListIndex index, + BranchDelaySlot bdslot) { + LoadRoot(at, index); + Branch(L, cond, rs, Operand(at), bdslot); +} + + +void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) { + b(offset); + + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + BRANCH_ARGS_CHECK(cond, rs, rt); + DCHECK(!rs.is(zero_reg)); + Register r2 = no_reg; + Register scratch = at; + + if (rt.is_reg()) { + // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or + // rt. + BlockTrampolinePoolScope block_trampoline_pool(this); + r2 = rt.rm_; + switch (cond) { + case cc_always: + b(offset); + break; + case eq: + beq(rs, r2, offset); + break; + case ne: + bne(rs, r2, offset); + break; + // Signed comparison. + case greater: + if (r2.is(zero_reg)) { + bgtz(rs, offset); + } else { + slt(scratch, r2, rs); + bne(scratch, zero_reg, offset); + } + break; + case greater_equal: + if (r2.is(zero_reg)) { + bgez(rs, offset); + } else { + slt(scratch, rs, r2); + beq(scratch, zero_reg, offset); + } + break; + case less: + if (r2.is(zero_reg)) { + bltz(rs, offset); + } else { + slt(scratch, rs, r2); + bne(scratch, zero_reg, offset); + } + break; + case less_equal: + if (r2.is(zero_reg)) { + blez(rs, offset); + } else { + slt(scratch, r2, rs); + beq(scratch, zero_reg, offset); + } + break; + // Unsigned comparison. + case Ugreater: + if (r2.is(zero_reg)) { + bgtz(rs, offset); + } else { + sltu(scratch, r2, rs); + bne(scratch, zero_reg, offset); + } + break; + case Ugreater_equal: + if (r2.is(zero_reg)) { + bgez(rs, offset); + } else { + sltu(scratch, rs, r2); + beq(scratch, zero_reg, offset); + } + break; + case Uless: + if (r2.is(zero_reg)) { + // No code needs to be emitted. 
+ return; + } else { + sltu(scratch, rs, r2); + bne(scratch, zero_reg, offset); + } + break; + case Uless_equal: + if (r2.is(zero_reg)) { + b(offset); + } else { + sltu(scratch, r2, rs); + beq(scratch, zero_reg, offset); + } + break; + default: + UNREACHABLE(); + } + } else { + // Be careful to always use shifted_branch_offset only just before the + // branch instruction, as the location will be remember for patching the + // target. + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + b(offset); + break; + case eq: + // We don't want any other register but scratch clobbered. + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + beq(rs, r2, offset); + break; + case ne: + // We don't want any other register but scratch clobbered. + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + bne(rs, r2, offset); + break; + // Signed comparison. + case greater: + if (rt.imm64_ == 0) { + bgtz(rs, offset); + } else { + r2 = scratch; + li(r2, rt); + slt(scratch, r2, rs); + bne(scratch, zero_reg, offset); + } + break; + case greater_equal: + if (rt.imm64_ == 0) { + bgez(rs, offset); + } else if (is_int16(rt.imm64_)) { + slti(scratch, rs, rt.imm64_); + beq(scratch, zero_reg, offset); + } else { + r2 = scratch; + li(r2, rt); + slt(scratch, rs, r2); + beq(scratch, zero_reg, offset); + } + break; + case less: + if (rt.imm64_ == 0) { + bltz(rs, offset); + } else if (is_int16(rt.imm64_)) { + slti(scratch, rs, rt.imm64_); + bne(scratch, zero_reg, offset); + } else { + r2 = scratch; + li(r2, rt); + slt(scratch, rs, r2); + bne(scratch, zero_reg, offset); + } + break; + case less_equal: + if (rt.imm64_ == 0) { + blez(rs, offset); + } else { + r2 = scratch; + li(r2, rt); + slt(scratch, r2, rs); + beq(scratch, zero_reg, offset); + } + break; + // Unsigned comparison. 
+ case Ugreater: + if (rt.imm64_ == 0) { + bgtz(rs, offset); + } else { + r2 = scratch; + li(r2, rt); + sltu(scratch, r2, rs); + bne(scratch, zero_reg, offset); + } + break; + case Ugreater_equal: + if (rt.imm64_ == 0) { + bgez(rs, offset); + } else if (is_int16(rt.imm64_)) { + sltiu(scratch, rs, rt.imm64_); + beq(scratch, zero_reg, offset); + } else { + r2 = scratch; + li(r2, rt); + sltu(scratch, rs, r2); + beq(scratch, zero_reg, offset); + } + break; + case Uless: + if (rt.imm64_ == 0) { + // No code needs to be emitted. + return; + } else if (is_int16(rt.imm64_)) { + sltiu(scratch, rs, rt.imm64_); + bne(scratch, zero_reg, offset); + } else { + r2 = scratch; + li(r2, rt); + sltu(scratch, rs, r2); + bne(scratch, zero_reg, offset); + } + break; + case Uless_equal: + if (rt.imm64_ == 0) { + b(offset); + } else { + r2 = scratch; + li(r2, rt); + sltu(scratch, r2, rs); + beq(scratch, zero_reg, offset); + } + break; + default: + UNREACHABLE(); + } + } + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { + // We use branch_offset as an argument for the branch instructions to be sure + // it is called just before generating the branch instruction, as needed. + + b(shifted_branch_offset(L, false)); + + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + BRANCH_ARGS_CHECK(cond, rs, rt); + + int32_t offset = 0; + Register r2 = no_reg; + Register scratch = at; + if (rt.is_reg()) { + BlockTrampolinePoolScope block_trampoline_pool(this); + r2 = rt.rm_; + // Be careful to always use shifted_branch_offset only just before the + // branch instruction, as the location will be remember for patching the + // target. 
+ switch (cond) { + case cc_always: + offset = shifted_branch_offset(L, false); + b(offset); + break; + case eq: + offset = shifted_branch_offset(L, false); + beq(rs, r2, offset); + break; + case ne: + offset = shifted_branch_offset(L, false); + bne(rs, r2, offset); + break; + // Signed comparison. + case greater: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + bgtz(rs, offset); + } else { + slt(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case greater_equal: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + bgez(rs, offset); + } else { + slt(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + case less: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + bltz(rs, offset); + } else { + slt(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case less_equal: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + blez(rs, offset); + } else { + slt(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + // Unsigned comparison. + case Ugreater: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + bgtz(rs, offset); + } else { + sltu(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case Ugreater_equal: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + bgez(rs, offset); + } else { + sltu(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + case Uless: + if (r2.is(zero_reg)) { + // No code needs to be emitted. 
+ return; + } else { + sltu(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case Uless_equal: + if (r2.is(zero_reg)) { + offset = shifted_branch_offset(L, false); + b(offset); + } else { + sltu(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + default: + UNREACHABLE(); + } + } else { + // Be careful to always use shifted_branch_offset only just before the + // branch instruction, as the location will be remember for patching the + // target. + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + offset = shifted_branch_offset(L, false); + b(offset); + break; + case eq: + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + offset = shifted_branch_offset(L, false); + beq(rs, r2, offset); + break; + case ne: + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + offset = shifted_branch_offset(L, false); + bne(rs, r2, offset); + break; + // Signed comparison. 
+ case greater: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + bgtz(rs, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + slt(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case greater_equal: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + bgez(rs, offset); + } else if (is_int16(rt.imm64_)) { + slti(scratch, rs, rt.imm64_); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + slt(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + case less: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + bltz(rs, offset); + } else if (is_int16(rt.imm64_)) { + slti(scratch, rs, rt.imm64_); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + slt(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case less_equal: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + blez(rs, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + slt(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + // Unsigned comparison. 
+ case Ugreater: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + bne(rs, zero_reg, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + sltu(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case Ugreater_equal: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + bgez(rs, offset); + } else if (is_int16(rt.imm64_)) { + sltiu(scratch, rs, rt.imm64_); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + sltu(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + case Uless: + if (rt.imm64_ == 0) { + // No code needs to be emitted. + return; + } else if (is_int16(rt.imm64_)) { + sltiu(scratch, rs, rt.imm64_); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + sltu(scratch, rs, r2); + offset = shifted_branch_offset(L, false); + bne(scratch, zero_reg, offset); + } + break; + case Uless_equal: + if (rt.imm64_ == 0) { + offset = shifted_branch_offset(L, false); + beq(rs, zero_reg, offset); + } else { + DCHECK(!scratch.is(rs)); + r2 = scratch; + li(r2, rt); + sltu(scratch, r2, rs); + offset = shifted_branch_offset(L, false); + beq(scratch, zero_reg, offset); + } + break; + default: + UNREACHABLE(); + } + } + // Check that offset could actually hold on an int16_t. + DCHECK(is_int16(offset)); + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) { + BranchAndLinkShort(offset, bdslot); +} + + +void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + BranchAndLinkShort(offset, cond, rs, rt, bdslot); +} + + +void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (is_near(L)) { + BranchAndLinkShort(L, bdslot); + } else { + Jalr(L, bdslot); + } + } else { + if (is_trampoline_emitted()) { + Jalr(L, bdslot); + } else { + BranchAndLinkShort(L, bdslot); + } + } +} + + +void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (is_near(L)) { + BranchAndLinkShort(L, cond, rs, rt, bdslot); + } else { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jalr(L, bdslot); + bind(&skip); + } + } else { + if (is_trampoline_emitted()) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jalr(L, bdslot); + bind(&skip); + } else { + BranchAndLinkShort(L, cond, rs, rt, bdslot); + } + } +} + + +// We need to use a bgezal or bltzal, but they can't be used directly with the +// slt instructions. We could use sub or add instead but we would miss overflow +// cases, so we keep slt and add an intermediate third instruction. +void MacroAssembler::BranchAndLinkShort(int16_t offset, + BranchDelaySlot bdslot) { + bal(offset); + + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot) { + BRANCH_ARGS_CHECK(cond, rs, rt); + Register r2 = no_reg; + Register scratch = at; + + if (rt.is_reg()) { + r2 = rt.rm_; + } else if (cond != cc_always) { + r2 = scratch; + li(r2, rt); + } + + { + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + bal(offset); + break; + case eq: + bne(rs, r2, 2); + nop(); + bal(offset); + break; + case ne: + beq(rs, r2, 2); + nop(); + bal(offset); + break; + + // Signed comparison. + case greater: + // rs > rt + slt(scratch, r2, rs); + beq(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + case greater_equal: + // rs >= rt + slt(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + case less: + // rs < r2 + slt(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + case less_equal: + // rs <= r2 + slt(scratch, r2, rs); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + + + // Unsigned comparison. + case Ugreater: + // rs > rt + sltu(scratch, r2, rs); + beq(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + case Ugreater_equal: + // rs >= rt + sltu(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + case Uless: + // rs < r2 + sltu(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + case Uless_equal: + // rs <= r2 + sltu(scratch, r2, rs); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); + break; + default: + UNREACHABLE(); + } + } + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { + bal(shifted_branch_offset(L, false)); + + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + BRANCH_ARGS_CHECK(cond, rs, rt); + + int32_t offset = 0; + Register r2 = no_reg; + Register scratch = at; + if (rt.is_reg()) { + r2 = rt.rm_; + } else if (cond != cc_always) { + r2 = scratch; + li(r2, rt); + } + + { + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case eq: + bne(rs, r2, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case ne: + beq(rs, r2, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + + // Signed comparison. + case greater: + // rs > rt + slt(scratch, r2, rs); + beq(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case greater_equal: + // rs >= rt + slt(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case less: + // rs < r2 + slt(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case less_equal: + // rs <= r2 + slt(scratch, r2, rs); + bne(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + + + // Unsigned comparison. 
+ case Ugreater: + // rs > rt + sltu(scratch, r2, rs); + beq(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case Ugreater_equal: + // rs >= rt + sltu(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case Uless: + // rs < r2 + sltu(scratch, rs, r2); + bne(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + case Uless_equal: + // rs <= r2 + sltu(scratch, r2, rs); + bne(scratch, zero_reg, 2); + nop(); + offset = shifted_branch_offset(L, false); + bal(offset); + break; + + default: + UNREACHABLE(); + } + } + // Check that offset could actually hold on an int16_t. + DCHECK(is_int16(offset)); + + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::Jump(Register target, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond == cc_always) { + jr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) + nop(); +} + + +void MacroAssembler::Jump(intptr_t target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + Label skip; + if (cond != cc_always) { + Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt); + } + // The first instruction of 'li' may be placed in the delay slot. + // This is not an issue, t9 is expected to be clobbered anyway. 
+ li(t9, Operand(target, rmode)); + Jump(t9, al, zero_reg, Operand(zero_reg), bd); + bind(&skip); +} + + +void MacroAssembler::Jump(Address target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + DCHECK(!RelocInfo::IsCodeTarget(rmode)); + Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd); +} + + +void MacroAssembler::Jump(Handle<Code> code, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + DCHECK(RelocInfo::IsCodeTarget(rmode)); + AllowDeferredHandleDereference embedding_raw_address; + Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); +} + + +int MacroAssembler::CallSize(Register target, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + int size = 0; + + if (cond == cc_always) { + size += 1; + } else { + size += 3; + } + + if (bd == PROTECT) + size += 1; + + return size * kInstrSize; +} + + +// Note: To call gcc-compiled C code on mips, you must call thru t9. +void MacroAssembler::Call(Register target, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label start; + bind(&start); + if (cond == cc_always) { + jalr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jalr(target); + } + // Emit a nop in the branch delay slot if required. 
+ if (bd == PROTECT) + nop(); + + DCHECK_EQ(CallSize(target, cond, rs, rt, bd), + SizeOfCodeGeneratedSince(&start)); +} + + +int MacroAssembler::CallSize(Address target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + int size = CallSize(t9, cond, rs, rt, bd); + return size + 4 * kInstrSize; +} + + +void MacroAssembler::Call(Address target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label start; + bind(&start); + int64_t target_int = reinterpret_cast<int64_t>(target); + // Must record previous source positions before the + // li() generates a new code target. + positions_recorder()->WriteRecordedPositions(); + li(t9, Operand(target_int, rmode), ADDRESS_LOAD); + Call(t9, cond, rs, rt, bd); + DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd), + SizeOfCodeGeneratedSince(&start)); +} + + +int MacroAssembler::CallSize(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId ast_id, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + AllowDeferredHandleDereference using_raw_address; + return CallSize(reinterpret_cast<Address>(code.location()), + rmode, cond, rs, rt, bd); +} + + +void MacroAssembler::Call(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId ast_id, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label start; + bind(&start); + DCHECK(RelocInfo::IsCodeTarget(rmode)); + if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { + SetRecordedAstId(ast_id); + rmode = RelocInfo::CODE_TARGET_WITH_ID; + } + AllowDeferredHandleDereference embedding_raw_address; + Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); + DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), + SizeOfCodeGeneratedSince(&start)); +} + + +void 
MacroAssembler::Ret(Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + Jump(ra, cond, rs, rt, bd); +} + + +void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) { + BlockTrampolinePoolScope block_trampoline_pool(this); + + uint64_t imm28; + imm28 = jump_address(L); + imm28 &= kImm28Mask; + { BlockGrowBufferScope block_buf_growth(this); + // Buffer growth (and relocation) must be blocked for internal references + // until associated instructions are emitted and available to be patched. + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); + j(imm28); + } + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) { + BlockTrampolinePoolScope block_trampoline_pool(this); + + uint64_t imm64; + imm64 = jump_address(L); + { BlockGrowBufferScope block_buf_growth(this); + // Buffer growth (and relocation) must be blocked for internal references + // until associated instructions are emitted and available to be patched. + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); + li(at, Operand(imm64), ADDRESS_LOAD); + } + jr(at); + + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) { + BlockTrampolinePoolScope block_trampoline_pool(this); + + uint64_t imm64; + imm64 = jump_address(L); + { BlockGrowBufferScope block_buf_growth(this); + // Buffer growth (and relocation) must be blocked for internal references + // until associated instructions are emitted and available to be patched. + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); + li(at, Operand(imm64), ADDRESS_LOAD); + } + jalr(at); + + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) + nop(); +} + + +void MacroAssembler::DropAndRet(int drop) { + Ret(USE_DELAY_SLOT); + daddiu(sp, sp, drop * kPointerSize); +} + +void MacroAssembler::DropAndRet(int drop, + Condition cond, + Register r1, + const Operand& r2) { + // Both Drop and Ret need to be conditional. + Label skip; + if (cond != cc_always) { + Branch(&skip, NegateCondition(cond), r1, r2); + } + + Drop(drop); + Ret(); + + if (cond != cc_always) { + bind(&skip); + } +} + + +void MacroAssembler::Drop(int count, + Condition cond, + Register reg, + const Operand& op) { + if (count <= 0) { + return; + } + + Label skip; + + if (cond != al) { + Branch(&skip, NegateCondition(cond), reg, op); + } + + daddiu(sp, sp, count * kPointerSize); + + if (cond != al) { + bind(&skip); + } +} + + + +void MacroAssembler::Swap(Register reg1, + Register reg2, + Register scratch) { + if (scratch.is(no_reg)) { + Xor(reg1, reg1, Operand(reg2)); + Xor(reg2, reg2, Operand(reg1)); + Xor(reg1, reg1, Operand(reg2)); + } else { + mov(scratch, reg1); + mov(reg1, reg2); + mov(reg2, scratch); + } +} + + +void MacroAssembler::Call(Label* target) { + BranchAndLink(target); +} + + +void MacroAssembler::Push(Handle<Object> handle) { + li(at, Operand(handle)); + push(at); +} + + +void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) { + DCHECK(!src.is(scratch)); + mov(scratch, src); + dsrl32(src, src, 0); + dsll32(src, src, 0); + push(src); + dsll32(scratch, scratch, 0); + push(scratch); +} + + +void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) { + DCHECK(!dst.is(scratch)); + pop(scratch); + dsrl32(scratch, scratch, 0); + pop(dst); + dsrl32(dst, dst, 0); + dsll32(dst, dst, 0); + or_(dst, dst, scratch); +} + + +void MacroAssembler::DebugBreak() { + PrepareCEntryArgs(0); + PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); + CEntryStub ces(isolate(), 1); + DCHECK(AllowThisStubCall(&ces)); + Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); +} + + 
+// --------------------------------------------------------------------------- +// Exception handling. + +void MacroAssembler::PushTryHandler(StackHandler::Kind kind, + int handler_index) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); + + // For the JSEntry handler, we must preserve a0-a3 and s0. + // a5-a7 are available. We will build up the handler from the bottom by + // pushing on the stack. + // Set up the code object (a5) and the state (a6) for pushing. + unsigned state = + StackHandler::IndexField::encode(handler_index) | + StackHandler::KindField::encode(kind); + li(a5, Operand(CodeObject()), CONSTANT_SIZE); + li(a6, Operand(state)); + + // Push the frame pointer, context, state, and code object. + if (kind == StackHandler::JS_ENTRY) { + DCHECK_EQ(Smi::FromInt(0), 0); + // The second zero_reg indicates no context. + // The first zero_reg is the NULL frame pointer. + // The operands are reversed to match the order of MultiPush/Pop. + Push(zero_reg, zero_reg, a6, a5); + } else { + MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit()); + } + + // Link the current handler as the next handler. + li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); + ld(a5, MemOperand(a6)); + push(a5); + // Set this new handler as the current one. 
+ sd(sp, MemOperand(a6)); +} + + +void MacroAssembler::PopTryHandler() { + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(a1); + Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); + li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); + sd(a1, MemOperand(at)); +} + + +void MacroAssembler::JumpToHandlerEntry() { + // Compute the handler entry address and jump to it. The handler table is + // a fixed array of (smi-tagged) code offsets. + // v0 = exception, a1 = code object, a2 = state. + Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); + Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + dsrl(a2, a2, StackHandler::kKindWidth); // Handler index. + dsll(a2, a2, kPointerSizeLog2); + Daddu(a2, a3, a2); + ld(a2, MemOperand(a2)); // Smi-tagged offset. + Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. + dsra32(t9, a2, 0); + Daddu(t9, t9, a1); + Jump(t9); // Jump. +} + + +void MacroAssembler::Throw(Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); + + // The exception is expected in v0. + Move(v0, value); + + // Drop the stack pointer to the top of the top handler. + li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, + isolate()))); + ld(sp, MemOperand(a3)); + + // Restore the next handler. + pop(a2); + sd(a2, MemOperand(a3)); + + // Get the code object (a1) and state (a2). Restore the context and frame + // pointer. + MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit()); + + // If the handler is a JS frame, restore the context to the frame. 
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp + // or cp. + Label done; + Branch(&done, eq, cp, Operand(zero_reg)); + sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + bind(&done); + + JumpToHandlerEntry(); +} + + +void MacroAssembler::ThrowUncatchable(Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); + + // The exception is expected in v0. + if (!value.is(v0)) { + mov(v0, value); + } + // Drop the stack pointer to the top of the top stack handler. + li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); + ld(sp, MemOperand(a3)); + + // Unwind the handlers until the ENTRY handler is found. + Label fetch_next, check_kind; + jmp(&check_kind); + bind(&fetch_next); + ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); + + bind(&check_kind); + STATIC_ASSERT(StackHandler::JS_ENTRY == 0); + ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset)); + And(a2, a2, Operand(StackHandler::KindField::kMask)); + Branch(&fetch_next, ne, a2, Operand(zero_reg)); + + // Set the top handler address to next handler past the top ENTRY handler. + pop(a2); + sd(a2, MemOperand(a3)); + + // Get the code object (a1) and state (a2). Clear the context and frame + // pointer (0 was saved in the handler). 
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit()); + + JumpToHandlerEntry(); +} + + +void MacroAssembler::Allocate(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); + if (!FLAG_inline_new) { + if (emit_debug_code()) { + // Trash the registers to simulate an allocation failure. + li(result, 0x7091); + li(scratch1, 0x7191); + li(scratch2, 0x7291); + } + jmp(gc_required); + return; + } + + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!scratch1.is(t9)); + DCHECK(!scratch2.is(t9)); + DCHECK(!result.is(t9)); + + // Make object size into bytes. + if ((flags & SIZE_IN_WORDS) != 0) { + object_size *= kPointerSize; + } + DCHECK(0 == (object_size & kObjectAlignmentMask)); + + // Check relative positions of allocation top and limit addresses. + // ARM adds additional checks to make sure the ldm instruction can be + // used. On MIPS we don't have ldm so we don't need additional checks either. + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + + intptr_t top = + reinterpret_cast<intptr_t>(allocation_top.address()); + intptr_t limit = + reinterpret_cast<intptr_t>(allocation_limit.address()); + DCHECK((limit - top) == kPointerSize); + + // Set up allocation top address and object size registers. + Register topaddr = scratch1; + li(topaddr, Operand(allocation_top)); + + // This code stores a temporary value in t9. + if ((flags & RESULT_CONTAINS_TOP) == 0) { + // Load allocation top into result and allocation limit into t9. + ld(result, MemOperand(topaddr)); + ld(t9, MemOperand(topaddr, kPointerSize)); + } else { + if (emit_debug_code()) { + // Assert that result actually contains top on entry. 
t9 is used + // immediately below so this use of t9 does not cause difference with + // respect to register content between debug and release mode. + ld(t9, MemOperand(topaddr)); + Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); + } + // Load allocation limit into t9. Result already contains allocation top. + ld(t9, MemOperand(topaddr, limit - top)); + } + + DCHECK(kPointerSize == kDoubleSize); + if (emit_debug_code()) { + And(at, result, Operand(kDoubleAlignmentMask)); + Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); + } + + // Calculate new top and bail out if new space is exhausted. Use result + // to calculate the new top. + Daddu(scratch2, result, Operand(object_size)); + Branch(gc_required, Ugreater, scratch2, Operand(t9)); + sd(scratch2, MemOperand(topaddr)); + + // Tag object if requested. + if ((flags & TAG_OBJECT) != 0) { + Daddu(result, result, Operand(kHeapObjectTag)); + } +} + + +void MacroAssembler::Allocate(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { + if (!FLAG_inline_new) { + if (emit_debug_code()) { + // Trash the registers to simulate an allocation failure. + li(result, 0x7091); + li(scratch1, 0x7191); + li(scratch2, 0x7291); + } + jmp(gc_required); + return; + } + + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!object_size.is(t9)); + DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); + + // Check relative positions of allocation top and limit addresses. + // ARM adds additional checks to make sure the ldm instruction can be + // used. On MIPS we don't have ldm so we don't need additional checks either. 
+ ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + intptr_t top = + reinterpret_cast<intptr_t>(allocation_top.address()); + intptr_t limit = + reinterpret_cast<intptr_t>(allocation_limit.address()); + DCHECK((limit - top) == kPointerSize); + + // Set up allocation top address and object size registers. + Register topaddr = scratch1; + li(topaddr, Operand(allocation_top)); + + // This code stores a temporary value in t9. + if ((flags & RESULT_CONTAINS_TOP) == 0) { + // Load allocation top into result and allocation limit into t9. + ld(result, MemOperand(topaddr)); + ld(t9, MemOperand(topaddr, kPointerSize)); + } else { + if (emit_debug_code()) { + // Assert that result actually contains top on entry. t9 is used + // immediately below so this use of t9 does not cause difference with + // respect to register content between debug and release mode. + ld(t9, MemOperand(topaddr)); + Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); + } + // Load allocation limit into t9. Result already contains allocation top. + ld(t9, MemOperand(topaddr, limit - top)); + } + + DCHECK(kPointerSize == kDoubleSize); + if (emit_debug_code()) { + And(at, result, Operand(kDoubleAlignmentMask)); + Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); + } + + // Calculate new top and bail out if new space is exhausted. Use result + // to calculate the new top. Object size may be in words so a shift is + // required to get the number of bytes. + if ((flags & SIZE_IN_WORDS) != 0) { + dsll(scratch2, object_size, kPointerSizeLog2); + Daddu(scratch2, result, scratch2); + } else { + Daddu(scratch2, result, Operand(object_size)); + } + Branch(gc_required, Ugreater, scratch2, Operand(t9)); + + // Update allocation top. result temporarily holds the new top. 
+ if (emit_debug_code()) { + And(t9, scratch2, Operand(kObjectAlignmentMask)); + Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg)); + } + sd(scratch2, MemOperand(topaddr)); + + // Tag object if requested. + if ((flags & TAG_OBJECT) != 0) { + Daddu(result, result, Operand(kHeapObjectTag)); + } +} + + +void MacroAssembler::UndoAllocationInNewSpace(Register object, + Register scratch) { + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + + // Make sure the object has no tag before resetting top. + And(object, object, Operand(~kHeapObjectTagMask)); +#ifdef DEBUG + // Check that the object un-allocated is below the current top. + li(scratch, Operand(new_space_allocation_top)); + ld(scratch, MemOperand(scratch)); + Check(less, kUndoAllocationOfNonAllocatedMemory, + object, Operand(scratch)); +#endif + // Write the address of the object to un-allocate as the current top. + li(scratch, Operand(new_space_allocation_top)); + sd(object, MemOperand(scratch)); +} + + +void MacroAssembler::AllocateTwoByteString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required) { + // Calculate the number of bytes needed for the characters in the string while + // observing object alignment. + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + dsll(scratch1, length, 1); // Length in bytes, not chars. + daddiu(scratch1, scratch1, + kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); + And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); + + // Allocate two-byte string in new space. + Allocate(scratch1, + result, + scratch2, + scratch3, + gc_required, + TAG_OBJECT); + + // Set the map, length and hash field. 
+ InitializeNewString(result, + length, + Heap::kStringMapRootIndex, + scratch1, + scratch2); +} + + +void MacroAssembler::AllocateAsciiString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required) { + // Calculate the number of bytes needed for the characters in the string + // while observing object alignment. + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK(kCharSize == 1); + daddiu(scratch1, length, + kObjectAlignmentMask + SeqOneByteString::kHeaderSize); + And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); + + // Allocate ASCII string in new space. + Allocate(scratch1, + result, + scratch2, + scratch3, + gc_required, + TAG_OBJECT); + + // Set the map, length and hash field. + InitializeNewString(result, + length, + Heap::kAsciiStringMapRootIndex, + scratch1, + scratch2); +} + + +void MacroAssembler::AllocateTwoByteConsString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required) { + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + InitializeNewString(result, + length, + Heap::kConsStringMapRootIndex, + scratch1, + scratch2); +} + + +void MacroAssembler::AllocateAsciiConsString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required) { + Allocate(ConsString::kSize, + result, + scratch1, + scratch2, + gc_required, + TAG_OBJECT); + + InitializeNewString(result, + length, + Heap::kConsAsciiStringMapRootIndex, + scratch1, + scratch2); +} + + +void MacroAssembler::AllocateTwoByteSlicedString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required) { + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + + InitializeNewString(result, + length, + Heap::kSlicedStringMapRootIndex, + scratch1, + scratch2); +} + + +void MacroAssembler::AllocateAsciiSlicedString(Register 
result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required) { + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + + InitializeNewString(result, + length, + Heap::kSlicedAsciiStringMapRootIndex, + scratch1, + scratch2); +} + + +void MacroAssembler::JumpIfNotUniqueName(Register reg, + Label* not_unique_name) { + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + Label succeed; + And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); + Branch(&succeed, eq, at, Operand(zero_reg)); + Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE)); + + bind(&succeed); +} + + +// Allocates a heap number or jumps to the label if the young space is full and +// a scavenge is needed. +void MacroAssembler::AllocateHeapNumber(Register result, + Register scratch1, + Register scratch2, + Register heap_number_map, + Label* need_gc, + TaggingMode tagging_mode, + MutableMode mode) { + // Allocate an object in the heap for the heap number and tag it as a heap + // object. + Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, + tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); + + Heap::RootListIndex map_index = mode == MUTABLE + ? Heap::kMutableHeapNumberMapRootIndex + : Heap::kHeapNumberMapRootIndex; + AssertIsRoot(heap_number_map, map_index); + + // Store heap number map in the allocated object. 
+ if (tagging_mode == TAG_RESULT) { + sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); + } else { + sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); + } +} + + +void MacroAssembler::AllocateHeapNumberWithValue(Register result, + FPURegister value, + Register scratch1, + Register scratch2, + Label* gc_required) { + LoadRoot(t8, Heap::kHeapNumberMapRootIndex); + AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); + sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); +} + + +// Copies a fixed number of fields of heap objects from src to dst. +void MacroAssembler::CopyFields(Register dst, + Register src, + RegList temps, + int field_count) { + DCHECK((temps & dst.bit()) == 0); + DCHECK((temps & src.bit()) == 0); + // Primitive implementation using only one temporary register. + + Register tmp = no_reg; + // Find a temp register in temps list. + for (int i = 0; i < kNumRegisters; i++) { + if ((temps & (1 << i)) != 0) { + tmp.code_ = i; + break; + } + } + DCHECK(!tmp.is(no_reg)); + + for (int i = 0; i < field_count; i++) { + ld(tmp, FieldMemOperand(src, i * kPointerSize)); + sd(tmp, FieldMemOperand(dst, i * kPointerSize)); + } +} + + +void MacroAssembler::CopyBytes(Register src, + Register dst, + Register length, + Register scratch) { + Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; + + // Align src before copying in word size chunks. + Branch(&byte_loop, le, length, Operand(kPointerSize)); + bind(&align_loop_1); + And(scratch, src, kPointerSize - 1); + Branch(&word_loop, eq, scratch, Operand(zero_reg)); + lbu(scratch, MemOperand(src)); + Daddu(src, src, 1); + sb(scratch, MemOperand(dst)); + Daddu(dst, dst, 1); + Dsubu(length, length, Operand(1)); + Branch(&align_loop_1, ne, length, Operand(zero_reg)); + + // Copy bytes in word size chunks. 
+ bind(&word_loop); + if (emit_debug_code()) { + And(scratch, src, kPointerSize - 1); + Assert(eq, kExpectingAlignmentForCopyBytes, + scratch, Operand(zero_reg)); + } + Branch(&byte_loop, lt, length, Operand(kPointerSize)); + ld(scratch, MemOperand(src)); + Daddu(src, src, kPointerSize); + + // TODO(kalmard) check if this can be optimized to use sw in most cases. + // Can't use unaligned access - copy byte by byte. + sb(scratch, MemOperand(dst, 0)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 1)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 2)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 3)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 4)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 5)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 6)); + dsrl(scratch, scratch, 8); + sb(scratch, MemOperand(dst, 7)); + Daddu(dst, dst, 8); + + Dsubu(length, length, Operand(kPointerSize)); + Branch(&word_loop); + + // Copy the last bytes if any left. 
+ bind(&byte_loop); + Branch(&done, eq, length, Operand(zero_reg)); + bind(&byte_loop_1); + lbu(scratch, MemOperand(src)); + Daddu(src, src, 1); + sb(scratch, MemOperand(dst)); + Daddu(dst, dst, 1); + Dsubu(length, length, Operand(1)); + Branch(&byte_loop_1, ne, length, Operand(zero_reg)); + bind(&done); +} + + +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + Branch(&entry); + bind(&loop); + sd(filler, MemOperand(start_offset)); + Daddu(start_offset, start_offset, kPointerSize); + bind(&entry); + Branch(&loop, lt, start_offset, Operand(end_offset)); +} + + +void MacroAssembler::CheckFastElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); + lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + Branch(fail, hi, scratch, + Operand(Map::kMaximumBitField2FastHoleyElementValue)); +} + + +void MacroAssembler::CheckFastObjectElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); + lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + Branch(fail, ls, scratch, + Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); + Branch(fail, hi, scratch, + Operand(Map::kMaximumBitField2FastHoleyElementValue)); +} + + +void MacroAssembler::CheckFastSmiElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + Branch(fail, hi, scratch, + Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); +} + + +void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, + Register key_reg, 
+ Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* fail, + int elements_offset) { + Label smi_value, maybe_nan, have_double_value, is_nan, done; + Register mantissa_reg = scratch2; + Register exponent_reg = scratch3; + + // Handle smi values specially. + JumpIfSmi(value_reg, &smi_value); + + // Ensure that the object is a heap number + CheckMap(value_reg, + scratch1, + Heap::kHeapNumberMapRootIndex, + fail, + DONT_DO_SMI_CHECK); + + // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 + // in the exponent. + li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); + lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); + Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1)); + + lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + + bind(&have_double_value); + // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize); + dsra(scratch1, key_reg, 32 - kDoubleSizeLog2); + Daddu(scratch1, scratch1, elements_reg); + sw(mantissa_reg, FieldMemOperand( + scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); + uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + + sizeof(kHoleNanLower32); + sw(exponent_reg, FieldMemOperand(scratch1, offset)); + jmp(&done); + + bind(&maybe_nan); + // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN, + // otherwise it's Infinity or -Infinity, and the non-NaN code path applies. + lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg)); + bind(&is_nan); + // Load canonical NaN for storing into the double array. 
+ LoadRoot(at, Heap::kNanValueRootIndex); + lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset)); + lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset)); + jmp(&have_double_value); + + bind(&smi_value); + Daddu(scratch1, elements_reg, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - + elements_offset)); + // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); + dsra(scratch2, key_reg, 32 - kDoubleSizeLog2); + Daddu(scratch1, scratch1, scratch2); + // scratch1 is now effective address of the double element + + Register untagged_value = elements_reg; + SmiUntag(untagged_value, value_reg); + mtc1(untagged_value, f2); + cvt_d_w(f0, f2); + sdc1(f0, MemOperand(scratch1, 0)); + bind(&done); +} + + +void MacroAssembler::CompareMapAndBranch(Register obj, + Register scratch, + Handle<Map> map, + Label* early_success, + Condition cond, + Label* branch_to) { + ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); + CompareMapAndBranch(scratch, map, early_success, cond, branch_to); +} + + +void MacroAssembler::CompareMapAndBranch(Register obj_map, + Handle<Map> map, + Label* early_success, + Condition cond, + Label* branch_to) { + Branch(branch_to, cond, obj_map, Operand(map)); +} + + +void MacroAssembler::CheckMap(Register obj, + Register scratch, + Handle<Map> map, + Label* fail, + SmiCheckType smi_check_type) { + if (smi_check_type == DO_SMI_CHECK) { + JumpIfSmi(obj, fail); + } + Label success; + CompareMapAndBranch(obj, scratch, map, &success, ne, fail); + bind(&success); +} + + +void MacroAssembler::DispatchMap(Register obj, + Register scratch, + Handle<Map> map, + Handle<Code> success, + SmiCheckType smi_check_type) { + Label fail; + if (smi_check_type == DO_SMI_CHECK) { + JumpIfSmi(obj, &fail); + } + ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); + Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map)); + bind(&fail); +} + + +void MacroAssembler::CheckMap(Register obj, + Register scratch, + 
Heap::RootListIndex index, + Label* fail, + SmiCheckType smi_check_type) { + if (smi_check_type == DO_SMI_CHECK) { + JumpIfSmi(obj, fail); + } + ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); + LoadRoot(at, index); + Branch(fail, ne, scratch, Operand(at)); +} + + +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { + if (IsMipsSoftFloatABI) { + Move(dst, v0, v1); + } else { + Move(dst, f0); // Reg f0 is o32 ABI FP return value. + } +} + + +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { + if (IsMipsSoftFloatABI) { + Move(dst, a0, a1); + } else { + Move(dst, f12); // Reg f12 is o32 ABI FP first argument value. + } +} + + +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { + if (!IsMipsSoftFloatABI) { + Move(f12, src); + } else { + Move(a0, a1, src); + } +} + + +void MacroAssembler::MovToFloatResult(DoubleRegister src) { + if (!IsMipsSoftFloatABI) { + Move(f0, src); + } else { + Move(v0, v1, src); + } +} + + +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, + DoubleRegister src2) { + if (!IsMipsSoftFloatABI) { + const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14; + if (src2.is(f12)) { + DCHECK(!src1.is(fparg2)); + Move(fparg2, src2); + Move(f12, src1); + } else { + Move(f12, src1); + Move(fparg2, src2); + } + } else { + Move(a0, a1, src1); + Move(a2, a3, src2); + } +} + + +// ----------------------------------------------------------------------------- +// JavaScript invokes. + +void MacroAssembler::InvokePrologue(const ParameterCount& expected, + const ParameterCount& actual, + Handle<Code> code_constant, + Register code_reg, + Label* done, + bool* definitely_mismatches, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + bool definitely_matches = false; + *definitely_mismatches = false; + Label regular_invoke; + + // Check whether the expected and actual arguments count match. 
If not, + // setup registers according to contract with ArgumentsAdaptorTrampoline: + // a0: actual arguments count + // a1: function (passed through to callee) + // a2: expected arguments count + + // The code below is made a lot easier because the calling code already sets + // up actual and expected registers according to the contract if values are + // passed in registers. + DCHECK(actual.is_immediate() || actual.reg().is(a0)); + DCHECK(expected.is_immediate() || expected.reg().is(a2)); + DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); + + if (expected.is_immediate()) { + DCHECK(actual.is_immediate()); + if (expected.immediate() == actual.immediate()) { + definitely_matches = true; + } else { + li(a0, Operand(actual.immediate())); + const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; + if (expected.immediate() == sentinel) { + // Don't worry about adapting arguments for builtins that + // don't want that done. Skip adaption code by making it look + // like we have a match between expected and actual number of + // arguments. 
+ definitely_matches = true; + } else { + *definitely_mismatches = true; + li(a2, Operand(expected.immediate())); + } + } + } else if (actual.is_immediate()) { + Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate())); + li(a0, Operand(actual.immediate())); + } else { + Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg())); + } + + if (!definitely_matches) { + if (!code_constant.is_null()) { + li(a3, Operand(code_constant)); + daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); + } + + Handle<Code> adaptor = + isolate()->builtins()->ArgumentsAdaptorTrampoline(); + if (flag == CALL_FUNCTION) { + call_wrapper.BeforeCall(CallSize(adaptor)); + Call(adaptor); + call_wrapper.AfterCall(); + if (!*definitely_mismatches) { + Branch(done); + } + } else { + Jump(adaptor, RelocInfo::CODE_TARGET); + } + bind(®ular_invoke); + } +} + + +void MacroAssembler::InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a function without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + Label done; + + bool definitely_mismatches = false; + InvokePrologue(expected, actual, Handle<Code>::null(), code, + &done, &definitely_mismatches, flag, + call_wrapper); + if (!definitely_mismatches) { + if (flag == CALL_FUNCTION) { + call_wrapper.BeforeCall(CallSize(code)); + Call(code); + call_wrapper.AfterCall(); + } else { + DCHECK(flag == JUMP_FUNCTION); + Jump(code); + } + // Continue here if InvokePrologue does handle the invocation due to + // mismatched parameter counts. + bind(&done); + } +} + + +void MacroAssembler::InvokeFunction(Register function, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a function without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + // Contract with called JS functions requires that function is passed in a1. 
+ DCHECK(function.is(a1)); + Register expected_reg = a2; + Register code_reg = a3; + ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + // The argument count is stored as int32_t on 64-bit platforms. + // TODO(plind): Smi on 32-bit platforms. + lw(expected_reg, + FieldMemOperand(code_reg, + SharedFunctionInfo::kFormalParameterCountOffset)); + ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + ParameterCount expected(expected_reg); + InvokeCode(code_reg, expected, actual, flag, call_wrapper); +} + + +void MacroAssembler::InvokeFunction(Register function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a function without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + // Contract with called JS functions requires that function is passed in a1. + DCHECK(function.is(a1)); + + // Get the function and setup the context. + ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // We call indirectly through the code field in the function to + // allow recompilation to take effect without changing any of the + // call sites. 
+ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + InvokeCode(a3, expected, actual, flag, call_wrapper); +} + + +void MacroAssembler::InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + li(a1, function); + InvokeFunction(a1, expected, actual, flag, call_wrapper); +} + + +void MacroAssembler::IsObjectJSObjectType(Register heap_object, + Register map, + Register scratch, + Label* fail) { + ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); + IsInstanceJSObjectType(map, scratch, fail); +} + + +void MacroAssembler::IsInstanceJSObjectType(Register map, + Register scratch, + Label* fail) { + lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); + Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); +} + + +void MacroAssembler::IsObjectJSStringType(Register object, + Register scratch, + Label* fail) { + DCHECK(kNotStringTag != 0); + + ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + And(scratch, scratch, Operand(kIsNotStringMask)); + Branch(fail, ne, scratch, Operand(zero_reg)); +} + + +void MacroAssembler::IsObjectNameType(Register object, + Register scratch, + Label* fail) { + ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE)); +} + + +// --------------------------------------------------------------------------- +// Support functions. + + +void MacroAssembler::TryGetFunctionPrototype(Register function, + Register result, + Register scratch, + Label* miss, + bool miss_on_bound_function) { + Label non_instance; + if (miss_on_bound_function) { + // Check that the receiver isn't a smi. 
+ JumpIfSmi(function, miss); + + // Check that the function really is a function. Load map into result reg. + GetObjectType(function, result, scratch); + Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); + + ld(scratch, + FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + lwu(scratch, + FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); + And(scratch, scratch, + Operand(1 << SharedFunctionInfo::kBoundFunction)); + Branch(miss, ne, scratch, Operand(zero_reg)); + + // Make sure that the function has an instance prototype. + lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); + And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); + Branch(&non_instance, ne, scratch, Operand(zero_reg)); + } + + // Get the prototype or initial map from the function. + ld(result, + FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + + // If the prototype or initial map is the hole, don't return it and + // simply miss the cache instead. This will allow us to allocate a + // prototype object on-demand in the runtime system. + LoadRoot(t8, Heap::kTheHoleValueRootIndex); + Branch(miss, eq, result, Operand(t8)); + + // If the function does not have an initial map, we're done. + Label done; + GetObjectType(result, scratch, scratch); + Branch(&done, ne, scratch, Operand(MAP_TYPE)); + + // Get the prototype from the initial map. + ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); + + if (miss_on_bound_function) { + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + ld(result, FieldMemOperand(result, Map::kConstructorOffset)); + } + + // All done. 
+ bind(&done); +} + + +void MacroAssembler::GetObjectType(Register object, + Register map, + Register type_reg) { + ld(map, FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); +} + + +// ----------------------------------------------------------------------------- +// Runtime calls. + +void MacroAssembler::CallStub(CodeStub* stub, + TypeFeedbackId ast_id, + Condition cond, + Register r1, + const Operand& r2, + BranchDelaySlot bd) { + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, + cond, r1, r2, bd); +} + + +void MacroAssembler::TailCallStub(CodeStub* stub, + Condition cond, + Register r1, + const Operand& r2, + BranchDelaySlot bd) { + Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd); +} + + +static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { + int64_t offset = (ref0.address() - ref1.address()); + DCHECK(static_cast<int>(offset) == offset); + return static_cast<int>(offset); +} + + +void MacroAssembler::CallApiFunctionAndReturn( + Register function_address, + ExternalReference thunk_ref, + int stack_space, + MemOperand return_value_operand, + MemOperand* context_restore_operand) { + ExternalReference next_address = + ExternalReference::handle_scope_next_address(isolate()); + const int kNextOffset = 0; + const int kLimitOffset = AddressOffset( + ExternalReference::handle_scope_limit_address(isolate()), + next_address); + const int kLevelOffset = AddressOffset( + ExternalReference::handle_scope_level_address(isolate()), + next_address); + + DCHECK(function_address.is(a1) || function_address.is(a2)); + + Label profiler_disabled; + Label end_profiler_check; + li(t9, Operand(ExternalReference::is_profiling_address(isolate()))); + lb(t9, MemOperand(t9, 0)); + Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); + + // Additional parameter is the address of the actual callback. 
+ li(t9, Operand(thunk_ref)); + jmp(&end_profiler_check); + + bind(&profiler_disabled); + mov(t9, function_address); + bind(&end_profiler_check); + + // Allocate HandleScope in callee-save registers. + li(s3, Operand(next_address)); + ld(s0, MemOperand(s3, kNextOffset)); + ld(s1, MemOperand(s3, kLimitOffset)); + ld(s2, MemOperand(s3, kLevelOffset)); + Daddu(s2, s2, Operand(1)); + sd(s2, MemOperand(s3, kLevelOffset)); + + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(1, a0); + li(a0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); + PopSafepointRegisters(); + } + + // Native call returns to the DirectCEntry stub which redirects to the + // return address pushed on stack (could have moved after GC). + // DirectCEntry stub itself is generated early and never moves. + DirectCEntryStub stub(isolate()); + stub.GenerateCall(this, t9); + + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(1, a0); + li(a0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); + PopSafepointRegisters(); + } + + Label promote_scheduled_exception; + Label exception_handled; + Label delete_allocated_handles; + Label leave_exit_frame; + Label return_value_loaded; + + // Load value from ReturnValue. + ld(v0, return_value_operand); + bind(&return_value_loaded); + + // No more valid handles (the result handle was the last one). Restore + // previous handle scope. 
+ sd(s0, MemOperand(s3, kNextOffset)); + if (emit_debug_code()) { + ld(a1, MemOperand(s3, kLevelOffset)); + Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); + } + Dsubu(s2, s2, Operand(1)); + sd(s2, MemOperand(s3, kLevelOffset)); + ld(at, MemOperand(s3, kLimitOffset)); + Branch(&delete_allocated_handles, ne, s1, Operand(at)); + + // Check if the function scheduled an exception. + bind(&leave_exit_frame); + LoadRoot(a4, Heap::kTheHoleValueRootIndex); + li(at, Operand(ExternalReference::scheduled_exception_address(isolate()))); + ld(a5, MemOperand(at)); + Branch(&promote_scheduled_exception, ne, a4, Operand(a5)); + bind(&exception_handled); + + bool restore_context = context_restore_operand != NULL; + if (restore_context) { + ld(cp, *context_restore_operand); + } + li(s0, Operand(stack_space)); + LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN); + + bind(&promote_scheduled_exception); + { + FrameScope frame(this, StackFrame::INTERNAL); + CallExternalReference( + ExternalReference(Runtime::kPromoteScheduledException, isolate()), + 0); + } + jmp(&exception_handled); + + // HandleScope limit has changed. Delete allocated extensions. + bind(&delete_allocated_handles); + sd(s1, MemOperand(s3, kLimitOffset)); + mov(s0, v0); + mov(a0, v0); + PrepareCallCFunction(1, s1); + li(a0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()), + 1); + mov(v0, s0); + jmp(&leave_exit_frame); +} + + +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + return has_frame_ || !stub->SometimesSetsUpAFrame(); +} + + +void MacroAssembler::IndexFromHash(Register hash, Register index) { + // If the hash field contains an array index pick it out. The assert checks + // that the constants for the maximum number of digits for an array index + // cached in the hash field and the number of bits reserved for it does not + // conflict. 
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < + (1 << String::kArrayIndexValueBits)); + DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); +} + + +void MacroAssembler::ObjectToDoubleFPURegister(Register object, + FPURegister result, + Register scratch1, + Register scratch2, + Register heap_number_map, + Label* not_number, + ObjectToDoubleFlags flags) { + Label done; + if ((flags & OBJECT_NOT_SMI) == 0) { + Label not_smi; + JumpIfNotSmi(object, ¬_smi); + // Remove smi tag and convert to double. + // dsra(scratch1, object, kSmiTagSize); + dsra32(scratch1, object, 0); + mtc1(scratch1, result); + cvt_d_w(result, result); + Branch(&done); + bind(¬_smi); + } + // Check for heap number and load double value from it. + ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); + Branch(not_number, ne, scratch1, Operand(heap_number_map)); + + if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { + // If exponent is all ones the number is either a NaN or +/-Infinity. + Register exponent = scratch1; + Register mask_reg = scratch2; + lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); + li(mask_reg, HeapNumber::kExponentMask); + + And(exponent, exponent, mask_reg); + Branch(not_number, eq, exponent, Operand(mask_reg)); + } + ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); + bind(&done); +} + + +void MacroAssembler::SmiToDoubleFPURegister(Register smi, + FPURegister value, + Register scratch1) { + // dsra(scratch1, smi, kSmiTagSize); + dsra32(scratch1, smi, 0); + mtc1(scratch1, value); + cvt_d_w(value, value); +} + + +void MacroAssembler::AdduAndCheckForOverflow(Register dst, + Register left, + Register right, + Register overflow_dst, + Register scratch) { + DCHECK(!dst.is(overflow_dst)); + DCHECK(!dst.is(scratch)); + DCHECK(!overflow_dst.is(scratch)); + DCHECK(!overflow_dst.is(left)); + DCHECK(!overflow_dst.is(right)); + + if (left.is(right) && dst.is(left)) { + DCHECK(!dst.is(t9)); + DCHECK(!scratch.is(t9)); + 
DCHECK(!left.is(t9)); + DCHECK(!right.is(t9)); + DCHECK(!overflow_dst.is(t9)); + mov(t9, right); + right = t9; + } + + if (dst.is(left)) { + mov(scratch, left); // Preserve left. + daddu(dst, left, right); // Left is overwritten. + xor_(scratch, dst, scratch); // Original left. + xor_(overflow_dst, dst, right); + and_(overflow_dst, overflow_dst, scratch); + } else if (dst.is(right)) { + mov(scratch, right); // Preserve right. + daddu(dst, left, right); // Right is overwritten. + xor_(scratch, dst, scratch); // Original right. + xor_(overflow_dst, dst, left); + and_(overflow_dst, overflow_dst, scratch); + } else { + daddu(dst, left, right); + xor_(overflow_dst, dst, left); + xor_(scratch, dst, right); + and_(overflow_dst, scratch, overflow_dst); + } +} + + +void MacroAssembler::SubuAndCheckForOverflow(Register dst, + Register left, + Register right, + Register overflow_dst, + Register scratch) { + DCHECK(!dst.is(overflow_dst)); + DCHECK(!dst.is(scratch)); + DCHECK(!overflow_dst.is(scratch)); + DCHECK(!overflow_dst.is(left)); + DCHECK(!overflow_dst.is(right)); + DCHECK(!scratch.is(left)); + DCHECK(!scratch.is(right)); + + // This happens with some crankshaft code. Since Subu works fine if + // left == right, let's not make that restriction here. + if (left.is(right)) { + mov(dst, zero_reg); + mov(overflow_dst, zero_reg); + return; + } + + if (dst.is(left)) { + mov(scratch, left); // Preserve left. + dsubu(dst, left, right); // Left is overwritten. + xor_(overflow_dst, dst, scratch); // scratch is original left. + xor_(scratch, scratch, right); // scratch is original left. + and_(overflow_dst, scratch, overflow_dst); + } else if (dst.is(right)) { + mov(scratch, right); // Preserve right. + dsubu(dst, left, right); // Right is overwritten. + xor_(overflow_dst, dst, left); + xor_(scratch, left, scratch); // Original right. 
+ and_(overflow_dst, scratch, overflow_dst); + } else { + dsubu(dst, left, right); + xor_(overflow_dst, dst, left); + xor_(scratch, left, right); + and_(overflow_dst, scratch, overflow_dst); + } +} + + +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments, + SaveFPRegsMode save_doubles) { + // All parameters are on the stack. v0 has the return value after call. + + // If the expected number of arguments of the runtime function is + // constant, we check that the actual number of arguments match the + // expectation. + CHECK(f->nargs < 0 || f->nargs == num_arguments); + + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. + PrepareCEntryArgs(num_arguments); + PrepareCEntryFunction(ExternalReference(f, isolate())); + CEntryStub stub(isolate(), 1, save_doubles); + CallStub(&stub); +} + + +void MacroAssembler::CallExternalReference(const ExternalReference& ext, + int num_arguments, + BranchDelaySlot bd) { + PrepareCEntryArgs(num_arguments); + PrepareCEntryFunction(ext); + + CEntryStub stub(isolate(), 1); + CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd); +} + + +void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, + int num_arguments, + int result_size) { + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. 
+ PrepareCEntryArgs(num_arguments); + JumpToExternalReference(ext); +} + + +void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, + int num_arguments, + int result_size) { + TailCallExternalReference(ExternalReference(fid, isolate()), + num_arguments, + result_size); +} + + +void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, + BranchDelaySlot bd) { + PrepareCEntryFunction(builtin); + CEntryStub stub(isolate(), 1); + Jump(stub.GetCode(), + RelocInfo::CODE_TARGET, + al, + zero_reg, + Operand(zero_reg), + bd); +} + + +void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a builtin without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + GetBuiltinEntry(t9, id); + if (flag == CALL_FUNCTION) { + call_wrapper.BeforeCall(CallSize(t9)); + Call(t9); + call_wrapper.AfterCall(); + } else { + DCHECK(flag == JUMP_FUNCTION); + Jump(t9); + } +} + + +void MacroAssembler::GetBuiltinFunction(Register target, + Builtins::JavaScript id) { + // Load the builtins object into target register. + ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); + // Load the JavaScript builtin function from the builtins object. + ld(target, FieldMemOperand(target, + JSBuiltinsObject::OffsetOfFunctionWithId(id))); +} + + +void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { + DCHECK(!target.is(a1)); + GetBuiltinFunction(a1, id); + // Load the code entry point from the builtins object. 
+ ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); +} + + +void MacroAssembler::SetCounter(StatsCounter* counter, int value, + Register scratch1, Register scratch2) { + if (FLAG_native_code_counters && counter->Enabled()) { + li(scratch1, Operand(value)); + li(scratch2, Operand(ExternalReference(counter))); + sd(scratch1, MemOperand(scratch2)); + } +} + + +void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, + Register scratch1, Register scratch2) { + DCHECK(value > 0); + if (FLAG_native_code_counters && counter->Enabled()) { + li(scratch2, Operand(ExternalReference(counter))); + ld(scratch1, MemOperand(scratch2)); + Daddu(scratch1, scratch1, Operand(value)); + sd(scratch1, MemOperand(scratch2)); + } +} + + +void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, + Register scratch1, Register scratch2) { + DCHECK(value > 0); + if (FLAG_native_code_counters && counter->Enabled()) { + li(scratch2, Operand(ExternalReference(counter))); + ld(scratch1, MemOperand(scratch2)); + Dsubu(scratch1, scratch1, Operand(value)); + sd(scratch1, MemOperand(scratch2)); + } +} + + +// ----------------------------------------------------------------------------- +// Debugging. 
+ +void MacroAssembler::Assert(Condition cc, BailoutReason reason, + Register rs, Operand rt) { + if (emit_debug_code()) + Check(cc, reason, rs, rt); +} + + +void MacroAssembler::AssertFastElements(Register elements) { + if (emit_debug_code()) { + DCHECK(!elements.is(at)); + Label ok; + push(elements); + ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); + LoadRoot(at, Heap::kFixedArrayMapRootIndex); + Branch(&ok, eq, elements, Operand(at)); + LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); + Branch(&ok, eq, elements, Operand(at)); + LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); + Branch(&ok, eq, elements, Operand(at)); + Abort(kJSObjectWithFastElementsMapHasSlowElements); + bind(&ok); + pop(elements); + } +} + + +void MacroAssembler::Check(Condition cc, BailoutReason reason, + Register rs, Operand rt) { + Label L; + Branch(&L, cc, rs, rt); + Abort(reason); + // Will not return here. + bind(&L); +} + + +void MacroAssembler::Abort(BailoutReason reason) { + Label abort_start; + bind(&abort_start); +#ifdef DEBUG + const char* msg = GetBailoutReason(reason); + if (msg != NULL) { + RecordComment("Abort message: "); + RecordComment(msg); + } + + if (FLAG_trap_on_abort) { + stop(msg); + return; + } +#endif + + li(a0, Operand(Smi::FromInt(reason))); + push(a0); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 1); + } else { + CallRuntime(Runtime::kAbort, 1); + } + // Will not return here. + if (is_trampoline_pool_blocked()) { + // If the calling code cares about the exact number of + // instructions generated, we insert padding here to keep the size + // of the Abort macro constant. 
+ // Currently in debug mode with debug_code enabled the number of + // generated instructions is 10, so we use this as a maximum value. + static const int kExpectedAbortInstructions = 10; + int abort_instructions = InstructionsGeneratedSince(&abort_start); + DCHECK(abort_instructions <= kExpectedAbortInstructions); + while (abort_instructions++ < kExpectedAbortInstructions) { + nop(); + } + } +} + + +void MacroAssembler::LoadContext(Register dst, int context_chain_length) { + if (context_chain_length > 0) { + // Move up the chain of contexts to the context containing the slot. + ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); + for (int i = 1; i < context_chain_length; i++) { + ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); + } + } else { + // Slot is in the current function context. Move it into the + // destination register in case we store into it (the write barrier + // cannot be allowed to destroy the context in esi). + Move(dst, cp); + } +} + + +void MacroAssembler::LoadTransitionedArrayMapConditional( + ElementsKind expected_kind, + ElementsKind transitioned_kind, + Register map_in_out, + Register scratch, + Label* no_map_match) { + // Load the global or builtins object from the current context. + ld(scratch, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); + + // Check that the function's map is the same as the expected cached map. + ld(scratch, + MemOperand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + ld(at, FieldMemOperand(scratch, offset)); + Branch(no_map_match, ne, map_in_out, Operand(at)); + + // Use the transitioned cached map. 
+ offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + ld(map_in_out, FieldMemOperand(scratch, offset)); +} + + +void MacroAssembler::LoadGlobalFunction(int index, Register function) { + // Load the global or builtins object from the current context. + ld(function, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + // Load the native context from the global or builtins object. + ld(function, FieldMemOperand(function, + GlobalObject::kNativeContextOffset)); + // Load the function from the native context. + ld(function, MemOperand(function, Context::SlotOffset(index))); +} + + +void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, + Register map, + Register scratch) { + // Load the initial map. The global functions all have initial maps. + ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + if (emit_debug_code()) { + Label ok, fail; + CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); + Branch(&ok); + bind(&fail); + Abort(kGlobalFunctionsMustHaveInitialMap); + bind(&ok); + } +} + + +void MacroAssembler::StubPrologue() { + Push(ra, fp, cp); + Push(Smi::FromInt(StackFrame::STUB)); + // Adjust FP to point to saved FP. + Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + PredictableCodeSizeScope predictible_code_size_scope( + this, kNoCodeAgeSequenceLength); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + if (code_pre_aging) { + // Pre-age the code. + Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); + nop(Assembler::CODE_AGE_MARKER_NOP); + // Load the stub address to t9 and call it, + // GetCodeAgeAndParity() extracts the stub address from this instruction. + li(t9, + Operand(reinterpret_cast<uint64_t>(stub->instruction_start())), + ADDRESS_LOAD); + nop(); // Prevent jalr to jal optimization. 
+ jalr(t9, a0); + nop(); // Branch delay slot nop. + nop(); // Pad the empty space. + } else { + Push(ra, fp, cp, a1); + nop(Assembler::CODE_AGE_SEQUENCE_NOP); + nop(Assembler::CODE_AGE_SEQUENCE_NOP); + nop(Assembler::CODE_AGE_SEQUENCE_NOP); + // Adjust fp to point to caller's fp. + Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + } +} + + +void MacroAssembler::EnterFrame(StackFrame::Type type) { + daddiu(sp, sp, -5 * kPointerSize); + li(t8, Operand(Smi::FromInt(type))); + li(t9, Operand(CodeObject()), CONSTANT_SIZE); + sd(ra, MemOperand(sp, 4 * kPointerSize)); + sd(fp, MemOperand(sp, 3 * kPointerSize)); + sd(cp, MemOperand(sp, 2 * kPointerSize)); + sd(t8, MemOperand(sp, 1 * kPointerSize)); + sd(t9, MemOperand(sp, 0 * kPointerSize)); + // Adjust FP to point to saved FP. + Daddu(fp, sp, + Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); +} + + +void MacroAssembler::LeaveFrame(StackFrame::Type type) { + mov(sp, fp); + ld(fp, MemOperand(sp, 0 * kPointerSize)); + ld(ra, MemOperand(sp, 1 * kPointerSize)); + daddiu(sp, sp, 2 * kPointerSize); +} + + +void MacroAssembler::EnterExitFrame(bool save_doubles, + int stack_space) { + // Set up the frame structure on the stack. + STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); + STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); + STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); + + // This is how the stack will look: + // fp + 2 (==kCallerSPDisplacement) - old stack's end + // [fp + 1 (==kCallerPCOffset)] - saved old ra + // [fp + 0 (==kCallerFPOffset)] - saved old fp + // [fp - 1 (==kSPOffset)] - sp of the called function + // [fp - 2 (==kCodeOffset)] - CodeObject + // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the + // new stack (will contain saved ra) + + // Save registers. 
+ daddiu(sp, sp, -4 * kPointerSize); + sd(ra, MemOperand(sp, 3 * kPointerSize)); + sd(fp, MemOperand(sp, 2 * kPointerSize)); + daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer. + + if (emit_debug_code()) { + sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); + } + + // Accessed from ExitFrame::code_slot. + li(t8, Operand(CodeObject()), CONSTANT_SIZE); + sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); + + // Save the frame pointer and the context in top. + li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); + sd(fp, MemOperand(t8)); + li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); + sd(cp, MemOperand(t8)); + + const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); + if (save_doubles) { + // The stack is already aligned to 0 modulo 8 for stores with sdc1. + int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; + int space = kNumOfSavedRegisters * kDoubleSize ; + Dsubu(sp, sp, Operand(space)); + // Remember: we only need to save every 2nd double FPU value. + for (int i = 0; i < kNumOfSavedRegisters; i++) { + FPURegister reg = FPURegister::from_code(2 * i); + sdc1(reg, MemOperand(sp, i * kDoubleSize)); + } + } + + // Reserve place for the return address, stack space and an optional slot + // (used by the DirectCEntryStub to hold the return value if a struct is + // returned) and align the frame preparing for calling the runtime function. + DCHECK(stack_space >= 0); + Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize)); + if (frame_alignment > 0) { + DCHECK(IsPowerOf2(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); // Align stack. + } + + // Set the exit frame sp value to point just before the return address + // location. 
+ daddiu(at, sp, kPointerSize); + sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); +} + + +void MacroAssembler::LeaveExitFrame(bool save_doubles, + Register argument_count, + bool restore_context, + bool do_return) { + // Optionally restore all double registers. + if (save_doubles) { + // Remember: we only need to restore every 2nd double FPU value. + int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; + Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize + + kNumOfSavedRegisters * kDoubleSize)); + for (int i = 0; i < kNumOfSavedRegisters; i++) { + FPURegister reg = FPURegister::from_code(2 * i); + ldc1(reg, MemOperand(t8, i * kDoubleSize)); + } + } + + // Clear top frame. + li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); + sd(zero_reg, MemOperand(t8)); + + // Restore current context from top and clear it in debug mode. + if (restore_context) { + li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); + ld(cp, MemOperand(t8)); + } +#ifdef DEBUG + li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); + sd(a3, MemOperand(t8)); +#endif + + // Pop the arguments, restore registers, and return. + mov(sp, fp); // Respect ABI stack constraint. + ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); + ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); + + if (argument_count.is_valid()) { + dsll(t8, argument_count, kPointerSizeLog2); + daddu(sp, sp, t8); + } + + if (do_return) { + Ret(USE_DELAY_SLOT); + // If returning, the instruction in the delay slot will be the addiu below. 
+ } + daddiu(sp, sp, 2 * kPointerSize); +} + + +void MacroAssembler::InitializeNewString(Register string, + Register length, + Heap::RootListIndex map_index, + Register scratch1, + Register scratch2) { + // dsll(scratch1, length, kSmiTagSize); + dsll32(scratch1, length, 0); + LoadRoot(scratch2, map_index); + sd(scratch1, FieldMemOperand(string, String::kLengthOffset)); + li(scratch1, Operand(String::kEmptyHashField)); + sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); + sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); +} + + +int MacroAssembler::ActivationFrameAlignment() { +#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + // Running on the real platform. Use the alignment as mandated by the local + // environment. + // Note: This will break if we ever start generating snapshots on one Mips + // platform for another Mips platform with a different alignment. + return base::OS::ActivationFrameAlignment(); +#else // V8_HOST_ARCH_MIPS + // If we are using the simulator then we should always align to the expected + // alignment. As the simulator is used to generate snapshots we do not know + // if the target platform will need alignment, so this is controlled from a + // flag. + return FLAG_sim_stack_alignment; +#endif // V8_HOST_ARCH_MIPS +} + + +void MacroAssembler::AssertStackIsAligned() { + if (emit_debug_code()) { + const int frame_alignment = ActivationFrameAlignment(); + const int frame_alignment_mask = frame_alignment - 1; + + if (frame_alignment > kPointerSize) { + Label alignment_as_expected; + DCHECK(IsPowerOf2(frame_alignment)); + andi(at, sp, frame_alignment_mask); + Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); + // Don't use Check here, as it will call Runtime_Abort re-entering here. 
+ stop("Unexpected stack alignment"); + bind(&alignment_as_expected); + } + } +} + + +void MacroAssembler::JumpIfNotPowerOfTwoOrZero( + Register reg, + Register scratch, + Label* not_power_of_two_or_zero) { + Dsubu(scratch, reg, Operand(1)); + Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, + scratch, Operand(zero_reg)); + and_(at, scratch, reg); // In the delay slot. + Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); +} + + +void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { + DCHECK(!reg.is(overflow)); + mov(overflow, reg); // Save original value. + SmiTag(reg); + xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. +} + + +void MacroAssembler::SmiTagCheckOverflow(Register dst, + Register src, + Register overflow) { + if (dst.is(src)) { + // Fall back to slower case. + SmiTagCheckOverflow(dst, overflow); + } else { + DCHECK(!dst.is(src)); + DCHECK(!dst.is(overflow)); + DCHECK(!src.is(overflow)); + SmiTag(dst, src); + xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. + } +} + + +void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) { + if (SmiValuesAre32Bits()) { + lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); + } else { + lw(dst, src); + SmiUntag(dst); + } +} + + +void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) { + if (SmiValuesAre32Bits()) { + // TODO(plind): not clear if lw or ld faster here, need micro-benchmark. + lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); + dsll(dst, dst, scale); + } else { + lw(dst, src); + DCHECK(scale >= kSmiTagSize); + sll(dst, dst, scale - kSmiTagSize); + } +} + + +// Returns 2 values: the Smi and a scaled version of the int within the Smi. 
+void MacroAssembler::SmiLoadWithScale(Register d_smi, + Register d_scaled, + MemOperand src, + int scale) { + if (SmiValuesAre32Bits()) { + ld(d_smi, src); + dsra(d_scaled, d_smi, kSmiShift - scale); + } else { + lw(d_smi, src); + DCHECK(scale >= kSmiTagSize); + sll(d_scaled, d_smi, scale - kSmiTagSize); + } +} + + +// Returns 2 values: the untagged Smi (int32) and scaled version of that int. +void MacroAssembler::SmiLoadUntagWithScale(Register d_int, + Register d_scaled, + MemOperand src, + int scale) { + if (SmiValuesAre32Bits()) { + lw(d_int, UntagSmiMemOperand(src.rm(), src.offset())); + dsll(d_scaled, d_int, scale); + } else { + lw(d_int, src); + // Need both the int and the scaled in, so use two instructions. + SmiUntag(d_int); + sll(d_scaled, d_int, scale); + } +} + + +void MacroAssembler::UntagAndJumpIfSmi(Register dst, + Register src, + Label* smi_case) { + // DCHECK(!dst.is(src)); + JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT); + SmiUntag(dst, src); +} + + +void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, + Register src, + Label* non_smi_case) { + // DCHECK(!dst.is(src)); + JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT); + SmiUntag(dst, src); +} + +void MacroAssembler::JumpIfSmi(Register value, + Label* smi_label, + Register scratch, + BranchDelaySlot bd) { + DCHECK_EQ(0, kSmiTag); + andi(scratch, value, kSmiTagMask); + Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); +} + +void MacroAssembler::JumpIfNotSmi(Register value, + Label* not_smi_label, + Register scratch, + BranchDelaySlot bd) { + DCHECK_EQ(0, kSmiTag); + andi(scratch, value, kSmiTagMask); + Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); +} + + +void MacroAssembler::JumpIfNotBothSmi(Register reg1, + Register reg2, + Label* on_not_both_smi) { + STATIC_ASSERT(kSmiTag == 0); + // TODO(plind): Find some better to fix this assert issue. 
+#if defined(__APPLE__) + DCHECK_EQ(1, kSmiTagMask); +#else + DCHECK_EQ((uint64_t)1, kSmiTagMask); +#endif + or_(at, reg1, reg2); + JumpIfNotSmi(at, on_not_both_smi); +} + + +void MacroAssembler::JumpIfEitherSmi(Register reg1, + Register reg2, + Label* on_either_smi) { + STATIC_ASSERT(kSmiTag == 0); + // TODO(plind): Find some better to fix this assert issue. +#if defined(__APPLE__) + DCHECK_EQ(1, kSmiTagMask); +#else + DCHECK_EQ((uint64_t)1, kSmiTagMask); +#endif + // Both Smi tags must be 1 (not Smi). + and_(at, reg1, reg2); + JumpIfSmi(at, on_either_smi); +} + + +void MacroAssembler::AssertNotSmi(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + andi(at, object, kSmiTagMask); + Check(ne, kOperandIsASmi, at, Operand(zero_reg)); + } +} + + +void MacroAssembler::AssertSmi(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + andi(at, object, kSmiTagMask); + Check(eq, kOperandIsASmi, at, Operand(zero_reg)); + } +} + + +void MacroAssembler::AssertString(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, a4); + Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg)); + push(object); + ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); + Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE)); + pop(object); + } +} + + +void MacroAssembler::AssertName(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, a4); + Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg)); + push(object); + ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); + Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE)); + pop(object); + } +} + + +void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, + Register scratch) { + if (emit_debug_code()) { + Label 
done_checking; + AssertNotSmi(object); + LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + Branch(&done_checking, eq, object, Operand(scratch)); + push(object); + ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); + LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); + Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch)); + pop(object); + bind(&done_checking); + } +} + + +void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { + if (emit_debug_code()) { + DCHECK(!reg.is(at)); + LoadRoot(at, index); + Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); + } +} + + +void MacroAssembler::JumpIfNotHeapNumber(Register object, + Register heap_number_map, + Register scratch, + Label* on_not_heap_number) { + ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); +} + + +void MacroAssembler::LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch3; + + // Load the number string cache. + LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); + // Divide length by two (length is a smi). + // dsra(mask, mask, kSmiTagSize + 1); + dsra32(mask, mask, 1); + Daddu(mask, mask, -1); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. 
+ Label is_smi; + Label load_result_from_cache; + JumpIfSmi(object, &is_smi); + CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + DONT_DO_SMI_CHECK); + + STATIC_ASSERT(8 == kDoubleSize); + Daddu(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + ld(scratch2, MemOperand(scratch1, kPointerSize)); + ld(scratch1, MemOperand(scratch1, 0)); + Xor(scratch1, scratch1, Operand(scratch2)); + And(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + dsll(scratch1, scratch1, kPointerSizeLog2 + 1); + Daddu(scratch1, number_string_cache, scratch1); + + Register probe = mask; + ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + JumpIfSmi(probe, not_found); + ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); + ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); + BranchF(&load_result_from_cache, NULL, eq, f12, f14); + Branch(not_found); + + bind(&is_smi); + Register scratch = scratch1; + // dsra(scratch, object, 1); // Shift away the tag. + dsra32(scratch, scratch, 0); + And(scratch, mask, Operand(scratch)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + dsll(scratch, scratch, kPointerSizeLog2 + 1); + Daddu(scratch, number_string_cache, scratch); + + // Check if the entry is the smi we are looking for. + ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + Branch(not_found, ne, object, Operand(probe)); + + // Get the result from the cache. 
+ bind(&load_result_from_cache); + ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); + + IncrementCounter(isolate()->counters()->number_to_string_native(), + 1, + scratch1, + scratch2); +} + + +void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( + Register first, + Register second, + Register scratch1, + Register scratch2, + Label* failure) { + // Test that both first and second are sequential ASCII strings. + // Assume that they are non-smis. + ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); + ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); + lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); + lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); + + JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, + scratch2, + scratch1, + scratch2, + failure); +} + + +void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, + Register second, + Register scratch1, + Register scratch2, + Label* failure) { + // Check that neither is a smi. + STATIC_ASSERT(kSmiTag == 0); + And(scratch1, first, Operand(second)); + JumpIfSmi(scratch1, failure); + JumpIfNonSmisNotBothSequentialAsciiStrings(first, + second, + scratch1, + scratch2, + failure); +} + + +void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( + Register first, + Register second, + Register scratch1, + Register scratch2, + Label* failure) { + const int kFlatAsciiStringMask = + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; + const int kFlatAsciiStringTag = + kStringTag | kOneByteStringTag | kSeqStringTag; + DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. 
+ andi(scratch1, first, kFlatAsciiStringMask); + Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag)); + andi(scratch2, second, kFlatAsciiStringMask); + Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag)); +} + + +void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, + Register scratch, + Label* failure) { + const int kFlatAsciiStringMask = + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; + const int kFlatAsciiStringTag = + kStringTag | kOneByteStringTag | kSeqStringTag; + And(scratch, type, Operand(kFlatAsciiStringMask)); + Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); +} + + +static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4; + +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments) { + int stack_passed_words = 0; + num_reg_arguments += 2 * num_double_arguments; + + // O32: Up to four simple arguments are passed in registers a0..a3. + // N64: Up to eight simple arguments are passed in registers a0..a7. + if (num_reg_arguments > kRegisterPassedArguments) { + stack_passed_words += num_reg_arguments - kRegisterPassedArguments; + } + stack_passed_words += kCArgSlotCount; + return stack_passed_words; +} + + +void MacroAssembler::EmitSeqStringSetCharCheck(Register string, + Register index, + Register value, + Register scratch, + uint32_t encoding_mask) { + Label is_object; + SmiTst(string, at); + Check(ne, kNonObject, at, Operand(zero_reg)); + + ld(at, FieldMemOperand(string, HeapObject::kMapOffset)); + lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); + + andi(at, at, kStringRepresentationMask | kStringEncodingMask); + li(scratch, Operand(encoding_mask)); + Check(eq, kUnexpectedStringType, at, Operand(scratch)); + + // TODO(plind): requires Smi size check code for mips32. 
+ + ld(at, FieldMemOperand(string, String::kLengthOffset)); + Check(lt, kIndexIsTooLarge, index, Operand(at)); + + DCHECK(Smi::FromInt(0) == 0); + Check(ge, kIndexIsNegative, index, Operand(zero_reg)); +} + + +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, + int num_double_arguments, + Register scratch) { + int frame_alignment = ActivationFrameAlignment(); + + // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots. + // O32: Up to four simple arguments are passed in registers a0..a3. + // Those four arguments must have reserved argument slots on the stack for + // mips, even though those argument slots are not normally used. + // Both ABIs: Remaining arguments are pushed on the stack, above (higher + // address than) the (O32) argument slots. (arg slot calculation handled by + // CalculateStackPassedWords()). + int stack_passed_arguments = CalculateStackPassedWords( + num_reg_arguments, num_double_arguments); + if (frame_alignment > kPointerSize) { + // Make stack end at alignment and make room for num_arguments - 4 words + // and the original value of sp. 
+ mov(scratch, sp); + Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); + DCHECK(IsPowerOf2(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); + sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); + } +} + + +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, + Register scratch) { + PrepareCallCFunction(num_reg_arguments, 0, scratch); +} + + +void MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments) { + li(t8, Operand(function)); + CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments); +} + + +void MacroAssembler::CallCFunction(Register function, + int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); +} + + +void MacroAssembler::CallCFunction(ExternalReference function, + int num_arguments) { + CallCFunction(function, num_arguments, 0); +} + + +void MacroAssembler::CallCFunction(Register function, + int num_arguments) { + CallCFunction(function, num_arguments, 0); +} + + +void MacroAssembler::CallCFunctionHelper(Register function, + int num_reg_arguments, + int num_double_arguments) { + DCHECK(has_frame()); + // Make sure that the stack is aligned before calling a C function unless + // running in the simulator. The simulator has its own alignment check which + // provides more information. + // The argument stots are presumed to have been set up by + // PrepareCallCFunction. The C function must be called via t9, for mips ABI. 
+ +#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 + if (emit_debug_code()) { + int frame_alignment = base::OS::ActivationFrameAlignment(); + int frame_alignment_mask = frame_alignment - 1; + if (frame_alignment > kPointerSize) { + DCHECK(IsPowerOf2(frame_alignment)); + Label alignment_as_expected; + And(at, sp, Operand(frame_alignment_mask)); + Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); + // Don't use Check here, as it will call Runtime_Abort possibly + // re-entering here. + stop("Unexpected alignment in CallCFunction"); + bind(&alignment_as_expected); + } + } +#endif // V8_HOST_ARCH_MIPS + + // Just call directly. The function called cannot cause a GC, or + // allow preemption, so the return address in the link register + // stays correct. + + if (!function.is(t9)) { + mov(t9, function); + function = t9; + } + + Call(function); + + int stack_passed_arguments = CalculateStackPassedWords( + num_reg_arguments, num_double_arguments); + + if (base::OS::ActivationFrameAlignment() > kPointerSize) { + ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); + } +} + + +#undef BRANCH_ARGS_CHECK + + +void MacroAssembler::PatchRelocatedValue(Register li_location, + Register scratch, + Register new_value) { + lwu(scratch, MemOperand(li_location)); + // At this point scratch is a lui(at, ...) instruction. + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, kTheInstructionToPatchShouldBeALui, + scratch, Operand(LUI)); + lwu(scratch, MemOperand(li_location)); + } + dsrl32(t9, new_value, 0); + Ins(scratch, t9, 0, kImm16Bits); + sw(scratch, MemOperand(li_location)); + + lwu(scratch, MemOperand(li_location, kInstrSize)); + // scratch is now ori(at, ...). 
+ if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, kTheInstructionToPatchShouldBeAnOri, + scratch, Operand(ORI)); + lwu(scratch, MemOperand(li_location, kInstrSize)); + } + dsrl(t9, new_value, kImm16Bits); + Ins(scratch, t9, 0, kImm16Bits); + sw(scratch, MemOperand(li_location, kInstrSize)); + + lwu(scratch, MemOperand(li_location, kInstrSize * 3)); + // scratch is now ori(at, ...). + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, kTheInstructionToPatchShouldBeAnOri, + scratch, Operand(ORI)); + lwu(scratch, MemOperand(li_location, kInstrSize * 3)); + } + + Ins(scratch, new_value, 0, kImm16Bits); + sw(scratch, MemOperand(li_location, kInstrSize * 3)); + + // Update the I-cache so the new lui and ori can be executed. + FlushICache(li_location, 4); +} + +void MacroAssembler::GetRelocatedValue(Register li_location, + Register value, + Register scratch) { + lwu(value, MemOperand(li_location)); + if (emit_debug_code()) { + And(value, value, kOpcodeMask); + Check(eq, kTheInstructionShouldBeALui, + value, Operand(LUI)); + lwu(value, MemOperand(li_location)); + } + + // value now holds a lui instruction. Extract the immediate. + andi(value, value, kImm16Mask); + dsll32(value, value, kImm16Bits); + + lwu(scratch, MemOperand(li_location, kInstrSize)); + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, kTheInstructionShouldBeAnOri, + scratch, Operand(ORI)); + lwu(scratch, MemOperand(li_location, kInstrSize)); + } + // "scratch" now holds an ori instruction. Extract the immediate. + andi(scratch, scratch, kImm16Mask); + dsll32(scratch, scratch, 0); + + or_(value, value, scratch); + + lwu(scratch, MemOperand(li_location, kInstrSize * 3)); + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, kTheInstructionShouldBeAnOri, + scratch, Operand(ORI)); + lwu(scratch, MemOperand(li_location, kInstrSize * 3)); + } + // "scratch" now holds an ori instruction. Extract the immediate. 
+ andi(scratch, scratch, kImm16Mask); + dsll(scratch, scratch, kImm16Bits); + + or_(value, value, scratch); + // Sign extend extracted address. + dsra(value, value, kImm16Bits); +} + + +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met) { + And(scratch, object, Operand(~Page::kPageAlignmentMask)); + ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + And(scratch, scratch, Operand(mask)); + Branch(condition_met, cc, scratch, Operand(zero_reg)); +} + + +void MacroAssembler::CheckMapDeprecated(Handle<Map> map, + Register scratch, + Label* if_deprecated) { + if (map->CanBeDeprecated()) { + li(scratch, Operand(map)); + ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); + And(scratch, scratch, Operand(Map::Deprecated::kMask)); + Branch(if_deprecated, ne, scratch, Operand(zero_reg)); + } +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black) { + HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + int first_bit, + int second_bit) { + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color; + // Note that we are using a 4-byte aligned 8-byte load. + Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + And(t8, t9, Operand(mask_scratch)); + Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg)); + // Shift left 1 by adding. + Daddu(mask_scratch, mask_scratch, Operand(mask_scratch)); + And(t8, t9, Operand(mask_scratch)); + Branch(has_color, second_bit == 1 ? 
ne : eq, t8, Operand(zero_reg)); + + bind(&other_color); +} + + +// Detect some, but not all, common pointer-free objects. This is used by the +// incremental write barrier which doesn't care about oddballs (they are always +// marked black immediately so this code is not hit). +void MacroAssembler::JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object) { + DCHECK(!AreAliased(value, scratch, t8, no_reg)); + Label is_data_object; + ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); + LoadRoot(t8, Heap::kHeapNumberMapRootIndex); + Branch(&is_data_object, eq, t8, Operand(scratch)); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); + Branch(not_data_object, ne, t8, Operand(zero_reg)); + bind(&is_data_object); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + // addr_reg is divided into fields: + // |63 page base 20|19 high 8|7 shift 3|2 0| + // 'high' gives the index of the cell holding color bits for the object. + // 'shift' gives the offset in the cell for this object's color. 
+ And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); + Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); + const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; + Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits); + dsll(t8, t8, Bitmap::kBytesPerCellLog2); + Daddu(bitmap_reg, bitmap_reg, t8); + li(t8, Operand(1)); + dsllv(mask_reg, t8, mask_reg); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Register load_scratch, + Label* value_is_white_and_not_data) { + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + // Note that we are using a 4-byte aligned 8-byte load. + Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + And(t8, mask_scratch, load_scratch); + Branch(&done, ne, t8, Operand(zero_reg)); + + if (emit_debug_code()) { + // Check for impossible bit pattern. + Label ok; + // sll may overflow, making the check conservative. + dsll(t8, mask_scratch, 1); + And(t8, load_scratch, t8); + Branch(&ok, eq, t8, Operand(zero_reg)); + stop("Impossible marking bit pattern"); + bind(&ok); + } + + // Value is white. We check whether it is data that doesn't need scanning. + // Currently only checks for HeapNumber and non-cons strings. + Register map = load_scratch; // Holds map while checking type. + Register length = load_scratch; // Holds length of object after testing type. 
+ Label is_data_object; + + // Check for heap-number + ld(map, FieldMemOperand(value, HeapObject::kMapOffset)); + LoadRoot(t8, Heap::kHeapNumberMapRootIndex); + { + Label skip; + Branch(&skip, ne, t8, Operand(map)); + li(length, HeapNumber::kSize); + Branch(&is_data_object); + bind(&skip); + } + + // Check for strings. + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + Register instance_type = load_scratch; + lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); + Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg)); + // It's a non-indirect (non-cons and non-slice) string. + // If it's external, the length is just ExternalString::kSize. + // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). + // External strings are the only ones with the kExternalStringTag bit + // set. + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); + And(t8, instance_type, Operand(kExternalStringTag)); + { + Label skip; + Branch(&skip, eq, t8, Operand(zero_reg)); + li(length, ExternalString::kSize); + Branch(&is_data_object); + bind(&skip); + } + + // Sequential string, either ASCII or UC16. + // For ASCII (char-size of 1) we shift the smi tag away to get the length. + // For UC16 (char-size of 2) we just leave the smi tag in place, thereby + // getting the length multiplied by 2. + DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); + DCHECK(kSmiTag == 0 && kSmiTagSize == 1); + lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset)); + And(t8, instance_type, Operand(kStringEncodingMask)); + { + Label skip; + Branch(&skip, ne, t8, Operand(zero_reg)); + // Adjust length for UC16. 
+ dsll(t9, t9, 1); + bind(&skip); + } + Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); + DCHECK(!length.is(t8)); + And(length, length, Operand(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + Or(t8, t8, Operand(mask_scratch)); + Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + + And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); + Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + Daddu(t8, t8, Operand(length)); + Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + + bind(&done); +} + + +void MacroAssembler::LoadInstanceDescriptors(Register map, + Register descriptors) { + ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); +} + + +void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { + ld(dst, FieldMemOperand(map, Map::kBitField3Offset)); + DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); +} + + +void MacroAssembler::EnumLength(Register dst, Register map) { + STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); + ld(dst, FieldMemOperand(map, Map::kBitField3Offset)); + And(dst, dst, Operand(Map::EnumLengthBits::kMask)); + SmiTag(dst); +} + + +void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { + Register empty_fixed_array_value = a6; + LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); + Label next, start; + mov(a2, a0); + + // Check if the enum length field is properly initialized, indicating that + // there is an enum cache. 
+ ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); + + EnumLength(a3, a1); + Branch( + call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel))); + + jmp(&start); + + bind(&next); + ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); + + // For all objects but the receiver, check that the cache is empty. + EnumLength(a3, a1); + Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0))); + + bind(&start); + + // Check that there are no elements. Register a2 contains the current JS + // object we've reached through the prototype chain. + Label no_elements; + ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset)); + Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value)); + + // Second chance, the object may be using the empty slow element dictionary. + LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex); + Branch(call_runtime, ne, a2, Operand(at)); + + bind(&no_elements); + ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); + Branch(&next, ne, a2, Operand(null_value)); +} + + +void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { + DCHECK(!output_reg.is(input_reg)); + Label done; + li(output_reg, Operand(255)); + // Normal branch: nop in delay slot. + Branch(&done, gt, input_reg, Operand(output_reg)); + // Use delay slot in this branch. + Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg)); + mov(output_reg, zero_reg); // In delay slot. + mov(output_reg, input_reg); // Value is in range 0..255. + bind(&done); +} + + +void MacroAssembler::ClampDoubleToUint8(Register result_reg, + DoubleRegister input_reg, + DoubleRegister temp_double_reg) { + Label above_zero; + Label done; + Label in_bounds; + + Move(temp_double_reg, 0.0); + BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg); + + // Double value is less than zero, NaN or Inf, return 0. + mov(result_reg, zero_reg); + Branch(&done); + + // Double value is >= 255, return 255. 
+ bind(&above_zero); + Move(temp_double_reg, 255.0); + BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg); + li(result_reg, Operand(255)); + Branch(&done); + + // In 0-255 range, round and truncate. + bind(&in_bounds); + cvt_w_d(temp_double_reg, input_reg); + mfc1(result_reg, temp_double_reg); + bind(&done); +} + + +void MacroAssembler::TestJSArrayForAllocationMemento( + Register receiver_reg, + Register scratch_reg, + Label* no_memento_found, + Condition cond, + Label* allocation_memento_present) { + ExternalReference new_space_start = + ExternalReference::new_space_start(isolate()); + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + Daddu(scratch_reg, receiver_reg, + Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); + Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start)); + li(at, Operand(new_space_allocation_top)); + ld(at, MemOperand(at)); + Branch(no_memento_found, gt, scratch_reg, Operand(at)); + ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); + if (allocation_memento_present) { + Branch(allocation_memento_present, cond, scratch_reg, + Operand(isolate()->factory()->allocation_memento_map())); + } +} + + +Register GetRegisterThatIsNotOneOf(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6) { + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { + Register candidate = Register::FromAllocationIndex(i); + if (regs & candidate.bit()) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; +} + + +void MacroAssembler::JumpIfDictionaryInPrototypeChain( + Register object, + Register scratch0, + 
Register scratch1, + Label* found) { + DCHECK(!scratch1.is(scratch0)); + Factory* factory = isolate()->factory(); + Register current = scratch0; + Label loop_again; + + // Scratch contained elements pointer. + Move(current, object); + + // Loop based on the map going up the prototype chain. + bind(&loop_again); + ld(current, FieldMemOperand(current, HeapObject::kMapOffset)); + lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); + DecodeField<Map::ElementsKindBits>(scratch1); + Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS)); + ld(current, FieldMemOperand(current, Map::kPrototypeOffset)); + Branch(&loop_again, ne, current, Operand(factory->null_value())); +} + + +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6, + Register reg7, + Register reg8) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + if (reg7.is_valid()) regs |= reg7.bit(); + if (reg8.is_valid()) regs |= reg8.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; +} + + +CodePatcher::CodePatcher(byte* address, + int instructions, + FlushICache flush_cache) + : address_(address), + size_(instructions * Assembler::kInstrSize), + masm_(NULL, address, size_ + Assembler::kGap), + flush_cache_(flush_cache) { + // Create a new macro assembler pointing to the address of the code to patch. + // The size is adjusted with kGap on order for the assembler to generate size + // bytes of instructions without failing with buffer size constraints. 
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); +} + + +CodePatcher::~CodePatcher() { + // Indicate that code has changed. + if (flush_cache_ == FLUSH) { + CpuFeatures::FlushICache(address_, size_); + } + // Check that the code was patched as expected. + DCHECK(masm_.pc_ == address_ + size_); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); +} + + +void CodePatcher::Emit(Instr instr) { + masm()->emit(instr); +} + + +void CodePatcher::Emit(Address addr) { + // masm()->emit(reinterpret_cast<Instr>(addr)); +} + + +void CodePatcher::ChangeBranchCondition(Condition cond) { + Instr instr = Assembler::instr_at(masm_.pc_); + DCHECK(Assembler::IsBranch(instr)); + uint32_t opcode = Assembler::GetOpcodeField(instr); + // Currently only the 'eq' and 'ne' cond values are supported and the simple + // branch instructions (with opcode being the branch type). + // There are some special cases (see Assembler::IsBranch()) so extending this + // would be tricky. + DCHECK(opcode == BEQ || + opcode == BNE || + opcode == BLEZ || + opcode == BGTZ || + opcode == BEQL || + opcode == BNEL || + opcode == BLEZL || + opcode == BGTZL); + opcode = (cond == eq) ? 
BEQ : BNE; + instr = (instr & ~kOpcodeMask) | opcode; + masm_.emit(instr); +} + + +void MacroAssembler::TruncatingDiv(Register result, + Register dividend, + int32_t divisor) { + DCHECK(!dividend.is(result)); + DCHECK(!dividend.is(at)); + DCHECK(!result.is(at)); + MultiplierAndShift ms(divisor); + li(at, Operand(ms.multiplier())); + Mulh(result, dividend, Operand(at)); + if (divisor > 0 && ms.multiplier() < 0) { + Addu(result, result, Operand(dividend)); + } + if (divisor < 0 && ms.multiplier() > 0) { + Subu(result, result, Operand(dividend)); + } + if (ms.shift() > 0) sra(result, result, ms.shift()); + srl(at, dividend, 31); + Addu(result, result, Operand(at)); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/macro-assembler-mips64.h nodejs-0.11.15/deps/v8/src/mips64/macro-assembler-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/macro-assembler-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/macro-assembler-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1793 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ +#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ + +#include "src/assembler.h" +#include "src/globals.h" +#include "src/mips64/assembler-mips64.h" + +namespace v8 { +namespace internal { + +// Forward declaration. +class JumpTarget; + +// Reserved Register Usage Summary. +// +// Registers t8, t9, and at are reserved for use by the MacroAssembler. +// +// The programmer should know that the MacroAssembler may clobber these three, +// but won't touch other registers except in special cases. +// +// Per the MIPS ABI, register t9 must be used for indirect function call +// via 'jalr t9' or 'jr t9' instructions. 
This is relied upon by gcc when +// trying to update gp register for position-independent-code. Whenever +// MIPS generated code calls C code, it must be via t9 register. + + +// Flags used for LeaveExitFrame function. +enum LeaveExitFrameMode { + EMIT_RETURN = true, + NO_EMIT_RETURN = false +}; + +// Flags used for AllocateHeapNumber +enum TaggingMode { + // Tag the result. + TAG_RESULT, + // Don't tag + DONT_TAG_RESULT +}; + +// Flags used for the ObjectToDoubleFPURegister function. +enum ObjectToDoubleFlags { + // No special flags. + NO_OBJECT_TO_DOUBLE_FLAGS = 0, + // Object is known to be a non smi. + OBJECT_NOT_SMI = 1 << 0, + // Don't load NaNs or infinities, branch to the non number case instead. + AVOID_NANS_AND_INFINITIES = 1 << 1 +}; + +// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. +enum BranchDelaySlot { + USE_DELAY_SLOT, + PROTECT +}; + +// Flags used for the li macro-assembler function. +enum LiFlags { + // If the constant value can be represented in just 16 bits, then + // optimize the li to use a single instruction, rather than lui/ori/dsll + // sequence. + OPTIMIZE_SIZE = 0, + // Always use 6 instructions (lui/ori/dsll sequence), even if the constant + // could be loaded with just one, so that this value is patchable later. + CONSTANT_SIZE = 1, + // For address loads only 4 instruction are required. Used to mark + // constant load that will be used as address without relocation + // information. It ensures predictable code size, so specific sites + // in code are patchable. 
+ ADDRESS_LOAD = 2 +}; + + +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; +enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; + +Register GetRegisterThatIsNotOneOf(Register reg1, + Register reg2 = no_reg, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg); + +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg, + Register reg7 = no_reg, + Register reg8 = no_reg); + + +// ----------------------------------------------------------------------------- +// Static helper functions. + +inline MemOperand ContextOperand(Register context, int index) { + return MemOperand(context, Context::SlotOffset(index)); +} + + +inline MemOperand GlobalObjectOperand() { + return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); +} + + +// Generate a MemOperand for loading a field from an object. +inline MemOperand FieldMemOperand(Register object, int offset) { + return MemOperand(object, offset - kHeapObjectTag); +} + + +inline MemOperand UntagSmiMemOperand(Register rm, int offset) { + // Assumes that Smis are shifted by 32 bits and little endianness. + STATIC_ASSERT(kSmiShift == 32); + return MemOperand(rm, offset + (kSmiShift / kBitsPerByte)); +} + + +inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) { + return UntagSmiMemOperand(rm, offset - kHeapObjectTag); +} + + +// Generate a MemOperand for storing arguments 5..N on the stack +// when calling CallCFunction(). +// TODO(plind): Currently ONLY used for O32. Should be fixed for +// n64, and used in RegExp code, and other places +// with more than 8 arguments. 
+inline MemOperand CFunctionArgumentOperand(int index) { + DCHECK(index > kCArgSlotCount); + // Argument 5 takes the slot just past the four Arg-slots. + int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; + return MemOperand(sp, offset); +} + + +// MacroAssembler implements a collection of frequently used macros. +class MacroAssembler: public Assembler { + public: + // The isolate parameter can be NULL if the macro assembler should + // not use isolate-dependent functionality. In this case, it's the + // responsibility of the caller to never invoke such function on the + // macro assembler. + MacroAssembler(Isolate* isolate, void* buffer, int size); + + // Arguments macros. +#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 +#define COND_ARGS cond, r1, r2 + + // Cases when relocation is not needed. +#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \ + void Name(target_type target, BranchDelaySlot bd = PROTECT); \ + inline void Name(BranchDelaySlot bd, target_type target) { \ + Name(target, bd); \ + } \ + void Name(target_type target, \ + COND_TYPED_ARGS, \ + BranchDelaySlot bd = PROTECT); \ + inline void Name(BranchDelaySlot bd, \ + target_type target, \ + COND_TYPED_ARGS) { \ + Name(target, COND_ARGS, bd); \ + } + +#define DECLARE_BRANCH_PROTOTYPES(Name) \ + DECLARE_NORELOC_PROTOTYPE(Name, Label*) \ + DECLARE_NORELOC_PROTOTYPE(Name, int16_t) + + DECLARE_BRANCH_PROTOTYPES(Branch) + DECLARE_BRANCH_PROTOTYPES(BranchAndLink) + DECLARE_BRANCH_PROTOTYPES(BranchShort) + +#undef DECLARE_BRANCH_PROTOTYPES +#undef COND_TYPED_ARGS +#undef COND_ARGS + + + // Jump, Call, and Ret pseudo instructions implementing inter-working. 
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \ + const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT + + void Jump(Register target, COND_ARGS); + void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); + void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); + void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS); + static int CallSize(Register target, COND_ARGS); + void Call(Register target, COND_ARGS); + static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); + void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); + int CallSize(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + COND_ARGS); + void Call(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + COND_ARGS); + void Ret(COND_ARGS); + inline void Ret(BranchDelaySlot bd, Condition cond = al, + Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) { + Ret(cond, rs, rt, bd); + } + + void Branch(Label* L, + Condition cond, + Register rs, + Heap::RootListIndex index, + BranchDelaySlot bdslot = PROTECT); + +#undef COND_ARGS + + // Emit code to discard a non-negative number of pointer-sized elements + // from the stack, clobbering only the sp register. + void Drop(int count, + Condition cond = cc_always, + Register reg = no_reg, + const Operand& op = Operand(no_reg)); + + // Trivial case of DropAndRet that utilizes the delay slot and only emits + // 2 instructions. + void DropAndRet(int drop); + + void DropAndRet(int drop, + Condition cond, + Register reg, + const Operand& op); + + // Swap two registers. If the scratch register is omitted then a slightly + // less efficient form using xor instead of mov is emitted. 
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg); + + void Call(Label* target); + + inline void Move(Register dst, Register src) { + if (!dst.is(src)) { + mov(dst, src); + } + } + + inline void Move(FPURegister dst, FPURegister src) { + if (!dst.is(src)) { + mov_d(dst, src); + } + } + + inline void Move(Register dst_low, Register dst_high, FPURegister src) { + mfc1(dst_low, src); + mfhc1(dst_high, src); + } + + inline void FmoveHigh(Register dst_high, FPURegister src) { + mfhc1(dst_high, src); + } + + inline void FmoveLow(Register dst_low, FPURegister src) { + mfc1(dst_low, src); + } + + inline void Move(FPURegister dst, Register src_low, Register src_high) { + mtc1(src_low, dst); + mthc1(src_high, dst); + } + + // Conditional move. + void Move(FPURegister dst, double imm); + void Movz(Register rd, Register rs, Register rt); + void Movn(Register rd, Register rs, Register rt); + void Movt(Register rd, Register rs, uint16_t cc = 0); + void Movf(Register rd, Register rs, uint16_t cc = 0); + + void Clz(Register rd, Register rs); + + // Jump unconditionally to given label. + // We NEED a nop in the branch delay slot, as it used by v8, for example in + // CodeGenerator::ProcessDeferred(). + // Currently the branch delay slot is filled by the MacroAssembler. + // Use rather b(Label) for code generation. + void jmp(Label* L) { + Branch(L); + } + + void Load(Register dst, const MemOperand& src, Representation r); + void Store(Register src, const MemOperand& dst, Representation r); + + // Load an object from the root table. + void LoadRoot(Register destination, + Heap::RootListIndex index); + void LoadRoot(Register destination, + Heap::RootListIndex index, + Condition cond, Register src1, const Operand& src2); + + // Store an object to the root table. 
+ void StoreRoot(Register source, + Heap::RootListIndex index); + void StoreRoot(Register source, + Heap::RootListIndex index, + Condition cond, Register src1, const Operand& src2); + + // --------------------------------------------------------------------------- + // GC Support + + void IncrementalMarkingRecordWriteHelper(Register object, + Register value, + Register address); + + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; + + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met); + + void CheckMapDeprecated(Handle<Map> map, + Register scratch, + Label* if_deprecated); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, ne, branch); + } + + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, eq, branch); + } + + // Check if an object has a given incremental marking color. + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + int first_bit, + int second_bit); + + void JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black); + + // Checks the color of an object. 
If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Register scratch3, + Label* object_is_white_and_not_data); + + // Detects conservatively whether an object is data-only, i.e. it does need to + // be scanned by the garbage collector. + void JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + // As above, but the offset has the tag presubtracted. For use with + // MemOperand(reg, off). 
+ inline void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + ra_status, + save_fp, + remembered_set_action, + smi_check, + pointers_to_here_check_for_value); + } + + void RecordWriteForMap( + Register object, + Register map, + Register dst, + RAStatus ra_status, + SaveFPRegsMode save_fp); + + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. + void RecordWrite( + Register object, + Register address, + Register value, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + + // --------------------------------------------------------------------------- + // Inline caching support. + + // Generate code for checking access rights - used for security checks + // on access to global objects across environments. The holder register + // is left untouched, whereas both scratch registers are clobbered. + void CheckAccessGlobalProxy(Register holder_reg, + Register scratch, + Label* miss); + + void GetNumberHash(Register reg0, Register scratch); + + void LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register result, + Register reg0, + Register reg1, + Register reg2); + + + inline void MarkCode(NopMarkerTypes type) { + nop(type); + } + + // Check if the given instruction is a 'type' marker. + // i.e. 
check if it is a sll zero_reg, zero_reg, <type> (referenced as + // nop(type)). These instructions are generated to mark special location in + // the code, like some special IC code. + static inline bool IsMarkedCode(Instr instr, int type) { + DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); + return IsNop(instr, type); + } + + + static inline int GetCodeMarker(Instr instr) { + uint32_t opcode = ((instr & kOpcodeMask)); + uint32_t rt = ((instr & kRtFieldMask) >> kRtShift); + uint32_t rs = ((instr & kRsFieldMask) >> kRsShift); + uint32_t sa = ((instr & kSaFieldMask) >> kSaShift); + + // Return <n> if we have a sll zero_reg, zero_reg, n + // else return -1. + bool sllzz = (opcode == SLL && + rt == static_cast<uint32_t>(ToNumber(zero_reg)) && + rs == static_cast<uint32_t>(ToNumber(zero_reg))); + int type = + (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1; + DCHECK((type == -1) || + ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER))); + return type; + } + + + + // --------------------------------------------------------------------------- + // Allocation support. + + // Allocate an object in new space or old pointer space. The object_size is + // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS + // is passed. If the space is exhausted control continues at the gc_required + // label. The allocated object is returned in result. If the flag + // tag_allocated_object is true the result is tagged as as a heap object. + // All registers are clobbered also when control continues at the gc_required + // label. + void Allocate(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + + void Allocate(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + + // Undo allocation in new space. 
The object passed and objects allocated after + // it will no longer be allocated. The caller must make sure that no pointers + // are left to the object(s) no longer allocated as they would be invalid when + // allocation is undone. + void UndoAllocationInNewSpace(Register object, Register scratch); + + + void AllocateTwoByteString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required); + void AllocateAsciiString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required); + void AllocateTwoByteConsString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required); + void AllocateAsciiConsString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required); + void AllocateTwoByteSlicedString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required); + void AllocateAsciiSlicedString(Register result, + Register length, + Register scratch1, + Register scratch2, + Label* gc_required); + + // Allocates a heap number or jumps to the gc_required label if the young + // space is full and a scavenge is needed. All registers are clobbered also + // when control continues at the gc_required label. + void AllocateHeapNumber(Register result, + Register scratch1, + Register scratch2, + Register heap_number_map, + Label* gc_required, + TaggingMode tagging_mode = TAG_RESULT, + MutableMode mode = IMMUTABLE); + + void AllocateHeapNumberWithValue(Register result, + FPURegister value, + Register scratch1, + Register scratch2, + Label* gc_required); + + // --------------------------------------------------------------------------- + // Instruction macros. 
+ +#define DEFINE_INSTRUCTION(instr) \ + void instr(Register rd, Register rs, const Operand& rt); \ + void instr(Register rd, Register rs, Register rt) { \ + instr(rd, rs, Operand(rt)); \ + } \ + void instr(Register rs, Register rt, int32_t j) { \ + instr(rs, rt, Operand(j)); \ + } + +#define DEFINE_INSTRUCTION2(instr) \ + void instr(Register rs, const Operand& rt); \ + void instr(Register rs, Register rt) { \ + instr(rs, Operand(rt)); \ + } \ + void instr(Register rs, int32_t j) { \ + instr(rs, Operand(j)); \ + } + + DEFINE_INSTRUCTION(Addu); + DEFINE_INSTRUCTION(Daddu); + DEFINE_INSTRUCTION(Ddiv); + DEFINE_INSTRUCTION(Subu); + DEFINE_INSTRUCTION(Dsubu); + DEFINE_INSTRUCTION(Dmod); + DEFINE_INSTRUCTION(Mul); + DEFINE_INSTRUCTION(Mulh); + DEFINE_INSTRUCTION(Dmul); + DEFINE_INSTRUCTION(Dmulh); + DEFINE_INSTRUCTION2(Mult); + DEFINE_INSTRUCTION2(Dmult); + DEFINE_INSTRUCTION2(Multu); + DEFINE_INSTRUCTION2(Dmultu); + DEFINE_INSTRUCTION2(Div); + DEFINE_INSTRUCTION2(Ddiv); + DEFINE_INSTRUCTION2(Divu); + DEFINE_INSTRUCTION2(Ddivu); + + DEFINE_INSTRUCTION(And); + DEFINE_INSTRUCTION(Or); + DEFINE_INSTRUCTION(Xor); + DEFINE_INSTRUCTION(Nor); + DEFINE_INSTRUCTION2(Neg); + + DEFINE_INSTRUCTION(Slt); + DEFINE_INSTRUCTION(Sltu); + + // MIPS32 R2 instruction macro. + DEFINE_INSTRUCTION(Ror); + DEFINE_INSTRUCTION(Dror); + +#undef DEFINE_INSTRUCTION +#undef DEFINE_INSTRUCTION2 + + void Pref(int32_t hint, const MemOperand& rs); + + + // --------------------------------------------------------------------------- + // Pseudo-instructions. + + void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } + + void Ulw(Register rd, const MemOperand& rs); + void Usw(Register rd, const MemOperand& rs); + void Uld(Register rd, const MemOperand& rs, Register scratch = at); + void Usd(Register rd, const MemOperand& rs, Register scratch = at); + + // Load int32 in the rd register. 
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); + inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) { + li(rd, Operand(j), mode); + } + void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE); + + // Push multiple registers on the stack. + // Registers are saved in numerical order, with higher numbered registers + // saved in higher memory addresses. + void MultiPush(RegList regs); + void MultiPushReversed(RegList regs); + + void MultiPushFPU(RegList regs); + void MultiPushReversedFPU(RegList regs); + + void push(Register src) { + Daddu(sp, sp, Operand(-kPointerSize)); + sd(src, MemOperand(sp, 0)); + } + void Push(Register src) { push(src); } + + // Push a handle. + void Push(Handle<Object> handle); + void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } + + // Push two registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2) { + Dsubu(sp, sp, Operand(2 * kPointerSize)); + sd(src1, MemOperand(sp, 1 * kPointerSize)); + sd(src2, MemOperand(sp, 0 * kPointerSize)); + } + + // Push three registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3) { + Dsubu(sp, sp, Operand(3 * kPointerSize)); + sd(src1, MemOperand(sp, 2 * kPointerSize)); + sd(src2, MemOperand(sp, 1 * kPointerSize)); + sd(src3, MemOperand(sp, 0 * kPointerSize)); + } + + // Push four registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3, Register src4) { + Dsubu(sp, sp, Operand(4 * kPointerSize)); + sd(src1, MemOperand(sp, 3 * kPointerSize)); + sd(src2, MemOperand(sp, 2 * kPointerSize)); + sd(src3, MemOperand(sp, 1 * kPointerSize)); + sd(src4, MemOperand(sp, 0 * kPointerSize)); + } + + void Push(Register src, Condition cond, Register tst1, Register tst2) { + // Since we don't have conditional execution we use a Branch. 
+ Branch(3, cond, tst1, Operand(tst2)); + Dsubu(sp, sp, Operand(kPointerSize)); + sd(src, MemOperand(sp, 0)); + } + + void PushRegisterAsTwoSmis(Register src, Register scratch = at); + void PopRegisterAsTwoSmis(Register dst, Register scratch = at); + + // Pops multiple values from the stack and load them in the + // registers specified in regs. Pop order is the opposite as in MultiPush. + void MultiPop(RegList regs); + void MultiPopReversed(RegList regs); + + void MultiPopFPU(RegList regs); + void MultiPopReversedFPU(RegList regs); + + void pop(Register dst) { + ld(dst, MemOperand(sp, 0)); + Daddu(sp, sp, Operand(kPointerSize)); + } + void Pop(Register dst) { pop(dst); } + + // Pop two registers. Pops rightmost register first (from lower address). + void Pop(Register src1, Register src2) { + DCHECK(!src1.is(src2)); + ld(src2, MemOperand(sp, 0 * kPointerSize)); + ld(src1, MemOperand(sp, 1 * kPointerSize)); + Daddu(sp, sp, 2 * kPointerSize); + } + + // Pop three registers. Pops rightmost register first (from lower address). + void Pop(Register src1, Register src2, Register src3) { + ld(src3, MemOperand(sp, 0 * kPointerSize)); + ld(src2, MemOperand(sp, 1 * kPointerSize)); + ld(src1, MemOperand(sp, 2 * kPointerSize)); + Daddu(sp, sp, 3 * kPointerSize); + } + + void Pop(uint32_t count = 1) { + Daddu(sp, sp, Operand(count * kPointerSize)); + } + + // Push and pop the registers that can hold pointers, as defined by the + // RegList constant kSafepointSavedRegisters. + void PushSafepointRegisters(); + void PopSafepointRegisters(); + // Store value in register src in the safepoint stack slot for + // register dst. + void StoreToSafepointRegisterSlot(Register src, Register dst); + // Load the value of the src register from its safepoint stack slot + // into register dst. + void LoadFromSafepointRegisterSlot(Register dst, Register src); + + // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache + // from C. + // Does not handle errors. 
+ void FlushICache(Register address, unsigned instructions); + + // MIPS64 R2 instruction macro. + void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); + void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); + + // --------------------------------------------------------------------------- + // FPU macros. These do not handle special cases like NaN or +- inf. + + // Convert unsigned word to double. + void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); + void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); + + // Convert double to unsigned long. + void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch); + + void Trunc_l_d(FPURegister fd, FPURegister fs); + void Round_l_d(FPURegister fd, FPURegister fs); + void Floor_l_d(FPURegister fd, FPURegister fs); + void Ceil_l_d(FPURegister fd, FPURegister fs); + + // Convert double to unsigned word. + void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); + void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch); + + void Trunc_w_d(FPURegister fd, FPURegister fs); + void Round_w_d(FPURegister fd, FPURegister fs); + void Floor_w_d(FPURegister fd, FPURegister fs); + void Ceil_w_d(FPURegister fd, FPURegister fs); + + void Madd_d(FPURegister fd, + FPURegister fr, + FPURegister fs, + FPURegister ft, + FPURegister scratch); + + // Wrapper function for the different cmp/branch types. + void BranchF(Label* target, + Label* nan, + Condition cc, + FPURegister cmp1, + FPURegister cmp2, + BranchDelaySlot bd = PROTECT); + + // Alternate (inline) version for better readability with USE_DELAY_SLOT. + inline void BranchF(BranchDelaySlot bd, + Label* target, + Label* nan, + Condition cc, + FPURegister cmp1, + FPURegister cmp2) { + BranchF(target, nan, cc, cmp1, cmp2, bd); + } + + // Truncates a double using a specific rounding mode, and writes the value + // to the result register. 
+ // The except_flag will contain any exceptions caused by the instruction. + // If check_inexact is kDontCheckForInexactConversion, then the inexact + // exception is masked. + void EmitFPUTruncate(FPURoundingMode rounding_mode, + Register result, + DoubleRegister double_input, + Register scratch, + DoubleRegister double_scratch, + Register except_flag, + CheckForInexactConversion check_inexact + = kDontCheckForInexactConversion); + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + // + // Only public for the test code in test-code-stubs-arm.cc. + void TryInlineTruncateDoubleToI(Register result, + DoubleRegister input, + Label* done); + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. + void TruncateDoubleToI(Register result, DoubleRegister double_input); + + // Performs a truncating conversion of a heap number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' + // must be different registers. Exits with 'result' holding the answer. + void TruncateHeapNumberToI(Register result, Register object); + + // Converts the smi or heap number in object to an int32 using the rules + // for ToInt32 as described in ECMAScript 9.5.: the value is truncated + // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be + // different registers. + void TruncateNumberToI(Register object, + Register result, + Register heap_number_map, + Register scratch, + Label* not_int32); + + // Loads the number from object into dst register. + // If |object| is neither smi nor heap number, |not_number| is jumped to + // with |object| still intact. 
+ void LoadNumber(Register object, + FPURegister dst, + Register heap_number_map, + Register scratch, + Label* not_number); + + // Loads the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be loaded. + void LoadNumberAsInt32Double(Register object, + DoubleRegister double_dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + FPURegister double_scratch, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be converted. + void LoadNumberAsInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + FPURegister double_scratch0, + FPURegister double_scratch1, + Label* not_int32); + + // Enter exit frame. + // argc - argument count to be dropped by LeaveExitFrame. + // save_doubles - saves FPU registers on stack, currently disabled. + // stack_space - extra stack space. + void EnterExitFrame(bool save_doubles, + int stack_space = 0); + + // Leave the current exit frame. + void LeaveExitFrame(bool save_doubles, + Register arg_count, + bool restore_context, + bool do_return = NO_EMIT_RETURN); + + // Get the actual activation frame alignment for target environment. + static int ActivationFrameAlignment(); + + // Make sure the stack is aligned. Only emits code in debug mode. 
+ void AssertStackIsAligned(); + + void LoadContext(Register dst, int context_chain_length); + + // Conditionally load the cached Array transitioned map of type + // transitioned_kind from the native context if the map in register + // map_in_out is the cached Array map in the native context of + // expected_kind. + void LoadTransitionedArrayMapConditional( + ElementsKind expected_kind, + ElementsKind transitioned_kind, + Register map_in_out, + Register scratch, + Label* no_map_match); + + void LoadGlobalFunction(int index, Register function); + + // Load the initial map from the global function. The registers + // function and map can be the same, function is then overwritten. + void LoadGlobalFunctionInitialMap(Register function, + Register map, + Register scratch); + + void InitializeRootRegister() { + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + li(kRootRegister, Operand(roots_array_start)); + } + + // ------------------------------------------------------------------------- + // JavaScript invokes. + + // Invoke the JavaScript function code by either calling or jumping. + void InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + // Invoke the JavaScript function in the given register. Changes the + // current context to the context in the function before invoking. 
+ void InvokeFunction(Register function, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + void InvokeFunction(Register function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + void InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + + void IsObjectJSObjectType(Register heap_object, + Register map, + Register scratch, + Label* fail); + + void IsInstanceJSObjectType(Register map, + Register scratch, + Label* fail); + + void IsObjectJSStringType(Register object, + Register scratch, + Label* fail); + + void IsObjectNameType(Register object, + Register scratch, + Label* fail); + + // ------------------------------------------------------------------------- + // Debugger Support. + + void DebugBreak(); + + // ------------------------------------------------------------------------- + // Exception handling. + + // Push a new try handler and link into try handler chain. + void PushTryHandler(StackHandler::Kind kind, int handler_index); + + // Unlink the stack handler on top of the stack from the try handler chain. + // Must preserve the result register. + void PopTryHandler(); + + // Passes thrown value to the handler of top of the try handler chain. + void Throw(Register value); + + // Propagates an uncatchable exception to the top of the current JS stack's + // handler chain. + void ThrowUncatchable(Register value); + + // Copies a fixed number of fields of heap objects from src to dst. + void CopyFields(Register dst, Register src, RegList temps, int field_count); + + // Copies a number of bytes from src to dst. All registers are clobbered. On + // exit src and dst will point to the place just after where the last byte was + // read or written and length will be zero. 
+ void CopyBytes(Register src, + Register dst, + Register length, + Register scratch); + + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + + // ------------------------------------------------------------------------- + // Support functions. + + // Try to get function prototype of a function and puts the value in + // the result register. Checks that the function really is a + // function and jumps to the miss label if the fast checks fail. The + // function register will be untouched; the other registers may be + // clobbered. + void TryGetFunctionPrototype(Register function, + Register result, + Register scratch, + Label* miss, + bool miss_on_bound_function = false); + + void GetObjectType(Register function, + Register map, + Register type_reg); + + // Check if a map for a JSObject indicates that the object has fast elements. + // Jump to the specified label if it does not. + void CheckFastElements(Register map, + Register scratch, + Label* fail); + + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Register scratch, + Label* fail); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. Jump to the specified label if it does not. + void CheckFastSmiElements(Register map, + Register scratch, + Label* fail); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements. Otherwise jump to fail. 
+ void StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* fail, + int elements_offset = 0); + + // Compare an object's map with the specified map and its transitioned + // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to + // "branch_to" if the result of the comparison is "cond". If multiple map + // compares are required, the compare sequences branches to early_success. + void CompareMapAndBranch(Register obj, + Register scratch, + Handle<Map> map, + Label* early_success, + Condition cond, + Label* branch_to); + + // As above, but the map of the object is already loaded into the register + // which is preserved by the code generated. + void CompareMapAndBranch(Register obj_map, + Handle<Map> map, + Label* early_success, + Condition cond, + Label* branch_to); + + // Check if the map of an object is equal to a specified map and branch to + // label if not. Skip the smi check if not required (object is known to be a + // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match + // against maps that are ElementsKind transition maps of the specificed map. + void CheckMap(Register obj, + Register scratch, + Handle<Map> map, + Label* fail, + SmiCheckType smi_check_type); + + + void CheckMap(Register obj, + Register scratch, + Heap::RootListIndex index, + Label* fail, + SmiCheckType smi_check_type); + + // Check if the map of an object is equal to a specified map and branch to a + // specified target if equal. Skip the smi check if not required (object is + // known to be a heap object) + void DispatchMap(Register obj, + Register scratch, + Handle<Map> map, + Handle<Code> success, + SmiCheckType smi_check_type); + + + // Load and check the instance type of an object for being a string. + // Loads the type into the second argument register. + // Returns a condition that will be enabled if the object was a string. 
+ Condition IsObjectStringType(Register obj, + Register type, + Register result) { + ld(type, FieldMemOperand(obj, HeapObject::kMapOffset)); + lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); + And(type, type, Operand(kIsNotStringMask)); + DCHECK_EQ(0, kStringTag); + return eq; + } + + + // Picks out an array index from the hash field. + // Register use: + // hash - holds the index's hash. Clobbered. + // index - holds the overwritten index on exit. + void IndexFromHash(Register hash, Register index); + + // Get the number of least significant bits from a register. + void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); + void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); + + // Load the value of a number object into a FPU double register. If the + // object is not a number a jump to the label not_number is performed + // and the FPU double register is unchanged. + void ObjectToDoubleFPURegister( + Register object, + FPURegister value, + Register scratch1, + Register scratch2, + Register heap_number_map, + Label* not_number, + ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS); + + // Load the value of a smi object into a FPU double register. The register + // scratch1 can be the same register as smi in which case smi will hold the + // untagged value afterwards. + void SmiToDoubleFPURegister(Register smi, + FPURegister value, + Register scratch1); + + // ------------------------------------------------------------------------- + // Overflow handling functions. + // Usage: first call the appropriate arithmetic function, then call one of the + // jump functions with the overflow_dst register as the second parameter. 
+ + void AdduAndCheckForOverflow(Register dst, + Register left, + Register right, + Register overflow_dst, + Register scratch = at); + + void SubuAndCheckForOverflow(Register dst, + Register left, + Register right, + Register overflow_dst, + Register scratch = at); + + void BranchOnOverflow(Label* label, + Register overflow_check, + BranchDelaySlot bd = PROTECT) { + Branch(label, lt, overflow_check, Operand(zero_reg), bd); + } + + void BranchOnNoOverflow(Label* label, + Register overflow_check, + BranchDelaySlot bd = PROTECT) { + Branch(label, ge, overflow_check, Operand(zero_reg), bd); + } + + void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) { + Ret(lt, overflow_check, Operand(zero_reg), bd); + } + + void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) { + Ret(ge, overflow_check, Operand(zero_reg), bd); + } + + // ------------------------------------------------------------------------- + // Runtime calls. + + // See comments at the beginning of CEntryStub::Generate. + inline void PrepareCEntryArgs(int num_args) { + li(s0, num_args); + li(s1, (num_args - 1) * kPointerSize); + } + + inline void PrepareCEntryFunction(const ExternalReference& ref) { + li(s2, Operand(ref)); + } + +#define COND_ARGS Condition cond = al, Register rs = zero_reg, \ +const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT + + // Call a code stub. + void CallStub(CodeStub* stub, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + COND_ARGS); + + // Tail call a code stub (jump). + void TailCallStub(CodeStub* stub, COND_ARGS); + +#undef COND_ARGS + + void CallJSExitStub(CodeStub* stub); + + // Call a runtime routine. 
+ void CallRuntime(const Runtime::Function* f, + int num_arguments, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + void CallRuntimeSaveDoubles(Runtime::FunctionId id) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, function->nargs, kSaveFPRegs); + } + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId id, + int num_arguments, + SaveFPRegsMode save_doubles = kDontSaveFPRegs) { + CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); + } + + // Convenience function: call an external reference. + void CallExternalReference(const ExternalReference& ext, + int num_arguments, + BranchDelaySlot bd = PROTECT); + + // Tail call of a runtime routine (jump). + // Like JumpToExternalReference, but also takes care of passing the number + // of parameters. + void TailCallExternalReference(const ExternalReference& ext, + int num_arguments, + int result_size); + + // Convenience function: tail call a runtime routine (jump). + void TailCallRuntime(Runtime::FunctionId fid, + int num_arguments, + int result_size); + + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + + // Before calling a C-function from generated code, align arguments on stack + // and add space for the four mips argument slots. + // After aligning the frame, non-register arguments must be stored on the + // stack, after the argument-slots using helper: CFunctionArgumentOperand(). + // The argument count assumes all arguments are word sized. + // Some compilers/platforms require the stack to be aligned when calling + // C++ code. + // Needs a scratch register to do some arithmetic. This register will be + // trashed. + void PrepareCallCFunction(int num_reg_arguments, + int num_double_registers, + Register scratch); + void PrepareCallCFunction(int num_reg_arguments, + Register scratch); + + // Arguments 1-4 are placed in registers a0 thru a3 respectively. 
+ // Arguments 5..n are stored to stack using following: + // sw(a4, CFunctionArgumentOperand(5)); + + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). + void CallCFunction(ExternalReference function, int num_arguments); + void CallCFunction(Register function, int num_arguments); + void CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments); + void CallCFunction(Register function, + int num_reg_arguments, + int num_double_arguments); + void MovFromFloatResult(DoubleRegister dst); + void MovFromFloatParameter(DoubleRegister dst); + + // There are two ways of passing double arguments on MIPS, depending on + // whether soft or hard floating point ABI is used. These functions + // abstract parameter passing for the three different ways we call + // C functions from generated code. + void MovToFloatParameter(DoubleRegister src); + void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); + void MovToFloatResult(DoubleRegister src); + + // Calls an API function. Allocates HandleScope, extracts returned value + // from handle and propagates exceptions. Restores context. stack_space + // - space to be unwound on exit (includes the call JS arguments space and + // the additional space allocated for the fast call). + void CallApiFunctionAndReturn(Register function_address, + ExternalReference thunk_ref, + int stack_space, + MemOperand return_value_operand, + MemOperand* context_restore_operand); + + // Jump to the builtin routine. + void JumpToExternalReference(const ExternalReference& builtin, + BranchDelaySlot bd = PROTECT); + + // Invoke specified builtin JavaScript function. Adds an entry to + // the unresolved list if the name does not resolve. 
+ void InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + const CallWrapper& call_wrapper = NullCallWrapper()); + + // Store the code object for the given builtin in the target register and + // setup the function in a1. + void GetBuiltinEntry(Register target, Builtins::JavaScript id); + + // Store the function for the given builtin in the target register. + void GetBuiltinFunction(Register target, Builtins::JavaScript id); + + struct Unresolved { + int pc; + uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders. + const char* name; + }; + + Handle<Object> CodeObject() { + DCHECK(!code_object_.is_null()); + return code_object_; + } + + // Emit code for a truncating division by a constant. The dividend register is + // unchanged and at gets clobbered. Dividend and result must be different. + void TruncatingDiv(Register result, Register dividend, int32_t divisor); + + // ------------------------------------------------------------------------- + // StatsCounter support. + + void SetCounter(StatsCounter* counter, int value, + Register scratch1, Register scratch2); + void IncrementCounter(StatsCounter* counter, int value, + Register scratch1, Register scratch2); + void DecrementCounter(StatsCounter* counter, int value, + Register scratch1, Register scratch2); + + + // ------------------------------------------------------------------------- + // Debugging. + + // Calls Abort(msg) if the condition cc is not satisfied. + // Use --debug_code to enable. + void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt); + void AssertFastElements(Register elements); + + // Like Assert(), but always enabled. + void Check(Condition cc, BailoutReason reason, Register rs, Operand rt); + + // Print a message to stdout and abort execution. + void Abort(BailoutReason msg); + + // Verify restrictions about code generated in stubs. 
+ void set_generating_stub(bool value) { generating_stub_ = value; } + bool generating_stub() { return generating_stub_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); + + // --------------------------------------------------------------------------- + // Number utilities. + + // Check whether the value of reg is a power of two and not zero. If not + // control continues at the label not_power_of_two. If reg is a power of two + // the register scratch contains the value of (reg - 1) when control falls + // through. + void JumpIfNotPowerOfTwoOrZero(Register reg, + Register scratch, + Label* not_power_of_two_or_zero); + + // ------------------------------------------------------------------------- + // Smi utilities. + + // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). + void SmiTagCheckOverflow(Register reg, Register overflow); + void SmiTagCheckOverflow(Register dst, Register src, Register overflow); + + void SmiTag(Register dst, Register src) { + STATIC_ASSERT(kSmiTag == 0); + if (SmiValuesAre32Bits()) { + STATIC_ASSERT(kSmiShift == 32); + dsll32(dst, src, 0); + } else { + Addu(dst, src, src); + } + } + + void SmiTag(Register reg) { + SmiTag(reg, reg); + } + + // Try to convert int32 to smi. If the value is to large, preserve + // the original value and jump to not_a_smi. Destroys scratch and + // sets flags. 
+ void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) { + TrySmiTag(reg, reg, scratch, not_a_smi); + } + + void TrySmiTag(Register dst, + Register src, + Register scratch, + Label* not_a_smi) { + if (SmiValuesAre32Bits()) { + SmiTag(dst, src); + } else { + SmiTagCheckOverflow(at, src, scratch); + BranchOnOverflow(not_a_smi, scratch); + mov(dst, at); + } + } + + void SmiUntag(Register dst, Register src) { + if (SmiValuesAre32Bits()) { + STATIC_ASSERT(kSmiShift == 32); + dsra32(dst, src, 0); + } else { + sra(dst, src, kSmiTagSize); + } + } + + void SmiUntag(Register reg) { + SmiUntag(reg, reg); + } + + // Left-shifted from int32 equivalent of Smi. + void SmiScale(Register dst, Register src, int scale) { + if (SmiValuesAre32Bits()) { + // The int portion is upper 32-bits of 64-bit word. + dsra(dst, src, kSmiShift - scale); + } else { + DCHECK(scale >= kSmiTagSize); + sll(dst, src, scale - kSmiTagSize); + } + } + + // Combine load with untagging or scaling. + void SmiLoadUntag(Register dst, MemOperand src); + + void SmiLoadScale(Register dst, MemOperand src, int scale); + + // Returns 2 values: the Smi and a scaled version of the int within the Smi. + void SmiLoadWithScale(Register d_smi, + Register d_scaled, + MemOperand src, + int scale); + + // Returns 2 values: the untagged Smi (int32) and scaled version of that int. + void SmiLoadUntagWithScale(Register d_int, + Register d_scaled, + MemOperand src, + int scale); + + + // Test if the register contains a smi. + inline void SmiTst(Register value, Register scratch) { + And(scratch, value, Operand(kSmiTagMask)); + } + inline void NonNegativeSmiTst(Register value, Register scratch) { + And(scratch, value, Operand(kSmiTagMask | kSmiSignMask)); + } + + // Untag the source value into destination and jump if source is a smi. + // Source and destination can be the same register. 
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); + + // Untag the source value into destination and jump if source is not a smi. + // Source and destination can be the same register. + void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); + + // Jump the register contains a smi. + void JumpIfSmi(Register value, + Label* smi_label, + Register scratch = at, + BranchDelaySlot bd = PROTECT); + + // Jump if the register contains a non-smi. + void JumpIfNotSmi(Register value, + Label* not_smi_label, + Register scratch = at, + BranchDelaySlot bd = PROTECT); + + // Jump if either of the registers contain a non-smi. + void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); + // Jump if either of the registers contain a smi. + void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); + + // Abort execution if argument is a smi, enabled via --debug-code. + void AssertNotSmi(Register object); + void AssertSmi(Register object); + + // Abort execution if argument is not a string, enabled via --debug-code. + void AssertString(Register object); + + // Abort execution if argument is not a name, enabled via --debug-code. + void AssertName(Register object); + + // Abort execution if argument is not undefined or an AllocationSite, enabled + // via --debug-code. + void AssertUndefinedOrAllocationSite(Register object, Register scratch); + + // Abort execution if reg is not the root value with the given index, + // enabled via --debug-code. + void AssertIsRoot(Register reg, Heap::RootListIndex index); + + // --------------------------------------------------------------------------- + // HeapNumber utilities. + + void JumpIfNotHeapNumber(Register object, + Register heap_number_map, + Register scratch, + Label* on_not_heap_number); + + // ------------------------------------------------------------------------- + // String utilities. + + // Generate code to do a lookup in the number string cache. 
If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + void LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found); + + // Checks if both instance types are sequential ASCII strings and jumps to + // label if either is not. + void JumpIfBothInstanceTypesAreNotSequentialAscii( + Register first_object_instance_type, + Register second_object_instance_type, + Register scratch1, + Register scratch2, + Label* failure); + + // Check if instance type is sequential ASCII string and jump to label if + // it is not. + void JumpIfInstanceTypeIsNotSequentialAscii(Register type, + Register scratch, + Label* failure); + + void JumpIfNotUniqueName(Register reg, Label* not_unique_name); + + void EmitSeqStringSetCharCheck(Register string, + Register index, + Register value, + Register scratch, + uint32_t encoding_mask); + + // Test that both first and second are sequential ASCII strings. + // Assume that they are non-smis. + void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first, + Register second, + Register scratch1, + Register scratch2, + Label* failure); + + // Test that both first and second are sequential ASCII strings. + // Check that they are non-smis. 
+ void JumpIfNotBothSequentialAsciiStrings(Register first, + Register second, + Register scratch1, + Register scratch2, + Label* failure); + + void ClampUint8(Register output_reg, Register input_reg); + + void ClampDoubleToUint8(Register result_reg, + DoubleRegister input_reg, + DoubleRegister temp_double_reg); + + + void LoadInstanceDescriptors(Register map, Register descriptors); + void EnumLength(Register dst, Register map); + void NumberOfOwnDescriptors(Register dst, Register map); + + template<typename Field> + void DecodeField(Register dst, Register src) { + Ext(dst, src, Field::kShift, Field::kSize); + } + + template<typename Field> + void DecodeField(Register reg) { + DecodeField<Field>(reg, reg); + } + + template<typename Field> + void DecodeFieldToSmi(Register dst, Register src) { + static const int shift = Field::kShift; + static const int mask = Field::kMask >> shift; + dsrl(dst, src, shift); + And(dst, dst, Operand(mask)); + dsll32(dst, dst, 0); + } + + template<typename Field> + void DecodeFieldToSmi(Register reg) { + DecodeField<Field>(reg, reg); + } + // Generates function and stub prologue code. + void StubPrologue(); + void Prologue(bool code_pre_aging); + + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + + // Patch the relocated value (lui/ori pair). + void PatchRelocatedValue(Register li_location, + Register scratch, + Register new_value); + // Get the relocatad value (loaded data) from the lui/ori pair. + void GetRelocatedValue(Register li_location, + Register value, + Register scratch); + + // Expects object in a0 and returns map with validated enum cache + // in a0. Assumes that any other register can be used as a scratch. + void CheckEnumCache(Register null_value, Label* call_runtime); + + // AllocationMemento support. Arrays may have an associated + // AllocationMemento object that can be checked for in order to pretransition + // to another type. 
+ // On entry, receiver_reg should point to the array object. + // scratch_reg gets clobbered. + // If allocation info is present, jump to allocation_memento_present. + void TestJSArrayForAllocationMemento( + Register receiver_reg, + Register scratch_reg, + Label* no_memento_found, + Condition cond = al, + Label* allocation_memento_present = NULL); + + void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* memento_found) { + Label no_memento_found; + TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, + &no_memento_found, eq, memento_found); + bind(&no_memento_found); + } + + // Jumps to found label if a prototype map has dictionary elements. + void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, + Register scratch1, Label* found); + + private: + void CallCFunctionHelper(Register function, + int num_reg_arguments, + int num_double_arguments); + + void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT); + void BranchAndLinkShort(int16_t offset, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot = PROTECT); + void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); + void BranchAndLinkShort(Label* L, Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot = PROTECT); + void J(Label* L, BranchDelaySlot bdslot); + void Jr(Label* L, BranchDelaySlot bdslot); + void Jalr(Label* L, BranchDelaySlot bdslot); + + // Helper functions for generating invokes. + void InvokePrologue(const ParameterCount& expected, + const ParameterCount& actual, + Handle<Code> code_constant, + Register code_reg, + Label* done, + bool* definitely_mismatches, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + // Get the code for the given builtin. Returns if able to resolve + // the function in the 'resolved' flag. 
+ Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved); + + void InitializeNewString(Register string, + Register length, + Heap::RootListIndex map_index, + Register scratch1, + Register scratch2); + + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cond, // eq for new space, ne otherwise. + Label* branch); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Leaves addr_reg unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); + + // Helper for throwing exceptions. Compute a handler address and jump to + // it. See the implementation for register usage. + void JumpToHandlerEntry(); + + // Compute memory operands for safepoint stack slots. + static int SafepointRegisterStackIndex(int reg_code); + MemOperand SafepointRegisterSlot(Register reg); + MemOperand SafepointRegistersAndDoublesSlot(Register reg); + + bool generating_stub_; + bool has_frame_; + // This handle will be patched with the code object on installation. + Handle<Object> code_object_; + + // Needs access to SafepointRegisterStackIndex for compiled frame + // traversal. + friend class StandardFrame; +}; + + +// The code patcher is used to patch (typically) small parts of code e.g. for +// debugging and other types of instrumentation. When using the code patcher +// the exact number of bytes specified must be emitted. It is not legal to emit +// relocation information. If any of these constraints are violated it causes +// an assertion to fail. +class CodePatcher { + public: + enum FlushICache { + FLUSH, + DONT_FLUSH + }; + + CodePatcher(byte* address, + int instructions, + FlushICache flush_cache = FLUSH); + virtual ~CodePatcher(); + + // Macro assembler to emit code. 
+ MacroAssembler* masm() { return &masm_; } + + // Emit an instruction directly. + void Emit(Instr instr); + + // Emit an address directly. + void Emit(Address addr); + + // Change the condition part of an instruction leaving the rest of the current + // instruction unchanged. + void ChangeBranchCondition(Condition cond); + + private: + byte* address_; // The address of the code being patched. + int size_; // Number of bytes of the expected patch size. + MacroAssembler masm_; // Macro assembler used to generate the code. + FlushICache flush_cache_; // Whether to flush the I cache after patching. +}; + + + +#ifdef GENERATED_CODE_COVERAGE +#define CODE_COVERAGE_STRINGIFY(x) #x +#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) +#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) +#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> +#else +#define ACCESS_MASM(masm) masm-> +#endif + +} } // namespace v8::internal + +#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/OWNERS nodejs-0.11.15/deps/v8/src/mips64/OWNERS --- nodejs-0.11.13/deps/v8/src/mips64/OWNERS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/OWNERS 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +plind44@gmail.com +paul.lind@imgtec.com +gergely@homejinni.com +gergely.kis@imgtec.com +palfia@homejinni.com +akos.palfi@imgtec.com +kilvadyb@homejinni.com +balazs.kilvady@imgtec.com +Dusan.Milosavljevic@rt-rk.com +dusan.milosavljevic@imgtec.com diff -Nru nodejs-0.11.13/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1371 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/code-stubs.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/unicode.h" + +#include "src/mips64/regexp-macro-assembler-mips64.h" + +namespace v8 { +namespace internal { + +#ifndef V8_INTERPRETED_REGEXP +/* + * This assembler uses the following register assignment convention + * - t3 : Temporarily stores the index of capture start after a matching pass + * for a global regexp. + * - a5 : Pointer to current code object (Code*) including heap object tag. + * - a6 : Current position in input, as negative offset from end of string. + * Please notice that this is the byte offset, not the character offset! + * - a7 : Currently loaded character. Must be loaded using + * LoadCurrentCharacter before using any of the dispatch methods. + * - t0 : Points to tip of backtrack stack + * - t1 : Unused. + * - t2 : End of input (points to byte after last character in input). + * - fp : Frame pointer. Used to access arguments, local variables and + * RegExp registers. + * - sp : Points to tip of C stack. + * + * The remaining registers are free for computations. + * Each call to a public method should retain this convention. + * + * TODO(plind): O32 documented here with intent of having single 32/64 codebase + * in the future. + * + * The O32 stack will have the following structure: + * + * - fp[76] Isolate* isolate (address of the current isolate) + * - fp[72] direct_call (if 1, direct call from JavaScript code, + * if 0, call through the runtime system). + * - fp[68] stack_area_base (High end of the memory area to use as + * backtracking stack). + * - fp[64] capture array size (may fit multiple sets of matches) + * - fp[60] int* capture_array (int[num_saved_registers_], for output). 
+ * - fp[44..59] MIPS O32 four argument slots + * - fp[40] secondary link/return address used by native call. + * --- sp when called --- + * - fp[36] return address (lr). + * - fp[32] old frame pointer (r11). + * - fp[0..31] backup of registers s0..s7. + * --- frame pointer ---- + * - fp[-4] end of input (address of end of string). + * - fp[-8] start of input (address of first character in string). + * - fp[-12] start index (character index of start). + * - fp[-16] void* input_string (location of a handle containing the string). + * - fp[-20] success counter (only for global regexps to count matches). + * - fp[-24] Offset of location before start of input (effectively character + * position -1). Used to initialize capture registers to a + * non-position. + * - fp[-28] At start (if 1, we are starting at the start of the + * string, otherwise 0) + * - fp[-32] register 0 (Only positions must be stored in the first + * - register 1 num_saved_registers_ registers) + * - ... + * - register num_registers-1 + * --- sp --- + * + * + * The N64 stack will have the following structure: + * + * - fp[88] Isolate* isolate (address of the current isolate) kIsolate + * - fp[80] secondary link/return address used by exit frame on native call. kSecondaryReturnAddress + kStackFrameHeader + * --- sp when called --- + * - fp[72] ra Return from RegExp code (ra). kReturnAddress + * - fp[64] s9, old-fp Old fp, callee saved(s9). + * - fp[0..63] s0..s7 Callee-saved registers s0..s7. + * --- frame pointer ---- + * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall + * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd + * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters + * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput + * - fp[-40] end of input (address of end of string). kInputEnd + * - fp[-48] start of input (address of first character in string). 
kInputStart + * - fp[-56] start index (character index of start). kStartIndex + * - fp[-64] void* input_string (location of a handle containing the string). kInputString + * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures + * - fp[-80] Offset of location before start of input (effectively character kInputStartMinusOne + * position -1). Used to initialize capture registers to a + * non-position. + * --------- The following output registers are 32-bit values. --------- + * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero + * - register 1 num_saved_registers_ registers) + * - ... + * - register num_registers-1 + * --- sp --- + * + * The first num_saved_registers_ registers are initialized to point to + * "character -1" in the string (i.e., char_size() bytes before the first + * character of the string). The remaining registers start out as garbage. + * + * The data up to the return address must be placed there by the calling + * code and the remaining arguments are passed in registers, e.g. by calling the + * code entry as cast to a function with the signature: + * int (*match)(String* input_string, + * int start_index, + * Address start, + * Address end, + * Address secondary_return_address, // Only used by native call. + * int* capture_output_array, + * byte* stack_area_base, + * bool direct_call = false, + * void* return_address, + * Isolate* isolate); + * The call is performed by NativeRegExpMacroAssembler::Execute() + * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro + * in mips/simulator-mips.h. + * When calling as a non-direct call (i.e., from C++ code), the return address + * area is overwritten with the ra register by the RegExp code. When doing a + * direct call from generated code, the return address is placed there by + * the calling code, as in a normal exit frame. 
+ */ + +#define __ ACCESS_MASM(masm_) + +RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS( + Mode mode, + int registers_to_save, + Zone* zone) + : NativeRegExpMacroAssembler(zone), + masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)), + mode_(mode), + num_registers_(registers_to_save), + num_saved_registers_(registers_to_save), + entry_label_(), + start_label_(), + success_label_(), + backtrack_label_(), + exit_label_(), + internal_failure_label_() { + DCHECK_EQ(0, registers_to_save % 2); + __ jmp(&entry_label_); // We'll write the entry code later. + // If the code gets too big or corrupted, an internal exception will be + // raised, and we will exit right away. + __ bind(&internal_failure_label_); + __ li(v0, Operand(FAILURE)); + __ Ret(); + __ bind(&start_label_); // And then continue from here. +} + + +RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() { + delete masm_; + // Unuse labels in case we throw away the assembler without calling GetCode. + entry_label_.Unuse(); + start_label_.Unuse(); + success_label_.Unuse(); + backtrack_label_.Unuse(); + exit_label_.Unuse(); + check_preempt_label_.Unuse(); + stack_overflow_label_.Unuse(); + internal_failure_label_.Unuse(); +} + + +int RegExpMacroAssemblerMIPS::stack_limit_slack() { + return RegExpStack::kStackLimitSlack; +} + + +void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) { + if (by != 0) { + __ Daddu(current_input_offset(), + current_input_offset(), Operand(by * char_size())); + } +} + + +void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) { + DCHECK(reg >= 0); + DCHECK(reg < num_registers_); + if (by != 0) { + __ ld(a0, register_location(reg)); + __ Daddu(a0, a0, Operand(by)); + __ sd(a0, register_location(reg)); + } +} + + +void RegExpMacroAssemblerMIPS::Backtrack() { + CheckPreemption(); + // Pop Code* offset from backtrack stack, add Code* and jump to location. 
+ Pop(a0); + __ Daddu(a0, a0, code_pointer()); + __ Jump(a0); +} + + +void RegExpMacroAssemblerMIPS::Bind(Label* label) { + __ bind(label); +} + + +void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) { + BranchOrBacktrack(on_equal, eq, current_character(), Operand(c)); +} + + +void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) { + BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit)); +} + + +void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) { + Label not_at_start; + // Did we start the match at the start of the string at all? + __ lw(a0, MemOperand(frame_pointer(), kStartIndex)); + BranchOrBacktrack(¬_at_start, ne, a0, Operand(zero_reg)); + + // If we did, are we still at the start of the input? + __ ld(a1, MemOperand(frame_pointer(), kInputStart)); + __ Daddu(a0, end_of_input_address(), Operand(current_input_offset())); + BranchOrBacktrack(on_at_start, eq, a0, Operand(a1)); + __ bind(¬_at_start); +} + + +void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) { + // Did we start the match at the start of the string at all? + __ lw(a0, MemOperand(frame_pointer(), kStartIndex)); + BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg)); + // If we did, are we still at the start of the input? 
+ __ ld(a1, MemOperand(frame_pointer(), kInputStart)); + __ Daddu(a0, end_of_input_address(), Operand(current_input_offset())); + BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1)); +} + + +void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) { + BranchOrBacktrack(on_less, lt, current_character(), Operand(limit)); +} + + +void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) { + Label backtrack_non_equal; + __ lw(a0, MemOperand(backtrack_stackpointer(), 0)); + __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0)); + __ Daddu(backtrack_stackpointer(), + backtrack_stackpointer(), + Operand(kIntSize)); + __ bind(&backtrack_non_equal); + BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0)); +} + + +void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase( + int start_reg, + Label* on_no_match) { + Label fallthrough; + __ ld(a0, register_location(start_reg)); // Index of start of capture. + __ ld(a1, register_location(start_reg + 1)); // Index of end of capture. + __ Dsubu(a1, a1, a0); // Length of capture. + + // If length is zero, either the capture is empty or it is not participating. + // In either case succeed immediately. + __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); + + __ Daddu(t1, a1, current_input_offset()); + // Check that there are enough characters left in the input. + BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); + + if (mode_ == ASCII) { + Label success; + Label fail; + Label loop_check; + + // a0 - offset of start of capture. + // a1 - length of capture. + __ Daddu(a0, a0, Operand(end_of_input_address())); + __ Daddu(a2, end_of_input_address(), Operand(current_input_offset())); + __ Daddu(a1, a0, Operand(a1)); + + // a0 - Address of start of capture. + // a1 - Address of end of capture. + // a2 - Address of current input position. 
+ + Label loop; + __ bind(&loop); + __ lbu(a3, MemOperand(a0, 0)); + __ daddiu(a0, a0, char_size()); + __ lbu(a4, MemOperand(a2, 0)); + __ daddiu(a2, a2, char_size()); + + __ Branch(&loop_check, eq, a4, Operand(a3)); + + // Mismatch, try case-insensitive match (converting letters to lower-case). + __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case. + __ Or(a4, a4, Operand(0x20)); // Also convert input character. + __ Branch(&fail, ne, a4, Operand(a3)); + __ Dsubu(a3, a3, Operand('a')); + __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); + // Latin-1: Check for values in range [224,254] but not 247. + __ Dsubu(a3, a3, Operand(224 - 'a')); + // Weren't Latin-1 letters. + __ Branch(&fail, hi, a3, Operand(254 - 224)); + // Check for 247. + __ Branch(&fail, eq, a3, Operand(247 - 224)); + + __ bind(&loop_check); + __ Branch(&loop, lt, a0, Operand(a1)); + __ jmp(&success); + + __ bind(&fail); + GoTo(on_no_match); + + __ bind(&success); + // Compute new value of character position after the matched part. + __ Dsubu(current_input_offset(), a2, end_of_input_address()); + } else { + DCHECK(mode_ == UC16); + // Put regexp engine registers on stack. + RegList regexp_registers_to_retain = current_input_offset().bit() | + current_character().bit() | backtrack_stackpointer().bit(); + __ MultiPush(regexp_registers_to_retain); + + int argument_count = 4; + __ PrepareCallCFunction(argument_count, a2); + + // a0 - offset of start of capture. + // a1 - length of capture. + + // Put arguments into arguments registers. + // Parameters are + // a0: Address byte_offset1 - Address captured substring's start. + // a1: Address byte_offset2 - Address of current character position. + // a2: size_t byte_length - length of capture in bytes(!). + // a3: Isolate* isolate. + + // Address of start of capture. + __ Daddu(a0, a0, Operand(end_of_input_address())); + // Length of capture. + __ mov(a2, a1); + // Save length in callee-save register for use on return. 
+ __ mov(s3, a1); + // Address of current input position. + __ Daddu(a1, current_input_offset(), Operand(end_of_input_address())); + // Isolate. + __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); + + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); + } + + // Restore regexp engine registers. + __ MultiPop(regexp_registers_to_retain); + __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); + __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); + + // Check if function returned non-zero for success or zero for failure. + BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg)); + // On success, increment position by length of capture. + __ Daddu(current_input_offset(), current_input_offset(), Operand(s3)); + } + + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerMIPS::CheckNotBackReference( + int start_reg, + Label* on_no_match) { + Label fallthrough; + Label success; + + // Find length of back-referenced capture. + __ ld(a0, register_location(start_reg)); + __ ld(a1, register_location(start_reg + 1)); + __ Dsubu(a1, a1, a0); // Length to check. + // Succeed on empty capture (including no capture). + __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); + + __ Daddu(t1, a1, current_input_offset()); + // Check that there are enough characters left in the input. + BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); + + // Compute pointers to match string and capture string. 
+ __ Daddu(a0, a0, Operand(end_of_input_address())); + __ Daddu(a2, end_of_input_address(), Operand(current_input_offset())); + __ Daddu(a1, a1, Operand(a0)); + + Label loop; + __ bind(&loop); + if (mode_ == ASCII) { + __ lbu(a3, MemOperand(a0, 0)); + __ daddiu(a0, a0, char_size()); + __ lbu(a4, MemOperand(a2, 0)); + __ daddiu(a2, a2, char_size()); + } else { + DCHECK(mode_ == UC16); + __ lhu(a3, MemOperand(a0, 0)); + __ daddiu(a0, a0, char_size()); + __ lhu(a4, MemOperand(a2, 0)); + __ daddiu(a2, a2, char_size()); + } + BranchOrBacktrack(on_no_match, ne, a3, Operand(a4)); + __ Branch(&loop, lt, a0, Operand(a1)); + + // Move current character position to position after match. + __ Dsubu(current_input_offset(), a2, end_of_input_address()); + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c, + Label* on_not_equal) { + BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c)); +} + + +void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_equal) { + __ And(a0, current_character(), Operand(mask)); + Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); + BranchOrBacktrack(on_equal, eq, a0, rhs); +} + + +void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_not_equal) { + __ And(a0, current_character(), Operand(mask)); + Operand rhs = (c == 0) ? 
Operand(zero_reg) : Operand(c); + BranchOrBacktrack(on_not_equal, ne, a0, rhs); +} + + +void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd( + uc16 c, + uc16 minus, + uc16 mask, + Label* on_not_equal) { + DCHECK(minus < String::kMaxUtf16CodeUnit); + __ Dsubu(a0, current_character(), Operand(minus)); + __ And(a0, a0, Operand(mask)); + BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); +} + + +void RegExpMacroAssemblerMIPS::CheckCharacterInRange( + uc16 from, + uc16 to, + Label* on_in_range) { + __ Dsubu(a0, current_character(), Operand(from)); + // Unsigned lower-or-same condition. + BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from)); +} + + +void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange( + uc16 from, + uc16 to, + Label* on_not_in_range) { + __ Dsubu(a0, current_character(), Operand(from)); + // Unsigned higher condition. + BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from)); +} + + +void RegExpMacroAssemblerMIPS::CheckBitInTable( + Handle<ByteArray> table, + Label* on_bit_set) { + __ li(a0, Operand(table)); + if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { + __ And(a1, current_character(), Operand(kTableSize - 1)); + __ Daddu(a0, a0, a1); + } else { + __ Daddu(a0, a0, current_character()); + } + + __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize)); + BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg)); +} + + +bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type, + Label* on_no_match) { + // Range checks (c in min..max) are generally implemented by an unsigned + // (c - min) <= (max - min) check. + switch (type) { + case 's': + // Match space-characters. + if (mode_ == ASCII) { + // One byte space characters are '\t'..'\r', ' ' and \u00a0. + Label success; + __ Branch(&success, eq, current_character(), Operand(' ')); + // Check range 0x09..0x0d. + __ Dsubu(a0, current_character(), Operand('\t')); + __ Branch(&success, ls, a0, Operand('\r' - '\t')); + // \u00a0 (NBSP). 
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t')); + __ bind(&success); + return true; + } + return false; + case 'S': + // The emitted code for generic character classes is good enough. + return false; + case 'd': + // Match ASCII digits ('0'..'9'). + __ Dsubu(a0, current_character(), Operand('0')); + BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0')); + return true; + case 'D': + // Match non ASCII-digits. + __ Dsubu(a0, current_character(), Operand('0')); + BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0')); + return true; + case '.': { + // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029). + __ Xor(a0, current_character(), Operand(0x01)); + // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c. + __ Dsubu(a0, a0, Operand(0x0b)); + BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b)); + if (mode_ == UC16) { + // Compare original value to 0x2028 and 0x2029, using the already + // computed (current_char ^ 0x01 - 0x0b). I.e., check for + // 0x201d (0x2028 - 0x0b) or 0x201e. + __ Dsubu(a0, a0, Operand(0x2028 - 0x0b)); + BranchOrBacktrack(on_no_match, ls, a0, Operand(1)); + } + return true; + } + case 'n': { + // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029). + __ Xor(a0, current_character(), Operand(0x01)); + // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c. + __ Dsubu(a0, a0, Operand(0x0b)); + if (mode_ == ASCII) { + BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b)); + } else { + Label done; + BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b)); + // Compare original value to 0x2028 and 0x2029, using the already + // computed (current_char ^ 0x01 - 0x0b). I.e., check for + // 0x201d (0x2028 - 0x0b) or 0x201e. + __ Dsubu(a0, a0, Operand(0x2028 - 0x0b)); + BranchOrBacktrack(on_no_match, hi, a0, Operand(1)); + __ bind(&done); + } + return true; + } + case 'w': { + if (mode_ != ASCII) { + // Table is 128 entries, so all ASCII characters can be tested. 
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z')); + } + ExternalReference map = ExternalReference::re_word_character_map(); + __ li(a0, Operand(map)); + __ Daddu(a0, a0, current_character()); + __ lbu(a0, MemOperand(a0, 0)); + BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); + return true; + } + case 'W': { + Label done; + if (mode_ != ASCII) { + // Table is 128 entries, so all ASCII characters can be tested. + __ Branch(&done, hi, current_character(), Operand('z')); + } + ExternalReference map = ExternalReference::re_word_character_map(); + __ li(a0, Operand(map)); + __ Daddu(a0, a0, current_character()); + __ lbu(a0, MemOperand(a0, 0)); + BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg)); + if (mode_ != ASCII) { + __ bind(&done); + } + return true; + } + case '*': + // Match any character. + return true; + // No custom implementation (yet): s(UC16), S(UC16). + default: + return false; + } +} + + +void RegExpMacroAssemblerMIPS::Fail() { + __ li(v0, Operand(FAILURE)); + __ jmp(&exit_label_); +} + + +Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { + Label return_v0; + if (masm_->has_exception()) { + // If the code gets corrupted due to long regular expressions and lack of + // space on trampolines, an internal exception flag is set. If this case + // is detected, we will jump into exit sequence right away. + __ bind_to(&entry_label_, internal_failure_label_.pos()); + } else { + // Finalize code - write the entry point code now we know how many + // registers we need. + + // Entry code: + __ bind(&entry_label_); + + // Tell the system that we have a stack frame. Because the type is MANUAL, + // no is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. + // Push arguments + // Save callee-save registers. + // Start new stack frame. + // Store link register in existing stack-cell. 
+ // Order here should correspond to order of offset constants in header file. + // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs + // or dont save. + RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | + s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit(); + RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit(); + + if (kMipsAbi == kN64) { + // TODO(plind): Should probably alias a4-a7, for clarity. + argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit(); + } + + __ MultiPush(argument_registers | registers_to_retain | ra.bit()); + // Set frame pointer in space for it if this is not a direct call + // from generated code. + // TODO(plind): this 8 is the # of argument regs, should have definition. + __ Daddu(frame_pointer(), sp, Operand(8 * kPointerSize)); + __ mov(a0, zero_reg); + __ push(a0); // Make room for success counter and initialize it to 0. + __ push(a0); // Make room for "position - 1" constant (value irrelevant). + + // Check if we have space on the stack for registers. + Label stack_limit_hit; + Label stack_ok; + + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(masm_->isolate()); + __ li(a0, Operand(stack_limit)); + __ ld(a0, MemOperand(a0)); + __ Dsubu(a0, sp, a0); + // Handle it if the stack pointer is already below the stack limit. + __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg)); + // Check if there is room for the variable number of registers above + // the stack limit. + __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize)); + // Exit with OutOfMemory exception. There is not enough space on the stack + // for our working registers. + __ li(v0, Operand(EXCEPTION)); + __ jmp(&return_v0); + + __ bind(&stack_limit_hit); + CallCheckStackGuardState(a0); + // If returned value is non-zero, we exit with the returned value as result. 
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg)); + + __ bind(&stack_ok); + // Allocate space on stack for registers. + __ Dsubu(sp, sp, Operand(num_registers_ * kPointerSize)); + // Load string end. + __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); + // Load input start. + __ ld(a0, MemOperand(frame_pointer(), kInputStart)); + // Find negative length (offset of start relative to end). + __ Dsubu(current_input_offset(), a0, end_of_input_address()); + // Set a0 to address of char before start of the input string + // (effectively string position -1). + __ ld(a1, MemOperand(frame_pointer(), kStartIndex)); + __ Dsubu(a0, current_input_offset(), Operand(char_size())); + __ dsll(t1, a1, (mode_ == UC16) ? 1 : 0); + __ Dsubu(a0, a0, t1); + // Store this value in a local variable, for use when clearing + // position registers. + __ sd(a0, MemOperand(frame_pointer(), kInputStartMinusOne)); + + // Initialize code pointer register + __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); + + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); + __ li(current_character(), Operand('\n')); + __ jmp(&start_regexp); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. + if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. + // Fill saved registers with initial value = start offset - 1. + if (num_saved_registers_ > 8) { + // Address of register 0. 
+ __ Daddu(a1, frame_pointer(), Operand(kRegisterZero)); + __ li(a2, Operand(num_saved_registers_)); + Label init_loop; + __ bind(&init_loop); + __ sd(a0, MemOperand(a1)); + __ Daddu(a1, a1, Operand(-kPointerSize)); + __ Dsubu(a2, a2, Operand(1)); + __ Branch(&init_loop, ne, a2, Operand(zero_reg)); + } else { + for (int i = 0; i < num_saved_registers_; i++) { + __ sd(a0, register_location(i)); + } + } + } + + // Initialize backtrack stack pointer. + __ ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); + + __ jmp(&start_label_); + + + // Exit code: + if (success_label_.is_linked()) { + // Save captures when successful. + __ bind(&success_label_); + if (num_saved_registers_ > 0) { + // Copy captures to output. + __ ld(a1, MemOperand(frame_pointer(), kInputStart)); + __ ld(a0, MemOperand(frame_pointer(), kRegisterOutput)); + __ ld(a2, MemOperand(frame_pointer(), kStartIndex)); + __ Dsubu(a1, end_of_input_address(), a1); + // a1 is length of input in bytes. + if (mode_ == UC16) { + __ dsrl(a1, a1, 1); + } + // a1 is length of input in characters. + __ Daddu(a1, a1, Operand(a2)); + // a1 is length of string in characters. + + DCHECK_EQ(0, num_saved_registers_ % 2); + // Always an even number of capture registers. This allows us to + // unroll the loop once to add an operation between a load of a register + // and the following use of that register. + for (int i = 0; i < num_saved_registers_; i += 2) { + __ ld(a2, register_location(i)); + __ ld(a3, register_location(i + 1)); + if (i == 0 && global_with_zero_length_check()) { + // Keep capture start in a4 for the zero-length check later. + __ mov(t3, a2); + } + if (mode_ == UC16) { + __ dsra(a2, a2, 1); + __ Daddu(a2, a2, a1); + __ dsra(a3, a3, 1); + __ Daddu(a3, a3, a1); + } else { + __ Daddu(a2, a1, Operand(a2)); + __ Daddu(a3, a1, Operand(a3)); + } + // V8 expects the output to be an int32_t array. 
+ __ sw(a2, MemOperand(a0)); + __ Daddu(a0, a0, kIntSize); + __ sw(a3, MemOperand(a0)); + __ Daddu(a0, a0, kIntSize); + } + } + + if (global()) { + // Restart matching if the regular expression is flagged as global. + __ ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); + __ ld(a2, MemOperand(frame_pointer(), kRegisterOutput)); + // Increment success counter. + __ Daddu(a0, a0, 1); + __ sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ Dsubu(a1, a1, num_saved_registers_); + // Check whether we have enough room for another set of capture results. + __ mov(v0, a0); + __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_)); + + __ sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); + // Advance the location for output. + __ Daddu(a2, a2, num_saved_registers_ * kIntSize); + __ sd(a2, MemOperand(frame_pointer(), kRegisterOutput)); + + // Prepare a0 to initialize registers with its value in the next run. + __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne)); + + if (global_with_zero_length_check()) { + // Special case for zero-length matches. + // t3: capture start index + // Not a zero-length match, restart. + __ Branch( + &load_char_start_regexp, ne, current_input_offset(), Operand(t3)); + // Offset from the end is zero if we already reached the end. + __ Branch(&exit_label_, eq, current_input_offset(), + Operand(zero_reg)); + // Advance current position after a zero-length match. + __ Daddu(current_input_offset(), + current_input_offset(), + Operand((mode_ == UC16) ? 2 : 1)); + } + + __ Branch(&load_char_start_regexp); + } else { + __ li(v0, Operand(SUCCESS)); + } + } + // Exit and return v0. 
+ __ bind(&exit_label_); + if (global()) { + __ ld(v0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + } + + __ bind(&return_v0); + // Skip sp past regexp registers and local variables.. + __ mov(sp, frame_pointer()); + // Restore registers s0..s7 and return (restoring ra to pc). + __ MultiPop(registers_to_retain | ra.bit()); + __ Ret(); + + // Backtrack code (branch target for conditional backtracks). + if (backtrack_label_.is_linked()) { + __ bind(&backtrack_label_); + Backtrack(); + } + + Label exit_with_exception; + + // Preempt-code. + if (check_preempt_label_.is_linked()) { + SafeCallTarget(&check_preempt_label_); + // Put regexp engine registers on stack. + RegList regexp_registers_to_retain = current_input_offset().bit() | + current_character().bit() | backtrack_stackpointer().bit(); + __ MultiPush(regexp_registers_to_retain); + CallCheckStackGuardState(a0); + __ MultiPop(regexp_registers_to_retain); + // If returning non-zero, we should end execution with the given + // result as return value. + __ Branch(&return_v0, ne, v0, Operand(zero_reg)); + + // String might have moved: Reload end of string from frame. + __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); + __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); + SafeReturn(); + } + + // Backtrack stack overflow code. + if (stack_overflow_label_.is_linked()) { + SafeCallTarget(&stack_overflow_label_); + // Reached if the backtrack-stack limit has been hit. + // Put regexp engine registers on stack first. 
+ RegList regexp_registers = current_input_offset().bit() | + current_character().bit(); + __ MultiPush(regexp_registers); + Label grow_failed; + // Call GrowStack(backtrack_stackpointer(), &stack_base) + static const int num_arguments = 3; + __ PrepareCallCFunction(num_arguments, a0); + __ mov(a0, backtrack_stackpointer()); + __ Daddu(a1, frame_pointer(), Operand(kStackHighEnd)); + __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate()))); + ExternalReference grow_stack = + ExternalReference::re_grow_stack(masm_->isolate()); + __ CallCFunction(grow_stack, num_arguments); + // Restore regexp registers. + __ MultiPop(regexp_registers); + // If return NULL, we have failed to grow the stack, and + // must exit with a stack-overflow exception. + __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg)); + // Otherwise use return value as new stack pointer. + __ mov(backtrack_stackpointer(), v0); + // Restore saved registers and continue. + __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); + __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); + SafeReturn(); + } + + if (exit_with_exception.is_linked()) { + // If any of the code above needed to exit with an exception. + __ bind(&exit_with_exception); + // Exit with Result EXCEPTION(-1) to signal thrown exception. 
+ __ li(v0, Operand(EXCEPTION)); + __ jmp(&return_v0); + } + } + + CodeDesc code_desc; + masm_->GetCode(&code_desc); + Handle<Code> code = isolate()->factory()->NewCode( + code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); + LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source)); + return Handle<HeapObject>::cast(code); +} + + +void RegExpMacroAssemblerMIPS::GoTo(Label* to) { + if (to == NULL) { + Backtrack(); + return; + } + __ jmp(to); + return; +} + + +void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg, + int comparand, + Label* if_ge) { + __ ld(a0, register_location(reg)); + BranchOrBacktrack(if_ge, ge, a0, Operand(comparand)); +} + + +void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg, + int comparand, + Label* if_lt) { + __ ld(a0, register_location(reg)); + BranchOrBacktrack(if_lt, lt, a0, Operand(comparand)); +} + + +void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg, + Label* if_eq) { + __ ld(a0, register_location(reg)); + BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset())); +} + + +RegExpMacroAssembler::IrregexpImplementation + RegExpMacroAssemblerMIPS::Implementation() { + return kMIPSImplementation; +} + + +void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset, + Label* on_end_of_input, + bool check_bounds, + int characters) { + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works). 
+ if (check_bounds) { + CheckPosition(cp_offset + characters - 1, on_end_of_input); + } + LoadCurrentCharacterUnchecked(cp_offset, characters); +} + + +void RegExpMacroAssemblerMIPS::PopCurrentPosition() { + Pop(current_input_offset()); +} + + +void RegExpMacroAssemblerMIPS::PopRegister(int register_index) { + Pop(a0); + __ sd(a0, register_location(register_index)); +} + + +void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) { + if (label->is_bound()) { + int target = label->pos(); + __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag)); + } else { + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + Label after_constant; + __ Branch(&after_constant); + int offset = masm_->pc_offset(); + int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag; + __ emit(0); + masm_->label_at_put(label, offset); + __ bind(&after_constant); + if (is_int16(cp_offset)) { + __ lwu(a0, MemOperand(code_pointer(), cp_offset)); + } else { + __ Daddu(a0, code_pointer(), cp_offset); + __ lwu(a0, MemOperand(a0, 0)); + } + } + Push(a0); + CheckStackLimit(); +} + + +void RegExpMacroAssemblerMIPS::PushCurrentPosition() { + Push(current_input_offset()); +} + + +void RegExpMacroAssemblerMIPS::PushRegister(int register_index, + StackCheckFlag check_stack_limit) { + __ ld(a0, register_location(register_index)); + Push(a0); + if (check_stack_limit) CheckStackLimit(); +} + + +void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) { + __ ld(current_input_offset(), register_location(reg)); +} + + +void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) { + __ ld(backtrack_stackpointer(), register_location(reg)); + __ ld(a0, MemOperand(frame_pointer(), kStackHighEnd)); + __ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0)); +} + + +void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) { + Label after_position; + __ Branch(&after_position, + ge, + current_input_offset(), + Operand(-by * char_size())); + __ 
li(current_input_offset(), -by * char_size()); + // On RegExp code entry (where this operation is used), the character before + // the current position is expected to be already loaded. + // We have advanced the position, so it's safe to read backwards. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&after_position); +} + + +void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) { + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! + __ li(a0, Operand(to)); + __ sd(a0, register_location(register_index)); +} + + +bool RegExpMacroAssemblerMIPS::Succeed() { + __ jmp(&success_label_); + return global(); +} + + +void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg, + int cp_offset) { + if (cp_offset == 0) { + __ sd(current_input_offset(), register_location(reg)); + } else { + __ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size())); + __ sd(a0, register_location(reg)); + } +} + + +void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) { + DCHECK(reg_from <= reg_to); + __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne)); + for (int reg = reg_from; reg <= reg_to; reg++) { + __ sd(a0, register_location(reg)); + } +} + + +void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) { + __ ld(a1, MemOperand(frame_pointer(), kStackHighEnd)); + __ Dsubu(a0, backtrack_stackpointer(), a1); + __ sd(a0, register_location(reg)); +} + + +bool RegExpMacroAssemblerMIPS::CanReadUnaligned() { + return false; +} + + +// Private methods: + +void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) { + int stack_alignment = base::OS::ActivationFrameAlignment(); + + // Align the stack pointer and save the original sp value on the stack. + __ mov(scratch, sp); + __ Dsubu(sp, sp, Operand(kPointerSize)); + DCHECK(IsPowerOf2(stack_alignment)); + __ And(sp, sp, Operand(-stack_alignment)); + __ sd(scratch, MemOperand(sp)); + + __ mov(a2, frame_pointer()); + // Code* of self. 
+  __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+  // We need to make room for the return address on the stack.
+  DCHECK(IsAligned(stack_alignment, kPointerSize));
+  __ Dsubu(sp, sp, Operand(stack_alignment));
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are in registers, meaning we treat the return address as
+  // argument 5. Since DirectCEntryStub will handle allocating space for the C
+  // argument slots, we don't need to care about that here. This is how the
+  // stack will look (sp meaning the value of sp at this moment):
+  // [sp + 3] - empty slot if needed for alignment.
+  // [sp + 2] - saved sp.
+  // [sp + 1] - second word reserved for return value.
+  // [sp + 0] - first word reserved for return value.
+
+  // a0 will point to the return address, placed by DirectCEntry.
+  __ mov(a0, sp);
+
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+  __ li(t9, Operand(stack_guard_check));
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(masm_, t9);
+
+  // DirectCEntryStub allocated space for the C argument slots so we have to
+  // drop them with the return address from the stack with loading saved sp.
+  // At this point the stack must look like:
+  // [sp + 7] - empty slot if needed for alignment.
+  // [sp + 6] - saved sp.
+  // [sp + 5] - second word reserved for return value.
+  // [sp + 4] - first word reserved for return value.
+  // [sp + 3] - C argument slot.
+  // [sp + 2] - C argument slot.
+  // [sp + 1] - C argument slot.
+  // [sp + 0] - C argument slot.
+  __ ld(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+  __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T> +static T& frame_entry(Address re_frame, int frame_offset) { + return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset)); +} + + +int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame) { + Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { + isolate->StackOverflow(); + return EXCEPTION; + } + + // If not real stack overflow the stack guard was used to interrupt + // execution for another purpose. + + // If this is a direct call from JavaScript retry the RegExp forcing the call + // through the runtime system. Currently the direct call cannot handle a GC. + if (frame_entry<int>(re_frame, kDirectCall) == 1) { + return RETRY; + } + + // Prepare for possible GC. + HandleScope handles(isolate); + Handle<Code> code_handle(re_code); + + Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); + // Current string. + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); + + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= + re_code->instruction_start() + re_code->instruction_size()); + + Object* result = isolate->stack_guard()->HandleInterrupts(); + + if (*code_handle != re_code) { // Return address no longer valid. + int delta = code_handle->address() - re_code->address(); + // Overwrite the return address on the stack. + *return_address += delta; + } + + if (result->IsException()) { + return EXCEPTION; + } + + Handle<String> subject_tmp = subject; + int slice_offset = 0; + + // Extract the underlying string and the slice offset. 
+ if (StringShape(*subject_tmp).IsCons()) { + subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first()); + } else if (StringShape(*subject_tmp).IsSliced()) { + SlicedString* slice = SlicedString::cast(*subject_tmp); + subject_tmp = Handle<String>(slice->parent()); + slice_offset = slice->offset(); + } + + // String might have changed. + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { + // If we changed between an ASCII and an UC16 string, the specialized + // code cannot be used, and we need to restart regexp matching from + // scratch (including, potentially, compiling a new version of the code). + return RETRY; + } + + // Otherwise, the content of the string might have moved. It must still + // be a sequential or external string with the same content. + // Update the start and end pointers in the stack frame to the current + // location (whether it has actually moved or not). + DCHECK(StringShape(*subject_tmp).IsSequential() || + StringShape(*subject_tmp).IsExternal()); + + // The original start address of the characters to match. + const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart); + + // Find the current start address of the same character at the current string + // position. + int start_index = frame_entry<int>(re_frame, kStartIndex); + const byte* new_address = StringCharacterPosition(*subject_tmp, + start_index + slice_offset); + + if (start_address != new_address) { + // If there is a difference, update the object pointer and start and end + // addresses in the RegExp stack frame to match the new value. 
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd); + int byte_length = static_cast<int>(end_address - start_address); + frame_entry<const String*>(re_frame, kInputString) = *subject; + frame_entry<const byte*>(re_frame, kInputStart) = new_address; + frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length; + } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) { + // Subject string might have been a ConsString that underwent + // short-circuiting during GC. That will not change start_address but + // will change pointer inside the subject handle. + frame_entry<const String*>(re_frame, kInputString) = *subject; + } + + return 0; +} + + +MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) { + DCHECK(register_index < (1<<30)); + if (num_registers_ <= register_index) { + num_registers_ = register_index + 1; + } + return MemOperand(frame_pointer(), + kRegisterZero - register_index * kPointerSize); +} + + +void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset, + Label* on_outside_input) { + BranchOrBacktrack(on_outside_input, + ge, + current_input_offset(), + Operand(-cp_offset * char_size())); +} + + +void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to, + Condition condition, + Register rs, + const Operand& rt) { + if (condition == al) { // Unconditional. 
+ if (to == NULL) { + Backtrack(); + return; + } + __ jmp(to); + return; + } + if (to == NULL) { + __ Branch(&backtrack_label_, condition, rs, rt); + return; + } + __ Branch(to, condition, rs, rt); +} + + +void RegExpMacroAssemblerMIPS::SafeCall(Label* to, + Condition cond, + Register rs, + const Operand& rt) { + __ BranchAndLink(to, cond, rs, rt); +} + + +void RegExpMacroAssemblerMIPS::SafeReturn() { + __ pop(ra); + __ Daddu(t1, ra, Operand(masm_->CodeObject())); + __ Jump(t1); +} + + +void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) { + __ bind(name); + __ Dsubu(ra, ra, Operand(masm_->CodeObject())); + __ push(ra); +} + + +void RegExpMacroAssemblerMIPS::Push(Register source) { + DCHECK(!source.is(backtrack_stackpointer())); + __ Daddu(backtrack_stackpointer(), + backtrack_stackpointer(), + Operand(-kIntSize)); + __ sw(source, MemOperand(backtrack_stackpointer())); +} + + +void RegExpMacroAssemblerMIPS::Pop(Register target) { + DCHECK(!target.is(backtrack_stackpointer())); + __ lw(target, MemOperand(backtrack_stackpointer())); + __ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize); +} + + +void RegExpMacroAssemblerMIPS::CheckPreemption() { + // Check for preemption. + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(masm_->isolate()); + __ li(a0, Operand(stack_limit)); + __ ld(a0, MemOperand(a0)); + SafeCall(&check_preempt_label_, ls, sp, Operand(a0)); +} + + +void RegExpMacroAssemblerMIPS::CheckStackLimit() { + ExternalReference stack_limit = + ExternalReference::address_of_regexp_stack_limit(masm_->isolate()); + + __ li(a0, Operand(stack_limit)); + __ ld(a0, MemOperand(a0)); + SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0)); +} + + +void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset, + int characters) { + Register offset = current_input_offset(); + if (cp_offset != 0) { + // t3 is not being used to store the capture start index at this point. 
+ __ Daddu(t3, current_input_offset(), Operand(cp_offset * char_size())); + offset = t3; + } + // We assume that we cannot do unaligned loads on MIPS, so this function + // must only be used to load a single character at a time. + DCHECK(characters == 1); + __ Daddu(t1, end_of_input_address(), Operand(offset)); + if (mode_ == ASCII) { + __ lbu(current_character(), MemOperand(t1, 0)); + } else { + DCHECK(mode_ == UC16); + __ lhu(current_character(), MemOperand(t1, 0)); + } +} + +#undef __ + +#endif // V8_INTERPRETED_REGEXP + +}} // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/regexp-macro-assembler-mips64.h nodejs-0.11.15/deps/v8/src/mips64/regexp-macro-assembler-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/regexp-macro-assembler-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/regexp-macro-assembler-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,269 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ + +#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ +#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ + +#include "src/macro-assembler.h" +#include "src/mips64/assembler-mips64-inl.h" +#include "src/mips64/assembler-mips64.h" +#include "src/mips64/macro-assembler-mips64.h" + +namespace v8 { +namespace internal { + +#ifndef V8_INTERPRETED_REGEXP +class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { + public: + RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone); + virtual ~RegExpMacroAssemblerMIPS(); + virtual int stack_limit_slack(); + virtual void AdvanceCurrentPosition(int by); + virtual void AdvanceRegister(int reg, int by); + virtual void Backtrack(); + virtual void Bind(Label* label); + virtual void CheckAtStart(Label* on_at_start); + virtual void CheckCharacter(uint32_t c, Label* on_equal); + virtual void CheckCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_equal); + virtual void CheckCharacterGT(uc16 limit, Label* on_greater); + virtual void CheckCharacterLT(uc16 limit, Label* on_less); + // A "greedy loop" is a loop that is both greedy and with a simple + // body. It has a particularly simple implementation. 
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position); + virtual void CheckNotAtStart(Label* on_not_at_start); + virtual void CheckNotBackReference(int start_reg, Label* on_no_match); + virtual void CheckNotBackReferenceIgnoreCase(int start_reg, + Label* on_no_match); + virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal); + virtual void CheckNotCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_not_equal); + virtual void CheckNotCharacterAfterMinusAnd(uc16 c, + uc16 minus, + uc16 mask, + Label* on_not_equal); + virtual void CheckCharacterInRange(uc16 from, + uc16 to, + Label* on_in_range); + virtual void CheckCharacterNotInRange(uc16 from, + uc16 to, + Label* on_not_in_range); + virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set); + + // Checks whether the given offset from the current position is before + // the end of the string. + virtual void CheckPosition(int cp_offset, Label* on_outside_input); + virtual bool CheckSpecialCharacterClass(uc16 type, + Label* on_no_match); + virtual void Fail(); + virtual Handle<HeapObject> GetCode(Handle<String> source); + virtual void GoTo(Label* label); + virtual void IfRegisterGE(int reg, int comparand, Label* if_ge); + virtual void IfRegisterLT(int reg, int comparand, Label* if_lt); + virtual void IfRegisterEqPos(int reg, Label* if_eq); + virtual IrregexpImplementation Implementation(); + virtual void LoadCurrentCharacter(int cp_offset, + Label* on_end_of_input, + bool check_bounds = true, + int characters = 1); + virtual void PopCurrentPosition(); + virtual void PopRegister(int register_index); + virtual void PushBacktrack(Label* label); + virtual void PushCurrentPosition(); + virtual void PushRegister(int register_index, + StackCheckFlag check_stack_limit); + virtual void ReadCurrentPositionFromRegister(int reg); + virtual void ReadStackPointerFromRegister(int reg); + virtual void SetCurrentPositionFromEnd(int by); + virtual void SetRegister(int register_index, int 
to); + virtual bool Succeed(); + virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); + virtual void ClearRegisters(int reg_from, int reg_to); + virtual void WriteStackPointerToRegister(int reg); + virtual bool CanReadUnaligned(); + + // Called from RegExp if the stack-guard is triggered. + // If the code object is relocated, the return address is fixed before + // returning. + static int CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame); + + void print_regexp_frame_constants(); + + private: +#if defined(MIPS_ABI_N64) + // Offsets from frame_pointer() of function parameters and stored registers. + static const int kFramePointer = 0; + + // Above the frame pointer - Stored registers and stack passed parameters. + // Registers s0 to s7, fp, and ra. + static const int kStoredRegisters = kFramePointer; + // Return address (stored from link register, read into pc on return). + +// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp. + + static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize; + static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; + // Stack frame header. + static const int kStackFrameHeader = kSecondaryReturnAddress; + // Stack parameters placed by caller. + static const int kIsolate = kStackFrameHeader + kPointerSize; + + // Below the frame pointer. + // Register parameters stored by setup code. 
+ static const int kDirectCall = kFramePointer - kPointerSize; + static const int kStackHighEnd = kDirectCall - kPointerSize; + static const int kNumOutputRegisters = kStackHighEnd - kPointerSize; + static const int kRegisterOutput = kNumOutputRegisters - kPointerSize; + static const int kInputEnd = kRegisterOutput - kPointerSize; + static const int kInputStart = kInputEnd - kPointerSize; + static const int kStartIndex = kInputStart - kPointerSize; + static const int kInputString = kStartIndex - kPointerSize; + // When adding local variables remember to push space for them in + // the frame in GetCode. + static const int kSuccessfulCaptures = kInputString - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; + // First register address. Following registers are below it on the stack. + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; + +#elif defined(MIPS_ABI_O32) + // Offsets from frame_pointer() of function parameters and stored registers. + static const int kFramePointer = 0; + + // Above the frame pointer - Stored registers and stack passed parameters. + // Registers s0 to s7, fp, and ra. + static const int kStoredRegisters = kFramePointer; + // Return address (stored from link register, read into pc on return). + static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize; + static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; + // Stack frame header. + static const int kStackFrameHeader = kReturnAddress + kPointerSize; + // Stack parameters placed by caller. + static const int kRegisterOutput = + kStackFrameHeader + 4 * kPointerSize + kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; + static const int kDirectCall = kStackHighEnd + kPointerSize; + static const int kIsolate = kDirectCall + kPointerSize; + + // Below the frame pointer. 
+ // Register parameters stored by setup code. + static const int kInputEnd = kFramePointer - kPointerSize; + static const int kInputStart = kInputEnd - kPointerSize; + static const int kStartIndex = kInputStart - kPointerSize; + static const int kInputString = kStartIndex - kPointerSize; + // When adding local variables remember to push space for them in + // the frame in GetCode. + static const int kSuccessfulCaptures = kInputString - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; + // First register address. Following registers are below it on the stack. + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; + +#else +# error "undefined MIPS ABI" +#endif + + // Initial size of code buffer. + static const size_t kRegExpCodeSize = 1024; + + // Load a number of characters at the given offset from the + // current position, into the current-character register. + void LoadCurrentCharacterUnchecked(int cp_offset, int character_count); + + // Check whether preemption has been requested. + void CheckPreemption(); + + // Check whether we are exceeding the stack limit on the backtrack stack. + void CheckStackLimit(); + + + // Generate a call to CheckStackGuardState. + void CallCheckStackGuardState(Register scratch); + + // The ebp-relative location of a regexp register. + MemOperand register_location(int register_index); + + // Register holding the current input position as negative offset from + // the end of the string. + inline Register current_input_offset() { return a6; } + + // The register containing the current character after LoadCurrentCharacter. + inline Register current_character() { return a7; } + + // Register holding address of the end of the input string. + inline Register end_of_input_address() { return t2; } + + // Register holding the frame address. Local variables, parameters and + // regexp registers are addressed relative to this. 
+ inline Register frame_pointer() { return fp; } + + // The register containing the backtrack stack top. Provides a meaningful + // name to the register. + inline Register backtrack_stackpointer() { return t0; } + + // Register holding pointer to the current code object. + inline Register code_pointer() { return a5; } + + // Byte size of chars in the string to match (decided by the Mode argument). + inline int char_size() { return static_cast<int>(mode_); } + + // Equivalent to a conditional branch to the label, unless the label + // is NULL, in which case it is a conditional Backtrack. + void BranchOrBacktrack(Label* to, + Condition condition, + Register rs, + const Operand& rt); + + // Call and return internally in the generated code in a way that + // is GC-safe (i.e., doesn't leave absolute code addresses on the stack) + inline void SafeCall(Label* to, + Condition cond, + Register rs, + const Operand& rt); + inline void SafeReturn(); + inline void SafeCallTarget(Label* name); + + // Pushes the value of a register on the backtrack stack. Decrements the + // stack pointer by a word size and stores the register's value there. + inline void Push(Register source); + + // Pops a value from the backtrack stack. Reads the word at the stack pointer + // and increments it by a word size. + inline void Pop(Register target); + + Isolate* isolate() const { return masm_->isolate(); } + + MacroAssembler* masm_; + + // Which mode to generate code for (ASCII or UC16). + Mode mode_; + + // One greater than maximal register index actually used. + int num_registers_; + + // Number of registers to output at the end (the saved registers + // are always 0..num_saved_registers_-1). + int num_saved_registers_; + + // Labels used internally. 
+ Label entry_label_; + Label start_label_; + Label success_label_; + Label backtrack_label_; + Label exit_label_; + Label check_preempt_label_; + Label stack_overflow_label_; + Label internal_failure_label_; +}; + +#endif // V8_INTERPRETED_REGEXP + + +}} // namespace v8::internal + +#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/simulator-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/simulator-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/simulator-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/simulator-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3451 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <limits.h> +#include <stdarg.h> +#include <stdlib.h> +#include <cmath> + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/assembler.h" +#include "src/disasm.h" +#include "src/globals.h" // Need the BitCast. +#include "src/mips64/constants-mips64.h" +#include "src/mips64/simulator-mips64.h" +#include "src/ostreams.h" + +// Only build the simulator if not compiling for real MIPS hardware. +#if defined(USE_SIMULATOR) + +namespace v8 { +namespace internal { + +// Utils functions. +bool HaveSameSign(int64_t a, int64_t b) { + return ((a ^ b) >= 0); +} + + +uint32_t get_fcsr_condition_bit(uint32_t cc) { + if (cc == 0) { + return 23; + } else { + return 24 + cc; + } +} + + +static int64_t MultiplyHighSigned(int64_t u, int64_t v) { + uint64_t u0, v0, w0; + int64_t u1, v1, w1, w2, t; + + u0 = u & 0xffffffffL; + u1 = u >> 32; + v0 = v & 0xffffffffL; + v1 = v >> 32; + + w0 = u0 * v0; + t = u1 * v0 + (w0 >> 32); + w1 = t & 0xffffffffL; + w2 = t >> 32; + w1 = u0 * v1 + w1; + + return u1 * v1 + w2 + (w1 >> 32); +} + + +// This macro provides a platform independent use of sscanf. 
The reason for +// SScanF not being implemented in a platform independent was through +// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time +// Library does not provide vsscanf. +#define SScanF sscanf // NOLINT + +// The MipsDebugger class is used by the simulator while debugging simulated +// code. +class MipsDebugger { + public: + explicit MipsDebugger(Simulator* sim) : sim_(sim) { } + ~MipsDebugger(); + + void Stop(Instruction* instr); + void Debug(); + // Print all registers with a nice formatting. + void PrintAllRegs(); + void PrintAllRegsIncludingFPU(); + + private: + // We set the breakpoint code to 0xfffff to easily recognize it. + static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6; + static const Instr kNopInstr = 0x0; + + Simulator* sim_; + + int64_t GetRegisterValue(int regnum); + int64_t GetFPURegisterValue(int regnum); + float GetFPURegisterValueFloat(int regnum); + double GetFPURegisterValueDouble(int regnum); + bool GetValue(const char* desc, int64_t* value); + + // Set or delete a breakpoint. Returns true if successful. + bool SetBreakpoint(Instruction* breakpc); + bool DeleteBreakpoint(Instruction* breakpc); + + // Undo and redo all breakpoints. This is needed to bracket disassembly and + // execution to skip past breakpoints when run from the debugger. + void UndoBreakpoints(); + void RedoBreakpoints(); +}; + + +MipsDebugger::~MipsDebugger() { +} + + +#ifdef GENERATED_CODE_COVERAGE +static FILE* coverage_log = NULL; + + +static void InitializeCoverage() { + char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG"); + if (file_name != NULL) { + coverage_log = fopen(file_name, "aw+"); + } +} + + +void MipsDebugger::Stop(Instruction* instr) { + // Get the stop code. + uint32_t code = instr->Bits(25, 6); + // Retrieve the encoded address, which comes just after this stop. 
+ char** msg_address = + reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize); + char* msg = *msg_address; + DCHECK(msg != NULL); + + // Update this stop description. + if (!watched_stops_[code].desc) { + watched_stops_[code].desc = msg; + } + + if (strlen(msg) > 0) { + if (coverage_log != NULL) { + fprintf(coverage_log, "%s\n", str); + fflush(coverage_log); + } + // Overwrite the instruction and address with nops. + instr->SetInstructionBits(kNopInstr); + reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr); + } + // TODO(yuyin): 2 -> 3? + sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstructionSize); +} + + +#else // GENERATED_CODE_COVERAGE + +#define UNSUPPORTED() printf("Unsupported instruction.\n"); + +static void InitializeCoverage() {} + + +void MipsDebugger::Stop(Instruction* instr) { + // Get the stop code. + uint32_t code = instr->Bits(25, 6); + // Retrieve the encoded address, which comes just after this stop. + char* msg = *reinterpret_cast<char**>(sim_->get_pc() + + Instruction::kInstrSize); + // Update this stop description. + if (!sim_->watched_stops_[code].desc) { + sim_->watched_stops_[code].desc = msg; + } + PrintF("Simulator hit %s (%u)\n", msg, code); + // TODO(yuyin): 2 -> 3? 
+ sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstrSize); + Debug(); +} +#endif // GENERATED_CODE_COVERAGE + + +int64_t MipsDebugger::GetRegisterValue(int regnum) { + if (regnum == kNumSimuRegisters) { + return sim_->get_pc(); + } else { + return sim_->get_register(regnum); + } +} + + +int64_t MipsDebugger::GetFPURegisterValue(int regnum) { + if (regnum == kNumFPURegisters) { + return sim_->get_pc(); + } else { + return sim_->get_fpu_register(regnum); + } +} + + +float MipsDebugger::GetFPURegisterValueFloat(int regnum) { + if (regnum == kNumFPURegisters) { + return sim_->get_pc(); + } else { + return sim_->get_fpu_register_float(regnum); + } +} + + +double MipsDebugger::GetFPURegisterValueDouble(int regnum) { + if (regnum == kNumFPURegisters) { + return sim_->get_pc(); + } else { + return sim_->get_fpu_register_double(regnum); + } +} + + +bool MipsDebugger::GetValue(const char* desc, int64_t* value) { + int regnum = Registers::Number(desc); + int fpuregnum = FPURegisters::Number(desc); + + if (regnum != kInvalidRegister) { + *value = GetRegisterValue(regnum); + return true; + } else if (fpuregnum != kInvalidFPURegister) { + *value = GetFPURegisterValue(fpuregnum); + return true; + } else if (strncmp(desc, "0x", 2) == 0) { + return SScanF(desc + 2, "%" SCNx64, + reinterpret_cast<uint64_t*>(value)) == 1; + } else { + return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1; + } + return false; +} + + +bool MipsDebugger::SetBreakpoint(Instruction* breakpc) { + // Check if a breakpoint can be set. If not return without any side-effects. + if (sim_->break_pc_ != NULL) { + return false; + } + + // Set the breakpoint. + sim_->break_pc_ = breakpc; + sim_->break_instr_ = breakpc->InstructionBits(); + // Not setting the breakpoint instruction in the code itself. It will be set + // when the debugger shell continues. 
+ return true; +} + + +bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) { + if (sim_->break_pc_ != NULL) { + sim_->break_pc_->SetInstructionBits(sim_->break_instr_); + } + + sim_->break_pc_ = NULL; + sim_->break_instr_ = 0; + return true; +} + + +void MipsDebugger::UndoBreakpoints() { + if (sim_->break_pc_ != NULL) { + sim_->break_pc_->SetInstructionBits(sim_->break_instr_); + } +} + + +void MipsDebugger::RedoBreakpoints() { + if (sim_->break_pc_ != NULL) { + sim_->break_pc_->SetInstructionBits(kBreakpointInstr); + } +} + + +void MipsDebugger::PrintAllRegs() { +#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n) + + PrintF("\n"); + // at, v0, a0. + PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + REG_INFO(1), REG_INFO(2), REG_INFO(4)); + // v1, a1. + PrintF("%34s\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + "", REG_INFO(3), REG_INFO(5)); + // a2. + PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(6)); + // a3. + PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(7)); + PrintF("\n"); + // a4-t3, s0-s7 + for (int i = 0; i < 8; i++) { + PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + REG_INFO(8+i), REG_INFO(16+i)); + } + PrintF("\n"); + // t8, k0, LO. + PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + REG_INFO(24), REG_INFO(26), REG_INFO(32)); + // t9, k1, HI. + PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + REG_INFO(25), REG_INFO(27), REG_INFO(33)); + // sp, fp, gp. + PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + REG_INFO(29), REG_INFO(30), REG_INFO(28)); + // pc. 
+ PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n", + REG_INFO(31), REG_INFO(34)); + +#undef REG_INFO +#undef FPU_REG_INFO +} + + +void MipsDebugger::PrintAllRegsIncludingFPU() { +#define FPU_REG_INFO(n) FPURegisters::Name(n), \ + GetFPURegisterValue(n), \ + GetFPURegisterValueDouble(n) + + PrintAllRegs(); + + PrintF("\n\n"); + // f0, f1, f2, ... f31. + // TODO(plind): consider printing 2 columns for space efficiency. + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(0) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(1) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(2) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(3) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(4) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(5) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(6) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(7) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(8) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(9) ); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(10)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(11)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(12)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(13)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(14)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(15)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(16)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(17)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(18)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(19)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(20)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(21)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(22)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(23)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(24)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(25)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(26)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(27)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(28)); + 
PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(29)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(30)); + PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(31)); + +#undef REG_INFO +#undef FPU_REG_INFO +} + + +void MipsDebugger::Debug() { + intptr_t last_pc = -1; + bool done = false; + +#define COMMAND_SIZE 63 +#define ARG_SIZE 255 + +#define STR(a) #a +#define XSTR(a) STR(a) + + char cmd[COMMAND_SIZE + 1]; + char arg1[ARG_SIZE + 1]; + char arg2[ARG_SIZE + 1]; + char* argv[3] = { cmd, arg1, arg2 }; + + // Make sure to have a proper terminating character if reaching the limit. + cmd[COMMAND_SIZE] = 0; + arg1[ARG_SIZE] = 0; + arg2[ARG_SIZE] = 0; + + // Undo all set breakpoints while running in the debugger shell. This will + // make them invisible to all commands. + UndoBreakpoints(); + + while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) { + if (last_pc != sim_->get_pc()) { + disasm::NameConverter converter; + disasm::Disassembler dasm(converter); + // Use a reasonably large buffer. + v8::internal::EmbeddedVector<char, 256> buffer; + dasm.InstructionDecode(buffer, + reinterpret_cast<byte*>(sim_->get_pc())); + PrintF(" 0x%016lx %s\n", sim_->get_pc(), buffer.start()); + last_pc = sim_->get_pc(); + } + char* line = ReadLine("sim> "); + if (line == NULL) { + break; + } else { + char* last_input = sim_->last_debugger_input(); + if (strcmp(line, "\n") == 0 && last_input != NULL) { + line = last_input; + } else { + // Ownership is transferred to sim_; + sim_->set_last_debugger_input(line); + } + // Use sscanf to parse the individual parts of the command line. At the + // moment no command expects more than two parameters. 
+ int argc = SScanF(line, + "%" XSTR(COMMAND_SIZE) "s " + "%" XSTR(ARG_SIZE) "s " + "%" XSTR(ARG_SIZE) "s", + cmd, arg1, arg2); + if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) { + Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc()); + if (!(instr->IsTrap()) || + instr->InstructionBits() == rtCallRedirInstr) { + sim_->InstructionDecode( + reinterpret_cast<Instruction*>(sim_->get_pc())); + } else { + // Allow si to jump over generated breakpoints. + PrintF("/!\\ Jumping over generated breakpoint.\n"); + sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize); + } + } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) { + // Execute the one instruction we broke at with breakpoints disabled. + sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc())); + // Leave the debugger shell. + done = true; + } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { + if (argc == 2) { + int64_t value; + double dvalue; + if (strcmp(arg1, "all") == 0) { + PrintAllRegs(); + } else if (strcmp(arg1, "allf") == 0) { + PrintAllRegsIncludingFPU(); + } else { + int regnum = Registers::Number(arg1); + int fpuregnum = FPURegisters::Number(arg1); + + if (regnum != kInvalidRegister) { + value = GetRegisterValue(regnum); + PrintF("%s: 0x%08lx %ld \n", arg1, value, value); + } else if (fpuregnum != kInvalidFPURegister) { + value = GetFPURegisterValue(fpuregnum); + dvalue = GetFPURegisterValueDouble(fpuregnum); + PrintF("%3s: 0x%016lx %16.4e\n", + FPURegisters::Name(fpuregnum), value, dvalue); + } else { + PrintF("%s unrecognized\n", arg1); + } + } + } else { + if (argc == 3) { + if (strcmp(arg2, "single") == 0) { + int64_t value; + float fvalue; + int fpuregnum = FPURegisters::Number(arg1); + + if (fpuregnum != kInvalidFPURegister) { + value = GetFPURegisterValue(fpuregnum); + value &= 0xffffffffUL; + fvalue = GetFPURegisterValueFloat(fpuregnum); + PrintF("%s: 0x%08lx %11.4e\n", arg1, value, fvalue); + } else { + PrintF("%s 
unrecognized\n", arg1); + } + } else { + PrintF("print <fpu register> single\n"); + } + } else { + PrintF("print <register> or print <fpu register> single\n"); + } + } + } else if ((strcmp(cmd, "po") == 0) + || (strcmp(cmd, "printobject") == 0)) { + if (argc == 2) { + int64_t value; + OFStream os(stdout); + if (GetValue(arg1, &value)) { + Object* obj = reinterpret_cast<Object*>(value); + os << arg1 << ": \n"; +#ifdef DEBUG + obj->Print(os); + os << "\n"; +#else + os << Brief(obj) << "\n"; +#endif + } else { + os << arg1 << " unrecognized\n"; + } + } else { + PrintF("printobject <value>\n"); + } + } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) { + int64_t* cur = NULL; + int64_t* end = NULL; + int next_arg = 1; + + if (strcmp(cmd, "stack") == 0) { + cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp)); + } else { // Command "mem". + int64_t value; + if (!GetValue(arg1, &value)) { + PrintF("%s unrecognized\n", arg1); + continue; + } + cur = reinterpret_cast<int64_t*>(value); + next_arg++; + } + + int64_t words; + if (argc == next_arg) { + words = 10; + } else { + if (!GetValue(argv[next_arg], &words)) { + words = 10; + } + } + end = cur + words; + + while (cur < end) { + PrintF(" 0x%012lx: 0x%016lx %14ld", + reinterpret_cast<intptr_t>(cur), *cur, *cur); + HeapObject* obj = reinterpret_cast<HeapObject*>(*cur); + int64_t value = *cur; + Heap* current_heap = v8::internal::Isolate::Current()->heap(); + if (((value & 1) == 0) || current_heap->Contains(obj)) { + PrintF(" ("); + if ((value & 1) == 0) { + PrintF("smi %d", static_cast<int>(value >> 32)); + } else { + obj->ShortPrint(); + } + PrintF(")"); + } + PrintF("\n"); + cur++; + } + + } else if ((strcmp(cmd, "disasm") == 0) || + (strcmp(cmd, "dpc") == 0) || + (strcmp(cmd, "di") == 0)) { + disasm::NameConverter converter; + disasm::Disassembler dasm(converter); + // Use a reasonably large buffer. 
+ v8::internal::EmbeddedVector<char, 256> buffer; + + byte* cur = NULL; + byte* end = NULL; + + if (argc == 1) { + cur = reinterpret_cast<byte*>(sim_->get_pc()); + end = cur + (10 * Instruction::kInstrSize); + } else if (argc == 2) { + int regnum = Registers::Number(arg1); + if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) { + // The argument is an address or a register name. + int64_t value; + if (GetValue(arg1, &value)) { + cur = reinterpret_cast<byte*>(value); + // Disassemble 10 instructions at <arg1>. + end = cur + (10 * Instruction::kInstrSize); + } + } else { + // The argument is the number of instructions. + int64_t value; + if (GetValue(arg1, &value)) { + cur = reinterpret_cast<byte*>(sim_->get_pc()); + // Disassemble <arg1> instructions. + end = cur + (value * Instruction::kInstrSize); + } + } + } else { + int64_t value1; + int64_t value2; + if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { + cur = reinterpret_cast<byte*>(value1); + end = cur + (value2 * Instruction::kInstrSize); + } + } + + while (cur < end) { + dasm.InstructionDecode(buffer, cur); + PrintF(" 0x%08lx %s\n", + reinterpret_cast<intptr_t>(cur), buffer.start()); + cur += Instruction::kInstrSize; + } + } else if (strcmp(cmd, "gdb") == 0) { + PrintF("relinquishing control to gdb\n"); + v8::base::OS::DebugBreak(); + PrintF("regaining control from gdb\n"); + } else if (strcmp(cmd, "break") == 0) { + if (argc == 2) { + int64_t value; + if (GetValue(arg1, &value)) { + if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) { + PrintF("setting breakpoint failed\n"); + } + } else { + PrintF("%s unrecognized\n", arg1); + } + } else { + PrintF("break <address>\n"); + } + } else if (strcmp(cmd, "del") == 0) { + if (!DeleteBreakpoint(NULL)) { + PrintF("deleting breakpoint failed\n"); + } + } else if (strcmp(cmd, "flags") == 0) { + PrintF("No flags on MIPS !\n"); + } else if (strcmp(cmd, "stop") == 0) { + int64_t value; + intptr_t stop_pc = sim_->get_pc() - + 2 * 
Instruction::kInstrSize; + Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc); + Instruction* msg_address = + reinterpret_cast<Instruction*>(stop_pc + + Instruction::kInstrSize); + if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) { + // Remove the current stop. + if (sim_->IsStopInstruction(stop_instr)) { + stop_instr->SetInstructionBits(kNopInstr); + msg_address->SetInstructionBits(kNopInstr); + } else { + PrintF("Not at debugger stop.\n"); + } + } else if (argc == 3) { + // Print information about all/the specified breakpoint(s). + if (strcmp(arg1, "info") == 0) { + if (strcmp(arg2, "all") == 0) { + PrintF("Stop information:\n"); + for (uint32_t i = kMaxWatchpointCode + 1; + i <= kMaxStopCode; + i++) { + sim_->PrintStopInfo(i); + } + } else if (GetValue(arg2, &value)) { + sim_->PrintStopInfo(value); + } else { + PrintF("Unrecognized argument.\n"); + } + } else if (strcmp(arg1, "enable") == 0) { + // Enable all/the specified breakpoint(s). + if (strcmp(arg2, "all") == 0) { + for (uint32_t i = kMaxWatchpointCode + 1; + i <= kMaxStopCode; + i++) { + sim_->EnableStop(i); + } + } else if (GetValue(arg2, &value)) { + sim_->EnableStop(value); + } else { + PrintF("Unrecognized argument.\n"); + } + } else if (strcmp(arg1, "disable") == 0) { + // Disable all/the specified breakpoint(s). + if (strcmp(arg2, "all") == 0) { + for (uint32_t i = kMaxWatchpointCode + 1; + i <= kMaxStopCode; + i++) { + sim_->DisableStop(i); + } + } else if (GetValue(arg2, &value)) { + sim_->DisableStop(value); + } else { + PrintF("Unrecognized argument.\n"); + } + } + } else { + PrintF("Wrong usage. Use help command for more information.\n"); + } + } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) { + // Print registers and disassemble. + PrintAllRegs(); + PrintF("\n"); + + disasm::NameConverter converter; + disasm::Disassembler dasm(converter); + // Use a reasonably large buffer. 
+ v8::internal::EmbeddedVector<char, 256> buffer; + + byte* cur = NULL; + byte* end = NULL; + + if (argc == 1) { + cur = reinterpret_cast<byte*>(sim_->get_pc()); + end = cur + (10 * Instruction::kInstrSize); + } else if (argc == 2) { + int64_t value; + if (GetValue(arg1, &value)) { + cur = reinterpret_cast<byte*>(value); + // no length parameter passed, assume 10 instructions + end = cur + (10 * Instruction::kInstrSize); + } + } else { + int64_t value1; + int64_t value2; + if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { + cur = reinterpret_cast<byte*>(value1); + end = cur + (value2 * Instruction::kInstrSize); + } + } + + while (cur < end) { + dasm.InstructionDecode(buffer, cur); + PrintF(" 0x%08lx %s\n", + reinterpret_cast<intptr_t>(cur), buffer.start()); + cur += Instruction::kInstrSize; + } + } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { + PrintF("cont\n"); + PrintF(" continue execution (alias 'c')\n"); + PrintF("stepi\n"); + PrintF(" step one instruction (alias 'si')\n"); + PrintF("print <register>\n"); + PrintF(" print register content (alias 'p')\n"); + PrintF(" use register name 'all' to print all registers\n"); + PrintF("printobject <register>\n"); + PrintF(" print an object from a register (alias 'po')\n"); + PrintF("stack [<words>]\n"); + PrintF(" dump stack content, default dump 10 words)\n"); + PrintF("mem <address> [<words>]\n"); + PrintF(" dump memory content, default dump 10 words)\n"); + PrintF("flags\n"); + PrintF(" print flags\n"); + PrintF("disasm [<instructions>]\n"); + PrintF("disasm [<address/register>]\n"); + PrintF("disasm [[<address/register>] <instructions>]\n"); + PrintF(" disassemble code, default is 10 instructions\n"); + PrintF(" from pc (alias 'di')\n"); + PrintF("gdb\n"); + PrintF(" enter gdb\n"); + PrintF("break <address>\n"); + PrintF(" set a break point on the address\n"); + PrintF("del\n"); + PrintF(" delete the breakpoint\n"); + PrintF("stop feature:\n"); + PrintF(" Description:\n"); + PrintF(" 
Stops are debug instructions inserted by\n"); + PrintF(" the Assembler::stop() function.\n"); + PrintF(" When hitting a stop, the Simulator will\n"); + PrintF(" stop and and give control to the Debugger.\n"); + PrintF(" All stop codes are watched:\n"); + PrintF(" - They can be enabled / disabled: the Simulator\n"); + PrintF(" will / won't stop when hitting them.\n"); + PrintF(" - The Simulator keeps track of how many times they \n"); + PrintF(" are met. (See the info command.) Going over a\n"); + PrintF(" disabled stop still increases its counter. \n"); + PrintF(" Commands:\n"); + PrintF(" stop info all/<code> : print infos about number <code>\n"); + PrintF(" or all stop(s).\n"); + PrintF(" stop enable/disable all/<code> : enables / disables\n"); + PrintF(" all or number <code> stop(s)\n"); + PrintF(" stop unstop\n"); + PrintF(" ignore the stop instruction at the current location\n"); + PrintF(" from now on\n"); + } else { + PrintF("Unknown command: %s\n", cmd); + } + } + } + + // Add all the breakpoints back to stop execution and enter the debugger + // shell when hit. 
+ RedoBreakpoints(); + +#undef COMMAND_SIZE +#undef ARG_SIZE + +#undef STR +#undef XSTR +} + + +static bool ICacheMatch(void* one, void* two) { + DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0); + DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0); + return one == two; +} + + +static uint32_t ICacheHash(void* key) { + return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2; +} + + +static bool AllOnOnePage(uintptr_t start, int size) { + intptr_t start_page = (start & ~CachePage::kPageMask); + intptr_t end_page = ((start + size) & ~CachePage::kPageMask); + return start_page == end_page; +} + + +void Simulator::set_last_debugger_input(char* input) { + DeleteArray(last_debugger_input_); + last_debugger_input_ = input; +} + + +void Simulator::FlushICache(v8::internal::HashMap* i_cache, + void* start_addr, + size_t size) { + int64_t start = reinterpret_cast<int64_t>(start_addr); + int64_t intra_line = (start & CachePage::kLineMask); + start -= intra_line; + size += intra_line; + size = ((size - 1) | CachePage::kLineMask) + 1; + int offset = (start & CachePage::kPageMask); + while (!AllOnOnePage(start, size - 1)) { + int bytes_to_flush = CachePage::kPageSize - offset; + FlushOnePage(i_cache, start, bytes_to_flush); + start += bytes_to_flush; + size -= bytes_to_flush; + DCHECK_EQ((uint64_t)0, start & CachePage::kPageMask); + offset = 0; + } + if (size != 0) { + FlushOnePage(i_cache, start, size); + } +} + + +CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) { + v8::internal::HashMap::Entry* entry = i_cache->Lookup(page, + ICacheHash(page), + true); + if (entry->value == NULL) { + CachePage* new_page = new CachePage(); + entry->value = new_page; + } + return reinterpret_cast<CachePage*>(entry->value); +} + + +// Flush from start up to and not including start + size. 
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, + intptr_t start, + int size) { + DCHECK(size <= CachePage::kPageSize); + DCHECK(AllOnOnePage(start, size - 1)); + DCHECK((start & CachePage::kLineMask) == 0); + DCHECK((size & CachePage::kLineMask) == 0); + void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask)); + int offset = (start & CachePage::kPageMask); + CachePage* cache_page = GetCachePage(i_cache, page); + char* valid_bytemap = cache_page->ValidityByte(offset); + memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift); +} + + +void Simulator::CheckICache(v8::internal::HashMap* i_cache, + Instruction* instr) { + int64_t address = reinterpret_cast<int64_t>(instr); + void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask)); + void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask)); + int offset = (address & CachePage::kPageMask); + CachePage* cache_page = GetCachePage(i_cache, page); + char* cache_valid_byte = cache_page->ValidityByte(offset); + bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID); + char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); + if (cache_hit) { + // Check that the data in memory matches the contents of the I-cache. + CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr), + cache_page->CachedData(offset), + Instruction::kInstrSize)); + } else { + // Cache miss. Load memory into the cache. 
+ memcpy(cached_line, line, CachePage::kLineLength); + *cache_valid_byte = CachePage::LINE_VALID; + } +} + + +void Simulator::Initialize(Isolate* isolate) { + if (isolate->simulator_initialized()) return; + isolate->set_simulator_initialized(true); + ::v8::internal::ExternalReference::set_redirector(isolate, + &RedirectExternalReference); +} + + +Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { + i_cache_ = isolate_->simulator_i_cache(); + if (i_cache_ == NULL) { + i_cache_ = new v8::internal::HashMap(&ICacheMatch); + isolate_->set_simulator_i_cache(i_cache_); + } + Initialize(isolate); + // Set up simulator support first. Some of this information is needed to + // setup the architecture state. + stack_size_ = FLAG_sim_stack_size * KB; + stack_ = reinterpret_cast<char*>(malloc(stack_size_)); + pc_modified_ = false; + icount_ = 0; + break_count_ = 0; + break_pc_ = NULL; + break_instr_ = 0; + + // Set up architecture state. + // All registers are initialized to zero to start with. + for (int i = 0; i < kNumSimuRegisters; i++) { + registers_[i] = 0; + } + for (int i = 0; i < kNumFPURegisters; i++) { + FPUregisters_[i] = 0; + } + FCSR_ = 0; + + // The sp is initialized to point to the bottom (high address) of the + // allocated stack area. To be safe in potential stack underflows we leave + // some buffer below. + registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64; + // The ra and pc are initialized to a known bad value that will cause an + // access violation if the simulator ever tries to execute it. + registers_[pc] = bad_ra; + registers_[ra] = bad_ra; + InitializeCoverage(); + for (int i = 0; i < kNumExceptions; i++) { + exceptions[i] = 0; + } + + last_debugger_input_ = NULL; +} + + +Simulator::~Simulator() { +} + + +// When the generated code calls an external reference we need to catch that in +// the simulator. The external reference will be a function compiled for the +// host architecture. 
We need to call that function instead of trying to +// execute it with the simulator. We do that by redirecting the external +// reference to a swi (software-interrupt) instruction that is handled by +// the simulator. We write the original destination of the jump just at a known +// offset from the swi instruction so the simulator knows what to call. +class Redirection { + public: + Redirection(void* external_function, ExternalReference::Type type) + : external_function_(external_function), + swi_instruction_(rtCallRedirInstr), + type_(type), + next_(NULL) { + Isolate* isolate = Isolate::Current(); + next_ = isolate->simulator_redirection(); + Simulator::current(isolate)-> + FlushICache(isolate->simulator_i_cache(), + reinterpret_cast<void*>(&swi_instruction_), + Instruction::kInstrSize); + isolate->set_simulator_redirection(this); + } + + void* address_of_swi_instruction() { + return reinterpret_cast<void*>(&swi_instruction_); + } + + void* external_function() { return external_function_; } + ExternalReference::Type type() { return type_; } + + static Redirection* Get(void* external_function, + ExternalReference::Type type) { + Isolate* isolate = Isolate::Current(); + Redirection* current = isolate->simulator_redirection(); + for (; current != NULL; current = current->next_) { + if (current->external_function_ == external_function) return current; + } + return new Redirection(external_function, type); + } + + static Redirection* FromSwiInstruction(Instruction* swi_instruction) { + char* addr_of_swi = reinterpret_cast<char*>(swi_instruction); + char* addr_of_redirection = + addr_of_swi - OFFSET_OF(Redirection, swi_instruction_); + return reinterpret_cast<Redirection*>(addr_of_redirection); + } + + static void* ReverseRedirection(int64_t reg) { + Redirection* redirection = FromSwiInstruction( + reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg))); + return redirection->external_function(); + } + + private: + void* external_function_; + uint32_t 
swi_instruction_; + ExternalReference::Type type_; + Redirection* next_; +}; + + +void* Simulator::RedirectExternalReference(void* external_function, + ExternalReference::Type type) { + Redirection* redirection = Redirection::Get(external_function, type); + return redirection->address_of_swi_instruction(); +} + + +// Get the active Simulator for the current thread. +Simulator* Simulator::current(Isolate* isolate) { + v8::internal::Isolate::PerIsolateThreadData* isolate_data = + isolate->FindOrAllocatePerThreadDataForThisThread(); + DCHECK(isolate_data != NULL); + DCHECK(isolate_data != NULL); + + Simulator* sim = isolate_data->simulator(); + if (sim == NULL) { + // TODO(146): delete the simulator object when a thread/isolate goes away. + sim = new Simulator(isolate); + isolate_data->set_simulator(sim); + } + return sim; +} + + +// Sets the register in the architecture state. It will also deal with updating +// Simulator internal state for special registers such as PC. +void Simulator::set_register(int reg, int64_t value) { + DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); + if (reg == pc) { + pc_modified_ = true; + } + + // Zero register always holds 0. + registers_[reg] = (reg == 0) ? 0 : value; +} + + +void Simulator::set_dw_register(int reg, const int* dbl) { + DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); + registers_[reg] = dbl[1]; + registers_[reg] = registers_[reg] << 32; + registers_[reg] += dbl[0]; +} + + +void Simulator::set_fpu_register(int fpureg, int64_t value) { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + FPUregisters_[fpureg] = value; +} + + +void Simulator::set_fpu_register_word(int fpureg, int32_t value) { + // Set ONLY lower 32-bits, leaving upper bits untouched. + // TODO(plind): big endian issue. 
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]); + *pword = value; +} + + +void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) { + // Set ONLY upper 32-bits, leaving lower bits untouched. + // TODO(plind): big endian issue. + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1; + *phiword = value; +} + + +void Simulator::set_fpu_register_float(int fpureg, float value) { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + *BitCast<float*>(&FPUregisters_[fpureg]) = value; +} + + +void Simulator::set_fpu_register_double(int fpureg, double value) { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + *BitCast<double*>(&FPUregisters_[fpureg]) = value; +} + + +// Get the register from the architecture state. This function does handle +// the special case of accessing the PC register. +int64_t Simulator::get_register(int reg) const { + DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); + if (reg == 0) + return 0; + else + return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0); +} + + +double Simulator::get_double_from_register_pair(int reg) { + // TODO(plind): bad ABI stuff, refactor or remove. + DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0)); + + double dm_val = 0.0; + // Read the bits from the unsigned integer register_[] array + // into the double precision floating point value and return it. 
+ char buffer[sizeof(registers_[0])]; + memcpy(buffer, ®isters_[reg], sizeof(registers_[0])); + memcpy(&dm_val, buffer, sizeof(registers_[0])); + return(dm_val); +} + + +int64_t Simulator::get_fpu_register(int fpureg) const { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + return FPUregisters_[fpureg]; +} + + +int32_t Simulator::get_fpu_register_word(int fpureg) const { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff); +} + + +int32_t Simulator::get_fpu_register_signed_word(int fpureg) const { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff); +} + + +uint32_t Simulator::get_fpu_register_hi_word(int fpureg) const { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + return static_cast<uint32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff); +} + + +float Simulator::get_fpu_register_float(int fpureg) const { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + return *BitCast<float*>( + const_cast<int64_t*>(&FPUregisters_[fpureg])); +} + + +double Simulator::get_fpu_register_double(int fpureg) const { + DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); + return *BitCast<double*>(&FPUregisters_[fpureg]); +} + + +// Runtime FP routines take up to two double arguments and zero +// or one integer arguments. All are constructed here, +// from a0-a3 or f12 and f13 (n64), or f14 (O32). +void Simulator::GetFpArgs(double* x, double* y, int32_t* z) { + if (!IsMipsSoftFloatABI) { + const int fparg2 = (kMipsAbi == kN64) ? 13 : 14; + *x = get_fpu_register_double(12); + *y = get_fpu_register_double(fparg2); + *z = get_register(a2); + } else { + // TODO(plind): bad ABI stuff, refactor or remove. + // We use a char buffer to get around the strict-aliasing rules which + // otherwise allow the compiler to optimize away the copy. 
+ char buffer[sizeof(*x)]; + int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer); + + // Registers a0 and a1 -> x. + reg_buffer[0] = get_register(a0); + reg_buffer[1] = get_register(a1); + memcpy(x, buffer, sizeof(buffer)); + // Registers a2 and a3 -> y. + reg_buffer[0] = get_register(a2); + reg_buffer[1] = get_register(a3); + memcpy(y, buffer, sizeof(buffer)); + // Register 2 -> z. + reg_buffer[0] = get_register(a2); + memcpy(z, buffer, sizeof(*z)); + } +} + + +// The return value is either in v0/v1 or f0. +void Simulator::SetFpResult(const double& result) { + if (!IsMipsSoftFloatABI) { + set_fpu_register_double(0, result); + } else { + char buffer[2 * sizeof(registers_[0])]; + int64_t* reg_buffer = reinterpret_cast<int64_t*>(buffer); + memcpy(buffer, &result, sizeof(buffer)); + // Copy result to v0 and v1. + set_register(v0, reg_buffer[0]); + set_register(v1, reg_buffer[1]); + } +} + + +// Helper functions for setting and testing the FCSR register's bits. +void Simulator::set_fcsr_bit(uint32_t cc, bool value) { + if (value) { + FCSR_ |= (1 << cc); + } else { + FCSR_ &= ~(1 << cc); + } +} + + +bool Simulator::test_fcsr_bit(uint32_t cc) { + return FCSR_ & (1 << cc); +} + + +// Sets the rounding error codes in FCSR based on the result of the rounding. +// Returns true if the operation was invalid. 
+bool Simulator::set_fcsr_round_error(double original, double rounded) { + bool ret = false; + double max_int32 = std::numeric_limits<int32_t>::max(); + double min_int32 = std::numeric_limits<int32_t>::min(); + + if (!std::isfinite(original) || !std::isfinite(rounded)) { + set_fcsr_bit(kFCSRInvalidOpFlagBit, true); + ret = true; + } + + if (original != rounded) { + set_fcsr_bit(kFCSRInexactFlagBit, true); + } + + if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { + set_fcsr_bit(kFCSRUnderflowFlagBit, true); + ret = true; + } + + if (rounded > max_int32 || rounded < min_int32) { + set_fcsr_bit(kFCSROverflowFlagBit, true); + // The reference is not really clear but it seems this is required: + set_fcsr_bit(kFCSRInvalidOpFlagBit, true); + ret = true; + } + + return ret; +} + + +// Sets the rounding error codes in FCSR based on the result of the rounding. +// Returns true if the operation was invalid. +bool Simulator::set_fcsr_round64_error(double original, double rounded) { + bool ret = false; + double max_int64 = std::numeric_limits<int64_t>::max(); + double min_int64 = std::numeric_limits<int64_t>::min(); + + if (!std::isfinite(original) || !std::isfinite(rounded)) { + set_fcsr_bit(kFCSRInvalidOpFlagBit, true); + ret = true; + } + + if (original != rounded) { + set_fcsr_bit(kFCSRInexactFlagBit, true); + } + + if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { + set_fcsr_bit(kFCSRUnderflowFlagBit, true); + ret = true; + } + + if (rounded > max_int64 || rounded < min_int64) { + set_fcsr_bit(kFCSROverflowFlagBit, true); + // The reference is not really clear but it seems this is required: + set_fcsr_bit(kFCSRInvalidOpFlagBit, true); + ret = true; + } + + return ret; +} + + +// Raw access to the PC register. 
+void Simulator::set_pc(int64_t value) { + pc_modified_ = true; + registers_[pc] = value; +} + + +bool Simulator::has_bad_pc() const { + return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc)); +} + + +// Raw access to the PC register without the special adjustment when reading. +int64_t Simulator::get_pc() const { + return registers_[pc]; +} + + +// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an +// interrupt is caused. On others it does a funky rotation thing. For now we +// simply disallow unaligned reads, but at some point we may want to move to +// emulating the rotate behaviour. Note that simulator runs have the runtime +// system running directly on the host system and only generated code is +// executed in the simulator. Since the host is typically IA32 we will not +// get the correct MIPS-like behaviour on unaligned accesses. + +// TODO(plind): refactor this messy debug code when we do unaligned access. +void Simulator::DieOrDebug() { + if (1) { // Flag for this was removed. + MipsDebugger dbg(this); + dbg.Debug(); + } else { + base::OS::Abort(); + } +} + + +void Simulator::TraceRegWr(int64_t value) { + if (::v8::internal::FLAG_trace_sim) { + SNPrintF(trace_buf_, "%016lx", value); + } +} + + +// TODO(plind): consider making icount_ printing a flag option. 
+void Simulator::TraceMemRd(int64_t addr, int64_t value) { + if (::v8::internal::FLAG_trace_sim) { + SNPrintF(trace_buf_, "%016lx <-- [%016lx] (%ld)", + value, addr, icount_); + } +} + + +void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) { + if (::v8::internal::FLAG_trace_sim) { + switch (t) { + case BYTE: + SNPrintF(trace_buf_, " %02x --> [%016lx]", + static_cast<int8_t>(value), addr); + break; + case HALF: + SNPrintF(trace_buf_, " %04x --> [%016lx]", + static_cast<int16_t>(value), addr); + break; + case WORD: + SNPrintF(trace_buf_, " %08x --> [%016lx]", + static_cast<int32_t>(value), addr); + break; + case DWORD: + SNPrintF(trace_buf_, "%016lx --> [%016lx] (%ld)", + value, addr, icount_); + break; + } + } +} + + +// TODO(plind): sign-extend and zero-extend not implmented properly +// on all the ReadXX functions, I don't think re-interpret cast does it. +int32_t Simulator::ReadW(int64_t addr, Instruction* instr) { + if (addr >=0 && addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n", + addr, reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + } + if ((addr & 0x3) == 0) { + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + TraceMemRd(addr, static_cast<int64_t>(*ptr)); + return *ptr; + } + PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + return 0; +} + + +uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) { + if (addr >=0 && addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. 
+ PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n", + addr, reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + } + if ((addr & 0x3) == 0) { + uint32_t* ptr = reinterpret_cast<uint32_t*>(addr); + TraceMemRd(addr, static_cast<int64_t>(*ptr)); + return *ptr; + } + PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + return 0; +} + + +void Simulator::WriteW(int64_t addr, int value, Instruction* instr) { + if (addr >= 0 && addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n", + addr, reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + } + if ((addr & 0x3) == 0) { + TraceMemWr(addr, value, WORD); + int* ptr = reinterpret_cast<int*>(addr); + *ptr = value; + return; + } + PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); +} + + +int64_t Simulator::Read2W(int64_t addr, Instruction* instr) { + if (addr >=0 && addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n", + addr, reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + } + if ((addr & kPointerAlignmentMask) == 0) { + int64_t* ptr = reinterpret_cast<int64_t*>(addr); + TraceMemRd(addr, *ptr); + return *ptr; + } + PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + return 0; +} + + +void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) { + if (addr >= 0 && addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. 
+ PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n", + addr, reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + } + if ((addr & kPointerAlignmentMask) == 0) { + TraceMemWr(addr, value, DWORD); + int64_t* ptr = reinterpret_cast<int64_t*>(addr); + *ptr = value; + return; + } + PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); +} + + +double Simulator::ReadD(int64_t addr, Instruction* instr) { + if ((addr & kDoubleAlignmentMask) == 0) { + double* ptr = reinterpret_cast<double*>(addr); + return *ptr; + } + PrintF("Unaligned (double) read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + base::OS::Abort(); + return 0; +} + + +void Simulator::WriteD(int64_t addr, double value, Instruction* instr) { + if ((addr & kDoubleAlignmentMask) == 0) { + double* ptr = reinterpret_cast<double*>(addr); + *ptr = value; + return; + } + PrintF("Unaligned (double) write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); +} + + +uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) { + if ((addr & 1) == 0) { + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); + TraceMemRd(addr, static_cast<int64_t>(*ptr)); + return *ptr; + } + PrintF("Unaligned unsigned halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + return 0; +} + + +int16_t Simulator::ReadH(int64_t addr, Instruction* instr) { + if ((addr & 1) == 0) { + int16_t* ptr = reinterpret_cast<int16_t*>(addr); + TraceMemRd(addr, static_cast<int64_t>(*ptr)); + return *ptr; + } + PrintF("Unaligned signed halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); + return 0; +} + + +void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) { + if ((addr & 1) == 0) { + TraceMemWr(addr, value, HALF); + uint16_t* ptr = 
reinterpret_cast<uint16_t*>(addr); + *ptr = value; + return; + } + PrintF( + "Unaligned unsigned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); +} + + +void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) { + if ((addr & 1) == 0) { + TraceMemWr(addr, value, HALF); + int16_t* ptr = reinterpret_cast<int16_t*>(addr); + *ptr = value; + return; + } + PrintF("Unaligned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast<intptr_t>(instr)); + DieOrDebug(); +} + + +uint32_t Simulator::ReadBU(int64_t addr) { + uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); + TraceMemRd(addr, static_cast<int64_t>(*ptr)); + return *ptr & 0xff; +} + + +int32_t Simulator::ReadB(int64_t addr) { + int8_t* ptr = reinterpret_cast<int8_t*>(addr); + TraceMemRd(addr, static_cast<int64_t>(*ptr)); + return *ptr; +} + + +void Simulator::WriteB(int64_t addr, uint8_t value) { + TraceMemWr(addr, value, BYTE); + uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); + *ptr = value; +} + + +void Simulator::WriteB(int64_t addr, int8_t value) { + TraceMemWr(addr, value, BYTE); + int8_t* ptr = reinterpret_cast<int8_t*>(addr); + *ptr = value; +} + + +// Returns the limit of the stack area to enable checking for stack overflows. +uintptr_t Simulator::StackLimit() const { + // Leave a safety margin of 1024 bytes to prevent overrunning the stack when + // pushing values. + return reinterpret_cast<uintptr_t>(stack_) + 1024; +} + + +// Unsupported instructions use Format to print an error and stop execution. +void Simulator::Format(Instruction* instr, const char* format) { + PrintF("Simulator found unsupported instruction:\n 0x%08lx: %s\n", + reinterpret_cast<intptr_t>(instr), format); + UNIMPLEMENTED_MIPS(); +} + + +// Calls into the V8 runtime are based on this very simple interface. 
+// Note: To be able to return two values from some calls the code in runtime.cc +// uses the ObjectPair which is essentially two 32-bit values stuffed into a +// 64-bit value. With the code below we assume that all runtime calls return +// 64 bits of result. If they don't, the v1 result register contains a bogus +// value, which is fine because it is caller-saved. + +struct ObjectPair { + Object* x; + Object* y; +}; + +typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, + int64_t arg1, + int64_t arg2, + int64_t arg3, + int64_t arg4, + int64_t arg5); + + +// These prototypes handle the four types of FP calls. +typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1); +typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1); +typedef double (*SimulatorRuntimeFPCall)(double darg0); +typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0); + +// This signature supports direct call in to API function native callback +// (refer to InvocationCallback in v8.h). +typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0); +typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1); + +// This signature supports direct call to accessor getter callback. +typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1); +typedef void (*SimulatorRuntimeProfilingGetterCall)( + int64_t arg0, int64_t arg1, void* arg2); + +// Software interrupt instructions are used by the simulator to call into the +// C-based V8 runtime. They are also used for debugging with simulator. +void Simulator::SoftwareInterrupt(Instruction* instr) { + // There are several instructions that could get us here, + // the break_ instruction, or several variants of traps. All + // Are "SPECIAL" class opcode, and are distinuished by function. + int32_t func = instr->FunctionFieldRaw(); + uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1; + // We first check if we met a call_rt_redirected. 
+ if (instr->InstructionBits() == rtCallRedirInstr) { + Redirection* redirection = Redirection::FromSwiInstruction(instr); + int64_t arg0 = get_register(a0); + int64_t arg1 = get_register(a1); + int64_t arg2 = get_register(a2); + int64_t arg3 = get_register(a3); + int64_t arg4, arg5; + + if (kMipsAbi == kN64) { + arg4 = get_register(a4); // Abi n64 register a4. + arg5 = get_register(a5); // Abi n64 register a5. + } else { // Abi O32. + int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp)); + // Args 4 and 5 are on the stack after the reserved space for args 0..3. + arg4 = stack_pointer[4]; + arg5 = stack_pointer[5]; + } + bool fp_call = + (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) || + (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) || + (redirection->type() == ExternalReference::BUILTIN_FP_CALL) || + (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL); + + if (!IsMipsSoftFloatABI) { + // With the hard floating point calling convention, double + // arguments are passed in FPU registers. Fetch the arguments + // from there and call the builtin using soft floating point + // convention. + switch (redirection->type()) { + case ExternalReference::BUILTIN_FP_FP_CALL: + case ExternalReference::BUILTIN_COMPARE_CALL: + arg0 = get_fpu_register(f12); + arg1 = get_fpu_register(f13); + arg2 = get_fpu_register(f14); + arg3 = get_fpu_register(f15); + break; + case ExternalReference::BUILTIN_FP_CALL: + arg0 = get_fpu_register(f12); + arg1 = get_fpu_register(f13); + break; + case ExternalReference::BUILTIN_FP_INT_CALL: + arg0 = get_fpu_register(f12); + arg1 = get_fpu_register(f13); + arg2 = get_register(a2); + break; + default: + break; + } + } + + // This is dodgy but it works because the C entry stubs are never moved. + // See comment in codegen-arm.cc and bug 1242173. 
+ int64_t saved_ra = get_register(ra); + + intptr_t external = + reinterpret_cast<intptr_t>(redirection->external_function()); + + // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware + // FPU, or gcc soft-float routines. Hardware FPU is simulated in this + // simulator. Soft-float has additional abstraction of ExternalReference, + // to support serialization. + if (fp_call) { + double dval0, dval1; // one or two double parameters + int32_t ival; // zero or one integer parameters + int64_t iresult = 0; // integer return value + double dresult = 0; // double return value + GetFpArgs(&dval0, &dval1, &ival); + SimulatorRuntimeCall generic_target = + reinterpret_cast<SimulatorRuntimeCall>(external); + if (::v8::internal::FLAG_trace_sim) { + switch (redirection->type()) { + case ExternalReference::BUILTIN_FP_FP_CALL: + case ExternalReference::BUILTIN_COMPARE_CALL: + PrintF("Call to host function at %p with args %f, %f", + FUNCTION_ADDR(generic_target), dval0, dval1); + break; + case ExternalReference::BUILTIN_FP_CALL: + PrintF("Call to host function at %p with arg %f", + FUNCTION_ADDR(generic_target), dval0); + break; + case ExternalReference::BUILTIN_FP_INT_CALL: + PrintF("Call to host function at %p with args %f, %d", + FUNCTION_ADDR(generic_target), dval0, ival); + break; + default: + UNREACHABLE(); + break; + } + } + switch (redirection->type()) { + case ExternalReference::BUILTIN_COMPARE_CALL: { + SimulatorRuntimeCompareCall target = + reinterpret_cast<SimulatorRuntimeCompareCall>(external); + iresult = target(dval0, dval1); + set_register(v0, static_cast<int64_t>(iresult)); + // set_register(v1, static_cast<int64_t>(iresult >> 32)); + break; + } + case ExternalReference::BUILTIN_FP_FP_CALL: { + SimulatorRuntimeFPFPCall target = + reinterpret_cast<SimulatorRuntimeFPFPCall>(external); + dresult = target(dval0, dval1); + SetFpResult(dresult); + break; + } + case ExternalReference::BUILTIN_FP_CALL: { + SimulatorRuntimeFPCall target = + 
reinterpret_cast<SimulatorRuntimeFPCall>(external); + dresult = target(dval0); + SetFpResult(dresult); + break; + } + case ExternalReference::BUILTIN_FP_INT_CALL: { + SimulatorRuntimeFPIntCall target = + reinterpret_cast<SimulatorRuntimeFPIntCall>(external); + dresult = target(dval0, ival); + SetFpResult(dresult); + break; + } + default: + UNREACHABLE(); + break; + } + if (::v8::internal::FLAG_trace_sim) { + switch (redirection->type()) { + case ExternalReference::BUILTIN_COMPARE_CALL: + PrintF("Returned %08x\n", static_cast<int32_t>(iresult)); + break; + case ExternalReference::BUILTIN_FP_FP_CALL: + case ExternalReference::BUILTIN_FP_CALL: + case ExternalReference::BUILTIN_FP_INT_CALL: + PrintF("Returned %f\n", dresult); + break; + default: + UNREACHABLE(); + break; + } + } + } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { + if (::v8::internal::FLAG_trace_sim) { + PrintF("Call to host function at %p args %08lx\n", + reinterpret_cast<void*>(external), arg0); + } + SimulatorRuntimeDirectApiCall target = + reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); + target(arg0); + } else if ( + redirection->type() == ExternalReference::PROFILING_API_CALL) { + if (::v8::internal::FLAG_trace_sim) { + PrintF("Call to host function at %p args %08lx %08lx\n", + reinterpret_cast<void*>(external), arg0, arg1); + } + SimulatorRuntimeProfilingApiCall target = + reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external); + target(arg0, Redirection::ReverseRedirection(arg1)); + } else if ( + redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { + if (::v8::internal::FLAG_trace_sim) { + PrintF("Call to host function at %p args %08lx %08lx\n", + reinterpret_cast<void*>(external), arg0, arg1); + } + SimulatorRuntimeDirectGetterCall target = + reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); + target(arg0, arg1); + } else if ( + redirection->type() == ExternalReference::PROFILING_GETTER_CALL) { + if (::v8::internal::FLAG_trace_sim) 
{ + PrintF("Call to host function at %p args %08lx %08lx %08lx\n", + reinterpret_cast<void*>(external), arg0, arg1, arg2); + } + SimulatorRuntimeProfilingGetterCall target = + reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external); + target(arg0, arg1, Redirection::ReverseRedirection(arg2)); + } else { + SimulatorRuntimeCall target = + reinterpret_cast<SimulatorRuntimeCall>(external); + if (::v8::internal::FLAG_trace_sim) { + PrintF( + "Call to host function at %p " + "args %08lx, %08lx, %08lx, %08lx, %08lx, %08lx\n", + FUNCTION_ADDR(target), + arg0, + arg1, + arg2, + arg3, + arg4, + arg5); + } + // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5); + // set_register(v0, static_cast<int32_t>(result)); + // set_register(v1, static_cast<int32_t>(result >> 32)); + ObjectPair result = target(arg0, arg1, arg2, arg3, arg4, arg5); + set_register(v0, (int64_t)(result.x)); + set_register(v1, (int64_t)(result.y)); + } + if (::v8::internal::FLAG_trace_sim) { + PrintF("Returned %08lx : %08lx\n", get_register(v1), get_register(v0)); + } + set_register(ra, saved_ra); + set_pc(get_register(ra)); + + } else if (func == BREAK && code <= kMaxStopCode) { + if (IsWatchpoint(code)) { + PrintWatchpoint(code); + } else { + IncreaseStopCounter(code); + HandleStop(code, instr); + } + } else { + // All remaining break_ codes, and all traps are handled here. + MipsDebugger dbg(this); + dbg.Debug(); + } +} + + +// Stop helper functions. +bool Simulator::IsWatchpoint(uint64_t code) { + return (code <= kMaxWatchpointCode); +} + + +void Simulator::PrintWatchpoint(uint64_t code) { + MipsDebugger dbg(this); + ++break_count_; + PrintF("\n---- break %ld marker: %3d (instr count: %8ld) ----------" + "----------------------------------", + code, break_count_, icount_); + dbg.PrintAllRegs(); // Print registers and continue running. 
+} + + +void Simulator::HandleStop(uint64_t code, Instruction* instr) { + // Stop if it is enabled, otherwise go on jumping over the stop + // and the message address. + if (IsEnabledStop(code)) { + MipsDebugger dbg(this); + dbg.Stop(instr); + } else { + set_pc(get_pc() + 2 * Instruction::kInstrSize); + } +} + + +bool Simulator::IsStopInstruction(Instruction* instr) { + int32_t func = instr->FunctionFieldRaw(); + uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6)); + return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode; +} + + +bool Simulator::IsEnabledStop(uint64_t code) { + DCHECK(code <= kMaxStopCode); + DCHECK(code > kMaxWatchpointCode); + return !(watched_stops_[code].count & kStopDisabledBit); +} + + +void Simulator::EnableStop(uint64_t code) { + if (!IsEnabledStop(code)) { + watched_stops_[code].count &= ~kStopDisabledBit; + } +} + + +void Simulator::DisableStop(uint64_t code) { + if (IsEnabledStop(code)) { + watched_stops_[code].count |= kStopDisabledBit; + } +} + + +void Simulator::IncreaseStopCounter(uint64_t code) { + DCHECK(code <= kMaxStopCode); + if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) { + PrintF("Stop counter for code %ld has overflowed.\n" + "Enabling this code and reseting the counter to 0.\n", code); + watched_stops_[code].count = 0; + EnableStop(code); + } else { + watched_stops_[code].count++; + } +} + + +// Print a stop status. +void Simulator::PrintStopInfo(uint64_t code) { + if (code <= kMaxWatchpointCode) { + PrintF("That is a watchpoint, not a stop.\n"); + return; + } else if (code > kMaxStopCode) { + PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1); + return; + } + const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled"; + int32_t count = watched_stops_[code].count & ~kStopDisabledBit; + // Don't print the state of unused breakpoints. 
+ if (count != 0) { + if (watched_stops_[code].desc) { + PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i, \t%s\n", + code, code, state, count, watched_stops_[code].desc); + } else { + PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i\n", + code, code, state, count); + } + } +} + + +void Simulator::SignalExceptions() { + for (int i = 1; i < kNumExceptions; i++) { + if (exceptions[i] != 0) { + V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i); + } + } +} + + +// Handle execution based on instruction types. + +void Simulator::ConfigureTypeRegister(Instruction* instr, + int64_t* alu_out, + int64_t* i64hilo, + uint64_t* u64hilo, + int64_t* next_pc, + int64_t* return_addr_reg, + bool* do_interrupt, + int64_t* i128resultH, + int64_t* i128resultL) { + // Every local variable declared here needs to be const. + // This is to make sure that changed values are sent back to + // DecodeTypeRegister correctly. + + // Instruction fields. + const Opcode op = instr->OpcodeFieldRaw(); + const int64_t rs_reg = instr->RsValue(); + const int64_t rs = get_register(rs_reg); + const uint64_t rs_u = static_cast<uint64_t>(rs); + const int64_t rt_reg = instr->RtValue(); + const int64_t rt = get_register(rt_reg); + const uint64_t rt_u = static_cast<uint64_t>(rt); + const int64_t rd_reg = instr->RdValue(); + const uint64_t sa = instr->SaValue(); + + const int32_t fs_reg = instr->FsValue(); + + + // ---------- Configuration. + switch (op) { + case COP1: // Coprocessor instructions. + switch (instr->RsFieldRaw()) { + case CFC1: + // At the moment only FCSR is supported. 
+ DCHECK(fs_reg == kFCSRRegister); + *alu_out = FCSR_; + break; + case MFC1: + *alu_out = static_cast<int64_t>(get_fpu_register_word(fs_reg)); + break; + case DMFC1: + *alu_out = get_fpu_register(fs_reg); + break; + case MFHC1: + *alu_out = get_fpu_register_hi_word(fs_reg); + break; + case CTC1: + case MTC1: + case DMTC1: + case MTHC1: + case S: + case D: + case W: + case L: + case PS: + // Do everything in the execution step. + break; + default: + // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here. + UNREACHABLE(); + } + break; + case COP1X: + break; + case SPECIAL: + switch (instr->FunctionFieldRaw()) { + case JR: + case JALR: + *next_pc = get_register(instr->RsValue()); + *return_addr_reg = instr->RdValue(); + break; + case SLL: + *alu_out = (int32_t)rt << sa; + break; + case DSLL: + *alu_out = rt << sa; + break; + case DSLL32: + *alu_out = rt << sa << 32; + break; + case SRL: + if (rs_reg == 0) { + // Regular logical right shift of a word by a fixed number of + // bits instruction. RS field is always equal to 0. + *alu_out = (uint32_t)rt_u >> sa; + } else { + // Logical right-rotate of a word by a fixed number of bits. This + // is special case of SRL instruction, added in MIPS32 Release 2. + // RS field is equal to 00001. + *alu_out = ((uint32_t)rt_u >> sa) | ((uint32_t)rt_u << (32 - sa)); + } + break; + case DSRL: + *alu_out = rt_u >> sa; + break; + case DSRL32: + *alu_out = rt_u >> sa >> 32; + break; + case SRA: + *alu_out = (int32_t)rt >> sa; + break; + case DSRA: + *alu_out = rt >> sa; + break; + case DSRA32: + *alu_out = rt >> sa >> 32; + break; + case SLLV: + *alu_out = (int32_t)rt << rs; + break; + case DSLLV: + *alu_out = rt << rs; + break; + case SRLV: + if (sa == 0) { + // Regular logical right-shift of a word by a variable number of + // bits instruction. SA field is always equal to 0. + *alu_out = (uint32_t)rt_u >> rs; + } else { + // Logical right-rotate of a word by a variable number of bits. 
+ // This is special case od SRLV instruction, added in MIPS32 + // Release 2. SA field is equal to 00001. + *alu_out = + ((uint32_t)rt_u >> rs_u) | ((uint32_t)rt_u << (32 - rs_u)); + } + break; + case DSRLV: + if (sa == 0) { + // Regular logical right-shift of a word by a variable number of + // bits instruction. SA field is always equal to 0. + *alu_out = rt_u >> rs; + } else { + // Logical right-rotate of a word by a variable number of bits. + // This is special case od SRLV instruction, added in MIPS32 + // Release 2. SA field is equal to 00001. + *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u)); + } + break; + case SRAV: + *alu_out = (int32_t)rt >> rs; + break; + case DSRAV: + *alu_out = rt >> rs; + break; + case MFHI: // MFHI == CLZ on R6. + if (kArchVariant != kMips64r6) { + DCHECK(instr->SaValue() == 0); + *alu_out = get_register(HI); + } else { + // MIPS spec: If no bits were set in GPR rs, the result written to + // GPR rd is 32. + // GCC __builtin_clz: If input is 0, the result is undefined. + DCHECK(instr->SaValue() == 1); + *alu_out = + rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u); + } + break; + case MFLO: + *alu_out = get_register(LO); + break; + case MULT: // MULT == D_MUL_MUH. + // TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo + // regs. + // TODO(plind) - make the 32-bit MULT ops conform to spec regarding + // checking of 32-bit input values, and un-define operations of HW. + *i64hilo = static_cast<int64_t>((int32_t)rs) * + static_cast<int64_t>((int32_t)rt); + break; + case MULTU: + *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u); + break; + case DMULT: // DMULT == D_MUL_MUH. 
+ if (kArchVariant != kMips64r6) { + *i128resultH = MultiplyHighSigned(rs, rt); + *i128resultL = rs * rt; + } else { + switch (instr->SaValue()) { + case MUL_OP: + *i128resultL = rs * rt; + break; + case MUH_OP: + *i128resultH = MultiplyHighSigned(rs, rt); + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + } + break; + case DMULTU: + UNIMPLEMENTED_MIPS(); + break; + case ADD: + case DADD: + if (HaveSameSign(rs, rt)) { + if (rs > 0) { + exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt); + } else if (rs < 0) { + exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt); + } + } + *alu_out = rs + rt; + break; + case ADDU: { + int32_t alu32_out = rs + rt; + // Sign-extend result of 32bit operation into 64bit register. + *alu_out = static_cast<int64_t>(alu32_out); + } + break; + case DADDU: + *alu_out = rs + rt; + break; + case SUB: + case DSUB: + if (!HaveSameSign(rs, rt)) { + if (rs > 0) { + exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt); + } else if (rs < 0) { + exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt); + } + } + *alu_out = rs - rt; + break; + case SUBU: { + int32_t alu32_out = rs - rt; + // Sign-extend result of 32bit operation into 64bit register. + *alu_out = static_cast<int64_t>(alu32_out); + } + break; + case DSUBU: + *alu_out = rs - rt; + break; + case AND: + *alu_out = rs & rt; + break; + case OR: + *alu_out = rs | rt; + break; + case XOR: + *alu_out = rs ^ rt; + break; + case NOR: + *alu_out = ~(rs | rt); + break; + case SLT: + *alu_out = rs < rt ? 1 : 0; + break; + case SLTU: + *alu_out = rs_u < rt_u ? 1 : 0; + break; + // Break and trap instructions. 
+ case BREAK: + + *do_interrupt = true; + break; + case TGE: + *do_interrupt = rs >= rt; + break; + case TGEU: + *do_interrupt = rs_u >= rt_u; + break; + case TLT: + *do_interrupt = rs < rt; + break; + case TLTU: + *do_interrupt = rs_u < rt_u; + break; + case TEQ: + *do_interrupt = rs == rt; + break; + case TNE: + *do_interrupt = rs != rt; + break; + case MOVN: + case MOVZ: + case MOVCI: + // No action taken on decode. + break; + case DIV: + case DIVU: + case DDIV: + case DDIVU: + // div and divu never raise exceptions. + break; + default: + UNREACHABLE(); + } + break; + case SPECIAL2: + switch (instr->FunctionFieldRaw()) { + case MUL: + // Only the lower 32 bits are kept. + *alu_out = (int32_t)rs_u * (int32_t)rt_u; + break; + case CLZ: + // MIPS32 spec: If no bits were set in GPR rs, the result written to + // GPR rd is 32. + // GCC __builtin_clz: If input is 0, the result is undefined. + *alu_out = + rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u); + break; + default: + UNREACHABLE(); + } + break; + case SPECIAL3: + switch (instr->FunctionFieldRaw()) { + case INS: { // Mips32r2 instruction. + // Interpret rd field as 5-bit msb of insert. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of insert. + uint16_t lsb = sa; + uint16_t size = msb - lsb + 1; + uint32_t mask = (1 << size) - 1; + *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb); + break; + } + case EXT: { // Mips32r2 instruction. + // Interpret rd field as 5-bit msb of extract. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa; + uint16_t size = msb + 1; + uint32_t mask = (1 << size) - 1; + *alu_out = (rs_u & (mask << lsb)) >> lsb; + break; + } + default: + UNREACHABLE(); + } + break; + default: + UNREACHABLE(); + } +} + + +void Simulator::DecodeTypeRegister(Instruction* instr) { + // Instruction fields. 
+ const Opcode op = instr->OpcodeFieldRaw(); + const int64_t rs_reg = instr->RsValue(); + const int64_t rs = get_register(rs_reg); + const uint64_t rs_u = static_cast<uint32_t>(rs); + const int64_t rt_reg = instr->RtValue(); + const int64_t rt = get_register(rt_reg); + const uint64_t rt_u = static_cast<uint32_t>(rt); + const int64_t rd_reg = instr->RdValue(); + + const int32_t fr_reg = instr->FrValue(); + const int32_t fs_reg = instr->FsValue(); + const int32_t ft_reg = instr->FtValue(); + const int64_t fd_reg = instr->FdValue(); + int64_t i64hilo = 0; + uint64_t u64hilo = 0; + + // ALU output. + // It should not be used as is. Instructions using it should always + // initialize it first. + int64_t alu_out = 0x12345678; + + // For break and trap instructions. + bool do_interrupt = false; + + // For jr and jalr. + // Get current pc. + int64_t current_pc = get_pc(); + // Next pc + int64_t next_pc = 0; + int64_t return_addr_reg = 31; + + int64_t i128resultH; + int64_t i128resultL; + + // Set up the variables if needed before executing the instruction. + ConfigureTypeRegister(instr, + &alu_out, + &i64hilo, + &u64hilo, + &next_pc, + &return_addr_reg, + &do_interrupt, + &i128resultH, + &i128resultL); + + // ---------- Raise exceptions triggered. + SignalExceptions(); + + // ---------- Execution. + switch (op) { + case COP1: + switch (instr->RsFieldRaw()) { + case BC1: // Branch on coprocessor condition. + case BC1EQZ: + case BC1NEZ: + UNREACHABLE(); + break; + case CFC1: + set_register(rt_reg, alu_out); + break; + case MFC1: + case DMFC1: + case MFHC1: + set_register(rt_reg, alu_out); + break; + case CTC1: + // At the moment only FCSR is supported. + DCHECK(fs_reg == kFCSRRegister); + FCSR_ = registers_[rt_reg]; + break; + case MTC1: + // Hardware writes upper 32-bits to zero on mtc1. 
+ set_fpu_register_hi_word(fs_reg, 0); + set_fpu_register_word(fs_reg, registers_[rt_reg]); + break; + case DMTC1: + set_fpu_register(fs_reg, registers_[rt_reg]); + break; + case MTHC1: + set_fpu_register_hi_word(fs_reg, registers_[rt_reg]); + break; + case S: + float f; + switch (instr->FunctionFieldRaw()) { + case CVT_D_S: + f = get_fpu_register_float(fs_reg); + set_fpu_register_double(fd_reg, static_cast<double>(f)); + break; + default: + // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S + // CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented. + UNREACHABLE(); + } + break; + case D: + double ft, fs; + uint32_t cc, fcsr_cc; + int64_t i64; + fs = get_fpu_register_double(fs_reg); + ft = get_fpu_register_double(ft_reg); + cc = instr->FCccValue(); + fcsr_cc = get_fcsr_condition_bit(cc); + switch (instr->FunctionFieldRaw()) { + case ADD_D: + set_fpu_register_double(fd_reg, fs + ft); + break; + case SUB_D: + set_fpu_register_double(fd_reg, fs - ft); + break; + case MUL_D: + set_fpu_register_double(fd_reg, fs * ft); + break; + case DIV_D: + set_fpu_register_double(fd_reg, fs / ft); + break; + case ABS_D: + set_fpu_register_double(fd_reg, fabs(fs)); + break; + case MOV_D: + set_fpu_register_double(fd_reg, fs); + break; + case NEG_D: + set_fpu_register_double(fd_reg, -fs); + break; + case SQRT_D: + set_fpu_register_double(fd_reg, sqrt(fs)); + break; + case C_UN_D: + set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft)); + break; + case C_EQ_D: + set_fcsr_bit(fcsr_cc, (fs == ft)); + break; + case C_UEQ_D: + set_fcsr_bit(fcsr_cc, + (fs == ft) || (std::isnan(fs) || std::isnan(ft))); + break; + case C_OLT_D: + set_fcsr_bit(fcsr_cc, (fs < ft)); + break; + case C_ULT_D: + set_fcsr_bit(fcsr_cc, + (fs < ft) || (std::isnan(fs) || std::isnan(ft))); + break; + case C_OLE_D: + set_fcsr_bit(fcsr_cc, (fs <= ft)); + break; + case C_ULE_D: + set_fcsr_bit(fcsr_cc, + (fs <= ft) || (std::isnan(fs) || std::isnan(ft))); + break; + case CVT_W_D: // Convert double to word. 
+ // Rounding modes are not yet supported. + DCHECK((FCSR_ & 3) == 0); + // In rounding mode 0 it should behave like ROUND. + // No break. + case ROUND_W_D: // Round double to word (round half to even). + { + double rounded = std::floor(fs + 0.5); + int32_t result = static_cast<int32_t>(rounded); + if ((result & 1) != 0 && result - fs == 0.5) { + // If the number is halfway between two integers, + // round to the even one. + result--; + } + set_fpu_register_word(fd_reg, result); + if (set_fcsr_round_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPUInvalidResult); + } + } + break; + case TRUNC_W_D: // Truncate double to word (round towards 0). + { + double rounded = trunc(fs); + int32_t result = static_cast<int32_t>(rounded); + set_fpu_register_word(fd_reg, result); + if (set_fcsr_round_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPUInvalidResult); + } + } + break; + case FLOOR_W_D: // Round double to word towards negative infinity. + { + double rounded = std::floor(fs); + int32_t result = static_cast<int32_t>(rounded); + set_fpu_register_word(fd_reg, result); + if (set_fcsr_round_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPUInvalidResult); + } + } + break; + case CEIL_W_D: // Round double to word towards positive infinity. + { + double rounded = std::ceil(fs); + int32_t result = static_cast<int32_t>(rounded); + set_fpu_register_word(fd_reg, result); + if (set_fcsr_round_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPUInvalidResult); + } + } + break; + case CVT_S_D: // Convert double to float (single). + set_fpu_register_float(fd_reg, static_cast<float>(fs)); + break; + case CVT_L_D: // Mips64r2: Truncate double to 64-bit long-word. + // Rounding modes are not yet supported. + DCHECK((FCSR_ & 3) == 0); + // In rounding mode 0 it should behave like ROUND. + // No break. + case ROUND_L_D: { // Mips64r2 instruction. + // check error cases + double rounded = fs > 0 ? 
floor(fs + 0.5) : ceil(fs - 0.5); + int64_t result = static_cast<int64_t>(rounded); + set_fpu_register(fd_reg, result); + if (set_fcsr_round64_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPU64InvalidResult); + } + break; + } + case TRUNC_L_D: { // Mips64r2 instruction. + double rounded = trunc(fs); + int64_t result = static_cast<int64_t>(rounded); + set_fpu_register(fd_reg, result); + if (set_fcsr_round64_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPU64InvalidResult); + } + break; + } + case FLOOR_L_D: { // Mips64r2 instruction. + double rounded = floor(fs); + int64_t result = static_cast<int64_t>(rounded); + set_fpu_register(fd_reg, result); + if (set_fcsr_round64_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPU64InvalidResult); + } + break; + } + case CEIL_L_D: { // Mips64r2 instruction. + double rounded = ceil(fs); + int64_t result = static_cast<int64_t>(rounded); + set_fpu_register(fd_reg, result); + if (set_fcsr_round64_error(fs, rounded)) { + set_fpu_register(fd_reg, kFPU64InvalidResult); + } + break; + } + case C_F_D: + UNIMPLEMENTED_MIPS(); + break; + default: + UNREACHABLE(); + } + break; + case W: + switch (instr->FunctionFieldRaw()) { + case CVT_S_W: // Convert word to float (single). + alu_out = get_fpu_register_signed_word(fs_reg); + set_fpu_register_float(fd_reg, static_cast<float>(alu_out)); + break; + case CVT_D_W: // Convert word to double. + alu_out = get_fpu_register_signed_word(fs_reg); + set_fpu_register_double(fd_reg, static_cast<double>(alu_out)); + break; + default: // Mips64r6 CMP.S instructions unimplemented. + UNREACHABLE(); + } + break; + case L: + fs = get_fpu_register_double(fs_reg); + ft = get_fpu_register_double(ft_reg); + switch (instr->FunctionFieldRaw()) { + case CVT_D_L: // Mips32r2 instruction. + i64 = get_fpu_register(fs_reg); + set_fpu_register_double(fd_reg, static_cast<double>(i64)); + break; + case CVT_S_L: + UNIMPLEMENTED_MIPS(); + break; + case CMP_AF: // Mips64r6 CMP.D instructions. 
+ UNIMPLEMENTED_MIPS(); + break; + case CMP_UN: + if (std::isnan(fs) || std::isnan(ft)) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_EQ: + if (fs == ft) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_UEQ: + if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_LT: + if (fs < ft) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_ULT: + if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_LE: + if (fs <= ft) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_ULE: + if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED + UNREACHABLE(); + } + break; + default: + UNREACHABLE(); + } + break; + case COP1X: + switch (instr->FunctionFieldRaw()) { + case MADD_D: + double fr, ft, fs; + fr = get_fpu_register_double(fr_reg); + fs = get_fpu_register_double(fs_reg); + ft = get_fpu_register_double(ft_reg); + set_fpu_register_double(fd_reg, fs * ft + fr); + break; + default: + UNREACHABLE(); + } + break; + case SPECIAL: + switch (instr->FunctionFieldRaw()) { + case JR: { + Instruction* branch_delay_instr = reinterpret_cast<Instruction*>( + current_pc+Instruction::kInstrSize); + BranchDelayInstructionDecode(branch_delay_instr); + set_pc(next_pc); + pc_modified_ = true; + break; + } + case JALR: { + Instruction* branch_delay_instr = reinterpret_cast<Instruction*>( + current_pc+Instruction::kInstrSize); + BranchDelayInstructionDecode(branch_delay_instr); + set_register(return_addr_reg, + current_pc + 2 * Instruction::kInstrSize); + set_pc(next_pc); 
+ pc_modified_ = true; + break; + } + // Instructions using HI and LO registers. + case MULT: + if (kArchVariant != kMips64r6) { + set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff)); + set_register(HI, static_cast<int32_t>(i64hilo >> 32)); + } else { + switch (instr->SaValue()) { + case MUL_OP: + set_register(rd_reg, + static_cast<int32_t>(i64hilo & 0xffffffff)); + break; + case MUH_OP: + set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32)); + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + } + break; + case MULTU: + set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff)); + set_register(HI, static_cast<int32_t>(u64hilo >> 32)); + break; + case DMULT: // DMULT == D_MUL_MUH. + if (kArchVariant != kMips64r6) { + set_register(LO, static_cast<int64_t>(i128resultL)); + set_register(HI, static_cast<int64_t>(i128resultH)); + } else { + switch (instr->SaValue()) { + case MUL_OP: + set_register(rd_reg, static_cast<int64_t>(i128resultL)); + break; + case MUH_OP: + set_register(rd_reg, static_cast<int64_t>(i128resultH)); + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + } + break; + case DMULTU: + UNIMPLEMENTED_MIPS(); + break; + case DSLL: + set_register(rd_reg, alu_out); + break; + case DIV: + case DDIV: + switch (kArchVariant) { + case kMips64r2: + // Divide by zero and overflow was not checked in the + // configuration step - div and divu do not raise exceptions. On + // division by 0 the result will be UNPREDICTABLE. On overflow + // (INT_MIN/-1), return INT_MIN which is what the hardware does. 
+ if (rs == INT_MIN && rt == -1) { + set_register(LO, INT_MIN); + set_register(HI, 0); + } else if (rt != 0) { + set_register(LO, rs / rt); + set_register(HI, rs % rt); + } + break; + case kMips64r6: + switch (instr->SaValue()) { + case DIV_OP: + if (rs == INT_MIN && rt == -1) { + set_register(rd_reg, INT_MIN); + } else if (rt != 0) { + set_register(rd_reg, rs / rt); + } + break; + case MOD_OP: + if (rs == INT_MIN && rt == -1) { + set_register(rd_reg, 0); + } else if (rt != 0) { + set_register(rd_reg, rs % rt); + } + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + break; + default: + break; + } + break; + case DIVU: + if (rt_u != 0) { + set_register(LO, rs_u / rt_u); + set_register(HI, rs_u % rt_u); + } + break; + // Break and trap instructions. + case BREAK: + case TGE: + case TGEU: + case TLT: + case TLTU: + case TEQ: + case TNE: + if (do_interrupt) { + SoftwareInterrupt(instr); + } + break; + // Conditional moves. + case MOVN: + if (rt) { + set_register(rd_reg, rs); + TraceRegWr(rs); + } + break; + case MOVCI: { + uint32_t cc = instr->FBccValue(); + uint32_t fcsr_cc = get_fcsr_condition_bit(cc); + if (instr->Bit(16)) { // Read Tf bit. + if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs); + } else { + if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs); + } + break; + } + case MOVZ: + if (!rt) { + set_register(rd_reg, rs); + TraceRegWr(rs); + } + break; + default: // For other special opcodes we do the default operation. + set_register(rd_reg, alu_out); + TraceRegWr(alu_out); + } + break; + case SPECIAL2: + switch (instr->FunctionFieldRaw()) { + case MUL: + set_register(rd_reg, alu_out); + TraceRegWr(alu_out); + // HI and LO are UNPREDICTABLE after the operation. + set_register(LO, Unpredictable); + set_register(HI, Unpredictable); + break; + default: // For other special2 opcodes we do the default operation. 
+ set_register(rd_reg, alu_out); + } + break; + case SPECIAL3: + switch (instr->FunctionFieldRaw()) { + case INS: + // Ins instr leaves result in Rt, rather than Rd. + set_register(rt_reg, alu_out); + TraceRegWr(alu_out); + break; + case EXT: + // Ext instr leaves result in Rt, rather than Rd. + set_register(rt_reg, alu_out); + TraceRegWr(alu_out); + break; + default: + UNREACHABLE(); + } + break; + // Unimplemented opcodes raised an error in the configuration step before, + // so we can use the default here to set the destination register in common + // cases. + default: + set_register(rd_reg, alu_out); + TraceRegWr(alu_out); + } +} + + +// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq). +void Simulator::DecodeTypeImmediate(Instruction* instr) { + // Instruction fields. + Opcode op = instr->OpcodeFieldRaw(); + int64_t rs = get_register(instr->RsValue()); + uint64_t rs_u = static_cast<uint64_t>(rs); + int64_t rt_reg = instr->RtValue(); // Destination register. + int64_t rt = get_register(rt_reg); + int16_t imm16 = instr->Imm16Value(); + + int32_t ft_reg = instr->FtValue(); // Destination register. + int64_t ft = get_fpu_register(ft_reg); + + // Zero extended immediate. + uint32_t oe_imm16 = 0xffff & imm16; + // Sign extended immediate. + int32_t se_imm16 = imm16; + + // Get current pc. + int64_t current_pc = get_pc(); + // Next pc. + int64_t next_pc = bad_ra; + + // Used for conditional branch instructions. + bool do_branch = false; + bool execute_branch_delay_instruction = false; + + // Used for arithmetic instructions. + int64_t alu_out = 0; + // Floating point. + double fp_out = 0.0; + uint32_t cc, cc_value, fcsr_cc; + + // Used for memory instructions. + int64_t addr = 0x0; + // Value to be written in memory. + uint64_t mem_value = 0x0; + // Alignment for 32-bit integers used in LWL, LWR, etc. + const int kInt32AlignmentMask = sizeof(uint32_t) - 1; + + // ---------- Configuration (and execution for REGIMM). 
+ switch (op) { + // ------------- COP1. Coprocessor instructions. + case COP1: + switch (instr->RsFieldRaw()) { + case BC1: // Branch on coprocessor condition. + cc = instr->FBccValue(); + fcsr_cc = get_fcsr_condition_bit(cc); + cc_value = test_fcsr_bit(fcsr_cc); + do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value; + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + } else { + next_pc = current_pc + kBranchReturnOffset; + } + break; + case BC1EQZ: + do_branch = (ft & 0x1) ? false : true; + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + } else { + next_pc = current_pc + kBranchReturnOffset; + } + break; + case BC1NEZ: + do_branch = (ft & 0x1) ? true : false; + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + } else { + next_pc = current_pc + kBranchReturnOffset; + } + break; + default: + UNREACHABLE(); + } + break; + // ------------- REGIMM class. + case REGIMM: + switch (instr->RtFieldRaw()) { + case BLTZ: + do_branch = (rs < 0); + break; + case BLTZAL: + do_branch = rs < 0; + break; + case BGEZ: + do_branch = rs >= 0; + break; + case BGEZAL: + do_branch = rs >= 0; + break; + default: + UNREACHABLE(); + } + switch (instr->RtFieldRaw()) { + case BLTZ: + case BLTZAL: + case BGEZ: + case BGEZAL: + // Branch instructions common part. + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + if (instr->IsLinkingInstruction()) { + set_register(31, current_pc + kBranchReturnOffset); + } + } else { + next_pc = current_pc + kBranchReturnOffset; + } + default: + break; + } + break; // case REGIMM. + // ------------- Branch instructions. 
+ // When comparing to zero, the encoding of rt field is always 0, so we don't + // need to replace rt with zero. + case BEQ: + do_branch = (rs == rt); + break; + case BNE: + do_branch = rs != rt; + break; + case BLEZ: + do_branch = rs <= 0; + break; + case BGTZ: + do_branch = rs > 0; + break; + // ------------- Arithmetic instructions. + case ADDI: + case DADDI: + if (HaveSameSign(rs, se_imm16)) { + if (rs > 0) { + exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16); + } else if (rs < 0) { + exceptions[kIntegerUnderflow] = + rs < (Registers::kMinValue - se_imm16); + } + } + alu_out = rs + se_imm16; + break; + case ADDIU: { + int32_t alu32_out = rs + se_imm16; + // Sign-extend result of 32bit operation into 64bit register. + alu_out = static_cast<int64_t>(alu32_out); + } + break; + case DADDIU: + alu_out = rs + se_imm16; + break; + case SLTI: + alu_out = (rs < se_imm16) ? 1 : 0; + break; + case SLTIU: + alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0; + break; + case ANDI: + alu_out = rs & oe_imm16; + break; + case ORI: + alu_out = rs | oe_imm16; + break; + case XORI: + alu_out = rs ^ oe_imm16; + break; + case LUI: { + int32_t alu32_out = (oe_imm16 << 16); + // Sign-extend result of 32bit operation into 64bit register. + alu_out = static_cast<int64_t>(alu32_out); + } + break; + // ------------- Memory instructions. + case LB: + addr = rs + se_imm16; + alu_out = ReadB(addr); + break; + case LH: + addr = rs + se_imm16; + alu_out = ReadH(addr, instr); + break; + case LWL: { + // al_offset is offset of the effective address within an aligned word. 
+ uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; + uint32_t mask = (1 << byte_shift * 8) - 1; + addr = rs + se_imm16 - al_offset; + alu_out = ReadW(addr, instr); + alu_out <<= byte_shift * 8; + alu_out |= rt & mask; + break; + } + case LW: + addr = rs + se_imm16; + alu_out = ReadW(addr, instr); + break; + case LWU: + addr = rs + se_imm16; + alu_out = ReadWU(addr, instr); + break; + case LD: + addr = rs + se_imm16; + alu_out = Read2W(addr, instr); + break; + case LBU: + addr = rs + se_imm16; + alu_out = ReadBU(addr); + break; + case LHU: + addr = rs + se_imm16; + alu_out = ReadHU(addr, instr); + break; + case LWR: { + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; + uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0; + addr = rs + se_imm16 - al_offset; + alu_out = ReadW(addr, instr); + alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8; + alu_out |= rt & mask; + break; + } + case SB: + addr = rs + se_imm16; + break; + case SH: + addr = rs + se_imm16; + break; + case SWL: { + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; + uint32_t mask = byte_shift ? 
(~0 << (al_offset + 1) * 8) : 0; + addr = rs + se_imm16 - al_offset; + mem_value = ReadW(addr, instr) & mask; + mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8; + break; + } + case SW: + case SD: + addr = rs + se_imm16; + break; + case SWR: { + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint32_t mask = (1 << al_offset * 8) - 1; + addr = rs + se_imm16 - al_offset; + mem_value = ReadW(addr, instr); + mem_value = (rt << al_offset * 8) | (mem_value & mask); + break; + } + case LWC1: + addr = rs + se_imm16; + alu_out = ReadW(addr, instr); + break; + case LDC1: + addr = rs + se_imm16; + fp_out = ReadD(addr, instr); + break; + case SWC1: + case SDC1: + addr = rs + se_imm16; + break; + default: + UNREACHABLE(); + } + + // ---------- Raise exceptions triggered. + SignalExceptions(); + + // ---------- Execution. + switch (op) { + // ------------- Branch instructions. + case BEQ: + case BNE: + case BLEZ: + case BGTZ: + // Branch instructions common part. + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + if (instr->IsLinkingInstruction()) { + set_register(31, current_pc + 2* Instruction::kInstrSize); + } + } else { + next_pc = current_pc + 2 * Instruction::kInstrSize; + } + break; + // ------------- Arithmetic instructions. + case ADDI: + case DADDI: + case ADDIU: + case DADDIU: + case SLTI: + case SLTIU: + case ANDI: + case ORI: + case XORI: + case LUI: + set_register(rt_reg, alu_out); + TraceRegWr(alu_out); + break; + // ------------- Memory instructions. 
+ case LB: + case LH: + case LWL: + case LW: + case LWU: + case LD: + case LBU: + case LHU: + case LWR: + set_register(rt_reg, alu_out); + break; + case SB: + WriteB(addr, static_cast<int8_t>(rt)); + break; + case SH: + WriteH(addr, static_cast<uint16_t>(rt), instr); + break; + case SWL: + WriteW(addr, mem_value, instr); + break; + case SW: + WriteW(addr, rt, instr); + break; + case SD: + Write2W(addr, rt, instr); + break; + case SWR: + WriteW(addr, mem_value, instr); + break; + case LWC1: + set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits. + set_fpu_register_word(ft_reg, static_cast<int32_t>(alu_out)); + break; + case LDC1: + set_fpu_register_double(ft_reg, fp_out); + break; + case SWC1: + addr = rs + se_imm16; + WriteW(addr, get_fpu_register(ft_reg), instr); + break; + case SDC1: + addr = rs + se_imm16; + WriteD(addr, get_fpu_register_double(ft_reg), instr); + break; + default: + break; + } + + + if (execute_branch_delay_instruction) { + // Execute branch delay slot + // We don't check for end_sim_pc. First it should not be met as the current + // pc is valid. Secondly a jump should always execute its branch delay slot. + Instruction* branch_delay_instr = + reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize); + BranchDelayInstructionDecode(branch_delay_instr); + } + + // If needed update pc after the branch delay execution. + if (next_pc != bad_ra) { + set_pc(next_pc); + } +} + + +// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal). +void Simulator::DecodeTypeJump(Instruction* instr) { + // Get current pc. + int32_t current_pc = get_pc(); + // Get unchanged bits of pc. + int32_t pc_high_bits = current_pc & 0xf0000000; + // Next pc. + int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2); + + // Execute branch delay slot. + // We don't check for end_sim_pc. First it should not be met as the current pc + // is valid. Secondly a jump should always execute its branch delay slot. 
+ Instruction* branch_delay_instr = + reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize); + BranchDelayInstructionDecode(branch_delay_instr); + + // Update pc and ra if necessary. + // Do this after the branch delay execution. + if (instr->IsLinkingInstruction()) { + set_register(31, current_pc + 2 * Instruction::kInstrSize); + } + set_pc(next_pc); + pc_modified_ = true; +} + + +// Executes the current instruction. +void Simulator::InstructionDecode(Instruction* instr) { + if (v8::internal::FLAG_check_icache) { + CheckICache(isolate_->simulator_i_cache(), instr); + } + pc_modified_ = false; + + v8::internal::EmbeddedVector<char, 256> buffer; + + if (::v8::internal::FLAG_trace_sim) { + SNPrintF(trace_buf_, " "); + disasm::NameConverter converter; + disasm::Disassembler dasm(converter); + // Use a reasonably large buffer. + dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr)); + } + + switch (instr->InstructionType()) { + case Instruction::kRegisterType: + DecodeTypeRegister(instr); + break; + case Instruction::kImmediateType: + DecodeTypeImmediate(instr); + break; + case Instruction::kJumpType: + DecodeTypeJump(instr); + break; + default: + UNSUPPORTED(); + } + + if (::v8::internal::FLAG_trace_sim) { + PrintF(" 0x%08lx %-44s %s\n", reinterpret_cast<intptr_t>(instr), + buffer.start(), trace_buf_.start()); + } + + if (!pc_modified_) { + set_register(pc, reinterpret_cast<int64_t>(instr) + + Instruction::kInstrSize); + } +} + + + +void Simulator::Execute() { + // Get the PC to simulate. Cannot use the accessor here as we need the + // raw PC value and not the one used as input to arithmetic instructions. + int64_t program_counter = get_pc(); + if (::v8::internal::FLAG_stop_sim_at == 0) { + // Fast version of the dispatch loop without checking whether the simulator + // should be stopping at a particular executed instruction. 
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+
+void Simulator::CallInternal(byte* entry) {
+ // Prepare to execute the code at entry.
+ set_register(pc, reinterpret_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ // The code below assumes that r9 is not used as sb (static base) in
+ // simulator code and therefore is regarded as a callee-saved register.
+ int64_t s0_val = get_register(s0);
+ int64_t s1_val = get_register(s1);
+ int64_t s2_val = get_register(s2);
+ int64_t s3_val = get_register(s3);
+ int64_t s4_val = get_register(s4);
+ int64_t s5_val = get_register(s5);
+ int64_t s6_val = get_register(s6);
+ int64_t s7_val = get_register(s7);
+ int64_t gp_val = get_register(gp);
+ int64_t sp_val = get_register(sp);
+ int64_t fp_val = get_register(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution. 
+ int64_t callee_saved_value = icount_; + set_register(s0, callee_saved_value); + set_register(s1, callee_saved_value); + set_register(s2, callee_saved_value); + set_register(s3, callee_saved_value); + set_register(s4, callee_saved_value); + set_register(s5, callee_saved_value); + set_register(s6, callee_saved_value); + set_register(s7, callee_saved_value); + set_register(gp, callee_saved_value); + set_register(fp, callee_saved_value); + + // Start the simulation. + Execute(); + + // Check that the callee-saved registers have been preserved. + CHECK_EQ(callee_saved_value, get_register(s0)); + CHECK_EQ(callee_saved_value, get_register(s1)); + CHECK_EQ(callee_saved_value, get_register(s2)); + CHECK_EQ(callee_saved_value, get_register(s3)); + CHECK_EQ(callee_saved_value, get_register(s4)); + CHECK_EQ(callee_saved_value, get_register(s5)); + CHECK_EQ(callee_saved_value, get_register(s6)); + CHECK_EQ(callee_saved_value, get_register(s7)); + CHECK_EQ(callee_saved_value, get_register(gp)); + CHECK_EQ(callee_saved_value, get_register(fp)); + + // Restore callee-saved registers with the original value. + set_register(s0, s0_val); + set_register(s1, s1_val); + set_register(s2, s2_val); + set_register(s3, s3_val); + set_register(s4, s4_val); + set_register(s5, s5_val); + set_register(s6, s6_val); + set_register(s7, s7_val); + set_register(gp, gp_val); + set_register(sp, sp_val); + set_register(fp, fp_val); +} + + +int64_t Simulator::Call(byte* entry, int argument_count, ...) { + const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4; + va_list parameters; + va_start(parameters, argument_count); + // Set up arguments. + + // First four arguments passed in registers in both ABI's. 
+ DCHECK(argument_count >= 4); + set_register(a0, va_arg(parameters, int64_t)); + set_register(a1, va_arg(parameters, int64_t)); + set_register(a2, va_arg(parameters, int64_t)); + set_register(a3, va_arg(parameters, int64_t)); + + if (kMipsAbi == kN64) { + // Up to eight arguments passed in registers in N64 ABI. + // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this. + if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t)); + if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t)); + if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t)); + if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t)); + } + + // Remaining arguments passed on stack. + int64_t original_stack = get_register(sp); + // Compute position of stack on entry to generated code. + int stack_args_count = (argument_count > kRegisterPassedArguments) ? + (argument_count - kRegisterPassedArguments) : 0; + int stack_args_size = stack_args_count * sizeof(int64_t) + kCArgsSlotsSize; + int64_t entry_stack = original_stack - stack_args_size; + + if (base::OS::ActivationFrameAlignment() != 0) { + entry_stack &= -base::OS::ActivationFrameAlignment(); + } + // Store remaining arguments on stack, from low to high memory. + intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); + for (int i = kRegisterPassedArguments; i < argument_count; i++) { + int stack_index = i - kRegisterPassedArguments + kCArgSlotCount; + stack_argument[stack_index] = va_arg(parameters, int64_t); + } + va_end(parameters); + set_register(sp, entry_stack); + + CallInternal(entry); + + // Pop stack passed arguments. + CHECK_EQ(entry_stack, get_register(sp)); + set_register(sp, original_stack); + + int64_t result = get_register(v0); + return result; +} + + +double Simulator::CallFP(byte* entry, double d0, double d1) { + if (!IsMipsSoftFloatABI) { + const FPURegister fparg2 = (kMipsAbi == kN64) ? 
f13 : f14; + set_fpu_register_double(f12, d0); + set_fpu_register_double(fparg2, d1); + } else { + int buffer[2]; + DCHECK(sizeof(buffer[0]) * 2 == sizeof(d0)); + memcpy(buffer, &d0, sizeof(d0)); + set_dw_register(a0, buffer); + memcpy(buffer, &d1, sizeof(d1)); + set_dw_register(a2, buffer); + } + CallInternal(entry); + if (!IsMipsSoftFloatABI) { + return get_fpu_register_double(f0); + } else { + return get_double_from_register_pair(v0); + } +} + + +uintptr_t Simulator::PushAddress(uintptr_t address) { + int64_t new_sp = get_register(sp) - sizeof(uintptr_t); + uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); + *stack_slot = address; + set_register(sp, new_sp); + return new_sp; +} + + +uintptr_t Simulator::PopAddress() { + int64_t current_sp = get_register(sp); + uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); + uintptr_t address = *stack_slot; + set_register(sp, current_sp + sizeof(uintptr_t)); + return address; +} + + +#undef UNSUPPORTED + +} } // namespace v8::internal + +#endif // USE_SIMULATOR + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mips64/simulator-mips64.h nodejs-0.11.15/deps/v8/src/mips64/simulator-mips64.h --- nodejs-0.11.13/deps/v8/src/mips64/simulator-mips64.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/simulator-mips64.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,479 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + +// Declares a Simulator for MIPS instructions if we are not generating a native +// MIPS binary. This Simulator allows us to run and debug MIPS code generation +// on regular desktop machines. +// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro, +// which will start execution in the Simulator or forwards to the real entry +// on a MIPS HW platform. 
+ +#ifndef V8_MIPS_SIMULATOR_MIPS_H_ +#define V8_MIPS_SIMULATOR_MIPS_H_ + +#include "src/allocation.h" +#include "src/mips64/constants-mips64.h" + +#if !defined(USE_SIMULATOR) +// Running without a simulator on a native mips platform. + +namespace v8 { +namespace internal { + +// When running without a simulator we call the entry directly. +#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ + entry(p0, p1, p2, p3, p4) + + +// Call the generated regexp code directly. The code at the entry address +// should act as a function matching the type arm_regexp_matcher. +// The fifth (or ninth) argument is a dummy that reserves the space used for +// the return address added by the ExitFrame in native calls. +#ifdef MIPS_ABI_N64 +typedef int (*mips_regexp_matcher)(String* input, + int64_t start_offset, + const byte* input_start, + const byte* input_end, + int* output, + int64_t output_size, + Address stack_base, + int64_t direct_call, + void* return_address, + Isolate* isolate); + +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + (FUNCTION_CAST<mips_regexp_matcher>(entry)( \ + p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)) + +#else // O32 Abi. + +typedef int (*mips_regexp_matcher)(String* input, + int32_t start_offset, + const byte* input_start, + const byte* input_end, + void* return_address, + int* output, + int32_t output_size, + Address stack_base, + int32_t direct_call, + Isolate* isolate); + +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + (FUNCTION_CAST<mips_regexp_matcher>(entry)( \ + p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) + +#endif // MIPS_ABI_N64 + + +// The stack limit beyond which we will throw stack overflow errors in +// generated code. Because generated code on mips uses the C stack, we +// just use the C stack limit. 
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
+};
+
+} } // namespace v8::internal
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) >= limit ? \
+ reinterpret_cast<uintptr_t>(this) - limit : 0)
+
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data. 
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class Simulator {
+ public:
+ friend class MipsDebugger;
+
+ // Registers are declared in order. See SMRL chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ // Generated code will always use doubles. So we will only use even registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
+ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+ f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int64_t value);
+ void set_register_word(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
+ int64_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ // Same for FPURegisters. 
+ void set_fpu_register(int fpureg, int64_t value); + void set_fpu_register_word(int fpureg, int32_t value); + void set_fpu_register_hi_word(int fpureg, int32_t value); + void set_fpu_register_float(int fpureg, float value); + void set_fpu_register_double(int fpureg, double value); + int64_t get_fpu_register(int fpureg) const; + int32_t get_fpu_register_word(int fpureg) const; + int32_t get_fpu_register_signed_word(int fpureg) const; + uint32_t get_fpu_register_hi_word(int fpureg) const; + float get_fpu_register_float(int fpureg) const; + double get_fpu_register_double(int fpureg) const; + void set_fcsr_bit(uint32_t cc, bool value); + bool test_fcsr_bit(uint32_t cc); + bool set_fcsr_round_error(double original, double rounded); + bool set_fcsr_round64_error(double original, double rounded); + + // Special case of set_register and get_register to access the raw PC value. + void set_pc(int64_t value); + int64_t get_pc() const; + + Address get_sp() { + return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp))); + } + + // Accessor to the internal simulator stack area. + uintptr_t StackLimit() const; + + // Executes MIPS instructions until the PC reaches end_sim_pc. + void Execute(); + + // Call on program start. + static void Initialize(Isolate* isolate); + + // V8 generally calls into generated JS code with 5 parameters and into + // generated RegExp code with 7 parameters. This is a convenience function, + // which sets up the simulator state and grabs the result on return. + int64_t Call(byte* entry, int argument_count, ...); + // Alternative: call a 2-argument double function. + double CallFP(byte* entry, double d0, double d1); + + // Push an address onto the JS stack. + uintptr_t PushAddress(uintptr_t address); + + // Pop an address from the JS stack. + uintptr_t PopAddress(); + + // Debugger input. + void set_last_debugger_input(char* input); + char* last_debugger_input() { return last_debugger_input_; } + + // ICache checking. 
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start, + size_t size); + + // Returns true if pc register contains one of the 'special_values' defined + // below (bad_ra, end_sim_pc). + bool has_bad_pc() const; + + private: + enum special_values { + // Known bad pc value to ensure that the simulator does not execute + // without being properly setup. + bad_ra = -1, + // A pc value used to signal the simulator to stop execution. Generally + // the ra is set to this value on transition from native C code to + // simulated execution, so that the simulator can "return" to the native + // C code. + end_sim_pc = -2, + // Unpredictable value. + Unpredictable = 0xbadbeaf + }; + + // Unsupported instructions use Format to print an error and stop execution. + void Format(Instruction* instr, const char* format); + + // Read and write memory. + inline uint32_t ReadBU(int64_t addr); + inline int32_t ReadB(int64_t addr); + inline void WriteB(int64_t addr, uint8_t value); + inline void WriteB(int64_t addr, int8_t value); + + inline uint16_t ReadHU(int64_t addr, Instruction* instr); + inline int16_t ReadH(int64_t addr, Instruction* instr); + // Note: Overloaded on the sign of the value. + inline void WriteH(int64_t addr, uint16_t value, Instruction* instr); + inline void WriteH(int64_t addr, int16_t value, Instruction* instr); + + inline uint32_t ReadWU(int64_t addr, Instruction* instr); + inline int32_t ReadW(int64_t addr, Instruction* instr); + inline void WriteW(int64_t addr, int32_t value, Instruction* instr); + inline int64_t Read2W(int64_t addr, Instruction* instr); + inline void Write2W(int64_t addr, int64_t value, Instruction* instr); + + inline double ReadD(int64_t addr, Instruction* instr); + inline void WriteD(int64_t addr, double value, Instruction* instr); + + // Helper for debugging memory access. + inline void DieOrDebug(); + + // Helpers for data value tracing. 
+ enum TraceType { + BYTE, + HALF, + WORD, + DWORD + // DFLOAT - Floats may have printing issues due to paired lwc1's + }; + + void TraceRegWr(int64_t value); + void TraceMemWr(int64_t addr, int64_t value, TraceType t); + void TraceMemRd(int64_t addr, int64_t value); + + // Operations depending on endianness. + // Get Double Higher / Lower word. + inline int32_t GetDoubleHIW(double* addr); + inline int32_t GetDoubleLOW(double* addr); + // Set Double Higher / Lower word. + inline int32_t SetDoubleHIW(double* addr); + inline int32_t SetDoubleLOW(double* addr); + + // Executing is handled based on the instruction type. + void DecodeTypeRegister(Instruction* instr); + + // Helper function for DecodeTypeRegister. + void ConfigureTypeRegister(Instruction* instr, + int64_t* alu_out, + int64_t* i64hilo, + uint64_t* u64hilo, + int64_t* next_pc, + int64_t* return_addr_reg, + bool* do_interrupt, + int64_t* result128H, + int64_t* result128L); + + void DecodeTypeImmediate(Instruction* instr); + void DecodeTypeJump(Instruction* instr); + + // Used for breakpoints and traps. + void SoftwareInterrupt(Instruction* instr); + + // Stop helper functions. + bool IsWatchpoint(uint64_t code); + void PrintWatchpoint(uint64_t code); + void HandleStop(uint64_t code, Instruction* instr); + bool IsStopInstruction(Instruction* instr); + bool IsEnabledStop(uint64_t code); + void EnableStop(uint64_t code); + void DisableStop(uint64_t code); + void IncreaseStopCounter(uint64_t code); + void PrintStopInfo(uint64_t code); + + + // Executes one instruction. + void InstructionDecode(Instruction* instr); + // Execute one instruction placed in a branch delay slot. + void BranchDelayInstructionDecode(Instruction* instr) { + if (instr->InstructionBits() == nopInstr) { + // Short-cut generic nop instructions. They are always valid and they + // never change the simulator state. 
+ return; + } + + if (instr->IsForbiddenInBranchDelay()) { + V8_Fatal(__FILE__, __LINE__, + "Eror:Unexpected %i opcode in a branch delay slot.", + instr->OpcodeValue()); + } + InstructionDecode(instr); + } + + // ICache. + static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr); + static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start, + int size); + static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page); + + enum Exception { + none, + kIntegerOverflow, + kIntegerUnderflow, + kDivideByZero, + kNumExceptions + }; + int16_t exceptions[kNumExceptions]; + + // Exceptions. + void SignalExceptions(); + + // Runtime call support. + static void* RedirectExternalReference(void* external_function, + ExternalReference::Type type); + + // Handle arguments and return value for runtime FP functions. + void GetFpArgs(double* x, double* y, int32_t* z); + void SetFpResult(const double& result); + + void CallInternal(byte* entry); + + // Architecture state. + // Registers. + int64_t registers_[kNumSimuRegisters]; + // Coprocessor Registers. + int64_t FPUregisters_[kNumFPURegisters]; + // FPU control register. + uint32_t FCSR_; + + // Simulator support. + // Allocate 1MB for stack. + size_t stack_size_; + char* stack_; + bool pc_modified_; + int64_t icount_; + int break_count_; + EmbeddedVector<char, 128> trace_buf_; + + // Debugger input. + char* last_debugger_input_; + + // Icache simulation. + v8::internal::HashMap* i_cache_; + + v8::internal::Isolate* isolate_; + + // Registered breakpoints. + Instruction* break_pc_; + Instr break_instr_; + + // Stop is disabled if bit 31 is set. + static const uint32_t kStopDisabledBit = 1 << 31; + + // A stop is enabled, meaning the simulator will stop when meeting the + // instruction, if bit 31 of watched_stops_[code].count is unset. + // The value watched_stops_[code].count & ~(1 << 31) indicates how many times + // the breakpoint was hit or gone through. 
+ struct StopCountAndDesc { + uint32_t count; + char* desc; + }; + StopCountAndDesc watched_stops_[kMaxStopCode + 1]; +}; + + +// When running with the simulator transition into simulated execution at this +// point. +#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ + reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \ + FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) + +#ifdef MIPS_ABI_N64 +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + Simulator::current(Isolate::Current())->Call( \ + entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8) +#else // Must be O32 Abi. +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + Simulator::current(Isolate::Current())->Call( \ + entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) +#endif // MIPS_ABI_N64 + + +// The simulator has its own stack. Thus it has a different stack limit from +// the C-based native code. Setting the c_limit to indicate a very small +// stack cause stack overflow errors, since the simulator ignores the input. +// This is unlikely to be an issue in practice, though it might cause testing +// trouble down the line. 
+class SimulatorStack : public v8::internal::AllStatic { + public: + static inline uintptr_t JsLimitFromCLimit(Isolate* isolate, + uintptr_t c_limit) { + return Simulator::current(isolate)->StackLimit(); + } + + static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { + Simulator* sim = Simulator::current(Isolate::Current()); + return sim->PushAddress(try_catch_address); + } + + static inline void UnregisterCTryCatch() { + Simulator::current(Isolate::Current())->PopAddress(); + } +}; + +} } // namespace v8::internal + +#endif // !defined(USE_SIMULATOR) +#endif // V8_MIPS_SIMULATOR_MIPS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/mips64/stub-cache-mips64.cc nodejs-0.11.15/deps/v8/src/mips64/stub-cache-mips64.cc --- nodejs-0.11.13/deps/v8/src/mips64/stub-cache-mips64.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mips64/stub-cache-mips64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1191 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_MIPS64 + +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + + +static void ProbeTable(Isolate* isolate, + MacroAssembler* masm, + Code::Flags flags, + StubCache::Table table, + Register receiver, + Register name, + // Number of the cache entry, not scaled. 
+ Register offset, + Register scratch, + Register scratch2, + Register offset_scratch) { + ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); + ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); + ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); + + uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address()); + uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address()); + uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address()); + + // Check the relative positions of the address fields. + DCHECK(value_off_addr > key_off_addr); + DCHECK((value_off_addr - key_off_addr) % 4 == 0); + DCHECK((value_off_addr - key_off_addr) < (256 * 4)); + DCHECK(map_off_addr > key_off_addr); + DCHECK((map_off_addr - key_off_addr) % 4 == 0); + DCHECK((map_off_addr - key_off_addr) < (256 * 4)); + + Label miss; + Register base_addr = scratch; + scratch = no_reg; + + // Multiply by 3 because there are 3 fields per entry (name, code, map). + __ dsll(offset_scratch, offset, 1); + __ Daddu(offset_scratch, offset_scratch, offset); + + // Calculate the base address of the entry. + __ li(base_addr, Operand(key_offset)); + __ dsll(at, offset_scratch, kPointerSizeLog2); + __ Daddu(base_addr, base_addr, at); + + // Check that the key in the entry matches the name. + __ ld(at, MemOperand(base_addr, 0)); + __ Branch(&miss, ne, name, Operand(at)); + + // Check the map matches. + __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr)); + __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ Branch(&miss, ne, at, Operand(scratch2)); + + // Get the code entry from the cache. + Register code = scratch2; + scratch2 = no_reg; + __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr)); + + // Check that the flags match what we're looking for. 
+ Register flags_reg = base_addr; + base_addr = no_reg; + __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); + __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup)); + __ Branch(&miss, ne, flags_reg, Operand(flags)); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Jump to the first instruction in the code stub. + __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(at); + + // Miss: fall through. + __ bind(&miss); +} + + +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(name->IsUniqueName()); + DCHECK(!receiver.is(scratch0)); + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); + __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); + + Label done; + + const int kInterceptorOrAccessCheckNeededMask = + (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); + + // Bail out if the receiver has a named interceptor or requires access checks. + Register map = scratch1; + __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); + __ Branch(miss_label, ne, scratch0, Operand(zero_reg)); + + // Check that receiver is a JSObject. + __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Load properties array. + Register properties = scratch0; + __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + // Check that the properties array is a dictionary. 
+ __ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset)); + Register tmp = properties; + __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); + __ Branch(miss_label, ne, map, Operand(tmp)); + + // Restore the temporarily used register. + __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + + + NameDictionaryLookupStub::GenerateNegativeLookup(masm, + miss_label, + &done, + receiver, + properties, + name, + scratch1); + __ bind(&done); + __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, + Code::Flags flags, + Register receiver, + Register name, + Register scratch, + Register extra, + Register extra2, + Register extra3) { + Isolate* isolate = masm->isolate(); + Label miss; + + // Make sure that code is valid. The multiplying code relies on the + // entry size being 12. + // DCHECK(sizeof(Entry) == 12); + // DCHECK(sizeof(Entry) == 3 * kPointerSize); + + // Make sure the flags does not name a specific type. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + + // Make sure that there are no register conflicts. + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); + DCHECK(!extra2.is(receiver)); + DCHECK(!extra2.is(name)); + DCHECK(!extra2.is(scratch)); + DCHECK(!extra2.is(extra)); + + // Check register validity. + DCHECK(!scratch.is(no_reg)); + DCHECK(!extra.is(no_reg)); + DCHECK(!extra2.is(no_reg)); + DCHECK(!extra3.is(no_reg)); + + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, + extra2, extra3); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Get the map of the receiver and compute the hash. 
+ __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); + __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ Daddu(scratch, scratch, at); + uint64_t mask = kPrimaryTableSize - 1; + // We shift out the last two bits because they are not part of the hash and + // they are always 01 for maps. + __ dsrl(scratch, scratch, kCacheIndexShift); + __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); + __ And(scratch, scratch, Operand(mask)); + + // Probe the primary table. + ProbeTable(isolate, + masm, + flags, + kPrimary, + receiver, + name, + scratch, + extra, + extra2, + extra3); + + // Primary miss: Compute hash for secondary probe. + __ dsrl(at, name, kCacheIndexShift); + __ Dsubu(scratch, scratch, at); + uint64_t mask2 = kSecondaryTableSize - 1; + __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); + __ And(scratch, scratch, Operand(mask2)); + + // Probe the secondary table. + ProbeTable(isolate, + masm, + flags, + kSecondary, + receiver, + name, + scratch, + extra, + extra2, + extra3); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. + __ bind(&miss); + __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, + extra2, extra3); +} + + +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { + Isolate* isolate = masm->isolate(); + // Get the global function with the given index. + Handle<JSFunction> function( + JSFunction::cast(isolate->native_context()->get(index))); + + // Check we're still in the same context. 
+ Register scratch = prototype; + const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); + __ ld(scratch, MemOperand(cp, offset)); + __ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); + __ ld(scratch, MemOperand(scratch, Context::SlotOffset(index))); + __ li(at, function); + __ Branch(miss, ne, at, Operand(scratch)); + + // Load its initial map. The global functions all have initial maps. + __ li(prototype, Handle<Map>(function->initial_map())); + // Load the prototype from the initial map. + __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); +} + + +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register scratch1, + Register scratch2, Label* miss_label) { + __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, scratch1); +} + + +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { + Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); + DCHECK(cell->value()->IsTheHole()); + __ li(scratch, Operand(cell)); + __ ld(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ Branch(miss, ne, scratch, Operand(at)); +} + + +static void PushInterceptorArguments(MacroAssembler* masm, + Register receiver, + Register holder, + Register name, + Handle<JSObject> holder_obj) { + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); + __ push(name); + Handle<InterceptorInfo> 
interceptor(holder_obj->GetNamedInterceptor()); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); + Register scratch = name; + __ li(scratch, Operand(interceptor)); + __ Push(scratch, receiver, holder); +} + + +static void CompileCallLoadPropertyWithInterceptor( + MacroAssembler* masm, + Register receiver, + Register holder, + Register name, + Handle<JSObject> holder_obj, + IC::UtilityId id) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); +} + + +// Generate call to api function. +void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch_in, + bool is_store, int argc, Register* values) { + DCHECK(!receiver.is(scratch_in)); + // Preparing to push, adjust sp. + __ Dsubu(sp, sp, Operand((argc + 1) * kPointerSize)); + __ sd(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver. + // Write the arguments to stack frame. + for (int i = 0; i < argc; i++) { + Register arg = values[argc-1-i]; + DCHECK(!receiver.is(arg)); + DCHECK(!scratch_in.is(arg)); + __ sd(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg. + } + DCHECK(optimization.is_simple_api_call()); + + // Abi for CallApiFunctionStub. + Register callee = a0; + Register call_data = a4; + Register holder = a2; + Register api_function_address = a1; + + // Put holder in place. 
+ CallOptimization::HolderLookup holder_lookup; + Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( + receiver_map, + &holder_lookup); + switch (holder_lookup) { + case CallOptimization::kHolderIsReceiver: + __ Move(holder, receiver); + break; + case CallOptimization::kHolderFound: + __ li(holder, api_holder); + break; + case CallOptimization::kHolderNotFound: + UNREACHABLE(); + break; + } + + Isolate* isolate = masm->isolate(); + Handle<JSFunction> function = optimization.constant_function(); + Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); + Handle<Object> call_data_obj(api_call_info->data(), isolate); + + // Put callee in place. + __ li(callee, function); + + bool call_data_undefined = false; + // Put call_data in place. + if (isolate->heap()->InNewSpace(*call_data_obj)) { + __ li(call_data, api_call_info); + __ ld(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); + } else if (call_data_obj->IsUndefined()) { + call_data_undefined = true; + __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); + } else { + __ li(call_data, call_data_obj); + } + // Put api_function_address in place. + Address function_address = v8::ToCData<Address>(api_call_info->callback()); + ApiFunction fun(function_address); + ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; + ExternalReference ref = + ExternalReference(&fun, + type, + masm->isolate()); + __ li(api_function_address, Operand(ref)); + + // Jump to stub. 
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); + __ TailCallStub(&stub); +} + + +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ Jump(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ li(this->name(), Operand(name)); + } +} + + +// Generate StoreTransition code, value is passed in a0 register. +// After executing generated code, the receiver_reg and name_reg +// may be clobbered. +void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register scratch3, Label* miss_label, Label* slow) { + // a0 : value. + Label exit; + + int descriptor = transition->LastAdded(); + DescriptorArray* descriptors = transition->instance_descriptors(); + PropertyDetails details = descriptors->GetDetails(descriptor); + Representation representation = details.representation(); + DCHECK(!representation.IsNone()); + + if (details.type() == CONSTANT) { + Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); + __ li(scratch1, constant); + __ Branch(miss_label, ne, value_reg, Operand(scratch1)); + } else if (representation.IsSmi()) { + __ JumpIfNotSmi(value_reg, miss_label); + } else if (representation.IsHeapObject()) { + __ JumpIfSmi(value_reg, miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + Handle<Map> current; + if (!it.Done()) { + __ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + while (true) { + // Do the CompareMap() directly within the Branch() functions. 
+ current = it.Current(); + it.Advance(); + if (it.Done()) { + __ Branch(miss_label, ne, scratch1, Operand(current)); + break; + } + __ Branch(&do_store, eq, scratch1, Operand(current)); + } + __ bind(&do_store); + } + } else if (representation.IsDouble()) { + Label do_store, heap_number; + __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex); + __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow, + TAG_RESULT, MUTABLE); + + __ JumpIfNotSmi(value_reg, &heap_number); + __ SmiUntag(scratch1, value_reg); + __ mtc1(scratch1, f6); + __ cvt_d_w(f4, f6); + __ jmp(&do_store); + + __ bind(&heap_number); + __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, + miss_label, DONT_DO_SMI_CHECK); + __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); + + __ bind(&do_store); + __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); + } + + // Stub never generated for objects that require access checks. + DCHECK(!transition->is_access_check_needed()); + + // Perform map transition for the receiver if necessary. + if (details.type() == FIELD && + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { + // The properties must be extended before we can store the value. + // We jump to a runtime call that extends the properties array. + __ push(receiver_reg); + __ li(a2, Operand(transition)); + __ Push(a2, a0); + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), + isolate()), + 3, 1); + return; + } + + // Update the map of the object. + __ li(scratch1, Operand(transition)); + __ sd(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + + // Update the write barrier for the map field. 
+ __ RecordWriteField(receiver_reg, + HeapObject::kMapOffset, + scratch1, + scratch2, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + if (details.type() == CONSTANT) { + DCHECK(value_reg.is(a0)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); + return; + } + + int index = transition->instance_descriptors()->GetFieldIndex( + transition->LastAdded()); + + // Adjust for the number of properties stored in the object. Even in the + // face of a transition we can use the old map here because the size of the + // object and the number of in-object properties is not going to change. + index -= transition->inobject_properties(); + + // TODO(verwaest): Share this code as a code stub. + SmiCheck smi_check = representation.IsTagged() + ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; + if (index < 0) { + // Set the property straight into the object. + int offset = transition->instance_size() + (index * kPointerSize); + if (representation.IsDouble()) { + __ sd(storage_reg, FieldMemOperand(receiver_reg, offset)); + } else { + __ sd(value_reg, FieldMemOperand(receiver_reg, offset)); + } + + if (!representation.IsSmi()) { + // Update the write barrier for the array address. + if (!representation.IsDouble()) { + __ mov(storage_reg, value_reg); + } + __ RecordWriteField(receiver_reg, + offset, + storage_reg, + scratch1, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + smi_check); + } + } else { + // Write to the properties array. + int offset = index * kPointerSize + FixedArray::kHeaderSize; + // Get the properties array + __ ld(scratch1, + FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); + if (representation.IsDouble()) { + __ sd(storage_reg, FieldMemOperand(scratch1, offset)); + } else { + __ sd(value_reg, FieldMemOperand(scratch1, offset)); + } + + if (!representation.IsSmi()) { + // Update the write barrier for the array address. 
+ if (!representation.IsDouble()) { + __ mov(storage_reg, value_reg); + } + __ RecordWriteField(scratch1, + offset, + storage_reg, + receiver_reg, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + smi_check); + } + } + + // Return the value (register v0). + DCHECK(value_reg.is(a0)); + __ bind(&exit); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, a0); +} + + +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + __ ld(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset)); + Label do_store; + Handle<Map> current; + while (true) { + // Do the CompareMap() directly within the Branch() functions. + current = it.Current(); + it.Advance(); + if (it.Done()) { + __ Branch(miss_label, ne, scratch1(), Operand(current)); + break; + } + __ Branch(&do_store, eq, scratch1(), Operand(current)); + } + __ bind(&do_store); + + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); +} + + +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); + + // Make sure there's no overlap between holder and object registers. + DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + && !scratch2.is(scratch1)); + + // Keep track of the current object in register reg. 
+ Register reg = object_reg; + int depth = 0; + + Handle<JSObject> current = Handle<JSObject>::null(); + if (type()->IsConstant()) { + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); + } + Handle<JSObject> prototype = Handle<JSObject>::null(); + Handle<Map> current_map = receiver_map; + Handle<Map> holder_map(holder()->map()); + // Traverse the prototype chain and check the maps in the prototype chain for + // fast and global objects or do negative lookup for normal objects. + while (!current_map.is_identical_to(holder_map)) { + ++depth; + + // Only global objects and objects that do not require access + // checks are allowed in stubs. + DCHECK(current_map->IsJSGlobalProxyMap() || + !current_map->is_access_check_needed()); + + prototype = handle(JSObject::cast(current_map->prototype())); + if (current_map->is_dictionary_map() && + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. + if (!name->IsUniqueName()) { + DCHECK(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); + } + DCHECK(current.is_null() || + current->property_dictionary()->FindEntry(name) == + NameDictionary::kNotFound); + + GenerateDictionaryNegativeLookup(masm(), miss, reg, name, + scratch1, scratch2); + + __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + reg = holder_reg; // From now on the object will be in holder_reg. + __ ld(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + } else { + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. + // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = + heap()->InNewSpace(*prototype) || depth == 1; + Register map_reg = scratch1; + if (depth != 1 || check == CHECK_ALL_MAPS) { + // CheckMap implicitly loads the map of |reg| into |map_reg|. 
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); + } else { + __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); + } + + // Check access rights to the global object. This has to happen after + // the map check so that we know that the object is actually a global + // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). + if (current_map->IsJSGlobalProxyMap()) { + __ CheckAccessGlobalProxy(reg, scratch2, miss); + } else if (current_map->IsJSGlobalObjectMap()) { + GenerateCheckPropertyCell( + masm(), Handle<JSGlobalObject>::cast(current), name, + scratch2, miss); + } + + reg = holder_reg; // From now on the object will be in holder_reg. + + if (load_prototype_from_map) { + __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); + } else { + __ li(reg, Operand(prototype)); + } + } + + // Go to the next object in the prototype chain. + current = prototype; + current_map = handle(current->map()); + } + + // Log the check depth. + LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); + + if (depth != 0 || check == CHECK_ALL_MAPS) { + // Check the holder map. + __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK); + } + + // Perform security check for access to the global object. + DCHECK(current_map->IsJSGlobalProxyMap() || + !current_map->is_access_check_needed()); + if (current_map->IsJSGlobalProxyMap()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + } + + // Return the register containing the holder. 
+ return reg; +} + + +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { + if (!miss->is_unused()) { + Label success; + __ Branch(&success); + __ bind(miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + __ bind(&success); + } +} + + +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { + if (!miss->is_unused()) { + Label success; + __ Branch(&success); + GenerateRestoreName(miss, name); + TailCallBuiltin(masm(), MissBuiltin(kind())); + __ bind(&success); + } +} + + +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { + // Return the constant value. + __ li(v0, value); + __ Ret(); +} + + +void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { + // Build AccessorInfo::args_ list on the stack and push property name below + // the exit frame to make GC aware of them and store pointers to them. + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); + STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); + DCHECK(!scratch2().is(reg)); + DCHECK(!scratch3().is(reg)); + DCHECK(!scratch4().is(reg)); + __ push(receiver()); + if (heap()->InNewSpace(callback->data())) { + __ li(scratch3(), callback); + __ ld(scratch3(), FieldMemOperand(scratch3(), + ExecutableAccessorInfo::kDataOffset)); + } else { + __ li(scratch3(), Handle<Object>(callback->data(), isolate())); + } + __ Dsubu(sp, sp, 6 * kPointerSize); + __ sd(scratch3(), MemOperand(sp, 5 * kPointerSize)); + __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); + __ sd(scratch3(), MemOperand(sp, 4 * kPointerSize)); + __ sd(scratch3(), 
MemOperand(sp, 3 * kPointerSize)); + __ li(scratch4(), + Operand(ExternalReference::isolate_address(isolate()))); + __ sd(scratch4(), MemOperand(sp, 2 * kPointerSize)); + __ sd(reg, MemOperand(sp, 1 * kPointerSize)); + __ sd(name(), MemOperand(sp, 0 * kPointerSize)); + __ Daddu(scratch2(), sp, 1 * kPointerSize); + + __ mov(a2, scratch2()); // Saved in case scratch2 == a1. + // Abi for CallApiGetter. + Register getter_address_reg = a2; + + Address getter_address = v8::ToCData<Address>(callback->getter()); + ApiFunction fun(getter_address); + ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; + ExternalReference ref = ExternalReference(&fun, type, isolate()); + __ li(getter_address_reg, Operand(ref)); + + CallApiGetterStub stub(isolate()); + __ TailCallStub(&stub); +} + + +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); + + // So far the most popular follow ups for interceptor loads are FIELD + // and CALLBACKS, so inline only them, other cases may be added + // later. + bool compile_followup_inline = false; + if (lookup->IsFound() && lookup->IsCacheable()) { + if (lookup->IsField()) { + compile_followup_inline = true; + } else if (lookup->type() == CALLBACKS && + lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); + } + } + + if (compile_followup_inline) { + // Compile the interceptor call, followed by inline code to load the + // property from further up the prototype chain if the call fails. + // Check that the maps haven't changed. 
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + + // Preserve the receiver register explicitly whenever it is different from + // the holder and it is needed should the interceptor return without any + // result. The CALLBACKS case needs the receiver to be passed into C++ code, + // the FIELD case might cause a miss during the prototype check. + bool must_perfrom_prototype_check = *holder() != lookup->holder(); + bool must_preserve_receiver_reg = !receiver().is(holder_reg) && + (lookup->type() == CALLBACKS || must_perfrom_prototype_check); + + // Save necessary data before invoking an interceptor. + // Requires a frame to make GC aware of pushed pointers. + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); + if (must_preserve_receiver_reg) { + __ Push(receiver(), holder_reg, this->name()); + } else { + __ Push(holder_reg, this->name()); + } + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method). + CompileCallLoadPropertyWithInterceptor( + masm(), receiver(), holder_reg, this->name(), holder(), + IC::kLoadPropertyWithInterceptorOnly); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); + __ Branch(&interceptor_failed, eq, v0, Operand(scratch1())); + frame_scope.GenerateLeaveFrame(); + __ Ret(); + + __ bind(&interceptor_failed); + __ pop(this->name()); + __ pop(holder_reg); + if (must_preserve_receiver_reg) { + __ pop(receiver()); + } + // Leave the internal frame. + } + GenerateLoadPostInterceptor(holder_reg, name, lookup); + } else { // !compile_followup_inline + // Call the runtime system to load the interceptor. + // Check that the maps haven't changed. 
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); + + ExternalReference ref = ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptor), isolate()); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + } +} + + +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, + Handle<ExecutableAccessorInfo> callback) { + Register holder_reg = Frontend(receiver(), name); + + __ Push(receiver(), holder_reg); // Receiver. + __ li(at, Operand(callback)); // Callback info. + __ push(at); + __ li(at, Operand(name)); + __ Push(at, value()); + + // Do tail-call to the runtime system. + ExternalReference store_callback_property = + ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); + __ TailCallExternalReference(store_callback_property, 5, 1); + + // Return the generated code. + return GetCode(kind(), Code::FAST, name); +} + + +#undef __ +#define __ ACCESS_MASM(masm) + + +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> setter) { + // ----------- S t a t e ------------- + // -- ra : return address + // ----------------------------------- + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Save value register, so we can restore it later. + __ push(value()); + + if (!setter.is_null()) { + // Call the JavaScript setter with receiver and value on the stack. + if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { + // Swap in the global receiver. 
+ __ ld(receiver, + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); + } + __ Push(receiver, value()); + ParameterCount actual(1); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper()); + } else { + // If we generate a global code snippet for deoptimization only, remember + // the place to continue after deoptimization. + masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); + } + + // We have to return the passed value, not the return value of the setter. + __ pop(v0); + + // Restore context register. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ Ret(); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( + Handle<Name> name) { + __ Push(receiver(), this->name(), value()); + + // Do tail-call to the runtime system. + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); + __ TailCallExternalReference(store_ic_property, 3, 1); + + // Return the generated code. + return GetCode(kind(), Code::FAST, name); +} + + +Register* PropertyAccessCompiler::load_calling_convention() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, a3, a0, a4, a5 }; + return registers; +} + + +Register* PropertyAccessCompiler::store_calling_convention() { + // receiver, name, scratch1, scratch2, scratch3. 
+ Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + DCHECK(a3.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, a3, a4, a5 }; + return registers; +} + + +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } + + +#undef __ +#define __ ACCESS_MASM(masm) + + +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { + // ----------- S t a t e ------------- + // -- a0 : receiver + // -- a2 : name + // -- ra : return address + // ----------------------------------- + { + FrameScope scope(masm, StackFrame::INTERNAL); + + if (!getter.is_null()) { + // Call the JavaScript getter with the receiver on the stack. + if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { + // Swap in the global receiver. + __ ld(receiver, + FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); + } + __ push(receiver); + ParameterCount actual(0); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper()); + } else { + // If we generate a global code snippet for deoptimization only, remember + // the place to continue after deoptimization. + masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); + } + + // Restore context register. + __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ Ret(); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { + Label miss; + + FrontendHeader(receiver(), name, &miss); + + // Get the value from the cell. + Register result = StoreIC::ValueRegister(); + __ li(result, Operand(cell)); + __ ld(result, FieldMemOperand(result, Cell::kValueOffset)); + + // Check for deleted property if property can actually be deleted. 
+ if (is_configurable) { + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ Branch(&miss, eq, result, Operand(at)); + } + + Counters* counters = isolate()->counters(); + __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, result); + + FrontendFooter(name, &miss); + + // Return the generated code. + return GetCode(kind(), Code::NORMAL, name); +} + + +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { + Label miss; + + if (check == PROPERTY && + (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ Branch(&miss, ne, this->name(), Operand(name)); + } + } + + Label number_case; + Register match = scratch2(); + Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; + __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi. + + // Polymorphic keyed stores may use the map register + Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); + + int receiver_count = types->length(); + int number_of_handled_maps = 0; + __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); + for (int current = 0; current < receiver_count; ++current) { + Handle<HeapType> type = types->at(current); + Handle<Map> map = IC::TypeToMap(*type, isolate()); + if (!map->is_deprecated()) { + number_of_handled_maps++; + // Check map and tail call if there's a match. + // Separate compare from branch, to provide path for above JumpIfSmi(). 
+ __ Dsubu(match, map_reg, Operand(map)); + if (type->Is(HeapType::Number())) { + DCHECK(!number_case.is_unused()); + __ bind(&number_case); + } + __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, + eq, match, Operand(zero_reg)); + } + } + DCHECK(number_of_handled_maps != 0); + + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. + InlineCacheState state = + number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC; + return GetCode(kind(), type, name, state); +} + + +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, + MapHandleList* transitioned_maps) { + Label miss; + __ JumpIfSmi(receiver(), &miss); + + int receiver_count = receiver_maps->length(); + __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); + for (int i = 0; i < receiver_count; ++i) { + if (transitioned_maps->at(i).is_null()) { + __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, + scratch1(), Operand(receiver_maps->at(i))); + } else { + Label next_map; + __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i))); + __ li(transition_map(), Operand(transitioned_maps->at(i))); + __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); + __ bind(&next_map); + } + } + + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. 
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); +} + + +#undef __ +#define __ ACCESS_MASM(masm) + + +void ElementHandlerCompiler::GenerateLoadDictionaryElement( + MacroAssembler* masm) { + // The return address is in ra + Label slow, miss; + + Register key = LoadIC::NameRegister(); + Register receiver = LoadIC::ReceiverRegister(); + DCHECK(receiver.is(a1)); + DCHECK(key.is(a2)); + + __ UntagAndJumpIfNotSmi(a6, key, &miss); + __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset)); + DCHECK(kSmiTagSize + kSmiShiftSize == 32); + __ LoadFromNumberDictionary(&slow, a4, key, v0, a6, a3, a5); + __ Ret(); + + // Slow case, key and receiver still unmodified. + __ bind(&slow); + __ IncrementCounter( + masm->isolate()->counters()->keyed_load_external_array_slow(), + 1, a2, a3); + + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); + + // Miss case, call the runtime. + __ bind(&miss); + + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_MIPS64 diff -Nru nodejs-0.11.13/deps/v8/src/mirror-debugger.js nodejs-0.11.15/deps/v8/src/mirror-debugger.js --- nodejs-0.11.13/deps/v8/src/mirror-debugger.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mirror-debugger.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2006-2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Handle id counters. var next_handle_ = 0; @@ -31,17 +8,29 @@ // Mirror cache. var mirror_cache_ = []; +var mirror_cache_enabled_ = true; -/** - * Clear the mirror handle cache. - */ -function ClearMirrorCache() { +function ToggleMirrorCache(value) { + mirror_cache_enabled_ = value; next_handle_ = 0; mirror_cache_ = []; } +// Wrapper to check whether an object is a Promise. The call may not work +// if promises are not enabled. +// TODO(yangguo): remove try-catch once promises are enabled by default. +function ObjectIsPromise(value) { + try { + return IS_SPEC_OBJECT(value) && + !IS_UNDEFINED(%DebugGetProperty(value, builtins.promiseStatus)); + } catch (e) { + return false; + } +} + + /** * Returns the mirror for a specified value or object. * @@ -54,7 +43,7 @@ var mirror; // Look for non transient mirrors in the mirror cache. 
- if (!opt_transient) { + if (!opt_transient && mirror_cache_enabled_) { for (id in mirror_cache_) { mirror = mirror_cache_[id]; if (mirror.value() === value) { @@ -78,6 +67,8 @@ mirror = new NumberMirror(value); } else if (IS_STRING(value)) { mirror = new StringMirror(value); + } else if (IS_SYMBOL(value)) { + mirror = new SymbolMirror(value); } else if (IS_ARRAY(value)) { mirror = new ArrayMirror(value); } else if (IS_DATE(value)) { @@ -90,11 +81,17 @@ mirror = new ErrorMirror(value); } else if (IS_SCRIPT(value)) { mirror = new ScriptMirror(value); + } else if (IS_MAP(value) || IS_WEAKMAP(value)) { + mirror = new MapMirror(value); + } else if (IS_SET(value) || IS_WEAKSET(value)) { + mirror = new SetMirror(value); + } else if (ObjectIsPromise(value)) { + mirror = new PromiseMirror(value); } else { mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient); } - mirror_cache_[mirror.handle()] = mirror; + if (mirror_cache_enabled_) mirror_cache_[mirror.handle()] = mirror; return mirror; } @@ -107,6 +104,7 @@ * undefined if no mirror with the requested handle was found */ function LookupMirror(handle) { + if (!mirror_cache_enabled_) throw new Error("Mirror cache is disabled"); return mirror_cache_[handle]; } @@ -149,6 +147,7 @@ var BOOLEAN_TYPE = 'boolean'; var NUMBER_TYPE = 'number'; var STRING_TYPE = 'string'; +var SYMBOL_TYPE = 'symbol'; var OBJECT_TYPE = 'object'; var FUNCTION_TYPE = 'function'; var REGEXP_TYPE = 'regexp'; @@ -159,6 +158,9 @@ var SCRIPT_TYPE = 'script'; var CONTEXT_TYPE = 'context'; var SCOPE_TYPE = 'scope'; +var PROMISE_TYPE = 'promise'; +var MAP_TYPE = 'map'; +var SET_TYPE = 'set'; // Maximum length when sending strings through the JSON protocol. 
var kMaxProtocolStringLength = 80; @@ -169,7 +171,7 @@ PropertyKind.Indexed = 2; -// A copy of the PropertyType enum from global.h +// A copy of the PropertyType enum from property-details.h var PropertyType = {}; PropertyType.Normal = 0; PropertyType.Field = 1; @@ -177,8 +179,7 @@ PropertyType.Callbacks = 3; PropertyType.Handler = 4; PropertyType.Interceptor = 5; -PropertyType.Transition = 6; -PropertyType.Nonexistent = 7; +PropertyType.Nonexistent = 6; // Different attributes for a property. @@ -205,6 +206,7 @@ // - NullMirror // - NumberMirror // - StringMirror +// - SymbolMirror // - ObjectMirror // - FunctionMirror // - UnresolvedFunctionMirror @@ -212,6 +214,9 @@ // - DateMirror // - RegExpMirror // - ErrorMirror +// - PromiseMirror +// - MapMirror +// - SetMirror // - PropertyMirror // - InternalPropertyMirror // - FrameMirror @@ -288,6 +293,15 @@ /** + * Check whether the mirror reflects a symbol. + * @returns {boolean} True if the mirror reflects a symbol + */ +Mirror.prototype.isSymbol = function() { + return this instanceof SymbolMirror; +}; + + +/** * Check whether the mirror reflects an object. * @returns {boolean} True if the mirror reflects an object */ @@ -351,6 +365,15 @@ /** + * Check whether the mirror reflects a promise. + * @returns {boolean} True if the mirror reflects a promise + */ +Mirror.prototype.isPromise = function() { + return this instanceof PromiseMirror; +}; + + +/** * Check whether the mirror reflects a property. * @returns {boolean} True if the mirror reflects a property */ @@ -405,10 +428,28 @@ /** + * Check whether the mirror reflects a map. + * @returns {boolean} True if the mirror reflects a map + */ +Mirror.prototype.isMap = function() { + return this instanceof MapMirror; +}; + + +/** + * Check whether the mirror reflects a set. + * @returns {boolean} True if the mirror reflects a set + */ +Mirror.prototype.isSet = function() { + return this instanceof SetMirror; +}; + + +/** * Allocate a handle id for this object. 
*/ Mirror.prototype.allocateHandle_ = function() { - this.handle_ = next_handle_++; + if (mirror_cache_enabled_) this.handle_ = next_handle_++; }; @@ -463,7 +504,8 @@ type === 'null' || type === 'boolean' || type === 'number' || - type === 'string'; + type === 'string' || + type === 'symbol'; }; @@ -572,6 +614,28 @@ /** + * Mirror object for a Symbol + * @param {Object} value The Symbol + * @constructor + * @extends Mirror + */ +function SymbolMirror(value) { + %_CallFunction(this, SYMBOL_TYPE, value, ValueMirror); +} +inherits(SymbolMirror, ValueMirror); + + +SymbolMirror.prototype.description = function() { + return %SymbolDescription(%_ValueOf(this.value_)); +} + + +SymbolMirror.prototype.toText = function() { + return %_CallFunction(this.value_, builtins.SymbolToString); +} + + +/** * Mirror object for objects. * @param {object} value The object reflected by this mirror * @param {boolean} transient indicate whether this object is transient with a @@ -619,6 +683,19 @@ }; +// Get all own property names except for private symbols. +function TryGetPropertyNames(object) { + try { + // TODO(yangguo): Should there be a special debugger implementation of + // %GetOwnPropertyNames that doesn't perform access checks? + return %GetOwnPropertyNames(object, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL); + } catch (e) { + // Might have hit a failed access check. + return []; + } +} + + /** * Return the property names for this object. * @param {number} kind Indicate whether named, indexed or both kinds of @@ -637,9 +714,7 @@ // Find all the named properties. if (kind & PropertyKind.Named) { - // Get all the local property names. - propertyNames = - %GetLocalPropertyNames(this.value_, PROPERTY_ATTRIBUTES_NONE); + propertyNames = TryGetPropertyNames(this.value_); total += propertyNames.length; // Get names for named interceptor properties if any. @@ -655,8 +730,8 @@ // Find all the indexed properties. if (kind & PropertyKind.Indexed) { - // Get the local element names. 
- elementNames = %GetLocalElementNames(this.value_); + // Get own element names. + elementNames = %GetOwnElementNames(this.value_); total += elementNames.length; // Get names for indexed interceptor properties. @@ -722,7 +797,7 @@ ObjectMirror.prototype.property = function(name) { - var details = %DebugGetPropertyDetails(this.value_, %ToString(name)); + var details = %DebugGetPropertyDetails(this.value_, %ToName(name)); if (details) { return new PropertyMirror(this, name, details); } @@ -796,7 +871,8 @@ /** * Return the internal properties of the value, such as [[PrimitiveValue]] of - * scalar wrapper objects and properties of the bound function. + * scalar wrapper objects, properties of the bound function and properties of + * the promise. * This method is done static to be accessible from Debug API with the bare * values without mirrors. * @return {Array} array (possibly empty) of InternalProperty instances @@ -820,6 +896,13 @@ result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs)); } return result; + } else if (ObjectIsPromise(value)) { + var result = []; + result.push(new InternalPropertyMirror("[[PromiseStatus]]", + PromiseGetStatus_(value))); + result.push(new InternalPropertyMirror("[[PromiseValue]]", + PromiseGetValue_(value))); + return result; } return []; } @@ -1172,6 +1255,106 @@ /** + * Mirror object for a Promise object. 
+ * @param {Object} value The Promise object + * @constructor + * @extends ObjectMirror + */ +function PromiseMirror(value) { + %_CallFunction(this, value, PROMISE_TYPE, ObjectMirror); +} +inherits(PromiseMirror, ObjectMirror); + + +function PromiseGetStatus_(value) { + var status = %DebugGetProperty(value, builtins.promiseStatus); + if (status == 0) return "pending"; + if (status == 1) return "resolved"; + return "rejected"; +} + + +function PromiseGetValue_(value) { + return %DebugGetProperty(value, builtins.promiseValue); +} + + +PromiseMirror.prototype.status = function() { + return PromiseGetStatus_(this.value_); +}; + + +PromiseMirror.prototype.promiseValue = function() { + return MakeMirror(PromiseGetValue_(this.value_)); +}; + + +function MapMirror(value) { + %_CallFunction(this, value, MAP_TYPE, ObjectMirror); +} +inherits(MapMirror, ObjectMirror); + + +/** + * Returns an array of key/value pairs of a map. + * This will keep keys alive for WeakMaps. + * + * @returns {Array.<Object>} Array of key/value pairs of a map. + */ +MapMirror.prototype.entries = function() { + var result = []; + + if (IS_WEAKMAP(this.value_)) { + var entries = %GetWeakMapEntries(this.value_); + for (var i = 0; i < entries.length; i += 2) { + result.push({ + key: entries[i], + value: entries[i + 1] + }); + } + return result; + } + + var iter = %_CallFunction(this.value_, builtins.MapEntries); + var next; + while (!(next = iter.next()).done) { + result.push({ + key: next.value[0], + value: next.value[1] + }); + } + return result; +}; + + +function SetMirror(value) { + %_CallFunction(this, value, SET_TYPE, ObjectMirror); +} +inherits(SetMirror, ObjectMirror); + + +/** + * Returns an array of elements of a set. + * This will keep elements alive for WeakSets. + * + * @returns {Array.<Object>} Array of elements of a set. 
+ */ +SetMirror.prototype.values = function() { + if (IS_WEAKSET(this.value_)) { + return %GetWeakSetValues(this.value_); + } + + var result = []; + var iter = %_CallFunction(this.value_, builtins.SetValues); + var next; + while (!(next = iter.next()).done) { + result.push(next.value); + } + return result; +}; + + +/** * Base mirror object for properties. * @param {ObjectMirror} mirror The mirror object having this property * @param {string} name The name of the property @@ -2272,6 +2455,9 @@ case STRING_TYPE: o.value = mirror.getTruncatedValue(this.maxStringLength_()); break; + case SYMBOL_TYPE: + o.description = mirror.description(); + break; case FUNCTION_TYPE: o.name = mirror.name(); o.inferredName = mirror.inferredName(); @@ -2346,10 +2532,15 @@ content.length = mirror.length(); break; + case SYMBOL_TYPE: + content.description = mirror.description(); + break; + case OBJECT_TYPE: case FUNCTION_TYPE: case ERROR_TYPE: case REGEXP_TYPE: + case PROMISE_TYPE: // Add object representation. this.serializeObject_(mirror, content, details); break; @@ -2452,7 +2643,6 @@ content.indexedInterceptor = true; } - // Add function specific properties. if (mirror.isFunction()) { // Add function specific properties. content.name = mirror.name(); @@ -2480,12 +2670,17 @@ } } - // Add date specific properties. if (mirror.isDate()) { // Add date specific properties. content.value = mirror.value(); } + if (mirror.isPromise()) { + // Add promise specific properties. + content.status = mirror.status(); + content.promiseValue = this.serializeReference(mirror.promiseValue()); + } + // Add actual properties - named properties followed by indexed properties. 
var propertyNames = mirror.propertyNames(PropertyKind.Named); var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed); diff -Nru nodejs-0.11.13/deps/v8/src/misc-intrinsics.h nodejs-0.11.15/deps/v8/src/misc-intrinsics.h --- nodejs-0.11.13/deps/v8/src/misc-intrinsics.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/misc-intrinsics.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MISC_INTRINSICS_H_ #define V8_MISC_INTRINSICS_H_ -#include "../include/v8.h" -#include "globals.h" +#include "include/v8.h" +#include "src/globals.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/mksnapshot.cc nodejs-0.11.15/deps/v8/src/mksnapshot.cc --- nodejs-0.11.13/deps/v8/src/mksnapshot.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/mksnapshot.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <errno.h> #include <stdio.h> @@ -32,14 +9,17 @@ #endif #include <signal.h> -#include "v8.h" +#include "src/v8.h" + +#include "include/libplatform/libplatform.h" +#include "src/assembler.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/flags.h" +#include "src/list.h" +#include "src/natives.h" +#include "src/serialize.h" -#include "bootstrapper.h" -#include "flags.h" -#include "natives.h" -#include "platform.h" -#include "serialize.h" -#include "list.h" using namespace v8; @@ -47,153 +27,210 @@ class Compressor { public: virtual ~Compressor() {} - virtual bool Compress(i::Vector<char> input) = 0; - virtual i::Vector<char>* output() = 0; + virtual bool Compress(i::Vector<i::byte> input) = 0; + virtual i::Vector<i::byte>* output() = 0; }; -class PartialSnapshotSink : public i::SnapshotByteSink { +class SnapshotWriter { public: - PartialSnapshotSink() : data_(), raw_size_(-1) { } - virtual ~PartialSnapshotSink() { data_.Free(); } - virtual void Put(int byte, const char* description) { - data_.Add(byte); - } - virtual int Position() { return data_.length(); } - void Print(FILE* fp) { - int length = Position(); - for (int j = 0; j < length; j++) { - if ((j & 0x1f) == 0x1f) { - fprintf(fp, "\n"); - } - if (j != 0) { - fprintf(fp, ","); - } - fprintf(fp, "%u", static_cast<unsigned 
char>(at(j))); - } + explicit SnapshotWriter(const char* snapshot_file) + : fp_(GetFileDescriptorOrDie(snapshot_file)) + , raw_file_(NULL) + , raw_context_file_(NULL) + , startup_blob_file_(NULL) + , compressor_(NULL) { } - char at(int i) { return data_[i]; } - bool Compress(Compressor* compressor) { - ASSERT_EQ(-1, raw_size_); - raw_size_ = data_.length(); - if (!compressor->Compress(data_.ToVector())) return false; - data_.Clear(); - data_.AddAll(*compressor->output()); - return true; + + ~SnapshotWriter() { + fclose(fp_); + if (raw_file_) fclose(raw_file_); + if (raw_context_file_) fclose(raw_context_file_); + if (startup_blob_file_) fclose(startup_blob_file_); } - int raw_size() { return raw_size_; } - private: - i::List<char> data_; - int raw_size_; -}; + void SetCompressor(Compressor* compressor) { + compressor_ = compressor; + } + void SetRawFiles(const char* raw_file, const char* raw_context_file) { + raw_file_ = GetFileDescriptorOrDie(raw_file); + raw_context_file_ = GetFileDescriptorOrDie(raw_context_file); + } -class CppByteSink : public PartialSnapshotSink { - public: - explicit CppByteSink(const char* snapshot_file) { - fp_ = i::OS::FOpen(snapshot_file, "wb"); - if (fp_ == NULL) { - i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file); + void SetStartupBlobFile(const char* startup_blob_file) { + if (startup_blob_file != NULL) + startup_blob_file_ = GetFileDescriptorOrDie(startup_blob_file); + } + + void WriteSnapshot(const i::List<i::byte>& snapshot_data, + const i::Serializer& serializer, + const i::List<i::byte>& context_snapshot_data, + const i::Serializer& context_serializer) const { + WriteSnapshotFile(snapshot_data, serializer, + context_snapshot_data, context_serializer); + MaybeWriteStartupBlob(snapshot_data, serializer, + context_snapshot_data, context_serializer); + } + + private: + void MaybeWriteStartupBlob(const i::List<i::byte>& snapshot_data, + const i::Serializer& serializer, + const i::List<i::byte>& 
context_snapshot_data, + const i::Serializer& context_serializer) const { + if (!startup_blob_file_) + return; + + i::List<i::byte> startup_blob; + i::ListSnapshotSink sink(&startup_blob); + + int spaces[] = { + i::NEW_SPACE, i::OLD_POINTER_SPACE, i::OLD_DATA_SPACE, i::CODE_SPACE, + i::MAP_SPACE, i::CELL_SPACE, i::PROPERTY_CELL_SPACE + }; + + i::byte* snapshot_bytes = snapshot_data.begin(); + sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot"); + for (size_t i = 0; i < ARRAY_SIZE(spaces); ++i) + sink.PutInt(serializer.CurrentAllocationAddress(spaces[i]), "spaces"); + + i::byte* context_bytes = context_snapshot_data.begin(); + sink.PutBlob(context_bytes, context_snapshot_data.length(), "context"); + for (size_t i = 0; i < ARRAY_SIZE(spaces); ++i) + sink.PutInt(context_serializer.CurrentAllocationAddress(spaces[i]), + "spaces"); + + size_t written = fwrite(startup_blob.begin(), 1, startup_blob.length(), + startup_blob_file_); + if (written != (size_t)startup_blob.length()) { + i::PrintF("Writing snapshot file failed.. Aborting.\n"); exit(1); } - fprintf(fp_, "// Autogenerated snapshot file. 
Do not edit.\n\n"); - fprintf(fp_, "#include \"v8.h\"\n"); - fprintf(fp_, "#include \"platform.h\"\n\n"); - fprintf(fp_, "#include \"snapshot.h\"\n\n"); - fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n"); - fprintf(fp_, "const byte Snapshot::data_[] = {"); } - virtual ~CppByteSink() { - fprintf(fp_, "const int Snapshot::size_ = %d;\n", Position()); -#ifdef COMPRESS_STARTUP_DATA_BZ2 - fprintf(fp_, "const byte* Snapshot::raw_data_ = NULL;\n"); - fprintf(fp_, - "const int Snapshot::raw_size_ = %d;\n\n", - raw_size()); -#else - fprintf(fp_, - "const byte* Snapshot::raw_data_ = Snapshot::data_;\n"); - fprintf(fp_, - "const int Snapshot::raw_size_ = Snapshot::size_;\n\n"); -#endif - fprintf(fp_, "} } // namespace v8::internal\n"); - fclose(fp_); + void WriteSnapshotFile(const i::List<i::byte>& snapshot_data, + const i::Serializer& serializer, + const i::List<i::byte>& context_snapshot_data, + const i::Serializer& context_serializer) const { + WriteFilePrefix(); + WriteData("", snapshot_data, raw_file_); + WriteData("context_", context_snapshot_data, raw_context_file_); + WriteMeta("context_", context_serializer); + WriteMeta("", serializer); + WriteFileSuffix(); } - void WriteSpaceUsed( - const char* prefix, - int new_space_used, - int pointer_space_used, - int data_space_used, - int code_space_used, - int map_space_used, - int cell_space_used, - int property_cell_space_used) { - fprintf(fp_, - "const int Snapshot::%snew_space_used_ = %d;\n", - prefix, - new_space_used); - fprintf(fp_, - "const int Snapshot::%spointer_space_used_ = %d;\n", - prefix, - pointer_space_used); - fprintf(fp_, - "const int Snapshot::%sdata_space_used_ = %d;\n", - prefix, - data_space_used); - fprintf(fp_, - "const int Snapshot::%scode_space_used_ = %d;\n", - prefix, - code_space_used); - fprintf(fp_, - "const int Snapshot::%smap_space_used_ = %d;\n", - prefix, - map_space_used); - fprintf(fp_, - "const int Snapshot::%scell_space_used_ = %d;\n", - prefix, - cell_space_used); - 
fprintf(fp_, - "const int Snapshot::%sproperty_cell_space_used_ = %d;\n", - prefix, - property_cell_space_used); - } - - void WritePartialSnapshot() { - int length = partial_sink_.Position(); - fprintf(fp_, "};\n\n"); - fprintf(fp_, "const int Snapshot::context_size_ = %d;\n", length); -#ifdef COMPRESS_STARTUP_DATA_BZ2 - fprintf(fp_, - "const int Snapshot::context_raw_size_ = %d;\n", - partial_sink_.raw_size()); -#else - fprintf(fp_, - "const int Snapshot::context_raw_size_ = " - "Snapshot::context_size_;\n"); -#endif - fprintf(fp_, "const byte Snapshot::context_data_[] = {\n"); - partial_sink_.Print(fp_); - fprintf(fp_, "};\n\n"); -#ifdef COMPRESS_STARTUP_DATA_BZ2 - fprintf(fp_, "const byte* Snapshot::context_raw_data_ = NULL;\n"); -#else - fprintf(fp_, "const byte* Snapshot::context_raw_data_ =" - " Snapshot::context_data_;\n"); -#endif + void WriteFilePrefix() const { + fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n"); + fprintf(fp_, "#include \"src/v8.h\"\n"); + fprintf(fp_, "#include \"src/base/platform/platform.h\"\n\n"); + fprintf(fp_, "#include \"src/snapshot.h\"\n\n"); + fprintf(fp_, "namespace v8 {\n"); + fprintf(fp_, "namespace internal {\n\n"); + } + + void WriteFileSuffix() const { + fprintf(fp_, "} // namespace internal\n"); + fprintf(fp_, "} // namespace v8\n"); + } + + void WriteData(const char* prefix, const i::List<i::byte>& source_data, + FILE* raw_file) const { + const i::List<i::byte>* data_to_be_written = NULL; + i::List<i::byte> compressed_data; + if (!compressor_) { + data_to_be_written = &source_data; + } else if (compressor_->Compress(source_data.ToVector())) { + compressed_data.AddAll(*compressor_->output()); + data_to_be_written = &compressed_data; + } else { + i::PrintF("Compression failed. 
Aborting.\n"); + exit(1); + } + + DCHECK(data_to_be_written); + MaybeWriteRawFile(data_to_be_written, raw_file); + WriteData(prefix, source_data, data_to_be_written); + } + + void MaybeWriteRawFile(const i::List<i::byte>* data, FILE* raw_file) const { + if (!data || !raw_file) + return; + + // Sanity check, whether i::List iterators truly return pointers to an + // internal array. + DCHECK(data->end() - data->begin() == data->length()); + + size_t written = fwrite(data->begin(), 1, data->length(), raw_file); + if (written != (size_t)data->length()) { + i::PrintF("Writing raw file failed.. Aborting.\n"); + exit(1); + } + } + + void WriteData(const char* prefix, const i::List<i::byte>& source_data, + const i::List<i::byte>* data_to_be_written) const { + fprintf(fp_, "const byte Snapshot::%sdata_[] = {\n", prefix); + WriteSnapshotData(data_to_be_written); + fprintf(fp_, "};\n"); + fprintf(fp_, "const int Snapshot::%ssize_ = %d;\n", prefix, + data_to_be_written->length()); + + if (data_to_be_written == &source_data) { + fprintf(fp_, "const byte* Snapshot::%sraw_data_ = Snapshot::%sdata_;\n", + prefix, prefix); + fprintf(fp_, "const int Snapshot::%sraw_size_ = Snapshot::%ssize_;\n", + prefix, prefix); + } else { + fprintf(fp_, "const byte* Snapshot::%sraw_data_ = NULL;\n", prefix); + fprintf(fp_, "const int Snapshot::%sraw_size_ = %d;\n", + prefix, source_data.length()); + } + fprintf(fp_, "\n"); } - void WriteSnapshot() { - Print(fp_); + void WriteMeta(const char* prefix, const i::Serializer& ser) const { + WriteSizeVar(ser, prefix, "new", i::NEW_SPACE); + WriteSizeVar(ser, prefix, "pointer", i::OLD_POINTER_SPACE); + WriteSizeVar(ser, prefix, "data", i::OLD_DATA_SPACE); + WriteSizeVar(ser, prefix, "code", i::CODE_SPACE); + WriteSizeVar(ser, prefix, "map", i::MAP_SPACE); + WriteSizeVar(ser, prefix, "cell", i::CELL_SPACE); + WriteSizeVar(ser, prefix, "property_cell", i::PROPERTY_CELL_SPACE); + fprintf(fp_, "\n"); + } + + void WriteSizeVar(const i::Serializer& ser, const 
char* prefix, + const char* name, int space) const { + fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n", + prefix, name, ser.CurrentAllocationAddress(space)); + } + + void WriteSnapshotData(const i::List<i::byte>* data) const { + for (int i = 0; i < data->length(); i++) { + if ((i & 0x1f) == 0x1f) + fprintf(fp_, "\n"); + if (i > 0) + fprintf(fp_, ","); + fprintf(fp_, "%u", static_cast<unsigned char>(data->at(i))); + } + fprintf(fp_, "\n"); } - PartialSnapshotSink* partial_sink() { return &partial_sink_; } + FILE* GetFileDescriptorOrDie(const char* filename) { + FILE* fp = base::OS::FOpen(filename, "wb"); + if (fp == NULL) { + i::PrintF("Unable to open file \"%s\" for writing.\n", filename); + exit(1); + } + return fp; + } - private: FILE* fp_; - PartialSnapshotSink partial_sink_; + FILE* raw_file_; + FILE* raw_context_file_; + FILE* startup_blob_file_; + Compressor* compressor_; }; @@ -235,7 +272,7 @@ int* raw_data_size, const char* compressed_data, int compressed_data_size) { - ASSERT_EQ(StartupData::kBZip2, + DCHECK_EQ(StartupData::kBZip2, V8::GetCompressedStartupDataAlgorithm()); unsigned int decompressed_size = *raw_data_size; int result = @@ -267,7 +304,9 @@ int main(int argc, char** argv) { V8::InitializeICU(); - i::Isolate::SetCrashIfDefaultIsolateInitialized(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); + i::CpuFeatures::Probe(true); // By default, log code create information in the snapshot. 
i::FLAG_log_code = true; @@ -291,116 +330,106 @@ i::FLAG_logfile_per_isolate = false; Isolate* isolate = v8::Isolate::New(); - isolate->Enter(); - i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - i::Serializer::Enable(internal_isolate); - Persistent<Context> context; - { - HandleScope handle_scope(isolate); - context.Reset(isolate, Context::New(isolate)); - } + { Isolate::Scope isolate_scope(isolate); + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); + internal_isolate->enable_serializer(); + + Persistent<Context> context; + { + HandleScope handle_scope(isolate); + context.Reset(isolate, Context::New(isolate)); + } - if (context.IsEmpty()) { - fprintf(stderr, - "\nException thrown while compiling natives - see above.\n\n"); - exit(1); - } - if (i::FLAG_extra_code != NULL) { - // Capture 100 frames if anything happens. - V8::SetCaptureStackTraceForUncaughtExceptions(true, 100); - HandleScope scope(isolate); - v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context)); - const char* name = i::FLAG_extra_code; - FILE* file = i::OS::FOpen(name, "rb"); - if (file == NULL) { - fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno); + if (context.IsEmpty()) { + fprintf(stderr, + "\nException thrown while compiling natives - see above.\n\n"); exit(1); } + if (i::FLAG_extra_code != NULL) { + // Capture 100 frames if anything happens. 
+ V8::SetCaptureStackTraceForUncaughtExceptions(true, 100); + HandleScope scope(isolate); + v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context)); + const char* name = i::FLAG_extra_code; + FILE* file = base::OS::FOpen(name, "rb"); + if (file == NULL) { + fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno); + exit(1); + } - fseek(file, 0, SEEK_END); - int size = ftell(file); - rewind(file); - - char* chars = new char[size + 1]; - chars[size] = '\0'; - for (int i = 0; i < size;) { - int read = static_cast<int>(fread(&chars[i], 1, size - i, file)); - if (read < 0) { - fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno); + fseek(file, 0, SEEK_END); + int size = ftell(file); + rewind(file); + + char* chars = new char[size + 1]; + chars[size] = '\0'; + for (int i = 0; i < size;) { + int read = static_cast<int>(fread(&chars[i], 1, size - i, file)); + if (read < 0) { + fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno); + exit(1); + } + i += read; + } + fclose(file); + Local<String> source = String::NewFromUtf8(isolate, chars); + TryCatch try_catch; + Local<Script> script = Script::Compile(source); + if (try_catch.HasCaught()) { + fprintf(stderr, "Failure compiling '%s'\n", name); + DumpException(try_catch.Message()); + exit(1); + } + script->Run(); + if (try_catch.HasCaught()) { + fprintf(stderr, "Failure running '%s'\n", name); + DumpException(try_catch.Message()); exit(1); } - i += read; } - fclose(file); - Local<String> source = String::NewFromUtf8(isolate, chars); - TryCatch try_catch; - Local<Script> script = Script::Compile(source); - if (try_catch.HasCaught()) { - fprintf(stderr, "Failure compiling '%s'\n", name); - DumpException(try_catch.Message()); - exit(1); + // Make sure all builtin scripts are cached. 
+ { HandleScope scope(isolate); + for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) { + internal_isolate->bootstrapper()->NativesSourceLookup(i); + } } - script->Run(); - if (try_catch.HasCaught()) { - fprintf(stderr, "Failure running '%s'\n", name); - DumpException(try_catch.Message()); - exit(1); + // If we don't do this then we end up with a stray root pointing at the + // context even after we have disposed of the context. + internal_isolate->heap()->CollectAllGarbage( + i::Heap::kNoGCFlags, "mksnapshot"); + i::Object* raw_context = *v8::Utils::OpenPersistent(context); + context.Reset(); + + // This results in a somewhat smaller snapshot, probably because it gets + // rid of some things that are cached between garbage collections. + i::List<i::byte> snapshot_data; + i::ListSnapshotSink snapshot_sink(&snapshot_data); + i::StartupSerializer ser(internal_isolate, &snapshot_sink); + ser.SerializeStrongReferences(); + + i::List<i::byte> context_data; + i::ListSnapshotSink contex_sink(&context_data); + i::PartialSerializer context_ser(internal_isolate, &ser, &contex_sink); + context_ser.Serialize(&raw_context); + ser.SerializeWeakReferences(); + + { + SnapshotWriter writer(argv[1]); + if (i::FLAG_raw_file && i::FLAG_raw_context_file) + writer.SetRawFiles(i::FLAG_raw_file, i::FLAG_raw_context_file); + if (i::FLAG_startup_blob) + writer.SetStartupBlobFile(i::FLAG_startup_blob); + #ifdef COMPRESS_STARTUP_DATA_BZ2 + BZip2Compressor bzip2; + writer.SetCompressor(&bzip2); + #endif + writer.WriteSnapshot(snapshot_data, ser, context_data, context_ser); } } - // Make sure all builtin scripts are cached. - { HandleScope scope(isolate); - for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) { - internal_isolate->bootstrapper()->NativesSourceLookup(i); - } - } - // If we don't do this then we end up with a stray root pointing at the - // context even after we have disposed of the context. 
- internal_isolate->heap()->CollectAllGarbage( - i::Heap::kNoGCFlags, "mksnapshot"); - i::Object* raw_context = *v8::Utils::OpenPersistent(context); - context.Reset(); - CppByteSink sink(argv[1]); - // This results in a somewhat smaller snapshot, probably because it gets rid - // of some things that are cached between garbage collections. - i::StartupSerializer ser(internal_isolate, &sink); - ser.SerializeStrongReferences(); - - i::PartialSerializer partial_ser( - internal_isolate, &ser, sink.partial_sink()); - partial_ser.Serialize(&raw_context); - - ser.SerializeWeakReferences(); - -#ifdef COMPRESS_STARTUP_DATA_BZ2 - BZip2Compressor compressor; - if (!sink.Compress(&compressor)) - return 1; - if (!sink.partial_sink()->Compress(&compressor)) - return 1; -#endif - sink.WriteSnapshot(); - sink.WritePartialSnapshot(); - sink.WriteSpaceUsed( - "context_", - partial_ser.CurrentAllocationAddress(i::NEW_SPACE), - partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE), - partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE), - partial_ser.CurrentAllocationAddress(i::CODE_SPACE), - partial_ser.CurrentAllocationAddress(i::MAP_SPACE), - partial_ser.CurrentAllocationAddress(i::CELL_SPACE), - partial_ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE)); - sink.WriteSpaceUsed( - "", - ser.CurrentAllocationAddress(i::NEW_SPACE), - ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE), - ser.CurrentAllocationAddress(i::OLD_DATA_SPACE), - ser.CurrentAllocationAddress(i::CODE_SPACE), - ser.CurrentAllocationAddress(i::MAP_SPACE), - ser.CurrentAllocationAddress(i::CELL_SPACE), - ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE)); - isolate->Exit(); isolate->Dispose(); V8::Dispose(); + V8::ShutdownPlatform(); + delete platform; return 0; } diff -Nru nodejs-0.11.13/deps/v8/src/msan.h nodejs-0.11.15/deps/v8/src/msan.h --- nodejs-0.11.13/deps/v8/src/msan.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/msan.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,14 
@@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // MemorySanitizer support. 
#ifndef V8_MSAN_H_ #define V8_MSAN_H_ +#include "src/globals.h" + #ifndef __has_feature # define __has_feature(x) 0 #endif @@ -38,12 +17,12 @@ # define MEMORY_SANITIZER #endif -#ifdef MEMORY_SANITIZER -# include <sanitizer/msan_interface.h> +#if defined(MEMORY_SANITIZER) && !defined(USE_SIMULATOR) +# include <sanitizer/msan_interface.h> // NOLINT // Marks a memory range as fully initialized. -# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s)) +# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s) __msan_unpoison((p), (s)) #else -# define MSAN_MEMORY_IS_INITIALIZED(p, s) +# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s) #endif #endif // V8_MSAN_H_ diff -Nru nodejs-0.11.13/deps/v8/src/natives-external.cc nodejs-0.11.15/deps/v8/src/natives-external.cc --- nodejs-0.11.13/deps/v8/src/natives-external.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/natives-external.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,196 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/natives.h" + +#include "src/base/logging.h" +#include "src/list.h" +#include "src/list-inl.h" +#include "src/snapshot-source-sink.h" +#include "src/vector.h" + +namespace v8 { +namespace internal { + + +/** + * NativesStore stores the 'native' (builtin) JS libraries. + * + * NativesStore needs to be initialized before using V8, usually by the + * embedder calling v8::SetNativesDataBlob, which calls SetNativesFromFile + * below. 
+ */ +class NativesStore { + public: + ~NativesStore() {} + + int GetBuiltinsCount() { return native_names_.length(); } + int GetDebuggerCount() { return debugger_count_; } + Vector<const char> GetScriptName(int index) { return native_names_[index]; } + Vector<const char> GetRawScriptSource(int index) { + return native_source_[index]; + } + + int GetIndex(const char* name) { + for (int i = 0; i < native_names_.length(); ++i) { + if (strcmp(name, native_names_[i].start()) == 0) { + return i; + } + } + DCHECK(false); + return -1; + } + + int GetRawScriptsSize() { + DCHECK(false); // Used for compression. Doesn't really make sense here. + return 0; + } + + Vector<const byte> GetScriptsSource() { + DCHECK(false); // Used for compression. Doesn't really make sense here. + return Vector<const byte>(); + } + + static NativesStore* MakeFromScriptsSource(SnapshotByteSource* source) { + NativesStore* store = new NativesStore; + + // We expect the libraries in the following format: + // int: # of debugger sources. + // 2N blobs: N pairs of source name + actual source. + // then, repeat for non-debugger sources. 
+ int debugger_count = source->GetInt(); + for (int i = 0; i < debugger_count; ++i) + store->ReadNameAndContentPair(source); + int library_count = source->GetInt(); + for (int i = 0; i < library_count; ++i) + store->ReadNameAndContentPair(source); + + store->debugger_count_ = debugger_count; + return store; + } + + private: + NativesStore() : debugger_count_(0) {} + + bool ReadNameAndContentPair(SnapshotByteSource* bytes) { + const byte* name; + int name_length; + const byte* source; + int source_length; + bool success = bytes->GetBlob(&name, &name_length) && + bytes->GetBlob(&source, &source_length); + if (success) { + Vector<const char> name_vector( + reinterpret_cast<const char*>(name), name_length); + Vector<const char> source_vector( + reinterpret_cast<const char*>(source), source_length); + native_names_.Add(name_vector); + native_source_.Add(source_vector); + } + return success; + } + + List<Vector<const char> > native_names_; + List<Vector<const char> > native_source_; + int debugger_count_; + + DISALLOW_COPY_AND_ASSIGN(NativesStore); +}; + + +template<NativeType type> +class NativesHolder { + public: + static NativesStore* get() { + DCHECK(holder_); + return holder_; + } + static void set(NativesStore* store) { + DCHECK(store); + holder_ = store; + } + + private: + static NativesStore* holder_; +}; + +template<NativeType type> +NativesStore* NativesHolder<type>::holder_ = NULL; + + +/** + * Read the Natives (library sources) blob, as generated by js2c + the build + * system. 
+ */ +void SetNativesFromFile(StartupData* natives_blob) { + DCHECK(natives_blob); + DCHECK(natives_blob->data); + DCHECK(natives_blob->raw_size > 0); + + SnapshotByteSource bytes( + reinterpret_cast<const byte*>(natives_blob->data), + natives_blob->raw_size); + NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes)); + NativesHolder<EXPERIMENTAL>::set(NativesStore::MakeFromScriptsSource(&bytes)); + DCHECK(!bytes.HasMore()); +} + + +// Implement NativesCollection<T> bsaed on NativesHolder + NativesStore. +// +// (The callers expect a purely static interface, since this is how the +// natives are usually compiled in. Since we implement them based on +// runtime content, we have to implement this indirection to offer +// a static interface.) +template<NativeType type> +int NativesCollection<type>::GetBuiltinsCount() { + return NativesHolder<type>::get()->GetBuiltinsCount(); +} + +template<NativeType type> +int NativesCollection<type>::GetDebuggerCount() { + return NativesHolder<type>::get()->GetDebuggerCount(); +} + +template<NativeType type> +int NativesCollection<type>::GetIndex(const char* name) { + return NativesHolder<type>::get()->GetIndex(name); +} + +template<NativeType type> +int NativesCollection<type>::GetRawScriptsSize() { + return NativesHolder<type>::get()->GetRawScriptsSize(); +} + +template<NativeType type> +Vector<const char> NativesCollection<type>::GetRawScriptSource(int index) { + return NativesHolder<type>::get()->GetRawScriptSource(index); +} + +template<NativeType type> +Vector<const char> NativesCollection<type>::GetScriptName(int index) { + return NativesHolder<type>::get()->GetScriptName(index); +} + +template<NativeType type> +Vector<const byte> NativesCollection<type>::GetScriptsSource() { + return NativesHolder<type>::get()->GetScriptsSource(); +} + +template<NativeType type> +void NativesCollection<type>::SetRawScriptsSource( + Vector<const char> raw_source) { + CHECK(false); // Use SetNativesFromFile for this 
implementation. +} + + +// The compiler can't 'see' all uses of the static methods and hence +// my chose to elide them. This we'll explicitly instantiate these. +template class NativesCollection<CORE>; +template class NativesCollection<EXPERIMENTAL>; +template class NativesCollection<D8>; +template class NativesCollection<TEST>; + +} // namespace v8::internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/natives.h nodejs-0.11.15/deps/v8/src/natives.h --- nodejs-0.11.13/deps/v8/src/natives.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/natives.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,14 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_NATIVES_H_ #define V8_NATIVES_H_ +#include "src/vector.h" + +namespace v8 { class StartupData; } // Forward declaration. + namespace v8 { namespace internal { @@ -62,6 +43,11 @@ typedef NativesCollection<CORE> Natives; typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives; +#ifdef V8_USE_EXTERNAL_STARTUP_DATA +// Used for reading the natives at runtime. Implementation in natives-empty.cc +void SetNativesFromFile(StartupData* natives_blob); +#endif + } } // namespace v8::internal #endif // V8_NATIVES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/object-observe.js nodejs-0.11.15/deps/v8/src/object-observe.js --- nodejs-0.11.13/deps/v8/src/object-observe.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/object-observe.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
"use strict"; @@ -58,7 +35,7 @@ var observationState; -function GetObservationState() { +function GetObservationStateJS() { if (IS_UNDEFINED(observationState)) observationState = %GetObservationState(); @@ -68,6 +45,7 @@ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate(); observationState.pendingObservers = null; observationState.nextCallbackPriority = 0; + observationState.lastMicrotaskId = 0; } return observationState; @@ -79,14 +57,11 @@ }; MapWrapper.prototype = { + __proto__: null, get: function(key) { - key = %UnwrapGlobalProxy(key); - if (!IS_SPEC_OBJECT(key)) return UNDEFINED; return %WeakCollectionGet(this.map_, key); }, set: function(key, value) { - key = %UnwrapGlobalProxy(key); - if (!IS_SPEC_OBJECT(key)) return UNDEFINED; %WeakCollectionSet(this.map_, key, value); }, has: function(key) { @@ -102,7 +77,7 @@ function GetContextMaps() { if (IS_UNDEFINED(contextMaps)) { var map = GetWeakMapWrapper(); - var observationState = GetObservationState(); + var observationState = GetObservationStateJS(); contextMaps = { callbackInfoMap: new map(observationState.callbackInfoMap), objectInfoMap: new map(observationState.objectInfoMap), @@ -126,15 +101,15 @@ } function GetPendingObservers() { - return GetObservationState().pendingObservers; + return GetObservationStateJS().pendingObservers; } function SetPendingObservers(pendingObservers) { - GetObservationState().pendingObservers = pendingObservers; + GetObservationStateJS().pendingObservers = pendingObservers; } function GetNextCallbackPriority() { - return GetObservationState().nextCallbackPriority++; + return GetObservationStateJS().nextCallbackPriority++; } function nullProtoObject() { @@ -153,9 +128,9 @@ typeMap[type]--; } -function TypeMapCreateFromList(typeList) { +function TypeMapCreateFromList(typeList, length) { var typeMap = TypeMapCreate(); - for (var i = 0; i < typeList.length; i++) { + for (var i = 0; i < length; i++) { TypeMapAddType(typeMap, typeList[i], true); } return typeMap; @@ 
-177,14 +152,17 @@ return true; } -var defaultAcceptTypes = TypeMapCreateFromList([ - 'add', - 'update', - 'delete', - 'setPrototype', - 'reconfigure', - 'preventExtensions' -]); +var defaultAcceptTypes = (function() { + var defaultTypes = [ + 'add', + 'update', + 'delete', + 'setPrototype', + 'reconfigure', + 'preventExtensions' + ]; + return TypeMapCreateFromList(defaultTypes, defaultTypes.length); +})(); // An Observer is a registration to observe an object by a callback with // a given set of accept types. If the set of accept types is the default @@ -196,7 +174,7 @@ return callback; var observer = nullProtoObject(); observer.callback = callback; - observer.accept = TypeMapCreateFromList(acceptList); + observer.accept = acceptList; return observer; } @@ -332,16 +310,18 @@ return objectInfo.performingCount > 0 ? objectInfo.performing : null; } -function AcceptArgIsValid(arg) { +function ConvertAcceptListToTypeMap(arg) { + // We use undefined as a sentinel for the default accept list. 
if (IS_UNDEFINED(arg)) - return true; + return arg; - if (!IS_SPEC_OBJECT(arg) || - !IS_NUMBER(arg.length) || - arg.length < 0) - return false; + if (!IS_SPEC_OBJECT(arg)) + throw MakeTypeError("observe_accept_invalid"); - return true; + var len = ToInteger(arg.length); + if (len < 0) len = 0; + + return TypeMapCreateFromList(arg, len); } // CallbackInfo's optimized state is just a number which represents its global @@ -382,21 +362,29 @@ function ObjectObserve(object, callback, acceptList) { if (!IS_SPEC_OBJECT(object)) throw MakeTypeError("observe_non_object", ["observe"]); + if (%IsJSGlobalProxy(object)) + throw MakeTypeError("observe_global_proxy", ["observe"]); if (!IS_SPEC_FUNCTION(callback)) throw MakeTypeError("observe_non_function", ["observe"]); if (ObjectIsFrozen(callback)) throw MakeTypeError("observe_callback_frozen"); - if (!AcceptArgIsValid(acceptList)) - throw MakeTypeError("observe_accept_invalid"); + var objectObserveFn = %GetObjectContextObjectObserve(object); + return objectObserveFn(object, callback, acceptList); +} + +function NativeObjectObserve(object, callback, acceptList) { var objectInfo = ObjectInfoGetOrCreate(object); - ObjectInfoAddObserver(objectInfo, callback, acceptList); + var typeList = ConvertAcceptListToTypeMap(acceptList); + ObjectInfoAddObserver(objectInfo, callback, typeList); return object; } function ObjectUnobserve(object, callback) { if (!IS_SPEC_OBJECT(object)) throw MakeTypeError("observe_non_object", ["unobserve"]); + if (%IsJSGlobalProxy(object)) + throw MakeTypeError("observe_global_proxy", ["unobserve"]); if (!IS_SPEC_FUNCTION(callback)) throw MakeTypeError("observe_non_function", ["unobserve"]); @@ -419,27 +407,33 @@ return ObjectUnobserve(object, callback); } -function ObserverEnqueueIfActive(observer, objectInfo, changeRecord, - needsAccessCheck) { +function ObserverEnqueueIfActive(observer, objectInfo, changeRecord) { if (!ObserverIsActive(observer, objectInfo) || !TypeMapHasType(ObserverGetAcceptTypes(observer), 
changeRecord.type)) { return; } var callback = ObserverGetCallback(observer); - if (needsAccessCheck && - // Drop all splice records on the floor for access-checked objects - (changeRecord.type == 'splice' || - !%IsAccessAllowedForObserver( - callback, changeRecord.object, changeRecord.name))) { + if (!%ObserverObjectAndRecordHaveSameOrigin(callback, changeRecord.object, + changeRecord)) { return; } var callbackInfo = CallbackInfoNormalize(callback); if (IS_NULL(GetPendingObservers())) { - SetPendingObservers(nullProtoObject()) - GetMicrotaskQueue().push(ObserveMicrotaskRunner); - %SetMicrotaskPending(true); + SetPendingObservers(nullProtoObject()); + if (DEBUG_IS_ACTIVE) { + var id = ++GetObservationStateJS().lastMicrotaskId; + var name = "Object.observe"; + %EnqueueMicrotask(function() { + %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name }); + ObserveMicrotaskRunner(); + %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name }); + }); + %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name }); + } else { + %EnqueueMicrotask(ObserveMicrotaskRunner); + } } GetPendingObservers()[callbackInfo.priority] = callback; callbackInfo.push(changeRecord); @@ -456,27 +450,21 @@ for (var prop in changeRecord) { if (prop === 'object' || (hasType && prop === 'type')) continue; - %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop], - READ_ONLY + DONT_DELETE); + %DefineDataPropertyUnchecked( + newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE); } - ObjectFreeze(newRecord); + ObjectFreezeJS(newRecord); - ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord, - true /* skip access check */); + ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord); } -function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord, - skipAccessCheck) { +function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord) { // TODO(rossberg): adjust once there is a story for symbols vs proxies. 
if (IS_SYMBOL(changeRecord.name)) return; - var needsAccessCheck = !skipAccessCheck && - %IsAccessCheckNeeded(changeRecord.object); - if (ChangeObserversIsOptimized(objectInfo.changeObservers)) { var observer = objectInfo.changeObservers; - ObserverEnqueueIfActive(observer, objectInfo, changeRecord, - needsAccessCheck); + ObserverEnqueueIfActive(observer, objectInfo, changeRecord); return; } @@ -484,8 +472,7 @@ var observer = objectInfo.changeObservers[priority]; if (IS_NULL(observer)) continue; - ObserverEnqueueIfActive(observer, objectInfo, changeRecord, - needsAccessCheck); + ObserverEnqueueIfActive(observer, objectInfo, changeRecord); } } @@ -514,8 +501,8 @@ addedCount: addedCount }; - ObjectFreeze(changeRecord); - ObjectFreeze(changeRecord.removed); + ObjectFreezeJS(changeRecord); + ObjectFreezeJS(changeRecord.removed); ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord); } @@ -538,7 +525,7 @@ }; } - ObjectFreeze(changeRecord); + ObjectFreezeJS(changeRecord); ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord); } @@ -562,7 +549,6 @@ throw MakeTypeError("called_on_non_object", ["performChange"]); var objectInfo = ObjectInfoGetFromNotifier(this); - if (IS_UNDEFINED(objectInfo)) throw MakeTypeError("observe_notify_non_notifier"); if (!IS_STRING(changeType)) @@ -570,6 +556,11 @@ if (!IS_SPEC_FUNCTION(changeFn)) throw MakeTypeError("observe_perform_non_function"); + var performChangeFn = %GetObjectContextNotifierPerformChange(objectInfo); + performChangeFn(objectInfo, changeType, changeFn); +} + +function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) { ObjectInfoAddPerformingType(objectInfo, changeType); var changeRecord; @@ -586,9 +577,18 @@ function ObjectGetNotifier(object) { if (!IS_SPEC_OBJECT(object)) throw MakeTypeError("observe_non_object", ["getNotifier"]); + if (%IsJSGlobalProxy(object)) + throw MakeTypeError("observe_global_proxy", ["getNotifier"]); if (ObjectIsFrozen(object)) return null; + if 
(!%ObjectWasCreatedInCurrentOrigin(object)) return null; + + var getNotifierFn = %GetObjectContextObjectGetNotifier(object); + return getNotifierFn(object); +} + +function NativeObjectGetNotifier(object) { var objectInfo = ObjectInfoGetOrCreate(object); return ObjectInfoGetNotifier(objectInfo); } diff -Nru nodejs-0.11.13/deps/v8/src/objects.cc nodejs-0.11.15/deps/v8/src/objects.cc --- nodejs-0.11.13/deps/v8/src/objects.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,112 +1,84 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "accessors.h" -#include "allocation-site-scopes.h" -#include "api.h" -#include "arguments.h" -#include "bootstrapper.h" -#include "codegen.h" -#include "code-stubs.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "deoptimizer.h" -#include "date.h" -#include "elements.h" -#include "execution.h" -#include "full-codegen.h" -#include "hydrogen.h" -#include "isolate-inl.h" -#include "log.h" -#include "objects-inl.h" -#include "objects-visiting-inl.h" -#include "macro-assembler.h" -#include "mark-compact.h" -#include "safepoint-table.h" -#include "string-stream.h" -#include "utils.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/accessors.h" +#include "src/allocation-site-scopes.h" +#include "src/api.h" +#include "src/arguments.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/date.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/elements.h" +#include "src/execution.h" +#include "src/field-index-inl.h" +#include "src/field-index.h" +#include "src/full-codegen.h" +#include "src/heap/mark-compact.h" +#include "src/heap/objects-visiting-inl.h" +#include "src/hydrogen.h" +#include "src/isolate-inl.h" +#include "src/log.h" +#include "src/lookup.h" +#include "src/macro-assembler.h" +#include "src/objects-inl.h" +#include "src/prototype.h" +#include "src/safepoint-table.h" +#include "src/string-search.h" +#include "src/string-stream.h" +#include "src/utils.h" #ifdef ENABLE_DISASSEMBLER -#include "disasm.h" -#include "disassembler.h" +#include "src/disasm.h" +#include "src/disassembler.h" #endif namespace v8 { namespace internal { - -MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor, - Object* value) { - Object* result; - { MaybeObject* maybe_result = - constructor->GetHeap()->AllocateJSObject(constructor); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - JSValue::cast(result)->set_value(value); - return result; -} - - -MaybeObject* Object::ToObject(Context* native_context) { - if (IsNumber()) { - return CreateJSValue(native_context->number_function(), this); - } else if (IsBoolean()) { - return CreateJSValue(native_context->boolean_function(), this); - } else if (IsString()) { - return CreateJSValue(native_context->string_function(), this); - } else if (IsSymbol()) { - return CreateJSValue(native_context->symbol_function(), this); +Handle<HeapType> Object::OptimalType(Isolate* isolate, + Representation representation) { + if (representation.IsNone()) return HeapType::None(isolate); + if 
(FLAG_track_field_types) { + if (representation.IsHeapObject() && IsHeapObject()) { + // We can track only JavaScript objects with stable maps. + Handle<Map> map(HeapObject::cast(this)->map(), isolate); + if (map->is_stable() && + map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE && + map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE) { + return HeapType::Class(map, isolate); + } + } } - ASSERT(IsJSObject()); - return this; + return HeapType::Any(isolate); } -MaybeObject* Object::ToObject(Isolate* isolate) { - if (IsJSReceiver()) { - return this; - } else if (IsNumber()) { - Context* native_context = isolate->context()->native_context(); - return CreateJSValue(native_context->number_function(), this); - } else if (IsBoolean()) { - Context* native_context = isolate->context()->native_context(); - return CreateJSValue(native_context->boolean_function(), this); - } else if (IsString()) { - Context* native_context = isolate->context()->native_context(); - return CreateJSValue(native_context->string_function(), this); - } else if (IsSymbol()) { - Context* native_context = isolate->context()->native_context(); - return CreateJSValue(native_context->symbol_function(), this); +MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate, + Handle<Object> object, + Handle<Context> native_context) { + if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object); + Handle<JSFunction> constructor; + if (object->IsNumber()) { + constructor = handle(native_context->number_function(), isolate); + } else if (object->IsBoolean()) { + constructor = handle(native_context->boolean_function(), isolate); + } else if (object->IsString()) { + constructor = handle(native_context->string_function(), isolate); + } else if (object->IsSymbol()) { + constructor = handle(native_context->symbol_function(), isolate); + } else { + return MaybeHandle<JSReceiver>(); } - - // Throw a type error. 
- return Failure::InternalError(); + Handle<JSObject> result = isolate->factory()->NewJSObject(constructor); + Handle<JSValue>::cast(result)->set_value(*object); + return result; } @@ -121,8 +93,8 @@ } -bool Object::IsCallable() { - Object* fun = this; +bool Object::IsCallable() const { + const Object* fun = this; while (fun->IsJSFunctionProxy()) { fun = JSFunctionProxy::cast(fun)->call_trap(); } @@ -132,7 +104,8 @@ } -void Object::Lookup(Name* name, LookupResult* result) { +void Object::Lookup(Handle<Name> name, LookupResult* result) { + DisallowHeapAllocation no_gc; Object* holder = NULL; if (IsJSReceiver()) { holder = this; @@ -151,33 +124,44 @@ 0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001); } } - ASSERT(holder != NULL); // Cannot handle null or undefined. + DCHECK(holder != NULL); // Cannot handle null or undefined. JSReceiver::cast(holder)->Lookup(name, result); } -Handle<Object> Object::GetPropertyWithReceiver( - Handle<Object> object, - Handle<Object> receiver, - Handle<Name> name, - PropertyAttributes* attributes) { - LookupResult lookup(name->GetIsolate()); - object->Lookup(*name, &lookup); - Handle<Object> result = - GetProperty(object, receiver, &lookup, name, attributes); - ASSERT(*attributes <= ABSENT); - return result; -} - - -MaybeObject* Object::GetPropertyWithReceiver(Object* receiver, - Name* name, - PropertyAttributes* attributes) { - LookupResult result(name->GetIsolate()); - Lookup(name, &result); - MaybeObject* value = GetProperty(receiver, &result, name, attributes); - ASSERT(*attributes <= ABSENT); - return value; +MaybeHandle<Object> Object::GetProperty(LookupIterator* it) { + for (; it->IsFound(); it->Next()) { + switch (it->state()) { + case LookupIterator::NOT_FOUND: + UNREACHABLE(); + case LookupIterator::JSPROXY: + return JSProxy::GetPropertyWithHandler(it->GetHolder<JSProxy>(), + it->GetReceiver(), it->name()); + case LookupIterator::INTERCEPTOR: { + MaybeHandle<Object> maybe_result = 
JSObject::GetPropertyWithInterceptor( + it->GetHolder<JSObject>(), it->GetReceiver(), it->name()); + if (!maybe_result.is_null()) return maybe_result; + if (it->isolate()->has_pending_exception()) return maybe_result; + break; + } + case LookupIterator::ACCESS_CHECK: + if (it->HasAccess(v8::ACCESS_GET)) break; + return JSObject::GetPropertyWithFailedAccessCheck(it); + case LookupIterator::PROPERTY: + if (it->HasProperty()) { + switch (it->property_kind()) { + case LookupIterator::ACCESSOR: + return GetPropertyWithAccessor(it->GetReceiver(), it->name(), + it->GetHolder<JSObject>(), + it->GetAccessors()); + case LookupIterator::DATA: + return it->GetDataValue(); + } + } + break; + } + } + return it->factory()->undefined_value(); } @@ -244,14 +228,14 @@ template<typename To> static inline To* CheckedCast(void *from) { uintptr_t temp = reinterpret_cast<uintptr_t>(from); - ASSERT(temp % sizeof(To) == 0); + DCHECK(temp % sizeof(To) == 0); return reinterpret_cast<To*>(temp); } -static MaybeObject* PerformCompare(const BitmaskCompareDescriptor& descriptor, - char* ptr, - Heap* heap) { +static Handle<Object> PerformCompare(const BitmaskCompareDescriptor& descriptor, + char* ptr, + Isolate* isolate) { uint32_t bitmask = descriptor.bitmask; uint32_t compare_value = descriptor.compare_value; uint32_t value; @@ -271,26 +255,27 @@ break; default: UNREACHABLE(); - return NULL; + return isolate->factory()->undefined_value(); } - return heap->ToBoolean((bitmask & value) == (bitmask & compare_value)); + return isolate->factory()->ToBoolean( + (bitmask & value) == (bitmask & compare_value)); } -static MaybeObject* PerformCompare(const PointerCompareDescriptor& descriptor, - char* ptr, - Heap* heap) { +static Handle<Object> PerformCompare(const PointerCompareDescriptor& descriptor, + char* ptr, + Isolate* isolate) { uintptr_t compare_value = reinterpret_cast<uintptr_t>(descriptor.compare_value); uintptr_t value = *CheckedCast<uintptr_t>(ptr); - return heap->ToBoolean(compare_value == 
value); + return isolate->factory()->ToBoolean(compare_value == value); } -static MaybeObject* GetPrimitiveValue( +static Handle<Object> GetPrimitiveValue( const PrimitiveValueDescriptor& descriptor, char* ptr, - Heap* heap) { + Isolate* isolate) { int32_t int32_value = 0; switch (descriptor.data_type) { case kDescriptorInt8Type: @@ -310,124 +295,137 @@ break; case kDescriptorUint32Type: { uint32_t value = *CheckedCast<uint32_t>(ptr); - return heap->NumberFromUint32(value); + AllowHeapAllocation allow_gc; + return isolate->factory()->NewNumberFromUint(value); } case kDescriptorBoolType: { uint8_t byte = *CheckedCast<uint8_t>(ptr); - return heap->ToBoolean(byte & (0x1 << descriptor.bool_offset)); + return isolate->factory()->ToBoolean( + byte & (0x1 << descriptor.bool_offset)); } case kDescriptorFloatType: { float value = *CheckedCast<float>(ptr); - return heap->NumberFromDouble(value); + AllowHeapAllocation allow_gc; + return isolate->factory()->NewNumber(value); } case kDescriptorDoubleType: { double value = *CheckedCast<double>(ptr); - return heap->NumberFromDouble(value); + AllowHeapAllocation allow_gc; + return isolate->factory()->NewNumber(value); } } - return heap->NumberFromInt32(int32_value); + AllowHeapAllocation allow_gc; + return isolate->factory()->NewNumberFromInt(int32_value); } -static MaybeObject* GetDeclaredAccessorProperty(Object* receiver, - DeclaredAccessorInfo* info, - Isolate* isolate) { - char* current = reinterpret_cast<char*>(receiver); +static Handle<Object> GetDeclaredAccessorProperty( + Handle<Object> receiver, + Handle<DeclaredAccessorInfo> info, + Isolate* isolate) { + DisallowHeapAllocation no_gc; + char* current = reinterpret_cast<char*>(*receiver); DeclaredAccessorDescriptorIterator iterator(info->descriptor()); while (true) { const DeclaredAccessorDescriptorData* data = iterator.Next(); switch (data->type) { case kDescriptorReturnObject: { - ASSERT(iterator.Complete()); + DCHECK(iterator.Complete()); current = 
*CheckedCast<char*>(current); - return *CheckedCast<Object*>(current); + return handle(*CheckedCast<Object*>(current), isolate); } case kDescriptorPointerDereference: - ASSERT(!iterator.Complete()); + DCHECK(!iterator.Complete()); current = *reinterpret_cast<char**>(current); break; case kDescriptorPointerShift: - ASSERT(!iterator.Complete()); + DCHECK(!iterator.Complete()); current += data->pointer_shift_descriptor.byte_offset; break; case kDescriptorObjectDereference: { - ASSERT(!iterator.Complete()); + DCHECK(!iterator.Complete()); Object* object = CheckedCast<Object>(current); int field = data->object_dereference_descriptor.internal_field; Object* smi = JSObject::cast(object)->GetInternalField(field); - ASSERT(smi->IsSmi()); + DCHECK(smi->IsSmi()); current = reinterpret_cast<char*>(smi); break; } case kDescriptorBitmaskCompare: - ASSERT(iterator.Complete()); + DCHECK(iterator.Complete()); return PerformCompare(data->bitmask_compare_descriptor, current, - isolate->heap()); + isolate); case kDescriptorPointerCompare: - ASSERT(iterator.Complete()); + DCHECK(iterator.Complete()); return PerformCompare(data->pointer_compare_descriptor, current, - isolate->heap()); + isolate); case kDescriptorPrimitiveValue: - ASSERT(iterator.Complete()); + DCHECK(iterator.Complete()); return GetPrimitiveValue(data->primitive_value_descriptor, current, - isolate->heap()); + isolate); } } UNREACHABLE(); - return NULL; + return isolate->factory()->undefined_value(); } Handle<FixedArray> JSObject::EnsureWritableFastElements( Handle<JSObject> object) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->EnsureWritableFastElements(), - FixedArray); + DCHECK(object->HasFastSmiOrObjectElements()); + Isolate* isolate = object->GetIsolate(); + Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate); + if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems; + Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap( + elems, 
isolate->factory()->fixed_array_map()); + object->set_elements(*writable_elems); + isolate->counters()->cow_arrays_converted()->Increment(); + return writable_elems; } -Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object, - Handle<Object> receiver, - Handle<Object> structure, - Handle<Name> name) { - Isolate* isolate = name->GetIsolate(); - // To accommodate both the old and the new api we switch on the - // data structure used to store the callbacks. Eventually foreign - // callbacks should be phased out. - if (structure->IsForeign()) { - AccessorDescriptor* callback = - reinterpret_cast<AccessorDescriptor*>( - Handle<Foreign>::cast(structure)->foreign_address()); - CALL_HEAP_FUNCTION(isolate, - (callback->getter)(isolate, *receiver, callback->data), - Object); - } +MaybeHandle<Object> JSProxy::GetPropertyWithHandler(Handle<JSProxy> proxy, + Handle<Object> receiver, + Handle<Name> name) { + Isolate* isolate = proxy->GetIsolate(); + + // TODO(rossberg): adjust once there is a story for symbols vs proxies. + if (name->IsSymbol()) return isolate->factory()->undefined_value(); + + Handle<Object> args[] = { receiver, name }; + return CallTrap( + proxy, "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args); +} + +MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver, + Handle<Name> name, + Handle<JSObject> holder, + Handle<Object> structure) { + Isolate* isolate = name->GetIsolate(); + DCHECK(!structure->IsForeign()); // api style callbacks. 
if (structure->IsAccessorInfo()) { - Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure); - if (!accessor_info->IsCompatibleReceiver(*receiver)) { + Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure); + if (!info->IsCompatibleReceiver(*receiver)) { Handle<Object> args[2] = { name, receiver }; Handle<Object> error = isolate->factory()->NewTypeError("incompatible_method_receiver", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>::null(); + return isolate->Throw<Object>(error); } // TODO(rossberg): Handling symbols in the API requires changing the API, // so we do not support it for now. if (name->IsSymbol()) return isolate->factory()->undefined_value(); if (structure->IsDeclaredAccessorInfo()) { - CALL_HEAP_FUNCTION( - isolate, - GetDeclaredAccessorProperty(*receiver, - DeclaredAccessorInfo::cast(*structure), - isolate), - Object); + return GetDeclaredAccessorProperty( + receiver, + Handle<DeclaredAccessorInfo>::cast(structure), + isolate); } Handle<ExecutableAccessorInfo> data = @@ -436,20 +434,19 @@ v8::ToCData<v8::AccessorGetterCallback>(data->getter()); if (call_fun == NULL) return isolate->factory()->undefined_value(); - HandleScope scope(isolate); - Handle<JSObject> self = Handle<JSObject>::cast(receiver); Handle<String> key = Handle<String>::cast(name); - LOG(isolate, ApiNamedPropertyAccess("load", *self, *name)); - PropertyCallbackArguments args(isolate, data->data(), *self, *object); + LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name)); + PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder); v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); if (result.IsEmpty()) { return isolate->factory()->undefined_value(); } Handle<Object> return_value = v8::Utils::OpenHandle(*result); 
return_value->VerifyApiCallResultType(); - return scope.CloseAndEscape(return_value); + // Rebox handle before return. + return handle(*return_value, isolate); } // __defineGetter__ callback @@ -457,233 +454,222 @@ isolate); if (getter->IsSpecFunction()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... - CALL_HEAP_FUNCTION( - isolate, - object->GetPropertyWithDefinedGetter(*receiver, - JSReceiver::cast(*getter)), - Object); + return Object::GetPropertyWithDefinedGetter( + receiver, Handle<JSReceiver>::cast(getter)); } // Getter is not a function. return isolate->factory()->undefined_value(); } -MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw, - Name* name_raw) { - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); - Handle<Object> receiver(receiver_raw, isolate); - Handle<Object> name(name_raw, isolate); - - // TODO(rossberg): adjust once there is a story for symbols vs proxies. - if (name->IsSymbol()) return isolate->heap()->undefined_value(); - - Handle<Object> args[] = { receiver, name }; - Handle<Object> result = CallTrap( - "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return Failure::Exception(); - - return *result; +bool AccessorInfo::IsCompatibleReceiverType(Isolate* isolate, + Handle<AccessorInfo> info, + Handle<HeapType> type) { + if (!info->HasExpectedReceiverType()) return true; + Handle<Map> map = IC::TypeToMap(*type, isolate); + if (!map->IsJSObjectMap()) return false; + return FunctionTemplateInfo::cast(info->expected_receiver_type()) + ->IsTemplateFor(*map); } -Handle<Object> Object::GetProperty(Handle<Object> object, - Handle<Name> name) { - // TODO(rossberg): The index test should not be here but in the GetProperty - // method (or somewhere else entirely). Needs more global clean-up. 
- uint32_t index; +MaybeHandle<Object> Object::SetPropertyWithAccessor( + Handle<Object> receiver, Handle<Name> name, Handle<Object> value, + Handle<JSObject> holder, Handle<Object> structure, StrictMode strict_mode) { Isolate* isolate = name->GetIsolate(); - if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index); - CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object); -} - - -MaybeObject* JSProxy::GetElementWithHandler(Object* receiver, - uint32_t index) { - String* name; - MaybeObject* maybe = GetHeap()->Uint32ToString(index); - if (!maybe->To<String>(&name)) return maybe; - return GetPropertyWithHandler(receiver, name); -} + // We should never get here to initialize a const with the hole + // value since a const declaration would conflict with the setter. + DCHECK(!structure->IsForeign()); + if (structure->IsExecutableAccessorInfo()) { + // Don't call executable accessor setters with non-JSObject receivers. + if (!receiver->IsJSObject()) return value; + // api style callbacks + ExecutableAccessorInfo* info = ExecutableAccessorInfo::cast(*structure); + if (!info->IsCompatibleReceiver(*receiver)) { + Handle<Object> args[2] = { name, receiver }; + Handle<Object> error = + isolate->factory()->NewTypeError("incompatible_method_receiver", + HandleVector(args, + ARRAY_SIZE(args))); + return isolate->Throw<Object>(error); + } + // TODO(rossberg): Support symbols in the API. 
+ if (name->IsSymbol()) return value; + Object* call_obj = info->setter(); + v8::AccessorSetterCallback call_fun = + v8::ToCData<v8::AccessorSetterCallback>(call_obj); + if (call_fun == NULL) return value; + Handle<String> key = Handle<String>::cast(name); + LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name)); + PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder); + args.Call(call_fun, + v8::Utils::ToLocal(key), + v8::Utils::ToLocal(value)); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); + return value; + } -Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - uint32_t index, - Handle<Object> value, - StrictMode strict_mode) { - Isolate* isolate = proxy->GetIsolate(); - Handle<String> name = isolate->factory()->Uint32ToString(index); - return SetPropertyWithHandler( - proxy, receiver, name, value, NONE, strict_mode); -} + if (structure->IsAccessorPair()) { + Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate); + if (setter->IsSpecFunction()) { + // TODO(rossberg): nicer would be to cast to some JSCallable here... + return SetPropertyWithDefinedSetter( + receiver, Handle<JSReceiver>::cast(setter), value); + } else { + if (strict_mode == SLOPPY) return value; + Handle<Object> args[2] = { name, holder }; + Handle<Object> error = + isolate->factory()->NewTypeError("no_setter_in_callback", + HandleVector(args, 2)); + return isolate->Throw<Object>(error); + } + } + // TODO(dcarney): Handle correctly. 
+ if (structure->IsDeclaredAccessorInfo()) { + return value; + } -bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) { - Isolate* isolate = proxy->GetIsolate(); - Handle<String> name = isolate->factory()->Uint32ToString(index); - return HasPropertyWithHandler(proxy, name); + UNREACHABLE(); + return MaybeHandle<Object>(); } -MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver, - JSReceiver* getter) { +MaybeHandle<Object> Object::GetPropertyWithDefinedGetter( + Handle<Object> receiver, + Handle<JSReceiver> getter) { Isolate* isolate = getter->GetIsolate(); - HandleScope scope(isolate); - Handle<JSReceiver> fun(getter); - Handle<Object> self(receiver, isolate); -#ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate->debug(); // Handle stepping into a getter if step into is active. // TODO(rossberg): should this apply to getters that are function proxies? - if (debug->StepInActive() && fun->IsJSFunction()) { + if (debug->StepInActive() && getter->IsJSFunction()) { debug->HandleStepIn( - Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false); + Handle<JSFunction>::cast(getter), Handle<Object>::null(), 0, false); } -#endif - bool has_pending_exception; - Handle<Object> result = Execution::Call( - isolate, fun, self, 0, NULL, &has_pending_exception, true); - // Check for pending exception and return the result. - if (has_pending_exception) return Failure::Exception(); - return *result; + return Execution::Call(isolate, getter, receiver, 0, NULL, true); } -// Only deal with CALLBACKS and INTERCEPTOR -Handle<Object> JSObject::GetPropertyWithFailedAccessCheck( - Handle<JSObject> object, +MaybeHandle<Object> Object::SetPropertyWithDefinedSetter( Handle<Object> receiver, - LookupResult* result, - Handle<Name> name, - PropertyAttributes* attributes) { - Isolate* isolate = name->GetIsolate(); - if (result->IsProperty()) { - switch (result->type()) { - case CALLBACKS: { - // Only allow API accessors. 
- Handle<Object> callback_obj(result->GetCallbackObject(), isolate); - if (callback_obj->IsAccessorInfo()) { - if (!AccessorInfo::cast(*callback_obj)->all_can_read()) break; - *attributes = result->GetAttributes(); - // Fall through to GetPropertyWithCallback. - } else if (callback_obj->IsAccessorPair()) { - if (!AccessorPair::cast(*callback_obj)->all_can_read()) break; - // Fall through to GetPropertyWithCallback. - } else { - break; - } - Handle<JSObject> holder(result->holder(), isolate); - return GetPropertyWithCallback(holder, receiver, callback_obj, name); - } - case NORMAL: - case FIELD: - case CONSTANT: { - // Search ALL_CAN_READ accessors in prototype chain. - LookupResult r(isolate); - result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r); - if (r.IsProperty()) { - return GetPropertyWithFailedAccessCheck( - object, receiver, &r, name, attributes); - } - break; - } - case INTERCEPTOR: { - // If the object has an interceptor, try real named properties. - // No access check in GetPropertyAttributeWithInterceptor. - LookupResult r(isolate); - result->holder()->LookupRealNamedProperty(*name, &r); - if (r.IsProperty()) { - return GetPropertyWithFailedAccessCheck( - object, receiver, &r, name, attributes); - } - break; + Handle<JSReceiver> setter, + Handle<Object> value) { + Isolate* isolate = setter->GetIsolate(); + + Debug* debug = isolate->debug(); + // Handle stepping into a setter if step into is active. + // TODO(rossberg): should this apply to getters that are function proxies? 
+ if (debug->StepInActive() && setter->IsJSFunction()) { + debug->HandleStepIn( + Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false); + } + + Handle<Object> argv[] = { value }; + RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver, + ARRAY_SIZE(argv), argv, true), + Object); + return value; +} + + +static bool FindAllCanReadHolder(LookupIterator* it) { + it->skip_interceptor(); + it->skip_access_check(); + for (; it->IsFound(); it->Next()) { + if (it->state() == LookupIterator::PROPERTY && + it->HasProperty() && + it->property_kind() == LookupIterator::ACCESSOR) { + Handle<Object> accessors = it->GetAccessors(); + if (accessors->IsAccessorInfo()) { + if (AccessorInfo::cast(*accessors)->all_can_read()) return true; } - default: - UNREACHABLE(); } } + return false; +} - // No accessible property found. - *attributes = ABSENT; - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_GET); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - return isolate->factory()->undefined_value(); + +MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck( + LookupIterator* it) { + Handle<JSObject> checked = it->GetHolder<JSObject>(); + if (FindAllCanReadHolder(it)) { + return GetPropertyWithAccessor(it->GetReceiver(), it->name(), + it->GetHolder<JSObject>(), + it->GetAccessors()); + } + it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_GET); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object); + return it->factory()->undefined_value(); } -PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck( - Handle<JSObject> object, - LookupResult* result, - Handle<Name> name, - bool continue_search) { - if (result->IsProperty()) { - switch (result->type()) { - case CALLBACKS: { - // Only allow API accessors. 
- Handle<Object> obj(result->GetCallbackObject(), object->GetIsolate()); - if (obj->IsAccessorInfo()) { - Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(obj); - if (info->all_can_read()) { - return result->GetAttributes(); - } - } else if (obj->IsAccessorPair()) { - Handle<AccessorPair> pair = Handle<AccessorPair>::cast(obj); - if (pair->all_can_read()) { - return result->GetAttributes(); - } - } - break; - } +Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck( + LookupIterator* it) { + Handle<JSObject> checked = it->GetHolder<JSObject>(); + if (FindAllCanReadHolder(it)) + return maybe(it->property_details().attributes()); + it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_HAS); + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), + Maybe<PropertyAttributes>()); + return maybe(ABSENT); +} - case NORMAL: - case FIELD: - case CONSTANT: { - if (!continue_search) break; - // Search ALL_CAN_READ accessors in prototype chain. - LookupResult r(object->GetIsolate()); - result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r); - if (r.IsProperty()) { - return GetPropertyAttributeWithFailedAccessCheck( - object, &r, name, continue_search); - } - break; - } - case INTERCEPTOR: { - // If the object has an interceptor, try real named properties. - // No access check in GetPropertyAttributeWithInterceptor. 
- LookupResult r(object->GetIsolate()); - if (continue_search) { - result->holder()->LookupRealNamedProperty(*name, &r); - } else { - result->holder()->LocalLookupRealNamedProperty(*name, &r); - } - if (!r.IsFound()) break; - return GetPropertyAttributeWithFailedAccessCheck( - object, &r, name, continue_search); +static bool FindAllCanWriteHolder(LookupIterator* it) { + it->skip_interceptor(); + it->skip_access_check(); + for (; it->IsFound(); it->Next()) { + if (it->state() == LookupIterator::PROPERTY && it->HasProperty() && + it->property_kind() == LookupIterator::ACCESSOR) { + Handle<Object> accessors = it->GetAccessors(); + if (accessors->IsAccessorInfo()) { + if (AccessorInfo::cast(*accessors)->all_can_write()) return true; } - - case HANDLER: - case TRANSITION: - case NONEXISTENT: - UNREACHABLE(); } } + return false; +} + + +MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck( + LookupIterator* it, Handle<Object> value, StrictMode strict_mode) { + Handle<JSObject> checked = it->GetHolder<JSObject>(); + if (FindAllCanWriteHolder(it)) { + return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value, + it->GetHolder<JSObject>(), + it->GetAccessors(), strict_mode); + } - object->GetIsolate()->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS); - return ABSENT; + it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_SET); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object); + return value; } Object* JSObject::GetNormalizedProperty(const LookupResult* result) { - ASSERT(!HasFastProperties()); + DCHECK(!HasFastProperties()); Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry()); if (IsGlobalObject()) { value = PropertyCell::cast(value)->value(); } - ASSERT(!value->IsPropertyCell() && !value->IsCell()); + DCHECK(!value->IsPropertyCell() && !value->IsCell()); + return value; +} + + +Handle<Object> JSObject::GetNormalizedProperty(Handle<JSObject> object, + const LookupResult* result) { + 
DCHECK(!object->HasFastProperties()); + Isolate* isolate = object->GetIsolate(); + Handle<Object> value(object->property_dictionary()->ValueAt( + result->GetDictionaryEntry()), isolate); + if (object->IsGlobalObject()) { + value = handle(Handle<PropertyCell>::cast(value)->value(), isolate); + DCHECK(!value->IsTheHole()); + } + DCHECK(!value->IsPropertyCell() && !value->IsCell()); return value; } @@ -691,7 +677,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, const LookupResult* result, Handle<Object> value) { - ASSERT(!object->HasFastProperties()); + DCHECK(!object->HasFastProperties()); NameDictionary* property_dictionary = object->property_dictionary(); if (object->IsGlobalObject()) { Handle<PropertyCell> cell(PropertyCell::cast( @@ -703,22 +689,11 @@ } -// TODO(mstarzinger): Temporary wrapper until handlified. -static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict, - Handle<Name> name, - Handle<Object> value, - PropertyDetails details) { - CALL_HEAP_FUNCTION(dict->GetIsolate(), - dict->Add(*name, *value, details), - NameDictionary); -} - - void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name, Handle<Object> value, PropertyDetails details) { - ASSERT(!object->HasFastProperties()); + DCHECK(!object->HasFastProperties()); Handle<NameDictionary> property_dictionary(object->property_dictionary()); if (!name->IsUniqueName()) { @@ -726,15 +701,15 @@ Handle<String>::cast(name)); } - int entry = property_dictionary->FindEntry(*name); + int entry = property_dictionary->FindEntry(name); if (entry == NameDictionary::kNotFound) { Handle<Object> store_value = value; if (object->IsGlobalObject()) { store_value = object->GetIsolate()->factory()->NewPropertyCell(value); } - property_dictionary = - NameDictionaryAdd(property_dictionary, name, store_value, details); + property_dictionary = NameDictionary::Add( + property_dictionary, name, store_value, details); object->set_properties(*property_dictionary); return; 
} @@ -747,7 +722,7 @@ property_dictionary->SetNextEnumerationIndex(enumeration_index + 1); } else { enumeration_index = original_details.dictionary_index(); - ASSERT(enumeration_index > 0); + DCHECK(enumeration_index > 0); } details = PropertyDetails( @@ -760,25 +735,18 @@ // Please note we have to update the property details. property_dictionary->DetailsAtPut(entry, details); } else { - property_dictionary->SetEntry(entry, *name, *value, details); + property_dictionary->SetEntry(entry, name, value, details); } } -// TODO(mstarzinger): Temporary wrapper until target is handlified. -Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict, - Handle<Name> name) { - CALL_HEAP_FUNCTION(dict->GetIsolate(), dict->Shrink(*name), NameDictionary); -} - - Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object, Handle<Name> name, DeleteMode mode) { - ASSERT(!object->HasFastProperties()); + DCHECK(!object->HasFastProperties()); Isolate* isolate = object->GetIsolate(); Handle<NameDictionary> dictionary(object->property_dictionary()); - int entry = dictionary->FindEntry(*name); + int entry = dictionary->FindEntry(name); if (entry != NameDictionary::kNotFound) { // If we have a global object set the cell to the hole. if (object->IsGlobalObject()) { @@ -790,18 +758,19 @@ // from the DontDelete cell without checking if it contains // the hole value. 
Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map())); - ASSERT(new_map->is_dictionary_map()); - object->set_map(*new_map); + DCHECK(new_map->is_dictionary_map()); + JSObject::MigrateToMap(object, new_map); } Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry))); Handle<Object> value = isolate->factory()->the_hole_value(); PropertyCell::SetValueInferType(cell, value); dictionary->DetailsAtPut(entry, details.AsDeleted()); } else { - Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate); + Handle<Object> deleted( + NameDictionary::DeleteProperty(dictionary, entry, mode)); if (*deleted == isolate->heap()->true_value()) { Handle<NameDictionary> new_properties = - NameDictionaryShrink(dictionary, name); + NameDictionary::Shrink(dictionary, name); object->set_properties(*new_properties); } return deleted; @@ -826,189 +795,40 @@ } -Handle<Object> Object::GetProperty(Handle<Object> object, - Handle<Object> receiver, - LookupResult* result, - Handle<Name> key, - PropertyAttributes* attributes) { - Isolate* isolate = result->isolate(); - CALL_HEAP_FUNCTION( - isolate, - object->GetProperty(*receiver, result, *key, attributes), - Object); -} - - -MaybeObject* Object::GetPropertyOrFail(Handle<Object> object, - Handle<Object> receiver, - LookupResult* result, - Handle<Name> key, - PropertyAttributes* attributes) { - Isolate* isolate = result->isolate(); - CALL_HEAP_FUNCTION_PASS_EXCEPTION( - isolate, - object->GetProperty(*receiver, result, *key, attributes)); -} - - -// TODO(yangguo): handlify this and get rid of. -MaybeObject* Object::GetProperty(Object* receiver, - LookupResult* result, - Name* name, - PropertyAttributes* attributes) { - Isolate* isolate = name->GetIsolate(); - Heap* heap = isolate->heap(); - -#ifdef DEBUG - // TODO(mstarzinger): Only because of the AssertNoContextChange, drop as soon - // as this method has been fully handlified. 
- HandleScope scope(isolate); -#endif - - // Make sure that the top context does not change when doing - // callbacks or interceptor calls. - AssertNoContextChange ncc(isolate); - - // Traverse the prototype chain from the current object (this) to - // the holder and check for access rights. This avoids traversing the - // objects more than once in case of interceptors, because the - // holder will always be the interceptor holder and the search may - // only continue with a current object just after the interceptor - // holder in the prototype chain. - // Proxy handlers do not use the proxy's prototype, so we can skip this. - if (!result->IsHandler()) { - Object* last = result->IsProperty() - ? result->holder() - : Object::cast(heap->null_value()); - ASSERT(this != this->GetPrototype(isolate)); - for (Object* current = this; - true; - current = current->GetPrototype(isolate)) { - if (current->IsAccessCheckNeeded()) { - // Check if we're allowed to read from the current object. Note - // that even though we may not actually end up loading the named - // property from the current object, we still check that we have - // access to it. - JSObject* checked = JSObject::cast(current); - if (!isolate->MayNamedAccess(checked, name, v8::ACCESS_GET)) { - HandleScope scope(isolate); - Handle<Object> value = JSObject::GetPropertyWithFailedAccessCheck( - handle(checked, isolate), - handle(receiver, isolate), - result, - handle(name, isolate), - attributes); - RETURN_IF_EMPTY_HANDLE(isolate, value); - return *value; - } - } - // Stop traversing the chain once we reach the last object in the - // chain; either the holder of the result or null in case of an - // absent property. 
- if (current == last) break; - } - } - - if (!result->IsProperty()) { - *attributes = ABSENT; - return heap->undefined_value(); - } - *attributes = result->GetAttributes(); - Object* value; - switch (result->type()) { - case NORMAL: - value = result->holder()->GetNormalizedProperty(result); - ASSERT(!value->IsTheHole() || result->IsReadOnly()); - return value->IsTheHole() ? heap->undefined_value() : value; - case FIELD: { - MaybeObject* maybe_result = result->holder()->FastPropertyAt( - result->representation(), - result->GetFieldIndex().field_index()); - if (!maybe_result->To(&value)) return maybe_result; - ASSERT(!value->IsTheHole() || result->IsReadOnly()); - return value->IsTheHole() ? heap->undefined_value() : value; - } - case CONSTANT: - return result->GetConstant(); - case CALLBACKS: { - HandleScope scope(isolate); - Handle<Object> value = JSObject::GetPropertyWithCallback( - handle(result->holder(), isolate), - handle(receiver, isolate), - handle(result->GetCallbackObject(), isolate), - handle(name, isolate)); - RETURN_IF_EMPTY_HANDLE(isolate, value); - return *value; - } - case HANDLER: - return result->proxy()->GetPropertyWithHandler(receiver, name); - case INTERCEPTOR: { - HandleScope scope(isolate); - Handle<Object> value = JSObject::GetPropertyWithInterceptor( - handle(result->holder(), isolate), - handle(receiver, isolate), - handle(name, isolate), - attributes); - RETURN_IF_EMPTY_HANDLE(isolate, value); - return *value; - } - case TRANSITION: - case NONEXISTENT: - UNREACHABLE(); - break; +MaybeHandle<Object> Object::GetElementWithReceiver(Isolate* isolate, + Handle<Object> object, + Handle<Object> receiver, + uint32_t index) { + if (object->IsUndefined()) { + // TODO(verwaest): Why is this check here? 
+ UNREACHABLE(); + return isolate->factory()->undefined_value(); } - UNREACHABLE(); - return NULL; -} - - -Handle<Object> Object::GetElementWithReceiver(Isolate* isolate, - Handle<Object> object, - Handle<Object> receiver, - uint32_t index) { - Handle<Object> holder; // Iterate up the prototype chain until an element is found or the null // prototype is encountered. - for (holder = object; - !holder->IsNull(); - holder = Handle<Object>(holder->GetPrototype(isolate), isolate)) { - if (!holder->IsJSObject()) { - Context* native_context = isolate->context()->native_context(); - if (holder->IsNumber()) { - holder = Handle<Object>( - native_context->number_function()->instance_prototype(), isolate); - } else if (holder->IsString()) { - holder = Handle<Object>( - native_context->string_function()->instance_prototype(), isolate); - } else if (holder->IsSymbol()) { - holder = Handle<Object>( - native_context->symbol_function()->instance_prototype(), isolate); - } else if (holder->IsBoolean()) { - holder = Handle<Object>( - native_context->boolean_function()->instance_prototype(), isolate); - } else if (holder->IsJSProxy()) { - CALL_HEAP_FUNCTION(isolate, - Handle<JSProxy>::cast(holder)->GetElementWithHandler( - *receiver, index), - Object); - } else { - // Undefined and null have no indexed properties. - ASSERT(holder->IsUndefined() || holder->IsNull()); - return isolate->factory()->undefined_value(); - } + for (PrototypeIterator iter(isolate, object, + object->IsJSProxy() || object->IsJSObject() + ? PrototypeIterator::START_AT_RECEIVER + : PrototypeIterator::START_AT_PROTOTYPE); + !iter.IsAtEnd(); iter.Advance()) { + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) { + return JSProxy::GetElementWithHandler( + Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver, + index); } // Inline the case for JSObjects. Doing so significantly improves the // performance of fetching elements where checking the prototype chain is // necessary. 
- Handle<JSObject> js_object = Handle<JSObject>::cast(holder); + Handle<JSObject> js_object = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); // Check access rights if needed. if (js_object->IsAccessCheckNeeded()) { - if (!isolate->MayIndexedAccessWrapper(js_object, index, v8::ACCESS_GET)) { - isolate->ReportFailedAccessCheckWrapper(js_object, v8::ACCESS_GET); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) { + isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return isolate->factory()->undefined_value(); } } @@ -1018,9 +838,11 @@ } if (js_object->elements() != isolate->heap()->empty_fixed_array()) { - Handle<Object> result = js_object->GetElementsAccessor()->Get( - receiver, js_object, index); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + js_object->GetElementsAccessor()->Get(receiver, js_object, index), + Object); if (!result->IsTheHole()) return result; } } @@ -1029,10 +851,11 @@ } -Object* Object::GetPrototype(Isolate* isolate) { +Map* Object::GetRootMap(Isolate* isolate) { + DisallowHeapAllocation no_alloc; if (IsSmi()) { Context* context = isolate->context()->native_context(); - return context->number_function()->instance_prototype(); + return context->number_function()->initial_map(); } HeapObject* heap_object = HeapObject::cast(this); @@ -1040,30 +863,23 @@ // The object is either a number, a string, a boolean, // a real JS object, or a Harmony proxy. 
if (heap_object->IsJSReceiver()) { - return heap_object->map()->prototype(); + return heap_object->map(); } Context* context = isolate->context()->native_context(); if (heap_object->IsHeapNumber()) { - return context->number_function()->instance_prototype(); + return context->number_function()->initial_map(); } if (heap_object->IsString()) { - return context->string_function()->instance_prototype(); + return context->string_function()->initial_map(); } if (heap_object->IsSymbol()) { - return context->symbol_function()->instance_prototype(); + return context->symbol_function()->initial_map(); } if (heap_object->IsBoolean()) { - return context->boolean_function()->instance_prototype(); - } else { - return isolate->heap()->null_value(); + return context->boolean_function()->initial_map(); } -} - - -Map* Object::GetMarkerMap(Isolate* isolate) { - if (IsSmi()) return isolate->heap()->heap_number_map(); - return HeapObject::cast(this)->map(); + return isolate->heap()->null_value()->map(); } @@ -1083,18 +899,16 @@ return Smi::FromInt(hash); } - ASSERT(IsJSReceiver()); + DCHECK(IsJSReceiver()); return JSReceiver::cast(this)->GetIdentityHash(); } -Handle<Object> Object::GetOrCreateHash(Handle<Object> object, - Isolate* isolate) { +Handle<Smi> Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) { Handle<Object> hash(object->GetHash(), isolate); - if (hash->IsSmi()) - return hash; + if (hash->IsSmi()) return Handle<Smi>::cast(hash); - ASSERT(object->IsJSReceiver()); + DCHECK(object->IsJSReceiver()); return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object)); } @@ -1120,42 +934,52 @@ } -void Object::ShortPrint(FILE* out) { - HeapStringAllocator allocator; - StringStream accumulator(&allocator); - ShortPrint(&accumulator); - accumulator.OutputToFile(out); -} +bool Object::SameValueZero(Object* other) { + if (other == this) return true; - -void Object::ShortPrint(StringStream* accumulator) { - if (IsSmi()) { - 
Smi::cast(this)->SmiPrint(accumulator); - } else if (IsFailure()) { - Failure::cast(this)->FailurePrint(accumulator); - } else { - HeapObject::cast(this)->HeapObjectShortPrint(accumulator); + // The object is either a number, a name, an odd-ball, + // a real JS object, or a Harmony proxy. + if (IsNumber() && other->IsNumber()) { + double this_value = Number(); + double other_value = other->Number(); + // +0 == -0 is true + return this_value == other_value + || (std::isnan(this_value) && std::isnan(other_value)); + } + if (IsString() && other->IsString()) { + return String::cast(this)->Equals(String::cast(other)); } + return false; } -void Smi::SmiPrint(FILE* out) { - PrintF(out, "%d", value()); +void Object::ShortPrint(FILE* out) { + OFStream os(out); + os << Brief(this); } -void Smi::SmiPrint(StringStream* accumulator) { - accumulator->Add("%d", value()); +void Object::ShortPrint(StringStream* accumulator) { + OStringStream os; + os << Brief(this); + accumulator->Add(os.c_str()); } -void Failure::FailurePrint(StringStream* accumulator) { - accumulator->Add("Failure(%p)", reinterpret_cast<void*>(value())); +OStream& operator<<(OStream& os, const Brief& v) { + if (v.value->IsSmi()) { + Smi::cast(v.value)->SmiPrint(os); + } else { + // TODO(svenpanne) Const-correct HeapObjectShortPrint! + HeapObject* obj = const_cast<HeapObject*>(HeapObject::cast(v.value)); + obj->HeapObjectShortPrint(os); + } + return os; } -void Failure::FailurePrint(FILE* out) { - PrintF(out, "Failure(%p)", reinterpret_cast<void*>(value())); +void Smi::SmiPrint(OStream& os) const { // NOLINT + os << value(); } @@ -1183,90 +1007,55 @@ } -MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) { -#ifdef DEBUG - // Do not attempt to flatten in debug mode when allocation is not - // allowed. This is to avoid an assertion failure when allocating. - // Flattening strings is the only case where we always allow - // allocation because no GC is performed if the allocation fails. 
- if (!AllowHeapAllocation::IsAllowed()) return this; -#endif - - Heap* heap = GetHeap(); - switch (StringShape(this).representation_tag()) { - case kConsStringTag: { - ConsString* cs = ConsString::cast(this); - if (cs->second()->length() == 0) { - return cs->first(); - } - // There's little point in putting the flat string in new space if the - // cons string is in old space. It can never get GCed until there is - // an old space GC. - PretenureFlag tenure = heap->InNewSpace(this) ? pretenure : TENURED; - int len = length(); - Object* object; - String* result; - if (IsOneByteRepresentation()) { - { MaybeObject* maybe_object = - heap->AllocateRawOneByteString(len, tenure); - if (!maybe_object->ToObject(&object)) return maybe_object; - } - result = String::cast(object); - String* first = cs->first(); - int first_length = first->length(); - uint8_t* dest = SeqOneByteString::cast(result)->GetChars(); - WriteToFlat(first, dest, 0, first_length); - String* second = cs->second(); - WriteToFlat(second, - dest + first_length, - 0, - len - first_length); - } else { - { MaybeObject* maybe_object = - heap->AllocateRawTwoByteString(len, tenure); - if (!maybe_object->ToObject(&object)) return maybe_object; - } - result = String::cast(object); - uc16* dest = SeqTwoByteString::cast(result)->GetChars(); - String* first = cs->first(); - int first_length = first->length(); - WriteToFlat(first, dest, 0, first_length); - String* second = cs->second(); - WriteToFlat(second, - dest + first_length, - 0, - len - first_length); - } - cs->set_first(result); - cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER); - return result; - } - default: - return this; +Handle<String> String::SlowFlatten(Handle<ConsString> cons, + PretenureFlag pretenure) { + DCHECK(AllowHeapAllocation::IsAllowed()); + DCHECK(cons->second()->length() != 0); + Isolate* isolate = cons->GetIsolate(); + int length = cons->length(); + PretenureFlag tenure = isolate->heap()->InNewSpace(*cons) ? 
pretenure + : TENURED; + Handle<SeqString> result; + if (cons->IsOneByteRepresentation()) { + Handle<SeqOneByteString> flat = isolate->factory()->NewRawOneByteString( + length, tenure).ToHandleChecked(); + DisallowHeapAllocation no_gc; + WriteToFlat(*cons, flat->GetChars(), 0, length); + result = flat; + } else { + Handle<SeqTwoByteString> flat = isolate->factory()->NewRawTwoByteString( + length, tenure).ToHandleChecked(); + DisallowHeapAllocation no_gc; + WriteToFlat(*cons, flat->GetChars(), 0, length); + result = flat; } + cons->set_first(*result); + cons->set_second(isolate->heap()->empty_string()); + DCHECK(result->IsFlat()); + return result; } + bool String::MakeExternal(v8::String::ExternalStringResource* resource) { // Externalizing twice leaks the external resource, so it's // prohibited by the API. - ASSERT(!this->IsExternalString()); -#ifdef ENABLE_SLOW_ASSERTS + DCHECK(!this->IsExternalString()); +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent. - ASSERT(static_cast<size_t>(this->length()) == resource->length()); + DCHECK(static_cast<size_t>(this->length()) == resource->length()); ScopedVector<uc16> smart_chars(this->length()); String::WriteToFlat(this, smart_chars.start(), 0, this->length()); - ASSERT(memcmp(smart_chars.start(), + DCHECK(memcmp(smart_chars.start(), resource->data(), resource->length() * sizeof(smart_chars[0])) == 0); } #endif // DEBUG - Heap* heap = GetHeap(); int size = this->Size(); // Byte size of the original string. - if (size < ExternalString::kShortSize) { - return false; - } + // Abort if size does not allow in-place conversion. + if (size < ExternalString::kShortSize) return false; + Heap* heap = GetHeap(); bool is_ascii = this->IsOneByteRepresentation(); bool is_internalized = this->IsInternalizedString(); @@ -1280,61 +1069,68 @@ // In either case we resort to a short external string instead, omitting // the field caching the address of the backing store. 
When we encounter // short external strings in generated code, we need to bailout to runtime. + Map* new_map; if (size < ExternalString::kSize || heap->old_pointer_space()->Contains(this)) { - this->set_map_no_write_barrier( - is_internalized - ? (is_ascii - ? heap-> - short_external_internalized_string_with_one_byte_data_map() - : heap->short_external_internalized_string_map()) - : (is_ascii - ? heap->short_external_string_with_one_byte_data_map() - : heap->short_external_string_map())); - } else { - this->set_map_no_write_barrier( - is_internalized - ? (is_ascii - ? heap->external_internalized_string_with_one_byte_data_map() - : heap->external_internalized_string_map()) - : (is_ascii - ? heap->external_string_with_one_byte_data_map() - : heap->external_string_map())); + new_map = is_internalized + ? (is_ascii + ? heap-> + short_external_internalized_string_with_one_byte_data_map() + : heap->short_external_internalized_string_map()) + : (is_ascii + ? heap->short_external_string_with_one_byte_data_map() + : heap->short_external_string_map()); + } else { + new_map = is_internalized + ? (is_ascii + ? heap->external_internalized_string_with_one_byte_data_map() + : heap->external_internalized_string_map()) + : (is_ascii + ? heap->external_string_with_one_byte_data_map() + : heap->external_string_map()); } + + // Byte size of the external String object. + int new_size = this->SizeFromMap(new_map); + heap->CreateFillerObjectAt(this->address() + new_size, size - new_size); + + // We are storing the new map using release store after creating a filler for + // the left-over space to avoid races with the sweeper thread. + this->synchronized_set_map(new_map); + ExternalTwoByteString* self = ExternalTwoByteString::cast(this); self->set_resource(resource); if (is_internalized) self->Hash(); // Force regeneration of the hash value. - // Fill the remainder of the string with dead wood. - int new_size = this->Size(); // Byte size of the external String object. 
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size); heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR); return true; } bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) { -#ifdef ENABLE_SLOW_ASSERTS + // Externalizing twice leaks the external resource, so it's + // prohibited by the API. + DCHECK(!this->IsExternalString()); +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent. - ASSERT(static_cast<size_t>(this->length()) == resource->length()); + DCHECK(static_cast<size_t>(this->length()) == resource->length()); if (this->IsTwoByteRepresentation()) { ScopedVector<uint16_t> smart_chars(this->length()); String::WriteToFlat(this, smart_chars.start(), 0, this->length()); - ASSERT(String::IsOneByte(smart_chars.start(), this->length())); + DCHECK(String::IsOneByte(smart_chars.start(), this->length())); } ScopedVector<char> smart_chars(this->length()); String::WriteToFlat(this, smart_chars.start(), 0, this->length()); - ASSERT(memcmp(smart_chars.start(), + DCHECK(memcmp(smart_chars.start(), resource->data(), resource->length() * sizeof(smart_chars[0])) == 0); } #endif // DEBUG - Heap* heap = GetHeap(); int size = this->Size(); // Byte size of the original string. - if (size < ExternalString::kShortSize) { - return false; - } + // Abort if size does not allow in-place conversion. + if (size < ExternalString::kShortSize) return false; + Heap* heap = GetHeap(); bool is_internalized = this->IsInternalizedString(); // Morph the string to an external string by replacing the map and @@ -1347,23 +1143,30 @@ // In either case we resort to a short external string instead, omitting // the field caching the address of the backing store. When we encounter // short external strings in generated code, we need to bailout to runtime. 
+ Map* new_map; if (size < ExternalString::kSize || heap->old_pointer_space()->Contains(this)) { - this->set_map_no_write_barrier( - is_internalized ? heap->short_external_ascii_internalized_string_map() - : heap->short_external_ascii_string_map()); - } else { - this->set_map_no_write_barrier( - is_internalized ? heap->external_ascii_internalized_string_map() - : heap->external_ascii_string_map()); + new_map = is_internalized + ? heap->short_external_ascii_internalized_string_map() + : heap->short_external_ascii_string_map(); + } else { + new_map = is_internalized + ? heap->external_ascii_internalized_string_map() + : heap->external_ascii_string_map(); } + + // Byte size of the external String object. + int new_size = this->SizeFromMap(new_map); + heap->CreateFillerObjectAt(this->address() + new_size, size - new_size); + + // We are storing the new map using release store after creating a filler for + // the left-over space to avoid races with the sweeper thread. + this->synchronized_set_map(new_map); + ExternalAsciiString* self = ExternalAsciiString::cast(this); self->set_resource(resource); if (is_internalized) self->Hash(); // Force regeneration of the hash value. - // Fill the remainder of the string with dead wood. - int new_size = this->Size(); // Byte size of the external String object. 
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size); heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR); return true; } @@ -1433,6 +1236,16 @@ } +void String::PrintUC16(OStream& os, int start, int end) { // NOLINT + if (end < 0) end = length(); + ConsStringIteratorOp op; + StringCharacterStream stream(this, &op, start); + for (int i = start; i < end && stream.HasMore(); i++) { + os << AsUC16(stream.GetNext()); + } +} + + void JSObject::JSObjectShortPrint(StringStream* accumulator) { switch (map()->instance_type()) { case JS_ARRAY_TYPE: { @@ -1536,11 +1349,9 @@ ElementsKind from_kind, Handle<FixedArrayBase> from_elements, ElementsKind to_kind, Handle<FixedArrayBase> to_elements) { if (from_kind != to_kind) { - PrintF(file, "elements transition ["); - PrintElementsKind(file, from_kind); - PrintF(file, " -> "); - PrintElementsKind(file, to_kind); - PrintF(file, "] in "); + OFStream os(file); + os << "elements transition [" << ElementsKindToString(from_kind) << " -> " + << ElementsKindToString(to_kind) << "] in "; JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true); PrintF(file, " for "); object->ShortPrint(file); @@ -1560,32 +1371,38 @@ int descriptors, bool constant_to_field, Representation old_representation, - Representation new_representation) { - PrintF(file, "[generalizing "); + Representation new_representation, + HeapType* old_field_type, + HeapType* new_field_type) { + OFStream os(file); + os << "[generalizing "; constructor_name()->PrintOn(file); - PrintF(file, "] "); + os << "] "; Name* name = instance_descriptors()->GetKey(modify_index); if (name->IsString()) { String::cast(name)->PrintOn(file); } else { - PrintF(file, "{symbol %p}", static_cast<void*>(name)); + os << "{symbol " << static_cast<void*>(name) << "}"; } + os << ":"; if (constant_to_field) { - PrintF(file, ":c->f"); + os << "c"; } else { - PrintF(file, ":%s->%s", - old_representation.Mnemonic(), - new_representation.Mnemonic()); - } 
- PrintF(file, " ("); + os << old_representation.Mnemonic() << "{"; + old_field_type->PrintTo(os, HeapType::SEMANTIC_DIM); + os << "}"; + } + os << "->" << new_representation.Mnemonic() << "{"; + new_field_type->PrintTo(os, HeapType::SEMANTIC_DIM); + os << "} ("; if (strlen(reason) > 0) { - PrintF(file, "%s", reason); + os << reason; } else { - PrintF(file, "+%i maps", descriptors - split); + os << "+" << (descriptors - split) << " maps"; } - PrintF(file, ") ["); + os << ") ["; JavaScriptFrame::PrintTop(GetIsolate(), file, false, true); - PrintF(file, "]\n"); + os << "]\n"; } @@ -1618,53 +1435,59 @@ } -void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { +void HeapObject::HeapObjectShortPrint(OStream& os) { // NOLINT Heap* heap = GetHeap(); if (!heap->Contains(this)) { - accumulator->Add("!!!INVALID POINTER!!!"); + os << "!!!INVALID POINTER!!!"; return; } if (!heap->Contains(map())) { - accumulator->Add("!!!INVALID MAP!!!"); + os << "!!!INVALID MAP!!!"; return; } - accumulator->Add("%p ", this); + os << this << " "; if (IsString()) { - String::cast(this)->StringShortPrint(accumulator); + HeapStringAllocator allocator; + StringStream accumulator(&allocator); + String::cast(this)->StringShortPrint(&accumulator); + os << accumulator.ToCString().get(); return; } if (IsJSObject()) { - JSObject::cast(this)->JSObjectShortPrint(accumulator); + HeapStringAllocator allocator; + StringStream accumulator(&allocator); + JSObject::cast(this)->JSObjectShortPrint(&accumulator); + os << accumulator.ToCString().get(); return; } switch (map()->instance_type()) { case MAP_TYPE: - accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind()); + os << "<Map(elements=" << Map::cast(this)->elements_kind() << ")>"; break; case FIXED_ARRAY_TYPE: - accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length()); + os << "<FixedArray[" << FixedArray::cast(this)->length() << "]>"; break; case FIXED_DOUBLE_ARRAY_TYPE: - accumulator->Add("<FixedDoubleArray[%u]>", 
- FixedDoubleArray::cast(this)->length()); + os << "<FixedDoubleArray[" << FixedDoubleArray::cast(this)->length() + << "]>"; break; case BYTE_ARRAY_TYPE: - accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length()); + os << "<ByteArray[" << ByteArray::cast(this)->length() << "]>"; break; case FREE_SPACE_TYPE: - accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size()); - break; -#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \ - case EXTERNAL_##TYPE##_ARRAY_TYPE: \ - accumulator->Add("<External" #Type "Array[%u]>", \ - External##Type##Array::cast(this)->length()); \ - break; \ - case FIXED_##TYPE##_ARRAY_TYPE: \ - accumulator->Add("<Fixed" #Type "Array[%u]>", \ - Fixed##Type##Array::cast(this)->length()); \ + os << "<FreeSpace[" << FreeSpace::cast(this)->Size() << "]>"; break; +#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \ + case EXTERNAL_##TYPE##_ARRAY_TYPE: \ + os << "<External" #Type "Array[" \ + << External##Type##Array::cast(this)->length() << "]>"; \ + break; \ + case FIXED_##TYPE##_ARRAY_TYPE: \ + os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(this)->length() \ + << "]>"; \ + break; TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT) #undef TYPED_ARRAY_SHORT_PRINT @@ -1674,75 +1497,94 @@ SmartArrayPointer<char> debug_name = shared->DebugName()->ToCString(); if (debug_name[0] != 0) { - accumulator->Add("<SharedFunctionInfo %s>", debug_name.get()); + os << "<SharedFunctionInfo " << debug_name.get() << ">"; } else { - accumulator->Add("<SharedFunctionInfo>"); + os << "<SharedFunctionInfo>"; } break; } case JS_MESSAGE_OBJECT_TYPE: - accumulator->Add("<JSMessageObject>"); + os << "<JSMessageObject>"; break; #define MAKE_STRUCT_CASE(NAME, Name, name) \ case NAME##_TYPE: \ - accumulator->Put('<'); \ - accumulator->Add(#Name); \ - accumulator->Put('>'); \ + os << "<" #Name ">"; \ break; STRUCT_LIST(MAKE_STRUCT_CASE) #undef MAKE_STRUCT_CASE - case CODE_TYPE: - accumulator->Add("<Code>"); + case CODE_TYPE: { + Code* 
code = Code::cast(this); + os << "<Code: " << Code::Kind2String(code->kind()) << ">"; break; + } case ODDBALL_TYPE: { - if (IsUndefined()) - accumulator->Add("<undefined>"); - else if (IsTheHole()) - accumulator->Add("<the hole>"); - else if (IsNull()) - accumulator->Add("<null>"); - else if (IsTrue()) - accumulator->Add("<true>"); - else if (IsFalse()) - accumulator->Add("<false>"); - else - accumulator->Add("<Odd Oddball>"); + if (IsUndefined()) { + os << "<undefined>"; + } else if (IsTheHole()) { + os << "<the hole>"; + } else if (IsNull()) { + os << "<null>"; + } else if (IsTrue()) { + os << "<true>"; + } else if (IsFalse()) { + os << "<false>"; + } else { + os << "<Odd Oddball>"; + } break; } case SYMBOL_TYPE: { Symbol* symbol = Symbol::cast(this); - accumulator->Add("<Symbol: %d", symbol->Hash()); + os << "<Symbol: " << symbol->Hash(); if (!symbol->name()->IsUndefined()) { - accumulator->Add(" "); - String::cast(symbol->name())->StringShortPrint(accumulator); + os << " "; + HeapStringAllocator allocator; + StringStream accumulator(&allocator); + String::cast(symbol->name())->StringShortPrint(&accumulator); + os << accumulator.ToCString().get(); } - accumulator->Add(">"); + os << ">"; break; } - case HEAP_NUMBER_TYPE: - accumulator->Add("<Number: "); - HeapNumber::cast(this)->HeapNumberPrint(accumulator); - accumulator->Put('>'); + case HEAP_NUMBER_TYPE: { + os << "<Number: "; + HeapNumber::cast(this)->HeapNumberPrint(os); + os << ">"; + break; + } + case MUTABLE_HEAP_NUMBER_TYPE: { + os << "<MutableNumber: "; + HeapNumber::cast(this)->HeapNumberPrint(os); + os << '>'; break; + } case JS_PROXY_TYPE: - accumulator->Add("<JSProxy>"); + os << "<JSProxy>"; break; case JS_FUNCTION_PROXY_TYPE: - accumulator->Add("<JSFunctionProxy>"); + os << "<JSFunctionProxy>"; break; case FOREIGN_TYPE: - accumulator->Add("<Foreign>"); + os << "<Foreign>"; break; - case CELL_TYPE: - accumulator->Add("Cell for "); - Cell::cast(this)->value()->ShortPrint(accumulator); - break; - case 
PROPERTY_CELL_TYPE: - accumulator->Add("PropertyCell for "); - PropertyCell::cast(this)->value()->ShortPrint(accumulator); + case CELL_TYPE: { + os << "Cell for "; + HeapStringAllocator allocator; + StringStream accumulator(&allocator); + Cell::cast(this)->value()->ShortPrint(&accumulator); + os << accumulator.ToCString().get(); + break; + } + case PROPERTY_CELL_TYPE: { + os << "PropertyCell for "; + HeapStringAllocator allocator; + StringStream accumulator(&allocator); + PropertyCell::cast(this)->value()->ShortPrint(&accumulator); + os << accumulator.ToCString().get(); break; + } default: - accumulator->Add("<Other heap object (%d)>", map()->instance_type()); + os << "<Other heap object (" << map()->instance_type() << ")>"; break; } } @@ -1805,6 +1647,8 @@ case JS_DATA_VIEW_TYPE: case JS_SET_TYPE: case JS_MAP_TYPE: + case JS_SET_ITERATOR_TYPE: + case JS_MAP_ITERATOR_TYPE: case JS_WEAK_MAP_TYPE: case JS_WEAK_SET_TYPE: case JS_REGEXP_TYPE: @@ -1847,6 +1691,7 @@ break; case HEAP_NUMBER_TYPE: + case MUTABLE_HEAP_NUMBER_TYPE: case FILLER_TYPE: case BYTE_ARRAY_TYPE: case FREE_SPACE_TYPE: @@ -1883,45 +1728,17 @@ bool HeapNumber::HeapNumberBooleanValue() { - // NaN, +0, and -0 should return the false object -#if __BYTE_ORDER == __LITTLE_ENDIAN - union IeeeDoubleLittleEndianArchType u; -#elif __BYTE_ORDER == __BIG_ENDIAN - union IeeeDoubleBigEndianArchType u; -#endif - u.d = value(); - if (u.bits.exp == 2047) { - // Detect NaN for IEEE double precision floating point. - if ((u.bits.man_low | u.bits.man_high) != 0) return false; - } - if (u.bits.exp == 0) { - // Detect +0, and -0 for IEEE double precision floating point. 
- if ((u.bits.man_low | u.bits.man_high) == 0) return false; - } - return true; -} - - -void HeapNumber::HeapNumberPrint(FILE* out) { - PrintF(out, "%.16g", Number()); + return DoubleToBoolean(value()); } -void HeapNumber::HeapNumberPrint(StringStream* accumulator) { - // The Windows version of vsnprintf can allocate when printing a %g string - // into a buffer that may not be big enough. We don't want random memory - // allocation when producing post-crash stack traces, so we print into a - // buffer that is plenty big enough for any floating point number, then - // print that using vsnprintf (which may truncate but never allocate if - // there is no more space in the buffer). - EmbeddedVector<char, 100> buffer; - OS::SNPrintF(buffer, "%.16g", Number()); - accumulator->Add("%s", buffer.start()); +void HeapNumber::HeapNumberPrint(OStream& os) { // NOLINT + os << value(); } String* JSReceiver::class_name() { - if (IsJSFunction() && IsJSFunctionProxy()) { + if (IsJSFunction() || IsJSFunctionProxy()) { return GetHeap()->function_class_string(); } if (map()->constructor()->IsJSFunction()) { @@ -1954,28 +1771,34 @@ } -// TODO(mstarzinger): Temporary wrapper until handlified. -static Handle<Object> NewStorageFor(Isolate* isolate, - Handle<Object> object, - Representation representation) { - Heap* heap = isolate->heap(); - CALL_HEAP_FUNCTION(isolate, - object->AllocateNewStorageFor(heap, representation), - Object); -} +MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, + Handle<Name> name, + Handle<HeapType> type, + PropertyAttributes attributes, + Representation representation, + TransitionFlag flag) { + DCHECK(DescriptorArray::kNotFound == + map->instance_descriptors()->Search( + *name, map->NumberOfOwnDescriptors())); + + // Ensure the descriptor array does not get too big. 
+ if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) { + return MaybeHandle<Map>(); + } + Isolate* isolate = map->GetIsolate(); -static MaybeObject* CopyAddFieldDescriptor(Map* map, - Name* name, - int index, - PropertyAttributes attributes, - Representation representation, - TransitionFlag flag) { - Map* new_map; - FieldDescriptor new_field_desc(name, index, attributes, representation); - MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag); - if (!maybe_map->To(&new_map)) return maybe_map; - int unused_property_fields = map->unused_property_fields() - 1; + // Compute the new index for new field. + int index = map->NextFreePropertyIndex(); + + if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) { + representation = Representation::Tagged(); + type = HeapType::Any(isolate); + } + + FieldDescriptor new_field_desc(name, index, type, attributes, representation); + Handle<Map> new_map = Map::CopyAddDescriptor(map, &new_field_desc, flag); + int unused_property_fields = new_map->unused_property_fields() - 1; if (unused_property_fields < 0) { unused_property_fields += JSObject::kFieldsAdded; } @@ -1984,16 +1807,19 @@ } -static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map, - Handle<Name> name, - int index, - PropertyAttributes attributes, - Representation representation, - TransitionFlag flag) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - CopyAddFieldDescriptor( - *map, *name, index, attributes, representation, flag), - Map); +MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map, + Handle<Name> name, + Handle<Object> constant, + PropertyAttributes attributes, + TransitionFlag flag) { + // Ensure the descriptor array does not get too big. + if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) { + return MaybeHandle<Map>(); + } + + // Allocate new instance descriptors with (name, constant) added. 
+ ConstantDescriptor new_constant_desc(name, constant, attributes); + return Map::CopyAddDescriptor(map, &new_constant_desc, flag); } @@ -2002,87 +1828,29 @@ Handle<Object> value, PropertyAttributes attributes, StoreFromKeyed store_mode, - ValueType value_type, TransitionFlag flag) { - ASSERT(!object->IsJSGlobalProxy()); - ASSERT(DescriptorArray::kNotFound == - object->map()->instance_descriptors()->Search( - *name, object->map()->NumberOfOwnDescriptors())); - - // Normalize the object if the name is an actual name (not the - // hidden strings) and is not a real identifier. - // Normalize the object if it will have too many fast properties. - Isolate* isolate = object->GetIsolate(); - if (!name->IsCacheable(isolate) || - object->TooManyFastProperties(store_mode)) { - NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); - AddSlowProperty(object, name, value, attributes); - return; - } + DCHECK(!object->IsJSGlobalProxy()); - // Compute the new index for new field. - int index = object->map()->NextFreePropertyIndex(); - - // Allocate new instance descriptors with (name, index) added - if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED; - Representation representation = value->OptimalRepresentation(value_type); - Handle<Map> new_map = CopyAddFieldDescriptor( - handle(object->map()), name, index, attributes, representation, flag); - - JSObject::MigrateToMap(object, new_map); - - if (representation.IsDouble()) { - // Nothing more to be done. 
- if (value->IsUninitialized()) return; - HeapNumber* box = HeapNumber::cast(object->RawFastPropertyAt(index)); - box->set_value(value->Number()); - } else { - object->FastPropertyAtPut(index, *value); + MaybeHandle<Map> maybe_map; + if (value->IsJSFunction()) { + maybe_map = Map::CopyWithConstant( + handle(object->map()), name, value, attributes, flag); + } else if (!object->map()->TooManyFastProperties(store_mode)) { + Isolate* isolate = object->GetIsolate(); + Representation representation = value->OptimalRepresentation(); + maybe_map = Map::CopyWithField( + handle(object->map(), isolate), name, + value->OptimalType(isolate, representation), + attributes, representation, flag); } -} - - -static MaybeObject* CopyAddConstantDescriptor(Map* map, - Name* name, - Object* value, - PropertyAttributes attributes, - TransitionFlag flag) { - ConstantDescriptor new_constant_desc(name, value, attributes); - return map->CopyAddDescriptor(&new_constant_desc, flag); -} - - -static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - TransitionFlag flag) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - CopyAddConstantDescriptor( - *map, *name, *value, attributes, flag), - Map); -} - - -void JSObject::AddConstantProperty(Handle<JSObject> object, - Handle<Name> name, - Handle<Object> constant, - PropertyAttributes attributes, - TransitionFlag initial_flag) { - TransitionFlag flag = - // Do not add transitions to global objects. - (object->IsGlobalObject() || - // Don't add transitions to special properties with non-trivial - // attributes. - attributes != NONE) - ? OMIT_TRANSITION - : initial_flag; - // Allocate new instance descriptors with (name, constant) added. 
- Handle<Map> new_map = CopyAddConstantDescriptor( - handle(object->map()), name, constant, attributes, flag); + Handle<Map> new_map; + if (!maybe_map.ToHandle(&new_map)) { + NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + return; + } - JSObject::MigrateToMap(object, new_map); + JSObject::MigrateToNewProperty(object, new_map, value); } @@ -2090,12 +1858,12 @@ Handle<Name> name, Handle<Object> value, PropertyAttributes attributes) { - ASSERT(!object->HasFastProperties()); + DCHECK(!object->HasFastProperties()); Isolate* isolate = object->GetIsolate(); Handle<NameDictionary> dict(object->property_dictionary()); if (object->IsGlobalObject()) { // In case name is an orphaned property reuse the cell. - int entry = dict->FindEntry(*name); + int entry = dict->FindEntry(name); if (entry != NameDictionary::kNotFound) { Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry))); PropertyCell::SetValueInferType(cell, value); @@ -2104,7 +1872,7 @@ int index = dict->NextEnumerationIndex(); PropertyDetails details = PropertyDetails(attributes, NORMAL, index); dict->SetNextEnumerationIndex(index + 1); - dict->SetEntry(entry, *name, *cell, details); + dict->SetEntry(entry, name, cell, details); return; } Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value); @@ -2112,22 +1880,17 @@ value = cell; } PropertyDetails details = PropertyDetails(attributes, NORMAL, 0); - Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details); + Handle<NameDictionary> result = + NameDictionary::Add(dict, name, value, details); if (*dict != *result) object->set_properties(*result); } -Handle<Object> JSObject::AddProperty(Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - JSReceiver::StoreFromKeyed store_mode, - ExtensibilityCheck extensibility_check, - ValueType value_type, - StoreMode mode, - TransitionFlag transition_flag) { - ASSERT(!object->IsJSGlobalProxy()); 
+MaybeHandle<Object> JSObject::AddPropertyInternal( + Handle<JSObject> object, Handle<Name> name, Handle<Object> value, + PropertyAttributes attributes, JSReceiver::StoreFromKeyed store_mode, + ExtensibilityCheck extensibility_check, TransitionFlag transition_flag) { + DCHECK(!object->IsJSGlobalProxy()); Isolate* isolate = object->GetIsolate(); if (!name->IsUniqueName()) { @@ -2137,37 +1900,18 @@ if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK && !object->map()->is_extensible()) { - if (strict_mode == SLOPPY) { - return value; - } else { - Handle<Object> args[1] = { name }; - Handle<Object> error = isolate->factory()->NewTypeError( - "object_not_extensible", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); - } + Handle<Object> args[1] = {name}; + Handle<Object> error = isolate->factory()->NewTypeError( + "object_not_extensible", HandleVector(args, ARRAY_SIZE(args))); + return isolate->Throw<Object>(error); } if (object->HasFastProperties()) { - // Ensure the descriptor array does not get too big. - if (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors) { - // TODO(verwaest): Support other constants. - // if (mode == ALLOW_AS_CONSTANT && - // !value->IsTheHole() && - // !value->IsConsString()) { - if (value->IsJSFunction()) { - AddConstantProperty(object, name, value, attributes, transition_flag); - } else { - AddFastProperty(object, name, value, attributes, store_mode, - value_type, transition_flag); - } - } else { - // Normalize the object to prevent very large instance descriptors. - // This eliminates unwanted N^2 allocation and lookup behavior. 
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); - AddSlowProperty(object, name, value, attributes); - } - } else { + AddFastProperty(object, name, value, attributes, store_mode, + transition_flag); + } + + if (!object->HasFastProperties()) { AddSlowProperty(object, name, value, attributes); } @@ -2181,53 +1925,37 @@ } +Context* JSObject::GetCreationContext() { + Object* constructor = this->map()->constructor(); + JSFunction* function; + if (!constructor->IsJSFunction()) { + // Functions have null as a constructor, + // but any JSFunction knows its context immediately. + function = JSFunction::cast(this); + } else { + function = JSFunction::cast(constructor); + } + + return function->context()->native_context(); +} + + void JSObject::EnqueueChangeRecord(Handle<JSObject> object, const char* type_str, Handle<Name> name, Handle<Object> old_value) { + DCHECK(!object->IsJSGlobalProxy()); + DCHECK(!object->IsJSGlobalObject()); Isolate* isolate = object->GetIsolate(); HandleScope scope(isolate); Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str); - if (object->IsJSGlobalObject()) { - object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate); - } Handle<Object> args[] = { type, object, name, old_value }; int argc = name.is_null() ? 2 : old_value->IsTheHole() ? 3 : 4; - bool threw; Execution::Call(isolate, Handle<JSFunction>(isolate->observers_notify_change()), isolate->factory()->undefined_value(), - argc, args, - &threw); - ASSERT(!threw); -} - - -Handle<Object> JSObject::SetPropertyPostInterceptor( - Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode) { - // Check local property, ignore interceptor. 
- LookupResult result(object->GetIsolate()); - object->LocalLookupRealNamedProperty(*name, &result); - if (!result.IsFound()) { - object->map()->LookupTransition(*object, *name, &result); - } - if (result.IsFound()) { - // An existing property or a map transition was found. Use set property to - // handle all these cases. - return SetPropertyForResult(object, &result, name, value, attributes, - strict_mode, MAY_BE_STORE_FROM_KEYED); - } - bool done = false; - Handle<Object> result_object = SetPropertyViaPrototypes( - object, name, value, attributes, strict_mode, &done); - if (done) return result_object; - // Add a new real property. - return AddProperty(object, name, value, attributes, strict_mode); + argc, args).Assert(); } @@ -2236,7 +1964,7 @@ Handle<Object> value, PropertyAttributes attributes) { NameDictionary* dictionary = object->property_dictionary(); - int old_index = dictionary->FindEntry(*name); + int old_index = dictionary->FindEntry(name); int new_enumeration_index = 0; // 0 means "Use the next available index." if (old_index != -1) { // All calls to ReplaceSlowProperty have had all transitions removed. @@ -2264,68 +1992,21 @@ } -static void ZapEndOfFixedArray(Address new_end, int to_trim) { - // If we are doing a big trim in old space then we zap the space. - Object** zap = reinterpret_cast<Object**>(new_end); - zap++; // Header of filler must be at least one word so skip that. - for (int i = 1; i < to_trim; i++) { - *zap++ = Smi::FromInt(0); - } -} - - -template<Heap::InvocationMode mode> -static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) { - ASSERT(elms->map() != heap->fixed_cow_array_map()); - // For now this trick is only applied to fixed arrays in new and paged space. 
- ASSERT(!heap->lo_space()->Contains(elms)); - - const int len = elms->length(); - - ASSERT(to_trim < len); - - Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim); - - if (mode != Heap::FROM_GC || Heap::ShouldZapGarbage()) { - ZapEndOfFixedArray(new_end, to_trim); - } - - int size_delta = to_trim * kPointerSize; - - // Technically in new space this write might be omitted (except for - // debug mode which iterates through the heap), but to play safer - // we still do it. - heap->CreateFillerObjectAt(new_end, size_delta); - - elms->set_length(len - to_trim); - - heap->AdjustLiveBytes(elms->address(), -size_delta, mode); - - // The array may not be moved during GC, - // and size has to be adjusted nevertheless. - HeapProfiler* profiler = heap->isolate()->heap_profiler(); - if (profiler->is_tracking_allocations()) { - profiler->UpdateObjectSizeEvent(elms->address(), elms->Size()); - } -} - - -bool Map::InstancesNeedRewriting(Map* target, - int target_number_of_fields, - int target_inobject, - int target_unused) { +bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields, + int target_inobject, int target_unused, + int* old_number_of_fields) { // If fields were added (or removed), rewrite the instance. - int number_of_fields = NumberOfFields(); - ASSERT(target_number_of_fields >= number_of_fields); - if (target_number_of_fields != number_of_fields) return true; + *old_number_of_fields = NumberOfFields(); + DCHECK(target_number_of_fields >= *old_number_of_fields); + if (target_number_of_fields != *old_number_of_fields) return true; // If smi descriptors were replaced by double descriptors, rewrite. 
DescriptorArray* old_desc = instance_descriptors(); DescriptorArray* new_desc = target->instance_descriptors(); int limit = NumberOfOwnDescriptors(); for (int i = 0; i < limit; i++) { - if (new_desc->GetDetails(i).representation().IsDouble() && - !old_desc->GetDetails(i).representation().IsDouble()) { + if (new_desc->GetDetails(i).representation().IsDouble() != + old_desc->GetDetails(i).representation().IsDouble()) { return true; } } @@ -2336,9 +2017,9 @@ // In-object slack tracking may have reduced the object size of the new map. // In that case, succeed if all existing fields were inobject, and they still // fit within the new inobject size. - ASSERT(target_inobject < inobject_properties()); + DCHECK(target_inobject < inobject_properties()); if (target_number_of_fields <= target_inobject) { - ASSERT(target_number_of_fields + target_unused == target_inobject); + DCHECK(target_number_of_fields + target_unused == target_inobject); return false; } // Otherwise, properties will need to be moved to the backing store. @@ -2346,7 +2027,43 @@ } -// To migrate an instance to a map: +void Map::ConnectElementsTransition(Handle<Map> parent, Handle<Map> child) { + Isolate* isolate = parent->GetIsolate(); + Handle<Name> name = isolate->factory()->elements_transition_symbol(); + ConnectTransition(parent, child, name, FULL_TRANSITION); +} + + +void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) { + if (object->map() == *new_map) return; + if (object->HasFastProperties()) { + if (!new_map->is_dictionary_map()) { + Handle<Map> old_map(object->map()); + MigrateFastToFast(object, new_map); + if (old_map->is_prototype_map()) { + // Clear out the old descriptor array to avoid problems to sharing + // the descriptor array without using an explicit. + old_map->InitializeDescriptors( + old_map->GetHeap()->empty_descriptor_array()); + // Ensure that no transition was inserted for prototype migrations. 
+ DCHECK(!old_map->HasTransitionArray()); + DCHECK(new_map->GetBackPointer()->IsUndefined()); + } + } else { + MigrateFastToSlow(object, new_map, 0); + } + } else { + // For slow-to-fast migrations JSObject::TransformToFastProperties() + // must be used instead. + CHECK(new_map->is_dictionary_map()); + + // Slow-to-slow migration is trivial. + object->set_map(*new_map); + } +} + + +// To migrate a fast instance to a fast map: // - First check whether the instance needs to be rewritten. If not, simply // change the map. // - Otherwise, allocate a fixed array large enough to hold all fields, in @@ -2361,23 +2078,56 @@ // to temporarily store the inobject properties. // * If there are properties left in the backing store, install the backing // store. -void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) { +void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) { Isolate* isolate = object->GetIsolate(); Handle<Map> old_map(object->map()); + int old_number_of_fields; int number_of_fields = new_map->NumberOfFields(); int inobject = new_map->inobject_properties(); int unused = new_map->unused_property_fields(); // Nothing to do if no functions were converted to fields and no smis were // converted to doubles. - if (!old_map->InstancesNeedRewriting( - *new_map, number_of_fields, inobject, unused)) { - object->set_map(*new_map); + if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject, + unused, &old_number_of_fields)) { + object->synchronized_set_map(*new_map); return; } int total_size = number_of_fields + unused; int external = total_size - inobject; + + if ((old_map->unused_property_fields() == 0) && + (number_of_fields != old_number_of_fields) && + (new_map->GetBackPointer() == *old_map)) { + DCHECK(number_of_fields == old_number_of_fields + 1); + // This migration is a transition from a map that has run out out property + // space. Therefore it could be done by extending the backing store. 
+ Handle<FixedArray> old_storage = handle(object->properties(), isolate); + Handle<FixedArray> new_storage = + FixedArray::CopySize(old_storage, external); + + // Properly initialize newly added property. + PropertyDetails details = new_map->GetLastDescriptorDetails(); + Handle<Object> value; + if (details.representation().IsDouble()) { + value = isolate->factory()->NewHeapNumber(0, MUTABLE); + } else { + value = isolate->factory()->uninitialized_value(); + } + DCHECK(details.type() == FIELD); + int target_index = details.field_index() - inobject; + DCHECK(target_index >= 0); // Must be a backing store index. + new_storage->set(target_index, *value); + + // From here on we cannot fail and we shouldn't GC anymore. + DisallowHeapAllocation no_allocation; + + // Set the new property value and do the map transition. + object->set_properties(*new_storage); + object->synchronized_set_map(*new_map); + return; + } Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size); Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors()); @@ -2387,30 +2137,33 @@ // This method only supports generalizing instances to at least the same // number of properties. - ASSERT(old_nof <= new_nof); + DCHECK(old_nof <= new_nof); for (int i = 0; i < old_nof; i++) { PropertyDetails details = new_descriptors->GetDetails(i); if (details.type() != FIELD) continue; PropertyDetails old_details = old_descriptors->GetDetails(i); if (old_details.type() == CALLBACKS) { - ASSERT(details.representation().IsTagged()); + DCHECK(details.representation().IsTagged()); continue; } - ASSERT(old_details.type() == CONSTANT || + DCHECK(old_details.type() == CONSTANT || old_details.type() == FIELD); Object* raw_value = old_details.type() == CONSTANT ? 
old_descriptors->GetValue(i) - : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i)); + : object->RawFastPropertyAt(FieldIndex::ForDescriptor(*old_map, i)); Handle<Object> value(raw_value, isolate); if (!old_details.representation().IsDouble() && details.representation().IsDouble()) { if (old_details.representation().IsNone()) { value = handle(Smi::FromInt(0), isolate); } - value = NewStorageFor(isolate, value, details.representation()); + value = Object::NewStorageFor(isolate, value, details.representation()); + } else if (old_details.representation().IsDouble() && + !details.representation().IsDouble()) { + value = Object::WrapForRead(isolate, value, old_details.representation()); } - ASSERT(!(details.representation().IsDouble() && value->IsSmi())); + DCHECK(!(details.representation().IsDouble() && value->IsSmi())); int target_index = new_descriptors->GetFieldIndex(i) - inobject; if (target_index < 0) target_index += total_size; array->set(target_index, *value); @@ -2419,12 +2172,15 @@ for (int i = old_nof; i < new_nof; i++) { PropertyDetails details = new_descriptors->GetDetails(i); if (details.type() != FIELD) continue; + Handle<Object> value; if (details.representation().IsDouble()) { - int target_index = new_descriptors->GetFieldIndex(i) - inobject; - if (target_index < 0) target_index += total_size; - Handle<Object> box = isolate->factory()->NewHeapNumber(0); - array->set(target_index, *box); + value = isolate->factory()->NewHeapNumber(0, MUTABLE); + } else { + value = isolate->factory()->uninitialized_value(); } + int target_index = new_descriptors->GetFieldIndex(i) - inobject; + if (target_index < 0) target_index += total_size; + array->set(target_index, *value); } // From here on we cannot fail and we shouldn't GC anymore. @@ -2434,45 +2190,45 @@ // avoid overwriting |one_pointer_filler_map|. 
int limit = Min(inobject, number_of_fields); for (int i = 0; i < limit; i++) { - object->FastPropertyAtPut(i, array->get(external + i)); + FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i); + object->FastPropertyAtPut(index, array->get(external + i)); } - // Create filler object past the new instance size. - int new_instance_size = new_map->instance_size(); - int instance_size_delta = old_map->instance_size() - new_instance_size; - ASSERT(instance_size_delta >= 0); - Address address = object->address() + new_instance_size; - isolate->heap()->CreateFillerObjectAt(address, instance_size_delta); + Heap* heap = isolate->heap(); // If there are properties in the new backing store, trim it to the correct // size and install the backing store into the object. if (external > 0) { - RightTrimFixedArray<Heap::FROM_MUTATOR>(isolate->heap(), *array, inobject); + heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*array, inobject); object->set_properties(*array); } - object->set_map(*new_map); -} + // Create filler object past the new instance size. + int new_instance_size = new_map->instance_size(); + int instance_size_delta = old_map->instance_size() - new_instance_size; + DCHECK(instance_size_delta >= 0); + if (instance_size_delta > 0) { + Address address = object->address(); + heap->CreateFillerObjectAt( + address + new_instance_size, instance_size_delta); + heap->AdjustLiveBytes(address, -instance_size_delta, Heap::FROM_MUTATOR); + } -Handle<TransitionArray> Map::AddTransition(Handle<Map> map, - Handle<Name> key, - Handle<Map> target, - SimpleTransitionFlag flag) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - map->AddTransition(*key, *target, flag), - TransitionArray); + // We are storing the new map using release store after creating a filler for + // the left-over space to avoid races with the sweeper thread. 
+ object->synchronized_set_map(*new_map); } void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object, int modify_index, Representation new_representation, - StoreMode store_mode) { + Handle<HeapType> new_field_type) { Handle<Map> new_map = Map::GeneralizeRepresentation( - handle(object->map()), modify_index, new_representation, store_mode); - if (object->map() == *new_map) return; - return MigrateToMap(object, new_map); + handle(object->map()), modify_index, new_representation, new_field_type, + FORCE_FIELD); + MigrateToMap(object, new_map); } @@ -2491,38 +2247,65 @@ StoreMode store_mode, PropertyAttributes attributes, const char* reason) { + Isolate* isolate = map->GetIsolate(); Handle<Map> new_map = Copy(map); DescriptorArray* descriptors = new_map->instance_descriptors(); - descriptors->InitializeRepresentations(Representation::Tagged()); + int length = descriptors->number_of_descriptors(); + for (int i = 0; i < length; i++) { + descriptors->SetRepresentation(i, Representation::Tagged()); + if (descriptors->GetDetails(i).type() == FIELD) { + descriptors->SetValue(i, HeapType::Any()); + } + } // Unless the instance is being migrated, ensure that modify_index is a field. PropertyDetails details = descriptors->GetDetails(modify_index); - if (store_mode == FORCE_FIELD && details.type() != FIELD) { - FieldDescriptor d(descriptors->GetKey(modify_index), - new_map->NumberOfFields(), - attributes, - Representation::Tagged()); - d.SetSortedKeyIndex(details.pointer()); - descriptors->Set(modify_index, &d); - int unused_property_fields = new_map->unused_property_fields() - 1; - if (unused_property_fields < 0) { - unused_property_fields += JSObject::kFieldsAdded; + if (store_mode == FORCE_FIELD && + (details.type() != FIELD || details.attributes() != attributes)) { + int field_index = details.type() == FIELD ? 
details.field_index() + : new_map->NumberOfFields(); + FieldDescriptor d(handle(descriptors->GetKey(modify_index), isolate), + field_index, attributes, Representation::Tagged()); + descriptors->Replace(modify_index, &d); + if (details.type() != FIELD) { + int unused_property_fields = new_map->unused_property_fields() - 1; + if (unused_property_fields < 0) { + unused_property_fields += JSObject::kFieldsAdded; + } + new_map->set_unused_property_fields(unused_property_fields); } - new_map->set_unused_property_fields(unused_property_fields); + } else { + DCHECK(details.attributes() == attributes); } if (FLAG_trace_generalization) { + HeapType* field_type = (details.type() == FIELD) + ? map->instance_descriptors()->GetFieldType(modify_index) + : NULL; map->PrintGeneralization(stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(), details.type() == CONSTANT && store_mode == FORCE_FIELD, - Representation::Tagged(), Representation::Tagged()); + details.representation(), Representation::Tagged(), + field_type, HeapType::Any()); } return new_map; } +// static +Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map, + int modify_index, + StoreMode store_mode, + const char* reason) { + PropertyDetails details = + map->instance_descriptors()->GetDetails(modify_index); + return CopyGeneralizeAllRepresentations(map, modify_index, store_mode, + details.attributes(), reason); +} + + void Map::DeprecateTransitionTree() { if (is_deprecated()) return; if (HasTransitionArray()) { @@ -2578,46 +2361,13 @@ } -// Returns NULL if the updated map is incompatible. -Map* Map::FindUpdatedMap(int verbatim, - int length, - DescriptorArray* descriptors) { - // This can only be called on roots of transition trees. 
- ASSERT(GetBackPointer()->IsUndefined()); - - Map* current = this; - - for (int i = verbatim; i < length; i++) { - if (!current->HasTransitionArray()) break; - Name* name = descriptors->GetKey(i); - TransitionArray* transitions = current->transitions(); - int transition = transitions->Search(name); - if (transition == TransitionArray::kNotFound) break; - current = transitions->GetTarget(transition); - PropertyDetails details = descriptors->GetDetails(i); - PropertyDetails target_details = - current->instance_descriptors()->GetDetails(i); - if (details.attributes() != target_details.attributes()) return NULL; - if (details.type() == CALLBACKS) { - if (target_details.type() != CALLBACKS) return NULL; - if (descriptors->GetValue(i) != - current->instance_descriptors()->GetValue(i)) { - return NULL; - } - } else if (target_details.type() == CALLBACKS) { - return NULL; - } - } - - return current; -} - - Map* Map::FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors) { + DisallowHeapAllocation no_allocation; + // This can only be called on roots of transition trees. 
- ASSERT(GetBackPointer()->IsUndefined()); + DCHECK(GetBackPointer()->IsUndefined()); Map* current = this; @@ -2631,13 +2381,17 @@ Map* next = transitions->GetTarget(transition); DescriptorArray* next_descriptors = next->instance_descriptors(); - if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break; - PropertyDetails details = descriptors->GetDetails(i); PropertyDetails next_details = next_descriptors->GetDetails(i); if (details.type() != next_details.type()) break; if (details.attributes() != next_details.attributes()) break; if (!details.representation().Equals(next_details.representation())) break; + if (next_details.type() == FIELD) { + if (!descriptors->GetFieldType(i)->NowIs( + next_descriptors->GetFieldType(i))) break; + } else { + if (descriptors->GetValue(i) != next_descriptors->GetValue(i)) break; + } current = next; } @@ -2645,6 +2399,103 @@ } +Map* Map::FindFieldOwner(int descriptor) { + DisallowHeapAllocation no_allocation; + DCHECK_EQ(FIELD, instance_descriptors()->GetDetails(descriptor).type()); + Map* result = this; + while (true) { + Object* back = result->GetBackPointer(); + if (back->IsUndefined()) break; + Map* parent = Map::cast(back); + if (parent->NumberOfOwnDescriptors() <= descriptor) break; + result = parent; + } + return result; +} + + +void Map::UpdateFieldType(int descriptor, Handle<Name> name, + Handle<HeapType> new_type) { + DisallowHeapAllocation no_allocation; + PropertyDetails details = instance_descriptors()->GetDetails(descriptor); + if (details.type() != FIELD) return; + if (HasTransitionArray()) { + TransitionArray* transitions = this->transitions(); + for (int i = 0; i < transitions->number_of_transitions(); ++i) { + transitions->GetTarget(i)->UpdateFieldType(descriptor, name, new_type); + } + } + // Skip if already updated the shared descriptor. 
+ if (instance_descriptors()->GetFieldType(descriptor) == *new_type) return; + FieldDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor), + new_type, details.attributes(), details.representation()); + instance_descriptors()->Replace(descriptor, &d); +} + + +// static +Handle<HeapType> Map::GeneralizeFieldType(Handle<HeapType> type1, + Handle<HeapType> type2, + Isolate* isolate) { + static const int kMaxClassesPerFieldType = 5; + if (type1->NowIs(type2)) return type2; + if (type2->NowIs(type1)) return type1; + if (type1->NowStable() && type2->NowStable()) { + Handle<HeapType> type = HeapType::Union(type1, type2, isolate); + if (type->NumClasses() <= kMaxClassesPerFieldType) { + DCHECK(type->NowStable()); + DCHECK(type1->NowIs(type)); + DCHECK(type2->NowIs(type)); + return type; + } + } + return HeapType::Any(isolate); +} + + +// static +void Map::GeneralizeFieldType(Handle<Map> map, + int modify_index, + Handle<HeapType> new_field_type) { + Isolate* isolate = map->GetIsolate(); + + // Check if we actually need to generalize the field type at all. + Handle<HeapType> old_field_type( + map->instance_descriptors()->GetFieldType(modify_index), isolate); + if (new_field_type->NowIs(old_field_type)) { + DCHECK(Map::GeneralizeFieldType(old_field_type, + new_field_type, + isolate)->NowIs(old_field_type)); + return; + } + + // Determine the field owner. + Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate); + Handle<DescriptorArray> descriptors( + field_owner->instance_descriptors(), isolate); + DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index)); + + // Determine the generalized new field type. 
+ new_field_type = Map::GeneralizeFieldType( + old_field_type, new_field_type, isolate); + + PropertyDetails details = descriptors->GetDetails(modify_index); + Handle<Name> name(descriptors->GetKey(modify_index)); + field_owner->UpdateFieldType(modify_index, name, new_field_type); + field_owner->dependent_code()->DeoptimizeDependentCodeGroup( + isolate, DependentCode::kFieldTypeGroup); + + if (FLAG_trace_generalization) { + map->PrintGeneralization( + stdout, "field type generalization", + modify_index, map->NumberOfOwnDescriptors(), + map->NumberOfOwnDescriptors(), false, + details.representation(), details.representation(), + *old_field_type, *new_field_type); + } +} + + // Generalize the representation of the descriptor at |modify_index|. // This method rewrites the transition tree to reflect the new change. To avoid // high degrees over polymorphism, and to stabilize quickly, on every rewrite @@ -2652,22 +2503,28 @@ // (partial) version of the type in the transition tree. // To do this, on each rewrite: // - Search the root of the transition tree using FindRootMap. -// - Find |updated|, the newest matching version of this map using -// FindUpdatedMap. This uses the keys in the own map's descriptor array to -// walk the transition tree. -// - Merge/generalize the descriptor array of the current map and |updated|. -// - Generalize the |modify_index| descriptor using |new_representation|. -// - Walk the tree again starting from the root towards |updated|. Stop at +// - Find |target_map|, the newest matching version of this map using the keys +// in the |old_map|'s descriptor array to walk the transition tree. +// - Merge/generalize the descriptor array of the |old_map| and |target_map|. +// - Generalize the |modify_index| descriptor using |new_representation| and +// |new_field_type|. +// - Walk the tree again starting from the root towards |target_map|. Stop at // |split_map|, the first map who's descriptor array does not match the merged // descriptor array. 
-// - If |updated| == |split_map|, |updated| is in the expected state. Return it. -// - Otherwise, invalidate the outdated transition target from |updated|, and +// - If |target_map| == |split_map|, |target_map| is in the expected state. +// Return it. +// - Otherwise, invalidate the outdated transition target from |target_map|, and // replace its transition tree with a new branch for the updated descriptors. Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map, int modify_index, Representation new_representation, + Handle<HeapType> new_field_type, StoreMode store_mode) { - Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors()); + Isolate* isolate = old_map->GetIsolate(); + + Handle<DescriptorArray> old_descriptors( + old_map->instance_descriptors(), isolate); + int old_nof = old_map->NumberOfOwnDescriptors(); PropertyDetails old_details = old_descriptors->GetDetails(modify_index); Representation old_representation = old_details.representation(); @@ -2678,89 +2535,278 @@ if (old_representation.IsNone() && !new_representation.IsNone() && !new_representation.IsDouble()) { + DCHECK(old_details.type() == FIELD); + DCHECK(old_descriptors->GetFieldType(modify_index)->NowIs( + HeapType::None())); + if (FLAG_trace_generalization) { + old_map->PrintGeneralization( + stdout, "uninitialized field", + modify_index, old_map->NumberOfOwnDescriptors(), + old_map->NumberOfOwnDescriptors(), false, + old_representation, new_representation, + old_descriptors->GetFieldType(modify_index), *new_field_type); + } old_descriptors->SetRepresentation(modify_index, new_representation); + old_descriptors->SetValue(modify_index, *new_field_type); return old_map; } - int descriptors = old_map->NumberOfOwnDescriptors(); - Handle<Map> root_map(old_map->FindRootMap()); - // Check the state of the root map. 
+ Handle<Map> root_map(old_map->FindRootMap(), isolate); if (!old_map->EquivalentToForTransition(*root_map)) { - return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode, - old_details.attributes(), "not equivalent"); + return CopyGeneralizeAllRepresentations( + old_map, modify_index, store_mode, "not equivalent"); + } + int root_nof = root_map->NumberOfOwnDescriptors(); + if (modify_index < root_nof) { + PropertyDetails old_details = old_descriptors->GetDetails(modify_index); + if ((old_details.type() != FIELD && store_mode == FORCE_FIELD) || + (old_details.type() == FIELD && + (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) || + !new_representation.fits_into(old_details.representation())))) { + return CopyGeneralizeAllRepresentations( + old_map, modify_index, store_mode, "root modification"); + } } - int verbatim = root_map->NumberOfOwnDescriptors(); + Handle<Map> target_map = root_map; + for (int i = root_nof; i < old_nof; ++i) { + int j = target_map->SearchTransition(old_descriptors->GetKey(i)); + if (j == TransitionArray::kNotFound) break; + Handle<Map> tmp_map(target_map->GetTransition(j), isolate); + Handle<DescriptorArray> tmp_descriptors = handle( + tmp_map->instance_descriptors(), isolate); - if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) { - return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode, - old_details.attributes(), "root modification"); + // Check if target map is incompatible. 
+ PropertyDetails old_details = old_descriptors->GetDetails(i); + PropertyDetails tmp_details = tmp_descriptors->GetDetails(i); + PropertyType old_type = old_details.type(); + PropertyType tmp_type = tmp_details.type(); + if (tmp_details.attributes() != old_details.attributes() || + ((tmp_type == CALLBACKS || old_type == CALLBACKS) && + (tmp_type != old_type || + tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) { + return CopyGeneralizeAllRepresentations( + old_map, modify_index, store_mode, "incompatible"); + } + Representation old_representation = old_details.representation(); + Representation tmp_representation = tmp_details.representation(); + if (!old_representation.fits_into(tmp_representation) || + (!new_representation.fits_into(tmp_representation) && + modify_index == i)) { + break; + } + if (tmp_type == FIELD) { + // Generalize the field type as necessary. + Handle<HeapType> old_field_type = (old_type == FIELD) + ? handle(old_descriptors->GetFieldType(i), isolate) + : old_descriptors->GetValue(i)->OptimalType( + isolate, tmp_representation); + if (modify_index == i) { + old_field_type = GeneralizeFieldType( + new_field_type, old_field_type, isolate); + } + GeneralizeFieldType(tmp_map, i, old_field_type); + } else if (tmp_type == CONSTANT) { + if (old_type != CONSTANT || + old_descriptors->GetConstant(i) != tmp_descriptors->GetConstant(i)) { + break; + } + } else { + DCHECK_EQ(tmp_type, old_type); + DCHECK_EQ(tmp_descriptors->GetValue(i), old_descriptors->GetValue(i)); + } + target_map = tmp_map; } - Map* raw_updated = root_map->FindUpdatedMap( - verbatim, descriptors, *old_descriptors); - if (raw_updated == NULL) { - return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode, - old_details.attributes(), "incompatible"); - } + // Directly change the map if the target map is more general. 
+ Handle<DescriptorArray> target_descriptors( + target_map->instance_descriptors(), isolate); + int target_nof = target_map->NumberOfOwnDescriptors(); + if (target_nof == old_nof && + (store_mode != FORCE_FIELD || + target_descriptors->GetDetails(modify_index).type() == FIELD)) { + DCHECK(modify_index < target_nof); + DCHECK(new_representation.fits_into( + target_descriptors->GetDetails(modify_index).representation())); + DCHECK(target_descriptors->GetDetails(modify_index).type() != FIELD || + new_field_type->NowIs( + target_descriptors->GetFieldType(modify_index))); + return target_map; + } + + // Find the last compatible target map in the transition tree. + for (int i = target_nof; i < old_nof; ++i) { + int j = target_map->SearchTransition(old_descriptors->GetKey(i)); + if (j == TransitionArray::kNotFound) break; + Handle<Map> tmp_map(target_map->GetTransition(j), isolate); + Handle<DescriptorArray> tmp_descriptors( + tmp_map->instance_descriptors(), isolate); - Handle<Map> updated(raw_updated); - Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors()); + // Check if target map is compatible. + PropertyDetails old_details = old_descriptors->GetDetails(i); + PropertyDetails tmp_details = tmp_descriptors->GetDetails(i); + if (tmp_details.attributes() != old_details.attributes() || + ((tmp_details.type() == CALLBACKS || old_details.type() == CALLBACKS) && + (tmp_details.type() != old_details.type() || + tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) { + return CopyGeneralizeAllRepresentations( + old_map, modify_index, store_mode, "incompatible"); + } + target_map = tmp_map; + } + target_nof = target_map->NumberOfOwnDescriptors(); + target_descriptors = handle(target_map->instance_descriptors(), isolate); - int valid = updated->NumberOfOwnDescriptors(); + // Allocate a new descriptor array large enough to hold the required + // descriptors, with minimally the exact same size as the old descriptor + // array. 
+ int new_slack = Max( + old_nof, old_descriptors->number_of_descriptors()) - old_nof; + Handle<DescriptorArray> new_descriptors = DescriptorArray::Allocate( + isolate, old_nof, new_slack); + DCHECK(new_descriptors->length() > target_descriptors->length() || + new_descriptors->NumberOfSlackDescriptors() > 0 || + new_descriptors->number_of_descriptors() == + old_descriptors->number_of_descriptors()); + DCHECK(new_descriptors->number_of_descriptors() == old_nof); - // Directly change the map if the target map is more general. Ensure that the - // target type of the modify_index is a FIELD, unless we are migrating. - if (updated_descriptors->IsMoreGeneralThan( - verbatim, valid, descriptors, *old_descriptors) && - (store_mode == ALLOW_AS_CONSTANT || - updated_descriptors->GetDetails(modify_index).type() == FIELD)) { - Representation updated_representation = - updated_descriptors->GetDetails(modify_index).representation(); - if (new_representation.fits_into(updated_representation)) return updated; + // 0 -> |root_nof| + int current_offset = 0; + for (int i = 0; i < root_nof; ++i) { + PropertyDetails old_details = old_descriptors->GetDetails(i); + if (old_details.type() == FIELD) current_offset++; + Descriptor d(handle(old_descriptors->GetKey(i), isolate), + handle(old_descriptors->GetValue(i), isolate), + old_details); + new_descriptors->Set(i, &d); } - Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge( - updated_descriptors, verbatim, valid, descriptors, modify_index, - store_mode, old_descriptors); - ASSERT(store_mode == ALLOW_AS_CONSTANT || - new_descriptors->GetDetails(modify_index).type() == FIELD); + // |root_nof| -> |target_nof| + for (int i = root_nof; i < target_nof; ++i) { + Handle<Name> target_key(target_descriptors->GetKey(i), isolate); + PropertyDetails old_details = old_descriptors->GetDetails(i); + PropertyDetails target_details = target_descriptors->GetDetails(i); + target_details = target_details.CopyWithRepresentation( + 
old_details.representation().generalize( + target_details.representation())); + if (modify_index == i) { + target_details = target_details.CopyWithRepresentation( + new_representation.generalize(target_details.representation())); + } + DCHECK_EQ(old_details.attributes(), target_details.attributes()); + if (old_details.type() == FIELD || + target_details.type() == FIELD || + (modify_index == i && store_mode == FORCE_FIELD) || + (target_descriptors->GetValue(i) != old_descriptors->GetValue(i))) { + Handle<HeapType> old_field_type = (old_details.type() == FIELD) + ? handle(old_descriptors->GetFieldType(i), isolate) + : old_descriptors->GetValue(i)->OptimalType( + isolate, target_details.representation()); + Handle<HeapType> target_field_type = (target_details.type() == FIELD) + ? handle(target_descriptors->GetFieldType(i), isolate) + : target_descriptors->GetValue(i)->OptimalType( + isolate, target_details.representation()); + target_field_type = GeneralizeFieldType( + target_field_type, old_field_type, isolate); + if (modify_index == i) { + target_field_type = GeneralizeFieldType( + target_field_type, new_field_type, isolate); + } + FieldDescriptor d(target_key, + current_offset++, + target_field_type, + target_details.attributes(), + target_details.representation()); + new_descriptors->Set(i, &d); + } else { + DCHECK_NE(FIELD, target_details.type()); + Descriptor d(target_key, + handle(target_descriptors->GetValue(i), isolate), + target_details); + new_descriptors->Set(i, &d); + } + } - old_representation = - new_descriptors->GetDetails(modify_index).representation(); - Representation updated_representation = - new_representation.generalize(old_representation); - if (!updated_representation.Equals(old_representation)) { - new_descriptors->SetRepresentation(modify_index, updated_representation); + // |target_nof| -> |old_nof| + for (int i = target_nof; i < old_nof; ++i) { + PropertyDetails old_details = old_descriptors->GetDetails(i); + Handle<Name> 
old_key(old_descriptors->GetKey(i), isolate); + if (modify_index == i) { + old_details = old_details.CopyWithRepresentation( + new_representation.generalize(old_details.representation())); + } + if (old_details.type() == FIELD) { + Handle<HeapType> old_field_type( + old_descriptors->GetFieldType(i), isolate); + if (modify_index == i) { + old_field_type = GeneralizeFieldType( + old_field_type, new_field_type, isolate); + } + FieldDescriptor d(old_key, + current_offset++, + old_field_type, + old_details.attributes(), + old_details.representation()); + new_descriptors->Set(i, &d); + } else { + DCHECK(old_details.type() == CONSTANT || old_details.type() == CALLBACKS); + if (modify_index == i && store_mode == FORCE_FIELD) { + FieldDescriptor d(old_key, + current_offset++, + GeneralizeFieldType( + old_descriptors->GetValue(i)->OptimalType( + isolate, old_details.representation()), + new_field_type, isolate), + old_details.attributes(), + old_details.representation()); + new_descriptors->Set(i, &d); + } else { + DCHECK_NE(FIELD, old_details.type()); + Descriptor d(old_key, + handle(old_descriptors->GetValue(i), isolate), + old_details); + new_descriptors->Set(i, &d); + } + } } - Handle<Map> split_map(root_map->FindLastMatchMap( - verbatim, descriptors, *new_descriptors)); + new_descriptors->Sort(); - int split_descriptors = split_map->NumberOfOwnDescriptors(); - // This is shadowed by |updated_descriptors| being more general than - // |old_descriptors|. 
- ASSERT(descriptors != split_descriptors); + DCHECK(store_mode != FORCE_FIELD || + new_descriptors->GetDetails(modify_index).type() == FIELD); + + Handle<Map> split_map(root_map->FindLastMatchMap( + root_nof, old_nof, *new_descriptors), isolate); + int split_nof = split_map->NumberOfOwnDescriptors(); + DCHECK_NE(old_nof, split_nof); - int descriptor = split_descriptors; split_map->DeprecateTarget( - old_descriptors->GetKey(descriptor), *new_descriptors); + old_descriptors->GetKey(split_nof), *new_descriptors); if (FLAG_trace_generalization) { + PropertyDetails old_details = old_descriptors->GetDetails(modify_index); + PropertyDetails new_details = new_descriptors->GetDetails(modify_index); + Handle<HeapType> old_field_type = (old_details.type() == FIELD) + ? handle(old_descriptors->GetFieldType(modify_index), isolate) + : HeapType::Constant(handle(old_descriptors->GetValue(modify_index), + isolate), isolate); + Handle<HeapType> new_field_type = (new_details.type() == FIELD) + ? handle(new_descriptors->GetFieldType(modify_index), isolate) + : HeapType::Constant(handle(new_descriptors->GetValue(modify_index), + isolate), isolate); old_map->PrintGeneralization( - stdout, "", modify_index, descriptor, descriptors, - old_descriptors->GetDetails(modify_index).type() == CONSTANT && - store_mode == FORCE_FIELD, - old_representation, updated_representation); + stdout, "", modify_index, split_nof, old_nof, + old_details.type() == CONSTANT && store_mode == FORCE_FIELD, + old_details.representation(), new_details.representation(), + *old_field_type, *new_field_type); } // Add missing transitions. 
Handle<Map> new_map = split_map; - for (; descriptor < descriptors; descriptor++) { - new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors); + for (int i = split_nof; i < old_nof; ++i) { + new_map = CopyInstallDescriptors(new_map, i, new_descriptors); } - new_map->set_owns_descriptors(true); return new_map; } @@ -2768,240 +2814,345 @@ // Generalize the representation of all FIELD descriptors. Handle<Map> Map::GeneralizeAllFieldRepresentations( - Handle<Map> map, - Representation new_representation) { + Handle<Map> map) { Handle<DescriptorArray> descriptors(map->instance_descriptors()); - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { - PropertyDetails details = descriptors->GetDetails(i); - if (details.type() == FIELD) { - map = GeneralizeRepresentation(map, i, new_representation, FORCE_FIELD); + for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) { + if (descriptors->GetDetails(i).type() == FIELD) { + map = GeneralizeRepresentation(map, i, Representation::Tagged(), + HeapType::Any(map->GetIsolate()), + FORCE_FIELD); } } return map; } -Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) { +// static +MaybeHandle<Map> Map::TryUpdate(Handle<Map> map) { Handle<Map> proto_map(map); while (proto_map->prototype()->IsJSObject()) { Handle<JSObject> holder(JSObject::cast(proto_map->prototype())); - if (holder->map()->is_deprecated()) { - JSObject::TryMigrateInstance(holder); - } proto_map = Handle<Map>(holder->map()); + if (proto_map->is_deprecated() && JSObject::TryMigrateInstance(holder)) { + proto_map = Handle<Map>(holder->map()); + } } - return CurrentMapForDeprecatedInternal(map); + return TryUpdateInternal(map); } -Handle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> map) { - if (!map->is_deprecated()) return map; +// static +Handle<Map> Map::Update(Handle<Map> map) { + return GeneralizeRepresentation(map, 0, Representation::None(), + HeapType::None(map->GetIsolate()), + ALLOW_AS_CONSTANT); +} + +// static 
+MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) { DisallowHeapAllocation no_allocation; - DescriptorArray* old_descriptors = map->instance_descriptors(); + DisallowDeoptimization no_deoptimization(old_map->GetIsolate()); - int descriptors = map->NumberOfOwnDescriptors(); - Map* root_map = map->FindRootMap(); + if (!old_map->is_deprecated()) return old_map; // Check the state of the root map. - if (!map->EquivalentToForTransition(root_map)) return Handle<Map>(); - int verbatim = root_map->NumberOfOwnDescriptors(); + Map* root_map = old_map->FindRootMap(); + if (!old_map->EquivalentToForTransition(root_map)) return MaybeHandle<Map>(); + int root_nof = root_map->NumberOfOwnDescriptors(); - Map* updated = root_map->FindUpdatedMap( - verbatim, descriptors, old_descriptors); - if (updated == NULL) return Handle<Map>(); + int old_nof = old_map->NumberOfOwnDescriptors(); + DescriptorArray* old_descriptors = old_map->instance_descriptors(); - DescriptorArray* updated_descriptors = updated->instance_descriptors(); - int valid = updated->NumberOfOwnDescriptors(); - if (!updated_descriptors->IsMoreGeneralThan( - verbatim, valid, descriptors, old_descriptors)) { - return Handle<Map>(); - } + Map* new_map = root_map; + for (int i = root_nof; i < old_nof; ++i) { + int j = new_map->SearchTransition(old_descriptors->GetKey(i)); + if (j == TransitionArray::kNotFound) return MaybeHandle<Map>(); + new_map = new_map->GetTransition(j); + DescriptorArray* new_descriptors = new_map->instance_descriptors(); + + PropertyDetails new_details = new_descriptors->GetDetails(i); + PropertyDetails old_details = old_descriptors->GetDetails(i); + if (old_details.attributes() != new_details.attributes() || + !old_details.representation().fits_into(new_details.representation())) { + return MaybeHandle<Map>(); + } + PropertyType new_type = new_details.type(); + PropertyType old_type = old_details.type(); + Object* new_value = new_descriptors->GetValue(i); + Object* old_value = 
old_descriptors->GetValue(i); + switch (new_type) { + case FIELD: + if ((old_type == FIELD && + !HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) || + (old_type == CONSTANT && + !HeapType::cast(new_value)->NowContains(old_value)) || + (old_type == CALLBACKS && + !HeapType::Any()->Is(HeapType::cast(new_value)))) { + return MaybeHandle<Map>(); + } + break; - return handle(updated); + case CONSTANT: + case CALLBACKS: + if (old_type != new_type || old_value != new_value) { + return MaybeHandle<Map>(); + } + break; + + case NORMAL: + case HANDLER: + case INTERCEPTOR: + case NONEXISTENT: + UNREACHABLE(); + } + } + if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>(); + return handle(new_map); } -Handle<Object> JSObject::SetPropertyWithInterceptor( - Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode) { +MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it, + Handle<Object> value) { // TODO(rossberg): Support symbols in the API. - if (name->IsSymbol()) return value; - Isolate* isolate = object->GetIsolate(); - Handle<String> name_string = Handle<String>::cast(name); - Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor()); - if (!interceptor->setter()->IsUndefined()) { - LOG(isolate, - ApiNamedPropertyAccess("interceptor-named-set", *object, *name)); - PropertyCallbackArguments args( - isolate, interceptor->data(), *object, *object); - v8::NamedPropertySetterCallback setter = - v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter()); - Handle<Object> value_unhole = value->IsTheHole() - ? 
Handle<Object>(isolate->factory()->undefined_value()) : value; - v8::Handle<v8::Value> result = args.Call(setter, - v8::Utils::ToLocal(name_string), - v8::Utils::ToLocal(value_unhole)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - if (!result.IsEmpty()) return value; - } - Handle<Object> result = - SetPropertyPostInterceptor(object, name, value, attributes, strict_mode); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - return result; + if (it->name()->IsSymbol()) return value; + + Handle<String> name_string = Handle<String>::cast(it->name()); + Handle<JSObject> holder = it->GetHolder<JSObject>(); + Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor()); + if (interceptor->setter()->IsUndefined()) return MaybeHandle<Object>(); + + LOG(it->isolate(), + ApiNamedPropertyAccess("interceptor-named-set", *holder, *name_string)); + PropertyCallbackArguments args(it->isolate(), interceptor->data(), *holder, + *holder); + v8::NamedPropertySetterCallback setter = + v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter()); + v8::Handle<v8::Value> result = args.Call( + setter, v8::Utils::ToLocal(name_string), v8::Utils::ToLocal(value)); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object); + if (!result.IsEmpty()) return value; + + return MaybeHandle<Object>(); } -Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_mode) { - LookupResult result(object->GetIsolate()); - object->LocalLookup(*name, &result, true); - if (!result.IsFound()) { - object->map()->LookupTransition(JSObject::cast(*object), *name, &result); - } - return SetProperty(object, &result, name, value, attributes, strict_mode, - store_mode); +MaybeHandle<Object> Object::SetProperty(Handle<Object> object, + Handle<Name> name, Handle<Object> value, + StrictMode strict_mode, + StoreFromKeyed store_mode) { + 
LookupIterator it(object, name); + return SetProperty(&it, value, strict_mode, store_mode); } -Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object, - Handle<Object> structure, - Handle<Name> name, - Handle<Object> value, - Handle<JSObject> holder, - StrictMode strict_mode) { - Isolate* isolate = object->GetIsolate(); +MaybeHandle<Object> Object::SetProperty(LookupIterator* it, + Handle<Object> value, + StrictMode strict_mode, + StoreFromKeyed store_mode) { + // Make sure that the top context does not change when doing callbacks or + // interceptor calls. + AssertNoContextChange ncc(it->isolate()); - // We should never get here to initialize a const with the hole - // value since a const declaration would conflict with the setter. - ASSERT(!value->IsTheHole()); + bool done = false; + for (; it->IsFound(); it->Next()) { + switch (it->state()) { + case LookupIterator::NOT_FOUND: + UNREACHABLE(); - // To accommodate both the old and the new api we switch on the - // data structure used to store the callbacks. Eventually foreign - // callbacks should be phased out. - if (structure->IsForeign()) { - AccessorDescriptor* callback = - reinterpret_cast<AccessorDescriptor*>( - Handle<Foreign>::cast(structure)->foreign_address()); - CALL_AND_RETRY_OR_DIE(isolate, - (callback->setter)( - isolate, *object, *value, callback->data), - break, - return Handle<Object>()); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - return value; - } + case LookupIterator::ACCESS_CHECK: + // TODO(verwaest): Remove the distinction. This is mostly bogus since we + // don't know whether we'll want to fetch attributes or call a setter + // until we find the property. 
+ if (it->HasAccess(v8::ACCESS_SET)) break; + return JSObject::SetPropertyWithFailedAccessCheck(it, value, + strict_mode); + + case LookupIterator::JSPROXY: + if (it->HolderIsReceiverOrHiddenPrototype()) { + return JSProxy::SetPropertyWithHandler(it->GetHolder<JSProxy>(), + it->GetReceiver(), it->name(), + value, strict_mode); + } else { + // TODO(verwaest): Use the MaybeHandle to indicate result. + bool has_result = false; + MaybeHandle<Object> maybe_result = + JSProxy::SetPropertyViaPrototypesWithHandler( + it->GetHolder<JSProxy>(), it->GetReceiver(), it->name(), + value, strict_mode, &has_result); + if (has_result) return maybe_result; + done = true; + } + break; - if (structure->IsExecutableAccessorInfo()) { - // api style callbacks - ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure); - if (!data->IsCompatibleReceiver(*object)) { - Handle<Object> args[2] = { name, object }; - Handle<Object> error = - isolate->factory()->NewTypeError("incompatible_method_receiver", - HandleVector(args, - ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); - } - // TODO(rossberg): Support symbols in the API. 
- if (name->IsSymbol()) return value; - Object* call_obj = data->setter(); - v8::AccessorSetterCallback call_fun = - v8::ToCData<v8::AccessorSetterCallback>(call_obj); - if (call_fun == NULL) return value; - Handle<String> key = Handle<String>::cast(name); - LOG(isolate, ApiNamedPropertyAccess("store", *object, *name)); - PropertyCallbackArguments args( - isolate, data->data(), *object, JSObject::cast(*holder)); - args.Call(call_fun, - v8::Utils::ToLocal(key), - v8::Utils::ToLocal(value)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - return value; - } + case LookupIterator::INTERCEPTOR: + if (it->HolderIsReceiverOrHiddenPrototype()) { + MaybeHandle<Object> maybe_result = + JSObject::SetPropertyWithInterceptor(it, value); + if (!maybe_result.is_null()) return maybe_result; + if (it->isolate()->has_pending_exception()) return maybe_result; + } else { + Maybe<PropertyAttributes> maybe_attributes = + JSObject::GetPropertyAttributesWithInterceptor( + it->GetHolder<JSObject>(), it->GetReceiver(), it->name()); + if (!maybe_attributes.has_value) return MaybeHandle<Object>(); + done = maybe_attributes.value != ABSENT; + if (done && (maybe_attributes.value & READ_ONLY) != 0) { + return WriteToReadOnlyProperty(it, value, strict_mode); + } + } + break; - if (structure->IsAccessorPair()) { - Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate); - if (setter->IsSpecFunction()) { - // TODO(rossberg): nicer would be to cast to some JSCallable here... 
- return SetPropertyWithDefinedSetter( - object, Handle<JSReceiver>::cast(setter), value); - } else { - if (strict_mode == SLOPPY) return value; - Handle<Object> args[2] = { name, holder }; - Handle<Object> error = - isolate->factory()->NewTypeError("no_setter_in_callback", - HandleVector(args, 2)); - isolate->Throw(*error); - return Handle<Object>(); + case LookupIterator::PROPERTY: + if (!it->HasProperty()) break; + if (it->property_details().IsReadOnly()) { + return WriteToReadOnlyProperty(it, value, strict_mode); + } + switch (it->property_kind()) { + case LookupIterator::ACCESSOR: + if (it->HolderIsReceiverOrHiddenPrototype() || + !it->GetAccessors()->IsDeclaredAccessorInfo()) { + return SetPropertyWithAccessor(it->GetReceiver(), it->name(), + value, it->GetHolder<JSObject>(), + it->GetAccessors(), strict_mode); + } + break; + case LookupIterator::DATA: + if (it->HolderIsReceiverOrHiddenPrototype()) { + return SetDataProperty(it, value); + } + } + done = true; + break; } + + if (done) break; } - // TODO(dcarney): Handle correctly. - if (structure->IsDeclaredAccessorInfo()) { - return value; + return AddDataProperty(it, value, NONE, strict_mode, store_mode); +} + + +MaybeHandle<Object> Object::WriteToReadOnlyProperty(LookupIterator* it, + Handle<Object> value, + StrictMode strict_mode) { + if (strict_mode != STRICT) return value; + + Handle<Object> args[] = {it->name(), it->GetReceiver()}; + Handle<Object> error = it->factory()->NewTypeError( + "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); + return it->isolate()->Throw<Object>(error); +} + + +MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it, + Handle<Object> value) { + // Proxies are handled on the WithHandler path. Other non-JSObjects cannot + // have own properties. + Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver()); + + // Store on the holder which may be hidden behind the receiver. 
+ DCHECK(it->HolderIsReceiverOrHiddenPrototype()); + + // Old value for the observation change record. + // Fetch before transforming the object since the encoding may become + // incompatible with what's cached in |it|. + bool is_observed = + receiver->map()->is_observed() && + !it->name().is_identical_to(it->factory()->hidden_string()); + MaybeHandle<Object> maybe_old; + if (is_observed) maybe_old = it->GetDataValue(); + + // Possibly migrate to the most up-to-date map that will be able to store + // |value| under it->name(). + it->PrepareForDataProperty(value); + + // Write the property value. + it->WriteDataValue(value); + + // Send the change record if there are observers. + if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) { + JSObject::EnqueueChangeRecord(receiver, "update", it->name(), + maybe_old.ToHandleChecked()); } - UNREACHABLE(); - return Handle<Object>(); + return value; } -Handle<Object> JSReceiver::SetPropertyWithDefinedSetter( - Handle<JSReceiver> object, - Handle<JSReceiver> setter, - Handle<Object> value) { - Isolate* isolate = object->GetIsolate(); +MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it, + Handle<Object> value, + PropertyAttributes attributes, + StrictMode strict_mode, + StoreFromKeyed store_mode) { + DCHECK(!it->GetReceiver()->IsJSProxy()); + if (!it->GetReceiver()->IsJSObject()) { + // TODO(verwaest): Throw a TypeError with a more specific message. + return WriteToReadOnlyProperty(it, value, strict_mode); + } + Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver()); + + // If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject) + // instead. If the prototype is Null, the proxy is detached. + if (receiver->IsJSGlobalProxy()) { + // Trying to assign to a detached proxy. 
+ PrototypeIterator iter(it->isolate(), receiver); + if (iter.IsAtEnd()) return value; + receiver = + Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter)); + } -#ifdef ENABLE_DEBUGGER_SUPPORT - Debug* debug = isolate->debug(); - // Handle stepping into a setter if step into is active. - // TODO(rossberg): should this apply to getters that are function proxies? - if (debug->StepInActive() && setter->IsJSFunction()) { - debug->HandleStepIn( - Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false); + if (!receiver->map()->is_extensible()) { + if (strict_mode == SLOPPY) return value; + + Handle<Object> args[1] = {it->name()}; + Handle<Object> error = it->factory()->NewTypeError( + "object_not_extensible", HandleVector(args, ARRAY_SIZE(args))); + return it->isolate()->Throw<Object>(error); + } + + // Possibly migrate to the most up-to-date map that will be able to store + // |value| under it->name() with |attributes|. + it->TransitionToDataProperty(value, attributes, store_mode); + + // TODO(verwaest): Encapsulate dictionary handling better. + if (receiver->map()->is_dictionary_map()) { + // TODO(verwaest): Probably should ensure this is done beforehand. + it->InternalizeName(); + JSObject::AddSlowProperty(receiver, it->name(), value, attributes); + } else { + // Write the property value. + it->WriteDataValue(value); + } + + // Send the change record if there are observers. + if (receiver->map()->is_observed() && + !it->name().is_identical_to(it->factory()->hidden_string())) { + JSObject::EnqueueChangeRecord(receiver, "add", it->name(), + it->factory()->the_hole_value()); } -#endif - bool has_pending_exception; - Handle<Object> argv[] = { value }; - Execution::Call( - isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception); - // Check for pending exception and return the result. 
- if (has_pending_exception) return Handle<Object>(); return value; } -Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes( +MaybeHandle<Object> JSObject::SetElementWithCallbackSetterInPrototypes( Handle<JSObject> object, uint32_t index, Handle<Object> value, bool* found, StrictMode strict_mode) { Isolate *isolate = object->GetIsolate(); - for (Handle<Object> proto = handle(object->GetPrototype(), isolate); - !proto->IsNull(); - proto = handle(proto->GetPrototype(isolate), isolate)) { - if (proto->IsJSProxy()) { + for (PrototypeIterator iter(isolate, object); !iter.IsAtEnd(); + iter.Advance()) { + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) { return JSProxy::SetPropertyViaPrototypesWithHandler( - Handle<JSProxy>::cast(proto), - object, + Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), object, isolate->factory()->Uint32ToString(index), // name - value, - NONE, - strict_mode, - found); + value, strict_mode, found); } - Handle<JSObject> js_proto = Handle<JSObject>::cast(proto); + Handle<JSObject> js_proto = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); if (!js_proto->HasDictionaryElements()) { continue; } @@ -3022,75 +3173,41 @@ } -Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - bool* done) { - Isolate* isolate = object->GetIsolate(); - - *done = false; - // We could not find a local property so let's check whether there is an - // accessor that wants to handle the property, or whether the property is - // read-only on the prototype chain. 
- LookupResult result(isolate); - object->LookupRealNamedPropertyInPrototypes(*name, &result); - if (result.IsFound()) { - switch (result.type()) { - case NORMAL: - case FIELD: - case CONSTANT: - *done = result.IsReadOnly(); - break; - case INTERCEPTOR: { - PropertyAttributes attr = GetPropertyAttributeWithInterceptor( - handle(result.holder()), object, name, true); - *done = !!(attr & READ_ONLY); - break; - } - case CALLBACKS: { - *done = true; - Handle<Object> callback_object(result.GetCallbackObject(), isolate); - return SetPropertyWithCallback(object, callback_object, name, value, - handle(result.holder()), strict_mode); - } - case HANDLER: { - Handle<JSProxy> proxy(result.proxy()); - return JSProxy::SetPropertyViaPrototypesWithHandler( - proxy, object, name, value, attributes, strict_mode, done); - } - case TRANSITION: - case NONEXISTENT: - UNREACHABLE(); - break; - } - } - - // If we get here with *done true, we have encountered a read-only property. - if (*done) { - if (strict_mode == SLOPPY) return value; - Handle<Object> args[] = { name, object }; - Handle<Object> error = isolate->factory()->NewTypeError( - "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); - } - return isolate->factory()->the_hole_value(); -} - - void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) { + // Only supports adding slack to owned descriptors. 
+ DCHECK(map->owns_descriptors()); + Handle<DescriptorArray> descriptors(map->instance_descriptors()); + int old_size = map->NumberOfOwnDescriptors(); if (slack <= descriptors->NumberOfSlackDescriptors()) return; - int number_of_descriptors = descriptors->number_of_descriptors(); - Isolate* isolate = map->GetIsolate(); - Handle<DescriptorArray> new_descriptors = - isolate->factory()->NewDescriptorArray(number_of_descriptors, slack); - DescriptorArray::WhitenessWitness witness(*new_descriptors); - for (int i = 0; i < number_of_descriptors; ++i) { - new_descriptors->CopyFrom(i, *descriptors, i, witness); + Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo( + descriptors, old_size, slack); + + if (old_size == 0) { + map->set_instance_descriptors(*new_descriptors); + return; + } + + // If the source descriptors had an enum cache we copy it. This ensures + // that the maps to which we push the new descriptor array back can rely + // on a cache always being available once it is set. If the map has more + // enumerated descriptors than available in the original cache, the cache + // will be lazily replaced by the extended cache when needed. + if (descriptors->HasEnumCache()) { + new_descriptors->CopyEnumCacheFrom(*descriptors); + } + + // Replace descriptors by new_descriptors in all maps that share it. + map->GetHeap()->incremental_marking()->RecordWrites(*descriptors); + + Map* walk_map; + for (Object* current = map->GetBackPointer(); + !current->IsUndefined(); + current = walk_map->GetBackPointer()) { + walk_map = Map::cast(current); + if (walk_map->instance_descriptors() != *descriptors) break; + walk_map->set_instance_descriptors(*new_descriptors); } map->set_instance_descriptors(*new_descriptors); @@ -3120,8 +3237,8 @@ // back to front so that the last callback with a given name takes // precedence over previously added callbacks with that name. 
for (int i = nof_callbacks - 1; i >= 0; i--) { - AccessorInfo* entry = AccessorInfo::cast(callbacks->get(i)); - Name* key = Name::cast(entry->name()); + Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i))); + Handle<Name> key(Name::cast(entry->name())); // Check if a descriptor with this name already exists before writing. if (!T::Contains(key, entry, valid_descriptors, array)) { T::Insert(key, entry, valid_descriptors, array); @@ -3134,16 +3251,18 @@ struct DescriptorArrayAppender { typedef DescriptorArray Array; - static bool Contains(Name* key, - AccessorInfo* entry, + static bool Contains(Handle<Name> key, + Handle<AccessorInfo> entry, int valid_descriptors, Handle<DescriptorArray> array) { - return array->Search(key, valid_descriptors) != DescriptorArray::kNotFound; + DisallowHeapAllocation no_gc; + return array->Search(*key, valid_descriptors) != DescriptorArray::kNotFound; } - static void Insert(Name* key, - AccessorInfo* entry, + static void Insert(Handle<Name> key, + Handle<AccessorInfo> entry, int valid_descriptors, Handle<DescriptorArray> array) { + DisallowHeapAllocation no_gc; CallbacksDescriptor desc(key, entry, entry->property_attributes()); array->Append(&desc); } @@ -3152,20 +3271,21 @@ struct FixedArrayAppender { typedef FixedArray Array; - static bool Contains(Name* key, - AccessorInfo* entry, + static bool Contains(Handle<Name> key, + Handle<AccessorInfo> entry, int valid_descriptors, Handle<FixedArray> array) { for (int i = 0; i < valid_descriptors; i++) { - if (key == AccessorInfo::cast(array->get(i))->name()) return true; + if (*key == AccessorInfo::cast(array->get(i))->name()) return true; } return false; } - static void Insert(Name* key, - AccessorInfo* entry, + static void Insert(Handle<Name> key, + Handle<AccessorInfo> entry, int valid_descriptors, Handle<FixedArray> array) { - array->set(valid_descriptors, entry); + DisallowHeapAllocation no_gc; + array->set(valid_descriptors, *entry); } }; @@ -3175,7 +3295,7 @@ int nof = 
map->NumberOfOwnDescriptors(); Handle<DescriptorArray> array(map->instance_descriptors()); NeanderArray callbacks(descriptors); - ASSERT(array->NumberOfSlackDescriptors() >= callbacks.length()); + DCHECK(array->NumberOfSlackDescriptors() >= callbacks.length()); nof = AppendUniqueCallbacks<DescriptorArrayAppender>(&callbacks, array, nof); map->SetNumberOfOwnDescriptors(nof); } @@ -3185,7 +3305,7 @@ Handle<FixedArray> array, int valid_descriptors) { NeanderArray callbacks(descriptors); - ASSERT(array->length() >= callbacks.length() + valid_descriptors); + DCHECK(array->length() >= callbacks.length() + valid_descriptors); return AppendUniqueCallbacks<FixedArrayAppender>(&callbacks, array, valid_descriptors); @@ -3193,7 +3313,7 @@ static bool ContainsMap(MapHandleList* maps, Handle<Map> map) { - ASSERT(!map.is_null()); + DCHECK(!map.is_null()); for (int i = 0; i < maps->length(); ++i) { if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true; } @@ -3238,9 +3358,16 @@ ? to_kind : TERMINAL_FAST_ELEMENTS_KIND; - // Support for legacy API. + // Support for legacy API: SetIndexedPropertiesTo{External,Pixel}Data + // allows to change elements from arbitrary kind to any ExternalArray + // elements kind. Satisfy its requirements, checking whether we already + // have the cached transition. 
if (IsExternalArrayElementsKind(to_kind) && !IsFixedTypedArrayElementsKind(map->elements_kind())) { + if (map->HasElementsTransition()) { + Map* next_map = map->elements_transition_map(); + if (next_map->elements_kind() == to_kind) return next_map; + } return map; } @@ -3252,12 +3379,12 @@ } if (to_kind != kind && current_map->HasElementsTransition()) { - ASSERT(to_kind == DICTIONARY_ELEMENTS); + DCHECK(to_kind == DICTIONARY_ELEMENTS); Map* next_map = current_map->elements_transition_map(); if (next_map->elements_kind() == to_kind) return next_map; } - ASSERT(current_map->elements_kind() == target_kind); + DCHECK(current_map->elements_kind() == target_kind); return current_map; } @@ -3283,54 +3410,68 @@ } -static MaybeObject* AddMissingElementsTransitions(Map* map, - ElementsKind to_kind) { - ASSERT(IsTransitionElementsKind(map->elements_kind())); +static Handle<Map> AddMissingElementsTransitions(Handle<Map> map, + ElementsKind to_kind) { + DCHECK(IsTransitionElementsKind(map->elements_kind())); - Map* current_map = map; + Handle<Map> current_map = map; ElementsKind kind = map->elements_kind(); - while (kind != to_kind && !IsTerminalElementsKind(kind)) { - kind = GetNextTransitionElementsKind(kind); - MaybeObject* maybe_next_map = - current_map->CopyAsElementsKind(kind, INSERT_TRANSITION); - if (!maybe_next_map->To(¤t_map)) return maybe_next_map; + if (!map->is_prototype_map()) { + while (kind != to_kind && !IsTerminalElementsKind(kind)) { + kind = GetNextTransitionElementsKind(kind); + current_map = + Map::CopyAsElementsKind(current_map, kind, INSERT_TRANSITION); + } } // In case we are exiting the fast elements kind system, just add the map in // the end. 
if (kind != to_kind) { - MaybeObject* maybe_next_map = - current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION); - if (!maybe_next_map->To(¤t_map)) return maybe_next_map; + current_map = Map::CopyAsElementsKind( + current_map, to_kind, INSERT_TRANSITION); } - ASSERT(current_map->elements_kind() == to_kind); + DCHECK(current_map->elements_kind() == to_kind); return current_map; } -Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object, - ElementsKind to_kind) { - Isolate* isolate = object->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - object->GetElementsTransitionMap(isolate, to_kind), - Map); +Handle<Map> Map::TransitionElementsTo(Handle<Map> map, + ElementsKind to_kind) { + ElementsKind from_kind = map->elements_kind(); + if (from_kind == to_kind) return map; + + Isolate* isolate = map->GetIsolate(); + Context* native_context = isolate->context()->native_context(); + Object* maybe_array_maps = native_context->js_array_maps(); + if (maybe_array_maps->IsFixedArray()) { + DisallowHeapAllocation no_gc; + FixedArray* array_maps = FixedArray::cast(maybe_array_maps); + if (array_maps->get(from_kind) == *map) { + Object* maybe_transitioned_map = array_maps->get(to_kind); + if (maybe_transitioned_map->IsMap()) { + return handle(Map::cast(maybe_transitioned_map)); + } + } + } + + return TransitionElementsToSlow(map, to_kind); } -MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) { - Map* start_map = map(); - ElementsKind from_kind = start_map->elements_kind(); +Handle<Map> Map::TransitionElementsToSlow(Handle<Map> map, + ElementsKind to_kind) { + ElementsKind from_kind = map->elements_kind(); if (from_kind == to_kind) { - return start_map; + return map; } bool allow_store_transition = // Only remember the map transition if there is not an already existing // non-matching element transition. 
- !start_map->IsUndefined() && !start_map->is_shared() && + !map->IsUndefined() && !map->is_dictionary_map() && IsTransitionElementsKind(from_kind); // Only store fast element maps in ascending generality. @@ -3341,24 +3482,16 @@ } if (!allow_store_transition) { - return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION); + return Map::CopyAsElementsKind(map, to_kind, OMIT_TRANSITION); } - return start_map->AsElementsKind(to_kind); + return Map::AsElementsKind(map, to_kind); } -// TODO(ishell): Temporary wrapper until handlified. // static Handle<Map> Map::AsElementsKind(Handle<Map> map, ElementsKind kind) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - map->AsElementsKind(kind), - Map); -} - - -MaybeObject* Map::AsElementsKind(ElementsKind kind) { - Map* closest_map = FindClosestElementsTransition(this, kind); + Handle<Map> closest_map(FindClosestElementsTransition(*map, kind)); if (closest_map->elements_kind() == kind) { return closest_map; @@ -3368,29 +3501,31 @@ } -void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) { +Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object, + ElementsKind to_kind) { + Handle<Map> map(object->map()); + return Map::TransitionElementsTo(map, to_kind); +} + + +void JSObject::LookupOwnRealNamedProperty(Handle<Name> name, + LookupResult* result) { DisallowHeapAllocation no_gc; if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return result->NotFound(); - ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result); + PrototypeIterator iter(GetIsolate(), this); + if (iter.IsAtEnd()) return result->NotFound(); + DCHECK(iter.GetCurrent()->IsJSGlobalObject()); + return JSObject::cast(iter.GetCurrent()) + ->LookupOwnRealNamedProperty(name, result); } if (HasFastProperties()) { - map()->LookupDescriptor(this, name, result); + map()->LookupDescriptor(this, *name, result); // A property or a map transition was found. 
We return all of these result - // types because LocalLookupRealNamedProperty is used when setting + // types because LookupOwnRealNamedProperty is used when setting // properties where map transitions are handled. - ASSERT(!result->IsFound() || + DCHECK(!result->IsFound() || (result->holder() == this && result->IsFastPropertyType())); - // Disallow caching for uninitialized constants. These can only - // occur as fields. - if (result->IsField() && - result->IsReadOnly() && - RawFastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) { - result->DisallowCaching(); - } return; } @@ -3399,15 +3534,12 @@ Object* value = property_dictionary()->ValueAt(entry); if (IsGlobalObject()) { PropertyDetails d = property_dictionary()->DetailsAt(entry); - if (d.IsDeleted()) { + if (d.IsDeleted() || PropertyCell::cast(value)->value()->IsTheHole()) { result->NotFound(); return; } value = PropertyCell::cast(value)->value(); } - // Make sure to disallow caching for uninitialized constants - // found in the dictionary-mode objects. 
- if (value->IsTheHole()) result->DisallowCaching(); result->DictionaryResult(this, entry); return; } @@ -3416,160 +3548,82 @@ } -void JSObject::LookupRealNamedProperty(Name* name, LookupResult* result) { - LocalLookupRealNamedProperty(name, result); +void JSObject::LookupRealNamedProperty(Handle<Name> name, + LookupResult* result) { + DisallowHeapAllocation no_gc; + LookupOwnRealNamedProperty(name, result); if (result->IsFound()) return; LookupRealNamedPropertyInPrototypes(name, result); } -void JSObject::LookupRealNamedPropertyInPrototypes(Name* name, +void JSObject::LookupRealNamedPropertyInPrototypes(Handle<Name> name, LookupResult* result) { + if (name->IsOwn()) { + result->NotFound(); + return; + } + + DisallowHeapAllocation no_gc; Isolate* isolate = GetIsolate(); - Heap* heap = isolate->heap(); - for (Object* pt = GetPrototype(); - pt != heap->null_value(); - pt = pt->GetPrototype(isolate)) { - if (pt->IsJSProxy()) { - return result->HandlerResult(JSProxy::cast(pt)); + for (PrototypeIterator iter(isolate, this); !iter.IsAtEnd(); iter.Advance()) { + if (iter.GetCurrent()->IsJSProxy()) { + return result->HandlerResult(JSProxy::cast(iter.GetCurrent())); } - JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result); - ASSERT(!(result->IsFound() && result->type() == INTERCEPTOR)); + JSObject::cast(iter.GetCurrent())->LookupOwnRealNamedProperty(name, result); + DCHECK(!(result->IsFound() && result->type() == INTERCEPTOR)); if (result->IsFound()) return; } result->NotFound(); } -// We only need to deal with CALLBACKS and INTERCEPTORS -Handle<Object> JSObject::SetPropertyWithFailedAccessCheck( - Handle<JSObject> object, - LookupResult* result, - Handle<Name> name, - Handle<Object> value, - bool check_prototype, - StrictMode strict_mode) { - if (check_prototype && !result->IsProperty()) { - object->LookupRealNamedPropertyInPrototypes(*name, result); - } - - if (result->IsProperty()) { - if (!result->IsReadOnly()) { - switch (result->type()) { - case CALLBACKS: { 
- Object* obj = result->GetCallbackObject(); - if (obj->IsAccessorInfo()) { - Handle<AccessorInfo> info(AccessorInfo::cast(obj)); - if (info->all_can_write()) { - return SetPropertyWithCallback(object, - info, - name, - value, - handle(result->holder()), - strict_mode); - } - } else if (obj->IsAccessorPair()) { - Handle<AccessorPair> pair(AccessorPair::cast(obj)); - if (pair->all_can_read()) { - return SetPropertyWithCallback(object, - pair, - name, - value, - handle(result->holder()), - strict_mode); - } - } - break; - } - case INTERCEPTOR: { - // Try lookup real named properties. Note that only property can be - // set is callbacks marked as ALL_CAN_WRITE on the prototype chain. - LookupResult r(object->GetIsolate()); - object->LookupRealNamedProperty(*name, &r); - if (r.IsProperty()) { - return SetPropertyWithFailedAccessCheck(object, - &r, - name, - value, - check_prototype, - strict_mode); - } - break; - } - default: { - break; - } - } - } - } - - Isolate* isolate = object->GetIsolate(); - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - return value; -} - - -Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, - LookupResult* result, - Handle<Name> key, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_mode) { - if (result->IsHandler()) { - return JSProxy::SetPropertyWithHandler(handle(result->proxy()), - object, key, value, attributes, strict_mode); - } else { - return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object), - result, key, value, attributes, strict_mode, store_mode); - } -} - - -bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) { +Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, + Handle<Name> name) { Isolate* isolate = proxy->GetIsolate(); // TODO(rossberg): adjust once there is a story for symbols vs proxies. 
- if (name->IsSymbol()) return false; + if (name->IsSymbol()) return maybe(false); Handle<Object> args[] = { name }; - Handle<Object> result = proxy->CallTrap( - "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return false; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, result, CallTrap(proxy, "has", isolate->derived_has_trap(), + ARRAY_SIZE(args), args), + Maybe<bool>()); - return result->BooleanValue(); + return maybe(result->BooleanValue()); } -Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode) { +MaybeHandle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy, + Handle<Object> receiver, + Handle<Name> name, + Handle<Object> value, + StrictMode strict_mode) { Isolate* isolate = proxy->GetIsolate(); // TODO(rossberg): adjust once there is a story for symbols vs proxies. if (name->IsSymbol()) return value; Handle<Object> args[] = { receiver, name, value }; - proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return Handle<Object>(); + RETURN_ON_EXCEPTION( + isolate, + CallTrap(proxy, + "set", + isolate->derived_set_trap(), + ARRAY_SIZE(args), + args), + Object); return value; } -Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler( - Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - bool* done) { +MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler( + Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name, + Handle<Object> value, StrictMode strict_mode, bool* done) { Isolate* isolate = proxy->GetIsolate(); Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy. 
@@ -3581,9 +3635,15 @@ *done = true; // except where redefined... Handle<Object> args[] = { name }; - Handle<Object> result = proxy->CallTrap( - "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return Handle<Object>(); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + CallTrap(proxy, + "getPropertyDescriptor", + Handle<Object>(), + ARRAY_SIZE(args), + args), + Object); if (result->IsUndefined()) { *done = false; @@ -3591,21 +3651,24 @@ } // Emulate [[GetProperty]] semantics for proxies. - bool has_pending_exception; Handle<Object> argv[] = { result }; - Handle<Object> desc = Execution::Call( - isolate, isolate->to_complete_property_descriptor(), result, - ARRAY_SIZE(argv), argv, &has_pending_exception); - if (has_pending_exception) return Handle<Object>(); + Handle<Object> desc; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, desc, + Execution::Call(isolate, + isolate->to_complete_property_descriptor(), + result, + ARRAY_SIZE(argv), + argv), + Object); // [[GetProperty]] requires to check that all properties are configurable. 
Handle<String> configurable_name = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("configurable_")); - Handle<Object> configurable( - v8::internal::GetProperty(isolate, desc, configurable_name)); - ASSERT(!isolate->has_pending_exception()); - ASSERT(configurable->IsTrue() || configurable->IsFalse()); + Handle<Object> configurable = + Object::GetProperty(desc, configurable_name).ToHandleChecked(); + DCHECK(configurable->IsBoolean()); if (configurable->IsFalse()) { Handle<String> trap = isolate->factory()->InternalizeOneByteString( @@ -3613,42 +3676,37 @@ Handle<Object> args[] = { handler, trap, name }; Handle<Object> error = isolate->factory()->NewTypeError( "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } - ASSERT(configurable->IsTrue()); + DCHECK(configurable->IsTrue()); // Check for DataDescriptor. Handle<String> hasWritable_name = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("hasWritable_")); - Handle<Object> hasWritable( - v8::internal::GetProperty(isolate, desc, hasWritable_name)); - ASSERT(!isolate->has_pending_exception()); - ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse()); + Handle<Object> hasWritable = + Object::GetProperty(desc, hasWritable_name).ToHandleChecked(); + DCHECK(hasWritable->IsBoolean()); if (hasWritable->IsTrue()) { Handle<String> writable_name = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("writable_")); - Handle<Object> writable( - v8::internal::GetProperty(isolate, desc, writable_name)); - ASSERT(!isolate->has_pending_exception()); - ASSERT(writable->IsTrue() || writable->IsFalse()); + Handle<Object> writable = + Object::GetProperty(desc, writable_name).ToHandleChecked(); + DCHECK(writable->IsBoolean()); *done = writable->IsFalse(); if (!*done) return isolate->factory()->the_hole_value(); if (strict_mode == SLOPPY) return value; Handle<Object> args[] = { 
name, receiver }; Handle<Object> error = isolate->factory()->NewTypeError( "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } // We have an AccessorDescriptor. Handle<String> set_name = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("set_")); - Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_name)); - ASSERT(!isolate->has_pending_exception()); + Handle<Object> setter = Object::GetProperty(desc, set_name).ToHandleChecked(); if (!setter->IsUndefined()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... return SetPropertyWithDefinedSetter( @@ -3659,12 +3717,11 @@ Handle<Object> args2[] = { name, proxy }; Handle<Object> error = isolate->factory()->NewTypeError( "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2))); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } -Handle<Object> JSProxy::DeletePropertyWithHandler( +MaybeHandle<Object> JSProxy::DeletePropertyWithHandler( Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode) { Isolate* isolate = proxy->GetIsolate(); @@ -3672,9 +3729,15 @@ if (name->IsSymbol()) return isolate->factory()->false_value(); Handle<Object> args[] = { name }; - Handle<Object> result = proxy->CallTrap( - "delete", Handle<Object>(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return Handle<Object>(); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + CallTrap(proxy, + "delete", + Handle<Object>(), + ARRAY_SIZE(args), + args), + Object); bool result_bool = result->BooleanValue(); if (mode == STRICT_DELETION && !result_bool) { @@ -3684,14 +3747,13 @@ Handle<Object> args[] = { handler, trap_name }; Handle<Object> error = isolate->factory()->NewTypeError( "handler_failed", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); + return 
isolate->Throw<Object>(error); } return isolate->factory()->ToBoolean(result_bool); } -Handle<Object> JSProxy::DeleteElementWithHandler( +MaybeHandle<Object> JSProxy::DeleteElementWithHandler( Handle<JSProxy> proxy, uint32_t index, DeleteMode mode) { Isolate* isolate = proxy->GetIsolate(); Handle<String> name = isolate->factory()->Uint32ToString(index); @@ -3699,48 +3761,58 @@ } -PropertyAttributes JSProxy::GetPropertyAttributeWithHandler( - Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - Handle<Name> name) { +Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler( + Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name) { Isolate* isolate = proxy->GetIsolate(); HandleScope scope(isolate); // TODO(rossberg): adjust once there is a story for symbols vs proxies. - if (name->IsSymbol()) return ABSENT; + if (name->IsSymbol()) return maybe(ABSENT); Handle<Object> args[] = { name }; - Handle<Object> result = proxy->CallTrap( - "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return NONE; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, result, + proxy->CallTrap(proxy, "getPropertyDescriptor", Handle<Object>(), + ARRAY_SIZE(args), args), + Maybe<PropertyAttributes>()); - if (result->IsUndefined()) return ABSENT; + if (result->IsUndefined()) return maybe(ABSENT); - bool has_pending_exception; Handle<Object> argv[] = { result }; - Handle<Object> desc = Execution::Call( - isolate, isolate->to_complete_property_descriptor(), result, - ARRAY_SIZE(argv), argv, &has_pending_exception); - if (has_pending_exception) return NONE; + Handle<Object> desc; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, desc, + Execution::Call(isolate, isolate->to_complete_property_descriptor(), + result, ARRAY_SIZE(argv), argv), + Maybe<PropertyAttributes>()); // Convert result to PropertyAttributes. 
Handle<String> enum_n = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("enumerable_")); - Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n)); - if (isolate->has_pending_exception()) return NONE; + Handle<Object> enumerable; + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, enumerable, + Object::GetProperty(desc, enum_n), + Maybe<PropertyAttributes>()); Handle<String> conf_n = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("configurable_")); - Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n)); - if (isolate->has_pending_exception()) return NONE; + Handle<Object> configurable; + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, configurable, + Object::GetProperty(desc, conf_n), + Maybe<PropertyAttributes>()); Handle<String> writ_n = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("writable_")); - Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n)); - if (isolate->has_pending_exception()) return NONE; + Handle<Object> writable; + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, writable, + Object::GetProperty(desc, writ_n), + Maybe<PropertyAttributes>()); if (!writable->BooleanValue()) { Handle<String> set_n = isolate->factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("set_")); - Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_n)); - if (isolate->has_pending_exception()) return NONE; + Handle<Object> setter; + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, setter, + Object::GetProperty(desc, set_n), + Maybe<PropertyAttributes>()); writable = isolate->factory()->ToBoolean(!setter->IsUndefined()); } @@ -3752,24 +3824,22 @@ Handle<Object> error = isolate->factory()->NewTypeError( "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); isolate->Throw(*error); - return NONE; + return maybe(NONE); } int attributes = NONE; if (!enumerable->BooleanValue()) attributes |= DONT_ENUM; if (!configurable->BooleanValue()) attributes |= 
DONT_DELETE; if (!writable->BooleanValue()) attributes |= READ_ONLY; - return static_cast<PropertyAttributes>(attributes); + return maybe(static_cast<PropertyAttributes>(attributes)); } -PropertyAttributes JSProxy::GetElementAttributeWithHandler( - Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - uint32_t index) { +Maybe<PropertyAttributes> JSProxy::GetElementAttributeWithHandler( + Handle<JSProxy> proxy, Handle<JSReceiver> receiver, uint32_t index) { Isolate* isolate = proxy->GetIsolate(); Handle<String> name = isolate->factory()->Uint32ToString(index); - return GetPropertyAttributeWithHandler(proxy, receiver, name); + return GetPropertyAttributesWithHandler(proxy, receiver, name); } @@ -3785,7 +3855,7 @@ } else { isolate->factory()->BecomeJSObject(proxy); } - ASSERT(proxy->IsJSObject()); + DCHECK(proxy->IsJSObject()); // Inherit identity, if it was present. if (hash->IsSmi()) { @@ -3795,41 +3865,37 @@ } -MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name, - Handle<Object> derived, - int argc, - Handle<Object> argv[]) { - Isolate* isolate = GetIsolate(); - Handle<Object> handler(this->handler(), isolate); +MaybeHandle<Object> JSProxy::CallTrap(Handle<JSProxy> proxy, + const char* name, + Handle<Object> derived, + int argc, + Handle<Object> argv[]) { + Isolate* isolate = proxy->GetIsolate(); + Handle<Object> handler(proxy->handler(), isolate); Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name); - Handle<Object> trap(v8::internal::GetProperty(isolate, handler, trap_name)); - if (isolate->has_pending_exception()) return trap; + Handle<Object> trap; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, trap, + Object::GetPropertyOrElement(handler, trap_name), + Object); if (trap->IsUndefined()) { if (derived.is_null()) { Handle<Object> args[] = { handler, trap_name }; Handle<Object> error = isolate->factory()->NewTypeError( "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return 
Handle<Object>(); + return isolate->Throw<Object>(error); } trap = Handle<Object>(derived); } - bool threw; - return Execution::Call(isolate, trap, handler, argc, argv, &threw); -} - - -// TODO(mstarzinger): Temporary wrapper until handlified. -static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) { - CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map); + return Execution::Call(isolate, trap, handler, argc, argv); } void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) { - ASSERT(object->map()->inobject_properties() == map->inobject_properties()); + DCHECK(object->map()->inobject_properties() == map->inobject_properties()); ElementsKind obj_kind = object->map()->elements_kind(); ElementsKind map_kind = map->elements_kind(); if (map_kind != obj_kind) { @@ -3843,39 +3909,41 @@ } else { TransitionElementsKind(object, to_kind); } - map = MapAsElementsKind(map, to_kind); + map = Map::AsElementsKind(map, to_kind); } JSObject::MigrateToMap(object, map); } void JSObject::MigrateInstance(Handle<JSObject> object) { - // Converting any field to the most specific type will cause the - // GeneralizeFieldRepresentation algorithm to create the most general existing - // transition that matches the object. This achieves what is needed. 
Handle<Map> original_map(object->map()); - GeneralizeFieldRepresentation( - object, 0, Representation::None(), ALLOW_AS_CONSTANT); - object->map()->set_migration_target(true); + Handle<Map> map = Map::Update(original_map); + map->set_migration_target(true); + MigrateToMap(object, map); if (FLAG_trace_migration) { - object->PrintInstanceMigration(stdout, *original_map, object->map()); + object->PrintInstanceMigration(stdout, *original_map, *map); } } -Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) { - Handle<Map> original_map(object->map()); - Handle<Map> new_map = Map::CurrentMapForDeprecatedInternal(original_map); - if (new_map.is_null()) return Handle<Object>(); +// static +bool JSObject::TryMigrateInstance(Handle<JSObject> object) { + Isolate* isolate = object->GetIsolate(); + DisallowDeoptimization no_deoptimization(isolate); + Handle<Map> original_map(object->map(), isolate); + Handle<Map> new_map; + if (!Map::TryUpdate(original_map).ToHandle(&new_map)) { + return false; + } JSObject::MigrateToMap(object, new_map); if (FLAG_trace_migration) { object->PrintInstanceMigration(stdout, *original_map, object->map()); } - return object; + return true; } -Handle<Object> JSObject::SetPropertyUsingTransition( +MaybeHandle<Object> JSObject::SetPropertyUsingTransition( Handle<JSObject> object, LookupResult* lookup, Handle<Name> name, @@ -3884,97 +3952,101 @@ Handle<Map> transition_map(lookup->GetTransitionTarget()); int descriptor = transition_map->LastAdded(); - DescriptorArray* descriptors = transition_map->instance_descriptors(); + Handle<DescriptorArray> descriptors(transition_map->instance_descriptors()); PropertyDetails details = descriptors->GetDetails(descriptor); if (details.type() == CALLBACKS || attributes != details.attributes()) { - // AddProperty will either normalize the object, or create a new fast copy - // of the map. If we get a fast copy of the map, all field representations - // will be tagged since the transition is omitted. 
- return JSObject::AddProperty( - object, name, value, attributes, SLOPPY, + // AddPropertyInternal will either normalize the object, or create a new + // fast copy of the map. If we get a fast copy of the map, all field + // representations will be tagged since the transition is omitted. + return JSObject::AddPropertyInternal( + object, name, value, attributes, JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED, - JSReceiver::OMIT_EXTENSIBILITY_CHECK, - JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION); + JSReceiver::OMIT_EXTENSIBILITY_CHECK, OMIT_TRANSITION); } // Keep the target CONSTANT if the same value is stored. // TODO(verwaest): Also support keeping the placeholder // (value->IsUninitialized) as constant. - if (!value->FitsRepresentation(details.representation()) || - (details.type() == CONSTANT && - descriptors->GetValue(descriptor) != *value)) { - transition_map = Map::GeneralizeRepresentation(transition_map, - descriptor, value->OptimalRepresentation(), FORCE_FIELD); + if (!lookup->CanHoldValue(value)) { + Representation field_representation = value->OptimalRepresentation(); + Handle<HeapType> field_type = value->OptimalType( + lookup->isolate(), field_representation); + transition_map = Map::GeneralizeRepresentation( + transition_map, descriptor, + field_representation, field_type, FORCE_FIELD); } - JSObject::MigrateToMap(object, transition_map); + JSObject::MigrateToNewProperty(object, transition_map, value); + return value; +} + + +void JSObject::MigrateToNewProperty(Handle<JSObject> object, + Handle<Map> map, + Handle<Object> value) { + JSObject::MigrateToMap(object, map); + if (map->GetLastDescriptorDetails().type() != FIELD) return; + object->WriteToField(map->LastAdded(), *value); +} + + +void JSObject::WriteToField(int descriptor, Object* value) { + DisallowHeapAllocation no_gc; - // Reload. 
- descriptors = transition_map->instance_descriptors(); - details = descriptors->GetDetails(descriptor); + DescriptorArray* desc = map()->instance_descriptors(); + PropertyDetails details = desc->GetDetails(descriptor); - if (details.type() != FIELD) return value; + DCHECK(details.type() == FIELD); - int field_index = descriptors->GetFieldIndex(descriptor); + FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor); if (details.representation().IsDouble()) { // Nothing more to be done. - if (value->IsUninitialized()) return value; - HeapNumber* box = HeapNumber::cast(object->RawFastPropertyAt(field_index)); + if (value->IsUninitialized()) return; + HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index)); + DCHECK(box->IsMutableHeapNumber()); box->set_value(value->Number()); } else { - object->FastPropertyAtPut(field_index, *value); + FastPropertyAtPut(index, value); } - - return value; } -static void SetPropertyToField(LookupResult* lookup, - Handle<Name> name, - Handle<Object> value) { - Representation representation = lookup->representation(); - if (!value->FitsRepresentation(representation) || - lookup->type() == CONSTANT) { +void JSObject::SetPropertyToField(LookupResult* lookup, Handle<Object> value) { + if (lookup->type() == CONSTANT || !lookup->CanHoldValue(value)) { + Representation field_representation = value->OptimalRepresentation(); + Handle<HeapType> field_type = value->OptimalType( + lookup->isolate(), field_representation); JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()), lookup->GetDescriptorIndex(), - value->OptimalRepresentation(), - FORCE_FIELD); - DescriptorArray* desc = lookup->holder()->map()->instance_descriptors(); - int descriptor = lookup->GetDescriptorIndex(); - representation = desc->GetDetails(descriptor).representation(); + field_representation, field_type); } - - if (representation.IsDouble()) { - HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt( - 
lookup->GetFieldIndex().field_index())); - storage->set_value(value->Number()); - return; - } - - lookup->holder()->FastPropertyAtPut( - lookup->GetFieldIndex().field_index(), *value); + lookup->holder()->WriteToField(lookup->GetDescriptorIndex(), *value); } -static void ConvertAndSetLocalProperty(LookupResult* lookup, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes) { +void JSObject::ConvertAndSetOwnProperty(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { Handle<JSObject> object(lookup->holder()); - if (object->TooManyFastProperties()) { + if (object->map()->TooManyFastProperties(Object::MAY_BE_STORE_FROM_KEYED)) { JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + } else if (object->map()->is_prototype_map()) { + JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0); } if (!object->HasFastProperties()) { ReplaceSlowProperty(object, name, value, attributes); + ReoptimizeIfPrototype(object); return; } int descriptor_index = lookup->GetDescriptorIndex(); if (lookup->GetAttributes() == attributes) { - JSObject::GeneralizeFieldRepresentation( - object, descriptor_index, Representation::Tagged(), FORCE_FIELD); + JSObject::GeneralizeFieldRepresentation(object, descriptor_index, + Representation::Tagged(), + HeapType::Any(lookup->isolate())); } else { Handle<Map> old_map(object->map()); Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map, @@ -3982,170 +4054,52 @@ JSObject::MigrateToMap(object, new_map); } - DescriptorArray* descriptors = object->map()->instance_descriptors(); - int index = descriptors->GetDetails(descriptor_index).field_index(); - object->FastPropertyAtPut(index, *value); + object->WriteToField(descriptor_index, *value); } -static void SetPropertyToFieldWithAttributes(LookupResult* lookup, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes) { +void 
JSObject::SetPropertyToFieldWithAttributes(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { if (lookup->GetAttributes() == attributes) { if (value->IsUninitialized()) return; - SetPropertyToField(lookup, name, value); + SetPropertyToField(lookup, value); } else { - ConvertAndSetLocalProperty(lookup, name, value, attributes); + ConvertAndSetOwnProperty(lookup, name, value, attributes); } } -Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object, - LookupResult* lookup, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_mode) { - Isolate* isolate = object->GetIsolate(); - - // Make sure that the top context does not change when doing callbacks or - // interceptor calls. - AssertNoContextChange ncc(isolate); - - // Optimization for 2-byte strings often used as keys in a decompression - // dictionary. We internalize these short keys to avoid constantly - // reallocating them. - if (name->IsString() && !name->IsInternalizedString() && - Handle<String>::cast(name)->length() <= 2) { - name = isolate->factory()->InternalizeString(Handle<String>::cast(name)); - } - - // Check access rights if needed. 
- if (object->IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) { - return SetPropertyWithFailedAccessCheck(object, lookup, name, value, - true, strict_mode); - } - } - - if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return value; - ASSERT(proto->IsJSGlobalObject()); - return SetPropertyForResult(Handle<JSObject>::cast(proto), - lookup, name, value, attributes, strict_mode, store_mode); - } - - ASSERT(!lookup->IsFound() || lookup->holder() == *object || - lookup->holder()->map()->is_hidden_prototype()); - - if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) { - bool done = false; - Handle<Object> result_object = SetPropertyViaPrototypes( - object, name, value, attributes, strict_mode, &done); - if (done) return result_object; - } - - if (!lookup->IsFound()) { - // Neither properties nor transitions found. - return AddProperty( - object, name, value, attributes, strict_mode, store_mode); - } - - if (lookup->IsProperty() && lookup->IsReadOnly()) { - if (strict_mode == STRICT) { - Handle<Object> args[] = { name, object }; - Handle<Object> error = isolate->factory()->NewTypeError( - "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); - } else { - return value; - } - } - - Handle<Object> old_value = isolate->factory()->the_hole_value(); - bool is_observed = object->map()->is_observed() && - *name != isolate->heap()->hidden_string(); - if (is_observed && lookup->IsDataProperty()) { - old_value = Object::GetProperty(object, name); - CHECK_NOT_EMPTY_HANDLE(isolate, old_value); - } - - // This is a real property that is not read-only, or it is a - // transition or null descriptor and there are no setters in the prototypes. 
- Handle<Object> result = value; - switch (lookup->type()) { - case NORMAL: - SetNormalizedProperty(handle(lookup->holder()), lookup, value); - break; - case FIELD: - SetPropertyToField(lookup, name, value); - break; - case CONSTANT: - // Only replace the constant if necessary. - if (*value == lookup->GetConstant()) return value; - SetPropertyToField(lookup, name, value); - break; - case CALLBACKS: { - Handle<Object> callback_object(lookup->GetCallbackObject(), isolate); - return SetPropertyWithCallback(object, callback_object, name, value, - handle(lookup->holder()), strict_mode); - } - case INTERCEPTOR: - result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value, - attributes, strict_mode); - break; - case TRANSITION: - result = SetPropertyUsingTransition(handle(lookup->holder()), lookup, - name, value, attributes); - break; - case HANDLER: - case NONEXISTENT: - UNREACHABLE(); - } - - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); - - if (is_observed) { - if (lookup->IsTransition()) { - EnqueueChangeRecord(object, "add", name, old_value); - } else { - LookupResult new_lookup(isolate); - object->LocalLookup(*name, &new_lookup, true); - if (new_lookup.IsDataProperty()) { - Handle<Object> new_value = Object::GetProperty(object, name); - CHECK_NOT_EMPTY_HANDLE(isolate, new_value); - if (!new_value->SameValue(*old_value)) { - EnqueueChangeRecord(object, "update", name, old_value); - } - } - } - } - - return result; +void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { +#ifdef DEBUG + uint32_t index; + DCHECK(!object->IsJSProxy()); + DCHECK(!name->AsArrayIndex(&index)); + LookupIterator it(object, name, LookupIterator::CHECK_OWN_REAL); + Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it); + DCHECK(maybe.has_value); + DCHECK(!it.IsFound()); + DCHECK(object->map()->is_extensible()); +#endif + SetOwnPropertyIgnoreAttributes(object, name, value, 
attributes, + OMIT_EXTENSIBILITY_CHECK).Check(); } -// Set a real local property, even if it is READ_ONLY. If the property is not -// present, add it with attributes NONE. This code is an exact clone of -// SetProperty, with the check for IsReadOnly and the check for a -// callback setter removed. The two lines looking up the LookupResult -// result are also added. If one of the functions is changed, the other -// should be. -// Note that this method cannot be used to set the prototype of a function -// because ConvertDescriptorToField() which is called in "case CALLBACKS:" -// doesn't handle function prototypes correctly. -Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes( +// Reconfigures a property to a data property with attributes, even if it is not +// reconfigurable. +MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes( Handle<JSObject> object, Handle<Name> name, Handle<Object> value, PropertyAttributes attributes, - ValueType value_type, - StoreMode mode, - ExtensibilityCheck extensibility_check) { + ExtensibilityCheck extensibility_check, + StoreFromKeyed store_from_keyed, + ExecutableAccessorInfoHandling handling) { + DCHECK(!value->IsTheHole()); Isolate* isolate = object->GetIsolate(); // Make sure that the top context does not change when doing callbacks or @@ -4153,30 +4107,31 @@ AssertNoContextChange ncc(isolate); LookupResult lookup(isolate); - object->LocalLookup(*name, &lookup, true); + object->LookupOwn(name, &lookup, true); if (!lookup.IsFound()) { object->map()->LookupTransition(*object, *name, &lookup); } // Check access rights if needed. 
if (object->IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) { - return SetPropertyWithFailedAccessCheck(object, &lookup, name, value, - false, SLOPPY); + if (!isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) { + LookupIterator it(object, name, LookupIterator::CHECK_OWN); + return SetPropertyWithFailedAccessCheck(&it, value, SLOPPY); } } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return value; - ASSERT(proto->IsJSGlobalObject()); - return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto), - name, value, attributes, value_type, mode, extensibility_check); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return value; + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return SetOwnPropertyIgnoreAttributes( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), name, + value, attributes, extensibility_check); } - if (lookup.IsFound() && - (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) { - object->LocalLookupRealNamedProperty(*name, &lookup); + if (lookup.IsInterceptor() || + (lookup.IsDescriptorOrDictionary() && lookup.type() == CALLBACKS)) { + object->LookupOwnRealNamedProperty(name, &lookup); } // Check for accessor in prototype chain removed here in clone. @@ -4185,8 +4140,8 @@ TransitionFlag flag = lookup.IsFound() ? OMIT_TRANSITION : INSERT_TRANSITION; // Neither properties nor transitions found. 
- return AddProperty(object, name, value, attributes, SLOPPY, - MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode, flag); + return AddPropertyInternal(object, name, value, attributes, + store_from_keyed, extensibility_check, flag); } Handle<Object> old_value = isolate->factory()->the_hole_value(); @@ -4195,54 +4150,94 @@ *name != isolate->heap()->hidden_string(); if (is_observed && lookup.IsProperty()) { if (lookup.IsDataProperty()) { - old_value = Object::GetProperty(object, name); - CHECK_NOT_EMPTY_HANDLE(isolate, old_value); + old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked(); } old_attributes = lookup.GetAttributes(); } + bool executed_set_prototype = false; + // Check of IsReadOnly removed from here in clone. - switch (lookup.type()) { - case NORMAL: - ReplaceSlowProperty(object, name, value, attributes); - break; - case FIELD: - SetPropertyToFieldWithAttributes(&lookup, name, value, attributes); - break; - case CONSTANT: - // Only replace the constant if necessary. - if (lookup.GetAttributes() != attributes || - *value != lookup.GetConstant()) { + if (lookup.IsTransition()) { + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + SetPropertyUsingTransition( + handle(lookup.holder()), &lookup, name, value, attributes), + Object); + } else { + switch (lookup.type()) { + case NORMAL: + ReplaceSlowProperty(object, name, value, attributes); + break; + case FIELD: SetPropertyToFieldWithAttributes(&lookup, name, value, attributes); + break; + case CONSTANT: + // Only replace the constant if necessary. 
+ if (lookup.GetAttributes() != attributes || + *value != lookup.GetConstant()) { + SetPropertyToFieldWithAttributes(&lookup, name, value, attributes); + } + break; + case CALLBACKS: + { + Handle<Object> callback(lookup.GetCallbackObject(), isolate); + if (callback->IsExecutableAccessorInfo() && + handling == DONT_FORCE_FIELD) { + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, JSObject::SetPropertyWithAccessor( + object, name, value, handle(lookup.holder()), + callback, STRICT), + Object); + + if (attributes != lookup.GetAttributes()) { + Handle<ExecutableAccessorInfo> new_data = + Accessors::CloneAccessor( + isolate, Handle<ExecutableAccessorInfo>::cast(callback)); + new_data->set_property_attributes(attributes); + if (attributes & READ_ONLY) { + // This way we don't have to introduce a lookup to the setter, + // simply make it unavailable to reflect the attributes. + new_data->clear_setter(); + } + + SetPropertyCallback(object, name, new_data, attributes); + } + if (is_observed) { + // If we are setting the prototype of a function and are observed, + // don't send change records because the prototype handles that + // itself. 
+ executed_set_prototype = object->IsJSFunction() && + String::Equals(isolate->factory()->prototype_string(), + Handle<String>::cast(name)) && + Handle<JSFunction>::cast(object)->should_have_prototype(); + } + } else { + ConvertAndSetOwnProperty(&lookup, name, value, attributes); + } + break; } - break; - case CALLBACKS: - ConvertAndSetLocalProperty(&lookup, name, value, attributes); - break; - case TRANSITION: { - Handle<Object> result = SetPropertyUsingTransition( - handle(lookup.holder()), &lookup, name, value, attributes); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); - break; + case NONEXISTENT: + case HANDLER: + case INTERCEPTOR: + UNREACHABLE(); } - case NONEXISTENT: - case HANDLER: - case INTERCEPTOR: - UNREACHABLE(); } - if (is_observed) { + if (is_observed && !executed_set_prototype) { if (lookup.IsTransition()) { EnqueueChangeRecord(object, "add", name, old_value); } else if (old_value->IsTheHole()) { EnqueueChangeRecord(object, "reconfigure", name, old_value); } else { LookupResult new_lookup(isolate); - object->LocalLookup(*name, &new_lookup, true); + object->LookupOwn(name, &new_lookup, true); bool value_changed = false; if (new_lookup.IsDataProperty()) { - Handle<Object> new_value = Object::GetProperty(object, name); - CHECK_NOT_EMPTY_HANDLE(isolate, new_value); + Handle<Object> new_value = + Object::GetPropertyOrElement(object, name).ToHandleChecked(); value_changed = !old_value->SameValue(*new_value); } if (new_lookup.GetAttributes() != old_attributes) { @@ -4258,182 +4253,129 @@ } -PropertyAttributes JSObject::GetPropertyAttributePostInterceptor( - Handle<JSObject> object, - Handle<JSObject> receiver, - Handle<Name> name, - bool continue_search) { - // Check local property, ignore interceptor. 
- Isolate* isolate = object->GetIsolate(); - LookupResult result(isolate); - object->LocalLookupRealNamedProperty(*name, &result); - if (result.IsFound()) return result.GetAttributes(); - - if (continue_search) { - // Continue searching via the prototype chain. - Handle<Object> proto(object->GetPrototype(), isolate); - if (!proto->IsNull()) { - return JSReceiver::GetPropertyAttributeWithReceiver( - Handle<JSObject>::cast(proto), receiver, name); - } - } - return ABSENT; -} - - -PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( - Handle<JSObject> object, - Handle<JSObject> receiver, - Handle<Name> name, - bool continue_search) { +Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor( + Handle<JSObject> holder, + Handle<Object> receiver, + Handle<Name> name) { // TODO(rossberg): Support symbols in the API. - if (name->IsSymbol()) return ABSENT; + if (name->IsSymbol()) return maybe(ABSENT); - Isolate* isolate = object->GetIsolate(); + Isolate* isolate = holder->GetIsolate(); HandleScope scope(isolate); // Make sure that the top context does not change when doing // callbacks or interceptor calls. 
AssertNoContextChange ncc(isolate); - Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor()); + Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor()); PropertyCallbackArguments args( - isolate, interceptor->data(), *receiver, *object); - if (!interceptor->query()->IsUndefined()) { - v8::NamedPropertyQueryCallback query = - v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query()); - LOG(isolate, - ApiNamedPropertyAccess("interceptor-named-has", *object, *name)); - v8::Handle<v8::Integer> result = - args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name))); - if (!result.IsEmpty()) { - ASSERT(result->IsInt32()); - return static_cast<PropertyAttributes>(result->Int32Value()); - } - } else if (!interceptor->getter()->IsUndefined()) { - v8::NamedPropertyGetterCallback getter = - v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter()); - LOG(isolate, - ApiNamedPropertyAccess("interceptor-named-get-has", *object, *name)); - v8::Handle<v8::Value> result = - args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name))); - if (!result.IsEmpty()) return DONT_ENUM; - } - return GetPropertyAttributePostInterceptor( - object, receiver, name, continue_search); -} - - -PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver( - Handle<JSReceiver> object, - Handle<JSReceiver> receiver, - Handle<Name> key) { - uint32_t index = 0; - if (object->IsJSObject() && key->AsArrayIndex(&index)) { - return JSObject::GetElementAttributeWithReceiver( - Handle<JSObject>::cast(object), receiver, index, true); - } - // Named property. - LookupResult lookup(object->GetIsolate()); - object->Lookup(*key, &lookup); - return GetPropertyAttributeForResult(object, receiver, &lookup, key, true); -} - - -PropertyAttributes JSReceiver::GetPropertyAttributeForResult( - Handle<JSReceiver> object, - Handle<JSReceiver> receiver, - LookupResult* lookup, - Handle<Name> name, - bool continue_search) { - // Check access rights if needed. 
- if (object->IsAccessCheckNeeded()) { - Heap* heap = object->GetHeap(); - Handle<JSObject> obj = Handle<JSObject>::cast(object); - if (!heap->isolate()->MayNamedAccessWrapper(obj, name, v8::ACCESS_HAS)) { - return JSObject::GetPropertyAttributeWithFailedAccessCheck( - obj, lookup, name, continue_search); - } - } - if (lookup->IsFound()) { - switch (lookup->type()) { - case NORMAL: // fall through - case FIELD: - case CONSTANT: - case CALLBACKS: - return lookup->GetAttributes(); - case HANDLER: { - return JSProxy::GetPropertyAttributeWithHandler( - handle(lookup->proxy()), receiver, name); - } - case INTERCEPTOR: - return JSObject::GetPropertyAttributeWithInterceptor( - handle(lookup->holder()), - Handle<JSObject>::cast(receiver), - name, - continue_search); - case TRANSITION: - case NONEXISTENT: - UNREACHABLE(); + isolate, interceptor->data(), *receiver, *holder); + if (!interceptor->query()->IsUndefined()) { + v8::NamedPropertyQueryCallback query = + v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query()); + LOG(isolate, + ApiNamedPropertyAccess("interceptor-named-has", *holder, *name)); + v8::Handle<v8::Integer> result = + args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name))); + if (!result.IsEmpty()) { + DCHECK(result->IsInt32()); + return maybe(static_cast<PropertyAttributes>(result->Int32Value())); } + } else if (!interceptor->getter()->IsUndefined()) { + v8::NamedPropertyGetterCallback getter = + v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter()); + LOG(isolate, + ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name)); + v8::Handle<v8::Value> result = + args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name))); + if (!result.IsEmpty()) return maybe(DONT_ENUM); } - return ABSENT; + + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>()); + return maybe(ABSENT); } -PropertyAttributes JSReceiver::GetLocalPropertyAttribute( +Maybe<PropertyAttributes> 
JSReceiver::GetOwnPropertyAttributes( Handle<JSReceiver> object, Handle<Name> name) { // Check whether the name is an array index. uint32_t index = 0; if (object->IsJSObject() && name->AsArrayIndex(&index)) { - return GetLocalElementAttribute(object, index); + return GetOwnElementAttribute(object, index); } - // Named property. - LookupResult lookup(object->GetIsolate()); - object->LocalLookup(*name, &lookup, true); - return GetPropertyAttributeForResult(object, object, &lookup, name, false); + LookupIterator it(object, name, LookupIterator::CHECK_OWN); + return GetPropertyAttributes(&it); } -PropertyAttributes JSObject::GetElementAttributeWithReceiver( - Handle<JSObject> object, - Handle<JSReceiver> receiver, - uint32_t index, - bool continue_search) { +Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes( + LookupIterator* it) { + for (; it->IsFound(); it->Next()) { + switch (it->state()) { + case LookupIterator::NOT_FOUND: + UNREACHABLE(); + case LookupIterator::JSPROXY: + return JSProxy::GetPropertyAttributesWithHandler( + it->GetHolder<JSProxy>(), it->GetReceiver(), it->name()); + case LookupIterator::INTERCEPTOR: { + Maybe<PropertyAttributes> result = + JSObject::GetPropertyAttributesWithInterceptor( + it->GetHolder<JSObject>(), it->GetReceiver(), it->name()); + if (!result.has_value) return result; + if (result.value != ABSENT) return result; + break; + } + case LookupIterator::ACCESS_CHECK: + if (it->HasAccess(v8::ACCESS_HAS)) break; + return JSObject::GetPropertyAttributesWithFailedAccessCheck(it); + case LookupIterator::PROPERTY: + if (it->HasProperty()) { + return maybe(it->property_details().attributes()); + } + break; + } + } + return maybe(ABSENT); +} + + +Maybe<PropertyAttributes> JSObject::GetElementAttributeWithReceiver( + Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index, + bool check_prototype) { Isolate* isolate = object->GetIsolate(); // Check access rights if needed. 
if (object->IsAccessCheckNeeded()) { - if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS); - return ABSENT; + if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS); + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>()); + return maybe(ABSENT); } } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return ABSENT; - ASSERT(proto->IsJSGlobalObject()); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return maybe(ABSENT); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); return JSObject::GetElementAttributeWithReceiver( - Handle<JSObject>::cast(proto), receiver, index, continue_search); + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver, + index, check_prototype); } // Check for lookup interceptor except when bootstrapping. 
if (object->HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) { return JSObject::GetElementAttributeWithInterceptor( - object, receiver, index, continue_search); + object, receiver, index, check_prototype); } return GetElementAttributeWithoutInterceptor( - object, receiver, index, continue_search); + object, receiver, index, check_prototype); } -PropertyAttributes JSObject::GetElementAttributeWithInterceptor( - Handle<JSObject> object, - Handle<JSReceiver> receiver, - uint32_t index, - bool continue_search) { +Maybe<PropertyAttributes> JSObject::GetElementAttributeWithInterceptor( + Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index, + bool check_prototype) { Isolate* isolate = object->GetIsolate(); HandleScope scope(isolate); @@ -4451,7 +4393,7 @@ ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index)); v8::Handle<v8::Integer> result = args.Call(query, index); if (!result.IsEmpty()) - return static_cast<PropertyAttributes>(result->Int32Value()); + return maybe(static_cast<PropertyAttributes>(result->Int32Value())); } else if (!interceptor->getter()->IsUndefined()) { v8::IndexedPropertyGetterCallback getter = v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter()); @@ -4459,85 +4401,66 @@ ApiIndexedPropertyAccess( "interceptor-indexed-get-has", *object, index)); v8::Handle<v8::Value> result = args.Call(getter, index); - if (!result.IsEmpty()) return NONE; + if (!result.IsEmpty()) return maybe(NONE); } return GetElementAttributeWithoutInterceptor( - object, receiver, index, continue_search); + object, receiver, index, check_prototype); } -PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor( - Handle<JSObject> object, - Handle<JSReceiver> receiver, - uint32_t index, - bool continue_search) { +Maybe<PropertyAttributes> JSObject::GetElementAttributeWithoutInterceptor( + Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index, + bool check_prototype) { PropertyAttributes attr = 
object->GetElementsAccessor()->GetAttributes( - *receiver, *object, index); - if (attr != ABSENT) return attr; + receiver, object, index); + if (attr != ABSENT) return maybe(attr); // Handle [] on String objects. if (object->IsStringObjectWithCharacterAt(index)) { - return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); + return maybe(static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE)); } - if (!continue_search) return ABSENT; + if (!check_prototype) return maybe(ABSENT); - Handle<Object> proto(object->GetPrototype(), object->GetIsolate()); - if (proto->IsJSProxy()) { + PrototypeIterator iter(object->GetIsolate(), object); + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) { // We need to follow the spec and simulate a call to [[GetOwnProperty]]. return JSProxy::GetElementAttributeWithHandler( - Handle<JSProxy>::cast(proto), receiver, index); + Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver, + index); } - if (proto->IsNull()) return ABSENT; + if (iter.IsAtEnd()) return maybe(ABSENT); return GetElementAttributeWithReceiver( - Handle<JSObject>::cast(proto), receiver, index, true); + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver, + index, true); } -Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache, - Handle<JSObject> obj, - PropertyNormalizationMode mode) { - int index = obj->map()->Hash() % kEntries; - Handle<Object> result = handle(cache->get(index), cache->GetIsolate()); - if (result->IsMap() && - Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(), - mode)) { -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Handle<Map>::cast(result)->SharedMapVerify(); - } -#endif -#ifdef ENABLE_SLOW_ASSERTS - if (FLAG_enable_slow_asserts) { - // The cached map should match newly created normalized map bit-by-bit, - // except for the code cache, which can contain some ics which can be - // applied to the shared map. 
- Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode, - SHARED_NORMALIZED_MAP); +Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) { + Handle<FixedArray> array( + isolate->factory()->NewFixedArray(kEntries, TENURED)); + return Handle<NormalizedMapCache>::cast(array); +} - ASSERT(memcmp(fresh->address(), - Handle<Map>::cast(result)->address(), - Map::kCodeCacheOffset) == 0); - STATIC_ASSERT(Map::kDependentCodeOffset == - Map::kCodeCacheOffset + kPointerSize); - int offset = Map::kDependentCodeOffset + kPointerSize; - ASSERT(memcmp(fresh->address() + offset, - Handle<Map>::cast(result)->address() + offset, - Map::kSize - offset) == 0); - } -#endif - return Handle<Map>::cast(result); + +MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map, + PropertyNormalizationMode mode) { + DisallowHeapAllocation no_gc; + Object* value = FixedArray::get(GetIndex(fast_map)); + if (!value->IsMap() || + !Map::cast(value)->EquivalentToForNormalization(*fast_map, mode)) { + return MaybeHandle<Map>(); } + return handle(Map::cast(value)); +} - Isolate* isolate = cache->GetIsolate(); - Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode, - SHARED_NORMALIZED_MAP); - ASSERT(map->is_dictionary_map()); - cache->set(index, *map); - isolate->counters()->normalized_maps()->Increment(); - return map; +void NormalizedMapCache::Set(Handle<Map> fast_map, + Handle<Map> normalized_map) { + DisallowHeapAllocation no_gc; + DCHECK(normalized_map->is_dictionary_map()); + FixedArray::set(GetIndex(fast_map), *normalized_map); } @@ -4562,10 +4485,20 @@ int expected_additional_properties) { if (!object->HasFastProperties()) return; + Handle<Map> map(object->map()); + Handle<Map> new_map = Map::Normalize(map, mode); + + MigrateFastToSlow(object, new_map, expected_additional_properties); +} + + +void JSObject::MigrateFastToSlow(Handle<JSObject> object, + Handle<Map> new_map, + int expected_additional_properties) { // The global object is always normalized. 
- ASSERT(!object->IsGlobalObject()); + DCHECK(!object->IsGlobalObject()); // JSGlobalProxy must never be normalized - ASSERT(!object->IsJSGlobalProxy()); + DCHECK(!object->IsJSGlobalProxy()); Isolate* isolate = object->GetIsolate(); HandleScope scope(isolate); @@ -4580,7 +4513,7 @@ property_count += 2; // Make space for two more properties. } Handle<NameDictionary> dictionary = - isolate->factory()->NewNameDictionary(property_count); + NameDictionary::New(isolate, property_count); Handle<DescriptorArray> descs(map->instance_descriptors()); for (int i = 0; i < real_size; i++) { @@ -4591,16 +4524,22 @@ Handle<Object> value(descs->GetConstant(i), isolate); PropertyDetails d = PropertyDetails( details.attributes(), NORMAL, i + 1); - dictionary = NameDictionaryAdd(dictionary, key, value, d); + dictionary = NameDictionary::Add(dictionary, key, value, d); break; } case FIELD: { Handle<Name> key(descs->GetKey(i)); + FieldIndex index = FieldIndex::ForDescriptor(*map, i); Handle<Object> value( - object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate); + object->RawFastPropertyAt(index), isolate); + if (details.representation().IsDouble()) { + DCHECK(value->IsMutableHeapNumber()); + Handle<HeapNumber> old = Handle<HeapNumber>::cast(value); + value = isolate->factory()->NewHeapNumber(old->value()); + } PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, i + 1); - dictionary = NameDictionaryAdd(dictionary, key, value, d); + dictionary = NameDictionary::Add(dictionary, key, value, d); break; } case CALLBACKS: { @@ -4608,14 +4547,13 @@ Handle<Object> value(descs->GetCallbacksObject(i), isolate); PropertyDetails d = PropertyDetails( details.attributes(), CALLBACKS, i + 1); - dictionary = NameDictionaryAdd(dictionary, key, value, d); + dictionary = NameDictionary::Add(dictionary, key, value, d); break; } case INTERCEPTOR: break; case HANDLER: case NORMAL: - case TRANSITION: case NONEXISTENT: UNREACHABLE(); break; @@ -4625,27 +4563,25 @@ // Copy the next 
enumeration index from instance descriptor. dictionary->SetNextEnumerationIndex(real_size + 1); - Handle<NormalizedMapCache> cache( - isolate->context()->native_context()->normalized_map_cache()); - Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode); - ASSERT(new_map->is_dictionary_map()); - // From here on we cannot fail and we shouldn't GC anymore. DisallowHeapAllocation no_allocation; // Resize the object in the heap if necessary. int new_instance_size = new_map->instance_size(); int instance_size_delta = map->instance_size() - new_instance_size; - ASSERT(instance_size_delta >= 0); - Heap* heap = isolate->heap(); - heap->CreateFillerObjectAt(object->address() + new_instance_size, - instance_size_delta); - heap->AdjustLiveBytes(object->address(), - -instance_size_delta, - Heap::FROM_MUTATOR); + DCHECK(instance_size_delta >= 0); - object->set_map(*new_map); - map->NotifyLeafMapLayoutChange(); + if (instance_size_delta > 0) { + Heap* heap = isolate->heap(); + heap->CreateFillerObjectAt(object->address() + new_instance_size, + instance_size_delta); + heap->AdjustLiveBytes(object->address(), -instance_size_delta, + Heap::FROM_MUTATOR); + } + + // We are storing the new map using release store after creating a filler for + // the left-over space to avoid races with the sweeper thread. 
+ object->synchronized_set_map(*new_map); object->set_properties(*dictionary); @@ -4653,21 +4589,155 @@ #ifdef DEBUG if (FLAG_trace_normalization) { - PrintF("Object properties have been normalized:\n"); - object->Print(); + OFStream os(stdout); + os << "Object properties have been normalized:\n"; + object->Print(os); } #endif } -void JSObject::TransformToFastProperties(Handle<JSObject> object, - int unused_property_fields) { +void JSObject::MigrateSlowToFast(Handle<JSObject> object, + int unused_property_fields) { if (object->HasFastProperties()) return; - ASSERT(!object->IsGlobalObject()); - CALL_HEAP_FUNCTION_VOID( - object->GetIsolate(), - object->property_dictionary()->TransformPropertiesToFastFor( - *object, unused_property_fields)); + DCHECK(!object->IsGlobalObject()); + Isolate* isolate = object->GetIsolate(); + Factory* factory = isolate->factory(); + Handle<NameDictionary> dictionary(object->property_dictionary()); + + // Make sure we preserve dictionary representation if there are too many + // descriptors. + int number_of_elements = dictionary->NumberOfElements(); + if (number_of_elements > kMaxNumberOfDescriptors) return; + + if (number_of_elements != dictionary->NextEnumerationIndex()) { + NameDictionary::DoGenerateNewEnumerationIndices(dictionary); + } + + int instance_descriptor_length = 0; + int number_of_fields = 0; + + // Compute the length of the instance descriptor. + int capacity = dictionary->Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = dictionary->KeyAt(i); + if (dictionary->IsKey(k)) { + Object* value = dictionary->ValueAt(i); + PropertyType type = dictionary->DetailsAt(i).type(); + DCHECK(type != FIELD); + instance_descriptor_length++; + if (type == NORMAL && !value->IsJSFunction()) { + number_of_fields += 1; + } + } + } + + int inobject_props = object->map()->inobject_properties(); + + // Allocate new map. 
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map())); + new_map->set_dictionary_map(false); + + if (instance_descriptor_length == 0) { + DisallowHeapAllocation no_gc; + DCHECK_LE(unused_property_fields, inobject_props); + // Transform the object. + new_map->set_unused_property_fields(inobject_props); + object->synchronized_set_map(*new_map); + object->set_properties(isolate->heap()->empty_fixed_array()); + // Check that it really works. + DCHECK(object->HasFastProperties()); + return; + } + + // Allocate the instance descriptor. + Handle<DescriptorArray> descriptors = DescriptorArray::Allocate( + isolate, instance_descriptor_length); + + int number_of_allocated_fields = + number_of_fields + unused_property_fields - inobject_props; + if (number_of_allocated_fields < 0) { + // There is enough inobject space for all fields (including unused). + number_of_allocated_fields = 0; + unused_property_fields = inobject_props - number_of_fields; + } + + // Allocate the fixed array for the fields. + Handle<FixedArray> fields = factory->NewFixedArray( + number_of_allocated_fields); + + // Fill in the instance descriptor and the fields. + int current_offset = 0; + for (int i = 0; i < capacity; i++) { + Object* k = dictionary->KeyAt(i); + if (dictionary->IsKey(k)) { + Object* value = dictionary->ValueAt(i); + Handle<Name> key; + if (k->IsSymbol()) { + key = handle(Symbol::cast(k)); + } else { + // Ensure the key is a unique name before writing into the + // instance descriptor. 
+ key = factory->InternalizeString(handle(String::cast(k))); + } + + PropertyDetails details = dictionary->DetailsAt(i); + int enumeration_index = details.dictionary_index(); + PropertyType type = details.type(); + + if (value->IsJSFunction()) { + ConstantDescriptor d(key, + handle(value, isolate), + details.attributes()); + descriptors->Set(enumeration_index - 1, &d); + } else if (type == NORMAL) { + if (current_offset < inobject_props) { + object->InObjectPropertyAtPut(current_offset, + value, + UPDATE_WRITE_BARRIER); + } else { + int offset = current_offset - inobject_props; + fields->set(offset, value); + } + FieldDescriptor d(key, + current_offset++, + details.attributes(), + // TODO(verwaest): value->OptimalRepresentation(); + Representation::Tagged()); + descriptors->Set(enumeration_index - 1, &d); + } else if (type == CALLBACKS) { + CallbacksDescriptor d(key, + handle(value, isolate), + details.attributes()); + descriptors->Set(enumeration_index - 1, &d); + } else { + UNREACHABLE(); + } + } + } + DCHECK(current_offset == number_of_fields); + + descriptors->Sort(); + + DisallowHeapAllocation no_gc; + new_map->InitializeDescriptors(*descriptors); + new_map->set_unused_property_fields(unused_property_fields); + + // Transform the object. + object->synchronized_set_map(*new_map); + + object->set_properties(*fields); + DCHECK(object->IsJSObject()); + + // Check that it really works. 
+ DCHECK(object->HasFastProperties()); +} + + +void JSObject::ResetElements(Handle<JSObject> object) { + Heap* heap = object->GetIsolate()->heap(); + CHECK(object->map() != heap->sloppy_arguments_elements_map()); + object->set_elements(object->map()->GetInitialElements()); } @@ -4703,10 +4773,9 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements( Handle<JSObject> object) { - ASSERT(!object->HasExternalArrayElements() && + DCHECK(!object->HasExternalArrayElements() && !object->HasFixedTypedArrayElements()); Isolate* isolate = object->GetIsolate(); - Factory* factory = isolate->factory(); // Find the backing store. Handle<FixedArrayBase> array(FixedArrayBase::cast(object->elements())); @@ -4718,7 +4787,7 @@ } if (array->IsDictionary()) return Handle<SeededNumberDictionary>::cast(array); - ASSERT(object->HasFastSmiOrObjectElements() || + DCHECK(object->HasFastSmiOrObjectElements() || object->HasFastDoubleElements() || object->HasFastArgumentsElements()); // Compute the effective length and allocate a new backing store. 
@@ -4729,7 +4798,7 @@ int used_elements = 0; object->GetElementsCapacityAndUsage(&old_capacity, &used_elements); Handle<SeededNumberDictionary> dictionary = - factory->NewSeededNumberDictionary(used_elements); + SeededNumberDictionary::New(isolate, used_elements); dictionary = CopyFastElementsToDictionary(array, length, dictionary); @@ -4750,20 +4819,19 @@ #ifdef DEBUG if (FLAG_trace_normalization) { - PrintF("Object elements have been normalized:\n"); - object->Print(); + OFStream os(stdout); + os << "Object elements have been normalized:\n"; + object->Print(os); } #endif - ASSERT(object->HasDictionaryElements() || + DCHECK(object->HasDictionaryElements() || object->HasDictionaryArgumentsElements()); return dictionary; } -Smi* JSReceiver::GenerateIdentityHash() { - Isolate* isolate = GetIsolate(); - +static Smi* GenerateIdentityHash(Isolate* isolate) { int hash_value; int attempts = 0; do { @@ -4779,33 +4847,51 @@ void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) { + DCHECK(!object->IsJSGlobalProxy()); Isolate* isolate = object->GetIsolate(); SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash); } +template<typename ProxyType> +static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) { + Isolate* isolate = proxy->GetIsolate(); + + Handle<Object> maybe_hash(proxy->hash(), isolate); + if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash); + + Handle<Smi> hash(GenerateIdentityHash(isolate), isolate); + proxy->set_hash(*hash); + return hash; +} + + Object* JSObject::GetIdentityHash() { - Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string()); - return stored_value->IsSmi() ? 
stored_value : GetHeap()->undefined_value(); + DisallowHeapAllocation no_gc; + Isolate* isolate = GetIsolate(); + if (IsJSGlobalProxy()) { + return JSGlobalProxy::cast(this)->hash(); + } + Object* stored_value = + GetHiddenProperty(isolate->factory()->identity_hash_string()); + return stored_value->IsSmi() + ? stored_value + : isolate->heap()->undefined_value(); } -Handle<Object> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) { - Handle<Object> hash(object->GetIdentityHash(), object->GetIsolate()); - if (hash->IsSmi()) - return hash; +Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) { + if (object->IsJSGlobalProxy()) { + return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object)); + } Isolate* isolate = object->GetIsolate(); - hash = handle(object->GenerateIdentityHash(), isolate); - Handle<Object> result = SetHiddenProperty(object, - isolate->factory()->identity_hash_string(), hash); - - if (result->IsUndefined()) { - // Trying to get hash of detached proxy. 
- return handle(Smi::FromInt(0), isolate); - } + Handle<Object> maybe_hash(object->GetIdentityHash(), isolate); + if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash); + Handle<Smi> hash(GenerateIdentityHash(isolate), isolate); + SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash); return hash; } @@ -4815,35 +4901,30 @@ } -Handle<Object> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) { - Isolate* isolate = proxy->GetIsolate(); - - Handle<Object> hash(proxy->GetIdentityHash(), isolate); - if (hash->IsSmi()) - return hash; - - hash = handle(proxy->GenerateIdentityHash(), isolate); - proxy->set_hash(*hash); - return hash; +Handle<Smi> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) { + return GetOrCreateIdentityHashHelper(proxy); } -Object* JSObject::GetHiddenProperty(Name* key) { - ASSERT(key->IsUniqueName()); +Object* JSObject::GetHiddenProperty(Handle<Name> key) { + DisallowHeapAllocation no_gc; + DCHECK(key->IsUniqueName()); if (IsJSGlobalProxy()) { + // JSGlobalProxies store their hash internally. + DCHECK(*key != GetHeap()->identity_hash_string()); // For a proxy, use the prototype as target object. - Object* proxy_parent = GetPrototype(); + PrototypeIterator iter(GetIsolate(), this); // If the proxy is detached, return undefined. - if (proxy_parent->IsNull()) return GetHeap()->the_hole_value(); - ASSERT(proxy_parent->IsJSGlobalObject()); - return JSObject::cast(proxy_parent)->GetHiddenProperty(key); + if (iter.IsAtEnd()) return GetHeap()->the_hole_value(); + DCHECK(iter.GetCurrent()->IsJSGlobalObject()); + return JSObject::cast(iter.GetCurrent())->GetHiddenProperty(key); } - ASSERT(!IsJSGlobalProxy()); + DCHECK(!IsJSGlobalProxy()); Object* inline_value = GetHiddenPropertiesHashTable(); if (inline_value->IsSmi()) { // Handle inline-stored identity hash. 
- if (key == GetHeap()->identity_hash_string()) { + if (*key == GetHeap()->identity_hash_string()) { return inline_value; } else { return GetHeap()->the_hole_value(); @@ -4863,16 +4944,20 @@ Handle<Object> value) { Isolate* isolate = object->GetIsolate(); - ASSERT(key->IsUniqueName()); + DCHECK(key->IsUniqueName()); if (object->IsJSGlobalProxy()) { + // JSGlobalProxies store their hash internally. + DCHECK(*key != *isolate->factory()->identity_hash_string()); // For a proxy, use the prototype as target object. - Handle<Object> proxy_parent(object->GetPrototype(), isolate); + PrototypeIterator iter(isolate, object); // If the proxy is detached, return undefined. - if (proxy_parent->IsNull()) return isolate->factory()->undefined_value(); - ASSERT(proxy_parent->IsJSGlobalObject()); - return SetHiddenProperty(Handle<JSObject>::cast(proxy_parent), key, value); + if (iter.IsAtEnd()) return isolate->factory()->undefined_value(); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return SetHiddenProperty( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), key, + value); } - ASSERT(!object->IsJSGlobalProxy()); + DCHECK(!object->IsJSGlobalProxy()); Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate); @@ -4902,35 +4987,40 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) { Isolate* isolate = object->GetIsolate(); - ASSERT(key->IsUniqueName()); + DCHECK(key->IsUniqueName()); if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return; - ASSERT(proto->IsJSGlobalObject()); - return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return; + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return DeleteHiddenProperty( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), key); } Object* inline_value = 
object->GetHiddenPropertiesHashTable(); // We never delete (inline-stored) identity hashes. - ASSERT(*key != *isolate->factory()->identity_hash_string()); + DCHECK(*key != *isolate->factory()->identity_hash_string()); if (inline_value->IsUndefined() || inline_value->IsSmi()) return; Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value)); - ObjectHashTable::Put(hashtable, key, isolate->factory()->the_hole_value()); + bool was_present = false; + ObjectHashTable::Remove(hashtable, key, &was_present); } bool JSObject::HasHiddenProperties(Handle<JSObject> object) { Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string(); - return GetPropertyAttributePostInterceptor( - object, object, hidden, false) != ABSENT; + LookupIterator it(object, hidden, LookupIterator::CHECK_OWN_REAL); + Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it); + // Cannot get an exception since the hidden_string isn't accessible to JS. + DCHECK(maybe.has_value); + return maybe.value != ABSENT; } Object* JSObject::GetHiddenPropertiesHashTable() { - ASSERT(!IsJSGlobalProxy()); + DCHECK(!IsJSGlobalProxy()); if (HasFastProperties()) { // If the object has fast properties, check whether the first slot // in the descriptor array matches the hidden string. Since the @@ -4941,11 +5031,12 @@ int sorted_index = descriptors->GetSortedKeyIndex(0); if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() && sorted_index < map()->NumberOfOwnDescriptors()) { - ASSERT(descriptors->GetType(sorted_index) == FIELD); - ASSERT(descriptors->GetDetails(sorted_index).representation(). + DCHECK(descriptors->GetType(sorted_index) == FIELD); + DCHECK(descriptors->GetDetails(sorted_index).representation(). 
IsCompatibleForLoad(Representation::Tagged())); - return this->RawFastPropertyAt( - descriptors->GetFieldIndex(sorted_index)); + FieldIndex index = FieldIndex::ForDescriptor(this->map(), + sorted_index); + return this->RawFastPropertyAt(index); } else { return GetHeap()->undefined_value(); } @@ -4953,13 +5044,15 @@ return GetHeap()->undefined_value(); } } else { - PropertyAttributes attributes; - // You can't install a getter on a property indexed by the hidden string, - // so we can be sure that GetLocalPropertyPostInterceptor returns a real - // object. - return GetLocalPropertyPostInterceptor(this, - GetHeap()->hidden_string(), - &attributes)->ToObjectUnchecked(); + Isolate* isolate = GetIsolate(); + LookupResult result(isolate); + LookupOwnRealNamedProperty(isolate->factory()->hidden_string(), &result); + if (result.IsFound()) { + DCHECK(result.IsNormal()); + DCHECK(result.holder() == this); + return GetNormalizedProperty(&result); + } + return GetHeap()->undefined_value(); } } @@ -4973,9 +5066,8 @@ return Handle<ObjectHashTable>::cast(inline_value); } - Handle<ObjectHashTable> hashtable = isolate->factory()->NewObjectHashTable( - kInitialCapacity, - USE_CUSTOM_MINIMUM_CAPACITY); + Handle<ObjectHashTable> hashtable = ObjectHashTable::New( + isolate, kInitialCapacity, USE_CUSTOM_MINIMUM_CAPACITY); if (inline_value->IsSmi()) { // We were storing the identity hash inline and now allocated an actual @@ -4985,14 +5077,9 @@ inline_value); } - JSObject::SetLocalPropertyIgnoreAttributes( - object, - isolate->factory()->hidden_string(), - hashtable, - DONT_ENUM, - OPTIMAL_REPRESENTATION, - ALLOW_AS_CONSTANT, - OMIT_EXTENSIBILITY_CHECK); + JSObject::SetOwnPropertyIgnoreAttributes( + object, isolate->factory()->hidden_string(), + hashtable, DONT_ENUM).Assert(); return hashtable; } @@ -5000,13 +5087,13 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object, Handle<Object> value) { - ASSERT(!object->IsJSGlobalProxy()); + 
DCHECK(!object->IsJSGlobalProxy()); Isolate* isolate = object->GetIsolate(); // We can store the identity hash inline iff there is no backing store // for hidden properties yet. - ASSERT(JSObject::HasHiddenProperties(object) != value->IsSmi()); + DCHECK(JSObject::HasHiddenProperties(object) != value->IsSmi()); if (object->HasFastProperties()) { // If the object has fast properties, check whether the first slot // in the descriptor array matches the hidden string. Since the @@ -5017,43 +5104,42 @@ int sorted_index = descriptors->GetSortedKeyIndex(0); if (descriptors->GetKey(sorted_index) == isolate->heap()->hidden_string() && sorted_index < object->map()->NumberOfOwnDescriptors()) { - ASSERT(descriptors->GetType(sorted_index) == FIELD); - object->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), - *value); + object->WriteToField(sorted_index, *value); return object; } } } - SetLocalPropertyIgnoreAttributes(object, - isolate->factory()->hidden_string(), - value, - DONT_ENUM, - OPTIMAL_REPRESENTATION, - ALLOW_AS_CONSTANT, - OMIT_EXTENSIBILITY_CHECK); + SetOwnPropertyIgnoreAttributes(object, isolate->factory()->hidden_string(), + value, DONT_ENUM, + OMIT_EXTENSIBILITY_CHECK).Assert(); return object; } Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object, Handle<Name> name, - DeleteMode mode) { - // Check local property, ignore interceptor. + DeleteMode delete_mode) { + // Check own property, ignore interceptor. Isolate* isolate = object->GetIsolate(); - LookupResult result(isolate); - object->LocalLookupRealNamedProperty(*name, &result); - if (!result.IsFound()) return isolate->factory()->true_value(); + LookupResult lookup(isolate); + object->LookupOwnRealNamedProperty(name, &lookup); + if (!lookup.IsFound()) return isolate->factory()->true_value(); + PropertyNormalizationMode mode = object->map()->is_prototype_map() + ? KEEP_INOBJECT_PROPERTIES + : CLEAR_INOBJECT_PROPERTIES; // Normalize object if needed. 
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + NormalizeProperties(object, mode, 0); - return DeleteNormalizedProperty(object, name, mode); + Handle<Object> result = DeleteNormalizedProperty(object, name, delete_mode); + ReoptimizeIfPrototype(object); + return result; } -Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object, - Handle<Name> name) { +MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor( + Handle<JSObject> object, Handle<Name> name) { Isolate* isolate = object->GetIsolate(); // TODO(rossberg): Support symbols in the API. @@ -5069,9 +5155,9 @@ isolate, interceptor->data(), *object, *object); v8::Handle<v8::Boolean> result = args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name))); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); if (!result.IsEmpty()) { - ASSERT(result->IsBoolean()); + DCHECK(result->IsBoolean()); Handle<Object> result_internal = v8::Utils::OpenHandle(*result); result_internal->VerifyApiCallResultType(); // Rebox CustomArguments::kReturnValueOffset before returning. 
@@ -5080,13 +5166,13 @@ } Handle<Object> result = DeletePropertyPostInterceptor(object, name, NORMAL_DELETION); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); return result; } -Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object, - uint32_t index) { +MaybeHandle<Object> JSObject::DeleteElementWithInterceptor( + Handle<JSObject> object, + uint32_t index) { Isolate* isolate = object->GetIsolate(); Factory* factory = isolate->factory(); @@ -5103,32 +5189,31 @@ PropertyCallbackArguments args( isolate, interceptor->data(), *object, *object); v8::Handle<v8::Boolean> result = args.Call(deleter, index); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); if (!result.IsEmpty()) { - ASSERT(result->IsBoolean()); + DCHECK(result->IsBoolean()); Handle<Object> result_internal = v8::Utils::OpenHandle(*result); result_internal->VerifyApiCallResultType(); // Rebox CustomArguments::kReturnValueOffset before returning. return handle(*result_internal, isolate); } - Handle<Object> delete_result = object->GetElementsAccessor()->Delete( + MaybeHandle<Object> delete_result = object->GetElementsAccessor()->Delete( object, index, NORMAL_DELETION); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); return delete_result; } -Handle<Object> JSObject::DeleteElement(Handle<JSObject> object, - uint32_t index, - DeleteMode mode) { +MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object, + uint32_t index, + DeleteMode mode) { Isolate* isolate = object->GetIsolate(); Factory* factory = isolate->factory(); // Check access rights if needed. 
if (object->IsAccessCheckNeeded() && - !isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_DELETE)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_DELETE); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + !isolate->MayIndexedAccess(object, index, v8::ACCESS_DELETE)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return factory->false_value(); } @@ -5147,76 +5232,88 @@ } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return factory->false_value(); - ASSERT(proto->IsJSGlobalObject()); - return DeleteElement(Handle<JSObject>::cast(proto), index, mode); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return factory->false_value(); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return DeleteElement( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index, + mode); } Handle<Object> old_value; bool should_enqueue_change_record = false; if (object->map()->is_observed()) { - should_enqueue_change_record = HasLocalElement(object, index); + Maybe<bool> maybe = HasOwnElement(object, index); + if (!maybe.has_value) return MaybeHandle<Object>(); + should_enqueue_change_record = maybe.value; if (should_enqueue_change_record) { - if (object->GetLocalElementAccessorPair(index) != NULL) { + if (!GetOwnElementAccessorPair(object, index).is_null()) { old_value = Handle<Object>::cast(factory->the_hole_value()); } else { - old_value = Object::GetElementNoExceptionThrown(isolate, object, index); + old_value = Object::GetElement( + isolate, object, index).ToHandleChecked(); } } } // Skip interceptor if forcing deletion. 
- Handle<Object> result; + MaybeHandle<Object> maybe_result; if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) { - result = DeleteElementWithInterceptor(object, index); + maybe_result = DeleteElementWithInterceptor(object, index); } else { - result = object->GetElementsAccessor()->Delete(object, index, mode); + maybe_result = object->GetElementsAccessor()->Delete(object, index, mode); } + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object); - if (should_enqueue_change_record && !HasLocalElement(object, index)) { - Handle<String> name = factory->Uint32ToString(index); - EnqueueChangeRecord(object, "delete", name, old_value); + if (should_enqueue_change_record) { + Maybe<bool> maybe = HasOwnElement(object, index); + if (!maybe.has_value) return MaybeHandle<Object>(); + if (!maybe.value) { + Handle<String> name = factory->Uint32ToString(index); + EnqueueChangeRecord(object, "delete", name, old_value); + } } return result; } -Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object, - Handle<Name> name, - DeleteMode mode) { +MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object, + Handle<Name> name, + DeleteMode delete_mode) { Isolate* isolate = object->GetIsolate(); // ECMA-262, 3rd, 8.6.2.5 - ASSERT(name->IsName()); + DCHECK(name->IsName()); // Check access rights if needed. 
if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_DELETE)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_DELETE); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + !isolate->MayNamedAccess(object, name, v8::ACCESS_DELETE)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return isolate->factory()->false_value(); } if (object->IsJSGlobalProxy()) { - Object* proto = object->GetPrototype(); - if (proto->IsNull()) return isolate->factory()->false_value(); - ASSERT(proto->IsJSGlobalObject()); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return isolate->factory()->false_value(); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); return JSGlobalObject::DeleteProperty( - handle(JSGlobalObject::cast(proto)), name, mode); + Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter)), name, + delete_mode); } uint32_t index = 0; if (name->AsArrayIndex(&index)) { - return DeleteElement(object, index, mode); + return DeleteElement(object, index, delete_mode); } LookupResult lookup(isolate); - object->LocalLookup(*name, &lookup, true); + object->LookupOwn(name, &lookup, true); if (!lookup.IsFound()) return isolate->factory()->true_value(); // Ignore attributes if forcing a deletion. - if (lookup.IsDontDelete() && mode != FORCE_DELETION) { - if (mode == STRICT_DELETION) { + if (lookup.IsDontDelete() && delete_mode != FORCE_DELETION) { + if (delete_mode == STRICT_DELETION) { // Deleting a non-configurable property in strict mode. 
Handle<Object> args[2] = { name, object }; Handle<Object> error = isolate->factory()->NewTypeError( @@ -5231,37 +5328,47 @@ bool is_observed = object->map()->is_observed() && *name != isolate->heap()->hidden_string(); if (is_observed && lookup.IsDataProperty()) { - old_value = Object::GetProperty(object, name); - CHECK_NOT_EMPTY_HANDLE(isolate, old_value); + old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked(); } Handle<Object> result; // Check for interceptor. if (lookup.IsInterceptor()) { // Skip interceptor if forcing a deletion. - if (mode == FORCE_DELETION) { - result = DeletePropertyPostInterceptor(object, name, mode); + if (delete_mode == FORCE_DELETION) { + result = DeletePropertyPostInterceptor(object, name, delete_mode); } else { - result = DeletePropertyWithInterceptor(object, name); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + DeletePropertyWithInterceptor(object, name), + Object); } } else { + PropertyNormalizationMode mode = object->map()->is_prototype_map() + ? KEEP_INOBJECT_PROPERTIES + : CLEAR_INOBJECT_PROPERTIES; // Normalize object if needed. - NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + NormalizeProperties(object, mode, 0); // Make sure the properties are normalized before removing the entry. 
- result = DeleteNormalizedProperty(object, name, mode); + result = DeleteNormalizedProperty(object, name, delete_mode); + ReoptimizeIfPrototype(object); } - if (is_observed && !HasLocalProperty(object, name)) { - EnqueueChangeRecord(object, "delete", name, old_value); + if (is_observed) { + Maybe<bool> maybe = HasOwnProperty(object, name); + if (!maybe.has_value) return MaybeHandle<Object>(); + if (!maybe.value) { + EnqueueChangeRecord(object, "delete", name, old_value); + } } return result; } -Handle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object, - uint32_t index, - DeleteMode mode) { +MaybeHandle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object, + uint32_t index, + DeleteMode mode) { if (object->IsJSProxy()) { return JSProxy::DeleteElementWithHandler( Handle<JSProxy>::cast(object), index, mode); @@ -5270,9 +5377,9 @@ } -Handle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object, - Handle<Name> name, - DeleteMode mode) { +MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object, + Handle<Name> name, + DeleteMode mode) { if (object->IsJSProxy()) { return JSProxy::DeletePropertyWithHandler( Handle<JSProxy>::cast(object), name, mode); @@ -5284,7 +5391,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind, Object* object) { - ASSERT(IsFastObjectElementsKind(kind) || + DCHECK(IsFastObjectElementsKind(kind) || kind == DICTIONARY_ELEMENTS); if (IsFastObjectElementsKind(kind)) { int length = IsJSArray() @@ -5371,11 +5478,10 @@ // For functions check the context. if (IsJSFunction()) { // Get the constructor function for arguments array. 
- JSObject* arguments_boilerplate = - heap->isolate()->context()->native_context()-> - sloppy_arguments_boilerplate(); + Map* arguments_map = + heap->isolate()->context()->native_context()->sloppy_arguments_map(); JSFunction* arguments_function = - JSFunction::cast(arguments_boilerplate->map()->constructor()); + JSFunction::cast(arguments_map->constructor()); // Get the context and don't check if it is the native context. JSFunction* f = JSFunction::cast(this); @@ -5417,25 +5523,25 @@ } -Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) { +MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) { Isolate* isolate = object->GetIsolate(); if (!object->map()->is_extensible()) return object; if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + !isolate->MayNamedAccess( + object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return isolate->factory()->false_value(); } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return object; - ASSERT(proto->IsJSGlobalObject()); - return PreventExtensions(Handle<JSObject>::cast(proto)); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return object; + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return PreventExtensions( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))); } // It's not possible to seal objects with external array elements @@ -5445,13 +5551,12 @@ isolate->factory()->NewTypeError( "cant_prevent_ext_external_array_elements", HandleVector(&object, 1)); - isolate->Throw(*error); - return Handle<Object>(); + return 
isolate->Throw<Object>(error); } // If there are fast elements we normalize. Handle<SeededNumberDictionary> dictionary = NormalizeElements(object); - ASSERT(object->HasDictionaryElements() || + DCHECK(object->HasDictionaryElements() || object->HasDictionaryArgumentsElements()); // Make sure that we never go back to fast case. @@ -5464,7 +5569,7 @@ new_map->set_is_extensible(false); JSObject::MigrateToMap(object, new_map); - ASSERT(!object->map()->is_extensible()); + DCHECK(!object->map()->is_extensible()); if (object->map()->is_observed()) { EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(), @@ -5479,12 +5584,16 @@ int capacity = dictionary->Capacity(); for (int i = 0; i < capacity; i++) { Object* k = dictionary->KeyAt(i); - if (dictionary->IsKey(k)) { + if (dictionary->IsKey(k) && + !(k->IsSymbol() && Symbol::cast(k)->is_private())) { PropertyDetails details = dictionary->DetailsAt(i); int attrs = DONT_DELETE; // READ_ONLY is an invalid attribute for JS setters/getters. - if (details.type() != CALLBACKS || - !dictionary->ValueAt(i)->IsAccessorPair()) { + if (details.type() == CALLBACKS) { + Object* v = dictionary->ValueAt(i); + if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value(); + if (!v->IsAccessorPair()) attrs |= READ_ONLY; + } else { attrs |= READ_ONLY; } details = details.CopyAddAttributes( @@ -5495,28 +5604,27 @@ } -Handle<Object> JSObject::Freeze(Handle<JSObject> object) { +MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) { // Freezing sloppy arguments should be handled elsewhere. 
- ASSERT(!object->HasSloppyArgumentsElements()); - ASSERT(!object->map()->is_observed()); + DCHECK(!object->HasSloppyArgumentsElements()); + DCHECK(!object->map()->is_observed()); if (object->map()->is_frozen()) return object; Isolate* isolate = object->GetIsolate(); if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + !isolate->MayNamedAccess( + object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return isolate->factory()->false_value(); } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return object; - ASSERT(proto->IsJSGlobalObject()); - return Freeze(Handle<JSObject>::cast(proto)); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return object; + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return Freeze(Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))); } // It's not possible to freeze objects with external array elements @@ -5526,8 +5634,7 @@ isolate->factory()->NewTypeError( "cant_prevent_ext_external_array_elements", HandleVector(&object, 1)); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } Handle<SeededNumberDictionary> new_element_dictionary; @@ -5539,8 +5646,7 @@ int capacity = 0; int used = 0; object->GetElementsCapacityAndUsage(&capacity, &used); - new_element_dictionary = - isolate->factory()->NewSeededNumberDictionary(used); + new_element_dictionary = SeededNumberDictionary::New(isolate, used); // Move elements to a dictionary; avoid calling NormalizeElements to avoid // unnecessary transitions. 
@@ -5553,29 +5659,21 @@ } } - LookupResult result(isolate); - Handle<Map> old_map(object->map()); - old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result); - if (result.IsTransition()) { - Handle<Map> transition_map(result.GetTransitionTarget()); - ASSERT(transition_map->has_dictionary_elements()); - ASSERT(transition_map->is_frozen()); - ASSERT(!transition_map->is_extensible()); + Handle<Map> old_map(object->map(), isolate); + int transition_index = old_map->SearchTransition( + isolate->heap()->frozen_symbol()); + if (transition_index != TransitionArray::kNotFound) { + Handle<Map> transition_map(old_map->GetTransition(transition_index)); + DCHECK(transition_map->has_dictionary_elements()); + DCHECK(transition_map->is_frozen()); + DCHECK(!transition_map->is_extensible()); JSObject::MigrateToMap(object, transition_map); } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) { // Create a new descriptor array with fully-frozen properties - int num_descriptors = old_map->NumberOfOwnDescriptors(); - Handle<DescriptorArray> new_descriptors = - DescriptorArray::CopyUpToAddAttributes( - handle(old_map->instance_descriptors()), num_descriptors, FROZEN); - Handle<Map> new_map = Map::CopyReplaceDescriptors( - old_map, new_descriptors, INSERT_TRANSITION, - isolate->factory()->frozen_symbol()); - new_map->freeze(); - new_map->set_is_extensible(false); - new_map->set_elements_kind(DICTIONARY_ELEMENTS); + Handle<Map> new_map = Map::CopyForFreeze(old_map); JSObject::MigrateToMap(object, new_map); } else { + DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map()); // Slow path: need to normalize properties for safety NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); @@ -5591,7 +5689,7 @@ FreezeDictionary(object->property_dictionary()); } - ASSERT(object->map()->has_dictionary_elements()); + DCHECK(object->map()->has_dictionary_elements()); if (!new_element_dictionary.is_null()) { 
object->set_elements(*new_element_dictionary); } @@ -5609,34 +5707,33 @@ void JSObject::SetObserved(Handle<JSObject> object) { + DCHECK(!object->IsJSGlobalProxy()); + DCHECK(!object->IsJSGlobalObject()); Isolate* isolate = object->GetIsolate(); - - if (object->map()->is_observed()) - return; - - LookupResult result(isolate); - object->map()->LookupTransition(*object, - isolate->heap()->observed_symbol(), - &result); - Handle<Map> new_map; - if (result.IsTransition()) { - new_map = handle(result.GetTransitionTarget()); - ASSERT(new_map->is_observed()); - } else if (object->map()->CanHaveMoreTransitions()) { - new_map = Map::CopyForObserved(handle(object->map())); + Handle<Map> old_map(object->map(), isolate); + DCHECK(!old_map->is_observed()); + int transition_index = old_map->SearchTransition( + isolate->heap()->observed_symbol()); + if (transition_index != TransitionArray::kNotFound) { + new_map = handle(old_map->GetTransition(transition_index), isolate); + DCHECK(new_map->is_observed()); + } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) { + new_map = Map::CopyForObserved(old_map); } else { - new_map = Map::Copy(handle(object->map())); + new_map = Map::Copy(old_map); new_map->set_is_observed(); } JSObject::MigrateToMap(object, new_map); } -Handle<JSObject> JSObject::Copy(Handle<JSObject> object) { +Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object, + Representation representation, + FieldIndex index) { Isolate* isolate = object->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - isolate->heap()->CopyJSObject(*object), JSObject); + Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate); + return Object::WrapForRead(isolate, raw_value, representation); } @@ -5649,13 +5746,14 @@ copying_(copying), hints_(hints) {} - Handle<JSObject> StructureWalk(Handle<JSObject> object); + MUST_USE_RESULT MaybeHandle<JSObject> StructureWalk(Handle<JSObject> object); protected: - inline Handle<JSObject> 
VisitElementOrProperty(Handle<JSObject> object, - Handle<JSObject> value) { + MUST_USE_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty( + Handle<JSObject> object, + Handle<JSObject> value) { Handle<AllocationSite> current_site = site_context()->EnterNewScope(); - Handle<JSObject> copy_of_value = StructureWalk(value); + MaybeHandle<JSObject> copy_of_value = StructureWalk(value); site_context()->ExitScope(current_site, value); return copy_of_value; } @@ -5673,18 +5771,18 @@ template <class ContextObject> -Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk( +MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk( Handle<JSObject> object) { Isolate* isolate = this->isolate(); bool copying = this->copying(); - bool shallow = hints_ == JSObject::kObjectIsShallowArray; + bool shallow = hints_ == JSObject::kObjectIsShallow; if (!shallow) { StackLimitCheck check(isolate); if (check.HasOverflowed()) { isolate->StackOverflow(); - return Handle<JSObject>::null(); + return MaybeHandle<JSObject>(); } } @@ -5698,19 +5796,13 @@ if (site_context()->ShouldCreateMemento(object)) { site_to_pass = site_context()->current(); } - CALL_AND_RETRY_OR_DIE(isolate, - isolate->heap()->CopyJSObject(*object, - site_to_pass.is_null() ? NULL : *site_to_pass), - { copy = Handle<JSObject>(JSObject::cast(__object__), - isolate); - break; - }, - return Handle<JSObject>()); + copy = isolate->factory()->CopyJSObjectWithAllocationSite( + object, site_to_pass); } else { copy = object; } - ASSERT(copying || copy.is_identical_to(object)); + DCHECK(copying || copy.is_identical_to(object)); ElementsKind kind = copy->GetElementsKind(); if (copying && IsFastSmiOrObjectElementsKind(kind) && @@ -5722,21 +5814,23 @@ if (!shallow) { HandleScope scope(isolate); - // Deep copy local properties. + // Deep copy own properties. 
if (copy->HasFastProperties()) { Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors()); int limit = copy->map()->NumberOfOwnDescriptors(); for (int i = 0; i < limit; i++) { PropertyDetails details = descriptors->GetDetails(i); if (details.type() != FIELD) continue; - int index = descriptors->GetFieldIndex(i); + FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i); Handle<Object> value(object->RawFastPropertyAt(index), isolate); if (value->IsJSObject()) { - value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value)); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>()); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, + VisitElementOrProperty(copy, Handle<JSObject>::cast(value)), + JSObject); } else { Representation representation = details.representation(); - value = NewStorageFor(isolate, value, representation); + value = Object::NewStorageFor(isolate, value, representation); } if (copying) { copy->FastPropertyAtPut(index, *value); @@ -5744,36 +5838,38 @@ } } else { Handle<FixedArray> names = - isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties()); - copy->GetLocalPropertyNames(*names, 0); + isolate->factory()->NewFixedArray(copy->NumberOfOwnProperties()); + copy->GetOwnPropertyNames(*names, 0); for (int i = 0; i < names->length(); i++) { - ASSERT(names->get(i)->IsString()); + DCHECK(names->get(i)->IsString()); Handle<String> key_string(String::cast(names->get(i))); - PropertyAttributes attributes = - JSReceiver::GetLocalPropertyAttribute(copy, key_string); + Maybe<PropertyAttributes> maybe = + JSReceiver::GetOwnPropertyAttributes(copy, key_string); + DCHECK(maybe.has_value); + PropertyAttributes attributes = maybe.value; // Only deep copy fields from the object literal expression. // In particular, don't try to copy the length attribute of // an array. 
if (attributes != NONE) continue; - Handle<Object> value( - copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(), - isolate); + Handle<Object> value = + Object::GetProperty(copy, key_string).ToHandleChecked(); if (value->IsJSObject()) { - Handle<JSObject> result = VisitElementOrProperty( - copy, Handle<JSObject>::cast(value)); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>()); + Handle<JSObject> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + VisitElementOrProperty(copy, Handle<JSObject>::cast(value)), + JSObject); if (copying) { // Creating object copy for literals. No strict mode needed. - CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty( - copy, key_string, result, NONE, SLOPPY)); + JSObject::SetProperty(copy, key_string, result, SLOPPY).Assert(); } } } } - // Deep copy local elements. + // Deep copy own elements. // Pixel elements cannot be created using an object literal. - ASSERT(!copy->HasExternalArrayElements()); + DCHECK(!copy->HasExternalArrayElements()); switch (kind) { case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: @@ -5783,19 +5879,21 @@ if (elements->map() == isolate->heap()->fixed_cow_array_map()) { #ifdef DEBUG for (int i = 0; i < elements->length(); i++) { - ASSERT(!elements->get(i)->IsJSObject()); + DCHECK(!elements->get(i)->IsJSObject()); } #endif } else { for (int i = 0; i < elements->length(); i++) { Handle<Object> value(elements->get(i), isolate); - ASSERT(value->IsSmi() || + DCHECK(value->IsSmi() || value->IsTheHole() || (IsFastObjectElementsKind(copy->GetElementsKind()))); if (value->IsJSObject()) { - Handle<JSObject> result = VisitElementOrProperty( - copy, Handle<JSObject>::cast(value)); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>()); + Handle<JSObject> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + VisitElementOrProperty(copy, Handle<JSObject>::cast(value)), + JSObject); if (copying) { elements->set(i, *result); } @@ -5813,9 +5911,11 @@ if 
(element_dictionary->IsKey(k)) { Handle<Object> value(element_dictionary->ValueAt(i), isolate); if (value->IsJSObject()) { - Handle<JSObject> result = VisitElementOrProperty( - copy, Handle<JSObject>::cast(value)); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>()); + Handle<JSObject> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + VisitElementOrProperty(copy, Handle<JSObject>::cast(value)), + JSObject); if (copying) { element_dictionary->ValueAtPut(i, *result); } @@ -5847,44 +5947,81 @@ } -Handle<JSObject> JSObject::DeepWalk( +MaybeHandle<JSObject> JSObject::DeepWalk( Handle<JSObject> object, AllocationSiteCreationContext* site_context) { JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false, kNoHints); - Handle<JSObject> result = v.StructureWalk(object); - ASSERT(result.is_null() || result.is_identical_to(object)); + MaybeHandle<JSObject> result = v.StructureWalk(object); + Handle<JSObject> for_assert; + DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object)); return result; } -Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object, - AllocationSiteUsageContext* site_context, - DeepCopyHints hints) { +MaybeHandle<JSObject> JSObject::DeepCopy( + Handle<JSObject> object, + AllocationSiteUsageContext* site_context, + DeepCopyHints hints) { JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints); - Handle<JSObject> copy = v.StructureWalk(object); - ASSERT(!copy.is_identical_to(object)); + MaybeHandle<JSObject> copy = v.StructureWalk(object); + Handle<JSObject> for_assert; + DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object)); return copy; } +Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object, + Handle<Name> key) { + Isolate* isolate = object->GetIsolate(); + LookupResult lookup(isolate); + { + DisallowHeapAllocation no_allocation; + object->LookupRealNamedProperty(key, &lookup); + } + Handle<Object> result = 
isolate->factory()->undefined_value(); + if (lookup.IsFound() && !lookup.IsTransition()) { + switch (lookup.type()) { + case NORMAL: + result = GetNormalizedProperty( + Handle<JSObject>(lookup.holder(), isolate), &lookup); + break; + case FIELD: + result = FastPropertyAt(Handle<JSObject>(lookup.holder(), isolate), + lookup.representation(), + lookup.GetFieldIndex()); + break; + case CONSTANT: + result = Handle<Object>(lookup.GetConstant(), isolate); + break; + case CALLBACKS: + case HANDLER: + case INTERCEPTOR: + break; + case NONEXISTENT: + UNREACHABLE(); + } + } + return result; +} + + // Tests for the fast common case for property enumeration: // - This object and all prototypes has an enum cache (which means that // it is no proxy, has no interceptors and needs no access checks). // - This object has no elements. // - No prototype has enumerable properties/elements. bool JSReceiver::IsSimpleEnum() { - Heap* heap = GetHeap(); - for (Object* o = this; - o != heap->null_value(); - o = JSObject::cast(o)->GetPrototype()) { - if (!o->IsJSObject()) return false; - JSObject* curr = JSObject::cast(o); + for (PrototypeIterator iter(GetIsolate(), this, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (!iter.GetCurrent()->IsJSObject()) return false; + JSObject* curr = JSObject::cast(iter.GetCurrent()); int enum_length = curr->map()->EnumLength(); if (enum_length == kInvalidEnumCacheSentinel) return false; if (curr->IsAccessCheckNeeded()) return false; - ASSERT(!curr->HasNamedInterceptor()); - ASSERT(!curr->HasIndexedInterceptor()); + DCHECK(!curr->HasNamedInterceptor()); + DCHECK(!curr->HasIndexedInterceptor()); if (curr->NumberOfEnumElements() > 0) return false; if (curr != this && enum_length != 0) return false; } @@ -5941,30 +6078,17 @@ } -AccessorDescriptor* Map::FindAccessor(Name* name) { - DescriptorArray* descs = instance_descriptors(); - int number_of_own_descriptors = NumberOfOwnDescriptors(); - for (int i = 0; i < 
number_of_own_descriptors; i++) { - if (descs->GetType(i) == CALLBACKS && name->Equals(descs->GetKey(i))) { - return descs->GetCallbacks(i); - } - } - return NULL; -} - - -void JSReceiver::LocalLookup( - Name* name, LookupResult* result, bool search_hidden_prototypes) { - ASSERT(name->IsName()); - - Heap* heap = GetHeap(); +void JSReceiver::LookupOwn( + Handle<Name> name, LookupResult* result, bool search_hidden_prototypes) { + DisallowHeapAllocation no_gc; + DCHECK(name->IsName()); if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return result->NotFound(); - ASSERT(proto->IsJSGlobalObject()); - return JSReceiver::cast(proto)->LocalLookup( - name, result, search_hidden_prototypes); + PrototypeIterator iter(GetIsolate(), this); + if (iter.IsAtEnd()) return result->NotFound(); + DCHECK(iter.GetCurrent()->IsJSGlobalObject()); + return JSReceiver::cast(iter.GetCurrent()) + ->LookupOwn(name, result, search_hidden_prototypes); } if (IsJSProxy()) { @@ -5982,46 +6106,270 @@ // Check for lookup interceptor except when bootstrapping. 
if (js_object->HasNamedInterceptor() && - !heap->isolate()->bootstrapper()->IsActive()) { + !GetIsolate()->bootstrapper()->IsActive()) { result->InterceptorResult(js_object); return; } - js_object->LocalLookupRealNamedProperty(name, result); - if (result->IsFound() || !search_hidden_prototypes) return; + js_object->LookupOwnRealNamedProperty(name, result); + if (result->IsFound() || name->IsOwn() || !search_hidden_prototypes) return; - Object* proto = js_object->GetPrototype(); - if (!proto->IsJSReceiver()) return; - JSReceiver* receiver = JSReceiver::cast(proto); + PrototypeIterator iter(GetIsolate(), js_object); + if (!iter.GetCurrent()->IsJSReceiver()) return; + JSReceiver* receiver = JSReceiver::cast(iter.GetCurrent()); if (receiver->map()->is_hidden_prototype()) { - receiver->LocalLookup(name, result, search_hidden_prototypes); + receiver->LookupOwn(name, result, search_hidden_prototypes); } } -void JSReceiver::Lookup(Name* name, LookupResult* result) { +void JSReceiver::Lookup(Handle<Name> name, LookupResult* result) { + DisallowHeapAllocation no_gc; // Ecma-262 3rd 8.6.2.4 - Heap* heap = GetHeap(); - for (Object* current = this; - current != heap->null_value(); - current = JSObject::cast(current)->GetPrototype()) { - JSReceiver::cast(current)->LocalLookup(name, result, false); + for (PrototypeIterator iter(GetIsolate(), this, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + JSReceiver::cast(iter.GetCurrent())->LookupOwn(name, result, false); if (result->IsFound()) return; + if (name->IsOwn()) { + result->NotFound(); + return; + } } result->NotFound(); } -// Search object and its prototype chain for callback properties. 
-void JSObject::LookupCallbackProperty(Name* name, LookupResult* result) { - Heap* heap = GetHeap(); - for (Object* current = this; - current != heap->null_value() && current->IsJSObject(); - current = JSObject::cast(current)->GetPrototype()) { - JSObject::cast(current)->LocalLookupRealNamedProperty(name, result); - if (result->IsPropertyCallbacks()) return; +static bool ContainsOnlyValidKeys(Handle<FixedArray> array) { + int len = array->length(); + for (int i = 0; i < len; i++) { + Object* e = array->get(i); + if (!(e->IsString() || e->IsNumber())) return false; } - result->NotFound(); + return true; +} + + +static Handle<FixedArray> ReduceFixedArrayTo( + Handle<FixedArray> array, int length) { + DCHECK(array->length() >= length); + if (array->length() == length) return array; + + Handle<FixedArray> new_array = + array->GetIsolate()->factory()->NewFixedArray(length); + for (int i = 0; i < length; ++i) new_array->set(i, array->get(i)); + return new_array; +} + + +static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, + bool cache_result) { + Isolate* isolate = object->GetIsolate(); + if (object->HasFastProperties()) { + int own_property_count = object->map()->EnumLength(); + // If the enum length of the given map is set to kInvalidEnumCache, this + // means that the map itself has never used the present enum cache. The + // first step to using the cache is to set the enum length of the map by + // counting the number of own descriptors that are not DONT_ENUM or + // SYMBOLIC. 
+ if (own_property_count == kInvalidEnumCacheSentinel) { + own_property_count = object->map()->NumberOfDescribedProperties( + OWN_DESCRIPTORS, DONT_SHOW); + } else { + DCHECK(own_property_count == object->map()->NumberOfDescribedProperties( + OWN_DESCRIPTORS, DONT_SHOW)); + } + + if (object->map()->instance_descriptors()->HasEnumCache()) { + DescriptorArray* desc = object->map()->instance_descriptors(); + Handle<FixedArray> keys(desc->GetEnumCache(), isolate); + + // In case the number of properties required in the enum are actually + // present, we can reuse the enum cache. Otherwise, this means that the + // enum cache was generated for a previous (smaller) version of the + // Descriptor Array. In that case we regenerate the enum cache. + if (own_property_count <= keys->length()) { + if (cache_result) object->map()->SetEnumLength(own_property_count); + isolate->counters()->enum_cache_hits()->Increment(); + return ReduceFixedArrayTo(keys, own_property_count); + } + } + + Handle<Map> map(object->map()); + + if (map->instance_descriptors()->IsEmpty()) { + isolate->counters()->enum_cache_hits()->Increment(); + if (cache_result) map->SetEnumLength(0); + return isolate->factory()->empty_fixed_array(); + } + + isolate->counters()->enum_cache_misses()->Increment(); + + Handle<FixedArray> storage = isolate->factory()->NewFixedArray( + own_property_count); + Handle<FixedArray> indices = isolate->factory()->NewFixedArray( + own_property_count); + + Handle<DescriptorArray> descs = + Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate); + + int size = map->NumberOfOwnDescriptors(); + int index = 0; + + for (int i = 0; i < size; i++) { + PropertyDetails details = descs->GetDetails(i); + Object* key = descs->GetKey(i); + if (!(details.IsDontEnum() || key->IsSymbol())) { + storage->set(index, key); + if (!indices.is_null()) { + if (details.type() != FIELD) { + indices = Handle<FixedArray>(); + } else { + FieldIndex field_index = FieldIndex::ForDescriptor(*map, 
i); + int load_by_field_index = field_index.GetLoadByFieldIndex(); + indices->set(index, Smi::FromInt(load_by_field_index)); + } + } + index++; + } + } + DCHECK(index == storage->length()); + + Handle<FixedArray> bridge_storage = + isolate->factory()->NewFixedArray( + DescriptorArray::kEnumCacheBridgeLength); + DescriptorArray* desc = object->map()->instance_descriptors(); + desc->SetEnumCache(*bridge_storage, + *storage, + indices.is_null() ? Object::cast(Smi::FromInt(0)) + : Object::cast(*indices)); + if (cache_result) { + object->map()->SetEnumLength(own_property_count); + } + return storage; + } else { + Handle<NameDictionary> dictionary(object->property_dictionary()); + int length = dictionary->NumberOfEnumElements(); + if (length == 0) { + return Handle<FixedArray>(isolate->heap()->empty_fixed_array()); + } + Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length); + dictionary->CopyEnumKeysTo(*storage); + return storage; + } +} + + +MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object, + KeyCollectionType type) { + USE(ContainsOnlyValidKeys); + Isolate* isolate = object->GetIsolate(); + Handle<FixedArray> content = isolate->factory()->empty_fixed_array(); + Handle<JSFunction> arguments_function( + JSFunction::cast(isolate->sloppy_arguments_map()->constructor())); + + // Only collect keys if access is permitted. 
+ for (PrototypeIterator iter(isolate, object, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) { + Handle<JSProxy> proxy(JSProxy::cast(*PrototypeIterator::GetCurrent(iter)), + isolate); + Handle<Object> args[] = { proxy }; + Handle<Object> names; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, names, + Execution::Call(isolate, + isolate->proxy_enumerate(), + object, + ARRAY_SIZE(args), + args), + FixedArray); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, content, + FixedArray::AddKeysFromArrayLike( + content, Handle<JSObject>::cast(names)), + FixedArray); + break; + } + + Handle<JSObject> current = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); + + // Check access rights if required. + if (current->IsAccessCheckNeeded() && + !isolate->MayNamedAccess( + current, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) { + isolate->ReportFailedAccessCheck(current, v8::ACCESS_KEYS); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray); + break; + } + + // Compute the element keys. + Handle<FixedArray> element_keys = + isolate->factory()->NewFixedArray(current->NumberOfEnumElements()); + current->GetEnumElementKeys(*element_keys); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, content, + FixedArray::UnionOfKeys(content, element_keys), + FixedArray); + DCHECK(ContainsOnlyValidKeys(content)); + + // Add the element keys from the interceptor. + if (current->HasIndexedInterceptor()) { + Handle<JSObject> result; + if (JSObject::GetKeysForIndexedInterceptor( + current, object).ToHandle(&result)) { + ASSIGN_RETURN_ON_EXCEPTION( + isolate, content, + FixedArray::AddKeysFromArrayLike(content, result), + FixedArray); + } + DCHECK(ContainsOnlyValidKeys(content)); + } + + // We can cache the computed property keys if access checks are + // not needed and no interceptors are involved. 
+ // + // We do not use the cache if the object has elements and + // therefore it does not make sense to cache the property names + // for arguments objects. Arguments objects will always have + // elements. + // Wrapped strings have elements, but don't have an elements + // array or dictionary. So the fast inline test for whether to + // use the cache says yes, so we should not create a cache. + bool cache_enum_keys = + ((current->map()->constructor() != *arguments_function) && + !current->IsJSValue() && + !current->IsAccessCheckNeeded() && + !current->HasNamedInterceptor() && + !current->HasIndexedInterceptor()); + // Compute the property keys and cache them if possible. + ASSIGN_RETURN_ON_EXCEPTION( + isolate, content, + FixedArray::UnionOfKeys( + content, GetEnumPropertyKeys(current, cache_enum_keys)), + FixedArray); + DCHECK(ContainsOnlyValidKeys(content)); + + // Add the property keys from the interceptor. + if (current->HasNamedInterceptor()) { + Handle<JSObject> result; + if (JSObject::GetKeysForNamedInterceptor( + current, object).ToHandle(&result)) { + ASSIGN_RETURN_ON_EXCEPTION( + isolate, content, + FixedArray::AddKeysFromArrayLike(content, result), + FixedArray); + } + DCHECK(ContainsOnlyValidKeys(content)); + } + + // If we only want own properties we bail out after the first + // iteration. 
+ if (type == OWN_ONLY) break; + } + return content; } @@ -6038,7 +6386,7 @@ Object* result = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS && result->IsAccessorPair()) { - ASSERT(!details.IsDontDelete()); + DCHECK(!details.IsDontDelete()); if (details.attributes() != attributes) { dictionary->DetailsAtPut( entry, @@ -6056,8 +6404,7 @@ uint32_t index, Handle<Object> getter, Handle<Object> setter, - PropertyAttributes attributes, - v8::AccessControl access_control) { + PropertyAttributes attributes) { switch (object->GetElementsKind()) { case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: @@ -6114,7 +6461,6 @@ Isolate* isolate = object->GetIsolate(); Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair(); accessors->SetComponents(*getter, *setter); - accessors->set_access_flags(access_control); SetElementCallback(object, index, accessors, attributes); } @@ -6124,7 +6470,7 @@ Handle<Name> name) { Isolate* isolate = object->GetIsolate(); LookupResult result(isolate); - object->LocalLookupRealNamedProperty(*name, &result); + object->LookupOwnRealNamedProperty(name, &result); if (result.IsPropertyCallbacks()) { // Note that the result can actually have IsDontDelete() == true when we // e.g. have to fall back to the slow case while adding a setter after @@ -6145,13 +6491,11 @@ Handle<Name> name, Handle<Object> getter, Handle<Object> setter, - PropertyAttributes attributes, - v8::AccessControl access_control) { + PropertyAttributes attributes) { // We could assert that the property is configurable here, but we would need // to do a lookup, which seems to be a bit of overkill. 
bool only_attribute_changes = getter->IsNull() && setter->IsNull(); if (object->HasFastProperties() && !only_attribute_changes && - access_control == v8::DEFAULT && (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors)) { bool getterOk = getter->IsNull() || DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes); @@ -6162,55 +6506,24 @@ Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name); accessors->SetComponents(*getter, *setter); - accessors->set_access_flags(access_control); SetPropertyCallback(object, name, accessors, attributes); } -bool JSObject::CanSetCallback(Handle<JSObject> object, Handle<Name> name) { - Isolate* isolate = object->GetIsolate(); - ASSERT(!object->IsAccessCheckNeeded() || - isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)); - - // Check if there is an API defined callback object which prohibits - // callback overwriting in this object or its prototype chain. - // This mechanism is needed for instance in a browser setting, where - // certain accessors such as window.location should not be allowed - // to be overwritten because allowing overwriting could potentially - // cause security problems. 
- LookupResult callback_result(isolate); - object->LookupCallbackProperty(*name, &callback_result); - if (callback_result.IsFound()) { - Object* callback_obj = callback_result.GetCallbackObject(); - if (callback_obj->IsAccessorInfo()) { - return !AccessorInfo::cast(callback_obj)->prohibits_overwriting(); - } - if (callback_obj->IsAccessorPair()) { - return !AccessorPair::cast(callback_obj)->prohibits_overwriting(); - } - } - return true; -} - - bool Map::DictionaryElementsInPrototypeChainOnly() { - Heap* heap = GetHeap(); - if (IsDictionaryElementsKind(elements_kind())) { return false; } - for (Object* prototype = this->prototype(); - prototype != heap->null_value(); - prototype = prototype->GetPrototype(GetIsolate())) { - if (prototype->IsJSProxy()) { + for (PrototypeIterator iter(this); !iter.IsAtEnd(); iter.Advance()) { + if (iter.GetCurrent()->IsJSProxy()) { // Be conservative, don't walk into proxies. return true; } if (IsDictionaryElementsKind( - JSObject::cast(prototype)->map()->elements_kind())) { + JSObject::cast(iter.GetCurrent())->map()->elements_kind())) { return true; } } @@ -6229,7 +6542,7 @@ // Normalize elements to make this operation simple. bool had_dictionary_elements = object->HasDictionaryElements(); Handle<SeededNumberDictionary> dictionary = NormalizeElements(object); - ASSERT(object->HasDictionaryElements() || + DCHECK(object->HasDictionaryElements() || object->HasDictionaryArgumentsElements()); // Update the dictionary with the new CALLBACKS property. dictionary = SeededNumberDictionary::Set(dictionary, index, structure, @@ -6263,15 +6576,18 @@ Handle<Name> name, Handle<Object> structure, PropertyAttributes attributes) { + PropertyNormalizationMode mode = object->map()->is_prototype_map() + ? KEEP_INOBJECT_PROPERTIES + : CLEAR_INOBJECT_PROPERTIES; // Normalize object to make this operation simple. 
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + NormalizeProperties(object, mode, 0); // For the global object allocate a new map to invalidate the global inline // caches which have a global property cell reference directly in the code. if (object->IsGlobalObject()) { Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map())); - ASSERT(new_map->is_dictionary_map()); - object->set_map(*new_map); + DCHECK(new_map->is_dictionary_map()); + JSObject::MigrateToMap(object, new_map); // When running crankshaft, changing the map is not enough. We // need to deoptimize all functions that rely on this global @@ -6282,34 +6598,32 @@ // Update the dictionary with the new CALLBACKS property. PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0); SetNormalizedProperty(object, name, structure, details); + + ReoptimizeIfPrototype(object); } -void JSObject::DefineAccessor(Handle<JSObject> object, - Handle<Name> name, - Handle<Object> getter, - Handle<Object> setter, - PropertyAttributes attributes, - v8::AccessControl access_control) { +MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> getter, + Handle<Object> setter, + PropertyAttributes attributes) { Isolate* isolate = object->GetIsolate(); // Check access rights if needed. 
if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET); - return; + !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); + return isolate->factory()->undefined_value(); } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return; - ASSERT(proto->IsJSGlobalObject()); - DefineAccessor(Handle<JSObject>::cast(proto), - name, - getter, - setter, - attributes, - access_control); - return; + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return isolate->factory()->undefined_value(); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + DefineAccessor(Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), + name, getter, setter, attributes); + return isolate->factory()->undefined_value(); } // Make sure that the top context does not change when doing callbacks or @@ -6317,9 +6631,7 @@ AssertNoContextChange ncc(isolate); // Try to flatten before operating on the string. - if (name->IsString()) String::cast(*name)->TryFlatten(); - - if (!JSObject::CanSetCallback(object, name)) return; + if (name->IsString()) name = String::Flatten(Handle<String>::cast(name)); uint32_t index = 0; bool is_element = name->AsArrayIndex(&index); @@ -6330,33 +6642,41 @@ bool preexists = false; if (is_observed) { if (is_element) { - preexists = HasLocalElement(object, index); - if (preexists && object->GetLocalElementAccessorPair(index) == NULL) { - old_value = Object::GetElementNoExceptionThrown(isolate, object, index); + Maybe<bool> maybe = HasOwnElement(object, index); + // Workaround for a GCC 4.4.3 bug which leads to "‘preexists’ may be used + // uninitialized in this function". 
+ if (!maybe.has_value) { + DCHECK(false); + return isolate->factory()->undefined_value(); + } + preexists = maybe.value; + if (preexists && GetOwnElementAccessorPair(object, index).is_null()) { + old_value = + Object::GetElement(isolate, object, index).ToHandleChecked(); } } else { LookupResult lookup(isolate); - object->LocalLookup(*name, &lookup, true); + object->LookupOwn(name, &lookup, true); preexists = lookup.IsProperty(); if (preexists && lookup.IsDataProperty()) { - old_value = Object::GetProperty(object, name); - CHECK_NOT_EMPTY_HANDLE(isolate, old_value); + old_value = + Object::GetPropertyOrElement(object, name).ToHandleChecked(); } } } if (is_element) { - DefineElementAccessor( - object, index, getter, setter, attributes, access_control); + DefineElementAccessor(object, index, getter, setter, attributes); } else { - DefinePropertyAccessor( - object, name, getter, setter, attributes, access_control); + DefinePropertyAccessor(object, name, getter, setter, attributes); } if (is_observed) { const char* type = preexists ? 
"reconfigure" : "add"; EnqueueChangeRecord(object, type, name, old_value); } + + return isolate->factory()->undefined_value(); } @@ -6389,34 +6709,15 @@ } -static MaybeObject* CopyInsertDescriptor(Map* map, - Name* name, - AccessorPair* accessors, - PropertyAttributes attributes) { - CallbacksDescriptor new_accessors_desc(name, accessors, attributes); - return map->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION); -} - - -static Handle<Map> CopyInsertDescriptor(Handle<Map> map, - Handle<Name> name, - Handle<AccessorPair> accessors, - PropertyAttributes attributes) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - CopyInsertDescriptor(*map, *name, *accessors, attributes), - Map); -} - - bool JSObject::DefineFastAccessor(Handle<JSObject> object, Handle<Name> name, AccessorComponent component, Handle<Object> accessor, PropertyAttributes attributes) { - ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined()); + DCHECK(accessor->IsSpecFunction() || accessor->IsUndefined()); Isolate* isolate = object->GetIsolate(); LookupResult result(isolate); - object->LocalLookup(*name, &result); + object->LookupOwn(name, &result); if (result.IsFound() && !result.IsPropertyCallbacks()) { return false; @@ -6442,11 +6743,13 @@ if (result.IsFound()) { Handle<Map> target(result.GetTransitionTarget()); - ASSERT(target->NumberOfOwnDescriptors() == + DCHECK(target->NumberOfOwnDescriptors() == object->map()->NumberOfOwnDescriptors()); // This works since descriptors are sorted in order of addition. 
- ASSERT(object->map()->instance_descriptors()-> - GetKey(descriptor_number) == *name); + DCHECK(Name::Equals( + handle(object->map()->instance_descriptors()->GetKey( + descriptor_number)), + name)); return TryAccessorTransition(object, target, descriptor_number, component, accessor, attributes); } @@ -6458,8 +6761,8 @@ if (result.IsFound()) { Handle<Map> target(result.GetTransitionTarget()); int descriptor_number = target->LastAdded(); - ASSERT(target->instance_descriptors()->GetKey(descriptor_number) - ->Equals(*name)); + DCHECK(Name::Equals(name, + handle(target->instance_descriptors()->GetKey(descriptor_number)))); return TryAccessorTransition(object, target, descriptor_number, component, accessor, attributes); } @@ -6472,32 +6775,36 @@ ? AccessorPair::Copy(Handle<AccessorPair>(source_accessors)) : isolate->factory()->NewAccessorPair(); accessors->set(component, *accessor); - Handle<Map> new_map = CopyInsertDescriptor(Handle<Map>(object->map()), - name, accessors, attributes); + + CallbacksDescriptor new_accessors_desc(name, accessors, attributes); + Handle<Map> new_map = Map::CopyInsertDescriptor( + handle(object->map()), &new_accessors_desc, INSERT_TRANSITION); + JSObject::MigrateToMap(object, new_map); return true; } -Handle<Object> JSObject::SetAccessor(Handle<JSObject> object, - Handle<AccessorInfo> info) { +MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object, + Handle<AccessorInfo> info) { Isolate* isolate = object->GetIsolate(); Factory* factory = isolate->factory(); Handle<Name> name(Name::cast(info->name())); // Check access rights if needed. 
if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return factory->undefined_value(); } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return object; - ASSERT(proto->IsJSGlobalObject()); - return SetAccessor(Handle<JSObject>::cast(proto), info); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return object; + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return SetAccessor( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), info); } // Make sure that the top context does not change when doing callbacks or @@ -6505,11 +6812,7 @@ AssertNoContextChange ncc(isolate); // Try to flatten before operating on the string. - if (name->IsString()) FlattenString(Handle<String>::cast(name)); - - if (!JSObject::CanSetCallback(object, name)) { - return factory->undefined_value(); - } + if (name->IsString()) name = String::Flatten(Handle<String>::cast(name)); uint32_t index = 0; bool is_element = name->AsArrayIndex(&index); @@ -6548,7 +6851,7 @@ } else { // Lookup the name. LookupResult result(isolate); - object->LocalLookup(*name, &result, true); + object->LookupOwn(name, &result, true); // ES5 forbids turning a property into an accessor if it's not // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5). 
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) { @@ -6562,9 +6865,9 @@ } -Handle<Object> JSObject::GetAccessor(Handle<JSObject> object, - Handle<Name> name, - AccessorComponent component) { +MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object, + Handle<Name> name, + AccessorComponent component) { Isolate* isolate = object->GetIsolate(); // Make sure that the top context does not change when doing callbacks or @@ -6573,20 +6876,23 @@ // Check access rights if needed. if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + !isolate->MayNamedAccess(object, name, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return isolate->factory()->undefined_value(); } // Make the lookup and include prototypes. 
uint32_t index = 0; if (name->AsArrayIndex(&index)) { - for (Handle<Object> obj = object; - !obj->IsNull(); - obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) { - if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) { - JSObject* js_object = JSObject::cast(*obj); + for (PrototypeIterator iter(isolate, object, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (PrototypeIterator::GetCurrent(iter)->IsJSObject() && + JSObject::cast(*PrototypeIterator::GetCurrent(iter)) + ->HasDictionaryElements()) { + JSObject* js_object = + JSObject::cast(*PrototypeIterator::GetCurrent(iter)); SeededNumberDictionary* dictionary = js_object->element_dictionary(); int entry = dictionary->FindEntry(index); if (entry != SeededNumberDictionary::kNotFound) { @@ -6600,11 +6906,12 @@ } } } else { - for (Handle<Object> obj = object; - !obj->IsNull(); - obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) { + for (PrototypeIterator iter(isolate, object, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { LookupResult result(isolate); - JSReceiver::cast(*obj)->LocalLookup(*name, &result); + JSReceiver::cast(*PrototypeIterator::GetCurrent(iter)) + ->LookupOwn(name, &result); if (result.IsFound()) { if (result.IsReadOnly()) return isolate->factory()->undefined_value(); if (result.IsPropertyCallbacks()) { @@ -6627,9 +6934,10 @@ DescriptorArray* descs = map()->instance_descriptors(); for (int i = 0; i < number_of_own_descriptors; i++) { if (descs->GetType(i) == FIELD) { - Object* property = RawFastPropertyAt(descs->GetFieldIndex(i)); + Object* property = + RawFastPropertyAt(FieldIndex::ForDescriptor(map(), i)); if (descs->GetDetails(i).representation().IsDouble()) { - ASSERT(property->IsHeapNumber()); + DCHECK(property->IsMutableHeapNumber()); if (value->IsNumber() && property->Number() == value->Number()) { return descs->GetKey(i); } @@ -6649,60 +6957,93 @@ } -Handle<Map> Map::RawCopy(Handle<Map> 
map, - int instance_size) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - map->RawCopy(instance_size), - Map); -} - - -MaybeObject* Map::RawCopy(int instance_size) { - Map* result; - MaybeObject* maybe_result = - GetHeap()->AllocateMap(instance_type(), instance_size); - if (!maybe_result->To(&result)) return maybe_result; - - result->set_prototype(prototype()); - result->set_constructor(constructor()); - result->set_bit_field(bit_field()); - result->set_bit_field2(bit_field2()); - int new_bit_field3 = bit_field3(); +Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) { + Handle<Map> result = map->GetIsolate()->factory()->NewMap( + map->instance_type(), instance_size); + result->set_prototype(map->prototype()); + result->set_constructor(map->constructor()); + result->set_bit_field(map->bit_field()); + result->set_bit_field2(map->bit_field2()); + int new_bit_field3 = map->bit_field3(); new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true); new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0); new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCacheSentinel); new_bit_field3 = Deprecated::update(new_bit_field3, false); - if (!is_dictionary_map()) { + if (!map->is_dictionary_map()) { new_bit_field3 = IsUnstable::update(new_bit_field3, false); } + new_bit_field3 = ConstructionCount::update(new_bit_field3, + JSFunction::kNoSlackTracking); result->set_bit_field3(new_bit_field3); return result; } +Handle<Map> Map::Normalize(Handle<Map> fast_map, + PropertyNormalizationMode mode) { + DCHECK(!fast_map->is_dictionary_map()); + + Isolate* isolate = fast_map->GetIsolate(); + Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(), + isolate); + bool use_cache = !maybe_cache->IsUndefined(); + Handle<NormalizedMapCache> cache; + if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache); + + Handle<Map> new_map; + if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) { +#ifdef VERIFY_HEAP + 
if (FLAG_verify_heap) new_map->DictionaryMapVerify(); +#endif +#ifdef ENABLE_SLOW_DCHECKS + if (FLAG_enable_slow_asserts) { + // The cached map should match newly created normalized map bit-by-bit, + // except for the code cache, which can contain some ics which can be + // applied to the shared map. + Handle<Map> fresh = Map::CopyNormalized(fast_map, mode); + + DCHECK(memcmp(fresh->address(), + new_map->address(), + Map::kCodeCacheOffset) == 0); + STATIC_ASSERT(Map::kDependentCodeOffset == + Map::kCodeCacheOffset + kPointerSize); + int offset = Map::kDependentCodeOffset + kPointerSize; + DCHECK(memcmp(fresh->address() + offset, + new_map->address() + offset, + Map::kSize - offset) == 0); + } +#endif + } else { + new_map = Map::CopyNormalized(fast_map, mode); + if (use_cache) { + cache->Set(fast_map, new_map); + isolate->counters()->normalized_maps()->Increment(); + } + } + fast_map->NotifyLeafMapLayoutChange(); + return new_map; +} + + Handle<Map> Map::CopyNormalized(Handle<Map> map, - PropertyNormalizationMode mode, - NormalizedMapSharingMode sharing) { + PropertyNormalizationMode mode) { int new_instance_size = map->instance_size(); if (mode == CLEAR_INOBJECT_PROPERTIES) { new_instance_size -= map->inobject_properties() * kPointerSize; } - Handle<Map> result = Map::RawCopy(map, new_instance_size); + Handle<Map> result = RawCopy(map, new_instance_size); if (mode != CLEAR_INOBJECT_PROPERTIES) { result->set_inobject_properties(map->inobject_properties()); } - result->set_is_shared(sharing == SHARED_NORMALIZED_MAP); result->set_dictionary_map(true); result->set_migration_target(false); #ifdef VERIFY_HEAP - if (FLAG_verify_heap && result->is_shared()) { - result->SharedMapVerify(); - } + if (FLAG_verify_heap) result->DictionaryMapVerify(); #endif return result; @@ -6710,134 +7051,94 @@ Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) { - CALL_HEAP_FUNCTION(map->GetIsolate(), map->CopyDropDescriptors(), Map); -} - - -MaybeObject* Map::CopyDropDescriptors() { - 
Map* result; - MaybeObject* maybe_result = RawCopy(instance_size()); - if (!maybe_result->To(&result)) return maybe_result; + Handle<Map> result = RawCopy(map, map->instance_size()); // Please note instance_type and instance_size are set when allocated. - result->set_inobject_properties(inobject_properties()); - result->set_unused_property_fields(unused_property_fields()); + result->set_inobject_properties(map->inobject_properties()); + result->set_unused_property_fields(map->unused_property_fields()); - result->set_pre_allocated_property_fields(pre_allocated_property_fields()); - result->set_is_shared(false); - result->ClearCodeCache(GetHeap()); - NotifyLeafMapLayoutChange(); + result->set_pre_allocated_property_fields( + map->pre_allocated_property_fields()); + result->ClearCodeCache(map->GetHeap()); + map->NotifyLeafMapLayoutChange(); return result; } -MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors, - Descriptor* descriptor) { +Handle<Map> Map::ShareDescriptor(Handle<Map> map, + Handle<DescriptorArray> descriptors, + Descriptor* descriptor) { // Sanity check. This path is only to be taken if the map owns its descriptor // array, implying that its NumberOfOwnDescriptors equals the number of // descriptors in the descriptor array. 
- ASSERT(NumberOfOwnDescriptors() == - instance_descriptors()->number_of_descriptors()); - Map* result; - MaybeObject* maybe_result = CopyDropDescriptors(); - if (!maybe_result->To(&result)) return maybe_result; - - Name* name = descriptor->GetKey(); + DCHECK(map->NumberOfOwnDescriptors() == + map->instance_descriptors()->number_of_descriptors()); - TransitionArray* transitions; - MaybeObject* maybe_transitions = - AddTransition(name, result, SIMPLE_TRANSITION); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; + Handle<Map> result = CopyDropDescriptors(map); + Handle<Name> name = descriptor->GetKey(); - int old_size = descriptors->number_of_descriptors(); - - DescriptorArray* new_descriptors; - - if (descriptors->NumberOfSlackDescriptors() > 0) { - new_descriptors = descriptors; - new_descriptors->Append(descriptor); - } else { - // Descriptor arrays grow by 50%. - MaybeObject* maybe_descriptors = DescriptorArray::Allocate( - GetIsolate(), old_size, old_size < 4 ? 1 : old_size / 2); - if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; - - DescriptorArray::WhitenessWitness witness(new_descriptors); - - // Copy the descriptors, inserting a descriptor. - for (int i = 0; i < old_size; ++i) { - new_descriptors->CopyFrom(i, descriptors, i, witness); + // Ensure there's space for the new descriptor in the shared descriptor array. + if (descriptors->NumberOfSlackDescriptors() == 0) { + int old_size = descriptors->number_of_descriptors(); + if (old_size == 0) { + descriptors = DescriptorArray::Allocate(map->GetIsolate(), 0, 1); + } else { + EnsureDescriptorSlack(map, old_size < 4 ? 1 : old_size / 2); + descriptors = handle(map->instance_descriptors()); } + } - new_descriptors->Append(descriptor, witness); + { + DisallowHeapAllocation no_gc; + descriptors->Append(descriptor); + result->InitializeDescriptors(*descriptors); + } - if (old_size > 0) { - // If the source descriptors had an enum cache we copy it. 
This ensures - // that the maps to which we push the new descriptor array back can rely - // on a cache always being available once it is set. If the map has more - // enumerated descriptors than available in the original cache, the cache - // will be lazily replaced by the extended cache when needed. - if (descriptors->HasEnumCache()) { - new_descriptors->CopyEnumCacheFrom(descriptors); - } + DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1); + ConnectTransition(map, result, name, SIMPLE_TRANSITION); - Map* map; - // Replace descriptors by new_descriptors in all maps that share it. + return result; +} - GetHeap()->incremental_marking()->RecordWrites(descriptors); - for (Object* current = GetBackPointer(); - !current->IsUndefined(); - current = map->GetBackPointer()) { - map = Map::cast(current); - if (map->instance_descriptors() != descriptors) break; - map->set_instance_descriptors(new_descriptors); - } - set_instance_descriptors(new_descriptors); - } +void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child, + Handle<Name> name, SimpleTransitionFlag flag) { + parent->set_owns_descriptors(false); + if (parent->is_prototype_map()) { + DCHECK(child->is_prototype_map()); + } else { + Handle<TransitionArray> transitions = + TransitionArray::CopyInsert(parent, name, child, flag); + parent->set_transitions(*transitions); + child->SetBackPointer(*parent); } - - result->SetBackPointer(this); - result->InitializeDescriptors(new_descriptors); - ASSERT(result->NumberOfOwnDescriptors() == NumberOfOwnDescriptors() + 1); - - set_transitions(transitions); - set_owns_descriptors(false); - - return result; } Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map, Handle<DescriptorArray> descriptors, TransitionFlag flag, - Handle<Name> name) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - map->CopyReplaceDescriptors(*descriptors, flag, *name), - Map); -} - - -MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors, - TransitionFlag 
flag, - Name* name, - SimpleTransitionFlag simple_flag) { - ASSERT(descriptors->IsSortedNoDuplicates()); + MaybeHandle<Name> maybe_name, + SimpleTransitionFlag simple_flag) { + DCHECK(descriptors->IsSortedNoDuplicates()); - Map* result; - MaybeObject* maybe_result = CopyDropDescriptors(); - if (!maybe_result->To(&result)) return maybe_result; - - result->InitializeDescriptors(descriptors); + Handle<Map> result = CopyDropDescriptors(map); + result->InitializeDescriptors(*descriptors); - if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) { - TransitionArray* transitions; - MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; - set_transitions(transitions); - result->SetBackPointer(this); - } else { - descriptors->InitializeRepresentations(Representation::Tagged()); + if (!map->is_prototype_map()) { + if (flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()) { + Handle<Name> name; + CHECK(maybe_name.ToHandle(&name)); + ConnectTransition(map, result, name, simple_flag); + } else { + int length = descriptors->number_of_descriptors(); + for (int i = 0; i < length; i++) { + descriptors->SetRepresentation(i, Representation::Tagged()); + if (descriptors->GetDetails(i).type() == FIELD) { + descriptors->SetValue(i, HeapType::Any()); + } + } + } } return result; @@ -6849,9 +7150,9 @@ Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors) { - ASSERT(descriptors->IsSortedNoDuplicates()); + DCHECK(descriptors->IsSortedNoDuplicates()); - Handle<Map> result = Map::CopyDropDescriptors(map); + Handle<Map> result = CopyDropDescriptors(map); result->InitializeDescriptors(*descriptors); result->SetNumberOfOwnDescriptors(new_descriptor + 1); @@ -6865,65 +7166,53 @@ } result->set_unused_property_fields(unused_property_fields); - result->set_owns_descriptors(false); Handle<Name> name = 
handle(descriptors->GetKey(new_descriptor)); - Handle<TransitionArray> transitions = Map::AddTransition(map, name, result, - SIMPLE_TRANSITION); - - map->set_transitions(*transitions); - result->SetBackPointer(*map); + ConnectTransition(map, result, name, SIMPLE_TRANSITION); return result; } -MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) { +Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind, + TransitionFlag flag) { if (flag == INSERT_TRANSITION) { - ASSERT(!HasElementsTransition() || - ((elements_transition_map()->elements_kind() == DICTIONARY_ELEMENTS || + DCHECK(!map->HasElementsTransition() || + ((map->elements_transition_map()->elements_kind() == + DICTIONARY_ELEMENTS || IsExternalArrayElementsKind( - elements_transition_map()->elements_kind())) && + map->elements_transition_map()->elements_kind())) && (kind == DICTIONARY_ELEMENTS || IsExternalArrayElementsKind(kind)))); - ASSERT(!IsFastElementsKind(kind) || - IsMoreGeneralElementsKindTransition(elements_kind(), kind)); - ASSERT(kind != elements_kind()); + DCHECK(!IsFastElementsKind(kind) || + IsMoreGeneralElementsKindTransition(map->elements_kind(), kind)); + DCHECK(kind != map->elements_kind()); } bool insert_transition = - flag == INSERT_TRANSITION && !HasElementsTransition(); + flag == INSERT_TRANSITION && !map->HasElementsTransition(); - if (insert_transition && owns_descriptors()) { + if (insert_transition && map->owns_descriptors()) { // In case the map owned its own descriptors, share the descriptors and // transfer ownership to the new map. 
- Map* new_map; - MaybeObject* maybe_new_map = CopyDropDescriptors(); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + Handle<Map> new_map = CopyDropDescriptors(map); - MaybeObject* added_elements = set_elements_transition_map(new_map); - if (added_elements->IsFailure()) return added_elements; + ConnectElementsTransition(map, new_map); new_map->set_elements_kind(kind); - new_map->InitializeDescriptors(instance_descriptors()); - new_map->SetBackPointer(this); - set_owns_descriptors(false); + new_map->InitializeDescriptors(map->instance_descriptors()); return new_map; } // In case the map did not own its own descriptors, a split is forced by // copying the map; creating a new descriptor array cell. // Create a new free-floating map only if we are not allowed to store it. - Map* new_map; - MaybeObject* maybe_new_map = Copy(); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + Handle<Map> new_map = Copy(map); new_map->set_elements_kind(kind); if (insert_transition) { - MaybeObject* added_elements = set_elements_transition_map(new_map); - if (added_elements->IsFailure()) return added_elements; - new_map->SetBackPointer(this); + ConnectElementsTransition(map, new_map); } return new_map; @@ -6931,7 +7220,7 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) { - ASSERT(!map->is_observed()); + DCHECK(!map->is_observed()); Isolate* isolate = map->GetIsolate(); @@ -6939,234 +7228,305 @@ // transfer ownership to the new map. 
Handle<Map> new_map; if (map->owns_descriptors()) { - new_map = Map::CopyDropDescriptors(map); + new_map = CopyDropDescriptors(map); } else { - new_map = Map::Copy(map); + DCHECK(!map->is_prototype_map()); + new_map = Copy(map); } - Handle<TransitionArray> transitions = - Map::AddTransition(map, isolate->factory()->observed_symbol(), new_map, - FULL_TRANSITION); - - map->set_transitions(*transitions); - new_map->set_is_observed(); - if (map->owns_descriptors()) { new_map->InitializeDescriptors(map->instance_descriptors()); - map->set_owns_descriptors(false); } - new_map->SetBackPointer(*map); + Handle<Name> name = isolate->factory()->observed_symbol(); + ConnectTransition(map, new_map, name, FULL_TRANSITION); + return new_map; } -MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() { - if (pre_allocated_property_fields() == 0) return CopyDropDescriptors(); - - // If the map has pre-allocated properties always start out with a descriptor - // array describing these properties. - ASSERT(constructor()->IsJSFunction()); - JSFunction* ctor = JSFunction::cast(constructor()); - Map* map = ctor->initial_map(); - DescriptorArray* descriptors = map->instance_descriptors(); - +Handle<Map> Map::Copy(Handle<Map> map) { + Handle<DescriptorArray> descriptors(map->instance_descriptors()); int number_of_own_descriptors = map->NumberOfOwnDescriptors(); - DescriptorArray* new_descriptors; - MaybeObject* maybe_descriptors = - descriptors->CopyUpTo(number_of_own_descriptors); - if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; + Handle<DescriptorArray> new_descriptors = + DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors); + return CopyReplaceDescriptors( + map, new_descriptors, OMIT_TRANSITION, MaybeHandle<Name>()); +} + - return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION); +Handle<Map> Map::Create(Handle<JSFunction> constructor, + int extra_inobject_properties) { + Handle<Map> copy = Copy(handle(constructor->initial_map())); + + 
// Check that we do not overflow the instance size when adding the + // extra inobject properties. + int instance_size_delta = extra_inobject_properties * kPointerSize; + int max_instance_size_delta = + JSObject::kMaxInstanceSize - copy->instance_size(); + int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2; + + // If the instance size overflows, we allocate as many properties as we can as + // inobject properties. + if (extra_inobject_properties > max_extra_properties) { + instance_size_delta = max_instance_size_delta; + extra_inobject_properties = max_extra_properties; + } + + // Adjust the map with the extra inobject properties. + int inobject_properties = + copy->inobject_properties() + extra_inobject_properties; + copy->set_inobject_properties(inobject_properties); + copy->set_unused_property_fields(inobject_properties); + copy->set_instance_size(copy->instance_size() + instance_size_delta); + copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy)); + return copy; } -Handle<Map> Map::Copy(Handle<Map> map) { - CALL_HEAP_FUNCTION(map->GetIsolate(), map->Copy(), Map); +Handle<Map> Map::CopyForFreeze(Handle<Map> map) { + int num_descriptors = map->NumberOfOwnDescriptors(); + Isolate* isolate = map->GetIsolate(); + Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes( + handle(map->instance_descriptors(), isolate), num_descriptors, FROZEN); + Handle<Map> new_map = CopyReplaceDescriptors( + map, new_desc, INSERT_TRANSITION, isolate->factory()->frozen_symbol()); + new_map->freeze(); + new_map->set_is_extensible(false); + new_map->set_elements_kind(DICTIONARY_ELEMENTS); + return new_map; } -MaybeObject* Map::Copy() { - DescriptorArray* descriptors = instance_descriptors(); - DescriptorArray* new_descriptors; - int number_of_own_descriptors = NumberOfOwnDescriptors(); - MaybeObject* maybe_descriptors = - descriptors->CopyUpTo(number_of_own_descriptors); - if (!maybe_descriptors->To(&new_descriptors)) return 
maybe_descriptors; +bool DescriptorArray::CanHoldValue(int descriptor, Object* value) { + PropertyDetails details = GetDetails(descriptor); + switch (details.type()) { + case FIELD: + return value->FitsRepresentation(details.representation()) && + GetFieldType(descriptor)->NowContains(value); + + case CONSTANT: + DCHECK(GetConstant(descriptor) != value || + value->FitsRepresentation(details.representation())); + return GetConstant(descriptor) == value; + + case CALLBACKS: + return false; + + case NORMAL: + case INTERCEPTOR: + case HANDLER: + case NONEXISTENT: + break; + } - return CopyReplaceDescriptors(new_descriptors, OMIT_TRANSITION); + UNREACHABLE(); + return false; } -MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor, - TransitionFlag flag) { - DescriptorArray* descriptors = instance_descriptors(); +Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor, + Handle<Object> value) { + // Dictionaries can store any property value. + if (map->is_dictionary_map()) return map; - // Ensure the key is unique. - MaybeObject* maybe_failure = descriptor->KeyToUniqueName(); - if (maybe_failure->IsFailure()) return maybe_failure; + // Migrate to the newest map before storing the property. 
+ if (map->is_deprecated()) map = Update(map); - int old_size = NumberOfOwnDescriptors(); - int new_size = old_size + 1; + Handle<DescriptorArray> descriptors(map->instance_descriptors()); - if (flag == INSERT_TRANSITION && - owns_descriptors() && - CanHaveMoreTransitions()) { - return ShareDescriptor(descriptors, descriptor); - } + if (descriptors->CanHoldValue(descriptor, *value)) return map; - DescriptorArray* new_descriptors; - MaybeObject* maybe_descriptors = - DescriptorArray::Allocate(GetIsolate(), old_size, 1); - if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; + Isolate* isolate = map->GetIsolate(); + Representation representation = value->OptimalRepresentation(); + Handle<HeapType> type = value->OptimalType(isolate, representation); + + return GeneralizeRepresentation(map, descriptor, representation, type, + FORCE_FIELD); +} - DescriptorArray::WhitenessWitness witness(new_descriptors); - // Copy the descriptors, inserting a descriptor. - for (int i = 0; i < old_size; ++i) { - new_descriptors->CopyFrom(i, descriptors, i, witness); +Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StoreFromKeyed store_mode) { + // Dictionary maps can always have additional data properties. + if (map->is_dictionary_map()) return map; + + // Migrate to the newest map before transitioning to the new property. + if (map->is_deprecated()) map = Update(map); + + int index = map->SearchTransition(*name); + if (index != TransitionArray::kNotFound) { + Handle<Map> transition(map->GetTransition(index)); + int descriptor = transition->LastAdded(); + + // TODO(verwaest): Handle attributes better. 
+ DescriptorArray* descriptors = transition->instance_descriptors(); + if (descriptors->GetDetails(descriptor).attributes() != attributes) { + return CopyGeneralizeAllRepresentations(transition, descriptor, + FORCE_FIELD, attributes, + "attributes mismatch"); + } + + return Map::PrepareForDataProperty(transition, descriptor, value); + } + + TransitionFlag flag = INSERT_TRANSITION; + MaybeHandle<Map> maybe_map; + if (value->IsJSFunction()) { + maybe_map = Map::CopyWithConstant(map, name, value, attributes, flag); + } else if (!map->TooManyFastProperties(store_mode)) { + Isolate* isolate = name->GetIsolate(); + Representation representation = value->OptimalRepresentation(); + Handle<HeapType> type = value->OptimalType(isolate, representation); + maybe_map = + Map::CopyWithField(map, name, type, attributes, representation, flag); + } + + Handle<Map> result; + if (!maybe_map.ToHandle(&result)) { + return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES); } - if (old_size != descriptors->number_of_descriptors()) { - new_descriptors->SetNumberOfDescriptors(new_size); - new_descriptors->Set(old_size, descriptor, witness); - new_descriptors->Sort(); - } else { - new_descriptors->Append(descriptor, witness); + return result; +} + + +Handle<Map> Map::CopyAddDescriptor(Handle<Map> map, + Descriptor* descriptor, + TransitionFlag flag) { + Handle<DescriptorArray> descriptors(map->instance_descriptors()); + + // Ensure the key is unique. 
+ descriptor->KeyToUniqueName(); + + if (flag == INSERT_TRANSITION && + map->owns_descriptors() && + map->CanHaveMoreTransitions()) { + return ShareDescriptor(map, descriptors, descriptor); } - Name* key = descriptor->GetKey(); - return CopyReplaceDescriptors(new_descriptors, flag, key, SIMPLE_TRANSITION); + Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo( + descriptors, map->NumberOfOwnDescriptors(), 1); + new_descriptors->Append(descriptor); + + return CopyReplaceDescriptors( + map, new_descriptors, flag, descriptor->GetKey(), SIMPLE_TRANSITION); } -MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor, - TransitionFlag flag) { - DescriptorArray* old_descriptors = instance_descriptors(); +Handle<Map> Map::CopyInsertDescriptor(Handle<Map> map, + Descriptor* descriptor, + TransitionFlag flag) { + Handle<DescriptorArray> old_descriptors(map->instance_descriptors()); // Ensure the key is unique. - MaybeObject* maybe_result = descriptor->KeyToUniqueName(); - if (maybe_result->IsFailure()) return maybe_result; + descriptor->KeyToUniqueName(); // We replace the key if it is already present. 
- int index = old_descriptors->SearchWithCache(descriptor->GetKey(), this); + int index = old_descriptors->SearchWithCache(*descriptor->GetKey(), *map); if (index != DescriptorArray::kNotFound) { - return CopyReplaceDescriptor(old_descriptors, descriptor, index, flag); + return CopyReplaceDescriptor(map, old_descriptors, descriptor, index, flag); } - return CopyAddDescriptor(descriptor, flag); + return CopyAddDescriptor(map, descriptor, flag); } -Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes( +Handle<DescriptorArray> DescriptorArray::CopyUpTo( Handle<DescriptorArray> desc, int enumeration_index, - PropertyAttributes attributes) { - CALL_HEAP_FUNCTION(desc->GetIsolate(), - desc->CopyUpToAddAttributes(enumeration_index, attributes), - DescriptorArray); + int slack) { + return DescriptorArray::CopyUpToAddAttributes( + desc, enumeration_index, NONE, slack); } -MaybeObject* DescriptorArray::CopyUpToAddAttributes( - int enumeration_index, PropertyAttributes attributes) { - if (enumeration_index == 0) return GetHeap()->empty_descriptor_array(); +Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes( + Handle<DescriptorArray> desc, + int enumeration_index, + PropertyAttributes attributes, + int slack) { + if (enumeration_index + slack == 0) { + return desc->GetIsolate()->factory()->empty_descriptor_array(); + } int size = enumeration_index; - DescriptorArray* descriptors; - MaybeObject* maybe_descriptors = Allocate(GetIsolate(), size); - if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors; - DescriptorArray::WhitenessWitness witness(descriptors); + Handle<DescriptorArray> descriptors = + DescriptorArray::Allocate(desc->GetIsolate(), size, slack); + DescriptorArray::WhitenessWitness witness(*descriptors); if (attributes != NONE) { for (int i = 0; i < size; ++i) { - Object* value = GetValue(i); - PropertyDetails details = GetDetails(i); - int mask = DONT_DELETE | DONT_ENUM; - // READ_ONLY is an invalid attribute for JS 
setters/getters. - if (details.type() != CALLBACKS || !value->IsAccessorPair()) { - mask |= READ_ONLY; + Object* value = desc->GetValue(i); + Name* key = desc->GetKey(i); + PropertyDetails details = desc->GetDetails(i); + // Bulk attribute changes never affect private properties. + if (!key->IsSymbol() || !Symbol::cast(key)->is_private()) { + int mask = DONT_DELETE | DONT_ENUM; + // READ_ONLY is an invalid attribute for JS setters/getters. + if (details.type() != CALLBACKS || !value->IsAccessorPair()) { + mask |= READ_ONLY; + } + details = details.CopyAddAttributes( + static_cast<PropertyAttributes>(attributes & mask)); } - details = details.CopyAddAttributes( - static_cast<PropertyAttributes>(attributes & mask)); - Descriptor desc(GetKey(i), value, details); - descriptors->Set(i, &desc, witness); + Descriptor inner_desc( + handle(key), handle(value, desc->GetIsolate()), details); + descriptors->Set(i, &inner_desc, witness); } } else { for (int i = 0; i < size; ++i) { - descriptors->CopyFrom(i, this, i, witness); + descriptors->CopyFrom(i, *desc, witness); } } - if (number_of_descriptors() != enumeration_index) descriptors->Sort(); + if (desc->number_of_descriptors() != enumeration_index) descriptors->Sort(); return descriptors; } -MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors, - Descriptor* descriptor, - int insertion_index, - TransitionFlag flag) { +Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map, + Handle<DescriptorArray> descriptors, + Descriptor* descriptor, + int insertion_index, + TransitionFlag flag) { // Ensure the key is unique. 
- MaybeObject* maybe_failure = descriptor->KeyToUniqueName(); - if (maybe_failure->IsFailure()) return maybe_failure; - - Name* key = descriptor->GetKey(); - ASSERT(key == descriptors->GetKey(insertion_index)); + descriptor->KeyToUniqueName(); - int new_size = NumberOfOwnDescriptors(); - ASSERT(0 <= insertion_index && insertion_index < new_size); + Handle<Name> key = descriptor->GetKey(); + DCHECK(*key == descriptors->GetKey(insertion_index)); - ASSERT_LT(insertion_index, new_size); - - DescriptorArray* new_descriptors; - MaybeObject* maybe_descriptors = - DescriptorArray::Allocate(GetIsolate(), new_size); - if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; - DescriptorArray::WhitenessWitness witness(new_descriptors); - - for (int i = 0; i < new_size; ++i) { - if (i == insertion_index) { - new_descriptors->Set(i, descriptor, witness); - } else { - new_descriptors->CopyFrom(i, descriptors, i, witness); - } - } + Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo( + descriptors, map->NumberOfOwnDescriptors()); - // Re-sort if descriptors were removed. - if (new_size != descriptors->length()) new_descriptors->Sort(); + new_descriptors->Replace(insertion_index, descriptor); SimpleTransitionFlag simple_flag = (insertion_index == descriptors->number_of_descriptors() - 1) ? SIMPLE_TRANSITION : FULL_TRANSITION; - return CopyReplaceDescriptors(new_descriptors, flag, key, simple_flag); + return CopyReplaceDescriptors(map, new_descriptors, flag, key, simple_flag); } void Map::UpdateCodeCache(Handle<Map> map, Handle<Name> name, - Handle<Code> code) { - Isolate* isolate = map->GetIsolate(); - CALL_HEAP_FUNCTION_VOID(isolate, - map->UpdateCodeCache(*name, *code)); -} - - -MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) { + Handle<Code> code) { + Isolate* isolate = map->GetIsolate(); + HandleScope scope(isolate); // Allocate the code cache if not present. 
- if (code_cache()->IsFixedArray()) { - Object* result; - { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache(); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - set_code_cache(result); + if (map->code_cache()->IsFixedArray()) { + Handle<Object> result = isolate->factory()->NewCodeCache(); + map->set_code_cache(*result); } // Update the code cache. - return CodeCache::cast(code_cache())->Update(name, code); + Handle<CodeCache> code_cache(CodeCache::cast(map->code_cache()), isolate); + CodeCache::Update(code_cache, name, code); } @@ -7192,79 +7552,97 @@ void Map::RemoveFromCodeCache(Name* name, Code* code, int index) { // No GC is supposed to happen between a call to IndexInCodeCache and // RemoveFromCodeCache so the code cache must be there. - ASSERT(!code_cache()->IsFixedArray()); + DCHECK(!code_cache()->IsFixedArray()); CodeCache::cast(code_cache())->RemoveByIndex(name, code, index); } -// An iterator over all map transitions in an descriptor array, reusing the map -// field of the contens array while it is running. +// An iterator over all map transitions in an descriptor array, reusing the +// constructor field of the map while it is running. Negative values in +// the constructor field indicate an active map transition iteration. The +// original constructor is restored after iterating over all entries. 
class IntrusiveMapTransitionIterator { public: - explicit IntrusiveMapTransitionIterator(TransitionArray* transition_array) - : transition_array_(transition_array) { } - - void Start() { - ASSERT(!IsIterating()); - *TransitionArrayHeader() = Smi::FromInt(0); + IntrusiveMapTransitionIterator( + Map* map, TransitionArray* transition_array, Object* constructor) + : map_(map), + transition_array_(transition_array), + constructor_(constructor) { } + + void StartIfNotStarted() { + DCHECK(!(*IteratorField())->IsSmi() || IsIterating()); + if (!(*IteratorField())->IsSmi()) { + DCHECK(*IteratorField() == constructor_); + *IteratorField() = Smi::FromInt(-1); + } } bool IsIterating() { - return (*TransitionArrayHeader())->IsSmi(); + return (*IteratorField())->IsSmi() && + Smi::cast(*IteratorField())->value() < 0; } Map* Next() { - ASSERT(IsIterating()); - int index = Smi::cast(*TransitionArrayHeader())->value(); + DCHECK(IsIterating()); + int value = Smi::cast(*IteratorField())->value(); + int index = -value - 1; int number_of_transitions = transition_array_->number_of_transitions(); while (index < number_of_transitions) { - *TransitionArrayHeader() = Smi::FromInt(index + 1); + *IteratorField() = Smi::FromInt(value - 1); return transition_array_->GetTarget(index); } - *TransitionArrayHeader() = transition_array_->GetHeap()->fixed_array_map(); + *IteratorField() = constructor_; return NULL; } private: - Object** TransitionArrayHeader() { - return HeapObject::RawField(transition_array_, TransitionArray::kMapOffset); + Object** IteratorField() { + return HeapObject::RawField(map_, Map::kConstructorOffset); } + Map* map_; TransitionArray* transition_array_; + Object* constructor_; }; -// An iterator over all prototype transitions, reusing the map field of the -// underlying array while it is running. +// An iterator over all prototype transitions, reusing the constructor field +// of the map while it is running. 
Positive values in the constructor field +// indicate an active prototype transition iteration. The original constructor +// is restored after iterating over all entries. class IntrusivePrototypeTransitionIterator { public: - explicit IntrusivePrototypeTransitionIterator(HeapObject* proto_trans) - : proto_trans_(proto_trans) { } - - void Start() { - ASSERT(!IsIterating()); - *Header() = Smi::FromInt(0); + IntrusivePrototypeTransitionIterator( + Map* map, HeapObject* proto_trans, Object* constructor) + : map_(map), proto_trans_(proto_trans), constructor_(constructor) { } + + void StartIfNotStarted() { + if (!(*IteratorField())->IsSmi()) { + DCHECK(*IteratorField() == constructor_); + *IteratorField() = Smi::FromInt(0); + } } bool IsIterating() { - return (*Header())->IsSmi(); + return (*IteratorField())->IsSmi() && + Smi::cast(*IteratorField())->value() >= 0; } Map* Next() { - ASSERT(IsIterating()); - int transitionNumber = Smi::cast(*Header())->value(); + DCHECK(IsIterating()); + int transitionNumber = Smi::cast(*IteratorField())->value(); if (transitionNumber < NumberOfTransitions()) { - *Header() = Smi::FromInt(transitionNumber + 1); + *IteratorField() = Smi::FromInt(transitionNumber + 1); return GetTransition(transitionNumber); } - *Header() = proto_trans_->GetHeap()->fixed_array_map(); + *IteratorField() = constructor_; return NULL; } private: - Object** Header() { - return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset); + Object** IteratorField() { + return HeapObject::RawField(map_, Map::kConstructorOffset); } int NumberOfTransitions() { @@ -7284,29 +7662,33 @@ transitionNumber * Map::kProtoTransitionElementsPerEntry; } + Map* map_; HeapObject* proto_trans_; + Object* constructor_; }; // To traverse the transition tree iteratively, we have to store two kinds of // information in a map: The parent map in the traversal and which children of a // node have already been visited. 
To do this without additional memory, we -// temporarily reuse two maps with known values: +// temporarily reuse two fields with known values: // // (1) The map of the map temporarily holds the parent, and is restored to the // meta map afterwards. // // (2) The info which children have already been visited depends on which part -// of the map we currently iterate: +// of the map we currently iterate. We use the constructor field of the +// map to store the current index. We can do that because the constructor +// is the same for all involved maps. // // (a) If we currently follow normal map transitions, we temporarily store -// the current index in the map of the FixedArray of the desciptor -// array's contents, and restore it to the fixed array map afterwards. -// Note that a single descriptor can have 0, 1, or 2 transitions. +// the current index in the constructor field, and restore it to the +// original constructor afterwards. Note that a single descriptor can +// have 0, 1, or 2 transitions. // // (b) If we currently follow prototype transitions, we temporarily store -// the current index in the map of the FixedArray holding the prototype -// transitions, and restore it to the fixed array map afterwards. +// the current index in the constructor field, and restore it to the +// original constructor afterwards. // // Note that the child iterator is just a concatenation of two iterators: One // iterating over map transitions and one iterating over prototype transisitons. @@ -7323,38 +7705,29 @@ return old_parent; } - // Start iterating over this map's children, possibly destroying a FixedArray - // map (see explanation above). - void ChildIteratorStart() { - if (HasTransitionArray()) { - if (HasPrototypeTransitions()) { - IntrusivePrototypeTransitionIterator(GetPrototypeTransitions()).Start(); - } - - IntrusiveMapTransitionIterator(transitions()).Start(); - } - } - // If we have an unvisited child map, return that one and advance. 
If we have - // none, return NULL and reset any destroyed FixedArray maps. - TraversableMap* ChildIteratorNext() { - TransitionArray* transition_array = unchecked_transition_array(); - if (!transition_array->map()->IsSmi() && - !transition_array->IsTransitionArray()) { - return NULL; - } + // none, return NULL and restore the overwritten constructor field. + TraversableMap* ChildIteratorNext(Object* constructor) { + if (!HasTransitionArray()) return NULL; + TransitionArray* transition_array = transitions(); if (transition_array->HasPrototypeTransitions()) { HeapObject* proto_transitions = - transition_array->UncheckedPrototypeTransitions(); - IntrusivePrototypeTransitionIterator proto_iterator(proto_transitions); + transition_array->GetPrototypeTransitions(); + IntrusivePrototypeTransitionIterator proto_iterator(this, + proto_transitions, + constructor); + proto_iterator.StartIfNotStarted(); if (proto_iterator.IsIterating()) { Map* next = proto_iterator.Next(); if (next != NULL) return static_cast<TraversableMap*>(next); } } - IntrusiveMapTransitionIterator transition_iterator(transition_array); + IntrusiveMapTransitionIterator transition_iterator(this, + transition_array, + constructor); + transition_iterator.StartIfNotStarted(); if (transition_iterator.IsIterating()) { Map* next = transition_iterator.Next(); if (next != NULL) return static_cast<TraversableMap*>(next); @@ -7368,12 +7741,16 @@ // Traverse the transition tree in postorder without using the C++ stack by // doing pointer reversal. void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { + // Make sure that we do not allocate in the callback. + DisallowHeapAllocation no_allocation; + TraversableMap* current = static_cast<TraversableMap*>(this); - current->ChildIteratorStart(); + // Get the root constructor here to restore it later when finished iterating + // over maps. 
+ Object* root_constructor = constructor(); while (true) { - TraversableMap* child = current->ChildIteratorNext(); + TraversableMap* child = current->ChildIteratorNext(root_constructor); if (child != NULL) { - child->ChildIteratorStart(); child->SetParent(current); current = child; } else { @@ -7386,30 +7763,29 @@ } -MaybeObject* CodeCache::Update(Name* name, Code* code) { +void CodeCache::Update( + Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) { // The number of monomorphic stubs for normal load/store/call IC's can grow to // a large number and therefore they need to go into a hash table. They are // used to load global properties from cells. if (code->type() == Code::NORMAL) { // Make sure that a hash table is allocated for the normal load code cache. - if (normal_type_cache()->IsUndefined()) { - Object* result; - { MaybeObject* maybe_result = - CodeCacheHashTable::Allocate(GetHeap(), - CodeCacheHashTable::kInitialSize); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - set_normal_type_cache(result); + if (code_cache->normal_type_cache()->IsUndefined()) { + Handle<Object> result = + CodeCacheHashTable::New(code_cache->GetIsolate(), + CodeCacheHashTable::kInitialSize); + code_cache->set_normal_type_cache(*result); } - return UpdateNormalTypeCache(name, code); + UpdateNormalTypeCache(code_cache, name, code); } else { - ASSERT(default_cache()->IsFixedArray()); - return UpdateDefaultCache(name, code); + DCHECK(code_cache->default_cache()->IsFixedArray()); + UpdateDefaultCache(code_cache, name, code); } } -MaybeObject* CodeCache::UpdateDefaultCache(Name* name, Code* code) { +void CodeCache::UpdateDefaultCache( + Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) { // When updating the default code cache we disregard the type encoded in the // flags. This allows call constant stubs to overwrite call field // stubs, etc. 
@@ -7417,67 +7793,63 @@ // First check whether we can update existing code cache without // extending it. - FixedArray* cache = default_cache(); + Handle<FixedArray> cache = handle(code_cache->default_cache()); int length = cache->length(); - int deleted_index = -1; - for (int i = 0; i < length; i += kCodeCacheEntrySize) { - Object* key = cache->get(i); - if (key->IsNull()) { - if (deleted_index < 0) deleted_index = i; - continue; - } - if (key->IsUndefined()) { - if (deleted_index >= 0) i = deleted_index; - cache->set(i + kCodeCacheEntryNameOffset, name); - cache->set(i + kCodeCacheEntryCodeOffset, code); - return this; - } - if (name->Equals(Name::cast(key))) { - Code::Flags found = - Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags(); - if (Code::RemoveTypeFromFlags(found) == flags) { - cache->set(i + kCodeCacheEntryCodeOffset, code); - return this; + { + DisallowHeapAllocation no_alloc; + int deleted_index = -1; + for (int i = 0; i < length; i += kCodeCacheEntrySize) { + Object* key = cache->get(i); + if (key->IsNull()) { + if (deleted_index < 0) deleted_index = i; + continue; + } + if (key->IsUndefined()) { + if (deleted_index >= 0) i = deleted_index; + cache->set(i + kCodeCacheEntryNameOffset, *name); + cache->set(i + kCodeCacheEntryCodeOffset, *code); + return; + } + if (name->Equals(Name::cast(key))) { + Code::Flags found = + Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags(); + if (Code::RemoveTypeFromFlags(found) == flags) { + cache->set(i + kCodeCacheEntryCodeOffset, *code); + return; + } } } - } - // Reached the end of the code cache. If there were deleted - // elements, reuse the space for the first of them. - if (deleted_index >= 0) { - cache->set(deleted_index + kCodeCacheEntryNameOffset, name); - cache->set(deleted_index + kCodeCacheEntryCodeOffset, code); - return this; + // Reached the end of the code cache. If there were deleted + // elements, reuse the space for the first of them. 
+ if (deleted_index >= 0) { + cache->set(deleted_index + kCodeCacheEntryNameOffset, *name); + cache->set(deleted_index + kCodeCacheEntryCodeOffset, *code); + return; + } } // Extend the code cache with some new entries (at least one). Must be a // multiple of the entry size. int new_length = length + ((length >> 1)) + kCodeCacheEntrySize; new_length = new_length - new_length % kCodeCacheEntrySize; - ASSERT((new_length % kCodeCacheEntrySize) == 0); - Object* result; - { MaybeObject* maybe_result = cache->CopySize(new_length); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + DCHECK((new_length % kCodeCacheEntrySize) == 0); + cache = FixedArray::CopySize(cache, new_length); // Add the (name, code) pair to the new cache. - cache = FixedArray::cast(result); - cache->set(length + kCodeCacheEntryNameOffset, name); - cache->set(length + kCodeCacheEntryCodeOffset, code); - set_default_cache(cache); - return this; + cache->set(length + kCodeCacheEntryNameOffset, *name); + cache->set(length + kCodeCacheEntryCodeOffset, *code); + code_cache->set_default_cache(*cache); } -MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) { +void CodeCache::UpdateNormalTypeCache( + Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) { // Adding a new entry can cause a new cache to be allocated. 
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache()); - Object* new_cache; - { MaybeObject* maybe_new_cache = cache->Put(name, code); - if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache; - } - set_normal_type_cache(new_cache); - return this; + Handle<CodeCacheHashTable> cache( + CodeCacheHashTable::cast(code_cache->normal_type_cache())); + Handle<Object> new_cache = CodeCacheHashTable::Put(cache, name, code); + code_cache->set_normal_type_cache(*new_cache); } @@ -7538,17 +7910,17 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) { if (code->type() == Code::NORMAL) { - ASSERT(!normal_type_cache()->IsUndefined()); + DCHECK(!normal_type_cache()->IsUndefined()); CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache()); - ASSERT(cache->GetIndex(Name::cast(name), code->flags()) == index); + DCHECK(cache->GetIndex(Name::cast(name), code->flags()) == index); cache->RemoveByIndex(index); } else { FixedArray* array = default_cache(); - ASSERT(array->length() >= index && array->get(index)->IsCode()); + DCHECK(array->length() >= index && array->get(index)->IsCode()); // Use null instead of undefined for deleted elements to distinguish // deleted elements from unused elements. This distinction is used // when looking up in the cache and when updating the cache. - ASSERT_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset); + DCHECK_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset); array->set_null(index - 1); // Name. array->set_null(index); // Code. } @@ -7561,14 +7933,13 @@ // lookup not to create a new entry. 
class CodeCacheHashTableKey : public HashTableKey { public: - CodeCacheHashTableKey(Name* name, Code::Flags flags) - : name_(name), flags_(flags), code_(NULL) { } + CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags) + : name_(name), flags_(flags), code_() { } - CodeCacheHashTableKey(Name* name, Code* code) + CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code) : name_(name), flags_(code->flags()), code_(code) { } - - bool IsMatch(Object* other) { + bool IsMatch(Object* other) V8_OVERRIDE { if (!other->IsFixedArray()) return false; FixedArray* pair = FixedArray::cast(other); Name* name = Name::cast(pair->get(0)); @@ -7583,75 +7954,66 @@ return name->Hash() ^ flags; } - uint32_t Hash() { return NameFlagsHashHelper(name_, flags_); } + uint32_t Hash() V8_OVERRIDE { return NameFlagsHashHelper(*name_, flags_); } - uint32_t HashForObject(Object* obj) { + uint32_t HashForObject(Object* obj) V8_OVERRIDE { FixedArray* pair = FixedArray::cast(obj); Name* name = Name::cast(pair->get(0)); Code* code = Code::cast(pair->get(1)); return NameFlagsHashHelper(name, code->flags()); } - MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) { - ASSERT(code_ != NULL); - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateFixedArray(2); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* pair = FixedArray::cast(obj); - pair->set(0, name_); - pair->set(1, code_); + MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { + Handle<Code> code = code_.ToHandleChecked(); + Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2); + pair->set(0, *name_); + pair->set(1, *code); return pair; } private: - Name* name_; + Handle<Name> name_; Code::Flags flags_; // TODO(jkummerow): We should be able to get by without this. 
- Code* code_; + MaybeHandle<Code> code_; }; Object* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) { - CodeCacheHashTableKey key(name, flags); + DisallowHeapAllocation no_alloc; + CodeCacheHashTableKey key(handle(name), flags); int entry = FindEntry(&key); if (entry == kNotFound) return GetHeap()->undefined_value(); return get(EntryToIndex(entry) + 1); } -MaybeObject* CodeCacheHashTable::Put(Name* name, Code* code) { +Handle<CodeCacheHashTable> CodeCacheHashTable::Put( + Handle<CodeCacheHashTable> cache, Handle<Name> name, Handle<Code> code) { CodeCacheHashTableKey key(name, code); - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, &key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - // Don't use |this|, as the table might have grown. - CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj); + Handle<CodeCacheHashTable> new_cache = EnsureCapacity(cache, 1, &key); - int entry = cache->FindInsertionEntry(key.Hash()); - Object* k; - { MaybeObject* maybe_k = key.AsObject(GetHeap()); - if (!maybe_k->ToObject(&k)) return maybe_k; - } + int entry = new_cache->FindInsertionEntry(key.Hash()); + Handle<Object> k = key.AsHandle(cache->GetIsolate()); - cache->set(EntryToIndex(entry), k); - cache->set(EntryToIndex(entry) + 1, code); - cache->ElementAdded(); - return cache; + new_cache->set(EntryToIndex(entry), *k); + new_cache->set(EntryToIndex(entry) + 1, *code); + new_cache->ElementAdded(); + return new_cache; } int CodeCacheHashTable::GetIndex(Name* name, Code::Flags flags) { - CodeCacheHashTableKey key(name, flags); + DisallowHeapAllocation no_alloc; + CodeCacheHashTableKey key(handle(name), flags); int entry = FindEntry(&key); return (entry == kNotFound) ? 
-1 : entry; } void CodeCacheHashTable::RemoveByIndex(int index) { - ASSERT(index >= 0); + DCHECK(index >= 0); Heap* heap = GetHeap(); set(EntryToIndex(index), heap->the_hole_value()); set(EntryToIndex(index) + 1, heap->the_hole_value()); @@ -7659,41 +8021,27 @@ } -void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache, +void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> code_cache, MapHandleList* maps, Code::Flags flags, Handle<Code> code) { - Isolate* isolate = cache->GetIsolate(); - CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code)); -} - - -MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps, - Code::Flags flags, - Code* code) { - // Initialize cache if necessary. - if (cache()->IsUndefined()) { - Object* result; - { MaybeObject* maybe_result = - PolymorphicCodeCacheHashTable::Allocate( - GetHeap(), - PolymorphicCodeCacheHashTable::kInitialSize); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - set_cache(result); + Isolate* isolate = code_cache->GetIsolate(); + if (code_cache->cache()->IsUndefined()) { + Handle<PolymorphicCodeCacheHashTable> result = + PolymorphicCodeCacheHashTable::New( + isolate, + PolymorphicCodeCacheHashTable::kInitialSize); + code_cache->set_cache(*result); } else { // This entry shouldn't be contained in the cache yet. 
- ASSERT(PolymorphicCodeCacheHashTable::cast(cache()) + DCHECK(PolymorphicCodeCacheHashTable::cast(code_cache->cache()) ->Lookup(maps, flags)->IsUndefined()); } - PolymorphicCodeCacheHashTable* hash_table = - PolymorphicCodeCacheHashTable::cast(cache()); - Object* new_cache; - { MaybeObject* maybe_new_cache = hash_table->Put(maps, flags, code); - if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache; - } - set_cache(new_cache); - return this; + Handle<PolymorphicCodeCacheHashTable> hash_table = + handle(PolymorphicCodeCacheHashTable::cast(code_cache->cache())); + Handle<PolymorphicCodeCacheHashTable> new_cache = + PolymorphicCodeCacheHashTable::Put(hash_table, maps, flags, code); + code_cache->set_cache(*new_cache); } @@ -7719,7 +8067,7 @@ : maps_(maps), code_flags_(code_flags) {} - bool IsMatch(Object* other) { + bool IsMatch(Object* other) V8_OVERRIDE { MapHandleList other_maps(kDefaultListAllocationSize); int other_flags; FromObject(other, &other_flags, &other_maps); @@ -7754,27 +8102,23 @@ return hash; } - uint32_t Hash() { + uint32_t Hash() V8_OVERRIDE { return MapsHashHelper(maps_, code_flags_); } - uint32_t HashForObject(Object* obj) { + uint32_t HashForObject(Object* obj) V8_OVERRIDE { MapHandleList other_maps(kDefaultListAllocationSize); int other_flags; FromObject(obj, &other_flags, &other_maps); return MapsHashHelper(&other_maps, other_flags); } - MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) { - Object* obj; + MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { // The maps in |maps_| must be copied to a newly allocated FixedArray, // both because the referenced MapList is short-lived, and because C++ // objects can't be stored in the heap anyway. 
- { MaybeObject* maybe_obj = - heap->AllocateUninitializedFixedArray(maps_->length() + 1); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* list = FixedArray::cast(obj); + Handle<FixedArray> list = + isolate->factory()->NewUninitializedFixedArray(maps_->length() + 1); list->set(0, Smi::FromInt(code_flags_)); for (int i = 0; i < maps_->length(); ++i) { list->set(i + 1, *maps_->at(i)); @@ -7802,55 +8146,59 @@ Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps, - int code_flags) { - PolymorphicCodeCacheHashTableKey key(maps, code_flags); + int code_kind) { + DisallowHeapAllocation no_alloc; + PolymorphicCodeCacheHashTableKey key(maps, code_kind); int entry = FindEntry(&key); if (entry == kNotFound) return GetHeap()->undefined_value(); return get(EntryToIndex(entry) + 1); } -MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps, - int code_flags, - Code* code) { - PolymorphicCodeCacheHashTableKey key(maps, code_flags); - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, &key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - PolymorphicCodeCacheHashTable* cache = - reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj); +Handle<PolymorphicCodeCacheHashTable> PolymorphicCodeCacheHashTable::Put( + Handle<PolymorphicCodeCacheHashTable> hash_table, + MapHandleList* maps, + int code_kind, + Handle<Code> code) { + PolymorphicCodeCacheHashTableKey key(maps, code_kind); + Handle<PolymorphicCodeCacheHashTable> cache = + EnsureCapacity(hash_table, 1, &key); int entry = cache->FindInsertionEntry(key.Hash()); - { MaybeObject* maybe_obj = key.AsObject(GetHeap()); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - cache->set(EntryToIndex(entry), obj); - cache->set(EntryToIndex(entry) + 1, code); + + Handle<Object> obj = key.AsHandle(hash_table->GetIsolate()); + cache->set(EntryToIndex(entry), *obj); + cache->set(EntryToIndex(entry) + 1, *code); cache->ElementAdded(); return cache; } void 
FixedArray::Shrink(int new_length) { - ASSERT(0 <= new_length && new_length <= length()); + DCHECK(0 <= new_length && new_length <= length()); if (new_length < length()) { - RightTrimFixedArray<Heap::FROM_MUTATOR>( - GetHeap(), this, length() - new_length); + GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>( + this, length() - new_length); } } -MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) { +MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike( + Handle<FixedArray> content, + Handle<JSObject> array) { + DCHECK(array->IsJSArray() || array->HasSloppyArgumentsElements()); ElementsAccessor* accessor = array->GetElementsAccessor(); - MaybeObject* maybe_result = - accessor->AddElementsToFixedArray(array, array, this); - FixedArray* result; - if (!maybe_result->To<FixedArray>(&result)) return maybe_result; -#ifdef ENABLE_SLOW_ASSERTS + Handle<FixedArray> result; + ASSIGN_RETURN_ON_EXCEPTION( + array->GetIsolate(), result, + accessor->AddElementsToFixedArray(array, array, content), + FixedArray); + +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { + DisallowHeapAllocation no_allocation; for (int i = 0; i < result->length(); i++) { Object* current = result->get(i); - ASSERT(current->IsNumber() || current->IsName()); + DCHECK(current->IsNumber() || current->IsName()); } } #endif @@ -7858,17 +8206,25 @@ } -MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) { - ElementsAccessor* accessor = ElementsAccessor::ForArray(other); - MaybeObject* maybe_result = - accessor->AddElementsToFixedArray(NULL, NULL, this, other); - FixedArray* result; - if (!maybe_result->To(&result)) return maybe_result; -#ifdef ENABLE_SLOW_ASSERTS +MaybeHandle<FixedArray> FixedArray::UnionOfKeys(Handle<FixedArray> first, + Handle<FixedArray> second) { + ElementsAccessor* accessor = ElementsAccessor::ForArray(second); + Handle<FixedArray> result; + ASSIGN_RETURN_ON_EXCEPTION( + first->GetIsolate(), result, + accessor->AddElementsToFixedArray( + Handle<Object>::null(), // 
receiver + Handle<JSObject>::null(), // holder + first, + Handle<FixedArrayBase>::cast(second)), + FixedArray); + +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { + DisallowHeapAllocation no_allocation; for (int i = 0; i < result->length(); i++) { Object* current = result->get(i); - ASSERT(current->IsNumber() || current->IsName()); + DCHECK(current->IsNumber() || current->IsName()); } } #endif @@ -7876,24 +8232,22 @@ } -MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) { - Heap* heap = GetHeap(); - if (new_length == 0) return heap->empty_fixed_array(); - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* result = FixedArray::cast(obj); +Handle<FixedArray> FixedArray::CopySize( + Handle<FixedArray> array, int new_length, PretenureFlag pretenure) { + Isolate* isolate = array->GetIsolate(); + if (new_length == 0) return isolate->factory()->empty_fixed_array(); + Handle<FixedArray> result = + isolate->factory()->NewFixedArray(new_length, pretenure); // Copy the content DisallowHeapAllocation no_gc; - int len = length(); + int len = array->length(); if (new_length < len) len = new_length; // We are taking the map from the old fixed array so the map is sure to // be an immortal immutable object. 
- result->set_map_no_write_barrier(map()); + result->set_map_no_write_barrier(array->map()); WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); for (int i = 0; i < len; i++) { - result->set(i, get(i), mode); + result->set(i, array->get(i), mode); } return result; } @@ -7919,21 +8273,20 @@ #endif -MaybeObject* DescriptorArray::Allocate(Isolate* isolate, - int number_of_descriptors, - int slack) { - Heap* heap = isolate->heap(); +Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate, + int number_of_descriptors, + int slack) { + DCHECK(0 <= number_of_descriptors); + Factory* factory = isolate->factory(); // Do not use DescriptorArray::cast on incomplete object. int size = number_of_descriptors + slack; - if (size == 0) return heap->empty_descriptor_array(); - FixedArray* result; + if (size == 0) return factory->empty_descriptor_array(); // Allocate the array of keys. - MaybeObject* maybe_array = heap->AllocateFixedArray(LengthFor(size)); - if (!maybe_array->To(&result)) return maybe_array; + Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size)); result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors)); result->set(kEnumCacheIndex, Smi::FromInt(0)); - return result; + return Handle<DescriptorArray>::cast(result); } @@ -7942,13 +8295,19 @@ } +void DescriptorArray::Replace(int index, Descriptor* descriptor) { + descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index)); + Set(index, descriptor); +} + + void DescriptorArray::SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache, Object* new_index_cache) { - ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength); - ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray()); - ASSERT(!IsEmpty()); - ASSERT(!HasEnumCache() || new_cache->length() > GetEnumCache()->length()); + DCHECK(bridge_storage->length() >= kEnumCacheBridgeLength); + DCHECK(new_index_cache->IsSmi() || new_index_cache->IsFixedArray()); + DCHECK(!IsEmpty()); + DCHECK(!HasEnumCache() || 
new_cache->length() > GetEnumCache()->length()); FixedArray::cast(bridge_storage)-> set(kEnumCacheBridgeCacheIndex, new_cache); FixedArray::cast(bridge_storage)-> @@ -7957,138 +8316,15 @@ } -void DescriptorArray::CopyFrom(int dst_index, +void DescriptorArray::CopyFrom(int index, DescriptorArray* src, - int src_index, const WhitenessWitness& witness) { - Object* value = src->GetValue(src_index); - PropertyDetails details = src->GetDetails(src_index); - Descriptor desc(src->GetKey(src_index), value, details); - Set(dst_index, &desc, witness); -} - - -Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc, - int verbatim, - int valid, - int new_size, - int modify_index, - StoreMode store_mode, - Handle<DescriptorArray> other) { - CALL_HEAP_FUNCTION(desc->GetIsolate(), - desc->Merge(verbatim, valid, new_size, modify_index, - store_mode, *other), - DescriptorArray); -} - - -// Generalize the |other| descriptor array by merging it into the (at least -// partly) updated |this| descriptor array. -// The method merges two descriptor array in three parts. Both descriptor arrays -// are identical up to |verbatim|. They also overlap in keys up to |valid|. -// Between |verbatim| and |valid|, the resulting descriptor type as well as the -// representation are generalized from both |this| and |other|. Beyond |valid|, -// the descriptors are copied verbatim from |other| up to |new_size|. -// In case of incompatible types, the type and representation of |other| is -// used. -MaybeObject* DescriptorArray::Merge(int verbatim, - int valid, - int new_size, - int modify_index, - StoreMode store_mode, - DescriptorArray* other) { - ASSERT(verbatim <= valid); - ASSERT(valid <= new_size); - - DescriptorArray* result; - // Allocate a new descriptor array large enough to hold the required - // descriptors, with minimally the exact same size as this descriptor array. 
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate( - GetIsolate(), new_size, - Max(new_size, other->number_of_descriptors()) - new_size); - if (!maybe_descriptors->To(&result)) return maybe_descriptors; - ASSERT(result->length() > length() || - result->NumberOfSlackDescriptors() > 0 || - result->number_of_descriptors() == other->number_of_descriptors()); - ASSERT(result->number_of_descriptors() == new_size); - - DescriptorArray::WhitenessWitness witness(result); - - int descriptor; - - // 0 -> |verbatim| - int current_offset = 0; - for (descriptor = 0; descriptor < verbatim; descriptor++) { - if (GetDetails(descriptor).type() == FIELD) current_offset++; - result->CopyFrom(descriptor, other, descriptor, witness); - } - - // |verbatim| -> |valid| - for (; descriptor < valid; descriptor++) { - Name* key = GetKey(descriptor); - PropertyDetails details = GetDetails(descriptor); - PropertyDetails other_details = other->GetDetails(descriptor); - - if (details.type() == FIELD || other_details.type() == FIELD || - (store_mode == FORCE_FIELD && descriptor == modify_index) || - (details.type() == CONSTANT && - other_details.type() == CONSTANT && - GetValue(descriptor) != other->GetValue(descriptor))) { - Representation representation = - details.representation().generalize(other_details.representation()); - FieldDescriptor d(key, - current_offset++, - other_details.attributes(), - representation); - result->Set(descriptor, &d, witness); - } else { - result->CopyFrom(descriptor, other, descriptor, witness); - } - } - - // |valid| -> |new_size| - for (; descriptor < new_size; descriptor++) { - PropertyDetails details = other->GetDetails(descriptor); - if (details.type() == FIELD || - (store_mode == FORCE_FIELD && descriptor == modify_index)) { - Name* key = other->GetKey(descriptor); - FieldDescriptor d(key, - current_offset++, - details.attributes(), - details.representation()); - result->Set(descriptor, &d, witness); - } else { - result->CopyFrom(descriptor, other, 
descriptor, witness); - } - } - - result->Sort(); - return result; -} - - -// Checks whether a merge of |other| into |this| would return a copy of |this|. -bool DescriptorArray::IsMoreGeneralThan(int verbatim, - int valid, - int new_size, - DescriptorArray* other) { - ASSERT(verbatim <= valid); - ASSERT(valid <= new_size); - if (valid != new_size) return false; - - for (int descriptor = verbatim; descriptor < valid; descriptor++) { - PropertyDetails details = GetDetails(descriptor); - PropertyDetails other_details = other->GetDetails(descriptor); - if (!other_details.representation().fits_into(details.representation())) { - return false; - } - if (details.type() == CONSTANT) { - if (other_details.type() != CONSTANT) return false; - if (GetValue(descriptor) != other->GetValue(descriptor)) return false; - } - } - - return true; + Object* value = src->GetValue(index); + PropertyDetails details = src->GetDetails(index); + Descriptor desc(handle(src->GetKey(index)), + handle(value, src->GetIsolate()), + details); + Set(index, &desc, witness); } @@ -8147,7 +8383,7 @@ parent_index = child_index; } } - ASSERT(IsSortedNoDuplicates()); + DCHECK(IsSortedNoDuplicates()); } @@ -8165,21 +8401,33 @@ } -MaybeObject* DeoptimizationInputData::Allocate(Isolate* isolate, - int deopt_entry_count, - PretenureFlag pretenure) { - ASSERT(deopt_entry_count > 0); - return isolate->heap()->AllocateFixedArray(LengthFor(deopt_entry_count), - pretenure); +Handle<DeoptimizationInputData> DeoptimizationInputData::New( + Isolate* isolate, int deopt_entry_count, int return_patch_address_count, + PretenureFlag pretenure) { + DCHECK(deopt_entry_count + return_patch_address_count > 0); + Handle<FixedArray> deoptimization_data = + Handle<FixedArray>::cast(isolate->factory()->NewFixedArray( + LengthFor(deopt_entry_count, return_patch_address_count), pretenure)); + deoptimization_data->set(kDeoptEntryCountIndex, + Smi::FromInt(deopt_entry_count)); + 
deoptimization_data->set(kReturnAddressPatchEntryCountIndex, + Smi::FromInt(return_patch_address_count)); + return Handle<DeoptimizationInputData>::cast(deoptimization_data); } -MaybeObject* DeoptimizationOutputData::Allocate(Isolate* isolate, - int number_of_deopt_points, - PretenureFlag pretenure) { - if (number_of_deopt_points == 0) return isolate->heap()->empty_fixed_array(); - return isolate->heap()->AllocateFixedArray( - LengthOfFixedArray(number_of_deopt_points), pretenure); +Handle<DeoptimizationOutputData> DeoptimizationOutputData::New( + Isolate* isolate, + int number_of_deopt_points, + PretenureFlag pretenure) { + Handle<FixedArray> result; + if (number_of_deopt_points == 0) { + result = isolate->factory()->empty_fixed_array(); + } else { + result = isolate->factory()->NewFixedArray( + LengthOfFixedArray(number_of_deopt_points), pretenure); + } + return Handle<DeoptimizationOutputData>::cast(result); } @@ -8196,30 +8444,6 @@ #endif -static bool IsIdentifier(UnicodeCache* cache, Name* name) { - // Checks whether the buffer contains an identifier (no escape). 
- if (!name->IsString()) return false; - String* string = String::cast(name); - if (string->length() == 0) return true; - ConsStringIteratorOp op; - StringCharacterStream stream(string, &op); - if (!cache->IsIdentifierStart(stream.GetNext())) { - return false; - } - while (stream.HasMore()) { - if (!cache->IsIdentifierPart(stream.GetNext())) { - return false; - } - } - return true; -} - - -bool Name::IsCacheable(Isolate* isolate) { - return IsSymbol() || IsIdentifier(isolate->unicode_cache(), this); -} - - bool String::LooksValid() { if (!GetIsolate()->heap()->Contains(this)) return false; return true; @@ -8227,7 +8451,7 @@ String::FlatContent String::GetFlatContent() { - ASSERT(!AllowHeapAllocation::IsAllowed()); + DCHECK(!AllowHeapAllocation::IsAllowed()); int length = this->length(); StringShape shape(this); String* string = this; @@ -8245,7 +8469,7 @@ offset = slice->offset(); string = slice->parent(); shape = StringShape(string); - ASSERT(shape.representation_tag() != kConsStringTag && + DCHECK(shape.representation_tag() != kConsStringTag && shape.representation_tag() != kSlicedStringTag); } if (shape.encoding_tag() == kOneByteStringTag) { @@ -8255,16 +8479,16 @@ } else { start = ExternalAsciiString::cast(string)->GetChars(); } - return FlatContent(Vector<const uint8_t>(start + offset, length)); + return FlatContent(start + offset, length); } else { - ASSERT(shape.encoding_tag() == kTwoByteStringTag); + DCHECK(shape.encoding_tag() == kTwoByteStringTag); const uc16* start; if (shape.representation_tag() == kSeqStringTag) { start = SeqTwoByteString::cast(string)->GetChars(); } else { start = ExternalTwoByteString::cast(string)->GetChars(); } - return FlatContent(Vector<const uc16>(start + offset, length)); + return FlatContent(start + offset, length); } } @@ -8328,7 +8552,7 @@ const uc16* String::GetTwoByteData(unsigned start) { - ASSERT(!IsOneByteRepresentationUnderneath()); + DCHECK(!IsOneByteRepresentationUnderneath()); switch 
(StringShape(this).representation_tag()) { case kSeqStringTag: return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start); @@ -8391,7 +8615,7 @@ } -// Archive statics that are thread local. +// Archive statics that are thread-local. char* Relocatable::ArchiveState(Isolate* isolate, char* to) { *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top(); isolate->set_relocatable_top(NULL); @@ -8399,7 +8623,7 @@ } -// Restore statics that are thread local. +// Restore statics that are thread-local. char* Relocatable::RestoreState(Isolate* isolate, char* from) { isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from)); return from + ArchiveSpacePerThread(); @@ -8446,11 +8670,11 @@ void FlatStringReader::PostGarbageCollection() { if (str_ == NULL) return; Handle<String> str(str_); - ASSERT(str->IsFlat()); + DCHECK(str->IsFlat()); DisallowHeapAllocation no_gc; // This does not actually prevent the vector from being relocated later. String::FlatContent content = str->GetFlatContent(); - ASSERT(content.IsFlat()); + DCHECK(content.IsFlat()); is_ascii_ = content.IsAscii(); if (is_ascii_) { start_ = content.ToOneByteVector().start(); @@ -8460,34 +8684,47 @@ } -String* ConsStringIteratorOp::Operate(String* string, - unsigned* offset_out, - int32_t* type_out, - unsigned* length_out) { - ASSERT(string->IsConsString()); - ConsString* cons_string = ConsString::cast(string); - // Set up search data. +void ConsStringIteratorOp::Initialize(ConsString* cons_string, int offset) { + DCHECK(cons_string != NULL); root_ = cons_string; - consumed_ = *offset_out; - // Now search. - return Search(offset_out, type_out, length_out); + consumed_ = offset; + // Force stack blown condition to trigger restart. 
+ depth_ = 1; + maximum_depth_ = kStackSize + depth_; + DCHECK(StackBlown()); +} + + +String* ConsStringIteratorOp::Continue(int* offset_out) { + DCHECK(depth_ != 0); + DCHECK_EQ(0, *offset_out); + bool blew_stack = StackBlown(); + String* string = NULL; + // Get the next leaf if there is one. + if (!blew_stack) string = NextLeaf(&blew_stack); + // Restart search from root. + if (blew_stack) { + DCHECK(string == NULL); + string = Search(offset_out); + } + // Ensure future calls return null immediately. + if (string == NULL) Reset(NULL); + return string; } -String* ConsStringIteratorOp::Search(unsigned* offset_out, - int32_t* type_out, - unsigned* length_out) { +String* ConsStringIteratorOp::Search(int* offset_out) { ConsString* cons_string = root_; // Reset the stack, pushing the root string. depth_ = 1; maximum_depth_ = 1; frames_[0] = cons_string; - const unsigned consumed = consumed_; - unsigned offset = 0; + const int consumed = consumed_; + int offset = 0; while (true) { // Loop until the string is found which contains the target offset. String* string = cons_string->first(); - unsigned length = string->length(); + int length = string->length(); int32_t type; if (consumed < offset + length) { // Target offset is in the left branch. @@ -8498,7 +8735,7 @@ PushLeft(cons_string); continue; } - // Tell the stack we're done decending. + // Tell the stack we're done descending. AdjustMaximumDepth(); } else { // Descend right. @@ -8510,7 +8747,6 @@ if ((type & kStringRepresentationMask) == kConsStringTag) { cons_string = ConsString::cast(string); PushRight(cons_string); - // TODO(dcarney) Add back root optimization. continue; } // Need this to be updated for the current string. @@ -8518,21 +8754,19 @@ // Account for the possibility of an empty right leaf. // This happens only if we have asked for an offset outside the string. if (length == 0) { - // Reset depth so future operations will return null immediately. 
- Reset(); + // Reset so future operations will return null immediately. + Reset(NULL); return NULL; } - // Tell the stack we're done decending. + // Tell the stack we're done descending. AdjustMaximumDepth(); // Pop stack so next iteration is in correct place. Pop(); } - ASSERT(length != 0); + DCHECK(length != 0); // Adjust return values and exit. consumed_ = offset + length; *offset_out = consumed - offset; - *type_out = type; - *length_out = length; return string; } UNREACHABLE(); @@ -8540,9 +8774,7 @@ } -String* ConsStringIteratorOp::NextLeaf(bool* blew_stack, - int32_t* type_out, - unsigned* length_out) { +String* ConsStringIteratorOp::NextLeaf(bool* blew_stack) { while (true) { // Tree traversal complete. if (depth_ == 0) { @@ -8550,7 +8782,7 @@ return NULL; } // We've lost track of higher nodes. - if (maximum_depth_ - depth_ == kStackSize) { + if (StackBlown()) { *blew_stack = true; return NULL; } @@ -8561,16 +8793,13 @@ if ((type & kStringRepresentationMask) != kConsStringTag) { // Pop stack so next iteration is in correct place. Pop(); - unsigned length = static_cast<unsigned>(string->length()); + int length = string->length(); // Could be a flattened ConsString. if (length == 0) continue; - *length_out = length; - *type_out = type; consumed_ += length; return string; } cons_string = ConsString::cast(string); - // TODO(dcarney) Add back root optimization. PushRight(cons_string); // Need to traverse all the way left. 
while (true) { @@ -8579,10 +8808,8 @@ type = string->map()->instance_type(); if ((type & kStringRepresentationMask) != kConsStringTag) { AdjustMaximumDepth(); - unsigned length = static_cast<unsigned>(string->length()); - ASSERT(length != 0); - *length_out = length; - *type_out = type; + int length = string->length(); + DCHECK(length != 0); consumed_ += length; return string; } @@ -8596,7 +8823,7 @@ uint16_t ConsString::ConsStringGet(int index) { - ASSERT(index >= 0 && index < this->length()); + DCHECK(index >= 0 && index < this->length()); // Check for a flattened cons string if (second()->length() == 0) { @@ -8640,7 +8867,7 @@ int from = f; int to = t; while (true) { - ASSERT(0 <= from && from <= to && to <= source->length()); + DCHECK(0 <= from && from <= to && to <= source->length()); switch (StringShape(source).full_representation_tag()) { case kOneByteStringTag | kExternalStringTag: { CopyChars(sink, @@ -8721,6 +8948,64 @@ } + +template <typename SourceChar> +static void CalculateLineEndsImpl(Isolate* isolate, + List<int>* line_ends, + Vector<const SourceChar> src, + bool include_ending_line) { + const int src_len = src.length(); + StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n")); + + // Find and record line ends. + int position = 0; + while (position != -1 && position < src_len) { + position = search.Search(src, position); + if (position != -1) { + line_ends->Add(position); + position++; + } else if (include_ending_line) { + // Even if the last line misses a line end, it is counted. + line_ends->Add(src_len); + return; + } + } +} + + +Handle<FixedArray> String::CalculateLineEnds(Handle<String> src, + bool include_ending_line) { + src = Flatten(src); + // Rough estimate of line count based on a roughly estimated average + // length of (unpacked) code. 
+ int line_count_estimate = src->length() >> 4; + List<int> line_ends(line_count_estimate); + Isolate* isolate = src->GetIsolate(); + { DisallowHeapAllocation no_allocation; // ensure vectors stay valid. + // Dispatch on type of strings. + String::FlatContent content = src->GetFlatContent(); + DCHECK(content.IsFlat()); + if (content.IsAscii()) { + CalculateLineEndsImpl(isolate, + &line_ends, + content.ToOneByteVector(), + include_ending_line); + } else { + CalculateLineEndsImpl(isolate, + &line_ends, + content.ToUC16Vector(), + include_ending_line); + } + } + int line_count = line_ends.length(); + Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count); + for (int i = 0; i < line_count; i++) { + array->set(i, Smi::FromInt(line_ends[i])); + } + return array; +} + + // Compares the contents of two strings by reading and comparing // int-sized blocks of characters. template <typename Char> @@ -8733,8 +9018,8 @@ // then we have to check that the strings are aligned before // comparing them blockwise. 
const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT - uint32_t pa_addr = reinterpret_cast<uint32_t>(a); - uint32_t pb_addr = reinterpret_cast<uint32_t>(b); + uintptr_t pa_addr = reinterpret_cast<uintptr_t>(a); + uintptr_t pb_addr = reinterpret_cast<uintptr_t>(b); if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) { #endif const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT @@ -8764,7 +9049,7 @@ class RawStringComparator : public AllStatic { public: static inline bool compare(const Chars1* a, const Chars2* b, int len) { - ASSERT(sizeof(Chars1) != sizeof(Chars2)); + DCHECK(sizeof(Chars1) != sizeof(Chars2)); for (int i = 0; i < len; i++) { if (a[i] != b[i]) { return false; @@ -8799,26 +9084,30 @@ explicit inline State(ConsStringIteratorOp* op) : op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {} - inline void Init(String* string, unsigned len) { - op_->Reset(); - int32_t type = string->map()->instance_type(); - String::Visit(string, 0, *this, *op_, type, len); + inline void Init(String* string) { + ConsString* cons_string = String::VisitFlat(this, string); + op_->Reset(cons_string); + if (cons_string != NULL) { + int offset; + string = op_->Next(&offset); + String::VisitFlat(this, string, offset); + } } - inline void VisitOneByteString(const uint8_t* chars, unsigned length) { + inline void VisitOneByteString(const uint8_t* chars, int length) { is_one_byte_ = true; buffer8_ = chars; length_ = length; } - inline void VisitTwoByteString(const uint16_t* chars, unsigned length) { + inline void VisitTwoByteString(const uint16_t* chars, int length) { is_one_byte_ = false; buffer16_ = chars; length_ = length; } - void Advance(unsigned consumed) { - ASSERT(consumed <= length_); + void Advance(int consumed) { + DCHECK(consumed <= length_); // Still in buffer. if (length_ != consumed) { if (is_one_byte_) { @@ -8830,18 +9119,16 @@ return; } // Advance state. 
- ASSERT(op_->HasMore()); - int32_t type = 0; - unsigned length = 0; - String* next = op_->ContinueOperation(&type, &length); - ASSERT(next != NULL); - ConsStringNullOp null_op; - String::Visit(next, 0, *this, null_op, type, length); + int offset; + String* next = op_->Next(&offset); + DCHECK_EQ(0, offset); + DCHECK(next != NULL); + String::VisitFlat(this, next); } ConsStringIteratorOp* const op_; bool is_one_byte_; - unsigned length_; + int length_; union { const uint8_t* buffer8_; const uint16_t* buffer16_; @@ -8859,19 +9146,19 @@ } template<typename Chars1, typename Chars2> - static inline bool Equals(State* state_1, State* state_2, unsigned to_check) { + static inline bool Equals(State* state_1, State* state_2, int to_check) { const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_); const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_); return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check); } - bool Equals(unsigned length, String* string_1, String* string_2) { - ASSERT(length != 0); - state_1_.Init(string_1, length); - state_2_.Init(string_2, length); + bool Equals(String* string_1, String* string_2) { + int length = string_1->length(); + state_1_.Init(string_1); + state_2_.Init(string_2); while (true) { - unsigned to_check = Min(state_1_.length_, state_2_.length_); - ASSERT(to_check > 0 && to_check <= length); + int to_check = Min(state_1_.length_, state_2_.length_); + DCHECK(to_check > 0 && to_check <= length); bool is_equal; if (state_1_.is_one_byte_) { if (state_2_.is_one_byte_) { @@ -8904,6 +9191,7 @@ bool String::SlowEquals(String* other) { + DisallowHeapAllocation no_gc; // Fast check: negative check with lengths. int len = length(); if (len != other->length()) return false; @@ -8912,7 +9200,7 @@ // Fast check: if hash code is computed for both strings // a fast negative check can be performed. 
if (HasHashCode() && other->HasHashCode()) { -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { if (Hash() != other->Hash()) { bool found_difference = false; @@ -8922,7 +9210,7 @@ break; } } - ASSERT(found_difference); + DCHECK(found_difference); } } #endif @@ -8933,14 +9221,9 @@ // before we try to flatten the strings. if (this->Get(0) != other->Get(0)) return false; - String* lhs = this->TryFlattenGetString(); - String* rhs = other->TryFlattenGetString(); - - // TODO(dcarney): Compare all types of flat strings with a Visitor. - if (StringShape(lhs).IsSequentialAscii() && - StringShape(rhs).IsSequentialAscii()) { - const uint8_t* str1 = SeqOneByteString::cast(lhs)->GetChars(); - const uint8_t* str2 = SeqOneByteString::cast(rhs)->GetChars(); + if (IsSeqOneByteString() && other->IsSeqOneByteString()) { + const uint8_t* str1 = SeqOneByteString::cast(this)->GetChars(); + const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars(); return CompareRawStringContents(str1, str2, len); } @@ -8948,7 +9231,57 @@ StringComparator comparator(isolate->objects_string_compare_iterator_a(), isolate->objects_string_compare_iterator_b()); - return comparator.Equals(static_cast<unsigned>(len), lhs, rhs); + return comparator.Equals(this, other); +} + + +bool String::SlowEquals(Handle<String> one, Handle<String> two) { + // Fast check: negative check with lengths. + int one_length = one->length(); + if (one_length != two->length()) return false; + if (one_length == 0) return true; + + // Fast check: if hash code is computed for both strings + // a fast negative check can be performed. 
+ if (one->HasHashCode() && two->HasHashCode()) { +#ifdef ENABLE_SLOW_DCHECKS + if (FLAG_enable_slow_asserts) { + if (one->Hash() != two->Hash()) { + bool found_difference = false; + for (int i = 0; i < one_length; i++) { + if (one->Get(i) != two->Get(i)) { + found_difference = true; + break; + } + } + DCHECK(found_difference); + } + } +#endif + if (one->Hash() != two->Hash()) return false; + } + + // We know the strings are both non-empty. Compare the first chars + // before we try to flatten the strings. + if (one->Get(0) != two->Get(0)) return false; + + one = String::Flatten(one); + two = String::Flatten(two); + + DisallowHeapAllocation no_gc; + String::FlatContent flat1 = one->GetFlatContent(); + String::FlatContent flat2 = two->GetFlatContent(); + + if (flat1.IsAscii() && flat2.IsAscii()) { + return CompareRawStringContents(flat1.ToOneByteVector().start(), + flat2.ToOneByteVector().start(), + one_length); + } else { + for (int i = 0; i < one_length; i++) { + if (flat1.Get(i) != flat2.Get(i)) return false; + } + return true; + } } @@ -8984,7 +9317,7 @@ for (i = 0; i < slen && remaining_in_str > 0; i++) { unsigned cursor = 0; uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor); - ASSERT(cursor > 0 && cursor <= remaining_in_str); + DCHECK(cursor > 0 && cursor <= remaining_in_str); if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) { if (i > slen - 1) return false; if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false; @@ -9030,68 +9363,18 @@ } -class IteratingStringHasher: public StringHasher { - public: - static inline uint32_t Hash(String* string, uint32_t seed) { - const unsigned len = static_cast<unsigned>(string->length()); - IteratingStringHasher hasher(len, seed); - if (hasher.has_trivial_hash()) { - return hasher.GetHashField(); - } - int32_t type = string->map()->instance_type(); - ConsStringNullOp null_op; - String::Visit(string, 0, hasher, null_op, type, len); - // Flat strings terminate immediately. 
- if (hasher.consumed_ == len) { - ASSERT(!string->IsConsString()); - return hasher.GetHashField(); - } - ASSERT(string->IsConsString()); - // This is a ConsString, iterate across it. - ConsStringIteratorOp op; - unsigned offset = 0; - unsigned leaf_length = len; - string = op.Operate(string, &offset, &type, &leaf_length); - while (true) { - ASSERT(hasher.consumed_ < len); - String::Visit(string, 0, hasher, null_op, type, leaf_length); - if (hasher.consumed_ == len) break; - string = op.ContinueOperation(&type, &leaf_length); - // This should be taken care of by the length check. - ASSERT(string != NULL); - } - return hasher.GetHashField(); - } - inline void VisitOneByteString(const uint8_t* chars, unsigned length) { - AddCharacters(chars, static_cast<int>(length)); - consumed_ += length; - } - inline void VisitTwoByteString(const uint16_t* chars, unsigned length) { - AddCharacters(chars, static_cast<int>(length)); - consumed_ += length; - } - - private: - inline IteratingStringHasher(int len, uint32_t seed) - : StringHasher(len, seed), - consumed_(0) {} - unsigned consumed_; - DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher); -}; - - uint32_t String::ComputeAndSetHash() { // Should only be called if hash code has not yet been computed. - ASSERT(!HasHashCode()); + DCHECK(!HasHashCode()); // Store the hash code in the object. uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed()); set_hash_field(field); // Check the hash code is there. - ASSERT(HasHashCode()); + DCHECK(HasHashCode()); uint32_t result = field >> kHashShift; - ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. + DCHECK(result != 0); // Ensure that the hash value of 0 is never computed. 
return result; } @@ -9101,29 +9384,7 @@ if (length == 0 || length > kMaxArrayIndexSize) return false; ConsStringIteratorOp op; StringCharacterStream stream(this, &op); - uint16_t ch = stream.GetNext(); - - // If the string begins with a '0' character, it must only consist - // of it to be a legal array index. - if (ch == '0') { - *index = 0; - return length == 1; - } - - // Convert string to uint32 array index; character by character. - int d = ch - '0'; - if (d < 0 || d > 9) return false; - uint32_t result = d; - while (stream.HasMore()) { - d = stream.GetNext() - '0'; - if (d < 0 || d > 9) return false; - // Check that the new result is below the 32 bit limit. - if (result > 429496729U - ((d > 5) ? 1 : 0)) return false; - result = (result * 10) + d; - } - - *index = result; - return true; + return StringToArrayIndex(&stream, index); } @@ -9133,7 +9394,7 @@ uint32_t field = hash_field(); if ((field & kIsNotArrayIndexMask) != 0) return false; // Isolate the array index form the full hash field. 
- *index = (kArrayIndexHashMask & field) >> kHashShift; + *index = ArrayIndexValueBits::decode(field); return true; } else { return ComputeArrayIndex(index); @@ -9150,17 +9411,16 @@ old_size = SeqOneByteString::SizeFor(old_length); new_size = SeqOneByteString::SizeFor(new_length); } else { - ASSERT(string->IsSeqTwoByteString()); + DCHECK(string->IsSeqTwoByteString()); old_size = SeqTwoByteString::SizeFor(old_length); new_size = SeqTwoByteString::SizeFor(new_length); } int delta = old_size - new_size; - string->set_length(new_length); Address start_of_string = string->address(); - ASSERT_OBJECT_ALIGNED(start_of_string); - ASSERT_OBJECT_ALIGNED(start_of_string + new_size); + DCHECK_OBJECT_ALIGNED(start_of_string); + DCHECK_OBJECT_ALIGNED(start_of_string + new_size); Heap* heap = string->GetHeap(); NewSpace* newspace = heap->new_space(); @@ -9175,6 +9435,10 @@ } heap->AdjustLiveBytes(start_of_string, -delta, Heap::FROM_MUTATOR); + // We are storing the new length using release store after creating a filler + // for the left-over space to avoid races with the sweeper thread. + string->synchronized_set_length(new_length); + if (new_length == 0) return heap->isolate()->factory()->empty_string(); return string; } @@ -9183,16 +9447,16 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) { // For array indexes mix the length into the hash as an array index could // be zero. 
- ASSERT(length > 0); - ASSERT(length <= String::kMaxArrayIndexSize); - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(length > 0); + DCHECK(length <= String::kMaxArrayIndexSize); + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - value <<= String::kHashShift; - value |= length << String::kArrayIndexHashLengthShift; + value <<= String::ArrayIndexValueBits::kShift; + value |= length << String::ArrayIndexLengthBits::kShift; - ASSERT((value & String::kIsNotArrayIndexMask) == 0); - ASSERT((length > String::kMaxCachedArrayIndexLength) || + DCHECK((value & String::kIsNotArrayIndexMask) == 0); + DCHECK((length > String::kMaxCachedArrayIndexLength) || (value & String::kContainsCachedArrayIndexMask) == 0); return value; } @@ -9217,7 +9481,7 @@ int vector_length = chars.length(); // Handle some edge cases if (vector_length <= 1) { - ASSERT(vector_length == 0 || + DCHECK(vector_length == 0 || static_cast<uint8_t>(chars.start()[0]) <= unibrow::Utf8::kMaxOneByteChar); *utf16_length_out = vector_length; @@ -9226,152 +9490,44 @@ // Start with a fake length which won't affect computation. // It will be updated later. StringHasher hasher(String::kMaxArrayIndexSize, seed); - unsigned remaining = static_cast<unsigned>(vector_length); - const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start()); - int utf16_length = 0; - bool is_index = true; - ASSERT(hasher.is_array_index_); - while (remaining > 0) { - unsigned consumed = 0; - uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed); - ASSERT(consumed > 0 && consumed <= remaining); - stream += consumed; - remaining -= consumed; - bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode; - utf16_length += is_two_characters ? 2 : 1; - // No need to keep hashing. But we do need to calculate utf16_length. 
- if (utf16_length > String::kMaxHashCalcLength) continue; - if (is_two_characters) { - uint16_t c1 = unibrow::Utf16::LeadSurrogate(c); - uint16_t c2 = unibrow::Utf16::TrailSurrogate(c); - hasher.AddCharacter(c1); - hasher.AddCharacter(c2); - if (is_index) is_index = hasher.UpdateIndex(c1); - if (is_index) is_index = hasher.UpdateIndex(c2); - } else { - hasher.AddCharacter(c); - if (is_index) is_index = hasher.UpdateIndex(c); - } - } - *utf16_length_out = static_cast<int>(utf16_length); - // Must set length here so that hash computation is correct. - hasher.length_ = utf16_length; - return hasher.GetHashField(); -} - - -void String::PrintOn(FILE* file) { - int length = this->length(); - for (int i = 0; i < length; i++) { - PrintF(file, "%c", Get(i)); - } -} - - -static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) { - int live_enum = map->EnumLength(); - if (live_enum == kInvalidEnumCacheSentinel) { - live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM); - } - if (live_enum == 0) return descriptors->ClearEnumCache(); - - FixedArray* enum_cache = descriptors->GetEnumCache(); - - int to_trim = enum_cache->length() - live_enum; - if (to_trim <= 0) return; - RightTrimFixedArray<Heap::FROM_GC>( - heap, descriptors->GetEnumCache(), to_trim); - - if (!descriptors->HasEnumIndicesCache()) return; - FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache(); - RightTrimFixedArray<Heap::FROM_GC>(heap, enum_indices_cache, to_trim); -} - - -static void TrimDescriptorArray(Heap* heap, - Map* map, - DescriptorArray* descriptors, - int number_of_own_descriptors) { - int number_of_descriptors = descriptors->number_of_descriptors_storage(); - int to_trim = number_of_descriptors - number_of_own_descriptors; - if (to_trim == 0) return; - - RightTrimFixedArray<Heap::FROM_GC>( - heap, descriptors, to_trim * DescriptorArray::kDescriptorSize); - descriptors->SetNumberOfDescriptors(number_of_own_descriptors); - - if 
(descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors); - descriptors->Sort(); -} - - -// Clear a possible back pointer in case the transition leads to a dead map. -// Return true in case a back pointer has been cleared and false otherwise. -static bool ClearBackPointer(Heap* heap, Map* target) { - if (Marking::MarkBitFrom(target).Get()) return false; - target->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER); - return true; -} - - -// TODO(mstarzinger): This method should be moved into MarkCompactCollector, -// because it cannot be called from outside the GC and we already have methods -// depending on the transitions layout in the GC anyways. -void Map::ClearNonLiveTransitions(Heap* heap) { - // If there are no transitions to be cleared, return. - // TODO(verwaest) Should be an assert, otherwise back pointers are not - // properly cleared. - if (!HasTransitionArray()) return; - - TransitionArray* t = transitions(); - MarkCompactCollector* collector = heap->mark_compact_collector(); - - int transition_index = 0; - - DescriptorArray* descriptors = instance_descriptors(); - bool descriptors_owner_died = false; - - // Compact all live descriptors to the left. - for (int i = 0; i < t->number_of_transitions(); ++i) { - Map* target = t->GetTarget(i); - if (ClearBackPointer(heap, target)) { - if (target->instance_descriptors() == descriptors) { - descriptors_owner_died = true; - } + unsigned remaining = static_cast<unsigned>(vector_length); + const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start()); + int utf16_length = 0; + bool is_index = true; + DCHECK(hasher.is_array_index_); + while (remaining > 0) { + unsigned consumed = 0; + uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed); + DCHECK(consumed > 0 && consumed <= remaining); + stream += consumed; + remaining -= consumed; + bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode; + utf16_length += is_two_characters ? 
2 : 1; + // No need to keep hashing. But we do need to calculate utf16_length. + if (utf16_length > String::kMaxHashCalcLength) continue; + if (is_two_characters) { + uint16_t c1 = unibrow::Utf16::LeadSurrogate(c); + uint16_t c2 = unibrow::Utf16::TrailSurrogate(c); + hasher.AddCharacter(c1); + hasher.AddCharacter(c2); + if (is_index) is_index = hasher.UpdateIndex(c1); + if (is_index) is_index = hasher.UpdateIndex(c2); } else { - if (i != transition_index) { - Name* key = t->GetKey(i); - t->SetKey(transition_index, key); - Object** key_slot = t->GetKeySlot(transition_index); - collector->RecordSlot(key_slot, key_slot, key); - // Target slots do not need to be recorded since maps are not compacted. - t->SetTarget(transition_index, t->GetTarget(i)); - } - transition_index++; + hasher.AddCharacter(c); + if (is_index) is_index = hasher.UpdateIndex(c); } } + *utf16_length_out = static_cast<int>(utf16_length); + // Must set length here so that hash computation is correct. + hasher.length_ = utf16_length; + return hasher.GetHashField(); +} - // If there are no transitions to be cleared, return. - // TODO(verwaest) Should be an assert, otherwise back pointers are not - // properly cleared. - if (transition_index == t->number_of_transitions()) return; - - int number_of_own_descriptors = NumberOfOwnDescriptors(); - - if (descriptors_owner_died) { - if (number_of_own_descriptors > 0) { - TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors); - ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors); - set_owns_descriptors(true); - } else { - ASSERT(descriptors == GetHeap()->empty_descriptor_array()); - } - } - int trim = t->number_of_transitions() - transition_index; - if (trim > 0) { - RightTrimFixedArray<Heap::FROM_GC>(heap, t, t->IsSimpleTransition() - ? 
trim : trim * TransitionArray::kTransitionSize); +void String::PrintOn(FILE* file) { + int length = this->length(); + for (int i = 0; i < length; i++) { + PrintF(file, "%c", Get(i)); } } @@ -9401,8 +9557,8 @@ first->instance_type() == second->instance_type() && first->bit_field() == second->bit_field() && first->bit_field2() == second->bit_field2() && - first->is_observed() == second->is_observed() && - first->function_with_prototype() == second->function_with_prototype(); + first->is_frozen() == second->is_frozen() && + first->has_instance_call_handler() == second->has_instance_call_handler(); } @@ -9420,13 +9576,44 @@ void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) { - for (int i = 0; i < count_of_code_ptr_entries(); i++) { - int index = first_code_ptr_index() + i; - v->VisitCodeEntry(reinterpret_cast<Address>(RawFieldOfElementAt(index))); - } - for (int i = 0; i < count_of_heap_ptr_entries(); i++) { - int index = first_heap_ptr_index() + i; - v->VisitPointer(RawFieldOfElementAt(index)); + // Unfortunately the serializer relies on pointers within an object being + // visited in-order, so we have to iterate both the code and heap pointers in + // the small section before doing so in the extended section. 
+ for (int s = 0; s <= final_section(); ++s) { + LayoutSection section = static_cast<LayoutSection>(s); + ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR, + section); + while (!code_iter.is_finished()) { + v->VisitCodeEntry(reinterpret_cast<Address>( + RawFieldOfElementAt(code_iter.next_index()))); + } + + ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR, + section); + while (!heap_iter.is_finished()) { + v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index())); + } + } +} + + +void ConstantPoolArray::ClearPtrEntries(Isolate* isolate) { + Type type[] = { CODE_PTR, HEAP_PTR }; + Address default_value[] = { + isolate->builtins()->builtin(Builtins::kIllegal)->entry(), + reinterpret_cast<Address>(isolate->heap()->undefined_value()) }; + + for (int i = 0; i < 2; ++i) { + for (int s = 0; s <= final_section(); ++s) { + LayoutSection section = static_cast<LayoutSection>(s); + if (number_of_entries(type[i], section) > 0) { + int offset = OffsetOfElementAt(first_index(type[i], section)); + MemsetPointer( + reinterpret_cast<Address*>(HeapObject::RawField(this, offset)), + default_value[i], + number_of_entries(type[i], section)); + } + } } } @@ -9441,11 +9628,11 @@ void JSFunction::MarkForOptimization() { - ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints()); - ASSERT(!IsOptimized()); - ASSERT(shared()->allows_lazy_compilation() || + DCHECK(is_compiled() || GetIsolate()->DebuggerHasBreakPoints()); + DCHECK(!IsOptimized()); + DCHECK(shared()->allows_lazy_compilation() || code()->optimizable()); - ASSERT(!shared()->is_generator()); + DCHECK(!shared()->is_generator()); set_code_no_write_barrier( GetIsolate()->builtins()->builtin(Builtins::kCompileOptimized)); // No write barrier required, since the builtin is part of the root set. 
@@ -9453,11 +9640,11 @@ void JSFunction::MarkForConcurrentOptimization() { - ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints()); - ASSERT(!IsOptimized()); - ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); - ASSERT(!shared()->is_generator()); - ASSERT(GetIsolate()->concurrent_recompilation_enabled()); + DCHECK(is_compiled() || GetIsolate()->DebuggerHasBreakPoints()); + DCHECK(!IsOptimized()); + DCHECK(shared()->allows_lazy_compilation() || code()->optimizable()); + DCHECK(!shared()->is_generator()); + DCHECK(GetIsolate()->concurrent_recompilation_enabled()); if (FLAG_trace_concurrent_recompilation) { PrintF(" ** Marking "); PrintName(); @@ -9472,10 +9659,10 @@ void JSFunction::MarkInOptimizationQueue() { // We can only arrive here via the concurrent-recompilation builtin. If // break points were set, the code would point to the lazy-compile builtin. - ASSERT(!GetIsolate()->DebuggerHasBreakPoints()); - ASSERT(IsMarkedForConcurrentOptimization() && !IsOptimized()); - ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); - ASSERT(GetIsolate()->concurrent_recompilation_enabled()); + DCHECK(!GetIsolate()->DebuggerHasBreakPoints()); + DCHECK(IsMarkedForConcurrentOptimization() && !IsOptimized()); + DCHECK(shared()->allows_lazy_compilation() || code()->optimizable()); + DCHECK(GetIsolate()->concurrent_recompilation_enabled()); if (FLAG_trace_concurrent_recompilation) { PrintF(" ** Queueing "); PrintName(); @@ -9493,73 +9680,58 @@ Handle<Code> code, Handle<FixedArray> literals, BailoutId osr_ast_id) { - CALL_HEAP_FUNCTION_VOID( - shared->GetIsolate(), - shared->AddToOptimizedCodeMap( - *native_context, *code, *literals, osr_ast_id)); -} - - -MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context, - Code* code, - FixedArray* literals, - BailoutId osr_ast_id) { - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); - ASSERT(native_context->IsNativeContext()); + Isolate* isolate = shared->GetIsolate(); + 
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(native_context->IsNativeContext()); STATIC_ASSERT(kEntryLength == 4); - Heap* heap = GetHeap(); - FixedArray* new_code_map; - Object* value = optimized_code_map(); - Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt()); + Handle<FixedArray> new_code_map; + Handle<Object> value(shared->optimized_code_map(), isolate); + int old_length; if (value->IsSmi()) { // No optimized code map. - ASSERT_EQ(0, Smi::cast(value)->value()); + DCHECK_EQ(0, Smi::cast(*value)->value()); // Create 3 entries per context {context, code, literals}. - MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength); - if (!maybe->To(&new_code_map)) return maybe; - new_code_map->set(kEntriesStart + kContextOffset, native_context); - new_code_map->set(kEntriesStart + kCachedCodeOffset, code); - new_code_map->set(kEntriesStart + kLiteralsOffset, literals); - new_code_map->set(kEntriesStart + kOsrAstIdOffset, osr_ast_id_smi); + new_code_map = isolate->factory()->NewFixedArray(kInitialLength); + old_length = kEntriesStart; } else { // Copy old map and append one new entry. 
- FixedArray* old_code_map = FixedArray::cast(value); - ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context, osr_ast_id)); - int old_length = old_code_map->length(); - int new_length = old_length + kEntryLength; - MaybeObject* maybe = old_code_map->CopySize(new_length); - if (!maybe->To(&new_code_map)) return maybe; - new_code_map->set(old_length + kContextOffset, native_context); - new_code_map->set(old_length + kCachedCodeOffset, code); - new_code_map->set(old_length + kLiteralsOffset, literals); - new_code_map->set(old_length + kOsrAstIdOffset, osr_ast_id_smi); + Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value); + DCHECK_EQ(-1, shared->SearchOptimizedCodeMap(*native_context, osr_ast_id)); + old_length = old_code_map->length(); + new_code_map = FixedArray::CopySize( + old_code_map, old_length + kEntryLength); // Zap the old map for the sake of the heap verifier. if (Heap::ShouldZapGarbage()) { Object** data = old_code_map->data_start(); - MemsetPointer(data, heap->the_hole_value(), old_length); + MemsetPointer(data, isolate->heap()->the_hole_value(), old_length); } } + new_code_map->set(old_length + kContextOffset, *native_context); + new_code_map->set(old_length + kCachedCodeOffset, *code); + new_code_map->set(old_length + kLiteralsOffset, *literals); + new_code_map->set(old_length + kOsrAstIdOffset, + Smi::FromInt(osr_ast_id.ToInt())); + #ifdef DEBUG for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) { - ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext()); - ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode()); - ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() == + DCHECK(new_code_map->get(i + kContextOffset)->IsNativeContext()); + DCHECK(new_code_map->get(i + kCachedCodeOffset)->IsCode()); + DCHECK(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() == Code::OPTIMIZED_FUNCTION); - ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray()); - ASSERT(new_code_map->get(i 
+ kOsrAstIdOffset)->IsSmi()); + DCHECK(new_code_map->get(i + kLiteralsOffset)->IsFixedArray()); + DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi()); } #endif - set_optimized_code_map(new_code_map); - return new_code_map; + shared->set_optimized_code_map(*new_code_map); } FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) { - ASSERT(index > kEntriesStart); + DCHECK(index > kEntriesStart); FixedArray* code_map = FixedArray::cast(optimized_code_map()); if (!bound()) { FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1)); - ASSERT_NE(NULL, cached_literals); + DCHECK_NE(NULL, cached_literals); return cached_literals; } return NULL; @@ -9567,10 +9739,10 @@ Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) { - ASSERT(index > kEntriesStart); + DCHECK(index > kEntriesStart); FixedArray* code_map = FixedArray::cast(optimized_code_map()); Code* code = Code::cast(code_map->get(index)); - ASSERT_NE(NULL, code); + DCHECK_NE(NULL, code); return code; } @@ -9585,20 +9757,21 @@ flusher->EvictOptimizedCodeMap(this); } - ASSERT(code_map->get(kNextMapIndex)->IsUndefined()); + DCHECK(code_map->get(kNextMapIndex)->IsUndefined()); set_optimized_code_map(Smi::FromInt(0)); } void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason) { + DisallowHeapAllocation no_gc; if (optimized_code_map()->IsSmi()) return; FixedArray* code_map = FixedArray::cast(optimized_code_map()); int dst = kEntriesStart; int length = code_map->length(); for (int src = kEntriesStart; src < length; src += kEntryLength) { - ASSERT(code_map->get(src)->IsNativeContext()); + DCHECK(code_map->get(src)->IsNativeContext()); if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) { // Evict the src entry by not copying it to the dst entry. if (FLAG_trace_opt) { @@ -9628,7 +9801,7 @@ } if (dst != length) { // Always trim even when array is cleared because of heap verifier. 
- RightTrimFixedArray<Heap::FROM_MUTATOR>(GetHeap(), code_map, length - dst); + GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(code_map, length - dst); if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap(); } } @@ -9636,78 +9809,78 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) { FixedArray* code_map = FixedArray::cast(optimized_code_map()); - ASSERT(shrink_by % kEntryLength == 0); - ASSERT(shrink_by <= code_map->length() - kEntriesStart); + DCHECK(shrink_by % kEntryLength == 0); + DCHECK(shrink_by <= code_map->length() - kEntriesStart); // Always trim even when array is cleared because of heap verifier. - RightTrimFixedArray<Heap::FROM_GC>(GetHeap(), code_map, shrink_by); + GetHeap()->RightTrimFixedArray<Heap::FROM_GC>(code_map, shrink_by); if (code_map->length() == kEntriesStart) { ClearOptimizedCodeMap(); } } -void JSObject::OptimizeAsPrototype(Handle<JSObject> object) { +void JSObject::OptimizeAsPrototype(Handle<JSObject> object, + PrototypeOptimizationMode mode) { if (object->IsGlobalObject()) return; - - // Make sure prototypes are fast objects and their maps have the bit set - // so they remain fast. + if (object->IsJSGlobalProxy()) return; + if (mode == FAST_PROTOTYPE && !object->map()->is_prototype_map()) { + // First normalize to ensure all JSFunctions are CONSTANT. 
+ JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0); + } if (!object->HasFastProperties()) { - TransformToFastProperties(object, 0); + JSObject::MigrateSlowToFast(object, 0); + } + if (mode == FAST_PROTOTYPE && object->HasFastProperties() && + !object->map()->is_prototype_map()) { + Handle<Map> new_map = Map::Copy(handle(object->map())); + JSObject::MigrateToMap(object, new_map); + object->map()->set_is_prototype_map(true); } } -static MUST_USE_RESULT MaybeObject* CacheInitialJSArrayMaps( - Context* native_context, Map* initial_map) { +void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) { + if (!object->map()->is_prototype_map()) return; + OptimizeAsPrototype(object, FAST_PROTOTYPE); +} + + +Handle<Object> CacheInitialJSArrayMaps( + Handle<Context> native_context, Handle<Map> initial_map) { // Replace all of the cached initial array maps in the native context with // the appropriate transitioned elements kind maps. - Heap* heap = native_context->GetHeap(); - MaybeObject* maybe_maps = - heap->AllocateFixedArrayWithHoles(kElementsKindCount, TENURED); - FixedArray* maps; - if (!maybe_maps->To(&maps)) return maybe_maps; + Factory* factory = native_context->GetIsolate()->factory(); + Handle<FixedArray> maps = factory->NewFixedArrayWithHoles( + kElementsKindCount, TENURED); - Map* current_map = initial_map; + Handle<Map> current_map = initial_map; ElementsKind kind = current_map->elements_kind(); - ASSERT(kind == GetInitialFastElementsKind()); - maps->set(kind, current_map); + DCHECK(kind == GetInitialFastElementsKind()); + maps->set(kind, *current_map); for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1; i < kFastElementsKindCount; ++i) { - Map* new_map; + Handle<Map> new_map; ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i); if (current_map->HasElementsTransition()) { - new_map = current_map->elements_transition_map(); - ASSERT(new_map->elements_kind() == next_kind); + new_map = 
handle(current_map->elements_transition_map()); + DCHECK(new_map->elements_kind() == next_kind); } else { - MaybeObject* maybe_new_map = - current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + new_map = Map::CopyAsElementsKind( + current_map, next_kind, INSERT_TRANSITION); } - maps->set(next_kind, new_map); + maps->set(next_kind, *new_map); current_map = new_map; } - native_context->set_js_array_maps(maps); + native_context->set_js_array_maps(*maps); return initial_map; } -Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context, - Handle<Map> initial_map) { - CALL_HEAP_FUNCTION(native_context->GetIsolate(), - CacheInitialJSArrayMaps(*native_context, *initial_map), - Object); -} - - void JSFunction::SetInstancePrototype(Handle<JSFunction> function, Handle<Object> value) { - ASSERT(value->IsJSReceiver()); + Isolate* isolate = function->GetIsolate(); - // First some logic for the map of the prototype to make sure it is in fast - // mode. - if (value->IsJSObject()) { - JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value)); - } + DCHECK(value->IsJSReceiver()); // Now some logic for the maps of the objects that are created by using this // function as a constructor. @@ -9716,35 +9889,49 @@ // copy containing the new prototype. Also complete any in-object // slack tracking that is in progress at this point because it is // still tracking the old copy. - if (function->shared()->IsInobjectSlackTrackingInProgress()) { - function->shared()->CompleteInobjectSlackTracking(); + if (function->IsInobjectSlackTrackingInProgress()) { + function->CompleteInobjectSlackTracking(); } - Handle<Map> new_map = Map::Copy(handle(function->initial_map())); - new_map->set_prototype(*value); - // If the function is used as the global Array function, cache the - // initial map (and transitioned versions) in the native context. 
- Context* native_context = function->context()->native_context(); - Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX); - if (array_function->IsJSFunction() && - *function == JSFunction::cast(array_function)) { - CacheInitialJSArrayMaps(handle(native_context), new_map); + Handle<Map> initial_map(function->initial_map(), isolate); + + if (!initial_map->GetIsolate()->bootstrapper()->IsActive() && + initial_map->instance_type() == JS_OBJECT_TYPE) { + // Put the value in the initial map field until an initial map is needed. + // At that point, a new initial map is created and the prototype is put + // into the initial map where it belongs. + function->set_prototype_or_initial_map(*value); + } else { + Handle<Map> new_map = Map::Copy(initial_map); + JSFunction::SetInitialMap(function, new_map, value); + + // If the function is used as the global Array function, cache the + // initial map (and transitioned versions) in the native context. + Context* native_context = function->context()->native_context(); + Object* array_function = + native_context->get(Context::ARRAY_FUNCTION_INDEX); + if (array_function->IsJSFunction() && + *function == JSFunction::cast(array_function)) { + CacheInitialJSArrayMaps(handle(native_context, isolate), new_map); + } } - function->set_initial_map(*new_map); + // Deoptimize all code that embeds the previous initial map. + initial_map->dependent_code()->DeoptimizeDependentCodeGroup( + isolate, DependentCode::kInitialMapChangedGroup); } else { // Put the value in the initial map field until an initial map is // needed. At that point, a new initial map is created and the // prototype is put into the initial map where it belongs. 
function->set_prototype_or_initial_map(*value); } - function->GetHeap()->ClearInstanceofCache(); + isolate->heap()->ClearInstanceofCache(); } void JSFunction::SetPrototype(Handle<JSFunction> function, Handle<Object> value) { - ASSERT(function->should_have_prototype()); + DCHECK(function->should_have_prototype()); Handle<Object> construct_prototype = value; // If the value is not a JSReceiver, store the value in the map's @@ -9772,20 +9959,37 @@ } -void JSFunction::RemovePrototype() { +bool JSFunction::RemovePrototype() { Context* native_context = context()->native_context(); Map* no_prototype_map = shared()->strict_mode() == SLOPPY ? native_context->sloppy_function_without_prototype_map() : native_context->strict_function_without_prototype_map(); - if (map() == no_prototype_map) return; + if (map() == no_prototype_map) return true; - ASSERT(map() == (shared()->strict_mode() == SLOPPY +#ifdef DEBUG + if (map() != (shared()->strict_mode() == SLOPPY ? native_context->sloppy_function_map() - : native_context->strict_function_map())); + : native_context->strict_function_map())) { + return false; + } +#endif set_map(no_prototype_map); set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value()); + return true; +} + + +void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map, + Handle<Object> prototype) { + if (prototype->IsJSObject()) { + Handle<JSObject> js_proto = Handle<JSObject>::cast(prototype); + JSObject::OptimizeAsPrototype(js_proto, FAST_PROTOTYPE); + } + map->set_prototype(*prototype); + function->set_prototype_or_initial_map(*map); + map->set_constructor(*function); } @@ -9818,16 +10022,14 @@ } map->set_inobject_properties(in_object_properties); map->set_unused_property_fields(in_object_properties); - map->set_prototype(*prototype); - ASSERT(map->has_fast_object_elements()); + DCHECK(map->has_fast_object_elements()); + + // Finally link initial map and constructor function. 
+ JSFunction::SetInitialMap(function, map, Handle<JSReceiver>::cast(prototype)); if (!function->shared()->is_generator()) { - function->shared()->StartInobjectSlackTracking(*map); + function->StartInobjectSlackTracking(); } - - // Finally link initial map and constructor function. - function->set_initial_map(*map); - map->set_constructor(*function); } @@ -9854,6 +10056,7 @@ // "" only the top-level function // "name" only the function "name" // "name*" only functions starting with "name" +// "~" none; the tilde is not an identifier bool JSFunction::PassesFilter(const char* raw_filter) { if (*raw_filter == '*') return true; String* name = shared()->DebugName(); @@ -9883,20 +10086,176 @@ } -MaybeObject* Oddball::Initialize(Heap* heap, - const char* to_string, - Object* to_number, - byte kind) { - String* internalized_to_string; - { MaybeObject* maybe_string = - heap->InternalizeUtf8String( - CStrVector(to_string)); - if (!maybe_string->To(&internalized_to_string)) return maybe_string; - } - set_to_string(internalized_to_string); - set_to_number(to_number); - set_kind(kind); - return this; +void Oddball::Initialize(Isolate* isolate, + Handle<Oddball> oddball, + const char* to_string, + Handle<Object> to_number, + byte kind) { + Handle<String> internalized_to_string = + isolate->factory()->InternalizeUtf8String(to_string); + oddball->set_to_string(*internalized_to_string); + oddball->set_to_number(*to_number); + oddball->set_kind(kind); +} + + +void Script::InitLineEnds(Handle<Script> script) { + if (!script->line_ends()->IsUndefined()) return; + + Isolate* isolate = script->GetIsolate(); + + if (!script->source()->IsString()) { + DCHECK(script->source()->IsUndefined()); + Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0); + script->set_line_ends(*empty); + DCHECK(script->line_ends()->IsFixedArray()); + return; + } + + Handle<String> src(String::cast(script->source()), isolate); + + Handle<FixedArray> array = String::CalculateLineEnds(src, true); + + if 
(*array != isolate->heap()->empty_fixed_array()) { + array->set_map(isolate->heap()->fixed_cow_array_map()); + } + + script->set_line_ends(*array); + DCHECK(script->line_ends()->IsFixedArray()); +} + + +int Script::GetColumnNumber(Handle<Script> script, int code_pos) { + int line_number = GetLineNumber(script, code_pos); + if (line_number == -1) return -1; + + DisallowHeapAllocation no_allocation; + FixedArray* line_ends_array = FixedArray::cast(script->line_ends()); + line_number = line_number - script->line_offset()->value(); + if (line_number == 0) return code_pos + script->column_offset()->value(); + int prev_line_end_pos = + Smi::cast(line_ends_array->get(line_number - 1))->value(); + return code_pos - (prev_line_end_pos + 1); +} + + +int Script::GetLineNumberWithArray(int code_pos) { + DisallowHeapAllocation no_allocation; + DCHECK(line_ends()->IsFixedArray()); + FixedArray* line_ends_array = FixedArray::cast(line_ends()); + int line_ends_len = line_ends_array->length(); + if (line_ends_len == 0) return -1; + + if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) { + return line_offset()->value(); + } + + int left = 0; + int right = line_ends_len; + while (int half = (right - left) / 2) { + if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) { + right -= half; + } else { + left += half; + } + } + return right + line_offset()->value(); +} + + +int Script::GetLineNumber(Handle<Script> script, int code_pos) { + InitLineEnds(script); + return script->GetLineNumberWithArray(code_pos); +} + + +int Script::GetLineNumber(int code_pos) { + DisallowHeapAllocation no_allocation; + if (!line_ends()->IsUndefined()) return GetLineNumberWithArray(code_pos); + + // Slow mode: we do not have line_ends. We have to iterate through source. 
+ if (!source()->IsString()) return -1; + + String* source_string = String::cast(source()); + int line = 0; + int len = source_string->length(); + for (int pos = 0; pos < len; pos++) { + if (pos == code_pos) break; + if (source_string->Get(pos) == '\n') line++; + } + return line; +} + + +Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) { + Isolate* isolate = script->GetIsolate(); + Handle<String> name_or_source_url_key = + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("nameOrSourceURL")); + Handle<JSObject> script_wrapper = Script::GetWrapper(script); + Handle<Object> property = Object::GetProperty( + script_wrapper, name_or_source_url_key).ToHandleChecked(); + DCHECK(property->IsJSFunction()); + Handle<JSFunction> method = Handle<JSFunction>::cast(property); + Handle<Object> result; + // Do not check against pending exception, since this function may be called + // when an exception has already been pending. + if (!Execution::TryCall(method, script_wrapper, 0, NULL).ToHandle(&result)) { + return isolate->factory()->undefined_value(); + } + return result; +} + + +// Wrappers for scripts are kept alive and cached in weak global +// handles referred from foreign objects held by the scripts as long as +// they are used. When they are not used anymore, the garbage +// collector will call the weak callback on the global handle +// associated with the wrapper and get rid of both the wrapper and the +// handle. 
+static void ClearWrapperCacheWeakCallback( + const v8::WeakCallbackData<v8::Value, void>& data) { + Object** location = reinterpret_cast<Object**>(data.GetParameter()); + JSValue* wrapper = JSValue::cast(*location); + Script::cast(wrapper->value())->ClearWrapperCache(); +} + + +void Script::ClearWrapperCache() { + Foreign* foreign = wrapper(); + Object** location = reinterpret_cast<Object**>(foreign->foreign_address()); + DCHECK_EQ(foreign->foreign_address(), reinterpret_cast<Address>(location)); + foreign->set_foreign_address(0); + GlobalHandles::Destroy(location); + GetIsolate()->counters()->script_wrappers()->Decrement(); +} + + +Handle<JSObject> Script::GetWrapper(Handle<Script> script) { + if (script->wrapper()->foreign_address() != NULL) { + // Return a handle for the existing script wrapper from the cache. + return Handle<JSValue>( + *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address())); + } + Isolate* isolate = script->GetIsolate(); + // Construct a new script wrapper. + isolate->counters()->script_wrappers()->Increment(); + Handle<JSFunction> constructor = isolate->script_function(); + Handle<JSValue> result = + Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor)); + + result->set_value(*script); + + // Create a new weak global handle and use it to cache the wrapper + // for future use. The cache will automatically be cleared by the + // garbage collector when it is not used anymore. 
+ Handle<Object> handle = isolate->global_handles()->Create(*result); + GlobalHandles::MakeWeak(handle.location(), + reinterpret_cast<void*>(handle.location()), + &ClearWrapperCacheWeakCallback); + script->wrapper()->set_foreign_address( + reinterpret_cast<Address>(handle.location())); + return result; } @@ -9907,7 +10266,7 @@ } -bool SharedFunctionInfo::HasSourceCode() { +bool SharedFunctionInfo::HasSourceCode() const { return !script()->IsUndefined() && !reinterpret_cast<Script*>(script())->source()->IsUndefined(); } @@ -9952,43 +10311,36 @@ } -// Support function for printing the source code to a StringStream -// without any allocation in the heap. -void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator, - int max_length) { +// Output the source code without any allocation in the heap. +OStream& operator<<(OStream& os, const SourceCodeOf& v) { + const SharedFunctionInfo* s = v.value; // For some native functions there is no source. - if (!HasSourceCode()) { - accumulator->Add("<No Source>"); - return; - } + if (!s->HasSourceCode()) return os << "<No Source>"; // Get the source for the script which this function came from. // Don't use String::cast because we don't want more assertion errors while // we are already creating a stack dump. 
String* script_source = - reinterpret_cast<String*>(Script::cast(script())->source()); + reinterpret_cast<String*>(Script::cast(s->script())->source()); - if (!script_source->LooksValid()) { - accumulator->Add("<Invalid Source>"); - return; - } + if (!script_source->LooksValid()) return os << "<Invalid Source>"; - if (!is_toplevel()) { - accumulator->Add("function "); - Object* name = this->name(); + if (!s->is_toplevel()) { + os << "function "; + Object* name = s->name(); if (name->IsString() && String::cast(name)->length() > 0) { - accumulator->PrintName(name); + String::cast(name)->PrintUC16(os); } } - int len = end_position() - start_position(); - if (len <= max_length || max_length < 0) { - accumulator->Put(script_source, start_position(), end_position()); + int len = s->end_position() - s->start_position(); + if (len <= v.max_length || v.max_length < 0) { + script_source->PrintUC16(os, s->start_position(), s->end_position()); + return os; } else { - accumulator->Put(script_source, - start_position(), - start_position() + max_length); - accumulator->Add("...\n"); + script_source->PrintUC16(os, s->start_position(), + s->start_position() + v.max_length); + return os << "...\n"; } } @@ -10007,7 +10359,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) { - ASSERT(!has_deoptimization_support()); + DCHECK(!has_deoptimization_support()); DisallowHeapAllocation no_allocation; Code* code = this->code(); if (IsCodeEquivalent(code, recompiled)) { @@ -10021,7 +10373,7 @@ // effectively resetting all IC state. ReplaceCode(recompiled); } - ASSERT(has_deoptimization_support()); + DCHECK(has_deoptimization_support()); } @@ -10037,13 +10389,11 @@ set_bailout_reason(reason); // Code should be the lazy compilation stub or else unoptimized. If the // latter, disable optimization for the code too. 
- ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN); + DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN); if (code()->kind() == Code::FUNCTION) { code()->set_optimizable(false); } - PROFILE(GetIsolate(), - LogExistingFunction(Handle<SharedFunctionInfo>(this), - Handle<Code>(code()))); + PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this)); if (FLAG_trace_opt) { PrintF("[disabled optimization for "); ShortPrint(); @@ -10053,85 +10403,41 @@ bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) { - ASSERT(!id.IsNone()); + DCHECK(!id.IsNone()); Code* unoptimized = code(); DeoptimizationOutputData* data = DeoptimizationOutputData::cast(unoptimized->deoptimization_data()); unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this); USE(ignore); - return true; // Return true if there was no ASSERT. + return true; // Return true if there was no DCHECK. } -void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) { - ASSERT(!IsInobjectSlackTrackingInProgress()); +void JSFunction::StartInobjectSlackTracking() { + DCHECK(has_initial_map() && !IsInobjectSlackTrackingInProgress()); if (!FLAG_clever_optimizations) return; + Map* map = initial_map(); // Only initiate the tracking the first time. - if (live_objects_may_exist()) return; - set_live_objects_may_exist(true); + if (map->done_inobject_slack_tracking()) return; + map->set_done_inobject_slack_tracking(true); // No tracking during the snapshot construction phase. - if (Serializer::enabled()) return; + Isolate* isolate = GetIsolate(); + if (isolate->serializer_enabled()) return; if (map->unused_property_fields() == 0) return; - // Nonzero counter is a leftover from the previous attempt interrupted - // by GC, keep it. 
- if (construction_count() == 0) { - set_construction_count(kGenerousAllocationCount); - } - set_initial_map(map); - Builtins* builtins = map->GetHeap()->isolate()->builtins(); - ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric), - construct_stub()); - set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown)); -} - - -// Called from GC, hence reinterpret_cast and unchecked accessors. -void SharedFunctionInfo::DetachInitialMap() { - Map* map = reinterpret_cast<Map*>(initial_map()); - - // Make the map remember to restore the link if it survives the GC. - map->set_bit_field2( - map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo)); - - // Undo state changes made by StartInobjectTracking (except the - // construction_count). This way if the initial map does not survive the GC - // then StartInobjectTracking will be called again the next time the - // constructor is called. The countdown will continue and (possibly after - // several more GCs) CompleteInobjectSlackTracking will eventually be called. - Heap* heap = map->GetHeap(); - set_initial_map(heap->undefined_value()); - Builtins* builtins = heap->isolate()->builtins(); - ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown), - *RawField(this, kConstructStubOffset)); - set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric)); - // It is safe to clear the flag: it will be set again if the map is live. - set_live_objects_may_exist(false); -} - - -// Called from GC, hence reinterpret_cast and unchecked accessors. -void SharedFunctionInfo::AttachInitialMap(Map* map) { - map->set_bit_field2( - map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo)); - - // Resume inobject slack tracking. 
- set_initial_map(map); - Builtins* builtins = map->GetHeap()->isolate()->builtins(); - ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric), - *RawField(this, kConstructStubOffset)); - set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown)); - // The map survived the gc, so there may be objects referencing it. - set_live_objects_may_exist(true); + map->set_construction_count(kGenerousAllocationCount); } void SharedFunctionInfo::ResetForNewContext(int new_ic_age) { code()->ClearInlineCaches(); + // If we clear ICs, we need to clear the type feedback vector too, since + // CallICs are synced with a feedback vector slot. + ClearTypeFeedbackInfo(); set_ic_age(new_ic_age); if (code()->kind() == Code::FUNCTION) { code()->set_profiler_ticks(0); @@ -10166,33 +10472,26 @@ } -void SharedFunctionInfo::CompleteInobjectSlackTracking() { - ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress()); - Map* map = Map::cast(initial_map()); - - Heap* heap = map->GetHeap(); - set_initial_map(heap->undefined_value()); - Builtins* builtins = heap->isolate()->builtins(); - ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown), - construct_stub()); - set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric)); +void JSFunction::CompleteInobjectSlackTracking() { + DCHECK(has_initial_map()); + Map* map = initial_map(); + + DCHECK(map->done_inobject_slack_tracking()); + map->set_construction_count(kNoSlackTracking); int slack = map->unused_property_fields(); map->TraverseTransitionTree(&GetMinInobjectSlack, &slack); if (slack != 0) { // Resize the initial map and all maps in its transition tree. map->TraverseTransitionTree(&ShrinkInstanceSize, &slack); - - // Give the correct expected_nof_properties to initial maps created later. 
- ASSERT(expected_nof_properties() >= slack); - set_expected_nof_properties(expected_nof_properties() - slack); } } int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id) { - ASSERT(native_context->IsNativeContext()); + DisallowHeapAllocation no_gc; + DCHECK(native_context->IsNativeContext()); if (!FLAG_cache_optimized_code) return -1; Object* value = optimized_code_map(); if (!value->IsSmi()) { @@ -10232,7 +10531,7 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); + DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); Object* old_target = target; VisitPointer(&target); @@ -10241,7 +10540,7 @@ void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); + DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); Object* stub = rinfo->code_age_stub(); if (stub) { VisitPointer(&stub); @@ -10260,7 +10559,7 @@ void ObjectVisitor::VisitCell(RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::CELL); + DCHECK(rinfo->rmode() == RelocInfo::CELL); Object* cell = rinfo->target_cell(); Object* old_cell = cell; VisitPointer(&cell); @@ -10271,7 +10570,7 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) { - ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && + DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && rinfo->IsPatchedDebugBreakSlotSequence())); @@ -10283,7 +10582,7 @@ void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); Object* p = rinfo->target_object(); VisitPointer(&p); } @@ -10296,6 +10595,7 @@ void Code::InvalidateRelocation() { + InvalidateEmbeddedObjects(); set_relocation_info(GetHeap()->empty_byte_array()); } @@ -10318,14 +10618,14 @@ 
void Code::Relocate(intptr_t delta) { for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) { - it.rinfo()->apply(delta); + it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH); } - CPU::FlushICache(instruction_start(), instruction_size()); + CpuFeatures::FlushICache(instruction_start(), instruction_size()); } void Code::CopyFrom(const CodeDesc& desc) { - ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT); + DCHECK(Marking::Color(this) == Marking::WHITE_OBJECT); // copy code CopyBytes(instruction_start(), desc.buffer, @@ -10350,29 +10650,31 @@ RelocInfo::Mode mode = it.rinfo()->rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { Handle<Object> p = it.rinfo()->target_object_handle(origin); - it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER); + it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH); } else if (mode == RelocInfo::CELL) { Handle<Cell> cell = it.rinfo()->target_cell_handle(); - it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER); + it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH); } else if (RelocInfo::IsCodeTarget(mode)) { // rewrite code handles in inline cache targets to direct // pointers to the first instruction in the code object Handle<Object> p = it.rinfo()->target_object_handle(origin); Code* code = Code::cast(*p); it.rinfo()->set_target_address(code->instruction_start(), - SKIP_WRITE_BARRIER); + SKIP_WRITE_BARRIER, + SKIP_ICACHE_FLUSH); } else if (RelocInfo::IsRuntimeEntry(mode)) { Address p = it.rinfo()->target_runtime_entry(origin); - it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER); + it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER, + SKIP_ICACHE_FLUSH); } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) { Handle<Object> p = it.rinfo()->code_age_stub_handle(origin); Code* code = Code::cast(*p); - it.rinfo()->set_code_age_stub(code); + it.rinfo()->set_code_age_stub(code, SKIP_ICACHE_FLUSH); } else { - it.rinfo()->apply(delta); + it.rinfo()->apply(delta, 
SKIP_ICACHE_FLUSH); } } - CPU::FlushICache(instruction_start(), instruction_size()); + CpuFeatures::FlushICache(instruction_start(), instruction_size()); } @@ -10434,12 +10736,31 @@ SafepointEntry Code::GetSafepointEntry(Address pc) { SafepointTable table(this); - return table.FindEntry(pc); + SafepointEntry entry = table.FindEntry(pc); + if (entry.is_valid() || !is_turbofanned()) { + return entry; + } + + // If the code is turbofanned, we might be looking for + // an address that was patched by lazy deoptimization. + // In that case look through the patch table, try to + // lookup the original address there, and then use this + // to find the safepoint entry. + DeoptimizationInputData* deopt_data = + DeoptimizationInputData::cast(deoptimization_data()); + intptr_t offset = pc - instruction_start(); + for (int i = 0; i < deopt_data->ReturnAddressPatchCount(); i++) { + if (deopt_data->PatchedAddressPc(i)->value() == offset) { + int original_offset = deopt_data->ReturnAddressPc(i)->value(); + return table.FindEntry(instruction_start() + original_offset); + } + } + return SafepointEntry(); } Object* Code::FindNthObject(int n, Map* match_map) { - ASSERT(is_inline_cache_stub()); + DCHECK(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(this, mask); !it.done(); it.next()) { @@ -10468,7 +10789,7 @@ void Code::FindAndReplace(const FindAndReplacePattern& pattern) { - ASSERT(is_inline_cache_stub() || is_handler()); + DCHECK(is_inline_cache_stub() || is_handler()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); STATIC_ASSERT(FindAndReplacePattern::kMaxCount < 32); @@ -10489,7 +10810,7 @@ void Code::FindAllMaps(MapHandleList* maps) { - ASSERT(is_inline_cache_stub()); + DCHECK(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(this, mask); 
!it.done(); it.next()) { @@ -10500,23 +10821,8 @@ } -void Code::FindAllTypes(TypeHandleList* types) { - ASSERT(is_inline_cache_stub()); - DisallowHeapAllocation no_allocation; - int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); - for (RelocIterator it(this, mask); !it.done(); it.next()) { - RelocInfo* info = it.rinfo(); - Object* object = info->target_object(); - if (object->IsMap()) { - Handle<Map> map(Map::cast(object)); - types->Add(IC::MapToType<HeapType>(map, map->GetIsolate())); - } - } -} - - Code* Code::FindFirstHandler() { - ASSERT(is_inline_cache_stub()); + DCHECK(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET); for (RelocIterator it(this, mask); !it.done(); it.next()) { @@ -10529,7 +10835,7 @@ bool Code::FindHandlers(CodeHandleList* code_list, int length) { - ASSERT(is_inline_cache_stub()); + DCHECK(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET); int i = 0; @@ -10547,8 +10853,28 @@ } +MaybeHandle<Code> Code::FindHandlerForMap(Map* map) { + DCHECK(is_inline_cache_stub()); + int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | + RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + bool return_next = false; + for (RelocIterator it(this, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) { + Object* object = info->target_object(); + if (object == map) return_next = true; + } else if (return_next) { + Code* code = Code::GetCodeFromTargetAddress(info->target_address()); + DCHECK(code->kind() == Code::HANDLER); + return handle(code); + } + } + return MaybeHandle<Code>(); +} + + Name* Code::FindFirstName() { - ASSERT(is_inline_cache_stub()); + DCHECK(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(this, mask); !it.done(); it.next()) { @@ -10587,18 
+10913,25 @@ } -void Code::ClearTypeFeedbackInfo(Heap* heap) { - if (kind() != FUNCTION) return; - Object* raw_info = type_feedback_info(); - if (raw_info->IsTypeFeedbackInfo()) { - FixedArray* feedback_vector = - TypeFeedbackInfo::cast(raw_info)->feedback_vector(); - for (int i = 0; i < feedback_vector->length(); i++) { - Object* obj = feedback_vector->get(i); - if (!obj->IsAllocationSite()) { - // TODO(mvstanton): Can't I avoid a write barrier for this sentinel? - feedback_vector->set(i, - TypeFeedbackInfo::RawUninitializedSentinel(heap)); +void SharedFunctionInfo::ClearTypeFeedbackInfo() { + FixedArray* vector = feedback_vector(); + Heap* heap = GetHeap(); + int length = vector->length(); + + for (int i = 0; i < length; i++) { + Object* obj = vector->get(i); + if (obj->IsHeapObject()) { + InstanceType instance_type = + HeapObject::cast(obj)->map()->instance_type(); + switch (instance_type) { + case ALLOCATION_SITE_TYPE: + // AllocationSites are not cleared because they do not store + // information that leaks. + break; + // Fall through... 
+ default: + vector->set(i, TypeFeedbackInfo::RawUninitializedSentinel(heap), + SKIP_WRITE_BARRIER); } } } @@ -10607,7 +10940,7 @@ BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) { DisallowHeapAllocation no_gc; - ASSERT(kind() == FUNCTION); + DCHECK(kind() == FUNCTION); BackEdgeTable back_edges(this, &no_gc); for (uint32_t i = 0; i < back_edges.length(); i++) { if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i); @@ -10618,7 +10951,7 @@ uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) { DisallowHeapAllocation no_gc; - ASSERT(kind() == FUNCTION); + DCHECK(kind() == FUNCTION); BackEdgeTable back_edges(this, &no_gc); for (uint32_t i = 0; i < back_edges.length(); i++) { if (back_edges.ast_id(i) == ast_id) return back_edges.pc_offset(i); @@ -10656,10 +10989,11 @@ if (sequence != NULL) { Age age; MarkingParity code_parity; - GetCodeAgeAndParity(sequence, &age, &code_parity); + Isolate* isolate = GetIsolate(); + GetCodeAgeAndParity(isolate, sequence, &age, &code_parity); age = EffectiveAge(age); if (age != kLastCodeAge && code_parity != current_parity) { - PatchPlatformCodeAge(GetIsolate(), + PatchPlatformCodeAge(isolate, sequence, static_cast<Age>(age + 1), current_parity); @@ -10695,7 +11029,7 @@ } Age age; MarkingParity parity; - GetCodeAgeAndParity(sequence, &age, &parity); + GetCodeAgeAndParity(GetIsolate(), sequence, &age, &parity); return age; } @@ -10749,11 +11083,11 @@ CODE_AGE_LIST(HANDLE_CODE_AGE) #undef HANDLE_CODE_AGE case kNotExecutedCodeAge: { - ASSERT(parity == NO_MARKING_PARITY); + DCHECK(parity == NO_MARKING_PARITY); return *builtins->MarkCodeAsExecutedOnce(); } case kExecutedOnceCodeAge: { - ASSERT(parity == NO_MARKING_PARITY); + DCHECK(parity == NO_MARKING_PARITY); return *builtins->MarkCodeAsExecutedTwice(); } default: @@ -10776,7 +11110,9 @@ if ((bailout_id == Deoptimizer::GetDeoptimizationId( GetIsolate(), info->target_address(), Deoptimizer::EAGER)) || (bailout_id == Deoptimizer::GetDeoptimizationId( - 
GetIsolate(), info->target_address(), Deoptimizer::SOFT))) { + GetIsolate(), info->target_address(), Deoptimizer::SOFT)) || + (bailout_id == Deoptimizer::GetDeoptimizationId( + GetIsolate(), info->target_address(), Deoptimizer::LAZY))) { CHECK(RelocInfo::IsRuntimeEntry(info->rmode())); PrintF(out, " %s\n", last_comment); return; @@ -10814,23 +11150,25 @@ #ifdef ENABLE_DISASSEMBLER -void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { +void DeoptimizationInputData::DeoptimizationInputDataPrint( + OStream& os) { // NOLINT disasm::NameConverter converter; int deopt_count = DeoptCount(); - PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count); - if (0 == deopt_count) return; - - PrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc", - FLAG_print_code_verbose ? "commands" : ""); + os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n"; + if (0 != deopt_count) { + os << " index ast id argc pc"; + if (FLAG_print_code_verbose) os << " commands"; + os << "\n"; + } for (int i = 0; i < deopt_count; i++) { - PrintF(out, "%6d %6d %6d %6d", - i, - AstId(i).ToInt(), - ArgumentsStackHeight(i)->value(), - Pc(i)->value()); + // TODO(svenpanne) Add some basic formatting to our streams. + Vector<char> buf1 = Vector<char>::New(128); + SNPrintF(buf1, "%6d %6d %6d %6d", i, AstId(i).ToInt(), + ArgumentsStackHeight(i)->value(), Pc(i)->value()); + os << buf1.start(); if (!FLAG_print_code_verbose) { - PrintF(out, "\n"); + os << "\n"; continue; } // Print details of the frame translation. 
@@ -10838,18 +11176,19 @@ TranslationIterator iterator(TranslationByteArray(), translation_index); Translation::Opcode opcode = static_cast<Translation::Opcode>(iterator.Next()); - ASSERT(Translation::BEGIN == opcode); + DCHECK(Translation::BEGIN == opcode); int frame_count = iterator.Next(); int jsframe_count = iterator.Next(); - PrintF(out, " %s {frame count=%d, js frame count=%d}\n", - Translation::StringFor(opcode), - frame_count, - jsframe_count); + os << " " << Translation::StringFor(opcode) + << " {frame count=" << frame_count + << ", js frame count=" << jsframe_count << "}\n"; while (iterator.HasNext() && Translation::BEGIN != (opcode = static_cast<Translation::Opcode>(iterator.Next()))) { - PrintF(out, "%24s %s ", "", Translation::StringFor(opcode)); + Vector<char> buf2 = Vector<char>::New(128); + SNPrintF(buf2, "%27s %s ", "", Translation::StringFor(opcode)); + os << buf2.start(); switch (opcode) { case Translation::BEGIN: @@ -10860,20 +11199,20 @@ int ast_id = iterator.Next(); int function_id = iterator.Next(); unsigned height = iterator.Next(); - PrintF(out, "{ast_id=%d, function=", ast_id); + os << "{ast_id=" << ast_id << ", function="; if (function_id != Translation::kSelfLiteralId) { Object* function = LiteralArray()->get(function_id); - JSFunction::cast(function)->PrintName(out); + os << Brief(JSFunction::cast(function)->shared()->DebugName()); } else { - PrintF(out, "<self>"); + os << "<self>"; } - PrintF(out, ", height=%u}", height); + os << ", height=" << height << "}"; break; } case Translation::COMPILED_STUB_FRAME: { Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next()); - PrintF(out, "{kind=%d}", stub_kind); + os << "{kind=" << stub_kind << "}"; break; } @@ -10883,9 +11222,8 @@ JSFunction* function = JSFunction::cast(LiteralArray()->get(function_id)); unsigned height = iterator.Next(); - PrintF(out, "{function="); - function->PrintName(out); - PrintF(out, ", height=%u}", height); + os << "{function=" << 
Brief(function->shared()->DebugName()) + << ", height=" << height << "}"; break; } @@ -10894,100 +11232,114 @@ int function_id = iterator.Next(); JSFunction* function = JSFunction::cast(LiteralArray()->get(function_id)); - PrintF(out, "{function="); - function->PrintName(out); - PrintF(out, "}"); + os << "{function=" << Brief(function->shared()->DebugName()) << "}"; break; } case Translation::REGISTER: { int reg_code = iterator.Next(); - PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code)); + os << "{input=" << converter.NameOfCPURegister(reg_code) << "}"; break; } case Translation::INT32_REGISTER: { int reg_code = iterator.Next(); - PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code)); + os << "{input=" << converter.NameOfCPURegister(reg_code) << "}"; break; } case Translation::UINT32_REGISTER: { int reg_code = iterator.Next(); - PrintF(out, "{input=%s (unsigned)}", - converter.NameOfCPURegister(reg_code)); + os << "{input=" << converter.NameOfCPURegister(reg_code) + << " (unsigned)}"; break; } case Translation::DOUBLE_REGISTER: { int reg_code = iterator.Next(); - PrintF(out, "{input=%s}", - DoubleRegister::AllocationIndexToString(reg_code)); + os << "{input=" << DoubleRegister::AllocationIndexToString(reg_code) + << "}"; break; } case Translation::STACK_SLOT: { int input_slot_index = iterator.Next(); - PrintF(out, "{input=%d}", input_slot_index); + os << "{input=" << input_slot_index << "}"; break; } case Translation::INT32_STACK_SLOT: { int input_slot_index = iterator.Next(); - PrintF(out, "{input=%d}", input_slot_index); + os << "{input=" << input_slot_index << "}"; break; } case Translation::UINT32_STACK_SLOT: { int input_slot_index = iterator.Next(); - PrintF(out, "{input=%d (unsigned)}", input_slot_index); + os << "{input=" << input_slot_index << " (unsigned)}"; break; } case Translation::DOUBLE_STACK_SLOT: { int input_slot_index = iterator.Next(); - PrintF(out, "{input=%d}", input_slot_index); + os << "{input=" << input_slot_index << 
"}"; break; } case Translation::LITERAL: { unsigned literal_index = iterator.Next(); - PrintF(out, "{literal_id=%u}", literal_index); + os << "{literal_id=" << literal_index << "}"; break; } case Translation::DUPLICATED_OBJECT: { int object_index = iterator.Next(); - PrintF(out, "{object_index=%d}", object_index); + os << "{object_index=" << object_index << "}"; break; } case Translation::ARGUMENTS_OBJECT: case Translation::CAPTURED_OBJECT: { int args_length = iterator.Next(); - PrintF(out, "{length=%d}", args_length); + os << "{length=" << args_length << "}"; break; } } - PrintF(out, "\n"); + os << "\n"; } } + + int return_address_patch_count = ReturnAddressPatchCount(); + if (return_address_patch_count != 0) { + os << "Return address patch data (count = " << return_address_patch_count + << ")\n"; + os << " index pc patched_pc\n"; + } + for (int i = 0; i < return_address_patch_count; i++) { + Vector<char> buf = Vector<char>::New(128); + SNPrintF(buf, "%6d %6d %12d\n", i, ReturnAddressPc(i)->value(), + PatchedAddressPc(i)->value()); + os << buf.start(); + } } -void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) { - PrintF(out, "Deoptimization Output Data (deopt points = %d)\n", - this->DeoptPoints()); +void DeoptimizationOutputData::DeoptimizationOutputDataPrint( + OStream& os) { // NOLINT + os << "Deoptimization Output Data (deopt points = " << this->DeoptPoints() + << ")\n"; if (this->DeoptPoints() == 0) return; - PrintF(out, "%6s %8s %s\n", "ast id", "pc", "state"); + os << "ast id pc state\n"; for (int i = 0; i < this->DeoptPoints(); i++) { int pc_and_state = this->PcAndState(i)->value(); - PrintF(out, "%6d %8d %s\n", - this->AstId(i).ToInt(), - FullCodeGenerator::PcField::decode(pc_and_state), - FullCodeGenerator::State2String( - FullCodeGenerator::StateField::decode(pc_and_state))); + // TODO(svenpanne) Add some basic formatting to our streams. 
+ Vector<char> buf = Vector<char>::New(100); + SNPrintF(buf, "%6d %8d %s\n", this->AstId(i).ToInt(), + FullCodeGenerator::PcField::decode(pc_and_state), + FullCodeGenerator::State2String( + FullCodeGenerator::StateField::decode(pc_and_state))); + os << buf.start(); } } @@ -10997,11 +11349,14 @@ case UNINITIALIZED: return "UNINITIALIZED"; case PREMONOMORPHIC: return "PREMONOMORPHIC"; case MONOMORPHIC: return "MONOMORPHIC"; - case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE"; + case PROTOTYPE_FAILURE: + return "PROTOTYPE_FAILURE"; case POLYMORPHIC: return "POLYMORPHIC"; case MEGAMORPHIC: return "MEGAMORPHIC"; case GENERIC: return "GENERIC"; case DEBUG_STUB: return "DEBUG_STUB"; + case DEFAULT: + return "DEFAULT"; } UNREACHABLE(); return NULL; @@ -11018,92 +11373,93 @@ } -void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) { - PrintF(out, "extra_ic_state = "); - const char* name = NULL; - switch (kind) { - case STORE_IC: - case KEYED_STORE_IC: - if (extra == STRICT) name = "STRICT"; - break; - default: - break; - } - if (name != NULL) { - PrintF(out, "%s\n", name); +void Code::PrintExtraICState(OStream& os, // NOLINT + Kind kind, ExtraICState extra) { + os << "extra_ic_state = "; + if ((kind == STORE_IC || kind == KEYED_STORE_IC) && (extra == STRICT)) { + os << "STRICT\n"; } else { - PrintF(out, "%d\n", extra); + os << extra << "\n"; } } -void Code::Disassemble(const char* name, FILE* out) { - PrintF(out, "kind = %s\n", Kind2String(kind())); - if (has_major_key()) { - PrintF(out, "major_key = %s\n", - CodeStub::MajorName(CodeStub::GetMajorKey(this), true)); +void Code::Disassemble(const char* name, OStream& os) { // NOLINT + os << "kind = " << Kind2String(kind()) << "\n"; + if (IsCodeStubOrIC()) { + const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this), true); + os << "major_key = " << (n == NULL ? 
"null" : n) << "\n"; } if (is_inline_cache_stub()) { - PrintF(out, "ic_state = %s\n", ICState2String(ic_state())); - PrintExtraICState(out, kind(), extra_ic_state()); + os << "ic_state = " << ICState2String(ic_state()) << "\n"; + PrintExtraICState(os, kind(), extra_ic_state()); if (ic_state() == MONOMORPHIC) { - PrintF(out, "type = %s\n", StubType2String(type())); + os << "type = " << StubType2String(type()) << "\n"; } if (is_compare_ic_stub()) { - ASSERT(major_key() == CodeStub::CompareIC); + DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC); CompareIC::State left_state, right_state, handler_state; Token::Value op; - ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state, - &handler_state, &op); - PrintF(out, "compare_state = %s*%s -> %s\n", - CompareIC::GetStateName(left_state), - CompareIC::GetStateName(right_state), - CompareIC::GetStateName(handler_state)); - PrintF(out, "compare_operation = %s\n", Token::Name(op)); + ICCompareStub::DecodeKey(stub_key(), &left_state, &right_state, + &handler_state, &op); + os << "compare_state = " << CompareIC::GetStateName(left_state) << "*" + << CompareIC::GetStateName(right_state) << " -> " + << CompareIC::GetStateName(handler_state) << "\n"; + os << "compare_operation = " << Token::Name(op) << "\n"; } } if ((name != NULL) && (name[0] != '\0')) { - PrintF(out, "name = %s\n", name); + os << "name = " << name << "\n"; } if (kind() == OPTIMIZED_FUNCTION) { - PrintF(out, "stack_slots = %d\n", stack_slots()); + os << "stack_slots = " << stack_slots() << "\n"; } - PrintF(out, "Instructions (size = %d)\n", instruction_size()); - Disassembler::Decode(out, this); - PrintF(out, "\n"); + os << "Instructions (size = " << instruction_size() << ")\n"; + // TODO(svenpanne) The Disassembler should use streams, too! 
+ { + CodeTracer::Scope trace_scope(GetIsolate()->GetCodeTracer()); + Disassembler::Decode(trace_scope.file(), this); + } + os << "\n"; if (kind() == FUNCTION) { DeoptimizationOutputData* data = DeoptimizationOutputData::cast(this->deoptimization_data()); - data->DeoptimizationOutputDataPrint(out); + data->DeoptimizationOutputDataPrint(os); } else if (kind() == OPTIMIZED_FUNCTION) { DeoptimizationInputData* data = DeoptimizationInputData::cast(this->deoptimization_data()); - data->DeoptimizationInputDataPrint(out); + data->DeoptimizationInputDataPrint(os); } - PrintF(out, "\n"); + os << "\n"; if (is_crankshafted()) { SafepointTable table(this); - PrintF(out, "Safepoints (size = %u)\n", table.size()); + os << "Safepoints (size = " << table.size() << ")\n"; for (unsigned i = 0; i < table.length(); i++) { unsigned pc_offset = table.GetPcOffset(i); - PrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset); - table.PrintEntry(i, out); - PrintF(out, " (sp -> fp)"); + os << (instruction_start() + pc_offset) << " "; + // TODO(svenpanne) Add some basic formatting to our streams. 
+ Vector<char> buf1 = Vector<char>::New(30); + SNPrintF(buf1, "%4d", pc_offset); + os << buf1.start() << " "; + table.PrintEntry(i, os); + os << " (sp -> fp) "; SafepointEntry entry = table.GetEntry(i); if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) { - PrintF(out, " %6d", entry.deoptimization_index()); + Vector<char> buf2 = Vector<char>::New(30); + SNPrintF(buf2, "%6d", entry.deoptimization_index()); + os << buf2.start(); } else { - PrintF(out, " <none>"); + os << "<none>"; } if (entry.argument_count() > 0) { - PrintF(out, " argc: %d", entry.argument_count()); + os << " argc: " << entry.argument_count(); } - PrintF(out, "\n"); + os << "\n"; } - PrintF(out, "\n"); + os << "\n"; } else if (kind() == FUNCTION) { unsigned offset = back_edge_table_offset(); // If there is no back edge table, the "table start" will be at or after @@ -11112,30 +11468,32 @@ DisallowHeapAllocation no_gc; BackEdgeTable back_edges(this, &no_gc); - PrintF(out, "Back edges (size = %u)\n", back_edges.length()); - PrintF(out, "ast_id pc_offset loop_depth\n"); + os << "Back edges (size = " << back_edges.length() << ")\n"; + os << "ast_id pc_offset loop_depth\n"; for (uint32_t i = 0; i < back_edges.length(); i++) { - PrintF(out, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(), - back_edges.pc_offset(i), - back_edges.loop_depth(i)); + Vector<char> buf = Vector<char>::New(100); + SNPrintF(buf, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(), + back_edges.pc_offset(i), back_edges.loop_depth(i)); + os << buf.start(); } - PrintF(out, "\n"); + os << "\n"; } #ifdef OBJECT_PRINT if (!type_feedback_info()->IsUndefined()) { - TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(out); - PrintF(out, "\n"); + OFStream os(stdout); + TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os); + os << "\n"; } #endif } - PrintF(out, "RelocInfo (size = %d)\n", relocation_size()); + os << "RelocInfo (size = " << relocation_size() << ")\n"; for (RelocIterator 
it(this); !it.done(); it.next()) { - it.rinfo()->Print(GetIsolate(), out); + it.rinfo()->Print(GetIsolate(), os); } - PrintF(out, "\n"); + os << "\n"; } #endif // ENABLE_DISASSEMBLER @@ -11146,7 +11504,7 @@ int length, SetFastElementsCapacitySmiMode smi_mode) { // We should never end in here with a pixel or external array. - ASSERT(!object->HasExternalArrayElements()); + DCHECK(!object->HasExternalArrayElements()); // Allocate a new fast elements backing store. Handle<FixedArray> new_elements = @@ -11180,8 +11538,8 @@ Handle<Map> new_map = (new_elements_kind != elements_kind) ? GetElementsTransitionMap(object, new_elements_kind) : handle(object->map()); - object->ValidateElements(); - object->set_map_and_elements(*new_map, *new_elements); + JSObject::ValidateElements(object); + JSObject::SetMapAndElements(object, new_map, new_elements); // Transition through the allocation site as well if present. JSObject::UpdateAllocationSite(object, new_elements_kind); @@ -11206,7 +11564,7 @@ int capacity, int length) { // We should never end in here with a pixel or external array. 
- ASSERT(!object->HasExternalArrayElements()); + DCHECK(!object->HasExternalArrayElements()); Handle<FixedArrayBase> elems = object->GetIsolate()->factory()->NewFixedDoubleArray(capacity); @@ -11226,8 +11584,8 @@ ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS); accessor->CopyElements(object, elems, elements_kind); - object->ValidateElements(); - object->set_map_and_elements(*new_map, *elems); + JSObject::ValidateElements(object); + JSObject::SetMapAndElements(object, new_map, elems); if (FLAG_trace_elements_transitions) { PrintElementsTransition(stdout, object, elements_kind, old_elements, @@ -11242,7 +11600,7 @@ // static void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) { - ASSERT(capacity >= 0); + DCHECK(capacity >= 0); array->GetIsolate()->factory()->NewJSArrayStorage( array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); } @@ -11262,15 +11620,16 @@ uint32_t index, List<Handle<Object> >* old_values, List<uint32_t>* indices) { - PropertyAttributes attributes = - JSReceiver::GetLocalElementAttribute(object, index); - ASSERT(attributes != ABSENT); - if (attributes == DONT_DELETE) return false; + Maybe<PropertyAttributes> maybe = + JSReceiver::GetOwnElementAttribute(object, index); + DCHECK(maybe.has_value); + DCHECK(maybe.value != ABSENT); + if (maybe.value == DONT_DELETE) return false; Handle<Object> value; - if (object->GetLocalElementAccessorPair(index) != NULL) { + if (!JSObject::GetOwnElementAccessorPair(object, index).is_null()) { value = Handle<Object>::cast(isolate->factory()->the_hole_value()); } else { - value = Object::GetElementNoExceptionThrown(isolate, object, index); + value = Object::GetElement(isolate, object, index).ToHandleChecked(); } old_values->Add(value); indices->Add(index); @@ -11290,12 +11649,11 @@ Handle<Object> args[] = { object, index_object, deleted, add_count_object }; - bool threw; Execution::Call(isolate, Handle<JSFunction>(isolate->observers_enqueue_splice()), - 
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args, - &threw); - ASSERT(!threw); + isolate->factory()->undefined_value(), + ARRAY_SIZE(args), + args).Assert(); } @@ -11304,12 +11662,11 @@ HandleScope scope(isolate); Handle<Object> args[] = { object }; - bool threw; Execution::Call(isolate, Handle<JSFunction>(isolate->observers_begin_perform_splice()), - isolate->factory()->undefined_value(), ARRAY_SIZE(args), args, - &threw); - ASSERT(!threw); + isolate->factory()->undefined_value(), + ARRAY_SIZE(args), + args).Assert(); } @@ -11318,19 +11675,30 @@ HandleScope scope(isolate); Handle<Object> args[] = { object }; - bool threw; Execution::Call(isolate, Handle<JSFunction>(isolate->observers_end_perform_splice()), - isolate->factory()->undefined_value(), ARRAY_SIZE(args), args, - &threw); - ASSERT(!threw); + isolate->factory()->undefined_value(), + ARRAY_SIZE(args), + args).Assert(); } -Handle<Object> JSArray::SetElementsLength(Handle<JSArray> array, - Handle<Object> new_length_handle) { +MaybeHandle<Object> JSArray::SetElementsLength( + Handle<JSArray> array, + Handle<Object> new_length_handle) { + if (array->HasFastElements()) { + // If the new array won't fit in a some non-trivial fraction of the max old + // space size, then force it to go dictionary mode. + int max_fast_array_size = static_cast<int>( + (array->GetHeap()->MaxOldGenerationSize() / kDoubleSize) / 4); + if (new_length_handle->IsNumber() && + NumberToInt32(*new_length_handle) >= max_fast_array_size) { + NormalizeElements(array); + } + } + // We should never end in here with a pixel or external array. 
- ASSERT(array->AllowsSetElementsLength()); + DCHECK(array->AllowsSetElementsLength()); if (!array->map()->is_observed()) { return array->GetElementsAccessor()->SetLength(array, new_length_handle); } @@ -11345,7 +11713,7 @@ CHECK(new_length_handle->ToArrayIndex(&new_length)); static const PropertyAttributes kNoAttrFilter = NONE; - int num_elements = array->NumberOfLocalElements(kNoAttrFilter); + int num_elements = array->NumberOfOwnElements(kNoAttrFilter); if (num_elements > 0) { if (old_length == static_cast<uint32_t>(num_elements)) { // Simple case for arrays without holes. @@ -11357,7 +11725,7 @@ // TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over // the to-be-removed indices twice. Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements); - array->GetLocalElementKeys(*keys, kNoAttrFilter); + array->GetOwnElementKeys(*keys, kNoAttrFilter); while (num_elements-- > 0) { uint32_t index = NumberToUint32(keys->get(num_elements)); if (index < new_length) break; @@ -11366,9 +11734,11 @@ } } - Handle<Object> hresult = - array->GetElementsAccessor()->SetLength(array, new_length_handle); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, hresult, hresult); + Handle<Object> hresult; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, hresult, + array->GetElementsAccessor()->SetLength(array, new_length_handle), + Object); CHECK(array->length()->ToArrayIndex(&new_length)); if (old_length == new_length) return hresult; @@ -11398,13 +11768,13 @@ // Skip deletions where the property was an accessor, leaving holes // in the array of old values. 
if (old_values[i]->IsTheHole()) continue; - JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE, - SLOPPY); + JSObject::SetElement( + deleted, indices[i] - index, old_values[i], NONE, SLOPPY).Assert(); } SetProperty(deleted, isolate->factory()->length_string(), isolate->factory()->NewNumberFromUint(delete_count), - NONE, SLOPPY); + STRICT).Assert(); } EnqueueSpliceRecord(array, index, deleted, add_count); @@ -11434,10 +11804,12 @@ Handle<Map> Map::PutPrototypeTransition(Handle<Map> map, Handle<Object> prototype, Handle<Map> target_map) { - ASSERT(target_map->IsMap()); - ASSERT(HeapObject::cast(*prototype)->map()->IsMap()); - // Don't cache prototype transition if this map is shared. - if (map->is_shared() || !FLAG_cache_prototype_transitions) return map; + DCHECK(target_map->IsMap()); + DCHECK(HeapObject::cast(*prototype)->map()->IsMap()); + // Don't cache prototype transition if this map is either shared, or a map of + // a prototype. + if (map->is_prototype_map()) return map; + if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return map; const int step = kProtoTransitionElementsPerEntry; const int header = kProtoTransitionHeaderSize; @@ -11450,13 +11822,9 @@ if (capacity > kMaxCachedPrototypeTransitions) return map; // Grow array by factor 2 over and above what we need. - Factory* factory = map->GetIsolate()->factory(); - cache = factory->CopySizeFixedArray(cache, transitions * 2 * step + header); + cache = FixedArray::CopySize(cache, transitions * 2 * step + header); - CALL_AND_RETRY_OR_DIE(map->GetIsolate(), - map->SetPrototypeTransitions(*cache), - break, - return Handle<Map>()); + SetPrototypeTransitions(map, cache); } // Reload number of transitions as GC might shrink them. 
@@ -11492,21 +11860,42 @@ } -void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group, +// static +void Map::AddDependentCompilationInfo(Handle<Map> map, + DependentCode::DependencyGroup group, CompilationInfo* info) { - Handle<DependentCode> dep(dependent_code()); Handle<DependentCode> codes = - DependentCode::Insert(dep, group, info->object_wrapper()); - if (*codes != dependent_code()) set_dependent_code(*codes); - info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone()); + DependentCode::Insert(handle(map->dependent_code(), info->isolate()), + group, info->object_wrapper()); + if (*codes != map->dependent_code()) map->set_dependent_code(*codes); + info->dependencies(group)->Add(map, info->zone()); } -void Map::AddDependentCode(DependentCode::DependencyGroup group, +// static +void Map::AddDependentCode(Handle<Map> map, + DependentCode::DependencyGroup group, Handle<Code> code) { Handle<DependentCode> codes = DependentCode::Insert( - Handle<DependentCode>(dependent_code()), group, code); - if (*codes != dependent_code()) set_dependent_code(*codes); + Handle<DependentCode>(map->dependent_code()), group, code); + if (*codes != map->dependent_code()) map->set_dependent_code(*codes); +} + + +// static +void Map::AddDependentIC(Handle<Map> map, + Handle<Code> stub) { + DCHECK(stub->next_code_link()->IsUndefined()); + int n = map->dependent_code()->number_of_entries(DependentCode::kWeakICGroup); + if (n == 0) { + // Slow path: insert the head of the list with possible heap allocation. + Map::AddDependentCode(map, DependentCode::kWeakICGroup, stub); + } else { + // Fast path: link the stub to the existing head of the list without any + // heap allocation. 
+ DCHECK(n == 1); + map->dependent_code()->AddToDependentICList(stub); + } } @@ -11549,11 +11938,10 @@ if (entries->object_at(i) == *object) return entries; } if (entries->length() < kCodesStartIndex + number_of_entries + 1) { - Factory* factory = entries->GetIsolate()->factory(); int capacity = kCodesStartIndex + number_of_entries + 1; if (capacity > 5) capacity = capacity * 5 / 4; Handle<DependentCode> new_entries = Handle<DependentCode>::cast( - factory->CopySizeFixedArray(entries, capacity, TENURED)); + FixedArray::CopySize(entries, capacity, TENURED)); // The number of codes can change after GC. starts.Recompute(*entries); start = starts.at(group); @@ -11596,7 +11984,7 @@ #ifdef DEBUG for (int i = start; i < end; i++) { - ASSERT(is_code_at(i) || compilation_info_at(i) != info); + DCHECK(is_code_at(i) || compilation_info_at(i) != info); } #endif } @@ -11623,27 +12011,39 @@ // Use the last of each group to fill the gap in the previous group. for (int i = group; i < kGroupCount; i++) { int last_of_group = starts.at(i + 1) - 1; - ASSERT(last_of_group >= gap); + DCHECK(last_of_group >= gap); if (last_of_group == gap) continue; copy(last_of_group, gap); gap = last_of_group; } - ASSERT(gap == starts.number_of_entries() - 1); + DCHECK(gap == starts.number_of_entries() - 1); clear_at(gap); // Clear last gap. 
set_number_of_entries(group, end - start - 1); #ifdef DEBUG for (int i = start; i < end - 1; i++) { - ASSERT(is_code_at(i) || compilation_info_at(i) != info); + DCHECK(is_code_at(i) || compilation_info_at(i) != info); } #endif } +static bool CodeListContains(Object* head, Code* code) { + while (!head->IsUndefined()) { + if (head == code) return true; + head = Code::cast(head)->next_code_link(); + } + return false; +} + + bool DependentCode::Contains(DependencyGroup group, Code* code) { GroupStartIndexes starts(this); int start = starts.at(group); int end = starts.at(group + 1); + if (group == kWeakICGroup) { + return CodeListContains(object_at(start), code); + } for (int i = start; i < end; i++) { if (object_at(i) == code) return true; } @@ -11692,7 +12092,7 @@ void DependentCode::DeoptimizeDependentCodeGroup( Isolate* isolate, DependentCode::DependencyGroup group) { - ASSERT(AllowCodeDependencyChange::IsAllowed()); + DCHECK(AllowCodeDependencyChange::IsAllowed()); DisallowHeapAllocation no_allocation_scope; bool marked = MarkCodeForDeoptimization(isolate, group); @@ -11700,9 +12100,39 @@ } -Handle<Object> JSObject::SetPrototype(Handle<JSObject> object, - Handle<Object> value, - bool skip_hidden_prototypes) { +void DependentCode::AddToDependentICList(Handle<Code> stub) { + DisallowHeapAllocation no_heap_allocation; + GroupStartIndexes starts(this); + int i = starts.at(kWeakICGroup); + Object* head = object_at(i); + // Try to insert the stub after the head of the list to minimize number of + // writes to the DependentCode array, since a write to the array can make it + // strong if it was alread marked by incremental marker. 
+ if (head->IsCode()) { + stub->set_next_code_link(Code::cast(head)->next_code_link()); + Code::cast(head)->set_next_code_link(*stub); + } else { + stub->set_next_code_link(head); + set_object_at(i, *stub); + } +} + + +Handle<Map> Map::TransitionToPrototype(Handle<Map> map, + Handle<Object> prototype) { + Handle<Map> new_map = GetPrototypeTransition(map, prototype); + if (new_map.is_null()) { + new_map = Copy(map); + PutPrototypeTransition(map, prototype, new_map); + new_map->set_prototype(*prototype); + } + return new_map; +} + + +MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object, + Handle<Object> value, + bool from_javascript) { #ifdef DEBUG int size = object->Size(); #endif @@ -11725,23 +12155,21 @@ Handle<Object> args[] = { object }; Handle<Object> error = isolate->factory()->NewTypeError( "non_extensible_proto", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } // Before we can set the prototype we need to be sure // prototype cycles are prevented. // It is sufficient to validate that the receiver is not in the new prototype // chain. - for (Object* pt = *value; - pt != heap->null_value(); - pt = pt->GetPrototype(isolate)) { - if (JSReceiver::cast(pt) == *object) { + for (PrototypeIterator iter(isolate, *value, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (JSReceiver::cast(iter.GetCurrent()) == *object) { // Cycle detected. Handle<Object> error = isolate->factory()->NewError( "cyclic_proto", HandleVector<Object>(NULL, 0)); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } } @@ -11749,14 +12177,14 @@ object->map()->DictionaryElementsInPrototypeChainOnly(); Handle<JSObject> real_receiver = object; - if (skip_hidden_prototypes) { + if (from_javascript) { // Find the first object in the chain whose prototype object is not // hidden and set the new prototype on that object. 
- Object* current_proto = real_receiver->GetPrototype(); - while (current_proto->IsJSObject() && - JSObject::cast(current_proto)->map()->is_hidden_prototype()) { - real_receiver = handle(JSObject::cast(current_proto), isolate); - current_proto = current_proto->GetPrototype(isolate); + PrototypeIterator iter(isolate, real_receiver); + while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) { + real_receiver = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); + iter.Advance(); } } @@ -11767,16 +12195,13 @@ if (map->prototype() == *value) return value; if (value->IsJSObject()) { - JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value)); + PrototypeOptimizationMode mode = + from_javascript ? REGULAR_PROTOTYPE : FAST_PROTOTYPE; + JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value), mode); } - Handle<Map> new_map = Map::GetPrototypeTransition(map, value); - if (new_map.is_null()) { - new_map = Map::Copy(map); - Map::PutPrototypeTransition(map, value, new_map); - new_map->set_prototype(*value); - } - ASSERT(new_map->prototype() == *value); + Handle<Map> new_map = Map::TransitionToPrototype(map, value); + DCHECK(new_map->prototype() == *value); JSObject::MigrateToMap(real_receiver, new_map); if (!dictionary_elements_in_chain && @@ -11788,7 +12213,7 @@ } heap->ClearInstanceofCache(); - ASSERT(size == object->Size()); + DCHECK(size == object->Size()); return value; } @@ -11806,39 +12231,25 @@ } -AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) { - uint32_t index = 0; - if (name->AsArrayIndex(&index)) { - return GetLocalElementAccessorPair(index); - } - - LookupResult lookup(GetIsolate()); - LocalLookupRealNamedProperty(name, &lookup); - - if (lookup.IsPropertyCallbacks() && - lookup.GetCallbackObject()->IsAccessorPair()) { - return AccessorPair::cast(lookup.GetCallbackObject()); - } - return NULL; -} - - -AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) { - if (IsJSGlobalProxy()) { - Object* proto = 
GetPrototype(); - if (proto->IsNull()) return NULL; - ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->GetLocalElementAccessorPair(index); +MaybeHandle<AccessorPair> JSObject::GetOwnElementAccessorPair( + Handle<JSObject> object, + uint32_t index) { + if (object->IsJSGlobalProxy()) { + PrototypeIterator iter(object->GetIsolate(), object); + if (iter.IsAtEnd()) return MaybeHandle<AccessorPair>(); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return GetOwnElementAccessorPair( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index); } // Check for lookup interceptor. - if (HasIndexedInterceptor()) return NULL; + if (object->HasIndexedInterceptor()) return MaybeHandle<AccessorPair>(); - return GetElementsAccessor()->GetAccessorPair(this, this, index); + return object->GetElementsAccessor()->GetAccessorPair(object, object, index); } -Handle<Object> JSObject::SetElementWithInterceptor( +MaybeHandle<Object> JSObject::SetElementWithInterceptor( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -11862,7 +12273,7 @@ *object); v8::Handle<v8::Value> result = args.Call(setter, index, v8::Utils::ToLocal(value)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); if (!result.IsEmpty()) return value; } @@ -11873,76 +12284,72 @@ } -MaybeObject* JSObject::GetElementWithCallback(Object* receiver, - Object* structure, - uint32_t index, - Object* holder) { - Isolate* isolate = GetIsolate(); - ASSERT(!structure->IsForeign()); - +MaybeHandle<Object> JSObject::GetElementWithCallback( + Handle<JSObject> object, + Handle<Object> receiver, + Handle<Object> structure, + uint32_t index, + Handle<Object> holder) { + Isolate* isolate = object->GetIsolate(); + DCHECK(!structure->IsForeign()); // api style callbacks. 
if (structure->IsExecutableAccessorInfo()) { - Handle<ExecutableAccessorInfo> data( - ExecutableAccessorInfo::cast(structure)); + Handle<ExecutableAccessorInfo> data = + Handle<ExecutableAccessorInfo>::cast(structure); Object* fun_obj = data->getter(); v8::AccessorGetterCallback call_fun = v8::ToCData<v8::AccessorGetterCallback>(fun_obj); - if (call_fun == NULL) return isolate->heap()->undefined_value(); - HandleScope scope(isolate); - Handle<JSObject> self(JSObject::cast(receiver)); - Handle<JSObject> holder_handle(JSObject::cast(holder)); + if (call_fun == NULL) return isolate->factory()->undefined_value(); + Handle<JSObject> holder_handle = Handle<JSObject>::cast(holder); Handle<Object> number = isolate->factory()->NewNumberFromUint(index); Handle<String> key = isolate->factory()->NumberToString(number); - LOG(isolate, ApiNamedPropertyAccess("load", *self, *key)); + LOG(isolate, ApiNamedPropertyAccess("load", *holder_handle, *key)); PropertyCallbackArguments - args(isolate, data->data(), *self, *holder_handle); + args(isolate, data->data(), *receiver, *holder_handle); v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (result.IsEmpty()) return isolate->heap()->undefined_value(); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); + if (result.IsEmpty()) return isolate->factory()->undefined_value(); Handle<Object> result_internal = v8::Utils::OpenHandle(*result); result_internal->VerifyApiCallResultType(); - return *result_internal; + // Rebox handle before return. + return handle(*result_internal, isolate); } // __defineGetter__ callback if (structure->IsAccessorPair()) { - Object* getter = AccessorPair::cast(structure)->getter(); + Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(), + isolate); if (getter->IsSpecFunction()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... 
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter)); + return GetPropertyWithDefinedGetter( + receiver, Handle<JSReceiver>::cast(getter)); } // Getter is not a function. - return isolate->heap()->undefined_value(); + return isolate->factory()->undefined_value(); } if (structure->IsDeclaredAccessorInfo()) { - return GetDeclaredAccessorProperty(receiver, - DeclaredAccessorInfo::cast(structure), - isolate); + return GetDeclaredAccessorProperty( + receiver, Handle<DeclaredAccessorInfo>::cast(structure), isolate); } UNREACHABLE(); - return NULL; + return MaybeHandle<Object>(); } -Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object, - Handle<Object> structure, - uint32_t index, - Handle<Object> value, - Handle<JSObject> holder, - StrictMode strict_mode) { +MaybeHandle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object, + Handle<Object> structure, + uint32_t index, + Handle<Object> value, + Handle<JSObject> holder, + StrictMode strict_mode) { Isolate* isolate = object->GetIsolate(); // We should never get here to initialize a const with the hole // value since a const declaration would conflict with the setter. - ASSERT(!value->IsTheHole()); - - // To accommodate both the old and the new api we switch on the - // data structure used to store the callbacks. Eventually foreign - // callbacks should be phased out. 
- ASSERT(!structure->IsForeign()); - + DCHECK(!value->IsTheHole()); + DCHECK(!structure->IsForeign()); if (structure->IsExecutableAccessorInfo()) { // api style callbacks Handle<ExecutableAccessorInfo> data = @@ -11959,7 +12366,7 @@ args.Call(call_fun, v8::Utils::ToLocal(key), v8::Utils::ToLocal(value)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return value; } @@ -11975,8 +12382,7 @@ Handle<Object> args[2] = { key, holder }; Handle<Object> error = isolate->factory()->NewTypeError( "no_setter_in_callback", HandleVector(args, 2)); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } } @@ -11984,7 +12390,7 @@ if (structure->IsDeclaredAccessorInfo()) return value; UNREACHABLE(); - return Handle<Object>(); + return MaybeHandle<Object>(); } @@ -12015,12 +12421,12 @@ // Adding n elements in fast case is O(n*n). // Note: revisit design to have dual undefined values to capture absent // elements. 
-Handle<Object> JSObject::SetFastElement(Handle<JSObject> object, - uint32_t index, - Handle<Object> value, - StrictMode strict_mode, - bool check_prototype) { - ASSERT(object->HasFastSmiOrObjectElements() || +MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object, + uint32_t index, + Handle<Object> value, + StrictMode strict_mode, + bool check_prototype) { + DCHECK(object->HasFastSmiOrObjectElements() || object->HasFastArgumentsElements()); Isolate* isolate = object->GetIsolate(); @@ -12046,7 +12452,7 @@ if (check_prototype && (index >= capacity || backing_store->get(index)->IsTheHole())) { bool found; - Handle<Object> result = SetElementWithCallbackSetterInPrototypes( + MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes( object, index, value, &found, strict_mode); if (found) return result; } @@ -12083,7 +12489,7 @@ bool convert_to_slow = true; if ((index - capacity) < kMaxGap) { new_capacity = NewElementsCapacity(index + 1); - ASSERT(new_capacity > index); + DCHECK(new_capacity > index); if (!object->ShouldConvertToSlowElements(new_capacity)) { convert_to_slow = false; } @@ -12105,7 +12511,7 @@ SetFastDoubleElementsCapacityAndLength(object, new_capacity, array_length); FixedDoubleArray::cast(object->elements())->set(index, value->Number()); - object->ValidateElements(); + JSObject::ValidateElements(object); return value; } // Change elements kind from Smi-only to generic FAST if necessary. @@ -12117,7 +12523,7 @@ UpdateAllocationSite(object, kind); Handle<Map> new_map = GetElementsTransitionMap(object, kind); JSObject::MigrateToMap(object, new_map); - ASSERT(IsFastObjectElementsKind(object->GetElementsKind())); + DCHECK(IsFastObjectElementsKind(object->GetElementsKind())); } // Increase backing store capacity if that's been decided previously. 
if (new_capacity != capacity) { @@ -12129,12 +12535,12 @@ SetFastElementsCapacityAndLength(object, new_capacity, array_length, smi_mode); new_elements->set(index, *value); - object->ValidateElements(); + JSObject::ValidateElements(object); return value; } // Finally, set the new element and length. - ASSERT(object->elements()->IsFixedArray()); + DCHECK(object->elements()->IsFixedArray()); backing_store->set(index, *value); if (must_update_array_length) { Handle<JSArray>::cast(object)->set_length(Smi::FromInt(array_length)); @@ -12143,14 +12549,15 @@ } -Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object, - uint32_t index, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - bool check_prototype, - SetPropertyMode set_mode) { - ASSERT(object->HasDictionaryElements() || +MaybeHandle<Object> JSObject::SetDictionaryElement( + Handle<JSObject> object, + uint32_t index, + Handle<Object> value, + PropertyAttributes attributes, + StrictMode strict_mode, + bool check_prototype, + SetPropertyMode set_mode) { + DCHECK(object->HasDictionaryElements() || object->HasDictionaryArgumentsElements()); Isolate* isolate = object->GetIsolate(); @@ -12187,8 +12594,7 @@ Handle<Object> error = isolate->factory()->NewTypeError("strict_read_only_property", HandleVector(args, 2)); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } } // Elements of the arguments object in slow mode might be slow aliases. @@ -12197,7 +12603,7 @@ Handle<AliasedArgumentsEntry>::cast(element); Handle<Context> context(Context::cast(elements->get(0))); int context_index = entry->aliased_context_slot(); - ASSERT(!context->get(context_index)->IsTheHole()); + DCHECK(!context->get(context_index)->IsTheHole()); context->set(context_index, *value); // For elements that are still writable we keep slow aliasing. if (!details.IsReadOnly()) value = element; @@ -12209,8 +12615,8 @@ // Can cause GC! 
if (check_prototype) { bool found; - Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object, - index, value, &found, strict_mode); + MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes( + object, index, value, &found, strict_mode); if (found) return result; } @@ -12226,8 +12632,7 @@ Handle<Object> error = isolate->factory()->NewTypeError("object_not_extensible", HandleVector(args, 1)); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } } @@ -12259,15 +12664,11 @@ } else { new_length = dictionary->max_number_key() + 1; } - SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays - ? kAllowSmiElements - : kDontAllowSmiElements; bool has_smi_only_elements = false; bool should_convert_to_fast_double_elements = object->ShouldConvertToFastDoubleElements(&has_smi_only_elements); - if (has_smi_only_elements) { - smi_mode = kForceSmiElements; - } + SetFastElementsCapacitySmiMode smi_mode = + has_smi_only_elements ? 
kForceSmiElements : kAllowSmiElements; if (should_convert_to_fast_double_elements) { SetFastDoubleElementsCapacityAndLength(object, new_length, new_length); @@ -12275,24 +12676,25 @@ SetFastElementsCapacityAndLength(object, new_length, new_length, smi_mode); } - object->ValidateElements(); + JSObject::ValidateElements(object); #ifdef DEBUG if (FLAG_trace_normalization) { - PrintF("Object elements are fast case again:\n"); - object->Print(); + OFStream os(stdout); + os << "Object elements are fast case again:\n"; + object->Print(os); } #endif } return value; } -Handle<Object> JSObject::SetFastDoubleElement( +MaybeHandle<Object> JSObject::SetFastDoubleElement( Handle<JSObject> object, uint32_t index, Handle<Object> value, StrictMode strict_mode, bool check_prototype) { - ASSERT(object->HasFastDoubleElements()); + DCHECK(object->HasFastDoubleElements()); Handle<FixedArrayBase> base_elms(FixedArrayBase::cast(object->elements())); uint32_t elms_length = static_cast<uint32_t>(base_elms->length()); @@ -12303,8 +12705,8 @@ (index >= elms_length || Handle<FixedDoubleArray>::cast(base_elms)->is_the_hole(index))) { bool found; - Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object, - index, value, &found, strict_mode); + MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes( + object, index, value, &found, strict_mode); if (found) return result; } @@ -12323,11 +12725,12 @@ if (!value->IsNumber()) { SetFastElementsCapacityAndLength(object, elms_length, length, kDontAllowSmiElements); - Handle<Object> result = SetFastElement(object, index, value, strict_mode, - check_prototype); - RETURN_IF_EMPTY_HANDLE_VALUE(object->GetIsolate(), result, - Handle<Object>()); - object->ValidateElements(); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + object->GetIsolate(), result, + SetFastElement(object, index, value, strict_mode, check_prototype), + Object); + JSObject::ValidateElements(object); return result; } @@ -12364,31 +12767,31 @@ // Try 
allocating extra space. int new_capacity = NewElementsCapacity(index+1); if (!object->ShouldConvertToSlowElements(new_capacity)) { - ASSERT(static_cast<uint32_t>(new_capacity) > index); + DCHECK(static_cast<uint32_t>(new_capacity) > index); SetFastDoubleElementsCapacityAndLength(object, new_capacity, index + 1); FixedDoubleArray::cast(object->elements())->set(index, double_value); - object->ValidateElements(); + JSObject::ValidateElements(object); return value; } } // Otherwise default to slow case. - ASSERT(object->HasFastDoubleElements()); - ASSERT(object->map()->has_fast_double_elements()); - ASSERT(object->elements()->IsFixedDoubleArray() || + DCHECK(object->HasFastDoubleElements()); + DCHECK(object->map()->has_fast_double_elements()); + DCHECK(object->elements()->IsFixedDoubleArray() || object->elements()->length() == 0); NormalizeElements(object); - ASSERT(object->HasDictionaryElements()); + DCHECK(object->HasDictionaryElements()); return SetElement(object, index, value, NONE, strict_mode, check_prototype); } -Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object, - uint32_t index, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode) { +MaybeHandle<Object> JSReceiver::SetElement(Handle<JSReceiver> object, + uint32_t index, + Handle<Object> value, + PropertyAttributes attributes, + StrictMode strict_mode) { if (object->IsJSProxy()) { return JSProxy::SetElementWithHandler( Handle<JSProxy>::cast(object), object, index, value, strict_mode); @@ -12398,52 +12801,49 @@ } -Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object, - uint32_t index, - Handle<Object> value, - StrictMode strict_mode) { - ASSERT(!object->HasExternalArrayElements()); +MaybeHandle<Object> JSObject::SetOwnElement(Handle<JSObject> object, + uint32_t index, + Handle<Object> value, + StrictMode strict_mode) { + DCHECK(!object->HasExternalArrayElements()); return JSObject::SetElement(object, index, value, NONE, strict_mode, false); } -Handle<Object> 
JSObject::SetElement(Handle<JSObject> object, - uint32_t index, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - bool check_prototype, - SetPropertyMode set_mode) { +MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object, + uint32_t index, + Handle<Object> value, + PropertyAttributes attributes, + StrictMode strict_mode, + bool check_prototype, + SetPropertyMode set_mode) { Isolate* isolate = object->GetIsolate(); if (object->HasExternalArrayElements() || object->HasFixedTypedArrayElements()) { if (!value->IsNumber() && !value->IsUndefined()) { - bool has_exception; - Handle<Object> number = - Execution::ToNumber(isolate, value, &has_exception); - if (has_exception) return Handle<Object>(); - value = number; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, + Execution::ToNumber(isolate, value), Object); } } // Check access rights if needed. if (object->IsAccessCheckNeeded()) { - if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_SET)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_SET)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return value; } } if (object->IsJSGlobalProxy()) { - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return value; - ASSERT(proto->IsJSGlobalObject()); - return SetElement(Handle<JSObject>::cast(proto), index, value, attributes, - strict_mode, - check_prototype, - set_mode); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return value; + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return SetElement( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index, + value, attributes, strict_mode, check_prototype, set_mode); } // Don't allow element properties to be redefined for external arrays. 
@@ -12454,8 +12854,7 @@ Handle<Object> args[] = { object, number }; Handle<Object> error = isolate->factory()->NewTypeError( "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } // Normalize the elements to enable attributes on the property. @@ -12467,24 +12866,24 @@ if (!object->map()->is_observed()) { return object->HasIndexedInterceptor() - ? SetElementWithInterceptor(object, index, value, attributes, strict_mode, - check_prototype, - set_mode) + ? SetElementWithInterceptor(object, index, value, attributes, + strict_mode, check_prototype, set_mode) : SetElementWithoutInterceptor(object, index, value, attributes, - strict_mode, - check_prototype, - set_mode); + strict_mode, check_prototype, set_mode); } - PropertyAttributes old_attributes = - JSReceiver::GetLocalElementAttribute(object, index); + Maybe<PropertyAttributes> maybe = + JSReceiver::GetOwnElementAttribute(object, index); + if (!maybe.has_value) return MaybeHandle<Object>(); + PropertyAttributes old_attributes = maybe.value; + Handle<Object> old_value = isolate->factory()->the_hole_value(); Handle<Object> old_length_handle; Handle<Object> new_length_handle; if (old_attributes != ABSENT) { - if (object->GetLocalElementAccessorPair(index) == NULL) { - old_value = Object::GetElementNoExceptionThrown(isolate, object, index); + if (GetOwnElementAccessorPair(object, index).is_null()) { + old_value = Object::GetElement(isolate, object, index).ToHandleChecked(); } } else if (object->IsJSArray()) { // Store old array length in case adding an element grows the array. @@ -12493,18 +12892,23 @@ } // Check for lookup interceptor - Handle<Object> result = object->HasIndexedInterceptor() - ? 
SetElementWithInterceptor(object, index, value, attributes, strict_mode, - check_prototype, - set_mode) - : SetElementWithoutInterceptor(object, index, value, attributes, - strict_mode, - check_prototype, - set_mode); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + object->HasIndexedInterceptor() + ? SetElementWithInterceptor( + object, index, value, attributes, + strict_mode, check_prototype, set_mode) + : SetElementWithoutInterceptor( + object, index, value, attributes, + strict_mode, check_prototype, set_mode), + Object); Handle<String> name = isolate->factory()->Uint32ToString(index); - PropertyAttributes new_attributes = GetLocalElementAttribute(object, index); + maybe = GetOwnElementAttribute(object, index); + if (!maybe.has_value) return MaybeHandle<Object>(); + PropertyAttributes new_attributes = maybe.value; + if (old_attributes == ABSENT) { if (object->IsJSArray() && !old_length_handle->SameValue( @@ -12531,7 +12935,7 @@ EnqueueChangeRecord(object, "reconfigure", name, old_value); } else { Handle<Object> new_value = - Object::GetElementNoExceptionThrown(isolate, object, index); + Object::GetElement(isolate, object, index).ToHandleChecked(); bool value_changed = !old_value->SameValue(*new_value); if (old_attributes != new_attributes) { if (!value_changed) old_value = isolate->factory()->the_hole_value(); @@ -12545,7 +12949,7 @@ } -Handle<Object> JSObject::SetElementWithoutInterceptor( +MaybeHandle<Object> JSObject::SetElementWithoutInterceptor( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -12553,18 +12957,26 @@ StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode) { - ASSERT(object->HasDictionaryElements() || + DCHECK(object->HasDictionaryElements() || object->HasDictionaryArgumentsElements() || (attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0); Isolate* isolate = object->GetIsolate(); if 
(FLAG_trace_external_array_abuse && IsExternalArrayElementsKind(object->GetElementsKind())) { - CheckArrayAbuse(*object, "external elements write", index); + CheckArrayAbuse(object, "external elements write", index); } if (FLAG_trace_js_array_abuse && !IsExternalArrayElementsKind(object->GetElementsKind())) { if (object->IsJSArray()) { - CheckArrayAbuse(*object, "elements write", index, true); + CheckArrayAbuse(object, "elements write", index, true); + } + } + if (object->IsJSArray() && JSArray::WouldChangeReadOnlyLength( + Handle<JSArray>::cast(object), index)) { + if (strict_mode == SLOPPY) { + return value; + } else { + return JSArray::ReadOnlyLengthError(Handle<JSArray>::cast(object)); } } switch (object->GetElementsKind()) { @@ -12607,7 +13019,7 @@ if (!probe.is_null() && !probe->IsTheHole()) { Handle<Context> context(Context::cast(parameter_map->get(0))); int context_index = Handle<Smi>::cast(probe)->value(); - ASSERT(!context->get(context_index)->IsTheHole()); + DCHECK(!context->get(context_index)->IsTheHole()); context->set(context_index, *value); // Redefining attributes of an aliased element destroys fast aliasing. 
if (set_mode == SET_PROPERTY || attributes == NONE) return value; @@ -12655,7 +13067,7 @@ bool AllocationSite::IsNestedSite() { - ASSERT(FLAG_trace_track_allocation_sites); + DCHECK(FLAG_trace_track_allocation_sites); Object* current = GetHeap()->allocation_sites_list(); while (current->IsAllocationSite()) { AllocationSite* current_site = AllocationSite::cast(current); @@ -12734,6 +13146,19 @@ } +const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) { + switch (decision) { + case kUndecided: return "undecided"; + case kDontTenure: return "don't tenure"; + case kMaybeTenure: return "maybe tenure"; + case kTenure: return "tenure"; + case kZombie: return "zombie"; + default: UNREACHABLE(); + } + return NULL; +} + + void JSObject::UpdateAllocationSite(Handle<JSObject> object, ElementsKind to_kind) { if (!object->IsJSArray()) return; @@ -12744,30 +13169,9 @@ Handle<AllocationSite> site; { DisallowHeapAllocation no_allocation; - // Check if there is potentially a memento behind the object. If - // the last word of the momento is on another page we return - // immediatelly. - Address object_address = object->address(); - Address memento_address = object_address + JSArray::kSize; - Address last_memento_word_address = memento_address + kPointerSize; - if (!NewSpacePage::OnSamePage(object_address, - last_memento_word_address)) { - return; - } - - // Either object is the last object in the new space, or there is another - // object of at least word size (the header map word) following it, so - // suffices to compare ptr and top here. 
- Address top = heap->NewSpaceTop(); - ASSERT(memento_address == top || - memento_address + HeapObject::kHeaderSize <= top); - if (memento_address == top) return; - HeapObject* candidate = HeapObject::FromAddress(memento_address); - if (candidate->map() != heap->allocation_memento_map()) return; - - AllocationMemento* memento = AllocationMemento::cast(candidate); - if (!memento->IsValid()) return; + AllocationMemento* memento = heap->FindAllocationMemento(*object); + if (memento == NULL) return; // Walk through to the Allocation Site site = handle(memento->GetAllocationSite()); @@ -12796,7 +13200,7 @@ IsFastSmiOrObjectElementsKind(to_kind)) || (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) { - ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND); + DCHECK(from_kind != TERMINAL_FAST_ELEMENTS_KIND); // No change is needed to the elements() buffer, the transition // only requires a map change. Handle<Map> new_map = GetElementsTransitionMap(object, to_kind); @@ -12826,7 +13230,7 @@ if (IsFastSmiElementsKind(from_kind) && IsFastDoubleElementsKind(to_kind)) { SetFastDoubleElementsCapacityAndLength(object, capacity, length); - object->ValidateElements(); + JSObject::ValidateElements(object); return; } @@ -12834,7 +13238,7 @@ IsFastObjectElementsKind(to_kind)) { SetFastElementsCapacityAndLength(object, capacity, length, kDontAllowSmiElements); - object->ValidateElements(); + JSObject::ValidateElements(object); return; } @@ -12861,32 +13265,57 @@ void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array, uint32_t index, Handle<Object> value) { - CALL_HEAP_FUNCTION_VOID(array->GetIsolate(), - array->JSArrayUpdateLengthFromIndex(index, *value)); -} - - -MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, - Object* value) { uint32_t old_len = 0; - CHECK(length()->ToArrayIndex(&old_len)); + CHECK(array->length()->ToArrayIndex(&old_len)); // Check to see if we need to update the length. 
For now, we make // sure that the length stays within 32-bits (unsigned). if (index >= old_len && index != 0xffffffff) { - Object* len; - { MaybeObject* maybe_len = - GetHeap()->NumberFromDouble(static_cast<double>(index) + 1); - if (!maybe_len->ToObject(&len)) return maybe_len; - } - set_length(len); + Handle<Object> len = array->GetIsolate()->factory()->NewNumber( + static_cast<double>(index) + 1); + array->set_length(*len); } - return value; } -Handle<Object> JSObject::GetElementWithInterceptor(Handle<JSObject> object, - Handle<Object> receiver, - uint32_t index) { +bool JSArray::IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) { + Isolate* isolate = jsarray_map->GetIsolate(); + DCHECK(!jsarray_map->is_dictionary_map()); + LookupResult lookup(isolate); + Handle<Name> length_string = isolate->factory()->length_string(); + jsarray_map->LookupDescriptor(NULL, *length_string, &lookup); + return lookup.IsReadOnly(); +} + + +bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array, + uint32_t index) { + uint32_t length = 0; + CHECK(array->length()->ToArrayIndex(&length)); + if (length <= index) { + Isolate* isolate = array->GetIsolate(); + LookupResult lookup(isolate); + Handle<Name> length_string = isolate->factory()->length_string(); + array->LookupOwnRealNamedProperty(length_string, &lookup); + return lookup.IsReadOnly(); + } + return false; +} + + +MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) { + Isolate* isolate = array->GetIsolate(); + Handle<Name> length = isolate->factory()->length_string(); + Handle<Object> args[2] = { length, array }; + Handle<Object> error = isolate->factory()->NewTypeError( + "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); + return isolate->Throw<Object>(error); +} + + +MaybeHandle<Object> JSObject::GetElementWithInterceptor( + Handle<JSObject> object, + Handle<Object> receiver, + uint32_t index) { Isolate* isolate = object->GetIsolate(); // Make sure that the top context does not 
change when doing @@ -12902,23 +13331,26 @@ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver, *object); v8::Handle<v8::Value> result = args.Call(getter, index); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); if (!result.IsEmpty()) { Handle<Object> result_internal = v8::Utils::OpenHandle(*result); result_internal->VerifyApiCallResultType(); // Rebox handle before return. - return Handle<Object>(*result_internal, isolate); + return handle(*result_internal, isolate); } } ElementsAccessor* handler = object->GetElementsAccessor(); - Handle<Object> result = handler->Get(receiver, object, index); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, handler->Get(receiver, object, index), + Object); if (!result->IsTheHole()) return result; - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return isolate->factory()->undefined_value(); - return Object::GetElementWithReceiver(isolate, proto, receiver, index); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return isolate->factory()->undefined_value(); + return Object::GetElementWithReceiver( + isolate, PrototypeIterator::GetCurrent(iter), receiver, index); } @@ -13041,7 +13473,7 @@ bool JSObject::ShouldConvertToFastElements() { - ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements()); + DCHECK(HasDictionaryElements() || HasDictionaryArgumentsElements()); // If the elements are sparse, we should not go back to fast case. 
if (!HasDenseElements()) return false; // An object requiring access checks is never allowed to have fast @@ -13081,7 +13513,7 @@ *has_smi_only_elements = false; if (HasSloppyArgumentsElements()) return false; if (FLAG_unbox_double_arrays) { - ASSERT(HasDictionaryElements()); + DCHECK(HasDictionaryElements()); SeededNumberDictionary* dictionary = element_dictionary(); bool found_double = false; for (int i = 0; i < dictionary->Capacity(); i++) { @@ -13108,47 +13540,45 @@ // together, so even though this function belongs in objects-debug.cc, // we keep it here instead to satisfy certain compilers. #ifdef OBJECT_PRINT -template<typename Shape, typename Key> -void Dictionary<Shape, Key>::Print(FILE* out) { - int capacity = HashTable<Shape, Key>::Capacity(); +template <typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::Print(OStream& os) { // NOLINT + int capacity = DerivedHashTable::Capacity(); for (int i = 0; i < capacity; i++) { - Object* k = HashTable<Shape, Key>::KeyAt(i); - if (HashTable<Shape, Key>::IsKey(k)) { - PrintF(out, " "); + Object* k = DerivedHashTable::KeyAt(i); + if (DerivedHashTable::IsKey(k)) { + os << " "; if (k->IsString()) { - String::cast(k)->StringPrint(out); + String::cast(k)->StringPrint(os); } else { - k->ShortPrint(out); + os << Brief(k); } - PrintF(out, ": "); - ValueAt(i)->ShortPrint(out); - PrintF(out, "\n"); + os << ": " << Brief(ValueAt(i)) << "\n"; } } } #endif -template<typename Shape, typename Key> -void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) { +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) { int pos = 0; - int capacity = HashTable<Shape, Key>::Capacity(); + int capacity = DerivedHashTable::Capacity(); DisallowHeapAllocation no_gc; WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc); for (int i = 0; i < capacity; i++) { - Object* k = Dictionary<Shape, Key>::KeyAt(i); - if 
(Dictionary<Shape, Key>::IsKey(k)) { + Object* k = Dictionary::KeyAt(i); + if (Dictionary::IsKey(k)) { elements->set(pos++, ValueAt(i), mode); } } - ASSERT(pos == elements->length()); + DCHECK(pos == elements->length()); } InterceptorInfo* JSObject::GetNamedInterceptor() { - ASSERT(map()->has_named_interceptor()); + DCHECK(map()->has_named_interceptor()); JSFunction* constructor = JSFunction::cast(map()->constructor()); - ASSERT(constructor->shared()->IsApiFunction()); + DCHECK(constructor->shared()->IsApiFunction()); Object* result = constructor->shared()->get_api_func_data()->named_property_handler(); return InterceptorInfo::cast(result); @@ -13156,149 +13586,166 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() { - ASSERT(map()->has_indexed_interceptor()); + DCHECK(map()->has_indexed_interceptor()); JSFunction* constructor = JSFunction::cast(map()->constructor()); - ASSERT(constructor->shared()->IsApiFunction()); + DCHECK(constructor->shared()->IsApiFunction()); Object* result = constructor->shared()->get_api_func_data()->indexed_property_handler(); return InterceptorInfo::cast(result); } -Handle<Object> JSObject::GetPropertyPostInterceptor( - Handle<JSObject> object, +MaybeHandle<Object> JSObject::GetPropertyWithInterceptor( + Handle<JSObject> holder, Handle<Object> receiver, - Handle<Name> name, - PropertyAttributes* attributes) { - // Check local property in holder, ignore interceptor. - Isolate* isolate = object->GetIsolate(); - LookupResult lookup(isolate); - object->LocalLookupRealNamedProperty(*name, &lookup); - Handle<Object> result; - if (lookup.IsFound()) { - result = GetProperty(object, receiver, &lookup, name, attributes); - } else { - // Continue searching via the prototype chain. 
- Handle<Object> prototype(object->GetPrototype(), isolate); - *attributes = ABSENT; - if (prototype->IsNull()) return isolate->factory()->undefined_value(); - result = GetPropertyWithReceiver(prototype, receiver, name, attributes); - } - return result; -} + Handle<Name> name) { + Isolate* isolate = holder->GetIsolate(); + // TODO(rossberg): Support symbols in the API. + if (name->IsSymbol()) return isolate->factory()->undefined_value(); -MaybeObject* JSObject::GetLocalPropertyPostInterceptor( - Object* receiver, - Name* name, - PropertyAttributes* attributes) { - // Check local property in holder, ignore interceptor. - LookupResult result(GetIsolate()); - LocalLookupRealNamedProperty(name, &result); - if (result.IsFound()) { - return GetProperty(receiver, &result, name, attributes); - } - return GetHeap()->undefined_value(); -} + Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate); + Handle<String> name_string = Handle<String>::cast(name); + if (interceptor->getter()->IsUndefined()) return MaybeHandle<Object>(); -Handle<Object> JSObject::GetPropertyWithInterceptor( - Handle<JSObject> object, - Handle<Object> receiver, - Handle<Name> name, - PropertyAttributes* attributes) { - Isolate* isolate = object->GetIsolate(); + v8::NamedPropertyGetterCallback getter = + v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter()); + LOG(isolate, + ApiNamedPropertyAccess("interceptor-named-get", *holder, *name)); + PropertyCallbackArguments + args(isolate, interceptor->data(), *receiver, *holder); + v8::Handle<v8::Value> result = + args.Call(getter, v8::Utils::ToLocal(name_string)); + RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); + if (result.IsEmpty()) return MaybeHandle<Object>(); + + Handle<Object> result_internal = v8::Utils::OpenHandle(*result); + result_internal->VerifyApiCallResultType(); + // Rebox handle before return + return handle(*result_internal, isolate); +} - // TODO(rossberg): Support symbols in the API. 
- if (name->IsSymbol()) return isolate->factory()->undefined_value(); - Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor(), isolate); - Handle<String> name_string = Handle<String>::cast(name); +// Compute the property keys from the interceptor. +// TODO(rossberg): support symbols in API, and filter here if needed. +MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor( + Handle<JSObject> object, Handle<JSReceiver> receiver) { + Isolate* isolate = receiver->GetIsolate(); + Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor()); + PropertyCallbackArguments + args(isolate, interceptor->data(), *receiver, *object); + v8::Handle<v8::Object> result; + if (!interceptor->enumerator()->IsUndefined()) { + v8::NamedPropertyEnumeratorCallback enum_fun = + v8::ToCData<v8::NamedPropertyEnumeratorCallback>( + interceptor->enumerator()); + LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object)); + result = args.Call(enum_fun); + } + if (result.IsEmpty()) return MaybeHandle<JSObject>(); +#if ENABLE_EXTRA_CHECKS + CHECK(v8::Utils::OpenHandle(*result)->IsJSArray() || + v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements()); +#endif + // Rebox before returning. + return handle(*v8::Utils::OpenHandle(*result), isolate); +} - if (!interceptor->getter()->IsUndefined()) { - v8::NamedPropertyGetterCallback getter = - v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter()); - LOG(isolate, - ApiNamedPropertyAccess("interceptor-named-get", *object, *name)); - PropertyCallbackArguments - args(isolate, interceptor->data(), *receiver, *object); - v8::Handle<v8::Value> result = - args.Call(getter, v8::Utils::ToLocal(name_string)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - if (!result.IsEmpty()) { - *attributes = NONE; - Handle<Object> result_internal = v8::Utils::OpenHandle(*result); - result_internal->VerifyApiCallResultType(); - // Rebox handle to escape this scope. 
- return handle(*result_internal, isolate); - } - } - return GetPropertyPostInterceptor(object, receiver, name, attributes); +// Compute the element keys from the interceptor. +MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor( + Handle<JSObject> object, Handle<JSReceiver> receiver) { + Isolate* isolate = receiver->GetIsolate(); + Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor()); + PropertyCallbackArguments + args(isolate, interceptor->data(), *receiver, *object); + v8::Handle<v8::Object> result; + if (!interceptor->enumerator()->IsUndefined()) { + v8::IndexedPropertyEnumeratorCallback enum_fun = + v8::ToCData<v8::IndexedPropertyEnumeratorCallback>( + interceptor->enumerator()); + LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object)); + result = args.Call(enum_fun); + } + if (result.IsEmpty()) return MaybeHandle<JSObject>(); +#if ENABLE_EXTRA_CHECKS + CHECK(v8::Utils::OpenHandle(*result)->IsJSArray() || + v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements()); +#endif + // Rebox before returning. + return handle(*v8::Utils::OpenHandle(*result), isolate); } -bool JSObject::HasRealNamedProperty(Handle<JSObject> object, - Handle<Name> key) { +Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object, + Handle<Name> key) { Isolate* isolate = object->GetIsolate(); SealHandleScope shs(isolate); // Check access rights if needed. 
if (object->IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS); - return false; + if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS); + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>()); + return maybe(false); } } LookupResult result(isolate); - object->LocalLookupRealNamedProperty(*key, &result); - return result.IsFound() && !result.IsInterceptor(); + object->LookupOwnRealNamedProperty(key, &result); + return maybe(result.IsFound() && !result.IsInterceptor()); } -bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) { +Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object, + uint32_t index) { Isolate* isolate = object->GetIsolate(); HandleScope scope(isolate); // Check access rights if needed. if (object->IsAccessCheckNeeded()) { - if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS); - return false; + if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS); + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>()); + return maybe(false); } } if (object->IsJSGlobalProxy()) { HandleScope scope(isolate); - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsNull()) return false; - ASSERT(proto->IsJSGlobalObject()); - return HasRealElementProperty(Handle<JSObject>::cast(proto), index); + PrototypeIterator iter(isolate, object); + if (iter.IsAtEnd()) return maybe(false); + DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject()); + return HasRealElementProperty( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index); } - return GetElementAttributeWithoutInterceptor( - object, object, index, false) != ABSENT; + Maybe<PropertyAttributes> result = + 
GetElementAttributeWithoutInterceptor(object, object, index, false); + if (!result.has_value) return Maybe<bool>(); + return maybe(result.value != ABSENT); } -bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object, - Handle<Name> key) { +Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object, + Handle<Name> key) { Isolate* isolate = object->GetIsolate(); SealHandleScope shs(isolate); // Check access rights if needed. if (object->IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS); - return false; + if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS); + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>()); + return maybe(false); } } LookupResult result(isolate); - object->LocalLookupRealNamedProperty(*key, &result); - return result.IsPropertyCallbacks(); + object->LookupOwnRealNamedProperty(key, &result); + return maybe(result.IsPropertyCallbacks()); } -int JSObject::NumberOfLocalProperties(PropertyAttributes filter) { +int JSObject::NumberOfOwnProperties(PropertyAttributes filter) { if (HasFastProperties()) { Map* map = this->map(); if (filter == NONE) return map->NumberOfOwnDescriptors(); @@ -13341,7 +13788,7 @@ void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) { // In-place heap sort. - ASSERT(content->length() == numbers->length()); + DCHECK(content->length() == numbers->length()); // Bottom-up max-heap construction. for (int i = 1; i < len; ++i) { @@ -13387,7 +13834,7 @@ // Sort this array and the numbers as pairs wrt. the (distinct) numbers. void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) { - ASSERT(this->length() == numbers->length()); + DCHECK(this->length() == numbers->length()); // For small arrays, simply use insertion sort. 
if (len <= 10) { InsertionSortPairs(this, numbers, len); @@ -13425,12 +13872,12 @@ } -// Fill in the names of local properties into the supplied storage. The main +// Fill in the names of own properties into the supplied storage. The main // purpose of this function is to provide reflection information for the object // mirrors. -void JSObject::GetLocalPropertyNames( +void JSObject::GetOwnPropertyNames( FixedArray* storage, int index, PropertyAttributes filter) { - ASSERT(storage->length() >= (NumberOfLocalProperties(filter) - index)); + DCHECK(storage->length() >= (NumberOfOwnProperties(filter) - index)); if (HasFastProperties()) { int real_size = map()->NumberOfOwnDescriptors(); DescriptorArray* descs = map()->instance_descriptors(); @@ -13449,8 +13896,8 @@ } -int JSObject::NumberOfLocalElements(PropertyAttributes filter) { - return GetLocalElementKeys(NULL, filter); +int JSObject::NumberOfOwnElements(PropertyAttributes filter) { + return GetOwnElementKeys(NULL, filter); } @@ -13464,12 +13911,12 @@ if (length == 0) return 0; } // Compute the number of enumerable elements. - return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM)); + return NumberOfOwnElements(static_cast<PropertyAttributes>(DONT_ENUM)); } -int JSObject::GetLocalElementKeys(FixedArray* storage, - PropertyAttributes filter) { +int JSObject::GetOwnElementKeys(FixedArray* storage, + PropertyAttributes filter) { int counter = 0; switch (GetElementsKind()) { case FAST_SMI_ELEMENTS: @@ -13487,14 +13934,14 @@ counter++; } } - ASSERT(!storage || storage->length() >= counter); + DCHECK(!storage || storage->length() >= counter); break; } case FAST_DOUBLE_ELEMENTS: case FAST_HOLEY_DOUBLE_ELEMENTS: { int length = IsJSArray() ? 
Smi::cast(JSArray::cast(this)->length())->value() : - FixedDoubleArray::cast(elements())->length(); + FixedArrayBase::cast(elements())->length(); for (int i = 0; i < length; i++) { if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) { if (storage != NULL) { @@ -13503,7 +13950,7 @@ counter++; } } - ASSERT(!storage || storage->length() >= counter); + DCHECK(!storage || storage->length() >= counter); break; } @@ -13521,7 +13968,7 @@ } counter++; } - ASSERT(!storage || storage->length() >= counter); + DCHECK(!storage || storage->length() >= counter); break; } @@ -13589,49 +14036,21 @@ counter += str->length(); } } - ASSERT(!storage || storage->length() == counter); + DCHECK(!storage || storage->length() == counter); return counter; } int JSObject::GetEnumElementKeys(FixedArray* storage) { - return GetLocalElementKeys(storage, - static_cast<PropertyAttributes>(DONT_ENUM)); + return GetOwnElementKeys(storage, static_cast<PropertyAttributes>(DONT_ENUM)); } -// StringKey simply carries a string object as key. -class StringKey : public HashTableKey { - public: - explicit StringKey(String* string) : - string_(string), - hash_(HashForObject(string)) { } - - bool IsMatch(Object* string) { - // We know that all entries in a hash table had their hash keys created. - // Use that knowledge to have fast failure. - if (hash_ != HashForObject(string)) { - return false; - } - return string_->Equals(String::cast(string)); - } - - uint32_t Hash() { return hash_; } - - uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); } - - Object* AsObject(Heap* heap) { return string_; } - - String* string_; - uint32_t hash_; -}; - - // StringSharedKeys are used as keys in the eval cache. 
class StringSharedKey : public HashTableKey { public: - StringSharedKey(String* source, - SharedFunctionInfo* shared, + StringSharedKey(Handle<String> source, + Handle<SharedFunctionInfo> shared, StrictMode strict_mode, int scope_position) : source_(source), @@ -13639,19 +14058,20 @@ strict_mode_(strict_mode), scope_position_(scope_position) { } - bool IsMatch(Object* other) { + bool IsMatch(Object* other) V8_OVERRIDE { + DisallowHeapAllocation no_allocation; if (!other->IsFixedArray()) return false; FixedArray* other_array = FixedArray::cast(other); SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0)); - if (shared != shared_) return false; + if (shared != *shared_) return false; int strict_unchecked = Smi::cast(other_array->get(2))->value(); - ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT); + DCHECK(strict_unchecked == SLOPPY || strict_unchecked == STRICT); StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked); if (strict_mode != strict_mode_) return false; int scope_position = Smi::cast(other_array->get(3))->value(); if (scope_position != scope_position_) return false; String* source = String::cast(other_array->get(1)); - return source->Equals(source_); + return source->Equals(*source_); } static uint32_t StringSharedHashHelper(String* source, @@ -13665,7 +14085,7 @@ // script source code and the start position of the calling scope. // We do this to ensure that the cache entries can survive garbage // collection. 
- Script* script = Script::cast(shared->script()); + Script* script(Script::cast(shared->script())); hash ^= String::cast(script->source())->Hash(); if (strict_mode == STRICT) hash ^= 0x8000; hash += scope_position; @@ -13673,39 +14093,37 @@ return hash; } - uint32_t Hash() { - return StringSharedHashHelper( - source_, shared_, strict_mode_, scope_position_); + uint32_t Hash() V8_OVERRIDE { + return StringSharedHashHelper(*source_, *shared_, strict_mode_, + scope_position_); } - uint32_t HashForObject(Object* obj) { + uint32_t HashForObject(Object* obj) V8_OVERRIDE { + DisallowHeapAllocation no_allocation; FixedArray* other_array = FixedArray::cast(obj); SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0)); String* source = String::cast(other_array->get(1)); int strict_unchecked = Smi::cast(other_array->get(2))->value(); - ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT); + DCHECK(strict_unchecked == SLOPPY || strict_unchecked == STRICT); StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked); int scope_position = Smi::cast(other_array->get(3))->value(); return StringSharedHashHelper( source, shared, strict_mode, scope_position); } - MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) { - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateFixedArray(4); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* other_array = FixedArray::cast(obj); - other_array->set(0, shared_); - other_array->set(1, source_); - other_array->set(2, Smi::FromInt(strict_mode_)); - other_array->set(3, Smi::FromInt(scope_position_)); - return other_array; + + Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { + Handle<FixedArray> array = isolate->factory()->NewFixedArray(4); + array->set(0, *shared_); + array->set(1, *source_); + array->set(2, Smi::FromInt(strict_mode_)); + array->set(3, Smi::FromInt(scope_position_)); + return array; } private: - String* source_; - SharedFunctionInfo* shared_; + Handle<String> 
source_; + Handle<SharedFunctionInfo> shared_; StrictMode strict_mode_; int scope_position_; }; @@ -13714,7 +14132,7 @@ // RegExpKey carries the source and flags of a regular expression as key. class RegExpKey : public HashTableKey { public: - RegExpKey(String* string, JSRegExp::Flags flags) + RegExpKey(Handle<String> string, JSRegExp::Flags flags) : string_(string), flags_(Smi::FromInt(flags.value())) { } @@ -13722,22 +14140,22 @@ // stored value is stored where the key should be. IsMatch then // compares the search key to the found object, rather than comparing // a key to a key. - bool IsMatch(Object* obj) { + bool IsMatch(Object* obj) V8_OVERRIDE { FixedArray* val = FixedArray::cast(obj); return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex))) && (flags_ == val->get(JSRegExp::kFlagsIndex)); } - uint32_t Hash() { return RegExpHash(string_, flags_); } + uint32_t Hash() V8_OVERRIDE { return RegExpHash(*string_, flags_); } - Object* AsObject(Heap* heap) { + Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { // Plain hash maps, which is where regexp keys are used, don't // use this function. 
UNREACHABLE(); - return NULL; + return MaybeHandle<Object>().ToHandleChecked(); } - uint32_t HashForObject(Object* obj) { + uint32_t HashForObject(Object* obj) V8_OVERRIDE { FixedArray* val = FixedArray::cast(obj); return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)), Smi::cast(val->get(JSRegExp::kFlagsIndex))); @@ -13747,20 +14165,20 @@ return string->Hash() + flags->value(); } - String* string_; + Handle<String> string_; Smi* flags_; }; -MaybeObject* OneByteStringKey::AsObject(Heap* heap) { +Handle<Object> OneByteStringKey::AsHandle(Isolate* isolate) { if (hash_field_ == 0) Hash(); - return heap->AllocateOneByteInternalizedString(string_, hash_field_); + return isolate->factory()->NewOneByteInternalizedString(string_, hash_field_); } -MaybeObject* TwoByteStringKey::AsObject(Heap* heap) { +Handle<Object> TwoByteStringKey::AsHandle(Isolate* isolate) { if (hash_field_ == 0) Hash(); - return heap->AllocateTwoByteInternalizedString(string_, hash_field_); + return isolate->factory()->NewTwoByteInternalizedString(string_, hash_field_); } @@ -13781,19 +14199,18 @@ template<> -MaybeObject* SubStringKey<uint8_t>::AsObject(Heap* heap) { +Handle<Object> SubStringKey<uint8_t>::AsHandle(Isolate* isolate) { if (hash_field_ == 0) Hash(); Vector<const uint8_t> chars(GetChars() + from_, length_); - return heap->AllocateOneByteInternalizedString(chars, hash_field_); + return isolate->factory()->NewOneByteInternalizedString(chars, hash_field_); } template<> -MaybeObject* SubStringKey<uint16_t>::AsObject( - Heap* heap) { +Handle<Object> SubStringKey<uint16_t>::AsHandle(Isolate* isolate) { if (hash_field_ == 0) Hash(); Vector<const uint16_t> chars(GetChars() + from_, length_); - return heap->AllocateTwoByteInternalizedString(chars, hash_field_); + return isolate->factory()->NewTwoByteInternalizedString(chars, hash_field_); } @@ -13818,32 +14235,31 @@ // InternalizedStringKey carries a string/internalized-string object as key. 
class InternalizedStringKey : public HashTableKey { public: - explicit InternalizedStringKey(String* string) + explicit InternalizedStringKey(Handle<String> string) : string_(string) { } - bool IsMatch(Object* string) { - return String::cast(string)->Equals(string_); + virtual bool IsMatch(Object* string) V8_OVERRIDE { + return String::cast(string)->Equals(*string_); } - uint32_t Hash() { return string_->Hash(); } + virtual uint32_t Hash() V8_OVERRIDE { return string_->Hash(); } - uint32_t HashForObject(Object* other) { + virtual uint32_t HashForObject(Object* other) V8_OVERRIDE { return String::cast(other)->Hash(); } - MaybeObject* AsObject(Heap* heap) { - // Attempt to flatten the string, so that internalized strings will most - // often be flat strings. - string_ = string_->TryFlattenGetString(); + virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { // Internalize the string if possible. - Map* map = heap->InternalizedStringMapForString(string_); - if (map != NULL) { - string_->set_map_no_write_barrier(map); - ASSERT(string_->IsInternalizedString()); + MaybeHandle<Map> maybe_map = + isolate->factory()->InternalizedStringMapForString(string_); + Handle<Map> map; + if (maybe_map.ToHandle(&map)) { + string_->set_map_no_write_barrier(*map); + DCHECK(string_->IsInternalizedString()); return string_; } // Otherwise allocate a new internalized string. 
- return heap->AllocateInternalizedStringImpl( + return isolate->factory()->NewInternalizedStringImpl( string_, string_->length(), string_->hash_field()); } @@ -13851,30 +14267,32 @@ return String::cast(obj)->Hash(); } - String* string_; + Handle<String> string_; }; -template<typename Shape, typename Key> -void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) { +template<typename Derived, typename Shape, typename Key> +void HashTable<Derived, Shape, Key>::IteratePrefix(ObjectVisitor* v) { IteratePointers(v, 0, kElementsStartOffset); } -template<typename Shape, typename Key> -void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) { +template<typename Derived, typename Shape, typename Key> +void HashTable<Derived, Shape, Key>::IterateElements(ObjectVisitor* v) { IteratePointers(v, kElementsStartOffset, kHeaderSize + length() * kPointerSize); } -template<typename Shape, typename Key> -MaybeObject* HashTable<Shape, Key>::Allocate(Heap* heap, - int at_least_space_for, - MinimumCapacity capacity_option, - PretenureFlag pretenure) { - ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for)); +template<typename Derived, typename Shape, typename Key> +Handle<Derived> HashTable<Derived, Shape, Key>::New( + Isolate* isolate, + int at_least_space_for, + MinimumCapacity capacity_option, + PretenureFlag pretenure) { + DCHECK(0 <= at_least_space_for); + DCHECK(!capacity_option || IsPowerOf2(at_least_space_for)); int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY) ? 
at_least_space_for : ComputeCapacity(at_least_space_for); @@ -13882,22 +14300,23 @@ v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true); } - Object* obj; - { MaybeObject* maybe_obj = - heap-> AllocateHashTable(EntryToIndex(capacity), pretenure); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - HashTable::cast(obj)->SetNumberOfElements(0); - HashTable::cast(obj)->SetNumberOfDeletedElements(0); - HashTable::cast(obj)->SetCapacity(capacity); - return obj; + Factory* factory = isolate->factory(); + int length = EntryToIndex(capacity); + Handle<FixedArray> array = factory->NewFixedArray(length, pretenure); + array->set_map_no_write_barrier(*factory->hash_table_map()); + Handle<Derived> table = Handle<Derived>::cast(array); + + table->SetNumberOfElements(0); + table->SetNumberOfDeletedElements(0); + table->SetCapacity(capacity); + return table; } // Find entry for key otherwise return kNotFound. -int NameDictionary::FindEntry(Name* key) { +int NameDictionary::FindEntry(Handle<Name> key) { if (!key->IsUniqueName()) { - return HashTable<NameDictionaryShape, Name*>::FindEntry(key); + return DerivedHashTable::FindEntry(key); } // Optimized for unique names. Knowledge of the key type allows: @@ -13918,25 +14337,27 @@ int index = EntryToIndex(entry); Object* element = get(index); if (element->IsUndefined()) break; // Empty entry. - if (key == element) return entry; + if (*key == element) return entry; if (!element->IsUniqueName() && !element->IsTheHole() && - Name::cast(element)->Equals(key)) { + Name::cast(element)->Equals(*key)) { // Replace a key that is a non-internalized string by the equivalent // internalized string for faster further lookups. 
- set(index, key); + set(index, *key); return entry; } - ASSERT(element->IsTheHole() || !Name::cast(element)->Equals(key)); + DCHECK(element->IsTheHole() || !Name::cast(element)->Equals(*key)); entry = NextProbe(entry, count++, capacity); } return kNotFound; } -template<typename Shape, typename Key> -MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) { - ASSERT(NumberOfElements() < new_table->Capacity()); +template<typename Derived, typename Shape, typename Key> +void HashTable<Derived, Shape, Key>::Rehash( + Handle<Derived> new_table, + Key key) { + DCHECK(NumberOfElements() < new_table->Capacity()); DisallowHeapAllocation no_gc; WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc); @@ -13954,7 +14375,7 @@ uint32_t from_index = EntryToIndex(i); Object* k = get(from_index); if (IsKey(k)) { - uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k); + uint32_t hash = HashTable::HashForObject(key, k); uint32_t insertion_index = EntryToIndex(new_table->FindInsertionEntry(hash)); for (int j = 0; j < Shape::kEntrySize; j++) { @@ -13964,16 +14385,16 @@ } new_table->SetNumberOfElements(NumberOfElements()); new_table->SetNumberOfDeletedElements(0); - return new_table; } -template<typename Shape, typename Key> -uint32_t HashTable<Shape, Key>::EntryForProbe(Key key, - Object* k, - int probe, - uint32_t expected) { - uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k); +template<typename Derived, typename Shape, typename Key> +uint32_t HashTable<Derived, Shape, Key>::EntryForProbe( + Key key, + Object* k, + int probe, + uint32_t expected) { + uint32_t hash = HashTable::HashForObject(key, k); uint32_t capacity = Capacity(); uint32_t entry = FirstProbe(hash, capacity); for (int i = 1; i < probe; i++) { @@ -13984,10 +14405,10 @@ } -template<typename Shape, typename Key> -void HashTable<Shape, Key>::Swap(uint32_t entry1, - uint32_t entry2, - WriteBarrierMode mode) { +template<typename Derived, typename Shape, typename Key> +void 
HashTable<Derived, Shape, Key>::Swap(uint32_t entry1, + uint32_t entry2, + WriteBarrierMode mode) { int index1 = EntryToIndex(entry1); int index2 = EntryToIndex(entry2); Object* temp[Shape::kEntrySize]; @@ -14003,8 +14424,8 @@ } -template<typename Shape, typename Key> -void HashTable<Shape, Key>::Rehash(Key key) { +template<typename Derived, typename Shape, typename Key> +void HashTable<Derived, Shape, Key>::Rehash(Key key) { DisallowHeapAllocation no_gc; WriteBarrierMode mode = GetWriteBarrierMode(no_gc); uint32_t capacity = Capacity(); @@ -14036,71 +14457,73 @@ } -template<typename Shape, typename Key> -MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, - Key key, - PretenureFlag pretenure) { - int capacity = Capacity(); - int nof = NumberOfElements() + n; - int nod = NumberOfDeletedElements(); +template<typename Derived, typename Shape, typename Key> +Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity( + Handle<Derived> table, + int n, + Key key, + PretenureFlag pretenure) { + Isolate* isolate = table->GetIsolate(); + int capacity = table->Capacity(); + int nof = table->NumberOfElements() + n; + int nod = table->NumberOfDeletedElements(); // Return if: // 50% is still free after adding n elements and // at most 50% of the free elements are deleted elements. if (nod <= (capacity - nof) >> 1) { int needed_free = nof >> 1; - if (nof + needed_free <= capacity) return this; + if (nof + needed_free <= capacity) return table; } const int kMinCapacityForPretenure = 256; bool should_pretenure = pretenure == TENURED || - ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this)); - Object* obj; - { MaybeObject* maybe_obj = - Allocate(GetHeap(), - nof * 2, - USE_DEFAULT_MINIMUM_CAPACITY, - should_pretenure ? 
TENURED : NOT_TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + ((capacity > kMinCapacityForPretenure) && + !isolate->heap()->InNewSpace(*table)); + Handle<Derived> new_table = HashTable::New( + isolate, + nof * 2, + USE_DEFAULT_MINIMUM_CAPACITY, + should_pretenure ? TENURED : NOT_TENURED); - return Rehash(HashTable::cast(obj), key); + table->Rehash(new_table, key); + return new_table; } -template<typename Shape, typename Key> -MaybeObject* HashTable<Shape, Key>::Shrink(Key key) { - int capacity = Capacity(); - int nof = NumberOfElements(); +template<typename Derived, typename Shape, typename Key> +Handle<Derived> HashTable<Derived, Shape, Key>::Shrink(Handle<Derived> table, + Key key) { + int capacity = table->Capacity(); + int nof = table->NumberOfElements(); // Shrink to fit the number of elements if only a quarter of the // capacity is filled with elements. - if (nof > (capacity >> 2)) return this; + if (nof > (capacity >> 2)) return table; // Allocate a new dictionary with room for at least the current // number of elements. The allocation method will make sure that // there is extra room in the dictionary for additions. Don't go // lower than room for 16 elements. int at_least_room_for = nof; - if (at_least_room_for < 16) return this; + if (at_least_room_for < 16) return table; + Isolate* isolate = table->GetIsolate(); const int kMinCapacityForPretenure = 256; bool pretenure = (at_least_room_for > kMinCapacityForPretenure) && - !GetHeap()->InNewSpace(this); - Object* obj; - { MaybeObject* maybe_obj = - Allocate(GetHeap(), - at_least_room_for, - USE_DEFAULT_MINIMUM_CAPACITY, - pretenure ? TENURED : NOT_TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + !isolate->heap()->InNewSpace(*table); + Handle<Derived> new_table = HashTable::New( + isolate, + at_least_room_for, + USE_DEFAULT_MINIMUM_CAPACITY, + pretenure ? 
TENURED : NOT_TENURED); - return Rehash(HashTable::cast(obj), key); + table->Rehash(new_table, key); + return new_table; } -template<typename Shape, typename Key> -uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) { +template<typename Derived, typename Shape, typename Key> +uint32_t HashTable<Derived, Shape, Key>::FindInsertionEntry(uint32_t hash) { uint32_t capacity = Capacity(); uint32_t entry = FirstProbe(hash, capacity); uint32_t count = 1; @@ -14117,221 +14540,234 @@ // Force instantiation of template instances class. // Please note this list is compiler dependent. -template class HashTable<StringTableShape, HashTableKey*>; +template class HashTable<StringTable, StringTableShape, HashTableKey*>; -template class HashTable<CompilationCacheShape, HashTableKey*>; +template class HashTable<CompilationCacheTable, + CompilationCacheShape, + HashTableKey*>; -template class HashTable<MapCacheShape, HashTableKey*>; +template class HashTable<MapCache, MapCacheShape, HashTableKey*>; -template class HashTable<ObjectHashTableShape<1>, Object*>; +template class HashTable<ObjectHashTable, + ObjectHashTableShape, + Handle<Object> >; -template class HashTable<ObjectHashTableShape<2>, Object*>; +template class HashTable<WeakHashTable, WeakHashTableShape<2>, Handle<Object> >; -template class HashTable<WeakHashTableShape<2>, Object*>; +template class Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >; -template class Dictionary<NameDictionaryShape, Name*>; +template class Dictionary<SeededNumberDictionary, + SeededNumberDictionaryShape, + uint32_t>; -template class Dictionary<SeededNumberDictionaryShape, uint32_t>; +template class Dictionary<UnseededNumberDictionary, + UnseededNumberDictionaryShape, + uint32_t>; -template class Dictionary<UnseededNumberDictionaryShape, uint32_t>; +template Handle<SeededNumberDictionary> +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + New(Isolate*, int at_least_space_for, PretenureFlag 
pretenure); -template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: - Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure); +template Handle<UnseededNumberDictionary> +Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>:: + New(Isolate*, int at_least_space_for, PretenureFlag pretenure); -template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: - Allocate(Heap* heap, int at_least_space_for, PretenureFlag pretenure); +template Handle<NameDictionary> +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: + New(Isolate*, int n, PretenureFlag pretenure); -template MaybeObject* Dictionary<NameDictionaryShape, Name*>:: - Allocate(Heap* heap, int n, PretenureFlag pretenure); +template Handle<SeededNumberDictionary> +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + AtPut(Handle<SeededNumberDictionary>, uint32_t, Handle<Object>); -template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut( - uint32_t, Object*); +template Handle<UnseededNumberDictionary> +Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>:: + AtPut(Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>); -template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: - AtPut(uint32_t, Object*); - -template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>:: +template Object* +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: SlowReverseLookup(Object* value); -template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: +template Object* +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: SlowReverseLookup(Object* value); -template Object* Dictionary<NameDictionaryShape, Name*>::SlowReverseLookup( - Object*); - -template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo( - FixedArray*, - PropertyAttributes, - Dictionary<SeededNumberDictionaryShape, 
uint32_t>::SortMode); - -template Object* Dictionary<NameDictionaryShape, Name*>::DeleteProperty( - int, JSObject::DeleteMode); - -template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>:: - DeleteProperty(int, JSObject::DeleteMode); - -template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Shrink(Name* n); - -template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink( - uint32_t); - -template void Dictionary<NameDictionaryShape, Name*>::CopyKeysTo( - FixedArray*, - int, - PropertyAttributes, - Dictionary<NameDictionaryShape, Name*>::SortMode); +template void +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + CopyKeysTo( + FixedArray*, + PropertyAttributes, + Dictionary<SeededNumberDictionary, + SeededNumberDictionaryShape, + uint32_t>::SortMode); + +template Handle<Object> +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::DeleteProperty( + Handle<NameDictionary>, int, JSObject::DeleteMode); + +template Handle<Object> +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + DeleteProperty(Handle<SeededNumberDictionary>, int, JSObject::DeleteMode); + +template Handle<NameDictionary> +HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >:: + New(Isolate*, int, MinimumCapacity, PretenureFlag); + +template Handle<NameDictionary> +HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >:: + Shrink(Handle<NameDictionary>, Handle<Name>); + +template Handle<SeededNumberDictionary> +HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + Shrink(Handle<SeededNumberDictionary>, uint32_t); + +template void Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: + CopyKeysTo( + FixedArray*, + int, + PropertyAttributes, + Dictionary< + NameDictionary, NameDictionaryShape, Handle<Name> >::SortMode); template int -Dictionary<NameDictionaryShape, Name*>::NumberOfElementsFilterAttributes( - PropertyAttributes); - -template MaybeObject* 
Dictionary<NameDictionaryShape, Name*>::Add( - Name*, Object*, PropertyDetails); +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: + NumberOfElementsFilterAttributes(PropertyAttributes); -template MaybeObject* -Dictionary<NameDictionaryShape, Name*>::GenerateNewEnumerationIndices(); +template Handle<NameDictionary> +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add( + Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails); + +template void +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: + GenerateNewEnumerationIndices(Handle<NameDictionary>); template int -Dictionary<SeededNumberDictionaryShape, uint32_t>:: +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: NumberOfElementsFilterAttributes(PropertyAttributes); -template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Add( - uint32_t, Object*, PropertyDetails); - -template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::Add( - uint32_t, Object*, PropertyDetails); - -template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: - EnsureCapacity(int, uint32_t); - -template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: - EnsureCapacity(int, uint32_t); - -template MaybeObject* Dictionary<NameDictionaryShape, Name*>:: - EnsureCapacity(int, Name*); - -template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: - AddEntry(uint32_t, Object*, PropertyDetails, uint32_t); - -template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: - AddEntry(uint32_t, Object*, PropertyDetails, uint32_t); - -template MaybeObject* Dictionary<NameDictionaryShape, Name*>::AddEntry( - Name*, Object*, PropertyDetails, uint32_t); +template Handle<SeededNumberDictionary> +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + Add(Handle<SeededNumberDictionary>, + uint32_t, + Handle<Object>, + PropertyDetails); + +template 
Handle<UnseededNumberDictionary> +Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>:: + Add(Handle<UnseededNumberDictionary>, + uint32_t, + Handle<Object>, + PropertyDetails); + +template Handle<SeededNumberDictionary> +Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + EnsureCapacity(Handle<SeededNumberDictionary>, int, uint32_t); + +template Handle<UnseededNumberDictionary> +Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>:: + EnsureCapacity(Handle<UnseededNumberDictionary>, int, uint32_t); + +template Handle<NameDictionary> +Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: + EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>); template -int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements(); +int Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + NumberOfEnumElements(); template -int Dictionary<NameDictionaryShape, Name*>::NumberOfEnumElements(); +int Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >:: + NumberOfEnumElements(); template -int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t); +int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>:: + FindEntry(uint32_t); Handle<Object> JSObject::PrepareSlowElementsForSort( Handle<JSObject> object, uint32_t limit) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->PrepareSlowElementsForSort(limit), - Object); -} - - -// Collates undefined and unexisting elements below limit from position -// zero of the elements. The object stays in Dictionary mode. -MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) { - ASSERT(HasDictionaryElements()); + DCHECK(object->HasDictionaryElements()); + Isolate* isolate = object->GetIsolate(); // Must stay in dictionary mode, either because of requires_slow_elements, // or because we are not going to sort (and therefore compact) all of the // elements. 
- SeededNumberDictionary* dict = element_dictionary(); - HeapNumber* result_double = NULL; - if (limit > static_cast<uint32_t>(Smi::kMaxValue)) { - // Allocate space for result before we start mutating the object. - Object* new_double; - { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0); - if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double; - } - result_double = HeapNumber::cast(new_double); - } - - Object* obj; - { MaybeObject* maybe_obj = - SeededNumberDictionary::Allocate(GetHeap(), dict->NumberOfElements()); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj); - - DisallowHeapAllocation no_alloc; + Handle<SeededNumberDictionary> dict(object->element_dictionary(), isolate); + Handle<SeededNumberDictionary> new_dict = + SeededNumberDictionary::New(isolate, dict->NumberOfElements()); uint32_t pos = 0; uint32_t undefs = 0; int capacity = dict->Capacity(); + Handle<Smi> bailout(Smi::FromInt(-1), isolate); + // Entry to the new dictionary does not cause it to grow, as we have + // allocated one that is large enough for all entries. + DisallowHeapAllocation no_gc; for (int i = 0; i < capacity; i++) { Object* k = dict->KeyAt(i); - if (dict->IsKey(k)) { - ASSERT(k->IsNumber()); - ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0); - ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0); - ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32); - Object* value = dict->ValueAt(i); - PropertyDetails details = dict->DetailsAt(i); - if (details.type() == CALLBACKS || details.IsReadOnly()) { - // Bail out and do the sorting of undefineds and array holes in JS. - // Also bail out if the element is not supposed to be moved. - return Smi::FromInt(-1); - } - uint32_t key = NumberToUint32(k); - // In the following we assert that adding the entry to the new dictionary - // does not cause GC. 
This is the case because we made sure to allocate - // the dictionary big enough above, so it need not grow. - if (key < limit) { - if (value->IsUndefined()) { - undefs++; - } else { - if (pos > static_cast<uint32_t>(Smi::kMaxValue)) { - // Adding an entry with the key beyond smi-range requires - // allocation. Bailout. - return Smi::FromInt(-1); - } - new_dict->AddNumberEntry(pos, value, details)->ToObjectUnchecked(); - pos++; - } + if (!dict->IsKey(k)) continue; + + DCHECK(k->IsNumber()); + DCHECK(!k->IsSmi() || Smi::cast(k)->value() >= 0); + DCHECK(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0); + DCHECK(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32); + + HandleScope scope(isolate); + Handle<Object> value(dict->ValueAt(i), isolate); + PropertyDetails details = dict->DetailsAt(i); + if (details.type() == CALLBACKS || details.IsReadOnly()) { + // Bail out and do the sorting of undefineds and array holes in JS. + // Also bail out if the element is not supposed to be moved. + return bailout; + } + + uint32_t key = NumberToUint32(k); + if (key < limit) { + if (value->IsUndefined()) { + undefs++; + } else if (pos > static_cast<uint32_t>(Smi::kMaxValue)) { + // Adding an entry with the key beyond smi-range requires + // allocation. Bailout. + return bailout; } else { - if (key > static_cast<uint32_t>(Smi::kMaxValue)) { - // Adding an entry with the key beyond smi-range requires - // allocation. Bailout. - return Smi::FromInt(-1); - } - new_dict->AddNumberEntry(key, value, details)->ToObjectUnchecked(); + Handle<Object> result = SeededNumberDictionary::AddNumberEntry( + new_dict, pos, value, details); + DCHECK(result.is_identical_to(new_dict)); + USE(result); + pos++; } + } else if (key > static_cast<uint32_t>(Smi::kMaxValue)) { + // Adding an entry with the key beyond smi-range requires + // allocation. Bailout. 
+ return bailout; + } else { + Handle<Object> result = SeededNumberDictionary::AddNumberEntry( + new_dict, key, value, details); + DCHECK(result.is_identical_to(new_dict)); + USE(result); } } uint32_t result = pos; PropertyDetails no_details = PropertyDetails(NONE, NORMAL, 0); - Heap* heap = GetHeap(); while (undefs > 0) { if (pos > static_cast<uint32_t>(Smi::kMaxValue)) { // Adding an entry with the key beyond smi-range requires // allocation. Bailout. - return Smi::FromInt(-1); + return bailout; } - new_dict->AddNumberEntry(pos, heap->undefined_value(), no_details)-> - ToObjectUnchecked(); + HandleScope scope(isolate); + Handle<Object> result = SeededNumberDictionary::AddNumberEntry( + new_dict, pos, isolate->factory()->undefined_value(), no_details); + DCHECK(result.is_identical_to(new_dict)); + USE(result); pos++; undefs--; } - set_elements(new_dict); - - if (result <= static_cast<uint32_t>(Smi::kMaxValue)) { - return Smi::FromInt(static_cast<int>(result)); - } + object->set_elements(*new_dict); - ASSERT_NE(NULL, result_double); - result_double->set_value(static_cast<double>(result)); - return result_double; + AllowHeapAllocation allocate_return_value; + return isolate->factory()->NewNumberFromUint(result); } @@ -14365,9 +14801,9 @@ Handle<FixedArray> fast_elements = isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure); dict->CopyValuesTo(*fast_elements); - object->ValidateElements(); + JSObject::ValidateElements(object); - object->set_map_and_elements(*new_map, *fast_elements); + JSObject::SetMapAndElements(object, new_map, fast_elements); } else if (object->HasExternalArrayElements() || object->HasFixedTypedArrayElements()) { // Typed arrays cannot have holes or undefined elements. 
@@ -14376,7 +14812,7 @@ } else if (!object->HasFastDoubleElements()) { EnsureWritableFastElements(object); } - ASSERT(object->HasFastSmiOrObjectElements() || + DCHECK(object->HasFastSmiOrObjectElements() || object->HasFastDoubleElements()); // Collect holes at the end, undefined before that and the rest at the @@ -14502,11 +14938,14 @@ } -Object* ExternalUint8ClampedArray::SetValue(uint32_t index, Object* value) { +Handle<Object> ExternalUint8ClampedArray::SetValue( + Handle<ExternalUint8ClampedArray> array, + uint32_t index, + Handle<Object> value) { uint8_t clamped_value = 0; - if (index < static_cast<uint32_t>(length())) { + if (index < static_cast<uint32_t>(array->length())) { if (value->IsSmi()) { - int int_value = Smi::cast(value)->value(); + int int_value = Handle<Smi>::cast(value)->value(); if (int_value < 0) { clamped_value = 0; } else if (int_value > 255) { @@ -14515,7 +14954,7 @@ clamped_value = static_cast<uint8_t>(int_value); } } else if (value->IsHeapNumber()) { - double double_value = HeapNumber::cast(value)->value(); + double double_value = Handle<HeapNumber>::cast(value)->value(); if (!(double_value > 0)) { // NaN and less than zero clamp to zero. clamped_value = 0; @@ -14529,124 +14968,76 @@ } else { // Clamp undefined to zero (default). All other types have been // converted to a number type further up in the call chain. 
- ASSERT(value->IsUndefined()); + DCHECK(value->IsUndefined()); } - set(index, clamped_value); + array->set(index, clamped_value); } - return Smi::FromInt(clamped_value); + return handle(Smi::FromInt(clamped_value), array->GetIsolate()); } -Handle<Object> ExternalUint8ClampedArray::SetValue( - Handle<ExternalUint8ClampedArray> array, +template<typename ExternalArrayClass, typename ValueType> +static Handle<Object> ExternalArrayIntSetter( + Isolate* isolate, + Handle<ExternalArrayClass> receiver, uint32_t index, Handle<Object> value) { - return Handle<Object>(array->SetValue(index, *value), array->GetIsolate()); -} - - -template<typename ExternalArrayClass, typename ValueType> -static MaybeObject* ExternalArrayIntSetter(Heap* heap, - ExternalArrayClass* receiver, - uint32_t index, - Object* value) { ValueType cast_value = 0; if (index < static_cast<uint32_t>(receiver->length())) { if (value->IsSmi()) { - int int_value = Smi::cast(value)->value(); + int int_value = Handle<Smi>::cast(value)->value(); cast_value = static_cast<ValueType>(int_value); } else if (value->IsHeapNumber()) { - double double_value = HeapNumber::cast(value)->value(); + double double_value = Handle<HeapNumber>::cast(value)->value(); cast_value = static_cast<ValueType>(DoubleToInt32(double_value)); } else { // Clamp undefined to zero (default). All other types have been // converted to a number type further up in the call chain. 
- ASSERT(value->IsUndefined()); + DCHECK(value->IsUndefined()); } receiver->set(index, cast_value); } - return heap->NumberFromInt32(cast_value); + return isolate->factory()->NewNumberFromInt(cast_value); } Handle<Object> ExternalInt8Array::SetValue(Handle<ExternalInt8Array> array, uint32_t index, Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); -} - - -MaybeObject* ExternalInt8Array::SetValue(uint32_t index, Object* value) { - return ExternalArrayIntSetter<ExternalInt8Array, int8_t> - (GetHeap(), this, index, value); -} - - -Handle<Object> ExternalUint8Array::SetValue( - Handle<ExternalUint8Array> array, - uint32_t index, - Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); -} - - -MaybeObject* ExternalUint8Array::SetValue(uint32_t index, - Object* value) { - return ExternalArrayIntSetter<ExternalUint8Array, uint8_t> - (GetHeap(), this, index, value); -} - - -Handle<Object> ExternalInt16Array::SetValue( - Handle<ExternalInt16Array> array, - uint32_t index, - Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); + return ExternalArrayIntSetter<ExternalInt8Array, int8_t>( + array->GetIsolate(), array, index, value); } -MaybeObject* ExternalInt16Array::SetValue(uint32_t index, - Object* value) { - return ExternalArrayIntSetter<ExternalInt16Array, int16_t> - (GetHeap(), this, index, value); +Handle<Object> ExternalUint8Array::SetValue(Handle<ExternalUint8Array> array, + uint32_t index, + Handle<Object> value) { + return ExternalArrayIntSetter<ExternalUint8Array, uint8_t>( + array->GetIsolate(), array, index, value); } -Handle<Object> ExternalUint16Array::SetValue( - Handle<ExternalUint16Array> array, - uint32_t index, - Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); +Handle<Object> 
ExternalInt16Array::SetValue(Handle<ExternalInt16Array> array, + uint32_t index, + Handle<Object> value) { + return ExternalArrayIntSetter<ExternalInt16Array, int16_t>( + array->GetIsolate(), array, index, value); } -MaybeObject* ExternalUint16Array::SetValue(uint32_t index, - Object* value) { - return ExternalArrayIntSetter<ExternalUint16Array, uint16_t> - (GetHeap(), this, index, value); +Handle<Object> ExternalUint16Array::SetValue(Handle<ExternalUint16Array> array, + uint32_t index, + Handle<Object> value) { + return ExternalArrayIntSetter<ExternalUint16Array, uint16_t>( + array->GetIsolate(), array, index, value); } Handle<Object> ExternalInt32Array::SetValue(Handle<ExternalInt32Array> array, - uint32_t index, - Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); -} - - -MaybeObject* ExternalInt32Array::SetValue(uint32_t index, Object* value) { - return ExternalArrayIntSetter<ExternalInt32Array, int32_t> - (GetHeap(), this, index, value); + uint32_t index, + Handle<Object> value) { + return ExternalArrayIntSetter<ExternalInt32Array, int32_t>( + array->GetIsolate(), array, index, value); } @@ -14654,30 +15045,22 @@ Handle<ExternalUint32Array> array, uint32_t index, Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); -} - - -MaybeObject* ExternalUint32Array::SetValue(uint32_t index, Object* value) { uint32_t cast_value = 0; - Heap* heap = GetHeap(); - if (index < static_cast<uint32_t>(length())) { + if (index < static_cast<uint32_t>(array->length())) { if (value->IsSmi()) { - int int_value = Smi::cast(value)->value(); + int int_value = Handle<Smi>::cast(value)->value(); cast_value = static_cast<uint32_t>(int_value); } else if (value->IsHeapNumber()) { - double double_value = HeapNumber::cast(value)->value(); + double double_value = Handle<HeapNumber>::cast(value)->value(); cast_value = static_cast<uint32_t>(DoubleToUint32(double_value)); } else { 
// Clamp undefined to zero (default). All other types have been // converted to a number type further up in the call chain. - ASSERT(value->IsUndefined()); + DCHECK(value->IsUndefined()); } - set(index, cast_value); + array->set(index, cast_value); } - return heap->NumberFromUint32(cast_value); + return array->GetIsolate()->factory()->NewNumberFromUint(cast_value); } @@ -14685,30 +15068,22 @@ Handle<ExternalFloat32Array> array, uint32_t index, Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); -} - - -MaybeObject* ExternalFloat32Array::SetValue(uint32_t index, Object* value) { - float cast_value = static_cast<float>(OS::nan_value()); - Heap* heap = GetHeap(); - if (index < static_cast<uint32_t>(length())) { + float cast_value = static_cast<float>(base::OS::nan_value()); + if (index < static_cast<uint32_t>(array->length())) { if (value->IsSmi()) { - int int_value = Smi::cast(value)->value(); + int int_value = Handle<Smi>::cast(value)->value(); cast_value = static_cast<float>(int_value); } else if (value->IsHeapNumber()) { - double double_value = HeapNumber::cast(value)->value(); + double double_value = Handle<HeapNumber>::cast(value)->value(); cast_value = static_cast<float>(double_value); } else { // Clamp undefined to NaN (default). All other types have been // converted to a number type further up in the call chain. 
- ASSERT(value->IsUndefined()); + DCHECK(value->IsUndefined()); } - set(index, cast_value); + array->set(index, cast_value); } - return heap->AllocateHeapNumber(cast_value); + return array->GetIsolate()->factory()->NewNumber(cast_value); } @@ -14716,34 +15091,23 @@ Handle<ExternalFloat64Array> array, uint32_t index, Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); -} - - -MaybeObject* ExternalFloat64Array::SetValue(uint32_t index, Object* value) { - double double_value = OS::nan_value(); - Heap* heap = GetHeap(); - if (index < static_cast<uint32_t>(length())) { - if (value->IsSmi()) { - int int_value = Smi::cast(value)->value(); - double_value = static_cast<double>(int_value); - } else if (value->IsHeapNumber()) { - double_value = HeapNumber::cast(value)->value(); + double double_value = base::OS::nan_value(); + if (index < static_cast<uint32_t>(array->length())) { + if (value->IsNumber()) { + double_value = value->Number(); } else { // Clamp undefined to NaN (default). All other types have been // converted to a number type further up in the call chain. 
- ASSERT(value->IsUndefined()); + DCHECK(value->IsUndefined()); } - set(index, double_value); + array->set(index, double_value); } - return heap->AllocateHeapNumber(double_value); + return array->GetIsolate()->factory()->NewNumber(double_value); } PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) { - ASSERT(!HasFastProperties()); + DCHECK(!HasFastProperties()); Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry()); return PropertyCell::cast(value); } @@ -14752,32 +15116,26 @@ Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell( Handle<JSGlobalObject> global, Handle<Name> name) { - ASSERT(!global->HasFastProperties()); - int entry = global->property_dictionary()->FindEntry(*name); + DCHECK(!global->HasFastProperties()); + int entry = global->property_dictionary()->FindEntry(name); if (entry == NameDictionary::kNotFound) { Isolate* isolate = global->GetIsolate(); Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell( isolate->factory()->the_hole_value()); PropertyDetails details(NONE, NORMAL, 0); details = details.AsDeleted(); - Handle<NameDictionary> dictionary = NameDictionaryAdd( + Handle<NameDictionary> dictionary = NameDictionary::Add( handle(global->property_dictionary()), name, cell, details); global->set_properties(*dictionary); return cell; } else { Object* value = global->property_dictionary()->ValueAt(entry); - ASSERT(value->IsPropertyCell()); + DCHECK(value->IsPropertyCell()); return handle(PropertyCell::cast(value)); } } -MaybeObject* StringTable::LookupString(String* string, Object** s) { - InternalizedStringKey key(string); - return LookupKey(&key, s); -} - - // This class is used for looking up two character strings in the string table. // If we don't have a hit we don't want to waste much time so we unroll the // string hash calculation loop here for speed. 
Doesn't work if the two @@ -14810,11 +15168,11 @@ uint16_t chars[2] = {c1, c2}; uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed); hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask; - ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash)); + DCHECK_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash)); #endif } - bool IsMatch(Object* o) { + bool IsMatch(Object* o) V8_OVERRIDE { if (!o->IsString()) return false; String* other = String::cast(o); if (other->length() != 2) return false; @@ -14822,17 +15180,17 @@ return other->Get(1) == c2_; } - uint32_t Hash() { return hash_; } - uint32_t HashForObject(Object* key) { + uint32_t Hash() V8_OVERRIDE { return hash_; } + uint32_t HashForObject(Object* key) V8_OVERRIDE { if (!key->IsString()) return 0; return String::cast(key)->Hash(); } - Object* AsObject(Heap* heap) { + Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { // The TwoCharHashTableKey is only used for looking in the string // table, not for adding to it. 
UNREACHABLE(); - return NULL; + return MaybeHandle<Object>().ToHandleChecked(); } private: @@ -14842,179 +15200,171 @@ }; -bool StringTable::LookupStringIfExists(String* string, String** result) { +MaybeHandle<String> StringTable::InternalizeStringIfExists( + Isolate* isolate, + Handle<String> string) { + if (string->IsInternalizedString()) { + return string; + } + return LookupStringIfExists(isolate, string); +} + + +MaybeHandle<String> StringTable::LookupStringIfExists( + Isolate* isolate, + Handle<String> string) { + Handle<StringTable> string_table = isolate->factory()->string_table(); InternalizedStringKey key(string); - int entry = FindEntry(&key); + int entry = string_table->FindEntry(&key); if (entry == kNotFound) { - return false; + return MaybeHandle<String>(); } else { - *result = String::cast(KeyAt(entry)); - ASSERT(StringShape(*result).IsInternalized()); - return true; + Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate); + DCHECK(StringShape(*result).IsInternalized()); + return result; } } -bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1, - uint16_t c2, - String** result) { - TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed()); - int entry = FindEntry(&key); +MaybeHandle<String> StringTable::LookupTwoCharsStringIfExists( + Isolate* isolate, + uint16_t c1, + uint16_t c2) { + Handle<StringTable> string_table = isolate->factory()->string_table(); + TwoCharHashTableKey key(c1, c2, isolate->heap()->HashSeed()); + int entry = string_table->FindEntry(&key); if (entry == kNotFound) { - return false; + return MaybeHandle<String>(); } else { - *result = String::cast(KeyAt(entry)); - ASSERT(StringShape(*result).IsInternalized()); - return true; + Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate); + DCHECK(StringShape(*result).IsInternalized()); + return result; } } -MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) { - int entry = FindEntry(key); +Handle<String> 
StringTable::LookupString(Isolate* isolate, + Handle<String> string) { + InternalizedStringKey key(string); + return LookupKey(isolate, &key); +} + + +Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) { + Handle<StringTable> table = isolate->factory()->string_table(); + int entry = table->FindEntry(key); // String already in table. if (entry != kNotFound) { - *s = KeyAt(entry); - return this; + return handle(String::cast(table->KeyAt(entry)), isolate); } // Adding new string. Grow table if needed. - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + table = StringTable::EnsureCapacity(table, 1, key); // Create string object. - Object* string; - { MaybeObject* maybe_string = key->AsObject(GetHeap()); - if (!maybe_string->ToObject(&string)) return maybe_string; - } - - // If the string table grew as part of EnsureCapacity, obj is not - // the current string table and therefore we cannot use - // StringTable::cast here. - StringTable* table = reinterpret_cast<StringTable*>(obj); + Handle<Object> string = key->AsHandle(isolate); + // There must be no attempts to internalize strings that could throw + // InvalidStringLength error. + CHECK(!string.is_null()); // Add the new string and return it along with the string table. entry = table->FindInsertionEntry(key->Hash()); - table->set(EntryToIndex(entry), string); + table->set(EntryToIndex(entry), *string); table->ElementAdded(); - *s = string; - return table; + + isolate->factory()->set_string_table(table); + return Handle<String>::cast(string); } -Object* CompilationCacheTable::Lookup(String* src, Context* context) { - SharedFunctionInfo* shared = context->closure()->shared(); - StringSharedKey key(src, - shared, - FLAG_use_strict ? 
STRICT : SLOPPY, +Handle<Object> CompilationCacheTable::Lookup(Handle<String> src, + Handle<Context> context) { + Isolate* isolate = GetIsolate(); + Handle<SharedFunctionInfo> shared(context->closure()->shared()); + StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY, RelocInfo::kNoPosition); int entry = FindEntry(&key); - if (entry == kNotFound) return GetHeap()->undefined_value(); - return get(EntryToIndex(entry) + 1); + if (entry == kNotFound) return isolate->factory()->undefined_value(); + return Handle<Object>(get(EntryToIndex(entry) + 1), isolate); } -Object* CompilationCacheTable::LookupEval(String* src, - Context* context, - StrictMode strict_mode, - int scope_position) { - StringSharedKey key(src, - context->closure()->shared(), - strict_mode, - scope_position); +Handle<Object> CompilationCacheTable::LookupEval(Handle<String> src, + Handle<Context> context, + StrictMode strict_mode, + int scope_position) { + Isolate* isolate = GetIsolate(); + Handle<SharedFunctionInfo> shared(context->closure()->shared()); + StringSharedKey key(src, shared, strict_mode, scope_position); int entry = FindEntry(&key); - if (entry == kNotFound) return GetHeap()->undefined_value(); - return get(EntryToIndex(entry) + 1); + if (entry == kNotFound) return isolate->factory()->undefined_value(); + return Handle<Object>(get(EntryToIndex(entry) + 1), isolate); } -Object* CompilationCacheTable::LookupRegExp(String* src, - JSRegExp::Flags flags) { +Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src, + JSRegExp::Flags flags) { + Isolate* isolate = GetIsolate(); + DisallowHeapAllocation no_allocation; RegExpKey key(src, flags); int entry = FindEntry(&key); - if (entry == kNotFound) return GetHeap()->undefined_value(); - return get(EntryToIndex(entry) + 1); + if (entry == kNotFound) return isolate->factory()->undefined_value(); + return Handle<Object>(get(EntryToIndex(entry) + 1), isolate); } -MaybeObject* CompilationCacheTable::Put(String* src, - Context* 
context, - Object* value) { - SharedFunctionInfo* shared = context->closure()->shared(); - StringSharedKey key(src, - shared, - FLAG_use_strict ? STRICT : SLOPPY, +Handle<CompilationCacheTable> CompilationCacheTable::Put( + Handle<CompilationCacheTable> cache, Handle<String> src, + Handle<Context> context, Handle<Object> value) { + Isolate* isolate = cache->GetIsolate(); + Handle<SharedFunctionInfo> shared(context->closure()->shared()); + StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY, RelocInfo::kNoPosition); - CompilationCacheTable* cache; - MaybeObject* maybe_cache = EnsureCapacity(1, &key); - if (!maybe_cache->To(&cache)) return maybe_cache; - - Object* k; - MaybeObject* maybe_k = key.AsObject(GetHeap()); - if (!maybe_k->To(&k)) return maybe_k; - + cache = EnsureCapacity(cache, 1, &key); + Handle<Object> k = key.AsHandle(isolate); int entry = cache->FindInsertionEntry(key.Hash()); - cache->set(EntryToIndex(entry), k); - cache->set(EntryToIndex(entry) + 1, value); + cache->set(EntryToIndex(entry), *k); + cache->set(EntryToIndex(entry) + 1, *value); cache->ElementAdded(); return cache; } -MaybeObject* CompilationCacheTable::PutEval(String* src, - Context* context, - SharedFunctionInfo* value, - int scope_position) { - StringSharedKey key(src, - context->closure()->shared(), - value->strict_mode(), - scope_position); - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, &key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - - CompilationCacheTable* cache = - reinterpret_cast<CompilationCacheTable*>(obj); +Handle<CompilationCacheTable> CompilationCacheTable::PutEval( + Handle<CompilationCacheTable> cache, Handle<String> src, + Handle<Context> context, Handle<SharedFunctionInfo> value, + int scope_position) { + Isolate* isolate = cache->GetIsolate(); + Handle<SharedFunctionInfo> shared(context->closure()->shared()); + StringSharedKey key(src, shared, value->strict_mode(), scope_position); + cache = EnsureCapacity(cache, 1, 
&key); + Handle<Object> k = key.AsHandle(isolate); int entry = cache->FindInsertionEntry(key.Hash()); - - Object* k; - { MaybeObject* maybe_k = key.AsObject(GetHeap()); - if (!maybe_k->ToObject(&k)) return maybe_k; - } - - cache->set(EntryToIndex(entry), k); - cache->set(EntryToIndex(entry) + 1, value); + cache->set(EntryToIndex(entry), *k); + cache->set(EntryToIndex(entry) + 1, *value); cache->ElementAdded(); return cache; } -MaybeObject* CompilationCacheTable::PutRegExp(String* src, - JSRegExp::Flags flags, - FixedArray* value) { +Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp( + Handle<CompilationCacheTable> cache, Handle<String> src, + JSRegExp::Flags flags, Handle<FixedArray> value) { RegExpKey key(src, flags); - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, &key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - - CompilationCacheTable* cache = - reinterpret_cast<CompilationCacheTable*>(obj); + cache = EnsureCapacity(cache, 1, &key); int entry = cache->FindInsertionEntry(key.Hash()); // We store the value in the key slot, and compare the search key // to the stored value with a custon IsMatch function during lookups. - cache->set(EntryToIndex(entry), value); - cache->set(EntryToIndex(entry) + 1, value); + cache->set(EntryToIndex(entry), *value); + cache->set(EntryToIndex(entry) + 1, *value); cache->ElementAdded(); return cache; } void CompilationCacheTable::Remove(Object* value) { + DisallowHeapAllocation no_allocation; Object* the_hole_value = GetHeap()->the_hole_value(); for (int entry = 0, size = Capacity(); entry < size; entry++) { int entry_index = EntryToIndex(entry); @@ -15032,9 +15382,9 @@ // StringsKey used for HashTable where key is array of internalized strings. 
class StringsKey : public HashTableKey { public: - explicit StringsKey(FixedArray* strings) : strings_(strings) { } + explicit StringsKey(Handle<FixedArray> strings) : strings_(strings) { } - bool IsMatch(Object* strings) { + bool IsMatch(Object* strings) V8_OVERRIDE { FixedArray* o = FixedArray::cast(strings); int len = strings_->length(); if (o->length() != len) return false; @@ -15044,9 +15394,9 @@ return true; } - uint32_t Hash() { return HashForObject(strings_); } + uint32_t Hash() V8_OVERRIDE { return HashForObject(*strings_); } - uint32_t HashForObject(Object* obj) { + uint32_t HashForObject(Object* obj) V8_OVERRIDE { FixedArray* strings = FixedArray::cast(obj); int len = strings->length(); uint32_t hash = 0; @@ -15056,96 +15406,79 @@ return hash; } - Object* AsObject(Heap* heap) { return strings_; } + Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { return strings_; } private: - FixedArray* strings_; + Handle<FixedArray> strings_; }; Object* MapCache::Lookup(FixedArray* array) { - StringsKey key(array); + DisallowHeapAllocation no_alloc; + StringsKey key(handle(array)); int entry = FindEntry(&key); if (entry == kNotFound) return GetHeap()->undefined_value(); return get(EntryToIndex(entry) + 1); } -MaybeObject* MapCache::Put(FixedArray* array, Map* value) { +Handle<MapCache> MapCache::Put( + Handle<MapCache> map_cache, Handle<FixedArray> array, Handle<Map> value) { StringsKey key(array); - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, &key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - MapCache* cache = reinterpret_cast<MapCache*>(obj); - int entry = cache->FindInsertionEntry(key.Hash()); - cache->set(EntryToIndex(entry), array); - cache->set(EntryToIndex(entry) + 1, value); - cache->ElementAdded(); - return cache; + Handle<MapCache> new_cache = EnsureCapacity(map_cache, 1, &key); + int entry = new_cache->FindInsertionEntry(key.Hash()); + new_cache->set(EntryToIndex(entry), *array); + new_cache->set(EntryToIndex(entry) + 1, 
*value); + new_cache->ElementAdded(); + return new_cache; } -template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap, - int at_least_space_for, - PretenureFlag pretenure) { - Object* obj; - { MaybeObject* maybe_obj = - HashTable<Shape, Key>::Allocate( - heap, - at_least_space_for, - USE_DEFAULT_MINIMUM_CAPACITY, - pretenure); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } +template<typename Derived, typename Shape, typename Key> +Handle<Derived> Dictionary<Derived, Shape, Key>::New( + Isolate* isolate, + int at_least_space_for, + PretenureFlag pretenure) { + DCHECK(0 <= at_least_space_for); + Handle<Derived> dict = DerivedHashTable::New(isolate, + at_least_space_for, + USE_DEFAULT_MINIMUM_CAPACITY, + pretenure); + // Initialize the next enumeration index. - Dictionary<Shape, Key>::cast(obj)-> - SetNextEnumerationIndex(PropertyDetails::kInitialIndex); - return obj; + dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex); + return dict; } -void NameDictionary::DoGenerateNewEnumerationIndices( - Handle<NameDictionary> dictionary) { - CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(), - dictionary->GenerateNewEnumerationIndices()); -} - -template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() { - Heap* heap = Dictionary<Shape, Key>::GetHeap(); - int length = HashTable<Shape, Key>::NumberOfElements(); +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices( + Handle<Derived> dictionary) { + Factory* factory = dictionary->GetIsolate()->factory(); + int length = dictionary->NumberOfElements(); // Allocate and initialize iteration order array. 
- Object* obj; - { MaybeObject* maybe_obj = heap->AllocateFixedArray(length); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* iteration_order = FixedArray::cast(obj); + Handle<FixedArray> iteration_order = factory->NewFixedArray(length); for (int i = 0; i < length; i++) { iteration_order->set(i, Smi::FromInt(i)); } // Allocate array with enumeration order. - { MaybeObject* maybe_obj = heap->AllocateFixedArray(length); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* enumeration_order = FixedArray::cast(obj); + Handle<FixedArray> enumeration_order = factory->NewFixedArray(length); // Fill the enumeration order array with property details. - int capacity = HashTable<Shape, Key>::Capacity(); + int capacity = dictionary->Capacity(); int pos = 0; for (int i = 0; i < capacity; i++) { - if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) { - int index = DetailsAt(i).dictionary_index(); + if (dictionary->IsKey(dictionary->KeyAt(i))) { + int index = dictionary->DetailsAt(i).dictionary_index(); enumeration_order->set(pos++, Smi::FromInt(index)); } } // Sort the arrays wrt. enumeration order. - iteration_order->SortPairs(enumeration_order, enumeration_order->length()); + iteration_order->SortPairs(*enumeration_order, enumeration_order->length()); // Overwrite the enumeration_order with the enumeration indices. for (int i = 0; i < length; i++) { @@ -15155,135 +15488,125 @@ } // Update the dictionary with new indices. 
- capacity = HashTable<Shape, Key>::Capacity(); + capacity = dictionary->Capacity(); pos = 0; for (int i = 0; i < capacity; i++) { - if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) { + if (dictionary->IsKey(dictionary->KeyAt(i))) { int enum_index = Smi::cast(enumeration_order->get(pos++))->value(); - PropertyDetails details = DetailsAt(i); + PropertyDetails details = dictionary->DetailsAt(i); PropertyDetails new_details = PropertyDetails( details.attributes(), details.type(), enum_index); - DetailsAtPut(i, new_details); + dictionary->DetailsAtPut(i, new_details); } } // Set the next enumeration index. - SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length); - return this; + dictionary->SetNextEnumerationIndex(PropertyDetails::kInitialIndex+length); } -template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) { + +template<typename Derived, typename Shape, typename Key> +Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity( + Handle<Derived> dictionary, int n, Key key) { // Check whether there are enough enumeration indices to add n elements. if (Shape::kIsEnumerable && - !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) { + !PropertyDetails::IsValidIndex(dictionary->NextEnumerationIndex() + n)) { // If not, we generate new indices for the properties. 
- Object* result; - { MaybeObject* maybe_result = GenerateNewEnumerationIndices(); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + GenerateNewEnumerationIndices(dictionary); } - return HashTable<Shape, Key>::EnsureCapacity(n, key); + return DerivedHashTable::EnsureCapacity(dictionary, n, key); } -template<typename Shape, typename Key> -Object* Dictionary<Shape, Key>::DeleteProperty(int entry, - JSReceiver::DeleteMode mode) { - Heap* heap = Dictionary<Shape, Key>::GetHeap(); - PropertyDetails details = DetailsAt(entry); +template<typename Derived, typename Shape, typename Key> +Handle<Object> Dictionary<Derived, Shape, Key>::DeleteProperty( + Handle<Derived> dictionary, + int entry, + JSObject::DeleteMode mode) { + Factory* factory = dictionary->GetIsolate()->factory(); + PropertyDetails details = dictionary->DetailsAt(entry); // Ignore attributes if forcing a deletion. if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) { - return heap->false_value(); + return factory->false_value(); } - SetEntry(entry, heap->the_hole_value(), heap->the_hole_value()); - HashTable<Shape, Key>::ElementRemoved(); - return heap->true_value(); -} - -template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::Shrink(Key key) { - return HashTable<Shape, Key>::Shrink(key); + dictionary->SetEntry( + entry, factory->the_hole_value(), factory->the_hole_value()); + dictionary->ElementRemoved(); + return factory->true_value(); } -template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) { - int entry = this->FindEntry(key); +template<typename Derived, typename Shape, typename Key> +Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut( + Handle<Derived> dictionary, Key key, Handle<Object> value) { + int entry = dictionary->FindEntry(key); // If the entry is present set the value; - if (entry != Dictionary<Shape, Key>::kNotFound) { - ValueAtPut(entry, value); - return this; + if (entry != 
Dictionary::kNotFound) { + dictionary->ValueAtPut(entry, *value); + return dictionary; } // Check whether the dictionary should be extended. - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - - Object* k; - { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key); - if (!maybe_k->ToObject(&k)) return maybe_k; - } + dictionary = EnsureCapacity(dictionary, 1, key); +#ifdef DEBUG + USE(Shape::AsHandle(dictionary->GetIsolate(), key)); +#endif PropertyDetails details = PropertyDetails(NONE, NORMAL, 0); - return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details, - Dictionary<Shape, Key>::Hash(key)); + AddEntry(dictionary, key, value, details, dictionary->Hash(key)); + return dictionary; } -template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::Add(Key key, - Object* value, - PropertyDetails details) { +template<typename Derived, typename Shape, typename Key> +Handle<Derived> Dictionary<Derived, Shape, Key>::Add( + Handle<Derived> dictionary, + Key key, + Handle<Object> value, + PropertyDetails details) { // Valdate key is absent. - SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound)); + SLOW_DCHECK((dictionary->FindEntry(key) == Dictionary::kNotFound)); // Check whether the dictionary should be extended. - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, key); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + dictionary = EnsureCapacity(dictionary, 1, key); - return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details, - Dictionary<Shape, Key>::Hash(key)); + AddEntry(dictionary, key, value, details, dictionary->Hash(key)); + return dictionary; } // Add a key, value pair to the dictionary. 
-template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key, - Object* value, - PropertyDetails details, - uint32_t hash) { +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::AddEntry( + Handle<Derived> dictionary, + Key key, + Handle<Object> value, + PropertyDetails details, + uint32_t hash) { // Compute the key object. - Object* k; - { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key); - if (!maybe_k->ToObject(&k)) return maybe_k; - } + Handle<Object> k = Shape::AsHandle(dictionary->GetIsolate(), key); - uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash); + uint32_t entry = dictionary->FindInsertionEntry(hash); // Insert element at empty or deleted entry if (!details.IsDeleted() && details.dictionary_index() == 0 && Shape::kIsEnumerable) { // Assign an enumeration index to the property and update // SetNextEnumerationIndex. - int index = NextEnumerationIndex(); + int index = dictionary->NextEnumerationIndex(); details = PropertyDetails(details.attributes(), details.type(), index); - SetNextEnumerationIndex(index + 1); + dictionary->SetNextEnumerationIndex(index + 1); } - SetEntry(entry, k, value, details); - ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber() || - Dictionary<Shape, Key>::KeyAt(entry)->IsName())); - HashTable<Shape, Key>::ElementAdded(); - return this; + dictionary->SetEntry(entry, k, value, details); + DCHECK((dictionary->KeyAt(entry)->IsNumber() || + dictionary->KeyAt(entry)->IsName())); + dictionary->ElementAdded(); } void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) { + DisallowHeapAllocation no_allocation; // If the dictionary requires slow elements an element has already // been added at a high index. 
if (requires_slow_elements()) return; @@ -15301,105 +15624,86 @@ } } + Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry( Handle<SeededNumberDictionary> dictionary, uint32_t key, Handle<Object> value, PropertyDetails details) { - CALL_HEAP_FUNCTION(dictionary->GetIsolate(), - dictionary->AddNumberEntry(key, *value, details), - SeededNumberDictionary); -} - -MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key, - Object* value, - PropertyDetails details) { - UpdateMaxNumberKey(key); - SLOW_ASSERT(this->FindEntry(key) == kNotFound); - return Add(key, value, details); + dictionary->UpdateMaxNumberKey(key); + SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound); + return Add(dictionary, key, value, details); } -MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key, - Object* value) { - SLOW_ASSERT(this->FindEntry(key) == kNotFound); - return Add(key, value, PropertyDetails(NONE, NORMAL, 0)); +Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry( + Handle<UnseededNumberDictionary> dictionary, + uint32_t key, + Handle<Object> value) { + SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound); + return Add(dictionary, key, value, PropertyDetails(NONE, NORMAL, 0)); } -MaybeObject* SeededNumberDictionary::AtNumberPut(uint32_t key, Object* value) { - UpdateMaxNumberKey(key); - return AtPut(key, value); +Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut( + Handle<SeededNumberDictionary> dictionary, + uint32_t key, + Handle<Object> value) { + dictionary->UpdateMaxNumberKey(key); + return AtPut(dictionary, key, value); } -MaybeObject* UnseededNumberDictionary::AtNumberPut(uint32_t key, - Object* value) { - return AtPut(key, value); +Handle<UnseededNumberDictionary> UnseededNumberDictionary::AtNumberPut( + Handle<UnseededNumberDictionary> dictionary, + uint32_t key, + Handle<Object> value) { + return AtPut(dictionary, key, value); } Handle<SeededNumberDictionary> SeededNumberDictionary::Set( 
Handle<SeededNumberDictionary> dictionary, - uint32_t index, + uint32_t key, Handle<Object> value, PropertyDetails details) { - CALL_HEAP_FUNCTION(dictionary->GetIsolate(), - dictionary->Set(index, *value, details), - SeededNumberDictionary); -} - - -Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set( - Handle<UnseededNumberDictionary> dictionary, - uint32_t index, - Handle<Object> value) { - CALL_HEAP_FUNCTION(dictionary->GetIsolate(), - dictionary->Set(index, *value), - UnseededNumberDictionary); -} - - -MaybeObject* SeededNumberDictionary::Set(uint32_t key, - Object* value, - PropertyDetails details) { - int entry = FindEntry(key); - if (entry == kNotFound) return AddNumberEntry(key, value, details); + int entry = dictionary->FindEntry(key); + if (entry == kNotFound) { + return AddNumberEntry(dictionary, key, value, details); + } // Preserve enumeration index. details = PropertyDetails(details.attributes(), details.type(), - DetailsAt(entry).dictionary_index()); - MaybeObject* maybe_object_key = - SeededNumberDictionaryShape::AsObject(GetHeap(), key); - Object* object_key; - if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key; - SetEntry(entry, object_key, value, details); - return this; + dictionary->DetailsAt(entry).dictionary_index()); + Handle<Object> object_key = + SeededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key); + dictionary->SetEntry(entry, object_key, value, details); + return dictionary; } -MaybeObject* UnseededNumberDictionary::Set(uint32_t key, - Object* value) { - int entry = FindEntry(key); - if (entry == kNotFound) return AddNumberEntry(key, value); - MaybeObject* maybe_object_key = - UnseededNumberDictionaryShape::AsObject(GetHeap(), key); - Object* object_key; - if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key; - SetEntry(entry, object_key, value); - return this; +Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set( + Handle<UnseededNumberDictionary> dictionary, + 
uint32_t key, + Handle<Object> value) { + int entry = dictionary->FindEntry(key); + if (entry == kNotFound) return AddNumberEntry(dictionary, key, value); + Handle<Object> object_key = + UnseededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key); + dictionary->SetEntry(entry, object_key, value); + return dictionary; } -template<typename Shape, typename Key> -int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes( +template<typename Derived, typename Shape, typename Key> +int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes( PropertyAttributes filter) { - int capacity = HashTable<Shape, Key>::Capacity(); + int capacity = DerivedHashTable::Capacity(); int result = 0; for (int i = 0; i < capacity; i++) { - Object* k = HashTable<Shape, Key>::KeyAt(i); - if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) { + Object* k = DerivedHashTable::KeyAt(i); + if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) { PropertyDetails details = DetailsAt(i); if (details.IsDeleted()) continue; PropertyAttributes attr = details.attributes(); @@ -15410,451 +15714,644 @@ } -template<typename Shape, typename Key> -int Dictionary<Shape, Key>::NumberOfEnumElements() { +template<typename Derived, typename Shape, typename Key> +int Dictionary<Derived, Shape, Key>::NumberOfEnumElements() { return NumberOfElementsFilterAttributes( static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC)); } -template<typename Shape, typename Key> -void Dictionary<Shape, Key>::CopyKeysTo( - FixedArray* storage, - PropertyAttributes filter, - typename Dictionary<Shape, Key>::SortMode sort_mode) { - ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter)); - int capacity = HashTable<Shape, Key>::Capacity(); - int index = 0; - for (int i = 0; i < capacity; i++) { - Object* k = HashTable<Shape, Key>::KeyAt(i); - if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) { - PropertyDetails details = DetailsAt(i); - if (details.IsDeleted()) continue; - 
PropertyAttributes attr = details.attributes(); - if ((attr & filter) == 0) storage->set(index++, k); - } - } - if (sort_mode == Dictionary<Shape, Key>::SORTED) { - storage->SortPairs(storage, index); +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::CopyKeysTo( + FixedArray* storage, + PropertyAttributes filter, + typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) { + DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter)); + int capacity = DerivedHashTable::Capacity(); + int index = 0; + for (int i = 0; i < capacity; i++) { + Object* k = DerivedHashTable::KeyAt(i); + if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) { + PropertyDetails details = DetailsAt(i); + if (details.IsDeleted()) continue; + PropertyAttributes attr = details.attributes(); + if ((attr & filter) == 0) storage->set(index++, k); + } + } + if (sort_mode == Dictionary::SORTED) { + storage->SortPairs(storage, index); + } + DCHECK(storage->length() >= index); +} + + +struct EnumIndexComparator { + explicit EnumIndexComparator(NameDictionary* dict) : dict(dict) { } + bool operator() (Smi* a, Smi* b) { + PropertyDetails da(dict->DetailsAt(a->value())); + PropertyDetails db(dict->DetailsAt(b->value())); + return da.dictionary_index() < db.dictionary_index(); + } + NameDictionary* dict; +}; + + +void NameDictionary::CopyEnumKeysTo(FixedArray* storage) { + int length = storage->length(); + int capacity = Capacity(); + int properties = 0; + for (int i = 0; i < capacity; i++) { + Object* k = KeyAt(i); + if (IsKey(k) && !k->IsSymbol()) { + PropertyDetails details = DetailsAt(i); + if (details.IsDeleted() || details.IsDontEnum()) continue; + storage->set(properties, Smi::FromInt(i)); + properties++; + if (properties == length) break; + } + } + CHECK_EQ(length, properties); + EnumIndexComparator cmp(this); + Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress()); + std::sort(start, start + length, cmp); + for 
(int i = 0; i < length; i++) { + int index = Smi::cast(storage->get(i))->value(); + storage->set(i, KeyAt(index)); + } +} + + +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::CopyKeysTo( + FixedArray* storage, + int index, + PropertyAttributes filter, + typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) { + DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter)); + int capacity = DerivedHashTable::Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = DerivedHashTable::KeyAt(i); + if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) { + PropertyDetails details = DetailsAt(i); + if (details.IsDeleted()) continue; + PropertyAttributes attr = details.attributes(); + if ((attr & filter) == 0) storage->set(index++, k); + } + } + if (sort_mode == Dictionary::SORTED) { + storage->SortPairs(storage, index); + } + DCHECK(storage->length() >= index); +} + + +// Backwards lookup (slow). +template<typename Derived, typename Shape, typename Key> +Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) { + int capacity = DerivedHashTable::Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = DerivedHashTable::KeyAt(i); + if (Dictionary::IsKey(k)) { + Object* e = ValueAt(i); + if (e->IsPropertyCell()) { + e = PropertyCell::cast(e)->value(); + } + if (e == value) return k; + } + } + Heap* heap = Dictionary::GetHeap(); + return heap->undefined_value(); +} + + +Object* ObjectHashTable::Lookup(Handle<Object> key) { + DisallowHeapAllocation no_gc; + DCHECK(IsKey(*key)); + + // If the object does not have an identity hash, it was never used as a key. 
+ Object* hash = key->GetHash(); + if (hash->IsUndefined()) { + return GetHeap()->the_hole_value(); + } + int entry = FindEntry(key); + if (entry == kNotFound) return GetHeap()->the_hole_value(); + return get(EntryToIndex(entry) + 1); +} + + +Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table, + Handle<Object> key, + Handle<Object> value) { + DCHECK(table->IsKey(*key)); + DCHECK(!value->IsTheHole()); + + Isolate* isolate = table->GetIsolate(); + + // Make sure the key object has an identity hash code. + Handle<Smi> hash = Object::GetOrCreateHash(isolate, key); + + int entry = table->FindEntry(key); + + // Key is already in table, just overwrite value. + if (entry != kNotFound) { + table->set(EntryToIndex(entry) + 1, *value); + return table; + } + + // Check whether the hash table should be extended. + table = EnsureCapacity(table, 1, key); + table->AddEntry(table->FindInsertionEntry(hash->value()), + *key, + *value); + return table; +} + + +Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table, + Handle<Object> key, + bool* was_present) { + DCHECK(table->IsKey(*key)); + + Object* hash = key->GetHash(); + if (hash->IsUndefined()) { + *was_present = false; + return table; + } + + int entry = table->FindEntry(key); + if (entry == kNotFound) { + *was_present = false; + return table; + } + + *was_present = true; + table->RemoveEntry(entry); + return Shrink(table, key); +} + + +void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) { + set(EntryToIndex(entry), key); + set(EntryToIndex(entry) + 1, value); + ElementAdded(); +} + + +void ObjectHashTable::RemoveEntry(int entry) { + set_the_hole(EntryToIndex(entry)); + set_the_hole(EntryToIndex(entry) + 1); + ElementRemoved(); +} + + +Object* WeakHashTable::Lookup(Handle<Object> key) { + DisallowHeapAllocation no_gc; + DCHECK(IsKey(*key)); + int entry = FindEntry(key); + if (entry == kNotFound) return GetHeap()->the_hole_value(); + return 
get(EntryToValueIndex(entry)); +} + + +Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table, + Handle<Object> key, + Handle<Object> value) { + DCHECK(table->IsKey(*key)); + int entry = table->FindEntry(key); + // Key is already in table, just overwrite value. + if (entry != kNotFound) { + // TODO(ulan): Skipping write barrier is a temporary solution to avoid + // memory leaks. Remove this once we have special visitor for weak fixed + // arrays. + table->set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER); + return table; } - ASSERT(storage->length() >= index); + + // Check whether the hash table should be extended. + table = EnsureCapacity(table, 1, key, TENURED); + + table->AddEntry(table->FindInsertionEntry(table->Hash(key)), key, value); + return table; } -struct EnumIndexComparator { - explicit EnumIndexComparator(NameDictionary* dict) : dict(dict) { } - bool operator() (Smi* a, Smi* b) { - PropertyDetails da(dict->DetailsAt(a->value())); - PropertyDetails db(dict->DetailsAt(b->value())); - return da.dictionary_index() < db.dictionary_index(); - } - NameDictionary* dict; -}; +void WeakHashTable::AddEntry(int entry, + Handle<Object> key, + Handle<Object> value) { + DisallowHeapAllocation no_allocation; + // TODO(ulan): Skipping write barrier is a temporary solution to avoid + // memory leaks. Remove this once we have special visitor for weak fixed + // arrays. 
+ set(EntryToIndex(entry), *key, SKIP_WRITE_BARRIER); + set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER); + ElementAdded(); +} -void NameDictionary::CopyEnumKeysTo(FixedArray* storage) { - int length = storage->length(); - int capacity = Capacity(); - int properties = 0; - for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k) && !k->IsSymbol()) { - PropertyDetails details = DetailsAt(i); - if (details.IsDeleted() || details.IsDontEnum()) continue; - storage->set(properties, Smi::FromInt(i)); - properties++; - if (properties == length) break; - } - } - EnumIndexComparator cmp(this); - Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress()); - std::sort(start, start + length, cmp); - for (int i = 0; i < length; i++) { - int index = Smi::cast(storage->get(i))->value(); - storage->set(i, KeyAt(index)); +template<class Derived, class Iterator, int entrysize> +Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Allocate( + Isolate* isolate, int capacity, PretenureFlag pretenure) { + // Capacity must be a power of two, since we depend on being able + // to divide and multiple by 2 (kLoadFactor) to derive capacity + // from number of buckets. If we decide to change kLoadFactor + // to something other than 2, capacity should be stored as another + // field of this object. 
+ capacity = RoundUpToPowerOf2(Max(kMinCapacity, capacity)); + if (capacity > kMaxCapacity) { + v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true); } + int num_buckets = capacity / kLoadFactor; + Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray( + kHashTableStartIndex + num_buckets + (capacity * kEntrySize), pretenure); + backing_store->set_map_no_write_barrier( + isolate->heap()->ordered_hash_table_map()); + Handle<Derived> table = Handle<Derived>::cast(backing_store); + for (int i = 0; i < num_buckets; ++i) { + table->set(kHashTableStartIndex + i, Smi::FromInt(kNotFound)); + } + table->SetNumberOfBuckets(num_buckets); + table->SetNumberOfElements(0); + table->SetNumberOfDeletedElements(0); + return table; } -template<typename Shape, typename Key> -void Dictionary<Shape, Key>::CopyKeysTo( - FixedArray* storage, - int index, - PropertyAttributes filter, - typename Dictionary<Shape, Key>::SortMode sort_mode) { - ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter)); - int capacity = HashTable<Shape, Key>::Capacity(); - for (int i = 0; i < capacity; i++) { - Object* k = HashTable<Shape, Key>::KeyAt(i); - if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) { - PropertyDetails details = DetailsAt(i); - if (details.IsDeleted()) continue; - PropertyAttributes attr = details.attributes(); - if ((attr & filter) == 0) storage->set(index++, k); - } - } - if (sort_mode == Dictionary<Shape, Key>::SORTED) { - storage->SortPairs(storage, index); - } - ASSERT(storage->length() >= index); +template<class Derived, class Iterator, int entrysize> +Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::EnsureGrowable( + Handle<Derived> table) { + DCHECK(!table->IsObsolete()); + + int nof = table->NumberOfElements(); + int nod = table->NumberOfDeletedElements(); + int capacity = table->Capacity(); + if ((nof + nod) < capacity) return table; + // Don't need to grow if we can simply clear out deleted entries 
instead. + // Note that we can't compact in place, though, so we always allocate + // a new table. + return Rehash(table, (nod < (capacity >> 1)) ? capacity << 1 : capacity); } -// Backwards lookup (slow). -template<typename Shape, typename Key> -Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) { - int capacity = HashTable<Shape, Key>::Capacity(); - for (int i = 0; i < capacity; i++) { - Object* k = HashTable<Shape, Key>::KeyAt(i); - if (Dictionary<Shape, Key>::IsKey(k)) { - Object* e = ValueAt(i); - if (e->IsPropertyCell()) { - e = PropertyCell::cast(e)->value(); - } - if (e == value) return k; - } - } - Heap* heap = Dictionary<Shape, Key>::GetHeap(); - return heap->undefined_value(); +template<class Derived, class Iterator, int entrysize> +Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Shrink( + Handle<Derived> table) { + DCHECK(!table->IsObsolete()); + + int nof = table->NumberOfElements(); + int capacity = table->Capacity(); + if (nof >= (capacity >> 2)) return table; + return Rehash(table, capacity / 2); } -MaybeObject* NameDictionary::TransformPropertiesToFastFor( - JSObject* obj, int unused_property_fields) { - // Make sure we preserve dictionary representation if there are too many - // descriptors. - int number_of_elements = NumberOfElements(); - if (number_of_elements > kMaxNumberOfDescriptors) return obj; +template<class Derived, class Iterator, int entrysize> +Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Clear( + Handle<Derived> table) { + DCHECK(!table->IsObsolete()); + + Handle<Derived> new_table = + Allocate(table->GetIsolate(), + kMinCapacity, + table->GetHeap()->InNewSpace(*table) ? 
NOT_TENURED : TENURED); - if (number_of_elements != NextEnumerationIndex()) { - MaybeObject* maybe_result = GenerateNewEnumerationIndices(); - if (maybe_result->IsFailure()) return maybe_result; - } + table->SetNextTable(*new_table); + table->SetNumberOfDeletedElements(-1); - int instance_descriptor_length = 0; - int number_of_fields = 0; + return new_table; +} - Heap* heap = GetHeap(); - // Compute the length of the instance descriptor. - int capacity = Capacity(); - for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) { - Object* value = ValueAt(i); - PropertyType type = DetailsAt(i).type(); - ASSERT(type != FIELD); - instance_descriptor_length++; - if (type == NORMAL && !value->IsJSFunction()) { - number_of_fields += 1; - } - } +template<class Derived, class Iterator, int entrysize> +Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Remove( + Handle<Derived> table, Handle<Object> key, bool* was_present) { + int entry = table->FindEntry(key); + if (entry == kNotFound) { + *was_present = false; + return table; } + *was_present = true; + table->RemoveEntry(entry); + return Shrink(table); +} - int inobject_props = obj->map()->inobject_properties(); - - // Allocate new map. - Map* new_map; - MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors(); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; - new_map->set_dictionary_map(false); - if (instance_descriptor_length == 0) { - ASSERT_LE(unused_property_fields, inobject_props); - // Transform the object. - new_map->set_unused_property_fields(inobject_props); - obj->set_map(new_map); - obj->set_properties(heap->empty_fixed_array()); - // Check that it really works. 
- ASSERT(obj->HasFastProperties()); - return obj; - } +template<class Derived, class Iterator, int entrysize> +Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash( + Handle<Derived> table, int new_capacity) { + DCHECK(!table->IsObsolete()); + + Handle<Derived> new_table = + Allocate(table->GetIsolate(), + new_capacity, + table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED); + int nof = table->NumberOfElements(); + int nod = table->NumberOfDeletedElements(); + int new_buckets = new_table->NumberOfBuckets(); + int new_entry = 0; + int removed_holes_index = 0; + + for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) { + Object* key = table->KeyAt(old_entry); + if (key->IsTheHole()) { + table->SetRemovedIndexAt(removed_holes_index++, old_entry); + continue; + } - // Allocate the instance descriptor. - DescriptorArray* descriptors; - MaybeObject* maybe_descriptors = - DescriptorArray::Allocate(GetIsolate(), instance_descriptor_length); - if (!maybe_descriptors->To(&descriptors)) { - return maybe_descriptors; + Object* hash = key->GetHash(); + int bucket = Smi::cast(hash)->value() & (new_buckets - 1); + Object* chain_entry = new_table->get(kHashTableStartIndex + bucket); + new_table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry)); + int new_index = new_table->EntryToIndex(new_entry); + int old_index = table->EntryToIndex(old_entry); + for (int i = 0; i < entrysize; ++i) { + Object* value = table->get(old_index + i); + new_table->set(new_index + i, value); + } + new_table->set(new_index + kChainOffset, chain_entry); + ++new_entry; } - DescriptorArray::WhitenessWitness witness(descriptors); + DCHECK_EQ(nod, removed_holes_index); - int number_of_allocated_fields = - number_of_fields + unused_property_fields - inobject_props; - if (number_of_allocated_fields < 0) { - // There is enough inobject space for all fields (including unused). 
- number_of_allocated_fields = 0; - unused_property_fields = inobject_props - number_of_fields; - } + new_table->SetNumberOfElements(nof); + table->SetNextTable(*new_table); - // Allocate the fixed array for the fields. - FixedArray* fields; - MaybeObject* maybe_fields = - heap->AllocateFixedArray(number_of_allocated_fields); - if (!maybe_fields->To(&fields)) return maybe_fields; + return new_table; +} - // Fill in the instance descriptor and the fields. - int current_offset = 0; - for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) { - Object* value = ValueAt(i); - Name* key; - if (k->IsSymbol()) { - key = Symbol::cast(k); - } else { - // Ensure the key is a unique name before writing into the - // instance descriptor. - MaybeObject* maybe_key = heap->InternalizeString(String::cast(k)); - if (!maybe_key->To(&key)) return maybe_key; - } - PropertyDetails details = DetailsAt(i); - int enumeration_index = details.dictionary_index(); - PropertyType type = details.type(); +template <class Derived, class Iterator, int entrysize> +int OrderedHashTable<Derived, Iterator, entrysize>::FindEntry( + Handle<Object> key, int hash) { + DCHECK(!IsObsolete()); - if (value->IsJSFunction()) { - ConstantDescriptor d(key, value, details.attributes()); - descriptors->Set(enumeration_index - 1, &d, witness); - } else if (type == NORMAL) { - if (current_offset < inobject_props) { - obj->InObjectPropertyAtPut(current_offset, - value, - UPDATE_WRITE_BARRIER); - } else { - int offset = current_offset - inobject_props; - fields->set(offset, value); - } - FieldDescriptor d(key, - current_offset++, - details.attributes(), - // TODO(verwaest): value->OptimalRepresentation(); - Representation::Tagged()); - descriptors->Set(enumeration_index - 1, &d, witness); - } else if (type == CALLBACKS) { - CallbacksDescriptor d(key, - value, - details.attributes()); - descriptors->Set(enumeration_index - 1, &d, witness); - } else { - UNREACHABLE(); - } - } + DisallowHeapAllocation 
no_gc; + DCHECK(!key->IsTheHole()); + for (int entry = HashToEntry(hash); entry != kNotFound; + entry = ChainAt(entry)) { + Object* candidate = KeyAt(entry); + if (candidate->SameValueZero(*key)) + return entry; } - ASSERT(current_offset == number_of_fields); - - descriptors->Sort(); + return kNotFound; +} - new_map->InitializeDescriptors(descriptors); - new_map->set_unused_property_fields(unused_property_fields); - // Transform the object. - obj->set_map(new_map); +template <class Derived, class Iterator, int entrysize> +int OrderedHashTable<Derived, Iterator, entrysize>::FindEntry( + Handle<Object> key) { + DisallowHeapAllocation no_gc; + Object* hash = key->GetHash(); + if (!hash->IsSmi()) return kNotFound; + return FindEntry(key, Smi::cast(hash)->value()); +} - obj->set_properties(fields); - ASSERT(obj->IsJSObject()); - // Check that it really works. - ASSERT(obj->HasFastProperties()); +template <class Derived, class Iterator, int entrysize> +int OrderedHashTable<Derived, Iterator, entrysize>::AddEntry(int hash) { + DCHECK(!IsObsolete()); - return obj; + int entry = UsedCapacity(); + int bucket = HashToBucket(hash); + int index = EntryToIndex(entry); + Object* chain_entry = get(kHashTableStartIndex + bucket); + set(kHashTableStartIndex + bucket, Smi::FromInt(entry)); + set(index + kChainOffset, chain_entry); + SetNumberOfElements(NumberOfElements() + 1); + return index; } -Handle<ObjectHashSet> ObjectHashSet::EnsureCapacity( - Handle<ObjectHashSet> table, - int n, - Handle<Object> key, - PretenureFlag pretenure) { - Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table; - CALL_HEAP_FUNCTION(table_base->GetIsolate(), - table_base->EnsureCapacity(n, *key, pretenure), - ObjectHashSet); +template<class Derived, class Iterator, int entrysize> +void OrderedHashTable<Derived, Iterator, entrysize>::RemoveEntry(int entry) { + DCHECK(!IsObsolete()); + + int index = EntryToIndex(entry); + for (int i = 0; i < entrysize; ++i) { + set_the_hole(index + i); + 
} + SetNumberOfElements(NumberOfElements() - 1); + SetNumberOfDeletedElements(NumberOfDeletedElements() + 1); } -Handle<ObjectHashSet> ObjectHashSet::Shrink(Handle<ObjectHashSet> table, - Handle<Object> key) { - Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table; - CALL_HEAP_FUNCTION(table_base->GetIsolate(), - table_base->Shrink(*key), - ObjectHashSet); -} +template Handle<OrderedHashSet> +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Allocate( + Isolate* isolate, int capacity, PretenureFlag pretenure); +template Handle<OrderedHashSet> +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::EnsureGrowable( + Handle<OrderedHashSet> table); -bool ObjectHashSet::Contains(Object* key) { - ASSERT(IsKey(key)); +template Handle<OrderedHashSet> +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Shrink( + Handle<OrderedHashSet> table); - // If the object does not have an identity hash, it was never used as a key. - Object* hash = key->GetHash(); - if (hash->IsUndefined()) return false; +template Handle<OrderedHashSet> +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Clear( + Handle<OrderedHashSet> table); - return (FindEntry(key) != kNotFound); -} +template Handle<OrderedHashSet> +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Remove( + Handle<OrderedHashSet> table, Handle<Object> key, bool* was_present); +template int OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::FindEntry( + Handle<Object> key, int hash); +template int OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::FindEntry( + Handle<Object> key); -Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> table, - Handle<Object> key) { - ASSERT(table->IsKey(*key)); +template int +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::AddEntry(int hash); - // Make sure the key object has an identity hash code. 
- Handle<Object> object_hash = Object::GetOrCreateHash(key, - table->GetIsolate()); +template void +OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::RemoveEntry(int entry); - int entry = table->FindEntry(*key); - // Check whether key is already present. - if (entry != kNotFound) return table; +template Handle<OrderedHashMap> +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Allocate( + Isolate* isolate, int capacity, PretenureFlag pretenure); - // Check whether the hash set should be extended and add entry. - Handle<ObjectHashSet> new_table = - ObjectHashSet::EnsureCapacity(table, 1, key); - entry = new_table->FindInsertionEntry(Smi::cast(*object_hash)->value()); - new_table->set(EntryToIndex(entry), *key); - new_table->ElementAdded(); - return new_table; -} +template Handle<OrderedHashMap> +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::EnsureGrowable( + Handle<OrderedHashMap> table); +template Handle<OrderedHashMap> +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Shrink( + Handle<OrderedHashMap> table); -Handle<ObjectHashSet> ObjectHashSet::Remove(Handle<ObjectHashSet> table, - Handle<Object> key) { - ASSERT(table->IsKey(*key)); +template Handle<OrderedHashMap> +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Clear( + Handle<OrderedHashMap> table); - // If the object does not have an identity hash, it was never used as a key. - if (key->GetHash()->IsUndefined()) return table; +template Handle<OrderedHashMap> +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Remove( + Handle<OrderedHashMap> table, Handle<Object> key, bool* was_present); + +template int OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::FindEntry( + Handle<Object> key, int hash); +template int OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::FindEntry( + Handle<Object> key); - int entry = table->FindEntry(*key); +template int +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::AddEntry(int hash); - // Check whether key is actually present. 
- if (entry == kNotFound) return table; +template void +OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::RemoveEntry(int entry); - // Remove entry and try to shrink this hash set. - table->set_the_hole(EntryToIndex(entry)); - table->ElementRemoved(); - return ObjectHashSet::Shrink(table, key); +bool OrderedHashSet::Contains(Handle<Object> key) { + return FindEntry(key) != kNotFound; } -Handle<ObjectHashTable> ObjectHashTable::EnsureCapacity( - Handle<ObjectHashTable> table, - int n, - Handle<Object> key, - PretenureFlag pretenure) { - Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table; - CALL_HEAP_FUNCTION(table_base->GetIsolate(), - table_base->EnsureCapacity(n, *key, pretenure), - ObjectHashTable); -} +Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table, + Handle<Object> key) { + int hash = GetOrCreateHash(table->GetIsolate(), key)->value(); + if (table->FindEntry(key, hash) != kNotFound) return table; + table = EnsureGrowable(table); -Handle<ObjectHashTable> ObjectHashTable::Shrink( - Handle<ObjectHashTable> table, Handle<Object> key) { - Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table; - CALL_HEAP_FUNCTION(table_base->GetIsolate(), - table_base->Shrink(*key), - ObjectHashTable); + int index = table->AddEntry(hash); + table->set(index, *key); + return table; } -Object* ObjectHashTable::Lookup(Object* key) { - ASSERT(IsKey(key)); - - // If the object does not have an identity hash, it was never used as a key. 
- Object* hash = key->GetHash(); - if (hash->IsUndefined()) { - return GetHeap()->the_hole_value(); - } +Object* OrderedHashMap::Lookup(Handle<Object> key) { + DisallowHeapAllocation no_gc; int entry = FindEntry(key); if (entry == kNotFound) return GetHeap()->the_hole_value(); - return get(EntryToIndex(entry) + 1); + return ValueAt(entry); } -Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table, - Handle<Object> key, - Handle<Object> value) { - ASSERT(table->IsKey(*key)); - - Isolate* isolate = table->GetIsolate(); - - // Make sure the key object has an identity hash code. - Handle<Object> hash = Object::GetOrCreateHash(key, isolate); - - int entry = table->FindEntry(*key); +Handle<OrderedHashMap> OrderedHashMap::Put(Handle<OrderedHashMap> table, + Handle<Object> key, + Handle<Object> value) { + DCHECK(!key->IsTheHole()); - // Check whether to perform removal operation. - if (value->IsTheHole()) { - if (entry == kNotFound) return table; - table->RemoveEntry(entry); - return Shrink(table, key); - } + int hash = GetOrCreateHash(table->GetIsolate(), key)->value(); + int entry = table->FindEntry(key, hash); - // Key is already in table, just overwrite value. if (entry != kNotFound) { - table->set(EntryToIndex(entry) + 1, *value); + table->set(table->EntryToIndex(entry) + kValueOffset, *value); return table; } - // Check whether the hash table should be extended. 
- table = EnsureCapacity(table, 1, key); - table->AddEntry(table->FindInsertionEntry(Handle<Smi>::cast(hash)->value()), - *key, - *value); + table = EnsureGrowable(table); + + int index = table->AddEntry(hash); + table->set(index, *key); + table->set(index + kValueOffset, *value); return table; } -void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) { - set(EntryToIndex(entry), key); - set(EntryToIndex(entry) + 1, value); - ElementAdded(); -} +template<class Derived, class TableType> +void OrderedHashTableIterator<Derived, TableType>::Transition() { + DisallowHeapAllocation no_allocation; + TableType* table = TableType::cast(this->table()); + if (!table->IsObsolete()) return; + int index = Smi::cast(this->index())->value(); + while (table->IsObsolete()) { + TableType* next_table = table->NextTable(); + + if (index > 0) { + int nod = table->NumberOfDeletedElements(); + + // When we clear the table we set the number of deleted elements to -1. + if (nod == -1) { + index = 0; + } else { + int old_index = index; + for (int i = 0; i < nod; ++i) { + int removed_index = table->RemovedIndexAt(i); + if (removed_index >= old_index) break; + --index; + } + } + } -void ObjectHashTable::RemoveEntry(int entry) { - set_the_hole(EntryToIndex(entry)); - set_the_hole(EntryToIndex(entry) + 1); - ElementRemoved(); + table = next_table; + } + + set_table(table); + set_index(Smi::FromInt(index)); } -Object* WeakHashTable::Lookup(Object* key) { - ASSERT(IsKey(key)); - int entry = FindEntry(key); - if (entry == kNotFound) return GetHeap()->the_hole_value(); - return get(EntryToValueIndex(entry)); -} +template<class Derived, class TableType> +bool OrderedHashTableIterator<Derived, TableType>::HasMore() { + DisallowHeapAllocation no_allocation; + if (this->table()->IsUndefined()) return false; + Transition(); -MaybeObject* WeakHashTable::Put(Object* key, Object* value) { - ASSERT(IsKey(key)); - int entry = FindEntry(key); - // Key is already in table, just overwrite value. 
- if (entry != kNotFound) { - set(EntryToValueIndex(entry), value); - return this; - } + TableType* table = TableType::cast(this->table()); + int index = Smi::cast(this->index())->value(); + int used_capacity = table->UsedCapacity(); - // Check whether the hash table should be extended. - Object* obj; - { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + while (index < used_capacity && table->KeyAt(index)->IsTheHole()) { + index++; } - WeakHashTable* table = WeakHashTable::cast(obj); - table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value); - return table; + + set_index(Smi::FromInt(index)); + + if (index < used_capacity) return true; + + set_table(GetHeap()->undefined_value()); + return false; } -void WeakHashTable::AddEntry(int entry, Object* key, Object* value) { - set(EntryToIndex(entry), key); - set(EntryToValueIndex(entry), value); - ElementAdded(); +template<class Derived, class TableType> +Smi* OrderedHashTableIterator<Derived, TableType>::Next(JSArray* value_array) { + DisallowHeapAllocation no_allocation; + if (HasMore()) { + FixedArray* array = FixedArray::cast(value_array->elements()); + static_cast<Derived*>(this)->PopulateValueArray(array); + MoveNext(); + return kind(); + } + return Smi::FromInt(0); } +template Smi* +OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Next( + JSArray* value_array); + +template bool +OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::HasMore(); + +template void +OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::MoveNext(); + +template Object* +OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::CurrentKey(); + +template void +OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Transition(); + + +template Smi* +OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Next( + JSArray* value_array); + +template bool +OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::HasMore(); + +template void 
+OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::MoveNext(); + +template Object* +OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::CurrentKey(); + +template void +OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Transition(); + + DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator( DeclaredAccessorDescriptor* descriptor) : array_(descriptor->serialized_data()->GetDataStartAddress()), @@ -15865,13 +16362,13 @@ const DeclaredAccessorDescriptorData* DeclaredAccessorDescriptorIterator::Next() { - ASSERT(offset_ < length_); + DCHECK(offset_ < length_); uint8_t* ptr = &array_[offset_]; - ASSERT(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0); + DCHECK(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0); const DeclaredAccessorDescriptorData* data = reinterpret_cast<const DeclaredAccessorDescriptorData*>(ptr); offset_ += sizeof(*data); - ASSERT(offset_ <= length_); + DCHECK(offset_ <= length_); return data; } @@ -15895,10 +16392,10 @@ if (previous_length != 0) { uint8_t* previous_array = previous->serialized_data()->GetDataStartAddress(); - OS::MemCopy(array, previous_array, previous_length); + MemCopy(array, previous_array, previous_length); array += previous_length; } - ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0); + DCHECK(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0); DeclaredAccessorDescriptorData* data = reinterpret_cast<DeclaredAccessorDescriptorData*>(array); *data = descriptor; @@ -15907,7 +16404,6 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT // Check if there is a break point at this code position. bool DebugInfo::HasBreakPoint(int code_position) { // Get the break point info object for this code position. 
@@ -15975,7 +16471,7 @@ Handle<FixedArray> new_break_points = isolate->factory()->NewFixedArray( old_break_points->length() + - Debug::kEstimatedNofBreakPointsInFunction); + DebugInfo::kEstimatedNofBreakPointsInFunction); debug_info->set_break_points(*new_break_points); for (int i = 0; i < old_break_points->length(); i++) { @@ -15983,7 +16479,7 @@ } index = old_break_points->length(); } - ASSERT(index != kNoBreakPointInfo); + DCHECK(index != kNoBreakPointInfo); // Allocate new BreakPointInfo object and set the break point. Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast( @@ -16075,7 +16571,7 @@ return; } // If there are multiple break points shrink the array - ASSERT(break_point_info->break_point_objects()->IsFixedArray()); + DCHECK(break_point_info->break_point_objects()->IsFixedArray()); Handle<FixedArray> old_array = Handle<FixedArray>( FixedArray::cast(break_point_info->break_point_objects())); @@ -16084,7 +16580,7 @@ int found_count = 0; for (int i = 0; i < old_array->length(); i++) { if (old_array->get(i) == *break_point_object) { - ASSERT(found_count == 0); + DCHECK(found_count == 0); found_count++; } else { new_array->set(i - found_count, old_array->get(i)); @@ -16161,7 +16657,6 @@ // Multiple break points. return FixedArray::cast(break_point_objects())->length(); } -#endif // ENABLE_DEBUGGER_SUPPORT Object* JSDate::GetField(Object* object, Smi* index) { @@ -16171,7 +16666,7 @@ Object* JSDate::DoGetField(FieldIndex index) { - ASSERT(index != kDateValue); + DCHECK(index != kDateValue); DateCache* date_cache = GetIsolate()->date_cache(); @@ -16181,7 +16676,7 @@ // Since the stamp is not NaN, the value is also not NaN. 
int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(value()->Number())); - SetLocalFields(local_time_ms, date_cache); + SetCachedFields(local_time_ms, date_cache); } switch (index) { case kYear: return year(); @@ -16209,7 +16704,7 @@ int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days); if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000); - ASSERT(index == kTimeInDay); + DCHECK(index == kTimeInDay); return Smi::FromInt(time_in_day_ms); } @@ -16217,7 +16712,7 @@ Object* JSDate::GetUTCField(FieldIndex index, double value, DateCache* date_cache) { - ASSERT(index >= kFirstUTCField); + DCHECK(index >= kFirstUTCField); if (std::isnan(value)) return GetIsolate()->heap()->nan_value(); @@ -16236,7 +16731,7 @@ date_cache->YearMonthDayFromDays(days, &year, &month, &day); if (index == kYearUTC) return Smi::FromInt(year); if (index == kMonthUTC) return Smi::FromInt(month); - ASSERT(index == kDayUTC); + DCHECK(index == kDayUTC); return Smi::FromInt(day); } @@ -16274,7 +16769,7 @@ } -void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) { +void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) { int days = DateCache::DaysFromTime(local_time_ms); int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days); int year, month, day; @@ -16295,7 +16790,7 @@ void JSArrayBuffer::Neuter() { - ASSERT(is_external()); + DCHECK(is_external()); set_backing_store(NULL); set_byte_length(Smi::FromInt(0)); } @@ -16340,7 +16835,11 @@ Handle<Map> map(typed_array->map()); Isolate* isolate = typed_array->GetIsolate(); - ASSERT(IsFixedTypedArrayElementsKind(map->elements_kind())); + DCHECK(IsFixedTypedArrayElementsKind(map->elements_kind())); + + Handle<Map> new_map = Map::TransitionElementsTo( + map, + FixedToExternalElementsKind(map->elements_kind())); Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(); Handle<FixedTypedArrayBase> fixed_typed_array( @@ -16354,14 +16853,11 @@ 
isolate->factory()->NewExternalArray( fixed_typed_array->length(), typed_array->type(), static_cast<uint8_t*>(buffer->backing_store())); - Handle<Map> new_map = JSObject::GetElementsTransitionMap( - typed_array, - FixedToExternalElementsKind(map->elements_kind())); buffer->set_weak_first_view(*typed_array); - ASSERT(typed_array->weak_next() == isolate->heap()->undefined_value()); + DCHECK(typed_array->weak_next() == isolate->heap()->undefined_value()); typed_array->set_buffer(*buffer); - typed_array->set_map_and_elements(*new_map, *new_elements); + JSObject::SetMapAndElements(typed_array, new_map, new_elements); return buffer; } @@ -16370,7 +16866,7 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() { Handle<Object> result(buffer(), GetIsolate()); if (*result != Smi::FromInt(0)) { - ASSERT(IsExternalArrayElementsKind(map()->elements_kind())); + DCHECK(IsExternalArrayElementsKind(map()->elements_kind())); return Handle<JSArrayBuffer>::cast(result); } Handle<JSTypedArray> self(this); @@ -16384,7 +16880,7 @@ void PropertyCell::set_type(HeapType* type, WriteBarrierMode ignored) { - ASSERT(IsPropertyCell()); + DCHECK(IsPropertyCell()); set_type_raw(type, ignored); } @@ -16393,14 +16889,9 @@ Handle<Object> value) { Isolate* isolate = cell->GetIsolate(); Handle<HeapType> old_type(cell->type(), isolate); - // TODO(2803): Do not track ConsString as constant because they cannot be - // embedded into code. - Handle<HeapType> new_type = value->IsConsString() || value->IsTheHole() - ? 
HeapType::Any(isolate) : HeapType::Constant(value, isolate); + Handle<HeapType> new_type = HeapType::Constant(value, isolate); - if (new_type->Is(old_type)) { - return old_type; - } + if (new_type->Is(old_type)) return old_type; cell->dependent_code()->DeoptimizeDependentCodeGroup( isolate, DependentCode::kPropertyCellChangedGroup); @@ -16423,19 +16914,21 @@ } -void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) { - Handle<DependentCode> dep(dependent_code()); +// static +void PropertyCell::AddDependentCompilationInfo(Handle<PropertyCell> cell, + CompilationInfo* info) { Handle<DependentCode> codes = - DependentCode::Insert(dep, DependentCode::kPropertyCellChangedGroup, + DependentCode::Insert(handle(cell->dependent_code(), info->isolate()), + DependentCode::kPropertyCellChangedGroup, info->object_wrapper()); - if (*codes != dependent_code()) set_dependent_code(*codes); + if (*codes != cell->dependent_code()) cell->set_dependent_code(*codes); info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add( - Handle<HeapObject>(this), info->zone()); + cell, info->zone()); } const char* GetBailoutReason(BailoutReason reason) { - ASSERT(reason < kLastErrorMessage); + DCHECK(reason < kLastErrorMessage); #define ERROR_MESSAGES_TEXTS(C, T) T, static const char* error_messages_[] = { ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS) diff -Nru nodejs-0.11.13/deps/v8/src/objects-debug.cc nodejs-0.11.15/deps/v8/src/objects-debug.cc --- nodejs-0.11.13/deps/v8/src/objects-debug.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects-debug.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,53 +1,26 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "disassembler.h" -#include "disasm.h" -#include "jsregexp.h" -#include "macro-assembler.h" -#include "objects-visiting.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/disasm.h" +#include "src/disassembler.h" +#include "src/heap/objects-visiting.h" +#include "src/jsregexp.h" +#include "src/macro-assembler.h" +#include "src/ostreams.h" namespace v8 { namespace internal { #ifdef VERIFY_HEAP -void MaybeObject::Verify() { - Object* this_as_object; - if (ToObject(&this_as_object)) { - if (this_as_object->IsSmi()) { - Smi::cast(this_as_object)->SmiVerify(); - } else { - HeapObject::cast(this_as_object)->HeapObjectVerify(); - } +void Object::ObjectVerify() { + if (IsSmi()) { + Smi::cast(this)->SmiVerify(); } else { - Failure::cast(this)->FailureVerify(); + HeapObject::cast(this)->HeapObjectVerify(); } } @@ -66,11 +39,6 @@ } -void Failure::FailureVerify() { - CHECK(IsFailure()); -} - - void HeapObject::HeapObjectVerify() { InstanceType instance_type = map()->instance_type(); @@ -87,6 +55,7 @@ Map::cast(this)->MapVerify(); break; case HEAP_NUMBER_TYPE: + case MUTABLE_HEAP_NUMBER_TYPE: HeapNumber::cast(this)->HeapNumberVerify(); break; case FIXED_ARRAY_TYPE: @@ -165,6 +134,12 @@ case JS_MAP_TYPE: JSMap::cast(this)->JSMapVerify(); break; + case JS_SET_ITERATOR_TYPE: + JSSetIterator::cast(this)->JSSetIteratorVerify(); + break; + case JS_MAP_ITERATOR_TYPE: + JSMapIterator::cast(this)->JSMapIteratorVerify(); + break; case JS_WEAK_MAP_TYPE: JSWeakMap::cast(this)->JSWeakMapVerify(); break; @@ -232,7 +207,7 @@ void HeapNumber::HeapNumberVerify() { - CHECK(IsHeapNumber()); + CHECK(IsHeapNumber() || IsMutableHeapNumber()); } @@ -288,12 +263,18 @@ for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) { if (descriptors->GetDetails(i).type() == FIELD) { Representation r = descriptors->GetDetails(i).representation(); - int field = descriptors->GetFieldIndex(i); - Object* value = RawFastPropertyAt(field); - if (r.IsDouble()) ASSERT(value->IsHeapNumber()); + FieldIndex index = FieldIndex::ForDescriptor(map(), i); + Object* value = RawFastPropertyAt(index); + if (r.IsDouble()) 
DCHECK(value->IsMutableHeapNumber()); if (value->IsUninitialized()) continue; - if (r.IsSmi()) ASSERT(value->IsSmi()); - if (r.IsHeapObject()) ASSERT(value->IsHeapObject()); + if (r.IsSmi()) DCHECK(value->IsSmi()); + if (r.IsHeapObject()) DCHECK(value->IsHeapObject()); + HeapType* field_type = descriptors->GetFieldType(i); + if (r.IsNone()) { + CHECK(field_type->Is(HeapType::None())); + } else if (!HeapType::Any()->Is(field_type)) { + CHECK(!field_type->NowStable() || field_type->NowContains(value)); + } } } } @@ -319,17 +300,17 @@ instance_size() < heap->Capacity())); VerifyHeapPointer(prototype()); VerifyHeapPointer(instance_descriptors()); - SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates()); + SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates()); if (HasTransitionArray()) { - SLOW_ASSERT(transitions()->IsSortedNoDuplicates()); - SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this)); + SLOW_DCHECK(transitions()->IsSortedNoDuplicates()); + SLOW_DCHECK(transitions()->IsConsistentWithBackPointers(this)); } } -void Map::SharedMapVerify() { +void Map::DictionaryMapVerify() { MapVerify(); - CHECK(is_shared()); + CHECK(is_dictionary_map()); CHECK(instance_descriptors()->IsEmpty()); CHECK_EQ(0, pre_allocated_property_fields()); CHECK_EQ(0, unused_property_fields()); @@ -368,7 +349,7 @@ void TypeFeedbackInfo::TypeFeedbackInfoVerify() { VerifyObjectField(kStorage1Offset); VerifyObjectField(kStorage2Offset); - VerifyHeapPointer(feedback_vector()); + VerifyObjectField(kStorage3Offset); } @@ -380,11 +361,7 @@ void FixedArray::FixedArrayVerify() { for (int i = 0; i < length(); i++) { Object* e = get(i); - if (e->IsHeapObject()) { - VerifyHeapPointer(e); - } else { - e->Verify(); - } + VerifyPointer(e); } } @@ -404,12 +381,14 @@ void ConstantPoolArray::ConstantPoolArrayVerify() { CHECK(IsConstantPoolArray()); - for (int i = 0; i < count_of_code_ptr_entries(); i++) { - Address code_entry = get_code_ptr_entry(first_code_ptr_index() + i); + 
ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR); + while (!code_iter.is_finished()) { + Address code_entry = get_code_ptr_entry(code_iter.next_index()); VerifyPointer(Code::GetCodeFromTargetAddress(code_entry)); } - for (int i = 0; i < count_of_heap_ptr_entries(); i++) { - VerifyObjectField(OffsetOfElementAt(first_heap_ptr_index() + i)); + ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR); + while (!heap_iter.is_finished()) { + VerifyObjectField(OffsetOfElementAt(heap_iter.next_index())); } } @@ -521,6 +500,7 @@ CHECK(this->second() == GetHeap()->empty_string() || this->second()->IsString()); CHECK(this->length() >= ConsString::kMinLength); + CHECK(this->length() == this->first()->length() + this->second()->length()); if (this->IsFlat()) { // A flat cons can only be created by String::SlowTryFlatten. // Afterwards, the first part may be externalized. @@ -552,6 +532,7 @@ VerifyObjectField(kNameOffset); VerifyObjectField(kCodeOffset); VerifyObjectField(kOptimizedCodeMapOffset); + VerifyObjectField(kFeedbackVectorOffset); VerifyObjectField(kScopeInfoOffset); VerifyObjectField(kInstanceClassNameOffset); VerifyObjectField(kFunctionDataOffset); @@ -566,7 +547,7 @@ VerifyObjectField(JSGlobalProxy::kNativeContextOffset); // Make sure that this object has no properties, elements. CHECK_EQ(0, properties()->length()); - CHECK(HasFastObjectElements()); + CHECK(HasFastSmiElements()); CHECK_EQ(0, FixedArray::cast(elements())->length()); } @@ -595,18 +576,41 @@ void Oddball::OddballVerify() { CHECK(IsOddball()); + Heap* heap = GetHeap(); VerifyHeapPointer(to_string()); Object* number = to_number(); if (number->IsHeapObject()) { - CHECK(number == HeapObject::cast(number)->GetHeap()->nan_value()); + CHECK(number == heap->nan_value()); } else { CHECK(number->IsSmi()); int value = Smi::cast(number)->value(); // Hidden oddballs have negative smis. 
- const int kLeastHiddenOddballNumber = -4; + const int kLeastHiddenOddballNumber = -5; CHECK_LE(value, 1); CHECK(value >= kLeastHiddenOddballNumber); } + if (map() == heap->undefined_map()) { + CHECK(this == heap->undefined_value()); + } else if (map() == heap->the_hole_map()) { + CHECK(this == heap->the_hole_value()); + } else if (map() == heap->null_map()) { + CHECK(this == heap->null_value()); + } else if (map() == heap->boolean_map()) { + CHECK(this == heap->true_value() || + this == heap->false_value()); + } else if (map() == heap->uninitialized_map()) { + CHECK(this == heap->uninitialized_value()); + } else if (map() == heap->no_interceptor_result_sentinel_map()) { + CHECK(this == heap->no_interceptor_result_sentinel()); + } else if (map() == heap->arguments_marker_map()) { + CHECK(this == heap->arguments_marker()); + } else if (map() == heap->termination_exception_map()) { + CHECK(this == heap->termination_exception()); + } else if (map() == heap->exception_map()) { + CHECK(this == heap->exception()); + } else { + UNREACHABLE(); + } } @@ -626,33 +630,42 @@ void Code::CodeVerify() { CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()), kCodeAlignment)); - relocation_info()->Verify(); + relocation_info()->ObjectVerify(); Address last_gc_pc = NULL; + Isolate* isolate = GetIsolate(); for (RelocIterator it(this); !it.done(); it.next()) { - it.rinfo()->Verify(); + it.rinfo()->Verify(isolate); // Ensure that GC will not iterate twice over the same pointer. 
if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) { CHECK(it.rinfo()->pc() != last_gc_pc); last_gc_pc = it.rinfo()->pc(); } } + CHECK(raw_type_feedback_info() == Smi::FromInt(0) || + raw_type_feedback_info()->IsSmi() == IsCodeStubOrIC()); } void Code::VerifyEmbeddedObjectsDependency() { + if (!CanContainWeakObjects()) return; + DisallowHeapAllocation no_gc; + Isolate* isolate = GetIsolate(); + HandleScope scope(isolate); int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(this, mode_mask); !it.done(); it.next()) { Object* obj = it.rinfo()->target_object(); if (IsWeakObject(obj)) { if (obj->IsMap()) { Map* map = Map::cast(obj); - CHECK(map->dependent_code()->Contains( - DependentCode::kWeaklyEmbeddedGroup, this)); + DependentCode::DependencyGroup group = is_optimized_code() ? + DependentCode::kWeakCodeGroup : DependentCode::kWeakICGroup; + CHECK(map->dependent_code()->Contains(group, this)); } else if (obj->IsJSObject()) { Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table(); WeakHashTable* table = WeakHashTable::cast(raw_table); - CHECK(DependentCode::cast(table->Lookup(obj))->Contains( - DependentCode::kWeaklyEmbeddedGroup, this)); + Handle<Object> key_obj(obj, isolate); + CHECK(DependentCode::cast(table->Lookup(key_obj))->Contains( + DependentCode::kWeakCodeGroup, this)); } } } @@ -676,7 +689,8 @@ CHECK(IsJSSet()); JSObjectVerify(); VerifyHeapPointer(table()); - CHECK(table()->IsHashTable() || table()->IsUndefined()); + CHECK(table()->IsOrderedHashTable() || table()->IsUndefined()); + // TODO(arv): Verify OrderedHashTable too. } @@ -684,7 +698,28 @@ CHECK(IsJSMap()); JSObjectVerify(); VerifyHeapPointer(table()); - CHECK(table()->IsHashTable() || table()->IsUndefined()); + CHECK(table()->IsOrderedHashTable() || table()->IsUndefined()); + // TODO(arv): Verify OrderedHashTable too. 
+} + + +void JSSetIterator::JSSetIteratorVerify() { + CHECK(IsJSSetIterator()); + JSObjectVerify(); + VerifyHeapPointer(table()); + CHECK(table()->IsOrderedHashTable() || table()->IsUndefined()); + CHECK(index()->IsSmi() || index()->IsUndefined()); + CHECK(kind()->IsSmi() || kind()->IsUndefined()); +} + + +void JSMapIterator::JSMapIteratorVerify() { + CHECK(IsJSMapIterator()); + JSObjectVerify(); + VerifyHeapPointer(table()); + CHECK(table()->IsOrderedHashTable() || table()->IsUndefined()); + CHECK(index()->IsSmi() || index()->IsUndefined()); + CHECK(kind()->IsSmi() || kind()->IsUndefined()); } @@ -811,7 +846,7 @@ void Box::BoxVerify() { CHECK(IsBox()); - value()->Verify(); + value()->ObjectVerify(); } @@ -848,7 +883,6 @@ CHECK(IsAccessorPair()); VerifyPointer(getter()); VerifyPointer(setter()); - VerifySmiField(kAccessFlagsOffset); } @@ -947,7 +981,7 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() { - JSFunction::cast(get(kFactoryIndex))->Verify(); + JSFunction::cast(get(kFactoryIndex))->ObjectVerify(); int size = Smi::cast(get(kCacheSizeIndex))->value(); CHECK(kEntriesIndex <= size); @@ -962,23 +996,23 @@ if (FLAG_enable_slow_asserts) { for (int i = kEntriesIndex; i < size; i++) { CHECK(!get(i)->IsTheHole()); - get(i)->Verify(); + get(i)->ObjectVerify(); } for (int i = size; i < length(); i++) { CHECK(get(i)->IsTheHole()); - get(i)->Verify(); + get(i)->ObjectVerify(); } } } void NormalizedMapCache::NormalizedMapCacheVerify() { - FixedArray::cast(this)->Verify(); + FixedArray::cast(this)->FixedArrayVerify(); if (FLAG_enable_slow_asserts) { for (int i = 0; i < length(); i++) { - Object* e = get(i); + Object* e = FixedArray::get(i); if (e->IsMap()) { - Map::cast(e)->SharedMapVerify(); + Map::cast(e)->DictionaryMapVerify(); } else { CHECK(e->IsUndefined()); } @@ -987,7 +1021,6 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void DebugInfo::DebugInfoVerify() { CHECK(IsDebugInfo()); VerifyPointer(shared()); @@ -1004,7 +1037,6 @@ statement_position()->SmiVerify(); 
VerifyPointer(break_point_objects()); } -#endif // ENABLE_DEBUGGER_SUPPORT #endif // VERIFY_HEAP #ifdef DEBUG @@ -1112,13 +1144,15 @@ for (int i = 0; i < number_of_descriptors(); i++) { Name* key = GetSortedKey(i); if (key == current_key) { - PrintDescriptors(); + OFStream os(stdout); + PrintDescriptors(os); return false; } current_key = key; uint32_t hash = GetSortedKey(i)->Hash(); if (hash < current) { - PrintDescriptors(); + OFStream os(stdout); + PrintDescriptors(os); return false; } current = hash; @@ -1128,19 +1162,21 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) { - ASSERT(valid_entries == -1); + DCHECK(valid_entries == -1); Name* current_key = NULL; uint32_t current = 0; for (int i = 0; i < number_of_transitions(); i++) { Name* key = GetSortedKey(i); if (key == current_key) { - PrintTransitions(); + OFStream os(stdout); + PrintTransitions(os); return false; } current_key = key; uint32_t hash = GetSortedKey(i)->Hash(); if (hash < current) { - PrintTransitions(); + OFStream os(stdout); + PrintTransitions(os); return false; } current = hash; diff -Nru nodejs-0.11.13/deps/v8/src/objects.h nodejs-0.11.15/deps/v8/src/objects.h --- nodejs-0.11.13/deps/v8/src/objects.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,166 +1,152 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_OBJECTS_H_ #define V8_OBJECTS_H_ -#include "allocation.h" -#include "assert-scope.h" -#include "builtins.h" -#include "elements-kind.h" -#include "flags.h" -#include "list.h" -#include "property-details.h" -#include "smart-pointers.h" -#include "unicode-inl.h" -#if V8_TARGET_ARCH_ARM64 -#include "arm64/constants-arm64.h" -#elif V8_TARGET_ARCH_ARM -#include "arm/constants-arm.h" +#include "src/allocation.h" +#include "src/assert-scope.h" +#include "src/builtins.h" +#include "src/checks.h" +#include "src/elements-kind.h" +#include "src/field-index.h" +#include "src/flags.h" +#include "src/list.h" +#include "src/property-details.h" +#include "src/smart-pointers.h" +#include "src/unicode-inl.h" +#include "src/zone.h" + +#if V8_TARGET_ARCH_ARM +#include "src/arm/constants-arm.h" // NOLINT +#elif V8_TARGET_ARCH_ARM64 +#include "src/arm64/constants-arm64.h" // NOLINT #elif V8_TARGET_ARCH_MIPS -#include "mips/constants-mips.h" +#include "src/mips/constants-mips.h" // NOLINT +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/constants-mips64.h" // NOLINT #endif -#include "v8checks.h" -#include "zone.h" // // Most object types in the V8 JavaScript are described in this file. 
// // Inheritance hierarchy: -// - MaybeObject (an object or a failure) -// - Failure (immediate for marking failed operation) -// - Object -// - Smi (immediate small integer) -// - HeapObject (superclass for everything allocated in the heap) -// - JSReceiver (suitable for property access) -// - JSObject -// - JSArray -// - JSArrayBuffer -// - JSArrayBufferView -// - JSTypedArray -// - JSDataView +// - Object +// - Smi (immediate small integer) +// - HeapObject (superclass for everything allocated in the heap) +// - JSReceiver (suitable for property access) +// - JSObject +// - JSArray +// - JSArrayBuffer +// - JSArrayBufferView +// - JSTypedArray +// - JSDataView +// - JSCollection // - JSSet // - JSMap -// - JSWeakCollection -// - JSWeakMap -// - JSWeakSet -// - JSRegExp -// - JSFunction -// - JSGeneratorObject -// - JSModule -// - GlobalObject -// - JSGlobalObject -// - JSBuiltinsObject -// - JSGlobalProxy -// - JSValue -// - JSDate -// - JSMessageObject -// - JSProxy -// - JSFunctionProxy -// - FixedArrayBase -// - ByteArray -// - FixedArray -// - DescriptorArray -// - HashTable -// - Dictionary -// - StringTable -// - CompilationCacheTable -// - CodeCacheHashTable -// - MapCache -// - Context -// - JSFunctionResultCache -// - ScopeInfo -// - TransitionArray -// - FixedDoubleArray -// - ExternalArray -// - ExternalUint8ClampedArray -// - ExternalInt8Array -// - ExternalUint8Array -// - ExternalInt16Array -// - ExternalUint16Array -// - ExternalInt32Array -// - ExternalUint32Array -// - ExternalFloat32Array -// - Name -// - String -// - SeqString -// - SeqOneByteString -// - SeqTwoByteString -// - SlicedString -// - ConsString -// - ExternalString -// - ExternalAsciiString -// - ExternalTwoByteString -// - InternalizedString -// - SeqInternalizedString -// - SeqOneByteInternalizedString -// - SeqTwoByteInternalizedString -// - ConsInternalizedString -// - ExternalInternalizedString -// - ExternalAsciiInternalizedString -// - ExternalTwoByteInternalizedString -// 
- Symbol -// - HeapNumber -// - Cell -// - PropertyCell -// - Code -// - Map -// - Oddball -// - Foreign -// - SharedFunctionInfo -// - Struct -// - Box -// - DeclaredAccessorDescriptor -// - AccessorInfo -// - DeclaredAccessorInfo -// - ExecutableAccessorInfo -// - AccessorPair -// - AccessCheckInfo -// - InterceptorInfo -// - CallHandlerInfo -// - TemplateInfo -// - FunctionTemplateInfo -// - ObjectTemplateInfo -// - Script -// - SignatureInfo -// - TypeSwitchInfo -// - DebugInfo -// - BreakPointInfo -// - CodeCache +// - JSSetIterator +// - JSMapIterator +// - JSWeakCollection +// - JSWeakMap +// - JSWeakSet +// - JSRegExp +// - JSFunction +// - JSGeneratorObject +// - JSModule +// - GlobalObject +// - JSGlobalObject +// - JSBuiltinsObject +// - JSGlobalProxy +// - JSValue +// - JSDate +// - JSMessageObject +// - JSProxy +// - JSFunctionProxy +// - FixedArrayBase +// - ByteArray +// - FixedArray +// - DescriptorArray +// - HashTable +// - Dictionary +// - StringTable +// - CompilationCacheTable +// - CodeCacheHashTable +// - MapCache +// - OrderedHashTable +// - OrderedHashSet +// - OrderedHashMap +// - Context +// - JSFunctionResultCache +// - ScopeInfo +// - TransitionArray +// - FixedDoubleArray +// - ExternalArray +// - ExternalUint8ClampedArray +// - ExternalInt8Array +// - ExternalUint8Array +// - ExternalInt16Array +// - ExternalUint16Array +// - ExternalInt32Array +// - ExternalUint32Array +// - ExternalFloat32Array +// - Name +// - String +// - SeqString +// - SeqOneByteString +// - SeqTwoByteString +// - SlicedString +// - ConsString +// - ExternalString +// - ExternalAsciiString +// - ExternalTwoByteString +// - InternalizedString +// - SeqInternalizedString +// - SeqOneByteInternalizedString +// - SeqTwoByteInternalizedString +// - ConsInternalizedString +// - ExternalInternalizedString +// - ExternalAsciiInternalizedString +// - ExternalTwoByteInternalizedString +// - Symbol +// - HeapNumber +// - Cell +// - PropertyCell +// - Code +// - Map +// - 
Oddball +// - Foreign +// - SharedFunctionInfo +// - Struct +// - Box +// - DeclaredAccessorDescriptor +// - AccessorInfo +// - DeclaredAccessorInfo +// - ExecutableAccessorInfo +// - AccessorPair +// - AccessCheckInfo +// - InterceptorInfo +// - CallHandlerInfo +// - TemplateInfo +// - FunctionTemplateInfo +// - ObjectTemplateInfo +// - Script +// - SignatureInfo +// - TypeSwitchInfo +// - DebugInfo +// - BreakPointInfo +// - CodeCache // // Formats of Object*: // Smi: [31 bit signed int] 0 // HeapObject: [32 bit direct pointer] (4 byte aligned) | 01 -// Failure: [30 bit signed int] 11 namespace v8 { namespace internal { +class OStream; + enum KeyedAccessStoreMode { STANDARD_STORE, STORE_TRANSITION_SMI_TO_OBJECT, @@ -187,6 +173,12 @@ }; +enum MutableMode { + MUTABLE, + IMMUTABLE +}; + + static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - STANDARD_STORE; STATIC_ASSERT(STANDARD_STORE == 0); @@ -255,12 +247,12 @@ }; -// NormalizedMapSharingMode is used to specify whether a map may be shared -// by different objects with normalized properties. -enum NormalizedMapSharingMode { - UNIQUE_NORMALIZED_MAP, - SHARED_NORMALIZED_MAP -}; +// Indicates how aggressively the prototype should be optimized. FAST_PROTOTYPE +// will give the fastest result by tailoring the map to the prototype, but that +// will cause polymorphism with other objects. REGULAR_PROTOTYPE is to be used +// (at least for now) when dynamically modifying the prototype chain of an +// object using __proto__ or Object.setPrototypeOf. +enum PrototypeOptimizationMode { REGULAR_PROTOTYPE, FAST_PROTOTYPE }; // Indicates whether transitions can be added to a source map or not. @@ -313,8 +305,10 @@ // Instance size sentinel for objects of variable size. const int kVariableSizeSentinel = 0; +// We may store the unsigned bit field as signed Smi value and do not +// use the sign bit. 
const int kStubMajorKeyBits = 7; -const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; +const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1; // All Maps have a field instance_type containing a InstanceType. // It describes the type of the instances. @@ -355,8 +349,6 @@ \ V(INTERNALIZED_STRING_TYPE) \ V(ASCII_INTERNALIZED_STRING_TYPE) \ - V(CONS_INTERNALIZED_STRING_TYPE) \ - V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \ @@ -373,6 +365,7 @@ V(PROPERTY_CELL_TYPE) \ \ V(HEAP_NUMBER_TYPE) \ + V(MUTABLE_HEAP_NUMBER_TYPE) \ V(FOREIGN_TYPE) \ V(BYTE_ARRAY_TYPE) \ V(FREE_SPACE_TYPE) \ @@ -444,6 +437,8 @@ V(JS_PROXY_TYPE) \ V(JS_SET_TYPE) \ V(JS_MAP_TYPE) \ + V(JS_SET_ITERATOR_TYPE) \ + V(JS_MAP_ITERATOR_TYPE) \ V(JS_WEAK_MAP_TYPE) \ V(JS_WEAK_SET_TYPE) \ V(JS_REGEXP_TYPE) \ @@ -514,14 +509,6 @@ kVariableSizeSentinel, \ ascii_internalized_string, \ AsciiInternalizedString) \ - V(CONS_INTERNALIZED_STRING_TYPE, \ - ConsString::kSize, \ - cons_internalized_string, \ - ConsInternalizedString) \ - V(CONS_ASCII_INTERNALIZED_STRING_TYPE, \ - ConsString::kSize, \ - cons_ascii_internalized_string, \ - ConsAsciiInternalizedString) \ V(EXTERNAL_INTERNALIZED_STRING_TYPE, \ ExternalTwoByteString::kSize, \ external_internalized_string, \ @@ -556,7 +543,7 @@ // Note that for subtle reasons related to the ordering or numerical values of // type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST // manually. 
-#define STRUCT_LIST_ALL(V) \ +#define STRUCT_LIST(V) \ V(BOX, Box, box) \ V(DECLARED_ACCESSOR_DESCRIPTOR, \ DeclaredAccessorDescriptor, \ @@ -577,19 +564,9 @@ V(CODE_CACHE, CodeCache, code_cache) \ V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \ V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \ - V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) - -#ifdef ENABLE_DEBUGGER_SUPPORT -#define STRUCT_LIST_DEBUGGER(V) \ + V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \ V(DEBUG_INFO, DebugInfo, debug_info) \ V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) -#else -#define STRUCT_LIST_DEBUGGER(V) -#endif - -#define STRUCT_LIST(V) \ - STRUCT_LIST_ALL(V) \ - STRUCT_LIST_DEBUGGER(V) // We use the full 8 bits of the instance_type field to encode heap object // instance types. The high-order bit (bit 7) is set if the object is not a @@ -621,17 +598,17 @@ }; const uint32_t kIsIndirectStringMask = 0x1; const uint32_t kIsIndirectStringTag = 0x1; -STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0); -STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0); -STATIC_ASSERT( - (kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag); -STATIC_ASSERT( - (kSlicedStringTag & kIsIndirectStringMask) == kIsIndirectStringTag); +STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0); // NOLINT +STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0); // NOLINT +STATIC_ASSERT((kConsStringTag & + kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT +STATIC_ASSERT((kSlicedStringTag & + kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT // Use this mask to distinguish between cons and slice only after making // sure that the string is one of the two (an indirect string). 
const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag; -STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0); +STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask)); // If bit 7 is clear, then bit 3 indicates whether this two-byte // string actually contains one byte data. @@ -645,16 +622,21 @@ // A ConsString with an empty string as the right side is a candidate -// for being shortcut by the garbage collector unless it is internalized. -// It's not common to have non-flat internalized strings, so we do not -// shortcut them thereby avoiding turning internalized strings into strings. -// See heap.cc and mark-compact.cc. +// for being shortcut by the garbage collector. We don't allocate any +// non-flat internalized strings, so we do not shortcut them thereby +// avoiding turning internalized strings into strings. The bit-masks +// below contain the internalized bit as additional safety. +// See heap.cc, mark-compact.cc and objects-visiting.cc. const uint32_t kShortcutTypeMask = kIsNotStringMask | kIsNotInternalizedMask | kStringRepresentationMask; const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag; +static inline bool IsShortcutCandidate(int type) { + return ((type & kShortcutTypeMask) == kShortcutTypeTag); +} + enum InstanceType { // String types. 
@@ -662,10 +644,6 @@ | kInternalizedTag, ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag | kInternalizedTag, - CONS_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kConsStringTag - | kInternalizedTag, - CONS_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kConsStringTag - | kInternalizedTag, EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag | kInternalizedTag, EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag @@ -685,9 +663,9 @@ STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag, ASCII_STRING_TYPE = ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, - CONS_STRING_TYPE = CONS_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, + CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag, CONS_ASCII_STRING_TYPE = - CONS_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, + kOneByteStringTag | kConsStringTag | kNotInternalizedTag, SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag, @@ -721,6 +699,7 @@ // "Data", objects that cannot contain non-map-word pointers to heap // objects. HEAP_NUMBER_TYPE, + MUTABLE_HEAP_NUMBER_TYPE, FOREIGN_TYPE, BYTE_ARRAY_TYPE, FREE_SPACE_TYPE, @@ -768,10 +747,6 @@ TYPE_FEEDBACK_INFO_TYPE, ALIASED_ARGUMENTS_ENTRY_TYPE, BOX_TYPE, - // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT - // is defined. However as include/v8.h contain some of the instance type - // constants always having them avoids them getting different numbers - // depending on whether ENABLE_DEBUGGER_SUPPORT is defined or not. DEBUG_INFO_TYPE, BREAK_POINT_INFO_TYPE, @@ -779,8 +754,6 @@ CONSTANT_POOL_ARRAY_TYPE, SHARED_FUNCTION_INFO_TYPE, - JS_MESSAGE_OBJECT_TYPE, - // All the following types are subtypes of JSReceiver, which corresponds to // objects in the JS sense. The first and the last type in this range are // the two forms of function. 
This organization enables using the same @@ -790,6 +763,7 @@ JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE + JS_MESSAGE_OBJECT_TYPE, JS_DATE_TYPE, JS_OBJECT_TYPE, JS_CONTEXT_EXTENSION_OBJECT_TYPE, @@ -804,6 +778,8 @@ JS_DATA_VIEW_TYPE, JS_SET_TYPE, JS_MAP_TYPE, + JS_SET_ITERATOR_TYPE, + JS_MAP_ITERATOR_TYPE, JS_WEAK_MAP_TYPE, JS_WEAK_SET_TYPE, @@ -854,10 +830,10 @@ const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1; -STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType); -STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType); -STATIC_CHECK(ODDBALL_TYPE == Internals::kOddballType); -STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType); +STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType); +STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType); +STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType); +STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType); #define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \ @@ -889,25 +865,31 @@ #define DECL_BOOLEAN_ACCESSORS(name) \ - inline bool name(); \ + inline bool name() const; \ inline void set_##name(bool value); \ #define DECL_ACCESSORS(name, type) \ - inline type* name(); \ + inline type* name() const; \ inline void set_##name(type* value, \ WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \ + +#define DECLARE_CAST(type) \ + INLINE(static type* cast(Object* object)); \ + INLINE(static const type* cast(const Object* object)); + + class AccessorPair; class AllocationSite; class AllocationSiteCreationContext; class AllocationSiteUsageContext; class DictionaryElementsAccessor; class ElementsAccessor; -class Failure; class FixedArrayBase; class GlobalObject; class ObjectVisitor; +class LookupIterator; class StringStream; // We cannot just say "class HeapType;" if it is created from a template... =8-? 
template<class> class TypeImpl; @@ -925,65 +907,11 @@ #endif #ifdef OBJECT_PRINT -#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout); +#define DECLARE_PRINTER(Name) void Name##Print(OStream& os); // NOLINT #else #define DECLARE_PRINTER(Name) #endif -class MaybeObject BASE_EMBEDDED { - public: - inline bool IsFailure(); - inline bool IsRetryAfterGC(); - inline bool IsException(); - INLINE(bool IsTheHole()); - INLINE(bool IsUninitialized()); - inline bool ToObject(Object** obj) { - if (IsFailure()) return false; - *obj = reinterpret_cast<Object*>(this); - return true; - } - inline Failure* ToFailureUnchecked() { - ASSERT(IsFailure()); - return reinterpret_cast<Failure*>(this); - } - inline Object* ToObjectUnchecked() { - // TODO(jkummerow): Turn this back into an ASSERT when we can be certain - // that it never fires in Release mode in the wild. - CHECK(!IsFailure()); - return reinterpret_cast<Object*>(this); - } - inline Object* ToObjectChecked() { - CHECK(!IsFailure()); - return reinterpret_cast<Object*>(this); - } - - template<typename T> - inline bool To(T** obj) { - if (IsFailure()) return false; - *obj = T::cast(reinterpret_cast<Object*>(this)); - return true; - } - - template<typename T> - inline bool ToHandle(Handle<T>* obj, Isolate* isolate) { - if (IsFailure()) return false; - *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate); - return true; - } - -#ifdef OBJECT_PRINT - // Prints this object with details. - void Print(); - void Print(FILE* out); - void PrintLn(); - void PrintLn(FILE* out); -#endif -#ifdef VERIFY_HEAP - // Verifies the object. 
- void Verify(); -#endif -}; - #define OBJECT_TYPE_LIST(V) \ V(Smi) \ @@ -992,6 +920,7 @@ #define HEAP_OBJECT_TYPE_LIST(V) \ V(HeapNumber) \ + V(MutableHeapNumber) \ V(Name) \ V(UniqueName) \ V(String) \ @@ -1064,6 +993,8 @@ V(JSFunctionProxy) \ V(JSSet) \ V(JSMap) \ + V(JSSetIterator) \ + V(JSMapIterator) \ V(JSWeakCollection) \ V(JSWeakMap) \ V(JSWeakSet) \ @@ -1087,7 +1018,8 @@ V(Cell) \ V(PropertyCell) \ V(ObjectHashTable) \ - V(WeakHashTable) + V(WeakHashTable) \ + V(OrderedHashTable) #define ERROR_MESSAGES_LIST(V) \ @@ -1132,7 +1064,7 @@ V(kCopyBuffersOverlap, "Copy buffers overlap") \ V(kCouldNotGenerateZero, "Could not generate +0.0") \ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \ - V(kDebuggerIsActive, "Debugger is active") \ + V(kDebuggerHasBreakPoints, "Debugger has break points") \ V(kDebuggerStatement, "DebuggerStatement") \ V(kDeclarationInCatchContext, "Declaration in catch context") \ V(kDeclarationInWithContext, "Declaration in with context") \ @@ -1169,6 +1101,7 @@ "Expected fixed array in register r2") \ V(kExpectedFixedArrayInRegisterRbx, \ "Expected fixed array in register rbx") \ + V(kExpectedNewSpaceObject, "Expected new space object") \ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \ V(kExpectedUndefinedOrCell, \ "Expected undefined or cell in register") \ @@ -1245,15 +1178,11 @@ V(kLetBindingReInitialization, "Let binding re-initialization") \ V(kLhsHasBeenClobbered, "lhs has been clobbered") \ V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \ - V(kLiveEditFrameDroppingIsNotSupportedOnARM64, \ - "LiveEdit frame dropping is not supported on arm64") \ - V(kLiveEditFrameDroppingIsNotSupportedOnArm, \ - "LiveEdit frame dropping is not supported on arm") \ - V(kLiveEditFrameDroppingIsNotSupportedOnMips, \ - "LiveEdit frame dropping is not supported on mips") \ V(kLiveEdit, "LiveEdit") \ V(kLookupVariableInCountOperation, \ "Lookup variable in count operation") \ + V(kMapBecameDeprecated, 
"Map became deprecated") \ + V(kMapBecameUnstable, "Map became unstable") \ V(kMapIsNoLongerInEax, "Map is no longer in eax") \ V(kModuleDeclaration, "Module declaration") \ V(kModuleLiteral, "Module literal") \ @@ -1262,6 +1191,7 @@ V(kModuleVariable, "Module variable") \ V(kModuleUrl, "Module url") \ V(kNativeFunctionLiteral, "Native function literal") \ + V(kNeedSmiLiteral, "Need a Smi literal here") \ V(kNoCasesLeft, "No cases left") \ V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \ "No empty arrays here in EmitFastAsciiArrayJoin") \ @@ -1292,6 +1222,8 @@ V(kOperandIsNotAString, "Operand is not a string") \ V(kOperandIsNotSmi, "Operand is not smi") \ V(kOperandNotANumber, "Operand not a number") \ + V(kObjectTagged, "The object is tagged") \ + V(kObjectNotTagged, "The object is not tagged") \ V(kOptimizationDisabled, "Optimization is disabled") \ V(kOptimizedTooManyTimes, "Optimized too many times") \ V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \ @@ -1323,10 +1255,8 @@ "The current stack pointer is below csp") \ V(kTheInstructionShouldBeALui, "The instruction should be a lui") \ V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \ - V(kTheInstructionToPatchShouldBeALoadFromPc, \ - "The instruction to patch should be a load from pc") \ - V(kTheInstructionToPatchShouldBeALoadFromPp, \ - "The instruction to patch should be a load from pp") \ + V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \ + "The instruction to patch should be a load from the constant pool") \ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \ "The instruction to patch should be a ldr literal") \ V(kTheInstructionToPatchShouldBeALui, \ @@ -1385,6 +1315,7 @@ V(kUnexpectedNegativeValue, "Unexpected negative value") \ V(kUnexpectedNumberOfPreAllocatedPropertyFields, \ "Unexpected number of pre-allocated property fields") \ + V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \ V(kUnexpectedSmi, "Unexpected smi value") \ V(kUnexpectedStringFunction, "Unexpected 
String function") \ V(kUnexpectedStringType, "Unexpected string type") \ @@ -1435,61 +1366,67 @@ // object hierarchy. // Object does not use any virtual functions to avoid the // allocation of the C++ vtable. -// Since Smi and Failure are subclasses of Object no +// Since both Smi and HeapObject are subclasses of Object no // data members can be present in Object. -class Object : public MaybeObject { +class Object { public: // Type testing. - bool IsObject() { return true; } + bool IsObject() const { return true; } -#define IS_TYPE_FUNCTION_DECL(type_) inline bool Is##type_(); +#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const); OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL) HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL) #undef IS_TYPE_FUNCTION_DECL - inline bool IsFixedArrayBase(); - inline bool IsExternal(); - inline bool IsAccessorInfo(); + // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas + // a keyed store is of the form a[expression] = foo. + enum StoreFromKeyed { + MAY_BE_STORE_FROM_KEYED, + CERTAINLY_NOT_STORE_FROM_KEYED + }; - inline bool IsStruct(); -#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name(); + INLINE(bool IsFixedArrayBase() const); + INLINE(bool IsExternal() const); + INLINE(bool IsAccessorInfo() const); + + INLINE(bool IsStruct() const); +#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \ + INLINE(bool Is##Name() const); STRUCT_LIST(DECLARE_STRUCT_PREDICATE) #undef DECLARE_STRUCT_PREDICATE - INLINE(bool IsSpecObject()); - INLINE(bool IsSpecFunction()); - bool IsCallable(); + INLINE(bool IsSpecObject()) const; + INLINE(bool IsSpecFunction()) const; + INLINE(bool IsTemplateInfo()) const; + INLINE(bool IsNameDictionary() const); + INLINE(bool IsSeededNumberDictionary() const); + INLINE(bool IsUnseededNumberDictionary() const); + INLINE(bool IsOrderedHashSet() const); + INLINE(bool IsOrderedHashMap() const); + bool IsCallable() const; // Oddball testing. 
- INLINE(bool IsUndefined()); - INLINE(bool IsNull()); - INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation. - INLINE(bool IsUninitialized()); - INLINE(bool IsTrue()); - INLINE(bool IsFalse()); - inline bool IsArgumentsMarker(); - inline bool NonFailureIsHeapObject(); + INLINE(bool IsUndefined() const); + INLINE(bool IsNull() const); + INLINE(bool IsTheHole() const); + INLINE(bool IsException() const); + INLINE(bool IsUninitialized() const); + INLINE(bool IsTrue() const); + INLINE(bool IsFalse() const); + INLINE(bool IsArgumentsMarker() const); // Filler objects (fillers and free space objects). - inline bool IsFiller(); + INLINE(bool IsFiller() const); // Extract the number. inline double Number(); - inline bool IsNaN(); + INLINE(bool IsNaN() const); + INLINE(bool IsMinusZero() const); bool ToInt32(int32_t* value); bool ToUint32(uint32_t* value); - // Indicates whether OptimalRepresentation can do its work, or whether it - // always has to return Representation::Tagged(). 
- enum ValueType { - OPTIMAL_REPRESENTATION, - FORCE_TAGGED - }; - - inline Representation OptimalRepresentation( - ValueType type = OPTIMAL_REPRESENTATION) { + inline Representation OptimalRepresentation() { if (!FLAG_track_fields) return Representation::Tagged(); - if (type == FORCE_TAGGED) return Representation::Tagged(); if (IsSmi()) { return Representation::Smi(); } else if (FLAG_track_double_fields && IsHeapNumber()) { @@ -1497,7 +1434,7 @@ } else if (FLAG_track_computed_fields && IsUninitialized()) { return Representation::None(); } else if (FLAG_track_heap_object_fields) { - ASSERT(IsHeapObject()); + DCHECK(IsHeapObject()); return Representation::HeapObject(); } else { return Representation::Tagged(); @@ -1510,15 +1447,22 @@ } else if (FLAG_track_fields && representation.IsSmi()) { return IsSmi(); } else if (FLAG_track_double_fields && representation.IsDouble()) { - return IsNumber(); + return IsMutableHeapNumber() || IsNumber(); } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { return IsHeapObject(); } return true; } - inline MaybeObject* AllocateNewStorageFor(Heap* heap, - Representation representation); + Handle<HeapType> OptimalType(Isolate* isolate, Representation representation); + + inline static Handle<Object> NewStorageFor(Isolate* isolate, + Handle<Object> object, + Representation representation); + + inline static Handle<Object> WrapForRead(Isolate* isolate, + Handle<Object> object, + Representation representation); // Returns true if the object is of the correct type to be used as a // implementation of a JSObject's elements. @@ -1526,78 +1470,79 @@ inline bool HasSpecificClassOf(String* name); - MUST_USE_RESULT MaybeObject* ToObject(Isolate* isolate); // ECMA-262 9.9. bool BooleanValue(); // ECMA-262 9.2. // Convert to a JSObject if needed. // native_context is used when creating wrapper object. 
- MUST_USE_RESULT MaybeObject* ToObject(Context* native_context); + static inline MaybeHandle<JSReceiver> ToObject(Isolate* isolate, + Handle<Object> object); + static MaybeHandle<JSReceiver> ToObject(Isolate* isolate, + Handle<Object> object, + Handle<Context> context); // Converts this to a Smi if possible. - // Failure is returned otherwise. - static MUST_USE_RESULT inline Handle<Object> ToSmi(Isolate* isolate, - Handle<Object> object); - MUST_USE_RESULT inline MaybeObject* ToSmi(); - - void Lookup(Name* name, LookupResult* result); - - // Property access. - MUST_USE_RESULT inline MaybeObject* GetProperty(Name* key); - MUST_USE_RESULT inline MaybeObject* GetProperty( - Name* key, - PropertyAttributes* attributes); - - // TODO(yangguo): this should eventually replace the non-handlified version. - static Handle<Object> GetPropertyWithReceiver(Handle<Object> object, - Handle<Object> receiver, - Handle<Name> name, - PropertyAttributes* attributes); - MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver( - Object* receiver, - Name* key, - PropertyAttributes* attributes); - - static Handle<Object> GetProperty(Handle<Object> object, - Handle<Name> key); - static Handle<Object> GetProperty(Handle<Object> object, - Handle<Object> receiver, - LookupResult* result, - Handle<Name> key, - PropertyAttributes* attributes); + static MUST_USE_RESULT inline MaybeHandle<Smi> ToSmi(Isolate* isolate, + Handle<Object> object); - MUST_USE_RESULT static MaybeObject* GetPropertyOrFail( - Handle<Object> object, - Handle<Object> receiver, - LookupResult* result, - Handle<Name> key, - PropertyAttributes* attributes); + void Lookup(Handle<Name> name, LookupResult* result); - MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver, - LookupResult* result, - Name* key, - PropertyAttributes* attributes); + MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it); - MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver, - JSReceiver* getter); + // 
Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5. + MUST_USE_RESULT static MaybeHandle<Object> SetProperty( + Handle<Object> object, Handle<Name> key, Handle<Object> value, + StrictMode strict_mode, + StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED); - static inline Handle<Object> GetElement(Isolate* isolate, - Handle<Object> object, - uint32_t index); + MUST_USE_RESULT static MaybeHandle<Object> SetProperty( + LookupIterator* it, Handle<Object> value, StrictMode strict_mode, + StoreFromKeyed store_mode); + MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty( + LookupIterator* it, Handle<Object> value, StrictMode strict_mode); + MUST_USE_RESULT static MaybeHandle<Object> SetDataProperty( + LookupIterator* it, Handle<Object> value); + MUST_USE_RESULT static MaybeHandle<Object> AddDataProperty( + LookupIterator* it, Handle<Object> value, PropertyAttributes attributes, + StrictMode strict_mode, StoreFromKeyed store_mode); + MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement( + Handle<Object> object, + Handle<Name> key); + MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty( + Isolate* isolate, + Handle<Object> object, + const char* key); + MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty( + Handle<Object> object, + Handle<Name> key); + + MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor( + Handle<Object> receiver, + Handle<Name> name, + Handle<JSObject> holder, + Handle<Object> structure); + MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithAccessor( + Handle<Object> receiver, Handle<Name> name, Handle<Object> value, + Handle<JSObject> holder, Handle<Object> structure, + StrictMode strict_mode); + + MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter( + Handle<Object> receiver, + Handle<JSReceiver> getter); + MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithDefinedSetter( + Handle<Object> receiver, + Handle<JSReceiver> setter, + 
Handle<Object> value); - // For use when we know that no exception can be thrown. - static inline Handle<Object> GetElementNoExceptionThrown( + MUST_USE_RESULT static inline MaybeHandle<Object> GetElement( Isolate* isolate, Handle<Object> object, uint32_t index); - static Handle<Object> GetElementWithReceiver(Isolate* isolate, - Handle<Object> object, - Handle<Object> receiver, - uint32_t index); - - // Return the object's prototype (might be Heap::null_value()). - Object* GetPrototype(Isolate* isolate); - Map* GetMarkerMap(Isolate* isolate); + MUST_USE_RESULT static MaybeHandle<Object> GetElementWithReceiver( + Isolate* isolate, + Handle<Object> object, + Handle<Object> receiver, + uint32_t index); // Returns the permanent hash code associated with this object. May return // undefined if not yet created. @@ -1606,16 +1551,19 @@ // Returns the permanent hash code associated with this object depending on // the actual object type. May create and store a hash code if needed and none // exists. - // TODO(rafaelw): Remove isolate parameter when objects.cc is fully - // handlified. - static Handle<Object> GetOrCreateHash(Handle<Object> object, - Isolate* isolate); + static Handle<Smi> GetOrCreateHash(Isolate* isolate, Handle<Object> object); // Checks whether this object has the same value as the given one. This // function is implemented according to ES5, section 9.12 and can be used // to implement the Harmony "egal" function. bool SameValue(Object* other); + // Checks whether this object has the same value as the given one. + // +0 and -0 are treated equal. Everything else is the same as SameValue. + // This function is implemented according to ES6, section 7.2.4 and is used + // by ES6 Map and Set. + bool SameValueZero(Object* other); + // Tries to convert an object to an array index. Returns true and sets // the output parameter if it succeeds. inline bool ToArrayIndex(uint32_t* index); @@ -1624,6 +1572,7 @@ // < the length of the string. 
Used to implement [] on strings. inline bool IsStringObjectWithCharacterAt(uint32_t index); + DECLARE_VERIFIER(Object) #ifdef VERIFY_HEAP // Verify a pointer is a valid object pointer. static void VerifyPointer(Object* p); @@ -1637,17 +1586,39 @@ // Prints this object without details to a message accumulator. void ShortPrint(StringStream* accumulator); - // Casting: This cast is only needed to satisfy macros in objects-inl.h. - static Object* cast(Object* value) { return value; } + DECLARE_CAST(Object) // Layout description. static const int kHeaderSize = 0; // Object does not take up any space. +#ifdef OBJECT_PRINT + // For our gdb macros, we should perhaps change these in the future. + void Print(); + + // Prints this object with details. + void Print(OStream& os); // NOLINT +#endif + private: + friend class LookupIterator; + friend class PrototypeIterator; + + // Return the map of the root of object's prototype chain. + Map* GetRootMap(Isolate* isolate); + DISALLOW_IMPLICIT_CONSTRUCTORS(Object); }; +struct Brief { + explicit Brief(const Object* const v) : value(v) {} + const Object* value; +}; + + +OStream& operator<<(OStream& os, const Brief& v); + + // Smi represents integer Numbers that can be stored in 31 bits. // Smis are immediate which means they are NOT allocated in the heap. // The this pointer has the following format: [31 bit signed int] 0 @@ -1657,7 +1628,7 @@ class Smi: public Object { public: // Returns the integer value. - inline int value(); + inline int value() const; // Convert a value to a Smi object. static inline Smi* FromInt(int value); @@ -1667,13 +1638,10 @@ // Returns whether value can be represented in a Smi. static inline bool IsValid(intptr_t value); - // Casting. - static inline Smi* cast(Object* object); + DECLARE_CAST(Smi) // Dispatched behavior. 
- void SmiPrint(FILE* out = stdout); - void SmiPrint(StringStream* accumulator); - + void SmiPrint(OStream& os) const; // NOLINT DECLARE_VERIFIER(Smi) static const int kMinValue = @@ -1685,72 +1653,6 @@ }; -// Failure is used for reporting out of memory situations and -// propagating exceptions through the runtime system. Failure objects -// are transient and cannot occur as part of the object graph. -// -// Failures are a single word, encoded as follows: -// +-------------------------+---+--+--+ -// |.........unused..........|sss|tt|11| -// +-------------------------+---+--+--+ -// 7 6 4 32 10 -// -// -// The low two bits, 0-1, are the failure tag, 11. The next two bits, -// 2-3, are a failure type tag 'tt' with possible values: -// 00 RETRY_AFTER_GC -// 01 EXCEPTION -// 10 INTERNAL_ERROR -// 11 OUT_OF_MEMORY_EXCEPTION -// -// The next three bits, 4-6, are an allocation space tag 'sss'. The -// allocation space tag is 000 for all failure types except -// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the -// allocation spaces (the encoding is found in globals.h). - -// Failure type tag info. -const int kFailureTypeTagSize = 2; -const int kFailureTypeTagMask = (1 << kFailureTypeTagSize) - 1; - -class Failure: public MaybeObject { - public: - // RuntimeStubs assumes EXCEPTION = 1 in the compiler-generated code. - enum Type { - RETRY_AFTER_GC = 0, - EXCEPTION = 1, // Returning this marker tells the real exception - // is in Isolate::pending_exception. - INTERNAL_ERROR = 2, - OUT_OF_MEMORY_EXCEPTION = 3 - }; - - inline Type type() const; - - // Returns the space that needs to be collected for RetryAfterGC failures. - inline AllocationSpace allocation_space() const; - - inline bool IsInternalError() const; - - static inline Failure* RetryAfterGC(AllocationSpace space); - static inline Failure* RetryAfterGC(); // NEW_SPACE - static inline Failure* Exception(); - static inline Failure* InternalError(); - // Casting. 
- static inline Failure* cast(MaybeObject* object); - - // Dispatched behavior. - void FailurePrint(FILE* out = stdout); - void FailurePrint(StringStream* accumulator); - - DECLARE_VERIFIER(Failure) - - private: - inline intptr_t value() const; - static inline Failure* Construct(Type type, intptr_t value = 0); - - DISALLOW_IMPLICIT_CONSTRUCTORS(Failure); -}; - - // Heap objects typically have a map pointer in their first word. However, // during GC other data (e.g. mark bits, forwarding addresses) is sometimes // encoded in the first word. The class MapWord is an abstraction of the @@ -1760,7 +1662,7 @@ // Normal state: the map word contains a map pointer. // Create a map word from a map pointer. - static inline MapWord FromMap(Map* map); + static inline MapWord FromMap(const Map* map); // View this map word as a map pointer. inline Map* ToMap(); @@ -1804,23 +1706,32 @@ public: // [map]: Contains a map which contains the object's reflective // information. - inline Map* map(); + inline Map* map() const; inline void set_map(Map* value); // The no-write-barrier version. This is OK if the object is white and in // new space, or if the value is an immortal immutable object, like the maps // of primitive (non-JS) objects like strings, heap numbers etc. inline void set_map_no_write_barrier(Map* value); + // Get the map using acquire load. + inline Map* synchronized_map(); + inline MapWord synchronized_map_word() const; + + // Set the map using release store + inline void synchronized_set_map(Map* value); + inline void synchronized_set_map_no_write_barrier(Map* value); + inline void synchronized_set_map_word(MapWord map_word); + // During garbage collection, the map word of a heap object does not // necessarily contain a map pointer. - inline MapWord map_word(); + inline MapWord map_word() const; inline void set_map_word(MapWord map_word); // The Heap the object was allocated in. Used also to access Isolate. 
- inline Heap* GetHeap(); + inline Heap* GetHeap() const; // Convenience method to get current isolate. - inline Isolate* GetIsolate(); + inline Isolate* GetIsolate() const; // Converts an address to a HeapObject pointer. static inline HeapObject* FromAddress(Address address); @@ -1840,6 +1751,10 @@ // Returns the heap object's size in bytes inline int Size(); + // Returns true if this heap object may contain pointers to objects in new + // space. + inline bool MayContainNewSpacePointers(); + // Given a heap object's map pointer, returns the heap size in bytes // Useful when the map pointer field is used for other purposes. // GC internal. @@ -1858,8 +1773,7 @@ Handle<Name> name, Handle<Code> code); - // Casting. - static inline HeapObject* cast(Object* obj); + DECLARE_CAST(HeapObject) // Return the write barrier mode for this. Callers of this function // must be able to present a reference to an DisallowHeapAllocation @@ -1870,9 +1784,9 @@ const DisallowHeapAllocation& promise); // Dispatched behavior. - void HeapObjectShortPrint(StringStream* accumulator); + void HeapObjectShortPrint(OStream& os); // NOLINT #ifdef OBJECT_PRINT - void PrintHeader(FILE* out, const char* id); + void PrintHeader(OStream& os, const char* id); // NOLINT #endif DECLARE_PRINTER(HeapObject) DECLARE_VERIFIER(HeapObject) @@ -1890,7 +1804,7 @@ static const int kMapOffset = Object::kHeaderSize; static const int kHeaderSize = kMapOffset + kPointerSize; - STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset); + STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset); protected: // helpers for calling an ObjectVisitor to iterate over pointers in the @@ -1951,17 +1865,15 @@ class HeapNumber: public HeapObject { public: // [value]: number value. - inline double value(); + inline double value() const; inline void set_value(double value); - // Casting. - static inline HeapNumber* cast(Object* obj); + DECLARE_CAST(HeapNumber) // Dispatched behavior. 
bool HeapNumberBooleanValue(); - void HeapNumberPrint(FILE* out = stdout); - void HeapNumberPrint(StringStream* accumulator); + void HeapNumberPrint(OStream& os); // NOLINT DECLARE_VERIFIER(HeapNumber) inline int get_exponent(); @@ -1970,11 +1882,18 @@ // Layout description. static const int kValueOffset = HeapObject::kHeaderSize; // IEEE doubles are two 32 bit words. The first is just mantissa, the second - // is a mixture of sign, exponent and mantissa. Our current platforms are all - // little endian apart from non-EABI arm which is little endian with big - // endian floating point word ordering! + // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit + // words within double numbers are endian dependent and they are set + // accordingly. +#if defined(V8_TARGET_LITTLE_ENDIAN) static const int kMantissaOffset = kValueOffset; static const int kExponentOffset = kValueOffset + 4; +#elif defined(V8_TARGET_BIG_ENDIAN) + static const int kMantissaOffset = kValueOffset + 4; + static const int kExponentOffset = kValueOffset; +#else +#error Unknown byte ordering +#endif static const int kSize = kValueOffset + kDoubleSize; static const uint32_t kSignMask = 0x80000000u; @@ -2028,13 +1947,6 @@ FORCE_DELETION }; - // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas - // a keyed store is of the form a[expression] = foo. - enum StoreFromKeyed { - MAY_BE_STORE_FROM_KEYED, - CERTAINLY_NOT_STORE_FROM_KEYED - }; - // Internal properties (e.g. the hidden properties dictionary) might // be added even though the receiver is non-extensible. enum ExtensibilityCheck { @@ -2042,36 +1954,34 @@ OMIT_EXTENSIBILITY_CHECK }; - // Casting. - static inline JSReceiver* cast(Object* obj); + DECLARE_CAST(JSReceiver) - // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5. 
- static Handle<Object> SetProperty(Handle<JSReceiver> object, - Handle<Name> key, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_mode = - MAY_BE_STORE_FROM_KEYED); - static Handle<Object> SetElement(Handle<JSReceiver> object, - uint32_t index, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode); + MUST_USE_RESULT static MaybeHandle<Object> SetElement( + Handle<JSReceiver> object, + uint32_t index, + Handle<Object> value, + PropertyAttributes attributes, + StrictMode strict_mode); // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6. - static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name); - static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name); - static inline bool HasElement(Handle<JSReceiver> object, uint32_t index); - static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index); + MUST_USE_RESULT static inline Maybe<bool> HasProperty( + Handle<JSReceiver> object, Handle<Name> name); + MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(Handle<JSReceiver>, + Handle<Name> name); + MUST_USE_RESULT static inline Maybe<bool> HasElement( + Handle<JSReceiver> object, uint32_t index); + MUST_USE_RESULT static inline Maybe<bool> HasOwnElement( + Handle<JSReceiver> object, uint32_t index); // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7. 
- static Handle<Object> DeleteProperty(Handle<JSReceiver> object, - Handle<Name> name, - DeleteMode mode = NORMAL_DELETION); - static Handle<Object> DeleteElement(Handle<JSReceiver> object, - uint32_t index, - DeleteMode mode = NORMAL_DELETION); + MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty( + Handle<JSReceiver> object, + Handle<Name> name, + DeleteMode mode = NORMAL_DELETION); + MUST_USE_RESULT static MaybeHandle<Object> DeleteElement( + Handle<JSReceiver> object, + uint32_t index, + DeleteMode mode = NORMAL_DELETION); // Tests for the fast common case for property enumeration. bool IsSimpleEnum(); @@ -2083,26 +1993,17 @@ // function that was used to instantiate the object). String* constructor_name(); - static inline PropertyAttributes GetPropertyAttribute( - Handle<JSReceiver> object, - Handle<Name> name); - static PropertyAttributes GetPropertyAttributeWithReceiver( - Handle<JSReceiver> object, - Handle<JSReceiver> receiver, - Handle<Name> name); - static PropertyAttributes GetLocalPropertyAttribute( - Handle<JSReceiver> object, - Handle<Name> name); - - static inline PropertyAttributes GetElementAttribute( - Handle<JSReceiver> object, - uint32_t index); - static inline PropertyAttributes GetLocalElementAttribute( - Handle<JSReceiver> object, - uint32_t index); - - // Return the object's prototype (might be Heap::null_value()). 
- inline Object* GetPrototype(); + MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes( + Handle<JSReceiver> object, Handle<Name> name); + MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes( + LookupIterator* it); + MUST_USE_RESULT static Maybe<PropertyAttributes> GetOwnPropertyAttributes( + Handle<JSReceiver> object, Handle<Name> name); + + MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttribute( + Handle<JSReceiver> object, uint32_t index); + MUST_USE_RESULT static inline Maybe<PropertyAttributes> + GetOwnElementAttribute(Handle<JSReceiver> object, uint32_t index); // Return the constructor function (may be Heap::null_value()). inline Object* GetConstructor(); @@ -2113,44 +2014,34 @@ // Retrieves a permanent object identity hash code. May create and store a // hash code if needed and none exists. - inline static Handle<Object> GetOrCreateIdentityHash( + inline static Handle<Smi> GetOrCreateIdentityHash( Handle<JSReceiver> object); // Lookup a property. If found, the result is valid and has // detailed information. - void LocalLookup(Name* name, LookupResult* result, - bool search_hidden_prototypes = false); - void Lookup(Name* name, LookupResult* result); - - protected: - Smi* GenerateIdentityHash(); - - static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object, - Handle<JSReceiver> setter, - Handle<Object> value); - - private: - static PropertyAttributes GetPropertyAttributeForResult( + void LookupOwn(Handle<Name> name, LookupResult* result, + bool search_hidden_prototypes = false); + void Lookup(Handle<Name> name, LookupResult* result); + + enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS }; + + // Computes the enumerable keys for a JSObject. Used for implementing + // "for (n in object) { }". 
+ MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys( Handle<JSReceiver> object, - Handle<JSReceiver> receiver, - LookupResult* result, - Handle<Name> name, - bool continue_search); - - static Handle<Object> SetProperty(Handle<JSReceiver> receiver, - LookupResult* result, - Handle<Name> key, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_from_keyed); + KeyCollectionType type); + private: DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver); }; // Forward declaration for JSObject::GetOrCreateHiddenPropertiesHashTable. class ObjectHashTable; +// Forward declaration for JSObject::Copy. +class AllocationSite; + + // The JSObject describes real heap allocated JavaScript objects with // properties. // Note that the map of JSObject changes during execution to enable inline @@ -2185,7 +2076,10 @@ // arguments object. DECL_ACCESSORS(elements, FixedArrayBase) inline void initialize_elements(); - MUST_USE_RESULT inline MaybeObject* ResetElements(); + static void ResetElements(Handle<JSObject> object); + static inline void SetMapAndElements(Handle<JSObject> object, + Handle<Map> map, + Handle<FixedArrayBase> elements); inline ElementsKind GetElementsKind(); inline ElementsAccessor* GetElementsAccessor(); // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind. @@ -2234,15 +2128,9 @@ bool HasDictionaryArgumentsElements(); inline SeededNumberDictionary* element_dictionary(); // Gets slow elements. - inline void set_map_and_elements( - Map* map, - FixedArrayBase* value, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER); - // Requires: HasFastElements(). static Handle<FixedArray> EnsureWritableFastElements( Handle<JSObject> object); - MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements(); // Collects elements starting at index 0. // Undefined values are placed after non-undefined values. 
@@ -2250,56 +2138,32 @@ static Handle<Object> PrepareElementsForSort(Handle<JSObject> object, uint32_t limit); // As PrepareElementsForSort, but only on objects where elements is - // a dictionary, and it will stay a dictionary. + // a dictionary, and it will stay a dictionary. Collates undefined and + // unexisting elements below limit from position zero of the elements. static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object, uint32_t limit); - MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit); - - static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object, - Handle<Object> receiver, - Handle<Object> structure, - Handle<Name> name); - static Handle<Object> SetPropertyWithCallback( - Handle<JSObject> object, - Handle<Object> structure, - Handle<Name> name, - Handle<Object> value, - Handle<JSObject> holder, - StrictMode strict_mode); - - static Handle<Object> SetPropertyWithInterceptor( - Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode); + MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithInterceptor( + LookupIterator* it, Handle<Object> value); - static Handle<Object> SetPropertyForResult( - Handle<JSObject> object, - LookupResult* result, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED); + // SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to + // grant an exemption to ExecutableAccessor callbacks in some cases. 
+ enum ExecutableAccessorInfoHandling { + DEFAULT_HANDLING, + DONT_FORCE_FIELD + }; - static Handle<Object> SetLocalPropertyIgnoreAttributes( + MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes( Handle<JSObject> object, Handle<Name> key, Handle<Object> value, PropertyAttributes attributes, - ValueType value_type = OPTIMAL_REPRESENTATION, - StoreMode mode = ALLOW_AS_CONSTANT, - ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK); - - static inline Handle<String> ExpectedTransitionKey(Handle<Map> map); - static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map); + ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK, + StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED, + ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING); - // Try to follow an existing transition to a field with attributes NONE. The - // return value indicates whether the transition was successful. - static inline Handle<Map> FindTransitionToField(Handle<Map> map, - Handle<Name> key); + static void AddProperty(Handle<JSObject> object, Handle<Name> key, + Handle<Object> value, PropertyAttributes attributes); // Extend the receiver with a single fast property appeared first in the // passed map. This also extends the property backing store if necessary. @@ -2310,12 +2174,14 @@ static void MigrateInstance(Handle<JSObject> instance); // Migrates the given object only if the target map is already available, - // or returns an empty handle if such a map is not yet available. - static Handle<Object> TryMigrateInstance(Handle<JSObject> instance); + // or returns false if such a map is not yet available. + static bool TryMigrateInstance(Handle<JSObject> instance); // Retrieve a value in a normalized object given a lookup result. // Handles the special representation of JS global objects. 
Object* GetNormalizedProperty(const LookupResult* result); + static Handle<Object> GetNormalizedProperty(Handle<JSObject> object, + const LookupResult* result); // Sets the property value in a normalized object given a lookup result. // Handles the special representation of JS global objects. @@ -2330,68 +2196,50 @@ Handle<Object> value, PropertyDetails details); - static void OptimizeAsPrototype(Handle<JSObject> object); + static void OptimizeAsPrototype(Handle<JSObject> object, + PrototypeOptimizationMode mode); + static void ReoptimizeIfPrototype(Handle<JSObject> object); // Retrieve interceptors. InterceptorInfo* GetNamedInterceptor(); InterceptorInfo* GetIndexedInterceptor(); // Used from JSReceiver. - static PropertyAttributes GetPropertyAttributePostInterceptor( - Handle<JSObject> object, - Handle<JSObject> receiver, - Handle<Name> name, - bool continue_search); - static PropertyAttributes GetPropertyAttributeWithInterceptor( - Handle<JSObject> object, - Handle<JSObject> receiver, - Handle<Name> name, - bool continue_search); - static PropertyAttributes GetPropertyAttributeWithFailedAccessCheck( - Handle<JSObject> object, - LookupResult* result, - Handle<Name> name, - bool continue_search); - static PropertyAttributes GetElementAttributeWithReceiver( - Handle<JSObject> object, - Handle<JSReceiver> receiver, - uint32_t index, - bool continue_search); + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetPropertyAttributesWithInterceptor(Handle<JSObject> holder, + Handle<Object> receiver, + Handle<Name> name); + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it); + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetElementAttributeWithReceiver(Handle<JSObject> object, + Handle<JSReceiver> receiver, + uint32_t index, bool check_prototype); // Retrieves an AccessorPair property from the given object. Might return // undefined if the property doesn't exist or is of a different kind. 
- static Handle<Object> GetAccessor(Handle<JSObject> object, - Handle<Name> name, - AccessorComponent component); + MUST_USE_RESULT static MaybeHandle<Object> GetAccessor( + Handle<JSObject> object, + Handle<Name> name, + AccessorComponent component); // Defines an AccessorPair property on the given object. - // TODO(mstarzinger): Rename to SetAccessor() and return empty handle on - // exception instead of letting callers check for scheduled exception. - static void DefineAccessor(Handle<JSObject> object, - Handle<Name> name, - Handle<Object> getter, - Handle<Object> setter, - PropertyAttributes attributes, - v8::AccessControl access_control = v8::DEFAULT); + // TODO(mstarzinger): Rename to SetAccessor(). + static MaybeHandle<Object> DefineAccessor(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> getter, + Handle<Object> setter, + PropertyAttributes attributes); // Defines an AccessorInfo property on the given object. - static Handle<Object> SetAccessor(Handle<JSObject> object, - Handle<AccessorInfo> info); - - static Handle<Object> GetPropertyWithInterceptor( + MUST_USE_RESULT static MaybeHandle<Object> SetAccessor( Handle<JSObject> object, - Handle<Object> receiver, - Handle<Name> name, - PropertyAttributes* attributes); - static Handle<Object> GetPropertyPostInterceptor( + Handle<AccessorInfo> info); + + MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor( Handle<JSObject> object, Handle<Object> receiver, - Handle<Name> name, - PropertyAttributes* attributes); - MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor( - Object* receiver, - Name* name, - PropertyAttributes* attributes); + Handle<Name> name); // Returns true if this is an instance of an api function and has // been modified since it was created. May give false positives. @@ -2399,8 +2247,8 @@ // Accessors for hidden properties object. // - // Hidden properties are not local properties of the object itself. 
- // Instead they are stored in an auxiliary structure kept as a local + // Hidden properties are not own properties of the object itself. + // Instead they are stored in an auxiliary structure kept as an own // property with a special name Heap::hidden_string(). But if the // receiver is a JSGlobalProxy then the auxiliary object is a property // of its prototype, and if it's a detached proxy, then you can't have @@ -2414,7 +2262,7 @@ // Gets the value of a hidden property with the given key. Returns the hole // if the property doesn't exist (or if called on a detached proxy), // otherwise returns the value set for the key. - Object* GetHiddenProperty(Name* key); + Object* GetHiddenProperty(Handle<Name> key); // Deletes a hidden property. Deleting a non-existing property is // considered successful. static void DeleteHiddenProperty(Handle<JSObject> object, @@ -2424,7 +2272,7 @@ static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash); - inline void ValidateElements(); + static inline void ValidateElements(Handle<JSObject> object); // Makes sure that this object can contain HeapObject as elements. static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj); @@ -2470,21 +2318,25 @@ } // These methods do not perform access checks! 
- AccessorPair* GetLocalPropertyAccessorPair(Name* name); - AccessorPair* GetLocalElementAccessorPair(uint32_t index); + MUST_USE_RESULT static MaybeHandle<AccessorPair> GetOwnElementAccessorPair( + Handle<JSObject> object, + uint32_t index); - static Handle<Object> SetFastElement(Handle<JSObject> object, uint32_t index, - Handle<Object> value, - StrictMode strict_mode, - bool check_prototype); + MUST_USE_RESULT static MaybeHandle<Object> SetFastElement( + Handle<JSObject> object, + uint32_t index, + Handle<Object> value, + StrictMode strict_mode, + bool check_prototype); - static Handle<Object> SetOwnElement(Handle<JSObject> object, - uint32_t index, - Handle<Object> value, - StrictMode strict_mode); + MUST_USE_RESULT static MaybeHandle<Object> SetOwnElement( + Handle<JSObject> object, + uint32_t index, + Handle<Object> value, + StrictMode strict_mode); // Empty handle is returned if the element cannot be set to the given value. - static Handle<Object> SetElement( + MUST_USE_RESULT static MaybeHandle<Object> SetElement( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -2495,9 +2347,10 @@ // Returns the index'th element. // The undefined object if index is out of bounds. - static Handle<Object> GetElementWithInterceptor(Handle<JSObject> object, - Handle<Object> receiver, - uint32_t index); + MUST_USE_RESULT static MaybeHandle<Object> GetElementWithInterceptor( + Handle<JSObject> object, + Handle<Object> receiver, + uint32_t index); enum SetFastElementsCapacitySmiMode { kAllowSmiElements, @@ -2517,21 +2370,28 @@ Handle<JSObject> object, int capacity, int length); - MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength( - int capacity, - int length); // Lookup interceptors are used for handling properties controlled by host // objects. inline bool HasNamedInterceptor(); inline bool HasIndexedInterceptor(); + // Computes the enumerable keys from interceptors. Used for debug mirrors and + // by JSReceiver::GetKeys. 
+ MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForNamedInterceptor( + Handle<JSObject> object, + Handle<JSReceiver> receiver); + MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForIndexedInterceptor( + Handle<JSObject> object, + Handle<JSReceiver> receiver); + // Support functions for v8 api (needed for correct interceptor behavior). - static bool HasRealNamedProperty(Handle<JSObject> object, - Handle<Name> key); - static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index); - static bool HasRealNamedCallbackProperty(Handle<JSObject> object, - Handle<Name> key); + MUST_USE_RESULT static Maybe<bool> HasRealNamedProperty( + Handle<JSObject> object, Handle<Name> key); + MUST_USE_RESULT static Maybe<bool> HasRealElementProperty( + Handle<JSObject> object, uint32_t index); + MUST_USE_RESULT static Maybe<bool> HasRealNamedCallbackProperty( + Handle<JSObject> object, Handle<Name> key); // Get the header size for a JSObject. Used to compute the index of // internal fields as well as the number of internal fields. @@ -2544,27 +2404,27 @@ inline void SetInternalField(int index, Smi* value); // The following lookup functions skip interceptors. - void LocalLookupRealNamedProperty(Name* name, LookupResult* result); - void LookupRealNamedProperty(Name* name, LookupResult* result); - void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result); - void LookupCallbackProperty(Name* name, LookupResult* result); + void LookupOwnRealNamedProperty(Handle<Name> name, LookupResult* result); + void LookupRealNamedProperty(Handle<Name> name, LookupResult* result); + void LookupRealNamedPropertyInPrototypes(Handle<Name> name, + LookupResult* result); // Returns the number of properties on this object filtering out properties // with the specified attributes (ignoring interceptors). 
- int NumberOfLocalProperties(PropertyAttributes filter = NONE); + int NumberOfOwnProperties(PropertyAttributes filter = NONE); // Fill in details for properties into storage starting at the specified // index. - void GetLocalPropertyNames( + void GetOwnPropertyNames( FixedArray* storage, int index, PropertyAttributes filter = NONE); // Returns the number of properties on this object filtering out properties // with the specified attributes (ignoring interceptors). - int NumberOfLocalElements(PropertyAttributes filter); + int NumberOfOwnElements(PropertyAttributes filter); // Returns the number of enumerable elements (ignoring interceptors). int NumberOfEnumElements(); // Returns the number of elements on this object filtering out elements // with the specified attributes (ignoring interceptors). - int GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter); + int GetOwnElementKeys(FixedArray* storage, PropertyAttributes filter); // Count and fill in the enumerable elements into storage. // (storage->length() == NumberOfEnumElements()). // If storage is NULL, will count the elements without adding @@ -2576,21 +2436,10 @@ // map and the ElementsKind set. static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object, ElementsKind to_kind); - inline MUST_USE_RESULT MaybeObject* GetElementsTransitionMap( - Isolate* isolate, - ElementsKind elements_kind); - MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow( - ElementsKind elements_kind); - static void TransitionElementsKind(Handle<JSObject> object, ElementsKind to_kind); - // TODO(mstarzinger): Both public because of ConvertAnsSetLocalProperty(). static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map); - static void GeneralizeFieldRepresentation(Handle<JSObject> object, - int modify_index, - Representation new_representation, - StoreMode store_mode); // Convert the object to use the canonical dictionary // representation. 
If the object is expected to have additional properties @@ -2606,15 +2455,16 @@ Handle<JSObject> object); // Transform slow named properties to fast variants. - static void TransformToFastProperties(Handle<JSObject> object, - int unused_property_fields); + static void MigrateSlowToFast(Handle<JSObject> object, + int unused_property_fields); // Access fast-case object properties at index. - MUST_USE_RESULT inline MaybeObject* FastPropertyAt( - Representation representation, - int index); - inline Object* RawFastPropertyAt(int index); - inline void FastPropertyAtPut(int index, Object* value); + static Handle<Object> FastPropertyAt(Handle<JSObject> object, + Representation representation, + FieldIndex index); + inline Object* RawFastPropertyAt(FieldIndex index); + inline void FastPropertyAtPut(FieldIndex index, Object* value); + void WriteToField(int descriptor, Object* value); // Access to in object properties. inline int GetInObjectPropertyOffset(int index); @@ -2625,9 +2475,8 @@ = UPDATE_WRITE_BARRIER); // Set the object's prototype (only JSReceiver and null are allowed values). - static Handle<Object> SetPrototype(Handle<JSObject> object, - Handle<Object> value, - bool skip_hidden_prototypes = false); + MUST_USE_RESULT static MaybeHandle<Object> SetPrototype( + Handle<JSObject> object, Handle<Object> value, bool from_javascript); // Initializes the body after properties slot, properties slot is // initialized by set_properties. Fill the pre-allocated fields with @@ -2642,38 +2491,40 @@ bool ReferencesObject(Object* obj); // Disalow further properties to be added to the object. - static Handle<Object> PreventExtensions(Handle<JSObject> object); + MUST_USE_RESULT static MaybeHandle<Object> PreventExtensions( + Handle<JSObject> object); // ES5 Object.freeze - static Handle<Object> Freeze(Handle<JSObject> object); + MUST_USE_RESULT static MaybeHandle<Object> Freeze(Handle<JSObject> object); // Called the first time an object is observed with ES7 Object.observe. 
static void SetObserved(Handle<JSObject> object); // Copy object. - enum DeepCopyHints { - kNoHints = 0, - kObjectIsShallowArray = 1 - }; + enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 }; static Handle<JSObject> Copy(Handle<JSObject> object); - static Handle<JSObject> DeepCopy(Handle<JSObject> object, - AllocationSiteUsageContext* site_context, - DeepCopyHints hints = kNoHints); - static Handle<JSObject> DeepWalk(Handle<JSObject> object, - AllocationSiteCreationContext* site_context); + MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopy( + Handle<JSObject> object, + AllocationSiteUsageContext* site_context, + DeepCopyHints hints = kNoHints); + MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk( + Handle<JSObject> object, + AllocationSiteCreationContext* site_context); + + static Handle<Object> GetDataProperty(Handle<JSObject> object, + Handle<Name> key); - // Casting. - static inline JSObject* cast(Object* obj); + DECLARE_CAST(JSObject) // Dispatched behavior. void JSObjectShortPrint(StringStream* accumulator); DECLARE_PRINTER(JSObject) DECLARE_VERIFIER(JSObject) #ifdef OBJECT_PRINT - void PrintProperties(FILE* out = stdout); - void PrintElements(FILE* out = stdout); - void PrintTransitions(FILE* out = stdout); + void PrintProperties(OStream& os); // NOLINT + void PrintElements(OStream& os); // NOLINT + void PrintTransitions(OStream& os); // NOLINT #endif static void PrintElementsTransition( @@ -2714,12 +2565,6 @@ Object* SlowReverseLookup(Object* value); - // Maximal number of fast properties for the JSObject. Used to - // restrict the number of map transitions to avoid an explosion in - // the number of maps for objects used as dictionaries. - inline bool TooManyFastProperties( - StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED); - // Maximal number of elements (numbered 0 .. kMaxElementCount - 1). // Also maximal value of JSArray's length property. 
static const uint32_t kMaxElementCount = 0xffffffffu; @@ -2740,11 +2585,13 @@ static const int kMaxUncheckedOldFastElementsLength = 500; // Note that Page::kMaxRegularHeapObjectSize puts a limit on - // permissible values (see the ASSERT in heap.cc). + // permissible values (see the DCHECK in heap.cc). static const int kInitialMaxFastElementArray = 100000; - static const int kFastPropertiesSoftLimit = 12; - static const int kMaxFastProperties = 64; + // This constant applies only to the initial map of "$Object" aka + // "global.Object" and not to arbitrary other JSObject maps. + static const int kInitialGlobalObjectUnusedPropertiesCount = 4; + static const int kMaxInstanceSize = 255 * kPointerSize; // When extending the backing storage for property values, we increase // its size by more than the 1 entry necessary, so sequentially adding fields @@ -2756,57 +2603,82 @@ static const int kElementsOffset = kPropertiesOffset + kPointerSize; static const int kHeaderSize = kElementsOffset + kPointerSize; - STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize); + STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize); class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> { public: static inline int SizeOf(Map* map, HeapObject* object); }; + Context* GetCreationContext(); + // Enqueue change record for Object.observe. May cause GC. 
static void EnqueueChangeRecord(Handle<JSObject> object, const char* type, Handle<Name> name, Handle<Object> old_value); + static void MigrateToNewProperty(Handle<JSObject> object, + Handle<Map> transition, + Handle<Object> value); + private: friend class DictionaryElementsAccessor; friend class JSReceiver; friend class Object; - static void UpdateAllocationSite(Handle<JSObject> object, - ElementsKind to_kind); + static void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map); + static void MigrateFastToSlow(Handle<JSObject> object, + Handle<Map> new_map, + int expected_additional_properties); - // Used from Object::GetProperty(). - static Handle<Object> GetPropertyWithFailedAccessCheck( - Handle<JSObject> object, - Handle<Object> receiver, - LookupResult* result, - Handle<Name> name, - PropertyAttributes* attributes); + static void SetPropertyToField(LookupResult* lookup, Handle<Object> value); - MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver, - Object* structure, - uint32_t index, - Object* holder); - static PropertyAttributes GetElementAttributeWithInterceptor( - Handle<JSObject> object, - Handle<JSReceiver> receiver, - uint32_t index, - bool continue_search); - static PropertyAttributes GetElementAttributeWithoutInterceptor( + static void ConvertAndSetOwnProperty(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes); + + static void SetPropertyToFieldWithAttributes(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes); + static void GeneralizeFieldRepresentation(Handle<JSObject> object, + int modify_index, + Representation new_representation, + Handle<HeapType> new_field_type); + + static void UpdateAllocationSite(Handle<JSObject> object, + ElementsKind to_kind); + + // Used from Object::GetProperty(). 
+ MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck( + LookupIterator* it); + + MUST_USE_RESULT static MaybeHandle<Object> GetElementWithCallback( Handle<JSObject> object, - Handle<JSReceiver> receiver, + Handle<Object> receiver, + Handle<Object> structure, uint32_t index, - bool continue_search); - static Handle<Object> SetElementWithCallback( + Handle<Object> holder); + + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetElementAttributeWithInterceptor(Handle<JSObject> object, + Handle<JSReceiver> receiver, + uint32_t index, bool continue_search); + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetElementAttributeWithoutInterceptor(Handle<JSObject> object, + Handle<JSReceiver> receiver, + uint32_t index, + bool continue_search); + MUST_USE_RESULT static MaybeHandle<Object> SetElementWithCallback( Handle<JSObject> object, Handle<Object> structure, uint32_t index, Handle<Object> value, Handle<JSObject> holder, StrictMode strict_mode); - static Handle<Object> SetElementWithInterceptor( + MUST_USE_RESULT static MaybeHandle<Object> SetElementWithInterceptor( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -2814,7 +2686,7 @@ StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode); - static Handle<Object> SetElementWithoutInterceptor( + MUST_USE_RESULT static MaybeHandle<Object> SetElementWithoutInterceptor( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -2822,13 +2694,14 @@ StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode); - static Handle<Object> SetElementWithCallbackSetterInPrototypes( + MUST_USE_RESULT + static MaybeHandle<Object> SetElementWithCallbackSetterInPrototypes( Handle<JSObject> object, uint32_t index, Handle<Object> value, bool* found, StrictMode strict_mode); - static Handle<Object> SetDictionaryElement( + MUST_USE_RESULT static MaybeHandle<Object> SetDictionaryElement( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -2836,68 
+2709,27 @@ StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode = SET_PROPERTY); - static Handle<Object> SetFastDoubleElement( + MUST_USE_RESULT static MaybeHandle<Object> SetFastDoubleElement( Handle<JSObject> object, uint32_t index, Handle<Object> value, StrictMode strict_mode, bool check_prototype = true); - // Searches the prototype chain for property 'name'. If it is found and - // has a setter, invoke it and set '*done' to true. If it is found and is - // read-only, reject and set '*done' to true. Otherwise, set '*done' to - // false. Can throw and return an empty handle with '*done==true'. - static Handle<Object> SetPropertyViaPrototypes( - Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - bool* done); - static Handle<Object> SetPropertyPostInterceptor( - Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode); - static Handle<Object> SetPropertyUsingTransition( + MUST_USE_RESULT static MaybeHandle<Object> SetPropertyUsingTransition( Handle<JSObject> object, LookupResult* lookup, Handle<Name> name, Handle<Object> value, PropertyAttributes attributes); - static Handle<Object> SetPropertyWithFailedAccessCheck( - Handle<JSObject> object, - LookupResult* result, - Handle<Name> name, - Handle<Object> value, - bool check_prototype, - StrictMode strict_mode); + MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck( + LookupIterator* it, Handle<Object> value, StrictMode strict_mode); // Add a property to an object. 
- static Handle<Object> AddProperty( - Handle<JSObject> object, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED, - ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK, - ValueType value_type = OPTIMAL_REPRESENTATION, - StoreMode mode = ALLOW_AS_CONSTANT, - TransitionFlag flag = INSERT_TRANSITION); - - // Add a constant function property to a fast-case object. - // This leaves a CONSTANT_TRANSITION in the old map, and - // if it is called on a second object with this map, a - // normal property is added instead, with a map transition. - // This avoids the creation of many maps with the same constant - // function, all orphaned. - static void AddConstantProperty(Handle<JSObject> object, - Handle<Name> name, - Handle<Object> constant, - PropertyAttributes attributes, - TransitionFlag flag); + MUST_USE_RESULT static MaybeHandle<Object> AddPropertyInternal( + Handle<JSObject> object, Handle<Name> name, Handle<Object> value, + PropertyAttributes attributes, StoreFromKeyed store_mode, + ExtensibilityCheck extensibility_check, TransitionFlag flag); // Add a property to a fast-case object. static void AddFastProperty(Handle<JSObject> object, @@ -2905,7 +2737,6 @@ Handle<Object> value, PropertyAttributes attributes, StoreFromKeyed store_mode, - ValueType value_type, TransitionFlag flag); // Add a property to a slow-case object. 
@@ -2914,25 +2745,29 @@ Handle<Object> value, PropertyAttributes attributes); - static Handle<Object> DeleteProperty(Handle<JSObject> object, - Handle<Name> name, - DeleteMode mode); + MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty( + Handle<JSObject> object, + Handle<Name> name, + DeleteMode mode); static Handle<Object> DeletePropertyPostInterceptor(Handle<JSObject> object, Handle<Name> name, DeleteMode mode); - static Handle<Object> DeletePropertyWithInterceptor(Handle<JSObject> object, - Handle<Name> name); + MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor( + Handle<JSObject> object, + Handle<Name> name); // Deletes the named property in a normalized object. static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object, Handle<Name> name, DeleteMode mode); - static Handle<Object> DeleteElement(Handle<JSObject> object, - uint32_t index, - DeleteMode mode); - static Handle<Object> DeleteElementWithInterceptor(Handle<JSObject> object, - uint32_t index); + MUST_USE_RESULT static MaybeHandle<Object> DeleteElement( + Handle<JSObject> object, + uint32_t index, + DeleteMode mode); + MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithInterceptor( + Handle<JSObject> object, + uint32_t index); bool ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind, @@ -2957,16 +2792,14 @@ uint32_t index, Handle<Object> getter, Handle<Object> setter, - PropertyAttributes attributes, - v8::AccessControl access_control); + PropertyAttributes attributes); static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object, Handle<Name> name); static void DefinePropertyAccessor(Handle<JSObject> object, Handle<Name> name, Handle<Object> getter, Handle<Object> setter, - PropertyAttributes attributes, - v8::AccessControl access_control); + PropertyAttributes attributes); // Try to define a single accessor paying attention to map transitions. // Returns false if this was not possible and we have to use the slow case. 
@@ -2994,7 +2827,7 @@ MUST_USE_RESULT Object* GetIdentityHash(); - static Handle<Object> GetOrCreateIdentityHash(Handle<JSObject> object); + static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object); DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject); }; @@ -3005,10 +2838,14 @@ class FixedArrayBase: public HeapObject { public: // [length]: length of the array. - inline int length(); + inline int length() const; inline void set_length(int value); - inline static FixedArrayBase* cast(Object* object); + // Get and set the length using acquire loads and release stores. + inline int synchronized_length() const; + inline void synchronized_set_length(int value); + + DECLARE_CAST(FixedArrayBase) // Layout description. // Length is smi tagged when it is stored. @@ -3026,6 +2863,7 @@ public: // Setter and getter for elements. inline Object* get(int index); + static inline Handle<Object> get(Handle<FixedArray> array, int index); // Setter that uses write barrier. inline void set(int index, Object* value); inline bool is_the_hole(int index); @@ -3046,19 +2884,26 @@ // Gives access to raw memory which stores the array's data. inline Object** data_start(); + inline void FillWithHoles(int from, int to); + // Shrink length and insert filler objects. void Shrink(int length); - // Copy operations. - MUST_USE_RESULT inline MaybeObject* Copy(); - MUST_USE_RESULT MaybeObject* CopySize(int new_length, - PretenureFlag pretenure = NOT_TENURED); + // Copy operation. + static Handle<FixedArray> CopySize(Handle<FixedArray> array, + int new_length, + PretenureFlag pretenure = NOT_TENURED); // Add the elements of a JSArray to this FixedArray. - MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array); - - // Compute the union of this and other. 
- MUST_USE_RESULT MaybeObject* UnionOfKeys(FixedArray* other); + MUST_USE_RESULT static MaybeHandle<FixedArray> AddKeysFromArrayLike( + Handle<FixedArray> content, + Handle<JSObject> array); + + // Computes the union of keys and return the result. + // Used for implementing "for (n in object) { }" + MUST_USE_RESULT static MaybeHandle<FixedArray> UnionOfKeys( + Handle<FixedArray> first, + Handle<FixedArray> second); // Copy a sub array from the receiver to dest. void CopyTo(int pos, FixedArray* dest, int dest_pos, int len); @@ -3074,8 +2919,7 @@ return HeapObject::RawField(this, OffsetOfElementAt(index)); } - // Casting. - static inline FixedArray* cast(Object* obj); + DECLARE_CAST(FixedArray) // Maximal allowed size, in bytes, of a single FixedArray. // Prevents overflowing size computations, as well as extreme memory @@ -3124,7 +2968,7 @@ Object* value); private: - STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize); + STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize); DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray); }; @@ -3136,18 +2980,13 @@ // Setter and getter for elements. inline double get_scalar(int index); inline int64_t get_representation(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); - // TODO(ishell): Rename as get() once all usages handlified. - inline Handle<Object> get_as_handle(int index); + static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index); inline void set(int index, double value); inline void set_the_hole(int index); // Checking for the hole. inline bool is_the_hole(int index); - // Copy operations - MUST_USE_RESULT inline MaybeObject* Copy(); - // Garbage collection support. inline static int SizeFor(int length) { return kHeaderSize + length * kDoubleSize; @@ -3156,6 +2995,8 @@ // Gives access to raw memory which stores the array's data. inline double* data_start(); + inline void FillWithHoles(int from, int to); + // Code Generation support. 
static int OffsetOfElementAt(int index) { return SizeFor(index); } @@ -3163,8 +3004,7 @@ inline static double hole_nan_as_double(); inline static double canonical_not_the_hole_nan_as_double(); - // Casting. - static inline FixedDoubleArray* cast(Object* obj); + DECLARE_CAST(FixedDoubleArray) // Maximal allowed size, in bytes, of a single FixedDoubleArray. // Prevents overflowing size computations, as well as extreme memory @@ -3183,28 +3023,142 @@ // ConstantPoolArray describes a fixed-sized array containing constant pool -// entires. -// The format of the pool is: -// [0]: Field holding the first index which is a raw code target pointer entry -// [1]: Field holding the first index which is a heap pointer entry -// [2]: Field holding the first index which is a int32 entry -// [3] ... [first_code_ptr_index() - 1] : 64 bit entries -// [first_code_ptr_index()] ... [first_heap_ptr_index() - 1] : code pointers -// [first_heap_ptr_index()] ... [first_int32_index() - 1] : heap pointers -// [first_int32_index()] ... [length - 1] : 32 bit entries -class ConstantPoolArray: public FixedArrayBase { - public: - // Getters for the field storing the first index for different type entries. - inline int first_code_ptr_index(); - inline int first_heap_ptr_index(); - inline int first_int64_index(); - inline int first_int32_index(); - - // Getters for counts of different type entries. - inline int count_of_code_ptr_entries(); - inline int count_of_heap_ptr_entries(); - inline int count_of_int64_entries(); - inline int count_of_int32_entries(); +// entries. +// +// A ConstantPoolArray can be structured in two different ways depending upon +// whether it is extended or small. The is_extended_layout() method can be used +// to discover which layout the constant pool has. 
+// +// The format of a small constant pool is: +// [kSmallLayout1Offset] : Small section layout bitmap 1 +// [kSmallLayout2Offset] : Small section layout bitmap 2 +// [first_index(INT64, SMALL_SECTION)] : 64 bit entries +// ... : ... +// [first_index(CODE_PTR, SMALL_SECTION)] : code pointer entries +// ... : ... +// [first_index(HEAP_PTR, SMALL_SECTION)] : heap pointer entries +// ... : ... +// [first_index(INT32, SMALL_SECTION)] : 32 bit entries +// ... : ... +// +// If the constant pool has an extended layout, the extended section constant +// pool also contains an extended section, which has the following format at +// location get_extended_section_header_offset(): +// [kExtendedInt64CountOffset] : count of extended 64 bit entries +// [kExtendedCodePtrCountOffset] : count of extended code pointers +// [kExtendedHeapPtrCountOffset] : count of extended heap pointers +// [kExtendedInt32CountOffset] : count of extended 32 bit entries +// [first_index(INT64, EXTENDED_SECTION)] : 64 bit entries +// ... : ... +// [first_index(CODE_PTR, EXTENDED_SECTION)]: code pointer entries +// ... : ... +// [first_index(HEAP_PTR, EXTENDED_SECTION)]: heap pointer entries +// ... : ... +// [first_index(INT32, EXTENDED_SECTION)] : 32 bit entries +// ... : ... +// +class ConstantPoolArray: public HeapObject { + public: + enum WeakObjectState { + NO_WEAK_OBJECTS, + WEAK_OBJECTS_IN_OPTIMIZED_CODE, + WEAK_OBJECTS_IN_IC + }; + + enum Type { + INT64 = 0, + CODE_PTR, + HEAP_PTR, + INT32, + // Number of types stored by the ConstantPoolArrays. 
+ NUMBER_OF_TYPES, + FIRST_TYPE = INT64, + LAST_TYPE = INT32 + }; + + enum LayoutSection { + SMALL_SECTION = 0, + EXTENDED_SECTION, + NUMBER_OF_LAYOUT_SECTIONS + }; + + class NumberOfEntries BASE_EMBEDDED { + public: + inline NumberOfEntries() { + for (int i = 0; i < NUMBER_OF_TYPES; i++) { + element_counts_[i] = 0; + } + } + + inline NumberOfEntries(int int64_count, int code_ptr_count, + int heap_ptr_count, int int32_count) { + element_counts_[INT64] = int64_count; + element_counts_[CODE_PTR] = code_ptr_count; + element_counts_[HEAP_PTR] = heap_ptr_count; + element_counts_[INT32] = int32_count; + } + + inline NumberOfEntries(ConstantPoolArray* array, LayoutSection section) { + element_counts_[INT64] = array->number_of_entries(INT64, section); + element_counts_[CODE_PTR] = array->number_of_entries(CODE_PTR, section); + element_counts_[HEAP_PTR] = array->number_of_entries(HEAP_PTR, section); + element_counts_[INT32] = array->number_of_entries(INT32, section); + } + + inline void increment(Type type); + inline int equals(const NumberOfEntries& other) const; + inline bool is_empty() const; + inline int count_of(Type type) const; + inline int base_of(Type type) const; + inline int total_count() const; + inline int are_in_range(int min, int max) const; + + private: + int element_counts_[NUMBER_OF_TYPES]; + }; + + class Iterator BASE_EMBEDDED { + public: + inline Iterator(ConstantPoolArray* array, Type type) + : array_(array), + type_(type), + final_section_(array->final_section()), + current_section_(SMALL_SECTION), + next_index_(array->first_index(type, SMALL_SECTION)) { + update_section(); + } + + inline Iterator(ConstantPoolArray* array, Type type, LayoutSection section) + : array_(array), + type_(type), + final_section_(section), + current_section_(section), + next_index_(array->first_index(type, section)) { + update_section(); + } + + inline int next_index(); + inline bool is_finished(); + + private: + inline void update_section(); + ConstantPoolArray* array_; + 
const Type type_; + const LayoutSection final_section_; + + LayoutSection current_section_; + int next_index_; + }; + + // Getters for the first index, the last index and the count of entries of + // a given type for a given layout section. + inline int first_index(Type type, LayoutSection layout_section); + inline int last_index(Type type, LayoutSection layout_section); + inline int number_of_entries(Type type, LayoutSection layout_section); + + // Returns the type of the entry at the given index. + inline Type get_type(int index); + inline bool offset_is_type(int offset, Type type); // Setter and getter for pool elements. inline Address get_code_ptr_entry(int index); @@ -3219,59 +3173,144 @@ inline void set(int index, double value); inline void set(int index, int32_t value); - // Set up initial state. - inline void SetEntryCounts(int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries); + // Setters which take a raw offset rather than an index (for code generation). + inline void set_at_offset(int offset, int32_t value); + inline void set_at_offset(int offset, int64_t value); + inline void set_at_offset(int offset, double value); + inline void set_at_offset(int offset, Address value); + inline void set_at_offset(int offset, Object* value); + + // Setter and getter for weak objects state + inline void set_weak_object_state(WeakObjectState state); + inline WeakObjectState get_weak_object_state(); + + // Returns true if the constant pool has an extended layout, false if it has + // only the small layout. + inline bool is_extended_layout(); + + // Returns the last LayoutSection in this constant pool array. + inline LayoutSection final_section(); + + // Set up initial state for a small layout constant pool array. + inline void Init(const NumberOfEntries& small); + + // Set up initial state for an extended layout constant pool array. 
+ inline void InitExtended(const NumberOfEntries& small, + const NumberOfEntries& extended); - // Copy operations - MUST_USE_RESULT inline MaybeObject* Copy(); + // Clears the pointer entries with GC safe values. + void ClearPtrEntries(Isolate* isolate); + + // returns the total number of entries in the constant pool array. + inline int length(); // Garbage collection support. - inline static int SizeFor(int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries) { - return RoundUp(OffsetAt(number_of_int64_entries, - number_of_code_ptr_entries, - number_of_heap_ptr_entries, - number_of_int32_entries), - kPointerSize); + inline int size(); + + + inline static int MaxInt64Offset(int number_of_int64) { + return kFirstEntryOffset + (number_of_int64 * kInt64Size); + } + + inline static int SizeFor(const NumberOfEntries& small) { + int size = kFirstEntryOffset + + (small.count_of(INT64) * kInt64Size) + + (small.count_of(CODE_PTR) * kPointerSize) + + (small.count_of(HEAP_PTR) * kPointerSize) + + (small.count_of(INT32) * kInt32Size); + return RoundUp(size, kPointerSize); + } + + inline static int SizeForExtended(const NumberOfEntries& small, + const NumberOfEntries& extended) { + int size = SizeFor(small); + size = RoundUp(size, kInt64Size); // Align extended header to 64 bits. + size += kExtendedFirstOffset + + (extended.count_of(INT64) * kInt64Size) + + (extended.count_of(CODE_PTR) * kPointerSize) + + (extended.count_of(HEAP_PTR) * kPointerSize) + + (extended.count_of(INT32) * kInt32Size); + return RoundUp(size, kPointerSize); + } + + inline static int entry_size(Type type) { + switch (type) { + case INT32: + return kInt32Size; + case INT64: + return kInt64Size; + case CODE_PTR: + case HEAP_PTR: + return kPointerSize; + default: + UNREACHABLE(); + return 0; + } } // Code Generation support. 
inline int OffsetOfElementAt(int index) { - ASSERT(index < length()); - if (index >= first_int32_index()) { - return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(), - count_of_heap_ptr_entries(), index - first_int32_index()); - } else if (index >= first_heap_ptr_index()) { - return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(), - index - first_heap_ptr_index(), 0); - } else if (index >= first_code_ptr_index()) { - return OffsetAt(count_of_int64_entries(), index - first_code_ptr_index(), - 0, 0); + int offset; + LayoutSection section; + if (is_extended_layout() && index >= first_extended_section_index()) { + section = EXTENDED_SECTION; + offset = get_extended_section_header_offset() + kExtendedFirstOffset; } else { - return OffsetAt(index, 0, 0, 0); + section = SMALL_SECTION; + offset = kFirstEntryOffset; } + + // Add offsets for the preceding type sections. + DCHECK(index <= last_index(LAST_TYPE, section)); + for (Type type = FIRST_TYPE; index > last_index(type, section); + type = next_type(type)) { + offset += entry_size(type) * number_of_entries(type, section); + } + + // Add offset for the index in it's type. + Type type = get_type(index); + offset += entry_size(type) * (index - first_index(type, section)); + return offset; } - // Casting. - static inline ConstantPoolArray* cast(Object* obj); + DECLARE_CAST(ConstantPoolArray) // Garbage collection support. Object** RawFieldOfElementAt(int index) { return HeapObject::RawField(this, OffsetOfElementAt(index)); } - // Layout description. - static const int kFirstCodePointerIndexOffset = FixedArray::kHeaderSize; - static const int kFirstHeapPointerIndexOffset = - kFirstCodePointerIndexOffset + kPointerSize; - static const int kFirstInt32IndexOffset = - kFirstHeapPointerIndexOffset + kPointerSize; - static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize; + // Small Layout description. 
+ static const int kSmallLayout1Offset = HeapObject::kHeaderSize; + static const int kSmallLayout2Offset = kSmallLayout1Offset + kInt32Size; + static const int kHeaderSize = kSmallLayout2Offset + kInt32Size; + static const int kFirstEntryOffset = ROUND_UP(kHeaderSize, kInt64Size); + + static const int kSmallLayoutCountBits = 10; + static const int kMaxSmallEntriesPerType = (1 << kSmallLayoutCountBits) - 1; + + // Fields in kSmallLayout1Offset. + class Int64CountField: public BitField<int, 1, kSmallLayoutCountBits> {}; + class CodePtrCountField: public BitField<int, 11, kSmallLayoutCountBits> {}; + class HeapPtrCountField: public BitField<int, 21, kSmallLayoutCountBits> {}; + class IsExtendedField: public BitField<bool, 31, 1> {}; + + // Fields in kSmallLayout2Offset. + class Int32CountField: public BitField<int, 1, kSmallLayoutCountBits> {}; + class TotalCountField: public BitField<int, 11, 12> {}; + class WeakObjectStateField: public BitField<WeakObjectState, 23, 2> {}; + + // Extended layout description, which starts at + // get_extended_section_header_offset(). + static const int kExtendedInt64CountOffset = 0; + static const int kExtendedCodePtrCountOffset = + kExtendedInt64CountOffset + kPointerSize; + static const int kExtendedHeapPtrCountOffset = + kExtendedCodePtrCountOffset + kPointerSize; + static const int kExtendedInt32CountOffset = + kExtendedHeapPtrCountOffset + kPointerSize; + static const int kExtendedFirstOffset = + kExtendedInt32CountOffset + kPointerSize; // Dispatched behavior. 
void ConstantPoolIterateBody(ObjectVisitor* v); @@ -3280,19 +3319,13 @@ DECLARE_VERIFIER(ConstantPoolArray) private: - inline void set_first_code_ptr_index(int value); - inline void set_first_heap_ptr_index(int value); - inline void set_first_int32_index(int value); - - inline static int OffsetAt(int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries) { - return kFirstOffset - + (number_of_int64_entries * kInt64Size) - + (number_of_code_ptr_entries * kPointerSize) - + (number_of_heap_ptr_entries * kPointerSize) - + (number_of_int32_entries * kInt32Size); + inline int first_extended_section_index(); + inline int get_extended_section_header_offset(); + + inline static Type next_type(Type type) { + DCHECK(type >= FIRST_TYPE && type < NUMBER_OF_TYPES); + int type_int = static_cast<int>(type); + return static_cast<Type>(++type_int); } DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray); @@ -3309,23 +3342,6 @@ // [2 + number of descriptors * kDescriptorSize]: start of slack class DescriptorArray: public FixedArray { public: - // WhitenessWitness is used to prove that a descriptor array is white - // (unmarked), so incremental write barriers can be skipped because the - // marking invariant cannot be broken and slots pointing into evacuation - // candidates will be discovered when the object is scanned. A witness is - // always stack-allocated right after creating an array. By allocating a - // witness, incremental marking is globally disabled. The witness is then - // passed along wherever needed to statically prove that the array is known to - // be white. - class WhitenessWitness { - public: - inline explicit WhitenessWitness(FixedArray* array); - inline ~WhitenessWitness(); - - private: - IncrementalMarking* marking_; - }; - // Returns true for both shared empty_descriptor_array and for smis, which the // map uses to encode additional bit fields when the descriptor array is not // yet used. 
@@ -3333,7 +3349,7 @@ // Returns the number of descriptors in the array. int number_of_descriptors() { - ASSERT(length() >= kFirstIndex || IsEmpty()); + DCHECK(length() >= kFirstIndex || IsEmpty()); int len = length(); return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value(); } @@ -3359,7 +3375,7 @@ } FixedArray* GetEnumCache() { - ASSERT(HasEnumCache()); + DCHECK(HasEnumCache()); FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex)); return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex)); } @@ -3373,13 +3389,13 @@ } FixedArray* GetEnumIndicesCache() { - ASSERT(HasEnumIndicesCache()); + DCHECK(HasEnumIndicesCache()); FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex)); return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex)); } Object** GetEnumCacheSlot() { - ASSERT(HasEnumCache()); + DCHECK(HasEnumCache()); return HeapObject::RawField(reinterpret_cast<HeapObject*>(this), kEnumCacheOffset); } @@ -3392,16 +3408,21 @@ FixedArray* new_cache, Object* new_index_cache); + bool CanHoldValue(int descriptor, Object* value); + // Accessors for fetching instance descriptor at descriptor number. 
inline Name* GetKey(int descriptor_number); inline Object** GetKeySlot(int descriptor_number); inline Object* GetValue(int descriptor_number); + inline void SetValue(int descriptor_number, Object* value); inline Object** GetValueSlot(int descriptor_number); + static inline int GetValueOffset(int descriptor_number); inline Object** GetDescriptorStartSlot(int descriptor_number); inline Object** GetDescriptorEndSlot(int descriptor_number); inline PropertyDetails GetDetails(int descriptor_number); inline PropertyType GetType(int descriptor_number); inline int GetFieldIndex(int descriptor_number); + inline HeapType* GetFieldType(int descriptor_number); inline Object* GetConstant(int descriptor_number); inline Object* GetCallbacksObject(int descriptor_number); inline AccessorDescriptor* GetCallbacks(int descriptor_number); @@ -3409,59 +3430,28 @@ inline Name* GetSortedKey(int descriptor_number); inline int GetSortedKeyIndex(int descriptor_number); inline void SetSortedKey(int pointer, int descriptor_number); - inline void InitializeRepresentations(Representation representation); inline void SetRepresentation(int descriptor_number, Representation representation); // Accessor for complete descriptor. inline void Get(int descriptor_number, Descriptor* desc); - inline void Set(int descriptor_number, - Descriptor* desc, - const WhitenessWitness&); inline void Set(int descriptor_number, Descriptor* desc); + void Replace(int descriptor_number, Descriptor* descriptor); // Append automatically sets the enumeration index. This should only be used // to add descriptors in bulk at the end, followed by sorting the descriptor // array. - inline void Append(Descriptor* desc, const WhitenessWitness&); inline void Append(Descriptor* desc); - // Transfer a complete descriptor from the src descriptor array to this - // descriptor array. 
- void CopyFrom(int dst_index, - DescriptorArray* src, - int src_index, - const WhitenessWitness&); - static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc, - int verbatim, - int valid, - int new_size, - int modify_index, - StoreMode store_mode, - Handle<DescriptorArray> other); - MUST_USE_RESULT MaybeObject* Merge(int verbatim, - int valid, - int new_size, - int modify_index, - StoreMode store_mode, - DescriptorArray* other); - - bool IsMoreGeneralThan(int verbatim, - int valid, - int new_size, - DescriptorArray* other); - - MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index) { - return CopyUpToAddAttributes(enumeration_index, NONE); - } + static Handle<DescriptorArray> CopyUpTo(Handle<DescriptorArray> desc, + int enumeration_index, + int slack = 0); static Handle<DescriptorArray> CopyUpToAddAttributes( Handle<DescriptorArray> desc, int enumeration_index, - PropertyAttributes attributes); - MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes( - int enumeration_index, - PropertyAttributes attributes); + PropertyAttributes attributes, + int slack = 0); // Sort the instance descriptors by the hash codes of their keys. void Sort(); @@ -3475,12 +3465,11 @@ // Allocates a DescriptorArray, but returns the singleton // empty descriptor array object if number_of_descriptors is 0. - MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate, - int number_of_descriptors, - int slack = 0); + static Handle<DescriptorArray> Allocate(Isolate* isolate, + int number_of_descriptors, + int slack = 0); - // Casting. - static inline DescriptorArray* cast(Object* obj); + DECLARE_CAST(DescriptorArray) // Constant for denoting key was not found. static const int kNotFound = -1; @@ -3510,7 +3499,7 @@ #ifdef OBJECT_PRINT // Print all the descriptors. 
- void PrintDescriptors(FILE* out = stdout); + void PrintDescriptors(OStream& os); // NOLINT #endif #ifdef DEBUG @@ -3531,6 +3520,23 @@ } private: + // WhitenessWitness is used to prove that a descriptor array is white + // (unmarked), so incremental write barriers can be skipped because the + // marking invariant cannot be broken and slots pointing into evacuation + // candidates will be discovered when the object is scanned. A witness is + // always stack-allocated right after creating an array. By allocating a + // witness, incremental marking is globally disabled. The witness is then + // passed along wherever needed to statically prove that the array is known to + // be white. + class WhitenessWitness { + public: + inline explicit WhitenessWitness(DescriptorArray* array); + inline ~WhitenessWitness(); + + private: + IncrementalMarking* marking_; + }; + // An entry in a DescriptorArray, represented as an (array, index) pair. class Entry { public: @@ -3564,6 +3570,18 @@ kDescriptorValue; } + // Transfer a complete descriptor from the src descriptor array to this + // descriptor array. + void CopyFrom(int index, + DescriptorArray* src, + const WhitenessWitness&); + + inline void Set(int descriptor_number, + Descriptor* desc, + const WhitenessWitness&); + + inline void Append(Descriptor* desc, const WhitenessWitness&); + // Swap first and second descriptor. inline void SwapSortedKeys(int first, int second); @@ -3603,7 +3621,7 @@ // // Returns the hash value for object. // static uint32_t HashForObject(Key key, Object* object); // // Convert key to an object. -// static inline Object* AsObject(Heap* heap, Key key); +// static inline Handle<Object> AsHandle(Isolate* isolate, Key key); // // The prefix size indicates number of elements in the beginning // // of the backing storage. 
// static const int kPrefixSize = ..; @@ -3620,24 +3638,23 @@ static const bool UsesSeed = false; static uint32_t Hash(Key key) { return 0; } static uint32_t SeededHash(Key key, uint32_t seed) { - ASSERT(UsesSeed); + DCHECK(UsesSeed); return Hash(key); } static uint32_t HashForObject(Key key, Object* object) { return 0; } static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) { - ASSERT(UsesSeed); + DCHECK(UsesSeed); return HashForObject(key, object); } }; -template<typename Shape, typename Key> +template<typename Derived, typename Shape, typename Key> class HashTable: public FixedArray { public: // Wrapper methods inline uint32_t Hash(Key key) { if (Shape::UsesSeed) { - return Shape::SeededHash(key, - GetHeap()->HashSeed()); + return Shape::SeededHash(key, GetHeap()->HashSeed()); } else { return Shape::Hash(key); } @@ -3645,8 +3662,7 @@ inline uint32_t HashForObject(Key key, Object* object) { if (Shape::UsesSeed) { - return Shape::SeededHashForObject(key, - GetHeap()->HashSeed(), object); + return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object); } else { return Shape::HashForObject(key, object); } @@ -3682,9 +3698,9 @@ SetNumberOfDeletedElements(NumberOfDeletedElements() + n); } - // Returns a new HashTable object. Might return Failure. - MUST_USE_RESULT static MaybeObject* Allocate( - Heap* heap, + // Returns a new HashTable object. + MUST_USE_RESULT static Handle<Derived> New( + Isolate* isolate, int at_least_space_for, MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY, PretenureFlag pretenure = NOT_TENURED); @@ -3706,8 +3722,7 @@ void IteratePrefix(ObjectVisitor* visitor); void IterateElements(ObjectVisitor* visitor); - // Casting. - static inline HashTable* cast(Object* obj); + DECLARE_CAST(HashTable) // Compute the probe offset (quadratic probing). 
INLINE(static uint32_t GetProbeOffset(uint32_t n)) { @@ -3743,7 +3758,6 @@ void Rehash(Key key); protected: - friend class ObjectHashSet; friend class ObjectHashTable; // Find the entry at which to insert element with the given key that @@ -3770,15 +3784,15 @@ // To scale a computed hash code to fit within the hash table, we // use bit-wise AND with a mask, so the capacity must be positive // and non-zero. - ASSERT(capacity > 0); - ASSERT(capacity <= kMaxCapacity); + DCHECK(capacity > 0); + DCHECK(capacity <= kMaxCapacity); set(kCapacityIndex, Smi::FromInt(capacity)); } // Returns probe entry. static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) { - ASSERT(IsPowerOf2(size)); + DCHECK(IsPowerOf2(size)); return (hash + GetProbeOffset(number)) & (size - 1); } @@ -3791,6 +3805,17 @@ return (last + number) & (size - 1); } + // Attempt to shrink hash table after removal of key. + MUST_USE_RESULT static Handle<Derived> Shrink(Handle<Derived> table, Key key); + + // Ensure enough space for n additional elements. + MUST_USE_RESULT static Handle<Derived> EnsureCapacity( + Handle<Derived> table, + int n, + Key key, + PretenureFlag pretenure = NOT_TENURED); + + private: // Returns _expected_ if one of entries given by the first _probe_ probes is // equal to _expected_. Otherwise, returns the entry given by the probe // number _probe_. @@ -3799,16 +3824,7 @@ void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode); // Rehashes this hash-table into the new table. - MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key); - - // Attempt to shrink hash table after removal of key. - MUST_USE_RESULT MaybeObject* Shrink(Key key); - - // Ensure enough space for n additional elements. - MUST_USE_RESULT MaybeObject* EnsureCapacity( - int n, - Key key, - PretenureFlag pretenure = NOT_TENURED); + void Rehash(Handle<Derived> new_table, Key key); }; @@ -3822,8 +3838,7 @@ // Returns the hash value for object. 
virtual uint32_t HashForObject(Object* key) = 0; // Returns the key object for storing into the hash table. - // If allocations fails a failure object is returned. - MUST_USE_RESULT virtual MaybeObject* AsObject(Heap* heap) = 0; + MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) = 0; // Required. virtual ~HashTableKey() {} }; @@ -3834,16 +3849,16 @@ static inline bool IsMatch(HashTableKey* key, Object* value) { return key->IsMatch(value); } + static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); } + static inline uint32_t HashForObject(HashTableKey* key, Object* object) { return key->HashForObject(object); } - MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, - HashTableKey* key) { - return key->AsObject(heap); - } + + static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key); static const int kPrefixSize = 0; static const int kEntrySize = 1; @@ -3855,23 +3870,32 @@ // // No special elements in the prefix and the element size is 1 // because only the string itself (the key) needs to be stored. -class StringTable: public HashTable<StringTableShape, HashTableKey*> { - public: - // Find string in the string table. If it is not there yet, it is - // added. The return value is the string table which might have - // been enlarged. If the return value is not a failure, the string - // pointer *s is set to the string found. - MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s); - MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s); +class StringTable: public HashTable<StringTable, + StringTableShape, + HashTableKey*> { + public: + // Find string in the string table. If it is not there yet, it is + // added. The return value is the string found. 
+ static Handle<String> LookupString(Isolate* isolate, Handle<String> key); + static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key); + + // Tries to internalize given string and returns string handle on success + // or an empty handle otherwise. + MUST_USE_RESULT static MaybeHandle<String> InternalizeStringIfExists( + Isolate* isolate, + Handle<String> string); // Looks up a string that is equal to the given string and returns - // true if it is found, assigning the string to the given output - // parameter. - bool LookupStringIfExists(String* str, String** result); - bool LookupTwoCharsStringIfExists(uint16_t c1, uint16_t c2, String** result); + // string handle if it is found, or an empty handle otherwise. + MUST_USE_RESULT static MaybeHandle<String> LookupStringIfExists( + Isolate* isolate, + Handle<String> str); + MUST_USE_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists( + Isolate* isolate, + uint16_t c1, + uint16_t c2); - // Casting. - static inline StringTable* cast(Object* obj); + DECLARE_CAST(StringTable) private: template <bool seq_ascii> friend class JsonParser; @@ -3885,6 +3909,7 @@ static inline bool IsMatch(HashTableKey* key, Object* value) { return key->IsMatch(value); } + static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); } @@ -3893,10 +3918,7 @@ return key->HashForObject(object); } - MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, - HashTableKey* key) { - return key->AsObject(heap); - } + static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key); static const int kPrefixSize = 0; static const int kEntrySize = 2; @@ -3907,55 +3929,62 @@ // // Maps keys that are a fixed array of unique names to a map. // Used for canonicalize maps for object literals. -class MapCache: public HashTable<MapCacheShape, HashTableKey*> { +class MapCache: public HashTable<MapCache, MapCacheShape, HashTableKey*> { public: // Find cached value for a name key, otherwise return null. 
Object* Lookup(FixedArray* key); - MUST_USE_RESULT MaybeObject* Put(FixedArray* key, Map* value); - static inline MapCache* cast(Object* obj); + static Handle<MapCache> Put( + Handle<MapCache> map_cache, Handle<FixedArray> key, Handle<Map> value); + DECLARE_CAST(MapCache) private: DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache); }; -template <typename Shape, typename Key> -class Dictionary: public HashTable<Shape, Key> { - public: - static inline Dictionary<Shape, Key>* cast(Object* obj) { - return reinterpret_cast<Dictionary<Shape, Key>*>(obj); - } +template <typename Derived, typename Shape, typename Key> +class Dictionary: public HashTable<Derived, Shape, Key> { + protected: + typedef HashTable<Derived, Shape, Key> DerivedHashTable; + public: // Returns the value at entry. Object* ValueAt(int entry) { - return this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 1); + return this->get(DerivedHashTable::EntryToIndex(entry) + 1); } // Set the value for entry. void ValueAtPut(int entry, Object* value) { - this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value); + this->set(DerivedHashTable::EntryToIndex(entry) + 1, value); } // Returns the property details for the property at entry. PropertyDetails DetailsAt(int entry) { - ASSERT(entry >= 0); // Not found is -1, which is not caught by get(). + DCHECK(entry >= 0); // Not found is -1, which is not caught by get(). return PropertyDetails( - Smi::cast(this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 2))); + Smi::cast(this->get(DerivedHashTable::EntryToIndex(entry) + 2))); } // Set the details for entry. void DetailsAtPut(int entry, PropertyDetails value) { - this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi()); + this->set(DerivedHashTable::EntryToIndex(entry) + 2, value.AsSmi()); } // Sorting support void CopyValuesTo(FixedArray* elements); // Delete a property from the dictionary. 
- Object* DeleteProperty(int entry, JSObject::DeleteMode mode); + static Handle<Object> DeleteProperty( + Handle<Derived> dictionary, + int entry, + JSObject::DeleteMode mode); // Attempt to shrink the dictionary after deletion of key. - MUST_USE_RESULT MaybeObject* Shrink(Key key); + MUST_USE_RESULT static inline Handle<Derived> Shrink( + Handle<Derived> dictionary, + Key key) { + return DerivedHashTable::Shrink(dictionary, key); + } // Returns the number of elements in the dictionary filtering out properties // with the specified attributes. @@ -3977,101 +4006,102 @@ // Accessors for next enumeration index. void SetNextEnumerationIndex(int index) { - ASSERT(index != 0); + DCHECK(index != 0); this->set(kNextEnumerationIndexIndex, Smi::FromInt(index)); } int NextEnumerationIndex() { - return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value(); + return Smi::cast(this->get(kNextEnumerationIndexIndex))->value(); } - // Returns a new array for dictionary usage. Might return Failure. - MUST_USE_RESULT static MaybeObject* Allocate( - Heap* heap, + // Creates a new dictionary. + MUST_USE_RESULT static Handle<Derived> New( + Isolate* isolate, int at_least_space_for, PretenureFlag pretenure = NOT_TENURED); // Ensure enough space for n additional elements. - MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key); + static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key); #ifdef OBJECT_PRINT - void Print(FILE* out = stdout); + void Print(OStream& os); // NOLINT #endif // Returns the key (slow). Object* SlowReverseLookup(Object* value); // Sets the entry to (key, value) pair. 
inline void SetEntry(int entry, - Object* key, - Object* value); + Handle<Object> key, + Handle<Object> value); inline void SetEntry(int entry, - Object* key, - Object* value, + Handle<Object> key, + Handle<Object> value, PropertyDetails details); - MUST_USE_RESULT MaybeObject* Add(Key key, - Object* value, - PropertyDetails details); + MUST_USE_RESULT static Handle<Derived> Add( + Handle<Derived> dictionary, + Key key, + Handle<Object> value, + PropertyDetails details); protected: // Generic at put operation. - MUST_USE_RESULT MaybeObject* AtPut(Key key, Object* value); + MUST_USE_RESULT static Handle<Derived> AtPut( + Handle<Derived> dictionary, + Key key, + Handle<Object> value); // Add entry to dictionary. - MUST_USE_RESULT MaybeObject* AddEntry(Key key, - Object* value, - PropertyDetails details, - uint32_t hash); + static void AddEntry( + Handle<Derived> dictionary, + Key key, + Handle<Object> value, + PropertyDetails details, + uint32_t hash); // Generate new enumeration indices to avoid enumeration index overflow. 
- MUST_USE_RESULT MaybeObject* GenerateNewEnumerationIndices(); - static const int kMaxNumberKeyIndex = - HashTable<Shape, Key>::kPrefixStartIndex; + static void GenerateNewEnumerationIndices(Handle<Derived> dictionary); + static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex; static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1; }; -class NameDictionaryShape : public BaseShape<Name*> { +class NameDictionaryShape : public BaseShape<Handle<Name> > { public: - static inline bool IsMatch(Name* key, Object* other); - static inline uint32_t Hash(Name* key); - static inline uint32_t HashForObject(Name* key, Object* object); - MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, - Name* key); + static inline bool IsMatch(Handle<Name> key, Object* other); + static inline uint32_t Hash(Handle<Name> key); + static inline uint32_t HashForObject(Handle<Name> key, Object* object); + static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key); static const int kPrefixSize = 2; static const int kEntrySize = 3; static const bool kIsEnumerable = true; }; -class NameDictionary: public Dictionary<NameDictionaryShape, Name*> { +class NameDictionary: public Dictionary<NameDictionary, + NameDictionaryShape, + Handle<Name> > { + typedef Dictionary< + NameDictionary, NameDictionaryShape, Handle<Name> > DerivedDictionary; + public: - static inline NameDictionary* cast(Object* obj) { - ASSERT(obj->IsDictionary()); - return reinterpret_cast<NameDictionary*>(obj); - } + DECLARE_CAST(NameDictionary) // Copies enumerable keys to preallocated fixed array. void CopyEnumKeysTo(FixedArray* storage); - static void DoGenerateNewEnumerationIndices( + inline static void DoGenerateNewEnumerationIndices( Handle<NameDictionary> dictionary); - // For transforming properties of a JSObject. 
- MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor( - JSObject* obj, - int unused_property_fields); - // Find entry for key, otherwise return kNotFound. Optimized version of // HashTable::FindEntry. - int FindEntry(Name* key); + int FindEntry(Handle<Name> key); }; class NumberDictionaryShape : public BaseShape<uint32_t> { public: static inline bool IsMatch(uint32_t key, Object* other); - MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, - uint32_t key); + static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key); static const int kEntrySize = 3; static const bool kIsEnumerable = false; }; @@ -4099,36 +4129,31 @@ class SeededNumberDictionary - : public Dictionary<SeededNumberDictionaryShape, uint32_t> { + : public Dictionary<SeededNumberDictionary, + SeededNumberDictionaryShape, + uint32_t> { public: - static SeededNumberDictionary* cast(Object* obj) { - ASSERT(obj->IsDictionary()); - return reinterpret_cast<SeededNumberDictionary*>(obj); - } + DECLARE_CAST(SeededNumberDictionary) // Type specific at put (default NONE attributes is used when adding). - MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value); + MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut( + Handle<SeededNumberDictionary> dictionary, + uint32_t key, + Handle<Object> value); MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry( Handle<SeededNumberDictionary> dictionary, uint32_t key, Handle<Object> value, PropertyDetails details); - MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, - Object* value, - PropertyDetails details); // Set an existing entry or add a new one if needed. // Return the updated dictionary. 
MUST_USE_RESULT static Handle<SeededNumberDictionary> Set( Handle<SeededNumberDictionary> dictionary, - uint32_t index, + uint32_t key, Handle<Object> value, PropertyDetails details); - MUST_USE_RESULT MaybeObject* Set(uint32_t key, - Object* value, - PropertyDetails details); - void UpdateMaxNumberKey(uint32_t key); // If slow elements are required we will never go back to fast-case @@ -4152,102 +4177,71 @@ class UnseededNumberDictionary - : public Dictionary<UnseededNumberDictionaryShape, uint32_t> { + : public Dictionary<UnseededNumberDictionary, + UnseededNumberDictionaryShape, + uint32_t> { public: - static UnseededNumberDictionary* cast(Object* obj) { - ASSERT(obj->IsDictionary()); - return reinterpret_cast<UnseededNumberDictionary*>(obj); - } + DECLARE_CAST(UnseededNumberDictionary) // Type specific at put (default NONE attributes is used when adding). - MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value); - MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value); + MUST_USE_RESULT static Handle<UnseededNumberDictionary> AtNumberPut( + Handle<UnseededNumberDictionary> dictionary, + uint32_t key, + Handle<Object> value); + MUST_USE_RESULT static Handle<UnseededNumberDictionary> AddNumberEntry( + Handle<UnseededNumberDictionary> dictionary, + uint32_t key, + Handle<Object> value); // Set an existing entry or add a new one if needed. // Return the updated dictionary. 
MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set( Handle<UnseededNumberDictionary> dictionary, - uint32_t index, + uint32_t key, Handle<Object> value); - - MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value); }; -template <int entrysize> -class ObjectHashTableShape : public BaseShape<Object*> { +class ObjectHashTableShape : public BaseShape<Handle<Object> > { public: - static inline bool IsMatch(Object* key, Object* other); - static inline uint32_t Hash(Object* key); - static inline uint32_t HashForObject(Object* key, Object* object); - MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, - Object* key); + static inline bool IsMatch(Handle<Object> key, Object* other); + static inline uint32_t Hash(Handle<Object> key); + static inline uint32_t HashForObject(Handle<Object> key, Object* object); + static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key); static const int kPrefixSize = 0; - static const int kEntrySize = entrysize; -}; - - -// ObjectHashSet holds keys that are arbitrary objects by using the identity -// hash of the key for hashing purposes. -class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> { - public: - static inline ObjectHashSet* cast(Object* obj) { - ASSERT(obj->IsHashTable()); - return reinterpret_cast<ObjectHashSet*>(obj); - } - - // Looks up whether the given key is part of this hash set. - bool Contains(Object* key); - - static Handle<ObjectHashSet> EnsureCapacity( - Handle<ObjectHashSet> table, - int n, - Handle<Object> key, - PretenureFlag pretenure = NOT_TENURED); - - // Attempt to shrink hash table after removal of key. - static Handle<ObjectHashSet> Shrink(Handle<ObjectHashSet> table, - Handle<Object> key); - - // Adds the given key to this hash set. - static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> table, - Handle<Object> key); - - // Removes the given key from this hash set. 
- static Handle<ObjectHashSet> Remove(Handle<ObjectHashSet> table, - Handle<Object> key); + static const int kEntrySize = 2; }; // ObjectHashTable maps keys that are arbitrary objects to object values by // using the identity hash of the key for hashing purposes. -class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> { +class ObjectHashTable: public HashTable<ObjectHashTable, + ObjectHashTableShape, + Handle<Object> > { + typedef HashTable< + ObjectHashTable, ObjectHashTableShape, Handle<Object> > DerivedHashTable; public: - static inline ObjectHashTable* cast(Object* obj) { - ASSERT(obj->IsHashTable()); - return reinterpret_cast<ObjectHashTable*>(obj); - } - - static Handle<ObjectHashTable> EnsureCapacity( - Handle<ObjectHashTable> table, - int n, - Handle<Object> key, - PretenureFlag pretenure = NOT_TENURED); + DECLARE_CAST(ObjectHashTable) // Attempt to shrink hash table after removal of key. - static Handle<ObjectHashTable> Shrink(Handle<ObjectHashTable> table, - Handle<Object> key); + MUST_USE_RESULT static inline Handle<ObjectHashTable> Shrink( + Handle<ObjectHashTable> table, + Handle<Object> key); // Looks up the value associated with the given key. The hole value is // returned in case the key is not present. - Object* Lookup(Object* key); + Object* Lookup(Handle<Object> key); - // Adds (or overwrites) the value associated with the given key. Mapping a - // key to the hole value causes removal of the whole entry. + // Adds (or overwrites) the value associated with the given key. static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table, Handle<Object> key, Handle<Object> value); + // Returns an ObjectHashTable (possibly |table|) where |key| has been removed. 
+ static Handle<ObjectHashTable> Remove(Handle<ObjectHashTable> table, + Handle<Object> key, + bool* was_present); + private: friend class MarkCompactCollector; @@ -4261,14 +4255,222 @@ }; +// OrderedHashTable is a HashTable with Object keys that preserves +// insertion order. There are Map and Set interfaces (OrderedHashMap +// and OrderedHashTable, below). It is meant to be used by JSMap/JSSet. +// +// Only Object* keys are supported, with Object::SameValueZero() used as the +// equality operator and Object::GetHash() for the hash function. +// +// Based on the "Deterministic Hash Table" as described by Jason Orendorff at +// https://wiki.mozilla.org/User:Jorend/Deterministic_hash_tables +// Originally attributed to Tyler Close. +// +// Memory layout: +// [0]: bucket count +// [1]: element count +// [2]: deleted element count +// [3..(3 + NumberOfBuckets() - 1)]: "hash table", where each item is an +// offset into the data table (see below) where the +// first item in this bucket is stored. +// [3 + NumberOfBuckets()..length]: "data table", an array of length +// Capacity() * kEntrySize, where the first entrysize +// items are handled by the derived class and the +// item at kChainOffset is another entry into the +// data table indicating the next entry in this hash +// bucket. +// +// When we transition the table to a new version we obsolete it and reuse parts +// of the memory to store information how to transition an iterator to the new +// table: +// +// Memory layout for obsolete table: +// [0]: bucket count +// [1]: Next newer table +// [2]: Number of removed holes or -1 when the table was cleared. +// [3..(3 + NumberOfRemovedHoles() - 1)]: The indexes of the removed holes. +// [3 + NumberOfRemovedHoles()..length]: Not used +// +template<class Derived, class Iterator, int entrysize> +class OrderedHashTable: public FixedArray { + public: + // Returns an OrderedHashTable with a capacity of at least |capacity|. 
+ static Handle<Derived> Allocate( + Isolate* isolate, int capacity, PretenureFlag pretenure = NOT_TENURED); + + // Returns an OrderedHashTable (possibly |table|) with enough space + // to add at least one new element. + static Handle<Derived> EnsureGrowable(Handle<Derived> table); + + // Returns an OrderedHashTable (possibly |table|) that's shrunken + // if possible. + static Handle<Derived> Shrink(Handle<Derived> table); + + // Returns a new empty OrderedHashTable and records the clearing so that + // exisiting iterators can be updated. + static Handle<Derived> Clear(Handle<Derived> table); + + // Returns an OrderedHashTable (possibly |table|) where |key| has been + // removed. + static Handle<Derived> Remove(Handle<Derived> table, Handle<Object> key, + bool* was_present); + + // Returns kNotFound if the key isn't present. + int FindEntry(Handle<Object> key, int hash); + + // Like the above, but doesn't require the caller to provide a hash. + int FindEntry(Handle<Object> key); + + int NumberOfElements() { + return Smi::cast(get(kNumberOfElementsIndex))->value(); + } + + int NumberOfDeletedElements() { + return Smi::cast(get(kNumberOfDeletedElementsIndex))->value(); + } + + int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); } + + int NumberOfBuckets() { + return Smi::cast(get(kNumberOfBucketsIndex))->value(); + } + + // Returns the index into the data table where the new entry + // should be placed. The table is assumed to have enough space + // for a new entry. + int AddEntry(int hash); + + // Removes the entry, and puts the_hole in entrysize pointers + // (leaving the hash table chain intact). + void RemoveEntry(int entry); + + // Returns an index into |this| for the given entry. 
+ int EntryToIndex(int entry) { + return kHashTableStartIndex + NumberOfBuckets() + (entry * kEntrySize); + } + + Object* KeyAt(int entry) { return get(EntryToIndex(entry)); } + + bool IsObsolete() { + return !get(kNextTableIndex)->IsSmi(); + } + + // The next newer table. This is only valid if the table is obsolete. + Derived* NextTable() { + return Derived::cast(get(kNextTableIndex)); + } + + // When the table is obsolete we store the indexes of the removed holes. + int RemovedIndexAt(int index) { + return Smi::cast(get(kRemovedHolesIndex + index))->value(); + } + + static const int kNotFound = -1; + static const int kMinCapacity = 4; + + private: + static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity); + + void SetNumberOfBuckets(int num) { + set(kNumberOfBucketsIndex, Smi::FromInt(num)); + } + + void SetNumberOfElements(int num) { + set(kNumberOfElementsIndex, Smi::FromInt(num)); + } + + void SetNumberOfDeletedElements(int num) { + set(kNumberOfDeletedElementsIndex, Smi::FromInt(num)); + } + + int Capacity() { + return NumberOfBuckets() * kLoadFactor; + } + + // Returns the next entry for the given entry. 
+ int ChainAt(int entry) { + return Smi::cast(get(EntryToIndex(entry) + kChainOffset))->value(); + } + + int HashToBucket(int hash) { + return hash & (NumberOfBuckets() - 1); + } + + int HashToEntry(int hash) { + int bucket = HashToBucket(hash); + return Smi::cast(get(kHashTableStartIndex + bucket))->value(); + } + + void SetNextTable(Derived* next_table) { + set(kNextTableIndex, next_table); + } + + void SetRemovedIndexAt(int index, int removed_index) { + return set(kRemovedHolesIndex + index, Smi::FromInt(removed_index)); + } + + static const int kNumberOfBucketsIndex = 0; + static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1; + static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1; + static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1; + + static const int kNextTableIndex = kNumberOfElementsIndex; + static const int kRemovedHolesIndex = kHashTableStartIndex; + + static const int kEntrySize = entrysize + 1; + static const int kChainOffset = entrysize; + + static const int kLoadFactor = 2; + static const int kMaxCapacity = + (FixedArray::kMaxLength - kHashTableStartIndex) + / (1 + (kEntrySize * kLoadFactor)); +}; + + +class JSSetIterator; + + +class OrderedHashSet: public OrderedHashTable< + OrderedHashSet, JSSetIterator, 1> { + public: + DECLARE_CAST(OrderedHashSet) + + bool Contains(Handle<Object> key); + static Handle<OrderedHashSet> Add( + Handle<OrderedHashSet> table, Handle<Object> key); +}; + + +class JSMapIterator; + + +class OrderedHashMap:public OrderedHashTable< + OrderedHashMap, JSMapIterator, 2> { + public: + DECLARE_CAST(OrderedHashMap) + + Object* Lookup(Handle<Object> key); + static Handle<OrderedHashMap> Put( + Handle<OrderedHashMap> table, + Handle<Object> key, + Handle<Object> value); + + Object* ValueAt(int entry) { + return get(EntryToIndex(entry) + kValueOffset); + } + + private: + static const int kValueOffset = 1; +}; + + template <int entrysize> -class WeakHashTableShape : public 
BaseShape<Object*> { +class WeakHashTableShape : public BaseShape<Handle<Object> > { public: - static inline bool IsMatch(Object* key, Object* other); - static inline uint32_t Hash(Object* key); - static inline uint32_t HashForObject(Object* key, Object* object); - MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, - Object* key); + static inline bool IsMatch(Handle<Object> key, Object* other); + static inline uint32_t Hash(Handle<Object> key); + static inline uint32_t HashForObject(Handle<Object> key, Object* object); + static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key); static const int kPrefixSize = 0; static const int kEntrySize = entrysize; }; @@ -4277,20 +4479,23 @@ // WeakHashTable maps keys that are arbitrary objects to object values. // It is used for the global weak hash table that maps objects // embedded in optimized code to dependent code lists. -class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> { +class WeakHashTable: public HashTable<WeakHashTable, + WeakHashTableShape<2>, + Handle<Object> > { + typedef HashTable< + WeakHashTable, WeakHashTableShape<2>, Handle<Object> > DerivedHashTable; public: - static inline WeakHashTable* cast(Object* obj) { - ASSERT(obj->IsHashTable()); - return reinterpret_cast<WeakHashTable*>(obj); - } + DECLARE_CAST(WeakHashTable) // Looks up the value associated with the given key. The hole value is // returned in case the key is not present. - Object* Lookup(Object* key); + Object* Lookup(Handle<Object> key); // Adds (or overwrites) the value associated with the given key. Mapping a // key to the hole value causes removal of the whole entry. - MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value); + MUST_USE_RESULT static Handle<WeakHashTable> Put(Handle<WeakHashTable> table, + Handle<Object> key, + Handle<Object> value); // This function is called when heap verification is turned on. 
void Zap(Object* value) { @@ -4304,7 +4509,7 @@ private: friend class MarkCompactCollector; - void AddEntry(int entry, Object* key, Object* value); + void AddEntry(int entry, Handle<Object> key, Handle<Object> value); // Returns the index to the value of an entry. static inline int EntryToValueIndex(int entry) { @@ -4342,8 +4547,7 @@ inline int finger_index(); inline void set_finger_index(int finger_index); - // Casting - static inline JSFunctionResultCache* cast(Object* obj); + DECLARE_CAST(JSFunctionResultCache) DECLARE_VERIFIER(JSFunctionResultCache) }; @@ -4358,7 +4562,7 @@ // routines. class ScopeInfo : public FixedArray { public: - static inline ScopeInfo* cast(Object* object); + DECLARE_CAST(ScopeInfo) // Return the type of this scope. ScopeType scope_type(); @@ -4421,6 +4625,13 @@ // Return the initialization flag of the given context local. InitializationFlag ContextLocalInitFlag(int var); + // Return the initialization flag of the given context local. + MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var); + + // Return true if this local was introduced by the compiler, and should not be + // exposed to the user in a debugger. + bool LocalIsSynthetic(int var); + // Lookup support for serialized scope info. Returns the // the stack slot index for a given slot name if the slot is // present; otherwise returns a value < 0. The name must be an internalized @@ -4432,9 +4643,9 @@ // returns a value < 0. The name must be an internalized string. // If the slot is present and mode != NULL, sets *mode to the corresponding // mode for that variable. - int ContextSlotIndex(String* name, - VariableMode* mode, - InitializationFlag* init_flag); + static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name, + VariableMode* mode, InitializationFlag* init_flag, + MaybeAssignedFlag* maybe_assigned_flag); // Lookup support for serialized scope info. 
Returns the // parameter index for a given parameter name if the parameter is present; @@ -4552,6 +4763,8 @@ // ContextLocalInfoEntries part. class ContextLocalMode: public BitField<VariableMode, 0, 3> {}; class ContextLocalInitFlag: public BitField<InitializationFlag, 3, 1> {}; + class ContextLocalMaybeAssignedFlag + : public BitField<MaybeAssignedFlag, 4, 1> {}; }; @@ -4560,18 +4773,27 @@ // needs very limited number of distinct normalized maps. class NormalizedMapCache: public FixedArray { public: - static const int kEntries = 64; + static Handle<NormalizedMapCache> New(Isolate* isolate); - static Handle<Map> Get(Handle<NormalizedMapCache> cache, - Handle<JSObject> object, - PropertyNormalizationMode mode); + MUST_USE_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map, + PropertyNormalizationMode mode); + void Set(Handle<Map> fast_map, Handle<Map> normalized_map); void Clear(); - // Casting - static inline NormalizedMapCache* cast(Object* obj); + DECLARE_CAST(NormalizedMapCache) + + static inline bool IsNormalizedMapCache(const Object* obj); + + DECLARE_VERIFIER(NormalizedMapCache) + private: + static const int kEntries = 64; + + static inline int GetIndex(Handle<Map> map); - DECLARE_VERIFIER(NormalizedMapCache) + // The following declarations hide base class methods. + Object* get(int index); + void set(int index, Object* value); }; @@ -4596,8 +4818,8 @@ // array, this function returns the number of elements a byte array should // have. static int LengthFor(int size_in_bytes) { - ASSERT(IsAligned(size_in_bytes, kPointerSize)); - ASSERT(size_in_bytes >= kHeaderSize); + DCHECK(IsAligned(size_in_bytes, kPointerSize)); + DCHECK(size_in_bytes >= kHeaderSize); return size_in_bytes - kHeaderSize; } @@ -4607,8 +4829,7 @@ // Returns a pointer to the ByteArray object for a given data start address. static inline ByteArray* FromDataStartAddress(Address address); - // Casting. - static inline ByteArray* cast(Object* obj); + DECLARE_CAST(ByteArray) // Dispatched behavior. 
inline int ByteArraySize() { @@ -4635,13 +4856,15 @@ class FreeSpace: public HeapObject { public: // [size]: size of the free space including the header. - inline int size(); + inline int size() const; inline void set_size(int value); + inline int nobarrier_size() const; + inline void nobarrier_set_size(int value); + inline int Size() { return size(); } - // Casting. - static inline FreeSpace* cast(Object* obj); + DECLARE_CAST(FreeSpace) // Dispatched behavior. DECLARE_PRINTER(FreeSpace) @@ -4692,8 +4915,7 @@ // external array. DECL_ACCESSORS(external_pointer, void) // Pointer to the data store. - // Casting. - static inline ExternalArray* cast(Object* obj); + DECLARE_CAST(ExternalArray) // Maximal acceptable length for an external array. static const int kMaxLength = 0x3fffffff; @@ -4723,19 +4945,17 @@ // Setter and getter. inline uint8_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalUint8ClampedArray> array, + int index); inline void set(int index, uint8_t value); - // This accessor applies the correct conversion from Smi, HeapNumber and - // undefined and clamps the converted value between 0 and 255. - Object* SetValue(uint32_t index, Object* value); - + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined and clamps the converted value between 0 and 255. static Handle<Object> SetValue(Handle<ExternalUint8ClampedArray> array, uint32_t index, Handle<Object> value); - // Casting. - static inline ExternalUint8ClampedArray* cast(Object* obj); + DECLARE_CAST(ExternalUint8ClampedArray) // Dispatched behavior. DECLARE_PRINTER(ExternalUint8ClampedArray) @@ -4750,19 +4970,16 @@ public: // Setter and getter. 
inline int8_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalInt8Array> array, int index); inline void set(int index, int8_t value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalInt8Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalInt8Array* cast(Object* obj); + DECLARE_CAST(ExternalInt8Array) // Dispatched behavior. DECLARE_PRINTER(ExternalInt8Array) @@ -4777,19 +4994,16 @@ public: // Setter and getter. inline uint8_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalUint8Array> array, int index); inline void set(int index, uint8_t value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalUint8Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalUint8Array* cast(Object* obj); + DECLARE_CAST(ExternalUint8Array) // Dispatched behavior. DECLARE_PRINTER(ExternalUint8Array) @@ -4804,19 +5018,16 @@ public: // Setter and getter. inline int16_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalInt16Array> array, int index); inline void set(int index, int16_t value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. 
static Handle<Object> SetValue(Handle<ExternalInt16Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalInt16Array* cast(Object* obj); + DECLARE_CAST(ExternalInt16Array) // Dispatched behavior. DECLARE_PRINTER(ExternalInt16Array) @@ -4831,19 +5042,17 @@ public: // Setter and getter. inline uint16_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalUint16Array> array, + int index); inline void set(int index, uint16_t value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalUint16Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalUint16Array* cast(Object* obj); + DECLARE_CAST(ExternalUint16Array) // Dispatched behavior. DECLARE_PRINTER(ExternalUint16Array) @@ -4858,19 +5067,16 @@ public: // Setter and getter. inline int32_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalInt32Array> array, int index); inline void set(int index, int32_t value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalInt32Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalInt32Array* cast(Object* obj); + DECLARE_CAST(ExternalInt32Array) // Dispatched behavior. 
DECLARE_PRINTER(ExternalInt32Array) @@ -4885,19 +5091,17 @@ public: // Setter and getter. inline uint32_t get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalUint32Array> array, + int index); inline void set(int index, uint32_t value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalUint32Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalUint32Array* cast(Object* obj); + DECLARE_CAST(ExternalUint32Array) // Dispatched behavior. DECLARE_PRINTER(ExternalUint32Array) @@ -4912,19 +5116,17 @@ public: // Setter and getter. inline float get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalFloat32Array> array, + int index); inline void set(int index, float value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalFloat32Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalFloat32Array* cast(Object* obj); + DECLARE_CAST(ExternalFloat32Array) // Dispatched behavior. DECLARE_PRINTER(ExternalFloat32Array) @@ -4939,19 +5141,17 @@ public: // Setter and getter. 
inline double get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<ExternalFloat64Array> array, + int index); inline void set(int index, double value); + // This accessor applies the correct conversion from Smi, HeapNumber + // and undefined. static Handle<Object> SetValue(Handle<ExternalFloat64Array> array, uint32_t index, Handle<Object> value); - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - - // Casting. - static inline ExternalFloat64Array* cast(Object* obj); + DECLARE_CAST(ExternalFloat64Array) // Dispatched behavior. DECLARE_PRINTER(ExternalFloat64Array) @@ -4964,19 +5164,22 @@ class FixedTypedArrayBase: public FixedArrayBase { public: - // Casting: - static inline FixedTypedArrayBase* cast(Object* obj); + DECLARE_CAST(FixedTypedArrayBase) static const int kDataOffset = kHeaderSize; inline int size(); + inline int TypedArraySize(InstanceType type); + // Use with care: returns raw pointer into heap. 
inline void* DataPtr(); inline int DataSize(); private: + inline int DataSize(InstanceType type); + DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase); }; @@ -4987,8 +5190,7 @@ typedef typename Traits::ElementType ElementType; static const InstanceType kInstanceType = Traits::kInstanceType; - // Casting: - static inline FixedTypedArray<Traits>* cast(Object* obj); + DECLARE_CAST(FixedTypedArray<Traits>) static inline int ElementOffset(int index) { return kDataOffset + index * sizeof(ElementType); @@ -4999,7 +5201,7 @@ } inline ElementType get_scalar(int index); - MUST_USE_RESULT inline MaybeObject* get(int index); + static inline Handle<Object> get(Handle<FixedTypedArray> array, int index); inline void set(int index, ElementType value); static inline ElementType from_int(int value); @@ -5007,8 +5209,6 @@ // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); - static Handle<Object> SetValue(Handle<FixedTypedArray<Traits> > array, uint32_t index, Handle<Object> value); @@ -5022,12 +5222,13 @@ #define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \ class Type##ArrayTraits { \ - public: \ - typedef elementType ElementType; \ - static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \ - static const char* Designator() { return #type " array"; } \ - static inline MaybeObject* ToObject(Heap* heap, elementType scalar); \ - static inline elementType defaultValue(); \ + public: /* NOLINT */ \ + typedef elementType ElementType; \ + static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \ + static const char* Designator() { return #type " array"; } \ + static inline Handle<Object> ToHandle(Isolate* isolate, \ + elementType scalar); \ + static inline elementType defaultValue(); \ }; \ \ typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array; @@ -5046,14 +5247,16 @@ class DeoptimizationInputData: public FixedArray { public: // 
Layout description. Indices in the array. - static const int kTranslationByteArrayIndex = 0; - static const int kInlinedFunctionCountIndex = 1; - static const int kLiteralArrayIndex = 2; - static const int kOsrAstIdIndex = 3; - static const int kOsrPcOffsetIndex = 4; - static const int kOptimizationIdIndex = 5; - static const int kSharedFunctionInfoIndex = 6; - static const int kFirstDeoptEntryIndex = 7; + static const int kDeoptEntryCountIndex = 0; + static const int kReturnAddressPatchEntryCountIndex = 1; + static const int kTranslationByteArrayIndex = 2; + static const int kInlinedFunctionCountIndex = 3; + static const int kLiteralArrayIndex = 4; + static const int kOsrAstIdIndex = 5; + static const int kOsrPcOffsetIndex = 6; + static const int kOptimizationIdIndex = 7; + static const int kSharedFunctionInfoIndex = 8; + static const int kFirstDeoptEntryIndex = 9; // Offsets of deopt entry elements relative to the start of the entry. static const int kAstIdRawOffset = 0; @@ -5062,6 +5265,12 @@ static const int kPcOffset = 3; static const int kDeoptEntrySize = 4; + // Offsets of return address patch entry elements relative to the start of the + // entry + static const int kReturnAddressPcOffset = 0; + static const int kPatchedAddressPcOffset = 1; + static const int kReturnAddressPatchEntrySize = 2; + // Simple element accessors. #define DEFINE_ELEMENT_ACCESSORS(name, type) \ type* name() { \ @@ -5082,20 +5291,35 @@ #undef DEFINE_ELEMENT_ACCESSORS // Accessors for elements of the ith deoptimization entry. 
-#define DEFINE_ENTRY_ACCESSORS(name, type) \ - type* name(int i) { \ - return type::cast(get(IndexForEntry(i) + k##name##Offset)); \ - } \ - void Set##name(int i, type* value) { \ - set(IndexForEntry(i) + k##name##Offset, value); \ +#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \ + type* name(int i) { \ + return type::cast(get(IndexForEntry(i) + k##name##Offset)); \ + } \ + void Set##name(int i, type* value) { \ + set(IndexForEntry(i) + k##name##Offset, value); \ + } + + DEFINE_DEOPT_ENTRY_ACCESSORS(AstIdRaw, Smi) + DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi) + DEFINE_DEOPT_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi) + DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi) + +#undef DEFINE_DEOPT_ENTRY_ACCESSORS + +// Accessors for elements of the ith deoptimization entry. +#define DEFINE_PATCH_ENTRY_ACCESSORS(name, type) \ + type* name(int i) { \ + return type::cast( \ + get(IndexForReturnAddressPatchEntry(i) + k##name##Offset)); \ + } \ + void Set##name(int i, type* value) { \ + set(IndexForReturnAddressPatchEntry(i) + k##name##Offset, value); \ } - DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi) - DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi) - DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi) - DEFINE_ENTRY_ACCESSORS(Pc, Smi) + DEFINE_PATCH_ENTRY_ACCESSORS(ReturnAddressPc, Smi) + DEFINE_PATCH_ENTRY_ACCESSORS(PatchedAddressPc, Smi) -#undef DEFINE_ENTRY_ACCESSORS +#undef DEFINE_PATCH_ENTRY_ACCESSORS BailoutId AstId(int i) { return BailoutId(AstIdRaw(i)->value()); @@ -5106,28 +5330,42 @@ } int DeoptCount() { - return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize; + return length() == 0 ? 0 : Smi::cast(get(kDeoptEntryCountIndex))->value(); + } + + int ReturnAddressPatchCount() { + return length() == 0 + ? 0 + : Smi::cast(get(kReturnAddressPatchEntryCountIndex))->value(); } // Allocates a DeoptimizationInputData. 
- MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate, - int deopt_entry_count, - PretenureFlag pretenure); + static Handle<DeoptimizationInputData> New(Isolate* isolate, + int deopt_entry_count, + int return_address_patch_count, + PretenureFlag pretenure); - // Casting. - static inline DeoptimizationInputData* cast(Object* obj); + DECLARE_CAST(DeoptimizationInputData) #ifdef ENABLE_DISASSEMBLER - void DeoptimizationInputDataPrint(FILE* out); + void DeoptimizationInputDataPrint(OStream& os); // NOLINT #endif private: + friend class Object; // For accessing LengthFor. + static int IndexForEntry(int i) { return kFirstDeoptEntryIndex + (i * kDeoptEntrySize); } - static int LengthFor(int entry_count) { - return IndexForEntry(entry_count); + int IndexForReturnAddressPatchEntry(int i) { + return kFirstDeoptEntryIndex + (DeoptCount() * kDeoptEntrySize) + + (i * kReturnAddressPatchEntrySize); + } + + static int LengthFor(int deopt_count, int return_address_patch_count) { + return kFirstDeoptEntryIndex + (deopt_count * kDeoptEntrySize) + + (return_address_patch_count * kReturnAddressPatchEntrySize); } }; @@ -5157,15 +5395,14 @@ } // Allocates a DeoptimizationOutputData. - MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate, - int number_of_deopt_points, - PretenureFlag pretenure); + static Handle<DeoptimizationOutputData> New(Isolate* isolate, + int number_of_deopt_points, + PretenureFlag pretenure); - // Casting. 
- static inline DeoptimizationOutputData* cast(Object* obj); + DECLARE_CAST(DeoptimizationOutputData) #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) - void DeoptimizationOutputDataPrint(FILE* out); + void DeoptimizationOutputDataPrint(OStream& os); // NOLINT #endif }; @@ -5194,6 +5431,7 @@ #define IC_KIND_LIST(V) \ V(LOAD_IC) \ V(KEYED_LOAD_IC) \ + V(CALL_IC) \ V(STORE_IC) \ V(KEYED_STORE_IC) \ V(BINARY_OP_IC) \ @@ -5230,12 +5468,13 @@ // Printing static const char* ICState2String(InlineCacheState state); static const char* StubType2String(StubType type); - static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra); - void Disassemble(const char* name, FILE* out = stdout); + static void PrintExtraICState(OStream& os, // NOLINT + Kind kind, ExtraICState extra); + void Disassemble(const char* name, OStream& os); // NOLINT #endif // ENABLE_DISASSEMBLER // [instruction_size]: Size of the native instructions - inline int instruction_size(); + inline int instruction_size() const; inline void set_instruction_size(int value); // [relocation_info]: Code relocation information @@ -5252,13 +5491,13 @@ // [raw_type_feedback_info]: This field stores various things, depending on // the kind of the code object. // FUNCTION => type feedback information. - // STUB => various things, e.g. a SMI + // STUB and ICs => major/minor key as Smi. DECL_ACCESSORS(raw_type_feedback_info, Object) inline Object* type_feedback_info(); inline void set_type_feedback_info( Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); - inline int stub_info(); - inline void set_stub_info(int info); + inline uint32_t stub_key(); + inline void set_stub_key(uint32_t key); // [next_code_link]: Link for lists of optimized or deoptimized code. // Note that storage for this field is overlapped with typefeedback_info. @@ -5272,11 +5511,11 @@ // [ic_age]: Inline caching age: the value of the Heap::global_ic_age // at the moment when this object was created. 
inline void set_ic_age(int count); - inline int ic_age(); + inline int ic_age() const; // [prologue_offset]: Offset of the function prologue, used for aging // FUNCTIONs and OPTIMIZED_FUNCTIONs. - inline int prologue_offset(); + inline int prologue_offset() const; inline void set_prologue_offset(int offset); // Unchecked accessors to be used during GC. @@ -5303,26 +5542,42 @@ inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; } inline bool is_store_stub() { return kind() == STORE_IC; } inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; } + inline bool is_call_stub() { return kind() == CALL_IC; } inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; } inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; } inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; } inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; } inline bool is_keyed_stub(); inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; } + inline bool is_weak_stub(); + inline void mark_as_weak_stub(); + inline bool is_invalidated_weak_stub(); + inline void mark_as_invalidated_weak_stub(); + + inline bool CanBeWeakStub() { + Kind k = kind(); + return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC || + k == KEYED_STORE_IC || k == COMPARE_NIL_IC) && + ic_state() == MONOMORPHIC; + } + + inline bool IsCodeStubOrIC(); inline void set_raw_kind_specific_flags1(int value); inline void set_raw_kind_specific_flags2(int value); - // [major_key]: For kind STUB or BINARY_OP_IC, the major key. - inline int major_key(); - inline void set_major_key(int value); - inline bool has_major_key(); - - // For kind STUB or ICs, tells whether or not a code object was generated by - // the optimizing compiler (but it may not be an optimized function). 
- bool is_crankshafted(); + // [is_crankshafted]: For kind STUB or ICs, tells whether or not a code + // object was generated by either the hydrogen or the TurboFan optimizing + // compiler (but it may not be an optimized function). + inline bool is_crankshafted(); + inline bool is_hydrogen_stub(); // Crankshafted, but not a function. inline void set_is_crankshafted(bool value); + // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the + // code object was generated by the TurboFan optimizing compiler. + inline bool is_turbofanned(); + inline void set_is_turbofanned(bool value); + // [optimizable]: For FUNCTION kind, tells if it is optimizable. inline bool optimizable(); inline void set_optimizable(bool value); @@ -5354,12 +5609,16 @@ inline int profiler_ticks(); inline void set_profiler_ticks(int ticks); + // [builtin_index]: For BUILTIN kind, tells which builtin index it has. + inline int builtin_index(); + inline void set_builtin_index(int id); + // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots // reserved in the code prologue. inline unsigned stack_slots(); inline void set_stack_slots(unsigned slots); - // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in + // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in // the instruction stream where the safepoint table starts. inline unsigned safepoint_table_offset(); inline void set_safepoint_table_offset(unsigned offset); @@ -5370,7 +5629,6 @@ inline void set_back_edge_table_offset(unsigned offset); inline bool back_edges_patched_for_osr(); - inline void set_back_edges_patched_for_osr(bool value); // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in. inline byte to_boolean_state(); @@ -5402,7 +5660,6 @@ // Find the first map in an IC stub. Map* FindFirstMap(); void FindAllMaps(MapHandleList* maps); - void FindAllTypes(TypeHandleList* types); // Find the first handler in an IC stub. 
Code* FindFirstHandler(); @@ -5411,6 +5668,9 @@ // enough handlers can be found. bool FindHandlers(CodeHandleList* code_list, int length = -1); + // Find the handler for |map|. + MaybeHandle<Code> FindHandlerForMap(Map* map); + // Find the first name in an IC stub. Name* FindFirstName(); @@ -5432,30 +5692,26 @@ // Flags operations. static inline Flags ComputeFlags( - Kind kind, - InlineCacheState ic_state = UNINITIALIZED, - ExtraICState extra_ic_state = kNoExtraICState, - StubType type = NORMAL, - InlineCacheHolderFlag holder = OWN_MAP); + Kind kind, InlineCacheState ic_state = UNINITIALIZED, + ExtraICState extra_ic_state = kNoExtraICState, StubType type = NORMAL, + CacheHolderFlag holder = kCacheOnReceiver); static inline Flags ComputeMonomorphicFlags( - Kind kind, - ExtraICState extra_ic_state = kNoExtraICState, - InlineCacheHolderFlag holder = OWN_MAP, - StubType type = NORMAL); + Kind kind, ExtraICState extra_ic_state = kNoExtraICState, + CacheHolderFlag holder = kCacheOnReceiver, StubType type = NORMAL); static inline Flags ComputeHandlerFlags( - Kind handler_kind, - StubType type = NORMAL, - InlineCacheHolderFlag holder = OWN_MAP); + Kind handler_kind, StubType type = NORMAL, + CacheHolderFlag holder = kCacheOnReceiver); static inline InlineCacheState ExtractICStateFromFlags(Flags flags); static inline StubType ExtractTypeFromFlags(Flags flags); + static inline CacheHolderFlag ExtractCacheHolderFromFlags(Flags flags); static inline Kind ExtractKindFromFlags(Flags flags); - static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags); static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags); static inline Flags RemoveTypeFromFlags(Flags flags); + static inline Flags RemoveTypeAndHolderFromFlags(Flags flags); // Convert a target address into a code object. static inline Code* GetCodeFromTargetAddress(Address address); @@ -5490,7 +5746,7 @@ // Returns the object size for a given body (used for allocation). 
static int SizeFor(int body_size) { - ASSERT_SIZE_TAG_ALIGNED(body_size); + DCHECK_SIZE_TAG_ALIGNED(body_size); return RoundUp(kHeaderSize + body_size, kCodeAlignment); } @@ -5498,7 +5754,7 @@ // the layout of the code object into account. int ExecutableSize() { // Check that the assumptions about the layout of the code object holds. - ASSERT_EQ(static_cast<int>(instruction_start() - address()), + DCHECK_EQ(static_cast<int>(instruction_start() - address()), Code::kHeaderSize); return instruction_size() + Code::kHeaderSize; } @@ -5507,8 +5763,7 @@ int SourcePosition(Address pc); int SourceStatementPosition(Address pc); - // Casting. - static inline Code* cast(Object* obj); + DECLARE_CAST(Code) // Dispatched behavior. int CodeSize() { return SizeFor(body_size()); } @@ -5523,8 +5778,6 @@ void ClearInlineCaches(); void ClearInlineCaches(Kind kind); - void ClearTypeFeedbackInfo(Heap* heap); - BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset); uint32_t TranslateAstIdToPcOffset(BailoutId ast_id); @@ -5550,7 +5803,7 @@ static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate); static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate); void MakeOlder(MarkingParity); - static bool IsYoungSequence(byte* sequence); + static bool IsYoungSequence(Isolate* isolate, byte* sequence); bool IsOld(); Age GetAge(); // Gets the raw code age, including psuedo code-age values such as @@ -5567,11 +5820,18 @@ void VerifyEmbeddedObjectsDependency(); #endif + inline bool CanContainWeakObjects() { + return is_optimized_code() || is_weak_stub(); + } + inline bool IsWeakObject(Object* object) { - return is_optimized_code() && IsWeakObjectInOptimizedCode(object); + return (is_optimized_code() && !is_turbofanned() && + IsWeakObjectInOptimizedCode(object)) || + (is_weak_stub() && IsWeakObjectInIC(object)); } - inline bool IsWeakObjectInOptimizedCode(Object* object); + static inline bool IsWeakObjectInOptimizedCode(Object* object); + static inline bool 
IsWeakObjectInIC(Object* object); // Max loop nesting marker used to postpose OSR. We don't take loop // nesting that is deeper than 5 levels into account. @@ -5583,6 +5843,7 @@ static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize; static const int kDeoptimizationDataOffset = kHandlerTableOffset + kPointerSize; + // For FUNCTION kind, we store the type feedback info here. static const int kTypeFeedbackInfoOffset = kDeoptimizationDataOffset + kPointerSize; static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize; @@ -5613,40 +5874,40 @@ class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {}; class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {}; - static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1; - static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1; + static const int kProfilerTicksOffset = kFullCodeFlags + 1; // Flags layout. BitField<type, shift, size>. - class ICStateField: public BitField<InlineCacheState, 0, 3> {}; - class TypeField: public BitField<StubType, 3, 1> {}; - class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {}; - class KindField: public BitField<Kind, 6, 4> {}; - // TODO(bmeurer): Bit 10 is available for free use. 
:-) + class ICStateField : public BitField<InlineCacheState, 0, 4> {}; + class TypeField : public BitField<StubType, 4, 1> {}; + class CacheHolderField : public BitField<CacheHolderFlag, 5, 2> {}; + class KindField : public BitField<Kind, 7, 4> {}; class ExtraICStateField: public BitField<ExtraICState, 11, PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION) static const int kStackSlotsFirstBit = 0; static const int kStackSlotsBitCount = 24; - static const int kHasFunctionCacheFirstBit = + static const int kHasFunctionCacheBit = kStackSlotsFirstBit + kStackSlotsBitCount; - static const int kHasFunctionCacheBitCount = 1; - static const int kMarkedForDeoptimizationFirstBit = - kStackSlotsFirstBit + kStackSlotsBitCount + 1; - static const int kMarkedForDeoptimizationBitCount = 1; + static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1; + static const int kWeakStubBit = kMarkedForDeoptimizationBit + 1; + static const int kInvalidatedWeakStubBit = kWeakStubBit + 1; + static const int kIsTurbofannedBit = kInvalidatedWeakStubBit + 1; STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32); - STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32); - STATIC_ASSERT(kMarkedForDeoptimizationFirstBit + - kMarkedForDeoptimizationBitCount <= 32); + STATIC_ASSERT(kIsTurbofannedBit + 1 <= 32); class StackSlotsField: public BitField<int, kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT - class HasFunctionCacheField: public BitField<bool, - kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT - class MarkedForDeoptimizationField: public BitField<bool, - kMarkedForDeoptimizationFirstBit, - kMarkedForDeoptimizationBitCount> {}; // NOLINT + class HasFunctionCacheField : public BitField<bool, kHasFunctionCacheBit, 1> { + }; // NOLINT + class MarkedForDeoptimizationField + : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT + class 
WeakStubField : public BitField<bool, kWeakStubBit, 1> {}; // NOLINT + class InvalidatedWeakStubField + : public BitField<bool, kInvalidatedWeakStubBit, 1> {}; // NOLINT + class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> { + }; // NOLINT // KindSpecificFlags2 layout (ALL) static const int kIsCrankshaftedBit = 0; @@ -5654,28 +5915,23 @@ kIsCrankshaftedBit, 1> {}; // NOLINT // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION) - static const int kStubMajorKeyFirstBit = kIsCrankshaftedBit + 1; - static const int kSafepointTableOffsetFirstBit = - kStubMajorKeyFirstBit + kStubMajorKeyBits; + static const int kSafepointTableOffsetFirstBit = kIsCrankshaftedBit + 1; static const int kSafepointTableOffsetBitCount = 24; - STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32); STATIC_ASSERT(kSafepointTableOffsetFirstBit + kSafepointTableOffsetBitCount <= 32); - STATIC_ASSERT(1 + kStubMajorKeyBits + - kSafepointTableOffsetBitCount <= 32); + STATIC_ASSERT(1 + kSafepointTableOffsetBitCount <= 32); class SafepointTableOffsetField: public BitField<int, kSafepointTableOffsetFirstBit, kSafepointTableOffsetBitCount> {}; // NOLINT - class StubMajorKeyField: public BitField<int, - kStubMajorKeyFirstBit, kStubMajorKeyBits> {}; // NOLINT // KindSpecificFlags2 layout (FUNCTION) class BackEdgeTableOffsetField: public BitField<int, - kIsCrankshaftedBit + 1, 29> {}; // NOLINT - class BackEdgesPatchedForOSRField: public BitField<bool, - kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT + kIsCrankshaftedBit + 1, 27> {}; // NOLINT + class AllowOSRAtLoopNestingLevelField: public BitField<int, + kIsCrankshaftedBit + 1 + 27, 4> {}; // NOLINT + STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker); static const int kArgumentsBits = 16; static const int kMaxArguments = (1 << kArgumentsBits) - 1; @@ -5694,7 +5950,7 @@ byte* FindCodeAgeSequence(); static void GetCodeAgeAndParity(Code* code, Age* age, MarkingParity* parity); - static void 
GetCodeAgeAndParity(byte* sequence, Age* age, + static void GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity); static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity); @@ -5731,9 +5987,14 @@ class DependentCode: public FixedArray { public: enum DependencyGroup { + // Group of IC stubs that weakly embed this map and depend on being + // invalidated when the map is garbage collected. Dependent IC stubs form + // a linked list. This group stores only the head of the list. This means + // that the number_of_entries(kWeakICGroup) is 0 or 1. + kWeakICGroup, // Group of code that weakly embed this map and depend on being // deoptimized when the map is garbage collected. - kWeaklyEmbeddedGroup, + kWeakCodeGroup, // Group of code that embed a transition to this map, and depend on being // deoptimized when the transition is replaced by a new version. kTransitionGroup, @@ -5748,6 +6009,12 @@ // Group of code that depends on global property values in property cells // not being changed. kPropertyCellChangedGroup, + // Group of code that omit run-time type checks for the field(s) introduced + // by this map. + kFieldTypeGroup, + // Group of code that omit run-time type checks for initial maps of + // constructors. + kInitialMapChangedGroup, // Group of code that depends on tenuring information in AllocationSites // not being changed. kAllocationSiteTenuringChangedGroup, @@ -5784,6 +6051,7 @@ bool MarkCodeForDeoptimization(Isolate* isolate, DependentCode::DependencyGroup group); + void AddToDependentICList(Handle<Code> stub); // The following low-level accessors should only be used by this class // and the mark compact collector. 
@@ -5797,7 +6065,7 @@ inline Object* object_at(int i); inline void clear_at(int i); inline void copy(int from, int to); - static inline DependentCode* cast(Object* object); + DECLARE_CAST(DependentCode) static DependentCode* ForObject(Handle<HeapObject> object, DependencyGroup group); @@ -5856,15 +6124,19 @@ class NumberOfOwnDescriptorsBits: public BitField<int, kDescriptorIndexBitCount, kDescriptorIndexBitCount> {}; // NOLINT STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20); - class IsShared: public BitField<bool, 20, 1> {}; - class FunctionWithPrototype: public BitField<bool, 21, 1> {}; - class DictionaryMap: public BitField<bool, 22, 1> {}; - class OwnsDescriptors: public BitField<bool, 23, 1> {}; - class HasInstanceCallHandler: public BitField<bool, 24, 1> {}; - class Deprecated: public BitField<bool, 25, 1> {}; - class IsFrozen: public BitField<bool, 26, 1> {}; - class IsUnstable: public BitField<bool, 27, 1> {}; - class IsMigrationTarget: public BitField<bool, 28, 1> {}; + class DictionaryMap : public BitField<bool, 20, 1> {}; + class OwnsDescriptors : public BitField<bool, 21, 1> {}; + class HasInstanceCallHandler : public BitField<bool, 22, 1> {}; + class Deprecated : public BitField<bool, 23, 1> {}; + class IsFrozen : public BitField<bool, 24, 1> {}; + class IsUnstable : public BitField<bool, 25, 1> {}; + class IsMigrationTarget : public BitField<bool, 26, 1> {}; + class DoneInobjectSlackTracking : public BitField<bool, 27, 1> {}; + // Bit 28 is free. + + // Keep this bit field at the very end for better code in + // Builtins::kJSConstructStubGeneric stub. + class ConstructionCount: public BitField<int, 29, 3> {}; // Tells whether the object in the prototype property will be used // for instances created from this function. 
If the prototype @@ -5933,18 +6205,18 @@ inline void set_is_extensible(bool value); inline bool is_extensible(); + inline void set_is_prototype_map(bool value); + inline bool is_prototype_map(); inline void set_elements_kind(ElementsKind elements_kind) { - ASSERT(elements_kind < kElementsKindCount); - ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount)); - set_bit_field2((bit_field2() & ~kElementsKindMask) | - (elements_kind << kElementsKindShift)); - ASSERT(this->elements_kind() == elements_kind); + DCHECK(elements_kind < kElementsKindCount); + DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize)); + set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind)); + DCHECK(this->elements_kind() == elements_kind); } inline ElementsKind elements_kind() { - return static_cast<ElementsKind>( - (bit_field2() & kElementsKindMask) >> kElementsKindShift); + return Map::ElementsKindBits::decode(bit_field2()); } // Tells whether the instance has fast elements that are only Smis. @@ -5997,48 +6269,49 @@ // map with DICTIONARY_ELEMENTS was found in the prototype chain. 
bool DictionaryElementsInPrototypeChainOnly(); - inline bool HasTransitionArray(); + inline bool HasTransitionArray() const; inline bool HasElementsTransition(); inline Map* elements_transition_map(); - MUST_USE_RESULT inline MaybeObject* set_elements_transition_map( - Map* transitioned_map); - inline void SetTransition(int transition_index, Map* target); + inline Map* GetTransition(int transition_index); + inline int SearchTransition(Name* name); + inline FixedArrayBase* GetInitialElements(); - static Handle<TransitionArray> AddTransition(Handle<Map> map, - Handle<Name> key, - Handle<Map> target, - SimpleTransitionFlag flag); - - MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key, - Map* target, - SimpleTransitionFlag flag); DECL_ACCESSORS(transitions, TransitionArray) - inline void ClearTransitions(Heap* heap, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER); - void DeprecateTransitionTree(); - void DeprecateTarget(Name* key, DescriptorArray* new_descriptors); + static inline Handle<String> ExpectedTransitionKey(Handle<Map> map); + static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map); + + // Try to follow an existing transition to a field with attributes NONE. The + // return value indicates whether the transition was successful. + static inline Handle<Map> FindTransitionToField(Handle<Map> map, + Handle<Name> key); Map* FindRootMap(); - Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors); - Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors); + Map* FindFieldOwner(int descriptor); inline int GetInObjectPropertyOffset(int index); int NumberOfFields(); - bool InstancesNeedRewriting(Map* target, - int target_number_of_fields, - int target_inobject, - int target_unused); - static Handle<Map> GeneralizeAllFieldRepresentations( - Handle<Map> map, - Representation new_representation); + // TODO(ishell): candidate with JSObject::MigrateToMap(). 
+ bool InstancesNeedRewriting(Map* target, int target_number_of_fields, + int target_inobject, int target_unused, + int* old_number_of_fields); + // TODO(ishell): moveit! + static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map); + MUST_USE_RESULT static Handle<HeapType> GeneralizeFieldType( + Handle<HeapType> type1, + Handle<HeapType> type2, + Isolate* isolate); + static void GeneralizeFieldType(Handle<Map> map, + int modify_index, + Handle<HeapType> new_field_type); static Handle<Map> GeneralizeRepresentation( Handle<Map> map, int modify_index, Representation new_representation, + Handle<HeapType> new_field_type, StoreMode store_mode); static Handle<Map> CopyGeneralizeAllRepresentations( Handle<Map> map, @@ -6046,32 +6319,22 @@ StoreMode store_mode, PropertyAttributes attributes, const char* reason); + static Handle<Map> CopyGeneralizeAllRepresentations( + Handle<Map> map, + int modify_index, + StoreMode store_mode, + const char* reason); - void PrintGeneralization(FILE* file, - const char* reason, - int modify_index, - int split, - int descriptors, - bool constant_to_field, - Representation old_representation, - Representation new_representation); + static Handle<Map> PrepareForDataProperty(Handle<Map> old_map, + int descriptor_number, + Handle<Object> value); + + static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode); // Returns the constructor name (the name (possibly, inferred name) of the // function that was used to instantiate the object). String* constructor_name(); - // Tells whether the map is attached to SharedFunctionInfo - // (for inobject slack tracking). - inline void set_attached_to_shared_function_info(bool value); - - inline bool attached_to_shared_function_info(); - - // Tells whether the map is shared between objects that may have different - // behavior. If true, the map should never be modified, instead a clone - // should be created and modified. 
- inline void set_is_shared(bool value); - inline bool is_shared(); - // Tells whether the map is used for JSObjects in dictionary mode (ie // normalized objects, ie objects for which HasFastProperties returns false). // A map can never be used for both dictionary mode and fast mode JSObjects. @@ -6100,7 +6363,7 @@ // [stub cache]: contains stubs compiled for this map. DECL_ACCESSORS(code_cache, Object) - // [dependent code]: list of optimized codes that have this map embedded. + // [dependent code]: list of optimized codes that weakly embed this map. DECL_ACCESSORS(dependent_code, DependentCode) // [back pointer]: points back to the parent map from which a transition @@ -6121,13 +6384,8 @@ // 2 + 2 * i: prototype // 3 + 2 * i: target map inline FixedArray* GetPrototypeTransitions(); - MUST_USE_RESULT inline MaybeObject* SetPrototypeTransitions( - FixedArray* prototype_transitions); inline bool HasPrototypeTransitions(); - inline HeapObject* UncheckedPrototypeTransitions(); - inline TransitionArray* unchecked_transition_array(); - static const int kProtoTransitionHeaderSize = 1; static const int kProtoTransitionNumberOfEntriesOffset = 0; static const int kProtoTransitionElementsPerEntry = 2; @@ -6143,7 +6401,7 @@ inline void SetNumberOfProtoTransitions(int value) { FixedArray* cache = GetPrototypeTransitions(); - ASSERT(cache->length() != 0); + DCHECK(cache->length() != 0); cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value)); } @@ -6167,7 +6425,7 @@ int LastAdded() { int number_of_own_descriptors = NumberOfOwnDescriptors(); - ASSERT(number_of_own_descriptors > 0); + DCHECK(number_of_own_descriptors > 0); return number_of_own_descriptors - 1; } @@ -6176,7 +6434,7 @@ } void SetNumberOfOwnDescriptors(int number) { - ASSERT(number <= instance_descriptors()->number_of_descriptors()); + DCHECK(number <= instance_descriptors()->number_of_descriptors()); set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number)); } @@ -6188,15 +6446,15 @@ 
void SetEnumLength(int length) { if (length != kInvalidEnumCacheSentinel) { - ASSERT(length >= 0); - ASSERT(length == 0 || instance_descriptors()->HasEnumCache()); - ASSERT(length <= NumberOfOwnDescriptors()); + DCHECK(length >= 0); + DCHECK(length == 0 || instance_descriptors()->HasEnumCache()); + DCHECK(length <= NumberOfOwnDescriptors()); } set_bit_field3(EnumLengthBits::update(bit_field3(), length)); } inline bool owns_descriptors(); - inline void set_owns_descriptors(bool is_shared); + inline void set_owns_descriptors(bool owns_descriptors); inline bool has_instance_call_handler(); inline void set_has_instance_call_handler(); inline void freeze(); @@ -6205,6 +6463,10 @@ inline bool is_stable(); inline void set_migration_target(bool value); inline bool is_migration_target(); + inline void set_done_inobject_slack_tracking(bool value); + inline bool done_inobject_slack_tracking(); + inline void set_construction_count(int value); + inline int construction_count(); inline void deprecate(); inline bool is_deprecated(); inline bool CanBeDeprecated(); @@ -6213,60 +6475,67 @@ // is found by re-transitioning from the root of the transition tree using the // descriptor array of the map. Returns NULL if no updated map is found. // This method also applies any pending migrations along the prototype chain. - static Handle<Map> CurrentMapForDeprecated(Handle<Map> map); + static MaybeHandle<Map> TryUpdate(Handle<Map> map) V8_WARN_UNUSED_RESULT; // Same as above, but does not touch the prototype chain. - static Handle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map); + static MaybeHandle<Map> TryUpdateInternal(Handle<Map> map) + V8_WARN_UNUSED_RESULT; + + // Returns a non-deprecated version of the input. This method may deprecate + // existing maps along the way if encodings conflict. Not for use while + // gathering type feedback. Use TryUpdate in those cases instead. 
+ static Handle<Map> Update(Handle<Map> map); - static Handle<Map> RawCopy(Handle<Map> map, int instance_size); - MUST_USE_RESULT MaybeObject* RawCopy(int instance_size); - MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors(); static Handle<Map> CopyDropDescriptors(Handle<Map> map); - MUST_USE_RESULT MaybeObject* CopyDropDescriptors(); - static Handle<Map> CopyReplaceDescriptors(Handle<Map> map, - Handle<DescriptorArray> descriptors, - TransitionFlag flag, - Handle<Name> name); - MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors( - DescriptorArray* descriptors, - TransitionFlag flag, - Name* name = NULL, - SimpleTransitionFlag simple_flag = FULL_TRANSITION); - static Handle<Map> CopyInstallDescriptors( + static Handle<Map> CopyInsertDescriptor(Handle<Map> map, + Descriptor* descriptor, + TransitionFlag flag); + + MUST_USE_RESULT static MaybeHandle<Map> CopyWithField( Handle<Map> map, - int new_descriptor, - Handle<DescriptorArray> descriptors); - MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors, - Descriptor* descriptor); - MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor, - TransitionFlag flag); - MUST_USE_RESULT MaybeObject* CopyInsertDescriptor(Descriptor* descriptor, - TransitionFlag flag); - MUST_USE_RESULT MaybeObject* CopyReplaceDescriptor( - DescriptorArray* descriptors, - Descriptor* descriptor, - int index, + Handle<Name> name, + Handle<HeapType> type, + PropertyAttributes attributes, + Representation representation, + TransitionFlag flag); + + MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant( + Handle<Map> map, + Handle<Name> name, + Handle<Object> constant, + PropertyAttributes attributes, TransitionFlag flag); - MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind); + // Returns a new map with all transitions dropped from the given map and + // the ElementsKind set. 
+ static Handle<Map> TransitionElementsTo(Handle<Map> map, + ElementsKind to_kind); static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind); - MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind, - TransitionFlag flag); + static Handle<Map> CopyAsElementsKind(Handle<Map> map, + ElementsKind kind, + TransitionFlag flag); static Handle<Map> CopyForObserved(Handle<Map> map); - static Handle<Map> CopyNormalized(Handle<Map> map, - PropertyNormalizationMode mode, - NormalizedMapSharingMode sharing); + static Handle<Map> CopyForFreeze(Handle<Map> map); + // Maximal number of fast properties. Used to restrict the number of map + // transitions to avoid an explosion in the number of maps for objects used as + // dictionaries. + inline bool TooManyFastProperties(StoreFromKeyed store_mode); + static Handle<Map> TransitionToDataProperty(Handle<Map> map, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StoreFromKeyed store_mode); - inline void AppendDescriptor(Descriptor* desc, - const DescriptorArray::WhitenessWitness&); + inline void AppendDescriptor(Descriptor* desc); // Returns a copy of the map, with all transitions dropped from the // instance descriptors. static Handle<Map> Copy(Handle<Map> map); - MUST_USE_RESULT MaybeObject* Copy(); + static Handle<Map> Create(Handle<JSFunction> constructor, + int extra_inobject_properties); // Returns the next free property index (only valid for FAST MODE). int NextFreePropertyIndex(); @@ -6283,11 +6552,7 @@ inobject_properties(); } - // Casting. - static inline Map* cast(Object* obj); - - // Locate an accessor in the instance descriptor. - AccessorDescriptor* FindAccessor(Name* name); + DECLARE_CAST(Map) // Code cache operations. @@ -6298,7 +6563,6 @@ static void UpdateCodeCache(Handle<Map> map, Handle<Name> name, Handle<Code> code); - MUST_USE_RESULT MaybeObject* UpdateCodeCache(Name* name, Code* code); // Extend the descriptor array of the map with the list of descriptors. 
// In case of duplicates, the latest descriptor is used. @@ -6325,14 +6589,6 @@ // Computes a hash value for this map, to be used in HashTables and such. int Hash(); - bool EquivalentToForTransition(Map* other); - - // Compares this map to another to see if they describe equivalent objects. - // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if - // it had exactly zero inobject properties. - // The "shared" flags of both this map and |other| are ignored. - bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode); - // Returns the map that this map transitions to if its elements_kind // is changed to |elements_kind|, or NULL if no such map is cached yet. // |safe_to_add_transitions| is set to false if adding transitions is not @@ -6343,16 +6599,6 @@ // elements_kind that's found in |candidates|, or null handle if no match is // found at all. Handle<Map> FindTransitionedMap(MapHandleList* candidates); - Map* FindTransitionedMap(MapList* candidates); - - // Zaps the contents of backing data structures. Note that the - // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects - // holding weak references when incremental marking is used, because it also - // iterates over objects that are otherwise unreachable. - // In general we only want to call these functions in release mode when - // heap verification is turned on. - void ZapPrototypeTransitions(); - void ZapTransitions(); bool CanTransition() { // Only JSObject and subtypes have map transitions and back pointers. 
@@ -6363,6 +6609,10 @@ bool IsJSObjectMap() { return instance_type() >= FIRST_JS_OBJECT_TYPE; } + bool IsJSProxyMap() { + InstanceType type = instance_type(); + return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE; + } bool IsJSGlobalProxyMap() { return instance_type() == JS_GLOBAL_PROXY_TYPE; } @@ -6374,18 +6624,17 @@ return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE; } - // Fires when the layout of an object with a leaf map changes. - // This includes adding transitions to the leaf map or changing - // the descriptor array. - inline void NotifyLeafMapLayoutChange(); - inline bool CanOmitMapChecks(); - void AddDependentCompilationInfo(DependentCode::DependencyGroup group, - CompilationInfo* info); + static void AddDependentCompilationInfo(Handle<Map> map, + DependentCode::DependencyGroup group, + CompilationInfo* info); - void AddDependentCode(DependentCode::DependencyGroup group, - Handle<Code> code); + static void AddDependentCode(Handle<Map> map, + DependentCode::DependencyGroup group, + Handle<Code> code); + static void AddDependentIC(Handle<Map> map, + Handle<Code> stub); bool IsMapInArrayPrototypeChain(); @@ -6394,7 +6643,7 @@ DECLARE_VERIFIER(Map) #ifdef VERIFY_HEAP - void SharedMapVerify(); + void DictionaryMapVerify(); void VerifyOmittedMapChecks(); #endif @@ -6413,18 +6662,16 @@ // transitions are in the form of a map where the keys are prototype objects // and the values are the maps the are transitioned to. static const int kMaxCachedPrototypeTransitions = 256; - static Handle<Map> GetPrototypeTransition(Handle<Map> map, - Handle<Object> prototype); - static Handle<Map> PutPrototypeTransition(Handle<Map> map, - Handle<Object> prototype, - Handle<Map> target_map); + static Handle<Map> TransitionToPrototype(Handle<Map> map, + Handle<Object> prototype); static const int kMaxPreAllocatedPropertyFields = 255; // Layout description. 
static const int kInstanceSizesOffset = HeapObject::kHeaderSize; static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize; - static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize; + static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize; + static const int kPrototypeOffset = kBitField3Offset + kPointerSize; static const int kConstructorOffset = kPrototypeOffset + kPointerSize; // Storage for the transition array is overloaded to directly contain a back // pointer if unused. When the map has transitions, the back pointer is @@ -6436,13 +6683,12 @@ kTransitionsOrBackPointerOffset + kPointerSize; static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize; static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize; - static const int kBitField3Offset = kDependentCodeOffset + kPointerSize; - static const int kSize = kBitField3Offset + kPointerSize; + static const int kSize = kDependentCodeOffset + kPointerSize; // Layout of pointer fields. Heap iteration code relies on them // being continuously allocated. static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset; - static const int kPointerFieldsEndOffset = kBitField3Offset + kPointerSize; + static const int kPointerFieldsEndOffset = kSize; // Byte offsets within kInstanceSizesOffset. static const int kInstanceSizeOffset = kInstanceSizesOffset + 0; @@ -6456,52 +6702,144 @@ static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte; // Byte offsets within kInstanceAttributesOffset attributes. +#if V8_TARGET_LITTLE_ENDIAN + // Order instance type and bit field together such that they can be loaded + // together as a 16-bit word with instance type in the lower 8 bits regardless + // of endianess. Also provide endian-independent offset to that 16-bit word. 
static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0; - static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 1; - static const int kBitFieldOffset = kInstanceAttributesOffset + 2; - static const int kBitField2Offset = kInstanceAttributesOffset + 3; + static const int kBitFieldOffset = kInstanceAttributesOffset + 1; +#else + static const int kBitFieldOffset = kInstanceAttributesOffset + 0; + static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1; +#endif + static const int kInstanceTypeAndBitFieldOffset = + kInstanceAttributesOffset + 0; + static const int kBitField2Offset = kInstanceAttributesOffset + 2; + static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3; - STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset); + STATIC_ASSERT(kInstanceTypeAndBitFieldOffset == + Internals::kMapInstanceTypeAndBitFieldOffset); // Bit positions for bit field. - static const int kUnused = 0; // To be used for marking recently used maps. - static const int kHasNonInstancePrototype = 1; - static const int kIsHiddenPrototype = 2; - static const int kHasNamedInterceptor = 3; - static const int kHasIndexedInterceptor = 4; - static const int kIsUndetectable = 5; - static const int kIsObserved = 6; - static const int kIsAccessCheckNeeded = 7; + static const int kHasNonInstancePrototype = 0; + static const int kIsHiddenPrototype = 1; + static const int kHasNamedInterceptor = 2; + static const int kHasIndexedInterceptor = 3; + static const int kIsUndetectable = 4; + static const int kIsObserved = 5; + static const int kIsAccessCheckNeeded = 6; + class FunctionWithPrototype: public BitField<bool, 7, 1> {}; // Bit positions for bit field 2 static const int kIsExtensible = 0; static const int kStringWrapperSafeForDefaultValueOf = 1; - static const int kAttachedToSharedFunctionInfo = 2; - // No bits can be used after kElementsKindFirstBit, they are all reserved for - // storing ElementKind. 
- static const int kElementsKindShift = 3; - static const int kElementsKindBitCount = 5; + class IsPrototypeMapBits : public BitField<bool, 2, 1> {}; + class ElementsKindBits: public BitField<ElementsKind, 3, 5> {}; // Derived values from bit field 2 - static const int kElementsKindMask = (-1 << kElementsKindShift) & - ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1); static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>( - (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1; + (FAST_ELEMENTS + 1) << Map::ElementsKindBits::kShift) - 1; static const int8_t kMaximumBitField2FastSmiElementValue = static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) << - Map::kElementsKindShift) - 1; + Map::ElementsKindBits::kShift) - 1; static const int8_t kMaximumBitField2FastHoleyElementValue = static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) << - Map::kElementsKindShift) - 1; + Map::ElementsKindBits::kShift) - 1; static const int8_t kMaximumBitField2FastHoleySmiElementValue = static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) << - Map::kElementsKindShift) - 1; + Map::ElementsKindBits::kShift) - 1; typedef FixedBodyDescriptor<kPointerFieldsBeginOffset, kPointerFieldsEndOffset, kSize> BodyDescriptor; + // Compares this map to another to see if they describe equivalent objects. + // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if + // it had exactly zero inobject properties. + // The "shared" flags of both this map and |other| are ignored. 
+ bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode); + private: + static void ConnectElementsTransition(Handle<Map> parent, Handle<Map> child); + static void ConnectTransition(Handle<Map> parent, Handle<Map> child, + Handle<Name> name, SimpleTransitionFlag flag); + + bool EquivalentToForTransition(Map* other); + static Handle<Map> RawCopy(Handle<Map> map, int instance_size); + static Handle<Map> ShareDescriptor(Handle<Map> map, + Handle<DescriptorArray> descriptors, + Descriptor* descriptor); + static Handle<Map> CopyInstallDescriptors( + Handle<Map> map, + int new_descriptor, + Handle<DescriptorArray> descriptors); + static Handle<Map> CopyAddDescriptor(Handle<Map> map, + Descriptor* descriptor, + TransitionFlag flag); + static Handle<Map> CopyReplaceDescriptors( + Handle<Map> map, + Handle<DescriptorArray> descriptors, + TransitionFlag flag, + MaybeHandle<Name> maybe_name, + SimpleTransitionFlag simple_flag = FULL_TRANSITION); + static Handle<Map> CopyReplaceDescriptor(Handle<Map> map, + Handle<DescriptorArray> descriptors, + Descriptor* descriptor, + int index, + TransitionFlag flag); + + static Handle<Map> CopyNormalized(Handle<Map> map, + PropertyNormalizationMode mode); + + // Fires when the layout of an object with a leaf map changes. + // This includes adding transitions to the leaf map or changing + // the descriptor array. + inline void NotifyLeafMapLayoutChange(); + + static Handle<Map> TransitionElementsToSlow(Handle<Map> object, + ElementsKind to_kind); + + // Zaps the contents of backing data structures. Note that the + // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects + // holding weak references when incremental marking is used, because it also + // iterates over objects that are otherwise unreachable. + // In general we only want to call these functions in release mode when + // heap verification is turned on. 
+ void ZapPrototypeTransitions(); + void ZapTransitions(); + + void DeprecateTransitionTree(); + void DeprecateTarget(Name* key, DescriptorArray* new_descriptors); + + Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors); + + void UpdateFieldType(int descriptor_number, Handle<Name> name, + Handle<HeapType> new_type); + + void PrintGeneralization(FILE* file, + const char* reason, + int modify_index, + int split, + int descriptors, + bool constant_to_field, + Representation old_representation, + Representation new_representation, + HeapType* old_field_type, + HeapType* new_field_type); + + static inline void SetPrototypeTransitions( + Handle<Map> map, + Handle<FixedArray> prototype_transitions); + + static Handle<Map> GetPrototypeTransition(Handle<Map> map, + Handle<Object> prototype); + static Handle<Map> PutPrototypeTransition(Handle<Map> map, + Handle<Object> prototype, + Handle<Map> target_map); + + static const int kFastPropertiesSoftLimit = 12; + static const int kMaxFastProperties = 128; + DISALLOW_IMPLICIT_CONSTRUCTORS(Map); }; @@ -6512,7 +6850,7 @@ class Struct: public HeapObject { public: inline void InitializeBody(int object_size); - static inline Struct* cast(Object* that); + DECLARE_CAST(Struct) }; @@ -6522,7 +6860,7 @@ // [value]: the boxed contents. DECL_ACCESSORS(value, Object) - static inline Box* cast(Object* obj); + DECLARE_CAST(Box) // Dispatched behavior. DECLARE_PRINTER(Box) @@ -6597,6 +6935,12 @@ // [flags]: Holds an exciting bitfield. DECL_ACCESSORS(flags, Smi) + // [source_url]: sourceURL from magic comment + DECL_ACCESSORS(source_url, Object) + + // [source_url]: sourceMappingURL magic comment + DECL_ACCESSORS(source_mapping_url, Object) + // [compilation_type]: how the the script was compiled. Encoded in the // 'flags' field. inline CompilationType compilation_type(); @@ -6613,12 +6957,29 @@ // the 'flags' field. 
DECL_BOOLEAN_ACCESSORS(is_shared_cross_origin) - static inline Script* cast(Object* obj); + DECLARE_CAST(Script) // If script source is an external string, check that the underlying // resource is accessible. Otherwise, always return true. inline bool HasValidSource(); + // Convert code position into column number. + static int GetColumnNumber(Handle<Script> script, int code_pos); + + // Convert code position into (zero-based) line number. + // The non-handlified version does not allocate, but may be much slower. + static int GetLineNumber(Handle<Script> script, int code_pos); + int GetLineNumber(int code_pos); + + static Handle<Object> GetNameOrSourceURL(Handle<Script> script); + + // Init line_ends array with code positions of line ends inside script source. + static void InitLineEnds(Handle<Script> script); + + // Get the JS object wrapping the given script; create it if none exists. + static Handle<JSObject> GetWrapper(Handle<Script> script); + void ClearWrapperCache(); + // Dispatched behavior. DECLARE_PRINTER(Script) DECLARE_VERIFIER(Script) @@ -6637,9 +6998,13 @@ kEvalFromSharedOffset + kPointerSize; static const int kFlagsOffset = kEvalFrominstructionsOffsetOffset + kPointerSize; - static const int kSize = kFlagsOffset + kPointerSize; + static const int kSourceUrlOffset = kFlagsOffset + kPointerSize; + static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize; + static const int kSize = kSourceMappingUrlOffset + kPointerSize; private: + int GetLineNumberWithArray(int code_pos); + // Bit positions in the flags field. static const int kCompilationTypeBit = 0; static const int kCompilationStateBit = 1; @@ -6659,8 +7024,11 @@ // Installation of ids for the selected builtin functions is handled // by the bootstrapper. 
#define FUNCTIONS_WITH_ID_LIST(V) \ + V(Array.prototype, indexOf, ArrayIndexOf) \ + V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \ V(Array.prototype, push, ArrayPush) \ V(Array.prototype, pop, ArrayPop) \ + V(Array.prototype, shift, ArrayShift) \ V(Function.prototype, apply, FunctionApply) \ V(String.prototype, charCodeAt, StringCharCodeAt) \ V(String.prototype, charAt, StringCharAt) \ @@ -6675,7 +7043,9 @@ V(Math, pow, MathPow) \ V(Math, max, MathMax) \ V(Math, min, MathMin) \ - V(Math, imul, MathImul) + V(Math, imul, MathImul) \ + V(Math, clz32, MathClz32) \ + V(Math, fround, MathFround) enum BuiltinFunctionId { kArrayCode, @@ -6685,9 +7055,7 @@ #undef DECLARE_FUNCTION_ID // Fake id for a special case of Math.pow. Note, it continues the // list of math functions. - kMathPowHalf, - // Installed only on --harmony-maths. - kMathClz32 + kMathPowHalf }; @@ -6723,14 +7091,12 @@ // Removed a specific optimized code object from the optimized code map. void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason); + void ClearTypeFeedbackInfo(); + // Trims the optimized code map after entries have been removed. void TrimOptimizedCodeMap(int shrink_by); // Add a new entry to the optimized code map. - MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context, - Code* code, - FixedArray* literals, - BailoutId osr_ast_id); static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared, Handle<Context> native_context, Handle<Code> code, @@ -6753,121 +7119,31 @@ // [construct stub]: Code stub for constructing instances of this function. DECL_ACCESSORS(construct_stub, Code) - // Returns if this function has been compiled to native code yet. - inline bool is_compiled(); - - // [length]: The function length - usually the number of declared parameters. - // Use up to 2^30 parameters. - inline int length(); - inline void set_length(int value); - - // [formal parameter count]: The declared number of parameters. 
- inline int formal_parameter_count(); - inline void set_formal_parameter_count(int value); - - // Set the formal parameter count so the function code will be - // called without using argument adaptor frames. - inline void DontAdaptArguments(); - - // [expected_nof_properties]: Expected number of properties for the function. - inline int expected_nof_properties(); - inline void set_expected_nof_properties(int value); - - // Inobject slack tracking is the way to reclaim unused inobject space. - // - // The instance size is initially determined by adding some slack to - // expected_nof_properties (to allow for a few extra properties added - // after the constructor). There is no guarantee that the extra space - // will not be wasted. - // - // Here is the algorithm to reclaim the unused inobject space: - // - Detect the first constructor call for this SharedFunctionInfo. - // When it happens enter the "in progress" state: remember the - // constructor's initial_map and install a special construct stub that - // counts constructor calls. - // - While the tracking is in progress create objects filled with - // one_pointer_filler_map instead of undefined_value. This way they can be - // resized quickly and safely. - // - Once enough (kGenerousAllocationCount) objects have been created - // compute the 'slack' (traverse the map transition tree starting from the - // initial_map and find the lowest value of unused_property_fields). - // - Traverse the transition tree again and decrease the instance size - // of every map. Existing objects will resize automatically (they are - // filled with one_pointer_filler_map). All further allocations will - // use the adjusted instance size. - // - Decrease expected_nof_properties so that an allocations made from - // another context will use the adjusted instance size too. - // - Exit "in progress" state by clearing the reference to the initial_map - // and setting the regular construct stub (generic or inline). 
- // - // The above is the main event sequence. Some special cases are possible - // while the tracking is in progress: - // - // - GC occurs. - // Check if the initial_map is referenced by any live objects (except this - // SharedFunctionInfo). If it is, continue tracking as usual. - // If it is not, clear the reference and reset the tracking state. The - // tracking will be initiated again on the next constructor call. - // - // - The constructor is called from another context. - // Immediately complete the tracking, perform all the necessary changes - // to maps. This is necessary because there is no efficient way to track - // multiple initial_maps. - // Proceed to create an object in the current context (with the adjusted - // size). - // - // - A different constructor function sharing the same SharedFunctionInfo is - // called in the same context. This could be another closure in the same - // context, or the first function could have been disposed. - // This is handled the same way as the previous case. - // - // Important: inobject slack tracking is not attempted during the snapshot - // creation. - - static const int kGenerousAllocationCount = 8; - - // [construction_count]: Counter for constructor calls made during - // the tracking phase. - inline int construction_count(); - inline void set_construction_count(int value); + // Returns if this function has been compiled to native code yet. + inline bool is_compiled(); - // [initial_map]: initial map of the first function called as a constructor. - // Saved for the duration of the tracking phase. - // This is a weak link (GC resets it to undefined_value if no other live - // object reference this map). - DECL_ACCESSORS(initial_map, Object) + // [length]: The function length - usually the number of declared parameters. + // Use up to 2^30 parameters. + inline int length() const; + inline void set_length(int value); - // True if the initial_map is not undefined and the countdown stub is - // installed. 
- inline bool IsInobjectSlackTrackingInProgress(); + // [formal parameter count]: The declared number of parameters. + inline int formal_parameter_count() const; + inline void set_formal_parameter_count(int value); - // Starts the tracking. - // Stores the initial map and installs the countdown stub. - // IsInobjectSlackTrackingInProgress is normally true after this call, - // except when tracking have not been started (e.g. the map has no unused - // properties or the snapshot is being built). - void StartInobjectSlackTracking(Map* map); + // Set the formal parameter count so the function code will be + // called without using argument adaptor frames. + inline void DontAdaptArguments(); - // Completes the tracking. - // IsInobjectSlackTrackingInProgress is false after this call. - void CompleteInobjectSlackTracking(); + // [expected_nof_properties]: Expected number of properties for the function. + inline int expected_nof_properties() const; + inline void set_expected_nof_properties(int value); - // Invoked before pointers in SharedFunctionInfo are being marked. - // Also clears the optimized code map. - inline void BeforeVisitingPointers(); - - // Clears the initial_map before the GC marking phase to ensure the reference - // is weak. IsInobjectSlackTrackingInProgress is false after this call. - void DetachInitialMap(); - - // Restores the link to the initial map after the GC marking phase. - // IsInobjectSlackTrackingInProgress is true after this call. - void AttachInitialMap(Map* map); - - // False if there are definitely no live objects created from this function. - // True if live objects _may_ exist (existence not guaranteed). - // May go back from true to false after GC. - DECL_BOOLEAN_ACCESSORS(live_objects_may_exist) + // [feedback_vector] - accumulates ast node feedback from full-codegen and + // (increasingly) from crankshafted code where sufficient feedback isn't + // available. 
Currently the field is duplicated in + // TypeFeedbackInfo::feedback_vector, but the allocation is done here. + DECL_ACCESSORS(feedback_vector, FixedArray) // [instance class name]: class name for instances. DECL_ACCESSORS(instance_class_name, Object) @@ -6889,7 +7165,7 @@ DECL_ACCESSORS(script, Object) // [num_literals]: Number of literals used by this function. - inline int num_literals(); + inline int num_literals() const; inline void set_num_literals(int value); // [start_position_and_type]: Field used to store both the source code @@ -6897,7 +7173,7 @@ // and whether or not the function is a toplevel function. The two // least significants bit indicates whether the function is an // expression and the rest contains the source code position. - inline int start_position_and_type(); + inline int start_position_and_type() const; inline void set_start_position_and_type(int value); // [debug info]: Debug information. @@ -6914,15 +7190,15 @@ String* DebugName(); // Position of the 'function' token in the script source. - inline int function_token_position(); + inline int function_token_position() const; inline void set_function_token_position(int function_token_position); // Position of this function in the script source. - inline int start_position(); + inline int start_position() const; inline void set_start_position(int start_position); // End position of this function in the script source. - inline int end_position(); + inline int end_position() const; inline void set_end_position(int end_position); // Is this function a function expression in the source code. @@ -6933,13 +7209,14 @@ // Bit field containing various information collected by the compiler to // drive optimization. 
- inline int compiler_hints(); + inline int compiler_hints() const; inline void set_compiler_hints(int value); - inline int ast_node_count(); + inline int ast_node_count() const; inline void set_ast_node_count(int count); - inline int profiler_ticks(); + inline int profiler_ticks() const; + inline void set_profiler_ticks(int ticks); // Inline cache age is used to infer whether the function survived a context // disposal or not. In the former case we reset the opt_count. @@ -6999,12 +7276,6 @@ // Is this a function or top-level/eval code. DECL_BOOLEAN_ACCESSORS(is_function) - // Indicates that the function cannot be optimized. - DECL_BOOLEAN_ACCESSORS(dont_optimize) - - // Indicates that the function cannot be inlined. - DECL_BOOLEAN_ACCESSORS(dont_inline) - // Indicates that code for this function cannot be cached. DECL_BOOLEAN_ACCESSORS(dont_cache) @@ -7014,6 +7285,9 @@ // Indicates that this function is a generator. DECL_BOOLEAN_ACCESSORS(is_generator) + // Indicates that this function is an arrow function. + DECL_BOOLEAN_ACCESSORS(is_arrow) + // Indicates whether or not the code in the shared function support // deoptimization. inline bool has_deoptimization_support(); @@ -7027,13 +7301,13 @@ inline BailoutReason DisableOptimizationReason(); - // Lookup the bailout ID and ASSERT that it exists in the non-optimized + // Lookup the bailout ID and DCHECK that it exists in the non-optimized // code, returns whether it asserted (i.e., always true if assertions are // disabled). bool VerifyBailoutId(BailoutId id); // [source code]: Source code for the function. - bool HasSourceCode(); + bool HasSourceCode() const; Handle<Object> GetSourceCode(); // Number of times the function was optimized. @@ -7054,11 +7328,11 @@ // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields. inline void set_counters(int value); - inline int counters(); + inline int counters() const; // Stores opt_count and bailout_reason as bit-fields. 
inline void set_opt_count_and_bailout_reason(int value); - inline int opt_count_and_bailout_reason(); + inline int opt_count_and_bailout_reason() const; void set_bailout_reason(BailoutReason reason) { set_opt_count_and_bailout_reason( @@ -7066,11 +7340,6 @@ reason)); } - void set_dont_optimize_reason(BailoutReason reason) { - set_bailout_reason(reason); - set_dont_optimize(reason != kNoReason); - } - // Check whether or not this function is inlineable. bool IsInlineable(); @@ -7084,15 +7353,12 @@ int CalculateInObjectProperties(); // Dispatched behavior. - // Set max_length to -1 for unlimited length. - void SourceCodePrint(StringStream* accumulator, int max_length); DECLARE_PRINTER(SharedFunctionInfo) DECLARE_VERIFIER(SharedFunctionInfo) void ResetForNewContext(int new_ic_age); - // Casting. - static inline SharedFunctionInfo* cast(Object* obj); + DECLARE_CAST(SharedFunctionInfo) // Constants. static const int kDontAdaptArgumentsSentinel = -1; @@ -7111,16 +7377,12 @@ static const int kScriptOffset = kFunctionDataOffset + kPointerSize; static const int kDebugInfoOffset = kScriptOffset + kPointerSize; static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize; - static const int kInitialMapOffset = + static const int kFeedbackVectorOffset = kInferredNameOffset + kPointerSize; - // ast_node_count is a Smi field. It could be grouped with another Smi field - // into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available. - static const int kAstNodeCountOffset = - kInitialMapOffset + kPointerSize; #if V8_HOST_ARCH_32_BIT // Smi fields. 
static const int kLengthOffset = - kAstNodeCountOffset + kPointerSize; + kFeedbackVectorOffset + kPointerSize; static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize; static const int kExpectedNofPropertiesOffset = kFormalParameterCountOffset + kPointerSize; @@ -7138,9 +7400,13 @@ kCompilerHintsOffset + kPointerSize; static const int kCountersOffset = kOptCountAndBailoutReasonOffset + kPointerSize; + static const int kAstNodeCountOffset = + kCountersOffset + kPointerSize; + static const int kProfilerTicksOffset = + kAstNodeCountOffset + kPointerSize; // Total size. - static const int kSize = kCountersOffset + kPointerSize; + static const int kSize = kProfilerTicksOffset + kPointerSize; #else // The only reason to use smi fields instead of int fields // is to allow iteration without maps decoding during @@ -7152,7 +7418,7 @@ // word is not set and thus this word cannot be treated as pointer // to HeapObject during old space traversal. static const int kLengthOffset = - kAstNodeCountOffset + kPointerSize; + kFeedbackVectorOffset + kPointerSize; static const int kFormalParameterCountOffset = kLengthOffset + kIntSize; @@ -7173,30 +7439,23 @@ static const int kOptCountAndBailoutReasonOffset = kCompilerHintsOffset + kIntSize; - static const int kCountersOffset = kOptCountAndBailoutReasonOffset + kIntSize; - // Total size. - static const int kSize = kCountersOffset + kIntSize; + static const int kAstNodeCountOffset = + kCountersOffset + kIntSize; + static const int kProfilerTicksOffset = + kAstNodeCountOffset + kIntSize; -#endif + // Total size. + static const int kSize = kProfilerTicksOffset + kIntSize; - // The construction counter for inobject slack tracking is stored in the - // most significant byte of compiler_hints which is otherwise unused. - // Its offset depends on the endian-ness of the architecture. 
-#if __BYTE_ORDER == __LITTLE_ENDIAN - static const int kConstructionCountOffset = kCompilerHintsOffset + 3; -#elif __BYTE_ORDER == __BIG_ENDIAN - static const int kConstructionCountOffset = kCompilerHintsOffset + 0; -#else -#error Unknown byte ordering #endif static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize); typedef FixedBodyDescriptor<kNameOffset, - kInitialMapOffset + kPointerSize, + kFeedbackVectorOffset + kPointerSize, kSize> BodyDescriptor; // Bit positions in start_position_and_type. @@ -7211,7 +7470,6 @@ enum CompilerHints { kAllowLazyCompilation, kAllowLazyCompilationWithoutContext, - kLiveObjectsMayExist, kOptimizationDisabled, kStrictModeFunction, kUsesArguments, @@ -7222,11 +7480,10 @@ kIsAnonymous, kNameShouldPrintAsAnonymous, kIsFunction, - kDontOptimize, - kDontInline, kDontCache, kDontFlush, kIsGenerator, + kIsArrow, kCompilerHintsCount // Pseudo entry }; @@ -7261,12 +7518,12 @@ static const int kNativeBitWithinByte = (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte; -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if defined(V8_TARGET_LITTLE_ENDIAN) static const int kStrictModeByteOffset = kCompilerHintsOffset + (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte; static const int kNativeByteOffset = kCompilerHintsOffset + (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif defined(V8_TARGET_BIG_ENDIAN) static const int kStrictModeByteOffset = kCompilerHintsOffset + (kCompilerHintsSize - 1) - ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte); @@ -7282,6 +7539,18 @@ }; +// Printing support. +struct SourceCodeOf { + explicit SourceCodeOf(SharedFunctionInfo* v, int max = -1) + : value(v), max_length(max) {} + const SharedFunctionInfo* value; + int max_length; +}; + + +OStream& operator<<(OStream& os, const SourceCodeOf& v); + + class JSGeneratorObject: public JSObject { public: // [function]: The function corresponding to this generator object. 
@@ -7298,19 +7567,21 @@ // A positive offset indicates a suspended generator. The special // kGeneratorExecuting and kGeneratorClosed values indicate that a generator // cannot be resumed. - inline int continuation(); + inline int continuation() const; inline void set_continuation(int continuation); + inline bool is_closed(); + inline bool is_executing(); + inline bool is_suspended(); // [operand_stack]: Saved operand stack. DECL_ACCESSORS(operand_stack, FixedArray) // [stack_handler_index]: Index of first stack handler in operand_stack, or -1 // if the captured activation had no stack handler. - inline int stack_handler_index(); + inline int stack_handler_index() const; inline void set_stack_handler_index(int stack_handler_index); - // Casting. - static inline JSGeneratorObject* cast(Object* obj); + DECLARE_CAST(JSGeneratorObject) // Dispatched behavior. DECLARE_PRINTER(JSGeneratorObject) @@ -7334,7 +7605,7 @@ enum ResumeMode { NEXT, THROW }; // Yielding from a generator returns an object with the following inobject - // properties. See Context::generator_result_map() for the map. + // properties. See Context::iterator_result_map() for the map. static const int kResultValuePropertyIndex = 0; static const int kResultDonePropertyIndex = 1; static const int kResultPropertyCount = 2; @@ -7358,8 +7629,7 @@ // [scope_info]: Scope info. DECL_ACCESSORS(scope_info, ScopeInfo) - // Casting. - static inline JSModule* cast(Object* obj); + DECLARE_CAST(JSModule) // Dispatched behavior. DECLARE_PRINTER(JSModule) @@ -7388,6 +7658,7 @@ // [context]: The context for this function. inline Context* context(); inline void set_context(Object* context); + inline JSObject* global_proxy(); // [code]: The generated code object for this function. Executed // when the function is invoked, e.g. foo() or new foo(). See @@ -7401,6 +7672,12 @@ // Tells whether this function is builtin. inline bool IsBuiltin(); + // Tells whether this function is defined in a native script. 
+ inline bool IsFromNativeScript(); + + // Tells whether this function is defined in an extension script. + inline bool IsFromExtensionScript(); + // Tells whether or not the function needs arguments adaption. inline bool NeedsArgumentsAdaption(); @@ -7424,6 +7701,54 @@ // Tells whether or not the function is on the concurrent recompilation queue. inline bool IsInOptimizationQueue(); + // Inobject slack tracking is the way to reclaim unused inobject space. + // + // The instance size is initially determined by adding some slack to + // expected_nof_properties (to allow for a few extra properties added + // after the constructor). There is no guarantee that the extra space + // will not be wasted. + // + // Here is the algorithm to reclaim the unused inobject space: + // - Detect the first constructor call for this JSFunction. + // When it happens enter the "in progress" state: initialize construction + // counter in the initial_map and set the |done_inobject_slack_tracking| + // flag. + // - While the tracking is in progress create objects filled with + // one_pointer_filler_map instead of undefined_value. This way they can be + // resized quickly and safely. + // - Once enough (kGenerousAllocationCount) objects have been created + // compute the 'slack' (traverse the map transition tree starting from the + // initial_map and find the lowest value of unused_property_fields). + // - Traverse the transition tree again and decrease the instance size + // of every map. Existing objects will resize automatically (they are + // filled with one_pointer_filler_map). All further allocations will + // use the adjusted instance size. + // - SharedFunctionInfo's expected_nof_properties left unmodified since + // allocations made using different closures could actually create different + // kind of objects (see prototype inheritance pattern). + // + // Important: inobject slack tracking is not attempted during the snapshot + // creation. 
+ + static const int kGenerousAllocationCount = Map::ConstructionCount::kMax; + static const int kFinishSlackTracking = 1; + static const int kNoSlackTracking = 0; + + // True if the initial_map is set and the object constructions countdown + // counter is not zero. + inline bool IsInobjectSlackTrackingInProgress(); + + // Starts the tracking. + // Initializes object constructions countdown counter in the initial map. + // IsInobjectSlackTrackingInProgress is normally true after this call, + // except when tracking have not been started (e.g. the map has no unused + // properties or the snapshot is being built). + void StartInobjectSlackTracking(); + + // Completes the tracking. + // IsInobjectSlackTrackingInProgress is false after this call. + void CompleteInobjectSlackTracking(); + // [literals_or_bindings]: Fixed array holding either // the materialized literals or the bindings of a bound function. // @@ -7448,7 +7773,8 @@ // The initial map for an object created by this constructor. inline Map* initial_map(); - inline void set_initial_map(Map* value); + static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map, + Handle<Object> prototype); inline bool has_initial_map(); static void EnsureHasInitialMap(Handle<JSFunction> function); @@ -7467,7 +7793,7 @@ // After prototype is removed, it will not be created when accessed, and // [[Construct]] from this function will not be allowed. - void RemovePrototype(); + bool RemovePrototype(); inline bool should_have_prototype(); // Accessor for this function's initial map's [[class]] @@ -7493,8 +7819,7 @@ // Prints the name of the function using PrintF. void PrintName(FILE* out = stdout); - // Casting. - static inline JSFunction* cast(Object* obj); + DECLARE_CAST(JSFunction) // Iterates the objects, including code objects indirectly referenced // through pointers to the first instruction in the code object. @@ -7554,10 +7879,12 @@ // It is null value if this object is not used by any context. 
DECL_ACCESSORS(native_context, Object) - // Casting. - static inline JSGlobalProxy* cast(Object* obj); + // [hash]: The hash code property (undefined if not initialized yet). + DECL_ACCESSORS(hash, Object) - inline bool IsDetachedFrom(GlobalObject* global); + DECLARE_CAST(JSGlobalProxy) + + inline bool IsDetachedFrom(GlobalObject* global) const; // Dispatched behavior. DECLARE_PRINTER(JSGlobalProxy) @@ -7565,7 +7892,8 @@ // Layout description. static const int kNativeContextOffset = JSObject::kHeaderSize; - static const int kSize = kNativeContextOffset + kPointerSize; + static const int kHashOffset = kNativeContextOffset + kPointerSize; + static const int kSize = kHashOffset + kPointerSize; private: DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy); @@ -7588,30 +7916,20 @@ // [global context]: the most recent (i.e. innermost) global context. DECL_ACCESSORS(global_context, Context) - // [global receiver]: the global receiver object of the context - DECL_ACCESSORS(global_receiver, JSObject) + // [global proxy]: the global proxy object of the context + DECL_ACCESSORS(global_proxy, JSObject) // Retrieve the property cell used to store a property. PropertyCell* GetPropertyCell(LookupResult* result); - // This is like GetProperty, but is used when you know the lookup won't fail - // by throwing an exception. This is for the debug and builtins global - // objects, where it is known which properties can be expected to be present - // on the object. - Object* GetPropertyNoExceptionThrown(Name* key) { - Object* answer = GetProperty(key)->ToObjectUnchecked(); - return answer; - } - - // Casting. - static inline GlobalObject* cast(Object* obj); + DECLARE_CAST(GlobalObject) // Layout description. 
static const int kBuiltinsOffset = JSObject::kHeaderSize; static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize; static const int kGlobalContextOffset = kNativeContextOffset + kPointerSize; - static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize; - static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize; + static const int kGlobalProxyOffset = kGlobalContextOffset + kPointerSize; + static const int kHeaderSize = kGlobalProxyOffset + kPointerSize; private: DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject); @@ -7621,8 +7939,7 @@ // JavaScript global object. class JSGlobalObject: public GlobalObject { public: - // Casting. - static inline JSGlobalObject* cast(Object* obj); + DECLARE_CAST(JSGlobalObject) // Ensure that the global object has a cell for the given property name. static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global, @@ -7654,8 +7971,7 @@ inline Code* javascript_builtin_code(Builtins::JavaScript id); inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value); - // Casting. - static inline JSBuiltinsObject* cast(Object* obj); + DECLARE_CAST(JSBuiltinsObject) // Dispatched behavior. DECLARE_PRINTER(JSBuiltinsObject) @@ -7690,8 +8006,7 @@ // [value]: the object being wrapped. DECL_ACCESSORS(value, Object) - // Casting. - static inline JSValue* cast(Object* obj); + DECLARE_CAST(JSValue) // Dispatched behavior. DECLARE_PRINTER(JSValue) @@ -7729,11 +8044,10 @@ // [sec]: caches seconds. Either undefined, smi, or NaN. DECL_ACCESSORS(sec, Object) // [cache stamp]: sample of the date cache stamp at the - // moment when local fields were cached. + // moment when chached fields were cached. DECL_ACCESSORS(cache_stamp, Object) - // Casting. - static inline JSDate* cast(Object* obj); + DECLARE_CAST(JSDate) // Returns the date field with the specified index. // See FieldIndex for the list of date fields. 
@@ -7793,7 +8107,7 @@ Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache); // Computes and caches the cacheable fields of the date. - inline void SetLocalFields(int64_t local_time_ms, DateCache* date_cache); + inline void SetCachedFields(int64_t local_time_ms, DateCache* date_cache); DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate); @@ -7821,15 +8135,14 @@ DECL_ACCESSORS(stack_frames, Object) // [start_position]: the start position in the script for the error message. - inline int start_position(); + inline int start_position() const; inline void set_start_position(int value); // [end_position]: the end position in the script for the error message. - inline int end_position(); + inline int end_position() const; inline void set_end_position(int value); - // Casting. - static inline JSMessageObject* cast(Object* obj); + DECLARE_CAST(JSMessageObject) // Dispatched behavior. DECLARE_PRINTER(JSMessageObject) @@ -7913,7 +8226,7 @@ } } - static inline JSRegExp* cast(Object* obj); + DECLARE_CAST(JSRegExp) // Dispatched behavior. DECLARE_VERIFIER(JSRegExp) @@ -8003,41 +8316,35 @@ return key->HashForObject(object); } - MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap, - HashTableKey* key) { - return key->AsObject(heap); - } + static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key); static const int kPrefixSize = 0; static const int kEntrySize = 2; }; -class CompilationCacheTable: public HashTable<CompilationCacheShape, +class CompilationCacheTable: public HashTable<CompilationCacheTable, + CompilationCacheShape, HashTableKey*> { public: // Find cached value for a string key, otherwise return null. 
- Object* Lookup(String* src, Context* context); - Object* LookupEval(String* src, - Context* context, - StrictMode strict_mode, - int scope_position); - Object* LookupRegExp(String* source, JSRegExp::Flags flags); - MUST_USE_RESULT MaybeObject* Put(String* src, - Context* context, - Object* value); - MUST_USE_RESULT MaybeObject* PutEval(String* src, - Context* context, - SharedFunctionInfo* value, - int scope_position); - MUST_USE_RESULT MaybeObject* PutRegExp(String* src, - JSRegExp::Flags flags, - FixedArray* value); - - // Remove given value from cache. + Handle<Object> Lookup(Handle<String> src, Handle<Context> context); + Handle<Object> LookupEval(Handle<String> src, Handle<Context> context, + StrictMode strict_mode, int scope_position); + Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags); + static Handle<CompilationCacheTable> Put( + Handle<CompilationCacheTable> cache, Handle<String> src, + Handle<Context> context, Handle<Object> value); + static Handle<CompilationCacheTable> PutEval( + Handle<CompilationCacheTable> cache, Handle<String> src, + Handle<Context> context, Handle<SharedFunctionInfo> value, + int scope_position); + static Handle<CompilationCacheTable> PutRegExp( + Handle<CompilationCacheTable> cache, Handle<String> src, + JSRegExp::Flags flags, Handle<FixedArray> value); void Remove(Object* value); - static inline CompilationCacheTable* cast(Object* obj); + DECLARE_CAST(CompilationCacheTable) private: DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable); @@ -8050,7 +8357,8 @@ DECL_ACCESSORS(normal_type_cache, Object) // Add the code object to the cache. - MUST_USE_RESULT MaybeObject* Update(Name* name, Code* code); + static void Update( + Handle<CodeCache> cache, Handle<Name> name, Handle<Code> code); // Lookup code object in the cache. Returns code object if found and undefined // if not. @@ -8065,7 +8373,7 @@ // Remove an object from the cache with the provided internal index. 
void RemoveByIndex(Object* name, Code* code, int index); - static inline CodeCache* cast(Object* obj); + DECLARE_CAST(CodeCache) // Dispatched behavior. DECLARE_PRINTER(CodeCache) @@ -8077,8 +8385,10 @@ static const int kSize = kNormalTypeCacheOffset + kPointerSize; private: - MUST_USE_RESULT MaybeObject* UpdateDefaultCache(Name* name, Code* code); - MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(Name* name, Code* code); + static void UpdateDefaultCache( + Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code); + static void UpdateNormalTypeCache( + Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code); Object* LookupDefaultCache(Name* name, Code::Flags flags); Object* LookupNormalTypeCache(Name* name, Code::Flags flags); @@ -8106,26 +8416,27 @@ return key->HashForObject(object); } - MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap, - HashTableKey* key) { - return key->AsObject(heap); - } + static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key); static const int kPrefixSize = 0; static const int kEntrySize = 2; }; -class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape, +class CodeCacheHashTable: public HashTable<CodeCacheHashTable, + CodeCacheHashTableShape, HashTableKey*> { public: Object* Lookup(Name* name, Code::Flags flags); - MUST_USE_RESULT MaybeObject* Put(Name* name, Code* code); + static Handle<CodeCacheHashTable> Put( + Handle<CodeCacheHashTable> table, + Handle<Name> name, + Handle<Code> code); int GetIndex(Name* name, Code::Flags flags); void RemoveByIndex(int index); - static inline CodeCacheHashTable* cast(Object* obj); + DECLARE_CAST(CodeCacheHashTable) // Initial size of the fixed array backing the hash table. static const int kInitialSize = 64; @@ -8144,14 +8455,11 @@ Code::Flags flags, Handle<Code> code); - MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps, - Code::Flags flags, - Code* code); // Returns an undefined value if the entry is not found. 
Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags); - static inline PolymorphicCodeCache* cast(Object* obj); + DECLARE_CAST(PolymorphicCodeCache) // Dispatched behavior. DECLARE_PRINTER(PolymorphicCodeCache) @@ -8166,15 +8474,19 @@ class PolymorphicCodeCacheHashTable - : public HashTable<CodeCacheHashTableShape, HashTableKey*> { + : public HashTable<PolymorphicCodeCacheHashTable, + CodeCacheHashTableShape, + HashTableKey*> { public: Object* Lookup(MapHandleList* maps, int code_kind); - MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps, - int code_kind, - Code* code); + static Handle<PolymorphicCodeCacheHashTable> Put( + Handle<PolymorphicCodeCacheHashTable> hash_table, + MapHandleList* maps, + int code_kind, + Handle<Code> code); - static inline PolymorphicCodeCacheHashTable* cast(Object* obj); + DECLARE_CAST(PolymorphicCodeCacheHashTable) static const int kInitialSize = 64; private: @@ -8188,7 +8500,10 @@ inline void set_ic_total_count(int count); inline int ic_with_type_info_count(); - inline void change_ic_with_type_info_count(int count); + inline void change_ic_with_type_info_count(int delta); + + inline int ic_generic_count(); + inline void change_ic_generic_count(int delta); inline void initialize_storage(); @@ -8198,9 +8513,8 @@ inline void set_inlined_type_change_checksum(int checksum); inline bool matches_inlined_type_change_checksum(int checksum); - DECL_ACCESSORS(feedback_vector, FixedArray) - static inline TypeFeedbackInfo* cast(Object* obj); + DECLARE_CAST(TypeFeedbackInfo) // Dispatched behavior. 
DECLARE_PRINTER(TypeFeedbackInfo) @@ -8208,10 +8522,10 @@ static const int kStorage1Offset = HeapObject::kHeaderSize; static const int kStorage2Offset = kStorage1Offset + kPointerSize; - static const int kFeedbackVectorOffset = - kStorage2Offset + kPointerSize; - static const int kSize = kFeedbackVectorOffset + kPointerSize; + static const int kStorage3Offset = kStorage2Offset + kPointerSize; + static const int kSize = kStorage3Offset + kPointerSize; + // TODO(mvstanton): move these sentinel declarations to shared function info. // The object that indicates an uninitialized cache. static inline Handle<Object> UninitializedSentinel(Isolate* isolate); @@ -8227,9 +8541,6 @@ // garbage collection (e.g., for patching the cache). static inline Object* RawUninitializedSentinel(Heap* heap); - static const int kForInFastCaseMarker = 0; - static const int kForInSlowCaseMarker = 1; - private: static const int kTypeChangeChecksumBits = 7; @@ -8265,11 +8576,14 @@ enum PretenureDecision { kUndecided = 0, kDontTenure = 1, - kTenure = 2, - kZombie = 3, + kMaybeTenure = 2, + kTenure = 3, + kZombie = 4, kLastPretenureDecisionValue = kZombie }; + const char* PretenureDecisionName(PretenureDecision decision); + DECL_ACCESSORS(transition_info, Object) // nested_site threads a list of sites that represent nested literals // walked in a particular order. 
So [[1, 2], 1, 2] will have one @@ -8291,8 +8605,8 @@ class DoNotInlineBit: public BitField<bool, 29, 1> {}; // Bitfields for pretenure_data - class MementoFoundCountBits: public BitField<int, 0, 27> {}; - class PretenureDecisionBits: public BitField<PretenureDecision, 27, 2> {}; + class MementoFoundCountBits: public BitField<int, 0, 26> {}; + class PretenureDecisionBits: public BitField<PretenureDecision, 26, 3> {}; class DeoptDependentCodeBit: public BitField<bool, 29, 1> {}; STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue); @@ -8353,12 +8667,20 @@ return pretenure_decision() == kZombie; } + bool IsMaybeTenure() { + return pretenure_decision() == kMaybeTenure; + } + inline void MarkZombie(); - inline bool DigestPretenuringFeedback(); + inline bool MakePretenureDecision(PretenureDecision current_decision, + double ratio, + bool maximum_size_scavenge); + + inline bool DigestPretenuringFeedback(bool maximum_size_scavenge); ElementsKind GetElementsKind() { - ASSERT(!SitePointsToLiteral()); + DCHECK(!SitePointsToLiteral()); int value = Smi::cast(transition_info())->value(); return ElementsKindBits::decode(value); } @@ -8402,7 +8724,7 @@ DECLARE_PRINTER(AllocationSite) DECLARE_VERIFIER(AllocationSite) - static inline AllocationSite* cast(Object* obj); + DECLARE_CAST(AllocationSite) static inline AllocationSiteMode GetMode( ElementsKind boilerplate_elements_kind); static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to); @@ -8450,14 +8772,14 @@ !AllocationSite::cast(allocation_site())->IsZombie(); } AllocationSite* GetAllocationSite() { - ASSERT(IsValid()); + DCHECK(IsValid()); return AllocationSite::cast(allocation_site()); } DECLARE_PRINTER(AllocationMemento) DECLARE_VERIFIER(AllocationMemento) - static inline AllocationMemento* cast(Object* obj); + DECLARE_CAST(AllocationMemento) private: DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento); @@ -8474,10 +8796,10 @@ // - all attributes are available as part if the property 
details class AliasedArgumentsEntry: public Struct { public: - inline int aliased_context_slot(); + inline int aliased_context_slot() const; inline void set_aliased_context_slot(int count); - static inline AliasedArgumentsEntry* cast(Object* obj); + DECLARE_CAST(AliasedArgumentsEntry) // Dispatched behavior. DECLARE_PRINTER(AliasedArgumentsEntry) @@ -8549,6 +8871,19 @@ }; +class IteratingStringHasher : public StringHasher { + public: + static inline uint32_t Hash(String* string, uint32_t seed); + inline void VisitOneByteString(const uint8_t* chars, int length); + inline void VisitTwoByteString(const uint16_t* chars, int length); + + private: + inline IteratingStringHasher(int len, uint32_t seed) + : StringHasher(len, seed) {} + DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher); +}; + + // The characteristics of a string are stored in its map. Retrieving these // few bits of information is moderately expensive, involving two memory // loads where the second is dependent on the first. To improve efficiency @@ -8562,7 +8897,7 @@ // concrete performance benefit at that particular point in the code. class StringShape BASE_EMBEDDED { public: - inline explicit StringShape(String* s); + inline explicit StringShape(const String* s); inline explicit StringShape(Map* s); inline explicit StringShape(InstanceType t); inline bool IsSequential(); @@ -8614,14 +8949,15 @@ // Equality operations. inline bool Equals(Name* other); + inline static bool Equals(Handle<Name> one, Handle<Name> two); // Conversion. inline bool AsArrayIndex(uint32_t* index); - // Casting. - static inline Name* cast(Object* obj); + // Whether name can only name own properties. 
+ inline bool IsOwn(); - bool IsCacheable(Isolate* isolate); + DECLARE_CAST(Name) DECLARE_PRINTER(Name) @@ -8655,23 +8991,22 @@ static const int kArrayIndexLengthBits = kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields; - STATIC_CHECK((kArrayIndexLengthBits > 0)); - - static const int kArrayIndexHashLengthShift = - kArrayIndexValueBits + kNofHashBitFields; + STATIC_ASSERT((kArrayIndexLengthBits > 0)); - static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1; - - static const int kArrayIndexValueMask = - ((1 << kArrayIndexValueBits) - 1) << kHashShift; + class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields, + kArrayIndexValueBits> {}; // NOLINT + class ArrayIndexLengthBits : public BitField<unsigned int, + kNofHashBitFields + kArrayIndexValueBits, + kArrayIndexLengthBits> {}; // NOLINT // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we // could use a mask to test if the length of string is less than or equal to // kMaxCachedArrayIndexLength. - STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1)); + STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1)); static const unsigned int kContainsCachedArrayIndexMask = - (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) | + (~static_cast<unsigned>(kMaxCachedArrayIndexLength) + << ArrayIndexLengthBits::kShift) | kIsNotArrayIndexMask; // Value of empty hash field indicating that the hash is not computed. @@ -8697,8 +9032,11 @@ // [is_private]: whether this is a private symbol. DECL_BOOLEAN_ACCESSORS(is_private) - // Casting. - static inline Symbol* cast(Object* obj); + // [is_own]: whether this is an own symbol, that is, only used to designate + // own properties of objects. + DECL_BOOLEAN_ACCESSORS(is_own) + + DECLARE_CAST(Symbol) // Dispatched behavior. 
DECLARE_PRINTER(Symbol) @@ -8713,6 +9051,7 @@ private: static const int kPrivateBit = 0; + static const int kOwnBit = 1; DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol); }; @@ -8732,6 +9071,34 @@ public: enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING }; + // Array index strings this short can keep their index in the hash field. + static const int kMaxCachedArrayIndexLength = 7; + + // For strings which are array indexes the hash value has the string length + // mixed into the hash, mainly to avoid a hash value of zero which would be + // the case for the string '0'. 24 bits are used for the array index value. + static const int kArrayIndexValueBits = 24; + static const int kArrayIndexLengthBits = + kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields; + + STATIC_ASSERT((kArrayIndexLengthBits > 0)); + + class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields, + kArrayIndexValueBits> {}; // NOLINT + class ArrayIndexLengthBits : public BitField<unsigned int, + kNofHashBitFields + kArrayIndexValueBits, + kArrayIndexLengthBits> {}; // NOLINT + + // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we + // could use a mask to test if the length of string is less than or equal to + // kMaxCachedArrayIndexLength. + STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1)); + + static const unsigned int kContainsCachedArrayIndexMask = + (~static_cast<unsigned>(kMaxCachedArrayIndexLength) + << ArrayIndexLengthBits::kShift) | + kIsNotArrayIndexMask; + // Representation of the flat content of a String. // A non-flat string doesn't have flat content. // A flat string has content that's encoded as a sequence of either @@ -8749,44 +9116,58 @@ // Return the one byte content of the string. Only use if IsAscii() returns // true. Vector<const uint8_t> ToOneByteVector() { - ASSERT_EQ(ASCII, state_); - return buffer_; + DCHECK_EQ(ASCII, state_); + return Vector<const uint8_t>(onebyte_start, length_); } // Return the two-byte content of the string. 
Only use if IsTwoByte() // returns true. Vector<const uc16> ToUC16Vector() { - ASSERT_EQ(TWO_BYTE, state_); - return Vector<const uc16>::cast(buffer_); + DCHECK_EQ(TWO_BYTE, state_); + return Vector<const uc16>(twobyte_start, length_); + } + + uc16 Get(int i) { + DCHECK(i < length_); + DCHECK(state_ != NON_FLAT); + if (state_ == ASCII) return onebyte_start[i]; + return twobyte_start[i]; } private: enum State { NON_FLAT, ASCII, TWO_BYTE }; // Constructors only used by String::GetFlatContent(). - explicit FlatContent(Vector<const uint8_t> chars) - : buffer_(chars), - state_(ASCII) { } - explicit FlatContent(Vector<const uc16> chars) - : buffer_(Vector<const byte>::cast(chars)), - state_(TWO_BYTE) { } - FlatContent() : buffer_(), state_(NON_FLAT) { } - - Vector<const uint8_t> buffer_; + explicit FlatContent(const uint8_t* start, int length) + : onebyte_start(start), length_(length), state_(ASCII) { } + explicit FlatContent(const uc16* start, int length) + : twobyte_start(start), length_(length), state_(TWO_BYTE) { } + FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) { } + + union { + const uint8_t* onebyte_start; + const uc16* twobyte_start; + }; + int length_; State state_; friend class String; }; // Get and set the length of the string. - inline int length(); + inline int length() const; inline void set_length(int value); + // Get and set the length of the string using acquire loads and release + // stores. + inline int synchronized_length() const; + inline void synchronized_set_length(int value); + // Returns whether this string has only ASCII chars, i.e. all of them can // be ASCII encoded. This might be the case even if the string is // two-byte. Such strings may appear when the embedder prefers // two-byte external representations even for ASCII data. 
- inline bool IsOneByteRepresentation(); - inline bool IsTwoByteRepresentation(); + inline bool IsOneByteRepresentation() const; + inline bool IsTwoByteRepresentation() const; // Cons and slices have an encoding flag that may not represent the actual // encoding of the underlying string. This is taken into account here. @@ -8804,7 +9185,7 @@ // to this method are not efficient unless the string is flat. INLINE(uint16_t Get(int index)); - // Try to flatten the string. Checks first inline to see if it is + // Flattens the string. Checks first inline to see if it is // necessary. Does nothing if the string is not a cons string. // Flattening allocates a sequential string with the same data as // the given string and mutates the cons string to a degenerate @@ -8816,15 +9197,9 @@ // // Degenerate cons strings are handled specially by the garbage // collector (see IsShortcutCandidate). - // - // Use FlattenString from Handles.cc to flatten even in case an - // allocation failure happens. - inline MaybeObject* TryFlatten(PretenureFlag pretenure = NOT_TENURED); - // Convenience function. Has exactly the same behavior as - // TryFlatten(), except in the case of failure returns the original - // string. - inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED); + static inline Handle<String> Flatten(Handle<String> string, + PretenureFlag pretenure = NOT_TENURED); // Tries to return the content of a flat string as a structure holding either // a flat vector of char or of uc16. @@ -8843,6 +9218,7 @@ // String equality operations. inline bool Equals(String* other); + inline static bool Equals(Handle<String> one, Handle<String> two); bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false); bool IsOneByteEqualTo(Vector<const uint8_t> str); bool IsTwoByteEqualTo(Vector<const uc16> str); @@ -8883,8 +9259,7 @@ // Conversion. inline bool AsArrayIndex(uint32_t* index); - // Casting. 
- static inline String* cast(Object* obj); + DECLARE_CAST(String) void PrintOn(FILE* out); @@ -8893,6 +9268,7 @@ // Dispatched behavior. void StringShortPrint(StringStream* accumulator); + void PrintUC16(OStream& os, int start = 0, int end = -1); // NOLINT #ifdef OBJECT_PRINT char* ToAsciiArray(); #endif @@ -8908,12 +9284,13 @@ // Maximum number of characters to consider when trying to convert a string // value into an array index. static const int kMaxArrayIndexSize = 10; - STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits)); + STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits)); // Max char codes. static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar; static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar; static const int kMaxUtf16CodeUnit = 0xffff; + static const uint32_t kMaxUtf16CodeUnitU = kMaxUtf16CodeUnit; // Value of hash field containing computed hash equal to zero. static const int kEmptyStringHash = kIsNotArrayIndexMask; @@ -8945,7 +9322,7 @@ const char* start = chars; const char* limit = chars + length; #ifdef V8_HOST_CAN_READ_UNALIGNED - ASSERT(unibrow::Utf8::kMaxOneByteChar == 0x7F); + DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F); const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80; while (chars + sizeof(uintptr_t) <= limit) { if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) { @@ -8986,42 +9363,32 @@ return NonOneByteStart(chars, length) >= length; } - // TODO(dcarney): Replace all instances of this with VisitFlat. 
- template<class Visitor, class ConsOp> - static inline void Visit(String* string, - unsigned offset, - Visitor& visitor, - ConsOp& cons_op, - int32_t type, - unsigned length); - template<class Visitor> static inline ConsString* VisitFlat(Visitor* visitor, String* string, - int offset, - int length, - int32_t type); + int offset = 0); - template<class Visitor> - static inline ConsString* VisitFlat(Visitor* visitor, - String* string, - int offset = 0) { - int32_t type = string->map()->instance_type(); - return VisitFlat(visitor, string, offset, string->length(), type); - } + static Handle<FixedArray> CalculateLineEnds(Handle<String> string, + bool include_ending_line); + + // Use the hash field to forward to the canonical internalized string + // when deserializing an internalized string. + inline void SetForwardedInternalizedString(String* string); + inline String* GetForwardedInternalizedString(); private: friend class Name; + friend class StringTableInsertionKey; - // Try to flatten the top level ConsString that is hiding behind this - // string. This is a no-op unless the string is a ConsString. Flatten - // mutates the ConsString and might return a failure. - MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure); + static Handle<String> SlowFlatten(Handle<ConsString> cons, + PretenureFlag tenure); // Slow case of String::Equals. This implementation works on any strings // but it is most efficient on strings that are almost flat. bool SlowEquals(String* other); + static bool SlowEquals(Handle<String> one, Handle<String> two); + // Slow case of AsArrayIndex. bool SlowAsArrayIndex(uint32_t* index); @@ -9035,8 +9402,7 @@ // The SeqString abstract class captures sequential string values. class SeqString: public String { public: - // Casting. - static inline SeqString* cast(Object* obj); + DECLARE_CAST(SeqString) // Layout description. 
static const int kHeaderSize = String::kSize; @@ -9066,8 +9432,7 @@ inline uint8_t* GetChars(); - // Casting - static inline SeqOneByteString* cast(Object* obj); + DECLARE_CAST(SeqOneByteString) // Garbage collection support. This method is called by the // garbage collector to compute the actual size of an AsciiString @@ -9081,7 +9446,7 @@ // Maximal memory usage for a single sequential ASCII string. static const int kMaxSize = 512 * MB - 1; - STATIC_CHECK((kMaxSize - kHeaderSize) >= String::kMaxLength); + STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength); private: DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString); @@ -9106,8 +9471,7 @@ // For regexp code. const uint16_t* SeqTwoByteStringGetData(unsigned start); - // Casting - static inline SeqTwoByteString* cast(Object* obj); + DECLARE_CAST(SeqTwoByteString) // Garbage collection support. This method is called by the // garbage collector to compute the actual size of a TwoByteString @@ -9121,7 +9485,7 @@ // Maximal memory usage for a single sequential two-byte string. static const int kMaxSize = 512 * MB - 1; - STATIC_CHECK(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >= + STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >= String::kMaxLength); private: @@ -9158,8 +9522,7 @@ // Dispatched behavior. uint16_t ConsStringGet(int index); - // Casting. - static inline ConsString* cast(Object* obj); + DECLARE_CAST(ConsString) // Layout description. static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize); @@ -9196,14 +9559,13 @@ inline String* parent(); inline void set_parent(String* parent, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); - inline int offset(); + inline int offset() const; inline void set_offset(int offset); // Dispatched behavior. uint16_t SlicedStringGet(int index); - // Casting. - static inline SlicedString* cast(Object* obj); + DECLARE_CAST(SlicedString) // Layout description. 
static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize); @@ -9235,8 +9597,7 @@ // API. Therefore, ExternalStrings should not be used internally. class ExternalString: public String { public: - // Casting - static inline ExternalString* cast(Object* obj); + DECLARE_CAST(ExternalString) // Layout description. static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize); @@ -9250,7 +9611,7 @@ // Return whether external string is short (data pointer is not cached). inline bool is_short(); - STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset); + STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset); private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString); @@ -9280,8 +9641,7 @@ // Dispatched behavior. inline uint16_t ExternalAsciiStringGet(int index); - // Casting. - static inline ExternalAsciiString* cast(Object* obj); + DECLARE_CAST(ExternalAsciiString) // Garbage collection support. inline void ExternalAsciiStringIterateBody(ObjectVisitor* v); @@ -9320,8 +9680,7 @@ // For regexp code. inline const uint16_t* ExternalTwoByteStringGetData(unsigned start); - // Casting. - static inline ExternalTwoByteString* cast(Object* obj); + DECLARE_CAST(ExternalTwoByteString) // Garbage collection support. inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v); @@ -9391,57 +9750,64 @@ // This maintains an off-stack representation of the stack frames required // to traverse a ConsString, allowing an entirely iterative and restartable // traversal of the entire string -// Note: this class is not GC-safe. 
class ConsStringIteratorOp { public: inline ConsStringIteratorOp() {} - String* Operate(String* string, - unsigned* offset_out, - int32_t* type_out, - unsigned* length_out); - inline String* ContinueOperation(int32_t* type_out, unsigned* length_out); - inline void Reset(); - inline bool HasMore(); + inline explicit ConsStringIteratorOp(ConsString* cons_string, + int offset = 0) { + Reset(cons_string, offset); + } + inline void Reset(ConsString* cons_string, int offset = 0) { + depth_ = 0; + // Next will always return NULL. + if (cons_string == NULL) return; + Initialize(cons_string, offset); + } + // Returns NULL when complete. + inline String* Next(int* offset_out) { + *offset_out = 0; + if (depth_ == 0) return NULL; + return Continue(offset_out); + } private: - // TODO(dcarney): Templatize this out for different stack sizes. - static const unsigned kStackSize = 32; + static const int kStackSize = 32; // Use a mask instead of doing modulo operations for stack wrapping. - static const unsigned kDepthMask = kStackSize-1; + static const int kDepthMask = kStackSize-1; STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize)); - static inline unsigned OffsetForDepth(unsigned depth); + static inline int OffsetForDepth(int depth); inline void PushLeft(ConsString* string); inline void PushRight(ConsString* string); inline void AdjustMaximumDepth(); inline void Pop(); - String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out); - String* Search(unsigned* offset_out, - int32_t* type_out, - unsigned* length_out); + inline bool StackBlown() { return maximum_depth_ - depth_ == kStackSize; } + void Initialize(ConsString* cons_string, int offset); + String* Continue(int* offset_out); + String* NextLeaf(bool* blew_stack); + String* Search(int* offset_out); - unsigned depth_; - unsigned maximum_depth_; // Stack must always contain only frames for which right traversal // has not yet been performed. 
ConsString* frames_[kStackSize]; - unsigned consumed_; ConsString* root_; + int depth_; + int maximum_depth_; + int consumed_; DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp); }; -// Note: this class is not GC-safe. class StringCharacterStream { public: inline StringCharacterStream(String* string, ConsStringIteratorOp* op, - unsigned offset = 0); + int offset = 0); inline uint16_t GetNext(); inline bool HasMore(); - inline void Reset(String* string, unsigned offset = 0); - inline void VisitOneByteString(const uint8_t* chars, unsigned length); - inline void VisitTwoByteString(const uint16_t* chars, unsigned length); + inline void Reset(String* string, int offset = 0); + inline void VisitOneByteString(const uint8_t* chars, int length); + inline void VisitTwoByteString(const uint16_t* chars, int length); private: bool is_one_byte_; @@ -9477,20 +9843,20 @@ // [to_number]: Cached to_number computed at startup. DECL_ACCESSORS(to_number, Object) - inline byte kind(); + inline byte kind() const; inline void set_kind(byte kind); - // Casting. - static inline Oddball* cast(Object* obj); + DECLARE_CAST(Oddball) // Dispatched behavior. DECLARE_VERIFIER(Oddball) // Initialize the fields. - MUST_USE_RESULT MaybeObject* Initialize(Heap* heap, - const char* to_string, - Object* to_number, - byte kind); + static void Initialize(Isolate* isolate, + Handle<Oddball> oddball, + const char* to_string, + Handle<Object> to_number, + byte kind); // Layout description. 
static const int kToStringOffset = HeapObject::kHeaderSize; @@ -9507,14 +9873,15 @@ static const byte kUndefined = 5; static const byte kUninitialized = 6; static const byte kOther = 7; + static const byte kException = 8; typedef FixedBodyDescriptor<kToStringOffset, kToNumberOffset + kPointerSize, kSize> BodyDescriptor; - STATIC_CHECK(kKindOffset == Internals::kOddballKindOffset); - STATIC_CHECK(kNull == Internals::kNullOddballKind); - STATIC_CHECK(kUndefined == Internals::kUndefinedOddballKind); + STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset); + STATIC_ASSERT(kNull == Internals::kNullOddballKind); + STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind); private: DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball); @@ -9526,12 +9893,11 @@ // [value]: value of the global property. DECL_ACCESSORS(value, Object) - // Casting. - static inline Cell* cast(Object* obj); + DECLARE_CAST(Cell) static inline Cell* FromValueAddress(Address value) { Object* result = FromAddress(value - kValueOffset); - ASSERT(result->IsCell() || result->IsPropertyCell()); + DCHECK(result->IsCell() || result->IsPropertyCell()); return static_cast<Cell*>(result); } @@ -9578,12 +9944,10 @@ static Handle<HeapType> UpdatedType(Handle<PropertyCell> cell, Handle<Object> value); - void AddDependentCompilationInfo(CompilationInfo* info); - - void AddDependentCode(Handle<Code> code); + static void AddDependentCompilationInfo(Handle<PropertyCell> cell, + CompilationInfo* info); - // Casting. - static inline PropertyCell* cast(Object* obj); + DECLARE_CAST(PropertyCell) inline Address TypeAddress() { return address() + kTypeOffset; @@ -9620,37 +9984,37 @@ // [hash]: The hash code property (undefined if not initialized yet). DECL_ACCESSORS(hash, Object) - // Casting. 
- static inline JSProxy* cast(Object* obj); + DECLARE_CAST(JSProxy) - MUST_USE_RESULT MaybeObject* GetPropertyWithHandler( - Object* receiver, - Name* name); - MUST_USE_RESULT MaybeObject* GetElementWithHandler( - Object* receiver, + MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithHandler( + Handle<JSProxy> proxy, + Handle<Object> receiver, + Handle<Name> name); + MUST_USE_RESULT static inline MaybeHandle<Object> GetElementWithHandler( + Handle<JSProxy> proxy, + Handle<Object> receiver, uint32_t index); // If the handler defines an accessor property with a setter, invoke it. // If it defines an accessor property without a setter, or a data property // that is read-only, throw. In all these cases set '*done' to true, // otherwise set it to false. - static Handle<Object> SetPropertyViaPrototypesWithHandler( - Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode, - bool* done); - - static PropertyAttributes GetPropertyAttributeWithHandler( - Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - Handle<Name> name); - static PropertyAttributes GetElementAttributeWithHandler( - Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - uint32_t index); + MUST_USE_RESULT + static MaybeHandle<Object> SetPropertyViaPrototypesWithHandler( + Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name, + Handle<Object> value, StrictMode strict_mode, bool* done); + + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetPropertyAttributesWithHandler(Handle<JSProxy> proxy, + Handle<Object> receiver, + Handle<Name> name); + MUST_USE_RESULT static Maybe<PropertyAttributes> + GetElementAttributeWithHandler(Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + uint32_t index); + MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler( + Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name, + Handle<Object> value, StrictMode strict_mode); // 
Turn the proxy into an (empty) JSObject. static void Fix(Handle<JSProxy> proxy); @@ -9660,10 +10024,12 @@ // Invoke a trap by name. If the trap does not exist on this's handler, // but derived_trap is non-NULL, invoke that instead. May cause GC. - Handle<Object> CallTrap(const char* name, - Handle<Object> derived_trap, - int argc, - Handle<Object> args[]); + MUST_USE_RESULT static MaybeHandle<Object> CallTrap( + Handle<JSProxy> proxy, + const char* name, + Handle<Object> derived_trap, + int argc, + Handle<Object> args[]); // Dispatched behavior. DECLARE_PRINTER(JSProxy) @@ -9679,7 +10045,7 @@ static const int kHeaderSize = kPaddingOffset; static const int kPaddingSize = kSize - kPaddingOffset; - STATIC_CHECK(kPaddingSize >= 0); + STATIC_ASSERT(kPaddingSize >= 0); typedef FixedBodyDescriptor<kHandlerOffset, kPaddingOffset, @@ -9688,31 +10054,30 @@ private: friend class JSReceiver; - static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes, - StrictMode strict_mode); - static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy, - Handle<JSReceiver> receiver, - uint32_t index, - Handle<Object> value, - StrictMode strict_mode); + MUST_USE_RESULT static inline MaybeHandle<Object> SetElementWithHandler( + Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + uint32_t index, + Handle<Object> value, + StrictMode strict_mode); - static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name); - static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index); + MUST_USE_RESULT static Maybe<bool> HasPropertyWithHandler( + Handle<JSProxy> proxy, Handle<Name> name); + MUST_USE_RESULT static inline Maybe<bool> HasElementWithHandler( + Handle<JSProxy> proxy, uint32_t index); - static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy, - Handle<Name> name, - DeleteMode mode); - static Handle<Object> 
DeleteElementWithHandler(Handle<JSProxy> proxy, - uint32_t index, - DeleteMode mode); + MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler( + Handle<JSProxy> proxy, + Handle<Name> name, + DeleteMode mode); + MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithHandler( + Handle<JSProxy> proxy, + uint32_t index, + DeleteMode mode); MUST_USE_RESULT Object* GetIdentityHash(); - static Handle<Object> GetOrCreateIdentityHash(Handle<JSProxy> proxy); + static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy); DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy); }; @@ -9726,8 +10091,7 @@ // [construct_trap]: The construct trap. DECL_ACCESSORS(construct_trap, Object) - // Casting. - static inline JSFunctionProxy* cast(Object* obj); + DECLARE_CAST(JSFunctionProxy) // Dispatched behavior. DECLARE_PRINTER(JSFunctionProxy) @@ -9740,7 +10104,7 @@ static const int kSize = JSFunction::kSize; static const int kPaddingSize = kSize - kPaddingOffset; - STATIC_CHECK(kPaddingSize >= 0); + STATIC_ASSERT(kPaddingSize >= 0); typedef FixedBodyDescriptor<kHandlerOffset, kConstructTrapOffset + kPointerSize, @@ -9751,45 +10115,150 @@ }; -// The JSSet describes EcmaScript Harmony sets -class JSSet: public JSObject { +class JSCollection : public JSObject { public: - // [set]: the backing hash set containing keys. + // [table]: the backing hash table DECL_ACCESSORS(table, Object) - // Casting. - static inline JSSet* cast(Object* obj); + static const int kTableOffset = JSObject::kHeaderSize; + static const int kSize = kTableOffset + kPointerSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection); +}; + + +// The JSSet describes EcmaScript Harmony sets +class JSSet : public JSCollection { + public: + DECLARE_CAST(JSSet) // Dispatched behavior. 
DECLARE_PRINTER(JSSet) DECLARE_VERIFIER(JSSet) - static const int kTableOffset = JSObject::kHeaderSize; - static const int kSize = kTableOffset + kPointerSize; - private: DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet); }; // The JSMap describes EcmaScript Harmony maps -class JSMap: public JSObject { +class JSMap : public JSCollection { public: - // [table]: the backing hash table mapping keys to values. - DECL_ACCESSORS(table, Object) - - // Casting. - static inline JSMap* cast(Object* obj); + DECLARE_CAST(JSMap) // Dispatched behavior. DECLARE_PRINTER(JSMap) DECLARE_VERIFIER(JSMap) + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap); +}; + + +// OrderedHashTableIterator is an iterator that iterates over the keys and +// values of an OrderedHashTable. +// +// The iterator has a reference to the underlying OrderedHashTable data, +// [table], as well as the current [index] the iterator is at. +// +// When the OrderedHashTable is rehashed it adds a reference from the old table +// to the new table as well as storing enough data about the changes so that the +// iterator [index] can be adjusted accordingly. +// +// When the [Next] result from the iterator is requested, the iterator checks if +// there is a newer table that it needs to transition to. +template<class Derived, class TableType> +class OrderedHashTableIterator: public JSObject { + public: + // [table]: the backing hash table mapping keys to values. + DECL_ACCESSORS(table, Object) + + // [index]: The index into the data table. + DECL_ACCESSORS(index, Smi) + + // [kind]: The kind of iteration this is. One of the [Kind] enum values. 
+ DECL_ACCESSORS(kind, Smi) + +#ifdef OBJECT_PRINT + void OrderedHashTableIteratorPrint(OStream& os); // NOLINT +#endif + static const int kTableOffset = JSObject::kHeaderSize; - static const int kSize = kTableOffset + kPointerSize; + static const int kIndexOffset = kTableOffset + kPointerSize; + static const int kKindOffset = kIndexOffset + kPointerSize; + static const int kSize = kKindOffset + kPointerSize; + + enum Kind { + kKindKeys = 1, + kKindValues = 2, + kKindEntries = 3 + }; + + // Whether the iterator has more elements. This needs to be called before + // calling |CurrentKey| and/or |CurrentValue|. + bool HasMore(); + + // Move the index forward one. + void MoveNext() { + set_index(Smi::FromInt(Smi::cast(index())->value() + 1)); + } + + // Populates the array with the next key and value and then moves the iterator + // forward. + // This returns the |kind| or 0 if the iterator is already at the end. + Smi* Next(JSArray* value_array); + + // Returns the current key of the iterator. This should only be called when + // |HasMore| returns true. + inline Object* CurrentKey(); private: - DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap); + // Transitions the iterator to the non obsolete backing store. This is a NOP + // if the [table] is not obsolete. + void Transition(); + + DISALLOW_IMPLICIT_CONSTRUCTORS(OrderedHashTableIterator); +}; + + +class JSSetIterator: public OrderedHashTableIterator<JSSetIterator, + OrderedHashSet> { + public: + // Dispatched behavior. + DECLARE_PRINTER(JSSetIterator) + DECLARE_VERIFIER(JSSetIterator) + + DECLARE_CAST(JSSetIterator) + + // Called by |Next| to populate the array. This allows the subclasses to + // populate the array differently. + inline void PopulateValueArray(FixedArray* array); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator); +}; + + +class JSMapIterator: public OrderedHashTableIterator<JSMapIterator, + OrderedHashMap> { + public: + // Dispatched behavior. 
+ DECLARE_PRINTER(JSMapIterator) + DECLARE_VERIFIER(JSMapIterator) + + DECLARE_CAST(JSMapIterator) + + // Called by |Next| to populate the array. This allows the subclasses to + // populate the array differently. + inline void PopulateValueArray(FixedArray* array); + + private: + // Returns the current value of the iterator. This should only be called when + // |HasMore| returns true. + inline Object* CurrentValue(); + + DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator); }; @@ -9814,8 +10283,7 @@ // The JSWeakMap describes EcmaScript Harmony weak maps class JSWeakMap: public JSWeakCollection { public: - // Casting. - static inline JSWeakMap* cast(Object* obj); + DECLARE_CAST(JSWeakMap) // Dispatched behavior. DECLARE_PRINTER(JSWeakMap) @@ -9829,8 +10297,7 @@ // The JSWeakSet describes EcmaScript Harmony weak sets class JSWeakSet: public JSWeakCollection { public: - // Casting. - static inline JSWeakSet* cast(Object* obj); + DECLARE_CAST(JSWeakSet) // Dispatched behavior. DECLARE_PRINTER(JSWeakSet) @@ -9864,8 +10331,7 @@ // [weak_first_array]: weak linked list of views. DECL_ACCESSORS(weak_first_view, Object) - // Casting. - static inline JSArrayBuffer* cast(Object* obj); + DECLARE_CAST(JSArrayBuffer) // Neutering. Only neuters the buffer, not associated typed arrays. void Neuter(); @@ -9907,8 +10373,7 @@ // [weak_next]: linked list of typed arrays over the same array buffer. DECL_ACCESSORS(weak_next, Object) - // Casting. - static inline JSArrayBufferView* cast(Object* obj); + DECLARE_CAST(JSArrayBufferView) DECLARE_VERIFIER(JSArrayBufferView) @@ -9934,8 +10399,7 @@ // Neutering. Only neuters this typed array. void Neuter(); - // Casting. - static inline JSTypedArray* cast(Object* obj); + DECLARE_CAST(JSTypedArray) ExternalArrayType type(); size_t element_size(); @@ -9965,8 +10429,7 @@ // Only neuters this DataView void Neuter(); - // Casting. - static inline JSDataView* cast(Object* obj); + DECLARE_CAST(JSDataView) // Dispatched behavior. 
DECLARE_PRINTER(JSDataView) @@ -9991,8 +10454,7 @@ inline Address foreign_address(); inline void set_foreign_address(Address value); - // Casting. - static inline Foreign* cast(Object* obj); + DECLARE_CAST(Foreign) // Dispatched behavior. inline void ForeignIterateBody(ObjectVisitor* v); @@ -10009,7 +10471,7 @@ static const int kForeignAddressOffset = HeapObject::kHeaderSize; static const int kSize = kForeignAddressOffset + kPointerSize; - STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset); + STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset); private: DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign); @@ -10034,8 +10496,9 @@ uint32_t index, Handle<Object> value); - MUST_USE_RESULT MaybeObject* JSArrayUpdateLengthFromIndex(uint32_t index, - Object* value); + static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map); + static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index); + static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array); // Initialize the array with the given capacity. The function may // fail due to out-of-memory situations, but only if the requested @@ -10045,15 +10508,15 @@ // Initializes the array to a certain length. inline bool AllowsSetElementsLength(); // Can cause GC. - static Handle<Object> SetElementsLength(Handle<JSArray> array, - Handle<Object> length); + MUST_USE_RESULT static MaybeHandle<Object> SetElementsLength( + Handle<JSArray> array, + Handle<Object> length); // Set the content of the array to the content of storage. static inline void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> storage); - // Casting. - static inline JSArray* cast(Object* obj); + DECLARE_CAST(JSArray) // Ensures that the fixed array backing the JSArray has at // least the stated size. 
@@ -10117,16 +10580,16 @@ inline bool all_can_write(); inline void set_all_can_write(bool value); - inline bool prohibits_overwriting(); - inline void set_prohibits_overwriting(bool value); - inline PropertyAttributes property_attributes(); inline void set_property_attributes(PropertyAttributes attributes); // Checks whether the given receiver is compatible with this accessor. + static bool IsCompatibleReceiverType(Isolate* isolate, + Handle<AccessorInfo> info, + Handle<HeapType> type); inline bool IsCompatibleReceiver(Object* receiver); - static inline AccessorInfo* cast(Object* obj); + DECLARE_CAST(AccessorInfo) // Dispatched behavior. DECLARE_VERIFIER(AccessorInfo) @@ -10143,11 +10606,13 @@ static const int kSize = kExpectedReceiverTypeOffset + kPointerSize; private: + inline bool HasExpectedReceiverType() { + return expected_receiver_type()->IsFunctionTemplateInfo(); + } // Bit positions in flag. static const int kAllCanReadBit = 0; static const int kAllCanWriteBit = 1; - static const int kProhibitsOverwritingBit = 2; - class AttributesField: public BitField<PropertyAttributes, 3, 3> {}; + class AttributesField: public BitField<PropertyAttributes, 2, 3> {}; DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo); }; @@ -10225,7 +10690,7 @@ public: DECL_ACCESSORS(serialized_data, ByteArray) - static inline DeclaredAccessorDescriptor* cast(Object* obj); + DECLARE_CAST(DeclaredAccessorDescriptor) static Handle<DeclaredAccessorDescriptor> Create( Isolate* isolate, @@ -10248,7 +10713,7 @@ public: DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor) - static inline DeclaredAccessorInfo* cast(Object* obj); + DECLARE_CAST(DeclaredAccessorInfo) // Dispatched behavior. DECLARE_PRINTER(DeclaredAccessorInfo) @@ -10269,7 +10734,7 @@ // the request is ignored. // // If the accessor in the prototype has the READ_ONLY property attribute, then -// a new value is added to the local object when the property is set. +// a new value is added to the derived object when the property is set. 
// This shadows the accessor in the prototype. class ExecutableAccessorInfo: public AccessorInfo { public: @@ -10277,7 +10742,7 @@ DECL_ACCESSORS(setter, Object) DECL_ACCESSORS(data, Object) - static inline ExecutableAccessorInfo* cast(Object* obj); + DECLARE_CAST(ExecutableAccessorInfo) // Dispatched behavior. DECLARE_PRINTER(ExecutableAccessorInfo) @@ -10288,6 +10753,8 @@ static const int kDataOffset = kSetterOffset + kPointerSize; static const int kSize = kDataOffset + kPointerSize; + inline void clear_setter(); + private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo); }; @@ -10299,20 +10766,12 @@ // * undefined: considered an accessor by the spec, too, strangely enough // * the hole: an accessor which has not been set // * a pointer to a map: a transition used to ensure map sharing -// access_flags provides the ability to override access checks on access check -// failure. class AccessorPair: public Struct { public: DECL_ACCESSORS(getter, Object) DECL_ACCESSORS(setter, Object) - DECL_ACCESSORS(access_flags, Smi) - inline void set_access_flags(v8::AccessControl access_control); - inline bool all_can_read(); - inline bool all_can_write(); - inline bool prohibits_overwriting(); - - static inline AccessorPair* cast(Object* obj); + DECLARE_CAST(AccessorPair) static Handle<AccessorPair> Copy(Handle<AccessorPair> pair); @@ -10347,14 +10806,9 @@ static const int kGetterOffset = HeapObject::kHeaderSize; static const int kSetterOffset = kGetterOffset + kPointerSize; - static const int kAccessFlagsOffset = kSetterOffset + kPointerSize; - static const int kSize = kAccessFlagsOffset + kPointerSize; + static const int kSize = kSetterOffset + kPointerSize; private: - static const int kAllCanReadBit = 0; - static const int kAllCanWriteBit = 1; - static const int kProhibitsOverwritingBit = 2; - // Strangely enough, in addition to functions and harmony proxies, the spec // requires us to consider undefined as a kind of accessor, too: // var obj = {}; @@ -10374,7 
+10828,7 @@ DECL_ACCESSORS(indexed_callback, Object) DECL_ACCESSORS(data, Object) - static inline AccessCheckInfo* cast(Object* obj); + DECLARE_CAST(AccessCheckInfo) // Dispatched behavior. DECLARE_PRINTER(AccessCheckInfo) @@ -10399,7 +10853,7 @@ DECL_ACCESSORS(enumerator, Object) DECL_ACCESSORS(data, Object) - static inline InterceptorInfo* cast(Object* obj); + DECLARE_CAST(InterceptorInfo) // Dispatched behavior. DECLARE_PRINTER(InterceptorInfo) @@ -10423,7 +10877,7 @@ DECL_ACCESSORS(callback, Object) DECL_ACCESSORS(data, Object) - static inline CallHandlerInfo* cast(Object* obj); + DECLARE_CAST(CallHandlerInfo) // Dispatched behavior. DECLARE_PRINTER(CallHandlerInfo) @@ -10472,7 +10926,7 @@ DECL_ACCESSORS(access_check_info, Object) DECL_ACCESSORS(flag, Smi) - inline int length(); + inline int length() const; inline void set_length(int value); // Following properties use flag bits. @@ -10485,7 +10939,7 @@ DECL_BOOLEAN_ACCESSORS(remove_prototype) DECL_BOOLEAN_ACCESSORS(do_not_cache) - static inline FunctionTemplateInfo* cast(Object* obj); + DECLARE_CAST(FunctionTemplateInfo) // Dispatched behavior. DECLARE_PRINTER(FunctionTemplateInfo) @@ -10534,7 +10988,7 @@ DECL_ACCESSORS(constructor, Object) DECL_ACCESSORS(internal_field_count, Object) - static inline ObjectTemplateInfo* cast(Object* obj); + DECLARE_CAST(ObjectTemplateInfo) // Dispatched behavior. DECLARE_PRINTER(ObjectTemplateInfo) @@ -10552,7 +11006,7 @@ DECL_ACCESSORS(receiver, Object) DECL_ACCESSORS(args, Object) - static inline SignatureInfo* cast(Object* obj); + DECLARE_CAST(SignatureInfo) // Dispatched behavior. DECLARE_PRINTER(SignatureInfo) @@ -10571,7 +11025,7 @@ public: DECL_ACCESSORS(types, Object) - static inline TypeSwitchInfo* cast(Object* obj); + DECLARE_CAST(TypeSwitchInfo) // Dispatched behavior. DECLARE_PRINTER(TypeSwitchInfo) @@ -10582,7 +11036,6 @@ }; -#ifdef ENABLE_DEBUGGER_SUPPORT // The DebugInfo class holds additional information for a function being // debugged. 
class DebugInfo: public Struct { @@ -10617,7 +11070,7 @@ // Get the number of break points for this function. int GetBreakPointCount(); - static inline DebugInfo* cast(Object* obj); + DECLARE_CAST(DebugInfo) // Dispatched behavior. DECLARE_PRINTER(DebugInfo) @@ -10632,6 +11085,8 @@ kActiveBreakPointsCountIndex + kPointerSize; static const int kSize = kBreakPointsStateIndex + kPointerSize; + static const int kEstimatedNofBreakPointsInFunction = 16; + private: static const int kNoBreakPointInfo = -1; @@ -10669,7 +11124,7 @@ // Get the number of break points for this code position. int GetBreakPointCount(); - static inline BreakPointInfo* cast(Object* obj); + DECLARE_CAST(BreakPointInfo) // Dispatched behavior. DECLARE_PRINTER(BreakPointInfo) @@ -10686,11 +11141,11 @@ private: DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo); }; -#endif // ENABLE_DEBUGGER_SUPPORT #undef DECL_BOOLEAN_ACCESSORS #undef DECL_ACCESSORS +#undef DECLARE_CAST #undef DECLARE_VERIFIER #define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \ diff -Nru nodejs-0.11.13/deps/v8/src/objects-inl.h nodejs-0.11.15/deps/v8/src/objects-inl.h --- nodejs-0.11.13/deps/v8/src/objects-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
// // Review notes: // @@ -35,21 +12,25 @@ #ifndef V8_OBJECTS_INL_H_ #define V8_OBJECTS_INL_H_ -#include "elements.h" -#include "objects.h" -#include "contexts.h" -#include "conversions-inl.h" -#include "heap.h" -#include "isolate.h" -#include "heap-inl.h" -#include "property.h" -#include "spaces.h" -#include "store-buffer.h" -#include "v8memory.h" -#include "factory.h" -#include "incremental-marking.h" -#include "transitions-inl.h" -#include "objects-visiting.h" +#include "src/base/atomicops.h" +#include "src/contexts.h" +#include "src/conversions-inl.h" +#include "src/elements.h" +#include "src/factory.h" +#include "src/field-index-inl.h" +#include "src/heap/heap-inl.h" +#include "src/heap/heap.h" +#include "src/heap/incremental-marking.h" +#include "src/heap/objects-visiting.h" +#include "src/heap/spaces.h" +#include "src/heap/store-buffer.h" +#include "src/isolate.h" +#include "src/lookup.h" +#include "src/objects.h" +#include "src/property.h" +#include "src/prototype.h" +#include "src/transitions-inl.h" +#include "src/v8memory.h" namespace v8 { namespace internal { @@ -74,50 +55,47 @@ #define TYPE_CHECKER(type, instancetype) \ - bool Object::Is##type() { \ + bool Object::Is##type() const { \ return Object::IsHeapObject() && \ HeapObject::cast(this)->map()->instance_type() == instancetype; \ } -#define CAST_ACCESSOR(type) \ - type* type::cast(Object* object) { \ - SLOW_ASSERT(object->Is##type()); \ - return reinterpret_cast<type*>(object); \ +#define CAST_ACCESSOR(type) \ + type* type::cast(Object* object) { \ + SLOW_DCHECK(object->Is##type()); \ + return reinterpret_cast<type*>(object); \ + } \ + const type* type::cast(const Object* object) { \ + SLOW_DCHECK(object->Is##type()); \ + return reinterpret_cast<const type*>(object); \ } -#define FIXED_TYPED_ARRAY_CAST_ACCESSOR(type) \ - template<> \ - type* type::cast(Object* object) { \ - SLOW_ASSERT(object->Is##type()); \ - return reinterpret_cast<type*>(object); \ - } - -#define INT_ACCESSORS(holder, name, 
offset) \ - int holder::name() { return READ_INT_FIELD(this, offset); } \ +#define INT_ACCESSORS(holder, name, offset) \ + int holder::name() const { return READ_INT_FIELD(this, offset); } \ void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); } -#define ACCESSORS(holder, name, type, offset) \ - type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \ - void holder::set_##name(type* value, WriteBarrierMode mode) { \ - WRITE_FIELD(this, offset, value); \ - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ +#define ACCESSORS(holder, name, type, offset) \ + type* holder::name() const { return type::cast(READ_FIELD(this, offset)); } \ + void holder::set_##name(type* value, WriteBarrierMode mode) { \ + WRITE_FIELD(this, offset, value); \ + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ } // Getter that returns a tagged Smi and setter that writes a tagged Smi. -#define ACCESSORS_TO_SMI(holder, name, offset) \ - Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \ - void holder::set_##name(Smi* value, WriteBarrierMode mode) { \ - WRITE_FIELD(this, offset, value); \ +#define ACCESSORS_TO_SMI(holder, name, offset) \ + Smi* holder::name() const { return Smi::cast(READ_FIELD(this, offset)); } \ + void holder::set_##name(Smi* value, WriteBarrierMode mode) { \ + WRITE_FIELD(this, offset, value); \ } // Getter that returns a Smi as an int and writes an int as a Smi. 
#define SMI_ACCESSORS(holder, name, offset) \ - int holder::name() { \ + int holder::name() const { \ Object* value = READ_FIELD(this, offset); \ return Smi::cast(value)->value(); \ } \ @@ -125,15 +103,32 @@ WRITE_FIELD(this, offset, Smi::FromInt(value)); \ } +#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ + int holder::synchronized_##name() const { \ + Object* value = ACQUIRE_READ_FIELD(this, offset); \ + return Smi::cast(value)->value(); \ + } \ + void holder::synchronized_set_##name(int value) { \ + RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ + } + +#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \ + int holder::nobarrier_##name() const { \ + Object* value = NOBARRIER_READ_FIELD(this, offset); \ + return Smi::cast(value)->value(); \ + } \ + void holder::nobarrier_set_##name(int value) { \ + NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ + } #define BOOL_GETTER(holder, field, name, offset) \ - bool holder::name() { \ + bool holder::name() const { \ return BooleanBit::get(field(), offset); \ } \ #define BOOL_ACCESSORS(holder, field, name, offset) \ - bool holder::name() { \ + bool holder::name() const { \ return BooleanBit::get(field(), offset); \ } \ void holder::set_##name(bool value) { \ @@ -141,75 +136,75 @@ } -bool Object::IsFixedArrayBase() { +bool Object::IsFixedArrayBase() const { return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() || IsFixedTypedArrayBase() || IsExternalArray(); } // External objects are not extensible, so the map check is enough. 
-bool Object::IsExternal() { +bool Object::IsExternal() const { return Object::IsHeapObject() && HeapObject::cast(this)->map() == HeapObject::cast(this)->GetHeap()->external_map(); } -bool Object::IsAccessorInfo() { +bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo(); } -bool Object::IsSmi() { +bool Object::IsSmi() const { return HAS_SMI_TAG(this); } -bool Object::IsHeapObject() { +bool Object::IsHeapObject() const { return Internals::HasHeapObjectTag(this); } -bool Object::NonFailureIsHeapObject() { - ASSERT(!this->IsFailure()); - return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0; -} - - TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE) +TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE) TYPE_CHECKER(Symbol, SYMBOL_TYPE) -bool Object::IsString() { +bool Object::IsString() const { return Object::IsHeapObject() && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE; } -bool Object::IsName() { +bool Object::IsName() const { return IsString() || IsSymbol(); } -bool Object::IsUniqueName() { +bool Object::IsUniqueName() const { return IsInternalizedString() || IsSymbol(); } -bool Object::IsSpecObject() { +bool Object::IsSpecObject() const { return Object::IsHeapObject() && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE; } -bool Object::IsSpecFunction() { +bool Object::IsSpecFunction() const { if (!Object::IsHeapObject()) return false; InstanceType type = HeapObject::cast(this)->map()->instance_type(); return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE; } -bool Object::IsInternalizedString() { +bool Object::IsTemplateInfo() const { + return IsObjectTemplateInfo() || IsFunctionTemplateInfo(); +} + + +bool Object::IsInternalizedString() const { if (!this->IsHeapObject()) return false; uint32_t type = HeapObject::cast(this)->map()->instance_type(); STATIC_ASSERT(kNotInternalizedTag != 0); @@ -218,57 +213,58 @@ } -bool Object::IsConsString() { +bool 
Object::IsConsString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsCons(); } -bool Object::IsSlicedString() { +bool Object::IsSlicedString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsSliced(); } -bool Object::IsSeqString() { +bool Object::IsSeqString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsSequential(); } -bool Object::IsSeqOneByteString() { +bool Object::IsSeqOneByteString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsSequential() && String::cast(this)->IsOneByteRepresentation(); } -bool Object::IsSeqTwoByteString() { +bool Object::IsSeqTwoByteString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsSequential() && String::cast(this)->IsTwoByteRepresentation(); } -bool Object::IsExternalString() { +bool Object::IsExternalString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsExternal(); } -bool Object::IsExternalAsciiString() { +bool Object::IsExternalAsciiString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsExternal() && String::cast(this)->IsOneByteRepresentation(); } -bool Object::IsExternalTwoByteString() { +bool Object::IsExternalTwoByteString() const { if (!IsString()) return false; return StringShape(String::cast(this)).IsExternal() && String::cast(this)->IsTwoByteRepresentation(); } + bool Object::HasValidElements() { // Dictionary is covered under FixedArray. 
return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray() || @@ -276,55 +272,73 @@ } -MaybeObject* Object::AllocateNewStorageFor(Heap* heap, - Representation representation) { - if (representation.IsSmi() && IsUninitialized()) { - return Smi::FromInt(0); +Handle<Object> Object::NewStorageFor(Isolate* isolate, + Handle<Object> object, + Representation representation) { + if (representation.IsSmi() && object->IsUninitialized()) { + return handle(Smi::FromInt(0), isolate); + } + if (!representation.IsDouble()) return object; + double value; + if (object->IsUninitialized()) { + value = 0; + } else if (object->IsMutableHeapNumber()) { + value = HeapNumber::cast(*object)->value(); + } else { + value = object->Number(); } - if (!representation.IsDouble()) return this; - if (IsUninitialized()) { - return heap->AllocateHeapNumber(0); + return isolate->factory()->NewHeapNumber(value, MUTABLE); +} + + +Handle<Object> Object::WrapForRead(Isolate* isolate, + Handle<Object> object, + Representation representation) { + DCHECK(!object->IsUninitialized()); + if (!representation.IsDouble()) { + DCHECK(object->FitsRepresentation(representation)); + return object; } - return heap->AllocateHeapNumber(Number()); + return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value()); } -StringShape::StringShape(String* str) +StringShape::StringShape(const String* str) : type_(str->map()->instance_type()) { set_valid(); - ASSERT((type_ & kIsNotStringMask) == kStringTag); + DCHECK((type_ & kIsNotStringMask) == kStringTag); } StringShape::StringShape(Map* map) : type_(map->instance_type()) { set_valid(); - ASSERT((type_ & kIsNotStringMask) == kStringTag); + DCHECK((type_ & kIsNotStringMask) == kStringTag); } StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) { set_valid(); - ASSERT((type_ & kIsNotStringMask) == kStringTag); + DCHECK((type_ & kIsNotStringMask) == kStringTag); } bool StringShape::IsInternalized() { - ASSERT(valid()); + DCHECK(valid()); 
STATIC_ASSERT(kNotInternalizedTag != 0); return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) == (kStringTag | kInternalizedTag); } -bool String::IsOneByteRepresentation() { +bool String::IsOneByteRepresentation() const { uint32_t type = map()->instance_type(); return (type & kStringEncodingMask) == kOneByteStringTag; } -bool String::IsTwoByteRepresentation() { +bool String::IsTwoByteRepresentation() const { uint32_t type = map()->instance_type(); return (type & kStringEncodingMask) == kTwoByteStringTag; } @@ -334,7 +348,7 @@ uint32_t type = map()->instance_type(); STATIC_ASSERT(kIsIndirectStringTag != 0); STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0); - ASSERT(IsFlat()); + DCHECK(IsFlat()); switch (type & (kIsIndirectStringMask | kStringEncodingMask)) { case kOneByteStringTag: return true; @@ -350,7 +364,7 @@ uint32_t type = map()->instance_type(); STATIC_ASSERT(kIsIndirectStringTag != 0); STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0); - ASSERT(IsFlat()); + DCHECK(IsFlat()); switch (type & (kIsIndirectStringMask | kStringEncodingMask)) { case kOneByteStringTag: return false; @@ -410,10 +424,10 @@ } -STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) == +STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) == Internals::kFullStringRepresentationMask); -STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) == +STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) == Internals::kStringEncodingMask); @@ -432,10 +446,10 @@ } -STATIC_CHECK((kExternalStringTag | kOneByteStringTag) == +STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) == Internals::kExternalAsciiRepresentationTag); -STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag); +STATIC_ASSERT(v8::String::ASCII_ENCODING == kOneByteStringTag); bool StringShape::IsExternalTwoByte() { @@ -443,13 +457,13 @@ } -STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) == +STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) == 
Internals::kExternalTwoByteRepresentationTag); -STATIC_CHECK(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag); +STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag); uc32 FlatStringReader::Get(int index) { - ASSERT(0 <= index && index <= length_); + DCHECK(0 <= index && index <= length_); if (is_ascii_) { return static_cast<const byte*>(start_)[index]; } else { @@ -458,24 +472,45 @@ } +Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) { + return key->AsHandle(isolate); +} + + +Handle<Object> MapCacheShape::AsHandle(Isolate* isolate, HashTableKey* key) { + return key->AsHandle(isolate); +} + + +Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate, + HashTableKey* key) { + return key->AsHandle(isolate); +} + + +Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate, + HashTableKey* key) { + return key->AsHandle(isolate); +} + template <typename Char> class SequentialStringKey : public HashTableKey { public: explicit SequentialStringKey(Vector<const Char> string, uint32_t seed) : string_(string), hash_field_(0), seed_(seed) { } - virtual uint32_t Hash() { + virtual uint32_t Hash() V8_OVERRIDE { hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(), string_.length(), seed_); uint32_t result = hash_field_ >> String::kHashShift; - ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. + DCHECK(result != 0); // Ensure that the hash value of 0 is never computed. 
return result; } - virtual uint32_t HashForObject(Object* other) { + virtual uint32_t HashForObject(Object* other) V8_OVERRIDE { return String::cast(other)->Hash(); } @@ -490,11 +525,11 @@ OneByteStringKey(Vector<const uint8_t> str, uint32_t seed) : SequentialStringKey<uint8_t>(str, seed) { } - virtual bool IsMatch(Object* string) { + virtual bool IsMatch(Object* string) V8_OVERRIDE { return String::cast(string)->IsOneByteEqualTo(string_); } - virtual MaybeObject* AsObject(Heap* heap); + virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE; }; @@ -506,26 +541,26 @@ if (string_->IsSlicedString()) { string_ = Handle<String>(Unslice(*string_, &from_)); } - ASSERT(string_->IsSeqString() || string->IsExternalString()); + DCHECK(string_->IsSeqString() || string->IsExternalString()); } - virtual uint32_t Hash() { - ASSERT(length_ >= 0); - ASSERT(from_ + length_ <= string_->length()); + virtual uint32_t Hash() V8_OVERRIDE { + DCHECK(length_ >= 0); + DCHECK(from_ + length_ <= string_->length()); const Char* chars = GetChars() + from_; hash_field_ = StringHasher::HashSequentialString( chars, length_, string_->GetHeap()->HashSeed()); uint32_t result = hash_field_ >> String::kHashShift; - ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. + DCHECK(result != 0); // Ensure that the hash value of 0 is never computed. 
return result; } - virtual uint32_t HashForObject(Object* other) { + virtual uint32_t HashForObject(Object* other) V8_OVERRIDE { return String::cast(other)->Hash(); } - virtual bool IsMatch(Object* string); - virtual MaybeObject* AsObject(Heap* heap); + virtual bool IsMatch(Object* string) V8_OVERRIDE; + virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE; private: const Char* GetChars(); @@ -550,11 +585,11 @@ explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed) : SequentialStringKey<uc16>(str, seed) { } - virtual bool IsMatch(Object* string) { + virtual bool IsMatch(Object* string) V8_OVERRIDE { return String::cast(string)->IsTwoByteEqualTo(string_); } - virtual MaybeObject* AsObject(Heap* heap); + virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE; }; @@ -564,27 +599,26 @@ explicit Utf8StringKey(Vector<const char> string, uint32_t seed) : string_(string), hash_field_(0), seed_(seed) { } - virtual bool IsMatch(Object* string) { + virtual bool IsMatch(Object* string) V8_OVERRIDE { return String::cast(string)->IsUtf8EqualTo(string_); } - virtual uint32_t Hash() { + virtual uint32_t Hash() V8_OVERRIDE { if (hash_field_ != 0) return hash_field_ >> String::kHashShift; hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_); uint32_t result = hash_field_ >> String::kHashShift; - ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. + DCHECK(result != 0); // Ensure that the hash value of 0 is never computed. 
return result; } - virtual uint32_t HashForObject(Object* other) { + virtual uint32_t HashForObject(Object* other) V8_OVERRIDE { return String::cast(other)->Hash(); } - virtual MaybeObject* AsObject(Heap* heap) { + virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { if (hash_field_ == 0) Hash(); - return heap->AllocateInternalizedStringFromUtf8(string_, - chars_, - hash_field_); + return isolate->factory()->NewInternalizedStringFromUtf8( + string_, chars_, hash_field_); } Vector<const char> string_; @@ -594,7 +628,7 @@ }; -bool Object::IsNumber() { +bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); } @@ -603,14 +637,14 @@ TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE) -bool Object::IsFiller() { +bool Object::IsFiller() const { if (!Object::IsHeapObject()) return false; InstanceType instance_type = HeapObject::cast(this)->map()->instance_type(); return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE; } -bool Object::IsExternalArray() { +bool Object::IsExternalArray() const { if (!Object::IsHeapObject()) return false; InstanceType instance_type = @@ -628,7 +662,7 @@ #undef TYPED_ARRAY_TYPE_CHECKER -bool Object::IsFixedTypedArrayBase() { +bool Object::IsFixedTypedArrayBase() const { if (!Object::IsHeapObject()) return false; InstanceType instance_type = @@ -638,62 +672,31 @@ } -bool MaybeObject::IsFailure() { - return HAS_FAILURE_TAG(this); -} - - -bool MaybeObject::IsRetryAfterGC() { - return HAS_FAILURE_TAG(this) - && Failure::cast(this)->type() == Failure::RETRY_AFTER_GC; -} - - -bool MaybeObject::IsException() { - return this == Failure::Exception(); -} - - -bool MaybeObject::IsTheHole() { - return !IsFailure() && ToObjectUnchecked()->IsTheHole(); -} - - -bool MaybeObject::IsUninitialized() { - return !IsFailure() && ToObjectUnchecked()->IsUninitialized(); -} - - -Failure* Failure::cast(MaybeObject* obj) { - ASSERT(HAS_FAILURE_TAG(obj)); - return reinterpret_cast<Failure*>(obj); -} - - -bool Object::IsJSReceiver() { +bool 
Object::IsJSReceiver() const { STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); return IsHeapObject() && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE; } -bool Object::IsJSObject() { +bool Object::IsJSObject() const { STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); return IsHeapObject() && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE; } -bool Object::IsJSProxy() { +bool Object::IsJSProxy() const { if (!Object::IsHeapObject()) return false; - InstanceType type = HeapObject::cast(this)->map()->instance_type(); - return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE; + return HeapObject::cast(this)->map()->IsJSProxyMap(); } TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE) TYPE_CHECKER(JSSet, JS_SET_TYPE) TYPE_CHECKER(JSMap, JS_MAP_TYPE) +TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE) +TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE) TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE) TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE) TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE) @@ -703,22 +706,22 @@ TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE) -bool Object::IsJSWeakCollection() { +bool Object::IsJSWeakCollection() const { return IsJSWeakMap() || IsJSWeakSet(); } -bool Object::IsDescriptorArray() { +bool Object::IsDescriptorArray() const { return IsFixedArray(); } -bool Object::IsTransitionArray() { +bool Object::IsTransitionArray() const { return IsFixedArray(); } -bool Object::IsDeoptimizationInputData() { +bool Object::IsDeoptimizationInputData() const { // Must be a fixed array. if (!IsFixedArray()) return false; @@ -728,14 +731,23 @@ // the entry size. 
int length = FixedArray::cast(this)->length(); if (length == 0) return true; + if (length < DeoptimizationInputData::kFirstDeoptEntryIndex) return false; + + FixedArray* self = FixedArray::cast(const_cast<Object*>(this)); + int deopt_count = + Smi::cast(self->get(DeoptimizationInputData::kDeoptEntryCountIndex)) + ->value(); + int patch_count = + Smi::cast( + self->get( + DeoptimizationInputData::kReturnAddressPatchEntryCountIndex)) + ->value(); - length -= DeoptimizationInputData::kFirstDeoptEntryIndex; - return length >= 0 && - length % DeoptimizationInputData::kDeoptEntrySize == 0; + return length == DeoptimizationInputData::LengthFor(deopt_count, patch_count); } -bool Object::IsDeoptimizationOutputData() { +bool Object::IsDeoptimizationOutputData() const { if (!IsFixedArray()) return false; // There's actually no way to see the difference between a fixed array and // a deoptimization data array. Since this is used for asserts we can check @@ -745,7 +757,7 @@ } -bool Object::IsDependentCode() { +bool Object::IsDependentCode() const { if (!IsFixedArray()) return false; // There's actually no way to see the difference between a fixed array and // a dependent codes array. 
@@ -753,7 +765,7 @@ } -bool Object::IsContext() { +bool Object::IsContext() const { if (!Object::IsHeapObject()) return false; Map* map = HeapObject::cast(this)->map(); Heap* heap = map->GetHeap(); @@ -767,14 +779,14 @@ } -bool Object::IsNativeContext() { +bool Object::IsNativeContext() const { return Object::IsHeapObject() && HeapObject::cast(this)->map() == HeapObject::cast(this)->GetHeap()->native_context_map(); } -bool Object::IsScopeInfo() { +bool Object::IsScopeInfo() const { return Object::IsHeapObject() && HeapObject::cast(this)->map() == HeapObject::cast(this)->GetHeap()->scope_info_map(); @@ -801,7 +813,7 @@ TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) -bool Object::IsStringWrapper() { +bool Object::IsStringWrapper() const { return IsJSValue() && JSValue::cast(this)->value()->IsString(); } @@ -809,7 +821,7 @@ TYPE_CHECKER(Foreign, FOREIGN_TYPE) -bool Object::IsBoolean() { +bool Object::IsBoolean() const { return IsOddball() && ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0); } @@ -821,7 +833,7 @@ TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE) -bool Object::IsJSArrayBufferView() { +bool Object::IsJSArrayBufferView() const { return IsJSDataView() || IsJSTypedArray(); } @@ -834,28 +846,47 @@ } -bool Object::IsHashTable() { +bool Object::IsHashTable() const { return Object::IsHeapObject() && HeapObject::cast(this)->map() == HeapObject::cast(this)->GetHeap()->hash_table_map(); } -bool Object::IsDictionary() { +bool Object::IsWeakHashTable() const { + return IsHashTable(); +} + + +bool Object::IsDictionary() const { return IsHashTable() && this != HeapObject::cast(this)->GetHeap()->string_table(); } -bool Object::IsStringTable() { - return IsHashTable() && - this == HeapObject::cast(this)->GetHeap()->raw_unchecked_string_table(); +bool Object::IsNameDictionary() const { + return IsDictionary(); +} + + +bool Object::IsSeededNumberDictionary() const { + return IsDictionary(); } -bool Object::IsJSFunctionResultCache() { +bool 
Object::IsUnseededNumberDictionary() const { + return IsDictionary(); +} + + +bool Object::IsStringTable() const { + return IsHashTable(); +} + + +bool Object::IsJSFunctionResultCache() const { if (!IsFixedArray()) return false; - FixedArray* self = FixedArray::cast(this); + const FixedArray* self = FixedArray::cast(this); int length = self->length(); if (length < JSFunctionResultCache::kEntriesIndex) return false; if ((length - JSFunctionResultCache::kEntriesIndex) @@ -864,7 +895,10 @@ } #ifdef VERIFY_HEAP if (FLAG_verify_heap) { - reinterpret_cast<JSFunctionResultCache*>(this)-> + // TODO(svenpanne) We use const_cast here and below to break our dependency + // cycle between the predicates and the verifiers. This can be removed when + // the verifiers are const-correct, too. + reinterpret_cast<JSFunctionResultCache*>(const_cast<Object*>(this))-> JSFunctionResultCacheVerify(); } #endif @@ -872,61 +906,89 @@ } -bool Object::IsNormalizedMapCache() { - if (!IsFixedArray()) return false; - if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) { +bool Object::IsNormalizedMapCache() const { + return NormalizedMapCache::IsNormalizedMapCache(this); +} + + +int NormalizedMapCache::GetIndex(Handle<Map> map) { + return map->Hash() % NormalizedMapCache::kEntries; +} + + +bool NormalizedMapCache::IsNormalizedMapCache(const Object* obj) { + if (!obj->IsFixedArray()) return false; + if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) { return false; } #ifdef VERIFY_HEAP if (FLAG_verify_heap) { - reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify(); + reinterpret_cast<NormalizedMapCache*>(const_cast<Object*>(obj))-> + NormalizedMapCacheVerify(); } #endif return true; } -bool Object::IsCompilationCacheTable() { +bool Object::IsCompilationCacheTable() const { return IsHashTable(); } -bool Object::IsCodeCacheHashTable() { +bool Object::IsCodeCacheHashTable() const { return IsHashTable(); } -bool 
Object::IsPolymorphicCodeCacheHashTable() { +bool Object::IsPolymorphicCodeCacheHashTable() const { return IsHashTable(); } -bool Object::IsMapCache() { +bool Object::IsMapCache() const { return IsHashTable(); } -bool Object::IsObjectHashTable() { +bool Object::IsObjectHashTable() const { return IsHashTable(); } -bool Object::IsPrimitive() { +bool Object::IsOrderedHashTable() const { + return IsHeapObject() && + HeapObject::cast(this)->map() == + HeapObject::cast(this)->GetHeap()->ordered_hash_table_map(); +} + + +bool Object::IsOrderedHashSet() const { + return IsOrderedHashTable(); +} + + +bool Object::IsOrderedHashMap() const { + return IsOrderedHashTable(); +} + + +bool Object::IsPrimitive() const { return IsOddball() || IsNumber() || IsString(); } -bool Object::IsJSGlobalProxy() { +bool Object::IsJSGlobalProxy() const { bool result = IsHeapObject() && (HeapObject::cast(this)->map()->instance_type() == JS_GLOBAL_PROXY_TYPE); - ASSERT(!result || + DCHECK(!result || HeapObject::cast(this)->map()->is_access_check_needed()); return result; } -bool Object::IsGlobalObject() { +bool Object::IsGlobalObject() const { if (!IsHeapObject()) return false; InstanceType type = HeapObject::cast(this)->map()->instance_type(); @@ -939,25 +1001,24 @@ TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE) -bool Object::IsUndetectableObject() { +bool Object::IsUndetectableObject() const { return IsHeapObject() && HeapObject::cast(this)->map()->is_undetectable(); } -bool Object::IsAccessCheckNeeded() { +bool Object::IsAccessCheckNeeded() const { if (!IsHeapObject()) return false; if (IsJSGlobalProxy()) { - JSGlobalProxy* proxy = JSGlobalProxy::cast(this); - GlobalObject* global = - proxy->GetIsolate()->context()->global_object(); + const JSGlobalProxy* proxy = JSGlobalProxy::cast(this); + GlobalObject* global = proxy->GetIsolate()->context()->global_object(); return proxy->IsDetachedFrom(global); } return HeapObject::cast(this)->map()->is_access_check_needed(); } -bool 
Object::IsStruct() { +bool Object::IsStruct() const { if (!IsHeapObject()) return false; switch (HeapObject::cast(this)->map()->instance_type()) { #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true; @@ -968,65 +1029,76 @@ } -#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \ - bool Object::Is##Name() { \ - return Object::IsHeapObject() \ +#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \ + bool Object::Is##Name() const { \ + return Object::IsHeapObject() \ && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \ } STRUCT_LIST(MAKE_STRUCT_PREDICATE) #undef MAKE_STRUCT_PREDICATE -bool Object::IsUndefined() { +bool Object::IsUndefined() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined; } -bool Object::IsNull() { +bool Object::IsNull() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull; } -bool Object::IsTheHole() { +bool Object::IsTheHole() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole; } -bool Object::IsUninitialized() { +bool Object::IsException() const { + return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException; +} + + +bool Object::IsUninitialized() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized; } -bool Object::IsTrue() { +bool Object::IsTrue() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue; } -bool Object::IsFalse() { +bool Object::IsFalse() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse; } -bool Object::IsArgumentsMarker() { +bool Object::IsArgumentsMarker() const { return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker; } double Object::Number() { - ASSERT(IsNumber()); + DCHECK(IsNumber()); return IsSmi() ? 
static_cast<double>(reinterpret_cast<Smi*>(this)->value()) : reinterpret_cast<HeapNumber*>(this)->value(); } -bool Object::IsNaN() { +bool Object::IsNaN() const { return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value()); } -Handle<Object> Object::ToSmi(Isolate* isolate, Handle<Object> object) { - if (object->IsSmi()) return object; +bool Object::IsMinusZero() const { + return this->IsHeapNumber() && + i::IsMinusZero(HeapNumber::cast(this)->value()); +} + + +MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) { + if (object->IsSmi()) return Handle<Smi>::cast(object); if (object->IsHeapNumber()) { double value = Handle<HeapNumber>::cast(object)->value(); int int_value = FastD2I(value); @@ -1034,21 +1106,14 @@ return handle(Smi::FromInt(int_value), isolate); } } - return Handle<Object>(); + return Handle<Smi>(); } -// TODO(ishell): Use handlified version instead. -MaybeObject* Object::ToSmi() { - if (IsSmi()) return this; - if (IsHeapNumber()) { - double value = HeapNumber::cast(this)->value(); - int int_value = FastD2I(value); - if (value == FastI2D(int_value) && Smi::IsValid(int_value)) { - return Smi::FromInt(int_value); - } - } - return Failure::Exception(); +MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate, + Handle<Object> object) { + return ToObject( + isolate, object, handle(isolate->context()->native_context(), isolate)); } @@ -1057,47 +1122,103 @@ } -Handle<Object> Object::GetElement(Isolate* isolate, - Handle<Object> object, - uint32_t index) { +MaybeHandle<Object> Object::GetProperty(Handle<Object> object, + Handle<Name> name) { + LookupIterator it(object, name); + return GetProperty(&it); +} + + +MaybeHandle<Object> Object::GetElement(Isolate* isolate, + Handle<Object> object, + uint32_t index) { // GetElement can trigger a getter which can cause allocation. - // This was not always the case. This ASSERT is here to catch + // This was not always the case. 
This DCHECK is here to catch // leftover incorrect uses. - ASSERT(AllowHeapAllocation::IsAllowed()); + DCHECK(AllowHeapAllocation::IsAllowed()); return Object::GetElementWithReceiver(isolate, object, object, index); } -Handle<Object> Object::GetElementNoExceptionThrown(Isolate* isolate, - Handle<Object> object, +MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object, + Handle<Name> name) { + uint32_t index; + Isolate* isolate = name->GetIsolate(); + if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index); + return GetProperty(object, name); +} + + +MaybeHandle<Object> Object::GetProperty(Isolate* isolate, + Handle<Object> object, + const char* name) { + Handle<String> str = isolate->factory()->InternalizeUtf8String(name); + DCHECK(!str.is_null()); +#ifdef DEBUG + uint32_t index; // Assert that the name is not an array index. + DCHECK(!str->AsArrayIndex(&index)); +#endif // DEBUG + return GetProperty(object, str); +} + + +MaybeHandle<Object> JSProxy::GetElementWithHandler(Handle<JSProxy> proxy, + Handle<Object> receiver, uint32_t index) { - Handle<Object> result = - Object::GetElementWithReceiver(isolate, object, object, index); - CHECK_NOT_EMPTY_HANDLE(isolate, result); - return result; + return GetPropertyWithHandler( + proxy, receiver, proxy->GetIsolate()->factory()->Uint32ToString(index)); } -MaybeObject* Object::GetProperty(Name* key) { - PropertyAttributes attributes; - return GetPropertyWithReceiver(this, key, &attributes); +MaybeHandle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + uint32_t index, + Handle<Object> value, + StrictMode strict_mode) { + Isolate* isolate = proxy->GetIsolate(); + Handle<String> name = isolate->factory()->Uint32ToString(index); + return SetPropertyWithHandler(proxy, receiver, name, value, strict_mode); } -MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) { - return GetPropertyWithReceiver(this, key, attributes); +Maybe<bool> 
JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, + uint32_t index) { + Isolate* isolate = proxy->GetIsolate(); + Handle<String> name = isolate->factory()->Uint32ToString(index); + return HasPropertyWithHandler(proxy, name); } #define FIELD_ADDR(p, offset) \ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) +#define FIELD_ADDR_CONST(p, offset) \ + (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag) + #define READ_FIELD(p, offset) \ - (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset))) + +#define ACQUIRE_READ_FIELD(p, offset) \ + reinterpret_cast<Object*>(base::Acquire_Load( \ + reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) + +#define NOBARRIER_READ_FIELD(p, offset) \ + reinterpret_cast<Object*>(base::NoBarrier_Load( \ + reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) #define WRITE_FIELD(p, offset, value) \ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) +#define RELEASE_WRITE_FIELD(p, offset, value) \ + base::Release_Store( \ + reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ + reinterpret_cast<base::AtomicWord>(value)); + +#define NOBARRIER_WRITE_FIELD(p, offset, value) \ + base::NoBarrier_Store( \ + reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ + reinterpret_cast<base::AtomicWord>(value)); + #define WRITE_BARRIER(heap, object, offset, value) \ heap->incremental_marking()->RecordWrite( \ object, HeapObject::RawField(object, offset), value); \ @@ -1116,17 +1237,19 @@ #ifndef V8_TARGET_ARCH_MIPS #define READ_DOUBLE_FIELD(p, offset) \ - (*reinterpret_cast<double*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const double*>(FIELD_ADDR_CONST(p, offset))) #else // V8_TARGET_ARCH_MIPS // Prevent gcc from using load-double (mips ldc1) on (possibly) // non-64-bit aligned HeapNumber::value. 
- static inline double read_double_field(void* p, int offset) { + static inline double read_double_field(const void* p, int offset) { union conversion { double d; uint32_t u[2]; } c; - c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))); - c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))); + c.u[0] = (*reinterpret_cast<const uint32_t*>( + FIELD_ADDR_CONST(p, offset))); + c.u[1] = (*reinterpret_cast<const uint32_t*>( + FIELD_ADDR_CONST(p, offset + 4))); return c.d; } #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset) @@ -1154,135 +1277,87 @@ #define READ_INT_FIELD(p, offset) \ - (*reinterpret_cast<int*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset))) #define WRITE_INT_FIELD(p, offset, value) \ (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value) #define READ_INTPTR_FIELD(p, offset) \ - (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset))) #define WRITE_INTPTR_FIELD(p, offset, value) \ (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value) #define READ_UINT32_FIELD(p, offset) \ - (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset))) #define WRITE_UINT32_FIELD(p, offset, value) \ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value) #define READ_INT32_FIELD(p, offset) \ - (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset))) #define WRITE_INT32_FIELD(p, offset, value) \ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value) #define READ_INT64_FIELD(p, offset) \ - (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset))) #define WRITE_INT64_FIELD(p, offset, value) \ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) #define READ_SHORT_FIELD(p, offset) \ - 
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset))) #define WRITE_SHORT_FIELD(p, offset, value) \ (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value) #define READ_BYTE_FIELD(p, offset) \ - (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset))) + (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset))) + +#define NOBARRIER_READ_BYTE_FIELD(p, offset) \ + static_cast<byte>(base::NoBarrier_Load( \ + reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) #define WRITE_BYTE_FIELD(p, offset, value) \ (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) +#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \ + base::NoBarrier_Store( \ + reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ + static_cast<base::Atomic8>(value)); Object** HeapObject::RawField(HeapObject* obj, int byte_offset) { - return &READ_FIELD(obj, byte_offset); + return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset)); } -int Smi::value() { +int Smi::value() const { return Internals::SmiValue(this); } Smi* Smi::FromInt(int value) { - ASSERT(Smi::IsValid(value)); + DCHECK(Smi::IsValid(value)); return reinterpret_cast<Smi*>(Internals::IntToSmi(value)); } Smi* Smi::FromIntptr(intptr_t value) { - ASSERT(Smi::IsValid(value)); + DCHECK(Smi::IsValid(value)); int smi_shift_bits = kSmiTagSize + kSmiShiftSize; return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag); } -Failure::Type Failure::type() const { - return static_cast<Type>(value() & kFailureTypeTagMask); -} - - -bool Failure::IsInternalError() const { - return type() == INTERNAL_ERROR; -} - - -AllocationSpace Failure::allocation_space() const { - ASSERT_EQ(RETRY_AFTER_GC, type()); - return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize) - & kSpaceTagMask); -} - - -Failure* Failure::InternalError() { - return Construct(INTERNAL_ERROR); -} - - -Failure* Failure::Exception() { - return Construct(EXCEPTION); -} - - -intptr_t 
Failure::value() const { - return static_cast<intptr_t>( - reinterpret_cast<uintptr_t>(this) >> kFailureTagSize); -} - - -Failure* Failure::RetryAfterGC() { - return RetryAfterGC(NEW_SPACE); -} - - -Failure* Failure::RetryAfterGC(AllocationSpace space) { - ASSERT((space & ~kSpaceTagMask) == 0); - return Construct(RETRY_AFTER_GC, space); -} - - -Failure* Failure::Construct(Type type, intptr_t value) { - uintptr_t info = - (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type; - ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info); - // Fill the unused bits with a pattern that's easy to recognize in crash - // dumps. - static const int kFailureMagicPattern = 0x0BAD0000; - return reinterpret_cast<Failure*>( - (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern); -} - - bool Smi::IsValid(intptr_t value) { bool result = Internals::IsValidSmi(value); - ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue); + DCHECK_EQ(result, value >= kMinValue && value <= kMaxValue); return result; } -MapWord MapWord::FromMap(Map* map) { +MapWord MapWord::FromMap(const Map* map) { return MapWord(reinterpret_cast<uintptr_t>(map)); } @@ -1304,7 +1379,7 @@ HeapObject* MapWord::ToForwardingAddress() { - ASSERT(IsForwardingAddress()); + DCHECK(IsForwardingAddress()); return HeapObject::FromAddress(reinterpret_cast<Address>(value_)); } @@ -1320,21 +1395,28 @@ #endif -Heap* HeapObject::GetHeap() { +Heap* HeapObject::GetHeap() const { Heap* heap = - MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap(); - SLOW_ASSERT(heap != NULL); + MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap(); + SLOW_DCHECK(heap != NULL); return heap; } -Isolate* HeapObject::GetIsolate() { +Isolate* HeapObject::GetIsolate() const { return GetHeap()->isolate(); } -Map* HeapObject::map() { +Map* HeapObject::map() const { +#ifdef DEBUG + // Clear mark potentially added by PathTracer. 
+ uintptr_t raw_value = + map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag); + return MapWord::FromRawValue(raw_value).ToMap(); +#else return map_word().ToMap(); +#endif } @@ -1348,26 +1430,58 @@ } +Map* HeapObject::synchronized_map() { + return synchronized_map_word().ToMap(); +} + + +void HeapObject::synchronized_set_map(Map* value) { + synchronized_set_map_word(MapWord::FromMap(value)); + if (value != NULL) { + // TODO(1600) We are passing NULL as a slot because maps can never be on + // evacuation candidate. + value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value); + } +} + + +void HeapObject::synchronized_set_map_no_write_barrier(Map* value) { + synchronized_set_map_word(MapWord::FromMap(value)); +} + + // Unsafe accessor omitting write barrier. void HeapObject::set_map_no_write_barrier(Map* value) { set_map_word(MapWord::FromMap(value)); } -MapWord HeapObject::map_word() { - return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset))); +MapWord HeapObject::map_word() const { + return MapWord( + reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset))); } void HeapObject::set_map_word(MapWord map_word) { - // WRITE_FIELD does not invoke write barrier, but there is no need - // here. 
- WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); + NOBARRIER_WRITE_FIELD( + this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); +} + + +MapWord HeapObject::synchronized_map_word() const { + return MapWord( + reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset))); +} + + +void HeapObject::synchronized_set_map_word(MapWord map_word) { + RELEASE_WRITE_FIELD( + this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); } HeapObject* HeapObject::FromAddress(Address address) { - ASSERT_TAG_ALIGNED(address); + DCHECK_TAG_ALIGNED(address); return reinterpret_cast<HeapObject*>(address + kHeapObjectTag); } @@ -1382,6 +1496,24 @@ } +bool HeapObject::MayContainNewSpacePointers() { + InstanceType type = map()->instance_type(); + if (type <= LAST_NAME_TYPE) { + if (type == SYMBOL_TYPE) { + return true; + } + DCHECK(type < FIRST_NONSTRING_TYPE); + // There are four string representations: sequential strings, external + // strings, cons strings, and sliced strings. + // Only the latter two contain non-map-word pointers to heap objects. + return ((type & kIsIndirectStringMask) == kIsIndirectStringTag); + } + // The ConstantPoolArray contains heap pointers, but not new space pointers. 
+ if (type == CONSTANT_POOL_ARRAY_TYPE) return false; + return (type > LAST_DATA_TYPE); +} + + void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) { v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)), reinterpret_cast<Object**>(FIELD_ADDR(this, end))); @@ -1398,7 +1530,7 @@ } -double HeapNumber::value() { +double HeapNumber::value() const { return READ_DOUBLE_FIELD(this, kValueOffset); } @@ -1438,17 +1570,17 @@ } -FixedArrayBase* JSObject::elements() { +FixedArrayBase* JSObject::elements() const { Object* array = READ_FIELD(this, kElementsOffset); return static_cast<FixedArrayBase*>(array); } -void JSObject::ValidateElements() { -#ifdef ENABLE_SLOW_ASSERTS +void JSObject::ValidateElements(Handle<JSObject> object) { +#ifdef ENABLE_SLOW_DCHECKS if (FLAG_enable_slow_asserts) { - ElementsAccessor* accessor = GetElementsAccessor(); - accessor->Validate(this); + ElementsAccessor* accessor = object->GetElementsAccessor(); + accessor->Validate(object); } #endif } @@ -1466,7 +1598,7 @@ void AllocationSite::MarkZombie() { - ASSERT(!IsZombie()); + DCHECK(!IsZombie()); Initialize(); set_pretenure_decision(kZombie); } @@ -1526,10 +1658,10 @@ int value = pretenure_data()->value(); // Verify that we can count more mementos than we can possibly find in one // new space collection. 
- ASSERT((GetHeap()->MaxSemiSpaceSize() / + DCHECK((GetHeap()->MaxSemiSpaceSize() / (StaticVisitorBase::kMinObjectSizeInWords * kPointerSize + AllocationMemento::kSize)) < MementoFoundCountBits::kMax); - ASSERT(count < MementoFoundCountBits::kMax); + DCHECK(count < MementoFoundCountBits::kMax); set_pretenure_data( Smi::FromInt(MementoFoundCountBits::update(value, count)), SKIP_WRITE_BARRIER); @@ -1540,55 +1672,76 @@ int value = memento_found_count(); set_memento_found_count(value + 1); - return value == 0; + return memento_found_count() == kPretenureMinimumCreated; } inline void AllocationSite::IncrementMementoCreateCount() { - ASSERT(FLAG_allocation_site_pretenuring); + DCHECK(FLAG_allocation_site_pretenuring); int value = memento_create_count(); set_memento_create_count(value + 1); } -inline bool AllocationSite::DigestPretenuringFeedback() { - bool decision_changed = false; +inline bool AllocationSite::MakePretenureDecision( + PretenureDecision current_decision, + double ratio, + bool maximum_size_scavenge) { + // Here we just allow state transitions from undecided or maybe tenure + // to don't tenure, maybe tenure, or tenure. + if ((current_decision == kUndecided || current_decision == kMaybeTenure)) { + if (ratio >= kPretenureRatio) { + // We just transition into tenure state when the semi-space was at + // maximum capacity. + if (maximum_size_scavenge) { + set_deopt_dependent_code(true); + set_pretenure_decision(kTenure); + // Currently we just need to deopt when we make a state transition to + // tenure. 
+ return true; + } + set_pretenure_decision(kMaybeTenure); + } else { + set_pretenure_decision(kDontTenure); + } + } + return false; +} + + +inline bool AllocationSite::DigestPretenuringFeedback( + bool maximum_size_scavenge) { + bool deopt = false; int create_count = memento_create_count(); int found_count = memento_found_count(); bool minimum_mementos_created = create_count >= kPretenureMinimumCreated; double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics ? static_cast<double>(found_count) / create_count : 0.0; - PretenureFlag current_mode = GetPretenureMode(); + PretenureDecision current_decision = pretenure_decision(); if (minimum_mementos_created) { - PretenureDecision result = ratio >= kPretenureRatio - ? kTenure - : kDontTenure; - set_pretenure_decision(result); - if (current_mode != GetPretenureMode()) { - decision_changed = true; - set_deopt_dependent_code(true); - } + deopt = MakePretenureDecision( + current_decision, ratio, maximum_size_scavenge); } if (FLAG_trace_pretenuring_statistics) { PrintF( "AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n", static_cast<void*>(this), create_count, found_count, ratio, - current_mode == TENURED ? "tenured" : "not tenured", - GetPretenureMode() == TENURED ? "tenured" : "not tenured"); + PretenureDecisionName(current_decision), + PretenureDecisionName(pretenure_decision())); } // Clear feedback calculation fields until the next gc. 
set_memento_found_count(0); set_memento_create_count(0); - return decision_changed; + return deopt; } void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) { - object->ValidateElements(); + JSObject::ValidateElements(object); ElementsKind elements_kind = object->map()->elements_kind(); if (!IsFastObjectElementsKind(elements_kind)) { if (IsFastHoleyElementsKind(elements_kind)) { @@ -1608,7 +1761,7 @@ ElementsKind target_kind = current_kind; { DisallowHeapAllocation no_allocation; - ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS); + DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS); bool is_holey = IsFastHoleyElementsKind(current_kind); if (current_kind == FAST_HOLEY_ELEMENTS) return; Heap* heap = object->GetHeap(); @@ -1648,7 +1801,7 @@ EnsureElementsMode mode) { Heap* heap = object->GetHeap(); if (elements->map() != heap->fixed_double_array_map()) { - ASSERT(elements->map() == heap->fixed_array_map() || + DCHECK(elements->map() == heap->fixed_array_map() || elements->map() == heap->fixed_cow_array_map()); if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) { mode = DONT_ALLOW_DOUBLE_ELEMENTS; @@ -1659,7 +1812,7 @@ return; } - ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS); + DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS); if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) { TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS); } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) { @@ -1676,111 +1829,40 @@ } -MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate, - ElementsKind to_kind) { - Map* current_map = map(); - ElementsKind from_kind = current_map->elements_kind(); - if (from_kind == to_kind) return current_map; - - Context* native_context = isolate->context()->native_context(); - Object* maybe_array_maps = native_context->js_array_maps(); - if (maybe_array_maps->IsFixedArray()) { - FixedArray* array_maps = FixedArray::cast(maybe_array_maps); - if (array_maps->get(from_kind) == current_map) { - Object* maybe_transitioned_map = 
array_maps->get(to_kind); - if (maybe_transitioned_map->IsMap()) { - return Map::cast(maybe_transitioned_map); - } - } - } - - return GetElementsTransitionMapSlow(to_kind); +void JSObject::SetMapAndElements(Handle<JSObject> object, + Handle<Map> new_map, + Handle<FixedArrayBase> value) { + JSObject::MigrateToMap(object, new_map); + DCHECK((object->map()->has_fast_smi_or_object_elements() || + (*value == object->GetHeap()->empty_fixed_array())) == + (value->map() == object->GetHeap()->fixed_array_map() || + value->map() == object->GetHeap()->fixed_cow_array_map())); + DCHECK((*value == object->GetHeap()->empty_fixed_array()) || + (object->map()->has_fast_double_elements() == + value->IsFixedDoubleArray())); + object->set_elements(*value); } -void JSObject::set_map_and_elements(Map* new_map, - FixedArrayBase* value, - WriteBarrierMode mode) { - ASSERT(value->HasValidElements()); - if (new_map != NULL) { - if (mode == UPDATE_WRITE_BARRIER) { - set_map(new_map); - } else { - ASSERT(mode == SKIP_WRITE_BARRIER); - set_map_no_write_barrier(new_map); - } - } - ASSERT((map()->has_fast_smi_or_object_elements() || - (value == GetHeap()->empty_fixed_array())) == - (value->map() == GetHeap()->fixed_array_map() || - value->map() == GetHeap()->fixed_cow_array_map())); - ASSERT((value == GetHeap()->empty_fixed_array()) || - (map()->has_fast_double_elements() == value->IsFixedDoubleArray())); +void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) { WRITE_FIELD(this, kElementsOffset, value); CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode); } -void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) { - set_map_and_elements(NULL, value, mode); -} - - void JSObject::initialize_properties() { - ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array())); + DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array())); WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array()); } void 
JSObject::initialize_elements() { - if (map()->has_fast_smi_or_object_elements() || - map()->has_fast_double_elements()) { - ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array())); - WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array()); - } else if (map()->has_external_array_elements()) { - ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(map()); - ASSERT(!GetHeap()->InNewSpace(empty_array)); - WRITE_FIELD(this, kElementsOffset, empty_array); - } else if (map()->has_fixed_typed_array_elements()) { - FixedTypedArrayBase* empty_array = - GetHeap()->EmptyFixedTypedArrayForMap(map()); - ASSERT(!GetHeap()->InNewSpace(empty_array)); - WRITE_FIELD(this, kElementsOffset, empty_array); - } else { - UNREACHABLE(); - } -} - - -MaybeObject* JSObject::ResetElements() { - if (map()->is_observed()) { - // Maintain invariant that observed elements are always in dictionary mode. - SeededNumberDictionary* dictionary; - MaybeObject* maybe = SeededNumberDictionary::Allocate(GetHeap(), 0); - if (!maybe->To(&dictionary)) return maybe; - if (map() == GetHeap()->sloppy_arguments_elements_map()) { - FixedArray::cast(elements())->set(1, dictionary); - } else { - set_elements(dictionary); - } - return this; - } - - ElementsKind elements_kind = GetInitialFastElementsKind(); - if (!FLAG_smi_only_arrays) { - elements_kind = FastSmiToObjectElementsKind(elements_kind); - } - MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind); - Map* map; - if (!maybe->To(&map)) return maybe; - set_map(map); - initialize_elements(); - - return this; + FixedArrayBase* elements = map()->GetInitialElements(); + WRITE_FIELD(this, kElementsOffset, elements); } -Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) { +Handle<String> Map::ExpectedTransitionKey(Handle<Map> map) { DisallowHeapAllocation no_gc; if (!map->HasTransitionArray()) return Handle<String>::null(); TransitionArray* transitions = map->transitions(); @@ -1795,14 +1877,14 @@ } 
-Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) { - ASSERT(!ExpectedTransitionKey(map).is_null()); +Handle<Map> Map::ExpectedTransitionTarget(Handle<Map> map) { + DCHECK(!ExpectedTransitionKey(map).is_null()); return Handle<Map>(map->transitions()->GetTarget( TransitionArray::kSimpleTransitionIndex)); } -Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) { +Handle<Map> Map::FindTransitionToField(Handle<Map> map, Handle<Name> key) { DisallowHeapAllocation no_allocation; if (!map->HasTransitionArray()) return Handle<Map>::null(); TransitionArray* transitions = map->transitions(); @@ -1819,7 +1901,7 @@ ACCESSORS(Oddball, to_number, Object, kToNumberOffset) -byte Oddball::kind() { +byte Oddball::kind() const { return Smi::cast(READ_FIELD(this, kKindOffset))->value(); } @@ -1829,20 +1911,20 @@ } -Object* Cell::value() { +Object* Cell::value() const { return READ_FIELD(this, kValueOffset); } void Cell::set_value(Object* val, WriteBarrierMode ignored) { // The write barrier is not used for global property cells. - ASSERT(!val->IsPropertyCell() && !val->IsCell()); + DCHECK(!val->IsPropertyCell() && !val->IsCell()); WRITE_FIELD(this, kValueOffset, val); } ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset) -Object* PropertyCell::type_raw() { +Object* PropertyCell::type_raw() const { return READ_FIELD(this, kTypeOffset); } @@ -1887,6 +1969,10 @@ return JSSet::kSize; case JS_MAP_TYPE: return JSMap::kSize; + case JS_SET_ITERATOR_TYPE: + return JSSetIterator::kSize; + case JS_MAP_ITERATOR_TYPE: + return JSMapIterator::kSize; case JS_WEAK_MAP_TYPE: return JSWeakMap::kSize; case JS_WEAK_SET_TYPE: @@ -1907,7 +1993,7 @@ int JSObject::GetInternalFieldCount() { - ASSERT(1 << kPointerSizeLog2 == kPointerSize); + DCHECK(1 << kPointerSizeLog2 == kPointerSize); // Make sure to adjust for the number of in-object properties. These // properties do contribute to the size, but are not internal fields. 
return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) - @@ -1916,13 +2002,13 @@ int JSObject::GetInternalFieldOffset(int index) { - ASSERT(index < GetInternalFieldCount() && index >= 0); + DCHECK(index < GetInternalFieldCount() && index >= 0); return GetHeaderSize() + (kPointerSize * index); } Object* JSObject::GetInternalField(int index) { - ASSERT(index < GetInternalFieldCount() && index >= 0); + DCHECK(index < GetInternalFieldCount() && index >= 0); // Internal objects do follow immediately after the header, whereas in-object // properties are at the end of the object. Therefore there is no need // to adjust the index here. @@ -1931,7 +2017,7 @@ void JSObject::SetInternalField(int index, Object* value) { - ASSERT(index < GetInternalFieldCount() && index >= 0); + DCHECK(index < GetInternalFieldCount() && index >= 0); // Internal objects do follow immediately after the header, whereas in-object // properties are at the end of the object. Therefore there is no need // to adjust the index here. @@ -1942,7 +2028,7 @@ void JSObject::SetInternalField(int index, Smi* value) { - ASSERT(index < GetInternalFieldCount() && index >= 0); + DCHECK(index < GetInternalFieldCount() && index >= 0); // Internal objects do follow immediately after the header, whereas in-object // properties are at the end of the object. Therefore there is no need // to adjust the index here. @@ -1951,39 +2037,25 @@ } -MaybeObject* JSObject::FastPropertyAt(Representation representation, - int index) { - Object* raw_value = RawFastPropertyAt(index); - return raw_value->AllocateNewStorageFor(GetHeap(), representation); -} - - // Access fast-case object properties at index. The use of these routines // is needed to correctly distinguish between properties stored in-object and // properties stored in the properties array. -Object* JSObject::RawFastPropertyAt(int index) { - // Adjust for the number of properties stored in the object. 
- index -= map()->inobject_properties(); - if (index < 0) { - int offset = map()->instance_size() + (index * kPointerSize); - return READ_FIELD(this, offset); +Object* JSObject::RawFastPropertyAt(FieldIndex index) { + if (index.is_inobject()) { + return READ_FIELD(this, index.offset()); } else { - ASSERT(index < properties()->length()); - return properties()->get(index); + return properties()->get(index.outobject_array_index()); } } -void JSObject::FastPropertyAtPut(int index, Object* value) { - // Adjust for the number of properties stored in the object. - index -= map()->inobject_properties(); - if (index < 0) { - int offset = map()->instance_size() + (index * kPointerSize); +void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) { + if (index.is_inobject()) { + int offset = index.offset(); WRITE_FIELD(this, offset, value); WRITE_BARRIER(GetHeap(), this, offset, value); } else { - ASSERT(index < properties()->length()); - properties()->set(index, value); + properties()->set(index.outobject_array_index(), value); } } @@ -2014,15 +2086,15 @@ void JSObject::InitializeBody(Map* map, Object* pre_allocated_value, Object* filler_value) { - ASSERT(!filler_value->IsHeapObject() || + DCHECK(!filler_value->IsHeapObject() || !GetHeap()->InNewSpace(filler_value)); - ASSERT(!pre_allocated_value->IsHeapObject() || + DCHECK(!pre_allocated_value->IsHeapObject() || !GetHeap()->InNewSpace(pre_allocated_value)); int size = map->instance_size(); int offset = kHeaderSize; if (filler_value != pre_allocated_value) { int pre_allocated = map->pre_allocated_property_fields(); - ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size); + DCHECK(pre_allocated * kPointerSize + kHeaderSize <= size); for (int i = 0; i < pre_allocated; i++) { WRITE_FIELD(this, offset, pre_allocated_value); offset += kPointerSize; @@ -2036,28 +2108,18 @@ bool JSObject::HasFastProperties() { - ASSERT(properties()->IsDictionary() == map()->is_dictionary_map()); + DCHECK(properties()->IsDictionary() == 
map()->is_dictionary_map()); return !properties()->IsDictionary(); } -bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) { - // Allow extra fast properties if the object has more than - // kFastPropertiesSoftLimit in-object properties. When this is the case, it is - // very unlikely that the object is being used as a dictionary and there is a - // good chance that allowing more map transitions will be worth it. - Map* map = this->map(); - if (map->unused_property_fields() != 0) return false; - - int inobject = map->inobject_properties(); - - int limit; - if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) { - limit = Max(inobject, kMaxFastProperties); - } else { - limit = Max(inobject, kFastPropertiesSoftLimit); - } - return properties()->length() > limit; +bool Map::TooManyFastProperties(StoreFromKeyed store_mode) { + if (unused_property_fields() != 0) return false; + if (is_prototype_map()) return false; + int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12; + int limit = Max(minimum, inobject_properties()); + int external = NumberOfFields() - inobject_properties(); + return external > limit; } @@ -2118,15 +2180,14 @@ } -FixedArrayBase* FixedArrayBase::cast(Object* object) { - ASSERT(object->IsFixedArrayBase()); - return reinterpret_cast<FixedArrayBase*>(object); +Object* FixedArray::get(int index) { + SLOW_DCHECK(index >= 0 && index < this->length()); + return READ_FIELD(this, kHeaderSize + index * kPointerSize); } -Object* FixedArray::get(int index) { - SLOW_ASSERT(index >= 0 && index < this->length()); - return READ_FIELD(this, kHeaderSize + index * kPointerSize); +Handle<Object> FixedArray::get(Handle<FixedArray> array, int index) { + return handle(array->get(index), array->GetIsolate()); } @@ -2136,17 +2197,18 @@ void FixedArray::set(int index, Smi* value) { - ASSERT(map() != GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < this->length()); - ASSERT(reinterpret_cast<Object*>(value)->IsSmi()); + DCHECK(map() != 
GetHeap()->fixed_cow_array_map()); + DCHECK(index >= 0 && index < this->length()); + DCHECK(reinterpret_cast<Object*>(value)->IsSmi()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); } void FixedArray::set(int index, Object* value) { - ASSERT(map() != GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < this->length()); + DCHECK_NE(GetHeap()->fixed_cow_array_map(), map()); + DCHECK_EQ(FIXED_ARRAY_TYPE, map()->instance_type()); + DCHECK(index >= 0 && index < this->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); WRITE_BARRIER(GetHeap(), this, offset, value); @@ -2164,48 +2226,41 @@ inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() { - ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64); - ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32); - return OS::nan_value(); + DCHECK(BitCast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64); + DCHECK((BitCast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32); + return base::OS::nan_value(); } double FixedDoubleArray::get_scalar(int index) { - ASSERT(map() != GetHeap()->fixed_cow_array_map() && + DCHECK(map() != GetHeap()->fixed_cow_array_map() && map() != GetHeap()->fixed_array_map()); - ASSERT(index >= 0 && index < this->length()); + DCHECK(index >= 0 && index < this->length()); double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize); - ASSERT(!is_the_hole_nan(result)); + DCHECK(!is_the_hole_nan(result)); return result; } int64_t FixedDoubleArray::get_representation(int index) { - ASSERT(map() != GetHeap()->fixed_cow_array_map() && + DCHECK(map() != GetHeap()->fixed_cow_array_map() && map() != GetHeap()->fixed_array_map()); - ASSERT(index >= 0 && index < this->length()); + DCHECK(index >= 0 && index < this->length()); return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize); } -MaybeObject* FixedDoubleArray::get(int index) { - if 
(is_the_hole(index)) { - return GetHeap()->the_hole_value(); - } else { - return GetHeap()->NumberFromDouble(get_scalar(index)); - } -} - -Handle<Object> FixedDoubleArray::get_as_handle(int index) { - if (is_the_hole(index)) { - return GetIsolate()->factory()->the_hole_value(); +Handle<Object> FixedDoubleArray::get(Handle<FixedDoubleArray> array, + int index) { + if (array->is_the_hole(index)) { + return array->GetIsolate()->factory()->the_hole_value(); } else { - return GetIsolate()->factory()->NewNumber(get_scalar(index)); + return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index)); } } void FixedDoubleArray::set(int index, double value) { - ASSERT(map() != GetHeap()->fixed_cow_array_map() && + DCHECK(map() != GetHeap()->fixed_cow_array_map() && map() != GetHeap()->fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double(); @@ -2214,7 +2269,7 @@ void FixedDoubleArray::set_the_hole(int index) { - ASSERT(map() != GetHeap()->fixed_cow_array_map() && + DCHECK(map() != GetHeap()->fixed_cow_array_map() && map() != GetHeap()->fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double()); @@ -2227,129 +2282,402 @@ } -SMI_ACCESSORS( - ConstantPoolArray, first_code_ptr_index, kFirstCodePointerIndexOffset) -SMI_ACCESSORS( - ConstantPoolArray, first_heap_ptr_index, kFirstHeapPointerIndexOffset) -SMI_ACCESSORS( - ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset) +double* FixedDoubleArray::data_start() { + return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize)); +} -int ConstantPoolArray::first_int64_index() { - return 0; +void FixedDoubleArray::FillWithHoles(int from, int to) { + for (int i = from; i < to; i++) { + set_the_hole(i); + } } -int ConstantPoolArray::count_of_int64_entries() { - return first_code_ptr_index(); +void ConstantPoolArray::NumberOfEntries::increment(Type type) { + DCHECK(type < 
NUMBER_OF_TYPES); + element_counts_[type]++; } -int ConstantPoolArray::count_of_code_ptr_entries() { - return first_heap_ptr_index() - first_code_ptr_index(); +int ConstantPoolArray::NumberOfEntries::equals( + const ConstantPoolArray::NumberOfEntries& other) const { + for (int i = 0; i < NUMBER_OF_TYPES; i++) { + if (element_counts_[i] != other.element_counts_[i]) return false; + } + return true; } -int ConstantPoolArray::count_of_heap_ptr_entries() { - return first_int32_index() - first_heap_ptr_index(); +bool ConstantPoolArray::NumberOfEntries::is_empty() const { + return total_count() == 0; } -int ConstantPoolArray::count_of_int32_entries() { - return length() - first_int32_index(); +int ConstantPoolArray::NumberOfEntries::count_of(Type type) const { + DCHECK(type < NUMBER_OF_TYPES); + return element_counts_[type]; } -void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries, - int number_of_code_ptr_entries, - int number_of_heap_ptr_entries, - int number_of_int32_entries) { - int current_index = number_of_int64_entries; - set_first_code_ptr_index(current_index); - current_index += number_of_code_ptr_entries; - set_first_heap_ptr_index(current_index); - current_index += number_of_heap_ptr_entries; - set_first_int32_index(current_index); - current_index += number_of_int32_entries; - set_length(current_index); +int ConstantPoolArray::NumberOfEntries::base_of(Type type) const { + int base = 0; + DCHECK(type < NUMBER_OF_TYPES); + for (int i = 0; i < type; i++) { + base += element_counts_[i]; + } + return base; } -int64_t ConstantPoolArray::get_int64_entry(int index) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= 0 && index < first_code_ptr_index()); - return READ_INT64_FIELD(this, OffsetOfElementAt(index)); +int ConstantPoolArray::NumberOfEntries::total_count() const { + int count = 0; + for (int i = 0; i < NUMBER_OF_TYPES; i++) { + count += element_counts_[i]; + } + return count; } -double 
ConstantPoolArray::get_int64_entry_as_double(int index) { - STATIC_ASSERT(kDoubleSize == kInt64Size); - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= 0 && index < first_code_ptr_index()); - return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index)); + +int ConstantPoolArray::NumberOfEntries::are_in_range(int min, int max) const { + for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) { + if (element_counts_[i] < min || element_counts_[i] > max) { + return false; + } + } + return true; } -Address ConstantPoolArray::get_code_ptr_entry(int index) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index()); - return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index))); +int ConstantPoolArray::Iterator::next_index() { + DCHECK(!is_finished()); + int ret = next_index_++; + update_section(); + return ret; } -Object* ConstantPoolArray::get_heap_ptr_entry(int index) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_heap_ptr_index() && index < first_int32_index()); - return READ_FIELD(this, OffsetOfElementAt(index)); +bool ConstantPoolArray::Iterator::is_finished() { + return next_index_ > array_->last_index(type_, final_section_); } -int32_t ConstantPoolArray::get_int32_entry(int index) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_int32_index() && index < length()); - return READ_INT32_FIELD(this, OffsetOfElementAt(index)); +void ConstantPoolArray::Iterator::update_section() { + if (next_index_ > array_->last_index(type_, current_section_) && + current_section_ != final_section_) { + DCHECK(final_section_ == EXTENDED_SECTION); + current_section_ = EXTENDED_SECTION; + next_index_ = array_->first_index(type_, EXTENDED_SECTION); + } } -void ConstantPoolArray::set(int index, Address value) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_code_ptr_index() && 
index < first_heap_ptr_index()); - WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value)); +bool ConstantPoolArray::is_extended_layout() { + uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset); + return IsExtendedField::decode(small_layout_1); } -void ConstantPoolArray::set(int index, Object* value) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_code_ptr_index() && index < first_int32_index()); - WRITE_FIELD(this, OffsetOfElementAt(index), value); - WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value); +ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() { + return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION; } -void ConstantPoolArray::set(int index, int64_t value) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_int64_index() && index < first_code_ptr_index()); - WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value); +int ConstantPoolArray::first_extended_section_index() { + DCHECK(is_extended_layout()); + uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); + return TotalCountField::decode(small_layout_2); } -void ConstantPoolArray::set(int index, double value) { - STATIC_ASSERT(kDoubleSize == kInt64Size); - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= first_int64_index() && index < first_code_ptr_index()); - WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value); +int ConstantPoolArray::get_extended_section_header_offset() { + return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size); } -void ConstantPoolArray::set(int index, int32_t value) { - ASSERT(map() == GetHeap()->constant_pool_array_map()); - ASSERT(index >= this->first_int32_index() && index < length()); - WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value); +ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() { + uint32_t small_layout_2 = 
READ_UINT32_FIELD(this, kSmallLayout2Offset); + return WeakObjectStateField::decode(small_layout_2); } -WriteBarrierMode HeapObject::GetWriteBarrierMode( - const DisallowHeapAllocation& promise) { - Heap* heap = GetHeap(); +void ConstantPoolArray::set_weak_object_state( + ConstantPoolArray::WeakObjectState state) { + uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); + small_layout_2 = WeakObjectStateField::update(small_layout_2, state); + WRITE_INT32_FIELD(this, kSmallLayout2Offset, small_layout_2); +} + + +int ConstantPoolArray::first_index(Type type, LayoutSection section) { + int index = 0; + if (section == EXTENDED_SECTION) { + DCHECK(is_extended_layout()); + index += first_extended_section_index(); + } + + for (Type type_iter = FIRST_TYPE; type_iter < type; + type_iter = next_type(type_iter)) { + index += number_of_entries(type_iter, section); + } + + return index; +} + + +int ConstantPoolArray::last_index(Type type, LayoutSection section) { + return first_index(type, section) + number_of_entries(type, section) - 1; +} + + +int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) { + if (section == SMALL_SECTION) { + uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset); + uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); + switch (type) { + case INT64: + return Int64CountField::decode(small_layout_1); + case CODE_PTR: + return CodePtrCountField::decode(small_layout_1); + case HEAP_PTR: + return HeapPtrCountField::decode(small_layout_1); + case INT32: + return Int32CountField::decode(small_layout_2); + default: + UNREACHABLE(); + return 0; + } + } else { + DCHECK(section == EXTENDED_SECTION && is_extended_layout()); + int offset = get_extended_section_header_offset(); + switch (type) { + case INT64: + offset += kExtendedInt64CountOffset; + break; + case CODE_PTR: + offset += kExtendedCodePtrCountOffset; + break; + case HEAP_PTR: + offset += kExtendedHeapPtrCountOffset; + break; + 
case INT32: + offset += kExtendedInt32CountOffset; + break; + default: + UNREACHABLE(); + } + return READ_INT_FIELD(this, offset); + } +} + + +bool ConstantPoolArray::offset_is_type(int offset, Type type) { + return (offset >= OffsetOfElementAt(first_index(type, SMALL_SECTION)) && + offset <= OffsetOfElementAt(last_index(type, SMALL_SECTION))) || + (is_extended_layout() && + offset >= OffsetOfElementAt(first_index(type, EXTENDED_SECTION)) && + offset <= OffsetOfElementAt(last_index(type, EXTENDED_SECTION))); +} + + +ConstantPoolArray::Type ConstantPoolArray::get_type(int index) { + LayoutSection section; + if (is_extended_layout() && index >= first_extended_section_index()) { + section = EXTENDED_SECTION; + } else { + section = SMALL_SECTION; + } + + Type type = FIRST_TYPE; + while (index > last_index(type, section)) { + type = next_type(type); + } + DCHECK(type <= LAST_TYPE); + return type; +} + + +int64_t ConstantPoolArray::get_int64_entry(int index) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == INT64); + return READ_INT64_FIELD(this, OffsetOfElementAt(index)); +} + + +double ConstantPoolArray::get_int64_entry_as_double(int index) { + STATIC_ASSERT(kDoubleSize == kInt64Size); + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == INT64); + return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index)); +} + + +Address ConstantPoolArray::get_code_ptr_entry(int index) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == CODE_PTR); + return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index))); +} + + +Object* ConstantPoolArray::get_heap_ptr_entry(int index) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == HEAP_PTR); + return READ_FIELD(this, OffsetOfElementAt(index)); +} + + +int32_t ConstantPoolArray::get_int32_entry(int index) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == 
INT32); + return READ_INT32_FIELD(this, OffsetOfElementAt(index)); +} + + +void ConstantPoolArray::set(int index, int64_t value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == INT64); + WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set(int index, double value) { + STATIC_ASSERT(kDoubleSize == kInt64Size); + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == INT64); + WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set(int index, Address value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == CODE_PTR); + WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value)); +} + + +void ConstantPoolArray::set(int index, Object* value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(!GetHeap()->InNewSpace(value)); + DCHECK(get_type(index) == HEAP_PTR); + WRITE_FIELD(this, OffsetOfElementAt(index), value); + WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set(int index, int32_t value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(get_type(index) == INT32); + WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set_at_offset(int offset, int32_t value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(offset_is_type(offset, INT32)); + WRITE_INT32_FIELD(this, offset, value); +} + + +void ConstantPoolArray::set_at_offset(int offset, int64_t value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(offset_is_type(offset, INT64)); + WRITE_INT64_FIELD(this, offset, value); +} + + +void ConstantPoolArray::set_at_offset(int offset, double value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(offset_is_type(offset, INT64)); + WRITE_DOUBLE_FIELD(this, offset, value); +} + + +void 
ConstantPoolArray::set_at_offset(int offset, Address value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(offset_is_type(offset, CODE_PTR)); + WRITE_FIELD(this, offset, reinterpret_cast<Object*>(value)); + WRITE_BARRIER(GetHeap(), this, offset, reinterpret_cast<Object*>(value)); +} + + +void ConstantPoolArray::set_at_offset(int offset, Object* value) { + DCHECK(map() == GetHeap()->constant_pool_array_map()); + DCHECK(!GetHeap()->InNewSpace(value)); + DCHECK(offset_is_type(offset, HEAP_PTR)); + WRITE_FIELD(this, offset, value); + WRITE_BARRIER(GetHeap(), this, offset, value); +} + + +void ConstantPoolArray::Init(const NumberOfEntries& small) { + uint32_t small_layout_1 = + Int64CountField::encode(small.count_of(INT64)) | + CodePtrCountField::encode(small.count_of(CODE_PTR)) | + HeapPtrCountField::encode(small.count_of(HEAP_PTR)) | + IsExtendedField::encode(false); + uint32_t small_layout_2 = + Int32CountField::encode(small.count_of(INT32)) | + TotalCountField::encode(small.total_count()) | + WeakObjectStateField::encode(NO_WEAK_OBJECTS); + WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1); + WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2); + if (kHeaderSize != kFirstEntryOffset) { + DCHECK(kFirstEntryOffset - kHeaderSize == kInt32Size); + WRITE_UINT32_FIELD(this, kHeaderSize, 0); // Zero out header padding. + } +} + + +void ConstantPoolArray::InitExtended(const NumberOfEntries& small, + const NumberOfEntries& extended) { + // Initialize small layout fields first. + Init(small); + + // Set is_extended_layout field. + uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset); + small_layout_1 = IsExtendedField::update(small_layout_1, true); + WRITE_INT32_FIELD(this, kSmallLayout1Offset, small_layout_1); + + // Initialize the extended layout fields. 
+ int extended_header_offset = get_extended_section_header_offset(); + WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt64CountOffset, + extended.count_of(INT64)); + WRITE_INT_FIELD(this, extended_header_offset + kExtendedCodePtrCountOffset, + extended.count_of(CODE_PTR)); + WRITE_INT_FIELD(this, extended_header_offset + kExtendedHeapPtrCountOffset, + extended.count_of(HEAP_PTR)); + WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt32CountOffset, + extended.count_of(INT32)); +} + + +int ConstantPoolArray::size() { + NumberOfEntries small(this, SMALL_SECTION); + if (!is_extended_layout()) { + return SizeFor(small); + } else { + NumberOfEntries extended(this, EXTENDED_SECTION); + return SizeForExtended(small, extended); + } +} + + +int ConstantPoolArray::length() { + uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); + int length = TotalCountField::decode(small_layout_2); + if (is_extended_layout()) { + length += number_of_entries(INT64, EXTENDED_SECTION) + + number_of_entries(CODE_PTR, EXTENDED_SECTION) + + number_of_entries(HEAP_PTR, EXTENDED_SECTION) + + number_of_entries(INT32, EXTENDED_SECTION); + } + return length; +} + + +WriteBarrierMode HeapObject::GetWriteBarrierMode( + const DisallowHeapAllocation& promise) { + Heap* heap = GetHeap(); if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER; if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER; return UPDATE_WRITE_BARRIER; @@ -2359,8 +2687,8 @@ void FixedArray::set(int index, Object* value, WriteBarrierMode mode) { - ASSERT(map() != GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < this->length()); + DCHECK(map() != GetHeap()->fixed_cow_array_map()); + DCHECK(index >= 0 && index < this->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); @@ -2370,8 +2698,8 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array, int 
index, Object* value) { - ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < array->length()); + DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map()); + DCHECK(index >= 0 && index < array->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(array, offset, value); Heap* heap = array->GetHeap(); @@ -2384,17 +2712,17 @@ void FixedArray::NoWriteBarrierSet(FixedArray* array, int index, Object* value) { - ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < array->length()); - ASSERT(!array->GetHeap()->InNewSpace(value)); + DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map()); + DCHECK(index >= 0 && index < array->length()); + DCHECK(!array->GetHeap()->InNewSpace(value)); WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value); } void FixedArray::set_undefined(int index) { - ASSERT(map() != GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < this->length()); - ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value())); + DCHECK(map() != GetHeap()->fixed_cow_array_map()); + DCHECK(index >= 0 && index < this->length()); + DCHECK(!GetHeap()->InNewSpace(GetHeap()->undefined_value())); WRITE_FIELD(this, kHeaderSize + index * kPointerSize, GetHeap()->undefined_value()); @@ -2402,8 +2730,8 @@ void FixedArray::set_null(int index) { - ASSERT(index >= 0 && index < this->length()); - ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value())); + DCHECK(index >= 0 && index < this->length()); + DCHECK(!GetHeap()->InNewSpace(GetHeap()->null_value())); WRITE_FIELD(this, kHeaderSize + index * kPointerSize, GetHeap()->null_value()); @@ -2411,17 +2739,19 @@ void FixedArray::set_the_hole(int index) { - ASSERT(map() != GetHeap()->fixed_cow_array_map()); - ASSERT(index >= 0 && index < this->length()); - ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value())); + DCHECK(map() != GetHeap()->fixed_cow_array_map()); + DCHECK(index >= 0 && index < 
this->length()); + DCHECK(!GetHeap()->InNewSpace(GetHeap()->the_hole_value())); WRITE_FIELD(this, kHeaderSize + index * kPointerSize, GetHeap()->the_hole_value()); } -double* FixedDoubleArray::data_start() { - return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize)); +void FixedArray::FillWithHoles(int from, int to) { + for (int i = from; i < to; i++) { + set_the_hole(i); + } } @@ -2431,7 +2761,7 @@ bool DescriptorArray::IsEmpty() { - ASSERT(length() >= kFirstIndex || + DCHECK(length() >= kFirstIndex || this == GetHeap()->empty_descriptor_array()); return length() < kFirstIndex; } @@ -2451,7 +2781,7 @@ uint32_t hash = name->Hash(); int limit = high; - ASSERT(low <= high); + DCHECK(low <= high); while (low != high) { int mid = (low + high) / 2; @@ -2495,7 +2825,7 @@ if (current_hash == hash && entry->Equals(name)) return sorted_index; } } else { - ASSERT(len >= valid_entries); + DCHECK(len >= valid_entries); for (int number = 0; number < valid_entries; number++) { Name* entry = array->GetKey(number); uint32_t current_hash = entry->Hash(); @@ -2509,9 +2839,9 @@ template<SearchMode search_mode, typename T> int Search(T* array, Name* name, int valid_entries) { if (search_mode == VALID_ENTRIES) { - SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries)); + SLOW_DCHECK(array->IsSortedNoDuplicates(valid_entries)); } else { - SLOW_ASSERT(array->IsSortedNoDuplicates()); + SLOW_DCHECK(array->IsSortedNoDuplicates()); } int nof = array->number_of_entries(); @@ -2570,20 +2900,38 @@ void Map::LookupTransition(JSObject* holder, Name* name, LookupResult* result) { - if (HasTransitionArray()) { - TransitionArray* transition_array = transitions(); - int number = transition_array->Search(name); - if (number != TransitionArray::kNotFound) { - return result->TransitionResult( - holder, transition_array->GetTarget(number)); - } + int transition_index = this->SearchTransition(name); + if (transition_index == TransitionArray::kNotFound) return result->NotFound(); + 
result->TransitionResult(holder, this->GetTransition(transition_index)); +} + + +FixedArrayBase* Map::GetInitialElements() { + if (has_fast_smi_or_object_elements() || + has_fast_double_elements()) { + DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array())); + return GetHeap()->empty_fixed_array(); + } else if (has_external_array_elements()) { + ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(this); + DCHECK(!GetHeap()->InNewSpace(empty_array)); + return empty_array; + } else if (has_fixed_typed_array_elements()) { + FixedTypedArrayBase* empty_array = + GetHeap()->EmptyFixedTypedArrayForMap(this); + DCHECK(!GetHeap()->InNewSpace(empty_array)); + return empty_array; + } else if (has_dictionary_elements()) { + DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_slow_element_dictionary())); + return GetHeap()->empty_slow_element_dictionary(); + } else { + UNREACHABLE(); } - result->NotFound(); + return NULL; } Object** DescriptorArray::GetKeySlot(int descriptor_number) { - ASSERT(descriptor_number < number_of_descriptors()); + DCHECK(descriptor_number < number_of_descriptors()); return RawFieldOfElementAt(ToKeyIndex(descriptor_number)); } @@ -2599,7 +2947,7 @@ Name* DescriptorArray::GetKey(int descriptor_number) { - ASSERT(descriptor_number < number_of_descriptors()); + DCHECK(descriptor_number < number_of_descriptors()); return Name::cast(get(ToKeyIndex(descriptor_number))); } @@ -2622,35 +2970,37 @@ void DescriptorArray::SetRepresentation(int descriptor_index, Representation representation) { - ASSERT(!representation.IsNone()); + DCHECK(!representation.IsNone()); PropertyDetails details = GetDetails(descriptor_index); set(ToDetailsIndex(descriptor_index), details.CopyWithRepresentation(representation).AsSmi()); } -void DescriptorArray::InitializeRepresentations(Representation representation) { - int length = number_of_descriptors(); - for (int i = 0; i < length; i++) { - SetRepresentation(i, representation); - } +Object** 
DescriptorArray::GetValueSlot(int descriptor_number) { + DCHECK(descriptor_number < number_of_descriptors()); + return RawFieldOfElementAt(ToValueIndex(descriptor_number)); } -Object** DescriptorArray::GetValueSlot(int descriptor_number) { - ASSERT(descriptor_number < number_of_descriptors()); - return RawFieldOfElementAt(ToValueIndex(descriptor_number)); +int DescriptorArray::GetValueOffset(int descriptor_number) { + return OffsetOfElementAt(ToValueIndex(descriptor_number)); } Object* DescriptorArray::GetValue(int descriptor_number) { - ASSERT(descriptor_number < number_of_descriptors()); + DCHECK(descriptor_number < number_of_descriptors()); return get(ToValueIndex(descriptor_number)); } +void DescriptorArray::SetValue(int descriptor_index, Object* value) { + set(ToValueIndex(descriptor_index), value); +} + + PropertyDetails DescriptorArray::GetDetails(int descriptor_number) { - ASSERT(descriptor_number < number_of_descriptors()); + DCHECK(descriptor_number < number_of_descriptors()); Object* details = get(ToDetailsIndex(descriptor_number)); return PropertyDetails(Smi::cast(details)); } @@ -2662,32 +3012,38 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) { - ASSERT(GetDetails(descriptor_number).type() == FIELD); + DCHECK(GetDetails(descriptor_number).type() == FIELD); return GetDetails(descriptor_number).field_index(); } +HeapType* DescriptorArray::GetFieldType(int descriptor_number) { + DCHECK(GetDetails(descriptor_number).type() == FIELD); + return HeapType::cast(GetValue(descriptor_number)); +} + + Object* DescriptorArray::GetConstant(int descriptor_number) { return GetValue(descriptor_number); } Object* DescriptorArray::GetCallbacksObject(int descriptor_number) { - ASSERT(GetType(descriptor_number) == CALLBACKS); + DCHECK(GetType(descriptor_number) == CALLBACKS); return GetValue(descriptor_number); } AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) { - ASSERT(GetType(descriptor_number) == CALLBACKS); + 
DCHECK(GetType(descriptor_number) == CALLBACKS); Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number)); return reinterpret_cast<AccessorDescriptor*>(p->foreign_address()); } void DescriptorArray::Get(int descriptor_number, Descriptor* desc) { - desc->Init(GetKey(descriptor_number), - GetValue(descriptor_number), + desc->Init(handle(GetKey(descriptor_number), GetIsolate()), + handle(GetValue(descriptor_number), GetIsolate()), GetDetails(descriptor_number)); } @@ -2696,14 +3052,14 @@ Descriptor* desc, const WhitenessWitness&) { // Range check. - ASSERT(descriptor_number < number_of_descriptors()); + DCHECK(descriptor_number < number_of_descriptors()); NoIncrementalWriteBarrierSet(this, ToKeyIndex(descriptor_number), - desc->GetKey()); + *desc->GetKey()); NoIncrementalWriteBarrierSet(this, ToValueIndex(descriptor_number), - desc->GetValue()); + *desc->GetValue()); NoIncrementalWriteBarrierSet(this, ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi()); @@ -2712,16 +3068,17 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) { // Range check. 
- ASSERT(descriptor_number < number_of_descriptors()); + DCHECK(descriptor_number < number_of_descriptors()); - set(ToKeyIndex(descriptor_number), desc->GetKey()); - set(ToValueIndex(descriptor_number), desc->GetValue()); + set(ToKeyIndex(descriptor_number), *desc->GetKey()); + set(ToValueIndex(descriptor_number), *desc->GetValue()); set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi()); } void DescriptorArray::Append(Descriptor* desc, const WhitenessWitness& witness) { + DisallowHeapAllocation no_gc; int descriptor_number = number_of_descriptors(); SetNumberOfDescriptors(descriptor_number + 1); Set(descriptor_number, desc, witness); @@ -2741,6 +3098,7 @@ void DescriptorArray::Append(Descriptor* desc) { + DisallowHeapAllocation no_gc; int descriptor_number = number_of_descriptors(); SetNumberOfDescriptors(descriptor_number + 1); Set(descriptor_number, desc); @@ -2766,10 +3124,10 @@ } -DescriptorArray::WhitenessWitness::WhitenessWitness(FixedArray* array) +DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array) : marking_(array->GetHeap()->incremental_marking()) { marking_->EnterNoMarkingScope(); - ASSERT(!marking_->IsMarking() || + DCHECK(!marking_->IsMarking() || Marking::Color(array) == Marking::WHITE_OBJECT); } @@ -2779,8 +3137,8 @@ } -template<typename Shape, typename Key> -int HashTable<Shape, Key>::ComputeCapacity(int at_least_space_for) { +template<typename Derived, typename Shape, typename Key> +int HashTable<Derived, Shape, Key>::ComputeCapacity(int at_least_space_for) { const int kMinCapacity = 32; int capacity = RoundUpToPowerOf2(at_least_space_for * 2); if (capacity < kMinCapacity) { @@ -2790,17 +3148,17 @@ } -template<typename Shape, typename Key> -int HashTable<Shape, Key>::FindEntry(Key key) { +template<typename Derived, typename Shape, typename Key> +int HashTable<Derived, Shape, Key>::FindEntry(Key key) { return FindEntry(GetIsolate(), key); } // Find entry for key otherwise return kNotFound. 
-template<typename Shape, typename Key> -int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) { +template<typename Derived, typename Shape, typename Key> +int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) { uint32_t capacity = Capacity(); - uint32_t entry = FirstProbe(HashTable<Shape, Key>::Hash(key), capacity); + uint32_t entry = FirstProbe(HashTable::Hash(key), capacity); uint32_t count = 1; // EnsureCapacity will guarantee the hash table is never full. while (true) { @@ -2824,7 +3182,7 @@ } uint32_t SeededNumberDictionary::max_number_key() { - ASSERT(!requires_slow_elements()); + DCHECK(!requires_slow_elements()); Object* max_index_object = get(kMaxNumberKeyIndex); if (!max_index_object->IsSmi()) return 0; uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value()); @@ -2840,82 +3198,108 @@ // Cast operations -CAST_ACCESSOR(FixedArray) -CAST_ACCESSOR(FixedDoubleArray) -CAST_ACCESSOR(FixedTypedArrayBase) +CAST_ACCESSOR(AccessorInfo) +CAST_ACCESSOR(ByteArray) +CAST_ACCESSOR(Cell) +CAST_ACCESSOR(Code) +CAST_ACCESSOR(CodeCacheHashTable) +CAST_ACCESSOR(CompilationCacheTable) +CAST_ACCESSOR(ConsString) CAST_ACCESSOR(ConstantPoolArray) -CAST_ACCESSOR(DescriptorArray) CAST_ACCESSOR(DeoptimizationInputData) CAST_ACCESSOR(DeoptimizationOutputData) CAST_ACCESSOR(DependentCode) -CAST_ACCESSOR(StringTable) -CAST_ACCESSOR(JSFunctionResultCache) -CAST_ACCESSOR(NormalizedMapCache) -CAST_ACCESSOR(ScopeInfo) -CAST_ACCESSOR(CompilationCacheTable) -CAST_ACCESSOR(CodeCacheHashTable) -CAST_ACCESSOR(PolymorphicCodeCacheHashTable) -CAST_ACCESSOR(MapCache) -CAST_ACCESSOR(String) -CAST_ACCESSOR(SeqString) -CAST_ACCESSOR(SeqOneByteString) -CAST_ACCESSOR(SeqTwoByteString) -CAST_ACCESSOR(SlicedString) -CAST_ACCESSOR(ConsString) -CAST_ACCESSOR(ExternalString) +CAST_ACCESSOR(DescriptorArray) +CAST_ACCESSOR(ExternalArray) CAST_ACCESSOR(ExternalAsciiString) +CAST_ACCESSOR(ExternalFloat32Array) +CAST_ACCESSOR(ExternalFloat64Array) 
+CAST_ACCESSOR(ExternalInt16Array) +CAST_ACCESSOR(ExternalInt32Array) +CAST_ACCESSOR(ExternalInt8Array) +CAST_ACCESSOR(ExternalString) CAST_ACCESSOR(ExternalTwoByteString) -CAST_ACCESSOR(Symbol) -CAST_ACCESSOR(Name) -CAST_ACCESSOR(JSReceiver) -CAST_ACCESSOR(JSObject) -CAST_ACCESSOR(Smi) -CAST_ACCESSOR(HeapObject) -CAST_ACCESSOR(HeapNumber) -CAST_ACCESSOR(Oddball) -CAST_ACCESSOR(Cell) -CAST_ACCESSOR(PropertyCell) -CAST_ACCESSOR(SharedFunctionInfo) -CAST_ACCESSOR(Map) -CAST_ACCESSOR(JSFunction) +CAST_ACCESSOR(ExternalUint16Array) +CAST_ACCESSOR(ExternalUint32Array) +CAST_ACCESSOR(ExternalUint8Array) +CAST_ACCESSOR(ExternalUint8ClampedArray) +CAST_ACCESSOR(FixedArray) +CAST_ACCESSOR(FixedArrayBase) +CAST_ACCESSOR(FixedDoubleArray) +CAST_ACCESSOR(FixedTypedArrayBase) +CAST_ACCESSOR(Foreign) +CAST_ACCESSOR(FreeSpace) CAST_ACCESSOR(GlobalObject) -CAST_ACCESSOR(JSGlobalProxy) -CAST_ACCESSOR(JSGlobalObject) -CAST_ACCESSOR(JSBuiltinsObject) -CAST_ACCESSOR(Code) +CAST_ACCESSOR(HeapObject) CAST_ACCESSOR(JSArray) CAST_ACCESSOR(JSArrayBuffer) CAST_ACCESSOR(JSArrayBufferView) -CAST_ACCESSOR(JSTypedArray) +CAST_ACCESSOR(JSBuiltinsObject) CAST_ACCESSOR(JSDataView) -CAST_ACCESSOR(JSRegExp) -CAST_ACCESSOR(JSProxy) +CAST_ACCESSOR(JSDate) +CAST_ACCESSOR(JSFunction) CAST_ACCESSOR(JSFunctionProxy) -CAST_ACCESSOR(JSSet) +CAST_ACCESSOR(JSFunctionResultCache) +CAST_ACCESSOR(JSGeneratorObject) +CAST_ACCESSOR(JSGlobalObject) +CAST_ACCESSOR(JSGlobalProxy) CAST_ACCESSOR(JSMap) +CAST_ACCESSOR(JSMapIterator) +CAST_ACCESSOR(JSMessageObject) +CAST_ACCESSOR(JSModule) +CAST_ACCESSOR(JSObject) +CAST_ACCESSOR(JSProxy) +CAST_ACCESSOR(JSReceiver) +CAST_ACCESSOR(JSRegExp) +CAST_ACCESSOR(JSSet) +CAST_ACCESSOR(JSSetIterator) +CAST_ACCESSOR(JSTypedArray) +CAST_ACCESSOR(JSValue) CAST_ACCESSOR(JSWeakMap) CAST_ACCESSOR(JSWeakSet) -CAST_ACCESSOR(Foreign) -CAST_ACCESSOR(ByteArray) -CAST_ACCESSOR(FreeSpace) -CAST_ACCESSOR(ExternalArray) -CAST_ACCESSOR(ExternalInt8Array) -CAST_ACCESSOR(ExternalUint8Array) 
-CAST_ACCESSOR(ExternalInt16Array) -CAST_ACCESSOR(ExternalUint16Array) -CAST_ACCESSOR(ExternalInt32Array) -CAST_ACCESSOR(ExternalUint32Array) -CAST_ACCESSOR(ExternalFloat32Array) -CAST_ACCESSOR(ExternalFloat64Array) -CAST_ACCESSOR(ExternalUint8ClampedArray) +CAST_ACCESSOR(Map) +CAST_ACCESSOR(MapCache) +CAST_ACCESSOR(Name) +CAST_ACCESSOR(NameDictionary) +CAST_ACCESSOR(NormalizedMapCache) +CAST_ACCESSOR(Object) +CAST_ACCESSOR(ObjectHashTable) +CAST_ACCESSOR(Oddball) +CAST_ACCESSOR(OrderedHashMap) +CAST_ACCESSOR(OrderedHashSet) +CAST_ACCESSOR(PolymorphicCodeCacheHashTable) +CAST_ACCESSOR(PropertyCell) +CAST_ACCESSOR(ScopeInfo) +CAST_ACCESSOR(SeededNumberDictionary) +CAST_ACCESSOR(SeqOneByteString) +CAST_ACCESSOR(SeqString) +CAST_ACCESSOR(SeqTwoByteString) +CAST_ACCESSOR(SharedFunctionInfo) +CAST_ACCESSOR(SlicedString) +CAST_ACCESSOR(Smi) +CAST_ACCESSOR(String) +CAST_ACCESSOR(StringTable) CAST_ACCESSOR(Struct) -CAST_ACCESSOR(AccessorInfo) +CAST_ACCESSOR(Symbol) +CAST_ACCESSOR(UnseededNumberDictionary) +CAST_ACCESSOR(WeakHashTable) + template <class Traits> FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) { - SLOW_ASSERT(object->IsHeapObject() && - HeapObject::cast(object)->map()->instance_type() == - Traits::kInstanceType); + SLOW_DCHECK(object->IsHeapObject() && + HeapObject::cast(object)->map()->instance_type() == + Traits::kInstanceType); + return reinterpret_cast<FixedTypedArray<Traits>*>(object); +} + + +template <class Traits> +const FixedTypedArray<Traits>* +FixedTypedArray<Traits>::cast(const Object* object) { + SLOW_DCHECK(object->IsHeapObject() && + HeapObject::cast(object)->map()->instance_type() == + Traits::kInstanceType); return reinterpret_cast<FixedTypedArray<Traits>*>(object); } @@ -2925,17 +3309,30 @@ #undef MAKE_STRUCT_CAST -template <typename Shape, typename Key> -HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) { - ASSERT(obj->IsHashTable()); +template <typename Derived, typename Shape, typename Key> 
+HashTable<Derived, Shape, Key>* +HashTable<Derived, Shape, Key>::cast(Object* obj) { + SLOW_DCHECK(obj->IsHashTable()); return reinterpret_cast<HashTable*>(obj); } +template <typename Derived, typename Shape, typename Key> +const HashTable<Derived, Shape, Key>* +HashTable<Derived, Shape, Key>::cast(const Object* obj) { + SLOW_DCHECK(obj->IsHashTable()); + return reinterpret_cast<const HashTable*>(obj); +} + + SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) +SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) + SMI_ACCESSORS(FreeSpace, size, kSizeOffset) +NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset) SMI_ACCESSORS(String, length, kLengthOffset) +SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset) uint32_t Name::hash_field() { @@ -2961,9 +3358,21 @@ } +bool Name::Equals(Handle<Name> one, Handle<Name> two) { + if (one.is_identical_to(two)) return true; + if ((one->IsInternalizedString() && two->IsInternalizedString()) || + one->IsSymbol() || two->IsSymbol()) { + return false; + } + return String::SlowEquals(Handle<String>::cast(one), + Handle<String>::cast(two)); +} + + ACCESSORS(Symbol, name, Object, kNameOffset) ACCESSORS(Symbol, flags, Smi, kFlagsOffset) BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit) +BOOL_ACCESSORS(Symbol, flags, is_own, kOwnBit) bool String::Equals(String* other) { @@ -2975,24 +3384,25 @@ } -MaybeObject* String::TryFlatten(PretenureFlag pretenure) { - if (!StringShape(this).IsCons()) return this; - ConsString* cons = ConsString::cast(this); - if (cons->IsFlat()) return cons->first(); - return SlowTryFlatten(pretenure); +bool String::Equals(Handle<String> one, Handle<String> two) { + if (one.is_identical_to(two)) return true; + if (one->IsInternalizedString() && two->IsInternalizedString()) { + return false; + } + return SlowEquals(one, two); } -String* String::TryFlattenGetString(PretenureFlag pretenure) { - MaybeObject* flat = TryFlatten(pretenure); - Object* successfully_flattened; - if 
(!flat->ToObject(&successfully_flattened)) return this; - return String::cast(successfully_flattened); +Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) { + if (!string->IsConsString()) return string; + Handle<ConsString> cons = Handle<ConsString>::cast(string); + if (cons->IsFlat()) return handle(cons->first()); + return SlowFlatten(cons, pretenure); } uint16_t String::Get(int index) { - ASSERT(index >= 0 && index < length()); + DCHECK(index >= 0 && index < length()); switch (StringShape(this).full_representation_tag()) { case kSeqStringTag | kOneByteStringTag: return SeqOneByteString::cast(this)->SeqOneByteStringGet(index); @@ -3018,8 +3428,8 @@ void String::Set(int index, uint16_t value) { - ASSERT(index >= 0 && index < length()); - ASSERT(StringShape(this).IsSequential()); + DCHECK(index >= 0 && index < length()); + DCHECK(StringShape(this).IsSequential()); return this->IsOneByteRepresentation() ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value) @@ -3036,112 +3446,76 @@ String* String::GetUnderlying() { // Giving direct access to underlying string only makes sense if the // wrapping string is already flattened. 
- ASSERT(this->IsFlat()); - ASSERT(StringShape(this).IsIndirect()); + DCHECK(this->IsFlat()); + DCHECK(StringShape(this).IsIndirect()); STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset); const int kUnderlyingOffset = SlicedString::kParentOffset; return String::cast(READ_FIELD(this, kUnderlyingOffset)); } -template<class Visitor, class ConsOp> -void String::Visit( - String* string, - unsigned offset, - Visitor& visitor, - ConsOp& cons_op, - int32_t type, - unsigned length) { - ASSERT(length == static_cast<unsigned>(string->length())); - ASSERT(offset <= length); - unsigned slice_offset = offset; +template<class Visitor> +ConsString* String::VisitFlat(Visitor* visitor, + String* string, + const int offset) { + int slice_offset = offset; + const int length = string->length(); + DCHECK(offset <= length); while (true) { - ASSERT(type == string->map()->instance_type()); - + int32_t type = string->map()->instance_type(); switch (type & (kStringRepresentationMask | kStringEncodingMask)) { case kSeqStringTag | kOneByteStringTag: - visitor.VisitOneByteString( + visitor->VisitOneByteString( SeqOneByteString::cast(string)->GetChars() + slice_offset, length - offset); - return; + return NULL; case kSeqStringTag | kTwoByteStringTag: - visitor.VisitTwoByteString( + visitor->VisitTwoByteString( SeqTwoByteString::cast(string)->GetChars() + slice_offset, length - offset); - return; + return NULL; case kExternalStringTag | kOneByteStringTag: - visitor.VisitOneByteString( + visitor->VisitOneByteString( ExternalAsciiString::cast(string)->GetChars() + slice_offset, length - offset); - return; + return NULL; case kExternalStringTag | kTwoByteStringTag: - visitor.VisitTwoByteString( + visitor->VisitTwoByteString( ExternalTwoByteString::cast(string)->GetChars() + slice_offset, length - offset); - return; + return NULL; case kSlicedStringTag | kOneByteStringTag: case kSlicedStringTag | kTwoByteStringTag: { SlicedString* slicedString = SlicedString::cast(string); 
slice_offset += slicedString->offset(); string = slicedString->parent(); - type = string->map()->instance_type(); continue; } case kConsStringTag | kOneByteStringTag: case kConsStringTag | kTwoByteStringTag: - string = cons_op.Operate(string, &offset, &type, &length); - if (string == NULL) return; - slice_offset = offset; - ASSERT(length == static_cast<unsigned>(string->length())); - continue; + return ConsString::cast(string); default: UNREACHABLE(); - return; + return NULL; } } } -// TODO(dcarney): Remove this class after conversion to VisitFlat. -class ConsStringCaptureOp { - public: - inline ConsStringCaptureOp() : cons_string_(NULL) {} - inline String* Operate(String* string, unsigned*, int32_t*, unsigned*) { - cons_string_ = ConsString::cast(string); - return NULL; - } - ConsString* cons_string_; -}; - - -template<class Visitor> -ConsString* String::VisitFlat(Visitor* visitor, - String* string, - int offset, - int length, - int32_t type) { - ASSERT(length >= 0 && length == string->length()); - ASSERT(offset >= 0 && offset <= length); - ConsStringCaptureOp op; - Visit(string, offset, *visitor, op, type, static_cast<unsigned>(length)); - return op.cons_string_; -} - - uint16_t SeqOneByteString::SeqOneByteStringGet(int index) { - ASSERT(index >= 0 && index < length()); + DCHECK(index >= 0 && index < length()); return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize); } void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) { - ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode); + DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode); WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, static_cast<byte>(value)); } @@ -3168,13 +3542,13 @@ uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) { - ASSERT(index >= 0 && index < length()); + DCHECK(index >= 0 && index < length()); return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize); } void SeqTwoByteString::SeqTwoByteStringSet(int index, 
uint16_t value) { - ASSERT(index >= 0 && index < length()); + DCHECK(index >= 0 && index < length()); WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value); } @@ -3195,7 +3569,7 @@ void SlicedString::set_parent(String* parent, WriteBarrierMode mode) { - ASSERT(parent->IsSeqString() || parent->IsExternalString()); + DCHECK(parent->IsSeqString() || parent->IsExternalString()); WRITE_FIELD(this, kParentOffset, parent); CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode); } @@ -3257,7 +3631,7 @@ void ExternalAsciiString::set_resource( const ExternalAsciiString::Resource* resource) { - ASSERT(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize)); + DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize)); *reinterpret_cast<const Resource**>( FIELD_ADDR(this, kResourceOffset)) = resource; if (resource != NULL) update_data_cache(); @@ -3270,7 +3644,7 @@ uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) { - ASSERT(index >= 0 && index < length()); + DCHECK(index >= 0 && index < length()); return GetChars()[index]; } @@ -3302,7 +3676,7 @@ uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) { - ASSERT(index >= 0 && index < length()); + DCHECK(index >= 0 && index < length()); return GetChars()[index]; } @@ -3313,12 +3687,7 @@ } -String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) { - return NULL; -} - - -unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) { +int ConsStringIteratorOp::OffsetForDepth(int depth) { return depth & kDepthMask; } @@ -3340,94 +3709,56 @@ void ConsStringIteratorOp::Pop() { - ASSERT(depth_ > 0); - ASSERT(depth_ <= maximum_depth_); + DCHECK(depth_ > 0); + DCHECK(depth_ <= maximum_depth_); depth_--; } -bool ConsStringIteratorOp::HasMore() { - return depth_ != 0; -} - - -void ConsStringIteratorOp::Reset() { - depth_ = 0; -} - - -String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out, - unsigned* length_out) { - bool blew_stack = 
false; - String* string = NextLeaf(&blew_stack, type_out, length_out); - // String found. - if (string != NULL) { - // Verify output. - ASSERT(*length_out == static_cast<unsigned>(string->length())); - ASSERT(*type_out == string->map()->instance_type()); - return string; - } - // Traversal complete. - if (!blew_stack) return NULL; - // Restart search from root. - unsigned offset_out; - string = Search(&offset_out, type_out, length_out); - // Verify output. - ASSERT(string == NULL || offset_out == 0); - ASSERT(string == NULL || - *length_out == static_cast<unsigned>(string->length())); - ASSERT(string == NULL || *type_out == string->map()->instance_type()); - return string; -} - - uint16_t StringCharacterStream::GetNext() { - ASSERT(buffer8_ != NULL && end_ != NULL); + DCHECK(buffer8_ != NULL && end_ != NULL); // Advance cursor if needed. - // TODO(dcarney): Ensure uses of the api call HasMore first and avoid this. if (buffer8_ == end_) HasMore(); - ASSERT(buffer8_ < end_); + DCHECK(buffer8_ < end_); return is_one_byte_ ? 
*buffer8_++ : *buffer16_++; } StringCharacterStream::StringCharacterStream(String* string, ConsStringIteratorOp* op, - unsigned offset) + int offset) : is_one_byte_(false), op_(op) { Reset(string, offset); } -void StringCharacterStream::Reset(String* string, unsigned offset) { - op_->Reset(); +void StringCharacterStream::Reset(String* string, int offset) { buffer8_ = NULL; end_ = NULL; - int32_t type = string->map()->instance_type(); - unsigned length = string->length(); - String::Visit(string, offset, *this, *op_, type, length); + ConsString* cons_string = String::VisitFlat(this, string, offset); + op_->Reset(cons_string, offset); + if (cons_string != NULL) { + string = op_->Next(&offset); + if (string != NULL) String::VisitFlat(this, string, offset); + } } bool StringCharacterStream::HasMore() { if (buffer8_ != end_) return true; - if (!op_->HasMore()) return false; - unsigned length; - int32_t type; - String* string = op_->ContinueOperation(&type, &length); + int offset; + String* string = op_->Next(&offset); + DCHECK_EQ(offset, 0); if (string == NULL) return false; - ASSERT(!string->IsConsString()); - ASSERT(string->length() != 0); - ConsStringNullOp null_op; - String::Visit(string, 0, *this, null_op, type, length); - ASSERT(buffer8_ != end_); + String::VisitFlat(this, string); + DCHECK(buffer8_ != end_); return true; } void StringCharacterStream::VisitOneByteString( - const uint8_t* chars, unsigned length) { + const uint8_t* chars, int length) { is_one_byte_ = true; buffer8_ = chars; end_ = chars + length; @@ -3435,7 +3766,7 @@ void StringCharacterStream::VisitTwoByteString( - const uint16_t* chars, unsigned length) { + const uint16_t* chars, int length) { is_one_byte_ = false; buffer16_ = chars; end_ = reinterpret_cast<const uint8_t*>(chars + length); @@ -3479,25 +3810,25 @@ byte ByteArray::get(int index) { - ASSERT(index >= 0 && index < this->length()); + DCHECK(index >= 0 && index < this->length()); return READ_BYTE_FIELD(this, kHeaderSize + index * 
kCharSize); } void ByteArray::set(int index, byte value) { - ASSERT(index >= 0 && index < this->length()); + DCHECK(index >= 0 && index < this->length()); WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value); } int ByteArray::get_int(int index) { - ASSERT(index >= 0 && (index * kIntSize) < this->length()); + DCHECK(index >= 0 && (index * kIntSize) < this->length()); return READ_INT_FIELD(this, kHeaderSize + index * kIntSize); } ByteArray* ByteArray::FromDataStartAddress(Address address) { - ASSERT_TAG_ALIGNED(address); + DCHECK_TAG_ALIGNED(address); return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag); } @@ -3513,25 +3844,28 @@ uint8_t ExternalUint8ClampedArray::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint8_t* ptr = external_uint8_clamped_pointer(); return ptr[index]; } -MaybeObject* ExternalUint8ClampedArray::get(int index) { - return Smi::FromInt(static_cast<int>(get_scalar(index))); +Handle<Object> ExternalUint8ClampedArray::get( + Handle<ExternalUint8ClampedArray> array, + int index) { + return Handle<Smi>(Smi::FromInt(array->get_scalar(index)), + array->GetIsolate()); } void ExternalUint8ClampedArray::set(int index, uint8_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint8_t* ptr = external_uint8_clamped_pointer(); ptr[index] = value; } -void* ExternalArray::external_pointer() { +void* ExternalArray::external_pointer() const { intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset); return reinterpret_cast<void*>(ptr); } @@ -3544,152 +3878,166 @@ int8_t ExternalInt8Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); int8_t* ptr = static_cast<int8_t*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalInt8Array::get(int index) { - return 
Smi::FromInt(static_cast<int>(get_scalar(index))); +Handle<Object> ExternalInt8Array::get(Handle<ExternalInt8Array> array, + int index) { + return Handle<Smi>(Smi::FromInt(array->get_scalar(index)), + array->GetIsolate()); } void ExternalInt8Array::set(int index, int8_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); int8_t* ptr = static_cast<int8_t*>(external_pointer()); ptr[index] = value; } uint8_t ExternalUint8Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint8_t* ptr = static_cast<uint8_t*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalUint8Array::get(int index) { - return Smi::FromInt(static_cast<int>(get_scalar(index))); +Handle<Object> ExternalUint8Array::get(Handle<ExternalUint8Array> array, + int index) { + return Handle<Smi>(Smi::FromInt(array->get_scalar(index)), + array->GetIsolate()); } void ExternalUint8Array::set(int index, uint8_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint8_t* ptr = static_cast<uint8_t*>(external_pointer()); ptr[index] = value; } int16_t ExternalInt16Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); int16_t* ptr = static_cast<int16_t*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalInt16Array::get(int index) { - return Smi::FromInt(static_cast<int>(get_scalar(index))); +Handle<Object> ExternalInt16Array::get(Handle<ExternalInt16Array> array, + int index) { + return Handle<Smi>(Smi::FromInt(array->get_scalar(index)), + array->GetIsolate()); } void ExternalInt16Array::set(int index, int16_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); int16_t* ptr = static_cast<int16_t*>(external_pointer()); ptr[index] = value; } uint16_t 
ExternalUint16Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint16_t* ptr = static_cast<uint16_t*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalUint16Array::get(int index) { - return Smi::FromInt(static_cast<int>(get_scalar(index))); +Handle<Object> ExternalUint16Array::get(Handle<ExternalUint16Array> array, + int index) { + return Handle<Smi>(Smi::FromInt(array->get_scalar(index)), + array->GetIsolate()); } void ExternalUint16Array::set(int index, uint16_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint16_t* ptr = static_cast<uint16_t*>(external_pointer()); ptr[index] = value; } int32_t ExternalInt32Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); int32_t* ptr = static_cast<int32_t*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalInt32Array::get(int index) { - return GetHeap()->NumberFromInt32(get_scalar(index)); +Handle<Object> ExternalInt32Array::get(Handle<ExternalInt32Array> array, + int index) { + return array->GetIsolate()->factory()-> + NewNumberFromInt(array->get_scalar(index)); } void ExternalInt32Array::set(int index, int32_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); int32_t* ptr = static_cast<int32_t*>(external_pointer()); ptr[index] = value; } uint32_t ExternalUint32Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint32_t* ptr = static_cast<uint32_t*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalUint32Array::get(int index) { - return GetHeap()->NumberFromUint32(get_scalar(index)); +Handle<Object> ExternalUint32Array::get(Handle<ExternalUint32Array> array, + int index) { + return 
array->GetIsolate()->factory()-> + NewNumberFromUint(array->get_scalar(index)); } void ExternalUint32Array::set(int index, uint32_t value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); uint32_t* ptr = static_cast<uint32_t*>(external_pointer()); ptr[index] = value; } float ExternalFloat32Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); float* ptr = static_cast<float*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalFloat32Array::get(int index) { - return GetHeap()->NumberFromDouble(get_scalar(index)); +Handle<Object> ExternalFloat32Array::get(Handle<ExternalFloat32Array> array, + int index) { + return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index)); } void ExternalFloat32Array::set(int index, float value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); float* ptr = static_cast<float*>(external_pointer()); ptr[index] = value; } double ExternalFloat64Array::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); double* ptr = static_cast<double*>(external_pointer()); return ptr[index]; } -MaybeObject* ExternalFloat64Array::get(int index) { - return GetHeap()->NumberFromDouble(get_scalar(index)); +Handle<Object> ExternalFloat64Array::get(Handle<ExternalFloat64Array> array, + int index) { + return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index)); } void ExternalFloat64Array::set(int index, double value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); double* ptr = static_cast<double*>(external_pointer()); ptr[index] = value; } @@ -3700,10 +4048,9 @@ } -int FixedTypedArrayBase::DataSize() { - InstanceType instance_type = map()->instance_type(); +int 
FixedTypedArrayBase::DataSize(InstanceType type) { int element_size; - switch (instance_type) { + switch (type) { #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \ case FIXED_##TYPE##_ARRAY_TYPE: \ element_size = size; \ @@ -3719,11 +4066,21 @@ } +int FixedTypedArrayBase::DataSize() { + return DataSize(map()->instance_type()); +} + + int FixedTypedArrayBase::size() { return OBJECT_POINTER_ALIGN(kDataOffset + DataSize()); } +int FixedTypedArrayBase::TypedArraySize(InstanceType type) { + return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type)); +} + + uint8_t Uint8ArrayTraits::defaultValue() { return 0; } @@ -3746,16 +4103,16 @@ float Float32ArrayTraits::defaultValue() { - return static_cast<float>(OS::nan_value()); + return static_cast<float>(base::OS::nan_value()); } -double Float64ArrayTraits::defaultValue() { return OS::nan_value(); } +double Float64ArrayTraits::defaultValue() { return base::OS::nan_value(); } template <class Traits> typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); ElementType* ptr = reinterpret_cast<ElementType*>( FIELD_ADDR(this, kDataOffset)); return ptr[index]; @@ -3765,14 +4122,14 @@ template<> inline FixedTypedArray<Float64ArrayTraits>::ElementType FixedTypedArray<Float64ArrayTraits>::get_scalar(int index) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); return READ_DOUBLE_FIELD(this, ElementOffset(index)); } template <class Traits> void FixedTypedArray<Traits>::set(int index, ElementType value) { - ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); ElementType* ptr = reinterpret_cast<ElementType*>( FIELD_ADDR(this, kDataOffset)); ptr[index] = value; @@ -3782,7 +4139,7 @@ template<> inline void FixedTypedArray<Float64ArrayTraits>::set( int index, Float64ArrayTraits::ElementType value) { - 
ASSERT((index >= 0) && (index < this->length())); + DCHECK((index >= 0) && (index < this->length())); WRITE_DOUBLE_FIELD(this, ElementOffset(index), value); } @@ -3829,83 +4186,80 @@ template <class Traits> -MaybeObject* FixedTypedArray<Traits>::get(int index) { - return Traits::ToObject(GetHeap(), get_scalar(index)); +Handle<Object> FixedTypedArray<Traits>::get( + Handle<FixedTypedArray<Traits> > array, + int index) { + return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index)); } + template <class Traits> -MaybeObject* FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) { +Handle<Object> FixedTypedArray<Traits>::SetValue( + Handle<FixedTypedArray<Traits> > array, + uint32_t index, + Handle<Object> value) { ElementType cast_value = Traits::defaultValue(); - if (index < static_cast<uint32_t>(length())) { + if (index < static_cast<uint32_t>(array->length())) { if (value->IsSmi()) { - int int_value = Smi::cast(value)->value(); + int int_value = Handle<Smi>::cast(value)->value(); cast_value = from_int(int_value); } else if (value->IsHeapNumber()) { - double double_value = HeapNumber::cast(value)->value(); + double double_value = Handle<HeapNumber>::cast(value)->value(); cast_value = from_double(double_value); } else { // Clamp undefined to the default value. All other types have been // converted to a number type further up in the call chain. 
- ASSERT(value->IsUndefined()); + DCHECK(value->IsUndefined()); } - set(index, cast_value); + array->set(index, cast_value); } - return Traits::ToObject(GetHeap(), cast_value); -} - -template <class Traits> -Handle<Object> FixedTypedArray<Traits>::SetValue( - Handle<FixedTypedArray<Traits> > array, - uint32_t index, - Handle<Object> value) { - CALL_HEAP_FUNCTION(array->GetIsolate(), - array->SetValue(index, *value), - Object); + return Traits::ToHandle(array->GetIsolate(), cast_value); } -MaybeObject* Uint8ArrayTraits::ToObject(Heap*, uint8_t scalar) { - return Smi::FromInt(scalar); +Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) { + return handle(Smi::FromInt(scalar), isolate); } -MaybeObject* Uint8ClampedArrayTraits::ToObject(Heap*, uint8_t scalar) { - return Smi::FromInt(scalar); +Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate, + uint8_t scalar) { + return handle(Smi::FromInt(scalar), isolate); } -MaybeObject* Int8ArrayTraits::ToObject(Heap*, int8_t scalar) { - return Smi::FromInt(scalar); +Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) { + return handle(Smi::FromInt(scalar), isolate); } -MaybeObject* Uint16ArrayTraits::ToObject(Heap*, uint16_t scalar) { - return Smi::FromInt(scalar); +Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) { + return handle(Smi::FromInt(scalar), isolate); } -MaybeObject* Int16ArrayTraits::ToObject(Heap*, int16_t scalar) { - return Smi::FromInt(scalar); +Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) { + return handle(Smi::FromInt(scalar), isolate); } -MaybeObject* Uint32ArrayTraits::ToObject(Heap* heap, uint32_t scalar) { - return heap->NumberFromUint32(scalar); +Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) { + return isolate->factory()->NewNumberFromUint(scalar); } -MaybeObject* Int32ArrayTraits::ToObject(Heap* heap, int32_t scalar) { - return 
heap->NumberFromInt32(scalar); +Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) { + return isolate->factory()->NewNumberFromInt(scalar); } -MaybeObject* Float32ArrayTraits::ToObject(Heap* heap, float scalar) { - return heap->NumberFromDouble(scalar); +Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) { + return isolate->factory()->NewNumber(scalar); } -MaybeObject* Float64ArrayTraits::ToObject(Heap* heap, double scalar) { - return heap->NumberFromDouble(scalar); +Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) { + return isolate->factory()->NewNumber(scalar); } @@ -3915,13 +4269,14 @@ void Map::set_visitor_id(int id) { - ASSERT(0 <= id && id < 256); + DCHECK(0 <= id && id < 256); WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id)); } int Map::instance_size() { - return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2; + return NOBARRIER_READ_BYTE_FIELD( + this, kInstanceSizeOffset) << kPointerSizeLog2; } @@ -3938,7 +4293,7 @@ int Map::GetInObjectPropertyOffset(int index) { // Adjust for the number of properties stored in the object. index -= inobject_properties(); - ASSERT(index < 0); + DCHECK(index <= 0); return instance_size() + (index * kPointerSize); } @@ -3947,7 +4302,7 @@ int instance_size = map->instance_size(); if (instance_size != kVariableSizeSentinel) return instance_size; // Only inline the most frequent cases. 
- int instance_type = static_cast<int>(map->instance_type()); + InstanceType instance_type = map->instance_type(); if (instance_type == FIXED_ARRAY_TYPE) { return FixedArray::BodyDescriptor::SizeOf(map, this); } @@ -3960,7 +4315,7 @@ return reinterpret_cast<ByteArray*>(this)->ByteArraySize(); } if (instance_type == FREE_SPACE_TYPE) { - return reinterpret_cast<FreeSpace*>(this)->size(); + return reinterpret_cast<FreeSpace*>(this)->nobarrier_size(); } if (instance_type == STRING_TYPE || instance_type == INTERNALIZED_STRING_TYPE) { @@ -3972,37 +4327,35 @@ reinterpret_cast<FixedDoubleArray*>(this)->length()); } if (instance_type == CONSTANT_POOL_ARRAY_TYPE) { - return ConstantPoolArray::SizeFor( - reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(), - reinterpret_cast<ConstantPoolArray*>(this)->count_of_code_ptr_entries(), - reinterpret_cast<ConstantPoolArray*>(this)->count_of_heap_ptr_entries(), - reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries()); + return reinterpret_cast<ConstantPoolArray*>(this)->size(); } if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE && instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) { - return reinterpret_cast<FixedTypedArrayBase*>(this)->size(); + return reinterpret_cast<FixedTypedArrayBase*>( + this)->TypedArraySize(instance_type); } - ASSERT(instance_type == CODE_TYPE); + DCHECK(instance_type == CODE_TYPE); return reinterpret_cast<Code*>(this)->CodeSize(); } void Map::set_instance_size(int value) { - ASSERT_EQ(0, value & (kPointerSize - 1)); + DCHECK_EQ(0, value & (kPointerSize - 1)); value >>= kPointerSizeLog2; - ASSERT(0 <= value && value < 256); - WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value)); + DCHECK(0 <= value && value < 256); + NOBARRIER_WRITE_BYTE_FIELD( + this, kInstanceSizeOffset, static_cast<byte>(value)); } void Map::set_inobject_properties(int value) { - ASSERT(0 <= value && value < 256); + DCHECK(0 <= value && value < 256); WRITE_BYTE_FIELD(this, 
kInObjectPropertiesOffset, static_cast<byte>(value)); } void Map::set_pre_allocated_property_fields(int value) { - ASSERT(0 <= value && value < 256); + DCHECK(0 <= value && value < 256); WRITE_BYTE_FIELD(this, kPreAllocatedPropertyFieldsOffset, static_cast<byte>(value)); @@ -4064,12 +4417,12 @@ void Map::set_function_with_prototype(bool value) { - set_bit_field3(FunctionWithPrototype::update(bit_field3(), value)); + set_bit_field(FunctionWithPrototype::update(bit_field(), value)); } bool Map::function_with_prototype() { - return FunctionWithPrototype::decode(bit_field3()); + return FunctionWithPrototype::decode(bit_field()); } @@ -4100,28 +4453,15 @@ } -void Map::set_attached_to_shared_function_info(bool value) { - if (value) { - set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo)); - } else { - set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo)); - } -} - -bool Map::attached_to_shared_function_info() { - return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0; +void Map::set_is_prototype_map(bool value) { + set_bit_field2(IsPrototypeMapBits::update(bit_field2(), value)); } - -void Map::set_is_shared(bool value) { - set_bit_field3(IsShared::update(bit_field3(), value)); +bool Map::is_prototype_map() { + return IsPrototypeMapBits::decode(bit_field2()); } -bool Map::is_shared() { - return IsShared::decode(bit_field3()); } - - void Map::set_dictionary_map(bool value) { uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value); new_bit_field3 = IsUnstable::update(new_bit_field3, value); @@ -4139,8 +4479,8 @@ } -void Map::set_owns_descriptors(bool is_shared) { - set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared)); +void Map::set_owns_descriptors(bool owns_descriptors) { + set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors)); } @@ -4179,6 +4519,26 @@ } +void Map::set_done_inobject_slack_tracking(bool value) { + set_bit_field3(DoneInobjectSlackTracking::update(bit_field3(), value)); +} + 
+ +bool Map::done_inobject_slack_tracking() { + return DoneInobjectSlackTracking::decode(bit_field3()); +} + + +void Map::set_construction_count(int value) { + set_bit_field3(ConstructionCount::update(bit_field3(), value)); +} + + +int Map::construction_count() { + return ConstructionCount::decode(bit_field3()); +} + + void Map::freeze() { set_bit_field3(IsFrozen::update(bit_field3(), true)); } @@ -4305,12 +4665,21 @@ } +bool Code::IsCodeStubOrIC() { + return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC || + kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC || + kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC || + kind() == COMPARE_IC || kind() == COMPARE_NIL_IC || + kind() == TO_BOOLEAN_IC; +} + + InlineCacheState Code::ic_state() { InlineCacheState result = ExtractICStateFromFlags(flags()); // Only allow uninitialized or debugger states for non-IC code // objects. This is used in the debugger to determine whether or not // a call to code object has been replaced with a debug break call. 
- ASSERT(is_inline_cache_stub() || + DCHECK(is_inline_cache_stub() || result == UNINITIALIZED || result == DEBUG_STUB); return result; @@ -4318,7 +4687,7 @@ ExtraICState Code::extra_ic_state() { - ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB); + DCHECK(is_inline_cache_stub() || ic_state() == DEBUG_STUB); return ExtractExtraICStateFromFlags(flags()); } @@ -4345,6 +4714,11 @@ } +inline bool Code::is_hydrogen_stub() { + return is_crankshafted() && kind() != OPTIMIZED_FUNCTION; +} + + inline void Code::set_is_crankshafted(bool value) { int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); int updated = IsCrankshaftedField::update(previous, value); @@ -4352,57 +4726,42 @@ } -int Code::major_key() { - ASSERT(has_major_key()); - return StubMajorKeyField::decode( - READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)); -} - - -void Code::set_major_key(int major) { - ASSERT(has_major_key()); - ASSERT(0 <= major && major < 256); - int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); - int updated = StubMajorKeyField::update(previous, major); - WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated); +inline bool Code::is_turbofanned() { + DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB); + return IsTurbofannedField::decode( + READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); } -bool Code::has_major_key() { - return kind() == STUB || - kind() == HANDLER || - kind() == BINARY_OP_IC || - kind() == COMPARE_IC || - kind() == COMPARE_NIL_IC || - kind() == LOAD_IC || - kind() == KEYED_LOAD_IC || - kind() == STORE_IC || - kind() == KEYED_STORE_IC || - kind() == TO_BOOLEAN_IC; +inline void Code::set_is_turbofanned(bool value) { + DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB); + int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); + int updated = IsTurbofannedField::update(previous, value); + WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); } bool Code::optimizable() { - ASSERT_EQ(FUNCTION, 
kind()); + DCHECK_EQ(FUNCTION, kind()); return READ_BYTE_FIELD(this, kOptimizableOffset) == 1; } void Code::set_optimizable(bool value) { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0); } bool Code::has_deoptimization_support() { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); byte flags = READ_BYTE_FIELD(this, kFullCodeFlags); return FullCodeFlagsHasDeoptimizationSupportField::decode(flags); } void Code::set_has_deoptimization_support(bool value) { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); byte flags = READ_BYTE_FIELD(this, kFullCodeFlags); flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value); WRITE_BYTE_FIELD(this, kFullCodeFlags, flags); @@ -4410,14 +4769,14 @@ bool Code::has_debug_break_slots() { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); byte flags = READ_BYTE_FIELD(this, kFullCodeFlags); return FullCodeFlagsHasDebugBreakSlotsField::decode(flags); } void Code::set_has_debug_break_slots(bool value) { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); byte flags = READ_BYTE_FIELD(this, kFullCodeFlags); flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value); WRITE_BYTE_FIELD(this, kFullCodeFlags, flags); @@ -4425,14 +4784,14 @@ bool Code::is_compiled_optimizable() { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); byte flags = READ_BYTE_FIELD(this, kFullCodeFlags); return FullCodeFlagsIsCompiledOptimizable::decode(flags); } void Code::set_compiled_optimizable(bool value) { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); byte flags = READ_BYTE_FIELD(this, kFullCodeFlags); flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value); WRITE_BYTE_FIELD(this, kFullCodeFlags, flags); @@ -4440,33 +4799,48 @@ int Code::allow_osr_at_loop_nesting_level() { - ASSERT_EQ(FUNCTION, kind()); - return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset); + DCHECK_EQ(FUNCTION, 
kind()); + int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); + return AllowOSRAtLoopNestingLevelField::decode(fields); } void Code::set_allow_osr_at_loop_nesting_level(int level) { - ASSERT_EQ(FUNCTION, kind()); - ASSERT(level >= 0 && level <= kMaxLoopNestingMarker); - WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level); + DCHECK_EQ(FUNCTION, kind()); + DCHECK(level >= 0 && level <= kMaxLoopNestingMarker); + int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); + int updated = AllowOSRAtLoopNestingLevelField::update(previous, level); + WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated); } int Code::profiler_ticks() { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); return READ_BYTE_FIELD(this, kProfilerTicksOffset); } void Code::set_profiler_ticks(int ticks) { - ASSERT_EQ(FUNCTION, kind()); - ASSERT(ticks < 256); + DCHECK_EQ(FUNCTION, kind()); + DCHECK(ticks < 256); WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks); } +int Code::builtin_index() { + DCHECK_EQ(BUILTIN, kind()); + return READ_INT32_FIELD(this, kKindSpecificFlags1Offset); +} + + +void Code::set_builtin_index(int index) { + DCHECK_EQ(BUILTIN, kind()); + WRITE_INT32_FIELD(this, kKindSpecificFlags1Offset, index); +} + + unsigned Code::stack_slots() { - ASSERT(is_crankshafted()); + DCHECK(is_crankshafted()); return StackSlotsField::decode( READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); } @@ -4474,7 +4848,7 @@ void Code::set_stack_slots(unsigned slots) { CHECK(slots <= (1 << kStackSlotsBitCount)); - ASSERT(is_crankshafted()); + DCHECK(is_crankshafted()); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); int updated = StackSlotsField::update(previous, slots); WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); @@ -4482,7 +4856,7 @@ unsigned Code::safepoint_table_offset() { - ASSERT(is_crankshafted()); + DCHECK(is_crankshafted()); return SafepointTableOffsetField::decode( READ_UINT32_FIELD(this, 
kKindSpecificFlags2Offset)); } @@ -4490,8 +4864,8 @@ void Code::set_safepoint_table_offset(unsigned offset) { CHECK(offset <= (1 << kSafepointTableOffsetBitCount)); - ASSERT(is_crankshafted()); - ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize))); + DCHECK(is_crankshafted()); + DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize))); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); int updated = SafepointTableOffsetField::update(previous, offset); WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated); @@ -4499,15 +4873,16 @@ unsigned Code::back_edge_table_offset() { - ASSERT_EQ(FUNCTION, kind()); + DCHECK_EQ(FUNCTION, kind()); return BackEdgeTableOffsetField::decode( - READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)); + READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2; } void Code::set_back_edge_table_offset(unsigned offset) { - ASSERT_EQ(FUNCTION, kind()); - ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize))); + DCHECK_EQ(FUNCTION, kind()); + DCHECK(IsAligned(offset, static_cast<unsigned>(kPointerSize))); + offset = offset >> kPointerSizeLog2; int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); int updated = BackEdgeTableOffsetField::update(previous, offset); WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated); @@ -4515,35 +4890,25 @@ bool Code::back_edges_patched_for_osr() { - ASSERT_EQ(FUNCTION, kind()); - return BackEdgesPatchedForOSRField::decode( - READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)); -} - - -void Code::set_back_edges_patched_for_osr(bool value) { - ASSERT_EQ(FUNCTION, kind()); - int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); - int updated = BackEdgesPatchedForOSRField::update(previous, value); - WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated); + DCHECK_EQ(FUNCTION, kind()); + return allow_osr_at_loop_nesting_level() > 0; } - byte Code::to_boolean_state() { return extra_ic_state(); } bool Code::has_function_cache() { - 
ASSERT(kind() == STUB); + DCHECK(kind() == STUB); return HasFunctionCacheField::decode( READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); } void Code::set_has_function_cache(bool flag) { - ASSERT(kind() == STUB); + DCHECK(kind() == STUB); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); int updated = HasFunctionCacheField::update(previous, flag); WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); @@ -4551,20 +4916,49 @@ bool Code::marked_for_deoptimization() { - ASSERT(kind() == OPTIMIZED_FUNCTION); + DCHECK(kind() == OPTIMIZED_FUNCTION); return MarkedForDeoptimizationField::decode( READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); } void Code::set_marked_for_deoptimization(bool flag) { - ASSERT(kind() == OPTIMIZED_FUNCTION); + DCHECK(kind() == OPTIMIZED_FUNCTION); + DCHECK(!flag || AllowDeoptimization::IsAllowed(GetIsolate())); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); int updated = MarkedForDeoptimizationField::update(previous, flag); WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); } +bool Code::is_weak_stub() { + return CanBeWeakStub() && WeakStubField::decode( + READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); +} + + +void Code::mark_as_weak_stub() { + DCHECK(CanBeWeakStub()); + int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); + int updated = WeakStubField::update(previous, true); + WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); +} + + +bool Code::is_invalidated_weak_stub() { + return is_weak_stub() && InvalidatedWeakStubField::decode( + READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); +} + + +void Code::mark_as_invalidated_weak_stub() { + DCHECK(is_inline_cache_stub()); + int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); + int updated = InvalidatedWeakStubField::update(previous, true); + WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); +} + + bool Code::is_inline_cache_stub() { Kind kind = this->kind(); switch (kind) 
{ @@ -4592,17 +4986,15 @@ void Code::set_constant_pool(Object* value) { - ASSERT(value->IsConstantPoolArray()); + DCHECK(value->IsConstantPoolArray()); WRITE_FIELD(this, kConstantPoolOffset, value); WRITE_BARRIER(GetHeap(), this, kConstantPoolOffset, value); } -Code::Flags Code::ComputeFlags(Kind kind, - InlineCacheState ic_state, - ExtraICState extra_ic_state, - StubType type, - InlineCacheHolderFlag holder) { +Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state, + ExtraICState extra_ic_state, StubType type, + CacheHolderFlag holder) { // Compute the bit mask. unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) @@ -4615,15 +5007,14 @@ Code::Flags Code::ComputeMonomorphicFlags(Kind kind, ExtraICState extra_ic_state, - InlineCacheHolderFlag holder, + CacheHolderFlag holder, StubType type) { return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder); } -Code::Flags Code::ComputeHandlerFlags(Kind handler_kind, - StubType type, - InlineCacheHolderFlag holder) { +Code::Flags Code::ComputeHandlerFlags(Kind handler_kind, StubType type, + CacheHolderFlag holder) { return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder); } @@ -4648,7 +5039,7 @@ } -InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) { +CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) { return CacheHolderField::decode(flags); } @@ -4659,6 +5050,12 @@ } +Code::Flags Code::RemoveTypeAndHolderFromFlags(Flags flags) { + int bits = flags & ~TypeField::kMask & ~CacheHolderField::kMask; + return static_cast<Flags>(bits); +} + + Code* Code::GetCodeFromTargetAddress(Address address) { HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize); // GetCodeFromTargetAddress might be called when marking objects during mark @@ -4677,10 +5074,9 @@ bool Code::IsWeakObjectInOptimizedCode(Object* object) { - ASSERT(is_optimized_code()); + if (!FLAG_collect_maps) return false; if (object->IsMap()) { return 
Map::cast(object)->CanTransition() && - FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code; } if (object->IsJSObject() || @@ -4695,7 +5091,7 @@ public: FindAndReplacePattern() : count_(0) { } void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) { - ASSERT(count_ < kMaxCount); + DCHECK(count_ < kMaxCount); find_[count_] = map_to_find; replace_[count_] = obj_to_replace; ++count_; @@ -4709,13 +5105,20 @@ }; -Object* Map::prototype() { +bool Code::IsWeakObjectInIC(Object* object) { + return object->IsMap() && Map::cast(object)->CanTransition() && + FLAG_collect_maps && + FLAG_weak_embedded_maps_in_ic; +} + + +Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); } void Map::set_prototype(Object* value, WriteBarrierMode mode) { - ASSERT(value->IsNull() || value->IsJSReceiver()); + DCHECK(value->IsNull() || value->IsJSReceiver()); WRITE_FIELD(this, kPrototypeOffset, value); CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode); } @@ -4723,21 +5126,17 @@ // If the descriptor is using the empty transition array, install a new empty // transition array that will have place for an element transition. 
-static MaybeObject* EnsureHasTransitionArray(Map* map) { - TransitionArray* transitions; - MaybeObject* maybe_transitions; +static void EnsureHasTransitionArray(Handle<Map> map) { + Handle<TransitionArray> transitions; if (!map->HasTransitionArray()) { - maybe_transitions = TransitionArray::Allocate(map->GetIsolate(), 0); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; + transitions = TransitionArray::Allocate(map->GetIsolate(), 0); transitions->set_back_pointer_storage(map->GetBackPointer()); } else if (!map->transitions()->IsFullTransitionArray()) { - maybe_transitions = map->transitions()->ExtendToFullTransitionArray(); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; + transitions = TransitionArray::ExtendToFullTransitionArray(map); } else { - return map; + return; } - map->set_transitions(transitions); - return transitions; + map->set_transitions(*transitions); } @@ -4752,38 +5151,23 @@ void Map::set_bit_field3(uint32_t bits) { - // Ensure the upper 2 bits have the same value by sign extending it. This is - // necessary to be able to use the 31st bit. 
- int value = bits << 1; - WRITE_FIELD(this, kBitField3Offset, Smi::FromInt(value >> 1)); + if (kInt32Size != kPointerSize) { + WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0); + } + WRITE_UINT32_FIELD(this, kBitField3Offset, bits); } uint32_t Map::bit_field3() { - Object* value = READ_FIELD(this, kBitField3Offset); - return Smi::cast(value)->value(); -} - - -void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) { - Object* back_pointer = GetBackPointer(); - - if (Heap::ShouldZapGarbage() && HasTransitionArray()) { - ZapTransitions(); - } - - WRITE_FIELD(this, kTransitionsOrBackPointerOffset, back_pointer); - CONDITIONAL_WRITE_BARRIER( - heap, this, kTransitionsOrBackPointerOffset, back_pointer, mode); + return READ_UINT32_FIELD(this, kBitField3Offset); } -void Map::AppendDescriptor(Descriptor* desc, - const DescriptorArray::WhitenessWitness& witness) { +void Map::AppendDescriptor(Descriptor* desc) { DescriptorArray* descriptors = instance_descriptors(); int number_of_own_descriptors = NumberOfOwnDescriptors(); - ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors); - descriptors->Append(desc, witness); + DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors); + descriptors->Append(desc); SetNumberOfOwnDescriptors(number_of_own_descriptors + 1); } @@ -4793,7 +5177,7 @@ if (object->IsDescriptorArray()) { return TransitionArray::cast(object)->back_pointer_storage(); } else { - ASSERT(object->IsMap() || object->IsUndefined()); + DCHECK(object->IsMap() || object->IsUndefined()); return object; } } @@ -4804,7 +5188,7 @@ } -bool Map::HasTransitionArray() { +bool Map::HasTransitionArray() const { Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); return object->IsTransitionArray(); } @@ -4824,33 +5208,14 @@ } -MaybeObject* Map::AddTransition(Name* key, - Map* target, - SimpleTransitionFlag flag) { - if (HasTransitionArray()) return transitions()->CopyInsert(key, target); - return 
TransitionArray::NewWith(flag, key, target, GetBackPointer()); -} - - -void Map::SetTransition(int transition_index, Map* target) { - transitions()->SetTarget(transition_index, target); -} - - Map* Map::GetTransition(int transition_index) { return transitions()->GetTarget(transition_index); } -MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) { - TransitionArray* transitions; - MaybeObject* maybe_transitions = AddTransition( - GetHeap()->elements_transition_symbol(), - transitioned_map, - FULL_TRANSITION); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; - set_transitions(transitions); - return transitions; +int Map::SearchTransition(Name* name) { + if (HasTransitionArray()) return transitions()->Search(name); + return TransitionArray::kNotFound; } @@ -4863,19 +5228,18 @@ } -MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) { - MaybeObject* allow_prototype = EnsureHasTransitionArray(this); - if (allow_prototype->IsFailure()) return allow_prototype; - int old_number_of_transitions = NumberOfProtoTransitions(); +void Map::SetPrototypeTransitions( + Handle<Map> map, Handle<FixedArray> proto_transitions) { + EnsureHasTransitionArray(map); + int old_number_of_transitions = map->NumberOfProtoTransitions(); #ifdef DEBUG - if (HasPrototypeTransitions()) { - ASSERT(GetPrototypeTransitions() != proto_transitions); - ZapPrototypeTransitions(); + if (map->HasPrototypeTransitions()) { + DCHECK(map->GetPrototypeTransitions() != *proto_transitions); + map->ZapPrototypeTransitions(); } #endif - transitions()->SetPrototypeTransitions(proto_transitions); - SetNumberOfProtoTransitions(old_number_of_transitions); - return this; + map->transitions()->SetPrototypeTransitions(*proto_transitions); + map->SetNumberOfProtoTransitions(old_number_of_transitions); } @@ -4884,8 +5248,8 @@ } -TransitionArray* Map::transitions() { - ASSERT(HasTransitionArray()); +TransitionArray* Map::transitions() const { + 
DCHECK(HasTransitionArray()); Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); return TransitionArray::cast(object); } @@ -4904,12 +5268,12 @@ if (target->instance_descriptors() == instance_descriptors()) { Name* key = transitions()->GetKey(i); int new_target_index = transition_array->Search(key); - ASSERT(new_target_index != TransitionArray::kNotFound); - ASSERT(transition_array->GetTarget(new_target_index) == target); + DCHECK(new_target_index != TransitionArray::kNotFound); + DCHECK(transition_array->GetTarget(new_target_index) == target); } } #endif - ASSERT(transitions() != transition_array); + DCHECK(transitions() != transition_array); ZapTransitions(); } @@ -4920,14 +5284,14 @@ void Map::init_back_pointer(Object* undefined) { - ASSERT(undefined->IsUndefined()); + DCHECK(undefined->IsUndefined()); WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined); } void Map::SetBackPointer(Object* value, WriteBarrierMode mode) { - ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE); - ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) || + DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE); + DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) || (value->IsMap() && GetBackPointer()->IsUndefined())); Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); if (object->IsTransitionArray()) { @@ -4940,23 +5304,6 @@ } -// Can either be Smi (no transitions), normal transition array, or a transition -// array with the header overwritten as a Smi (thus iterating). 
-TransitionArray* Map::unchecked_transition_array() { - Object* object = *HeapObject::RawField(this, - Map::kTransitionsOrBackPointerOffset); - TransitionArray* transition_array = static_cast<TransitionArray*>(object); - return transition_array; -} - - -HeapObject* Map::UncheckedPrototypeTransitions() { - ASSERT(HasTransitionArray()); - ASSERT(unchecked_transition_array()->HasPrototypeTransitions()); - return unchecked_transition_array()->UncheckedPrototypeTransitions(); -} - - ACCESSORS(Map, code_cache, Object, kCodeCacheOffset) ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset) ACCESSORS(Map, constructor, Object, kConstructorOffset) @@ -4968,9 +5315,10 @@ ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset) ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset) ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset) -ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset) +ACCESSORS(GlobalObject, global_proxy, JSObject, kGlobalProxyOffset) ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset) +ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset) ACCESSORS(AccessorInfo, name, Object, kNameOffset) ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset) @@ -4991,7 +5339,6 @@ ACCESSORS(AccessorPair, getter, Object, kGetterOffset) ACCESSORS(AccessorPair, setter, Object, kSetterOffset) -ACCESSORS_TO_SMI(AccessorPair, access_flags, kAccessFlagsOffset) ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset) ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset) @@ -5063,6 +5410,8 @@ kEvalFrominstructionsOffsetOffset) ACCESSORS_TO_SMI(Script, flags, kFlagsOffset) BOOL_ACCESSORS(Script, flags, is_shared_cross_origin, kIsSharedCrossOriginBit) +ACCESSORS(Script, source_url, Object, kSourceUrlOffset) +ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset) Script::CompilationType Script::compilation_type() { return 
BooleanBit::get(flags(), kCompilationTypeBit) ? @@ -5082,7 +5431,6 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex) ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex) ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex) @@ -5092,20 +5440,19 @@ ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex) ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex) ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex) -#endif ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset) ACCESSORS(SharedFunctionInfo, optimized_code_map, Object, kOptimizedCodeMapOffset) ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset) -ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset) +ACCESSORS(SharedFunctionInfo, feedback_vector, FixedArray, + kFeedbackVectorOffset) ACCESSORS(SharedFunctionInfo, instance_class_name, Object, kInstanceClassNameOffset) ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset) ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset) ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset) ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset) -SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset) SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset) @@ -5160,21 +5507,23 @@ SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason, kOptCountAndBailoutReasonOffset) SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset) +SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset) +SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset) #else #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ STATIC_ASSERT(holder::offset % kPointerSize == 0); \ - int holder::name() { \ + int holder::name() const { \ int value = READ_INT_FIELD(this, offset); \ - ASSERT(kHeapObjectTag == 1); \ - 
ASSERT((value & kHeapObjectTag) == 0); \ + DCHECK(kHeapObjectTag == 1); \ + DCHECK((value & kHeapObjectTag) == 0); \ return value >> 1; \ } \ void holder::set_##name(int value) { \ - ASSERT(kHeapObjectTag == 1); \ - ASSERT((value & 0xC0000000) == 0xC0000000 || \ - (value & 0xC0000000) == 0x000000000); \ + DCHECK(kHeapObjectTag == 1); \ + DCHECK((value & 0xC0000000) == 0xC0000000 || \ + (value & 0xC0000000) == 0x0); \ WRITE_INT_FIELD(this, \ offset, \ (value << 1) & ~kHeapObjectTag); \ @@ -5210,32 +5559,16 @@ PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, opt_count_and_bailout_reason, kOptCountAndBailoutReasonOffset) - PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset) -#endif - - -int SharedFunctionInfo::construction_count() { - return READ_BYTE_FIELD(this, kConstructionCountOffset); -} - - -void SharedFunctionInfo::set_construction_count(int value) { - ASSERT(0 <= value && value < 256); - WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value)); -} - - -BOOL_ACCESSORS(SharedFunctionInfo, - compiler_hints, - live_objects_may_exist, - kLiveObjectsMayExist) - +PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, + ast_node_count, + kAstNodeCountOffset) +PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, + profiler_ticks, + kProfilerTicksOffset) -bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() { - return initial_map() != GetHeap()->undefined_value(); -} +#endif BOOL_GETTER(SharedFunctionInfo, @@ -5256,12 +5589,6 @@ } -int SharedFunctionInfo::profiler_ticks() { - if (code()->kind() != Code::FUNCTION) return 0; - return code()->profiler_ticks(); -} - - StrictMode SharedFunctionInfo::strict_mode() { return BooleanBit::get(compiler_hints(), kStrictModeFunction) ? STRICT : SLOPPY; @@ -5270,7 +5597,7 @@ void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) { // We only allow mode transitions from sloppy to strict. 
- ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode); + DCHECK(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode); int hints = compiler_hints(); hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT); set_compiler_hints(hints); @@ -5286,17 +5613,10 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction) -BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize, - kDontOptimize) -BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator) - -void SharedFunctionInfo::BeforeVisitingPointers() { - if (IsInobjectSlackTrackingInProgress()) DetachInitialMap(); -} - +BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow) ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset) ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset) @@ -5318,12 +5638,12 @@ void SharedFunctionInfo::DontAdaptArguments() { - ASSERT(code()->kind() == Code::BUILTIN); + DCHECK(code()->kind() == Code::BUILTIN); set_formal_parameter_count(kDontAdaptArgumentsSentinel); } -int SharedFunctionInfo::start_position() { +int SharedFunctionInfo::start_position() const { return start_position_and_type() >> kStartPositionShift; } @@ -5334,13 +5654,13 @@ } -Code* SharedFunctionInfo::code() { +Code* SharedFunctionInfo::code() const { return Code::cast(READ_FIELD(this, kCodeOffset)); } void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) { - ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION); + DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION); WRITE_FIELD(this, 
kCodeOffset, value); CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode); } @@ -5354,12 +5674,13 @@ flusher->EvictCandidate(this); } - ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL); + DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL); + set_code(value); } -ScopeInfo* SharedFunctionInfo::scope_info() { +ScopeInfo* SharedFunctionInfo::scope_info() const { return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset)); } @@ -5387,7 +5708,7 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() { - ASSERT(IsApiFunction()); + DCHECK(IsApiFunction()); return FunctionTemplateInfo::cast(function_data()); } @@ -5398,7 +5719,7 @@ BuiltinFunctionId SharedFunctionInfo::builtin_function_id() { - ASSERT(HasBuiltinFunctionId()); + DCHECK(HasBuiltinFunctionId()); return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value()); } @@ -5484,6 +5805,22 @@ } +bool JSFunction::IsFromNativeScript() { + Object* script = shared()->script(); + bool native = script->IsScript() && + Script::cast(script)->type()->value() == Script::TYPE_NATIVE; + DCHECK(!IsBuiltin() || native); // All builtins are also native. 
+ return native; +} + + +bool JSFunction::IsFromExtensionScript() { + Object* script = shared()->script(); + return script->IsScript() && + Script::cast(script)->type()->value() == Script::TYPE_EXTENSION; +} + + bool JSFunction::NeedsArgumentsAdaption() { return shared()->formal_parameter_count() != SharedFunctionInfo::kDontAdaptArgumentsSentinel; @@ -5518,6 +5855,12 @@ } +bool JSFunction::IsInobjectSlackTrackingInProgress() { + return has_initial_map() && + initial_map()->construction_count() != JSFunction::kNoSlackTracking; +} + + Code* JSFunction::code() { return Code::cast( Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset))); @@ -5525,7 +5868,7 @@ void JSFunction::set_code(Code* value) { - ASSERT(!GetHeap()->InNewSpace(value)); + DCHECK(!GetHeap()->InNewSpace(value)); Address entry = value->entry(); WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry)); GetHeap()->incremental_marking()->RecordWriteOfCodeEntry( @@ -5536,7 +5879,7 @@ void JSFunction::set_code_no_write_barrier(Code* value) { - ASSERT(!GetHeap()->InNewSpace(value)); + DCHECK(!GetHeap()->InNewSpace(value)); Address entry = value->entry(); WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry)); } @@ -5570,8 +5913,13 @@ } +JSObject* JSFunction::global_proxy() { + return context()->global_proxy(); +} + + void JSFunction::set_context(Object* value) { - ASSERT(value->IsUndefined() || value->IsContext()); + DCHECK(value->IsUndefined() || value->IsContext()); WRITE_FIELD(this, kContextOffset, value); WRITE_BARRIER(GetHeap(), this, kContextOffset, value); } @@ -5585,11 +5933,6 @@ } -void JSFunction::set_initial_map(Map* value) { - set_prototype_or_initial_map(value); -} - - bool JSFunction::has_initial_map() { return prototype_or_initial_map()->IsMap(); } @@ -5606,7 +5949,7 @@ Object* JSFunction::instance_prototype() { - ASSERT(has_instance_prototype()); + DCHECK(has_instance_prototype()); if (has_initial_map()) return 
initial_map()->prototype(); // When there is no initial map and the prototype is a JSObject, the // initial map field is used for the prototype field. @@ -5615,7 +5958,7 @@ Object* JSFunction::prototype() { - ASSERT(has_prototype()); + DCHECK(has_prototype()); // If the function's prototype property has been set to a non-JSObject // value, that value is stored in the constructor field of the map. if (map()->has_non_instance_prototype()) return map()->constructor(); @@ -5635,64 +5978,64 @@ FixedArray* JSFunction::literals() { - ASSERT(!shared()->bound()); + DCHECK(!shared()->bound()); return literals_or_bindings(); } void JSFunction::set_literals(FixedArray* literals) { - ASSERT(!shared()->bound()); + DCHECK(!shared()->bound()); set_literals_or_bindings(literals); } FixedArray* JSFunction::function_bindings() { - ASSERT(shared()->bound()); + DCHECK(shared()->bound()); return literals_or_bindings(); } void JSFunction::set_function_bindings(FixedArray* bindings) { - ASSERT(shared()->bound()); + DCHECK(shared()->bound()); // Bound function literal may be initialized to the empty fixed array // before the bindings are set. - ASSERT(bindings == GetHeap()->empty_fixed_array() || + DCHECK(bindings == GetHeap()->empty_fixed_array() || bindings->map() == GetHeap()->fixed_cow_array_map()); set_literals_or_bindings(bindings); } int JSFunction::NumberOfLiterals() { - ASSERT(!shared()->bound()); + DCHECK(!shared()->bound()); return literals()->length(); } Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) { - ASSERT(id < kJSBuiltinsCount); // id is unsigned. + DCHECK(id < kJSBuiltinsCount); // id is unsigned. return READ_FIELD(this, OffsetOfFunctionWithId(id)); } void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id, Object* value) { - ASSERT(id < kJSBuiltinsCount); // id is unsigned. + DCHECK(id < kJSBuiltinsCount); // id is unsigned. 
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value); WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value); } Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) { - ASSERT(id < kJSBuiltinsCount); // id is unsigned. + DCHECK(id < kJSBuiltinsCount); // id is unsigned. return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id))); } void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id, Code* value) { - ASSERT(id < kJSBuiltinsCount); // id is unsigned. + DCHECK(id < kJSBuiltinsCount); // id is unsigned. WRITE_FIELD(this, OffsetOfCodeWithId(id), value); - ASSERT(!GetHeap()->InNewSpace(value)); + DCHECK(!GetHeap()->InNewSpace(value)); } @@ -5703,15 +6046,35 @@ void JSProxy::InitializeBody(int object_size, Object* value) { - ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value)); + DCHECK(!value->IsHeapObject() || !GetHeap()->InNewSpace(value)); for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) { WRITE_FIELD(this, offset, value); } } -ACCESSORS(JSSet, table, Object, kTableOffset) -ACCESSORS(JSMap, table, Object, kTableOffset) +ACCESSORS(JSCollection, table, Object, kTableOffset) + + +#define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset) \ + template<class Derived, class TableType> \ + type* OrderedHashTableIterator<Derived, TableType>::name() const { \ + return type::cast(READ_FIELD(this, offset)); \ + } \ + template<class Derived, class TableType> \ + void OrderedHashTableIterator<Derived, TableType>::set_##name( \ + type* value, WriteBarrierMode mode) { \ + WRITE_FIELD(this, offset, value); \ + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ + } + +ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset) +ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Smi, kIndexOffset) +ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Smi, kKindOffset) + +#undef ORDERED_HASH_TABLE_ITERATOR_ACCESSORS + + ACCESSORS(JSWeakCollection, table, Object, kTableOffset) 
ACCESSORS(JSWeakCollection, next, Object, kNextOffset) @@ -5733,32 +6096,36 @@ ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset) SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset) +bool JSGeneratorObject::is_suspended() { + DCHECK_LT(kGeneratorExecuting, kGeneratorClosed); + DCHECK_EQ(kGeneratorClosed, 0); + return continuation() > 0; +} -JSGeneratorObject* JSGeneratorObject::cast(Object* obj) { - ASSERT(obj->IsJSGeneratorObject()); - ASSERT(HeapObject::cast(obj)->Size() == JSGeneratorObject::kSize); - return reinterpret_cast<JSGeneratorObject*>(obj); +bool JSGeneratorObject::is_closed() { + return continuation() == kGeneratorClosed; } +bool JSGeneratorObject::is_executing() { + return continuation() == kGeneratorExecuting; +} ACCESSORS(JSModule, context, Object, kContextOffset) ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset) -JSModule* JSModule::cast(Object* obj) { - ASSERT(obj->IsJSModule()); - ASSERT(HeapObject::cast(obj)->Size() == JSModule::kSize); - return reinterpret_cast<JSModule*>(obj); -} +ACCESSORS(JSValue, value, Object, kValueOffset) -ACCESSORS(JSValue, value, Object, kValueOffset) +HeapNumber* HeapNumber::cast(Object* object) { + SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber()); + return reinterpret_cast<HeapNumber*>(object); +} -JSValue* JSValue::cast(Object* obj) { - ASSERT(obj->IsJSValue()); - ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize); - return reinterpret_cast<JSValue*>(obj); +const HeapNumber* HeapNumber::cast(const Object* object) { + SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber()); + return reinterpret_cast<const HeapNumber*>(object); } @@ -5773,13 +6140,6 @@ ACCESSORS(JSDate, sec, Object, kSecOffset) -JSDate* JSDate::cast(Object* obj) { - ASSERT(obj->IsJSDate()); - ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize); - return reinterpret_cast<JSDate*>(obj); -} - - ACCESSORS(JSMessageObject, type, String, kTypeOffset) 
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset) ACCESSORS(JSMessageObject, script, Object, kScriptOffset) @@ -5788,13 +6148,6 @@ SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset) -JSMessageObject* JSMessageObject::cast(Object* obj) { - ASSERT(obj->IsJSMessageObject()); - ASSERT(HeapObject::cast(obj)->Size() == JSMessageObject::kSize); - return reinterpret_cast<JSMessageObject*>(obj); -} - - INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset) INT_ACCESSORS(Code, prologue_offset, kPrologueOffset) ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset) @@ -5809,7 +6162,7 @@ WRITE_FIELD(this, kHandlerTableOffset, NULL); WRITE_FIELD(this, kDeoptimizationDataOffset, NULL); WRITE_FIELD(this, kConstantPoolOffset, NULL); - // Do not wipe out e.g. a minor key. + // Do not wipe out major/minor keys on a code stub or IC if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) { WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL); } @@ -5817,36 +6170,29 @@ Object* Code::type_feedback_info() { - ASSERT(kind() == FUNCTION); + DCHECK(kind() == FUNCTION); return raw_type_feedback_info(); } void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) { - ASSERT(kind() == FUNCTION); + DCHECK(kind() == FUNCTION); set_raw_type_feedback_info(value, mode); CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset, value, mode); } -int Code::stub_info() { - ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC || - kind() == BINARY_OP_IC || kind() == LOAD_IC); - return Smi::cast(raw_type_feedback_info())->value(); +uint32_t Code::stub_key() { + DCHECK(IsCodeStubOrIC()); + Smi* smi_key = Smi::cast(raw_type_feedback_info()); + return static_cast<uint32_t>(smi_key->value()); } -void Code::set_stub_info(int value) { - ASSERT(kind() == COMPARE_IC || - kind() == COMPARE_NIL_IC || - kind() == BINARY_OP_IC || - kind() == STUB || - kind() == LOAD_IC || - kind() == KEYED_LOAD_IC || - kind() == STORE_IC || - kind() == 
KEYED_STORE_IC); - set_raw_type_feedback_info(Smi::FromInt(value)); +void Code::set_stub_key(uint32_t key) { + DCHECK(IsCodeStubOrIC()); + set_raw_type_feedback_info(Smi::FromInt(key)); } @@ -5897,7 +6243,7 @@ ACCESSORS(JSArray, length, Object, kLengthOffset) -void* JSArrayBuffer::backing_store() { +void* JSArrayBuffer::backing_store() const { intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset); return reinterpret_cast<void*>(ptr); } @@ -5968,7 +6314,7 @@ JSRegExp::Flags JSRegExp::GetFlags() { - ASSERT(this->data()->IsFixedArray()); + DCHECK(this->data()->IsFixedArray()); Object* data = this->data(); Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex)); return Flags(smi->value()); @@ -5976,7 +6322,7 @@ String* JSRegExp::Pattern() { - ASSERT(this->data()->IsFixedArray()); + DCHECK(this->data()->IsFixedArray()); Object* data = this->data(); String* pattern= String::cast(FixedArray::cast(data)->get(kSourceIndex)); return pattern; @@ -5984,14 +6330,14 @@ Object* JSRegExp::DataAt(int index) { - ASSERT(TypeTag() != NOT_COMPILED); + DCHECK(TypeTag() != NOT_COMPILED); return FixedArray::cast(data())->get(index); } void JSRegExp::SetDataAt(int index, Object* value) { - ASSERT(TypeTag() != NOT_COMPILED); - ASSERT(index >= kDataIndex); // Only implementation data can be set this way. + DCHECK(TypeTag() != NOT_COMPILED); + DCHECK(index >= kDataIndex); // Only implementation data can be set this way. FixedArray::cast(data())->set(index, value); } @@ -6006,7 +6352,7 @@ // pointer may point to a one pointer filler map. 
if (ElementsAreSafeToExamine()) { Map* map = fixed_array->map(); - ASSERT((IsFastSmiOrObjectElementsKind(kind) && + DCHECK((IsFastSmiOrObjectElementsKind(kind) && (map == GetHeap()->fixed_array_map() || map == GetHeap()->fixed_cow_array_map())) || (IsFastDoubleElementsKind(kind) && @@ -6016,7 +6362,7 @@ fixed_array->IsFixedArray() && fixed_array->IsDictionary()) || (kind > DICTIONARY_ELEMENTS)); - ASSERT((kind != SLOPPY_ARGUMENTS_ELEMENTS) || + DCHECK((kind != SLOPPY_ARGUMENTS_ELEMENTS) || (elements()->IsFixedArray() && elements()->length() >= 2)); } #endif @@ -6071,7 +6417,7 @@ bool JSObject::HasExternalArrayElements() { HeapObject* array = elements(); - ASSERT(array != NULL); + DCHECK(array != NULL); return array->IsExternalArray(); } @@ -6079,7 +6425,7 @@ #define EXTERNAL_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \ bool JSObject::HasExternal##Type##Elements() { \ HeapObject* array = elements(); \ - ASSERT(array != NULL); \ + DCHECK(array != NULL); \ if (!array->IsHeapObject()) \ return false; \ return array->map()->instance_type() == EXTERNAL_##TYPE##_ARRAY_TYPE; \ @@ -6092,7 +6438,7 @@ bool JSObject::HasFixedTypedArrayElements() { HeapObject* array = elements(); - ASSERT(array != NULL); + DCHECK(array != NULL); return array->IsFixedTypedArrayBase(); } @@ -6100,7 +6446,7 @@ #define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \ bool JSObject::HasFixed##Type##Elements() { \ HeapObject* array = elements(); \ - ASSERT(array != NULL); \ + DCHECK(array != NULL); \ if (!array->IsHeapObject()) \ return false; \ return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \ @@ -6121,32 +6467,14 @@ } -MaybeObject* JSObject::EnsureWritableFastElements() { - ASSERT(HasFastSmiOrObjectElements()); - FixedArray* elems = FixedArray::cast(elements()); - Isolate* isolate = GetIsolate(); - if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems; - Object* writable_elems; - { MaybeObject* maybe_writable_elems = 
isolate->heap()->CopyFixedArrayWithMap( - elems, isolate->heap()->fixed_array_map()); - if (!maybe_writable_elems->ToObject(&writable_elems)) { - return maybe_writable_elems; - } - } - set_elements(FixedArray::cast(writable_elems)); - isolate->counters()->cow_arrays_converted()->Increment(); - return writable_elems; -} - - NameDictionary* JSObject::property_dictionary() { - ASSERT(!HasFastProperties()); + DCHECK(!HasFastProperties()); return NameDictionary::cast(properties()); } SeededNumberDictionary* JSObject::element_dictionary() { - ASSERT(HasDictionaryElements()); + DCHECK(HasDictionaryElements()); return SeededNumberDictionary::cast(elements()); } @@ -6169,6 +6497,10 @@ return String::cast(this)->ComputeAndSetHash(); } +bool Name::IsOwn() { + return this->IsSymbol() && Symbol::cast(this)->is_own(); +} + StringHasher::StringHasher(int length, uint32_t seed) : length_(length), @@ -6176,7 +6508,7 @@ array_index_(0), is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize), is_first_char_(true) { - ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0); + DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0); } @@ -6212,7 +6544,7 @@ bool StringHasher::UpdateIndex(uint16_t c) { - ASSERT(is_array_index_); + DCHECK(is_array_index_); if (c < '0' || c > '9') { is_array_index_ = false; return false; @@ -6236,7 +6568,7 @@ template<typename Char> inline void StringHasher::AddCharacters(const Char* chars, int length) { - ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2); + DCHECK(sizeof(Char) == 1 || sizeof(Char) == 2); int i = 0; if (is_array_index_) { for (; i < length; i++) { @@ -6248,7 +6580,7 @@ } } for (; i < length; i++) { - ASSERT(!is_array_index_); + DCHECK(!is_array_index_); AddCharacter(chars[i]); } } @@ -6264,6 +6596,35 @@ } +uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) { + IteratingStringHasher hasher(string->length(), seed); + // Nothing to do. 
+ if (hasher.has_trivial_hash()) return hasher.GetHashField(); + ConsString* cons_string = String::VisitFlat(&hasher, string); + // The string was flat. + if (cons_string == NULL) return hasher.GetHashField(); + // This is a ConsString, iterate across it. + ConsStringIteratorOp op(cons_string); + int offset; + while (NULL != (string = op.Next(&offset))) { + String::VisitFlat(&hasher, string, offset); + } + return hasher.GetHashField(); +} + + +void IteratingStringHasher::VisitOneByteString(const uint8_t* chars, + int length) { + AddCharacters(chars, length); +} + + +void IteratingStringHasher::VisitTwoByteString(const uint16_t* chars, + int length) { + AddCharacters(chars, length); +} + + bool Name::AsArrayIndex(uint32_t* index) { return IsString() && String::cast(this)->AsArrayIndex(index); } @@ -6278,8 +6639,29 @@ } -Object* JSReceiver::GetPrototype() { - return map()->prototype(); +void String::SetForwardedInternalizedString(String* canonical) { + DCHECK(IsInternalizedString()); + DCHECK(HasHashCode()); + if (canonical == this) return; // No need to forward. + DCHECK(SlowEquals(canonical)); + DCHECK(canonical->IsInternalizedString()); + DCHECK(canonical->HasHashCode()); + WRITE_FIELD(this, kHashFieldOffset, canonical); + // Setting the hash field to a tagged value sets the LSB, causing the hash + // code to be interpreted as uninitialized. We use this fact to recognize + // that we have a forwarded string. 
+ DCHECK(!HasHashCode()); +} + + +String* String::GetForwardedInternalizedString() { + DCHECK(IsInternalizedString()); + if (HasHashCode()) return this; + String* canonical = String::cast(READ_FIELD(this, kHashFieldOffset)); + DCHECK(canonical->IsInternalizedString()); + DCHECK(SlowEquals(canonical)); + DCHECK(canonical->HasHashCode()); + return canonical; } @@ -6288,38 +6670,43 @@ } -bool JSReceiver::HasProperty(Handle<JSReceiver> object, - Handle<Name> name) { +Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object, + Handle<Name> name) { if (object->IsJSProxy()) { Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); return JSProxy::HasPropertyWithHandler(proxy, name); } - return GetPropertyAttribute(object, name) != ABSENT; + Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name); + if (!result.has_value) return Maybe<bool>(); + return maybe(result.value != ABSENT); } -bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object, - Handle<Name> name) { +Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object, + Handle<Name> name) { if (object->IsJSProxy()) { Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); return JSProxy::HasPropertyWithHandler(proxy, name); } - return GetLocalPropertyAttribute(object, name) != ABSENT; + Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name); + if (!result.has_value) return Maybe<bool>(); + return maybe(result.value != ABSENT); } -PropertyAttributes JSReceiver::GetPropertyAttribute(Handle<JSReceiver> object, - Handle<Name> key) { +Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes( + Handle<JSReceiver> object, Handle<Name> key) { uint32_t index; if (object->IsJSObject() && key->AsArrayIndex(&index)) { return GetElementAttribute(object, index); } - return GetPropertyAttributeWithReceiver(object, object, key); + LookupIterator it(object, key); + return GetPropertyAttributes(&it); } -PropertyAttributes JSReceiver::GetElementAttribute(Handle<JSReceiver> object, - 
uint32_t index) { +Maybe<PropertyAttributes> JSReceiver::GetElementAttribute( + Handle<JSReceiver> object, uint32_t index) { if (object->IsJSProxy()) { return JSProxy::GetElementAttributeWithHandler( Handle<JSProxy>::cast(object), object, index); @@ -6330,16 +6717,18 @@ bool JSGlobalObject::IsDetached() { - return JSGlobalProxy::cast(global_receiver())->IsDetachedFrom(this); + return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(this); } -bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) { - return GetPrototype() != global; +bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) const { + const PrototypeIterator iter(this->GetIsolate(), + const_cast<JSGlobalProxy*>(this)); + return iter.GetCurrent() != global; } -Handle<Object> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) { +Handle<Smi> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) { return object->IsJSProxy() ? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object)) : JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object)); @@ -6353,27 +6742,32 @@ } -bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) { +Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) { if (object->IsJSProxy()) { Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); return JSProxy::HasElementWithHandler(proxy, index); } - return JSObject::GetElementAttributeWithReceiver( - Handle<JSObject>::cast(object), object, index, true) != ABSENT; + Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver( + Handle<JSObject>::cast(object), object, index, true); + if (!result.has_value) return Maybe<bool>(); + return maybe(result.value != ABSENT); } -bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) { +Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object, + uint32_t index) { if (object->IsJSProxy()) { Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); return 
JSProxy::HasElementWithHandler(proxy, index); } - return JSObject::GetElementAttributeWithReceiver( - Handle<JSObject>::cast(object), object, index, false) != ABSENT; + Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver( + Handle<JSObject>::cast(object), object, index, false); + if (!result.has_value) return Maybe<bool>(); + return maybe(result.value != ABSENT); } -PropertyAttributes JSReceiver::GetLocalElementAttribute( +Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttribute( Handle<JSReceiver> object, uint32_t index) { if (object->IsJSProxy()) { return JSProxy::GetElementAttributeWithHandler( @@ -6404,16 +6798,6 @@ } -bool AccessorInfo::prohibits_overwriting() { - return BooleanBit::get(flag(), kProhibitsOverwritingBit); -} - - -void AccessorInfo::set_prohibits_overwriting(bool value) { - set_flag(BooleanBit::set(flag(), kProhibitsOverwritingBit, value)); -} - - PropertyAttributes AccessorInfo::property_attributes() { return AttributesField::decode(static_cast<uint32_t>(flag()->value())); } @@ -6425,69 +6809,45 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) { - Object* function_template = expected_receiver_type(); - if (!function_template->IsFunctionTemplateInfo()) return true; - return FunctionTemplateInfo::cast(function_template)->IsTemplateFor(receiver); -} - - -void AccessorPair::set_access_flags(v8::AccessControl access_control) { - int current = access_flags()->value(); - current = BooleanBit::set(current, - kProhibitsOverwritingBit, - access_control & PROHIBITS_OVERWRITING); - current = BooleanBit::set(current, - kAllCanReadBit, - access_control & ALL_CAN_READ); - current = BooleanBit::set(current, - kAllCanWriteBit, - access_control & ALL_CAN_WRITE); - set_access_flags(Smi::FromInt(current)); -} - - -bool AccessorPair::all_can_read() { - return BooleanBit::get(access_flags(), kAllCanReadBit); -} - - -bool AccessorPair::all_can_write() { - return BooleanBit::get(access_flags(), kAllCanWriteBit); + if 
(!HasExpectedReceiverType()) return true; + if (!receiver->IsJSObject()) return false; + return FunctionTemplateInfo::cast(expected_receiver_type()) + ->IsTemplateFor(JSObject::cast(receiver)->map()); } -bool AccessorPair::prohibits_overwriting() { - return BooleanBit::get(access_flags(), kProhibitsOverwritingBit); +void ExecutableAccessorInfo::clear_setter() { + set_setter(GetIsolate()->heap()->undefined_value(), SKIP_WRITE_BARRIER); } -template<typename Shape, typename Key> -void Dictionary<Shape, Key>::SetEntry(int entry, - Object* key, - Object* value) { +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::SetEntry(int entry, + Handle<Object> key, + Handle<Object> value) { SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0))); } -template<typename Shape, typename Key> -void Dictionary<Shape, Key>::SetEntry(int entry, - Object* key, - Object* value, - PropertyDetails details) { - ASSERT(!key->IsName() || +template<typename Derived, typename Shape, typename Key> +void Dictionary<Derived, Shape, Key>::SetEntry(int entry, + Handle<Object> key, + Handle<Object> value, + PropertyDetails details) { + DCHECK(!key->IsName() || details.IsDeleted() || details.dictionary_index() > 0); - int index = HashTable<Shape, Key>::EntryToIndex(entry); + int index = DerivedHashTable::EntryToIndex(entry); DisallowHeapAllocation no_gc; WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc); - FixedArray::set(index, key, mode); - FixedArray::set(index+1, value, mode); + FixedArray::set(index, *key, mode); + FixedArray::set(index+1, *value, mode); FixedArray::set(index+2, details.AsSmi()); } bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) { - ASSERT(other->IsNumber()); + DCHECK(other->IsNumber()); return key == static_cast<uint32_t>(other->Number()); } @@ -6499,27 +6859,30 @@ uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key, Object* other) { - ASSERT(other->IsNumber()); + 
DCHECK(other->IsNumber()); return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0); } + uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) { return ComputeIntegerHash(key, seed); } + uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key, uint32_t seed, Object* other) { - ASSERT(other->IsNumber()); + DCHECK(other->IsNumber()); return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed); } -MaybeObject* NumberDictionaryShape::AsObject(Heap* heap, uint32_t key) { - return heap->NumberFromUint32(key); + +Handle<Object> NumberDictionaryShape::AsHandle(Isolate* isolate, uint32_t key) { + return isolate->factory()->NewNumberFromUint(key); } -bool NameDictionaryShape::IsMatch(Name* key, Object* other) { +bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) { // We know that all entries in a hash table had their hash keys created. // Use that knowledge to have fast failure. if (key->Hash() != Name::cast(other)->Hash()) return false; @@ -6527,63 +6890,72 @@ } -uint32_t NameDictionaryShape::Hash(Name* key) { +uint32_t NameDictionaryShape::Hash(Handle<Name> key) { return key->Hash(); } -uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) { +uint32_t NameDictionaryShape::HashForObject(Handle<Name> key, Object* other) { return Name::cast(other)->Hash(); } -MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) { - ASSERT(key->IsUniqueName()); +Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate, + Handle<Name> key) { + DCHECK(key->IsUniqueName()); return key; } -template <int entrysize> -bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) { +void NameDictionary::DoGenerateNewEnumerationIndices( + Handle<NameDictionary> dictionary) { + DerivedDictionary::GenerateNewEnumerationIndices(dictionary); +} + + +bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object* other) { return key->SameValue(other); } -template <int entrysize> 
-uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) { +uint32_t ObjectHashTableShape::Hash(Handle<Object> key) { return Smi::cast(key->GetHash())->value(); } -template <int entrysize> -uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key, - Object* other) { +uint32_t ObjectHashTableShape::HashForObject(Handle<Object> key, + Object* other) { return Smi::cast(other->GetHash())->value(); } -template <int entrysize> -MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap, - Object* key) { +Handle<Object> ObjectHashTableShape::AsHandle(Isolate* isolate, + Handle<Object> key) { return key; } +Handle<ObjectHashTable> ObjectHashTable::Shrink( + Handle<ObjectHashTable> table, Handle<Object> key) { + return DerivedHashTable::Shrink(table, key); +} + + template <int entrysize> -bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) { +bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) { return key->SameValue(other); } template <int entrysize> -uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) { - intptr_t hash = reinterpret_cast<intptr_t>(key); +uint32_t WeakHashTableShape<entrysize>::Hash(Handle<Object> key) { + intptr_t hash = reinterpret_cast<intptr_t>(*key); return (uint32_t)(hash & 0xFFFFFFFF); } template <int entrysize> -uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key, +uint32_t WeakHashTableShape<entrysize>::HashForObject(Handle<Object> key, Object* other) { intptr_t hash = reinterpret_cast<intptr_t>(other); return (uint32_t)(hash & 0xFFFFFFFF); @@ -6591,8 +6963,8 @@ template <int entrysize> -MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap, - Object* key) { +Handle<Object> WeakHashTableShape<entrysize>::AsHandle(Isolate* isolate, + Handle<Object> key) { return key; } @@ -6602,13 +6974,13 @@ // Please note this function is used during marking: // - MarkCompactCollector::MarkUnmarkedObject // - IncrementalMarking::Step - 
ASSERT(!heap->InNewSpace(heap->empty_fixed_array())); + DCHECK(!heap->InNewSpace(heap->empty_fixed_array())); WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array()); } void JSArray::EnsureSize(Handle<JSArray> array, int required_size) { - ASSERT(array->HasFastSmiOrObjectElements()); + DCHECK(array->HasFastSmiOrObjectElements()); Handle<FixedArray> elts = handle(FixedArray::cast(array->elements())); const int kArraySizeThatFitsComfortablyInNewSpace = 128; if (elts->length() < required_size) { @@ -6633,7 +7005,7 @@ bool JSArray::AllowsSetElementsLength() { bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray(); - ASSERT(result == !HasExternalArrayElements()); + DCHECK(result == !HasExternalArrayElements()); return result; } @@ -6643,7 +7015,7 @@ EnsureCanContainElements(array, storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS); - ASSERT((storage->map() == array->GetHeap()->fixed_double_array_map() && + DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() && IsFastDoubleElementsKind(array->GetElementsKind())) || ((storage->map() != array->GetHeap()->fixed_double_array_map()) && (IsFastObjectElementsKind(array->GetElementsKind()) || @@ -6654,24 +7026,6 @@ } -MaybeObject* FixedArray::Copy() { - if (length() == 0) return this; - return GetHeap()->CopyFixedArray(this); -} - - -MaybeObject* FixedDoubleArray::Copy() { - if (length() == 0) return this; - return GetHeap()->CopyFixedDoubleArray(this); -} - - -MaybeObject* ConstantPoolArray::Copy() { - if (length() == 0) return this; - return GetHeap()->CopyConstantPoolArray(this); -} - - Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) { return isolate->factory()->uninitialized_symbol(); } @@ -6714,6 +7068,7 @@ void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) { + if (delta == 0) return; int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value(); int new_count = ICsWithTypeInfoCountField::decode(value) + delta; // We can get 
negative count here when the type-feedback info is @@ -6729,9 +7084,25 @@ } +int TypeFeedbackInfo::ic_generic_count() { + return Smi::cast(READ_FIELD(this, kStorage3Offset))->value(); +} + + +void TypeFeedbackInfo::change_ic_generic_count(int delta) { + if (delta == 0) return; + int new_count = ic_generic_count() + delta; + if (new_count >= 0) { + new_count &= ~Smi::kMinValue; + WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(new_count)); + } +} + + void TypeFeedbackInfo::initialize_storage() { WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0)); WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0)); + WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(0)); } @@ -6771,10 +7142,6 @@ } -ACCESSORS(TypeFeedbackInfo, feedback_vector, FixedArray, - kFeedbackVectorOffset) - - SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot) @@ -6786,7 +7153,7 @@ Relocatable::~Relocatable() { - ASSERT_EQ(isolate_->relocatable_top(), this); + DCHECK_EQ(isolate_->relocatable_top(), this); isolate_->set_relocatable_top(prev_); } @@ -6857,17 +7224,52 @@ } +template<class Derived, class TableType> +Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() { + TableType* table(TableType::cast(this->table())); + int index = Smi::cast(this->index())->value(); + Object* key = table->KeyAt(index); + DCHECK(!key->IsTheHole()); + return key; +} + + +void JSSetIterator::PopulateValueArray(FixedArray* array) { + array->set(0, CurrentKey()); +} + + +void JSMapIterator::PopulateValueArray(FixedArray* array) { + array->set(0, CurrentKey()); + array->set(1, CurrentValue()); +} + + +Object* JSMapIterator::CurrentValue() { + OrderedHashMap* table(OrderedHashMap::cast(this->table())); + int index = Smi::cast(this->index())->value(); + Object* value = table->ValueAt(index); + DCHECK(!value->IsTheHole()); + return value; +} + + #undef TYPE_CHECKER #undef CAST_ACCESSOR #undef INT_ACCESSORS #undef ACCESSORS #undef ACCESSORS_TO_SMI #undef SMI_ACCESSORS +#undef 
SYNCHRONIZED_SMI_ACCESSORS +#undef NOBARRIER_SMI_ACCESSORS #undef BOOL_GETTER #undef BOOL_ACCESSORS #undef FIELD_ADDR +#undef FIELD_ADDR_CONST #undef READ_FIELD +#undef NOBARRIER_READ_FIELD #undef WRITE_FIELD +#undef NOBARRIER_WRITE_FIELD #undef WRITE_BARRIER #undef CONDITIONAL_WRITE_BARRIER #undef READ_DOUBLE_FIELD @@ -6882,6 +7284,8 @@ #undef WRITE_SHORT_FIELD #undef READ_BYTE_FIELD #undef WRITE_BYTE_FIELD +#undef NOBARRIER_READ_BYTE_FIELD +#undef NOBARRIER_WRITE_BYTE_FIELD } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/objects-printer.cc nodejs-0.11.15/deps/v8/src/objects-printer.cc --- nodejs-0.11.13/deps/v8/src/objects-printer.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects-printer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,237 +1,211 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "disassembler.h" -#include "disasm.h" -#include "jsregexp.h" -#include "objects-visiting.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/disasm.h" +#include "src/disassembler.h" +#include "src/heap/objects-visiting.h" +#include "src/jsregexp.h" +#include "src/ostreams.h" namespace v8 { namespace internal { #ifdef OBJECT_PRINT -void MaybeObject::Print() { - Print(stdout); +void Object::Print() { + OFStream os(stdout); + this->Print(os); + os << flush; } -void MaybeObject::Print(FILE* out) { - Object* this_as_object; - if (ToObject(&this_as_object)) { - if (this_as_object->IsSmi()) { - Smi::cast(this_as_object)->SmiPrint(out); - } else { - HeapObject::cast(this_as_object)->HeapObjectPrint(out); - } +void Object::Print(OStream& os) { // NOLINT + if (IsSmi()) { + Smi::cast(this)->SmiPrint(os); } else { - Failure::cast(this)->FailurePrint(out); + HeapObject::cast(this)->HeapObjectPrint(os); } - Flush(out); -} - - -void MaybeObject::PrintLn() { - PrintLn(stdout); -} - - -void MaybeObject::PrintLn(FILE* out) { - Print(out); - PrintF(out, "\n"); } -void HeapObject::PrintHeader(FILE* out, const char* id) { - PrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id); +void HeapObject::PrintHeader(OStream& os, const char* id) { // NOLINT + os << "" << reinterpret_cast<void*>(this) << ": [" << id << "]\n"; } 
-void HeapObject::HeapObjectPrint(FILE* out) { +void HeapObject::HeapObjectPrint(OStream& os) { // NOLINT InstanceType instance_type = map()->instance_type(); HandleScope scope(GetIsolate()); if (instance_type < FIRST_NONSTRING_TYPE) { - String::cast(this)->StringPrint(out); + String::cast(this)->StringPrint(os); return; } switch (instance_type) { case SYMBOL_TYPE: - Symbol::cast(this)->SymbolPrint(out); + Symbol::cast(this)->SymbolPrint(os); break; case MAP_TYPE: - Map::cast(this)->MapPrint(out); + Map::cast(this)->MapPrint(os); break; case HEAP_NUMBER_TYPE: - HeapNumber::cast(this)->HeapNumberPrint(out); + HeapNumber::cast(this)->HeapNumberPrint(os); + break; + case MUTABLE_HEAP_NUMBER_TYPE: + os << "<mutable "; + HeapNumber::cast(this)->HeapNumberPrint(os); + os << ">"; break; case FIXED_DOUBLE_ARRAY_TYPE: - FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out); + FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os); break; case CONSTANT_POOL_ARRAY_TYPE: - ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(out); + ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(os); break; case FIXED_ARRAY_TYPE: - FixedArray::cast(this)->FixedArrayPrint(out); + FixedArray::cast(this)->FixedArrayPrint(os); break; case BYTE_ARRAY_TYPE: - ByteArray::cast(this)->ByteArrayPrint(out); + ByteArray::cast(this)->ByteArrayPrint(os); break; case FREE_SPACE_TYPE: - FreeSpace::cast(this)->FreeSpacePrint(out); + FreeSpace::cast(this)->FreeSpacePrint(os); break; -#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \ - case EXTERNAL_##TYPE##_ARRAY_TYPE: \ - External##Type##Array::cast(this)->External##Type##ArrayPrint(out); \ - break; +#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \ + case EXTERNAL_##TYPE##_ARRAY_TYPE: \ + External##Type##Array::cast(this)->External##Type##ArrayPrint(os); \ + break; TYPED_ARRAYS(PRINT_EXTERNAL_ARRAY) #undef PRINT_EXTERNAL_ARRAY -#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ - case 
Fixed##Type##Array::kInstanceType: \ - Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(out); \ - break; +#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ + case Fixed##Type##Array::kInstanceType: \ + Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(os); \ + break; TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY) #undef PRINT_FIXED_TYPED_ARRAY case FILLER_TYPE: - PrintF(out, "filler"); + os << "filler"; break; case JS_OBJECT_TYPE: // fall through case JS_CONTEXT_EXTENSION_OBJECT_TYPE: case JS_ARRAY_TYPE: case JS_GENERATOR_OBJECT_TYPE: case JS_REGEXP_TYPE: - JSObject::cast(this)->JSObjectPrint(out); + JSObject::cast(this)->JSObjectPrint(os); break; case ODDBALL_TYPE: - Oddball::cast(this)->to_string()->Print(out); + Oddball::cast(this)->to_string()->Print(os); break; case JS_MODULE_TYPE: - JSModule::cast(this)->JSModulePrint(out); + JSModule::cast(this)->JSModulePrint(os); break; case JS_FUNCTION_TYPE: - JSFunction::cast(this)->JSFunctionPrint(out); + JSFunction::cast(this)->JSFunctionPrint(os); break; case JS_GLOBAL_PROXY_TYPE: - JSGlobalProxy::cast(this)->JSGlobalProxyPrint(out); + JSGlobalProxy::cast(this)->JSGlobalProxyPrint(os); break; case JS_GLOBAL_OBJECT_TYPE: - JSGlobalObject::cast(this)->JSGlobalObjectPrint(out); + JSGlobalObject::cast(this)->JSGlobalObjectPrint(os); break; case JS_BUILTINS_OBJECT_TYPE: - JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(out); + JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(os); break; case JS_VALUE_TYPE: - PrintF(out, "Value wrapper around:"); - JSValue::cast(this)->value()->Print(out); + os << "Value wrapper around:"; + JSValue::cast(this)->value()->Print(os); break; case JS_DATE_TYPE: - JSDate::cast(this)->JSDatePrint(out); + JSDate::cast(this)->JSDatePrint(os); break; case CODE_TYPE: - Code::cast(this)->CodePrint(out); + Code::cast(this)->CodePrint(os); break; case JS_PROXY_TYPE: - JSProxy::cast(this)->JSProxyPrint(out); + JSProxy::cast(this)->JSProxyPrint(os); break; case JS_FUNCTION_PROXY_TYPE: - 
JSFunctionProxy::cast(this)->JSFunctionProxyPrint(out); + JSFunctionProxy::cast(this)->JSFunctionProxyPrint(os); break; case JS_SET_TYPE: - JSSet::cast(this)->JSSetPrint(out); + JSSet::cast(this)->JSSetPrint(os); break; case JS_MAP_TYPE: - JSMap::cast(this)->JSMapPrint(out); + JSMap::cast(this)->JSMapPrint(os); + break; + case JS_SET_ITERATOR_TYPE: + JSSetIterator::cast(this)->JSSetIteratorPrint(os); + break; + case JS_MAP_ITERATOR_TYPE: + JSMapIterator::cast(this)->JSMapIteratorPrint(os); break; case JS_WEAK_MAP_TYPE: - JSWeakMap::cast(this)->JSWeakMapPrint(out); + JSWeakMap::cast(this)->JSWeakMapPrint(os); break; case JS_WEAK_SET_TYPE: - JSWeakSet::cast(this)->JSWeakSetPrint(out); + JSWeakSet::cast(this)->JSWeakSetPrint(os); break; case FOREIGN_TYPE: - Foreign::cast(this)->ForeignPrint(out); + Foreign::cast(this)->ForeignPrint(os); break; case SHARED_FUNCTION_INFO_TYPE: - SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out); + SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(os); break; case JS_MESSAGE_OBJECT_TYPE: - JSMessageObject::cast(this)->JSMessageObjectPrint(out); + JSMessageObject::cast(this)->JSMessageObjectPrint(os); break; case CELL_TYPE: - Cell::cast(this)->CellPrint(out); + Cell::cast(this)->CellPrint(os); break; case PROPERTY_CELL_TYPE: - PropertyCell::cast(this)->PropertyCellPrint(out); + PropertyCell::cast(this)->PropertyCellPrint(os); break; case JS_ARRAY_BUFFER_TYPE: - JSArrayBuffer::cast(this)->JSArrayBufferPrint(out); + JSArrayBuffer::cast(this)->JSArrayBufferPrint(os); break; case JS_TYPED_ARRAY_TYPE: - JSTypedArray::cast(this)->JSTypedArrayPrint(out); + JSTypedArray::cast(this)->JSTypedArrayPrint(os); break; case JS_DATA_VIEW_TYPE: - JSDataView::cast(this)->JSDataViewPrint(out); + JSDataView::cast(this)->JSDataViewPrint(os); break; #define MAKE_STRUCT_CASE(NAME, Name, name) \ case NAME##_TYPE: \ - Name::cast(this)->Name##Print(out); \ + Name::cast(this)->Name##Print(os); \ break; STRUCT_LIST(MAKE_STRUCT_CASE) #undef 
MAKE_STRUCT_CASE default: - PrintF(out, "UNKNOWN TYPE %d", map()->instance_type()); + os << "UNKNOWN TYPE " << map()->instance_type(); UNREACHABLE(); break; } } -void ByteArray::ByteArrayPrint(FILE* out) { - PrintF(out, "byte array, data starts at %p", GetDataStartAddress()); +void ByteArray::ByteArrayPrint(OStream& os) { // NOLINT + os << "byte array, data starts at " << GetDataStartAddress(); } -void FreeSpace::FreeSpacePrint(FILE* out) { - PrintF(out, "free space, size %d", Size()); +void FreeSpace::FreeSpacePrint(OStream& os) { // NOLINT + os << "free space, size " << Size(); } -#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size) \ - void External##Type##Array::External##Type##ArrayPrint(FILE* out) { \ - PrintF(out, "external " #type " array"); \ +#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size) \ + void External##Type##Array::External##Type##ArrayPrint(OStream& os) { \ + os << "external " #type " array"; \ } TYPED_ARRAYS(EXTERNAL_ARRAY_PRINTER) @@ -240,68 +214,56 @@ template <class Traits> -void FixedTypedArray<Traits>::FixedTypedArrayPrint(FILE* out) { - PrintF(out, "fixed %s", Traits::Designator()); +void FixedTypedArray<Traits>::FixedTypedArrayPrint(OStream& os) { // NOLINT + os << "fixed " << Traits::Designator(); } -void JSObject::PrintProperties(FILE* out) { +void JSObject::PrintProperties(OStream& os) { // NOLINT if (HasFastProperties()) { DescriptorArray* descs = map()->instance_descriptors(); for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) { - PrintF(out, " "); - descs->GetKey(i)->NamePrint(out); - PrintF(out, ": "); + os << " "; + descs->GetKey(i)->NamePrint(os); + os << ": "; switch (descs->GetType(i)) { case FIELD: { - int index = descs->GetFieldIndex(i); - RawFastPropertyAt(index)->ShortPrint(out); - PrintF(out, " (field at offset %d)\n", index); + FieldIndex index = FieldIndex::ForDescriptor(map(), i); + os << Brief(RawFastPropertyAt(index)) << " (field at offset " + << index.property_index() << ")\n"; break; } 
case CONSTANT: - descs->GetConstant(i)->ShortPrint(out); - PrintF(out, " (constant)\n"); + os << Brief(descs->GetConstant(i)) << " (constant)\n"; break; case CALLBACKS: - descs->GetCallbacksObject(i)->ShortPrint(out); - PrintF(out, " (callback)\n"); + os << Brief(descs->GetCallbacksObject(i)) << " (callback)\n"; break; case NORMAL: // only in slow mode case HANDLER: // only in lookup results, not in descriptors case INTERCEPTOR: // only in lookup results, not in descriptors // There are no transitions in the descriptor array. - case TRANSITION: case NONEXISTENT: UNREACHABLE(); break; } } } else { - property_dictionary()->Print(out); - } -} - - -template<class T> -static void DoPrintElements(FILE *out, Object* object) { - T* p = T::cast(object); - for (int i = 0; i < p->length(); i++) { - PrintF(out, " %d: %d\n", i, p->get_scalar(i)); + property_dictionary()->Print(os); } } -template<class T> -static void DoPrintDoubleElements(FILE* out, Object* object) { +template <class T> +static void DoPrintElements(OStream& os, Object* object) { // NOLINT T* p = T::cast(object); for (int i = 0; i < p->length(); i++) { - PrintF(out, " %d: %f\n", i, p->get_scalar(i)); + os << " " << i << ": " << p->get_scalar(i) << "\n"; } } -void JSObject::PrintElements(FILE* out) { +void JSObject::PrintElements(OStream& os) { // NOLINT // Don't call GetElementsKind, its validation code can cause the printer to // fail when debugging. switch (map()->elements_kind()) { @@ -312,9 +274,7 @@ // Print in array notation for non-sparse arrays. 
FixedArray* p = FixedArray::cast(elements()); for (int i = 0; i < p->length(); i++) { - PrintF(out, " %d: ", i); - p->get(i)->ShortPrint(out); - PrintF(out, "\n"); + os << " " << i << ": " << Brief(p->get(i)) << "\n"; } break; } @@ -324,29 +284,24 @@ if (elements()->length() > 0) { FixedDoubleArray* p = FixedDoubleArray::cast(elements()); for (int i = 0; i < p->length(); i++) { + os << " " << i << ": "; if (p->is_the_hole(i)) { - PrintF(out, " %d: <the hole>", i); + os << "<the hole>"; } else { - PrintF(out, " %d: %g", i, p->get_scalar(i)); + os << p->get_scalar(i); } - PrintF(out, "\n"); + os << "\n"; } } break; } -#define PRINT_ELEMENTS(Kind, Type) \ - case Kind: { \ - DoPrintElements<Type>(out, elements()); \ - break; \ - } - -#define PRINT_DOUBLE_ELEMENTS(Kind, Type) \ - case Kind: { \ - DoPrintDoubleElements<Type>(out, elements()); \ - break; \ - } +#define PRINT_ELEMENTS(Kind, Type) \ + case Kind: { \ + DoPrintElements<Type>(os, elements()); \ + break; \ + } PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray) PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array) @@ -358,9 +313,8 @@ PRINT_ELEMENTS(EXTERNAL_INT32_ELEMENTS, ExternalInt32Array) PRINT_ELEMENTS(EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array) - PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) - PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) - + PRINT_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) + PRINT_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array) PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray) @@ -369,66 +323,60 @@ PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array) PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array) PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array) - PRINT_DOUBLE_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array) - PRINT_DOUBLE_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array) + PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array) + 
PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array) -#undef PRINT_DOUBLE_ELEMENTS #undef PRINT_ELEMENTS case DICTIONARY_ELEMENTS: - elements()->Print(out); + elements()->Print(os); break; case SLOPPY_ARGUMENTS_ELEMENTS: { FixedArray* p = FixedArray::cast(elements()); - PrintF(out, " parameter map:"); + os << " parameter map:"; for (int i = 2; i < p->length(); i++) { - PrintF(out, " %d:", i - 2); - p->get(i)->ShortPrint(out); + os << " " << (i - 2) << ":" << Brief(p->get(i)); } - PrintF(out, "\n context: "); - p->get(0)->ShortPrint(out); - PrintF(out, "\n arguments: "); - p->get(1)->ShortPrint(out); - PrintF(out, "\n"); + os << "\n context: " << Brief(p->get(0)) + << "\n arguments: " << Brief(p->get(1)) << "\n"; break; } } } -void JSObject::PrintTransitions(FILE* out) { +void JSObject::PrintTransitions(OStream& os) { // NOLINT if (!map()->HasTransitionArray()) return; TransitionArray* transitions = map()->transitions(); for (int i = 0; i < transitions->number_of_transitions(); i++) { Name* key = transitions->GetKey(i); - PrintF(out, " "); - key->NamePrint(out); - PrintF(out, ": "); + os << " "; + key->NamePrint(os); + os << ": "; if (key == GetHeap()->frozen_symbol()) { - PrintF(out, " (transition to frozen)\n"); + os << " (transition to frozen)\n"; } else if (key == GetHeap()->elements_transition_symbol()) { - PrintF(out, " (transition to "); - PrintElementsKind(out, transitions->GetTarget(i)->elements_kind()); - PrintF(out, ")\n"); + os << " (transition to " + << ElementsKindToString(transitions->GetTarget(i)->elements_kind()) + << ")\n"; } else if (key == GetHeap()->observed_symbol()) { - PrintF(out, " (transition to Object.observe)\n"); + os << " (transition to Object.observe)\n"; } else { switch (transitions->GetTargetDetails(i).type()) { case FIELD: { - PrintF(out, " (transition to field)\n"); + os << " (transition to field)\n"; break; } case CONSTANT: - PrintF(out, " (transition to constant)\n"); + os << " (transition to constant)\n"; break; case CALLBACKS: - 
PrintF(out, " (transition to callback)\n"); + os << " (transition to callback)\n"; break; // Values below are never in the target descriptor array. case NORMAL: case HANDLER: case INTERCEPTOR: - case TRANSITION: case NONEXISTENT: UNREACHABLE(); break; @@ -438,35 +386,32 @@ } -void JSObject::JSObjectPrint(FILE* out) { - PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this)); - PrintF(out, " - map = %p [", reinterpret_cast<void*>(map())); +void JSObject::JSObjectPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSObject"); // Don't call GetElementsKind, its validation code can cause the printer to // fail when debugging. - PrintElementsKind(out, this->map()->elements_kind()); - PrintF(out, - "]\n - prototype = %p\n", - reinterpret_cast<void*>(GetPrototype())); - PrintF(out, " {\n"); - PrintProperties(out); - PrintTransitions(out); - PrintElements(out); - PrintF(out, " }\n"); + PrototypeIterator iter(GetIsolate(), this); + os << " - map = " << reinterpret_cast<void*>(map()) << " [" + << ElementsKindToString(this->map()->elements_kind()) + << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent()) + << "\n {\n"; + PrintProperties(os); + PrintTransitions(os); + PrintElements(os); + os << " }\n"; } -void JSModule::JSModulePrint(FILE* out) { - HeapObject::PrintHeader(out, "JSModule"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - context = "); - context()->Print(out); - PrintF(out, " - scope_info = "); - scope_info()->ShortPrint(out); - PrintElementsKind(out, this->map()->elements_kind()); - PrintF(out, " {\n"); - PrintProperties(out); - PrintElements(out); - PrintF(out, " }\n"); +void JSModule::JSModulePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSModule"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n" + << " - context = "; + context()->Print(os); + os << " - scope_info = " << Brief(scope_info()) + << ElementsKindToString(this->map()->elements_kind()) << " {\n"; + 
PrintProperties(os); + PrintElements(os); + os << " }\n"; } @@ -481,176 +426,165 @@ } -void Symbol::SymbolPrint(FILE* out) { - HeapObject::PrintHeader(out, "Symbol"); - PrintF(out, " - hash: %d\n", Hash()); - PrintF(out, " - name: "); - name()->ShortPrint(); - PrintF(out, " - private: %d\n", is_private()); - PrintF(out, "\n"); +void Symbol::SymbolPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Symbol"); + os << " - hash: " << Hash(); + os << "\n - name: " << Brief(name()); + os << "\n - private: " << is_private(); + os << "\n - own: " << is_own(); + os << "\n"; } -void Map::MapPrint(FILE* out) { - HeapObject::PrintHeader(out, "Map"); - PrintF(out, " - type: %s\n", TypeToString(instance_type())); - PrintF(out, " - instance size: %d\n", instance_size()); - PrintF(out, " - inobject properties: %d\n", inobject_properties()); - PrintF(out, " - elements kind: "); - PrintElementsKind(out, elements_kind()); - PrintF(out, "\n - pre-allocated property fields: %d\n", - pre_allocated_property_fields()); - PrintF(out, " - unused property fields: %d\n", unused_property_fields()); - if (is_hidden_prototype()) { - PrintF(out, " - hidden_prototype\n"); - } - if (has_named_interceptor()) { - PrintF(out, " - named_interceptor\n"); - } - if (has_indexed_interceptor()) { - PrintF(out, " - indexed_interceptor\n"); - } - if (is_undetectable()) { - PrintF(out, " - undetectable\n"); - } - if (has_instance_call_handler()) { - PrintF(out, " - instance_call_handler\n"); - } - if (is_access_check_needed()) { - PrintF(out, " - access_check_needed\n"); - } +void Map::MapPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Map"); + os << " - type: " << TypeToString(instance_type()) << "\n"; + os << " - instance size: " << instance_size() << "\n"; + os << " - inobject properties: " << inobject_properties() << "\n"; + os << " - elements kind: " << ElementsKindToString(elements_kind()); + os << "\n - pre-allocated property fields: " + << pre_allocated_property_fields() << 
"\n"; + os << " - unused property fields: " << unused_property_fields() << "\n"; + if (is_hidden_prototype()) os << " - hidden_prototype\n"; + if (has_named_interceptor()) os << " - named_interceptor\n"; + if (has_indexed_interceptor()) os << " - indexed_interceptor\n"; + if (is_undetectable()) os << " - undetectable\n"; + if (has_instance_call_handler()) os << " - instance_call_handler\n"; + if (is_access_check_needed()) os << " - access_check_needed\n"; if (is_frozen()) { - PrintF(out, " - frozen\n"); + os << " - frozen\n"; } else if (!is_extensible()) { - PrintF(out, " - sealed\n"); + os << " - sealed\n"; } - PrintF(out, " - back pointer: "); - GetBackPointer()->ShortPrint(out); - PrintF(out, "\n - instance descriptors %s#%i: ", - owns_descriptors() ? "(own) " : "", - NumberOfOwnDescriptors()); - instance_descriptors()->ShortPrint(out); + os << " - back pointer: " << Brief(GetBackPointer()); + os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "") + << "#" << NumberOfOwnDescriptors() << ": " + << Brief(instance_descriptors()); if (HasTransitionArray()) { - PrintF(out, "\n - transitions: "); - transitions()->ShortPrint(out); + os << "\n - transitions: " << Brief(transitions()); } - PrintF(out, "\n - prototype: "); - prototype()->ShortPrint(out); - PrintF(out, "\n - constructor: "); - constructor()->ShortPrint(out); - PrintF(out, "\n - code cache: "); - code_cache()->ShortPrint(out); - PrintF(out, "\n - dependent code: "); - dependent_code()->ShortPrint(out); - PrintF(out, "\n"); + os << "\n - prototype: " << Brief(prototype()); + os << "\n - constructor: " << Brief(constructor()); + os << "\n - code cache: " << Brief(code_cache()); + os << "\n - dependent code: " << Brief(dependent_code()); + os << "\n"; } -void CodeCache::CodeCachePrint(FILE* out) { - HeapObject::PrintHeader(out, "CodeCache"); - PrintF(out, "\n - default_cache: "); - default_cache()->ShortPrint(out); - PrintF(out, "\n - normal_type_cache: "); - 
normal_type_cache()->ShortPrint(out); +void CodeCache::CodeCachePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "CodeCache"); + os << "\n - default_cache: " << Brief(default_cache()); + os << "\n - normal_type_cache: " << Brief(normal_type_cache()); } -void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) { - HeapObject::PrintHeader(out, "PolymorphicCodeCache"); - PrintF(out, "\n - cache: "); - cache()->ShortPrint(out); +void PolymorphicCodeCache::PolymorphicCodeCachePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "PolymorphicCodeCache"); + os << "\n - cache: " << Brief(cache()); } -void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "TypeFeedbackInfo"); - PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n", - ic_total_count(), ic_with_type_info_count()); - PrintF(out, " - feedback_vector: "); - feedback_vector()->FixedArrayPrint(out); +void TypeFeedbackInfo::TypeFeedbackInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "TypeFeedbackInfo"); + os << " - ic_total_count: " << ic_total_count() + << ", ic_with_type_info_count: " << ic_with_type_info_count() + << ", ic_generic_count: " << ic_generic_count() << "\n"; } -void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) { - HeapObject::PrintHeader(out, "AliasedArgumentsEntry"); - PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot()); +void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "AliasedArgumentsEntry"); + os << "\n - aliased_context_slot: " << aliased_context_slot(); } -void FixedArray::FixedArrayPrint(FILE* out) { - HeapObject::PrintHeader(out, "FixedArray"); - PrintF(out, " - length: %d", length()); +void FixedArray::FixedArrayPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "FixedArray"); + os << " - length: " << length(); for (int i = 0; i < length(); i++) { - PrintF(out, "\n [%d]: ", i); - 
get(i)->ShortPrint(out); + os << "\n [" << i << "]: " << Brief(get(i)); } - PrintF(out, "\n"); + os << "\n"; } -void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) { - HeapObject::PrintHeader(out, "FixedDoubleArray"); - PrintF(out, " - length: %d", length()); +void FixedDoubleArray::FixedDoubleArrayPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "FixedDoubleArray"); + os << " - length: " << length(); for (int i = 0; i < length(); i++) { + os << "\n [" << i << "]: "; if (is_the_hole(i)) { - PrintF(out, "\n [%d]: <the hole>", i); + os << "<the hole>"; } else { - PrintF(out, "\n [%d]: %g", i, get_scalar(i)); + os << get_scalar(i); } } - PrintF(out, "\n"); + os << "\n"; } -void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) { - HeapObject::PrintHeader(out, "ConstantPoolArray"); - PrintF(out, " - length: %d", length()); - for (int i = 0; i < length(); i++) { - if (i < first_code_ptr_index()) { - PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i)); - } else if (i < first_heap_ptr_index()) { - PrintF(out, "\n [%d]: code target pointer: %p", i, - reinterpret_cast<void*>(get_code_ptr_entry(i))); - } else if (i < first_int32_index()) { - PrintF(out, "\n [%d]: heap pointer: %p", i, - reinterpret_cast<void*>(get_heap_ptr_entry(i))); - } else { - PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i)); +void ConstantPoolArray::ConstantPoolArrayPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "ConstantPoolArray"); + os << " - length: " << length(); + for (int i = 0; i <= last_index(INT32, SMALL_SECTION); i++) { + if (i < last_index(INT64, SMALL_SECTION)) { + os << "\n [" << i << "]: double: " << get_int64_entry_as_double(i); + } else if (i <= last_index(CODE_PTR, SMALL_SECTION)) { + os << "\n [" << i << "]: code target pointer: " + << reinterpret_cast<void*>(get_code_ptr_entry(i)); + } else if (i <= last_index(HEAP_PTR, SMALL_SECTION)) { + os << "\n [" << i << "]: heap pointer: " + << 
reinterpret_cast<void*>(get_heap_ptr_entry(i)); + } else if (i <= last_index(INT32, SMALL_SECTION)) { + os << "\n [" << i << "]: int32: " << get_int32_entry(i); } } - PrintF(out, "\n"); + if (is_extended_layout()) { + os << "\n Extended section:"; + for (int i = first_extended_section_index(); + i <= last_index(INT32, EXTENDED_SECTION); i++) { + if (i < last_index(INT64, EXTENDED_SECTION)) { + os << "\n [" << i << "]: double: " << get_int64_entry_as_double(i); + } else if (i <= last_index(CODE_PTR, EXTENDED_SECTION)) { + os << "\n [" << i << "]: code target pointer: " + << reinterpret_cast<void*>(get_code_ptr_entry(i)); + } else if (i <= last_index(HEAP_PTR, EXTENDED_SECTION)) { + os << "\n [" << i << "]: heap pointer: " + << reinterpret_cast<void*>(get_heap_ptr_entry(i)); + } else if (i <= last_index(INT32, EXTENDED_SECTION)) { + os << "\n [" << i << "]: int32: " << get_int32_entry(i); + } + } + } + os << "\n"; } -void JSValue::JSValuePrint(FILE* out) { - HeapObject::PrintHeader(out, "ValueObject"); - value()->Print(out); +void JSValue::JSValuePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "ValueObject"); + value()->Print(os); } -void JSMessageObject::JSMessageObjectPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSMessageObject"); - PrintF(out, " - type: "); - type()->ShortPrint(out); - PrintF(out, "\n - arguments: "); - arguments()->ShortPrint(out); - PrintF(out, "\n - start_position: %d", start_position()); - PrintF(out, "\n - end_position: %d", end_position()); - PrintF(out, "\n - script: "); - script()->ShortPrint(out); - PrintF(out, "\n - stack_frames: "); - stack_frames()->ShortPrint(out); - PrintF(out, "\n"); +void JSMessageObject::JSMessageObjectPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSMessageObject"); + os << " - type: " << Brief(type()); + os << "\n - arguments: " << Brief(arguments()); + os << "\n - start_position: " << start_position(); + os << "\n - end_position: " << end_position(); + os << "\n - script: " << 
Brief(script()); + os << "\n - stack_frames: " << Brief(stack_frames()); + os << "\n"; } -void String::StringPrint(FILE* out) { +void String::StringPrint(OStream& os) { // NOLINT if (StringShape(this).IsInternalized()) { - PrintF(out, "#"); + os << "#"; } else if (StringShape(this).IsCons()) { - PrintF(out, "c\""); + os << "c\""; } else { - PrintF(out, "\""); + os << "\""; } const char truncated_epilogue[] = "...<truncated>"; @@ -661,21 +595,21 @@ } } for (int i = 0; i < len; i++) { - PrintF(out, "%c", Get(i)); + os << AsUC16(Get(i)); } if (len != length()) { - PrintF(out, "%s", truncated_epilogue); + os << truncated_epilogue; } - if (!StringShape(this).IsInternalized()) PrintF(out, "\""); + if (!StringShape(this).IsInternalized()) os << "\""; } -void Name::NamePrint(FILE* out) { +void Name::NamePrint(OStream& os) { // NOLINT if (IsString()) - String::cast(this)->StringPrint(out); + String::cast(this)->StringPrint(os); else - ShortPrint(); + os << Brief(this); } @@ -699,167 +633,181 @@ }; -void JSDate::JSDatePrint(FILE* out) { - HeapObject::PrintHeader(out, "JSDate"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - value = "); - value()->Print(out); +void JSDate::JSDatePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSDate"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - value = "; + value()->Print(os); if (!year()->IsSmi()) { - PrintF(out, " - time = NaN\n"); + os << " - time = NaN\n"; } else { - PrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n", - weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0], - year()->IsSmi() ? Smi::cast(year())->value() : -1, - month()->IsSmi() ? Smi::cast(month())->value() : -1, - day()->IsSmi() ? Smi::cast(day())->value() : -1, - hour()->IsSmi() ? Smi::cast(hour())->value() : -1, - min()->IsSmi() ? Smi::cast(min())->value() : -1, - sec()->IsSmi() ? 
Smi::cast(sec())->value() : -1); + // TODO(svenpanne) Add some basic formatting to our streams. + Vector<char> buf = Vector<char>::New(100); + SNPrintF( + buf, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n", + weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0], + year()->IsSmi() ? Smi::cast(year())->value() : -1, + month()->IsSmi() ? Smi::cast(month())->value() : -1, + day()->IsSmi() ? Smi::cast(day())->value() : -1, + hour()->IsSmi() ? Smi::cast(hour())->value() : -1, + min()->IsSmi() ? Smi::cast(min())->value() : -1, + sec()->IsSmi() ? Smi::cast(sec())->value() : -1); + os << buf.start(); } } -void JSProxy::JSProxyPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSProxy"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - handler = "); - handler()->Print(out); - PrintF(out, " - hash = "); - hash()->Print(out); - PrintF(out, "\n"); +void JSProxy::JSProxyPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSProxy"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - handler = "; + handler()->Print(os); + os << "\n - hash = "; + hash()->Print(os); + os << "\n"; } -void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSFunctionProxy"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - handler = "); - handler()->Print(out); - PrintF(out, " - call_trap = "); - call_trap()->Print(out); - PrintF(out, " - construct_trap = "); - construct_trap()->Print(out); - PrintF(out, "\n"); +void JSFunctionProxy::JSFunctionProxyPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSFunctionProxy"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - handler = "; + handler()->Print(os); + os << "\n - call_trap = "; + call_trap()->Print(os); + os << "\n - construct_trap = "; + construct_trap()->Print(os); + os << "\n"; } -void JSSet::JSSetPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSSet"); - 
PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - table = "); - table()->ShortPrint(out); - PrintF(out, "\n"); +void JSSet::JSSetPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSSet"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - table = " << Brief(table()); + os << "\n"; } -void JSMap::JSMapPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSMap"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - table = "); - table()->ShortPrint(out); - PrintF(out, "\n"); +void JSMap::JSMapPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSMap"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - table = " << Brief(table()); + os << "\n"; } -void JSWeakMap::JSWeakMapPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSWeakMap"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - table = "); - table()->ShortPrint(out); - PrintF(out, "\n"); +template <class Derived, class TableType> +void OrderedHashTableIterator< + Derived, TableType>::OrderedHashTableIteratorPrint(OStream& os) { // NOLINT + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - table = " << Brief(table()); + os << "\n - index = " << Brief(index()); + os << "\n - kind = " << Brief(kind()); + os << "\n"; } -void JSWeakSet::JSWeakSetPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSWeakSet"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - table = "); - table()->ShortPrint(out); - PrintF(out, "\n"); +template void OrderedHashTableIterator< + JSSetIterator, + OrderedHashSet>::OrderedHashTableIteratorPrint(OStream& os); // NOLINT + + +template void OrderedHashTableIterator< + JSMapIterator, + OrderedHashMap>::OrderedHashTableIteratorPrint(OStream& os); // NOLINT + + +void JSSetIterator::JSSetIteratorPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSSetIterator"); + 
OrderedHashTableIteratorPrint(os); } -void JSArrayBuffer::JSArrayBufferPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSArrayBuffer"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - backing_store = %p\n", backing_store()); - PrintF(out, " - byte_length = "); - byte_length()->ShortPrint(out); - PrintF(out, "\n"); +void JSMapIterator::JSMapIteratorPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSMapIterator"); + OrderedHashTableIteratorPrint(os); } -void JSTypedArray::JSTypedArrayPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSTypedArray"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - buffer ="); - buffer()->ShortPrint(out); - PrintF(out, "\n - byte_offset = "); - byte_offset()->ShortPrint(out); - PrintF(out, "\n - byte_length = "); - byte_length()->ShortPrint(out); - PrintF(out, "\n - length = "); - length()->ShortPrint(out); - PrintF(out, "\n"); - PrintElements(out); +void JSWeakMap::JSWeakMapPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSWeakMap"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - table = " << Brief(table()); + os << "\n"; } -void JSDataView::JSDataViewPrint(FILE* out) { - HeapObject::PrintHeader(out, "JSDataView"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - buffer ="); - buffer()->ShortPrint(out); - PrintF(out, "\n - byte_offset = "); - byte_offset()->ShortPrint(out); - PrintF(out, "\n - byte_length = "); - byte_length()->ShortPrint(out); - PrintF(out, "\n"); +void JSWeakSet::JSWeakSetPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSWeakSet"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - table = " << Brief(table()); + os << "\n"; } -void JSFunction::JSFunctionPrint(FILE* out) { - HeapObject::PrintHeader(out, "Function"); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - initial_map = "); - if 
(has_initial_map()) { - initial_map()->ShortPrint(out); - } - PrintF(out, "\n - shared_info = "); - shared()->ShortPrint(out); - PrintF(out, "\n - name = "); - shared()->name()->Print(out); - PrintF(out, "\n - context = "); - context()->ShortPrint(out); +void JSArrayBuffer::JSArrayBufferPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSArrayBuffer"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - backing_store = " << backing_store() << "\n"; + os << " - byte_length = " << Brief(byte_length()); + os << "\n"; +} + + +void JSTypedArray::JSTypedArrayPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSTypedArray"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - buffer =" << Brief(buffer()); + os << "\n - byte_offset = " << Brief(byte_offset()); + os << "\n - byte_length = " << Brief(byte_length()); + os << "\n - length = " << Brief(length()); + os << "\n"; + PrintElements(os); +} + + +void JSDataView::JSDataViewPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "JSDataView"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - buffer =" << Brief(buffer()); + os << "\n - byte_offset = " << Brief(byte_offset()); + os << "\n - byte_length = " << Brief(byte_length()); + os << "\n"; +} + + +void JSFunction::JSFunctionPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Function"); + os << " - map = " << reinterpret_cast<void*>(map()) << "\n"; + os << " - initial_map = "; + if (has_initial_map()) os << Brief(initial_map()); + os << "\n - shared_info = " << Brief(shared()); + os << "\n - name = " << Brief(shared()->name()); + os << "\n - context = " << Brief(context()); if (shared()->bound()) { - PrintF(out, "\n - bindings = "); - function_bindings()->ShortPrint(out); + os << "\n - bindings = " << Brief(function_bindings()); } else { - PrintF(out, "\n - literals = "); - literals()->ShortPrint(out); + os << "\n - literals = " << Brief(literals()); } - 
PrintF(out, "\n - code = "); - code()->ShortPrint(out); - PrintF(out, "\n"); - - PrintProperties(out); - PrintElements(out); - - PrintF(out, "\n"); + os << "\n - code = " << Brief(code()); + os << "\n"; + PrintProperties(os); + PrintElements(os); + os << "\n"; } -void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "SharedFunctionInfo"); - PrintF(out, " - name: "); - name()->ShortPrint(out); - PrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties()); - PrintF(out, "\n - instance class name = "); - instance_class_name()->Print(out); - PrintF(out, "\n - code = "); - code()->ShortPrint(out); +void SharedFunctionInfo::SharedFunctionInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "SharedFunctionInfo"); + os << " - name: " << Brief(name()); + os << "\n - expected_nof_properties: " << expected_nof_properties(); + os << "\n - ast_node_count: " << ast_node_count(); + os << "\n - instance class name = "; + instance_class_name()->Print(os); + os << "\n - code = " << Brief(code()); if (HasSourceCode()) { - PrintF(out, "\n - source code = "); + os << "\n - source code = "; String* source = String::cast(Script::cast(script())->source()); int start = start_position(); int length = end_position() - start; @@ -867,369 +815,304 @@ source->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, NULL); - PrintF(out, "%s", source_string.get()); + os << source_string.get(); } // Script files are often large, hard to read. 
- // PrintF(out, "\n - script ="); - // script()->Print(out); - PrintF(out, "\n - function token position = %d", function_token_position()); - PrintF(out, "\n - start position = %d", start_position()); - PrintF(out, "\n - end position = %d", end_position()); - PrintF(out, "\n - is expression = %d", is_expression()); - PrintF(out, "\n - debug info = "); - debug_info()->ShortPrint(out); - PrintF(out, "\n - length = %d", length()); - PrintF(out, "\n - optimized_code_map = "); - optimized_code_map()->ShortPrint(out); - PrintF(out, "\n"); + // os << "\n - script ="; + // script()->Print(os); + os << "\n - function token position = " << function_token_position(); + os << "\n - start position = " << start_position(); + os << "\n - end position = " << end_position(); + os << "\n - is expression = " << is_expression(); + os << "\n - debug info = " << Brief(debug_info()); + os << "\n - length = " << length(); + os << "\n - optimized_code_map = " << Brief(optimized_code_map()); + os << "\n - feedback_vector = "; + feedback_vector()->FixedArrayPrint(os); + os << "\n"; } -void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) { - PrintF(out, "global_proxy "); - JSObjectPrint(out); - PrintF(out, "native context : "); - native_context()->ShortPrint(out); - PrintF(out, "\n"); +void JSGlobalProxy::JSGlobalProxyPrint(OStream& os) { // NOLINT + os << "global_proxy "; + JSObjectPrint(os); + os << "native context : " << Brief(native_context()); + os << "\n"; } -void JSGlobalObject::JSGlobalObjectPrint(FILE* out) { - PrintF(out, "global "); - JSObjectPrint(out); - PrintF(out, "native context : "); - native_context()->ShortPrint(out); - PrintF(out, "\n"); +void JSGlobalObject::JSGlobalObjectPrint(OStream& os) { // NOLINT + os << "global "; + JSObjectPrint(os); + os << "native context : " << Brief(native_context()); + os << "\n"; } -void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) { - PrintF(out, "builtins "); - JSObjectPrint(out); +void JSBuiltinsObject::JSBuiltinsObjectPrint(OStream& 
os) { // NOLINT + os << "builtins "; + JSObjectPrint(os); } -void Cell::CellPrint(FILE* out) { - HeapObject::PrintHeader(out, "Cell"); +void Cell::CellPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Cell"); } -void PropertyCell::PropertyCellPrint(FILE* out) { - HeapObject::PrintHeader(out, "PropertyCell"); +void PropertyCell::PropertyCellPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "PropertyCell"); } -void Code::CodePrint(FILE* out) { - HeapObject::PrintHeader(out, "Code"); +void Code::CodePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Code"); #ifdef ENABLE_DISASSEMBLER if (FLAG_use_verbose_printer) { - Disassemble(NULL, out); + Disassemble(NULL, os); } #endif } -void Foreign::ForeignPrint(FILE* out) { - PrintF(out, "foreign address : %p", foreign_address()); +void Foreign::ForeignPrint(OStream& os) { // NOLINT + os << "foreign address : " << foreign_address(); +} + + +void ExecutableAccessorInfo::ExecutableAccessorInfoPrint( + OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "ExecutableAccessorInfo"); + os << "\n - name: " << Brief(name()); + os << "\n - flag: " << Brief(flag()); + os << "\n - getter: " << Brief(getter()); + os << "\n - setter: " << Brief(setter()); + os << "\n - data: " << Brief(data()); + os << "\n"; +} + + +void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "DeclaredAccessorInfo"); + os << "\n - name: " << Brief(name()); + os << "\n - flag: " << Brief(flag()); + os << "\n - descriptor: " << Brief(descriptor()); + os << "\n"; +} + + +void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint( + OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "DeclaredAccessorDescriptor"); + os << "\n - internal field: " << Brief(serialized_data()); + os << "\n"; +} + + +void Box::BoxPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Box"); + os << "\n - value: " << Brief(value()); + os << "\n"; +} + + +void 
AccessorPair::AccessorPairPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "AccessorPair"); + os << "\n - getter: " << Brief(getter()); + os << "\n - setter: " << Brief(setter()); + os << "\n"; +} + + +void AccessCheckInfo::AccessCheckInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "AccessCheckInfo"); + os << "\n - named_callback: " << Brief(named_callback()); + os << "\n - indexed_callback: " << Brief(indexed_callback()); + os << "\n - data: " << Brief(data()); + os << "\n"; +} + + +void InterceptorInfo::InterceptorInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "InterceptorInfo"); + os << "\n - getter: " << Brief(getter()); + os << "\n - setter: " << Brief(setter()); + os << "\n - query: " << Brief(query()); + os << "\n - deleter: " << Brief(deleter()); + os << "\n - enumerator: " << Brief(enumerator()); + os << "\n - data: " << Brief(data()); + os << "\n"; +} + + +void CallHandlerInfo::CallHandlerInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "CallHandlerInfo"); + os << "\n - callback: " << Brief(callback()); + os << "\n - data: " << Brief(data()); + os << "\n"; +} + + +void FunctionTemplateInfo::FunctionTemplateInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "FunctionTemplateInfo"); + os << "\n - class name: " << Brief(class_name()); + os << "\n - tag: " << Brief(tag()); + os << "\n - property_list: " << Brief(property_list()); + os << "\n - serial_number: " << Brief(serial_number()); + os << "\n - call_code: " << Brief(call_code()); + os << "\n - property_accessors: " << Brief(property_accessors()); + os << "\n - prototype_template: " << Brief(prototype_template()); + os << "\n - parent_template: " << Brief(parent_template()); + os << "\n - named_property_handler: " << Brief(named_property_handler()); + os << "\n - indexed_property_handler: " << Brief(indexed_property_handler()); + os << "\n - instance_template: " << Brief(instance_template()); + os << "\n - signature: " << 
Brief(signature()); + os << "\n - access_check_info: " << Brief(access_check_info()); + os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false"); + os << "\n - undetectable: " << (undetectable() ? "true" : "false"); + os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false"); + os << "\n"; } -void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "ExecutableAccessorInfo"); - PrintF(out, "\n - name: "); - name()->ShortPrint(out); - PrintF(out, "\n - flag: "); - flag()->ShortPrint(out); - PrintF(out, "\n - getter: "); - getter()->ShortPrint(out); - PrintF(out, "\n - setter: "); - setter()->ShortPrint(out); - PrintF(out, "\n - data: "); - data()->ShortPrint(out); -} - - -void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "DeclaredAccessorInfo"); - PrintF(out, "\n - name: "); - name()->ShortPrint(out); - PrintF(out, "\n - flag: "); - flag()->ShortPrint(out); - PrintF(out, "\n - descriptor: "); - descriptor()->ShortPrint(out); -} - - -void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) { - HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor"); - PrintF(out, "\n - internal field: "); - serialized_data()->ShortPrint(out); -} - - -void Box::BoxPrint(FILE* out) { - HeapObject::PrintHeader(out, "Box"); - PrintF(out, "\n - value: "); - value()->ShortPrint(out); -} - - -void AccessorPair::AccessorPairPrint(FILE* out) { - HeapObject::PrintHeader(out, "AccessorPair"); - PrintF(out, "\n - getter: "); - getter()->ShortPrint(out); - PrintF(out, "\n - setter: "); - setter()->ShortPrint(out); - PrintF(out, "\n - flag: "); - access_flags()->ShortPrint(out); -} - - -void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "AccessCheckInfo"); - PrintF(out, "\n - named_callback: "); - named_callback()->ShortPrint(out); - PrintF(out, "\n - indexed_callback: "); - indexed_callback()->ShortPrint(out); - 
PrintF(out, "\n - data: "); - data()->ShortPrint(out); -} - - -void InterceptorInfo::InterceptorInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "InterceptorInfo"); - PrintF(out, "\n - getter: "); - getter()->ShortPrint(out); - PrintF(out, "\n - setter: "); - setter()->ShortPrint(out); - PrintF(out, "\n - query: "); - query()->ShortPrint(out); - PrintF(out, "\n - deleter: "); - deleter()->ShortPrint(out); - PrintF(out, "\n - enumerator: "); - enumerator()->ShortPrint(out); - PrintF(out, "\n - data: "); - data()->ShortPrint(out); -} - - -void CallHandlerInfo::CallHandlerInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "CallHandlerInfo"); - PrintF(out, "\n - callback: "); - callback()->ShortPrint(out); - PrintF(out, "\n - data: "); - data()->ShortPrint(out); - PrintF(out, "\n - call_stub_cache: "); -} - - -void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "FunctionTemplateInfo"); - PrintF(out, "\n - class name: "); - class_name()->ShortPrint(out); - PrintF(out, "\n - tag: "); - tag()->ShortPrint(out); - PrintF(out, "\n - property_list: "); - property_list()->ShortPrint(out); - PrintF(out, "\n - serial_number: "); - serial_number()->ShortPrint(out); - PrintF(out, "\n - call_code: "); - call_code()->ShortPrint(out); - PrintF(out, "\n - property_accessors: "); - property_accessors()->ShortPrint(out); - PrintF(out, "\n - prototype_template: "); - prototype_template()->ShortPrint(out); - PrintF(out, "\n - parent_template: "); - parent_template()->ShortPrint(out); - PrintF(out, "\n - named_property_handler: "); - named_property_handler()->ShortPrint(out); - PrintF(out, "\n - indexed_property_handler: "); - indexed_property_handler()->ShortPrint(out); - PrintF(out, "\n - instance_template: "); - instance_template()->ShortPrint(out); - PrintF(out, "\n - signature: "); - signature()->ShortPrint(out); - PrintF(out, "\n - access_check_info: "); - access_check_info()->ShortPrint(out); - PrintF(out, "\n - hidden_prototype: 
%s", - hidden_prototype() ? "true" : "false"); - PrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false"); - PrintF(out, "\n - need_access_check: %s", - needs_access_check() ? "true" : "false"); -} - - -void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "ObjectTemplateInfo"); - PrintF(out, " - tag: "); - tag()->ShortPrint(out); - PrintF(out, "\n - property_list: "); - property_list()->ShortPrint(out); - PrintF(out, "\n - property_accessors: "); - property_accessors()->ShortPrint(out); - PrintF(out, "\n - constructor: "); - constructor()->ShortPrint(out); - PrintF(out, "\n - internal_field_count: "); - internal_field_count()->ShortPrint(out); - PrintF(out, "\n"); -} - - -void SignatureInfo::SignatureInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "SignatureInfo"); - PrintF(out, "\n - receiver: "); - receiver()->ShortPrint(out); - PrintF(out, "\n - args: "); - args()->ShortPrint(out); -} - - -void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "TypeSwitchInfo"); - PrintF(out, "\n - types: "); - types()->ShortPrint(out); -} - - -void AllocationSite::AllocationSitePrint(FILE* out) { - HeapObject::PrintHeader(out, "AllocationSite"); - PrintF(out, " - weak_next: "); - weak_next()->ShortPrint(out); - PrintF(out, "\n - dependent code: "); - dependent_code()->ShortPrint(out); - PrintF(out, "\n - nested site: "); - nested_site()->ShortPrint(out); - PrintF(out, "\n - memento found count: "); - Smi::FromInt(memento_found_count())->ShortPrint(out); - PrintF(out, "\n - memento create count: "); - Smi::FromInt(memento_create_count())->ShortPrint(out); - PrintF(out, "\n - pretenure decision: "); - Smi::FromInt(pretenure_decision())->ShortPrint(out); - PrintF(out, "\n - transition_info: "); +void ObjectTemplateInfo::ObjectTemplateInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "ObjectTemplateInfo"); + os << " - tag: " << Brief(tag()); + os << "\n - property_list: " << 
Brief(property_list()); + os << "\n - property_accessors: " << Brief(property_accessors()); + os << "\n - constructor: " << Brief(constructor()); + os << "\n - internal_field_count: " << Brief(internal_field_count()); + os << "\n"; +} + + +void SignatureInfo::SignatureInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "SignatureInfo"); + os << "\n - receiver: " << Brief(receiver()); + os << "\n - args: " << Brief(args()); + os << "\n"; +} + + +void TypeSwitchInfo::TypeSwitchInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "TypeSwitchInfo"); + os << "\n - types: " << Brief(types()); + os << "\n"; +} + + +void AllocationSite::AllocationSitePrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "AllocationSite"); + os << " - weak_next: " << Brief(weak_next()); + os << "\n - dependent code: " << Brief(dependent_code()); + os << "\n - nested site: " << Brief(nested_site()); + os << "\n - memento found count: " + << Brief(Smi::FromInt(memento_found_count())); + os << "\n - memento create count: " + << Brief(Smi::FromInt(memento_create_count())); + os << "\n - pretenure decision: " + << Brief(Smi::FromInt(pretenure_decision())); + os << "\n - transition_info: "; if (transition_info()->IsSmi()) { ElementsKind kind = GetElementsKind(); - PrintF(out, "Array allocation with ElementsKind "); - PrintElementsKind(out, kind); - PrintF(out, "\n"); - return; + os << "Array allocation with ElementsKind " << ElementsKindToString(kind); } else if (transition_info()->IsJSArray()) { - PrintF(out, "Array literal "); - transition_info()->ShortPrint(out); - PrintF(out, "\n"); - return; + os << "Array literal " << Brief(transition_info()); + } else { + os << "unknown transition_info" << Brief(transition_info()); } - - PrintF(out, "unknown transition_info"); - transition_info()->ShortPrint(out); - PrintF(out, "\n"); + os << "\n"; } -void AllocationMemento::AllocationMementoPrint(FILE* out) { - HeapObject::PrintHeader(out, "AllocationMemento"); - 
PrintF(out, " - allocation site: "); +void AllocationMemento::AllocationMementoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "AllocationMemento"); + os << " - allocation site: "; if (IsValid()) { - GetAllocationSite()->Print(); + GetAllocationSite()->Print(os); } else { - PrintF(out, "<invalid>\n"); + os << "<invalid>\n"; } } -void Script::ScriptPrint(FILE* out) { - HeapObject::PrintHeader(out, "Script"); - PrintF(out, "\n - source: "); - source()->ShortPrint(out); - PrintF(out, "\n - name: "); - name()->ShortPrint(out); - PrintF(out, "\n - line_offset: "); - line_offset()->ShortPrint(out); - PrintF(out, "\n - column_offset: "); - column_offset()->ShortPrint(out); - PrintF(out, "\n - type: "); - type()->ShortPrint(out); - PrintF(out, "\n - id: "); - id()->ShortPrint(out); - PrintF(out, "\n - context data: "); - context_data()->ShortPrint(out); - PrintF(out, "\n - wrapper: "); - wrapper()->ShortPrint(out); - PrintF(out, "\n - compilation type: %d", compilation_type()); - PrintF(out, "\n - line ends: "); - line_ends()->ShortPrint(out); - PrintF(out, "\n - eval from shared: "); - eval_from_shared()->ShortPrint(out); - PrintF(out, "\n - eval from instructions offset: "); - eval_from_instructions_offset()->ShortPrint(out); - PrintF(out, "\n"); +void Script::ScriptPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "Script"); + os << "\n - source: " << Brief(source()); + os << "\n - name: " << Brief(name()); + os << "\n - line_offset: " << Brief(line_offset()); + os << "\n - column_offset: " << Brief(column_offset()); + os << "\n - type: " << Brief(type()); + os << "\n - id: " << Brief(id()); + os << "\n - context data: " << Brief(context_data()); + os << "\n - wrapper: " << Brief(wrapper()); + os << "\n - compilation type: " << compilation_type(); + os << "\n - line ends: " << Brief(line_ends()); + os << "\n - eval from shared: " << Brief(eval_from_shared()); + os << "\n - eval from instructions offset: " + << 
Brief(eval_from_instructions_offset()); + os << "\n"; } -#ifdef ENABLE_DEBUGGER_SUPPORT -void DebugInfo::DebugInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "DebugInfo"); - PrintF(out, "\n - shared: "); - shared()->ShortPrint(out); - PrintF(out, "\n - original_code: "); - original_code()->ShortPrint(out); - PrintF(out, "\n - code: "); - code()->ShortPrint(out); - PrintF(out, "\n - break_points: "); - break_points()->Print(out); +void DebugInfo::DebugInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "DebugInfo"); + os << "\n - shared: " << Brief(shared()); + os << "\n - original_code: " << Brief(original_code()); + os << "\n - code: " << Brief(code()); + os << "\n - break_points: "; + break_points()->Print(os); } -void BreakPointInfo::BreakPointInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "BreakPointInfo"); - PrintF(out, "\n - code_position: %d", code_position()->value()); - PrintF(out, "\n - source_position: %d", source_position()->value()); - PrintF(out, "\n - statement_position: %d", statement_position()->value()); - PrintF(out, "\n - break_point_objects: "); - break_point_objects()->ShortPrint(out); +void BreakPointInfo::BreakPointInfoPrint(OStream& os) { // NOLINT + HeapObject::PrintHeader(os, "BreakPointInfo"); + os << "\n - code_position: " << code_position()->value(); + os << "\n - source_position: " << source_position()->value(); + os << "\n - statement_position: " << statement_position()->value(); + os << "\n - break_point_objects: " << Brief(break_point_objects()); + os << "\n"; } -#endif // ENABLE_DEBUGGER_SUPPORT -void DescriptorArray::PrintDescriptors(FILE* out) { - PrintF(out, "Descriptor array %d\n", number_of_descriptors()); +void DescriptorArray::PrintDescriptors(OStream& os) { // NOLINT + os << "Descriptor array " << number_of_descriptors() << "\n"; for (int i = 0; i < number_of_descriptors(); i++) { - PrintF(out, " %d: ", i); Descriptor desc; Get(i, &desc); - desc.Print(out); + os << " " << i << ": " << desc; } - 
PrintF(out, "\n"); + os << "\n"; } -void TransitionArray::PrintTransitions(FILE* out) { - PrintF(out, "Transition array %d\n", number_of_transitions()); +void TransitionArray::PrintTransitions(OStream& os) { // NOLINT + os << "Transition array %d\n", number_of_transitions(); for (int i = 0; i < number_of_transitions(); i++) { - PrintF(out, " %d: ", i); - GetKey(i)->NamePrint(out); - PrintF(out, ": "); + os << " " << i << ": "; + GetKey(i)->NamePrint(os); + os << ": "; switch (GetTargetDetails(i).type()) { case FIELD: { - PrintF(out, " (transition to field)\n"); + os << " (transition to field)\n"; break; } case CONSTANT: - PrintF(out, " (transition to constant)\n"); + os << " (transition to constant)\n"; break; case CALLBACKS: - PrintF(out, " (transition to callback)\n"); + os << " (transition to callback)\n"; break; // Values below are never in the target descriptor array. case NORMAL: case HANDLER: case INTERCEPTOR: - case TRANSITION: case NONEXISTENT: UNREACHABLE(); break; } } - PrintF(out, "\n"); + os << "\n"; } diff -Nru nodejs-0.11.13/deps/v8/src/objects-visiting.cc nodejs-0.11.15/deps/v8/src/objects-visiting.cc --- nodejs-0.11.13/deps/v8/src/objects-visiting.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects-visiting.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,214 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "ic-inl.h" -#include "objects-visiting.h" - -namespace v8 { -namespace internal { - - -static inline bool IsShortcutCandidate(int type) { - return ((type & kShortcutTypeMask) == kShortcutTypeTag); -} - - -StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( - int instance_type, - int instance_size) { - if (instance_type < FIRST_NONSTRING_TYPE) { - switch (instance_type & kStringRepresentationMask) { - case kSeqStringTag: - if ((instance_type & kStringEncodingMask) == kOneByteStringTag) { - return kVisitSeqOneByteString; - } else { - return kVisitSeqTwoByteString; - } - - case kConsStringTag: - if (IsShortcutCandidate(instance_type)) { - return kVisitShortcutCandidate; - } else { - return kVisitConsString; - } - - case kSlicedStringTag: - return kVisitSlicedString; - - case kExternalStringTag: - return GetVisitorIdForSize(kVisitDataObject, - kVisitDataObjectGeneric, - instance_size); - } - UNREACHABLE(); - } - - switch (instance_type) { - case BYTE_ARRAY_TYPE: - return kVisitByteArray; - 
- case FREE_SPACE_TYPE: - return kVisitFreeSpace; - - case FIXED_ARRAY_TYPE: - return kVisitFixedArray; - - case FIXED_DOUBLE_ARRAY_TYPE: - return kVisitFixedDoubleArray; - - case CONSTANT_POOL_ARRAY_TYPE: - return kVisitConstantPoolArray; - - case ODDBALL_TYPE: - return kVisitOddball; - - case MAP_TYPE: - return kVisitMap; - - case CODE_TYPE: - return kVisitCode; - - case CELL_TYPE: - return kVisitCell; - - case PROPERTY_CELL_TYPE: - return kVisitPropertyCell; - - case JS_SET_TYPE: - return GetVisitorIdForSize(kVisitStruct, - kVisitStructGeneric, - JSSet::kSize); - - case JS_MAP_TYPE: - return GetVisitorIdForSize(kVisitStruct, - kVisitStructGeneric, - JSMap::kSize); - - case JS_WEAK_MAP_TYPE: - return kVisitJSWeakMap; - - case JS_WEAK_SET_TYPE: - return kVisitJSWeakSet; - - case JS_REGEXP_TYPE: - return kVisitJSRegExp; - - case SHARED_FUNCTION_INFO_TYPE: - return kVisitSharedFunctionInfo; - - case JS_PROXY_TYPE: - return GetVisitorIdForSize(kVisitStruct, - kVisitStructGeneric, - JSProxy::kSize); - - case JS_FUNCTION_PROXY_TYPE: - return GetVisitorIdForSize(kVisitStruct, - kVisitStructGeneric, - JSFunctionProxy::kSize); - - case FOREIGN_TYPE: - return GetVisitorIdForSize(kVisitDataObject, - kVisitDataObjectGeneric, - Foreign::kSize); - - case SYMBOL_TYPE: - return kVisitSymbol; - - case FILLER_TYPE: - return kVisitDataObjectGeneric; - - case JS_ARRAY_BUFFER_TYPE: - return kVisitJSArrayBuffer; - - case JS_TYPED_ARRAY_TYPE: - return kVisitJSTypedArray; - - case JS_DATA_VIEW_TYPE: - return kVisitJSDataView; - - case JS_OBJECT_TYPE: - case JS_CONTEXT_EXTENSION_OBJECT_TYPE: - case JS_GENERATOR_OBJECT_TYPE: - case JS_MODULE_TYPE: - case JS_VALUE_TYPE: - case JS_DATE_TYPE: - case JS_ARRAY_TYPE: - case JS_GLOBAL_PROXY_TYPE: - case JS_GLOBAL_OBJECT_TYPE: - case JS_BUILTINS_OBJECT_TYPE: - case JS_MESSAGE_OBJECT_TYPE: - return GetVisitorIdForSize(kVisitJSObject, - kVisitJSObjectGeneric, - instance_size); - - case JS_FUNCTION_TYPE: - return kVisitJSFunction; - - case 
HEAP_NUMBER_TYPE: -#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \ - case EXTERNAL_##TYPE##_ARRAY_TYPE: - - TYPED_ARRAYS(EXTERNAL_ARRAY_CASE) - return GetVisitorIdForSize(kVisitDataObject, - kVisitDataObjectGeneric, - instance_size); -#undef EXTERNAL_ARRAY_CASE - - case FIXED_UINT8_ARRAY_TYPE: - case FIXED_INT8_ARRAY_TYPE: - case FIXED_UINT16_ARRAY_TYPE: - case FIXED_INT16_ARRAY_TYPE: - case FIXED_UINT32_ARRAY_TYPE: - case FIXED_INT32_ARRAY_TYPE: - case FIXED_FLOAT32_ARRAY_TYPE: - case FIXED_UINT8_CLAMPED_ARRAY_TYPE: - return kVisitFixedTypedArray; - - case FIXED_FLOAT64_ARRAY_TYPE: - return kVisitFixedFloat64Array; - -#define MAKE_STRUCT_CASE(NAME, Name, name) \ - case NAME##_TYPE: - STRUCT_LIST(MAKE_STRUCT_CASE) -#undef MAKE_STRUCT_CASE - if (instance_type == ALLOCATION_SITE_TYPE) { - return kVisitAllocationSite; - } - - return GetVisitorIdForSize(kVisitStruct, - kVisitStructGeneric, - instance_size); - - default: - UNREACHABLE(); - return kVisitorIdCount; - } -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/objects-visiting.h nodejs-0.11.15/deps/v8/src/objects-visiting.h --- nodejs-0.11.13/deps/v8/src/objects-visiting.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects-visiting.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,488 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_OBJECTS_VISITING_H_ -#define V8_OBJECTS_VISITING_H_ - -#include "allocation.h" - -// This file provides base classes and auxiliary methods for defining -// static object visitors used during GC. -// Visiting HeapObject body with a normal ObjectVisitor requires performing -// two switches on object's instance type to determine object size and layout -// and one or more virtual method calls on visitor itself. -// Static visitor is different: it provides a dispatch table which contains -// pointers to specialized visit functions. Each map has the visitor_id -// field which contains an index of specialized visitor to use. - -namespace v8 { -namespace internal { - - -// Base class for all static visitors. 
-class StaticVisitorBase : public AllStatic { - public: -#define VISITOR_ID_LIST(V) \ - V(SeqOneByteString) \ - V(SeqTwoByteString) \ - V(ShortcutCandidate) \ - V(ByteArray) \ - V(FreeSpace) \ - V(FixedArray) \ - V(FixedDoubleArray) \ - V(FixedTypedArray) \ - V(FixedFloat64Array) \ - V(ConstantPoolArray) \ - V(NativeContext) \ - V(AllocationSite) \ - V(DataObject2) \ - V(DataObject3) \ - V(DataObject4) \ - V(DataObject5) \ - V(DataObject6) \ - V(DataObject7) \ - V(DataObject8) \ - V(DataObject9) \ - V(DataObjectGeneric) \ - V(JSObject2) \ - V(JSObject3) \ - V(JSObject4) \ - V(JSObject5) \ - V(JSObject6) \ - V(JSObject7) \ - V(JSObject8) \ - V(JSObject9) \ - V(JSObjectGeneric) \ - V(Struct2) \ - V(Struct3) \ - V(Struct4) \ - V(Struct5) \ - V(Struct6) \ - V(Struct7) \ - V(Struct8) \ - V(Struct9) \ - V(StructGeneric) \ - V(ConsString) \ - V(SlicedString) \ - V(Symbol) \ - V(Oddball) \ - V(Code) \ - V(Map) \ - V(Cell) \ - V(PropertyCell) \ - V(SharedFunctionInfo) \ - V(JSFunction) \ - V(JSWeakMap) \ - V(JSWeakSet) \ - V(JSArrayBuffer) \ - V(JSTypedArray) \ - V(JSDataView) \ - V(JSRegExp) - - // For data objects, JS objects and structs along with generic visitor which - // can visit object of any size we provide visitors specialized by - // object size in words. - // Ids of specialized visitors are declared in a linear order (without - // holes) starting from the id of visitor specialized for 2 words objects - // (base visitor id) and ending with the id of generic visitor. - // Method GetVisitorIdForSize depends on this ordering to calculate visitor - // id of specialized visitor from given instance size, base visitor id and - // generic visitor's id. 
- enum VisitorId { -#define VISITOR_ID_ENUM_DECL(id) kVisit##id, - VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL) -#undef VISITOR_ID_ENUM_DECL - kVisitorIdCount, - kVisitDataObject = kVisitDataObject2, - kVisitJSObject = kVisitJSObject2, - kVisitStruct = kVisitStruct2, - kMinObjectSizeInWords = 2 - }; - - // Visitor ID should fit in one byte. - STATIC_ASSERT(kVisitorIdCount <= 256); - - // Determine which specialized visitor should be used for given instance type - // and instance type. - static VisitorId GetVisitorId(int instance_type, int instance_size); - - static VisitorId GetVisitorId(Map* map) { - return GetVisitorId(map->instance_type(), map->instance_size()); - } - - // For visitors that allow specialization by size calculate VisitorId based - // on size, base visitor id and generic visitor id. - static VisitorId GetVisitorIdForSize(VisitorId base, - VisitorId generic, - int object_size) { - ASSERT((base == kVisitDataObject) || - (base == kVisitStruct) || - (base == kVisitJSObject)); - ASSERT(IsAligned(object_size, kPointerSize)); - ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size); - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); - - const VisitorId specialization = static_cast<VisitorId>( - base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords); - - return Min(specialization, generic); - } -}; - - -template<typename Callback> -class VisitorDispatchTable { - public: - void CopyFrom(VisitorDispatchTable* other) { - // We are not using memcpy to guarantee that during update - // every element of callbacks_ array will remain correct - // pointer (memcpy might be implemented as a byte copying loop). 
- for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) { - NoBarrier_Store(&callbacks_[i], other->callbacks_[i]); - } - } - - inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) { - return reinterpret_cast<Callback>(callbacks_[id]); - } - - inline Callback GetVisitor(Map* map) { - return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]); - } - - void Register(StaticVisitorBase::VisitorId id, Callback callback) { - ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned. - callbacks_[id] = reinterpret_cast<AtomicWord>(callback); - } - - template<typename Visitor, - StaticVisitorBase::VisitorId base, - StaticVisitorBase::VisitorId generic, - int object_size_in_words> - void RegisterSpecialization() { - static const int size = object_size_in_words * kPointerSize; - Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size), - &Visitor::template VisitSpecialized<size>); - } - - - template<typename Visitor, - StaticVisitorBase::VisitorId base, - StaticVisitorBase::VisitorId generic> - void RegisterSpecializations() { - STATIC_ASSERT( - (generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10); - RegisterSpecialization<Visitor, base, generic, 2>(); - RegisterSpecialization<Visitor, base, generic, 3>(); - RegisterSpecialization<Visitor, base, generic, 4>(); - RegisterSpecialization<Visitor, base, generic, 5>(); - RegisterSpecialization<Visitor, base, generic, 6>(); - RegisterSpecialization<Visitor, base, generic, 7>(); - RegisterSpecialization<Visitor, base, generic, 8>(); - RegisterSpecialization<Visitor, base, generic, 9>(); - Register(generic, &Visitor::Visit); - } - - private: - AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount]; -}; - - -template<typename StaticVisitor> -class BodyVisitorBase : public AllStatic { - public: - INLINE(static void IteratePointers(Heap* heap, - HeapObject* object, - int start_offset, - int end_offset)) { - Object** start_slot = reinterpret_cast<Object**>(object->address() + 
- start_offset); - Object** end_slot = reinterpret_cast<Object**>(object->address() + - end_offset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); - } -}; - - -template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType> -class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> { - public: - INLINE(static ReturnType Visit(Map* map, HeapObject* object)) { - int object_size = BodyDescriptor::SizeOf(map, object); - BodyVisitorBase<StaticVisitor>::IteratePointers( - map->GetHeap(), - object, - BodyDescriptor::kStartOffset, - object_size); - return static_cast<ReturnType>(object_size); - } - - template<int object_size> - static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) { - ASSERT(BodyDescriptor::SizeOf(map, object) == object_size); - BodyVisitorBase<StaticVisitor>::IteratePointers( - map->GetHeap(), - object, - BodyDescriptor::kStartOffset, - object_size); - return static_cast<ReturnType>(object_size); - } -}; - - -template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType> -class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> { - public: - INLINE(static ReturnType Visit(Map* map, HeapObject* object)) { - BodyVisitorBase<StaticVisitor>::IteratePointers( - map->GetHeap(), - object, - BodyDescriptor::kStartOffset, - BodyDescriptor::kEndOffset); - return static_cast<ReturnType>(BodyDescriptor::kSize); - } -}; - - -// Base class for visitors used for a linear new space iteration. -// IterateBody returns size of visited object. -// Certain types of objects (i.e. Code objects) are not handled -// by dispatch table of this visitor because they cannot appear -// in the new space. -// -// This class is intended to be used in the following way: -// -// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> { -// ... -// } -// -// This is an example of Curiously recurring template pattern -// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). 
-// We use CRTP to guarantee aggressive compile time optimizations (i.e. -// inlining and specialization of StaticVisitor::VisitPointers methods). -template<typename StaticVisitor> -class StaticNewSpaceVisitor : public StaticVisitorBase { - public: - static void Initialize(); - - INLINE(static int IterateBody(Map* map, HeapObject* obj)) { - return table_.GetVisitor(map)(map, obj); - } - - INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { - for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p); - } - - private: - INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) { - Heap* heap = map->GetHeap(); - VisitPointers(heap, - HeapObject::RawField(object, JSFunction::kPropertiesOffset), - HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); - - // Don't visit code entry. We are using this visitor only during scavenges. - - VisitPointers( - heap, - HeapObject::RawField(object, - JSFunction::kCodeEntryOffset + kPointerSize), - HeapObject::RawField(object, - JSFunction::kNonWeakFieldsEndOffset)); - return JSFunction::kSize; - } - - INLINE(static int VisitByteArray(Map* map, HeapObject* object)) { - return reinterpret_cast<ByteArray*>(object)->ByteArraySize(); - } - - INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) { - int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); - return FixedDoubleArray::SizeFor(length); - } - - INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) { - return reinterpret_cast<FixedTypedArrayBase*>(object)->size(); - } - - INLINE(static int VisitJSObject(Map* map, HeapObject* object)) { - return JSObjectVisitor::Visit(map, object); - } - - INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) { - return SeqOneByteString::cast(object)-> - SeqOneByteStringSize(map->instance_type()); - } - - INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) { - return SeqTwoByteString::cast(object)-> - 
SeqTwoByteStringSize(map->instance_type()); - } - - INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) { - return FreeSpace::cast(object)->Size(); - } - - INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object)); - INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object)); - INLINE(static int VisitJSDataView(Map* map, HeapObject* object)); - - class DataObjectVisitor { - public: - template<int object_size> - static inline int VisitSpecialized(Map* map, HeapObject* object) { - return object_size; - } - - INLINE(static int Visit(Map* map, HeapObject* object)) { - return map->instance_size(); - } - }; - - typedef FlexibleBodyVisitor<StaticVisitor, - StructBodyDescriptor, - int> StructVisitor; - - typedef FlexibleBodyVisitor<StaticVisitor, - JSObject::BodyDescriptor, - int> JSObjectVisitor; - - typedef int (*Callback)(Map* map, HeapObject* object); - - static VisitorDispatchTable<Callback> table_; -}; - - -template<typename StaticVisitor> -VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback> - StaticNewSpaceVisitor<StaticVisitor>::table_; - - -// Base class for visitors used to transitively mark the entire heap. -// IterateBody returns nothing. -// Certain types of objects might not be handled by this base class and -// no visitor function is registered by the generic initialization. A -// specialized visitor function needs to be provided by the inheriting -// class itself for those cases. -// -// This class is intended to be used in the following way: -// -// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> { -// ... -// } -// -// This is an example of Curiously recurring template pattern. 
-template<typename StaticVisitor> -class StaticMarkingVisitor : public StaticVisitorBase { - public: - static void Initialize(); - - INLINE(static void IterateBody(Map* map, HeapObject* obj)) { - table_.GetVisitor(map)(map, obj); - } - - INLINE(static void VisitPropertyCell(Map* map, HeapObject* object)); - INLINE(static void VisitAllocationSite(Map* map, HeapObject* object)); - INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address)); - INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo)); - INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo)); - INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo)); - INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo)); - INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo)); - INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { } - INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { } - // Skip the weak next code link in a code object. - INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) { } - - // TODO(mstarzinger): This should be made protected once refactoring is done. - // Mark non-optimize code for functions inlined into the given optimized - // code. This will prevent it from being flushed. 
- static void MarkInlinedFunctionsCode(Heap* heap, Code* code); - - protected: - INLINE(static void VisitMap(Map* map, HeapObject* object)); - INLINE(static void VisitCode(Map* map, HeapObject* object)); - INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object)); - INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object)); - INLINE(static void VisitJSFunction(Map* map, HeapObject* object)); - INLINE(static void VisitJSRegExp(Map* map, HeapObject* object)); - INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object)); - INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object)); - INLINE(static void VisitJSDataView(Map* map, HeapObject* object)); - INLINE(static void VisitNativeContext(Map* map, HeapObject* object)); - - // Mark pointers in a Map and its TransitionArray together, possibly - // treating transitions or back pointers weak. - static void MarkMapContents(Heap* heap, Map* map); - static void MarkTransitionArray(Heap* heap, TransitionArray* transitions); - - // Code flushing support. - INLINE(static bool IsFlushable(Heap* heap, JSFunction* function)); - INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info)); - - // Helpers used by code flushing support that visit pointer fields and treat - // references to code objects either strongly or weakly. 
- static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object); - static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object); - static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object); - static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object); - - class DataObjectVisitor { - public: - template<int size> - static inline void VisitSpecialized(Map* map, HeapObject* object) { - } - - INLINE(static void Visit(Map* map, HeapObject* object)) { - } - }; - - typedef FlexibleBodyVisitor<StaticVisitor, - FixedArray::BodyDescriptor, - void> FixedArrayVisitor; - - typedef FlexibleBodyVisitor<StaticVisitor, - JSObject::BodyDescriptor, - void> JSObjectVisitor; - - typedef FlexibleBodyVisitor<StaticVisitor, - StructBodyDescriptor, - void> StructObjectVisitor; - - typedef void (*Callback)(Map* map, HeapObject* object); - - static VisitorDispatchTable<Callback> table_; -}; - - -template<typename StaticVisitor> -VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback> - StaticMarkingVisitor<StaticVisitor>::table_; - - -} } // namespace v8::internal - -#endif // V8_OBJECTS_VISITING_H_ diff -Nru nodejs-0.11.13/deps/v8/src/objects-visiting-inl.h nodejs-0.11.15/deps/v8/src/objects-visiting-inl.h --- nodejs-0.11.13/deps/v8/src/objects-visiting-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/objects-visiting-inl.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,954 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_OBJECTS_VISITING_INL_H_ -#define V8_OBJECTS_VISITING_INL_H_ - - -namespace v8 { -namespace internal { - -template<typename StaticVisitor> -void StaticNewSpaceVisitor<StaticVisitor>::Initialize() { - table_.Register(kVisitShortcutCandidate, - &FixedBodyVisitor<StaticVisitor, - ConsString::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitConsString, - &FixedBodyVisitor<StaticVisitor, - ConsString::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitSlicedString, - &FixedBodyVisitor<StaticVisitor, - SlicedString::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitSymbol, - &FixedBodyVisitor<StaticVisitor, - Symbol::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitFixedArray, - &FlexibleBodyVisitor<StaticVisitor, - FixedArray::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray); - table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray); - table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray); - - table_.Register(kVisitNativeContext, - &FixedBodyVisitor<StaticVisitor, - Context::ScavengeBodyDescriptor, - int>::Visit); - - table_.Register(kVisitByteArray, &VisitByteArray); - - table_.Register(kVisitSharedFunctionInfo, - &FixedBodyVisitor<StaticVisitor, - SharedFunctionInfo::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString); - - table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString); - - table_.Register(kVisitJSFunction, &VisitJSFunction); - - table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer); - - table_.Register(kVisitJSTypedArray, &VisitJSTypedArray); - - table_.Register(kVisitJSDataView, &VisitJSDataView); - - table_.Register(kVisitFreeSpace, &VisitFreeSpace); - - table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit); - - table_.Register(kVisitJSWeakSet, &JSObjectVisitor::Visit); - - table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit); - - table_.template 
RegisterSpecializations<DataObjectVisitor, - kVisitDataObject, - kVisitDataObjectGeneric>(); - - table_.template RegisterSpecializations<JSObjectVisitor, - kVisitJSObject, - kVisitJSObjectGeneric>(); - table_.template RegisterSpecializations<StructVisitor, - kVisitStruct, - kVisitStructGeneric>(); -} - - -template<typename StaticVisitor> -int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - - STATIC_ASSERT( - JSArrayBuffer::kWeakFirstViewOffset == - JSArrayBuffer::kWeakNextOffset + kPointerSize); - VisitPointers( - heap, - HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset), - HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset)); - VisitPointers( - heap, - HeapObject::RawField(object, - JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize), - HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields)); - return JSArrayBuffer::kSizeWithInternalFields; -} - - -template<typename StaticVisitor> -int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray( - Map* map, HeapObject* object) { - VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset), - HeapObject::RawField(object, JSTypedArray::kWeakNextOffset)); - VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, - JSTypedArray::kWeakNextOffset + kPointerSize), - HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields)); - return JSTypedArray::kSizeWithInternalFields; -} - - -template<typename StaticVisitor> -int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView( - Map* map, HeapObject* object) { - VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset), - HeapObject::RawField(object, JSDataView::kWeakNextOffset)); - VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, - JSDataView::kWeakNextOffset + kPointerSize), - HeapObject::RawField(object, 
JSDataView::kSizeWithInternalFields)); - return JSDataView::kSizeWithInternalFields; -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::Initialize() { - table_.Register(kVisitShortcutCandidate, - &FixedBodyVisitor<StaticVisitor, - ConsString::BodyDescriptor, - void>::Visit); - - table_.Register(kVisitConsString, - &FixedBodyVisitor<StaticVisitor, - ConsString::BodyDescriptor, - void>::Visit); - - table_.Register(kVisitSlicedString, - &FixedBodyVisitor<StaticVisitor, - SlicedString::BodyDescriptor, - void>::Visit); - - table_.Register(kVisitSymbol, - &FixedBodyVisitor<StaticVisitor, - Symbol::BodyDescriptor, - void>::Visit); - - table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit); - - table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit); - - table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit); - - table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit); - - table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray); - - table_.Register(kVisitNativeContext, &VisitNativeContext); - - table_.Register(kVisitAllocationSite, &VisitAllocationSite); - - table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); - - table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); - - table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit); - - table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); - - table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitWeakCollection); - - table_.Register(kVisitJSWeakSet, &StaticVisitor::VisitWeakCollection); - - table_.Register(kVisitOddball, - &FixedBodyVisitor<StaticVisitor, - Oddball::BodyDescriptor, - void>::Visit); - - table_.Register(kVisitMap, &VisitMap); - - table_.Register(kVisitCode, &VisitCode); - - table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo); - - table_.Register(kVisitJSFunction, &VisitJSFunction); - - table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer); - - 
table_.Register(kVisitJSTypedArray, &VisitJSTypedArray); - - table_.Register(kVisitJSDataView, &VisitJSDataView); - - // Registration for kVisitJSRegExp is done by StaticVisitor. - - table_.Register(kVisitCell, - &FixedBodyVisitor<StaticVisitor, - Cell::BodyDescriptor, - void>::Visit); - - table_.Register(kVisitPropertyCell, &VisitPropertyCell); - - table_.template RegisterSpecializations<DataObjectVisitor, - kVisitDataObject, - kVisitDataObjectGeneric>(); - - table_.template RegisterSpecializations<JSObjectVisitor, - kVisitJSObject, - kVisitJSObjectGeneric>(); - - table_.template RegisterSpecializations<StructObjectVisitor, - kVisitStruct, - kVisitStructGeneric>(); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry( - Heap* heap, Address entry_address) { - Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); - heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code); - StaticVisitor::MarkObject(heap, code); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer( - Heap* heap, RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); - ASSERT(!rinfo->target_object()->IsConsString()); - HeapObject* object = HeapObject::cast(rinfo->target_object()); - heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); - // TODO(ulan): It could be better to record slots only for strongly embedded - // objects here and record slots for weakly embedded object during clearing - // of non-live references in mark-compact. - if (!rinfo->host()->IsWeakObject(object)) { - StaticVisitor::MarkObject(heap, object); - } -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitCell( - Heap* heap, RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::CELL); - Cell* cell = rinfo->target_cell(); - // No need to record slots because the cell space is not compacted during GC. 
- if (!rinfo->host()->IsWeakObject(cell)) { - StaticVisitor::MarkObject(heap, cell); - } -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget( - Heap* heap, RelocInfo* rinfo) { - ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && - rinfo->IsPatchedReturnSequence()) || - (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && - rinfo->IsPatchedDebugBreakSlotSequence())); - Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); - heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); - StaticVisitor::MarkObject(heap, target); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget( - Heap* heap, RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); - // Monomorphic ICs are preserved when possible, but need to be flushed - // when they might be keeping a Context alive, or when the heap is about - // to be serialized. 
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() - && (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC || - target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() || - Serializer::enabled() || target->ic_age() != heap->global_ic_age())) { - IC::Clear(target->GetIsolate(), rinfo->pc(), - rinfo->host()->constant_pool()); - target = Code::GetCodeFromTargetAddress(rinfo->target_address()); - } - heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); - StaticVisitor::MarkObject(heap, target); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence( - Heap* heap, RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); - Code* target = rinfo->code_age_stub(); - ASSERT(target != NULL); - heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); - StaticVisitor::MarkObject(heap, target); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext( - Map* map, HeapObject* object) { - FixedBodyVisitor<StaticVisitor, - Context::MarkCompactBodyDescriptor, - void>::Visit(map, object); - - MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); - for (int idx = Context::FIRST_WEAK_SLOT; - idx < Context::NATIVE_CONTEXT_SLOTS; - ++idx) { - Object** slot = Context::cast(object)->RawFieldOfElementAt(idx); - collector->RecordSlot(slot, slot, *slot); - } -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitMap( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - Map* map_object = Map::cast(object); - - // Clears the cache of ICs related to this map. - if (FLAG_cleanup_code_caches_at_gc) { - map_object->ClearCodeCache(heap); - } - - // When map collection is enabled we have to mark through map's transitions - // and back pointers in a special way to make these links weak. 
- if (FLAG_collect_maps && map_object->CanTransition()) { - MarkMapContents(heap, map_object); - } else { - StaticVisitor::VisitPointers(heap, - HeapObject::RawField(object, Map::kPointerFieldsBeginOffset), - HeapObject::RawField(object, Map::kPointerFieldsEndOffset)); - } -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - - Object** slot = - HeapObject::RawField(object, PropertyCell::kDependentCodeOffset); - if (FLAG_collect_maps) { - // Mark property cell dependent codes array but do not push it onto marking - // stack, this will make references from it weak. We will clean dead - // codes when we iterate over property cells in ClearNonLiveReferences. - HeapObject* obj = HeapObject::cast(*slot); - heap->mark_compact_collector()->RecordSlot(slot, slot, obj); - StaticVisitor::MarkObjectWithoutPush(heap, obj); - } else { - StaticVisitor::VisitPointer(heap, slot); - } - - StaticVisitor::VisitPointers(heap, - HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset), - HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - - Object** slot = - HeapObject::RawField(object, AllocationSite::kDependentCodeOffset); - if (FLAG_collect_maps) { - // Mark allocation site dependent codes array but do not push it onto - // marking stack, this will make references from it weak. We will clean - // dead codes when we iterate over allocation sites in - // ClearNonLiveReferences. 
- HeapObject* obj = HeapObject::cast(*slot); - heap->mark_compact_collector()->RecordSlot(slot, slot, obj); - StaticVisitor::MarkObjectWithoutPush(heap, obj); - } else { - StaticVisitor::VisitPointer(heap, slot); - } - - StaticVisitor::VisitPointers(heap, - HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset), - HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitCode( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - Code* code = Code::cast(object); - if (FLAG_cleanup_code_caches_at_gc) { - code->ClearTypeFeedbackInfo(heap); - } - if (FLAG_age_code && !Serializer::enabled()) { - code->MakeOlder(heap->mark_compact_collector()->marking_parity()); - } - code->CodeIterateBody<StaticVisitor>(heap); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - SharedFunctionInfo* shared = SharedFunctionInfo::cast(object); - if (shared->ic_age() != heap->global_ic_age()) { - shared->ResetForNewContext(heap->global_ic_age()); - } - if (FLAG_cache_optimized_code && - FLAG_flush_optimized_code_cache && - !shared->optimized_code_map()->IsSmi()) { - // Always flush the optimized code map if requested by flag. - shared->ClearOptimizedCodeMap(); - } - MarkCompactCollector* collector = heap->mark_compact_collector(); - if (collector->is_code_flushing_enabled()) { - if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) { - // Add the shared function info holding an optimized code map to - // the code flusher for processing of code maps after marking. - collector->code_flusher()->AddOptimizedCodeMap(shared); - // Treat all references within the code map weakly by marking the - // code map itself but not pushing it onto the marking deque. 
- FixedArray* code_map = FixedArray::cast(shared->optimized_code_map()); - StaticVisitor::MarkObjectWithoutPush(heap, code_map); - } - if (IsFlushable(heap, shared)) { - // This function's code looks flushable. But we have to postpone - // the decision until we see all functions that point to the same - // SharedFunctionInfo because some of them might be optimized. - // That would also make the non-optimized version of the code - // non-flushable, because it is required for bailing out from - // optimized code. - collector->code_flusher()->AddCandidate(shared); - // Treat the reference to the code object weakly. - VisitSharedFunctionInfoWeakCode(heap, object); - return; - } - } else { - if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) { - // Flush optimized code map on major GCs without code flushing, - // needed because cached code doesn't contain breakpoints. - shared->ClearOptimizedCodeMap(); - } - } - VisitSharedFunctionInfoStrongCode(heap, object); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object); - for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) { - int index = constant_pool->first_code_ptr_index() + i; - Address code_entry = - reinterpret_cast<Address>(constant_pool->RawFieldOfElementAt(index)); - StaticVisitor::VisitCodeEntry(heap, code_entry); - } - for (int i = 0; i < constant_pool->count_of_heap_ptr_entries(); i++) { - int index = constant_pool->first_heap_ptr_index() + i; - StaticVisitor::VisitPointer(heap, - constant_pool->RawFieldOfElementAt(index)); - } -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - JSFunction* function = JSFunction::cast(object); - MarkCompactCollector* collector = 
heap->mark_compact_collector(); - if (collector->is_code_flushing_enabled()) { - if (IsFlushable(heap, function)) { - // This function's code looks flushable. But we have to postpone - // the decision until we see all functions that point to the same - // SharedFunctionInfo because some of them might be optimized. - // That would also make the non-optimized version of the code - // non-flushable, because it is required for bailing out from - // optimized code. - collector->code_flusher()->AddCandidate(function); - // Visit shared function info immediately to avoid double checking - // of its flushability later. This is just an optimization because - // the shared function info would eventually be visited. - SharedFunctionInfo* shared = function->shared(); - if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) { - StaticVisitor::MarkObject(heap, shared->map()); - VisitSharedFunctionInfoWeakCode(heap, shared); - } - // Treat the reference to the code object weakly. - VisitJSFunctionWeakCode(heap, object); - return; - } else { - // Visit all unoptimized code objects to prevent flushing them. 
- StaticVisitor::MarkObject(heap, function->shared()->code()); - if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) { - MarkInlinedFunctionsCode(heap, function->code()); - } - } - } - VisitJSFunctionStrongCode(heap, object); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp( - Map* map, HeapObject* object) { - int last_property_offset = - JSRegExp::kSize + kPointerSize * map->inobject_properties(); - StaticVisitor::VisitPointers(map->GetHeap(), - HeapObject::RawField(object, JSRegExp::kPropertiesOffset), - HeapObject::RawField(object, last_property_offset)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer( - Map* map, HeapObject* object) { - Heap* heap = map->GetHeap(); - - STATIC_ASSERT( - JSArrayBuffer::kWeakFirstViewOffset == - JSArrayBuffer::kWeakNextOffset + kPointerSize); - StaticVisitor::VisitPointers( - heap, - HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset), - HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset)); - StaticVisitor::VisitPointers( - heap, - HeapObject::RawField(object, - JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize), - HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray( - Map* map, HeapObject* object) { - StaticVisitor::VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset), - HeapObject::RawField(object, JSTypedArray::kWeakNextOffset)); - StaticVisitor::VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, - JSTypedArray::kWeakNextOffset + kPointerSize), - HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView( - Map* map, HeapObject* object) { - StaticVisitor::VisitPointers( - map->GetHeap(), - 
HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset), - HeapObject::RawField(object, JSDataView::kWeakNextOffset)); - StaticVisitor::VisitPointers( - map->GetHeap(), - HeapObject::RawField(object, - JSDataView::kWeakNextOffset + kPointerSize), - HeapObject::RawField(object, JSDataView::kSizeWithInternalFields)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::MarkMapContents( - Heap* heap, Map* map) { - // Make sure that the back pointer stored either in the map itself or - // inside its transitions array is marked. Skip recording the back - // pointer slot since map space is not compacted. - StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer())); - - // Treat pointers in the transitions array as weak and also mark that - // array to prevent visiting it later. Skip recording the transition - // array slot, since it will be implicitly recorded when the pointer - // fields of this map are visited. - TransitionArray* transitions = map->unchecked_transition_array(); - if (transitions->IsTransitionArray()) { - MarkTransitionArray(heap, transitions); - } else { - // Already marked by marking map->GetBackPointer() above. - ASSERT(transitions->IsMap() || transitions->IsUndefined()); - } - - // Since descriptor arrays are potentially shared, ensure that only the - // descriptors that belong to this map are marked. The first time a - // non-empty descriptor array is marked, its header is also visited. The slot - // holding the descriptor array will be implicitly recorded when the pointer - // fields of this map are visited. 
- DescriptorArray* descriptors = map->instance_descriptors(); - if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) && - descriptors->length() > 0) { - StaticVisitor::VisitPointers(heap, - descriptors->GetFirstElementAddress(), - descriptors->GetDescriptorEndSlot(0)); - } - int start = 0; - int end = map->NumberOfOwnDescriptors(); - if (start < end) { - StaticVisitor::VisitPointers(heap, - descriptors->GetDescriptorStartSlot(start), - descriptors->GetDescriptorEndSlot(end)); - } - - // Mark prototype dependent codes array but do not push it onto marking - // stack, this will make references from it weak. We will clean dead - // codes when we iterate over maps in ClearNonLiveTransitions. - Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset); - HeapObject* obj = HeapObject::cast(*slot); - heap->mark_compact_collector()->RecordSlot(slot, slot, obj); - StaticVisitor::MarkObjectWithoutPush(heap, obj); - - // Mark the pointer fields of the Map. Since the transitions array has - // been marked already, it is fine that one of these fields contains a - // pointer to it. - StaticVisitor::VisitPointers(heap, - HeapObject::RawField(map, Map::kPointerFieldsBeginOffset), - HeapObject::RawField(map, Map::kPointerFieldsEndOffset)); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray( - Heap* heap, TransitionArray* transitions) { - if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return; - - // Simple transitions do not have keys nor prototype transitions. - if (transitions->IsSimpleTransition()) return; - - if (transitions->HasPrototypeTransitions()) { - // Mark prototype transitions array but do not push it onto marking - // stack, this will make references from it weak. We will clean dead - // prototype transitions in ClearNonLiveTransitions. 
- Object** slot = transitions->GetPrototypeTransitionsSlot(); - HeapObject* obj = HeapObject::cast(*slot); - heap->mark_compact_collector()->RecordSlot(slot, slot, obj); - StaticVisitor::MarkObjectWithoutPush(heap, obj); - } - - for (int i = 0; i < transitions->number_of_transitions(); ++i) { - StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i)); - } -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode( - Heap* heap, Code* code) { - // For optimized functions we should retain both non-optimized version - // of its code and non-optimized version of all inlined functions. - // This is required to support bailing out from inlined code. - DeoptimizationInputData* data = - DeoptimizationInputData::cast(code->deoptimization_data()); - FixedArray* literals = data->LiteralArray(); - for (int i = 0, count = data->InlinedFunctionCount()->value(); - i < count; - i++) { - JSFunction* inlined = JSFunction::cast(literals->get(i)); - StaticVisitor::MarkObject(heap, inlined->shared()->code()); - } -} - - -inline static bool IsValidNonBuiltinContext(Object* context) { - return context->IsContext() && - !Context::cast(context)->global_object()->IsJSBuiltinsObject(); -} - - -inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { - Object* undefined = heap->undefined_value(); - return (info->script() != undefined) && - (reinterpret_cast<Script*>(info->script())->source() != undefined); -} - - -template<typename StaticVisitor> -bool StaticMarkingVisitor<StaticVisitor>::IsFlushable( - Heap* heap, JSFunction* function) { - SharedFunctionInfo* shared_info = function->shared(); - - // Code is either on stack, in compilation cache or referenced - // by optimized version of function. - MarkBit code_mark = Marking::MarkBitFrom(function->code()); - if (code_mark.Get()) { - return false; - } - - // The function must have a valid context and not be a builtin. 
- if (!IsValidNonBuiltinContext(function->context())) { - return false; - } - - // We do not (yet) flush code for optimized functions. - if (function->code() != shared_info->code()) { - return false; - } - - // Check age of optimized code. - if (FLAG_age_code && !function->code()->IsOld()) { - return false; - } - - return IsFlushable(heap, shared_info); -} - - -template<typename StaticVisitor> -bool StaticMarkingVisitor<StaticVisitor>::IsFlushable( - Heap* heap, SharedFunctionInfo* shared_info) { - // Code is either on stack, in compilation cache or referenced - // by optimized version of function. - MarkBit code_mark = Marking::MarkBitFrom(shared_info->code()); - if (code_mark.Get()) { - return false; - } - - // The function must be compiled and have the source code available, - // to be able to recompile it in case we need the function again. - if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) { - return false; - } - - // We never flush code for API functions. - Object* function_data = shared_info->function_data(); - if (function_data->IsFunctionTemplateInfo()) { - return false; - } - - // Only flush code for functions. - if (shared_info->code()->kind() != Code::FUNCTION) { - return false; - } - - // Function must be lazy compilable. - if (!shared_info->allows_lazy_compilation()) { - return false; - } - - // We do not (yet?) flush code for generator functions, because we don't know - // if there are still live activations (generator objects) on the heap. - if (shared_info->is_generator()) { - return false; - } - - // If this is a full script wrapped in a function we do not flush the code. - if (shared_info->is_toplevel()) { - return false; - } - - // If this is a function initialized with %SetCode then the one-to-one - // relation between SharedFunctionInfo and Code is broken. - if (shared_info->dont_flush()) { - return false; - } - - // Check age of code. If code aging is disabled we never flush. 
- if (!FLAG_age_code || !shared_info->code()->IsOld()) { - return false; - } - - return true; -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode( - Heap* heap, HeapObject* object) { - StaticVisitor::BeforeVisitingSharedFunctionInfo(object); - Object** start_slot = - HeapObject::RawField(object, - SharedFunctionInfo::BodyDescriptor::kStartOffset); - Object** end_slot = - HeapObject::RawField(object, - SharedFunctionInfo::BodyDescriptor::kEndOffset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode( - Heap* heap, HeapObject* object) { - StaticVisitor::BeforeVisitingSharedFunctionInfo(object); - Object** name_slot = - HeapObject::RawField(object, SharedFunctionInfo::kNameOffset); - StaticVisitor::VisitPointer(heap, name_slot); - - // Skip visiting kCodeOffset as it is treated weakly here. - STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize == - SharedFunctionInfo::kCodeOffset); - STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize == - SharedFunctionInfo::kOptimizedCodeMapOffset); - - Object** start_slot = - HeapObject::RawField(object, - SharedFunctionInfo::kOptimizedCodeMapOffset); - Object** end_slot = - HeapObject::RawField(object, - SharedFunctionInfo::BodyDescriptor::kEndOffset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode( - Heap* heap, HeapObject* object) { - Object** start_slot = - HeapObject::RawField(object, JSFunction::kPropertiesOffset); - Object** end_slot = - HeapObject::RawField(object, JSFunction::kCodeEntryOffset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); - - VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); - STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize == - 
JSFunction::kPrototypeOrInitialMapOffset); - - start_slot = - HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset); - end_slot = - HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); -} - - -template<typename StaticVisitor> -void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode( - Heap* heap, HeapObject* object) { - Object** start_slot = - HeapObject::RawField(object, JSFunction::kPropertiesOffset); - Object** end_slot = - HeapObject::RawField(object, JSFunction::kCodeEntryOffset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); - - // Skip visiting kCodeEntryOffset as it is treated weakly here. - STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize == - JSFunction::kPrototypeOrInitialMapOffset); - - start_slot = - HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset); - end_slot = - HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset); - StaticVisitor::VisitPointers(heap, start_slot, end_slot); -} - - -void Code::CodeIterateBody(ObjectVisitor* v) { - int mode_mask = RelocInfo::kCodeTargetMask | - RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | - RelocInfo::ModeMask(RelocInfo::CELL) | - RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | - RelocInfo::ModeMask(RelocInfo::JS_RETURN) | - RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | - RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); - - // There are two places where we iterate code bodies: here and the - // templated CodeIterateBody (below). They should be kept in sync. 
- IteratePointer(v, kRelocationInfoOffset); - IteratePointer(v, kHandlerTableOffset); - IteratePointer(v, kDeoptimizationDataOffset); - IteratePointer(v, kTypeFeedbackInfoOffset); - IterateNextCodeLink(v, kNextCodeLinkOffset); - IteratePointer(v, kConstantPoolOffset); - - RelocIterator it(this, mode_mask); - Isolate* isolate = this->GetIsolate(); - for (; !it.done(); it.next()) { - it.rinfo()->Visit(isolate, v); - } -} - - -template<typename StaticVisitor> -void Code::CodeIterateBody(Heap* heap) { - int mode_mask = RelocInfo::kCodeTargetMask | - RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | - RelocInfo::ModeMask(RelocInfo::CELL) | - RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | - RelocInfo::ModeMask(RelocInfo::JS_RETURN) | - RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | - RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); - - // There are two places where we iterate code bodies: here and the non- - // templated CodeIterateBody (above). They should be kept in sync. - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset)); - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kHandlerTableOffset)); - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset)); - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset)); - StaticVisitor::VisitNextCodeLink( - heap, - reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset)); - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kConstantPoolOffset)); - - - RelocIterator it(this, mode_mask); - for (; !it.done(); it.next()) { - it.rinfo()->template Visit<StaticVisitor>(heap); - } -} - - -} } // namespace v8::internal - -#endif // V8_OBJECTS_VISITING_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/once.cc nodejs-0.11.15/deps/v8/src/once.cc --- nodejs-0.11.13/deps/v8/src/once.cc 2014-05-02 
00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/once.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "once.h" - -#ifdef _WIN32 -#include <windows.h> -#else -#include <sched.h> -#endif - -#include "atomicops.h" -#include "checks.h" - -namespace v8 { -namespace internal { - -void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) { - AtomicWord state = Acquire_Load(once); - // Fast path. The provided function was already executed. - if (state == ONCE_STATE_DONE) { - return; - } - - // The function execution did not complete yet. The once object can be in one - // of the two following states: - // - UNINITIALIZED: We are the first thread calling this function. - // - EXECUTING_FUNCTION: Another thread is already executing the function. - // - // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION - // atomically. - state = Acquire_CompareAndSwap( - once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION); - if (state == ONCE_STATE_UNINITIALIZED) { - // We are the first thread to call this function, so we have to call the - // function. - init_func(arg); - Release_Store(once, ONCE_STATE_DONE); - } else { - // Another thread has already started executing the function. We need to - // wait until it completes the initialization. - while (state == ONCE_STATE_EXECUTING_FUNCTION) { -#ifdef _WIN32 - ::Sleep(0); -#else - sched_yield(); -#endif - state = Acquire_Load(once); - } - } -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/once.h nodejs-0.11.15/deps/v8/src/once.h --- nodejs-0.11.13/deps/v8/src/once.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/once.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// emulates google3/base/once.h -// -// This header is intended to be included only by v8's internal code. Users -// should not use this directly. -// -// This is basically a portable version of pthread_once(). -// -// This header declares: -// * A type called OnceType. -// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type -// OnceType. -// * A function CallOnce(OnceType* once, void (*init_func)()). -// This function, when invoked multiple times given the same OnceType object, -// will invoke init_func on the first call only, and will make sure none of -// the calls return before that first call to init_func has finished. 
-// -// Additionally, the following features are supported: -// * A macro V8_ONCE_INIT which is expanded into the expression used to -// initialize a OnceType. This is only useful when clients embed a OnceType -// into a structure of their own and want to initialize it statically. -// * The user can provide a parameter which CallOnce() forwards to the -// user-provided function when it is called. Usage example: -// CallOnce(&my_once, &MyFunctionExpectingIntArgument, 10); -// * This implementation guarantees that OnceType is a POD (i.e. no static -// initializer generated). -// -// This implements a way to perform lazy initialization. It's more efficient -// than using mutexes as no lock is needed if initialization has already -// happened. -// -// Example usage: -// void Init(); -// V8_DECLARE_ONCE(once_init); -// -// // Calls Init() exactly once. -// void InitOnce() { -// CallOnce(&once_init, &Init); -// } -// -// Note that if CallOnce() is called before main() has begun, it must -// only be called by the thread that will eventually call main() -- that is, -// the thread that performs dynamic initialization. In general this is a safe -// assumption since people don't usually construct threads before main() starts, -// but it is technically not guaranteed. Unfortunately, Win32 provides no way -// whatsoever to statically-initialize its synchronization primitives, so our -// only choice is to assume that dynamic initialization is single-threaded. 
- -#ifndef V8_ONCE_H_ -#define V8_ONCE_H_ - -#include "atomicops.h" - -namespace v8 { -namespace internal { - -typedef AtomicWord OnceType; - -#define V8_ONCE_INIT 0 - -#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME - -enum { - ONCE_STATE_UNINITIALIZED = 0, - ONCE_STATE_EXECUTING_FUNCTION = 1, - ONCE_STATE_DONE = 2 -}; - -typedef void (*NoArgFunction)(); -typedef void (*PointerArgFunction)(void* arg); - -template <typename T> -struct OneArgFunction { - typedef void (*type)(T); -}; - -void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg); - -inline void CallOnce(OnceType* once, NoArgFunction init_func) { - if (Acquire_Load(once) != ONCE_STATE_DONE) { - CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL); - } -} - - -template <typename Arg> -inline void CallOnce(OnceType* once, - typename OneArgFunction<Arg*>::type init_func, Arg* arg) { - if (Acquire_Load(once) != ONCE_STATE_DONE) { - CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), - static_cast<void*>(arg)); - } -} - -} } // namespace v8::internal - -#endif // V8_ONCE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/optimizing-compiler-thread.cc nodejs-0.11.15/deps/v8/src/optimizing-compiler-thread.cc --- nodejs-0.11.13/deps/v8/src/optimizing-compiler-thread.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/optimizing-compiler-thread.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,22 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "optimizing-compiler-thread.h" - -#include "v8.h" - -#include "full-codegen.h" -#include "hydrogen.h" -#include "isolate.h" -#include "v8threads.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/optimizing-compiler-thread.h" + +#include "src/v8.h" + +#include "src/base/atomicops.h" +#include "src/full-codegen.h" +#include "src/hydrogen.h" +#include "src/isolate.h" +#include "src/v8threads.h" namespace v8 { namespace internal { OptimizingCompilerThread::~OptimizingCompilerThread() { - ASSERT_EQ(0, input_queue_length_); + DCHECK_EQ(0, input_queue_length_); DeleteArray(input_queue_); if (FLAG_concurrent_osr) { #ifdef DEBUG @@ -53,7 +31,7 @@ void OptimizingCompilerThread::Run() { #ifdef DEBUG - { LockGuard<Mutex> lock_guard(&thread_id_mutex_); + { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); thread_id_ = ThreadId::Current().ToInteger(); } #endif @@ -62,19 +40,18 @@ DisallowHandleAllocation no_handles; DisallowHandleDereference no_deref; - ElapsedTimer total_timer; + base::ElapsedTimer total_timer; if (FLAG_trace_concurrent_recompilation) total_timer.Start(); while (true) { input_queue_semaphore_.Wait(); - Logger::TimerEventScope timer( - isolate_, Logger::TimerEventScope::v8_recompile_concurrent); + TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); if (FLAG_concurrent_recompilation_delay != 0) { - OS::Sleep(FLAG_concurrent_recompilation_delay); + base::OS::Sleep(FLAG_concurrent_recompilation_delay); } - switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) { + switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) { case CONTINUE: break; case STOP: @@ -88,13 +65,14 @@ { AllowHandleDereference allow_handle_dereference; FlushInputQueue(true); } - Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); + base::Release_Store(&stop_thread_, + static_cast<base::AtomicWord>(CONTINUE)); stop_semaphore_.Signal(); // Return to start of consumer loop. 
continue; } - ElapsedTimer compiling_timer; + base::ElapsedTimer compiling_timer; if (FLAG_trace_concurrent_recompilation) compiling_timer.Start(); CompileNext(); @@ -107,10 +85,10 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput() { - LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); + base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); if (input_queue_length_ == 0) return NULL; OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; - ASSERT_NE(NULL, job); + DCHECK_NE(NULL, job); input_queue_shift_ = InputQueueIndex(1); input_queue_length_--; return job; @@ -119,12 +97,12 @@ void OptimizingCompilerThread::CompileNext() { OptimizedCompileJob* job = NextInput(); - ASSERT_NE(NULL, job); + DCHECK_NE(NULL, job); // The function may have already been optimized by OSR. Simply continue. OptimizedCompileJob::Status status = job->OptimizeGraph(); USE(status); // Prevent an unused-variable error in release mode. - ASSERT(status != OptimizedCompileJob::FAILED); + DCHECK(status != OptimizedCompileJob::FAILED); // The function may have already been optimized by OSR. Simply continue. 
// Use a mutex to make sure that functions marked for install @@ -191,8 +169,8 @@ void OptimizingCompilerThread::Flush() { - ASSERT(!IsOptimizerThread()); - Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); + DCHECK(!IsOptimizerThread()); + base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); if (FLAG_block_concurrent_recompilation) Unblock(); input_queue_semaphore_.Signal(); stop_semaphore_.Wait(); @@ -205,8 +183,8 @@ void OptimizingCompilerThread::Stop() { - ASSERT(!IsOptimizerThread()); - Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); + DCHECK(!IsOptimizerThread()); + base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); if (FLAG_block_concurrent_recompilation) Unblock(); input_queue_semaphore_.Signal(); stop_semaphore_.Wait(); @@ -238,7 +216,7 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() { - ASSERT(!IsOptimizerThread()); + DCHECK(!IsOptimizerThread()); HandleScope handle_scope(isolate_); OptimizedCompileJob* job; @@ -271,23 +249,23 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) { - ASSERT(IsQueueAvailable()); - ASSERT(!IsOptimizerThread()); + DCHECK(IsQueueAvailable()); + DCHECK(!IsOptimizerThread()); CompilationInfo* info = job->info(); if (info->is_osr()) { osr_attempts_++; AddToOsrBuffer(job); // Add job to the front of the input queue. - LockGuard<Mutex> access_input_queue(&input_queue_mutex_); - ASSERT_LT(input_queue_length_, input_queue_capacity_); + base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); + DCHECK_LT(input_queue_length_, input_queue_capacity_); // Move shift_ back by one. input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); input_queue_[InputQueueIndex(0)] = job; input_queue_length_++; } else { // Add job to the back of the input queue. 
- LockGuard<Mutex> access_input_queue(&input_queue_mutex_); - ASSERT_LT(input_queue_length_, input_queue_capacity_); + base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); + DCHECK_LT(input_queue_length_, input_queue_capacity_); input_queue_[InputQueueIndex(input_queue_length_)] = job; input_queue_length_++; } @@ -300,7 +278,7 @@ void OptimizingCompilerThread::Unblock() { - ASSERT(!IsOptimizerThread()); + DCHECK(!IsOptimizerThread()); while (blocked_jobs_ > 0) { input_queue_semaphore_.Signal(); blocked_jobs_--; @@ -310,7 +288,7 @@ OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( Handle<JSFunction> function, BailoutId osr_ast_id) { - ASSERT(!IsOptimizerThread()); + DCHECK(!IsOptimizerThread()); for (int i = 0; i < osr_buffer_capacity_; i++) { OptimizedCompileJob* current = osr_buffer_[i]; if (current != NULL && @@ -327,7 +305,7 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id) { - ASSERT(!IsOptimizerThread()); + DCHECK(!IsOptimizerThread()); for (int i = 0; i < osr_buffer_capacity_; i++) { OptimizedCompileJob* current = osr_buffer_[i]; if (current != NULL && @@ -340,7 +318,7 @@ bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { - ASSERT(!IsOptimizerThread()); + DCHECK(!IsOptimizerThread()); for (int i = 0; i < osr_buffer_capacity_; i++) { OptimizedCompileJob* current = osr_buffer_[i]; if (current != NULL && *current->info()->closure() == function) { @@ -352,7 +330,7 @@ void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) { - ASSERT(!IsOptimizerThread()); + DCHECK(!IsOptimizerThread()); // Find the next slot that is empty or has a stale job. OptimizedCompileJob* stale = NULL; while (true) { @@ -363,7 +341,7 @@ // Add to found slot and dispose the evicted job. 
if (stale != NULL) { - ASSERT(stale->IsWaitingForInstall()); + DCHECK(stale->IsWaitingForInstall()); CompilationInfo* info = stale->info(); if (FLAG_trace_osr) { PrintF("[COSR - Discarded "); @@ -385,7 +363,7 @@ bool OptimizingCompilerThread::IsOptimizerThread() { - LockGuard<Mutex> lock_guard(&thread_id_mutex_); + base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); return ThreadId::Current().ToInteger() == thread_id_; } #endif diff -Nru nodejs-0.11.13/deps/v8/src/optimizing-compiler-thread.h nodejs-0.11.15/deps/v8/src/optimizing-compiler-thread.h --- nodejs-0.11.13/deps/v8/src/optimizing-compiler-thread.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/optimizing-compiler-thread.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_OPTIMIZING_COMPILER_THREAD_H_ #define V8_OPTIMIZING_COMPILER_THREAD_H_ -#include "atomicops.h" -#include "flags.h" -#include "list.h" -#include "platform.h" -#include "platform/mutex.h" -#include "platform/time.h" -#include "unbound-queue-inl.h" +#include "src/base/atomicops.h" +#include "src/base/platform/mutex.h" +#include "src/base/platform/platform.h" +#include "src/base/platform/time.h" +#include "src/flags.h" +#include "src/list.h" +#include "src/unbound-queue-inl.h" namespace v8 { namespace internal { @@ -43,25 +20,26 @@ class OptimizedCompileJob; class SharedFunctionInfo; -class OptimizingCompilerThread : public Thread { +class OptimizingCompilerThread : public base::Thread { public: - explicit OptimizingCompilerThread(Isolate *isolate) : - Thread("OptimizingCompilerThread"), + explicit OptimizingCompilerThread(Isolate* isolate) + : Thread(Options("OptimizingCompilerThread")), #ifdef DEBUG - thread_id_(0), + thread_id_(0), #endif - isolate_(isolate), - stop_semaphore_(0), - input_queue_semaphore_(0), - input_queue_capacity_(FLAG_concurrent_recompilation_queue_length), - input_queue_length_(0), - input_queue_shift_(0), - osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4), - osr_buffer_cursor_(0), - osr_hits_(0), - osr_attempts_(0), - blocked_jobs_(0) { - NoBarrier_Store(&stop_thread_, 
static_cast<AtomicWord>(CONTINUE)); + isolate_(isolate), + stop_semaphore_(0), + input_queue_semaphore_(0), + input_queue_capacity_(FLAG_concurrent_recompilation_queue_length), + input_queue_length_(0), + input_queue_shift_(0), + osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4), + osr_buffer_cursor_(0), + osr_hits_(0), + osr_attempts_(0), + blocked_jobs_(0) { + base::NoBarrier_Store(&stop_thread_, + static_cast<base::AtomicWord>(CONTINUE)); input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_); if (FLAG_concurrent_osr) { // Allocate and mark OSR buffer slots as empty. @@ -85,7 +63,7 @@ bool IsQueuedForOSR(JSFunction* function); inline bool IsQueueAvailable() { - LockGuard<Mutex> access_input_queue(&input_queue_mutex_); + base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); return input_queue_length_ < input_queue_capacity_; } @@ -120,26 +98,26 @@ inline int InputQueueIndex(int i) { int result = (i + input_queue_shift_) % input_queue_capacity_; - ASSERT_LE(0, result); - ASSERT_LT(result, input_queue_capacity_); + DCHECK_LE(0, result); + DCHECK_LT(result, input_queue_capacity_); return result; } #ifdef DEBUG int thread_id_; - Mutex thread_id_mutex_; + base::Mutex thread_id_mutex_; #endif Isolate* isolate_; - Semaphore stop_semaphore_; - Semaphore input_queue_semaphore_; + base::Semaphore stop_semaphore_; + base::Semaphore input_queue_semaphore_; // Circular queue of incoming recompilation tasks (including OSR). OptimizedCompileJob** input_queue_; int input_queue_capacity_; int input_queue_length_; int input_queue_shift_; - Mutex input_queue_mutex_; + base::Mutex input_queue_mutex_; // Queue of recompilation tasks ready to be installed (excluding OSR). 
UnboundQueue<OptimizedCompileJob*> output_queue_; @@ -149,9 +127,9 @@ int osr_buffer_capacity_; int osr_buffer_cursor_; - volatile AtomicWord stop_thread_; - TimeDelta time_spent_compiling_; - TimeDelta time_spent_total_; + volatile base::AtomicWord stop_thread_; + base::TimeDelta time_spent_compiling_; + base::TimeDelta time_spent_total_; int osr_hits_; int osr_attempts_; diff -Nru nodejs-0.11.13/deps/v8/src/ostreams.cc nodejs-0.11.15/deps/v8/src/ostreams.cc --- nodejs-0.11.13/deps/v8/src/ostreams.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ostreams.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,185 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <algorithm> +#include <cctype> +#include <cmath> + +#include "src/base/platform/platform.h" // For isinf/isnan with MSVC +#include "src/ostreams.h" + +#if V8_OS_WIN +#define snprintf sprintf_s +#endif + +namespace v8 { +namespace internal { + +// Be lazy and delegate the value=>char conversion to snprintf. +template<class T> +OStream& OStream::print(const char* format, T x) { + char buf[32]; + int n = snprintf(buf, sizeof(buf), format, x); + return (n < 0) ? *this : write(buf, n); +} + + +OStream& OStream::operator<<(short x) { // NOLINT(runtime/int) + return print(hex_ ? "%hx" : "%hd", x); +} + + +OStream& OStream::operator<<(unsigned short x) { // NOLINT(runtime/int) + return print(hex_ ? "%hx" : "%hu", x); +} + + +OStream& OStream::operator<<(int x) { + return print(hex_ ? "%x" : "%d", x); +} + + +OStream& OStream::operator<<(unsigned int x) { + return print(hex_ ? "%x" : "%u", x); +} + + +OStream& OStream::operator<<(long x) { // NOLINT(runtime/int) + return print(hex_ ? "%lx" : "%ld", x); +} + + +OStream& OStream::operator<<(unsigned long x) { // NOLINT(runtime/int) + return print(hex_ ? 
"%lx" : "%lu", x); +} + + +OStream& OStream::operator<<(long long x) { // NOLINT(runtime/int) + return print(hex_ ? "%llx" : "%lld", x); +} + + +OStream& OStream::operator<<(unsigned long long x) { // NOLINT(runtime/int) + return print(hex_ ? "%llx" : "%llu", x); +} + + +OStream& OStream::operator<<(double x) { + if (std::isinf(x)) return *this << (x < 0 ? "-inf" : "inf"); + if (std::isnan(x)) return *this << "nan"; + return print("%g", x); +} + + +OStream& OStream::operator<<(void* x) { + return print("%p", x); +} + + +OStream& OStream::operator<<(char x) { + return put(x); +} + + +OStream& OStream::operator<<(signed char x) { + return put(x); +} + + +OStream& OStream::operator<<(unsigned char x) { + return put(x); +} + + +OStream& OStream::dec() { + hex_ = false; + return *this; +} + + +OStream& OStream::hex() { + hex_ = true; + return *this; +} + + +OStream& flush(OStream& os) { // NOLINT(runtime/references) + return os.flush(); +} + + +OStream& endl(OStream& os) { // NOLINT(runtime/references) + return flush(os.put('\n')); +} + + +OStream& hex(OStream& os) { // NOLINT(runtime/references) + return os.hex(); +} + + +OStream& dec(OStream& os) { // NOLINT(runtime/references) + return os.dec(); +} + + +OStringStream& OStringStream::write(const char* s, size_t n) { + size_t new_size = size_ + n; + if (new_size < size_) return *this; // Overflow => no-op. + reserve(new_size + 1); + memcpy(data_ + size_, s, n); + size_ = new_size; + data_[size_] = '\0'; + return *this; +} + + +OStringStream& OStringStream::flush() { + return *this; +} + + +void OStringStream::reserve(size_t requested_capacity) { + if (requested_capacity <= capacity_) return; + size_t new_capacity = // Handle possible overflow by not doubling. 
+ std::max(std::max(capacity_ * 2, capacity_), requested_capacity); + char * new_data = allocate(new_capacity); + memcpy(new_data, data_, size_); + deallocate(data_, capacity_); + capacity_ = new_capacity; + data_ = new_data; +} + + +OFStream& OFStream::write(const char* s, size_t n) { + if (f_) fwrite(s, n, 1, f_); + return *this; +} + + +OFStream& OFStream::flush() { + if (f_) fflush(f_); + return *this; +} + + +OStream& operator<<(OStream& os, const AsReversiblyEscapedUC16& c) { + char buf[10]; + const char* format = + (std::isprint(c.value) || std::isspace(c.value)) && c.value != '\\' + ? "%c" + : (c.value <= 0xff) ? "\\x%02x" : "\\u%04x"; + snprintf(buf, sizeof(buf), format, c.value); + return os << buf; +} + + +OStream& operator<<(OStream& os, const AsUC16& c) { + char buf[10]; + const char* format = + std::isprint(c.value) ? "%c" : (c.value <= 0xff) ? "\\x%02x" : "\\u%04x"; + snprintf(buf, sizeof(buf), format, c.value); + return os << buf; +} +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/ostreams.h nodejs-0.11.15/deps/v8/src/ostreams.h --- nodejs-0.11.13/deps/v8/src/ostreams.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/ostreams.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,143 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OSTREAMS_H_ +#define V8_OSTREAMS_H_ + +#include <stddef.h> +#include <stdio.h> +#include <string.h> + +#include "include/v8config.h" +#include "src/base/macros.h" + +namespace v8 { +namespace internal { + +// An abstract base class for output streams with a cut-down standard interface. +class OStream { + public: + OStream() : hex_(false) { } + virtual ~OStream() { } + + // For manipulators like 'os << endl' or 'os << flush', etc. + OStream& operator<<(OStream& (*manipulator)(OStream& os)) { + return manipulator(*this); + } + + // Numeric conversions. 
+ OStream& operator<<(short x); // NOLINT(runtime/int) + OStream& operator<<(unsigned short x); // NOLINT(runtime/int) + OStream& operator<<(int x); + OStream& operator<<(unsigned int x); + OStream& operator<<(long x); // NOLINT(runtime/int) + OStream& operator<<(unsigned long x); // NOLINT(runtime/int) + OStream& operator<<(long long x); // NOLINT(runtime/int) + OStream& operator<<(unsigned long long x); // NOLINT(runtime/int) + OStream& operator<<(double x); + OStream& operator<<(void* x); + + // Character output. + OStream& operator<<(char x); + OStream& operator<<(signed char x); + OStream& operator<<(unsigned char x); + OStream& operator<<(const char* s) { return write(s, strlen(s)); } + OStream& put(char c) { return write(&c, 1); } + + // Primitive format flag handling, can be extended if needed. + OStream& dec(); + OStream& hex(); + + virtual OStream& write(const char* s, size_t n) = 0; + virtual OStream& flush() = 0; + + private: + template<class T> OStream& print(const char* format, T x); + + bool hex_; + + DISALLOW_COPY_AND_ASSIGN(OStream); +}; + + +// Some manipulators. +OStream& flush(OStream& os); // NOLINT(runtime/references) +OStream& endl(OStream& os); // NOLINT(runtime/references) +OStream& dec(OStream& os); // NOLINT(runtime/references) +OStream& hex(OStream& os); // NOLINT(runtime/references) + + +// An output stream writing to a character buffer. +class OStringStream: public OStream { + public: + OStringStream() : size_(0), capacity_(32), data_(allocate(capacity_)) { + data_[0] = '\0'; + } + ~OStringStream() { deallocate(data_, capacity_); } + + size_t size() const { return size_; } + size_t capacity() const { return capacity_; } + const char* data() const { return data_; } + + // Internally, our character data is always 0-terminated. 
+ const char* c_str() const { return data(); } + + virtual OStringStream& write(const char* s, size_t n) V8_OVERRIDE; + virtual OStringStream& flush() V8_OVERRIDE; + + private: + // Primitive allocator interface, can be extracted if needed. + static char* allocate (size_t n) { return new char[n]; } + static void deallocate (char* s, size_t n) { delete[] s; } + + void reserve(size_t requested_capacity); + + size_t size_; + size_t capacity_; + char* data_; + + DISALLOW_COPY_AND_ASSIGN(OStringStream); +}; + + +// An output stream writing to a file. +class OFStream: public OStream { + public: + explicit OFStream(FILE* f) : f_(f) { } + virtual ~OFStream() { } + + virtual OFStream& write(const char* s, size_t n) V8_OVERRIDE; + virtual OFStream& flush() V8_OVERRIDE; + + private: + FILE* const f_; + + DISALLOW_COPY_AND_ASSIGN(OFStream); +}; + + +// Wrappers to disambiguate uint16_t and uc16. +struct AsUC16 { + explicit AsUC16(uint16_t v) : value(v) {} + uint16_t value; +}; + + +struct AsReversiblyEscapedUC16 { + explicit AsReversiblyEscapedUC16(uint16_t v) : value(v) {} + uint16_t value; +}; + + +// Writes the given character to the output escaping everything outside of +// printable/space ASCII range. Additionally escapes '\' making escaping +// reversible. +OStream& operator<<(OStream& os, const AsReversiblyEscapedUC16& c); + +// Writes the given character to the output escaping everything outside +// of printable ASCII range. +OStream& operator<<(OStream& os, const AsUC16& c); +} } // namespace v8::internal + +#endif // V8_OSTREAMS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/parser.cc nodejs-0.11.15/deps/v8/src/parser.cc --- nodejs-0.11.13/deps/v8/src/parser.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/parser.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,23 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "api.h" -#include "ast.h" -#include "bootstrapper.h" -#include "char-predicates-inl.h" -#include "codegen.h" -#include "compiler.h" -#include "messages.h" -#include "parser.h" -#include "platform.h" -#include "preparser.h" -#include "runtime.h" -#include "scanner-character-streams.h" -#include "scopeinfo.h" -#include "string-stream.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/api.h" +#include "src/ast.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/char-predicates-inl.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/messages.h" +#include "src/parser.h" +#include "src/preparser.h" +#include "src/runtime.h" +#include "src/scanner-character-streams.h" +#include "src/scopeinfo.h" +#include "src/string-stream.h" namespace v8 { namespace internal { @@ -166,7 +143,7 @@ } RegExpTree* atom; if (characters_ != NULL) { - ASSERT(last_added_ == ADD_CHAR); + DCHECK(last_added_ == ADD_CHAR); // Last atom was character. Vector<const uc16> char_vector = characters_->ToConstVector(); int num_chars = char_vector.length(); @@ -179,11 +156,11 @@ atom = new(zone()) RegExpAtom(char_vector); FlushText(); } else if (text_.length() > 0) { - ASSERT(last_added_ == ADD_ATOM); + DCHECK(last_added_ == ADD_ATOM); atom = text_.RemoveLast(); FlushText(); } else if (terms_.length() > 0) { - ASSERT(last_added_ == ADD_ATOM); + DCHECK(last_added_ == ADD_ATOM); atom = terms_.RemoveLast(); if (atom->max_match() == 0) { // Guaranteed to only match an empty string. @@ -205,146 +182,93 @@ } -Handle<String> Parser::LookupCachedSymbol(int symbol_id) { - // Make sure the cache is large enough to hold the symbol identifier. - if (symbol_cache_.length() <= symbol_id) { - // Increase length to index + 1. 
- symbol_cache_.AddBlock(Handle<String>::null(), - symbol_id + 1 - symbol_cache_.length(), zone()); - } - Handle<String> result = symbol_cache_.at(symbol_id); - if (result.is_null()) { - result = scanner()->AllocateInternalizedString(isolate_); - ASSERT(!result.is_null()); - symbol_cache_.at(symbol_id) = result; - return result; - } - isolate()->counters()->total_preparse_symbols_skipped()->Increment(); - return result; -} - - -FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) { +FunctionEntry ParseData::GetFunctionEntry(int start) { // The current pre-data entry must be a FunctionEntry with the given // start position. - if ((function_index_ + FunctionEntry::kSize <= store_.length()) - && (static_cast<int>(store_[function_index_]) == start)) { + if ((function_index_ + FunctionEntry::kSize <= Length()) && + (static_cast<int>(Data()[function_index_]) == start)) { int index = function_index_; function_index_ += FunctionEntry::kSize; - return FunctionEntry(store_.SubVector(index, - index + FunctionEntry::kSize)); + Vector<unsigned> subvector(&(Data()[index]), FunctionEntry::kSize); + return FunctionEntry(subvector); } return FunctionEntry(); } -int ScriptDataImpl::GetSymbolIdentifier() { - return ReadNumber(&symbol_data_); +int ParseData::FunctionCount() { + int functions_size = FunctionsSize(); + if (functions_size < 0) return 0; + if (functions_size % FunctionEntry::kSize != 0) return 0; + return functions_size / FunctionEntry::kSize; } -bool ScriptDataImpl::SanityCheck() { +bool ParseData::IsSane() { // Check that the header data is valid and doesn't specify // point to positions outside the store. - if (store_.length() < PreparseDataConstants::kHeaderSize) return false; - if (magic() != PreparseDataConstants::kMagicNumber) return false; - if (version() != PreparseDataConstants::kCurrentVersion) return false; - if (has_error()) { - // Extra sane sanity check for error message encoding. 
- if (store_.length() <= PreparseDataConstants::kHeaderSize - + PreparseDataConstants::kMessageTextPos) { - return false; - } - if (Read(PreparseDataConstants::kMessageStartPos) > - Read(PreparseDataConstants::kMessageEndPos)) { - return false; - } - unsigned arg_count = Read(PreparseDataConstants::kMessageArgCountPos); - int pos = PreparseDataConstants::kMessageTextPos; - for (unsigned int i = 0; i <= arg_count; i++) { - if (store_.length() <= PreparseDataConstants::kHeaderSize + pos) { - return false; - } - int length = static_cast<int>(Read(pos)); - if (length < 0) return false; - pos += 1 + length; - } - if (store_.length() < PreparseDataConstants::kHeaderSize + pos) { - return false; - } - return true; - } + int data_length = Length(); + if (data_length < PreparseDataConstants::kHeaderSize) return false; + if (Magic() != PreparseDataConstants::kMagicNumber) return false; + if (Version() != PreparseDataConstants::kCurrentVersion) return false; + if (HasError()) return false; // Check that the space allocated for function entries is sane. - int functions_size = - static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]); + int functions_size = FunctionsSize(); if (functions_size < 0) return false; if (functions_size % FunctionEntry::kSize != 0) return false; - // Check that the count of symbols is non-negative. - int symbol_count = - static_cast<int>(store_[PreparseDataConstants::kSymbolCountOffset]); - if (symbol_count < 0) return false; // Check that the total size has room for header and function entries. 
int minimum_size = PreparseDataConstants::kHeaderSize + functions_size; - if (store_.length() < minimum_size) return false; + if (data_length < minimum_size) return false; return true; } - -const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) { - int length = start[0]; - char* result = NewArray<char>(length + 1); - for (int i = 0; i < length; i++) { - result[i] = start[i + 1]; +void ParseData::Initialize() { + // Prepares state for use. + int data_length = Length(); + if (data_length >= PreparseDataConstants::kHeaderSize) { + function_index_ = PreparseDataConstants::kHeaderSize; } - result[length] = '\0'; - if (chars != NULL) *chars = length; - return result; } -Scanner::Location ScriptDataImpl::MessageLocation() { - int beg_pos = Read(PreparseDataConstants::kMessageStartPos); - int end_pos = Read(PreparseDataConstants::kMessageEndPos); - return Scanner::Location(beg_pos, end_pos); +bool ParseData::HasError() { + return Data()[PreparseDataConstants::kHasErrorOffset]; } -const char* ScriptDataImpl::BuildMessage() { - unsigned* start = ReadAddress(PreparseDataConstants::kMessageTextPos); - return ReadString(start, NULL); +unsigned ParseData::Magic() { + return Data()[PreparseDataConstants::kMagicOffset]; } -Vector<const char*> ScriptDataImpl::BuildArgs() { - int arg_count = Read(PreparseDataConstants::kMessageArgCountPos); - const char** array = NewArray<const char*>(arg_count); - // Position after text found by skipping past length field and - // length field content words. 
- int pos = PreparseDataConstants::kMessageTextPos + 1 - + Read(PreparseDataConstants::kMessageTextPos); - for (int i = 0; i < arg_count; i++) { - int count = 0; - array[i] = ReadString(ReadAddress(pos), &count); - pos += count + 1; - } - return Vector<const char*>(array, arg_count); +unsigned ParseData::Version() { + return Data()[PreparseDataConstants::kVersionOffset]; } -unsigned ScriptDataImpl::Read(int position) { - return store_[PreparseDataConstants::kHeaderSize + position]; +int ParseData::FunctionsSize() { + return static_cast<int>(Data()[PreparseDataConstants::kFunctionsSizeOffset]); } -unsigned* ScriptDataImpl::ReadAddress(int position) { - return &store_[PreparseDataConstants::kHeaderSize + position]; +void Parser::SetCachedData() { + if (compile_options() == ScriptCompiler::kNoCompileOptions) { + cached_parse_data_ = NULL; + } else { + DCHECK(info_->cached_data() != NULL); + if (compile_options() == ScriptCompiler::kConsumeParserCache) { + cached_parse_data_ = new ParseData(*info_->cached_data()); + } + } } Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) { - Scope* result = new(zone()) Scope(parent, scope_type, zone()); + DCHECK(ast_value_factory_); + Scope* result = + new (zone()) Scope(parent, scope_type, ast_value_factory_, zone()); result->Initialize(); return result; } @@ -417,16 +341,14 @@ // ---------------------------------------------------------------------------- // Implementation of Parser -bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const { - return identifier.is_identical_to( - parser_->isolate()->factory()->eval_string()) || - identifier.is_identical_to( - parser_->isolate()->factory()->arguments_string()); +bool ParserTraits::IsEvalOrArguments(const AstRawString* identifier) const { + return identifier == parser_->ast_value_factory_->eval_string() || + identifier == parser_->ast_value_factory_->arguments_string(); } bool ParserTraits::IsThisProperty(Expression* expression) { - ASSERT(expression != NULL); 
+ DCHECK(expression != NULL); Property* property = expression->AsProperty(); return property != NULL && property->obj()->AsVariableProxy() != NULL && @@ -443,17 +365,17 @@ void ParserTraits::PushPropertyName(FuncNameInferrer* fni, Expression* expression) { if (expression->IsPropertyName()) { - fni->PushLiteralName(expression->AsLiteral()->AsPropertyName()); + fni->PushLiteralName(expression->AsLiteral()->AsRawPropertyName()); } else { fni->PushLiteralName( - parser_->isolate()->factory()->anonymous_function_string()); + parser_->ast_value_factory_->anonymous_function_string()); } } void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left, Expression* right) { - ASSERT(left != NULL); + DCHECK(left != NULL); if (left->AsProperty() != NULL && right->AsFunctionLiteral() != NULL) { right->AsFunctionLiteral()->set_pretenure(); @@ -465,41 +387,27 @@ Scope* scope) { VariableProxy* callee = expression->AsVariableProxy(); if (callee != NULL && - callee->IsVariable(parser_->isolate()->factory()->eval_string())) { + callee->raw_name() == parser_->ast_value_factory_->eval_string()) { scope->DeclarationScope()->RecordEvalCall(); } } -Expression* ParserTraits::MarkExpressionAsLValue(Expression* expression) { - VariableProxy* proxy = expression != NULL - ? expression->AsVariableProxy() - : NULL; - if (proxy != NULL) proxy->MarkAsLValue(); +Expression* ParserTraits::MarkExpressionAsAssigned(Expression* expression) { + VariableProxy* proxy = + expression != NULL ? expression->AsVariableProxy() : NULL; + if (proxy != NULL) proxy->set_is_assigned(); return expression; } -void ParserTraits::CheckStrictModeLValue(Expression* expression, - bool* ok) { - VariableProxy* lhs = expression != NULL - ? 
expression->AsVariableProxy() - : NULL; - if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) { - parser_->ReportMessage("strict_eval_arguments", - Vector<const char*>::empty()); - *ok = false; - } -} - - bool ParserTraits::ShortcutNumericLiteralBinaryExpression( Expression** x, Expression* y, Token::Value op, int pos, AstNodeFactory<AstConstructionVisitor>* factory) { - if ((*x)->AsLiteral() && (*x)->AsLiteral()->value()->IsNumber() && - y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) { - double x_val = (*x)->AsLiteral()->value()->Number(); - double y_val = y->AsLiteral()->value()->Number(); + if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() && + y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) { + double x_val = (*x)->AsLiteral()->raw_value()->AsNumber(); + double y_val = y->AsLiteral()->raw_value()->AsNumber(); switch (op) { case Token::ADD: *x = factory->NewNumberLiteral(x_val + y_val, pos); @@ -556,18 +464,16 @@ Expression* ParserTraits::BuildUnaryExpression( Expression* expression, Token::Value op, int pos, AstNodeFactory<AstConstructionVisitor>* factory) { - ASSERT(expression != NULL); - if (expression->AsLiteral() != NULL) { - Handle<Object> literal = expression->AsLiteral()->value(); + DCHECK(expression != NULL); + if (expression->IsLiteral()) { + const AstValue* literal = expression->AsLiteral()->raw_value(); if (op == Token::NOT) { // Convert the literal to a boolean condition and negate it. bool condition = literal->BooleanValue(); - Handle<Object> result = - parser_->isolate()->factory()->ToBoolean(!condition); - return factory->NewLiteral(result, pos); + return factory->NewBooleanLiteral(!condition, pos); } else if (literal->IsNumber()) { // Compute some expressions involving only number literals. 
- double value = literal->Number(); + double value = literal->AsNumber(); switch (op) { case Token::ADD: return expression; @@ -599,9 +505,51 @@ } +Expression* ParserTraits::NewThrowReferenceError(const char* message, int pos) { + return NewThrowError( + parser_->ast_value_factory_->make_reference_error_string(), message, NULL, + pos); +} + + +Expression* ParserTraits::NewThrowSyntaxError( + const char* message, const AstRawString* arg, int pos) { + return NewThrowError(parser_->ast_value_factory_->make_syntax_error_string(), + message, arg, pos); +} + + +Expression* ParserTraits::NewThrowTypeError( + const char* message, const AstRawString* arg, int pos) { + return NewThrowError(parser_->ast_value_factory_->make_type_error_string(), + message, arg, pos); +} + + +Expression* ParserTraits::NewThrowError( + const AstRawString* constructor, const char* message, + const AstRawString* arg, int pos) { + Zone* zone = parser_->zone(); + int argc = arg != NULL ? 1 : 0; + const AstRawString* type = + parser_->ast_value_factory_->GetOneByteString(message); + ZoneList<const AstRawString*>* array = + new (zone) ZoneList<const AstRawString*>(argc, zone); + if (arg != NULL) { + array->Add(arg, zone); + } + ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone); + args->Add(parser_->factory()->NewStringLiteral(type, pos), zone); + args->Add(parser_->factory()->NewStringListLiteral(array, pos), zone); + CallRuntime* call_constructor = + parser_->factory()->NewCallRuntime(constructor, NULL, args, pos); + return parser_->factory()->NewThrow(call_constructor, pos); +} + + void ParserTraits::ReportMessageAt(Scanner::Location source_location, const char* message, - Vector<const char*> args, + const char* arg, bool is_reference_error) { if (parser_->stack_overflow()) { // Suppress the error message (syntax error or such) in the presence of a @@ -609,35 +557,34 @@ // and we want to report the stack overflow later. 
return; } - MessageLocation location(parser_->script_, - source_location.beg_pos, - source_location.end_pos); - Factory* factory = parser_->isolate()->factory(); - Handle<FixedArray> elements = factory->NewFixedArray(args.length()); - for (int i = 0; i < args.length(); i++) { - Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i])); - ASSERT(!arg_string.is_null()); - elements->set(i, *arg_string); - } - Handle<JSArray> array = factory->NewJSArrayWithElements(elements); - Handle<Object> result = is_reference_error - ? factory->NewReferenceError(message, array) - : factory->NewSyntaxError(message, array); - parser_->isolate()->Throw(*result, &location); + parser_->has_pending_error_ = true; + parser_->pending_error_location_ = source_location; + parser_->pending_error_message_ = message; + parser_->pending_error_char_arg_ = arg; + parser_->pending_error_arg_ = NULL; + parser_->pending_error_is_reference_error_ = is_reference_error; } void ParserTraits::ReportMessage(const char* message, - Vector<Handle<String> > args, + const char* arg, bool is_reference_error) { Scanner::Location source_location = parser_->scanner()->location(); - ReportMessageAt(source_location, message, args, is_reference_error); + ReportMessageAt(source_location, message, arg, is_reference_error); +} + + +void ParserTraits::ReportMessage(const char* message, + const AstRawString* arg, + bool is_reference_error) { + Scanner::Location source_location = parser_->scanner()->location(); + ReportMessageAt(source_location, message, arg, is_reference_error); } void ParserTraits::ReportMessageAt(Scanner::Location source_location, const char* message, - Vector<Handle<String> > args, + const AstRawString* arg, bool is_reference_error) { if (parser_->stack_overflow()) { // Suppress the error message (syntax error or such) in the presence of a @@ -645,52 +592,31 @@ // and we want to report the stack overflow later. 
return; } - MessageLocation location(parser_->script_, - source_location.beg_pos, - source_location.end_pos); - Factory* factory = parser_->isolate()->factory(); - Handle<FixedArray> elements = factory->NewFixedArray(args.length()); - for (int i = 0; i < args.length(); i++) { - elements->set(i, *args[i]); - } - Handle<JSArray> array = factory->NewJSArrayWithElements(elements); - Handle<Object> result = is_reference_error - ? factory->NewReferenceError(message, array) - : factory->NewSyntaxError(message, array); - parser_->isolate()->Throw(*result, &location); + parser_->has_pending_error_ = true; + parser_->pending_error_location_ = source_location; + parser_->pending_error_message_ = message; + parser_->pending_error_char_arg_ = NULL; + parser_->pending_error_arg_ = arg; + parser_->pending_error_is_reference_error_ = is_reference_error; } -Handle<String> ParserTraits::GetSymbol(Scanner* scanner) { - if (parser_->cached_data_mode() == CONSUME_CACHED_DATA) { - int symbol_id = (*parser_->cached_data())->GetSymbolIdentifier(); - // If there is no symbol data, -1 will be returned. 
- if (symbol_id >= 0 && - symbol_id < (*parser_->cached_data())->symbol_count()) { - return parser_->LookupCachedSymbol(symbol_id); - } - } else if (parser_->cached_data_mode() == PRODUCE_CACHED_DATA) { - if (parser_->log_->ShouldLogSymbols()) { - parser_->scanner()->LogSymbol(parser_->log_, parser_->position()); - } - } - Handle<String> result = - parser_->scanner()->AllocateInternalizedString(parser_->isolate_); - ASSERT(!result.is_null()); +const AstRawString* ParserTraits::GetSymbol(Scanner* scanner) { + const AstRawString* result = + parser_->scanner()->CurrentSymbol(parser_->ast_value_factory_); + DCHECK(result != NULL); return result; } -Handle<String> ParserTraits::NextLiteralString(Scanner* scanner, - PretenureFlag tenured) { - return scanner->AllocateNextLiteralString(parser_->isolate(), tenured); +const AstRawString* ParserTraits::GetNextSymbol(Scanner* scanner) { + return parser_->scanner()->NextSymbol(parser_->ast_value_factory_); } Expression* ParserTraits::ThisExpression( - Scope* scope, - AstNodeFactory<AstConstructionVisitor>* factory) { - return factory->NewVariableProxy(scope->receiver()); + Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory, int pos) { + return factory->NewVariableProxy(scope->receiver(), pos); } @@ -698,33 +624,32 @@ Token::Value token, int pos, Scanner* scanner, AstNodeFactory<AstConstructionVisitor>* factory) { - Factory* isolate_factory = parser_->isolate()->factory(); switch (token) { case Token::NULL_LITERAL: - return factory->NewLiteral(isolate_factory->null_value(), pos); + return factory->NewNullLiteral(pos); case Token::TRUE_LITERAL: - return factory->NewLiteral(isolate_factory->true_value(), pos); + return factory->NewBooleanLiteral(true, pos); case Token::FALSE_LITERAL: - return factory->NewLiteral(isolate_factory->false_value(), pos); + return factory->NewBooleanLiteral(false, pos); case Token::NUMBER: { double value = scanner->DoubleValue(); return factory->NewNumberLiteral(value, pos); } default: - 
ASSERT(false); + DCHECK(false); } return NULL; } Expression* ParserTraits::ExpressionFromIdentifier( - Handle<String> name, int pos, Scope* scope, + const AstRawString* name, int pos, Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory) { if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name); // The name may refer to a module instance object, so its type is unknown. #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Variable %s ", name->ToAsciiArray()); + PrintF("# Variable %.*s ", name->length(), name->raw_data()); #endif Interface* interface = Interface::NewUnknown(parser_->zone()); return scope->NewUnresolved(factory, name, interface, pos); @@ -734,16 +659,28 @@ Expression* ParserTraits::ExpressionFromString( int pos, Scanner* scanner, AstNodeFactory<AstConstructionVisitor>* factory) { - Handle<String> symbol = GetSymbol(scanner); + const AstRawString* symbol = GetSymbol(scanner); if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol); - return factory->NewLiteral(symbol, pos); + return factory->NewStringLiteral(symbol, pos); +} + + +Expression* ParserTraits::GetIterator( + Expression* iterable, AstNodeFactory<AstConstructionVisitor>* factory) { + Expression* iterator_symbol_literal = + factory->NewSymbolLiteral("symbolIterator", RelocInfo::kNoPosition); + int pos = iterable->position(); + Expression* prop = + factory->NewProperty(iterable, iterator_symbol_literal, pos); + Zone* zone = parser_->zone(); + ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(0, zone); + return factory->NewCall(prop, args, pos); } Literal* ParserTraits::GetLiteralTheHole( int position, AstNodeFactory<AstConstructionVisitor>* factory) { - return factory->NewLiteral(parser_->isolate()->factory()->the_hole_value(), - RelocInfo::kNoPosition); + return factory->NewTheHoleLiteral(RelocInfo::kNoPosition); } @@ -753,45 +690,51 @@ FunctionLiteral* ParserTraits::ParseFunctionLiteral( - Handle<String> name, + const AstRawString* name, 
Scanner::Location function_name_location, bool name_is_strict_reserved, bool is_generator, int function_token_position, FunctionLiteral::FunctionType type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok) { return parser_->ParseFunctionLiteral(name, function_name_location, name_is_strict_reserved, is_generator, - function_token_position, type, ok); + function_token_position, type, + arity_restriction, ok); } Parser::Parser(CompilationInfo* info) : ParserBase<ParserTraits>(&scanner_, info->isolate()->stack_guard()->real_climit(), - info->extension(), - NULL, - info->zone(), - this), + info->extension(), NULL, info->zone(), this), isolate_(info->isolate()), - symbol_cache_(0, info->zone()), script_(info->script()), scanner_(isolate_->unicode_cache()), reusable_preparser_(NULL), original_scope_(NULL), target_stack_(NULL), - cached_data_(NULL), - cached_data_mode_(NO_CACHED_DATA), - info_(info) { - ASSERT(!script_.is_null()); + cached_parse_data_(NULL), + ast_value_factory_(NULL), + info_(info), + has_pending_error_(false), + pending_error_message_(NULL), + pending_error_arg_(NULL), + pending_error_char_arg_(NULL) { + DCHECK(!script_.is_null()); isolate_->set_ast_node_id(0); set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping); set_allow_modules(!info->is_native() && FLAG_harmony_modules); set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native()); set_allow_lazy(false); // Must be explicitly enabled. 
set_allow_generators(FLAG_harmony_generators); - set_allow_for_of(FLAG_harmony_iteration); + set_allow_arrow_functions(FLAG_harmony_arrow_functions); set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals); + for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount; + ++feature) { + use_counts_[feature] = 0; + } } @@ -801,21 +744,22 @@ HistogramTimerScope timer_scope(isolate()->counters()->parse(), true); Handle<String> source(String::cast(script_->source())); isolate()->counters()->total_parse_size()->Increment(source->length()); - ElapsedTimer timer; + base::ElapsedTimer timer; if (FLAG_trace_parse) { timer.Start(); } - fni_ = new(zone()) FuncNameInferrer(isolate(), zone()); + fni_ = new(zone()) FuncNameInferrer(ast_value_factory_, zone()); // Initialize parser state. CompleteParserRecorder recorder; - if (cached_data_mode_ == PRODUCE_CACHED_DATA) { + + if (compile_options() == ScriptCompiler::kProduceParserCache) { log_ = &recorder; - } else if (cached_data_mode_ == CONSUME_CACHED_DATA) { - (*cached_data_)->Initialize(); + } else if (compile_options() == ScriptCompiler::kConsumeParserCache) { + cached_parse_data_->Initialize(); } - source->TryFlatten(); + source = String::Flatten(source); FunctionLiteral* result; if (source->IsExternalTwoByteString()) { // Notice that the stream is destroyed at the end of the branch block. 
@@ -844,9 +788,8 @@ } PrintF(" - took %0.3f ms]\n", ms); } - if (cached_data_mode_ == PRODUCE_CACHED_DATA) { - Vector<unsigned> store = recorder.ExtractData(); - *cached_data_ = new ScriptDataImpl(store); + if (compile_options() == ScriptCompiler::kProduceParserCache) { + if (result != NULL) *info_->cached_data() = recorder.GetScriptData(); log_ = NULL; } return result; @@ -855,16 +798,19 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Handle<String> source) { - ASSERT(scope_ == NULL); - ASSERT(target_stack_ == NULL); - - Handle<String> no_name = isolate()->factory()->empty_string(); + DCHECK(scope_ == NULL); + DCHECK(target_stack_ == NULL); FunctionLiteral* result = NULL; { Scope* scope = NewScope(scope_, GLOBAL_SCOPE); info->SetGlobalScope(scope); - if (!info->context().is_null()) { + if (!info->context().is_null() && !info->context()->IsNativeContext()) { scope = Scope::DeserializeScopeChain(*info->context(), scope, zone()); + // The Scope is backed up by ScopeInfo (which is in the V8 heap); this + // means the Parser cannot operate independent of the V8 heap. Tell the + // string table to internalize strings and values right after they're + // created. + ast_value_factory_->Internalize(isolate()); } original_scope_ = scope; if (info->is_eval()) { @@ -887,13 +833,17 @@ ParsingModeScope parsing_mode(this, mode); // Enters 'scope'. 
- FunctionState function_state(&function_state_, &scope_, scope, zone()); + FunctionState function_state(&function_state_, &scope_, scope, zone(), + ast_value_factory_); scope_->SetStrictMode(info->strict_mode()); ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone()); bool ok = true; int beg_pos = scanner()->location().beg_pos; ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok); + + HandleSourceURLComments(); + if (ok && strict_mode() == STRICT) { CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok); } @@ -907,37 +857,34 @@ !body->at(0)->IsExpressionStatement() || !body->at(0)->AsExpressionStatement()-> expression()->IsFunctionLiteral()) { - ReportMessage("single_function_literal", Vector<const char*>::empty()); + ReportMessage("single_function_literal"); ok = false; } } + ast_value_factory_->Internalize(isolate()); if (ok) { result = factory()->NewFunctionLiteral( - no_name, - scope_, - body, + ast_value_factory_->empty_string(), ast_value_factory_, scope_, body, function_state.materialized_literal_count(), function_state.expected_property_count(), - function_state.handler_count(), - 0, + function_state.handler_count(), 0, FunctionLiteral::kNoDuplicateParameters, - FunctionLiteral::ANONYMOUS_EXPRESSION, - FunctionLiteral::kGlobalOrEval, - FunctionLiteral::kNotParenthesized, - FunctionLiteral::kNotGenerator, + FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval, + FunctionLiteral::kNotParenthesized, FunctionLiteral::kNormalFunction, 0); result->set_ast_properties(factory()->visitor()->ast_properties()); - result->set_slot_processor(factory()->visitor()->slot_processor()); result->set_dont_optimize_reason( factory()->visitor()->dont_optimize_reason()); } else if (stack_overflow()) { isolate()->StackOverflow(); + } else { + ThrowPendingError(); } } // Make sure the target stack is empty. 
- ASSERT(target_stack_ == NULL); + DCHECK(target_stack_ == NULL); return result; } @@ -947,14 +894,14 @@ HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy()); Handle<String> source(String::cast(script_->source())); isolate()->counters()->total_parse_size()->Increment(source->length()); - ElapsedTimer timer; + base::ElapsedTimer timer; if (FLAG_trace_parse) { timer.Start(); } Handle<SharedFunctionInfo> shared_info = info()->shared_info(); // Initialize parser state. - source->TryFlatten(); + source = String::Flatten(source); FunctionLiteral* result; if (source->IsExternalTwoByteString()) { ExternalTwoByteStringUtf16CharacterStream stream( @@ -981,12 +928,14 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) { Handle<SharedFunctionInfo> shared_info = info()->shared_info(); scanner_.Initialize(source); - ASSERT(scope_ == NULL); - ASSERT(target_stack_ == NULL); + DCHECK(scope_ == NULL); + DCHECK(target_stack_ == NULL); Handle<String> name(String::cast(shared_info->name())); - fni_ = new(zone()) FuncNameInferrer(isolate(), zone()); - fni_->PushEnclosingName(name); + DCHECK(ast_value_factory_); + fni_ = new(zone()) FuncNameInferrer(ast_value_factory_, zone()); + const AstRawString* raw_name = ast_value_factory_->GetString(name); + fni_->PushEnclosingName(raw_name); ParsingModeScope parsing_mode(this, PARSE_EAGERLY); @@ -1002,32 +951,45 @@ zone()); } original_scope_ = scope; - FunctionState function_state(&function_state_, &scope_, scope, zone()); - ASSERT(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT); - ASSERT(info()->strict_mode() == shared_info->strict_mode()); + FunctionState function_state(&function_state_, &scope_, scope, zone(), + ast_value_factory_); + DCHECK(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT); + DCHECK(info()->strict_mode() == shared_info->strict_mode()); scope->SetStrictMode(shared_info->strict_mode()); FunctionLiteral::FunctionType function_type = shared_info->is_expression() 
? (shared_info->is_anonymous() ? FunctionLiteral::ANONYMOUS_EXPRESSION : FunctionLiteral::NAMED_EXPRESSION) : FunctionLiteral::DECLARATION; + bool is_generator = shared_info->is_generator(); bool ok = true; - result = ParseFunctionLiteral(name, - Scanner::Location::invalid(), - false, // Strict mode name already checked. - shared_info->is_generator(), - RelocInfo::kNoPosition, - function_type, - &ok); + + if (shared_info->is_arrow()) { + DCHECK(!is_generator); + Expression* expression = ParseExpression(false, &ok); + DCHECK(expression->IsFunctionLiteral()); + result = expression->AsFunctionLiteral(); + } else { + result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(), + false, // Strict mode name already checked. + is_generator, RelocInfo::kNoPosition, + function_type, + FunctionLiteral::NORMAL_ARITY, &ok); + } // Make sure the results agree. - ASSERT(ok == (result != NULL)); + DCHECK(ok == (result != NULL)); } // Make sure the target stack is empty. - ASSERT(target_stack_ == NULL); + DCHECK(target_stack_ == NULL); + ast_value_factory_->Internalize(isolate()); if (result == NULL) { - if (stack_overflow()) isolate()->StackOverflow(); + if (stack_overflow()) { + isolate()->StackOverflow(); + } else { + ThrowPendingError(); + } } else { Handle<String> inferred_name(shared_info->inferred_name()); result->set_inferred_name(inferred_name); @@ -1050,7 +1012,7 @@ // functions. TargetScope scope(&this->target_stack_); - ASSERT(processor != NULL); + DCHECK(processor != NULL); bool directive_prologue = true; // Parsing directive prologue. while (peek() != end_token) { @@ -1077,21 +1039,21 @@ // Still processing directive prologue? if ((e_stat = stat->AsExpressionStatement()) != NULL && (literal = e_stat->expression()->AsLiteral()) != NULL && - literal->value()->IsString()) { - Handle<String> directive = Handle<String>::cast(literal->value()); - - // Check "use strict" directive (ES5 14.1). 
+ literal->raw_value()->IsString()) { + // Check "use strict" directive (ES5 14.1) and "use asm" directive. Only + // one can be present. if (strict_mode() == SLOPPY && - directive->Equals(isolate()->heap()->use_strict_string()) && + literal->raw_value()->AsString() == + ast_value_factory_->use_strict_string() && token_loc.end_pos - token_loc.beg_pos == - isolate()->heap()->use_strict_string()->length() + 2) { + ast_value_factory_->use_strict_string()->length() + 2) { // TODO(mstarzinger): Global strict eval calls, need their own scope // as specified in ES5 10.4.2(3). The correct fix would be to always // add this scope in DoParseProgram(), but that requires adaptations // all over the code base, so we go with a quick-fix for now. // In the same manner, we have to patch the parsing mode. if (is_eval && !scope_->is_eval_scope()) { - ASSERT(scope_->is_global_scope()); + DCHECK(scope_->is_global_scope()); Scope* scope = NewScope(scope_, EVAL_SCOPE); scope->set_start_position(scope_->start_position()); scope->set_end_position(scope_->end_position()); @@ -1101,6 +1063,13 @@ scope_->SetStrictMode(STRICT); // "use strict" is the only directive for now. directive_prologue = false; + } else if (literal->raw_value()->AsString() == + ast_value_factory_->use_asm_string() && + token_loc.end_pos - token_loc.beg_pos == + ast_value_factory_->use_asm_string()->length() + 2) { + // Store the usage count; The actual use counter on the isolate is + // incremented after parsing is done. + ++use_counts_[v8::Isolate::kUseAsm]; } } else { // End of the directive prologue. 
@@ -1115,7 +1084,7 @@ } -Statement* Parser::ParseModuleElement(ZoneStringList* labels, +Statement* Parser::ParseModuleElement(ZoneList<const AstRawString*>* labels, bool* ok) { // (Ecma 262 5th Edition, clause 14): // SourceElement: @@ -1134,13 +1103,18 @@ switch (peek()) { case Token::FUNCTION: return ParseFunctionDeclaration(NULL, ok); - case Token::LET: - case Token::CONST: - return ParseVariableStatement(kModuleElement, NULL, ok); case Token::IMPORT: return ParseImportDeclaration(ok); case Token::EXPORT: return ParseExportDeclaration(ok); + case Token::CONST: + return ParseVariableStatement(kModuleElement, NULL, ok); + case Token::LET: + DCHECK(allow_harmony_scoping()); + if (strict_mode() == STRICT) { + return ParseVariableStatement(kModuleElement, NULL, ok); + } + // Fall through. default: { Statement* stmt = ParseStatement(labels, CHECK_OK); // Handle 'module' as a context-sensitive keyword. @@ -1149,10 +1123,9 @@ !scanner()->HasAnyLineTerminatorBeforeNext() && stmt != NULL) { ExpressionStatement* estmt = stmt->AsExpressionStatement(); - if (estmt != NULL && - estmt->expression()->AsVariableProxy() != NULL && - estmt->expression()->AsVariableProxy()->name()->Equals( - isolate()->heap()->module_string()) && + if (estmt != NULL && estmt->expression()->AsVariableProxy() != NULL && + estmt->expression()->AsVariableProxy()->raw_name() == + ast_value_factory_->module_string() && !scanner()->literal_contains_escapes()) { return ParseModuleDeclaration(NULL, ok); } @@ -1163,16 +1136,18 @@ } -Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) { +Statement* Parser::ParseModuleDeclaration(ZoneList<const AstRawString*>* names, + bool* ok) { // ModuleDeclaration: // 'module' Identifier Module int pos = peek_position(); - Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK); + const AstRawString* name = + ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK); #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Module 
%s...\n", name->ToAsciiArray()); + PrintF("# Module %.*s ", name->length(), name->raw_data()); #endif Module* module = ParseModule(CHECK_OK); @@ -1183,10 +1158,9 @@ #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Module %s.\n", name->ToAsciiArray()); - + PrintF("# Module %.*s ", name->length(), name->raw_data()); if (FLAG_print_interfaces) { - PrintF("module %s : ", name->ToAsciiArray()); + PrintF("module %.*s: ", name->length(), name->raw_data()); module->interface()->Print(); } #endif @@ -1264,19 +1238,17 @@ Interface* interface = scope->interface(); for (Interface::Iterator it = interface->iterator(); !it.done(); it.Advance()) { - if (scope->LocalLookup(it.name()) == NULL) { - Handle<String> name(it.name()); - ParserTraits::ReportMessage("module_export_undefined", - Vector<Handle<String> >(&name, 1)); + if (scope->LookupLocal(it.name()) == NULL) { + ParserTraits::ReportMessage("module_export_undefined", it.name()); *ok = false; return NULL; } } interface->MakeModule(ok); - ASSERT(*ok); + DCHECK(*ok); interface->Freeze(ok); - ASSERT(*ok); + DCHECK(*ok); return factory()->NewModuleLiteral(body, interface, pos); } @@ -1289,25 +1261,24 @@ int pos = peek_position(); Module* result = ParseModuleVariable(CHECK_OK); while (Check(Token::PERIOD)) { - Handle<String> name = ParseIdentifierName(CHECK_OK); + const AstRawString* name = ParseIdentifierName(CHECK_OK); #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Path .%s ", name->ToAsciiArray()); + PrintF("# Path .%.*s ", name->length(), name->raw_data()); #endif Module* member = factory()->NewModulePath(result, name, pos); result->interface()->Add(name, member->interface(), zone(), ok); if (!*ok) { #ifdef DEBUG if (FLAG_print_interfaces) { - PrintF("PATH TYPE ERROR at '%s'\n", name->ToAsciiArray()); + PrintF("PATH TYPE ERROR at '%.*s'\n", name->length(), name->raw_data()); PrintF("result: "); result->interface()->Print(); PrintF("member: "); member->interface()->Print(); } #endif - 
ParserTraits::ReportMessage("invalid_module_path", - Vector<Handle<String> >(&name, 1)); + ParserTraits::ReportMessage("invalid_module_path", name); return NULL; } result = member; @@ -1322,10 +1293,11 @@ // Identifier int pos = peek_position(); - Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK); + const AstRawString* name = + ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK); #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Module variable %s ", name->ToAsciiArray()); + PrintF("# Module variable %.*s ", name->length(), name->raw_data()); #endif VariableProxy* proxy = scope_->NewUnresolved( factory(), name, Interface::NewModule(zone()), @@ -1341,7 +1313,7 @@ int pos = peek_position(); Expect(Token::STRING, CHECK_OK); - Handle<String> symbol = GetSymbol(); + const AstRawString* symbol = GetSymbol(scanner()); // TODO(ES6): Request JS resource from environment... @@ -1357,9 +1329,9 @@ Interface* interface = scope->interface(); Module* result = factory()->NewModuleLiteral(body, interface, pos); interface->Freeze(ok); - ASSERT(*ok); + DCHECK(*ok); interface->Unify(scope->interface(), zone(), ok); - ASSERT(*ok); + DCHECK(*ok); return result; } @@ -1385,9 +1357,9 @@ int pos = peek_position(); Expect(Token::IMPORT, CHECK_OK); - ZoneStringList names(1, zone()); + ZoneList<const AstRawString*> names(1, zone()); - Handle<String> name = ParseIdentifierName(CHECK_OK); + const AstRawString* name = ParseIdentifierName(CHECK_OK); names.Add(name, zone()); while (peek() == Token::COMMA) { Consume(Token::COMMA); @@ -1405,20 +1377,20 @@ for (int i = 0; i < names.length(); ++i) { #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Import %s ", names[i]->ToAsciiArray()); + PrintF("# Import %.*s ", name->length(), name->raw_data()); #endif Interface* interface = Interface::NewUnknown(zone()); module->interface()->Add(names[i], interface, zone(), ok); if (!*ok) { #ifdef DEBUG if (FLAG_print_interfaces) { - PrintF("IMPORT TYPE ERROR at '%s'\n", 
names[i]->ToAsciiArray()); + PrintF("IMPORT TYPE ERROR at '%.*s'\n", name->length(), + name->raw_data()); PrintF("module: "); module->interface()->Print(); } #endif - ParserTraits::ReportMessage("invalid_module_path", - Vector<Handle<String> >(&name, 1)); + ParserTraits::ReportMessage("invalid_module_path", name); return NULL; } VariableProxy* proxy = NewUnresolved(names[i], LET, interface); @@ -1444,14 +1416,14 @@ Expect(Token::EXPORT, CHECK_OK); Statement* result = NULL; - ZoneStringList names(1, zone()); + ZoneList<const AstRawString*> names(1, zone()); switch (peek()) { case Token::IDENTIFIER: { int pos = position(); - Handle<String> name = + const AstRawString* name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK); // Handle 'module' as a context-sensitive keyword. - if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) { + if (name != ast_value_factory_->module_string()) { names.Add(name, zone()); while (peek() == Token::COMMA) { Consume(Token::COMMA); @@ -1482,12 +1454,25 @@ return NULL; } + // Every export of a module may be assigned. + for (int i = 0; i < names.length(); ++i) { + Variable* var = scope_->Lookup(names[i]); + if (var == NULL) { + // TODO(sigurds) This is an export that has no definition yet, + // not clear what to do in this case. + continue; + } + if (!IsImmutableVariableMode(var->mode())) { + var->set_maybe_assigned(); + } + } + // Extract declared names into export declarations and interface. 
Interface* interface = scope_->interface(); for (int i = 0; i < names.length(); ++i) { #ifdef DEBUG if (FLAG_print_interface_details) - PrintF("# Export %s ", names[i]->ToAsciiArray()); + PrintF("# Export %.*s ", names[i]->length(), names[i]->raw_data()); #endif Interface* inner = Interface::NewUnknown(zone()); interface->Add(names[i], inner, zone(), CHECK_OK); @@ -1502,12 +1487,12 @@ // scope_->AddDeclaration(declaration); } - ASSERT(result != NULL); + DCHECK(result != NULL); return result; } -Statement* Parser::ParseBlockElement(ZoneStringList* labels, +Statement* Parser::ParseBlockElement(ZoneList<const AstRawString*>* labels, bool* ok) { // (Ecma 262 5th Edition, clause 14): // SourceElement: @@ -1523,16 +1508,22 @@ switch (peek()) { case Token::FUNCTION: return ParseFunctionDeclaration(NULL, ok); - case Token::LET: case Token::CONST: return ParseVariableStatement(kModuleElement, NULL, ok); + case Token::LET: + DCHECK(allow_harmony_scoping()); + if (strict_mode() == STRICT) { + return ParseVariableStatement(kModuleElement, NULL, ok); + } + // Fall through. default: return ParseStatement(labels, ok); } } -Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { +Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels, + bool* ok) { // Statement :: // Block // VariableStatement @@ -1560,11 +1551,6 @@ case Token::LBRACE: return ParseBlock(labels, ok); - case Token::CONST: // fall through - case Token::LET: - case Token::VAR: - return ParseVariableStatement(kStatement, NULL, ok); - case Token::SEMICOLON: Next(); return factory()->NewEmptyStatement(RelocInfo::kNoPosition); @@ -1636,14 +1622,24 @@ case Token::DEBUGGER: return ParseDebuggerStatement(ok); + case Token::VAR: + case Token::CONST: + return ParseVariableStatement(kStatement, NULL, ok); + + case Token::LET: + DCHECK(allow_harmony_scoping()); + if (strict_mode() == STRICT) { + return ParseVariableStatement(kStatement, NULL, ok); + } + // Fall through. 
default: return ParseExpressionOrLabelledStatement(labels, ok); } } -VariableProxy* Parser::NewUnresolved( - Handle<String> name, VariableMode mode, Interface* interface) { +VariableProxy* Parser::NewUnresolved(const AstRawString* name, + VariableMode mode, Interface* interface) { // If we are inside a function, a declaration of a var/const variable is a // truly local variable, and the scope of the variable is always the function // scope. @@ -1656,7 +1652,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) { VariableProxy* proxy = declaration->proxy(); - Handle<String> name = proxy->name(); + DCHECK(proxy->raw_name() != NULL); + const AstRawString* name = proxy->raw_name(); VariableMode mode = declaration->mode(); Scope* declaration_scope = DeclarationScope(mode); Variable* var = NULL; @@ -1680,20 +1677,20 @@ // global scope. var = declaration_scope->is_global_scope() ? declaration_scope->Lookup(name) - : declaration_scope->LocalLookup(name); + : declaration_scope->LookupLocal(name); if (var == NULL) { // Declare the name. - var = declaration_scope->DeclareLocal( - name, mode, declaration->initialization(), proxy->interface()); - } else if ((mode != VAR || var->mode() != VAR) && - (!declaration_scope->is_global_scope() || - IsLexicalVariableMode(mode) || - IsLexicalVariableMode(var->mode()))) { + var = declaration_scope->DeclareLocal(name, mode, + declaration->initialization(), + kNotAssigned, proxy->interface()); + } else if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode()) + || ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) && + !declaration_scope->is_global_scope())) { // The name was declared in this scope before; check for conflicting // re-declarations. We have a conflict if either of the declarations is // not a var (in the global scope, we also have to ignore legacy const for // compatibility). There is similar code in runtime.cc in the Declare - // functions. 
The function CheckNonConflictingScope checks for conflicting + // functions. The function CheckConflictingVarDeclarations checks for // var and let bindings from different scopes whereas this is a check for // conflicting declarations within the same scope. This check also covers // the special case @@ -1702,24 +1699,19 @@ // // because the var declaration is hoisted to the function scope where 'x' // is already bound. - ASSERT(IsDeclaredVariableMode(var->mode())); + DCHECK(IsDeclaredVariableMode(var->mode())); if (allow_harmony_scoping() && strict_mode() == STRICT) { // In harmony we treat re-declarations as early errors. See // ES5 16 for a definition of early errors. - SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS); - const char* elms[2] = { "Variable", c_string.get() }; - Vector<const char*> args(elms, 2); - ReportMessage("redeclaration", args); + ParserTraits::ReportMessage("var_redeclaration", name); *ok = false; return; } - Handle<String> message_string = - isolate()->factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR("Variable")); - Expression* expression = - NewThrowTypeError(isolate()->factory()->redeclaration_string(), - message_string, name); + Expression* expression = NewThrowTypeError( + "var_redeclaration", name, declaration->position()); declaration_scope->SetIllegalRedeclaration(expression); + } else if (mode == VAR) { + var->set_maybe_assigned(); } } @@ -1738,25 +1730,26 @@ // same variable if it is declared several times. This is not a // semantic issue as long as we keep the source order, but it may be // a performance issue since it may lead to repeated - // RuntimeHidden_DeclareContextSlot calls. + // RuntimeHidden_DeclareLookupSlot calls. declaration_scope->AddDeclaration(declaration); if (mode == CONST_LEGACY && declaration_scope->is_global_scope()) { // For global const variables we bind the proxy to a variable. 
- ASSERT(resolve); // should be set by all callers + DCHECK(resolve); // should be set by all callers Variable::Kind kind = Variable::NORMAL; - var = new(zone()) Variable( - declaration_scope, name, mode, true, kind, - kNeedsInitialization, proxy->interface()); + var = new (zone()) + Variable(declaration_scope, name, mode, true, kind, + kNeedsInitialization, kNotAssigned, proxy->interface()); } else if (declaration_scope->is_eval_scope() && declaration_scope->strict_mode() == SLOPPY) { // For variable declarations in a sloppy eval scope the proxy is bound // to a lookup variable to force a dynamic declaration using the - // DeclareContextSlot runtime function. + // DeclareLookupSlot runtime function. Variable::Kind kind = Variable::NORMAL; - var = new(zone()) Variable( - declaration_scope, name, mode, true, kind, - declaration->initialization(), proxy->interface()); + // TODO(sigurds) figure out if kNotAssigned is OK here + var = new (zone()) Variable(declaration_scope, name, mode, true, kind, + declaration->initialization(), kNotAssigned, + proxy->interface()); var->AllocateTo(Variable::LOOKUP, -1); resolve = true; } @@ -1791,8 +1784,10 @@ if (FLAG_harmony_modules) { bool ok; #ifdef DEBUG - if (FLAG_print_interface_details) - PrintF("# Declare %s\n", var->name()->ToAsciiArray()); + if (FLAG_print_interface_details) { + PrintF("# Declare %.*s ", var->raw_name()->length(), + var->raw_name()->raw_data()); + } #endif proxy->interface()->Unify(var->interface(), zone(), &ok); if (!ok) { @@ -1805,8 +1800,7 @@ var->interface()->Print(); } #endif - ParserTraits::ReportMessage("module_type_error", - Vector<Handle<String> >(&name, 1)); + ParserTraits::ReportMessage("module_type_error", name); } } } @@ -1821,7 +1815,7 @@ int pos = peek_position(); Expect(Token::FUNCTION, CHECK_OK); // Allow "eval" or "arguments" for backward compatibility. 
- Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK); + const AstRawString* name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK); Expect(Token::LPAREN, CHECK_OK); bool done = (peek() == Token::RPAREN); while (!done) { @@ -1856,7 +1850,8 @@ } -Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) { +Statement* Parser::ParseFunctionDeclaration( + ZoneList<const AstRawString*>* names, bool* ok) { // FunctionDeclaration :: // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}' // GeneratorDeclaration :: @@ -1866,7 +1861,7 @@ int pos = position(); bool is_generator = allow_generators() && Check(Token::MUL); bool is_strict_reserved = false; - Handle<String> name = ParseIdentifierOrStrictReservedWord( + const AstRawString* name = ParseIdentifierOrStrictReservedWord( &is_strict_reserved, CHECK_OK); FunctionLiteral* fun = ParseFunctionLiteral(name, scanner()->location(), @@ -1874,15 +1869,17 @@ is_generator, pos, FunctionLiteral::DECLARATION, + FunctionLiteral::NORMAL_ARITY, CHECK_OK); // Even if we're not at the top-level of the global or a function // scope, we treat it as such and introduce the function with its // initial value upon entering the corresponding scope. - // In extended mode, a function behaves as a lexical binding, except in the - // global scope. + // In ES6, a function behaves as a lexical binding, except in the + // global scope, or the initial scope of eval or another function. VariableMode mode = - allow_harmony_scoping() && - strict_mode() == STRICT && !scope_->is_global_scope() ? LET : VAR; + allow_harmony_scoping() && strict_mode() == STRICT && + !(scope_->is_global_scope() || scope_->is_eval_scope() || + scope_->is_function_scope()) ? 
LET : VAR; VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue()); Declaration* declaration = factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos); @@ -1892,7 +1889,7 @@ } -Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) { +Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) { if (allow_harmony_scoping() && strict_mode() == STRICT) { return ParseScopedBlock(labels, ok); } @@ -1919,7 +1916,8 @@ } -Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) { +Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels, + bool* ok) { // The harmony mode uses block elements instead of statements. // // Block :: @@ -1954,12 +1952,12 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context, - ZoneStringList* names, + ZoneList<const AstRawString*>* names, bool* ok) { // VariableStatement :: // VariableDeclarations ';' - Handle<String> ignore; + const AstRawString* ignore; Block* result = ParseVariableDeclarations(var_context, NULL, names, &ignore, CHECK_OK); ExpectSemicolon(CHECK_OK); @@ -1975,8 +1973,8 @@ Block* Parser::ParseVariableDeclarations( VariableDeclarationContext var_context, VariableDeclarationProperties* decl_props, - ZoneStringList* names, - Handle<String>* out, + ZoneList<const AstRawString*>* names, + const AstRawString** out, bool* ok) { // VariableDeclarations :: // ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[','] @@ -2025,38 +2023,26 @@ if (var_context == kStatement) { // In strict mode 'const' declarations are only allowed in source // element positions. 
- ReportMessage("unprotected_const", Vector<const char*>::empty()); + ReportMessage("unprotected_const"); *ok = false; return NULL; } mode = CONST; init_op = Token::INIT_CONST; } else { - ReportMessage("strict_const", Vector<const char*>::empty()); + ReportMessage("strict_const"); *ok = false; return NULL; } } is_const = true; needs_init = true; - } else if (peek() == Token::LET) { - // ES6 Draft Rev4 section 12.2.1: - // - // LetDeclaration : let LetBindingList ; - // - // * It is a Syntax Error if the code that matches this production is not - // contained in extended code. - // - // TODO(rossberg): make 'let' a legal identifier in sloppy mode. - if (!allow_harmony_scoping() || strict_mode() == SLOPPY) { - ReportMessage("illegal_let", Vector<const char*>::empty()); - *ok = false; - return NULL; - } + } else if (peek() == Token::LET && strict_mode() == STRICT) { + DCHECK(allow_harmony_scoping()); Consume(Token::LET); if (var_context == kStatement) { // Let declarations are only allowed in source element positions. - ReportMessage("unprotected_let", Vector<const char*>::empty()); + ReportMessage("unprotected_let"); *ok = false; return NULL; } @@ -2084,7 +2070,7 @@ // Create new block with one expected declaration. Block* block = factory()->NewBlock(NULL, 1, true, pos); int nvars = 0; // the number of variables declared - Handle<String> name; + const AstRawString* name = NULL; do { if (fni_ != NULL) fni_->Enter(); @@ -2116,7 +2102,7 @@ Declare(declaration, mode != VAR, CHECK_OK); nvars++; if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) { - ReportMessageAt(scanner()->location(), "too_many_variables"); + ReportMessage("too_many_variables"); *ok = false; return NULL; } @@ -2190,9 +2176,8 @@ // executed. // // Executing the variable declaration statement will always - // guarantee to give the global object a "local" variable; a - // variable defined in the global object and not in any - // prototype. 
This way, global variable declarations can shadow + // guarantee to give the global object an own property. + // This way, global variable declarations can shadow // properties in the prototype chain, but only after the variable // declaration statement has been executed. This is important in // browsers where the global object (window) has lots of @@ -2203,7 +2188,7 @@ ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3, zone()); // We have at least 1 parameter. - arguments->Add(factory()->NewLiteral(name, pos), zone()); + arguments->Add(factory()->NewStringLiteral(name, pos), zone()); CallRuntime* initialize; if (is_const) { @@ -2215,8 +2200,8 @@ // Note that the function does different things depending on // the number of arguments (1 or 2). initialize = factory()->NewCallRuntime( - isolate()->factory()->InitializeConstGlobal_string(), - Runtime::FunctionForId(Runtime::kHiddenInitializeConstGlobal), + ast_value_factory_->initialize_const_global_string(), + Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments, pos); } else { // Add strict mode. @@ -2231,21 +2216,22 @@ if (value != NULL && !inside_with()) { arguments->Add(value, zone()); value = NULL; // zap the value to avoid the unnecessary assignment + // Construct the call to Runtime_InitializeVarGlobal + // and add it to the initialization statement block. + initialize = factory()->NewCallRuntime( + ast_value_factory_->initialize_var_global_string(), + Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments, + pos); + } else { + initialize = NULL; } - - // Construct the call to Runtime_InitializeVarGlobal - // and add it to the initialization statement block. - // Note that the function does different things depending on - // the number of arguments (2 or 3). 
- initialize = factory()->NewCallRuntime( - isolate()->factory()->InitializeVarGlobal_string(), - Runtime::FunctionForId(Runtime::kInitializeVarGlobal), - arguments, pos); } - block->AddStatement( - factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition), - zone()); + if (initialize != NULL) { + block->AddStatement(factory()->NewExpressionStatement( + initialize, RelocInfo::kNoPosition), + zone()); + } } else if (needs_init) { // Constant initializations always assign to the declared constant which // is always at the function scope level. This is only relevant for @@ -2254,9 +2240,9 @@ // context for var declared variables). Sigh... // For 'let' and 'const' declared variables in harmony mode the // initialization also always assigns to the declared variable. - ASSERT(proxy != NULL); - ASSERT(proxy->var() != NULL); - ASSERT(value != NULL); + DCHECK(proxy != NULL); + DCHECK(proxy->var() != NULL); + DCHECK(value != NULL); Assignment* assignment = factory()->NewAssignment(init_op, proxy, value, pos); block->AddStatement( @@ -2268,7 +2254,7 @@ // Add an assignment node to the initialization statement block if we still // have a pending initialization value. if (value != NULL) { - ASSERT(mode == VAR); + DCHECK(mode == VAR); // 'var' initializations are simply assignments (with all the consequences // if they are inside a 'with' statement - they may change a 'with' object // property). 
@@ -2294,19 +2280,22 @@ } -static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) { - ASSERT(!label.is_null()); - if (labels != NULL) - for (int i = labels->length(); i-- > 0; ) - if (labels->at(i).is_identical_to(label)) +static bool ContainsLabel(ZoneList<const AstRawString*>* labels, + const AstRawString* label) { + DCHECK(label != NULL); + if (labels != NULL) { + for (int i = labels->length(); i-- > 0; ) { + if (labels->at(i) == label) { return true; - + } + } + } return false; } -Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels, - bool* ok) { +Statement* Parser::ParseExpressionOrLabelledStatement( + ZoneList<const AstRawString*>* labels, bool* ok) { // ExpressionStatement | LabelledStatement :: // Expression ';' // Identifier ':' Statement @@ -2319,22 +2308,19 @@ // Expression is a single identifier, and not, e.g., a parenthesized // identifier. VariableProxy* var = expr->AsVariableProxy(); - Handle<String> label = var->name(); + const AstRawString* label = var->raw_name(); // TODO(1240780): We don't check for redeclaration of labels // during preparsing since keeping track of the set of active // labels requires nontrivial changes to the way scopes are // structured. However, these are probably changes we want to // make later anyway so we should go back and fix this then. 
if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) { - SmartArrayPointer<char> c_string = label->ToCString(DISALLOW_NULLS); - const char* elms[2] = { "Label", c_string.get() }; - Vector<const char*> args(elms, 2); - ReportMessage("redeclaration", args); + ParserTraits::ReportMessage("label_redeclaration", label); *ok = false; return NULL; } if (labels == NULL) { - labels = new(zone()) ZoneStringList(4, zone()); + labels = new(zone()) ZoneList<const AstRawString*>(4, zone()); } labels->Add(label, zone()); // Remove the "ghost" variable that turned out to be a label @@ -2353,8 +2339,8 @@ !scanner()->HasAnyLineTerminatorBeforeNext() && expr != NULL && expr->AsVariableProxy() != NULL && - expr->AsVariableProxy()->name()->Equals( - isolate()->heap()->native_string()) && + expr->AsVariableProxy()->raw_name() == + ast_value_factory_->native_string() && !scanner()->literal_contains_escapes()) { return ParseNativeDeclaration(ok); } @@ -2365,8 +2351,8 @@ peek() != Token::IDENTIFIER || scanner()->HasAnyLineTerminatorBeforeNext() || expr->AsVariableProxy() == NULL || - !expr->AsVariableProxy()->name()->Equals( - isolate()->heap()->module_string()) || + expr->AsVariableProxy()->raw_name() != + ast_value_factory_->module_string() || scanner()->literal_contains_escapes()) { ExpectSemicolon(CHECK_OK); } @@ -2374,7 +2360,8 @@ } -IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) { +IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels, + bool* ok) { // IfStatement :: // 'if' '(' Expression ')' Statement ('else' Statement)? @@ -2402,24 +2389,21 @@ int pos = peek_position(); Expect(Token::CONTINUE, CHECK_OK); - Handle<String> label = Handle<String>::null(); + const AstRawString* label = NULL; Token::Value tok = peek(); if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) { // ECMA allows "eval" or "arguments" as labels even in strict mode. 
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK); } - IterationStatement* target = NULL; - target = LookupContinueTarget(label, CHECK_OK); + IterationStatement* target = LookupContinueTarget(label, CHECK_OK); if (target == NULL) { // Illegal continue statement. const char* message = "illegal_continue"; - Vector<Handle<String> > args; - if (!label.is_null()) { + if (label != NULL) { message = "unknown_label"; - args = Vector<Handle<String> >(&label, 1); } - ParserTraits::ReportMessageAt(scanner()->location(), message, args); + ParserTraits::ReportMessage(message, label); *ok = false; return NULL; } @@ -2428,13 +2412,14 @@ } -Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) { +Statement* Parser::ParseBreakStatement(ZoneList<const AstRawString*>* labels, + bool* ok) { // BreakStatement :: // 'break' Identifier? ';' int pos = peek_position(); Expect(Token::BREAK, CHECK_OK); - Handle<String> label; + const AstRawString* label = NULL; Token::Value tok = peek(); if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) { @@ -2443,7 +2428,7 @@ } // Parse labeled break statements that target themselves into // empty statements, e.g. 'l1: l2: l3: break l2;' - if (!label.is_null() && ContainsLabel(labels, label)) { + if (label != NULL && ContainsLabel(labels, label)) { ExpectSemicolon(CHECK_OK); return factory()->NewEmptyStatement(pos); } @@ -2452,12 +2437,10 @@ if (target == NULL) { // Illegal break statement. const char* message = "illegal_break"; - Vector<Handle<String> > args; - if (!label.is_null()) { + if (label != NULL) { message = "unknown_label"; - args = Vector<Handle<String> >(&label, 1); } - ParserTraits::ReportMessageAt(scanner()->location(), message, args); + ParserTraits::ReportMessage(message, label); *ok = false; return NULL; } @@ -2474,7 +2457,7 @@ // reporting any errors on it, because of the way errors are // reported (underlining). 
Expect(Token::RETURN, CHECK_OK); - int pos = position(); + Scanner::Location loc = scanner()->location(); Token::Value tok = peek(); Statement* result; @@ -2492,30 +2475,24 @@ Expression* generator = factory()->NewVariableProxy( function_state_->generator_object_variable()); Expression* yield = factory()->NewYield( - generator, return_value, Yield::FINAL, pos); - result = factory()->NewExpressionStatement(yield, pos); + generator, return_value, Yield::FINAL, loc.beg_pos); + result = factory()->NewExpressionStatement(yield, loc.beg_pos); } else { - result = factory()->NewReturnStatement(return_value, pos); + result = factory()->NewReturnStatement(return_value, loc.beg_pos); } - // An ECMAScript program is considered syntactically incorrect if it - // contains a return statement that is not within the body of a - // function. See ECMA-262, section 12.9, page 67. - // - // To be consistent with KJS we report the syntax error at runtime. - Scope* declaration_scope = scope_->DeclarationScope(); - if (declaration_scope->is_global_scope() || - declaration_scope->is_eval_scope()) { - Handle<String> message = isolate()->factory()->illegal_return_string(); - Expression* throw_error = - NewThrowSyntaxError(message, Handle<Object>::null()); - return factory()->NewExpressionStatement(throw_error, pos); + Scope* decl_scope = scope_->DeclarationScope(); + if (decl_scope->is_global_scope() || decl_scope->is_eval_scope()) { + ReportMessageAt(loc, "illegal_return"); + *ok = false; + return NULL; } return result; } -Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) { +Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels, + bool* ok) { // WithStatement :: // 'with' '(' Expression ')' Statement @@ -2523,7 +2500,7 @@ int pos = position(); if (strict_mode() == STRICT) { - ReportMessage("strict_mode_with", Vector<const char*>::empty()); + ReportMessage("strict_mode_with"); *ok = false; return NULL; } @@ -2556,8 +2533,7 @@ } else { 
Expect(Token::DEFAULT, CHECK_OK); if (*default_seen_ptr) { - ReportMessage("multiple_defaults_in_switch", - Vector<const char*>::empty()); + ReportMessage("multiple_defaults_in_switch"); *ok = false; return NULL; } @@ -2578,8 +2554,8 @@ } -SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels, - bool* ok) { +SwitchStatement* Parser::ParseSwitchStatement( + ZoneList<const AstRawString*>* labels, bool* ok) { // SwitchStatement :: // 'switch' '(' Expression ')' '{' CaseClause* '}' @@ -2613,7 +2589,7 @@ Expect(Token::THROW, CHECK_OK); int pos = position(); if (scanner()->HasAnyLineTerminatorBeforeNext()) { - ReportMessage("newline_after_throw", Vector<const char*>::empty()); + ReportMessage("newline_after_throw"); *ok = false; return NULL; } @@ -2649,7 +2625,7 @@ Token::Value tok = peek(); if (tok != Token::CATCH && tok != Token::FINALLY) { - ReportMessage("no_catch_or_finally", Vector<const char*>::empty()); + ReportMessage("no_catch_or_finally"); *ok = false; return NULL; } @@ -2662,7 +2638,7 @@ Scope* catch_scope = NULL; Variable* catch_variable = NULL; Block* catch_block = NULL; - Handle<String> name; + const AstRawString* name = NULL; if (tok == Token::CATCH) { Consume(Token::CATCH); @@ -2676,9 +2652,7 @@ Target target(&this->target_stack_, &catch_collector); VariableMode mode = allow_harmony_scoping() && strict_mode() == STRICT ? 
LET : VAR; - catch_variable = - catch_scope->DeclareLocal(name, mode, kCreatedInitialized); - + catch_variable = catch_scope->DeclareLocal(name, mode, kCreatedInitialized); BlockState block_state(&scope_, catch_scope); catch_block = ParseBlock(NULL, CHECK_OK); @@ -2687,7 +2661,7 @@ } Block* finally_block = NULL; - ASSERT(tok == Token::FINALLY || catch_block != NULL); + DCHECK(tok == Token::FINALLY || catch_block != NULL); if (tok == Token::FINALLY) { Consume(Token::FINALLY); finally_block = ParseBlock(NULL, CHECK_OK); @@ -2700,7 +2674,7 @@ if (catch_block != NULL && finally_block != NULL) { // If we have both, create an inner try/catch. - ASSERT(catch_scope != NULL && catch_variable != NULL); + DCHECK(catch_scope != NULL && catch_variable != NULL); int index = function_state_->NextHandlerIndex(); TryCatchStatement* statement = factory()->NewTryCatchStatement( index, try_block, catch_scope, catch_variable, catch_block, @@ -2713,13 +2687,13 @@ TryStatement* result = NULL; if (catch_block != NULL) { - ASSERT(finally_block == NULL); - ASSERT(catch_scope != NULL && catch_variable != NULL); + DCHECK(finally_block == NULL); + DCHECK(catch_scope != NULL && catch_variable != NULL); int index = function_state_->NextHandlerIndex(); result = factory()->NewTryCatchStatement( index, try_block, catch_scope, catch_variable, catch_block, pos); } else { - ASSERT(finally_block != NULL); + DCHECK(finally_block != NULL); int index = function_state_->NextHandlerIndex(); result = factory()->NewTryFinallyStatement( index, try_block, finally_block, pos); @@ -2732,8 +2706,8 @@ } -DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels, - bool* ok) { +DoWhileStatement* Parser::ParseDoWhileStatement( + ZoneList<const AstRawString*>* labels, bool* ok) { // DoStatement :: // 'do' Statement 'while' '(' Expression ')' ';' @@ -2760,7 +2734,8 @@ } -WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) { +WhileStatement* Parser::ParseWhileStatement( + 
ZoneList<const AstRawString*>* labels, bool* ok) { // WhileStatement :: // 'while' '(' Expression ')' Statement @@ -2783,8 +2758,7 @@ if (Check(Token::IN)) { *visit_mode = ForEachStatement::ENUMERATE; return true; - } else if (allow_for_of() && accept_OF && - CheckContextualKeyword(CStrVector("of"))) { + } else if (accept_OF && CheckContextualKeyword(CStrVector("of"))) { *visit_mode = ForEachStatement::ITERATE; return true; } @@ -2799,29 +2773,26 @@ ForOfStatement* for_of = stmt->AsForOfStatement(); if (for_of != NULL) { - Factory* heap_factory = isolate()->factory(); Variable* iterator = scope_->DeclarationScope()->NewTemporary( - heap_factory->dot_iterator_string()); + ast_value_factory_->dot_iterator_string()); Variable* result = scope_->DeclarationScope()->NewTemporary( - heap_factory->dot_result_string()); + ast_value_factory_->dot_result_string()); Expression* assign_iterator; Expression* next_result; Expression* result_done; Expression* assign_each; - // var iterator = iterable; - { - Expression* iterator_proxy = factory()->NewVariableProxy(iterator); - assign_iterator = factory()->NewAssignment( - Token::ASSIGN, iterator_proxy, subject, RelocInfo::kNoPosition); - } + // var iterator = subject[Symbol.iterator](); + assign_iterator = factory()->NewAssignment( + Token::ASSIGN, factory()->NewVariableProxy(iterator), + GetIterator(subject, factory()), RelocInfo::kNoPosition); // var result = iterator.next(); { Expression* iterator_proxy = factory()->NewVariableProxy(iterator); - Expression* next_literal = factory()->NewLiteral( - heap_factory->next_string(), RelocInfo::kNoPosition); + Expression* next_literal = factory()->NewStringLiteral( + ast_value_factory_->next_string(), RelocInfo::kNoPosition); Expression* next_property = factory()->NewProperty( iterator_proxy, next_literal, RelocInfo::kNoPosition); ZoneList<Expression*>* next_arguments = @@ -2835,8 +2806,8 @@ // result.done { - Expression* done_literal = factory()->NewLiteral( - 
heap_factory->done_string(), RelocInfo::kNoPosition); + Expression* done_literal = factory()->NewStringLiteral( + ast_value_factory_->done_string(), RelocInfo::kNoPosition); Expression* result_proxy = factory()->NewVariableProxy(result); result_done = factory()->NewProperty( result_proxy, done_literal, RelocInfo::kNoPosition); @@ -2844,8 +2815,8 @@ // each = result.value { - Expression* value_literal = factory()->NewLiteral( - heap_factory->value_string(), RelocInfo::kNoPosition); + Expression* value_literal = factory()->NewStringLiteral( + ast_value_factory_->value_string(), RelocInfo::kNoPosition); Expression* result_proxy = factory()->NewVariableProxy(result); Expression* result_value = factory()->NewProperty( result_proxy, value_literal, RelocInfo::kNoPosition); @@ -2854,19 +2825,183 @@ } for_of->Initialize(each, subject, body, - assign_iterator, next_result, result_done, assign_each); + assign_iterator, + next_result, + result_done, + assign_each); } else { stmt->Initialize(each, subject, body); } } -Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { +Statement* Parser::DesugarLetBindingsInForStatement( + Scope* inner_scope, ZoneList<const AstRawString*>* names, + ForStatement* loop, Statement* init, Expression* cond, Statement* next, + Statement* body, bool* ok) { + // ES6 13.6.3.4 specifies that on each loop iteration the let variables are + // copied into a new environment. After copying, the "next" statement of the + // loop is executed to update the loop variables. The loop condition is + // checked and the loop body is executed. 
+ // + // We rewrite a for statement of the form + // + // for (let x = i; cond; next) body + // + // into + // + // { + // let x = i; + // temp_x = x; + // flag = 1; + // for (;;) { + // let x = temp_x; + // if (flag == 1) { + // flag = 0; + // } else { + // next; + // } + // if (cond) { + // <empty> + // } else { + // break; + // } + // b + // temp_x = x; + // } + // } + + DCHECK(names->length() > 0); + Scope* for_scope = scope_; + ZoneList<Variable*> temps(names->length(), zone()); + + Block* outer_block = factory()->NewBlock(NULL, names->length() + 3, false, + RelocInfo::kNoPosition); + outer_block->AddStatement(init, zone()); + + const AstRawString* temp_name = ast_value_factory_->dot_for_string(); + + // For each let variable x: + // make statement: temp_x = x. + for (int i = 0; i < names->length(); i++) { + VariableProxy* proxy = + NewUnresolved(names->at(i), LET, Interface::NewValue()); + Variable* temp = scope_->DeclarationScope()->NewTemporary(temp_name); + VariableProxy* temp_proxy = factory()->NewVariableProxy(temp); + Assignment* assignment = factory()->NewAssignment( + Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition); + Statement* assignment_statement = factory()->NewExpressionStatement( + assignment, RelocInfo::kNoPosition); + outer_block->AddStatement(assignment_statement, zone()); + temps.Add(temp, zone()); + } + + Variable* flag = scope_->DeclarationScope()->NewTemporary(temp_name); + // Make statement: flag = 1. 
+ { + VariableProxy* flag_proxy = factory()->NewVariableProxy(flag); + Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition); + Assignment* assignment = factory()->NewAssignment( + Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition); + Statement* assignment_statement = factory()->NewExpressionStatement( + assignment, RelocInfo::kNoPosition); + outer_block->AddStatement(assignment_statement, zone()); + } + + outer_block->AddStatement(loop, zone()); + outer_block->set_scope(for_scope); + scope_ = inner_scope; + + Block* inner_block = factory()->NewBlock(NULL, 2 * names->length() + 3, + false, RelocInfo::kNoPosition); + int pos = scanner()->location().beg_pos; + ZoneList<Variable*> inner_vars(names->length(), zone()); + + // For each let variable x: + // make statement: let x = temp_x. + for (int i = 0; i < names->length(); i++) { + VariableProxy* proxy = + NewUnresolved(names->at(i), LET, Interface::NewValue()); + Declaration* declaration = + factory()->NewVariableDeclaration(proxy, LET, scope_, pos); + Declare(declaration, true, CHECK_OK); + inner_vars.Add(declaration->proxy()->var(), zone()); + VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i)); + Assignment* assignment = factory()->NewAssignment( + Token::INIT_LET, proxy, temp_proxy, pos); + Statement* assignment_statement = factory()->NewExpressionStatement( + assignment, pos); + proxy->var()->set_initializer_position(pos); + inner_block->AddStatement(assignment_statement, zone()); + } + + // Make statement: if (flag == 1) { flag = 0; } else { next; }. + if (next) { + Expression* compare = NULL; + // Make compare expresion: flag == 1. + { + Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition); + VariableProxy* flag_proxy = factory()->NewVariableProxy(flag); + compare = factory()->NewCompareOperation( + Token::EQ, flag_proxy, const1, pos); + } + Statement* clear_flag = NULL; + // Make statement: flag = 0. 
+ { + VariableProxy* flag_proxy = factory()->NewVariableProxy(flag); + Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition); + Assignment* assignment = factory()->NewAssignment( + Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition); + clear_flag = factory()->NewExpressionStatement(assignment, pos); + } + Statement* clear_flag_or_next = factory()->NewIfStatement( + compare, clear_flag, next, RelocInfo::kNoPosition); + inner_block->AddStatement(clear_flag_or_next, zone()); + } + + + // Make statement: if (cond) { } else { break; }. + if (cond) { + Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition); + BreakableStatement* t = LookupBreakTarget(NULL, CHECK_OK); + Statement* stop = factory()->NewBreakStatement(t, RelocInfo::kNoPosition); + Statement* if_not_cond_break = factory()->NewIfStatement( + cond, empty, stop, cond->position()); + inner_block->AddStatement(if_not_cond_break, zone()); + } + + inner_block->AddStatement(body, zone()); + + // For each let variable x: + // make statement: temp_x = x; + for (int i = 0; i < names->length(); i++) { + VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i)); + int pos = scanner()->location().end_pos; + VariableProxy* proxy = factory()->NewVariableProxy(inner_vars.at(i), pos); + Assignment* assignment = factory()->NewAssignment( + Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition); + Statement* assignment_statement = factory()->NewExpressionStatement( + assignment, RelocInfo::kNoPosition); + inner_block->AddStatement(assignment_statement, zone()); + } + + inner_scope->set_end_position(scanner()->location().end_pos); + inner_block->set_scope(inner_scope); + scope_ = for_scope; + + loop->Initialize(NULL, NULL, NULL, inner_block); + return outer_block; +} + + +Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels, + bool* ok) { // ForStatement :: // 'for' '(' Expression? ';' Expression? ';' Expression? 
')' Statement int pos = peek_position(); Statement* init = NULL; + ZoneList<const AstRawString*> let_bindings(1, zone()); // Create an in-between scope for let-bound iteration variables. Scope* saved_scope = scope_; @@ -2879,7 +3014,7 @@ if (peek() != Token::SEMICOLON) { if (peek() == Token::VAR || peek() == Token::CONST) { bool is_const = peek() == Token::CONST; - Handle<String> name; + const AstRawString* name = NULL; VariableDeclarationProperties decl_props = kHasNoInitializers; Block* variable_statement = ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name, @@ -2887,7 +3022,7 @@ bool accept_OF = decl_props == kHasNoInitializers; ForEachStatement::VisitMode mode; - if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) { + if (name != NULL && CheckInOrOf(accept_OF, &mode)) { Interface* interface = is_const ? Interface::NewConst() : Interface::NewValue(); ForEachStatement* loop = @@ -2908,19 +3043,20 @@ scope_ = saved_scope; for_scope->set_end_position(scanner()->location().end_pos); for_scope = for_scope->FinalizeBlockScope(); - ASSERT(for_scope == NULL); + DCHECK(for_scope == NULL); // Parsed for-in loop w/ variable/const declaration. 
return result; } else { init = variable_statement; } - } else if (peek() == Token::LET) { - Handle<String> name; + } else if (peek() == Token::LET && strict_mode() == STRICT) { + DCHECK(allow_harmony_scoping()); + const AstRawString* name = NULL; VariableDeclarationProperties decl_props = kHasNoInitializers; Block* variable_statement = - ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name, - CHECK_OK); - bool accept_IN = !name.is_null() && decl_props != kHasInitializers; + ParseVariableDeclarations(kForStatement, &decl_props, &let_bindings, + &name, CHECK_OK); + bool accept_IN = name != NULL && decl_props != kHasInitializers; bool accept_OF = decl_props == kHasNoInitializers; ForEachStatement::VisitMode mode; @@ -2940,12 +3076,8 @@ // TODO(keuchel): Move the temporary variable to the block scope, after // implementing stack allocated block scoped variables. - Factory* heap_factory = isolate()->factory(); - Handle<String> tempstr = - heap_factory->NewConsString(heap_factory->dot_for_string(), name); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate(), tempstr, 0); - Handle<String> tempname = heap_factory->InternalizeString(tempstr); - Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname); + Variable* temp = scope_->DeclarationScope()->NewTemporary( + ast_value_factory_->dot_for_string()); VariableProxy* temp_proxy = factory()->NewVariableProxy(temp); ForEachStatement* loop = factory()->NewForEachStatement(mode, labels, pos); @@ -2987,11 +3119,9 @@ bool accept_OF = expression->AsVariableProxy(); if (CheckInOrOf(accept_OF, &mode)) { - if (expression == NULL || !expression->IsValidLeftHandSide()) { - ReportMessageAt(lhs_location, "invalid_lhs_in_for", true); - *ok = false; - return NULL; - } + expression = this->CheckAndRewriteReferenceExpression( + expression, lhs_location, "invalid_lhs_in_for", CHECK_OK); + ForEachStatement* loop = factory()->NewForEachStatement(mode, labels, pos); Target target(&this->target_stack_, loop); @@ -3004,7 +3134,7 @@ 
scope_ = saved_scope; for_scope->set_end_position(scanner()->location().end_pos); for_scope = for_scope->FinalizeBlockScope(); - ASSERT(for_scope == NULL); + DCHECK(for_scope == NULL); // Parsed for-in loop. return loop; @@ -3022,6 +3152,15 @@ // Parsed initializer at this point. Expect(Token::SEMICOLON, CHECK_OK); + // If there are let bindings, then condition and the next statement of the + // for loop must be parsed in a new scope. + Scope* inner_scope = NULL; + if (let_bindings.length() > 0) { + inner_scope = NewScope(for_scope, BLOCK_SCOPE); + inner_scope->set_start_position(scanner()->location().beg_pos); + scope_ = inner_scope; + } + Expression* cond = NULL; if (peek() != Token::SEMICOLON) { cond = ParseExpression(true, CHECK_OK); @@ -3036,31 +3175,42 @@ Expect(Token::RPAREN, CHECK_OK); Statement* body = ParseStatement(NULL, CHECK_OK); - scope_ = saved_scope; - for_scope->set_end_position(scanner()->location().end_pos); - for_scope = for_scope->FinalizeBlockScope(); - if (for_scope != NULL) { - // Rewrite a for statement of the form - // - // for (let x = i; c; n) b - // - // into - // - // { - // let x = i; - // for (; c; n) b - // } - ASSERT(init != NULL); - Block* result = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition); - result->AddStatement(init, zone()); - result->AddStatement(loop, zone()); - result->set_scope(for_scope); - loop->Initialize(NULL, cond, next, body); - return result; + + Statement* result = NULL; + if (let_bindings.length() > 0) { + scope_ = for_scope; + result = DesugarLetBindingsInForStatement(inner_scope, &let_bindings, loop, + init, cond, next, body, CHECK_OK); + scope_ = saved_scope; + for_scope->set_end_position(scanner()->location().end_pos); } else { - loop->Initialize(init, cond, next, body); - return loop; + scope_ = saved_scope; + for_scope->set_end_position(scanner()->location().end_pos); + for_scope = for_scope->FinalizeBlockScope(); + if (for_scope) { + // Rewrite a for statement of the form + // for (const x 
= i; c; n) b + // + // into + // + // { + // const x = i; + // for (; c; n) b + // } + DCHECK(init != NULL); + Block* block = + factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition); + block->AddStatement(init, zone()); + block->AddStatement(loop, zone()); + block->set_scope(for_scope); + loop->Initialize(NULL, cond, next, body); + result = block; + } else { + loop->Initialize(init, cond, next, body); + result = loop; + } } + return result; } @@ -3078,17 +3228,8 @@ } -void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) { - SmartArrayPointer<char> name_string = name->ToCString(DISALLOW_NULLS); - const char* element[1] = { name_string.get() }; - ReportMessage("invalid_preparser_data", - Vector<const char*>(element, 1)); - *ok = false; -} - - bool CompileTimeValue::IsCompileTimeValue(Expression* expression) { - if (expression->AsLiteral() != NULL) return true; + if (expression->IsLiteral()) return true; MaterializedLiteral* lit = expression->AsMaterializedLiteral(); return lit != NULL && lit->is_simple(); } @@ -3097,11 +3238,11 @@ Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate, Expression* expression) { Factory* factory = isolate->factory(); - ASSERT(IsCompileTimeValue(expression)); + DCHECK(IsCompileTimeValue(expression)); Handle<FixedArray> result = factory->NewFixedArray(2, TENURED); ObjectLiteral* object_literal = expression->AsObjectLiteral(); if (object_literal != NULL) { - ASSERT(object_literal->is_simple()); + DCHECK(object_literal->is_simple()); if (object_literal->fast_elements()) { result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS)); } else { @@ -3110,7 +3251,7 @@ result->set(kElementsSlot, *object_literal->constant_properties()); } else { ArrayLiteral* array_literal = expression->AsArrayLiteral(); - ASSERT(array_literal != NULL && array_literal->is_simple()); + DCHECK(array_literal != NULL && array_literal->is_simple()); result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL)); 
result->set(kElementsSlot, *array_literal->constant_elements()); } @@ -3130,16 +3271,85 @@ } +bool CheckAndDeclareArrowParameter(ParserTraits* traits, Expression* expression, + Scope* scope, int* num_params, + Scanner::Location* dupe_loc) { + // Case for empty parameter lists: + // () => ... + if (expression == NULL) return true; + + // Too many parentheses around expression: + // (( ... )) => ... + if (expression->parenthesization_level() > 1) return false; + + // Case for a single parameter: + // (foo) => ... + // foo => ... + if (expression->IsVariableProxy()) { + if (expression->AsVariableProxy()->is_this()) return false; + + const AstRawString* raw_name = expression->AsVariableProxy()->raw_name(); + if (traits->IsEvalOrArguments(raw_name) || + traits->IsFutureStrictReserved(raw_name)) + return false; + + if (scope->IsDeclared(raw_name)) { + *dupe_loc = Scanner::Location( + expression->position(), expression->position() + raw_name->length()); + return false; + } + + scope->DeclareParameter(raw_name, VAR); + ++(*num_params); + return true; + } + + // Case for more than one parameter: + // (foo, bar [, ...]) => ... + if (expression->IsBinaryOperation()) { + BinaryOperation* binop = expression->AsBinaryOperation(); + if (binop->op() != Token::COMMA || binop->left()->is_parenthesized() || + binop->right()->is_parenthesized()) + return false; + + return CheckAndDeclareArrowParameter(traits, binop->left(), scope, + num_params, dupe_loc) && + CheckAndDeclareArrowParameter(traits, binop->right(), scope, + num_params, dupe_loc); + } + + // Any other kind of expression is not a valid parameter list. 
+ return false; +} + + +int ParserTraits::DeclareArrowParametersFromExpression( + Expression* expression, Scope* scope, Scanner::Location* dupe_loc, + bool* ok) { + int num_params = 0; + *ok = CheckAndDeclareArrowParameter(this, expression, scope, &num_params, + dupe_loc); + return num_params; +} + + FunctionLiteral* Parser::ParseFunctionLiteral( - Handle<String> function_name, + const AstRawString* function_name, Scanner::Location function_name_location, bool name_is_strict_reserved, bool is_generator, int function_token_pos, FunctionLiteral::FunctionType function_type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok) { // Function :: // '(' FormalParameterList? ')' '{' FunctionBody '}' + // + // Getter :: + // '(' ')' '{' FunctionBody '}' + // + // Setter :: + // '(' PropertySetParameterList ')' '{' FunctionBody '}' int pos = function_token_pos == RelocInfo::kNoPosition ? peek_position() : function_token_pos; @@ -3147,11 +3357,11 @@ // Anonymous functions were passed either the empty symbol or a null // handle as the function name. Remember if we were passed a non-empty // handle to decide whether to invoke function name inference. - bool should_infer_name = function_name.is_null(); + bool should_infer_name = function_name == NULL; // We want a non-null handle as the function name. if (should_infer_name) { - function_name = isolate()->factory()->empty_string(); + function_name = ast_value_factory_->empty_string(); } int num_parameters = 0; @@ -3200,14 +3410,12 @@ FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_ ? FunctionLiteral::kIsParenthesized : FunctionLiteral::kNotParenthesized; - FunctionLiteral::IsGeneratorFlag generator = is_generator - ? FunctionLiteral::kIsGenerator - : FunctionLiteral::kNotGenerator; - DeferredFeedbackSlotProcessor* slot_processor; AstProperties ast_properties; BailoutReason dont_optimize_reason = kNoReason; // Parse function body. 
- { FunctionState function_state(&function_state_, &scope_, scope, zone()); + { + FunctionState function_state(&function_state_, &scope_, scope, zone(), + ast_value_factory_); scope_->SetScopeName(function_name); if (is_generator) { @@ -3220,7 +3428,7 @@ // in a temporary variable, a definition that is used by "yield" // expressions. This also marks the FunctionState as a generator. Variable* temp = scope_->DeclarationScope()->NewTemporary( - isolate()->factory()->dot_generator_object_string()); + ast_value_factory_->dot_generator_object_string()); function_state.set_generator_object_variable(temp); } @@ -3236,10 +3444,12 @@ Scanner::Location dupe_error_loc = Scanner::Location::invalid(); Scanner::Location reserved_loc = Scanner::Location::invalid(); - bool done = (peek() == Token::RPAREN); + bool done = arity_restriction == FunctionLiteral::GETTER_ARITY || + (peek() == Token::RPAREN && + arity_restriction != FunctionLiteral::SETTER_ARITY); while (!done) { bool is_strict_reserved = false; - Handle<String> param_name = + const AstRawString* param_name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK); // Store locations for possible future error reports. @@ -3254,13 +3464,21 @@ dupe_error_loc = scanner()->location(); } - scope_->DeclareParameter(param_name, VAR); + Variable* var = scope_->DeclareParameter(param_name, VAR); + if (scope->strict_mode() == SLOPPY) { + // TODO(sigurds) Mark every parameter as maybe assigned. This is a + // conservative approximation necessary to account for parameters + // that are assigned via the arguments array. 
+ var->set_maybe_assigned(); + } + num_parameters++; if (num_parameters > Code::kMaxArguments) { - ReportMessageAt(scanner()->location(), "too_many_parameters"); + ReportMessage("too_many_parameters"); *ok = false; return NULL; } + if (arity_restriction == FunctionLiteral::SETTER_ARITY) break; done = (peek() == Token::RPAREN); if (!done) Expect(Token::COMMA, CHECK_OK); } @@ -3281,11 +3499,13 @@ fvar_init_op = Token::INIT_CONST; } VariableMode fvar_mode = - allow_harmony_scoping() && strict_mode() == STRICT ? CONST - : CONST_LEGACY; - fvar = new(zone()) Variable(scope_, - function_name, fvar_mode, true /* is valid LHS */, - Variable::NORMAL, kCreatedInitialized, Interface::NewConst()); + allow_harmony_scoping() && strict_mode() == STRICT + ? CONST : CONST_LEGACY; + DCHECK(function_name != NULL); + fvar = new (zone()) + Variable(scope_, function_name, fvar_mode, true /* is valid LHS */, + Variable::NORMAL, kCreatedInitialized, kNotAssigned, + Interface::NewConst()); VariableProxy* proxy = factory()->NewVariableProxy(fvar); VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration( proxy, fvar_mode, scope_, RelocInfo::kNoPosition); @@ -3331,200 +3551,47 @@ parenthesized_function_ = false; // The bit was set for this function only. if (is_lazily_parsed) { - int function_block_pos = position(); - FunctionEntry entry; - if (cached_data_mode_ == CONSUME_CACHED_DATA) { - // If we have cached data, we use it to skip parsing the function body. - // The data contains the information we need to construct the lazy - // function. - entry = (*cached_data())->GetFunctionEntry(function_block_pos); - if (entry.is_valid()) { - if (entry.end_pos() <= function_block_pos) { - // End position greater than end of stream is safe, and hard - // to check. 
- ReportInvalidPreparseData(function_name, CHECK_OK); - } - scanner()->SeekForward(entry.end_pos() - 1); - - scope->set_end_position(entry.end_pos()); - Expect(Token::RBRACE, CHECK_OK); - isolate()->counters()->total_preparse_skipped()->Increment( - scope->end_position() - function_block_pos); - materialized_literal_count = entry.literal_count(); - expected_property_count = entry.property_count(); - scope_->SetStrictMode(entry.strict_mode()); - } else { - // This case happens when we have preparse data but it doesn't contain - // an entry for the function. As a safety net, fall back to eager - // parsing. It is unclear whether PreParser's laziness analysis can - // produce different results than the Parser's laziness analysis (see - // https://codereview.chromium.org/7565003 ). In this case, we must - // discard all the preparse data, since the symbol data will be wrong. - is_lazily_parsed = false; - cached_data_mode_ = NO_CACHED_DATA; - } - } else { - // With no cached data, we partially parse the function, without - // building an AST. This gathers the data needed to build a lazy - // function. - // FIXME(marja): Now the PreParser doesn't need to log functions / - // symbols; only errors -> clean that up. - SingletonLogger logger; - PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger); - if (result == PreParser::kPreParseStackOverflow) { - // Propagate stack overflow. 
- set_stack_overflow(); - *ok = false; - return NULL; - } - if (logger.has_error()) { - const char* arg = logger.argument_opt(); - Vector<const char*> args; - if (arg != NULL) { - args = Vector<const char*>(&arg, 1); - } - ParserTraits::ReportMessageAt( - Scanner::Location(logger.start(), logger.end()), - logger.message(), - args); - *ok = false; - return NULL; - } - scope->set_end_position(logger.end()); - Expect(Token::RBRACE, CHECK_OK); - isolate()->counters()->total_preparse_skipped()->Increment( - scope->end_position() - function_block_pos); - materialized_literal_count = logger.literals(); - expected_property_count = logger.properties(); - scope_->SetStrictMode(logger.strict_mode()); - if (cached_data_mode_ == PRODUCE_CACHED_DATA) { - ASSERT(log_); - // Position right after terminal '}'. - int body_end = scanner()->location().end_pos; - log_->LogFunction(function_block_pos, body_end, - materialized_literal_count, - expected_property_count, - scope_->strict_mode()); - } - } - } - - if (!is_lazily_parsed) { - // Everything inside an eagerly parsed function will be parsed eagerly - // (see comment above). - ParsingModeScope parsing_mode(this, PARSE_EAGERLY); - body = new(zone()) ZoneList<Statement*>(8, zone()); - if (fvar != NULL) { - VariableProxy* fproxy = scope_->NewUnresolved( - factory(), function_name, Interface::NewConst()); - fproxy->BindTo(fvar); - body->Add(factory()->NewExpressionStatement( - factory()->NewAssignment(fvar_init_op, - fproxy, - factory()->NewThisFunction(pos), - RelocInfo::kNoPosition), - RelocInfo::kNoPosition), zone()); - } - - // For generators, allocate and yield an iterator on function entry. 
- if (is_generator) { - ZoneList<Expression*>* arguments = - new(zone()) ZoneList<Expression*>(0, zone()); - CallRuntime* allocation = factory()->NewCallRuntime( - isolate()->factory()->empty_string(), - Runtime::FunctionForId(Runtime::kHiddenCreateJSGeneratorObject), - arguments, pos); - VariableProxy* init_proxy = factory()->NewVariableProxy( - function_state_->generator_object_variable()); - Assignment* assignment = factory()->NewAssignment( - Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition); - VariableProxy* get_proxy = factory()->NewVariableProxy( - function_state_->generator_object_variable()); - Yield* yield = factory()->NewYield( - get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition); - body->Add(factory()->NewExpressionStatement( - yield, RelocInfo::kNoPosition), zone()); - } - - ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK); - - if (is_generator) { - VariableProxy* get_proxy = factory()->NewVariableProxy( - function_state_->generator_object_variable()); - Expression *undefined = factory()->NewLiteral( - isolate()->factory()->undefined_value(), RelocInfo::kNoPosition); - Yield* yield = factory()->NewYield( - get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition); - body->Add(factory()->NewExpressionStatement( - yield, RelocInfo::kNoPosition), zone()); - } - + SkipLazyFunctionBody(function_name, &materialized_literal_count, + &expected_property_count, CHECK_OK); + } else { + body = ParseEagerFunctionBody(function_name, pos, fvar, fvar_init_op, + is_generator, CHECK_OK); materialized_literal_count = function_state.materialized_literal_count(); expected_property_count = function_state.expected_property_count(); handler_count = function_state.handler_count(); - - Expect(Token::RBRACE, CHECK_OK); - scope->set_end_position(scanner()->location().end_pos); } - // Validate strict mode. We can do this only after parsing the function, - // since the function can declare itself strict. + // Validate strict mode. 
if (strict_mode() == STRICT) { - if (IsEvalOrArguments(function_name)) { - ReportMessageAt(function_name_location, "strict_eval_arguments"); - *ok = false; - return NULL; - } - if (name_is_strict_reserved) { - ReportMessageAt(function_name_location, "unexpected_strict_reserved"); - *ok = false; - return NULL; - } - if (eval_args_error_log.IsValid()) { - ReportMessageAt(eval_args_error_log, "strict_eval_arguments"); - *ok = false; - return NULL; - } - if (dupe_error_loc.IsValid()) { - ReportMessageAt(dupe_error_loc, "strict_param_dupe"); - *ok = false; - return NULL; - } - if (reserved_loc.IsValid()) { - ReportMessageAt(reserved_loc, "unexpected_strict_reserved"); - *ok = false; - return NULL; - } + CheckStrictFunctionNameAndParameters(function_name, + name_is_strict_reserved, + function_name_location, + eval_args_error_log, + dupe_error_loc, + reserved_loc, + CHECK_OK); CheckOctalLiteral(scope->start_position(), scope->end_position(), CHECK_OK); } ast_properties = *factory()->visitor()->ast_properties(); - slot_processor = factory()->visitor()->slot_processor(); dont_optimize_reason = factory()->visitor()->dont_optimize_reason(); - } - if (allow_harmony_scoping() && strict_mode() == STRICT) { - CheckConflictingVarDeclarations(scope, CHECK_OK); + if (allow_harmony_scoping() && strict_mode() == STRICT) { + CheckConflictingVarDeclarations(scope, CHECK_OK); + } } - FunctionLiteral* function_literal = - factory()->NewFunctionLiteral(function_name, - scope, - body, - materialized_literal_count, - expected_property_count, - handler_count, - num_parameters, - duplicate_parameters, - function_type, - FunctionLiteral::kIsFunction, - parenthesized, - generator, - pos); + FunctionLiteral::KindFlag kind = is_generator + ? 
FunctionLiteral::kGeneratorFunction + : FunctionLiteral::kNormalFunction; + FunctionLiteral* function_literal = factory()->NewFunctionLiteral( + function_name, ast_value_factory_, scope, body, + materialized_literal_count, expected_property_count, handler_count, + num_parameters, duplicate_parameters, function_type, + FunctionLiteral::kIsFunction, parenthesized, kind, pos); function_literal->set_function_token_position(function_token_pos); function_literal->set_ast_properties(&ast_properties); - function_literal->set_slot_processor(slot_processor); function_literal->set_dont_optimize_reason(dont_optimize_reason); if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal); @@ -3532,10 +3599,137 @@ } -PreParser::PreParseResult Parser::LazyParseFunctionLiteral( +void Parser::SkipLazyFunctionBody(const AstRawString* function_name, + int* materialized_literal_count, + int* expected_property_count, + bool* ok) { + int function_block_pos = position(); + if (compile_options() == ScriptCompiler::kConsumeParserCache) { + // If we have cached data, we use it to skip parsing the function body. The + // data contains the information we need to construct the lazy function. + FunctionEntry entry = + cached_parse_data_->GetFunctionEntry(function_block_pos); + // Check that cached data is valid. + CHECK(entry.is_valid()); + // End position greater than end of stream is safe, and hard to check. + CHECK(entry.end_pos() > function_block_pos); + scanner()->SeekForward(entry.end_pos() - 1); + + scope_->set_end_position(entry.end_pos()); + Expect(Token::RBRACE, ok); + if (!*ok) { + return; + } + isolate()->counters()->total_preparse_skipped()->Increment( + scope_->end_position() - function_block_pos); + *materialized_literal_count = entry.literal_count(); + *expected_property_count = entry.property_count(); + scope_->SetStrictMode(entry.strict_mode()); + } else { + // With no cached data, we partially parse the function, without building an + // AST. 
This gathers the data needed to build a lazy function. + SingletonLogger logger; + PreParser::PreParseResult result = + ParseLazyFunctionBodyWithPreParser(&logger); + if (result == PreParser::kPreParseStackOverflow) { + // Propagate stack overflow. + set_stack_overflow(); + *ok = false; + return; + } + if (logger.has_error()) { + ParserTraits::ReportMessageAt( + Scanner::Location(logger.start(), logger.end()), + logger.message(), logger.argument_opt(), logger.is_reference_error()); + *ok = false; + return; + } + scope_->set_end_position(logger.end()); + Expect(Token::RBRACE, ok); + if (!*ok) { + return; + } + isolate()->counters()->total_preparse_skipped()->Increment( + scope_->end_position() - function_block_pos); + *materialized_literal_count = logger.literals(); + *expected_property_count = logger.properties(); + scope_->SetStrictMode(logger.strict_mode()); + if (compile_options() == ScriptCompiler::kProduceParserCache) { + DCHECK(log_); + // Position right after terminal '}'. + int body_end = scanner()->location().end_pos; + log_->LogFunction(function_block_pos, body_end, + *materialized_literal_count, + *expected_property_count, + scope_->strict_mode()); + } + } +} + + +ZoneList<Statement*>* Parser::ParseEagerFunctionBody( + const AstRawString* function_name, int pos, Variable* fvar, + Token::Value fvar_init_op, bool is_generator, bool* ok) { + // Everything inside an eagerly parsed function will be parsed eagerly + // (see comment above). 
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY); + ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8, zone()); + if (fvar != NULL) { + VariableProxy* fproxy = scope_->NewUnresolved( + factory(), function_name, Interface::NewConst()); + fproxy->BindTo(fvar); + body->Add(factory()->NewExpressionStatement( + factory()->NewAssignment(fvar_init_op, + fproxy, + factory()->NewThisFunction(pos), + RelocInfo::kNoPosition), + RelocInfo::kNoPosition), zone()); + } + + // For generators, allocate and yield an iterator on function entry. + if (is_generator) { + ZoneList<Expression*>* arguments = + new(zone()) ZoneList<Expression*>(0, zone()); + CallRuntime* allocation = factory()->NewCallRuntime( + ast_value_factory_->empty_string(), + Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), + arguments, pos); + VariableProxy* init_proxy = factory()->NewVariableProxy( + function_state_->generator_object_variable()); + Assignment* assignment = factory()->NewAssignment( + Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition); + VariableProxy* get_proxy = factory()->NewVariableProxy( + function_state_->generator_object_variable()); + Yield* yield = factory()->NewYield( + get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition); + body->Add(factory()->NewExpressionStatement( + yield, RelocInfo::kNoPosition), zone()); + } + + ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK); + + if (is_generator) { + VariableProxy* get_proxy = factory()->NewVariableProxy( + function_state_->generator_object_variable()); + Expression* undefined = + factory()->NewUndefinedLiteral(RelocInfo::kNoPosition); + Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::FINAL, + RelocInfo::kNoPosition); + body->Add(factory()->NewExpressionStatement( + yield, RelocInfo::kNoPosition), zone()); + } + + Expect(Token::RBRACE, CHECK_OK); + scope_->set_end_position(scanner()->location().end_pos); + + return body; +} + + +PreParser::PreParseResult 
Parser::ParseLazyFunctionBodyWithPreParser( SingletonLogger* logger) { HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse()); - ASSERT_EQ(Token::LBRACE, scanner()->current_token()); + DCHECK_EQ(Token::LBRACE, scanner()->current_token()); if (reusable_preparser_ == NULL) { intptr_t stack_limit = isolate()->stack_guard()->real_climit(); @@ -3545,7 +3739,7 @@ reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax()); reusable_preparser_->set_allow_lazy(true); reusable_preparser_->set_allow_generators(allow_generators()); - reusable_preparser_->set_allow_for_of(allow_for_of()); + reusable_preparser_->set_allow_arrow_functions(allow_arrow_functions()); reusable_preparser_->set_allow_harmony_numeric_literals( allow_harmony_numeric_literals()); } @@ -3564,7 +3758,7 @@ int pos = peek_position(); Expect(Token::MOD, CHECK_OK); // Allow "eval" or "arguments" for backward compatibility. - Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK); + const AstRawString* name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK); ZoneList<Expression*>* args = ParseArguments(CHECK_OK); if (extension_ != NULL) { @@ -3573,7 +3767,7 @@ scope_->DeclarationScope()->ForceEagerCompilation(); } - const Runtime::Function* function = Runtime::FunctionForName(name); + const Runtime::Function* function = Runtime::FunctionForName(name->string()); // Check for built-in IS_VAR macro. if (function != NULL && @@ -3585,7 +3779,7 @@ if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) { return args->at(0); } else { - ReportMessage("not_isvar", Vector<const char*>::empty()); + ReportMessage("not_isvar"); *ok = false; return NULL; } @@ -3595,15 +3789,14 @@ if (function != NULL && function->nargs != -1 && function->nargs != args->length()) { - ReportMessage("illegal_access", Vector<const char*>::empty()); + ReportMessage("illegal_access"); *ok = false; return NULL; } // Check that the function is defined if it's an inline runtime call. 
- if (function == NULL && name->Get(0) == '_') { - ParserTraits::ReportMessage("not_defined", - Vector<Handle<String> >(&name, 1)); + if (function == NULL && name->FirstCharacter() == '_') { + ParserTraits::ReportMessage("not_defined", name); *ok = false; return NULL; } @@ -3614,8 +3807,7 @@ Literal* Parser::GetLiteralUndefined(int position) { - return factory()->NewLiteral( - isolate()->factory()->undefined_value(), position); + return factory()->NewUndefinedLiteral(position); } @@ -3624,15 +3816,12 @@ if (decl != NULL) { // In harmony mode we treat conflicting variable bindinds as early // errors. See ES5 16 for a definition of early errors. - Handle<String> name = decl->proxy()->name(); - SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS); - const char* elms[2] = { "Variable", c_string.get() }; - Vector<const char*> args(elms, 2); + const AstRawString* name = decl->proxy()->raw_name(); int position = decl->proxy()->position(); Scanner::Location location = position == RelocInfo::kNoPosition ? 
Scanner::Location::invalid() : Scanner::Location(position, position + 1); - ParserTraits::ReportMessageAt(location, "redeclaration", args); + ParserTraits::ReportMessageAt(location, "var_redeclaration", name); *ok = false; } } @@ -3642,7 +3831,7 @@ // Parser support -bool Parser::TargetStackContainsLabel(Handle<String> label) { +bool Parser::TargetStackContainsLabel(const AstRawString* label) { for (Target* t = target_stack_; t != NULL; t = t->previous()) { BreakableStatement* stat = t->node()->AsBreakableStatement(); if (stat != NULL && ContainsLabel(stat->labels(), label)) @@ -3652,8 +3841,9 @@ } -BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) { - bool anonymous = label.is_null(); +BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label, + bool* ok) { + bool anonymous = label == NULL; for (Target* t = target_stack_; t != NULL; t = t->previous()) { BreakableStatement* stat = t->node()->AsBreakableStatement(); if (stat == NULL) continue; @@ -3667,14 +3857,14 @@ } -IterationStatement* Parser::LookupContinueTarget(Handle<String> label, +IterationStatement* Parser::LookupContinueTarget(const AstRawString* label, bool* ok) { - bool anonymous = label.is_null(); + bool anonymous = label == NULL; for (Target* t = target_stack_; t != NULL; t = t->previous()) { IterationStatement* stat = t->node()->AsIterationStatement(); if (stat == NULL) continue; - ASSERT(stat->is_target_for_anonymous()); + DCHECK(stat->is_target_for_anonymous()); if (anonymous || ContainsLabel(stat->labels(), label)) { RegisterTargetUse(stat->continue_target(), t->previous()); return stat; @@ -3695,55 +3885,56 @@ } -Expression* Parser::NewThrowReferenceError(Handle<String> message) { - return NewThrowError(isolate()->factory()->MakeReferenceError_string(), - message, HandleVector<Object>(NULL, 0)); -} - - -Expression* Parser::NewThrowSyntaxError(Handle<String> message, - Handle<Object> first) { - int argc = first.is_null() ? 
0 : 1; - Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc); - return NewThrowError( - isolate()->factory()->MakeSyntaxError_string(), message, arguments); +void Parser::HandleSourceURLComments() { + if (scanner_.source_url()->length() > 0) { + Handle<String> source_url = scanner_.source_url()->Internalize(isolate()); + info_->script()->set_source_url(*source_url); + } + if (scanner_.source_mapping_url()->length() > 0) { + Handle<String> source_mapping_url = + scanner_.source_mapping_url()->Internalize(isolate()); + info_->script()->set_source_mapping_url(*source_mapping_url); + } } -Expression* Parser::NewThrowTypeError(Handle<String> message, - Handle<Object> first, - Handle<Object> second) { - ASSERT(!first.is_null() && !second.is_null()); - Handle<Object> elements[] = { first, second }; - Vector< Handle<Object> > arguments = - HandleVector<Object>(elements, ARRAY_SIZE(elements)); - return NewThrowError( - isolate()->factory()->MakeTypeError_string(), message, arguments); +void Parser::ThrowPendingError() { + DCHECK(ast_value_factory_->IsInternalized()); + if (has_pending_error_) { + MessageLocation location(script_, + pending_error_location_.beg_pos, + pending_error_location_.end_pos); + Factory* factory = isolate()->factory(); + bool has_arg = + pending_error_arg_ != NULL || pending_error_char_arg_ != NULL; + Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0); + if (pending_error_arg_ != NULL) { + Handle<String> arg_string = pending_error_arg_->string(); + elements->set(0, *arg_string); + } else if (pending_error_char_arg_ != NULL) { + Handle<String> arg_string = + factory->NewStringFromUtf8(CStrVector(pending_error_char_arg_)) + .ToHandleChecked(); + elements->set(0, *arg_string); + } + isolate()->debug()->OnCompileError(script_); + + Handle<JSArray> array = factory->NewJSArrayWithElements(elements); + Handle<Object> result = pending_error_is_reference_error_ + ? 
factory->NewReferenceError(pending_error_message_, array) + : factory->NewSyntaxError(pending_error_message_, array); + isolate()->Throw(*result, &location); + } } -Expression* Parser::NewThrowError(Handle<String> constructor, - Handle<String> message, - Vector< Handle<Object> > arguments) { - int argc = arguments.length(); - Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc, - TENURED); - for (int i = 0; i < argc; i++) { - Handle<Object> element = arguments[i]; - if (!element.is_null()) { - elements->set(i, *element); +void Parser::InternalizeUseCounts() { + for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount; + ++feature) { + for (int i = 0; i < use_counts_[feature]; ++i) { + isolate()->CountUsage(v8::Isolate::UseCounterFeature(feature)); } } - Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements( - elements, FAST_ELEMENTS, TENURED); - - int pos = position(); - ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone()); - args->Add(factory()->NewLiteral(message, pos), zone()); - args->Add(factory()->NewLiteral(array, pos), zone()); - CallRuntime* call_constructor = - factory()->NewCallRuntime(constructor, NULL, args, pos); - return factory()->NewThrow(call_constructor, pos); } @@ -3820,8 +4011,7 @@ RegExpTree* RegExpParser::ReportError(Vector<const char> message) { failed_ = true; - *error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED); - ASSERT(!error_->is_null()); + *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked(); // Zip to the end to make sure the no more input is read. current_ = kEndMarker; next_pos_ = in()->length(); @@ -3833,7 +4023,7 @@ // Disjunction RegExpTree* RegExpParser::ParsePattern() { RegExpTree* result = ParseDisjunction(CHECK_FAILED); - ASSERT(!has_more()); + DCHECK(!has_more()); // If the result of parsing is a literal string atom, and it has the // same length as the input, then the atom is identical to the input. 
if (result->IsAtom() && result->AsAtom()->length() == in()->length()) { @@ -3866,14 +4056,14 @@ // Inside a parenthesized group when hitting end of input. ReportError(CStrVector("Unterminated group") CHECK_FAILED); } - ASSERT_EQ(INITIAL, stored_state->group_type()); + DCHECK_EQ(INITIAL, stored_state->group_type()); // Parsing completed successfully. return builder->ToRegExp(); case ')': { if (!stored_state->IsSubexpression()) { ReportError(CStrVector("Unmatched ')'") CHECK_FAILED); } - ASSERT_NE(INITIAL, stored_state->group_type()); + DCHECK_NE(INITIAL, stored_state->group_type()); Advance(); // End disjunction parsing and convert builder content to new single @@ -3895,7 +4085,7 @@ captures_->at(capture_index - 1) = capture; body = capture; } else if (group_type != GROUPING) { - ASSERT(group_type == POSITIVE_LOOKAHEAD || + DCHECK(group_type == POSITIVE_LOOKAHEAD || group_type == NEGATIVE_LOOKAHEAD); bool is_positive = (group_type == POSITIVE_LOOKAHEAD); body = new(zone()) RegExpLookahead(body, @@ -4179,7 +4369,7 @@ #ifdef DEBUG -// Currently only used in an ASSERT. +// Currently only used in an DCHECK. static bool IsSpecialClassEscape(uc32 c) { switch (c) { case 'd': case 'D': @@ -4233,8 +4423,8 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) { - ASSERT_EQ('\\', current()); - ASSERT('1' <= Next() && Next() <= '9'); + DCHECK_EQ('\\', current()); + DCHECK('1' <= Next() && Next() <= '9'); // Try to parse a decimal literal that is no greater than the total number // of left capturing parentheses in the input. int start = position(); @@ -4277,7 +4467,7 @@ // Returns true if parsing succeeds, and set the min_out and max_out // values. Values are truncated to RegExpTree::kInfinity if they overflow. 
bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) { - ASSERT_EQ(current(), '{'); + DCHECK_EQ(current(), '{'); int start = position(); Advance(); int min = 0; @@ -4337,7 +4527,7 @@ uc32 RegExpParser::ParseOctalLiteral() { - ASSERT(('0' <= current() && current() <= '7') || current() == kEndMarker); + DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker); // For compatibility with some other browsers (not all), we parse // up to three octal digits with a value below 256. uc32 value = current() - '0'; @@ -4377,8 +4567,8 @@ uc32 RegExpParser::ParseClassCharacterEscape() { - ASSERT(current() == '\\'); - ASSERT(has_next() && !IsSpecialClassEscape(Next())); + DCHECK(current() == '\\'); + DCHECK(has_next() && !IsSpecialClassEscape(Next())); Advance(); switch (current()) { case 'b': @@ -4458,7 +4648,7 @@ CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) { - ASSERT_EQ(0, *char_class); + DCHECK_EQ(0, *char_class); uc32 first = current(); if (first == '\\') { switch (Next()) { @@ -4501,7 +4691,7 @@ static const char* kUnterminated = "Unterminated character class"; static const char* kRangeOutOfOrder = "Range out of order in character class"; - ASSERT_EQ(current(), '['); + DCHECK_EQ(current(), '['); Advance(); bool is_negated = false; if (current() == '^') { @@ -4556,110 +4746,19 @@ // ---------------------------------------------------------------------------- // The Parser interface. -ScriptDataImpl::~ScriptDataImpl() { - if (owns_store_) store_.Dispose(); -} - - -int ScriptDataImpl::Length() { - return store_.length() * sizeof(unsigned); -} - - -const char* ScriptDataImpl::Data() { - return reinterpret_cast<const char*>(store_.start()); -} - - -bool ScriptDataImpl::HasError() { - return has_error(); -} - - -void ScriptDataImpl::Initialize() { - // Prepares state for use. 
- if (store_.length() >= PreparseDataConstants::kHeaderSize) { - function_index_ = PreparseDataConstants::kHeaderSize; - int symbol_data_offset = PreparseDataConstants::kHeaderSize - + store_[PreparseDataConstants::kFunctionsSizeOffset]; - if (store_.length() > symbol_data_offset) { - symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]); - } else { - // Partial preparse causes no symbol information. - symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length()); - } - symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length()); - } -} - - -int ScriptDataImpl::ReadNumber(byte** source) { - // Reads a number from symbol_data_ in base 128. The most significant - // bit marks that there are more digits. - // If the first byte is 0x80 (kNumberTerminator), it would normally - // represent a leading zero. Since that is useless, and therefore won't - // appear as the first digit of any actual value, it is used to - // mark the end of the input stream. - byte* data = *source; - if (data >= symbol_data_end_) return -1; - byte input = *data; - if (input == PreparseDataConstants::kNumberTerminator) { - // End of stream marker. - return -1; - } - int result = input & 0x7f; - data++; - while ((input & 0x80u) != 0) { - if (data >= symbol_data_end_) return -1; - input = *data; - result = (result << 7) | (input & 0x7f); - data++; - } - *source = data; - return result; -} - - -// Create a Scanner for the preparser to use as input, and preparse the source. 
-ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate, - Utf16CharacterStream* source) { - CompleteParserRecorder recorder; - HistogramTimerScope timer(isolate->counters()->pre_parse()); - Scanner scanner(isolate->unicode_cache()); - intptr_t stack_limit = isolate->stack_guard()->real_climit(); - PreParser preparser(&scanner, &recorder, stack_limit); - preparser.set_allow_lazy(true); - preparser.set_allow_generators(FLAG_harmony_generators); - preparser.set_allow_for_of(FLAG_harmony_iteration); - preparser.set_allow_harmony_scoping(FLAG_harmony_scoping); - preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals); - scanner.Initialize(source); - PreParser::PreParseResult result = preparser.PreParseProgram(); - if (result == PreParser::kPreParseStackOverflow) { - isolate->StackOverflow(); - return NULL; - } - - // Extract the accumulated data from the recorder as a single - // contiguous vector that we are responsible for disposing. - Vector<unsigned> store = recorder.ExtractData(); - return new ScriptDataImpl(store); -} - - bool RegExpParser::ParseRegExp(FlatStringReader* input, bool multiline, RegExpCompileData* result, Zone* zone) { - ASSERT(result != NULL); + DCHECK(result != NULL); RegExpParser parser(input, &result->error, multiline, zone); RegExpTree* tree = parser.ParsePattern(); if (parser.failed()) { - ASSERT(tree == NULL); - ASSERT(!result->error.is_null()); + DCHECK(tree == NULL); + DCHECK(!result->error.is_null()); } else { - ASSERT(tree != NULL); - ASSERT(result->error.is_null()); + DCHECK(tree != NULL); + DCHECK(result->error.is_null()); result->tree = tree; int capture_count = parser.captures_started(); result->simple = tree->IsAtom() && parser.simple() && capture_count == 0; @@ -4671,35 +4770,41 @@ bool Parser::Parse() { - ASSERT(info()->function() == NULL); + DCHECK(info()->function() == NULL); FunctionLiteral* result = NULL; + ast_value_factory_ = info()->ast_value_factory(); + if (ast_value_factory_ == NULL) { + 
ast_value_factory_ = + new AstValueFactory(zone(), isolate()->heap()->HashSeed()); + } + if (allow_natives_syntax() || extension_ != NULL) { + // If intrinsics are allowed, the Parser cannot operate independent of the + // V8 heap because of Rumtime. Tell the string table to internalize strings + // and values right after they're created. + ast_value_factory_->Internalize(isolate()); + } + if (info()->is_lazy()) { - ASSERT(!info()->is_eval()); + DCHECK(!info()->is_eval()); if (info()->shared_info()->is_function()) { result = ParseLazy(); } else { result = ParseProgram(); } } else { - SetCachedData(info()->cached_data(), info()->cached_data_mode()); - if (info()->cached_data_mode() == CONSUME_CACHED_DATA && - (*info()->cached_data())->has_error()) { - ScriptDataImpl* cached_data = *(info()->cached_data()); - Scanner::Location loc = cached_data->MessageLocation(); - const char* message = cached_data->BuildMessage(); - Vector<const char*> args = cached_data->BuildArgs(); - ParserTraits::ReportMessageAt(loc, message, args); - DeleteArray(message); - for (int i = 0; i < args.length(); i++) { - DeleteArray(args[i]); - } - DeleteArray(args.start()); - ASSERT(info()->isolate()->has_pending_exception()); - } else { - result = ParseProgram(); - } + SetCachedData(); + result = ParseProgram(); } info()->SetFunction(result); + DCHECK(ast_value_factory_->IsInternalized()); + // info takes ownership of ast_value_factory_. + if (info()->ast_value_factory() == NULL) { + info()->SetAstValueFactory(ast_value_factory_); + } + ast_value_factory_ = NULL; + + InternalizeUseCounts(); + return (result != NULL); } diff -Nru nodejs-0.11.13/deps/v8/src/parser.h nodejs-0.11.15/deps/v8/src/parser.h --- nodejs-0.11.13/deps/v8/src/parser.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/parser.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_PARSER_H_ #define V8_PARSER_H_ -#include "allocation.h" -#include "ast.h" -#include "compiler.h" // For CachedDataMode -#include "preparse-data-format.h" -#include "preparse-data.h" -#include "scopes.h" -#include "preparser.h" +#include "src/allocation.h" +#include "src/ast.h" +#include "src/compiler.h" // For CachedDataMode +#include "src/preparse-data.h" +#include "src/preparse-data-format.h" +#include "src/preparser.h" +#include "src/scopes.h" namespace v8 { class ScriptCompiler; @@ -70,7 +47,7 @@ int literal_count() { return backing_[kLiteralCountIndex]; } int property_count() { return backing_[kPropertyCountIndex]; } StrictMode strict_mode() { - ASSERT(backing_[kStrictModeIndex] == SLOPPY || + DCHECK(backing_[kStrictModeIndex] == SLOPPY || backing_[kStrictModeIndex] == STRICT); return static_cast<StrictMode>(backing_[kStrictModeIndex]); } @@ -82,83 +59,39 @@ }; -class ScriptDataImpl : public ScriptData { +// Wrapper around ScriptData to provide parser-specific functionality. +class ParseData { public: - explicit ScriptDataImpl(Vector<unsigned> store) - : store_(store), - owns_store_(true) { } - - // Create an empty ScriptDataImpl that is guaranteed to not satisfy - // a SanityCheck. - ScriptDataImpl() : owns_store_(false) { } - - virtual ~ScriptDataImpl(); - virtual int Length(); - virtual const char* Data(); - virtual bool HasError(); - + explicit ParseData(ScriptData* script_data) : script_data_(script_data) { + CHECK(IsAligned(script_data->length(), sizeof(unsigned))); + CHECK(IsSane()); + } void Initialize(); - void ReadNextSymbolPosition(); - FunctionEntry GetFunctionEntry(int start); - int GetSymbolIdentifier(); - bool SanityCheck(); + int FunctionCount(); - Scanner::Location MessageLocation(); - const char* BuildMessage(); - Vector<const char*> BuildArgs(); - - int symbol_count() { - return (store_.length() > PreparseDataConstants::kHeaderSize) - ? 
store_[PreparseDataConstants::kSymbolCountOffset] - : 0; - } - // The following functions should only be called if SanityCheck has - // returned true. - bool has_error() { return store_[PreparseDataConstants::kHasErrorOffset]; } - unsigned magic() { return store_[PreparseDataConstants::kMagicOffset]; } - unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; } + bool HasError(); - private: - friend class v8::ScriptCompiler; - Vector<unsigned> store_; - unsigned char* symbol_data_; - unsigned char* symbol_data_end_; - int function_index_; - bool owns_store_; - - unsigned Read(int position); - unsigned* ReadAddress(int position); - // Reads a number from the current symbols - int ReadNumber(byte** source); - - ScriptDataImpl(const char* backing_store, int length) - : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)), - length / static_cast<int>(sizeof(unsigned))), - owns_store_(false) { - ASSERT_EQ(0, static_cast<int>( - reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned))); + unsigned* Data() { // Writable data as unsigned int array. + return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data())); } - // Read strings written by ParserRecorder::WriteString. - static const char* ReadString(unsigned* start, int* chars); - - friend class ScriptData; -}; + private: + bool IsSane(); + unsigned Magic(); + unsigned Version(); + int FunctionsSize(); + int Length() const { + // Script data length is already checked to be a multiple of unsigned size. + return script_data_->length() / sizeof(unsigned); + } + ScriptData* script_data_; + int function_index_; -class PreParserApi { - public: - // Pre-parse a character stream and return full preparse data. - // - // This interface is here instead of in preparser.h because it instantiates a - // preparser recorder object that is suited to the parser's purposes. Also, - // the preparser doesn't know about ScriptDataImpl. 
- static ScriptDataImpl* PreParse(Isolate* isolate, - Utf16CharacterStream* source); + DISALLOW_COPY_AND_ASSIGN(ParseData); }; - // ---------------------------------------------------------------------------- // REGEXP PARSING @@ -187,12 +120,12 @@ } T* last() { - ASSERT(last_ != NULL); + DCHECK(last_ != NULL); return last_; } T* RemoveLast() { - ASSERT(last_ != NULL); + DCHECK(last_ != NULL); T* result = last_; if ((list_ != NULL) && (list_->length() > 0)) last_ = list_->RemoveLast(); @@ -202,13 +135,13 @@ } T* Get(int i) { - ASSERT((0 <= i) && (i < length())); + DCHECK((0 <= i) && (i < length())); if (list_ == NULL) { - ASSERT_EQ(0, i); + DCHECK_EQ(0, i); return last_; } else { if (i == list_->length()) { - ASSERT(last_ != NULL); + DCHECK(last_ != NULL); return last_; } else { return list_->at(i); @@ -418,11 +351,30 @@ // Used by FunctionState and BlockState. typedef v8::internal::Scope Scope; + typedef v8::internal::Scope* ScopePtr; typedef Variable GeneratorVariable; typedef v8::internal::Zone Zone; + class Checkpoint BASE_EMBEDDED { + public: + template <typename Parser> + explicit Checkpoint(Parser* parser) { + isolate_ = parser->zone()->isolate(); + saved_ast_node_id_ = isolate_->ast_node_id(); + } + + void Restore() { isolate_->set_ast_node_id(saved_ast_node_id_); } + + private: + Isolate* isolate_; + int saved_ast_node_id_; + }; + + typedef v8::internal::AstProperties AstProperties; + typedef Vector<VariableProxy*> ParameterIdentifierVector; + // Return types for traversing functions. 
- typedef Handle<String> Identifier; + typedef const AstRawString* Identifier; typedef v8::internal::Expression* Expression; typedef Yield* YieldExpression; typedef v8::internal::FunctionLiteral* FunctionLiteral; @@ -430,6 +382,7 @@ typedef ObjectLiteral::Property* ObjectLiteralProperty; typedef ZoneList<v8::internal::Expression*>* ExpressionList; typedef ZoneList<ObjectLiteral::Property*>* PropertyList; + typedef ZoneList<v8::internal::Statement*>* StatementList; // For constructing objects returned by the traversing functions. typedef AstNodeFactory<AstConstructionVisitor> Factory; @@ -441,41 +394,49 @@ template<typename FunctionState> static void SetUpFunctionState(FunctionState* function_state, Zone* zone) { Isolate* isolate = zone->isolate(); - function_state->isolate_ = isolate; function_state->saved_ast_node_id_ = isolate->ast_node_id(); isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt()); } template<typename FunctionState> - static void TearDownFunctionState(FunctionState* function_state) { + static void TearDownFunctionState(FunctionState* function_state, Zone* zone) { if (function_state->outer_function_state_ != NULL) { - function_state->isolate_->set_ast_node_id( - function_state->saved_ast_node_id_); + zone->isolate()->set_ast_node_id(function_state->saved_ast_node_id_); } } // Helper functions for recursive descent. - bool IsEvalOrArguments(Handle<String> identifier) const; + bool IsEvalOrArguments(const AstRawString* identifier) const; + V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const; // Returns true if the expression is of type "this.foo". 
static bool IsThisProperty(Expression* expression); static bool IsIdentifier(Expression* expression); + static const AstRawString* AsIdentifier(Expression* expression) { + DCHECK(IsIdentifier(expression)); + return expression->AsVariableProxy()->raw_name(); + } + static bool IsBoilerplateProperty(ObjectLiteral::Property* property) { return ObjectLiteral::IsBoilerplateProperty(property); } - static bool IsArrayIndex(Handle<String> string, uint32_t* index) { - return !string.is_null() && string->AsArrayIndex(index); + static bool IsArrayIndex(const AstRawString* string, uint32_t* index) { + return string->AsArrayIndex(index); } // Functions for encapsulating the differences between parsing and preparsing; // operations interleaved with the recursive descent. - static void PushLiteralName(FuncNameInferrer* fni, Handle<String> id) { + static void PushLiteralName(FuncNameInferrer* fni, const AstRawString* id) { fni->PushLiteralName(id); } void PushPropertyName(FuncNameInferrer* fni, Expression* expression); + static void InferFunctionName(FuncNameInferrer* fni, + FunctionLiteral* func_to_infer) { + fni->AddFunction(func_to_infer); + } static void CheckFunctionLiteralInsideTopLevelObjectLiteral( Scope* scope, Expression* value, bool* has_function) { @@ -497,13 +458,8 @@ void CheckPossibleEvalCall(Expression* expression, Scope* scope); // Determine if the expression is a variable proxy and mark it as being used - // in an assignment or with a increment/decrement operator. This is currently - // used on for the statically checking assignments to harmony const bindings. - static Expression* MarkExpressionAsLValue(Expression* expression); - - // Checks LHS expression for assignment and prefix/postfix increment/decrement - // in strict mode. - void CheckStrictModeLValue(Expression* expression, bool* ok); + // in an assignment or with a increment/decrement operator. 
+ static Expression* MarkExpressionAsAssigned(Expression* expression); // Returns true if we have a binary expression between two numeric // literals. In that case, *x will be changed to an expression which is the @@ -527,70 +483,119 @@ Expression* expression, Token::Value op, int pos, AstNodeFactory<AstConstructionVisitor>* factory); + // Generate AST node that throws a ReferenceError with the given type. + Expression* NewThrowReferenceError(const char* type, int pos); + + // Generate AST node that throws a SyntaxError with the given + // type. The first argument may be null (in the handle sense) in + // which case no arguments are passed to the constructor. + Expression* NewThrowSyntaxError( + const char* type, const AstRawString* arg, int pos); + + // Generate AST node that throws a TypeError with the given + // type. Both arguments must be non-null (in the handle sense). + Expression* NewThrowTypeError(const char* type, const AstRawString* arg, + int pos); + + // Generic AST generator for throwing errors from compiled code. + Expression* NewThrowError( + const AstRawString* constructor, const char* type, + const AstRawString* arg, int pos); + // Reporting errors. void ReportMessageAt(Scanner::Location source_location, const char* message, - Vector<const char*> args, + const char* arg = NULL, bool is_reference_error = false); void ReportMessage(const char* message, - Vector<Handle<String> > args, + const char* arg = NULL, + bool is_reference_error = false); + void ReportMessage(const char* message, + const AstRawString* arg, bool is_reference_error = false); void ReportMessageAt(Scanner::Location source_location, const char* message, - Vector<Handle<String> > args, + const AstRawString* arg, bool is_reference_error = false); // "null" return type creators. 
- static Handle<String> EmptyIdentifier() { - return Handle<String>(); + static const AstRawString* EmptyIdentifier() { + return NULL; } static Expression* EmptyExpression() { return NULL; } + static Expression* EmptyArrowParamList() { return NULL; } static Literal* EmptyLiteral() { return NULL; } + // Used in error return values. static ZoneList<Expression*>* NullExpressionList() { return NULL; } + // Non-NULL empty string. + V8_INLINE const AstRawString* EmptyIdentifierString(); + // Odd-ball literal creators. Literal* GetLiteralTheHole(int position, AstNodeFactory<AstConstructionVisitor>* factory); // Producing data during the recursive descent. - Handle<String> GetSymbol(Scanner* scanner = NULL); - Handle<String> NextLiteralString(Scanner* scanner, - PretenureFlag tenured); + const AstRawString* GetSymbol(Scanner* scanner); + const AstRawString* GetNextSymbol(Scanner* scanner); + Expression* ThisExpression(Scope* scope, - AstNodeFactory<AstConstructionVisitor>* factory); + AstNodeFactory<AstConstructionVisitor>* factory, + int pos = RelocInfo::kNoPosition); Literal* ExpressionFromLiteral( Token::Value token, int pos, Scanner* scanner, AstNodeFactory<AstConstructionVisitor>* factory); Expression* ExpressionFromIdentifier( - Handle<String> name, int pos, Scope* scope, + const AstRawString* name, int pos, Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory); Expression* ExpressionFromString( int pos, Scanner* scanner, AstNodeFactory<AstConstructionVisitor>* factory); + Expression* GetIterator(Expression* iterable, + AstNodeFactory<AstConstructionVisitor>* factory); ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) { return new(zone) ZoneList<v8::internal::Expression*>(size, zone); } ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size, Zone* zone) { return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone); } + ZoneList<v8::internal::Statement*>* NewStatementList(int size, Zone* zone) { + return new(zone) 
ZoneList<v8::internal::Statement*>(size, zone); + } + V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type); + + // Utility functions + int DeclareArrowParametersFromExpression(Expression* expression, Scope* scope, + Scanner::Location* dupe_loc, + bool* ok); + V8_INLINE AstValueFactory* ast_value_factory(); // Temporary glue; these functions will move to ParserBase. Expression* ParseV8Intrinsic(bool* ok); FunctionLiteral* ParseFunctionLiteral( - Handle<String> name, + const AstRawString* name, Scanner::Location function_name_location, bool name_is_strict_reserved, bool is_generator, int function_token_position, FunctionLiteral::FunctionType type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok); + V8_INLINE void SkipLazyFunctionBody(const AstRawString* name, + int* materialized_literal_count, + int* expected_property_count, bool* ok); + V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody( + const AstRawString* name, int pos, Variable* fvar, + Token::Value fvar_init_op, bool is_generator, bool* ok); + V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope, + bool* ok); private: Parser* parser_; @@ -603,6 +608,8 @@ ~Parser() { delete reusable_preparser_; reusable_preparser_ = NULL; + delete cached_parse_data_; + cached_parse_data_ = NULL; } // Parses the source code represented by the compilation info and sets its @@ -654,24 +661,12 @@ FunctionLiteral* DoParseProgram(CompilationInfo* info, Handle<String> source); - // Report syntax error - void ReportInvalidPreparseData(Handle<String> name, bool* ok); - - void SetCachedData(ScriptDataImpl** data, - CachedDataMode cached_data_mode) { - cached_data_mode_ = cached_data_mode; - if (cached_data_mode == NO_CACHED_DATA) { - cached_data_ = NULL; - } else { - ASSERT(data != NULL); - cached_data_ = data; - symbol_cache_.Initialize(*data ? 
(*data)->symbol_count() : 0, zone()); - } - } + void SetCachedData(); bool inside_with() const { return scope_->inside_with(); } - ScriptDataImpl** cached_data() const { return cached_data_; } - CachedDataMode cached_data_mode() const { return cached_data_mode_; } + ScriptCompiler::CompileOptions compile_options() const { + return info_->compile_options(); + } Scope* DeclarationScope(VariableMode mode) { return IsLexicalVariableMode(mode) ? scope_ : scope_->DeclarationScope(); @@ -683,8 +678,10 @@ // for failure at the call sites. void* ParseSourceElements(ZoneList<Statement*>* processor, int end_token, bool is_eval, bool is_global, bool* ok); - Statement* ParseModuleElement(ZoneStringList* labels, bool* ok); - Statement* ParseModuleDeclaration(ZoneStringList* names, bool* ok); + Statement* ParseModuleElement(ZoneList<const AstRawString*>* labels, + bool* ok); + Statement* ParseModuleDeclaration(ZoneList<const AstRawString*>* names, + bool* ok); Module* ParseModule(bool* ok); Module* ParseModuleLiteral(bool* ok); Module* ParseModulePath(bool* ok); @@ -693,52 +690,64 @@ Module* ParseModuleSpecifier(bool* ok); Block* ParseImportDeclaration(bool* ok); Statement* ParseExportDeclaration(bool* ok); - Statement* ParseBlockElement(ZoneStringList* labels, bool* ok); - Statement* ParseStatement(ZoneStringList* labels, bool* ok); - Statement* ParseFunctionDeclaration(ZoneStringList* names, bool* ok); + Statement* ParseBlockElement(ZoneList<const AstRawString*>* labels, bool* ok); + Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok); + Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names, + bool* ok); Statement* ParseNativeDeclaration(bool* ok); - Block* ParseBlock(ZoneStringList* labels, bool* ok); + Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok); Block* ParseVariableStatement(VariableDeclarationContext var_context, - ZoneStringList* names, + ZoneList<const AstRawString*>* names, bool* ok); Block* 
ParseVariableDeclarations(VariableDeclarationContext var_context, VariableDeclarationProperties* decl_props, - ZoneStringList* names, - Handle<String>* out, + ZoneList<const AstRawString*>* names, + const AstRawString** out, bool* ok); - Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels, - bool* ok); - IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok); + Statement* ParseExpressionOrLabelledStatement( + ZoneList<const AstRawString*>* labels, bool* ok); + IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels, + bool* ok); Statement* ParseContinueStatement(bool* ok); - Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok); + Statement* ParseBreakStatement(ZoneList<const AstRawString*>* labels, + bool* ok); Statement* ParseReturnStatement(bool* ok); - Statement* ParseWithStatement(ZoneStringList* labels, bool* ok); + Statement* ParseWithStatement(ZoneList<const AstRawString*>* labels, + bool* ok); CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok); - SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok); - DoWhileStatement* ParseDoWhileStatement(ZoneStringList* labels, bool* ok); - WhileStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok); - Statement* ParseForStatement(ZoneStringList* labels, bool* ok); + SwitchStatement* ParseSwitchStatement(ZoneList<const AstRawString*>* labels, + bool* ok); + DoWhileStatement* ParseDoWhileStatement(ZoneList<const AstRawString*>* labels, + bool* ok); + WhileStatement* ParseWhileStatement(ZoneList<const AstRawString*>* labels, + bool* ok); + Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok); Statement* ParseThrowStatement(bool* ok); Expression* MakeCatchContext(Handle<String> id, VariableProxy* value); TryStatement* ParseTryStatement(bool* ok); DebuggerStatement* ParseDebuggerStatement(bool* ok); // Support for hamony block scoped bindings. 
- Block* ParseScopedBlock(ZoneStringList* labels, bool* ok); + Block* ParseScopedBlock(ZoneList<const AstRawString*>* labels, bool* ok); // Initialize the components of a for-in / for-of statement. void InitializeForEachStatement(ForEachStatement* stmt, Expression* each, Expression* subject, Statement* body); + Statement* DesugarLetBindingsInForStatement( + Scope* inner_scope, ZoneList<const AstRawString*>* names, + ForStatement* loop, Statement* init, Expression* cond, Statement* next, + Statement* body, bool* ok); FunctionLiteral* ParseFunctionLiteral( - Handle<String> name, + const AstRawString* name, Scanner::Location function_name_location, bool name_is_strict_reserved, bool is_generator, int function_token_position, FunctionLiteral::FunctionType type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok); // Magical syntax support. @@ -761,14 +770,14 @@ void CheckConflictingVarDeclarations(Scope* scope, bool* ok); // Parser support - VariableProxy* NewUnresolved(Handle<String> name, + VariableProxy* NewUnresolved(const AstRawString* name, VariableMode mode, Interface* interface); void Declare(Declaration* declaration, bool resolve, bool* ok); - bool TargetStackContainsLabel(Handle<String> label); - BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok); - IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok); + bool TargetStackContainsLabel(const AstRawString* label); + BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok); + IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok); void RegisterTargetUse(Label* target, Target* stop); @@ -776,45 +785,95 @@ Scope* NewScope(Scope* parent, ScopeType type); - Handle<String> LookupCachedSymbol(int symbol_id); - - // Generate AST node that throw a ReferenceError with the given type. - Expression* NewThrowReferenceError(Handle<String> type); - - // Generate AST node that throw a SyntaxError with the given - // type. 
The first argument may be null (in the handle sense) in - // which case no arguments are passed to the constructor. - Expression* NewThrowSyntaxError(Handle<String> type, Handle<Object> first); + // Skip over a lazy function, either using cached data if we have it, or + // by parsing the function with PreParser. Consumes the ending }. + void SkipLazyFunctionBody(const AstRawString* function_name, + int* materialized_literal_count, + int* expected_property_count, + bool* ok); + + PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser( + SingletonLogger* logger); + + // Consumes the ending }. + ZoneList<Statement*>* ParseEagerFunctionBody( + const AstRawString* function_name, int pos, Variable* fvar, + Token::Value fvar_init_op, bool is_generator, bool* ok); - // Generate AST node that throw a TypeError with the given - // type. Both arguments must be non-null (in the handle sense). - Expression* NewThrowTypeError(Handle<String> type, - Handle<Object> first, - Handle<Object> second); + void HandleSourceURLComments(); - // Generic AST generator for throwing errors from compiled code. - Expression* NewThrowError(Handle<String> constructor, - Handle<String> type, - Vector< Handle<Object> > arguments); + void ThrowPendingError(); - PreParser::PreParseResult LazyParseFunctionLiteral( - SingletonLogger* logger); + void InternalizeUseCounts(); Isolate* isolate_; - ZoneList<Handle<String> > symbol_cache_; Handle<Script> script_; Scanner scanner_; PreParser* reusable_preparser_; Scope* original_scope_; // for ES5 function declarations in sloppy eval Target* target_stack_; // for break, continue statements - ScriptDataImpl** cached_data_; - CachedDataMode cached_data_mode_; + ParseData* cached_parse_data_; + AstValueFactory* ast_value_factory_; CompilationInfo* info_; + + // Pending errors. 
+ bool has_pending_error_; + Scanner::Location pending_error_location_; + const char* pending_error_message_; + const AstRawString* pending_error_arg_; + const char* pending_error_char_arg_; + bool pending_error_is_reference_error_; + + int use_counts_[v8::Isolate::kUseCounterFeatureCount]; }; +bool ParserTraits::IsFutureStrictReserved( + const AstRawString* identifier) const { + return identifier->IsOneByteEqualTo("yield") || + parser_->scanner()->IdentifierIsFutureStrictReserved(identifier); +} + + +Scope* ParserTraits::NewScope(Scope* parent_scope, ScopeType scope_type) { + return parser_->NewScope(parent_scope, scope_type); +} + + +const AstRawString* ParserTraits::EmptyIdentifierString() { + return parser_->ast_value_factory_->empty_string(); +} + + +void ParserTraits::SkipLazyFunctionBody(const AstRawString* function_name, + int* materialized_literal_count, + int* expected_property_count, + bool* ok) { + return parser_->SkipLazyFunctionBody( + function_name, materialized_literal_count, expected_property_count, ok); +} + + +ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody( + const AstRawString* name, int pos, Variable* fvar, + Token::Value fvar_init_op, bool is_generator, bool* ok) { + return parser_->ParseEagerFunctionBody(name, pos, fvar, fvar_init_op, + is_generator, ok); +} + +void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope, + bool* ok) { + parser_->CheckConflictingVarDeclarations(scope, ok); +} + + +AstValueFactory* ParserTraits::ast_value_factory() { + return parser_->ast_value_factory_; +} + + // Support for handling complex values (array and object literals) that // can be fully handled at compile time. 
class CompileTimeValue: public AllStatic { diff -Nru nodejs-0.11.13/deps/v8/src/perf-jit.cc nodejs-0.11.15/deps/v8/src/perf-jit.cc --- nodejs-0.11.13/deps/v8/src/perf-jit.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/perf-jit.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,147 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "src/perf-jit.h" + +#if V8_OS_LINUX +#include <fcntl.h> +#include <unistd.h> +#include "src/third_party/kernel/tools/perf/util/jitdump.h" +#endif // V8_OS_LINUX + +namespace v8 { +namespace internal { + +#if V8_OS_LINUX + +const char PerfJitLogger::kFilenameFormatString[] = "perfjit-%d.dump"; + +// Extra padding for the PID in the filename +const int PerfJitLogger::kFilenameBufferPadding = 16; + + +PerfJitLogger::PerfJitLogger() : perf_output_handle_(NULL), code_index_(0) { + if (!base::TimeTicks::KernelTimestampAvailable()) { + FATAL("Cannot profile with perf JIT - kernel timestamps not available."); + } + + // Open the perf JIT dump file. + int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding; + ScopedVector<char> perf_dump_name(bufferSize); + int size = SNPrintF(perf_dump_name, kFilenameFormatString, + base::OS::GetCurrentProcessId()); + CHECK_NE(size, -1); + perf_output_handle_ = + base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode); + CHECK_NE(perf_output_handle_, NULL); + setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); + + LogWriteHeader(); +} + + +PerfJitLogger::~PerfJitLogger() { + fclose(perf_output_handle_); + perf_output_handle_ = NULL; +} + + +uint64_t PerfJitLogger::GetTimestamp() { + return static_cast<int64_t>( + base::TimeTicks::KernelTimestampNow().ToInternalValue()); +} + + +void PerfJitLogger::LogRecordedBuffer(Code* code, SharedFunctionInfo*, + const char* name, int length) { + DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize); + DCHECK(perf_output_handle_ != NULL); + + const char* code_name = name; + uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start()); + uint32_t code_size = code->instruction_size(); + + static const char string_terminator[] = "\0"; + + jr_code_load code_load; + code_load.p.id = JIT_CODE_LOAD; + code_load.p.total_size = sizeof(code_load) + length + 1 + code_size; + code_load.p.timestamp = GetTimestamp(); + code_load.pid = 
static_cast<uint32_t>(base::OS::GetCurrentProcessId()); + code_load.tid = static_cast<uint32_t>(base::OS::GetCurrentThreadId()); + code_load.vma = 0x0; // Our addresses are absolute. + code_load.code_addr = reinterpret_cast<uint64_t>(code_pointer); + code_load.code_size = code_size; + code_load.code_index = code_index_; + + code_index_++; + + LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load)); + LogWriteBytes(code_name, length); + LogWriteBytes(string_terminator, 1); + LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size); +} + + +void PerfJitLogger::CodeMoveEvent(Address from, Address to) { + // Code relocation not supported. + UNREACHABLE(); +} + + +void PerfJitLogger::CodeDeleteEvent(Address from) { + // V8 does not send notification on code unload +} + + +void PerfJitLogger::SnapshotPositionEvent(Address addr, int pos) {} + + +void PerfJitLogger::LogWriteBytes(const char* bytes, int size) { + size_t rv = fwrite(bytes, 1, size, perf_output_handle_); + DCHECK(static_cast<size_t>(size) == rv); + USE(rv); +} + + +void PerfJitLogger::LogWriteHeader() { + DCHECK(perf_output_handle_ != NULL); + jitheader header; + header.magic = JITHEADER_MAGIC; + header.version = JITHEADER_VERSION; + header.total_size = sizeof(jitheader); + header.pad1 = 0xdeadbeef; + header.elf_mach = GetElfMach(); + header.pid = base::OS::GetCurrentProcessId(); + header.timestamp = + static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0); + LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header)); +} + +#endif // V8_OS_LINUX +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/perf-jit.h nodejs-0.11.15/deps/v8/src/perf-jit.h --- nodejs-0.11.13/deps/v8/src/perf-jit.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/perf-jit.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,120 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_PERF_JIT_H_ +#define V8_PERF_JIT_H_ + +#include "src/v8.h" + +namespace v8 { +namespace internal { + +// TODO(jarin) For now, we disable perf integration on Android because of a +// build problem - when building the snapshot with AOSP, librt is not +// available, so we cannot use the clock_gettime function. 
To fix this, we +// should thread through the V8_LIBRT_NOT_AVAILABLE flag here and only disable +// the perf integration when this flag is present (the perf integration is not +// needed when generating snapshot, so it is fine to ifdef it away). + +#if V8_OS_LINUX + +// Linux perf tool logging support +class PerfJitLogger : public CodeEventLogger { + public: + PerfJitLogger(); + virtual ~PerfJitLogger(); + + virtual void CodeMoveEvent(Address from, Address to); + virtual void CodeDeleteEvent(Address from); + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {} + virtual void SnapshotPositionEvent(Address addr, int pos); + + private: + uint64_t GetTimestamp(); + virtual void LogRecordedBuffer(Code* code, SharedFunctionInfo* shared, + const char* name, int length); + + // Extension added to V8 log file name to get the low-level log name. + static const char kFilenameFormatString[]; + static const int kFilenameBufferPadding; + + // File buffer size of the low-level log. We don't use the default to + // minimize the associated overhead. 
+ static const int kLogBufferSize = 2 * MB; + + void LogWriteBytes(const char* bytes, int size); + void LogWriteHeader(); + + static const uint32_t kElfMachIA32 = 3; + static const uint32_t kElfMachX64 = 62; + static const uint32_t kElfMachARM = 40; + static const uint32_t kElfMachMIPS = 10; + + uint32_t GetElfMach() { +#if V8_TARGET_ARCH_IA32 + return kElfMachIA32; +#elif V8_TARGET_ARCH_X64 + return kElfMachX64; +#elif V8_TARGET_ARCH_ARM + return kElfMachARM; +#elif V8_TARGET_ARCH_MIPS + return kElfMachMIPS; +#else + UNIMPLEMENTED(); + return 0; +#endif + } + + FILE* perf_output_handle_; + uint64_t code_index_; +}; + +#else + +// PerfJitLogger is only implemented on Linux +class PerfJitLogger : public CodeEventLogger { + public: + virtual void CodeMoveEvent(Address from, Address to) { UNIMPLEMENTED(); } + + virtual void CodeDeleteEvent(Address from) { UNIMPLEMENTED(); } + + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { + UNIMPLEMENTED(); + } + + virtual void SnapshotPositionEvent(Address addr, int pos) { UNIMPLEMENTED(); } + + virtual void LogRecordedBuffer(Code* code, SharedFunctionInfo* shared, + const char* name, int length) { + UNIMPLEMENTED(); + } +}; + +#endif // V8_OS_LINUX +} +} // namespace v8::internal +#endif diff -Nru nodejs-0.11.13/deps/v8/src/platform/condition-variable.cc nodejs-0.11.15/deps/v8/src/platform/condition-variable.cc --- nodejs-0.11.13/deps/v8/src/platform/condition-variable.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/condition-variable.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,345 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "platform/condition-variable.h" - -#include <errno.h> -#include <time.h> - -#include "platform/time.h" - -namespace v8 { -namespace internal { - -#if V8_OS_POSIX - -ConditionVariable::ConditionVariable() { - // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary - // hack to support cross-compiling Chrome for Android in AOSP. Remove - // this once AOSP is fixed. -#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \ - (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE - // On Free/Net/OpenBSD and Linux with glibc we can change the time - // source for pthread_cond_timedwait() to use the monotonic clock. 
- pthread_condattr_t attr; - int result = pthread_condattr_init(&attr); - ASSERT_EQ(0, result); - result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); - ASSERT_EQ(0, result); - result = pthread_cond_init(&native_handle_, &attr); - ASSERT_EQ(0, result); - result = pthread_condattr_destroy(&attr); -#else - int result = pthread_cond_init(&native_handle_, NULL); -#endif - ASSERT_EQ(0, result); - USE(result); -} - - -ConditionVariable::~ConditionVariable() { - int result = pthread_cond_destroy(&native_handle_); - ASSERT_EQ(0, result); - USE(result); -} - - -void ConditionVariable::NotifyOne() { - int result = pthread_cond_signal(&native_handle_); - ASSERT_EQ(0, result); - USE(result); -} - - -void ConditionVariable::NotifyAll() { - int result = pthread_cond_broadcast(&native_handle_); - ASSERT_EQ(0, result); - USE(result); -} - - -void ConditionVariable::Wait(Mutex* mutex) { - mutex->AssertHeldAndUnmark(); - int result = pthread_cond_wait(&native_handle_, &mutex->native_handle()); - ASSERT_EQ(0, result); - USE(result); - mutex->AssertUnheldAndMark(); -} - - -bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { - struct timespec ts; - int result; - mutex->AssertHeldAndUnmark(); -#if V8_OS_MACOSX - // Mac OS X provides pthread_cond_timedwait_relative_np(), which does - // not depend on the real time clock, which is what you really WANT here! - ts = rel_time.ToTimespec(); - ASSERT_GE(ts.tv_sec, 0); - ASSERT_GE(ts.tv_nsec, 0); - result = pthread_cond_timedwait_relative_np( - &native_handle_, &mutex->native_handle(), &ts); -#else - // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary - // hack to support cross-compiling Chrome for Android in AOSP. Remove - // this once AOSP is fixed. 
-#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \ - (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE - // On Free/Net/OpenBSD and Linux with glibc we can change the time - // source for pthread_cond_timedwait() to use the monotonic clock. - result = clock_gettime(CLOCK_MONOTONIC, &ts); - ASSERT_EQ(0, result); - Time now = Time::FromTimespec(ts); -#else - // The timeout argument to pthread_cond_timedwait() is in absolute time. - Time now = Time::NowFromSystemTime(); -#endif - Time end_time = now + rel_time; - ASSERT_GE(end_time, now); - ts = end_time.ToTimespec(); - result = pthread_cond_timedwait( - &native_handle_, &mutex->native_handle(), &ts); -#endif // V8_OS_MACOSX - mutex->AssertUnheldAndMark(); - if (result == ETIMEDOUT) { - return false; - } - ASSERT_EQ(0, result); - return true; -} - -#elif V8_OS_WIN - -struct ConditionVariable::Event { - Event() : handle_(::CreateEventA(NULL, true, false, NULL)) { - ASSERT(handle_ != NULL); - } - - ~Event() { - BOOL ok = ::CloseHandle(handle_); - ASSERT(ok); - USE(ok); - } - - bool WaitFor(DWORD timeout_ms) { - DWORD result = ::WaitForSingleObject(handle_, timeout_ms); - if (result == WAIT_OBJECT_0) { - return true; - } - ASSERT(result == WAIT_TIMEOUT); - return false; - } - - HANDLE handle_; - Event* next_; - HANDLE thread_; - volatile bool notified_; -}; - - -ConditionVariable::NativeHandle::~NativeHandle() { - ASSERT(waitlist_ == NULL); - - while (freelist_ != NULL) { - Event* event = freelist_; - freelist_ = event->next_; - delete event; - } -} - - -ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() { - LockGuard<Mutex> lock_guard(&mutex_); - - // Grab an event from the free list or create a new one. - Event* event = freelist_; - if (event != NULL) { - freelist_ = event->next_; - } else { - event = new Event; - } - event->thread_ = GetCurrentThread(); - event->notified_ = false; - -#ifdef DEBUG - // The event must not be on the wait list. 
- for (Event* we = waitlist_; we != NULL; we = we->next_) { - ASSERT_NE(event, we); - } -#endif - - // Prepend the event to the wait list. - event->next_ = waitlist_; - waitlist_ = event; - - return event; -} - - -void ConditionVariable::NativeHandle::Post(Event* event, bool result) { - LockGuard<Mutex> lock_guard(&mutex_); - - // Remove the event from the wait list. - for (Event** wep = &waitlist_;; wep = &(*wep)->next_) { - ASSERT_NE(NULL, *wep); - if (*wep == event) { - *wep = event->next_; - break; - } - } - -#ifdef DEBUG - // The event must not be on the free list. - for (Event* fe = freelist_; fe != NULL; fe = fe->next_) { - ASSERT_NE(event, fe); - } -#endif - - // Reset the event. - BOOL ok = ::ResetEvent(event->handle_); - ASSERT(ok); - USE(ok); - - // Insert the event into the free list. - event->next_ = freelist_; - freelist_ = event; - - // Forward signals delivered after the timeout to the next waiting event. - if (!result && event->notified_ && waitlist_ != NULL) { - ok = ::SetEvent(waitlist_->handle_); - ASSERT(ok); - USE(ok); - waitlist_->notified_ = true; - } -} - - -ConditionVariable::ConditionVariable() {} - - -ConditionVariable::~ConditionVariable() {} - - -void ConditionVariable::NotifyOne() { - // Notify the thread with the highest priority in the waitlist - // that was not already signalled. 
- LockGuard<Mutex> lock_guard(native_handle_.mutex()); - Event* highest_event = NULL; - int highest_priority = std::numeric_limits<int>::min(); - for (Event* event = native_handle().waitlist(); - event != NULL; - event = event->next_) { - if (event->notified_) { - continue; - } - int priority = GetThreadPriority(event->thread_); - ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority); - if (priority >= highest_priority) { - highest_priority = priority; - highest_event = event; - } - } - if (highest_event != NULL) { - ASSERT(!highest_event->notified_); - ::SetEvent(highest_event->handle_); - highest_event->notified_ = true; - } -} - - -void ConditionVariable::NotifyAll() { - // Notify all threads on the waitlist. - LockGuard<Mutex> lock_guard(native_handle_.mutex()); - for (Event* event = native_handle().waitlist(); - event != NULL; - event = event->next_) { - if (!event->notified_) { - ::SetEvent(event->handle_); - event->notified_ = true; - } - } -} - - -void ConditionVariable::Wait(Mutex* mutex) { - // Create and setup the wait event. - Event* event = native_handle_.Pre(); - - // Release the user mutex. - mutex->Unlock(); - - // Wait on the wait event. - while (!event->WaitFor(INFINITE)) - ; - - // Reaquire the user mutex. - mutex->Lock(); - - // Release the wait event (we must have been notified). - ASSERT(event->notified_); - native_handle_.Post(event, true); -} - - -bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { - // Create and setup the wait event. - Event* event = native_handle_.Pre(); - - // Release the user mutex. - mutex->Unlock(); - - // Wait on the wait event. - TimeTicks now = TimeTicks::Now(); - TimeTicks end = now + rel_time; - bool result = false; - while (true) { - int64_t msec = (end - now).InMilliseconds(); - if (msec >= static_cast<int64_t>(INFINITE)) { - result = event->WaitFor(INFINITE - 1); - if (result) { - break; - } - now = TimeTicks::Now(); - } else { - result = event->WaitFor((msec < 0) ? 
0 : static_cast<DWORD>(msec)); - break; - } - } - - // Reaquire the user mutex. - mutex->Lock(); - - // Release the wait event. - ASSERT(!result || event->notified_); - native_handle_.Post(event, result); - - return result; -} - -#endif // V8_OS_POSIX - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform/condition-variable.h nodejs-0.11.15/deps/v8/src/platform/condition-variable.h --- nodejs-0.11.13/deps/v8/src/platform/condition-variable.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/condition-variable.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_ -#define V8_PLATFORM_CONDITION_VARIABLE_H_ - -#include "platform/mutex.h" - -namespace v8 { -namespace internal { - -// Forward declarations. -class ConditionVariableEvent; -class TimeDelta; - -// ----------------------------------------------------------------------------- -// ConditionVariable -// -// This class is a synchronization primitive that can be used to block a thread, -// or multiple threads at the same time, until: -// - a notification is received from another thread, -// - a timeout expires, or -// - a spurious wakeup occurs -// Any thread that intends to wait on a ConditionVariable has to acquire a lock -// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release -// the mutex and suspend the execution of the calling thread. When the condition -// variable is notified, the thread is awakened, and the mutex is reacquired. - -class ConditionVariable V8_FINAL { - public: - ConditionVariable(); - ~ConditionVariable(); - - // If any threads are waiting on this condition variable, calling - // |NotifyOne()| unblocks one of the waiting threads. - void NotifyOne(); - - // Unblocks all threads currently waiting for this condition variable. - void NotifyAll(); - - // |Wait()| causes the calling thread to block until the condition variable is - // notified or a spurious wakeup occurs. 
Atomically releases the mutex, blocks - // the current executing thread, and adds it to the list of threads waiting on - // this condition variable. The thread will be unblocked when |NotifyAll()| or - // |NotifyOne()| is executed. It may also be unblocked spuriously. When - // unblocked, regardless of the reason, the lock on the mutex is reacquired - // and |Wait()| exits. - void Wait(Mutex* mutex); - - // Atomically releases the mutex, blocks the current executing thread, and - // adds it to the list of threads waiting on this condition variable. The - // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed, - // or when the relative timeout |rel_time| expires. It may also be unblocked - // spuriously. When unblocked, regardless of the reason, the lock on the mutex - // is reacquired and |WaitFor()| exits. Returns true if the condition variable - // was notified prior to the timeout. - bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; - - // The implementation-defined native handle type. -#if V8_OS_POSIX - typedef pthread_cond_t NativeHandle; -#elif V8_OS_WIN - struct Event; - class NativeHandle V8_FINAL { - public: - NativeHandle() : waitlist_(NULL), freelist_(NULL) {} - ~NativeHandle(); - - Event* Pre() V8_WARN_UNUSED_RESULT; - void Post(Event* event, bool result); - - Mutex* mutex() { return &mutex_; } - Event* waitlist() { return waitlist_; } - - private: - Event* waitlist_; - Event* freelist_; - Mutex mutex_; - - DISALLOW_COPY_AND_ASSIGN(NativeHandle); - }; -#endif - - NativeHandle& native_handle() { - return native_handle_; - } - const NativeHandle& native_handle() const { - return native_handle_; - } - - private: - NativeHandle native_handle_; - - DISALLOW_COPY_AND_ASSIGN(ConditionVariable); -}; - - -// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is -// called). 
-// Usage: -// static LazyConditionVariable my_condvar = -// LAZY_CONDITION_VARIABLE_INITIALIZER; -// -// void my_function() { -// LockGuard<Mutex> lock_guard(&my_mutex); -// my_condvar.Pointer()->Wait(&my_mutex); -// } -typedef LazyStaticInstance<ConditionVariable, - DefaultConstructTrait<ConditionVariable>, - ThreadSafeInitOnceTrait>::type LazyConditionVariable; - -#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER - -} } // namespace v8::internal - -#endif // V8_PLATFORM_CONDITION_VARIABLE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform/elapsed-timer.h nodejs-0.11.15/deps/v8/src/platform/elapsed-timer.h --- nodejs-0.11.13/deps/v8/src/platform/elapsed-timer.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/elapsed-timer.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_ELAPSED_TIMER_H_ -#define V8_PLATFORM_ELAPSED_TIMER_H_ - -#include "../checks.h" -#include "time.h" - -namespace v8 { -namespace internal { - -class ElapsedTimer V8_FINAL BASE_EMBEDDED { - public: -#ifdef DEBUG - ElapsedTimer() : started_(false) {} -#endif - - // Starts this timer. Once started a timer can be checked with - // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|. - // This method must not be called on an already started timer. - void Start() { - ASSERT(!IsStarted()); - start_ticks_ = Now(); -#ifdef DEBUG - started_ = true; -#endif - ASSERT(IsStarted()); - } - - // Stops this timer. Must not be called on a timer that was not - // started before. - void Stop() { - ASSERT(IsStarted()); - start_ticks_ = TimeTicks(); -#ifdef DEBUG - started_ = false; -#endif - ASSERT(!IsStarted()); - } - - // Returns |true| if this timer was started previously. - bool IsStarted() const { - ASSERT(started_ || start_ticks_.IsNull()); - ASSERT(!started_ || !start_ticks_.IsNull()); - return !start_ticks_.IsNull(); - } - - // Restarts the timer and returns the time elapsed since the previous start. - // This method is equivalent to obtaining the elapsed time with |Elapsed()| - // and then starting the timer again, but does so in one single operation, - // avoiding the need to obtain the clock value twice. It may only be called - // on a previously started timer. 
- TimeDelta Restart() { - ASSERT(IsStarted()); - TimeTicks ticks = Now(); - TimeDelta elapsed = ticks - start_ticks_; - ASSERT(elapsed.InMicroseconds() >= 0); - start_ticks_ = ticks; - ASSERT(IsStarted()); - return elapsed; - } - - // Returns the time elapsed since the previous start. This method may only - // be called on a previously started timer. - TimeDelta Elapsed() const { - ASSERT(IsStarted()); - TimeDelta elapsed = Now() - start_ticks_; - ASSERT(elapsed.InMicroseconds() >= 0); - return elapsed; - } - - // Returns |true| if the specified |time_delta| has elapsed since the - // previous start, or |false| if not. This method may only be called on - // a previously started timer. - bool HasExpired(TimeDelta time_delta) const { - ASSERT(IsStarted()); - return Elapsed() >= time_delta; - } - - private: - static V8_INLINE TimeTicks Now() { - TimeTicks now = TimeTicks::HighResolutionNow(); - ASSERT(!now.IsNull()); - return now; - } - - TimeTicks start_ticks_; -#ifdef DEBUG - bool started_; -#endif -}; - -} } // namespace v8::internal - -#endif // V8_PLATFORM_ELAPSED_TIMER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform/mutex.cc nodejs-0.11.15/deps/v8/src/platform/mutex.cc --- nodejs-0.11.13/deps/v8/src/platform/mutex.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/mutex.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,214 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "platform/mutex.h" - -#include <errno.h> - -namespace v8 { -namespace internal { - -#if V8_OS_POSIX - -static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) { - int result; -#if defined(DEBUG) - // Use an error checking mutex in debug mode. - pthread_mutexattr_t attr; - result = pthread_mutexattr_init(&attr); - ASSERT_EQ(0, result); - result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); - ASSERT_EQ(0, result); - result = pthread_mutex_init(mutex, &attr); - ASSERT_EQ(0, result); - result = pthread_mutexattr_destroy(&attr); -#else - // Use a fast mutex (default attributes). 
- result = pthread_mutex_init(mutex, NULL); -#endif // defined(DEBUG) - ASSERT_EQ(0, result); - USE(result); -} - - -static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) { - pthread_mutexattr_t attr; - int result = pthread_mutexattr_init(&attr); - ASSERT_EQ(0, result); - result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); - ASSERT_EQ(0, result); - result = pthread_mutex_init(mutex, &attr); - ASSERT_EQ(0, result); - result = pthread_mutexattr_destroy(&attr); - ASSERT_EQ(0, result); - USE(result); -} - - -static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) { - int result = pthread_mutex_destroy(mutex); - ASSERT_EQ(0, result); - USE(result); -} - - -static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) { - int result = pthread_mutex_lock(mutex); - ASSERT_EQ(0, result); - USE(result); -} - - -static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) { - int result = pthread_mutex_unlock(mutex); - ASSERT_EQ(0, result); - USE(result); -} - - -static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) { - int result = pthread_mutex_trylock(mutex); - if (result == EBUSY) { - return false; - } - ASSERT_EQ(0, result); - return true; -} - -#elif V8_OS_WIN - -static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) { - InitializeCriticalSection(cs); -} - - -static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) { - InitializeCriticalSection(cs); -} - - -static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) { - DeleteCriticalSection(cs); -} - - -static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) { - EnterCriticalSection(cs); -} - - -static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) { - LeaveCriticalSection(cs); -} - - -static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) { - return TryEnterCriticalSection(cs); -} - -#endif // V8_OS_POSIX - - -Mutex::Mutex() { - InitializeNativeHandle(&native_handle_); -#ifdef DEBUG - level_ = 
0; -#endif -} - - -Mutex::~Mutex() { - DestroyNativeHandle(&native_handle_); - ASSERT_EQ(0, level_); -} - - -void Mutex::Lock() { - LockNativeHandle(&native_handle_); - AssertUnheldAndMark(); -} - - -void Mutex::Unlock() { - AssertHeldAndUnmark(); - UnlockNativeHandle(&native_handle_); -} - - -bool Mutex::TryLock() { - if (!TryLockNativeHandle(&native_handle_)) { - return false; - } - AssertUnheldAndMark(); - return true; -} - - -RecursiveMutex::RecursiveMutex() { - InitializeRecursiveNativeHandle(&native_handle_); -#ifdef DEBUG - level_ = 0; -#endif -} - - -RecursiveMutex::~RecursiveMutex() { - DestroyNativeHandle(&native_handle_); - ASSERT_EQ(0, level_); -} - - -void RecursiveMutex::Lock() { - LockNativeHandle(&native_handle_); -#ifdef DEBUG - ASSERT_LE(0, level_); - level_++; -#endif -} - - -void RecursiveMutex::Unlock() { -#ifdef DEBUG - ASSERT_LT(0, level_); - level_--; -#endif - UnlockNativeHandle(&native_handle_); -} - - -bool RecursiveMutex::TryLock() { - if (!TryLockNativeHandle(&native_handle_)) { - return false; - } -#ifdef DEBUG - ASSERT_LE(0, level_); - level_++; -#endif - return true; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform/mutex.h nodejs-0.11.15/deps/v8/src/platform/mutex.h --- nodejs-0.11.13/deps/v8/src/platform/mutex.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/mutex.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,238 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_MUTEX_H_ -#define V8_PLATFORM_MUTEX_H_ - -#include "../lazy-instance.h" -#if V8_OS_WIN -#include "../win32-headers.h" -#endif - -#if V8_OS_POSIX -#include <pthread.h> // NOLINT -#endif - -namespace v8 { -namespace internal { - -// ---------------------------------------------------------------------------- -// Mutex -// -// This class is a synchronization primitive that can be used to protect shared -// data from being simultaneously accessed by multiple threads. A mutex offers -// exclusive, non-recursive ownership semantics: -// - A calling thread owns a mutex from the time that it successfully calls -// either |Lock()| or |TryLock()| until it calls |Unlock()|. 
-// - When a thread owns a mutex, all other threads will block (for calls to -// |Lock()|) or receive a |false| return value (for |TryLock()|) if they -// attempt to claim ownership of the mutex. -// A calling thread must not own the mutex prior to calling |Lock()| or -// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed -// while still owned by some thread. The Mutex class is non-copyable. - -class Mutex V8_FINAL { - public: - Mutex(); - ~Mutex(); - - // Locks the given mutex. If the mutex is currently unlocked, it becomes - // locked and owned by the calling thread, and immediately. If the mutex - // is already locked by another thread, suspends the calling thread until - // the mutex is unlocked. - void Lock(); - - // Unlocks the given mutex. The mutex is assumed to be locked and owned by - // the calling thread on entrance. - void Unlock(); - - // Tries to lock the given mutex. Returns whether the mutex was - // successfully locked. - bool TryLock() V8_WARN_UNUSED_RESULT; - - // The implementation-defined native handle type. -#if V8_OS_POSIX - typedef pthread_mutex_t NativeHandle; -#elif V8_OS_WIN - typedef CRITICAL_SECTION NativeHandle; -#endif - - NativeHandle& native_handle() { - return native_handle_; - } - const NativeHandle& native_handle() const { - return native_handle_; - } - - private: - NativeHandle native_handle_; -#ifdef DEBUG - int level_; -#endif - - V8_INLINE void AssertHeldAndUnmark() { -#ifdef DEBUG - ASSERT_EQ(1, level_); - level_--; -#endif - } - - V8_INLINE void AssertUnheldAndMark() { -#ifdef DEBUG - ASSERT_EQ(0, level_); - level_++; -#endif - } - - friend class ConditionVariable; - - DISALLOW_COPY_AND_ASSIGN(Mutex); -}; - - -// POD Mutex initialized lazily (i.e. the first time Pointer() is called). -// Usage: -// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER; -// -// void my_function() { -// LockGuard<Mutex> guard(my_mutex.Pointer()); -// // Do something. 
-// } -// -typedef LazyStaticInstance<Mutex, - DefaultConstructTrait<Mutex>, - ThreadSafeInitOnceTrait>::type LazyMutex; - -#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER - - -// ----------------------------------------------------------------------------- -// RecursiveMutex -// -// This class is a synchronization primitive that can be used to protect shared -// data from being simultaneously accessed by multiple threads. A recursive -// mutex offers exclusive, recursive ownership semantics: -// - A calling thread owns a recursive mutex for a period of time that starts -// when it successfully calls either |Lock()| or |TryLock()|. During this -// period, the thread may make additional calls to |Lock()| or |TryLock()|. -// The period of ownership ends when the thread makes a matching number of -// calls to |Unlock()|. -// - When a thread owns a recursive mutex, all other threads will block (for -// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if -// they attempt to claim ownership of the recursive mutex. -// - The maximum number of times that a recursive mutex may be locked is -// unspecified, but after that number is reached, calls to |Lock()| will -// probably abort the process and calls to |TryLock()| return false. -// The behavior of a program is undefined if a recursive mutex is destroyed -// while still owned by some thread. The RecursiveMutex class is non-copyable. - -class RecursiveMutex V8_FINAL { - public: - RecursiveMutex(); - ~RecursiveMutex(); - - // Locks the mutex. If another thread has already locked the mutex, a call to - // |Lock()| will block execution until the lock is acquired. A thread may call - // |Lock()| on a recursive mutex repeatedly. Ownership will only be released - // after the thread makes a matching number of calls to |Unlock()|. - // The behavior is undefined if the mutex is not unlocked before being - // destroyed, i.e. some thread still owns it. 
- void Lock(); - - // Unlocks the mutex if its level of ownership is 1 (there was exactly one - // more call to |Lock()| than there were calls to unlock() made by this - // thread), reduces the level of ownership by 1 otherwise. The mutex must be - // locked by the current thread of execution, otherwise, the behavior is - // undefined. - void Unlock(); - - // Tries to lock the given mutex. Returns whether the mutex was - // successfully locked. - bool TryLock() V8_WARN_UNUSED_RESULT; - - // The implementation-defined native handle type. - typedef Mutex::NativeHandle NativeHandle; - - NativeHandle& native_handle() { - return native_handle_; - } - const NativeHandle& native_handle() const { - return native_handle_; - } - - private: - NativeHandle native_handle_; -#ifdef DEBUG - int level_; -#endif - - DISALLOW_COPY_AND_ASSIGN(RecursiveMutex); -}; - - -// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is -// called). -// Usage: -// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER; -// -// void my_function() { -// LockGuard<RecursiveMutex> guard(my_mutex.Pointer()); -// // Do something. -// } -// -typedef LazyStaticInstance<RecursiveMutex, - DefaultConstructTrait<RecursiveMutex>, - ThreadSafeInitOnceTrait>::type LazyRecursiveMutex; - -#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER - - -// ----------------------------------------------------------------------------- -// LockGuard -// -// This class is a mutex wrapper that provides a convenient RAII-style mechanism -// for owning a mutex for the duration of a scoped block. -// When a LockGuard object is created, it attempts to take ownership of the -// mutex it is given. When control leaves the scope in which the LockGuard -// object was created, the LockGuard is destructed and the mutex is released. -// The LockGuard class is non-copyable. 
- -template <typename Mutex> -class LockGuard V8_FINAL { - public: - explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); } - ~LockGuard() { mutex_->Unlock(); } - - private: - Mutex* mutex_; - - DISALLOW_COPY_AND_ASSIGN(LockGuard); -}; - -} } // namespace v8::internal - -#endif // V8_PLATFORM_MUTEX_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform/semaphore.cc nodejs-0.11.15/deps/v8/src/platform/semaphore.cc --- nodejs-0.11.13/deps/v8/src/platform/semaphore.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/semaphore.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,214 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "platform/semaphore.h" - -#if V8_OS_MACOSX -#include <mach/mach_init.h> -#include <mach/task.h> -#endif - -#include <errno.h> - -#include "checks.h" -#include "platform/time.h" - -namespace v8 { -namespace internal { - -#if V8_OS_MACOSX - -Semaphore::Semaphore(int count) { - kern_return_t result = semaphore_create( - mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count); - ASSERT_EQ(KERN_SUCCESS, result); - USE(result); -} - - -Semaphore::~Semaphore() { - kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_); - ASSERT_EQ(KERN_SUCCESS, result); - USE(result); -} - - -void Semaphore::Signal() { - kern_return_t result = semaphore_signal(native_handle_); - ASSERT_EQ(KERN_SUCCESS, result); - USE(result); -} - - -void Semaphore::Wait() { - while (true) { - kern_return_t result = semaphore_wait(native_handle_); - if (result == KERN_SUCCESS) return; // Semaphore was signalled. - ASSERT_EQ(KERN_ABORTED, result); - } -} - - -bool Semaphore::WaitFor(const TimeDelta& rel_time) { - TimeTicks now = TimeTicks::Now(); - TimeTicks end = now + rel_time; - while (true) { - mach_timespec_t ts; - if (now >= end) { - // Return immediately if semaphore was not signalled. - ts.tv_sec = 0; - ts.tv_nsec = 0; - } else { - ts = (end - now).ToMachTimespec(); - } - kern_return_t result = semaphore_timedwait(native_handle_, ts); - if (result == KERN_SUCCESS) return true; // Semaphore was signalled. 
- if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout. - ASSERT_EQ(KERN_ABORTED, result); - now = TimeTicks::Now(); - } -} - -#elif V8_OS_POSIX - -Semaphore::Semaphore(int count) { - ASSERT(count >= 0); - int result = sem_init(&native_handle_, 0, count); - ASSERT_EQ(0, result); - USE(result); -} - - -Semaphore::~Semaphore() { - int result = sem_destroy(&native_handle_); - ASSERT_EQ(0, result); - USE(result); -} - - -void Semaphore::Signal() { - int result = sem_post(&native_handle_); - ASSERT_EQ(0, result); - USE(result); -} - - -void Semaphore::Wait() { - while (true) { - int result = sem_wait(&native_handle_); - if (result == 0) return; // Semaphore was signalled. - // Signal caused spurious wakeup. - ASSERT_EQ(-1, result); - ASSERT_EQ(EINTR, errno); - } -} - - -bool Semaphore::WaitFor(const TimeDelta& rel_time) { - // Compute the time for end of timeout. - const Time time = Time::NowFromSystemTime() + rel_time; - const struct timespec ts = time.ToTimespec(); - - // Wait for semaphore signalled or timeout. - while (true) { - int result = sem_timedwait(&native_handle_, &ts); - if (result == 0) return true; // Semaphore was signalled. -#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) - if (result > 0) { - // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1. - errno = result; - result = -1; - } -#endif - if (result == -1 && errno == ETIMEDOUT) { - // Timed out while waiting for semaphore. - return false; - } - // Signal caused spurious wakeup. 
- ASSERT_EQ(-1, result); - ASSERT_EQ(EINTR, errno); - } -} - -#elif V8_OS_WIN - -Semaphore::Semaphore(int count) { - ASSERT(count >= 0); - native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL); - ASSERT(native_handle_ != NULL); -} - - -Semaphore::~Semaphore() { - BOOL result = CloseHandle(native_handle_); - ASSERT(result); - USE(result); -} - - -void Semaphore::Signal() { - LONG dummy; - BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy); - ASSERT(result); - USE(result); -} - - -void Semaphore::Wait() { - DWORD result = WaitForSingleObject(native_handle_, INFINITE); - ASSERT(result == WAIT_OBJECT_0); - USE(result); -} - - -bool Semaphore::WaitFor(const TimeDelta& rel_time) { - TimeTicks now = TimeTicks::Now(); - TimeTicks end = now + rel_time; - while (true) { - int64_t msec = (end - now).InMilliseconds(); - if (msec >= static_cast<int64_t>(INFINITE)) { - DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1); - if (result == WAIT_OBJECT_0) { - return true; - } - ASSERT(result == WAIT_TIMEOUT); - now = TimeTicks::Now(); - } else { - DWORD result = WaitForSingleObject( - native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec)); - if (result == WAIT_TIMEOUT) { - return false; - } - ASSERT(result == WAIT_OBJECT_0); - return true; - } - } -} - -#endif // V8_OS_MACOSX - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform/semaphore.h nodejs-0.11.15/deps/v8/src/platform/semaphore.h --- nodejs-0.11.13/deps/v8/src/platform/semaphore.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/semaphore.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_SEMAPHORE_H_ -#define V8_PLATFORM_SEMAPHORE_H_ - -#include "../lazy-instance.h" -#if V8_OS_WIN -#include "../win32-headers.h" -#endif - -#if V8_OS_MACOSX -#include <mach/semaphore.h> // NOLINT -#elif V8_OS_POSIX -#include <semaphore.h> // NOLINT -#endif - -namespace v8 { -namespace internal { - -// Forward declarations. 
-class TimeDelta; - -// ---------------------------------------------------------------------------- -// Semaphore -// -// A semaphore object is a synchronization object that maintains a count. The -// count is decremented each time a thread completes a wait for the semaphore -// object and incremented each time a thread signals the semaphore. When the -// count reaches zero, threads waiting for the semaphore blocks until the -// count becomes non-zero. - -class Semaphore V8_FINAL { - public: - explicit Semaphore(int count); - ~Semaphore(); - - // Increments the semaphore counter. - void Signal(); - - // Suspends the calling thread until the semaphore counter is non zero - // and then decrements the semaphore counter. - void Wait(); - - // Suspends the calling thread until the counter is non zero or the timeout - // time has passed. If timeout happens the return value is false and the - // counter is unchanged. Otherwise the semaphore counter is decremented and - // true is returned. - bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; - -#if V8_OS_MACOSX - typedef semaphore_t NativeHandle; -#elif V8_OS_POSIX - typedef sem_t NativeHandle; -#elif V8_OS_WIN - typedef HANDLE NativeHandle; -#endif - - NativeHandle& native_handle() { - return native_handle_; - } - const NativeHandle& native_handle() const { - return native_handle_; - } - - private: - NativeHandle native_handle_; - - DISALLOW_COPY_AND_ASSIGN(Semaphore); -}; - - -// POD Semaphore initialized lazily (i.e. the first time Pointer() is called). -// Usage: -// // The following semaphore starts at 0. -// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER; -// -// void my_function() { -// // Do something with my_semaphore.Pointer(). 
-// } -// - -template <int N> -struct CreateSemaphoreTrait { - static Semaphore* Create() { - return new Semaphore(N); - } -}; - -template <int N> -struct LazySemaphore { - typedef typename LazyDynamicInstance< - Semaphore, - CreateSemaphoreTrait<N>, - ThreadSafeInitOnceTrait>::type type; -}; - -#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER - -} } // namespace v8::internal - -#endif // V8_PLATFORM_SEMAPHORE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform/socket.cc nodejs-0.11.15/deps/v8/src/platform/socket.cc --- nodejs-0.11.13/deps/v8/src/platform/socket.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/socket.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,224 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "platform/socket.h" - -#if V8_OS_POSIX -#include <sys/types.h> -#include <sys/socket.h> - -#include <netinet/in.h> -#include <netdb.h> - -#include <unistd.h> -#endif - -#include <errno.h> - -#include "checks.h" -#include "once.h" - -namespace v8 { -namespace internal { - -#if V8_OS_WIN - -static V8_DECLARE_ONCE(initialize_winsock) = V8_ONCE_INIT; - - -static void InitializeWinsock() { - WSADATA wsa_data; - int result = WSAStartup(MAKEWORD(1, 0), &wsa_data); - CHECK_EQ(0, result); -} - -#endif // V8_OS_WIN - - -Socket::Socket() { -#if V8_OS_WIN - // Be sure to initialize the WinSock DLL first. - CallOnce(&initialize_winsock, &InitializeWinsock); -#endif // V8_OS_WIN - - // Create the native socket handle. 
- native_handle_ = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); -} - - -bool Socket::Bind(int port) { - ASSERT_GE(port, 0); - ASSERT_LT(port, 65536); - if (!IsValid()) return false; - struct sockaddr_in sin; - memset(&sin, 0, sizeof(sin)); - sin.sin_family = AF_INET; - sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - sin.sin_port = htons(static_cast<uint16_t>(port)); - int result = ::bind( - native_handle_, reinterpret_cast<struct sockaddr*>(&sin), sizeof(sin)); - return result == 0; -} - - -bool Socket::Listen(int backlog) { - if (!IsValid()) return false; - int result = ::listen(native_handle_, backlog); - return result == 0; -} - - -Socket* Socket::Accept() { - if (!IsValid()) return NULL; - while (true) { - NativeHandle native_handle = ::accept(native_handle_, NULL, NULL); - if (native_handle == kInvalidNativeHandle) { -#if V8_OS_POSIX - if (errno == EINTR) continue; // Retry after signal. -#endif - return NULL; - } - return new Socket(native_handle); - } -} - - -bool Socket::Connect(const char* host, const char* port) { - ASSERT_NE(NULL, host); - ASSERT_NE(NULL, port); - if (!IsValid()) return false; - - // Lookup host and port. - struct addrinfo* info = NULL; - struct addrinfo hint; - memset(&hint, 0, sizeof(hint)); - hint.ai_family = AF_INET; - hint.ai_socktype = SOCK_STREAM; - hint.ai_protocol = IPPROTO_TCP; - int result = ::getaddrinfo(host, port, &hint, &info); - if (result != 0) { - return false; - } - - // Connect to the host on the given port. - for (struct addrinfo* ai = info; ai != NULL; ai = ai->ai_next) { - // Try to connect using this addr info. - while (true) { - result = ::connect( - native_handle_, ai->ai_addr, static_cast<int>(ai->ai_addrlen)); - if (result == 0) { - freeaddrinfo(info); - return true; - } -#if V8_OS_POSIX - if (errno == EINTR) continue; // Retry after signal. 
-#endif - break; - } - } - freeaddrinfo(info); - return false; -} - - -bool Socket::Shutdown() { - if (!IsValid()) return false; - // Shutdown socket for both read and write. -#if V8_OS_POSIX - int result = ::shutdown(native_handle_, SHUT_RDWR); - ::close(native_handle_); -#elif V8_OS_WIN - int result = ::shutdown(native_handle_, SD_BOTH); - ::closesocket(native_handle_); -#endif - native_handle_ = kInvalidNativeHandle; - return result == 0; -} - - -int Socket::Send(const char* buffer, int length) { - ASSERT(length <= 0 || buffer != NULL); - if (!IsValid()) return 0; - int offset = 0; - while (offset < length) { - int result = ::send(native_handle_, buffer + offset, length - offset, 0); - if (result == 0) { - break; - } else if (result > 0) { - ASSERT(result <= length - offset); - offset += result; - } else { -#if V8_OS_POSIX - if (errno == EINTR) continue; // Retry after signal. -#endif - return 0; - } - } - return offset; -} - - -int Socket::Receive(char* buffer, int length) { - if (!IsValid()) return 0; - if (length <= 0) return 0; - ASSERT_NE(NULL, buffer); - while (true) { - int result = ::recv(native_handle_, buffer, length, 0); - if (result < 0) { -#if V8_OS_POSIX - if (errno == EINTR) continue; // Retry after signal. -#endif - return 0; - } - return result; - } -} - - -bool Socket::SetReuseAddress(bool reuse_address) { - if (!IsValid()) return 0; - int v = reuse_address ? 1 : 0; - int result = ::setsockopt(native_handle_, SOL_SOCKET, SO_REUSEADDR, - reinterpret_cast<char*>(&v), sizeof(v)); - return result == 0; -} - - -// static -int Socket::GetLastError() { -#if V8_OS_POSIX - return errno; -#elif V8_OS_WIN - // Be sure to initialize the WinSock DLL first. - CallOnce(&initialize_winsock, &InitializeWinsock); - - // Now we can safely perform WSA calls. 
- return ::WSAGetLastError(); -#endif -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform/socket.h nodejs-0.11.15/deps/v8/src/platform/socket.h --- nodejs-0.11.13/deps/v8/src/platform/socket.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/socket.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_PLATFORM_SOCKET_H_ -#define V8_PLATFORM_SOCKET_H_ - -#include "globals.h" -#if V8_OS_WIN -#include "win32-headers.h" -#endif - -namespace v8 { -namespace internal { - -// ---------------------------------------------------------------------------- -// Socket -// - -class Socket V8_FINAL { - public: - Socket(); - ~Socket() { Shutdown(); } - - // Server initialization. - bool Bind(int port) V8_WARN_UNUSED_RESULT; - bool Listen(int backlog) V8_WARN_UNUSED_RESULT; - Socket* Accept() V8_WARN_UNUSED_RESULT; - - // Client initialization. - bool Connect(const char* host, const char* port) V8_WARN_UNUSED_RESULT; - - // Shutdown socket for both read and write. This causes blocking Send and - // Receive calls to exit. After |Shutdown()| the Socket object cannot be - // used for any communication. - bool Shutdown(); - - // Data Transimission - // Return 0 on failure. - int Send(const char* buffer, int length) V8_WARN_UNUSED_RESULT; - int Receive(char* buffer, int length) V8_WARN_UNUSED_RESULT; - - // Set the value of the SO_REUSEADDR socket option. - bool SetReuseAddress(bool reuse_address); - - V8_INLINE bool IsValid() const { - return native_handle_ != kInvalidNativeHandle; - } - - static int GetLastError(); - - // The implementation-defined native handle type. 
-#if V8_OS_POSIX - typedef int NativeHandle; - static const NativeHandle kInvalidNativeHandle = -1; -#elif V8_OS_WIN - typedef SOCKET NativeHandle; - static const NativeHandle kInvalidNativeHandle = INVALID_SOCKET; -#endif - - NativeHandle& native_handle() { - return native_handle_; - } - const NativeHandle& native_handle() const { - return native_handle_; - } - - private: - explicit Socket(NativeHandle native_handle) : native_handle_(native_handle) {} - - NativeHandle native_handle_; - - DISALLOW_COPY_AND_ASSIGN(Socket); -}; - -} } // namespace v8::internal - -#endif // V8_PLATFORM_SOCKET_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform/time.cc nodejs-0.11.15/deps/v8/src/platform/time.cc --- nodejs-0.11.13/deps/v8/src/platform/time.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/time.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,591 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "platform/time.h" - -#if V8_OS_POSIX -#include <sys/time.h> -#endif -#if V8_OS_MACOSX -#include <mach/mach_time.h> -#endif - -#include <string.h> - -#include "checks.h" -#include "cpu.h" -#include "platform.h" -#if V8_OS_WIN -#include "win32-headers.h" -#endif - -namespace v8 { -namespace internal { - -TimeDelta TimeDelta::FromDays(int days) { - return TimeDelta(days * Time::kMicrosecondsPerDay); -} - - -TimeDelta TimeDelta::FromHours(int hours) { - return TimeDelta(hours * Time::kMicrosecondsPerHour); -} - - -TimeDelta TimeDelta::FromMinutes(int minutes) { - return TimeDelta(minutes * Time::kMicrosecondsPerMinute); -} - - -TimeDelta TimeDelta::FromSeconds(int64_t seconds) { - return TimeDelta(seconds * Time::kMicrosecondsPerSecond); -} - - -TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) { - return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond); -} - - -TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) { - return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond); -} - - -int TimeDelta::InDays() const { - return static_cast<int>(delta_ / Time::kMicrosecondsPerDay); -} - - -int TimeDelta::InHours() const { - return static_cast<int>(delta_ / Time::kMicrosecondsPerHour); -} - - -int TimeDelta::InMinutes() const { - return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute); -} - - -double TimeDelta::InSecondsF() const { - return static_cast<double>(delta_) / 
Time::kMicrosecondsPerSecond; -} - - -int64_t TimeDelta::InSeconds() const { - return delta_ / Time::kMicrosecondsPerSecond; -} - - -double TimeDelta::InMillisecondsF() const { - return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond; -} - - -int64_t TimeDelta::InMilliseconds() const { - return delta_ / Time::kMicrosecondsPerMillisecond; -} - - -int64_t TimeDelta::InNanoseconds() const { - return delta_ * Time::kNanosecondsPerMicrosecond; -} - - -#if V8_OS_MACOSX - -TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) { - ASSERT_GE(ts.tv_nsec, 0); - ASSERT_LT(ts.tv_nsec, - static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT - return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond + - ts.tv_nsec / Time::kNanosecondsPerMicrosecond); -} - - -struct mach_timespec TimeDelta::ToMachTimespec() const { - struct mach_timespec ts; - ASSERT(delta_ >= 0); - ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond; - ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) * - Time::kNanosecondsPerMicrosecond; - return ts; -} - -#endif // V8_OS_MACOSX - - -#if V8_OS_POSIX - -TimeDelta TimeDelta::FromTimespec(struct timespec ts) { - ASSERT_GE(ts.tv_nsec, 0); - ASSERT_LT(ts.tv_nsec, - static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT - return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond + - ts.tv_nsec / Time::kNanosecondsPerMicrosecond); -} - - -struct timespec TimeDelta::ToTimespec() const { - struct timespec ts; - ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond; - ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) * - Time::kNanosecondsPerMicrosecond; - return ts; -} - -#endif // V8_OS_POSIX - - -#if V8_OS_WIN - -// We implement time using the high-resolution timers so that we can get -// timeouts which are smaller than 10-15ms. To avoid any drift, we -// periodically resync the internal clock to the system clock. 
-class Clock V8_FINAL { - public: - Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {} - - Time Now() { - // Time between resampling the un-granular clock for this API (1 minute). - const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1); - - LockGuard<Mutex> lock_guard(&mutex_); - - // Determine current time and ticks. - TimeTicks ticks = GetSystemTicks(); - Time time = GetSystemTime(); - - // Check if we need to synchronize with the system clock due to a backwards - // time change or the amount of time elapsed. - TimeDelta elapsed = ticks - initial_ticks_; - if (time < initial_time_ || elapsed > kMaxElapsedTime) { - initial_ticks_ = ticks; - initial_time_ = time; - return time; - } - - return initial_time_ + elapsed; - } - - Time NowFromSystemTime() { - LockGuard<Mutex> lock_guard(&mutex_); - initial_ticks_ = GetSystemTicks(); - initial_time_ = GetSystemTime(); - return initial_time_; - } - - private: - static TimeTicks GetSystemTicks() { - return TimeTicks::Now(); - } - - static Time GetSystemTime() { - FILETIME ft; - ::GetSystemTimeAsFileTime(&ft); - return Time::FromFiletime(ft); - } - - TimeTicks initial_ticks_; - Time initial_time_; - Mutex mutex_; -}; - - -static LazyStaticInstance<Clock, - DefaultConstructTrait<Clock>, - ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER; - - -Time Time::Now() { - return clock.Pointer()->Now(); -} - - -Time Time::NowFromSystemTime() { - return clock.Pointer()->NowFromSystemTime(); -} - - -// Time between windows epoch and standard epoch. 
-static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000); - - -Time Time::FromFiletime(FILETIME ft) { - if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) { - return Time(); - } - if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() && - ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) { - return Max(); - } - int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) + - (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10; - return Time(us - kTimeToEpochInMicroseconds); -} - - -FILETIME Time::ToFiletime() const { - ASSERT(us_ >= 0); - FILETIME ft; - if (IsNull()) { - ft.dwLowDateTime = 0; - ft.dwHighDateTime = 0; - return ft; - } - if (IsMax()) { - ft.dwLowDateTime = std::numeric_limits<DWORD>::max(); - ft.dwHighDateTime = std::numeric_limits<DWORD>::max(); - return ft; - } - uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10; - ft.dwLowDateTime = static_cast<DWORD>(us); - ft.dwHighDateTime = static_cast<DWORD>(us >> 32); - return ft; -} - -#elif V8_OS_POSIX - -Time Time::Now() { - struct timeval tv; - int result = gettimeofday(&tv, NULL); - ASSERT_EQ(0, result); - USE(result); - return FromTimeval(tv); -} - - -Time Time::NowFromSystemTime() { - return Now(); -} - - -Time Time::FromTimespec(struct timespec ts) { - ASSERT(ts.tv_nsec >= 0); - ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT - if (ts.tv_nsec == 0 && ts.tv_sec == 0) { - return Time(); - } - if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT - ts.tv_sec == std::numeric_limits<time_t>::max()) { - return Max(); - } - return Time(ts.tv_sec * kMicrosecondsPerSecond + - ts.tv_nsec / kNanosecondsPerMicrosecond); -} - - -struct timespec Time::ToTimespec() const { - struct timespec ts; - if (IsNull()) { - ts.tv_sec = 0; - ts.tv_nsec = 0; - return ts; - } - if (IsMax()) { - ts.tv_sec = std::numeric_limits<time_t>::max(); - ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT - return ts; - 
} - ts.tv_sec = us_ / kMicrosecondsPerSecond; - ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond; - return ts; -} - - -Time Time::FromTimeval(struct timeval tv) { - ASSERT(tv.tv_usec >= 0); - ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond)); - if (tv.tv_usec == 0 && tv.tv_sec == 0) { - return Time(); - } - if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) && - tv.tv_sec == std::numeric_limits<time_t>::max()) { - return Max(); - } - return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec); -} - - -struct timeval Time::ToTimeval() const { - struct timeval tv; - if (IsNull()) { - tv.tv_sec = 0; - tv.tv_usec = 0; - return tv; - } - if (IsMax()) { - tv.tv_sec = std::numeric_limits<time_t>::max(); - tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1); - return tv; - } - tv.tv_sec = us_ / kMicrosecondsPerSecond; - tv.tv_usec = us_ % kMicrosecondsPerSecond; - return tv; -} - -#endif // V8_OS_WIN - - -Time Time::FromJsTime(double ms_since_epoch) { - // The epoch is a valid time, so this constructor doesn't interpret - // 0 as the null time. - if (ms_since_epoch == std::numeric_limits<double>::max()) { - return Max(); - } - return Time( - static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond)); -} - - -double Time::ToJsTime() const { - if (IsNull()) { - // Preserve 0 so the invalid result doesn't depend on the platform. - return 0; - } - if (IsMax()) { - // Preserve max without offset to prevent overflow. - return std::numeric_limits<double>::max(); - } - return static_cast<double>(us_) / kMicrosecondsPerMillisecond; -} - - -#if V8_OS_WIN - -class TickClock { - public: - virtual ~TickClock() {} - virtual int64_t Now() = 0; - virtual bool IsHighResolution() = 0; -}; - - -// Overview of time counters: -// (1) CPU cycle counter. (Retrieved via RDTSC) -// The CPU counter provides the highest resolution time stamp and is the least -// expensive to retrieve. 
However, the CPU counter is unreliable and should not -// be used in production. Its biggest issue is that it is per processor and it -// is not synchronized between processors. Also, on some computers, the counters -// will change frequency due to thermal and power changes, and stop in some -// states. -// -// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high- -// resolution (100 nanoseconds) time stamp but is comparatively more expensive -// to retrieve. What QueryPerformanceCounter actually does is up to the HAL. -// (with some help from ACPI). -// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx -// in the worst case, it gets the counter from the rollover interrupt on the -// programmable interrupt timer. In best cases, the HAL may conclude that the -// RDTSC counter runs at a constant frequency, then it uses that instead. On -// multiprocessor machines, it will try to verify the values returned from -// RDTSC on each processor are consistent with each other, and apply a handful -// of workarounds for known buggy hardware. In other words, QPC is supposed to -// give consistent result on a multiprocessor computer, but it is unreliable in -// reality due to bugs in BIOS or HAL on some, especially old computers. -// With recent updates on HAL and newer BIOS, QPC is getting more reliable but -// it should be used with caution. -// -// (3) System time. The system time provides a low-resolution (typically 10ms -// to 55 milliseconds) time stamp but is comparatively less expensive to -// retrieve and more reliable. 
-class HighResolutionTickClock V8_FINAL : public TickClock { - public: - explicit HighResolutionTickClock(int64_t ticks_per_second) - : ticks_per_second_(ticks_per_second) { - ASSERT_LT(0, ticks_per_second); - } - virtual ~HighResolutionTickClock() {} - - virtual int64_t Now() V8_OVERRIDE { - LARGE_INTEGER now; - BOOL result = QueryPerformanceCounter(&now); - ASSERT(result); - USE(result); - - // Intentionally calculate microseconds in a round about manner to avoid - // overflow and precision issues. Think twice before simplifying! - int64_t whole_seconds = now.QuadPart / ticks_per_second_; - int64_t leftover_ticks = now.QuadPart % ticks_per_second_; - int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) + - ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_); - - // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow() - // will never return 0. - return ticks + 1; - } - - virtual bool IsHighResolution() V8_OVERRIDE { - return true; - } - - private: - int64_t ticks_per_second_; -}; - - -class RolloverProtectedTickClock V8_FINAL : public TickClock { - public: - // We initialize rollover_ms_ to 1 to ensure that we will never - // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below. - RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {} - virtual ~RolloverProtectedTickClock() {} - - virtual int64_t Now() V8_OVERRIDE { - LockGuard<Mutex> lock_guard(&mutex_); - // We use timeGetTime() to implement TimeTicks::Now(), which rolls over - // every ~49.7 days. We try to track rollover ourselves, which works if - // TimeTicks::Now() is called at least every 49 days. 
- // Note that we do not use GetTickCount() here, since timeGetTime() gives - // more predictable delta values, as described here: - // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx - // timeGetTime() provides 1ms granularity when combined with - // timeBeginPeriod(). If the host application for V8 wants fast timers, it - // can use timeBeginPeriod() to increase the resolution. - DWORD now = timeGetTime(); - if (now < last_seen_now_) { - rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days. - } - last_seen_now_ = now; - return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond; - } - - virtual bool IsHighResolution() V8_OVERRIDE { - return false; - } - - private: - Mutex mutex_; - DWORD last_seen_now_; - int64_t rollover_ms_; -}; - - -static LazyStaticInstance<RolloverProtectedTickClock, - DefaultConstructTrait<RolloverProtectedTickClock>, - ThreadSafeInitOnceTrait>::type tick_clock = - LAZY_STATIC_INSTANCE_INITIALIZER; - - -struct CreateHighResTickClockTrait { - static TickClock* Create() { - // Check if the installed hardware supports a high-resolution performance - // counter, and if not fallback to the low-resolution tick clock. - LARGE_INTEGER ticks_per_second; - if (!QueryPerformanceFrequency(&ticks_per_second)) { - return tick_clock.Pointer(); - } - - // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter - // is unreliable, fallback to the low-resolution tick clock. - CPU cpu; - if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) { - return tick_clock.Pointer(); - } - - return new HighResolutionTickClock(ticks_per_second.QuadPart); - } -}; - - -static LazyDynamicInstance<TickClock, - CreateHighResTickClockTrait, - ThreadSafeInitOnceTrait>::type high_res_tick_clock = - LAZY_DYNAMIC_INSTANCE_INITIALIZER; - - -TimeTicks TimeTicks::Now() { - // Make sure we never return 0 here. 
- TimeTicks ticks(tick_clock.Pointer()->Now()); - ASSERT(!ticks.IsNull()); - return ticks; -} - - -TimeTicks TimeTicks::HighResolutionNow() { - // Make sure we never return 0 here. - TimeTicks ticks(high_res_tick_clock.Pointer()->Now()); - ASSERT(!ticks.IsNull()); - return ticks; -} - - -// static -bool TimeTicks::IsHighResolutionClockWorking() { - return high_res_tick_clock.Pointer()->IsHighResolution(); -} - -#else // V8_OS_WIN - -TimeTicks TimeTicks::Now() { - return HighResolutionNow(); -} - - -TimeTicks TimeTicks::HighResolutionNow() { - int64_t ticks; -#if V8_OS_MACOSX - static struct mach_timebase_info info; - if (info.denom == 0) { - kern_return_t result = mach_timebase_info(&info); - ASSERT_EQ(KERN_SUCCESS, result); - USE(result); - } - ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond * - info.numer / info.denom); -#elif V8_OS_SOLARIS - ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond); -#elif V8_LIBRT_NOT_AVAILABLE - // TODO(bmeurer): This is a temporary hack to support cross-compiling - // Chrome for Android in AOSP. Remove this once AOSP is fixed, also - // cleanup the tools/gyp/v8.gyp file. - struct timeval tv; - int result = gettimeofday(&tv, NULL); - ASSERT_EQ(0, result); - USE(result); - ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec); -#elif V8_OS_POSIX - struct timespec ts; - int result = clock_gettime(CLOCK_MONOTONIC, &ts); - ASSERT_EQ(0, result); - USE(result); - ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond + - ts.tv_nsec / Time::kNanosecondsPerMicrosecond); -#endif // V8_OS_MACOSX - // Make sure we never return 0 here. 
- return TimeTicks(ticks + 1); -} - - -// static -bool TimeTicks::IsHighResolutionClockWorking() { - return true; -} - -#endif // V8_OS_WIN - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform/time.h nodejs-0.11.15/deps/v8/src/platform/time.h --- nodejs-0.11.13/deps/v8/src/platform/time.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform/time.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,416 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_TIME_H_ -#define V8_PLATFORM_TIME_H_ - -#include <time.h> -#include <limits> - -#include "../allocation.h" - -// Forward declarations. -extern "C" { -struct _FILETIME; -struct mach_timespec; -struct timespec; -struct timeval; -} - -namespace v8 { -namespace internal { - -class Time; -class TimeTicks; - -// ----------------------------------------------------------------------------- -// TimeDelta -// -// This class represents a duration of time, internally represented in -// microseonds. - -class TimeDelta V8_FINAL BASE_EMBEDDED { - public: - TimeDelta() : delta_(0) {} - - // Converts units of time to TimeDeltas. - static TimeDelta FromDays(int days); - static TimeDelta FromHours(int hours); - static TimeDelta FromMinutes(int minutes); - static TimeDelta FromSeconds(int64_t seconds); - static TimeDelta FromMilliseconds(int64_t milliseconds); - static TimeDelta FromMicroseconds(int64_t microseconds) { - return TimeDelta(microseconds); - } - static TimeDelta FromNanoseconds(int64_t nanoseconds); - - // Returns the time delta in some unit. The F versions return a floating - // point value, the "regular" versions return a rounded-down value. - // - // InMillisecondsRoundedUp() instead returns an integer that is rounded up - // to the next full millisecond. 
- int InDays() const; - int InHours() const; - int InMinutes() const; - double InSecondsF() const; - int64_t InSeconds() const; - double InMillisecondsF() const; - int64_t InMilliseconds() const; - int64_t InMillisecondsRoundedUp() const; - int64_t InMicroseconds() const { return delta_; } - int64_t InNanoseconds() const; - - // Converts to/from Mach time specs. - static TimeDelta FromMachTimespec(struct mach_timespec ts); - struct mach_timespec ToMachTimespec() const; - - // Converts to/from POSIX time specs. - static TimeDelta FromTimespec(struct timespec ts); - struct timespec ToTimespec() const; - - TimeDelta& operator=(const TimeDelta& other) { - delta_ = other.delta_; - return *this; - } - - // Computations with other deltas. - TimeDelta operator+(const TimeDelta& other) const { - return TimeDelta(delta_ + other.delta_); - } - TimeDelta operator-(const TimeDelta& other) const { - return TimeDelta(delta_ - other.delta_); - } - - TimeDelta& operator+=(const TimeDelta& other) { - delta_ += other.delta_; - return *this; - } - TimeDelta& operator-=(const TimeDelta& other) { - delta_ -= other.delta_; - return *this; - } - TimeDelta operator-() const { - return TimeDelta(-delta_); - } - - double TimesOf(const TimeDelta& other) const { - return static_cast<double>(delta_) / static_cast<double>(other.delta_); - } - double PercentOf(const TimeDelta& other) const { - return TimesOf(other) * 100.0; - } - - // Computations with ints, note that we only allow multiplicative operations - // with ints, and additive operations with other deltas. - TimeDelta operator*(int64_t a) const { - return TimeDelta(delta_ * a); - } - TimeDelta operator/(int64_t a) const { - return TimeDelta(delta_ / a); - } - TimeDelta& operator*=(int64_t a) { - delta_ *= a; - return *this; - } - TimeDelta& operator/=(int64_t a) { - delta_ /= a; - return *this; - } - int64_t operator/(const TimeDelta& other) const { - return delta_ / other.delta_; - } - - // Comparison operators. 
- bool operator==(const TimeDelta& other) const { - return delta_ == other.delta_; - } - bool operator!=(const TimeDelta& other) const { - return delta_ != other.delta_; - } - bool operator<(const TimeDelta& other) const { - return delta_ < other.delta_; - } - bool operator<=(const TimeDelta& other) const { - return delta_ <= other.delta_; - } - bool operator>(const TimeDelta& other) const { - return delta_ > other.delta_; - } - bool operator>=(const TimeDelta& other) const { - return delta_ >= other.delta_; - } - - private: - // Constructs a delta given the duration in microseconds. This is private - // to avoid confusion by callers with an integer constructor. Use - // FromSeconds, FromMilliseconds, etc. instead. - explicit TimeDelta(int64_t delta) : delta_(delta) {} - - // Delta in microseconds. - int64_t delta_; -}; - - -// ----------------------------------------------------------------------------- -// Time -// -// This class represents an absolute point in time, internally represented as -// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970. - -class Time V8_FINAL BASE_EMBEDDED { - public: - static const int64_t kMillisecondsPerSecond = 1000; - static const int64_t kMicrosecondsPerMillisecond = 1000; - static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond * - kMillisecondsPerSecond; - static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60; - static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60; - static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24; - static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7; - static const int64_t kNanosecondsPerMicrosecond = 1000; - static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond * - kMicrosecondsPerSecond; - - // Contains the NULL time. Use Time::Now() to get the current time. - Time() : us_(0) {} - - // Returns true if the time object has not been initialized. 
- bool IsNull() const { return us_ == 0; } - - // Returns true if the time object is the maximum time. - bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); } - - // Returns the current time. Watch out, the system might adjust its clock - // in which case time will actually go backwards. We don't guarantee that - // times are increasing, or that two calls to Now() won't be the same. - static Time Now(); - - // Returns the current time. Same as Now() except that this function always - // uses system time so that there are no discrepancies between the returned - // time and system time even on virtual environments including our test bot. - // For timing sensitive unittests, this function should be used. - static Time NowFromSystemTime(); - - // Returns the time for epoch in Unix-like system (Jan 1, 1970). - static Time UnixEpoch() { return Time(0); } - - // Returns the maximum time, which should be greater than any reasonable time - // with which we might compare it. - static Time Max() { return Time(std::numeric_limits<int64_t>::max()); } - - // Converts to/from internal values. The meaning of the "internal value" is - // completely up to the implementation, so it should be treated as opaque. - static Time FromInternalValue(int64_t value) { - return Time(value); - } - int64_t ToInternalValue() const { - return us_; - } - - // Converts to/from POSIX time specs. - static Time FromTimespec(struct timespec ts); - struct timespec ToTimespec() const; - - // Converts to/from POSIX time values. - static Time FromTimeval(struct timeval tv); - struct timeval ToTimeval() const; - - // Converts to/from Windows file times. 
- static Time FromFiletime(struct _FILETIME ft); - struct _FILETIME ToFiletime() const; - - // Converts to/from the Javascript convention for times, a number of - // milliseconds since the epoch: - static Time FromJsTime(double ms_since_epoch); - double ToJsTime() const; - - Time& operator=(const Time& other) { - us_ = other.us_; - return *this; - } - - // Compute the difference between two times. - TimeDelta operator-(const Time& other) const { - return TimeDelta::FromMicroseconds(us_ - other.us_); - } - - // Modify by some time delta. - Time& operator+=(const TimeDelta& delta) { - us_ += delta.InMicroseconds(); - return *this; - } - Time& operator-=(const TimeDelta& delta) { - us_ -= delta.InMicroseconds(); - return *this; - } - - // Return a new time modified by some delta. - Time operator+(const TimeDelta& delta) const { - return Time(us_ + delta.InMicroseconds()); - } - Time operator-(const TimeDelta& delta) const { - return Time(us_ - delta.InMicroseconds()); - } - - // Comparison operators - bool operator==(const Time& other) const { - return us_ == other.us_; - } - bool operator!=(const Time& other) const { - return us_ != other.us_; - } - bool operator<(const Time& other) const { - return us_ < other.us_; - } - bool operator<=(const Time& other) const { - return us_ <= other.us_; - } - bool operator>(const Time& other) const { - return us_ > other.us_; - } - bool operator>=(const Time& other) const { - return us_ >= other.us_; - } - - private: - explicit Time(int64_t us) : us_(us) {} - - // Time in microseconds in UTC. - int64_t us_; -}; - -inline Time operator+(const TimeDelta& delta, const Time& time) { - return time + delta; -} - - -// ----------------------------------------------------------------------------- -// TimeTicks -// -// This class represents an abstract time that is most of the time incrementing -// for use in measuring time durations. It is internally represented in -// microseconds. 
It can not be converted to a human-readable time, but is -// guaranteed not to decrease (if the user changes the computer clock, -// Time::Now() may actually decrease or jump). But note that TimeTicks may -// "stand still", for example if the computer suspended. - -class TimeTicks V8_FINAL BASE_EMBEDDED { - public: - TimeTicks() : ticks_(0) {} - - // Platform-dependent tick count representing "right now." - // The resolution of this clock is ~1-15ms. Resolution varies depending - // on hardware/operating system configuration. - // This method never returns a null TimeTicks. - static TimeTicks Now(); - - // Returns a platform-dependent high-resolution tick count. Implementation - // is hardware dependent and may or may not return sub-millisecond - // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND - // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED. - // This method never returns a null TimeTicks. - static TimeTicks HighResolutionNow(); - - // Returns true if the high-resolution clock is working on this system. - static bool IsHighResolutionClockWorking(); - - // Returns true if this object has not been initialized. - bool IsNull() const { return ticks_ == 0; } - - // Converts to/from internal values. The meaning of the "internal value" is - // completely up to the implementation, so it should be treated as opaque. - static TimeTicks FromInternalValue(int64_t value) { - return TimeTicks(value); - } - int64_t ToInternalValue() const { - return ticks_; - } - - TimeTicks& operator=(const TimeTicks other) { - ticks_ = other.ticks_; - return *this; - } - - // Compute the difference between two times. - TimeDelta operator-(const TimeTicks other) const { - return TimeDelta::FromMicroseconds(ticks_ - other.ticks_); - } - - // Modify by some time delta. 
- TimeTicks& operator+=(const TimeDelta& delta) { - ticks_ += delta.InMicroseconds(); - return *this; - } - TimeTicks& operator-=(const TimeDelta& delta) { - ticks_ -= delta.InMicroseconds(); - return *this; - } - - // Return a new TimeTicks modified by some delta. - TimeTicks operator+(const TimeDelta& delta) const { - return TimeTicks(ticks_ + delta.InMicroseconds()); - } - TimeTicks operator-(const TimeDelta& delta) const { - return TimeTicks(ticks_ - delta.InMicroseconds()); - } - - // Comparison operators - bool operator==(const TimeTicks& other) const { - return ticks_ == other.ticks_; - } - bool operator!=(const TimeTicks& other) const { - return ticks_ != other.ticks_; - } - bool operator<(const TimeTicks& other) const { - return ticks_ < other.ticks_; - } - bool operator<=(const TimeTicks& other) const { - return ticks_ <= other.ticks_; - } - bool operator>(const TimeTicks& other) const { - return ticks_ > other.ticks_; - } - bool operator>=(const TimeTicks& other) const { - return ticks_ >= other.ticks_; - } - - private: - // Please use Now() to create a new object. This is for internal use - // and testing. Ticks is in microseconds. - explicit TimeTicks(int64_t ticks) : ticks_(ticks) {} - - // Tick count in microseconds. - int64_t ticks_; -}; - -inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) { - return ticks + delta; -} - -} } // namespace v8::internal - -#endif // V8_PLATFORM_TIME_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform-cygwin.cc nodejs-0.11.15/deps/v8/src/platform-cygwin.cc --- nodejs-0.11.13/deps/v8/src/platform-cygwin.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-cygwin.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,357 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for Cygwin goes here. For the POSIX-compatible -// parts, the implementation is in platform-posix.cc. 
- -#include <errno.h> -#include <pthread.h> -#include <semaphore.h> -#include <stdarg.h> -#include <strings.h> // index -#include <sys/time.h> -#include <sys/mman.h> // mmap & munmap -#include <unistd.h> // sysconf - -#undef MAP_TYPE - -#include "v8.h" - -#include "platform.h" -#include "simulator.h" -#include "v8threads.h" -#include "vm-state-inl.h" -#include "win32-headers.h" - -namespace v8 { -namespace internal { - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return tzname[0]; // The location of the timezone string on Cygwin. -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - // On Cygwin, struct tm does not contain a tm_gmtoff field. - time_t utc = time(NULL); - ASSERT(utc != -1); - struct tm* loc = localtime(&utc); - ASSERT(loc != NULL); - // time - localtime includes any daylight savings offset, so subtract it. - return static_cast<double>((mktime(loc) - utc) * msPerSecond - - (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0)); -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mbase == MAP_FAILED) { - LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); - fclose(file_); -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - // This function assumes that the layout of the file is as follows: - // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] - // If we encounter an unexpected situation we abort scanning further entries. - FILE* fp = fopen("/proc/self/maps", "r"); - if (fp == NULL) return; - - // Allocate enough room to be able to store a full file name. 
- const int kLibNameLen = FILENAME_MAX + 1; - char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); - - // This loop will terminate once the scanning hits an EOF. - while (true) { - uintptr_t start, end; - char attr_r, attr_w, attr_x, attr_p; - // Parse the addresses and permission bits at the beginning of the line. - if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; - if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; - - int c; - if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { - // Found a read-only executable entry. Skip characters until we reach - // the beginning of the filename or the end of the line. - do { - c = getc(fp); - } while ((c != EOF) && (c != '\n') && (c != '/')); - if (c == EOF) break; // EOF: Was unexpected, just exit. - - // Process the filename if found. - if (c == '/') { - ungetc(c, fp); // Push the '/' back into the stream to be read below. - - // Read to the end of the line. Exit if the read fails. - if (fgets(lib_name, kLibNameLen, fp) == NULL) break; - - // Drop the newline character read by fgets. We do not need to check - // for a zero-length string because we know that we at least read the - // '/' character. - lib_name[strlen(lib_name) - 1] = '\0'; - } else { - // No library name found, just record the raw address range. - snprintf(lib_name, kLibNameLen, - "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); - } - LOG(isolate, SharedLibraryEvent(lib_name, start, end)); - } else { - // Entry not describing executable data. Skip to end of line to set up - // reading the next entry. - do { - c = getc(fp); - } while ((c != EOF) && (c != '\n')); - if (c == EOF) break; - } - } - free(lib_name); - fclose(fp); -} - - -void OS::SignalCodeMovingGC() { - // Nothing to do on Cygwin. -} - - -// The VirtualMemory implementation is taken from platform-win32.cc. 
-// The mmap-based virtual memory implementation as it is used on most posix -// platforms does not work well because Cygwin does not support MAP_FIXED. -// This causes VirtualMemory::Commit to not always commit the memory region -// specified. - -static void* GetRandomAddr() { - Isolate* isolate = Isolate::UncheckedCurrent(); - // Note that the current isolate isn't set up in a call path via - // CpuFeatures::Probe. We don't care about randomization in this case because - // the code page is immediately freed. - if (isolate != NULL) { - // The address range used to randomize RWX allocations in OS::Allocate - // Try not to map pages into the default range that windows loads DLLs - // Use a multiple of 64k to prevent committing unused memory. - // Note: This does not guarantee RWX regions will be within the - // range kAllocationRandomAddressMin to kAllocationRandomAddressMax -#ifdef V8_HOST_ARCH_64_BIT - static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; - static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; -#else - static const intptr_t kAllocationRandomAddressMin = 0x04000000; - static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; -#endif - uintptr_t address = - (isolate->random_number_generator()->NextInt() << kPageSizeBits) | - kAllocationRandomAddressMin; - address &= kAllocationRandomAddressMax; - return reinterpret_cast<void *>(address); - } - return NULL; -} - - -static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { - LPVOID base = NULL; - - if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { - // For exectutable pages try and randomize the allocation address - for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { - base = VirtualAlloc(GetRandomAddr(), size, action, protection); - } - } - - // After three attempts give up and let the OS find an address to use. 
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); - - return base; -} - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* address = ReserveRegion(request_size); - if (address == NULL) return; - Address base = RoundUp(static_cast<Address>(address), alignment); - // Try reducing the size by freeing and then reallocating a specific area. - bool result = ReleaseRegion(address, request_size); - USE(result); - ASSERT(result); - address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); - if (address != NULL) { - request_size = size; - ASSERT(base == static_cast<Address>(address)); - } else { - // Resizing failed, just go with a bigger area. - address = ReserveRegion(request_size); - if (address == NULL) return; - } - address_ = address; - size_ = request_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address_, size_); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - ASSERT(IsReserved()); - return UncommitRegion(address, size); -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { - int prot = is_executable ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; - if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { - return false; - } - return true; -} - - -bool VirtualMemory::Guard(void* address) { - if (NULL == VirtualAlloc(address, - OS::CommitPageSize(), - MEM_COMMIT, - PAGE_NOACCESS)) { - return false; - } - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return VirtualFree(base, size, MEM_DECOMMIT) != 0; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { - return VirtualFree(base, 0, MEM_RELEASE) != 0; -} - - -bool VirtualMemory::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-freebsd.cc nodejs-0.11.15/deps/v8/src/platform-freebsd.cc --- nodejs-0.11.13/deps/v8/src/platform-freebsd.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-freebsd.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,332 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for FreeBSD goes here. For the POSIX-compatible -// parts, the implementation is in platform-posix.cc. - -#include <pthread.h> -#include <semaphore.h> -#include <signal.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <sys/types.h> -#include <sys/ucontext.h> -#include <stdlib.h> - -#include <sys/types.h> // mmap & munmap -#include <sys/mman.h> // mmap & munmap -#include <sys/stat.h> // open -#include <sys/fcntl.h> // open -#include <unistd.h> // getpagesize -// If you don't have execinfo.h then you need devel/libexecinfo from ports. 
-#include <strings.h> // index -#include <errno.h> -#include <stdarg.h> -#include <limits.h> - -#undef MAP_TYPE - -#include "v8.h" -#include "v8threads.h" - -#include "platform.h" -#include "vm-state-inl.h" - - -namespace v8 { -namespace internal { - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return t->tm_zone; -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - time_t tv = time(NULL); - struct tm* t = localtime(&tv); - // tm_gmtoff includes any daylight savings offset, so subtract it. - return static_cast<double>(t->tm_gmtoff * msPerSecond - - (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool executable) { - const size_t msize = RoundUp(requested, getpagesize()); - int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); - void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); - - if (mbase == MAP_FAILED) { - LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* 
OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); - fclose(file_); -} - - -static unsigned StringToLong(char* buffer) { - return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - static const int MAP_LENGTH = 1024; - int fd = open("/proc/self/maps", O_RDONLY); - if (fd < 0) return; - while (true) { - char addr_buffer[11]; - addr_buffer[0] = '0'; - addr_buffer[1] = 'x'; - addr_buffer[10] = 0; - int result = read(fd, addr_buffer + 2, 8); - if (result < 8) break; - unsigned start = StringToLong(addr_buffer); - result = read(fd, addr_buffer + 2, 1); - if (result < 1) break; - if (addr_buffer[2] != '-') break; - result = read(fd, addr_buffer + 2, 8); - if (result < 8) break; - unsigned end = StringToLong(addr_buffer); - char buffer[MAP_LENGTH]; - int bytes_read = -1; - do { - bytes_read++; - if (bytes_read >= MAP_LENGTH - 1) - break; - result = read(fd, buffer + bytes_read, 1); - if (result < 1) break; - } while (buffer[bytes_read] != '\n'); - buffer[bytes_read] = 0; - // Ignore mappings that are not executable. - if (buffer[3] != 'x') continue; - char* start_of_path = index(buffer, '/'); - // There may be no filename in this line. Skip to next. - if (start_of_path == NULL) continue; - buffer[bytes_read] = 0; - LOG(isolate, SharedLibraryEvent(start_of_path, start, end)); - } - close(fd); -} - - -void OS::SignalCodeMovingGC() { -} - - - -// Constants used for mmap. 
-static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* reservation = mmap(OS::GetRandomMmapAddr(), - request_size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - if (reservation == MAP_FAILED) return; - - Address base = static_cast<Address>(reservation); - Address aligned_base = RoundUp(base, alignment); - ASSERT_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - ASSERT_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - ASSERT(aligned_size == request_size); - - address_ = static_cast<void*>(aligned_base); - size_ = aligned_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - return 
UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - OS::Guard(address, OS::CommitPageSize()); - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(OS::GetRandomMmapAddr(), - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(base, - size, - prot, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return mmap(base, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { - return munmap(base, size) == 0; -} - - -bool VirtualMemory::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform.h nodejs-0.11.15/deps/v8/src/platform.h --- nodejs-0.11.13/deps/v8/src/platform.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,626 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This module contains the platform-specific code. This make the rest of the -// code less dependent on operating system, compilers and runtime libraries. -// This module does specifically not deal with differences between different -// processor architecture. -// The platform classes have the same definition for all platforms. The -// implementation for a particular platform is put in platform_<os>.cc. -// The build system then uses the implementation for the target platform. -// -// This design has been chosen because it is simple and fast. Alternatively, -// the platform dependent classes could have been implemented using abstract -// superclasses with virtual methods and having specializations for each -// platform. 
This design was rejected because it was more complicated and -// slower. It would require factory methods for selecting the right -// implementation and the overhead of virtual methods for performance -// sensitive like mutex locking/unlocking. - -#ifndef V8_PLATFORM_H_ -#define V8_PLATFORM_H_ - -#include <stdarg.h> - -#include "platform/mutex.h" -#include "platform/semaphore.h" -#include "utils.h" -#include "v8globals.h" - -#ifdef __sun -# ifndef signbit -namespace std { -int signbit(double x); -} -# endif -#endif - -#if V8_OS_QNX -#include "qnx-math.h" -#endif - -// Microsoft Visual C++ specific stuff. -#if V8_LIBC_MSVCRT - -#include "win32-headers.h" -#include "win32-math.h" - -int strncasecmp(const char* s1, const char* s2, int n); - -// Visual C++ 2013 and higher implement this function. -#if (_MSC_VER < 1800) -inline int lrint(double flt) { - int intgr; -#if V8_TARGET_ARCH_IA32 - __asm { - fld flt - fistp intgr - }; -#else - intgr = static_cast<int>(flt + 0.5); - if ((intgr & 1) != 0 && intgr - flt == 0.5) { - // If the number is halfway between two integers, round to the even one. - intgr--; - } -#endif - return intgr; -} -#endif // _MSC_VER < 1800 - -#endif // V8_LIBC_MSVCRT - -namespace v8 { -namespace internal { - -double modulo(double x, double y); - -// Custom implementation of math functions. -double fast_exp(double input); -double fast_sqrt(double input); -// The custom exp implementation needs 16KB of lookup data; initialize it -// on demand. 
-void lazily_initialize_fast_exp(); - -// ---------------------------------------------------------------------------- -// Fast TLS support - -#ifndef V8_NO_FAST_TLS - -#if defined(_MSC_VER) && V8_HOST_ARCH_IA32 - -#define V8_FAST_TLS_SUPPORTED 1 - -INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); - -inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { - const intptr_t kTibInlineTlsOffset = 0xE10; - const intptr_t kTibExtraTlsOffset = 0xF94; - const intptr_t kMaxInlineSlots = 64; - const intptr_t kMaxSlots = kMaxInlineSlots + 1024; - ASSERT(0 <= index && index < kMaxSlots); - if (index < kMaxInlineSlots) { - return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + - kPointerSize * index)); - } - intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); - ASSERT(extra != 0); - return *reinterpret_cast<intptr_t*>(extra + - kPointerSize * (index - kMaxInlineSlots)); -} - -#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) - -#define V8_FAST_TLS_SUPPORTED 1 - -extern intptr_t kMacTlsBaseOffset; - -INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); - -inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { - intptr_t result; -#if V8_HOST_ARCH_IA32 - asm("movl %%gs:(%1,%2,4), %0;" - :"=r"(result) // Output must be a writable register. - :"r"(kMacTlsBaseOffset), "r"(index)); -#else - asm("movq %%gs:(%1,%2,8), %0;" - :"=r"(result) - :"r"(kMacTlsBaseOffset), "r"(index)); -#endif - return result; -} - -#endif - -#endif // V8_NO_FAST_TLS - - -class TimezoneCache; - - -// ---------------------------------------------------------------------------- -// OS -// -// This class has static methods for the different platform specific -// functions. Add methods here to cope with differences between the -// supported platforms. - -class OS { - public: - // Initializes the platform OS support that depend on CPU features. This is - // called after CPU initialization. 
- static void PostSetUp(); - - // Returns the accumulated user time for thread. This routine - // can be used for profiling. The implementation should - // strive for high-precision timer resolution, preferable - // micro-second resolution. - static int GetUserTime(uint32_t* secs, uint32_t* usecs); - - // Returns current time as the number of milliseconds since - // 00:00:00 UTC, January 1, 1970. - static double TimeCurrentMillis(); - - static TimezoneCache* CreateTimezoneCache(); - static void DisposeTimezoneCache(TimezoneCache* cache); - static void ClearTimezoneCache(TimezoneCache* cache); - - // Returns a string identifying the current time zone. The - // timestamp is used for determining if DST is in effect. - static const char* LocalTimezone(double time, TimezoneCache* cache); - - // Returns the local time offset in milliseconds east of UTC without - // taking daylight savings time into account. - static double LocalTimeOffset(TimezoneCache* cache); - - // Returns the daylight savings offset for the given time. - static double DaylightSavingsOffset(double time, TimezoneCache* cache); - - // Returns last OS error. - static int GetLastError(); - - static FILE* FOpen(const char* path, const char* mode); - static bool Remove(const char* path); - - // Opens a temporary file, the file is auto removed on close. - static FILE* OpenTemporaryFile(); - - // Log file open mode is platform-dependent due to line ends issues. - static const char* const LogFileOpenMode; - - // Print output to console. This is mostly used for debugging output. - // On platforms that has standard terminal output, the output - // should go to stdout. - static void Print(const char* format, ...); - static void VPrint(const char* format, va_list args); - - // Print output to a file. This is mostly used for debugging output. - static void FPrint(FILE* out, const char* format, ...); - static void VFPrint(FILE* out, const char* format, va_list args); - - // Print error output to console. 
This is mostly used for error message - // output. On platforms that has standard terminal output, the output - // should go to stderr. - static void PrintError(const char* format, ...); - static void VPrintError(const char* format, va_list args); - - // Allocate/Free memory used by JS heap. Pages are readable/writable, but - // they are not guaranteed to be executable unless 'executable' is true. - // Returns the address of allocated memory, or NULL if failed. - static void* Allocate(const size_t requested, - size_t* allocated, - bool is_executable); - static void Free(void* address, const size_t size); - - // This is the granularity at which the ProtectCode(...) call can set page - // permissions. - static intptr_t CommitPageSize(); - - // Mark code segments non-writable. - static void ProtectCode(void* address, const size_t size); - - // Assign memory as a guard page so that access will cause an exception. - static void Guard(void* address, const size_t size); - - // Generate a random address to be used for hinting mmap(). - static void* GetRandomMmapAddr(); - - // Get the Alignment guaranteed by Allocate(). - static size_t AllocateAlignment(); - - // Sleep for a number of milliseconds. - static void Sleep(const int milliseconds); - - // Abort the current process. - static void Abort(); - - // Debug break. - static void DebugBreak(); - - // Walk the stack. - static const int kStackWalkError = -1; - static const int kStackWalkMaxNameLen = 256; - static const int kStackWalkMaxTextLen = 256; - struct StackFrame { - void* address; - char text[kStackWalkMaxTextLen]; - }; - - class MemoryMappedFile { - public: - static MemoryMappedFile* open(const char* name); - static MemoryMappedFile* create(const char* name, int size, void* initial); - virtual ~MemoryMappedFile() { } - virtual void* memory() = 0; - virtual int size() = 0; - }; - - // Safe formatting print. Ensures that str is always null-terminated. 
- // Returns the number of chars written, or -1 if output was truncated. - static int SNPrintF(Vector<char> str, const char* format, ...); - static int VSNPrintF(Vector<char> str, - const char* format, - va_list args); - - static char* StrChr(char* str, int c); - static void StrNCpy(Vector<char> dest, const char* src, size_t n); - - // Support for the profiler. Can do nothing, in which case ticks - // occuring in shared libraries will not be properly accounted for. - static void LogSharedLibraryAddresses(Isolate* isolate); - - // Support for the profiler. Notifies the external profiling - // process that a code moving garbage collection starts. Can do - // nothing, in which case the code objects must not move (e.g., by - // using --never-compact) if accurate profiling is desired. - static void SignalCodeMovingGC(); - - // The return value indicates the CPU features we are sure of because of the - // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2 - // instructions. - // This is a little messy because the interpretation is subject to the cross - // of the CPU and the OS. The bits in the answer correspond to the bit - // positions indicated by the members of the CpuFeature enum from globals.h - static uint64_t CpuFeaturesImpliedByPlatform(); - - // The total amount of physical memory available on the current system. - static uint64_t TotalPhysicalMemory(); - - // Maximum size of the virtual memory. 0 means there is no artificial - // limit. - static intptr_t MaxVirtualMemory(); - - // Returns the double constant NAN - static double nan_value(); - - // Support runtime detection of whether the hard float option of the - // EABI is used. - static bool ArmUsingHardFloat(); - - // Returns the activation frame alignment constraint or zero if - // the platform doesn't care. Guaranteed to be a power of two. 
- static int ActivationFrameAlignment(); - -#if defined(V8_TARGET_ARCH_IA32) - // Limit below which the extra overhead of the MemCopy function is likely - // to outweigh the benefits of faster copying. - static const int kMinComplexMemCopy = 64; - - // Copy memory area. No restrictions. - static void MemMove(void* dest, const void* src, size_t size); - typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); - - // Keep the distinction of "move" vs. "copy" for the benefit of other - // architectures. - static void MemCopy(void* dest, const void* src, size_t size) { - MemMove(dest, src, size); - } -#elif defined(V8_HOST_ARCH_ARM) - typedef void (*MemCopyUint8Function)(uint8_t* dest, - const uint8_t* src, - size_t size); - static MemCopyUint8Function memcopy_uint8_function; - static void MemCopyUint8Wrapper(uint8_t* dest, - const uint8_t* src, - size_t chars) { - memcpy(dest, src, chars); - } - // For values < 16, the assembler function is slower than the inlined C code. - static const int kMinComplexMemCopy = 16; - static void MemCopy(void* dest, const void* src, size_t size) { - (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), - reinterpret_cast<const uint8_t*>(src), - size); - } - static void MemMove(void* dest, const void* src, size_t size) { - memmove(dest, src, size); - } - - typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, - const uint8_t* src, - size_t size); - static MemCopyUint16Uint8Function memcopy_uint16_uint8_function; - static void MemCopyUint16Uint8Wrapper(uint16_t* dest, - const uint8_t* src, - size_t chars); - // For values < 12, the assembler function is slower than the inlined C code. 
- static const int kMinComplexConvertMemCopy = 12; - static void MemCopyUint16Uint8(uint16_t* dest, - const uint8_t* src, - size_t size) { - (*memcopy_uint16_uint8_function)(dest, src, size); - } -#elif defined(V8_HOST_ARCH_MIPS) - typedef void (*MemCopyUint8Function)(uint8_t* dest, - const uint8_t* src, - size_t size); - static MemCopyUint8Function memcopy_uint8_function; - static void MemCopyUint8Wrapper(uint8_t* dest, - const uint8_t* src, - size_t chars) { - memcpy(dest, src, chars); - } - // For values < 16, the assembler function is slower than the inlined C code. - static const int kMinComplexMemCopy = 16; - static void MemCopy(void* dest, const void* src, size_t size) { - (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), - reinterpret_cast<const uint8_t*>(src), - size); - } - static void MemMove(void* dest, const void* src, size_t size) { - memmove(dest, src, size); - } -#else - // Copy memory area to disjoint memory area. - static void MemCopy(void* dest, const void* src, size_t size) { - memcpy(dest, src, size); - } - static void MemMove(void* dest, const void* src, size_t size) { - memmove(dest, src, size); - } - static const int kMinComplexMemCopy = 16 * kPointerSize; -#endif // V8_TARGET_ARCH_IA32 - - static int GetCurrentProcessId(); - - private: - static const int msPerSecond = 1000; - - DISALLOW_IMPLICIT_CONSTRUCTORS(OS); -}; - -// Represents and controls an area of reserved memory. -// Control of the reserved memory can be assigned to another VirtualMemory -// object by assignment or copy-contructing. This removes the reserved memory -// from the original object. -class VirtualMemory { - public: - // Empty VirtualMemory object, controlling no reserved memory. - VirtualMemory(); - - // Reserves virtual memory with size. - explicit VirtualMemory(size_t size); - - // Reserves virtual memory containing an area of the given size that - // is aligned per alignment. This may not be at the position returned - // by address(). 
- VirtualMemory(size_t size, size_t alignment); - - // Releases the reserved memory, if any, controlled by this VirtualMemory - // object. - ~VirtualMemory(); - - // Returns whether the memory has been reserved. - bool IsReserved(); - - // Initialize or resets an embedded VirtualMemory object. - void Reset(); - - // Returns the start address of the reserved memory. - // If the memory was reserved with an alignment, this address is not - // necessarily aligned. The user might need to round it up to a multiple of - // the alignment to get the start of the aligned block. - void* address() { - ASSERT(IsReserved()); - return address_; - } - - // Returns the size of the reserved memory. The returned value is only - // meaningful when IsReserved() returns true. - // If the memory was reserved with an alignment, this size may be larger - // than the requested size. - size_t size() { return size_; } - - // Commits real memory. Returns whether the operation succeeded. - bool Commit(void* address, size_t size, bool is_executable); - - // Uncommit real memory. Returns whether the operation succeeded. - bool Uncommit(void* address, size_t size); - - // Creates a single guard page at the given address. - bool Guard(void* address); - - void Release() { - ASSERT(IsReserved()); - // Notice: Order is important here. The VirtualMemory object might live - // inside the allocated region. - void* address = address_; - size_t size = size_; - Reset(); - bool result = ReleaseRegion(address, size); - USE(result); - ASSERT(result); - } - - // Assign control of the reserved region to a different VirtualMemory object. - // The old object is no longer functional (IsReserved() returns false). 
- void TakeControl(VirtualMemory* from) { - ASSERT(!IsReserved()); - address_ = from->address_; - size_ = from->size_; - from->Reset(); - } - - static void* ReserveRegion(size_t size); - - static bool CommitRegion(void* base, size_t size, bool is_executable); - - static bool UncommitRegion(void* base, size_t size); - - // Must be called with a base pointer that has been returned by ReserveRegion - // and the same size it was reserved with. - static bool ReleaseRegion(void* base, size_t size); - - // Returns true if OS performs lazy commits, i.e. the memory allocation call - // defers actual physical memory allocation till the first memory access. - // Otherwise returns false. - static bool HasLazyCommits(); - - private: - void* address_; // Start address of the virtual memory. - size_t size_; // Size of the virtual memory. -}; - - -// ---------------------------------------------------------------------------- -// Thread -// -// Thread objects are used for creating and running threads. When the start() -// method is called the new thread starts running the run() method in the new -// thread. The Thread object should not be deallocated before the thread has -// terminated. - -class Thread { - public: - // Opaque data type for thread-local storage keys. - // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified - // to ensure that enumeration type has correct value range (see Issue 830 for - // more details). - enum LocalStorageKey { - LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt, - LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt - }; - - class Options { - public: - Options() : name_("v8:<unknown>"), stack_size_(0) {} - Options(const char* name, int stack_size = 0) - : name_(name), stack_size_(stack_size) {} - - const char* name() const { return name_; } - int stack_size() const { return stack_size_; } - - private: - const char* name_; - int stack_size_; - }; - - // Create new thread. 
- explicit Thread(const Options& options); - virtual ~Thread(); - - // Start new thread by calling the Run() method on the new thread. - void Start(); - - // Start new thread and wait until Run() method is called on the new thread. - void StartSynchronously() { - start_semaphore_ = new Semaphore(0); - Start(); - start_semaphore_->Wait(); - delete start_semaphore_; - start_semaphore_ = NULL; - } - - // Wait until thread terminates. - void Join(); - - inline const char* name() const { - return name_; - } - - // Abstract method for run handler. - virtual void Run() = 0; - - // Thread-local storage. - static LocalStorageKey CreateThreadLocalKey(); - static void DeleteThreadLocalKey(LocalStorageKey key); - static void* GetThreadLocal(LocalStorageKey key); - static int GetThreadLocalInt(LocalStorageKey key) { - return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key))); - } - static void SetThreadLocal(LocalStorageKey key, void* value); - static void SetThreadLocalInt(LocalStorageKey key, int value) { - SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value))); - } - static bool HasThreadLocal(LocalStorageKey key) { - return GetThreadLocal(key) != NULL; - } - -#ifdef V8_FAST_TLS_SUPPORTED - static inline void* GetExistingThreadLocal(LocalStorageKey key) { - void* result = reinterpret_cast<void*>( - InternalGetExistingThreadLocal(static_cast<intptr_t>(key))); - ASSERT(result == GetThreadLocal(key)); - return result; - } -#else - static inline void* GetExistingThreadLocal(LocalStorageKey key) { - return GetThreadLocal(key); - } -#endif - - // A hint to the scheduler to let another thread run. - static void YieldCPU(); - - - // The thread name length is limited to 16 based on Linux's implementation of - // prctl(). 
- static const int kMaxThreadNameLength = 16; - - class PlatformData; - PlatformData* data() { return data_; } - - void NotifyStartedAndRun() { - if (start_semaphore_) start_semaphore_->Signal(); - Run(); - } - - private: - void set_name(const char* name); - - PlatformData* data_; - - char name_[kMaxThreadNameLength]; - int stack_size_; - Semaphore* start_semaphore_; - - DISALLOW_COPY_AND_ASSIGN(Thread); -}; - -} } // namespace v8::internal - -#endif // V8_PLATFORM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/platform-linux.cc nodejs-0.11.15/deps/v8/src/platform-linux.cc --- nodejs-0.11.13/deps/v8/src/platform-linux.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-linux.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,458 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for Linux goes here. For the POSIX-compatible -// parts, the implementation is in platform-posix.cc. - -#include <pthread.h> -#include <semaphore.h> -#include <signal.h> -#include <sys/prctl.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <sys/syscall.h> -#include <sys/types.h> -#include <stdlib.h> - -// Ubuntu Dapper requires memory pages to be marked as -// executable. Otherwise, OS raises an exception when executing code -// in that page. -#include <sys/types.h> // mmap & munmap -#include <sys/mman.h> // mmap & munmap -#include <sys/stat.h> // open -#include <fcntl.h> // open -#include <unistd.h> // sysconf -#include <strings.h> // index -#include <errno.h> -#include <stdarg.h> - -// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'. -// Old versions of the C library <signal.h> didn't define the type. 
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \ - (defined(__arm__) || defined(__aarch64__)) && \ - !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) -#include <asm/sigcontext.h> -#endif - -#if defined(LEAK_SANITIZER) -#include <sanitizer/lsan_interface.h> -#endif - -#undef MAP_TYPE - -#include "v8.h" - -#include "platform.h" -#include "v8threads.h" -#include "vm-state-inl.h" - - -namespace v8 { -namespace internal { - - -#ifdef __arm__ - -bool OS::ArmUsingHardFloat() { - // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify - // the Floating Point ABI used (PCS stands for Procedure Call Standard). - // We use these as well as a couple of other defines to statically determine - // what FP ABI used. - // GCC versions 4.4 and below don't support hard-fp. - // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or - // __ARM_PCS_VFP. - -#define GCC_VERSION (__GNUC__ * 10000 \ - + __GNUC_MINOR__ * 100 \ - + __GNUC_PATCHLEVEL__) -#if GCC_VERSION >= 40600 -#if defined(__ARM_PCS_VFP) - return true; -#else - return false; -#endif - -#elif GCC_VERSION < 40500 - return false; - -#else -#if defined(__ARM_PCS_VFP) - return true; -#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \ - !defined(__VFP_FP__) - return false; -#else -#error "Your version of GCC does not report the FP ABI compiled for." \ - "Please report it on this issue" \ - "http://code.google.com/p/v8/issues/detail?id=2140" - -#endif -#endif -#undef GCC_VERSION -} - -#endif // def __arm__ - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return t->tm_zone; -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - time_t tv = time(NULL); - struct tm* t = localtime(&tv); - // tm_gmtoff includes any daylight savings offset, so subtract it. 
- return static_cast<double>(t->tm_gmtoff * msPerSecond - - (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - const size_t msize = RoundUp(requested, AllocateAlignment()); - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - void* addr = OS::GetRandomMmapAddr(); - void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mbase == MAP_FAILED) { - LOG(i::Isolate::Current(), - StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(OS::GetRandomMmapAddr(), - size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fileno(file), - 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(OS::GetRandomMmapAddr(), - size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fileno(file), - 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) OS::Free(memory_, size_); - fclose(file_); -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - // This 
function assumes that the layout of the file is as follows: - // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] - // If we encounter an unexpected situation we abort scanning further entries. - FILE* fp = fopen("/proc/self/maps", "r"); - if (fp == NULL) return; - - // Allocate enough room to be able to store a full file name. - const int kLibNameLen = FILENAME_MAX + 1; - char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); - - // This loop will terminate once the scanning hits an EOF. - while (true) { - uintptr_t start, end; - char attr_r, attr_w, attr_x, attr_p; - // Parse the addresses and permission bits at the beginning of the line. - if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; - if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; - - int c; - if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { - // Found a read-only executable entry. Skip characters until we reach - // the beginning of the filename or the end of the line. - do { - c = getc(fp); - } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '[')); - if (c == EOF) break; // EOF: Was unexpected, just exit. - - // Process the filename if found. - if ((c == '/') || (c == '[')) { - // Push the '/' or '[' back into the stream to be read below. - ungetc(c, fp); - - // Read to the end of the line. Exit if the read fails. - if (fgets(lib_name, kLibNameLen, fp) == NULL) break; - - // Drop the newline character read by fgets. We do not need to check - // for a zero-length string because we know that we at least read the - // '/' or '[' character. - lib_name[strlen(lib_name) - 1] = '\0'; - } else { - // No library name found, just record the raw address range. - snprintf(lib_name, kLibNameLen, - "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); - } - LOG(isolate, SharedLibraryEvent(lib_name, start, end)); - } else { - // Entry not describing executable data. Skip to end of line to set up - // reading the next entry. 
- do { - c = getc(fp); - } while ((c != EOF) && (c != '\n')); - if (c == EOF) break; - } - } - free(lib_name); - fclose(fp); -} - - -void OS::SignalCodeMovingGC() { - // Support for ll_prof.py. - // - // The Linux profiler built into the kernel logs all mmap's with - // PROT_EXEC so that analysis tools can properly attribute ticks. We - // do a mmap with a name known by ll_prof.py and immediately munmap - // it. This injects a GC marker into the stream of events generated - // by the kernel and allows us to synchronize V8 code log and the - // kernel log. - int size = sysconf(_SC_PAGESIZE); - FILE* f = fopen(FLAG_gc_fake_mmap, "w+"); - if (f == NULL) { - OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap); - OS::Abort(); - } - void* addr = mmap(OS::GetRandomMmapAddr(), - size, -#if defined(__native_client__) - // The Native Client port of V8 uses an interpreter, - // so code pages don't need PROT_EXEC. - PROT_READ, -#else - PROT_READ | PROT_EXEC, -#endif - MAP_PRIVATE, - fileno(f), - 0); - ASSERT(addr != MAP_FAILED); - OS::Free(addr, size); - fclose(f); -} - - -// Constants used for mmap. 
-static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* reservation = mmap(OS::GetRandomMmapAddr(), - request_size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - if (reservation == MAP_FAILED) return; - - Address base = static_cast<Address>(reservation); - Address aligned_base = RoundUp(base, alignment); - ASSERT_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - ASSERT_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - ASSERT(aligned_size == request_size); - - address_ = static_cast<void*>(aligned_base); - size_ = aligned_size; -#if defined(LEAK_SANITIZER) - __lsan_register_root_region(address_, size_); -#endif -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} 
- - -bool VirtualMemory::Uncommit(void* address, size_t size) { - return UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - OS::Guard(address, OS::CommitPageSize()); - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(OS::GetRandomMmapAddr(), - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - -#if defined(LEAK_SANITIZER) - __lsan_register_root_region(result, size); -#endif - return result; -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { -#if defined(__native_client__) - // The Native Client port of V8 uses an interpreter, - // so code pages don't need PROT_EXEC. - int prot = PROT_READ | PROT_WRITE; -#else - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); -#endif - if (MAP_FAILED == mmap(base, - size, - prot, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return mmap(base, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { -#if defined(LEAK_SANITIZER) - __lsan_unregister_root_region(base, size); -#endif - return munmap(base, size) == 0; -} - - -bool VirtualMemory::HasLazyCommits() { - return true; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-macos.cc nodejs-0.11.15/deps/v8/src/platform-macos.cc --- nodejs-0.11.13/deps/v8/src/platform-macos.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-macos.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,335 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for MacOS goes here. For the POSIX-compatible -// parts, the implementation is in platform-posix.cc. 
- -#include <dlfcn.h> -#include <unistd.h> -#include <sys/mman.h> -#include <mach/mach_init.h> -#include <mach-o/dyld.h> -#include <mach-o/getsect.h> - -#include <AvailabilityMacros.h> - -#include <pthread.h> -#include <semaphore.h> -#include <signal.h> -#include <libkern/OSAtomic.h> -#include <mach/mach.h> -#include <mach/semaphore.h> -#include <mach/task.h> -#include <mach/vm_statistics.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <sys/types.h> -#include <sys/sysctl.h> -#include <stdarg.h> -#include <stdlib.h> -#include <string.h> -#include <errno.h> - -#undef MAP_TYPE - -#include "v8.h" - -#include "platform.h" -#include "simulator.h" -#include "vm-state-inl.h" - - -namespace v8 { -namespace internal { - - -// Constants used for mmap. -// kMmapFd is used to pass vm_alloc flags to tag the region with the user -// defined tag 255 This helps identify V8-allocated regions in memory analysis -// tools like vmmap(1). -static const int kMmapFd = VM_MAKE_TAG(255); -static const off_t kMmapFdOffset = 0; - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - const size_t msize = RoundUp(requested, getpagesize()); - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - void* mbase = mmap(OS::GetRandomMmapAddr(), - msize, - prot, - MAP_PRIVATE | MAP_ANON, - kMmapFd, - kMmapFdOffset); - if (mbase == MAP_FAILED) { - LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(OS::GetRandomMmapAddr(), - size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fileno(file), - 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(OS::GetRandomMmapAddr(), - size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fileno(file), - 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) OS::Free(memory_, size_); - fclose(file_); -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - unsigned int images_count = _dyld_image_count(); - for (unsigned int i = 0; i < images_count; ++i) { - const mach_header* header = _dyld_get_image_header(i); - if (header == NULL) continue; -#if V8_HOST_ARCH_X64 - uint64_t size; - char* code_ptr = getsectdatafromheader_64( - reinterpret_cast<const mach_header_64*>(header), - 
SEG_TEXT, - SECT_TEXT, - &size); -#else - unsigned int size; - char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); -#endif - if (code_ptr == NULL) continue; - const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); - const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; - LOG(isolate, - SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); - } -} - - -void OS::SignalCodeMovingGC() { -} - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return t->tm_zone; -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - time_t tv = time(NULL); - struct tm* t = localtime(&tv); - // tm_gmtoff includes any daylight savings offset, so subtract it. - return static_cast<double>(t->tm_gmtoff * msPerSecond - - (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); -} - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* reservation = mmap(OS::GetRandomMmapAddr(), - request_size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - if (reservation == MAP_FAILED) return; - - Address base = static_cast<Address>(reservation); - Address aligned_base = RoundUp(base, alignment); - ASSERT_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. 
- if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - ASSERT_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - ASSERT(aligned_size == request_size); - - address_ = static_cast<void*>(aligned_base); - size_ = aligned_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - return UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - OS::Guard(address, OS::CommitPageSize()); - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(OS::GetRandomMmapAddr(), - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - - -bool VirtualMemory::CommitRegion(void* address, - size_t size, - bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, - size, - prot, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - - -bool VirtualMemory::UncommitRegion(void* address, size_t size) { - return mmap(address, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - - -bool VirtualMemory::ReleaseRegion(void* address, size_t size) { - return munmap(address, size) == 0; -} - - -bool VirtualMemory::HasLazyCommits() { - return false; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-openbsd.cc nodejs-0.11.15/deps/v8/src/platform-openbsd.cc --- nodejs-0.11.13/deps/v8/src/platform-openbsd.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-openbsd.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,364 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for OpenBSD and NetBSD goes here. For the -// POSIX-compatible parts, the implementation is in platform-posix.cc. - -#include <pthread.h> -#include <semaphore.h> -#include <signal.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <sys/syscall.h> -#include <sys/types.h> -#include <stdlib.h> - -#include <sys/types.h> // mmap & munmap -#include <sys/mman.h> // mmap & munmap -#include <sys/stat.h> // open -#include <fcntl.h> // open -#include <unistd.h> // sysconf -#include <strings.h> // index -#include <errno.h> -#include <stdarg.h> - -#undef MAP_TYPE - -#include "v8.h" - -#include "platform.h" -#include "v8threads.h" -#include "vm-state-inl.h" - - -namespace v8 { -namespace internal { - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return t->tm_zone; -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - time_t tv = time(NULL); - struct tm* t = localtime(&tv); - // tm_gmtoff includes any daylight savings offset, so subtract it. - return static_cast<double>(t->tm_gmtoff * msPerSecond - - (t->tm_isdst > 0 ? 
3600 * msPerSecond : 0)); -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - const size_t msize = RoundUp(requested, AllocateAlignment()); - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - void* addr = OS::GetRandomMmapAddr(); - void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); - if (mbase == MAP_FAILED) { - LOG(i::Isolate::Current(), - StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) OS::Free(memory_, size_); - fclose(file_); -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - // This function assumes that the layout of the file is as follows: - // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] - // If we encounter 
an unexpected situation we abort scanning further entries. - FILE* fp = fopen("/proc/self/maps", "r"); - if (fp == NULL) return; - - // Allocate enough room to be able to store a full file name. - const int kLibNameLen = FILENAME_MAX + 1; - char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); - - // This loop will terminate once the scanning hits an EOF. - while (true) { - uintptr_t start, end; - char attr_r, attr_w, attr_x, attr_p; - // Parse the addresses and permission bits at the beginning of the line. - if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; - if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; - - int c; - if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { - // Found a read-only executable entry. Skip characters until we reach - // the beginning of the filename or the end of the line. - do { - c = getc(fp); - } while ((c != EOF) && (c != '\n') && (c != '/')); - if (c == EOF) break; // EOF: Was unexpected, just exit. - - // Process the filename if found. - if (c == '/') { - ungetc(c, fp); // Push the '/' back into the stream to be read below. - - // Read to the end of the line. Exit if the read fails. - if (fgets(lib_name, kLibNameLen, fp) == NULL) break; - - // Drop the newline character read by fgets. We do not need to check - // for a zero-length string because we know that we at least read the - // '/' character. - lib_name[strlen(lib_name) - 1] = '\0'; - } else { - // No library name found, just record the raw address range. - snprintf(lib_name, kLibNameLen, - "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); - } - LOG(isolate, SharedLibraryEvent(lib_name, start, end)); - } else { - // Entry not describing executable data. Skip to end of line to set up - // reading the next entry. - do { - c = getc(fp); - } while ((c != EOF) && (c != '\n')); - if (c == EOF) break; - } - } - free(lib_name); - fclose(fp); -} - - -void OS::SignalCodeMovingGC() { - // Support for ll_prof.py. 
- // - // The Linux profiler built into the kernel logs all mmap's with - // PROT_EXEC so that analysis tools can properly attribute ticks. We - // do a mmap with a name known by ll_prof.py and immediately munmap - // it. This injects a GC marker into the stream of events generated - // by the kernel and allows us to synchronize V8 code log and the - // kernel log. - int size = sysconf(_SC_PAGESIZE); - FILE* f = fopen(FLAG_gc_fake_mmap, "w+"); - if (f == NULL) { - OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap); - OS::Abort(); - } - void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, - fileno(f), 0); - ASSERT(addr != MAP_FAILED); - OS::Free(addr, size); - fclose(f); -} - - - -// Constants used for mmap. -static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* reservation = mmap(OS::GetRandomMmapAddr(), - request_size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - if (reservation == MAP_FAILED) return; - - Address base = static_cast<Address>(reservation); - Address aligned_base = RoundUp(base, alignment); - ASSERT_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. 
- if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - ASSERT_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - ASSERT(aligned_size == request_size); - - address_ = static_cast<void*>(aligned_base); - size_ = aligned_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - return UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - OS::Guard(address, OS::CommitPageSize()); - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(OS::GetRandomMmapAddr(), - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - if (MAP_FAILED == mmap(base, - size, - prot, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return mmap(base, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { - return munmap(base, size) == 0; -} - - -bool VirtualMemory::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-posix.cc nodejs-0.11.15/deps/v8/src/platform-posix.cc --- nodejs-0.11.13/deps/v8/src/platform-posix.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-posix.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,810 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for POSIX goes here. This is not a platform on its -// own, but contains the parts which are the same across the POSIX platforms -// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX. - -#include <dlfcn.h> -#include <pthread.h> -#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) -#include <pthread_np.h> // for pthread_set_name_np -#endif -#include <sched.h> // for sched_yield -#include <unistd.h> -#include <errno.h> -#include <time.h> - -#include <sys/mman.h> -#include <sys/socket.h> -#include <sys/resource.h> -#include <sys/time.h> -#include <sys/types.h> -#include <sys/stat.h> -#if defined(__linux__) -#include <sys/prctl.h> // for prctl -#endif -#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ - defined(__NetBSD__) || defined(__OpenBSD__) -#include <sys/sysctl.h> // for sysctl -#endif - -#include <arpa/inet.h> -#include <netinet/in.h> -#include <netdb.h> - -#undef MAP_TYPE - -#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) -#define LOG_TAG "v8" -#include <android/log.h> -#endif - -#include "v8.h" - -#include "codegen.h" -#include "isolate-inl.h" -#include "platform.h" - -namespace v8 { -namespace internal { - -// 0 is never a valid thread id. -static const pthread_t kNoThread = (pthread_t) 0; - - -uint64_t OS::CpuFeaturesImpliedByPlatform() { -#if V8_OS_MACOSX - // Mac OS X requires all these to install so we can assume they are present. 
- // These constants are defined by the CPUid instructions. - const uint64_t one = 1; - return (one << SSE2) | (one << CMOV); -#else - return 0; // Nothing special about the other systems. -#endif -} - - -// Maximum size of the virtual memory. 0 means there is no artificial -// limit. - -intptr_t OS::MaxVirtualMemory() { - struct rlimit limit; - int result = getrlimit(RLIMIT_DATA, &limit); - if (result != 0) return 0; - return limit.rlim_cur; -} - - -uint64_t OS::TotalPhysicalMemory() { -#if V8_OS_MACOSX - int mib[2]; - mib[0] = CTL_HW; - mib[1] = HW_MEMSIZE; - int64_t size = 0; - size_t len = sizeof(size); - if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) { - UNREACHABLE(); - return 0; - } - return static_cast<uint64_t>(size); -#elif V8_OS_FREEBSD - int pages, page_size; - size_t size = sizeof(pages); - sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0); - sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0); - if (pages == -1 || page_size == -1) { - UNREACHABLE(); - return 0; - } - return static_cast<uint64_t>(pages) * page_size; -#elif V8_OS_CYGWIN - MEMORYSTATUS memory_info; - memory_info.dwLength = sizeof(memory_info); - if (!GlobalMemoryStatus(&memory_info)) { - UNREACHABLE(); - return 0; - } - return static_cast<uint64_t>(memory_info.dwTotalPhys); -#elif V8_OS_QNX - struct stat stat_buf; - if (stat("/proc", &stat_buf) != 0) { - UNREACHABLE(); - return 0; - } - return static_cast<uint64_t>(stat_buf.st_size); -#else - intptr_t pages = sysconf(_SC_PHYS_PAGES); - intptr_t page_size = sysconf(_SC_PAGESIZE); - if (pages == -1 || page_size == -1) { - UNREACHABLE(); - return 0; - } - return static_cast<uint64_t>(pages) * page_size; -#endif -} - - -int OS::ActivationFrameAlignment() { -#if V8_TARGET_ARCH_ARM - // On EABI ARM targets this is required for fp correctness in the - // runtime system. 
- return 8; -#elif V8_TARGET_ARCH_MIPS - return 8; -#else - // Otherwise we just assume 16 byte alignment, i.e.: - // - With gcc 4.4 the tree vectorization optimizer can generate code - // that requires 16 byte alignment such as movdqa on x86. - // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned; - // see "Mac OS X ABI Function Call Guide" - return 16; -#endif -} - - -intptr_t OS::CommitPageSize() { - static intptr_t page_size = getpagesize(); - return page_size; -} - - -void OS::Free(void* address, const size_t size) { - // TODO(1240712): munmap has a return value which is ignored here. - int result = munmap(address, size); - USE(result); - ASSERT(result == 0); -} - - -// Get rid of writable permission on code allocations. -void OS::ProtectCode(void* address, const size_t size) { -#if V8_OS_CYGWIN - DWORD old_protect; - VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); -#elif V8_OS_NACL - // The Native Client port of V8 uses an interpreter, so - // code pages don't need PROT_EXEC. - mprotect(address, size, PROT_READ); -#else - mprotect(address, size, PROT_READ | PROT_EXEC); -#endif -} - - -// Create guard pages. -void OS::Guard(void* address, const size_t size) { -#if V8_OS_CYGWIN - DWORD oldprotect; - VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); -#else - mprotect(address, size, PROT_NONE); -#endif -} - - -void* OS::GetRandomMmapAddr() { -#if V8_OS_NACL - // TODO(bradchen): restore randomization once Native Client gets - // smarter about using mmap address hints. - // See http://code.google.com/p/nativeclient/issues/3341 - return NULL; -#endif - Isolate* isolate = Isolate::UncheckedCurrent(); - // Note that the current isolate isn't set up in a call path via - // CpuFeatures::Probe. We don't care about randomization in this case because - // the code page is immediately freed. 
- if (isolate != NULL) { - uintptr_t raw_addr; - isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr)); -#if V8_TARGET_ARCH_X64 - // Currently available CPUs have 48 bits of virtual addressing. Truncate - // the hint address to 46 bits to give the kernel a fighting chance of - // fulfilling our placement request. - raw_addr &= V8_UINT64_C(0x3ffffffff000); -#else - raw_addr &= 0x3ffff000; - -# ifdef __sun - // For our Solaris/illumos mmap hint, we pick a random address in the bottom - // half of the top half of the address space (that is, the third quarter). - // Because we do not MAP_FIXED, this will be treated only as a hint -- the - // system will not fail to mmap() because something else happens to already - // be mapped at our random address. We deliberately set the hint high enough - // to get well above the system's break (that is, the heap); Solaris and - // illumos will try the hint and if that fails allocate as if there were - // no hint at all. The high hint prevents the break from getting hemmed in - // at low values, ceding half of the address space to the system heap. - raw_addr += 0x80000000; -# else - // The range 0x20000000 - 0x60000000 is relatively unpopulated across a - // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos - // 10.6 and 10.7. - raw_addr += 0x20000000; -# endif -#endif - return reinterpret_cast<void*>(raw_addr); - } - return NULL; -} - - -size_t OS::AllocateAlignment() { - return static_cast<size_t>(sysconf(_SC_PAGESIZE)); -} - - -void OS::Sleep(int milliseconds) { - useconds_t ms = static_cast<useconds_t>(milliseconds); - usleep(1000 * ms); -} - - -void OS::Abort() { - if (FLAG_hard_abort) { - V8_IMMEDIATE_CRASH(); - } - // Redirect to std abort to signal abnormal program termination. 
- abort(); -} - - -void OS::DebugBreak() { -#if V8_HOST_ARCH_ARM - asm("bkpt 0"); -#elif V8_HOST_ARCH_ARM64 - asm("brk 0"); -#elif V8_HOST_ARCH_MIPS - asm("break"); -#elif V8_HOST_ARCH_IA32 -#if defined(__native_client__) - asm("hlt"); -#else - asm("int $3"); -#endif // __native_client__ -#elif V8_HOST_ARCH_X64 - asm("int $3"); -#else -#error Unsupported host architecture. -#endif -} - - -// ---------------------------------------------------------------------------- -// Math functions - -double modulo(double x, double y) { - return std::fmod(x, y); -} - - -#define UNARY_MATH_FUNCTION(name, generator) \ -static UnaryMathFunction fast_##name##_function = NULL; \ -void init_fast_##name##_function() { \ - fast_##name##_function = generator; \ -} \ -double fast_##name(double x) { \ - return (*fast_##name##_function)(x); \ -} - -UNARY_MATH_FUNCTION(exp, CreateExpFunction()) -UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction()) - -#undef UNARY_MATH_FUNCTION - - -void lazily_initialize_fast_exp() { - if (fast_exp_function == NULL) { - init_fast_exp_function(); - } -} - - -double OS::nan_value() { - // NAN from math.h is defined in C99 and not in POSIX. - return NAN; -} - - -int OS::GetCurrentProcessId() { - return static_cast<int>(getpid()); -} - - -// ---------------------------------------------------------------------------- -// POSIX date/time support. 
-// - -int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { - struct rusage usage; - - if (getrusage(RUSAGE_SELF, &usage) < 0) return -1; - *secs = usage.ru_utime.tv_sec; - *usecs = usage.ru_utime.tv_usec; - return 0; -} - - -double OS::TimeCurrentMillis() { - return Time::Now().ToJsTime(); -} - - -class TimezoneCache {}; - - -TimezoneCache* OS::CreateTimezoneCache() { - return NULL; -} - - -void OS::DisposeTimezoneCache(TimezoneCache* cache) { - ASSERT(cache == NULL); -} - - -void OS::ClearTimezoneCache(TimezoneCache* cache) { - ASSERT(cache == NULL); -} - - -double OS::DaylightSavingsOffset(double time, TimezoneCache*) { - if (std::isnan(time)) return nan_value(); - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return nan_value(); - return t->tm_isdst > 0 ? 3600 * msPerSecond : 0; -} - - -int OS::GetLastError() { - return errno; -} - - -// ---------------------------------------------------------------------------- -// POSIX stdio support. -// - -FILE* OS::FOpen(const char* path, const char* mode) { - FILE* file = fopen(path, mode); - if (file == NULL) return NULL; - struct stat file_stat; - if (fstat(fileno(file), &file_stat) != 0) return NULL; - bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0); - if (is_regular_file) return file; - fclose(file); - return NULL; -} - - -bool OS::Remove(const char* path) { - return (remove(path) == 0); -} - - -FILE* OS::OpenTemporaryFile() { - return tmpfile(); -} - - -const char* const OS::LogFileOpenMode = "w"; - - -void OS::Print(const char* format, ...) { - va_list args; - va_start(args, format); - VPrint(format, args); - va_end(args); -} - - -void OS::VPrint(const char* format, va_list args) { -#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) - __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); -#else - vprintf(format, args); -#endif -} - - -void OS::FPrint(FILE* out, const char* format, ...) 
{ - va_list args; - va_start(args, format); - VFPrint(out, format, args); - va_end(args); -} - - -void OS::VFPrint(FILE* out, const char* format, va_list args) { -#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) - __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); -#else - vfprintf(out, format, args); -#endif -} - - -void OS::PrintError(const char* format, ...) { - va_list args; - va_start(args, format); - VPrintError(format, args); - va_end(args); -} - - -void OS::VPrintError(const char* format, va_list args) { -#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) - __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args); -#else - vfprintf(stderr, format, args); -#endif -} - - -int OS::SNPrintF(Vector<char> str, const char* format, ...) { - va_list args; - va_start(args, format); - int result = VSNPrintF(str, format, args); - va_end(args); - return result; -} - - -int OS::VSNPrintF(Vector<char> str, - const char* format, - va_list args) { - int n = vsnprintf(str.start(), str.length(), format, args); - if (n < 0 || n >= str.length()) { - // If the length is zero, the assignment fails. - if (str.length() > 0) - str[str.length() - 1] = '\0'; - return -1; - } else { - return n; - } -} - - -#if V8_TARGET_ARCH_IA32 -static void MemMoveWrapper(void* dest, const void* src, size_t size) { - memmove(dest, src, size); -} - - -// Initialize to library version so we can call this at any time during startup. -static OS::MemMoveFunction memmove_function = &MemMoveWrapper; - -// Defined in codegen-ia32.cc. -OS::MemMoveFunction CreateMemMoveFunction(); - -// Copy memory area. No restrictions. -void OS::MemMove(void* dest, const void* src, size_t size) { - if (size == 0) return; - // Note: here we rely on dependent reads being ordered. This is true - // on all architectures we currently support. 
- (*memmove_function)(dest, src, size); -} - -#elif defined(V8_HOST_ARCH_ARM) -void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest, - const uint8_t* src, - size_t chars) { - uint16_t *limit = dest + chars; - while (dest < limit) { - *dest++ = static_cast<uint16_t>(*src++); - } -} - - -OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper; -OS::MemCopyUint16Uint8Function OS::memcopy_uint16_uint8_function = - &OS::MemCopyUint16Uint8Wrapper; -// Defined in codegen-arm.cc. -OS::MemCopyUint8Function CreateMemCopyUint8Function( - OS::MemCopyUint8Function stub); -OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( - OS::MemCopyUint16Uint8Function stub); - -#elif defined(V8_HOST_ARCH_MIPS) -OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper; -// Defined in codegen-mips.cc. -OS::MemCopyUint8Function CreateMemCopyUint8Function( - OS::MemCopyUint8Function stub); -#endif - - -void OS::PostSetUp() { -#if V8_TARGET_ARCH_IA32 - OS::MemMoveFunction generated_memmove = CreateMemMoveFunction(); - if (generated_memmove != NULL) { - memmove_function = generated_memmove; - } -#elif defined(V8_HOST_ARCH_ARM) - OS::memcopy_uint8_function = - CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper); - OS::memcopy_uint16_uint8_function = - CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper); -#elif defined(V8_HOST_ARCH_MIPS) - OS::memcopy_uint8_function = - CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper); -#endif - // fast_exp is initialized lazily. - init_fast_sqrt_function(); -} - - -// ---------------------------------------------------------------------------- -// POSIX string support. -// - -char* OS::StrChr(char* str, int c) { - return strchr(str, c); -} - - -void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) { - strncpy(dest.start(), src, n); -} - - -// ---------------------------------------------------------------------------- -// POSIX thread support. 
-// - -class Thread::PlatformData : public Malloced { - public: - PlatformData() : thread_(kNoThread) {} - pthread_t thread_; // Thread handle for pthread. - // Synchronizes thread creation - Mutex thread_creation_mutex_; -}; - -Thread::Thread(const Options& options) - : data_(new PlatformData), - stack_size_(options.stack_size()), - start_semaphore_(NULL) { - if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) { - stack_size_ = PTHREAD_STACK_MIN; - } - set_name(options.name()); -} - - -Thread::~Thread() { - delete data_; -} - - -static void SetThreadName(const char* name) { -#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD - pthread_set_name_np(pthread_self(), name); -#elif V8_OS_NETBSD - STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP); - pthread_setname_np(pthread_self(), "%s", name); -#elif V8_OS_MACOSX - // pthread_setname_np is only available in 10.6 or later, so test - // for it at runtime. - int (*dynamic_pthread_setname_np)(const char*); - *reinterpret_cast<void**>(&dynamic_pthread_setname_np) = - dlsym(RTLD_DEFAULT, "pthread_setname_np"); - if (dynamic_pthread_setname_np == NULL) - return; - - // Mac OS X does not expose the length limit of the name, so hardcode it. - static const int kMaxNameLength = 63; - STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength); - dynamic_pthread_setname_np(name); -#elif defined(PR_SET_NAME) - prctl(PR_SET_NAME, - reinterpret_cast<unsigned long>(name), // NOLINT - 0, 0, 0); -#endif -} - - -static void* ThreadEntry(void* arg) { - Thread* thread = reinterpret_cast<Thread*>(arg); - // We take the lock here to make sure that pthread_create finished first since - // we don't know which thread will run first (the original thread or the new - // one). 
- { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); } - SetThreadName(thread->name()); - ASSERT(thread->data()->thread_ != kNoThread); - thread->NotifyStartedAndRun(); - return NULL; -} - - -void Thread::set_name(const char* name) { - strncpy(name_, name, sizeof(name_)); - name_[sizeof(name_) - 1] = '\0'; -} - - -void Thread::Start() { - int result; - pthread_attr_t attr; - memset(&attr, 0, sizeof(attr)); - result = pthread_attr_init(&attr); - ASSERT_EQ(0, result); - // Native client uses default stack size. -#if !V8_OS_NACL - if (stack_size_ > 0) { - result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_)); - ASSERT_EQ(0, result); - } -#endif - { - LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_); - result = pthread_create(&data_->thread_, &attr, ThreadEntry, this); - } - ASSERT_EQ(0, result); - result = pthread_attr_destroy(&attr); - ASSERT_EQ(0, result); - ASSERT(data_->thread_ != kNoThread); - USE(result); -} - - -void Thread::Join() { - pthread_join(data_->thread_, NULL); -} - - -void Thread::YieldCPU() { - int result = sched_yield(); - ASSERT_EQ(0, result); - USE(result); -} - - -static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) { -#if V8_OS_CYGWIN - // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps - // because pthread_key_t is a pointer type on Cygwin. This will probably not - // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway. 
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t)); - intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key); - return static_cast<Thread::LocalStorageKey>(ptr_key); -#else - return static_cast<Thread::LocalStorageKey>(pthread_key); -#endif -} - - -static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) { -#if V8_OS_CYGWIN - STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t)); - intptr_t ptr_key = static_cast<intptr_t>(local_key); - return reinterpret_cast<pthread_key_t>(ptr_key); -#else - return static_cast<pthread_key_t>(local_key); -#endif -} - - -#ifdef V8_FAST_TLS_SUPPORTED - -static Atomic32 tls_base_offset_initialized = 0; -intptr_t kMacTlsBaseOffset = 0; - -// It's safe to do the initialization more that once, but it has to be -// done at least once. -static void InitializeTlsBaseOffset() { - const size_t kBufferSize = 128; - char buffer[kBufferSize]; - size_t buffer_size = kBufferSize; - int ctl_name[] = { CTL_KERN , KERN_OSRELEASE }; - if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) { - V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version"); - } - // The buffer now contains a string of the form XX.YY.ZZ, where - // XX is the major kernel version component. - // Make sure the buffer is 0-terminated. - buffer[kBufferSize - 1] = '\0'; - char* period_pos = strchr(buffer, '.'); - *period_pos = '\0'; - int kernel_version_major = - static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT - // The constants below are taken from pthreads.s from the XNU kernel - // sources archive at www.opensource.apple.com. - if (kernel_version_major < 11) { - // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the - // same offsets. -#if V8_HOST_ARCH_IA32 - kMacTlsBaseOffset = 0x48; -#else - kMacTlsBaseOffset = 0x60; -#endif - } else { - // 11.x.x (Lion) changed the offset. 
- kMacTlsBaseOffset = 0; - } - - Release_Store(&tls_base_offset_initialized, 1); -} - - -static void CheckFastTls(Thread::LocalStorageKey key) { - void* expected = reinterpret_cast<void*>(0x1234CAFE); - Thread::SetThreadLocal(key, expected); - void* actual = Thread::GetExistingThreadLocal(key); - if (expected != actual) { - V8_Fatal(__FILE__, __LINE__, - "V8 failed to initialize fast TLS on current kernel"); - } - Thread::SetThreadLocal(key, NULL); -} - -#endif // V8_FAST_TLS_SUPPORTED - - -Thread::LocalStorageKey Thread::CreateThreadLocalKey() { -#ifdef V8_FAST_TLS_SUPPORTED - bool check_fast_tls = false; - if (tls_base_offset_initialized == 0) { - check_fast_tls = true; - InitializeTlsBaseOffset(); - } -#endif - pthread_key_t key; - int result = pthread_key_create(&key, NULL); - ASSERT_EQ(0, result); - USE(result); - LocalStorageKey local_key = PthreadKeyToLocalKey(key); -#ifdef V8_FAST_TLS_SUPPORTED - // If we just initialized fast TLS support, make sure it works. - if (check_fast_tls) CheckFastTls(local_key); -#endif - return local_key; -} - - -void Thread::DeleteThreadLocalKey(LocalStorageKey key) { - pthread_key_t pthread_key = LocalKeyToPthreadKey(key); - int result = pthread_key_delete(pthread_key); - ASSERT_EQ(0, result); - USE(result); -} - - -void* Thread::GetThreadLocal(LocalStorageKey key) { - pthread_key_t pthread_key = LocalKeyToPthreadKey(key); - return pthread_getspecific(pthread_key); -} - - -void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - pthread_key_t pthread_key = LocalKeyToPthreadKey(key); - int result = pthread_setspecific(pthread_key, value); - ASSERT_EQ(0, result); - USE(result); -} - - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-qnx.cc nodejs-0.11.15/deps/v8/src/platform-qnx.cc --- nodejs-0.11.13/deps/v8/src/platform-qnx.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-qnx.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,401 +0,0 @@ -// Copyright 2013 the V8 
project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for QNX goes here. For the POSIX-compatible -// parts the implementation is in platform-posix.cc. - -#include <pthread.h> -#include <semaphore.h> -#include <signal.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <sys/types.h> -#include <stdlib.h> -#include <ucontext.h> -#include <backtrace.h> - -// QNX requires memory pages to be marked as executable. 
-// Otherwise, the OS raises an exception when executing code in that page. -#include <sys/types.h> // mmap & munmap -#include <sys/mman.h> // mmap & munmap -#include <sys/stat.h> // open -#include <fcntl.h> // open -#include <unistd.h> // sysconf -#include <strings.h> // index -#include <errno.h> -#include <stdarg.h> -#include <sys/procfs.h> - -#undef MAP_TYPE - -#include "v8.h" - -#include "platform.h" -#include "v8threads.h" -#include "vm-state-inl.h" - - -namespace v8 { -namespace internal { - -// 0 is never a valid thread id on Qnx since tids and pids share a -// name space and pid 0 is reserved (see man 2 kill). -static const pthread_t kNoThread = (pthread_t) 0; - - -#ifdef __arm__ - -bool OS::ArmUsingHardFloat() { - // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify - // the Floating Point ABI used (PCS stands for Procedure Call Standard). - // We use these as well as a couple of other defines to statically determine - // what FP ABI used. - // GCC versions 4.4 and below don't support hard-fp. - // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or - // __ARM_PCS_VFP. - -#define GCC_VERSION (__GNUC__ * 10000 \ - + __GNUC_MINOR__ * 100 \ - + __GNUC_PATCHLEVEL__) -#if GCC_VERSION >= 40600 -#if defined(__ARM_PCS_VFP) - return true; -#else - return false; -#endif - -#elif GCC_VERSION < 40500 - return false; - -#else -#if defined(__ARM_PCS_VFP) - return true; -#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \ - !defined(__VFP_FP__) - return false; -#else -#error "Your version of GCC does not report the FP ABI compiled for." 
\ - "Please report it on this issue" \ - "http://code.google.com/p/v8/issues/detail?id=2140" - -#endif -#endif -#undef GCC_VERSION -} - -#endif // __arm__ - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return t->tm_zone; -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - time_t tv = time(NULL); - struct tm* t = localtime(&tv); - // tm_gmtoff includes any daylight savings offset, so subtract it. - return static_cast<double>(t->tm_gmtoff * msPerSecond - - (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - const size_t msize = RoundUp(requested, AllocateAlignment()); - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - void* addr = OS::GetRandomMmapAddr(); - void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mbase == MAP_FAILED) { - LOG(i::Isolate::Current(), - StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(OS::GetRandomMmapAddr(), - size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fileno(file), - 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* 
OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(OS::GetRandomMmapAddr(), - size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fileno(file), - 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) OS::Free(memory_, size_); - fclose(file_); -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - procfs_mapinfo *mapinfos = NULL, *mapinfo; - int proc_fd, num, i; - - struct { - procfs_debuginfo info; - char buff[PATH_MAX]; - } map; - - char buf[PATH_MAX + 1]; - snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid()); - - if ((proc_fd = open(buf, O_RDONLY)) == -1) { - close(proc_fd); - return; - } - - /* Get the number of map entries. */ - if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) { - close(proc_fd); - return; - } - - mapinfos = reinterpret_cast<procfs_mapinfo *>( - malloc(num * sizeof(procfs_mapinfo))); - if (mapinfos == NULL) { - close(proc_fd); - return; - } - - /* Fill the map entries. */ - if (devctl(proc_fd, DCMD_PROC_PAGEDATA, - mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) { - free(mapinfos); - close(proc_fd); - return; - } - - for (i = 0; i < num; i++) { - mapinfo = mapinfos + i; - if (mapinfo->flags & MAP_ELF) { - map.info.vaddr = mapinfo->vaddr; - if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) { - continue; - } - LOG(isolate, SharedLibraryEvent(map.info.path, - mapinfo->vaddr, - mapinfo->vaddr + mapinfo->size)); - } - } - free(mapinfos); - close(proc_fd); -} - - -void OS::SignalCodeMovingGC() { -} - - -// Constants used for mmap. 
-static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* reservation = mmap(OS::GetRandomMmapAddr(), - request_size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, - kMmapFd, - kMmapFdOffset); - if (reservation == MAP_FAILED) return; - - Address base = static_cast<Address>(reservation); - Address aligned_base = RoundUp(base, alignment); - ASSERT_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. - if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - ASSERT_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - ASSERT(aligned_size == request_size); - - address_ = static_cast<void*>(aligned_base); - size_ = aligned_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - return 
UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - OS::Guard(address, OS::CommitPageSize()); - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(OS::GetRandomMmapAddr(), - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, - kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(base, - size, - prot, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return mmap(base, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { - return munmap(base, size) == 0; -} - - -bool VirtualMemory::HasLazyCommits() { - return false; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-solaris.cc nodejs-0.11.15/deps/v8/src/platform-solaris.cc --- nodejs-0.11.13/deps/v8/src/platform-solaris.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-solaris.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,344 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible -// parts, the implementation is in platform-posix.cc. - -#ifdef __sparc -# error "V8 does not support the SPARC CPU architecture." 
-#endif - -#include <sys/stack.h> // for stack alignment -#include <unistd.h> // getpagesize(), usleep() -#include <sys/mman.h> // mmap() -#include <ucontext.h> // walkstack(), getcontext() -#include <dlfcn.h> // dladdr -#include <pthread.h> -#include <semaphore.h> -#include <time.h> -#include <sys/time.h> // gettimeofday(), timeradd() -#include <errno.h> -#include <ieeefp.h> // finite() -#include <signal.h> // sigemptyset(), etc -#include <sys/regset.h> - - -#undef MAP_TYPE - -#include "v8.h" - -#include "platform.h" -#include "v8threads.h" -#include "vm-state-inl.h" - - -// It seems there is a bug in some Solaris distributions (experienced in -// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to -// access signbit() despite the availability of other C99 math functions. -#ifndef signbit -namespace std { -// Test sign - usually defined in math.h -int signbit(double x) { - // We need to take care of the special case of both positive and negative - // versions of zero. - if (x == 0) { - return fpclass(x) & FP_NZERO; - } else { - // This won't detect negative NaN but that should be okay since we don't - // assume that behavior. - return x < 0; - } -} -} // namespace std -#endif // signbit - -namespace v8 { -namespace internal { - - -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - if (std::isnan(time)) return ""; - time_t tv = static_cast<time_t>(std::floor(time/msPerSecond)); - struct tm* t = localtime(&tv); - if (NULL == t) return ""; - return tzname[0]; // The location of the timezone string on Solaris. -} - - -double OS::LocalTimeOffset(TimezoneCache* cache) { - tzset(); - return -static_cast<double>(timezone * msPerSecond); -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - const size_t msize = RoundUp(requested, getpagesize()); - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); - - if (mbase == MAP_FAILED) { - LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); - return NULL; - } - *allocated = msize; - return mbase; -} - - -class PosixMemoryMappedFile : public OS::MemoryMappedFile { - public: - PosixMemoryMappedFile(FILE* file, void* memory, int size) - : file_(file), memory_(memory), size_(size) { } - virtual ~PosixMemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - FILE* file_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "r+"); - if (file == NULL) return NULL; - - fseek(file, 0, SEEK_END); - int size = ftell(file); - - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - FILE* file = fopen(name, "w+"); - if (file == NULL) return NULL; - int result = fwrite(initial, size, 1, file); - if (result < 1) { - fclose(file); - return NULL; - } - void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); - return new PosixMemoryMappedFile(file, memory, size); -} - - -PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); - fclose(file_); -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { -} - - -void OS::SignalCodeMovingGC() { -} - - -struct StackWalker { - Vector<OS::StackFrame>& frames; - int index; -}; - - -static int StackWalkCallback(uintptr_t pc, int signo, void* data) { - struct StackWalker* walker = static_cast<struct StackWalker*>(data); - Dl_info info; - - int i = walker->index; - - walker->frames[i].address = reinterpret_cast<void*>(pc); - - // Make sure line termination is in place. 
- walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0'; - - Vector<char> text = MutableCStrVector(walker->frames[i].text, - OS::kStackWalkMaxTextLen); - - if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) { - OS::SNPrintF(text, "[0x%p]", pc); - } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) { - // We have symbol info. - OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc); - } else { - // No local symbol info. - OS::SNPrintF(text, - "%s'0x%p [0x%p]", - info.dli_fname, - pc - reinterpret_cast<uintptr_t>(info.dli_fbase), - pc); - } - walker->index++; - return 0; -} - - -// Constants used for mmap. -static const int kMmapFd = -1; -static const int kMmapFdOffset = 0; - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* reservation = mmap(OS::GetRandomMmapAddr(), - request_size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - if (reservation == MAP_FAILED) return; - - Address base = static_cast<Address>(reservation); - Address aligned_base = RoundUp(base, alignment); - ASSERT_LE(base, aligned_base); - - // Unmap extra memory reserved before and after the desired block. 
- if (aligned_base != base) { - size_t prefix_size = static_cast<size_t>(aligned_base - base); - OS::Free(base, prefix_size); - request_size -= prefix_size; - } - - size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); - ASSERT_LE(aligned_size, request_size); - - if (aligned_size != request_size) { - size_t suffix_size = request_size - aligned_size; - OS::Free(aligned_base + aligned_size, suffix_size); - request_size -= suffix_size; - } - - ASSERT(aligned_size == request_size); - - address_ = static_cast<void*>(aligned_base); - size_ = aligned_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - return UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - OS::Guard(address, OS::CommitPageSize()); - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(OS::GetRandomMmapAddr(), - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - kMmapFd, - kMmapFdOffset); - - if (result == MAP_FAILED) return NULL; - - return result; -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { - int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - if (MAP_FAILED == mmap(base, - size, - prot, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, - kMmapFd, - kMmapFdOffset)) { - return false; - } - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return mmap(base, - size, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, - kMmapFd, - kMmapFdOffset) != MAP_FAILED; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { - return munmap(base, size) == 0; -} - - -bool VirtualMemory::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/platform-win32.cc nodejs-0.11.15/deps/v8/src/platform-win32.cc --- nodejs-0.11.13/deps/v8/src/platform-win32.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/platform-win32.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,1522 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Platform-specific code for Win32. - -// Secure API functions are not available using MinGW with msvcrt.dll -// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to -// disable definition of secure API functions in standard headers that -// would conflict with our own implementation. -#ifdef __MINGW32__ -#include <_mingw.h> -#ifdef MINGW_HAS_SECURE_API -#undef MINGW_HAS_SECURE_API -#endif // MINGW_HAS_SECURE_API -#endif // __MINGW32__ - -#include "win32-headers.h" - -#include "v8.h" - -#include "codegen.h" -#include "isolate-inl.h" -#include "platform.h" -#include "simulator.h" -#include "vm-state-inl.h" - -#ifdef _MSC_VER - -// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually -// defined in strings.h. -int strncasecmp(const char* s1, const char* s2, int n) { - return _strnicmp(s1, s2, n); -} - -#endif // _MSC_VER - - -// Extra functions for MinGW. Most of these are the _s functions which are in -// the Microsoft Visual Studio C++ CRT. 
-#ifdef __MINGW32__ - - -#ifndef __MINGW64_VERSION_MAJOR - -#define _TRUNCATE 0 -#define STRUNCATE 80 - -inline void MemoryBarrier() { - int barrier = 0; - __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier)); -} - -#endif // __MINGW64_VERSION_MAJOR - - -int localtime_s(tm* out_tm, const time_t* time) { - tm* posix_local_time_struct = localtime(time); - if (posix_local_time_struct == NULL) return 1; - *out_tm = *posix_local_time_struct; - return 0; -} - - -int fopen_s(FILE** pFile, const char* filename, const char* mode) { - *pFile = fopen(filename, mode); - return *pFile != NULL ? 0 : 1; -} - -int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count, - const char* format, va_list argptr) { - ASSERT(count == _TRUNCATE); - return _vsnprintf(buffer, sizeOfBuffer, format, argptr); -} - - -int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) { - CHECK(source != NULL); - CHECK(dest != NULL); - CHECK_GT(dest_size, 0); - - if (count == _TRUNCATE) { - while (dest_size > 0 && *source != 0) { - *(dest++) = *(source++); - --dest_size; - } - if (dest_size == 0) { - *(dest - 1) = 0; - return STRUNCATE; - } - } else { - while (dest_size > 0 && count > 0 && *source != 0) { - *(dest++) = *(source++); - --dest_size; - --count; - } - } - CHECK_GT(dest_size, 0); - *dest = 0; - return 0; -} - -#endif // __MINGW32__ - -namespace v8 { -namespace internal { - -intptr_t OS::MaxVirtualMemory() { - return 0; -} - - -#if V8_TARGET_ARCH_IA32 -static void MemMoveWrapper(void* dest, const void* src, size_t size) { - memmove(dest, src, size); -} - - -// Initialize to library version so we can call this at any time during startup. -static OS::MemMoveFunction memmove_function = &MemMoveWrapper; - -// Defined in codegen-ia32.cc. -OS::MemMoveFunction CreateMemMoveFunction(); - -// Copy memory area to disjoint memory area. -void OS::MemMove(void* dest, const void* src, size_t size) { - if (size == 0) return; - // Note: here we rely on dependent reads being ordered. 
This is true - // on all architectures we currently support. - (*memmove_function)(dest, src, size); -} - -#endif // V8_TARGET_ARCH_IA32 - -#ifdef _WIN64 -typedef double (*ModuloFunction)(double, double); -static ModuloFunction modulo_function = NULL; -// Defined in codegen-x64.cc. -ModuloFunction CreateModuloFunction(); - -void init_modulo_function() { - modulo_function = CreateModuloFunction(); -} - - -double modulo(double x, double y) { - // Note: here we rely on dependent reads being ordered. This is true - // on all architectures we currently support. - return (*modulo_function)(x, y); -} -#else // Win32 - -double modulo(double x, double y) { - // Workaround MS fmod bugs. ECMA-262 says: - // dividend is finite and divisor is an infinity => result equals dividend - // dividend is a zero and divisor is nonzero finite => result equals dividend - if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) && - !(x == 0 && (y != 0 && std::isfinite(y)))) { - x = fmod(x, y); - } - return x; -} - -#endif // _WIN64 - - -#define UNARY_MATH_FUNCTION(name, generator) \ -static UnaryMathFunction fast_##name##_function = NULL; \ -void init_fast_##name##_function() { \ - fast_##name##_function = generator; \ -} \ -double fast_##name(double x) { \ - return (*fast_##name##_function)(x); \ -} - -UNARY_MATH_FUNCTION(exp, CreateExpFunction()) -UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction()) - -#undef UNARY_MATH_FUNCTION - - -void lazily_initialize_fast_exp() { - if (fast_exp_function == NULL) { - init_fast_exp_function(); - } -} - - -void MathSetup() { -#ifdef _WIN64 - init_modulo_function(); -#endif - // fast_exp is initialized lazily. - init_fast_sqrt_function(); -} - - -class TimezoneCache { - public: - TimezoneCache() : initialized_(false) { } - - void Clear() { - initialized_ = false; - } - - // Initialize timezone information. The timezone information is obtained from - // windows. If we cannot get the timezone information we fall back to CET. 
- void InitializeIfNeeded() { - // Just return if timezone information has already been initialized. - if (initialized_) return; - - // Initialize POSIX time zone data. - _tzset(); - // Obtain timezone information from operating system. - memset(&tzinfo_, 0, sizeof(tzinfo_)); - if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) { - // If we cannot get timezone information we fall back to CET. - tzinfo_.Bias = -60; - tzinfo_.StandardDate.wMonth = 10; - tzinfo_.StandardDate.wDay = 5; - tzinfo_.StandardDate.wHour = 3; - tzinfo_.StandardBias = 0; - tzinfo_.DaylightDate.wMonth = 3; - tzinfo_.DaylightDate.wDay = 5; - tzinfo_.DaylightDate.wHour = 2; - tzinfo_.DaylightBias = -60; - } - - // Make standard and DST timezone names. - WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1, - std_tz_name_, kTzNameSize, NULL, NULL); - std_tz_name_[kTzNameSize - 1] = '\0'; - WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1, - dst_tz_name_, kTzNameSize, NULL, NULL); - dst_tz_name_[kTzNameSize - 1] = '\0'; - - // If OS returned empty string or resource id (like "@tzres.dll,-211") - // simply guess the name from the UTC bias of the timezone. - // To properly resolve the resource identifier requires a library load, - // which is not possible in a sandbox. - if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') { - OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1), - "%s Standard Time", - GuessTimezoneNameFromBias(tzinfo_.Bias)); - } - if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') { - OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1), - "%s Daylight Time", - GuessTimezoneNameFromBias(tzinfo_.Bias)); - } - // Timezone information initialized. - initialized_ = true; - } - - // Guess the name of the timezone from the bias. - // The guess is very biased towards the northern hemisphere. 
- const char* GuessTimezoneNameFromBias(int bias) { - static const int kHour = 60; - switch (-bias) { - case -9*kHour: return "Alaska"; - case -8*kHour: return "Pacific"; - case -7*kHour: return "Mountain"; - case -6*kHour: return "Central"; - case -5*kHour: return "Eastern"; - case -4*kHour: return "Atlantic"; - case 0*kHour: return "GMT"; - case +1*kHour: return "Central Europe"; - case +2*kHour: return "Eastern Europe"; - case +3*kHour: return "Russia"; - case +5*kHour + 30: return "India"; - case +8*kHour: return "China"; - case +9*kHour: return "Japan"; - case +12*kHour: return "New Zealand"; - default: return "Local"; - } - } - - - private: - static const int kTzNameSize = 128; - bool initialized_; - char std_tz_name_[kTzNameSize]; - char dst_tz_name_[kTzNameSize]; - TIME_ZONE_INFORMATION tzinfo_; - friend class Win32Time; -}; - - -// ---------------------------------------------------------------------------- -// The Time class represents time on win32. A timestamp is represented as -// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript -// timestamps are represented as a doubles in milliseconds since 00:00:00 UTC, -// January 1, 1970. - -class Win32Time { - public: - // Constructors. - Win32Time(); - explicit Win32Time(double jstime); - Win32Time(int year, int mon, int day, int hour, int min, int sec); - - // Convert timestamp to JavaScript representation. - double ToJSTime(); - - // Set timestamp to current time. - void SetToCurrentTime(); - - // Returns the local timezone offset in milliseconds east of UTC. This is - // the number of milliseconds you must add to UTC to get local time, i.e. - // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This - // routine also takes into account whether daylight saving is effect - // at the time. - int64_t LocalOffset(TimezoneCache* cache); - - // Returns the daylight savings time offset for the time in milliseconds. 
- int64_t DaylightSavingsOffset(TimezoneCache* cache); - - // Returns a string identifying the current timezone for the - // timestamp taking into account daylight saving. - char* LocalTimezone(TimezoneCache* cache); - - private: - // Constants for time conversion. - static const int64_t kTimeEpoc = 116444736000000000LL; - static const int64_t kTimeScaler = 10000; - static const int64_t kMsPerMinute = 60000; - - // Constants for timezone information. - static const bool kShortTzNames = false; - - // Return whether or not daylight savings time is in effect at this time. - bool InDST(TimezoneCache* cache); - - // Accessor for FILETIME representation. - FILETIME& ft() { return time_.ft_; } - - // Accessor for integer representation. - int64_t& t() { return time_.t_; } - - // Although win32 uses 64-bit integers for representing timestamps, - // these are packed into a FILETIME structure. The FILETIME structure - // is just a struct representing a 64-bit integer. The TimeStamp union - // allows access to both a FILETIME and an integer representation of - // the timestamp. - union TimeStamp { - FILETIME ft_; - int64_t t_; - }; - - TimeStamp time_; -}; - - -// Initialize timestamp to start of epoc. -Win32Time::Win32Time() { - t() = 0; -} - - -// Initialize timestamp from a JavaScript timestamp. -Win32Time::Win32Time(double jstime) { - t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc; -} - - -// Initialize timestamp from date/time components. -Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) { - SYSTEMTIME st; - st.wYear = year; - st.wMonth = mon; - st.wDay = day; - st.wHour = hour; - st.wMinute = min; - st.wSecond = sec; - st.wMilliseconds = 0; - SystemTimeToFileTime(&st, &ft()); -} - - -// Convert timestamp to JavaScript timestamp. -double Win32Time::ToJSTime() { - return static_cast<double>((t() - kTimeEpoc) / kTimeScaler); -} - - -// Set timestamp to current time. 
-void Win32Time::SetToCurrentTime() { - // The default GetSystemTimeAsFileTime has a ~15.5ms resolution. - // Because we're fast, we like fast timers which have at least a - // 1ms resolution. - // - // timeGetTime() provides 1ms granularity when combined with - // timeBeginPeriod(). If the host application for v8 wants fast - // timers, it can use timeBeginPeriod to increase the resolution. - // - // Using timeGetTime() has a drawback because it is a 32bit value - // and hence rolls-over every ~49days. - // - // To use the clock, we use GetSystemTimeAsFileTime as our base; - // and then use timeGetTime to extrapolate current time from the - // start time. To deal with rollovers, we resync the clock - // any time when more than kMaxClockElapsedTime has passed or - // whenever timeGetTime creates a rollover. - - static bool initialized = false; - static TimeStamp init_time; - static DWORD init_ticks; - static const int64_t kHundredNanosecondsPerSecond = 10000000; - static const int64_t kMaxClockElapsedTime = - 60*kHundredNanosecondsPerSecond; // 1 minute - - // If we are uninitialized, we need to resync the clock. - bool needs_resync = !initialized; - - // Get the current time. - TimeStamp time_now; - GetSystemTimeAsFileTime(&time_now.ft_); - DWORD ticks_now = timeGetTime(); - - // Check if we need to resync due to clock rollover. - needs_resync |= ticks_now < init_ticks; - - // Check if we need to resync due to elapsed time. - needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime; - - // Check if we need to resync due to backwards time change. - needs_resync |= time_now.t_ < init_time.t_; - - // Resync the clock if necessary. - if (needs_resync) { - GetSystemTimeAsFileTime(&init_time.ft_); - init_ticks = ticks_now = timeGetTime(); - initialized = true; - } - - // Finally, compute the actual time. Why is this so hard. 
- DWORD elapsed = ticks_now - init_ticks; - this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000); -} - - -// Return the local timezone offset in milliseconds east of UTC. This -// takes into account whether daylight saving is in effect at the time. -// Only times in the 32-bit Unix range may be passed to this function. -// Also, adding the time-zone offset to the input must not overflow. -// The function EquivalentTime() in date.js guarantees this. -int64_t Win32Time::LocalOffset(TimezoneCache* cache) { - cache->InitializeIfNeeded(); - - Win32Time rounded_to_second(*this); - rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler * - 1000 * kTimeScaler; - // Convert to local time using POSIX localtime function. - // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime() - // very slow. Other browsers use localtime(). - - // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to - // POSIX seconds past 1/1/1970 0:00:00. - double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000; - if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) { - return 0; - } - // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int. - time_t posix_time = static_cast<time_t>(unchecked_posix_time); - - // Convert to local time, as struct with fields for day, hour, year, etc. - tm posix_local_time_struct; - if (localtime_s(&posix_local_time_struct, &posix_time)) return 0; - - if (posix_local_time_struct.tm_isdst > 0) { - return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute; - } else if (posix_local_time_struct.tm_isdst == 0) { - return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute; - } else { - return cache->tzinfo_.Bias * -kMsPerMinute; - } -} - - -// Return whether or not daylight savings time is in effect at this time. -bool Win32Time::InDST(TimezoneCache* cache) { - cache->InitializeIfNeeded(); - - // Determine if DST is in effect at the specified time. 
- bool in_dst = false; - if (cache->tzinfo_.StandardDate.wMonth != 0 || - cache->tzinfo_.DaylightDate.wMonth != 0) { - // Get the local timezone offset for the timestamp in milliseconds. - int64_t offset = LocalOffset(cache); - - // Compute the offset for DST. The bias parameters in the timezone info - // are specified in minutes. These must be converted to milliseconds. - int64_t dstofs = - -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute; - - // If the local time offset equals the timezone bias plus the daylight - // bias then DST is in effect. - in_dst = offset == dstofs; - } - - return in_dst; -} - - -// Return the daylight savings time offset for this time. -int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) { - return InDST(cache) ? 60 * kMsPerMinute : 0; -} - - -// Returns a string identifying the current timezone for the -// timestamp taking into account daylight saving. -char* Win32Time::LocalTimezone(TimezoneCache* cache) { - // Return the standard or DST time zone name based on whether daylight - // saving is in effect at the given time. - return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_; -} - - -void OS::PostSetUp() { - // Math functions depend on CPU features therefore they are initialized after - // CPU. - MathSetup(); -#if V8_TARGET_ARCH_IA32 - OS::MemMoveFunction generated_memmove = CreateMemMoveFunction(); - if (generated_memmove != NULL) { - memmove_function = generated_memmove; - } -#endif -} - - -// Returns the accumulated user time for thread. -int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { - FILETIME dummy; - uint64_t usertime; - - // Get the amount of time that the thread has executed in user mode. - if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy, - reinterpret_cast<FILETIME*>(&usertime))) return -1; - - // Adjust the resolution to micro-seconds. 
- usertime /= 10; - - // Convert to seconds and microseconds - *secs = static_cast<uint32_t>(usertime / 1000000); - *usecs = static_cast<uint32_t>(usertime % 1000000); - return 0; -} - - -// Returns current time as the number of milliseconds since -// 00:00:00 UTC, January 1, 1970. -double OS::TimeCurrentMillis() { - return Time::Now().ToJsTime(); -} - - -TimezoneCache* OS::CreateTimezoneCache() { - return new TimezoneCache(); -} - - -void OS::DisposeTimezoneCache(TimezoneCache* cache) { - delete cache; -} - - -void OS::ClearTimezoneCache(TimezoneCache* cache) { - cache->Clear(); -} - - -// Returns a string identifying the current timezone taking into -// account daylight saving. -const char* OS::LocalTimezone(double time, TimezoneCache* cache) { - return Win32Time(time).LocalTimezone(cache); -} - - -// Returns the local time offset in milliseconds east of UTC without -// taking daylight savings time into account. -double OS::LocalTimeOffset(TimezoneCache* cache) { - // Use current time, rounded to the millisecond. - Win32Time t(TimeCurrentMillis()); - // Time::LocalOffset inlcudes any daylight savings offset, so subtract it. - return static_cast<double>(t.LocalOffset(cache) - - t.DaylightSavingsOffset(cache)); -} - - -// Returns the daylight savings offset in milliseconds for the given -// time. -double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) { - int64_t offset = Win32Time(time).DaylightSavingsOffset(cache); - return static_cast<double>(offset); -} - - -int OS::GetLastError() { - return ::GetLastError(); -} - - -int OS::GetCurrentProcessId() { - return static_cast<int>(::GetCurrentProcessId()); -} - - -// ---------------------------------------------------------------------------- -// Win32 console output. -// -// If a Win32 application is linked as a console application it has a normal -// standard output and standard error. In this case normal printf works fine -// for output. 
However, if the application is linked as a GUI application, -// the process doesn't have a console, and therefore (debugging) output is lost. -// This is the case if we are embedded in a windows program (like a browser). -// In order to be able to get debug output in this case the the debugging -// facility using OutputDebugString. This output goes to the active debugger -// for the process (if any). Else the output can be monitored using DBMON.EXE. - -enum OutputMode { - UNKNOWN, // Output method has not yet been determined. - CONSOLE, // Output is written to stdout. - ODS // Output is written to debug facility. -}; - -static OutputMode output_mode = UNKNOWN; // Current output mode. - - -// Determine if the process has a console for output. -static bool HasConsole() { - // Only check the first time. Eventual race conditions are not a problem, - // because all threads will eventually determine the same mode. - if (output_mode == UNKNOWN) { - // We cannot just check that the standard output is attached to a console - // because this would fail if output is redirected to a file. Therefore we - // say that a process does not have an output console if either the - // standard output handle is invalid or its file type is unknown. - if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE && - GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN) - output_mode = CONSOLE; - else - output_mode = ODS; - } - return output_mode == CONSOLE; -} - - -static void VPrintHelper(FILE* stream, const char* format, va_list args) { - if ((stream == stdout || stream == stderr) && !HasConsole()) { - // It is important to use safe print here in order to avoid - // overflowing the buffer. We might truncate the output, but this - // does not crash. 
- EmbeddedVector<char, 4096> buffer; - OS::VSNPrintF(buffer, format, args); - OutputDebugStringA(buffer.start()); - } else { - vfprintf(stream, format, args); - } -} - - -FILE* OS::FOpen(const char* path, const char* mode) { - FILE* result; - if (fopen_s(&result, path, mode) == 0) { - return result; - } else { - return NULL; - } -} - - -bool OS::Remove(const char* path) { - return (DeleteFileA(path) != 0); -} - - -FILE* OS::OpenTemporaryFile() { - // tmpfile_s tries to use the root dir, don't use it. - char tempPathBuffer[MAX_PATH]; - DWORD path_result = 0; - path_result = GetTempPathA(MAX_PATH, tempPathBuffer); - if (path_result > MAX_PATH || path_result == 0) return NULL; - UINT name_result = 0; - char tempNameBuffer[MAX_PATH]; - name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer); - if (name_result == 0) return NULL; - FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses. - if (result != NULL) { - Remove(tempNameBuffer); // Delete on close. - } - return result; -} - - -// Open log file in binary mode to avoid /n -> /r/n conversion. -const char* const OS::LogFileOpenMode = "wb"; - - -// Print (debug) message to console. -void OS::Print(const char* format, ...) { - va_list args; - va_start(args, format); - VPrint(format, args); - va_end(args); -} - - -void OS::VPrint(const char* format, va_list args) { - VPrintHelper(stdout, format, args); -} - - -void OS::FPrint(FILE* out, const char* format, ...) { - va_list args; - va_start(args, format); - VFPrint(out, format, args); - va_end(args); -} - - -void OS::VFPrint(FILE* out, const char* format, va_list args) { - VPrintHelper(out, format, args); -} - - -// Print error message to console. -void OS::PrintError(const char* format, ...) 
{ - va_list args; - va_start(args, format); - VPrintError(format, args); - va_end(args); -} - - -void OS::VPrintError(const char* format, va_list args) { - VPrintHelper(stderr, format, args); -} - - -int OS::SNPrintF(Vector<char> str, const char* format, ...) { - va_list args; - va_start(args, format); - int result = VSNPrintF(str, format, args); - va_end(args); - return result; -} - - -int OS::VSNPrintF(Vector<char> str, const char* format, va_list args) { - int n = _vsnprintf_s(str.start(), str.length(), _TRUNCATE, format, args); - // Make sure to zero-terminate the string if the output was - // truncated or if there was an error. - if (n < 0 || n >= str.length()) { - if (str.length() > 0) - str[str.length() - 1] = '\0'; - return -1; - } else { - return n; - } -} - - -char* OS::StrChr(char* str, int c) { - return const_cast<char*>(strchr(str, c)); -} - - -void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) { - // Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small. - size_t buffer_size = static_cast<size_t>(dest.length()); - if (n + 1 > buffer_size) // count for trailing '\0' - n = _TRUNCATE; - int result = strncpy_s(dest.start(), dest.length(), src, n); - USE(result); - ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE)); -} - - -#undef _TRUNCATE -#undef STRUNCATE - - -// Get the system's page size used by VirtualAlloc() or the next power -// of two. The reason for always returning a power of two is that the -// rounding up in OS::Allocate expects that. -static size_t GetPageSize() { - static size_t page_size = 0; - if (page_size == 0) { - SYSTEM_INFO info; - GetSystemInfo(&info); - page_size = RoundUpToPowerOf2(info.dwPageSize); - } - return page_size; -} - - -// The allocation alignment is the guaranteed alignment for -// VirtualAlloc'ed blocks of memory. 
-size_t OS::AllocateAlignment() { - static size_t allocate_alignment = 0; - if (allocate_alignment == 0) { - SYSTEM_INFO info; - GetSystemInfo(&info); - allocate_alignment = info.dwAllocationGranularity; - } - return allocate_alignment; -} - - -void* OS::GetRandomMmapAddr() { - Isolate* isolate = Isolate::UncheckedCurrent(); - // Note that the current isolate isn't set up in a call path via - // CpuFeatures::Probe. We don't care about randomization in this case because - // the code page is immediately freed. - if (isolate != NULL) { - // The address range used to randomize RWX allocations in OS::Allocate - // Try not to map pages into the default range that windows loads DLLs - // Use a multiple of 64k to prevent committing unused memory. - // Note: This does not guarantee RWX regions will be within the - // range kAllocationRandomAddressMin to kAllocationRandomAddressMax -#ifdef V8_HOST_ARCH_64_BIT - static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; - static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; -#else - static const intptr_t kAllocationRandomAddressMin = 0x04000000; - static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; -#endif - uintptr_t address = - (isolate->random_number_generator()->NextInt() << kPageSizeBits) | - kAllocationRandomAddressMin; - address &= kAllocationRandomAddressMax; - return reinterpret_cast<void *>(address); - } - return NULL; -} - - -static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { - LPVOID base = NULL; - - if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { - // For exectutable pages try and randomize the allocation address - for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { - base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection); - } - } - - // After three attempts give up and let the OS find an address to use. 
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); - - return base; -} - - -void* OS::Allocate(const size_t requested, - size_t* allocated, - bool is_executable) { - // VirtualAlloc rounds allocated size to page size automatically. - size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); - - // Windows XP SP2 allows Data Excution Prevention (DEP). - int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; - - LPVOID mbase = RandomizedVirtualAlloc(msize, - MEM_COMMIT | MEM_RESERVE, - prot); - - if (mbase == NULL) { - LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed")); - return NULL; - } - - ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); - - *allocated = msize; - return mbase; -} - - -void OS::Free(void* address, const size_t size) { - // TODO(1240712): VirtualFree has a return value which is ignored here. - VirtualFree(address, 0, MEM_RELEASE); - USE(size); -} - - -intptr_t OS::CommitPageSize() { - return 4096; -} - - -void OS::ProtectCode(void* address, const size_t size) { - DWORD old_protect; - VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); -} - - -void OS::Guard(void* address, const size_t size) { - DWORD oldprotect; - VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); -} - - -void OS::Sleep(int milliseconds) { - ::Sleep(milliseconds); -} - - -void OS::Abort() { - if (FLAG_hard_abort) { - V8_IMMEDIATE_CRASH(); - } - // Make the MSVCRT do a silent abort. 
- raise(SIGABRT); -} - - -void OS::DebugBreak() { -#ifdef _MSC_VER - // To avoid Visual Studio runtime support the following code can be used - // instead - // __asm { int 3 } - __debugbreak(); -#else - ::DebugBreak(); -#endif -} - - -class Win32MemoryMappedFile : public OS::MemoryMappedFile { - public: - Win32MemoryMappedFile(HANDLE file, - HANDLE file_mapping, - void* memory, - int size) - : file_(file), - file_mapping_(file_mapping), - memory_(memory), - size_(size) { } - virtual ~Win32MemoryMappedFile(); - virtual void* memory() { return memory_; } - virtual int size() { return size_; } - private: - HANDLE file_; - HANDLE file_mapping_; - void* memory_; - int size_; -}; - - -OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - // Open a physical file - HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); - if (file == INVALID_HANDLE_VALUE) return NULL; - - int size = static_cast<int>(GetFileSize(file, NULL)); - - // Create a file mapping for the physical file - HANDLE file_mapping = CreateFileMapping(file, NULL, - PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL); - if (file_mapping == NULL) return NULL; - - // Map a view of the file into memory - void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size); - return new Win32MemoryMappedFile(file, file_mapping, memory, size); -} - - -OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, - void* initial) { - // Open a physical file - HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL); - if (file == NULL) return NULL; - // Create a file mapping for the physical file - HANDLE file_mapping = CreateFileMapping(file, NULL, - PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL); - if (file_mapping == NULL) return NULL; - // Map a view of the file into memory - void* memory = MapViewOfFile(file_mapping, 
FILE_MAP_ALL_ACCESS, 0, 0, size); - if (memory) OS::MemMove(memory, initial, size); - return new Win32MemoryMappedFile(file, file_mapping, memory, size); -} - - -Win32MemoryMappedFile::~Win32MemoryMappedFile() { - if (memory_ != NULL) - UnmapViewOfFile(memory_); - CloseHandle(file_mapping_); - CloseHandle(file_); -} - - -// The following code loads functions defined in DbhHelp.h and TlHelp32.h -// dynamically. This is to avoid being depending on dbghelp.dll and -// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to -// kernel32.dll at some point so loading functions defines in TlHelp32.h -// dynamically might not be necessary any more - for some versions of Windows?). - -// Function pointers to functions dynamically loaded from dbghelp.dll. -#define DBGHELP_FUNCTION_LIST(V) \ - V(SymInitialize) \ - V(SymGetOptions) \ - V(SymSetOptions) \ - V(SymGetSearchPath) \ - V(SymLoadModule64) \ - V(StackWalk64) \ - V(SymGetSymFromAddr64) \ - V(SymGetLineFromAddr64) \ - V(SymFunctionTableAccess64) \ - V(SymGetModuleBase64) - -// Function pointers to functions dynamically loaded from dbghelp.dll. -#define TLHELP32_FUNCTION_LIST(V) \ - V(CreateToolhelp32Snapshot) \ - V(Module32FirstW) \ - V(Module32NextW) - -// Define the decoration to use for the type and variable name used for -// dynamically loaded DLL function.. -#define DLL_FUNC_TYPE(name) _##name##_ -#define DLL_FUNC_VAR(name) _##name - -// Define the type for each dynamically loaded DLL function. The function -// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros -// from the Windows include files are redefined here to have the function -// definitions to be as close to the ones in the original .h files as possible. -#ifndef IN -#define IN -#endif -#ifndef VOID -#define VOID void -#endif - -// DbgHelp isn't supported on MinGW yet -#ifndef __MINGW32__ -// DbgHelp.h functions. 
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess, - IN PSTR UserSearchPath, - IN BOOL fInvadeProcess); -typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID); -typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions); -typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))( - IN HANDLE hProcess, - OUT PSTR SearchPath, - IN DWORD SearchPathLength); -typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))( - IN HANDLE hProcess, - IN HANDLE hFile, - IN PSTR ImageName, - IN PSTR ModuleName, - IN DWORD64 BaseOfDll, - IN DWORD SizeOfDll); -typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))( - DWORD MachineType, - HANDLE hProcess, - HANDLE hThread, - LPSTACKFRAME64 StackFrame, - PVOID ContextRecord, - PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine, - PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine, - PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine, - PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress); -typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))( - IN HANDLE hProcess, - IN DWORD64 qwAddr, - OUT PDWORD64 pdwDisplacement, - OUT PIMAGEHLP_SYMBOL64 Symbol); -typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))( - IN HANDLE hProcess, - IN DWORD64 qwAddr, - OUT PDWORD pdwDisplacement, - OUT PIMAGEHLP_LINE64 Line64); -// DbgHelp.h typedefs. Implementation found in dbghelp.dll. -typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))( - HANDLE hProcess, - DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64 -typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))( - HANDLE hProcess, - DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64 - -// TlHelp32.h functions. 
-typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))( - DWORD dwFlags, - DWORD th32ProcessID); -typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot, - LPMODULEENTRY32W lpme); -typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot, - LPMODULEENTRY32W lpme); - -#undef IN -#undef VOID - -// Declare a variable for each dynamically loaded DLL function. -#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL; -DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION) -TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION) -#undef DEF_DLL_FUNCTION - -// Load the functions. This function has a lot of "ugly" macros in order to -// keep down code duplication. - -static bool LoadDbgHelpAndTlHelp32() { - static bool dbghelp_loaded = false; - - if (dbghelp_loaded) return true; - - HMODULE module; - - // Load functions from the dbghelp.dll module. - module = LoadLibrary(TEXT("dbghelp.dll")); - if (module == NULL) { - return false; - } - -#define LOAD_DLL_FUNC(name) \ - DLL_FUNC_VAR(name) = \ - reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name)); - -DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC) - -#undef LOAD_DLL_FUNC - - // Load functions from the kernel32.dll module (the TlHelp32.h function used - // to be in tlhelp32.dll but are now moved to kernel32.dll). - module = LoadLibrary(TEXT("kernel32.dll")); - if (module == NULL) { - return false; - } - -#define LOAD_DLL_FUNC(name) \ - DLL_FUNC_VAR(name) = \ - reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name)); - -TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC) - -#undef LOAD_DLL_FUNC - - // Check that all functions where loaded. - bool result = -#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) && - -DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED) -TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED) - -#undef DLL_FUNC_LOADED - true; - - dbghelp_loaded = result; - return result; - // NOTE: The modules are never unloaded and will stay around until the - // application is closed. 
-} - -#undef DBGHELP_FUNCTION_LIST -#undef TLHELP32_FUNCTION_LIST -#undef DLL_FUNC_VAR -#undef DLL_FUNC_TYPE - - -// Load the symbols for generating stack traces. -static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) { - static bool symbols_loaded = false; - - if (symbols_loaded) return true; - - BOOL ok; - - // Initialize the symbol engine. - ok = _SymInitialize(process_handle, // hProcess - NULL, // UserSearchPath - false); // fInvadeProcess - if (!ok) return false; - - DWORD options = _SymGetOptions(); - options |= SYMOPT_LOAD_LINES; - options |= SYMOPT_FAIL_CRITICAL_ERRORS; - options = _SymSetOptions(options); - - char buf[OS::kStackWalkMaxNameLen] = {0}; - ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen); - if (!ok) { - int err = GetLastError(); - PrintF("%d\n", err); - return false; - } - - HANDLE snapshot = _CreateToolhelp32Snapshot( - TH32CS_SNAPMODULE, // dwFlags - GetCurrentProcessId()); // th32ProcessId - if (snapshot == INVALID_HANDLE_VALUE) return false; - MODULEENTRY32W module_entry; - module_entry.dwSize = sizeof(module_entry); // Set the size of the structure. - BOOL cont = _Module32FirstW(snapshot, &module_entry); - while (cont) { - DWORD64 base; - // NOTE the SymLoadModule64 function has the peculiarity of accepting a - // both unicode and ASCII strings even though the parameter is PSTR. 
- base = _SymLoadModule64( - process_handle, // hProcess - 0, // hFile - reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName - reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName - reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll - module_entry.modBaseSize); // SizeOfDll - if (base == 0) { - int err = GetLastError(); - if (err != ERROR_MOD_NOT_FOUND && - err != ERROR_INVALID_HANDLE) return false; - } - LOG(isolate, - SharedLibraryEvent( - module_entry.szExePath, - reinterpret_cast<unsigned int>(module_entry.modBaseAddr), - reinterpret_cast<unsigned int>(module_entry.modBaseAddr + - module_entry.modBaseSize))); - cont = _Module32NextW(snapshot, &module_entry); - } - CloseHandle(snapshot); - - symbols_loaded = true; - return true; -} - - -void OS::LogSharedLibraryAddresses(Isolate* isolate) { - // SharedLibraryEvents are logged when loading symbol information. - // Only the shared libraries loaded at the time of the call to - // LogSharedLibraryAddresses are logged. DLLs loaded after - // initialization are not accounted for. - if (!LoadDbgHelpAndTlHelp32()) return; - HANDLE process_handle = GetCurrentProcess(); - LoadSymbols(isolate, process_handle); -} - - -void OS::SignalCodeMovingGC() { -} - - -uint64_t OS::TotalPhysicalMemory() { - MEMORYSTATUSEX memory_info; - memory_info.dwLength = sizeof(memory_info); - if (!GlobalMemoryStatusEx(&memory_info)) { - UNREACHABLE(); - return 0; - } - - return static_cast<uint64_t>(memory_info.ullTotalPhys); -} - - -#else // __MINGW32__ -void OS::LogSharedLibraryAddresses(Isolate* isolate) { } -void OS::SignalCodeMovingGC() { } -#endif // __MINGW32__ - - -uint64_t OS::CpuFeaturesImpliedByPlatform() { - return 0; // Windows runs on anything. -} - - -double OS::nan_value() { -#ifdef _MSC_VER - // Positive Quiet NaN with no payload (aka. Indeterminate) has all bits - // in mask set, so value equals mask. 
- static const __int64 nanval = kQuietNaNMask; - return *reinterpret_cast<const double*>(&nanval); -#else // _MSC_VER - return NAN; -#endif // _MSC_VER -} - - -int OS::ActivationFrameAlignment() { -#ifdef _WIN64 - return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned. -#elif defined(__MINGW32__) - // With gcc 4.4 the tree vectorization optimizer can generate code - // that requires 16 byte alignment such as movdqa on x86. - return 16; -#else - return 8; // Floating-point math runs faster with 8-byte alignment. -#endif -} - - -VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } - - -VirtualMemory::VirtualMemory(size_t size) - : address_(ReserveRegion(size)), size_(size) { } - - -VirtualMemory::VirtualMemory(size_t size, size_t alignment) - : address_(NULL), size_(0) { - ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); - size_t request_size = RoundUp(size + alignment, - static_cast<intptr_t>(OS::AllocateAlignment())); - void* address = ReserveRegion(request_size); - if (address == NULL) return; - Address base = RoundUp(static_cast<Address>(address), alignment); - // Try reducing the size by freeing and then reallocating a specific area. - bool result = ReleaseRegion(address, request_size); - USE(result); - ASSERT(result); - address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); - if (address != NULL) { - request_size = size; - ASSERT(base == static_cast<Address>(address)); - } else { - // Resizing failed, just go with a bigger area. 
- address = ReserveRegion(request_size); - if (address == NULL) return; - } - address_ = address; - size_ = request_size; -} - - -VirtualMemory::~VirtualMemory() { - if (IsReserved()) { - bool result = ReleaseRegion(address(), size()); - ASSERT(result); - USE(result); - } -} - - -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} - - -void VirtualMemory::Reset() { - address_ = NULL; - size_ = 0; -} - - -bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { - return CommitRegion(address, size, is_executable); -} - - -bool VirtualMemory::Uncommit(void* address, size_t size) { - ASSERT(IsReserved()); - return UncommitRegion(address, size); -} - - -bool VirtualMemory::Guard(void* address) { - if (NULL == VirtualAlloc(address, - OS::CommitPageSize(), - MEM_COMMIT, - PAGE_NOACCESS)) { - return false; - } - return true; -} - - -void* VirtualMemory::ReserveRegion(size_t size) { - return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); -} - - -bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { - int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; - if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { - return false; - } - return true; -} - - -bool VirtualMemory::UncommitRegion(void* base, size_t size) { - return VirtualFree(base, size, MEM_DECOMMIT) != 0; -} - - -bool VirtualMemory::ReleaseRegion(void* base, size_t size) { - return VirtualFree(base, 0, MEM_RELEASE) != 0; -} - - -bool VirtualMemory::HasLazyCommits() { - // TODO(alph): implement for the platform. - return false; -} - - -// ---------------------------------------------------------------------------- -// Win32 thread support. - -// Definition of invalid thread handle and id. -static const HANDLE kNoThread = INVALID_HANDLE_VALUE; - -// Entry point for threads. The supplied argument is a pointer to the thread -// object. The entry function dispatches to the run method in the thread -// object. 
It is important that this function has __stdcall calling -// convention. -static unsigned int __stdcall ThreadEntry(void* arg) { - Thread* thread = reinterpret_cast<Thread*>(arg); - thread->NotifyStartedAndRun(); - return 0; -} - - -class Thread::PlatformData : public Malloced { - public: - explicit PlatformData(HANDLE thread) : thread_(thread) {} - HANDLE thread_; - unsigned thread_id_; -}; - - -// Initialize a Win32 thread object. The thread has an invalid thread -// handle until it is started. - -Thread::Thread(const Options& options) - : stack_size_(options.stack_size()), - start_semaphore_(NULL) { - data_ = new PlatformData(kNoThread); - set_name(options.name()); -} - - -void Thread::set_name(const char* name) { - OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name)); - name_[sizeof(name_) - 1] = '\0'; -} - - -// Close our own handle for the thread. -Thread::~Thread() { - if (data_->thread_ != kNoThread) CloseHandle(data_->thread_); - delete data_; -} - - -// Create a new thread. It is important to use _beginthreadex() instead of -// the Win32 function CreateThread(), because the CreateThread() does not -// initialize thread specific structures in the C runtime library. -void Thread::Start() { - data_->thread_ = reinterpret_cast<HANDLE>( - _beginthreadex(NULL, - static_cast<unsigned>(stack_size_), - ThreadEntry, - this, - 0, - &data_->thread_id_)); -} - - -// Wait for thread to terminate. 
-void Thread::Join() { - if (data_->thread_id_ != GetCurrentThreadId()) { - WaitForSingleObject(data_->thread_, INFINITE); - } -} - - -Thread::LocalStorageKey Thread::CreateThreadLocalKey() { - DWORD result = TlsAlloc(); - ASSERT(result != TLS_OUT_OF_INDEXES); - return static_cast<LocalStorageKey>(result); -} - - -void Thread::DeleteThreadLocalKey(LocalStorageKey key) { - BOOL result = TlsFree(static_cast<DWORD>(key)); - USE(result); - ASSERT(result); -} - - -void* Thread::GetThreadLocal(LocalStorageKey key) { - return TlsGetValue(static_cast<DWORD>(key)); -} - - -void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - BOOL result = TlsSetValue(static_cast<DWORD>(key), value); - USE(result); - ASSERT(result); -} - - - -void Thread::YieldCPU() { - Sleep(0); -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/preparse-data.cc nodejs-0.11.15/deps/v8/src/preparse-data.cc --- nodejs-0.11.13/deps/v8/src/preparse-data.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/preparse-data.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,97 +1,41 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "../include/v8stdint.h" - -#include "preparse-data-format.h" -#include "preparse-data.h" - -#include "checks.h" -#include "globals.h" -#include "hashmap.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "include/v8stdint.h" +#include "src/base/logging.h" +#include "src/compiler.h" +#include "src/globals.h" +#include "src/hashmap.h" +#include "src/preparse-data.h" +#include "src/preparse-data-format.h" namespace v8 { namespace internal { -template <typename Char> -static int vector_hash(Vector<const Char> string) { - int hash = 0; - for (int i = 0; i < string.length(); i++) { - int c = static_cast<int>(string[i]); - hash += c; - hash += (hash << 10); - hash ^= (hash >> 6); - } - return hash; -} - - -static bool vector_compare(void* a, void* b) { - CompleteParserRecorder::Key* string1 = - reinterpret_cast<CompleteParserRecorder::Key*>(a); - CompleteParserRecorder::Key* string2 = - reinterpret_cast<CompleteParserRecorder::Key*>(b); - if (string1->is_one_byte != string2->is_one_byte) return false; - int length = string1->literal_bytes.length(); - if (string2->literal_bytes.length() != length) return false; - return memcmp(string1->literal_bytes.start(), - string2->literal_bytes.start(), length) == 0; -} - - CompleteParserRecorder::CompleteParserRecorder() - : function_store_(0), - literal_chars_(0), - symbol_store_(0), - symbol_keys_(0), - string_table_(vector_compare), - symbol_id_(0) { + : function_store_(0) { preamble_[PreparseDataConstants::kMagicOffset] = PreparseDataConstants::kMagicNumber; preamble_[PreparseDataConstants::kVersionOffset] = PreparseDataConstants::kCurrentVersion; preamble_[PreparseDataConstants::kHasErrorOffset] = false; preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0; - preamble_[PreparseDataConstants::kSymbolCountOffset] = 0; preamble_[PreparseDataConstants::kSizeOffset] = 0; - ASSERT_EQ(6, PreparseDataConstants::kHeaderSize); + DCHECK_EQ(5, PreparseDataConstants::kHeaderSize); #ifdef DEBUG prev_start_ = -1; #endif - should_log_symbols_ = true; } void CompleteParserRecorder::LogMessage(int start_pos, - int end_pos, - const char* message, - const char* arg_opt) { - if (has_error()) return; + int end_pos, + const char* 
message, + const char* arg_opt, + bool is_reference_error) { + if (HasError()) return; preamble_[PreparseDataConstants::kHasErrorOffset] = true; function_store_.Reset(); STATIC_ASSERT(PreparseDataConstants::kMessageStartPos == 0); @@ -100,10 +44,11 @@ function_store_.Add(end_pos); STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2); function_store_.Add((arg_opt == NULL) ? 0 : 1); - STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3); + STATIC_ASSERT(PreparseDataConstants::kIsReferenceErrorPos == 3); + function_store_.Add(is_reference_error ? 1 : 0); + STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 4); WriteString(CStrVector(message)); if (arg_opt != NULL) WriteString(CStrVector(arg_opt)); - should_log_symbols_ = false; } @@ -115,91 +60,21 @@ } -void CompleteParserRecorder::LogOneByteSymbol(int start, - Vector<const uint8_t> literal) { - ASSERT(should_log_symbols_); - int hash = vector_hash(literal); - LogSymbol(start, hash, true, literal); -} - - -void CompleteParserRecorder::LogTwoByteSymbol(int start, - Vector<const uint16_t> literal) { - ASSERT(should_log_symbols_); - int hash = vector_hash(literal); - LogSymbol(start, hash, false, Vector<const byte>::cast(literal)); -} - - -void CompleteParserRecorder::LogSymbol(int start, - int hash, - bool is_one_byte, - Vector<const byte> literal_bytes) { - Key key = { is_one_byte, literal_bytes }; - HashMap::Entry* entry = string_table_.Lookup(&key, hash, true); - int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); - if (id == 0) { - // Copy literal contents for later comparison. - key.literal_bytes = - Vector<const byte>::cast(literal_chars_.AddBlock(literal_bytes)); - // Put (symbol_id_ + 1) into entry and increment it. 
- id = ++symbol_id_; - entry->value = reinterpret_cast<void*>(id); - Vector<Key> symbol = symbol_keys_.AddBlock(1, key); - entry->key = &symbol[0]; - } - WriteNumber(id - 1); -} - - -Vector<unsigned> CompleteParserRecorder::ExtractData() { +ScriptData* CompleteParserRecorder::GetScriptData() { int function_size = function_store_.size(); - // Add terminator to symbols, then pad to unsigned size. - int symbol_size = symbol_store_.size(); - int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned)); - symbol_store_.AddBlock(padding, PreparseDataConstants::kNumberTerminator); - symbol_size += padding; - int total_size = PreparseDataConstants::kHeaderSize + function_size - + (symbol_size / sizeof(unsigned)); - Vector<unsigned> data = Vector<unsigned>::New(total_size); + int total_size = PreparseDataConstants::kHeaderSize + function_size; + unsigned* data = NewArray<unsigned>(total_size); preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size; - preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_; - OS::MemCopy(data.start(), preamble_, sizeof(preamble_)); - int symbol_start = PreparseDataConstants::kHeaderSize + function_size; + MemCopy(data, preamble_, sizeof(preamble_)); if (function_size > 0) { - function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize, - symbol_start)); - } - if (!has_error()) { - symbol_store_.WriteTo( - Vector<byte>::cast(data.SubVector(symbol_start, total_size))); - } - return data; -} - - -void CompleteParserRecorder::WriteNumber(int number) { - // Split the number into chunks of 7 bits. Write them one after another (the - // most significant first). Use the MSB of each byte for signalling that the - // number continues. See ScriptDataImpl::ReadNumber for the reading side. - ASSERT(number >= 0); - - int mask = (1 << 28) - 1; - int i = 28; - // 26 million symbols ought to be enough for anybody. 
- ASSERT(number <= mask); - while (number < mask) { - mask >>= 7; - i -= 7; - } - while (i > 0) { - symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u); - number &= mask; - mask >>= 7; - i -= 7; + function_store_.WriteTo(Vector<unsigned>( + data + PreparseDataConstants::kHeaderSize, function_size)); } - ASSERT(number < (1 << 7)); - symbol_store_.Add(static_cast<byte>(number)); + DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)); + ScriptData* result = new ScriptData(reinterpret_cast<byte*>(data), + total_size * sizeof(unsigned)); + result->AcquireDataOwnership(); + return result; } diff -Nru nodejs-0.11.13/deps/v8/src/preparse-data-format.h nodejs-0.11.15/deps/v8/src/preparse-data-format.h --- nodejs-0.11.13/deps/v8/src/preparse-data-format.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/preparse-data-format.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PREPARSE_DATA_FORMAT_H_ #define V8_PREPARSE_DATA_FORMAT_H_ @@ -37,21 +14,21 @@ public: // Layout and constants of the preparse data exchange format. static const unsigned kMagicNumber = 0xBadDead; - static const unsigned kCurrentVersion = 8; + static const unsigned kCurrentVersion = 9; static const int kMagicOffset = 0; static const int kVersionOffset = 1; static const int kHasErrorOffset = 2; static const int kFunctionsSizeOffset = 3; - static const int kSymbolCountOffset = 4; - static const int kSizeOffset = 5; - static const int kHeaderSize = 6; + static const int kSizeOffset = 4; + static const int kHeaderSize = 5; // If encoding a message, the following positions are fixed. static const int kMessageStartPos = 0; static const int kMessageEndPos = 1; static const int kMessageArgCountPos = 2; - static const int kMessageTextPos = 3; + static const int kIsReferenceErrorPos = 3; + static const int kMessageTextPos = 4; static const unsigned char kNumberTerminator = 0x80u; }; diff -Nru nodejs-0.11.13/deps/v8/src/preparse-data.h nodejs-0.11.15/deps/v8/src/preparse-data.h --- nodejs-0.11.13/deps/v8/src/preparse-data.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/preparse-data.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,25 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PREPARSE_DATA_H_ #define V8_PREPARSE_DATA_H_ -#include "allocation.h" -#include "hashmap.h" -#include "utils-inl.h" +#include "src/allocation.h" +#include "src/hashmap.h" +#include "src/preparse-data-format.h" +#include "src/utils-inl.h" namespace v8 { namespace internal { +class ScriptData; + // Abstract interface for preparse data recorder. 
class ParserRecorder { public: - ParserRecorder() : should_log_symbols_(false) { } + ParserRecorder() { } virtual ~ParserRecorder() { } // Logs the scope and some details of a function literal in the source. @@ -55,24 +35,8 @@ virtual void LogMessage(int start, int end, const char* message, - const char* argument_opt) = 0; - - // Logs a symbol creation of a literal or identifier. - bool ShouldLogSymbols() { return should_log_symbols_; } - // The following functions are only callable on CompleteParserRecorder - // and are guarded by calls to ShouldLogSymbols. - virtual void LogOneByteSymbol(int start, Vector<const uint8_t> literal) { - UNREACHABLE(); - } - virtual void LogTwoByteSymbol(int start, Vector<const uint16_t> literal) { - UNREACHABLE(); - } - virtual void PauseRecording() { UNREACHABLE(); } - virtual void ResumeRecording() { UNREACHABLE(); } - - protected: - bool should_log_symbols_; - + const char* argument_opt, + bool is_reference_error) = 0; private: DISALLOW_COPY_AND_ASSIGN(ParserRecorder); }; @@ -80,8 +44,9 @@ class SingletonLogger : public ParserRecorder { public: - SingletonLogger() : has_error_(false), start_(-1), end_(-1) { } - virtual ~SingletonLogger() { } + SingletonLogger() + : has_error_(false), start_(-1), end_(-1), is_reference_error_(false) {} + virtual ~SingletonLogger() {} void Reset() { has_error_ = false; } @@ -90,13 +55,13 @@ int literals, int properties, StrictMode strict_mode) { - ASSERT(!has_error_); + DCHECK(!has_error_); start_ = start; end_ = end; literals_ = literals; properties_ = properties; strict_mode_ = strict_mode; - }; + } // Logs an error message and marks the log as containing an error. 
// Further logging will be ignored, and ExtractData will return a vector @@ -104,37 +69,40 @@ virtual void LogMessage(int start, int end, const char* message, - const char* argument_opt) { + const char* argument_opt, + bool is_reference_error) { if (has_error_) return; has_error_ = true; start_ = start; end_ = end; message_ = message; argument_opt_ = argument_opt; + is_reference_error_ = is_reference_error; } - bool has_error() { return has_error_; } + bool has_error() const { return has_error_; } - int start() { return start_; } - int end() { return end_; } - int literals() { - ASSERT(!has_error_); + int start() const { return start_; } + int end() const { return end_; } + int literals() const { + DCHECK(!has_error_); return literals_; } - int properties() { - ASSERT(!has_error_); + int properties() const { + DCHECK(!has_error_); return properties_; } - StrictMode strict_mode() { - ASSERT(!has_error_); + StrictMode strict_mode() const { + DCHECK(!has_error_); return strict_mode_; } + int is_reference_error() const { return is_reference_error_; } const char* message() { - ASSERT(has_error_); + DCHECK(has_error_); return message_; } - const char* argument_opt() { - ASSERT(has_error_); + const char* argument_opt() const { + DCHECK(has_error_); return argument_opt_; } @@ -149,6 +117,7 @@ // For error messages. 
const char* message_; const char* argument_opt_; + bool is_reference_error_; }; @@ -180,37 +149,21 @@ virtual void LogMessage(int start, int end, const char* message, - const char* argument_opt); + const char* argument_opt, + bool is_reference_error_); + ScriptData* GetScriptData(); - virtual void PauseRecording() { - ASSERT(should_log_symbols_); - should_log_symbols_ = false; + bool HasError() { + return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]); } - - virtual void ResumeRecording() { - ASSERT(!should_log_symbols_); - should_log_symbols_ = !has_error(); + Vector<unsigned> ErrorMessageData() { + DCHECK(HasError()); + return function_store_.ToVector(); } - virtual void LogOneByteSymbol(int start, Vector<const uint8_t> literal); - virtual void LogTwoByteSymbol(int start, Vector<const uint16_t> literal); - Vector<unsigned> ExtractData(); - private: - bool has_error() { - return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]); - } - void WriteString(Vector<const char> str); - // For testing. Defined in test-parsing.cc. - friend struct CompleteParserRecorderFriend; - - void LogSymbol(int start, - int hash, - bool is_one_byte, - Vector<const byte> literal); - // Write a non-negative number to the symbol store. void WriteNumber(int number); @@ -220,12 +173,6 @@ #ifdef DEBUG int prev_start_; #endif - - Collector<byte> literal_chars_; - Collector<byte> symbol_store_; - Collector<Key> symbol_keys_; - HashMap string_table_; - int symbol_id_; }; diff -Nru nodejs-0.11.13/deps/v8/src/preparser.cc nodejs-0.11.15/deps/v8/src/preparser.cc --- nodejs-0.11.13/deps/v8/src/preparser.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/preparser.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,23 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <cmath> -#include "../include/v8stdint.h" +#include "include/v8stdint.h" -#include "allocation.h" -#include "checks.h" -#include "conversions.h" -#include "conversions-inl.h" -#include "globals.h" -#include "hashmap.h" -#include "list.h" -#include "preparse-data-format.h" -#include "preparse-data.h" -#include "preparser.h" -#include "unicode.h" -#include "utils.h" +#include "src/allocation.h" +#include "src/base/logging.h" +#include "src/conversions-inl.h" +#include "src/conversions.h" +#include "src/globals.h" +#include "src/hashmap.h" +#include "src/list.h" +#include "src/preparse-data.h" +#include "src/preparse-data-format.h" +#include "src/preparser.h" +#include "src/unicode.h" +#include "src/utils.h" #if V8_LIBC_MSVCRT && (_MSC_VER < 1800) namespace std { @@ -56,54 +33,36 @@ namespace internal { -void PreParserTraits::CheckStrictModeLValue(PreParserExpression expression, - bool* ok) { - if (expression.IsIdentifier() && - expression.AsIdentifier().IsEvalOrArguments()) { - pre_parser_->ReportMessage("strict_eval_arguments", - Vector<const char*>::empty()); - *ok = false; - } -} - - void PreParserTraits::ReportMessageAt(Scanner::Location location, const char* message, - Vector<const char*> args, + const char* arg, bool is_reference_error) { ReportMessageAt(location.beg_pos, location.end_pos, message, - args.length() > 0 ? 
args[0] : NULL, + arg, is_reference_error); } -void PreParserTraits::ReportMessageAt(Scanner::Location location, - const char* type, - const char* name_opt, - bool is_reference_error) { - pre_parser_->log_ - ->LogMessage(location.beg_pos, location.end_pos, type, name_opt); -} - - void PreParserTraits::ReportMessageAt(int start_pos, int end_pos, - const char* type, - const char* name_opt, + const char* message, + const char* arg, bool is_reference_error) { - pre_parser_->log_->LogMessage(start_pos, end_pos, type, name_opt); + pre_parser_->log_->LogMessage(start_pos, end_pos, message, arg, + is_reference_error); } PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) { - pre_parser_->LogSymbol(); if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) { return PreParserIdentifier::FutureReserved(); } else if (scanner->current_token() == Token::FUTURE_STRICT_RESERVED_WORD) { return PreParserIdentifier::FutureStrictReserved(); + } else if (scanner->current_token() == Token::LET) { + return PreParserIdentifier::Let(); } else if (scanner->current_token() == Token::YIELD) { return PreParserIdentifier::Yield(); } @@ -119,7 +78,6 @@ PreParserExpression PreParserTraits::ExpressionFromString( int pos, Scanner* scanner, PreParserFactory* factory) { - pre_parser_->LogSymbol(); if (scanner->UnescapedLiteralMatches("use strict", 10)) { return PreParserExpression::UseStrictStringLiteral(); } @@ -139,10 +97,11 @@ bool is_generator, int function_token_position, FunctionLiteral::FunctionType type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok) { return pre_parser_->ParseFunctionLiteral( name, function_name_location, name_is_strict_reserved, is_generator, - function_token_position, type, ok); + function_token_position, type, arity_restriction, ok); } @@ -151,12 +110,14 @@ log_ = log; // Lazy functions always have trivial outer scopes (no with/catch scopes). 
PreParserScope top_scope(scope_, GLOBAL_SCOPE); - FunctionState top_state(&function_state_, &scope_, &top_scope); + FunctionState top_state(&function_state_, &scope_, &top_scope, NULL, + this->ast_value_factory()); scope_->SetStrictMode(strict_mode); PreParserScope function_scope(scope_, FUNCTION_SCOPE); - FunctionState function_state(&function_state_, &scope_, &function_scope); + FunctionState function_state(&function_state_, &scope_, &function_scope, NULL, + this->ast_value_factory()); function_state.set_is_generator(is_generator); - ASSERT_EQ(Token::LBRACE, scanner()->current_token()); + DCHECK_EQ(Token::LBRACE, scanner()->current_token()); bool ok = true; int start_position = peek_position(); ParseLazyFunctionLiteralBody(&ok); @@ -164,7 +125,7 @@ if (!ok) { ReportUnexpectedToken(scanner()->current_token()); } else { - ASSERT_EQ(Token::RBRACE, scanner()->peek()); + DCHECK_EQ(Token::RBRACE, scanner()->peek()); if (scope_->strict_mode() == STRICT) { int end_pos = scanner()->location().end_pos; CheckOctalLiteral(start_position, end_pos, &ok); @@ -210,9 +171,14 @@ switch (peek()) { case Token::FUNCTION: return ParseFunctionDeclaration(ok); - case Token::LET: case Token::CONST: return ParseVariableStatement(kSourceElement, ok); + case Token::LET: + DCHECK(allow_harmony_scoping()); + if (strict_mode() == STRICT) { + return ParseVariableStatement(kSourceElement, ok); + } + // Fall through. 
default: return ParseStatement(ok); } @@ -280,11 +246,6 @@ case Token::LBRACE: return ParseBlock(ok); - case Token::CONST: - case Token::LET: - case Token::VAR: - return ParseVariableStatement(kStatement, ok); - case Token::SEMICOLON: Next(); return Statement::Default(); @@ -329,8 +290,7 @@ if (strict_mode() == STRICT) { PreParserTraits::ReportMessageAt(start_location.beg_pos, end_location.end_pos, - "strict_function", - NULL); + "strict_function"); *ok = false; return Statement::Default(); } else { @@ -341,6 +301,16 @@ case Token::DEBUGGER: return ParseDebuggerStatement(ok); + case Token::VAR: + case Token::CONST: + return ParseVariableStatement(kStatement, ok); + + case Token::LET: + DCHECK(allow_harmony_scoping()); + if (strict_mode() == STRICT) { + return ParseVariableStatement(kStatement, ok); + } + // Fall through. default: return ParseExpressionOrLabelledStatement(ok); } @@ -365,6 +335,7 @@ is_generator, pos, FunctionLiteral::DECLARATION, + FunctionLiteral::NORMAL_ARITY, CHECK_OK); return Statement::FunctionDeclaration(); } @@ -458,23 +429,9 @@ return Statement::Default(); } } - } else if (peek() == Token::LET) { - // ES6 Draft Rev4 section 12.2.1: - // - // LetDeclaration : let LetBindingList ; - // - // * It is a Syntax Error if the code that matches this production is not - // contained in extended code. - // - // TODO(rossberg): make 'let' a legal identifier in sloppy mode. 
- if (!allow_harmony_scoping() || strict_mode() == SLOPPY) { - ReportMessageAt(scanner()->peek_location(), "illegal_let"); - *ok = false; - return Statement::Default(); - } + } else if (peek() == Token::LET && strict_mode() == STRICT) { Consume(Token::LET); - if (var_context != kSourceElement && - var_context != kForStatement) { + if (var_context != kSourceElement && var_context != kForStatement) { ReportMessageAt(scanner()->peek_location(), "unprotected_let"); *ok = false; return Statement::Default(); @@ -519,8 +476,8 @@ if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) { // Expression is a single identifier, and not, e.g., a parenthesized // identifier. - ASSERT(!expr.AsIdentifier().IsFutureReserved()); - ASSERT(strict_mode() == SLOPPY || + DCHECK(!expr.AsIdentifier().IsFutureReserved()); + DCHECK(strict_mode() == SLOPPY || (!expr.AsIdentifier().IsFutureStrictReserved() && !expr.AsIdentifier().IsYield())); Consume(Token::COLON); @@ -592,7 +549,7 @@ // ReturnStatement :: // 'return' [no line terminator] Expression? ';' - // Consume the return token. It is necessary to do the before + // Consume the return token. It is necessary to do before // reporting any errors on it, because of the way errors are // reported (underlining). 
Expect(Token::RETURN, CHECK_OK); @@ -696,8 +653,7 @@ bool PreParser::CheckInOrOf(bool accept_OF) { if (Check(Token::IN) || - (allow_for_of() && accept_OF && - CheckContextualKeyword(CStrVector("of")))) { + (accept_OF && CheckContextualKeyword(CStrVector("of")))) { return true; } return false; @@ -712,7 +668,7 @@ Expect(Token::LPAREN, CHECK_OK); if (peek() != Token::SEMICOLON) { if (peek() == Token::VAR || peek() == Token::CONST || - peek() == Token::LET) { + (peek() == Token::LET && strict_mode() == STRICT)) { bool is_let = peek() == Token::LET; int decl_count; VariableDeclarationProperties decl_props = kHasNoInitializers; @@ -844,6 +800,7 @@ bool is_generator, int function_token_pos, FunctionLiteral::FunctionType function_type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok) { // Function :: // '(' FormalParameterList? ')' '{' FunctionBody '}' @@ -851,13 +808,13 @@ // Parse function body. ScopeType outer_scope_type = scope_->type(); PreParserScope function_scope(scope_, FUNCTION_SCOPE); - FunctionState function_state(&function_state_, &scope_, &function_scope); + FunctionState function_state(&function_state_, &scope_, &function_scope, NULL, + this->ast_value_factory()); function_state.set_is_generator(is_generator); // FormalParameterList :: // '(' (Identifier)*[','] ')' Expect(Token::LPAREN, CHECK_OK); int start_position = position(); - bool done = (peek() == Token::RPAREN); DuplicateFinder duplicate_finder(scanner()->unicode_cache()); // We don't yet know if the function will be strict, so we cannot yet produce // errors for parameter names or duplicates. 
However, we remember the @@ -865,6 +822,10 @@ Scanner::Location eval_args_error_loc = Scanner::Location::invalid(); Scanner::Location dupe_error_loc = Scanner::Location::invalid(); Scanner::Location reserved_error_loc = Scanner::Location::invalid(); + + bool done = arity_restriction == FunctionLiteral::GETTER_ARITY || + (peek() == Token::RPAREN && + arity_restriction != FunctionLiteral::SETTER_ARITY); while (!done) { bool is_strict_reserved = false; Identifier param_name = @@ -882,10 +843,9 @@ dupe_error_loc = scanner()->location(); } + if (arity_restriction == FunctionLiteral::SETTER_ARITY) break; done = (peek() == Token::RPAREN); - if (!done) { - Expect(Token::COMMA, CHECK_OK); - } + if (!done) Expect(Token::COMMA, CHECK_OK); } Expect(Token::RPAREN, CHECK_OK); @@ -942,14 +902,11 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) { int body_start = position(); - bool is_logging = log_->ShouldLogSymbols(); - if (is_logging) log_->PauseRecording(); ParseSourceElements(Token::RBRACE, ok); - if (is_logging) log_->ResumeRecording(); if (!*ok) return; // Position right after terminal '}'. - ASSERT_EQ(Token::RBRACE, scanner()->peek()); + DCHECK_EQ(Token::RBRACE, scanner()->peek()); int body_end = scanner()->peek_location().end_pos; log_->LogFunction(body_start, body_end, function_state_->materialized_literal_count(), @@ -976,11 +933,4 @@ #undef CHECK_OK -void PreParser::LogSymbol() { - if (log_->ShouldLogSymbols()) { - scanner()->LogSymbol(log_, position()); - } -} - - } } // v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/preparser.h nodejs-0.11.15/deps/v8/src/preparser.h --- nodejs-0.11.13/deps/v8/src/preparser.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/preparser.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,17 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_PREPARSER_H #define V8_PREPARSER_H -#include "func-name-inferrer.h" -#include "hashmap.h" -#include "scopes.h" -#include "token.h" -#include "scanner.h" -#include "v8.h" +#include "src/v8.h" + +#include "src/func-name-inferrer.h" +#include "src/hashmap.h" +#include "src/scanner.h" +#include "src/scopes.h" +#include "src/token.h" namespace v8 { namespace internal { @@ -84,11 +62,10 @@ // Shorten type names defined by Traits. typedef typename Traits::Type::Expression ExpressionT; typedef typename Traits::Type::Identifier IdentifierT; + typedef typename Traits::Type::FunctionLiteral FunctionLiteralT; - ParserBase(Scanner* scanner, uintptr_t stack_limit, - v8::Extension* extension, - ParserRecorder* log, - typename Traits::Type::Zone* zone, + ParserBase(Scanner* scanner, uintptr_t stack_limit, v8::Extension* extension, + ParserRecorder* log, typename Traits::Type::Zone* zone, typename Traits::Type::Parser this_object) : Traits(this_object), parenthesized_function_(false), @@ -104,15 +81,15 @@ allow_lazy_(false), allow_natives_syntax_(false), allow_generators_(false), - allow_for_of_(false), - zone_(zone) { } + allow_arrow_functions_(false), + zone_(zone) {} // Getters that indicate whether certain syntactical constructs are // allowed to be parsed by this instance of the parser. 
bool allow_lazy() const { return allow_lazy_; } bool allow_natives_syntax() const { return allow_natives_syntax_; } bool allow_generators() const { return allow_generators_; } - bool allow_for_of() const { return allow_for_of_; } + bool allow_arrow_functions() const { return allow_arrow_functions_; } bool allow_modules() const { return scanner()->HarmonyModules(); } bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); } bool allow_harmony_numeric_literals() const { @@ -124,7 +101,7 @@ void set_allow_lazy(bool allow) { allow_lazy_ = allow; } void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; } void set_allow_generators(bool allow) { allow_generators_ = allow; } - void set_allow_for_of(bool allow) { allow_for_of_ = allow; } + void set_allow_arrow_functions(bool allow) { allow_arrow_functions_ = allow; } void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); } void set_allow_harmony_scoping(bool allow) { scanner()->SetHarmonyScoping(allow); @@ -134,6 +111,8 @@ } protected: + friend class Traits::Type::Checkpoint; + enum AllowEvalOrArgumentsAsIdentifier { kAllowEvalOrArguments, kDontAllowEvalOrArguments @@ -144,6 +123,8 @@ PARSE_EAGERLY }; + class ParserCheckpoint; + // --------------------------------------------------------------------------- // FunctionState and BlockState together implement the parser's scope stack. // The parser's current scope is in scope_. 
BlockState and FunctionState @@ -172,7 +153,13 @@ FunctionState** function_state_stack, typename Traits::Type::Scope** scope_stack, typename Traits::Type::Scope* scope, - typename Traits::Type::Zone* zone = NULL); + typename Traits::Type::Zone* zone = NULL, + AstValueFactory* ast_value_factory = NULL); + FunctionState(FunctionState** function_state_stack, + typename Traits::Type::Scope** scope_stack, + typename Traits::Type::Scope** scope, + typename Traits::Type::Zone* zone = NULL, + AstValueFactory* ast_value_factory = NULL); ~FunctionState(); int NextMaterializedLiteralIndex() { @@ -193,8 +180,8 @@ void set_generator_object_variable( typename Traits::Type::GeneratorVariable* variable) { - ASSERT(variable != NULL); - ASSERT(!is_generator()); + DCHECK(variable != NULL); + DCHECK(!is_generator()); generator_object_variable_ = variable; is_generator_ = true; } @@ -228,11 +215,43 @@ FunctionState* outer_function_state_; typename Traits::Type::Scope** scope_stack_; typename Traits::Type::Scope* outer_scope_; - Isolate* isolate_; // Only used by ParserTraits. int saved_ast_node_id_; // Only used by ParserTraits. + typename Traits::Type::Zone* extra_param_; typename Traits::Type::Factory factory_; friend class ParserTraits; + friend class ParserCheckpoint; + }; + + // Annoyingly, arrow functions first parse as comma expressions, then when we + // see the => we have to go back and reinterpret the arguments as being formal + // parameters. To do so we need to reset some of the parser state back to + // what it was before the arguments were first seen. 
+ class ParserCheckpoint : public Traits::Type::Checkpoint { + public: + template <typename Parser> + explicit ParserCheckpoint(Parser* parser) + : Traits::Type::Checkpoint(parser) { + function_state_ = parser->function_state_; + next_materialized_literal_index_ = + function_state_->next_materialized_literal_index_; + next_handler_index_ = function_state_->next_handler_index_; + expected_property_count_ = function_state_->expected_property_count_; + } + + void Restore() { + Traits::Type::Checkpoint::Restore(); + function_state_->next_materialized_literal_index_ = + next_materialized_literal_index_; + function_state_->next_handler_index_ = next_handler_index_; + function_state_->expected_property_count_ = expected_property_count_; + } + + private: + FunctionState* function_state_; + int next_materialized_literal_index_; + int next_handler_index_; + int expected_property_count_; }; class ParsingModeScope BASE_EMBEDDED { @@ -267,8 +286,7 @@ INLINE(Token::Value Next()) { if (stack_overflow_) return Token::ILLEGAL; { - int marker; - if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) { + if (GetCurrentStackPosition() < stack_limit_) { // Any further calls to Next or peek will return the illegal token. // The current call must return the next token, which might already // have been peek'ed. @@ -282,7 +300,7 @@ Token::Value next = Next(); USE(next); USE(token); - ASSERT(next == token); + DCHECK(next == token); } bool Check(Token::Value token) { @@ -323,6 +341,7 @@ return next == Token::IDENTIFIER || next == Token::FUTURE_RESERVED_WORD || next == Token::FUTURE_STRICT_RESERVED_WORD || + next == Token::LET || next == Token::YIELD; } @@ -356,6 +375,44 @@ } } + // Validates strict mode for function parameter lists. This has to be + // done after parsing the function, since the function can declare + // itself strict. 
+ void CheckStrictFunctionNameAndParameters( + IdentifierT function_name, + bool function_name_is_strict_reserved, + const Scanner::Location& function_name_loc, + const Scanner::Location& eval_args_error_loc, + const Scanner::Location& dupe_error_loc, + const Scanner::Location& reserved_loc, + bool* ok) { + if (this->IsEvalOrArguments(function_name)) { + Traits::ReportMessageAt(function_name_loc, "strict_eval_arguments"); + *ok = false; + return; + } + if (function_name_is_strict_reserved) { + Traits::ReportMessageAt(function_name_loc, "unexpected_strict_reserved"); + *ok = false; + return; + } + if (eval_args_error_loc.IsValid()) { + Traits::ReportMessageAt(eval_args_error_loc, "strict_eval_arguments"); + *ok = false; + return; + } + if (dupe_error_loc.IsValid()) { + Traits::ReportMessageAt(dupe_error_loc, "strict_param_dupe"); + *ok = false; + return; + } + if (reserved_loc.IsValid()) { + Traits::ReportMessageAt(reserved_loc, "unexpected_strict_reserved"); + *ok = false; + return; + } + } + // Determine precedence of given token. static int Precedence(Token::Value token, bool accept_IN) { if (token == Token::IN && !accept_IN) @@ -371,15 +428,16 @@ bool is_generator() const { return function_state_->is_generator(); } // Report syntax errors. 
- void ReportMessage(const char* message, Vector<const char*> args, + void ReportMessage(const char* message, const char* arg = NULL, bool is_reference_error = false) { Scanner::Location source_location = scanner()->location(); - Traits::ReportMessageAt(source_location, message, args, is_reference_error); + Traits::ReportMessageAt(source_location, message, arg, is_reference_error); } void ReportMessageAt(Scanner::Location location, const char* message, bool is_reference_error = false) { - Traits::ReportMessageAt(location, message, Vector<const char*>::empty(), + Traits::ReportMessageAt(location, message, + reinterpret_cast<const char*>(NULL), is_reference_error); } @@ -424,6 +482,15 @@ ExpressionT ParseMemberExpression(bool* ok); ExpressionT ParseMemberExpressionContinuation(ExpressionT expression, bool* ok); + ExpressionT ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast, + bool* ok); + + // Checks if the expression is a valid reference expression (e.g., on the + // left-hand side of assignments). Although ruled out by ECMA as early errors, + // we allow calls for web compatibility and rewrite them to a runtime throw. + ExpressionT CheckAndRewriteReferenceExpression( + ExpressionT expression, + Scanner::Location location, const char* message, bool* ok); // Used to detect duplicates in object literals. Each of the values // kGetterProperty, kSetterProperty and kValueProperty represents @@ -500,7 +567,7 @@ bool allow_lazy_; bool allow_natives_syntax_; bool allow_generators_; - bool allow_for_of_; + bool allow_arrow_functions_; typename Traits::Type::Zone* zone_; // Only used by Parser. 
}; @@ -524,24 +591,36 @@ static PreParserIdentifier FutureStrictReserved() { return PreParserIdentifier(kFutureStrictReservedIdentifier); } + static PreParserIdentifier Let() { + return PreParserIdentifier(kLetIdentifier); + } static PreParserIdentifier Yield() { return PreParserIdentifier(kYieldIdentifier); } - bool IsEval() { return type_ == kEvalIdentifier; } - bool IsArguments() { return type_ == kArgumentsIdentifier; } - bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; } - bool IsYield() { return type_ == kYieldIdentifier; } - bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; } - bool IsFutureStrictReserved() { + bool IsEval() const { return type_ == kEvalIdentifier; } + bool IsArguments() const { return type_ == kArgumentsIdentifier; } + bool IsEvalOrArguments() const { return type_ >= kEvalIdentifier; } + bool IsYield() const { return type_ == kYieldIdentifier; } + bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; } + bool IsFutureStrictReserved() const { return type_ == kFutureStrictReservedIdentifier; } - bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; } + bool IsValidStrictVariable() const { return type_ == kUnknownIdentifier; } + + // Allow identifier->name()[->length()] to work. The preparser + // does not need the actual positions/lengths of the identifiers. 
+ const PreParserIdentifier* operator->() const { return this; } + const PreParserIdentifier raw_name() const { return *this; } + + int position() const { return 0; } + int length() const { return 0; } private: enum Type { kUnknownIdentifier, kFutureReservedIdentifier, kFutureStrictReservedIdentifier, + kLetIdentifier, kYieldIdentifier, kEvalIdentifier, kArgumentsIdentifier @@ -550,6 +629,7 @@ Type type_; friend class PreParserExpression; + friend class PreParserScope; }; @@ -565,10 +645,26 @@ } static PreParserExpression FromIdentifier(PreParserIdentifier id) { - return PreParserExpression(kIdentifierFlag | + return PreParserExpression(kTypeIdentifier | (id.type_ << kIdentifierShift)); } + static PreParserExpression BinaryOperation(PreParserExpression left, + Token::Value op, + PreParserExpression right) { + int code = ((op == Token::COMMA) && !left.is_parenthesized() && + !right.is_parenthesized()) + ? left.ArrowParamListBit() & right.ArrowParamListBit() + : 0; + return PreParserExpression(kTypeBinaryOperation | code); + } + + static PreParserExpression EmptyArrowParamList() { + // Any expression for which IsValidArrowParamList() returns true + // will work here. + return FromIdentifier(PreParserIdentifier::Default()); + } + static PreParserExpression StringLiteral() { return PreParserExpression(kUnknownStringLiteral); } @@ -589,39 +685,67 @@ return PreParserExpression(kPropertyExpression); } - bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; } + static PreParserExpression Call() { + return PreParserExpression(kCallExpression); + } + + bool IsIdentifier() const { return (code_ & kTypeMask) == kTypeIdentifier; } - // Only works corretly if it is actually an identifier expression. 
- PreParserIdentifier AsIdentifier() { + PreParserIdentifier AsIdentifier() const { + DCHECK(IsIdentifier()); return PreParserIdentifier( static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift)); } - bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; } + bool IsStringLiteral() const { + return (code_ & kTypeMask) == kTypeStringLiteral; + } - bool IsUseStrictLiteral() { - return (code_ & kStringLiteralMask) == kUseStrictString; + bool IsUseStrictLiteral() const { + return (code_ & kUseStrictString) == kUseStrictString; } - bool IsThis() { return code_ == kThisExpression; } + bool IsThis() const { return (code_ & kThisExpression) == kThisExpression; } - bool IsThisProperty() { return code_ == kThisPropertyExpression; } + bool IsThisProperty() const { + return (code_ & kThisPropertyExpression) == kThisPropertyExpression; + } - bool IsProperty() { - return code_ == kPropertyExpression || code_ == kThisPropertyExpression; + bool IsProperty() const { + return (code_ & kPropertyExpression) == kPropertyExpression || + (code_ & kThisPropertyExpression) == kThisPropertyExpression; } - bool IsValidLeftHandSide() { + bool IsCall() const { return (code_ & kCallExpression) == kCallExpression; } + + bool IsValidReferenceExpression() const { return IsIdentifier() || IsProperty(); } + bool IsValidArrowParamList() const { + return (ArrowParamListBit() & kBinaryOperationArrowParamList) != 0 && + (code_ & kMultiParenthesizedExpression) == 0; + } + // At the moment PreParser doesn't track these expression types. bool IsFunctionLiteral() const { return false; } - bool IsCall() const { return false; } bool IsCallNew() const { return false; } PreParserExpression AsFunctionLiteral() { return *this; } + bool IsBinaryOperation() const { + return (code_ & kTypeMask) == kTypeBinaryOperation; + } + + bool is_parenthesized() const { + return (code_ & kParenthesizedExpression) != 0; + } + + void increase_parenthesization_level() { + code_ |= is_parenthesized() ? 
kMultiParenthesizedExpression + : kParenthesizedExpression; + } + // Dummy implementation for making expression->somefunc() work in both Parser // and PreParser. PreParserExpression* operator->() { return this; } @@ -630,32 +754,69 @@ void set_index(int index) {} // For YieldExpressions void set_parenthesized() {} + int position() const { return RelocInfo::kNoPosition; } + void set_function_token_position(int position) {} + void set_ast_properties(int* ast_properties) {} + void set_dont_optimize_reason(BailoutReason dont_optimize_reason) {} + + bool operator==(const PreParserExpression& other) const { + return code_ == other.code_; + } + bool operator!=(const PreParserExpression& other) const { + return code_ != other.code_; + } + private: - // Least significant 2 bits are used as flags. Bits 0 and 1 represent - // identifiers or strings literals, and are mutually exclusive, but can both - // be absent. If the expression is an identifier or a string literal, the - // other bits describe the type (see PreParserIdentifier::Type and string - // literal constants below). + // Least significant 2 bits are used as expression type. The third least + // significant bit tracks whether an expression is parenthesized. If the + // expression is an identifier or a string literal, the other bits + // describe the type/ (see PreParserIdentifier::Type and string literal + // constants below). For binary operations, the other bits are flags + // which further describe the contents of the expression. enum { kUnknownExpression = 0, - // Identifiers - kIdentifierFlag = 1, // Used to detect labels. - kIdentifierShift = 3, + kTypeMask = 1 | 2, + kParenthesizedExpression = (1 << 2), + kMultiParenthesizedExpression = (1 << 3), - kStringLiteralFlag = 2, // Used to detect directive prologue. - kUnknownStringLiteral = kStringLiteralFlag, - kUseStrictString = kStringLiteralFlag | 8, + // Identifiers + kTypeIdentifier = 1, // Used to detect labels. 
+ kIdentifierShift = 5, + kTypeStringLiteral = 2, // Used to detect directive prologue. + kUnknownStringLiteral = kTypeStringLiteral, + kUseStrictString = kTypeStringLiteral | 32, kStringLiteralMask = kUseStrictString, + // Binary operations. Those are needed to detect certain keywords and + // duplicated identifier in parameter lists for arrow functions, because + // they are initially parsed as comma-separated expressions. + kTypeBinaryOperation = 3, + kBinaryOperationArrowParamList = (1 << 4), + // Below here applies if neither identifier nor string literal. Reserve the // 2 least significant bits for flags. - kThisExpression = 1 << 2, - kThisPropertyExpression = 2 << 2, - kPropertyExpression = 3 << 2 + kThisExpression = (1 << 4), + kThisPropertyExpression = (2 << 4), + kPropertyExpression = (3 << 4), + kCallExpression = (4 << 4) }; explicit PreParserExpression(int expression_code) : code_(expression_code) {} + V8_INLINE int ArrowParamListBit() const { + if (IsBinaryOperation()) return code_ & kBinaryOperationArrowParamList; + if (IsIdentifier()) { + const PreParserIdentifier ident = AsIdentifier(); + // A valid identifier can be an arrow function parameter list + // except for eval, arguments, yield, and reserved keywords. + if (ident.IsEval() || ident.IsArguments() || ident.IsYield() || + ident.IsFutureStrictReserved()) + return 0; + return kBinaryOperationArrowParamList; + } + return 0; + } + int code_; }; @@ -674,9 +835,71 @@ }; +class PreParserStatement { + public: + static PreParserStatement Default() { + return PreParserStatement(kUnknownStatement); + } + + static PreParserStatement FunctionDeclaration() { + return PreParserStatement(kFunctionDeclaration); + } + + // Creates expression statement from expression. + // Preserves being an unparenthesized string literal, possibly + // "use strict". 
+ static PreParserStatement ExpressionStatement( + PreParserExpression expression) { + if (expression.IsUseStrictLiteral()) { + return PreParserStatement(kUseStrictExpressionStatement); + } + if (expression.IsStringLiteral()) { + return PreParserStatement(kStringLiteralExpressionStatement); + } + return Default(); + } + + bool IsStringLiteral() { + return code_ == kStringLiteralExpressionStatement; + } + + bool IsUseStrictLiteral() { + return code_ == kUseStrictExpressionStatement; + } + + bool IsFunctionDeclaration() { + return code_ == kFunctionDeclaration; + } + + private: + enum Type { + kUnknownStatement, + kStringLiteralExpressionStatement, + kUseStrictExpressionStatement, + kFunctionDeclaration + }; + + explicit PreParserStatement(Type code) : code_(code) {} + Type code_; +}; + + + +// PreParserStatementList doesn't actually store the statements because +// the PreParser does not need them. +class PreParserStatementList { + public: + // These functions make list->Add(some_expression) work as no-ops. + PreParserStatementList() {} + PreParserStatementList* operator->() { return this; } + void Add(PreParserStatement, void*) {} +}; + + class PreParserScope { public: - explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type) + explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type, + void* = NULL) : scope_type_(scope_type) { strict_mode_ = outer_scope ? outer_scope->strict_mode() : SLOPPY; } @@ -685,6 +908,19 @@ StrictMode strict_mode() const { return strict_mode_; } void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; } + // When PreParser is in use, lazy compilation is already being done, + // things cannot get lazier than that. 
+ bool AllowsLazyCompilation() const { return false; } + + void set_start_position(int position) {} + void set_end_position(int position) {} + + bool IsDeclared(const PreParserIdentifier& identifier) const { return false; } + void DeclareParameter(const PreParserIdentifier& identifier, VariableMode) {} + + // Allow scope->Foo() to work. + PreParserScope* operator->() { return this; } + private: ScopeType scope_type_; StrictMode strict_mode_; @@ -693,9 +929,9 @@ class PreParserFactory { public: - explicit PreParserFactory(void* extra_param) {} - PreParserExpression NewLiteral(PreParserIdentifier identifier, - int pos) { + explicit PreParserFactory(void* extra_param1, void* extra_param2) {} + PreParserExpression NewStringLiteral(PreParserIdentifier identifier, + int pos) { return PreParserExpression::Default(); } PreParserExpression NewNumberLiteral(double number, @@ -748,7 +984,7 @@ PreParserExpression NewBinaryOperation(Token::Value op, PreParserExpression left, PreParserExpression right, int pos) { - return PreParserExpression::Default(); + return PreParserExpression::BinaryOperation(left, op, right); } PreParserExpression NewCompareOperation(Token::Value op, PreParserExpression left, @@ -782,13 +1018,38 @@ PreParserExpression NewCall(PreParserExpression expression, PreParserExpressionList arguments, int pos) { - return PreParserExpression::Default(); + return PreParserExpression::Call(); } PreParserExpression NewCallNew(PreParserExpression expression, PreParserExpressionList arguments, int pos) { return PreParserExpression::Default(); } + PreParserStatement NewReturnStatement(PreParserExpression expression, + int pos) { + return PreParserStatement::Default(); + } + PreParserExpression NewFunctionLiteral( + PreParserIdentifier name, AstValueFactory* ast_value_factory, + const PreParserScope& scope, PreParserStatementList body, + int materialized_literal_count, int expected_property_count, + int handler_count, int parameter_count, + FunctionLiteral::ParameterFlag 
has_duplicate_parameters, + FunctionLiteral::FunctionType function_type, + FunctionLiteral::IsFunctionFlag is_function, + FunctionLiteral::IsParenthesizedFlag is_parenthesized, + FunctionLiteral::KindFlag kind, int position) { + return PreParserExpression::Default(); + } + + // Return the object itself as AstVisitor and implement the needed + // dummy method right in this class. + PreParserFactory* visitor() { return this; } + BailoutReason dont_optimize_reason() { return kNoReason; } + int* ast_properties() { + static int dummy = 42; + return &dummy; + } }; @@ -803,11 +1064,23 @@ // Used by FunctionState and BlockState. typedef PreParserScope Scope; + typedef PreParserScope ScopePtr; + + class Checkpoint BASE_EMBEDDED { + public: + template <typename Parser> + explicit Checkpoint(Parser* parser) {} + void Restore() {} + }; + // PreParser doesn't need to store generator variables. typedef void GeneratorVariable; // No interaction with Zones. typedef void Zone; + typedef int AstProperties; + typedef Vector<PreParserIdentifier> ParameterIdentifierVector; + // Return types for traversing functions. typedef PreParserIdentifier Identifier; typedef PreParserExpression Expression; @@ -817,6 +1090,7 @@ typedef PreParserExpression Literal; typedef PreParserExpressionList ExpressionList; typedef PreParserExpressionList PropertyList; + typedef PreParserStatementList StatementList; // For constructing objects returned by the traversing functions. typedef PreParserFactory Factory; @@ -829,7 +1103,7 @@ template<typename FunctionState> static void SetUpFunctionState(FunctionState* function_state, void*) {} template<typename FunctionState> - static void TearDownFunctionState(FunctionState* function_state) {} + static void TearDownFunctionState(FunctionState* function_state, void*) {} // Helper functions for recursive descent. 
static bool IsEvalOrArguments(PreParserIdentifier identifier) { @@ -845,6 +1119,14 @@ return expression.IsIdentifier(); } + static PreParserIdentifier AsIdentifier(PreParserExpression expression) { + return expression.AsIdentifier(); + } + + static bool IsFutureStrictReserved(PreParserIdentifier identifier) { + return identifier.IsYield() || identifier.IsFutureStrictReserved(); + } + static bool IsBoilerplateProperty(PreParserExpression property) { // PreParser doesn't count boilerplate properties. return false; @@ -865,6 +1147,11 @@ // PreParser should not use FuncNameInferrer. UNREACHABLE(); } + static void InferFunctionName(FuncNameInferrer* fni, + PreParserExpression expression) { + // PreParser should not use FuncNameInferrer. + UNREACHABLE(); + } static void CheckFunctionLiteralInsideTopLevelObjectLiteral( PreParserScope* scope, PreParserExpression value, bool* has_function) {} @@ -876,17 +1163,13 @@ static void CheckPossibleEvalCall(PreParserExpression expression, PreParserScope* scope) {} - static PreParserExpression MarkExpressionAsLValue( + static PreParserExpression MarkExpressionAsAssigned( PreParserExpression expression) { // TODO(marja): To be able to produce the same errors, the preparser needs - // to start tracking which expressions are variables and which are lvalues. + // to start tracking which expressions are variables and which are assigned. return expression; } - // Checks LHS expression for assignment and prefix/postfix increment/decrement - // in strict mode. 
- void CheckStrictModeLValue(PreParserExpression expression, bool* ok); - bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x, PreParserExpression y, Token::Value op, @@ -901,28 +1184,45 @@ return PreParserExpression::Default(); } + PreParserExpression NewThrowReferenceError(const char* type, int pos) { + return PreParserExpression::Default(); + } + PreParserExpression NewThrowSyntaxError( + const char* type, Handle<Object> arg, int pos) { + return PreParserExpression::Default(); + } + PreParserExpression NewThrowTypeError( + const char* type, Handle<Object> arg, int pos) { + return PreParserExpression::Default(); + } + PreParserScope NewScope(PreParserScope* outer_scope, ScopeType scope_type) { + return PreParserScope(outer_scope, scope_type); + } + // Reporting errors. void ReportMessageAt(Scanner::Location location, const char* message, - Vector<const char*> args, - bool is_reference_error = false); - void ReportMessageAt(Scanner::Location location, - const char* type, - const char* name_opt, + const char* arg = NULL, bool is_reference_error = false); void ReportMessageAt(int start_pos, int end_pos, - const char* type, - const char* name_opt, + const char* message, + const char* arg = NULL, bool is_reference_error = false); // "null" return type creators. static PreParserIdentifier EmptyIdentifier() { return PreParserIdentifier::Default(); } + static PreParserIdentifier EmptyIdentifierString() { + return PreParserIdentifier::Default(); + } static PreParserExpression EmptyExpression() { return PreParserExpression::Default(); } + static PreParserExpression EmptyArrowParamList() { + return PreParserExpression::EmptyArrowParamList(); + } static PreParserExpression EmptyLiteral() { return PreParserExpression::Default(); } @@ -938,8 +1238,8 @@ // Producing data during the recursive descent. 
PreParserIdentifier GetSymbol(Scanner* scanner); - static PreParserIdentifier NextLiteralString(Scanner* scanner, - PretenureFlag tenured) { + + static PreParserIdentifier GetNextSymbol(Scanner* scanner) { return PreParserIdentifier::Default(); } @@ -964,14 +1264,48 @@ Scanner* scanner, PreParserFactory* factory = NULL); + PreParserExpression GetIterator(PreParserExpression iterable, + PreParserFactory* factory) { + return PreParserExpression::Default(); + } + static PreParserExpressionList NewExpressionList(int size, void* zone) { return PreParserExpressionList(); } + static PreParserStatementList NewStatementList(int size, void* zone) { + return PreParserStatementList(); + } + static PreParserExpressionList NewPropertyList(int size, void* zone) { return PreParserExpressionList(); } + V8_INLINE void SkipLazyFunctionBody(PreParserIdentifier function_name, + int* materialized_literal_count, + int* expected_property_count, bool* ok) { + UNREACHABLE(); + } + + V8_INLINE PreParserStatementList + ParseEagerFunctionBody(PreParserIdentifier function_name, int pos, + Variable* fvar, Token::Value fvar_init_op, + bool is_generator, bool* ok); + + // Utility functions + int DeclareArrowParametersFromExpression(PreParserExpression expression, + PreParserScope* scope, + Scanner::Location* dupe_loc, + bool* ok) { + // TODO(aperez): Detect duplicated identifiers in paramlists. + *ok = expression.IsValidArrowParamList(); + return 0; + } + + static AstValueFactory* ast_value_factory() { return NULL; } + + void CheckConflictingVarDeclarations(PreParserScope scope, bool* ok) {} + // Temporary glue; these functions will move to ParserBase. 
PreParserExpression ParseV8Intrinsic(bool* ok); PreParserExpression ParseFunctionLiteral( @@ -981,6 +1315,7 @@ bool is_generator, int function_token_position, FunctionLiteral::FunctionType type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok); private: @@ -1004,6 +1339,7 @@ public: typedef PreParserIdentifier Identifier; typedef PreParserExpression Expression; + typedef PreParserStatement Statement; enum PreParseResult { kPreParseStackOverflow, @@ -1020,7 +1356,7 @@ // during parsing. PreParseResult PreParseProgram() { PreParserScope scope(scope_, GLOBAL_SCOPE); - FunctionState top_scope(&function_state_, &scope_, &scope, NULL); + FunctionState top_scope(&function_state_, &scope_, &scope); bool ok = true; int start_position = scanner()->peek_location().beg_pos; ParseSourceElements(Token::EOS, &ok); @@ -1065,52 +1401,6 @@ kHasNoInitializers }; - class Statement { - public: - static Statement Default() { - return Statement(kUnknownStatement); - } - - static Statement FunctionDeclaration() { - return Statement(kFunctionDeclaration); - } - - // Creates expression statement from expression. - // Preserves being an unparenthesized string literal, possibly - // "use strict". 
- static Statement ExpressionStatement(Expression expression) { - if (expression.IsUseStrictLiteral()) { - return Statement(kUseStrictExpressionStatement); - } - if (expression.IsStringLiteral()) { - return Statement(kStringLiteralExpressionStatement); - } - return Default(); - } - - bool IsStringLiteral() { - return code_ == kStringLiteralExpressionStatement; - } - - bool IsUseStrictLiteral() { - return code_ == kUseStrictExpressionStatement; - } - - bool IsFunctionDeclaration() { - return code_ == kFunctionDeclaration; - } - - private: - enum Type { - kUnknownStatement, - kStringLiteralExpressionStatement, - kUseStrictExpressionStatement, - kFunctionDeclaration - }; - - explicit Statement(Type code) : code_(code) {} - Type code_; - }; enum SourceElements { kUnknownSourceElements @@ -1148,6 +1438,14 @@ Expression ParseObjectLiteral(bool* ok); Expression ParseV8Intrinsic(bool* ok); + V8_INLINE void SkipLazyFunctionBody(PreParserIdentifier function_name, + int* materialized_literal_count, + int* expected_property_count, bool* ok); + V8_INLINE PreParserStatementList + ParseEagerFunctionBody(PreParserIdentifier function_name, int pos, + Variable* fvar, Token::Value fvar_init_op, + bool is_generator, bool* ok); + Expression ParseFunctionLiteral( Identifier name, Scanner::Location function_name_location, @@ -1155,23 +1453,42 @@ bool is_generator, int function_token_pos, FunctionLiteral::FunctionType function_type, + FunctionLiteral::ArityRestriction arity_restriction, bool* ok); void ParseLazyFunctionLiteralBody(bool* ok); - // Logs the currently parsed literal as a symbol in the preparser data. - void LogSymbol(); - // Log the currently parsed string literal. 
- Expression GetStringSymbol(); - bool CheckInOrOf(bool accept_OF); }; + +PreParserStatementList PreParser::ParseEagerFunctionBody( + PreParserIdentifier function_name, int pos, Variable* fvar, + Token::Value fvar_init_op, bool is_generator, bool* ok) { + ParsingModeScope parsing_mode(this, PARSE_EAGERLY); + + ParseSourceElements(Token::RBRACE, ok); + if (!*ok) return PreParserStatementList(); + + Expect(Token::RBRACE, ok); + return PreParserStatementList(); +} + + +PreParserStatementList PreParserTraits::ParseEagerFunctionBody( + PreParserIdentifier function_name, int pos, Variable* fvar, + Token::Value fvar_init_op, bool is_generator, bool* ok) { + return pre_parser_->ParseEagerFunctionBody(function_name, pos, fvar, + fvar_init_op, is_generator, ok); +} + + template<class Traits> ParserBase<Traits>::FunctionState::FunctionState( FunctionState** function_state_stack, typename Traits::Type::Scope** scope_stack, typename Traits::Type::Scope* scope, - typename Traits::Type::Zone* extra_param) + typename Traits::Type::Zone* extra_param, + AstValueFactory* ast_value_factory) : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize), next_handler_index_(0), expected_property_count_(0), @@ -1181,20 +1498,45 @@ outer_function_state_(*function_state_stack), scope_stack_(scope_stack), outer_scope_(*scope_stack), - isolate_(NULL), saved_ast_node_id_(0), - factory_(extra_param) { + extra_param_(extra_param), + factory_(extra_param, ast_value_factory) { *scope_stack_ = scope; *function_state_stack = this; Traits::SetUpFunctionState(this, extra_param); } -template<class Traits> +template <class Traits> +ParserBase<Traits>::FunctionState::FunctionState( + FunctionState** function_state_stack, + typename Traits::Type::Scope** scope_stack, + typename Traits::Type::Scope** scope, + typename Traits::Type::Zone* extra_param, + AstValueFactory* ast_value_factory) + : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize), + next_handler_index_(0), + 
expected_property_count_(0), + is_generator_(false), + generator_object_variable_(NULL), + function_state_stack_(function_state_stack), + outer_function_state_(*function_state_stack), + scope_stack_(scope_stack), + outer_scope_(*scope_stack), + saved_ast_node_id_(0), + extra_param_(extra_param), + factory_(extra_param, ast_value_factory) { + *scope_stack_ = *scope; + *function_state_stack = this; + Traits::SetUpFunctionState(this, extra_param); +} + + +template <class Traits> ParserBase<Traits>::FunctionState::~FunctionState() { *scope_stack_ = outer_scope_; *function_state_stack_ = outer_function_state_; - Traits::TearDownFunctionState(this); + Traits::TearDownFunctionState(this, extra_param_); } @@ -1214,15 +1556,15 @@ return ReportMessageAt(source_location, "unexpected_token_identifier"); case Token::FUTURE_RESERVED_WORD: return ReportMessageAt(source_location, "unexpected_reserved"); + case Token::LET: case Token::YIELD: case Token::FUTURE_STRICT_RESERVED_WORD: return ReportMessageAt(source_location, strict_mode() == SLOPPY ? 
"unexpected_token_identifier" : "unexpected_strict_reserved"); default: const char* name = Token::String(token); - ASSERT(name != NULL); - Traits::ReportMessageAt( - source_location, "unexpected_token", Vector<const char*>(&name, 1)); + DCHECK(name != NULL); + Traits::ReportMessageAt(source_location, "unexpected_token", name); } } @@ -1236,12 +1578,13 @@ IdentifierT name = this->GetSymbol(scanner()); if (allow_eval_or_arguments == kDontAllowEvalOrArguments && strict_mode() == STRICT && this->IsEvalOrArguments(name)) { - ReportMessageAt(scanner()->location(), "strict_eval_arguments"); + ReportMessage("strict_eval_arguments"); *ok = false; } return name; } else if (strict_mode() == SLOPPY && (next == Token::FUTURE_STRICT_RESERVED_WORD || + (next == Token::LET) || (next == Token::YIELD && !is_generator()))) { return this->GetSymbol(scanner()); } else { @@ -1260,6 +1603,7 @@ if (next == Token::IDENTIFIER) { *is_strict_reserved = false; } else if (next == Token::FUTURE_STRICT_RESERVED_WORD || + next == Token::LET || (next == Token::YIELD && !this->is_generator())) { *is_strict_reserved = true; } else { @@ -1276,6 +1620,7 @@ ParserBase<Traits>::ParseIdentifierName(bool* ok) { Token::Value next = Next(); if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD && + next != Token::LET && next != Token::YIELD && next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) { this->ReportUnexpectedToken(next); *ok = false; @@ -1303,21 +1648,21 @@ int pos = peek_position(); if (!scanner()->ScanRegExpPattern(seen_equal)) { Next(); - ReportMessage("unterminated_regexp", Vector<const char*>::empty()); + ReportMessage("unterminated_regexp"); *ok = false; return Traits::EmptyExpression(); } int literal_index = function_state_->NextMaterializedLiteralIndex(); - IdentifierT js_pattern = this->NextLiteralString(scanner(), TENURED); + IdentifierT js_pattern = this->GetNextSymbol(scanner()); if (!scanner()->ScanRegExpFlags()) { Next(); - 
ReportMessageAt(scanner()->location(), "invalid_regexp_flags"); + ReportMessage("invalid_regexp_flags"); *ok = false; return Traits::EmptyExpression(); } - IdentifierT js_flags = this->NextLiteralString(scanner(), TENURED); + IdentifierT js_flags = this->GetNextSymbol(scanner()); Next(); return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos); } @@ -1371,6 +1716,7 @@ break; case Token::IDENTIFIER: + case Token::LET: case Token::YIELD: case Token::FUTURE_STRICT_RESERVED_WORD: { // Using eval or arguments in this context is OK even in strict mode. @@ -1403,11 +1749,20 @@ case Token::LPAREN: Consume(Token::LPAREN); - // Heuristically try to detect immediately called functions before - // seeing the call parentheses. - parenthesized_function_ = (peek() == Token::FUNCTION); - result = this->ParseExpression(true, CHECK_OK); - Expect(Token::RPAREN, CHECK_OK); + if (allow_arrow_functions() && peek() == Token::RPAREN) { + // Arrow functions are the only expression type constructions + // for which an empty parameter list "()" is valid input. + Consume(Token::RPAREN); + result = this->ParseArrowFunctionLiteral( + pos, this->EmptyArrowParamList(), CHECK_OK); + } else { + // Heuristically try to detect immediately called functions before + // seeing the call parentheses. + parenthesized_function_ = (peek() == Token::FUNCTION); + result = this->ParseExpression(true, CHECK_OK); + result->increase_parenthesization_level(); + Expect(Token::RPAREN, CHECK_OK); + } break; case Token::MOD: @@ -1486,7 +1841,7 @@ // ((IdentifierName | String | Number) ':' AssignmentExpression) | // (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral) // ) ',')* '}' - // (Except that trailing comma is not required and not allowed.) + // (Except that the trailing comma is not required.) 
int pos = peek_position(); typename Traits::Type::PropertyList properties = @@ -1508,6 +1863,8 @@ switch (next) { case Token::FUTURE_RESERVED_WORD: case Token::FUTURE_STRICT_RESERVED_WORD: + case Token::LET: + case Token::YIELD: case Token::IDENTIFIER: { bool is_getter = false; bool is_setter = false; @@ -1523,6 +1880,8 @@ if (next != i::Token::IDENTIFIER && next != i::Token::FUTURE_RESERVED_WORD && next != i::Token::FUTURE_STRICT_RESERVED_WORD && + next != i::Token::LET && + next != i::Token::YIELD && next != i::Token::NUMBER && next != i::Token::STRING && !Token::IsKeyword(next)) { @@ -1540,9 +1899,9 @@ false, // reserved words are allowed here false, // not a generator RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION, + is_getter ? FunctionLiteral::GETTER_ARITY + : FunctionLiteral::SETTER_ARITY, CHECK_OK); - // Allow any number of parameters for compatibilty with JSC. - // Specification only allows zero parameters for get and one for set. typename Traits::Type::ObjectLiteralProperty property = factory()->NewObjectLiteralProperty(is_getter, value, next_pos); if (this->IsBoilerplateProperty(property)) { @@ -1562,7 +1921,7 @@ } // Failed to parse as get/set property, so it's just a normal property // (which might be called "get" or "set" or something else). 
- key = factory()->NewLiteral(id, next_pos); + key = factory()->NewStringLiteral(id, next_pos); break; } case Token::STRING: { @@ -1574,7 +1933,7 @@ key = factory()->NewNumberLiteral(index, next_pos); break; } - key = factory()->NewLiteral(string, next_pos); + key = factory()->NewStringLiteral(string, next_pos); break; } case Token::NUMBER: { @@ -1587,7 +1946,7 @@ if (Token::IsKeyword(next)) { Consume(next); IdentifierT string = this->GetSymbol(scanner_); - key = factory()->NewLiteral(string, next_pos); + key = factory()->NewStringLiteral(string, next_pos); } else { Token::Value next = Next(); ReportUnexpectedToken(next); @@ -1617,7 +1976,6 @@ } properties->Add(property, zone()); - // TODO(1240767): Consider allowing trailing comma. if (peek() != Token::RBRACE) { // Need {} because of the CHECK_OK macro. Expect(Token::COMMA, CHECK_OK); @@ -1656,7 +2014,7 @@ true, CHECK_OK_CUSTOM(NullExpressionList)); result->Add(argument, zone_); if (result->length() > Code::kMaxArguments) { - ReportMessageAt(scanner()->location(), "too_many_arguments"); + ReportMessage("too_many_arguments"); *ok = false; return this->NullExpressionList(); } @@ -1676,6 +2034,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) { // AssignmentExpression :: // ConditionalExpression + // ArrowFunction // YieldExpression // LeftHandSideExpression AssignmentOperator AssignmentExpression @@ -1686,26 +2045,26 @@ } if (fni_ != NULL) fni_->Enter(); + ParserCheckpoint checkpoint(this); ExpressionT expression = this->ParseConditionalExpression(accept_IN, CHECK_OK); + if (allow_arrow_functions() && peek() == Token::ARROW) { + checkpoint.Restore(); + expression = this->ParseArrowFunctionLiteral(lhs_location.beg_pos, + expression, CHECK_OK); + return expression; + } + if (!Token::IsAssignmentOp(peek())) { if (fni_ != NULL) fni_->Leave(); // Parsed conditional expression only (no assignment). 
return expression; } - if (!expression->IsValidLeftHandSide()) { - this->ReportMessageAt(lhs_location, "invalid_lhs_in_assignment", true); - *ok = false; - return this->EmptyExpression(); - } - - if (strict_mode() == STRICT) { - // Assignment to eval or arguments is disallowed in strict mode. - this->CheckStrictModeLValue(expression, CHECK_OK); - } - expression = this->MarkExpressionAsLValue(expression); + expression = this->CheckAndRewriteReferenceExpression( + expression, lhs_location, "invalid_lhs_in_assignment", CHECK_OK); + expression = this->MarkExpressionAsAssigned(expression); Token::Value op = Next(); // Get assignment operator. int pos = position(); @@ -1744,15 +2103,40 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseYieldExpression(bool* ok) { // YieldExpression :: - // 'yield' '*'? AssignmentExpression + // 'yield' ([no line terminator] '*'? AssignmentExpression)? int pos = peek_position(); Expect(Token::YIELD, CHECK_OK); - Yield::Kind kind = - Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND; ExpressionT generator_object = factory()->NewVariableProxy(function_state_->generator_object_variable()); - ExpressionT expression = - ParseAssignmentExpression(false, CHECK_OK); + ExpressionT expression = Traits::EmptyExpression(); + Yield::Kind kind = Yield::SUSPEND; + if (!scanner()->HasAnyLineTerminatorBeforeNext()) { + if (Check(Token::MUL)) kind = Yield::DELEGATING; + switch (peek()) { + case Token::EOS: + case Token::SEMICOLON: + case Token::RBRACE: + case Token::RBRACK: + case Token::RPAREN: + case Token::COLON: + case Token::COMMA: + // The above set of tokens is the complete set of tokens that can appear + // after an AssignmentExpression, and none of them can start an + // AssignmentExpression. This allows us to avoid looking for an RHS for + // a Yield::SUSPEND operation, given only one look-ahead token. 
+ if (kind == Yield::SUSPEND) + break; + DCHECK(kind == Yield::DELEGATING); + // Delegating yields require an RHS; fall through. + default: + expression = ParseAssignmentExpression(false, CHECK_OK); + break; + } + } + if (kind == Yield::DELEGATING) { + // var iterator = subject[Symbol.iterator](); + expression = this->GetIterator(expression, factory()); + } typename Traits::Type::YieldExpression yield = factory()->NewYield(generator_object, expression, kind, pos); if (kind == Yield::DELEGATING) { @@ -1789,7 +2173,7 @@ template <class Traits> typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) { - ASSERT(prec >= 4); + DCHECK(prec >= 4); ExpressionT x = this->ParseUnaryExpression(CHECK_OK); for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) { // prec1 >= 4 @@ -1854,7 +2238,7 @@ // "delete identifier" is a syntax error in strict mode. if (op == Token::DELETE && strict_mode() == STRICT && this->IsIdentifier(expression)) { - ReportMessage("strict_delete", Vector<const char*>::empty()); + ReportMessage("strict_delete"); *ok = false; return this->EmptyExpression(); } @@ -1864,18 +2248,10 @@ } else if (Token::IsCountOp(op)) { op = Next(); Scanner::Location lhs_location = scanner()->peek_location(); - ExpressionT expression = ParseUnaryExpression(CHECK_OK); - if (!expression->IsValidLeftHandSide()) { - ReportMessageAt(lhs_location, "invalid_lhs_in_prefix_op", true); - *ok = false; - return this->EmptyExpression(); - } - - if (strict_mode() == STRICT) { - // Prefix expression operand in strict mode may not be eval or arguments. 
- this->CheckStrictModeLValue(expression, CHECK_OK); - } - this->MarkExpressionAsLValue(expression); + ExpressionT expression = this->ParseUnaryExpression(CHECK_OK); + expression = this->CheckAndRewriteReferenceExpression( + expression, lhs_location, "invalid_lhs_in_prefix_op", CHECK_OK); + this->MarkExpressionAsAssigned(expression); return factory()->NewCountOperation(op, true /* prefix */, @@ -1898,17 +2274,9 @@ ExpressionT expression = this->ParseLeftHandSideExpression(CHECK_OK); if (!scanner()->HasAnyLineTerminatorBeforeNext() && Token::IsCountOp(peek())) { - if (!expression->IsValidLeftHandSide()) { - ReportMessageAt(lhs_location, "invalid_lhs_in_postfix_op", true); - *ok = false; - return this->EmptyExpression(); - } - - if (strict_mode() == STRICT) { - // Postfix expression operand in strict mode may not be eval or arguments. - this->CheckStrictModeLValue(expression, CHECK_OK); - } - expression = this->MarkExpressionAsLValue(expression); + expression = this->CheckAndRewriteReferenceExpression( + expression, lhs_location, "invalid_lhs_in_postfix_op", CHECK_OK); + expression = this->MarkExpressionAsAssigned(expression); Token::Value next = Next(); expression = @@ -1980,7 +2348,7 @@ int pos = position(); IdentifierT name = ParseIdentifierName(CHECK_OK); result = factory()->NewProperty( - result, factory()->NewLiteral(name, pos), pos); + result, factory()->NewStringLiteral(name, pos), pos); if (fni_ != NULL) this->PushLiteralName(fni_, name); break; } @@ -2051,7 +2419,7 @@ Consume(Token::FUNCTION); int function_token_position = position(); bool is_generator = allow_generators() && Check(Token::MUL); - IdentifierT name; + IdentifierT name = this->EmptyIdentifier(); bool is_strict_reserved_name = false; Scanner::Location function_name_location = Scanner::Location::invalid(); FunctionLiteral::FunctionType function_type = @@ -2068,6 +2436,7 @@ is_generator, function_token_position, function_type, + FunctionLiteral::NORMAL_ARITY, CHECK_OK); } else { result = 
ParsePrimaryExpression(CHECK_OK); @@ -2102,7 +2471,7 @@ int pos = position(); IdentifierT name = ParseIdentifierName(CHECK_OK); expression = factory()->NewProperty( - expression, factory()->NewLiteral(name, pos), pos); + expression, factory()->NewStringLiteral(name, pos), pos); if (fni_ != NULL) { this->PushLiteralName(fni_, name); } @@ -2112,11 +2481,149 @@ return expression; } } - ASSERT(false); + DCHECK(false); return this->EmptyExpression(); } +template <class Traits> +typename ParserBase<Traits>::ExpressionT ParserBase< + Traits>::ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast, + bool* ok) { + // TODO(aperez): Change this to use ARROW_SCOPE + typename Traits::Type::ScopePtr scope = + this->NewScope(scope_, FUNCTION_SCOPE); + typename Traits::Type::StatementList body; + typename Traits::Type::AstProperties ast_properties; + BailoutReason dont_optimize_reason = kNoReason; + int num_parameters = -1; + int materialized_literal_count = -1; + int expected_property_count = -1; + int handler_count = 0; + + { + FunctionState function_state(&function_state_, &scope_, &scope, zone(), + this->ast_value_factory()); + Scanner::Location dupe_error_loc = Scanner::Location::invalid(); + num_parameters = Traits::DeclareArrowParametersFromExpression( + params_ast, scope_, &dupe_error_loc, ok); + if (!*ok) { + ReportMessageAt( + Scanner::Location(start_pos, scanner()->location().beg_pos), + "malformed_arrow_function_parameter_list"); + return this->EmptyExpression(); + } + + if (num_parameters > Code::kMaxArguments) { + ReportMessageAt(Scanner::Location(params_ast->position(), position()), + "too_many_parameters"); + *ok = false; + return this->EmptyExpression(); + } + + Expect(Token::ARROW, CHECK_OK); + + if (peek() == Token::LBRACE) { + // Multiple statemente body + Consume(Token::LBRACE); + bool is_lazily_parsed = + (mode() == PARSE_LAZILY && scope_->AllowsLazyCompilation()); + if (is_lazily_parsed) { + body = this->NewStatementList(0, zone()); + 
this->SkipLazyFunctionBody(this->EmptyIdentifier(), + &materialized_literal_count, + &expected_property_count, CHECK_OK); + } else { + body = this->ParseEagerFunctionBody( + this->EmptyIdentifier(), RelocInfo::kNoPosition, NULL, + Token::INIT_VAR, false, // Not a generator. + CHECK_OK); + materialized_literal_count = + function_state.materialized_literal_count(); + expected_property_count = function_state.expected_property_count(); + handler_count = function_state.handler_count(); + } + } else { + // Single-expression body + int pos = position(); + parenthesized_function_ = false; + ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK); + body = this->NewStatementList(1, zone()); + body->Add(factory()->NewReturnStatement(expression, pos), zone()); + materialized_literal_count = function_state.materialized_literal_count(); + expected_property_count = function_state.expected_property_count(); + handler_count = function_state.handler_count(); + } + + scope->set_start_position(start_pos); + scope->set_end_position(scanner()->location().end_pos); + + // Arrow function *parameter lists* are always checked as in strict mode. + bool function_name_is_strict_reserved = false; + Scanner::Location function_name_loc = Scanner::Location::invalid(); + Scanner::Location eval_args_error_loc = Scanner::Location::invalid(); + Scanner::Location reserved_loc = Scanner::Location::invalid(); + this->CheckStrictFunctionNameAndParameters( + this->EmptyIdentifier(), function_name_is_strict_reserved, + function_name_loc, eval_args_error_loc, dupe_error_loc, reserved_loc, + CHECK_OK); + + // Validate strict mode. 
+ if (strict_mode() == STRICT) { + CheckOctalLiteral(start_pos, scanner()->location().end_pos, CHECK_OK); + } + + if (allow_harmony_scoping() && strict_mode() == STRICT) + this->CheckConflictingVarDeclarations(scope, CHECK_OK); + + ast_properties = *factory()->visitor()->ast_properties(); + dont_optimize_reason = factory()->visitor()->dont_optimize_reason(); + } + + FunctionLiteralT function_literal = factory()->NewFunctionLiteral( + this->EmptyIdentifierString(), this->ast_value_factory(), scope, body, + materialized_literal_count, expected_property_count, handler_count, + num_parameters, FunctionLiteral::kNoDuplicateParameters, + FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction, + FunctionLiteral::kNotParenthesized, FunctionLiteral::kArrowFunction, + start_pos); + + function_literal->set_function_token_position(start_pos); + function_literal->set_ast_properties(&ast_properties); + function_literal->set_dont_optimize_reason(dont_optimize_reason); + + if (fni_ != NULL) this->InferFunctionName(fni_, function_literal); + + return function_literal; +} + + +template <typename Traits> +typename ParserBase<Traits>::ExpressionT +ParserBase<Traits>::CheckAndRewriteReferenceExpression( + ExpressionT expression, + Scanner::Location location, const char* message, bool* ok) { + if (strict_mode() == STRICT && this->IsIdentifier(expression) && + this->IsEvalOrArguments(this->AsIdentifier(expression))) { + this->ReportMessageAt(location, "strict_eval_arguments", false); + *ok = false; + return this->EmptyExpression(); + } else if (expression->IsValidReferenceExpression()) { + return expression; + } else if (expression->IsCall()) { + // If it is a call, make it a runtime error for legacy web compatibility. + // Rewrite `expr' to `expr[throw ReferenceError]'. 
+ int pos = location.beg_pos; + ExpressionT error = this->NewThrowReferenceError(message, pos); + return factory()->NewProperty(expression, error, pos); + } else { + this->ReportMessageAt(location, message, true); + *ok = false; + return this->EmptyExpression(); + } +} + + #undef CHECK_OK #undef CHECK_OK_CUSTOM @@ -2137,17 +2644,14 @@ if (IsDataDataConflict(old_type, type)) { // Both are data properties. if (strict_mode_ == SLOPPY) return; - parser()->ReportMessageAt(scanner()->location(), - "strict_duplicate_property"); + parser()->ReportMessage("strict_duplicate_property"); } else if (IsDataAccessorConflict(old_type, type)) { // Both a data and an accessor property with the same name. - parser()->ReportMessageAt(scanner()->location(), - "accessor_data_property"); + parser()->ReportMessage("accessor_data_property"); } else { - ASSERT(IsAccessorAccessorConflict(old_type, type)); + DCHECK(IsAccessorAccessorConflict(old_type, type)); // Both accessors of the same type. - parser()->ReportMessageAt(scanner()->location(), - "accessor_get_set"); + parser()->ReportMessage("accessor_get_set"); } *ok = false; } diff -Nru nodejs-0.11.13/deps/v8/src/prettyprinter.cc nodejs-0.11.15/deps/v8/src/prettyprinter.cc --- nodejs-0.11.13/deps/v8/src/prettyprinter.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/prettyprinter.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <stdarg.h> -#include "v8.h" +#include "src/v8.h" -#include "prettyprinter.h" -#include "scopes.h" -#include "platform.h" +#include "src/ast-value-factory.h" +#include "src/base/platform/platform.h" +#include "src/prettyprinter.h" +#include "src/scopes.h" namespace v8 { namespace internal { @@ -156,10 +134,10 @@ void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) { Print("continue"); - ZoneStringList* labels = node->target()->labels(); + ZoneList<const AstRawString*>* labels = node->target()->labels(); if (labels != NULL) { Print(" "); - ASSERT(labels->length() > 0); // guaranteed to have at least one entry + DCHECK(labels->length() > 0); // guaranteed to have at least one entry PrintLiteral(labels->at(0), false); // any label from the list is fine } Print(";"); @@ -168,10 +146,10 @@ void PrettyPrinter::VisitBreakStatement(BreakStatement* node) { Print("break"); - ZoneStringList* labels = node->target()->labels(); + ZoneList<const AstRawString*>* labels = node->target()->labels(); if (labels != NULL) { Print(" "); - ASSERT(labels->length() > 0); // guaranteed to have at least one entry + DCHECK(labels->length() > 0); // guaranteed to have at least one entry PrintLiteral(labels->at(0), false); // any label from the list is fine } Print(";"); @@ -501,7 +479,7 @@ void PrettyPrinter::Init() { if (size_ == 0) { - ASSERT(output_ == NULL); + DCHECK(output_ == NULL); const int initial_size = 256; output_ = NewArray<char>(initial_size); size_ = initial_size; @@ -515,9 +493,9 @@ for (;;) { va_list arguments; va_start(arguments, format); - int n = OS::VSNPrintF(Vector<char>(output_, size_) + pos_, - format, - arguments); + int n = VSNPrintF(Vector<char>(output_, size_) + pos_, + format, + arguments); va_end(arguments); if (n >= 0) { @@ -529,7 +507,7 @@ const int slack = 32; int new_size = size_ + (size_ >> 1) + slack; char* new_output = NewArray<char>(new_size); - OS::MemCopy(new_output, output_, pos_); + MemCopy(new_output, output_, pos_); 
DeleteArray(output_); output_ = new_output; size_ = new_size; @@ -547,7 +525,7 @@ } -void PrettyPrinter::PrintLabels(ZoneStringList* labels) { +void PrettyPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) { if (labels != NULL) { for (int i = 0; i < labels->length(); i++) { PrintLiteral(labels->at(i), false); @@ -605,6 +583,11 @@ } +void PrettyPrinter::PrintLiteral(const AstRawString* value, bool quote) { + PrintLiteral(value->string(), quote); +} + + void PrettyPrinter::PrintParameters(Scope* scope) { Print("("); for (int i = 0; i < scope->num_parameters(); i++) { @@ -662,7 +645,7 @@ AstPrinter::~AstPrinter() { - ASSERT(indent_ == 0); + DCHECK(indent_ == 0); } @@ -691,15 +674,15 @@ PrintLiteralIndented(info, value, true); } else { EmbeddedVector<char, 256> buf; - int pos = OS::SNPrintF(buf, "%s (mode = %s", info, - Variable::Mode2String(var->mode())); - OS::SNPrintF(buf + pos, ")"); + int pos = SNPrintF(buf, "%s (mode = %s", info, + Variable::Mode2String(var->mode())); + SNPrintF(buf + pos, ")"); PrintLiteralIndented(buf.start(), value, true); } } -void AstPrinter::PrintLabelsIndented(ZoneStringList* labels) { +void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) { if (labels == NULL || labels->length() == 0) return; PrintIndented("LABELS "); PrintLabels(labels); @@ -1056,21 +1039,21 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) { Variable* var = node->var(); EmbeddedVector<char, 128> buf; - int pos = OS::SNPrintF(buf, "VAR PROXY"); + int pos = SNPrintF(buf, "VAR PROXY"); switch (var->location()) { case Variable::UNALLOCATED: break; case Variable::PARAMETER: - OS::SNPrintF(buf + pos, " parameter[%d]", var->index()); + SNPrintF(buf + pos, " parameter[%d]", var->index()); break; case Variable::LOCAL: - OS::SNPrintF(buf + pos, " local[%d]", var->index()); + SNPrintF(buf + pos, " local[%d]", var->index()); break; case Variable::CONTEXT: - OS::SNPrintF(buf + pos, " context[%d]", var->index()); + SNPrintF(buf + pos, " 
context[%d]", var->index()); break; case Variable::LOOKUP: - OS::SNPrintF(buf + pos, " lookup"); + SNPrintF(buf + pos, " lookup"); break; } PrintLiteralWithModeIndented(buf.start(), var, node->name()); @@ -1137,8 +1120,8 @@ void AstPrinter::VisitCountOperation(CountOperation* node) { EmbeddedVector<char, 128> buf; - OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"), - Token::Name(node->op())); + SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"), + Token::Name(node->op())); IndentedScope indent(this, buf.start()); Visit(node->expression()); } diff -Nru nodejs-0.11.13/deps/v8/src/prettyprinter.h nodejs-0.11.15/deps/v8/src/prettyprinter.h --- nodejs-0.11.13/deps/v8/src/prettyprinter.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/prettyprinter.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PRETTYPRINTER_H_ #define V8_PRETTYPRINTER_H_ -#include "allocation.h" -#include "ast.h" +#include "src/allocation.h" +#include "src/ast.h" namespace v8 { namespace internal { @@ -67,9 +44,10 @@ const char* Output() const { return output_; } virtual void PrintStatements(ZoneList<Statement*>* statements); - void PrintLabels(ZoneStringList* labels); + void PrintLabels(ZoneList<const AstRawString*>* labels); virtual void PrintArguments(ZoneList<Expression*>* arguments); void PrintLiteral(Handle<Object> value, bool quote); + void PrintLiteral(const AstRawString* value, bool quote); void PrintParameters(Scope* scope); void PrintDeclarations(ZoneList<Declaration*>* declarations); void PrintFunctionLiteral(FunctionLiteral* function); @@ -106,7 +84,7 @@ void PrintLiteralWithModeIndented(const char* info, Variable* var, Handle<Object> value); - void PrintLabelsIndented(ZoneStringList* labels); + void PrintLabelsIndented(ZoneList<const AstRawString*>* labels); void inc_indent() { indent_++; } void dec_indent() { indent_--; } diff -Nru nodejs-0.11.13/deps/v8/src/profile-generator.cc nodejs-0.11.15/deps/v8/src/profile-generator.cc --- nodejs-0.11.13/deps/v8/src/profile-generator.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/profile-generator.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,18 @@ // 
Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "profile-generator-inl.h" - -#include "compiler.h" -#include "debug.h" -#include "sampler.h" -#include "global-handles.h" -#include "scopeinfo.h" -#include "unicode.h" -#include "zone-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/profile-generator-inl.h" + +#include "src/compiler.h" +#include "src/debug.h" +#include "src/global-handles.h" +#include "src/sampler.h" +#include "src/scopeinfo.h" +#include "src/unicode.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { @@ -66,7 +43,7 @@ HashMap::Entry* entry = GetEntry(src, len); if (entry->value == NULL) { Vector<char> dst = Vector<char>::New(len + 1); - OS::StrNCpy(dst, src, len); + StrNCpy(dst, src, len); dst[len] = '\0'; entry->key = dst.start(); entry->value = entry->key; @@ -99,7 +76,7 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) { Vector<char> str = Vector<char>::New(1024); - int len = OS::VSNPrintF(str, format, args); + int len = VSNPrintF(str, format, args); if (len == -1) { DeleteArray(str.start()); return GetCopy(format); @@ -130,17 +107,12 @@ const char* StringsStorage::GetFunctionName(Name* name) { - return BeautifyFunctionName(GetName(name)); + return GetName(name); } const char* StringsStorage::GetFunctionName(const char* name) { - return BeautifyFunctionName(GetCopy(name)); -} - - -const char* StringsStorage::BeautifyFunctionName(const char* name) { - return (*name == 0) ? 
ProfileGenerator::kAnonymousFunctionName : name; + return GetCopy(name); } @@ -231,17 +203,12 @@ void ProfileNode::Print(int indent) { - OS::Print("%5u %*c %s%s %d #%d %s", - self_ticks_, - indent, ' ', - entry_->name_prefix(), - entry_->name(), - entry_->script_id(), - id(), - entry_->bailout_reason()); + base::OS::Print("%5u %*s %s%s %d #%d %s", self_ticks_, indent, "", + entry_->name_prefix(), entry_->name(), entry_->script_id(), + id(), entry_->bailout_reason()); if (entry_->resource_name()[0] != '\0') - OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number()); - OS::Print("\n"); + base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number()); + base::OS::Print("\n"); for (HashMap::Entry* p = children_.Start(); p != NULL; p = children_.Next(p)) { @@ -355,24 +322,27 @@ CpuProfile::CpuProfile(const char* title, bool record_samples) : title_(title), record_samples_(record_samples), - start_time_(Time::NowFromSystemTime()) { - timer_.Start(); + start_time_(base::TimeTicks::HighResolutionNow()) { } -void CpuProfile::AddPath(const Vector<CodeEntry*>& path) { +void CpuProfile::AddPath(base::TimeTicks timestamp, + const Vector<CodeEntry*>& path) { ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path); - if (record_samples_) samples_.Add(top_frame_node); + if (record_samples_) { + timestamps_.Add(timestamp); + samples_.Add(top_frame_node); + } } void CpuProfile::CalculateTotalTicksAndSamplingRate() { - end_time_ = start_time_ + timer_.Elapsed(); + end_time_ = base::TimeTicks::HighResolutionNow(); } void CpuProfile::Print() { - OS::Print("[Top down]:\n"); + base::OS::Print("[Top down]:\n"); top_down_.Print(); } @@ -424,7 +394,7 @@ // For shared function entries, 'size' field is used to store their IDs. 
if (tree_.Find(addr, &locator)) { const CodeEntryInfo& entry = locator.value(); - ASSERT(entry.entry == kSharedFunctionCodeEntry); + DCHECK(entry.entry == kSharedFunctionCodeEntry); return entry.size; } else { tree_.Insert(addr, &locator); @@ -449,9 +419,9 @@ const Address& key, const CodeMap::CodeEntryInfo& value) { // For shared function entries, 'size' field is used to store their IDs. if (value.entry == kSharedFunctionCodeEntry) { - OS::Print("%p SharedFunctionInfo %d\n", key, value.size); + base::OS::Print("%p SharedFunctionInfo %d\n", key, value.size); } else { - OS::Print("%p %5d %s\n", key, value.size, value.entry->name()); + base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name()); } } @@ -494,9 +464,10 @@ } for (int i = 0; i < current_profiles_.length(); ++i) { if (strcmp(current_profiles_[i]->title(), title) == 0) { - // Ignore attempts to start profile with the same title. + // Ignore attempts to start profile with the same title... current_profiles_semaphore_.Signal(); - return false; + // ... though return true to force it collect a sample. + return true; } } current_profiles_.Add(new CpuProfile(title, record_samples)); @@ -546,13 +517,13 @@ void CpuProfilesCollection::AddPathToCurrentProfiles( - const Vector<CodeEntry*>& path) { + base::TimeTicks timestamp, const Vector<CodeEntry*>& path) { // As starting / stopping profiles is rare relatively to this // method, we don't bother minimizing the duration of lock holding, // e.g. copying contents of the list to a local vector. 
current_profiles_semaphore_.Wait(); for (int i = 0; i < current_profiles_.length(); ++i) { - current_profiles_[i]->AddPath(path); + current_profiles_[i]->AddPath(timestamp, path); } current_profiles_semaphore_.Signal(); } @@ -576,8 +547,6 @@ } -const char* const ProfileGenerator::kAnonymousFunctionName = - "(anonymous function)"; const char* const ProfileGenerator::kProgramEntryName = "(program)"; const char* const ProfileGenerator::kIdleEntryName = @@ -675,7 +644,7 @@ } } - profiles_->AddPathToCurrentProfiles(entries); + profiles_->AddPathToCurrentProfiles(sample.timestamp, entries); } diff -Nru nodejs-0.11.13/deps/v8/src/profile-generator.h nodejs-0.11.15/deps/v8/src/profile-generator.h --- nodejs-0.11.13/deps/v8/src/profile-generator.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/profile-generator.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PROFILE_GENERATOR_H_ #define V8_PROFILE_GENERATOR_H_ -#include "allocation.h" -#include "hashmap.h" -#include "../include/v8-profiler.h" +#include "include/v8-profiler.h" +#include "src/allocation.h" +#include "src/hashmap.h" namespace v8 { namespace internal { @@ -57,7 +34,6 @@ static const int kMaxNameSize = 1024; static bool StringsMatch(void* key1, void* key2); - const char* BeautifyFunctionName(const char* name); const char* AddOrDisposeString(char* str, int len); HashMap::Entry* GetEntry(const char* str, int len); @@ -199,7 +175,7 @@ CpuProfile(const char* title, bool record_samples); // Add pc -> ... -> main() call path to the profile. 
- void AddPath(const Vector<CodeEntry*>& path); + void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path); void CalculateTotalTicksAndSamplingRate(); const char* title() const { return title_; } @@ -207,9 +183,12 @@ int samples_count() const { return samples_.length(); } ProfileNode* sample(int index) const { return samples_.at(index); } + base::TimeTicks sample_timestamp(int index) const { + return timestamps_.at(index); + } - Time start_time() const { return start_time_; } - Time end_time() const { return end_time_; } + base::TimeTicks start_time() const { return start_time_; } + base::TimeTicks end_time() const { return end_time_; } void UpdateTicksScale(); @@ -218,10 +197,10 @@ private: const char* title_; bool record_samples_; - Time start_time_; - Time end_time_; - ElapsedTimer timer_; + base::TimeTicks start_time_; + base::TimeTicks end_time_; List<ProfileNode*> samples_; + List<base::TimeTicks> timestamps_; ProfileTree top_down_; DISALLOW_COPY_AND_ASSIGN(CpuProfile); @@ -306,7 +285,8 @@ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo); // Called from profile generator thread. - void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path); + void AddPathToCurrentProfiles( + base::TimeTicks timestamp, const Vector<CodeEntry*>& path); // Limits the number of profiles that can be simultaneously collected. static const int kMaxSimultaneousProfiles = 100; @@ -318,7 +298,7 @@ // Accessed by VM thread and profile generator thread. 
List<CpuProfile*> current_profiles_; - Semaphore current_profiles_semaphore_; + base::Semaphore current_profiles_semaphore_; DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection); }; @@ -332,7 +312,6 @@ CodeMap* code_map() { return &code_map_; } - static const char* const kAnonymousFunctionName; static const char* const kProgramEntryName; static const char* const kIdleEntryName; static const char* const kGarbageCollectorEntryName; diff -Nru nodejs-0.11.13/deps/v8/src/profile-generator-inl.h nodejs-0.11.15/deps/v8/src/profile-generator-inl.h --- nodejs-0.11.13/deps/v8/src/profile-generator-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/profile-generator-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PROFILE_GENERATOR_INL_H_ #define V8_PROFILE_GENERATOR_INL_H_ -#include "profile-generator.h" +#include "src/profile-generator.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/promise.js nodejs-0.11.15/deps/v8/src/promise.js --- nodejs-0.11.13/deps/v8/src/promise.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/promise.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,30 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. "use strict"; @@ -33,25 +9,20 @@ // var $Object = global.Object // var $WeakMap = global.WeakMap +// For bootstrapper. -var $Promise = function Promise(resolver) { - if (resolver === promiseRaw) return; - if (!%_IsConstructCall()) throw MakeTypeError('not_a_promise', [this]); - if (typeof resolver !== 'function') - throw MakeTypeError('resolver_not_a_function', [resolver]); - var promise = PromiseInit(this); - try { - resolver(function(x) { PromiseResolve(promise, x) }, - function(r) { PromiseReject(promise, r) }); - } catch (e) { - PromiseReject(promise, e); - } -} - - -//------------------------------------------------------------------- - -// Core functionality. +var IsPromise; +var PromiseCreate; +var PromiseResolve; +var PromiseReject; +var PromiseChain; +var PromiseCatch; +var PromiseThen; +var PromiseHasRejectHandler; + +// mirror-debugger.js currently uses builtins.promiseStatus. It would be nice +// if we could move these property names into the closure below. +// TODO(jkummerow/rossberg/yangguo): Find a better solution. 
// Status values: 0 = pending, +1 = resolved, -1 = rejected var promiseStatus = GLOBAL_PRIVATE("Promise#status"); @@ -59,183 +30,63 @@ var promiseOnResolve = GLOBAL_PRIVATE("Promise#onResolve"); var promiseOnReject = GLOBAL_PRIVATE("Promise#onReject"); var promiseRaw = GLOBAL_PRIVATE("Promise#raw"); +var promiseDebug = GLOBAL_PRIVATE("Promise#debug"); +var lastMicrotaskId = 0; -function IsPromise(x) { - return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus); -} - -function PromiseSet(promise, status, value, onResolve, onReject) { - SET_PRIVATE(promise, promiseStatus, status); - SET_PRIVATE(promise, promiseValue, value); - SET_PRIVATE(promise, promiseOnResolve, onResolve); - SET_PRIVATE(promise, promiseOnReject, onReject); - return promise; -} - -function PromiseInit(promise) { - return PromiseSet(promise, 0, UNDEFINED, new InternalArray, new InternalArray) -} - -function PromiseDone(promise, status, value, promiseQueue) { - if (GET_PRIVATE(promise, promiseStatus) === 0) { - PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue)); - PromiseSet(promise, status, value); - } -} - -function PromiseResolve(promise, x) { - PromiseDone(promise, +1, x, promiseOnResolve) -} - -function PromiseReject(promise, r) { - PromiseDone(promise, -1, r, promiseOnReject) -} - - -// For API. - -function PromiseNopResolver() {} - -function PromiseCreate() { - return new $Promise(PromiseNopResolver) -} - - -// Convenience. - -function PromiseDeferred() { - if (this === $Promise) { - // Optimized case, avoid extra closure. 
- var promise = PromiseInit(new $Promise(promiseRaw)); - return { - promise: promise, - resolve: function(x) { PromiseResolve(promise, x) }, - reject: function(r) { PromiseReject(promise, r) } - }; - } else { - var result = {}; - result.promise = new this(function(resolve, reject) { - result.resolve = resolve; - result.reject = reject; - }) - return result; - } -} - -function PromiseResolved(x) { - if (this === $Promise) { - // Optimized case, avoid extra closure. - return PromiseSet(new $Promise(promiseRaw), +1, x); - } else { - return new this(function(resolve, reject) { resolve(x) }); - } -} - -function PromiseRejected(r) { - if (this === $Promise) { - // Optimized case, avoid extra closure. - return PromiseSet(new $Promise(promiseRaw), -1, r); - } else { - return new this(function(resolve, reject) { reject(r) }); - } -} - - -// Simple chaining. - -function PromiseIdResolveHandler(x) { return x } -function PromiseIdRejectHandler(r) { throw r } - -function PromiseChain(onResolve, onReject) { // a.k.a. flatMap - onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve; - onReject = IS_UNDEFINED(onReject) ? 
PromiseIdRejectHandler : onReject; - var deferred = %_CallFunction(this.constructor, PromiseDeferred); - switch (GET_PRIVATE(this, promiseStatus)) { - case UNDEFINED: - throw MakeTypeError('not_a_promise', [this]); - case 0: // Pending - GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred); - GET_PRIVATE(this, promiseOnReject).push(onReject, deferred); - break; - case +1: // Resolved - PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onResolve, deferred]); - break; - case -1: // Rejected - PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onReject, deferred]); - break; - } - return deferred.promise; -} - -function PromiseCatch(onReject) { - return this.then(UNDEFINED, onReject); -} - -function PromiseEnqueue(value, tasks) { - GetMicrotaskQueue().push(function() { - for (var i = 0; i < tasks.length; i += 2) { - PromiseHandle(value, tasks[i], tasks[i + 1]) - } - }); - - %SetMicrotaskPending(true); -} - -function PromiseHandle(value, handler, deferred) { - try { - var result = handler(value); - if (result === deferred.promise) - throw MakeTypeError('promise_cyclic', [result]); - else if (IsPromise(result)) - %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain); - else - deferred.resolve(result); - } catch(e) { - // TODO(rossberg): perhaps log uncaught exceptions below. - try { deferred.reject(e) } catch(e) {} - } -} - - -// Multi-unwrapped chaining with thenable coercion. - -function PromiseThen(onResolve, onReject) { - onResolve = - IS_NULL_OR_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve; - onReject = - IS_NULL_OR_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject; - var that = this; - var constructor = this.constructor; - return %_CallFunction( - this, - function(x) { - x = PromiseCoerce(constructor, x); - return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) : - IsPromise(x) ? 
x.then(onResolve, onReject) : onResolve(x); - }, - onReject, - PromiseChain - ); -} - -PromiseCoerce.table = new $WeakMap; - -function PromiseCoerce(constructor, x) { - if (!IsPromise(x) && IS_SPEC_OBJECT(x)) { - var then; +(function() { + + var $Promise = function Promise(resolver) { + if (resolver === promiseRaw) return; + if (!%_IsConstructCall()) throw MakeTypeError('not_a_promise', [this]); + if (!IS_SPEC_FUNCTION(resolver)) + throw MakeTypeError('resolver_not_a_function', [resolver]); + var promise = PromiseInit(this); try { - then = x.then; - } catch(r) { - var promise = %_CallFunction(constructor, r, PromiseRejected); - PromiseCoerce.table.set(x, promise); - return promise; - } - if (typeof then === 'function') { - if (PromiseCoerce.table.has(x)) { - return PromiseCoerce.table.get(x); - } else { + %DebugPushPromise(promise); + resolver(function(x) { PromiseResolve(promise, x) }, + function(r) { PromiseReject(promise, r) }); + } catch (e) { + PromiseReject(promise, e); + } finally { + %DebugPopPromise(); + } + } + + // Core functionality. 
+ + function PromiseSet(promise, status, value, onResolve, onReject) { + SET_PRIVATE(promise, promiseStatus, status); + SET_PRIVATE(promise, promiseValue, value); + SET_PRIVATE(promise, promiseOnResolve, onResolve); + SET_PRIVATE(promise, promiseOnReject, onReject); + if (DEBUG_IS_ACTIVE) { + %DebugPromiseEvent({ promise: promise, status: status, value: value }); + } + return promise; + } + + function PromiseInit(promise) { + return PromiseSet( + promise, 0, UNDEFINED, new InternalArray, new InternalArray) + } + + function PromiseDone(promise, status, value, promiseQueue) { + if (GET_PRIVATE(promise, promiseStatus) === 0) { + PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue), status); + PromiseSet(promise, status, value); + } + } + + function PromiseCoerce(constructor, x) { + if (!IsPromise(x) && IS_SPEC_OBJECT(x)) { + var then; + try { + then = x.then; + } catch(r) { + return %_CallFunction(constructor, r, PromiseRejected); + } + if (IS_SPEC_FUNCTION(then)) { var deferred = %_CallFunction(constructor, PromiseDeferred); - PromiseCoerce.table.set(x, deferred.promise); try { %_CallFunction(x, deferred.resolve, deferred.reject, then); } catch(r) { @@ -244,70 +95,248 @@ return deferred.promise; } } + return x; } - return x; -} + function PromiseHandle(value, handler, deferred) { + try { + %DebugPushPromise(deferred.promise); + var result = handler(value); + if (result === deferred.promise) + throw MakeTypeError('promise_cyclic', [result]); + else if (IsPromise(result)) + %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain); + else + deferred.resolve(result); + } catch (exception) { + try { deferred.reject(exception); } catch (e) { } + } finally { + %DebugPopPromise(); + } + } + + function PromiseEnqueue(value, tasks, status) { + var id, name, instrumenting = DEBUG_IS_ACTIVE; + %EnqueueMicrotask(function() { + if (instrumenting) { + %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name }); + } + for (var i = 0; i < tasks.length; i += 2) 
{ + PromiseHandle(value, tasks[i], tasks[i + 1]) + } + if (instrumenting) { + %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name }); + } + }); + if (instrumenting) { + id = ++lastMicrotaskId; + name = status > 0 ? "Promise.resolve" : "Promise.reject"; + %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name }); + } + } -// Combinators. + function PromiseIdResolveHandler(x) { return x } + function PromiseIdRejectHandler(r) { throw r } -function PromiseCast(x) { - // TODO(rossberg): cannot do better until we support @@create. - return IsPromise(x) ? x : new this(function(resolve) { resolve(x) }); -} - -function PromiseAll(values) { - var deferred = %_CallFunction(this, PromiseDeferred); - var resolutions = []; - if (!%_IsArray(values)) { - deferred.reject(MakeTypeError('invalid_argument')); - return deferred.promise; + function PromiseNopResolver() {} + + // ------------------------------------------------------------------- + // Define exported functions. + + // For bootstrapper. + + IsPromise = function IsPromise(x) { + return IS_SPEC_OBJECT(x) && HAS_PRIVATE(x, promiseStatus); + } + + PromiseCreate = function PromiseCreate() { + return new $Promise(PromiseNopResolver) + } + + PromiseResolve = function PromiseResolve(promise, x) { + PromiseDone(promise, +1, x, promiseOnResolve) + } + + PromiseReject = function PromiseReject(promise, r) { + // Check promise status to confirm that this reject has an effect. + // Check promiseDebug property to avoid duplicate event. + if (DEBUG_IS_ACTIVE && + GET_PRIVATE(promise, promiseStatus) == 0 && + !HAS_PRIVATE(promise, promiseDebug)) { + %DebugPromiseRejectEvent(promise, r); + } + PromiseDone(promise, -1, r, promiseOnReject) } - try { - var count = values.length; - if (count === 0) { - deferred.resolve(resolutions); + + // Convenience. + + function PromiseDeferred() { + if (this === $Promise) { + // Optimized case, avoid extra closure. 
+ var promise = PromiseInit(new $Promise(promiseRaw)); + return { + promise: promise, + resolve: function(x) { PromiseResolve(promise, x) }, + reject: function(r) { PromiseReject(promise, r) } + }; + } else { + var result = {}; + result.promise = new this(function(resolve, reject) { + result.resolve = resolve; + result.reject = reject; + }) + return result; + } + } + + function PromiseResolved(x) { + if (this === $Promise) { + // Optimized case, avoid extra closure. + return PromiseSet(new $Promise(promiseRaw), +1, x); + } else { + return new this(function(resolve, reject) { resolve(x) }); + } + } + + function PromiseRejected(r) { + if (this === $Promise) { + // Optimized case, avoid extra closure. + return PromiseSet(new $Promise(promiseRaw), -1, r); } else { + return new this(function(resolve, reject) { reject(r) }); + } + } + + // Simple chaining. + + PromiseChain = function PromiseChain(onResolve, onReject) { // a.k.a. + // flatMap + onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve; + onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject; + var deferred = %_CallFunction(this.constructor, PromiseDeferred); + switch (GET_PRIVATE(this, promiseStatus)) { + case UNDEFINED: + throw MakeTypeError('not_a_promise', [this]); + case 0: // Pending + GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred); + GET_PRIVATE(this, promiseOnReject).push(onReject, deferred); + break; + case +1: // Resolved + PromiseEnqueue(GET_PRIVATE(this, promiseValue), + [onResolve, deferred], + +1); + break; + case -1: // Rejected + PromiseEnqueue(GET_PRIVATE(this, promiseValue), + [onReject, deferred], + -1); + break; + } + if (DEBUG_IS_ACTIVE) { + %DebugPromiseEvent({ promise: deferred.promise, parentPromise: this }); + } + return deferred.promise; + } + + PromiseCatch = function PromiseCatch(onReject) { + return this.then(UNDEFINED, onReject); + } + + // Multi-unwrapped chaining with thenable coercion. 
+ + PromiseThen = function PromiseThen(onResolve, onReject) { + onResolve = IS_SPEC_FUNCTION(onResolve) ? onResolve + : PromiseIdResolveHandler; + onReject = IS_SPEC_FUNCTION(onReject) ? onReject + : PromiseIdRejectHandler; + var that = this; + var constructor = this.constructor; + return %_CallFunction( + this, + function(x) { + x = PromiseCoerce(constructor, x); + return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) : + IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x); + }, + onReject, + PromiseChain + ); + } + + // Combinators. + + function PromiseCast(x) { + // TODO(rossberg): cannot do better until we support @@create. + return IsPromise(x) ? x : new this(function(resolve) { resolve(x) }); + } + + function PromiseAll(values) { + var deferred = %_CallFunction(this, PromiseDeferred); + var resolutions = []; + if (!%_IsArray(values)) { + deferred.reject(MakeTypeError('invalid_argument')); + return deferred.promise; + } + try { + var count = values.length; + if (count === 0) { + deferred.resolve(resolutions); + } else { + for (var i = 0; i < values.length; ++i) { + this.resolve(values[i]).then( + (function() { + // Nested scope to get closure over current i (and avoid .bind). + // TODO(rossberg): Use for-let instead once available. 
+ var i_captured = i; + return function(x) { + resolutions[i_captured] = x; + if (--count === 0) deferred.resolve(resolutions); + }; + })(), + function(r) { deferred.reject(r) } + ); + } + } + } catch (e) { + deferred.reject(e) + } + return deferred.promise; + } + + function PromiseOne(values) { + var deferred = %_CallFunction(this, PromiseDeferred); + if (!%_IsArray(values)) { + deferred.reject(MakeTypeError('invalid_argument')); + return deferred.promise; + } + try { for (var i = 0; i < values.length; ++i) { this.resolve(values[i]).then( - function(i, x) { - resolutions[i] = x; - if (--count === 0) deferred.resolve(resolutions); - }.bind(UNDEFINED, i), // TODO(rossberg): use let loop once available + function(x) { deferred.resolve(x) }, function(r) { deferred.reject(r) } ); } + } catch (e) { + deferred.reject(e) } - } catch (e) { - deferred.reject(e) - } - return deferred.promise; -} - -function PromiseOne(values) { - var deferred = %_CallFunction(this, PromiseDeferred); - if (!%_IsArray(values)) { - deferred.reject(MakeTypeError('invalid_argument')); return deferred.promise; } - try { - for (var i = 0; i < values.length; ++i) { - this.resolve(values[i]).then( - function(x) { deferred.resolve(x) }, - function(r) { deferred.reject(r) } - ); - } - } catch (e) { - deferred.reject(e) - } - return deferred.promise; -} -//------------------------------------------------------------------- -function SetUpPromise() { + // Utility for debugger + + PromiseHasRejectHandler = function PromiseHasRejectHandler() { + // Mark promise as already having triggered a reject event. + SET_PRIVATE(this, promiseDebug, true); + var queue = GET_PRIVATE(this, promiseOnReject); + return !IS_UNDEFINED(queue) && queue.length > 0; + }; + + // ------------------------------------------------------------------- + // Install exported functions. 
+ %CheckIsBootstrapping(); - %SetProperty(global, 'Promise', $Promise, DONT_ENUM); + %AddNamedProperty(global, 'Promise', $Promise, DONT_ENUM); InstallFunctions($Promise, DONT_ENUM, [ "defer", PromiseDeferred, "accept", PromiseResolved, @@ -321,6 +350,5 @@ "then", PromiseThen, "catch", PromiseCatch ]); -} -SetUpPromise(); +})(); diff -Nru nodejs-0.11.13/deps/v8/src/property.cc nodejs-0.11.15/deps/v8/src/property.cc --- nodejs-0.11.13/deps/v8/src/property.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/property.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,15 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/property.h" + +#include "src/handles-inl.h" +#include "src/ostreams.h" namespace v8 { namespace internal { - void LookupResult::Iterate(ObjectVisitor* visitor) { LookupResult* current = this; // Could be NULL. while (current != NULL) { @@ -41,82 +20,46 @@ } -#ifdef OBJECT_PRINT -void LookupResult::Print(FILE* out) { - if (!IsFound()) { - PrintF(out, "Not Found\n"); - return; - } +OStream& operator<<(OStream& os, const LookupResult& r) { + if (!r.IsFound()) return os << "Not Found\n"; - PrintF(out, "LookupResult:\n"); - PrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false"); - PrintF(out, " -attributes = %x\n", GetAttributes()); - switch (type()) { + os << "LookupResult:\n"; + os << " -cacheable = " << (r.IsCacheable() ? 
"true" : "false") << "\n"; + os << " -attributes = " << hex << r.GetAttributes() << dec << "\n"; + if (r.IsTransition()) { + os << " -transition target:\n" << Brief(r.GetTransitionTarget()) << "\n"; + } + switch (r.type()) { case NORMAL: - PrintF(out, " -type = normal\n"); - PrintF(out, " -entry = %d", GetDictionaryEntry()); - break; + return os << " -type = normal\n" + << " -entry = " << r.GetDictionaryEntry() << "\n"; case CONSTANT: - PrintF(out, " -type = constant\n"); - PrintF(out, " -value:\n"); - GetConstant()->Print(out); - PrintF(out, "\n"); - break; + return os << " -type = constant\n" + << " -value:\n" << Brief(r.GetConstant()) << "\n"; case FIELD: - PrintF(out, " -type = field\n"); - PrintF(out, " -index = %d", GetFieldIndex().field_index()); - PrintF(out, "\n"); - break; + os << " -type = field\n" + << " -index = " << r.GetFieldIndex().property_index() << "\n" + << " -field type:"; + r.GetFieldType()->PrintTo(os); + return os << "\n"; case CALLBACKS: - PrintF(out, " -type = call backs\n"); - PrintF(out, " -callback object:\n"); - GetCallbackObject()->Print(out); - break; + return os << " -type = call backs\n" + << " -callback object:\n" << Brief(r.GetCallbackObject()); case HANDLER: - PrintF(out, " -type = lookup proxy\n"); - break; + return os << " -type = lookup proxy\n"; case INTERCEPTOR: - PrintF(out, " -type = lookup interceptor\n"); - break; - case TRANSITION: - switch (GetTransitionDetails().type()) { - case FIELD: - PrintF(out, " -type = map transition\n"); - PrintF(out, " -map:\n"); - GetTransitionTarget()->Print(out); - PrintF(out, "\n"); - return; - case CONSTANT: - PrintF(out, " -type = constant property transition\n"); - PrintF(out, " -map:\n"); - GetTransitionTarget()->Print(out); - PrintF(out, "\n"); - return; - case CALLBACKS: - PrintF(out, " -type = callbacks transition\n"); - PrintF(out, " -callback object:\n"); - GetCallbackObject()->Print(out); - return; - default: - UNREACHABLE(); - return; - } + return os << " -type = lookup 
interceptor\n"; case NONEXISTENT: UNREACHABLE(); break; } + return os; } -void Descriptor::Print(FILE* out) { - PrintF(out, "Descriptor "); - GetKey()->ShortPrint(out); - PrintF(out, " @ "); - GetValue()->ShortPrint(out); +OStream& operator<<(OStream& os, const Descriptor& d) { + return os << "Descriptor " << Brief(*d.GetKey()) << " @ " + << Brief(*d.GetValue()); } - -#endif - - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/property-details.h nodejs-0.11.15/deps/v8/src/property-details.h --- nodejs-0.11.13/deps/v8/src/property-details.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/property-details.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PROPERTY_DETAILS_H_ #define V8_PROPERTY_DETAILS_H_ -#include "../include/v8.h" -#include "allocation.h" -#include "utils.h" +#include "include/v8.h" +#include "src/allocation.h" +#include "src/utils.h" // Ecma-262 3rd 8.6.1 enum PropertyAttributes { @@ -77,9 +54,8 @@ // Only in lookup results, not in descriptors. HANDLER = 4, INTERCEPTOR = 5, - TRANSITION = 6, // Only used as a marker in LookupResult. 
- NONEXISTENT = 7 + NONEXISTENT = 6 }; @@ -136,8 +112,8 @@ if (kind_ == kExternal && other.kind_ == kExternal) return false; if (kind_ == kNone && other.kind_ == kExternal) return false; - ASSERT(kind_ != kExternal); - ASSERT(other.kind_ != kExternal); + DCHECK(kind_ != kExternal); + DCHECK(other.kind_ != kExternal); if (IsHeapObject()) return other.IsNone(); if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false; if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false; @@ -157,7 +133,7 @@ } int size() const { - ASSERT(!IsNone()); + DCHECK(!IsNone()); if (IsInteger8() || IsUInteger8()) { return sizeof(uint8_t); } @@ -221,8 +197,8 @@ | AttributesField::encode(attributes) | DictionaryStorageField::encode(index); - ASSERT(type == this->type()); - ASSERT(attributes == this->attributes()); + DCHECK(type == this->type()); + DCHECK(attributes == this->attributes()); } PropertyDetails(PropertyAttributes attributes, @@ -271,7 +247,7 @@ } Representation representation() const { - ASSERT(type() != NORMAL); + DCHECK(type() != NORMAL); return DecodeRepresentation(RepresentationField::decode(value_)); } diff -Nru nodejs-0.11.13/deps/v8/src/property-details-inl.h nodejs-0.11.15/deps/v8/src/property-details-inl.h --- nodejs-0.11.13/deps/v8/src/property-details-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/property-details-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_PROPERTY_DETAILS_INL_H_ #define V8_PROPERTY_DETAILS_INL_H_ -#include "objects.h" -#include "property-details.h" -#include "v8conversions.h" +#include "src/conversions.h" +#include "src/objects.h" +#include "src/property-details.h" +#include "src/types.h" namespace v8 { namespace internal { @@ -46,6 +24,16 @@ return false; } + +Representation Representation::FromType(Type* type) { + DisallowHeapAllocation no_allocation; + if (type->Is(Type::None())) return Representation::None(); + if (type->Is(Type::SignedSmall())) return Representation::Smi(); + if (type->Is(Type::Signed32())) return Representation::Integer32(); + if (type->Is(Type::Number())) return Representation::Double(); + return Representation::Tagged(); +} + } } // namespace v8::internal #endif // V8_PROPERTY_DETAILS_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/property.h nodejs-0.11.15/deps/v8/src/property.h --- nodejs-0.11.13/deps/v8/src/property.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/property.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,89 +1,62 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_PROPERTY_H_ #define V8_PROPERTY_H_ -#include "allocation.h" -#include "transitions.h" +#include "src/factory.h" +#include "src/field-index.h" +#include "src/field-index-inl.h" +#include "src/isolate.h" +#include "src/types.h" namespace v8 { namespace internal { +class OStream; // Abstraction for elements in instance-descriptor arrays. // // Each descriptor has a key, property attributes, property type, // property index (in the actual instance-descriptor array) and // optionally a piece of data. 
-// - class Descriptor BASE_EMBEDDED { public: - MUST_USE_RESULT MaybeObject* KeyToUniqueName() { + void KeyToUniqueName() { if (!key_->IsUniqueName()) { - MaybeObject* maybe_result = - key_->GetIsolate()->heap()->InternalizeString(String::cast(key_)); - if (!maybe_result->To(&key_)) return maybe_result; + key_ = key_->GetIsolate()->factory()->InternalizeString( + Handle<String>::cast(key_)); } - return key_; } - Name* GetKey() { return key_; } - Object* GetValue() { return value_; } - PropertyDetails GetDetails() { return details_; } - -#ifdef OBJECT_PRINT - void Print(FILE* out); -#endif + Handle<Name> GetKey() const { return key_; } + Handle<Object> GetValue() const { return value_; } + PropertyDetails GetDetails() const { return details_; } void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); } private: - Name* key_; - Object* value_; + Handle<Name> key_; + Handle<Object> value_; PropertyDetails details_; protected: Descriptor() : details_(Smi::FromInt(0)) {} - void Init(Name* key, Object* value, PropertyDetails details) { + void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) { key_ = key; value_ = value; details_ = details; } - Descriptor(Name* key, Object* value, PropertyDetails details) + Descriptor(Handle<Name> key, Handle<Object> value, PropertyDetails details) : key_(key), value_(value), details_(details) { } - Descriptor(Name* key, - Object* value, + Descriptor(Handle<Name> key, + Handle<Object> value, PropertyAttributes attributes, PropertyType type, Representation representation, @@ -93,91 +66,52 @@ details_(attributes, type, representation, field_index) { } friend class DescriptorArray; + friend class Map; }; -class FieldDescriptor: public Descriptor { +OStream& operator<<(OStream& os, const Descriptor& d); + + +class FieldDescriptor V8_FINAL : public Descriptor { public: - FieldDescriptor(Name* key, + FieldDescriptor(Handle<Name> key, int field_index, PropertyAttributes attributes, Representation 
representation) - : Descriptor(key, Smi::FromInt(0), attributes, + : Descriptor(key, HeapType::Any(key->GetIsolate()), attributes, FIELD, representation, field_index) {} + FieldDescriptor(Handle<Name> key, + int field_index, + Handle<HeapType> field_type, + PropertyAttributes attributes, + Representation representation) + : Descriptor(key, field_type, attributes, FIELD, + representation, field_index) { } }; -class ConstantDescriptor: public Descriptor { +class ConstantDescriptor V8_FINAL : public Descriptor { public: - ConstantDescriptor(Name* key, - Object* value, + ConstantDescriptor(Handle<Name> key, + Handle<Object> value, PropertyAttributes attributes) : Descriptor(key, value, attributes, CONSTANT, value->OptimalRepresentation()) {} }; -class CallbacksDescriptor: public Descriptor { +class CallbacksDescriptor V8_FINAL : public Descriptor { public: - CallbacksDescriptor(Name* key, - Object* foreign, + CallbacksDescriptor(Handle<Name> key, + Handle<Object> foreign, PropertyAttributes attributes) : Descriptor(key, foreign, attributes, CALLBACKS, Representation::Tagged()) {} }; -// Holds a property index value distinguishing if it is a field index or an -// index inside the object header. 
-class PropertyIndex { - public: - static PropertyIndex NewFieldIndex(int index) { - return PropertyIndex(index, false); - } - static PropertyIndex NewHeaderIndex(int index) { - return PropertyIndex(index, true); - } - - bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; } - bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; } - - int field_index() { - ASSERT(is_field_index()); - return value(); - } - int header_index() { - ASSERT(is_header_index()); - return value(); - } - - bool is_inobject(Handle<JSObject> holder) { - if (is_header_index()) return true; - return field_index() < holder->map()->inobject_properties(); - } - - int translate(Handle<JSObject> holder) { - if (is_header_index()) return header_index(); - int index = field_index() - holder->map()->inobject_properties(); - if (index >= 0) return index; - return index + holder->map()->instance_size() / kPointerSize; - } - - private: - static const int kHeaderIndexBit = 1 << 31; - static const int kIndexMask = ~kHeaderIndexBit; - - int value() { return index_ & kIndexMask; } - - PropertyIndex(int index, bool is_header_based) - : index_(index | (is_header_based ? 
kHeaderIndexBit : 0)) { - ASSERT(index <= kIndexMask); - } - - int index_; -}; - - -class LookupResult BASE_EMBEDDED { +class LookupResult V8_FINAL BASE_EMBEDDED { public: explicit LookupResult(Isolate* isolate) : isolate_(isolate), @@ -191,7 +125,7 @@ } ~LookupResult() { - ASSERT(isolate()->top_lookup_result() == this); + DCHECK(isolate()->top_lookup_result() == this); isolate()->set_top_lookup_result(next_); } @@ -205,18 +139,34 @@ number_ = number; } - bool CanHoldValue(Handle<Object> value) { - if (IsNormal()) return true; - ASSERT(!IsTransition()); - return value->FitsRepresentation(details_.representation()); + bool CanHoldValue(Handle<Object> value) const { + switch (type()) { + case NORMAL: + return true; + case FIELD: + return value->FitsRepresentation(representation()) && + GetFieldType()->NowContains(value); + case CONSTANT: + DCHECK(GetConstant() != *value || + value->FitsRepresentation(representation())); + return GetConstant() == *value; + case CALLBACKS: + case HANDLER: + case INTERCEPTOR: + return true; + case NONEXISTENT: + UNREACHABLE(); + } + UNREACHABLE(); + return true; } void TransitionResult(JSObject* holder, Map* target) { lookup_type_ = TRANSITION_TYPE; - details_ = PropertyDetails(NONE, TRANSITION, Representation::None()); + number_ = target->LastAdded(); + details_ = target->instance_descriptors()->GetDetails(number_); holder_ = holder; transition_ = target; - number_ = 0xAAAA; } void DictionaryResult(JSObject* holder, int entry) { @@ -250,79 +200,78 @@ } JSObject* holder() const { - ASSERT(IsFound()); + DCHECK(IsFound()); return JSObject::cast(holder_); } JSProxy* proxy() const { - ASSERT(IsHandler()); + DCHECK(IsHandler()); return JSProxy::cast(holder_); } PropertyType type() const { - ASSERT(IsFound()); + DCHECK(IsFound()); return details_.type(); } Representation representation() const { - ASSERT(IsFound()); - ASSERT(!IsTransition()); - ASSERT(details_.type() != NONEXISTENT); + DCHECK(IsFound()); + DCHECK(details_.type() != 
NONEXISTENT); return details_.representation(); } PropertyAttributes GetAttributes() const { - ASSERT(!IsTransition()); - ASSERT(IsFound()); - ASSERT(details_.type() != NONEXISTENT); + DCHECK(IsFound()); + DCHECK(details_.type() != NONEXISTENT); return details_.attributes(); } PropertyDetails GetPropertyDetails() const { - ASSERT(!IsTransition()); return details_; } bool IsFastPropertyType() const { - ASSERT(IsFound()); + DCHECK(IsFound()); return IsTransition() || type() != NORMAL; } // Property callbacks does not include transitions to callbacks. bool IsPropertyCallbacks() const { - ASSERT(!(details_.type() == CALLBACKS && !IsFound())); - return details_.type() == CALLBACKS; + DCHECK(!(details_.type() == CALLBACKS && !IsFound())); + return !IsTransition() && details_.type() == CALLBACKS; } bool IsReadOnly() const { - ASSERT(IsFound()); - ASSERT(!IsTransition()); - ASSERT(details_.type() != NONEXISTENT); + DCHECK(IsFound()); + DCHECK(details_.type() != NONEXISTENT); return details_.IsReadOnly(); } bool IsField() const { - ASSERT(!(details_.type() == FIELD && !IsFound())); - return details_.type() == FIELD; + DCHECK(!(details_.type() == FIELD && !IsFound())); + return IsDescriptorOrDictionary() && type() == FIELD; } bool IsNormal() const { - ASSERT(!(details_.type() == NORMAL && !IsFound())); - return details_.type() == NORMAL; + DCHECK(!(details_.type() == NORMAL && !IsFound())); + return IsDescriptorOrDictionary() && type() == NORMAL; } bool IsConstant() const { - ASSERT(!(details_.type() == CONSTANT && !IsFound())); - return details_.type() == CONSTANT; + DCHECK(!(details_.type() == CONSTANT && !IsFound())); + return IsDescriptorOrDictionary() && type() == CONSTANT; } bool IsConstantFunction() const { - return IsConstant() && GetValue()->IsJSFunction(); + return IsConstant() && GetConstant()->IsJSFunction(); } bool IsDontDelete() const { return details_.IsDontDelete(); } bool IsDontEnum() const { return details_.IsDontEnum(); } bool IsFound() const { return 
lookup_type_ != NOT_FOUND; } + bool IsDescriptorOrDictionary() const { + return lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE; + } bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; } bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; } bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; } @@ -333,20 +282,31 @@ } bool IsDataProperty() const { - switch (type()) { - case FIELD: - case NORMAL: - case CONSTANT: - return true; - case CALLBACKS: { - Object* callback = GetCallbackObject(); - return callback->IsAccessorInfo() || callback->IsForeign(); - } - case HANDLER: - case INTERCEPTOR: - case TRANSITION: - case NONEXISTENT: + switch (lookup_type_) { + case NOT_FOUND: + case TRANSITION_TYPE: + case HANDLER_TYPE: + case INTERCEPTOR_TYPE: return false; + + case DESCRIPTOR_TYPE: + case DICTIONARY_TYPE: + switch (type()) { + case FIELD: + case NORMAL: + case CONSTANT: + return true; + case CALLBACKS: { + Object* callback = GetCallbackObject(); + DCHECK(!callback->IsForeign()); + return callback->IsAccessorInfo(); + } + case HANDLER: + case INTERCEPTOR: + case NONEXISTENT: + UNREACHABLE(); + return false; + } } UNREACHABLE(); return false; @@ -356,55 +316,63 @@ void DisallowCaching() { cacheable_ = false; } Object* GetLazyValue() const { - switch (type()) { - case FIELD: - return holder()->RawFastPropertyAt(GetFieldIndex().field_index()); - case NORMAL: { - Object* value; - value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry()); - if (holder()->IsGlobalObject()) { - value = PropertyCell::cast(value)->value(); - } - return value; - } - case CONSTANT: - return GetConstant(); - case CALLBACKS: - case HANDLER: - case INTERCEPTOR: - case TRANSITION: - case NONEXISTENT: + switch (lookup_type_) { + case NOT_FOUND: + case TRANSITION_TYPE: + case HANDLER_TYPE: + case INTERCEPTOR_TYPE: return isolate()->heap()->the_hole_value(); + + case DESCRIPTOR_TYPE: + case DICTIONARY_TYPE: + switch (type()) { + 
case FIELD: + return holder()->RawFastPropertyAt(GetFieldIndex()); + case NORMAL: { + Object* value = holder()->property_dictionary()->ValueAt( + GetDictionaryEntry()); + if (holder()->IsGlobalObject()) { + value = PropertyCell::cast(value)->value(); + } + return value; + } + case CONSTANT: + return GetConstant(); + case CALLBACKS: + return isolate()->heap()->the_hole_value(); + case HANDLER: + case INTERCEPTOR: + case NONEXISTENT: + UNREACHABLE(); + return NULL; + } } UNREACHABLE(); return NULL; } Map* GetTransitionTarget() const { + DCHECK(IsTransition()); return transition_; } - PropertyDetails GetTransitionDetails() const { - ASSERT(IsTransition()); - return transition_->GetLastDescriptorDetails(); - } - bool IsTransitionToField() const { - return IsTransition() && GetTransitionDetails().type() == FIELD; + return IsTransition() && details_.type() == FIELD; } bool IsTransitionToConstant() const { - return IsTransition() && GetTransitionDetails().type() == CONSTANT; + return IsTransition() && details_.type() == CONSTANT; } int GetDescriptorIndex() const { - ASSERT(lookup_type_ == DESCRIPTOR_TYPE); + DCHECK(lookup_type_ == DESCRIPTOR_TYPE); return number_; } - PropertyIndex GetFieldIndex() const { - ASSERT(lookup_type_ == DESCRIPTOR_TYPE); - return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map())); + FieldIndex GetFieldIndex() const { + DCHECK(lookup_type_ == DESCRIPTOR_TYPE || + lookup_type_ == TRANSITION_TYPE); + return FieldIndex::ForLookupResult(this); } int GetLocalFieldIndexFromMap(Map* map) const { @@ -412,17 +380,17 @@ } int GetDictionaryEntry() const { - ASSERT(lookup_type_ == DICTIONARY_TYPE); + DCHECK(lookup_type_ == DICTIONARY_TYPE); return number_; } JSFunction* GetConstantFunction() const { - ASSERT(type() == CONSTANT); + DCHECK(type() == CONSTANT); return JSFunction::cast(GetValue()); } Object* GetConstantFromMap(Map* map) const { - ASSERT(type() == CONSTANT); + DCHECK(type() == CONSTANT); return GetValueFromMap(map); } @@ -431,40 
+399,74 @@ } Object* GetConstant() const { - ASSERT(type() == CONSTANT); + DCHECK(type() == CONSTANT); return GetValue(); } Object* GetCallbackObject() const { - ASSERT(type() == CALLBACKS && !IsTransition()); + DCHECK(!IsTransition()); + DCHECK(type() == CALLBACKS); return GetValue(); } -#ifdef OBJECT_PRINT - void Print(FILE* out); -#endif - Object* GetValue() const { if (lookup_type_ == DESCRIPTOR_TYPE) { return GetValueFromMap(holder()->map()); + } else if (lookup_type_ == TRANSITION_TYPE) { + return GetValueFromMap(transition_); } // In the dictionary case, the data is held in the value field. - ASSERT(lookup_type_ == DICTIONARY_TYPE); + DCHECK(lookup_type_ == DICTIONARY_TYPE); return holder()->GetNormalizedProperty(this); } Object* GetValueFromMap(Map* map) const { - ASSERT(lookup_type_ == DESCRIPTOR_TYPE); - ASSERT(number_ < map->NumberOfOwnDescriptors()); + DCHECK(lookup_type_ == DESCRIPTOR_TYPE || + lookup_type_ == TRANSITION_TYPE); + DCHECK(number_ < map->NumberOfOwnDescriptors()); return map->instance_descriptors()->GetValue(number_); } int GetFieldIndexFromMap(Map* map) const { - ASSERT(lookup_type_ == DESCRIPTOR_TYPE); - ASSERT(number_ < map->NumberOfOwnDescriptors()); + DCHECK(lookup_type_ == DESCRIPTOR_TYPE || + lookup_type_ == TRANSITION_TYPE); + DCHECK(number_ < map->NumberOfOwnDescriptors()); return map->instance_descriptors()->GetFieldIndex(number_); } + HeapType* GetFieldType() const { + DCHECK(type() == FIELD); + if (lookup_type_ == DESCRIPTOR_TYPE) { + return GetFieldTypeFromMap(holder()->map()); + } + DCHECK(lookup_type_ == TRANSITION_TYPE); + return GetFieldTypeFromMap(transition_); + } + + HeapType* GetFieldTypeFromMap(Map* map) const { + DCHECK(lookup_type_ == DESCRIPTOR_TYPE || + lookup_type_ == TRANSITION_TYPE); + DCHECK(number_ < map->NumberOfOwnDescriptors()); + return map->instance_descriptors()->GetFieldType(number_); + } + + Map* GetFieldOwner() const { + return GetFieldOwnerFromMap(holder()->map()); + } + + Map* 
GetFieldOwnerFromMap(Map* map) const { + DCHECK(lookup_type_ == DESCRIPTOR_TYPE || + lookup_type_ == TRANSITION_TYPE); + DCHECK(number_ < map->NumberOfOwnDescriptors()); + return map->FindFieldOwner(number_); + } + + bool ReceiverIsHolder(Handle<Object> receiver) { + if (*receiver == holder()) return true; + if (lookup_type_ == TRANSITION_TYPE) return true; + return false; + } + void Iterate(ObjectVisitor* visitor); private: @@ -489,6 +491,7 @@ }; +OStream& operator<<(OStream& os, const LookupResult& r); } } // namespace v8::internal #endif // V8_PROPERTY_H_ diff -Nru nodejs-0.11.13/deps/v8/src/prototype.h nodejs-0.11.15/deps/v8/src/prototype.h --- nodejs-0.11.13/deps/v8/src/prototype.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/prototype.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,135 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_PROTOTYPE_H_ +#define V8_PROTOTYPE_H_ + +#include "src/isolate.h" +#include "src/objects.h" + +namespace v8 { +namespace internal { + +/** + * A class to uniformly access the prototype of any Object and walk its + * prototype chain. + * + * The PrototypeIterator can either start at the prototype (default), or + * include the receiver itself. If a PrototypeIterator is constructed for a + * Map, it will always start at the prototype. + * + * The PrototypeIterator can either run to the null_value(), the first + * non-hidden prototype, or a given object. 
+ */ +class PrototypeIterator { + public: + enum WhereToStart { START_AT_RECEIVER, START_AT_PROTOTYPE }; + + enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN }; + + PrototypeIterator(Isolate* isolate, Handle<Object> receiver, + WhereToStart where_to_start = START_AT_PROTOTYPE) + : did_jump_to_prototype_chain_(false), + object_(NULL), + handle_(receiver), + isolate_(isolate) { + CHECK(!handle_.is_null()); + if (where_to_start == START_AT_PROTOTYPE) { + Advance(); + } + } + PrototypeIterator(Isolate* isolate, Object* receiver, + WhereToStart where_to_start = START_AT_PROTOTYPE) + : did_jump_to_prototype_chain_(false), + object_(receiver), + isolate_(isolate) { + if (where_to_start == START_AT_PROTOTYPE) { + Advance(); + } + } + explicit PrototypeIterator(Map* receiver_map) + : did_jump_to_prototype_chain_(true), + object_(receiver_map->prototype()), + isolate_(receiver_map->GetIsolate()) {} + explicit PrototypeIterator(Handle<Map> receiver_map) + : did_jump_to_prototype_chain_(true), + object_(NULL), + handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())), + isolate_(receiver_map->GetIsolate()) {} + ~PrototypeIterator() {} + + Object* GetCurrent() const { + DCHECK(handle_.is_null()); + return object_; + } + static Handle<Object> GetCurrent(const PrototypeIterator& iterator) { + DCHECK(!iterator.handle_.is_null()); + return iterator.handle_; + } + void Advance() { + if (handle_.is_null() && object_->IsJSProxy()) { + did_jump_to_prototype_chain_ = true; + object_ = isolate_->heap()->null_value(); + return; + } else if (!handle_.is_null() && handle_->IsJSProxy()) { + did_jump_to_prototype_chain_ = true; + handle_ = handle(isolate_->heap()->null_value(), isolate_); + return; + } + AdvanceIgnoringProxies(); + } + void AdvanceIgnoringProxies() { + if (!did_jump_to_prototype_chain_) { + did_jump_to_prototype_chain_ = true; + if (handle_.is_null()) { + object_ = object_->GetRootMap(isolate_)->prototype(); + } else { + handle_ = 
handle(handle_->GetRootMap(isolate_)->prototype(), isolate_); + } + } else { + if (handle_.is_null()) { + object_ = HeapObject::cast(object_)->map()->prototype(); + } else { + handle_ = + handle(HeapObject::cast(*handle_)->map()->prototype(), isolate_); + } + } + } + bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const { + if (handle_.is_null()) { + return object_->IsNull() || + (did_jump_to_prototype_chain_ && + where_to_end == END_AT_NON_HIDDEN && + !HeapObject::cast(object_)->map()->is_hidden_prototype()); + } else { + return handle_->IsNull() || + (did_jump_to_prototype_chain_ && + where_to_end == END_AT_NON_HIDDEN && + !Handle<HeapObject>::cast(handle_)->map()->is_hidden_prototype()); + } + } + bool IsAtEnd(Object* final_object) { + DCHECK(handle_.is_null()); + return object_->IsNull() || object_ == final_object; + } + bool IsAtEnd(Handle<Object> final_object) { + DCHECK(!handle_.is_null()); + return handle_->IsNull() || *handle_ == *final_object; + } + + private: + bool did_jump_to_prototype_chain_; + Object* object_; + Handle<Object> handle_; + Isolate* isolate_; + + DISALLOW_COPY_AND_ASSIGN(PrototypeIterator); +}; + + +} // namespace internal + +} // namespace v8 + +#endif // V8_PROTOTYPE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/proxy.js nodejs-0.11.15/deps/v8/src/proxy.js --- nodejs-0.11.13/deps/v8/src/proxy.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/proxy.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. "use strict"; @@ -72,8 +49,8 @@ function SetUpProxy() { %CheckIsBootstrapping() - var global_receiver = %GlobalReceiver(global); - global_receiver.Proxy = $Proxy; + var global_proxy = %GlobalProxy(global); + global_proxy.Proxy = $Proxy; // Set up non-enumerable properties of the Proxy object. InstallFunctions($Proxy, DONT_ENUM, [ diff -Nru nodejs-0.11.13/deps/v8/src/qnx-math.h nodejs-0.11.15/deps/v8/src/qnx-math.h --- nodejs-0.11.13/deps/v8/src/qnx-math.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/qnx-math.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_QNX_MATH_H_ -#define V8_QNX_MATH_H_ - -#include <cmath> - -#undef fpclassify -#undef isfinite -#undef isinf -#undef isnan -#undef isnormal -#undef signbit - -using std::lrint; - -#endif // V8_QNX_MATH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/regexp.js nodejs-0.11.15/deps/v8/src/regexp.js --- nodejs-0.11.13/deps/v8/src/regexp.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // This file relies on the fact that the following declaration has been made // in runtime.js: @@ -103,7 +80,7 @@ // were called again. In SpiderMonkey, this method returns the regexp object. // In JSC, it returns undefined. For compatibility with JSC, we match their // behavior. -function RegExpCompile(pattern, flags) { +function RegExpCompileJS(pattern, flags) { // Both JSC and SpiderMonkey treat a missing pattern argument as the // empty subject string, and an actual undefined value passed as the // pattern as the string 'undefined'. Note that JSC is inconsistent @@ -131,23 +108,30 @@ } -function BuildResultFromMatchInfo(lastMatchInfo, s) { - var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1; - var start = lastMatchInfo[CAPTURE0]; - var end = lastMatchInfo[CAPTURE1]; - var result = %_RegExpConstructResult(numResults, start, s); - result[0] = %_SubString(s, start, end); +// This is kind of performance sensitive, so we want to avoid unnecessary +// type checks on inputs. 
But we also don't want to inline it several times +// manually, so we use a macro :-) +macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING) + var numResults = NUMBER_OF_CAPTURES(MATCHINFO) >> 1; + var start = MATCHINFO[CAPTURE0]; + var end = MATCHINFO[CAPTURE1]; + // Calculate the substring of the first match before creating the result array + // to avoid an unnecessary write barrier storing the first result. + var first = %_SubString(STRING, start, end); + var result = %_RegExpConstructResult(numResults, start, STRING); + result[0] = first; + if (numResults == 1) return result; var j = REGEXP_FIRST_CAPTURE + 2; for (var i = 1; i < numResults; i++) { - start = lastMatchInfo[j++]; + start = MATCHINFO[j++]; if (start != -1) { - end = lastMatchInfo[j]; - result[i] = %_SubString(s, start, end); + end = MATCHINFO[j]; + result[i] = %_SubString(STRING, start, end); } j++; } return result; -} +endmacro function RegExpExecNoTests(regexp, string, start) { @@ -155,7 +139,7 @@ var matchInfo = %_RegExpExec(regexp, string, start, lastMatchInfo); if (matchInfo !== null) { lastMatchInfoOverride = null; - return BuildResultFromMatchInfo(matchInfo, string); + RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string); } regexp.lastIndex = 0; return null; @@ -185,7 +169,6 @@ i = 0; } - %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]); // matchIndices is either null or the lastMatchInfo array. var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo); @@ -199,7 +182,7 @@ if (global) { this.lastIndex = lastMatchInfo[CAPTURE1]; } - return BuildResultFromMatchInfo(matchIndices, string); + RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string); } @@ -229,7 +212,6 @@ this.lastIndex = 0; return false; } - %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]); // matchIndices is either null or the lastMatchInfo array. 
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo); if (IS_NULL(matchIndices)) { @@ -250,7 +232,6 @@ %_StringCharCodeAt(regexp.source, 2) != 63) { // '?' regexp = TrimRegExp(regexp); } - %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]); // matchIndices is either null or the lastMatchInfo array. var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo); if (IS_NULL(matchIndices)) { @@ -400,14 +381,14 @@ function SetUpRegExp() { %CheckIsBootstrapping(); %FunctionSetInstanceClassName($RegExp, 'RegExp'); - %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM); + %AddNamedProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM); %SetCode($RegExp, RegExpConstructor); InstallFunctions($RegExp.prototype, DONT_ENUM, $Array( "exec", RegExpExec, "test", RegExpTest, "toString", RegExpToString, - "compile", RegExpCompile + "compile", RegExpCompileJS )); // The length of compile is 1 in SpiderMonkey. @@ -425,12 +406,12 @@ }; %OptimizeObjectForAddingMultipleProperties($RegExp, 22); - %DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput, - RegExpSetInput, DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput, - RegExpSetInput, DONT_ENUM | DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, '$input', RegExpGetInput, - RegExpSetInput, DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, 'input', RegExpGetInput, + RegExpSetInput, DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, '$_', RegExpGetInput, + RegExpSetInput, DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, '$input', RegExpGetInput, + RegExpSetInput, DONT_ENUM | DONT_DELETE); // The properties multiline and $* are aliases for each other. When this // value is set in SpiderMonkey, the value it is set to is coerced to a @@ -444,40 +425,40 @@ var RegExpGetMultiline = function() { return multiline; }; var RegExpSetMultiline = function(flag) { multiline = flag ? 
true : false; }; - %DefineOrRedefineAccessorProperty($RegExp, 'multiline', RegExpGetMultiline, - RegExpSetMultiline, DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, '$*', RegExpGetMultiline, - RegExpSetMultiline, - DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, 'multiline', RegExpGetMultiline, + RegExpSetMultiline, DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, '$*', RegExpGetMultiline, + RegExpSetMultiline, + DONT_ENUM | DONT_DELETE); var NoOpSetter = function(ignored) {}; // Static properties set by a successful match. - %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', RegExpGetLastMatch, - NoOpSetter, DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, '$&', RegExpGetLastMatch, - NoOpSetter, DONT_ENUM | DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', RegExpGetLastParen, - NoOpSetter, DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, '$+', RegExpGetLastParen, - NoOpSetter, DONT_ENUM | DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, 'leftContext', - RegExpGetLeftContext, NoOpSetter, - DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, '$`', RegExpGetLeftContext, - NoOpSetter, DONT_ENUM | DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, 'rightContext', - RegExpGetRightContext, NoOpSetter, - DONT_DELETE); - %DefineOrRedefineAccessorProperty($RegExp, "$'", RegExpGetRightContext, - NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, 'lastMatch', RegExpGetLastMatch, + NoOpSetter, DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, '$&', RegExpGetLastMatch, + NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, 'lastParen', RegExpGetLastParen, + NoOpSetter, DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, '$+', RegExpGetLastParen, + NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, 'leftContext', + RegExpGetLeftContext, NoOpSetter, + DONT_DELETE); 
+ %DefineAccessorPropertyUnchecked($RegExp, '$`', RegExpGetLeftContext, + NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, 'rightContext', + RegExpGetRightContext, NoOpSetter, + DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, "$'", RegExpGetRightContext, + NoOpSetter, DONT_ENUM | DONT_DELETE); for (var i = 1; i < 10; ++i) { - %DefineOrRedefineAccessorProperty($RegExp, '$' + i, - RegExpMakeCaptureGetter(i), NoOpSetter, - DONT_DELETE); + %DefineAccessorPropertyUnchecked($RegExp, '$' + i, + RegExpMakeCaptureGetter(i), NoOpSetter, + DONT_DELETE); } %ToFastProperties($RegExp); } diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler.cc nodejs-0.11.15/deps/v8/src/regexp-macro-assembler.cc --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" -#include "ast.h" -#include "assembler.h" -#include "regexp-stack.h" -#include "regexp-macro-assembler.h" -#include "simulator.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/ast.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/simulator.h" namespace v8 { namespace internal { @@ -74,16 +52,16 @@ String* subject, int start_index) { // Not just flat, but ultra flat. 
- ASSERT(subject->IsExternalString() || subject->IsSeqString()); - ASSERT(start_index >= 0); - ASSERT(start_index <= subject->length()); + DCHECK(subject->IsExternalString() || subject->IsSeqString()); + DCHECK(start_index >= 0); + DCHECK(start_index <= subject->length()); if (subject->IsOneByteRepresentation()) { const byte* address; if (StringShape(subject).IsExternal()) { const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars(); address = reinterpret_cast<const byte*>(data); } else { - ASSERT(subject->IsSeqOneByteString()); + DCHECK(subject->IsSeqOneByteString()); const uint8_t* data = SeqOneByteString::cast(subject)->GetChars(); address = reinterpret_cast<const byte*>(data); } @@ -93,7 +71,7 @@ if (StringShape(subject).IsExternal()) { data = ExternalTwoByteString::cast(subject)->GetChars(); } else { - ASSERT(subject->IsSeqTwoByteString()); + DCHECK(subject->IsSeqTwoByteString()); data = SeqTwoByteString::cast(subject)->GetChars(); } return reinterpret_cast<const byte*>(data + start_index); @@ -108,9 +86,9 @@ int previous_index, Isolate* isolate) { - ASSERT(subject->IsFlat()); - ASSERT(previous_index >= 0); - ASSERT(previous_index <= subject->length()); + DCHECK(subject->IsFlat()); + DCHECK(previous_index >= 0); + DCHECK(previous_index <= subject->length()); // No allocations before calling the regexp, but we can't use // DisallowHeapAllocation, since regexps might be preempted, and another @@ -125,7 +103,7 @@ // The string has been flattened, so if it is a cons string it contains the // full string in the first part. if (StringShape(subject_ptr).IsCons()) { - ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length()); + DCHECK_EQ(0, ConsString::cast(subject_ptr)->second()->length()); subject_ptr = ConsString::cast(subject_ptr)->first(); } else if (StringShape(subject_ptr).IsSliced()) { SlicedString* slice = SlicedString::cast(subject_ptr); @@ -134,7 +112,7 @@ } // Ensure that an underlying string has the same ASCII-ness. 
bool is_ascii = subject_ptr->IsOneByteRepresentation(); - ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString()); + DCHECK(subject_ptr->IsExternalString() || subject_ptr->IsSeqString()); // String is now either Sequential or External int char_size_shift = is_ascii ? 0 : 1; @@ -178,7 +156,7 @@ stack_base, direct_call, isolate); - ASSERT(result >= RETRY); + DCHECK(result >= RETRY); if (result == EXCEPTION && !isolate->has_pending_exception()) { // We detected a stack overflow (on the backtrack stack) in RegExp code, @@ -242,7 +220,7 @@ // This function is not allowed to cause a garbage collection. // A GC might move the calling generated code and invalidate the // return address on the stack. - ASSERT(byte_length % 2 == 0); + DCHECK(byte_length % 2 == 0); uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1); uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2); size_t length = byte_length >> 1; @@ -272,9 +250,9 @@ RegExpStack* regexp_stack = isolate->regexp_stack(); size_t size = regexp_stack->stack_capacity(); Address old_stack_base = regexp_stack->stack_base(); - ASSERT(old_stack_base == *stack_base); - ASSERT(stack_pointer <= old_stack_base); - ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size); + DCHECK(old_stack_base == *stack_base); + DCHECK(stack_pointer <= old_stack_base); + DCHECK(static_cast<size_t>(old_stack_base - stack_pointer) <= size); Address new_stack_base = regexp_stack->EnsureCapacity(size * 2); if (new_stack_base == NULL) { return NULL; diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler.h nodejs-0.11.15/deps/v8/src/regexp-macro-assembler.h --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_ #define V8_REGEXP_MACRO_ASSEMBLER_H_ -#include "ast.h" +#include "src/ast.h" namespace v8 { namespace internal { @@ -56,6 +33,7 @@ kARM64Implementation, kMIPSImplementation, kX64Implementation, + kX87Implementation, kBytecodeImplementation }; diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-irregexp.cc nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-irregexp.cc --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-irregexp.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-irregexp.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2008-2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" -#include "ast.h" -#include "bytecodes-irregexp.h" -#include "regexp-macro-assembler.h" -#include "regexp-macro-assembler-irregexp.h" -#include "regexp-macro-assembler-irregexp-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/ast.h" +#include "src/bytecodes-irregexp.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-macro-assembler-irregexp.h" +#include "src/regexp-macro-assembler-irregexp-inl.h" namespace v8 { @@ -62,7 +40,7 @@ void RegExpMacroAssemblerIrregexp::Bind(Label* l) { advance_current_end_ = kInvalidPC; - ASSERT(!l->is_bound()); + DCHECK(!l->is_bound()); if (l->is_linked()) { int pos = l->pos(); while (pos != 0) { @@ -91,8 +69,8 @@ void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_POP_REGISTER, register_index); } @@ -100,23 +78,23 @@ void RegExpMacroAssemblerIrregexp::PushRegister( int register_index, StackCheckFlag check_stack_limit) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_PUSH_REGISTER, register_index); } void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister( int register_index, int 
cp_offset) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_SET_REGISTER_TO_CP, register_index); Emit32(cp_offset); // Current position offset. } void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) { - ASSERT(reg_from <= reg_to); + DCHECK(reg_from <= reg_to); for (int reg = reg_from; reg <= reg_to; reg++) { SetRegister(reg, -1); } @@ -125,45 +103,45 @@ void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister( int register_index) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_SET_CP_TO_REGISTER, register_index); } void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister( int register_index) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_SET_REGISTER_TO_SP, register_index); } void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister( int register_index) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_SET_SP_TO_REGISTER, register_index); } void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) { - ASSERT(is_uint24(by)); + DCHECK(is_uint24(by)); Emit(BC_SET_CURRENT_POSITION_FROM_END, by); } void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_SET_REGISTER, register_index); Emit32(to); } void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); 
Emit(BC_ADVANCE_REGISTER, register_index); Emit32(by); } @@ -217,8 +195,8 @@ void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) { - ASSERT(by >= kMinCPOffset); - ASSERT(by <= kMaxCPOffset); + DCHECK(by >= kMinCPOffset); + DCHECK(by <= kMaxCPOffset); advance_current_start_ = pc_; advance_current_offset_ = by; Emit(BC_ADVANCE_CP, by); @@ -237,8 +215,8 @@ Label* on_failure, bool check_bounds, int characters) { - ASSERT(cp_offset >= kMinCPOffset); - ASSERT(cp_offset <= kMaxCPOffset); + DCHECK(cp_offset >= kMinCPOffset); + DCHECK(cp_offset <= kMaxCPOffset); int bytecode; if (check_bounds) { if (characters == 4) { @@ -246,7 +224,7 @@ } else if (characters == 2) { bytecode = BC_LOAD_2_CURRENT_CHARS; } else { - ASSERT(characters == 1); + DCHECK(characters == 1); bytecode = BC_LOAD_CURRENT_CHAR; } } else { @@ -255,7 +233,7 @@ } else if (characters == 2) { bytecode = BC_LOAD_2_CURRENT_CHARS_UNCHECKED; } else { - ASSERT(characters == 1); + DCHECK(characters == 1); bytecode = BC_LOAD_CURRENT_CHAR_UNCHECKED; } } @@ -393,8 +371,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg, Label* on_not_equal) { - ASSERT(start_reg >= 0); - ASSERT(start_reg <= kMaxRegister); + DCHECK(start_reg >= 0); + DCHECK(start_reg <= kMaxRegister); Emit(BC_CHECK_NOT_BACK_REF, start_reg); EmitOrLink(on_not_equal); } @@ -403,8 +381,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase( int start_reg, Label* on_not_equal) { - ASSERT(start_reg >= 0); - ASSERT(start_reg <= kMaxRegister); + DCHECK(start_reg >= 0); + DCHECK(start_reg <= kMaxRegister); Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg); EmitOrLink(on_not_equal); } @@ -413,8 +391,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index, int comparand, Label* on_less_than) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_CHECK_REGISTER_LT, register_index); 
Emit32(comparand); EmitOrLink(on_less_than); @@ -424,8 +402,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index, int comparand, Label* on_greater_or_equal) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_CHECK_REGISTER_GE, register_index); Emit32(comparand); EmitOrLink(on_greater_or_equal); @@ -434,8 +412,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index, Label* on_eq) { - ASSERT(register_index >= 0); - ASSERT(register_index <= kMaxRegister); + DCHECK(register_index >= 0); + DCHECK(register_index <= kMaxRegister); Emit(BC_CHECK_REGISTER_EQ_POS, register_index); EmitOrLink(on_eq); } @@ -457,7 +435,7 @@ void RegExpMacroAssemblerIrregexp::Copy(Address a) { - OS::MemCopy(a, buffer_.start(), length()); + MemCopy(a, buffer_.start(), length()); } @@ -466,7 +444,7 @@ Vector<byte> old_buffer = buffer_; buffer_ = Vector<byte>::New(old_buffer.length() * 2); own_buffer_ = true; - OS::MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length()); + MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length()); if (old_buffer_was_our_own) { old_buffer.Dispose(); } diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-irregexp.h nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-irregexp.h --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-irregexp.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-irregexp.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,33 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_ #define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_ +#include "src/regexp-macro-assembler.h" + namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-irregexp-inl.h nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-irregexp-inl.h --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-irregexp-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-irregexp-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2008-2009 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // A light-weight assembler for the Irregexp byte code. 
-#include "v8.h" -#include "ast.h" -#include "bytecodes-irregexp.h" +#include "src/v8.h" + +#include "src/ast.h" +#include "src/bytecodes-irregexp.h" #ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_ #define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_ @@ -43,7 +21,7 @@ void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte, uint32_t twenty_four_bits) { uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte); - ASSERT(pc_ <= buffer_.length()); + DCHECK(pc_ <= buffer_.length()); if (pc_ + 3 >= buffer_.length()) { Expand(); } @@ -53,7 +31,7 @@ void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) { - ASSERT(pc_ <= buffer_.length()); + DCHECK(pc_ <= buffer_.length()); if (pc_ + 1 >= buffer_.length()) { Expand(); } @@ -63,7 +41,7 @@ void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) { - ASSERT(pc_ <= buffer_.length()); + DCHECK(pc_ <= buffer_.length()); if (pc_ == buffer_.length()) { Expand(); } @@ -73,7 +51,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) { - ASSERT(pc_ <= buffer_.length()); + DCHECK(pc_ <= buffer_.length()); if (pc_ + 3 >= buffer_.length()) { Expand(); } diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-tracer.cc nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-tracer.cc --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-tracer.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-tracer.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" -#include "ast.h" -#include "regexp-macro-assembler.h" -#include "regexp-macro-assembler-tracer.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/ast.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-macro-assembler-tracer.h" namespace v8 { namespace internal { @@ -38,9 +16,9 @@ RegExpMacroAssembler(assembler->zone()), assembler_(assembler) { unsigned int type = assembler->Implementation(); - ASSERT(type < 6); + DCHECK(type < 6); const char* impl_names[] = {"IA32", "ARM", "ARM64", - "MIPS", "X64", "Bytecode"}; + "MIPS", "X64", "X87", "Bytecode"}; PrintF("RegExpMacroAssembler%s();\n", impl_names[type]); } @@ -215,7 +193,7 @@ buffer_[0] = '\0'; } return &buffer_[0]; - }; + } private: uc16 character_; diff -Nru nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-tracer.h nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-tracer.h --- nodejs-0.11.13/deps/v8/src/regexp-macro-assembler-tracer.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-macro-assembler-tracer.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_ #define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/regexp-stack.cc nodejs-0.11.15/deps/v8/src/regexp-stack.cc --- nodejs-0.11.13/deps/v8/src/regexp-stack.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-stack.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,10 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" -#include "regexp-stack.h" +#include "src/v8.h" + +#include "src/regexp-stack.h" namespace v8 { namespace internal { @@ -56,7 +34,7 @@ char* RegExpStack::ArchiveStack(char* to) { size_t size = sizeof(thread_local_); - OS::MemCopy(reinterpret_cast<void*>(to), &thread_local_, size); + MemCopy(reinterpret_cast<void*>(to), &thread_local_, size); thread_local_ = ThreadLocal(); return to + size; } @@ -64,7 +42,7 @@ char* RegExpStack::RestoreStack(char* from) { size_t size = sizeof(thread_local_); - OS::MemCopy(&thread_local_, reinterpret_cast<void*>(from), size); + MemCopy(&thread_local_, reinterpret_cast<void*>(from), size); return from + size; } @@ -92,11 +70,10 @@ Address new_memory = NewArray<byte>(static_cast<int>(size)); if (thread_local_.memory_size_ > 0) { // Copy original memory into top of new memory. 
- OS::MemCopy( - reinterpret_cast<void*>( - new_memory + size - thread_local_.memory_size_), - reinterpret_cast<void*>(thread_local_.memory_), - thread_local_.memory_size_); + MemCopy(reinterpret_cast<void*>(new_memory + size - + thread_local_.memory_size_), + reinterpret_cast<void*>(thread_local_.memory_), + thread_local_.memory_size_); DeleteArray(thread_local_.memory_); } thread_local_.memory_ = new_memory; diff -Nru nodejs-0.11.13/deps/v8/src/regexp-stack.h nodejs-0.11.15/deps/v8/src/regexp-stack.h --- nodejs-0.11.13/deps/v8/src/regexp-stack.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/regexp-stack.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_REGEXP_STACK_H_ #define V8_REGEXP_STACK_H_ @@ -64,7 +41,7 @@ // Gives the top of the memory used as stack. Address stack_base() { - ASSERT(thread_local_.memory_size_ != 0); + DCHECK(thread_local_.memory_size_ != 0); return thread_local_.memory_ + thread_local_.memory_size_; } diff -Nru nodejs-0.11.13/deps/v8/src/rewriter.cc nodejs-0.11.15/deps/v8/src/rewriter.cc --- nodejs-0.11.13/deps/v8/src/rewriter.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/rewriter.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "rewriter.h" - -#include "ast.h" -#include "compiler.h" -#include "scopes.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/rewriter.h" + +#include "src/ast.h" +#include "src/compiler.h" +#include "src/scopes.h" namespace v8 { namespace internal { @@ -43,7 +20,9 @@ result_assigned_(false), is_set_(false), in_try_(false), - factory_(zone) { + // Passing a null AstValueFactory is fine, because Processor doesn't + // need to create strings or literals. + factory_(zone, NULL) { InitializeAstVisitor(zone); } @@ -250,21 +229,23 @@ // continue to be used in the case of failure. 
bool Rewriter::Rewrite(CompilationInfo* info) { FunctionLiteral* function = info->function(); - ASSERT(function != NULL); + DCHECK(function != NULL); Scope* scope = function->scope(); - ASSERT(scope != NULL); + DCHECK(scope != NULL); if (!scope->is_global_scope() && !scope->is_eval_scope()) return true; ZoneList<Statement*>* body = function->body(); if (!body->is_empty()) { - Variable* result = scope->NewTemporary( - info->isolate()->factory()->dot_result_string()); + Variable* result = + scope->NewTemporary(info->ast_value_factory()->dot_result_string()); + // The name string must be internalized at this point. + DCHECK(!result->name().is_null()); Processor processor(result, info->zone()); processor.Process(body); if (processor.HasStackOverflow()) return false; if (processor.result_assigned()) { - ASSERT(function->end_position() != RelocInfo::kNoPosition); + DCHECK(function->end_position() != RelocInfo::kNoPosition); // Set the position of the assignment statement one character past the // source code, such that it definitely is not in the source code range // of an immediate inner scope. For example in @@ -273,7 +254,7 @@ // coincides with the end of the with scope which is the position of '1'. int pos = function->end_position(); VariableProxy* result_proxy = processor.factory()->NewVariableProxy( - result->name(), false, result->interface(), pos); + result->raw_name(), false, result->interface(), pos); result_proxy->BindTo(result); Statement* result_statement = processor.factory()->NewReturnStatement(result_proxy, pos); diff -Nru nodejs-0.11.13/deps/v8/src/rewriter.h nodejs-0.11.15/deps/v8/src/rewriter.h --- nodejs-0.11.13/deps/v8/src/rewriter.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/rewriter.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_REWRITER_H_ #define V8_REWRITER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/runtime.cc nodejs-0.11.15/deps/v8/src/runtime.cc --- nodejs-0.11.13/deps/v8/src/runtime.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/runtime.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,74 +1,54 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <stdlib.h> #include <limits> -#include "v8.h" +#include "src/v8.h" -#include "accessors.h" -#include "allocation-site-scopes.h" -#include "api.h" -#include "arguments.h" -#include "bootstrapper.h" -#include "codegen.h" -#include "compilation-cache.h" -#include "compiler.h" -#include "cpu.h" -#include "cpu-profiler.h" -#include "dateparser-inl.h" -#include "debug.h" -#include "deoptimizer.h" -#include "date.h" -#include "execution.h" -#include "full-codegen.h" -#include "global-handles.h" -#include "isolate-inl.h" -#include "jsregexp.h" -#include "jsregexp-inl.h" -#include "json-parser.h" -#include "json-stringifier.h" -#include "liveedit.h" -#include "misc-intrinsics.h" -#include "parser.h" -#include "platform.h" -#include "runtime-profiler.h" -#include "runtime.h" -#include "scopeinfo.h" -#include "smart-pointers.h" -#include "string-search.h" -#include "stub-cache.h" -#include "uri.h" -#include "v8conversions.h" -#include "v8threads.h" -#include "vm-state-inl.h" +#include "src/accessors.h" +#include "src/allocation-site-scopes.h" +#include "src/api.h" +#include "src/arguments.h" +#include "src/base/cpu.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/compilation-cache.h" +#include "src/compiler.h" +#include "src/conversions.h" +#include "src/cpu-profiler.h" +#include "src/date.h" +#include "src/dateparser-inl.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/full-codegen.h" +#include "src/global-handles.h" +#include "src/isolate-inl.h" +#include "src/json-parser.h" +#include "src/json-stringifier.h" +#include "src/jsregexp-inl.h" +#include "src/jsregexp.h" +#include "src/liveedit.h" +#include "src/misc-intrinsics.h" +#include "src/parser.h" +#include "src/prototype.h" +#include "src/runtime.h" +#include "src/runtime-profiler.h" +#include "src/scopeinfo.h" +#include "src/smart-pointers.h" +#include "src/string-search.h" +#include 
"src/stub-cache.h" +#include "src/uri.h" +#include "src/utils.h" +#include "src/v8threads.h" +#include "src/vm-state-inl.h" +#include "third_party/fdlibm/fdlibm.h" #ifdef V8_I18N_SUPPORT -#include "i18n.h" +#include "src/i18n.h" #include "unicode/brkiter.h" #include "unicode/calendar.h" #include "unicode/coll.h" @@ -105,6 +85,12 @@ #define RUNTIME_ASSERT(value) \ if (!(value)) return isolate->ThrowIllegalOperation(); +#define RUNTIME_ASSERT_HANDLIFIED(value, T) \ + if (!(value)) { \ + isolate->ThrowIllegalOperation(); \ + return MaybeHandle<T>(); \ + } + // Cast the given object to a value of the specified type and store // it in a variable with the given name. If the object is not of the // expected type call IllegalOperation and return. @@ -116,6 +102,10 @@ RUNTIME_ASSERT(args[index]->Is##Type()); \ Handle<Type> name = args.at<Type>(index); +#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \ + RUNTIME_ASSERT(args[index]->IsNumber()); \ + Handle<Object> name = args.at<Object>(index); + // Cast the given object to a boolean and store it in a variable with // the given name. If the object is not a boolean call IllegalOperation // and return. @@ -182,8 +172,8 @@ } else { // Bail out as a non-internalized-string non-index key makes caching // impossible. - // ASSERT to make sure that the if condition after the loop is false. - ASSERT(number_of_string_keys != number_of_properties); + // DCHECK to make sure that the if condition after the loop is false. 
+ DCHECK(number_of_string_keys != number_of_properties); break; } } @@ -203,25 +193,23 @@ keys->set(index++, key); } } - ASSERT(index == number_of_string_keys); + DCHECK(index == number_of_string_keys); } *is_result_from_cache = true; return isolate->factory()->ObjectLiteralMapFromCache(context, keys); } *is_result_from_cache = false; - return isolate->factory()->CopyMap( - Handle<Map>(context->object_function()->initial_map()), - number_of_properties); + return Map::Create(handle(context->object_function()), number_of_properties); } -static Handle<Object> CreateLiteralBoilerplate( +MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate( Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> constant_properties); -static Handle<Object> CreateObjectLiteralBoilerplate( +MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate( Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> constant_properties, @@ -260,14 +248,12 @@ int length = constant_properties->length(); bool should_transform = !is_result_from_cache && boilerplate->HasFastProperties(); - if (should_transform || has_function_literal) { - // Normalize the properties of object to avoid n^2 behavior - // when extending the object multiple properties. Indicate the number of - // properties to be added. + bool should_normalize = should_transform || has_function_literal; + if (should_normalize) { + // TODO(verwaest): We might not want to ever normalize here. JSObject::NormalizeProperties( boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2); } - // TODO(verwaest): Support tracking representations in the boilerplate. for (int index = 0; index < length; index +=2) { Handle<Object> key(constant_properties->get(index+0), isolate); @@ -276,46 +262,46 @@ // The value contains the constant_properties of a // simple object or array literal. 
Handle<FixedArray> array = Handle<FixedArray>::cast(value); - value = CreateLiteralBoilerplate(isolate, literals, array); - if (value.is_null()) return value; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, + CreateLiteralBoilerplate(isolate, literals, array), + Object); } - Handle<Object> result; + MaybeHandle<Object> maybe_result; uint32_t element_index = 0; - StoreMode mode = value->IsJSObject() ? FORCE_FIELD : ALLOW_AS_CONSTANT; if (key->IsInternalizedString()) { if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) { // Array index as string (uint32). - result = JSObject::SetOwnElement( - boilerplate, element_index, value, SLOPPY); + if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate); + maybe_result = + JSObject::SetOwnElement(boilerplate, element_index, value, SLOPPY); } else { Handle<String> name(String::cast(*key)); - ASSERT(!name->AsArrayIndex(&element_index)); - result = JSObject::SetLocalPropertyIgnoreAttributes( - boilerplate, name, value, NONE, - Object::OPTIMAL_REPRESENTATION, mode); + DCHECK(!name->AsArrayIndex(&element_index)); + maybe_result = JSObject::SetOwnPropertyIgnoreAttributes( + boilerplate, name, value, NONE); } } else if (key->ToArrayIndex(&element_index)) { // Array index (uint32). - result = JSObject::SetOwnElement( - boilerplate, element_index, value, SLOPPY); + if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate); + maybe_result = + JSObject::SetOwnElement(boilerplate, element_index, value, SLOPPY); } else { // Non-uint32 number. 
- ASSERT(key->IsNumber()); + DCHECK(key->IsNumber()); double num = key->Number(); char arr[100]; Vector<char> buffer(arr, ARRAY_SIZE(arr)); const char* str = DoubleToCString(num, buffer); - Handle<String> name = - isolate->factory()->NewStringFromAscii(CStrVector(str)); - result = JSObject::SetLocalPropertyIgnoreAttributes( - boilerplate, name, value, NONE, - Object::OPTIMAL_REPRESENTATION, mode); + Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str); + maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name, + value, NONE); } // If setting the property on the boilerplate throws an // exception, the exception is converted to an empty handle in // the handle based operations. In that case, we need to // convert back to an exception. - if (result.is_null()) return result; + RETURN_ON_EXCEPTION(isolate, maybe_result, Object); } // Transform to fast properties if necessary. For object literals with @@ -323,7 +309,7 @@ // computed properties have been assigned so that we can generate // constant function properties. 
if (should_transform && !has_function_literal) { - JSObject::TransformToFastProperties( + JSObject::MigrateSlowToFast( boilerplate, boilerplate->map()->unused_property_fields()); } @@ -331,25 +317,27 @@ } -MaybeObject* TransitionElements(Handle<Object> object, - ElementsKind to_kind, - Isolate* isolate) { +MUST_USE_RESULT static MaybeHandle<Object> TransitionElements( + Handle<Object> object, + ElementsKind to_kind, + Isolate* isolate) { HandleScope scope(isolate); - if (!object->IsJSObject()) return isolate->ThrowIllegalOperation(); + if (!object->IsJSObject()) { + isolate->ThrowIllegalOperation(); + return MaybeHandle<Object>(); + } ElementsKind from_kind = Handle<JSObject>::cast(object)->map()->elements_kind(); if (Map::IsValidElementsTransition(from_kind, to_kind)) { JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind); - return *object; + return object; } - return isolate->ThrowIllegalOperation(); + isolate->ThrowIllegalOperation(); + return MaybeHandle<Object>(); } -static const int kSmiLiteralMinimumLength = 1024; - - -Handle<Object> Runtime::CreateArrayLiteralBoilerplate( +MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate( Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> elements) { @@ -368,22 +356,21 @@ Handle<FixedArrayBase> constant_elements_values( FixedArrayBase::cast(elements->get(1))); - ASSERT(IsFastElementsKind(constant_elements_kind)); - Context* native_context = isolate->context()->native_context(); - Object* maybe_maps_array = native_context->js_array_maps(); - ASSERT(!maybe_maps_array->IsUndefined()); - Object* maybe_map = FixedArray::cast(maybe_maps_array)->get( - constant_elements_kind); - ASSERT(maybe_map->IsMap()); - object->set_map(Map::cast(maybe_map)); + { DisallowHeapAllocation no_gc; + DCHECK(IsFastElementsKind(constant_elements_kind)); + Context* native_context = isolate->context()->native_context(); + Object* maps_array = native_context->js_array_maps(); + 
DCHECK(!maps_array->IsUndefined()); + Object* map = FixedArray::cast(maps_array)->get(constant_elements_kind); + object->set_map(Map::cast(map)); + } Handle<FixedArrayBase> copied_elements_values; if (IsFastDoubleElementsKind(constant_elements_kind)) { - ASSERT(FLAG_smi_only_arrays); copied_elements_values = isolate->factory()->CopyFixedDoubleArray( Handle<FixedDoubleArray>::cast(constant_elements_values)); } else { - ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind)); + DCHECK(IsFastSmiOrObjectElementsKind(constant_elements_kind)); const bool is_cow = (constant_elements_values->map() == isolate->heap()->fixed_cow_array_map()); @@ -393,7 +380,7 @@ Handle<FixedArray> fixed_array_values = Handle<FixedArray>::cast(copied_elements_values); for (int i = 0; i < fixed_array_values->length(); i++) { - ASSERT(!fixed_array_values->get(i)->IsFixedArray()); + DCHECK(!fixed_array_values->get(i)->IsFixedArray()); } #endif } else { @@ -403,14 +390,15 @@ isolate->factory()->CopyFixedArray(fixed_array_values); copied_elements_values = fixed_array_values_copy; for (int i = 0; i < fixed_array_values->length(); i++) { - Object* current = fixed_array_values->get(i); - if (current->IsFixedArray()) { + if (fixed_array_values->get(i)->IsFixedArray()) { // The value contains the constant_properties of a // simple object or array literal. Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i))); - Handle<Object> result = - CreateLiteralBoilerplate(isolate, literals, fa); - if (result.is_null()) return result; + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + CreateLiteralBoilerplate(isolate, literals, fa), + Object); fixed_array_values_copy->set(i, *result); } } @@ -419,27 +407,12 @@ object->set_elements(*copied_elements_values); object->set_length(Smi::FromInt(copied_elements_values->length())); - // Ensure that the boilerplate object has FAST_*_ELEMENTS, unless the flag is - // on or the object is larger than the threshold. 
- if (!FLAG_smi_only_arrays && - constant_elements_values->length() < kSmiLiteralMinimumLength) { - ElementsKind elements_kind = object->GetElementsKind(); - if (!IsFastObjectElementsKind(elements_kind)) { - if (IsFastHoleyElementsKind(elements_kind)) { - CHECK(!TransitionElements(object, FAST_HOLEY_ELEMENTS, - isolate)->IsFailure()); - } else { - CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure()); - } - } - } - - object->ValidateElements(); + JSObject::ValidateElements(object); return object; } -static Handle<Object> CreateLiteralBoilerplate( +MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate( Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> array) { @@ -463,14 +436,14 @@ isolate, literals, elements); default: UNREACHABLE(); - return Handle<Object>::null(); + return MaybeHandle<Object>(); } } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateObjectLiteral) { +RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2); @@ -478,24 +451,29 @@ bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0; bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0; + RUNTIME_ASSERT(literals_index >= 0 && literals_index < literals->length()); + // Check if boilerplate exists. If not, create it first. 
Handle<Object> literal_site(literals->get(literals_index), isolate); Handle<AllocationSite> site; Handle<JSObject> boilerplate; if (*literal_site == isolate->heap()->undefined_value()) { - Handle<Object> raw_boilerplate = CreateObjectLiteralBoilerplate( - isolate, - literals, - constant_properties, - should_have_fast_elements, - has_function_literal); - RETURN_IF_EMPTY_HANDLE(isolate, raw_boilerplate); + Handle<Object> raw_boilerplate; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, raw_boilerplate, + CreateObjectLiteralBoilerplate( + isolate, + literals, + constant_properties, + should_have_fast_elements, + has_function_literal)); boilerplate = Handle<JSObject>::cast(raw_boilerplate); AllocationSiteCreationContext creation_context(isolate); site = creation_context.EnterNewScope(); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::DeepWalk(boilerplate, &creation_context)); + RETURN_FAILURE_ON_EXCEPTION( + isolate, + JSObject::DeepWalk(boilerplate, &creation_context)); creation_context.ExitScope(site, boilerplate); // Update the functions literal and return the boilerplate. 
@@ -508,14 +486,16 @@ AllocationSiteUsageContext usage_context(isolate, site, true); usage_context.EnterNewScope(); - Handle<Object> copy = JSObject::DeepCopy(boilerplate, &usage_context); + MaybeHandle<Object> maybe_copy = JSObject::DeepCopy( + boilerplate, &usage_context); usage_context.ExitScope(site, boilerplate); - RETURN_IF_EMPTY_HANDLE(isolate, copy); + Handle<Object> copy; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, copy, maybe_copy); return *copy; } -static Handle<AllocationSite> GetLiteralAllocationSite( +MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite( Isolate* isolate, Handle<FixedArray> literals, int literals_index, @@ -524,10 +504,12 @@ Handle<Object> literal_site(literals->get(literals_index), isolate); Handle<AllocationSite> site; if (*literal_site == isolate->heap()->undefined_value()) { - ASSERT(*elements != isolate->heap()->empty_fixed_array()); - Handle<Object> boilerplate = - Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements); - if (boilerplate.is_null()) return Handle<AllocationSite>::null(); + DCHECK(*elements != isolate->heap()->empty_fixed_array()); + Handle<Object> boilerplate; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, boilerplate, + Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements), + AllocationSite); AllocationSiteCreationContext creation_context(isolate); site = creation_context.EnterNewScope(); @@ -546,199 +528,218 @@ } -static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate, +static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate, Handle<FixedArray> literals, int literals_index, Handle<FixedArray> elements, int flags) { - Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals, - literals_index, elements); - RETURN_IF_EMPTY_HANDLE(isolate, site); + RUNTIME_ASSERT_HANDLIFIED(literals_index >= 0 && + literals_index < literals->length(), JSObject); + Handle<AllocationSite> site; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, site, + 
GetLiteralAllocationSite(isolate, literals, literals_index, elements), + JSObject); bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0; Handle<JSObject> boilerplate(JSObject::cast(site->transition_info())); AllocationSiteUsageContext usage_context(isolate, site, enable_mementos); usage_context.EnterNewScope(); JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0 - ? JSObject::kNoHints - : JSObject::kObjectIsShallowArray; - Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context, - hints); + ? JSObject::kNoHints + : JSObject::kObjectIsShallow; + MaybeHandle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context, + hints); usage_context.ExitScope(site, boilerplate); - RETURN_IF_EMPTY_HANDLE(isolate, copy); - return *copy; + return copy; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateArrayLiteral) { +RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2); CONVERT_SMI_ARG_CHECKED(flags, 3); - return CreateArrayLiteralImpl(isolate, literals, literals_index, elements, - flags); + Handle<JSObject> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + CreateArrayLiteralImpl(isolate, literals, literals_index, elements, + flags)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateArrayLiteralStubBailout) { +RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2); - return CreateArrayLiteralImpl(isolate, literals, literals_index, elements, - ArrayLiteral::kShallowElements); + Handle<JSObject> result; + 
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + CreateArrayLiteralImpl(isolate, literals, literals_index, elements, + ArrayLiteral::kShallowElements)); + return *result; +} + + +RUNTIME_FUNCTION(Runtime_CreateSymbol) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, name, 0); + RUNTIME_ASSERT(name->IsString() || name->IsUndefined()); + Handle<Symbol> symbol = isolate->factory()->NewSymbol(); + if (name->IsString()) symbol->set_name(*name); + return *symbol; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) { +RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - Handle<Object> name(args[0], isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, name, 0); RUNTIME_ASSERT(name->IsString() || name->IsUndefined()); - Symbol* symbol; - MaybeObject* maybe = isolate->heap()->AllocateSymbol(); - if (!maybe->To(&symbol)) return maybe; + Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol(); if (name->IsString()) symbol->set_name(*name); - return symbol; + return *symbol; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreatePrivateSymbol) { +RUNTIME_FUNCTION(Runtime_CreatePrivateOwnSymbol) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - Handle<Object> name(args[0], isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, name, 0); RUNTIME_ASSERT(name->IsString() || name->IsUndefined()); - Symbol* symbol; - MaybeObject* maybe = isolate->heap()->AllocatePrivateSymbol(); - if (!maybe->To(&symbol)) return maybe; + Handle<Symbol> symbol = isolate->factory()->NewPrivateOwnSymbol(); if (name->IsString()) symbol->set_name(*name); - return symbol; + return *symbol; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateGlobalPrivateSymbol) { +RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, 
name, 0); Handle<JSObject> registry = isolate->GetSymbolRegistry(); Handle<String> part = isolate->factory()->private_intern_string(); - Handle<JSObject> privates = - Handle<JSObject>::cast(JSObject::GetProperty(registry, part)); - Handle<Object> symbol = JSObject::GetProperty(privates, name); + Handle<Object> privates; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, privates, Object::GetPropertyOrElement(registry, part)); + Handle<Object> symbol; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, symbol, Object::GetPropertyOrElement(privates, name)); if (!symbol->IsSymbol()) { - ASSERT(symbol->IsUndefined()); + DCHECK(symbol->IsUndefined()); symbol = isolate->factory()->NewPrivateSymbol(); Handle<Symbol>::cast(symbol)->set_name(*name); - JSObject::SetProperty(privates, name, symbol, NONE, STRICT); + JSObject::SetProperty(Handle<JSObject>::cast(privates), name, symbol, + STRICT).Assert(); } return *symbol; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NewSymbolWrapper) { - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(Symbol, symbol, 0); - return symbol->ToObject(isolate); +RUNTIME_FUNCTION(Runtime_NewSymbolWrapper) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0); + return *Object::ToObject(isolate, symbol).ToHandleChecked(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolDescription) { +RUNTIME_FUNCTION(Runtime_SymbolDescription) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(Symbol, symbol, 0); return symbol->name(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolRegistry) { +RUNTIME_FUNCTION(Runtime_SymbolRegistry) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); return *isolate->GetSymbolRegistry(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) { +RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); 
CONVERT_ARG_CHECKED(Symbol, symbol, 0); return isolate->heap()->ToBoolean(symbol->is_private()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSReceiver, handler, 0); - Object* prototype = args[1]; - Object* used_prototype = - prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value(); - return isolate->heap()->AllocateJSProxy(handler, used_prototype); +RUNTIME_FUNCTION(Runtime_CreateJSProxy) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1); + if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value(); + return *isolate->factory()->NewJSProxy(handler, prototype); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 4); - CONVERT_ARG_CHECKED(JSReceiver, handler, 0); - Object* call_trap = args[1]; +RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) { + HandleScope scope(isolate); + DCHECK(args.length() == 4); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, call_trap, 1); RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy()); - CONVERT_ARG_CHECKED(JSFunction, construct_trap, 2); - Object* prototype = args[3]; - Object* used_prototype = - prototype->IsJSReceiver() ? 
prototype : isolate->heap()->null_value(); - return isolate->heap()->AllocateJSFunctionProxy( - handler, call_trap, construct_trap, used_prototype); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, construct_trap, 2); + CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 3); + if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value(); + return *isolate->factory()->NewJSFunctionProxy( + handler, call_trap, construct_trap, prototype); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) { +RUNTIME_FUNCTION(Runtime_IsJSProxy) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* obj = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0); return isolate->heap()->ToBoolean(obj->IsJSProxy()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) { +RUNTIME_FUNCTION(Runtime_IsJSFunctionProxy) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* obj = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0); return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) { +RUNTIME_FUNCTION(Runtime_GetHandler) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSProxy, proxy, 0); return proxy->handler(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) { +RUNTIME_FUNCTION(Runtime_GetCallTrap) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0); return proxy->call_trap(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) { +RUNTIME_FUNCTION(Runtime_GetConstructTrap) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0); return proxy->construct_trap(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) { +RUNTIME_FUNCTION(Runtime_Fix) { HandleScope scope(isolate); - 
ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0); JSProxy::Fix(proxy); return isolate->heap()->undefined_value(); @@ -748,7 +749,7 @@ void Runtime::FreeArrayBuffer(Isolate* isolate, JSArrayBuffer* phantom_array_buffer) { if (phantom_array_buffer->should_be_freed()) { - ASSERT(phantom_array_buffer->is_external()); + DCHECK(phantom_array_buffer->is_external()); free(phantom_array_buffer->backing_store()); } if (phantom_array_buffer->is_external()) return; @@ -756,8 +757,9 @@ size_t allocated_length = NumberToSize( isolate, phantom_array_buffer->byte_length()); - isolate->heap()->AdjustAmountOfExternalAllocatedMemory( - -static_cast<int64_t>(allocated_length)); + reinterpret_cast<v8::Isolate*>(isolate) + ->AdjustAmountOfExternalAllocatedMemory( + -static_cast<int64_t>(allocated_length)); CHECK(V8::ArrayBufferAllocator() != NULL); V8::ArrayBufferAllocator()->Free( phantom_array_buffer->backing_store(), @@ -770,7 +772,7 @@ bool is_external, void* data, size_t allocated_length) { - ASSERT(array_buffer->GetInternalFieldCount() == + DCHECK(array_buffer->GetInternalFieldCount() == v8::ArrayBuffer::kInternalFieldCount); for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) { array_buffer->SetInternalField(i, Smi::FromInt(0)); @@ -802,7 +804,7 @@ data = V8::ArrayBufferAllocator()->Allocate(allocated_length); } else { data = - V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length); + V8::ArrayBufferAllocator()->AllocateUninitialized(allocated_length); } if (data == NULL) return false; } else { @@ -811,7 +813,8 @@ SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length); - isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length); + reinterpret_cast<v8::Isolate*>(isolate) + ->AdjustAmountOfExternalAllocatedMemory(allocated_length); return true; } @@ -835,62 +838,55 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) { 
+RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0); - CONVERT_ARG_HANDLE_CHECKED(Object, byteLength, 1); - size_t allocated_length; - if (byteLength->IsSmi()) { - allocated_length = Smi::cast(*byteLength)->value(); - } else { - ASSERT(byteLength->IsHeapNumber()); - double value = HeapNumber::cast(*byteLength)->value(); - - ASSERT(value >= 0); - - if (value > std::numeric_limits<size_t>::max()) { - return isolate->Throw( - *isolate->factory()->NewRangeError("invalid_array_buffer_length", - HandleVector<Object>(NULL, 0))); - } - - allocated_length = static_cast<size_t>(value); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1); + if (!holder->byte_length()->IsUndefined()) { + // ArrayBuffer is already initialized; probably a fuzz test. + return *holder; + } + size_t allocated_length = 0; + if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) { + return isolate->Throw( + *isolate->factory()->NewRangeError("invalid_array_buffer_length", + HandleVector<Object>(NULL, 0))); } - if (!Runtime::SetupArrayBufferAllocatingData(isolate, holder, allocated_length)) { - return isolate->Throw(*isolate->factory()-> - NewRangeError("invalid_array_buffer_length", - HandleVector<Object>(NULL, 0))); + return isolate->Throw( + *isolate->factory()->NewRangeError("invalid_array_buffer_length", + HandleVector<Object>(NULL, 0))); } - return *holder; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferGetByteLength) { +RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0); return holder->byte_length(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) { +RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); 
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0); CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1); - CONVERT_DOUBLE_ARG_CHECKED(first, 2); - size_t start = static_cast<size_t>(first); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2); + RUNTIME_ASSERT(!source.is_identical_to(target)); + size_t start = 0; + RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start)); size_t target_length = NumberToSize(isolate, target->byte_length()); if (target_length == 0) return isolate->heap()->undefined_value(); size_t source_byte_length = NumberToSize(isolate, source->byte_length()); - CHECK(start <= source_byte_length); - CHECK(source_byte_length - start >= target_length); + RUNTIME_ASSERT(start <= source_byte_length); + RUNTIME_ASSERT(source_byte_length - start >= target_length); uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store()); uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store()); CopyBytes(target_data, source_data + start, target_length); @@ -898,24 +894,23 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) { +RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(Object, object, 0); - return object->IsJSArrayBufferView() - ? 
isolate->heap()->true_value() - : isolate->heap()->false_value(); + return isolate->heap()->ToBoolean(object->IsJSArrayBufferView()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferNeuter) { +RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) { HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0); if (array_buffer->backing_store() == NULL) { CHECK(Smi::FromInt(0) == array_buffer->byte_length()); return isolate->heap()->undefined_value(); } - ASSERT(!array_buffer->is_external()); + DCHECK(!array_buffer->is_external()); void* backing_store = array_buffer->backing_store(); size_t byte_length = NumberToSize(isolate, array_buffer->byte_length()); array_buffer->set_is_external(true); @@ -949,20 +944,17 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) { +RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) { HandleScope scope(isolate); - ASSERT(args.length() == 5); + DCHECK(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0); CONVERT_SMI_ARG_CHECKED(arrayId, 1); CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2); - CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3); - CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset_object, 3); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4); - ASSERT(holder->GetInternalFieldCount() == - v8::ArrayBufferView::kInternalFieldCount); - for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) { - holder->SetInternalField(i, Smi::FromInt(0)); - } + RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST && + arrayId <= Runtime::ARRAY_ID_LAST); ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization. size_t element_size = 1; // Bogus initialization. 
@@ -974,32 +966,46 @@ &external_elements_kind, &fixed_elements_kind, &element_size); + RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind); - holder->set_byte_offset(*byte_offset_object); - holder->set_byte_length(*byte_length_object); + size_t byte_offset = 0; + size_t byte_length = 0; + RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset_object, &byte_offset)); + RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length_object, &byte_length)); - size_t byte_offset = NumberToSize(isolate, *byte_offset_object); - size_t byte_length = NumberToSize(isolate, *byte_length_object); + if (maybe_buffer->IsJSArrayBuffer()) { + Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer); + size_t array_buffer_byte_length = + NumberToSize(isolate, buffer->byte_length()); + RUNTIME_ASSERT(byte_offset <= array_buffer_byte_length); + RUNTIME_ASSERT(array_buffer_byte_length - byte_offset >= byte_length); + } else { + RUNTIME_ASSERT(maybe_buffer->IsNull()); + } - CHECK_EQ(0, static_cast<int>(byte_length % element_size)); + RUNTIME_ASSERT(byte_length % element_size == 0); size_t length = byte_length / element_size; if (length > static_cast<unsigned>(Smi::kMaxValue)) { - return isolate->Throw(*isolate->factory()-> - NewRangeError("invalid_typed_array_length", - HandleVector<Object>(NULL, 0))); + return isolate->Throw( + *isolate->factory()->NewRangeError("invalid_typed_array_length", + HandleVector<Object>(NULL, 0))); } + // All checks are done, now we can modify objects. 
+ + DCHECK(holder->GetInternalFieldCount() == + v8::ArrayBufferView::kInternalFieldCount); + for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) { + holder->SetInternalField(i, Smi::FromInt(0)); + } Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length); holder->set_length(*length_obj); - if (!maybe_buffer->IsNull()) { - Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(*maybe_buffer)); - - size_t array_buffer_byte_length = - NumberToSize(isolate, buffer->byte_length()); - CHECK(byte_offset <= array_buffer_byte_length); - CHECK(array_buffer_byte_length - byte_offset >= byte_length); + holder->set_byte_offset(*byte_offset_object); + holder->set_byte_length(*byte_length_object); + if (!maybe_buffer->IsNull()) { + Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer); holder->set_buffer(*buffer); holder->set_weak_next(buffer->weak_first_view()); buffer->set_weak_first_view(*holder); @@ -1010,8 +1016,8 @@ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset); Handle<Map> map = JSObject::GetElementsTransitionMap(holder, external_elements_kind); - holder->set_map_and_elements(*map, *elements); - ASSERT(IsExternalArrayElementsKind(holder->map()->elements_kind())); + JSObject::SetMapAndElements(holder, map, elements); + DCHECK(IsExternalArrayElementsKind(holder->map()->elements_kind())); } else { holder->set_buffer(Smi::FromInt(0)); holder->set_weak_next(isolate->heap()->undefined_value()); @@ -1029,19 +1035,16 @@ // initializes backing store using memove. // // Returns true if backing store was initialized or false otherwise. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) { +RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0); CONVERT_SMI_ARG_CHECKED(arrayId, 1); CONVERT_ARG_HANDLE_CHECKED(Object, source, 2); - CONVERT_ARG_HANDLE_CHECKED(Object, length_obj, 3); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 3); - ASSERT(holder->GetInternalFieldCount() == - v8::ArrayBufferView::kInternalFieldCount); - for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) { - holder->SetInternalField(i, Smi::FromInt(0)); - } + RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST && + arrayId <= Runtime::ARRAY_ID_LAST); ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization. size_t element_size = 1; // Bogus initialization. @@ -1054,12 +1057,15 @@ &fixed_elements_kind, &element_size); + RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind); + Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(); if (source->IsJSTypedArray() && JSTypedArray::cast(*source)->type() == array_type) { length_obj = Handle<Object>(JSTypedArray::cast(*source)->length(), isolate); } - size_t length = NumberToSize(isolate, *length_obj); + size_t length = 0; + RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length)); if ((length > static_cast<unsigned>(Smi::kMaxValue)) || (length > (kMaxInt / element_size))) { @@ -1069,6 +1075,12 @@ } size_t byte_length = length * element_size; + DCHECK(holder->GetInternalFieldCount() == + v8::ArrayBufferView::kInternalFieldCount); + for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) { + holder->SetInternalField(i, Smi::FromInt(0)); + } + // NOTE: not initializing backing store. 
// We assume that the caller of this function will initialize holder // with the loop @@ -1107,7 +1119,7 @@ static_cast<uint8_t*>(buffer->backing_store())); Handle<Map> map = JSObject::GetElementsTransitionMap( holder, external_elements_kind); - holder->set_map_and_elements(*map, *elements); + JSObject::SetMapAndElements(holder, map, elements); if (source->IsJSTypedArray()) { Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source)); @@ -1122,43 +1134,34 @@ buffer->backing_store(), backing_store + source_byte_offset, byte_length); - return *isolate->factory()->true_value(); - } else { - return *isolate->factory()->false_value(); + return isolate->heap()->true_value(); } } - return *isolate->factory()->false_value(); + return isolate->heap()->false_value(); } -#define TYPED_ARRAY_GETTER(getter, accessor) \ - RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \ +#define BUFFER_VIEW_GETTER(Type, getter, accessor) \ + RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \ HandleScope scope(isolate); \ - ASSERT(args.length() == 1); \ - CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0); \ - if (!holder->IsJSTypedArray()) \ - return isolate->Throw(*isolate->factory()->NewTypeError( \ - "not_typed_array", HandleVector<Object>(NULL, 0))); \ - Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder)); \ - return typed_array->accessor(); \ - } - -TYPED_ARRAY_GETTER(ByteLength, byte_length) -TYPED_ARRAY_GETTER(ByteOffset, byte_offset) -TYPED_ARRAY_GETTER(Length, length) - -#undef TYPED_ARRAY_GETTER - -RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGetBuffer) { - HandleScope scope(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0); - if (!holder->IsJSTypedArray()) - return isolate->Throw(*isolate->factory()->NewTypeError( - "not_typed_array", HandleVector<Object>(NULL, 0))); - Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder)); - return *typed_array->GetBuffer(); + DCHECK(args.length() == 1); \ + 
CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \ + return holder->accessor(); \ + } + +BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length) +BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset) +BUFFER_VIEW_GETTER(TypedArray, Length, length) +BUFFER_VIEW_GETTER(DataView, Buffer, buffer) + +#undef BUFFER_VIEW_GETTER + +RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0); + return *holder->GetBuffer(); } @@ -1177,22 +1180,24 @@ }; -RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) { +RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) { HandleScope scope(isolate); - CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0); - CONVERT_ARG_HANDLE_CHECKED(Object, source_obj, 1); - CONVERT_ARG_HANDLE_CHECKED(Object, offset_obj, 2); - - if (!target_obj->IsJSTypedArray()) + DCHECK(args.length() == 3); + if (!args[0]->IsJSTypedArray()) return isolate->Throw(*isolate->factory()->NewTypeError( "not_typed_array", HandleVector<Object>(NULL, 0))); - if (!source_obj->IsJSTypedArray()) + if (!args[1]->IsJSTypedArray()) return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY); + CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target_obj, 0); + CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, source_obj, 1); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset_obj, 2); + Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj)); Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj)); - size_t offset = NumberToSize(isolate, *offset_obj); + size_t offset = 0; + RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset)); size_t target_length = NumberToSize(isolate, target->length()); size_t source_length = NumberToSize(isolate, source->length()); size_t target_byte_length = NumberToSize(isolate, target->byte_length()); @@ -1225,7 +1230,7 @@ (target_base <= source_base && target_base + target_byte_length > source_base)) { // We do not support overlapping ArrayBuffers - 
ASSERT( + DCHECK( target->GetBuffer()->backing_store() == source->GetBuffer()->backing_store()); return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING); @@ -1235,34 +1240,44 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayMaxSizeInHeap) { - ASSERT_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap); +RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) { + DCHECK(args.length() == 0); + DCHECK_OBJECT_SIZE( + FLAG_typed_array_max_size_in_heap + FixedTypedArrayBase::kDataOffset); return Smi::FromInt(FLAG_typed_array_max_size_in_heap); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) { +RUNTIME_FUNCTION(Runtime_DataViewInitialize) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1); - CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset, 2); - CONVERT_ARG_HANDLE_CHECKED(Object, byte_length, 3); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3); - ASSERT(holder->GetInternalFieldCount() == + DCHECK(holder->GetInternalFieldCount() == v8::ArrayBufferView::kInternalFieldCount); for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) { holder->SetInternalField(i, Smi::FromInt(0)); } + size_t buffer_length = 0; + size_t offset = 0; + size_t length = 0; + RUNTIME_ASSERT( + TryNumberToSize(isolate, buffer->byte_length(), &buffer_length)); + RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset, &offset)); + RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length, &length)); + + // TODO(jkummerow): When we have a "safe numerics" helper class, use it here. + // Entire range [offset, offset + length] must be in bounds. + RUNTIME_ASSERT(offset <= buffer_length); + RUNTIME_ASSERT(offset + length <= buffer_length); + // No overflow. 
+ RUNTIME_ASSERT(offset + length >= offset); holder->set_buffer(*buffer); - ASSERT(byte_offset->IsNumber()); - ASSERT( - NumberToSize(isolate, buffer->byte_length()) >= - NumberToSize(isolate, *byte_offset) - + NumberToSize(isolate, *byte_length)); holder->set_byte_offset(*byte_offset); - ASSERT(byte_length->IsNumber()); holder->set_byte_length(*byte_length); holder->set_weak_next(buffer->weak_first_view()); @@ -1272,30 +1287,6 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetBuffer) { - HandleScope scope(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0); - return data_view->buffer(); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteOffset) { - HandleScope scope(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0); - return data_view->byte_offset(); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteLength) { - HandleScope scope(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0); - return data_view->byte_length(); -} - - inline static bool NeedToFlipBytes(bool is_little_endian) { #ifdef V8_TARGET_LITTLE_ENDIAN return !is_little_endian; @@ -1351,7 +1342,7 @@ Value value; size_t buffer_offset = data_view_byte_offset + byte_offset; - ASSERT( + DCHECK( NumberToSize(isolate, buffer->byte_length()) >= buffer_offset + sizeof(T)); uint8_t* source = @@ -1396,7 +1387,7 @@ Value value; value.data = data; size_t buffer_offset = data_view_byte_offset + byte_offset; - ASSERT( + DCHECK( NumberToSize(isolate, buffer->byte_length()) >= buffer_offset + sizeof(T)); uint8_t* target = @@ -1411,16 +1402,16 @@ #define DATA_VIEW_GETTER(TypeName, Type, Converter) \ - RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGet##TypeName) { \ + RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) { \ HandleScope scope(isolate); \ - ASSERT(args.length() == 3); \ + DCHECK(args.length() == 3); \ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \ - 
CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \ + CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \ Type result; \ if (DataViewGetValue( \ isolate, holder, offset, is_little_endian, &result)) { \ - return isolate->heap()->Converter(result); \ + return *isolate->factory()->Converter(result); \ } else { \ return isolate->Throw(*isolate->factory()->NewRangeError( \ "invalid_data_view_accessor_offset", \ @@ -1428,14 +1419,14 @@ } \ } -DATA_VIEW_GETTER(Uint8, uint8_t, NumberFromUint32) -DATA_VIEW_GETTER(Int8, int8_t, NumberFromInt32) -DATA_VIEW_GETTER(Uint16, uint16_t, NumberFromUint32) -DATA_VIEW_GETTER(Int16, int16_t, NumberFromInt32) -DATA_VIEW_GETTER(Uint32, uint32_t, NumberFromUint32) -DATA_VIEW_GETTER(Int32, int32_t, NumberFromInt32) -DATA_VIEW_GETTER(Float32, float, NumberFromDouble) -DATA_VIEW_GETTER(Float64, double, NumberFromDouble) +DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint) +DATA_VIEW_GETTER(Int8, int8_t, NewNumberFromInt) +DATA_VIEW_GETTER(Uint16, uint16_t, NewNumberFromUint) +DATA_VIEW_GETTER(Int16, int16_t, NewNumberFromInt) +DATA_VIEW_GETTER(Uint32, uint32_t, NewNumberFromUint) +DATA_VIEW_GETTER(Int32, int32_t, NewNumberFromInt) +DATA_VIEW_GETTER(Float32, float, NewNumber) +DATA_VIEW_GETTER(Float64, double, NewNumber) #undef DATA_VIEW_GETTER @@ -1493,12 +1484,12 @@ #define DATA_VIEW_SETTER(TypeName, Type) \ - RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewSet##TypeName) { \ + RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) { \ HandleScope scope(isolate); \ - ASSERT(args.length() == 4); \ + DCHECK(args.length() == 4); \ CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \ - CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \ - CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); \ + CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \ + CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); \ CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \ Type v = DataViewConvertValue<Type>(value->Number()); \ if (DataViewSetValue( 
\ @@ -1523,263 +1514,399 @@ #undef DATA_VIEW_SETTER -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) { +RUNTIME_FUNCTION(Runtime_SetInitialize) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0); + Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet(); holder->set_table(*table); return *holder; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) { +RUNTIME_FUNCTION(Runtime_SetAdd) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<Object> key(args[1], isolate); - Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); - table = ObjectHashSet::Add(table, key); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table())); + table = OrderedHashSet::Add(table, key); holder->set_table(*table); - return isolate->heap()->undefined_value(); + return *holder; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) { +RUNTIME_FUNCTION(Runtime_SetHas) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<Object> key(args[1], isolate); - Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); - return isolate->heap()->ToBoolean(table->Contains(*key)); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table())); + return isolate->heap()->ToBoolean(table->Contains(key)); +} + + +RUNTIME_FUNCTION(Runtime_SetDelete) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table())); + bool was_present = false; + table = 
OrderedHashSet::Remove(table, key, &was_present); + holder->set_table(*table); + return isolate->heap()->ToBoolean(was_present); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) { +RUNTIME_FUNCTION(Runtime_SetClear) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<Object> key(args[1], isolate); - Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); - table = ObjectHashSet::Remove(table, key); + Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table())); + table = OrderedHashSet::Clear(table); holder->set_table(*table); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) { +RUNTIME_FUNCTION(Runtime_SetGetSize) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); + Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table())); return Smi::FromInt(table->NumberOfElements()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) { +RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) { + HandleScope scope(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1); + CONVERT_SMI_ARG_CHECKED(kind, 2) + RUNTIME_ASSERT(kind == JSSetIterator::kKindValues || + kind == JSSetIterator::kKindEntries); + Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table())); + holder->set_table(*table); + holder->set_index(Smi::FromInt(0)); + holder->set_kind(Smi::FromInt(kind)); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_SetIteratorNext) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_CHECKED(JSSetIterator, holder, 0); + CONVERT_ARG_CHECKED(JSArray, value_array, 1); + return holder->Next(value_array); +} + + 
+RUNTIME_FUNCTION(Runtime_MapInitialize) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); - Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0); + Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap(); holder->set_table(*table); return *holder; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) { +RUNTIME_FUNCTION(Runtime_MapGet) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); - Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<Object> lookup(table->Lookup(*key), isolate); + Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table())); + Handle<Object> lookup(table->Lookup(key), isolate); return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) { +RUNTIME_FUNCTION(Runtime_MapHas) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); - Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<Object> lookup(table->Lookup(*key), isolate); + Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table())); + Handle<Object> lookup(table->Lookup(key), isolate); return isolate->heap()->ToBoolean(!lookup->IsTheHole()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) { +RUNTIME_FUNCTION(Runtime_MapDelete) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); - Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<Object> lookup(table->Lookup(*key), isolate); - Handle<ObjectHashTable> new_table = - 
ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value()); + Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table())); + bool was_present = false; + Handle<OrderedHashMap> new_table = + OrderedHashMap::Remove(table, key, &was_present); holder->set_table(*new_table); - return isolate->heap()->ToBoolean(!lookup->IsTheHole()); + return isolate->heap()->ToBoolean(was_present); +} + + +RUNTIME_FUNCTION(Runtime_MapClear) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); + Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table())); + table = OrderedHashMap::Clear(table); + holder->set_table(*table); + return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) { +RUNTIME_FUNCTION(Runtime_MapSet) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); - Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value); + Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table())); + Handle<OrderedHashMap> new_table = OrderedHashMap::Put(table, key, value); holder->set_table(*new_table); - return isolate->heap()->undefined_value(); + return *holder; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) { +RUNTIME_FUNCTION(Runtime_MapGetSize) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); - Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); + Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table())); return Smi::FromInt(table->NumberOfElements()); } -static JSWeakCollection* WeakCollectionInitialize(Isolate* isolate, +RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) { + 
HandleScope scope(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1); + CONVERT_SMI_ARG_CHECKED(kind, 2) + RUNTIME_ASSERT(kind == JSMapIterator::kKindKeys + || kind == JSMapIterator::kKindValues + || kind == JSMapIterator::kKindEntries); + Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table())); + holder->set_table(*table); + holder->set_index(Smi::FromInt(0)); + holder->set_kind(Smi::FromInt(kind)); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0); + Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); + Handle<FixedArray> entries = + isolate->factory()->NewFixedArray(table->NumberOfElements() * 2); + { + DisallowHeapAllocation no_gc; + int number_of_non_hole_elements = 0; + for (int i = 0; i < table->Capacity(); i++) { + Handle<Object> key(table->KeyAt(i), isolate); + if (table->IsKey(*key)) { + entries->set(number_of_non_hole_elements++, *key); + entries->set(number_of_non_hole_elements++, table->Lookup(key)); + } + } + DCHECK_EQ(table->NumberOfElements() * 2, number_of_non_hole_elements); + } + return *isolate->factory()->NewJSArrayWithElements(entries); +} + + +RUNTIME_FUNCTION(Runtime_MapIteratorNext) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_CHECKED(JSMapIterator, holder, 0); + CONVERT_ARG_CHECKED(JSArray, value_array, 1); + return holder->Next(value_array); +} + + +static Handle<JSWeakCollection> WeakCollectionInitialize( + Isolate* isolate, Handle<JSWeakCollection> weak_collection) { - ASSERT(weak_collection->map()->inobject_properties() == 0); - Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0); + DCHECK(weak_collection->map()->inobject_properties() == 0); + Handle<ObjectHashTable> table = 
ObjectHashTable::New(isolate, 0); weak_collection->set_table(*table); - weak_collection->set_next(Smi::FromInt(0)); - return *weak_collection; + return weak_collection; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionInitialize) { +RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0); - return WeakCollectionInitialize(isolate, weak_collection); + return *WeakCollectionInitialize(isolate, weak_collection); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionGet) { +RUNTIME_FUNCTION(Runtime_WeakCollectionGet) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol()); Handle<ObjectHashTable> table( ObjectHashTable::cast(weak_collection->table())); - Handle<Object> lookup(table->Lookup(*key), isolate); + RUNTIME_ASSERT(table->IsKey(*key)); + Handle<Object> lookup(table->Lookup(key), isolate); return lookup->IsTheHole() ? 
isolate->heap()->undefined_value() : *lookup; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionHas) { +RUNTIME_FUNCTION(Runtime_WeakCollectionHas) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol()); Handle<ObjectHashTable> table( ObjectHashTable::cast(weak_collection->table())); - Handle<Object> lookup(table->Lookup(*key), isolate); + RUNTIME_ASSERT(table->IsKey(*key)); + Handle<Object> lookup(table->Lookup(key), isolate); return isolate->heap()->ToBoolean(!lookup->IsTheHole()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionDelete) { +RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol()); Handle<ObjectHashTable> table(ObjectHashTable::cast( weak_collection->table())); - Handle<Object> lookup(table->Lookup(*key), isolate); + RUNTIME_ASSERT(table->IsKey(*key)); + bool was_present = false; Handle<ObjectHashTable> new_table = - ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value()); + ObjectHashTable::Remove(table, key, &was_present); weak_collection->set_table(*new_table); - return isolate->heap()->ToBoolean(!lookup->IsTheHole()); + return isolate->heap()->ToBoolean(was_present); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionSet) { +RUNTIME_FUNCTION(Runtime_WeakCollectionSet) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); - Handle<Object> value(args[2], isolate); + RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol()); + 
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); Handle<ObjectHashTable> table( ObjectHashTable::cast(weak_collection->table())); + RUNTIME_ASSERT(table->IsKey(*key)); Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value); weak_collection->set_table(*new_table); - return isolate->heap()->undefined_value(); + return *weak_collection; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* obj = args[0]; - if (!obj->IsJSObject()) return isolate->heap()->null_value(); - return JSObject::cast(obj)->class_name(); +RUNTIME_FUNCTION(Runtime_GetWeakSetValues) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0); + Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); + Handle<FixedArray> values = + isolate->factory()->NewFixedArray(table->NumberOfElements()); + { + DisallowHeapAllocation no_gc; + int number_of_non_hole_elements = 0; + for (int i = 0; i < table->Capacity(); i++) { + Handle<Object> key(table->KeyAt(i), isolate); + if (table->IsKey(*key)) { + values->set(number_of_non_hole_elements++, *key); + } + } + DCHECK_EQ(table->NumberOfElements(), number_of_non_hole_elements); + } + return *isolate->factory()->NewJSArrayWithElements(values); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) { +RUNTIME_FUNCTION(Runtime_GetPrototype) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0); // We don't expect access checks to be needed on JSProxy objects. 
- ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject()); + DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject()); + PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER); do { - if (obj->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj), - isolate->factory()->proto_string(), - v8::ACCESS_GET)) { - isolate->ReportFailedAccessCheckWrapper(Handle<JSObject>::cast(obj), - v8::ACCESS_GET); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() && + !isolate->MayNamedAccess( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), + isolate->factory()->proto_string(), v8::ACCESS_GET)) { + isolate->ReportFailedAccessCheck( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), + v8::ACCESS_GET); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return isolate->heap()->undefined_value(); } - obj = handle(obj->GetPrototype(isolate), isolate); - } while (obj->IsJSObject() && - JSObject::cast(*obj)->map()->is_hidden_prototype()); - return *obj; + iter.AdvanceIgnoringProxies(); + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) { + return *PrototypeIterator::GetCurrent(iter); + } + } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)); + return *PrototypeIterator::GetCurrent(iter); } -static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate, - Object* receiver) { - Object* current = receiver->GetPrototype(isolate); - while (current->IsJSObject() && - JSObject::cast(current)->map()->is_hidden_prototype()) { - current = current->GetPrototype(isolate); +static inline Handle<Object> GetPrototypeSkipHiddenPrototypes( + Isolate* isolate, Handle<Object> receiver) { + PrototypeIterator iter(isolate, receiver); + while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) { + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) { + return PrototypeIterator::GetCurrent(iter); + } + iter.Advance(); } - return current; + return 
PrototypeIterator::GetCurrent(iter); +} + + +RUNTIME_FUNCTION(Runtime_InternalSetPrototype) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1); + DCHECK(!obj->IsAccessCheckNeeded()); + DCHECK(!obj->map()->is_observed()); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, JSObject::SetPrototype(obj, prototype, false)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) { +RUNTIME_FUNCTION(Runtime_SetPrototype) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1); if (obj->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(obj, - isolate->factory()->proto_string(), - v8::ACCESS_SET)) { - isolate->ReportFailedAccessCheckWrapper(obj, v8::ACCESS_SET); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + !isolate->MayNamedAccess( + obj, isolate->factory()->proto_string(), v8::ACCESS_SET)) { + isolate->ReportFailedAccessCheck(obj, v8::ACCESS_SET); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return isolate->heap()->undefined_value(); } if (obj->map()->is_observed()) { - Handle<Object> old_value( - GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate); - - Handle<Object> result = JSObject::SetPrototype(obj, prototype, true); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> old_value = GetPrototypeSkipHiddenPrototypes(isolate, obj); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::SetPrototype(obj, prototype, true)); - Handle<Object> new_value( - GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate); + Handle<Object> new_value = GetPrototypeSkipHiddenPrototypes(isolate, obj); if (!new_value->SameValue(*old_value)) { JSObject::EnqueueChangeRecord(obj, "setPrototype", isolate->factory()->proto_string(), @@ -1787,130 
+1914,26 @@ } return *result; } - Handle<Object> result = JSObject::SetPrototype(obj, prototype, true); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::SetPrototype(obj, prototype, true)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_IsInPrototypeChain) { + HandleScope shs(isolate); + DCHECK(args.length() == 2); // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8). - Object* O = args[0]; - Object* V = args[1]; + CONVERT_ARG_HANDLE_CHECKED(Object, O, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, V, 1); + PrototypeIterator iter(isolate, V, PrototypeIterator::START_AT_RECEIVER); while (true) { - Object* prototype = V->GetPrototype(isolate); - if (prototype->IsNull()) return isolate->heap()->false_value(); - if (O == prototype) return isolate->heap()->true_value(); - V = prototype; - } -} - - -static bool CheckAccessException(Object* callback, - v8::AccessType access_type) { - DisallowHeapAllocation no_gc; - if (callback->IsAccessorInfo()) { - AccessorInfo* info = AccessorInfo::cast(callback); - return - (access_type == v8::ACCESS_HAS && - (info->all_can_read() || info->all_can_write())) || - (access_type == v8::ACCESS_GET && info->all_can_read()) || - (access_type == v8::ACCESS_SET && info->all_can_write()); - } - if (callback->IsAccessorPair()) { - AccessorPair* info = AccessorPair::cast(callback); - return - (access_type == v8::ACCESS_HAS && - (info->all_can_read() || info->all_can_write())) || - (access_type == v8::ACCESS_GET && info->all_can_read()) || - (access_type == v8::ACCESS_SET && info->all_can_write()); - } - return false; -} - - -template<class Key> -static bool CheckGenericAccess( - Handle<JSObject> receiver, - Handle<JSObject> holder, - Key key, - v8::AccessType access_type, - bool (Isolate::*mayAccess)(Handle<JSObject>, Key, v8::AccessType)) { - 
Isolate* isolate = receiver->GetIsolate(); - for (Handle<JSObject> current = receiver; - true; - current = handle(JSObject::cast(current->GetPrototype()), isolate)) { - if (current->IsAccessCheckNeeded() && - !(isolate->*mayAccess)(current, key, access_type)) { - return false; - } - if (current.is_identical_to(holder)) break; + iter.AdvanceIgnoringProxies(); + if (iter.IsAtEnd()) return isolate->heap()->false_value(); + if (iter.IsAtEnd(O)) return isolate->heap()->true_value(); } - return true; -} - - -enum AccessCheckResult { - ACCESS_FORBIDDEN, - ACCESS_ALLOWED, - ACCESS_ABSENT -}; - - -static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj, - Handle<Name> name, - v8::AccessType access_type) { - uint32_t index; - if (name->AsArrayIndex(&index)) { - // TODO(1095): we should traverse hidden prototype hierachy as well. - if (CheckGenericAccess( - obj, obj, index, access_type, &Isolate::MayIndexedAccessWrapper)) { - return ACCESS_ALLOWED; - } - - obj->GetIsolate()->ReportFailedAccessCheckWrapper(obj, access_type); - return ACCESS_FORBIDDEN; - } - - Isolate* isolate = obj->GetIsolate(); - LookupResult lookup(isolate); - obj->LocalLookup(*name, &lookup, true); - - if (!lookup.IsProperty()) return ACCESS_ABSENT; - Handle<JSObject> holder(lookup.holder(), isolate); - if (CheckGenericAccess<Handle<Object> >( - obj, holder, name, access_type, &Isolate::MayNamedAccessWrapper)) { - return ACCESS_ALLOWED; - } - - // Access check callback denied the access, but some properties - // can have a special permissions which override callbacks descision - // (currently see v8::AccessControl). - // API callbacks can have per callback access exceptions. - switch (lookup.type()) { - case CALLBACKS: - if (CheckAccessException(lookup.GetCallbackObject(), access_type)) { - return ACCESS_ALLOWED; - } - break; - case INTERCEPTOR: - // If the object has an interceptor, try real named properties. - // Overwrite the result to fetch the correct property later. 
- holder->LookupRealNamedProperty(*name, &lookup); - if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) { - if (CheckAccessException(lookup.GetCallbackObject(), access_type)) { - return ACCESS_ALLOWED; - } - } - break; - default: - break; - } - - isolate->ReportFailedAccessCheckWrapper(obj, access_type); - return ACCESS_FORBIDDEN; } @@ -1927,63 +1950,73 @@ }; -static Handle<Object> GetOwnProperty(Isolate* isolate, - Handle<JSObject> obj, - Handle<Name> name) { +MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate, + Handle<JSObject> obj, + Handle<Name> name) { Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); - // Due to some WebKit tests, we want to make sure that we do not log - // more than one access failure here. - AccessCheckResult access_check_result = - CheckPropertyAccess(obj, name, v8::ACCESS_HAS); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - switch (access_check_result) { - case ACCESS_FORBIDDEN: return factory->false_value(); - case ACCESS_ALLOWED: break; - case ACCESS_ABSENT: return factory->undefined_value(); - } - - PropertyAttributes attrs = JSReceiver::GetLocalPropertyAttribute(obj, name); - if (attrs == ABSENT) { - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - return factory->undefined_value(); - } - ASSERT(!isolate->has_scheduled_exception()); - AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name); - Handle<AccessorPair> accessors(raw_accessors, isolate); - Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE); + + PropertyAttributes attrs; + uint32_t index = 0; + Handle<Object> value; + MaybeHandle<AccessorPair> maybe_accessors; + // TODO(verwaest): Unify once indexed properties can be handled by the + // LookupIterator. + if (name->AsArrayIndex(&index)) { + // Get attributes. 
+ Maybe<PropertyAttributes> maybe = + JSReceiver::GetOwnElementAttribute(obj, index); + if (!maybe.has_value) return MaybeHandle<Object>(); + attrs = maybe.value; + if (attrs == ABSENT) return factory->undefined_value(); + + // Get AccessorPair if present. + maybe_accessors = JSObject::GetOwnElementAccessorPair(obj, index); + + // Get value if not an AccessorPair. + if (maybe_accessors.is_null()) { + ASSIGN_RETURN_ON_EXCEPTION(isolate, value, + Runtime::GetElementOrCharAt(isolate, obj, index), Object); + } + } else { + // Get attributes. + LookupIterator it(obj, name, LookupIterator::CHECK_OWN); + Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it); + if (!maybe.has_value) return MaybeHandle<Object>(); + attrs = maybe.value; + if (attrs == ABSENT) return factory->undefined_value(); + + // Get AccessorPair if present. + if (it.state() == LookupIterator::PROPERTY && + it.property_kind() == LookupIterator::ACCESSOR && + it.GetAccessors()->IsAccessorPair()) { + maybe_accessors = Handle<AccessorPair>::cast(it.GetAccessors()); + } + + // Get value if not an AccessorPair. + if (maybe_accessors.is_null()) { + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, Object::GetProperty(&it), Object); + } + } + DCHECK(!isolate->has_pending_exception()); + Handle<FixedArray> elms = factory->NewFixedArray(DESCRIPTOR_SIZE); elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0)); elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0)); - elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL)); + elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(!maybe_accessors.is_null())); - if (raw_accessors == NULL) { - elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0)); - // GetProperty does access check. 
- Handle<Object> value = GetProperty(isolate, obj, name); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<Object>::null()); - elms->set(VALUE_INDEX, *value); - } else { - // Access checks are performed for both accessors separately. - // When they fail, the respective field is not set in the descriptor. + Handle<AccessorPair> accessors; + if (maybe_accessors.ToHandle(&accessors)) { Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate); Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate); - - if (!getter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_GET)) { - ASSERT(!isolate->has_scheduled_exception()); - elms->set(GETTER_INDEX, *getter); - } else { - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - } - - if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) { - ASSERT(!isolate->has_scheduled_exception()); - elms->set(SETTER_INDEX, *setter); - } else { - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - } + elms->set(GETTER_INDEX, *getter); + elms->set(SETTER_INDEX, *setter); + } else { + elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0)); + elms->set(VALUE_INDEX, *value); } - return isolate->factory()->NewJSArrayWithElements(elms); + return factory->NewJSArrayWithElements(elms); } @@ -1994,79 +2027,83 @@ // [false, value, Writeable, Enumerable, Configurable] // if args[1] is an accessor on args[0] // [true, GetFunction, SetFunction, Enumerable, Configurable] -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) { +RUNTIME_FUNCTION(Runtime_GetOwnProperty) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); - Handle<Object> result = GetOwnProperty(isolate, obj, name); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, GetOwnProperty(isolate, obj, name)); return 
*result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) { +RUNTIME_FUNCTION(Runtime_PreventExtensions) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); - Handle<Object> result = JSObject::PreventExtensions(obj); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, JSObject::PreventExtensions(obj)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) { +RUNTIME_FUNCTION(Runtime_IsExtensible) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, obj, 0); if (obj->IsJSGlobalProxy()) { - Object* proto = obj->GetPrototype(); - if (proto->IsNull()) return isolate->heap()->false_value(); - ASSERT(proto->IsJSGlobalObject()); - obj = JSObject::cast(proto); + PrototypeIterator iter(isolate, obj); + if (iter.IsAtEnd()) return isolate->heap()->false_value(); + DCHECK(iter.GetCurrent()->IsJSGlobalObject()); + obj = JSObject::cast(iter.GetCurrent()); } return isolate->heap()->ToBoolean(obj->map()->is_extensible()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) { +RUNTIME_FUNCTION(Runtime_RegExpCompile) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0); CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1); CONVERT_ARG_HANDLE_CHECKED(String, flags, 2); - Handle<Object> result = RegExpImpl::Compile(re, pattern, flags); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, RegExpImpl::Compile(re, pattern, flags)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) { +RUNTIME_FUNCTION(Runtime_CreateApiFunction) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 2); 
CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0); - return *isolate->factory()->CreateApiFunction(data); + CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1); + return *isolate->factory()->CreateApiFunction(data, prototype); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) { +RUNTIME_FUNCTION(Runtime_IsTemplate) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* arg = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, arg, 0); bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo(); return isolate->heap()->ToBoolean(result); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) { +RUNTIME_FUNCTION(Runtime_GetTemplateField) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(HeapObject, templ, 0); - CONVERT_SMI_ARG_CHECKED(index, 1) + CONVERT_SMI_ARG_CHECKED(index, 1); int offset = index * kPointerSize + HeapObject::kHeaderSize; InstanceType type = templ->map()->instance_type(); - RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE || - type == OBJECT_TEMPLATE_INFO_TYPE); + RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE || + type == OBJECT_TEMPLATE_INFO_TYPE); RUNTIME_ASSERT(offset > 0); if (type == FUNCTION_TEMPLATE_INFO_TYPE) { RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize); @@ -2077,94 +2114,97 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(HeapObject, object, 0); - Map* old_map = object->map(); +RUNTIME_FUNCTION(Runtime_DisableAccessChecks) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0); + Handle<Map> old_map(object->map()); bool needs_access_checks = old_map->is_access_check_needed(); if (needs_access_checks) { // Copy map so it won't interfere constructor's initial map. 
- Map* new_map; - MaybeObject* maybe_new_map = old_map->Copy(); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; - + Handle<Map> new_map = Map::Copy(old_map); new_map->set_is_access_check_needed(false); - object->set_map(new_map); + JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map); } return isolate->heap()->ToBoolean(needs_access_checks); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(HeapObject, object, 0); - Map* old_map = object->map(); - if (!old_map->is_access_check_needed()) { - // Copy map so it won't interfere constructor's initial map. - Map* new_map; - MaybeObject* maybe_new_map = old_map->Copy(); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; - - new_map->set_is_access_check_needed(true); - object->set_map(new_map); - } +RUNTIME_FUNCTION(Runtime_EnableAccessChecks) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + Handle<Map> old_map(object->map()); + RUNTIME_ASSERT(!old_map->is_access_check_needed()); + // Copy map so it won't interfere constructor's initial map. + Handle<Map> new_map = Map::Copy(old_map); + new_map->set_is_access_check_needed(true); + JSObject::MigrateToMap(object, new_map); return isolate->heap()->undefined_value(); } -// Transform getter or setter into something DefineAccessor can handle. 
-static Handle<Object> InstantiateAccessorComponent(Isolate* isolate, - Handle<Object> component) { - if (component->IsUndefined()) return isolate->factory()->null_value(); - Handle<FunctionTemplateInfo> info = - Handle<FunctionTemplateInfo>::cast(component); - return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction()); +static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) { + HandleScope scope(isolate); + Handle<Object> args[1] = { name }; + Handle<Object> error = isolate->factory()->NewTypeError( + "var_redeclaration", HandleVector(args, 1)); + return isolate->Throw(*error); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) { - HandleScope scope(isolate); - ASSERT(args.length() == 6); - CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); - CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); - CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2); - CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3); - CONVERT_SMI_ARG_CHECKED(attribute, 4); - CONVERT_SMI_ARG_CHECKED(access_control, 5); - RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo()); - RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo()); - JSObject::DefineAccessor(object, - name, - InstantiateAccessorComponent(isolate, getter), - InstantiateAccessorComponent(isolate, setter), - static_cast<PropertyAttributes>(attribute), - static_cast<v8::AccessControl>(access_control)); - return isolate->heap()->undefined_value(); -} +// May throw a RedeclarationError. +static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global, + Handle<String> name, Handle<Object> value, + PropertyAttributes attr, bool is_var, + bool is_const, bool is_function) { + // Do the lookup own properties only, see ES5 erratum. 
+ LookupIterator it(global, name, LookupIterator::CHECK_HIDDEN); + Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it); + DCHECK(maybe.has_value); + PropertyAttributes old_attributes = maybe.value; + if (old_attributes != ABSENT) { + // The name was declared before; check for conflicting re-declarations. + if (is_const) return ThrowRedeclarationError(isolate, name); -static Failure* ThrowRedeclarationError(Isolate* isolate, - const char* type, - Handle<String> name) { - HandleScope scope(isolate); - Handle<Object> type_handle = - isolate->factory()->NewStringFromAscii(CStrVector(type)); - Handle<Object> args[2] = { type_handle, name }; - Handle<Object> error = - isolate->factory()->NewTypeError("redeclaration", HandleVector(args, 2)); - return isolate->Throw(*error); + // Skip var re-declarations. + if (is_var) return isolate->heap()->undefined_value(); + + DCHECK(is_function); + if ((old_attributes & DONT_DELETE) != 0) { + // Only allow reconfiguring globals to functions in user code (no + // natives, which are marked as read-only). + DCHECK((attr & READ_ONLY) == 0); + + // Check whether we can reconfigure the existing property into a + // function. + PropertyDetails old_details = it.property_details(); + // TODO(verwaest): CALLBACKS invalidly includes ExecutableAccessInfo, + // which are actually data properties, not accessor properties. + if (old_details.IsReadOnly() || old_details.IsDontEnum() || + old_details.type() == CALLBACKS) { + return ThrowRedeclarationError(isolate, name); + } + // If the existing property is not configurable, keep its attributes. Do + attr = old_attributes; + } + } + + // Define or redefine own property. 
+ RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes( + global, name, value, attr)); + + return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareGlobals) { +RUNTIME_FUNCTION(Runtime_DeclareGlobals) { HandleScope scope(isolate); - ASSERT(args.length() == 3); - Handle<GlobalObject> global = Handle<GlobalObject>( - isolate->context()->global_object()); + DCHECK(args.length() == 3); + Handle<GlobalObject> global(isolate->global_object()); - Handle<Context> context = args.at<Context>(0); + CONVERT_ARG_HANDLE_CHECKED(Context, context, 0); CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1); CONVERT_SMI_ARG_CHECKED(flags, 2); @@ -2173,188 +2213,49 @@ for (int i = 0; i < length; i += 2) { HandleScope scope(isolate); Handle<String> name(String::cast(pairs->get(i))); - Handle<Object> value(pairs->get(i + 1), isolate); + Handle<Object> initial_value(pairs->get(i + 1), isolate); // We have to declare a global const property. To capture we only // assign to it when evaluating the assignment for "const x = // <expr>" the initial value is the hole. - bool is_var = value->IsUndefined(); - bool is_const = value->IsTheHole(); - bool is_function = value->IsSharedFunctionInfo(); - ASSERT(is_var + is_const + is_function == 1); - - if (is_var || is_const) { - // Lookup the property in the global object, and don't set the - // value of the variable if the property is already there. - // Do the lookup locally only, see ES5 erratum. - LookupResult lookup(isolate); - global->LocalLookup(*name, &lookup, true); - if (lookup.IsFound()) { - // We found an existing property. Unless it was an interceptor - // that claims the property is absent, skip this declaration. - if (!lookup.IsInterceptor()) continue; - if (JSReceiver::GetPropertyAttribute(global, name) != ABSENT) continue; - // Fall-through and introduce the absent property by using - // SetProperty. 
- } - } else if (is_function) { + bool is_var = initial_value->IsUndefined(); + bool is_const = initial_value->IsTheHole(); + bool is_function = initial_value->IsSharedFunctionInfo(); + DCHECK(is_var + is_const + is_function == 1); + + Handle<Object> value; + if (is_function) { // Copy the function and update its context. Use it as value. Handle<SharedFunctionInfo> shared = - Handle<SharedFunctionInfo>::cast(value); + Handle<SharedFunctionInfo>::cast(initial_value); Handle<JSFunction> function = - isolate->factory()->NewFunctionFromSharedFunctionInfo( - shared, context, TENURED); + isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context, + TENURED); value = function; + } else { + value = isolate->factory()->undefined_value(); } - LookupResult lookup(isolate); - global->LocalLookup(*name, &lookup, true); - // Compute the property attributes. According to ECMA-262, // the property must be non-configurable except in eval. - int attr = NONE; - bool is_eval = DeclareGlobalsEvalFlag::decode(flags); - if (!is_eval) { - attr |= DONT_DELETE; - } bool is_native = DeclareGlobalsNativeFlag::decode(flags); - if (is_const || (is_native && is_function)) { - attr |= READ_ONLY; - } - - StrictMode strict_mode = DeclareGlobalsStrictMode::decode(flags); - - if (!lookup.IsFound() || is_function) { - // If the local property exists, check that we can reconfigure it - // as required for function declarations. - if (lookup.IsFound() && lookup.IsDontDelete()) { - if (lookup.IsReadOnly() || lookup.IsDontEnum() || - lookup.IsPropertyCallbacks()) { - return ThrowRedeclarationError(isolate, "function", name); - } - // If the existing property is not configurable, keep its attributes. - attr = lookup.GetAttributes(); - } - // Define or redefine own property. - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - global, name, value, static_cast<PropertyAttributes>(attr))); - } else { - // Do a [[Put]] on the existing (own) property. 
- RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetProperty( - global, name, value, static_cast<PropertyAttributes>(attr), - strict_mode)); - } - } - - ASSERT(!isolate->has_pending_exception()); - return isolate->heap()->undefined_value(); -} - - -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareContextSlot) { - HandleScope scope(isolate); - ASSERT(args.length() == 4); - - // Declarations are always made in a function or native context. In the - // case of eval code, the context passed is the context of the caller, - // which may be some nested context and not the declaration context. - RUNTIME_ASSERT(args[0]->IsContext()); - Handle<Context> context(Context::cast(args[0])->declaration_context()); - - Handle<String> name(String::cast(args[1])); - PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2)); - RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE); - Handle<Object> initial_value(args[3], isolate); - - int index; - PropertyAttributes attributes; - ContextLookupFlags flags = DONT_FOLLOW_CHAINS; - BindingFlags binding_flags; - Handle<Object> holder = - context->Lookup(name, flags, &index, &attributes, &binding_flags); - - if (attributes != ABSENT) { - // The name was declared before; check for conflicting re-declarations. - // Note: this is actually inconsistent with what happens for globals (where - // we silently ignore such declarations). - if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) { - // Functions are not read-only. - ASSERT(mode != READ_ONLY || initial_value->IsTheHole()); - const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var"; - return ThrowRedeclarationError(isolate, type, name); - } - - // Initialize it if necessary. 
- if (*initial_value != NULL) { - if (index >= 0) { - ASSERT(holder.is_identical_to(context)); - if (((attributes & READ_ONLY) == 0) || - context->get(index)->IsTheHole()) { - context->set(index, *initial_value); - } - } else { - // Slow case: The property is in the context extension object of a - // function context or the global object of a native context. - Handle<JSObject> object = Handle<JSObject>::cast(holder); - RETURN_IF_EMPTY_HANDLE( - isolate, - JSReceiver::SetProperty(object, name, initial_value, mode, SLOPPY)); - } - } - - } else { - // The property is not in the function context. It needs to be - // "declared" in the function context's extension context or as a - // property of the the global object. - Handle<JSObject> object; - if (context->has_extension()) { - object = Handle<JSObject>(JSObject::cast(context->extension())); - } else { - // Context extension objects are allocated lazily. - ASSERT(context->IsFunctionContext()); - object = isolate->factory()->NewJSObject( - isolate->context_extension_function()); - context->set_extension(*object); - } - ASSERT(*object != NULL); + bool is_eval = DeclareGlobalsEvalFlag::decode(flags); + int attr = NONE; + if (is_const) attr |= READ_ONLY; + if (is_function && is_native) attr |= READ_ONLY; + if (!is_const && !is_eval) attr |= DONT_DELETE; - // Declare the property by setting it to the initial value if provided, - // or undefined, and use the correct mode (e.g. READ_ONLY attribute for - // constant declarations). - ASSERT(!JSReceiver::HasLocalProperty(object, name)); - Handle<Object> value(isolate->heap()->undefined_value(), isolate); - if (*initial_value != NULL) value = initial_value; - // Declaring a const context slot is a conflicting declaration if - // there is a callback with that name in a prototype. It is - // allowed to introduce const variables in - // JSContextExtensionObjects. 
They are treated specially in - // SetProperty and no setters are invoked for those since they are - // not real JSObjects. - if (initial_value->IsTheHole() && - !object->IsJSContextExtensionObject()) { - LookupResult lookup(isolate); - object->Lookup(*name, &lookup); - if (lookup.IsPropertyCallbacks()) { - return ThrowRedeclarationError(isolate, "const", name); - } - } - if (object->IsJSGlobalObject()) { - // Define own property on the global object. - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes(object, name, value, mode)); - } else { - RETURN_IF_EMPTY_HANDLE(isolate, - JSReceiver::SetProperty(object, name, value, mode, SLOPPY)); - } + Object* result = DeclareGlobals(isolate, global, name, value, + static_cast<PropertyAttributes>(attr), + is_var, is_const, is_function); + if (isolate->has_pending_exception()) return result; } return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { +RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) { HandleScope scope(isolate); // args[0] == name // args[1] == language_mode @@ -2362,237 +2263,212 @@ // Determine if we need to assign to the variable if it already // exists (based on the number of arguments). - RUNTIME_ASSERT(args.length() == 2 || args.length() == 3); - bool assign = args.length() == 3; + RUNTIME_ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, name, 0); - RUNTIME_ASSERT(args[1]->IsSmi()); CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); - // According to ECMA-262, section 12.2, page 62, the property must - // not be deletable. - PropertyAttributes attributes = DONT_DELETE; - - // Lookup the property locally in the global object. If it isn't - // there, there is a property with this name in the prototype chain. 
- // We follow Safari and Firefox behavior and only set the property - // locally if there is an explicit initialization value that we have - // to assign to the property. - // Note that objects can have hidden prototypes, so we need to traverse - // the whole chain of hidden prototypes to do a 'local' lookup. - LookupResult lookup(isolate); - isolate->context()->global_object()->LocalLookup(*name, &lookup, true); - if (lookup.IsInterceptor()) { - Handle<JSObject> holder(lookup.holder()); - PropertyAttributes intercepted = - JSReceiver::GetPropertyAttribute(holder, name); - if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) { - // Found an interceptor that's not read only. - if (assign) { - CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); - Handle<Object> result = JSObject::SetPropertyForResult( - holder, &lookup, name, value, attributes, strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; - } else { - return isolate->heap()->undefined_value(); - } - } - } - - if (assign) { - CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); - Handle<GlobalObject> global(isolate->context()->global_object()); - Handle<Object> result = JSReceiver::SetProperty( - global, name, value, attributes, strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; - } - return isolate->heap()->undefined_value(); + Handle<GlobalObject> global(isolate->context()->global_object()); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, Object::SetProperty(global, name, value, strict_mode)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InitializeConstGlobal) { - SealHandleScope shs(isolate); +RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) { + HandleScope handle_scope(isolate); // All constants are declared with an initial value. The name // of the constant is the first argument and the initial value // is the second. 
RUNTIME_ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, name, 0); - Handle<Object> value = args.at<Object>(1); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 1); - // Get the current global object from top. - GlobalObject* global = isolate->context()->global_object(); + Handle<GlobalObject> global = isolate->global_object(); - // According to ECMA-262, section 12.2, page 62, the property must - // not be deletable. Since it's a const, it must be READ_ONLY too. - PropertyAttributes attributes = - static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY); + // Lookup the property as own on the global object. + LookupIterator it(global, name, LookupIterator::CHECK_HIDDEN); + Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it); + DCHECK(maybe.has_value); + PropertyAttributes old_attributes = maybe.value; - // Lookup the property locally in the global object. If it isn't - // there, we add the property and take special precautions to always - // add it as a local property even in case of callbacks in the - // prototype chain (this rules out using SetProperty). - // We use SetLocalPropertyIgnoreAttributes instead - LookupResult lookup(isolate); - global->LocalLookup(*name, &lookup); - if (!lookup.IsFound()) { - HandleScope handle_scope(isolate); - Handle<GlobalObject> global(isolate->context()->global_object()); - RETURN_IF_EMPTY_HANDLE( - isolate, - JSObject::SetLocalPropertyIgnoreAttributes(global, name, value, - attributes)); - return *value; + PropertyAttributes attr = + static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY); + // Set the value if the property is either missing, or the property attributes + // allow setting the value without invoking an accessor. + if (it.IsFound()) { + // Ignore if we can't reconfigure the value. 
+ if ((old_attributes & DONT_DELETE) != 0) { + if ((old_attributes & READ_ONLY) != 0 || + it.property_kind() == LookupIterator::ACCESSOR) { + return *value; + } + attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY); + } } - if (!lookup.IsReadOnly()) { - // Restore global object from context (in case of GC) and continue - // with setting the value. - HandleScope handle_scope(isolate); - Handle<GlobalObject> global(isolate->context()->global_object()); - - // BUG 1213575: Handle the case where we have to set a read-only - // property through an interceptor and only do it if it's - // uninitialized, e.g. the hole. Nirk... - // Passing sloppy mode because the property is writable. - RETURN_IF_EMPTY_HANDLE( - isolate, - JSReceiver::SetProperty(global, name, value, attributes, SLOPPY)); - return *value; + RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes( + global, name, value, attr)); + + return *value; +} + + +RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) { + HandleScope scope(isolate); + DCHECK(args.length() == 4); + + // Declarations are always made in a function, native, or global context. In + // the case of eval code, the context passed is the context of the caller, + // which may be some nested context and not the declaration context. + CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 0); + Handle<Context> context(context_arg->declaration_context()); + CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_SMI_ARG_CHECKED(attr_arg, 2); + PropertyAttributes attr = static_cast<PropertyAttributes>(attr_arg); + RUNTIME_ASSERT(attr == READ_ONLY || attr == NONE); + CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 3); + + // TODO(verwaest): Unify the encoding indicating "var" with DeclareGlobals. 
+ bool is_var = *initial_value == NULL; + bool is_const = initial_value->IsTheHole(); + bool is_function = initial_value->IsJSFunction(); + DCHECK(is_var + is_const + is_function == 1); + + int index; + PropertyAttributes attributes; + ContextLookupFlags flags = DONT_FOLLOW_CHAINS; + BindingFlags binding_flags; + Handle<Object> holder = + context->Lookup(name, flags, &index, &attributes, &binding_flags); + + Handle<JSObject> object; + Handle<Object> value = + is_function ? initial_value + : Handle<Object>::cast(isolate->factory()->undefined_value()); + + // TODO(verwaest): This case should probably not be covered by this function, + // but by DeclareGlobals instead. + if ((attributes != ABSENT && holder->IsJSGlobalObject()) || + (context_arg->has_extension() && + context_arg->extension()->IsJSGlobalObject())) { + return DeclareGlobals(isolate, Handle<JSGlobalObject>::cast(holder), name, + value, attr, is_var, is_const, is_function); } - // Set the value, but only if we're assigning the initial value to a - // constant. For now, we determine this by checking if the - // current value is the hole. - // Strict mode handling not needed (const is disallowed in strict mode). - if (lookup.IsField()) { - FixedArray* properties = global->properties(); - int index = lookup.GetFieldIndex().field_index(); - if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) { - properties->set(index, *value); - } - } else if (lookup.IsNormal()) { - if (global->GetNormalizedProperty(&lookup)->IsTheHole() || - !lookup.IsReadOnly()) { - HandleScope scope(isolate); - JSObject::SetNormalizedProperty(Handle<JSObject>(global), &lookup, value); + if (attributes != ABSENT) { + // The name was declared before; check for conflicting re-declarations. + if (is_const || (attributes & READ_ONLY) != 0) { + return ThrowRedeclarationError(isolate, name); + } + + // Skip var re-declarations. 
+ if (is_var) return isolate->heap()->undefined_value(); + + DCHECK(is_function); + if (index >= 0) { + DCHECK(holder.is_identical_to(context)); + context->set(index, *initial_value); + return isolate->heap()->undefined_value(); } + + object = Handle<JSObject>::cast(holder); + + } else if (context->has_extension()) { + object = handle(JSObject::cast(context->extension())); + DCHECK(object->IsJSContextExtensionObject() || object->IsJSGlobalObject()); } else { - // Ignore re-initialization of constants that have already been - // assigned a constant value. - ASSERT(lookup.IsReadOnly() && lookup.IsConstant()); + DCHECK(context->IsFunctionContext()); + object = + isolate->factory()->NewJSObject(isolate->context_extension_function()); + context->set_extension(*object); } - // Use the set value as the result of the operation. - return *value; + RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes( + object, name, value, attr)); + + return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InitializeConstContextSlot) { +RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) { HandleScope scope(isolate); - ASSERT(args.length() == 3); - - Handle<Object> value(args[0], isolate); - ASSERT(!value->IsTheHole()); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 0); + DCHECK(!value->IsTheHole()); // Initializations are always done in a function or native context. 
- RUNTIME_ASSERT(args[1]->IsContext()); - Handle<Context> context(Context::cast(args[1])->declaration_context()); - - Handle<String> name(String::cast(args[2])); + CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 1); + Handle<Context> context(context_arg->declaration_context()); + CONVERT_ARG_HANDLE_CHECKED(String, name, 2); int index; PropertyAttributes attributes; - ContextLookupFlags flags = FOLLOW_CHAINS; + ContextLookupFlags flags = DONT_FOLLOW_CHAINS; BindingFlags binding_flags; Handle<Object> holder = context->Lookup(name, flags, &index, &attributes, &binding_flags); if (index >= 0) { - ASSERT(holder->IsContext()); - // Property was found in a context. Perform the assignment if we - // found some non-constant or an uninitialized constant. + DCHECK(holder->IsContext()); + // Property was found in a context. Perform the assignment if the constant + // was uninitialized. Handle<Context> context = Handle<Context>::cast(holder); - if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) { - context->set(index, *value); - } + DCHECK((attributes & READ_ONLY) != 0); + if (context->get(index)->IsTheHole()) context->set(index, *value); return *value; } - // The property could not be found, we introduce it as a property of the - // global object. - if (attributes == ABSENT) { - Handle<JSObject> global = Handle<JSObject>( - isolate->context()->global_object()); - // Strict mode not needed (const disallowed in strict mode). - RETURN_IF_EMPTY_HANDLE( - isolate, - JSReceiver::SetProperty(global, name, value, NONE, SLOPPY)); - return *value; - } + PropertyAttributes attr = + static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY); - // The property was present in some function's context extension object, - // as a property on the subject of a with, or as a property of the global - // object. - // - // In most situations, eval-introduced consts should still be present in - // the context extension object. 
However, because declaration and - // initialization are separate, the property might have been deleted - // before we reach the initialization point. - // - // Example: - // - // function f() { eval("delete x; const x;"); } - // - // In that case, the initialization behaves like a normal assignment. - Handle<JSObject> object = Handle<JSObject>::cast(holder); + // Strict mode handling not needed (legacy const is disallowed in strict + // mode). - if (*object == context->extension()) { - // This is the property that was introduced by the const declaration. - // Set it if it hasn't been set before. NOTE: We cannot use - // GetProperty() to get the current value as it 'unholes' the value. - LookupResult lookup(isolate); - object->LocalLookupRealNamedProperty(*name, &lookup); - ASSERT(lookup.IsFound()); // the property was declared - ASSERT(lookup.IsReadOnly()); // and it was declared as read-only - - if (lookup.IsField()) { - FixedArray* properties = object->properties(); - int index = lookup.GetFieldIndex().field_index(); - if (properties->get(index)->IsTheHole()) { - properties->set(index, *value); - } - } else if (lookup.IsNormal()) { - if (object->GetNormalizedProperty(&lookup)->IsTheHole()) { - JSObject::SetNormalizedProperty(object, &lookup, value); - } - } else { - // We should not reach here. Any real, named property should be - // either a field or a dictionary slot. - UNREACHABLE(); - } + // The declared const was configurable, and may have been deleted in the + // meanwhile. If so, re-introduce the variable in the context extension. + DCHECK(context_arg->has_extension()); + if (attributes == ABSENT) { + holder = handle(context_arg->extension(), isolate); } else { - // The property was found on some other object. Set it if it is not a - // read-only property. - if ((attributes & READ_ONLY) == 0) { - // Strict mode not needed (const disallowed in strict mode). 
- RETURN_IF_EMPTY_HANDLE( - isolate, - JSReceiver::SetProperty(object, name, value, attributes, SLOPPY)); + // For JSContextExtensionObjects, the initializer can be run multiple times + // if in a for loop: for (var i = 0; i < 2; i++) { const x = i; }. Only the + // first assignment should go through. For JSGlobalObjects, additionally any + // code can run in between that modifies the declared property. + DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject()); + + LookupIterator it(holder, name, LookupIterator::CHECK_HIDDEN); + Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it); + if (!maybe.has_value) return isolate->heap()->exception(); + PropertyAttributes old_attributes = maybe.value; + + // Ignore if we can't reconfigure the value. + if ((old_attributes & DONT_DELETE) != 0) { + if ((old_attributes & READ_ONLY) != 0 || + it.property_kind() == LookupIterator::ACCESSOR) { + return *value; + } + attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY); } } + RETURN_FAILURE_ON_EXCEPTION( + isolate, JSObject::SetOwnPropertyIgnoreAttributes( + Handle<JSObject>::cast(holder), name, value, attr)); + return *value; } -RUNTIME_FUNCTION(MaybeObject*, - Runtime_OptimizeObjectForAddingMultipleProperties) { +RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_SMI_ARG_CHECKED(properties, 1); + // Conservative upper limit to prevent fuzz tests from going OOM. 
+ RUNTIME_ASSERT(properties <= 100000); if (object->HasFastProperties() && !object->IsJSGlobalProxy()) { JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties); } @@ -2600,9 +2476,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_RegExpExec) { +RUNTIME_FUNCTION(Runtime_RegExpExecRT) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0); CONVERT_ARG_HANDLE_CHECKED(String, subject, 1); // Due to the way the JS calls are constructed this must be less than the @@ -2612,54 +2488,38 @@ RUNTIME_ASSERT(index >= 0); RUNTIME_ASSERT(index <= subject->length()); isolate->counters()->regexp_entry_runtime()->Increment(); - Handle<Object> result = RegExpImpl::Exec(regexp, - subject, - index, - last_match_info); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + RegExpImpl::Exec(regexp, subject, index, last_match_info)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_RegExpConstructResult) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 3); - CONVERT_SMI_ARG_CHECKED(elements_count, 0); - if (elements_count < 0 || - elements_count > FixedArray::kMaxLength || - !Smi::IsValid(elements_count)) { - return isolate->ThrowIllegalOperation(); - } - Object* new_object; - { MaybeObject* maybe_new_object = - isolate->heap()->AllocateFixedArray(elements_count); - if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object; - } - FixedArray* elements = FixedArray::cast(new_object); - { MaybeObject* maybe_new_object = isolate->heap()->AllocateRaw( - JSRegExpResult::kSize, NEW_SPACE, OLD_POINTER_SPACE); - if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object; - } - { - DisallowHeapAllocation no_gc; - HandleScope scope(isolate); - reinterpret_cast<HeapObject*>(new_object)-> - set_map(isolate->native_context()->regexp_result_map()); - } - JSArray* array = 
JSArray::cast(new_object); - array->set_properties(isolate->heap()->empty_fixed_array()); - array->set_elements(elements); - array->set_length(Smi::FromInt(elements_count)); +RUNTIME_FUNCTION(Runtime_RegExpConstructResult) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 3); + CONVERT_SMI_ARG_CHECKED(size, 0); + RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength); + CONVERT_ARG_HANDLE_CHECKED(Object, index, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, input, 2); + Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size); + Handle<Map> regexp_map(isolate->native_context()->regexp_result_map()); + Handle<JSObject> object = + isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED, false); + Handle<JSArray> array = Handle<JSArray>::cast(object); + array->set_elements(*elements); + array->set_length(Smi::FromInt(size)); // Write in-object properties after the length of the array. - array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, args[1]); - array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, args[2]); - return array; + array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, *index); + array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, *input); + return *array; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) { +RUNTIME_FUNCTION(Runtime_RegExpInitializeObject) { HandleScope scope(isolate); - ASSERT(args.length() == 5); + DCHECK(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0); CONVERT_ARG_HANDLE_CHECKED(String, source, 1); // If source is the empty string we set it to "(?:)" instead as @@ -2701,24 +2561,27 @@ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE); Handle<Object> zero(Smi::FromInt(0), isolate); Factory* factory = isolate->factory(); - CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - regexp, factory->source_string(), source, final)); - CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - regexp, 
factory->global_string(), global, final)); - CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - regexp, factory->ignore_case_string(), ignoreCase, final)); - CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - regexp, factory->multiline_string(), multiline, final)); - CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - regexp, factory->last_index_string(), zero, writable)); + JSObject::SetOwnPropertyIgnoreAttributes( + regexp, factory->source_string(), source, final).Check(); + JSObject::SetOwnPropertyIgnoreAttributes( + regexp, factory->global_string(), global, final).Check(); + JSObject::SetOwnPropertyIgnoreAttributes( + regexp, factory->ignore_case_string(), ignoreCase, final).Check(); + JSObject::SetOwnPropertyIgnoreAttributes( + regexp, factory->multiline_string(), multiline, final).Check(); + JSObject::SetOwnPropertyIgnoreAttributes( + regexp, factory->last_index_string(), zero, writable).Check(); return *regexp; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) { +RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0); + Object* length = prototype->length(); + RUNTIME_ASSERT(length->IsSmi() && Smi::cast(length)->value() == 0); + RUNTIME_ASSERT(prototype->HasFastSmiOrObjectElements()); // This is necessary to enable fast checks for absence of elements // on Array.prototype and below. 
prototype->set_elements(isolate->heap()->empty_fixed_array()); @@ -2726,28 +2589,24 @@ } -static Handle<JSFunction> InstallBuiltin(Isolate* isolate, - Handle<JSObject> holder, - const char* name, - Builtins::Name builtin_name) { +static void InstallBuiltin(Isolate* isolate, + Handle<JSObject> holder, + const char* name, + Builtins::Name builtin_name) { Handle<String> key = isolate->factory()->InternalizeUtf8String(name); Handle<Code> code(isolate->builtins()->builtin(builtin_name)); Handle<JSFunction> optimized = - isolate->factory()->NewFunction(key, - JS_OBJECT_TYPE, - JSObject::kHeaderSize, - code, - false); + isolate->factory()->NewFunctionWithoutPrototype(key, code); optimized->shared()->DontAdaptArguments(); - JSReceiver::SetProperty(holder, key, optimized, NONE, STRICT); - return optimized; + JSObject::AddProperty(holder, key, optimized, NONE); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) { +RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(JSObject, holder, 0); + DCHECK(args.length() == 0); + Handle<JSObject> holder = + isolate->factory()->NewJSObject(isolate->object_function()); InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop); InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush); @@ -2761,16 +2620,17 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsSloppyModeFunction) { +RUNTIME_FUNCTION(Runtime_IsSloppyModeFunction) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSReceiver, callable, 0); if (!callable->IsJSFunction()) { HandleScope scope(isolate); - bool threw = false; - Handle<Object> delegate = Execution::TryGetFunctionDelegate( - isolate, Handle<JSReceiver>(callable), &threw); - if (threw) return Failure::Exception(); + Handle<Object> delegate; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, delegate, + Execution::TryGetFunctionDelegate( + isolate, 
Handle<JSReceiver>(callable))); callable = JSFunction::cast(*delegate); } JSFunction* function = JSFunction::cast(callable); @@ -2779,17 +2639,18 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) { +RUNTIME_FUNCTION(Runtime_GetDefaultReceiver) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSReceiver, callable, 0); if (!callable->IsJSFunction()) { HandleScope scope(isolate); - bool threw = false; - Handle<Object> delegate = Execution::TryGetFunctionDelegate( - isolate, Handle<JSReceiver>(callable), &threw); - if (threw) return Failure::Exception(); + Handle<Object> delegate; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, delegate, + Execution::TryGetFunctionDelegate( + isolate, Handle<JSReceiver>(callable))); callable = JSFunction::cast(*delegate); } JSFunction* function = JSFunction::cast(callable); @@ -2801,19 +2662,17 @@ // Returns undefined for strict or native functions, or // the associated global receiver for "normal" functions. - Context* native_context = - function->context()->global_object()->native_context(); - return native_context->global_object()->global_receiver(); + return function->global_proxy(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_MaterializeRegExpLiteral) { +RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); - int index = args.smi_at(1); - Handle<String> pattern = args.at<String>(2); - Handle<String> flags = args.at<String>(3); + CONVERT_SMI_ARG_CHECKED(index, 1); + CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2); + CONVERT_ARG_HANDLE_CHECKED(String, flags, 3); // Get the RegExp function from the context in the literals array. 
// This is the RegExp function from the context in which the @@ -2824,31 +2683,27 @@ Handle<JSFunction>( JSFunction::NativeContextFromLiterals(*literals)->regexp_function()); // Compute the regular expression literal. - bool has_pending_exception; - Handle<Object> regexp = - RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags, - &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<Object> regexp; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, regexp, + RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags)); literals->set(index, *regexp); return *regexp; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) { +RUNTIME_FUNCTION(Runtime_FunctionGetName) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return f->shared()->name(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) { +RUNTIME_FUNCTION(Runtime_FunctionSetName) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, f, 0); CONVERT_ARG_CHECKED(String, name, 1); @@ -2857,58 +2712,66 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) { +RUNTIME_FUNCTION(Runtime_FunctionNameShouldPrintAsAnonymous) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return isolate->heap()->ToBoolean( f->shared()->name_should_print_as_anonymous()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) { +RUNTIME_FUNCTION(Runtime_FunctionMarkNameShouldPrintAsAnonymous) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); f->shared()->set_name_should_print_as_anonymous(true); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, 
Runtime_FunctionIsGenerator) { +RUNTIME_FUNCTION(Runtime_FunctionIsGenerator) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return isolate->heap()->ToBoolean(f->shared()->is_generator()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) { +RUNTIME_FUNCTION(Runtime_FunctionIsArrow) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(JSFunction, f, 0); + return isolate->heap()->ToBoolean(f->shared()->is_arrow()); +} + + +RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); - f->RemovePrototype(); + RUNTIME_ASSERT(f->RemovePrototype()); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) { +RUNTIME_FUNCTION(Runtime_FunctionGetScript) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, fun, 0); Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate); if (!script->IsScript()) return isolate->heap()->undefined_value(); - return *GetScriptWrapper(Handle<Script>::cast(script)); + return *Script::GetWrapper(Handle<Script>::cast(script)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) { +RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0); Handle<SharedFunctionInfo> shared(f->shared()); @@ -2916,9 +2779,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) { +RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, fun, 0); int pos = fun->shared()->start_position(); @@ -2926,9 +2789,9 
@@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) { +RUNTIME_FUNCTION(Runtime_FunctionGetPositionForOffset) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(Code, code, 0); CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]); @@ -2940,9 +2803,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) { +RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, fun, 0); CONVERT_ARG_CHECKED(String, name, 1); @@ -2951,110 +2814,68 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) { +RUNTIME_FUNCTION(Runtime_FunctionSetLength) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, fun, 0); CONVERT_SMI_ARG_CHECKED(length, 1); + RUNTIME_ASSERT((length & 0xC0000000) == 0xC0000000 || + (length & 0xC0000000) == 0x0); fun->shared()->set_length(length); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) { +RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0); CONVERT_ARG_HANDLE_CHECKED(Object, value, 1); - ASSERT(fun->should_have_prototype()); + RUNTIME_ASSERT(fun->should_have_prototype()); Accessors::FunctionSetPrototype(fun, value); return args[0]; // return TOS } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) { - SealHandleScope shs(isolate); - RUNTIME_ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSFunction, function, 0); - - String* name = isolate->heap()->prototype_string(); - - if (function->HasFastProperties()) { - // Construct a new field descriptor with updated attributes. 
- DescriptorArray* instance_desc = function->map()->instance_descriptors(); - - int index = instance_desc->SearchWithCache(name, function->map()); - ASSERT(index != DescriptorArray::kNotFound); - PropertyDetails details = instance_desc->GetDetails(index); - - CallbacksDescriptor new_desc(name, - instance_desc->GetValue(index), - static_cast<PropertyAttributes>(details.attributes() | READ_ONLY)); - - // Create a new map featuring the new field descriptors array. - Map* new_map; - MaybeObject* maybe_map = - function->map()->CopyReplaceDescriptor( - instance_desc, &new_desc, index, OMIT_TRANSITION); - if (!maybe_map->To(&new_map)) return maybe_map; - - function->set_map(new_map); - } else { // Dictionary properties. - // Directly manipulate the property details. - int entry = function->property_dictionary()->FindEntry(name); - ASSERT(entry != NameDictionary::kNotFound); - PropertyDetails details = function->property_dictionary()->DetailsAt(entry); - PropertyDetails new_details( - static_cast<PropertyAttributes>(details.attributes() | READ_ONLY), - details.type(), - details.dictionary_index()); - function->property_dictionary()->DetailsAtPut(entry, new_details); - } - return function; -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) { +RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return isolate->heap()->ToBoolean(f->shared()->IsApiFunction()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) { +RUNTIME_FUNCTION(Runtime_FunctionIsBuiltin) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return isolate->heap()->ToBoolean(f->IsBuiltin()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) { +RUNTIME_FUNCTION(Runtime_SetCode) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); 
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0); - Handle<Object> code = args.at<Object>(1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1); - if (code->IsNull()) return *target; - RUNTIME_ASSERT(code->IsJSFunction()); - Handle<JSFunction> source = Handle<JSFunction>::cast(code); Handle<SharedFunctionInfo> target_shared(target->shared()); Handle<SharedFunctionInfo> source_shared(source->shared()); + RUNTIME_ASSERT(!source_shared->bound()); if (!Compiler::EnsureCompiled(source, KEEP_EXCEPTION)) { - return Failure::Exception(); + return isolate->heap()->exception(); } // Mark both, the source and the target, as un-flushable because the // shared unoptimized code makes them impossible to enqueue in a list. - ASSERT(target_shared->code()->gc_metadata() == NULL); - ASSERT(source_shared->code()->gc_metadata() == NULL); + DCHECK(target_shared->code()->gc_metadata() == NULL); + DCHECK(source_shared->code()->gc_metadata() == NULL); target_shared->set_dont_flush(true); source_shared->set_dont_flush(true); @@ -3063,6 +2884,7 @@ target_shared->ReplaceCode(source_shared->code()); target_shared->set_scope_info(source_shared->scope_info()); target_shared->set_length(source_shared->length()); + target_shared->set_feedback_vector(source_shared->feedback_vector()); target_shared->set_formal_parameter_count( source_shared->formal_parameter_count()); target_shared->set_script(source_shared->script()); @@ -3072,10 +2894,11 @@ bool was_native = target_shared->native(); target_shared->set_compiler_hints(source_shared->compiler_hints()); target_shared->set_native(was_native); + target_shared->set_profiler_ticks(source_shared->profiler_ticks()); // Set the code of the target function. target->ReplaceCode(source_shared->code()); - ASSERT(target->next_function_link()->IsUndefined()); + DCHECK(target->next_function_link()->IsUndefined()); // Make sure we get a fresh copy of the literal vector to avoid cross // context contamination. 
@@ -3100,34 +2923,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) { - HandleScope scope(isolate); - ASSERT(args.length() == 2); - CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); - CONVERT_SMI_ARG_CHECKED(num, 1); - RUNTIME_ASSERT(num >= 0); - // If objects constructed from this function exist then changing - // 'estimated_nof_properties' is dangerous since the previous value might - // have been compiled into the fast construct stub. Moreover, the inobject - // slack tracking logic might have adjusted the previous value, so even - // passing the same value is risky. - if (!func->shared()->live_objects_may_exist()) { - func->shared()->set_expected_nof_properties(num); - if (func->has_initial_map()) { - Handle<Map> new_initial_map = - func->GetIsolate()->factory()->CopyMap( - Handle<Map>(func->initial_map())); - new_initial_map->set_unused_property_fields(num); - func->set_initial_map(*new_initial_map); - } - } - return isolate->heap()->undefined_value(); -} - - -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateJSGeneratorObject) { +RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); JavaScriptFrameIterator it(isolate); JavaScriptFrame* frame = it.frame(); @@ -3151,43 +2949,42 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_SuspendJSGeneratorObject) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0); +RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0); JavaScriptFrameIterator stack_iterator(isolate); JavaScriptFrame* frame = stack_iterator.frame(); RUNTIME_ASSERT(frame->function()->shared()->is_generator()); - ASSERT_EQ(frame->function(), generator_object->function()); + DCHECK_EQ(frame->function(), generator_object->function()); // The 
caller should have saved the context and continuation already. - ASSERT_EQ(generator_object->context(), Context::cast(frame->context())); - ASSERT_LT(0, generator_object->continuation()); + DCHECK_EQ(generator_object->context(), Context::cast(frame->context())); + DCHECK_LT(0, generator_object->continuation()); // We expect there to be at least two values on the operand stack: the return // value of the yield expression, and the argument to this runtime call. // Neither of those should be saved. int operands_count = frame->ComputeOperandsCount(); - ASSERT_GE(operands_count, 2); + DCHECK_GE(operands_count, 2); operands_count -= 2; if (operands_count == 0) { // Although it's semantically harmless to call this function with an // operands_count of zero, it is also unnecessary. - ASSERT_EQ(generator_object->operand_stack(), + DCHECK_EQ(generator_object->operand_stack(), isolate->heap()->empty_fixed_array()); - ASSERT_EQ(generator_object->stack_handler_index(), -1); + DCHECK_EQ(generator_object->stack_handler_index(), -1); // If there are no operands on the stack, there shouldn't be a handler // active either. - ASSERT(!frame->HasHandler()); + DCHECK(!frame->HasHandler()); } else { int stack_handler_index = -1; - MaybeObject* alloc = isolate->heap()->AllocateFixedArray(operands_count); - FixedArray* operand_stack; - if (!alloc->To(&operand_stack)) return alloc; - frame->SaveOperandStack(operand_stack, &stack_handler_index); - generator_object->set_operand_stack(operand_stack); + Handle<FixedArray> operand_stack = + isolate->factory()->NewFixedArray(operands_count); + frame->SaveOperandStack(*operand_stack, &stack_handler_index); + generator_object->set_operand_stack(*operand_stack); generator_object->set_stack_handler_index(stack_handler_index); } @@ -3202,24 +2999,24 @@ // inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResumeResume is // called in any case, as it needs to reconstruct the stack frame and make space // for arguments and operands. 
-RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ResumeJSGeneratorObject) { +RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) { SealHandleScope shs(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0); CONVERT_ARG_CHECKED(Object, value, 1); CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2); JavaScriptFrameIterator stack_iterator(isolate); JavaScriptFrame* frame = stack_iterator.frame(); - ASSERT_EQ(frame->function(), generator_object->function()); - ASSERT(frame->function()->is_compiled()); + DCHECK_EQ(frame->function(), generator_object->function()); + DCHECK(frame->function()->is_compiled()); STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0); STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0); Address pc = generator_object->function()->code()->instruction_start(); int offset = generator_object->continuation(); - ASSERT(offset > 0); + DCHECK(offset > 0); frame->set_pc(pc + offset); if (FLAG_enable_ool_constant_pool) { frame->set_constant_pool( @@ -3250,9 +3047,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowGeneratorStateError) { +RUNTIME_FUNCTION(Runtime_ThrowGeneratorStateError) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0); int continuation = generator->continuation(); const char* message = continuation == JSGeneratorObject::kGeneratorClosed ? @@ -3263,41 +3060,33 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) { +RUNTIME_FUNCTION(Runtime_ObjectFreeze) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); - Handle<Object> result = JSObject::Freeze(object); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; -} + // %ObjectFreeze is a fast path and these cases are handled elsewhere. 
+ RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() && + !object->map()->is_observed() && + !object->IsJSProxy()); -MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate, - Object* char_code) { - if (char_code->IsNumber()) { - return isolate->heap()->LookupSingleCharacterStringFromCode( - NumberToUint32(char_code) & 0xffff); - } - return isolate->heap()->empty_string(); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Freeze(object)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringCharCodeAt) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 2); - CONVERT_ARG_CHECKED(String, subject, 0); + CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]); // Flatten the string. If someone wants to get a char at an index // in a cons string, it is likely that more indices will be // accessed. - Object* flat; - { MaybeObject* maybe_flat = subject->TryFlatten(); - if (!maybe_flat->ToObject(&flat)) return maybe_flat; - } - subject = String::cast(flat); + subject = String::Flatten(subject); if (i >= static_cast<uint32_t>(subject->length())) { return isolate->heap()->nan_value(); @@ -3307,10 +3096,15 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - return CharFromCode(isolate, args[0]); +RUNTIME_FUNCTION(Runtime_CharFromCode) { + HandleScope handlescope(isolate); + DCHECK(args.length() == 1); + if (args[0]->IsNumber()) { + CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]); + code &= 0xffff; + return *isolate->factory()->LookupSingleCharacterStringFromCode(code); + } + return isolate->heap()->empty_string(); } @@ -3322,7 +3116,7 @@ has_non_smi_elements_(false) { // Require a non-zero initial size. 
Ensures that doubling the size to // extend the array will work. - ASSERT(initial_capacity > 0); + DCHECK(initial_capacity > 0); } explicit FixedArrayBuilder(Handle<FixedArray> backing_store) @@ -3331,7 +3125,7 @@ has_non_smi_elements_(false) { // Require a non-zero initial size. Ensures that doubling the size to // extend the array will work. - ASSERT(backing_store->length() > 0); + DCHECK(backing_store->length() > 0); } bool HasCapacity(int elements) { @@ -3356,16 +3150,16 @@ } void Add(Object* value) { - ASSERT(!value->IsSmi()); - ASSERT(length_ < capacity()); + DCHECK(!value->IsSmi()); + DCHECK(length_ < capacity()); array_->set(length_, value); length_++; has_non_smi_elements_ = true; } void Add(Smi* value) { - ASSERT(value->IsSmi()); - ASSERT(length_ < capacity()); + DCHECK(value->IsSmi()); + DCHECK(length_ < capacity()); array_->set(length_, value); length_++; } @@ -3426,15 +3220,15 @@ is_ascii_(subject->IsOneByteRepresentation()) { // Require a non-zero initial size. Ensures that doubling the size to // extend the array will work. 
- ASSERT(estimated_part_count > 0); + DCHECK(estimated_part_count > 0); } static inline void AddSubjectSlice(FixedArrayBuilder* builder, int from, int to) { - ASSERT(from >= 0); + DCHECK(from >= 0); int length = to - from; - ASSERT(length > 0); + DCHECK(length > 0); if (StringBuilderSubstringLength::is_valid(length) && StringBuilderSubstringPosition::is_valid(from)) { int encoded_slice = StringBuilderSubstringLength::encode(length) | @@ -3461,7 +3255,7 @@ void AddString(Handle<String> string) { int length = string->length(); - ASSERT(length > 0); + DCHECK(length > 0); AddElement(*string); if (!string->IsOneByteRepresentation()) { is_ascii_ = false; @@ -3470,15 +3264,20 @@ } - Handle<String> ToString() { + MaybeHandle<String> ToString() { + Isolate* isolate = heap_->isolate(); if (array_builder_.length() == 0) { - return heap_->isolate()->factory()->empty_string(); + return isolate->factory()->empty_string(); } Handle<String> joined_string; if (is_ascii_) { - Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_); - RETURN_IF_EMPTY_HANDLE_VALUE(heap_->isolate(), seq, Handle<String>()); + Handle<SeqOneByteString> seq; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, seq, + isolate->factory()->NewRawOneByteString(character_count_), + String); + DisallowHeapAllocation no_gc; uint8_t* char_buffer = seq->GetChars(); StringBuilderConcatHelper(*subject_, @@ -3488,8 +3287,12 @@ joined_string = Handle<String>::cast(seq); } else { // Non-ASCII. 
- Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_); - RETURN_IF_EMPTY_HANDLE_VALUE(heap_->isolate(), seq, Handle<String>()); + Handle<SeqTwoByteString> seq; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, seq, + isolate->factory()->NewRawTwoByteString(character_count_), + String); + DisallowHeapAllocation no_gc; uc16* char_buffer = seq->GetChars(); StringBuilderConcatHelper(*subject_, @@ -3512,19 +3315,9 @@ } private: - Handle<SeqOneByteString> NewRawOneByteString(int length) { - return heap_->isolate()->factory()->NewRawOneByteString(length); - } - - - Handle<SeqTwoByteString> NewRawTwoByteString(int length) { - return heap_->isolate()->factory()->NewRawTwoByteString(length); - } - - void AddElement(Object* element) { - ASSERT(element->IsSmi() || element->IsString()); - ASSERT(array_builder_.capacity() > array_builder_.length()); + DCHECK(element->IsSmi() || element->IsString()); + DCHECK(array_builder_.capacity() > array_builder_.length()); array_builder_.Add(element); } @@ -3587,8 +3380,8 @@ return ReplacementPart(REPLACEMENT_STRING, 0); } static inline ReplacementPart ReplacementSubString(int from, int to) { - ASSERT(from >= 0); - ASSERT(to > from); + DCHECK(from >= 0); + DCHECK(to > from); return ReplacementPart(-from, to); } @@ -3597,7 +3390,7 @@ ReplacementPart(int tag, int data) : tag(tag), data(data) { // Must be non-positive or a PartType value. - ASSERT(tag < NUMBER_OF_PART_TYPES); + DCHECK(tag < NUMBER_OF_PART_TYPES); } // Either a value of PartType or a non-positive number that is // the negation of an index into the replacement string. 
@@ -3700,7 +3493,7 @@ if (i > last) { parts->Add(ReplacementPart::ReplacementSubString(last, i), zone); } - ASSERT(capture_ref <= capture_count); + DCHECK(capture_ref <= capture_count); parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone); last = next_index + 1; } @@ -3736,7 +3529,7 @@ { DisallowHeapAllocation no_gc; String::FlatContent content = replacement->GetFlatContent(); - ASSERT(content.IsFlat()); + DCHECK(content.IsFlat()); bool simple = false; if (content.IsAscii()) { simple = ParseReplacementPattern(&parts_, @@ -3745,7 +3538,7 @@ subject_length, zone()); } else { - ASSERT(content.IsTwoByte()); + DCHECK(content.IsTwoByte()); simple = ParseReplacementPattern(&parts_, content.ToUC16Vector(), capture_count, @@ -3782,7 +3575,7 @@ int match_from, int match_to, int32_t* match) { - ASSERT_LT(0, parts_.length()); + DCHECK_LT(0, parts_.length()); for (int i = 0, n = parts_.length(); i < n; i++) { ReplacementPart part = parts_[i]; switch (part.tag) { @@ -3821,7 +3614,7 @@ ZoneList<int>* indices, unsigned int limit, Zone* zone) { - ASSERT(limit > 0); + DCHECK(limit > 0); // Collect indices of pattern in subject using memchr. // Stop after finding at most limit values. const uint8_t* subject_start = subject.start(); @@ -3843,7 +3636,7 @@ ZoneList<int>* indices, unsigned int limit, Zone* zone) { - ASSERT(limit > 0); + DCHECK(limit > 0); const uc16* subject_start = subject.start(); const uc16* subject_end = subject_start + subject.length(); for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) { @@ -3862,7 +3655,7 @@ ZoneList<int>* indices, unsigned int limit, Zone* zone) { - ASSERT(limit > 0); + DCHECK(limit > 0); // Collect indices of pattern in subject. // Stop after finding at most limit values. 
int pattern_length = pattern.length(); @@ -3888,8 +3681,8 @@ DisallowHeapAllocation no_gc; String::FlatContent subject_content = subject->GetFlatContent(); String::FlatContent pattern_content = pattern->GetFlatContent(); - ASSERT(subject_content.IsFlat()); - ASSERT(pattern_content.IsFlat()); + DCHECK(subject_content.IsFlat()); + DCHECK(pattern_content.IsFlat()); if (subject_content.IsAscii()) { Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector(); if (pattern_content.IsAscii()) { @@ -3959,18 +3752,18 @@ template<typename ResultSeqString> -MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString( +MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp, Handle<String> replacement, Handle<JSArray> last_match_info) { - ASSERT(subject->IsFlat()); - ASSERT(replacement->IsFlat()); + DCHECK(subject->IsFlat()); + DCHECK(replacement->IsFlat()); ZoneScope zone_scope(isolate->runtime_zone()); ZoneList<int> indices(8, zone_scope.zone()); - ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag()); + DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag()); String* pattern = String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex)); int subject_len = subject->length(); @@ -4000,14 +3793,15 @@ int subject_pos = 0; int result_pos = 0; - Handle<String> result_seq; + MaybeHandle<SeqString> maybe_res; if (ResultSeqString::kHasAsciiEncoding) { - result_seq = isolate->factory()->NewRawOneByteString(result_len); + maybe_res = isolate->factory()->NewRawOneByteString(result_len); } else { - result_seq = isolate->factory()->NewRawTwoByteString(result_len); + maybe_res = isolate->factory()->NewRawTwoByteString(result_len); } - RETURN_IF_EMPTY_HANDLE(isolate, result_seq); - Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(result_seq); + Handle<SeqString> untyped_res; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, untyped_res, maybe_res); + 
Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(untyped_res); for (int i = 0; i < matches; i++) { // Copy non-matched subject content. @@ -4046,14 +3840,14 @@ } -MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString( +MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp, Handle<String> replacement, Handle<JSArray> last_match_info) { - ASSERT(subject->IsFlat()); - ASSERT(replacement->IsFlat()); + DCHECK(subject->IsFlat()); + DCHECK(replacement->IsFlat()); int capture_count = regexp->CaptureCount(); int subject_length = subject->length(); @@ -4078,11 +3872,11 @@ } RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); int32_t* current_match = global_cache.FetchNext(); if (current_match == NULL) { - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); return *subject; } @@ -4124,7 +3918,7 @@ current_match = global_cache.FetchNext(); } while (current_match != NULL); - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); if (prev < subject_length) { builder.EnsureCapacity(2); @@ -4136,19 +3930,19 @@ capture_count, global_cache.LastSuccessfulMatch()); - Handle<String> result = builder.ToString(); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<String> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.ToString()); return *result; } template <typename ResultSeqString> -MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString( +MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp, Handle<JSArray> last_match_info) { - 
ASSERT(subject->IsFlat()); + DCHECK(subject->IsFlat()); // Shortcut for simple non-regexp global replacements if (regexp->TypeTag() == JSRegExp::ATOM) { @@ -4163,11 +3957,11 @@ } RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); int32_t* current_match = global_cache.FetchNext(); if (current_match == NULL) { - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); return *subject; } @@ -4182,12 +3976,11 @@ Handle<ResultSeqString> answer; if (ResultSeqString::kHasAsciiEncoding) { answer = Handle<ResultSeqString>::cast( - isolate->factory()->NewRawOneByteString(new_length)); + isolate->factory()->NewRawOneByteString(new_length).ToHandleChecked()); } else { answer = Handle<ResultSeqString>::cast( - isolate->factory()->NewRawTwoByteString(new_length)); + isolate->factory()->NewRawTwoByteString(new_length).ToHandleChecked()); } - ASSERT(!answer.is_null()); int prev = 0; int position = 0; @@ -4205,7 +3998,7 @@ current_match = global_cache.FetchNext(); } while (current_match != NULL); - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); RegExpImpl::SetLastMatchInfo(last_match_info, subject, @@ -4231,24 +4024,30 @@ Address end_of_string = answer->address() + string_size; Heap* heap = isolate->heap(); + + // The trimming is performed on a newly allocated object, which is on a + // fresly allocated page or on an already swept page. Hence, the sweeper + // thread can not get confused with the filler creation. No synchronization + // needed. 
heap->CreateFillerObjectAt(end_of_string, delta); heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR); return *answer; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) { +RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3); - ASSERT(regexp->GetFlags().is_global()); + RUNTIME_ASSERT(regexp->GetFlags().is_global()); + RUNTIME_ASSERT(last_match_info->HasFastObjectElements()); - if (!subject->IsFlat()) subject = FlattenGetString(subject); + subject = String::Flatten(subject); if (replacement->length() == 0) { if (subject->HasOnlyOneByteChars()) { @@ -4260,42 +4059,44 @@ } } - if (!replacement->IsFlat()) replacement = FlattenGetString(replacement); + replacement = String::Flatten(replacement); return StringReplaceGlobalRegExpWithString( isolate, subject, regexp, replacement, last_match_info); } -Handle<String> StringReplaceOneCharWithString(Isolate* isolate, - Handle<String> subject, - Handle<String> search, - Handle<String> replace, - bool* found, - int recursion_limit) { - if (recursion_limit == 0) return Handle<String>::null(); +// This may return an empty MaybeHandle if an exception is thrown or +// we abort due to reaching the recursion limit. 
+MaybeHandle<String> StringReplaceOneCharWithString(Isolate* isolate, + Handle<String> subject, + Handle<String> search, + Handle<String> replace, + bool* found, + int recursion_limit) { + StackLimitCheck stackLimitCheck(isolate); + if (stackLimitCheck.HasOverflowed() || (recursion_limit == 0)) { + return MaybeHandle<String>(); + } + recursion_limit--; if (subject->IsConsString()) { ConsString* cons = ConsString::cast(*subject); Handle<String> first = Handle<String>(cons->first()); Handle<String> second = Handle<String>(cons->second()); - Handle<String> new_first = - StringReplaceOneCharWithString(isolate, - first, - search, - replace, - found, - recursion_limit - 1); - if (new_first.is_null()) return new_first; + Handle<String> new_first; + if (!StringReplaceOneCharWithString( + isolate, first, search, replace, found, recursion_limit) + .ToHandle(&new_first)) { + return MaybeHandle<String>(); + } if (*found) return isolate->factory()->NewConsString(new_first, second); - Handle<String> new_second = - StringReplaceOneCharWithString(isolate, - second, - search, - replace, - found, - recursion_limit - 1); - if (new_second.is_null()) return new_second; + Handle<String> new_second; + if (!StringReplaceOneCharWithString( + isolate, second, search, replace, found, recursion_limit) + .ToHandle(&new_second)) { + return MaybeHandle<String>(); + } if (*found) return isolate->factory()->NewConsString(first, new_second); return subject; @@ -4304,8 +4105,11 @@ if (index == -1) return subject; *found = true; Handle<String> first = isolate->factory()->NewSubString(subject, 0, index); - Handle<String> cons1 = isolate->factory()->NewConsString(first, replace); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, cons1, Handle<String>()); + Handle<String> cons1; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, cons1, + isolate->factory()->NewConsString(first, replace), + String); Handle<String> second = isolate->factory()->NewSubString(subject, index + 1, subject->length()); return 
isolate->factory()->NewConsString(cons1, second); @@ -4313,9 +4117,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) { +RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(String, search, 1); CONVERT_ARG_HANDLE_CHECKED(String, replace, 2); @@ -4324,20 +4128,20 @@ // retry with a flattened subject string. const int kRecursionLimit = 0x1000; bool found = false; - Handle<String> result = StringReplaceOneCharWithString(isolate, - subject, - search, - replace, - &found, - kRecursionLimit); - if (!result.is_null()) return *result; - if (isolate->has_pending_exception()) return Failure::Exception(); - return *StringReplaceOneCharWithString(isolate, - FlattenGetString(subject), - search, - replace, - &found, - kRecursionLimit); + Handle<String> result; + if (StringReplaceOneCharWithString( + isolate, subject, search, replace, &found, kRecursionLimit) + .ToHandle(&result)) { + return *result; + } + if (isolate->has_pending_exception()) return isolate->heap()->exception(); + + subject = String::Flatten(subject); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + StringReplaceOneCharWithString( + isolate, subject, search, replace, &found, kRecursionLimit)); + return *result; } @@ -4348,8 +4152,8 @@ Handle<String> sub, Handle<String> pat, int start_index) { - ASSERT(0 <= start_index); - ASSERT(start_index <= sub->length()); + DCHECK(0 <= start_index); + DCHECK(start_index <= sub->length()); int pattern_length = pat->length(); if (pattern_length == 0) return start_index; @@ -4357,8 +4161,8 @@ int subject_length = sub->length(); if (start_index + pattern_length > subject_length) return -1; - if (!sub->IsFlat()) FlattenString(sub); - if (!pat->IsFlat()) FlattenString(pat); + sub = String::Flatten(sub); + pat = String::Flatten(pat); DisallowHeapAllocation no_gc; // ensure 
vectors stay valid // Extract flattened substrings of cons strings before determining asciiness. @@ -4393,20 +4197,19 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) { +RUNTIME_FUNCTION(Runtime_StringIndexOf) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, sub, 0); CONVERT_ARG_HANDLE_CHECKED(String, pat, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, index, 2); - Object* index = args[2]; uint32_t start_index; if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1); RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length())); - int position = - Runtime::StringMatch(isolate, sub, pat, start_index); + int position = Runtime::StringMatch(isolate, sub, pat, start_index); return Smi::FromInt(position); } @@ -4416,8 +4219,8 @@ Vector<const pchar> pattern, int idx) { int pattern_length = pattern.length(); - ASSERT(pattern_length >= 1); - ASSERT(idx + pattern_length <= subject.length()); + DCHECK(pattern_length >= 1); + DCHECK(idx + pattern_length <= subject.length()); if (sizeof(schar) == 1 && sizeof(pchar) > 1) { for (int i = 0; i < pattern_length; i++) { @@ -4446,14 +4249,14 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) { +RUNTIME_FUNCTION(Runtime_StringLastIndexOf) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, sub, 0); CONVERT_ARG_HANDLE_CHECKED(String, pat, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, index, 2); - Object* index = args[2]; uint32_t start_index; if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1); @@ -4468,8 +4271,8 @@ return Smi::FromInt(start_index); } - if (!sub->IsFlat()) FlattenString(sub); - if (!pat->IsFlat()) FlattenString(pat); + sub = String::Flatten(sub); + pat = String::Flatten(pat); int position = -1; DisallowHeapAllocation no_gc; // ensure vectors stay valid @@ -4505,14 +4308,14 @@ } -RUNTIME_FUNCTION(MaybeObject*, 
Runtime_StringLocaleCompare) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_StringLocaleCompare) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 2); - CONVERT_ARG_CHECKED(String, str1, 0); - CONVERT_ARG_CHECKED(String, str2, 1); + CONVERT_ARG_HANDLE_CHECKED(String, str1, 0); + CONVERT_ARG_HANDLE_CHECKED(String, str2, 1); - if (str1 == str2) return Smi::FromInt(0); // Equal. + if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal. int str1_length = str1->length(); int str2_length = str2->length(); @@ -4532,30 +4335,26 @@ int d = str1->Get(0) - str2->Get(0); if (d != 0) return Smi::FromInt(d); - str1->TryFlatten(); - str2->TryFlatten(); + str1 = String::Flatten(str1); + str2 = String::Flatten(str2); - ConsStringIteratorOp* op1 = - isolate->runtime_state()->string_locale_compare_it1(); - ConsStringIteratorOp* op2 = - isolate->runtime_state()->string_locale_compare_it2(); - // TODO(dcarney) Can do array compares here more efficiently. 
- StringCharacterStream stream1(str1, op1); - StringCharacterStream stream2(str2, op2); + DisallowHeapAllocation no_gc; + String::FlatContent flat1 = str1->GetFlatContent(); + String::FlatContent flat2 = str2->GetFlatContent(); for (int i = 0; i < end; i++) { - uint16_t char1 = stream1.GetNext(); - uint16_t char2 = stream2.GetNext(); - if (char1 != char2) return Smi::FromInt(char1 - char2); + if (flat1.Get(i) != flat2.Get(i)) { + return Smi::FromInt(flat1.Get(i) - flat2.Get(i)); + } } return Smi::FromInt(str1_length - str2_length); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_SubString) { +RUNTIME_FUNCTION(Runtime_SubString) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, string, 0); int start, end; @@ -4581,16 +4380,26 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) { +RUNTIME_FUNCTION(Runtime_InternalizeString) { + HandleScope handles(isolate); + RUNTIME_ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, string, 0); + return *isolate->factory()->InternalizeString(string); +} + + +RUNTIME_FUNCTION(Runtime_StringMatch) { HandleScope handles(isolate); - ASSERT_EQ(3, args.length()); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2); + RUNTIME_ASSERT(regexp_info->HasFastObjectElements()); + RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); int capture_count = regexp->CaptureCount(); @@ -4604,7 +4413,7 @@ offsets.Add(match[1], zone_scope.zone()); // end } - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); if (offsets.length() == 0) { // Not a single match. 
@@ -4638,14 +4447,14 @@ // Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain // separate last match info. See comment on that function. template<bool has_capture> -static MaybeObject* SearchRegExpMultiple( +static Object* SearchRegExpMultiple( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp, Handle<JSArray> last_match_array, Handle<JSArray> result_array) { - ASSERT(subject->IsFlat()); - ASSERT_NE(has_capture, regexp->CaptureCount() == 0); + DCHECK(subject->IsFlat()); + DCHECK_NE(has_capture, regexp->CaptureCount() == 0); int capture_count = regexp->CaptureCount(); int subject_length = subject->length(); @@ -4676,14 +4485,13 @@ } RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); - Handle<FixedArray> result_elements; - if (result_array->HasFastObjectElements()) { - result_elements = - Handle<FixedArray>(FixedArray::cast(result_array->elements())); - } - if (result_elements.is_null() || result_elements->length() < 16) { + // Ensured in Runtime_RegExpExecMultiple. 
+ DCHECK(result_array->HasFastObjectElements()); + Handle<FixedArray> result_elements( + FixedArray::cast(result_array->elements())); + if (result_elements->length() < 16) { result_elements = isolate->factory()->NewFixedArrayWithHoles(16); } @@ -4734,12 +4542,12 @@ int start = current_match[i * 2]; if (start >= 0) { int end = current_match[i * 2 + 1]; - ASSERT(start <= end); + DCHECK(start <= end); Handle<String> substring = isolate->factory()->NewSubString(subject, start, end); elements->set(i, *substring); } else { - ASSERT(current_match[i * 2 + 1] < 0); + DCHECK(current_match[i * 2 + 1] < 0); elements->set(i, isolate->heap()->undefined_value()); } } @@ -4752,7 +4560,7 @@ } } - if (global_cache.HasException()) return Failure::Exception(); + if (global_cache.HasException()) return isolate->heap()->exception(); if (match_start >= 0) { // Finished matching, with at least one match. @@ -4773,10 +4581,10 @@ fixed_array->set(fixed_array->length() - 1, Smi::FromInt(builder.length())); // Cache the result and turn the FixedArray into a COW array. - RegExpResultsCache::Enter(isolate->heap(), - *subject, - regexp->data(), - *fixed_array, + RegExpResultsCache::Enter(isolate, + subject, + handle(regexp->data(), isolate), + fixed_array, RegExpResultsCache::REGEXP_MULTIPLE_INDICES); } return *builder.ToJSArray(result_array); @@ -4789,17 +4597,19 @@ // This is only called for StringReplaceGlobalRegExpWithFunction. This sets // lastMatchInfoOverride to maintain the last match info, so we don't need to // set any other last match array info. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) { +RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) { HandleScope handles(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(String, subject, 1); - if (!subject->IsFlat()) FlattenString(subject); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0); CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3); + RUNTIME_ASSERT(last_match_info->HasFastObjectElements()); + RUNTIME_ASSERT(result_array->HasFastObjectElements()); - ASSERT(regexp->GetFlags().is_global()); + subject = String::Flatten(subject); + RUNTIME_ASSERT(regexp->GetFlags().is_global()); if (regexp->CaptureCount() == 0) { return SearchRegExpMultiple<false>( @@ -4811,9 +4621,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberToRadixString) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_SMI_ARG_CHECKED(radix, 1); RUNTIME_ASSERT(2 <= radix && radix <= 36); @@ -4823,7 +4633,7 @@ if (value >= 0 && value < radix) { // Character array used for conversion. static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz"; - return isolate->heap()-> + return *isolate->factory()-> LookupSingleCharacterStringFromCode(kCharTable[value]); } } @@ -4831,80 +4641,76 @@ // Slow case. 
CONVERT_DOUBLE_ARG_CHECKED(value, 0); if (std::isnan(value)) { - return *isolate->factory()->nan_string(); + return isolate->heap()->nan_string(); } if (std::isinf(value)) { if (value < 0) { - return *isolate->factory()->minus_infinity_string(); + return isolate->heap()->minus_infinity_string(); } - return *isolate->factory()->infinity_string(); + return isolate->heap()->infinity_string(); } char* str = DoubleToRadixCString(value, radix); - MaybeObject* result = - isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); + Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str); DeleteArray(str); - return result; + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberToFixed) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(value, 0); CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2IChecked(f_number); - RUNTIME_ASSERT(f >= 0); + // See DoubleToFixedCString for these constants: + RUNTIME_ASSERT(f >= 0 && f <= 20); + RUNTIME_ASSERT(!Double(value).IsSpecial()); char* str = DoubleToFixedCString(value, f); - MaybeObject* res = - isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); + Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str); DeleteArray(str); - return res; + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberToExponential) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(value, 0); CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2IChecked(f_number); RUNTIME_ASSERT(f >= -1 && f <= 20); + RUNTIME_ASSERT(!Double(value).IsSpecial()); char* str = DoubleToExponentialCString(value, f); - MaybeObject* res = - isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); + Handle<String> 
result = isolate->factory()->NewStringFromAsciiChecked(str); DeleteArray(str); - return res; + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberToPrecision) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(value, 0); CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2IChecked(f_number); RUNTIME_ASSERT(f >= 1 && f <= 21); + RUNTIME_ASSERT(!Double(value).IsSpecial()); char* str = DoubleToPrecisionCString(value, f); - MaybeObject* res = - isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); + Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str); DeleteArray(str); - return res; + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) { - HandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_IsValidSmi) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]); - if (Smi::IsValid(number)) { - return isolate->heap()->true_value(); - } else { - return isolate->heap()->false_value(); - } + return isolate->heap()->ToBoolean(Smi::IsValid(number)); } @@ -4912,18 +4718,17 @@ // string->Get(index). 
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) { if (index < static_cast<uint32_t>(string->length())) { - string->TryFlatten(); - return LookupSingleCharacterStringFromCode( - string->GetIsolate(), - string->Get(index)); + Factory* factory = string->GetIsolate()->factory(); + return factory->LookupSingleCharacterStringFromCode( + String::Flatten(string)->Get(index)); } return Execution::CharAt(string, index); } -Handle<Object> Runtime::GetElementOrCharAt(Isolate* isolate, - Handle<Object> object, - uint32_t index) { +MaybeHandle<Object> Runtime::GetElementOrCharAt(Isolate* isolate, + Handle<Object> object, + uint32_t index) { // Handle [] indexing on Strings if (object->IsString()) { Handle<Object> result = GetCharAt(Handle<String>::cast(object), index); @@ -4940,147 +4745,144 @@ Handle<Object> result; if (object->IsString() || object->IsNumber() || object->IsBoolean()) { - Handle<Object> proto(object->GetPrototype(isolate), isolate); - return Object::GetElement(isolate, proto, index); + PrototypeIterator iter(isolate, object); + return Object::GetElement(isolate, PrototypeIterator::GetCurrent(iter), + index); } else { return Object::GetElement(isolate, object, index); } } -static Handle<Name> ToName(Isolate* isolate, Handle<Object> key) { +MUST_USE_RESULT +static MaybeHandle<Name> ToName(Isolate* isolate, Handle<Object> key) { if (key->IsName()) { return Handle<Name>::cast(key); } else { - bool has_pending_exception = false; - Handle<Object> converted = - Execution::ToString(isolate, key, &has_pending_exception); - if (has_pending_exception) return Handle<Name>(); + Handle<Object> converted; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, converted, Execution::ToString(isolate, key), Name); return Handle<Name>::cast(converted); } } -MaybeObject* Runtime::HasObjectProperty(Isolate* isolate, - Handle<JSReceiver> object, - Handle<Object> key) { - HandleScope scope(isolate); - +MaybeHandle<Object> Runtime::HasObjectProperty(Isolate* isolate, + 
Handle<JSReceiver> object, + Handle<Object> key) { + Maybe<bool> maybe; // Check if the given key is an array index. uint32_t index; if (key->ToArrayIndex(&index)) { - return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index)); - } - - // Convert the key to a name - possibly by calling back into JavaScript. - Handle<Name> name = ToName(isolate, key); - RETURN_IF_EMPTY_HANDLE(isolate, name); + maybe = JSReceiver::HasElement(object, index); + } else { + // Convert the key to a name - possibly by calling back into JavaScript. + Handle<Name> name; + ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object); - return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name)); -} + maybe = JSReceiver::HasProperty(object, name); + } -MaybeObject* Runtime::GetObjectPropertyOrFail( - Isolate* isolate, - Handle<Object> object, - Handle<Object> key) { - CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate, - GetObjectProperty(isolate, object, key)); + if (!maybe.has_value) return MaybeHandle<Object>(); + return isolate->factory()->ToBoolean(maybe.value); } -MaybeObject* Runtime::GetObjectProperty(Isolate* isolate, - Handle<Object> object, - Handle<Object> key) { - HandleScope scope(isolate); +MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate, + Handle<Object> object, + Handle<Object> key) { if (object->IsUndefined() || object->IsNull()) { Handle<Object> args[2] = { key, object }; - Handle<Object> error = + return isolate->Throw<Object>( isolate->factory()->NewTypeError("non_object_property_load", - HandleVector(args, 2)); - return isolate->Throw(*error); + HandleVector(args, 2))); } // Check if the given key is an array index. uint32_t index; if (key->ToArrayIndex(&index)) { - Handle<Object> result = GetElementOrCharAt(isolate, object, index); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; + return GetElementOrCharAt(isolate, object, index); } // Convert the key to a name - possibly by calling back into JavaScript. 
- Handle<Name> name = ToName(isolate, key); - RETURN_IF_EMPTY_HANDLE(isolate, name); + Handle<Name> name; + ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object); // Check if the name is trivially convertible to an index and get // the element if so. if (name->AsArrayIndex(&index)) { - Handle<Object> result = GetElementOrCharAt(isolate, object, index); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; + return GetElementOrCharAt(isolate, object, index); } else { - return object->GetProperty(*name); + return Object::GetProperty(object, name); } } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - - Handle<Object> object = args.at<Object>(0); - Handle<Object> key = args.at<Object>(1); +RUNTIME_FUNCTION(Runtime_GetProperty) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); - return Runtime::GetObjectProperty(isolate, object, key); + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::GetObjectProperty(isolate, object, key)); + return *result; } // KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric. -RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_KeyedGetProperty) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + + CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1); // Fast cases for getting named properties of the receiver JSObject // itself. // - // The global proxy objects has to be excluded since LocalLookup on + // The global proxy objects has to be excluded since LookupOwn on // the global proxy object can return a valid result even though the // global proxy object never has properties. 
This is the case // because the global proxy object forwards everything to its hidden - // prototype including local lookups. + // prototype including own lookups. // // Additionally, we need to make sure that we do not cache results // for objects that require access checks. - if (args[0]->IsJSObject()) { - if (!args[0]->IsJSGlobalProxy() && - !args[0]->IsAccessCheckNeeded() && - args[1]->IsName()) { - JSObject* receiver = JSObject::cast(args[0]); - Name* key = Name::cast(args[1]); + if (receiver_obj->IsJSObject()) { + if (!receiver_obj->IsJSGlobalProxy() && + !receiver_obj->IsAccessCheckNeeded() && + key_obj->IsName()) { + DisallowHeapAllocation no_allocation; + Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj); + Handle<Name> key = Handle<Name>::cast(key_obj); if (receiver->HasFastProperties()) { // Attempt to use lookup cache. - Map* receiver_map = receiver->map(); + Handle<Map> receiver_map(receiver->map(), isolate); KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache(); - int offset = keyed_lookup_cache->Lookup(receiver_map, key); - if (offset != -1) { + int index = keyed_lookup_cache->Lookup(receiver_map, key); + if (index != -1) { // Doubles are not cached, so raw read the value. - Object* value = receiver->RawFastPropertyAt(offset); - return value->IsTheHole() - ? isolate->heap()->undefined_value() - : value; + return receiver->RawFastPropertyAt( + FieldIndex::ForKeyedLookupCacheIndex(*receiver_map, index)); } // Lookup cache miss. Perform lookup and update the cache if // appropriate. LookupResult result(isolate); - receiver->LocalLookup(key, &result); + receiver->LookupOwn(key, &result); if (result.IsField()) { - int offset = result.GetFieldIndex().field_index(); + FieldIndex field_index = result.GetFieldIndex(); // Do not track double fields in the keyed lookup cache. Reading // double values requires boxing. 
if (!result.representation().IsDouble()) { - keyed_lookup_cache->Update(receiver_map, key, offset); + keyed_lookup_cache->Update(receiver_map, key, + field_index.GetKeyedLookupCacheIndex()); } - return receiver->FastPropertyAt(result.representation(), offset); + AllowHeapAllocation allow_allocation; + return *JSObject::FastPropertyAt(receiver, result.representation(), + field_index); } } else { // Attempt dictionary lookup. @@ -5092,51 +4894,49 @@ if (!receiver->IsGlobalObject()) return value; value = PropertyCell::cast(value)->value(); if (!value->IsTheHole()) return value; - // If value is the hole do the general lookup. + // If value is the hole (meaning, absent) do the general lookup. } } - } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) { + } else if (key_obj->IsSmi()) { // JSObject without a name key. If the key is a Smi, check for a // definite out-of-bounds access to elements, which is a strong indicator // that subsequent accesses will also call the runtime. Proactively // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of // doubles for those future calls in the case that the elements would // become FAST_DOUBLE_ELEMENTS. 
- Handle<JSObject> js_object(args.at<JSObject>(0)); + Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj); ElementsKind elements_kind = js_object->GetElementsKind(); if (IsFastDoubleElementsKind(elements_kind)) { - FixedArrayBase* elements = js_object->elements(); - if (args.at<Smi>(1)->value() >= elements->length()) { + Handle<Smi> key = Handle<Smi>::cast(key_obj); + if (key->value() >= js_object->elements()->length()) { if (IsFastHoleyElementsKind(elements_kind)) { elements_kind = FAST_HOLEY_ELEMENTS; } else { elements_kind = FAST_ELEMENTS; } - MaybeObject* maybe_object = TransitionElements(js_object, - elements_kind, - isolate); - if (maybe_object->IsFailure()) return maybe_object; + RETURN_FAILURE_ON_EXCEPTION( + isolate, TransitionElements(js_object, elements_kind, isolate)); } } else { - ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) || + DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) || !IsFastElementsKind(elements_kind)); } } - } else if (args[0]->IsString() && args[1]->IsSmi()) { + } else if (receiver_obj->IsString() && key_obj->IsSmi()) { // Fast case for string indexing using [] with a smi index. - HandleScope scope(isolate); - Handle<String> str = args.at<String>(0); + Handle<String> str = Handle<String>::cast(receiver_obj); int index = args.smi_at(1); if (index >= 0 && index < str->length()) { - Handle<Object> result = GetCharAt(str, index); - return *result; + return *GetCharAt(str, index); } } // Fall back to GetObjectProperty. - return Runtime::GetObjectProperty(isolate, - args.at<Object>(0), - args.at<Object>(1)); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::GetObjectProperty(isolate, receiver_obj, key_obj)); + return *result; } @@ -5145,15 +4945,46 @@ } +// Transform getter or setter into something DefineAccessor can handle. 
+static Handle<Object> InstantiateAccessorComponent(Isolate* isolate, + Handle<Object> component) { + if (component->IsUndefined()) return isolate->factory()->null_value(); + Handle<FunctionTemplateInfo> info = + Handle<FunctionTemplateInfo>::cast(component); + return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction()); +} + + +RUNTIME_FUNCTION(Runtime_DefineApiAccessorProperty) { + HandleScope scope(isolate); + DCHECK(args.length() == 5); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2); + CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3); + CONVERT_SMI_ARG_CHECKED(attribute, 4); + RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo()); + RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo()); + RUNTIME_ASSERT(PropertyDetails::AttributesField::is_valid( + static_cast<PropertyAttributes>(attribute))); + RETURN_FAILURE_ON_EXCEPTION( + isolate, JSObject::DefineAccessor( + object, name, InstantiateAccessorComponent(isolate, getter), + InstantiateAccessorComponent(isolate, setter), + static_cast<PropertyAttributes>(attribute))); + return isolate->heap()->undefined_value(); +} + + // Implements part of 8.12.9 DefineOwnProperty. // There are 3 cases that lead here: // Step 4b - define a new accessor property. // Steps 9c & 12 - replace an existing data property with an accessor property. // Step 12 - update an existing accessor property with an accessor or generic // descriptor. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) { +RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) { HandleScope scope(isolate); - ASSERT(args.length() == 5); + DCHECK(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); RUNTIME_ASSERT(!obj->IsNull()); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); @@ -5166,9 +4997,9 @@ PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked); bool fast = obj->HasFastProperties(); - JSObject::DefineAccessor(obj, name, getter, setter, attr); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (fast) JSObject::TransformToFastProperties(obj, 0); + RETURN_FAILURE_ON_EXCEPTION( + isolate, JSObject::DefineAccessor(obj, name, getter, setter, attr)); + if (fast) JSObject::MigrateSlowToFast(obj, 0); return isolate->heap()->undefined_value(); } @@ -5179,9 +5010,9 @@ // Steps 9b & 12 - replace an existing accessor property with a data property. // Step 12 - update an existing data property with a data or generic // descriptor. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) { +RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2); @@ -5189,133 +5020,87 @@ RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked); - LookupResult lookup(isolate); - js_object->LocalLookupRealNamedProperty(*name, &lookup); - - // Special case for callback properties. - if (lookup.IsPropertyCallbacks()) { - Handle<Object> callback(lookup.GetCallbackObject(), isolate); - // To be compatible with Safari we do not change the value on API objects - // in Object.defineProperty(). Firefox disagrees here, and actually changes - // the value. 
- if (callback->IsAccessorInfo()) { - return isolate->heap()->undefined_value(); - } - // Avoid redefining foreign callback as data property, just use the stored - // setter to update the value instead. - // TODO(mstarzinger): So far this only works if property attributes don't - // change, this should be fixed once we cleanup the underlying code. - if (callback->IsForeign() && lookup.GetAttributes() == attr) { - Handle<Object> result_object = - JSObject::SetPropertyWithCallback(js_object, - callback, - name, - obj_value, - handle(lookup.holder()), - STRICT); - RETURN_IF_EMPTY_HANDLE(isolate, result_object); - return *result_object; - } + // Check access rights if needed. + if (js_object->IsAccessCheckNeeded() && + !isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) { + return isolate->heap()->undefined_value(); } + LookupResult lookup(isolate); + js_object->LookupOwnRealNamedProperty(name, &lookup); + // Take special care when attributes are different and there is already // a property. For simplicity we normalize the property which enables us // to not worry about changing the instance_descriptor and creating a new - // map. The current version of SetObjectProperty does not handle attributes - // correctly in the case where a property is a field and is reset with - // new attributes. + // map. if (lookup.IsFound() && (attr != lookup.GetAttributes() || lookup.IsPropertyCallbacks())) { - // New attributes - normalize to avoid writing to instance descriptor - if (js_object->IsJSGlobalProxy()) { - // Since the result is a property, the prototype will exist so - // we don't have to check for null. - js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype())); - } - JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0); // Use IgnoreAttributes version since a readonly property may be // overridden and SetProperty does not allow this. 
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes( - js_object, name, obj_value, attr); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::SetOwnPropertyIgnoreAttributes( + js_object, name, obj_value, attr, + JSReceiver::PERFORM_EXTENSIBILITY_CHECK, + JSReceiver::MAY_BE_STORE_FROM_KEYED, + JSObject::DONT_FORCE_FIELD)); return *result; } - Handle<Object> result = Runtime::ForceSetObjectProperty(isolate, js_object, - name, - obj_value, - attr); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::DefineObjectProperty( + js_object, name, obj_value, attr, + JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED)); return *result; } // Return property without being observable by accessors or interceptors. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_GetDataProperty) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); - LookupResult lookup(isolate); - object->LookupRealNamedProperty(*key, &lookup); - if (!lookup.IsFound()) return isolate->heap()->undefined_value(); - switch (lookup.type()) { - case NORMAL: - return lookup.holder()->GetNormalizedProperty(&lookup); - case FIELD: - return lookup.holder()->FastPropertyAt( - lookup.representation(), - lookup.GetFieldIndex().field_index()); - case CONSTANT: - return lookup.GetConstant(); - case CALLBACKS: - case HANDLER: - case INTERCEPTOR: - case TRANSITION: - return isolate->heap()->undefined_value(); - case NONEXISTENT: - UNREACHABLE(); - } - return isolate->heap()->undefined_value(); + return *JSObject::GetDataProperty(object, key); } -Handle<Object> Runtime::SetObjectProperty(Isolate* isolate, - Handle<Object> object, - Handle<Object> key, - 
Handle<Object> value, - PropertyAttributes attr, - StrictMode strict_mode) { - SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY; - +MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate, + Handle<Object> object, + Handle<Object> key, + Handle<Object> value, + StrictMode strict_mode) { if (object->IsUndefined() || object->IsNull()) { Handle<Object> args[2] = { key, object }; Handle<Object> error = isolate->factory()->NewTypeError("non_object_property_store", HandleVector(args, 2)); - isolate->Throw(*error); - return Handle<Object>(); + return isolate->Throw<Object>(error); } if (object->IsJSProxy()) { - bool has_pending_exception = false; - Handle<Object> name_object = key->IsSymbol() - ? key : Execution::ToString(isolate, key, &has_pending_exception); - if (has_pending_exception) return Handle<Object>(); // exception + Handle<Object> name_object; + if (key->IsSymbol()) { + name_object = key; + } else { + ASSIGN_RETURN_ON_EXCEPTION( + isolate, name_object, Execution::ToString(isolate, key), Object); + } Handle<Name> name = Handle<Name>::cast(name_object); - return JSReceiver::SetProperty(Handle<JSProxy>::cast(object), name, value, - attr, - strict_mode); + return Object::SetProperty(Handle<JSProxy>::cast(object), name, value, + strict_mode); } - // If the object isn't a JavaScript object, we ignore the store. - if (!object->IsJSObject()) return value; - - Handle<JSObject> js_object = Handle<JSObject>::cast(object); - // Check if the given key is an array index. uint32_t index; if (key->ToArrayIndex(&index)) { + // TODO(verwaest): Support non-JSObject receivers. + if (!object->IsJSObject()) return value; + Handle<JSObject> js_object = Handle<JSObject>::cast(object); + // In Firefox/SpiderMonkey, Safari and Opera you can access the characters // of a string using [] notation. We need to support this too in // JavaScript. 
@@ -5327,68 +5112,66 @@ return value; } - js_object->ValidateElements(); + JSObject::ValidateElements(js_object); if (js_object->HasExternalArrayElements() || js_object->HasFixedTypedArrayElements()) { if (!value->IsNumber() && !value->IsUndefined()) { - bool has_exception; - Handle<Object> number = - Execution::ToNumber(isolate, value, &has_exception); - if (has_exception) return Handle<Object>(); // exception - value = number; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, Execution::ToNumber(isolate, value), Object); } } - Handle<Object> result = JSObject::SetElement(js_object, index, value, attr, - strict_mode, - true, - set_mode); - js_object->ValidateElements(); + + MaybeHandle<Object> result = JSObject::SetElement( + js_object, index, value, NONE, strict_mode, true, SET_PROPERTY); + JSObject::ValidateElements(js_object); + return result.is_null() ? result : value; } if (key->IsName()) { Handle<Name> name = Handle<Name>::cast(key); if (name->AsArrayIndex(&index)) { + // TODO(verwaest): Support non-JSObject receivers. 
+ if (!object->IsJSObject()) return value; + Handle<JSObject> js_object = Handle<JSObject>::cast(object); if (js_object->HasExternalArrayElements()) { if (!value->IsNumber() && !value->IsUndefined()) { - bool has_exception; - Handle<Object> number = - Execution::ToNumber(isolate, value, &has_exception); - if (has_exception) return Handle<Object>(); // exception - value = number; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, Execution::ToNumber(isolate, value), Object); } } - return JSObject::SetElement(js_object, index, value, attr, strict_mode, - true, - set_mode); + return JSObject::SetElement(js_object, index, value, NONE, strict_mode, + true, SET_PROPERTY); } else { - if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); - return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode); + if (name->IsString()) name = String::Flatten(Handle<String>::cast(name)); + return Object::SetProperty(object, name, value, strict_mode); } } // Call-back into JavaScript to convert the key to a string. - bool has_pending_exception = false; - Handle<Object> converted = - Execution::ToString(isolate, key, &has_pending_exception); - if (has_pending_exception) return Handle<Object>(); // exception + Handle<Object> converted; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, converted, Execution::ToString(isolate, key), Object); Handle<String> name = Handle<String>::cast(converted); if (name->AsArrayIndex(&index)) { - return JSObject::SetElement(js_object, index, value, attr, strict_mode, - true, - set_mode); - } else { - return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode); - } + // TODO(verwaest): Support non-JSObject receivers. 
+ if (!object->IsJSObject()) return value; + Handle<JSObject> js_object = Handle<JSObject>::cast(object); + return JSObject::SetElement(js_object, index, value, NONE, strict_mode, + true, SET_PROPERTY); + } + return Object::SetProperty(object, name, value, strict_mode); } -Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate, - Handle<JSObject> js_object, - Handle<Object> key, - Handle<Object> value, - PropertyAttributes attr) { +MaybeHandle<Object> Runtime::DefineObjectProperty( + Handle<JSObject> js_object, + Handle<Object> key, + Handle<Object> value, + PropertyAttributes attr, + JSReceiver::StoreFromKeyed store_from_keyed) { + Isolate* isolate = js_object->GetIsolate(); // Check if the given key is an array index. uint32_t index; if (key->ToArrayIndex(&index)) { @@ -5403,48 +5186,44 @@ return value; } - return JSObject::SetElement(js_object, index, value, attr, SLOPPY, - false, - DEFINE_PROPERTY); + return JSObject::SetElement(js_object, index, value, attr, + SLOPPY, false, DEFINE_PROPERTY); } if (key->IsName()) { Handle<Name> name = Handle<Name>::cast(key); if (name->AsArrayIndex(&index)) { - return JSObject::SetElement(js_object, index, value, attr, SLOPPY, - false, - DEFINE_PROPERTY); - } else { - if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); - return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name, - value, attr); + return JSObject::SetElement(js_object, index, value, attr, + SLOPPY, false, DEFINE_PROPERTY); + } else { + if (name->IsString()) name = String::Flatten(Handle<String>::cast(name)); + return JSObject::SetOwnPropertyIgnoreAttributes( + js_object, name, value, attr, JSReceiver::PERFORM_EXTENSIBILITY_CHECK, + store_from_keyed); } } // Call-back into JavaScript to convert the key to a string. 
- bool has_pending_exception = false; - Handle<Object> converted = - Execution::ToString(isolate, key, &has_pending_exception); - if (has_pending_exception) return Handle<Object>(); // exception + Handle<Object> converted; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, converted, Execution::ToString(isolate, key), Object); Handle<String> name = Handle<String>::cast(converted); if (name->AsArrayIndex(&index)) { - return JSObject::SetElement(js_object, index, value, attr, SLOPPY, - false, - DEFINE_PROPERTY); + return JSObject::SetElement(js_object, index, value, attr, + SLOPPY, false, DEFINE_PROPERTY); } else { - return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name, value, - attr); + return JSObject::SetOwnPropertyIgnoreAttributes( + js_object, name, value, attr, JSReceiver::PERFORM_EXTENSIBILITY_CHECK, + store_from_keyed); } } -MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate, - Handle<JSReceiver> receiver, - Handle<Object> key, - JSReceiver::DeleteMode mode) { - HandleScope scope(isolate); - +MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate, + Handle<JSReceiver> receiver, + Handle<Object> key, + JSReceiver::DeleteMode mode) { // Check if the given key is an array index. uint32_t index; if (key->ToArrayIndex(&index)) { @@ -5455,12 +5234,10 @@ // underlying string does nothing with the deletion, we can ignore // such deletions. if (receiver->IsStringObjectWithCharacterAt(index)) { - return isolate->heap()->true_value(); + return isolate->factory()->true_value(); } - Handle<Object> result = JSReceiver::DeleteElement(receiver, index, mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; + return JSReceiver::DeleteElement(receiver, index, mode); } Handle<Name> name; @@ -5468,36 +5245,65 @@ name = Handle<Name>::cast(key); } else { // Call-back into JavaScript to convert the key to a string. 
- bool has_pending_exception = false; - Handle<Object> converted = Execution::ToString( - isolate, key, &has_pending_exception); - if (has_pending_exception) return Failure::Exception(); + Handle<Object> converted; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, converted, Execution::ToString(isolate, key), Object); name = Handle<String>::cast(converted); } - if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); - Handle<Object> result = JSReceiver::DeleteProperty(receiver, name, mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; + if (name->IsString()) name = String::Flatten(Handle<String>::cast(name)); + return JSReceiver::DeleteProperty(receiver, name, mode); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenProperty) { +RUNTIME_FUNCTION(Runtime_SetHiddenProperty) { HandleScope scope(isolate); RUNTIME_ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_ARG_HANDLE_CHECKED(String, key, 1); CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); + RUNTIME_ASSERT(key->IsUniqueName()); return *JSObject::SetHiddenProperty(object, key, value); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) { +RUNTIME_FUNCTION(Runtime_AddNamedProperty) { HandleScope scope(isolate); - RUNTIME_ASSERT(args.length() == 4 || args.length() == 5); + RUNTIME_ASSERT(args.length() == 4); - CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); + CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3); + RUNTIME_ASSERT( + (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); + // Compute attributes. 
+ PropertyAttributes attributes = + static_cast<PropertyAttributes>(unchecked_attributes); + +#ifdef DEBUG + uint32_t index = 0; + DCHECK(!key->ToArrayIndex(&index)); + LookupIterator it(object, key, LookupIterator::CHECK_OWN_REAL); + Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it); + DCHECK(maybe.has_value); + RUNTIME_ASSERT(!it.IsFound()); +#endif + + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::SetOwnPropertyIgnoreAttributes(object, key, value, attributes)); + return *result; +} + + +RUNTIME_FUNCTION(Runtime_AddPropertyForTemplate) { + HandleScope scope(isolate); + RUNTIME_ASSERT(args.length() == 4); + + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3); @@ -5507,22 +5313,56 @@ PropertyAttributes attributes = static_cast<PropertyAttributes>(unchecked_attributes); - StrictMode strict_mode = SLOPPY; - if (args.length() == 5) { - CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 4); - strict_mode = strict_mode_arg; +#ifdef DEBUG + bool duplicate; + if (key->IsName()) { + LookupIterator it(object, Handle<Name>::cast(key), + LookupIterator::CHECK_OWN_REAL); + Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it); + DCHECK(maybe.has_value); + duplicate = it.IsFound(); + } else { + uint32_t index = 0; + RUNTIME_ASSERT(key->ToArrayIndex(&index)); + Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index); + if (!maybe.has_value) return isolate->heap()->exception(); + duplicate = maybe.value; + } + if (duplicate) { + Handle<Object> args[1] = { key }; + Handle<Object> error = isolate->factory()->NewTypeError( + "duplicate_template_property", HandleVector(args, 1)); + return isolate->Throw(*error); } +#endif + + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::DefineObjectProperty(object, key, value, 
attributes)); + return *result; +} + + +RUNTIME_FUNCTION(Runtime_SetProperty) { + HandleScope scope(isolate); + RUNTIME_ASSERT(args.length() == 4); + + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); + CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 3); + StrictMode strict_mode = strict_mode_arg; - Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key, - value, - attributes, - strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Runtime::SetObjectProperty(isolate, object, key, value, strict_mode)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) { +RUNTIME_FUNCTION(Runtime_TransitionElementsKind) { HandleScope scope(isolate); RUNTIME_ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); @@ -5535,7 +5375,7 @@ // Set the native flag on the function. // This is used to decide if we should transform null and undefined // into the global object when doing call and apply. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) { +RUNTIME_FUNCTION(Runtime_SetNativeFlag) { SealHandleScope shs(isolate); RUNTIME_ASSERT(args.length() == 1); @@ -5549,11 +5389,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInlineBuiltinFlag) { +RUNTIME_FUNCTION(Runtime_SetInlineBuiltinFlag) { SealHandleScope shs(isolate); RUNTIME_ASSERT(args.length() == 1); - - Handle<Object> object = args.at<Object>(0); + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); if (object->IsJSFunction()) { JSFunction* func = JSFunction::cast(*object); @@ -5563,12 +5402,12 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) { +RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) { HandleScope scope(isolate); RUNTIME_ASSERT(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_SMI_ARG_CHECKED(store_index, 1); - Handle<Object> value = args.at<Object>(2); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3); CONVERT_SMI_ARG_CHECKED(literal_index, 4); @@ -5582,12 +5421,12 @@ } Handle<JSArray> boilerplate_object(boilerplate); ElementsKind elements_kind = object->GetElementsKind(); - ASSERT(IsFastElementsKind(elements_kind)); + DCHECK(IsFastElementsKind(elements_kind)); // Smis should never trigger transitions. - ASSERT(!value->IsSmi()); + DCHECK(!value->IsSmi()); if (value->IsNumber()) { - ASSERT(IsFastSmiElementsKind(elements_kind)); + DCHECK(IsFastSmiElementsKind(elements_kind)); ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind) ? 
FAST_HOLEY_DOUBLE_ELEMENTS : FAST_DOUBLE_ELEMENTS; @@ -5597,21 +5436,22 @@ JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind); } JSObject::TransitionElementsKind(object, transitioned_kind); - ASSERT(IsFastDoubleElementsKind(object->GetElementsKind())); + DCHECK(IsFastDoubleElementsKind(object->GetElementsKind())); FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements()); HeapNumber* number = HeapNumber::cast(*value); double_array->set(store_index, number->Number()); } else { - ASSERT(IsFastSmiElementsKind(elements_kind) || - IsFastDoubleElementsKind(elements_kind)); - ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind) - ? FAST_HOLEY_ELEMENTS - : FAST_ELEMENTS; - JSObject::TransitionElementsKind(object, transitioned_kind); - if (IsMoreGeneralElementsKindTransition( - boilerplate_object->GetElementsKind(), - transitioned_kind)) { - JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind); + if (!IsFastObjectElementsKind(elements_kind)) { + ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind) + ? FAST_HOLEY_ELEMENTS + : FAST_ELEMENTS; + JSObject::TransitionElementsKind(object, transitioned_kind); + ElementsKind boilerplate_elements_kind = + boilerplate_object->GetElementsKind(); + if (IsMoreGeneralElementsKindTransition(boilerplate_elements_kind, + transitioned_kind)) { + JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind); + } } FixedArray* object_array = FixedArray::cast(object->elements()); object_array->set(store_index, *value); @@ -5622,29 +5462,22 @@ // Check whether debugger and is about to step into the callback that is passed // to a built-in function such as Array.forEach. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) { - SealHandleScope shs(isolate); -#ifdef ENABLE_DEBUGGER_SUPPORT - if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) { +RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) { + DCHECK(args.length() == 1); + if (!isolate->debug()->is_active() || !isolate->debug()->StepInActive()) { return isolate->heap()->false_value(); } CONVERT_ARG_CHECKED(Object, callback, 0); // We do not step into the callback if it's a builtin or not even a function. - if (!callback->IsJSFunction() || JSFunction::cast(callback)->IsBuiltin()) { - return isolate->heap()->false_value(); - } - return isolate->heap()->true_value(); -#else - return isolate->heap()->false_value(); -#endif // ENABLE_DEBUGGER_SUPPORT + return isolate->heap()->ToBoolean( + callback->IsJSFunction() && !JSFunction::cast(callback)->IsBuiltin()); } // Set one shot breakpoints for the callback function that is passed to a // built-in function such as Array.forEach to enable stepping into the callback. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) { - SealHandleScope shs(isolate); -#ifdef ENABLE_DEBUGGER_SUPPORT +RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) { + DCHECK(args.length() == 1); Debug* debug = isolate->debug(); if (!debug->IsStepping()) return isolate->heap()->undefined_value(); CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0); @@ -5654,75 +5487,101 @@ // again, we need to clear the step out at this point. debug->ClearStepOut(); debug->FloodWithOneShot(callback); -#endif // ENABLE_DEBUGGER_SUPPORT return isolate->heap()->undefined_value(); } -// Set a local property, even if it is READ_ONLY. If the property does not -// exist, it will be added with attributes NONE. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) { +RUNTIME_FUNCTION(Runtime_DebugPushPromise) { + DCHECK(args.length() == 1); HandleScope scope(isolate); - RUNTIME_ASSERT(args.length() == 3 || args.length() == 4); - CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); - CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); - CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); - // Compute attributes. - PropertyAttributes attributes = NONE; - if (args.length() == 4) { - CONVERT_SMI_ARG_CHECKED(unchecked_value, 3); - // Only attribute bits should be set. - RUNTIME_ASSERT( - (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); - attributes = static_cast<PropertyAttributes>(unchecked_value); - } - Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes( - object, name, value, attributes); - RETURN_IF_EMPTY_HANDLE(isolate, result); - return *result; + CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0); + isolate->debug()->PushPromise(promise); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_DebugPopPromise) { + DCHECK(args.length() == 0); + SealHandleScope shs(isolate); + isolate->debug()->PopPromise(); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_DebugPromiseEvent) { + DCHECK(args.length() == 1); + HandleScope scope(isolate); + CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0); + isolate->debug()->OnPromiseEvent(data); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_DebugPromiseRejectEvent) { + DCHECK(args.length() == 2); + HandleScope scope(isolate); + CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 1); + isolate->debug()->OnPromiseReject(promise, value); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) { + DCHECK(args.length() == 1); + HandleScope scope(isolate); + CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0); + isolate->debug()->OnAsyncTaskEvent(data); + 
return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) { +RUNTIME_FUNCTION(Runtime_DeleteProperty) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0); CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2); JSReceiver::DeleteMode delete_mode = strict_mode == STRICT ? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION; - Handle<Object> result = JSReceiver::DeleteProperty(object, key, delete_mode); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSReceiver::DeleteProperty(object, key, delete_mode)); return *result; } -static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate, - Handle<JSObject> object, - Handle<Name> key) { - if (JSReceiver::HasLocalProperty(object, key)) { - return isolate->heap()->true_value(); - } +static Object* HasOwnPropertyImplementation(Isolate* isolate, + Handle<JSObject> object, + Handle<Name> key) { + Maybe<bool> maybe = JSReceiver::HasOwnProperty(object, key); + if (!maybe.has_value) return isolate->heap()->exception(); + if (maybe.value) return isolate->heap()->true_value(); // Handle hidden prototypes. If there's a hidden prototype above this thing // then we have to check it for properties, because they are supposed to // look like they are on this object. - Handle<Object> proto(object->GetPrototype(), isolate); - if (proto->IsJSObject() && - Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) { - return HasLocalPropertyImplementation(isolate, - Handle<JSObject>::cast(proto), - key); + PrototypeIterator iter(isolate, object); + if (!iter.IsAtEnd() && + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)) + ->map() + ->is_hidden_prototype()) { + // TODO(verwaest): The recursion is not necessary for keys that are array + // indices. Removing this. 
+ return HasOwnPropertyImplementation( + isolate, Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), + key); } - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return isolate->heap()->false_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { +RUNTIME_FUNCTION(Runtime_HasOwnProperty) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0) CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); - Handle<Object> object = args.at<Object>(0); uint32_t index; const bool key_is_array_index = key->AsArrayIndex(&index); @@ -5733,11 +5592,11 @@ // Fast case: either the key is a real named property or it is not // an array index and there are no interceptors or hidden // prototypes. - if (JSObject::HasRealNamedProperty(js_obj, key)) { - ASSERT(!isolate->has_scheduled_exception()); + Maybe<bool> maybe = JSObject::HasRealNamedProperty(js_obj, key); + if (!maybe.has_value) return isolate->heap()->exception(); + DCHECK(!isolate->has_pending_exception()); + if (maybe.value) { return isolate->heap()->true_value(); - } else { - RETURN_IF_SCHEDULED_EXCEPTION(isolate); } Map* map = js_obj->map(); if (!key_is_array_index && @@ -5746,9 +5605,9 @@ return isolate->heap()->false_value(); } // Slow case. - return HasLocalPropertyImplementation(isolate, - Handle<JSObject>(js_obj), - Handle<Name>(key)); + return HasOwnPropertyImplementation(isolate, + Handle<JSObject>(js_obj), + Handle<Name>(key)); } else if (object->IsString() && key_is_array_index) { // Well, there is one exception: Handle [] on strings. 
Handle<String> string = Handle<String>::cast(object); @@ -5760,57 +5619,57 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) { +RUNTIME_FUNCTION(Runtime_HasProperty) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0); CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); - bool result = JSReceiver::HasProperty(receiver, key); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (isolate->has_pending_exception()) return Failure::Exception(); - return isolate->heap()->ToBoolean(result); + Maybe<bool> maybe = JSReceiver::HasProperty(receiver, key); + if (!maybe.has_value) return isolate->heap()->exception(); + return isolate->heap()->ToBoolean(maybe.value); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) { +RUNTIME_FUNCTION(Runtime_HasElement) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0); CONVERT_SMI_ARG_CHECKED(index, 1); - bool result = JSReceiver::HasElement(receiver, index); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (isolate->has_pending_exception()) return Failure::Exception(); - return isolate->heap()->ToBoolean(result); + Maybe<bool> maybe = JSReceiver::HasElement(receiver, index); + if (!maybe.has_value) return isolate->heap()->exception(); + return isolate->heap()->ToBoolean(maybe.value); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) { +RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); - PropertyAttributes att = JSReceiver::GetLocalPropertyAttribute(object, key); - if (att == ABSENT || (att & DONT_ENUM) != 0) { - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return isolate->heap()->false_value(); - } - ASSERT(!isolate->has_scheduled_exception()); - return isolate->heap()->true_value(); 
+ Maybe<PropertyAttributes> maybe = + JSReceiver::GetOwnPropertyAttributes(object, key); + if (!maybe.has_value) return isolate->heap()->exception(); + if (maybe.value == ABSENT) maybe.value = DONT_ENUM; + return isolate->heap()->ToBoolean((maybe.value & DONT_ENUM) == 0); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) { +RUNTIME_FUNCTION(Runtime_GetPropertyNames) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0); - bool threw = false; - Handle<JSArray> result = GetKeysFor(object, &threw); - if (threw) return Failure::Exception(); - return *result; + Handle<JSArray> result; + + isolate->counters()->for_in()->Increment(); + Handle<FixedArray> elements; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, elements, + JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS)); + return *isolate->factory()->NewJSArrayWithElements(elements); } @@ -5819,9 +5678,9 @@ // all enumerable properties of the object and its prototypes // have none, the map of the object. This is used to speed up // the check for deletions during a for-in. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) { +RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0); @@ -5829,10 +5688,10 @@ HandleScope scope(isolate); Handle<JSReceiver> object(raw_object); - bool threw = false; - Handle<FixedArray> content = - GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw); - if (threw) return Failure::Exception(); + Handle<FixedArray> content; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, content, + JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS)); // Test again, since cache may have been built by preceding call. if (object->IsSimpleEnum()) return object->map(); @@ -5841,27 +5700,25 @@ } -// Find the length of the prototype chain that is to to handled as one. 
If a +// Find the length of the prototype chain that is to be handled as one. If a // prototype object is hidden it is to be viewed as part of the the object it // is prototype for. -static int LocalPrototypeChainLength(JSObject* obj) { +static int OwnPrototypeChainLength(JSObject* obj) { int count = 1; - Object* proto = obj->GetPrototype(); - while (proto->IsJSObject() && - JSObject::cast(proto)->map()->is_hidden_prototype()) { + for (PrototypeIterator iter(obj->GetIsolate(), obj); + !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) { count++; - proto = JSObject::cast(proto)->GetPrototype(); } return count; } -// Return the names of the local named properties. +// Return the names of the own named properties. // args[0]: object // args[1]: PropertyAttributes as int -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) { +RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); if (!args[0]->IsJSObject()) { return isolate->heap()->undefined_value(); } @@ -5874,39 +5731,42 @@ if (obj->IsJSGlobalProxy()) { // Only collect names if access is permitted. if (obj->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(obj, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheckWrapper(obj, v8::ACCESS_KEYS); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + !isolate->MayNamedAccess( + obj, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) { + isolate->ReportFailedAccessCheck(obj, v8::ACCESS_KEYS); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return *isolate->factory()->NewJSArray(0); } - obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype())); + PrototypeIterator iter(isolate, obj); + obj = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); } // Find the number of objects making up this. 
- int length = LocalPrototypeChainLength(*obj); + int length = OwnPrototypeChainLength(*obj); - // Find the number of local properties for each of the objects. - ScopedVector<int> local_property_count(length); + // Find the number of own properties for each of the objects. + ScopedVector<int> own_property_count(length); int total_property_count = 0; - Handle<JSObject> jsproto = obj; - for (int i = 0; i < length; i++) { - // Only collect names if access is permitted. - if (jsproto->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(jsproto, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheckWrapper(jsproto, v8::ACCESS_KEYS); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return *isolate->factory()->NewJSArray(0); - } - int n; - n = jsproto->NumberOfLocalProperties(filter); - local_property_count[i] = n; - total_property_count += n; - if (i < length - 1) { - jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype())); + { + PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER); + for (int i = 0; i < length; i++) { + DCHECK(!iter.IsAtEnd()); + Handle<JSObject> jsproto = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); + // Only collect names if access is permitted. + if (jsproto->IsAccessCheckNeeded() && + !isolate->MayNamedAccess(jsproto, + isolate->factory()->undefined_value(), + v8::ACCESS_KEYS)) { + isolate->ReportFailedAccessCheck(jsproto, v8::ACCESS_KEYS); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); + return *isolate->factory()->NewJSArray(0); + } + int n; + n = jsproto->NumberOfOwnProperties(filter); + own_property_count[i] = n; + total_property_count += n; + iter.Advance(); } } @@ -5915,39 +5775,41 @@ isolate->factory()->NewFixedArray(total_property_count); // Get the property names. 
- jsproto = obj; int next_copy_index = 0; int hidden_strings = 0; - for (int i = 0; i < length; i++) { - jsproto->GetLocalPropertyNames(*names, next_copy_index, filter); - if (i > 0) { - // Names from hidden prototypes may already have been added - // for inherited function template instances. Count the duplicates - // and stub them out; the final copy pass at the end ignores holes. - for (int j = next_copy_index; - j < next_copy_index + local_property_count[i]; - j++) { - Object* name_from_hidden_proto = names->get(j); - for (int k = 0; k < next_copy_index; k++) { - if (names->get(k) != isolate->heap()->hidden_string()) { - Object* name = names->get(k); - if (name_from_hidden_proto == name) { - names->set(j, isolate->heap()->hidden_string()); - hidden_strings++; - break; + { + PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER); + for (int i = 0; i < length; i++) { + DCHECK(!iter.IsAtEnd()); + Handle<JSObject> jsproto = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); + jsproto->GetOwnPropertyNames(*names, next_copy_index, filter); + if (i > 0) { + // Names from hidden prototypes may already have been added + // for inherited function template instances. Count the duplicates + // and stub them out; the final copy pass at the end ignores holes. + for (int j = next_copy_index; + j < next_copy_index + own_property_count[i]; j++) { + Object* name_from_hidden_proto = names->get(j); + for (int k = 0; k < next_copy_index; k++) { + if (names->get(k) != isolate->heap()->hidden_string()) { + Object* name = names->get(k); + if (name_from_hidden_proto == name) { + names->set(j, isolate->heap()->hidden_string()); + hidden_strings++; + break; + } } } } } - } - next_copy_index += local_property_count[i]; + next_copy_index += own_property_count[i]; - // Hidden properties only show up if the filter does not skip strings. 
- if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) { - hidden_strings++; - } - if (i < length - 1) { - jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype())); + // Hidden properties only show up if the filter does not skip strings. + if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) { + hidden_strings++; + } + iter.Advance(); } } @@ -5966,35 +5828,35 @@ } names->set(dest_pos++, name); } - ASSERT_EQ(0, hidden_strings); + DCHECK_EQ(0, hidden_strings); } return *isolate->factory()->NewJSArrayWithElements(names); } -// Return the names of the local indexed properties. +// Return the names of the own indexed properties. // args[0]: object -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) { +RUNTIME_FUNCTION(Runtime_GetOwnElementNames) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); if (!args[0]->IsJSObject()) { return isolate->heap()->undefined_value(); } CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); - int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE)); + int n = obj->NumberOfOwnElements(static_cast<PropertyAttributes>(NONE)); Handle<FixedArray> names = isolate->factory()->NewFixedArray(n); - obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE)); + obj->GetOwnElementKeys(*names, static_cast<PropertyAttributes>(NONE)); return *isolate->factory()->NewJSArrayWithElements(names); } // Return information on whether an object has a named or indexed interceptor. // args[0]: object -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) { +RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); if (!args[0]->IsJSObject()) { return Smi::FromInt(0); } @@ -6010,14 +5872,16 @@ // Return property names from named interceptor. 
// args[0]: object -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) { +RUNTIME_FUNCTION(Runtime_GetNamedInterceptorPropertyNames) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); if (obj->HasNamedInterceptor()) { - v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj); - if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result); + Handle<JSObject> result; + if (JSObject::GetKeysForNamedInterceptor(obj, obj).ToHandle(&result)) { + return *result; + } } return isolate->heap()->undefined_value(); } @@ -6025,46 +5889,47 @@ // Return element names from indexed interceptor. // args[0]: object -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) { +RUNTIME_FUNCTION(Runtime_GetIndexedInterceptorElementNames) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); if (obj->HasIndexedInterceptor()) { - v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj); - if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result); - } + Handle<JSObject> result; + if (JSObject::GetKeysForIndexedInterceptor(obj, obj).ToHandle(&result)) { + return *result; + } + } return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) { +RUNTIME_FUNCTION(Runtime_OwnKeys) { HandleScope scope(isolate); - ASSERT_EQ(args.length(), 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, raw_object, 0); Handle<JSObject> object(raw_object); if (object->IsJSGlobalProxy()) { // Do access checks before going to the global object. 
if (object->IsAccessCheckNeeded() && - !isolate->MayNamedAccessWrapper(object, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + !isolate->MayNamedAccess( + object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) { + isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return *isolate->factory()->NewJSArray(0); } - Handle<Object> proto(object->GetPrototype(), isolate); + PrototypeIterator iter(isolate, object); // If proxy is detached we simply return an empty array. - if (proto->IsNull()) return *isolate->factory()->NewJSArray(0); - object = Handle<JSObject>::cast(proto); + if (iter.IsAtEnd()) return *isolate->factory()->NewJSArray(0); + object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); } - bool threw = false; - Handle<FixedArray> contents = - GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw); - if (threw) return Failure::Exception(); + Handle<FixedArray> contents; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, contents, + JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY)); // Some fast paths through GetKeysInFixedArrayFor reuse a cached // property array and since the result is mutable we have to create @@ -6076,7 +5941,7 @@ if (entry->IsString()) { copy->set(i, entry); } else { - ASSERT(entry->IsNumber()); + DCHECK(entry->IsNumber()); HandleScope scope(isolate); Handle<Object> entry_handle(entry, isolate); Handle<Object> entry_str = @@ -6088,9 +5953,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) { +RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0); // Compute the frame holding the arguments. JavaScriptFrameIterator it(isolate); @@ -6103,22 +5969,25 @@ // Try to convert the key to an index. 
If successful and within // index return the the argument from the frame. uint32_t index; - if (args[0]->ToArrayIndex(&index) && index < n) { + if (raw_key->ToArrayIndex(&index) && index < n) { return frame->GetParameter(index); } - if (args[0]->IsSymbol()) { + HandleScope scope(isolate); + if (raw_key->IsSymbol()) { // Lookup in the initial Object.prototype object. - return isolate->initial_object_prototype()->GetProperty( - Symbol::cast(args[0])); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Object::GetProperty(isolate->initial_object_prototype(), + Handle<Symbol>::cast(raw_key))); + return *result; } // Convert the key to a string. - HandleScope scope(isolate); - bool exception = false; - Handle<Object> converted = - Execution::ToString(isolate, args.at<Object>(0), &exception); - if (exception) return Failure::Exception(); + Handle<Object> converted; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, converted, Execution::ToString(isolate, raw_key)); Handle<String> key = Handle<String>::cast(converted); // Try to convert the string key into an array index. @@ -6127,16 +5996,19 @@ return frame->GetParameter(index); } else { Handle<Object> initial_prototype(isolate->initial_object_prototype()); - Handle<Object> result = - Object::GetElement(isolate, initial_prototype, index); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Object::GetElement(isolate, initial_prototype, index)); return *result; } } // Handle special arguments properties. 
- if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n); - if (key->Equals(isolate->heap()->callee_string())) { + if (String::Equals(isolate->factory()->length_string(), key)) { + return Smi::FromInt(n); + } + if (String::Equals(isolate->factory()->callee_string(), key)) { JSFunction* function = frame->function(); if (function->shared()->strict_mode() == STRICT) { return isolate->Throw(*isolate->factory()->NewTypeError( @@ -6146,35 +6018,40 @@ } // Lookup in the initial Object.prototype object. - return isolate->initial_object_prototype()->GetProperty(*key); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Object::GetProperty(isolate->initial_object_prototype(), key)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) { +RUNTIME_FUNCTION(Runtime_ToFastProperties) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); if (object->IsJSObject() && !object->IsGlobalObject()) { - JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0); + JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0); } return *object; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) { +RUNTIME_FUNCTION(Runtime_ToBool) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, object, 0); - return isolate->heap()->ToBoolean(args[0]->BooleanValue()); + return isolate->heap()->ToBoolean(object->BooleanValue()); } // Returns the type string of a value; see ECMA-262, 11.4.3 (p 47). // Possible optimizations: put the type string into the oddballs. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) { +RUNTIME_FUNCTION(Runtime_Typeof) { SealHandleScope shs(isolate); - - Object* obj = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); if (obj->IsNumber()) return isolate->heap()->number_string(); HeapObject* heap_obj = HeapObject::cast(obj); @@ -6194,11 +6071,9 @@ return isolate->heap()->boolean_string(); } if (heap_obj->IsNull()) { - return FLAG_harmony_typeof - ? isolate->heap()->null_string() - : isolate->heap()->object_string(); + return isolate->heap()->object_string(); } - ASSERT(heap_obj->IsUndefined()); + DCHECK(heap_obj->IsUndefined()); return isolate->heap()->undefined_string(); case SYMBOL_TYPE: return isolate->heap()->symbol_string(); @@ -6213,6 +6088,35 @@ } +RUNTIME_FUNCTION(Runtime_Booleanize) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_CHECKED(Object, value_raw, 0); + CONVERT_SMI_ARG_CHECKED(token_raw, 1); + intptr_t value = reinterpret_cast<intptr_t>(value_raw); + Token::Value token = static_cast<Token::Value>(token_raw); + switch (token) { + case Token::EQ: + case Token::EQ_STRICT: + return isolate->heap()->ToBoolean(value == 0); + case Token::NE: + case Token::NE_STRICT: + return isolate->heap()->ToBoolean(value != 0); + case Token::LT: + return isolate->heap()->ToBoolean(value < 0); + case Token::GT: + return isolate->heap()->ToBoolean(value > 0); + case Token::LTE: + return isolate->heap()->ToBoolean(value <= 0); + case Token::GTE: + return isolate->heap()->ToBoolean(value >= 0); + default: + // This should only happen during natives fuzzing. + return isolate->heap()->undefined_value(); + } +} + + static bool AreDigits(const uint8_t*s, int from, int to) { for (int i = from; i < to; i++) { if (s[i] < '0' || s[i] > '9') return false; @@ -6223,8 +6127,8 @@ static int ParseDecimalInteger(const uint8_t*s, int from, int to) { - ASSERT(to - from < 10); // Overflow is not possible. 
- ASSERT(from < to); + DCHECK(to - from < 10); // Overflow is not possible. + DCHECK(from < to); int d = s[from] - '0'; for (int i = from + 1; i < to; i++) { @@ -6235,18 +6139,19 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(String, subject, 0); - subject->TryFlatten(); +RUNTIME_FUNCTION(Runtime_StringToNumber) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); + subject = String::Flatten(subject); // Fast case: short integer or some sorts of junk values. - int len = subject->length(); if (subject->IsSeqOneByteString()) { + int len = subject->length(); if (len == 0) return Smi::FromInt(0); - uint8_t const* data = SeqOneByteString::cast(subject)->GetChars(); + DisallowHeapAllocation no_gc; + uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars(); bool minus = (data[0] == '-'); int start_pos = (minus ? 1 : 0); @@ -6254,15 +6159,15 @@ return isolate->heap()->nan_value(); } else if (data[start_pos] > '9') { // Fast check for a junk value. A valid string may start from a - // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or - // the 'I' character ('Infinity'). All of that have codes not greater than - // '9' except 'I' and  . + // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit + // or the 'I' character ('Infinity'). All of that have codes not greater + // than '9' except 'I' and  . if (data[start_pos] != 'I' && data[start_pos] != 0xa0) { return isolate->heap()->nan_value(); } } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) { - // The maximal/minimal smi has 10 digits. If the string has less digits we - // know it will fit into the smi-data type. + // The maximal/minimal smi has 10 digits. If the string has less digits + // we know it will fit into the smi-data type. 
int d = ParseDecimalInteger(data, start_pos, len); if (minus) { if (d == 0) return isolate->heap()->minus_zero_value(); @@ -6275,7 +6180,7 @@ uint32_t hash = StringHasher::MakeArrayIndexHash(d, len); #ifdef DEBUG subject->Hash(); // Force hash calculation. - ASSERT_EQ(static_cast<int>(subject->hash_field()), + DCHECK_EQ(static_cast<int>(subject->hash_field()), static_cast<int>(hash)); #endif subject->set_hash_field(hash); @@ -6291,98 +6196,132 @@ // Type", https://bugs.ecmascript.org/show_bug.cgi?id=1584 flags |= ALLOW_OCTAL | ALLOW_BINARY; } - return isolate->heap()->NumberFromDouble( - StringToDouble(isolate->unicode_cache(), subject, flags)); + + return *isolate->factory()->NewNumber(StringToDouble( + isolate->unicode_cache(), *subject, flags)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) { - SealHandleScope shs(isolate); +RUNTIME_FUNCTION(Runtime_NewString) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_SMI_ARG_CHECKED(length, 0); CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1); if (length == 0) return isolate->heap()->empty_string(); + Handle<String> result; if (is_one_byte) { - return isolate->heap()->AllocateRawOneByteString(length); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, isolate->factory()->NewRawOneByteString(length)); } else { - return isolate->heap()->AllocateRawTwoByteString(length); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, isolate->factory()->NewRawTwoByteString(length)); } + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) { +RUNTIME_FUNCTION(Runtime_TruncateString) { HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0); CONVERT_SMI_ARG_CHECKED(new_length, 1); + RUNTIME_ASSERT(new_length >= 0); return *SeqString::Truncate(string, new_length); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) { +RUNTIME_FUNCTION(Runtime_URIEscape) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + 
DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, source, 0); - Handle<String> string = FlattenGetString(source); - ASSERT(string->IsFlat()); - Handle<String> result = string->IsOneByteRepresentationUnderneath() - ? URIEscape::Escape<uint8_t>(isolate, source) - : URIEscape::Escape<uc16>(isolate, source); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<String> string = String::Flatten(source); + DCHECK(string->IsFlat()); + Handle<String> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + string->IsOneByteRepresentationUnderneath() + ? URIEscape::Escape<uint8_t>(isolate, source) + : URIEscape::Escape<uc16>(isolate, source)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) { +RUNTIME_FUNCTION(Runtime_URIUnescape) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, source, 0); - Handle<String> string = FlattenGetString(source); - ASSERT(string->IsFlat()); - return string->IsOneByteRepresentationUnderneath() - ? *URIUnescape::Unescape<uint8_t>(isolate, source) - : *URIUnescape::Unescape<uc16>(isolate, source); + Handle<String> string = String::Flatten(source); + DCHECK(string->IsFlat()); + Handle<String> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + string->IsOneByteRepresentationUnderneath() + ? 
URIUnescape::Unescape<uint8_t>(isolate, source) + : URIUnescape::Unescape<uc16>(isolate, source)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) { +RUNTIME_FUNCTION(Runtime_QuoteJSONString) { HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(String, string, 0); - ASSERT(args.length() == 1); - return BasicJsonStringifier::StringifyString(isolate, string); + DCHECK(args.length() == 1); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, BasicJsonStringifier::StringifyString(isolate, string)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) { +RUNTIME_FUNCTION(Runtime_BasicJSONStringify) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); BasicJsonStringifier stringifier(isolate); - return stringifier.Stringify(Handle<Object>(args[0], isolate)); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, stringifier.Stringify(object)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) { - SealHandleScope shs(isolate); +RUNTIME_FUNCTION(Runtime_StringParseInt) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); + CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]); + RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36)); - CONVERT_ARG_CHECKED(String, s, 0); - CONVERT_SMI_ARG_CHECKED(radix, 1); + subject = String::Flatten(subject); + double value; - s->TryFlatten(); + { DisallowHeapAllocation no_gc; + String::FlatContent flat = subject->GetFlatContent(); - RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36)); - double value = StringToInt(isolate->unicode_cache(), s, radix); - return isolate->heap()->NumberFromDouble(value); + // ECMA-262 section 15.1.2.3, empty string is NaN + if (flat.IsAscii()) { + value = StringToInt( + isolate->unicode_cache(), 
flat.ToOneByteVector(), radix); + } else { + value = StringToInt( + isolate->unicode_cache(), flat.ToUC16Vector(), radix); + } + } + + return *isolate->factory()->NewNumber(value); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) { - SealHandleScope shs(isolate); - CONVERT_ARG_CHECKED(String, str, 0); +RUNTIME_FUNCTION(Runtime_StringParseFloat) { + HandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); - // ECMA-262 section 15.1.2.3, empty string is NaN - double value = StringToDouble(isolate->unicode_cache(), - str, ALLOW_TRAILING_JUNK, OS::nan_value()); + subject = String::Flatten(subject); + double value = StringToDouble(isolate->unicode_cache(), *subject, + ALLOW_TRAILING_JUNK, base::OS::nan_value()); - // Create a number object from the value. - return isolate->heap()->NumberFromDouble(value); + return *isolate->factory()->NewNumber(value); } @@ -6396,7 +6335,7 @@ template <class Converter> -MUST_USE_RESULT static MaybeObject* ConvertCaseHelper( +MUST_USE_RESULT static Object* ConvertCaseHelper( Isolate* isolate, String* string, SeqString* result, @@ -6434,7 +6373,7 @@ } else if (char_length == 1 && (ignore_overflow || !ToUpperOverflows(current))) { // Common case: converting the letter resulted in one character. - ASSERT(static_cast<uc32>(chars[0]) != current); + DCHECK(static_cast<uc32>(chars[0]) != current); result->Set(i, chars[0]); has_changed_character = true; i++; @@ -6512,7 +6451,7 @@ static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) { // Use strict inequalities since in edge cases the function could be // further simplified. - ASSERT(0 < m && m < n); + DCHECK(0 < m && m < n); // Has high bit set in every w byte less than n. uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w; // Has high bit set in every w byte greater than m. 
@@ -6532,11 +6471,11 @@ if (dst[i] == src[i]) continue; expected_changed = true; if (is_to_lower) { - ASSERT('A' <= src[i] && src[i] <= 'Z'); - ASSERT(dst[i] == src[i] + ('a' - 'A')); + DCHECK('A' <= src[i] && src[i] <= 'Z'); + DCHECK(dst[i] == src[i] + ('a' - 'A')); } else { - ASSERT('a' <= src[i] && src[i] <= 'z'); - ASSERT(dst[i] == src[i] - ('a' - 'A')); + DCHECK('a' <= src[i] && src[i] <= 'z'); + DCHECK(dst[i] == src[i] - ('a' - 'A')); } } return (expected_changed == changed); @@ -6556,7 +6495,7 @@ DisallowHeapAllocation no_gc; // We rely on the distance between upper and lower case letters // being a known power of 2. - ASSERT('a' - 'A' == (1 << 5)); + DCHECK('a' - 'A' == (1 << 5)); // Boundaries for the range of input characters than require conversion. static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1; static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1; @@ -6608,7 +6547,7 @@ return false; } - ASSERT(CheckFastAsciiConvert( + DCHECK(CheckFastAsciiConvert( saved_dst, saved_src, length, changed, Converter::kIsToLower)); *changed_out = changed; @@ -6619,13 +6558,11 @@ template <class Converter> -MUST_USE_RESULT static MaybeObject* ConvertCase( - Arguments args, +MUST_USE_RESULT static Object* ConvertCase( + Handle<String> s, Isolate* isolate, unibrow::Mapping<Converter, 128>* mapping) { - HandleScope handle_scope(isolate); - CONVERT_ARG_HANDLE_CHECKED(String, s, 0); - s = FlattenGetString(s); + s = String::Flatten(s); int length = s->length(); // Assume that the string is not empty; we need this assumption later if (length == 0) return *s; @@ -6637,12 +6574,12 @@ // might break in the future if we implement more context and locale // dependent upper/lower conversions. if (s->IsOneByteRepresentationUnderneath()) { + // Same length as input. Handle<SeqOneByteString> result = - isolate->factory()->NewRawOneByteString(length); - ASSERT(!result.is_null()); // Same length as input. 
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked(); DisallowHeapAllocation no_gc; String::FlatContent flat_content = s->GetFlatContent(); - ASSERT(flat_content.IsFlat()); + DCHECK(flat_content.IsFlat()); bool has_changed_character = false; bool is_ascii = FastAsciiConvert<Converter>( reinterpret_cast<char*>(result->GetChars()), @@ -6650,56 +6587,60 @@ length, &has_changed_character); // If not ASCII, we discard the result and take the 2 byte path. - if (is_ascii) return has_changed_character ? *result : *s; + if (is_ascii) return has_changed_character ? *result : *s; } - Handle<SeqString> result; + Handle<SeqString> result; // Same length as input. if (s->IsOneByteRepresentation()) { - result = isolate->factory()->NewRawOneByteString(length); + result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked(); } else { - result = isolate->factory()->NewRawTwoByteString(length); + result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked(); } - ASSERT(!result.is_null()); // Same length as input. 
- MaybeObject* maybe = ConvertCaseHelper(isolate, *s, *result, length, mapping); - Object* answer; - if (!maybe->ToObject(&answer)) return maybe; - if (answer->IsString()) return answer; + Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping); + if (answer->IsException() || answer->IsString()) return answer; - ASSERT(answer->IsSmi()); + DCHECK(answer->IsSmi()); length = Smi::cast(answer)->value(); if (s->IsOneByteRepresentation() && length > 0) { - result = isolate->factory()->NewRawOneByteString(length); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, isolate->factory()->NewRawOneByteString(length)); } else { if (length < 0) length = -length; - result = isolate->factory()->NewRawTwoByteString(length); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, isolate->factory()->NewRawTwoByteString(length)); } - RETURN_IF_EMPTY_HANDLE(isolate, result); return ConvertCaseHelper(isolate, *s, *result, length, mapping); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) { +RUNTIME_FUNCTION(Runtime_StringToLowerCase) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, s, 0); return ConvertCase( - args, isolate, isolate->runtime_state()->to_lower_mapping()); + s, isolate, isolate->runtime_state()->to_lower_mapping()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) { +RUNTIME_FUNCTION(Runtime_StringToUpperCase) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, s, 0); return ConvertCase( - args, isolate, isolate->runtime_state()->to_upper_mapping()); + s, isolate, isolate->runtime_state()->to_upper_mapping()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) { +RUNTIME_FUNCTION(Runtime_StringTrim) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, string, 0); CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1); CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2); - 
string = FlattenGetString(string); + string = String::Flatten(string); int length = string->length(); int left = 0; @@ -6724,12 +6665,13 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { +RUNTIME_FUNCTION(Runtime_StringSplit) { HandleScope handle_scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1); CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]); + RUNTIME_ASSERT(limit > 0); int subject_length = subject->length(); int pattern_length = pattern->length(); @@ -6755,7 +6697,8 @@ // isn't empty, we can never create more parts than ~half the length // of the subject. - if (!subject->IsFlat()) FlattenString(subject); + subject = String::Flatten(subject); + pattern = String::Flatten(pattern); static const int kMaxInitialListCapacity = 16; @@ -6764,7 +6707,6 @@ // Find (up to limit) indices of separator and end-of-string in subject int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit); ZoneList<int> indices(initial_capacity, zone_scope.zone()); - if (!pattern->IsFlat()) FlattenString(pattern); FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone_scope.zone()); @@ -6782,7 +6724,7 @@ JSObject::EnsureCanContainHeapObjectElements(result); result->set_length(Smi::FromInt(part_count)); - ASSERT(result->HasFastObjectElements()); + DCHECK(result->HasFastObjectElements()); if (part_count == 1 && indices.at(0) == subject_length) { FixedArray::cast(result->elements())->set(0, *subject); @@ -6802,10 +6744,10 @@ if (limit == 0xffffffffu) { if (result->HasFastObjectElements()) { - RegExpResultsCache::Enter(isolate->heap(), - *subject, - *pattern, - *elements, + RegExpResultsCache::Enter(isolate, + subject, + pattern, + elements, RegExpResultsCache::STRING_SPLIT_SUBSTRINGS); } } @@ -6833,13 +6775,13 @@ elements->set(i, value, mode); } if (i < length) { - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); 
memset(elements->data_start() + i, 0, kPointerSize * (length - i)); } #ifdef DEBUG for (int j = 0; j < length; ++j) { Object* element = elements->get(j); - ASSERT(element == Smi::FromInt(0) || + DCHECK(element == Smi::FromInt(0) || (element->IsString() && String::cast(element)->LooksValid())); } #endif @@ -6849,25 +6791,21 @@ // Converts a String to JSArray. // For example, "foo" => ["f", "o", "o"]. -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) { +RUNTIME_FUNCTION(Runtime_StringToArray) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, s, 0); CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]); - s = FlattenGetString(s); + s = String::Flatten(s); const int length = static_cast<int>(Min<uint32_t>(s->length(), limit)); Handle<FixedArray> elements; int position = 0; if (s->IsFlat() && s->IsOneByteRepresentation()) { // Try using cached chars where possible. - Object* obj; - { MaybeObject* maybe_obj = - isolate->heap()->AllocateUninitializedFixedArray(length); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - elements = Handle<FixedArray>(FixedArray::cast(obj), isolate); + elements = isolate->factory()->NewUninitializedFixedArray(length); + DisallowHeapAllocation no_gc; String::FlatContent content = s->GetFlatContent(); if (content.IsAscii()) { @@ -6888,13 +6826,13 @@ } for (int i = position; i < length; ++i) { Handle<Object> str = - LookupSingleCharacterStringFromCode(isolate, s->Get(i)); + isolate->factory()->LookupSingleCharacterStringFromCode(s->Get(i)); elements->set(i, *str); } #ifdef DEBUG for (int i = 0; i < length; ++i) { - ASSERT(String::cast(elements->get(i))->length() == 1); + DCHECK(String::cast(elements->get(i))->length() == 1); } #endif @@ -6902,11 +6840,11 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(String, value, 0); - return value->ToObject(isolate); 
+RUNTIME_FUNCTION(Runtime_NewStringWrapper) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, value, 0); + return *Object::ToObject(isolate, value).ToHandleChecked(); } @@ -6917,91 +6855,70 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToString) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - - Object* number = args[0]; - RUNTIME_ASSERT(number->IsNumber()); +RUNTIME_FUNCTION(Runtime_NumberToStringRT) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0); - return isolate->heap()->NumberToString(number); + return *isolate->factory()->NumberToString(number); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToStringSkipCache) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - - Object* number = args[0]; - RUNTIME_ASSERT(number->IsNumber()); +RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0); - return isolate->heap()->NumberToString(number, false); + return *isolate->factory()->NumberToString(number, false); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_NumberToInteger) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(number, 0); - - // We do not include 0 so that we don't have to treat +0 / -0 cases. 
- if (number > 0 && number <= Smi::kMaxValue) { - return Smi::FromInt(static_cast<int>(number)); - } - return isolate->heap()->NumberFromDouble(DoubleToInteger(number)); + return *isolate->factory()->NewNumber(DoubleToInteger(number)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(number, 0); - - // We do not include 0 so that we don't have to treat +0 / -0 cases. - if (number > 0 && number <= Smi::kMaxValue) { - return Smi::FromInt(static_cast<int>(number)); - } - double double_value = DoubleToInteger(number); // Map both -0 and +0 to +0. if (double_value == 0) double_value = 0; - return isolate->heap()->NumberFromDouble(double_value); + return *isolate->factory()->NewNumber(double_value); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_NumberToJSUint32) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]); - return isolate->heap()->NumberFromUint32(number); + return *isolate->factory()->NewNumberFromUint(number); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_NumberToJSInt32) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(number, 0); - - // We do not include 0 so that we don't have to treat +0 / -0 cases. - if (number > 0 && number <= Smi::kMaxValue) { - return Smi::FromInt(static_cast<int>(number)); - } - return isolate->heap()->NumberFromInt32(DoubleToInt32(number)); + return *isolate->factory()->NewNumberFromInt(DoubleToInt32(number)); } // Converts a Number to a Smi, if possible. 
Returns NaN if the number is not // a small integer. -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToSmi) { +RUNTIME_FUNCTION(Runtime_NumberToSmi) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - - Object* obj = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); if (obj->IsSmi()) { return obj; } @@ -7016,101 +6933,94 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateHeapNumber) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 0); - return isolate->heap()->AllocateHeapNumber(0); +RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) { + HandleScope scope(isolate); + DCHECK(args.length() == 0); + return *isolate->factory()->NewHeapNumber(0); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberAdd) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - return isolate->heap()->NumberFromDouble(x + y); + return *isolate->factory()->NewNumber(x + y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberSub) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - return isolate->heap()->NumberFromDouble(x - y); + return *isolate->factory()->NewNumber(x - y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberMul) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - return isolate->heap()->NumberFromDouble(x * y); + return *isolate->factory()->NewNumber(x * y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); 
+RUNTIME_FUNCTION(Runtime_NumberUnaryMinus) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); - return isolate->heap()->NumberFromDouble(-x); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 0); - - return isolate->heap()->NumberFromDouble(9876543210.0); + return *isolate->factory()->NewNumber(-x); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberDiv) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - return isolate->heap()->NumberFromDouble(x / y); + return *isolate->factory()->NewNumber(x / y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberMod) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - - x = modulo(x, y); - // NumberFromDouble may return a Smi instead of a Number object - return isolate->heap()->NumberFromDouble(x); + return *isolate->factory()->NewNumber(modulo(x, y)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberImul) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); - CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); - CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromInt32(x * y); + // We rely on implementation-defined behavior below, but at least not on + // undefined behavior. 
+ CONVERT_NUMBER_CHECKED(uint32_t, x, Int32, args[0]); + CONVERT_NUMBER_CHECKED(uint32_t, y, Int32, args[1]); + int32_t product = static_cast<int32_t>(x * y); + return *isolate->factory()->NewNumberFromInt(product); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringAdd) { +RUNTIME_FUNCTION(Runtime_StringAdd) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, str1, 0); CONVERT_ARG_HANDLE_CHECKED(String, str2, 1); isolate->counters()->string_add_runtime()->Increment(); - Handle<String> result = isolate->factory()->NewConsString(str1, str2); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<String> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, isolate->factory()->NewConsString(str1, str2)); return *result; } @@ -7120,6 +7030,7 @@ sinkchar* sink, FixedArray* fixed_array, int array_length) { + DisallowHeapAllocation no_gc; int position = 0; for (int i = 0; i < array_length; i++) { Object* element = fixed_array->get(i); @@ -7135,7 +7046,7 @@ } else { // Position and length encoded in two smis. Object* obj = fixed_array->get(++i); - ASSERT(obj->IsSmi()); + DCHECK(obj->IsSmi()); pos = Smi::cast(obj)->value(); len = -encoded_slice; } @@ -7154,36 +7065,13 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { - HandleScope scope(isolate); - ASSERT(args.length() == 3); - CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); - if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength(); - int array_length = args.smi_at(1); - CONVERT_ARG_HANDLE_CHECKED(String, special, 2); - - // This assumption is used by the slice encoding in one or two smis. 
- ASSERT(Smi::kMaxValue >= String::kMaxLength); - - JSObject::EnsureCanContainHeapObjectElements(array); - - int special_length = special->length(); - if (!array->HasFastObjectElements()) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); - } - FixedArray* fixed_array = FixedArray::cast(array->elements()); - if (fixed_array->length() < array_length) { - array_length = fixed_array->length(); - } - - if (array_length == 0) { - return isolate->heap()->empty_string(); - } else if (array_length == 1) { - Object* first = fixed_array->get(0); - if (first->IsString()) return first; - } - - bool one_byte = special->HasOnlyOneByteChars(); +// Returns the result length of the concatenation. +// On illegal argument, -1 is returned. +static inline int StringBuilderConcatLength(int special_length, + FixedArray* fixed_array, + int array_length, + bool* one_byte) { + DisallowHeapAllocation no_gc; int position = 0; for (int i = 0; i < array_length; i++) { int increment = 0; @@ -7202,78 +7090,116 @@ len = -smi_value; // Get the position and check that it is a positive smi. 
i++; - if (i >= array_length) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); - } + if (i >= array_length) return -1; Object* next_smi = fixed_array->get(i); - if (!next_smi->IsSmi()) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); - } + if (!next_smi->IsSmi()) return -1; pos = Smi::cast(next_smi)->value(); - if (pos < 0) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); - } - } - ASSERT(pos >= 0); - ASSERT(len >= 0); - if (pos > special_length || len > special_length - pos) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); + if (pos < 0) return -1; } + DCHECK(pos >= 0); + DCHECK(len >= 0); + if (pos > special_length || len > special_length - pos) return -1; increment = len; } else if (elt->IsString()) { String* element = String::cast(elt); int element_length = element->length(); increment = element_length; - if (one_byte && !element->HasOnlyOneByteChars()) { - one_byte = false; + if (*one_byte && !element->HasOnlyOneByteChars()) { + *one_byte = false; } } else { - ASSERT(!elt->IsTheHole()); - return isolate->Throw(isolate->heap()->illegal_argument_string()); + return -1; } if (increment > String::kMaxLength - position) { - return isolate->ThrowInvalidStringLength(); + return kMaxInt; // Provoke throw on allocation. 
} position += increment; } + return position; +} - int length = position; - Object* object; - if (one_byte) { - { MaybeObject* maybe_object = - isolate->heap()->AllocateRawOneByteString(length); - if (!maybe_object->ToObject(&object)) return maybe_object; +RUNTIME_FUNCTION(Runtime_StringBuilderConcat) { + HandleScope scope(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); + if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength(); + CONVERT_SMI_ARG_CHECKED(array_length, 1); + CONVERT_ARG_HANDLE_CHECKED(String, special, 2); + + size_t actual_array_length = 0; + RUNTIME_ASSERT( + TryNumberToSize(isolate, array->length(), &actual_array_length)); + RUNTIME_ASSERT(array_length >= 0); + RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length); + + // This assumption is used by the slice encoding in one or two smis. + DCHECK(Smi::kMaxValue >= String::kMaxLength); + + RUNTIME_ASSERT(array->HasFastElements()); + JSObject::EnsureCanContainHeapObjectElements(array); + + int special_length = special->length(); + if (!array->HasFastObjectElements()) { + return isolate->Throw(isolate->heap()->illegal_argument_string()); + } + + int length; + bool one_byte = special->HasOnlyOneByteChars(); + + { DisallowHeapAllocation no_gc; + FixedArray* fixed_array = FixedArray::cast(array->elements()); + if (fixed_array->length() < array_length) { + array_length = fixed_array->length(); + } + + if (array_length == 0) { + return isolate->heap()->empty_string(); + } else if (array_length == 1) { + Object* first = fixed_array->get(0); + if (first->IsString()) return first; } - SeqOneByteString* answer = SeqOneByteString::cast(object); + length = StringBuilderConcatLength( + special_length, fixed_array, array_length, &one_byte); + } + + if (length == -1) { + return isolate->Throw(isolate->heap()->illegal_argument_string()); + } + + if (one_byte) { + Handle<SeqOneByteString> answer; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, 
answer, + isolate->factory()->NewRawOneByteString(length)); StringBuilderConcatHelper(*special, answer->GetChars(), - fixed_array, + FixedArray::cast(array->elements()), array_length); - return answer; + return *answer; } else { - { MaybeObject* maybe_object = - isolate->heap()->AllocateRawTwoByteString(length); - if (!maybe_object->ToObject(&object)) return maybe_object; - } - SeqTwoByteString* answer = SeqTwoByteString::cast(object); + Handle<SeqTwoByteString> answer; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, answer, + isolate->factory()->NewRawTwoByteString(length)); StringBuilderConcatHelper(*special, answer->GetChars(), - fixed_array, + FixedArray::cast(array->elements()), array_length); - return answer; + return *answer; } } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) { +RUNTIME_FUNCTION(Runtime_StringBuilderJoin) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength(); - int array_length = args.smi_at(1); + CONVERT_SMI_ARG_CHECKED(array_length, 1); CONVERT_ARG_HANDLE_CHECKED(String, separator, 2); RUNTIME_ASSERT(array->HasFastObjectElements()); + RUNTIME_ASSERT(array_length >= 0); Handle<FixedArray> fixed_array(FixedArray::cast(array->elements())); if (fixed_array->length() < array_length) { @@ -7289,6 +7215,7 @@ } int separator_length = separator->length(); + RUNTIME_ASSERT(separator_length > 0); int max_nof_separators = (String::kMaxLength + separator_length - 1) / separator_length; if (max_nof_separators < (array_length - 1)) { @@ -7308,9 +7235,10 @@ length += increment; } - Handle<SeqTwoByteString> answer = - isolate->factory()->NewRawTwoByteString(length); - RETURN_IF_EMPTY_HANDLE(isolate, answer); + Handle<SeqTwoByteString> answer; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, answer, + isolate->factory()->NewRawTwoByteString(length)); DisallowHeapAllocation no_gc; @@ -7319,27 
+7247,29 @@ uc16* end = sink + length; #endif + RUNTIME_ASSERT(fixed_array->get(0)->IsString()); String* first = String::cast(fixed_array->get(0)); - String* seperator_raw = *separator; + String* separator_raw = *separator; int first_length = first->length(); String::WriteToFlat(first, sink, 0, first_length); sink += first_length; for (int i = 1; i < array_length; i++) { - ASSERT(sink + separator_length <= end); - String::WriteToFlat(seperator_raw, sink, 0, separator_length); + DCHECK(sink + separator_length <= end); + String::WriteToFlat(separator_raw, sink, 0, separator_length); sink += separator_length; + RUNTIME_ASSERT(fixed_array->get(i)->IsString()); String* element = String::cast(fixed_array->get(i)); int element_length = element->length(); - ASSERT(sink + element_length <= end); + DCHECK(sink + element_length <= end); String::WriteToFlat(element, sink, 0, element_length); sink += element_length; } - ASSERT(sink == end); + DCHECK(sink == end); // Use %_FastAsciiArrayJoin instead. - ASSERT(!answer->IsOneByteRepresentation()); + DCHECK(!answer->IsOneByteRepresentation()); return *answer; } @@ -7349,6 +7279,7 @@ uint32_t array_length, String* separator, Vector<Char> buffer) { + DisallowHeapAllocation no_gc; int previous_separator_position = 0; int separator_length = separator->length(); int cursor = 0; @@ -7371,7 +7302,7 @@ if (separator_length > 0) { // Array length must be representable as a signed 32-bit number, // otherwise the total string length would have been too large. - ASSERT(array_length <= 0x7fffffff); // Is int32_t. + DCHECK(array_length <= 0x7fffffff); // Is int32_t. 
int last_array_index = static_cast<int>(array_length - 1); while (previous_separator_position < last_array_index) { String::WriteToFlat<Char>(separator, &buffer[cursor], @@ -7380,45 +7311,54 @@ previous_separator_position++; } } - ASSERT(cursor <= buffer.length()); + DCHECK(cursor <= buffer.length()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { +RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) { HandleScope scope(isolate); - ASSERT(args.length() == 3); - CONVERT_ARG_CHECKED(JSArray, elements_array, 0); - RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements()); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0); CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]); - CONVERT_ARG_CHECKED(String, separator, 2); + CONVERT_ARG_HANDLE_CHECKED(String, separator, 2); // elements_array is fast-mode JSarray of alternating positions // (increasing order) and strings. + RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements()); // array_length is length of original array (used to add separators); // separator is string to put between elements. Assumed to be non-empty. + RUNTIME_ASSERT(array_length > 0); // Find total length of join result. int string_length = 0; bool is_ascii = separator->IsOneByteRepresentation(); bool overflow = false; - CONVERT_NUMBER_CHECKED(int, elements_length, - Int32, elements_array->length()); + CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length()); + RUNTIME_ASSERT(elements_length <= elements_array->elements()->length()); RUNTIME_ASSERT((elements_length & 1) == 0); // Even length. 
FixedArray* elements = FixedArray::cast(elements_array->elements()); for (int i = 0; i < elements_length; i += 2) { RUNTIME_ASSERT(elements->get(i)->IsNumber()); + CONVERT_NUMBER_CHECKED(uint32_t, position, Uint32, elements->get(i)); + RUNTIME_ASSERT(position < array_length); RUNTIME_ASSERT(elements->get(i + 1)->IsString()); - String* string = String::cast(elements->get(i + 1)); - int length = string->length(); - if (is_ascii && !string->IsOneByteRepresentation()) { - is_ascii = false; - } - if (length > String::kMaxLength || - String::kMaxLength - length < string_length) { - overflow = true; - break; + } + + { DisallowHeapAllocation no_gc; + for (int i = 0; i < elements_length; i += 2) { + String* string = String::cast(elements->get(i + 1)); + int length = string->length(); + if (is_ascii && !string->IsOneByteRepresentation()) { + is_ascii = false; + } + if (length > String::kMaxLength || + String::kMaxLength - length < string_length) { + overflow = true; + break; + } + string_length += length; } - string_length += length; } + int separator_length = separator->length(); if (!overflow && separator_length > 0) { if (array_length <= 0x7fffffffu) { @@ -7445,99 +7385,93 @@ } if (is_ascii) { - MaybeObject* result_allocation = - isolate->heap()->AllocateRawOneByteString(string_length); - if (result_allocation->IsFailure()) return result_allocation; - SeqOneByteString* result_string = - SeqOneByteString::cast(result_allocation->ToObjectUnchecked()); - JoinSparseArrayWithSeparator<uint8_t>(elements, - elements_length, - array_length, - separator, - Vector<uint8_t>( - result_string->GetChars(), - string_length)); - return result_string; - } else { - MaybeObject* result_allocation = - isolate->heap()->AllocateRawTwoByteString(string_length); - if (result_allocation->IsFailure()) return result_allocation; - SeqTwoByteString* result_string = - SeqTwoByteString::cast(result_allocation->ToObjectUnchecked()); - JoinSparseArrayWithSeparator<uc16>(elements, - elements_length, - 
array_length, - separator, - Vector<uc16>(result_string->GetChars(), - string_length)); - return result_string; + Handle<SeqOneByteString> result = isolate->factory()->NewRawOneByteString( + string_length).ToHandleChecked(); + JoinSparseArrayWithSeparator<uint8_t>( + FixedArray::cast(elements_array->elements()), + elements_length, + array_length, + *separator, + Vector<uint8_t>(result->GetChars(), string_length)); + return *result; + } else { + Handle<SeqTwoByteString> result = isolate->factory()->NewRawTwoByteString( + string_length).ToHandleChecked(); + JoinSparseArrayWithSeparator<uc16>( + FixedArray::cast(elements_array->elements()), + elements_length, + array_length, + *separator, + Vector<uc16>(result->GetChars(), string_length)); + return *result; } } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberOr) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromInt32(x | y); + return *isolate->factory()->NewNumberFromInt(x | y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberAnd) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromInt32(x & y); + return *isolate->factory()->NewNumberFromInt(x & y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberXor) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromInt32(x ^ y); + return 
*isolate->factory()->NewNumberFromInt(x ^ y); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberShl) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromInt32(x << (y & 0x1f)); + return *isolate->factory()->NewNumberFromInt(x << (y & 0x1f)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberShr) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]); CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromUint32(x >> (y & 0x1f)); + return *isolate->factory()->NewNumberFromUint(x >> (y & 0x1f)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_NumberSar) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); - return isolate->heap()->NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f)); + return *isolate->factory()->NewNumberFromInt( + ArithmeticShiftRight(x, y & 0x1f)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) { +RUNTIME_FUNCTION(Runtime_NumberEquals) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); @@ -7554,31 +7488,32 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_StringEquals) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 2); - CONVERT_ARG_CHECKED(String, x, 0); - 
CONVERT_ARG_CHECKED(String, y, 1); + CONVERT_ARG_HANDLE_CHECKED(String, x, 0); + CONVERT_ARG_HANDLE_CHECKED(String, y, 1); - bool not_equal = !x->Equals(y); + bool not_equal = !String::Equals(x, y); // This is slightly convoluted because the value that signifies // equality is 0 and inequality is 1 so we have to negate the result // from String::Equals. - ASSERT(not_equal == 0 || not_equal == 1); - STATIC_CHECK(EQUAL == 0); - STATIC_CHECK(NOT_EQUAL == 1); + DCHECK(not_equal == 0 || not_equal == 1); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(NOT_EQUAL == 1); return Smi::FromInt(not_equal); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) { +RUNTIME_FUNCTION(Runtime_NumberCompare) { SealHandleScope shs(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - if (std::isnan(x) || std::isnan(y)) return args[2]; + CONVERT_ARG_HANDLE_CHECKED(Object, uncomparable_result, 2) + if (std::isnan(x) || std::isnan(y)) return *uncomparable_result; if (x == y) return Smi::FromInt(EQUAL); if (isless(x, y)) return Smi::FromInt(LESS); return Smi::FromInt(GREATER); @@ -7587,9 +7522,9 @@ // Compare two Smis as if they were converted to strings and then // compared lexicographically. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) { +RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_SMI_ARG_CHECKED(x_value, 0); CONVERT_SMI_ARG_CHECKED(y_value, 1); @@ -7662,27 +7597,33 @@ } -static Object* StringCharacterStreamCompare(RuntimeState* state, - String* x, - String* y) { - StringCharacterStream stream_x(x, state->string_iterator_compare_x()); - StringCharacterStream stream_y(y, state->string_iterator_compare_y()); - while (stream_x.HasMore() && stream_y.HasMore()) { - int d = stream_x.GetNext() - stream_y.GetNext(); - if (d < 0) return Smi::FromInt(LESS); - else if (d > 0) return Smi::FromInt(GREATER); - } - - // x is (non-trivial) prefix of y: - if (stream_y.HasMore()) return Smi::FromInt(LESS); - // y is prefix of x: - return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL); -} +RUNTIME_FUNCTION(Runtime_StringCompare) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 2); + + CONVERT_ARG_HANDLE_CHECKED(String, x, 0); + CONVERT_ARG_HANDLE_CHECKED(String, y, 1); + + isolate->counters()->string_compare_runtime()->Increment(); + + // A few fast case tests before we flatten. + if (x.is_identical_to(y)) return Smi::FromInt(EQUAL); + if (y->length() == 0) { + if (x->length() == 0) return Smi::FromInt(EQUAL); + return Smi::FromInt(GREATER); + } else if (x->length() == 0) { + return Smi::FromInt(LESS); + } + int d = x->Get(0) - y->Get(0); + if (d < 0) return Smi::FromInt(LESS); + else if (d > 0) return Smi::FromInt(GREATER); + + // Slow case. 
+ x = String::Flatten(x); + y = String::Flatten(y); -static Object* FlatStringCompare(String* x, String* y) { - ASSERT(x->IsFlat()); - ASSERT(y->IsFlat()); + DisallowHeapAllocation no_gc; Object* equal_prefix_result = Smi::FromInt(EQUAL); int prefix_length = x->length(); if (y->length() < prefix_length) { @@ -7692,7 +7633,6 @@ equal_prefix_result = Smi::FromInt(LESS); } int r; - DisallowHeapAllocation no_gc; String::FlatContent x_content = x->GetFlatContent(); String::FlatContent y_content = y->GetFlatContent(); if (x_content.IsAscii()) { @@ -7720,98 +7660,78 @@ } else { result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER); } - ASSERT(result == - StringCharacterStreamCompare(x->GetIsolate()->runtime_state(), x, y)); return result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringCompare) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - - CONVERT_ARG_CHECKED(String, x, 0); - CONVERT_ARG_CHECKED(String, y, 1); - - isolate->counters()->string_compare_runtime()->Increment(); - - // A few fast case tests before we flatten. - if (x == y) return Smi::FromInt(EQUAL); - if (y->length() == 0) { - if (x->length() == 0) return Smi::FromInt(EQUAL); - return Smi::FromInt(GREATER); - } else if (x->length() == 0) { - return Smi::FromInt(LESS); - } - - int d = x->Get(0) - y->Get(0); - if (d < 0) return Smi::FromInt(LESS); - else if (d > 0) return Smi::FromInt(GREATER); - - Object* obj; - { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(x); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(y); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - - return (x->IsFlat() && y->IsFlat()) ? 
FlatStringCompare(x, y) - : StringCharacterStreamCompare(isolate->runtime_state(), x, y); -} - - -#define RUNTIME_UNARY_MATH(NAME) \ -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_##NAME) { \ - SealHandleScope shs(isolate); \ - ASSERT(args.length() == 1); \ - isolate->counters()->math_##NAME()->Increment(); \ +#define RUNTIME_UNARY_MATH(Name, name) \ +RUNTIME_FUNCTION(Runtime_Math##Name) { \ + HandleScope scope(isolate); \ + DCHECK(args.length() == 1); \ + isolate->counters()->math_##name()->Increment(); \ CONVERT_DOUBLE_ARG_CHECKED(x, 0); \ - return isolate->heap()->AllocateHeapNumber(std::NAME(x)); \ + return *isolate->factory()->NewHeapNumber(std::name(x)); \ } -RUNTIME_UNARY_MATH(acos) -RUNTIME_UNARY_MATH(asin) -RUNTIME_UNARY_MATH(atan) -RUNTIME_UNARY_MATH(log) +RUNTIME_UNARY_MATH(Acos, acos) +RUNTIME_UNARY_MATH(Asin, asin) +RUNTIME_UNARY_MATH(Atan, atan) +RUNTIME_UNARY_MATH(LogRT, log) #undef RUNTIME_UNARY_MATH -RUNTIME_FUNCTION(MaybeObject*, Runtime_DoubleHi) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_DoubleHi) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); uint64_t integer = double_to_uint64(x); integer = (integer >> 32) & 0xFFFFFFFFu; - return isolate->heap()->NumberFromDouble(static_cast<int32_t>(integer)); + return *isolate->factory()->NewNumber(static_cast<int32_t>(integer)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DoubleLo) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_DoubleLo) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); - return isolate->heap()->NumberFromDouble( + return *isolate->factory()->NewNumber( static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ConstructDouble) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_ConstructDouble) { + HandleScope scope(isolate); + 
DCHECK(args.length() == 2); CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]); CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]); uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo; - return isolate->heap()->AllocateHeapNumber(uint64_to_double(result)); + return *isolate->factory()->NewNumber(uint64_to_double(result)); +} + + +RUNTIME_FUNCTION(Runtime_RemPiO2) { + HandleScope handle_scope(isolate); + DCHECK(args.length() == 1); + CONVERT_DOUBLE_ARG_CHECKED(x, 0); + Factory* factory = isolate->factory(); + double y[2]; + int n = fdlibm::rempio2(x, y); + Handle<FixedArray> array = factory->NewFixedArray(3); + Handle<HeapNumber> y0 = factory->NewHeapNumber(y[0]); + Handle<HeapNumber> y1 = factory->NewHeapNumber(y[1]); + array->set(0, Smi::FromInt(n)); + array->set(1, *y0); + array->set(2, *y1); + return *factory->NewJSArrayWithElements(array); } static const double kPiDividedBy4 = 0.78539816339744830962; -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_MathAtan2) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); isolate->counters()->math_atan2()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -7828,36 +7748,36 @@ } else { result = std::atan2(x, y); } - return isolate->heap()->AllocateHeapNumber(result); + return *isolate->factory()->NewNumber(result); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_MathExpRT) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); isolate->counters()->math_exp()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); lazily_initialize_fast_exp(); - return isolate->heap()->NumberFromDouble(fast_exp(x)); + return *isolate->factory()->NewNumber(fast_exp(x)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_MathFloorRT) { + HandleScope 
scope(isolate); + DCHECK(args.length() == 1); isolate->counters()->math_floor()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); - return isolate->heap()->NumberFromDouble(std::floor(x)); + return *isolate->factory()->NewNumber(Floor(x)); } // Slow version of Math.pow. We check for fast paths for special cases. -// Used if SSE2/VFP3 is not available. -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +// Used if VFP3 is not available. +RUNTIME_FUNCTION(Runtime_MathPowSlow) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); isolate->counters()->math_pow()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -7866,21 +7786,21 @@ // custom powi() function than the generic pow(). if (args[1]->IsSmi()) { int y = args.smi_at(1); - return isolate->heap()->NumberFromDouble(power_double_int(x, y)); + return *isolate->factory()->NewNumber(power_double_int(x, y)); } CONVERT_DOUBLE_ARG_CHECKED(y, 1); double result = power_helper(x, y); if (std::isnan(result)) return isolate->heap()->nan_value(); - return isolate->heap()->AllocateHeapNumber(result); + return *isolate->factory()->NewNumber(result); } // Fast version of Math.pow if we know that y is not an integer and y is not // -0.5 or 0.5. Used as slow case from full codegen. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_MathPowRT) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); isolate->counters()->math_pow()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -7890,23 +7810,23 @@ } else { double result = power_double_double(x, y); if (std::isnan(result)) return isolate->heap()->nan_value(); - return isolate->heap()->AllocateHeapNumber(result); + return *isolate->factory()->NewNumber(result); } } -RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_RoundNumber) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0); isolate->counters()->math_round()->Increment(); - if (!args[0]->IsHeapNumber()) { - // Must be smi. Return the argument unchanged for all the other types - // to make fuzz-natives test happy. - return args[0]; + if (!input->IsHeapNumber()) { + DCHECK(input->IsSmi()); + return *input; } - HeapNumber* number = reinterpret_cast<HeapNumber*>(args[0]); + Handle<HeapNumber> number = Handle<HeapNumber>::cast(input); double value = number->value(); int exponent = number->get_exponent(); @@ -7928,50 +7848,52 @@ // If the magnitude is big enough, there's no place for fraction part. If we // try to add 0.5 to this number, 1.0 will be added instead. if (exponent >= 52) { - return number; + return *number; } if (sign && value >= -0.5) return isolate->heap()->minus_zero_value(); // Do not call NumberFromDouble() to avoid extra checks. 
- return isolate->heap()->AllocateHeapNumber(std::floor(value + 0.5)); + return *isolate->factory()->NewNumber(Floor(value + 0.5)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_MathSqrtRT) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); isolate->counters()->math_sqrt()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); - return isolate->heap()->AllocateHeapNumber(fast_sqrt(x)); + return *isolate->factory()->NewNumber(fast_sqrt(x)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_fround) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_MathFround) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); float xf = static_cast<float>(x); - return isolate->heap()->AllocateHeapNumber(xf); + return *isolate->factory()->NewNumber(xf); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) { +RUNTIME_FUNCTION(Runtime_DateMakeDay) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_SMI_ARG_CHECKED(year, 0); CONVERT_SMI_ARG_CHECKED(month, 1); - return Smi::FromInt(isolate->date_cache()->DaysFromYearMonth(year, month)); + int days = isolate->date_cache()->DaysFromYearMonth(year, month); + RUNTIME_ASSERT(Smi::IsValid(days)); + return Smi::FromInt(days); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) { +RUNTIME_FUNCTION(Runtime_DateSetValue) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0); CONVERT_DOUBLE_ARG_CHECKED(time, 1); @@ -7979,43 +7901,38 @@ DateCache* date_cache = isolate->date_cache(); - Object* value = NULL; + Handle<Object> value;; bool is_value_nan = false; if (std::isnan(time)) { - value = isolate->heap()->nan_value(); + value = isolate->factory()->nan_value(); is_value_nan = true; } else if (!is_utc && (time < 
-DateCache::kMaxTimeBeforeUTCInMs || time > DateCache::kMaxTimeBeforeUTCInMs)) { - value = isolate->heap()->nan_value(); + value = isolate->factory()->nan_value(); is_value_nan = true; } else { time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time)); if (time < -DateCache::kMaxTimeInMs || time > DateCache::kMaxTimeInMs) { - value = isolate->heap()->nan_value(); + value = isolate->factory()->nan_value(); is_value_nan = true; } else { - MaybeObject* maybe_result = - isolate->heap()->AllocateHeapNumber(DoubleToInteger(time)); - if (!maybe_result->ToObject(&value)) return maybe_result; + value = isolate->factory()->NewNumber(DoubleToInteger(time)); } } - date->SetValue(value, is_value_nan); - return value; + date->SetValue(*value, is_value_nan); + return *value; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewArgumentsFast) { - HandleScope scope(isolate); - ASSERT(args.length() == 3); - - Handle<JSFunction> callee = args.at<JSFunction>(0); - Object** parameters = reinterpret_cast<Object**>(args[1]); - const int argument_count = Smi::cast(args[2])->value(); - +static Handle<JSObject> NewSloppyArguments(Isolate* isolate, + Handle<JSFunction> callee, + Object** parameters, + int argument_count) { Handle<JSObject> result = isolate->factory()->NewArgumentsObject(callee, argument_count); + // Allocate the elements if needed. 
int parameter_count = callee->shared()->formal_parameter_count(); if (argument_count > 0) { @@ -8026,11 +7943,10 @@ parameter_map->set_map( isolate->heap()->sloppy_arguments_elements_map()); - Handle<Map> old_map(result->map()); - Handle<Map> new_map = isolate->factory()->CopyMap(old_map); - new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS); + Handle<Map> map = Map::Copy(handle(result->map())); + map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS); - result->set_map(*new_map); + result->set_map(*map); result->set_elements(*parameter_map); // Store the context and the arguments array at the beginning of the @@ -8078,7 +7994,7 @@ break; } } - ASSERT(context_index >= 0); + DCHECK(context_index >= 0); arguments->set_the_hole(index); parameter_map->set(index + 2, Smi::FromInt( Context::MIN_CONTEXT_SLOTS + context_index)); @@ -8097,60 +8013,84 @@ } } } - return *result; + return result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewStrictArgumentsFast) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 3); - - JSFunction* callee = JSFunction::cast(args[0]); - Object** parameters = reinterpret_cast<Object**>(args[1]); - const int length = args.smi_at(2); - - Object* result; - { MaybeObject* maybe_result = - isolate->heap()->AllocateArgumentsObject(callee, length); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - // Allocate the elements if needed. - if (length > 0) { - // Allocate the fixed array. 
- FixedArray* array; - { MaybeObject* maybe_obj = - isolate->heap()->AllocateUninitializedFixedArray(length); - if (!maybe_obj->To(&array)) return maybe_obj; - } +static Handle<JSObject> NewStrictArguments(Isolate* isolate, + Handle<JSFunction> callee, + Object** parameters, + int argument_count) { + Handle<JSObject> result = + isolate->factory()->NewArgumentsObject(callee, argument_count); + if (argument_count > 0) { + Handle<FixedArray> array = + isolate->factory()->NewUninitializedFixedArray(argument_count); DisallowHeapAllocation no_gc; WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc); - for (int i = 0; i < length; i++) { + for (int i = 0; i < argument_count; i++) { array->set(i, *--parameters, mode); } - JSObject::cast(result)->set_elements(array); + result->set_elements(*array); } return result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewClosureFromStubFailure) { +RUNTIME_FUNCTION(Runtime_NewArguments) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0); + JavaScriptFrameIterator it(isolate); + + // Find the frame that holds the actual arguments passed to the function. + it.AdvanceToArgumentsFrame(); + JavaScriptFrame* frame = it.frame(); + + // Determine parameter location on the stack and dispatch on language mode. + int argument_count = frame->GetArgumentsLength(); + Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1)); + return callee->shared()->strict_mode() == STRICT + ? 
*NewStrictArguments(isolate, callee, parameters, argument_count) + : *NewSloppyArguments(isolate, callee, parameters, argument_count); +} + + +RUNTIME_FUNCTION(Runtime_NewSloppyArguments) { + HandleScope scope(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0); + Object** parameters = reinterpret_cast<Object**>(args[1]); + CONVERT_SMI_ARG_CHECKED(argument_count, 2); + return *NewSloppyArguments(isolate, callee, parameters, argument_count); +} + + +RUNTIME_FUNCTION(Runtime_NewStrictArguments) { + HandleScope scope(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0) + Object** parameters = reinterpret_cast<Object**>(args[1]); + CONVERT_SMI_ARG_CHECKED(argument_count, 2); + return *NewStrictArguments(isolate, callee, parameters, argument_count); +} + + +RUNTIME_FUNCTION(Runtime_NewClosureFromStubFailure) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0); Handle<Context> context(isolate->context()); PretenureFlag pretenure_flag = NOT_TENURED; - Handle<JSFunction> result = - isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, - context, - pretenure_flag); - return *result; + return *isolate->factory()->NewFunctionFromSharedFunctionInfo( + shared, context, pretenure_flag); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewClosure) { +RUNTIME_FUNCTION(Runtime_NewClosure) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(Context, context, 0); CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1); CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2); @@ -8158,11 +8098,8 @@ // The caller ensures that we pretenure closures that are assigned // directly to properties. PretenureFlag pretenure_flag = pretenure ? 
TENURED : NOT_TENURED; - Handle<JSFunction> result = - isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, - context, - pretenure_flag); - return *result; + return *isolate->factory()->NewFunctionFromSharedFunctionInfo( + shared, context, pretenure_flag); } @@ -8216,12 +8153,13 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) { +RUNTIME_FUNCTION(Runtime_FunctionBindArguments) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0); - RUNTIME_ASSERT(args[3]->IsNumber()); - Handle<Object> bindee = args.at<Object>(1); + CONVERT_ARG_HANDLE_CHECKED(Object, bindee, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, this_object, 2); + CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3); // TODO(lrn): Create bound function in C++ code from premade shared info. bound_function->shared()->set_bound(true); @@ -8231,10 +8169,10 @@ GetCallerArguments(isolate, 0, &argc); // Don't count the this-arg. if (argc > 0) { - ASSERT(*arguments[0] == args[2]); + RUNTIME_ASSERT(arguments[0].is_identical_to(this_object)); argc--; } else { - ASSERT(args[2]->IsUndefined()); + RUNTIME_ASSERT(this_object->IsUndefined()); } // Initialize array of bindings (function, this, and any existing arguments // if the function was already bound). 
@@ -8243,6 +8181,7 @@ if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) { Handle<FixedArray> old_bindings( JSFunction::cast(*bindee)->function_bindings()); + RUNTIME_ASSERT(old_bindings->length() > JSFunction::kBoundFunctionIndex); new_bindings = isolate->factory()->NewFixedArray(old_bindings->length() + argc); bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex), @@ -8255,7 +8194,7 @@ int array_size = JSFunction::kBoundArgumentsStartIndex + argc; new_bindings = isolate->factory()->NewFixedArray(array_size); new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee); - new_bindings->set(JSFunction::kBoundThisIndex, args[2]); + new_bindings->set(JSFunction::kBoundThisIndex, *this_object); i = 2; } // Copy arguments, skipping the first which is "this_arg". @@ -8266,25 +8205,32 @@ isolate->heap()->fixed_cow_array_map()); bound_function->set_function_bindings(*new_bindings); - // Update length. + // Update length. Have to remove the prototype first so that map migration + // is happy about the number of fields. 
+ RUNTIME_ASSERT(bound_function->RemovePrototype()); + Handle<Map> bound_function_map( + isolate->native_context()->bound_function_map()); + JSObject::MigrateToMap(bound_function, bound_function_map); Handle<String> length_string = isolate->factory()->length_string(); - Handle<Object> new_length(args.at<Object>(3)); PropertyAttributes attr = static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY); - ForceSetProperty(bound_function, length_string, new_length, attr); + RETURN_FAILURE_ON_EXCEPTION( + isolate, + JSObject::SetOwnPropertyIgnoreAttributes( + bound_function, length_string, new_length, attr)); return *bound_function; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) { +RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) { HandleScope handles(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0); if (callable->IsJSFunction()) { Handle<JSFunction> function = Handle<JSFunction>::cast(callable); if (function->shared()->bound()) { Handle<FixedArray> bindings(function->function_bindings()); - ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map()); + RUNTIME_ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map()); return *isolate->factory()->NewJSArrayWithElements(bindings); } } @@ -8292,9 +8238,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) { +RUNTIME_FUNCTION(Runtime_NewObjectFromBound) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); // First argument is a function to use as a constructor. 
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); RUNTIME_ASSERT(function->shared()->bound()); @@ -8307,7 +8253,7 @@ Handle<Object> bound_function( JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)), isolate); - ASSERT(!bound_function->IsJSFunction() || + DCHECK(!bound_function->IsJSFunction() || !Handle<JSFunction>::cast(bound_function)->shared()->bound()); int total_argc = 0; @@ -8319,27 +8265,22 @@ } if (!bound_function->IsJSFunction()) { - bool exception_thrown; - bound_function = Execution::TryGetConstructorDelegate(isolate, - bound_function, - &exception_thrown); - if (exception_thrown) return Failure::Exception(); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, bound_function, + Execution::TryGetConstructorDelegate(isolate, bound_function)); } - ASSERT(bound_function->IsJSFunction()); + DCHECK(bound_function->IsJSFunction()); - bool exception = false; - Handle<Object> result = + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, Execution::New(Handle<JSFunction>::cast(bound_function), - total_argc, param_data.get(), &exception); - if (exception) { - return Failure::Exception(); - } - ASSERT(!result.is_null()); + total_argc, param_data.get())); return *result; } -static MaybeObject* Runtime_NewObjectHelper(Isolate* isolate, +static Object* Runtime_NewObjectHelper(Isolate* isolate, Handle<Object> constructor, Handle<AllocationSite> site) { // If the constructor isn't a proper function we throw a type error. @@ -8361,13 +8302,11 @@ return isolate->Throw(*type_error); } -#ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate->debug(); // Handle stepping into constructors if step into is active. if (debug->StepInActive()) { debug->HandleStepIn(function, Handle<Object>::null(), 0, true); } -#endif if (function->has_initial_map()) { if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) { @@ -8382,7 +8321,7 @@ // instead of a new JSFunction object. 
This way, errors are // reported the same way whether or not 'Function' is called // using 'new'. - return isolate->context()->global_object(); + return isolate->global_proxy(); } } @@ -8390,22 +8329,12 @@ // available. Compiler::EnsureCompiled(function, CLEAR_EXCEPTION); - Handle<SharedFunctionInfo> shared(function->shared(), isolate); - if (!function->has_initial_map() && - shared->IsInobjectSlackTrackingInProgress()) { - // The tracking is already in progress for another function. We can only - // track one initial_map at a time, so we force the completion before the - // function is called as a constructor for the first time. - shared->CompleteInobjectSlackTracking(); - } - Handle<JSObject> result; if (site.is_null()) { result = isolate->factory()->NewJSObject(function); } else { result = isolate->factory()->NewJSObjectWithMemento(function, site); } - RETURN_IF_EMPTY_HANDLE(isolate, result); isolate->counters()->constructed_objects()->Increment(); isolate->counters()->constructed_objects_runtime()->Increment(); @@ -8414,50 +8343,45 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewObject) { +RUNTIME_FUNCTION(Runtime_NewObject) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - - Handle<Object> constructor = args.at<Object>(0); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0); return Runtime_NewObjectHelper(isolate, constructor, Handle<AllocationSite>::null()); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewObjectWithAllocationSite) { +RUNTIME_FUNCTION(Runtime_NewObjectWithAllocationSite) { HandleScope scope(isolate); - ASSERT(args.length() == 2); - - Handle<Object> constructor = args.at<Object>(1); - Handle<Object> feedback = args.at<Object>(0); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, feedback, 0); Handle<AllocationSite> site; if (feedback->IsAllocationSite()) { // The feedback can be an AllocationSite or undefined. 
site = Handle<AllocationSite>::cast(feedback); } - return Runtime_NewObjectHelper(isolate, - constructor, - site); + return Runtime_NewObjectHelper(isolate, constructor, site); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_FinalizeInstanceSize) { +RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); - function->shared()->CompleteInobjectSlackTracking(); + function->CompleteInobjectSlackTracking(); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CompileUnoptimized) { +RUNTIME_FUNCTION(Runtime_CompileUnoptimized) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - - Handle<JSFunction> function = args.at<JSFunction>(0); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); #ifdef DEBUG if (FLAG_trace_lazy && !function->shared()->is_compiled()) { PrintF("[unoptimized: "); @@ -8467,25 +8391,26 @@ #endif // Compile the target function. - ASSERT(function->shared()->allows_lazy_compilation()); + DCHECK(function->shared()->allows_lazy_compilation()); - Handle<Code> code = Compiler::GetUnoptimizedCode(function); - RETURN_IF_EMPTY_HANDLE(isolate, code); + Handle<Code> code; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code, + Compiler::GetUnoptimizedCode(function)); function->ReplaceCode(*code); // All done. Return the compiled code. 
- ASSERT(function->is_compiled()); - ASSERT(function->code()->kind() == Code::FUNCTION || + DCHECK(function->is_compiled()); + DCHECK(function->code()->kind() == Code::FUNCTION || (FLAG_always_opt && function->code()->kind() == Code::OPTIMIZED_FUNCTION)); return *code; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CompileOptimized) { +RUNTIME_FUNCTION(Runtime_CompileOptimized) { HandleScope scope(isolate); - ASSERT(args.length() == 2); - Handle<JSFunction> function = args.at<JSFunction>(0); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1); Handle<Code> unoptimized(function->shared()->code()); @@ -8511,11 +8436,16 @@ } else { Compiler::ConcurrencyMode mode = concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT; - Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized, mode); - function->ReplaceCode(code.is_null() ? *unoptimized : *code); + Handle<Code> code; + if (Compiler::GetOptimizedCode( + function, unoptimized, mode).ToHandle(&code)) { + function->ReplaceCode(*code); + } else { + function->ReplaceCode(*unoptimized); + } } - ASSERT(function->code()->kind() == Code::FUNCTION || + DCHECK(function->code()->kind() == Code::FUNCTION || function->code()->kind() == Code::OPTIMIZED_FUNCTION || function->IsInOptimizationQueue()); return function->code(); @@ -8545,30 +8475,30 @@ }; -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NotifyStubFailure) { +RUNTIME_FUNCTION(Runtime_NotifyStubFailure) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); - ASSERT(AllowHeapAllocation::IsAllowed()); + DCHECK(AllowHeapAllocation::IsAllowed()); delete deoptimizer; return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NotifyDeoptimized) { +RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - 
RUNTIME_ASSERT(args[0]->IsSmi()); + DCHECK(args.length() == 1); + CONVERT_SMI_ARG_CHECKED(type_arg, 0); Deoptimizer::BailoutType type = - static_cast<Deoptimizer::BailoutType>(args.smi_at(0)); + static_cast<Deoptimizer::BailoutType>(type_arg); Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); - ASSERT(AllowHeapAllocation::IsAllowed()); + DCHECK(AllowHeapAllocation::IsAllowed()); Handle<JSFunction> function = deoptimizer->function(); Handle<Code> optimized_code = deoptimizer->compiled_code(); - ASSERT(optimized_code->kind() == Code::OPTIMIZED_FUNCTION); - ASSERT(type == deoptimizer->bailout_type()); + DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(type == deoptimizer->bailout_type()); // Make sure to materialize objects before causing any allocation. JavaScriptFrameIterator it(isolate); @@ -8577,7 +8507,7 @@ JavaScriptFrame* frame = it.frame(); RUNTIME_ASSERT(frame->function()->IsJSFunction()); - ASSERT(frame->function() == *function); + DCHECK(frame->function() == *function); // Avoid doing too much work when running with --always-opt and keep // the optimized code around. @@ -8614,33 +8544,39 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) { +RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); if (!function->IsOptimized()) return isolate->heap()->undefined_value(); + // TODO(turbofan): Deoptimization is not supported yet. 
+ if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) { + return isolate->heap()->undefined_value(); + } + Deoptimizer::DeoptimizeFunction(*function); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) { +RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + function->shared()->ClearTypeFeedbackInfo(); Code* unoptimized = function->shared()->code(); if (unoptimized->kind() == Code::FUNCTION) { unoptimized->ClearInlineCaches(); - unoptimized->ClearTypeFeedbackInfo(isolate->heap()); } return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) { +RUNTIME_FUNCTION(Runtime_RunningInSimulator) { SealHandleScope shs(isolate); + DCHECK(args.length() == 0); #if defined(USE_SIMULATOR) return isolate->heap()->true_value(); #else @@ -8649,14 +8585,15 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) { - HandleScope scope(isolate); - return isolate->concurrent_recompilation_enabled() - ? 
isolate->heap()->true_value() : isolate->heap()->false_value(); +RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 0); + return isolate->heap()->ToBoolean( + isolate->concurrent_recompilation_enabled()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) { +RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) { HandleScope scope(isolate); RUNTIME_ASSERT(args.length() == 1 || args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); @@ -8673,14 +8610,11 @@ if (args.length() == 2 && unoptimized->kind() == Code::FUNCTION) { CONVERT_ARG_HANDLE_CHECKED(String, type, 1); - if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) { + if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")) && FLAG_use_osr) { // Start patching from the currently patched loop nesting level. - int current_level = unoptimized->allow_osr_at_loop_nesting_level(); - ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level)); - for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) { - unoptimized->set_allow_osr_at_loop_nesting_level(i); - isolate->runtime_profiler()->AttemptOnStackReplacement(*function); - } + DCHECK(BackEdgeTable::Verify(isolate, unoptimized)); + isolate->runtime_profiler()->AttemptOnStackReplacement( + *function, Code::kMaxLoopNestingMarker); } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) && isolate->concurrent_recompilation_enabled()) { function->MarkForConcurrentOptimization(); @@ -8691,16 +8625,16 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) { +RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, function, 0); function->shared()->set_optimization_disabled(true); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) { 
+RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) { HandleScope scope(isolate); RUNTIME_ASSERT(args.length() == 1 || args.length() == 2); if (!isolate->use_crankshaft()) { @@ -8718,7 +8652,7 @@ sync_with_compiler_thread) { while (function->IsInOptimizationQueue()) { isolate->optimizing_compiler_thread()->InstallOptimizedFunctions(); - OS::Sleep(50); + base::OS::Sleep(50); } } if (FLAG_always_opt) { @@ -8730,12 +8664,16 @@ if (FLAG_deopt_every_n_times) { return Smi::FromInt(6); // 6 == "maybe deopted". } + if (function->IsOptimized() && function->code()->is_turbofanned()) { + return Smi::FromInt(7); // 7 == "TurboFan compiler". + } return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes". : Smi::FromInt(2); // 2 == "no". } -RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) { +RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) { + DCHECK(args.length() == 0); RUNTIME_ASSERT(FLAG_block_concurrent_recompilation); RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled()); isolate->optimizing_compiler_thread()->Unblock(); @@ -8743,9 +8681,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) { +RUNTIME_FUNCTION(Runtime_GetOptimizationCount) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); return Smi::FromInt(function->shared()->opt_count()); } @@ -8770,14 +8708,16 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) { +RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); Handle<Code> caller_code(function->shared()->code()); // We're not prepared to handle a function with arguments object. 
- ASSERT(!function->shared()->uses_arguments()); + DCHECK(!function->shared()->uses_arguments()); + + RUNTIME_ASSERT(FLAG_use_osr); // Passing the PC in the javascript frame from the caller directly is // not GC safe, so we walk the stack to get it. @@ -8793,17 +8733,19 @@ frame->pc() - caller_code->instruction_start()); #ifdef DEBUG - ASSERT_EQ(frame->function(), *function); - ASSERT_EQ(frame->LookupCode(), *caller_code); - ASSERT(caller_code->contains(frame->pc())); + DCHECK_EQ(frame->function(), *function); + DCHECK_EQ(frame->LookupCode(), *caller_code); + DCHECK(caller_code->contains(frame->pc())); #endif // DEBUG BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset); - ASSERT(!ast_id.IsNone()); + DCHECK(!ast_id.IsNone()); - Compiler::ConcurrencyMode mode = isolate->concurrent_osr_enabled() - ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT; + Compiler::ConcurrencyMode mode = + isolate->concurrent_osr_enabled() && + (function->shared()->ast_node_count() > 512) ? Compiler::CONCURRENT + : Compiler::NOT_CONCURRENT; Handle<Code> result = Handle<Code>::null(); OptimizedCompileJob* job = NULL; @@ -8831,15 +8773,16 @@ PrintF(" at AST id %d]\n", ast_id.ToInt()); } result = Compiler::GetConcurrentlyOptimizedCode(job); - } else if (result.is_null() && - IsSuitableForOnStackReplacement(isolate, function, caller_code)) { + } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) { if (FLAG_trace_osr) { PrintF("[OSR - Compiling: "); function->PrintName(); PrintF(" at AST id %d]\n", ast_id.ToInt()); } - result = Compiler::GetOptimizedCode(function, caller_code, mode, ast_id); - if (result.is_identical_to(isolate->builtins()->InOptimizationQueue())) { + MaybeHandle<Code> maybe_result = Compiler::GetOptimizedCode( + function, caller_code, mode, ast_id); + if (maybe_result.ToHandle(&result) && + result.is_identical_to(isolate->builtins()->InOptimizationQueue())) { // Optimization is queued. Return to check later. 
return NULL; } @@ -8854,7 +8797,7 @@ DeoptimizationInputData::cast(result->deoptimization_data()); if (data->OsrPcOffset()->value() >= 0) { - ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id); + DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id); if (FLAG_trace_osr) { PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n", ast_id.ToInt(), data->OsrPcOffset()->value()); @@ -8883,9 +8826,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAllocationTimeout) { +RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2 || args.length() == 3); + DCHECK(args.length() == 2 || args.length() == 3); #ifdef DEBUG CONVERT_SMI_ARG_CHECKED(interval, 0); CONVERT_SMI_ARG_CHECKED(timeout, 1); @@ -8905,23 +8848,25 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) { +RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) { SealHandleScope shs(isolate); + DCHECK(args.length() == 0); RUNTIME_ASSERT(isolate->bootstrapper()->IsActive()); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) { +RUNTIME_FUNCTION(Runtime_GetRootNaN) { SealHandleScope shs(isolate); + DCHECK(args.length() == 0); RUNTIME_ASSERT(isolate->bootstrapper()->IsActive()); return isolate->heap()->nan_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) { +RUNTIME_FUNCTION(Runtime_Call) { HandleScope scope(isolate); - ASSERT(args.length() >= 2); + DCHECK(args.length() >= 2); int argc = args.length() - 2; CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1); Object* receiver = args[0]; @@ -8938,33 +8883,32 @@ } for (int i = 0; i < argc; ++i) { - MaybeObject* maybe = args[1 + i]; - Object* object; - if (!maybe->To<Object>(&object)) return maybe; - argv[i] = Handle<Object>(object, isolate); + argv[i] = Handle<Object>(args[1 + i], isolate); } - bool threw; Handle<JSReceiver> hfun(fun); Handle<Object> hreceiver(receiver, isolate); - Handle<Object> result = Execution::Call( - isolate, hfun, hreceiver, argc, 
argv, &threw, true); - - if (threw) return Failure::Exception(); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, hfun, hreceiver, argc, argv, true)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) { +RUNTIME_FUNCTION(Runtime_Apply) { HandleScope scope(isolate); - ASSERT(args.length() == 5); + DCHECK(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0); - Handle<Object> receiver = args.at<Object>(1); + CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2); CONVERT_SMI_ARG_CHECKED(offset, 3); CONVERT_SMI_ARG_CHECKED(argc, 4); RUNTIME_ASSERT(offset >= 0); - RUNTIME_ASSERT(argc >= 0); + // Loose upper bound to allow fuzzing. We'll most likely run out of + // stack space before hitting this limit. + static int kMaxArgc = 1000000; + RUNTIME_ASSERT(argc >= 0 && argc <= kMaxArgc); // If there are too many arguments, allocate argv via malloc. const int argv_small_size = 10; @@ -8978,176 +8922,163 @@ } for (int i = 0; i < argc; ++i) { - argv[i] = Object::GetElement(isolate, arguments, offset + i); - RETURN_IF_EMPTY_HANDLE(isolate, argv[i]); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, argv[i], + Object::GetElement(isolate, arguments, offset + i)); } - bool threw; - Handle<Object> result = Execution::Call( - isolate, fun, receiver, argc, argv, &threw, true); - - if (threw) return Failure::Exception(); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, fun, receiver, argc, argv, true)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) { +RUNTIME_FUNCTION(Runtime_GetFunctionDelegate) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - RUNTIME_ASSERT(!args[0]->IsJSFunction()); - return *Execution::GetFunctionDelegate(isolate, args.at<Object>(0)); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); + 
RUNTIME_ASSERT(!object->IsJSFunction()); + return *Execution::GetFunctionDelegate(isolate, object); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) { +RUNTIME_FUNCTION(Runtime_GetConstructorDelegate) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - RUNTIME_ASSERT(!args[0]->IsJSFunction()); - return *Execution::GetConstructorDelegate(isolate, args.at<Object>(0)); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); + RUNTIME_ASSERT(!object->IsJSFunction()); + return *Execution::GetConstructorDelegate(isolate, object); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewGlobalContext) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - - CONVERT_ARG_CHECKED(JSFunction, function, 0); - CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 1); - Context* result; - MaybeObject* maybe_result = - isolate->heap()->AllocateGlobalContext(function, scope_info); - if (!maybe_result->To(&result)) return maybe_result; +RUNTIME_FUNCTION(Runtime_NewGlobalContext) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); - ASSERT(function->context() == isolate->context()); - ASSERT(function->context()->global_object() == result->global_object()); - result->global_object()->set_global_context(result); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1); + Handle<Context> result = + isolate->factory()->NewGlobalContext(function, scope_info); - return result; // non-failure + DCHECK(function->context() == isolate->context()); + DCHECK(function->context()->global_object() == result->global_object()); + result->global_object()->set_global_context(*result); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewFunctionContext) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_NewFunctionContext) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); - CONVERT_ARG_CHECKED(JSFunction, function, 0); + 
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + + DCHECK(function->context() == isolate->context()); int length = function->shared()->scope_info()->ContextLength(); - return isolate->heap()->AllocateFunctionContext(length, function); + return *isolate->factory()->NewFunctionContext(length, function); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushWithContext) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - JSReceiver* extension_object; +RUNTIME_FUNCTION(Runtime_PushWithContext) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + Handle<JSReceiver> extension_object; if (args[0]->IsJSReceiver()) { - extension_object = JSReceiver::cast(args[0]); + extension_object = args.at<JSReceiver>(0); } else { - // Convert the object to a proper JavaScript object. - MaybeObject* maybe_js_object = args[0]->ToObject(isolate); - if (!maybe_js_object->To(&extension_object)) { - if (Failure::cast(maybe_js_object)->IsInternalError()) { - HandleScope scope(isolate); - Handle<Object> handle = args.at<Object>(0); - Handle<Object> result = - isolate->factory()->NewTypeError("with_expression", - HandleVector(&handle, 1)); - return isolate->Throw(*result); - } else { - return maybe_js_object; - } + // Try to convert the object to a proper JavaScript object. + MaybeHandle<JSReceiver> maybe_object = + Object::ToObject(isolate, args.at<Object>(0)); + if (!maybe_object.ToHandle(&extension_object)) { + Handle<Object> handle = args.at<Object>(0); + Handle<Object> result = + isolate->factory()->NewTypeError("with_expression", + HandleVector(&handle, 1)); + return isolate->Throw(*result); } } - JSFunction* function; + Handle<JSFunction> function; if (args[1]->IsSmi()) { // A smi sentinel indicates a context nested inside global code rather // than some function. There is a canonical empty function that can be // gotten from the native context. 
- function = isolate->context()->native_context()->closure(); + function = handle(isolate->native_context()->closure()); } else { - function = JSFunction::cast(args[1]); + function = args.at<JSFunction>(1); } - Context* context; - MaybeObject* maybe_context = - isolate->heap()->AllocateWithContext(function, - isolate->context(), - extension_object); - if (!maybe_context->To(&context)) return maybe_context; - isolate->set_context(context); - return context; + Handle<Context> current(isolate->context()); + Handle<Context> context = isolate->factory()->NewWithContext( + function, current, extension_object); + isolate->set_context(*context); + return *context; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushCatchContext) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 3); - String* name = String::cast(args[0]); - Object* thrown_object = args[1]; - JSFunction* function; +RUNTIME_FUNCTION(Runtime_PushCatchContext) { + HandleScope scope(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(String, name, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1); + Handle<JSFunction> function; if (args[2]->IsSmi()) { // A smi sentinel indicates a context nested inside global code rather // than some function. There is a canonical empty function that can be // gotten from the native context. 
- function = isolate->context()->native_context()->closure(); + function = handle(isolate->native_context()->closure()); } else { - function = JSFunction::cast(args[2]); + function = args.at<JSFunction>(2); } - Context* context; - MaybeObject* maybe_context = - isolate->heap()->AllocateCatchContext(function, - isolate->context(), - name, - thrown_object); - if (!maybe_context->To(&context)) return maybe_context; - isolate->set_context(context); - return context; + Handle<Context> current(isolate->context()); + Handle<Context> context = isolate->factory()->NewCatchContext( + function, current, name, thrown_object); + isolate->set_context(*context); + return *context; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushBlockContext) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - ScopeInfo* scope_info = ScopeInfo::cast(args[0]); - JSFunction* function; +RUNTIME_FUNCTION(Runtime_PushBlockContext) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0); + Handle<JSFunction> function; if (args[1]->IsSmi()) { // A smi sentinel indicates a context nested inside global code rather // than some function. There is a canonical empty function that can be // gotten from the native context. 
- function = isolate->context()->native_context()->closure(); + function = handle(isolate->native_context()->closure()); } else { - function = JSFunction::cast(args[1]); + function = args.at<JSFunction>(1); } - Context* context; - MaybeObject* maybe_context = - isolate->heap()->AllocateBlockContext(function, - isolate->context(), - scope_info); - if (!maybe_context->To(&context)) return maybe_context; - isolate->set_context(context); - return context; + Handle<Context> current(isolate->context()); + Handle<Context> context = isolate->factory()->NewBlockContext( + function, current, scope_info); + isolate->set_context(*context); + return *context; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) { +RUNTIME_FUNCTION(Runtime_IsJSModule) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* obj = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); return isolate->heap()->ToBoolean(obj->IsJSModule()); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushModuleContext) { +RUNTIME_FUNCTION(Runtime_PushModuleContext) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_SMI_ARG_CHECKED(index, 0); if (!args[1]->IsScopeInfo()) { // Module already initialized. Find hosting context and retrieve context. Context* host = Context::cast(isolate->context())->global_context(); Context* context = Context::cast(host->get(index)); - ASSERT(context->previous() == isolate->context()); + DCHECK(context->previous() == isolate->context()); isolate->set_context(context); return context; } @@ -9173,9 +9104,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareModules) { +RUNTIME_FUNCTION(Runtime_DeclareModules) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0); Context* host_context = isolate->context(); @@ -9198,15 +9129,17 @@ IsImmutableVariableMode(mode) ? 
FROZEN : SEALED; Handle<AccessorInfo> info = Accessors::MakeModuleExport(name, index, attr); - Handle<Object> result = JSObject::SetAccessor(module, info); - ASSERT(!(result.is_null() || result->IsUndefined())); + Handle<Object> result = + JSObject::SetAccessor(module, info).ToHandleChecked(); + DCHECK(!result->IsUndefined()); USE(result); break; } case MODULE: { Object* referenced_context = Context::cast(host_context)->get(index); Handle<JSModule> value(Context::cast(referenced_context)->module()); - JSReceiver::SetProperty(module, name, value, FROZEN, STRICT); + JSObject::SetOwnPropertyIgnoreAttributes(module, name, value, FROZEN) + .Assert(); break; } case INTERNAL: @@ -9218,17 +9151,17 @@ } } - JSObject::PreventExtensions(module); + JSObject::PreventExtensions(module).Assert(); } - ASSERT(!isolate->has_pending_exception()); + DCHECK(!isolate->has_pending_exception()); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeleteContextSlot) { +RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(Context, context, 0); CONVERT_ARG_HANDLE_CHECKED(String, name, 1); @@ -9257,8 +9190,10 @@ // the global object, or the subject of a with. Try to delete it // (respecting DONT_DELETE). Handle<JSObject> object = Handle<JSObject>::cast(holder); - Handle<Object> result = JSReceiver::DeleteProperty(object, name); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSReceiver::DeleteProperty(object, name)); return *result; } @@ -9273,38 +9208,53 @@ // allocated by the caller, and passed as a pointer in a hidden first parameter. 
#ifdef V8_HOST_ARCH_64_BIT struct ObjectPair { - MaybeObject* x; - MaybeObject* y; + Object* x; + Object* y; }; -static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) { +static inline ObjectPair MakePair(Object* x, Object* y) { ObjectPair result = {x, y}; // Pointers x and y returned in rax and rdx, in AMD-x64-abi. // In Win64 they are assigned to a hidden first argument. return result; } +#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT +// For x32 a 128-bit struct return is done as rax and rdx from the ObjectPair +// are used in the full codegen and Crankshaft compiler. An alternative is +// using uint64_t and modifying full codegen and Crankshaft compiler. +struct ObjectPair { + Object* x; + uint32_t x_upper; + Object* y; + uint32_t y_upper; +}; + + +static inline ObjectPair MakePair(Object* x, Object* y) { + ObjectPair result = {x, 0, y, 0}; + // Pointers x and y returned in rax and rdx, in x32-abi. + return result; +} #else typedef uint64_t ObjectPair; -static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) { +static inline ObjectPair MakePair(Object* x, Object* y) { +#if defined(V8_TARGET_LITTLE_ENDIAN) return reinterpret_cast<uint32_t>(x) | (reinterpret_cast<ObjectPair>(y) << 32); -} +#elif defined(V8_TARGET_BIG_ENDIAN) + return reinterpret_cast<uint32_t>(y) | + (reinterpret_cast<ObjectPair>(x) << 32); +#else +#error Unknown endianness #endif - - -static inline MaybeObject* Unhole(Heap* heap, - MaybeObject* x, - PropertyAttributes attributes) { - ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0); - USE(attributes); - return x->IsTheHole() ? heap->undefined_value() : x; } +#endif static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) { - ASSERT(!holder->IsGlobalObject()); + DCHECK(!holder->IsGlobalObject()); Context* top = isolate->context(); // Get the context extension function. 
JSFunction* context_extension_function = @@ -9322,11 +9272,10 @@ } -static ObjectPair LoadContextSlotHelper(Arguments args, - Isolate* isolate, - bool throw_error) { +static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate, + bool throw_error) { HandleScope scope(isolate); - ASSERT_EQ(2, args.length()); + DCHECK_EQ(2, args.length()); if (!args[0]->IsContext() || !args[1]->IsString()) { return MakePair(isolate->ThrowIllegalOperation(), NULL); @@ -9344,12 +9293,12 @@ &attributes, &binding_flags); if (isolate->has_pending_exception()) { - return MakePair(Failure::Exception(), NULL); + return MakePair(isolate->heap()->exception(), NULL); } // If the index is non-negative, the slot has been found in a context. if (index >= 0) { - ASSERT(holder->IsContext()); + DCHECK(holder->IsContext()); // If the "property" we were looking for is a local variable, the // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3. Handle<Object> receiver = isolate->factory()->undefined_value(); @@ -9368,10 +9317,14 @@ case MUTABLE_IS_INITIALIZED: case IMMUTABLE_IS_INITIALIZED: case IMMUTABLE_IS_INITIALIZED_HARMONY: - ASSERT(!value->IsTheHole()); + DCHECK(!value->IsTheHole()); return MakePair(value, *receiver); case IMMUTABLE_CHECK_INITIALIZED: - return MakePair(Unhole(isolate->heap(), value, attributes), *receiver); + if (value->IsTheHole()) { + DCHECK((attributes & READ_ONLY) != 0); + value = isolate->heap()->undefined_value(); + } + return MakePair(value, *receiver); case MISSING_BINDING: UNREACHABLE(); return MakePair(NULL, NULL); @@ -9383,7 +9336,13 @@ // property from it. if (!holder.is_null()) { Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder); - ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name)); +#ifdef DEBUG + if (!object->IsJSProxy()) { + Maybe<bool> maybe = JSReceiver::HasProperty(object, name); + DCHECK(maybe.has_value); + DCHECK(maybe.value); + } +#endif // GetProperty below can cause GC. 
Handle<Object> receiver_handle( object->IsGlobalObject() @@ -9394,8 +9353,12 @@ // No need to unhole the value here. This is taken care of by the // GetProperty function. - MaybeObject* value = object->GetProperty(*name); - return MakePair(value, *receiver_handle); + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, value, + Object::GetProperty(object, name), + MakePair(isolate->heap()->exception(), NULL)); + return MakePair(*value, *receiver_handle); } if (throw_error) { @@ -9412,21 +9375,21 @@ } -RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_LoadContextSlot) { - return LoadContextSlotHelper(args, isolate, true); +RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlot) { + return LoadLookupSlotHelper(args, isolate, true); } -RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_LoadContextSlotNoReferenceError) { - return LoadContextSlotHelper(args, isolate, false); +RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotNoReferenceError) { + return LoadLookupSlotHelper(args, isolate, false); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StoreContextSlot) { +RUNTIME_FUNCTION(Runtime_StoreLookupSlot) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); - Handle<Object> value(args[0], isolate); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 0); CONVERT_ARG_HANDLE_CHECKED(Context, context, 1); CONVERT_ARG_HANDLE_CHECKED(String, name, 2); CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 3); @@ -9440,22 +9403,13 @@ &index, &attributes, &binding_flags); - if (isolate->has_pending_exception()) return Failure::Exception(); + // In case of JSProxy, an exception might have been thrown. + if (isolate->has_pending_exception()) return isolate->heap()->exception(); + // The property was found in a context slot. if (index >= 0) { - // The property was found in a context slot. 
- Handle<Context> context = Handle<Context>::cast(holder); - if (binding_flags == MUTABLE_CHECK_INITIALIZED && - context->get(index)->IsTheHole()) { - Handle<Object> error = - isolate->factory()->NewReferenceError("not_defined", - HandleVector(&name, 1)); - return isolate->Throw(*error); - } - // Ignore if read_only variable. if ((attributes & READ_ONLY) == 0) { - // Context is a fixed array and set cannot fail. - context->set(index, *value); + Handle<Context>::cast(holder)->set(index, *value); } else if (strict_mode == STRICT) { // Setting read only property in strict mode. Handle<Object> error = @@ -9470,71 +9424,53 @@ // context extension object, a property of the subject of a with, or a // property of the global object. Handle<JSReceiver> object; - - if (!holder.is_null()) { + if (attributes != ABSENT) { // The property exists on the holder. object = Handle<JSReceiver>::cast(holder); + } else if (strict_mode == STRICT) { + // If absent in strict mode: throw. + Handle<Object> error = isolate->factory()->NewReferenceError( + "not_defined", HandleVector(&name, 1)); + return isolate->Throw(*error); } else { - // The property was not found. - ASSERT(attributes == ABSENT); - - if (strict_mode == STRICT) { - // Throw in strict mode (assignment to undefined variable). - Handle<Object> error = - isolate->factory()->NewReferenceError( - "not_defined", HandleVector(&name, 1)); - return isolate->Throw(*error); - } - // In sloppy mode, the property is added to the global object. - attributes = NONE; - object = Handle<JSReceiver>(isolate->context()->global_object()); + // If absent in sloppy mode: add the property to the global object. + object = Handle<JSReceiver>(context->global_object()); } - // Set the property if it's not read only or doesn't yet exist. 
- if ((attributes & READ_ONLY) == 0 || - (JSReceiver::GetLocalPropertyAttribute(object, name) == ABSENT)) { - RETURN_IF_EMPTY_HANDLE( - isolate, - JSReceiver::SetProperty(object, name, value, NONE, strict_mode)); - } else if (strict_mode == STRICT && (attributes & READ_ONLY) != 0) { - // Setting read only property in strict mode. - Handle<Object> error = - isolate->factory()->NewTypeError( - "strict_cannot_assign", HandleVector(&name, 1)); - return isolate->Throw(*error); - } + RETURN_FAILURE_ON_EXCEPTION( + isolate, Object::SetProperty(object, name, value, strict_mode)); + return *value; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Throw) { +RUNTIME_FUNCTION(Runtime_Throw) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); return isolate->Throw(args[0]); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ReThrow) { +RUNTIME_FUNCTION(Runtime_ReThrow) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); return isolate->ReThrow(args[0]); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PromoteScheduledException) { +RUNTIME_FUNCTION(Runtime_PromoteScheduledException) { SealHandleScope shs(isolate); - ASSERT_EQ(0, args.length()); + DCHECK(args.length() == 0); return isolate->PromoteScheduledException(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowReferenceError) { +RUNTIME_FUNCTION(Runtime_ThrowReferenceError) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - - Handle<Object> name(args[0], isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, name, 0); Handle<Object> reference_error = isolate->factory()->NewReferenceError("not_defined", HandleVector(&name, 1)); @@ -9542,47 +9478,36 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowNotDateError) { +RUNTIME_FUNCTION(Runtime_ThrowNotDateError) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); return isolate->Throw(*isolate->factory()->NewTypeError( 
"not_date_object", HandleVector<Object>(NULL, 0))); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowMessage) { - HandleScope scope(isolate); - ASSERT(args.length() == 1); - CONVERT_SMI_ARG_CHECKED(message_id, 0); - const char* message = GetBailoutReason( - static_cast<BailoutReason>(message_id)); - Handle<String> message_handle = - isolate->factory()->NewStringFromAscii(CStrVector(message)); - RETURN_IF_EMPTY_HANDLE(isolate, message_handle); - return isolate->Throw(*message_handle); -} - - -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StackGuard) { +RUNTIME_FUNCTION(Runtime_StackGuard) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); // First check if this is a real stack overflow. - if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { return isolate->StackOverflow(); } - return Execution::HandleStackGuardInterrupt(isolate); + return isolate->stack_guard()->HandleInterrupts(); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_TryInstallOptimizedCode) { +RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); // First check if this is a real stack overflow. 
- if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { SealHandleScope shs(isolate); return isolate->StackOverflow(); } @@ -9593,10 +9518,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Interrupt) { +RUNTIME_FUNCTION(Runtime_Interrupt) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); - return Execution::HandleStackGuardInterrupt(isolate); + DCHECK(args.length() == 0); + return isolate->stack_guard()->HandleInterrupts(); } @@ -9629,99 +9554,107 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) { +RUNTIME_FUNCTION(Runtime_TraceEnter) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); PrintTransition(isolate, NULL); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) { +RUNTIME_FUNCTION(Runtime_TraceExit) { SealHandleScope shs(isolate); - PrintTransition(isolate, args[0]); - return args[0]; // return TOS + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + PrintTransition(isolate, obj); + return obj; // return TOS } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) { +RUNTIME_FUNCTION(Runtime_DebugPrint) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); + OFStream os(stdout); #ifdef DEBUG if (args[0]->IsString()) { // If we have a string, assume it's a code "marker" // and print some interesting cpu debugging info. 
JavaScriptFrameIterator it(isolate); JavaScriptFrame* frame = it.frame(); - PrintF("fp = %p, sp = %p, caller_sp = %p: ", - frame->fp(), frame->sp(), frame->caller_sp()); + os << "fp = " << frame->fp() << ", sp = " << frame->sp() + << ", caller_sp = " << frame->caller_sp() << ": "; } else { - PrintF("DebugPrint: "); + os << "DebugPrint: "; } - args[0]->Print(); + args[0]->Print(os); if (args[0]->IsHeapObject()) { - PrintF("\n"); - HeapObject::cast(args[0])->map()->Print(); + os << "\n"; + HeapObject::cast(args[0])->map()->Print(os); } #else // ShortPrint is available in release mode. Print is not. - args[0]->ShortPrint(); + os << Brief(args[0]); #endif - PrintF("\n"); - Flush(); + os << endl; return args[0]; // return TOS } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) { +RUNTIME_FUNCTION(Runtime_DebugTrace) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); isolate->PrintStack(stdout); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 0); +RUNTIME_FUNCTION(Runtime_DateCurrentTime) { + HandleScope scope(isolate); + DCHECK(args.length() == 0); + if (FLAG_log_timer_events) LOG(isolate, CurrentTimeEvent()); // According to ECMA-262, section 15.9.1, page 117, the precision of // the number in a Date object representing a particular instant in // time is milliseconds. Therefore, we floor the result of getting // the OS time. 
- double millis = std::floor(OS::TimeCurrentMillis()); - return isolate->heap()->NumberFromDouble(millis); + double millis; + if (FLAG_verify_predictable) { + millis = 1388534400000.0; // Jan 1 2014 00:00:00 GMT+0000 + millis += Floor(isolate->heap()->synthetic_time()); + } else { + millis = Floor(base::OS::TimeCurrentMillis()); + } + return *isolate->factory()->NewNumber(millis); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) { +RUNTIME_FUNCTION(Runtime_DateParseString) { HandleScope scope(isolate); - ASSERT(args.length() == 2); - + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, str, 0); - FlattenString(str); - CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1); + RUNTIME_ASSERT(output->HasFastElements()); JSObject::EnsureCanContainHeapObjectElements(output); RUNTIME_ASSERT(output->HasFastObjectElements()); + Handle<FixedArray> output_array(FixedArray::cast(output->elements())); + RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE); + str = String::Flatten(str); DisallowHeapAllocation no_gc; - FixedArray* output_array = FixedArray::cast(output->elements()); - RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE); bool result; String::FlatContent str_content = str->GetFlatContent(); if (str_content.IsAscii()) { result = DateParser::Parse(str_content.ToOneByteVector(), - output_array, + *output_array, isolate->unicode_cache()); } else { - ASSERT(str_content.IsTwoByte()); + DCHECK(str_content.IsTwoByte()); result = DateParser::Parse(str_content.ToUC16Vector(), - output_array, + *output_array, isolate->unicode_cache()); } @@ -9733,31 +9666,37 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_DateLocalTimezone) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); + RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs && + x <= DateCache::kMaxTimeBeforeUTCInMs); const char* 
zone = isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x)); - return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone)); + Handle<String> result = isolate->factory()->NewStringFromUtf8( + CStrVector(zone)).ToHandleChecked(); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); +RUNTIME_FUNCTION(Runtime_DateToUTC) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); + RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs && + x <= DateCache::kMaxTimeBeforeUTCInMs); int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x)); - return isolate->heap()->NumberFromDouble(static_cast<double>(time)); + return *isolate->factory()->NewNumber(static_cast<double>(time)); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCacheVersion) { +RUNTIME_FUNCTION(Runtime_DateCacheVersion) { HandleScope hs(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) { Handle<FixedArray> date_cache_version = isolate->factory()->NewFixedArray(1, TENURED); @@ -9776,50 +9715,44 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) { +RUNTIME_FUNCTION(Runtime_GlobalProxy) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* global = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, global, 0); if (!global->IsJSGlobalObject()) return isolate->heap()->null_value(); - return JSGlobalObject::cast(global)->global_receiver(); + return JSGlobalObject::cast(global)->global_proxy(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAttachedGlobal) { +RUNTIME_FUNCTION(Runtime_IsAttachedGlobal) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* global = args[0]; + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, global, 0); if (!global->IsJSGlobalObject()) return isolate->heap()->false_value(); return 
isolate->heap()->ToBoolean( !JSGlobalObject::cast(global)->IsDetached()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) { +RUNTIME_FUNCTION(Runtime_ParseJson) { HandleScope scope(isolate); - ASSERT_EQ(1, args.length()); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, source, 0); - source = Handle<String>(FlattenGetString(source)); + source = String::Flatten(source); // Optimized fast case where we only have ASCII characters. Handle<Object> result; - if (source->IsSeqOneByteString()) { - result = JsonParser<true>::Parse(source); - } else { - result = JsonParser<false>::Parse(source); - } - if (result.is_null()) { - // Syntax error or stack overflow in scanner. - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + source->IsSeqOneByteString() ? JsonParser<true>::Parse(source) + : JsonParser<false>::Parse(source)); return *result; } bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context) { - ASSERT(context->allow_code_gen_from_strings()->IsFalse()); + DCHECK(context->allow_code_gen_from_strings()->IsFalse()); // Check with callback if set. AllowCodeGenerationFromStringsCallback callback = isolate->allow_code_gen_callback(); @@ -9834,14 +9767,72 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) { +// Walk up the stack expecting: +// - Runtime_CompileString +// - JSFunction callee (eval, Function constructor, etc) +// - call() (maybe) +// - apply() (maybe) +// - bind() (maybe) +// - JSFunction caller (maybe) +// +// return true if the caller has the same security token as the callee +// or if an exit frame was hit, in which case allow it through, as it could +// have come through the api. 
+static bool TokensMatchForCompileString(Isolate* isolate) { + MaybeHandle<JSFunction> callee; + bool exit_handled = true; + bool tokens_match = true; + bool done = false; + for (StackFrameIterator it(isolate); !it.done() && !done; it.Advance()) { + StackFrame* raw_frame = it.frame(); + if (!raw_frame->is_java_script()) { + if (raw_frame->is_exit()) exit_handled = false; + continue; + } + JavaScriptFrame* outer_frame = JavaScriptFrame::cast(raw_frame); + List<FrameSummary> frames(FLAG_max_inlining_levels + 1); + outer_frame->Summarize(&frames); + for (int i = frames.length() - 1; i >= 0 && !done; --i) { + FrameSummary& frame = frames[i]; + Handle<JSFunction> fun = frame.function(); + // Capture the callee function. + if (callee.is_null()) { + callee = fun; + exit_handled = true; + continue; + } + // Exit condition. + Handle<Context> context(callee.ToHandleChecked()->context()); + if (!fun->context()->HasSameSecurityTokenAs(*context)) { + tokens_match = false; + done = true; + continue; + } + // Skip bound functions in correct origin. + if (fun->shared()->bound()) { + exit_handled = true; + continue; + } + done = true; + } + } + return !exit_handled || tokens_match; +} + + +RUNTIME_FUNCTION(Runtime_CompileString) { HandleScope scope(isolate); - ASSERT_EQ(2, args.length()); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, source, 0); CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1); // Extract native context. - Handle<Context> context(isolate->context()->native_context()); + Handle<Context> context(isolate->native_context()); + + // Filter cross security context calls. + if (!TokensMatchForCompileString(isolate)) { + return isolate->heap()->undefined_value(); + } // Check if native context allows code generation from // strings. Throw an exception if it doesn't. @@ -9856,9 +9847,11 @@ // Compile source string in the native context. ParseRestriction restriction = function_literal_only ? 
ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION; - Handle<JSFunction> fun = Compiler::GetFunctionFromEval( - source, context, SLOPPY, restriction, RelocInfo::kNoPosition); - RETURN_IF_EMPTY_HANDLE(isolate, fun); + Handle<JSFunction> fun; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, fun, + Compiler::GetFunctionFromEval( + source, context, SLOPPY, restriction, RelocInfo::kNoPosition)); return *fun; } @@ -9879,23 +9872,25 @@ native_context->ErrorMessageForCodeGenerationFromStrings(); isolate->Throw(*isolate->factory()->NewEvalError( "code_gen_from_strings", HandleVector<Object>(&error_message, 1))); - return MakePair(Failure::Exception(), NULL); + return MakePair(isolate->heap()->exception(), NULL); } // Deal with a normal eval call with a string argument. Compile it // and return the compiled function bound in the local context. static const ParseRestriction restriction = NO_PARSE_RESTRICTION; - Handle<JSFunction> compiled = Compiler::GetFunctionFromEval( - source, context, strict_mode, restriction, scope_position); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, compiled, - MakePair(Failure::Exception(), NULL)); + Handle<JSFunction> compiled; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, compiled, + Compiler::GetFunctionFromEval( + source, context, strict_mode, restriction, scope_position), + MakePair(isolate->heap()->exception(), NULL)); return MakePair(*compiled, *receiver); } -RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_ResolvePossiblyDirectEval) { +RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) { HandleScope scope(isolate); - ASSERT(args.length() == 5); + DCHECK(args.length() == 5); Handle<Object> callee = args.at<Object>(0); @@ -9909,10 +9904,10 @@ return MakePair(*callee, isolate->heap()->undefined_value()); } - ASSERT(args[3]->IsSmi()); - ASSERT(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT); + DCHECK(args[3]->IsSmi()); + DCHECK(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT); StrictMode strict_mode = 
static_cast<StrictMode>(args.smi_at(3)); - ASSERT(args[4]->IsSmi()); + DCHECK(args[4]->IsSmi()); return CompileGlobalEval(isolate, args.at<String>(1), args.at<Object>(2), @@ -9921,54 +9916,37 @@ } -// Allocate a block of memory in the given space (filled with a filler). -// Used as a fall-back for generated code when the space is full. -static MaybeObject* Allocate(Isolate* isolate, - int size, - bool double_align, - AllocationSpace space) { - Heap* heap = isolate->heap(); +RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_SMI_ARG_CHECKED(size, 0); RUNTIME_ASSERT(IsAligned(size, kPointerSize)); RUNTIME_ASSERT(size > 0); RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize); - HeapObject* allocation; - { MaybeObject* maybe_allocation = heap->AllocateRaw(size, space, space); - if (!maybe_allocation->To(&allocation)) return maybe_allocation; - } -#ifdef DEBUG - MemoryChunk* chunk = MemoryChunk::FromAddress(allocation->address()); - ASSERT(chunk->owner()->identity() == space); -#endif - heap->CreateFillerObjectAt(allocation->address(), size); - return allocation; -} - - -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateInNewSpace) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_SMI_ARG_CHECKED(size, 0); - return Allocate(isolate, size, false, NEW_SPACE); + return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE); } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateInTargetSpace) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); +RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); CONVERT_SMI_ARG_CHECKED(size, 0); CONVERT_SMI_ARG_CHECKED(flags, 1); + RUNTIME_ASSERT(IsAligned(size, kPointerSize)); + RUNTIME_ASSERT(size > 0); + RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize); bool double_align = AllocateDoubleAlignFlag::decode(flags); AllocationSpace space = 
AllocateTargetSpace::decode(flags); - return Allocate(isolate, size, double_align, space); + return *isolate->factory()->NewFillerObject(size, double_align, space); } // Push an object unto an array of objects if it is not already in the // array. Returns true if the element was pushed on the stack and // false otherwise. -RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) { +RUNTIME_FUNCTION(Runtime_PushIfAbsent) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, element, 1); RUNTIME_ASSERT(array->HasFastSmiOrObjectElements()); @@ -9979,10 +9957,9 @@ } // Strict not needed. Used for cycle detection in Array join implementation. - RETURN_IF_EMPTY_HANDLE(isolate, JSObject::SetFastElement(array, length, - element, - SLOPPY, - true)); + RETURN_FAILURE_ON_EXCEPTION( + isolate, + JSObject::SetFastElement(array, length, element, SLOPPY, true)); return isolate->heap()->true_value(); } @@ -10030,14 +10007,14 @@ // getters on the arrays increasing the length of later arrays // during iteration. // This shouldn't happen in anything but pathological cases. - SetDictionaryMode(index); + SetDictionaryMode(); // Fall-through to dictionary mode. } - ASSERT(!fast_elements_); + DCHECK(!fast_elements_); Handle<SeededNumberDictionary> dict( SeededNumberDictionary::cast(*storage_)); Handle<SeededNumberDictionary> result = - isolate_->factory()->DictionaryAtNumberPut(dict, index, elm); + SeededNumberDictionary::AtNumberPut(dict, index, elm); if (!result.is_identical_to(dict)) { // Dictionary needed to grow. clear_storage(); @@ -10051,6 +10028,14 @@ } else { index_offset_ += delta; } + // If the initial length estimate was off (see special case in visit()), + // but the array blowing the limit didn't contain elements beyond the + // provided-for index range, go to dictionary mode now. 
+ if (fast_elements_ && + index_offset_ > + static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) { + SetDictionaryMode(); + } } bool exceeds_array_limit() { @@ -10061,12 +10046,9 @@ Handle<JSArray> array = isolate_->factory()->NewJSArray(0); Handle<Object> length = isolate_->factory()->NewNumber(static_cast<double>(index_offset_)); - Handle<Map> map; - if (fast_elements_) { - map = JSObject::GetElementsTransitionMap(array, FAST_HOLEY_ELEMENTS); - } else { - map = JSObject::GetElementsTransitionMap(array, DICTIONARY_ELEMENTS); - } + Handle<Map> map = JSObject::GetElementsTransitionMap( + array, + fast_elements_ ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS); array->set_map(*map); array->set_length(*length); array->set_elements(*storage_); @@ -10075,19 +10057,18 @@ private: // Convert storage to dictionary mode. - void SetDictionaryMode(uint32_t index) { - ASSERT(fast_elements_); + void SetDictionaryMode() { + DCHECK(fast_elements_); Handle<FixedArray> current_storage(*storage_); Handle<SeededNumberDictionary> slow_storage( - isolate_->factory()->NewSeededNumberDictionary( - current_storage->length())); + SeededNumberDictionary::New(isolate_, current_storage->length())); uint32_t current_length = static_cast<uint32_t>(current_storage->length()); for (uint32_t i = 0; i < current_length; i++) { HandleScope loop_scope(isolate_); Handle<Object> element(current_storage->get(i), isolate_); if (!element->IsTheHole()) { Handle<SeededNumberDictionary> new_storage = - isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element); + SeededNumberDictionary::AtNumberPut(slow_storage, i, element); if (!new_storage.is_identical_to(slow_storage)) { slow_storage = loop_scope.CloseAndEscape(new_storage); } @@ -10127,7 +10108,7 @@ case FAST_HOLEY_ELEMENTS: { // Fast elements can't have lengths that are not representable by // a 32-bit signed integer. 
- ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0); + DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0); int fast_length = static_cast<int>(length); Handle<FixedArray> elements(FixedArray::cast(array->elements())); for (int i = 0; i < fast_length; i++) { @@ -10139,10 +10120,10 @@ case FAST_HOLEY_DOUBLE_ELEMENTS: { // Fast elements can't have lengths that are not representable by // a 32-bit signed integer. - ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0); + DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0); int fast_length = static_cast<int>(length); if (array->elements()->IsFixedArray()) { - ASSERT(FixedArray::cast(array->elements())->length() == 0); + DCHECK(FixedArray::cast(array->elements())->length() == 0); break; } Handle<FixedDoubleArray> elements( @@ -10191,7 +10172,7 @@ ExternalArrayClass::cast(receiver->elements())); uint32_t len = static_cast<uint32_t>(array->length()); - ASSERT(visitor != NULL); + DCHECK(visitor != NULL); if (elements_are_ints) { if (elements_are_guaranteed_smis) { for (uint32_t j = 0; j < len; j++) { @@ -10266,7 +10247,7 @@ HandleScope loop_scope(isolate); Handle<Object> k(dict->KeyAt(j), isolate); if (dict->IsKey(*k)) { - ASSERT(k->IsNumber()); + DCHECK(k->IsNumber()); uint32_t index = static_cast<uint32_t>(k->Number()); if (index < range) { indices->Add(index); @@ -10308,11 +10289,13 @@ } } - Handle<Object> prototype(object->GetPrototype(), isolate); - if (prototype->IsJSObject()) { + PrototypeIterator iter(isolate, object); + if (!iter.IsAtEnd()) { // The prototype will usually have no inherited element indices, // but we have to check. - CollectElementIndices(Handle<JSObject>::cast(prototype), range, indices); + CollectElementIndices( + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), range, + indices); } } @@ -10340,30 +10323,41 @@ // to check the prototype for missing elements. 
Handle<FixedArray> elements(FixedArray::cast(receiver->elements())); int fast_length = static_cast<int>(length); - ASSERT(fast_length <= elements->length()); + DCHECK(fast_length <= elements->length()); for (int j = 0; j < fast_length; j++) { HandleScope loop_scope(isolate); Handle<Object> element_value(elements->get(j), isolate); if (!element_value->IsTheHole()) { visitor->visit(j, element_value); - } else if (JSReceiver::HasElement(receiver, j)) { - // Call GetElement on receiver, not its prototype, or getters won't - // have the correct receiver. - element_value = Object::GetElement(isolate, receiver, j); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false); - visitor->visit(j, element_value); + } else { + Maybe<bool> maybe = JSReceiver::HasElement(receiver, j); + if (!maybe.has_value) return false; + if (maybe.value) { + // Call GetElement on receiver, not its prototype, or getters won't + // have the correct receiver. + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, element_value, + Object::GetElement(isolate, receiver, j), false); + visitor->visit(j, element_value); + } } } break; } case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { + // Empty array is FixedArray but not FixedDoubleArray. + if (length == 0) break; // Run through the elements FixedArray and use HasElement and GetElement // to check the prototype for missing elements. 
+ if (receiver->elements()->IsFixedArray()) { + DCHECK(receiver->elements()->length() == 0); + break; + } Handle<FixedDoubleArray> elements( FixedDoubleArray::cast(receiver->elements())); int fast_length = static_cast<int>(length); - ASSERT(fast_length <= elements->length()); + DCHECK(fast_length <= elements->length()); for (int j = 0; j < fast_length; j++) { HandleScope loop_scope(isolate); if (!elements->is_the_hole(j)) { @@ -10371,13 +10365,18 @@ Handle<Object> element_value = isolate->factory()->NewNumber(double_value); visitor->visit(j, element_value); - } else if (JSReceiver::HasElement(receiver, j)) { - // Call GetElement on receiver, not its prototype, or getters won't - // have the correct receiver. - Handle<Object> element_value = - Object::GetElement(isolate, receiver, j); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false); - visitor->visit(j, element_value); + } else { + Maybe<bool> maybe = JSReceiver::HasElement(receiver, j); + if (!maybe.has_value) return false; + if (maybe.value) { + // Call GetElement on receiver, not its prototype, or getters won't + // have the correct receiver. + Handle<Object> element_value; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, element_value, + Object::GetElement(isolate, receiver, j), false); + visitor->visit(j, element_value); + } } } break; @@ -10394,8 +10393,11 @@ while (j < n) { HandleScope loop_scope(isolate); uint32_t index = indices[j]; - Handle<Object> element = Object::GetElement(isolate, receiver, index); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false); + Handle<Object> element; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, element, + Object::GetElement(isolate, receiver, index), + false); visitor->visit(index, element); // Skip to next different index (i.e., omit duplicates). do { @@ -10468,9 +10470,9 @@ * TODO(581): Fix non-compliance for very large concatenations and update to * following the ECMAScript 5 specification. 
*/ -RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { +RUNTIME_FUNCTION(Runtime_ArrayConcat) { HandleScope handle_scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0); int argument_count = static_cast<int>(arguments->length()->Number()); @@ -10535,12 +10537,13 @@ // dictionary. bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length; - Handle<FixedArray> storage; - if (fast_case) { - if (kind == FAST_DOUBLE_ELEMENTS) { + if (fast_case && kind == FAST_DOUBLE_ELEMENTS) { + Handle<FixedArrayBase> storage = + isolate->factory()->NewFixedDoubleArray(estimate_result_length); + int j = 0; + if (estimate_result_length > 0) { Handle<FixedDoubleArray> double_storage = - isolate->factory()->NewFixedDoubleArray(estimate_result_length); - int j = 0; + Handle<FixedDoubleArray>::cast(storage); bool failure = false; for (int i = 0; i < argument_count; i++) { Handle<Object> obj(elements->get(i), isolate); @@ -10556,8 +10559,8 @@ switch (array->map()->elements_kind()) { case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { - // Empty fixed array indicates that there are no elements. - if (array->elements()->IsFixedArray()) break; + // Empty array is FixedArray but not FixedDoubleArray. 
+ if (length == 0) break; FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements()); for (uint32_t i = 0; i < length; i++) { @@ -10588,7 +10591,7 @@ break; } case FAST_HOLEY_ELEMENTS: - ASSERT_EQ(0, length); + DCHECK_EQ(0, length); break; default: UNREACHABLE(); @@ -10596,15 +10599,19 @@ } if (failure) break; } - Handle<JSArray> array = isolate->factory()->NewJSArray(0); - Smi* length = Smi::FromInt(j); - Handle<Map> map; - map = JSObject::GetElementsTransitionMap(array, kind); - array->set_map(*map); - array->set_length(length); - array->set_elements(*double_storage); - return *array; } + Handle<JSArray> array = isolate->factory()->NewJSArray(0); + Smi* length = Smi::FromInt(j); + Handle<Map> map; + map = JSObject::GetElementsTransitionMap(array, kind); + array->set_map(*map); + array->set_length(length); + array->set_elements(*storage); + return *array; + } + + Handle<FixedArray> storage; + if (fast_case) { // The backing storage array must have non-existing elements to preserve // holes across concat operations. storage = isolate->factory()->NewFixedArrayWithHoles( @@ -10614,7 +10621,7 @@ uint32_t at_least_space_for = estimate_nof_elements + (estimate_nof_elements >> 2); storage = Handle<FixedArray>::cast( - isolate->factory()->NewSeededNumberDictionary(at_least_space_for)); + SeededNumberDictionary::New(isolate, at_least_space_for)); } ArrayConcatVisitor visitor(isolate, storage, fast_case); @@ -10624,7 +10631,7 @@ if (obj->IsJSArray()) { Handle<JSArray> array = Handle<JSArray>::cast(obj); if (!IterateElements(isolate, array, &visitor)) { - return Failure::Exception(); + return isolate->heap()->exception(); } } else { visitor.visit(0, obj); @@ -10643,9 +10650,9 @@ // This will not allocate (flatten the string), but it may run // very slowly for very deeply nested ConsStrings. For debugging use only. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) { +RUNTIME_FUNCTION(Runtime_GlobalPrint) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(String, string, 0); ConsStringIteratorOp op; @@ -10664,9 +10671,9 @@ // property. // Returns the number of non-undefined elements collected. // Returns -1 if hole removal is not supported by this method. -RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) { +RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]); return *JSObject::PrepareElementsForSort(object, limit); @@ -10674,44 +10681,63 @@ // Move contents of argument 0 (an array) to argument 1 (an array) -RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSArray, from, 0); - CONVERT_ARG_CHECKED(JSArray, to, 1); - from->ValidateElements(); - to->ValidateElements(); - FixedArrayBase* new_elements = from->elements(); +RUNTIME_FUNCTION(Runtime_MoveArrayContents) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0); + CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1); + JSObject::ValidateElements(from); + JSObject::ValidateElements(to); + + Handle<FixedArrayBase> new_elements(from->elements()); ElementsKind from_kind = from->GetElementsKind(); - MaybeObject* maybe_new_map; - maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind); - Object* new_map; - if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map; - to->set_map_and_elements(Map::cast(new_map), new_elements); + Handle<Map> new_map = JSObject::GetElementsTransitionMap(to, from_kind); + JSObject::SetMapAndElements(to, new_map, new_elements); to->set_length(from->length()); - Object* obj; - { MaybeObject* maybe_obj 
= from->ResetElements(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + + JSObject::ResetElements(from); from->set_length(Smi::FromInt(0)); - to->ValidateElements(); - return to; + + JSObject::ValidateElements(to); + return *to; } // How many elements does this object/array have? -RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) { +RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); + Handle<FixedArrayBase> elements(array->elements(), isolate); SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSObject, object, 0); - HeapObject* elements = object->elements(); if (elements->IsDictionary()) { - int result = SeededNumberDictionary::cast(elements)->NumberOfElements(); + int result = + Handle<SeededNumberDictionary>::cast(elements)->NumberOfElements(); return Smi::FromInt(result); - } else if (object->IsJSArray()) { - return JSArray::cast(object)->length(); } else { - return Smi::FromInt(FixedArray::cast(elements)->length()); + DCHECK(array->length()->IsSmi()); + // For packed elements, we know the exact number of elements + int length = elements->length(); + ElementsKind kind = array->GetElementsKind(); + if (IsFastPackedElementsKind(kind)) { + return Smi::FromInt(length); + } + // For holey elements, take samples from the buffer checking for holes + // to generate the estimate. + const int kNumberOfHoleCheckSamples = 97; + int increment = (length < kNumberOfHoleCheckSamples) + ? 
1 + : static_cast<int>(length / kNumberOfHoleCheckSamples); + ElementsAccessor* accessor = array->GetElementsAccessor(); + int holes = 0; + for (int i = 0; i < length; i += increment) { + if (!accessor->HasElement(array, array, i, elements)) { + ++holes; + } + } + int estimate = static_cast<int>((kNumberOfHoleCheckSamples - holes) / + kNumberOfHoleCheckSamples * length); + return Smi::FromInt(estimate); } } @@ -10721,27 +10747,30 @@ // or undefined) or a number representing the positive length of an interval // starting at index 0. // Intervals can span over some keys that are not in the object. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) { +RUNTIME_FUNCTION(Runtime_GetArrayKeys) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0); CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]); if (array->elements()->IsDictionary()) { Handle<FixedArray> keys = isolate->factory()->empty_fixed_array(); - for (Handle<Object> p = array; - !p->IsNull(); - p = Handle<Object>(p->GetPrototype(isolate), isolate)) { - if (p->IsJSProxy() || JSObject::cast(*p)->HasIndexedInterceptor()) { + for (PrototypeIterator iter(isolate, array, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() || + JSObject::cast(*PrototypeIterator::GetCurrent(iter)) + ->HasIndexedInterceptor()) { // Bail out if we find a proxy or interceptor, likely not worth // collecting keys in that case. 
return *isolate->factory()->NewNumberFromUint(length); } - Handle<JSObject> current = Handle<JSObject>::cast(p); + Handle<JSObject> current = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); Handle<FixedArray> current_keys = - isolate->factory()->NewFixedArray( - current->NumberOfLocalElements(NONE)); - current->GetLocalElementKeys(*current_keys, NONE); - keys = UnionOfKeys(keys, current_keys); + isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE)); + current->GetOwnElementKeys(*current_keys, NONE); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, keys, FixedArray::UnionOfKeys(keys, current_keys)); } // Erase any keys >= length. // TODO(adamk): Remove this step when the contract of %GetArrayKeys @@ -10751,40 +10780,41 @@ } return *isolate->factory()->NewJSArrayWithElements(keys); } else { - ASSERT(array->HasFastSmiOrObjectElements() || - array->HasFastDoubleElements()); + RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() || + array->HasFastDoubleElements()); uint32_t actual_length = static_cast<uint32_t>(array->elements()->length()); return *isolate->factory()->NewNumberFromUint(Min(actual_length, length)); } } -RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) { +RUNTIME_FUNCTION(Runtime_LookupAccessor) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); CONVERT_SMI_ARG_CHECKED(flag, 2); AccessorComponent component = flag == 0 ? 
ACCESSOR_GETTER : ACCESSOR_SETTER; if (!receiver->IsJSObject()) return isolate->heap()->undefined_value(); - Handle<Object> result = - JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component)); return *result; } -#ifdef ENABLE_DEBUGGER_SUPPORT -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) { +RUNTIME_FUNCTION(Runtime_DebugBreak) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); - return Execution::DebugBreakHelper(isolate); + DCHECK(args.length() == 0); + isolate->debug()->HandleDebugBreak(); + return isolate->heap()->undefined_value(); } // Helper functions for wrapping and unwrapping stack frame ids. static Smi* WrapFrameId(StackFrame::Id id) { - ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4))); + DCHECK(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4))); return Smi::FromInt(id >> 2); } @@ -10798,86 +10828,68 @@ // args[0]: debug event listener function to set or null or undefined for // clearing the event listener function // args[1]: object supplied during callback -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) { +RUNTIME_FUNCTION(Runtime_SetDebugEventListener) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); RUNTIME_ASSERT(args[0]->IsJSFunction() || args[0]->IsUndefined() || args[0]->IsNull()); - Handle<Object> callback = args.at<Object>(0); - Handle<Object> data = args.at<Object>(1); - isolate->debugger()->SetEventListener(callback, data); + CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, data, 1); + isolate->debug()->SetEventListener(callback, data); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) { +RUNTIME_FUNCTION(Runtime_Break) { SealHandleScope shs(isolate); - 
ASSERT(args.length() == 0); - isolate->stack_guard()->DebugBreak(); + DCHECK(args.length() == 0); + isolate->stack_guard()->RequestDebugBreak(); return isolate->heap()->undefined_value(); } -static MaybeObject* DebugLookupResultValue(Heap* heap, - Object* receiver, - Name* name, - LookupResult* result, - bool* caught_exception) { - Object* value; +static Handle<Object> DebugLookupResultValue(Isolate* isolate, + Handle<Object> receiver, + Handle<Name> name, + LookupResult* result, + bool* has_caught = NULL) { + Handle<Object> value = isolate->factory()->undefined_value(); + if (!result->IsFound()) return value; switch (result->type()) { case NORMAL: - value = result->holder()->GetNormalizedProperty(result); - if (value->IsTheHole()) { - return heap->undefined_value(); - } - return value; - case FIELD: { - Object* value; - MaybeObject* maybe_value = - JSObject::cast(result->holder())->FastPropertyAt( - result->representation(), - result->GetFieldIndex().field_index()); - if (!maybe_value->To(&value)) return maybe_value; - if (value->IsTheHole()) { - return heap->undefined_value(); - } - return value; - } + return JSObject::GetNormalizedProperty(handle(result->holder(), isolate), + result); + case FIELD: + return JSObject::FastPropertyAt(handle(result->holder(), isolate), + result->representation(), + result->GetFieldIndex()); case CONSTANT: - return result->GetConstant(); + return handle(result->GetConstant(), isolate); case CALLBACKS: { - Object* structure = result->GetCallbackObject(); - if (structure->IsForeign() || structure->IsAccessorInfo()) { - Isolate* isolate = heap->isolate(); - HandleScope scope(isolate); - Handle<Object> value = JSObject::GetPropertyWithCallback( - handle(result->holder(), isolate), - handle(receiver, isolate), - handle(structure, isolate), - handle(name, isolate)); - if (value.is_null()) { - MaybeObject* exception = heap->isolate()->pending_exception(); - heap->isolate()->clear_pending_exception(); - if (caught_exception != NULL) 
*caught_exception = true; - return exception; + Handle<Object> structure(result->GetCallbackObject(), isolate); + DCHECK(!structure->IsForeign()); + if (structure->IsAccessorInfo()) { + MaybeHandle<Object> obj = JSObject::GetPropertyWithAccessor( + receiver, name, handle(result->holder(), isolate), structure); + if (!obj.ToHandle(&value)) { + value = handle(isolate->pending_exception(), isolate); + isolate->clear_pending_exception(); + if (has_caught != NULL) *has_caught = true; + return value; } - return *value; - } else { - return heap->undefined_value(); } + break; } case INTERCEPTOR: - case TRANSITION: - return heap->undefined_value(); case HANDLER: + break; case NONEXISTENT: UNREACHABLE(); - return heap->undefined_value(); + break; } - UNREACHABLE(); // keep the compiler happy - return heap->undefined_value(); + return value; } @@ -10893,10 +10905,10 @@ // 4: Setter function if defined // Items 2-4 are only filled if the property has either a getter or a setter // defined through __defineGetter__ and/or __defineSetter__. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) { +RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); @@ -10908,25 +10920,19 @@ // could have the assumption that its own native context is the current // context and not some internal debugger context. SaveContext save(isolate); - if (isolate->debug()->InDebugger()) { + if (isolate->debug()->in_debug_scope()) { isolate->set_context(*isolate->debug()->debugger_entry()->GetContext()); } - // Skip the global proxy as it has no properties and always delegates to the - // real global object. - if (obj->IsJSGlobalProxy()) { - obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype())); - } - - // Check if the name is trivially convertible to an index and get the element // if so. 
uint32_t index; if (name->AsArrayIndex(&index)) { Handle<FixedArray> details = isolate->factory()->NewFixedArray(2); - Handle<Object> element_or_char = - Runtime::GetElementOrCharAt(isolate, obj, index); - RETURN_IF_EMPTY_HANDLE(isolate, element_or_char); + Handle<Object> element_or_char; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, element_or_char, + Runtime::GetElementOrCharAt(isolate, obj, index)); details->set(0, *element_or_char); details->set( 1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi()); @@ -10934,13 +10940,16 @@ } // Find the number of objects making up this. - int length = LocalPrototypeChainLength(*obj); + int length = OwnPrototypeChainLength(*obj); - // Try local lookup on each of the objects. - Handle<JSObject> jsproto = obj; + // Try own lookup on each of the objects. + PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER); for (int i = 0; i < length; i++) { + DCHECK(!iter.IsAtEnd()); + Handle<JSObject> jsproto = + Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); LookupResult result(isolate); - jsproto->LocalLookup(*name, &result); + jsproto->LookupOwn(name, &result); if (result.IsFound()) { // LookupResult is not GC safe as it holds raw object pointers. // GC can happen later in this code so put the required fields into @@ -10950,66 +10959,55 @@ result_callback_obj = Handle<Object>(result.GetCallbackObject(), isolate); } - Smi* property_details = result.GetPropertyDetails().AsSmi(); - // DebugLookupResultValue can cause GC so details from LookupResult needs - // to be copied to handles before this. 
- bool caught_exception = false; - Object* raw_value; - { MaybeObject* maybe_raw_value = - DebugLookupResultValue(isolate->heap(), *obj, *name, - &result, &caught_exception); - if (!maybe_raw_value->ToObject(&raw_value)) return maybe_raw_value; - } - Handle<Object> value(raw_value, isolate); + + + bool has_caught = false; + Handle<Object> value = DebugLookupResultValue( + isolate, obj, name, &result, &has_caught); // If the callback object is a fixed array then it contains JavaScript // getter and/or setter. - bool hasJavaScriptAccessors = result.IsPropertyCallbacks() && - result_callback_obj->IsAccessorPair(); + bool has_js_accessors = result.IsPropertyCallbacks() && + result_callback_obj->IsAccessorPair(); Handle<FixedArray> details = - isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2); + isolate->factory()->NewFixedArray(has_js_accessors ? 5 : 2); details->set(0, *value); - details->set(1, property_details); - if (hasJavaScriptAccessors) { + details->set(1, result.GetPropertyDetails().AsSmi()); + if (has_js_accessors) { AccessorPair* accessors = AccessorPair::cast(*result_callback_obj); - details->set(2, isolate->heap()->ToBoolean(caught_exception)); + details->set(2, isolate->heap()->ToBoolean(has_caught)); details->set(3, accessors->GetComponent(ACCESSOR_GETTER)); details->set(4, accessors->GetComponent(ACCESSOR_SETTER)); } return *isolate->factory()->NewJSArrayWithElements(details); } - if (i < length - 1) { - jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype())); - } + iter.Advance(); } return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) { +RUNTIME_FUNCTION(Runtime_DebugGetProperty) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); LookupResult result(isolate); - obj->Lookup(*name, &result); - if (result.IsFound()) { - return 
DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL); - } - return isolate->heap()->undefined_value(); + obj->Lookup(name, &result); + return *DebugLookupResultValue(isolate, obj, name, &result); } // Return the property type calculated from the property details. // args[0]: smi with property details. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) { +RUNTIME_FUNCTION(Runtime_DebugPropertyTypeFromDetails) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_PROPERTY_DETAILS_CHECKED(details, 0); return Smi::FromInt(static_cast<int>(details.type())); } @@ -11017,9 +11015,9 @@ // Return the property attribute calculated from the property details. // args[0]: smi with property details. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) { +RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_PROPERTY_DETAILS_CHECKED(details, 0); return Smi::FromInt(static_cast<int>(details.attributes())); } @@ -11027,9 +11025,9 @@ // Return the property insertion index calculated from the property details. // args[0]: smi with property details. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) { +RUNTIME_FUNCTION(Runtime_DebugPropertyIndexFromDetails) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_PROPERTY_DETAILS_CHECKED(details, 0); // TODO(verwaest): Depends on the type of details. return Smi::FromInt(details.dictionary_index()); @@ -11039,17 +11037,16 @@ // Return property value from named interceptor. 
// args[0]: object // args[1]: property name -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) { +RUNTIME_FUNCTION(Runtime_DebugNamedInterceptorPropertyValue) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); RUNTIME_ASSERT(obj->HasNamedInterceptor()); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); - PropertyAttributes attributes; - Handle<Object> result = - JSObject::GetPropertyWithInterceptor(obj, obj, name, &attributes); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, JSObject::GetProperty(obj, name)); return *result; } @@ -11057,43 +11054,40 @@ // Return element value from indexed interceptor. // args[0]: object // args[1]: index -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) { +RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); RUNTIME_ASSERT(obj->HasIndexedInterceptor()); CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]); - Handle<Object> result = JSObject::GetElementWithInterceptor(obj, obj, index); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, JSObject::GetElementWithInterceptor(obj, obj, index)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) { +static bool CheckExecutionState(Isolate* isolate, int break_id) { + return !isolate->debug()->debug_context().is_null() && + isolate->debug()->break_id() != 0 && + isolate->debug()->break_id() == break_id; +} + + +RUNTIME_FUNCTION(Runtime_CheckExecutionState) { SealHandleScope shs(isolate); - ASSERT(args.length() >= 1); + DCHECK(args.length() == 1); CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); - // Check that the break id is valid. 
- if (isolate->debug()->break_id() == 0 || - break_id != isolate->debug()->break_id()) { - return isolate->Throw( - isolate->heap()->illegal_execution_state_string()); - } - + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); return isolate->heap()->true_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) { +RUNTIME_FUNCTION(Runtime_GetFrameCount) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - - // Check arguments. - Object* result; - { MaybeObject* maybe_result = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + DCHECK(args.length() == 1); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); // Count all frames which are relevant to debugging stack trace. int n = 0; @@ -11104,7 +11098,12 @@ } for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) { - n += it.frame()->GetInlineCount(); + List<FrameSummary> frames(FLAG_max_inlining_levels + 1); + it.frame()->Summarize(&frames); + for (int i = frames.length() - 1; i >= 0; i--) { + // Omit functions from native scripts. + if (!frames[i].function()->IsFromNativeScript()) n++; + } } return Smi::FromInt(n); } @@ -11169,10 +11168,10 @@ // To inspect all the provided arguments the frame might need to be // replaced with the arguments frame. 
void SetArgumentsFrame(JavaScriptFrame* frame) { - ASSERT(has_adapted_arguments_); + DCHECK(has_adapted_arguments_); frame_ = frame; is_optimized_ = frame_->is_optimized(); - ASSERT(!is_optimized_); + DCHECK(!is_optimized_); } private: @@ -11205,11 +11204,37 @@ while (save != NULL && !save->IsBelowFrame(frame)) { save = save->prev(); } - ASSERT(save != NULL); + DCHECK(save != NULL); return save; } +RUNTIME_FUNCTION(Runtime_IsOptimized) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 0); + JavaScriptFrameIterator it(isolate); + JavaScriptFrame* frame = it.frame(); + return isolate->heap()->ToBoolean(frame->is_optimized()); +} + + +// Advances the iterator to the frame that matches the index and returns the +// inlined frame index, or -1 if not found. Skips native JS functions. +static int FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index) { + int count = -1; + for (; !it->done(); it->Advance()) { + List<FrameSummary> frames(FLAG_max_inlining_levels + 1); + it->frame()->Summarize(&frames); + for (int i = frames.length() - 1; i >= 0; i--) { + // Omit functions from native scripts. + if (frames[i].function()->IsFromNativeScript()) continue; + if (++count == index) return i; + } + } + return -1; +} + + // Return an array with frame details // args[0]: number: break id // args[1]: number: frame index @@ -11227,16 +11252,12 @@ // Arguments name, value // Locals name, value // Return value if any -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { +RUNTIME_FUNCTION(Runtime_GetFrameDetails) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. 
- Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]); Heap* heap = isolate->heap(); @@ -11247,22 +11268,13 @@ return heap->undefined_value(); } - int count = 0; JavaScriptFrameIterator it(isolate, id); - for (; !it.done(); it.Advance()) { - if (index < count + it.frame()->GetInlineCount()) break; - count += it.frame()->GetInlineCount(); - } - if (it.done()) return heap->undefined_value(); + // Inlined frame index in optimized frame, starting from outer function. + int inlined_jsframe_index = FindIndexedNonNativeFrame(&it, index); + if (inlined_jsframe_index == -1) return heap->undefined_value(); - bool is_optimized = it.frame()->is_optimized(); - - int inlined_jsframe_index = 0; // Inlined frame index in optimized frame. - if (is_optimized) { - inlined_jsframe_index = - it.frame()->GetInlineCount() - (index - count) - 1; - } FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate); + bool is_optimized = it.frame()->is_optimized(); // Traverse the saved contexts chain to find the active context for the // selected frame. @@ -11281,36 +11293,48 @@ Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction())); Handle<SharedFunctionInfo> shared(function->shared()); Handle<ScopeInfo> scope_info(shared->scope_info()); - ASSERT(*scope_info != ScopeInfo::Empty(isolate)); + DCHECK(*scope_info != ScopeInfo::Empty(isolate)); // Get the locals names and values into a temporary array. - // - // TODO(1240907): Hide compiler-introduced stack variables - // (e.g. .result)? For users of the debugger, they will probably be - // confusing. + int local_count = scope_info->LocalCount(); + for (int slot = 0; slot < scope_info->LocalCount(); ++slot) { + // Hide compiler-introduced temporary variables, whether on the stack or on + // the context. 
+ if (scope_info->LocalIsSynthetic(slot)) + local_count--; + } + Handle<FixedArray> locals = - isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2); + isolate->factory()->NewFixedArray(local_count * 2); // Fill in the values of the locals. + int local = 0; int i = 0; for (; i < scope_info->StackLocalCount(); ++i) { // Use the value from the stack. - locals->set(i * 2, scope_info->LocalName(i)); - locals->set(i * 2 + 1, frame_inspector.GetExpression(i)); + if (scope_info->LocalIsSynthetic(i)) + continue; + locals->set(local * 2, scope_info->LocalName(i)); + locals->set(local * 2 + 1, frame_inspector.GetExpression(i)); + local++; } - if (i < scope_info->LocalCount()) { + if (local < local_count) { // Get the context containing declarations. Handle<Context> context( Context::cast(it.frame()->context())->declaration_context()); for (; i < scope_info->LocalCount(); ++i) { + if (scope_info->LocalIsSynthetic(i)) + continue; Handle<String> name(scope_info->LocalName(i)); VariableMode mode; InitializationFlag init_flag; - locals->set(i * 2, *name); - int context_slot_index = - scope_info->ContextSlotIndex(*name, &mode, &init_flag); + MaybeAssignedFlag maybe_assigned_flag; + locals->set(local * 2, *name); + int context_slot_index = ScopeInfo::ContextSlotIndex( + scope_info, name, &mode, &init_flag, &maybe_assigned_flag); Object* value = context->get(context_slot_index); - locals->set(i * 2 + 1, value); + locals->set(local * 2 + 1, value); + local++; } } @@ -11371,7 +11395,7 @@ // Calculate the size of the result. int details_size = kFrameDetailsFirstDynamicIndex + - 2 * (argument_count + scope_info->LocalCount()) + + 2 * (argument_count + local_count) + (at_return ? 1 : 0); Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size); @@ -11386,7 +11410,7 @@ // Add the locals count details->set(kFrameDetailsLocalCountIndex, - Smi::FromInt(scope_info->LocalCount())); + Smi::FromInt(local_count)); // Add the source position. 
if (position != RelocInfo::kNoPosition) { @@ -11437,7 +11461,7 @@ } // Add locals name and value from the temporary copy from the function frame. - for (int i = 0; i < scope_info->LocalCount() * 2; i++) { + for (int i = 0; i < local_count * 2; i++) { details->set(details_index++, locals->get(i)); } @@ -11461,33 +11485,36 @@ // native context. it.Advance(); if (receiver->IsUndefined()) { - Context* context = function->context(); - receiver = handle(context->global_object()->global_receiver()); + receiver = handle(function->global_proxy()); } else { - ASSERT(!receiver->IsNull()); + DCHECK(!receiver->IsNull()); Context* context = Context::cast(it.frame()->context()); Handle<Context> native_context(Context::cast(context->native_context())); - receiver = isolate->factory()->ToObject(receiver, native_context); + receiver = Object::ToObject( + isolate, receiver, native_context).ToHandleChecked(); } } details->set(kFrameDetailsReceiverIndex, *receiver); - ASSERT_EQ(details_size, details_index); + DCHECK_EQ(details_size, details_index); return *isolate->factory()->NewJSArrayWithElements(details); } static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info, - int index) { + Handle<String> parameter_name) { VariableMode mode; - InitializationFlag flag; - return info->ContextSlotIndex(info->ParameterName(index), &mode, &flag) != -1; + InitializationFlag init_flag; + MaybeAssignedFlag maybe_assigned_flag; + return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag, + &maybe_assigned_flag) != -1; } // Create a plain JSObject which materializes the local scope for the specified // frame. -static Handle<JSObject> MaterializeStackLocalsWithFrameInspector( +MUST_USE_RESULT +static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector( Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function, @@ -11498,32 +11525,33 @@ // First fill all parameters. 
for (int i = 0; i < scope_info->ParameterCount(); ++i) { // Do not materialize the parameter if it is shadowed by a context local. - if (ParameterIsShadowedByContextLocal(scope_info, i)) continue; + Handle<String> name(scope_info->ParameterName(i)); + if (ParameterIsShadowedByContextLocal(scope_info, name)) continue; HandleScope scope(isolate); Handle<Object> value(i < frame_inspector->GetParametersCount() ? frame_inspector->GetParameter(i) : isolate->heap()->undefined_value(), isolate); - ASSERT(!value->IsTheHole()); - Handle<String> name(scope_info->ParameterName(i)); + DCHECK(!value->IsTheHole()); - RETURN_IF_EMPTY_HANDLE_VALUE( + RETURN_ON_EXCEPTION( isolate, - Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY), - Handle<JSObject>()); + Runtime::SetObjectProperty(isolate, target, name, value, SLOPPY), + JSObject); } // Second fill all stack locals. for (int i = 0; i < scope_info->StackLocalCount(); ++i) { + if (scope_info->LocalIsSynthetic(i)) continue; Handle<String> name(scope_info->StackLocalName(i)); Handle<Object> value(frame_inspector->GetExpression(i), isolate); if (value->IsTheHole()) continue; - RETURN_IF_EMPTY_HANDLE_VALUE( + RETURN_ON_EXCEPTION( isolate, - Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY), - Handle<JSObject>()); + Runtime::SetObjectProperty(isolate, target, name, value, SLOPPY), + JSObject); } return target; @@ -11548,30 +11576,34 @@ // Parameters. for (int i = 0; i < scope_info->ParameterCount(); ++i) { // Shadowed parameters were not materialized. 
- if (ParameterIsShadowedByContextLocal(scope_info, i)) continue; + Handle<String> name(scope_info->ParameterName(i)); + if (ParameterIsShadowedByContextLocal(scope_info, name)) continue; - ASSERT(!frame->GetParameter(i)->IsTheHole()); + DCHECK(!frame->GetParameter(i)->IsTheHole()); HandleScope scope(isolate); - Handle<String> name(scope_info->ParameterName(i)); - Handle<Object> value = GetProperty(isolate, target, name); + Handle<Object> value = + Object::GetPropertyOrElement(target, name).ToHandleChecked(); frame->SetParameterValue(i, *value); } // Stack locals. for (int i = 0; i < scope_info->StackLocalCount(); ++i) { + if (scope_info->LocalIsSynthetic(i)) continue; if (frame->GetExpression(i)->IsTheHole()) continue; HandleScope scope(isolate); - Handle<Object> value = GetProperty( - isolate, target, Handle<String>(scope_info->StackLocalName(i))); + Handle<Object> value = Object::GetPropertyOrElement( + target, + handle(scope_info->StackLocalName(i), isolate)).ToHandleChecked(); frame->SetExpression(i, *value); } } -static Handle<JSObject> MaterializeLocalContext(Isolate* isolate, - Handle<JSObject> target, - Handle<JSFunction> function, - JavaScriptFrame* frame) { +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext( + Isolate* isolate, + Handle<JSObject> target, + Handle<JSFunction> function, + JavaScriptFrame* frame) { HandleScope scope(isolate); Handle<SharedFunctionInfo> shared(function->shared()); Handle<ScopeInfo> scope_info(shared->scope_info()); @@ -11583,7 +11615,7 @@ Handle<Context> function_context(frame_context->declaration_context()); if (!ScopeInfo::CopyContextLocalsToScopeObject( scope_info, function_context, target)) { - return Handle<JSObject>(); + return MaybeHandle<JSObject>(); } // Finally copy any properties from the function context extension. 
@@ -11592,24 +11624,23 @@ if (function_context->has_extension() && !function_context->IsNativeContext()) { Handle<JSObject> ext(JSObject::cast(function_context->extension())); - bool threw = false; - Handle<FixedArray> keys = - GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw); - if (threw) return Handle<JSObject>(); + Handle<FixedArray> keys; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, keys, + JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS), + JSObject); for (int i = 0; i < keys->length(); i++) { // Names of variables introduced by eval are strings. - ASSERT(keys->get(i)->IsString()); + DCHECK(keys->get(i)->IsString()); Handle<String> key(String::cast(keys->get(i))); - RETURN_IF_EMPTY_HANDLE_VALUE( + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, Object::GetPropertyOrElement(ext, key), JSObject); + RETURN_ON_EXCEPTION( isolate, - Runtime::SetObjectProperty(isolate, - target, - key, - GetProperty(isolate, ext, key), - NONE, - SLOPPY), - Handle<JSObject>()); + Runtime::SetObjectProperty(isolate, target, key, value, SLOPPY), + JSObject); } } } @@ -11618,7 +11649,7 @@ } -static Handle<JSObject> MaterializeLocalScope( +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope( Isolate* isolate, JavaScriptFrame* frame, int inlined_jsframe_index) { @@ -11627,9 +11658,11 @@ Handle<JSObject> local_scope = isolate->factory()->NewJSObject(isolate->object_function()); - local_scope = MaterializeStackLocalsWithFrameInspector( - isolate, local_scope, function, &frame_inspector); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, local_scope, Handle<JSObject>()); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, local_scope, + MaterializeStackLocalsWithFrameInspector( + isolate, local_scope, function, &frame_inspector), + JSObject); return MaterializeLocalContext(isolate, local_scope, function, frame); } @@ -11643,11 +11676,12 @@ Handle<Object> new_value) { for (int i = 0; i < scope_info->ContextLocalCount(); i++) { Handle<String> 
next_name(scope_info->ContextLocalName(i)); - if (variable_name->Equals(*next_name)) { + if (String::Equals(variable_name, next_name)) { VariableMode mode; InitializationFlag init_flag; - int context_index = - scope_info->ContextSlotIndex(*next_name, &mode, &init_flag); + MaybeAssignedFlag maybe_assigned_flag; + int context_index = ScopeInfo::ContextSlotIndex( + scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag); context->set(context_index, *new_value); return true; } @@ -11675,7 +11709,8 @@ // Parameters. for (int i = 0; i < scope_info->ParameterCount(); ++i) { - if (scope_info->ParameterName(i)->Equals(*variable_name)) { + HandleScope scope(isolate); + if (String::Equals(handle(scope_info->ParameterName(i)), variable_name)) { frame->SetParameterValue(i, *new_value); // Argument might be shadowed in heap context, don't stop here. default_result = true; @@ -11684,7 +11719,8 @@ // Stack locals. for (int i = 0; i < scope_info->StackLocalCount(); ++i) { - if (scope_info->StackLocalName(i)->Equals(*variable_name)) { + HandleScope scope(isolate); + if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) { frame->SetExpression(i, *new_value); return true; } @@ -11705,11 +11741,13 @@ !function_context->IsNativeContext()) { Handle<JSObject> ext(JSObject::cast(function_context->extension())); - if (JSReceiver::HasProperty(ext, variable_name)) { + Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name); + DCHECK(maybe.has_value); + if (maybe.value) { // We don't expect this to do anything except replacing // property value. Runtime::SetObjectProperty(isolate, ext, variable_name, new_value, - NONE, SLOPPY); + SLOPPY).Assert(); return true; } } @@ -11722,9 +11760,10 @@ // Create a plain JSObject which materializes the closure content for the // context. 
-static Handle<JSObject> MaterializeClosure(Isolate* isolate, - Handle<Context> context) { - ASSERT(context->IsFunctionContext()); +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure( + Isolate* isolate, + Handle<Context> context) { + DCHECK(context->IsFunctionContext()); Handle<SharedFunctionInfo> shared(context->closure()->shared()); Handle<ScopeInfo> scope_info(shared->scope_info()); @@ -11737,28 +11776,30 @@ // Fill all context locals to the context extension. if (!ScopeInfo::CopyContextLocalsToScopeObject( scope_info, context, closure_scope)) { - return Handle<JSObject>(); + return MaybeHandle<JSObject>(); } // Finally copy any properties from the function context extension. This will // be variables introduced by eval. if (context->has_extension()) { Handle<JSObject> ext(JSObject::cast(context->extension())); - bool threw = false; - Handle<FixedArray> keys = - GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw); - if (threw) return Handle<JSObject>(); + Handle<FixedArray> keys; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, keys, + JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS), JSObject); for (int i = 0; i < keys->length(); i++) { + HandleScope scope(isolate); // Names of variables introduced by eval are strings. 
- ASSERT(keys->get(i)->IsString()); + DCHECK(keys->get(i)->IsString()); Handle<String> key(String::cast(keys->get(i))); - RETURN_IF_EMPTY_HANDLE_VALUE( + Handle<Object> value; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, value, Object::GetPropertyOrElement(ext, key), JSObject); + RETURN_ON_EXCEPTION( isolate, - Runtime::SetObjectProperty(isolate, closure_scope, key, - GetProperty(isolate, ext, key), - NONE, SLOPPY), - Handle<JSObject>()); + Runtime::DefineObjectProperty(closure_scope, key, value, NONE), + JSObject); } } @@ -11771,7 +11812,7 @@ Handle<Context> context, Handle<String> variable_name, Handle<Object> new_value) { - ASSERT(context->IsFunctionContext()); + DCHECK(context->IsFunctionContext()); Handle<SharedFunctionInfo> shared(context->closure()->shared()); Handle<ScopeInfo> scope_info(shared->scope_info()); @@ -11786,10 +11827,12 @@ // be variables introduced by eval. if (context->has_extension()) { Handle<JSObject> ext(JSObject::cast(context->extension())); - if (JSReceiver::HasProperty(ext, variable_name)) { + Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name); + DCHECK(maybe.has_value); + if (maybe.value) { // We don't expect this to do anything except replacing property value. - Runtime::SetObjectProperty(isolate, ext, variable_name, new_value, - NONE, SLOPPY); + Runtime::DefineObjectProperty( + ext, variable_name, new_value, NONE).Assert(); return true; } } @@ -11800,19 +11843,19 @@ // Create a plain JSObject which materializes the scope for the specified // catch context. 
-static Handle<JSObject> MaterializeCatchScope(Isolate* isolate, - Handle<Context> context) { - ASSERT(context->IsCatchContext()); +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeCatchScope( + Isolate* isolate, + Handle<Context> context) { + DCHECK(context->IsCatchContext()); Handle<String> name(String::cast(context->extension())); Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX), isolate); Handle<JSObject> catch_scope = isolate->factory()->NewJSObject(isolate->object_function()); - RETURN_IF_EMPTY_HANDLE_VALUE( + RETURN_ON_EXCEPTION( isolate, - Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object, - NONE, SLOPPY), - Handle<JSObject>()); + Runtime::DefineObjectProperty(catch_scope, name, thrown_object, NONE), + JSObject); return catch_scope; } @@ -11821,9 +11864,9 @@ Handle<Context> context, Handle<String> variable_name, Handle<Object> new_value) { - ASSERT(context->IsCatchContext()); + DCHECK(context->IsCatchContext()); Handle<String> name(String::cast(context->extension())); - if (!name->Equals(*variable_name)) { + if (!String::Equals(name, variable_name)) { return false; } context->set(Context::THROWN_OBJECT_INDEX, *new_value); @@ -11833,10 +11876,10 @@ // Create a plain JSObject which materializes the block scope for the specified // block context. -static Handle<JSObject> MaterializeBlockScope( +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope( Isolate* isolate, Handle<Context> context) { - ASSERT(context->IsBlockContext()); + DCHECK(context->IsBlockContext()); Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension())); // Allocate and initialize a JSObject with all the arguments, stack locals @@ -11847,7 +11890,7 @@ // Fill all context locals. 
if (!ScopeInfo::CopyContextLocalsToScopeObject( scope_info, context, block_scope)) { - return Handle<JSObject>(); + return MaybeHandle<JSObject>(); } return block_scope; @@ -11856,10 +11899,10 @@ // Create a plain JSObject which materializes the module scope for the specified // module context. -static Handle<JSObject> MaterializeModuleScope( +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope( Isolate* isolate, Handle<Context> context) { - ASSERT(context->IsModuleContext()); + DCHECK(context->IsModuleContext()); Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension())); // Allocate and initialize a JSObject with all the members of the debugged @@ -11870,7 +11913,7 @@ // Fill all context locals. if (!ScopeInfo::CopyContextLocalsToScopeObject( scope_info, context, module_scope)) { - return Handle<JSObject>(); + return MaybeHandle<JSObject>(); } return module_scope; @@ -11968,7 +12011,7 @@ if (scope_info->scope_type() == GLOBAL_SCOPE) { info.MarkAsGlobal(); } else { - ASSERT(scope_info->scope_type() == EVAL_SCOPE); + DCHECK(scope_info->scope_type() == EVAL_SCOPE); info.MarkAsEval(); info.SetContext(Handle<Context>(function_->context())); } @@ -12002,7 +12045,7 @@ // More scopes? bool Done() { - ASSERT(!failed_); + DCHECK(!failed_); return context_.is_null(); } @@ -12010,11 +12053,11 @@ // Move to the next scope. void Next() { - ASSERT(!failed_); + DCHECK(!failed_); ScopeType scope_type = Type(); if (scope_type == ScopeTypeGlobal) { // The global scope is always the last in the chain. 
- ASSERT(context_->IsNativeContext()); + DCHECK(context_->IsNativeContext()); context_ = Handle<Context>(); return; } @@ -12022,7 +12065,7 @@ context_ = Handle<Context>(context_->previous(), isolate_); } else { if (nested_scope_chain_.last()->HasContext()) { - ASSERT(context_->previous() != NULL); + DCHECK(context_->previous() != NULL); context_ = Handle<Context>(context_->previous(), isolate_); } nested_scope_chain_.RemoveLast(); @@ -12031,28 +12074,28 @@ // Return the type of the current scope. ScopeType Type() { - ASSERT(!failed_); + DCHECK(!failed_); if (!nested_scope_chain_.is_empty()) { Handle<ScopeInfo> scope_info = nested_scope_chain_.last(); switch (scope_info->scope_type()) { case FUNCTION_SCOPE: - ASSERT(context_->IsFunctionContext() || + DCHECK(context_->IsFunctionContext() || !scope_info->HasContext()); return ScopeTypeLocal; case MODULE_SCOPE: - ASSERT(context_->IsModuleContext()); + DCHECK(context_->IsModuleContext()); return ScopeTypeModule; case GLOBAL_SCOPE: - ASSERT(context_->IsNativeContext()); + DCHECK(context_->IsNativeContext()); return ScopeTypeGlobal; case WITH_SCOPE: - ASSERT(context_->IsWithContext()); + DCHECK(context_->IsWithContext()); return ScopeTypeWith; case CATCH_SCOPE: - ASSERT(context_->IsCatchContext()); + DCHECK(context_->IsCatchContext()); return ScopeTypeCatch; case BLOCK_SCOPE: - ASSERT(!scope_info->HasContext() || + DCHECK(!scope_info->HasContext() || context_->IsBlockContext()); return ScopeTypeBlock; case EVAL_SCOPE: @@ -12060,7 +12103,7 @@ } } if (context_->IsNativeContext()) { - ASSERT(context_->global_object()->IsGlobalObject()); + DCHECK(context_->global_object()->IsGlobalObject()); return ScopeTypeGlobal; } if (context_->IsFunctionContext()) { @@ -12075,19 +12118,19 @@ if (context_->IsModuleContext()) { return ScopeTypeModule; } - ASSERT(context_->IsWithContext()); + DCHECK(context_->IsWithContext()); return ScopeTypeWith; } // Return the JavaScript object with the content of the current scope. 
- Handle<JSObject> ScopeObject() { - ASSERT(!failed_); + MaybeHandle<JSObject> ScopeObject() { + DCHECK(!failed_); switch (Type()) { case ScopeIterator::ScopeTypeGlobal: return Handle<JSObject>(CurrentContext()->global_object()); case ScopeIterator::ScopeTypeLocal: // Materialize the content of the local scope into a JSObject. - ASSERT(nested_scope_chain_.length() == 1); + DCHECK(nested_scope_chain_.length() == 1); return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_); case ScopeIterator::ScopeTypeWith: // Return the with object. @@ -12108,7 +12151,7 @@ bool SetVariableValue(Handle<String> variable_name, Handle<Object> new_value) { - ASSERT(!failed_); + DCHECK(!failed_); switch (Type()) { case ScopeIterator::ScopeTypeGlobal: break; @@ -12134,7 +12177,7 @@ } Handle<ScopeInfo> CurrentScopeInfo() { - ASSERT(!failed_); + DCHECK(!failed_); if (!nested_scope_chain_.is_empty()) { return nested_scope_chain_.last(); } else if (context_->IsBlockContext()) { @@ -12148,7 +12191,7 @@ // Return the context for this scope. For the local context there might not // be an actual context. Handle<Context> CurrentContext() { - ASSERT(!failed_); + DCHECK(!failed_); if (Type() == ScopeTypeGlobal || nested_scope_chain_.is_empty()) { return context_; @@ -12162,22 +12205,23 @@ #ifdef DEBUG // Debug print of the content of the current scope. 
void DebugPrint() { - ASSERT(!failed_); + OFStream os(stdout); + DCHECK(!failed_); switch (Type()) { case ScopeIterator::ScopeTypeGlobal: - PrintF("Global:\n"); - CurrentContext()->Print(); + os << "Global:\n"; + CurrentContext()->Print(os); break; case ScopeIterator::ScopeTypeLocal: { - PrintF("Local:\n"); + os << "Local:\n"; function_->shared()->scope_info()->Print(); if (!CurrentContext().is_null()) { - CurrentContext()->Print(); + CurrentContext()->Print(os); if (CurrentContext()->has_extension()) { Handle<Object> extension(CurrentContext()->extension(), isolate_); if (extension->IsJSContextExtensionObject()) { - extension->Print(); + extension->Print(os); } } } @@ -12185,23 +12229,23 @@ } case ScopeIterator::ScopeTypeWith: - PrintF("With:\n"); - CurrentContext()->extension()->Print(); + os << "With:\n"; + CurrentContext()->extension()->Print(os); break; case ScopeIterator::ScopeTypeCatch: - PrintF("Catch:\n"); - CurrentContext()->extension()->Print(); - CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print(); + os << "Catch:\n"; + CurrentContext()->extension()->Print(os); + CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print(os); break; case ScopeIterator::ScopeTypeClosure: - PrintF("Closure:\n"); - CurrentContext()->Print(); + os << "Closure:\n"; + CurrentContext()->Print(os); if (CurrentContext()->has_extension()) { Handle<Object> extension(CurrentContext()->extension(), isolate_); if (extension->IsJSContextExtensionObject()) { - extension->Print(); + extension->Print(os); } } break; @@ -12234,7 +12278,7 @@ // information we get from the context chain but nothing about // completely stack allocated scopes or stack allocated locals. // Or it could be due to stack overflow. 
- ASSERT(isolate_->has_pending_exception()); + DCHECK(isolate_->has_pending_exception()); failed_ = true; } } @@ -12243,16 +12287,12 @@ }; -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) { +RUNTIME_FUNCTION(Runtime_GetScopeCount) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. - Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); // Get the frame where the debugging is performed. @@ -12275,16 +12315,12 @@ // Returns the list of step-in positions (text offset) in a function of the // stack frame in a range from the current debug break position to the end // of the corresponding statement. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) { +RUNTIME_FUNCTION(Runtime_GetStepInPositions) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. - Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); // Get the frame where the debugging is performed. 
@@ -12335,9 +12371,11 @@ if (accept) { if (break_location_iterator.IsStepInLocation(isolate)) { Smi* position_value = Smi::FromInt(break_location_iterator.position()); - JSObject::SetElement(array, len, - Handle<Object>(position_value, isolate), - NONE, SLOPPY); + RETURN_FAILURE_ON_EXCEPTION( + isolate, + JSObject::SetElement(array, len, + Handle<Object>(position_value, isolate), + NONE, SLOPPY)); len++; } } @@ -12357,7 +12395,8 @@ static const int kScopeDetailsSize = 2; -static Handle<JSObject> MaterializeScopeDetails(Isolate* isolate, +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScopeDetails( + Isolate* isolate, ScopeIterator* it) { // Calculate the size of the result. int details_size = kScopeDetailsSize; @@ -12365,8 +12404,9 @@ // Fill in scope details. details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type())); - Handle<JSObject> scope_object = it->ScopeObject(); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, scope_object, Handle<JSObject>()); + Handle<JSObject> scope_object; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, scope_object, it->ScopeObject(), JSObject); details->set(kScopeDetailsObjectIndex, *scope_object); return isolate->factory()->NewJSArrayWithElements(details); @@ -12382,16 +12422,12 @@ // The array returned contains the following information: // 0: Scope type // 1: Scope object -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) { +RUNTIME_FUNCTION(Runtime_GetScopeDetails) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. 
- Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]); @@ -12410,8 +12446,9 @@ if (it.Done()) { return isolate->heap()->undefined_value(); } - Handle<JSObject> details = MaterializeScopeDetails(isolate, &it); - RETURN_IF_EMPTY_HANDLE(isolate, details); + Handle<JSObject> details; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, details, MaterializeScopeDetails(isolate, &it)); return *details; } @@ -12425,16 +12462,12 @@ // The array returned contains arrays with the following information: // 0: Scope type // 1: Scope object -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAllScopesDetails) { +RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) { HandleScope scope(isolate); - ASSERT(args.length() == 3 || args.length() == 4); + DCHECK(args.length() == 3 || args.length() == 4); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. 
- Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); @@ -12452,8 +12485,9 @@ List<Handle<JSObject> > result(4); ScopeIterator it(isolate, frame, inlined_jsframe_index, ignore_nested_scopes); for (; !it.Done(); it.Next()) { - Handle<JSObject> details = MaterializeScopeDetails(isolate, &it); - RETURN_IF_EMPTY_HANDLE(isolate, details); + Handle<JSObject> details; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, details, MaterializeScopeDetails(isolate, &it)); result.Add(details); } @@ -12465,9 +12499,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeCount) { +RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); // Check arguments. CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0); @@ -12482,9 +12516,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) { +RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); // Check arguments. CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0); @@ -12500,8 +12534,9 @@ return isolate->heap()->undefined_value(); } - Handle<JSObject> details = MaterializeScopeDetails(isolate, &it); - RETURN_IF_EMPTY_HANDLE(isolate, details); + Handle<JSObject> details; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, details, MaterializeScopeDetails(isolate, &it)); return *details; } @@ -12528,22 +12563,20 @@ // args[5]: object: new value // // Return true if success and false otherwise -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) { +RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) { HandleScope scope(isolate); - ASSERT(args.length() == 6); + DCHECK(args.length() == 6); // Check arguments. 
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]); CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4); - Handle<Object> new_value = args.at<Object>(5); + CONVERT_ARG_HANDLE_CHECKED(Object, new_value, 5); bool res; if (args[0]->IsNumber()) { - Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); + CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); @@ -12564,9 +12597,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) { +RUNTIME_FUNCTION(Runtime_DebugPrintScopes) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); #ifdef DEBUG // Print the scopes for the top frame. @@ -12582,16 +12615,11 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) { +RUNTIME_FUNCTION(Runtime_GetThreadCount) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - - // Check arguments. - Object* result; - { MaybeObject* maybe_result = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + DCHECK(args.length() == 1); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); // Count all archived V8 threads. int n = 0; @@ -12618,16 +12646,12 @@ // The array returned contains the following information: // 0: Is current thread? // 1: Thread id -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) { +RUNTIME_FUNCTION(Runtime_GetThreadDetails) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. 
- Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]); // Allocate array for result. @@ -12668,9 +12692,9 @@ // Sets the disable break state // args[0]: disable break state -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) { +RUNTIME_FUNCTION(Runtime_SetDisableBreak) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0); isolate->debug()->set_disable_break(disable_break); return isolate->heap()->undefined_value(); @@ -12682,9 +12706,9 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) { +RUNTIME_FUNCTION(Runtime_GetBreakLocations) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0); CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]); @@ -12710,17 +12734,18 @@ // args[0]: function // args[1]: number: break source position (within the function source) // args[2]: number: break point object -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) { +RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]); - RUNTIME_ASSERT(source_position >= 0); - Handle<Object> break_point_object_arg = args.at<Object>(2); + RUNTIME_ASSERT(source_position >= function->shared()->start_position() && + source_position <= function->shared()->end_position()); + CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2); // Set break point. 
- isolate->debug()->SetBreakPoint(function, break_point_object_arg, - &source_position); + RUNTIME_ASSERT(isolate->debug()->SetBreakPoint( + function, break_point_object_arg, &source_position)); return Smi::FromInt(source_position); } @@ -12733,14 +12758,14 @@ // args[1]: number: break source position (within the script source) // args[2]: number, breakpoint position alignment // args[3]: number: break point object -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) { +RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) { HandleScope scope(isolate); - ASSERT(args.length() == 4); + DCHECK(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0); CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]); RUNTIME_ASSERT(source_position >= 0); CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[2]); - Handle<Object> break_point_object_arg = args.at<Object>(3); + CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 3); if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) { return isolate->ThrowIllegalOperation(); @@ -12765,10 +12790,10 @@ // Clear a break point // args[0]: number: break point object -RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) { +RUNTIME_FUNCTION(Runtime_ClearBreakPoint) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - Handle<Object> break_point_object_arg = args.at<Object>(0); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0); // Clear break point. isolate->debug()->ClearBreakPoint(break_point_object_arg); @@ -12780,16 +12805,15 @@ // Change the state of break on exceptions. // args[0]: Enum value indicating whether to affect caught/uncaught exceptions. // args[1]: Boolean indicating on/off. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) { +RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) { HandleScope scope(isolate); - ASSERT(args.length() == 2); - RUNTIME_ASSERT(args[0]->IsNumber()); + DCHECK(args.length() == 2); + CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]); CONVERT_BOOLEAN_ARG_CHECKED(enable, 1); // If the number doesn't match an enum value, the ChangeBreakOnException // function will default to affecting caught exceptions. - ExceptionBreakType type = - static_cast<ExceptionBreakType>(NumberToUint32(args[0])); + ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg); // Update break point state. isolate->debug()->ChangeBreakOnException(type, enable); return isolate->heap()->undefined_value(); @@ -12798,13 +12822,12 @@ // Returns the state of break on exceptions // args[0]: boolean indicating uncaught exceptions -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) { +RUNTIME_FUNCTION(Runtime_IsBreakOnException) { HandleScope scope(isolate); - ASSERT(args.length() == 1); - RUNTIME_ASSERT(args[0]->IsNumber()); + DCHECK(args.length() == 1); + CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]); - ExceptionBreakType type = - static_cast<ExceptionBreakType>(NumberToUint32(args[0])); + ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg); bool result = isolate->debug()->IsBreakOnException(type); return Smi::FromInt(result); } @@ -12815,15 +12838,12 @@ // args[1]: step action from the enumeration StepAction // args[2]: number of times to perform the step, for step out it is the number // of frames to step down. -RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) { +RUNTIME_FUNCTION(Runtime_PrepareStep) { HandleScope scope(isolate); - ASSERT(args.length() == 4); - // Check arguments. 
- Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } + DCHECK(args.length() == 4); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); + if (!args[1]->IsNumber() || !args[2]->IsNumber()) { return isolate->Throw(isolate->heap()->illegal_argument_string()); } @@ -12870,9 +12890,9 @@ // Clear all stepping set by PrepareStep. -RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) { +RUNTIME_FUNCTION(Runtime_ClearStepping) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); isolate->debug()->ClearStepping(); return isolate->heap()->undefined_value(); } @@ -12880,65 +12900,69 @@ // Helper function to find or create the arguments object for // Runtime_DebugEvaluate. -static Handle<JSObject> MaterializeArgumentsObject( +MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeArgumentsObject( Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function) { // Do not materialize the arguments object for eval or top-level code. // Skip if "arguments" is already taken. - if (!function->shared()->is_function() || - JSReceiver::HasLocalProperty(target, - isolate->factory()->arguments_string())) { - return target; - } + if (!function->shared()->is_function()) return target; + Maybe<bool> maybe = JSReceiver::HasOwnProperty( + target, isolate->factory()->arguments_string()); + if (!maybe.has_value) return MaybeHandle<JSObject>(); + if (maybe.value) return target; // FunctionGetArguments can't throw an exception. 
Handle<JSObject> arguments = Handle<JSObject>::cast( Accessors::FunctionGetArguments(function)); - Runtime::SetObjectProperty(isolate, target, - isolate->factory()->arguments_string(), - arguments, - ::NONE, - SLOPPY); + Handle<String> arguments_str = isolate->factory()->arguments_string(); + RETURN_ON_EXCEPTION( + isolate, + Runtime::DefineObjectProperty(target, arguments_str, arguments, NONE), + JSObject); return target; } // Compile and evaluate source for the given context. -static MaybeObject* DebugEvaluate(Isolate* isolate, - Handle<Context> context, - Handle<Object> context_extension, - Handle<Object> receiver, - Handle<String> source) { +static MaybeHandle<Object> DebugEvaluate(Isolate* isolate, + Handle<Context> context, + Handle<Object> context_extension, + Handle<Object> receiver, + Handle<String> source) { if (context_extension->IsJSObject()) { Handle<JSObject> extension = Handle<JSObject>::cast(context_extension); Handle<JSFunction> closure(context->closure(), isolate); context = isolate->factory()->NewWithContext(closure, context, extension); } - Handle<JSFunction> eval_fun = + Handle<JSFunction> eval_fun; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, eval_fun, Compiler::GetFunctionFromEval(source, context, SLOPPY, NO_PARSE_RESTRICTION, - RelocInfo::kNoPosition); - RETURN_IF_EMPTY_HANDLE(isolate, eval_fun); - - bool pending_exception; - Handle<Object> result = Execution::Call( - isolate, eval_fun, receiver, 0, NULL, &pending_exception); + RelocInfo::kNoPosition), + Object); - if (pending_exception) return Failure::Exception(); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, eval_fun, receiver, 0, NULL), + Object); // Skip the global proxy as it has no properties and always delegates to the // real global object. 
if (result->IsJSGlobalProxy()) { - result = Handle<JSObject>(JSObject::cast(result->GetPrototype(isolate))); + PrototypeIterator iter(isolate, result); + // TODO(verwaest): This will crash when the global proxy is detached. + result = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); } // Clear the oneshot breakpoints so that the debugger does not step further. isolate->debug()->ClearStepping(); - return *result; + return result; } @@ -12947,25 +12971,23 @@ // - Parameters and stack-allocated locals need to be materialized. Altered // values need to be written back to the stack afterwards. // - The arguments object needs to materialized. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { +RUNTIME_FUNCTION(Runtime_DebugEvaluate) { HandleScope scope(isolate); // Check the execution state and decode arguments frame and source to be // evaluated. - ASSERT(args.length() == 6); - Object* check_result; - { MaybeObject* maybe_result = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_result->ToObject(&check_result)) return maybe_result; - } + DCHECK(args.length() == 6); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); + CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); CONVERT_ARG_HANDLE_CHECKED(String, source, 3); CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4); - Handle<Object> context_extension(args[5], isolate); + CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 5); // Handle the processing of break. - DisableBreak disable_break_save(isolate, disable_break); + DisableBreak disable_break_scope(isolate->debug(), disable_break); // Get the frame where the debugging is performed. StackFrame::Id id = UnwrapFrameId(wrapped_id); @@ -12983,30 +13005,29 @@ // Evaluate on the context of the frame. 
Handle<Context> context(Context::cast(frame->context())); - ASSERT(!context.is_null()); + DCHECK(!context.is_null()); // Materialize stack locals and the arguments object. Handle<JSObject> materialized = isolate->factory()->NewJSObject(isolate->object_function()); - materialized = MaterializeStackLocalsWithFrameInspector( - isolate, materialized, function, &frame_inspector); - RETURN_IF_EMPTY_HANDLE(isolate, materialized); - - materialized = MaterializeArgumentsObject(isolate, materialized, function); - RETURN_IF_EMPTY_HANDLE(isolate, materialized); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, materialized, + MaterializeStackLocalsWithFrameInspector( + isolate, materialized, function, &frame_inspector)); + + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, materialized, + MaterializeArgumentsObject(isolate, materialized, function)); // Add the materialized object in a with-scope to shadow the stack locals. context = isolate->factory()->NewWithContext(function, context, materialized); Handle<Object> receiver(frame->receiver(), isolate); - Object* evaluate_result_object; - { MaybeObject* maybe_result = - DebugEvaluate(isolate, context, context_extension, receiver, source); - if (!maybe_result->ToObject(&evaluate_result_object)) return maybe_result; - } - - Handle<Object> result(evaluate_result_object, isolate); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + DebugEvaluate(isolate, context, context_extension, receiver, source)); // Write back potential changes to materialized stack locals to the stack. UpdateStackLocalsFromMaterializedObject( @@ -13016,23 +13037,21 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) { +RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) { HandleScope scope(isolate); // Check the execution state and decode arguments frame and source to be // evaluated. 
- ASSERT(args.length() == 4); - Object* check_result; - { MaybeObject* maybe_result = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_result->ToObject(&check_result)) return maybe_result; - } + DCHECK(args.length() == 4); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); + CONVERT_ARG_HANDLE_CHECKED(String, source, 1); CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2); - Handle<Object> context_extension(args[3], isolate); + CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 3); // Handle the processing of break. - DisableBreak disable_break_save(isolate, disable_break); + DisableBreak disable_break_scope(isolate->debug(), disable_break); // Enter the top context from before the debugger was invoked. SaveContext save(isolate); @@ -13047,14 +13066,18 @@ // Get the native context now set to the top context from before the // debugger was invoked. Handle<Context> context = isolate->native_context(); - Handle<Object> receiver = isolate->global_object(); - return DebugEvaluate(isolate, context, context_extension, receiver, source); + Handle<JSObject> receiver(context->global_proxy()); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + DebugEvaluate(isolate, context, context_extension, receiver, source)); + return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) { +RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); // Fill the script objects. Handle<FixedArray> instances = isolate->debug()->GetLoadedScripts(); @@ -13067,7 +13090,7 @@ // instances->set(i, *GetScriptWrapper(script)) // is unsafe as GetScriptWrapper might call GC and the C++ compiler might // already have dereferenced the instances handle. 
- Handle<JSValue> wrapper = GetScriptWrapper(script); + Handle<JSObject> wrapper = Script::GetWrapper(script); instances->set(i, *wrapper); } @@ -13110,17 +13133,12 @@ // Check instance filter if supplied. This is normally used to avoid // references from mirror objects (see Runtime_IsInPrototypeChain). if (!instance_filter->IsUndefined()) { - Object* V = obj; - while (true) { - Object* prototype = V->GetPrototype(isolate); - if (prototype->IsNull()) { - break; - } - if (instance_filter == prototype) { + for (PrototypeIterator iter(isolate, obj); !iter.IsAtEnd(); + iter.Advance()) { + if (iter.GetCurrent() == instance_filter) { obj = NULL; // Don't add this object. break; } - V = prototype; } } @@ -13154,21 +13172,13 @@ // args[0]: the object to find references to // args[1]: constructor function for instances to exclude (Mirror) // args[2]: the the maximum number of objects to return -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { +RUNTIME_FUNCTION(Runtime_DebugReferencedBy) { HandleScope scope(isolate); - ASSERT(args.length() == 3); - - // First perform a full GC in order to avoid references from dead objects. - Heap* heap = isolate->heap(); - heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugReferencedBy"); - // The heap iterator reserves the right to do a GC to make the heap iterable. - // Due to the GC above we know it won't need to do that, but it seems cleaner - // to get the heap iterator constructed before we start having unprotected - // Object* locals that are not protected by handles. + DCHECK(args.length() == 3); // Check parameters. 
CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0); - Handle<Object> instance_filter = args.at<Object>(1); + CONVERT_ARG_HANDLE_CHECKED(Object, instance_filter, 1); RUNTIME_ASSERT(instance_filter->IsUndefined() || instance_filter->IsJSObject()); CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]); @@ -13176,32 +13186,35 @@ // Get the constructor function for context extension and arguments array. - Handle<JSObject> arguments_boilerplate( - isolate->context()->native_context()->sloppy_arguments_boilerplate()); Handle<JSFunction> arguments_function( - JSFunction::cast(arguments_boilerplate->map()->constructor())); + JSFunction::cast(isolate->sloppy_arguments_map()->constructor())); // Get the number of referencing objects. int count; - HeapIterator heap_iterator(heap); - count = DebugReferencedBy(&heap_iterator, - *target, *instance_filter, max_references, - NULL, 0, *arguments_function); + // First perform a full GC in order to avoid dead objects and to make the heap + // iterable. + Heap* heap = isolate->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy"); + { + HeapIterator heap_iterator(heap); + count = DebugReferencedBy(&heap_iterator, + *target, *instance_filter, max_references, + NULL, 0, *arguments_function); + } // Allocate an array to hold the result. Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count); // Fill the referencing objects. - // AllocateFixedArray above does not make the heap non-iterable. - ASSERT(heap->IsHeapIterable()); - HeapIterator heap_iterator2(heap); - count = DebugReferencedBy(&heap_iterator2, - *target, *instance_filter, max_references, - *instances, count, *arguments_function); + { + HeapIterator heap_iterator(heap); + count = DebugReferencedBy(&heap_iterator, + *target, *instance_filter, max_references, + *instances, count, *arguments_function); + } // Return result as JS array. 
- Handle<JSFunction> constructor( - isolate->context()->native_context()->array_function()); + Handle<JSFunction> constructor = isolate->array_function(); Handle<JSObject> result = isolate->factory()->NewJSObject(constructor); JSArray::SetContent(Handle<JSArray>::cast(result), instances); @@ -13244,13 +13257,10 @@ // Scan the heap for objects constructed by a specific function. // args[0]: the constructor to find instances of // args[1]: the the maximum number of objects to return -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { +RUNTIME_FUNCTION(Runtime_DebugConstructedBy) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); - // First perform a full GC in order to avoid dead objects. - Heap* heap = isolate->heap(); - heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy"); // Check parameters. CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0); @@ -13259,28 +13269,34 @@ // Get the number of referencing objects. int count; - HeapIterator heap_iterator(heap); - count = DebugConstructedBy(&heap_iterator, - *constructor, - max_references, - NULL, - 0); + // First perform a full GC in order to avoid dead objects and to make the heap + // iterable. + Heap* heap = isolate->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy"); + { + HeapIterator heap_iterator(heap); + count = DebugConstructedBy(&heap_iterator, + *constructor, + max_references, + NULL, + 0); + } // Allocate an array to hold the result. Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count); - ASSERT(heap->IsHeapIterable()); // Fill the referencing objects. - HeapIterator heap_iterator2(heap); - count = DebugConstructedBy(&heap_iterator2, - *constructor, - max_references, - *instances, - count); + { + HeapIterator heap_iterator2(heap); + count = DebugConstructedBy(&heap_iterator2, + *constructor, + max_references, + *instances, + count); + } // Return result as JS array. 
- Handle<JSFunction> array_function( - isolate->context()->native_context()->array_function()); + Handle<JSFunction> array_function = isolate->array_function(); Handle<JSObject> result = isolate->factory()->NewJSObject(array_function); JSArray::SetContent(Handle<JSArray>::cast(result), instances); return *result; @@ -13289,18 +13305,18 @@ // Find the effective prototype object as returned by __proto__. // args[0]: the object to find the prototype for. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSObject, obj, 0); - return GetPrototypeSkipHiddenPrototypes(isolate, obj); +RUNTIME_FUNCTION(Runtime_DebugGetPrototype) { + HandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); + return *GetPrototypeSkipHiddenPrototypes(isolate, obj); } // Patches script source (should be called upon BeforeCompile event). -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) { +RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0); CONVERT_ARG_HANDLE_CHECKED(String, source, 1); @@ -13316,47 +13332,51 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) { +RUNTIME_FUNCTION(Runtime_SystemBreak) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); - OS::DebugBreak(); + DCHECK(args.length() == 0); + base::OS::DebugBreak(); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) { +RUNTIME_FUNCTION(Runtime_DebugDisassembleFunction) { HandleScope scope(isolate); #ifdef DEBUG - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); // Get the function and make sure it is compiled. 
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) { - return Failure::Exception(); + return isolate->heap()->exception(); } - func->code()->PrintLn(); + OFStream os(stdout); + func->code()->Print(os); + os << endl; #endif // DEBUG return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) { +RUNTIME_FUNCTION(Runtime_DebugDisassembleConstructor) { HandleScope scope(isolate); #ifdef DEBUG - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); // Get the function and make sure it is compiled. CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) { - return Failure::Exception(); + return isolate->heap()->exception(); } - func->shared()->construct_stub()->PrintLn(); + OFStream os(stdout); + func->shared()->construct_stub()->Print(os); + os << endl; #endif // DEBUG return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) { +RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return f->shared()->inferred_name(); @@ -13372,7 +13392,7 @@ for (HeapObject* obj = iterator->next(); obj != NULL; obj = iterator->next()) { - ASSERT(obj != NULL); + DCHECK(obj != NULL); if (!obj->IsSharedFunctionInfo()) { continue; } @@ -13392,11 +13412,10 @@ // For a script finds all SharedFunctionInfo's in the heap that points // to this script. Returns JSArray of SharedFunctionInfo wrapped // in OpaqueReferences. 
-RUNTIME_FUNCTION(MaybeObject*, - Runtime_LiveEditFindSharedFunctionInfosForScript) { +RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 1); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSValue, script_value, 0); RUNTIME_ASSERT(script_value->value()->IsScript()); @@ -13409,8 +13428,6 @@ int number; Heap* heap = isolate->heap(); { - heap->EnsureHeapIsIterable(); - DisallowHeapAllocation no_allocation; HeapIterator heap_iterator(heap); Script* scr = *script; FixedArray* arr = *array; @@ -13418,8 +13435,6 @@ } if (number > kBufferSize) { array = isolate->factory()->NewFixedArray(number); - heap->EnsureHeapIsIterable(); - DisallowHeapAllocation no_allocation; HeapIterator heap_iterator(heap); Script* scr = *script; FixedArray* arr = *array; @@ -13442,81 +13457,82 @@ // Returns a JSArray of compilation infos. The array is ordered so that // each function with all its descendant is always stored in a continues range // with the function itself going first. The root function is a script function. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) { +RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(JSValue, script, 0); CONVERT_ARG_HANDLE_CHECKED(String, source, 1); RUNTIME_ASSERT(script->value()->IsScript()); Handle<Script> script_handle = Handle<Script>(Script::cast(script->value())); - JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source); - - if (isolate->has_pending_exception()) { - return Failure::Exception(); - } - - return result; + Handle<JSArray> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, LiveEdit::GatherCompileInfo(script_handle, source)); + return *result; } // Changes the source of the script to a new_source. // If old_script_name is provided (i.e. is a String), also creates a copy of // the script with its original source and sends notification to debugger. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) { +RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 3); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 3); CONVERT_ARG_CHECKED(JSValue, original_script_value, 0); CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1); - Handle<Object> old_script_name(args[2], isolate); + CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2); RUNTIME_ASSERT(original_script_value->value()->IsScript()); Handle<Script> original_script(Script::cast(original_script_value->value())); - Object* old_script = LiveEdit::ChangeScriptSource(original_script, - new_source, - old_script_name); + Handle<Object> old_script = LiveEdit::ChangeScriptSource( + original_script, new_source, old_script_name); if (old_script->IsScript()) { - Handle<Script> script_handle(Script::cast(old_script)); - return *(GetScriptWrapper(script_handle)); + Handle<Script> script_handle = Handle<Script>::cast(old_script); + return *Script::GetWrapper(script_handle); } else { return isolate->heap()->null_value(); } } -RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) { +RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 1); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0); - return LiveEdit::FunctionSourceUpdated(shared_info); + RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info)); + + LiveEdit::FunctionSourceUpdated(shared_info); + return isolate->heap()->undefined_value(); } // Replaces code of SharedFunctionInfo with a new one. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) { +RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1); + RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info)); - return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info); + LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info); + return isolate->heap()->undefined_value(); } // Connects SharedFunctionInfo to another script. -RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) { +RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); - Handle<Object> function_object(args[0], isolate); - Handle<Object> script_object(args[1], isolate); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, script_object, 1); if (function_object->IsJSValue()) { Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object); @@ -13525,7 +13541,7 @@ Script* script = Script::cast(JSValue::cast(*script_object)->value()); script_object = Handle<Object>(script, isolate); } - + RUNTIME_ASSERT(function_wrapper->value()->IsSharedFunctionInfo()); LiveEdit::SetFunctionScript(function_wrapper, script_object); } else { // Just ignore this. We may not have a SharedFunctionInfo for some functions @@ -13538,18 +13554,20 @@ // In a code of a parent function replaces original function as embedded object // with a substitution one. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) { +RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 3); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0); CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1); CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2); + RUNTIME_ASSERT(parent_wrapper->value()->IsSharedFunctionInfo()); + RUNTIME_ASSERT(orig_wrapper->value()->IsSharedFunctionInfo()); + RUNTIME_ASSERT(subst_wrapper->value()->IsSharedFunctionInfo()); - LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper, - subst_wrapper); - + LiveEdit::ReplaceRefToNestedFunction( + parent_wrapper, orig_wrapper, subst_wrapper); return isolate->heap()->undefined_value(); } @@ -13559,14 +13577,16 @@ // array of groups of 3 numbers: // (change_begin, change_end, change_end_new_position). // Each group describes a change in text; groups are sorted by change_begin. -RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) { +RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0); CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1); + RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array)) - return LiveEdit::PatchFunctionPositions(shared_array, position_change_array); + LiveEdit::PatchFunctionPositions(shared_array, position_change_array); + return isolate->heap()->undefined_value(); } @@ -13574,12 +13594,22 @@ // checks that none of them have activations on stacks (of any thread). 
// Returns array of the same length with corresponding results of // LiveEdit::FunctionPatchabilityStatus type. -RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) { +RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0); CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1); + RUNTIME_ASSERT(shared_array->length()->IsSmi()); + RUNTIME_ASSERT(shared_array->HasFastElements()) + int array_length = Smi::cast(shared_array->length())->value(); + for (int i = 0; i < array_length; i++) { + Handle<Object> element = + Object::GetElement(isolate, shared_array, i).ToHandleChecked(); + RUNTIME_ASSERT( + element->IsJSValue() && + Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo()); + } return *LiveEdit::CheckAndDropActivations(shared_array, do_drop); } @@ -13588,10 +13618,10 @@ // Compares 2 strings line-by-line, then token-wise and returns diff in form // of JSArray of triplets (pos1, pos1_end, pos2_end) describing list // of diff chunks. -RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) { +RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, s1, 0); CONVERT_ARG_HANDLE_CHECKED(String, s2, 1); @@ -13601,17 +13631,13 @@ // Restarts a call frame and completely drops all frames above. // Returns true if successful. Otherwise returns undefined or an error message. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) { +RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); + CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); + RUNTIME_ASSERT(CheckExecutionState(isolate, break_id)); - // Check arguments. - Object* check; - { MaybeObject* maybe_check = Runtime_CheckExecutionState( - RUNTIME_ARGUMENTS(isolate, args)); - if (!maybe_check->ToObject(&check)) return maybe_check; - } CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]); Heap* heap = isolate->heap(); @@ -13622,14 +13648,11 @@ return heap->undefined_value(); } - int count = 0; JavaScriptFrameIterator it(isolate, id); - for (; !it.done(); it.Advance()) { - if (index < count + it.frame()->GetInlineCount()) break; - count += it.frame()->GetInlineCount(); - } - if (it.done()) return heap->undefined_value(); - + int inlined_jsframe_index = FindIndexedNonNativeFrame(&it, index); + if (inlined_jsframe_index == -1) return heap->undefined_value(); + // We don't really care what the inlined frame index is, since we are + // throwing away the entire frame anyways. const char* error_message = LiveEdit::RestartFrame(it.frame()); if (error_message) { return *(isolate->factory()->InternalizeUtf8String(error_message)); @@ -13640,10 +13663,10 @@ // A testing entry. Returns statement position which is the closest to // source_position. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) { +RUNTIME_FUNCTION(Runtime_GetFunctionCodePositionFromSource) { HandleScope scope(isolate); - CHECK(isolate->debugger()->live_edit_enabled()); - ASSERT(args.length() == 2); + CHECK(isolate->debug()->live_edit_enabled()); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]); @@ -13678,43 +13701,37 @@ // Calls specified function with or without entering the debugger. // This is used in unit tests to run code as if debugger is entered or simply // to have a stack with C++ frame in the middle. -RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) { +RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1); - Handle<Object> result; - bool pending_exception; - { - if (without_debugger) { - result = Execution::Call(isolate, - function, - isolate->global_object(), - 0, - NULL, - &pending_exception); - } else { - EnterDebugger enter_debugger(isolate); - result = Execution::Call(isolate, - function, - isolate->global_object(), - 0, - NULL, - &pending_exception); - } - } - if (!pending_exception) { - return *result; - } else { - return Failure::Exception(); + MaybeHandle<Object> maybe_result; + if (without_debugger) { + maybe_result = Execution::Call(isolate, + function, + handle(function->global_proxy()), + 0, + NULL); + } else { + DebugScope debug_scope(isolate->debug()); + maybe_result = Execution::Call(isolate, + function, + handle(function->global_proxy()), + 0, + NULL); } + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, maybe_result); + return *result; } // Sets a v8 flag. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) { +RUNTIME_FUNCTION(Runtime_SetFlags) { SealHandleScope shs(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(String, arg, 0); SmartArrayPointer<char> flags = arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); @@ -13725,16 +13742,18 @@ // Performs a GC. // Presently, it only does a full GC. -RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) { +RUNTIME_FUNCTION(Runtime_CollectGarbage) { SealHandleScope shs(isolate); + DCHECK(args.length() == 1); isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage"); return isolate->heap()->undefined_value(); } // Gets the current heap usage. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) { +RUNTIME_FUNCTION(Runtime_GetHeapUsage) { SealHandleScope shs(isolate); + DCHECK(args.length() == 0); int usage = static_cast<int>(isolate->heap()->SizeOfObjects()); if (!Smi::IsValid(usage)) { return *isolate->factory()->NewNumberFromInt(usage); @@ -13742,14 +13761,13 @@ return Smi::FromInt(usage); } -#endif // ENABLE_DEBUGGER_SUPPORT - #ifdef V8_I18N_SUPPORT -RUNTIME_FUNCTION(MaybeObject*, Runtime_CanonicalizeLanguageTag) { +RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) { HandleScope scope(isolate); + Factory* factory = isolate->factory(); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0); v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str)); @@ -13764,7 +13782,7 @@ uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY, &icu_length, &error); if (U_FAILURE(error) || icu_length == 0) { - return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag)); + return *factory->NewStringFromAsciiChecked(kInvalidTag); } char result[ULOC_FULLNAME_CAPACITY]; @@ -13773,17 +13791,18 @@ uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error); if (U_FAILURE(error)) { - return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag)); 
+ return *factory->NewStringFromAsciiChecked(kInvalidTag); } - return isolate->heap()->AllocateStringFromOneByte(CStrVector(result)); + return *factory->NewStringFromAsciiChecked(result); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) { +RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) { HandleScope scope(isolate); + Factory* factory = isolate->factory(); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, service, 0); const icu::Locale* available_locales = NULL; @@ -13802,7 +13821,7 @@ UErrorCode error = U_ZERO_ERROR; char result[ULOC_FULLNAME_CAPACITY]; Handle<JSObject> locales = - isolate->factory()->NewJSObject(isolate->object_function()); + factory->NewJSObject(isolate->object_function()); for (int32_t i = 0; i < count; ++i) { const char* icu_name = available_locales[i].getName(); @@ -13815,11 +13834,11 @@ continue; } - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( + RETURN_FAILURE_ON_EXCEPTION(isolate, + JSObject::SetOwnPropertyIgnoreAttributes( locales, - isolate->factory()->NewStringFromAscii(CStrVector(result)), - isolate->factory()->NewNumber(i), + factory->NewStringFromAsciiChecked(result), + factory->NewNumber(i), NONE)); } @@ -13827,10 +13846,11 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultICULocale) { - SealHandleScope shs(isolate); +RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) { + HandleScope scope(isolate); + Factory* factory = isolate->factory(); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); icu::Locale default_locale; @@ -13840,31 +13860,34 @@ uloc_toLanguageTag( default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status); if (U_SUCCESS(status)) { - return isolate->heap()->AllocateStringFromOneByte(CStrVector(result)); + return *factory->NewStringFromAsciiChecked(result); } - return isolate->heap()->AllocateStringFromOneByte(CStrVector("und")); + return *factory->NewStringFromStaticAscii("und"); } 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) { +RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) { HandleScope scope(isolate); + Factory* factory = isolate->factory(); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0); uint32_t length = static_cast<uint32_t>(input->length()->Number()); - Handle<FixedArray> output = isolate->factory()->NewFixedArray(length); - Handle<Name> maximized = - isolate->factory()->NewStringFromAscii(CStrVector("maximized")); - Handle<Name> base = - isolate->factory()->NewStringFromAscii(CStrVector("base")); + // Set some limit to prevent fuzz tests from going OOM. + // Can be bumped when callers' requirements change. + RUNTIME_ASSERT(length < 100); + Handle<FixedArray> output = factory->NewFixedArray(length); + Handle<Name> maximized = factory->NewStringFromStaticAscii("maximized"); + Handle<Name> base = factory->NewStringFromStaticAscii("base"); for (unsigned int i = 0; i < length; ++i) { - Handle<Object> locale_id = Object::GetElement(isolate, input, i); - RETURN_IF_EMPTY_HANDLE(isolate, locale_id); + Handle<Object> locale_id; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, locale_id, Object::GetElement(isolate, input, i)); if (!locale_id->IsString()) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); + return isolate->Throw(*factory->illegal_argument_string()); } v8::String::Utf8Value utf8_locale_id( @@ -13879,7 +13902,7 @@ uloc_forLanguageTag(*utf8_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY, &icu_locale_length, &error); if (U_FAILURE(error) || icu_locale_length == 0) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); + return isolate->Throw(*factory->illegal_argument_string()); } // Maximize the locale. 
@@ -13912,36 +13935,108 @@ icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error); if (U_FAILURE(error)) { - return isolate->Throw(isolate->heap()->illegal_argument_string()); + return isolate->Throw(*factory->illegal_argument_string()); } - Handle<JSObject> result = - isolate->factory()->NewJSObject(isolate->object_function()); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - result, - maximized, - isolate->factory()->NewStringFromAscii(CStrVector(base_max_locale)), - NONE)); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - result, - base, - isolate->factory()->NewStringFromAscii(CStrVector(base_locale)), - NONE)); + Handle<JSObject> result = factory->NewJSObject(isolate->object_function()); + Handle<String> value = factory->NewStringFromAsciiChecked(base_max_locale); + JSObject::AddProperty(result, maximized, value, NONE); + value = factory->NewStringFromAsciiChecked(base_locale); + JSObject::AddProperty(result, base, value, NONE); output->set(i, *result); } - Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(output); + Handle<JSArray> result = factory->NewJSArrayWithElements(output); result->set_length(Smi::FromInt(length)); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) { +RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) { + HandleScope scope(isolate); + + DCHECK(args.length() == 1); + + CONVERT_ARG_HANDLE_CHECKED(Object, input, 0); + + if (!input->IsJSObject()) return isolate->heap()->false_value(); + Handle<JSObject> obj = Handle<JSObject>::cast(input); + + Handle<String> marker = isolate->factory()->intl_initialized_marker_string(); + Handle<Object> tag(obj->GetHiddenProperty(marker), isolate); + return isolate->heap()->ToBoolean(!tag->IsTheHole()); +} + + +RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) { + HandleScope scope(isolate); + + DCHECK(args.length() == 2); + + CONVERT_ARG_HANDLE_CHECKED(Object, input, 0); + 
CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1); + + if (!input->IsJSObject()) return isolate->heap()->false_value(); + Handle<JSObject> obj = Handle<JSObject>::cast(input); + + Handle<String> marker = isolate->factory()->intl_initialized_marker_string(); + Handle<Object> tag(obj->GetHiddenProperty(marker), isolate); + return isolate->heap()->ToBoolean( + tag->IsString() && String::cast(*tag)->Equals(*expected_type)); +} + + +RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) { + HandleScope scope(isolate); + + DCHECK(args.length() == 3); + + CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0); + CONVERT_ARG_HANDLE_CHECKED(String, type, 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, impl, 2); + + Handle<String> marker = isolate->factory()->intl_initialized_marker_string(); + JSObject::SetHiddenProperty(input, marker, type); + + marker = isolate->factory()->intl_impl_object_string(); + JSObject::SetHiddenProperty(input, marker, impl); + + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) { + HandleScope scope(isolate); + + DCHECK(args.length() == 1); + + CONVERT_ARG_HANDLE_CHECKED(Object, input, 0); + + if (!input->IsJSObject()) { + Vector< Handle<Object> > arguments = HandleVector(&input, 1); + Handle<Object> type_error = + isolate->factory()->NewTypeError("not_intl_object", arguments); + return isolate->Throw(*type_error); + } + + Handle<JSObject> obj = Handle<JSObject>::cast(input); + + Handle<String> marker = isolate->factory()->intl_impl_object_string(); + Handle<Object> impl(obj->GetHiddenProperty(marker), isolate); + if (impl->IsTheHole()) { + Vector< Handle<Object> > arguments = HandleVector(&obj, 1); + Handle<Object> type_error = + isolate->factory()->NewTypeError("not_intl_object", arguments); + return isolate->Throw(*type_error); + } + return *impl; +} + + +RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); 
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0); CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1); @@ -13951,13 +14046,10 @@ I18N::GetTemplate(isolate); // Create an empty object wrapper. - bool has_pending_exception = false; - Handle<JSObject> local_object = Execution::InstantiateObject( - date_format_template, &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<JSObject> local_object; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, local_object, + Execution::InstantiateObject(date_format_template)); // Set date time formatter as internal field of the resulting JS object. icu::SimpleDateFormat* date_format = DateFormat::InitializeDateTimeFormat( @@ -13967,12 +14059,10 @@ local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format)); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - local_object, - isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")), - isolate->factory()->NewStringFromAscii(CStrVector("valid")), - NONE)); + Factory* factory = isolate->factory(); + Handle<String> key = factory->NewStringFromStaticAscii("dateFormat"); + Handle<String> value = factory->NewStringFromStaticAscii("valid"); + JSObject::AddProperty(local_object, key, value, NONE); // Make object handle weak so we can delete the data format once GC kicks in. 
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object); @@ -13983,21 +14073,17 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) { +RUNTIME_FUNCTION(Runtime_InternalDateFormat) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0); CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1); - bool has_pending_exception = false; - Handle<Object> value = - Execution::ToNumber(isolate, date, &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<Object> value; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, value, Execution::ToNumber(isolate, date)); icu::SimpleDateFormat* date_format = DateFormat::UnpackDateFormat(isolate, date_format_holder); @@ -14006,17 +14092,21 @@ icu::UnicodeString result; date_format->format(value->Number(), result); - return *isolate->factory()->NewStringFromTwoByte( - Vector<const uint16_t>( - reinterpret_cast<const uint16_t*>(result.getBuffer()), - result.length())); + Handle<String> result_str; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result_str, + isolate->factory()->NewStringFromTwoByte( + Vector<const uint16_t>( + reinterpret_cast<const uint16_t*>(result.getBuffer()), + result.length()))); + return *result_str; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) { +RUNTIME_FUNCTION(Runtime_InternalDateParse) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0); CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1); @@ -14031,22 +14121,19 @@ UDate date = date_format->parse(u_date, status); if (U_FAILURE(status)) return isolate->heap()->undefined_value(); - bool has_pending_exception = false; - Handle<JSDate> result = Handle<JSDate>::cast( - Execution::NewDate( - isolate, static_cast<double>(date), &has_pending_exception)); - if 
(has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Execution::NewDate(isolate, static_cast<double>(date))); + DCHECK(result->IsJSDate()); return *result; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) { +RUNTIME_FUNCTION(Runtime_CreateNumberFormat) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, locale, 0); CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1); @@ -14056,13 +14143,10 @@ I18N::GetTemplate(isolate); // Create an empty object wrapper. - bool has_pending_exception = false; - Handle<JSObject> local_object = Execution::InstantiateObject( - number_format_template, &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<JSObject> local_object; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, local_object, + Execution::InstantiateObject(number_format_template)); // Set number formatter as internal field of the resulting JS object. 
icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat( @@ -14072,12 +14156,10 @@ local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format)); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - local_object, - isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")), - isolate->factory()->NewStringFromAscii(CStrVector("valid")), - NONE)); + Factory* factory = isolate->factory(); + Handle<String> key = factory->NewStringFromStaticAscii("numberFormat"); + Handle<String> value = factory->NewStringFromStaticAscii("valid"); + JSObject::AddProperty(local_object, key, value, NONE); Handle<Object> wrapper = isolate->global_handles()->Create(*local_object); GlobalHandles::MakeWeak(wrapper.location(), @@ -14087,21 +14169,17 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) { +RUNTIME_FUNCTION(Runtime_InternalNumberFormat) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, number, 1); - bool has_pending_exception = false; - Handle<Object> value = Execution::ToNumber( - isolate, number, &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<Object> value; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, value, Execution::ToNumber(isolate, number)); icu::DecimalFormat* number_format = NumberFormat::UnpackNumberFormat(isolate, number_format_holder); @@ -14110,17 +14188,21 @@ icu::UnicodeString result; number_format->format(value->Number(), result); - return *isolate->factory()->NewStringFromTwoByte( - Vector<const uint16_t>( - reinterpret_cast<const uint16_t*>(result.getBuffer()), - result.length())); + Handle<String> result_str; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result_str, + isolate->factory()->NewStringFromTwoByte( + Vector<const uint16_t>( + 
reinterpret_cast<const uint16_t*>(result.getBuffer()), + result.length()))); + return *result_str; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberParse) { +RUNTIME_FUNCTION(Runtime_InternalNumberParse) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0); CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1); @@ -14156,10 +14238,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) { +RUNTIME_FUNCTION(Runtime_CreateCollator) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, locale, 0); CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1); @@ -14168,13 +14250,9 @@ Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate); // Create an empty object wrapper. - bool has_pending_exception = false; - Handle<JSObject> local_object = Execution::InstantiateObject( - collator_template, &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<JSObject> local_object; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, local_object, Execution::InstantiateObject(collator_template)); // Set collator as internal field of the resulting JS object. 
icu::Collator* collator = Collator::InitializeCollator( @@ -14184,12 +14262,10 @@ local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator)); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - local_object, - isolate->factory()->NewStringFromAscii(CStrVector("collator")), - isolate->factory()->NewStringFromAscii(CStrVector("valid")), - NONE)); + Factory* factory = isolate->factory(); + Handle<String> key = factory->NewStringFromStaticAscii("collator"); + Handle<String> value = factory->NewStringFromStaticAscii("valid"); + JSObject::AddProperty(local_object, key, value, NONE); Handle<Object> wrapper = isolate->global_handles()->Create(*local_object); GlobalHandles::MakeWeak(wrapper.location(), @@ -14199,10 +14275,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) { +RUNTIME_FUNCTION(Runtime_InternalCompare) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0); CONVERT_ARG_HANDLE_CHECKED(String, string1, 1); @@ -14227,15 +14303,17 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringNormalize) { +RUNTIME_FUNCTION(Runtime_StringNormalize) { HandleScope scope(isolate); static const UNormalizationMode normalizationForms[] = { UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD }; - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0); CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]); + RUNTIME_ASSERT(form_id >= 0 && + static_cast<size_t>(form_id) < ARRAY_SIZE(normalizationForms)); v8::String::Value string_value(v8::Utils::ToLocal(stringValue)); const UChar* u_value = reinterpret_cast<const UChar*>(*string_value); @@ -14249,17 +14327,21 @@ return isolate->heap()->undefined_value(); } - return *isolate->factory()->NewStringFromTwoByte( - Vector<const uint16_t>( - reinterpret_cast<const uint16_t*>(result.getBuffer()), - result.length())); + Handle<String> result_str; + 
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result_str, + isolate->factory()->NewStringFromTwoByte( + Vector<const uint16_t>( + reinterpret_cast<const uint16_t*>(result.getBuffer()), + result.length()))); + return *result_str; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) { +RUNTIME_FUNCTION(Runtime_CreateBreakIterator) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, locale, 0); CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1); @@ -14269,13 +14351,10 @@ I18N::GetTemplate2(isolate); // Create an empty object wrapper. - bool has_pending_exception = false; - Handle<JSObject> local_object = Execution::InstantiateObject( - break_iterator_template, &has_pending_exception); - if (has_pending_exception) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + Handle<JSObject> local_object; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, local_object, + Execution::InstantiateObject(break_iterator_template)); // Set break iterator as internal field of the resulting JS object. icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator( @@ -14287,12 +14366,10 @@ // Make sure that the pointer to adopted text is NULL. local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL)); - RETURN_IF_EMPTY_HANDLE(isolate, - JSObject::SetLocalPropertyIgnoreAttributes( - local_object, - isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")), - isolate->factory()->NewStringFromAscii(CStrVector("valid")), - NONE)); + Factory* factory = isolate->factory(); + Handle<String> key = factory->NewStringFromStaticAscii("breakIterator"); + Handle<String> value = factory->NewStringFromStaticAscii("valid"); + JSObject::AddProperty(local_object, key, value, NONE); // Make object handle weak so we can delete the break iterator once GC kicks // in. 
@@ -14304,10 +14381,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorAdoptText) { +RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) { HandleScope scope(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0); CONVERT_ARG_HANDLE_CHECKED(String, text, 1); @@ -14331,10 +14408,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorFirst) { +RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0); @@ -14346,10 +14423,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorNext) { +RUNTIME_FUNCTION(Runtime_BreakIteratorNext) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0); @@ -14361,10 +14438,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorCurrent) { +RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0); @@ -14376,10 +14453,10 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorBreakType) { +RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0); @@ -14393,17 +14470,17 @@ int32_t status = rule_based_iterator->getRuleStatus(); // Keep return values in sync with JavaScript BreakType enum. 
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) { - return *isolate->factory()->NewStringFromAscii(CStrVector("none")); + return *isolate->factory()->NewStringFromStaticAscii("none"); } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) { - return *isolate->factory()->NewStringFromAscii(CStrVector("number")); + return *isolate->factory()->NewStringFromStaticAscii("number"); } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) { - return *isolate->factory()->NewStringFromAscii(CStrVector("letter")); + return *isolate->factory()->NewStringFromStaticAscii("letter"); } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) { - return *isolate->factory()->NewStringFromAscii(CStrVector("kana")); + return *isolate->factory()->NewStringFromStaticAscii("kana"); } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) { - return *isolate->factory()->NewStringFromAscii(CStrVector("ideo")); + return *isolate->factory()->NewStringFromStaticAscii("ideo"); } else { - return *isolate->factory()->NewStringFromAscii(CStrVector("unknown")); + return *isolate->factory()->NewStringFromStaticAscii("unknown"); } } #endif // V8_I18N_SUPPORT @@ -14422,8 +14499,6 @@ Handle<Script> script; Factory* factory = script_name->GetIsolate()->factory(); Heap* heap = script_name->GetHeap(); - heap->EnsureHeapIsIterable(); - DisallowHeapAllocation no_allocation_during_heap_iteration; HeapIterator iterator(heap); HeapObject* obj = NULL; while (script.is_null() && ((obj = iterator.next()) != NULL)) { @@ -14441,17 +14516,17 @@ if (script.is_null()) return factory->undefined_value(); // Return the script found. - return GetScriptWrapper(script); + return Script::GetWrapper(script); } // Get the script object from script data. NOTE: Regarding performance // see the NOTE for GetScriptFromScriptData. 
// args[0]: script data for the script to find the source for -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) { +RUNTIME_FUNCTION(Runtime_GetScript) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(String, script_name, 0); @@ -14465,93 +14540,99 @@ // Collect the raw data for a stack trace. Returns an array of 4 // element segments each containing a receiver, function, code and // native code offset. -RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) { +RUNTIME_FUNCTION(Runtime_CollectStackTrace) { HandleScope scope(isolate); - ASSERT_EQ(args.length(), 3); + DCHECK(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0); - Handle<Object> caller = args.at<Object>(1); - CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]); + CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1); - // Optionally capture a more detailed stack trace for the message. - isolate->CaptureAndSetDetailedStackTrace(error_object); - // Capture a simple stack trace for the stack property. - return *isolate->CaptureSimpleStackTrace(error_object, caller, limit); + if (!isolate->bootstrapper()->IsActive()) { + // Optionally capture a more detailed stack trace for the message. + isolate->CaptureAndSetDetailedStackTrace(error_object); + // Capture a simple stack trace for the stack property. + isolate->CaptureAndSetSimpleStackTrace(error_object, caller); + } + return isolate->heap()->undefined_value(); } -// Retrieve the stack trace. This is the raw stack trace that yet has to -// be formatted. Since we only need this once, clear it afterwards. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) { +// Returns V8 version as a string. 
+RUNTIME_FUNCTION(Runtime_GetV8Version) { HandleScope scope(isolate); - ASSERT_EQ(args.length(), 1); - CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0); - Handle<String> key = isolate->factory()->hidden_stack_trace_string(); - Handle<Object> result(error_object->GetHiddenProperty(*key), isolate); - if (result->IsTheHole()) return isolate->heap()->undefined_value(); - RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined()); - JSObject::DeleteHiddenProperty(error_object, key); - return *result; + DCHECK(args.length() == 0); + + const char* version_string = v8::V8::GetVersion(); + + return *isolate->factory()->NewStringFromAsciiChecked(version_string); } -// Returns V8 version as a string. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) { +RUNTIME_FUNCTION(Runtime_Abort) { SealHandleScope shs(isolate); - ASSERT_EQ(args.length(), 0); - - const char* version_string = v8::V8::GetVersion(); - - return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string), - NOT_TENURED); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_SMI_ARG_CHECKED(message_id, 0); const char* message = GetBailoutReason( static_cast<BailoutReason>(message_id)); - OS::PrintError("abort: %s\n", message); + base::OS::PrintError("abort: %s\n", message); isolate->PrintStack(stderr); - OS::Abort(); + base::OS::Abort(); UNREACHABLE(); return NULL; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_AbortJS) { +RUNTIME_FUNCTION(Runtime_AbortJS) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, message, 0); - OS::PrintError("abort: %s\n", message->ToCString().get()); + base::OS::PrintError("abort: %s\n", message->ToCString().get()); isolate->PrintStack(stderr); - OS::Abort(); + base::OS::Abort(); UNREACHABLE(); return NULL; } -RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) { 
+RUNTIME_FUNCTION(Runtime_FlattenString) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(String, str, 0); - FlattenString(str); - return isolate->heap()->undefined_value(); + return *String::Flatten(str); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyContextDisposed) { +RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); isolate->heap()->NotifyContextDisposed(); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) { +RUNTIME_FUNCTION(Runtime_LoadMutableDouble) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1); + RUNTIME_ASSERT((index->value() & 1) == 1); + FieldIndex field_index = + FieldIndex::ForLoadByFieldIndex(object->map(), index->value()); + if (field_index.is_inobject()) { + RUNTIME_ASSERT(field_index.property_index() < + object->map()->inobject_properties()); + } else { + RUNTIME_ASSERT(field_index.outobject_array_index() < + object->properties()->length()); + } + Handle<Object> raw_value(object->RawFastPropertyAt(field_index), isolate); + RUNTIME_ASSERT(raw_value->IsMutableHeapNumber()); + return *Object::WrapForRead(isolate, raw_value, Representation::Double()); +} + + +RUNTIME_FUNCTION(Runtime_TryMigrateInstance) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); if (!object->IsJSObject()) return Smi::FromInt(0); Handle<JSObject> js_object = Handle<JSObject>::cast(object); @@ -14560,43 +14641,46 @@ // code where we can't handle lazy deopts for lack of a suitable bailout // ID. So we just try migration and signal failure if necessary, // which will also trigger a deopt. 
- Handle<Object> result = JSObject::TryMigrateInstance(js_object); - if (result.is_null()) return Smi::FromInt(0); + if (!JSObject::TryMigrateInstance(js_object)) return Smi::FromInt(0); return *object; } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_GetFromCache) { +RUNTIME_FUNCTION(Runtime_GetFromCache) { SealHandleScope shs(isolate); // This is only called from codegen, so checks might be more lax. CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0); - Object* key = args[1]; + CONVERT_ARG_CHECKED(Object, key, 1); - int finger_index = cache->finger_index(); - Object* o = cache->get(finger_index); - if (o == key) { - // The fastest case: hit the same place again. - return cache->get(finger_index + 1); - } + { + DisallowHeapAllocation no_alloc; - for (int i = finger_index - 2; - i >= JSFunctionResultCache::kEntriesIndex; - i -= 2) { - o = cache->get(i); + int finger_index = cache->finger_index(); + Object* o = cache->get(finger_index); if (o == key) { - cache->set_finger_index(i); - return cache->get(i + 1); + // The fastest case: hit the same place again. + return cache->get(finger_index + 1); } - } - int size = cache->size(); - ASSERT(size <= cache->length()); + for (int i = finger_index - 2; + i >= JSFunctionResultCache::kEntriesIndex; + i -= 2) { + o = cache->get(i); + if (o == key) { + cache->set_finger_index(i); + return cache->get(i + 1); + } + } - for (int i = size - 2; i > finger_index; i -= 2) { - o = cache->get(i); - if (o == key) { - cache->set_finger_index(i); - return cache->get(i + 1); + int size = cache->size(); + DCHECK(size <= cache->length()); + + for (int i = size - 2; i > finger_index; i -= 2) { + o = cache->get(i); + if (o == key) { + cache->set_finger_index(i); + return cache->get(i + 1); + } } } @@ -14610,18 +14694,12 @@ Handle<JSFunction> factory(JSFunction::cast( cache_handle->get(JSFunctionResultCache::kFactoryIndex))); // TODO(antonm): consider passing a receiver when constructing a cache. 
- Handle<Object> receiver(isolate->native_context()->global_object(), - isolate); + Handle<JSObject> receiver(isolate->global_proxy()); // This handle is nor shared, nor used later, so it's safe. Handle<Object> argv[] = { key_handle }; - bool pending_exception; - value = Execution::Call(isolate, - factory, - receiver, - ARRAY_SIZE(argv), - argv, - &pending_exception); - if (pending_exception) return Failure::Exception(); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, value, + Execution::Call(isolate, factory, receiver, ARRAY_SIZE(argv), argv)); } #ifdef VERIFY_HEAP @@ -14631,8 +14709,8 @@ #endif // Function invocation may have cleared the cache. Reread all the data. - finger_index = cache_handle->finger_index(); - size = cache_handle->size(); + int finger_index = cache_handle->finger_index(); + int size = cache_handle->size(); // If we have spare room, put new data into it, otherwise evict post finger // entry which is likely to be the least recently used. @@ -14647,9 +14725,9 @@ } } - ASSERT(index % 2 == 0); - ASSERT(index >= JSFunctionResultCache::kEntriesIndex); - ASSERT(index < cache_handle->length()); + DCHECK(index % 2 == 0); + DCHECK(index >= JSFunctionResultCache::kEntriesIndex); + DCHECK(index < cache_handle->length()); cache_handle->set(index, *key_handle); cache_handle->set(index + 1, *value); @@ -14665,15 +14743,17 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) { +RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) { SealHandleScope shs(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return Smi::FromInt(message->start_position()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) { +RUNTIME_FUNCTION(Runtime_MessageGetScript) { SealHandleScope shs(isolate); + DCHECK(args.length() == 1); CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return message->script(); } @@ -14682,14 +14762,14 @@ #ifdef DEBUG // ListNatives is ONLY used by the fuzz-natives.js in debug mode // Exclude the 
code in release mode. -RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) { +RUNTIME_FUNCTION(Runtime_ListNatives) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); #define COUNT_ENTRY(Name, argc, ressize) + 1 int entry_count = 0 RUNTIME_FUNCTION_LIST(COUNT_ENTRY) - RUNTIME_HIDDEN_FUNCTION_LIST(COUNT_ENTRY) - INLINE_FUNCTION_LIST(COUNT_ENTRY); + INLINE_FUNCTION_LIST(COUNT_ENTRY) + INLINE_OPTIMIZED_FUNCTION_LIST(COUNT_ENTRY); #undef COUNT_ENTRY Factory* factory = isolate->factory(); Handle<FixedArray> elements = factory->NewFixedArray(entry_count); @@ -14701,11 +14781,9 @@ Handle<String> name; \ /* Inline runtime functions have an underscore in front of the name. */ \ if (inline_runtime_functions) { \ - name = factory->NewStringFromAscii( \ - Vector<const char>("_" #Name, StrLength("_" #Name))); \ + name = factory->NewStringFromStaticAscii("_" #Name); \ } else { \ - name = factory->NewStringFromAscii( \ - Vector<const char>(#Name, StrLength(#Name))); \ + name = factory->NewStringFromStaticAscii(#Name); \ } \ Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \ pair_elements->set(0, *name); \ @@ -14715,40 +14793,26 @@ } inline_runtime_functions = false; RUNTIME_FUNCTION_LIST(ADD_ENTRY) - // Calling hidden runtime functions should just throw. 
- RUNTIME_HIDDEN_FUNCTION_LIST(ADD_ENTRY) + INLINE_OPTIMIZED_FUNCTION_LIST(ADD_ENTRY) inline_runtime_functions = true; INLINE_FUNCTION_LIST(ADD_ENTRY) #undef ADD_ENTRY - ASSERT_EQ(index, entry_count); + DCHECK_EQ(index, entry_count); Handle<JSArray> result = factory->NewJSArrayWithElements(elements); return *result; } #endif -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Log) { - HandleScope handle_scope(isolate); - ASSERT(args.length() == 2); - CONVERT_ARG_HANDLE_CHECKED(String, format, 0); - CONVERT_ARG_HANDLE_CHECKED(JSArray, elms, 1); - - SmartArrayPointer<char> format_chars = format->ToCString(); - isolate->logger()->LogRuntime( - Vector<const char>(format_chars.get(), format->length()), elms); - return isolate->heap()->undefined_value(); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) { +RUNTIME_FUNCTION(Runtime_IS_VAR) { UNREACHABLE(); // implemented as macro in the parser return NULL; } #define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \ - RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \ - CONVERT_ARG_CHECKED(JSObject, obj, 0); \ + RUNTIME_FUNCTION(Runtime_Has##Name) { \ + CONVERT_ARG_CHECKED(JSObject, obj, 0); \ return isolate->heap()->ToBoolean(obj->Has##Name()); \ } @@ -14767,7 +14831,7 @@ #define TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, size) \ - RUNTIME_FUNCTION(MaybeObject*, Runtime_HasExternal##Type##Elements) { \ + RUNTIME_FUNCTION(Runtime_HasExternal##Type##Elements) { \ CONVERT_ARG_CHECKED(JSObject, obj, 0); \ return isolate->heap()->ToBoolean(obj->HasExternal##Type##Elements()); \ } @@ -14778,7 +14842,7 @@ #define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \ - RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFixed##Type##Elements) { \ + RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \ CONVERT_ARG_CHECKED(JSObject, obj, 0); \ return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \ } @@ -14788,96 +14852,75 @@ #undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) { +RUNTIME_FUNCTION(Runtime_HaveSameMap) { SealHandleScope shs(isolate); - ASSERT(args.length() == 2); + DCHECK(args.length() == 2); CONVERT_ARG_CHECKED(JSObject, obj1, 0); CONVERT_ARG_CHECKED(JSObject, obj2, 1); return isolate->heap()->ToBoolean(obj1->map() == obj2->map()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessCheckNeeded) { +RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(HeapObject, obj, 0); - return isolate->heap()->ToBoolean(obj->IsAccessCheckNeeded()); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) { +RUNTIME_FUNCTION(Runtime_IsObserved) { SealHandleScope shs(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value(); - JSReceiver* obj = JSReceiver::cast(args[0]); - if (obj->IsJSGlobalProxy()) { - Object* proto = obj->GetPrototype(); - if (proto->IsNull()) return isolate->heap()->false_value(); - ASSERT(proto->IsJSGlobalObject()); - obj = JSReceiver::cast(proto); - } + CONVERT_ARG_CHECKED(JSReceiver, obj, 0); + DCHECK(!obj->IsJSGlobalProxy() || !obj->map()->is_observed()); return isolate->heap()->ToBoolean(obj->map()->is_observed()); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) { +RUNTIME_FUNCTION(Runtime_SetIsObserved) { HandleScope scope(isolate); - ASSERT(args.length() == 1); + DCHECK(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0); - if (obj->IsJSGlobalProxy()) { - Object* proto = obj->GetPrototype(); - if (proto->IsNull()) return isolate->heap()->undefined_value(); - ASSERT(proto->IsJSGlobalObject()); - obj = handle(JSReceiver::cast(proto)); - } - if (obj->IsJSProxy()) - return isolate->heap()->undefined_value(); + RUNTIME_ASSERT(!obj->IsJSGlobalProxy()); + if 
(obj->IsJSProxy()) return isolate->heap()->undefined_value(); + RUNTIME_ASSERT(!obj->map()->is_observed()); - ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() && - Handle<JSObject>::cast(obj)->HasFastElements())); - ASSERT(obj->IsJSObject()); + DCHECK(obj->IsJSObject()); JSObject::SetObserved(Handle<JSObject>::cast(obj)); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - CONVERT_BOOLEAN_ARG_CHECKED(new_state, 0); - bool old_state = isolate->microtask_pending(); - isolate->set_microtask_pending(new_state); - return isolate->heap()->ToBoolean(old_state); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_RunMicrotasks) { +RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) { HandleScope scope(isolate); - ASSERT(args.length() == 0); - if (isolate->microtask_pending()) - Execution::RunMicrotasks(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0); + isolate->EnqueueMicrotask(microtask); return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetMicrotaskState) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 0); - return isolate->heap()->microtask_state(); +RUNTIME_FUNCTION(Runtime_RunMicrotasks) { + HandleScope scope(isolate); + DCHECK(args.length() == 0); + isolate->RunMicrotasks(); + return isolate->heap()->undefined_value(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) { +RUNTIME_FUNCTION(Runtime_GetObservationState) { SealHandleScope shs(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); return isolate->heap()->observation_state(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) { +RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) { HandleScope scope(isolate); - ASSERT(args.length() == 0); + DCHECK(args.length() == 0); // TODO(adamk): Currently this runtime function is only called three times per // isolate. 
If it's called more often, the map should be moved into the // strong root list. @@ -14885,53 +14928,75 @@ isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize); Handle<JSWeakMap> weakmap = Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map)); - return WeakCollectionInitialize(isolate, weakmap); + return *WeakCollectionInitialize(isolate, weakmap); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) { - SealHandleScope shs(isolate); - ASSERT(args.length() == 1); - Object* object = args[0]; - if (object->IsJSGlobalProxy()) { - object = object->GetPrototype(isolate); - if (object->IsNull()) return isolate->heap()->undefined_value(); - } - return object; +static bool ContextsHaveSameOrigin(Handle<Context> context1, + Handle<Context> context2) { + return context1->security_token() == context2->security_token(); } -RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) { +RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1); - ASSERT(object->map()->is_access_check_needed()); - Handle<Object> key = args.at<Object>(2); - SaveContext save(isolate); - isolate->set_context(observer->context()); - if (!isolate->MayNamedAccessWrapper(object, - isolate->factory()->undefined_value(), - v8::ACCESS_KEYS)) { - return isolate->heap()->false_value(); - } - bool access_allowed = false; - uint32_t index = 0; - if (key->ToArrayIndex(&index) || - (key->IsString() && String::cast(*key)->AsArrayIndex(&index))) { - access_allowed = - isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_GET) && - isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS); - } else { - access_allowed = - isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_GET) && - isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS); - } - return 
isolate->heap()->ToBoolean(access_allowed); + CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2); + + Handle<Context> observer_context(observer->context()->native_context()); + Handle<Context> object_context(object->GetCreationContext()); + Handle<Context> record_context(record->GetCreationContext()); + + return isolate->heap()->ToBoolean( + ContextsHaveSameOrigin(object_context, observer_context) && + ContextsHaveSameOrigin(object_context, record_context)); +} + + +RUNTIME_FUNCTION(Runtime_ObjectWasCreatedInCurrentOrigin) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + + Handle<Context> creation_context(object->GetCreationContext(), isolate); + return isolate->heap()->ToBoolean( + ContextsHaveSameOrigin(creation_context, isolate->native_context())); +} + + +RUNTIME_FUNCTION(Runtime_GetObjectContextObjectObserve) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + + Handle<Context> context(object->GetCreationContext(), isolate); + return context->native_object_observe(); } -static MaybeObject* ArrayConstructorCommon(Isolate* isolate, +RUNTIME_FUNCTION(Runtime_GetObjectContextObjectGetNotifier) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + + Handle<Context> context(object->GetCreationContext(), isolate); + return context->native_object_get_notifier(); +} + + +RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object_info, 0); + + Handle<Context> context(object_info->GetCreationContext(), isolate); + return context->native_object_notifier_perform_change(); +} + + +static Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor, Handle<AllocationSite> site, Arguments* caller_args) { @@ -14970,7 +15035,6 @@ Handle<Map> 
initial_map(constructor->initial_map(), isolate); if (to_kind != initial_map->elements_kind()) { initial_map = Map::AsElementsKind(initial_map, to_kind); - RETURN_IF_EMPTY_HANDLE(isolate, initial_map); } // If we don't care to track arrays of to_kind ElementsKind, then @@ -14996,8 +15060,8 @@ factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS); ElementsKind old_kind = array->GetElementsKind(); - RETURN_IF_EMPTY_HANDLE(isolate, - ArrayConstructInitializeElements(array, caller_args)); + RETURN_FAILURE_ON_EXCEPTION( + isolate, ArrayConstructInitializeElements(array, caller_args)); if (!site.is_null() && (old_kind != array->GetElementsKind() || !can_use_type_feedback)) { @@ -15010,7 +15074,7 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ArrayConstructor) { +RUNTIME_FUNCTION(Runtime_ArrayConstructor) { HandleScope scope(isolate); // If we get 2 arguments then they are the stub parameters (constructor, type // info). If we get 4, then the first one is a pointer to the arguments @@ -15019,7 +15083,7 @@ // with an assert). Arguments empty_args(0, NULL); bool no_caller_args = args.length() == 2; - ASSERT(no_caller_args || args.length() == 4); + DCHECK(no_caller_args || args.length() == 4); int parameters_start = no_caller_args ? 0 : 1; Arguments* caller_args = no_caller_args ? 
&empty_args @@ -15029,7 +15093,7 @@ #ifdef DEBUG if (!no_caller_args) { CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 2); - ASSERT(arg_count == caller_args->length()); + DCHECK(arg_count == caller_args->length()); } #endif @@ -15037,7 +15101,7 @@ if (!type_info.is_null() && *type_info != isolate->heap()->undefined_value()) { site = Handle<AllocationSite>::cast(type_info); - ASSERT(!site->SitePointsToLiteral()); + DCHECK(!site->SitePointsToLiteral()); } return ArrayConstructorCommon(isolate, @@ -15047,11 +15111,11 @@ } -RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InternalArrayConstructor) { +RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) { HandleScope scope(isolate); Arguments empty_args(0, NULL); bool no_caller_args = args.length() == 1; - ASSERT(no_caller_args || args.length() == 3); + DCHECK(no_caller_args || args.length() == 3); int parameters_start = no_caller_args ? 0 : 1; Arguments* caller_args = no_caller_args ? &empty_args @@ -15060,7 +15124,7 @@ #ifdef DEBUG if (!no_caller_args) { CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 1); - ASSERT(arg_count == caller_args->length()); + DCHECK(arg_count == caller_args->length()); } #endif return ArrayConstructorCommon(isolate, @@ -15070,78 +15134,485 @@ } -RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) { +RUNTIME_FUNCTION(Runtime_NormalizeElements) { + HandleScope scope(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0); + RUNTIME_ASSERT(!array->HasExternalArrayElements() && + !array->HasFixedTypedArrayElements()); + JSObject::NormalizeElements(array); + return *array; +} + + +RUNTIME_FUNCTION(Runtime_MaxSmi) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 0); return Smi::FromInt(Smi::kMaxValue); } +// TODO(dcarney): remove this function when TurboFan supports it. +// Takes the object to be iterated over and the result of GetPropertyNamesFast +// Returns pair (cache_array, cache_type). 
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInInit) { + SealHandleScope scope(isolate); + DCHECK(args.length() == 2); + // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs. + // Not worth creating a macro atm as this function should be removed. + if (!args[0]->IsJSReceiver() || !args[1]->IsObject()) { + Object* error = isolate->ThrowIllegalOperation(); + return MakePair(error, isolate->heap()->undefined_value()); + } + Handle<JSReceiver> object = args.at<JSReceiver>(0); + Handle<Object> cache_type = args.at<Object>(1); + if (cache_type->IsMap()) { + // Enum cache case. + if (Map::EnumLengthBits::decode(Map::cast(*cache_type)->bit_field3()) == + 0) { + // 0 length enum. + // Can't handle this case in the graph builder, + // so transform it into the empty fixed array case. + return MakePair(isolate->heap()->empty_fixed_array(), Smi::FromInt(1)); + } + return MakePair(object->map()->instance_descriptors()->GetEnumCache(), + *cache_type); + } else { + // FixedArray case. + Smi* new_cache_type = Smi::FromInt(object->IsJSProxy() ? 0 : 1); + return MakePair(*Handle<FixedArray>::cast(cache_type), new_cache_type); + } +} + + +// TODO(dcarney): remove this function when TurboFan supports it. +RUNTIME_FUNCTION(Runtime_ForInCacheArrayLength) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 0); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, array, 1); + int length = 0; + if (cache_type->IsMap()) { + length = Map::cast(*cache_type)->EnumLength(); + } else { + DCHECK(cache_type->IsSmi()); + length = array->length(); + } + return Smi::FromInt(length); +} + + +// TODO(dcarney): remove this function when TurboFan supports it. +// Takes (the object to be iterated over, +// cache_array from ForInInit, +// cache_type from ForInInit, +// the current index) +// Returns pair (array[index], needs_filtering). 
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) { + SealHandleScope scope(isolate); + DCHECK(args.length() == 4); + // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs. + // Not worth creating a macro atm as this function should be removed. + if (!args[0]->IsJSReceiver() || !args[1]->IsFixedArray() || + !args[2]->IsObject() || !args[3]->IsSmi()) { + Object* error = isolate->ThrowIllegalOperation(); + return MakePair(error, isolate->heap()->undefined_value()); + } + Handle<JSReceiver> object = args.at<JSReceiver>(0); + Handle<FixedArray> array = args.at<FixedArray>(1); + Handle<Object> cache_type = args.at<Object>(2); + int index = args.smi_at(3); + // Figure out first if a slow check is needed for this object. + bool slow_check_needed = false; + if (cache_type->IsMap()) { + if (object->map() != Map::cast(*cache_type)) { + // Object transitioned. Need slow check. + slow_check_needed = true; + } + } else { + // No slow check needed for proxies. + slow_check_needed = Smi::cast(*cache_type)->value() == 1; + } + return MakePair(array->get(index), + isolate->heap()->ToBoolean(slow_check_needed)); +} + + // ---------------------------------------------------------------------------- -// Implementation of Runtime +// Reference implementation for inlined runtime functions. Only used when the +// compiler does not support a certain intrinsic. Don't optimize these, but +// implement the intrinsic in the respective compiler instead. -#define F(name, number_of_args, result_size) \ - { Runtime::k##name, Runtime::RUNTIME, #name, \ - FUNCTION_ADDR(Runtime_##name), number_of_args, result_size }, +// TODO(mstarzinger): These are place-holder stubs for TurboFan and will +// eventually all have a C++ implementation and this macro will be gone. 
+#define U(name) \ + RUNTIME_FUNCTION(RuntimeReference_##name) { \ + UNIMPLEMENTED(); \ + return NULL; \ + } + +U(IsStringWrapperSafeForDefaultValueOf) +U(GeneratorNext) +U(GeneratorThrow) +U(DebugBreakInOptimizedCode) +#undef U -#define FH(name, number_of_args, result_size) \ - { Runtime::kHidden##name, Runtime::RUNTIME_HIDDEN, NULL, \ - FUNCTION_ADDR(RuntimeHidden_##name), number_of_args, result_size }, +RUNTIME_FUNCTION(RuntimeReference_IsSmi) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsSmi()); +} -#define I(name, number_of_args, result_size) \ - { Runtime::kInline##name, Runtime::INLINE, \ - "_" #name, NULL, number_of_args, result_size }, +RUNTIME_FUNCTION(RuntimeReference_IsNonNegativeSmi) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsSmi() && + Smi::cast(obj)->value() >= 0); +} -#define IO(name, number_of_args, result_size) \ - { Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, \ - "_" #name, FUNCTION_ADDR(Runtime_##name), number_of_args, result_size }, + +RUNTIME_FUNCTION(RuntimeReference_IsArray) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsJSArray()); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsRegExp) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsJSRegExp()); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsConstructCall) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 0); + JavaScriptFrameIterator it(isolate); + JavaScriptFrame* frame = it.frame(); + return isolate->heap()->ToBoolean(frame->IsConstructor()); +} + + +RUNTIME_FUNCTION(RuntimeReference_CallFunction) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_Call(args, 
isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_ArgumentsLength) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 0); + JavaScriptFrameIterator it(isolate); + JavaScriptFrame* frame = it.frame(); + return Smi::FromInt(frame->GetArgumentsLength()); +} + + +RUNTIME_FUNCTION(RuntimeReference_Arguments) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_GetArgumentsProperty(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_ValueOf) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + if (!obj->IsJSValue()) return obj; + return JSValue::cast(obj)->value(); +} + + +RUNTIME_FUNCTION(RuntimeReference_SetValueOf) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_CHECKED(Object, obj, 0); + CONVERT_ARG_CHECKED(Object, value, 1); + if (!obj->IsJSValue()) return value; + JSValue::cast(obj)->set_value(value); + return value; +} + + +RUNTIME_FUNCTION(RuntimeReference_DateField) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_CHECKED(Object, obj, 0); + CONVERT_SMI_ARG_CHECKED(index, 1); + if (!obj->IsJSDate()) { + HandleScope scope(isolate); + return isolate->Throw(*isolate->factory()->NewTypeError( + "not_date_object", HandleVector<Object>(NULL, 0))); + } + JSDate* date = JSDate::cast(obj); + if (index == 0) return date->value(); + return JSDate::GetField(date, Smi::FromInt(index)); +} + + +RUNTIME_FUNCTION(RuntimeReference_StringCharFromCode) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_CharFromCode(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_StringCharAt) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + if (!args[0]->IsString()) return Smi::FromInt(0); + if (!args[1]->IsNumber()) return Smi::FromInt(0); + if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string(); + Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate); + if (code->IsNaN()) return 
isolate->heap()->empty_string(); + return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_CHECKED(SeqOneByteString, string, 0); + CONVERT_SMI_ARG_CHECKED(index, 1); + CONVERT_SMI_ARG_CHECKED(value, 2); + string->SeqOneByteStringSet(index, value); + return string; +} + + +RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 3); + CONVERT_ARG_CHECKED(SeqTwoByteString, string, 0); + CONVERT_SMI_ARG_CHECKED(index, 1); + CONVERT_SMI_ARG_CHECKED(value, 2); + string->SeqTwoByteStringSet(index, value); + return string; +} + + +RUNTIME_FUNCTION(RuntimeReference_ObjectEquals) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + CONVERT_ARG_CHECKED(Object, obj1, 0); + CONVERT_ARG_CHECKED(Object, obj2, 1); + return isolate->heap()->ToBoolean(obj1 == obj2); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsObject) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + if (!obj->IsHeapObject()) return isolate->heap()->false_value(); + if (obj->IsNull()) return isolate->heap()->true_value(); + if (obj->IsUndetectableObject()) return isolate->heap()->false_value(); + Map* map = HeapObject::cast(obj)->map(); + bool is_non_callable_spec_object = + map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE && + map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE; + return isolate->heap()->ToBoolean(is_non_callable_spec_object); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsFunction) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsJSFunction()); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsUndetectableObject) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 
0); + return isolate->heap()->ToBoolean(obj->IsUndetectableObject()); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsSpecObject) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + return isolate->heap()->ToBoolean(obj->IsSpecObject()); +} + + +RUNTIME_FUNCTION(RuntimeReference_MathPow) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_MathPowSlow(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_IsMinusZero) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + if (!obj->IsHeapNumber()) return isolate->heap()->false_value(); + HeapNumber* number = HeapNumber::cast(obj); + return isolate->heap()->ToBoolean(IsMinusZero(number->value())); +} + + +RUNTIME_FUNCTION(RuntimeReference_HasCachedArrayIndex) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + return isolate->heap()->false_value(); +} + + +RUNTIME_FUNCTION(RuntimeReference_GetCachedArrayIndex) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(RuntimeReference_FastAsciiArrayJoin) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(RuntimeReference_ClassOf) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 1); + CONVERT_ARG_CHECKED(Object, obj, 0); + if (!obj->IsJSReceiver()) return isolate->heap()->null_value(); + return JSReceiver::cast(obj)->class_name(); +} + + +RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) { + SealHandleScope shs(isolate); + DCHECK(args.length() == 2); + if (!args[0]->IsString()) return isolate->heap()->undefined_value(); + if (!args[1]->IsNumber()) return isolate->heap()->undefined_value(); + if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value(); + return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_StringAdd) { 
+ SealHandleScope shs(isolate); + return __RT_impl_Runtime_StringAdd(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_SubString) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_SubString(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_StringCompare) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_StringCompare(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_RegExpExec) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_RegExpExecRT(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_RegExpConstructResult) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_RegExpConstructResult(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_GetFromCache) { + HandleScope scope(isolate); + DCHECK(args.length() == 2); + CONVERT_SMI_ARG_CHECKED(id, 0); + args[0] = isolate->native_context()->jsfunction_result_caches()->get(id); + return __RT_impl_Runtime_GetFromCache(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_NumberToString) { + SealHandleScope shs(isolate); + return __RT_impl_Runtime_NumberToStringRT(args, isolate); +} + + +RUNTIME_FUNCTION(RuntimeReference_DebugIsActive) { + SealHandleScope shs(isolate); + return Smi::FromInt(isolate->debug()->is_active()); +} + + +// ---------------------------------------------------------------------------- +// Implementation of Runtime + +#define F(name, number_of_args, result_size) \ + { \ + Runtime::k##name, Runtime::RUNTIME, #name, FUNCTION_ADDR(Runtime_##name), \ + number_of_args, result_size \ + } \ + , + + +#define I(name, number_of_args, result_size) \ + { \ + Runtime::kInline##name, Runtime::INLINE, "_" #name, \ + FUNCTION_ADDR(RuntimeReference_##name), number_of_args, result_size \ + } \ + , + + +#define IO(name, number_of_args, result_size) \ + { \ + Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, "_" #name, \ + FUNCTION_ADDR(Runtime_##name), number_of_args, result_size \ + } \ + , static const Runtime::Function 
kIntrinsicFunctions[] = { RUNTIME_FUNCTION_LIST(F) - RUNTIME_HIDDEN_FUNCTION_LIST(FH) + INLINE_OPTIMIZED_FUNCTION_LIST(F) INLINE_FUNCTION_LIST(I) INLINE_OPTIMIZED_FUNCTION_LIST(IO) }; #undef IO #undef I -#undef FH #undef F -MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap, - Object* dictionary) { - ASSERT(dictionary != NULL); - ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0); +void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate, + Handle<NameDictionary> dict) { + DCHECK(dict->NumberOfElements() == 0); + HandleScope scope(isolate); for (int i = 0; i < kNumFunctions; ++i) { const char* name = kIntrinsicFunctions[i].name; if (name == NULL) continue; - Object* name_string; - { MaybeObject* maybe_name_string = - heap->InternalizeUtf8String(name); - if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string; - } - NameDictionary* name_dictionary = NameDictionary::cast(dictionary); - { MaybeObject* maybe_dictionary = name_dictionary->Add( - String::cast(name_string), - Smi::FromInt(i), - PropertyDetails(NONE, NORMAL, Representation::None())); - if (!maybe_dictionary->ToObject(&dictionary)) { - // Non-recoverable failure. Calling code must restart heap - // initialization. - return maybe_dictionary; - } - } + Handle<NameDictionary> new_dict = NameDictionary::Add( + dict, + isolate->factory()->InternalizeUtf8String(name), + Handle<Smi>(Smi::FromInt(i), isolate), + PropertyDetails(NONE, NORMAL, Representation::None())); + // The dictionary does not need to grow. 
+ CHECK(new_dict.is_identical_to(dict)); } - return dictionary; } const Runtime::Function* Runtime::FunctionForName(Handle<String> name) { Heap* heap = name->GetHeap(); - int entry = heap->intrinsic_function_names()->FindEntry(*name); + int entry = heap->intrinsic_function_names()->FindEntry(name); if (entry != kNotFound) { Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry); int function_index = Smi::cast(smi_index)->value(); @@ -15151,35 +15622,18 @@ } -const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) { - return &(kIntrinsicFunctions[static_cast<int>(id)]); -} - - -void Runtime::PerformGC(Object* result, Isolate* isolate) { - Failure* failure = Failure::cast(result); - if (failure->IsRetryAfterGC()) { - if (isolate->heap()->new_space()->AddFreshPage()) { - return; +const Runtime::Function* Runtime::FunctionForEntry(Address entry) { + for (size_t i = 0; i < ARRAY_SIZE(kIntrinsicFunctions); ++i) { + if (entry == kIntrinsicFunctions[i].entry) { + return &(kIntrinsicFunctions[i]); } - - // Try to do a garbage collection; ignore it if it fails. The C - // entry stub will throw an out-of-memory exception in that case. - isolate->heap()->CollectGarbage(failure->allocation_space(), - "Runtime::PerformGC"); - } else { - // Handle last resort GC and make sure to allow future allocations - // to grow the heap without causing GCs (if possible). 
- isolate->counters()->gc_last_resort_from_js()->Increment(); - isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, - "Runtime::PerformGC"); } + return NULL; } -void Runtime::OutOfMemory() { - Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); - UNREACHABLE(); +const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) { + return &(kIntrinsicFunctions[static_cast<int>(id)]); } } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/runtime.h nodejs-0.11.15/deps/v8/src/runtime.h --- nodejs-0.11.13/deps/v8/src/runtime.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/runtime.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_RUNTIME_H_ #define V8_RUNTIME_H_ -#include "allocation.h" -#include "zone.h" +#include "src/allocation.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -44,387 +21,487 @@ // WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused // MSVC Intellisense to crash. It was broken into two macros to work around // this problem. Please avoid large recursive macros whenever possible. 
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \ - /* Property access */ \ - F(GetProperty, 2, 1) \ - F(KeyedGetProperty, 2, 1) \ - F(DeleteProperty, 3, 1) \ - F(HasLocalProperty, 2, 1) \ - F(HasProperty, 2, 1) \ - F(HasElement, 2, 1) \ - F(IsPropertyEnumerable, 2, 1) \ - F(GetPropertyNames, 1, 1) \ - F(GetPropertyNamesFast, 1, 1) \ - F(GetLocalPropertyNames, 2, 1) \ - F(GetLocalElementNames, 1, 1) \ - F(GetInterceptorInfo, 1, 1) \ - F(GetNamedInterceptorPropertyNames, 1, 1) \ - F(GetIndexedInterceptorElementNames, 1, 1) \ - F(GetArgumentsProperty, 1, 1) \ - F(ToFastProperties, 1, 1) \ - F(FinishArrayPrototypeSetup, 1, 1) \ - F(SpecialArrayFunctions, 1, 1) \ - F(IsSloppyModeFunction, 1, 1) \ - F(GetDefaultReceiver, 1, 1) \ - \ - F(GetPrototype, 1, 1) \ - F(SetPrototype, 2, 1) \ - F(IsInPrototypeChain, 2, 1) \ - \ - F(GetOwnProperty, 2, 1) \ - \ - F(IsExtensible, 1, 1) \ - F(PreventExtensions, 1, 1)\ - \ - /* Utilities */ \ - F(CheckIsBootstrapping, 0, 1) \ - F(GetRootNaN, 0, 1) \ - F(Call, -1 /* >= 2 */, 1) \ - F(Apply, 5, 1) \ - F(GetFunctionDelegate, 1, 1) \ - F(GetConstructorDelegate, 1, 1) \ - F(DeoptimizeFunction, 1, 1) \ - F(ClearFunctionTypeFeedback, 1, 1) \ - F(RunningInSimulator, 0, 1) \ - F(IsConcurrentRecompilationSupported, 0, 1) \ - F(OptimizeFunctionOnNextCall, -1, 1) \ - F(NeverOptimizeFunction, 1, 1) \ - F(GetOptimizationStatus, -1, 1) \ - F(GetOptimizationCount, 1, 1) \ - F(UnblockConcurrentRecompilation, 0, 1) \ - F(CompileForOnStackReplacement, 1, 1) \ - F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \ - F(SetNativeFlag, 1, 1) \ - F(SetInlineBuiltinFlag, 1, 1) \ - F(StoreArrayLiteralElement, 5, 1) \ - F(DebugCallbackSupportsStepping, 1, 1) \ - F(DebugPrepareStepInIfStepping, 1, 1) \ - F(FlattenString, 1, 1) \ - F(TryMigrateInstance, 1, 1) \ - F(NotifyContextDisposed, 0, 1) \ - \ - /* Array join support */ \ - F(PushIfAbsent, 2, 1) \ - F(ArrayConcat, 1, 1) \ - \ - /* Conversions */ \ - F(ToBool, 1, 1) \ - F(Typeof, 1, 1) \ - \ - F(StringToNumber, 1, 1) \ 
- F(StringParseInt, 2, 1) \ - F(StringParseFloat, 1, 1) \ - F(StringToLowerCase, 1, 1) \ - F(StringToUpperCase, 1, 1) \ - F(StringSplit, 3, 1) \ - F(CharFromCode, 1, 1) \ - F(URIEscape, 1, 1) \ - F(URIUnescape, 1, 1) \ - \ - F(NumberToInteger, 1, 1) \ - F(NumberToIntegerMapMinusZero, 1, 1) \ - F(NumberToJSUint32, 1, 1) \ - F(NumberToJSInt32, 1, 1) \ - \ - /* Arithmetic operations */ \ - F(NumberAdd, 2, 1) \ - F(NumberSub, 2, 1) \ - F(NumberMul, 2, 1) \ - F(NumberDiv, 2, 1) \ - F(NumberMod, 2, 1) \ - F(NumberUnaryMinus, 1, 1) \ - F(NumberAlloc, 0, 1) \ - F(NumberImul, 2, 1) \ - \ - F(StringBuilderConcat, 3, 1) \ - F(StringBuilderJoin, 3, 1) \ - F(SparseJoinWithSeparator, 3, 1) \ - \ - /* Bit operations */ \ - F(NumberOr, 2, 1) \ - F(NumberAnd, 2, 1) \ - F(NumberXor, 2, 1) \ - \ - F(NumberShl, 2, 1) \ - F(NumberShr, 2, 1) \ - F(NumberSar, 2, 1) \ - \ - /* Comparisons */ \ - F(NumberEquals, 2, 1) \ - F(StringEquals, 2, 1) \ - \ - F(NumberCompare, 3, 1) \ - F(SmiLexicographicCompare, 2, 1) \ - \ - /* Math */ \ - F(Math_acos, 1, 1) \ - F(Math_asin, 1, 1) \ - F(Math_atan, 1, 1) \ - F(Math_log, 1, 1) \ - F(Math_sqrt, 1, 1) \ - F(Math_exp, 1, 1) \ - F(Math_floor, 1, 1) \ - F(Math_pow, 2, 1) \ - F(Math_pow_cfunction, 2, 1) \ - F(Math_atan2, 2, 1) \ - F(RoundNumber, 1, 1) \ - F(Math_fround, 1, 1) \ - \ - /* Regular expressions */ \ - F(RegExpCompile, 3, 1) \ - F(RegExpExecMultiple, 4, 1) \ - F(RegExpInitializeObject, 5, 1) \ - \ - /* JSON */ \ - F(ParseJson, 1, 1) \ - F(BasicJSONStringify, 1, 1) \ - F(QuoteJSONString, 1, 1) \ - \ - /* Strings */ \ - F(StringIndexOf, 3, 1) \ - F(StringLastIndexOf, 3, 1) \ - F(StringLocaleCompare, 2, 1) \ - F(StringReplaceGlobalRegExpWithString, 4, 1) \ - F(StringReplaceOneCharWithString, 3, 1) \ - F(StringMatch, 3, 1) \ - F(StringTrim, 3, 1) \ - F(StringToArray, 2, 1) \ - F(NewStringWrapper, 1, 1) \ - F(NewString, 2, 1) \ - F(TruncateString, 2, 1) \ - \ - /* Numbers */ \ - F(NumberToRadixString, 2, 1) \ - F(NumberToFixed, 2, 1) \ - 
F(NumberToExponential, 2, 1) \ - F(NumberToPrecision, 2, 1) \ +#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \ + /* Property access */ \ + F(GetProperty, 2, 1) \ + F(KeyedGetProperty, 2, 1) \ + F(DeleteProperty, 3, 1) \ + F(HasOwnProperty, 2, 1) \ + F(HasProperty, 2, 1) \ + F(HasElement, 2, 1) \ + F(IsPropertyEnumerable, 2, 1) \ + F(GetPropertyNames, 1, 1) \ + F(GetPropertyNamesFast, 1, 1) \ + F(GetOwnPropertyNames, 2, 1) \ + F(GetOwnElementNames, 1, 1) \ + F(GetInterceptorInfo, 1, 1) \ + F(GetNamedInterceptorPropertyNames, 1, 1) \ + F(GetIndexedInterceptorElementNames, 1, 1) \ + F(GetArgumentsProperty, 1, 1) \ + F(ToFastProperties, 1, 1) \ + F(FinishArrayPrototypeSetup, 1, 1) \ + F(SpecialArrayFunctions, 0, 1) \ + F(IsSloppyModeFunction, 1, 1) \ + F(GetDefaultReceiver, 1, 1) \ + \ + F(GetPrototype, 1, 1) \ + F(SetPrototype, 2, 1) \ + F(InternalSetPrototype, 2, 1) \ + F(IsInPrototypeChain, 2, 1) \ + \ + F(GetOwnProperty, 2, 1) \ + \ + F(IsExtensible, 1, 1) \ + F(PreventExtensions, 1, 1) \ + \ + /* Utilities */ \ + F(CheckIsBootstrapping, 0, 1) \ + F(GetRootNaN, 0, 1) \ + F(Call, -1 /* >= 2 */, 1) \ + F(Apply, 5, 1) \ + F(GetFunctionDelegate, 1, 1) \ + F(GetConstructorDelegate, 1, 1) \ + F(DeoptimizeFunction, 1, 1) \ + F(ClearFunctionTypeFeedback, 1, 1) \ + F(RunningInSimulator, 0, 1) \ + F(IsConcurrentRecompilationSupported, 0, 1) \ + F(OptimizeFunctionOnNextCall, -1, 1) \ + F(NeverOptimizeFunction, 1, 1) \ + F(GetOptimizationStatus, -1, 1) \ + F(IsOptimized, 0, 1) /* TODO(turbofan): Only temporary */ \ + F(GetOptimizationCount, 1, 1) \ + F(UnblockConcurrentRecompilation, 0, 1) \ + F(CompileForOnStackReplacement, 1, 1) \ + F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \ + F(SetNativeFlag, 1, 1) \ + F(SetInlineBuiltinFlag, 1, 1) \ + F(StoreArrayLiteralElement, 5, 1) \ + F(DebugPrepareStepInIfStepping, 1, 1) \ + F(DebugPushPromise, 1, 1) \ + F(DebugPopPromise, 0, 1) \ + F(DebugPromiseEvent, 1, 1) \ + F(DebugPromiseRejectEvent, 2, 1) \ + F(DebugAsyncTaskEvent, 1, 1) \ + 
F(FlattenString, 1, 1) \ + F(LoadMutableDouble, 2, 1) \ + F(TryMigrateInstance, 1, 1) \ + F(NotifyContextDisposed, 0, 1) \ + \ + /* Array join support */ \ + F(PushIfAbsent, 2, 1) \ + F(ArrayConcat, 1, 1) \ + \ + /* Conversions */ \ + F(ToBool, 1, 1) \ + F(Typeof, 1, 1) \ + \ + F(Booleanize, 2, 1) /* TODO(turbofan): Only temporary */ \ + \ + F(StringToNumber, 1, 1) \ + F(StringParseInt, 2, 1) \ + F(StringParseFloat, 1, 1) \ + F(StringToLowerCase, 1, 1) \ + F(StringToUpperCase, 1, 1) \ + F(StringSplit, 3, 1) \ + F(CharFromCode, 1, 1) \ + F(URIEscape, 1, 1) \ + F(URIUnescape, 1, 1) \ + \ + F(NumberToInteger, 1, 1) \ + F(NumberToIntegerMapMinusZero, 1, 1) \ + F(NumberToJSUint32, 1, 1) \ + F(NumberToJSInt32, 1, 1) \ + \ + /* Arithmetic operations */ \ + F(NumberAdd, 2, 1) \ + F(NumberSub, 2, 1) \ + F(NumberMul, 2, 1) \ + F(NumberDiv, 2, 1) \ + F(NumberMod, 2, 1) \ + F(NumberUnaryMinus, 1, 1) \ + F(NumberImul, 2, 1) \ + \ + F(StringBuilderConcat, 3, 1) \ + F(StringBuilderJoin, 3, 1) \ + F(SparseJoinWithSeparator, 3, 1) \ + \ + /* Bit operations */ \ + F(NumberOr, 2, 1) \ + F(NumberAnd, 2, 1) \ + F(NumberXor, 2, 1) \ + \ + F(NumberShl, 2, 1) \ + F(NumberShr, 2, 1) \ + F(NumberSar, 2, 1) \ + \ + /* Comparisons */ \ + F(NumberEquals, 2, 1) \ + F(StringEquals, 2, 1) \ + \ + F(NumberCompare, 3, 1) \ + F(SmiLexicographicCompare, 2, 1) \ + \ + /* Math */ \ + F(MathAcos, 1, 1) \ + F(MathAsin, 1, 1) \ + F(MathAtan, 1, 1) \ + F(MathFloorRT, 1, 1) \ + F(MathAtan2, 2, 1) \ + F(MathExpRT, 1, 1) \ + F(RoundNumber, 1, 1) \ + F(MathFround, 1, 1) \ + F(RemPiO2, 1, 1) \ + \ + /* Regular expressions */ \ + F(RegExpCompile, 3, 1) \ + F(RegExpExecMultiple, 4, 1) \ + F(RegExpInitializeObject, 5, 1) \ + \ + /* JSON */ \ + F(ParseJson, 1, 1) \ + F(BasicJSONStringify, 1, 1) \ + F(QuoteJSONString, 1, 1) \ + \ + /* Strings */ \ + F(StringIndexOf, 3, 1) \ + F(StringLastIndexOf, 3, 1) \ + F(StringLocaleCompare, 2, 1) \ + F(StringReplaceGlobalRegExpWithString, 4, 1) \ + 
F(StringReplaceOneCharWithString, 3, 1) \ + F(StringMatch, 3, 1) \ + F(StringTrim, 3, 1) \ + F(StringToArray, 2, 1) \ + F(NewStringWrapper, 1, 1) \ + F(NewString, 2, 1) \ + F(TruncateString, 2, 1) \ + \ + /* Numbers */ \ + F(NumberToRadixString, 2, 1) \ + F(NumberToFixed, 2, 1) \ + F(NumberToExponential, 2, 1) \ + F(NumberToPrecision, 2, 1) \ F(IsValidSmi, 1, 1) -#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \ - /* Reflection */ \ - F(FunctionSetInstanceClassName, 2, 1) \ - F(FunctionSetLength, 2, 1) \ - F(FunctionSetPrototype, 2, 1) \ - F(FunctionSetReadOnlyPrototype, 1, 1) \ - F(FunctionGetName, 1, 1) \ - F(FunctionSetName, 2, 1) \ - F(FunctionNameShouldPrintAsAnonymous, 1, 1) \ - F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \ - F(FunctionIsGenerator, 1, 1) \ - F(FunctionBindArguments, 4, 1) \ - F(BoundFunctionGetBindings, 1, 1) \ - F(FunctionRemovePrototype, 1, 1) \ - F(FunctionGetSourceCode, 1, 1) \ - F(FunctionGetScript, 1, 1) \ - F(FunctionGetScriptSourcePosition, 1, 1) \ - F(FunctionGetPositionForOffset, 2, 1) \ - F(FunctionIsAPIFunction, 1, 1) \ - F(FunctionIsBuiltin, 1, 1) \ - F(GetScript, 1, 1) \ - F(CollectStackTrace, 3, 1) \ - F(GetAndClearOverflowedStackTrace, 1, 1) \ - F(GetV8Version, 0, 1) \ - \ - F(SetCode, 2, 1) \ - F(SetExpectedNumberOfProperties, 2, 1) \ - \ - F(CreateApiFunction, 1, 1) \ - F(IsTemplate, 1, 1) \ - F(GetTemplateField, 2, 1) \ - F(DisableAccessChecks, 1, 1) \ - F(EnableAccessChecks, 1, 1) \ - F(SetAccessorProperty, 6, 1) \ - \ - /* Dates */ \ - F(DateCurrentTime, 0, 1) \ - F(DateParseString, 2, 1) \ - F(DateLocalTimezone, 1, 1) \ - F(DateToUTC, 1, 1) \ - F(DateMakeDay, 2, 1) \ - F(DateSetValue, 3, 1) \ - F(DateCacheVersion, 0, 1) \ - \ - /* Globals */ \ - F(CompileString, 2, 1) \ - \ - /* Eval */ \ - F(GlobalReceiver, 1, 1) \ - F(IsAttachedGlobal, 1, 1) \ - \ - F(SetProperty, -1 /* 4 or 5 */, 1) \ - F(DefineOrRedefineDataProperty, 4, 1) \ - F(DefineOrRedefineAccessorProperty, 5, 1) \ - F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 
*/, 1) \ - F(GetDataProperty, 2, 1) \ - F(SetHiddenProperty, 3, 1) \ - \ - /* Arrays */ \ - F(RemoveArrayHoles, 2, 1) \ - F(GetArrayKeys, 2, 1) \ - F(MoveArrayContents, 2, 1) \ - F(EstimateNumberOfElements, 1, 1) \ - \ - /* Getters and Setters */ \ - F(LookupAccessor, 3, 1) \ - \ - /* ES5 */ \ - F(ObjectFreeze, 1, 1) \ - \ - /* Harmony microtasks */ \ - F(GetMicrotaskState, 0, 1) \ - \ - /* Harmony modules */ \ - F(IsJSModule, 1, 1) \ - \ - /* Harmony symbols */ \ - F(CreateSymbol, 1, 1) \ - F(CreatePrivateSymbol, 1, 1) \ - F(CreateGlobalPrivateSymbol, 1, 1) \ - F(NewSymbolWrapper, 1, 1) \ - F(SymbolDescription, 1, 1) \ - F(SymbolRegistry, 0, 1) \ - F(SymbolIsPrivate, 1, 1) \ - \ - /* Harmony proxies */ \ - F(CreateJSProxy, 2, 1) \ - F(CreateJSFunctionProxy, 4, 1) \ - F(IsJSProxy, 1, 1) \ - F(IsJSFunctionProxy, 1, 1) \ - F(GetHandler, 1, 1) \ - F(GetCallTrap, 1, 1) \ - F(GetConstructTrap, 1, 1) \ - F(Fix, 1, 1) \ - \ - /* Harmony sets */ \ - F(SetInitialize, 1, 1) \ - F(SetAdd, 2, 1) \ - F(SetHas, 2, 1) \ - F(SetDelete, 2, 1) \ - F(SetGetSize, 1, 1) \ - \ - /* Harmony maps */ \ - F(MapInitialize, 1, 1) \ - F(MapGet, 2, 1) \ - F(MapHas, 2, 1) \ - F(MapDelete, 2, 1) \ - F(MapSet, 3, 1) \ - F(MapGetSize, 1, 1) \ - \ - /* Harmony weak maps and sets */ \ - F(WeakCollectionInitialize, 1, 1) \ - F(WeakCollectionGet, 2, 1) \ - F(WeakCollectionHas, 2, 1) \ - F(WeakCollectionDelete, 2, 1) \ - F(WeakCollectionSet, 3, 1) \ - \ - /* Harmony events */ \ - F(SetMicrotaskPending, 1, 1) \ - F(RunMicrotasks, 0, 1) \ - \ - /* Harmony observe */ \ - F(IsObserved, 1, 1) \ - F(SetIsObserved, 1, 1) \ - F(GetObservationState, 0, 1) \ - F(ObservationWeakMapCreate, 0, 1) \ - F(UnwrapGlobalProxy, 1, 1) \ - F(IsAccessAllowedForObserver, 3, 1) \ - \ - /* Harmony typed arrays */ \ - F(ArrayBufferInitialize, 2, 1)\ - F(ArrayBufferGetByteLength, 1, 1)\ - F(ArrayBufferSliceImpl, 3, 1) \ - F(ArrayBufferIsView, 1, 1) \ - F(ArrayBufferNeuter, 1, 1) \ - \ - F(TypedArrayInitializeFromArrayLike, 4, 1) \ 
- F(TypedArrayGetBuffer, 1, 1) \ - F(TypedArrayGetByteLength, 1, 1) \ - F(TypedArrayGetByteOffset, 1, 1) \ - F(TypedArrayGetLength, 1, 1) \ - F(TypedArraySetFastCases, 3, 1) \ - \ - F(DataViewGetBuffer, 1, 1) \ - F(DataViewGetByteLength, 1, 1) \ - F(DataViewGetByteOffset, 1, 1) \ - F(DataViewGetInt8, 3, 1) \ - F(DataViewGetUint8, 3, 1) \ - F(DataViewGetInt16, 3, 1) \ - F(DataViewGetUint16, 3, 1) \ - F(DataViewGetInt32, 3, 1) \ - F(DataViewGetUint32, 3, 1) \ - F(DataViewGetFloat32, 3, 1) \ - F(DataViewGetFloat64, 3, 1) \ - \ - F(DataViewSetInt8, 4, 1) \ - F(DataViewSetUint8, 4, 1) \ - F(DataViewSetInt16, 4, 1) \ - F(DataViewSetUint16, 4, 1) \ - F(DataViewSetInt32, 4, 1) \ - F(DataViewSetUint32, 4, 1) \ - F(DataViewSetFloat32, 4, 1) \ - F(DataViewSetFloat64, 4, 1) \ - \ - /* Statements */ \ - F(NewObjectFromBound, 1, 1) \ - \ - /* Declarations and initialization */ \ - F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \ - F(OptimizeObjectForAddingMultipleProperties, 2, 1) \ - \ - /* Debugging */ \ - F(DebugPrint, 1, 1) \ - F(GlobalPrint, 1, 1) \ - F(DebugTrace, 0, 1) \ - F(TraceEnter, 0, 1) \ - F(TraceExit, 1, 1) \ - F(Abort, 1, 1) \ - F(AbortJS, 1, 1) \ - /* ES5 */ \ - F(LocalKeys, 1, 1) \ - \ - /* Message objects */ \ - F(MessageGetStartPosition, 1, 1) \ - F(MessageGetScript, 1, 1) \ - \ - /* Pseudo functions - handled as macros by parser */ \ - F(IS_VAR, 1, 1) \ - \ - /* expose boolean functions from objects-inl.h */ \ - F(HasFastSmiElements, 1, 1) \ - F(HasFastSmiOrObjectElements, 1, 1) \ - F(HasFastObjectElements, 1, 1) \ - F(HasFastDoubleElements, 1, 1) \ - F(HasFastHoleyElements, 1, 1) \ - F(HasDictionaryElements, 1, 1) \ - F(HasSloppyArgumentsElements, 1, 1) \ - F(HasExternalUint8ClampedElements, 1, 1) \ - F(HasExternalArrayElements, 1, 1) \ - F(HasExternalInt8Elements, 1, 1) \ - F(HasExternalUint8Elements, 1, 1) \ - F(HasExternalInt16Elements, 1, 1) \ - F(HasExternalUint16Elements, 1, 1) \ - F(HasExternalInt32Elements, 1, 1) \ - F(HasExternalUint32Elements, 1, 1) \ 
- F(HasExternalFloat32Elements, 1, 1) \ - F(HasExternalFloat64Elements, 1, 1) \ - F(HasFixedUint8ClampedElements, 1, 1) \ - F(HasFixedInt8Elements, 1, 1) \ - F(HasFixedUint8Elements, 1, 1) \ - F(HasFixedInt16Elements, 1, 1) \ - F(HasFixedUint16Elements, 1, 1) \ - F(HasFixedInt32Elements, 1, 1) \ - F(HasFixedUint32Elements, 1, 1) \ - F(HasFixedFloat32Elements, 1, 1) \ - F(HasFixedFloat64Elements, 1, 1) \ - F(HasFastProperties, 1, 1) \ - F(TransitionElementsKind, 2, 1) \ - F(HaveSameMap, 2, 1) \ - F(IsAccessCheckNeeded, 1, 1) +#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \ + /* Reflection */ \ + F(FunctionSetInstanceClassName, 2, 1) \ + F(FunctionSetLength, 2, 1) \ + F(FunctionSetPrototype, 2, 1) \ + F(FunctionGetName, 1, 1) \ + F(FunctionSetName, 2, 1) \ + F(FunctionNameShouldPrintAsAnonymous, 1, 1) \ + F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \ + F(FunctionIsGenerator, 1, 1) \ + F(FunctionIsArrow, 1, 1) \ + F(FunctionBindArguments, 4, 1) \ + F(BoundFunctionGetBindings, 1, 1) \ + F(FunctionRemovePrototype, 1, 1) \ + F(FunctionGetSourceCode, 1, 1) \ + F(FunctionGetScript, 1, 1) \ + F(FunctionGetScriptSourcePosition, 1, 1) \ + F(FunctionGetPositionForOffset, 2, 1) \ + F(FunctionIsAPIFunction, 1, 1) \ + F(FunctionIsBuiltin, 1, 1) \ + F(GetScript, 1, 1) \ + F(CollectStackTrace, 2, 1) \ + F(GetV8Version, 0, 1) \ + \ + F(SetCode, 2, 1) \ + \ + F(CreateApiFunction, 2, 1) \ + F(IsTemplate, 1, 1) \ + F(GetTemplateField, 2, 1) \ + F(DisableAccessChecks, 1, 1) \ + F(EnableAccessChecks, 1, 1) \ + \ + /* Dates */ \ + F(DateCurrentTime, 0, 1) \ + F(DateParseString, 2, 1) \ + F(DateLocalTimezone, 1, 1) \ + F(DateToUTC, 1, 1) \ + F(DateMakeDay, 2, 1) \ + F(DateSetValue, 3, 1) \ + F(DateCacheVersion, 0, 1) \ + \ + /* Globals */ \ + F(CompileString, 2, 1) \ + \ + /* Eval */ \ + F(GlobalProxy, 1, 1) \ + F(IsAttachedGlobal, 1, 1) \ + \ + F(AddNamedProperty, 4, 1) \ + F(AddPropertyForTemplate, 4, 1) \ + F(SetProperty, 4, 1) \ + F(DefineApiAccessorProperty, 5, 1) \ + 
F(DefineDataPropertyUnchecked, 4, 1) \ + F(DefineAccessorPropertyUnchecked, 5, 1) \ + F(GetDataProperty, 2, 1) \ + F(SetHiddenProperty, 3, 1) \ + \ + /* Arrays */ \ + F(RemoveArrayHoles, 2, 1) \ + F(GetArrayKeys, 2, 1) \ + F(MoveArrayContents, 2, 1) \ + F(EstimateNumberOfElements, 1, 1) \ + F(NormalizeElements, 1, 1) \ + \ + /* Getters and Setters */ \ + F(LookupAccessor, 3, 1) \ + \ + /* ES5 */ \ + F(ObjectFreeze, 1, 1) \ + \ + /* Harmony modules */ \ + F(IsJSModule, 1, 1) \ + \ + /* Harmony symbols */ \ + F(CreateSymbol, 1, 1) \ + F(CreatePrivateSymbol, 1, 1) \ + F(CreateGlobalPrivateSymbol, 1, 1) \ + F(CreatePrivateOwnSymbol, 1, 1) \ + F(NewSymbolWrapper, 1, 1) \ + F(SymbolDescription, 1, 1) \ + F(SymbolRegistry, 0, 1) \ + F(SymbolIsPrivate, 1, 1) \ + \ + /* Harmony proxies */ \ + F(CreateJSProxy, 2, 1) \ + F(CreateJSFunctionProxy, 4, 1) \ + F(IsJSProxy, 1, 1) \ + F(IsJSFunctionProxy, 1, 1) \ + F(GetHandler, 1, 1) \ + F(GetCallTrap, 1, 1) \ + F(GetConstructTrap, 1, 1) \ + F(Fix, 1, 1) \ + \ + /* Harmony sets */ \ + F(SetInitialize, 1, 1) \ + F(SetAdd, 2, 1) \ + F(SetHas, 2, 1) \ + F(SetDelete, 2, 1) \ + F(SetClear, 1, 1) \ + F(SetGetSize, 1, 1) \ + \ + F(SetIteratorInitialize, 3, 1) \ + F(SetIteratorNext, 2, 1) \ + \ + /* Harmony maps */ \ + F(MapInitialize, 1, 1) \ + F(MapGet, 2, 1) \ + F(MapHas, 2, 1) \ + F(MapDelete, 2, 1) \ + F(MapClear, 1, 1) \ + F(MapSet, 3, 1) \ + F(MapGetSize, 1, 1) \ + \ + F(MapIteratorInitialize, 3, 1) \ + F(MapIteratorNext, 2, 1) \ + \ + /* Harmony weak maps and sets */ \ + F(WeakCollectionInitialize, 1, 1) \ + F(WeakCollectionGet, 2, 1) \ + F(WeakCollectionHas, 2, 1) \ + F(WeakCollectionDelete, 2, 1) \ + F(WeakCollectionSet, 3, 1) \ + \ + F(GetWeakMapEntries, 1, 1) \ + F(GetWeakSetValues, 1, 1) \ + \ + /* Harmony events */ \ + F(EnqueueMicrotask, 1, 1) \ + F(RunMicrotasks, 0, 1) \ + \ + /* Harmony observe */ \ + F(IsObserved, 1, 1) \ + F(SetIsObserved, 1, 1) \ + F(GetObservationState, 0, 1) \ + F(ObservationWeakMapCreate, 0, 1) \ + 
F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \ + F(ObjectWasCreatedInCurrentOrigin, 1, 1) \ + F(GetObjectContextObjectObserve, 1, 1) \ + F(GetObjectContextObjectGetNotifier, 1, 1) \ + F(GetObjectContextNotifierPerformChange, 1, 1) \ + \ + /* Harmony typed arrays */ \ + F(ArrayBufferInitialize, 2, 1) \ + F(ArrayBufferSliceImpl, 3, 1) \ + F(ArrayBufferIsView, 1, 1) \ + F(ArrayBufferNeuter, 1, 1) \ + \ + F(TypedArrayInitializeFromArrayLike, 4, 1) \ + F(TypedArrayGetBuffer, 1, 1) \ + F(TypedArraySetFastCases, 3, 1) \ + \ + F(DataViewGetBuffer, 1, 1) \ + F(DataViewGetInt8, 3, 1) \ + F(DataViewGetUint8, 3, 1) \ + F(DataViewGetInt16, 3, 1) \ + F(DataViewGetUint16, 3, 1) \ + F(DataViewGetInt32, 3, 1) \ + F(DataViewGetUint32, 3, 1) \ + F(DataViewGetFloat32, 3, 1) \ + F(DataViewGetFloat64, 3, 1) \ + \ + F(DataViewSetInt8, 4, 1) \ + F(DataViewSetUint8, 4, 1) \ + F(DataViewSetInt16, 4, 1) \ + F(DataViewSetUint16, 4, 1) \ + F(DataViewSetInt32, 4, 1) \ + F(DataViewSetUint32, 4, 1) \ + F(DataViewSetFloat32, 4, 1) \ + F(DataViewSetFloat64, 4, 1) \ + \ + /* Statements */ \ + F(NewObjectFromBound, 1, 1) \ + \ + /* Declarations and initialization */ \ + F(InitializeVarGlobal, 3, 1) \ + F(OptimizeObjectForAddingMultipleProperties, 2, 1) \ + \ + /* Debugging */ \ + F(DebugPrint, 1, 1) \ + F(GlobalPrint, 1, 1) \ + F(DebugTrace, 0, 1) \ + F(TraceEnter, 0, 1) \ + F(TraceExit, 1, 1) \ + F(Abort, 1, 1) \ + F(AbortJS, 1, 1) \ + /* ES5 */ \ + F(OwnKeys, 1, 1) \ + \ + /* Message objects */ \ + F(MessageGetStartPosition, 1, 1) \ + F(MessageGetScript, 1, 1) \ + \ + /* Pseudo functions - handled as macros by parser */ \ + F(IS_VAR, 1, 1) \ + \ + /* expose boolean functions from objects-inl.h */ \ + F(HasFastSmiElements, 1, 1) \ + F(HasFastSmiOrObjectElements, 1, 1) \ + F(HasFastObjectElements, 1, 1) \ + F(HasFastDoubleElements, 1, 1) \ + F(HasFastHoleyElements, 1, 1) \ + F(HasDictionaryElements, 1, 1) \ + F(HasSloppyArgumentsElements, 1, 1) \ + F(HasExternalUint8ClampedElements, 1, 1) \ + 
F(HasExternalArrayElements, 1, 1) \ + F(HasExternalInt8Elements, 1, 1) \ + F(HasExternalUint8Elements, 1, 1) \ + F(HasExternalInt16Elements, 1, 1) \ + F(HasExternalUint16Elements, 1, 1) \ + F(HasExternalInt32Elements, 1, 1) \ + F(HasExternalUint32Elements, 1, 1) \ + F(HasExternalFloat32Elements, 1, 1) \ + F(HasExternalFloat64Elements, 1, 1) \ + F(HasFixedUint8ClampedElements, 1, 1) \ + F(HasFixedInt8Elements, 1, 1) \ + F(HasFixedUint8Elements, 1, 1) \ + F(HasFixedInt16Elements, 1, 1) \ + F(HasFixedUint16Elements, 1, 1) \ + F(HasFixedInt32Elements, 1, 1) \ + F(HasFixedUint32Elements, 1, 1) \ + F(HasFixedFloat32Elements, 1, 1) \ + F(HasFixedFloat64Elements, 1, 1) \ + F(HasFastProperties, 1, 1) \ + F(TransitionElementsKind, 2, 1) \ + F(HaveSameMap, 2, 1) \ + F(IsJSGlobalProxy, 1, 1) \ + F(ForInInit, 2, 2) /* TODO(turbofan): Only temporary */ \ + F(ForInNext, 4, 2) /* TODO(turbofan): Only temporary */ \ + F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */ + + +#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \ + /* String and Regexp */ \ + F(NumberToStringRT, 1, 1) \ + F(RegExpConstructResult, 3, 1) \ + F(RegExpExecRT, 4, 1) \ + F(StringAdd, 2, 1) \ + F(SubString, 3, 1) \ + F(InternalizeString, 1, 1) \ + F(StringCompare, 2, 1) \ + F(StringCharCodeAtRT, 2, 1) \ + F(GetFromCache, 2, 1) \ + \ + /* Compilation */ \ + F(CompileUnoptimized, 1, 1) \ + F(CompileOptimized, 2, 1) \ + F(TryInstallOptimizedCode, 1, 1) \ + F(NotifyDeoptimized, 1, 1) \ + F(NotifyStubFailure, 0, 1) \ + \ + /* Utilities */ \ + F(AllocateInNewSpace, 1, 1) \ + F(AllocateInTargetSpace, 2, 1) \ + F(AllocateHeapNumber, 0, 1) \ + F(NumberToSmi, 1, 1) \ + F(NumberToStringSkipCache, 1, 1) \ + \ + F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \ + F(NewSloppyArguments, 3, 1) \ + F(NewStrictArguments, 3, 1) \ + \ + /* Harmony generators */ \ + F(CreateJSGeneratorObject, 0, 1) \ + F(SuspendJSGeneratorObject, 1, 1) \ + F(ResumeJSGeneratorObject, 3, 1) \ + F(ThrowGeneratorStateError, 1, 1) \ 
+ \ + /* Arrays */ \ + F(ArrayConstructor, -1, 1) \ + F(InternalArrayConstructor, -1, 1) \ + \ + /* Literals */ \ + F(MaterializeRegExpLiteral, 4, 1) \ + F(CreateObjectLiteral, 4, 1) \ + F(CreateArrayLiteral, 4, 1) \ + F(CreateArrayLiteralStubBailout, 3, 1) \ + \ + /* Statements */ \ + F(NewClosure, 3, 1) \ + F(NewClosureFromStubFailure, 1, 1) \ + F(NewObject, 1, 1) \ + F(NewObjectWithAllocationSite, 2, 1) \ + F(FinalizeInstanceSize, 1, 1) \ + F(Throw, 1, 1) \ + F(ReThrow, 1, 1) \ + F(ThrowReferenceError, 1, 1) \ + F(ThrowNotDateError, 0, 1) \ + F(StackGuard, 0, 1) \ + F(Interrupt, 0, 1) \ + F(PromoteScheduledException, 0, 1) \ + \ + /* Contexts */ \ + F(NewGlobalContext, 2, 1) \ + F(NewFunctionContext, 1, 1) \ + F(PushWithContext, 2, 1) \ + F(PushCatchContext, 3, 1) \ + F(PushBlockContext, 2, 1) \ + F(PushModuleContext, 2, 1) \ + F(DeleteLookupSlot, 2, 1) \ + F(LoadLookupSlot, 2, 2) \ + F(LoadLookupSlotNoReferenceError, 2, 2) \ + F(StoreLookupSlot, 4, 1) \ + \ + /* Declarations and initialization */ \ + F(DeclareGlobals, 3, 1) \ + F(DeclareModules, 1, 1) \ + F(DeclareLookupSlot, 4, 1) \ + F(InitializeConstGlobal, 2, 1) \ + F(InitializeLegacyConstLookupSlot, 3, 1) \ + \ + /* Eval */ \ + F(ResolvePossiblyDirectEval, 5, 2) \ + \ + /* Maths */ \ + F(MathPowSlow, 2, 1) \ + F(MathPowRT, 2, 1) -#ifdef ENABLE_DEBUGGER_SUPPORT -#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \ +#define RUNTIME_FUNCTION_LIST_DEBUGGER(F) \ /* Debugger support*/ \ F(DebugBreak, 0, 1) \ F(SetDebugEventListener, 2, 1) \ @@ -465,6 +542,7 @@ F(DebugConstructedBy, 2, 1) \ F(DebugGetPrototype, 1, 1) \ F(DebugSetScriptSource, 2, 1) \ + F(DebugCallbackSupportsStepping, 1, 1) \ F(SystemBreak, 0, 1) \ F(DebugDisassembleFunction, 1, 1) \ F(DebugDisassembleConstructor, 1, 1) \ @@ -487,10 +565,6 @@ F(CollectGarbage, 1, 1) \ F(GetHeapUsage, 0, 1) \ -#else -#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) -#endif - #ifdef V8_I18N_SUPPORT #define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \ @@ -500,6 +574,10 
@@ F(AvailableLocalesOf, 1, 1) \ F(GetDefaultICULocale, 0, 1) \ F(GetLanguageTagVariants, 1, 1) \ + F(IsInitializedIntlObject, 1, 1) \ + F(IsInitializedIntlObjectOfType, 2, 1) \ + F(MarkAsInitializedIntlObjectOfType, 3, 1) \ + F(GetImplFromInitializedIntlObject, 1, 1) \ \ /* Date format and parse. */ \ F(CreateDateTimeFormat, 3, 1) \ @@ -548,94 +626,11 @@ #define RUNTIME_FUNCTION_LIST(F) \ RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \ RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \ + RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \ RUNTIME_FUNCTION_LIST_DEBUG(F) \ - RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \ + RUNTIME_FUNCTION_LIST_DEBUGGER(F) \ RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) -// RUNTIME_HIDDEN_FUNCTION_LIST defines all runtime functions accessed -// by id from code generator, but not via native call by name. -// Entries have the form F(name, number of arguments, number of return values). -#define RUNTIME_HIDDEN_FUNCTION_LIST(F) \ - F(NumberToString, 1, 1) \ - F(RegExpConstructResult, 3, 1) \ - F(RegExpExec, 4, 1) \ - F(StringAdd, 2, 1) \ - F(SubString, 3, 1) \ - F(StringCompare, 2, 1) \ - F(StringCharCodeAt, 2, 1) \ - F(Log, 3, 1) \ - F(GetFromCache, 2, 1) \ - \ - /* Compilation */ \ - F(CompileUnoptimized, 1, 1) \ - F(CompileOptimized, 2, 1) \ - F(TryInstallOptimizedCode, 1, 1) \ - F(NotifyDeoptimized, 1, 1) \ - F(NotifyStubFailure, 0, 1) \ - \ - /* Utilities */ \ - F(AllocateInNewSpace, 1, 1) \ - F(AllocateInTargetSpace, 2, 1) \ - F(AllocateHeapNumber, 0, 1) \ - F(NumberToSmi, 1, 1) \ - F(NumberToStringSkipCache, 1, 1) \ - \ - F(NewArgumentsFast, 3, 1) \ - F(NewStrictArgumentsFast, 3, 1) \ - \ - /* Harmony generators */ \ - F(CreateJSGeneratorObject, 0, 1) \ - F(SuspendJSGeneratorObject, 1, 1) \ - F(ResumeJSGeneratorObject, 3, 1) \ - F(ThrowGeneratorStateError, 1, 1) \ - \ - /* Arrays */ \ - F(ArrayConstructor, -1, 1) \ - F(InternalArrayConstructor, -1, 1) \ - \ - /* Literals */ \ - F(MaterializeRegExpLiteral, 4, 1)\ - F(CreateObjectLiteral, 4, 1) \ - F(CreateArrayLiteral, 4, 1) \ - 
F(CreateArrayLiteralStubBailout, 3, 1) \ - \ - /* Statements */ \ - F(NewClosure, 3, 1) \ - F(NewClosureFromStubFailure, 1, 1) \ - F(NewObject, 1, 1) \ - F(NewObjectWithAllocationSite, 2, 1) \ - F(FinalizeInstanceSize, 1, 1) \ - F(Throw, 1, 1) \ - F(ReThrow, 1, 1) \ - F(ThrowReferenceError, 1, 1) \ - F(ThrowNotDateError, 0, 1) \ - F(ThrowMessage, 1, 1) \ - F(StackGuard, 0, 1) \ - F(Interrupt, 0, 1) \ - F(PromoteScheduledException, 0, 1) \ - \ - /* Contexts */ \ - F(NewGlobalContext, 2, 1) \ - F(NewFunctionContext, 1, 1) \ - F(PushWithContext, 2, 1) \ - F(PushCatchContext, 3, 1) \ - F(PushBlockContext, 2, 1) \ - F(PushModuleContext, 2, 1) \ - F(DeleteContextSlot, 2, 1) \ - F(LoadContextSlot, 2, 2) \ - F(LoadContextSlotNoReferenceError, 2, 2) \ - F(StoreContextSlot, 4, 1) \ - \ - /* Declarations and initialization */ \ - F(DeclareGlobals, 3, 1) \ - F(DeclareModules, 1, 1) \ - F(DeclareContextSlot, 4, 1) \ - F(InitializeConstGlobal, 2, 1) \ - F(InitializeConstContextSlot, 3, 1) \ - \ - /* Eval */ \ - F(ResolvePossiblyDirectEval, 5, 2) - // ---------------------------------------------------------------------------- // INLINE_FUNCTION_LIST defines all inlined functions accessed // with a native call of the form %_name from within JS code. 
@@ -663,8 +658,6 @@ F(IsSpecObject, 1, 1) \ F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \ F(MathPow, 2, 1) \ - F(MathSqrt, 1, 1) \ - F(MathLog, 1, 1) \ F(IsMinusZero, 1, 1) \ F(HasCachedArrayIndex, 1, 1) \ F(GetCachedArrayIndex, 1, 1) \ @@ -674,29 +667,40 @@ F(DebugBreakInOptimizedCode, 0, 1) \ F(ClassOf, 1, 1) \ F(StringCharCodeAt, 2, 1) \ - F(Log, 3, 1) \ F(StringAdd, 2, 1) \ F(SubString, 3, 1) \ F(StringCompare, 2, 1) \ F(RegExpExec, 4, 1) \ F(RegExpConstructResult, 3, 1) \ F(GetFromCache, 2, 1) \ - F(NumberToString, 1, 1) + F(NumberToString, 1, 1) \ + F(DebugIsActive, 0, 1) // ---------------------------------------------------------------------------- // INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed // with a native call of the form %_name from within JS code that also have // a corresponding runtime function, that is called from non-optimized code. +// For the benefit of (fuzz) tests, the runtime version can also be called +// directly as %name (i.e. without the leading underscore). // Entries have the form F(name, number of arguments, number of return values). 
#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \ - F(DoubleHi, 1, 1) \ - F(DoubleLo, 1, 1) \ - F(ConstructDouble, 2, 1) \ + /* Typed Arrays */ \ F(TypedArrayInitialize, 5, 1) \ F(DataViewInitialize, 4, 1) \ F(MaxSmi, 0, 1) \ - F(TypedArrayMaxSizeInHeap, 0, 1) + F(TypedArrayMaxSizeInHeap, 0, 1) \ + F(ArrayBufferViewGetByteLength, 1, 1) \ + F(ArrayBufferViewGetByteOffset, 1, 1) \ + F(TypedArrayGetLength, 1, 1) \ + /* ArrayBuffer */ \ + F(ArrayBufferGetByteLength, 1, 1) \ + /* Maths */ \ + F(ConstructDouble, 2, 1) \ + F(DoubleHi, 1, 1) \ + F(DoubleLo, 1, 1) \ + F(MathSqrtRT, 1, 1) \ + F(MathLogRT, 1, 1) //--------------------------------------------------------------------------- @@ -749,9 +753,7 @@ enum FunctionId { #define F(name, nargs, ressize) k##name, RUNTIME_FUNCTION_LIST(F) -#undef F -#define F(name, nargs, ressize) kHidden##name, - RUNTIME_HIDDEN_FUNCTION_LIST(F) + INLINE_OPTIMIZED_FUNCTION_LIST(F) #undef F #define F(name, nargs, ressize) kInline##name, INLINE_FUNCTION_LIST(F) @@ -765,7 +767,6 @@ enum IntrinsicType { RUNTIME, - RUNTIME_HIDDEN, INLINE, INLINE_OPTIMIZED }; @@ -791,11 +792,8 @@ // Add internalized strings for all the intrinsic function names to a // StringDictionary. - // Returns failure if an allocation fails. In this case, it must be - // retried with a new, empty StringDictionary, not with the same one. - // Alternatively, heap initialization can be completely restarted. - MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames( - Heap* heap, Object* dictionary); + static void InitializeIntrinsicFunctionNames(Isolate* isolate, + Handle<NameDictionary> dict); // Get the intrinsic function with the given name, which must be internalized. static const Function* FunctionForName(Handle<String> name); @@ -803,6 +801,9 @@ // Get the intrinsic function with the given FunctionId. static const Function* FunctionForId(FunctionId id); + // Get the intrinsic function with the given function entry address. 
+ static const Function* FunctionForEntry(Address ref); + // General-purpose helper functions for runtime system. static int StringMatch(Isolate* isolate, Handle<String> sub, @@ -816,42 +817,35 @@ // Support getting the characters in a string using [] notation as // in Firefox/SpiderMonkey, Safari and Opera. - static Handle<Object> GetElementOrCharAt(Isolate* isolate, - Handle<Object> object, - uint32_t index); - - static Handle<Object> SetObjectProperty( + MUST_USE_RESULT static MaybeHandle<Object> GetElementOrCharAt( Isolate* isolate, Handle<Object> object, - Handle<Object> key, - Handle<Object> value, - PropertyAttributes attr, - StrictMode strict_mode); + uint32_t index); - static Handle<Object> ForceSetObjectProperty( - Isolate* isolate, + MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty( + Isolate* isolate, Handle<Object> object, Handle<Object> key, + Handle<Object> value, StrictMode strict_mode); + + MUST_USE_RESULT static MaybeHandle<Object> DefineObjectProperty( Handle<JSObject> object, Handle<Object> key, Handle<Object> value, - PropertyAttributes attr); + PropertyAttributes attr, + JSReceiver::StoreFromKeyed store_from_keyed = + JSReceiver::MAY_BE_STORE_FROM_KEYED); - MUST_USE_RESULT static MaybeObject* DeleteObjectProperty( + MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty( Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key, JSReceiver::DeleteMode mode); - MUST_USE_RESULT static MaybeObject* HasObjectProperty( + MUST_USE_RESULT static MaybeHandle<Object> HasObjectProperty( Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key); - MUST_USE_RESULT static MaybeObject* GetObjectProperty( - Isolate* isolate, - Handle<Object> object, - Handle<Object> key); - - MUST_USE_RESULT static MaybeObject* GetObjectPropertyOrFail( + MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty( Isolate* isolate, Handle<Object> object, Handle<Object> key); @@ -884,7 +878,10 @@ ARRAY_ID_INT32 = 6, ARRAY_ID_FLOAT32 = 7, 
ARRAY_ID_FLOAT64 = 8, - ARRAY_ID_UINT8_CLAMPED = 9 + ARRAY_ID_UINT8_CLAMPED = 9, + + ARRAY_ID_FIRST = ARRAY_ID_UINT8, + ARRAY_ID_LAST = ARRAY_ID_UINT8_CLAMPED }; static void ArrayIdToTypeAndSize(int array_id, @@ -893,12 +890,8 @@ ElementsKind* fixed_elements_kind, size_t *element_size); - // Helper functions used stubs. - static void PerformGC(Object* result, Isolate* isolate); - static void OutOfMemory(); - // Used in runtime.cc and hydrogen's VisitArrayLiteral. - static Handle<Object> CreateArrayLiteralBoilerplate( + MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate( Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> elements); diff -Nru nodejs-0.11.13/deps/v8/src/runtime.js nodejs-0.11.15/deps/v8/src/runtime.js --- nodejs-0.11.13/deps/v8/src/runtime.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/runtime.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // This files contains runtime support implemented in JavaScript. @@ -59,6 +36,7 @@ while (true) { if (IS_NUMBER(y)) return %NumberEquals(x, y); if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal + if (IS_SYMBOL(y)) return 1; // not equal if (!IS_SPEC_OBJECT(y)) { // String or boolean. return %NumberEquals(x, %ToNumber(y)); @@ -464,7 +442,7 @@ } -function APPLY_OVERFLOW(length) { +function STACK_OVERFLOW(length) { throw %MakeRangeError('stack_overflow', []); } @@ -524,7 +502,7 @@ } if (IS_BOOLEAN(x)) return x ? 1 : 0; if (IS_UNDEFINED(x)) return NAN; - if (IS_SYMBOL(x)) return NAN; + if (IS_SYMBOL(x)) throw MakeTypeError('symbol_to_number', []); return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x)); } @@ -535,7 +513,7 @@ } if (IS_BOOLEAN(x)) return x ? 1 : 0; if (IS_UNDEFINED(x)) return NAN; - if (IS_SYMBOL(x)) return NAN; + if (IS_SYMBOL(x)) throw MakeTypeError('symbol_to_number', []); return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x)); } @@ -630,35 +608,37 @@ // ECMA-262, section 8.6.2.6, page 28. 
function DefaultNumber(x) { - var valueOf = x.valueOf; - if (IS_SPEC_FUNCTION(valueOf)) { - var v = %_CallFunction(x, valueOf); - if (%IsPrimitive(v)) return v; - } + if (!IS_SYMBOL_WRAPPER(x)) { + var valueOf = x.valueOf; + if (IS_SPEC_FUNCTION(valueOf)) { + var v = %_CallFunction(x, valueOf); + if (%IsPrimitive(v)) return v; + } - var toString = x.toString; - if (IS_SPEC_FUNCTION(toString)) { - var s = %_CallFunction(x, toString); - if (%IsPrimitive(s)) return s; + var toString = x.toString; + if (IS_SPEC_FUNCTION(toString)) { + var s = %_CallFunction(x, toString); + if (%IsPrimitive(s)) return s; + } } - throw %MakeTypeError('cannot_convert_to_primitive', []); } // ECMA-262, section 8.6.2.6, page 28. function DefaultString(x) { - var toString = x.toString; - if (IS_SPEC_FUNCTION(toString)) { - var s = %_CallFunction(x, toString); - if (%IsPrimitive(s)) return s; - } + if (!IS_SYMBOL_WRAPPER(x)) { + var toString = x.toString; + if (IS_SPEC_FUNCTION(toString)) { + var s = %_CallFunction(x, toString); + if (%IsPrimitive(s)) return s; + } - var valueOf = x.valueOf; - if (IS_SPEC_FUNCTION(valueOf)) { - var v = %_CallFunction(x, valueOf); - if (%IsPrimitive(v)) return v; + var valueOf = x.valueOf; + if (IS_SPEC_FUNCTION(valueOf)) { + var v = %_CallFunction(x, valueOf); + if (%IsPrimitive(v)) return v; + } } - throw %MakeTypeError('cannot_convert_to_primitive', []); } diff -Nru nodejs-0.11.13/deps/v8/src/runtime-profiler.cc nodejs-0.11.15/deps/v8/src/runtime-profiler.cc --- nodejs-0.11.13/deps/v8/src/runtime-profiler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/runtime-profiler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,45 +1,22 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "runtime-profiler.h" - -#include "assembler.h" -#include "bootstrapper.h" -#include "code-stubs.h" -#include "compilation-cache.h" -#include "execution.h" -#include "full-codegen.h" -#include "global-handles.h" -#include "isolate-inl.h" -#include "mark-compact.h" -#include "platform.h" -#include "scopeinfo.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/runtime-profiler.h" + +#include "src/assembler.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/compilation-cache.h" +#include "src/execution.h" +#include "src/full-codegen.h" +#include "src/global-handles.h" +#include "src/heap/mark-compact.h" +#include "src/isolate-inl.h" +#include "src/scopeinfo.h" namespace v8 { namespace internal { @@ -80,35 +57,43 @@ } -static void GetICCounts(Code* shared_code, - int* ic_with_type_info_count, - int* ic_total_count, - int* percentage) { +static void GetICCounts(Code* shared_code, int* ic_with_type_info_count, + int* ic_generic_count, int* ic_total_count, + int* type_info_percentage, int* generic_percentage) { *ic_total_count = 0; + *ic_generic_count = 0; *ic_with_type_info_count = 0; Object* raw_info = shared_code->type_feedback_info(); if (raw_info->IsTypeFeedbackInfo()) { TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info); *ic_with_type_info_count = info->ic_with_type_info_count(); + *ic_generic_count = info->ic_generic_count(); *ic_total_count = info->ic_total_count(); } - *percentage = *ic_total_count > 0 - ? 100 * *ic_with_type_info_count / *ic_total_count - : 100; + if (*ic_total_count > 0) { + *type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count; + *generic_percentage = 100 * *ic_generic_count / *ic_total_count; + } else { + *type_info_percentage = 100; // Compared against lower bound. + *generic_percentage = 0; // Compared against upper bound. 
+ } } void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) { - ASSERT(function->IsOptimizable()); + DCHECK(function->IsOptimizable()); if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) { PrintF("[marking "); function->ShortPrint(); PrintF(" for recompilation, reason: %s", reason); if (FLAG_type_info_threshold > 0) { - int typeinfo, total, percentage; - GetICCounts(function->shared()->code(), &typeinfo, &total, &percentage); - PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage); + int typeinfo, generic, total, type_percentage, generic_percentage; + GetICCounts(function->shared()->code(), &typeinfo, &generic, &total, + &type_percentage, &generic_percentage); + PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, + type_percentage); + PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage); } PrintF("]\n"); } @@ -124,7 +109,7 @@ // recompilation race. This goes away as soon as OSR becomes one-shot. return; } - ASSERT(!function->IsInOptimizationQueue()); + DCHECK(!function->IsInOptimizationQueue()); function->MarkForConcurrentOptimization(); } else { // The next call to the function will trigger optimization. @@ -133,7 +118,9 @@ } -void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) { +void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function, + int loop_nesting_levels) { + SharedFunctionInfo* shared = function->shared(); // See AlwaysFullCompiler (in compiler.cc) comment on why we need // Debug::has_break_points(). if (!FLAG_use_osr || @@ -142,7 +129,6 @@ return; } - SharedFunctionInfo* shared = function->shared(); // If the code is not optimizable, don't try OSR. 
if (!shared->code()->optimizable()) return; @@ -160,7 +146,9 @@ PrintF("]\n"); } - BackEdgeTable::Patch(isolate_, shared->code()); + for (int i = 0; i < loop_nesting_levels; i++) { + BackEdgeTable::Patch(isolate_, shared->code()); + } } @@ -185,17 +173,21 @@ SharedFunctionInfo* shared = function->shared(); Code* shared_code = shared->code(); + List<JSFunction*> functions(4); + frame->GetFunctions(&functions); + for (int i = functions.length(); --i >= 0; ) { + SharedFunctionInfo* shared_function_info = functions[i]->shared(); + int ticks = shared_function_info->profiler_ticks(); + if (ticks < Smi::kMaxValue) { + shared_function_info->set_profiler_ticks(ticks + 1); + } + } + if (shared_code->kind() != Code::FUNCTION) continue; if (function->IsInOptimizationQueue()) continue; - if (FLAG_always_osr && - shared_code->allow_osr_at_loop_nesting_level() == 0) { - // Testing mode: always try an OSR compile for every function. - for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) { - // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop. - shared_code->set_allow_osr_at_loop_nesting_level(i); - AttemptOnStackReplacement(function); - } + if (FLAG_always_osr) { + AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker); // Fall through and do a normal optimized compile as well. 
} else if (!frame->is_optimized() && (function->IsMarkedForOptimization() || @@ -209,12 +201,7 @@ if (shared_code->CodeSize() > allowance) { if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1); } else { - int nesting = shared_code->allow_osr_at_loop_nesting_level(); - if (nesting < Code::kMaxLoopNestingMarker) { - int new_nesting = nesting + 1; - shared_code->set_allow_osr_at_loop_nesting_level(new_nesting); - AttemptOnStackReplacement(function); - } + AttemptOnStackReplacement(function); } continue; } @@ -248,9 +235,11 @@ int ticks = shared_code->profiler_ticks(); if (ticks >= kProfilerTicksBeforeOptimization) { - int typeinfo, total, percentage; - GetICCounts(shared_code, &typeinfo, &total, &percentage); - if (percentage >= FLAG_type_info_threshold) { + int typeinfo, generic, total, type_percentage, generic_percentage; + GetICCounts(shared_code, &typeinfo, &generic, &total, &type_percentage, + &generic_percentage); + if (type_percentage >= FLAG_type_info_threshold && + generic_percentage <= FLAG_generic_ic_threshold) { // If this particular function hasn't had any ICs patched for enough // ticks, optimize it now. Optimize(function, "hot and stable"); @@ -261,15 +250,23 @@ if (FLAG_trace_opt_verbose) { PrintF("[not yet optimizing "); function->PrintName(); - PrintF(", not enough type info: %d/%d (%d%%)]\n", - typeinfo, total, percentage); + PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total, + type_percentage); } } } else if (!any_ic_changed_ && shared_code->instruction_size() < kMaxSizeEarlyOpt) { // If no IC was patched since the last tick and this function is very // small, optimistically optimize it now. 
- Optimize(function, "small function"); + int typeinfo, generic, total, type_percentage, generic_percentage; + GetICCounts(shared_code, &typeinfo, &generic, &total, &type_percentage, + &generic_percentage); + if (type_percentage >= FLAG_type_info_threshold && + generic_percentage <= FLAG_generic_ic_threshold) { + Optimize(function, "small function"); + } else { + shared_code->set_profiler_ticks(ticks + 1); + } } else { shared_code->set_profiler_ticks(ticks + 1); } diff -Nru nodejs-0.11.13/deps/v8/src/runtime-profiler.h nodejs-0.11.15/deps/v8/src/runtime-profiler.h --- nodejs-0.11.13/deps/v8/src/runtime-profiler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/runtime-profiler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,23 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_RUNTIME_PROFILER_H_ #define V8_RUNTIME_PROFILER_H_ -#include "allocation.h" -#include "atomicops.h" +#include "src/allocation.h" namespace v8 { + +namespace base { +class Semaphore; +} + namespace internal { class Isolate; class JSFunction; class Object; -class Semaphore; class RuntimeProfiler { public: @@ -47,7 +27,7 @@ void NotifyICChanged() { any_ic_changed_ = true; } - void AttemptOnStackReplacement(JSFunction* function); + void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1); private: void Optimize(JSFunction* function, const char* reason); diff -Nru nodejs-0.11.13/deps/v8/src/safepoint-table.cc nodejs-0.11.15/deps/v8/src/safepoint-table.cc --- nodejs-0.11.13/deps/v8/src/safepoint-table.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/safepoint-table.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,24 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "safepoint-table.h" - -#include "deoptimizer.h" -#include "disasm.h" -#include "macro-assembler.h" -#include "zone-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/safepoint-table.h" + +#include "src/deoptimizer.h" +#include "src/disasm.h" +#include "src/macro-assembler.h" +#include "src/ostreams.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { bool SafepointEntry::HasRegisters() const { - ASSERT(is_valid()); - ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte)); + DCHECK(is_valid()); + DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte)); const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2; for (int i = 0; i < num_reg_bytes; i++) { if (bits_[i] != SafepointTable::kNoRegisters) return true; @@ -50,8 +28,8 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const { - ASSERT(is_valid()); - ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters); + DCHECK(is_valid()); + DCHECK(reg_index >= 0 && reg_index < kNumSafepointRegisters); int byte_index = reg_index >> kBitsPerByteLog2; int bit_index = reg_index & (kBitsPerByte - 1); return (bits_[byte_index] & (1 << bit_index)) != 0; @@ -59,7 +37,7 @@ SafepointTable::SafepointTable(Code* code) { - ASSERT(code->is_crankshafted()); + DCHECK(code->is_crankshafted()); code_ = code; Address header = code->instruction_start() + code->safepoint_table_offset(); length_ = Memory::uint32_at(header + kLengthOffset); @@ -67,7 +45,7 @@ pc_and_deoptimization_indexes_ = header + kHeaderSize; entries_ = pc_and_deoptimization_indexes_ + (length_ * kPcAndDeoptimizationIndexSize); - ASSERT(entry_size_ > 0); + DCHECK(entry_size_ > 0); STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax == Safepoint::kNoDeoptimizationIndex); } @@ -83,35 +61,36 @@ } -void SafepointTable::PrintEntry(unsigned index, FILE* out) const { +void SafepointTable::PrintEntry(unsigned index, OStream& os) const { // NOLINT disasm::NameConverter converter; SafepointEntry entry = GetEntry(index); uint8_t* bits = entry.bits(); // Print the stack slot bits. 
if (entry_size_ > 0) { - ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte)); + DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte)); const int first = kNumSafepointRegisters >> kBitsPerByteLog2; int last = entry_size_ - 1; - for (int i = first; i < last; i++) PrintBits(out, bits[i], kBitsPerByte); + for (int i = first; i < last; i++) PrintBits(os, bits[i], kBitsPerByte); int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte); - PrintBits(out, bits[last], last_bits); + PrintBits(os, bits[last], last_bits); // Print the registers (if any). if (!entry.HasRegisters()) return; for (int j = 0; j < kNumSafepointRegisters; j++) { if (entry.HasRegisterAt(j)) { - PrintF(out, " | %s", converter.NameOfCPURegister(j)); + os << " | " << converter.NameOfCPURegister(j); } } } } -void SafepointTable::PrintBits(FILE* out, uint8_t byte, int digits) { - ASSERT(digits >= 0 && digits <= kBitsPerByte); +void SafepointTable::PrintBits(OStream& os, // NOLINT + uint8_t byte, int digits) { + DCHECK(digits >= 0 && digits <= kBitsPerByte); for (int i = 0; i < digits; i++) { - PrintF(out, "%c", ((byte & (1 << i)) == 0) ? '0' : '1'); + os << (((byte & (1 << i)) == 0) ? "0" : "1"); } } @@ -126,7 +105,7 @@ Safepoint::Kind kind, int arguments, Safepoint::DeoptMode deopt_mode) { - ASSERT(arguments >= 0); + DCHECK(arguments >= 0); DeoptimizationInfo info; info.pc = assembler->pc_offset(); info.arguments = arguments; @@ -152,7 +131,7 @@ } unsigned SafepointTableBuilder::GetCodeOffset() const { - ASSERT(emitted_); + DCHECK(emitted_); return offset_; } @@ -191,7 +170,7 @@ bits.AddBlock(0, bytes_per_entry, zone_); // Run through the registers (if any). 
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte)); + DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte)); if (registers == NULL) { const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2; for (int j = 0; j < num_reg_bytes; j++) { @@ -200,7 +179,7 @@ } else { for (int j = 0; j < registers->length(); j++) { int index = registers->at(j); - ASSERT(index >= 0 && index < kNumSafepointRegisters); + DCHECK(index >= 0 && index < kNumSafepointRegisters); int byte_index = index >> kBitsPerByteLog2; int bit_index = index & (kBitsPerByte - 1); bits[byte_index] |= (1 << bit_index); diff -Nru nodejs-0.11.13/deps/v8/src/safepoint-table.h nodejs-0.11.15/deps/v8/src/safepoint-table.h --- nodejs-0.11.13/deps/v8/src/safepoint-table.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/safepoint-table.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SAFEPOINT_TABLE_H_ #define V8_SAFEPOINT_TABLE_H_ -#include "allocation.h" -#include "heap.h" -#include "v8memory.h" -#include "zone.h" +#include "src/allocation.h" +#include "src/heap/heap.h" +#include "src/v8memory.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -43,7 +20,7 @@ SafepointEntry() : info_(0), bits_(NULL) {} SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) { - ASSERT(is_valid()); + DCHECK(is_valid()); } bool is_valid() const { return bits_ != NULL; } @@ -58,7 +35,7 @@ } int deoptimization_index() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return DeoptimizationIndexField::decode(info_); } @@ -78,17 +55,17 @@ kSaveDoublesFieldBits> { }; // NOLINT int argument_count() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return ArgumentsField::decode(info_); } bool has_doubles() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return SaveDoublesField::decode(info_); } uint8_t* bits() { - ASSERT(is_valid()); + DCHECK(is_valid()); return bits_; } @@ -112,12 +89,12 @@ unsigned entry_size() const { return entry_size_; } unsigned GetPcOffset(unsigned index) const { - ASSERT(index < length_); + DCHECK(index < length_); return Memory::uint32_at(GetPcOffsetLocation(index)); } SafepointEntry GetEntry(unsigned index) const { - ASSERT(index < length_); + DCHECK(index < 
length_); unsigned info = Memory::uint32_at(GetInfoLocation(index)); uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_)); return SafepointEntry(info, bits); @@ -126,7 +103,7 @@ // Returns the entry for the given pc. SafepointEntry FindEntry(Address pc) const; - void PrintEntry(unsigned index, FILE* out = stdout) const; + void PrintEntry(unsigned index, OStream& os) const; // NOLINT private: static const uint8_t kNoRegisters = 0xFF; @@ -149,7 +126,8 @@ return GetPcOffsetLocation(index) + kPcSize; } - static void PrintBits(FILE* out, uint8_t byte, int digits); + static void PrintBits(OStream& os, // NOLINT + uint8_t byte, int digits); DisallowHeapAllocation no_allocation_; Code* code_; diff -Nru nodejs-0.11.13/deps/v8/src/sampler.cc nodejs-0.11.15/deps/v8/src/sampler.cc --- nodejs-0.11.13/deps/v8/src/sampler.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/sampler.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "sampler.h" +#include "src/sampler.h" #if V8_OS_POSIX && !V8_OS_CYGWIN @@ -37,7 +14,7 @@ #include <sys/time.h> #if !V8_OS_QNX -#include <sys/syscall.h> +#include <sys/syscall.h> // NOLINT #endif #if V8_OS_MACOSX @@ -56,25 +33,25 @@ #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \ (defined(__arm__) || defined(__aarch64__)) && \ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) -#include <asm/sigcontext.h> +#include <asm/sigcontext.h> // NOLINT #endif #elif V8_OS_WIN || V8_OS_CYGWIN -#include "win32-headers.h" +#include "src/base/win32-headers.h" #endif -#include "v8.h" +#include "src/v8.h" -#include "cpu-profiler-inl.h" -#include "flags.h" -#include "frames-inl.h" -#include "log.h" -#include "platform.h" -#include "simulator.h" -#include "v8threads.h" -#include "vm-state-inl.h" +#include "src/base/platform/platform.h" +#include "src/cpu-profiler-inl.h" +#include "src/flags.h" +#include "src/frames-inl.h" +#include "src/log.h" +#include "src/simulator.h" +#include "src/v8threads.h" +#include "src/vm-state-inl.h" #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) @@ -279,6 +256,12 @@ Simulator::sp)); state->fp = reinterpret_cast<Address>(simulator_->get_register( Simulator::fp)); +#elif V8_TARGET_ARCH_MIPS64 + state->pc = reinterpret_cast<Address>(simulator_->get_pc()); + state->sp = 
reinterpret_cast<Address>(simulator_->get_register( + Simulator::sp)); + state->fp = reinterpret_cast<Address>(simulator_->get_register( + Simulator::fp)); #endif } @@ -292,16 +275,16 @@ class SignalHandler : public AllStatic { public: - static void SetUp() { if (!mutex_) mutex_ = new Mutex(); } + static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } static void TearDown() { delete mutex_; } static void IncreaseSamplerCount() { - LockGuard<Mutex> lock_guard(mutex_); + base::LockGuard<base::Mutex> lock_guard(mutex_); if (++client_count_ == 1) Install(); } static void DecreaseSamplerCount() { - LockGuard<Mutex> lock_guard(mutex_); + base::LockGuard<base::Mutex> lock_guard(mutex_); if (--client_count_ == 0) Restore(); } @@ -332,14 +315,14 @@ static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); // Protects the process wide state below. - static Mutex* mutex_; + static base::Mutex* mutex_; static int client_count_; static bool signal_handler_installed_; static struct sigaction old_signal_handler_; }; -Mutex* SignalHandler::mutex_ = NULL; +base::Mutex* SignalHandler::mutex_ = NULL; int SignalHandler::client_count_ = 0; struct sigaction SignalHandler::old_signal_handler_; bool SignalHandler::signal_handler_installed_ = false; @@ -354,7 +337,7 @@ #else USE(info); if (signal != SIGPROF) return; - Isolate* isolate = Isolate::UncheckedCurrent(); + Isolate* isolate = Isolate::UnsafeCurrent(); if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) { // We require a fully initialized and entered isolate. 
return; @@ -416,6 +399,10 @@ state.pc = reinterpret_cast<Address>(mcontext.pc); state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); +#elif V8_HOST_ARCH_MIPS64 + state.pc = reinterpret_cast<Address>(mcontext.pc); + state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); + state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); #endif // V8_HOST_ARCH_* #elif V8_OS_MACOSX #if V8_HOST_ARCH_X64 @@ -496,20 +483,20 @@ #endif -class SamplerThread : public Thread { +class SamplerThread : public base::Thread { public: static const int kSamplerThreadStackSize = 64 * KB; explicit SamplerThread(int interval) - : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)), + : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)), interval_(interval) {} - static void SetUp() { if (!mutex_) mutex_ = new Mutex(); } + static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } static void TearDown() { delete mutex_; mutex_ = NULL; } static void AddActiveSampler(Sampler* sampler) { bool need_to_start = false; - LockGuard<Mutex> lock_guard(mutex_); + base::LockGuard<base::Mutex> lock_guard(mutex_); if (instance_ == NULL) { // Start a thread that will send SIGPROF signal to VM threads, // when CPU profiling will be enabled. 
@@ -517,9 +504,9 @@ need_to_start = true; } - ASSERT(sampler->IsActive()); - ASSERT(!instance_->active_samplers_.Contains(sampler)); - ASSERT(instance_->interval_ == sampler->interval()); + DCHECK(sampler->IsActive()); + DCHECK(!instance_->active_samplers_.Contains(sampler)); + DCHECK(instance_->interval_ == sampler->interval()); instance_->active_samplers_.Add(sampler); if (need_to_start) instance_->StartSynchronously(); @@ -528,11 +515,11 @@ static void RemoveActiveSampler(Sampler* sampler) { SamplerThread* instance_to_remove = NULL; { - LockGuard<Mutex> lock_guard(mutex_); + base::LockGuard<base::Mutex> lock_guard(mutex_); - ASSERT(sampler->IsActive()); + DCHECK(sampler->IsActive()); bool removed = instance_->active_samplers_.RemoveElement(sampler); - ASSERT(removed); + DCHECK(removed); USE(removed); // We cannot delete the instance immediately as we need to Join() the @@ -552,7 +539,7 @@ virtual void Run() { while (true) { { - LockGuard<Mutex> lock_guard(mutex_); + base::LockGuard<base::Mutex> lock_guard(mutex_); if (active_samplers_.is_empty()) break; // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. @@ -563,13 +550,13 @@ sampler->DoSample(); } } - OS::Sleep(interval_); + base::OS::Sleep(interval_); } } private: // Protects the process wide state below. 
- static Mutex* mutex_; + static base::Mutex* mutex_; static SamplerThread* instance_; const int interval_; @@ -579,7 +566,7 @@ }; -Mutex* SamplerThread::mutex_ = NULL; +base::Mutex* SamplerThread::mutex_ = NULL; SamplerThread* SamplerThread::instance_ = NULL; @@ -588,7 +575,8 @@ // DISABLE_ASAN void TickSample::Init(Isolate* isolate, const RegisterState& regs) { - ASSERT(isolate->IsInitialized()); + DCHECK(isolate->IsInitialized()); + timestamp = base::TimeTicks::HighResolutionNow(); pc = regs.pc; state = isolate->current_vm_state(); @@ -618,7 +606,7 @@ SafeStackFrameIterator it(isolate, regs.fp, regs.sp, js_entry_sp); top_frame_type = it.top_frame_type(); - int i = 0; + unsigned i = 0; while (!it.done() && i < TickSample::kMaxFramesCount) { stack[i++] = it.frame()->pc(); it.Advance(); @@ -656,27 +644,27 @@ Sampler::~Sampler() { - ASSERT(!IsActive()); + DCHECK(!IsActive()); delete data_; } void Sampler::Start() { - ASSERT(!IsActive()); + DCHECK(!IsActive()); SetActive(true); SamplerThread::AddActiveSampler(this); } void Sampler::Stop() { - ASSERT(IsActive()); + DCHECK(IsActive()); SamplerThread::RemoveActiveSampler(this); SetActive(false); } void Sampler::IncreaseProfilingDepth() { - NoBarrier_AtomicIncrement(&profiling_, 1); + base::NoBarrier_AtomicIncrement(&profiling_, 1); #if defined(USE_SIGNALS) SignalHandler::IncreaseSamplerCount(); #endif @@ -687,7 +675,7 @@ #if defined(USE_SIGNALS) SignalHandler::DecreaseSamplerCount(); #endif - NoBarrier_AtomicIncrement(&profiling_, -1); + base::NoBarrier_AtomicIncrement(&profiling_, -1); } diff -Nru nodejs-0.11.13/deps/v8/src/sampler.h nodejs-0.11.15/deps/v8/src/sampler.h --- nodejs-0.11.13/deps/v8/src/sampler.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/sampler.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SAMPLER_H_ #define V8_SAMPLER_H_ -#include "atomicops.h" -#include "frames.h" -#include "v8globals.h" +#include "src/base/atomicops.h" +#include "src/frames.h" +#include "src/globals.h" namespace v8 { namespace internal { @@ -67,9 +44,11 @@ Address tos; // Top stack value (*sp). 
Address external_callback; }; - static const int kMaxFramesCount = 64; + static const unsigned kMaxFramesCountLog2 = 8; + static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1; Address stack[kMaxFramesCount]; // Call stack. - int frames_count : 8; // Number of captured frames. + base::TimeTicks timestamp; + unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames. bool has_external_callback : 1; StackFrame::Type top_frame_type : 4; }; @@ -96,20 +75,20 @@ // Whether the sampling thread should use this Sampler for CPU profiling? bool IsProfiling() const { - return NoBarrier_Load(&profiling_) > 0 && - !NoBarrier_Load(&has_processing_thread_); + return base::NoBarrier_Load(&profiling_) > 0 && + !base::NoBarrier_Load(&has_processing_thread_); } void IncreaseProfilingDepth(); void DecreaseProfilingDepth(); // Whether the sampler is running (that is, consumes resources). - bool IsActive() const { return NoBarrier_Load(&active_); } + bool IsActive() const { return base::NoBarrier_Load(&active_); } void DoSample(); // If true next sample must be initiated on the profiler event processor // thread right after latest sample is processed. void SetHasProcessingThread(bool value) { - NoBarrier_Store(&has_processing_thread_, value); + base::NoBarrier_Store(&has_processing_thread_, value); } // Used in tests to make sure that stack sampling is performed. @@ -130,13 +109,13 @@ virtual void Tick(TickSample* sample) = 0; private: - void SetActive(bool value) { NoBarrier_Store(&active_, value); } + void SetActive(bool value) { base::NoBarrier_Store(&active_, value); } Isolate* isolate_; const int interval_; - Atomic32 profiling_; - Atomic32 has_processing_thread_; - Atomic32 active_; + base::Atomic32 profiling_; + base::Atomic32 has_processing_thread_; + base::Atomic32 active_; PlatformData* data_; // Platform specific data. bool is_counting_samples_; // Counts stack samples taken in JS VM state. 
diff -Nru nodejs-0.11.13/deps/v8/src/scanner.cc nodejs-0.11.15/deps/v8/src/scanner.cc --- nodejs-0.11.13/deps/v8/src/scanner.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scanner.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,33 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Features shared by parsing and pre-parsing scanners. 
#include <cmath> -#include "scanner.h" +#include "src/v8.h" -#include "../include/v8stdint.h" -#include "char-predicates-inl.h" -#include "conversions-inl.h" -#include "list-inl.h" -#include "v8.h" -#include "parser.h" +#include "include/v8stdint.h" +#include "src/ast-value-factory.h" +#include "src/char-predicates-inl.h" +#include "src/conversions-inl.h" +#include "src/list-inl.h" +#include "src/parser.h" +#include "src/scanner.h" namespace v8 { namespace internal { + +Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const { + if (is_one_byte()) { + return isolate->factory()->InternalizeOneByteString(one_byte_literal()); + } + return isolate->factory()->InternalizeTwoByteString(two_byte_literal()); +} + + // ---------------------------------------------------------------------------- // Scanner @@ -66,7 +53,7 @@ uc32 Scanner::ScanHexNumber(int expected_length) { - ASSERT(expected_length <= 4); // prevent overflow + DCHECK(expected_length <= 4); // prevent overflow uc32 digits[4] = { 0, 0, 0, 0 }; uc32 x = 0; @@ -317,8 +304,70 @@ } +Token::Value Scanner::SkipSourceURLComment() { + TryToParseSourceURLComment(); + while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) { + Advance(); + } + + return Token::WHITESPACE; +} + + +void Scanner::TryToParseSourceURLComment() { + // Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this + // function will just return if it cannot parse a magic comment. 
+ if (!unicode_cache_->IsWhiteSpace(c0_)) + return; + Advance(); + LiteralBuffer name; + while (c0_ >= 0 && !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) && + c0_ != '=') { + name.AddChar(c0_); + Advance(); + } + if (!name.is_one_byte()) return; + Vector<const uint8_t> name_literal = name.one_byte_literal(); + LiteralBuffer* value; + if (name_literal == STATIC_ASCII_VECTOR("sourceURL")) { + value = &source_url_; + } else if (name_literal == STATIC_ASCII_VECTOR("sourceMappingURL")) { + value = &source_mapping_url_; + } else { + return; + } + if (c0_ != '=') + return; + Advance(); + value->Reset(); + while (c0_ >= 0 && unicode_cache_->IsWhiteSpace(c0_)) { + Advance(); + } + while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) { + // Disallowed characters. + if (c0_ == '"' || c0_ == '\'') { + value->Reset(); + return; + } + if (unicode_cache_->IsWhiteSpace(c0_)) { + break; + } + value->AddChar(c0_); + Advance(); + } + // Allow whitespace at the end. + while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) { + if (!unicode_cache_->IsWhiteSpace(c0_)) { + value->Reset(); + break; + } + Advance(); + } +} + + Token::Value Scanner::SkipMultiLineComment() { - ASSERT(c0_ == '*'); + DCHECK(c0_ == '*'); Advance(); while (c0_ >= 0) { @@ -345,7 +394,7 @@ Token::Value Scanner::ScanHtmlComment() { // Check for <!-- comments. 
- ASSERT(c0_ == '!'); + DCHECK(c0_ == '!'); Advance(); if (c0_ == '-') { Advance(); @@ -353,7 +402,7 @@ PushBack('-'); // undo Advance() } PushBack('!'); // undo Advance() - ASSERT(c0_ == '!'); + DCHECK(c0_ == '!'); return Token::LT; } @@ -417,10 +466,12 @@ break; case '=': - // = == === + // = == === => Advance(); if (c0_ == '=') { token = Select('=', Token::EQ_STRICT, Token::EQ); + } else if (c0_ == '>') { + token = Select(Token::ARROW); } else { token = Token::ASSIGN; } @@ -481,7 +532,14 @@ // / // /* /= Advance(); if (c0_ == '/') { - token = SkipSingleLineComment(); + Advance(); + if (c0_ == '@' || c0_ == '#') { + Advance(); + token = SkipSourceURLComment(); + } else { + PushBack(c0_); + token = SkipSingleLineComment(); + } } else if (c0_ == '*') { token = SkipMultiLineComment(); } else if (c0_ == '=') { @@ -603,9 +661,9 @@ // the "next" token. The "current" token will be invalid. if (pos == next_.location.beg_pos) return; int current_pos = source_pos(); - ASSERT_EQ(next_.location.end_pos, current_pos); + DCHECK_EQ(next_.location.end_pos, current_pos); // Positions inside the lookahead token aren't supported. 
- ASSERT(pos >= current_pos); + DCHECK(pos >= current_pos); if (pos != current_pos) { source_->SeekForward(pos - source_->pos()); Advance(); @@ -725,7 +783,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) { - ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction + DCHECK(IsDecimalDigit(c0_)); // the first digit of the number or the fraction enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL; @@ -804,7 +862,7 @@ // scan exponent, if any if (c0_ == 'e' || c0_ == 'E') { - ASSERT(kind != HEX); // 'e'/'E' must be scanned as part of the hex number + DCHECK(kind != HEX); // 'e'/'E' must be scanned as part of the hex number if (kind != DECIMAL) return Token::ILLEGAL; // scan exponent AddLiteralCharAdvance(); @@ -913,7 +971,7 @@ int input_length, bool harmony_scoping, bool harmony_modules) { - ASSERT(input_length >= 1); + DCHECK(input_length >= 1); const int kMinLength = 2; const int kMaxLength = 10; if (input_length < kMinLength || input_length > kMaxLength) { @@ -950,8 +1008,18 @@ } +bool Scanner::IdentifierIsFutureStrictReserved( + const AstRawString* string) const { + // Keywords are always 1-byte strings. + return string->is_one_byte() && + Token::FUTURE_STRICT_RESERVED_WORD == + KeywordOrIdentifierToken(string->raw_data(), string->length(), + harmony_scoping_, harmony_modules_); +} + + Token::Value Scanner::ScanIdentifierOrKeyword() { - ASSERT(unicode_cache_->IsIdentifierStart(c0_)); + DCHECK(unicode_cache_->IsIdentifierStart(c0_)); LiteralScope literal(this); // Scan identifier start character. 
if (c0_ == '\\') { @@ -1067,7 +1135,7 @@ bool Scanner::ScanLiteralUnicodeEscape() { - ASSERT(c0_ == '\\'); + DCHECK(c0_ == '\\'); uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0}; Advance(); int i = 1; @@ -1116,33 +1184,27 @@ } -Handle<String> Scanner::AllocateNextLiteralString(Isolate* isolate, - PretenureFlag tenured) { - if (is_next_literal_one_byte()) { - return isolate->factory()->NewStringFromOneByte( - Vector<const uint8_t>::cast(next_literal_one_byte_string()), tenured); - } else { - return isolate->factory()->NewStringFromTwoByte( - next_literal_two_byte_string(), tenured); +const AstRawString* Scanner::CurrentSymbol(AstValueFactory* ast_value_factory) { + if (is_literal_one_byte()) { + return ast_value_factory->GetOneByteString(literal_one_byte_string()); } + return ast_value_factory->GetTwoByteString(literal_two_byte_string()); } -Handle<String> Scanner::AllocateInternalizedString(Isolate* isolate) { - if (is_literal_one_byte()) { - return isolate->factory()->InternalizeOneByteString( - literal_one_byte_string()); - } else { - return isolate->factory()->InternalizeTwoByteString( - literal_two_byte_string()); +const AstRawString* Scanner::NextSymbol(AstValueFactory* ast_value_factory) { + if (is_next_literal_one_byte()) { + return ast_value_factory->GetOneByteString(next_literal_one_byte_string()); } + return ast_value_factory->GetTwoByteString(next_literal_two_byte_string()); } double Scanner::DoubleValue() { - ASSERT(is_literal_one_byte()); + DCHECK(is_literal_one_byte()); return StringToDouble( - unicode_cache_, Vector<const char>::cast(literal_one_byte_string()), + unicode_cache_, + literal_one_byte_string(), ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY); } @@ -1160,15 +1222,6 @@ } -void Scanner::LogSymbol(ParserRecorder* log, int position) { - if (is_literal_one_byte()) { - log->LogOneByteSymbol(position, literal_one_byte_string()); - } else { - log->LogTwoByteSymbol(position, literal_two_byte_string()); - } -} - - int 
DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) { return AddSymbol(key, true, value); } @@ -1193,7 +1246,7 @@ int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) { - ASSERT(key.length() > 0); + DCHECK(key.length() > 0); // Quick check for already being in canonical form. if (IsNumberCanonical(key)) { return AddOneByteSymbol(key, value); @@ -1201,7 +1254,7 @@ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY; double double_value = StringToDouble( - unicode_constants_, Vector<const char>::cast(key), flags, 0.0); + unicode_constants_, key, flags, 0.0); int length; const char* string; if (!std::isfinite(double_value)) { diff -Nru nodejs-0.11.13/deps/v8/src/scanner-character-streams.cc nodejs-0.11.15/deps/v8/src/scanner-character-streams.cc --- nodejs-0.11.13/deps/v8/src/scanner-character-streams.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scanner-character-streams.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" -#include "scanner-character-streams.h" +#include "src/scanner-character-streams.h" -#include "handles.h" -#include "unicode-inl.h" +#include "src/handles.h" +#include "src/unicode-inl.h" namespace v8 { namespace internal { @@ -78,8 +55,8 @@ buffer_cursor_ = buffer_end_; } // Ensure that there is room for at least one pushback. - ASSERT(buffer_cursor_ > buffer_); - ASSERT(pos_ > 0); + DCHECK(buffer_cursor_ > buffer_); + DCHECK(pos_ > 0); buffer_[--buffer_cursor_ - buffer_] = character; if (buffer_cursor_ == buffer_) { pushback_limit_ = NULL; @@ -101,7 +78,7 @@ if (buffer_cursor_ < buffer_end_) return true; // Otherwise read a new block. 
} - unsigned length = FillBuffer(pos_, kBufferSize); + unsigned length = FillBuffer(pos_); buffer_end_ = buffer_ + length; return length > 0; } @@ -125,9 +102,7 @@ unsigned end_position) : string_(data), length_(end_position) { - ASSERT(end_position >= start_position); - buffer_cursor_ = buffer_; - buffer_end_ = buffer_; + DCHECK(end_position >= start_position); pos_ = start_position; } @@ -143,9 +118,9 @@ } -unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos, - unsigned length) { +unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos) { if (from_pos >= length_) return 0; + unsigned length = kBufferSize; if (from_pos + length > length_) { length = length_ - from_pos; } @@ -180,8 +155,7 @@ } -unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position, - unsigned length) { +unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position) { static const unibrow::uchar kMaxUtf16Character = 0xffff; SetRawPosition(char_position); if (raw_character_position_ != char_position) { @@ -190,7 +164,7 @@ return 0u; } unsigned i = 0; - while (i < length - 1) { + while (i < kBufferSize - 1) { if (raw_data_pos_ == raw_data_length_) break; unibrow::uchar c = raw_data_[raw_data_pos_]; if (c <= unibrow::Utf8::kMaxOneByteChar) { @@ -234,12 +208,12 @@ static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) { byte character = buffer[--*cursor]; if (character > unibrow::Utf8::kMaxOneByteChar) { - ASSERT(IsUtf8MultiCharacterFollower(character)); + DCHECK(IsUtf8MultiCharacterFollower(character)); // Last byte of a multi-byte character encoding. Step backwards until // pointing to the first byte of the encoding, recognized by having the // top two bits set. while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { } - ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor])); + DCHECK(IsUtf8MultiCharacterStart(buffer[*cursor])); } } @@ -255,7 +229,7 @@ // 110..... - (0xCx, 0xDx) one additional byte (minimum). // 1110.... 
- (0xEx) two additional bytes. // 11110... - (0xFx) three additional bytes (maximum). - ASSERT(IsUtf8MultiCharacterStart(character)); + DCHECK(IsUtf8MultiCharacterStart(character)); // Additional bytes is: // 1 if value in range 0xC0 .. 0xDF. // 2 if value in range 0xE0 .. 0xEF. @@ -264,7 +238,7 @@ unsigned additional_bytes = ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03; *cursor += additional_bytes; - ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes])); + DCHECK(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes])); } } @@ -280,12 +254,12 @@ int old_pos = raw_data_pos_; Utf8CharacterBack(raw_data_, &raw_data_pos_); raw_character_position_--; - ASSERT(old_pos - raw_data_pos_ <= 4); + DCHECK(old_pos - raw_data_pos_ <= 4); // Step back over both code units for surrogate pairs. if (old_pos - raw_data_pos_ == 4) raw_character_position_--; } while (raw_character_position_ > target_position); // No surrogate pair splitting. - ASSERT(raw_character_position_ == target_position); + DCHECK(raw_character_position_ == target_position); return; } // Spool forwards in the utf8 buffer. @@ -294,11 +268,11 @@ int old_pos = raw_data_pos_; Utf8CharacterForward(raw_data_, &raw_data_pos_); raw_character_position_++; - ASSERT(raw_data_pos_ - old_pos <= 4); + DCHECK(raw_data_pos_ - old_pos <= 4); if (raw_data_pos_ - old_pos == 4) raw_character_position_++; } // No surrogate pair splitting. - ASSERT(raw_character_position_ == target_position); + DCHECK(raw_character_position_ == target_position); } diff -Nru nodejs-0.11.13/deps/v8/src/scanner-character-streams.h nodejs-0.11.15/deps/v8/src/scanner-character-streams.h --- nodejs-0.11.13/deps/v8/src/scanner-character-streams.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scanner-character-streams.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_SCANNER_CHARACTER_STREAMS_H_ #define V8_SCANNER_CHARACTER_STREAMS_H_ -#include "scanner.h" +#include "src/scanner.h" namespace v8 { namespace internal { @@ -52,7 +29,7 @@ virtual void SlowPushBack(uc16 character); virtual unsigned BufferSeekForward(unsigned delta) = 0; - virtual unsigned FillBuffer(unsigned position, unsigned length) = 0; + virtual unsigned FillBuffer(unsigned position) = 0; const uc16* pushback_limit_; uc16 buffer_[kBufferSize]; @@ -69,10 +46,9 @@ protected: virtual unsigned BufferSeekForward(unsigned delta); - virtual unsigned FillBuffer(unsigned position, unsigned length); + virtual unsigned FillBuffer(unsigned position); Handle<String> string_; - unsigned start_position_; unsigned length_; }; @@ -85,7 +61,7 @@ protected: virtual unsigned BufferSeekForward(unsigned delta); - virtual unsigned FillBuffer(unsigned char_position, unsigned length); + virtual unsigned FillBuffer(unsigned char_position); void SetRawPosition(unsigned char_position); const byte* raw_data_; @@ -106,7 +82,7 @@ virtual ~ExternalTwoByteStringUtf16CharacterStream(); virtual void PushBack(uc32 character) { - ASSERT(buffer_cursor_ > raw_data_); + DCHECK(buffer_cursor_ > raw_data_); buffer_cursor_--; pos_--; } diff -Nru nodejs-0.11.13/deps/v8/src/scanner.h nodejs-0.11.15/deps/v8/src/scanner.h --- nodejs-0.11.13/deps/v8/src/scanner.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scanner.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,28 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Features shared by parsing and pre-parsing scanners. 
#ifndef V8_SCANNER_H_ #define V8_SCANNER_H_ -#include "allocation.h" -#include "char-predicates.h" -#include "checks.h" -#include "globals.h" -#include "hashmap.h" -#include "list.h" -#include "token.h" -#include "unicode-inl.h" -#include "utils.h" +#include "src/allocation.h" +#include "src/base/logging.h" +#include "src/char-predicates.h" +#include "src/globals.h" +#include "src/hashmap.h" +#include "src/list.h" +#include "src/token.h" +#include "src/unicode-inl.h" +#include "src/utils.h" namespace v8 { namespace internal { +class AstRawString; +class AstValueFactory; class ParserRecorder; @@ -232,34 +211,34 @@ } ConvertToTwoByte(); } - ASSERT(code_unit < 0x10000u); + DCHECK(code_unit < 0x10000u); *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit; position_ += kUC16Size; } - bool is_one_byte() { return is_one_byte_; } + bool is_one_byte() const { return is_one_byte_; } - bool is_contextual_keyword(Vector<const char> keyword) { + bool is_contextual_keyword(Vector<const char> keyword) const { return is_one_byte() && keyword.length() == position_ && (memcmp(keyword.start(), backing_store_.start(), position_) == 0); } - Vector<const uint16_t> two_byte_literal() { - ASSERT(!is_one_byte_); - ASSERT((position_ & 0x1) == 0); + Vector<const uint16_t> two_byte_literal() const { + DCHECK(!is_one_byte_); + DCHECK((position_ & 0x1) == 0); return Vector<const uint16_t>( reinterpret_cast<const uint16_t*>(backing_store_.start()), position_ >> 1); } - Vector<const uint8_t> one_byte_literal() { - ASSERT(is_one_byte_); + Vector<const uint8_t> one_byte_literal() const { + DCHECK(is_one_byte_); return Vector<const uint8_t>( reinterpret_cast<const uint8_t*>(backing_store_.start()), position_); } - int length() { + int length() const { return is_one_byte_ ? 
position_ : (position_ >> 1); } @@ -268,6 +247,8 @@ is_one_byte_ = true; } + Handle<String> Internalize(Isolate* isolate) const; + private: static const int kInitialCapacity = 16; static const int kGrowthFactory = 4; @@ -281,13 +262,13 @@ void ExpandBuffer() { Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity)); - OS::MemCopy(new_store.start(), backing_store_.start(), position_); + MemCopy(new_store.start(), backing_store_.start(), position_); backing_store_.Dispose(); backing_store_ = new_store; } void ConvertToTwoByte() { - ASSERT(is_one_byte_); + DCHECK(is_one_byte_); Vector<byte> new_store; int new_content_size = position_ * kUC16Size; if (new_content_size >= backing_store_.length()) { @@ -391,17 +372,16 @@ return current_.literal_chars->length() != source_length; } bool is_literal_contextual_keyword(Vector<const char> keyword) { - ASSERT_NOT_NULL(current_.literal_chars); + DCHECK_NOT_NULL(current_.literal_chars); return current_.literal_chars->is_contextual_keyword(keyword); } bool is_next_contextual_keyword(Vector<const char> keyword) { - ASSERT_NOT_NULL(next_.literal_chars); + DCHECK_NOT_NULL(next_.literal_chars); return next_.literal_chars->is_contextual_keyword(keyword); } - Handle<String> AllocateNextLiteralString(Isolate* isolate, - PretenureFlag tenured); - Handle<String> AllocateInternalizedString(Isolate* isolate); + const AstRawString* CurrentSymbol(AstValueFactory* ast_value_factory); + const AstRawString* NextSymbol(AstValueFactory* ast_value_factory); double DoubleValue(); bool UnescapedLiteralMatches(const char* data, int length) { @@ -428,8 +408,6 @@ int FindNumber(DuplicateFinder* finder, int value); int FindSymbol(DuplicateFinder* finder, int value); - void LogSymbol(ParserRecorder* log, int position); - UnicodeCache* unicode_cache() { return unicode_cache_; } // Returns the location of the last seen octal literal. @@ -475,6 +453,13 @@ // be empty). 
bool ScanRegExpFlags(); + const LiteralBuffer* source_url() const { return &source_url_; } + const LiteralBuffer* source_mapping_url() const { + return &source_mapping_url_; + } + + bool IdentifierIsFutureStrictReserved(const AstRawString* string) const; + private: // The current and look-ahead token. struct TokenDesc { @@ -506,7 +491,7 @@ } INLINE(void AddLiteralChar(uc32 c)) { - ASSERT_NOT_NULL(next_.literal_chars); + DCHECK_NOT_NULL(next_.literal_chars); next_.literal_chars->AddChar(c); } @@ -555,37 +540,37 @@ // These functions only give the correct result if the literal // was scanned between calls to StartLiteral() and TerminateLiteral(). Vector<const uint8_t> literal_one_byte_string() { - ASSERT_NOT_NULL(current_.literal_chars); + DCHECK_NOT_NULL(current_.literal_chars); return current_.literal_chars->one_byte_literal(); } Vector<const uint16_t> literal_two_byte_string() { - ASSERT_NOT_NULL(current_.literal_chars); + DCHECK_NOT_NULL(current_.literal_chars); return current_.literal_chars->two_byte_literal(); } bool is_literal_one_byte() { - ASSERT_NOT_NULL(current_.literal_chars); + DCHECK_NOT_NULL(current_.literal_chars); return current_.literal_chars->is_one_byte(); } int literal_length() const { - ASSERT_NOT_NULL(current_.literal_chars); + DCHECK_NOT_NULL(current_.literal_chars); return current_.literal_chars->length(); } // Returns the literal string for the next token (the token that // would be returned if Next() were called). 
Vector<const uint8_t> next_literal_one_byte_string() { - ASSERT_NOT_NULL(next_.literal_chars); + DCHECK_NOT_NULL(next_.literal_chars); return next_.literal_chars->one_byte_literal(); } Vector<const uint16_t> next_literal_two_byte_string() { - ASSERT_NOT_NULL(next_.literal_chars); + DCHECK_NOT_NULL(next_.literal_chars); return next_.literal_chars->two_byte_literal(); } bool is_next_literal_one_byte() { - ASSERT_NOT_NULL(next_.literal_chars); + DCHECK_NOT_NULL(next_.literal_chars); return next_.literal_chars->is_one_byte(); } int next_literal_length() const { - ASSERT_NOT_NULL(next_.literal_chars); + DCHECK_NOT_NULL(next_.literal_chars); return next_.literal_chars->length(); } @@ -596,6 +581,8 @@ bool SkipWhiteSpace(); Token::Value SkipSingleLineComment(); + Token::Value SkipSourceURLComment(); + void TryToParseSourceURLComment(); Token::Value SkipMultiLineComment(); // Scans a possible HTML comment -- begins with '<!'. Token::Value ScanHtmlComment(); @@ -630,6 +617,10 @@ LiteralBuffer literal_buffer1_; LiteralBuffer literal_buffer2_; + // Values parsed from magic comments. + LiteralBuffer source_url_; + LiteralBuffer source_mapping_url_; + TokenDesc current_; // desc for current token (as returned by Next()) TokenDesc next_; // desc for next token (one token look-ahead) diff -Nru nodejs-0.11.13/deps/v8/src/scopeinfo.cc nodejs-0.11.15/deps/v8/src/scopeinfo.cc --- nodejs-0.11.13/deps/v8/src/scopeinfo.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scopeinfo.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <stdlib.h> -#include "v8.h" +#include "src/v8.h" -#include "scopeinfo.h" -#include "scopes.h" +#include "src/scopeinfo.h" +#include "src/scopes.h" namespace v8 { namespace internal { @@ -44,8 +21,8 @@ const int stack_local_count = stack_locals.length(); const int context_local_count = context_locals.length(); // Make sure we allocate the correct amount. - ASSERT(scope->StackLocalCount() == stack_local_count); - ASSERT(scope->ContextLocalCount() == context_local_count); + DCHECK(scope->StackLocalCount() == stack_local_count); + DCHECK(scope->ContextLocalCount() == context_local_count); // Determine use and location of the function variable if it is present. 
FunctionVariableInfo function_name_info; @@ -57,7 +34,7 @@ } else if (var->IsContextSlot()) { function_name_info = CONTEXT; } else { - ASSERT(var->IsStackLocal()); + DCHECK(var->IsStackLocal()); function_name_info = STACK; } function_variable_mode = var->mode(); @@ -88,7 +65,7 @@ int index = kVariablePartIndex; // Add parameters. - ASSERT(index == scope_info->ParameterEntriesIndex()); + DCHECK(index == scope_info->ParameterEntriesIndex()); for (int i = 0; i < parameter_count; ++i) { scope_info->set(index++, *scope->parameter(i)->name()); } @@ -96,9 +73,9 @@ // Add stack locals' names. We are assuming that the stack locals' // slots are allocated in increasing order, so we can simply add // them to the ScopeInfo object. - ASSERT(index == scope_info->StackLocalEntriesIndex()); + DCHECK(index == scope_info->StackLocalEntriesIndex()); for (int i = 0; i < stack_local_count; ++i) { - ASSERT(stack_locals[i]->index() == i); + DCHECK(stack_locals[i]->index() == i); scope_info->set(index++, *stack_locals[i]->name()); } @@ -111,37 +88,39 @@ context_locals.Sort(&Variable::CompareIndex); // Add context locals' names. - ASSERT(index == scope_info->ContextLocalNameEntriesIndex()); + DCHECK(index == scope_info->ContextLocalNameEntriesIndex()); for (int i = 0; i < context_local_count; ++i) { scope_info->set(index++, *context_locals[i]->name()); } // Add context locals' info. 
- ASSERT(index == scope_info->ContextLocalInfoEntriesIndex()); + DCHECK(index == scope_info->ContextLocalInfoEntriesIndex()); for (int i = 0; i < context_local_count; ++i) { Variable* var = context_locals[i]; - uint32_t value = ContextLocalMode::encode(var->mode()) | - ContextLocalInitFlag::encode(var->initialization_flag()); + uint32_t value = + ContextLocalMode::encode(var->mode()) | + ContextLocalInitFlag::encode(var->initialization_flag()) | + ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned()); scope_info->set(index++, Smi::FromInt(value)); } // If present, add the function variable name and its index. - ASSERT(index == scope_info->FunctionNameEntryIndex()); + DCHECK(index == scope_info->FunctionNameEntryIndex()); if (has_function_name) { int var_index = scope->function()->proxy()->var()->index(); scope_info->set(index++, *scope->function()->proxy()->name()); scope_info->set(index++, Smi::FromInt(var_index)); - ASSERT(function_name_info != STACK || + DCHECK(function_name_info != STACK || (var_index == scope_info->StackLocalCount() && var_index == scope_info->StackSlotCount() - 1)); - ASSERT(function_name_info != CONTEXT || + DCHECK(function_name_info != CONTEXT || var_index == scope_info->ContextLength() - 1); } - ASSERT(index == scope_info->length()); - ASSERT(scope->num_parameters() == scope_info->ParameterCount()); - ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount()); - ASSERT(scope->num_heap_slots() == scope_info->ContextLength() || + DCHECK(index == scope_info->length()); + DCHECK(scope->num_parameters() == scope_info->ParameterCount()); + DCHECK(scope->num_stack_slots() == scope_info->StackSlotCount()); + DCHECK(scope->num_heap_slots() == scope_info->ContextLength() || (scope->num_heap_slots() == kVariablePartIndex && scope_info->ContextLength() == 0)); return scope_info; @@ -154,7 +133,7 @@ ScopeType ScopeInfo::scope_type() { - ASSERT(length() > 0); + DCHECK(length() > 0); return ScopeTypeField::decode(Flags()); } @@ -227,21 
+206,21 @@ String* ScopeInfo::FunctionName() { - ASSERT(HasFunctionName()); + DCHECK(HasFunctionName()); return String::cast(get(FunctionNameEntryIndex())); } String* ScopeInfo::ParameterName(int var) { - ASSERT(0 <= var && var < ParameterCount()); + DCHECK(0 <= var && var < ParameterCount()); int info_index = ParameterEntriesIndex() + var; return String::cast(get(info_index)); } String* ScopeInfo::LocalName(int var) { - ASSERT(0 <= var && var < LocalCount()); - ASSERT(StackLocalEntriesIndex() + StackLocalCount() == + DCHECK(0 <= var && var < LocalCount()); + DCHECK(StackLocalEntriesIndex() + StackLocalCount() == ContextLocalNameEntriesIndex()); int info_index = StackLocalEntriesIndex() + var; return String::cast(get(info_index)); @@ -249,21 +228,21 @@ String* ScopeInfo::StackLocalName(int var) { - ASSERT(0 <= var && var < StackLocalCount()); + DCHECK(0 <= var && var < StackLocalCount()); int info_index = StackLocalEntriesIndex() + var; return String::cast(get(info_index)); } String* ScopeInfo::ContextLocalName(int var) { - ASSERT(0 <= var && var < ContextLocalCount()); + DCHECK(0 <= var && var < ContextLocalCount()); int info_index = ContextLocalNameEntriesIndex() + var; return String::cast(get(info_index)); } VariableMode ScopeInfo::ContextLocalMode(int var) { - ASSERT(0 <= var && var < ContextLocalCount()); + DCHECK(0 <= var && var < ContextLocalCount()); int info_index = ContextLocalInfoEntriesIndex() + var; int value = Smi::cast(get(info_index))->value(); return ContextLocalMode::decode(value); @@ -271,15 +250,34 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) { - ASSERT(0 <= var && var < ContextLocalCount()); + DCHECK(0 <= var && var < ContextLocalCount()); int info_index = ContextLocalInfoEntriesIndex() + var; int value = Smi::cast(get(info_index))->value(); return ContextLocalInitFlag::decode(value); } +MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) { + DCHECK(0 <= var && var < ContextLocalCount()); + int info_index = 
ContextLocalInfoEntriesIndex() + var; + int value = Smi::cast(get(info_index))->value(); + return ContextLocalMaybeAssignedFlag::decode(value); +} + + +bool ScopeInfo::LocalIsSynthetic(int var) { + DCHECK(0 <= var && var < LocalCount()); + // There's currently no flag stored on the ScopeInfo to indicate that a + // variable is a compiler-introduced temporary. However, to avoid conflict + // with user declarations, the current temporaries like .generator_object and + // .result start with a dot, so we can use that as a flag. It's a hack! + Handle<String> name(LocalName(var)); + return name->length() > 0 && name->Get(0) == '.'; +} + + int ScopeInfo::StackSlotIndex(String* name) { - ASSERT(name->IsInternalizedString()); + DCHECK(name->IsInternalizedString()); if (length() > 0) { int start = StackLocalEntriesIndex(); int end = StackLocalEntriesIndex() + StackLocalCount(); @@ -293,42 +291,49 @@ } -int ScopeInfo::ContextSlotIndex(String* name, - VariableMode* mode, - InitializationFlag* init_flag) { - ASSERT(name->IsInternalizedString()); - ASSERT(mode != NULL); - ASSERT(init_flag != NULL); - if (length() > 0) { - ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache(); - int result = context_slot_cache->Lookup(this, name, mode, init_flag); +int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info, + Handle<String> name, VariableMode* mode, + InitializationFlag* init_flag, + MaybeAssignedFlag* maybe_assigned_flag) { + DCHECK(name->IsInternalizedString()); + DCHECK(mode != NULL); + DCHECK(init_flag != NULL); + if (scope_info->length() > 0) { + ContextSlotCache* context_slot_cache = + scope_info->GetIsolate()->context_slot_cache(); + int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag, + maybe_assigned_flag); if (result != ContextSlotCache::kNotFound) { - ASSERT(result < ContextLength()); + DCHECK(result < scope_info->ContextLength()); return result; } - int start = ContextLocalNameEntriesIndex(); - int end = 
ContextLocalNameEntriesIndex() + ContextLocalCount(); + int start = scope_info->ContextLocalNameEntriesIndex(); + int end = scope_info->ContextLocalNameEntriesIndex() + + scope_info->ContextLocalCount(); for (int i = start; i < end; ++i) { - if (name == get(i)) { + if (*name == scope_info->get(i)) { int var = i - start; - *mode = ContextLocalMode(var); - *init_flag = ContextLocalInitFlag(var); + *mode = scope_info->ContextLocalMode(var); + *init_flag = scope_info->ContextLocalInitFlag(var); + *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var); result = Context::MIN_CONTEXT_SLOTS + var; - context_slot_cache->Update(this, name, *mode, *init_flag, result); - ASSERT(result < ContextLength()); + context_slot_cache->Update(scope_info, name, *mode, *init_flag, + *maybe_assigned_flag, result); + DCHECK(result < scope_info->ContextLength()); return result; } } - // Cache as not found. Mode and init flag don't matter. - context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1); + // Cache as not found. Mode, init flag and maybe assigned flag don't matter. + context_slot_cache->Update(scope_info, name, INTERNAL, kNeedsInitialization, + kNotAssigned, -1); } return -1; } int ScopeInfo::ParameterIndex(String* name) { - ASSERT(name->IsInternalizedString()); + DCHECK(name->IsInternalizedString()); if (length() > 0) { // We must read parameters from the end since for // multiply declared parameters the value of the @@ -348,8 +353,8 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) { - ASSERT(name->IsInternalizedString()); - ASSERT(mode != NULL); + DCHECK(name->IsInternalizedString()); + DCHECK(mode != NULL); if (length() > 0) { if (FunctionVariableField::decode(Flags()) == CONTEXT && FunctionName() == name) { @@ -368,25 +373,26 @@ int local_count = scope_info->ContextLocalCount(); if (local_count == 0) return true; // Fill all context locals to the context extension. 
+ int first_context_var = scope_info->StackLocalCount(); int start = scope_info->ContextLocalNameEntriesIndex(); - int end = start + local_count; - for (int i = start; i < end; ++i) { - int context_index = Context::MIN_CONTEXT_SLOTS + i - start; - Handle<Object> result = Runtime::SetObjectProperty( + for (int i = 0; i < local_count; ++i) { + if (scope_info->LocalIsSynthetic(first_context_var + i)) continue; + int context_index = Context::MIN_CONTEXT_SLOTS + i; + RETURN_ON_EXCEPTION_VALUE( isolate, - scope_object, - Handle<String>(String::cast(scope_info->get(i))), - Handle<Object>(context->get(context_index), isolate), - ::NONE, - SLOPPY); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, false); + Runtime::DefineObjectProperty( + scope_object, + Handle<String>(String::cast(scope_info->get(i + start))), + Handle<Object>(context->get(context_index), isolate), + ::NONE), + false); } return true; } int ScopeInfo::ParameterEntriesIndex() { - ASSERT(length() > 0); + DCHECK(length() > 0); return kVariablePartIndex; } @@ -419,39 +425,41 @@ } -int ContextSlotCache::Lookup(Object* data, - String* name, - VariableMode* mode, - InitializationFlag* init_flag) { +int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode, + InitializationFlag* init_flag, + MaybeAssignedFlag* maybe_assigned_flag) { int index = Hash(data, name); Key& key = keys_[index]; if ((key.data == data) && key.name->Equals(name)) { Value result(values_[index]); if (mode != NULL) *mode = result.mode(); if (init_flag != NULL) *init_flag = result.initialization_flag(); + if (maybe_assigned_flag != NULL) + *maybe_assigned_flag = result.maybe_assigned_flag(); return result.index() + kNotFound; } return kNotFound; } -void ContextSlotCache::Update(Object* data, - String* name, - VariableMode mode, - InitializationFlag init_flag, +void ContextSlotCache::Update(Handle<Object> data, Handle<String> name, + VariableMode mode, InitializationFlag init_flag, + MaybeAssignedFlag maybe_assigned_flag, int 
slot_index) { - String* internalized_name; - ASSERT(slot_index > kNotFound); - if (name->GetIsolate()->heap()->InternalizeStringIfExists( - name, &internalized_name)) { - int index = Hash(data, internalized_name); + DisallowHeapAllocation no_gc; + Handle<String> internalized_name; + DCHECK(slot_index > kNotFound); + if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name). + ToHandle(&internalized_name)) { + int index = Hash(*data, *internalized_name); Key& key = keys_[index]; - key.data = data; - key.name = internalized_name; + key.data = *data; + key.name = *internalized_name; // Please note value only takes a uint as index. - values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw(); + values_[index] = Value(mode, init_flag, maybe_assigned_flag, + slot_index - kNotFound).raw(); #ifdef DEBUG - ValidateEntry(data, name, mode, init_flag, slot_index); + ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index); #endif } } @@ -464,22 +472,24 @@ #ifdef DEBUG -void ContextSlotCache::ValidateEntry(Object* data, - String* name, +void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name, VariableMode mode, InitializationFlag init_flag, + MaybeAssignedFlag maybe_assigned_flag, int slot_index) { - String* internalized_name; - if (name->GetIsolate()->heap()->InternalizeStringIfExists( - name, &internalized_name)) { - int index = Hash(data, name); + DisallowHeapAllocation no_gc; + Handle<String> internalized_name; + if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name). 
+ ToHandle(&internalized_name)) { + int index = Hash(*data, *name); Key& key = keys_[index]; - ASSERT(key.data == data); - ASSERT(key.name->Equals(name)); + DCHECK(key.data == *data); + DCHECK(key.name->Equals(*name)); Value result(values_[index]); - ASSERT(result.mode() == mode); - ASSERT(result.initialization_flag() == init_flag); - ASSERT(result.index() + kNotFound == slot_index); + DCHECK(result.mode() == mode); + DCHECK(result.initialization_flag() == init_flag); + DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag); + DCHECK(result.index() + kNotFound == slot_index); } } @@ -541,20 +551,20 @@ int i = 0; for (Interface::Iterator it = interface->iterator(); !it.done(); it.Advance(), ++i) { - Variable* var = scope->LocalLookup(it.name()); - info->set_name(i, *it.name()); + Variable* var = scope->LookupLocal(it.name()); + info->set_name(i, *(it.name()->string())); info->set_mode(i, var->mode()); - ASSERT((var->mode() == MODULE) == (it.interface()->IsModule())); + DCHECK((var->mode() == MODULE) == (it.interface()->IsModule())); if (var->mode() == MODULE) { - ASSERT(it.interface()->IsFrozen()); - ASSERT(it.interface()->Index() >= 0); + DCHECK(it.interface()->IsFrozen()); + DCHECK(it.interface()->Index() >= 0); info->set_index(i, it.interface()->Index()); } else { - ASSERT(var->index() >= 0); + DCHECK(var->index() >= 0); info->set_index(i, var->index()); } } - ASSERT(i == info->length()); + DCHECK(i == info->length()); return info; } diff -Nru nodejs-0.11.13/deps/v8/src/scopeinfo.h nodejs-0.11.15/deps/v8/src/scopeinfo.h --- nodejs-0.11.13/deps/v8/src/scopeinfo.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scopeinfo.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SCOPEINFO_H_ #define V8_SCOPEINFO_H_ -#include "allocation.h" -#include "variables.h" -#include "zone-inl.h" +#include "src/allocation.h" +#include "src/variables.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { @@ -43,17 +20,14 @@ public: // Lookup context slot index for (data, name). // If absent, kNotFound is returned. 
- int Lookup(Object* data, - String* name, - VariableMode* mode, - InitializationFlag* init_flag); + int Lookup(Object* data, String* name, VariableMode* mode, + InitializationFlag* init_flag, + MaybeAssignedFlag* maybe_assigned_flag); // Update an element in the cache. - void Update(Object* data, - String* name, - VariableMode mode, + void Update(Handle<Object> data, Handle<String> name, VariableMode mode, InitializationFlag init_flag, - int slot_index); + MaybeAssignedFlag maybe_assigned_flag, int slot_index); // Clear the cache. void Clear(); @@ -72,11 +46,9 @@ inline static int Hash(Object* data, String* name); #ifdef DEBUG - void ValidateEntry(Object* data, - String* name, - VariableMode mode, - InitializationFlag init_flag, - int slot_index); + void ValidateEntry(Handle<Object> data, Handle<String> name, + VariableMode mode, InitializationFlag init_flag, + MaybeAssignedFlag maybe_assigned_flag, int slot_index); #endif static const int kLength = 256; @@ -86,18 +58,19 @@ }; struct Value { - Value(VariableMode mode, - InitializationFlag init_flag, - int index) { - ASSERT(ModeField::is_valid(mode)); - ASSERT(InitField::is_valid(init_flag)); - ASSERT(IndexField::is_valid(index)); - value_ = ModeField::encode(mode) | - IndexField::encode(index) | - InitField::encode(init_flag); - ASSERT(mode == this->mode()); - ASSERT(init_flag == this->initialization_flag()); - ASSERT(index == this->index()); + Value(VariableMode mode, InitializationFlag init_flag, + MaybeAssignedFlag maybe_assigned_flag, int index) { + DCHECK(ModeField::is_valid(mode)); + DCHECK(InitField::is_valid(init_flag)); + DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag)); + DCHECK(IndexField::is_valid(index)); + value_ = ModeField::encode(mode) | IndexField::encode(index) | + InitField::encode(init_flag) | + MaybeAssignedField::encode(maybe_assigned_flag); + DCHECK(mode == this->mode()); + DCHECK(init_flag == this->initialization_flag()); + DCHECK(maybe_assigned_flag == 
this->maybe_assigned_flag()); + DCHECK(index == this->index()); } explicit inline Value(uint32_t value) : value_(value) {} @@ -110,13 +83,18 @@ return InitField::decode(value_); } + MaybeAssignedFlag maybe_assigned_flag() { + return MaybeAssignedField::decode(value_); + } + int index() { return IndexField::decode(value_); } // Bit fields in value_ (type, shift, size). Must be public so the // constants can be embedded in generated code. - class ModeField: public BitField<VariableMode, 0, 4> {}; - class InitField: public BitField<InitializationFlag, 4, 1> {}; - class IndexField: public BitField<int, 5, 32-5> {}; + class ModeField : public BitField<VariableMode, 0, 4> {}; + class InitField : public BitField<InitializationFlag, 4, 1> {}; + class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {}; + class IndexField : public BitField<int, 6, 32 - 6> {}; private: uint32_t value_; diff -Nru nodejs-0.11.13/deps/v8/src/scopes.cc nodejs-0.11.15/deps/v8/src/scopes.cc --- nodejs-0.11.13/deps/v8/src/scopes.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scopes.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,16 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "scopes.h" - -#include "accessors.h" -#include "bootstrapper.h" -#include "compiler.h" -#include "messages.h" -#include "scopeinfo.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/scopes.h" + +#include "src/accessors.h" +#include "src/bootstrapper.h" +#include "src/compiler.h" +#include "src/messages.h" +#include "src/scopeinfo.h" namespace v8 { namespace internal { @@ -47,52 +24,40 @@ // use. Because a Variable holding a handle with the same location exists // this is ensured. 
-static bool Match(void* key1, void* key2) { - String* name1 = *reinterpret_cast<String**>(key1); - String* name2 = *reinterpret_cast<String**>(key2); - ASSERT(name1->IsInternalizedString()); - ASSERT(name2->IsInternalizedString()); - return name1 == name2; -} - - VariableMap::VariableMap(Zone* zone) - : ZoneHashMap(Match, 8, ZoneAllocationPolicy(zone)), + : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)), zone_(zone) {} VariableMap::~VariableMap() {} -Variable* VariableMap::Declare( - Scope* scope, - Handle<String> name, - VariableMode mode, - bool is_valid_lhs, - Variable::Kind kind, - InitializationFlag initialization_flag, - Interface* interface) { - Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true, - ZoneAllocationPolicy(zone())); +Variable* VariableMap::Declare(Scope* scope, const AstRawString* name, + VariableMode mode, bool is_valid_lhs, + Variable::Kind kind, + InitializationFlag initialization_flag, + MaybeAssignedFlag maybe_assigned_flag, + Interface* interface) { + // AstRawStrings are unambiguous, i.e., the same string is always represented + // by the same AstRawString*. + // FIXME(marja): fix the type of Lookup. + Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash(), + true, ZoneAllocationPolicy(zone())); if (p->value == NULL) { // The variable has not been declared yet -> insert it. 
- ASSERT(p->key == name.location()); - p->value = new(zone()) Variable(scope, - name, - mode, - is_valid_lhs, - kind, - initialization_flag, - interface); + DCHECK(p->key == name); + p->value = new (zone()) + Variable(scope, name, mode, is_valid_lhs, kind, initialization_flag, + maybe_assigned_flag, interface); } return reinterpret_cast<Variable*>(p->value); } -Variable* VariableMap::Lookup(Handle<String> name) { - Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false, - ZoneAllocationPolicy(NULL)); +Variable* VariableMap::Lookup(const AstRawString* name) { + Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash(), + false, ZoneAllocationPolicy(NULL)); if (p != NULL) { - ASSERT(*reinterpret_cast<String**>(p->key) == *name); - ASSERT(p->value != NULL); + DCHECK(reinterpret_cast<const AstRawString*>(p->key) == name); + DCHECK(p->value != NULL); return reinterpret_cast<Variable*>(p->value); } return NULL; @@ -102,7 +67,8 @@ // ---------------------------------------------------------------------------- // Implementation of Scope -Scope::Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone) +Scope::Scope(Scope* outer_scope, ScopeType scope_type, + AstValueFactory* ast_value_factory, Zone* zone) : isolate_(zone->isolate()), inner_scopes_(4, zone), variables_(zone), @@ -115,17 +81,19 @@ (scope_type == MODULE_SCOPE || scope_type == GLOBAL_SCOPE) ? Interface::NewModule(zone) : NULL), already_resolved_(false), + ast_value_factory_(ast_value_factory), zone_(zone) { SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null()); // The outermost scope must be a global scope. 
- ASSERT(scope_type == GLOBAL_SCOPE || outer_scope != NULL); - ASSERT(!HasIllegalRedeclaration()); + DCHECK(scope_type == GLOBAL_SCOPE || outer_scope != NULL); + DCHECK(!HasIllegalRedeclaration()); } Scope::Scope(Scope* inner_scope, ScopeType scope_type, Handle<ScopeInfo> scope_info, + AstValueFactory* value_factory, Zone* zone) : isolate_(zone->isolate()), inner_scopes_(4, zone), @@ -137,6 +105,7 @@ decls_(4, zone), interface_(NULL), already_resolved_(true), + ast_value_factory_(value_factory), zone_(zone) { SetDefaults(scope_type, NULL, scope_info); if (!scope_info.is_null()) { @@ -149,7 +118,8 @@ } -Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone) +Scope::Scope(Scope* inner_scope, const AstRawString* catch_variable_name, + AstValueFactory* value_factory, Zone* zone) : isolate_(zone->isolate()), inner_scopes_(1, zone), variables_(zone), @@ -160,6 +130,7 @@ decls_(0, zone), interface_(NULL), already_resolved_(true), + ast_value_factory_(value_factory), zone_(zone) { SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null()); AddInnerScope(inner_scope); @@ -180,7 +151,7 @@ Handle<ScopeInfo> scope_info) { outer_scope_ = outer_scope; scope_type_ = scope_type; - scope_name_ = isolate_->factory()->empty_string(); + scope_name_ = ast_value_factory_->empty_string(); dynamics_ = NULL; receiver_ = NULL; function_ = NULL; @@ -222,6 +193,7 @@ Scope* with_scope = new(zone) Scope(current_scope, WITH_SCOPE, Handle<ScopeInfo>::null(), + global_scope->ast_value_factory_, zone); current_scope = with_scope; // All the inner scopes are inside a with. 
@@ -234,30 +206,36 @@ current_scope = new(zone) Scope(current_scope, GLOBAL_SCOPE, Handle<ScopeInfo>(scope_info), + global_scope->ast_value_factory_, zone); } else if (context->IsModuleContext()) { ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info()); current_scope = new(zone) Scope(current_scope, MODULE_SCOPE, Handle<ScopeInfo>(scope_info), + global_scope->ast_value_factory_, zone); } else if (context->IsFunctionContext()) { ScopeInfo* scope_info = context->closure()->shared()->scope_info(); current_scope = new(zone) Scope(current_scope, FUNCTION_SCOPE, Handle<ScopeInfo>(scope_info), + global_scope->ast_value_factory_, zone); } else if (context->IsBlockContext()) { ScopeInfo* scope_info = ScopeInfo::cast(context->extension()); current_scope = new(zone) Scope(current_scope, BLOCK_SCOPE, Handle<ScopeInfo>(scope_info), + global_scope->ast_value_factory_, zone); } else { - ASSERT(context->IsCatchContext()); + DCHECK(context->IsCatchContext()); String* name = String::cast(context->extension()); - current_scope = new(zone) Scope( - current_scope, Handle<String>(name), zone); + current_scope = new (zone) Scope( + current_scope, + global_scope->ast_value_factory_->GetString(Handle<String>(name)), + global_scope->ast_value_factory_, zone); } if (contains_with) current_scope->RecordWithStatement(); if (innermost_scope == NULL) innermost_scope = current_scope; @@ -276,7 +254,7 @@ bool Scope::Analyze(CompilationInfo* info) { - ASSERT(info->function() != NULL); + DCHECK(info->function() != NULL); Scope* scope = info->function()->scope(); Scope* top = scope; @@ -289,7 +267,9 @@ // Allocate the variables. { - AstNodeFactory<AstNullVisitor> ast_node_factory(info->zone()); + // Passing NULL as AstValueFactory is ok, because AllocateVariables doesn't + // need to create new strings or values. 
+ AstNodeFactory<AstNullVisitor> ast_node_factory(info->zone(), NULL); if (!top->AllocateVariables(info, &ast_node_factory)) return false; } @@ -312,7 +292,7 @@ void Scope::Initialize() { - ASSERT(!already_resolved()); + DCHECK(!already_resolved()); // Add this scope as a new inner scope of the outer scope. if (outer_scope_ != NULL) { @@ -333,7 +313,7 @@ if (is_declaration_scope()) { Variable* var = variables_.Declare(this, - isolate_->factory()->this_string(), + ast_value_factory_->this_string(), VAR, false, Variable::THIS, @@ -341,7 +321,7 @@ var->AllocateTo(Variable::PARAMETER, -1); receiver_ = var; } else { - ASSERT(outer_scope() != NULL); + DCHECK(outer_scope() != NULL); receiver_ = outer_scope()->receiver(); } @@ -350,7 +330,7 @@ // Note that it might never be accessed, in which case it won't be // allocated during variable allocation. variables_.Declare(this, - isolate_->factory()->arguments_string(), + ast_value_factory_->arguments_string(), VAR, true, Variable::ARGUMENTS, @@ -360,10 +340,10 @@ Scope* Scope::FinalizeBlockScope() { - ASSERT(is_block_scope()); - ASSERT(internals_.is_empty()); - ASSERT(temps_.is_empty()); - ASSERT(params_.is_empty()); + DCHECK(is_block_scope()); + DCHECK(internals_.is_empty()); + DCHECK(temps_.is_empty()); + DCHECK(params_.is_empty()); if (num_var_or_const() > 0) return this; @@ -389,45 +369,55 @@ } -Variable* Scope::LocalLookup(Handle<String> name) { +Variable* Scope::LookupLocal(const AstRawString* name) { Variable* result = variables_.Lookup(name); if (result != NULL || scope_info_.is_null()) { return result; } + // The Scope is backed up by ScopeInfo. This means it cannot operate in a + // heap-independent mode, and all strings must be internalized immediately. So + // it's ok to get the Handle<String> here. + Handle<String> name_handle = name->string(); // If we have a serialized scope info, we might find the variable there. // There should be no local slot with the given name. 
- ASSERT(scope_info_->StackSlotIndex(*name) < 0); + DCHECK(scope_info_->StackSlotIndex(*name_handle) < 0); // Check context slot lookup. VariableMode mode; Variable::Location location = Variable::CONTEXT; InitializationFlag init_flag; - int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag); + MaybeAssignedFlag maybe_assigned_flag; + int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode, + &init_flag, &maybe_assigned_flag); if (index < 0) { // Check parameters. - index = scope_info_->ParameterIndex(*name); + index = scope_info_->ParameterIndex(*name_handle); if (index < 0) return NULL; mode = DYNAMIC; location = Variable::LOOKUP; init_flag = kCreatedInitialized; + // Be conservative and flag parameters as maybe assigned. Better information + // would require ScopeInfo to serialize the maybe_assigned bit also for + // parameters. + maybe_assigned_flag = kMaybeAssigned; } Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL, - init_flag); + init_flag, maybe_assigned_flag); var->AllocateTo(location, index); return var; } -Variable* Scope::LookupFunctionVar(Handle<String> name, +Variable* Scope::LookupFunctionVar(const AstRawString* name, AstNodeFactory<AstNullVisitor>* factory) { - if (function_ != NULL && function_->proxy()->name().is_identical_to(name)) { + if (function_ != NULL && function_->proxy()->raw_name() == name) { return function_->proxy()->var(); } else if (!scope_info_.is_null()) { // If we are backed by a scope info, try to lookup the variable there. 
VariableMode mode; - int index = scope_info_->FunctionContextSlotIndex(*name, &mode); + int index = scope_info_->FunctionContextSlotIndex(*(name->string()), &mode); if (index < 0) return NULL; Variable* var = new(zone()) Variable( this, name, mode, true /* is valid LHS */, @@ -444,43 +434,44 @@ } -Variable* Scope::Lookup(Handle<String> name) { +Variable* Scope::Lookup(const AstRawString* name) { for (Scope* scope = this; scope != NULL; scope = scope->outer_scope()) { - Variable* var = scope->LocalLookup(name); + Variable* var = scope->LookupLocal(name); if (var != NULL) return var; } return NULL; } -void Scope::DeclareParameter(Handle<String> name, VariableMode mode) { - ASSERT(!already_resolved()); - ASSERT(is_function_scope()); +Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode) { + DCHECK(!already_resolved()); + DCHECK(is_function_scope()); Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL, kCreatedInitialized); params_.Add(var, zone()); + return var; } -Variable* Scope::DeclareLocal(Handle<String> name, - VariableMode mode, +Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode, InitializationFlag init_flag, + MaybeAssignedFlag maybe_assigned_flag, Interface* interface) { - ASSERT(!already_resolved()); + DCHECK(!already_resolved()); // This function handles VAR, LET, and CONST modes. DYNAMIC variables are // introduces during variable allocation, INTERNAL variables are allocated // explicitly, and TEMPORARY variables are allocated via NewTemporary(). 
- ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); ++num_var_or_const_; - return variables_.Declare( - this, name, mode, true, Variable::NORMAL, init_flag, interface); + return variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag, + maybe_assigned_flag, interface); } -Variable* Scope::DeclareDynamicGlobal(Handle<String> name) { - ASSERT(is_global_scope()); +Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) { + DCHECK(is_global_scope()); return variables_.Declare(this, name, DYNAMIC_GLOBAL, @@ -502,8 +493,8 @@ } -Variable* Scope::NewInternal(Handle<String> name) { - ASSERT(!already_resolved()); +Variable* Scope::NewInternal(const AstRawString* name) { + DCHECK(!already_resolved()); Variable* var = new(zone()) Variable(this, name, INTERNAL, @@ -515,8 +506,8 @@ } -Variable* Scope::NewTemporary(Handle<String> name) { - ASSERT(!already_resolved()); +Variable* Scope::NewTemporary(const AstRawString* name) { + DCHECK(!already_resolved()); Variable* var = new(zone()) Variable(this, name, TEMPORARY, @@ -538,12 +529,12 @@ if (!HasIllegalRedeclaration()) { illegal_redecl_ = expression; } - ASSERT(HasIllegalRedeclaration()); + DCHECK(HasIllegalRedeclaration()); } void Scope::VisitIllegalRedeclaration(AstVisitor* visitor) { - ASSERT(HasIllegalRedeclaration()); + DCHECK(HasIllegalRedeclaration()); illegal_redecl_->Accept(visitor); } @@ -553,7 +544,7 @@ for (int i = 0; i < length; i++) { Declaration* decl = decls_[i]; if (decl->mode() != VAR) continue; - Handle<String> name = decl->proxy()->name(); + const AstRawString* name = decl->proxy()->raw_name(); // Iterate through all scopes until and including the declaration scope. 
Scope* previous = NULL; @@ -589,14 +580,14 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals) { - ASSERT(stack_locals != NULL); - ASSERT(context_locals != NULL); + DCHECK(stack_locals != NULL); + DCHECK(context_locals != NULL); // Collect internals which are always allocated on the heap. for (int i = 0; i < internals_.length(); i++) { Variable* var = internals_[i]; if (var->is_used()) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); context_locals->Add(var, zone()); } } @@ -607,10 +598,10 @@ Variable* var = temps_[i]; if (var->is_used()) { if (var->IsContextSlot()) { - ASSERT(has_forced_context_allocation()); + DCHECK(has_forced_context_allocation()); context_locals->Add(var, zone()); } else { - ASSERT(var->IsStackLocal()); + DCHECK(var->IsStackLocal()); stack_locals->Add(var, zone()); } } @@ -652,7 +643,7 @@ // 2) Allocate module instances. if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) { - ASSERT(num_modules_ == 0); + DCHECK(num_modules_ == 0); AllocateModulesRecursively(this); } @@ -721,11 +712,11 @@ int Scope::ContextChainLength(Scope* scope) { int n = 0; for (Scope* s = this; s != scope; s = s->outer_scope_) { - ASSERT(s != NULL); // scope must be in the scope chain + DCHECK(s != NULL); // scope must be in the scope chain if (s->is_with_scope() || s->num_heap_slots() > 0) n++; // Catch and module scopes always have heap slots. 
- ASSERT(!s->is_catch_scope() || s->num_heap_slots() > 0); - ASSERT(!s->is_module_scope() || s->num_heap_slots() > 0); + DCHECK(!s->is_catch_scope() || s->num_heap_slots() > 0); + DCHECK(!s->is_module_scope() || s->num_heap_slots() > 0); } return n; } @@ -766,7 +757,7 @@ Scope* scope = inner_scopes_[i]; int beg_pos = scope->start_position(); int end_pos = scope->end_position(); - ASSERT(beg_pos >= 0 && end_pos >= 0); + DCHECK(beg_pos >= 0 && end_pos >= 0); if (beg_pos <= position && position < end_pos) { scope->GetNestedScopeChain(chain, position); return; @@ -796,9 +787,8 @@ } -static void PrintName(Handle<String> name) { - SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS); - PrintF("%s", s.get()); +static void PrintName(const AstRawString* name) { + PrintF("%.*s", name->length(), name->raw_data()); } @@ -826,12 +816,18 @@ if (var->is_used() || !var->IsUnallocated()) { Indent(indent, Variable::Mode2String(var->mode())); PrintF(" "); - PrintName(var->name()); + PrintName(var->raw_name()); PrintF("; // "); PrintLocation(var); + bool comma = !var->IsUnallocated(); if (var->has_forced_context_allocation()) { - if (!var->IsUnallocated()) PrintF(", "); + if (comma) PrintF(", "); PrintF("forced context allocation"); + comma = true; + } + if (var->maybe_assigned() == kMaybeAssigned) { + if (comma) PrintF(", "); + PrintF("maybe assigned"); } PrintF("\n"); } @@ -852,7 +848,7 @@ // Print header. Indent(n0, Header(scope_type_)); - if (scope_name_->length() > 0) { + if (!scope_name_->IsEmpty()) { PrintF(" "); PrintName(scope_name_); } @@ -862,7 +858,7 @@ PrintF(" ("); for (int i = 0; i < params_.length(); i++) { if (i > 0) PrintF(", "); - PrintName(params_[i]->name()); + PrintName(params_[i]->raw_name()); } PrintF(")"); } @@ -872,7 +868,7 @@ // Function name, if any (named function literals, only). 
if (function_ != NULL) { Indent(n1, "// (local) function name: "); - PrintName(function_->proxy()->name()); + PrintName(function_->proxy()->raw_name()); PrintF("\n"); } @@ -940,8 +936,8 @@ #endif // DEBUG -Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) { - if (dynamics_ == NULL) dynamics_ = new(zone()) DynamicScopePart(zone()); +Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) { + if (dynamics_ == NULL) dynamics_ = new (zone()) DynamicScopePart(zone()); VariableMap* map = dynamics_->GetMap(mode); Variable* var = map->Lookup(name); if (var == NULL) { @@ -961,10 +957,10 @@ } -Variable* Scope::LookupRecursive(Handle<String> name, +Variable* Scope::LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind, AstNodeFactory<AstNullVisitor>* factory) { - ASSERT(binding_kind != NULL); + DCHECK(binding_kind != NULL); if (already_resolved() && is_with_scope()) { // Short-cut: if the scope is deserialized from a scope info, variable // allocation is already fixed. We can simply return with dynamic lookup. @@ -973,7 +969,7 @@ } // Try to find the variable in this scope. - Variable* var = LocalLookup(name); + Variable* var = LookupLocal(proxy->raw_name()); // We found a variable and we are done. (Even if there is an 'eval' in // this scope which introduces the same variable again, the resulting @@ -987,26 +983,27 @@ // if any. We can do this for all scopes, since the function variable is // only present - if at all - for function scopes. 
*binding_kind = UNBOUND; - var = LookupFunctionVar(name, factory); + var = LookupFunctionVar(proxy->raw_name(), factory); if (var != NULL) { *binding_kind = BOUND; } else if (outer_scope_ != NULL) { - var = outer_scope_->LookupRecursive(name, binding_kind, factory); + var = outer_scope_->LookupRecursive(proxy, binding_kind, factory); if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) { var->ForceContextAllocation(); } } else { - ASSERT(is_global_scope()); + DCHECK(is_global_scope()); } if (is_with_scope()) { - ASSERT(!already_resolved()); + DCHECK(!already_resolved()); // The current scope is a with scope, so the variable binding can not be // statically resolved. However, note that it was necessary to do a lookup // in the outer scope anyway, because if a binding exists in an outer scope, // the associated variable has to be marked as potentially being accessed // from inside of an inner with scope (the property may not be in the 'with' // object). + if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned(); *binding_kind = DYNAMIC_LOOKUP; return NULL; } else if (calls_sloppy_eval()) { @@ -1027,7 +1024,7 @@ bool Scope::ResolveVariable(CompilationInfo* info, VariableProxy* proxy, AstNodeFactory<AstNullVisitor>* factory) { - ASSERT(info->global_scope()->is_global_scope()); + DCHECK(info->global_scope()->is_global_scope()); // If the proxy is already resolved there's nothing to do // (functions and consts may be resolved by the parser). @@ -1035,7 +1032,7 @@ // Otherwise, try to resolve the variable. BindingKind binding_kind; - Variable* var = LookupRecursive(proxy->name(), &binding_kind, factory); + Variable* var = LookupRecursive(proxy, &binding_kind, factory); switch (binding_kind) { case BOUND: // We found a variable binding. @@ -1047,36 +1044,37 @@ // scope which was not promoted to a context, this can happen if we use // debugger to evaluate arbitrary expressions at a break point). 
if (var->IsGlobalObjectProperty()) { - var = NonLocal(proxy->name(), DYNAMIC_GLOBAL); + var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL); } else if (var->is_dynamic()) { - var = NonLocal(proxy->name(), DYNAMIC); + var = NonLocal(proxy->raw_name(), DYNAMIC); } else { Variable* invalidated = var; - var = NonLocal(proxy->name(), DYNAMIC_LOCAL); + var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL); var->set_local_if_not_shadowed(invalidated); } break; case UNBOUND: // No binding has been found. Declare a variable on the global object. - var = info->global_scope()->DeclareDynamicGlobal(proxy->name()); + var = info->global_scope()->DeclareDynamicGlobal(proxy->raw_name()); break; case UNBOUND_EVAL_SHADOWED: // No binding has been found. But some scope makes a sloppy 'eval' call. - var = NonLocal(proxy->name(), DYNAMIC_GLOBAL); + var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL); break; case DYNAMIC_LOOKUP: // The variable could not be resolved statically. - var = NonLocal(proxy->name(), DYNAMIC); + var = NonLocal(proxy->raw_name(), DYNAMIC); break; } - ASSERT(var != NULL); + DCHECK(var != NULL); + if (proxy->is_assigned()) var->set_maybe_assigned(); if (FLAG_harmony_scoping && strict_mode() == STRICT && - var->is_const_mode() && proxy->IsLValue()) { + var->is_const_mode() && proxy->is_assigned()) { // Assignment to const. Throw a syntax error. 
MessageLocation location( info->script(), proxy->position(), proxy->position()); @@ -1092,8 +1090,10 @@ if (FLAG_harmony_modules) { bool ok; #ifdef DEBUG - if (FLAG_print_interface_details) - PrintF("# Resolve %s:\n", var->name()->ToAsciiArray()); + if (FLAG_print_interface_details) { + PrintF("# Resolve %.*s:\n", var->raw_name()->length(), + var->raw_name()->raw_data()); + } #endif proxy->interface()->Unify(var->interface(), zone(), &ok); if (!ok) { @@ -1114,7 +1114,7 @@ Isolate* isolate = info->isolate(); Factory* factory = isolate->factory(); Handle<JSArray> array = factory->NewJSArray(1); - USE(JSObject::SetElement(array, 0, var->name(), NONE, STRICT)); + JSObject::SetElement(array, 0, var->name(), NONE, STRICT).Assert(); Handle<Object> result = factory->NewSyntaxError("module_type_error", array); isolate->Throw(*result, &location); @@ -1131,7 +1131,7 @@ bool Scope::ResolveVariablesRecursively( CompilationInfo* info, AstNodeFactory<AstNullVisitor>* factory) { - ASSERT(info->global_scope()->is_global_scope()); + DCHECK(info->global_scope()->is_global_scope()); // Resolve unresolved variables for this scope. 
for (int i = 0; i < unresolved_.length(); i++) { @@ -1148,7 +1148,7 @@ } -bool Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) { +void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) { if (outer_scope_calls_sloppy_eval) { outer_scope_calls_sloppy_eval_ = true; } @@ -1156,16 +1156,15 @@ bool calls_sloppy_eval = this->calls_sloppy_eval() || outer_scope_calls_sloppy_eval_; for (int i = 0; i < inner_scopes_.length(); i++) { - Scope* inner_scope = inner_scopes_[i]; - if (inner_scope->PropagateScopeInfo(calls_sloppy_eval)) { + Scope* inner = inner_scopes_[i]; + inner->PropagateScopeInfo(calls_sloppy_eval); + if (inner->scope_calls_eval_ || inner->inner_scope_calls_eval_) { inner_scope_calls_eval_ = true; } - if (inner_scope->force_eager_compilation_) { + if (inner->force_eager_compilation_) { force_eager_compilation_ = true; } } - - return scope_calls_eval_ || inner_scope_calls_eval_; } @@ -1173,7 +1172,7 @@ // Give var a read/write use if there is a chance it might be accessed // via an eval() call. This is only possible if the variable has a // visible name. - if ((var->is_this() || var->name()->length() > 0) && + if ((var->is_this() || !var->raw_name()->IsEmpty()) && (var->has_forced_context_allocation() || scope_calls_eval_ || inner_scope_calls_eval_ || @@ -1182,7 +1181,8 @@ is_block_scope() || is_module_scope() || is_global_scope())) { - var->set_is_used(true); + var->set_is_used(); + if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned(); } // Global variables do not need to be allocated. 
return !var->IsGlobalObjectProperty() && var->is_used(); @@ -1233,9 +1233,9 @@ void Scope::AllocateParameterLocals() { - ASSERT(is_function_scope()); - Variable* arguments = LocalLookup(isolate_->factory()->arguments_string()); - ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly + DCHECK(is_function_scope()); + Variable* arguments = LookupLocal(ast_value_factory_->arguments_string()); + DCHECK(arguments != NULL); // functions have 'arguments' declared implicitly bool uses_sloppy_arguments = false; @@ -1265,20 +1265,20 @@ // order is relevant! for (int i = params_.length() - 1; i >= 0; --i) { Variable* var = params_[i]; - ASSERT(var->scope() == this); - if (uses_sloppy_arguments) { + DCHECK(var->scope() == this); + if (uses_sloppy_arguments || has_forced_context_allocation()) { // Force context allocation of the parameter. var->ForceContextAllocation(); } if (MustAllocate(var)) { if (MustAllocateInContext(var)) { - ASSERT(var->IsUnallocated() || var->IsContextSlot()); + DCHECK(var->IsUnallocated() || var->IsContextSlot()); if (var->IsUnallocated()) { AllocateHeapSlot(var); } } else { - ASSERT(var->IsUnallocated() || var->IsParameter()); + DCHECK(var->IsUnallocated() || var->IsParameter()); if (var->IsUnallocated()) { var->AllocateTo(Variable::PARAMETER, i); } @@ -1289,8 +1289,8 @@ void Scope::AllocateNonParameterLocal(Variable* var) { - ASSERT(var->scope() == this); - ASSERT(!var->IsVariable(isolate_->factory()->dot_result_string()) || + DCHECK(var->scope() == this); + DCHECK(!var->IsVariable(isolate_->factory()->dot_result_string()) || !var->IsStackLocal()); if (var->IsUnallocated() && MustAllocate(var)) { if (MustAllocateInContext(var)) { @@ -1367,18 +1367,17 @@ } // Allocation done. 
- ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS); + DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS); } void Scope::AllocateModulesRecursively(Scope* host_scope) { if (already_resolved()) return; if (is_module_scope()) { - ASSERT(interface_->IsFrozen()); - Handle<String> name = isolate_->factory()->InternalizeOneByteString( - STATIC_ASCII_VECTOR(".module")); - ASSERT(module_var_ == NULL); - module_var_ = host_scope->NewInternal(name); + DCHECK(interface_->IsFrozen()); + DCHECK(module_var_ == NULL); + module_var_ = + host_scope->NewInternal(ast_value_factory_->dot_module_string()); ++host_scope->num_modules_; } diff -Nru nodejs-0.11.13/deps/v8/src/scopes.h nodejs-0.11.15/deps/v8/src/scopes.h --- nodejs-0.11.13/deps/v8/src/scopes.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/scopes.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SCOPES_H_ #define V8_SCOPES_H_ -#include "ast.h" -#include "zone.h" +#include "src/ast.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -44,15 +21,13 @@ virtual ~VariableMap(); - Variable* Declare(Scope* scope, - Handle<String> name, - VariableMode mode, - bool is_valid_lhs, - Variable::Kind kind, + Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode, + bool is_valid_lhs, Variable::Kind kind, InitializationFlag initialization_flag, + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, Interface* interface = Interface::NewValue()); - Variable* Lookup(Handle<String> name); + Variable* Lookup(const AstRawString* name); Zone* zone() const { return zone_; } @@ -74,7 +49,7 @@ VariableMap* GetMap(VariableMode mode) { int index = mode - DYNAMIC; - ASSERT(index >= 0 && index < 3); + DCHECK(index >= 0 && index < 3); return maps_[index]; } @@ -97,7 +72,8 @@ // --------------------------------------------------------------------------- // Construction - Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone); + Scope(Scope* outer_scope, ScopeType scope_type, + AstValueFactory* value_factory, Zone* zone); // Compute top scope and allocate variables. 
For lazy compilation the top // scope only contains the single lazily compiled function, so this @@ -108,7 +84,9 @@ Zone* zone); // The scope name is only used for printing/debugging. - void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; } + void SetScopeName(const AstRawString* scope_name) { + scope_name_ = scope_name; + } void Initialize(); @@ -123,55 +101,55 @@ // Declarations // Lookup a variable in this scope. Returns the variable or NULL if not found. - Variable* LocalLookup(Handle<String> name); + Variable* LookupLocal(const AstRawString* name); // This lookup corresponds to a lookup in the "intermediate" scope sitting // between this scope and the outer scope. (ECMA-262, 3rd., requires that // the name of named function literal is kept in an intermediate scope // in between this scope and the next outer scope.) - Variable* LookupFunctionVar(Handle<String> name, + Variable* LookupFunctionVar(const AstRawString* name, AstNodeFactory<AstNullVisitor>* factory); // Lookup a variable in this scope or outer scopes. // Returns the variable or NULL if not found. - Variable* Lookup(Handle<String> name); + Variable* Lookup(const AstRawString* name); // Declare the function variable for a function literal. This variable // is in an intermediate scope between this function scope and the the // outer scope. Only possible for function scopes; at most one variable. void DeclareFunctionVar(VariableDeclaration* declaration) { - ASSERT(is_function_scope()); + DCHECK(is_function_scope()); function_ = declaration; } // Declare a parameter in this scope. When there are duplicated // parameters the rightmost one 'wins'. However, the implementation // expects all parameters to be declared and from left to right. - void DeclareParameter(Handle<String> name, VariableMode mode); + Variable* DeclareParameter(const AstRawString* name, VariableMode mode); // Declare a local variable in this scope. 
If the variable has been // declared before, the previously declared variable is returned. - Variable* DeclareLocal(Handle<String> name, - VariableMode mode, + Variable* DeclareLocal(const AstRawString* name, VariableMode mode, InitializationFlag init_flag, + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, Interface* interface = Interface::NewValue()); // Declare an implicit global variable in this scope which must be a // global scope. The variable was introduced (possibly from an inner // scope) by a reference to an unresolved variable with no intervening // with statements or eval calls. - Variable* DeclareDynamicGlobal(Handle<String> name); + Variable* DeclareDynamicGlobal(const AstRawString* name); // Create a new unresolved variable. template<class Visitor> VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory, - Handle<String> name, + const AstRawString* name, Interface* interface = Interface::NewValue(), int position = RelocInfo::kNoPosition) { // Note that we must not share the unresolved variables with // the same name because they may be removed selectively via // RemoveUnresolved(). - ASSERT(!already_resolved()); + DCHECK(!already_resolved()); VariableProxy* proxy = factory->NewVariableProxy(name, false, interface, position); unresolved_.Add(proxy, zone_); @@ -190,13 +168,13 @@ // for printing and cannot be used to find the variable. In particular, // the only way to get hold of the temporary is by keeping the Variable* // around. - Variable* NewInternal(Handle<String> name); + Variable* NewInternal(const AstRawString* name); // Creates a new temporary variable in this scope. The name is only used // for printing and cannot be used to find the variable. In particular, // the only way to get hold of the temporary is by keeping the Variable* // around. The name should not clash with a legitimate variable names. 
- Variable* NewTemporary(Handle<String> name); + Variable* NewTemporary(const AstRawString* name); // Adds the specific declaration node to the list of declarations in // this scope. The declarations are processed as part of entering @@ -269,7 +247,7 @@ // In some cases we want to force context allocation for a whole scope. void ForceContextAllocation() { - ASSERT(!already_resolved()); + DCHECK(!already_resolved()); force_context_allocation_ = true; } bool has_forced_context_allocation() const { @@ -324,14 +302,14 @@ // The variable holding the function literal for named function // literals, or NULL. Only valid for function scopes. VariableDeclaration* function() const { - ASSERT(is_function_scope()); + DCHECK(is_function_scope()); return function_; } // Parameters. The left-most parameter has index 0. // Only valid for function scopes. Variable* parameter(int index) const { - ASSERT(is_function_scope()); + DCHECK(is_function_scope()); return params_[index]; } @@ -413,7 +391,7 @@ // --------------------------------------------------------------------------- // Strict mode support. - bool IsDeclared(Handle<String> name) { + bool IsDeclared(const AstRawString* name) { // During formal parameter list parsing the scope only contains // two variables inserted at initialization: "this" and "arguments". // "this" is an invalid parameter name and "arguments" is invalid parameter @@ -444,7 +422,7 @@ ScopeType scope_type_; // Debugging support. - Handle<String> scope_name_; + const AstRawString* scope_name_; // The variables declared in this scope: // @@ -520,7 +498,7 @@ // Create a non-local variable with a given name. // These variables are looked up dynamically at runtime. - Variable* NonLocal(Handle<String> name, VariableMode mode); + Variable* NonLocal(const AstRawString* name, VariableMode mode); // Variable resolution. 
// Possible results of a recursive variable lookup telling if and how a @@ -571,7 +549,7 @@ // Lookup a variable reference given by name recursively starting with this // scope. If the code is executed because of a call to 'eval', the context // parameter should be set to the calling context of 'eval'. - Variable* LookupRecursive(Handle<String> name, + Variable* LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind, AstNodeFactory<AstNullVisitor>* factory); MUST_USE_RESULT @@ -583,7 +561,7 @@ AstNodeFactory<AstNullVisitor>* factory); // Scope analysis. - bool PropagateScopeInfo(bool outer_scope_calls_sloppy_eval); + void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval); bool HasTrivialContext() const; // Predicates. @@ -615,10 +593,12 @@ private: // Construct a scope based on the scope info. Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info, - Zone* zone); + AstValueFactory* value_factory, Zone* zone); // Construct a catch scope with a binding for the name. - Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone); + Scope(Scope* inner_scope, + const AstRawString* catch_variable_name, + AstValueFactory* value_factory, Zone* zone); void AddInnerScope(Scope* inner_scope) { if (inner_scope != NULL) { @@ -631,6 +611,7 @@ Scope* outer_scope, Handle<ScopeInfo> scope_info); + AstValueFactory* ast_value_factory_; Zone* zone_; }; diff -Nru nodejs-0.11.13/deps/v8/src/serialize.cc nodejs-0.11.15/deps/v8/src/serialize.cc --- nodejs-0.11.13/deps/v8/src/serialize.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/serialize.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,46 +1,26 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "accessors.h" -#include "api.h" -#include "bootstrapper.h" -#include "deoptimizer.h" -#include "execution.h" -#include "global-handles.h" -#include "ic-inl.h" -#include "natives.h" -#include "platform.h" -#include "runtime.h" -#include "serialize.h" -#include "snapshot.h" -#include "stub-cache.h" -#include "v8threads.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/accessors.h" +#include "src/api.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/deoptimizer.h" +#include "src/execution.h" +#include "src/global-handles.h" +#include "src/ic-inl.h" +#include "src/natives.h" +#include "src/objects.h" +#include "src/runtime.h" +#include "src/serialize.h" +#include "src/snapshot.h" +#include "src/snapshot-source-sink.h" +#include "src/stub-cache.h" +#include "src/v8threads.h" +#include "src/version.h" namespace v8 { namespace internal { @@ -114,12 +94,14 @@ TypeCode type, uint16_t id, const char* name) { - ASSERT_NE(NULL, address); + DCHECK_NE(NULL, address); ExternalReferenceEntry entry; entry.address = address; entry.code = EncodeExternal(type, id); entry.name = name; - ASSERT_NE(0, entry.code); + DCHECK_NE(0, entry.code); + // Assert that the code is added in ascending order to rule out duplicates. + DCHECK((size() == 0) || (code(size() - 1) < entry.code)); refs_.Add(entry); if (id > max_id_[type]) max_id_[type] = id; } @@ -130,6 +112,144 @@ max_id_[type_code] = 0; } + // Miscellaneous + Add(ExternalReference::roots_array_start(isolate).address(), + "Heap::roots_array_start()"); + Add(ExternalReference::address_of_stack_limit(isolate).address(), + "StackGuard::address_of_jslimit()"); + Add(ExternalReference::address_of_real_stack_limit(isolate).address(), + "StackGuard::address_of_real_jslimit()"); + Add(ExternalReference::new_space_start(isolate).address(), + "Heap::NewSpaceStart()"); + Add(ExternalReference::new_space_mask(isolate).address(), + "Heap::NewSpaceMask()"); + Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), + "Heap::NewSpaceAllocationLimitAddress()"); + Add(ExternalReference::new_space_allocation_top_address(isolate).address(), + "Heap::NewSpaceAllocationTopAddress()"); + Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()"); + 
Add(ExternalReference::debug_step_in_fp_address(isolate).address(), + "Debug::step_in_fp_addr()"); + Add(ExternalReference::mod_two_doubles_operation(isolate).address(), + "mod_two_doubles"); + // Keyed lookup cache. + Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(), + "KeyedLookupCache::keys()"); + Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(), + "KeyedLookupCache::field_offsets()"); + Add(ExternalReference::handle_scope_next_address(isolate).address(), + "HandleScope::next"); + Add(ExternalReference::handle_scope_limit_address(isolate).address(), + "HandleScope::limit"); + Add(ExternalReference::handle_scope_level_address(isolate).address(), + "HandleScope::level"); + Add(ExternalReference::new_deoptimizer_function(isolate).address(), + "Deoptimizer::New()"); + Add(ExternalReference::compute_output_frames_function(isolate).address(), + "Deoptimizer::ComputeOutputFrames()"); + Add(ExternalReference::address_of_min_int().address(), + "LDoubleConstant::min_int"); + Add(ExternalReference::address_of_one_half().address(), + "LDoubleConstant::one_half"); + Add(ExternalReference::isolate_address(isolate).address(), "isolate"); + Add(ExternalReference::address_of_negative_infinity().address(), + "LDoubleConstant::negative_infinity"); + Add(ExternalReference::power_double_double_function(isolate).address(), + "power_double_double_function"); + Add(ExternalReference::power_double_int_function(isolate).address(), + "power_double_int_function"); + Add(ExternalReference::math_log_double_function(isolate).address(), + "std::log"); + Add(ExternalReference::store_buffer_top(isolate).address(), + "store_buffer_top"); + Add(ExternalReference::address_of_canonical_non_hole_nan().address(), + "canonical_nan"); + Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan"); + Add(ExternalReference::get_date_field_function(isolate).address(), + "JSDate::GetField"); + 
Add(ExternalReference::date_cache_stamp(isolate).address(), + "date_cache_stamp"); + Add(ExternalReference::address_of_pending_message_obj(isolate).address(), + "address_of_pending_message_obj"); + Add(ExternalReference::address_of_has_pending_message(isolate).address(), + "address_of_has_pending_message"); + Add(ExternalReference::address_of_pending_message_script(isolate).address(), + "pending_message_script"); + Add(ExternalReference::get_make_code_young_function(isolate).address(), + "Code::MakeCodeYoung"); + Add(ExternalReference::cpu_features().address(), "cpu_features"); + Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(), + "Runtime::AllocateInNewSpace"); + Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(), + "Runtime::AllocateInTargetSpace"); + Add(ExternalReference::old_pointer_space_allocation_top_address(isolate) + .address(), + "Heap::OldPointerSpaceAllocationTopAddress"); + Add(ExternalReference::old_pointer_space_allocation_limit_address(isolate) + .address(), + "Heap::OldPointerSpaceAllocationLimitAddress"); + Add(ExternalReference::old_data_space_allocation_top_address(isolate) + .address(), + "Heap::OldDataSpaceAllocationTopAddress"); + Add(ExternalReference::old_data_space_allocation_limit_address(isolate) + .address(), + "Heap::OldDataSpaceAllocationLimitAddress"); + Add(ExternalReference::allocation_sites_list_address(isolate).address(), + "Heap::allocation_sites_list_address()"); + Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias"); + Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(), + "Code::MarkCodeAsExecuted"); + Add(ExternalReference::is_profiling_address(isolate).address(), + "CpuProfiler::is_profiling"); + Add(ExternalReference::scheduled_exception_address(isolate).address(), + "Isolate::scheduled_exception"); + Add(ExternalReference::invoke_function_callback(isolate).address(), + "InvokeFunctionCallback"); + 
Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(), + "InvokeAccessorGetterCallback"); + Add(ExternalReference::flush_icache_function(isolate).address(), + "CpuFeatures::FlushICache"); + Add(ExternalReference::log_enter_external_function(isolate).address(), + "Logger::EnterExternal"); + Add(ExternalReference::log_leave_external_function(isolate).address(), + "Logger::LeaveExternal"); + Add(ExternalReference::address_of_minus_one_half().address(), + "double_constants.minus_one_half"); + Add(ExternalReference::stress_deopt_count(isolate).address(), + "Isolate::stress_deopt_count_address()"); + Add(ExternalReference::incremental_marking_record_write_function(isolate) + .address(), + "IncrementalMarking::RecordWriteFromCode"); + + // Debug addresses + Add(ExternalReference::debug_after_break_target_address(isolate).address(), + "Debug::after_break_target_address()"); + Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate) + .address(), + "Debug::restarter_frame_function_pointer_address()"); + Add(ExternalReference::debug_is_active_address(isolate).address(), + "Debug::is_active_address()"); + +#ifndef V8_INTERPRETED_REGEXP + Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), + "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); + Add(ExternalReference::re_check_stack_guard_state(isolate).address(), + "RegExpMacroAssembler*::CheckStackGuardState()"); + Add(ExternalReference::re_grow_stack(isolate).address(), + "NativeRegExpMacroAssembler::GrowStack()"); + Add(ExternalReference::re_word_character_map().address(), + "NativeRegExpMacroAssembler::word_character_map"); + Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), + "RegExpStack::limit_address()"); + Add(ExternalReference::address_of_regexp_stack_memory_address(isolate) + .address(), + "RegExpStack::memory_address()"); + Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(), + 
"RegExpStack::memory_size()"); + Add(ExternalReference::address_of_static_offsets_vector(isolate).address(), + "OffsetsVector::static_offsets_vector"); +#endif // V8_INTERPRETED_REGEXP + // The following populates all of the different type of external references // into the ExternalReferenceTable. // @@ -173,16 +293,9 @@ "Runtime::" #name }, RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY) + INLINE_OPTIMIZED_FUNCTION_LIST(RUNTIME_ENTRY) #undef RUNTIME_ENTRY -#define RUNTIME_HIDDEN_ENTRY(name, nargs, ressize) \ - { RUNTIME_FUNCTION, \ - Runtime::kHidden##name, \ - "Runtime::Hidden" #name }, - - RUNTIME_HIDDEN_FUNCTION_LIST(RUNTIME_HIDDEN_ENTRY) -#undef RUNTIME_HIDDEN_ENTRY - #define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \ { RUNTIME_FUNCTION, \ Runtime::kInlineOptimized##name, \ @@ -208,26 +321,6 @@ isolate); } -#ifdef ENABLE_DEBUGGER_SUPPORT - // Debug addresses - Add(Debug_Address(Debug::k_after_break_target_address).address(isolate), - DEBUG_ADDRESS, - Debug::k_after_break_target_address << kDebugIdShift, - "Debug::after_break_target_address()"); - Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate), - DEBUG_ADDRESS, - Debug::k_debug_break_slot_address << kDebugIdShift, - "Debug::debug_break_slot_address()"); - Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate), - DEBUG_ADDRESS, - Debug::k_debug_break_return_address << kDebugIdShift, - "Debug::debug_break_return_address()"); - Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate), - DEBUG_ADDRESS, - Debug::k_restarter_frame_function_pointer << kDebugIdShift, - "Debug::restarter_frame_function_pointer_address()"); -#endif - // Stat counters struct StatsRefTableEntry { StatsCounter* (Counters::*counter)(); @@ -271,294 +364,42 @@ } // Accessors -#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ - Add((Address)&Accessors::name, \ +#define ACCESSOR_INFO_DECLARATION(name) \ + Add(FUNCTION_ADDR(&Accessors::name##Getter), \ ACCESSOR, \ - Accessors::k##name, \ - 
"Accessors::" #name); - - ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) -#undef ACCESSOR_DESCRIPTOR_DECLARATION + Accessors::k##name##Getter, \ + "Accessors::" #name "Getter"); \ + Add(FUNCTION_ADDR(&Accessors::name##Setter), \ + ACCESSOR, \ + Accessors::k##name##Setter, \ + "Accessors::" #name "Setter"); + ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION) +#undef ACCESSOR_INFO_DECLARATION StubCache* stub_cache = isolate->stub_cache(); // Stub cache tables Add(stub_cache->key_reference(StubCache::kPrimary).address(), - STUB_CACHE_TABLE, - 1, - "StubCache::primary_->key"); + STUB_CACHE_TABLE, 1, "StubCache::primary_->key"); Add(stub_cache->value_reference(StubCache::kPrimary).address(), - STUB_CACHE_TABLE, - 2, - "StubCache::primary_->value"); + STUB_CACHE_TABLE, 2, "StubCache::primary_->value"); Add(stub_cache->map_reference(StubCache::kPrimary).address(), - STUB_CACHE_TABLE, - 3, - "StubCache::primary_->map"); + STUB_CACHE_TABLE, 3, "StubCache::primary_->map"); Add(stub_cache->key_reference(StubCache::kSecondary).address(), - STUB_CACHE_TABLE, - 4, - "StubCache::secondary_->key"); + STUB_CACHE_TABLE, 4, "StubCache::secondary_->key"); Add(stub_cache->value_reference(StubCache::kSecondary).address(), - STUB_CACHE_TABLE, - 5, - "StubCache::secondary_->value"); + STUB_CACHE_TABLE, 5, "StubCache::secondary_->value"); Add(stub_cache->map_reference(StubCache::kSecondary).address(), - STUB_CACHE_TABLE, - 6, - "StubCache::secondary_->map"); + STUB_CACHE_TABLE, 6, "StubCache::secondary_->map"); // Runtime entries - Add(ExternalReference::perform_gc_function(isolate).address(), - RUNTIME_ENTRY, - 1, - "Runtime::PerformGC"); - // Runtime entries - Add(ExternalReference::out_of_memory_function(isolate).address(), - RUNTIME_ENTRY, - 2, - "Runtime::OutOfMemory"); Add(ExternalReference::delete_handle_scope_extensions(isolate).address(), - RUNTIME_ENTRY, - 4, - "HandleScope::DeleteExtensions"); - Add(ExternalReference:: - 
incremental_marking_record_write_function(isolate).address(), - RUNTIME_ENTRY, - 5, - "IncrementalMarking::RecordWrite"); + RUNTIME_ENTRY, 1, "HandleScope::DeleteExtensions"); + Add(ExternalReference::incremental_marking_record_write_function(isolate) + .address(), + RUNTIME_ENTRY, 2, "IncrementalMarking::RecordWrite"); Add(ExternalReference::store_buffer_overflow_function(isolate).address(), - RUNTIME_ENTRY, - 6, - "StoreBuffer::StoreBufferOverflow"); - - // Miscellaneous - Add(ExternalReference::roots_array_start(isolate).address(), - UNCLASSIFIED, - 3, - "Heap::roots_array_start()"); - Add(ExternalReference::address_of_stack_limit(isolate).address(), - UNCLASSIFIED, - 4, - "StackGuard::address_of_jslimit()"); - Add(ExternalReference::address_of_real_stack_limit(isolate).address(), - UNCLASSIFIED, - 5, - "StackGuard::address_of_real_jslimit()"); -#ifndef V8_INTERPRETED_REGEXP - Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), - UNCLASSIFIED, - 6, - "RegExpStack::limit_address()"); - Add(ExternalReference::address_of_regexp_stack_memory_address( - isolate).address(), - UNCLASSIFIED, - 7, - "RegExpStack::memory_address()"); - Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(), - UNCLASSIFIED, - 8, - "RegExpStack::memory_size()"); - Add(ExternalReference::address_of_static_offsets_vector(isolate).address(), - UNCLASSIFIED, - 9, - "OffsetsVector::static_offsets_vector"); -#endif // V8_INTERPRETED_REGEXP - Add(ExternalReference::new_space_start(isolate).address(), - UNCLASSIFIED, - 10, - "Heap::NewSpaceStart()"); - Add(ExternalReference::new_space_mask(isolate).address(), - UNCLASSIFIED, - 11, - "Heap::NewSpaceMask()"); - Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(), - UNCLASSIFIED, - 12, - "Heap::always_allocate_scope_depth()"); - Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), - UNCLASSIFIED, - 14, - "Heap::NewSpaceAllocationLimitAddress()"); - 
Add(ExternalReference::new_space_allocation_top_address(isolate).address(), - UNCLASSIFIED, - 15, - "Heap::NewSpaceAllocationTopAddress()"); -#ifdef ENABLE_DEBUGGER_SUPPORT - Add(ExternalReference::debug_break(isolate).address(), - UNCLASSIFIED, - 16, - "Debug::Break()"); - Add(ExternalReference::debug_step_in_fp_address(isolate).address(), - UNCLASSIFIED, - 17, - "Debug::step_in_fp_addr()"); -#endif - Add(ExternalReference::mod_two_doubles_operation(isolate).address(), - UNCLASSIFIED, - 22, - "mod_two_doubles"); -#ifndef V8_INTERPRETED_REGEXP - Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), - UNCLASSIFIED, - 24, - "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); - Add(ExternalReference::re_check_stack_guard_state(isolate).address(), - UNCLASSIFIED, - 25, - "RegExpMacroAssembler*::CheckStackGuardState()"); - Add(ExternalReference::re_grow_stack(isolate).address(), - UNCLASSIFIED, - 26, - "NativeRegExpMacroAssembler::GrowStack()"); - Add(ExternalReference::re_word_character_map().address(), - UNCLASSIFIED, - 27, - "NativeRegExpMacroAssembler::word_character_map"); -#endif // V8_INTERPRETED_REGEXP - // Keyed lookup cache. 
- Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(), - UNCLASSIFIED, - 28, - "KeyedLookupCache::keys()"); - Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(), - UNCLASSIFIED, - 29, - "KeyedLookupCache::field_offsets()"); - Add(ExternalReference::handle_scope_next_address(isolate).address(), - UNCLASSIFIED, - 31, - "HandleScope::next"); - Add(ExternalReference::handle_scope_limit_address(isolate).address(), - UNCLASSIFIED, - 32, - "HandleScope::limit"); - Add(ExternalReference::handle_scope_level_address(isolate).address(), - UNCLASSIFIED, - 33, - "HandleScope::level"); - Add(ExternalReference::new_deoptimizer_function(isolate).address(), - UNCLASSIFIED, - 34, - "Deoptimizer::New()"); - Add(ExternalReference::compute_output_frames_function(isolate).address(), - UNCLASSIFIED, - 35, - "Deoptimizer::ComputeOutputFrames()"); - Add(ExternalReference::address_of_min_int().address(), - UNCLASSIFIED, - 36, - "LDoubleConstant::min_int"); - Add(ExternalReference::address_of_one_half().address(), - UNCLASSIFIED, - 37, - "LDoubleConstant::one_half"); - Add(ExternalReference::isolate_address(isolate).address(), - UNCLASSIFIED, - 38, - "isolate"); - Add(ExternalReference::address_of_minus_zero().address(), - UNCLASSIFIED, - 39, - "LDoubleConstant::minus_zero"); - Add(ExternalReference::address_of_negative_infinity().address(), - UNCLASSIFIED, - 40, - "LDoubleConstant::negative_infinity"); - Add(ExternalReference::power_double_double_function(isolate).address(), - UNCLASSIFIED, - 41, - "power_double_double_function"); - Add(ExternalReference::power_double_int_function(isolate).address(), - UNCLASSIFIED, - 42, - "power_double_int_function"); - Add(ExternalReference::store_buffer_top(isolate).address(), - UNCLASSIFIED, - 43, - "store_buffer_top"); - Add(ExternalReference::address_of_canonical_non_hole_nan().address(), - UNCLASSIFIED, - 44, - "canonical_nan"); - Add(ExternalReference::address_of_the_hole_nan().address(), - UNCLASSIFIED, - 45, 
- "the_hole_nan"); - Add(ExternalReference::get_date_field_function(isolate).address(), - UNCLASSIFIED, - 46, - "JSDate::GetField"); - Add(ExternalReference::date_cache_stamp(isolate).address(), - UNCLASSIFIED, - 47, - "date_cache_stamp"); - Add(ExternalReference::address_of_pending_message_obj(isolate).address(), - UNCLASSIFIED, - 48, - "address_of_pending_message_obj"); - Add(ExternalReference::address_of_has_pending_message(isolate).address(), - UNCLASSIFIED, - 49, - "address_of_has_pending_message"); - Add(ExternalReference::address_of_pending_message_script(isolate).address(), - UNCLASSIFIED, - 50, - "pending_message_script"); - Add(ExternalReference::get_make_code_young_function(isolate).address(), - UNCLASSIFIED, - 51, - "Code::MakeCodeYoung"); - Add(ExternalReference::cpu_features().address(), - UNCLASSIFIED, - 52, - "cpu_features"); - Add(ExternalReference(Runtime::kHiddenAllocateInNewSpace, isolate).address(), - UNCLASSIFIED, - 53, - "Runtime::AllocateInNewSpace"); - Add(ExternalReference( - Runtime::kHiddenAllocateInTargetSpace, isolate).address(), - UNCLASSIFIED, - 54, - "Runtime::AllocateInTargetSpace"); - Add(ExternalReference::old_pointer_space_allocation_top_address( - isolate).address(), - UNCLASSIFIED, - 55, - "Heap::OldPointerSpaceAllocationTopAddress"); - Add(ExternalReference::old_pointer_space_allocation_limit_address( - isolate).address(), - UNCLASSIFIED, - 56, - "Heap::OldPointerSpaceAllocationLimitAddress"); - Add(ExternalReference::old_data_space_allocation_top_address( - isolate).address(), - UNCLASSIFIED, - 57, - "Heap::OldDataSpaceAllocationTopAddress"); - Add(ExternalReference::old_data_space_allocation_limit_address( - isolate).address(), - UNCLASSIFIED, - 58, - "Heap::OldDataSpaceAllocationLimitAddress"); - Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate). 
- address(), - UNCLASSIFIED, - 59, - "Heap::NewSpaceAllocationLimitAddress"); - Add(ExternalReference::allocation_sites_list_address(isolate).address(), - UNCLASSIFIED, - 60, - "Heap::allocation_sites_list_address()"); - Add(ExternalReference::address_of_uint32_bias().address(), - UNCLASSIFIED, - 61, - "uint32_bias"); - Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(), - UNCLASSIFIED, - 62, - "Code::MarkCodeAsExecuted"); + RUNTIME_ENTRY, 3, "StoreBuffer::StoreBufferOverflow"); // Add a small set of deopt entry addresses to encoder without generating the // deopt table code, which isn't possible at deserialization time. @@ -575,7 +416,7 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) - : encodings_(Match), + : encodings_(HashMap::PointersMatch), isolate_(isolate) { ExternalReferenceTable* external_references = ExternalReferenceTable::instance(isolate_); @@ -587,16 +428,16 @@ uint32_t ExternalReferenceEncoder::Encode(Address key) const { int index = IndexOf(key); - ASSERT(key == NULL || index >= 0); - return index >=0 ? + DCHECK(key == NULL || index >= 0); + return index >= 0 ? ExternalReferenceTable::instance(isolate_)->code(index) : 0; } const char* ExternalReferenceEncoder::NameOfAddress(Address key) const { int index = IndexOf(key); - return index >= 0 ? - ExternalReferenceTable::instance(isolate_)->name(index) : NULL; + return index >= 0 ? 
ExternalReferenceTable::instance(isolate_)->name(index) + : "<unknown>"; } @@ -639,10 +480,6 @@ } -bool Serializer::serialization_enabled_ = false; -bool Serializer::too_late_to_enable_now_ = false; - - class CodeAddressMap: public CodeEventLogger { public: explicit CodeAddressMap(Isolate* isolate) @@ -658,6 +495,9 @@ address_to_name_map_.Move(from, to); } + virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { + } + virtual void CodeDeleteEvent(Address from) { address_to_name_map_.Remove(from); } @@ -669,7 +509,7 @@ private: class NameMap { public: - NameMap() : impl_(&PointerEquals) {} + NameMap() : impl_(HashMap::PointersMatch) {} ~NameMap() { for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) { @@ -700,19 +540,15 @@ void Move(Address from, Address to) { if (from == to) return; HashMap::Entry* from_entry = FindEntry(from); - ASSERT(from_entry != NULL); + DCHECK(from_entry != NULL); void* value = from_entry->value; RemoveEntry(from_entry); HashMap::Entry* to_entry = FindOrCreateEntry(to); - ASSERT(to_entry->value == NULL); + DCHECK(to_entry->value == NULL); to_entry->value = value; } private: - static bool PointerEquals(void* lhs, void* rhs) { - return lhs == rhs; - } - static char* CopyName(const char* name, int name_size) { char* result = NewArray<char>(name_size + 1); for (int i = 0; i < name_size; ++i) { @@ -755,30 +591,9 @@ }; -CodeAddressMap* Serializer::code_address_map_ = NULL; - - -void Serializer::Enable(Isolate* isolate) { - if (!serialization_enabled_) { - ASSERT(!too_late_to_enable_now_); - } - if (serialization_enabled_) return; - serialization_enabled_ = true; - isolate->InitializeLoggingAndCounters(); - code_address_map_ = new CodeAddressMap(isolate); -} - - -void Serializer::Disable() { - if (!serialization_enabled_) return; - serialization_enabled_ = false; - delete code_address_map_; - code_address_map_ = NULL; -} - - Deserializer::Deserializer(SnapshotByteSource* source) : isolate_(NULL), + 
attached_objects_(NULL), source_(source), external_reference_decoder_(NULL) { for (int i = 0; i < LAST_SPACE + 1; i++) { @@ -791,20 +606,20 @@ PageIterator it(isolate_->heap()->code_space()); while (it.has_next()) { Page* p = it.next(); - CPU::FlushICache(p->area_start(), p->area_end() - p->area_start()); + CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start()); } } void Deserializer::Deserialize(Isolate* isolate) { isolate_ = isolate; - ASSERT(isolate_ != NULL); + DCHECK(isolate_ != NULL); isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]); // No active threads. - ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); + DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); // No active handles. - ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); - ASSERT_EQ(NULL, external_reference_decoder_); + DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty()); + DCHECK_EQ(NULL, external_reference_decoder_); external_reference_decoder_ = new ExternalReferenceDecoder(isolate); isolate_->heap()->IterateSmiRoots(this); isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); @@ -844,13 +659,15 @@ void Deserializer::DeserializePartial(Isolate* isolate, Object** root) { isolate_ = isolate; for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) { - ASSERT(reservations_[i] != kUninitializedReservation); + DCHECK(reservations_[i] != kUninitializedReservation); } isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]); if (external_reference_decoder_ == NULL) { external_reference_decoder_ = new ExternalReferenceDecoder(isolate); } + DisallowHeapAllocation no_gc; + // Keep track of the code space start and end pointers in case new // code objects were unserialized OldSpace* code_space = isolate_->heap()->code_space(); @@ -865,11 +682,13 @@ Deserializer::~Deserializer() { - ASSERT(source_->AtEOF()); + // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed. 
+ // DCHECK(source_->AtEOF()); if (external_reference_decoder_) { delete external_reference_decoder_; external_reference_decoder_ = NULL; } + if (attached_objects_) attached_objects_->Dispose(); } @@ -892,6 +711,64 @@ } +// Used to insert a deserialized internalized string into the string table. +class StringTableInsertionKey : public HashTableKey { + public: + explicit StringTableInsertionKey(String* string) + : string_(string), hash_(HashForObject(string)) { + DCHECK(string->IsInternalizedString()); + } + + virtual bool IsMatch(Object* string) { + // We know that all entries in a hash table had their hash keys created. + // Use that knowledge to have fast failure. + if (hash_ != HashForObject(string)) return false; + // We want to compare the content of two internalized strings here. + return string_->SlowEquals(String::cast(string)); + } + + virtual uint32_t Hash() V8_OVERRIDE { return hash_; } + + virtual uint32_t HashForObject(Object* key) V8_OVERRIDE { + return String::cast(key)->Hash(); + } + + MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) + V8_OVERRIDE { + return handle(string_, isolate); + } + + String* string_; + uint32_t hash_; +}; + + +HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) { + if (obj->IsString()) { + String* string = String::cast(obj); + // Uninitialize hash field as the hash seed may have changed. 
+ string->set_hash_field(String::kEmptyHashField); + if (string->IsInternalizedString()) { + DisallowHeapAllocation no_gc; + HandleScope scope(isolate_); + StringTableInsertionKey key(string); + String* canonical = *StringTable::LookupKey(isolate_, &key); + string->SetForwardedInternalizedString(canonical); + return canonical; + } + } + return obj; +} + + +Object* Deserializer::ProcessBackRefInSerializedCode(Object* obj) { + if (obj->IsInternalizedString()) { + return String::cast(obj)->GetForwardedInternalizedString(); + } + return obj; +} + + // This routine writes the new object into the pointer provided and then // returns true if the new object was in young space and false otherwise. // The reason for this strange interface is that otherwise the object is @@ -902,7 +779,7 @@ int size = source_->GetInt() << kObjectAlignmentBits; Address address = Allocate(space_number, size); HeapObject* obj = HeapObject::FromAddress(address); - *write_back = obj; + isolate_->heap()->OnAllocationEvent(obj, size); Object** current = reinterpret_cast<Object**>(address); Object** limit = current + (size >> kPointerSizeLog2); if (FLAG_log_snapshot_positions) { @@ -913,13 +790,15 @@ // TODO(mvstanton): consider treating the heap()->allocation_sites_list() // as a (weak) root. If this root is relocated correctly, // RelinkAllocationSite() isn't necessary. - if (obj->IsAllocationSite()) { - RelinkAllocationSite(AllocationSite::cast(obj)); - } + if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj)); + + // Fix up strings from serialized user code. 
+ if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj); + *write_back = obj; #ifdef DEBUG bool is_codespace = (space_number == CODE_SPACE); - ASSERT(obj->IsCode() == is_codespace); + DCHECK(obj->IsCode() == is_codespace); #endif } @@ -940,91 +819,107 @@ while (current < limit) { int data = source_->Get(); switch (data) { -#define CASE_STATEMENT(where, how, within, space_number) \ - case where + how + within + space_number: \ - ASSERT((where & ~kPointedToMask) == 0); \ - ASSERT((how & ~kHowToCodeMask) == 0); \ - ASSERT((within & ~kWhereToPointMask) == 0); \ - ASSERT((space_number & ~kSpaceMask) == 0); +#define CASE_STATEMENT(where, how, within, space_number) \ + case where + how + within + space_number: \ + STATIC_ASSERT((where & ~kPointedToMask) == 0); \ + STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \ + STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \ + STATIC_ASSERT((space_number & ~kSpaceMask) == 0); #define CASE_BODY(where, how, within, space_number_if_any) \ - { \ - bool emit_write_barrier = false; \ - bool current_was_incremented = false; \ - int space_number = space_number_if_any == kAnyOldSpace ? \ - (data & kSpaceMask) : space_number_if_any; \ - if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ - ReadObject(space_number, current); \ - emit_write_barrier = (space_number == NEW_SPACE); \ - } else { \ - Object* new_object = NULL; /* May not be a real Object pointer. 
*/ \ - if (where == kNewObject) { \ - ReadObject(space_number, &new_object); \ - } else if (where == kRootArray) { \ - int root_id = source_->GetInt(); \ - new_object = isolate->heap()->roots_array_start()[root_id]; \ - emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ - } else if (where == kPartialSnapshotCache) { \ - int cache_index = source_->GetInt(); \ - new_object = isolate->serialize_partial_snapshot_cache() \ - [cache_index]; \ - emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ - } else if (where == kExternalReference) { \ - int skip = source_->GetInt(); \ - current = reinterpret_cast<Object**>(reinterpret_cast<Address>( \ - current) + skip); \ - int reference_id = source_->GetInt(); \ - Address address = external_reference_decoder_-> \ - Decode(reference_id); \ - new_object = reinterpret_cast<Object*>(address); \ - } else if (where == kBackref) { \ - emit_write_barrier = (space_number == NEW_SPACE); \ - new_object = GetAddressFromEnd(data & kSpaceMask); \ - } else { \ - ASSERT(where == kBackrefWithSkip); \ - int skip = source_->GetInt(); \ - current = reinterpret_cast<Object**>( \ - reinterpret_cast<Address>(current) + skip); \ - emit_write_barrier = (space_number == NEW_SPACE); \ - new_object = GetAddressFromEnd(data & kSpaceMask); \ - } \ - if (within == kInnerPointer) { \ - if (space_number != CODE_SPACE || new_object->IsCode()) { \ - Code* new_code_object = reinterpret_cast<Code*>(new_object); \ - new_object = reinterpret_cast<Object*>( \ - new_code_object->instruction_start()); \ - } else { \ - ASSERT(space_number == CODE_SPACE); \ - Cell* cell = Cell::cast(new_object); \ - new_object = reinterpret_cast<Object*>( \ - cell->ValueAddress()); \ - } \ - } \ - if (how == kFromCode) { \ - Address location_of_branch_data = \ - reinterpret_cast<Address>(current); \ - Assembler::deserialization_set_special_target_at( \ - location_of_branch_data, \ - Code::cast(HeapObject::FromAddress(current_object_address)), \ - 
reinterpret_cast<Address>(new_object)); \ - location_of_branch_data += Assembler::kSpecialTargetSize; \ - current = reinterpret_cast<Object**>(location_of_branch_data); \ - current_was_incremented = true; \ - } else { \ - *current = new_object; \ - } \ + { \ + bool emit_write_barrier = false; \ + bool current_was_incremented = false; \ + int space_number = space_number_if_any == kAnyOldSpace \ + ? (data & kSpaceMask) \ + : space_number_if_any; \ + if (where == kNewObject && how == kPlain && within == kStartOfObject) { \ + ReadObject(space_number, current); \ + emit_write_barrier = (space_number == NEW_SPACE); \ + } else { \ + Object* new_object = NULL; /* May not be a real Object pointer. */ \ + if (where == kNewObject) { \ + ReadObject(space_number, &new_object); \ + } else if (where == kRootArray) { \ + int root_id = source_->GetInt(); \ + new_object = isolate->heap()->roots_array_start()[root_id]; \ + emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ + } else if (where == kPartialSnapshotCache) { \ + int cache_index = source_->GetInt(); \ + new_object = isolate->serialize_partial_snapshot_cache()[cache_index]; \ + emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ + } else if (where == kExternalReference) { \ + int skip = source_->GetInt(); \ + current = reinterpret_cast<Object**>( \ + reinterpret_cast<Address>(current) + skip); \ + int reference_id = source_->GetInt(); \ + Address address = external_reference_decoder_->Decode(reference_id); \ + new_object = reinterpret_cast<Object*>(address); \ + } else if (where == kBackref) { \ + emit_write_barrier = (space_number == NEW_SPACE); \ + new_object = GetAddressFromEnd(data & kSpaceMask); \ + if (deserializing_user_code()) { \ + new_object = ProcessBackRefInSerializedCode(new_object); \ } \ - if (emit_write_barrier && write_barrier_needed) { \ - Address current_address = reinterpret_cast<Address>(current); \ - isolate->heap()->RecordWrite( \ - current_object_address, \ - 
static_cast<int>(current_address - current_object_address)); \ + } else if (where == kBuiltin) { \ + DCHECK(deserializing_user_code()); \ + int builtin_id = source_->GetInt(); \ + DCHECK_LE(0, builtin_id); \ + DCHECK_LT(builtin_id, Builtins::builtin_count); \ + Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \ + new_object = isolate->builtins()->builtin(name); \ + emit_write_barrier = false; \ + } else if (where == kAttachedReference) { \ + DCHECK(deserializing_user_code()); \ + int index = source_->GetInt(); \ + new_object = attached_objects_->at(index); \ + emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ + } else { \ + DCHECK(where == kBackrefWithSkip); \ + int skip = source_->GetInt(); \ + current = reinterpret_cast<Object**>( \ + reinterpret_cast<Address>(current) + skip); \ + emit_write_barrier = (space_number == NEW_SPACE); \ + new_object = GetAddressFromEnd(data & kSpaceMask); \ + if (deserializing_user_code()) { \ + new_object = ProcessBackRefInSerializedCode(new_object); \ } \ - if (!current_was_incremented) { \ - current++; \ + } \ + if (within == kInnerPointer) { \ + if (space_number != CODE_SPACE || new_object->IsCode()) { \ + Code* new_code_object = reinterpret_cast<Code*>(new_object); \ + new_object = \ + reinterpret_cast<Object*>(new_code_object->instruction_start()); \ + } else { \ + DCHECK(space_number == CODE_SPACE); \ + Cell* cell = Cell::cast(new_object); \ + new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \ } \ - break; \ } \ + if (how == kFromCode) { \ + Address location_of_branch_data = reinterpret_cast<Address>(current); \ + Assembler::deserialization_set_special_target_at( \ + location_of_branch_data, \ + Code::cast(HeapObject::FromAddress(current_object_address)), \ + reinterpret_cast<Address>(new_object)); \ + location_of_branch_data += Assembler::kSpecialTargetSize; \ + current = reinterpret_cast<Object**>(location_of_branch_data); \ + current_was_incremented = true; \ + } else { \ + *current 
= new_object; \ + } \ + } \ + if (emit_write_barrier && write_barrier_needed) { \ + Address current_address = reinterpret_cast<Address>(current); \ + isolate->heap()->RecordWrite( \ + current_object_address, \ + static_cast<int>(current_address - current_object_address)); \ + } \ + if (!current_was_incremented) { \ + current++; \ + } \ + break; \ + } // This generates a case and a body for the new space (which has to do extra // write barrier handling) and handles the other spaces with 8 fall-through @@ -1111,7 +1006,7 @@ SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) { int root_id = RootArrayConstantFromByteCode(data); Object* object = isolate->heap()->roots_array_start()[root_id]; - ASSERT(!isolate->heap()->InNewSpace(object)); + DCHECK(!isolate->heap()->InNewSpace(object)); *current++ = object; break; } @@ -1123,7 +1018,7 @@ current = reinterpret_cast<Object**>( reinterpret_cast<intptr_t>(current) + skip); Object* object = isolate->heap()->roots_array_start()[root_id]; - ASSERT(!isolate->heap()->InNewSpace(object)); + DCHECK(!isolate->heap()->InNewSpace(object)); *current++ = object; break; } @@ -1131,7 +1026,7 @@ case kRepeat: { int repeats = source_->GetInt(); Object* object = current[-1]; - ASSERT(!isolate->heap()->InNewSpace(object)); + DCHECK(!isolate->heap()->InNewSpace(object)); for (int i = 0; i < repeats; i++) current[i] = object; current += repeats; break; @@ -1146,7 +1041,7 @@ FOUR_CASES(kConstantRepeat + 9) { int repeats = RepeatsForCode(data); Object* object = current[-1]; - ASSERT(!isolate->heap()->InNewSpace(object)); + DCHECK(!isolate->heap()->InNewSpace(object)); for (int i = 0; i < repeats; i++) current[i] = object; current += repeats; break; @@ -1167,7 +1062,8 @@ // allocation point and write a pointer to it to the current object. 
ALL_SPACES(kBackref, kPlain, kStartOfObject) ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject) -#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL +#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \ + defined(V8_TARGET_ARCH_MIPS64) // Deserialize a new object from pointer found in code and write // a pointer to it to the current object. Required only for MIPS or ARM // with ool constant pool, and omitted on the other architectures because @@ -1219,6 +1115,16 @@ kFromCode, kStartOfObject, 0) + // Find a builtin and write a pointer to it to the current object. + CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0) + CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0) + // Find a builtin and write a pointer to it in the current code object. + CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0) + CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0) + // Find an object in the attached references and write a pointer to it to + // the current object. + CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0) + CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0) #undef CASE_STATEMENT #undef CASE_BODY @@ -1252,20 +1158,7 @@ UNREACHABLE(); } } - ASSERT_EQ(limit, current); -} - - -void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { - ASSERT(integer < 1 << 22); - integer <<= 2; - int bytes = 1; - if (integer > 0xff) bytes = 2; - if (integer > 0xffff) bytes = 3; - integer |= bytes; - Put(static_cast<int>(integer & 0xff), "IntPart1"); - if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2"); - if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3"); + DCHECK_EQ(limit, current); } @@ -1273,7 +1166,8 @@ : isolate_(isolate), sink_(sink), external_reference_encoder_(new ExternalReferenceEncoder(isolate)), - root_index_wave_front_(0) { + root_index_wave_front_(0), + code_address_map_(NULL) { // The serializer is meant to be used only to generate initial heap images // from a context in which there is only 
one isolate. for (int i = 0; i <= LAST_SPACE; i++) { @@ -1284,6 +1178,7 @@ Serializer::~Serializer() { delete external_reference_encoder_; + if (code_address_map_ != NULL) delete code_address_map_; } @@ -1349,7 +1244,7 @@ // deserialized objects. void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) { - if (Serializer::enabled()) return; + if (isolate->serializer_enabled()) return; for (int i = 0; ; i++) { if (isolate->serialize_partial_snapshot_cache_length() <= i) { // Extend the array ready to get a value from the visitor when @@ -1385,7 +1280,7 @@ startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object)); // We don't recurse from the startup snapshot generator into the partial // snapshot generator. - ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1); + DCHECK(length == isolate->serialize_partial_snapshot_cache_length() - 1); return length; } @@ -1396,7 +1291,8 @@ for (int i = 0; i < root_index_wave_front_; i++) { Object* root = heap->roots_array_start()[i]; if (!root->IsSmi() && root == heap_object) { -#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL +#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \ + defined(V8_TARGET_ARCH_MIPS64) if (from == kFromCode) { // In order to avoid code bloat in the deserializer we don't have // support for the encoding that specifies a particular root should @@ -1442,6 +1338,7 @@ int skip) { CHECK(o->IsHeapObject()); HeapObject* heap_object = HeapObject::cast(o); + DCHECK(!heap_object->IsJSFunction()); int root_index; if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { @@ -1526,7 +1423,7 @@ if (heap_object->IsMap()) { // The code-caches link to context-specific code objects, which // the startup and context serializes cannot currently handle. 
- ASSERT(Map::cast(heap_object)->code_cache() == + DCHECK(Map::cast(heap_object)->code_cache() == heap_object->GetHeap()->empty_fixed_array()); } @@ -1552,10 +1449,10 @@ // Pointers from the partial snapshot to the objects in the startup snapshot // should go through the root array or through the partial snapshot cache. // If this is not the case you may have to add something to the root array. - ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object)); + DCHECK(!startup_serializer_->address_mapper()->IsMapped(heap_object)); // All the internalized strings that the partial snapshot needs should be // either in the root table or in the partial snapshot cache. - ASSERT(!heap_object->IsInternalizedString()); + DCHECK(!heap_object->IsInternalizedString()); if (address_mapper_.IsMapped(heap_object)) { int space = SpaceOfObject(heap_object); @@ -1589,12 +1486,14 @@ "ObjectSerialization"); sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); - ASSERT(code_address_map_); - const char* code_name = code_address_map_->Lookup(object_->address()); - LOG(serializer_->isolate_, - CodeNameEvent(object_->address(), sink_->Position(), code_name)); - LOG(serializer_->isolate_, - SnapshotPositionEvent(object_->address(), sink_->Position())); + if (serializer_->code_address_map_) { + const char* code_name = + serializer_->code_address_map_->Lookup(object_->address()); + LOG(serializer_->isolate_, + CodeNameEvent(object_->address(), sink_->Position(), code_name)); + LOG(serializer_->isolate_, + SnapshotPositionEvent(object_->address(), sink_->Position())); + } // Mark this object as already serialized. 
int offset = serializer_->Allocate(space, size); @@ -1628,7 +1527,7 @@ root_index != kInvalidRootIndex && root_index < kRootArrayNumberOfConstantEncodings && current_contents == current[-1]) { - ASSERT(!serializer_->isolate()->heap()->InNewSpace(current_contents)); + DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents)); int repeat_count = 1; while (current < end - 1 && current[repeat_count] == current_contents) { repeat_count++; @@ -1757,7 +1656,7 @@ static Code* CloneCodeObject(HeapObject* code) { Address copy = new byte[code->Size()]; - OS::MemCopy(copy, code->address(), code->Size()); + MemCopy(copy, code->address(), code->Size()); return Code::cast(HeapObject::FromAddress(copy)); } @@ -1783,10 +1682,10 @@ int up_to_offset = static_cast<int>(up_to - object_start); int to_skip = up_to_offset - bytes_processed_so_far_; int bytes_to_output = to_skip; - bytes_processed_so_far_ += to_skip; + bytes_processed_so_far_ += to_skip; // This assert will fail if the reloc info gives us the target_address_address // locations in a non-ascending order. Luckily that doesn't happen. - ASSERT(to_skip >= 0); + DCHECK(to_skip >= 0); bool outputting_code = false; if (to_skip != 0 && code_object_ && !code_has_been_output_) { // Output the code all at once and fix later. 
@@ -1839,7 +1738,7 @@ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { AllocationSpace s = static_cast<AllocationSpace>(i); if (object->GetHeap()->InSpace(object, s)) { - ASSERT(i < kNumberOfSpaces); + DCHECK(i < kNumberOfSpaces); return i; } } @@ -1874,12 +1773,182 @@ } -bool SnapshotByteSource::AtEOF() { - if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false; - for (int x = position_; x < length_; x++) { - if (data_[x] != SerializerDeserializer::nop()) return false; +void Serializer::InitializeCodeAddressMap() { + isolate_->InitializeLoggingAndCounters(); + code_address_map_ = new CodeAddressMap(isolate_); +} + + +ScriptData* CodeSerializer::Serialize(Isolate* isolate, + Handle<SharedFunctionInfo> info, + Handle<String> source) { + // Serialize code object. + List<byte> payload; + ListSnapshotSink list_sink(&payload); + CodeSerializer cs(isolate, &list_sink, *source); + DisallowHeapAllocation no_gc; + Object** location = Handle<Object>::cast(info).location(); + cs.VisitPointer(location); + cs.Pad(); + + SerializedCodeData data(&payload, &cs); + return data.GetScriptData(); +} + + +void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code, + WhereToPoint where_to_point, int skip) { + CHECK(o->IsHeapObject()); + HeapObject* heap_object = HeapObject::cast(o); + + // The code-caches link to context-specific code objects, which + // the startup and context serializes cannot currently handle. + DCHECK(!heap_object->IsMap() || + Map::cast(heap_object)->code_cache() == + heap_object->GetHeap()->empty_fixed_array()); + + int root_index; + if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { + PutRoot(root_index, heap_object, how_to_code, where_to_point, skip); + return; + } + + // TODO(yangguo) wire up stubs from stub cache. + // TODO(yangguo) wire up global object. + // TODO(yangguo) We cannot deal with different hash seeds yet. 
+ DCHECK(!heap_object->IsHashTable()); + + if (address_mapper_.IsMapped(heap_object)) { + int space = SpaceOfObject(heap_object); + int address = address_mapper_.MappedTo(heap_object); + SerializeReferenceToPreviousObject(space, address, how_to_code, + where_to_point, skip); + return; } - return true; + + if (heap_object->IsCode()) { + Code* code_object = Code::cast(heap_object); + if (code_object->kind() == Code::BUILTIN) { + SerializeBuiltin(code_object, how_to_code, where_to_point, skip); + return; + } + // TODO(yangguo) figure out whether other code kinds can be handled smarter. + } + + if (heap_object == source_) { + SerializeSourceObject(how_to_code, where_to_point, skip); + return; + } + + if (heap_object->IsScript()) { + // The wrapper cache uses a Foreign object to point to a global handle. + // However, the object visitor expects foreign objects to point to external + // references. Clear the cache to avoid this issue. + Script::cast(heap_object)->ClearWrapperCache(); + } + + if (skip != 0) { + sink_->Put(kSkip, "SkipFromSerializeObject"); + sink_->PutInt(skip, "SkipDistanceFromSerializeObject"); + } + // Object has not yet been serialized. Serialize it here. 
+ ObjectSerializer serializer(this, heap_object, sink_, how_to_code, + where_to_point); + serializer.Serialize(); +} + + +void CodeSerializer::SerializeBuiltin(Code* builtin, HowToCode how_to_code, + WhereToPoint where_to_point, int skip) { + if (skip != 0) { + sink_->Put(kSkip, "SkipFromSerializeBuiltin"); + sink_->PutInt(skip, "SkipDistanceFromSerializeBuiltin"); + } + + DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) || + (how_to_code == kFromCode && where_to_point == kInnerPointer)); + int builtin_index = builtin->builtin_index(); + DCHECK_LT(builtin_index, Builtins::builtin_count); + DCHECK_LE(0, builtin_index); + sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin"); + sink_->PutInt(builtin_index, "builtin_index"); +} + + +void CodeSerializer::SerializeSourceObject(HowToCode how_to_code, + WhereToPoint where_to_point, + int skip) { + if (skip != 0) { + sink_->Put(kSkip, "SkipFromSerializeSourceObject"); + sink_->PutInt(skip, "SkipDistanceFromSerializeSourceObject"); + } + + DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject); + sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source"); + sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex"); +} + + +Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate, + ScriptData* data, + Handle<String> source) { + base::ElapsedTimer timer; + if (FLAG_profile_deserialization) timer.Start(); + SerializedCodeData scd(data, *source); + SnapshotByteSource payload(scd.Payload(), scd.PayloadLength()); + Deserializer deserializer(&payload); + STATIC_ASSERT(NEW_SPACE == 0); + for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) { + deserializer.set_reservation(i, scd.GetReservation(i)); + } + + // Prepare and register list of attached objects. 
+ Vector<Object*> attached_objects = Vector<Object*>::New(1); + attached_objects[kSourceObjectIndex] = *source; + deserializer.SetAttachedObjects(&attached_objects); + + Object* root; + deserializer.DeserializePartial(isolate, &root); + deserializer.FlushICacheForNewCodeObjects(); + if (FLAG_profile_deserialization) { + double ms = timer.Elapsed().InMillisecondsF(); + int length = data->length(); + PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms); + } + return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root), isolate); } + +SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs) + : owns_script_data_(true) { + DisallowHeapAllocation no_gc; + int data_length = payload->length() + kHeaderEntries * kIntSize; + byte* data = NewArray<byte>(data_length); + DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)); + CopyBytes(data + kHeaderEntries * kIntSize, payload->begin(), + static_cast<size_t>(payload->length())); + script_data_ = new ScriptData(data, data_length); + script_data_->AcquireDataOwnership(); + SetHeaderValue(kCheckSumOffset, CheckSum(cs->source())); + STATIC_ASSERT(NEW_SPACE == 0); + for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) { + SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i)); + } +} + + +bool SerializedCodeData::IsSane(String* source) { + return GetHeaderValue(kCheckSumOffset) == CheckSum(source) && + PayloadLength() >= SharedFunctionInfo::kSize; +} + + +int SerializedCodeData::CheckSum(String* string) { + int checksum = Version::Hash(); +#ifdef DEBUG + uint32_t seed = static_cast<uint32_t>(checksum); + checksum = static_cast<int>(IteratingStringHasher::Hash(string, seed)); +#endif // DEBUG + return checksum; +} } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/serialize.h nodejs-0.11.15/deps/v8/src/serialize.h --- nodejs-0.11.13/deps/v8/src/serialize.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/serialize.h 
2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_SERIALIZE_H_ #define V8_SERIALIZE_H_ -#include "hashmap.h" +#include "src/compiler.h" +#include "src/hashmap.h" +#include "src/heap-profiler.h" +#include "src/isolate.h" +#include "src/snapshot-source-sink.h" namespace v8 { namespace internal { @@ -36,18 +17,16 @@ // A TypeCode is used to distinguish different kinds of external reference. // It is a single bit to make testing for types easy. enum TypeCode { - UNCLASSIFIED, // One-of-a-kind references. + UNCLASSIFIED, // One-of-a-kind references. + C_BUILTIN, BUILTIN, RUNTIME_FUNCTION, IC_UTILITY, - DEBUG_ADDRESS, STATS_COUNTER, TOP_ADDRESS, - C_BUILTIN, - EXTENSION, ACCESSOR, - RUNTIME_ENTRY, STUB_CACHE_TABLE, + RUNTIME_ENTRY, LAZY_DEOPTIMIZATION }; @@ -57,10 +36,8 @@ const int kReferenceIdBits = 16; const int kReferenceIdMask = (1 << kReferenceIdBits) - 1; const int kReferenceTypeShift = kReferenceIdBits; -const int kDebugRegisterBits = 4; -const int kDebugIdShift = kDebugRegisterBits; -const int kDeoptTableSerializeEntryCount = 12; +const int kDeoptTableSerializeEntryCount = 64; // ExternalReferenceTable is a helper class that defines the relationship // between external references and their encodings. It is used to build @@ -83,7 +60,7 @@ private: explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) { - PopulateTable(isolate); + PopulateTable(isolate); } struct ExternalReferenceEntry { @@ -103,8 +80,12 @@ // For other types of references, the caller will figure out the address. 
void Add(Address address, TypeCode type, uint16_t id, const char* name); + void Add(Address address, const char* name) { + Add(address, UNCLASSIFIED, ++max_id_[UNCLASSIFIED], name); + } + List<ExternalReferenceEntry> refs_; - int max_id_[kTypeCodeCount]; + uint16_t max_id_[kTypeCodeCount]; }; @@ -124,8 +105,6 @@ int IndexOf(Address key) const; - static bool Match(void* key1, void* key2) { return key1 == key2; } - void Put(Address key, int index); Isolate* isolate_; @@ -147,7 +126,7 @@ Address* Lookup(uint32_t key) const { int type = key >> kReferenceTypeShift; - ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount); + DCHECK(kFirstTypeCode <= type && type < kTypeCodeCount); int id = key & kReferenceIdMask; return &encodings_[type][id]; } @@ -160,49 +139,6 @@ }; -class SnapshotByteSource { - public: - SnapshotByteSource(const byte* array, int length) - : data_(array), length_(length), position_(0) { } - - bool HasMore() { return position_ < length_; } - - int Get() { - ASSERT(position_ < length_); - return data_[position_++]; - } - - int32_t GetUnalignedInt() { -#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN - int32_t answer; - ASSERT(position_ + sizeof(answer) <= length_ + 0u); - answer = *reinterpret_cast<const int32_t*>(data_ + position_); -#else - int32_t answer = data_[position_]; - answer |= data_[position_ + 1] << 8; - answer |= data_[position_ + 2] << 16; - answer |= data_[position_ + 3] << 24; -#endif - return answer; - } - - void Advance(int by) { position_ += by; } - - inline void CopyRaw(byte* to, int number_of_bytes); - - inline int GetInt(); - - bool AtEOF(); - - int position() { return position_; } - - private: - const byte* data_; - int length_; - int position_; -}; - - // The Serializer/Deserializer class is a common superclass for Serializer and // Deserializer which is used to store common constants and methods used by // both. 
@@ -215,17 +151,18 @@ protected: // Where the pointed-to object can be found: enum Where { - kNewObject = 0, // Object is next in snapshot. + kNewObject = 0, // Object is next in snapshot. // 1-6 One per space. - kRootArray = 0x9, // Object is found in root array. - kPartialSnapshotCache = 0xa, // Object is in the cache. - kExternalReference = 0xb, // Pointer to an external reference. - kSkip = 0xc, // Skip n bytes. - kNop = 0xd, // Does nothing, used to pad. - // 0xe-0xf Free. - kBackref = 0x10, // Object is described relative to end. + kRootArray = 0x9, // Object is found in root array. + kPartialSnapshotCache = 0xa, // Object is in the cache. + kExternalReference = 0xb, // Pointer to an external reference. + kSkip = 0xc, // Skip n bytes. + kBuiltin = 0xd, // Builtin code object. + kAttachedReference = 0xe, // Object is described in an attached list. + kNop = 0xf, // Does nothing, used to pad. + kBackref = 0x10, // Object is described relative to end. // 0x11-0x16 One per space. - kBackrefWithSkip = 0x18, // Object is described relative to end. + kBackrefWithSkip = 0x18, // Object is described relative to end. // 0x19-0x1e One per space. // 0x20-0x3f Used by misc. tags below. kPointedToMask = 0x3f @@ -274,11 +211,11 @@ // 0x73-0x7f Repeat last word (subtract 0x72 to get the count). static const int kMaxRepeats = 0x7f - 0x72; static int CodeForRepeats(int repeats) { - ASSERT(repeats >= 1 && repeats <= kMaxRepeats); + DCHECK(repeats >= 1 && repeats <= kMaxRepeats); return 0x72 + repeats; } static int RepeatsForCode(int byte_code) { - ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f); + DCHECK(byte_code >= kConstantRepeat && byte_code <= 0x7f); return byte_code - 0x72; } static const int kRootArrayConstants = 0xa0; @@ -296,26 +233,6 @@ }; -int SnapshotByteSource::GetInt() { - // This way of variable-length encoding integers does not suffer from branch - // mispredictions. 
- uint32_t answer = GetUnalignedInt(); - int bytes = answer & 3; - Advance(bytes); - uint32_t mask = 0xffffffffu; - mask >>= 32 - (bytes << 3); - answer &= mask; - answer >>= 2; - return answer; -} - - -void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { - OS::MemCopy(to, data_ + position_, number_of_bytes); - position_ += number_of_bytes; -} - - // A Deserializer reads a snapshot and reconstructs the Object graph it defines. class Deserializer: public SerializerDeserializer { public: @@ -331,11 +248,21 @@ void DeserializePartial(Isolate* isolate, Object** root); void set_reservation(int space_number, int reservation) { - ASSERT(space_number >= 0); - ASSERT(space_number <= LAST_SPACE); + DCHECK(space_number >= 0); + DCHECK(space_number <= LAST_SPACE); reservations_[space_number] = reservation; } + void FlushICacheForNewCodeObjects(); + + // Serialized user code reference certain objects that are provided in a list + // By calling this method, we assume that we are deserializing user code. + void SetAttachedObjects(Vector<Object*>* attached_objects) { + attached_objects_ = attached_objects; + } + + bool deserializing_user_code() { return attached_objects_ != NULL; } + private: virtual void VisitPointers(Object** start, Object** end); @@ -356,16 +283,16 @@ Object** start, Object** end, int space, Address object_address); void ReadObject(int space_number, Object** write_back); + // Special handling for serialized code like hooking up internalized strings. + HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj); + Object* ProcessBackRefInSerializedCode(Object* obj); + // This routine both allocates a new object, and also keeps // track of where objects have been allocated so that we can // fix back references when deserializing. 
Address Allocate(int space_index, int size) { Address address = high_water_[space_index]; high_water_[space_index] = address + size; - HeapProfiler* profiler = isolate_->heap_profiler(); - if (profiler->is_tracking_allocations()) { - profiler->AllocationEvent(address, size); - } return address; } @@ -377,11 +304,12 @@ return HeapObject::FromAddress(high_water_[space] - offset); } - void FlushICacheForNewCodeObjects(); - // Cached current isolate. Isolate* isolate_; + // Objects from the attached object descriptions in the serialized user code. + Vector<Object*>* attached_objects_; + SnapshotByteSource* source_; // This is the address of the next object that will be allocated in each // space. It is used to calculate the addresses of back-references. @@ -396,25 +324,13 @@ }; -class SnapshotByteSink { - public: - virtual ~SnapshotByteSink() { } - virtual void Put(int byte, const char* description) = 0; - virtual void PutSection(int byte, const char* description) { - Put(byte, description); - } - void PutInt(uintptr_t integer, const char* description); - virtual int Position() = 0; -}; - - // Mapping objects to their location after deserialization. // This is used during building, but not at runtime by V8. 
class SerializationAddressMapper { public: SerializationAddressMapper() : no_allocation_(), - serialization_map_(new HashMap(&SerializationMatchFun)) { } + serialization_map_(new HashMap(HashMap::PointersMatch)) { } ~SerializationAddressMapper() { delete serialization_map_; @@ -425,23 +341,19 @@ } int MappedTo(HeapObject* obj) { - ASSERT(IsMapped(obj)); + DCHECK(IsMapped(obj)); return static_cast<int>(reinterpret_cast<intptr_t>( serialization_map_->Lookup(Key(obj), Hash(obj), false)->value)); } void AddMapping(HeapObject* obj, int to) { - ASSERT(!IsMapped(obj)); + DCHECK(!IsMapped(obj)); HashMap::Entry* entry = serialization_map_->Lookup(Key(obj), Hash(obj), true); entry->value = Value(to); } private: - static bool SerializationMatchFun(void* key1, void* key2) { - return key1 == key2; - } - static uint32_t Hash(HeapObject* obj) { return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address())); } @@ -470,19 +382,13 @@ void VisitPointers(Object** start, Object** end); // You can call this after serialization to find out how much space was used // in each space. - int CurrentAllocationAddress(int space) { - ASSERT(space < kNumberOfSpaces); + int CurrentAllocationAddress(int space) const { + DCHECK(space < kNumberOfSpaces); return fullness_[space]; } Isolate* isolate() const { return isolate_; } - static void Enable(Isolate* isolate); - static void Disable(); - // Call this when you have made use of the fact that there is no serialization - // going on. 
- static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } - static bool enabled() { return serialization_enabled_; } SerializationAddressMapper* address_mapper() { return &address_mapper_; } void PutRoot(int index, HeapObject* object, @@ -494,10 +400,9 @@ static const int kInvalidRootIndex = -1; int RootIndex(HeapObject* heap_object, HowToCode from); - virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0; intptr_t root_index_wave_front() { return root_index_wave_front_; } void set_root_index_wave_front(intptr_t value) { - ASSERT(value >= root_index_wave_front_); + DCHECK(value >= root_index_wave_front_); root_index_wave_front_ = value; } @@ -580,9 +485,7 @@ int fullness_[LAST_SPACE + 1]; SnapshotByteSink* sink_; ExternalReferenceEncoder* external_reference_encoder_; - static bool serialization_enabled_; - // Did we already make use of the fact that serialization was not enabled? - static bool too_late_to_enable_now_; + SerializationAddressMapper address_mapper_; intptr_t root_index_wave_front_; void Pad(); @@ -590,8 +493,12 @@ friend class ObjectSerializer; friend class Deserializer; + // We may not need the code address map for logging for every instance + // of the serializer. Initialize it on demand. + void InitializeCodeAddressMap(); + private: - static CodeAddressMap* code_address_map_; + CodeAddressMap* code_address_map_; DISALLOW_COPY_AND_ASSIGN(Serializer); }; @@ -604,23 +511,24 @@ : Serializer(isolate, sink), startup_serializer_(startup_snapshot_serializer) { set_root_index_wave_front(Heap::kStrongRootListLength); + InitializeCodeAddressMap(); } // Serialize the objects reachable from a single object pointer. 
- virtual void Serialize(Object** o); + void Serialize(Object** o); virtual void SerializeObject(Object* o, HowToCode how_to_code, WhereToPoint where_to_point, int skip); - protected: - virtual int PartialSnapshotCacheIndex(HeapObject* o); - virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { + private: + int PartialSnapshotCacheIndex(HeapObject* o); + bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { // Scripts should be referred only through shared function infos. We can't // allow them to be part of the partial snapshot because they contain a // unique ID, and deserializing several partial snapshots containing script // would cause dupes. - ASSERT(!o->IsScript()); + DCHECK(!o->IsScript()); return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() || o->IsCode() || o->IsScopeInfo() || @@ -628,7 +536,7 @@ startup_serializer_->isolate()->heap()->fixed_cow_array_map(); } - private: + Serializer* startup_serializer_; DISALLOW_COPY_AND_ASSIGN(PartialSerializer); }; @@ -643,6 +551,7 @@ // which will repopulate the cache with objects needed by that partial // snapshot. isolate->set_serialize_partial_snapshot_cache_length(0); + InitializeCodeAddressMap(); } // Serialize the current state of the heap. The order is: // 1) Strong references. 
@@ -661,12 +570,110 @@ } private: - virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { - return false; + DISALLOW_COPY_AND_ASSIGN(StartupSerializer); +}; + + +class CodeSerializer : public Serializer { + public: + CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source) + : Serializer(isolate, sink), source_(source) { + set_root_index_wave_front(Heap::kStrongRootListLength); + InitializeCodeAddressMap(); } + + static ScriptData* Serialize(Isolate* isolate, + Handle<SharedFunctionInfo> info, + Handle<String> source); + + virtual void SerializeObject(Object* o, HowToCode how_to_code, + WhereToPoint where_to_point, int skip); + + static Handle<SharedFunctionInfo> Deserialize(Isolate* isolate, + ScriptData* data, + Handle<String> source); + + static const int kSourceObjectIndex = 0; + + String* source() { + DCHECK(!AllowHeapAllocation::IsAllowed()); + return source_; + } + + private: + void SerializeBuiltin(Code* builtin, HowToCode how_to_code, + WhereToPoint where_to_point, int skip); + void SerializeSourceObject(HowToCode how_to_code, WhereToPoint where_to_point, + int skip); + + DisallowHeapAllocation no_gc_; + String* source_; + DISALLOW_COPY_AND_ASSIGN(CodeSerializer); }; +// Wrapper around ScriptData to provide code-serializer-specific functionality. +class SerializedCodeData { + public: + // Used by when consuming. + explicit SerializedCodeData(ScriptData* data, String* source) + : script_data_(data), owns_script_data_(false) { + DisallowHeapAllocation no_gc; + CHECK(IsSane(source)); + } + + // Used when producing. + SerializedCodeData(List<byte>* payload, CodeSerializer* cs); + + ~SerializedCodeData() { + if (owns_script_data_) delete script_data_; + } + + // Return ScriptData object and relinquish ownership over it to the caller. 
+ ScriptData* GetScriptData() { + ScriptData* result = script_data_; + script_data_ = NULL; + DCHECK(owns_script_data_); + owns_script_data_ = false; + return result; + } + + const byte* Payload() const { + return script_data_->data() + kHeaderEntries * kIntSize; + } + + int PayloadLength() const { + return script_data_->length() - kHeaderEntries * kIntSize; + } + + int GetReservation(int space) const { + return GetHeaderValue(kReservationsOffset + space); + } + + private: + void SetHeaderValue(int offset, int value) { + reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] = + value; + } + + int GetHeaderValue(int offset) const { + return reinterpret_cast<const int*>(script_data_->data())[offset]; + } + + bool IsSane(String* source); + + int CheckSum(String* source); + + // The data header consists of int-sized entries: + // [0] version hash + // [1..7] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE. + static const int kCheckSumOffset = 0; + static const int kReservationsOffset = 1; + static const int kHeaderEntries = 8; + + ScriptData* script_data_; + bool owns_script_data_; +}; } } // namespace v8::internal #endif // V8_SERIALIZE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/simulator.h nodejs-0.11.15/deps/v8/src/simulator.h --- nodejs-0.11.13/deps/v8/src/simulator.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/simulator.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,24 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_SIMULATOR_H_ #define V8_SIMULATOR_H_ #if V8_TARGET_ARCH_IA32 -#include "ia32/simulator-ia32.h" +#include "src/ia32/simulator-ia32.h" #elif V8_TARGET_ARCH_X64 -#include "x64/simulator-x64.h" +#include "src/x64/simulator-x64.h" #elif V8_TARGET_ARCH_ARM64 -#include "arm64/simulator-arm64.h" +#include "src/arm64/simulator-arm64.h" #elif V8_TARGET_ARCH_ARM -#include "arm/simulator-arm.h" +#include "src/arm/simulator-arm.h" #elif V8_TARGET_ARCH_MIPS -#include "mips/simulator-mips.h" +#include "src/mips/simulator-mips.h" +#elif V8_TARGET_ARCH_MIPS64 +#include "src/mips64/simulator-mips64.h" +#elif V8_TARGET_ARCH_X87 +#include "src/x87/simulator-x87.h" #else #error Unsupported target architecture. #endif diff -Nru nodejs-0.11.13/deps/v8/src/small-pointer-list.h nodejs-0.11.15/deps/v8/src/small-pointer-list.h --- nodejs-0.11.13/deps/v8/src/small-pointer-list.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/small-pointer-list.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SMALL_POINTER_LIST_H_ #define V8_SMALL_POINTER_LIST_H_ -#include "checks.h" -#include "v8globals.h" -#include "zone.h" +#include "src/base/logging.h" +#include "src/globals.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -61,7 +38,7 @@ if ((data_ & kTagMask) == kSingletonTag) { list->Add(single_value(), zone); } - ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment)); + DCHECK(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment)); data_ = reinterpret_cast<intptr_t>(list) | kListTag; } @@ -84,7 +61,7 @@ } void Add(T* pointer, Zone* zone) { - ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment)); + DCHECK(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment)); if ((data_ & kTagMask) == kEmptyTag) { data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag; return; @@ -93,7 +70,7 @@ PointerList* list = new(zone) PointerList(2, zone); list->Add(single_value(), zone); list->Add(pointer, zone); - ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment)); + 
DCHECK(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment)); data_ = reinterpret_cast<intptr_t>(list) | kListTag; return; } @@ -103,9 +80,9 @@ // Note: returns T* and not T*& (unlike List from list.h). // This makes the implementation simpler and more const correct. T* at(int i) const { - ASSERT((data_ & kTagMask) != kEmptyTag); + DCHECK((data_ & kTagMask) != kEmptyTag); if ((data_ & kTagMask) == kSingletonTag) { - ASSERT(i == 0); + DCHECK(i == 0); return single_value(); } return list()->at(i); @@ -127,7 +104,7 @@ } T* RemoveLast() { - ASSERT((data_ & kTagMask) != kEmptyTag); + DCHECK((data_ & kTagMask) != kEmptyTag); if ((data_ & kTagMask) == kSingletonTag) { T* result = single_value(); data_ = kEmptyTag; @@ -138,11 +115,11 @@ void Rewind(int pos) { if ((data_ & kTagMask) == kEmptyTag) { - ASSERT(pos == 0); + DCHECK(pos == 0); return; } if ((data_ & kTagMask) == kSingletonTag) { - ASSERT(pos == 0 || pos == 1); + DCHECK(pos == 0 || pos == 1); if (pos == 0) { data_ = kEmptyTag; } @@ -178,13 +155,13 @@ STATIC_ASSERT(kTagMask + 1 <= kPointerAlignment); T* single_value() const { - ASSERT((data_ & kTagMask) == kSingletonTag); + DCHECK((data_ & kTagMask) == kSingletonTag); STATIC_ASSERT(kSingletonTag == 0); return reinterpret_cast<T*>(data_); } PointerList* list() const { - ASSERT((data_ & kTagMask) == kListTag); + DCHECK((data_ & kTagMask) == kListTag); return reinterpret_cast<PointerList*>(data_ & kValueMask); } diff -Nru nodejs-0.11.13/deps/v8/src/smart-pointers.h nodejs-0.11.15/deps/v8/src/smart-pointers.h --- nodejs-0.11.13/deps/v8/src/smart-pointers.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/smart-pointers.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SMART_POINTERS_H_ #define V8_SMART_POINTERS_H_ @@ -79,7 +56,7 @@ } void Reset(T* new_value) { - ASSERT(p_ == NULL || p_ != new_value); + DCHECK(p_ == NULL || p_ != new_value); if (p_) Deallocator::Delete(p_); p_ = new_value; } @@ -89,7 +66,7 @@ // double freeing. 
SmartPointerBase<Deallocator, T>& operator=( const SmartPointerBase<Deallocator, T>& rhs) { - ASSERT(is_empty()); + DCHECK(is_empty()); T* tmp = rhs.p_; // swap to handle self-assignment const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL; p_ = tmp; diff -Nru nodejs-0.11.13/deps/v8/src/snapshot-common.cc nodejs-0.11.15/deps/v8/src/snapshot-common.cc --- nodejs-0.11.13/deps/v8/src/snapshot-common.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/snapshot-common.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,80 +1,19 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // The common functionality when building with or without snapshots. -#include "v8.h" +#include "src/v8.h" -#include "api.h" -#include "serialize.h" -#include "snapshot.h" -#include "platform.h" +#include "src/api.h" +#include "src/base/platform/platform.h" +#include "src/serialize.h" +#include "src/snapshot.h" namespace v8 { namespace internal { - -static void ReserveSpaceForSnapshot(Deserializer* deserializer, - const char* file_name) { - int file_name_length = StrLength(file_name) + 10; - Vector<char> name = Vector<char>::New(file_name_length + 1); - OS::SNPrintF(name, "%s.size", file_name); - FILE* fp = OS::FOpen(name.start(), "r"); - CHECK_NE(NULL, fp); - int new_size, pointer_size, data_size, code_size, map_size, cell_size, - property_cell_size; -#ifdef _MSC_VER - // Avoid warning about unsafe fscanf from MSVC. - // Please note that this is only fine if %c and %s are not being used. 
-#define fscanf fscanf_s -#endif - CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size)); - CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size)); - CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size)); - CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size)); - CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size)); - CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size)); - CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size)); -#ifdef _MSC_VER -#undef fscanf -#endif - fclose(fp); - deserializer->set_reservation(NEW_SPACE, new_size); - deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size); - deserializer->set_reservation(OLD_DATA_SPACE, data_size); - deserializer->set_reservation(CODE_SPACE, code_size); - deserializer->set_reservation(MAP_SPACE, map_size); - deserializer->set_reservation(CELL_SPACE, cell_size); - deserializer->set_reservation(PROPERTY_CELL_SPACE, - property_cell_size); - name.Dispose(); -} - - void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) { deserializer->set_reservation(NEW_SPACE, new_space_used_); deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_); @@ -87,22 +26,9 @@ } -bool Snapshot::Initialize(const char* snapshot_file) { - if (snapshot_file) { - int len; - byte* str = ReadBytes(snapshot_file, &len); - if (!str) return false; - bool success; - { - SnapshotByteSource source(str, len); - Deserializer deserializer(&source); - ReserveSpaceForSnapshot(&deserializer, snapshot_file); - success = V8::Initialize(&deserializer); - } - DeleteArray(str); - return success; - } else if (size_ > 0) { - ElapsedTimer timer; +bool Snapshot::Initialize() { + if (size_ > 0) { + base::ElapsedTimer timer; if (FLAG_profile_deserialization) { timer.Start(); } @@ -146,4 +72,15 @@ return Handle<Context>(Context::cast(root)); } + +#ifdef V8_USE_EXTERNAL_STARTUP_DATA +// Dummy implementations of Set*FromFile(..) APIs. +// +// These are meant for use with snapshot-external.cc. 
Should this file +// be compiled with those options we just supply these dummy implementations +// below. This happens when compiling the mksnapshot utility. +void SetNativesFromFile(StartupData* data) { CHECK(false); } +void SetSnapshotFromFile(StartupData* data) { CHECK(false); } +#endif // V8_USE_EXTERNAL_STARTUP_DATA + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/snapshot-empty.cc nodejs-0.11.15/deps/v8/src/snapshot-empty.cc --- nodejs-0.11.13/deps/v8/src/snapshot-empty.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/snapshot-empty.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // Used for building without snapshots. -#include "v8.h" +#include "src/v8.h" -#include "snapshot.h" +#include "src/snapshot.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/snapshot-external.cc nodejs-0.11.15/deps/v8/src/snapshot-external.cc --- nodejs-0.11.13/deps/v8/src/snapshot-external.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/snapshot-external.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,140 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Used for building with external snapshots. 
+ +#include "src/snapshot.h" + +#include "src/serialize.h" +#include "src/snapshot-source-sink.h" +#include "src/v8.h" // for V8::Initialize + +namespace v8 { +namespace internal { + + +struct SnapshotImpl { + public: + const byte* data; + int size; + int new_space_used; + int pointer_space_used; + int data_space_used; + int code_space_used; + int map_space_used; + int cell_space_used; + int property_cell_space_used; + + const byte* context_data; + int context_size; + int context_new_space_used; + int context_pointer_space_used; + int context_data_space_used; + int context_code_space_used; + int context_map_space_used; + int context_cell_space_used; + int context_property_cell_space_used; +}; + + +static SnapshotImpl* snapshot_impl_ = NULL; + + +bool Snapshot::HaveASnapshotToStartFrom() { + return snapshot_impl_ != NULL; +} + + +bool Snapshot::Initialize() { + if (!HaveASnapshotToStartFrom()) + return false; + + base::ElapsedTimer timer; + if (FLAG_profile_deserialization) { + timer.Start(); + } + SnapshotByteSource source(snapshot_impl_->data, snapshot_impl_->size); + Deserializer deserializer(&source); + deserializer.set_reservation(NEW_SPACE, snapshot_impl_->new_space_used); + deserializer.set_reservation(OLD_POINTER_SPACE, + snapshot_impl_->pointer_space_used); + deserializer.set_reservation(OLD_DATA_SPACE, + snapshot_impl_->data_space_used); + deserializer.set_reservation(CODE_SPACE, snapshot_impl_->code_space_used); + deserializer.set_reservation(MAP_SPACE, snapshot_impl_->map_space_used); + deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used); + deserializer.set_reservation(PROPERTY_CELL_SPACE, + snapshot_impl_->property_cell_space_used); + bool success = V8::Initialize(&deserializer); + if (FLAG_profile_deserialization) { + double ms = timer.Elapsed().InMillisecondsF(); + PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms); + } + return success; +} + + +Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) 
{ + if (!HaveASnapshotToStartFrom()) + return Handle<Context>(); + + SnapshotByteSource source(snapshot_impl_->context_data, + snapshot_impl_->context_size); + Deserializer deserializer(&source); + deserializer.set_reservation(NEW_SPACE, + snapshot_impl_->context_new_space_used); + deserializer.set_reservation(OLD_POINTER_SPACE, + snapshot_impl_->context_pointer_space_used); + deserializer.set_reservation(OLD_DATA_SPACE, + snapshot_impl_->context_data_space_used); + deserializer.set_reservation(CODE_SPACE, + snapshot_impl_->context_code_space_used); + deserializer.set_reservation(MAP_SPACE, + snapshot_impl_->context_map_space_used); + deserializer.set_reservation(CELL_SPACE, + snapshot_impl_->context_cell_space_used); + deserializer.set_reservation(PROPERTY_CELL_SPACE, + snapshot_impl_-> + context_property_cell_space_used); + Object* root; + deserializer.DeserializePartial(isolate, &root); + CHECK(root->IsContext()); + return Handle<Context>(Context::cast(root)); +} + + +void SetSnapshotFromFile(StartupData* snapshot_blob) { + DCHECK(snapshot_blob); + DCHECK(snapshot_blob->data); + DCHECK(snapshot_blob->raw_size > 0); + DCHECK(!snapshot_impl_); + + snapshot_impl_ = new SnapshotImpl; + SnapshotByteSource source(reinterpret_cast<const byte*>(snapshot_blob->data), + snapshot_blob->raw_size); + + bool success = source.GetBlob(&snapshot_impl_->data, + &snapshot_impl_->size); + snapshot_impl_->new_space_used = source.GetInt(); + snapshot_impl_->pointer_space_used = source.GetInt(); + snapshot_impl_->data_space_used = source.GetInt(); + snapshot_impl_->code_space_used = source.GetInt(); + snapshot_impl_->map_space_used = source.GetInt(); + snapshot_impl_->cell_space_used = source.GetInt(); + snapshot_impl_->property_cell_space_used = source.GetInt(); + + success &= source.GetBlob(&snapshot_impl_->context_data, + &snapshot_impl_->context_size); + snapshot_impl_->context_new_space_used = source.GetInt(); + snapshot_impl_->context_pointer_space_used = source.GetInt(); + 
snapshot_impl_->context_data_space_used = source.GetInt(); + snapshot_impl_->context_code_space_used = source.GetInt(); + snapshot_impl_->context_map_space_used = source.GetInt(); + snapshot_impl_->context_cell_space_used = source.GetInt(); + snapshot_impl_->context_property_cell_space_used = source.GetInt(); + + DCHECK(success); +} + +} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/snapshot.h nodejs-0.11.15/deps/v8/src/snapshot.h --- nodejs-0.11.13/deps/v8/src/snapshot.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/snapshot.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,31 +1,8 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "isolate.h" +#include "src/isolate.h" #ifndef V8_SNAPSHOT_H_ #define V8_SNAPSHOT_H_ @@ -35,23 +12,16 @@ class Snapshot { public: - // Initialize the VM from the given snapshot file. If snapshot_file is - // NULL, use the internal snapshot instead. Returns false if no snapshot + // Initialize the VM from the internal snapshot. Returns false if no snapshot // could be found. - static bool Initialize(const char* snapshot_file = NULL); + static bool Initialize(); static bool HaveASnapshotToStartFrom(); // Create a new context using the internal partial snapshot. static Handle<Context> NewContextFromSnapshot(Isolate* isolate); - // Returns whether or not the snapshot is enabled. - static bool IsEnabled() { return size_ != 0; } - - // Write snapshot to the given file. Returns true if snapshot was written - // successfully. - static bool WriteToFile(const char* snapshot_file); - + // These methods support COMPRESS_STARTUP_DATA_BZ2. 
static const byte* data() { return data_; } static int size() { return size_; } static int raw_size() { return raw_size_; } @@ -95,6 +65,10 @@ DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot); }; +#ifdef V8_USE_EXTERNAL_STARTUP_DATA +void SetSnapshotFromFile(StartupData* snapshot_blob); +#endif + } } // namespace v8::internal #endif // V8_SNAPSHOT_H_ diff -Nru nodejs-0.11.13/deps/v8/src/snapshot-source-sink.cc nodejs-0.11.15/deps/v8/src/snapshot-source-sink.cc --- nodejs-0.11.13/deps/v8/src/snapshot-source-sink.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/snapshot-source-sink.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,101 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + +#include "src/snapshot-source-sink.h" + +#include "src/base/logging.h" +#include "src/handles-inl.h" +#include "src/serialize.h" // for SerializerDeserializer::nop() in AtEOF() + + +namespace v8 { +namespace internal { + + +SnapshotByteSource::SnapshotByteSource(const byte* array, int length) + : data_(array), length_(length), position_(0) { +} + + +SnapshotByteSource::~SnapshotByteSource() { } + + +int32_t SnapshotByteSource::GetUnalignedInt() { + DCHECK(position_ < length_); // Require at least one byte left. 
+#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN + int32_t answer = *reinterpret_cast<const int32_t*>(data_ + position_); +#else + int32_t answer = data_[position_]; + answer |= data_[position_ + 1] << 8; + answer |= data_[position_ + 2] << 16; + answer |= data_[position_ + 3] << 24; +#endif + return answer; +} + + +void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { + MemCopy(to, data_ + position_, number_of_bytes); + position_ += number_of_bytes; +} + + +void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { + DCHECK(integer < 1 << 22); + integer <<= 2; + int bytes = 1; + if (integer > 0xff) bytes = 2; + if (integer > 0xffff) bytes = 3; + integer |= bytes; + Put(static_cast<int>(integer & 0xff), "IntPart1"); + if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2"); + if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3"); +} + +void SnapshotByteSink::PutRaw(byte* data, int number_of_bytes, + const char* description) { + for (int i = 0; i < number_of_bytes; ++i) { + Put(data[i], description); + } +} + +void SnapshotByteSink::PutBlob(byte* data, int number_of_bytes, + const char* description) { + PutInt(number_of_bytes, description); + PutRaw(data, number_of_bytes, description); +} + + +bool SnapshotByteSource::AtEOF() { + if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false; + for (int x = position_; x < length_; x++) { + if (data_[x] != SerializerDeserializer::nop()) return false; + } + return true; +} + + +bool SnapshotByteSource::GetBlob(const byte** data, int* number_of_bytes) { + int size = GetInt(); + *number_of_bytes = size; + + if (position_ + size < length_) { + *data = &data_[position_]; + Advance(size); + return true; + } else { + Advance(length_ - position_); // proceed until end. 
+ return false; + } +} + + +void DebugSnapshotSink::Put(byte b, const char* description) { + PrintF("%24s: %x\n", description, b); + sink_->Put(b, description); +} + +} // namespace v8::internal +} // namespace v8 diff -Nru nodejs-0.11.13/deps/v8/src/snapshot-source-sink.h nodejs-0.11.15/deps/v8/src/snapshot-source-sink.h --- nodejs-0.11.13/deps/v8/src/snapshot-source-sink.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/snapshot-source-sink.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,125 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_SNAPSHOT_SOURCE_SINK_H_ +#define V8_SNAPSHOT_SOURCE_SINK_H_ + +#include "src/base/logging.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { + + +/** + * Source to read snapshot and builtins files from. + * + * Note: Memory ownership remains with callee. + */ +class SnapshotByteSource V8_FINAL { + public: + SnapshotByteSource(const byte* array, int length); + ~SnapshotByteSource(); + + bool HasMore() { return position_ < length_; } + + int Get() { + DCHECK(position_ < length_); + return data_[position_++]; + } + + int32_t GetUnalignedInt(); + + void Advance(int by) { position_ += by; } + + void CopyRaw(byte* to, int number_of_bytes); + + inline int GetInt() { + // This way of variable-length encoding integers does not suffer from branch + // mispredictions. + uint32_t answer = GetUnalignedInt(); + int bytes = answer & 3; + Advance(bytes); + uint32_t mask = 0xffffffffu; + mask >>= 32 - (bytes << 3); + answer &= mask; + answer >>= 2; + return answer; + } + + bool GetBlob(const byte** data, int* number_of_bytes); + + bool AtEOF(); + + int position() { return position_; } + + private: + const byte* data_; + int length_; + int position_; + + DISALLOW_COPY_AND_ASSIGN(SnapshotByteSource); +}; + + +/** + * Sink to write snapshot files to. 
+ * + * Subclasses must implement actual storage or i/o. + */ +class SnapshotByteSink { + public: + virtual ~SnapshotByteSink() { } + virtual void Put(byte b, const char* description) = 0; + virtual void PutSection(int b, const char* description) { + DCHECK_LE(b, kMaxUInt8); + Put(static_cast<byte>(b), description); + } + void PutInt(uintptr_t integer, const char* description); + void PutRaw(byte* data, int number_of_bytes, const char* description); + void PutBlob(byte* data, int number_of_bytes, const char* description); + virtual int Position() = 0; +}; + + +class DummySnapshotSink : public SnapshotByteSink { + public: + DummySnapshotSink() : length_(0) {} + virtual ~DummySnapshotSink() {} + virtual void Put(byte b, const char* description) { length_++; } + virtual int Position() { return length_; } + + private: + int length_; +}; + + +// Wrap a SnapshotByteSink into a DebugSnapshotSink to get debugging output. +class DebugSnapshotSink : public SnapshotByteSink { + public: + explicit DebugSnapshotSink(SnapshotByteSink* chained) : sink_(chained) {} + virtual void Put(byte b, const char* description) V8_OVERRIDE; + virtual int Position() V8_OVERRIDE { return sink_->Position(); } + + private: + SnapshotByteSink* sink_; +}; + + +class ListSnapshotSink : public i::SnapshotByteSink { + public: + explicit ListSnapshotSink(i::List<byte>* data) : data_(data) {} + virtual void Put(byte b, const char* description) V8_OVERRIDE { + data_->Add(b); + } + virtual int Position() V8_OVERRIDE { return data_->length(); } + + private: + i::List<byte>* data_; +}; + +} // namespace v8::internal +} // namespace v8 + +#endif // V8_SNAPSHOT_SOURCE_SINK_H_ diff -Nru nodejs-0.11.13/deps/v8/src/spaces.cc nodejs-0.11.15/deps/v8/src/spaces.cc --- nodejs-0.11.13/deps/v8/src/spaces.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/spaces.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,3208 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "macro-assembler.h" -#include "mark-compact.h" -#include "msan.h" -#include "platform.h" - -namespace v8 { -namespace internal { - - -// ---------------------------------------------------------------------------- -// HeapObjectIterator - -HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { - // You can't actually iterate over the anchor page. It is not a real page, - // just an anchor for the double linked page list. 
Initialize as if we have - // reached the end of the anchor page, then the first iteration will move on - // to the first page. - Initialize(space, - NULL, - NULL, - kAllPagesInSpace, - NULL); -} - - -HeapObjectIterator::HeapObjectIterator(PagedSpace* space, - HeapObjectCallback size_func) { - // You can't actually iterate over the anchor page. It is not a real page, - // just an anchor for the double linked page list. Initialize the current - // address and end as NULL, then the first iteration will move on - // to the first page. - Initialize(space, - NULL, - NULL, - kAllPagesInSpace, - size_func); -} - - -HeapObjectIterator::HeapObjectIterator(Page* page, - HeapObjectCallback size_func) { - Space* owner = page->owner(); - ASSERT(owner == page->heap()->old_pointer_space() || - owner == page->heap()->old_data_space() || - owner == page->heap()->map_space() || - owner == page->heap()->cell_space() || - owner == page->heap()->property_cell_space() || - owner == page->heap()->code_space()); - Initialize(reinterpret_cast<PagedSpace*>(owner), - page->area_start(), - page->area_end(), - kOnePageOnly, - size_func); - ASSERT(page->WasSweptPrecisely()); -} - - -void HeapObjectIterator::Initialize(PagedSpace* space, - Address cur, Address end, - HeapObjectIterator::PageMode mode, - HeapObjectCallback size_f) { - // Check that we actually can iterate this space. - ASSERT(!space->was_swept_conservatively()); - - space_ = space; - cur_addr_ = cur; - cur_end_ = end; - page_mode_ = mode; - size_func_ = size_f; -} - - -// We have hit the end of the page and should advance to the next block of -// objects. This happens at the end of the page. 
-bool HeapObjectIterator::AdvanceToNextPage() { - ASSERT(cur_addr_ == cur_end_); - if (page_mode_ == kOnePageOnly) return false; - Page* cur_page; - if (cur_addr_ == NULL) { - cur_page = space_->anchor(); - } else { - cur_page = Page::FromAddress(cur_addr_ - 1); - ASSERT(cur_addr_ == cur_page->area_end()); - } - cur_page = cur_page->next_page(); - if (cur_page == space_->anchor()) return false; - cur_addr_ = cur_page->area_start(); - cur_end_ = cur_page->area_end(); - ASSERT(cur_page->WasSweptPrecisely()); - return true; -} - - -// ----------------------------------------------------------------------------- -// CodeRange - - -CodeRange::CodeRange(Isolate* isolate) - : isolate_(isolate), - code_range_(NULL), - free_list_(0), - allocation_list_(0), - current_allocation_block_index_(0) { -} - - -bool CodeRange::SetUp(const size_t requested) { - ASSERT(code_range_ == NULL); - - code_range_ = new VirtualMemory(requested); - CHECK(code_range_ != NULL); - if (!code_range_->IsReserved()) { - delete code_range_; - code_range_ = NULL; - return false; - } - - // We are sure that we have mapped a block of requested addresses. - ASSERT(code_range_->size() == requested); - LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); - Address base = reinterpret_cast<Address>(code_range_->address()); - Address aligned_base = - RoundUp(reinterpret_cast<Address>(code_range_->address()), - MemoryChunk::kAlignment); - size_t size = code_range_->size() - (aligned_base - base); - allocation_list_.Add(FreeBlock(aligned_base, size)); - current_allocation_block_index_ = 0; - return true; -} - - -int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, - const FreeBlock* right) { - // The entire point of CodeRange is that the difference between two - // addresses in the range can be represented as a signed 32-bit int, - // so the cast is semantically correct. 
- return static_cast<int>(left->start - right->start); -} - - -void CodeRange::GetNextAllocationBlock(size_t requested) { - for (current_allocation_block_index_++; - current_allocation_block_index_ < allocation_list_.length(); - current_allocation_block_index_++) { - if (requested <= allocation_list_[current_allocation_block_index_].size) { - return; // Found a large enough allocation block. - } - } - - // Sort and merge the free blocks on the free list and the allocation list. - free_list_.AddAll(allocation_list_); - allocation_list_.Clear(); - free_list_.Sort(&CompareFreeBlockAddress); - for (int i = 0; i < free_list_.length();) { - FreeBlock merged = free_list_[i]; - i++; - // Add adjacent free blocks to the current merged block. - while (i < free_list_.length() && - free_list_[i].start == merged.start + merged.size) { - merged.size += free_list_[i].size; - i++; - } - if (merged.size > 0) { - allocation_list_.Add(merged); - } - } - free_list_.Clear(); - - for (current_allocation_block_index_ = 0; - current_allocation_block_index_ < allocation_list_.length(); - current_allocation_block_index_++) { - if (requested <= allocation_list_[current_allocation_block_index_].size) { - return; // Found a large enough allocation block. - } - } - - // Code range is full or too fragmented. - V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); -} - - -Address CodeRange::AllocateRawMemory(const size_t requested_size, - const size_t commit_size, - size_t* allocated) { - ASSERT(commit_size <= requested_size); - ASSERT(current_allocation_block_index_ < allocation_list_.length()); - if (requested_size > allocation_list_[current_allocation_block_index_].size) { - // Find an allocation block large enough. This function call may - // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. - GetNextAllocationBlock(requested_size); - } - // Commit the requested memory at the start of the current allocation block. 
- size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); - FreeBlock current = allocation_list_[current_allocation_block_index_]; - if (aligned_requested >= (current.size - Page::kPageSize)) { - // Don't leave a small free block, useless for a large object or chunk. - *allocated = current.size; - } else { - *allocated = aligned_requested; - } - ASSERT(*allocated <= current.size); - ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); - if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_, - current.start, - commit_size, - *allocated)) { - *allocated = 0; - return NULL; - } - allocation_list_[current_allocation_block_index_].start += *allocated; - allocation_list_[current_allocation_block_index_].size -= *allocated; - if (*allocated == current.size) { - GetNextAllocationBlock(0); // This block is used up, get the next one. - } - return current.start; -} - - -bool CodeRange::CommitRawMemory(Address start, size_t length) { - return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); -} - - -bool CodeRange::UncommitRawMemory(Address start, size_t length) { - return code_range_->Uncommit(start, length); -} - - -void CodeRange::FreeRawMemory(Address address, size_t length) { - ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); - free_list_.Add(FreeBlock(address, length)); - code_range_->Uncommit(address, length); -} - - -void CodeRange::TearDown() { - delete code_range_; // Frees all memory in the virtual memory range. 
- code_range_ = NULL; - free_list_.Free(); - allocation_list_.Free(); -} - - -// ----------------------------------------------------------------------------- -// MemoryAllocator -// - -MemoryAllocator::MemoryAllocator(Isolate* isolate) - : isolate_(isolate), - capacity_(0), - capacity_executable_(0), - size_(0), - size_executable_(0), - lowest_ever_allocated_(reinterpret_cast<void*>(-1)), - highest_ever_allocated_(reinterpret_cast<void*>(0)) { -} - - -bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { - capacity_ = RoundUp(capacity, Page::kPageSize); - capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); - ASSERT_GE(capacity_, capacity_executable_); - - size_ = 0; - size_executable_ = 0; - - return true; -} - - -void MemoryAllocator::TearDown() { - // Check that spaces were torn down before MemoryAllocator. - ASSERT(size_ == 0); - // TODO(gc) this will be true again when we fix FreeMemory. - // ASSERT(size_executable_ == 0); - capacity_ = 0; - capacity_executable_ = 0; -} - - -bool MemoryAllocator::CommitMemory(Address base, - size_t size, - Executability executable) { - if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) { - return false; - } - UpdateAllocatedSpaceLimits(base, base + size); - return true; -} - - -void MemoryAllocator::FreeMemory(VirtualMemory* reservation, - Executability executable) { - // TODO(gc) make code_range part of memory allocator? - ASSERT(reservation->IsReserved()); - size_t size = reservation->size(); - ASSERT(size_ >= size); - size_ -= size; - - isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); - - if (executable == EXECUTABLE) { - ASSERT(size_executable_ >= size); - size_executable_ -= size; - } - // Code which is part of the code-range does not have its own VirtualMemory. 
- ASSERT(!isolate_->code_range()->contains( - static_cast<Address>(reservation->address()))); - ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); - reservation->Release(); -} - - -void MemoryAllocator::FreeMemory(Address base, - size_t size, - Executability executable) { - // TODO(gc) make code_range part of memory allocator? - ASSERT(size_ >= size); - size_ -= size; - - isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); - - if (executable == EXECUTABLE) { - ASSERT(size_executable_ >= size); - size_executable_ -= size; - } - if (isolate_->code_range()->contains(static_cast<Address>(base))) { - ASSERT(executable == EXECUTABLE); - isolate_->code_range()->FreeRawMemory(base, size); - } else { - ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); - bool result = VirtualMemory::ReleaseRegion(base, size); - USE(result); - ASSERT(result); - } -} - - -Address MemoryAllocator::ReserveAlignedMemory(size_t size, - size_t alignment, - VirtualMemory* controller) { - VirtualMemory reservation(size, alignment); - - if (!reservation.IsReserved()) return NULL; - size_ += reservation.size(); - Address base = RoundUp(static_cast<Address>(reservation.address()), - alignment); - controller->TakeControl(&reservation); - return base; -} - - -Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size, - size_t commit_size, - size_t alignment, - Executability executable, - VirtualMemory* controller) { - ASSERT(commit_size <= reserve_size); - VirtualMemory reservation; - Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); - if (base == NULL) return NULL; - - if (executable == EXECUTABLE) { - if (!CommitExecutableMemory(&reservation, - base, - commit_size, - reserve_size)) { - base = NULL; - } - } else { - if (reservation.Commit(base, commit_size, false)) { - UpdateAllocatedSpaceLimits(base, base + commit_size); - } else { - base = NULL; - } - } - - if (base == NULL) { - // Failed to 
commit the body. Release the mapping and any partially - // commited regions inside it. - reservation.Release(); - return NULL; - } - - controller->TakeControl(&reservation); - return base; -} - - -void Page::InitializeAsAnchor(PagedSpace* owner) { - set_owner(owner); - set_prev_page(this); - set_next_page(this); -} - - -NewSpacePage* NewSpacePage::Initialize(Heap* heap, - Address start, - SemiSpace* semi_space) { - Address area_start = start + NewSpacePage::kObjectStartOffset; - Address area_end = start + Page::kPageSize; - - MemoryChunk* chunk = MemoryChunk::Initialize(heap, - start, - Page::kPageSize, - area_start, - area_end, - NOT_EXECUTABLE, - semi_space); - chunk->set_next_chunk(NULL); - chunk->set_prev_chunk(NULL); - chunk->initialize_scan_on_scavenge(true); - bool in_to_space = (semi_space->id() != kFromSpace); - chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE - : MemoryChunk::IN_FROM_SPACE); - ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE - : MemoryChunk::IN_TO_SPACE)); - NewSpacePage* page = static_cast<NewSpacePage*>(chunk); - heap->incremental_marking()->SetNewSpacePageFlags(page); - return page; -} - - -void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { - set_owner(semi_space); - set_next_chunk(this); - set_prev_chunk(this); - // Flags marks this invalid page as not being in new-space. - // All real new-space pages will be in new-space. 
- SetFlags(0, ~0); -} - - -MemoryChunk* MemoryChunk::Initialize(Heap* heap, - Address base, - size_t size, - Address area_start, - Address area_end, - Executability executable, - Space* owner) { - MemoryChunk* chunk = FromAddress(base); - - ASSERT(base == chunk->address()); - - chunk->heap_ = heap; - chunk->size_ = size; - chunk->area_start_ = area_start; - chunk->area_end_ = area_end; - chunk->flags_ = 0; - chunk->set_owner(owner); - chunk->InitializeReservedMemory(); - chunk->slots_buffer_ = NULL; - chunk->skip_list_ = NULL; - chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; - chunk->progress_bar_ = 0; - chunk->high_water_mark_ = static_cast<int>(area_start - base); - chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE); - chunk->available_in_small_free_list_ = 0; - chunk->available_in_medium_free_list_ = 0; - chunk->available_in_large_free_list_ = 0; - chunk->available_in_huge_free_list_ = 0; - chunk->non_available_small_blocks_ = 0; - chunk->ResetLiveBytes(); - Bitmap::Clear(chunk); - chunk->initialize_scan_on_scavenge(false); - chunk->SetFlag(WAS_SWEPT_PRECISELY); - - ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); - ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); - - if (executable == EXECUTABLE) { - chunk->SetFlag(IS_EXECUTABLE); - } - - if (owner == heap->old_data_space()) { - chunk->SetFlag(CONTAINS_ONLY_DATA); - } - - return chunk; -} - - -// Commit MemoryChunk area to the requested size. -bool MemoryChunk::CommitArea(size_t requested) { - size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? - MemoryAllocator::CodePageGuardSize() : 0; - size_t header_size = area_start() - address() - guard_size; - size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize()); - size_t committed_size = RoundUp(header_size + (area_end() - area_start()), - OS::CommitPageSize()); - - if (commit_size > committed_size) { - // Commit size should be less or equal than the reserved size. 
- ASSERT(commit_size <= size() - 2 * guard_size); - // Append the committed area. - Address start = address() + committed_size + guard_size; - size_t length = commit_size - committed_size; - if (reservation_.IsReserved()) { - Executability executable = IsFlagSet(IS_EXECUTABLE) - ? EXECUTABLE : NOT_EXECUTABLE; - if (!heap()->isolate()->memory_allocator()->CommitMemory( - start, length, executable)) { - return false; - } - } else { - CodeRange* code_range = heap_->isolate()->code_range(); - ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE)); - if (!code_range->CommitRawMemory(start, length)) return false; - } - - if (Heap::ShouldZapGarbage()) { - heap_->isolate()->memory_allocator()->ZapBlock(start, length); - } - } else if (commit_size < committed_size) { - ASSERT(commit_size > 0); - // Shrink the committed area. - size_t length = committed_size - commit_size; - Address start = address() + committed_size + guard_size - length; - if (reservation_.IsReserved()) { - if (!reservation_.Uncommit(start, length)) return false; - } else { - CodeRange* code_range = heap_->isolate()->code_range(); - ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE)); - if (!code_range->UncommitRawMemory(start, length)) return false; - } - } - - area_end_ = area_start_ + requested; - return true; -} - - -void MemoryChunk::InsertAfter(MemoryChunk* other) { - MemoryChunk* other_next = other->next_chunk(); - - set_next_chunk(other_next); - set_prev_chunk(other); - other_next->set_prev_chunk(this); - other->set_next_chunk(this); -} - - -void MemoryChunk::Unlink() { - if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { - heap_->decrement_scan_on_scavenge_pages(); - ClearFlag(SCAN_ON_SCAVENGE); - } - MemoryChunk* next_element = next_chunk(); - MemoryChunk* prev_element = prev_chunk(); - next_element->set_prev_chunk(prev_element); - prev_element->set_next_chunk(next_element); - set_prev_chunk(NULL); - set_next_chunk(NULL); -} - - -MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t 
reserve_area_size, - intptr_t commit_area_size, - Executability executable, - Space* owner) { - ASSERT(commit_area_size <= reserve_area_size); - - size_t chunk_size; - Heap* heap = isolate_->heap(); - Address base = NULL; - VirtualMemory reservation; - Address area_start = NULL; - Address area_end = NULL; - - // - // MemoryChunk layout: - // - // Executable - // +----------------------------+<- base aligned with MemoryChunk::kAlignment - // | Header | - // +----------------------------+<- base + CodePageGuardStartOffset - // | Guard | - // +----------------------------+<- area_start_ - // | Area | - // +----------------------------+<- area_end_ (area_start + commit_area_size) - // | Committed but not used | - // +----------------------------+<- aligned at OS page boundary - // | Reserved but not committed | - // +----------------------------+<- aligned at OS page boundary - // | Guard | - // +----------------------------+<- base + chunk_size - // - // Non-executable - // +----------------------------+<- base aligned with MemoryChunk::kAlignment - // | Header | - // +----------------------------+<- area_start_ (base + kObjectStartOffset) - // | Area | - // +----------------------------+<- area_end_ (area_start + commit_area_size) - // | Committed but not used | - // +----------------------------+<- aligned at OS page boundary - // | Reserved but not committed | - // +----------------------------+<- base + chunk_size - // - - if (executable == EXECUTABLE) { - chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, - OS::CommitPageSize()) + CodePageGuardSize(); - - // Check executable memory limit. - if (size_executable_ + chunk_size > capacity_executable_) { - LOG(isolate_, - StringEvent("MemoryAllocator::AllocateRawMemory", - "V8 Executable Allocation capacity exceeded")); - return NULL; - } - - // Size of header (not executable) plus area (executable). 
- size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, - OS::CommitPageSize()); - // Allocate executable memory either from code range or from the - // OS. - if (isolate_->code_range()->exists()) { - base = isolate_->code_range()->AllocateRawMemory(chunk_size, - commit_size, - &chunk_size); - ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), - MemoryChunk::kAlignment)); - if (base == NULL) return NULL; - size_ += chunk_size; - // Update executable memory size. - size_executable_ += chunk_size; - } else { - base = AllocateAlignedMemory(chunk_size, - commit_size, - MemoryChunk::kAlignment, - executable, - &reservation); - if (base == NULL) return NULL; - // Update executable memory size. - size_executable_ += reservation.size(); - } - - if (Heap::ShouldZapGarbage()) { - ZapBlock(base, CodePageGuardStartOffset()); - ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); - } - - area_start = base + CodePageAreaStartOffset(); - area_end = area_start + commit_area_size; - } else { - chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, - OS::CommitPageSize()); - size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset + - commit_area_size, OS::CommitPageSize()); - base = AllocateAlignedMemory(chunk_size, - commit_size, - MemoryChunk::kAlignment, - executable, - &reservation); - - if (base == NULL) return NULL; - - if (Heap::ShouldZapGarbage()) { - ZapBlock(base, Page::kObjectStartOffset + commit_area_size); - } - - area_start = base + Page::kObjectStartOffset; - area_end = area_start + commit_area_size; - } - - // Use chunk_size for statistics and callbacks because we assume that they - // treat reserved but not-yet committed memory regions of chunks as allocated. 
- isolate_->counters()->memory_allocated()-> - Increment(static_cast<int>(chunk_size)); - - LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); - if (owner != NULL) { - ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); - PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); - } - - MemoryChunk* result = MemoryChunk::Initialize(heap, - base, - chunk_size, - area_start, - area_end, - executable, - owner); - result->set_reserved_memory(&reservation); - MSAN_MEMORY_IS_INITIALIZED(base, chunk_size); - return result; -} - - -void Page::ResetFreeListStatistics() { - non_available_small_blocks_ = 0; - available_in_small_free_list_ = 0; - available_in_medium_free_list_ = 0; - available_in_large_free_list_ = 0; - available_in_huge_free_list_ = 0; -} - - -Page* MemoryAllocator::AllocatePage(intptr_t size, - PagedSpace* owner, - Executability executable) { - MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); - - if (chunk == NULL) return NULL; - - return Page::Initialize(isolate_->heap(), chunk, executable, owner); -} - - -LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, - Space* owner, - Executability executable) { - MemoryChunk* chunk = AllocateChunk(object_size, - object_size, - executable, - owner); - if (chunk == NULL) return NULL; - return LargePage::Initialize(isolate_->heap(), chunk); -} - - -void MemoryAllocator::Free(MemoryChunk* chunk) { - LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); - if (chunk->owner() != NULL) { - ObjectSpace space = - static_cast<ObjectSpace>(1 << chunk->owner()->identity()); - PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); - } - - isolate_->heap()->RememberUnmappedPage( - reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); - - delete chunk->slots_buffer(); - delete chunk->skip_list(); - - VirtualMemory* reservation = chunk->reserved_memory(); - if (reservation->IsReserved()) { - FreeMemory(reservation, 
chunk->executable()); - } else { - FreeMemory(chunk->address(), - chunk->size(), - chunk->executable()); - } -} - - -bool MemoryAllocator::CommitBlock(Address start, - size_t size, - Executability executable) { - if (!CommitMemory(start, size, executable)) return false; - - if (Heap::ShouldZapGarbage()) { - ZapBlock(start, size); - } - - isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); - return true; -} - - -bool MemoryAllocator::UncommitBlock(Address start, size_t size) { - if (!VirtualMemory::UncommitRegion(start, size)) return false; - isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); - return true; -} - - -void MemoryAllocator::ZapBlock(Address start, size_t size) { - for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { - Memory::Address_at(start + s) = kZapValue; - } -} - - -void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, - AllocationAction action, - size_t size) { - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - MemoryAllocationCallbackRegistration registration = - memory_allocation_callbacks_[i]; - if ((registration.space & space) == space && - (registration.action & action) == action) - registration.callback(space, action, static_cast<int>(size)); - } -} - - -bool MemoryAllocator::MemoryAllocationCallbackRegistered( - MemoryAllocationCallback callback) { - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - if (memory_allocation_callbacks_[i].callback == callback) return true; - } - return false; -} - - -void MemoryAllocator::AddMemoryAllocationCallback( - MemoryAllocationCallback callback, - ObjectSpace space, - AllocationAction action) { - ASSERT(callback != NULL); - MemoryAllocationCallbackRegistration registration(callback, space, action); - ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); - return memory_allocation_callbacks_.Add(registration); -} - - -void MemoryAllocator::RemoveMemoryAllocationCallback( - 
MemoryAllocationCallback callback) { - ASSERT(callback != NULL); - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - if (memory_allocation_callbacks_[i].callback == callback) { - memory_allocation_callbacks_.Remove(i); - return; - } - } - UNREACHABLE(); -} - - -#ifdef DEBUG -void MemoryAllocator::ReportStatistics() { - float pct = static_cast<float>(capacity_ - size_) / capacity_; - PrintF(" capacity: %" V8_PTR_PREFIX "d" - ", used: %" V8_PTR_PREFIX "d" - ", available: %%%d\n\n", - capacity_, size_, static_cast<int>(pct*100)); -} -#endif - - -int MemoryAllocator::CodePageGuardStartOffset() { - // We are guarding code pages: the first OS page after the header - // will be protected as non-writable. - return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize()); -} - - -int MemoryAllocator::CodePageGuardSize() { - return static_cast<int>(OS::CommitPageSize()); -} - - -int MemoryAllocator::CodePageAreaStartOffset() { - // We are guarding code pages: the first OS page after the header - // will be protected as non-writable. - return CodePageGuardStartOffset() + CodePageGuardSize(); -} - - -int MemoryAllocator::CodePageAreaEndOffset() { - // We are guarding code pages: the last OS page will be protected as - // non-writable. - return Page::kPageSize - static_cast<int>(OS::CommitPageSize()); -} - - -bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, - Address start, - size_t commit_size, - size_t reserved_size) { - // Commit page header (not executable). - if (!vm->Commit(start, - CodePageGuardStartOffset(), - false)) { - return false; - } - - // Create guard page after the header. - if (!vm->Guard(start + CodePageGuardStartOffset())) { - return false; - } - - // Commit page body (executable). - if (!vm->Commit(start + CodePageAreaStartOffset(), - commit_size - CodePageGuardStartOffset(), - true)) { - return false; - } - - // Create guard page before the end. 
- if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { - return false; - } - - UpdateAllocatedSpaceLimits(start, - start + CodePageAreaStartOffset() + - commit_size - CodePageGuardStartOffset()); - return true; -} - - -// ----------------------------------------------------------------------------- -// MemoryChunk implementation - -void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { - MemoryChunk* chunk = MemoryChunk::FromAddress(address); - if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { - static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); - } - chunk->IncrementLiveBytes(by); -} - - -// ----------------------------------------------------------------------------- -// PagedSpace implementation - -PagedSpace::PagedSpace(Heap* heap, - intptr_t max_capacity, - AllocationSpace id, - Executability executable) - : Space(heap, id, executable), - free_list_(this), - was_swept_conservatively_(false), - first_unswept_page_(Page::FromAddress(NULL)), - unswept_free_bytes_(0) { - if (id == CODE_SPACE) { - area_size_ = heap->isolate()->memory_allocator()-> - CodePageAreaSize(); - } else { - area_size_ = Page::kPageSize - Page::kObjectStartOffset; - } - max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) - * AreaSize(); - accounting_stats_.Clear(); - - allocation_info_.set_top(NULL); - allocation_info_.set_limit(NULL); - - anchor_.InitializeAsAnchor(this); -} - - -bool PagedSpace::SetUp() { - return true; -} - - -bool PagedSpace::HasBeenSetUp() { - return true; -} - - -void PagedSpace::TearDown() { - PageIterator iterator(this); - while (iterator.has_next()) { - heap()->isolate()->memory_allocator()->Free(iterator.next()); - } - anchor_.set_next_page(&anchor_); - anchor_.set_prev_page(&anchor_); - accounting_stats_.Clear(); -} - - -size_t PagedSpace::CommittedPhysicalMemory() { - if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); - 
MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); - size_t size = 0; - PageIterator it(this); - while (it.has_next()) { - size += it.next()->CommittedPhysicalMemory(); - } - return size; -} - - -MaybeObject* PagedSpace::FindObject(Address addr) { - // Note: this function can only be called on precisely swept spaces. - ASSERT(!heap()->mark_compact_collector()->in_use()); - - if (!Contains(addr)) return Failure::Exception(); - - Page* p = Page::FromAddress(addr); - HeapObjectIterator it(p, NULL); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - Address cur = obj->address(); - Address next = cur + obj->Size(); - if ((cur <= addr) && (addr < next)) return obj; - } - - UNREACHABLE(); - return Failure::Exception(); -} - - -bool PagedSpace::CanExpand() { - ASSERT(max_capacity_ % AreaSize() == 0); - - if (Capacity() == max_capacity_) return false; - - ASSERT(Capacity() < max_capacity_); - - // Are we going to exceed capacity for this space? - if ((Capacity() + Page::kPageSize) > max_capacity_) return false; - - return true; -} - - -bool PagedSpace::Expand() { - if (!CanExpand()) return false; - - intptr_t size = AreaSize(); - - if (anchor_.next_page() == &anchor_) { - size = SizeOfFirstPage(); - } - - Page* p = heap()->isolate()->memory_allocator()->AllocatePage( - size, this, executable()); - if (p == NULL) return false; - - ASSERT(Capacity() <= max_capacity_); - - p->InsertAfter(anchor_.prev_page()); - - return true; -} - - -intptr_t PagedSpace::SizeOfFirstPage() { - int size = 0; - switch (identity()) { - case OLD_POINTER_SPACE: - size = 72 * kPointerSize * KB; - break; - case OLD_DATA_SPACE: - size = 192 * KB; - break; - case MAP_SPACE: - size = 16 * kPointerSize * KB; - break; - case CELL_SPACE: - size = 16 * kPointerSize * KB; - break; - case PROPERTY_CELL_SPACE: - size = 8 * kPointerSize * KB; - break; - case CODE_SPACE: - if (heap()->isolate()->code_range()->exists()) { - // When code range exists, code pages are allocated in a special 
way - // (from the reserved code range). That part of the code is not yet - // upgraded to handle small pages. - size = AreaSize(); - } else { - size = 480 * KB; - } - break; - default: - UNREACHABLE(); - } - return Min(size, AreaSize()); -} - - -int PagedSpace::CountTotalPages() { - PageIterator it(this); - int count = 0; - while (it.has_next()) { - it.next(); - count++; - } - return count; -} - - -void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) { - sizes->huge_size_ = page->available_in_huge_free_list(); - sizes->small_size_ = page->available_in_small_free_list(); - sizes->medium_size_ = page->available_in_medium_free_list(); - sizes->large_size_ = page->available_in_large_free_list(); -} - - -void PagedSpace::ResetFreeListStatistics() { - PageIterator page_iterator(this); - while (page_iterator.has_next()) { - Page* page = page_iterator.next(); - page->ResetFreeListStatistics(); - } -} - - -void PagedSpace::IncreaseCapacity(int size) { - accounting_stats_.ExpandSpace(size); -} - - -void PagedSpace::ReleasePage(Page* page, bool unlink) { - ASSERT(page->LiveBytes() == 0); - ASSERT(AreaSize() == page->area_size()); - - // Adjust list of unswept pages if the page is the head of the list. - if (first_unswept_page_ == page) { - first_unswept_page_ = page->next_page(); - if (first_unswept_page_ == anchor()) { - first_unswept_page_ = Page::FromAddress(NULL); - } - } - - if (page->WasSwept()) { - intptr_t size = free_list_.EvictFreeListItems(page); - accounting_stats_.AllocateBytes(size); - ASSERT_EQ(AreaSize(), static_cast<int>(size)); - } else { - DecreaseUnsweptFreeBytes(page); - } - - // TODO(hpayer): This check is just used for debugging purpose and - // should be removed or turned into an assert after investigating the - // crash in concurrent sweeping. 
- CHECK(!free_list_.ContainsPageFreeListItems(page)); - - if (Page::FromAllocationTop(allocation_info_.top()) == page) { - allocation_info_.set_top(NULL); - allocation_info_.set_limit(NULL); - } - - if (unlink) { - page->Unlink(); - } - if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { - heap()->isolate()->memory_allocator()->Free(page); - } else { - heap()->QueueMemoryChunkForFree(page); - } - - ASSERT(Capacity() > 0); - accounting_stats_.ShrinkSpace(AreaSize()); -} - - -#ifdef DEBUG -void PagedSpace::Print() { } -#endif - -#ifdef VERIFY_HEAP -void PagedSpace::Verify(ObjectVisitor* visitor) { - // We can only iterate over the pages if they were swept precisely. - if (was_swept_conservatively_) return; - - bool allocation_pointer_found_in_space = - (allocation_info_.top() == allocation_info_.limit()); - PageIterator page_iterator(this); - while (page_iterator.has_next()) { - Page* page = page_iterator.next(); - CHECK(page->owner() == this); - if (page == Page::FromAllocationTop(allocation_info_.top())) { - allocation_pointer_found_in_space = true; - } - CHECK(page->WasSweptPrecisely()); - HeapObjectIterator it(page, NULL); - Address end_of_previous_object = page->area_start(); - Address top = page->area_end(); - int black_size = 0; - for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { - CHECK(end_of_previous_object <= object->address()); - - // The first word should be a map, and we expect all map pointers to - // be in map space. - Map* map = object->map(); - CHECK(map->IsMap()); - CHECK(heap()->map_space()->Contains(map)); - - // Perform space-specific object verification. - VerifyObject(object); - - // The object itself should look OK. - object->Verify(); - - // All the interior pointers should be contained in the heap. 
- int size = object->Size(); - object->IterateBody(map->instance_type(), size, visitor); - if (Marking::IsBlack(Marking::MarkBitFrom(object))) { - black_size += size; - } - - CHECK(object->address() + size <= top); - end_of_previous_object = object->address() + size; - } - CHECK_LE(black_size, page->LiveBytes()); - } - CHECK(allocation_pointer_found_in_space); -} -#endif // VERIFY_HEAP - -// ----------------------------------------------------------------------------- -// NewSpace implementation - - -bool NewSpace::SetUp(int reserved_semispace_capacity, - int maximum_semispace_capacity) { - // Set up new space based on the preallocated memory block defined by - // start and size. The provided space is divided into two semi-spaces. - // To support fast containment testing in the new space, the size of - // this chunk must be a power of two and it must be aligned to its size. - int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); - - size_t size = 2 * reserved_semispace_capacity; - Address base = - heap()->isolate()->memory_allocator()->ReserveAlignedMemory( - size, size, &reservation_); - if (base == NULL) return false; - - chunk_base_ = base; - chunk_size_ = static_cast<uintptr_t>(size); - LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); - - ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); - ASSERT(IsPowerOf2(maximum_semispace_capacity)); - - // Allocate and set up the histogram arrays if necessary. 
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); - promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); - -#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ - promoted_histogram_[name].set_name(#name); - INSTANCE_TYPE_LIST(SET_NAME) -#undef SET_NAME - - ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); - ASSERT(static_cast<intptr_t>(chunk_size_) >= - 2 * heap()->ReservedSemiSpaceSize()); - ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); - - to_space_.SetUp(chunk_base_, - initial_semispace_capacity, - maximum_semispace_capacity); - from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, - initial_semispace_capacity, - maximum_semispace_capacity); - if (!to_space_.Commit()) { - return false; - } - ASSERT(!from_space_.is_committed()); // No need to use memory yet. - - start_ = chunk_base_; - address_mask_ = ~(2 * reserved_semispace_capacity - 1); - object_mask_ = address_mask_ | kHeapObjectTagMask; - object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; - - ResetAllocationInfo(); - - return true; -} - - -void NewSpace::TearDown() { - if (allocated_histogram_) { - DeleteArray(allocated_histogram_); - allocated_histogram_ = NULL; - } - if (promoted_histogram_) { - DeleteArray(promoted_histogram_); - promoted_histogram_ = NULL; - } - - start_ = NULL; - allocation_info_.set_top(NULL); - allocation_info_.set_limit(NULL); - - to_space_.TearDown(); - from_space_.TearDown(); - - LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); - - ASSERT(reservation_.IsReserved()); - heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, - NOT_EXECUTABLE); - chunk_base_ = NULL; - chunk_size_ = 0; -} - - -void NewSpace::Flip() { - SemiSpace::Swap(&from_space_, &to_space_); -} - - -void NewSpace::Grow() { - // Double the semispace size but only up to maximum capacity. 
- ASSERT(Capacity() < MaximumCapacity()); - int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); - if (to_space_.GrowTo(new_capacity)) { - // Only grow from space if we managed to grow to-space. - if (!from_space_.GrowTo(new_capacity)) { - // If we managed to grow to-space but couldn't grow from-space, - // attempt to shrink to-space. - if (!to_space_.ShrinkTo(from_space_.Capacity())) { - // We are in an inconsistent state because we could not - // commit/uncommit memory from new space. - V8::FatalProcessOutOfMemory("Failed to grow new space."); - } - } - } - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); -} - - -void NewSpace::Shrink() { - int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); - int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); - if (rounded_new_capacity < Capacity() && - to_space_.ShrinkTo(rounded_new_capacity)) { - // Only shrink from-space if we managed to shrink to-space. - from_space_.Reset(); - if (!from_space_.ShrinkTo(rounded_new_capacity)) { - // If we managed to shrink to-space but couldn't shrink from - // space, attempt to grow to-space again. - if (!to_space_.GrowTo(from_space_.Capacity())) { - // We are in an inconsistent state because we could not - // commit/uncommit memory from new space. - V8::FatalProcessOutOfMemory("Failed to shrink new space."); - } - } - } - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); -} - - -void NewSpace::UpdateAllocationInfo() { - MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); - allocation_info_.set_top(to_space_.page_low()); - allocation_info_.set_limit(to_space_.page_high()); - UpdateInlineAllocationLimit(0); - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); -} - - -void NewSpace::ResetAllocationInfo() { - to_space_.Reset(); - UpdateAllocationInfo(); - pages_used_ = 0; - // Clear all mark-bits in the to-space. 
- NewSpacePageIterator it(&to_space_); - while (it.has_next()) { - Bitmap::Clear(it.next()); - } -} - - -void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { - if (heap()->inline_allocation_disabled()) { - // Lowest limit when linear allocation was disabled. - Address high = to_space_.page_high(); - Address new_top = allocation_info_.top() + size_in_bytes; - allocation_info_.set_limit(Min(new_top, high)); - } else if (inline_allocation_limit_step() == 0) { - // Normal limit is the end of the current page. - allocation_info_.set_limit(to_space_.page_high()); - } else { - // Lower limit during incremental marking. - Address high = to_space_.page_high(); - Address new_top = allocation_info_.top() + size_in_bytes; - Address new_limit = new_top + inline_allocation_limit_step_; - allocation_info_.set_limit(Min(new_limit, high)); - } - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); -} - - -bool NewSpace::AddFreshPage() { - Address top = allocation_info_.top(); - if (NewSpacePage::IsAtStart(top)) { - // The current page is already empty. Don't try to make another. - - // We should only get here if someone asks to allocate more - // than what can be stored in a single page. - // TODO(gc): Change the limit on new-space allocation to prevent this - // from happening (all such allocations should go directly to LOSpace). - return false; - } - if (!to_space_.AdvancePage()) { - // Failed to get a new page in to-space. - return false; - } - - // Clear remainder of current page. 
- Address limit = NewSpacePage::FromLimit(top)->area_end(); - if (heap()->gc_state() == Heap::SCAVENGE) { - heap()->promotion_queue()->SetNewLimit(limit); - heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); - } - - int remaining_in_page = static_cast<int>(limit - top); - heap()->CreateFillerObjectAt(top, remaining_in_page); - pages_used_++; - UpdateAllocationInfo(); - - return true; -} - - -MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { - Address old_top = allocation_info_.top(); - Address high = to_space_.page_high(); - if (allocation_info_.limit() < high) { - // Either the limit has been lowered because linear allocation was disabled - // or because incremental marking wants to get a chance to do a step. Set - // the new limit accordingly. - Address new_top = old_top + size_in_bytes; - int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); - heap()->incremental_marking()->Step( - bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); - UpdateInlineAllocationLimit(size_in_bytes); - top_on_previous_step_ = new_top; - return AllocateRaw(size_in_bytes); - } else if (AddFreshPage()) { - // Switched to new page. Try allocating again. - int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); - heap()->incremental_marking()->Step( - bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); - top_on_previous_step_ = to_space_.page_low(); - return AllocateRaw(size_in_bytes); - } else { - return Failure::RetryAfterGC(); - } -} - - -#ifdef VERIFY_HEAP -// We do not use the SemiSpaceIterator because verification doesn't assume -// that it works (it depends on the invariants we are checking). -void NewSpace::Verify() { - // The allocation pointer should be in the space or at the very end. - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); - - // There should be objects packed in from the low address up to the - // allocation pointer. 
- Address current = to_space_.first_page()->area_start(); - CHECK_EQ(current, to_space_.space_start()); - - while (current != top()) { - if (!NewSpacePage::IsAtEnd(current)) { - // The allocation pointer should not be in the middle of an object. - CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || - current < top()); - - HeapObject* object = HeapObject::FromAddress(current); - - // The first word should be a map, and we expect all map pointers to - // be in map space. - Map* map = object->map(); - CHECK(map->IsMap()); - CHECK(heap()->map_space()->Contains(map)); - - // The object should not be code or a map. - CHECK(!object->IsMap()); - CHECK(!object->IsCode()); - - // The object itself should look OK. - object->Verify(); - - // All the interior pointers should be contained in the heap. - VerifyPointersVisitor visitor; - int size = object->Size(); - object->IterateBody(map->instance_type(), size, &visitor); - - current += size; - } else { - // At end of page, switch to next page. - NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); - // Next page should be valid. - CHECK(!page->is_anchor()); - current = page->area_start(); - } - } - - // Check semi-spaces. - CHECK_EQ(from_space_.id(), kFromSpace); - CHECK_EQ(to_space_.id(), kToSpace); - from_space_.Verify(); - to_space_.Verify(); -} -#endif - -// ----------------------------------------------------------------------------- -// SemiSpace implementation - -void SemiSpace::SetUp(Address start, - int initial_capacity, - int maximum_capacity) { - // Creates a space in the young generation. The constructor does not - // allocate memory from the OS. A SemiSpace is given a contiguous chunk of - // memory of size 'capacity' when set up, and does not grow or shrink - // otherwise. In the mark-compact collector, the memory region of the from - // space is used as the marking stack. It requires contiguous memory - // addresses. 
- ASSERT(maximum_capacity >= Page::kPageSize); - initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); - capacity_ = initial_capacity; - maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); - maximum_committed_ = 0; - committed_ = false; - start_ = start; - address_mask_ = ~(maximum_capacity - 1); - object_mask_ = address_mask_ | kHeapObjectTagMask; - object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; - age_mark_ = start_; -} - - -void SemiSpace::TearDown() { - start_ = NULL; - capacity_ = 0; -} - - -bool SemiSpace::Commit() { - ASSERT(!is_committed()); - int pages = capacity_ / Page::kPageSize; - if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, - capacity_, - executable())) { - return false; - } - - NewSpacePage* current = anchor(); - for (int i = 0; i < pages; i++) { - NewSpacePage* new_page = - NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); - new_page->InsertAfter(current); - current = new_page; - } - - SetCapacity(capacity_); - committed_ = true; - Reset(); - return true; -} - - -bool SemiSpace::Uncommit() { - ASSERT(is_committed()); - Address start = start_ + maximum_capacity_ - capacity_; - if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { - return false; - } - anchor()->set_next_page(anchor()); - anchor()->set_prev_page(anchor()); - - committed_ = false; - return true; -} - - -size_t SemiSpace::CommittedPhysicalMemory() { - if (!is_committed()) return 0; - size_t size = 0; - NewSpacePageIterator it(this); - while (it.has_next()) { - size += it.next()->CommittedPhysicalMemory(); - } - return size; -} - - -bool SemiSpace::GrowTo(int new_capacity) { - if (!is_committed()) { - if (!Commit()) return false; - } - ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); - ASSERT(new_capacity <= maximum_capacity_); - ASSERT(new_capacity > capacity_); - int pages_before = capacity_ / Page::kPageSize; - int pages_after = new_capacity / Page::kPageSize; - - 
size_t delta = new_capacity - capacity_; - - ASSERT(IsAligned(delta, OS::AllocateAlignment())); - if (!heap()->isolate()->memory_allocator()->CommitBlock( - start_ + capacity_, delta, executable())) { - return false; - } - SetCapacity(new_capacity); - NewSpacePage* last_page = anchor()->prev_page(); - ASSERT(last_page != anchor()); - for (int i = pages_before; i < pages_after; i++) { - Address page_address = start_ + i * Page::kPageSize; - NewSpacePage* new_page = NewSpacePage::Initialize(heap(), - page_address, - this); - new_page->InsertAfter(last_page); - Bitmap::Clear(new_page); - // Duplicate the flags that was set on the old page. - new_page->SetFlags(last_page->GetFlags(), - NewSpacePage::kCopyOnFlipFlagsMask); - last_page = new_page; - } - return true; -} - - -bool SemiSpace::ShrinkTo(int new_capacity) { - ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); - ASSERT(new_capacity >= initial_capacity_); - ASSERT(new_capacity < capacity_); - if (is_committed()) { - size_t delta = capacity_ - new_capacity; - ASSERT(IsAligned(delta, OS::AllocateAlignment())); - - MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); - if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { - return false; - } - - int pages_after = new_capacity / Page::kPageSize; - NewSpacePage* new_last_page = - NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); - new_last_page->set_next_page(anchor()); - anchor()->set_prev_page(new_last_page); - ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); - } - - SetCapacity(new_capacity); - - return true; -} - - -void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { - anchor_.set_owner(this); - // Fixup back-pointers to anchor. Address of anchor changes - // when we swap. - anchor_.prev_page()->set_next_page(&anchor_); - anchor_.next_page()->set_prev_page(&anchor_); - - bool becomes_to_space = (id_ == kFromSpace); - id_ = becomes_to_space ? 
kToSpace : kFromSpace; - NewSpacePage* page = anchor_.next_page(); - while (page != &anchor_) { - page->set_owner(this); - page->SetFlags(flags, mask); - if (becomes_to_space) { - page->ClearFlag(MemoryChunk::IN_FROM_SPACE); - page->SetFlag(MemoryChunk::IN_TO_SPACE); - page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); - page->ResetLiveBytes(); - } else { - page->SetFlag(MemoryChunk::IN_FROM_SPACE); - page->ClearFlag(MemoryChunk::IN_TO_SPACE); - } - ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); - ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || - page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); - page = page->next_page(); - } -} - - -void SemiSpace::Reset() { - ASSERT(anchor_.next_page() != &anchor_); - current_page_ = anchor_.next_page(); -} - - -void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { - // We won't be swapping semispaces without data in them. - ASSERT(from->anchor_.next_page() != &from->anchor_); - ASSERT(to->anchor_.next_page() != &to->anchor_); - - // Swap bits. - SemiSpace tmp = *from; - *from = *to; - *to = tmp; - - // Fixup back-pointers to the page list anchor now that its address - // has changed. - // Swap to/from-space bits on pages. - // Copy GC flags from old active space (from-space) to new (to-space). - intptr_t flags = from->current_page()->GetFlags(); - to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); - - from->FlipPages(0, 0); -} - - -void SemiSpace::SetCapacity(int new_capacity) { - capacity_ = new_capacity; - if (capacity_ > maximum_committed_) { - maximum_committed_ = capacity_; - } -} - - -void SemiSpace::set_age_mark(Address mark) { - ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this); - age_mark_ = mark; - // Mark all pages up to the one containing mark. 
- NewSpacePageIterator it(space_start(), mark); - while (it.has_next()) { - it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); - } -} - - -#ifdef DEBUG -void SemiSpace::Print() { } -#endif - -#ifdef VERIFY_HEAP -void SemiSpace::Verify() { - bool is_from_space = (id_ == kFromSpace); - NewSpacePage* page = anchor_.next_page(); - CHECK(anchor_.semi_space() == this); - while (page != &anchor_) { - CHECK(page->semi_space() == this); - CHECK(page->InNewSpace()); - CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE - : MemoryChunk::IN_TO_SPACE)); - CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE - : MemoryChunk::IN_FROM_SPACE)); - CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); - if (!is_from_space) { - // The pointers-from-here-are-interesting flag isn't updated dynamically - // on from-space pages, so it might be out of sync with the marking state. - if (page->heap()->incremental_marking()->IsMarking()) { - CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); - } else { - CHECK(!page->IsFlagSet( - MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); - } - // TODO(gc): Check that the live_bytes_count_ field matches the - // black marking on the page (if we make it match in new-space). - } - CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); - CHECK(page->prev_page()->next_page() == page); - page = page->next_page(); - } -} -#endif - -#ifdef DEBUG -void SemiSpace::AssertValidRange(Address start, Address end) { - // Addresses belong to same semi-space - NewSpacePage* page = NewSpacePage::FromLimit(start); - NewSpacePage* end_page = NewSpacePage::FromLimit(end); - SemiSpace* space = page->semi_space(); - CHECK_EQ(space, end_page->semi_space()); - // Start address is before end address, either on same page, - // or end address is on a later page in the linked list of - // semi-space pages. 
- if (page == end_page) { - CHECK(start <= end); - } else { - while (page != end_page) { - page = page->next_page(); - CHECK_NE(page, space->anchor()); - } - } -} -#endif - - -// ----------------------------------------------------------------------------- -// SemiSpaceIterator implementation. -SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { - Initialize(space->bottom(), space->top(), NULL); -} - - -SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, - HeapObjectCallback size_func) { - Initialize(space->bottom(), space->top(), size_func); -} - - -SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { - Initialize(start, space->top(), NULL); -} - - -SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { - Initialize(from, to, NULL); -} - - -void SemiSpaceIterator::Initialize(Address start, - Address end, - HeapObjectCallback size_func) { - SemiSpace::AssertValidRange(start, end); - current_ = start; - limit_ = end; - size_func_ = size_func; -} - - -#ifdef DEBUG -// heap_histograms is shared, always clear it before using it. -static void ClearHistograms(Isolate* isolate) { - // We reset the name each time, though it hasn't changed. 
-#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); - INSTANCE_TYPE_LIST(DEF_TYPE_NAME) -#undef DEF_TYPE_NAME - -#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); - INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) -#undef CLEAR_HISTOGRAM - - isolate->js_spill_information()->Clear(); -} - - -static void ClearCodeKindStatistics(int* code_kind_statistics) { - for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { - code_kind_statistics[i] = 0; - } -} - - -static void ReportCodeKindStatistics(int* code_kind_statistics) { - PrintF("\n Code kind histograms: \n"); - for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { - if (code_kind_statistics[i] > 0) { - PrintF(" %-20s: %10d bytes\n", - Code::Kind2String(static_cast<Code::Kind>(i)), - code_kind_statistics[i]); - } - } - PrintF("\n"); -} - - -static int CollectHistogramInfo(HeapObject* obj) { - Isolate* isolate = obj->GetIsolate(); - InstanceType type = obj->map()->instance_type(); - ASSERT(0 <= type && type <= LAST_TYPE); - ASSERT(isolate->heap_histograms()[type].name() != NULL); - isolate->heap_histograms()[type].increment_number(1); - isolate->heap_histograms()[type].increment_bytes(obj->Size()); - - if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { - JSObject::cast(obj)->IncrementSpillStatistics( - isolate->js_spill_information()); - } - - return obj->Size(); -} - - -static void ReportHistogram(Isolate* isolate, bool print_spill) { - PrintF("\n Object Histogram:\n"); - for (int i = 0; i <= LAST_TYPE; i++) { - if (isolate->heap_histograms()[i].number() > 0) { - PrintF(" %-34s%10d (%10d bytes)\n", - isolate->heap_histograms()[i].name(), - isolate->heap_histograms()[i].number(), - isolate->heap_histograms()[i].bytes()); - } - } - PrintF("\n"); - - // Summarize string types. 
- int string_number = 0; - int string_bytes = 0; -#define INCREMENT(type, size, name, camel_name) \ - string_number += isolate->heap_histograms()[type].number(); \ - string_bytes += isolate->heap_histograms()[type].bytes(); - STRING_TYPE_LIST(INCREMENT) -#undef INCREMENT - if (string_number > 0) { - PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, - string_bytes); - } - - if (FLAG_collect_heap_spill_statistics && print_spill) { - isolate->js_spill_information()->Print(); - } -} -#endif // DEBUG - - -// Support for statistics gathering for --heap-stats and --log-gc. -void NewSpace::ClearHistograms() { - for (int i = 0; i <= LAST_TYPE; i++) { - allocated_histogram_[i].clear(); - promoted_histogram_[i].clear(); - } -} - - -// Because the copying collector does not touch garbage objects, we iterate -// the new space before a collection to get a histogram of allocated objects. -// This only happens when --log-gc flag is set. -void NewSpace::CollectStatistics() { - ClearHistograms(); - SemiSpaceIterator it(this); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) - RecordAllocation(obj); -} - - -static void DoReportStatistics(Isolate* isolate, - HistogramInfo* info, const char* description) { - LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); - // Lump all the string types together. - int string_number = 0; - int string_bytes = 0; -#define INCREMENT(type, size, name, camel_name) \ - string_number += info[type].number(); \ - string_bytes += info[type].bytes(); - STRING_TYPE_LIST(INCREMENT) -#undef INCREMENT - if (string_number > 0) { - LOG(isolate, - HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); - } - - // Then do the other types. 
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { - if (info[i].number() > 0) { - LOG(isolate, - HeapSampleItemEvent(info[i].name(), info[i].number(), - info[i].bytes())); - } - } - LOG(isolate, HeapSampleEndEvent("NewSpace", description)); -} - - -void NewSpace::ReportStatistics() { -#ifdef DEBUG - if (FLAG_heap_stats) { - float pct = static_cast<float>(Available()) / Capacity(); - PrintF(" capacity: %" V8_PTR_PREFIX "d" - ", available: %" V8_PTR_PREFIX "d, %%%d\n", - Capacity(), Available(), static_cast<int>(pct*100)); - PrintF("\n Object Histogram:\n"); - for (int i = 0; i <= LAST_TYPE; i++) { - if (allocated_histogram_[i].number() > 0) { - PrintF(" %-34s%10d (%10d bytes)\n", - allocated_histogram_[i].name(), - allocated_histogram_[i].number(), - allocated_histogram_[i].bytes()); - } - } - PrintF("\n"); - } -#endif // DEBUG - - if (FLAG_log_gc) { - Isolate* isolate = heap()->isolate(); - DoReportStatistics(isolate, allocated_histogram_, "allocated"); - DoReportStatistics(isolate, promoted_histogram_, "promoted"); - } -} - - -void NewSpace::RecordAllocation(HeapObject* obj) { - InstanceType type = obj->map()->instance_type(); - ASSERT(0 <= type && type <= LAST_TYPE); - allocated_histogram_[type].increment_number(1); - allocated_histogram_[type].increment_bytes(obj->Size()); -} - - -void NewSpace::RecordPromotion(HeapObject* obj) { - InstanceType type = obj->map()->instance_type(); - ASSERT(0 <= type && type <= LAST_TYPE); - promoted_histogram_[type].increment_number(1); - promoted_histogram_[type].increment_bytes(obj->Size()); -} - - -size_t NewSpace::CommittedPhysicalMemory() { - if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); - MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); - size_t size = to_space_.CommittedPhysicalMemory(); - if (from_space_.is_committed()) { - size += from_space_.CommittedPhysicalMemory(); - } - return size; -} - - -// ----------------------------------------------------------------------------- -// Free 
lists for old object spaces implementation - -void FreeListNode::set_size(Heap* heap, int size_in_bytes) { - ASSERT(size_in_bytes > 0); - ASSERT(IsAligned(size_in_bytes, kPointerSize)); - - // We write a map and possibly size information to the block. If the block - // is big enough to be a FreeSpace with at least one extra word (the next - // pointer), we set its map to be the free space map and its size to an - // appropriate array length for the desired size from HeapObject::Size(). - // If the block is too small (eg, one or two words), to hold both a size - // field and a next pointer, we give it a filler map that gives it the - // correct size. - if (size_in_bytes > FreeSpace::kHeaderSize) { - set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); - // Can't use FreeSpace::cast because it fails during deserialization. - FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); - this_as_free_space->set_size(size_in_bytes); - } else if (size_in_bytes == kPointerSize) { - set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); - } else if (size_in_bytes == 2 * kPointerSize) { - set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map()); - } else { - UNREACHABLE(); - } - // We would like to ASSERT(Size() == size_in_bytes) but this would fail during - // deserialization because the free space map is not done yet. 
-} - - -FreeListNode* FreeListNode::next() { - ASSERT(IsFreeListNode(this)); - if (map() == GetHeap()->raw_unchecked_free_space_map()) { - ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); - return reinterpret_cast<FreeListNode*>( - Memory::Address_at(address() + kNextOffset)); - } else { - return reinterpret_cast<FreeListNode*>( - Memory::Address_at(address() + kPointerSize)); - } -} - - -FreeListNode** FreeListNode::next_address() { - ASSERT(IsFreeListNode(this)); - if (map() == GetHeap()->raw_unchecked_free_space_map()) { - ASSERT(Size() >= kNextOffset + kPointerSize); - return reinterpret_cast<FreeListNode**>(address() + kNextOffset); - } else { - return reinterpret_cast<FreeListNode**>(address() + kPointerSize); - } -} - - -void FreeListNode::set_next(FreeListNode* next) { - ASSERT(IsFreeListNode(this)); - // While we are booting the VM the free space map will actually be null. So - // we have to make sure that we don't try to use it for anything at that - // stage. - if (map() == GetHeap()->raw_unchecked_free_space_map()) { - ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); - Memory::Address_at(address() + kNextOffset) = - reinterpret_cast<Address>(next); - } else { - Memory::Address_at(address() + kPointerSize) = - reinterpret_cast<Address>(next); - } -} - - -intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { - intptr_t free_bytes = 0; - if (category->top() != NULL) { - // This is safe (not going to deadlock) since Concatenate operations - // are never performed on the same free lists at the same time in - // reverse order. 
- LockGuard<Mutex> target_lock_guard(mutex()); - LockGuard<Mutex> source_lock_guard(category->mutex()); - ASSERT(category->end_ != NULL); - free_bytes = category->available(); - if (end_ == NULL) { - end_ = category->end(); - } else { - category->end()->set_next(top()); - } - set_top(category->top()); - NoBarrier_Store(&top_, category->top_); - available_ += category->available(); - category->Reset(); - } - return free_bytes; -} - - -void FreeListCategory::Reset() { - set_top(NULL); - set_end(NULL); - set_available(0); -} - - -intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { - int sum = 0; - FreeListNode* t = top(); - FreeListNode** n = &t; - while (*n != NULL) { - if (Page::FromAddress((*n)->address()) == p) { - FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); - sum += free_space->Size(); - *n = (*n)->next(); - } else { - n = (*n)->next_address(); - } - } - set_top(t); - if (top() == NULL) { - set_end(NULL); - } - available_ -= sum; - return sum; -} - - -bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { - FreeListNode* node = top(); - while (node != NULL) { - if (Page::FromAddress(node->address()) == p) return true; - node = node->next(); - } - return false; -} - - -FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) { - FreeListNode* node = top(); - - if (node == NULL) return NULL; - - while (node != NULL && - Page::FromAddress(node->address())->IsEvacuationCandidate()) { - available_ -= reinterpret_cast<FreeSpace*>(node)->Size(); - node = node->next(); - } - - if (node != NULL) { - set_top(node->next()); - *node_size = reinterpret_cast<FreeSpace*>(node)->Size(); - available_ -= *node_size; - } else { - set_top(NULL); - } - - if (top() == NULL) { - set_end(NULL); - } - - return node; -} - - -FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes, - int *node_size) { - FreeListNode* node = PickNodeFromList(node_size); - if (node != NULL && *node_size < size_in_bytes) { - Free(node, *node_size); - 
*node_size = 0; - return NULL; - } - return node; -} - - -void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) { - node->set_next(top()); - set_top(node); - if (end_ == NULL) { - end_ = node; - } - available_ += size_in_bytes; -} - - -void FreeListCategory::RepairFreeList(Heap* heap) { - FreeListNode* n = top(); - while (n != NULL) { - Map** map_location = reinterpret_cast<Map**>(n->address()); - if (*map_location == NULL) { - *map_location = heap->free_space_map(); - } else { - ASSERT(*map_location == heap->free_space_map()); - } - n = n->next(); - } -} - - -FreeList::FreeList(PagedSpace* owner) - : owner_(owner), heap_(owner->heap()) { - Reset(); -} - - -intptr_t FreeList::Concatenate(FreeList* free_list) { - intptr_t free_bytes = 0; - free_bytes += small_list_.Concatenate(free_list->small_list()); - free_bytes += medium_list_.Concatenate(free_list->medium_list()); - free_bytes += large_list_.Concatenate(free_list->large_list()); - free_bytes += huge_list_.Concatenate(free_list->huge_list()); - return free_bytes; -} - - -void FreeList::Reset() { - small_list_.Reset(); - medium_list_.Reset(); - large_list_.Reset(); - huge_list_.Reset(); -} - - -int FreeList::Free(Address start, int size_in_bytes) { - if (size_in_bytes == 0) return 0; - - FreeListNode* node = FreeListNode::FromAddress(start); - node->set_size(heap_, size_in_bytes); - Page* page = Page::FromAddress(start); - - // Early return to drop too-small blocks on the floor. - if (size_in_bytes < kSmallListMin) { - page->add_non_available_small_blocks(size_in_bytes); - return size_in_bytes; - } - - // Insert other blocks at the head of a free list of the appropriate - // magnitude. 
- if (size_in_bytes <= kSmallListMax) { - small_list_.Free(node, size_in_bytes); - page->add_available_in_small_free_list(size_in_bytes); - } else if (size_in_bytes <= kMediumListMax) { - medium_list_.Free(node, size_in_bytes); - page->add_available_in_medium_free_list(size_in_bytes); - } else if (size_in_bytes <= kLargeListMax) { - large_list_.Free(node, size_in_bytes); - page->add_available_in_large_free_list(size_in_bytes); - } else { - huge_list_.Free(node, size_in_bytes); - page->add_available_in_huge_free_list(size_in_bytes); - } - - ASSERT(IsVeryLong() || available() == SumFreeLists()); - return 0; -} - - -FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { - FreeListNode* node = NULL; - Page* page = NULL; - - if (size_in_bytes <= kSmallAllocationMax) { - node = small_list_.PickNodeFromList(node_size); - if (node != NULL) { - ASSERT(size_in_bytes <= *node_size); - page = Page::FromAddress(node->address()); - page->add_available_in_small_free_list(-(*node_size)); - ASSERT(IsVeryLong() || available() == SumFreeLists()); - return node; - } - } - - if (size_in_bytes <= kMediumAllocationMax) { - node = medium_list_.PickNodeFromList(node_size); - if (node != NULL) { - ASSERT(size_in_bytes <= *node_size); - page = Page::FromAddress(node->address()); - page->add_available_in_medium_free_list(-(*node_size)); - ASSERT(IsVeryLong() || available() == SumFreeLists()); - return node; - } - } - - if (size_in_bytes <= kLargeAllocationMax) { - node = large_list_.PickNodeFromList(node_size); - if (node != NULL) { - ASSERT(size_in_bytes <= *node_size); - page = Page::FromAddress(node->address()); - page->add_available_in_large_free_list(-(*node_size)); - ASSERT(IsVeryLong() || available() == SumFreeLists()); - return node; - } - } - - int huge_list_available = huge_list_.available(); - FreeListNode* top_node = huge_list_.top(); - for (FreeListNode** cur = &top_node; - *cur != NULL; - cur = (*cur)->next_address()) { - FreeListNode* cur_node = *cur; - while 
(cur_node != NULL && - Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { - int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); - huge_list_available -= size; - page = Page::FromAddress(cur_node->address()); - page->add_available_in_huge_free_list(-size); - cur_node = cur_node->next(); - } - - *cur = cur_node; - if (cur_node == NULL) { - huge_list_.set_end(NULL); - break; - } - - ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map()); - FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); - int size = cur_as_free_space->Size(); - if (size >= size_in_bytes) { - // Large enough node found. Unlink it from the list. - node = *cur; - *cur = node->next(); - *node_size = size; - huge_list_available -= size; - page = Page::FromAddress(node->address()); - page->add_available_in_huge_free_list(-size); - break; - } - } - - huge_list_.set_top(top_node); - if (huge_list_.top() == NULL) { - huge_list_.set_end(NULL); - } - huge_list_.set_available(huge_list_available); - - if (node != NULL) { - ASSERT(IsVeryLong() || available() == SumFreeLists()); - return node; - } - - if (size_in_bytes <= kSmallListMax) { - node = small_list_.PickNodeFromList(size_in_bytes, node_size); - if (node != NULL) { - ASSERT(size_in_bytes <= *node_size); - page = Page::FromAddress(node->address()); - page->add_available_in_small_free_list(-(*node_size)); - } - } else if (size_in_bytes <= kMediumListMax) { - node = medium_list_.PickNodeFromList(size_in_bytes, node_size); - if (node != NULL) { - ASSERT(size_in_bytes <= *node_size); - page = Page::FromAddress(node->address()); - page->add_available_in_medium_free_list(-(*node_size)); - } - } else if (size_in_bytes <= kLargeListMax) { - node = large_list_.PickNodeFromList(size_in_bytes, node_size); - if (node != NULL) { - ASSERT(size_in_bytes <= *node_size); - page = Page::FromAddress(node->address()); - page->add_available_in_large_free_list(-(*node_size)); - } - } - - ASSERT(IsVeryLong() || available() == 
SumFreeLists()); - return node; -} - - -// Allocation on the old space free list. If it succeeds then a new linear -// allocation space has been set up with the top and limit of the space. If -// the allocation fails then NULL is returned, and the caller can perform a GC -// or allocate a new page before retrying. -HeapObject* FreeList::Allocate(int size_in_bytes) { - ASSERT(0 < size_in_bytes); - ASSERT(size_in_bytes <= kMaxBlockSize); - ASSERT(IsAligned(size_in_bytes, kPointerSize)); - // Don't free list allocate if there is linear space available. - ASSERT(owner_->limit() - owner_->top() < size_in_bytes); - - int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); - // Mark the old linear allocation area with a free space map so it can be - // skipped when scanning the heap. This also puts it back in the free list - // if it is big enough. - owner_->Free(owner_->top(), old_linear_size); - - owner_->heap()->incremental_marking()->OldSpaceStep( - size_in_bytes - old_linear_size); - - int new_node_size = 0; - FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); - if (new_node == NULL) { - owner_->SetTopAndLimit(NULL, NULL); - return NULL; - } - - int bytes_left = new_node_size - size_in_bytes; - ASSERT(bytes_left >= 0); - -#ifdef DEBUG - for (int i = 0; i < size_in_bytes / kPointerSize; i++) { - reinterpret_cast<Object**>(new_node->address())[i] = - Smi::FromInt(kCodeZapValue); - } -#endif - - // The old-space-step might have finished sweeping and restarted marking. - // Verify that it did not turn the page of the new node into an evacuation - // candidate. - ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); - - const int kThreshold = IncrementalMarking::kAllocatedThreshold; - - // Memory in the linear allocation area is counted as allocated. We may free - // a little of this again immediately - see below. 
- owner_->Allocate(new_node_size); - - if (owner_->heap()->inline_allocation_disabled()) { - // Keep the linear allocation area empty if requested to do so, just - // return area back to the free list instead. - owner_->Free(new_node->address() + size_in_bytes, bytes_left); - ASSERT(owner_->top() == NULL && owner_->limit() == NULL); - } else if (bytes_left > kThreshold && - owner_->heap()->incremental_marking()->IsMarkingIncomplete() && - FLAG_incremental_marking_steps) { - int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); - // We don't want to give too large linear areas to the allocator while - // incremental marking is going on, because we won't check again whether - // we want to do another increment until the linear area is used up. - owner_->Free(new_node->address() + size_in_bytes + linear_size, - new_node_size - size_in_bytes - linear_size); - owner_->SetTopAndLimit(new_node->address() + size_in_bytes, - new_node->address() + size_in_bytes + linear_size); - } else if (bytes_left > 0) { - // Normally we give the rest of the node to the allocator as its new - // linear allocation area. - owner_->SetTopAndLimit(new_node->address() + size_in_bytes, - new_node->address() + new_node_size); - } else { - // TODO(gc) Try not freeing linear allocation region when bytes_left - // are zero. 
- owner_->SetTopAndLimit(NULL, NULL); - } - - return new_node; -} - - -intptr_t FreeList::EvictFreeListItems(Page* p) { - intptr_t sum = huge_list_.EvictFreeListItemsInList(p); - p->set_available_in_huge_free_list(0); - - if (sum < p->area_size()) { - sum += small_list_.EvictFreeListItemsInList(p) + - medium_list_.EvictFreeListItemsInList(p) + - large_list_.EvictFreeListItemsInList(p); - p->set_available_in_small_free_list(0); - p->set_available_in_medium_free_list(0); - p->set_available_in_large_free_list(0); - } - - return sum; -} - - -bool FreeList::ContainsPageFreeListItems(Page* p) { - return huge_list_.EvictFreeListItemsInList(p) || - small_list_.EvictFreeListItemsInList(p) || - medium_list_.EvictFreeListItemsInList(p) || - large_list_.EvictFreeListItemsInList(p); -} - - -void FreeList::RepairLists(Heap* heap) { - small_list_.RepairFreeList(heap); - medium_list_.RepairFreeList(heap); - large_list_.RepairFreeList(heap); - huge_list_.RepairFreeList(heap); -} - - -#ifdef DEBUG -intptr_t FreeListCategory::SumFreeList() { - intptr_t sum = 0; - FreeListNode* cur = top(); - while (cur != NULL) { - ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); - FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); - sum += cur_as_free_space->Size(); - cur = cur->next(); - } - return sum; -} - - -static const int kVeryLongFreeList = 500; - - -int FreeListCategory::FreeListLength() { - int length = 0; - FreeListNode* cur = top(); - while (cur != NULL) { - length++; - cur = cur->next(); - if (length == kVeryLongFreeList) return length; - } - return length; -} - - -bool FreeList::IsVeryLong() { - if (small_list_.FreeListLength() == kVeryLongFreeList) return true; - if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; - if (large_list_.FreeListLength() == kVeryLongFreeList) return true; - if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; - return false; -} - - -// This can take a very long time because it is linear in 
the number of entries -// on the free list, so it should not be called if FreeListLength returns -// kVeryLongFreeList. -intptr_t FreeList::SumFreeLists() { - intptr_t sum = small_list_.SumFreeList(); - sum += medium_list_.SumFreeList(); - sum += large_list_.SumFreeList(); - sum += huge_list_.SumFreeList(); - return sum; -} -#endif - - -// ----------------------------------------------------------------------------- -// OldSpace implementation - -void PagedSpace::PrepareForMarkCompact() { - // We don't have a linear allocation area while sweeping. It will be restored - // on the first allocation after the sweep. - EmptyAllocationInfo(); - - // Stop lazy sweeping and clear marking bits for unswept pages. - if (first_unswept_page_ != NULL) { - Page* p = first_unswept_page_; - do { - // Do not use ShouldBeSweptLazily predicate here. - // New evacuation candidates were selected but they still have - // to be swept before collection starts. - if (!p->WasSwept()) { - Bitmap::Clear(p); - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", - reinterpret_cast<intptr_t>(p)); - } - } - p = p->next_page(); - } while (p != anchor()); - } - first_unswept_page_ = Page::FromAddress(NULL); - unswept_free_bytes_ = 0; - - // Clear the free list before a full GC---it will be rebuilt afterward. - free_list_.Reset(); -} - - -intptr_t PagedSpace::SizeOfObjects() { - ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); - return Size() - unswept_free_bytes_ - (limit() - top()); -} - - -// After we have booted, we have created a map which represents free space -// on the heap. If there was already a free list then the elements on it -// were created with the wrong FreeSpaceMap (normally NULL), so we need to -// fix them. 
-void PagedSpace::RepairFreeListsAfterBoot() { - free_list_.RepairLists(heap()); -} - - -bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { - if (IsLazySweepingComplete()) return true; - - intptr_t freed_bytes = 0; - Page* p = first_unswept_page_; - do { - Page* next_page = p->next_page(); - if (ShouldBeSweptLazily(p)) { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", - reinterpret_cast<intptr_t>(p)); - } - DecreaseUnsweptFreeBytes(p); - freed_bytes += - MarkCompactCollector:: - SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>( - this, NULL, p); - } - p = next_page; - } while (p != anchor() && freed_bytes < bytes_to_sweep); - - if (p == anchor()) { - first_unswept_page_ = Page::FromAddress(NULL); - } else { - first_unswept_page_ = p; - } - - heap()->FreeQueuedChunks(); - - return IsLazySweepingComplete(); -} - - -void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { - if (allocation_info_.top() >= allocation_info_.limit()) return; - - if (Page::FromAllocationTop(allocation_info_.top())-> - IsEvacuationCandidate()) { - // Create filler object to keep page iterable if it was iterable. 
- int remaining = - static_cast<int>(allocation_info_.limit() - allocation_info_.top()); - heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); - - allocation_info_.set_top(NULL); - allocation_info_.set_limit(NULL); - } -} - - -bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { - MarkCompactCollector* collector = heap()->mark_compact_collector(); - if (collector->AreSweeperThreadsActivated()) { - if (collector->IsConcurrentSweepingInProgress()) { - if (collector->RefillFreeLists(this) < size_in_bytes) { - if (!collector->sequential_sweeping()) { - collector->WaitUntilSweepingCompleted(); - return true; - } - } - return false; - } - return true; - } else { - return AdvanceSweeper(size_in_bytes); - } -} - - -HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { - // Allocation in this space has failed. - - // If there are unswept pages advance lazy sweeper a bounded number of times - // until we find a size_in_bytes contiguous piece of memory - const int kMaxSweepingTries = 5; - bool sweeping_complete = false; - - for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { - sweeping_complete = EnsureSweeperProgress(size_in_bytes); - - // Retry the free list allocation. - HeapObject* object = free_list_.Allocate(size_in_bytes); - if (object != NULL) return object; - } - - // Free list allocation failed and there is no next page. Fail if we have - // hit the old generation size limit that should cause a garbage - // collection. - if (!heap()->always_allocate() && - heap()->OldGenerationAllocationLimitReached()) { - return NULL; - } - - // Try to expand the space and allocate in the new next page. - if (Expand()) { - ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); - return free_list_.Allocate(size_in_bytes); - } - - // Last ditch, sweep all the remaining pages to try to find space. This may - // cause a pause. 
- if (!IsLazySweepingComplete()) { - EnsureSweeperProgress(kMaxInt); - - // Retry the free list allocation. - HeapObject* object = free_list_.Allocate(size_in_bytes); - if (object != NULL) return object; - } - - // Finally, fail. - return NULL; -} - - -#ifdef DEBUG -void PagedSpace::ReportCodeStatistics(Isolate* isolate) { - CommentStatistic* comments_statistics = - isolate->paged_space_comments_statistics(); - ReportCodeKindStatistics(isolate->code_kind_statistics()); - PrintF("Code comment statistics (\" [ comment-txt : size/ " - "count (average)\"):\n"); - for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { - const CommentStatistic& cs = comments_statistics[i]; - if (cs.size > 0) { - PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, - cs.size/cs.count); - } - } - PrintF("\n"); -} - - -void PagedSpace::ResetCodeStatistics(Isolate* isolate) { - CommentStatistic* comments_statistics = - isolate->paged_space_comments_statistics(); - ClearCodeKindStatistics(isolate->code_kind_statistics()); - for (int i = 0; i < CommentStatistic::kMaxComments; i++) { - comments_statistics[i].Clear(); - } - comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; - comments_statistics[CommentStatistic::kMaxComments].size = 0; - comments_statistics[CommentStatistic::kMaxComments].count = 0; -} - - -// Adds comment to 'comment_statistics' table. Performance OK as long as -// 'kMaxComments' is small -static void EnterComment(Isolate* isolate, const char* comment, int delta) { - CommentStatistic* comments_statistics = - isolate->paged_space_comments_statistics(); - // Do not count empty comments - if (delta <= 0) return; - CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; - // Search for a free or matching entry in 'comments_statistics': 'cs' - // points to result. 
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) { - if (comments_statistics[i].comment == NULL) { - cs = &comments_statistics[i]; - cs->comment = comment; - break; - } else if (strcmp(comments_statistics[i].comment, comment) == 0) { - cs = &comments_statistics[i]; - break; - } - } - // Update entry for 'comment' - cs->size += delta; - cs->count += 1; -} - - -// Call for each nested comment start (start marked with '[ xxx', end marked -// with ']'. RelocIterator 'it' must point to a comment reloc info. -static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { - ASSERT(!it->done()); - ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT); - const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); - if (tmp[0] != '[') { - // Not a nested comment; skip - return; - } - - // Search for end of nested comment or a new nested comment - const char* const comment_txt = - reinterpret_cast<const char*>(it->rinfo()->data()); - const byte* prev_pc = it->rinfo()->pc(); - int flat_delta = 0; - it->next(); - while (true) { - // All nested comments must be terminated properly, and therefore exit - // from loop. 
- ASSERT(!it->done()); - if (it->rinfo()->rmode() == RelocInfo::COMMENT) { - const char* const txt = - reinterpret_cast<const char*>(it->rinfo()->data()); - flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); - if (txt[0] == ']') break; // End of nested comment - // A new comment - CollectCommentStatistics(isolate, it); - // Skip code that was covered with previous comment - prev_pc = it->rinfo()->pc(); - } - it->next(); - } - EnterComment(isolate, comment_txt, flat_delta); -} - - -// Collects code size statistics: -// - by code kind -// - by code comment -void PagedSpace::CollectCodeStatistics() { - Isolate* isolate = heap()->isolate(); - HeapObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { - if (obj->IsCode()) { - Code* code = Code::cast(obj); - isolate->code_kind_statistics()[code->kind()] += code->Size(); - RelocIterator it(code); - int delta = 0; - const byte* prev_pc = code->instruction_start(); - while (!it.done()) { - if (it.rinfo()->rmode() == RelocInfo::COMMENT) { - delta += static_cast<int>(it.rinfo()->pc() - prev_pc); - CollectCommentStatistics(isolate, &it); - prev_pc = it.rinfo()->pc(); - } - it.next(); - } - - ASSERT(code->instruction_start() <= prev_pc && - prev_pc <= code->instruction_end()); - delta += static_cast<int>(code->instruction_end() - prev_pc); - EnterComment(isolate, "NoComment", delta); - } - } -} - - -void PagedSpace::ReportStatistics() { - int pct = static_cast<int>(Available() * 100 / Capacity()); - PrintF(" capacity: %" V8_PTR_PREFIX "d" - ", waste: %" V8_PTR_PREFIX "d" - ", available: %" V8_PTR_PREFIX "d, %%%d\n", - Capacity(), Waste(), Available(), pct); - - if (was_swept_conservatively_) return; - ClearHistograms(heap()->isolate()); - HeapObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) - CollectHistogramInfo(obj); - ReportHistogram(heap()->isolate(), true); -} -#endif - - -// 
----------------------------------------------------------------------------- -// MapSpace implementation -// TODO(mvstanton): this is weird...the compiler can't make a vtable unless -// there is at least one non-inlined virtual function. I would prefer to hide -// the VerifyObject definition behind VERIFY_HEAP. - -void MapSpace::VerifyObject(HeapObject* object) { - CHECK(object->IsMap()); -} - - -// ----------------------------------------------------------------------------- -// CellSpace and PropertyCellSpace implementation -// TODO(mvstanton): this is weird...the compiler can't make a vtable unless -// there is at least one non-inlined virtual function. I would prefer to hide -// the VerifyObject definition behind VERIFY_HEAP. - -void CellSpace::VerifyObject(HeapObject* object) { - CHECK(object->IsCell()); -} - - -void PropertyCellSpace::VerifyObject(HeapObject* object) { - CHECK(object->IsPropertyCell()); -} - - -// ----------------------------------------------------------------------------- -// LargeObjectIterator - -LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { - current_ = space->first_page_; - size_func_ = NULL; -} - - -LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, - HeapObjectCallback size_func) { - current_ = space->first_page_; - size_func_ = size_func; -} - - -HeapObject* LargeObjectIterator::Next() { - if (current_ == NULL) return NULL; - - HeapObject* object = current_->GetObject(); - current_ = current_->next_page(); - return object; -} - - -// ----------------------------------------------------------------------------- -// LargeObjectSpace -static bool ComparePointers(void* key1, void* key2) { - return key1 == key2; -} - - -LargeObjectSpace::LargeObjectSpace(Heap* heap, - intptr_t max_capacity, - AllocationSpace id) - : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis - max_capacity_(max_capacity), - first_page_(NULL), - size_(0), - page_count_(0), - objects_size_(0), - 
chunk_map_(ComparePointers, 1024) {} - - -bool LargeObjectSpace::SetUp() { - first_page_ = NULL; - size_ = 0; - maximum_committed_ = 0; - page_count_ = 0; - objects_size_ = 0; - chunk_map_.Clear(); - return true; -} - - -void LargeObjectSpace::TearDown() { - while (first_page_ != NULL) { - LargePage* page = first_page_; - first_page_ = first_page_->next_page(); - LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); - - ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); - heap()->isolate()->memory_allocator()->PerformAllocationCallback( - space, kAllocationActionFree, page->size()); - heap()->isolate()->memory_allocator()->Free(page); - } - SetUp(); -} - - -MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, - Executability executable) { - // Check if we want to force a GC before growing the old space further. - // If so, fail the allocation. - if (!heap()->always_allocate() && - heap()->OldGenerationAllocationLimitReached()) { - return Failure::RetryAfterGC(identity()); - } - - if (Size() + object_size > max_capacity_) { - return Failure::RetryAfterGC(identity()); - } - - LargePage* page = heap()->isolate()->memory_allocator()-> - AllocateLargePage(object_size, this, executable); - if (page == NULL) return Failure::RetryAfterGC(identity()); - ASSERT(page->area_size() >= object_size); - - size_ += static_cast<int>(page->size()); - objects_size_ += object_size; - page_count_++; - page->set_next_page(first_page_); - first_page_ = page; - - if (size_ > maximum_committed_) { - maximum_committed_ = size_; - } - - // Register all MemoryChunk::kAlignment-aligned chunks covered by - // this large page in the chunk map. 
- uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; - uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; - for (uintptr_t key = base; key <= limit; key++) { - HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), - static_cast<uint32_t>(key), - true); - ASSERT(entry != NULL); - entry->value = page; - } - - HeapObject* object = page->GetObject(); - - if (Heap::ShouldZapGarbage()) { - // Make the object consistent so the heap can be verified in OldSpaceStep. - // We only need to do this in debug builds or if verify_heap is on. - reinterpret_cast<Object**>(object->address())[0] = - heap()->fixed_array_map(); - reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); - } - - heap()->incremental_marking()->OldSpaceStep(object_size); - return object; -} - - -size_t LargeObjectSpace::CommittedPhysicalMemory() { - if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); - size_t size = 0; - LargePage* current = first_page_; - while (current != NULL) { - size += current->CommittedPhysicalMemory(); - current = current->next_page(); - } - return size; -} - - -// GC support -MaybeObject* LargeObjectSpace::FindObject(Address a) { - LargePage* page = FindPage(a); - if (page != NULL) { - return page->GetObject(); - } - return Failure::Exception(); -} - - -LargePage* LargeObjectSpace::FindPage(Address a) { - uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; - HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), - static_cast<uint32_t>(key), - false); - if (e != NULL) { - ASSERT(e->value != NULL); - LargePage* page = reinterpret_cast<LargePage*>(e->value); - ASSERT(page->is_valid()); - if (page->Contains(a)) { - return page; - } - } - return NULL; -} - - -void LargeObjectSpace::FreeUnmarkedObjects() { - LargePage* previous = NULL; - LargePage* current = first_page_; - while (current != NULL) { - HeapObject* object = current->GetObject(); - // Can this large 
page contain pointers to non-trivial objects. No other - // pointer object is this big. - bool is_pointer_object = object->IsFixedArray(); - MarkBit mark_bit = Marking::MarkBitFrom(object); - if (mark_bit.Get()) { - mark_bit.Clear(); - Page::FromAddress(object->address())->ResetProgressBar(); - Page::FromAddress(object->address())->ResetLiveBytes(); - previous = current; - current = current->next_page(); - } else { - LargePage* page = current; - // Cut the chunk out from the chunk list. - current = current->next_page(); - if (previous == NULL) { - first_page_ = current; - } else { - previous->set_next_page(current); - } - - // Free the chunk. - heap()->mark_compact_collector()->ReportDeleteIfNeeded( - object, heap()->isolate()); - size_ -= static_cast<int>(page->size()); - objects_size_ -= object->Size(); - page_count_--; - - // Remove entries belonging to this page. - // Use variable alignment to help pass length check (<= 80 characters) - // of single line in tools/presubmit.py. - const intptr_t alignment = MemoryChunk::kAlignment; - uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment; - uintptr_t limit = base + (page->size()-1)/alignment; - for (uintptr_t key = base; key <= limit; key++) { - chunk_map_.Remove(reinterpret_cast<void*>(key), - static_cast<uint32_t>(key)); - } - - if (is_pointer_object) { - heap()->QueueMemoryChunkForFree(page); - } else { - heap()->isolate()->memory_allocator()->Free(page); - } - } - } - heap()->FreeQueuedChunks(); -} - - -bool LargeObjectSpace::Contains(HeapObject* object) { - Address address = object->address(); - MemoryChunk* chunk = MemoryChunk::FromAddress(address); - - bool owned = (chunk->owner() == this); - - SLOW_ASSERT(!owned || !FindObject(address)->IsFailure()); - - return owned; -} - - -#ifdef VERIFY_HEAP -// We do not assume that the large object iterator works, because it depends -// on the invariants we are checking during verification. 
-void LargeObjectSpace::Verify() { - for (LargePage* chunk = first_page_; - chunk != NULL; - chunk = chunk->next_page()) { - // Each chunk contains an object that starts at the large object page's - // object area start. - HeapObject* object = chunk->GetObject(); - Page* page = Page::FromAddress(object->address()); - CHECK(object->address() == page->area_start()); - - // The first word should be a map, and we expect all map pointers to be - // in map space. - Map* map = object->map(); - CHECK(map->IsMap()); - CHECK(heap()->map_space()->Contains(map)); - - // We have only code, sequential strings, external strings - // (sequential strings that have been morphed into external - // strings), fixed arrays, and byte arrays in large object space. - CHECK(object->IsCode() || object->IsSeqString() || - object->IsExternalString() || object->IsFixedArray() || - object->IsFixedDoubleArray() || object->IsByteArray()); - - // The object itself should look OK. - object->Verify(); - - // Byte arrays and strings don't have interior pointers. 
- if (object->IsCode()) { - VerifyPointersVisitor code_visitor; - object->IterateBody(map->instance_type(), - object->Size(), - &code_visitor); - } else if (object->IsFixedArray()) { - FixedArray* array = FixedArray::cast(object); - for (int j = 0; j < array->length(); j++) { - Object* element = array->get(j); - if (element->IsHeapObject()) { - HeapObject* element_object = HeapObject::cast(element); - CHECK(heap()->Contains(element_object)); - CHECK(element_object->map()->IsMap()); - } - } - } - } -} -#endif - - -#ifdef DEBUG -void LargeObjectSpace::Print() { - LargeObjectIterator it(this); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - obj->Print(); - } -} - - -void LargeObjectSpace::ReportStatistics() { - PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); - int num_objects = 0; - ClearHistograms(heap()->isolate()); - LargeObjectIterator it(this); - for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { - num_objects++; - CollectHistogramInfo(obj); - } - - PrintF(" number of objects %d, " - "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); - if (num_objects > 0) ReportHistogram(heap()->isolate(), false); -} - - -void LargeObjectSpace::CollectCodeStatistics() { - Isolate* isolate = heap()->isolate(); - LargeObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { - if (obj->IsCode()) { - Code* code = Code::cast(obj); - isolate->code_kind_statistics()[code->kind()] += code->Size(); - } - } -} - - -void Page::Print() { - // Make a best-effort to print the objects in the page. 
- PrintF("Page@%p in %s\n", - this->address(), - AllocationSpaceName(this->owner()->identity())); - printf(" --------------------------------------\n"); - HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); - unsigned mark_size = 0; - for (HeapObject* object = objects.Next(); - object != NULL; - object = objects.Next()) { - bool is_marked = Marking::MarkBitFrom(object).Get(); - PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. - if (is_marked) { - mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); - } - object->ShortPrint(); - PrintF("\n"); - } - printf(" --------------------------------------\n"); - printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); -} - -#endif // DEBUG - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/spaces.h nodejs-0.11.15/deps/v8/src/spaces.h --- nodejs-0.11.13/deps/v8/src/spaces.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/spaces.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,2990 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_SPACES_H_ -#define V8_SPACES_H_ - -#include "allocation.h" -#include "hashmap.h" -#include "list.h" -#include "log.h" -#include "platform/mutex.h" -#include "v8utils.h" - -namespace v8 { -namespace internal { - -class Isolate; - -// ----------------------------------------------------------------------------- -// Heap structures: -// -// A JS heap consists of a young generation, an old generation, and a large -// object space. The young generation is divided into two semispaces. A -// scavenger implements Cheney's copying algorithm. The old generation is -// separated into a map space and an old object space. The map space contains -// all (and only) map objects, the rest of old objects go into the old space. -// The old generation is collected by a mark-sweep-compact collector. -// -// The semispaces of the young generation are contiguous. The old and map -// spaces consists of a list of pages. A page has a page header and an object -// area. -// -// There is a separate large object space for objects larger than -// Page::kMaxHeapObjectSize, so that they do not have to move during -// collection. The large object space is paged. 
Pages in large object space -// may be larger than the page size. -// -// A store-buffer based write barrier is used to keep track of intergenerational -// references. See store-buffer.h. -// -// During scavenges and mark-sweep collections we sometimes (after a store -// buffer overflow) iterate intergenerational pointers without decoding heap -// object maps so if the page belongs to old pointer space or large object -// space it is essential to guarantee that the page does not contain any -// garbage pointers to new space: every pointer aligned word which satisfies -// the Heap::InNewSpace() predicate must be a pointer to a live heap object in -// new space. Thus objects in old pointer and large object spaces should have a -// special layout (e.g. no bare integer fields). This requirement does not -// apply to map space which is iterated in a special fashion. However we still -// require pointer fields of dead maps to be cleaned. -// -// To enable lazy cleaning of old space pages we can mark chunks of the page -// as being garbage. Garbage sections are marked with a special map. These -// sections are skipped when scanning the page, even if we are otherwise -// scanning without regard for object boundaries. Garbage sections are chained -// together to form a free list after a GC. Garbage sections created outside -// of GCs by object trunctation etc. may not be in the free list chain. Very -// small free spaces are ignored, they need only be cleaned of bogus pointers -// into new space. -// -// Each page may have up to one special garbage section. The start of this -// section is denoted by the top field in the space. The end of the section -// is denoted by the limit field in the space. This special garbage section -// is not marked with a free space map in the data. The point of this section -// is to enable linear allocation without having to constantly update the byte -// array every time the top field is updated and a new object is created. 
The -// special garbage section is not in the chain of garbage sections. -// -// Since the top and limit fields are in the space, not the page, only one page -// has a special garbage section, and if the top and limit are equal then there -// is no special garbage section. - -// Some assertion macros used in the debugging mode. - -#define ASSERT_PAGE_ALIGNED(address) \ - ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) - -#define ASSERT_OBJECT_ALIGNED(address) \ - ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) - -#define ASSERT_OBJECT_SIZE(size) \ - ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) - -#define ASSERT_PAGE_OFFSET(offset) \ - ASSERT((Page::kObjectStartOffset <= offset) \ - && (offset <= Page::kPageSize)) - -#define ASSERT_MAP_PAGE_INDEX(index) \ - ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) - - -class PagedSpace; -class MemoryAllocator; -class AllocationInfo; -class Space; -class FreeList; -class MemoryChunk; - -class MarkBit { - public: - typedef uint32_t CellType; - - inline MarkBit(CellType* cell, CellType mask, bool data_only) - : cell_(cell), mask_(mask), data_only_(data_only) { } - - inline CellType* cell() { return cell_; } - inline CellType mask() { return mask_; } - -#ifdef DEBUG - bool operator==(const MarkBit& other) { - return cell_ == other.cell_ && mask_ == other.mask_; - } -#endif - - inline void Set() { *cell_ |= mask_; } - inline bool Get() { return (*cell_ & mask_) != 0; } - inline void Clear() { *cell_ &= ~mask_; } - - inline bool data_only() { return data_only_; } - - inline MarkBit Next() { - CellType new_mask = mask_ << 1; - if (new_mask == 0) { - return MarkBit(cell_ + 1, 1, data_only_); - } else { - return MarkBit(cell_, new_mask, data_only_); - } - } - - private: - CellType* cell_; - CellType mask_; - // This boolean indicates that the object is in a data-only space with no - // pointers. This enables some optimizations when marking. 
- // It is expected that this field is inlined and turned into control flow - // at the place where the MarkBit object is created. - bool data_only_; -}; - - -// Bitmap is a sequence of cells each containing fixed number of bits. -class Bitmap { - public: - static const uint32_t kBitsPerCell = 32; - static const uint32_t kBitsPerCellLog2 = 5; - static const uint32_t kBitIndexMask = kBitsPerCell - 1; - static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; - static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; - - static const size_t kLength = - (1 << kPageSizeBits) >> (kPointerSizeLog2); - - static const size_t kSize = - (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); - - - static int CellsForLength(int length) { - return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; - } - - int CellsCount() { - return CellsForLength(kLength); - } - - static int SizeFor(int cells_count) { - return sizeof(MarkBit::CellType) * cells_count; - } - - INLINE(static uint32_t IndexToCell(uint32_t index)) { - return index >> kBitsPerCellLog2; - } - - INLINE(static uint32_t CellToIndex(uint32_t index)) { - return index << kBitsPerCellLog2; - } - - INLINE(static uint32_t CellAlignIndex(uint32_t index)) { - return (index + kBitIndexMask) & ~kBitIndexMask; - } - - INLINE(MarkBit::CellType* cells()) { - return reinterpret_cast<MarkBit::CellType*>(this); - } - - INLINE(Address address()) { - return reinterpret_cast<Address>(this); - } - - INLINE(static Bitmap* FromAddress(Address addr)) { - return reinterpret_cast<Bitmap*>(addr); - } - - inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { - MarkBit::CellType mask = 1 << (index & kBitIndexMask); - MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); - return MarkBit(cell, mask, data_only); - } - - static inline void Clear(MemoryChunk* chunk); - - static void PrintWord(uint32_t word, uint32_t himask = 0) { - for (uint32_t mask = 1; mask != 0; mask <<= 1) 
{ - if ((mask & himask) != 0) PrintF("["); - PrintF((mask & word) ? "1" : "0"); - if ((mask & himask) != 0) PrintF("]"); - } - } - - class CellPrinter { - public: - CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { } - - void Print(uint32_t pos, uint32_t cell) { - if (cell == seq_type) { - seq_length++; - return; - } - - Flush(); - - if (IsSeq(cell)) { - seq_start = pos; - seq_length = 0; - seq_type = cell; - return; - } - - PrintF("%d: ", pos); - PrintWord(cell); - PrintF("\n"); - } - - void Flush() { - if (seq_length > 0) { - PrintF("%d: %dx%d\n", - seq_start, - seq_type == 0 ? 0 : 1, - seq_length * kBitsPerCell); - seq_length = 0; - } - } - - static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } - - private: - uint32_t seq_start; - uint32_t seq_type; - uint32_t seq_length; - }; - - void Print() { - CellPrinter printer; - for (int i = 0; i < CellsCount(); i++) { - printer.Print(i, cells()[i]); - } - printer.Flush(); - PrintF("\n"); - } - - bool IsClean() { - for (int i = 0; i < CellsCount(); i++) { - if (cells()[i] != 0) { - return false; - } - } - return true; - } -}; - - -class SkipList; -class SlotsBuffer; - -// MemoryChunk represents a memory region owned by a specific space. -// It is divided into the header and the body. Chunk start is always -// 1MB aligned. Start of the body is aligned so it can accommodate -// any heap object. -class MemoryChunk { - public: - // Only works if the pointer is in the first kPageSize of the MemoryChunk. - static MemoryChunk* FromAddress(Address a) { - return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); - } - - // Only works for addresses in pointer spaces, not data or code spaces. 
- static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); - - Address address() { return reinterpret_cast<Address>(this); } - - bool is_valid() { return address() != NULL; } - - MemoryChunk* next_chunk() const { - return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_)); - } - - MemoryChunk* prev_chunk() const { - return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_)); - } - - void set_next_chunk(MemoryChunk* next) { - Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next)); - } - - void set_prev_chunk(MemoryChunk* prev) { - Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev)); - } - - Space* owner() const { - if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == - kFailureTag) { - return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - - kFailureTag); - } else { - return NULL; - } - } - - void set_owner(Space* space) { - ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); - owner_ = reinterpret_cast<Address>(space) + kFailureTag; - ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == - kFailureTag); - } - - VirtualMemory* reserved_memory() { - return &reservation_; - } - - void InitializeReservedMemory() { - reservation_.Reset(); - } - - void set_reserved_memory(VirtualMemory* reservation) { - ASSERT_NOT_NULL(reservation); - reservation_.TakeControl(reservation); - } - - bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } - void initialize_scan_on_scavenge(bool scan) { - if (scan) { - SetFlag(SCAN_ON_SCAVENGE); - } else { - ClearFlag(SCAN_ON_SCAVENGE); - } - } - inline void set_scan_on_scavenge(bool scan); - - int store_buffer_counter() { return store_buffer_counter_; } - void set_store_buffer_counter(int counter) { - store_buffer_counter_ = counter; - } - - bool Contains(Address addr) { - return addr >= area_start() && addr < area_end(); - } - - // Checks whether addr can be a limit of addresses in this page. 
- // It's a limit if it's in the page, or if it's just after the - // last byte of the page. - bool ContainsLimit(Address addr) { - return addr >= area_start() && addr <= area_end(); - } - - // Every n write barrier invocations we go to runtime even though - // we could have handled it in generated code. This lets us check - // whether we have hit the limit and should do some more marking. - static const int kWriteBarrierCounterGranularity = 500; - - enum MemoryChunkFlags { - IS_EXECUTABLE, - ABOUT_TO_BE_FREED, - POINTERS_TO_HERE_ARE_INTERESTING, - POINTERS_FROM_HERE_ARE_INTERESTING, - SCAN_ON_SCAVENGE, - IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. - IN_TO_SPACE, // All pages in new space has one of these two set. - NEW_SPACE_BELOW_AGE_MARK, - CONTAINS_ONLY_DATA, - EVACUATION_CANDIDATE, - RESCAN_ON_EVACUATION, - - // Pages swept precisely can be iterated, hitting only the live objects. - // Whereas those swept conservatively cannot be iterated over. Both flags - // indicate that marking bits have been cleared by the sweeper, otherwise - // marking bits are still intact. - WAS_SWEPT_PRECISELY, - WAS_SWEPT_CONSERVATIVELY, - - // Large objects can have a progress bar in their page header. These object - // are scanned in increments and will be kept black while being scanned. - // Even if the mutator writes to them they will be kept black and a white - // to grey transition is performed in the value. - HAS_PROGRESS_BAR, - - // Last flag, keep at bottom. 
- NUM_MEMORY_CHUNK_FLAGS - }; - - - static const int kPointersToHereAreInterestingMask = - 1 << POINTERS_TO_HERE_ARE_INTERESTING; - - static const int kPointersFromHereAreInterestingMask = - 1 << POINTERS_FROM_HERE_ARE_INTERESTING; - - static const int kEvacuationCandidateMask = - 1 << EVACUATION_CANDIDATE; - - static const int kSkipEvacuationSlotsRecordingMask = - (1 << EVACUATION_CANDIDATE) | - (1 << RESCAN_ON_EVACUATION) | - (1 << IN_FROM_SPACE) | - (1 << IN_TO_SPACE); - - - void SetFlag(int flag) { - flags_ |= static_cast<uintptr_t>(1) << flag; - } - - void ClearFlag(int flag) { - flags_ &= ~(static_cast<uintptr_t>(1) << flag); - } - - void SetFlagTo(int flag, bool value) { - if (value) { - SetFlag(flag); - } else { - ClearFlag(flag); - } - } - - bool IsFlagSet(int flag) { - return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; - } - - // Set or clear multiple flags at a time. The flags in the mask - // are set to the value in "flags", the rest retain the current value - // in flags_. - void SetFlags(intptr_t flags, intptr_t mask) { - flags_ = (flags_ & ~mask) | (flags & mask); - } - - // Return all current flags. - intptr_t GetFlags() { return flags_; } - - - // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or - // sweeping must not be performed on that page. - // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this - // page and will not touch the page memory anymore. - // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a - // sweeper thread. - // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping. 
- enum ParallelSweepingState { - PARALLEL_SWEEPING_DONE, - PARALLEL_SWEEPING_FINALIZE, - PARALLEL_SWEEPING_IN_PROGRESS, - PARALLEL_SWEEPING_PENDING - }; - - ParallelSweepingState parallel_sweeping() { - return static_cast<ParallelSweepingState>( - Acquire_Load(¶llel_sweeping_)); - } - - void set_parallel_sweeping(ParallelSweepingState state) { - Release_Store(¶llel_sweeping_, state); - } - - bool TryParallelSweeping() { - return Acquire_CompareAndSwap(¶llel_sweeping_, - PARALLEL_SWEEPING_PENDING, - PARALLEL_SWEEPING_IN_PROGRESS) == - PARALLEL_SWEEPING_PENDING; - } - - // Manage live byte count (count of bytes known to be live, - // because they are marked black). - void ResetLiveBytes() { - if (FLAG_gc_verbose) { - PrintF("ResetLiveBytes:%p:%x->0\n", - static_cast<void*>(this), live_byte_count_); - } - live_byte_count_ = 0; - } - void IncrementLiveBytes(int by) { - if (FLAG_gc_verbose) { - printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", - static_cast<void*>(this), live_byte_count_, - ((by < 0) ? '-' : '+'), ((by < 0) ? 
-by : by), - live_byte_count_ + by); - } - live_byte_count_ += by; - ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_); - } - int LiveBytes() { - ASSERT(static_cast<unsigned>(live_byte_count_) <= size_); - return live_byte_count_; - } - - int write_barrier_counter() { - return static_cast<int>(write_barrier_counter_); - } - - void set_write_barrier_counter(int counter) { - write_barrier_counter_ = counter; - } - - int progress_bar() { - ASSERT(IsFlagSet(HAS_PROGRESS_BAR)); - return progress_bar_; - } - - void set_progress_bar(int progress_bar) { - ASSERT(IsFlagSet(HAS_PROGRESS_BAR)); - progress_bar_ = progress_bar; - } - - void ResetProgressBar() { - if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { - set_progress_bar(0); - ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); - } - } - - bool IsLeftOfProgressBar(Object** slot) { - Address slot_address = reinterpret_cast<Address>(slot); - ASSERT(slot_address > this->address()); - return (slot_address - (this->address() + kObjectStartOffset)) < - progress_bar(); - } - - static void IncrementLiveBytesFromGC(Address address, int by) { - MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); - } - - static void IncrementLiveBytesFromMutator(Address address, int by); - - static const intptr_t kAlignment = - (static_cast<uintptr_t>(1) << kPageSizeBits); - - static const intptr_t kAlignmentMask = kAlignment - 1; - - static const intptr_t kSizeOffset = 0; - - static const intptr_t kLiveBytesOffset = - kSizeOffset + kPointerSize + kPointerSize + kPointerSize + - kPointerSize + kPointerSize + - kPointerSize + kPointerSize + kPointerSize + kIntSize; - - static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; - - static const size_t kWriteBarrierCounterOffset = - kSlotsBufferOffset + kPointerSize + kPointerSize; - - static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + - kIntSize + kIntSize + kPointerSize + - 5 * kPointerSize + - kPointerSize + kPointerSize; - - static const int kBodyOffset 
= - CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); - - // The start offset of the object area in a page. Aligned to both maps and - // code alignment to be suitable for both. Also aligned to 32 words because - // the marking bitmap is arranged in 32 bit chunks. - static const int kObjectStartAlignment = 32 * kPointerSize; - static const int kObjectStartOffset = kBodyOffset - 1 + - (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); - - size_t size() const { return size_; } - - void set_size(size_t size) { - size_ = size; - } - - void SetArea(Address area_start, Address area_end) { - area_start_ = area_start; - area_end_ = area_end; - } - - Executability executable() { - return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; - } - - bool ContainsOnlyData() { - return IsFlagSet(CONTAINS_ONLY_DATA); - } - - bool InNewSpace() { - return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; - } - - bool InToSpace() { - return IsFlagSet(IN_TO_SPACE); - } - - bool InFromSpace() { - return IsFlagSet(IN_FROM_SPACE); - } - - // --------------------------------------------------------------------- - // Markbits support - - inline Bitmap* markbits() { - return Bitmap::FromAddress(address() + kHeaderSize); - } - - void PrintMarkbits() { markbits()->Print(); } - - inline uint32_t AddressToMarkbitIndex(Address addr) { - return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; - } - - inline static uint32_t FastAddressToMarkbitIndex(Address addr) { - const intptr_t offset = - reinterpret_cast<intptr_t>(addr) & kAlignmentMask; - - return static_cast<uint32_t>(offset) >> kPointerSizeLog2; - } - - inline Address MarkbitIndexToAddress(uint32_t index) { - return this->address() + (index << kPointerSizeLog2); - } - - void InsertAfter(MemoryChunk* other); - void Unlink(); - - inline Heap* heap() { return heap_; } - - static const int kFlagsOffset = kPointerSize; - - bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } 
- - bool ShouldSkipEvacuationSlotRecording() { - return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; - } - - inline SkipList* skip_list() { - return skip_list_; - } - - inline void set_skip_list(SkipList* skip_list) { - skip_list_ = skip_list; - } - - inline SlotsBuffer* slots_buffer() { - return slots_buffer_; - } - - inline SlotsBuffer** slots_buffer_address() { - return &slots_buffer_; - } - - void MarkEvacuationCandidate() { - ASSERT(slots_buffer_ == NULL); - SetFlag(EVACUATION_CANDIDATE); - } - - void ClearEvacuationCandidate() { - ASSERT(slots_buffer_ == NULL); - ClearFlag(EVACUATION_CANDIDATE); - } - - Address area_start() { return area_start_; } - Address area_end() { return area_end_; } - int area_size() { - return static_cast<int>(area_end() - area_start()); - } - bool CommitArea(size_t requested); - - // Approximate amount of physical memory committed for this chunk. - size_t CommittedPhysicalMemory() { - return high_water_mark_; - } - - static inline void UpdateHighWaterMark(Address mark); - - protected: - size_t size_; - intptr_t flags_; - - // Start and end of allocatable memory on this chunk. - Address area_start_; - Address area_end_; - - // If the chunk needs to remember its memory reservation, it is stored here. - VirtualMemory reservation_; - // The identity of the owning space. This is tagged as a failure pointer, but - // no failure can be in an object, so this can be distinguished from any entry - // in a fixed array. - Address owner_; - Heap* heap_; - // Used by the store buffer to keep track of which pages to mark scan-on- - // scavenge. - int store_buffer_counter_; - // Count of bytes marked black on page. - int live_byte_count_; - SlotsBuffer* slots_buffer_; - SkipList* skip_list_; - intptr_t write_barrier_counter_; - // Used by the incremental marker to keep track of the scanning progress in - // large objects that have a progress bar and are scanned in increments. 
- int progress_bar_; - // Assuming the initial allocation on a page is sequential, - // count highest number of bytes ever allocated on the page. - int high_water_mark_; - - AtomicWord parallel_sweeping_; - - // PagedSpace free-list statistics. - intptr_t available_in_small_free_list_; - intptr_t available_in_medium_free_list_; - intptr_t available_in_large_free_list_; - intptr_t available_in_huge_free_list_; - intptr_t non_available_small_blocks_; - - static MemoryChunk* Initialize(Heap* heap, - Address base, - size_t size, - Address area_start, - Address area_end, - Executability executable, - Space* owner); - - private: - // next_chunk_ holds a pointer of type MemoryChunk - AtomicWord next_chunk_; - // prev_chunk_ holds a pointer of type MemoryChunk - AtomicWord prev_chunk_; - - friend class MemoryAllocator; -}; - - -STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); - - -// ----------------------------------------------------------------------------- -// A page is a memory chunk of a size 1MB. Large object pages may be larger. -// -// The only way to get a page pointer is by calling factory methods: -// Page* p = Page::FromAddress(addr); or -// Page* p = Page::FromAllocationTop(top); -class Page : public MemoryChunk { - public: - // Returns the page containing a given address. The address ranges - // from [page_addr .. page_addr + kPageSize[ - // This only works if the object is in fact in a page. See also MemoryChunk:: - // FromAddress() and FromAnyAddress(). - INLINE(static Page* FromAddress(Address a)) { - return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); - } - - // Returns the page containing an allocation top. Because an allocation - // top address can be the upper bound of the page, we need to subtract - // it with kPointerSize first. The address ranges from - // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. 
- INLINE(static Page* FromAllocationTop(Address top)) { - Page* p = FromAddress(top - kPointerSize); - return p; - } - - // Returns the next page in the chain of pages owned by a space. - inline Page* next_page(); - inline Page* prev_page(); - inline void set_next_page(Page* page); - inline void set_prev_page(Page* page); - - // Checks whether an address is page aligned. - static bool IsAlignedToPageSize(Address a) { - return 0 == (OffsetFrom(a) & kPageAlignmentMask); - } - - // Returns the offset of a given address to this page. - INLINE(int Offset(Address a)) { - int offset = static_cast<int>(a - address()); - return offset; - } - - // Returns the address for a given offset to the this page. - Address OffsetToAddress(int offset) { - ASSERT_PAGE_OFFSET(offset); - return address() + offset; - } - - // --------------------------------------------------------------------- - - // Page size in bytes. This must be a multiple of the OS page size. - static const int kPageSize = 1 << kPageSizeBits; - - // Maximum object size that fits in a page. Objects larger than that size - // are allocated in large object space and are never moved in memory. This - // also applies to new space allocation, since objects are never migrated - // from new space to large object space. Takes double alignment into account. - static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset; - - // Page size mask. 
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; - - inline void ClearGCFields(); - - static inline Page* Initialize(Heap* heap, - MemoryChunk* chunk, - Executability executable, - PagedSpace* owner); - - void InitializeAsAnchor(PagedSpace* owner); - - bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } - bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } - bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } - - void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } - void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } - - void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } - void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } - - void ResetFreeListStatistics(); - -#define FRAGMENTATION_STATS_ACCESSORS(type, name) \ - type name() { return name##_; } \ - void set_##name(type name) { name##_ = name; } \ - void add_##name(type name) { name##_ += name; } - - FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks) - FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list) - FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list) - FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list) - FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list) - -#undef FRAGMENTATION_STATS_ACCESSORS - -#ifdef DEBUG - void Print(); -#endif // DEBUG - - friend class MemoryAllocator; -}; - - -STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); - - -class LargePage : public MemoryChunk { - public: - HeapObject* GetObject() { - return HeapObject::FromAddress(area_start()); - } - - inline LargePage* next_page() const { - return static_cast<LargePage*>(next_chunk()); - } - - inline void set_next_page(LargePage* page) { - set_next_chunk(page); - } - private: - static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); - - friend class MemoryAllocator; -}; - 
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize); - -// ---------------------------------------------------------------------------- -// Space is the abstract superclass for all allocation spaces. -class Space : public Malloced { - public: - Space(Heap* heap, AllocationSpace id, Executability executable) - : heap_(heap), id_(id), executable_(executable) {} - - virtual ~Space() {} - - Heap* heap() const { return heap_; } - - // Does the space need executable memory? - Executability executable() { return executable_; } - - // Identity used in error reporting. - AllocationSpace identity() { return id_; } - - // Returns allocated size. - virtual intptr_t Size() = 0; - - // Returns size of objects. Can differ from the allocated size - // (e.g. see LargeObjectSpace). - virtual intptr_t SizeOfObjects() { return Size(); } - - virtual int RoundSizeDownToObjectAlignment(int size) { - if (id_ == CODE_SPACE) { - return RoundDown(size, kCodeAlignment); - } else { - return RoundDown(size, kPointerSize); - } - } - -#ifdef DEBUG - virtual void Print() = 0; -#endif - - private: - Heap* heap_; - AllocationSpace id_; - Executability executable_; -}; - - -// ---------------------------------------------------------------------------- -// All heap objects containing executable code (code objects) must be allocated -// from a 2 GB range of memory, so that they can call each other using 32-bit -// displacements. This happens automatically on 32-bit platforms, where 32-bit -// displacements cover the entire 4GB virtual address space. On 64-bit -// platforms, we support this using the CodeRange object, which reserves and -// manages a range of virtual memory. -class CodeRange { - public: - explicit CodeRange(Isolate* isolate); - ~CodeRange() { TearDown(); } - - // Reserves a range of virtual memory, but does not commit any of it. - // Can only be called once, at heap initialization time. - // Returns false on failure. 
- bool SetUp(const size_t requested_size); - - // Frees the range of virtual memory, and frees the data structures used to - // manage it. - void TearDown(); - - bool exists() { return this != NULL && code_range_ != NULL; } - Address start() { - if (this == NULL || code_range_ == NULL) return NULL; - return static_cast<Address>(code_range_->address()); - } - bool contains(Address address) { - if (this == NULL || code_range_ == NULL) return false; - Address start = static_cast<Address>(code_range_->address()); - return start <= address && address < start + code_range_->size(); - } - - // Allocates a chunk of memory from the large-object portion of - // the code range. On platforms with no separate code range, should - // not be called. - MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, - const size_t commit_size, - size_t* allocated); - bool CommitRawMemory(Address start, size_t length); - bool UncommitRawMemory(Address start, size_t length); - void FreeRawMemory(Address buf, size_t length); - - private: - Isolate* isolate_; - - // The reserved range of virtual memory that all code objects are put in. - VirtualMemory* code_range_; - // Plain old data class, just a struct plus a constructor. - class FreeBlock { - public: - FreeBlock(Address start_arg, size_t size_arg) - : start(start_arg), size(size_arg) { - ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); - ASSERT(size >= static_cast<size_t>(Page::kPageSize)); - } - FreeBlock(void* start_arg, size_t size_arg) - : start(static_cast<Address>(start_arg)), size(size_arg) { - ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); - ASSERT(size >= static_cast<size_t>(Page::kPageSize)); - } - - Address start; - size_t size; - }; - - // Freed blocks of memory are added to the free list. When the allocation - // list is exhausted, the free list is sorted and merged to make the new - // allocation list. 
- List<FreeBlock> free_list_; - // Memory is allocated from the free blocks on the allocation list. - // The block at current_allocation_block_index_ is the current block. - List<FreeBlock> allocation_list_; - int current_allocation_block_index_; - - // Finds a block on the allocation list that contains at least the - // requested amount of memory. If none is found, sorts and merges - // the existing free memory blocks, and searches again. - // If none can be found, terminates V8 with FatalProcessOutOfMemory. - void GetNextAllocationBlock(size_t requested); - // Compares the start addresses of two free blocks. - static int CompareFreeBlockAddress(const FreeBlock* left, - const FreeBlock* right); - - DISALLOW_COPY_AND_ASSIGN(CodeRange); -}; - - -class SkipList { - public: - SkipList() { - Clear(); - } - - void Clear() { - for (int idx = 0; idx < kSize; idx++) { - starts_[idx] = reinterpret_cast<Address>(-1); - } - } - - Address StartFor(Address addr) { - return starts_[RegionNumber(addr)]; - } - - void AddObject(Address addr, int size) { - int start_region = RegionNumber(addr); - int end_region = RegionNumber(addr + size - kPointerSize); - for (int idx = start_region; idx <= end_region; idx++) { - if (starts_[idx] > addr) starts_[idx] = addr; - } - } - - static inline int RegionNumber(Address addr) { - return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; - } - - static void Update(Address addr, int size) { - Page* page = Page::FromAddress(addr); - SkipList* list = page->skip_list(); - if (list == NULL) { - list = new SkipList(); - page->set_skip_list(list); - } - - list->AddObject(addr, size); - } - - private: - static const int kRegionSizeLog2 = 13; - static const int kRegionSize = 1 << kRegionSizeLog2; - static const int kSize = Page::kPageSize / kRegionSize; - - STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); - - Address starts_[kSize]; -}; - - -// ---------------------------------------------------------------------------- -// A space 
acquires chunks of memory from the operating system. The memory -// allocator allocated and deallocates pages for the paged heap spaces and large -// pages for large object space. -// -// Each space has to manage it's own pages. -// -class MemoryAllocator { - public: - explicit MemoryAllocator(Isolate* isolate); - - // Initializes its internal bookkeeping structures. - // Max capacity of the total space and executable memory limit. - bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); - - void TearDown(); - - Page* AllocatePage( - intptr_t size, PagedSpace* owner, Executability executable); - - LargePage* AllocateLargePage( - intptr_t object_size, Space* owner, Executability executable); - - void Free(MemoryChunk* chunk); - - // Returns the maximum available bytes of heaps. - intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } - - // Returns allocated spaces in bytes. - intptr_t Size() { return size_; } - - // Returns the maximum available executable bytes of heaps. - intptr_t AvailableExecutable() { - if (capacity_executable_ < size_executable_) return 0; - return capacity_executable_ - size_executable_; - } - - // Returns allocated executable spaces in bytes. - intptr_t SizeExecutable() { return size_executable_; } - - // Returns maximum available bytes that the old space can have. - intptr_t MaxAvailable() { - return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize; - } - - // Returns an indication of whether a pointer is in a space that has - // been allocated by this MemoryAllocator. - V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { - return address < lowest_ever_allocated_ || - address >= highest_ever_allocated_; - } - -#ifdef DEBUG - // Reports statistic info of the space. 
- void ReportStatistics(); -#endif - - // Returns a MemoryChunk in which the memory region from commit_area_size to - // reserve_area_size of the chunk area is reserved but not committed, it - // could be committed later by calling MemoryChunk::CommitArea. - MemoryChunk* AllocateChunk(intptr_t reserve_area_size, - intptr_t commit_area_size, - Executability executable, - Space* space); - - Address ReserveAlignedMemory(size_t requested, - size_t alignment, - VirtualMemory* controller); - Address AllocateAlignedMemory(size_t reserve_size, - size_t commit_size, - size_t alignment, - Executability executable, - VirtualMemory* controller); - - bool CommitMemory(Address addr, size_t size, Executability executable); - - void FreeMemory(VirtualMemory* reservation, Executability executable); - void FreeMemory(Address addr, size_t size, Executability executable); - - // Commit a contiguous block of memory from the initial chunk. Assumes that - // the address is not NULL, the size is greater than zero, and that the - // block is contained in the initial chunk. Returns true if it succeeded - // and false otherwise. - bool CommitBlock(Address start, size_t size, Executability executable); - - // Uncommit a contiguous block of memory [start..(start+size)[. - // start is not NULL, the size is greater than zero, and the - // block is contained in the initial chunk. Returns true if it succeeded - // and false otherwise. - bool UncommitBlock(Address start, size_t size); - - // Zaps a contiguous block of memory [start..(start+size)[ thus - // filling it up with a recognizable non-NULL bit pattern. 
- void ZapBlock(Address start, size_t size); - - void PerformAllocationCallback(ObjectSpace space, - AllocationAction action, - size_t size); - - void AddMemoryAllocationCallback(MemoryAllocationCallback callback, - ObjectSpace space, - AllocationAction action); - - void RemoveMemoryAllocationCallback( - MemoryAllocationCallback callback); - - bool MemoryAllocationCallbackRegistered( - MemoryAllocationCallback callback); - - static int CodePageGuardStartOffset(); - - static int CodePageGuardSize(); - - static int CodePageAreaStartOffset(); - - static int CodePageAreaEndOffset(); - - static int CodePageAreaSize() { - return CodePageAreaEndOffset() - CodePageAreaStartOffset(); - } - - MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, - Address start, - size_t commit_size, - size_t reserved_size); - - private: - Isolate* isolate_; - - // Maximum space size in bytes. - size_t capacity_; - // Maximum subset of capacity_ that can be executable - size_t capacity_executable_; - - // Allocated space size in bytes. - size_t size_; - // Allocated executable space size in bytes. - size_t size_executable_; - - // We keep the lowest and highest addresses allocated as a quick way - // of determining that pointers are outside the heap. The estimate is - // conservative, i.e. not all addrsses in 'allocated' space are allocated - // to our heap. The range is [lowest, highest[, inclusive on the low end - // and exclusive on the high end. 
- void* lowest_ever_allocated_; - void* highest_ever_allocated_; - - struct MemoryAllocationCallbackRegistration { - MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, - ObjectSpace space, - AllocationAction action) - : callback(callback), space(space), action(action) { - } - MemoryAllocationCallback callback; - ObjectSpace space; - AllocationAction action; - }; - - // A List of callback that are triggered when memory is allocated or free'd - List<MemoryAllocationCallbackRegistration> - memory_allocation_callbacks_; - - // Initializes pages in a chunk. Returns the first page address. - // This function and GetChunkId() are provided for the mark-compact - // collector to rebuild page headers in the from space, which is - // used as a marking stack and its page headers are destroyed. - Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, - PagedSpace* owner); - - void UpdateAllocatedSpaceLimits(void* low, void* high) { - lowest_ever_allocated_ = Min(lowest_ever_allocated_, low); - highest_ever_allocated_ = Max(highest_ever_allocated_, high); - } - - DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); -}; - - -// ----------------------------------------------------------------------------- -// Interface for heap object iterator to be implemented by all object space -// object iterators. -// -// NOTE: The space specific object iterators also implements the own next() -// method which is used to avoid using virtual functions -// iterating a specific space. - -class ObjectIterator : public Malloced { - public: - virtual ~ObjectIterator() { } - - virtual HeapObject* next_object() = 0; -}; - - -// ----------------------------------------------------------------------------- -// Heap object iterator in new/old/map spaces. -// -// A HeapObjectIterator iterates objects from the bottom of the given space -// to its top or from the bottom of the given page to its top. 
-// -// If objects are allocated in the page during iteration the iterator may -// or may not iterate over those objects. The caller must create a new -// iterator in order to be sure to visit these new objects. -class HeapObjectIterator: public ObjectIterator { - public: - // Creates a new object iterator in a given space. - // If the size function is not given, the iterator calls the default - // Object::Size(). - explicit HeapObjectIterator(PagedSpace* space); - HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); - HeapObjectIterator(Page* page, HeapObjectCallback size_func); - - // Advance to the next object, skipping free spaces and other fillers and - // skipping the special garbage section of which there is one per space. - // Returns NULL when the iteration has ended. - inline HeapObject* Next() { - do { - HeapObject* next_obj = FromCurrentPage(); - if (next_obj != NULL) return next_obj; - } while (AdvanceToNextPage()); - return NULL; - } - - virtual HeapObject* next_object() { - return Next(); - } - - private: - enum PageMode { kOnePageOnly, kAllPagesInSpace }; - - Address cur_addr_; // Current iteration point. - Address cur_end_; // End iteration point. - HeapObjectCallback size_func_; // Size function or NULL. - PagedSpace* space_; - PageMode page_mode_; - - // Fast (inlined) path of next(). - inline HeapObject* FromCurrentPage(); - - // Slow path of next(), goes into the next page. Returns false if the - // iteration has ended. - bool AdvanceToNextPage(); - - // Initializes fields. - inline void Initialize(PagedSpace* owner, - Address start, - Address end, - PageMode mode, - HeapObjectCallback size_func); -}; - - -// ----------------------------------------------------------------------------- -// A PageIterator iterates the pages in a paged space. 
- -class PageIterator BASE_EMBEDDED { - public: - explicit inline PageIterator(PagedSpace* space); - - inline bool has_next(); - inline Page* next(); - - private: - PagedSpace* space_; - Page* prev_page_; // Previous page returned. - // Next page that will be returned. Cached here so that we can use this - // iterator for operations that deallocate pages. - Page* next_page_; -}; - - -// ----------------------------------------------------------------------------- -// A space has a circular list of pages. The next page can be accessed via -// Page::next_page() call. - -// An abstraction of allocation and relocation pointers in a page-structured -// space. -class AllocationInfo { - public: - AllocationInfo() : top_(NULL), limit_(NULL) { - } - - INLINE(void set_top(Address top)) { - SLOW_ASSERT(top == NULL || - (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0); - top_ = top; - } - - INLINE(Address top()) const { - SLOW_ASSERT(top_ == NULL || - (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0); - return top_; - } - - Address* top_address() { - return &top_; - } - - INLINE(void set_limit(Address limit)) { - SLOW_ASSERT(limit == NULL || - (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0); - limit_ = limit; - } - - INLINE(Address limit()) const { - SLOW_ASSERT(limit_ == NULL || - (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0); - return limit_; - } - - Address* limit_address() { - return &limit_; - } - -#ifdef DEBUG - bool VerifyPagedAllocation() { - return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) - && (top_ <= limit_); - } -#endif - - private: - // Current allocation top. - Address top_; - // Current allocation limit. - Address limit_; -}; - - -// An abstraction of the accounting statistics of a page-structured space. -// The 'capacity' of a space is the number of object-area bytes (i.e., not -// including page bookkeeping structures) currently in the space. 
The 'size' -// of a space is the number of allocated bytes, the 'waste' in the space is -// the number of bytes that are not allocated and not available to -// allocation without reorganizing the space via a GC (e.g. small blocks due -// to internal fragmentation, top of page areas in map space), and the bytes -// 'available' is the number of unallocated bytes that are not waste. The -// capacity is the sum of size, waste, and available. -// -// The stats are only set by functions that ensure they stay balanced. These -// functions increase or decrease one of the non-capacity stats in -// conjunction with capacity, or else they always balance increases and -// decreases to the non-capacity stats. -class AllocationStats BASE_EMBEDDED { - public: - AllocationStats() { Clear(); } - - // Zero out all the allocation statistics (i.e., no capacity). - void Clear() { - capacity_ = 0; - max_capacity_ = 0; - size_ = 0; - waste_ = 0; - } - - void ClearSizeWaste() { - size_ = capacity_; - waste_ = 0; - } - - // Reset the allocation statistics (i.e., available = capacity with no - // wasted or allocated bytes). - void Reset() { - size_ = 0; - waste_ = 0; - } - - // Accessors for the allocation statistics. - intptr_t Capacity() { return capacity_; } - intptr_t MaxCapacity() { return max_capacity_; } - intptr_t Size() { return size_; } - intptr_t Waste() { return waste_; } - - // Grow the space by adding available bytes. They are initially marked as - // being in use (part of the size), but will normally be immediately freed, - // putting them on the free list and removing them from size_. - void ExpandSpace(int size_in_bytes) { - capacity_ += size_in_bytes; - size_ += size_in_bytes; - if (capacity_ > max_capacity_) { - max_capacity_ = capacity_; - } - ASSERT(size_ >= 0); - } - - // Shrink the space by removing available bytes. Since shrinking is done - // during sweeping, bytes have been marked as being in use (part of the size) - // and are hereby freed. 
- void ShrinkSpace(int size_in_bytes) { - capacity_ -= size_in_bytes; - size_ -= size_in_bytes; - ASSERT(size_ >= 0); - } - - // Allocate from available bytes (available -> size). - void AllocateBytes(intptr_t size_in_bytes) { - size_ += size_in_bytes; - ASSERT(size_ >= 0); - } - - // Free allocated bytes, making them available (size -> available). - void DeallocateBytes(intptr_t size_in_bytes) { - size_ -= size_in_bytes; - ASSERT(size_ >= 0); - } - - // Waste free bytes (available -> waste). - void WasteBytes(int size_in_bytes) { - size_ -= size_in_bytes; - waste_ += size_in_bytes; - ASSERT(size_ >= 0); - } - - private: - intptr_t capacity_; - intptr_t max_capacity_; - intptr_t size_; - intptr_t waste_; -}; - - -// ----------------------------------------------------------------------------- -// Free lists for old object spaces -// -// Free-list nodes are free blocks in the heap. They look like heap objects -// (free-list node pointers have the heap object tag, and they have a map like -// a heap object). They have a size and a next pointer. The next pointer is -// the raw address of the next free list node (or NULL). -class FreeListNode: public HeapObject { - public: - // Obtain a free-list node from a raw address. This is not a cast because - // it does not check nor require that the first word at the address is a map - // pointer. - static FreeListNode* FromAddress(Address address) { - return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); - } - - static inline bool IsFreeListNode(HeapObject* object); - - // Set the size in bytes, which can be read with HeapObject::Size(). This - // function also writes a map to the first word of the block so that it - // looks like a heap object to the garbage collector and heap iteration - // functions. - void set_size(Heap* heap, int size_in_bytes); - - // Accessors for the next field. 
- inline FreeListNode* next(); - inline FreeListNode** next_address(); - inline void set_next(FreeListNode* next); - - inline void Zap(); - - static inline FreeListNode* cast(MaybeObject* maybe) { - ASSERT(!maybe->IsFailure()); - return reinterpret_cast<FreeListNode*>(maybe); - } - - private: - static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); - - DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); -}; - - -// The free list category holds a pointer to the top element and a pointer to -// the end element of the linked list of free memory blocks. -class FreeListCategory { - public: - FreeListCategory() : - top_(0), - end_(NULL), - available_(0) {} - - intptr_t Concatenate(FreeListCategory* category); - - void Reset(); - - void Free(FreeListNode* node, int size_in_bytes); - - FreeListNode* PickNodeFromList(int *node_size); - FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size); - - intptr_t EvictFreeListItemsInList(Page* p); - bool ContainsPageFreeListItemsInList(Page* p); - - void RepairFreeList(Heap* heap); - - FreeListNode* top() const { - return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_)); - } - - void set_top(FreeListNode* top) { - NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top)); - } - - FreeListNode** GetEndAddress() { return &end_; } - FreeListNode* end() const { return end_; } - void set_end(FreeListNode* end) { end_ = end; } - - int* GetAvailableAddress() { return &available_; } - int available() const { return available_; } - void set_available(int available) { available_ = available; } - - Mutex* mutex() { return &mutex_; } - - bool IsEmpty() { - return top() == 0; - } - -#ifdef DEBUG - intptr_t SumFreeList(); - int FreeListLength(); -#endif - - private: - // top_ points to the top FreeListNode* in the free list category. - AtomicWord top_; - FreeListNode* end_; - Mutex mutex_; - - // Total available bytes in all blocks of this free list category. 
- int available_; -}; - - -// The free list for the old space. The free list is organized in such a way -// as to encourage objects allocated around the same time to be near each -// other. The normal way to allocate is intended to be by bumping a 'top' -// pointer until it hits a 'limit' pointer. When the limit is hit we need to -// find a new space to allocate from. This is done with the free list, which -// is divided up into rough categories to cut down on waste. Having finer -// categories would scatter allocation more. - -// The old space free list is organized in categories. -// 1-31 words: Such small free areas are discarded for efficiency reasons. -// They can be reclaimed by the compactor. However the distance between top -// and limit may be this small. -// 32-255 words: There is a list of spaces this large. It is used for top and -// limit when the object we need to allocate is 1-31 words in size. These -// spaces are called small. -// 256-2047 words: There is a list of spaces this large. It is used for top and -// limit when the object we need to allocate is 32-255 words in size. These -// spaces are called medium. -// 1048-16383 words: There is a list of spaces this large. It is used for top -// and limit when the object we need to allocate is 256-2047 words in size. -// These spaces are call large. -// At least 16384 words. This list is for objects of 2048 words or larger. -// Empty pages are added to this list. These spaces are called huge. -class FreeList { - public: - explicit FreeList(PagedSpace* owner); - - intptr_t Concatenate(FreeList* free_list); - - // Clear the free list. - void Reset(); - - // Return the number of bytes available on the free list. - intptr_t available() { - return small_list_.available() + medium_list_.available() + - large_list_.available() + huge_list_.available(); - } - - // Place a node on the free list. The block of size 'size_in_bytes' - // starting at 'start' is placed on the free list. 
The return value is the - // number of bytes that have been lost due to internal fragmentation by - // freeing the block. Bookkeeping information will be written to the block, - // i.e., its contents will be destroyed. The start address should be word - // aligned, and the size should be a non-zero multiple of the word size. - int Free(Address start, int size_in_bytes); - - // Allocate a block of size 'size_in_bytes' from the free list. The block - // is unitialized. A failure is returned if no block is available. The - // number of bytes lost to fragmentation is returned in the output parameter - // 'wasted_bytes'. The size should be a non-zero multiple of the word size. - MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); - - bool IsEmpty() { - return small_list_.IsEmpty() && medium_list_.IsEmpty() && - large_list_.IsEmpty() && huge_list_.IsEmpty(); - } - -#ifdef DEBUG - void Zap(); - intptr_t SumFreeLists(); - bool IsVeryLong(); -#endif - - // Used after booting the VM. - void RepairLists(Heap* heap); - - intptr_t EvictFreeListItems(Page* p); - bool ContainsPageFreeListItems(Page* p); - - FreeListCategory* small_list() { return &small_list_; } - FreeListCategory* medium_list() { return &medium_list_; } - FreeListCategory* large_list() { return &large_list_; } - FreeListCategory* huge_list() { return &huge_list_; } - - private: - // The size range of blocks, in bytes. 
- static const int kMinBlockSize = 3 * kPointerSize; - static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize; - - FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); - - PagedSpace* owner_; - Heap* heap_; - - static const int kSmallListMin = 0x20 * kPointerSize; - static const int kSmallListMax = 0xff * kPointerSize; - static const int kMediumListMax = 0x7ff * kPointerSize; - static const int kLargeListMax = 0x3fff * kPointerSize; - static const int kSmallAllocationMax = kSmallListMin - kPointerSize; - static const int kMediumAllocationMax = kSmallListMax; - static const int kLargeAllocationMax = kMediumListMax; - FreeListCategory small_list_; - FreeListCategory medium_list_; - FreeListCategory large_list_; - FreeListCategory huge_list_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); -}; - - -class PagedSpace : public Space { - public: - // Creates a space with a maximum capacity, and an id. - PagedSpace(Heap* heap, - intptr_t max_capacity, - AllocationSpace id, - Executability executable); - - virtual ~PagedSpace() {} - - // Set up the space using the given address range of virtual memory (from - // the memory allocator's initial chunk) if possible. If the block of - // addresses is not big enough to contain a single page-aligned page, a - // fresh chunk will be allocated. - bool SetUp(); - - // Returns true if the space has been successfully set up and not - // subsequently torn down. - bool HasBeenSetUp(); - - // Cleans up the space, frees all pages in this space except those belonging - // to the initial chunk, uncommits addresses in the initial chunk. - void TearDown(); - - // Checks whether an object/address is in this space. - inline bool Contains(Address a); - bool Contains(HeapObject* o) { return Contains(o->address()); } - - // Given an address occupied by a live object, return that object if it is - // in this space, or Failure::Exception() if it is not. 
The implementation - // iterates over objects in the page containing the address, the cost is - // linear in the number of objects in the page. It may be slow. - MUST_USE_RESULT MaybeObject* FindObject(Address addr); - - // During boot the free_space_map is created, and afterwards we may need - // to write it into the free list nodes that were already created. - void RepairFreeListsAfterBoot(); - - // Prepares for a mark-compact GC. - void PrepareForMarkCompact(); - - // Current capacity without growing (Size() + Available()). - intptr_t Capacity() { return accounting_stats_.Capacity(); } - - // Total amount of memory committed for this space. For paged - // spaces this equals the capacity. - intptr_t CommittedMemory() { return Capacity(); } - - // The maximum amount of memory ever committed for this space. - intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); } - - // Approximate amount of physical memory committed for this space. - size_t CommittedPhysicalMemory(); - - struct SizeStats { - intptr_t Total() { - return small_size_ + medium_size_ + large_size_ + huge_size_; - } - - intptr_t small_size_; - intptr_t medium_size_; - intptr_t large_size_; - intptr_t huge_size_; - }; - - void ObtainFreeListStatistics(Page* p, SizeStats* sizes); - void ResetFreeListStatistics(); - - // Sets the capacity, the available space and the wasted space to zero. - // The stats are rebuilt during sweeping by adding each page to the - // capacity and the size when it is encountered. As free spaces are - // discovered during the sweeping they are subtracted from the size and added - // to the available and wasted totals. - void ClearStats() { - accounting_stats_.ClearSizeWaste(); - ResetFreeListStatistics(); - } - - // Increases the number of available bytes of that space. - void AddToAccountingStats(intptr_t bytes) { - accounting_stats_.DeallocateBytes(bytes); - } - - // Available bytes without growing. These are the bytes on the free list. 
- // The bytes in the linear allocation area are not included in this total - // because updating the stats would slow down allocation. New pages are - // immediately added to the free list so they show up here. - intptr_t Available() { return free_list_.available(); } - - // Allocated bytes in this space. Garbage bytes that were not found due to - // lazy sweeping are counted as being allocated! The bytes in the current - // linear allocation area (between top and limit) are also counted here. - virtual intptr_t Size() { return accounting_stats_.Size(); } - - // As size, but the bytes in lazily swept pages are estimated and the bytes - // in the current linear allocation area are not included. - virtual intptr_t SizeOfObjects(); - - // Wasted bytes in this space. These are just the bytes that were thrown away - // due to being too small to use for allocation. They do not include the - // free bytes that were not found at all due to lazy sweeping. - virtual intptr_t Waste() { return accounting_stats_.Waste(); } - - // Returns the allocation pointer in this space. - Address top() { return allocation_info_.top(); } - Address limit() { return allocation_info_.limit(); } - - // The allocation top address. - Address* allocation_top_address() { - return allocation_info_.top_address(); - } - - // The allocation limit address. - Address* allocation_limit_address() { - return allocation_info_.limit_address(); - } - - // Allocate the requested number of bytes in the space if possible, return a - // failure object if not. - MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); - - // Give a block of memory to the space's free list. It might be added to - // the free list or accounted as waste. - // If add_to_freelist is false then just accounting stats are updated and - // no attempt to add area to free list is made. 
- int Free(Address start, int size_in_bytes) { - int wasted = free_list_.Free(start, size_in_bytes); - accounting_stats_.DeallocateBytes(size_in_bytes - wasted); - return size_in_bytes - wasted; - } - - void ResetFreeList() { - free_list_.Reset(); - } - - // Set space allocation info. - void SetTopAndLimit(Address top, Address limit) { - ASSERT(top == limit || - Page::FromAddress(top) == Page::FromAddress(limit - 1)); - MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); - allocation_info_.set_top(top); - allocation_info_.set_limit(limit); - } - - // Empty space allocation info, returning unused area to free list. - void EmptyAllocationInfo() { - // Mark the old linear allocation area with a free space map so it can be - // skipped when scanning the heap. - int old_linear_size = static_cast<int>(limit() - top()); - Free(top(), old_linear_size); - SetTopAndLimit(NULL, NULL); - } - - void Allocate(int bytes) { - accounting_stats_.AllocateBytes(bytes); - } - - void IncreaseCapacity(int size); - - // Releases an unused page and shrinks the space. - void ReleasePage(Page* page, bool unlink); - - // The dummy page that anchors the linked list of pages. - Page* anchor() { return &anchor_; } - -#ifdef VERIFY_HEAP - // Verify integrity of this space. - virtual void Verify(ObjectVisitor* visitor); - - // Overridden by subclasses to verify space-specific object - // properties (e.g., only maps or free-list nodes are in map space). - virtual void VerifyObject(HeapObject* obj) {} -#endif - -#ifdef DEBUG - // Print meta info and objects in this space. 
- virtual void Print(); - - // Reports statistics for the space - void ReportStatistics(); - - // Report code object related statistics - void CollectCodeStatistics(); - static void ReportCodeStatistics(Isolate* isolate); - static void ResetCodeStatistics(Isolate* isolate); -#endif - - bool was_swept_conservatively() { return was_swept_conservatively_; } - void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; } - - // Evacuation candidates are swept by evacuator. Needs to return a valid - // result before _and_ after evacuation has finished. - static bool ShouldBeSweptLazily(Page* p) { - return !p->IsEvacuationCandidate() && - !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && - !p->WasSweptPrecisely(); - } - - void SetPagesToSweep(Page* first) { - ASSERT(unswept_free_bytes_ == 0); - if (first == &anchor_) first = NULL; - first_unswept_page_ = first; - } - - void IncrementUnsweptFreeBytes(intptr_t by) { - unswept_free_bytes_ += by; - } - - void IncreaseUnsweptFreeBytes(Page* p) { - ASSERT(ShouldBeSweptLazily(p)); - unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); - } - - void DecrementUnsweptFreeBytes(intptr_t by) { - unswept_free_bytes_ -= by; - } - - void DecreaseUnsweptFreeBytes(Page* p) { - ASSERT(ShouldBeSweptLazily(p)); - unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); - } - - void ResetUnsweptFreeBytes() { - unswept_free_bytes_ = 0; - } - - bool AdvanceSweeper(intptr_t bytes_to_sweep); - - // When parallel sweeper threads are active and the main thread finished - // its sweeping phase, this function waits for them to complete, otherwise - // AdvanceSweeper with size_in_bytes is called. 
- bool EnsureSweeperProgress(intptr_t size_in_bytes); - - bool IsLazySweepingComplete() { - return !first_unswept_page_->is_valid(); - } - - Page* FirstPage() { return anchor_.next_page(); } - Page* LastPage() { return anchor_.prev_page(); } - - void EvictEvacuationCandidatesFromFreeLists(); - - bool CanExpand(); - - // Returns the number of total pages in this space. - int CountTotalPages(); - - // Return size of allocatable area on a page in this space. - inline int AreaSize() { - return area_size_; - } - - protected: - FreeList* free_list() { return &free_list_; } - - int area_size_; - - // Maximum capacity of this space. - intptr_t max_capacity_; - - intptr_t SizeOfFirstPage(); - - // Accounting information for this space. - AllocationStats accounting_stats_; - - // The dummy page that anchors the double linked list of pages. - Page anchor_; - - // The space's free list. - FreeList free_list_; - - // Normal allocation information. - AllocationInfo allocation_info_; - - bool was_swept_conservatively_; - - // The first page to be swept when the lazy sweeper advances. Is set - // to NULL when all pages have been swept. - Page* first_unswept_page_; - - // The number of free bytes which could be reclaimed by advancing the - // lazy sweeper. This is only an estimation because lazy sweeping is - // done conservatively. - intptr_t unswept_free_bytes_; - - // Expands the space by allocating a fixed number of pages. Returns false if - // it cannot allocate requested number of pages from OS, or if the hard heap - // size limit has been hit. - bool Expand(); - - // Generic fast case allocation function that tries linear allocation at the - // address denoted by top in allocation_info_. - inline HeapObject* AllocateLinearly(int size_in_bytes); - - // Slow path of AllocateRaw. This function is space-dependent. 
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); - - friend class PageIterator; - friend class MarkCompactCollector; -}; - - -class NumberAndSizeInfo BASE_EMBEDDED { - public: - NumberAndSizeInfo() : number_(0), bytes_(0) {} - - int number() const { return number_; } - void increment_number(int num) { number_ += num; } - - int bytes() const { return bytes_; } - void increment_bytes(int size) { bytes_ += size; } - - void clear() { - number_ = 0; - bytes_ = 0; - } - - private: - int number_; - int bytes_; -}; - - -// HistogramInfo class for recording a single "bar" of a histogram. This -// class is used for collecting statistics to print to the log file. -class HistogramInfo: public NumberAndSizeInfo { - public: - HistogramInfo() : NumberAndSizeInfo() {} - - const char* name() { return name_; } - void set_name(const char* name) { name_ = name; } - - private: - const char* name_; -}; - - -enum SemiSpaceId { - kFromSpace = 0, - kToSpace = 1 -}; - - -class SemiSpace; - - -class NewSpacePage : public MemoryChunk { - public: - // GC related flags copied from from-space to to-space when - // flipping semispaces. 
- static const intptr_t kCopyOnFlipFlagsMask = - (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | - (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | - (1 << MemoryChunk::SCAN_ON_SCAVENGE); - - static const int kAreaSize = Page::kMaxRegularHeapObjectSize; - - inline NewSpacePage* next_page() const { - return static_cast<NewSpacePage*>(next_chunk()); - } - - inline void set_next_page(NewSpacePage* page) { - set_next_chunk(page); - } - - inline NewSpacePage* prev_page() const { - return static_cast<NewSpacePage*>(prev_chunk()); - } - - inline void set_prev_page(NewSpacePage* page) { - set_prev_chunk(page); - } - - SemiSpace* semi_space() { - return reinterpret_cast<SemiSpace*>(owner()); - } - - bool is_anchor() { return !this->InNewSpace(); } - - static bool IsAtStart(Address addr) { - return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) - == kObjectStartOffset; - } - - static bool IsAtEnd(Address addr) { - return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; - } - - Address address() { - return reinterpret_cast<Address>(this); - } - - // Finds the NewSpacePage containg the given address. - static inline NewSpacePage* FromAddress(Address address_in_page) { - Address page_start = - reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & - ~Page::kPageAlignmentMask); - NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); - return page; - } - - // Find the page for a limit address. A limit address is either an address - // inside a page, or the address right after the last byte of a page. - static inline NewSpacePage* FromLimit(Address address_limit) { - return NewSpacePage::FromAddress(address_limit - 1); - } - - // Checks if address1 and address2 are on the same new space page. 
- static inline bool OnSamePage(Address address1, Address address2) { - return NewSpacePage::FromAddress(address1) == - NewSpacePage::FromAddress(address2); - } - - private: - // Create a NewSpacePage object that is only used as anchor - // for the doubly-linked list of real pages. - explicit NewSpacePage(SemiSpace* owner) { - InitializeAsAnchor(owner); - } - - static NewSpacePage* Initialize(Heap* heap, - Address start, - SemiSpace* semi_space); - - // Intialize a fake NewSpacePage used as sentinel at the ends - // of a doubly-linked list of real NewSpacePages. - // Only uses the prev/next links, and sets flags to not be in new-space. - void InitializeAsAnchor(SemiSpace* owner); - - friend class SemiSpace; - friend class SemiSpaceIterator; -}; - - -// ----------------------------------------------------------------------------- -// SemiSpace in young generation -// -// A semispace is a contiguous chunk of memory holding page-like memory -// chunks. The mark-compact collector uses the memory of the first page in -// the from space as a marking stack when tracing live objects. - -class SemiSpace : public Space { - public: - // Constructor. - SemiSpace(Heap* heap, SemiSpaceId semispace) - : Space(heap, NEW_SPACE, NOT_EXECUTABLE), - start_(NULL), - age_mark_(NULL), - id_(semispace), - anchor_(this), - current_page_(NULL) { } - - // Sets up the semispace using the given chunk. - void SetUp(Address start, int initial_capacity, int maximum_capacity); - - // Tear down the space. Heap memory was not allocated by the space, so it - // is not deallocated here. - void TearDown(); - - // True if the space has been set up but not torn down. - bool HasBeenSetUp() { return start_ != NULL; } - - // Grow the semispace to the new capacity. The new capacity - // requested must be larger than the current capacity and less than - // the maximum capacity. - bool GrowTo(int new_capacity); - - // Shrinks the semispace to the new capacity. 
The new capacity - // requested must be more than the amount of used memory in the - // semispace and less than the current capacity. - bool ShrinkTo(int new_capacity); - - // Returns the start address of the first page of the space. - Address space_start() { - ASSERT(anchor_.next_page() != &anchor_); - return anchor_.next_page()->area_start(); - } - - // Returns the start address of the current page of the space. - Address page_low() { - return current_page_->area_start(); - } - - // Returns one past the end address of the space. - Address space_end() { - return anchor_.prev_page()->area_end(); - } - - // Returns one past the end address of the current page of the space. - Address page_high() { - return current_page_->area_end(); - } - - bool AdvancePage() { - NewSpacePage* next_page = current_page_->next_page(); - if (next_page == anchor()) return false; - current_page_ = next_page; - return true; - } - - // Resets the space to using the first page. - void Reset(); - - // Age mark accessors. - Address age_mark() { return age_mark_; } - void set_age_mark(Address mark); - - // True if the address is in the address range of this semispace (not - // necessarily below the allocation pointer). - bool Contains(Address a) { - return (reinterpret_cast<uintptr_t>(a) & address_mask_) - == reinterpret_cast<uintptr_t>(start_); - } - - // True if the object is a heap object in the address range of this - // semispace (not necessarily below the allocation pointer). - bool Contains(Object* o) { - return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; - } - - // If we don't have these here then SemiSpace will be abstract. However - // they should never be called. 
- virtual intptr_t Size() { - UNREACHABLE(); - return 0; - } - - bool is_committed() { return committed_; } - bool Commit(); - bool Uncommit(); - - NewSpacePage* first_page() { return anchor_.next_page(); } - NewSpacePage* current_page() { return current_page_; } - -#ifdef VERIFY_HEAP - virtual void Verify(); -#endif - -#ifdef DEBUG - virtual void Print(); - // Validate a range of of addresses in a SemiSpace. - // The "from" address must be on a page prior to the "to" address, - // in the linked page order, or it must be earlier on the same page. - static void AssertValidRange(Address from, Address to); -#else - // Do nothing. - inline static void AssertValidRange(Address from, Address to) {} -#endif - - // Returns the current capacity of the semi space. - int Capacity() { return capacity_; } - - // Returns the maximum capacity of the semi space. - int MaximumCapacity() { return maximum_capacity_; } - - // Returns the initial capacity of the semi space. - int InitialCapacity() { return initial_capacity_; } - - SemiSpaceId id() { return id_; } - - static void Swap(SemiSpace* from, SemiSpace* to); - - // Returns the maximum amount of memory ever committed by the semi space. - size_t MaximumCommittedMemory() { return maximum_committed_; } - - // Approximate amount of physical memory committed for this space. - size_t CommittedPhysicalMemory(); - - private: - // Flips the semispace between being from-space and to-space. - // Copies the flags into the masked positions on all pages in the space. - void FlipPages(intptr_t flags, intptr_t flag_mask); - - // Updates Capacity and MaximumCommitted based on new capacity. - void SetCapacity(int new_capacity); - - NewSpacePage* anchor() { return &anchor_; } - - // The current and maximum capacity of the space. - int capacity_; - int maximum_capacity_; - int initial_capacity_; - - intptr_t maximum_committed_; - - // The start address of the space. 
- Address start_; - // Used to govern object promotion during mark-compact collection. - Address age_mark_; - - // Masks and comparison values to test for containment in this semispace. - uintptr_t address_mask_; - uintptr_t object_mask_; - uintptr_t object_expected_; - - bool committed_; - SemiSpaceId id_; - - NewSpacePage anchor_; - NewSpacePage* current_page_; - - friend class SemiSpaceIterator; - friend class NewSpacePageIterator; - public: - TRACK_MEMORY("SemiSpace") -}; - - -// A SemiSpaceIterator is an ObjectIterator that iterates over the active -// semispace of the heap's new space. It iterates over the objects in the -// semispace from a given start address (defaulting to the bottom of the -// semispace) to the top of the semispace. New objects allocated after the -// iterator is created are not iterated. -class SemiSpaceIterator : public ObjectIterator { - public: - // Create an iterator over the objects in the given space. If no start - // address is given, the iterator starts from the bottom of the space. If - // no size function is given, the iterator calls Object::Size(). - - // Iterate over all of allocated to-space. - explicit SemiSpaceIterator(NewSpace* space); - // Iterate over all of allocated to-space, with a custome size function. - SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); - // Iterate over part of allocated to-space, from start to the end - // of allocation. - SemiSpaceIterator(NewSpace* space, Address start); - // Iterate from one address to another in the same semi-space. - SemiSpaceIterator(Address from, Address to); - - HeapObject* Next() { - if (current_ == limit_) return NULL; - if (NewSpacePage::IsAtEnd(current_)) { - NewSpacePage* page = NewSpacePage::FromLimit(current_); - page = page->next_page(); - ASSERT(!page->is_anchor()); - current_ = page->area_start(); - if (current_ == limit_) return NULL; - } - - HeapObject* object = HeapObject::FromAddress(current_); - int size = (size_func_ == NULL) ? 
object->Size() : size_func_(object); - - current_ += size; - return object; - } - - // Implementation of the ObjectIterator functions. - virtual HeapObject* next_object() { return Next(); } - - private: - void Initialize(Address start, - Address end, - HeapObjectCallback size_func); - - // The current iteration point. - Address current_; - // The end of iteration. - Address limit_; - // The callback function. - HeapObjectCallback size_func_; -}; - - -// ----------------------------------------------------------------------------- -// A PageIterator iterates the pages in a semi-space. -class NewSpacePageIterator BASE_EMBEDDED { - public: - // Make an iterator that runs over all pages in to-space. - explicit inline NewSpacePageIterator(NewSpace* space); - - // Make an iterator that runs over all pages in the given semispace, - // even those not used in allocation. - explicit inline NewSpacePageIterator(SemiSpace* space); - - // Make iterator that iterates from the page containing start - // to the page that contains limit in the same semispace. - inline NewSpacePageIterator(Address start, Address limit); - - inline bool has_next(); - inline NewSpacePage* next(); - - private: - NewSpacePage* prev_page_; // Previous page returned. - // Next page that will be returned. Cached here so that we can use this - // iterator for operations that deallocate pages. - NewSpacePage* next_page_; - // Last page returned. - NewSpacePage* last_page_; -}; - - -// ----------------------------------------------------------------------------- -// The young generation space. -// -// The new space consists of a contiguous pair of semispaces. It simply -// forwards most functions to the appropriate semispace. - -class NewSpace : public Space { - public: - // Constructor. 
- explicit NewSpace(Heap* heap) - : Space(heap, NEW_SPACE, NOT_EXECUTABLE), - to_space_(heap, kToSpace), - from_space_(heap, kFromSpace), - reservation_(), - inline_allocation_limit_step_(0) {} - - // Sets up the new space using the given chunk. - bool SetUp(int reserved_semispace_size_, int max_semispace_size); - - // Tears down the space. Heap memory was not allocated by the space, so it - // is not deallocated here. - void TearDown(); - - // True if the space has been set up but not torn down. - bool HasBeenSetUp() { - return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); - } - - // Flip the pair of spaces. - void Flip(); - - // Grow the capacity of the semispaces. Assumes that they are not at - // their maximum capacity. - void Grow(); - - // Shrink the capacity of the semispaces. - void Shrink(); - - // True if the address or object lies in the address range of either - // semispace (not necessarily below the allocation pointer). - bool Contains(Address a) { - return (reinterpret_cast<uintptr_t>(a) & address_mask_) - == reinterpret_cast<uintptr_t>(start_); - } - - bool Contains(Object* o) { - Address a = reinterpret_cast<Address>(o); - return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; - } - - // Return the allocated bytes in the active semispace. - virtual intptr_t Size() { - return pages_used_ * NewSpacePage::kAreaSize + - static_cast<int>(top() - to_space_.page_low()); - } - - // The same, but returning an int. We have to have the one that returns - // intptr_t because it is inherited, but if we know we are dealing with the - // new space, which can't get as big as the other spaces then this is useful: - int SizeAsInt() { return static_cast<int>(Size()); } - - // Return the current capacity of a semispace. 
- intptr_t EffectiveCapacity() { - SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity()); - return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize; - } - - // Return the current capacity of a semispace. - intptr_t Capacity() { - ASSERT(to_space_.Capacity() == from_space_.Capacity()); - return to_space_.Capacity(); - } - - // Return the total amount of memory committed for new space. - intptr_t CommittedMemory() { - if (from_space_.is_committed()) return 2 * Capacity(); - return Capacity(); - } - - // Return the total amount of memory committed for new space. - intptr_t MaximumCommittedMemory() { - return to_space_.MaximumCommittedMemory() + - from_space_.MaximumCommittedMemory(); - } - - // Approximate amount of physical memory committed for this space. - size_t CommittedPhysicalMemory(); - - // Return the available bytes without growing. - intptr_t Available() { - return Capacity() - Size(); - } - - // Return the maximum capacity of a semispace. - int MaximumCapacity() { - ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); - return to_space_.MaximumCapacity(); - } - - // Returns the initial capacity of a semispace. - int InitialCapacity() { - ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity()); - return to_space_.InitialCapacity(); - } - - // Return the address of the allocation pointer in the active semispace. - Address top() { - ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top())); - return allocation_info_.top(); - } - - void set_top(Address top) { - ASSERT(to_space_.current_page()->ContainsLimit(top)); - allocation_info_.set_top(top); - } - - // Return the address of the allocation pointer limit in the active semispace. - Address limit() { - ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit())); - return allocation_info_.limit(); - } - - // Return the address of the first object in the active semispace. 
- Address bottom() { return to_space_.space_start(); } - - // Get the age mark of the inactive semispace. - Address age_mark() { return from_space_.age_mark(); } - // Set the age mark in the active semispace. - void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } - - // The start address of the space and a bit mask. Anding an address in the - // new space with the mask will result in the start address. - Address start() { return start_; } - uintptr_t mask() { return address_mask_; } - - INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { - ASSERT(Contains(addr)); - ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) || - IsAligned(OffsetFrom(addr) - 1, kPointerSize)); - return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; - } - - INLINE(Address MarkbitIndexToAddress(uint32_t index)) { - return reinterpret_cast<Address>(index << kPointerSizeLog2); - } - - // The allocation top and limit address. - Address* allocation_top_address() { - return allocation_info_.top_address(); - } - - // The allocation limit address. - Address* allocation_limit_address() { - return allocation_info_.limit_address(); - } - - MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes)); - - // Reset the allocation pointer to the beginning of the active semispace. - void ResetAllocationInfo(); - - void UpdateInlineAllocationLimit(int size_in_bytes); - void LowerInlineAllocationLimit(intptr_t step) { - inline_allocation_limit_step_ = step; - UpdateInlineAllocationLimit(0); - top_on_previous_step_ = allocation_info_.top(); - } - - // Get the extent of the inactive semispace (for use as a marking stack, - // or to zap it). Notice: space-addresses are not necessarily on the - // same page, so FromSpaceStart() might be above FromSpaceEnd(). 
- Address FromSpacePageLow() { return from_space_.page_low(); } - Address FromSpacePageHigh() { return from_space_.page_high(); } - Address FromSpaceStart() { return from_space_.space_start(); } - Address FromSpaceEnd() { return from_space_.space_end(); } - - // Get the extent of the active semispace's pages' memory. - Address ToSpaceStart() { return to_space_.space_start(); } - Address ToSpaceEnd() { return to_space_.space_end(); } - - inline bool ToSpaceContains(Address address) { - return to_space_.Contains(address); - } - inline bool FromSpaceContains(Address address) { - return from_space_.Contains(address); - } - - // True if the object is a heap object in the address range of the - // respective semispace (not necessarily below the allocation pointer of the - // semispace). - inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } - inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } - - // Try to switch the active semispace to a new, empty, page. - // Returns false if this isn't possible or reasonable (i.e., there - // are no pages, or the current page is already empty), or true - // if successful. - bool AddFreshPage(); - -#ifdef VERIFY_HEAP - // Verify the active semispace. - virtual void Verify(); -#endif - -#ifdef DEBUG - // Print the active semispace. - virtual void Print() { to_space_.Print(); } -#endif - - // Iterates the active semispace to collect statistics. - void CollectStatistics(); - // Reports previously collected statistics of the active semispace. - void ReportStatistics(); - // Clears previously collected statistics. - void ClearHistograms(); - - // Record the allocation or promotion of a heap object. Note that we don't - // record every single allocation, but only those that happen in the - // to space during a scavenge GC. - void RecordAllocation(HeapObject* obj); - void RecordPromotion(HeapObject* obj); - - // Return whether the operation succeded. 
- bool CommitFromSpaceIfNeeded() { - if (from_space_.is_committed()) return true; - return from_space_.Commit(); - } - - bool UncommitFromSpace() { - if (!from_space_.is_committed()) return true; - return from_space_.Uncommit(); - } - - inline intptr_t inline_allocation_limit_step() { - return inline_allocation_limit_step_; - } - - SemiSpace* active_space() { return &to_space_; } - - private: - // Update allocation info to match the current to-space page. - void UpdateAllocationInfo(); - - Address chunk_base_; - uintptr_t chunk_size_; - - // The semispaces. - SemiSpace to_space_; - SemiSpace from_space_; - VirtualMemory reservation_; - int pages_used_; - - // Start address and bit mask for containment testing. - Address start_; - uintptr_t address_mask_; - uintptr_t object_mask_; - uintptr_t object_expected_; - - // Allocation pointer and limit for normal allocation and allocation during - // mark-compact collection. - AllocationInfo allocation_info_; - - // When incremental marking is active we will set allocation_info_.limit - // to be lower than actual limit and then will gradually increase it - // in steps to guarantee that we do incremental marking steps even - // when all allocation is performed from inlined generated code. - intptr_t inline_allocation_limit_step_; - - Address top_on_previous_step_; - - HistogramInfo* allocated_histogram_; - HistogramInfo* promoted_histogram_; - - MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes); - - friend class SemiSpaceIterator; - - public: - TRACK_MEMORY("NewSpace") -}; - - -// ----------------------------------------------------------------------------- -// Old object space (excluding map objects) - -class OldSpace : public PagedSpace { - public: - // Creates an old space object with a given maximum capacity. - // The constructor does not allocate pages from OS. 
- OldSpace(Heap* heap, - intptr_t max_capacity, - AllocationSpace id, - Executability executable) - : PagedSpace(heap, max_capacity, id, executable) { - } - - public: - TRACK_MEMORY("OldSpace") -}; - - -// For contiguous spaces, top should be in the space (or at the end) and limit -// should be the end of the space. -#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ - SLOW_ASSERT((space).page_low() <= (info).top() \ - && (info).top() <= (space).page_high() \ - && (info).limit() <= (space).page_high()) - - -// ----------------------------------------------------------------------------- -// Old space for all map objects - -class MapSpace : public PagedSpace { - public: - // Creates a map space object with a maximum capacity. - MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) - : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), - max_map_space_pages_(kMaxMapPageIndex - 1) { - } - - // Given an index, returns the page address. - // TODO(1600): this limit is artifical just to keep code compilable - static const int kMaxMapPageIndex = 1 << 16; - - virtual int RoundSizeDownToObjectAlignment(int size) { - if (IsPowerOf2(Map::kSize)) { - return RoundDown(size, Map::kSize); - } else { - return (size / Map::kSize) * Map::kSize; - } - } - - protected: - virtual void VerifyObject(HeapObject* obj); - - private: - static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize; - - // Do map space compaction if there is a page gap. - int CompactionThreshold() { - return kMapsPerPage * (max_map_space_pages_ - 1); - } - - const int max_map_space_pages_; - - public: - TRACK_MEMORY("MapSpace") -}; - - -// ----------------------------------------------------------------------------- -// Old space for simple property cell objects - -class CellSpace : public PagedSpace { - public: - // Creates a property cell space object with a maximum capacity. 
- CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) - : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) { - } - - virtual int RoundSizeDownToObjectAlignment(int size) { - if (IsPowerOf2(Cell::kSize)) { - return RoundDown(size, Cell::kSize); - } else { - return (size / Cell::kSize) * Cell::kSize; - } - } - - protected: - virtual void VerifyObject(HeapObject* obj); - - public: - TRACK_MEMORY("CellSpace") -}; - - -// ----------------------------------------------------------------------------- -// Old space for all global object property cell objects - -class PropertyCellSpace : public PagedSpace { - public: - // Creates a property cell space object with a maximum capacity. - PropertyCellSpace(Heap* heap, intptr_t max_capacity, - AllocationSpace id) - : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) { - } - - virtual int RoundSizeDownToObjectAlignment(int size) { - if (IsPowerOf2(PropertyCell::kSize)) { - return RoundDown(size, PropertyCell::kSize); - } else { - return (size / PropertyCell::kSize) * PropertyCell::kSize; - } - } - - protected: - virtual void VerifyObject(HeapObject* obj); - - public: - TRACK_MEMORY("PropertyCellSpace") -}; - - -// ----------------------------------------------------------------------------- -// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by -// the large object space. A large object is allocated from OS heap with -// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). -// A large object always starts at Page::kObjectStartOffset to a page. -// Large objects do not move during garbage collections. - -class LargeObjectSpace : public Space { - public: - LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id); - virtual ~LargeObjectSpace() {} - - // Initializes internal data structures. - bool SetUp(); - - // Releases internal resources, frees objects in this space. 
- void TearDown(); - - static intptr_t ObjectSizeFor(intptr_t chunk_size) { - if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; - return chunk_size - Page::kPageSize - Page::kObjectStartOffset; - } - - // Shared implementation of AllocateRaw, AllocateRawCode and - // AllocateRawFixedArray. - MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size, - Executability executable); - - // Available bytes for objects in this space. - inline intptr_t Available(); - - virtual intptr_t Size() { - return size_; - } - - virtual intptr_t SizeOfObjects() { - return objects_size_; - } - - intptr_t MaximumCommittedMemory() { - return maximum_committed_; - } - - intptr_t CommittedMemory() { - return Size(); - } - - // Approximate amount of physical memory committed for this space. - size_t CommittedPhysicalMemory(); - - int PageCount() { - return page_count_; - } - - // Finds an object for a given address, returns Failure::Exception() - // if it is not found. The function iterates through all objects in this - // space, may be slow. - MaybeObject* FindObject(Address a); - - // Finds a large object page containing the given address, returns NULL - // if such a page doesn't exist. - LargePage* FindPage(Address a); - - // Frees unmarked objects. - void FreeUnmarkedObjects(); - - // Checks whether a heap object is in this space; O(1). - bool Contains(HeapObject* obj); - - // Checks whether the space is empty. - bool IsEmpty() { return first_page_ == NULL; } - - LargePage* first_page() { return first_page_; } - -#ifdef VERIFY_HEAP - virtual void Verify(); -#endif - -#ifdef DEBUG - virtual void Print(); - void ReportStatistics(); - void CollectCodeStatistics(); -#endif - // Checks whether an address is in the object area in this space. It - // iterates all objects in the space. May be slow. 
- bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } - - private: - intptr_t max_capacity_; - intptr_t maximum_committed_; - // The head of the linked list of large object chunks. - LargePage* first_page_; - intptr_t size_; // allocated bytes - int page_count_; // number of chunks - intptr_t objects_size_; // size of objects - // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them - HashMap chunk_map_; - - friend class LargeObjectIterator; - - public: - TRACK_MEMORY("LargeObjectSpace") -}; - - -class LargeObjectIterator: public ObjectIterator { - public: - explicit LargeObjectIterator(LargeObjectSpace* space); - LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); - - HeapObject* Next(); - - // implementation of ObjectIterator. - virtual HeapObject* next_object() { return Next(); } - - private: - LargePage* current_; - HeapObjectCallback size_func_; -}; - - -// Iterates over the chunks (pages and large object pages) that can contain -// pointers to new space. -class PointerChunkIterator BASE_EMBEDDED { - public: - inline explicit PointerChunkIterator(Heap* heap); - - // Return NULL when the iterator is done. - MemoryChunk* next() { - switch (state_) { - case kOldPointerState: { - if (old_pointer_iterator_.has_next()) { - return old_pointer_iterator_.next(); - } - state_ = kMapState; - // Fall through. - } - case kMapState: { - if (map_iterator_.has_next()) { - return map_iterator_.next(); - } - state_ = kLargeObjectState; - // Fall through. - } - case kLargeObjectState: { - HeapObject* heap_object; - do { - heap_object = lo_iterator_.Next(); - if (heap_object == NULL) { - state_ = kFinishedState; - return NULL; - } - // Fixed arrays are the only pointer-containing objects in large - // object space. 
- } while (!heap_object->IsFixedArray()); - MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address()); - return answer; - } - case kFinishedState: - return NULL; - default: - break; - } - UNREACHABLE(); - return NULL; - } - - - private: - enum State { - kOldPointerState, - kMapState, - kLargeObjectState, - kFinishedState - }; - State state_; - PageIterator old_pointer_iterator_; - PageIterator map_iterator_; - LargeObjectIterator lo_iterator_; -}; - - -#ifdef DEBUG -struct CommentStatistic { - const char* comment; - int size; - int count; - void Clear() { - comment = NULL; - size = 0; - count = 0; - } - // Must be small, since an iteration is used for lookup. - static const int kMaxComments = 64; -}; -#endif - - -} } // namespace v8::internal - -#endif // V8_SPACES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/spaces-inl.h nodejs-0.11.15/deps/v8/src/spaces-inl.h --- nodejs-0.11.13/deps/v8/src/spaces-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/spaces-inl.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,365 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_SPACES_INL_H_ -#define V8_SPACES_INL_H_ - -#include "heap-profiler.h" -#include "isolate.h" -#include "spaces.h" -#include "v8memory.h" - -namespace v8 { -namespace internal { - - -// ----------------------------------------------------------------------------- -// Bitmap - -void Bitmap::Clear(MemoryChunk* chunk) { - Bitmap* bitmap = chunk->markbits(); - for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0; - chunk->ResetLiveBytes(); -} - - -// ----------------------------------------------------------------------------- -// PageIterator - - -PageIterator::PageIterator(PagedSpace* space) - : space_(space), - prev_page_(&space->anchor_), - next_page_(prev_page_->next_page()) { } - - -bool PageIterator::has_next() { - return next_page_ != &space_->anchor_; -} - - -Page* PageIterator::next() { - ASSERT(has_next()); - prev_page_ = next_page_; - next_page_ = next_page_->next_page(); - return prev_page_; -} - - -// ----------------------------------------------------------------------------- -// NewSpacePageIterator - - -NewSpacePageIterator::NewSpacePageIterator(NewSpace* space) - : 
prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()), - next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())), - last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { } - -NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space) - : prev_page_(space->anchor()), - next_page_(prev_page_->next_page()), - last_page_(prev_page_->prev_page()) { } - -NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit) - : prev_page_(NewSpacePage::FromAddress(start)->prev_page()), - next_page_(NewSpacePage::FromAddress(start)), - last_page_(NewSpacePage::FromLimit(limit)) { - SemiSpace::AssertValidRange(start, limit); -} - - -bool NewSpacePageIterator::has_next() { - return prev_page_ != last_page_; -} - - -NewSpacePage* NewSpacePageIterator::next() { - ASSERT(has_next()); - prev_page_ = next_page_; - next_page_ = next_page_->next_page(); - return prev_page_; -} - - -// ----------------------------------------------------------------------------- -// HeapObjectIterator -HeapObject* HeapObjectIterator::FromCurrentPage() { - while (cur_addr_ != cur_end_) { - if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) { - cur_addr_ = space_->limit(); - continue; - } - HeapObject* obj = HeapObject::FromAddress(cur_addr_); - int obj_size = (size_func_ == NULL) ? 
obj->Size() : size_func_(obj); - cur_addr_ += obj_size; - ASSERT(cur_addr_ <= cur_end_); - if (!obj->IsFiller()) { - ASSERT_OBJECT_SIZE(obj_size); - return obj; - } - } - return NULL; -} - - -// ----------------------------------------------------------------------------- -// MemoryAllocator - -#ifdef ENABLE_HEAP_PROTECTION - -void MemoryAllocator::Protect(Address start, size_t size) { - OS::Protect(start, size); -} - - -void MemoryAllocator::Unprotect(Address start, - size_t size, - Executability executable) { - OS::Unprotect(start, size, executable); -} - - -void MemoryAllocator::ProtectChunkFromPage(Page* page) { - int id = GetChunkId(page); - OS::Protect(chunks_[id].address(), chunks_[id].size()); -} - - -void MemoryAllocator::UnprotectChunkFromPage(Page* page) { - int id = GetChunkId(page); - OS::Unprotect(chunks_[id].address(), chunks_[id].size(), - chunks_[id].owner()->executable() == EXECUTABLE); -} - -#endif - - -// -------------------------------------------------------------------------- -// PagedSpace -Page* Page::Initialize(Heap* heap, - MemoryChunk* chunk, - Executability executable, - PagedSpace* owner) { - Page* page = reinterpret_cast<Page*>(chunk); - ASSERT(page->area_size() <= kMaxRegularHeapObjectSize); - ASSERT(chunk->owner() == owner); - owner->IncreaseCapacity(page->area_size()); - owner->Free(page->area_start(), page->area_size()); - - heap->incremental_marking()->SetOldSpacePageFlags(chunk); - - return page; -} - - -bool PagedSpace::Contains(Address addr) { - Page* p = Page::FromAddress(addr); - if (!p->is_valid()) return false; - return p->owner() == this; -} - - -void MemoryChunk::set_scan_on_scavenge(bool scan) { - if (scan) { - if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages(); - SetFlag(SCAN_ON_SCAVENGE); - } else { - if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages(); - ClearFlag(SCAN_ON_SCAVENGE); - } - heap_->incremental_marking()->SetOldSpacePageFlags(this); -} - - -MemoryChunk* 
MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) { - MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>( - OffsetFrom(addr) & ~Page::kPageAlignmentMask); - if (maybe->owner() != NULL) return maybe; - LargeObjectIterator iterator(heap->lo_space()); - for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) { - // Fixed arrays are the only pointer-containing objects in large object - // space. - if (o->IsFixedArray()) { - MemoryChunk* chunk = MemoryChunk::FromAddress(o->address()); - if (chunk->Contains(addr)) { - return chunk; - } - } - } - UNREACHABLE(); - return NULL; -} - - -void MemoryChunk::UpdateHighWaterMark(Address mark) { - if (mark == NULL) return; - // Need to subtract one from the mark because when a chunk is full the - // top points to the next address after the chunk, which effectively belongs - // to another chunk. See the comment to Page::FromAllocationTop. - MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); - int new_mark = static_cast<int>(mark - chunk->address()); - if (new_mark > chunk->high_water_mark_) { - chunk->high_water_mark_ = new_mark; - } -} - - -PointerChunkIterator::PointerChunkIterator(Heap* heap) - : state_(kOldPointerState), - old_pointer_iterator_(heap->old_pointer_space()), - map_iterator_(heap->map_space()), - lo_iterator_(heap->lo_space()) { } - - -Page* Page::next_page() { - ASSERT(next_chunk()->owner() == owner()); - return static_cast<Page*>(next_chunk()); -} - - -Page* Page::prev_page() { - ASSERT(prev_chunk()->owner() == owner()); - return static_cast<Page*>(prev_chunk()); -} - - -void Page::set_next_page(Page* page) { - ASSERT(page->owner() == owner()); - set_next_chunk(page); -} - - -void Page::set_prev_page(Page* page) { - ASSERT(page->owner() == owner()); - set_prev_chunk(page); -} - - -// Try linear allocation in the page of alloc_info's allocation top. Does -// not contain slow case logic (e.g. 
move to the next page or try free list -// allocation) so it can be used by all the allocation functions and for all -// the paged spaces. -HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { - Address current_top = allocation_info_.top(); - Address new_top = current_top + size_in_bytes; - if (new_top > allocation_info_.limit()) return NULL; - - allocation_info_.set_top(new_top); - return HeapObject::FromAddress(current_top); -} - - -// Raw allocation. -MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { - HeapObject* object = AllocateLinearly(size_in_bytes); - if (object != NULL) { - if (identity() == CODE_SPACE) { - SkipList::Update(object->address(), size_in_bytes); - } - return object; - } - - ASSERT(!heap()->linear_allocation() || - (anchor_.next_chunk() == &anchor_ && - anchor_.prev_chunk() == &anchor_)); - - object = free_list_.Allocate(size_in_bytes); - if (object != NULL) { - if (identity() == CODE_SPACE) { - SkipList::Update(object->address(), size_in_bytes); - } - return object; - } - - object = SlowAllocateRaw(size_in_bytes); - if (object != NULL) { - if (identity() == CODE_SPACE) { - SkipList::Update(object->address(), size_in_bytes); - } - return object; - } - - return Failure::RetryAfterGC(identity()); -} - - -// ----------------------------------------------------------------------------- -// NewSpace - - -MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { - Address old_top = allocation_info_.top(); -#ifdef DEBUG - // If we are stressing compaction we waste some memory in new space - // in order to get more frequent GCs. 
- if (FLAG_stress_compaction && !heap()->linear_allocation()) { - if (allocation_info_.limit() - old_top >= size_in_bytes * 4) { - int filler_size = size_in_bytes * 4; - for (int i = 0; i < filler_size; i += kPointerSize) { - *(reinterpret_cast<Object**>(old_top + i)) = - heap()->one_pointer_filler_map(); - } - old_top += filler_size; - allocation_info_.set_top(allocation_info_.top() + filler_size); - } - } -#endif - - if (allocation_info_.limit() - old_top < size_in_bytes) { - return SlowAllocateRaw(size_in_bytes); - } - - HeapObject* obj = HeapObject::FromAddress(old_top); - allocation_info_.set_top(allocation_info_.top() + size_in_bytes); - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); - - return obj; -} - - -LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) { - heap->incremental_marking()->SetOldSpacePageFlags(chunk); - return static_cast<LargePage*>(chunk); -} - - -intptr_t LargeObjectSpace::Available() { - return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available()); -} - - -bool FreeListNode::IsFreeListNode(HeapObject* object) { - Map* map = object->map(); - Heap* heap = object->GetHeap(); - return map == heap->raw_unchecked_free_space_map() - || map == heap->raw_unchecked_one_pointer_filler_map() - || map == heap->raw_unchecked_two_pointer_filler_map(); -} - -} } // namespace v8::internal - -#endif // V8_SPACES_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/splay-tree.h nodejs-0.11.15/deps/v8/src/splay-tree.h --- nodejs-0.11.13/deps/v8/src/splay-tree.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/splay-tree.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_SPLAY_TREE_H_ #define V8_SPLAY_TREE_H_ -#include "allocation.h" +#include "src/allocation.h" namespace v8 { namespace internal { @@ -58,8 +35,8 @@ class Locator; - SplayTree(AllocationPolicy allocator = AllocationPolicy()) - : root_(NULL), allocator_(allocator) { } + explicit SplayTree(AllocationPolicy allocator = AllocationPolicy()) + : root_(NULL), allocator_(allocator) {} ~SplayTree(); INLINE(void* operator new(size_t size, diff -Nru nodejs-0.11.13/deps/v8/src/splay-tree-inl.h nodejs-0.11.15/deps/v8/src/splay-tree-inl.h --- nodejs-0.11.13/deps/v8/src/splay-tree-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/splay-tree-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_SPLAY_TREE_INL_H_ #define V8_SPLAY_TREE_INL_H_ -#include "splay-tree.h" +#include "src/splay-tree.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/store-buffer.cc nodejs-0.11.15/deps/v8/src/store-buffer.cc --- nodejs-0.11.13/deps/v8/src/store-buffer.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/store-buffer.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,740 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "store-buffer.h" - -#include <algorithm> - -#include "v8.h" -#include "store-buffer-inl.h" -#include "v8-counters.h" - -namespace v8 { -namespace internal { - -StoreBuffer::StoreBuffer(Heap* heap) - : heap_(heap), - start_(NULL), - limit_(NULL), - old_start_(NULL), - old_limit_(NULL), - old_top_(NULL), - old_reserved_limit_(NULL), - old_buffer_is_sorted_(false), - old_buffer_is_filtered_(false), - during_gc_(false), - store_buffer_rebuilding_enabled_(false), - callback_(NULL), - may_move_store_buffer_entries_(true), - virtual_memory_(NULL), - hash_set_1_(NULL), - hash_set_2_(NULL), - hash_sets_are_empty_(true) { -} - - -void StoreBuffer::SetUp() { - virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); - uintptr_t start_as_int = - reinterpret_cast<uintptr_t>(virtual_memory_->address()); - start_ = - reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); - limit_ = start_ + (kStoreBufferSize / kPointerSize); - - old_virtual_memory_ = - new VirtualMemory(kOldStoreBufferLength * kPointerSize); - old_top_ = old_start_ = - reinterpret_cast<Address*>(old_virtual_memory_->address()); - // Don't know the alignment requirements of the OS, but it is 
certainly not - // less than 0xfff. - ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0); - int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize); - ASSERT(initial_length > 0); - ASSERT(initial_length <= kOldStoreBufferLength); - old_limit_ = old_start_ + initial_length; - old_reserved_limit_ = old_start_ + kOldStoreBufferLength; - - CHECK(old_virtual_memory_->Commit( - reinterpret_cast<void*>(old_start_), - (old_limit_ - old_start_) * kPointerSize, - false)); - - ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); - ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); - Address* vm_limit = reinterpret_cast<Address*>( - reinterpret_cast<char*>(virtual_memory_->address()) + - virtual_memory_->size()); - ASSERT(start_ <= vm_limit); - ASSERT(limit_ <= vm_limit); - USE(vm_limit); - ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); - ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == - 0); - - CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_), - kStoreBufferSize, - false)); // Not executable. - heap_->public_set_store_buffer_top(start_); - - hash_set_1_ = new uintptr_t[kHashSetLength]; - hash_set_2_ = new uintptr_t[kHashSetLength]; - hash_sets_are_empty_ = false; - - ClearFilteringHashSets(); -} - - -void StoreBuffer::TearDown() { - delete virtual_memory_; - delete old_virtual_memory_; - delete[] hash_set_1_; - delete[] hash_set_2_; - old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL; - start_ = limit_ = NULL; - heap_->public_set_store_buffer_top(start_); -} - - -void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { - isolate->heap()->store_buffer()->Compact(); - isolate->counters()->store_buffer_overflows()->Increment(); -} - - -void StoreBuffer::Uniq() { - // Remove adjacent duplicates and cells that do not point at new space. 
- Address previous = NULL; - Address* write = old_start_; - ASSERT(may_move_store_buffer_entries_); - for (Address* read = old_start_; read < old_top_; read++) { - Address current = *read; - if (current != previous) { - if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { - *write++ = current; - } - } - previous = current; - } - old_top_ = write; -} - - -bool StoreBuffer::SpaceAvailable(intptr_t space_needed) { - return old_limit_ - old_top_ >= space_needed; -} - - -void StoreBuffer::EnsureSpace(intptr_t space_needed) { - while (old_limit_ - old_top_ < space_needed && - old_limit_ < old_reserved_limit_) { - size_t grow = old_limit_ - old_start_; // Double size. - CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_), - grow * kPointerSize, - false)); - old_limit_ += grow; - } - - if (SpaceAvailable(space_needed)) return; - - if (old_buffer_is_filtered_) return; - ASSERT(may_move_store_buffer_entries_); - Compact(); - - old_buffer_is_filtered_ = true; - bool page_has_scan_on_scavenge_flag = false; - - PointerChunkIterator it(heap_); - MemoryChunk* chunk; - while ((chunk = it.next()) != NULL) { - if (chunk->scan_on_scavenge()) { - page_has_scan_on_scavenge_flag = true; - break; - } - } - - if (page_has_scan_on_scavenge_flag) { - Filter(MemoryChunk::SCAN_ON_SCAVENGE); - } - - if (SpaceAvailable(space_needed)) return; - - // Sample 1 entry in 97 and filter out the pages where we estimate that more - // than 1 in 8 pointers are to new space. 
- static const int kSampleFinenesses = 5; - static const struct Samples { - int prime_sample_step; - int threshold; - } samples[kSampleFinenesses] = { - { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 }, - { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 }, - { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 }, - { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 }, - { 1, 0} - }; - for (int i = 0; i < kSampleFinenesses; i++) { - ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold); - // As a last resort we mark all pages as being exempt from the store buffer. - ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_); - if (SpaceAvailable(space_needed)) return; - } - UNREACHABLE(); -} - - -// Sample the store buffer to see if some pages are taking up a lot of space -// in the store buffer. -void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) { - PointerChunkIterator it(heap_); - MemoryChunk* chunk; - while ((chunk = it.next()) != NULL) { - chunk->set_store_buffer_counter(0); - } - bool created_new_scan_on_scavenge_pages = false; - MemoryChunk* previous_chunk = NULL; - for (Address* p = old_start_; p < old_top_; p += prime_sample_step) { - Address addr = *p; - MemoryChunk* containing_chunk = NULL; - if (previous_chunk != NULL && previous_chunk->Contains(addr)) { - containing_chunk = previous_chunk; - } else { - containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr); - } - int old_counter = containing_chunk->store_buffer_counter(); - if (old_counter >= threshold) { - containing_chunk->set_scan_on_scavenge(true); - created_new_scan_on_scavenge_pages = true; - } - containing_chunk->set_store_buffer_counter(old_counter + 1); - previous_chunk = containing_chunk; - } - if (created_new_scan_on_scavenge_pages) { - Filter(MemoryChunk::SCAN_ON_SCAVENGE); - } - old_buffer_is_filtered_ = true; -} - - -void StoreBuffer::Filter(int flag) { - Address* new_top = old_start_; - MemoryChunk* previous_chunk = NULL; - for 
(Address* p = old_start_; p < old_top_; p++) { - Address addr = *p; - MemoryChunk* containing_chunk = NULL; - if (previous_chunk != NULL && previous_chunk->Contains(addr)) { - containing_chunk = previous_chunk; - } else { - containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr); - previous_chunk = containing_chunk; - } - if (!containing_chunk->IsFlagSet(flag)) { - *new_top++ = addr; - } - } - old_top_ = new_top; - - // Filtering hash sets are inconsistent with the store buffer after this - // operation. - ClearFilteringHashSets(); -} - - -void StoreBuffer::SortUniq() { - Compact(); - if (old_buffer_is_sorted_) return; - std::sort(old_start_, old_top_); - Uniq(); - - old_buffer_is_sorted_ = true; - - // Filtering hash sets are inconsistent with the store buffer after this - // operation. - ClearFilteringHashSets(); -} - - -bool StoreBuffer::PrepareForIteration() { - Compact(); - PointerChunkIterator it(heap_); - MemoryChunk* chunk; - bool page_has_scan_on_scavenge_flag = false; - while ((chunk = it.next()) != NULL) { - if (chunk->scan_on_scavenge()) { - page_has_scan_on_scavenge_flag = true; - break; - } - } - - if (page_has_scan_on_scavenge_flag) { - Filter(MemoryChunk::SCAN_ON_SCAVENGE); - } - - // Filtering hash sets are inconsistent with the store buffer after - // iteration. - ClearFilteringHashSets(); - - return page_has_scan_on_scavenge_flag; -} - - -#ifdef DEBUG -void StoreBuffer::Clean() { - ClearFilteringHashSets(); - Uniq(); // Also removes things that no longer point to new space. 
- EnsureSpace(kStoreBufferSize / 2); -} - - -static Address* in_store_buffer_1_element_cache = NULL; - - -bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { - if (!FLAG_enable_slow_asserts) return true; - if (in_store_buffer_1_element_cache != NULL && - *in_store_buffer_1_element_cache == cell_address) { - return true; - } - Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); - for (Address* current = top - 1; current >= start_; current--) { - if (*current == cell_address) { - in_store_buffer_1_element_cache = current; - return true; - } - } - for (Address* current = old_top_ - 1; current >= old_start_; current--) { - if (*current == cell_address) { - in_store_buffer_1_element_cache = current; - return true; - } - } - return false; -} -#endif - - -void StoreBuffer::ClearFilteringHashSets() { - if (!hash_sets_are_empty_) { - memset(reinterpret_cast<void*>(hash_set_1_), - 0, - sizeof(uintptr_t) * kHashSetLength); - memset(reinterpret_cast<void*>(hash_set_2_), - 0, - sizeof(uintptr_t) * kHashSetLength); - hash_sets_are_empty_ = true; - } -} - - -void StoreBuffer::GCPrologue() { - ClearFilteringHashSets(); - during_gc_ = true; -} - - -#ifdef VERIFY_HEAP -static void DummyScavengePointer(HeapObject** p, HeapObject* o) { - // Do nothing. 
-} - - -void StoreBuffer::VerifyPointers(PagedSpace* space, - RegionCallback region_callback) { - PageIterator it(space); - - while (it.has_next()) { - Page* page = it.next(); - FindPointersToNewSpaceOnPage( - reinterpret_cast<PagedSpace*>(page->owner()), - page, - region_callback, - &DummyScavengePointer, - false); - } -} - - -void StoreBuffer::VerifyPointers(LargeObjectSpace* space) { - LargeObjectIterator it(space); - for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { - if (object->IsFixedArray()) { - Address slot_address = object->address(); - Address end = object->address() + object->Size(); - - while (slot_address < end) { - HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); - // When we are not in GC the Heap::InNewSpace() predicate - // checks that pointers which satisfy predicate point into - // the active semispace. - heap_->InNewSpace(*slot); - slot_address += kPointerSize; - } - } - } -} -#endif - - -void StoreBuffer::Verify() { -#ifdef VERIFY_HEAP - VerifyPointers(heap_->old_pointer_space(), - &StoreBuffer::FindPointersToNewSpaceInRegion); - VerifyPointers(heap_->map_space(), - &StoreBuffer::FindPointersToNewSpaceInMapsRegion); - VerifyPointers(heap_->lo_space()); -#endif -} - - -void StoreBuffer::GCEpilogue() { - during_gc_ = false; -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - Verify(); - } -#endif -} - - -void StoreBuffer::FindPointersToNewSpaceInRegion( - Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps) { - for (Address slot_address = start; - slot_address < end; - slot_address += kPointerSize) { - Object** slot = reinterpret_cast<Object**>(slot_address); - if (heap_->InNewSpace(*slot)) { - HeapObject* object = reinterpret_cast<HeapObject*>(*slot); - ASSERT(object->IsHeapObject()); - // The new space object was not promoted if it still contains a map - // pointer. Clear the map field now lazily. 
- if (clear_maps) ClearDeadObject(object); - slot_callback(reinterpret_cast<HeapObject**>(slot), object); - if (heap_->InNewSpace(*slot)) { - EnterDirectlyIntoStoreBuffer(slot_address); - } - } - } -} - - -// Compute start address of the first map following given addr. -static inline Address MapStartAlign(Address addr) { - Address page = Page::FromAddress(addr)->area_start(); - return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize); -} - - -// Compute end address of the first map preceding given addr. -static inline Address MapEndAlign(Address addr) { - Address page = Page::FromAllocationTop(addr)->area_start(); - return page + ((addr - page) / Map::kSize * Map::kSize); -} - - -void StoreBuffer::FindPointersToNewSpaceInMaps( - Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps) { - ASSERT(MapStartAlign(start) == start); - ASSERT(MapEndAlign(end) == end); - - Address map_address = start; - while (map_address < end) { - ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address))); - ASSERT(Memory::Object_at(map_address)->IsMap()); - - Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset; - Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset; - - FindPointersToNewSpaceInRegion(pointer_fields_start, - pointer_fields_end, - slot_callback, - clear_maps); - map_address += Map::kSize; - } -} - - -void StoreBuffer::FindPointersToNewSpaceInMapsRegion( - Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps) { - Address map_aligned_start = MapStartAlign(start); - Address map_aligned_end = MapEndAlign(end); - - ASSERT(map_aligned_start == start); - ASSERT(map_aligned_end == end); - - FindPointersToNewSpaceInMaps(map_aligned_start, - map_aligned_end, - slot_callback, - clear_maps); -} - - -// This function iterates over all the pointers in a paged space in the heap, -// looking for pointers into new space. 
Within the pages there may be dead -// objects that have not been overwritten by free spaces or fillers because of -// lazy sweeping. These dead objects may not contain pointers to new space. -// The garbage areas that have been swept properly (these will normally be the -// large ones) will be marked with free space and filler map words. In -// addition any area that has never been used at all for object allocation must -// be marked with a free space or filler. Because the free space and filler -// maps do not move we can always recognize these even after a compaction. -// Normal objects like FixedArrays and JSObjects should not contain references -// to these maps. Constant pool array objects may contain references to these -// maps, however, constant pool arrays cannot contain pointers to new space -// objects, therefore they are skipped. The special garbage section (see -// comment in spaces.h) is skipped since it can contain absolutely anything. -// Any objects that are allocated during iteration may or may not be visited by -// the iteration, but they will not be partially visited. -void StoreBuffer::FindPointersToNewSpaceOnPage( - PagedSpace* space, - Page* page, - RegionCallback region_callback, - ObjectSlotCallback slot_callback, - bool clear_maps) { - Address visitable_start = page->area_start(); - Address end_of_page = page->area_end(); - - Address visitable_end = visitable_start; - - Object* free_space_map = heap_->free_space_map(); - Object* two_pointer_filler_map = heap_->two_pointer_filler_map(); - Object* constant_pool_array_map = heap_->constant_pool_array_map(); - - while (visitable_end < end_of_page) { - Object* o = *reinterpret_cast<Object**>(visitable_end); - // Skip fillers or constant pool arrays (which never contain new-space - // pointers but can contain pointers which can be confused for fillers) - // but not things that look like fillers in the special garbage section - // which can contain anything. 
- if (o == free_space_map || - o == two_pointer_filler_map || - o == constant_pool_array_map || - (visitable_end == space->top() && visitable_end != space->limit())) { - if (visitable_start != visitable_end) { - // After calling this the special garbage section may have moved. - (this->*region_callback)(visitable_start, - visitable_end, - slot_callback, - clear_maps); - if (visitable_end >= space->top() && visitable_end < space->limit()) { - visitable_end = space->limit(); - visitable_start = visitable_end; - continue; - } - } - if (visitable_end == space->top() && visitable_end != space->limit()) { - visitable_start = visitable_end = space->limit(); - } else { - // At this point we are either at the start of a filler, a - // constant pool array, or we are at the point where the space->top() - // used to be before the visit_pointer_region call above. Either way we - // can skip the object at the current spot: We don't promise to visit - // objects allocated during heap traversal, and if space->top() moved - // then it must be because an object was allocated at this point. 
- visitable_start = - visitable_end + HeapObject::FromAddress(visitable_end)->Size(); - visitable_end = visitable_start; - } - } else { - ASSERT(o != free_space_map); - ASSERT(o != two_pointer_filler_map); - ASSERT(o != constant_pool_array_map); - ASSERT(visitable_end < space->top() || visitable_end >= space->limit()); - visitable_end += kPointerSize; - } - } - ASSERT(visitable_end == end_of_page); - if (visitable_start != visitable_end) { - (this->*region_callback)(visitable_start, - visitable_end, - slot_callback, - clear_maps); - } -} - - -void StoreBuffer::IteratePointersInStoreBuffer( - ObjectSlotCallback slot_callback, - bool clear_maps) { - Address* limit = old_top_; - old_top_ = old_start_; - { - DontMoveStoreBufferEntriesScope scope(this); - for (Address* current = old_start_; current < limit; current++) { -#ifdef DEBUG - Address* saved_top = old_top_; -#endif - Object** slot = reinterpret_cast<Object**>(*current); - Object* object = *slot; - if (heap_->InFromSpace(object)) { - HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); - // The new space object was not promoted if it still contains a map - // pointer. Clear the map field now lazily. 
- if (clear_maps) ClearDeadObject(heap_object); - slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); - if (heap_->InNewSpace(*slot)) { - EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot)); - } - } - ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top); - } - } -} - - -void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { - IteratePointersToNewSpace(slot_callback, false); -} - - -void StoreBuffer::IteratePointersToNewSpaceAndClearMaps( - ObjectSlotCallback slot_callback) { - IteratePointersToNewSpace(slot_callback, true); -} - - -void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback, - bool clear_maps) { - // We do not sort or remove duplicated entries from the store buffer because - // we expect that callback will rebuild the store buffer thus removing - // all duplicates and pointers to old space. - bool some_pages_to_scan = PrepareForIteration(); - - // TODO(gc): we want to skip slots on evacuation candidates - // but we can't simply figure that out from slot address - // because slot can belong to a large object. - IteratePointersInStoreBuffer(slot_callback, clear_maps); - - // We are done scanning all the pointers that were in the store buffer, but - // there may be some pages marked scan_on_scavenge that have pointers to new - // space that are not in the store buffer. We must scan them now. As we - // scan, the surviving pointers to new space will be added to the store - // buffer. If there are still a lot of pointers to new space then we will - // keep the scan_on_scavenge flag on the page and discard the pointers that - // were added to the store buffer. If there are not many pointers to new - // space left on the page we will keep the pointers in the store buffer and - // remove the flag from the page. 
- if (some_pages_to_scan) { - if (callback_ != NULL) { - (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent); - } - PointerChunkIterator it(heap_); - MemoryChunk* chunk; - while ((chunk = it.next()) != NULL) { - if (chunk->scan_on_scavenge()) { - chunk->set_scan_on_scavenge(false); - if (callback_ != NULL) { - (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent); - } - if (chunk->owner() == heap_->lo_space()) { - LargePage* large_page = reinterpret_cast<LargePage*>(chunk); - HeapObject* array = large_page->GetObject(); - ASSERT(array->IsFixedArray()); - Address start = array->address(); - Address end = start + array->Size(); - FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps); - } else { - Page* page = reinterpret_cast<Page*>(chunk); - PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); - FindPointersToNewSpaceOnPage( - owner, - page, - (owner == heap_->map_space() ? - &StoreBuffer::FindPointersToNewSpaceInMapsRegion : - &StoreBuffer::FindPointersToNewSpaceInRegion), - slot_callback, - clear_maps); - } - } - } - if (callback_ != NULL) { - (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent); - } - } -} - - -void StoreBuffer::Compact() { - Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); - - if (top == start_) return; - - // There's no check of the limit in the loop below so we check here for - // the worst case (compaction doesn't eliminate any pointers). - ASSERT(top <= limit_); - heap_->public_set_store_buffer_top(start_); - EnsureSpace(top - start_); - ASSERT(may_move_store_buffer_entries_); - // Goes through the addresses in the store buffer attempting to remove - // duplicates. In the interest of speed this is a lossy operation. Some - // duplicates will remain. We have two hash sets with different hash - // functions to reduce the number of unnecessary clashes. - hash_sets_are_empty_ = false; // Hash sets are in use. 
- for (Address* current = start_; current < top; current++) { - ASSERT(!heap_->cell_space()->Contains(*current)); - ASSERT(!heap_->code_space()->Contains(*current)); - ASSERT(!heap_->old_data_space()->Contains(*current)); - uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); - // Shift out the last bits including any tags. - int_addr >>= kPointerSizeLog2; - // The upper part of an address is basically random because of ASLR and OS - // non-determinism, so we use only the bits within a page for hashing to - // make v8's behavior (more) deterministic. - uintptr_t hash_addr = - int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2); - int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) & - (kHashSetLength - 1)); - if (hash_set_1_[hash1] == int_addr) continue; - uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2)); - hash2 ^= hash2 >> (kHashSetLengthLog2 * 2); - hash2 &= (kHashSetLength - 1); - if (hash_set_2_[hash2] == int_addr) continue; - if (hash_set_1_[hash1] == 0) { - hash_set_1_[hash1] = int_addr; - } else if (hash_set_2_[hash2] == 0) { - hash_set_2_[hash2] = int_addr; - } else { - // Rather than slowing down we just throw away some entries. This will - // cause some duplicates to remain undetected. - hash_set_1_[hash1] = int_addr; - hash_set_2_[hash2] = 0; - } - old_buffer_is_sorted_ = false; - old_buffer_is_filtered_ = false; - *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); - ASSERT(old_top_ <= old_limit_); - } - heap_->isolate()->counters()->store_buffer_compactions()->Increment(); -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/store-buffer.h nodejs-0.11.15/deps/v8/src/store-buffer.h --- nodejs-0.11.13/deps/v8/src/store-buffer.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/store-buffer.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,270 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_STORE_BUFFER_H_ -#define V8_STORE_BUFFER_H_ - -#include "allocation.h" -#include "checks.h" -#include "globals.h" -#include "platform.h" -#include "v8globals.h" - -namespace v8 { -namespace internal { - -class Page; -class PagedSpace; -class StoreBuffer; - -typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to); - -typedef void (StoreBuffer::*RegionCallback)(Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps); - -// Used to implement the write barrier by collecting addresses of pointers -// between spaces. -class StoreBuffer { - public: - explicit StoreBuffer(Heap* heap); - - static void StoreBufferOverflow(Isolate* isolate); - - inline Address TopAddress(); - - void SetUp(); - void TearDown(); - - // This is used by the mutator to enter addresses into the store buffer. - inline void Mark(Address addr); - - // This is used by the heap traversal to enter the addresses into the store - // buffer that should still be in the store buffer after GC. It enters - // addresses directly into the old buffer because the GC starts by wiping the - // old buffer and thereafter only visits each cell once so there is no need - // to attempt to remove any dupes. During the first part of a GC we - // are using the store buffer to access the old spaces and at the same time - // we are rebuilding the store buffer using this function. There is, however - // no issue of overwriting the buffer we are iterating over, because this - // stage of the scavenge can only reduce the number of addresses in the store - // buffer (some objects are promoted so pointers to them do not need to be in - // the store buffer). The later parts of the GC scan the pages that are - // exempt from the store buffer and process the promotion queue. These steps - // can overflow this buffer. We check for this and on overflow we call the - // callback set up with the StoreBufferRebuildScope object. 
- inline void EnterDirectlyIntoStoreBuffer(Address addr); - - // Iterates over all pointers that go from old space to new space. It will - // delete the store buffer as it starts so the callback should reenter - // surviving old-to-new pointers into the store buffer to rebuild it. - void IteratePointersToNewSpace(ObjectSlotCallback callback); - - // Same as IteratePointersToNewSpace but additonally clears maps in objects - // referenced from the store buffer that do not contain a forwarding pointer. - void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback); - - static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2); - static const int kStoreBufferSize = kStoreBufferOverflowBit; - static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address); - static const int kOldStoreBufferLength = kStoreBufferLength * 16; - static const int kHashSetLengthLog2 = 12; - static const int kHashSetLength = 1 << kHashSetLengthLog2; - - void Compact(); - - void GCPrologue(); - void GCEpilogue(); - - Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); } - Object*** Start() { return reinterpret_cast<Object***>(old_start_); } - Object*** Top() { return reinterpret_cast<Object***>(old_top_); } - void SetTop(Object*** top) { - ASSERT(top >= Start()); - ASSERT(top <= Limit()); - old_top_ = reinterpret_cast<Address*>(top); - } - - bool old_buffer_is_sorted() { return old_buffer_is_sorted_; } - bool old_buffer_is_filtered() { return old_buffer_is_filtered_; } - - // Goes through the store buffer removing pointers to things that have - // been promoted. Rebuilds the store buffer completely if it overflowed. - void SortUniq(); - - void EnsureSpace(intptr_t space_needed); - void Verify(); - - bool PrepareForIteration(); - -#ifdef DEBUG - void Clean(); - // Slow, for asserts only. 
- bool CellIsInStoreBuffer(Address cell); -#endif - - void Filter(int flag); - - private: - Heap* heap_; - - // The store buffer is divided up into a new buffer that is constantly being - // filled by mutator activity and an old buffer that is filled with the data - // from the new buffer after compression. - Address* start_; - Address* limit_; - - Address* old_start_; - Address* old_limit_; - Address* old_top_; - Address* old_reserved_limit_; - VirtualMemory* old_virtual_memory_; - - bool old_buffer_is_sorted_; - bool old_buffer_is_filtered_; - bool during_gc_; - // The garbage collector iterates over many pointers to new space that are not - // handled by the store buffer. This flag indicates whether the pointers - // found by the callbacks should be added to the store buffer or not. - bool store_buffer_rebuilding_enabled_; - StoreBufferCallback callback_; - bool may_move_store_buffer_entries_; - - VirtualMemory* virtual_memory_; - - // Two hash sets used for filtering. - // If address is in the hash set then it is guaranteed to be in the - // old part of the store buffer. - uintptr_t* hash_set_1_; - uintptr_t* hash_set_2_; - bool hash_sets_are_empty_; - - void ClearFilteringHashSets(); - - bool SpaceAvailable(intptr_t space_needed); - void Uniq(); - void ExemptPopularPages(int prime_sample_step, int threshold); - - // Set the map field of the object to NULL if contains a map. - inline void ClearDeadObject(HeapObject *object); - - void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps); - - void FindPointersToNewSpaceInRegion(Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps); - - // For each region of pointers on a page in use from an old space call - // visit_pointer_region callback. - // If either visit_pointer_region or callback can cause an allocation - // in old space and changes in allocation watermark then - // can_preallocate_during_iteration should be set to true. 
- void IteratePointersOnPage( - PagedSpace* space, - Page* page, - RegionCallback region_callback, - ObjectSlotCallback slot_callback); - - void FindPointersToNewSpaceInMaps( - Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps); - - void FindPointersToNewSpaceInMapsRegion( - Address start, - Address end, - ObjectSlotCallback slot_callback, - bool clear_maps); - - void FindPointersToNewSpaceOnPage( - PagedSpace* space, - Page* page, - RegionCallback region_callback, - ObjectSlotCallback slot_callback, - bool clear_maps); - - void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback, - bool clear_maps); - -#ifdef VERIFY_HEAP - void VerifyPointers(PagedSpace* space, RegionCallback region_callback); - void VerifyPointers(LargeObjectSpace* space); -#endif - - friend class StoreBufferRebuildScope; - friend class DontMoveStoreBufferEntriesScope; -}; - - -class StoreBufferRebuildScope { - public: - explicit StoreBufferRebuildScope(Heap* heap, - StoreBuffer* store_buffer, - StoreBufferCallback callback) - : store_buffer_(store_buffer), - stored_state_(store_buffer->store_buffer_rebuilding_enabled_), - stored_callback_(store_buffer->callback_) { - store_buffer_->store_buffer_rebuilding_enabled_ = true; - store_buffer_->callback_ = callback; - (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent); - } - - ~StoreBufferRebuildScope() { - store_buffer_->callback_ = stored_callback_; - store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_; - } - - private: - StoreBuffer* store_buffer_; - bool stored_state_; - StoreBufferCallback stored_callback_; -}; - - -class DontMoveStoreBufferEntriesScope { - public: - explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer) - : store_buffer_(store_buffer), - stored_state_(store_buffer->may_move_store_buffer_entries_) { - store_buffer_->may_move_store_buffer_entries_ = false; - } - - ~DontMoveStoreBufferEntriesScope() { - store_buffer_->may_move_store_buffer_entries_ = 
stored_state_; - } - - private: - StoreBuffer* store_buffer_; - bool stored_state_; -}; - -} } // namespace v8::internal - -#endif // V8_STORE_BUFFER_H_ diff -Nru nodejs-0.11.13/deps/v8/src/store-buffer-inl.h nodejs-0.11.15/deps/v8/src/store-buffer-inl.h --- nodejs-0.11.13/deps/v8/src/store-buffer-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/store-buffer-inl.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_STORE_BUFFER_INL_H_ -#define V8_STORE_BUFFER_INL_H_ - -#include "store-buffer.h" - -namespace v8 { -namespace internal { - -Address StoreBuffer::TopAddress() { - return reinterpret_cast<Address>(heap_->store_buffer_top_address()); -} - - -void StoreBuffer::Mark(Address addr) { - ASSERT(!heap_->cell_space()->Contains(addr)); - ASSERT(!heap_->code_space()->Contains(addr)); - ASSERT(!heap_->old_data_space()->Contains(addr)); - Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); - *top++ = addr; - heap_->public_set_store_buffer_top(top); - if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) { - ASSERT(top == limit_); - Compact(); - } else { - ASSERT(top < limit_); - } -} - - -void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) { - if (store_buffer_rebuilding_enabled_) { - SLOW_ASSERT(!heap_->cell_space()->Contains(addr) && - !heap_->code_space()->Contains(addr) && - !heap_->old_data_space()->Contains(addr) && - !heap_->new_space()->Contains(addr)); - Address* top = old_top_; - *top++ = addr; - old_top_ = top; - old_buffer_is_sorted_ = false; - old_buffer_is_filtered_ = false; - if (top >= old_limit_) { - ASSERT(callback_ != NULL); - (*callback_)(heap_, - MemoryChunk::FromAnyPointerAddress(heap_, addr), - kStoreBufferFullEvent); - } - } -} - - -void StoreBuffer::ClearDeadObject(HeapObject* object) { - Address& map_field = Memory::Address_at(object->address()); - if 
(heap_->map_space()->Contains(map_field)) { - map_field = NULL; - } -} - - -} } // namespace v8::internal - -#endif // V8_STORE_BUFFER_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/string-iterator.js nodejs-0.11.15/deps/v8/src/string-iterator.js --- nodejs-0.11.13/deps/v8/src/string-iterator.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/string-iterator.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,106 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +'use strict'; + + +// This file relies on the fact that the following declaration has been made +// in runtime.js: +// var $String = global.String; + + +var stringIteratorIteratedStringSymbol = + GLOBAL_PRIVATE("StringIterator#iteratedString"); +var stringIteratorNextIndexSymbol = GLOBAL_PRIVATE("StringIterator#next"); + + +function StringIterator() {} + + +// 21.1.5.1 CreateStringIterator Abstract Operation +function CreateStringIterator(string) { + var s = TO_STRING_INLINE(string); + var iterator = new StringIterator; + SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s); + SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, 0); + return iterator; +} + + +// 21.1.5.2.2 %StringIteratorPrototype%[@@iterator] +function StringIteratorIterator() { + return this; +} + + +// 21.1.5.2.1 %StringIteratorPrototype%.next( ) +function StringIteratorNext() { + var iterator = ToObject(this); + + if (!HAS_PRIVATE(iterator, stringIteratorIteratedStringSymbol)) { + throw MakeTypeError('incompatible_method_receiver', + ['String Iterator.prototype.next']); + } + + var s = GET_PRIVATE(iterator, stringIteratorIteratedStringSymbol); + if (IS_UNDEFINED(s)) { + return CreateIteratorResultObject(UNDEFINED, true); + } + + var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol); + var length = TO_UINT32(s.length); + + if (position >= length) { + SET_PRIVATE(iterator, 
stringIteratorIteratedStringSymbol, UNDEFINED); + return CreateIteratorResultObject(UNDEFINED, true); + } + + var first = %_StringCharCodeAt(s, position); + var resultString = %_StringCharFromCode(first); + position++; + + if (first >= 0xD800 && first <= 0xDBFF && position < length) { + var second = %_StringCharCodeAt(s, position); + if (second >= 0xDC00 && second <= 0xDFFF) { + resultString += %_StringCharFromCode(second); + position++; + } + } + + SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position); + + return CreateIteratorResultObject(resultString, false); +} + + +function SetUpStringIterator() { + %CheckIsBootstrapping(); + + %FunctionSetPrototype(StringIterator, new $Object()); + %FunctionSetInstanceClassName(StringIterator, 'String Iterator'); + + InstallFunctions(StringIterator.prototype, DONT_ENUM, $Array( + 'next', StringIteratorNext + )); + %FunctionSetName(StringIteratorIterator, '[Symbol.iterator]'); + %AddNamedProperty(StringIterator.prototype, symbolIterator, + StringIteratorIterator, DONT_ENUM); +} +SetUpStringIterator(); + + +// 21.1.3.27 String.prototype [ @@iterator ]( ) +function StringPrototypeIterator() { + return CreateStringIterator(this); +} + + +function ExtendStringPrototypeWithIterator() { + %CheckIsBootstrapping(); + + %FunctionSetName(StringPrototypeIterator, '[Symbol.iterator]'); + %AddNamedProperty($String.prototype, symbolIterator, + StringPrototypeIterator, DONT_ENUM); +} +ExtendStringPrototypeWithIterator(); diff -Nru nodejs-0.11.13/deps/v8/src/string.js nodejs-0.11.15/deps/v8/src/string.js --- nodejs-0.11.13/deps/v8/src/string.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/string.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
// This file relies on the fact that the following declaration has been made // in runtime.js: @@ -84,13 +61,13 @@ // ECMA-262, section 15.5.4.6 -function StringConcat() { +function StringConcat(other /* and more */) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat"); var len = %_ArgumentsLength(); var this_as_string = TO_STRING_INLINE(this); if (len === 1) { - return this_as_string + %_Arguments(0); + return this_as_string + other; } var parts = new InternalArray(len + 1); parts[0] = this_as_string; @@ -101,12 +78,9 @@ return %StringBuilderConcat(parts, len + 1, ""); } -// Match ES3 and Safari -%FunctionSetLength(StringConcat, 1); - // ECMA-262 section 15.5.4.7 -function StringIndexOf(pattern /* position */) { // length == 1 +function StringIndexOfJS(pattern /* position */) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf"); var subject = TO_STRING_INLINE(this); @@ -123,7 +97,7 @@ // ECMA-262 section 15.5.4.8 -function StringLastIndexOf(pat /* position */) { // length == 1 +function StringLastIndexOfJS(pat /* position */) { // length == 1 CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf"); var sub = TO_STRING_INLINE(this); @@ -154,7 +128,7 @@ // // This function is implementation specific. For now, we do not // do anything locale specific. -function StringLocaleCompare(other) { +function StringLocaleCompareJS(other) { CHECK_OBJECT_COERCIBLE(this, "String.prototype.localeCompare"); return %StringLocaleCompare(TO_STRING_INLINE(this), @@ -163,7 +137,7 @@ // ECMA-262 section 15.5.4.10 -function StringMatch(regexp) { +function StringMatchJS(regexp) { CHECK_OBJECT_COERCIBLE(this, "String.prototype.match"); var subject = TO_STRING_INLINE(this); @@ -173,7 +147,6 @@ var lastIndex = regexp.lastIndex; TO_INTEGER_FOR_SIDE_EFFECT(lastIndex); if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0); - %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]); // lastMatchInfo is defined in regexp.js. 
var result = %StringMatch(subject, regexp, lastMatchInfo); if (result !== null) lastMatchInfoOverride = null; @@ -194,7 +167,7 @@ // For now we do nothing, as proper normalization requires big tables. // If Intl is enabled, then i18n.js will override it and provide the the // proper functionality. -function StringNormalize(form) { +function StringNormalizeJS(form) { CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize"); var form = form ? TO_STRING_INLINE(form) : 'NFC'; @@ -244,7 +217,6 @@ // value is discarded. var lastIndex = search.lastIndex; TO_INTEGER_FOR_SIDE_EFFECT(lastIndex); - %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]); if (!IS_SPEC_FUNCTION(replace)) { replace = TO_STRING_INLINE(replace); @@ -610,7 +582,7 @@ // ECMA-262 section 15.5.4.14 -function StringSplit(separator, limit) { +function StringSplitJS(separator, limit) { CHECK_OBJECT_COERCIBLE(this, "String.prototype.split"); var subject = TO_STRING_INLINE(this); @@ -643,11 +615,7 @@ } -var ArrayPushBuiltin = $Array.prototype.push; - function StringSplitOnRegExp(subject, separator, limit, length) { - %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]); - if (length === 0) { if (DoRegExpExec(separator, subject, 0, 0) != null) { return []; @@ -658,21 +626,19 @@ var currentIndex = 0; var startIndex = 0; var startMatch = 0; - var result = []; + var result = new InternalArray(); outer_loop: while (true) { if (startIndex === length) { - %_CallFunction(result, %_SubString(subject, currentIndex, length), - ArrayPushBuiltin); + result[result.length] = %_SubString(subject, currentIndex, length); break; } var matchInfo = DoRegExpExec(separator, subject, startIndex); if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) { - %_CallFunction(result, %_SubString(subject, currentIndex, length), - ArrayPushBuiltin); + result[result.length] = %_SubString(subject, currentIndex, length); break; } var endIndex = matchInfo[CAPTURE1]; @@ -683,8 +649,7 @@ continue; } - 
%_CallFunction(result, %_SubString(subject, currentIndex, startMatch), - ArrayPushBuiltin); + result[result.length] = %_SubString(subject, currentIndex, startMatch); if (result.length === limit) break; @@ -693,17 +658,18 @@ var start = matchInfo[i++]; var end = matchInfo[i++]; if (end != -1) { - %_CallFunction(result, %_SubString(subject, start, end), - ArrayPushBuiltin); + result[result.length] = %_SubString(subject, start, end); } else { - %_CallFunction(result, UNDEFINED, ArrayPushBuiltin); + result[result.length] = UNDEFINED; } if (result.length === limit) break outer_loop; } startIndex = currentIndex = endIndex; } - return result; + var array_result = []; + %MoveArrayContents(result, array_result); + return array_result; } @@ -740,7 +706,7 @@ } -// This is not a part of ECMA-262. +// ES6 draft, revision 26 (2014-07-18), section B.2.3.1 function StringSubstr(start, n) { CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr"); @@ -781,7 +747,7 @@ // ECMA-262, 15.5.4.16 -function StringToLowerCase() { +function StringToLowerCaseJS() { CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase"); return %StringToLowerCase(TO_STRING_INLINE(this)); @@ -797,7 +763,7 @@ // ECMA-262, 15.5.4.18 -function StringToUpperCase() { +function StringToUpperCaseJS() { CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase"); return %StringToUpperCase(TO_STRING_INLINE(this)); @@ -812,7 +778,7 @@ } // ES5, 15.5.4.20 -function StringTrim() { +function StringTrimJS() { CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim"); return %StringTrim(TO_STRING_INLINE(this), true, true); @@ -861,78 +827,99 @@ } -// Helper function for very basic XSS protection. +// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1 function HtmlEscape(str) { - return TO_STRING_INLINE(str).replace(/</g, "<") - .replace(/>/g, ">") - .replace(/"/g, """) - .replace(/'/g, "'"); -} - - -// Compatibility support for KJS. -// Tested by mozilla/js/tests/js1_5/Regress/regress-276103.js. 
-function StringLink(s) { - return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>"; + return TO_STRING_INLINE(str).replace(/"/g, """); } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.2 function StringAnchor(name) { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor"); return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>"; } -function StringFontcolor(color) { - return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>"; -} - - -function StringFontsize(size) { - return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>"; -} - - +// ES6 draft, revision 26 (2014-07-18), section B.2.3.3 function StringBig() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.big"); return "<big>" + this + "</big>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.4 function StringBlink() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink"); return "<blink>" + this + "</blink>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.5 function StringBold() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold"); return "<b>" + this + "</b>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.6 function StringFixed() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed"); return "<tt>" + this + "</tt>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.7 +function StringFontcolor(color) { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor"); + return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>"; +} + + +// ES6 draft, revision 26 (2014-07-18), section B.2.3.8 +function StringFontsize(size) { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize"); + return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>"; +} + + +// ES6 draft, revision 26 (2014-07-18), section B.2.3.9 function StringItalics() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics"); return "<i>" + this + "</i>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.10 +function 
StringLink(s) { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.link"); + return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>"; +} + + +// ES6 draft, revision 26 (2014-07-18), section B.2.3.11 function StringSmall() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.small"); return "<small>" + this + "</small>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.12 function StringStrike() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike"); return "<strike>" + this + "</strike>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.13 function StringSub() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub"); return "<sub>" + this + "</sub>"; } +// ES6 draft, revision 26 (2014-07-18), section B.2.3.14 function StringSup() { + CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup"); return "<sup>" + this + "</sup>"; } @@ -946,7 +933,7 @@ %FunctionSetPrototype($String, new $String()); // Set up the constructor property on the String prototype object. - %SetProperty($String.prototype, "constructor", $String, DONT_ENUM); + %AddNamedProperty($String.prototype, "constructor", $String, DONT_ENUM); // Set up the non-enumerable functions on the String object. 
InstallFunctions($String, DONT_ENUM, $Array( @@ -960,22 +947,22 @@ "charAt", StringCharAt, "charCodeAt", StringCharCodeAt, "concat", StringConcat, - "indexOf", StringIndexOf, - "lastIndexOf", StringLastIndexOf, - "localeCompare", StringLocaleCompare, - "match", StringMatch, - "normalize", StringNormalize, + "indexOf", StringIndexOfJS, + "lastIndexOf", StringLastIndexOfJS, + "localeCompare", StringLocaleCompareJS, + "match", StringMatchJS, + "normalize", StringNormalizeJS, "replace", StringReplace, "search", StringSearch, "slice", StringSlice, - "split", StringSplit, + "split", StringSplitJS, "substring", StringSubstring, "substr", StringSubstr, - "toLowerCase", StringToLowerCase, + "toLowerCase", StringToLowerCaseJS, "toLocaleLowerCase", StringToLocaleLowerCase, - "toUpperCase", StringToUpperCase, + "toUpperCase", StringToUpperCaseJS, "toLocaleUpperCase", StringToLocaleUpperCase, - "trim", StringTrim, + "trim", StringTrimJS, "trimLeft", StringTrimLeft, "trimRight", StringTrimRight, "link", StringLink, diff -Nru nodejs-0.11.13/deps/v8/src/string-search.cc nodejs-0.11.15/deps/v8/src/string-search.cc --- nodejs-0.11.13/deps/v8/src/string-search.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/string-search.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,10 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" -#include "string-search.h" +#include "src/v8.h" + +#include "src/string-search.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/string-search.h nodejs-0.11.15/deps/v8/src/string-search.h --- nodejs-0.11.13/deps/v8/src/string-search.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/string-search.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_STRING_SEARCH_H_ #define V8_STRING_SEARCH_H_ @@ -107,7 +84,7 @@ // ASCII needle. return kAsciiAlphabetSize; } else { - ASSERT(sizeof(PatternChar) == 2); + DCHECK(sizeof(PatternChar) == 2); // UC16 needle. 
return kUC16AlphabetSize; } @@ -219,7 +196,7 @@ StringSearch<PatternChar, SubjectChar>* search, Vector<const SubjectChar> subject, int index) { - ASSERT_EQ(1, search->pattern_.length()); + DCHECK_EQ(1, search->pattern_.length()); PatternChar pattern_first_char = search->pattern_[0]; int i = index; if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) { @@ -253,7 +230,7 @@ inline bool CharCompare(const PatternChar* pattern, const SubjectChar* subject, int length) { - ASSERT(length > 0); + DCHECK(length > 0); int pos = 0; do { if (pattern[pos] != subject[pos]) { @@ -272,7 +249,7 @@ Vector<const SubjectChar> subject, int index) { Vector<const PatternChar> pattern = search->pattern_; - ASSERT(pattern.length() > 1); + DCHECK(pattern.length() > 1); int pattern_length = pattern.length(); PatternChar pattern_first_char = pattern[0]; int i = index; diff -Nru nodejs-0.11.13/deps/v8/src/string-stream.cc nodejs-0.11.15/deps/v8/src/string-stream.cc --- nodejs-0.11.13/deps/v8/src/string-stream.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/string-stream.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/string-stream.h" -#include "factory.h" -#include "string-stream.h" +#include "src/handles-inl.h" +#include "src/prototype.h" namespace v8 { namespace internal { @@ -41,16 +18,9 @@ } -NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory, - unsigned size) { - size_ = size; - space_ = memory; -} - - bool StringStream::Put(char c) { if (full()) return false; - ASSERT(length_ < capacity_); + DCHECK(length_ < capacity_); // Since the trailing '\0' is not accounted for in length_ fullness is // indicated by a difference of 1 between length_ and capacity_. Thus when // reaching a difference of 2 we need to grow the buffer. @@ -62,7 +32,7 @@ buffer_ = new_buffer; } else { // Reached the end of the available buffer. - ASSERT(capacity_ >= 5); + DCHECK(capacity_ >= 5); length_ = capacity_ - 1; // Indicate fullness of the stream. 
buffer_[length_ - 4] = '.'; buffer_[length_ - 3] = '.'; @@ -120,26 +90,26 @@ FmtElm current = elms[elm++]; switch (type) { case 's': { - ASSERT_EQ(FmtElm::C_STR, current.type_); + DCHECK_EQ(FmtElm::C_STR, current.type_); const char* value = current.data_.u_c_str_; Add(value); break; } case 'w': { - ASSERT_EQ(FmtElm::LC_STR, current.type_); + DCHECK_EQ(FmtElm::LC_STR, current.type_); Vector<const uc16> value = *current.data_.u_lc_str_; for (int i = 0; i < value.length(); i++) Put(static_cast<char>(value[i])); break; } case 'o': { - ASSERT_EQ(FmtElm::OBJ, current.type_); + DCHECK_EQ(FmtElm::OBJ, current.type_); Object* obj = current.data_.u_obj_; PrintObject(obj); break; } case 'k': { - ASSERT_EQ(FmtElm::INT, current.type_); + DCHECK_EQ(FmtElm::INT, current.type_); int value = current.data_.u_int_; if (0x20 <= value && value <= 0x7F) { Put(value); @@ -153,21 +123,30 @@ case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': { int value = current.data_.u_int_; EmbeddedVector<char, 24> formatted; - int length = OS::SNPrintF(formatted, temp.start(), value); + int length = SNPrintF(formatted, temp.start(), value); Add(Vector<const char>(formatted.start(), length)); break; } case 'f': case 'g': case 'G': case 'e': case 'E': { double value = current.data_.u_double_; - EmbeddedVector<char, 28> formatted; - OS::SNPrintF(formatted, temp.start(), value); - Add(formatted.start()); + int inf = std::isinf(value); + if (inf == -1) { + Add("-inf"); + } else if (inf == 1) { + Add("inf"); + } else if (std::isnan(value)) { + Add("nan"); + } else { + EmbeddedVector<char, 28> formatted; + SNPrintF(formatted, temp.start(), value); + Add(formatted.start()); + } break; } case 'p': { void* value = current.data_.u_pointer_; EmbeddedVector<char, 20> formatted; - OS::SNPrintF(formatted, temp.start(), value); + SNPrintF(formatted, temp.start(), value); Add(formatted.start()); break; } @@ -178,7 +157,7 @@ } // Verify that the buffer is 0-terminated - ASSERT(buffer_[length_] == '\0'); + 
DCHECK(buffer_[length_] == '\0'); } @@ -261,7 +240,7 @@ SmartArrayPointer<const char> StringStream::ToCString() const { char* str = NewArray<char>(length_ + 1); - OS::MemCopy(str, buffer_, length_); + MemCopy(str, buffer_, length_); str[length_] = '\0'; return SmartArrayPointer<const char>(str); } @@ -290,7 +269,7 @@ Handle<String> StringStream::ToString(Isolate* isolate) { return isolate->factory()->NewStringFromUtf8( - Vector<const char>(buffer_, length_)); + Vector<const char>(buffer_, length_)).ToHandleChecked(); } @@ -372,7 +351,8 @@ key->ShortPrint(); } Add(": "); - Object* value = js_object->RawFastPropertyAt(descs->GetFieldIndex(i)); + FieldIndex index = FieldIndex::ForDescriptor(map, i); + Object* value = js_object->RawFastPropertyAt(index); Add("%o\n", value); } } @@ -529,11 +509,11 @@ Object* name = fun->shared()->name(); bool print_name = false; Isolate* isolate = fun->GetIsolate(); - for (Object* p = receiver; - p != isolate->heap()->null_value(); - p = p->GetPrototype(isolate)) { - if (p->IsJSObject()) { - Object* key = JSObject::cast(p)->SlowReverseLookup(fun); + for (PrototypeIterator iter(isolate, receiver, + PrototypeIterator::START_AT_RECEIVER); + !iter.IsAtEnd(); iter.Advance()) { + if (iter.GetCurrent()->IsJSObject()) { + Object* key = JSObject::cast(iter.GetCurrent())->SlowReverseLookup(fun); if (key != isolate->heap()->undefined_value()) { if (!name->IsString() || !key->IsString() || @@ -570,7 +550,7 @@ if (new_space == NULL) { return space_; } - OS::MemCopy(new_space, space_, *bytes); + MemCopy(new_space, space_, *bytes); *bytes = new_bytes; DeleteArray(space_); space_ = new_space; @@ -578,12 +558,4 @@ } -// Only grow once to the maximum allowable size. 
-char* NoAllocationStringAllocator::grow(unsigned* bytes) { - ASSERT(size_ >= *bytes); - *bytes = size_; - return space_; -} - - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/string-stream.h nodejs-0.11.15/deps/v8/src/string-stream.h --- nodejs-0.11.13/deps/v8/src/string-stream.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/string-stream.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,18 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_STRING_STREAM_H_ #define V8_STRING_STREAM_H_ +#include "src/handles.h" + namespace v8 { namespace internal { - class StringAllocator { public: - virtual ~StringAllocator() {} + virtual ~StringAllocator() { } // Allocate a number of bytes. virtual char* allocate(unsigned bytes) = 0; // Allocate a larger number of bytes and copy the old buffer to the new one. @@ -46,31 +24,18 @@ // Normal allocator uses new[] and delete[]. -class HeapStringAllocator: public StringAllocator { +class HeapStringAllocator V8_FINAL : public StringAllocator { public: ~HeapStringAllocator() { DeleteArray(space_); } - char* allocate(unsigned bytes); - char* grow(unsigned* bytes); - private: - char* space_; -}; + virtual char* allocate(unsigned bytes) V8_OVERRIDE; + virtual char* grow(unsigned* bytes) V8_OVERRIDE; - -// Allocator for use when no new c++ heap allocation is allowed. -// Given a preallocated buffer up front and does no allocation while -// building message. 
-class NoAllocationStringAllocator: public StringAllocator { - public: - NoAllocationStringAllocator(char* memory, unsigned size); - char* allocate(unsigned bytes) { return space_; } - char* grow(unsigned* bytes); private: - unsigned size_; char* space_; }; -class FmtElm { +class FmtElm V8_FINAL { public: FmtElm(int value) : type_(INT) { // NOLINT data_.u_int_ = value; @@ -110,7 +75,7 @@ }; -class StringStream { +class StringStream V8_FINAL { public: explicit StringStream(StringAllocator* allocator): allocator_(allocator), @@ -120,9 +85,6 @@ buffer_[0] = 0; } - ~StringStream() { - } - bool Put(char c); bool Put(String* str); bool Put(String* str, int start, int end); @@ -175,7 +137,6 @@ static bool IsMentionedObjectCacheClear(Isolate* isolate); #endif - static const int kInitialCapacity = 16; private: @@ -192,32 +153,6 @@ DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream); }; - -// Utility class to print a list of items to a stream, divided by a separator. -class SimpleListPrinter { - public: - explicit SimpleListPrinter(StringStream* stream, char separator = ',') { - separator_ = separator; - stream_ = stream; - first_ = true; - } - - void Add(const char* str) { - if (first_) { - first_ = false; - } else { - stream_->Put(separator_); - } - stream_->Add(str); - } - - private: - bool first_; - char separator_; - StringStream* stream_; -}; - - } } // namespace v8::internal #endif // V8_STRING_STREAM_H_ diff -Nru nodejs-0.11.13/deps/v8/src/strtod.cc nodejs-0.11.15/deps/v8/src/strtod.cc --- nodejs-0.11.13/deps/v8/src/strtod.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/strtod.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,39 +1,18 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <stdarg.h> #include <cmath> -#include "globals.h" -#include "utils.h" -#include "strtod.h" -#include "bignum.h" -#include "cached-powers.h" -#include "double.h" +#include "src/v8.h" + +#include "src/bignum.h" +#include "src/cached-powers.h" +#include "src/double.h" +#include "src/globals.h" +#include "src/strtod.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -120,7 +99,7 @@ } // The input buffer has been trimmed. Therefore the last digit must be // different from '0'. - ASSERT(buffer[buffer.length() - 1] != '0'); + DCHECK(buffer[buffer.length() - 1] != '0'); // Set the last digit to be non-zero. This is sufficient to guarantee // correct rounding. significant_buffer[kMaxSignificantDecimalDigits - 1] = '1'; @@ -140,7 +119,7 @@ int i = 0; while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) { int digit = buffer[i++] - '0'; - ASSERT(0 <= digit && digit <= 9); + DCHECK(0 <= digit && digit <= 9); result = 10 * result + digit; } *number_of_read_digits = i; @@ -176,7 +155,8 @@ static bool DoubleStrtod(Vector<const char> trimmed, int exponent, double* result) { -#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER) +#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \ + !defined(_MSC_VER) // On x86 the floating-point stack can be 64 or 80 bits wide. If it is // 80 bits wide (as is the case on Linux) then double-rounding occurs and the // result is not accurate. @@ -197,14 +177,14 @@ if (exponent < 0 && -exponent < kExactPowersOfTenSize) { // 10^-exponent fits into a double. *result = static_cast<double>(ReadUint64(trimmed, &read_digits)); - ASSERT(read_digits == trimmed.length()); + DCHECK(read_digits == trimmed.length()); *result /= exact_powers_of_ten[-exponent]; return true; } if (0 <= exponent && exponent < kExactPowersOfTenSize) { // 10^exponent fits into a double. 
*result = static_cast<double>(ReadUint64(trimmed, &read_digits)); - ASSERT(read_digits == trimmed.length()); + DCHECK(read_digits == trimmed.length()); *result *= exact_powers_of_ten[exponent]; return true; } @@ -216,7 +196,7 @@ // 10^remaining_digits. As a result the remaining exponent now fits // into a double too. *result = static_cast<double>(ReadUint64(trimmed, &read_digits)); - ASSERT(read_digits == trimmed.length()); + DCHECK(read_digits == trimmed.length()); *result *= exact_powers_of_ten[remaining_digits]; *result *= exact_powers_of_ten[exponent - remaining_digits]; return true; @@ -229,11 +209,11 @@ // Returns 10^exponent as an exact DiyFp. // The given exponent must be in the range [1; kDecimalExponentDistance[. static DiyFp AdjustmentPowerOfTen(int exponent) { - ASSERT(0 < exponent); - ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance); + DCHECK(0 < exponent); + DCHECK(exponent < PowersOfTenCache::kDecimalExponentDistance); // Simply hardcode the remaining powers for the given decimal exponent // distance. - ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8); + DCHECK(PowersOfTenCache::kDecimalExponentDistance == 8); switch (exponent) { case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60); case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57); @@ -267,13 +247,13 @@ const int kDenominator = 1 << kDenominatorLog; // Move the remaining decimals into the exponent. exponent += remaining_decimals; - int error = (remaining_decimals == 0 ? 0 : kDenominator / 2); + int64_t error = (remaining_decimals == 0 ? 
0 : kDenominator / 2); int old_e = input.e(); input.Normalize(); error <<= old_e - input.e(); - ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent); + DCHECK(exponent <= PowersOfTenCache::kMaxDecimalExponent); if (exponent < PowersOfTenCache::kMinDecimalExponent) { *result = 0.0; return true; @@ -291,7 +271,7 @@ if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) { // The product of input with the adjustment power fits into a 64 bit // integer. - ASSERT(DiyFp::kSignificandSize == 64); + DCHECK(DiyFp::kSignificandSize == 64); } else { // The adjustment power is exact. There is hence only an error of 0.5. error += kDenominator / 2; @@ -333,8 +313,8 @@ precision_digits_count -= shift_amount; } // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too. - ASSERT(DiyFp::kSignificandSize == 64); - ASSERT(precision_digits_count < 64); + DCHECK(DiyFp::kSignificandSize == 64); + DCHECK(precision_digits_count < 64); uint64_t one64 = 1; uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1; uint64_t precision_bits = input.f() & precision_bits_mask; @@ -378,14 +358,14 @@ DiyFp upper_boundary = Double(guess).UpperBoundary(); - ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1); - ASSERT(buffer.length() + exponent > kMinDecimalPower); - ASSERT(buffer.length() <= kMaxSignificantDecimalDigits); + DCHECK(buffer.length() + exponent <= kMaxDecimalPower + 1); + DCHECK(buffer.length() + exponent > kMinDecimalPower); + DCHECK(buffer.length() <= kMaxSignificantDecimalDigits); // Make sure that the Bignum will be able to hold all our numbers. // Our Bignum implementation has a separate field for exponents. Shifts will // consume at most one bigit (< 64 bits). // ln(10) == 3.3219... 
- ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits); + DCHECK(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits); Bignum input; Bignum boundary; input.AssignDecimalString(buffer); diff -Nru nodejs-0.11.13/deps/v8/src/strtod.h nodejs-0.11.15/deps/v8/src/strtod.h --- nodejs-0.11.13/deps/v8/src/strtod.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/strtod.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_STRTOD_H_ #define V8_STRTOD_H_ diff -Nru nodejs-0.11.13/deps/v8/src/stub-cache.cc nodejs-0.11.15/deps/v8/src/stub-cache.cc --- nodejs-0.11.13/deps/v8/src/stub-cache.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/stub-cache.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "api.h" -#include "arguments.h" -#include "ast.h" -#include "code-stubs.h" -#include "cpu-profiler.h" -#include "gdb-jit.h" -#include "ic-inl.h" -#include "stub-cache.h" -#include "type-info.h" -#include "vm-state-inl.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/api.h" +#include "src/arguments.h" +#include "src/ast.h" +#include "src/code-stubs.h" +#include "src/cpu-profiler.h" +#include "src/gdb-jit.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" +#include "src/type-info.h" +#include "src/vm-state-inl.h" namespace v8 { namespace internal { @@ -50,30 +27,37 @@ void StubCache::Initialize() { - ASSERT(IsPowerOf2(kPrimaryTableSize)); - ASSERT(IsPowerOf2(kSecondaryTableSize)); + DCHECK(IsPowerOf2(kPrimaryTableSize)); + DCHECK(IsPowerOf2(kSecondaryTableSize)); Clear(); } -Code* StubCache::Set(Name* name, Map* map, Code* code) { - // Get the flags from the code. - Code::Flags flags = Code::RemoveTypeFromFlags(code->flags()); +static Code::Flags CommonStubCacheChecks(Name* name, Map* map, + Code::Flags flags) { + flags = Code::RemoveTypeAndHolderFromFlags(flags); // Validate that the name does not move on scavenge, and that we // can use identity checks instead of structural equality checks. 
- ASSERT(!heap()->InNewSpace(name)); - ASSERT(name->IsUniqueName()); + DCHECK(!name->GetHeap()->InNewSpace(name)); + DCHECK(name->IsUniqueName()); - // The state bits are not important to the hash function because - // the stub cache only contains monomorphic stubs. Make sure that - // the bits are the least significant so they will be the ones - // masked out. - ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC); + // The state bits are not important to the hash function because the stub + // cache only contains handlers. Make sure that the bits are the least + // significant so they will be the ones masked out. + DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags)); STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1); - // Make sure that the code type is not included in the hash. - ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + // Make sure that the code type and cache holder are not included in the hash. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0); + + return flags; +} + + +Code* StubCache::Set(Name* name, Map* map, Code* code) { + Code::Flags flags = CommonStubCacheChecks(name, map, code->flags()); // Compute the primary entry. int primary_offset = PrimaryOffset(name, flags, map); @@ -84,7 +68,8 @@ // secondary cache before overwriting it. 
if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) { Map* old_map = primary->map; - Code::Flags old_flags = Code::RemoveTypeFromFlags(old_code->flags()); + Code::Flags old_flags = + Code::RemoveTypeAndHolderFromFlags(old_code->flags()); int seed = PrimaryOffset(primary->key, old_flags, old_map); int secondary_offset = SecondaryOffset(primary->key, old_flags, seed); Entry* secondary = entry(secondary_, secondary_offset); @@ -100,150 +85,183 @@ } -Handle<Code> StubCache::FindIC(Handle<Name> name, - Handle<Map> stub_holder, - Code::Kind kind, - ExtraICState extra_state, - InlineCacheHolderFlag cache_holder) { +Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) { + flags = CommonStubCacheChecks(name, map, flags); + int primary_offset = PrimaryOffset(name, flags, map); + Entry* primary = entry(primary_, primary_offset); + if (primary->key == name && primary->map == map) { + return primary->value; + } + int secondary_offset = SecondaryOffset(name, flags, primary_offset); + Entry* secondary = entry(secondary_, secondary_offset); + if (secondary->key == name && secondary->map == map) { + return secondary->value; + } + return NULL; +} + + +Handle<Code> PropertyICCompiler::Find(Handle<Name> name, + Handle<Map> stub_holder, Code::Kind kind, + ExtraICState extra_state, + CacheHolderFlag cache_holder) { Code::Flags flags = Code::ComputeMonomorphicFlags( kind, extra_state, cache_holder); - Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Object* probe = stub_holder->FindInCodeCache(*name, flags); + if (probe->IsCode()) return handle(Code::cast(probe)); return Handle<Code>::null(); } -Handle<Code> StubCache::FindHandler(Handle<Name> name, - Handle<Map> stub_holder, - Code::Kind kind, - InlineCacheHolderFlag cache_holder, - Code::StubType type) { +Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name, + Handle<Map> stub_holder, + Code::Kind kind, + 
CacheHolderFlag cache_holder, + Code::StubType type) { Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder); - - Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Object* probe = stub_holder->FindInCodeCache(*name, flags); + if (probe->IsCode()) return handle(Code::cast(probe)); return Handle<Code>::null(); } -Handle<Code> StubCache::ComputeMonomorphicIC( - Code::Kind kind, - Handle<Name> name, - Handle<HeapType> type, - Handle<Code> handler, - ExtraICState extra_ic_state) { - InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type); +Handle<Code> PropertyICCompiler::ComputeMonomorphic( + Code::Kind kind, Handle<Name> name, Handle<HeapType> type, + Handle<Code> handler, ExtraICState extra_ic_state) { + Isolate* isolate = name->GetIsolate(); + if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) || + handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) { + name = isolate->factory()->normal_ic_symbol(); + } + + CacheHolderFlag flag; + Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag); - Handle<Map> stub_holder; Handle<Code> ic; // There are multiple string maps that all use the same prototype. That // prototype cannot hold multiple handlers, one for each of the string maps, // for a single name. Hence, turn off caching of the IC. 
bool can_be_cached = !type->Is(HeapType::String()); if (can_be_cached) { - stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate()); - ic = FindIC(name, stub_holder, kind, extra_ic_state, flag); + ic = Find(name, stub_holder, kind, extra_ic_state, flag); if (!ic.is_null()) return ic; } - if (kind == Code::LOAD_IC) { - LoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag); - ic = ic_compiler.CompileMonomorphicIC(type, handler, name); - } else if (kind == Code::KEYED_LOAD_IC) { - KeyedLoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag); - ic = ic_compiler.CompileMonomorphicIC(type, handler, name); - } else if (kind == Code::STORE_IC) { - StoreStubCompiler ic_compiler(isolate(), extra_ic_state); - ic = ic_compiler.CompileMonomorphicIC(type, handler, name); - } else { - ASSERT(kind == Code::KEYED_STORE_IC); - ASSERT(STANDARD_STORE == +#ifdef DEBUG + if (kind == Code::KEYED_STORE_IC) { + DCHECK(STANDARD_STORE == KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state)); - KeyedStoreStubCompiler ic_compiler(isolate(), extra_ic_state); - ic = ic_compiler.CompileMonomorphicIC(type, handler, name); } +#endif + + PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag); + ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY); if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic); return ic; } -Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name, - Handle<HeapType> type) { - InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type); - Handle<Map> stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate()); +Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent( + Handle<Name> name, Handle<HeapType> type) { + Isolate* isolate = name->GetIsolate(); + Handle<Map> receiver_map = IC::TypeToMap(*type, isolate); + if (receiver_map->prototype()->IsNull()) { + // TODO(jkummerow/verwaest): If there is no prototype and the property + // is nonexistent, introduce a builtin to handle this (fast properties + // -> 
return undefined, dictionary properties -> do negative lookup). + return Handle<Code>(); + } + CacheHolderFlag flag; + Handle<Map> stub_holder_map = + IC::GetHandlerCacheHolder(*type, false, isolate, &flag); + // If no dictionary mode objects are present in the prototype chain, the load // nonexistent IC stub can be shared for all names for a given map and we use // the empty string for the map cache in that case. If there are dictionary // mode objects involved, we need to do negative lookups in the stub and // therefore the stub will be specific to the name. - Handle<Map> current_map = stub_holder; - Handle<Name> cache_name = current_map->is_dictionary_map() - ? name : Handle<Name>::cast(isolate()->factory()->nonexistent_symbol()); - Handle<Object> next(current_map->prototype(), isolate()); - Handle<JSObject> last = Handle<JSObject>::null(); - while (!next->IsNull()) { - last = Handle<JSObject>::cast(next); - next = handle(current_map->prototype(), isolate()); - current_map = handle(Handle<HeapObject>::cast(next)->map()); + Handle<Name> cache_name = + receiver_map->is_dictionary_map() + ? name + : Handle<Name>::cast(isolate->factory()->nonexistent_symbol()); + Handle<Map> current_map = stub_holder_map; + Handle<JSObject> last(JSObject::cast(receiver_map->prototype())); + while (true) { if (current_map->is_dictionary_map()) cache_name = name; + if (current_map->prototype()->IsNull()) break; + last = handle(JSObject::cast(current_map->prototype())); + current_map = handle(last->map()); } - // Compile the stub that is either shared for all names or // name specific if there are global objects involved. 
- Handle<Code> handler = FindHandler( - cache_name, stub_holder, Code::LOAD_IC, flag, Code::FAST); - if (!handler.is_null()) { - return handler; - } - - LoadStubCompiler compiler(isolate_, kNoExtraICState, flag); - handler = compiler.CompileLoadNonexistent(type, last, cache_name); - Map::UpdateCodeCache(stub_holder, cache_name, handler); + Handle<Code> handler = PropertyHandlerCompiler::Find( + cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST); + if (!handler.is_null()) return handler; + + NamedLoadHandlerCompiler compiler(isolate, type, last, flag); + handler = compiler.CompileLoadNonexistent(cache_name); + Map::UpdateCodeCache(stub_holder_map, cache_name, handler); return handler; } -Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) { +Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic( + Handle<Map> receiver_map) { + Isolate* isolate = receiver_map->GetIsolate(); Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC); - Handle<Name> name = - isolate()->factory()->KeyedLoadElementMonomorphic_string(); + Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string(); - Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_); + Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate); if (probe->IsCode()) return Handle<Code>::cast(probe); - KeyedLoadStubCompiler compiler(isolate()); - Handle<Code> code = compiler.CompileLoadElement(receiver_map); + ElementsKind elements_kind = receiver_map->elements_kind(); + Handle<Code> stub; + if (receiver_map->has_fast_elements() || + receiver_map->has_external_array_elements() || + receiver_map->has_fixed_typed_array_elements()) { + stub = LoadFastElementStub(isolate, + receiver_map->instance_type() == JS_ARRAY_TYPE, + elements_kind).GetCode(); + } else { + stub = FLAG_compiled_keyed_dictionary_loads + ? 
LoadDictionaryElementStub(isolate).GetCode() + : LoadDictionaryElementPlatformStub(isolate).GetCode(); + } + PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC); + Handle<Code> code = + compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub, + isolate->factory()->empty_string(), ELEMENT); Map::UpdateCodeCache(receiver_map, name, code); return code; } -Handle<Code> StubCache::ComputeKeyedStoreElement( - Handle<Map> receiver_map, - StrictMode strict_mode, +Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic( + Handle<Map> receiver_map, StrictMode strict_mode, KeyedAccessStoreMode store_mode) { + Isolate* isolate = receiver_map->GetIsolate(); ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode); - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::KEYED_STORE_IC, extra_state); + Code::Flags flags = + Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state); - ASSERT(store_mode == STANDARD_STORE || + DCHECK(store_mode == STANDARD_STORE || store_mode == STORE_AND_GROW_NO_TRANSITION || store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS || store_mode == STORE_NO_TRANSITION_HANDLE_COW); - Handle<String> name = - isolate()->factory()->KeyedStoreElementMonomorphic_string(); - Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_); + Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string(); + Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate); if (probe->IsCode()) return Handle<Code>::cast(probe); - KeyedStoreStubCompiler compiler(isolate(), extra_state); - Handle<Code> code = compiler.CompileStoreElement(receiver_map); + PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state); + Handle<Code> code = + compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode); Map::UpdateCodeCache(receiver_map, name, code); - ASSERT(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) + 
DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) == store_mode); return code; } @@ -260,12 +278,13 @@ } -Code* StubCache::FindPreMonomorphicIC(Code::Kind kind, ExtraICState state) { +Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind, + ExtraICState state) { Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state); UnseededNumberDictionary* dictionary = - isolate()->heap()->non_monomorphic_cache(); - int entry = dictionary->FindEntry(isolate(), flags); - ASSERT(entry != -1); + isolate->heap()->non_monomorphic_cache(); + int entry = dictionary->FindEntry(isolate, flags); + DCHECK(entry != -1); Object* code = dictionary->ValueAt(entry); // This might be called during the marking phase of the collector // hence the unchecked cast. @@ -273,15 +292,16 @@ } -Handle<Code> StubCache::ComputeLoad(InlineCacheState ic_state, - ExtraICState extra_state) { +Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate, + InlineCacheState ic_state, + ExtraICState extra_state) { Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state); Handle<UnseededNumberDictionary> cache = - isolate_->factory()->non_monomorphic_cache(); - int entry = cache->FindEntry(isolate_, flags); + isolate->factory()->non_monomorphic_cache(); + int entry = cache->FindEntry(isolate, flags); if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry))); - StubCompiler compiler(isolate_); + PropertyICCompiler compiler(isolate, Code::LOAD_IC); Handle<Code> code; if (ic_state == UNINITIALIZED) { code = compiler.CompileLoadInitialize(flags); @@ -292,20 +312,21 @@ } else { UNREACHABLE(); } - FillCache(isolate_, code); + FillCache(isolate, code); return code; } -Handle<Code> StubCache::ComputeStore(InlineCacheState ic_state, - ExtraICState extra_state) { +Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate, + InlineCacheState ic_state, + ExtraICState extra_state) { Code::Flags flags = 
Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state); Handle<UnseededNumberDictionary> cache = - isolate_->factory()->non_monomorphic_cache(); - int entry = cache->FindEntry(isolate_, flags); + isolate->factory()->non_monomorphic_cache(); + int entry = cache->FindEntry(isolate, flags); if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry))); - StubCompiler compiler(isolate_); + PropertyICCompiler compiler(isolate, Code::STORE_IC); Handle<Code> code; if (ic_state == UNINITIALIZED) { code = compiler.CompileStoreInitialize(flags); @@ -319,25 +340,26 @@ UNREACHABLE(); } - FillCache(isolate_, code); + FillCache(isolate, code); return code; } -Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map, - CompareNilICStub& stub) { - Handle<String> name(isolate_->heap()->empty_string()); - if (!receiver_map->is_shared()) { - Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC, - stub.GetExtraICState()); +Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map, + CompareNilICStub* stub) { + Isolate* isolate = receiver_map->GetIsolate(); + Handle<String> name(isolate->heap()->empty_string()); + if (!receiver_map->is_dictionary_map()) { + Handle<Code> cached_ic = + Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState()); if (!cached_ic.is_null()) return cached_ic; } Code::FindAndReplacePattern pattern; - pattern.Add(isolate_->factory()->meta_map(), receiver_map); - Handle<Code> ic = stub.GetCodeCopy(isolate_, pattern); + pattern.Add(isolate->factory()->meta_map(), receiver_map); + Handle<Code> ic = stub->GetCodeCopy(pattern); - if (!receiver_map->is_shared()) { + if (!receiver_map->is_dictionary_map()) { Map::UpdateCodeCache(receiver_map, name, ic); } @@ -346,64 +368,55 @@ // TODO(verwaest): Change this method so it takes in a TypeHandleList. 
-Handle<Code> StubCache::ComputeLoadElementPolymorphic( +Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic( MapHandleList* receiver_maps) { + Isolate* isolate = receiver_maps->at(0)->GetIsolate(); Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC); Handle<PolymorphicCodeCache> cache = - isolate_->factory()->polymorphic_code_cache(); + isolate->factory()->polymorphic_code_cache(); Handle<Object> probe = cache->Lookup(receiver_maps, flags); if (probe->IsCode()) return Handle<Code>::cast(probe); TypeHandleList types(receiver_maps->length()); for (int i = 0; i < receiver_maps->length(); i++) { - types.Add(HeapType::Class(receiver_maps->at(i), isolate())); + types.Add(HeapType::Class(receiver_maps->at(i), isolate)); } CodeHandleList handlers(receiver_maps->length()); - KeyedLoadStubCompiler compiler(isolate_); + ElementHandlerCompiler compiler(isolate); compiler.CompileElementHandlers(receiver_maps, &handlers); - Handle<Code> code = compiler.CompilePolymorphicIC( - &types, &handlers, factory()->empty_string(), Code::NORMAL, ELEMENT); + PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC); + Handle<Code> code = ic_compiler.CompilePolymorphic( + &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL, + ELEMENT); - isolate()->counters()->keyed_load_polymorphic_stubs()->Increment(); + isolate->counters()->keyed_load_polymorphic_stubs()->Increment(); PolymorphicCodeCache::Update(cache, receiver_maps, flags, code); return code; } -Handle<Code> StubCache::ComputePolymorphicIC( - Code::Kind kind, - TypeHandleList* types, - CodeHandleList* handlers, - int number_of_valid_types, - Handle<Name> name, - ExtraICState extra_ic_state) { +Handle<Code> PropertyICCompiler::ComputePolymorphic( + Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers, + int valid_types, Handle<Name> name, ExtraICState extra_ic_state) { Handle<Code> handler = handlers->at(0); - Code::StubType type = number_of_valid_types == 1 ? 
handler->type() - : Code::NORMAL; - if (kind == Code::LOAD_IC) { - LoadStubCompiler ic_compiler(isolate_, extra_ic_state); - return ic_compiler.CompilePolymorphicIC( - types, handlers, name, type, PROPERTY); - } else { - ASSERT(kind == Code::STORE_IC); - StoreStubCompiler ic_compiler(isolate_, extra_ic_state); - return ic_compiler.CompilePolymorphicIC( - types, handlers, name, type, PROPERTY); - } + Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL; + DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC); + PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state); + return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY); } -Handle<Code> StubCache::ComputeStoreElementPolymorphic( - MapHandleList* receiver_maps, - KeyedAccessStoreMode store_mode, +Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic( + MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode, StrictMode strict_mode) { - ASSERT(store_mode == STANDARD_STORE || + Isolate* isolate = receiver_maps->at(0)->GetIsolate(); + DCHECK(store_mode == STANDARD_STORE || store_mode == STORE_AND_GROW_NO_TRANSITION || store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS || store_mode == STORE_NO_TRANSITION_HANDLE_COW); Handle<PolymorphicCodeCache> cache = - isolate_->factory()->polymorphic_code_cache(); + isolate->factory()->polymorphic_code_cache(); ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState( strict_mode, store_mode); Code::Flags flags = @@ -411,8 +424,9 @@ Handle<Object> probe = cache->Lookup(receiver_maps, flags); if (probe->IsCode()) return Handle<Code>::cast(probe); - KeyedStoreStubCompiler compiler(isolate_, extra_state); - Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps); + PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state); + Handle<Code> code = + compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode); PolymorphicCodeCache::Update(cache, receiver_maps, flags, 
code); return code; } @@ -421,12 +435,12 @@ void StubCache::Clear() { Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal); for (int i = 0; i < kPrimaryTableSize; i++) { - primary_[i].key = heap()->empty_string(); + primary_[i].key = isolate()->heap()->empty_string(); primary_[i].map = NULL; primary_[i].value = empty; } for (int j = 0; j < kSecondaryTableSize; j++) { - secondary_[j].key = heap()->empty_string(); + secondary_[j].key = isolate()->heap()->empty_string(); secondary_[j].map = NULL; secondary_[j].value = empty; } @@ -478,28 +492,30 @@ // StubCompiler implementation. -RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) { - JSObject* receiver = JSObject::cast(args[0]); - JSObject* holder = JSObject::cast(args[1]); - ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[2]); - Address setter_address = v8::ToCData<Address>(callback->setter()); - v8::AccessorSetterCallback fun = - FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address); - ASSERT(fun != NULL); - ASSERT(callback->IsCompatibleReceiver(receiver)); +RUNTIME_FUNCTION(StoreCallbackProperty) { + Handle<JSObject> receiver = args.at<JSObject>(0); + Handle<JSObject> holder = args.at<JSObject>(1); + Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2); Handle<Name> name = args.at<Name>(3); Handle<Object> value = args.at<Object>(4); HandleScope scope(isolate); + DCHECK(callback->IsCompatibleReceiver(*receiver)); + + Address setter_address = v8::ToCData<Address>(callback->setter()); + v8::AccessorSetterCallback fun = + FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address); + DCHECK(fun != NULL); + // TODO(rossberg): Support symbols in the API. 
if (name->IsSymbol()) return *value; Handle<String> str = Handle<String>::cast(name); - LOG(isolate, ApiNamedPropertyAccess("store", receiver, *name)); - PropertyCallbackArguments - custom_args(isolate, callback->data(), receiver, holder); + LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name)); + PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver, + *holder); custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return *value; } @@ -511,12 +527,12 @@ * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't * provide any value for the given name. */ -RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) { - ASSERT(args.length() == StubCache::kInterceptorArgsLength); +RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) { + DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength); Handle<Name> name_handle = - args.at<Name>(StubCache::kInterceptorArgsNameIndex); - Handle<InterceptorInfo> interceptor_info = - args.at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex); + args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex); + Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>( + NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex); // TODO(rossberg): Support symbols in the API. 
if (name_handle->IsSymbol()) @@ -526,12 +542,12 @@ Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); v8::NamedPropertyGetterCallback getter = FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address); - ASSERT(getter != NULL); + DCHECK(getter != NULL); Handle<JSObject> receiver = - args.at<JSObject>(StubCache::kInterceptorArgsThisIndex); + args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex); Handle<JSObject> holder = - args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex); + args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex); PropertyCallbackArguments callback_args( isolate, interceptor_info->data(), *receiver, *holder); { @@ -539,7 +555,7 @@ HandleScope scope(isolate); v8::Handle<v8::Value> r = callback_args.Call(getter, v8::Utils::ToLocal(name)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); if (!r.IsEmpty()) { Handle<Object> result = v8::Utils::OpenHandle(*r); result->VerifyApiCallResultType(); @@ -551,7 +567,7 @@ } -static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) { +static Object* ThrowReferenceError(Isolate* isolate, Name* name) { // If the load is non-contextual, just return the undefined result. // Note that both keyed and non-keyed loads may end up here. 
HandleScope scope(isolate); @@ -569,180 +585,130 @@ } -static Handle<Object> LoadWithInterceptor(Arguments* args, - PropertyAttributes* attrs) { - ASSERT(args->length() == StubCache::kInterceptorArgsLength); - Handle<Name> name_handle = - args->at<Name>(StubCache::kInterceptorArgsNameIndex); - Handle<InterceptorInfo> interceptor_info = - args->at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex); - Handle<JSObject> receiver_handle = - args->at<JSObject>(StubCache::kInterceptorArgsThisIndex); - Handle<JSObject> holder_handle = - args->at<JSObject>(StubCache::kInterceptorArgsHolderIndex); - - Isolate* isolate = receiver_handle->GetIsolate(); - - // TODO(rossberg): Support symbols in the API. - if (name_handle->IsSymbol()) { - return JSObject::GetPropertyPostInterceptor( - holder_handle, receiver_handle, name_handle, attrs); - } - Handle<String> name = Handle<String>::cast(name_handle); - - Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); - v8::NamedPropertyGetterCallback getter = - FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address); - ASSERT(getter != NULL); - - PropertyCallbackArguments callback_args(isolate, - interceptor_info->data(), - *receiver_handle, - *holder_handle); - { - HandleScope scope(isolate); - // Use the interceptor getter. - v8::Handle<v8::Value> r = - callback_args.Call(getter, v8::Utils::ToLocal(name)); - RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); - if (!r.IsEmpty()) { - *attrs = NONE; - Handle<Object> result = v8::Utils::OpenHandle(*r); - result->VerifyApiCallResultType(); - return scope.CloseAndEscape(result); - } - } - - Handle<Object> result = JSObject::GetPropertyPostInterceptor( - holder_handle, receiver_handle, name_handle, attrs); - return result; -} - - /** * Loads a property with an interceptor performing post interceptor * lookup if interceptor failed. 
*/ -RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) { - PropertyAttributes attr = NONE; +RUNTIME_FUNCTION(LoadPropertyWithInterceptor) { HandleScope scope(isolate); - Handle<Object> result = LoadWithInterceptor(&args, &attr); - RETURN_IF_EMPTY_HANDLE(isolate, result); + DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength); + Handle<Name> name = + args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex); + Handle<JSObject> receiver = + args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex); + Handle<JSObject> holder = + args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex); - // If the property is present, return it. - if (attr != ABSENT) return *result; - return ThrowReferenceError(isolate, Name::cast(args[0])); -} + Handle<Object> result; + LookupIterator it(receiver, name, holder); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, JSObject::GetProperty(&it)); + if (it.IsFound()) return *result; -RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) { - PropertyAttributes attr; - HandleScope scope(isolate); - Handle<Object> result = LoadWithInterceptor(&args, &attr); - RETURN_IF_EMPTY_HANDLE(isolate, result); - // This is call IC. In this case, we simply return the undefined result which - // will lead to an exception when trying to invoke the result as a - // function. 
- return *result; + return ThrowReferenceError(isolate, Name::cast(args[0])); } -RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) { +RUNTIME_FUNCTION(StorePropertyWithInterceptor) { HandleScope scope(isolate); - ASSERT(args.length() == 3); + DCHECK(args.length() == 3); StoreIC ic(IC::NO_EXTRA_FRAME, isolate); Handle<JSObject> receiver = args.at<JSObject>(0); Handle<Name> name = args.at<Name>(1); Handle<Object> value = args.at<Object>(2); - ASSERT(receiver->HasNamedInterceptor()); - PropertyAttributes attr = NONE; - Handle<Object> result = JSObject::SetPropertyWithInterceptor( - receiver, name, value, attr, ic.strict_mode()); - RETURN_IF_EMPTY_HANDLE(isolate, result); +#ifdef DEBUG + if (receiver->IsJSGlobalProxy()) { + PrototypeIterator iter(isolate, receiver); + DCHECK(iter.IsAtEnd() || + Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter)) + ->HasNamedInterceptor()); + } else { + DCHECK(receiver->HasNamedInterceptor()); + } +#endif + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::SetProperty(receiver, name, value, ic.strict_mode())); return *result; } -RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) { +RUNTIME_FUNCTION(LoadElementWithInterceptor) { HandleScope scope(isolate); Handle<JSObject> receiver = args.at<JSObject>(0); - ASSERT(args.smi_at(1) >= 0); + DCHECK(args.smi_at(1) >= 0); uint32_t index = args.smi_at(1); - Handle<Object> result = - JSObject::GetElementWithInterceptor(receiver, receiver, index); - RETURN_IF_EMPTY_HANDLE(isolate, result); + Handle<Object> result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::GetElementWithInterceptor(receiver, receiver, index)); return *result; } -Handle<Code> StubCompiler::CompileLoadInitialize(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) { LoadIC::GenerateInitialize(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize"); PROFILE(isolate(), 
CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code)); return code; } -Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) { LoadIC::GeneratePreMonomorphic(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic"); PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code)); return code; } -Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) { LoadIC::GenerateMegamorphic(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic"); PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code)); return code; } -Handle<Code> StubCompiler::CompileStoreInitialize(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) { StoreIC::GenerateInitialize(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize"); PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code)); return code; } -Handle<Code> StubCompiler::CompileStorePreMonomorphic(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) { StoreIC::GeneratePreMonomorphic(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic"); PROFILE(isolate(), CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code)); return code; } -Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) { ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags); StrictMode 
strict_mode = StoreIC::GetStrictMode(extra_state); StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode); Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric"); PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code)); return code; } -Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) { +Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) { StoreIC::GenerateMegamorphic(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic"); PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code)); return code; } @@ -750,61 +716,46 @@ #undef CALL_LOGGER_TAG -Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags, - const char* name) { +Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags, + const char* name) { // Create code object in the heap. CodeDesc desc; - masm_.GetCode(&desc); - Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject()); - if (code->has_major_key()) { - code->set_major_key(CodeStub::NoCache); - } + masm()->GetCode(&desc); + Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject()); + if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey()); #ifdef ENABLE_DISASSEMBLER - if (FLAG_print_code_stubs) code->Disassemble(name); + if (FLAG_print_code_stubs) { + OFStream os(stdout); + code->Disassemble(name, os); + } #endif return code; } -Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags, - Handle<Name> name) { +Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags, + Handle<Name> name) { return (FLAG_print_code_stubs && !name.is_null() && name->IsString()) ? 
GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get()) : GetCodeWithFlags(flags, NULL); } -void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder, - Handle<Name> name, - LookupResult* lookup) { - holder->LocalLookupRealNamedProperty(*name, lookup); - if (lookup->IsFound()) return; - if (holder->GetPrototype()->IsNull()) return; - holder->GetPrototype()->Lookup(*name, lookup); -} - - #define __ ACCESS_MASM(masm()) -Register LoadStubCompiler::HandlerFrontendHeader( - Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Label* miss) { +Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg, + Handle<Name> name, + Label* miss) { PrototypeCheckType check_type = CHECK_ALL_MAPS; int function_index = -1; - if (type->Is(HeapType::String())) { + if (type()->Is(HeapType::String())) { function_index = Context::STRING_FUNCTION_INDEX; - } else if (type->Is(HeapType::Symbol())) { + } else if (type()->Is(HeapType::Symbol())) { function_index = Context::SYMBOL_FUNCTION_INDEX; - } else if (type->Is(HeapType::Number())) { + } else if (type()->Is(HeapType::Number())) { function_index = Context::NUMBER_FUNCTION_INDEX; - } else if (type->Is(HeapType::Boolean())) { - // Booleans use the generic oddball map, so an additional check is needed to - // ensure the receiver is really a boolean. - GenerateBooleanCheck(object_reg, miss); + } else if (type()->Is(HeapType::Boolean())) { function_index = Context::BOOLEAN_FUNCTION_INDEX; } else { check_type = SKIP_RECEIVER; @@ -815,31 +766,27 @@ masm(), function_index, scratch1(), miss); Object* function = isolate()->native_context()->get(function_index); Object* prototype = JSFunction::cast(function)->instance_prototype(); - type = IC::CurrentTypeOf(handle(prototype, isolate()), isolate()); + set_type_for_object(handle(prototype, isolate())); object_reg = scratch1(); } // Check that the maps starting from the prototype haven't changed. 
- return CheckPrototypes( - type, object_reg, holder, scratch1(), scratch2(), scratch3(), - name, miss, check_type); + return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name, + miss, check_type); } -// HandlerFrontend for store uses the name register. It has to be restored -// before a miss. -Register StoreStubCompiler::HandlerFrontendHeader( - Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Label* miss) { - return CheckPrototypes(type, object_reg, holder, this->name(), - scratch1(), scratch2(), name, miss, SKIP_RECEIVER); +// Frontend for store uses the name register. It has to be restored before a +// miss. +Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg, + Handle<Name> name, + Label* miss) { + return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name, + miss, SKIP_RECEIVER); } -bool BaseLoadStoreStubCompiler::IncludesNumberType(TypeHandleList* types) { +bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) { for (int i = 0; i < types->length(); ++i) { if (types->at(i)->Is(HeapType::Number())) return true; } @@ -847,451 +794,301 @@ } -Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name) { +Register PropertyHandlerCompiler::Frontend(Register object_reg, + Handle<Name> name) { Label miss; - - Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); - - HandlerFrontendFooter(name, &miss); - + Register reg = FrontendHeader(object_reg, name, &miss); + FrontendFooter(name, &miss); return reg; } -void LoadStubCompiler::NonexistentHandlerFrontend(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name) { - Label miss; - - Register holder; +void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name, + Label* miss, + Register scratch1, + Register scratch2) { + Register holder_reg; Handle<Map> last_map; - if 
(last.is_null()) { - holder = receiver(); - last_map = IC::TypeToMap(*type, isolate()); - // If |type| has null as its prototype, |last| is Handle<JSObject>::null(). - ASSERT(last_map->prototype() == isolate()->heap()->null_value()); + if (holder().is_null()) { + holder_reg = receiver(); + last_map = IC::TypeToMap(*type(), isolate()); + // If |type| has null as its prototype, |holder()| is + // Handle<JSObject>::null(). + DCHECK(last_map->prototype() == isolate()->heap()->null_value()); } else { - holder = HandlerFrontendHeader(type, receiver(), last, name, &miss); - last_map = handle(last->map()); + holder_reg = FrontendHeader(receiver(), name, miss); + last_map = handle(holder()->map()); } - if (last_map->is_dictionary_map() && - !last_map->IsJSGlobalObjectMap() && - !last_map->IsJSGlobalProxyMap()) { - if (!name->IsUniqueName()) { - ASSERT(name->IsString()); - name = factory()->InternalizeString(Handle<String>::cast(name)); + if (last_map->is_dictionary_map()) { + if (last_map->IsJSGlobalObjectMap()) { + Handle<JSGlobalObject> global = + holder().is_null() + ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value()) + : Handle<JSGlobalObject>::cast(holder()); + GenerateCheckPropertyCell(masm(), global, name, scratch1, miss); + } else { + if (!name->IsUniqueName()) { + DCHECK(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); + } + DCHECK(holder().is_null() || + holder()->property_dictionary()->FindEntry(name) == + NameDictionary::kNotFound); + GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1, + scratch2); } - ASSERT(last.is_null() || - last->property_dictionary()->FindEntry(*name) == - NameDictionary::kNotFound); - GenerateDictionaryNegativeLookup(masm(), &miss, holder, name, - scratch2(), scratch3()); } +} - // If the last object in the prototype chain is a global object, - // check that the global property cell is empty. 
- if (last_map->IsJSGlobalObjectMap()) { - Handle<JSGlobalObject> global = last.is_null() - ? Handle<JSGlobalObject>::cast(type->AsConstant()) - : Handle<JSGlobalObject>::cast(last); - GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); - } - HandlerFrontendFooter(name, &miss); +Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name, + FieldIndex field) { + Register reg = Frontend(receiver(), name); + __ Move(receiver(), reg); + LoadFieldStub stub(isolate(), field); + GenerateTailCall(masm(), stub.GetCode()); + return GetCode(kind(), Code::FAST, name); } -Handle<Code> LoadStubCompiler::CompileLoadField( - Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - PropertyIndex field, - Representation representation) { - Register reg = HandlerFrontend(type, receiver(), holder, name); - GenerateLoadField(reg, holder, field, representation); - - // Return the generated code. +Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name, + int constant_index) { + Register reg = Frontend(receiver(), name); + __ Move(receiver(), reg); + LoadConstantStub stub(isolate(), constant_index); + GenerateTailCall(masm(), stub.GetCode()); return GetCode(kind(), Code::FAST, name); } -Handle<Code> LoadStubCompiler::CompileLoadConstant( - Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> value) { - HandlerFrontend(type, receiver(), holder, name); - GenerateLoadConstant(value); - - // Return the generated code. 
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent( + Handle<Name> name) { + Label miss; + NonexistentFrontendHeader(name, &miss, scratch2(), scratch3()); + GenerateLoadConstant(isolate()->factory()->undefined_value()); + FrontendFooter(name, &miss); return GetCode(kind(), Code::FAST, name); } -Handle<Code> LoadStubCompiler::CompileLoadCallback( - Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - Handle<ExecutableAccessorInfo> callback) { - Register reg = CallbackHandlerFrontend( - type, receiver(), holder, name, callback); +Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback( + Handle<Name> name, Handle<ExecutableAccessorInfo> callback) { + Register reg = Frontend(receiver(), name); GenerateLoadCallback(reg, callback); - - // Return the generated code. return GetCode(kind(), Code::FAST, name); } -Handle<Code> LoadStubCompiler::CompileLoadCallback( - Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - const CallOptimization& call_optimization) { - ASSERT(call_optimization.is_simple_api_call()); - Handle<JSFunction> callback = call_optimization.constant_function(); - CallbackHandlerFrontend(type, receiver(), holder, name, callback); - Handle<Map>receiver_map = IC::TypeToMap(*type, isolate()); +Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback( + Handle<Name> name, const CallOptimization& call_optimization) { + DCHECK(call_optimization.is_simple_api_call()); + Frontend(receiver(), name); + Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate()); GenerateFastApiCall( masm(), call_optimization, receiver_map, receiver(), scratch1(), false, 0, NULL); - // Return the generated code. return GetCode(kind(), Code::FAST, name); } -Handle<Code> LoadStubCompiler::CompileLoadInterceptor( - Handle<HeapType> type, - Handle<JSObject> holder, +Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor( Handle<Name> name) { + // Perform a lookup after the interceptor. 
LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); + holder()->LookupOwnRealNamedProperty(name, &lookup); + if (!lookup.IsFound()) { + PrototypeIterator iter(holder()->GetIsolate(), holder()); + if (!iter.IsAtEnd()) { + PrototypeIterator::GetCurrent(iter)->Lookup(name, &lookup); + } + } - Register reg = HandlerFrontend(type, receiver(), holder, name); + Register reg = Frontend(receiver(), name); // TODO(368): Compile in the whole chain: all the interceptors in // prototypes and ultimate answer. - GenerateLoadInterceptor(reg, type, holder, &lookup, name); - - // Return the generated code. + GenerateLoadInterceptor(reg, &lookup, name); return GetCode(kind(), Code::FAST, name); } -void LoadStubCompiler::GenerateLoadPostInterceptor( - Register interceptor_reg, - Handle<JSObject> interceptor_holder, - Handle<Name> name, - LookupResult* lookup) { - Handle<JSObject> holder(lookup->holder()); +void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor( + Register interceptor_reg, Handle<Name> name, LookupResult* lookup) { + Handle<JSObject> real_named_property_holder(lookup->holder()); + + set_type_for_object(holder()); + set_holder(real_named_property_holder); + Register reg = Frontend(interceptor_reg, name); + if (lookup->IsField()) { - PropertyIndex field = lookup->GetFieldIndex(); - if (interceptor_holder.is_identical_to(holder)) { - GenerateLoadField( - interceptor_reg, holder, field, lookup->representation()); - } else { - // We found FIELD property in prototype chain of interceptor's holder. - // Retrieve a field from field's holder. 
- Register reg = HandlerFrontend( - IC::CurrentTypeOf(interceptor_holder, isolate()), - interceptor_reg, holder, name); - GenerateLoadField( - reg, holder, field, lookup->representation()); - } + __ Move(receiver(), reg); + LoadFieldStub stub(isolate(), lookup->GetFieldIndex()); + GenerateTailCall(masm(), stub.GetCode()); } else { - // We found CALLBACKS property in prototype chain of interceptor's - // holder. - ASSERT(lookup->type() == CALLBACKS); + DCHECK(lookup->type() == CALLBACKS); Handle<ExecutableAccessorInfo> callback( ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); - ASSERT(callback->getter() != NULL); - - Register reg = CallbackHandlerFrontend( - IC::CurrentTypeOf(interceptor_holder, isolate()), - interceptor_reg, holder, name, callback); + DCHECK(callback->getter() != NULL); GenerateLoadCallback(reg, callback); } } -Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC( - Handle<HeapType> type, - Handle<Code> handler, - Handle<Name> name) { +Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type, + Handle<Code> handler, + Handle<Name> name, + IcCheckType check) { TypeHandleList types(1); CodeHandleList handlers(1); types.Add(type); handlers.Add(handler); Code::StubType stub_type = handler->type(); - return CompilePolymorphicIC(&types, &handlers, name, stub_type, PROPERTY); + return CompilePolymorphic(&types, &handlers, name, stub_type, check); } -Handle<Code> LoadStubCompiler::CompileLoadViaGetter( - Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - Handle<JSFunction> getter) { - HandlerFrontend(type, receiver(), holder, name); - GenerateLoadViaGetter(masm(), type, receiver(), getter); - - // Return the generated code. 
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter( + Handle<Name> name, Handle<JSFunction> getter) { + Frontend(receiver(), name); + GenerateLoadViaGetter(masm(), type(), receiver(), getter); return GetCode(kind(), Code::FAST, name); } -Handle<Code> StoreStubCompiler::CompileStoreTransition( - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name) { +// TODO(verwaest): Cleanup. holder() is actually the receiver. +Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition( + Handle<Map> transition, Handle<Name> name) { Label miss, slow; // Ensure no transitions to deprecated maps are followed. __ CheckMapDeprecated(transition, scratch1(), &miss); // Check that we are allowed to write this. - if (object->GetPrototype()->IsJSObject()) { - Handle<JSObject> holder; - // holder == object indicates that no property was found. - if (lookup->holder() != *object) { - holder = Handle<JSObject>(lookup->holder()); - } else { - // Find the top object. - holder = object; - do { - holder = Handle<JSObject>(JSObject::cast(holder->GetPrototype())); - } while (holder->GetPrototype()->IsJSObject()); - } - - Register holder_reg = HandlerFrontendHeader( - IC::CurrentTypeOf(object, isolate()), receiver(), holder, name, &miss); - - // If no property was found, and the holder (the last object in the - // prototype chain) is in slow mode, we need to do a negative lookup on the - // holder. - if (lookup->holder() == *object) { - GenerateNegativeHolderLookup(masm(), holder, holder_reg, name, &miss); + bool is_nonexistent = holder()->map() == transition->GetBackPointer(); + if (is_nonexistent) { + // Find the top object. 
+ Handle<JSObject> last; + PrototypeIterator iter(isolate(), holder()); + while (!iter.IsAtEnd()) { + last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)); + iter.Advance(); } + if (!last.is_null()) set_holder(last); + NonexistentFrontendHeader(name, &miss, scratch1(), scratch2()); + } else { + FrontendHeader(receiver(), name, &miss); + DCHECK(holder()->HasFastProperties()); } - GenerateStoreTransition(masm(), - object, - lookup, - transition, - name, - receiver(), this->name(), value(), - scratch1(), scratch2(), scratch3(), - &miss, - &slow); + GenerateStoreTransition(transition, name, receiver(), this->name(), value(), + scratch1(), scratch2(), scratch3(), &miss, &slow); - // Handle store cache miss. - GenerateRestoreName(masm(), &miss, name); + GenerateRestoreName(&miss, name); TailCallBuiltin(masm(), MissBuiltin(kind())); - GenerateRestoreName(masm(), &slow, name); + GenerateRestoreName(&slow, name); TailCallBuiltin(masm(), SlowBuiltin(kind())); - - // Return the generated code. return GetCode(kind(), Code::FAST, name); } -Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, - LookupResult* lookup, - Handle<Name> name) { +Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupResult* lookup, + Handle<Name> name) { Label miss; - - HandlerFrontendHeader(IC::CurrentTypeOf(object, isolate()), - receiver(), object, name, &miss); - - // Generate store field code. - GenerateStoreField(masm(), - object, - lookup, - receiver(), this->name(), value(), scratch1(), scratch2(), - &miss); - - // Handle store cache miss. + GenerateStoreField(lookup, value(), &miss); __ bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. 
return GetCode(kind(), Code::FAST, name); } -Handle<Code> StoreStubCompiler::CompileStoreArrayLength(Handle<JSObject> object, - LookupResult* lookup, - Handle<Name> name) { - // This accepts as a receiver anything JSArray::SetElementsLength accepts - // (currently anything except for external arrays which means anything with - // elements of FixedArray type). Value must be a number, but only smis are - // accepted as the most common case. - Label miss; - - // Check that value is a smi. - __ JumpIfNotSmi(value(), &miss); - - // Generate tail call to StoreIC_ArrayLength. - GenerateStoreArrayLength(); +Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter( + Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) { + Frontend(receiver(), name); + GenerateStoreViaSetter(masm(), type(), receiver(), setter); - // Handle miss case. - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. return GetCode(kind(), Code::FAST, name); } -Handle<Code> StoreStubCompiler::CompileStoreViaSetter( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, - Handle<JSFunction> setter) { - Handle<HeapType> type = IC::CurrentTypeOf(object, isolate()); - HandlerFrontend(type, receiver(), holder, name); - GenerateStoreViaSetter(masm(), type, receiver(), setter); - - return GetCode(kind(), Code::FAST, name); -} - - -Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, const CallOptimization& call_optimization) { - HandlerFrontend(IC::CurrentTypeOf(object, isolate()), - receiver(), holder, name); + Frontend(receiver(), name); Register values[] = { value() }; GenerateFastApiCall( masm(), call_optimization, handle(object->map()), receiver(), scratch1(), true, 1, values); - // Return the generated code. 
return GetCode(kind(), Code::FAST, name); } -Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( - Handle<Map> receiver_map) { - ElementsKind elements_kind = receiver_map->elements_kind(); - if (receiver_map->has_fast_elements() || - receiver_map->has_external_array_elements() || - receiver_map->has_fixed_typed_array_elements()) { - Handle<Code> stub = KeyedLoadFastElementStub( - receiver_map->instance_type() == JS_ARRAY_TYPE, - elements_kind).GetCode(isolate()); - __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK); - } else { - Handle<Code> stub = FLAG_compiled_keyed_dictionary_loads - ? KeyedLoadDictionaryElementStub().GetCode(isolate()) - : KeyedLoadDictionaryElementPlatformStub().GetCode(isolate()); - __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK); - } - - TailCallBuiltin(masm(), Builtins::kKeyedLoadIC_Miss); - - // Return the generated code. - return GetICCode(kind(), Code::NORMAL, factory()->empty_string()); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( - Handle<Map> receiver_map) { +Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic( + Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) { ElementsKind elements_kind = receiver_map->elements_kind(); bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE; Handle<Code> stub; if (receiver_map->has_fast_elements() || receiver_map->has_external_array_elements() || receiver_map->has_fixed_typed_array_elements()) { - stub = KeyedStoreFastElementStub( - is_jsarray, - elements_kind, - store_mode()).GetCode(isolate()); + stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind, + store_mode).GetCode(); } else { - stub = KeyedStoreElementStub(is_jsarray, - elements_kind, - store_mode()).GetCode(isolate()); + stub = StoreElementStub(isolate(), is_jsarray, elements_kind, store_mode) + .GetCode(); } __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK); TailCallBuiltin(masm(), 
Builtins::kKeyedStoreIC_Miss); - // Return the generated code. - return GetICCode(kind(), Code::NORMAL, factory()->empty_string()); + return GetCode(kind(), Code::NORMAL, factory()->empty_string()); } #undef __ -void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) { +void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm, + Builtins::Name name) { Handle<Code> code(masm->isolate()->builtins()->builtin(name)); GenerateTailCall(masm, code); } -void BaseLoadStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { -#ifdef ENABLE_GDB_JIT_INTERFACE - GDBJITInterface::CodeTag tag; - if (kind_ == Code::LOAD_IC) { - tag = GDBJITInterface::LOAD_IC; - } else if (kind_ == Code::KEYED_LOAD_IC) { - tag = GDBJITInterface::KEYED_LOAD_IC; - } else if (kind_ == Code::STORE_IC) { - tag = GDBJITInterface::STORE_IC; - } else { - tag = GDBJITInterface::KEYED_STORE_IC; +Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) { + if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) { + return load_calling_convention(); } - GDBJIT(AddCode(tag, *name, *code)); -#endif + DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC); + return store_calling_convention(); } -void BaseLoadStoreStubCompiler::InitializeRegisters() { - if (kind_ == Code::LOAD_IC) { - registers_ = LoadStubCompiler::registers(); - } else if (kind_ == Code::KEYED_LOAD_IC) { - registers_ = KeyedLoadStubCompiler::registers(); - } else if (kind_ == Code::STORE_IC) { - registers_ = StoreStubCompiler::registers(); - } else { - registers_ = KeyedStoreStubCompiler::registers(); - } -} - - -Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name, - InlineCacheState state) { - Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type); +Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type, + Handle<Name> name, + InlineCacheState state) { + Code::Flags flags = + 
Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder()); Handle<Code> code = GetCodeWithFlags(flags, name); + IC::RegisterWeakMapDependency(code); PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); - JitEvent(name, code); return code; } -Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name) { - ASSERT_EQ(kNoExtraICState, extra_state()); - Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_); +Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name) { + Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder()); Handle<Code> code = GetCodeWithFlags(flags, name); - PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); - JitEvent(name, code); + PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name)); return code; } -void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps, - CodeHandleList* handlers) { +void ElementHandlerCompiler::CompileElementHandlers( + MapHandleList* receiver_maps, CodeHandleList* handlers) { for (int i = 0; i < receiver_maps->length(); ++i) { Handle<Map> receiver_map = receiver_maps->at(i); Handle<Code> cached_stub; @@ -1307,14 +1104,13 @@ if (IsFastElementsKind(elements_kind) || IsExternalArrayElementsKind(elements_kind) || IsFixedTypedArrayElementsKind(elements_kind)) { - cached_stub = - KeyedLoadFastElementStub(is_js_array, - elements_kind).GetCode(isolate()); + cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind) + .GetCode(); } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) { cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments(); } else { - ASSERT(elements_kind == DICTIONARY_ELEMENTS); - cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate()); + DCHECK(elements_kind == DICTIONARY_ELEMENTS); + cached_stub = LoadDictionaryElementStub(isolate()).GetCode(); } } @@ -1323,8 +1119,8 
@@ } -Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic( - MapHandleList* receiver_maps) { +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) { // Collect MONOMORPHIC stubs for all |receiver_maps|. CodeHandleList handlers(receiver_maps->length()); MapHandleList transitioned_maps(receiver_maps->length()); @@ -1342,42 +1138,37 @@ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; ElementsKind elements_kind = receiver_map->elements_kind(); if (!transitioned_map.is_null()) { - cached_stub = ElementsTransitionAndStoreStub( - elements_kind, - transitioned_map->elements_kind(), - is_js_array, - store_mode()).GetCode(isolate()); + cached_stub = + ElementsTransitionAndStoreStub(isolate(), elements_kind, + transitioned_map->elements_kind(), + is_js_array, store_mode).GetCode(); } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) { cached_stub = isolate()->builtins()->KeyedStoreIC_Slow(); } else { if (receiver_map->has_fast_elements() || receiver_map->has_external_array_elements() || receiver_map->has_fixed_typed_array_elements()) { - cached_stub = KeyedStoreFastElementStub( - is_js_array, - elements_kind, - store_mode()).GetCode(isolate()); + cached_stub = StoreFastElementStub(isolate(), is_js_array, + elements_kind, store_mode).GetCode(); } else { - cached_stub = KeyedStoreElementStub( - is_js_array, - elements_kind, - store_mode()).GetCode(isolate()); + cached_stub = StoreElementStub(isolate(), is_js_array, elements_kind, + store_mode).GetCode(); } } - ASSERT(!cached_stub.is_null()); + DCHECK(!cached_stub.is_null()); handlers.Add(cached_stub); transitioned_maps.Add(transitioned_map); } - Handle<Code> code = - CompileStorePolymorphic(receiver_maps, &handlers, &transitioned_maps); + + Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers, + &transitioned_maps); isolate()->counters()->keyed_store_polymorphic_stubs()->Increment(); - 
PROFILE(isolate(), - CodeCreateEvent(Logger::KEYED_STORE_POLYMORPHIC_IC_TAG, *code, 0)); + PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0)); return code; } -void KeyedStoreStubCompiler::GenerateStoreDictionaryElement( +void ElementHandlerCompiler::GenerateStoreDictionaryElement( MacroAssembler* masm) { KeyedStoreIC::GenerateSlow(masm); } @@ -1403,7 +1194,7 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType( Handle<Map> object_map, HolderLookup* holder_lookup) const { - ASSERT(is_simple_api_call()); + DCHECK(is_simple_api_call()); if (!object_map->IsJSObjectMap()) { *holder_lookup = kHolderNotFound; return Handle<JSObject>::null(); @@ -1430,7 +1221,7 @@ bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver, Handle<JSObject> holder) const { - ASSERT(is_simple_api_call()); + DCHECK(is_simple_api_call()); if (!receiver->IsJSObject()) return false; Handle<Map> map(JSObject::cast(*receiver)->map()); HolderLookup holder_lookup; diff -Nru nodejs-0.11.13/deps/v8/src/stub-cache.h nodejs-0.11.15/deps/v8/src/stub-cache.h --- nodejs-0.11.13/deps/v8/src/stub-cache.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/stub-cache.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,52 +1,26 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_STUB_CACHE_H_ #define V8_STUB_CACHE_H_ -#include "allocation.h" -#include "arguments.h" -#include "code-stubs.h" -#include "ic-inl.h" -#include "macro-assembler.h" -#include "objects.h" -#include "zone-inl.h" +#include "src/allocation.h" +#include "src/arguments.h" +#include "src/code-stubs.h" +#include "src/ic-inl.h" +#include "src/macro-assembler.h" +#include "src/objects.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { -// The stub cache is used for megamorphic calls and property accesses. -// It maps (map, name, type)->Code* - -// The design of the table uses the inline cache stubs used for -// mono-morphic calls. The beauty of this, we do not have to -// invalidate the cache whenever a prototype map is changed. The stub -// validates the map chain as in the mono-morphic case. +// The stub cache is used for megamorphic property accesses. 
+// It maps (map, name, type) to property access handlers. The cache does not +// need explicit invalidation when a prototype chain is modified, since the +// handlers verify the chain. class CallOptimization; @@ -76,77 +50,17 @@ }; void Initialize(); - - Handle<JSObject> StubHolder(Handle<JSObject> receiver, - Handle<JSObject> holder); - - Handle<Code> FindIC(Handle<Name> name, - Handle<Map> stub_holder_map, - Code::Kind kind, - ExtraICState extra_state = kNoExtraICState, - InlineCacheHolderFlag cache_holder = OWN_MAP); - - Handle<Code> FindHandler(Handle<Name> name, - Handle<Map> map, - Code::Kind kind, - InlineCacheHolderFlag cache_holder, - Code::StubType type); - - Handle<Code> ComputeMonomorphicIC(Code::Kind kind, - Handle<Name> name, - Handle<HeapType> type, - Handle<Code> handler, - ExtraICState extra_ic_state); - - Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<HeapType> type); - - Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map); - - Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map, - StrictMode strict_mode, - KeyedAccessStoreMode store_mode); - - // --- - - Handle<Code> ComputeLoad(InlineCacheState ic_state, ExtraICState extra_state); - Handle<Code> ComputeStore(InlineCacheState ic_state, - ExtraICState extra_state); - - // --- - - Handle<Code> ComputeCompareNil(Handle<Map> receiver_map, - CompareNilICStub& stub); - - // --- - - Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps); - Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps, - KeyedAccessStoreMode store_mode, - StrictMode strict_mode); - - Handle<Code> ComputePolymorphicIC(Code::Kind kind, - TypeHandleList* types, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - ExtraICState extra_ic_state); - - // Finds the Code object stored in the Heap::non_monomorphic_cache(). 
- Code* FindPreMonomorphicIC(Code::Kind kind, ExtraICState extra_ic_state); - - // Update cache for entry hash(name, map). + // Access cache for entry hash(name, map). Code* Set(Name* name, Map* map, Code* code); - + Code* Get(Name* name, Map* map, Code::Flags flags); // Clear the lookup table (@ mark compact collection). void Clear(); - // Collect all maps that match the name and flags. void CollectMatchingMaps(SmallMapList* types, Handle<Name> name, Code::Flags flags, Handle<Context> native_context, Zone* zone); - // Generate code for probing the stub cache table. // Arguments extra, extra2 and extra3 may be used to pass additional scratch // registers. Set to no_reg if not needed. @@ -164,25 +78,21 @@ kSecondary }; - SCTableReference key_reference(StubCache::Table table) { return SCTableReference( reinterpret_cast<Address>(&first_entry(table)->key)); } - SCTableReference map_reference(StubCache::Table table) { return SCTableReference( reinterpret_cast<Address>(&first_entry(table)->map)); } - SCTableReference value_reference(StubCache::Table table) { return SCTableReference( reinterpret_cast<Address>(&first_entry(table)->value)); } - StubCache::Entry* first_entry(StubCache::Table table) { switch (table) { case StubCache::kPrimary: return StubCache::primary_; @@ -193,18 +103,11 @@ } Isolate* isolate() { return isolate_; } - Heap* heap() { return isolate()->heap(); } - Factory* factory() { return isolate()->factory(); } - // These constants describe the structure of the interceptor arguments on the - // stack. The arguments are pushed by the (platform-specific) - // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and - // LoadWithInterceptor. 
- static const int kInterceptorArgsNameIndex = 0; - static const int kInterceptorArgsInfoIndex = 1; - static const int kInterceptorArgsThisIndex = 2; - static const int kInterceptorArgsHolderIndex = 3; - static const int kInterceptorArgsLength = 4; + // Setting the entry size such that the index is shifted by Name::kHashShift + // is convenient; shifting down the length field (to extract the hash code) + // automatically discards the hash bit field. + static const int kCacheIndexShift = Name::kHashShift; private: explicit StubCache(Isolate* isolate); @@ -218,15 +121,11 @@ // Hash algorithm for the primary table. This algorithm is replicated in // assembler for every architecture. Returns an index into the table that - // is scaled by 1 << kHeapObjectTagSize. + // is scaled by 1 << kCacheIndexShift. static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) { - // This works well because the heap object tag size and the hash - // shift are equal. Shifting down the length field to get the - // hash code would effectively throw away two bits of the hash - // code. - STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift); + STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); // Compute the hash of the name (use entire hash field). - ASSERT(name->HasHashCode()); + DCHECK(name->HasHashCode()); uint32_t field = name->hash_field(); // Using only the low bits in 64-bit mode is unlikely to increase the // risk of collision even if the heap is spread over an area larger than @@ -239,12 +138,12 @@ (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); // Base the offset on a simple combination of name, flags, and map. uint32_t key = (map_low32bits + field) ^ iflags; - return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize); + return key & ((kPrimaryTableSize - 1) << kCacheIndexShift); } // Hash algorithm for the secondary table. This algorithm is replicated in // assembler for every architecture. 
Returns an index into the table that - // is scaled by 1 << kHeapObjectTagSize. + // is scaled by 1 << kCacheIndexShift. static int SecondaryOffset(Name* name, Code::Flags flags, int seed) { // Use the seed from the primary cache in the secondary cache. uint32_t name_low32bits = @@ -254,7 +153,7 @@ uint32_t iflags = (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); uint32_t key = (seed - name_low32bits) + iflags; - return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize); + return key & ((kSecondaryTableSize - 1) << kCacheIndexShift); } // Compute the entry for a given offset in exactly the same way as @@ -288,42 +187,219 @@ // Support functions for IC stubs for callbacks. -DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty); +DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty); // Support functions for IC stubs for interceptors. -DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty); -DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor); +DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly); +DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor); +DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor); +DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor); enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER }; enum IcCheckType { ELEMENT, PROPERTY }; -// The stub compilers compile stubs for the stub cache. 
-class StubCompiler BASE_EMBEDDED { +class PropertyAccessCompiler BASE_EMBEDDED { + public: + static Builtins::Name MissBuiltin(Code::Kind kind) { + switch (kind) { + case Code::LOAD_IC: + return Builtins::kLoadIC_Miss; + case Code::STORE_IC: + return Builtins::kStoreIC_Miss; + case Code::KEYED_LOAD_IC: + return Builtins::kKeyedLoadIC_Miss; + case Code::KEYED_STORE_IC: + return Builtins::kKeyedStoreIC_Miss; + default: + UNREACHABLE(); + } + return Builtins::kLoadIC_Miss; + } + + static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name); + + protected: + PropertyAccessCompiler(Isolate* isolate, Code::Kind kind, + CacheHolderFlag cache_holder) + : registers_(GetCallingConvention(kind)), + kind_(kind), + cache_holder_(cache_holder), + isolate_(isolate), + masm_(isolate, NULL, 256) {} + + Code::Kind kind() const { return kind_; } + CacheHolderFlag cache_holder() const { return cache_holder_; } + MacroAssembler* masm() { return &masm_; } + Isolate* isolate() const { return isolate_; } + Heap* heap() const { return isolate()->heap(); } + Factory* factory() const { return isolate()->factory(); } + + Register receiver() const { return registers_[0]; } + Register name() const { return registers_[1]; } + Register scratch1() const { return registers_[2]; } + Register scratch2() const { return registers_[3]; } + Register scratch3() const { return registers_[4]; } + + // Calling convention between indexed store IC and handler. 
+ Register transition_map() const { return scratch1(); } + + static Register* GetCallingConvention(Code::Kind); + static Register* load_calling_convention(); + static Register* store_calling_convention(); + static Register* keyed_store_calling_convention(); + + Register* registers_; + + static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code); + + Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name); + Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name); + + private: + Code::Kind kind_; + CacheHolderFlag cache_holder_; + + Isolate* isolate_; + MacroAssembler masm_; +}; + + +class PropertyICCompiler : public PropertyAccessCompiler { public: - explicit StubCompiler(Isolate* isolate, - ExtraICState extra_ic_state = kNoExtraICState) - : isolate_(isolate), extra_ic_state_(extra_ic_state), - masm_(isolate, NULL, 256), failure_(NULL) { } + // Finds the Code object stored in the Heap::non_monomorphic_cache(). + static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind, + ExtraICState extra_ic_state); + + // Named + static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state, + ExtraICState extra_state); + static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state, + ExtraICState extra_state); + + static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name, + Handle<HeapType> type, + Handle<Code> handler, + ExtraICState extra_ic_state); + static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types, + CodeHandleList* handlers, + int number_of_valid_maps, + Handle<Name> name, + ExtraICState extra_ic_state); + + // Keyed + static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map); + + static Handle<Code> ComputeKeyedStoreMonomorphic( + Handle<Map> receiver_map, StrictMode strict_mode, + KeyedAccessStoreMode store_mode); + static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps); + static Handle<Code> 
ComputeKeyedStorePolymorphic( + MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode, + StrictMode strict_mode); + + // Compare nil + static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map, + CompareNilICStub* stub); + + + private: + PropertyICCompiler(Isolate* isolate, Code::Kind kind, + ExtraICState extra_ic_state = kNoExtraICState, + CacheHolderFlag cache_holder = kCacheOnReceiver) + : PropertyAccessCompiler(isolate, kind, cache_holder), + extra_ic_state_(extra_ic_state) {} + + static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map, + Code::Kind kind, + ExtraICState extra_ic_state = kNoExtraICState, + CacheHolderFlag cache_holder = kCacheOnReceiver); Handle<Code> CompileLoadInitialize(Code::Flags flags); Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags); Handle<Code> CompileLoadMegamorphic(Code::Flags flags); - Handle<Code> CompileStoreInitialize(Code::Flags flags); Handle<Code> CompileStorePreMonomorphic(Code::Flags flags); Handle<Code> CompileStoreGeneric(Code::Flags flags); Handle<Code> CompileStoreMegamorphic(Code::Flags flags); - // Static functions for generating parts of stubs. 
- static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype); + Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler, + Handle<Name> name, IcCheckType check); + Handle<Code> CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, Handle<Name> name, + Code::StubType type, IcCheckType check); + + Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map, + KeyedAccessStoreMode store_mode); + Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps, + KeyedAccessStoreMode store_mode); + Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps, + CodeHandleList* handler_stubs, + MapHandleList* transitioned_maps); + + bool IncludesNumberType(TypeHandleList* types); + + Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name, + InlineCacheState state = MONOMORPHIC); + + Logger::LogEventsAndTags log_kind(Handle<Code> code) { + if (kind() == Code::LOAD_IC) { + return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG + : Logger::LOAD_POLYMORPHIC_IC_TAG; + } else if (kind() == Code::KEYED_LOAD_IC) { + return code->ic_state() == MONOMORPHIC + ? Logger::KEYED_LOAD_IC_TAG + : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG; + } else if (kind() == Code::STORE_IC) { + return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG + : Logger::STORE_POLYMORPHIC_IC_TAG; + } else { + DCHECK_EQ(Code::KEYED_STORE_IC, kind()); + return code->ic_state() == MONOMORPHIC + ? 
Logger::KEYED_STORE_IC_TAG + : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG; + } + } + + const ExtraICState extra_ic_state_; +}; + + +class PropertyHandlerCompiler : public PropertyAccessCompiler { + public: + static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind, + CacheHolderFlag cache_holder, Code::StubType type); + + protected: + PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind, + Handle<HeapType> type, Handle<JSObject> holder, + CacheHolderFlag cache_holder) + : PropertyAccessCompiler(isolate, kind, cache_holder), + type_(type), + holder_(holder) {} + + virtual ~PropertyHandlerCompiler() {} + + virtual Register FrontendHeader(Register object_reg, Handle<Name> name, + Label* miss) { + UNREACHABLE(); + return receiver(); + } + + virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); } + + Register Frontend(Register object_reg, Handle<Name> name); + void NonexistentFrontendHeader(Handle<Name> name, Label* miss, + Register scratch1, Register scratch2); + + // TODO(verwaest): Make non-static. + static void GenerateFastApiCall(MacroAssembler* masm, + const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, + Register scratch, bool is_store, int argc, + Register* values); // Helper function used to check that the dictionary doesn't contain // the property. This function may return false negatives, so miss_label @@ -337,35 +413,6 @@ Register r0, Register r1); - // Generates prototype loading code that uses the objects from the - // context we were in when this function was called. If the context - // has changed, a jump to miss is performed. This ties the generated - // code to a particular context and so must not be used in cases - // where the generated code is not allowed to have references to - // objects from a context. 
- static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype, - Label* miss); - - static void GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - bool inobject, - int index, - Representation representation); - - static void GenerateLoadArrayLength(MacroAssembler* masm, - Register receiver, - Register scratch, - Label* miss_label); - - static void GenerateLoadFunctionPrototype(MacroAssembler* masm, - Register receiver, - Register scratch1, - Register scratch2, - Label* miss_label); - // Generate code to check that a global property cell is empty. Create // the property cell at compilation time if no cell exists for the // property. @@ -375,8 +422,6 @@ Register scratch, Label* miss); - static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name); - // Generates code that verifies that the property holder has not changed // (checking maps of objects in the prototype chain for fast and global // objects or doing negative lookup for slow objects, ensures that the @@ -389,359 +434,163 @@ // register is only clobbered if it the same as the holder register. The // function returns a register containing the holder - either object_reg or // holder_reg. 
- Register CheckPrototypes(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Register holder_reg, - Register scratch1, - Register scratch2, - Handle<Name> name, - Label* miss, + Register CheckPrototypes(Register object_reg, Register holder_reg, + Register scratch1, Register scratch2, + Handle<Name> name, Label* miss, PrototypeCheckType check = CHECK_ALL_MAPS); - void GenerateBooleanCheck(Register object, Label* miss); - - static void GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - Handle<Map> receiver_map, - Register receiver, - Register scratch, - bool is_store, - int argc, - Register* values); - - protected: - Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name); - Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name); - - ExtraICState extra_state() { return extra_ic_state_; } - - MacroAssembler* masm() { return &masm_; } - void set_failure(Failure* failure) { failure_ = failure; } - - static void LookupPostInterceptor(Handle<JSObject> holder, - Handle<Name> name, - LookupResult* lookup); - - Isolate* isolate() { return isolate_; } - Heap* heap() { return isolate()->heap(); } - Factory* factory() { return isolate()->factory(); } - - static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code); + Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name); + void set_type_for_object(Handle<Object> object) { + type_ = IC::CurrentTypeOf(object, isolate()); + } + void set_holder(Handle<JSObject> holder) { holder_ = holder; } + Handle<HeapType> type() const { return type_; } + Handle<JSObject> holder() const { return holder_; } private: - Isolate* isolate_; - const ExtraICState extra_ic_state_; - MacroAssembler masm_; - Failure* failure_; + Handle<HeapType> type_; + Handle<JSObject> holder_; }; -enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS }; - - -class BaseLoadStoreStubCompiler: public StubCompiler { +class 
NamedLoadHandlerCompiler : public PropertyHandlerCompiler { public: - BaseLoadStoreStubCompiler(Isolate* isolate, - Code::Kind kind, - ExtraICState extra_ic_state = kNoExtraICState, - InlineCacheHolderFlag cache_holder = OWN_MAP) - : StubCompiler(isolate, extra_ic_state), - kind_(kind), - cache_holder_(cache_holder) { - InitializeRegisters(); - } - virtual ~BaseLoadStoreStubCompiler() { } - - Handle<Code> CompileMonomorphicIC(Handle<HeapType> type, - Handle<Code> handler, - Handle<Name> name); - - Handle<Code> CompilePolymorphicIC(TypeHandleList* types, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check); - - static Builtins::Name MissBuiltin(Code::Kind kind) { - switch (kind) { - case Code::LOAD_IC: return Builtins::kLoadIC_Miss; - case Code::STORE_IC: return Builtins::kStoreIC_Miss; - case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss; - case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss; - default: UNREACHABLE(); - } - return Builtins::kLoadIC_Miss; - } - - protected: - virtual Register HandlerFrontendHeader(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Label* miss) = 0; - - virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss) = 0; - - Register HandlerFrontend(Handle<HeapType> type, - Register object_reg, + NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type, Handle<JSObject> holder, - Handle<Name> name); + CacheHolderFlag cache_holder) + : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder, + cache_holder) {} - Handle<Code> GetCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name); - - Handle<Code> GetICCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name, - InlineCacheState state = MONOMORPHIC); - Code::Kind kind() { return kind_; } + virtual ~NamedLoadHandlerCompiler() {} - Logger::LogEventsAndTags log_kind(Handle<Code> code) { - if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; 
- if (kind_ == Code::LOAD_IC) { - return code->ic_state() == MONOMORPHIC - ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG; - } else if (kind_ == Code::KEYED_LOAD_IC) { - return code->ic_state() == MONOMORPHIC - ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG; - } else if (kind_ == Code::STORE_IC) { - return code->ic_state() == MONOMORPHIC - ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG; - } else { - return code->ic_state() == MONOMORPHIC - ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG; - } - } - void JitEvent(Handle<Name> name, Handle<Code> code); - - Register receiver() { return registers_[0]; } - Register name() { return registers_[1]; } - Register scratch1() { return registers_[2]; } - Register scratch2() { return registers_[3]; } - Register scratch3() { return registers_[4]; } - - void InitializeRegisters(); - - bool IncludesNumberType(TypeHandleList* types); - - Code::Kind kind_; - InlineCacheHolderFlag cache_holder_; - Register* registers_; -}; + Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index); - -class LoadStubCompiler: public BaseLoadStoreStubCompiler { - public: - LoadStubCompiler(Isolate* isolate, - ExtraICState extra_ic_state = kNoExtraICState, - InlineCacheHolderFlag cache_holder = OWN_MAP, - Code::Kind kind = Code::LOAD_IC) - : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state, - cache_holder) { } - virtual ~LoadStubCompiler() { } - - Handle<Code> CompileLoadField(Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - PropertyIndex index, - Representation representation); - - Handle<Code> CompileLoadCallback(Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, + Handle<Code> CompileLoadCallback(Handle<Name> name, Handle<ExecutableAccessorInfo> callback); - Handle<Code> CompileLoadCallback(Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, + Handle<Code> CompileLoadCallback(Handle<Name> name, const 
CallOptimization& call_optimization); - Handle<Code> CompileLoadConstant(Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> value); + Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index); - Handle<Code> CompileLoadInterceptor(Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name); + Handle<Code> CompileLoadInterceptor(Handle<Name> name); - Handle<Code> CompileLoadViaGetter(Handle<HeapType> type, - Handle<JSObject> holder, - Handle<Name> name, + Handle<Code> CompileLoadViaGetter(Handle<Name> name, Handle<JSFunction> getter); - static void GenerateLoadViaGetter(MacroAssembler* masm, - Handle<HeapType> type, + Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name, + bool is_configurable); + + // Static interface + static Handle<Code> ComputeLoadNonexistent(Handle<Name> name, + Handle<HeapType> type); + + static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type, Register receiver, Handle<JSFunction> getter); static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) { - GenerateLoadViaGetter( - masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>()); + GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg, + Handle<JSFunction>()); } - Handle<Code> CompileLoadNonexistent(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name); + static void GenerateLoadFunctionPrototype(MacroAssembler* masm, + Register receiver, + Register scratch1, + Register scratch2, + Label* miss_label); - Handle<Code> CompileLoadGlobal(Handle<HeapType> type, - Handle<GlobalObject> holder, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete); + // These constants describe the structure of the interceptor arguments on the + // stack. The arguments are pushed by the (platform-specific) + // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and + // LoadWithInterceptor. 
+ static const int kInterceptorArgsNameIndex = 0; + static const int kInterceptorArgsInfoIndex = 1; + static const int kInterceptorArgsThisIndex = 2; + static const int kInterceptorArgsHolderIndex = 3; + static const int kInterceptorArgsLength = 4; protected: - ContextualMode contextual_mode() { - return LoadIC::GetContextualMode(extra_state()); - } - - virtual Register HandlerFrontendHeader(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Label* miss); + virtual Register FrontendHeader(Register object_reg, Handle<Name> name, + Label* miss); - virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss); + virtual void FrontendFooter(Handle<Name> name, Label* miss); - Register CallbackHandlerFrontend(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> callback); - void NonexistentHandlerFrontend(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name); - - void GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation); + private: + Handle<Code> CompileLoadNonexistent(Handle<Name> name); void GenerateLoadConstant(Handle<Object> value); void GenerateLoadCallback(Register reg, Handle<ExecutableAccessorInfo> callback); void GenerateLoadCallback(const CallOptimization& call_optimization, Handle<Map> receiver_map); void GenerateLoadInterceptor(Register holder_reg, - Handle<Object> object, - Handle<JSObject> holder, LookupResult* lookup, Handle<Name> name); void GenerateLoadPostInterceptor(Register reg, - Handle<JSObject> interceptor_holder, Handle<Name> name, LookupResult* lookup); - private: - static Register* registers(); - Register scratch4() { return registers_[5]; } - friend class BaseLoadStoreStubCompiler; -}; - - -class KeyedLoadStubCompiler: public LoadStubCompiler { - public: - KeyedLoadStubCompiler(Isolate* isolate, - ExtraICState extra_ic_state = kNoExtraICState, - 
InlineCacheHolderFlag cache_holder = OWN_MAP) - : LoadStubCompiler(isolate, extra_ic_state, cache_holder, - Code::KEYED_LOAD_IC) { } - - Handle<Code> CompileLoadElement(Handle<Map> receiver_map); - - void CompileElementHandlers(MapHandleList* receiver_maps, - CodeHandleList* handlers); + // Generates prototype loading code that uses the objects from the + // context we were in when this function was called. If the context + // has changed, a jump to miss is performed. This ties the generated + // code to a particular context and so must not be used in cases + // where the generated code is not allowed to have references to + // objects from a context. + static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm, + int index, + Register prototype, + Label* miss); - static void GenerateLoadDictionaryElement(MacroAssembler* masm); - private: - static Register* registers(); - friend class BaseLoadStoreStubCompiler; + Register scratch4() { return registers_[5]; } }; -class StoreStubCompiler: public BaseLoadStoreStubCompiler { +class NamedStoreHandlerCompiler : public PropertyHandlerCompiler { public: - StoreStubCompiler(Isolate* isolate, - ExtraICState extra_ic_state, - Code::Kind kind = Code::STORE_IC) - : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state) {} - - virtual ~StoreStubCompiler() { } - - Handle<Code> CompileStoreTransition(Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name); - - Handle<Code> CompileStoreField(Handle<JSObject> object, - LookupResult* lookup, - Handle<Name> name); - - Handle<Code> CompileStoreArrayLength(Handle<JSObject> object, - LookupResult* lookup, - Handle<Name> name); - - void GenerateStoreArrayLength(); - - void GenerateNegativeHolderLookup(MacroAssembler* masm, - Handle<JSObject> holder, - Register holder_reg, - Handle<Name> name, - Label* miss); + explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type, + Handle<JSObject> holder) + : 
PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder, + kCacheOnReceiver) {} - void GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss_label, - Label* slow); - - void GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label); + virtual ~NamedStoreHandlerCompiler() {} - Handle<Code> CompileStoreCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, + Handle<Code> CompileStoreTransition(Handle<Map> transition, + Handle<Name> name); + Handle<Code> CompileStoreField(LookupResult* lookup, Handle<Name> name); + Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name, Handle<ExecutableAccessorInfo> callback); - - Handle<Code> CompileStoreCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, + Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name, const CallOptimization& call_optimization); + Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name, + Handle<JSFunction> setter); + Handle<Code> CompileStoreInterceptor(Handle<Name> name); static void GenerateStoreViaSetter(MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, + Handle<HeapType> type, Register receiver, Handle<JSFunction> setter); static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) { - GenerateStoreViaSetter( - masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>()); + GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg, + Handle<JSFunction>()); } - Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, - Handle<JSObject> holder, - 
Handle<Name> name, - Handle<JSFunction> setter); + protected: + virtual Register FrontendHeader(Register object_reg, Handle<Name> name, + Label* miss); + + virtual void FrontendFooter(Handle<Name> name, Label* miss); + void GenerateRestoreName(Label* label, Handle<Name> name); + + private: + void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name, + Register receiver_reg, Register name_reg, + Register value_reg, Register scratch1, + Register scratch2, Register scratch3, + Label* miss_label, Label* slow); - Handle<Code> CompileStoreInterceptor(Handle<JSObject> object, - Handle<Name> name); + void GenerateStoreField(LookupResult* lookup, Register value_reg, + Label* miss_label); static Builtins::Name SlowBuiltin(Code::Kind kind) { switch (kind) { @@ -752,51 +601,24 @@ return Builtins::kStoreIC_Slow; } - protected: - virtual Register HandlerFrontendHeader(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Label* miss); - - virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss); - void GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name); - - private: - static Register* registers(); static Register value(); - friend class BaseLoadStoreStubCompiler; }; -class KeyedStoreStubCompiler: public StoreStubCompiler { +class ElementHandlerCompiler : public PropertyHandlerCompiler { public: - KeyedStoreStubCompiler(Isolate* isolate, - ExtraICState extra_ic_state) - : StoreStubCompiler(isolate, extra_ic_state, Code::KEYED_STORE_IC) {} + explicit ElementHandlerCompiler(Isolate* isolate) + : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC, + Handle<HeapType>::null(), + Handle<JSObject>::null(), kCacheOnReceiver) {} - Handle<Code> CompileStoreElement(Handle<Map> receiver_map); + virtual ~ElementHandlerCompiler() {} - Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps); - - Handle<Code> 
CompileStoreElementPolymorphic(MapHandleList* receiver_maps); + void CompileElementHandlers(MapHandleList* receiver_maps, + CodeHandleList* handlers); + static void GenerateLoadDictionaryElement(MacroAssembler* masm); static void GenerateStoreDictionaryElement(MacroAssembler* masm); - - private: - static Register* registers(); - - KeyedAccessStoreMode store_mode() { - return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state()); - } - - Register transition_map() { return scratch1(); } - - friend class BaseLoadStoreStubCompiler; }; @@ -812,7 +634,7 @@ } Handle<JSFunction> constant_function() const { - ASSERT(is_constant_call()); + DCHECK(is_constant_call()); return constant_function_; } @@ -821,12 +643,12 @@ } Handle<FunctionTemplateInfo> expected_receiver_type() const { - ASSERT(is_simple_api_call()); + DCHECK(is_simple_api_call()); return expected_receiver_type_; } Handle<CallHandlerInfo> api_call_info() const { - ASSERT(is_simple_api_call()); + DCHECK(is_simple_api_call()); return api_call_info_; } diff -Nru nodejs-0.11.13/deps/v8/src/sweeper-thread.cc nodejs-0.11.15/deps/v8/src/sweeper-thread.cc --- nodejs-0.11.13/deps/v8/src/sweeper-thread.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/sweeper-thread.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "sweeper-thread.h" - -#include "v8.h" - -#include "isolate.h" -#include "v8threads.h" - -namespace v8 { -namespace internal { - -static const int kSweeperThreadStackSize = 64 * KB; - -SweeperThread::SweeperThread(Isolate* isolate) - : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)), - isolate_(isolate), - heap_(isolate->heap()), - collector_(heap_->mark_compact_collector()), - start_sweeping_semaphore_(0), - end_sweeping_semaphore_(0), - stop_semaphore_(0) { - ASSERT(!FLAG_job_based_sweeping); - NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false)); -} - - -void SweeperThread::Run() { - Isolate::SetIsolateThreadLocals(isolate_, NULL); - DisallowHeapAllocation no_allocation; - DisallowHandleAllocation no_handles; - DisallowHandleDereference no_deref; - - while (true) { - start_sweeping_semaphore_.Wait(); - - if (Acquire_Load(&stop_thread_)) { - stop_semaphore_.Signal(); - return; - } - - collector_->SweepInParallel(heap_->old_data_space()); - 
collector_->SweepInParallel(heap_->old_pointer_space()); - end_sweeping_semaphore_.Signal(); - } -} - - -void SweeperThread::Stop() { - Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); - start_sweeping_semaphore_.Signal(); - stop_semaphore_.Wait(); - Join(); -} - - -void SweeperThread::StartSweeping() { - start_sweeping_semaphore_.Signal(); -} - - -void SweeperThread::WaitForSweeperThread() { - end_sweeping_semaphore_.Wait(); -} - - -int SweeperThread::NumberOfThreads(int max_available) { - if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0; - if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads; - if (FLAG_concurrent_sweeping) return max_available - 1; - ASSERT(FLAG_parallel_sweeping); - return max_available; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/sweeper-thread.h nodejs-0.11.15/deps/v8/src/sweeper-thread.h --- nodejs-0.11.13/deps/v8/src/sweeper-thread.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/sweeper-thread.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_SWEEPER_THREAD_H_ -#define V8_SWEEPER_THREAD_H_ - -#include "atomicops.h" -#include "flags.h" -#include "platform.h" -#include "v8utils.h" - -#include "spaces.h" - -#include "heap.h" - -namespace v8 { -namespace internal { - -class SweeperThread : public Thread { - public: - explicit SweeperThread(Isolate* isolate); - ~SweeperThread() {} - - void Run(); - void Stop(); - void StartSweeping(); - void WaitForSweeperThread(); - - static int NumberOfThreads(int max_available); - - private: - Isolate* isolate_; - Heap* heap_; - MarkCompactCollector* collector_; - Semaphore start_sweeping_semaphore_; - Semaphore end_sweeping_semaphore_; - Semaphore stop_semaphore_; - volatile AtomicWord stop_thread_; -}; - -} } // namespace v8::internal - -#endif // V8_SWEEPER_THREAD_H_ diff -Nru nodejs-0.11.13/deps/v8/src/symbol.js nodejs-0.11.15/deps/v8/src/symbol.js --- nodejs-0.11.13/deps/v8/src/symbol.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/symbol.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
"use strict"; @@ -105,7 +82,6 @@ //------------------------------------------------------------------- -var symbolCreate = InternalSymbol("Symbol.create"); var symbolHasInstance = InternalSymbol("Symbol.hasInstance"); var symbolIsConcatSpreadable = InternalSymbol("Symbol.isConcatSpreadable"); var symbolIsRegExp = InternalSymbol("Symbol.isRegExp"); @@ -123,12 +99,12 @@ %FunctionSetPrototype($Symbol, new $Object()); InstallConstants($Symbol, $Array( - "create", symbolCreate, - "hasInstance", symbolHasInstance, - "isConcatSpreadable", symbolIsConcatSpreadable, - "isRegExp", symbolIsRegExp, + // TODO(rossberg): expose when implemented. + // "hasInstance", symbolHasInstance, + // "isConcatSpreadable", symbolIsConcatSpreadable, + // "isRegExp", symbolIsRegExp, "iterator", symbolIterator, - "toStringTag", symbolToStringTag, + // "toStringTag", symbolToStringTag, "unscopables", symbolUnscopables )); InstallFunctions($Symbol, DONT_ENUM, $Array( @@ -136,7 +112,7 @@ "keyFor", SymbolKeyFor )); - %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM); + %AddNamedProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM); InstallFunctions($Symbol.prototype, DONT_ENUM, $Array( "toString", SymbolToString, "valueOf", SymbolValueOf diff -Nru nodejs-0.11.13/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h nodejs-0.11.15/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h --- nodejs-0.11.13/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +#ifndef JITDUMP_H +#define JITDUMP_H + +#include <sys/time.h> +#include <time.h> +#include <stdint.h> + +/* JiTD */ +#define JITHEADER_MAGIC 0x4A695444 +#define JITHEADER_MAGIC_SW 0x4454694A + +#define PADDING_8ALIGNED(x) ((((x) + 7) & 7) ^ 7) + +#define JITHEADER_VERSION 1 + +struct jitheader { + uint32_t magic; /* characters "jItD" */ + uint32_t 
version; /* header version */ + uint32_t total_size; /* total size of header */ + uint32_t elf_mach; /* elf mach target */ + uint32_t pad1; /* reserved */ + uint32_t pid; /* JIT process id */ + uint64_t timestamp; /* timestamp */ +}; + +enum jit_record_type { + JIT_CODE_LOAD = 0, + JIT_CODE_MOVE = 1, + JIT_CODE_DEBUG_INFO = 2, + JIT_CODE_CLOSE = 3, + JIT_CODE_MAX +}; + +/* record prefix (mandatory in each record) */ +struct jr_prefix { + uint32_t id; + uint32_t total_size; + uint64_t timestamp; +}; + +struct jr_code_load { + struct jr_prefix p; + + uint32_t pid; + uint32_t tid; + uint64_t vma; + uint64_t code_addr; + uint64_t code_size; + uint64_t code_index; +}; + +struct jr_code_close { + struct jr_prefix p; +}; + +struct jr_code_move { + struct jr_prefix p; + + uint32_t pid; + uint32_t tid; + uint64_t vma; + uint64_t old_code_addr; + uint64_t new_code_addr; + uint64_t code_size; + uint64_t code_index; +}; + +struct jr_code_debug_info { + struct jr_prefix p; + + uint64_t code_addr; + uint64_t nr_entry; +}; + +union jr_entry { + struct jr_code_debug_info info; + struct jr_code_close close; + struct jr_code_load load; + struct jr_code_move move; + struct jr_prefix prefix; +}; + +#endif /* !JITDUMP_H */ diff -Nru nodejs-0.11.13/deps/v8/src/third_party/vtune/vtune-jit.cc nodejs-0.11.15/deps/v8/src/third_party/vtune/vtune-jit.cc --- nodejs-0.11.13/deps/v8/src/third_party/vtune/vtune-jit.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/third_party/vtune/vtune-jit.cc 2015-01-20 21:22:17.000000000 +0000 @@ -192,11 +192,11 @@ jmethod.method_size = static_cast<unsigned int>(event->code_len); jmethod.method_name = temp_method_name; - Handle<Script> script = event->script; + Handle<UnboundScript> script = event->script; if (*script != NULL) { // Get the source file name and set it to jmethod.source_file_name - if ((*script->GetScriptName())->IsString()) { + if ((*script->GetScriptName())->IsString()) { Handle<String> script_name = 
script->GetScriptName()->ToString(); temp_file_name = new char[script_name->Utf8Length() + 1]; script_name->WriteUtf8(temp_file_name); @@ -224,7 +224,7 @@ jmethod.line_number_table[index].Offset = static_cast<unsigned int>(Iter->pc_); jmethod.line_number_table[index++].LineNumber = - script->GetLineNumber(Iter->pos_)+1; + script->GetLineNumber(Iter->pos_) + 1; } GetEntries()->erase(event->code_start); } diff -Nru nodejs-0.11.13/deps/v8/src/token.cc nodejs-0.11.15/deps/v8/src/token.cc --- nodejs-0.11.13/deps/v8/src/token.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/token.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,32 +1,9 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "../include/v8stdint.h" -#include "token.h" +#include "include/v8stdint.h" +#include "src/token.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/token.h nodejs-0.11.15/deps/v8/src/token.h --- nodejs-0.11.13/deps/v8/src/token.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/token.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_TOKEN_H_ #define V8_TOKEN_H_ -#include "checks.h" +#include "src/base/logging.h" namespace v8 { namespace internal { @@ -48,138 +25,139 @@ #define IGNORE_TOKEN(name, string, precedence) -#define TOKEN_LIST(T, K) \ - /* End of source indicator. */ \ - T(EOS, "EOS", 0) \ - \ - /* Punctuators (ECMA-262, section 7.7, page 15). */ \ - T(LPAREN, "(", 0) \ - T(RPAREN, ")", 0) \ - T(LBRACK, "[", 0) \ - T(RBRACK, "]", 0) \ - T(LBRACE, "{", 0) \ - T(RBRACE, "}", 0) \ - T(COLON, ":", 0) \ - T(SEMICOLON, ";", 0) \ - T(PERIOD, ".", 0) \ - T(CONDITIONAL, "?", 3) \ - T(INC, "++", 0) \ - T(DEC, "--", 0) \ - \ - /* Assignment operators. */ \ - /* IsAssignmentOp() and Assignment::is_compound() relies on */ \ - /* this block of enum values being contiguous and sorted in the */ \ - /* same order! */ \ - T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \ - T(INIT_LET, "=init_let", 2) /* AST-use only. */ \ - T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \ - T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. 
*/ \ - T(ASSIGN, "=", 2) \ - T(ASSIGN_BIT_OR, "|=", 2) \ - T(ASSIGN_BIT_XOR, "^=", 2) \ - T(ASSIGN_BIT_AND, "&=", 2) \ - T(ASSIGN_SHL, "<<=", 2) \ - T(ASSIGN_SAR, ">>=", 2) \ - T(ASSIGN_SHR, ">>>=", 2) \ - T(ASSIGN_ADD, "+=", 2) \ - T(ASSIGN_SUB, "-=", 2) \ - T(ASSIGN_MUL, "*=", 2) \ - T(ASSIGN_DIV, "/=", 2) \ - T(ASSIGN_MOD, "%=", 2) \ - \ - /* Binary operators sorted by precedence. */ \ - /* IsBinaryOp() relies on this block of enum values */ \ - /* being contiguous and sorted in the same order! */ \ - T(COMMA, ",", 1) \ - T(OR, "||", 4) \ - T(AND, "&&", 5) \ - T(BIT_OR, "|", 6) \ - T(BIT_XOR, "^", 7) \ - T(BIT_AND, "&", 8) \ - T(SHL, "<<", 11) \ - T(SAR, ">>", 11) \ - T(SHR, ">>>", 11) \ - T(ROR, "rotate right", 11) /* only used by Crankshaft */ \ - T(ADD, "+", 12) \ - T(SUB, "-", 12) \ - T(MUL, "*", 13) \ - T(DIV, "/", 13) \ - T(MOD, "%", 13) \ - \ - /* Compare operators sorted by precedence. */ \ - /* IsCompareOp() relies on this block of enum values */ \ - /* being contiguous and sorted in the same order! */ \ - T(EQ, "==", 9) \ - T(NE, "!=", 9) \ - T(EQ_STRICT, "===", 9) \ - T(NE_STRICT, "!==", 9) \ - T(LT, "<", 10) \ - T(GT, ">", 10) \ - T(LTE, "<=", 10) \ - T(GTE, ">=", 10) \ - K(INSTANCEOF, "instanceof", 10) \ - K(IN, "in", 10) \ - \ - /* Unary operators. */ \ - /* IsUnaryOp() relies on this block of enum values */ \ - /* being contiguous and sorted in the same order! */ \ - T(NOT, "!", 0) \ - T(BIT_NOT, "~", 0) \ - K(DELETE, "delete", 0) \ - K(TYPEOF, "typeof", 0) \ - K(VOID, "void", 0) \ - \ - /* Keywords (ECMA-262, section 7.5.2, page 13). 
*/ \ - K(BREAK, "break", 0) \ - K(CASE, "case", 0) \ - K(CATCH, "catch", 0) \ - K(CONTINUE, "continue", 0) \ - K(DEBUGGER, "debugger", 0) \ - K(DEFAULT, "default", 0) \ - /* DELETE */ \ - K(DO, "do", 0) \ - K(ELSE, "else", 0) \ - K(FINALLY, "finally", 0) \ - K(FOR, "for", 0) \ - K(FUNCTION, "function", 0) \ - K(IF, "if", 0) \ - /* IN */ \ - /* INSTANCEOF */ \ - K(NEW, "new", 0) \ - K(RETURN, "return", 0) \ - K(SWITCH, "switch", 0) \ - K(THIS, "this", 0) \ - K(THROW, "throw", 0) \ - K(TRY, "try", 0) \ - /* TYPEOF */ \ - K(VAR, "var", 0) \ - /* VOID */ \ - K(WHILE, "while", 0) \ - K(WITH, "with", 0) \ - \ - /* Literals (ECMA-262, section 7.8, page 16). */ \ - K(NULL_LITERAL, "null", 0) \ - K(TRUE_LITERAL, "true", 0) \ - K(FALSE_LITERAL, "false", 0) \ - T(NUMBER, NULL, 0) \ - T(STRING, NULL, 0) \ - \ - /* Identifiers (not keywords or future reserved words). */ \ - T(IDENTIFIER, NULL, 0) \ - \ - /* Future reserved words (ECMA-262, section 7.6.1.2). */ \ - T(FUTURE_RESERVED_WORD, NULL, 0) \ - T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \ - K(CONST, "const", 0) \ - K(EXPORT, "export", 0) \ - K(IMPORT, "import", 0) \ - K(LET, "let", 0) \ - K(YIELD, "yield", 0) \ - \ - /* Illegal token - not able to scan. */ \ - T(ILLEGAL, "ILLEGAL", 0) \ - \ - /* Scanner-internal use only. */ \ +#define TOKEN_LIST(T, K) \ + /* End of source indicator. */ \ + T(EOS, "EOS", 0) \ + \ + /* Punctuators (ECMA-262, section 7.7, page 15). */ \ + T(LPAREN, "(", 0) \ + T(RPAREN, ")", 0) \ + T(LBRACK, "[", 0) \ + T(RBRACK, "]", 0) \ + T(LBRACE, "{", 0) \ + T(RBRACE, "}", 0) \ + T(COLON, ":", 0) \ + T(SEMICOLON, ";", 0) \ + T(PERIOD, ".", 0) \ + T(CONDITIONAL, "?", 3) \ + T(INC, "++", 0) \ + T(DEC, "--", 0) \ + T(ARROW, "=>", 0) \ + \ + /* Assignment operators. */ \ + /* IsAssignmentOp() and Assignment::is_compound() relies on */ \ + /* this block of enum values being contiguous and sorted in the */ \ + /* same order! */ \ + T(INIT_VAR, "=init_var", 2) /* AST-use only. 
*/ \ + T(INIT_LET, "=init_let", 2) /* AST-use only. */ \ + T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \ + T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */ \ + T(ASSIGN, "=", 2) \ + T(ASSIGN_BIT_OR, "|=", 2) \ + T(ASSIGN_BIT_XOR, "^=", 2) \ + T(ASSIGN_BIT_AND, "&=", 2) \ + T(ASSIGN_SHL, "<<=", 2) \ + T(ASSIGN_SAR, ">>=", 2) \ + T(ASSIGN_SHR, ">>>=", 2) \ + T(ASSIGN_ADD, "+=", 2) \ + T(ASSIGN_SUB, "-=", 2) \ + T(ASSIGN_MUL, "*=", 2) \ + T(ASSIGN_DIV, "/=", 2) \ + T(ASSIGN_MOD, "%=", 2) \ + \ + /* Binary operators sorted by precedence. */ \ + /* IsBinaryOp() relies on this block of enum values */ \ + /* being contiguous and sorted in the same order! */ \ + T(COMMA, ",", 1) \ + T(OR, "||", 4) \ + T(AND, "&&", 5) \ + T(BIT_OR, "|", 6) \ + T(BIT_XOR, "^", 7) \ + T(BIT_AND, "&", 8) \ + T(SHL, "<<", 11) \ + T(SAR, ">>", 11) \ + T(SHR, ">>>", 11) \ + T(ROR, "rotate right", 11) /* only used by Crankshaft */ \ + T(ADD, "+", 12) \ + T(SUB, "-", 12) \ + T(MUL, "*", 13) \ + T(DIV, "/", 13) \ + T(MOD, "%", 13) \ + \ + /* Compare operators sorted by precedence. */ \ + /* IsCompareOp() relies on this block of enum values */ \ + /* being contiguous and sorted in the same order! */ \ + T(EQ, "==", 9) \ + T(NE, "!=", 9) \ + T(EQ_STRICT, "===", 9) \ + T(NE_STRICT, "!==", 9) \ + T(LT, "<", 10) \ + T(GT, ">", 10) \ + T(LTE, "<=", 10) \ + T(GTE, ">=", 10) \ + K(INSTANCEOF, "instanceof", 10) \ + K(IN, "in", 10) \ + \ + /* Unary operators. */ \ + /* IsUnaryOp() relies on this block of enum values */ \ + /* being contiguous and sorted in the same order! */ \ + T(NOT, "!", 0) \ + T(BIT_NOT, "~", 0) \ + K(DELETE, "delete", 0) \ + K(TYPEOF, "typeof", 0) \ + K(VOID, "void", 0) \ + \ + /* Keywords (ECMA-262, section 7.5.2, page 13). 
*/ \ + K(BREAK, "break", 0) \ + K(CASE, "case", 0) \ + K(CATCH, "catch", 0) \ + K(CONTINUE, "continue", 0) \ + K(DEBUGGER, "debugger", 0) \ + K(DEFAULT, "default", 0) \ + /* DELETE */ \ + K(DO, "do", 0) \ + K(ELSE, "else", 0) \ + K(FINALLY, "finally", 0) \ + K(FOR, "for", 0) \ + K(FUNCTION, "function", 0) \ + K(IF, "if", 0) \ + /* IN */ \ + /* INSTANCEOF */ \ + K(NEW, "new", 0) \ + K(RETURN, "return", 0) \ + K(SWITCH, "switch", 0) \ + K(THIS, "this", 0) \ + K(THROW, "throw", 0) \ + K(TRY, "try", 0) \ + /* TYPEOF */ \ + K(VAR, "var", 0) \ + /* VOID */ \ + K(WHILE, "while", 0) \ + K(WITH, "with", 0) \ + \ + /* Literals (ECMA-262, section 7.8, page 16). */ \ + K(NULL_LITERAL, "null", 0) \ + K(TRUE_LITERAL, "true", 0) \ + K(FALSE_LITERAL, "false", 0) \ + T(NUMBER, NULL, 0) \ + T(STRING, NULL, 0) \ + \ + /* Identifiers (not keywords or future reserved words). */ \ + T(IDENTIFIER, NULL, 0) \ + \ + /* Future reserved words (ECMA-262, section 7.6.1.2). */ \ + T(FUTURE_RESERVED_WORD, NULL, 0) \ + T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \ + K(CONST, "const", 0) \ + K(EXPORT, "export", 0) \ + K(IMPORT, "import", 0) \ + K(LET, "let", 0) \ + K(YIELD, "yield", 0) \ + \ + /* Illegal token - not able to scan. */ \ + T(ILLEGAL, "ILLEGAL", 0) \ + \ + /* Scanner-internal use only. */ \ T(WHITESPACE, NULL, 0) @@ -196,7 +174,7 @@ // Returns a string corresponding to the C++ token name // (e.g. "LT" for the token LT). 
static const char* Name(Value tok) { - ASSERT(tok < NUM_TOKENS); // tok is unsigned + DCHECK(tok < NUM_TOKENS); // tok is unsigned return name_[tok]; } @@ -239,7 +217,7 @@ } static Value NegateCompareOp(Value op) { - ASSERT(IsArithmeticCompareOp(op)); + DCHECK(IsArithmeticCompareOp(op)); switch (op) { case EQ: return NE; case NE: return EQ; @@ -256,7 +234,7 @@ } static Value ReverseCompareOp(Value op) { - ASSERT(IsArithmeticCompareOp(op)); + DCHECK(IsArithmeticCompareOp(op)); switch (op) { case EQ: return EQ; case NE: return NE; @@ -292,14 +270,14 @@ // (.e., "<" for the token LT) or NULL if the token doesn't // have a (unique) string (e.g. an IDENTIFIER). static const char* String(Value tok) { - ASSERT(tok < NUM_TOKENS); // tok is unsigned. + DCHECK(tok < NUM_TOKENS); // tok is unsigned. return string_[tok]; } // Returns the precedence > 0 for binary and compare // operators; returns 0 otherwise. static int Precedence(Value tok) { - ASSERT(tok < NUM_TOKENS); // tok is unsigned. + DCHECK(tok < NUM_TOKENS); // tok is unsigned. return precedence_[tok]; } diff -Nru nodejs-0.11.13/deps/v8/src/transitions.cc nodejs-0.11.15/deps/v8/src/transitions.cc --- nodejs-0.11.13/deps/v8/src/transitions.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/transitions.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,57 +1,32 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "objects.h" -#include "transitions-inl.h" -#include "utils.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/objects.h" +#include "src/transitions-inl.h" +#include "src/utils.h" namespace v8 { namespace internal { -static MaybeObject* AllocateRaw(Isolate* isolate, int length) { - // Use FixedArray to not use TransitionArray::cast on incomplete object. 
- FixedArray* array; - MaybeObject* maybe_array = isolate->heap()->AllocateFixedArray(length); - if (!maybe_array->To(&array)) return maybe_array; - return array; +Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate, + int number_of_transitions) { + Handle<FixedArray> array = + isolate->factory()->NewFixedArray(ToKeyIndex(number_of_transitions)); + array->set(kPrototypeTransitionsIndex, Smi::FromInt(0)); + return Handle<TransitionArray>::cast(array); } -MaybeObject* TransitionArray::Allocate(Isolate* isolate, - int number_of_transitions) { - FixedArray* array; - MaybeObject* maybe_array = - AllocateRaw(isolate, ToKeyIndex(number_of_transitions)); - if (!maybe_array->To(&array)) return maybe_array; - array->set(kPrototypeTransitionsIndex, Smi::FromInt(0)); - return array; +Handle<TransitionArray> TransitionArray::AllocateSimple(Isolate* isolate, + Handle<Map> target) { + Handle<FixedArray> array = + isolate->factory()->NewFixedArray(kSimpleTransitionSize); + array->set(kSimpleTransitionTarget, *target); + return Handle<TransitionArray>::cast(array); } @@ -69,86 +44,111 @@ } -MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag, - Name* key, - Map* target, - Object* back_pointer) { - TransitionArray* result; - MaybeObject* maybe_result; +Handle<TransitionArray> TransitionArray::NewWith(Handle<Map> map, + Handle<Name> name, + Handle<Map> target, + SimpleTransitionFlag flag) { + Handle<TransitionArray> result; + Isolate* isolate = name->GetIsolate(); if (flag == SIMPLE_TRANSITION) { - maybe_result = AllocateRaw(target->GetIsolate(), kSimpleTransitionSize); - if (!maybe_result->To(&result)) return maybe_result; - result->set(kSimpleTransitionTarget, target); + result = AllocateSimple(isolate, target); } else { - maybe_result = Allocate(target->GetIsolate(), 1); - if (!maybe_result->To(&result)) return maybe_result; - result->NoIncrementalWriteBarrierSet(0, key, target); + result = Allocate(isolate, 1); + result->NoIncrementalWriteBarrierSet(0, 
*name, *target); } - result->set_back_pointer_storage(back_pointer); + result->set_back_pointer_storage(map->GetBackPointer()); return result; } -MaybeObject* TransitionArray::ExtendToFullTransitionArray() { - ASSERT(!IsFullTransitionArray()); - int nof = number_of_transitions(); - TransitionArray* result; - MaybeObject* maybe_result = Allocate(GetIsolate(), nof); - if (!maybe_result->To(&result)) return maybe_result; - - if (nof == 1) { - result->NoIncrementalWriteBarrierCopyFrom(this, kSimpleTransitionIndex, 0); +Handle<TransitionArray> TransitionArray::ExtendToFullTransitionArray( + Handle<Map> containing_map) { + DCHECK(!containing_map->transitions()->IsFullTransitionArray()); + int nof = containing_map->transitions()->number_of_transitions(); + + // A transition array may shrink during GC. + Handle<TransitionArray> result = Allocate(containing_map->GetIsolate(), nof); + DisallowHeapAllocation no_gc; + int new_nof = containing_map->transitions()->number_of_transitions(); + if (new_nof != nof) { + DCHECK(new_nof == 0); + result->Shrink(ToKeyIndex(0)); + } else if (nof == 1) { + result->NoIncrementalWriteBarrierCopyFrom( + containing_map->transitions(), kSimpleTransitionIndex, 0); } - result->set_back_pointer_storage(back_pointer_storage()); + result->set_back_pointer_storage( + containing_map->transitions()->back_pointer_storage()); return result; } -MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) { - TransitionArray* result; +Handle<TransitionArray> TransitionArray::CopyInsert(Handle<Map> map, + Handle<Name> name, + Handle<Map> target, + SimpleTransitionFlag flag) { + if (!map->HasTransitionArray()) { + return TransitionArray::NewWith(map, name, target, flag); + } - int number_of_transitions = this->number_of_transitions(); + int number_of_transitions = map->transitions()->number_of_transitions(); int new_size = number_of_transitions; - int insertion_index = this->Search(name); + int insertion_index = map->transitions()->Search(*name); if 
(insertion_index == kNotFound) ++new_size; - MaybeObject* maybe_array; - maybe_array = TransitionArray::Allocate(GetIsolate(), new_size); - if (!maybe_array->To(&result)) return maybe_array; + Handle<TransitionArray> result = Allocate(map->GetIsolate(), new_size); + + // The map's transition array may grown smaller during the allocation above as + // it was weakly traversed, though it is guaranteed not to disappear. Trim the + // result copy if needed, and recompute variables. + DCHECK(map->HasTransitionArray()); + DisallowHeapAllocation no_gc; + TransitionArray* array = map->transitions(); + if (array->number_of_transitions() != number_of_transitions) { + DCHECK(array->number_of_transitions() < number_of_transitions); + + number_of_transitions = array->number_of_transitions(); + new_size = number_of_transitions; + + insertion_index = array->Search(*name); + if (insertion_index == kNotFound) ++new_size; + + result->Shrink(ToKeyIndex(new_size)); + } - if (HasPrototypeTransitions()) { - result->SetPrototypeTransitions(GetPrototypeTransitions()); + if (array->HasPrototypeTransitions()) { + result->SetPrototypeTransitions(array->GetPrototypeTransitions()); } if (insertion_index != kNotFound) { for (int i = 0; i < number_of_transitions; ++i) { if (i != insertion_index) { - result->NoIncrementalWriteBarrierCopyFrom(this, i, i); + result->NoIncrementalWriteBarrierCopyFrom(array, i, i); } } - result->NoIncrementalWriteBarrierSet(insertion_index, name, target); - result->set_back_pointer_storage(back_pointer_storage()); + result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target); + result->set_back_pointer_storage(array->back_pointer_storage()); return result; } insertion_index = 0; for (; insertion_index < number_of_transitions; ++insertion_index) { - if (InsertionPointFound(GetKey(insertion_index), name)) break; + if (InsertionPointFound(array->GetKey(insertion_index), *name)) break; result->NoIncrementalWriteBarrierCopyFrom( - this, insertion_index, 
insertion_index); + array, insertion_index, insertion_index); } - result->NoIncrementalWriteBarrierSet(insertion_index, name, target); + result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target); for (; insertion_index < number_of_transitions; ++insertion_index) { result->NoIncrementalWriteBarrierCopyFrom( - this, insertion_index, insertion_index + 1); + array, insertion_index, insertion_index + 1); } - result->set_back_pointer_storage(back_pointer_storage()); + result->set_back_pointer_storage(array->back_pointer_storage()); return result; } diff -Nru nodejs-0.11.13/deps/v8/src/transitions.h nodejs-0.11.15/deps/v8/src/transitions.h --- nodejs-0.11.13/deps/v8/src/transitions.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/transitions.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_TRANSITIONS_H_ #define V8_TRANSITIONS_H_ -#include "elements-kind.h" -#include "heap.h" -#include "isolate.h" -#include "objects.h" -#include "v8checks.h" +#include "src/checks.h" +#include "src/elements-kind.h" +#include "src/heap/heap.h" +#include "src/isolate.h" +#include "src/objects.h" namespace v8 { namespace internal { @@ -85,7 +62,6 @@ WriteBarrierMode mode = UPDATE_WRITE_BARRIER); inline Object** GetPrototypeTransitionsSlot(); inline bool HasPrototypeTransitions(); - inline HeapObject* UncheckedPrototypeTransitions(); // Returns the number of transitions in the array. int number_of_transitions() { @@ -96,30 +72,25 @@ inline int number_of_entries() { return number_of_transitions(); } - // Allocate a new transition array with a single entry. - static MUST_USE_RESULT MaybeObject* NewWith( - SimpleTransitionFlag flag, - Name* key, - Map* target, - Object* back_pointer); + // Creates a FullTransitionArray from a SimpleTransitionArray in + // containing_map. + static Handle<TransitionArray> ExtendToFullTransitionArray( + Handle<Map> containing_map); - MUST_USE_RESULT MaybeObject* ExtendToFullTransitionArray(); - - // Copy the transition array, inserting a new transition. + // Create a transition array, copying from the owning map if it already has + // one, otherwise creating a new one according to flag. 
// TODO(verwaest): This should not cause an existing transition to be // overwritten. - MUST_USE_RESULT MaybeObject* CopyInsert(Name* name, Map* target); - - // Copy a single transition from the origin array. - inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin, - int origin_transition, - int target_transition); + static Handle<TransitionArray> CopyInsert(Handle<Map> map, + Handle<Name> name, + Handle<Map> target, + SimpleTransitionFlag flag); // Search a transition for a given property name. inline int Search(Name* name); // Allocates a TransitionArray. - MUST_USE_RESULT static MaybeObject* Allocate( + static Handle<TransitionArray> Allocate( Isolate* isolate, int number_of_transitions); bool IsSimpleTransition() { @@ -169,10 +140,7 @@ #ifdef OBJECT_PRINT // Print all the transitions. - inline void PrintTransitions() { - PrintTransitions(stdout); - } - void PrintTransitions(FILE* out); + void PrintTransitions(OStream& os); // NOLINT #endif #ifdef DEBUG @@ -199,10 +167,24 @@ kTransitionTarget; } + static Handle<TransitionArray> AllocateSimple( + Isolate* isolate, Handle<Map> target); + + // Allocate a new transition array with a single entry. + static Handle<TransitionArray> NewWith(Handle<Map> map, + Handle<Name> name, + Handle<Map> target, + SimpleTransitionFlag flag); + inline void NoIncrementalWriteBarrierSet(int transition_number, Name* key, Map* target); + // Copy a single transition from the origin array. + inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin, + int origin_transition, + int target_transition); + DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray); }; diff -Nru nodejs-0.11.13/deps/v8/src/transitions-inl.h nodejs-0.11.15/deps/v8/src/transitions-inl.h --- nodejs-0.11.13/deps/v8/src/transitions-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/transitions-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_TRANSITIONS_INL_H_ #define V8_TRANSITIONS_INL_H_ -#include "transitions.h" +#include "src/transitions.h" namespace v8 { namespace internal { @@ -51,7 +28,7 @@ TransitionArray* TransitionArray::cast(Object* object) { - ASSERT(object->IsTransitionArray()); + DCHECK(object->IsTransitionArray()); return reinterpret_cast<TransitionArray*>(object); } @@ -82,22 +59,16 @@ FixedArray* TransitionArray::GetPrototypeTransitions() { - ASSERT(IsFullTransitionArray()); + DCHECK(IsFullTransitionArray()); Object* prototype_transitions = get(kPrototypeTransitionsIndex); return FixedArray::cast(prototype_transitions); } -HeapObject* TransitionArray::UncheckedPrototypeTransitions() { - ASSERT(HasPrototypeTransitions()); - return reinterpret_cast<HeapObject*>(get(kPrototypeTransitionsIndex)); -} - - void TransitionArray::SetPrototypeTransitions(FixedArray* transitions, WriteBarrierMode mode) { - ASSERT(IsFullTransitionArray()); - ASSERT(transitions->IsFixedArray()); + DCHECK(IsFullTransitionArray()); + DCHECK(transitions->IsFixedArray()); Heap* heap = GetHeap(); WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions); CONDITIONAL_WRITE_BARRIER( @@ -112,8 +83,8 @@ Object** TransitionArray::GetKeySlot(int transition_number) { - ASSERT(!IsSimpleTransition()); - ASSERT(transition_number < number_of_transitions()); + DCHECK(!IsSimpleTransition()); + DCHECK(transition_number < number_of_transitions()); return RawFieldOfElementAt(ToKeyIndex(transition_number)); } @@ -125,34 +96,34 @@ Name* key = target->instance_descriptors()->GetKey(descriptor); return key; } - ASSERT(transition_number < number_of_transitions()); + DCHECK(transition_number < number_of_transitions()); return Name::cast(get(ToKeyIndex(transition_number))); } void TransitionArray::SetKey(int transition_number, Name* key) { - ASSERT(!IsSimpleTransition()); - ASSERT(transition_number < number_of_transitions()); + DCHECK(!IsSimpleTransition()); + DCHECK(transition_number < number_of_transitions()); 
set(ToKeyIndex(transition_number), key); } Map* TransitionArray::GetTarget(int transition_number) { if (IsSimpleTransition()) { - ASSERT(transition_number == kSimpleTransitionIndex); + DCHECK(transition_number == kSimpleTransitionIndex); return Map::cast(get(kSimpleTransitionTarget)); } - ASSERT(transition_number < number_of_transitions()); + DCHECK(transition_number < number_of_transitions()); return Map::cast(get(ToTargetIndex(transition_number))); } void TransitionArray::SetTarget(int transition_number, Map* value) { if (IsSimpleTransition()) { - ASSERT(transition_number == kSimpleTransitionIndex); + DCHECK(transition_number == kSimpleTransitionIndex); return set(kSimpleTransitionTarget, value); } - ASSERT(transition_number < number_of_transitions()); + DCHECK(transition_number < number_of_transitions()); set(ToTargetIndex(transition_number), value); } diff -Nru nodejs-0.11.13/deps/v8/src/trig-table.h nodejs-0.11.15/deps/v8/src/trig-table.h --- nodejs-0.11.13/deps/v8/src/trig-table.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/trig-table.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_TRIG_TABLE_H_ -#define V8_TRIG_TABLE_H_ - - -namespace v8 { -namespace internal { - -class TrigonometricLookupTable : public AllStatic { - public: - // Casting away const-ness to use as argument for typed array constructor. 
- static void* sin_table() { - return const_cast<double*>(&kSinTable[0]); - } - - static void* cos_x_interval_table() { - return const_cast<double*>(&kCosXIntervalTable[0]); - } - - static double samples_over_pi_half() { return kSamplesOverPiHalf; } - static int samples() { return kSamples; } - static int table_num_bytes() { return kTableSize * sizeof(*kSinTable); } - static int table_size() { return kTableSize; } - - private: - static const double kSinTable[]; - static const double kCosXIntervalTable[]; - static const int kSamples; - static const int kTableSize; - static const double kSamplesOverPiHalf; -}; - -} } // namespace v8::internal - -#endif // V8_TRIG_TABLE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/typedarray.js nodejs-0.11.15/deps/v8/src/typedarray.js --- nodejs-0.11.13/deps/v8/src/typedarray.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/typedarray.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. "use strict"; @@ -48,147 +25,167 @@ endmacro macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE) - function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) { - if (!IS_UNDEFINED(byteOffset)) { - byteOffset = - ToPositiveInteger(byteOffset, "invalid_typed_array_length"); - } - if (!IS_UNDEFINED(length)) { - length = ToPositiveInteger(length, "invalid_typed_array_length"); - } +function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) { + if (!IS_UNDEFINED(byteOffset)) { + byteOffset = + ToPositiveInteger(byteOffset, "invalid_typed_array_length"); + } + if (!IS_UNDEFINED(length)) { + length = ToPositiveInteger(length, "invalid_typed_array_length"); + } - var bufferByteLength = %ArrayBufferGetByteLength(buffer); - var offset; - if (IS_UNDEFINED(byteOffset)) { - offset = 0; - } else { - offset = byteOffset; + var bufferByteLength = %_ArrayBufferGetByteLength(buffer); + var offset; + if (IS_UNDEFINED(byteOffset)) { + offset = 0; + } else { + offset = byteOffset; - if (offset % ELEMENT_SIZE !== 0) { - throw MakeRangeError("invalid_typed_array_alignment", - ["start offset", "NAME", ELEMENT_SIZE]); - } - if (offset > bufferByteLength) { - throw MakeRangeError("invalid_typed_array_offset"); - } + if (offset % ELEMENT_SIZE !== 0) { + throw MakeRangeError("invalid_typed_array_alignment", + ["start offset", 
"NAME", ELEMENT_SIZE]); } - - var newByteLength; - var newLength; - if (IS_UNDEFINED(length)) { - if (bufferByteLength % ELEMENT_SIZE !== 0) { - throw MakeRangeError("invalid_typed_array_alignment", - ["byte length", "NAME", ELEMENT_SIZE]); - } - newByteLength = bufferByteLength - offset; - newLength = newByteLength / ELEMENT_SIZE; - } else { - var newLength = length; - newByteLength = newLength * ELEMENT_SIZE; + if (offset > bufferByteLength) { + throw MakeRangeError("invalid_typed_array_offset"); } - if ((offset + newByteLength > bufferByteLength) - || (newLength > %_MaxSmi())) { - throw MakeRangeError("invalid_typed_array_length"); - } - %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength); - } - - function NAMEConstructByLength(obj, length) { - var l = IS_UNDEFINED(length) ? - 0 : ToPositiveInteger(length, "invalid_typed_array_length"); - if (l > %_MaxSmi()) { - throw MakeRangeError("invalid_typed_array_length"); - } - var byteLength = l * ELEMENT_SIZE; - if (byteLength > %_TypedArrayMaxSizeInHeap()) { - var buffer = new $ArrayBuffer(byteLength); - %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength); - } else { - %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength); + } + + var newByteLength; + var newLength; + if (IS_UNDEFINED(length)) { + if (bufferByteLength % ELEMENT_SIZE !== 0) { + throw MakeRangeError("invalid_typed_array_alignment", + ["byte length", "NAME", ELEMENT_SIZE]); } + newByteLength = bufferByteLength - offset; + newLength = newByteLength / ELEMENT_SIZE; + } else { + var newLength = length; + newByteLength = newLength * ELEMENT_SIZE; } + if ((offset + newByteLength > bufferByteLength) + || (newLength > %_MaxSmi())) { + throw MakeRangeError("invalid_typed_array_length"); + } + %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength); +} + +function NAMEConstructByLength(obj, length) { + var l = IS_UNDEFINED(length) ? 
+ 0 : ToPositiveInteger(length, "invalid_typed_array_length"); + if (l > %_MaxSmi()) { + throw MakeRangeError("invalid_typed_array_length"); + } + var byteLength = l * ELEMENT_SIZE; + if (byteLength > %_TypedArrayMaxSizeInHeap()) { + var buffer = new $ArrayBuffer(byteLength); + %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength); + } else { + %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength); + } +} - function NAMEConstructByArrayLike(obj, arrayLike) { - var length = arrayLike.length; - var l = ToPositiveInteger(length, "invalid_typed_array_length"); +function NAMEConstructByArrayLike(obj, arrayLike) { + var length = arrayLike.length; + var l = ToPositiveInteger(length, "invalid_typed_array_length"); - if (l > %_MaxSmi()) { - throw MakeRangeError("invalid_typed_array_length"); - } - if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) { - for (var i = 0; i < l; i++) { - // It is crucial that we let any execptions from arrayLike[i] - // propagate outside the function. - obj[i] = arrayLike[i]; - } + if (l > %_MaxSmi()) { + throw MakeRangeError("invalid_typed_array_length"); + } + if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) { + for (var i = 0; i < l; i++) { + // It is crucial that we let any execptions from arrayLike[i] + // propagate outside the function. 
+ obj[i] = arrayLike[i]; } } +} - function NAMEConstructor(arg1, arg2, arg3) { - - if (%_IsConstructCall()) { - if (IS_ARRAYBUFFER(arg1)) { - NAMEConstructByArrayBuffer(this, arg1, arg2, arg3); - } else if (IS_NUMBER(arg1) || IS_STRING(arg1) || - IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) { - NAMEConstructByLength(this, arg1); - } else { - NAMEConstructByArrayLike(this, arg1); - } +function NAMEConstructor(arg1, arg2, arg3) { + if (%_IsConstructCall()) { + if (IS_ARRAYBUFFER(arg1)) { + NAMEConstructByArrayBuffer(this, arg1, arg2, arg3); + } else if (IS_NUMBER(arg1) || IS_STRING(arg1) || + IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) { + NAMEConstructByLength(this, arg1); } else { - throw MakeTypeError("constructor_not_function", ["NAME"]) + NAMEConstructByArrayLike(this, arg1); } + } else { + throw MakeTypeError("constructor_not_function", ["NAME"]) } -endmacro - -TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR) +} -function TypedArrayGetBuffer() { +function NAME_GetBuffer() { + if (!(%_ClassOf(this) === 'NAME')) { + throw MakeTypeError('incompatible_method_receiver', + ["NAME.buffer", this]); + } return %TypedArrayGetBuffer(this); } -function TypedArrayGetByteLength() { - return %TypedArrayGetByteLength(this); +function NAME_GetByteLength() { + if (!(%_ClassOf(this) === 'NAME')) { + throw MakeTypeError('incompatible_method_receiver', + ["NAME.byteLength", this]); + } + return %_ArrayBufferViewGetByteLength(this); } -function TypedArrayGetByteOffset() { - return %TypedArrayGetByteOffset(this); +function NAME_GetByteOffset() { + if (!(%_ClassOf(this) === 'NAME')) { + throw MakeTypeError('incompatible_method_receiver', + ["NAME.byteOffset", this]); + } + return %_ArrayBufferViewGetByteOffset(this); } -function TypedArrayGetLength() { - return %TypedArrayGetLength(this); +function NAME_GetLength() { + if (!(%_ClassOf(this) === 'NAME')) { + throw MakeTypeError('incompatible_method_receiver', + ["NAME.length", this]); + } + return %_TypedArrayGetLength(this); } -function 
CreateSubArray(elementSize, constructor) { - return function(begin, end) { - var beginInt = TO_INTEGER(begin); - if (!IS_UNDEFINED(end)) { - end = TO_INTEGER(end); - } +var $NAME = global.NAME; - var srcLength = %TypedArrayGetLength(this); - if (beginInt < 0) { - beginInt = MathMax(0, srcLength + beginInt); - } else { - beginInt = MathMin(srcLength, beginInt); - } +function NAMESubArray(begin, end) { + if (!(%_ClassOf(this) === 'NAME')) { + throw MakeTypeError('incompatible_method_receiver', + ["NAME.subarray", this]); + } + var beginInt = TO_INTEGER(begin); + if (!IS_UNDEFINED(end)) { + end = TO_INTEGER(end); + } - var endInt = IS_UNDEFINED(end) ? srcLength : end; - if (endInt < 0) { - endInt = MathMax(0, srcLength + endInt); - } else { - endInt = MathMin(endInt, srcLength); - } - if (endInt < beginInt) { - endInt = beginInt; - } - var newLength = endInt - beginInt; - var beginByteOffset = - %TypedArrayGetByteOffset(this) + beginInt * elementSize; - return new constructor(%TypedArrayGetBuffer(this), - beginByteOffset, newLength); + var srcLength = %_TypedArrayGetLength(this); + if (beginInt < 0) { + beginInt = MathMax(0, srcLength + beginInt); + } else { + beginInt = MathMin(srcLength, beginInt); + } + + var endInt = IS_UNDEFINED(end) ? 
srcLength : end; + if (endInt < 0) { + endInt = MathMax(0, srcLength + endInt); + } else { + endInt = MathMin(endInt, srcLength); } + if (endInt < beginInt) { + endInt = beginInt; + } + var newLength = endInt - beginInt; + var beginByteOffset = + %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE; + return new $NAME(%TypedArrayGetBuffer(this), + beginByteOffset, newLength); } +endmacro + +TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR) + function TypedArraySetFromArrayLike(target, source, sourceLength, offset) { if (offset > 0) { @@ -296,34 +293,34 @@ // ------------------------------------------------------------------- -function SetupTypedArray(constructor, fun, elementSize) { +function SetupTypedArrays() { +macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE) %CheckIsBootstrapping(); - %SetCode(constructor, fun); - %FunctionSetPrototype(constructor, new $Object()); + %SetCode(global.NAME, NAMEConstructor); + %FunctionSetPrototype(global.NAME, new $Object()); - %SetProperty(constructor, "BYTES_PER_ELEMENT", elementSize, - READ_ONLY | DONT_ENUM | DONT_DELETE); - %SetProperty(constructor.prototype, - "constructor", constructor, DONT_ENUM); - %SetProperty(constructor.prototype, - "BYTES_PER_ELEMENT", elementSize, - READ_ONLY | DONT_ENUM | DONT_DELETE); - InstallGetter(constructor.prototype, "buffer", TypedArrayGetBuffer); - InstallGetter(constructor.prototype, "byteOffset", TypedArrayGetByteOffset); - InstallGetter(constructor.prototype, "byteLength", TypedArrayGetByteLength); - InstallGetter(constructor.prototype, "length", TypedArrayGetLength); + %AddNamedProperty(global.NAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE, + READ_ONLY | DONT_ENUM | DONT_DELETE); + %AddNamedProperty(global.NAME.prototype, + "constructor", global.NAME, DONT_ENUM); + %AddNamedProperty(global.NAME.prototype, + "BYTES_PER_ELEMENT", ELEMENT_SIZE, + READ_ONLY | DONT_ENUM | DONT_DELETE); + InstallGetter(global.NAME.prototype, "buffer", NAME_GetBuffer); + InstallGetter(global.NAME.prototype, 
"byteOffset", NAME_GetByteOffset); + InstallGetter(global.NAME.prototype, "byteLength", NAME_GetByteLength); + InstallGetter(global.NAME.prototype, "length", NAME_GetLength); - InstallFunctions(constructor.prototype, DONT_ENUM, $Array( - "subarray", CreateSubArray(elementSize, constructor), + InstallFunctions(global.NAME.prototype, DONT_ENUM, $Array( + "subarray", NAMESubArray, "set", TypedArraySet )); -} - -macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE) - SetupTypedArray (global.NAME, NAMEConstructor, ELEMENT_SIZE); endmacro TYPED_ARRAYS(SETUP_TYPED_ARRAY) +} + +SetupTypedArrays(); // --------------------------- DataView ----------------------------- @@ -341,7 +338,7 @@ byteLength = TO_INTEGER(byteLength); } - var bufferByteLength = %ArrayBufferGetByteLength(buffer); + var bufferByteLength = %_ArrayBufferGetByteLength(buffer); var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset; if (offset > bufferByteLength) { @@ -360,7 +357,7 @@ } } -function DataViewGetBuffer() { +function DataViewGetBufferJS() { if (!IS_DATAVIEW(this)) { throw MakeTypeError('incompatible_method_receiver', ['DataView.buffer', this]); @@ -373,7 +370,7 @@ throw MakeTypeError('incompatible_method_receiver', ['DataView.byteOffset', this]); } - return %DataViewGetByteOffset(this); + return %_ArrayBufferViewGetByteOffset(this); } function DataViewGetByteLength() { @@ -381,7 +378,7 @@ throw MakeTypeError('incompatible_method_receiver', ['DataView.byteLength', this]); } - return %DataViewGetByteLength(this); + return %_ArrayBufferViewGetByteLength(this); } macro DATA_VIEW_TYPES(FUNCTION) @@ -401,7 +398,7 @@ macro DATA_VIEW_GETTER_SETTER(TYPENAME) -function DataViewGetTYPENAME(offset, little_endian) { +function DataViewGetTYPENAMEJS(offset, little_endian) { if (!IS_DATAVIEW(this)) { throw MakeTypeError('incompatible_method_receiver', ['DataView.getTYPENAME', this]); @@ -414,7 +411,7 @@ !!little_endian); } -function DataViewSetTYPENAME(offset, value, little_endian) { +function 
DataViewSetTYPENAMEJS(offset, value, little_endian) { if (!IS_DATAVIEW(this)) { throw MakeTypeError('incompatible_method_receiver', ['DataView.setTYPENAME', this]); @@ -439,36 +436,36 @@ %FunctionSetPrototype($DataView, new $Object); // Set up constructor property on the DataView prototype. - %SetProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM); + %AddNamedProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM); - InstallGetter($DataView.prototype, "buffer", DataViewGetBuffer); + InstallGetter($DataView.prototype, "buffer", DataViewGetBufferJS); InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset); InstallGetter($DataView.prototype, "byteLength", DataViewGetByteLength); InstallFunctions($DataView.prototype, DONT_ENUM, $Array( - "getInt8", DataViewGetInt8, - "setInt8", DataViewSetInt8, + "getInt8", DataViewGetInt8JS, + "setInt8", DataViewSetInt8JS, - "getUint8", DataViewGetUint8, - "setUint8", DataViewSetUint8, + "getUint8", DataViewGetUint8JS, + "setUint8", DataViewSetUint8JS, - "getInt16", DataViewGetInt16, - "setInt16", DataViewSetInt16, + "getInt16", DataViewGetInt16JS, + "setInt16", DataViewSetInt16JS, - "getUint16", DataViewGetUint16, - "setUint16", DataViewSetUint16, + "getUint16", DataViewGetUint16JS, + "setUint16", DataViewSetUint16JS, - "getInt32", DataViewGetInt32, - "setInt32", DataViewSetInt32, + "getInt32", DataViewGetInt32JS, + "setInt32", DataViewSetInt32JS, - "getUint32", DataViewGetUint32, - "setUint32", DataViewSetUint32, + "getUint32", DataViewGetUint32JS, + "setUint32", DataViewSetUint32JS, - "getFloat32", DataViewGetFloat32, - "setFloat32", DataViewSetFloat32, + "getFloat32", DataViewGetFloat32JS, + "setFloat32", DataViewSetFloat32JS, - "getFloat64", DataViewGetFloat64, - "setFloat64", DataViewSetFloat64 + "getFloat64", DataViewGetFloat64JS, + "setFloat64", DataViewSetFloat64JS )); } diff -Nru nodejs-0.11.13/deps/v8/src/type-info.cc nodejs-0.11.15/deps/v8/src/type-info.cc --- 
nodejs-0.11.13/deps/v8/src/type-info.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/type-info.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,60 +1,37 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "v8.h" - -#include "ast.h" -#include "code-stubs.h" -#include "compiler.h" -#include "ic.h" -#include "macro-assembler.h" -#include "stub-cache.h" -#include "type-info.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "ic-inl.h" -#include "objects-inl.h" +#include "src/v8.h" + +#include "src/ast.h" +#include "src/code-stubs.h" +#include "src/compiler.h" +#include "src/ic.h" +#include "src/macro-assembler.h" +#include "src/stub-cache.h" +#include "src/type-info.h" + +#include "src/ic-inl.h" +#include "src/objects-inl.h" namespace v8 { namespace internal { TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code, + Handle<FixedArray> feedback_vector, Handle<Context> native_context, Zone* zone) : native_context_(native_context), zone_(zone) { - Object* raw_info = code->type_feedback_info(); - if (raw_info->IsTypeFeedbackInfo()) { - feedback_vector_ = Handle<FixedArray>(TypeFeedbackInfo::cast(raw_info)-> - feedback_vector()); - } - BuildDictionary(code); - ASSERT(dictionary_->IsDictionary()); + DCHECK(dictionary_->IsDictionary()); + // We make a copy of the feedback vector because a GC could clear + // the type feedback info contained therein. + // TODO(mvstanton): revisit the decision to copy when we weakly + // traverse the feedback vector at GC time. + feedback_vector_ = isolate()->factory()->CopyFixedArray(feedback_vector); } @@ -79,7 +56,7 @@ Handle<Object> TypeFeedbackOracle::GetInfo(int slot) { - ASSERT(slot >= 0 && slot < feedback_vector_->length()); + DCHECK(slot >= 0 && slot < feedback_vector_->length()); Object* obj = feedback_vector_->get(slot); if (!obj->IsJSFunction() || !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) { @@ -120,9 +97,7 @@ bool TypeFeedbackOracle::CallIsMonomorphic(int slot) { Handle<Object> value = GetInfo(slot); - return FLAG_pretenuring_call_new - ? 
value->IsJSFunction() - : value->IsAllocationSite() || value->IsJSFunction(); + return value->IsAllocationSite() || value->IsJSFunction(); } @@ -136,9 +111,9 @@ byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) { Handle<Object> value = GetInfo(feedback_vector_slot); - return value->IsSmi() && - Smi::cast(*value)->value() == TypeFeedbackInfo::kForInFastCaseMarker - ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN; + return value.is_identical_to( + TypeFeedbackInfo::UninitializedSentinel(isolate())) + ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN; } @@ -157,12 +132,11 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) { Handle<Object> info = GetInfo(slot); - if (FLAG_pretenuring_call_new || info->IsJSFunction()) { - return Handle<JSFunction>::cast(info); + if (info->IsAllocationSite()) { + return Handle<JSFunction>(isolate()->native_context()->array_function()); } - ASSERT(info->IsAllocationSite()); - return Handle<JSFunction>(isolate()->native_context()->array_function()); + return Handle<JSFunction>::cast(info); } @@ -172,11 +146,20 @@ return Handle<JSFunction>::cast(info); } - ASSERT(info->IsAllocationSite()); + DCHECK(info->IsAllocationSite()); return Handle<JSFunction>(isolate()->native_context()->array_function()); } +Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(int slot) { + Handle<Object> info = GetInfo(slot); + if (info->IsAllocationSite()) { + return Handle<AllocationSite>::cast(info); + } + return Handle<AllocationSite>::null(); +} + + Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(int slot) { Handle<Object> info = GetInfo(slot); if (FLAG_pretenuring_call_new || info->IsAllocationSite()) { @@ -192,16 +175,6 @@ } -bool TypeFeedbackOracle::LoadIsStub(TypeFeedbackId id, ICStub* stub) { - Handle<Object> object = GetInfo(id); - if (!object->IsCode()) return false; - Handle<Code> code = Handle<Code>::cast(object); - if (!code->is_load_stub()) return false; - if 
(code->ic_state() != MONOMORPHIC) return false; - return stub->Describes(*code); -} - - void TypeFeedbackOracle::CompareType(TypeFeedbackId id, Type** left_type, Type** right_type, @@ -217,18 +190,17 @@ Handle<Map> map; Map* raw_map = code->FindFirstMap(); if (raw_map != NULL) { - map = Map::CurrentMapForDeprecated(handle(raw_map)); - if (!map.is_null() && CanRetainOtherContext(*map, *native_context_)) { + if (Map::TryUpdate(handle(raw_map)).ToHandle(&map) && + CanRetainOtherContext(*map, *native_context_)) { map = Handle<Map>::null(); } } if (code->is_compare_ic_stub()) { - int stub_minor_key = code->stub_info(); - CompareIC::StubInfoToType( - stub_minor_key, left_type, right_type, combined_type, map, zone()); + CompareIC::StubInfoToType(code->stub_key(), left_type, right_type, + combined_type, map, zone()); } else if (code->is_compare_nil_ic_stub()) { - CompareNilICStub stub(code->extra_ic_state()); + CompareNilICStub stub(isolate(), code->extra_ic_state()); *combined_type = stub.GetType(zone(), map); *left_type = *right_type = stub.GetInputType(zone(), map); } @@ -246,7 +218,7 @@ if (!object->IsCode()) { // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the // operations covered by the BinaryOpIC we should always have them. 
- ASSERT(op < BinaryOpIC::State::FIRST_TOKEN || + DCHECK(op < BinaryOpIC::State::FIRST_TOKEN || op > BinaryOpIC::State::LAST_TOKEN); *left = *right = *result = Type::None(zone()); *fixed_right_arg = Maybe<int>(); @@ -254,9 +226,9 @@ return; } Handle<Code> code = Handle<Code>::cast(object); - ASSERT_EQ(Code::BINARY_OP_IC, code->kind()); - BinaryOpIC::State state(code->extra_ic_state()); - ASSERT_EQ(op, state.op()); + DCHECK_EQ(Code::BINARY_OP_IC, code->kind()); + BinaryOpIC::State state(isolate(), code->extra_ic_state()); + DCHECK_EQ(op, state.op()); *left = state.GetLeftType(zone()); *right = state.GetRightType(zone()); @@ -276,22 +248,18 @@ Handle<Object> object = GetInfo(id); if (!object->IsCode()) return Type::None(zone()); Handle<Code> code = Handle<Code>::cast(object); - ASSERT_EQ(Code::BINARY_OP_IC, code->kind()); - BinaryOpIC::State state(code->extra_ic_state()); + DCHECK_EQ(Code::BINARY_OP_IC, code->kind()); + BinaryOpIC::State state(isolate(), code->extra_ic_state()); return state.GetLeftType(zone()); } -void TypeFeedbackOracle::PropertyReceiverTypes( - TypeFeedbackId id, Handle<String> name, - SmallMapList* receiver_types, bool* is_prototype) { +void TypeFeedbackOracle::PropertyReceiverTypes(TypeFeedbackId id, + Handle<String> name, + SmallMapList* receiver_types) { receiver_types->Clear(); - FunctionPrototypeStub proto_stub(Code::LOAD_IC); - *is_prototype = LoadIsStub(id, &proto_stub); - if (!*is_prototype) { - Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); - CollectReceiverTypes(id, name, flags, receiver_types); - } + Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); + CollectReceiverTypes(id, name, flags, receiver_types); } @@ -338,7 +306,7 @@ Handle<Object> object = GetInfo(ast_id); if (object->IsUndefined() || object->IsSmi()) return; - ASSERT(object->IsCode()); + DCHECK(object->IsCode()); Handle<Code> code(Handle<Code>::cast(object)); if (FLAG_collect_megamorphic_maps_from_stub_cache && @@ -445,8 +413,7 @@ 
ZoneList<RelocInfo>* infos) { AllowHeapAllocation allocation_allowed; Code* old_code = *code; - dictionary_ = - isolate()->factory()->NewUnseededNumberDictionary(infos->length()); + dictionary_ = UnseededNumberDictionary::New(isolate(), infos->length()); RelocateRelocInfos(infos, old_code, *code); } @@ -490,16 +457,13 @@ void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) { - ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) == + DCHECK(dictionary_->FindEntry(IdToKey(ast_id)) == UnseededNumberDictionary::kNotFound); - MaybeObject* maybe_result = dictionary_->AtNumberPut(IdToKey(ast_id), target); - USE(maybe_result); -#ifdef DEBUG - Object* result = NULL; // Dictionary has been allocated with sufficient size for all elements. - ASSERT(maybe_result->ToObject(&result)); - ASSERT(*dictionary_ == result); -#endif + DisallowHeapAllocation no_need_to_resize_dictionary; + HandleScope scope(isolate()); + USE(UnseededNumberDictionary::AtNumberPut( + dictionary_, IdToKey(ast_id), handle(target, isolate()))); } diff -Nru nodejs-0.11.13/deps/v8/src/type-info.h nodejs-0.11.15/deps/v8/src/type-info.h --- nodejs-0.11.13/deps/v8/src/type-info.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/type-info.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,26 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_TYPE_INFO_H_ #define V8_TYPE_INFO_H_ -#include "allocation.h" -#include "globals.h" -#include "types.h" -#include "zone-inl.h" +#include "src/allocation.h" +#include "src/globals.h" +#include "src/types.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { // Forward declarations. 
-class ICStub; class SmallMapList; class TypeFeedbackOracle: public ZoneObject { public: TypeFeedbackOracle(Handle<Code> code, + Handle<FixedArray> feedback_vector, Handle<Context> native_context, Zone* zone); @@ -63,10 +40,8 @@ KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id); - void PropertyReceiverTypes(TypeFeedbackId id, - Handle<String> name, - SmallMapList* receiver_types, - bool* is_prototype); + void PropertyReceiverTypes(TypeFeedbackId id, Handle<String> name, + SmallMapList* receiver_types); void KeyedPropertyReceiverTypes(TypeFeedbackId id, SmallMapList* receiver_types, bool* is_string); @@ -87,11 +62,11 @@ Context* native_context); Handle<JSFunction> GetCallTarget(int slot); + Handle<AllocationSite> GetCallAllocationSite(int slot); Handle<JSFunction> GetCallNewTarget(int slot); Handle<AllocationSite> GetCallNewAllocationSite(int slot); bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id); - bool LoadIsStub(TypeFeedbackId id, ICStub* stub); // TODO(1571) We can't use ToBooleanStub::Types as the return value because // of various cycles in our headers. Death to tons of implementations in diff -Nru nodejs-0.11.13/deps/v8/src/types.cc nodejs-0.11.15/deps/v8/src/types.cc --- nodejs-0.11.13/deps/v8/src/types.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/types.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,180 +1,173 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "types.h" -#include "string-stream.h" +#include "src/types.h" + +#include "src/ostreams.h" +#include "src/types-inl.h" namespace v8 { namespace internal { + +// ----------------------------------------------------------------------------- +// Range-related custom order on doubles. +// We want -0 to be less than +0. + +static bool dle(double x, double y) { + return x <= y && (x != 0 || IsMinusZero(x) || !IsMinusZero(y)); +} + + +static bool deq(double x, double y) { + return dle(x, y) && dle(y, x); +} + + +// ----------------------------------------------------------------------------- +// Glb and lub computation. + +// The largest bitset subsumed by this type. 
template<class Config> -int TypeImpl<Config>::NumClasses() { - if (this->IsClass()) { - return 1; - } else if (this->IsUnion()) { - UnionedHandle unioned = this->AsUnion(); - int result = 0; - for (int i = 0; i < Config::union_length(unioned); ++i) { - if (Config::union_get(unioned, i)->IsClass()) ++result; - } - return result; +int TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) { + DisallowHeapAllocation no_allocation; + if (type->IsBitset()) { + return type->AsBitset(); + } else if (type->IsUnion()) { + UnionHandle unioned = handle(type->AsUnion()); + DCHECK(unioned->Wellformed()); + return unioned->Get(0)->BitsetGlb(); // Other BitsetGlb's are kNone anyway. } else { - return 0; + return kNone; } } +// The smallest bitset subsuming this type. template<class Config> -int TypeImpl<Config>::NumConstants() { - if (this->IsConstant()) { - return 1; - } else if (this->IsUnion()) { - UnionedHandle unioned = this->AsUnion(); - int result = 0; - for (int i = 0; i < Config::union_length(unioned); ++i) { - if (Config::union_get(unioned, i)->IsConstant()) ++result; +int TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) { + DisallowHeapAllocation no_allocation; + if (type->IsBitset()) { + return type->AsBitset(); + } else if (type->IsUnion()) { + UnionHandle unioned = handle(type->AsUnion()); + int bitset = kNone; + for (int i = 0; i < unioned->Length(); ++i) { + bitset |= unioned->Get(i)->BitsetLub(); } - return result; + return bitset; + } else if (type->IsClass()) { + // Little hack to avoid the need for a region for handlification here... + return Config::is_class(type) ? 
Lub(*Config::as_class(type)) : + type->AsClass()->Bound(NULL)->AsBitset(); + } else if (type->IsConstant()) { + return type->AsConstant()->Bound()->AsBitset(); + } else if (type->IsRange()) { + return type->AsRange()->Bound()->AsBitset(); + } else if (type->IsContext()) { + return type->AsContext()->Bound()->AsBitset(); + } else if (type->IsArray()) { + return type->AsArray()->Bound()->AsBitset(); + } else if (type->IsFunction()) { + return type->AsFunction()->Bound()->AsBitset(); } else { - return 0; + UNREACHABLE(); + return kNone; } } -template<class Config> template<class T> -typename TypeImpl<Config>::TypeHandle -TypeImpl<Config>::Iterator<T>::get_type() { - ASSERT(!Done()); - return type_->IsUnion() ? Config::union_get(type_->AsUnion(), index_) : type_; -} - - -// C++ cannot specialise nested templates, so we have to go through this -// contortion with an auxiliary template to simulate it. -template<class Config, class T> -struct TypeImplIteratorAux { - static bool matches(typename TypeImpl<Config>::TypeHandle type); - static i::Handle<T> current(typename TypeImpl<Config>::TypeHandle type); -}; - +// The smallest bitset subsuming this type, ignoring explicit bounds. 
template<class Config> -struct TypeImplIteratorAux<Config, i::Map> { - static bool matches(typename TypeImpl<Config>::TypeHandle type) { - return type->IsClass(); - } - static i::Handle<i::Map> current(typename TypeImpl<Config>::TypeHandle type) { - return type->AsClass(); +int TypeImpl<Config>::BitsetType::InherentLub(TypeImpl* type) { + DisallowHeapAllocation no_allocation; + if (type->IsBitset()) { + return type->AsBitset(); + } else if (type->IsUnion()) { + UnionHandle unioned = handle(type->AsUnion()); + int bitset = kNone; + for (int i = 0; i < unioned->Length(); ++i) { + bitset |= unioned->Get(i)->InherentBitsetLub(); + } + return bitset; + } else if (type->IsClass()) { + return Lub(*type->AsClass()->Map()); + } else if (type->IsConstant()) { + return Lub(*type->AsConstant()->Value()); + } else if (type->IsRange()) { + return Lub(type->AsRange()->Min(), type->AsRange()->Max()); + } else if (type->IsContext()) { + return kInternal & kTaggedPtr; + } else if (type->IsArray()) { + return kArray; + } else if (type->IsFunction()) { + return kFunction; + } else { + UNREACHABLE(); + return kNone; } -}; +} + template<class Config> -struct TypeImplIteratorAux<Config, i::Object> { - static bool matches(typename TypeImpl<Config>::TypeHandle type) { - return type->IsConstant(); +int TypeImpl<Config>::BitsetType::Lub(i::Object* value) { + DisallowHeapAllocation no_allocation; + if (value->IsNumber()) { + return Lub(value->Number()) & (value->IsSmi() ? 
kTaggedInt : kTaggedPtr); } - static i::Handle<i::Object> current( - typename TypeImpl<Config>::TypeHandle type) { - return type->AsConstant(); - } -}; - -template<class Config> template<class T> -bool TypeImpl<Config>::Iterator<T>::matches(TypeHandle type) { - return TypeImplIteratorAux<Config, T>::matches(type); + return Lub(i::HeapObject::cast(value)->map()); } -template<class Config> template<class T> -i::Handle<T> TypeImpl<Config>::Iterator<T>::Current() { - return TypeImplIteratorAux<Config, T>::current(get_type()); + +template<class Config> +int TypeImpl<Config>::BitsetType::Lub(double value) { + DisallowHeapAllocation no_allocation; + if (i::IsMinusZero(value)) return kMinusZero; + if (std::isnan(value)) return kNaN; + if (IsUint32Double(value)) return Lub(FastD2UI(value)); + if (IsInt32Double(value)) return Lub(FastD2I(value)); + return kOtherNumber; } -template<class Config> template<class T> -void TypeImpl<Config>::Iterator<T>::Advance() { - ++index_; - if (type_->IsUnion()) { - UnionedHandle unioned = type_->AsUnion(); - for (; index_ < Config::union_length(unioned); ++index_) { - if (matches(Config::union_get(unioned, index_))) return; - } - } else if (index_ == 0 && matches(type_)) { - return; - } - index_ = -1; +template<class Config> +int TypeImpl<Config>::BitsetType::Lub(double min, double max) { + DisallowHeapAllocation no_allocation; + DCHECK(dle(min, max)); + if (deq(min, max)) return BitsetType::Lub(min); // Singleton range. + int bitset = BitsetType::kNumber ^ SEMANTIC(BitsetType::kNaN); + if (dle(0, min) || max < 0) bitset ^= SEMANTIC(BitsetType::kMinusZero); + return bitset; + // TODO(neis): Could refine this further by doing more checks on min/max. } -// Get the smallest bitset subsuming this type. 
template<class Config> -int TypeImpl<Config>::LubBitset() { - if (this->IsBitset()) { - return this->AsBitset(); - } else if (this->IsUnion()) { - UnionedHandle unioned = this->AsUnion(); - int bitset = kNone; - for (int i = 0; i < Config::union_length(unioned); ++i) { - bitset |= Config::union_get(unioned, i)->LubBitset(); - } - return bitset; - } else if (this->IsClass()) { - int bitset = Config::lub_bitset(this); - return bitset ? bitset : LubBitset(*this->AsClass()); - } else { - int bitset = Config::lub_bitset(this); - return bitset ? bitset : LubBitset(*this->AsConstant()); +int TypeImpl<Config>::BitsetType::Lub(int32_t value) { + if (value >= 0x40000000) { + return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall; } + if (value >= 0) return kUnsignedSmall; + if (value >= -0x40000000) return kOtherSignedSmall; + return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall; } template<class Config> -int TypeImpl<Config>::LubBitset(i::Object* value) { - if (value->IsSmi()) return kSignedSmall & kTaggedInt; - i::Map* map = i::HeapObject::cast(value)->map(); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - int32_t i; - uint32_t u; - return kTaggedPtr & ( - value->ToInt32(&i) ? (Smi::IsValid(i) ? kSignedSmall : kOtherSigned32) : - value->ToUint32(&u) ? kUnsigned32 : kFloat); - } - if (map->instance_type() == ODDBALL_TYPE) { - if (value->IsUndefined()) return kUndefined; - if (value->IsNull()) return kNull; - if (value->IsBoolean()) return kBoolean; - if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone? - if (value->IsUninitialized()) return kNone; - UNREACHABLE(); +int TypeImpl<Config>::BitsetType::Lub(uint32_t value) { + DisallowHeapAllocation no_allocation; + if (value >= 0x80000000u) return kOtherUnsigned32; + if (value >= 0x40000000u) { + return i::SmiValuesAre31Bits() ? 
kOtherUnsigned31 : kUnsignedSmall; } - return LubBitset(map); + return kUnsignedSmall; } template<class Config> -int TypeImpl<Config>::LubBitset(i::Map* map) { +int TypeImpl<Config>::BitsetType::Lub(i::Map* map) { + DisallowHeapAllocation no_allocation; switch (map->instance_type()) { case STRING_TYPE: case ASCII_STRING_TYPE: @@ -190,8 +183,6 @@ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: case INTERNALIZED_STRING_TYPE: case ASCII_INTERNALIZED_STRING_TYPE: - case CONS_INTERNALIZED_STRING_TYPE: - case CONS_ASCII_INTERNALIZED_STRING_TYPE: case EXTERNAL_INTERNALIZED_STRING_TYPE: case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE: case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE: @@ -201,10 +192,20 @@ return kString; case SYMBOL_TYPE: return kSymbol; - case ODDBALL_TYPE: - return kOddball; + case ODDBALL_TYPE: { + Heap* heap = map->GetHeap(); + if (map == heap->undefined_map()) return kUndefined; + if (map == heap->null_map()) return kNull; + if (map == heap->boolean_map()) return kBoolean; + DCHECK(map == heap->the_hole_map() || + map == heap->uninitialized_map() || + map == heap->no_interceptor_result_sentinel_map() || + map == heap->termination_exception_map() || + map == heap->arguments_marker_map()); + return kInternal & kTaggedPtr; + } case HEAP_NUMBER_TYPE: - return kFloat & kTaggedPtr; + return kNumber & kTaggedPtr; case JS_VALUE_TYPE: case JS_DATE_TYPE: case JS_OBJECT_TYPE: @@ -219,6 +220,8 @@ case JS_DATA_VIEW_TYPE: case JS_SET_TYPE: case JS_MAP_TYPE: + case JS_SET_ITERATOR_TYPE: + case JS_MAP_ITERATOR_TYPE: case JS_WEAK_MAP_TYPE: case JS_WEAK_SET_TYPE: if (map->is_undetectable()) return kUndetectable; @@ -245,8 +248,11 @@ return kDetectable; case DECLARED_ACCESSOR_INFO_TYPE: case EXECUTABLE_ACCESSOR_INFO_TYPE: + case SHARED_FUNCTION_INFO_TYPE: case ACCESSOR_PAIR_TYPE: case FIXED_ARRAY_TYPE: + case FOREIGN_TYPE: + case CODE_TYPE: return kInternal & kTaggedPtr; default: UNREACHABLE(); @@ -255,179 +261,337 @@ } -// Get the largest bitset subsumed by 
this type. -template<class Config> -int TypeImpl<Config>::GlbBitset() { - if (this->IsBitset()) { - return this->AsBitset(); - } else if (this->IsUnion()) { - // All but the first are non-bitsets and thus would yield kNone anyway. - return Config::union_get(this->AsUnion(), 0)->GlbBitset(); - } else { - return kNone; - } -} - - -// Most precise _current_ type of a value (usually its class). -template<class Config> -typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::OfCurrently( - i::Handle<i::Object> value, Region* region) { - if (value->IsSmi() || - i::HeapObject::cast(*value)->map()->instance_type() == HEAP_NUMBER_TYPE || - i::HeapObject::cast(*value)->map()->instance_type() == ODDBALL_TYPE) { - return Of(value, region); - } - return Class(i::handle(i::HeapObject::cast(*value)->map()), region); -} - +// ----------------------------------------------------------------------------- +// Predicates. // Check this <= that. template<class Config> bool TypeImpl<Config>::SlowIs(TypeImpl* that) { + DisallowHeapAllocation no_allocation; + // Fast path for bitsets. 
if (this->IsNone()) return true; if (that->IsBitset()) { - return (this->LubBitset() | that->AsBitset()) == that->AsBitset(); + return BitsetType::Is(BitsetType::Lub(this), that->AsBitset()); } if (that->IsClass()) { - return this->IsClass() && *this->AsClass() == *that->AsClass(); + return this->IsClass() + && *this->AsClass()->Map() == *that->AsClass()->Map() + && ((Config::is_class(that) && Config::is_class(this)) || + BitsetType::New(this->BitsetLub())->Is( + BitsetType::New(that->BitsetLub()))); } if (that->IsConstant()) { - return this->IsConstant() && *this->AsConstant() == *that->AsConstant(); + return this->IsConstant() + && *this->AsConstant()->Value() == *that->AsConstant()->Value() + && this->AsConstant()->Bound()->Is(that->AsConstant()->Bound()); + } + if (that->IsRange()) { + return this->IsRange() + && this->AsRange()->Bound()->Is(that->AsRange()->Bound()) + && dle(that->AsRange()->Min(), this->AsRange()->Min()) + && dle(this->AsRange()->Max(), that->AsRange()->Max()); + } + if (that->IsContext()) { + return this->IsContext() + && this->AsContext()->Outer()->Equals(that->AsContext()->Outer()); + } + if (that->IsArray()) { + return this->IsArray() + && this->AsArray()->Element()->Equals(that->AsArray()->Element()); + } + if (that->IsFunction()) { + // We currently do not allow for any variance here, in order to keep + // Union and Intersect operations simple. + if (!this->IsFunction()) return false; + FunctionType* this_fun = this->AsFunction(); + FunctionType* that_fun = that->AsFunction(); + if (this_fun->Arity() != that_fun->Arity() || + !this_fun->Result()->Equals(that_fun->Result()) || + !that_fun->Receiver()->Equals(this_fun->Receiver())) { + return false; + } + for (int i = 0; i < this_fun->Arity(); ++i) { + if (!that_fun->Parameter(i)->Equals(this_fun->Parameter(i))) return false; + } + return true; } // (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... 
/\ (Tn <= T) if (this->IsUnion()) { - UnionedHandle unioned = this->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle this_i = Config::union_get(unioned, i); - if (!this_i->Is(that)) return false; + UnionHandle unioned = handle(this->AsUnion()); + for (int i = 0; i < unioned->Length(); ++i) { + if (!unioned->Get(i)->Is(that)) return false; } return true; } // T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn) // (iff T is not a union) - ASSERT(!this->IsUnion()); - if (that->IsUnion()) { - UnionedHandle unioned = that->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle that_i = Config::union_get(unioned, i); - if (this->Is(that_i)) return true; - if (this->IsBitset()) break; // Fast fail, only first field is a bitset. - } - return false; + DCHECK(!this->IsUnion() && that->IsUnion()); + UnionHandle unioned = handle(that->AsUnion()); + for (int i = 0; i < unioned->Length(); ++i) { + if (this->Is(unioned->Get(i))) return true; + if (this->IsBitset()) break; // Fast fail, only first field is a bitset. } - return false; } template<class Config> -bool TypeImpl<Config>::IsCurrently(TypeImpl* that) { - return this->Is(that) || - (this->IsConstant() && that->IsClass() && - this->AsConstant()->IsHeapObject() && - i::HeapObject::cast(*this->AsConstant())->map() == *that->AsClass()); +bool TypeImpl<Config>::NowIs(TypeImpl* that) { + DisallowHeapAllocation no_allocation; + + // TODO(rossberg): this is incorrect for + // Union(Constant(V), T)->NowIs(Class(M)) + // but fuzzing does not cover that! + if (this->IsConstant()) { + i::Object* object = *this->AsConstant()->Value(); + if (object->IsHeapObject()) { + i::Map* map = i::HeapObject::cast(object)->map(); + for (Iterator<i::Map> it = that->Classes(); !it.Done(); it.Advance()) { + if (*it.Current() == map) return true; + } + } + } + return this->Is(that); +} + + +// Check if this contains only (currently) stable classes. 
+template<class Config> +bool TypeImpl<Config>::NowStable() { + DisallowHeapAllocation no_allocation; + for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) { + if (!it.Current()->is_stable()) return false; + } + return true; } // Check this overlaps that. template<class Config> bool TypeImpl<Config>::Maybe(TypeImpl* that) { - // Fast path for bitsets. - if (this->IsBitset()) { - return IsInhabited(this->AsBitset() & that->LubBitset()); - } - if (that->IsBitset()) { - return IsInhabited(this->LubBitset() & that->AsBitset()); - } + DisallowHeapAllocation no_allocation; // (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T) if (this->IsUnion()) { - UnionedHandle unioned = this->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle this_i = Config::union_get(unioned, i); - if (this_i->Maybe(that)) return true; + UnionHandle unioned = handle(this->AsUnion()); + for (int i = 0; i < unioned->Length(); ++i) { + if (unioned->Get(i)->Maybe(that)) return true; } return false; } // T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... 
\/ (T overlaps Tn) if (that->IsUnion()) { - UnionedHandle unioned = that->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle that_i = Config::union_get(unioned, i); - if (this->Maybe(that_i)) return true; + UnionHandle unioned = handle(that->AsUnion()); + for (int i = 0; i < unioned->Length(); ++i) { + if (this->Maybe(unioned->Get(i))) return true; } return false; } - ASSERT(!this->IsUnion() && !that->IsUnion()); + DCHECK(!this->IsUnion() && !that->IsUnion()); + if (this->IsBitset() || that->IsBitset()) { + return BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()); + } if (this->IsClass()) { - return that->IsClass() && *this->AsClass() == *that->AsClass(); + return that->IsClass() + && *this->AsClass()->Map() == *that->AsClass()->Map(); } if (this->IsConstant()) { - return that->IsConstant() && *this->AsConstant() == *that->AsConstant(); + return that->IsConstant() + && *this->AsConstant()->Value() == *that->AsConstant()->Value(); + } + if (this->IsContext()) { + return this->Equals(that); + } + if (this->IsArray()) { + // There is no variance! + return this->Equals(that); + } + if (this->IsFunction()) { + // There is no variance! + return this->Equals(that); } return false; } +// Check if value is contained in (inhabits) type. 
+template<class Config> +bool TypeImpl<Config>::Contains(i::Object* value) { + DisallowHeapAllocation no_allocation; + if (this->IsRange()) { + return value->IsNumber() && + dle(this->AsRange()->Min(), value->Number()) && + dle(value->Number(), this->AsRange()->Max()) && + BitsetType::Is(BitsetType::Lub(value), this->BitsetLub()); + } + for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) { + if (*it.Current() == value) return true; + } + return BitsetType::New(BitsetType::Lub(value))->Is(this); +} + + +template<class Config> +bool TypeImpl<Config>::UnionType::Wellformed() { + DCHECK(this->Length() >= 2); + for (int i = 0; i < this->Length(); ++i) { + DCHECK(!this->Get(i)->IsUnion()); + if (i > 0) DCHECK(!this->Get(i)->IsBitset()); + for (int j = 0; j < this->Length(); ++j) { + if (i != j) DCHECK(!this->Get(i)->Is(this->Get(j))); + } + } + return true; +} + + +// ----------------------------------------------------------------------------- +// Union and intersection + +template<class Config> +typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Rebound( + int bitset, Region* region) { + TypeHandle bound = BitsetType::New(bitset, region); + if (this->IsClass()) { + return ClassType::New(this->AsClass()->Map(), bound, region); + } else if (this->IsConstant()) { + return ConstantType::New(this->AsConstant()->Value(), bound, region); + } else if (this->IsRange()) { + return RangeType::New( + this->AsRange()->Min(), this->AsRange()->Max(), bound, region); + } else if (this->IsContext()) { + return ContextType::New(this->AsContext()->Outer(), bound, region); + } else if (this->IsArray()) { + return ArrayType::New(this->AsArray()->Element(), bound, region); + } else if (this->IsFunction()) { + FunctionHandle function = Config::handle(this->AsFunction()); + int arity = function->Arity(); + FunctionHandle type = FunctionType::New( + function->Result(), function->Receiver(), bound, arity, region); + for (int i = 0; i < arity; ++i) { + 
type->InitParameter(i, function->Parameter(i)); + } + return type; + } + UNREACHABLE(); + return TypeHandle(); +} + + template<class Config> -bool TypeImpl<Config>::InUnion(UnionedHandle unioned, int current_size) { - ASSERT(!this->IsUnion()); +int TypeImpl<Config>::BoundBy(TypeImpl* that) { + DCHECK(!this->IsUnion()); + if (that->IsUnion()) { + UnionType* unioned = that->AsUnion(); + int length = unioned->Length(); + int bitset = BitsetType::kNone; + for (int i = 0; i < length; ++i) { + bitset |= BoundBy(unioned->Get(i)->unhandle()); + } + return bitset; + } else if (that->IsClass() && this->IsClass() && + *this->AsClass()->Map() == *that->AsClass()->Map()) { + return that->BitsetLub(); + } else if (that->IsConstant() && this->IsConstant() && + *this->AsConstant()->Value() == *that->AsConstant()->Value()) { + return that->AsConstant()->Bound()->AsBitset(); + } else if (that->IsContext() && this->IsContext() && this->Is(that)) { + return that->AsContext()->Bound()->AsBitset(); + } else if (that->IsArray() && this->IsArray() && this->Is(that)) { + return that->AsArray()->Bound()->AsBitset(); + } else if (that->IsFunction() && this->IsFunction() && this->Is(that)) { + return that->AsFunction()->Bound()->AsBitset(); + } + return that->BitsetGlb(); +} + + +template<class Config> +int TypeImpl<Config>::IndexInUnion( + int bound, UnionHandle unioned, int current_size) { + DCHECK(!this->IsUnion()); for (int i = 0; i < current_size; ++i) { - TypeHandle type = Config::union_get(unioned, i); - if (this->Is(type)) return true; + TypeHandle that = unioned->Get(i); + if (that->IsBitset()) { + if (BitsetType::Is(bound, that->AsBitset())) return i; + } else if (that->IsClass() && this->IsClass()) { + if (*this->AsClass()->Map() == *that->AsClass()->Map()) return i; + } else if (that->IsConstant() && this->IsConstant()) { + if (*this->AsConstant()->Value() == *that->AsConstant()->Value()) + return i; + } else if (that->IsContext() && this->IsContext()) { + if (this->Is(that)) 
return i; + } else if (that->IsArray() && this->IsArray()) { + if (this->Is(that)) return i; + } else if (that->IsFunction() && this->IsFunction()) { + if (this->Is(that)) return i; + } } - return false; + return -1; } -// Get non-bitsets from this which are not subsumed by union, store at unioned, -// starting at index. Returns updated index. +// Get non-bitsets from type, bounded by upper. +// Store at result starting at index. Returns updated index. template<class Config> int TypeImpl<Config>::ExtendUnion( - UnionedHandle result, TypeHandle type, int current_size) { - int old_size = current_size; - if (type->IsClass() || type->IsConstant()) { - if (!type->InUnion(result, old_size)) { - Config::union_set(result, current_size++, type); + UnionHandle result, int size, TypeHandle type, + TypeHandle other, bool is_intersect, Region* region) { + if (type->IsUnion()) { + UnionHandle unioned = handle(type->AsUnion()); + for (int i = 0; i < unioned->Length(); ++i) { + TypeHandle type_i = unioned->Get(i); + DCHECK(i == 0 || !(type_i->IsBitset() || type_i->Is(unioned->Get(0)))); + if (!type_i->IsBitset()) { + size = ExtendUnion(result, size, type_i, other, is_intersect, region); + } } - } else if (type->IsUnion()) { - UnionedHandle unioned = type->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle type = Config::union_get(unioned, i); - ASSERT(i == 0 || - !(type->IsBitset() || type->Is(Config::union_get(unioned, 0)))); - if (!type->IsBitset() && !type->InUnion(result, old_size)) { - Config::union_set(result, current_size++, type); + } else if (!type->IsBitset()) { + DCHECK(type->IsClass() || type->IsConstant() || type->IsRange() || + type->IsContext() || type->IsArray() || type->IsFunction()); + int inherent_bound = type->InherentBitsetLub(); + int old_bound = type->BitsetLub(); + int other_bound = type->BoundBy(other->unhandle()) & inherent_bound; + int new_bound = + is_intersect ? 
(old_bound & other_bound) : (old_bound | other_bound); + if (new_bound != BitsetType::kNone) { + int i = type->IndexInUnion(new_bound, result, size); + if (i == -1) { + i = size++; + } else if (result->Get(i)->IsBitset()) { + return size; // Already fully subsumed. + } else { + int type_i_bound = result->Get(i)->BitsetLub(); + new_bound |= type_i_bound; + if (new_bound == type_i_bound) return size; } + if (new_bound != old_bound) type = type->Rebound(new_bound, region); + result->Set(i, type); } } - return current_size; + return size; } -// Union is O(1) on simple bit unions, but O(n*m) on structured unions. -// TODO(rossberg): Should we use object sets somehow? Is it worth it? +// Union is O(1) on simple bitsets, but O(n*m) on structured unions. template<class Config> typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union( TypeHandle type1, TypeHandle type2, Region* region) { // Fast case: bit sets. if (type1->IsBitset() && type2->IsBitset()) { - return Config::from_bitset(type1->AsBitset() | type2->AsBitset(), region); + return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region); } // Fast case: top or bottom types. - if (type1->IsAny()) return type1; - if (type2->IsAny()) return type2; - if (type1->IsNone()) return type2; - if (type2->IsNone()) return type1; + if (type1->IsAny() || type2->IsNone()) return type1; + if (type2->IsAny() || type1->IsNone()) return type2; // Semi-fast case: Unioned objects are neither involved nor produced. if (!(type1->IsUnion() || type2->IsUnion())) { @@ -436,74 +600,47 @@ } // Slow case: may need to produce a Unioned object. - int size = type1->IsBitset() || type2->IsBitset() ? 1 : 0; + int size = 0; if (!type1->IsBitset()) { - size += (type1->IsUnion() ? Config::union_length(type1->AsUnion()) : 1); + size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1); } if (!type2->IsBitset()) { - size += (type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 1); + size += (type2->IsUnion() ? 
type2->AsUnion()->Length() : 1); } - ASSERT(size >= 2); - UnionedHandle unioned = Config::union_create(size, region); - size = 0; + int bitset = type1->BitsetGlb() | type2->BitsetGlb(); + if (bitset != BitsetType::kNone) ++size; + DCHECK(size >= 1); - int bitset = type1->GlbBitset() | type2->GlbBitset(); - if (bitset != kNone) { - Config::union_set(unioned, size++, Config::from_bitset(bitset, region)); + UnionHandle unioned = UnionType::New(size, region); + size = 0; + if (bitset != BitsetType::kNone) { + unioned->Set(size++, BitsetType::New(bitset, region)); } - size = ExtendUnion(unioned, type1, size); - size = ExtendUnion(unioned, type2, size); + size = ExtendUnion(unioned, size, type1, type2, false, region); + size = ExtendUnion(unioned, size, type2, type1, false, region); if (size == 1) { - return Config::union_get(unioned, 0); + return unioned->Get(0); } else { - Config::union_shrink(unioned, size); - return Config::from_union(unioned); + unioned->Shrink(size); + DCHECK(unioned->Wellformed()); + return unioned; } } -// Get non-bitsets from type which are also in other, store at unioned, -// starting at index. Returns updated index. 
-template<class Config> -int TypeImpl<Config>::ExtendIntersection( - UnionedHandle result, TypeHandle type, TypeHandle other, int current_size) { - int old_size = current_size; - if (type->IsClass() || type->IsConstant()) { - if (type->Is(other) && !type->InUnion(result, old_size)) { - Config::union_set(result, current_size++, type); - } - } else if (type->IsUnion()) { - UnionedHandle unioned = type->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle type = Config::union_get(unioned, i); - ASSERT(i == 0 || - !(type->IsBitset() || type->Is(Config::union_get(unioned, 0)))); - if (!type->IsBitset() && type->Is(other) && - !type->InUnion(result, old_size)) { - Config::union_set(result, current_size++, type); - } - } - } - return current_size; -} - - -// Intersection is O(1) on simple bit unions, but O(n*m) on structured unions. -// TODO(rossberg): Should we use object sets somehow? Is it worth it? +// Intersection is O(1) on simple bitsets, but O(n*m) on structured unions. template<class Config> typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect( TypeHandle type1, TypeHandle type2, Region* region) { // Fast case: bit sets. if (type1->IsBitset() && type2->IsBitset()) { - return Config::from_bitset(type1->AsBitset() & type2->AsBitset(), region); + return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region); } // Fast case: top or bottom types. - if (type1->IsNone()) return type1; - if (type2->IsNone()) return type2; - if (type1->IsAny()) return type2; - if (type2->IsAny()) return type1; + if (type1->IsNone() || type2->IsAny()) return type1; + if (type2->IsNone() || type1->IsAny()) return type2; // Semi-fast case: Unioned objects are neither involved nor produced. if (!(type1->IsUnion() || type2->IsUnion())) { @@ -514,90 +651,208 @@ // Slow case: may need to produce a Unioned object. int size = 0; if (!type1->IsBitset()) { - size = (type1->IsUnion() ? 
Config::union_length(type1->AsUnion()) : 2); + size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1); } if (!type2->IsBitset()) { - int size2 = (type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 2); - size = (size == 0 ? size2 : Min(size, size2)); + size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1); } - ASSERT(size >= 2); - UnionedHandle unioned = Config::union_create(size, region); - size = 0; + int bitset = type1->BitsetGlb() & type2->BitsetGlb(); + if (bitset != BitsetType::kNone) ++size; + DCHECK(size >= 1); - int bitset = type1->GlbBitset() & type2->GlbBitset(); - if (bitset != kNone) { - Config::union_set(unioned, size++, Config::from_bitset(bitset, region)); + UnionHandle unioned = UnionType::New(size, region); + size = 0; + if (bitset != BitsetType::kNone) { + unioned->Set(size++, BitsetType::New(bitset, region)); } - size = ExtendIntersection(unioned, type1, type2, size); - size = ExtendIntersection(unioned, type2, type1, size); + size = ExtendUnion(unioned, size, type1, type2, true, region); + size = ExtendUnion(unioned, size, type2, type1, true, region); if (size == 0) { return None(region); } else if (size == 1) { - return Config::union_get(unioned, 0); + return unioned->Get(0); } else { - Config::union_shrink(unioned, size); - return Config::from_union(unioned); + unioned->Shrink(size); + DCHECK(unioned->Wellformed()); + return unioned; } } +// ----------------------------------------------------------------------------- +// Iteration. 
+ template<class Config> -template<class OtherType> -typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert( - typename OtherType::TypeHandle type, Region* region) { - if (type->IsBitset()) { - return Config::from_bitset(type->AsBitset(), region); - } else if (type->IsClass()) { - return Config::from_class(type->AsClass(), type->LubBitset(), region); - } else if (type->IsConstant()) { - return Config::from_constant(type->AsConstant(), type->LubBitset(), region); +int TypeImpl<Config>::NumClasses() { + DisallowHeapAllocation no_allocation; + if (this->IsClass()) { + return 1; + } else if (this->IsUnion()) { + UnionHandle unioned = handle(this->AsUnion()); + int result = 0; + for (int i = 0; i < unioned->Length(); ++i) { + if (unioned->Get(i)->IsClass()) ++result; + } + return result; } else { - ASSERT(type->IsUnion()); - typename OtherType::UnionedHandle unioned = type->AsUnion(); - int length = OtherType::UnionLength(unioned); - UnionedHandle new_unioned = Config::union_create(length, region); - for (int i = 0; i < length; ++i) { - Config::union_set(new_unioned, i, - Convert<OtherType>(OtherType::UnionGet(unioned, i), region)); + return 0; + } +} + + +template<class Config> +int TypeImpl<Config>::NumConstants() { + DisallowHeapAllocation no_allocation; + if (this->IsConstant()) { + return 1; + } else if (this->IsUnion()) { + UnionHandle unioned = handle(this->AsUnion()); + int result = 0; + for (int i = 0; i < unioned->Length(); ++i) { + if (unioned->Get(i)->IsConstant()) ++result; } - return Config::from_union(new_unioned); + return result; + } else { + return 0; + } +} + + +template<class Config> template<class T> +typename TypeImpl<Config>::TypeHandle +TypeImpl<Config>::Iterator<T>::get_type() { + DCHECK(!Done()); + return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_; +} + + +// C++ cannot specialise nested templates, so we have to go through this +// contortion with an auxiliary template to simulate it. 
+template<class Config, class T> +struct TypeImplIteratorAux { + static bool matches(typename TypeImpl<Config>::TypeHandle type); + static i::Handle<T> current(typename TypeImpl<Config>::TypeHandle type); +}; + +template<class Config> +struct TypeImplIteratorAux<Config, i::Map> { + static bool matches(typename TypeImpl<Config>::TypeHandle type) { + return type->IsClass(); + } + static i::Handle<i::Map> current(typename TypeImpl<Config>::TypeHandle type) { + return type->AsClass()->Map(); + } +}; + +template<class Config> +struct TypeImplIteratorAux<Config, i::Object> { + static bool matches(typename TypeImpl<Config>::TypeHandle type) { + return type->IsConstant(); + } + static i::Handle<i::Object> current( + typename TypeImpl<Config>::TypeHandle type) { + return type->AsConstant()->Value(); } +}; + +template<class Config> template<class T> +bool TypeImpl<Config>::Iterator<T>::matches(TypeHandle type) { + return TypeImplIteratorAux<Config, T>::matches(type); +} + +template<class Config> template<class T> +i::Handle<T> TypeImpl<Config>::Iterator<T>::Current() { + return TypeImplIteratorAux<Config, T>::current(get_type()); } -// TODO(rossberg): this does not belong here. 
-Representation Representation::FromType(Type* type) { - if (type->Is(Type::None())) return Representation::None(); - if (type->Is(Type::SignedSmall())) return Representation::Smi(); - if (type->Is(Type::Signed32())) return Representation::Integer32(); - if (type->Is(Type::Number())) return Representation::Double(); - return Representation::Tagged(); +template<class Config> template<class T> +void TypeImpl<Config>::Iterator<T>::Advance() { + DisallowHeapAllocation no_allocation; + ++index_; + if (type_->IsUnion()) { + UnionHandle unioned = handle(type_->AsUnion()); + for (; index_ < unioned->Length(); ++index_) { + if (matches(unioned->Get(index_))) return; + } + } else if (index_ == 0 && matches(type_)) { + return; + } + index_ = -1; } -#ifdef OBJECT_PRINT +// ----------------------------------------------------------------------------- +// Conversion between low-level representations. + template<class Config> -void TypeImpl<Config>::TypePrint(PrintDimension dim) { - TypePrint(stdout, dim); - PrintF(stdout, "\n"); - Flush(stdout); +template<class OtherType> +typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert( + typename OtherType::TypeHandle type, Region* region) { + if (type->IsBitset()) { + return BitsetType::New(type->AsBitset(), region); + } else if (type->IsClass()) { + TypeHandle bound = BitsetType::New(type->BitsetLub(), region); + return ClassType::New(type->AsClass()->Map(), bound, region); + } else if (type->IsConstant()) { + TypeHandle bound = Convert<OtherType>(type->AsConstant()->Bound(), region); + return ConstantType::New(type->AsConstant()->Value(), bound, region); + } else if (type->IsRange()) { + TypeHandle bound = Convert<OtherType>(type->AsRange()->Bound(), region); + return RangeType::New( + type->AsRange()->Min(), type->AsRange()->Max(), bound, region); + } else if (type->IsContext()) { + TypeHandle bound = Convert<OtherType>(type->AsContext()->Bound(), region); + TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), 
region); + return ContextType::New(outer, bound, region); + } else if (type->IsUnion()) { + int length = type->AsUnion()->Length(); + UnionHandle unioned = UnionType::New(length, region); + for (int i = 0; i < length; ++i) { + TypeHandle t = Convert<OtherType>(type->AsUnion()->Get(i), region); + unioned->Set(i, t); + } + return unioned; + } else if (type->IsArray()) { + TypeHandle element = Convert<OtherType>(type->AsArray()->Element(), region); + TypeHandle bound = Convert<OtherType>(type->AsArray()->Bound(), region); + return ArrayType::New(element, bound, region); + } else if (type->IsFunction()) { + TypeHandle res = Convert<OtherType>(type->AsFunction()->Result(), region); + TypeHandle rcv = Convert<OtherType>(type->AsFunction()->Receiver(), region); + TypeHandle bound = Convert<OtherType>(type->AsFunction()->Bound(), region); + FunctionHandle function = FunctionType::New( + res, rcv, bound, type->AsFunction()->Arity(), region); + for (int i = 0; i < function->Arity(); ++i) { + TypeHandle param = Convert<OtherType>( + type->AsFunction()->Parameter(i), region); + function->InitParameter(i, param); + } + return function; + } else { + UNREACHABLE(); + return None(region); + } } +// ----------------------------------------------------------------------------- +// Printing. 
+ template<class Config> -const char* TypeImpl<Config>::bitset_name(int bitset) { +const char* TypeImpl<Config>::BitsetType::Name(int bitset) { switch (bitset) { - case kAny & kRepresentation: return "Any"; - #define PRINT_COMPOSED_TYPE(type, value) \ - case k##type & kRepresentation: return #type; - REPRESENTATION_BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE) - #undef PRINT_COMPOSED_TYPE - - #define PRINT_COMPOSED_TYPE(type, value) \ - case k##type & kSemantic: return #type; - SEMANTIC_BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE) - #undef PRINT_COMPOSED_TYPE + case REPRESENTATION(kAny): return "Any"; + #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \ + case REPRESENTATION(k##type): return #type; + REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE) + #undef RETURN_NAMED_REPRESENTATION_TYPE + + #define RETURN_NAMED_SEMANTIC_TYPE(type, value) \ + case SEMANTIC(k##type): return #type; + SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE) + #undef RETURN_NAMED_SEMANTIC_TYPE default: return NULL; @@ -605,78 +860,115 @@ } -template<class Config> -void TypeImpl<Config>::BitsetTypePrint(FILE* out, int bitset) { - const char* name = bitset_name(bitset); +template <class Config> +void TypeImpl<Config>::BitsetType::Print(OStream& os, // NOLINT + int bitset) { + DisallowHeapAllocation no_allocation; + const char* name = Name(bitset); if (name != NULL) { - PrintF(out, "%s", name); - } else { - static const int named_bitsets[] = { - #define BITSET_CONSTANT(type, value) k##type & kRepresentation, + os << name; + return; + } + + static const int named_bitsets[] = { +#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type), REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT) - #undef BITSET_CONSTANT +#undef BITSET_CONSTANT - #define BITSET_CONSTANT(type, value) k##type & kSemantic, +#define BITSET_CONSTANT(type, value) SEMANTIC(k##type), SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT) - #undef BITSET_CONSTANT - }; +#undef BITSET_CONSTANT + }; + + bool is_first = true; + os << 
"("; + for (int i(ARRAY_SIZE(named_bitsets) - 1); bitset != 0 && i >= 0; --i) { + int subset = named_bitsets[i]; + if ((bitset & subset) == subset) { + if (!is_first) os << " | "; + is_first = false; + os << Name(subset); + bitset -= subset; + } + } + DCHECK(bitset == 0); + os << ")"; +} + - bool is_first = true; - PrintF(out, "("); - for (int i(ARRAY_SIZE(named_bitsets) - 1); bitset != 0 && i >= 0; --i) { - int subset = named_bitsets[i]; - if ((bitset & subset) == subset) { - if (!is_first) PrintF(out, " | "); - is_first = false; - PrintF(out, "%s", bitset_name(subset)); - bitset -= subset; +template <class Config> +void TypeImpl<Config>::PrintTo(OStream& os, PrintDimension dim) { // NOLINT + DisallowHeapAllocation no_allocation; + if (dim != REPRESENTATION_DIM) { + if (this->IsBitset()) { + BitsetType::Print(os, SEMANTIC(this->AsBitset())); + } else if (this->IsClass()) { + os << "Class(" << static_cast<void*>(*this->AsClass()->Map()) << " < "; + BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim); + os << ")"; + } else if (this->IsConstant()) { + os << "Constant(" << static_cast<void*>(*this->AsConstant()->Value()) + << " : "; + BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim); + os << ")"; + } else if (this->IsRange()) { + os << "Range(" << this->AsRange()->Min() + << ".." 
<< this->AsRange()->Max() << " : "; + BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim); + os << ")"; + } else if (this->IsContext()) { + os << "Context("; + this->AsContext()->Outer()->PrintTo(os, dim); + os << ")"; + } else if (this->IsUnion()) { + os << "("; + UnionHandle unioned = handle(this->AsUnion()); + for (int i = 0; i < unioned->Length(); ++i) { + TypeHandle type_i = unioned->Get(i); + if (i > 0) os << " | "; + type_i->PrintTo(os, dim); + } + os << ")"; + } else if (this->IsArray()) { + os << "Array("; + AsArray()->Element()->PrintTo(os, dim); + os << ")"; + } else if (this->IsFunction()) { + if (!this->AsFunction()->Receiver()->IsAny()) { + this->AsFunction()->Receiver()->PrintTo(os, dim); + os << "."; } + os << "("; + for (int i = 0; i < this->AsFunction()->Arity(); ++i) { + if (i > 0) os << ", "; + this->AsFunction()->Parameter(i)->PrintTo(os, dim); + } + os << ")->"; + this->AsFunction()->Result()->PrintTo(os, dim); + } else { + UNREACHABLE(); } - ASSERT(bitset == 0); - PrintF(out, ")"); + } + if (dim == BOTH_DIMS) os << "/"; + if (dim != SEMANTIC_DIM) { + BitsetType::Print(os, REPRESENTATION(this->BitsetLub())); } } -template<class Config> -void TypeImpl<Config>::TypePrint(FILE* out, PrintDimension dim) { - if (this->IsBitset()) { - int bitset = this->AsBitset(); - switch (dim) { - case BOTH_DIMS: - BitsetTypePrint(out, bitset & kSemantic); - PrintF("/"); - BitsetTypePrint(out, bitset & kRepresentation); - break; - case SEMANTIC_DIM: - BitsetTypePrint(out, bitset & kSemantic); - break; - case REPRESENTATION_DIM: - BitsetTypePrint(out, bitset & kRepresentation); - break; - } - } else if (this->IsConstant()) { - PrintF(out, "Constant(%p : ", static_cast<void*>(*this->AsConstant())); - Config::from_bitset(this->LubBitset())->TypePrint(out); - PrintF(")"); - } else if (this->IsClass()) { - PrintF(out, "Class(%p < ", static_cast<void*>(*this->AsClass())); - Config::from_bitset(this->LubBitset())->TypePrint(out); - PrintF(")"); - } else if 
(this->IsUnion()) { - PrintF(out, "("); - UnionedHandle unioned = this->AsUnion(); - for (int i = 0; i < Config::union_length(unioned); ++i) { - TypeHandle type_i = Config::union_get(unioned, i); - if (i > 0) PrintF(out, " | "); - type_i->TypePrint(out); - } - PrintF(out, ")"); - } +#ifdef DEBUG +template <class Config> +void TypeImpl<Config>::Print() { + OFStream os(stdout); + PrintTo(os); + os << endl; } #endif +// ----------------------------------------------------------------------------- +// Instantiations. + template class TypeImpl<ZoneTypeConfig>; template class TypeImpl<ZoneTypeConfig>::Iterator<i::Map>; template class TypeImpl<ZoneTypeConfig>::Iterator<i::Object>; @@ -692,5 +984,4 @@ TypeImpl<HeapTypeConfig>::Convert<Type>( TypeImpl<ZoneTypeConfig>::TypeHandle, TypeImpl<HeapTypeConfig>::Region*); - } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/types.h nodejs-0.11.15/deps/v8/src/types.h --- nodejs-0.11.13/deps/v8/src/types.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/types.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,41 +1,19 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_TYPES_H_ #define V8_TYPES_H_ -#include "v8.h" - -#include "objects.h" +#include "src/factory.h" +#include "src/handles.h" +#include "src/ostreams.h" namespace v8 { namespace internal { - +// SUMMARY +// // A simple type system for compiler-internal use. It is based entirely on // union types, and all subtyping hence amounts to set inclusion. Besides the // obvious primitive types and some predefined unions, the type language also @@ -45,12 +23,13 @@ // Types consist of two dimensions: semantic (value range) and representation. // Both are related through subtyping. 
// +// SEMANTIC DIMENSION +// // The following equations and inequations hold for the semantic axis: // // None <= T // T <= Any // -// Oddball = Boolean \/ Null \/ Undefined // Number = Signed32 \/ Unsigned32 \/ Double // Smi <= Signed32 // Name = String \/ Symbol @@ -66,22 +45,35 @@ // // Class(map) < T iff instance_type(map) < T // Constant(x) < T iff instance_type(map(x)) < T -// -// Note that Constant(x) < Class(map(x)) does _not_ hold, since x's map can +// Array(T) < Array +// Function(R, S, T0, T1, ...) < Function +// Context(T) < Internal +// +// Both structural Array and Function types are invariant in all parameters; +// relaxing this would make Union and Intersect operations more involved. +// There is no subtyping relation between Array, Function, or Context types +// and respective Constant types, since these types cannot be reconstructed +// for arbitrary heap values. +// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can // change! (Its instance type cannot, however.) // TODO(rossberg): the latter is not currently true for proxies, because of fix, // but will hold once we implement direct proxies. +// However, we also define a 'temporal' variant of the subtyping relation that +// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)). 
+// +// REPRESENTATIONAL DIMENSION // // For the representation axis, the following holds: // // None <= R // R <= Any // -// UntaggedInt <= UntaggedInt8 \/ UntaggedInt16 \/ UntaggedInt32) -// UntaggedFloat <= UntaggedFloat32 \/ UntaggedFloat64 -// UntaggedNumber <= UntaggedInt \/ UntaggedFloat -// Untagged <= UntaggedNumber \/ UntaggedPtr -// Tagged <= TaggedInt \/ TaggedPtr +// UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/ +// UntaggedInt16 \/ UntaggedInt32 +// UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64 +// UntaggedNumber = UntaggedInt \/ UntaggedFloat +// Untagged = UntaggedNumber \/ UntaggedPtr +// Tagged = TaggedInt \/ TaggedPtr // // Subtyping relates the two dimensions, for example: // @@ -96,6 +88,8 @@ // SignedSmall /\ TaggedInt (a 'smi') // Number /\ TaggedPtr (a heap number) // +// PREDICATES +// // There are two main functions for testing types: // // T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2) @@ -109,7 +103,23 @@ // lattice. That is intentional. It should always be possible to refine the // lattice (e.g., splitting up number types further) without invalidating any // existing assumptions or tests. -// Consequently, do not use pointer equality for type tests, always use Is! +// Consequently, do not normally use Equals for type tests, always use Is! +// +// The NowIs operator implements state-sensitive subtying, as described above. +// Any compilation decision based on such temporary properties requires runtime +// guarding! +// +// PROPERTIES +// +// Various formal properties hold for constructors, operators, and predicates +// over types. For example, constructors are injective, subtyping is a complete +// partial order, union and intersection satisfy the usual algebraic properties. +// +// See test/cctest/test-types.cc for a comprehensive executable specification, +// especially with respect to the properties of the more exotic 'temporal' +// constructors and predicates (those prefixed 'Now'). 
+// +// IMPLEMENTATION // // Internally, all 'primitive' types, and their unions, are represented as // bitsets. Class is a heap pointer to the respective map. Only Constant's, or @@ -125,15 +135,19 @@ // them. For zone types, no query method touches the heap, only constructors do. +// ----------------------------------------------------------------------------- +// Values for bitset types + #define MASK_BITSET_TYPE_LIST(V) \ - V(Representation, static_cast<int>(0xff800000)) \ - V(Semantic, static_cast<int>(0x007fffff)) + V(Representation, static_cast<int>(0xffc00000)) \ + V(Semantic, static_cast<int>(0x003fffff)) -#define REPRESENTATION(k) ((k) & kRepresentation) -#define SEMANTIC(k) ((k) & kSemantic) +#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation) +#define SEMANTIC(k) ((k) & BitsetType::kSemantic) #define REPRESENTATION_BITSET_TYPE_LIST(V) \ V(None, 0) \ + V(UntaggedInt1, 1 << 22 | kSemantic) \ V(UntaggedInt8, 1 << 23 | kSemantic) \ V(UntaggedInt16, 1 << 24 | kSemantic) \ V(UntaggedInt32, 1 << 25 | kSemantic) \ @@ -141,47 +155,58 @@ V(UntaggedFloat64, 1 << 27 | kSemantic) \ V(UntaggedPtr, 1 << 28 | kSemantic) \ V(TaggedInt, 1 << 29 | kSemantic) \ - V(TaggedPtr, -1 << 30 | kSemantic) /* MSB has to be sign-extended */ \ + /* MSB has to be sign-extended */ \ + V(TaggedPtr, static_cast<int>(~0u << 30) | kSemantic) \ \ - V(UntaggedInt, kUntaggedInt8 | kUntaggedInt16 | kUntaggedInt32) \ - V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \ - V(UntaggedNumber, kUntaggedInt | kUntaggedFloat) \ - V(Untagged, kUntaggedNumber | kUntaggedPtr) \ + V(UntaggedInt, kUntaggedInt1 | kUntaggedInt8 | \ + kUntaggedInt16 | kUntaggedInt32) \ + V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \ + V(UntaggedNumber, kUntaggedInt | kUntaggedFloat) \ + V(Untagged, kUntaggedNumber | kUntaggedPtr) \ V(Tagged, kTaggedInt | kTaggedPtr) #define SEMANTIC_BITSET_TYPE_LIST(V) \ V(Null, 1 << 0 | REPRESENTATION(kTaggedPtr)) \ V(Undefined, 1 << 1 | REPRESENTATION(kTaggedPtr)) \ 
V(Boolean, 1 << 2 | REPRESENTATION(kTaggedPtr)) \ - V(SignedSmall, 1 << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \ - V(OtherSigned32, 1 << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \ - V(Unsigned32, 1 << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \ - V(Float, 1 << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \ - V(Symbol, 1 << 7 | REPRESENTATION(kTaggedPtr)) \ - V(InternalizedString, 1 << 8 | REPRESENTATION(kTaggedPtr)) \ - V(OtherString, 1 << 9 | REPRESENTATION(kTaggedPtr)) \ - V(Undetectable, 1 << 10 | REPRESENTATION(kTaggedPtr)) \ - V(Array, 1 << 11 | REPRESENTATION(kTaggedPtr)) \ - V(Function, 1 << 12 | REPRESENTATION(kTaggedPtr)) \ - V(RegExp, 1 << 13 | REPRESENTATION(kTaggedPtr)) \ - V(OtherObject, 1 << 14 | REPRESENTATION(kTaggedPtr)) \ - V(Proxy, 1 << 15 | REPRESENTATION(kTaggedPtr)) \ - V(Internal, 1 << 16 | REPRESENTATION(kTagged | kUntagged)) \ + V(UnsignedSmall, 1 << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(OtherSignedSmall, 1 << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(OtherUnsigned31, 1 << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(OtherUnsigned32, 1 << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(OtherSigned32, 1 << 7 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(MinusZero, 1 << 8 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(NaN, 1 << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(OtherNumber, 1 << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \ + V(Symbol, 1 << 11 | REPRESENTATION(kTaggedPtr)) \ + V(InternalizedString, 1 << 12 | REPRESENTATION(kTaggedPtr)) \ + V(OtherString, 1 << 13 | REPRESENTATION(kTaggedPtr)) \ + V(Undetectable, 1 << 14 | REPRESENTATION(kTaggedPtr)) \ + V(Array, 1 << 15 | REPRESENTATION(kTaggedPtr)) \ + V(Buffer, 1 << 16 | REPRESENTATION(kTaggedPtr)) \ + V(Function, 1 << 17 | REPRESENTATION(kTaggedPtr)) \ + V(RegExp, 1 << 18 | REPRESENTATION(kTaggedPtr)) \ + V(OtherObject, 1 << 19 | REPRESENTATION(kTaggedPtr)) \ + V(Proxy, 1 << 20 | 
REPRESENTATION(kTaggedPtr)) \ + V(Internal, 1 << 21 | REPRESENTATION(kTagged | kUntagged)) \ \ - V(Oddball, kBoolean | kNull | kUndefined) \ - V(Signed32, kSignedSmall | kOtherSigned32) \ - V(Number, kSigned32 | kUnsigned32 | kFloat) \ - V(String, kInternalizedString | kOtherString) \ - V(UniqueName, kSymbol | kInternalizedString) \ - V(Name, kSymbol | kString) \ - V(NumberOrString, kNumber | kString) \ - V(DetectableObject, kArray | kFunction | kRegExp | kOtherObject) \ - V(DetectableReceiver, kDetectableObject | kProxy) \ - V(Detectable, kDetectableReceiver | kNumber | kName) \ - V(Object, kDetectableObject | kUndetectable) \ - V(Receiver, kObject | kProxy) \ - V(NonNumber, kOddball | kName | kReceiver | kInternal) \ - V(Any, kNumber | kNonNumber) + V(SignedSmall, kUnsignedSmall | kOtherSignedSmall) \ + V(Signed32, kSignedSmall | kOtherUnsigned31 | kOtherSigned32) \ + V(Unsigned32, kUnsignedSmall | kOtherUnsigned31 | kOtherUnsigned32) \ + V(Integral32, kSigned32 | kUnsigned32) \ + V(Number, kIntegral32 | kMinusZero | kNaN | kOtherNumber) \ + V(String, kInternalizedString | kOtherString) \ + V(UniqueName, kSymbol | kInternalizedString) \ + V(Name, kSymbol | kString) \ + V(NumberOrString, kNumber | kString) \ + V(Primitive, kNumber | kName | kBoolean | kNull | kUndefined) \ + V(DetectableObject, kArray | kFunction | kRegExp | kOtherObject) \ + V(DetectableReceiver, kDetectableObject | kProxy) \ + V(Detectable, kDetectableReceiver | kNumber | kName) \ + V(Object, kDetectableObject | kUndetectable) \ + V(Receiver, kObject | kProxy) \ + V(NonNumber, kBoolean | kName | kNull | kReceiver | \ + kUndefined | kInternal) \ + V(Any, -1) #define BITSET_TYPE_LIST(V) \ MASK_BITSET_TYPE_LIST(V) \ @@ -189,104 +214,200 @@ SEMANTIC_BITSET_TYPE_LIST(V) +// ----------------------------------------------------------------------------- +// The abstract Type class, parameterized over the low-level representation. 
+ // struct Config { +// typedef TypeImpl<Config> Type; // typedef Base; -// typedef Unioned; +// typedef Struct; // typedef Region; // template<class> struct Handle { typedef type; } // No template typedefs... -// static Handle<Type>::type handle(Type* type); // !is_bitset(type) +// template<class T> static Handle<T>::type handle(T* t); // !is_bitset(t) +// template<class T> static Handle<T>::type cast(Handle<Type>::type); // static bool is_bitset(Type*); // static bool is_class(Type*); -// static bool is_constant(Type*); -// static bool is_union(Type*); +// static bool is_struct(Type*, int tag); // static int as_bitset(Type*); // static i::Handle<i::Map> as_class(Type*); -// static i::Handle<i::Object> as_constant(Type*); -// static Handle<Unioned>::type as_union(Type*); +// static Handle<Struct>::type as_struct(Type*); // static Type* from_bitset(int bitset); // static Handle<Type>::type from_bitset(int bitset, Region*); -// static Handle<Type>::type from_class(i::Handle<Map>, int lub, Region*); -// static Handle<Type>::type from_constant(i::Handle<Object>, int, Region*); -// static Handle<Type>::type from_union(Handle<Unioned>::type); -// static Handle<Unioned>::type union_create(int size, Region*); -// static void union_shrink(Handle<Unioned>::type, int size); -// static Handle<Type>::type union_get(Handle<Unioned>::type, int); -// static void union_set(Handle<Unioned>::type, int, Handle<Type>::type); -// static int union_length(Handle<Unioned>::type); -// static int lub_bitset(Type*); +// static Handle<Type>::type from_class(i::Handle<Map>, Region*); +// static Handle<Type>::type from_struct(Handle<Struct>::type, int tag); +// static Handle<Struct>::type struct_create(int tag, int length, Region*); +// static void struct_shrink(Handle<Struct>::type, int length); +// static int struct_tag(Handle<Struct>::type); +// static int struct_length(Handle<Struct>::type); +// static Handle<Type>::type struct_get(Handle<Struct>::type, int); +// static void 
struct_set(Handle<Struct>::type, int, Handle<Type>::type); +// template<class V> +// static i::Handle<V> struct_get_value(Handle<Struct>::type, int); +// template<class V> +// static void struct_set_value(Handle<Struct>::type, int, i::Handle<V>); // } template<class Config> class TypeImpl : public Config::Base { public: + // Auxiliary types. + + class BitsetType; // Internal + class StructuralType; // Internal + class UnionType; // Internal + + class ClassType; + class ConstantType; + class RangeType; + class ContextType; + class ArrayType; + class FunctionType; + typedef typename Config::template Handle<TypeImpl>::type TypeHandle; + typedef typename Config::template Handle<ClassType>::type ClassHandle; + typedef typename Config::template Handle<ConstantType>::type ConstantHandle; + typedef typename Config::template Handle<RangeType>::type RangeHandle; + typedef typename Config::template Handle<ContextType>::type ContextHandle; + typedef typename Config::template Handle<ArrayType>::type ArrayHandle; + typedef typename Config::template Handle<FunctionType>::type FunctionHandle; + typedef typename Config::template Handle<UnionType>::type UnionHandle; typedef typename Config::Region Region; - #define DEFINE_TYPE_CONSTRUCTOR(type, value) \ - static TypeImpl* type() { return Config::from_bitset(k##type); } \ - static TypeHandle type(Region* region) { \ - return Config::from_bitset(k##type, region); \ + // Constructors. 
+ + #define DEFINE_TYPE_CONSTRUCTOR(type, value) \ + static TypeImpl* type() { return BitsetType::New(BitsetType::k##type); } \ + static TypeHandle type(Region* region) { \ + return BitsetType::New(BitsetType::k##type, region); \ } BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR) #undef DEFINE_TYPE_CONSTRUCTOR static TypeHandle Class(i::Handle<i::Map> map, Region* region) { - return Config::from_class(map, LubBitset(*map), region); + return ClassType::New(map, region); } static TypeHandle Constant(i::Handle<i::Object> value, Region* region) { - return Config::from_constant(value, LubBitset(*value), region); + // TODO(neis): Return RangeType for numerical values. + return ConstantType::New(value, region); + } + static TypeHandle Range(double min, double max, Region* region) { + return RangeType::New(min, max, region); + } + static TypeHandle Context(TypeHandle outer, Region* region) { + return ContextType::New(outer, region); + } + static TypeHandle Array(TypeHandle element, Region* region) { + return ArrayType::New(element, region); + } + static FunctionHandle Function( + TypeHandle result, TypeHandle receiver, int arity, Region* region) { + return FunctionType::New(result, receiver, arity, region); + } + static TypeHandle Function(TypeHandle result, Region* region) { + return Function(result, Any(region), 0, region); + } + static TypeHandle Function( + TypeHandle result, TypeHandle param0, Region* region) { + FunctionHandle function = Function(result, Any(region), 1, region); + function->InitParameter(0, param0); + return function; + } + static TypeHandle Function( + TypeHandle result, TypeHandle param0, TypeHandle param1, Region* region) { + FunctionHandle function = Function(result, Any(region), 2, region); + function->InitParameter(0, param0); + function->InitParameter(1, param1); + return function; + } + static TypeHandle Function( + TypeHandle result, TypeHandle param0, TypeHandle param1, + TypeHandle param2, Region* region) { + FunctionHandle function = 
Function(result, Any(region), 3, region); + function->InitParameter(0, param0); + function->InitParameter(1, param1); + function->InitParameter(2, param2); + return function; } static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg); static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg); + static TypeHandle Of(double value, Region* region) { + return Config::from_bitset(BitsetType::Lub(value), region); + } + static TypeHandle Of(i::Object* value, Region* region) { + return Config::from_bitset(BitsetType::Lub(value), region); + } static TypeHandle Of(i::Handle<i::Object> value, Region* region) { - return Config::from_bitset(LubBitset(*value), region); + return Of(*value, region); } + // Predicates. + + bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); } + bool Is(TypeImpl* that) { return this == that || this->SlowIs(that); } template<class TypeHandle> bool Is(TypeHandle that) { return this->Is(*that); } + bool Maybe(TypeImpl* that); template<class TypeHandle> bool Maybe(TypeHandle that) { return this->Maybe(*that); } - // State-dependent versions of Of and Is that consider subtyping between + bool Equals(TypeImpl* that) { return this->Is(that) && that->Is(this); } + template<class TypeHandle> + bool Equals(TypeHandle that) { return this->Equals(*that); } + + // Equivalent to Constant(value)->Is(this), but avoiding allocation. + bool Contains(i::Object* val); + bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); } + + // State-dependent versions of the above that consider subtyping between // a constant and its map class. 
- static TypeHandle OfCurrently(i::Handle<i::Object> value, Region* region); - bool IsCurrently(TypeImpl* that); + inline static TypeHandle NowOf(i::Object* value, Region* region); + static TypeHandle NowOf(i::Handle<i::Object> value, Region* region) { + return NowOf(*value, region); + } + bool NowIs(TypeImpl* that); template<class TypeHandle> - bool IsCurrently(TypeHandle that) { return this->IsCurrently(*that); } + bool NowIs(TypeHandle that) { return this->NowIs(*that); } + inline bool NowContains(i::Object* val); + bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); } - bool IsClass() { return Config::is_class(this); } - bool IsConstant() { return Config::is_constant(this); } - i::Handle<i::Map> AsClass() { return Config::as_class(this); } - i::Handle<i::Object> AsConstant() { return Config::as_constant(this); } + bool NowStable(); - int NumClasses(); - int NumConstants(); + // Inspection. - template<class T> - class Iterator { - public: - bool Done() const { return index_ < 0; } - i::Handle<T> Current(); - void Advance(); - - private: - template<class> friend class TypeImpl; - - Iterator() : index_(-1) {} - explicit Iterator(TypeHandle type) : type_(type), index_(-1) { - Advance(); - } + bool IsClass() { + return Config::is_class(this) + || Config::is_struct(this, StructuralType::kClassTag); + } + bool IsConstant() { + return Config::is_struct(this, StructuralType::kConstantTag); + } + bool IsRange() { + return Config::is_struct(this, StructuralType::kRangeTag); + } + bool IsContext() { + return Config::is_struct(this, StructuralType::kContextTag); + } + bool IsArray() { + return Config::is_struct(this, StructuralType::kArrayTag); + } + bool IsFunction() { + return Config::is_struct(this, StructuralType::kFunctionTag); + } - inline bool matches(TypeHandle type); - inline TypeHandle get_type(); + ClassType* AsClass() { return ClassType::cast(this); } + ConstantType* AsConstant() { return ConstantType::cast(this); } + RangeType* 
AsRange() { return RangeType::cast(this); } + ContextType* AsContext() { return ContextType::cast(this); } + ArrayType* AsArray() { return ArrayType::cast(this); } + FunctionType* AsFunction() { return FunctionType::cast(this); } - TypeHandle type_; - int index_; - }; + int NumClasses(); + int NumConstants(); + template<class T> class Iterator; Iterator<i::Map> Classes() { if (this->IsBitset()) return Iterator<i::Map>(); return Iterator<i::Map>(Config::handle(this)); @@ -296,32 +417,75 @@ return Iterator<i::Object>(Config::handle(this)); } - static TypeImpl* cast(typename Config::Base* object) { - TypeImpl* t = static_cast<TypeImpl*>(object); - ASSERT(t->IsBitset() || t->IsClass() || t->IsConstant() || t->IsUnion()); - return t; - } + // Casting and conversion. + + static inline TypeImpl* cast(typename Config::Base* object); template<class OtherTypeImpl> static TypeHandle Convert( typename OtherTypeImpl::TypeHandle type, Region* region); -#ifdef OBJECT_PRINT + // Printing. + enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM }; - void TypePrint(PrintDimension = BOTH_DIMS); - void TypePrint(FILE* out, PrintDimension = BOTH_DIMS); + + void PrintTo(OStream& os, PrintDimension dim = BOTH_DIMS); // NOLINT + +#ifdef DEBUG + void Print(); #endif - private: + protected: + // Friends. + template<class> friend class Iterator; template<class> friend class TypeImpl; - // A union is a fixed array containing types. Invariants: - // - its length is at least 2 - // - at most one field is a bitset, and it must go into index 0 - // - no field is a union - typedef typename Config::Unioned Unioned; - typedef typename Config::template Handle<Unioned>::type UnionedHandle; + // Handle conversion. + + template<class T> + static typename Config::template Handle<T>::type handle(T* type) { + return Config::handle(type); + } + TypeImpl* unhandle() { return this; } + + // Internal inspection. 
+ + bool IsNone() { return this == None(); } + bool IsAny() { return this == Any(); } + bool IsBitset() { return Config::is_bitset(this); } + bool IsUnion() { return Config::is_struct(this, StructuralType::kUnionTag); } + + int AsBitset() { + DCHECK(this->IsBitset()); + return static_cast<BitsetType*>(this)->Bitset(); + } + UnionType* AsUnion() { return UnionType::cast(this); } + + // Auxiliary functions. + + int BitsetGlb() { return BitsetType::Glb(this); } + int BitsetLub() { return BitsetType::Lub(this); } + int InherentBitsetLub() { return BitsetType::InherentLub(this); } + + bool SlowIs(TypeImpl* that); + + TypeHandle Rebound(int bitset, Region* region); + int BoundBy(TypeImpl* that); + int IndexInUnion(int bound, UnionHandle unioned, int current_size); + static int ExtendUnion( + UnionHandle unioned, int current_size, TypeHandle t, + TypeHandle other, bool is_intersect, Region* region); +}; + + +// ----------------------------------------------------------------------------- +// Bitset types (internal). 
+ +template<class Config> +class TypeImpl<Config>::BitsetType : public TypeImpl<Config> { + protected: + friend class TypeImpl<Config>; enum { #define DECLARE_TYPE(type, value) k##type = (value), @@ -330,264 +494,456 @@ kUnusedEOL = 0 }; - bool IsNone() { return this == None(); } - bool IsAny() { return this == Any(); } - bool IsBitset() { return Config::is_bitset(this); } - bool IsUnion() { return Config::is_union(this); } - int AsBitset() { return Config::as_bitset(this); } - UnionedHandle AsUnion() { return Config::as_union(this); } + int Bitset() { return Config::as_bitset(this); } - static int UnionLength(UnionedHandle unioned) { - return Config::union_length(unioned); + static TypeImpl* New(int bitset) { + return static_cast<BitsetType*>(Config::from_bitset(bitset)); } - static TypeHandle UnionGet(UnionedHandle unioned, int i) { - return Config::union_get(unioned, i); + static TypeHandle New(int bitset, Region* region) { + return Config::from_bitset(bitset, region); } - bool SlowIs(TypeImpl* that); - static bool IsInhabited(int bitset) { return (bitset & kRepresentation) && (bitset & kSemantic); } - int LubBitset(); // least upper bound that's a bitset - int GlbBitset(); // greatest lower bound that's a bitset - - static int LubBitset(i::Object* value); - static int LubBitset(i::Map* map); + static bool Is(int bitset1, int bitset2) { + return (bitset1 | bitset2) == bitset2; + } - bool InUnion(UnionedHandle unioned, int current_size); - static int ExtendUnion( - UnionedHandle unioned, TypeHandle t, int current_size); - static int ExtendIntersection( - UnionedHandle unioned, TypeHandle t, TypeHandle other, int current_size); - -#ifdef OBJECT_PRINT - static const char* bitset_name(int bitset); - static void BitsetTypePrint(FILE* out, int bitset); -#endif + static int Glb(TypeImpl* type); // greatest lower bound that's a bitset + static int Lub(TypeImpl* type); // least upper bound that's a bitset + static int Lub(i::Object* value); + static int Lub(double 
value); + static int Lub(int32_t value); + static int Lub(uint32_t value); + static int Lub(i::Map* map); + static int Lub(double min, double max); + static int InherentLub(TypeImpl* type); + + static const char* Name(int bitset); + static void Print(OStream& os, int bitset); // NOLINT + using TypeImpl::PrintTo; }; -// Zone-allocated types are either (odd) integers to represent bitsets, or -// (even) pointers to zone lists for everything else. The first slot of every -// list is an explicit tag value to distinguish representation. -struct ZoneTypeConfig { - private: - typedef i::ZoneList<void*> Tagged; +// ----------------------------------------------------------------------------- +// Superclass for non-bitset types (internal). +// Contains a tag and a variable number of type or value fields. + +template<class Config> +class TypeImpl<Config>::StructuralType : public TypeImpl<Config> { + protected: + template<class> friend class TypeImpl; + friend struct ZoneTypeConfig; // For tags. + friend struct HeapTypeConfig; enum Tag { kClassTag, kConstantTag, + kRangeTag, + kContextTag, + kArrayTag, + kFunctionTag, kUnionTag }; - static Tagged* tagged_create(Tag tag, int size, Zone* zone) { - Tagged* tagged = new(zone) Tagged(size + 1, zone); - tagged->Add(reinterpret_cast<void*>(tag), zone); - tagged->AddBlock(NULL, size, zone); - return tagged; + int Length() { + return Config::struct_length(Config::as_struct(this)); } - static void tagged_shrink(Tagged* tagged, int size) { - tagged->Rewind(size + 1); + TypeHandle Get(int i) { + DCHECK(0 <= i && i < this->Length()); + return Config::struct_get(Config::as_struct(this), i); } - static Tag tagged_tag(Tagged* tagged) { - return static_cast<Tag>(reinterpret_cast<intptr_t>(tagged->at(0))); + void Set(int i, TypeHandle type) { + DCHECK(0 <= i && i < this->Length()); + Config::struct_set(Config::as_struct(this), i, type); } - template<class T> - static T tagged_get(Tagged* tagged, int i) { - return 
reinterpret_cast<T>(tagged->at(i + 1)); + void Shrink(int length) { + DCHECK(2 <= length && length <= this->Length()); + Config::struct_shrink(Config::as_struct(this), length); } - template<class T> - static void tagged_set(Tagged* tagged, int i, T value) { - tagged->at(i + 1) = reinterpret_cast<void*>(value); + template<class V> i::Handle<V> GetValue(int i) { + DCHECK(0 <= i && i < this->Length()); + return Config::template struct_get_value<V>(Config::as_struct(this), i); } - static int tagged_length(Tagged* tagged) { - return tagged->length() - 1; + template<class V> void SetValue(int i, i::Handle<V> x) { + DCHECK(0 <= i && i < this->Length()); + Config::struct_set_value(Config::as_struct(this), i, x); } - public: - typedef TypeImpl<ZoneTypeConfig> Type; - class Base {}; - typedef i::ZoneList<Type*> Unioned; - typedef i::Zone Region; - template<class T> struct Handle { typedef T* type; }; + static TypeHandle New(Tag tag, int length, Region* region) { + DCHECK(1 <= length); + return Config::from_struct(Config::struct_create(tag, length, region)); + } +}; - static Type* handle(Type* type) { return type; } - static bool is(Type* type, Tag tag) { - return is_tagged(type) && tagged_tag(as_tagged(type)) == tag; +// ----------------------------------------------------------------------------- +// Union types (internal). 
+// A union is a structured type with the following invariants: +// - its length is at least 2 +// - at most one field is a bitset, and it must go into index 0 +// - no field is a union +// - no field is a subtype of any other field +template<class Config> +class TypeImpl<Config>::UnionType : public StructuralType { + public: + static UnionHandle New(int length, Region* region) { + return Config::template cast<UnionType>( + StructuralType::New(StructuralType::kUnionTag, length, region)); } - static bool is_bitset(Type* type) { - return reinterpret_cast<intptr_t>(type) & 1; - } - static bool is_tagged(Type* type) { return !is_bitset(type); } - static bool is_class(Type* type) { return is(type, kClassTag); } - static bool is_constant(Type* type) { return is(type, kConstantTag); } - static bool is_union(Type* type) { return is(type, kUnionTag); } - static bool tagged_is_union(Tagged* tagged) { - return is(from_tagged(tagged), kUnionTag); + static UnionType* cast(TypeImpl* type) { + DCHECK(type->IsUnion()); + return static_cast<UnionType*>(type); } - static int as_bitset(Type* type) { - ASSERT(is_bitset(type)); - return static_cast<int>(reinterpret_cast<intptr_t>(type) >> 1); + bool Wellformed(); +}; + + +// ----------------------------------------------------------------------------- +// Class types. + +template<class Config> +class TypeImpl<Config>::ClassType : public StructuralType { + public: + TypeHandle Bound(Region* region) { + return Config::is_class(this) + ? BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region) + : this->Get(0); + } + i::Handle<i::Map> Map() { + return Config::is_class(this) + ? 
Config::as_class(this) + : this->template GetValue<i::Map>(1); + } + + static ClassHandle New( + i::Handle<i::Map> map, TypeHandle bound, Region* region) { + DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::Lub(*map))); + ClassHandle type = Config::template cast<ClassType>( + StructuralType::New(StructuralType::kClassTag, 2, region)); + type->Set(0, bound); + type->SetValue(1, map); + return type; } - static Tagged* as_tagged(Type* type) { - ASSERT(is_tagged(type)); - return reinterpret_cast<Tagged*>(type); + + static ClassHandle New(i::Handle<i::Map> map, Region* region) { + ClassHandle type = + Config::template cast<ClassType>(Config::from_class(map, region)); + if (type->IsClass()) { + return type; + } else { + TypeHandle bound = BitsetType::New(BitsetType::Lub(*map), region); + return New(map, bound, region); + } } - static i::Handle<i::Map> as_class(Type* type) { - ASSERT(is_class(type)); - return i::Handle<i::Map>(tagged_get<i::Map**>(as_tagged(type), 1)); + + static ClassType* cast(TypeImpl* type) { + DCHECK(type->IsClass()); + return static_cast<ClassType*>(type); } - static i::Handle<i::Object> as_constant(Type* type) { - ASSERT(is_constant(type)); - return i::Handle<i::Object>(tagged_get<i::Object**>(as_tagged(type), 1)); +}; + + +// ----------------------------------------------------------------------------- +// Constant types. 
+ +template<class Config> +class TypeImpl<Config>::ConstantType : public StructuralType { + public: + TypeHandle Bound() { return this->Get(0); } + i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); } + + static ConstantHandle New( + i::Handle<i::Object> value, TypeHandle bound, Region* region) { + DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::Lub(*value))); + ConstantHandle type = Config::template cast<ConstantType>( + StructuralType::New(StructuralType::kConstantTag, 2, region)); + type->Set(0, bound); + type->SetValue(1, value); + return type; } - static Unioned* as_union(Type* type) { - ASSERT(is_union(type)); - return tagged_as_union(as_tagged(type)); + + static ConstantHandle New(i::Handle<i::Object> value, Region* region) { + TypeHandle bound = BitsetType::New(BitsetType::Lub(*value), region); + return New(value, bound, region); } - static Unioned* tagged_as_union(Tagged* tagged) { - ASSERT(tagged_is_union(tagged)); - return reinterpret_cast<Unioned*>(tagged); + + static ConstantType* cast(TypeImpl* type) { + DCHECK(type->IsConstant()); + return static_cast<ConstantType*>(type); } +}; + - static Type* from_bitset(int bitset) { - return reinterpret_cast<Type*>((bitset << 1) | 1); +// ----------------------------------------------------------------------------- +// Range types. 
+ +template<class Config> +class TypeImpl<Config>::RangeType : public StructuralType { + public: + TypeHandle Bound() { return this->Get(0); } + double Min() { return this->template GetValue<i::HeapNumber>(1)->value(); } + double Max() { return this->template GetValue<i::HeapNumber>(2)->value(); } + + static RangeHandle New( + double min, double max, TypeHandle bound, Region* region) { + DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::Lub(min, max))); + RangeHandle type = Config::template cast<RangeType>( + StructuralType::New(StructuralType::kRangeTag, 3, region)); + type->Set(0, bound); + Factory* factory = Config::isolate(region)->factory(); + Handle<HeapNumber> minV = factory->NewHeapNumber(min); + Handle<HeapNumber> maxV = factory->NewHeapNumber(max); + type->SetValue(1, minV); + type->SetValue(2, maxV); + return type; } - static Type* from_bitset(int bitset, Zone* Zone) { - return from_bitset(bitset); + + static RangeHandle New(double min, double max, Region* region) { + TypeHandle bound = BitsetType::New(BitsetType::Lub(min, max), region); + return New(min, max, bound, region); } - static Type* from_tagged(Tagged* tagged) { - return reinterpret_cast<Type*>(tagged); + + static RangeType* cast(TypeImpl* type) { + DCHECK(type->IsRange()); + return static_cast<RangeType*>(type); } - static Type* from_class(i::Handle<i::Map> map, int lub, Zone* zone) { - Tagged* tagged = tagged_create(kClassTag, 2, zone); - tagged_set(tagged, 0, lub); - tagged_set(tagged, 1, map.location()); - return from_tagged(tagged); +}; + + +// ----------------------------------------------------------------------------- +// Context types. 
+ +template<class Config> +class TypeImpl<Config>::ContextType : public StructuralType { + public: + TypeHandle Bound() { return this->Get(0); } + TypeHandle Outer() { return this->Get(1); } + + static ContextHandle New(TypeHandle outer, TypeHandle bound, Region* region) { + DCHECK(BitsetType::Is( + bound->AsBitset(), BitsetType::kInternal & BitsetType::kTaggedPtr)); + ContextHandle type = Config::template cast<ContextType>( + StructuralType::New(StructuralType::kContextTag, 2, region)); + type->Set(0, bound); + type->Set(1, outer); + return type; } - static Type* from_constant(i::Handle<i::Object> value, int lub, Zone* zone) { - Tagged* tagged = tagged_create(kConstantTag, 2, zone); - tagged_set(tagged, 0, lub); - tagged_set(tagged, 1, value.location()); - return from_tagged(tagged); + + static ContextHandle New(TypeHandle outer, Region* region) { + TypeHandle bound = BitsetType::New( + BitsetType::kInternal & BitsetType::kTaggedPtr, region); + return New(outer, bound, region); } - static Type* from_union(Unioned* unioned) { - return from_tagged(tagged_from_union(unioned)); + + static ContextType* cast(TypeImpl* type) { + DCHECK(type->IsContext()); + return static_cast<ContextType*>(type); } - static Tagged* tagged_from_union(Unioned* unioned) { - return reinterpret_cast<Tagged*>(unioned); +}; + + +// ----------------------------------------------------------------------------- +// Array types. 
+ +template<class Config> +class TypeImpl<Config>::ArrayType : public StructuralType { + public: + TypeHandle Bound() { return this->Get(0); } + TypeHandle Element() { return this->Get(1); } + + static ArrayHandle New(TypeHandle element, TypeHandle bound, Region* region) { + DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::kArray)); + ArrayHandle type = Config::template cast<ArrayType>( + StructuralType::New(StructuralType::kArrayTag, 2, region)); + type->Set(0, bound); + type->Set(1, element); + return type; } - static Unioned* union_create(int size, Zone* zone) { - return tagged_as_union(tagged_create(kUnionTag, size, zone)); + static ArrayHandle New(TypeHandle element, Region* region) { + TypeHandle bound = BitsetType::New(BitsetType::kArray, region); + return New(element, bound, region); } - static void union_shrink(Unioned* unioned, int size) { - tagged_shrink(tagged_from_union(unioned), size); + + static ArrayType* cast(TypeImpl* type) { + DCHECK(type->IsArray()); + return static_cast<ArrayType*>(type); } - static Type* union_get(Unioned* unioned, int i) { - Type* type = tagged_get<Type*>(tagged_from_union(unioned), i); - ASSERT(!is_union(type)); +}; + + +// ----------------------------------------------------------------------------- +// Function types. 
+ +template<class Config> +class TypeImpl<Config>::FunctionType : public StructuralType { + public: + int Arity() { return this->Length() - 3; } + TypeHandle Bound() { return this->Get(0); } + TypeHandle Result() { return this->Get(1); } + TypeHandle Receiver() { return this->Get(2); } + TypeHandle Parameter(int i) { return this->Get(3 + i); } + + void InitParameter(int i, TypeHandle type) { this->Set(3 + i, type); } + + static FunctionHandle New( + TypeHandle result, TypeHandle receiver, TypeHandle bound, + int arity, Region* region) { + DCHECK(BitsetType::Is(bound->AsBitset(), BitsetType::kFunction)); + FunctionHandle type = Config::template cast<FunctionType>( + StructuralType::New(StructuralType::kFunctionTag, 3 + arity, region)); + type->Set(0, bound); + type->Set(1, result); + type->Set(2, receiver); return type; } - static void union_set(Unioned* unioned, int i, Type* type) { - ASSERT(!is_union(type)); - tagged_set(tagged_from_union(unioned), i, type); + + static FunctionHandle New( + TypeHandle result, TypeHandle receiver, int arity, Region* region) { + TypeHandle bound = BitsetType::New(BitsetType::kFunction, region); + return New(result, receiver, bound, arity, region); } - static int union_length(Unioned* unioned) { - return tagged_length(tagged_from_union(unioned)); + + static FunctionType* cast(TypeImpl* type) { + DCHECK(type->IsFunction()); + return static_cast<FunctionType*>(type); } - static int lub_bitset(Type* type) { - ASSERT(is_class(type) || is_constant(type)); - return static_cast<int>(tagged_get<intptr_t>(as_tagged(type), 0)); +}; + + +// ----------------------------------------------------------------------------- +// Type iterators. 
+ +template<class Config> template<class T> +class TypeImpl<Config>::Iterator { + public: + bool Done() const { return index_ < 0; } + i::Handle<T> Current(); + void Advance(); + + private: + template<class> friend class TypeImpl; + + Iterator() : index_(-1) {} + explicit Iterator(TypeHandle type) : type_(type), index_(-1) { + Advance(); } + + inline bool matches(TypeHandle type); + inline TypeHandle get_type(); + + TypeHandle type_; + int index_; }; -// Heap-allocated types are either smis for bitsets, maps for classes, boxes for +// ----------------------------------------------------------------------------- +// Zone-allocated types; they are either (odd) integers to represent bitsets, or +// (even) pointers to structures for everything else. + +struct ZoneTypeConfig { + typedef TypeImpl<ZoneTypeConfig> Type; + class Base {}; + typedef void* Struct; + typedef i::Zone Region; + template<class T> struct Handle { typedef T* type; }; + + // TODO(neis): This will be removed again once we have struct_get_double(). 
+ static inline i::Isolate* isolate(Region* region) { + return region->isolate(); + } + + template<class T> static inline T* handle(T* type); + template<class T> static inline T* cast(Type* type); + + static inline bool is_bitset(Type* type); + static inline bool is_class(Type* type); + static inline bool is_struct(Type* type, int tag); + + static inline int as_bitset(Type* type); + static inline i::Handle<i::Map> as_class(Type* type); + static inline Struct* as_struct(Type* type); + + static inline Type* from_bitset(int bitset); + static inline Type* from_bitset(int bitset, Zone* zone); + static inline Type* from_class(i::Handle<i::Map> map, Zone* zone); + static inline Type* from_struct(Struct* structured); + + static inline Struct* struct_create(int tag, int length, Zone* zone); + static inline void struct_shrink(Struct* structure, int length); + static inline int struct_tag(Struct* structure); + static inline int struct_length(Struct* structure); + static inline Type* struct_get(Struct* structure, int i); + static inline void struct_set(Struct* structure, int i, Type* type); + template<class V> + static inline i::Handle<V> struct_get_value(Struct* structure, int i); + template<class V> static inline void struct_set_value( + Struct* structure, int i, i::Handle<V> x); +}; + +typedef TypeImpl<ZoneTypeConfig> Type; + + +// ----------------------------------------------------------------------------- +// Heap-allocated types; either smis for bitsets, maps for classes, boxes for // constants, or fixed arrays for unions. 
+ struct HeapTypeConfig { typedef TypeImpl<HeapTypeConfig> Type; typedef i::Object Base; - typedef i::FixedArray Unioned; + typedef i::FixedArray Struct; typedef i::Isolate Region; template<class T> struct Handle { typedef i::Handle<T> type; }; - static i::Handle<Type> handle(Type* type) { - return i::handle(type, i::HeapObject::cast(type)->GetIsolate()); - } - - static bool is_bitset(Type* type) { return type->IsSmi(); } - static bool is_class(Type* type) { return type->IsMap(); } - static bool is_constant(Type* type) { return type->IsBox(); } - static bool is_union(Type* type) { return type->IsFixedArray(); } - - static int as_bitset(Type* type) { - return Smi::cast(type)->value(); - } - static i::Handle<i::Map> as_class(Type* type) { - return i::handle(i::Map::cast(type)); - } - static i::Handle<i::Object> as_constant(Type* type) { - i::Box* box = i::Box::cast(type); - return i::handle(box->value(), box->GetIsolate()); - } - static i::Handle<Unioned> as_union(Type* type) { - return i::handle(i::FixedArray::cast(type)); - } - - static Type* from_bitset(int bitset) { - return Type::cast(i::Smi::FromInt(bitset)); - } - static i::Handle<Type> from_bitset(int bitset, Isolate* isolate) { - return i::handle(from_bitset(bitset), isolate); - } - static i::Handle<Type> from_class( - i::Handle<i::Map> map, int lub, Isolate* isolate) { - return i::Handle<Type>::cast(i::Handle<Object>::cast(map)); - } - static i::Handle<Type> from_constant( - i::Handle<i::Object> value, int lub, Isolate* isolate) { - i::Handle<Box> box = isolate->factory()->NewBox(value); - return i::Handle<Type>::cast(i::Handle<Object>::cast(box)); - } - static i::Handle<Type> from_union(i::Handle<Unioned> unioned) { - return i::Handle<Type>::cast(i::Handle<Object>::cast(unioned)); - } - - static i::Handle<Unioned> union_create(int size, Isolate* isolate) { - return isolate->factory()->NewFixedArray(size); - } - static void union_shrink(i::Handle<Unioned> unioned, int size) { - unioned->Shrink(size); - } - 
static i::Handle<Type> union_get(i::Handle<Unioned> unioned, int i) { - Type* type = static_cast<Type*>(unioned->get(i)); - ASSERT(!is_union(type)); - return i::handle(type, unioned->GetIsolate()); - } - static void union_set( - i::Handle<Unioned> unioned, int i, i::Handle<Type> type) { - ASSERT(!is_union(*type)); - unioned->set(i, *type); - } - static int union_length(i::Handle<Unioned> unioned) { - return unioned->length(); - } - static int lub_bitset(Type* type) { - return 0; // kNone, which causes recomputation. - } + // TODO(neis): This will be removed again once we have struct_get_double(). + static inline i::Isolate* isolate(Region* region) { + return region; + } + + template<class T> static inline i::Handle<T> handle(T* type); + template<class T> static inline i::Handle<T> cast(i::Handle<Type> type); + + static inline bool is_bitset(Type* type); + static inline bool is_class(Type* type); + static inline bool is_struct(Type* type, int tag); + + static inline int as_bitset(Type* type); + static inline i::Handle<i::Map> as_class(Type* type); + static inline i::Handle<Struct> as_struct(Type* type); + + static inline Type* from_bitset(int bitset); + static inline i::Handle<Type> from_bitset(int bitset, Isolate* isolate); + static inline i::Handle<Type> from_class( + i::Handle<i::Map> map, Isolate* isolate); + static inline i::Handle<Type> from_struct(i::Handle<Struct> structure); + + static inline i::Handle<Struct> struct_create( + int tag, int length, Isolate* isolate); + static inline void struct_shrink(i::Handle<Struct> structure, int length); + static inline int struct_tag(i::Handle<Struct> structure); + static inline int struct_length(i::Handle<Struct> structure); + static inline i::Handle<Type> struct_get(i::Handle<Struct> structure, int i); + static inline void struct_set( + i::Handle<Struct> structure, int i, i::Handle<Type> type); + template<class V> + static inline i::Handle<V> struct_get_value( + i::Handle<Struct> structure, int i); + template<class 
V> + static inline void struct_set_value( + i::Handle<Struct> structure, int i, i::Handle<V> x); }; -typedef TypeImpl<ZoneTypeConfig> Type; typedef TypeImpl<HeapTypeConfig> HeapType; -// A simple struct to represent a pair of lower/upper type bounds. +// ----------------------------------------------------------------------------- +// Type bounds. A simple struct to represent a pair of lower/upper types. + template<class Config> struct BoundsImpl { typedef TypeImpl<Config> Type; @@ -600,7 +956,7 @@ BoundsImpl() {} explicit BoundsImpl(TypeHandle t) : lower(t), upper(t) {} BoundsImpl(TypeHandle l, TypeHandle u) : lower(l), upper(u) { - ASSERT(lower->Is(upper)); + DCHECK(lower->Is(upper)); } // Unrestricted bounds. @@ -643,7 +999,6 @@ typedef BoundsImpl<ZoneTypeConfig> Bounds; - } } // namespace v8::internal #endif // V8_TYPES_H_ diff -Nru nodejs-0.11.13/deps/v8/src/types-inl.h nodejs-0.11.15/deps/v8/src/types-inl.h --- nodejs-0.11.13/deps/v8/src/types-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/types-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,336 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_TYPES_INL_H_ +#define V8_TYPES_INL_H_ + +#include "src/types.h" + +#include "src/factory.h" +#include "src/handles-inl.h" + +namespace v8 { +namespace internal { + +// ----------------------------------------------------------------------------- +// TypeImpl + +template<class Config> +TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) { + TypeImpl* t = static_cast<TypeImpl*>(object); + DCHECK(t->IsBitset() || t->IsClass() || t->IsConstant() || t->IsRange() || + t->IsUnion() || t->IsArray() || t->IsFunction() || t->IsContext()); + return t; +} + + +// Most precise _current_ type of a value (usually its class). 
+template<class Config> +typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NowOf( + i::Object* value, Region* region) { + if (value->IsSmi() || + i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) { + return Of(value, region); + } + return Class(i::handle(i::HeapObject::cast(value)->map()), region); +} + + +template<class Config> +bool TypeImpl<Config>::NowContains(i::Object* value) { + DisallowHeapAllocation no_allocation; + if (this->IsAny()) return true; + if (value->IsHeapObject()) { + i::Map* map = i::HeapObject::cast(value)->map(); + for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) { + if (*it.Current() == map) return true; + } + } + return this->Contains(value); +} + + +// ----------------------------------------------------------------------------- +// ZoneTypeConfig + +// static +template<class T> +T* ZoneTypeConfig::handle(T* type) { + return type; +} + + +// static +template<class T> +T* ZoneTypeConfig::cast(Type* type) { + return static_cast<T*>(type); +} + + +// static +bool ZoneTypeConfig::is_bitset(Type* type) { + return reinterpret_cast<intptr_t>(type) & 1; +} + + +// static +bool ZoneTypeConfig::is_struct(Type* type, int tag) { + return !is_bitset(type) && struct_tag(as_struct(type)) == tag; +} + + +// static +bool ZoneTypeConfig::is_class(Type* type) { + return false; +} + + +// static +int ZoneTypeConfig::as_bitset(Type* type) { + DCHECK(is_bitset(type)); + return static_cast<int>(reinterpret_cast<intptr_t>(type) >> 1); +} + + +// static +ZoneTypeConfig::Struct* ZoneTypeConfig::as_struct(Type* type) { + DCHECK(!is_bitset(type)); + return reinterpret_cast<Struct*>(type); +} + + +// static +i::Handle<i::Map> ZoneTypeConfig::as_class(Type* type) { + UNREACHABLE(); + return i::Handle<i::Map>(); +} + + +// static +ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset) { + return reinterpret_cast<Type*>((bitset << 1) | 1); +} + + +// static +ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int 
bitset, Zone* Zone) { + return from_bitset(bitset); +} + + +// static +ZoneTypeConfig::Type* ZoneTypeConfig::from_struct(Struct* structure) { + return reinterpret_cast<Type*>(structure); +} + + +// static +ZoneTypeConfig::Type* ZoneTypeConfig::from_class( + i::Handle<i::Map> map, Zone* zone) { + return from_bitset(0); +} + + +// static +ZoneTypeConfig::Struct* ZoneTypeConfig::struct_create( + int tag, int length, Zone* zone) { + Struct* structure = reinterpret_cast<Struct*>( + zone->New(sizeof(void*) * (length + 2))); // NOLINT + structure[0] = reinterpret_cast<void*>(tag); + structure[1] = reinterpret_cast<void*>(length); + return structure; +} + + +// static +void ZoneTypeConfig::struct_shrink(Struct* structure, int length) { + DCHECK(0 <= length && length <= struct_length(structure)); + structure[1] = reinterpret_cast<void*>(length); +} + + +// static +int ZoneTypeConfig::struct_tag(Struct* structure) { + return static_cast<int>(reinterpret_cast<intptr_t>(structure[0])); +} + + +// static +int ZoneTypeConfig::struct_length(Struct* structure) { + return static_cast<int>(reinterpret_cast<intptr_t>(structure[1])); +} + + +// static +Type* ZoneTypeConfig::struct_get(Struct* structure, int i) { + DCHECK(0 <= i && i <= struct_length(structure)); + return static_cast<Type*>(structure[2 + i]); +} + + +// static +void ZoneTypeConfig::struct_set(Struct* structure, int i, Type* x) { + DCHECK(0 <= i && i <= struct_length(structure)); + structure[2 + i] = x; +} + + +// static +template<class V> +i::Handle<V> ZoneTypeConfig::struct_get_value(Struct* structure, int i) { + DCHECK(0 <= i && i <= struct_length(structure)); + return i::Handle<V>(static_cast<V**>(structure[2 + i])); +} + + +// static +template<class V> +void ZoneTypeConfig::struct_set_value( + Struct* structure, int i, i::Handle<V> x) { + DCHECK(0 <= i && i <= struct_length(structure)); + structure[2 + i] = x.location(); +} + + +// ----------------------------------------------------------------------------- +// 
HeapTypeConfig + +// static +template<class T> +i::Handle<T> HeapTypeConfig::handle(T* type) { + return i::handle(type, i::HeapObject::cast(type)->GetIsolate()); +} + + +// static +template<class T> +i::Handle<T> HeapTypeConfig::cast(i::Handle<Type> type) { + return i::Handle<T>::cast(type); +} + + +// static +bool HeapTypeConfig::is_bitset(Type* type) { + return type->IsSmi(); +} + + +// static +bool HeapTypeConfig::is_class(Type* type) { + return type->IsMap(); +} + + +// static +bool HeapTypeConfig::is_struct(Type* type, int tag) { + return type->IsFixedArray() && struct_tag(as_struct(type)) == tag; +} + + +// static +int HeapTypeConfig::as_bitset(Type* type) { + return i::Smi::cast(type)->value(); +} + + +// static +i::Handle<i::Map> HeapTypeConfig::as_class(Type* type) { + return i::handle(i::Map::cast(type)); +} + + +// static +i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::as_struct(Type* type) { + return i::handle(Struct::cast(type)); +} + + +// static +HeapTypeConfig::Type* HeapTypeConfig::from_bitset(int bitset) { + return Type::cast(i::Smi::FromInt(bitset)); +} + + +// static +i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_bitset( + int bitset, Isolate* isolate) { + return i::handle(from_bitset(bitset), isolate); +} + + +// static +i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_class( + i::Handle<i::Map> map, Isolate* isolate) { + return i::Handle<Type>::cast(i::Handle<Object>::cast(map)); +} + + +// static +i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_struct( + i::Handle<Struct> structure) { + return i::Handle<Type>::cast(i::Handle<Object>::cast(structure)); +} + + +// static +i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::struct_create( + int tag, int length, Isolate* isolate) { + i::Handle<Struct> structure = isolate->factory()->NewFixedArray(length + 1); + structure->set(0, i::Smi::FromInt(tag)); + return structure; +} + + +// static +void HeapTypeConfig::struct_shrink(i::Handle<Struct> structure, int length) { + 
structure->Shrink(length + 1); +} + + +// static +int HeapTypeConfig::struct_tag(i::Handle<Struct> structure) { + return static_cast<i::Smi*>(structure->get(0))->value(); +} + + +// static +int HeapTypeConfig::struct_length(i::Handle<Struct> structure) { + return structure->length() - 1; +} + + +// static +i::Handle<HeapTypeConfig::Type> HeapTypeConfig::struct_get( + i::Handle<Struct> structure, int i) { + Type* type = static_cast<Type*>(structure->get(i + 1)); + return i::handle(type, structure->GetIsolate()); +} + + +// static +void HeapTypeConfig::struct_set( + i::Handle<Struct> structure, int i, i::Handle<Type> type) { + structure->set(i + 1, *type); +} + + +// static +template<class V> +i::Handle<V> HeapTypeConfig::struct_get_value( + i::Handle<Struct> structure, int i) { + V* x = static_cast<V*>(structure->get(i + 1)); + return i::handle(x, structure->GetIsolate()); +} + + +// static +template<class V> +void HeapTypeConfig::struct_set_value( + i::Handle<Struct> structure, int i, i::Handle<V> x) { + structure->set(i + 1, *x); +} + +} } // namespace v8::internal + +#endif // V8_TYPES_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/typing.cc nodejs-0.11.15/deps/v8/src/typing.cc --- nodejs-0.11.13/deps/v8/src/typing.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/typing.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,14 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "typing.h" - -#include "frames.h" -#include "frames-inl.h" -#include "parser.h" // for CompileTimeValue; TODO(rossberg): should move -#include "scopes.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/typing.h" + +#include "src/frames.h" +#include "src/frames-inl.h" +#include "src/ostreams.h" +#include "src/parser.h" // for CompileTimeValue; TODO(rossberg): should move +#include "src/scopes.h" namespace v8 { namespace internal { @@ -39,8 +17,9 @@ AstTyper::AstTyper(CompilationInfo* info) : info_(info), oracle_( - Handle<Code>(info->closure()->shared()->code()), - Handle<Context>(info->closure()->context()->native_context()), + handle(info->closure()->shared()->code()), + handle(info->closure()->shared()->feedback_vector()), + handle(info->closure()->context()->native_context()), info->zone()), store_(info->zone()) { InitializeAstVisitor(info->zone()); @@ -49,7 +28,7 @@ #define RECURSE(call) \ do { \ - ASSERT(!visitor->HasStackOverflow()); \ + DCHECK(!visitor->HasStackOverflow()); \ call; \ if (visitor->HasStackOverflow()) return; \ } while (false) @@ -72,18 +51,18 @@ #ifdef OBJECT_PRINT static void PrintObserved(Variable* var, Object* value, Type* type) { - PrintF(" observed %s ", var->IsParameter() ? "param" : "local"); - var->name()->Print(); - PrintF(" : "); - value->ShortPrint(); - PrintF(" -> "); - type->TypePrint(); + OFStream os(stdout); + os << " observed " << (var->IsParameter() ? "param" : "local") << " "; + var->name()->Print(os); + os << " : " << Brief(value) << " -> "; + type->PrintTo(os); + os << endl; } #endif // OBJECT_PRINT Effect AstTyper::ObservedOnStack(Object* value) { - Type* lower = Type::OfCurrently(handle(value, isolate()), zone()); + Type* lower = Type::NowOf(value, zone()); return Effect(Bounds(lower, Type::Any(zone()))); } @@ -97,7 +76,7 @@ Scope* scope = info_->scope(); // Assert that the frame on the stack belongs to the function we want to OSR. 
- ASSERT_EQ(*info_->closure(), frame->function()); + DCHECK_EQ(*info_->closure(), frame->function()); int params = scope->num_parameters(); int locals = scope->StackLocalCount(); @@ -140,7 +119,7 @@ #define RECURSE(call) \ do { \ - ASSERT(!HasStackOverflow()); \ + DCHECK(!HasStackOverflow()); \ call; \ if (HasStackOverflow()) return; \ } while (false) @@ -457,7 +436,7 @@ if (!expr->IsUninitialized()) { if (prop->key()->IsPropertyName()) { Literal* lit_key = prop->key()->AsLiteral(); - ASSERT(lit_key != NULL && lit_key->value()->IsString()); + DCHECK(lit_key != NULL && lit_key->value()->IsString()); Handle<String> name = Handle<String>::cast(lit_key->value()); oracle()->AssignmentReceiverTypes(id, name, expr->GetReceiverTypes()); } else { @@ -505,12 +484,9 @@ if (!expr->IsUninitialized()) { if (expr->key()->IsPropertyName()) { Literal* lit_key = expr->key()->AsLiteral(); - ASSERT(lit_key != NULL && lit_key->value()->IsString()); + DCHECK(lit_key != NULL && lit_key->value()->IsString()); Handle<String> name = Handle<String>::cast(lit_key->value()); - bool is_prototype; - oracle()->PropertyReceiverTypes( - id, name, expr->GetReceiverTypes(), &is_prototype); - expr->set_is_function_prototype(is_prototype); + oracle()->PropertyReceiverTypes(id, name, expr->GetReceiverTypes()); } else { bool is_string; oracle()->KeyedPropertyReceiverTypes( @@ -530,9 +506,12 @@ // Collect type feedback. 
RECURSE(Visit(expr->expression())); if (!expr->expression()->IsProperty() && - expr->HasCallFeedbackSlot() && + expr->IsUsingCallFeedbackSlot(isolate()) && oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) { expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot())); + Handle<AllocationSite> site = + oracle()->GetCallAllocationSite(expr->CallFeedbackSlot()); + expr->set_allocation_site(site); } ZoneList<Expression*>* args = expr->arguments(); @@ -694,7 +673,7 @@ Bounds l = expr->left()->bounds(); Bounds r = expr->right()->bounds(); Type* lower = - l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ? + !l.lower->IsInhabited() || !r.lower->IsInhabited() ? Type::None(zone()) : l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ? Type::String(zone()) : diff -Nru nodejs-0.11.13/deps/v8/src/typing.h nodejs-0.11.15/deps/v8/src/typing.h --- nodejs-0.11.13/deps/v8/src/typing.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/typing.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,20 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_TYPING_H_ #define V8_TYPING_H_ -#include "v8.h" +#include "src/v8.h" -#include "allocation.h" -#include "ast.h" -#include "compiler.h" -#include "type-info.h" -#include "types.h" -#include "effects.h" -#include "zone.h" -#include "scopes.h" +#include "src/allocation.h" +#include "src/ast.h" +#include "src/compiler.h" +#include "src/effects.h" +#include "src/scopes.h" +#include "src/type-info.h" +#include "src/types.h" +#include "src/zone.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/unbound-queue.h nodejs-0.11.15/deps/v8/src/unbound-queue.h --- nodejs-0.11.13/deps/v8/src/unbound-queue.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/unbound-queue.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,12 @@ // Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_UNBOUND_QUEUE_ #define V8_UNBOUND_QUEUE_ -#include "allocation.h" +#include "src/allocation.h" +#include "src/base/atomicops.h" namespace v8 { namespace internal { @@ -57,8 +35,8 @@ struct Node; Node* first_; - AtomicWord divider_; // Node* - AtomicWord last_; // Node* + base::AtomicWord divider_; // Node* + base::AtomicWord last_; // Node* DISALLOW_COPY_AND_ASSIGN(UnboundQueue); }; diff -Nru nodejs-0.11.13/deps/v8/src/unbound-queue-inl.h nodejs-0.11.15/deps/v8/src/unbound-queue-inl.h --- nodejs-0.11.13/deps/v8/src/unbound-queue-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/unbound-queue-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,11 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_UNBOUND_QUEUE_INL_H_ #define V8_UNBOUND_QUEUE_INL_H_ -#include "unbound-queue.h" - -#include "atomicops.h" +#include "src/unbound-queue.h" namespace v8 { namespace internal { @@ -49,7 +24,7 @@ template<typename Record> UnboundQueue<Record>::UnboundQueue() { first_ = new Node(Record()); - divider_ = last_ = reinterpret_cast<AtomicWord>(first_); + divider_ = last_ = reinterpret_cast<base::AtomicWord>(first_); } @@ -69,10 +44,10 @@ template<typename Record> bool UnboundQueue<Record>::Dequeue(Record* rec) { - if (divider_ == Acquire_Load(&last_)) return false; + if (divider_ == base::Acquire_Load(&last_)) return false; Node* next = reinterpret_cast<Node*>(divider_)->next; *rec = next->value; - Release_Store(÷r_, reinterpret_cast<AtomicWord>(next)); + base::Release_Store(÷r_, reinterpret_cast<base::AtomicWord>(next)); return true; } @@ -81,9 +56,9 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) { Node*& next = reinterpret_cast<Node*>(last_)->next; next = new Node(rec); - Release_Store(&last_, reinterpret_cast<AtomicWord>(next)); + base::Release_Store(&last_, reinterpret_cast<base::AtomicWord>(next)); - while (first_ != reinterpret_cast<Node*>(Acquire_Load(÷r_))) { + while (first_ != reinterpret_cast<Node*>(base::Acquire_Load(÷r_))) { DeleteFirst(); } } @@ -91,13 +66,13 @@ template<typename Record> bool 
UnboundQueue<Record>::IsEmpty() const { - return NoBarrier_Load(÷r_) == NoBarrier_Load(&last_); + return base::NoBarrier_Load(÷r_) == base::NoBarrier_Load(&last_); } template<typename Record> Record* UnboundQueue<Record>::Peek() const { - if (divider_ == Acquire_Load(&last_)) return NULL; + if (divider_ == base::Acquire_Load(&last_)) return NULL; Node* next = reinterpret_cast<Node*>(divider_)->next; return &next->value; } diff -Nru nodejs-0.11.13/deps/v8/src/unicode.cc nodejs-0.11.15/deps/v8/src/unicode.cc --- nodejs-0.11.13/deps/v8/src/unicode.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/unicode.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // // This file was generated at 2014-02-07 15:31:16.733174 -#include "unicode-inl.h" -#include <stdlib.h> +#include "src/unicode-inl.h" #include <stdio.h> +#include <stdlib.h> namespace unibrow { @@ -294,7 +271,7 @@ while (stream_length != 0) { unsigned cursor = 0; uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor); - ASSERT(cursor > 0 && cursor <= stream_length); + DCHECK(cursor > 0 && cursor <= stream_length); stream += cursor; stream_length -= cursor; bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode; @@ -319,7 +296,7 @@ } // Have gone over buffer. // Last char of buffer is unused, set cursor back. 
- ASSERT(is_two_characters); + DCHECK(is_two_characters); writing_to_buffer = false; last_byte_of_buffer_unused_ = true; unbuffered_start_ = stream - cursor; @@ -340,7 +317,7 @@ if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) { *data++ = Utf16::LeadSurrogate(character); *data++ = Utf16::TrailSurrogate(character); - ASSERT(data_length > 1); + DCHECK(data_length > 1); data_length -= 2; } else { *data++ = character; diff -Nru nodejs-0.11.13/deps/v8/src/unicode.h nodejs-0.11.15/deps/v8/src/unicode.h --- nodejs-0.11.13/deps/v8/src/unicode.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/unicode.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_UNICODE_H_ #define V8_UNICODE_H_ #include <sys/types.h> -#include "globals.h" +#include "src/globals.h" /** * \file * Definitions and convenience functions for working with unicode. diff -Nru nodejs-0.11.13/deps/v8/src/unicode-inl.h nodejs-0.11.15/deps/v8/src/unicode-inl.h --- nodejs-0.11.13/deps/v8/src/unicode-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/unicode-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2007-2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_UNICODE_INL_H_ #define V8_UNICODE_INL_H_ -#include "unicode.h" -#include "checks.h" -#include "platform.h" +#include "src/unicode.h" +#include "src/base/logging.h" +#include "src/utils.h" namespace unibrow { @@ -81,7 +58,7 @@ uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) { - ASSERT(c > Latin1::kMaxChar); + DCHECK(c > Latin1::kMaxChar); switch (c) { // This are equivalent characters in unicode. case 0x39c: @@ -207,15 +184,15 @@ template <unsigned kBufferSize> unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data, unsigned length) const { - ASSERT(length > 0); + DCHECK(length > 0); if (length > utf16_length_) length = utf16_length_; // memcpy everything in buffer. unsigned buffer_length = last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize; - unsigned memcpy_length = length <= buffer_length ? length : buffer_length; - v8::internal::OS::MemCopy(data, buffer_, memcpy_length*sizeof(uint16_t)); + unsigned memcpy_length = length <= buffer_length ? 
length : buffer_length; + v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t)); if (length <= buffer_length) return length; - ASSERT(unbuffered_start_ != NULL); + DCHECK(unbuffered_start_ != NULL); // Copy the rest the slow way. WriteUtf16Slow(unbuffered_start_, data + buffer_length, diff -Nru nodejs-0.11.13/deps/v8/src/unique.h nodejs-0.11.15/deps/v8/src/unique.h --- nodejs-0.11.13/deps/v8/src/unique.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/unique.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_HYDROGEN_UNIQUE_H_ #define V8_HYDROGEN_UNIQUE_H_ -#include "handles.h" -#include "objects.h" -#include "utils.h" -#include "zone.h" +#include "src/handles.h" +#include "src/objects.h" +#include "src/string-stream.h" +#include "src/utils.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -52,7 +30,7 @@ // Careful! Comparison of two Uniques is only correct if both were created // in the same "era" of GC or if at least one is a non-movable object. template <typename T> -class Unique V8_FINAL { +class Unique { public: // TODO(titzer): make private and introduce a uniqueness scope. explicit Unique(Handle<T> handle) { @@ -65,9 +43,9 @@ // NOTE: we currently consider maps to be non-movable, so no special // assurance is required for creating a Unique<Map>. // TODO(titzer): other immortable immovable objects are also fine. - ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap()); + DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap()); raw_address_ = reinterpret_cast<Address>(*handle); - ASSERT_NE(raw_address_, NULL); // Non-null should imply non-zero address. + DCHECK_NE(raw_address_, NULL); // Non-null should imply non-zero address. 
} handle_ = handle; } @@ -91,28 +69,28 @@ template <typename U> inline bool operator==(const Unique<U>& other) const { - ASSERT(IsInitialized() && other.IsInitialized()); + DCHECK(IsInitialized() && other.IsInitialized()); return raw_address_ == other.raw_address_; } template <typename U> inline bool operator!=(const Unique<U>& other) const { - ASSERT(IsInitialized() && other.IsInitialized()); + DCHECK(IsInitialized() && other.IsInitialized()); return raw_address_ != other.raw_address_; } inline intptr_t Hashcode() const { - ASSERT(IsInitialized()); + DCHECK(IsInitialized()); return reinterpret_cast<intptr_t>(raw_address_); } inline bool IsNull() const { - ASSERT(IsInitialized()); + DCHECK(IsInitialized()); return raw_address_ == NULL; } inline bool IsKnownGlobal(void* global) const { - ASSERT(IsInitialized()); + DCHECK(IsInitialized()); return raw_address_ == reinterpret_cast<Address>(global); } @@ -140,8 +118,10 @@ friend class UniqueSet<T>; // Uses internal details for speed. template <class U> friend class Unique; // For comparing raw_address values. + template <class U> + friend class PrintableUnique; // For automatic up casting. - private: + protected: Unique<T>() : raw_address_(NULL) { } Address raw_address_; @@ -151,15 +131,92 @@ }; +// TODO(danno): At some point if all of the uses of Unique end up using +// PrintableUnique, then we should merge PrintableUnique into Unique and +// predicate generating the printable string on a "am I tracing" check. +template <class T> +class PrintableUnique : public Unique<T> { + public: + // TODO(titzer): make private and introduce a uniqueness scope. + explicit PrintableUnique(Zone* zone, Handle<T> handle) : Unique<T>(handle) { + InitializeString(zone); + } + + // TODO(titzer): this is a hack to migrate to Unique<T> incrementally. + PrintableUnique(Zone* zone, Address raw_address, Handle<T> handle) + : Unique<T>(raw_address, handle) { + InitializeString(zone); + } + + // Constructor for handling automatic up casting. 
+ // Eg. PrintableUnique<JSFunction> can be passed when PrintableUnique<Object> + // is expected. + template <class S> + PrintableUnique(PrintableUnique<S> uniq) // NOLINT + : Unique<T>(Handle<T>()) { +#ifdef DEBUG + T* a = NULL; + S* b = NULL; + a = b; // Fake assignment to enforce type checks. + USE(a); +#endif + this->raw_address_ = uniq.raw_address_; + this->handle_ = uniq.handle_; + string_ = uniq.string(); + } + + // TODO(titzer): this is a hack to migrate to Unique<T> incrementally. + static PrintableUnique<T> CreateUninitialized(Zone* zone, Handle<T> handle) { + return PrintableUnique<T>(zone, reinterpret_cast<Address>(NULL), handle); + } + + static PrintableUnique<T> CreateImmovable(Zone* zone, Handle<T> handle) { + return PrintableUnique<T>(zone, reinterpret_cast<Address>(*handle), handle); + } + + const char* string() const { return string_; } + + private: + const char* string_; + + void InitializeString(Zone* zone) { + // The stringified version of the parameter must be calculated when the + // Operator is constructed to avoid accessing the heap. + HeapStringAllocator temp_allocator; + StringStream stream(&temp_allocator); + this->handle_->ShortPrint(&stream); + SmartArrayPointer<const char> desc_string = stream.ToCString(); + const char* desc_chars = desc_string.get(); + int length = static_cast<int>(strlen(desc_chars)); + char* desc_copy = zone->NewArray<char>(length + 1); + memcpy(desc_copy, desc_chars, length + 1); + string_ = desc_copy; + } +}; + + template <typename T> class UniqueSet V8_FINAL : public ZoneObject { public: // Constructor. A new set will be empty. UniqueSet() : size_(0), capacity_(0), array_(NULL) { } + // Capacity constructor. A new set will be empty. + UniqueSet(int capacity, Zone* zone) + : size_(0), capacity_(capacity), + array_(zone->NewArray<Unique<T> >(capacity)) { + DCHECK(capacity <= kMaxCapacity); + } + + // Singleton constructor. 
+ UniqueSet(Unique<T> uniq, Zone* zone) + : size_(1), capacity_(1), array_(zone->NewArray<Unique<T> >(1)) { + array_[0] = uniq; + } + // Add a new element to this unique set. Mutates this set. O(|this|). void Add(Unique<T> uniq, Zone* zone) { - ASSERT(uniq.IsInitialized()); + DCHECK(uniq.IsInitialized()); // Keep the set sorted by the {raw_address} of the unique elements. for (int i = 0; i < size_; i++) { if (array_[i] == uniq) return; @@ -189,7 +246,7 @@ } // Compare this set against another set. O(|this|). - bool Equals(UniqueSet<T>* that) const { + bool Equals(const UniqueSet<T>* that) const { if (that->size_ != this->size_) return false; for (int i = 0; i < this->size_; i++) { if (this->array_[i] != that->array_[i]) return false; @@ -200,15 +257,18 @@ // Check whether this set contains the given element. O(|this|) // TODO(titzer): use binary search for large sets to make this O(log|this|) template <typename U> - bool Contains(Unique<U> elem) const { - for (int i = 0; i < size_; i++) { - if (this->array_[i] == elem) return true; + bool Contains(const Unique<U> elem) const { + for (int i = 0; i < this->size_; ++i) { + Unique<T> cand = this->array_[i]; + if (cand.raw_address_ >= elem.raw_address_) { + return cand.raw_address_ == elem.raw_address_; + } } return false; } // Check if this set is a subset of the given set. O(|this| + |that|). - bool IsSubset(UniqueSet<T>* that) const { + bool IsSubset(const UniqueSet<T>* that) const { if (that->size_ < this->size_) return false; int j = 0; for (int i = 0; i < this->size_; i++) { @@ -224,11 +284,11 @@ // Returns a new set representing the intersection of this set and the other. // O(|this| + |that|). 
- UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const { + UniqueSet<T>* Intersect(const UniqueSet<T>* that, Zone* zone) const { if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>(); - UniqueSet<T>* out = new(zone) UniqueSet<T>(); - out->Grow(Min(this->size_, that->size_), zone); + UniqueSet<T>* out = new(zone) UniqueSet<T>( + Min(this->size_, that->size_), zone); int i = 0, j = 0, k = 0; while (i < this->size_ && j < that->size_) { @@ -251,12 +311,12 @@ // Returns a new set representing the union of this set and the other. // O(|this| + |that|). - UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const { + UniqueSet<T>* Union(const UniqueSet<T>* that, Zone* zone) const { if (that->size_ == 0) return this->Copy(zone); if (this->size_ == 0) return that->Copy(zone); - UniqueSet<T>* out = new(zone) UniqueSet<T>(); - out->Grow(this->size_ + that->size_, zone); + UniqueSet<T>* out = new(zone) UniqueSet<T>( + this->size_ + that->size_, zone); int i = 0, j = 0, k = 0; while (i < this->size_ && j < that->size_) { @@ -282,12 +342,30 @@ return out; } + // Returns a new set representing all elements from this set which are not in + // that set. O(|this| * |that|). + UniqueSet<T>* Subtract(const UniqueSet<T>* that, Zone* zone) const { + if (that->size_ == 0) return this->Copy(zone); + + UniqueSet<T>* out = new(zone) UniqueSet<T>(this->size_, zone); + + int i = 0, j = 0; + while (i < this->size_) { + Unique<T> cand = this->array_[i]; + if (!that->Contains(cand)) { + out->array_[j++] = cand; + } + i++; + } + + out->size_ = j; + return out; + } + // Makes an exact copy of this set. O(|this|). 
UniqueSet<T>* Copy(Zone* zone) const { - UniqueSet<T>* copy = new(zone) UniqueSet<T>(); + UniqueSet<T>* copy = new(zone) UniqueSet<T>(this->size_, zone); copy->size_ = this->size_; - copy->capacity_ = this->size_; - copy->array_ = zone->NewArray<Unique<T> >(this->size_); memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>)); return copy; } @@ -301,7 +379,7 @@ } inline Unique<T> at(int index) const { - ASSERT(index >= 0 && index < size_); + DCHECK(index >= 0 && index < size_); return array_[index]; } @@ -330,7 +408,6 @@ } }; - } } // namespace v8::internal #endif // V8_HYDROGEN_UNIQUE_H_ diff -Nru nodejs-0.11.13/deps/v8/src/uri.h nodejs-0.11.15/deps/v8/src/uri.h --- nodejs-0.11.13/deps/v8/src/uri.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/uri.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_URI_H_ #define V8_URI_H_ -#include "v8.h" +#include "src/v8.h" -#include "string-search.h" -#include "v8utils.h" -#include "v8conversions.h" +#include "src/conversions.h" +#include "src/string-search.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -45,7 +22,7 @@ template <> Vector<const uint8_t> GetCharVector(Handle<String> string) { String::FlatContent flat = string->GetFlatContent(); - ASSERT(flat.IsAscii()); + DCHECK(flat.IsAscii()); return flat.ToOneByteVector(); } @@ -53,7 +30,7 @@ template <> Vector<const uc16> GetCharVector(Handle<String> string) { String::FlatContent flat = string->GetFlatContent(); - ASSERT(flat.IsTwoByte()); + DCHECK(flat.IsTwoByte()); return flat.ToUC16Vector(); } @@ -61,13 +38,14 @@ class URIUnescape : public AllStatic { public: template<typename Char> - static Handle<String> Unescape(Isolate* isolate, Handle<String> source); + MUST_USE_RESULT static MaybeHandle<String> Unescape(Isolate* isolate, + Handle<String> source); private: static const signed char kHexValue['g']; template<typename Char> - static Handle<String> UnescapeSlow( + MUST_USE_RESULT static MaybeHandle<String> UnescapeSlow( Isolate* isolate, Handle<String> string, int start_index); static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2)); @@ -91,7 +69,8 @@ template<typename Char> 
-Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) { +MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate, + Handle<String> source) { int index; { DisallowHeapAllocation no_allocation; StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%")); @@ -103,7 +82,7 @@ template <typename Char> -Handle<String> URIUnescape::UnescapeSlow( +MaybeHandle<String> URIUnescape::UnescapeSlow( Isolate* isolate, Handle<String> string, int start_index) { bool one_byte = true; int length = string->length(); @@ -121,17 +100,16 @@ } } - ASSERT(start_index < length); + DCHECK(start_index < length); Handle<String> first_part = isolate->factory()->NewProperSubString(string, 0, start_index); int dest_position = 0; Handle<String> second_part; - ASSERT(unescaped_length <= String::kMaxLength); + DCHECK(unescaped_length <= String::kMaxLength); if (one_byte) { - Handle<SeqOneByteString> dest = - isolate->factory()->NewRawOneByteString(unescaped_length); - ASSERT(!dest.is_null()); + Handle<SeqOneByteString> dest = isolate->factory()->NewRawOneByteString( + unescaped_length).ToHandleChecked(); DisallowHeapAllocation no_allocation; Vector<const Char> vector = GetCharVector<Char>(string); for (int i = start_index; i < length; dest_position++) { @@ -142,9 +120,8 @@ } second_part = dest; } else { - Handle<SeqTwoByteString> dest = - isolate->factory()->NewRawTwoByteString(unescaped_length); - ASSERT(!dest.is_null()); + Handle<SeqTwoByteString> dest = isolate->factory()->NewRawTwoByteString( + unescaped_length).ToHandleChecked(); DisallowHeapAllocation no_allocation; Vector<const Char> vector = GetCharVector<Char>(string); for (int i = start_index; i < length; dest_position++) { @@ -203,7 +180,8 @@ class URIEscape : public AllStatic { public: template<typename Char> - static Handle<String> Escape(Isolate* isolate, Handle<String> string); + MUST_USE_RESULT static MaybeHandle<String> Escape(Isolate* isolate, + Handle<String> string); private: static const char 
kHexChars[17]; @@ -247,8 +225,8 @@ template<typename Char> -Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) { - ASSERT(string->IsFlat()); +MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) { + DCHECK(string->IsFlat()); int escaped_length = 0; int length = string->length(); @@ -265,7 +243,7 @@ } // We don't allow strings that are longer than a maximal length. - ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow. + DCHECK(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow. if (escaped_length > String::kMaxLength) break; // Provoke exception. } } @@ -273,9 +251,11 @@ // No length change implies no change. Return original string if no change. if (escaped_length == length) return string; - Handle<SeqOneByteString> dest = - isolate->factory()->NewRawOneByteString(escaped_length); - RETURN_IF_EMPTY_HANDLE_VALUE(isolate, dest, Handle<String>()); + Handle<SeqOneByteString> dest; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, dest, + isolate->factory()->NewRawOneByteString(escaped_length), + String); int dest_position = 0; { DisallowHeapAllocation no_allocation; diff -Nru nodejs-0.11.13/deps/v8/src/uri.js nodejs-0.11.15/deps/v8/src/uri.js --- nodejs-0.11.13/deps/v8/src/uri.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/uri.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,8 @@ // Copyright 2006-2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict"; // This file relies on the fact that the following declaration has been made // in runtime.js: @@ -34,424 +13,380 @@ // This file contains support for URI manipulations written in // JavaScript. -// Lazily initialized. -var hexCharArray = 0; -var hexCharCodeArray = 0; - - -function URIAddEncodedOctetToBuffer(octet, result, index) { - result[index++] = 37; // Char code of '%'. 
- result[index++] = hexCharCodeArray[octet >> 4]; - result[index++] = hexCharCodeArray[octet & 0x0F]; - return index; -} - - -function URIEncodeOctets(octets, result, index) { - if (hexCharCodeArray === 0) { - hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 65, 66, 67, 68, 69, 70]; - } - index = URIAddEncodedOctetToBuffer(octets[0], result, index); - if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index); - if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index); - if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index); - return index; -} - - -function URIEncodeSingle(cc, result, index) { - var x = (cc >> 12) & 0xF; - var y = (cc >> 6) & 63; - var z = cc & 63; - var octets = new $Array(3); - if (cc <= 0x007F) { - octets[0] = cc; - } else if (cc <= 0x07FF) { - octets[0] = y + 192; - octets[1] = z + 128; - } else { - octets[0] = x + 224; - octets[1] = y + 128; - octets[2] = z + 128; - } - return URIEncodeOctets(octets, result, index); -} - - -function URIEncodePair(cc1 , cc2, result, index) { - var u = ((cc1 >> 6) & 0xF) + 1; - var w = (cc1 >> 2) & 0xF; - var x = cc1 & 3; - var y = (cc2 >> 6) & 0xF; - var z = cc2 & 63; - var octets = new $Array(4); - octets[0] = (u >> 2) + 240; - octets[1] = (((u & 3) << 4) | w) + 128; - octets[2] = ((x << 4) | y) + 128; - octets[3] = z + 128; - return URIEncodeOctets(octets, result, index); -} - - -function URIHexCharsToCharCode(highChar, lowChar) { - var highCode = HexValueOf(highChar); - var lowCode = HexValueOf(lowChar); - if (highCode == -1 || lowCode == -1) { - throw new $URIError("URI malformed"); - } - return (highCode << 4) | lowCode; -} - - -function URIDecodeOctets(octets, result, index) { - var value; - var o0 = octets[0]; - if (o0 < 0x80) { - value = o0; - } else if (o0 < 0xc2) { - throw new $URIError("URI malformed"); - } else { - var o1 = octets[1]; - if (o0 < 0xe0) { - var a = o0 & 0x1f; - if ((o1 < 0x80) || (o1 > 0xbf)) { - throw new 
$URIError("URI malformed"); - } - var b = o1 & 0x3f; - value = (a << 6) + b; - if (value < 0x80 || value > 0x7ff) { - throw new $URIError("URI malformed"); - } + +(function() { + + // ------------------------------------------------------------------- + // Define internal helper functions. + + function HexValueOf(code) { + // 0-9 + if (code >= 48 && code <= 57) return code - 48; + // A-F + if (code >= 65 && code <= 70) return code - 55; + // a-f + if (code >= 97 && code <= 102) return code - 87; + + return -1; + } + + // Does the char code correspond to an alpha-numeric char. + function isAlphaNumeric(cc) { + // a - z + if (97 <= cc && cc <= 122) return true; + // A - Z + if (65 <= cc && cc <= 90) return true; + // 0 - 9 + if (48 <= cc && cc <= 57) return true; + + return false; + } + + //Lazily initialized. + var hexCharCodeArray = 0; + + function URIAddEncodedOctetToBuffer(octet, result, index) { + result[index++] = 37; // Char code of '%'. + result[index++] = hexCharCodeArray[octet >> 4]; + result[index++] = hexCharCodeArray[octet & 0x0F]; + return index; + } + + function URIEncodeOctets(octets, result, index) { + if (hexCharCodeArray === 0) { + hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 65, 66, 67, 68, 69, 70]; + } + index = URIAddEncodedOctetToBuffer(octets[0], result, index); + if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index); + if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index); + if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index); + return index; + } + + function URIEncodeSingle(cc, result, index) { + var x = (cc >> 12) & 0xF; + var y = (cc >> 6) & 63; + var z = cc & 63; + var octets = new $Array(3); + if (cc <= 0x007F) { + octets[0] = cc; + } else if (cc <= 0x07FF) { + octets[0] = y + 192; + octets[1] = z + 128; } else { - var o2 = octets[2]; - if (o0 < 0xf0) { - var a = o0 & 0x0f; + octets[0] = x + 224; + octets[1] = y + 128; + octets[2] = z + 128; + } + return 
URIEncodeOctets(octets, result, index); + } + + function URIEncodePair(cc1 , cc2, result, index) { + var u = ((cc1 >> 6) & 0xF) + 1; + var w = (cc1 >> 2) & 0xF; + var x = cc1 & 3; + var y = (cc2 >> 6) & 0xF; + var z = cc2 & 63; + var octets = new $Array(4); + octets[0] = (u >> 2) + 240; + octets[1] = (((u & 3) << 4) | w) + 128; + octets[2] = ((x << 4) | y) + 128; + octets[3] = z + 128; + return URIEncodeOctets(octets, result, index); + } + + function URIHexCharsToCharCode(highChar, lowChar) { + var highCode = HexValueOf(highChar); + var lowCode = HexValueOf(lowChar); + if (highCode == -1 || lowCode == -1) { + throw new $URIError("URI malformed"); + } + return (highCode << 4) | lowCode; + } + + // Callers must ensure that |result| is a sufficiently long sequential + // two-byte string! + function URIDecodeOctets(octets, result, index) { + var value; + var o0 = octets[0]; + if (o0 < 0x80) { + value = o0; + } else if (o0 < 0xc2) { + throw new $URIError("URI malformed"); + } else { + var o1 = octets[1]; + if (o0 < 0xe0) { + var a = o0 & 0x1f; if ((o1 < 0x80) || (o1 > 0xbf)) { throw new $URIError("URI malformed"); } var b = o1 & 0x3f; - if ((o2 < 0x80) || (o2 > 0xbf)) { - throw new $URIError("URI malformed"); - } - var c = o2 & 0x3f; - value = (a << 12) + (b << 6) + c; - if ((value < 0x800) || (value > 0xffff)) { + value = (a << 6) + b; + if (value < 0x80 || value > 0x7ff) { throw new $URIError("URI malformed"); } } else { - var o3 = octets[3]; - if (o0 < 0xf8) { - var a = (o0 & 0x07); + var o2 = octets[2]; + if (o0 < 0xf0) { + var a = o0 & 0x0f; if ((o1 < 0x80) || (o1 > 0xbf)) { throw new $URIError("URI malformed"); } - var b = (o1 & 0x3f); + var b = o1 & 0x3f; if ((o2 < 0x80) || (o2 > 0xbf)) { throw new $URIError("URI malformed"); } - var c = (o2 & 0x3f); - if ((o3 < 0x80) || (o3 > 0xbf)) { + var c = o2 & 0x3f; + value = (a << 12) + (b << 6) + c; + if ((value < 0x800) || (value > 0xffff)) { throw new $URIError("URI malformed"); } - var d = (o3 & 0x3f); - value = (a << 
18) + (b << 12) + (c << 6) + d; - if ((value < 0x10000) || (value > 0x10ffff)) { + } else { + var o3 = octets[3]; + if (o0 < 0xf8) { + var a = (o0 & 0x07); + if ((o1 < 0x80) || (o1 > 0xbf)) { + throw new $URIError("URI malformed"); + } + var b = (o1 & 0x3f); + if ((o2 < 0x80) || (o2 > 0xbf)) { + throw new $URIError("URI malformed"); + } + var c = (o2 & 0x3f); + if ((o3 < 0x80) || (o3 > 0xbf)) { + throw new $URIError("URI malformed"); + } + var d = (o3 & 0x3f); + value = (a << 18) + (b << 12) + (c << 6) + d; + if ((value < 0x10000) || (value > 0x10ffff)) { + throw new $URIError("URI malformed"); + } + } else { throw new $URIError("URI malformed"); } - } else { - throw new $URIError("URI malformed"); } } } - } - if (0xD800 <= value && value <= 0xDFFF) { - throw new $URIError("URI malformed"); - } - if (value < 0x10000) { - %_TwoByteSeqStringSetChar(result, index++, value); - return index; - } else { - %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0); - %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00); + if (0xD800 <= value && value <= 0xDFFF) { + throw new $URIError("URI malformed"); + } + if (value < 0x10000) { + %_TwoByteSeqStringSetChar(result, index++, value); + } else { + %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0); + %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00); + } return index; } -} - -// ECMA-262, section 15.1.3 -function Encode(uri, unescape) { - var uriLength = uri.length; - var array = new InternalArray(uriLength); - var index = 0; - for (var k = 0; k < uriLength; k++) { - var cc1 = uri.charCodeAt(k); - if (unescape(cc1)) { - array[index++] = cc1; - } else { - if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed"); - if (cc1 < 0xD800 || cc1 > 0xDBFF) { - index = URIEncodeSingle(cc1, array, index); + // ECMA-262, section 15.1.3 + function Encode(uri, unescape) { + var uriLength = uri.length; + var array = new InternalArray(uriLength); + var index = 0; + 
for (var k = 0; k < uriLength; k++) { + var cc1 = uri.charCodeAt(k); + if (unescape(cc1)) { + array[index++] = cc1; } else { - k++; - if (k == uriLength) throw new $URIError("URI malformed"); - var cc2 = uri.charCodeAt(k); - if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed"); - index = URIEncodePair(cc1, cc2, array, index); + if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed"); + if (cc1 < 0xD800 || cc1 > 0xDBFF) { + index = URIEncodeSingle(cc1, array, index); + } else { + k++; + if (k == uriLength) throw new $URIError("URI malformed"); + var cc2 = uri.charCodeAt(k); + if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed"); + index = URIEncodePair(cc1, cc2, array, index); + } } } + + var result = %NewString(array.length, NEW_ONE_BYTE_STRING); + for (var i = 0; i < array.length; i++) { + %_OneByteSeqStringSetChar(result, i, array[i]); + } + return result; } - var result = %NewString(array.length, NEW_ONE_BYTE_STRING); - for (var i = 0; i < array.length; i++) { - %_OneByteSeqStringSetChar(result, i, array[i]); - } - return result; -} - - -// ECMA-262, section 15.1.3 -function Decode(uri, reserved) { - var uriLength = uri.length; - var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING); - var index = 0; - var k = 0; - - // Optimistically assume ascii string. - for ( ; k < uriLength; k++) { - var code = uri.charCodeAt(k); - if (code == 37) { // '%' - if (k + 2 >= uriLength) throw new $URIError("URI malformed"); - var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2)); - if (cc >> 7) break; // Assumption wrong, two byte string. - if (reserved(cc)) { - %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'. 
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1)); - %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2)); + // ECMA-262, section 15.1.3 + function Decode(uri, reserved) { + var uriLength = uri.length; + var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING); + var index = 0; + var k = 0; + + // Optimistically assume ascii string. + for ( ; k < uriLength; k++) { + var code = uri.charCodeAt(k); + if (code == 37) { // '%' + if (k + 2 >= uriLength) throw new $URIError("URI malformed"); + var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2)); + if (cc >> 7) break; // Assumption wrong, two byte string. + if (reserved(cc)) { + %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'. + %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1)); + %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2)); + } else { + %_OneByteSeqStringSetChar(one_byte, index++, cc); + } + k += 2; } else { - %_OneByteSeqStringSetChar(one_byte, index++, cc); + if (code > 0x7f) break; // Assumption wrong, two byte string. + %_OneByteSeqStringSetChar(one_byte, index++, code); } - k += 2; - } else { - if (code > 0x7f) break; // Assumption wrong, two byte string. - %_OneByteSeqStringSetChar(one_byte, index++, code); } - } - one_byte = %TruncateString(one_byte, index); - if (k == uriLength) return one_byte; + one_byte = %TruncateString(one_byte, index); + if (k == uriLength) return one_byte; - // Write into two byte string. 
- var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING); - index = 0; - - for ( ; k < uriLength; k++) { - var code = uri.charCodeAt(k); - if (code == 37) { // '%' - if (k + 2 >= uriLength) throw new $URIError("URI malformed"); - var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k)); - if (cc >> 7) { - var n = 0; - while (((cc << ++n) & 0x80) != 0) { } - if (n == 1 || n > 4) throw new $URIError("URI malformed"); - var octets = new $Array(n); - octets[0] = cc; - if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed"); - for (var i = 1; i < n; i++) { - if (uri.charAt(++k) != '%') throw new $URIError("URI malformed"); - octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), - uri.charCodeAt(++k)); + // Write into two byte string. + var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING); + index = 0; + + for ( ; k < uriLength; k++) { + var code = uri.charCodeAt(k); + if (code == 37) { // '%' + if (k + 2 >= uriLength) throw new $URIError("URI malformed"); + var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k)); + if (cc >> 7) { + var n = 0; + while (((cc << ++n) & 0x80) != 0) { } + if (n == 1 || n > 4) throw new $URIError("URI malformed"); + var octets = new $Array(n); + octets[0] = cc; + if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed"); + for (var i = 1; i < n; i++) { + if (uri.charAt(++k) != '%') throw new $URIError("URI malformed"); + octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), + uri.charCodeAt(++k)); + } + index = URIDecodeOctets(octets, two_byte, index); + } else if (reserved(cc)) { + %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'. 
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1)); + %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k)); + } else { + %_TwoByteSeqStringSetChar(two_byte, index++, cc); } - index = URIDecodeOctets(octets, two_byte, index); - } else if (reserved(cc)) { - %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'. - %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1)); - %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k)); } else { - %_TwoByteSeqStringSetChar(two_byte, index++, cc); + %_TwoByteSeqStringSetChar(two_byte, index++, code); } - } else { - %_TwoByteSeqStringSetChar(two_byte, index++, code); } - } - two_byte = %TruncateString(two_byte, index); - return one_byte + two_byte; -} - - -// ECMA-262 - 15.1.3.1. -function URIDecode(uri) { - var reservedPredicate = function(cc) { - // #$ - if (35 <= cc && cc <= 36) return true; - // & - if (cc == 38) return true; - // +, - if (43 <= cc && cc <= 44) return true; - // / - if (cc == 47) return true; - // :; - if (58 <= cc && cc <= 59) return true; - // = - if (cc == 61) return true; - // ?@ - if (63 <= cc && cc <= 64) return true; + two_byte = %TruncateString(two_byte, index); + return one_byte + two_byte; + } - return false; - }; - var string = ToString(uri); - return Decode(string, reservedPredicate); -} - - -// ECMA-262 - 15.1.3.2. -function URIDecodeComponent(component) { - var reservedPredicate = function(cc) { return false; }; - var string = ToString(component); - return Decode(string, reservedPredicate); -} - - -// Does the char code correspond to an alpha-numeric char. -function isAlphaNumeric(cc) { - // a - z - if (97 <= cc && cc <= 122) return true; - // A - Z - if (65 <= cc && cc <= 90) return true; - // 0 - 9 - if (48 <= cc && cc <= 57) return true; - - return false; -} - - -// ECMA-262 - 15.1.3.3. -function URIEncode(uri) { - var unescapePredicate = function(cc) { - if (isAlphaNumeric(cc)) return true; - // ! 
- if (cc == 33) return true; - // #$ - if (35 <= cc && cc <= 36) return true; - // &'()*+,-./ - if (38 <= cc && cc <= 47) return true; - // :; - if (58 <= cc && cc <= 59) return true; - // = - if (cc == 61) return true; - // ?@ - if (63 <= cc && cc <= 64) return true; - // _ - if (cc == 95) return true; - // ~ - if (cc == 126) return true; + // ------------------------------------------------------------------- + // Define exported functions. - return false; - }; + // ECMA-262 - B.2.1. + function URIEscapeJS(str) { + var s = ToString(str); + return %URIEscape(s); + } + + // ECMA-262 - B.2.2. + function URIUnescapeJS(str) { + var s = ToString(str); + return %URIUnescape(s); + } + + // ECMA-262 - 15.1.3.1. + function URIDecode(uri) { + var reservedPredicate = function(cc) { + // #$ + if (35 <= cc && cc <= 36) return true; + // & + if (cc == 38) return true; + // +, + if (43 <= cc && cc <= 44) return true; + // / + if (cc == 47) return true; + // :; + if (58 <= cc && cc <= 59) return true; + // = + if (cc == 61) return true; + // ?@ + if (63 <= cc && cc <= 64) return true; - var string = ToString(uri); - return Encode(string, unescapePredicate); -} - - -// ECMA-262 - 15.1.3.4 -function URIEncodeComponent(component) { - var unescapePredicate = function(cc) { - if (isAlphaNumeric(cc)) return true; - // ! - if (cc == 33) return true; - // '()* - if (39 <= cc && cc <= 42) return true; - // -. - if (45 <= cc && cc <= 46) return true; - // _ - if (cc == 95) return true; - // ~ - if (cc == 126) return true; + return false; + }; + var string = ToString(uri); + return Decode(string, reservedPredicate); + } + + // ECMA-262 - 15.1.3.2. + function URIDecodeComponent(component) { + var reservedPredicate = function(cc) { return false; }; + var string = ToString(component); + return Decode(string, reservedPredicate); + } + + // ECMA-262 - 15.1.3.3. + function URIEncode(uri) { + var unescapePredicate = function(cc) { + if (isAlphaNumeric(cc)) return true; + // ! 
+ if (cc == 33) return true; + // #$ + if (35 <= cc && cc <= 36) return true; + // &'()*+,-./ + if (38 <= cc && cc <= 47) return true; + // :; + if (58 <= cc && cc <= 59) return true; + // = + if (cc == 61) return true; + // ?@ + if (63 <= cc && cc <= 64) return true; + // _ + if (cc == 95) return true; + // ~ + if (cc == 126) return true; - return false; - }; + return false; + }; + var string = ToString(uri); + return Encode(string, unescapePredicate); + } + + // ECMA-262 - 15.1.3.4 + function URIEncodeComponent(component) { + var unescapePredicate = function(cc) { + if (isAlphaNumeric(cc)) return true; + // ! + if (cc == 33) return true; + // '()* + if (39 <= cc && cc <= 42) return true; + // -. + if (45 <= cc && cc <= 46) return true; + // _ + if (cc == 95) return true; + // ~ + if (cc == 126) return true; - var string = ToString(component); - return Encode(string, unescapePredicate); -} - - -function HexValueOf(code) { - // 0-9 - if (code >= 48 && code <= 57) return code - 48; - // A-F - if (code >= 65 && code <= 70) return code - 55; - // a-f - if (code >= 97 && code <= 102) return code - 87; - - return -1; -} - - -// Convert a character code to 4-digit hex string representation -// 64 -> 0040, 62234 -> F31A. -function CharCodeToHex4Str(cc) { - var r = ""; - if (hexCharArray === 0) { - hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", - "A", "B", "C", "D", "E", "F"]; - } - for (var i = 0; i < 4; ++i) { - var c = hexCharArray[cc & 0x0F]; - r = c + r; - cc = cc >>> 4; - } - return r; -} - - -// Returns true if all digits in string s are valid hex numbers -function IsValidHex(s) { - for (var i = 0; i < s.length; ++i) { - var cc = s.charCodeAt(i); - if ((48 <= cc && cc <= 57) || - (65 <= cc && cc <= 70) || - (97 <= cc && cc <= 102)) { - // '0'..'9', 'A'..'F' and 'a' .. 'f'. - } else { return false; - } + }; + var string = ToString(component); + return Encode(string, unescapePredicate); } - return true; -} - -// ECMA-262 - B.2.1. 
-function URIEscape(str) { - var s = ToString(str); - return %URIEscape(s); -} - - -// ECMA-262 - B.2.2. -function URIUnescape(str) { - var s = ToString(str); - return %URIUnescape(s); -} - - -// ------------------------------------------------------------------- + // ------------------------------------------------------------------- + // Install exported functions. -function SetUpUri() { %CheckIsBootstrapping(); // Set up non-enumerable URI functions on the global object and set // their names. InstallFunctions(global, DONT_ENUM, $Array( - "escape", URIEscape, - "unescape", URIUnescape, - "decodeURI", URIDecode, - "decodeURIComponent", URIDecodeComponent, - "encodeURI", URIEncode, - "encodeURIComponent", URIEncodeComponent + "escape", URIEscapeJS, + "unescape", URIUnescapeJS, + "decodeURI", URIDecode, + "decodeURIComponent", URIDecodeComponent, + "encodeURI", URIEncode, + "encodeURIComponent", URIEncodeComponent )); -} -SetUpUri(); +})(); diff -Nru nodejs-0.11.13/deps/v8/src/utils/random-number-generator.cc nodejs-0.11.15/deps/v8/src/utils/random-number-generator.cc --- nodejs-0.11.13/deps/v8/src/utils/random-number-generator.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/utils/random-number-generator.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "utils/random-number-generator.h" - -#include <stdio.h> -#include <stdlib.h> - -#include "flags.h" -#include "platform/mutex.h" -#include "platform/time.h" -#include "utils.h" - -namespace v8 { -namespace internal { - -static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER; -static RandomNumberGenerator::EntropySource entropy_source = NULL; - - -// static -void RandomNumberGenerator::SetEntropySource(EntropySource source) { - LockGuard<Mutex> lock_guard(entropy_mutex.Pointer()); - entropy_source = source; -} - - -RandomNumberGenerator::RandomNumberGenerator() { - // Check --random-seed flag first. - if (FLAG_random_seed != 0) { - SetSeed(FLAG_random_seed); - return; - } - - // Check if embedder supplied an entropy source. 
- { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer()); - if (entropy_source != NULL) { - int64_t seed; - if (entropy_source(reinterpret_cast<unsigned char*>(&seed), - sizeof(seed))) { - SetSeed(seed); - return; - } - } - } - -#if V8_OS_CYGWIN || V8_OS_WIN - // Use rand_s() to gather entropy on Windows. See: - // https://code.google.com/p/v8/issues/detail?id=2905 - unsigned first_half, second_half; - errno_t result = rand_s(&first_half); - ASSERT_EQ(0, result); - result = rand_s(&second_half); - ASSERT_EQ(0, result); - SetSeed((static_cast<int64_t>(first_half) << 32) + second_half); -#else - // Gather entropy from /dev/urandom if available. - FILE* fp = fopen("/dev/urandom", "rb"); - if (fp != NULL) { - int64_t seed; - size_t n = fread(&seed, sizeof(seed), 1, fp); - fclose(fp); - if (n == 1) { - SetSeed(seed); - return; - } - } - - // We cannot assume that random() or rand() were seeded - // properly, so instead of relying on random() or rand(), - // we just seed our PRNG using timing data as fallback. - // This is weak entropy, but it's sufficient, because - // it is the responsibility of the embedder to install - // an entropy source using v8::V8::SetEntropySource(), - // which provides reasonable entropy, see: - // https://code.google.com/p/v8/issues/detail?id=2905 - int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24; - seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16; - seed ^= TimeTicks::Now().ToInternalValue() << 8; - SetSeed(seed); -#endif // V8_OS_CYGWIN || V8_OS_WIN -} - - -int RandomNumberGenerator::NextInt(int max) { - ASSERT_LE(0, max); - - // Fast path if max is a power of 2. 
- if (IsPowerOf2(max)) { - return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31); - } - - while (true) { - int rnd = Next(31); - int val = rnd % max; - if (rnd - val + (max - 1) >= 0) { - return val; - } - } -} - - -double RandomNumberGenerator::NextDouble() { - return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) / - static_cast<double>(static_cast<int64_t>(1) << 53); -} - - -void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) { - for (size_t n = 0; n < buflen; ++n) { - static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8)); - } -} - - -int RandomNumberGenerator::Next(int bits) { - ASSERT_LT(0, bits); - ASSERT_GE(32, bits); - int64_t seed = (seed_ * kMultiplier + kAddend) & kMask; - seed_ = seed; - return static_cast<int>(seed >> (48 - bits)); -} - - -void RandomNumberGenerator::SetSeed(int64_t seed) { - seed_ = (seed ^ kMultiplier) & kMask; -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/utils/random-number-generator.h nodejs-0.11.15/deps/v8/src/utils/random-number-generator.h --- nodejs-0.11.13/deps/v8/src/utils/random-number-generator.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/utils/random-number-generator.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_ -#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_ - -#include "globals.h" - -namespace v8 { -namespace internal { - -// ----------------------------------------------------------------------------- -// RandomNumberGenerator -// -// This class is used to generate a stream of pseudorandom numbers. The class -// uses a 48-bit seed, which is modified using a linear congruential formula. -// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.) -// If two instances of RandomNumberGenerator are created with the same seed, and -// the same sequence of method calls is made for each, they will generate and -// return identical sequences of numbers. 
-// This class uses (probably) weak entropy by default, but it's sufficient, -// because it is the responsibility of the embedder to install an entropy source -// using v8::V8::SetEntropySource(), which provides reasonable entropy, see: -// https://code.google.com/p/v8/issues/detail?id=2905 -// This class is neither reentrant nor threadsafe. - -class RandomNumberGenerator V8_FINAL { - public: - // EntropySource is used as a callback function when V8 needs a source of - // entropy. - typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen); - static void SetEntropySource(EntropySource entropy_source); - - RandomNumberGenerator(); - explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); } - - // Returns the next pseudorandom, uniformly distributed int value from this - // random number generator's sequence. The general contract of |NextInt()| is - // that one int value is pseudorandomly generated and returned. - // All 2^32 possible integer values are produced with (approximately) equal - // probability. - V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT { - return Next(32); - } - - // Returns a pseudorandom, uniformly distributed int value between 0 - // (inclusive) and the specified max value (exclusive), drawn from this random - // number generator's sequence. The general contract of |NextInt(int)| is that - // one int value in the specified range is pseudorandomly generated and - // returned. All max possible int values are produced with (approximately) - // equal probability. - int NextInt(int max) V8_WARN_UNUSED_RESULT; - - // Returns the next pseudorandom, uniformly distributed boolean value from - // this random number generator's sequence. The general contract of - // |NextBoolean()| is that one boolean value is pseudorandomly generated and - // returned. The values true and false are produced with (approximately) equal - // probability. 
- V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT { - return Next(1) != 0; - } - - // Returns the next pseudorandom, uniformly distributed double value between - // 0.0 and 1.0 from this random number generator's sequence. - // The general contract of |NextDouble()| is that one double value, chosen - // (approximately) uniformly from the range 0.0 (inclusive) to 1.0 - // (exclusive), is pseudorandomly generated and returned. - double NextDouble() V8_WARN_UNUSED_RESULT; - - // Fills the elements of a specified array of bytes with random numbers. - void NextBytes(void* buffer, size_t buflen); - - private: - static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d); - static const int64_t kAddend = 0xb; - static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff); - - int Next(int bits) V8_WARN_UNUSED_RESULT; - void SetSeed(int64_t seed); - - int64_t seed_; -}; - -} } // namespace v8::internal - -#endif // V8_UTILS_RANDOM_NUMBER_GENERATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/utils.cc nodejs-0.11.15/deps/v8/src/utils.cc --- nodejs-0.11.13/deps/v8/src/utils.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/utils.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,15 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <stdarg.h> -#include "../include/v8stdint.h" -#include "checks.h" -#include "platform.h" -#include "utils.h" +#include <sys/stat.h> + +#include "src/v8.h" + +#include "src/base/logging.h" +#include "src/base/platform/platform.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -47,9 +27,9 @@ void SimpleStringBuilder::AddSubstring(const char* s, int n) { - ASSERT(!is_finalized() && position_ + n <= buffer_.length()); - ASSERT(static_cast<size_t>(n) <= strlen(s)); - OS::MemCopy(&buffer_[position_], s, n * kCharSize); + DCHECK(!is_finalized() && position_ + n <= buffer_.length()); + DCHECK(static_cast<size_t>(n) <= strlen(s)); + MemCopy(&buffer_[position_], s, n * kCharSize); position_ += n; } @@ -80,7 +60,7 @@ char* SimpleStringBuilder::Finalize() { - ASSERT(!is_finalized() && position_ <= buffer_.length()); + DCHECK(!is_finalized() && position_ <= buffer_.length()); // If there is no space for null termination, overwrite last character. 
if (position_ == buffer_.length()) { position_--; @@ -90,11 +70,348 @@ buffer_[position_] = '\0'; // Make sure nobody managed to add a 0-character to the // buffer while building the string. - ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_)); + DCHECK(strlen(buffer_.start()) == static_cast<size_t>(position_)); position_ = -1; - ASSERT(is_finalized()); + DCHECK(is_finalized()); return buffer_.start(); } +void PrintF(const char* format, ...) { + va_list arguments; + va_start(arguments, format); + base::OS::VPrint(format, arguments); + va_end(arguments); +} + + +void PrintF(FILE* out, const char* format, ...) { + va_list arguments; + va_start(arguments, format); + base::OS::VFPrint(out, format, arguments); + va_end(arguments); +} + + +void PrintPID(const char* format, ...) { + base::OS::Print("[%d] ", base::OS::GetCurrentProcessId()); + va_list arguments; + va_start(arguments, format); + base::OS::VPrint(format, arguments); + va_end(arguments); +} + + +int SNPrintF(Vector<char> str, const char* format, ...) { + va_list args; + va_start(args, format); + int result = VSNPrintF(str, format, args); + va_end(args); + return result; +} + + +int VSNPrintF(Vector<char> str, const char* format, va_list args) { + return base::OS::VSNPrintF(str.start(), str.length(), format, args); +} + + +void StrNCpy(Vector<char> dest, const char* src, size_t n) { + base::OS::StrNCpy(dest.start(), dest.length(), src, n); +} + + +void Flush(FILE* out) { + fflush(out); +} + + +char* ReadLine(const char* prompt) { + char* result = NULL; + char line_buf[256]; + int offset = 0; + bool keep_going = true; + fprintf(stdout, "%s", prompt); + fflush(stdout); + while (keep_going) { + if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) { + // fgets got an error. Just give up. 
+ if (result != NULL) { + DeleteArray(result); + } + return NULL; + } + int len = StrLength(line_buf); + if (len > 1 && + line_buf[len - 2] == '\\' && + line_buf[len - 1] == '\n') { + // When we read a line that ends with a "\" we remove the escape and + // append the remainder. + line_buf[len - 2] = '\n'; + line_buf[len - 1] = 0; + len -= 1; + } else if ((len > 0) && (line_buf[len - 1] == '\n')) { + // Since we read a new line we are done reading the line. This + // will exit the loop after copying this buffer into the result. + keep_going = false; + } + if (result == NULL) { + // Allocate the initial result and make room for the terminating '\0' + result = NewArray<char>(len + 1); + } else { + // Allocate a new result with enough room for the new addition. + int new_len = offset + len + 1; + char* new_result = NewArray<char>(new_len); + // Copy the existing input into the new array and set the new + // array as the result. + MemCopy(new_result, result, offset * kCharSize); + DeleteArray(result); + result = new_result; + } + // Copy the newly read line into the result. + MemCopy(result + offset, line_buf, len * kCharSize); + offset += len; + } + DCHECK(result != NULL); + result[offset] = '\0'; + return result; +} + + +char* ReadCharsFromFile(FILE* file, + int* size, + int extra_space, + bool verbose, + const char* filename) { + if (file == NULL || fseek(file, 0, SEEK_END) != 0) { + if (verbose) { + base::OS::PrintError("Cannot read from file %s.\n", filename); + } + return NULL; + } + + // Get the size of the file and rewind it. 
+ *size = ftell(file); + rewind(file); + + char* result = NewArray<char>(*size + extra_space); + for (int i = 0; i < *size && feof(file) == 0;) { + int read = static_cast<int>(fread(&result[i], 1, *size - i, file)); + if (read != (*size - i) && ferror(file) != 0) { + fclose(file); + DeleteArray(result); + return NULL; + } + i += read; + } + return result; +} + + +char* ReadCharsFromFile(const char* filename, + int* size, + int extra_space, + bool verbose) { + FILE* file = base::OS::FOpen(filename, "rb"); + char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename); + if (file != NULL) fclose(file); + return result; +} + + +byte* ReadBytes(const char* filename, int* size, bool verbose) { + char* chars = ReadCharsFromFile(filename, size, 0, verbose); + return reinterpret_cast<byte*>(chars); +} + + +static Vector<const char> SetVectorContents(char* chars, + int size, + bool* exists) { + if (!chars) { + *exists = false; + return Vector<const char>::empty(); + } + chars[size] = '\0'; + *exists = true; + return Vector<const char>(chars, size); +} + + +Vector<const char> ReadFile(const char* filename, + bool* exists, + bool verbose) { + int size; + char* result = ReadCharsFromFile(filename, &size, 1, verbose); + return SetVectorContents(result, size, exists); +} + + +Vector<const char> ReadFile(FILE* file, + bool* exists, + bool verbose) { + int size; + char* result = ReadCharsFromFile(file, &size, 1, verbose, ""); + return SetVectorContents(result, size, exists); +} + + +int WriteCharsToFile(const char* str, int size, FILE* f) { + int total = 0; + while (total < size) { + int write = static_cast<int>(fwrite(str, 1, size - total, f)); + if (write == 0) { + return total; + } + total += write; + str += write; + } + return total; +} + + +int AppendChars(const char* filename, + const char* str, + int size, + bool verbose) { + FILE* f = base::OS::FOpen(filename, "ab"); + if (f == NULL) { + if (verbose) { + base::OS::PrintError("Cannot open file %s for 
writing.\n", filename); + } + return 0; + } + int written = WriteCharsToFile(str, size, f); + fclose(f); + return written; +} + + +int WriteChars(const char* filename, + const char* str, + int size, + bool verbose) { + FILE* f = base::OS::FOpen(filename, "wb"); + if (f == NULL) { + if (verbose) { + base::OS::PrintError("Cannot open file %s for writing.\n", filename); + } + return 0; + } + int written = WriteCharsToFile(str, size, f); + fclose(f); + return written; +} + + +int WriteBytes(const char* filename, + const byte* bytes, + int size, + bool verbose) { + const char* str = reinterpret_cast<const char*>(bytes); + return WriteChars(filename, str, size, verbose); +} + + + +void StringBuilder::AddFormatted(const char* format, ...) { + va_list arguments; + va_start(arguments, format); + AddFormattedList(format, arguments); + va_end(arguments); +} + + +void StringBuilder::AddFormattedList(const char* format, va_list list) { + DCHECK(!is_finalized() && position_ <= buffer_.length()); + int n = VSNPrintF(buffer_ + position_, format, list); + if (n < 0 || n >= (buffer_.length() - position_)) { + position_ = buffer_.length(); + } else { + position_ += n; + } +} + + +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 +static void MemMoveWrapper(void* dest, const void* src, size_t size) { + memmove(dest, src, size); +} + + +// Initialize to library version so we can call this at any time during startup. +static MemMoveFunction memmove_function = &MemMoveWrapper; + +// Defined in codegen-ia32.cc. +MemMoveFunction CreateMemMoveFunction(); + +// Copy memory area to disjoint memory area. +void MemMove(void* dest, const void* src, size_t size) { + if (size == 0) return; + // Note: here we rely on dependent reads being ordered. This is true + // on all architectures we currently support. 
+ (*memmove_function)(dest, src, size); +} + +#elif V8_OS_POSIX && V8_HOST_ARCH_ARM +void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, + size_t chars) { + uint16_t* limit = dest + chars; + while (dest < limit) { + *dest++ = static_cast<uint16_t>(*src++); + } +} + + +MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper; +MemCopyUint16Uint8Function memcopy_uint16_uint8_function = + &MemCopyUint16Uint8Wrapper; +// Defined in codegen-arm.cc. +MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub); +MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( + MemCopyUint16Uint8Function stub); + +#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS +MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper; +// Defined in codegen-mips.cc. +MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub); +#endif + + +void init_memcopy_functions() { +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 + MemMoveFunction generated_memmove = CreateMemMoveFunction(); + if (generated_memmove != NULL) { + memmove_function = generated_memmove; + } +#elif V8_OS_POSIX && V8_HOST_ARCH_ARM + memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper); + memcopy_uint16_uint8_function = + CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper); +#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS + memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper); +#endif +} + + +bool DoubleToBoolean(double d) { + // NaN, +0, and -0 should return the false object +#if __BYTE_ORDER == __LITTLE_ENDIAN + union IeeeDoubleLittleEndianArchType u; +#elif __BYTE_ORDER == __BIG_ENDIAN + union IeeeDoubleBigEndianArchType u; +#endif + u.d = d; + if (u.bits.exp == 2047) { + // Detect NaN for IEEE double precision floating point. + if ((u.bits.man_low | u.bits.man_high) != 0) return false; + } + if (u.bits.exp == 0) { + // Detect +0, and -0 for IEEE double precision floating point. 
+ if ((u.bits.man_low | u.bits.man_high) == 0) return false; + } + return true; +} + + } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/utils.h nodejs-0.11.15/deps/v8/src/utils.h --- nodejs-0.11.13/deps/v8/src/utils.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/utils.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_UTILS_H_ #define V8_UTILS_H_ @@ -31,11 +8,16 @@ #include <limits.h> #include <stdlib.h> #include <string.h> -#include <algorithm> +#include <cmath> -#include "allocation.h" -#include "checks.h" -#include "globals.h" +#include "include/v8.h" +#include "src/allocation.h" +#include "src/base/logging.h" +#include "src/base/macros.h" +#include "src/base/platform/platform.h" +#include "src/globals.h" +#include "src/list.h" +#include "src/vector.h" namespace v8 { namespace internal { @@ -43,20 +25,9 @@ // ---------------------------------------------------------------------------- // General helper functions -#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0) - -// Returns true iff x is a power of 2 (or zero). Cannot be used with the -// maximally negative value of the type T (the -1 overflows). -template <typename T> -inline bool IsPowerOf2(T x) { - return IS_POWER_OF_TWO(x); -} - - // X must be a power of 2. Returns the number of trailing zeros. inline int WhichPowerOf2(uint32_t x) { - ASSERT(IsPowerOf2(x)); - ASSERT(x != 0); + DCHECK(IsPowerOf2(x)); int bits = 0; #ifdef DEBUG int original_x = x; @@ -80,7 +51,7 @@ case 2: bits++; // Fall through. case 1: break; } - ASSERT_EQ(1 << bits, original_x); + DCHECK_EQ(1 << bits, original_x); return bits; return 0; } @@ -113,50 +84,6 @@ } -// Compute the 0-relative offset of some absolute value x of type T. -// This allows conversion of Addresses and integral types into -// 0-relative int offsets. -template <typename T> -inline intptr_t OffsetFrom(T x) { - return x - static_cast<T>(0); -} - - -// Compute the absolute value of type T for some 0-relative offset x. -// This allows conversion of 0-relative int offsets into Addresses and -// integral types. 
-template <typename T> -inline T AddressFrom(intptr_t x) { - return static_cast<T>(static_cast<T>(0) + x); -} - - -// Return the largest multiple of m which is <= x. -template <typename T> -inline T RoundDown(T x, intptr_t m) { - ASSERT(IsPowerOf2(m)); - return AddressFrom<T>(OffsetFrom(x) & -m); -} - - -// Return the smallest multiple of m which is >= x. -template <typename T> -inline T RoundUp(T x, intptr_t m) { - return RoundDown<T>(static_cast<T>(x + m - 1), m); -} - - -// Increment a pointer until it has the specified alignment. -// This works like RoundUp, but it works correctly on pointer types where -// sizeof(*pointer) might not be 1. -template<class T> -T AlignUp(T pointer, size_t alignment) { - ASSERT(sizeof(pointer) == sizeof(uintptr_t)); - uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer); - return reinterpret_cast<T>(RoundUp(pointer_raw, alignment)); -} - - template <typename T> int Compare(const T& a, const T& b) { if (a == b) @@ -184,35 +111,6 @@ } -// Returns the smallest power of two which is >= x. If you pass in a -// number that is already a power of two, it is returned as is. -// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr., -// figure 3-3, page 48, where the function is called clp2. -inline uint32_t RoundUpToPowerOf2(uint32_t x) { - ASSERT(x <= 0x80000000u); - x = x - 1; - x = x | (x >> 1); - x = x | (x >> 2); - x = x | (x >> 4); - x = x | (x >> 8); - x = x | (x >> 16); - return x + 1; -} - - -inline uint32_t RoundDownToPowerOf2(uint32_t x) { - uint32_t rounded_up = RoundUpToPowerOf2(x); - if (rounded_up > x) return rounded_up >> 1; - return rounded_up; -} - - -template <typename T, typename U> -inline bool IsAligned(T value, U alignment) { - return (value & (alignment - 1)) == 0; -} - - // Returns true if (addr + offset) is aligned. inline bool IsAddressAligned(Address addr, intptr_t alignment, @@ -243,17 +141,12 @@ } -// Returns the negative absolute value of its argument. 
-template <typename T> -T NegAbs(T a) { - return a < 0 ? a : -a; -} - - -inline int StrLength(const char* string) { - size_t length = strlen(string); - ASSERT(length == static_cast<size_t>(static_cast<int>(length))); - return static_cast<int>(length); +// Floor(-0.0) == 0.0 +inline double Floor(double x) { +#ifdef _MSC_VER + if (x == 0) return x; // Fix for issue 3477. +#endif + return std::floor(x); } @@ -263,6 +156,25 @@ } +// Obtains the unsigned type corresponding to T +// available in C++11 as std::make_unsigned +template<typename T> +struct make_unsigned { + typedef T type; +}; + + +// Template specializations necessary to have make_unsigned work +template<> struct make_unsigned<int32_t> { + typedef uint32_t type; +}; + + +template<> struct make_unsigned<int64_t> { + typedef uint64_t type; +}; + + // ---------------------------------------------------------------------------- // BitField is a help template for encoding and decode bitfield with // unsigned content. @@ -277,6 +189,7 @@ static const U kMask = ((kOne << shift) << size) - (kOne << shift); static const U kShift = shift; static const U kSize = size; + static const U kNext = kShift + kSize; // Value for the field with all bits set. static const T kMax = static_cast<T>((1U << size) - 1); @@ -288,7 +201,7 @@ // Returns a type U with the bit field value encoded. static U encode(T value) { - ASSERT(is_valid(value)); + DCHECK(is_valid(value)); return static_cast<U>(value) << shift; } @@ -352,6 +265,85 @@ // ---------------------------------------------------------------------------- +// Generated memcpy/memmove + +// Initializes the codegen support that depends on CPU features. This is +// called after CPU initialization. +void init_memcopy_functions(); + +#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87) +// Limit below which the extra overhead of the MemCopy function is likely +// to outweigh the benefits of faster copying. +const int kMinComplexMemCopy = 64; + +// Copy memory area. 
No restrictions. +void MemMove(void* dest, const void* src, size_t size); +typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); + +// Keep the distinction of "move" vs. "copy" for the benefit of other +// architectures. +V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { + MemMove(dest, src, size); +} +#elif defined(V8_HOST_ARCH_ARM) +typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, + size_t size); +extern MemCopyUint8Function memcopy_uint8_function; +V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, + size_t chars) { + memcpy(dest, src, chars); +} +// For values < 16, the assembler function is slower than the inlined C code. +const int kMinComplexMemCopy = 16; +V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { + (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), + reinterpret_cast<const uint8_t*>(src), size); +} +V8_INLINE void MemMove(void* dest, const void* src, size_t size) { + memmove(dest, src, size); +} + +typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src, + size_t size); +extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; +void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, + size_t chars); +// For values < 12, the assembler function is slower than the inlined C code. +const int kMinComplexConvertMemCopy = 12; +V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, + size_t size) { + (*memcopy_uint16_uint8_function)(dest, src, size); +} +#elif defined(V8_HOST_ARCH_MIPS) +typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, + size_t size); +extern MemCopyUint8Function memcopy_uint8_function; +V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, + size_t chars) { + memcpy(dest, src, chars); +} +// For values < 16, the assembler function is slower than the inlined C code. 
+const int kMinComplexMemCopy = 16; +V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { + (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), + reinterpret_cast<const uint8_t*>(src), size); +} +V8_INLINE void MemMove(void* dest, const void* src, size_t size) { + memmove(dest, src, size); +} +#else +// Copy memory area to disjoint memory area. +V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { + memcpy(dest, src, size); +} +V8_INLINE void MemMove(void* dest, const void* src, size_t size) { + memmove(dest, src, size); +} +const int kMinComplexMemCopy = 16 * kPointerSize; +#endif // V8_TARGET_ARCH_IA32 + + +// ---------------------------------------------------------------------------- // Miscellaneous // A static resource holds a static instance that can be reserved in @@ -376,7 +368,7 @@ explicit Access(StaticResource<T>* resource) : resource_(resource) , instance_(&resource->instance_) { - ASSERT(!resource->is_reserved_); + DCHECK(!resource->is_reserved_); resource->is_reserved_ = true; } @@ -395,110 +387,6 @@ }; -template <typename T> -class Vector { - public: - Vector() : start_(NULL), length_(0) {} - Vector(T* data, int length) : start_(data), length_(length) { - ASSERT(length == 0 || (length > 0 && data != NULL)); - } - - static Vector<T> New(int length) { - return Vector<T>(NewArray<T>(length), length); - } - - // Returns a vector using the same backing storage as this one, - // spanning from and including 'from', to but not including 'to'. - Vector<T> SubVector(int from, int to) { - SLOW_ASSERT(to <= length_); - SLOW_ASSERT(from < to); - ASSERT(0 <= from); - return Vector<T>(start() + from, to - from); - } - - // Returns the length of the vector. - int length() const { return length_; } - - // Returns whether or not the vector is empty. - bool is_empty() const { return length_ == 0; } - - // Returns the pointer to the start of the data in the vector. 
- T* start() const { return start_; } - - // Access individual vector elements - checks bounds in debug mode. - T& operator[](int index) const { - ASSERT(0 <= index && index < length_); - return start_[index]; - } - - const T& at(int index) const { return operator[](index); } - - T& first() { return start_[0]; } - - T& last() { return start_[length_ - 1]; } - - // Returns a clone of this vector with a new backing store. - Vector<T> Clone() const { - T* result = NewArray<T>(length_); - for (int i = 0; i < length_; i++) result[i] = start_[i]; - return Vector<T>(result, length_); - } - - void Sort(int (*cmp)(const T*, const T*)) { - std::sort(start(), start() + length(), RawComparer(cmp)); - } - - void Sort() { - std::sort(start(), start() + length()); - } - - void Truncate(int length) { - ASSERT(length <= length_); - length_ = length; - } - - // Releases the array underlying this vector. Once disposed the - // vector is empty. - void Dispose() { - DeleteArray(start_); - start_ = NULL; - length_ = 0; - } - - inline Vector<T> operator+(int offset) { - ASSERT(offset < length_); - return Vector<T>(start_ + offset, length_ - offset); - } - - // Factory method for creating empty vectors. - static Vector<T> empty() { return Vector<T>(NULL, 0); } - - template<typename S> - static Vector<T> cast(Vector<S> input) { - return Vector<T>(reinterpret_cast<T*>(input.start()), - input.length() * sizeof(S) / sizeof(T)); - } - - protected: - void set_start(T* start) { start_ = start; } - - private: - T* start_; - int length_; - - class RawComparer { - public: - explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {} - bool operator()(const T& a, const T& b) { - return cmp_(&a, &b) < 0; - } - - private: - int (*cmp_)(const T*, const T*); - }; -}; - - // A pointer that can only be set once and doesn't allow NULL values. 
template<typename T> class SetOncePointer { @@ -508,12 +396,12 @@ bool is_set() const { return pointer_ != NULL; } T* get() const { - ASSERT(pointer_ != NULL); + DCHECK(pointer_ != NULL); return pointer_; } void set(T* value) { - ASSERT(pointer_ == NULL && value != NULL); + DCHECK(pointer_ == NULL && value != NULL); pointer_ = value; } @@ -536,16 +424,14 @@ // When copying, make underlying Vector to reference our buffer. EmbeddedVector(const EmbeddedVector& rhs) : Vector<T>(rhs) { - // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead. - memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize); - set_start(buffer_); + MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize); + this->set_start(buffer_); } EmbeddedVector& operator=(const EmbeddedVector& rhs) { if (this == &rhs) return *this; Vector<T>::operator=(rhs); - // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead. - memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize); + MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize); this->set_start(buffer_); return *this; } @@ -555,44 +441,6 @@ }; -template <typename T> -class ScopedVector : public Vector<T> { - public: - explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { } - ~ScopedVector() { - DeleteArray(this->start()); - } - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector); -}; - -#define STATIC_ASCII_VECTOR(x) \ - v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \ - ARRAY_SIZE(x)-1) - -inline Vector<const char> CStrVector(const char* data) { - return Vector<const char>(data, StrLength(data)); -} - -inline Vector<const uint8_t> OneByteVector(const char* data, int length) { - return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length); -} - -inline Vector<const uint8_t> OneByteVector(const char* data) { - return OneByteVector(data, StrLength(data)); -} - -inline Vector<char> MutableCStrVector(char* data) { - return Vector<char>(data, StrLength(data)); -} - -inline Vector<char> 
MutableCStrVector(char* data, int max) { - int length = StrLength(data); - return Vector<char>(data, (length < max) ? length : max); -} - - /* * A class that collects values into a backing store. * Specialized versions of the class can allow access to the backing store @@ -633,7 +481,7 @@ // A basic Collector will keep this vector valid as long as the Collector // is alive. inline Vector<T> AddBlock(int size, T initial_value) { - ASSERT(size > 0); + DCHECK(size > 0); if (size > current_chunk_.length() - index_) { Grow(size); } @@ -667,7 +515,7 @@ // Write the contents of the collector into the provided vector. void WriteTo(Vector<T> destination) { - ASSERT(size_ <= destination.length()); + DCHECK(size_ <= destination.length()); int position = 0; for (int i = 0; i < chunks_.length(); i++) { Vector<T> chunk = chunks_.at(i); @@ -707,7 +555,7 @@ // Creates a new current chunk, and stores the old chunk in the chunks_ list. void Grow(int min_capacity) { - ASSERT(growth_factor > 1); + DCHECK(growth_factor > 1); int new_capacity; int current_length = current_chunk_.length(); if (current_length < kMinCapacity) { @@ -725,7 +573,7 @@ } } NewChunk(new_capacity); - ASSERT(index_ + min_capacity <= current_chunk_.length()); + DCHECK(index_ + min_capacity <= current_chunk_.length()); } // Before replacing the current chunk, give a subclass the option to move @@ -764,12 +612,12 @@ virtual ~SequenceCollector() {} void StartSequence() { - ASSERT(sequence_start_ == kNoSequence); + DCHECK(sequence_start_ == kNoSequence); sequence_start_ = this->index_; } Vector<T> EndSequence() { - ASSERT(sequence_start_ != kNoSequence); + DCHECK(sequence_start_ != kNoSequence); int sequence_start = sequence_start_; sequence_start_ = kNoSequence; if (sequence_start == this->index_) return Vector<T>(); @@ -778,7 +626,7 @@ // Drops the currently added sequence, and all collected elements in it. 
void DropSequence() { - ASSERT(sequence_start_ != kNoSequence); + DCHECK(sequence_start_ != kNoSequence); int sequence_length = this->index_ - sequence_start_; this->index_ = sequence_start_; this->size_ -= sequence_length; @@ -803,7 +651,7 @@ } int sequence_length = this->index_ - sequence_start_; Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity); - ASSERT(sequence_length < new_chunk.length()); + DCHECK(sequence_length < new_chunk.length()); for (int i = 0; i < sequence_length; i++) { new_chunk[i] = this->current_chunk_[sequence_start_ + i]; } @@ -850,8 +698,8 @@ template<typename lchar, typename rchar> inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) { - ASSERT(sizeof(lchar) <= 2); - ASSERT(sizeof(rchar) <= 2); + DCHECK(sizeof(lchar) <= 2); + DCHECK(sizeof(rchar) <= 2); if (sizeof(lchar) == 1) { if (sizeof(rchar) == 1) { return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs), @@ -878,8 +726,8 @@ // Calculate 10^exponent. inline int TenToThe(int exponent) { - ASSERT(exponent <= 9); - ASSERT(exponent >= 1); + DCHECK(exponent <= 9); + DCHECK(exponent >= 1); int answer = 10; for (int i = 1; i < exponent; i++) answer *= 10; return answer; @@ -920,7 +768,6 @@ INLINE(static Dest cast(const Source& source)) { Dest dest; - // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead. memcpy(&dest, &source, sizeof(dest)); return dest; } @@ -950,11 +797,11 @@ int length() const { return NumElements; } const ElementType& operator[](int i) const { - ASSERT(i < length()); + DCHECK(i < length()); return elems_[i]; } ElementType& operator[](int i) { - ASSERT(i < length()); + DCHECK(i < length()); return elems_[i]; } @@ -1000,7 +847,7 @@ // Get the current position in the builder. int position() const { - ASSERT(!is_finalized()); + DCHECK(!is_finalized()); return position_; } @@ -1011,8 +858,8 @@ // 0-characters; use the Finalize() method to terminate the string // instead. 
void AddCharacter(char c) { - ASSERT(c != '\0'); - ASSERT(!is_finalized() && position_ < buffer_.length()); + DCHECK(c != '\0'); + DCHECK(!is_finalized() && position_ < buffer_.length()); buffer_[position_++] = c; } @@ -1071,9 +918,9 @@ private: T Mask(E element) const { - // The strange typing in ASSERT is necessary to avoid stupid warnings, see: + // The strange typing in DCHECK is necessary to avoid stupid warnings, see: // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680 - ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT)); + DCHECK(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT)); return static_cast<T>(1) << element; } @@ -1100,19 +947,19 @@ // Check number width. inline bool is_intn(int64_t x, unsigned n) { - ASSERT((0 < n) && (n < 64)); + DCHECK((0 < n) && (n < 64)); int64_t limit = static_cast<int64_t>(1) << (n - 1); return (-limit <= x) && (x < limit); } inline bool is_uintn(int64_t x, unsigned n) { - ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte))); + DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte))); return !(x >> n); } template <class T> inline T truncate_to_intn(T x, unsigned n) { - ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte))); + DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte))); return (x & ((static_cast<T>(1) << n) - 1)); } @@ -1206,6 +1053,488 @@ C* container_; }; + +// ---------------------------------------------------------------------------- +// I/O support. + +#if __GNUC__ >= 4 +// On gcc we can ask the compiler to check the types of %d-style format +// specifiers and their associated arguments. TODO(erikcorry) fix this +// so it works on MacOSX. +#if defined(__MACH__) && defined(__APPLE__) +#define PRINTF_CHECKING +#define FPRINTF_CHECKING +#define PRINTF_METHOD_CHECKING +#define FPRINTF_METHOD_CHECKING +#else // MacOsX. 
+#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2))) +#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3))) +#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3))) +#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4))) +#endif +#else +#define PRINTF_CHECKING +#define FPRINTF_CHECKING +#define PRINTF_METHOD_CHECKING +#define FPRINTF_METHOD_CHECKING +#endif + +// Our version of printf(). +void PRINTF_CHECKING PrintF(const char* format, ...); +void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...); + +// Prepends the current process ID to the output. +void PRINTF_CHECKING PrintPID(const char* format, ...); + +// Safe formatting print. Ensures that str is always null-terminated. +// Returns the number of chars written, or -1 if output was truncated. +int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...); +int VSNPrintF(Vector<char> str, const char* format, va_list args); + +void StrNCpy(Vector<char> dest, const char* src, size_t n); + +// Our version of fflush. +void Flush(FILE* out); + +inline void Flush() { + Flush(stdout); +} + + +// Read a line of characters after printing the prompt to stdout. The resulting +// char* needs to be disposed off with DeleteArray by the caller. +char* ReadLine(const char* prompt); + + +// Read and return the raw bytes in a file. the size of the buffer is returned +// in size. +// The returned buffer must be freed by the caller. +byte* ReadBytes(const char* filename, int* size, bool verbose = true); + + +// Append size chars from str to the file given by filename. +// The file is overwritten. Returns the number of chars written. +int AppendChars(const char* filename, + const char* str, + int size, + bool verbose = true); + + +// Write size chars from str to the file given by filename. +// The file is overwritten. Returns the number of chars written. 
+int WriteChars(const char* filename, + const char* str, + int size, + bool verbose = true); + + +// Write size bytes to the file given by filename. +// The file is overwritten. Returns the number of bytes written. +int WriteBytes(const char* filename, + const byte* bytes, + int size, + bool verbose = true); + + +// Write the C code +// const char* <varname> = "<str>"; +// const int <varname>_len = <len>; +// to the file given by filename. Only the first len chars are written. +int WriteAsCFile(const char* filename, const char* varname, + const char* str, int size, bool verbose = true); + + +// ---------------------------------------------------------------------------- +// Data structures + +template <typename T> +inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms, + int length) { + return Vector< Handle<Object> >( + reinterpret_cast<v8::internal::Handle<Object>*>(elms), length); +} + + +// ---------------------------------------------------------------------------- +// Memory + +// Copies words from |src| to |dst|. The data spans must not overlap. +template <typename T> +inline void CopyWords(T* dst, const T* src, size_t num_words) { + STATIC_ASSERT(sizeof(T) == kPointerSize); + // TODO(mvstanton): disabled because mac builds are bogus failing on this + // assert. They are doing a signed comparison. Investigate in + // the morning. + // DCHECK(Min(dst, const_cast<T*>(src)) + num_words <= + // Max(dst, const_cast<T*>(src))); + DCHECK(num_words > 0); + + // Use block copying MemCopy if the segment we're copying is + // enough to justify the extra call/setup overhead. + static const size_t kBlockCopyLimit = 16; + + if (num_words < kBlockCopyLimit) { + do { + num_words--; + *dst++ = *src++; + } while (num_words > 0); + } else { + MemCopy(dst, src, num_words * kPointerSize); + } +} + + +// Copies words from |src| to |dst|. No restrictions. 
+template <typename T> +inline void MoveWords(T* dst, const T* src, size_t num_words) { + STATIC_ASSERT(sizeof(T) == kPointerSize); + DCHECK(num_words > 0); + + // Use block copying MemCopy if the segment we're copying is + // enough to justify the extra call/setup overhead. + static const size_t kBlockCopyLimit = 16; + + if (num_words < kBlockCopyLimit && + ((dst < src) || (dst >= (src + num_words * kPointerSize)))) { + T* end = dst + num_words; + do { + num_words--; + *dst++ = *src++; + } while (num_words > 0); + } else { + MemMove(dst, src, num_words * kPointerSize); + } +} + + +// Copies data from |src| to |dst|. The data spans must not overlap. +template <typename T> +inline void CopyBytes(T* dst, const T* src, size_t num_bytes) { + STATIC_ASSERT(sizeof(T) == 1); + DCHECK(Min(dst, const_cast<T*>(src)) + num_bytes <= + Max(dst, const_cast<T*>(src))); + if (num_bytes == 0) return; + + // Use block copying MemCopy if the segment we're copying is + // enough to justify the extra call/setup overhead. + static const int kBlockCopyLimit = kMinComplexMemCopy; + + if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) { + do { + num_bytes--; + *dst++ = *src++; + } while (num_bytes > 0); + } else { + MemCopy(dst, src, num_bytes); + } +} + + +template <typename T, typename U> +inline void MemsetPointer(T** dest, U* value, int counter) { +#ifdef DEBUG + T* a = NULL; + U* b = NULL; + a = b; // Fake assignment to check assignability. + USE(a); +#endif // DEBUG +#if V8_HOST_ARCH_IA32 +#define STOS "stosl" +#elif V8_HOST_ARCH_X64 +#if V8_HOST_ARCH_32_BIT +#define STOS "addr32 stosl" +#else +#define STOS "stosq" +#endif +#endif +#if defined(__native_client__) + // This STOS sequence does not validate for x86_64 Native Client. + // Here we #undef STOS to force use of the slower C version. + // TODO(bradchen): Profile V8 and implement a faster REP STOS + // here if the profile indicates it matters. 
+#undef STOS +#endif + +#if defined(MEMORY_SANITIZER) + // MemorySanitizer does not understand inline assembly. +#undef STOS +#endif + +#if defined(__GNUC__) && defined(STOS) + asm volatile( + "cld;" + "rep ; " STOS + : "+&c" (counter), "+&D" (dest) + : "a" (value) + : "memory", "cc"); +#else + for (int i = 0; i < counter; i++) { + dest[i] = value; + } +#endif + +#undef STOS +} + + +// Simple wrapper that allows an ExternalString to refer to a +// Vector<const char>. Doesn't assume ownership of the data. +class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource { + public: + explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {} + + virtual const char* data() const { return data_.start(); } + + virtual size_t length() const { return data_.length(); } + + private: + Vector<const char> data_; +}; + + +// Simple support to read a file into a 0-terminated C-string. +// The returned buffer must be freed by the caller. +// On return, *exits tells whether the file existed. +Vector<const char> ReadFile(const char* filename, + bool* exists, + bool verbose = true); +Vector<const char> ReadFile(FILE* file, + bool* exists, + bool verbose = true); + + +template <typename sourcechar, typename sinkchar> +INLINE(static void CopyCharsUnsigned(sinkchar* dest, + const sourcechar* src, + int chars)); +#if defined(V8_HOST_ARCH_ARM) +INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars)); +INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars)); +INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); +#elif defined(V8_HOST_ARCH_MIPS) +INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars)); +INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); +#endif + +// Copy from ASCII/16bit chars to ASCII/16bit chars. 
+template <typename sourcechar, typename sinkchar> +INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars)); + +template<typename sourcechar, typename sinkchar> +void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { + DCHECK(sizeof(sourcechar) <= 2); + DCHECK(sizeof(sinkchar) <= 2); + if (sizeof(sinkchar) == 1) { + if (sizeof(sourcechar) == 1) { + CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), + reinterpret_cast<const uint8_t*>(src), + chars); + } else { + CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), + reinterpret_cast<const uint16_t*>(src), + chars); + } + } else { + if (sizeof(sourcechar) == 1) { + CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), + reinterpret_cast<const uint8_t*>(src), + chars); + } else { + CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), + reinterpret_cast<const uint16_t*>(src), + chars); + } + } +} + +template <typename sourcechar, typename sinkchar> +void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) { + sinkchar* limit = dest + chars; +#ifdef V8_HOST_CAN_READ_UNALIGNED + if (sizeof(*dest) == sizeof(*src)) { + if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) { + MemCopy(dest, src, chars * sizeof(*dest)); + return; + } + // Number of characters in a uintptr_t. + static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT + DCHECK(dest + kStepSize > dest); // Check for overflow. 
+ while (dest + kStepSize <= limit) { + *reinterpret_cast<uintptr_t*>(dest) = + *reinterpret_cast<const uintptr_t*>(src); + dest += kStepSize; + src += kStepSize; + } + } +#endif + while (dest < limit) { + *dest++ = static_cast<sinkchar>(*src++); + } +} + + +#if defined(V8_HOST_ARCH_ARM) +void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { + switch (static_cast<unsigned>(chars)) { + case 0: + break; + case 1: + *dest = *src; + break; + case 2: + memcpy(dest, src, 2); + break; + case 3: + memcpy(dest, src, 3); + break; + case 4: + memcpy(dest, src, 4); + break; + case 5: + memcpy(dest, src, 5); + break; + case 6: + memcpy(dest, src, 6); + break; + case 7: + memcpy(dest, src, 7); + break; + case 8: + memcpy(dest, src, 8); + break; + case 9: + memcpy(dest, src, 9); + break; + case 10: + memcpy(dest, src, 10); + break; + case 11: + memcpy(dest, src, 11); + break; + case 12: + memcpy(dest, src, 12); + break; + case 13: + memcpy(dest, src, 13); + break; + case 14: + memcpy(dest, src, 14); + break; + case 15: + memcpy(dest, src, 15); + break; + default: + MemCopy(dest, src, chars); + break; + } +} + + +void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) { + if (chars >= kMinComplexConvertMemCopy) { + MemCopyUint16Uint8(dest, src, chars); + } else { + MemCopyUint16Uint8Wrapper(dest, src, chars); + } +} + + +void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { + switch (static_cast<unsigned>(chars)) { + case 0: + break; + case 1: + *dest = *src; + break; + case 2: + memcpy(dest, src, 4); + break; + case 3: + memcpy(dest, src, 6); + break; + case 4: + memcpy(dest, src, 8); + break; + case 5: + memcpy(dest, src, 10); + break; + case 6: + memcpy(dest, src, 12); + break; + case 7: + memcpy(dest, src, 14); + break; + default: + MemCopy(dest, src, chars * sizeof(*dest)); + break; + } +} + + +#elif defined(V8_HOST_ARCH_MIPS) +void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { + if (chars < 
kMinComplexMemCopy) { + memcpy(dest, src, chars); + } else { + MemCopy(dest, src, chars); + } +} + +void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { + if (chars < kMinComplexMemCopy) { + memcpy(dest, src, chars * sizeof(*dest)); + } else { + MemCopy(dest, src, chars * sizeof(*dest)); + } +} +#endif + + +class StringBuilder : public SimpleStringBuilder { + public: + explicit StringBuilder(int size) : SimpleStringBuilder(size) { } + StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { } + + // Add formatted contents to the builder just like printf(). + void AddFormatted(const char* format, ...); + + // Add formatted contents like printf based on a va_list. + void AddFormattedList(const char* format, va_list list); + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); +}; + + +bool DoubleToBoolean(double d); + +template <typename Stream> +bool StringToArrayIndex(Stream* stream, uint32_t* index) { + uint16_t ch = stream->GetNext(); + + // If the string begins with a '0' character, it must only consist + // of it to be a legal array index. + if (ch == '0') { + *index = 0; + return !stream->HasMore(); + } + + // Convert string to uint32 array index; character by character. + int d = ch - '0'; + if (d < 0 || d > 9) return false; + uint32_t result = d; + while (stream->HasMore()) { + d = stream->GetNext() - '0'; + if (d < 0 || d > 9) return false; + // Check that the new result is below the 32 bit limit. + if (result > 429496729U - ((d > 5) ? 1 : 0)) return false; + result = (result * 10) + d; + } + + *index = result; + return true; +} + + } } // namespace v8::internal #endif // V8_UTILS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/utils-inl.h nodejs-0.11.15/deps/v8/src/utils-inl.h --- nodejs-0.11.13/deps/v8/src/utils-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/utils-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_UTILS_INL_H_ #define V8_UTILS_INL_H_ -#include "list-inl.h" +#include "src/list-inl.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/v8.cc nodejs-0.11.15/deps/v8/src/v8.cc --- nodejs-0.11.13/deps/v8/src/v8.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,123 +1,59 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#include "v8.h" - -#include "assembler.h" -#include "isolate.h" -#include "elements.h" -#include "bootstrapper.h" -#include "debug.h" -#include "deoptimizer.h" -#include "frames.h" -#include "heap-profiler.h" -#include "hydrogen.h" -#ifdef V8_USE_DEFAULT_PLATFORM -#include "libplatform/default-platform.h" -#endif -#include "lithium-allocator.h" -#include "objects.h" -#include "once.h" -#include "platform.h" -#include "sampler.h" -#include "runtime-profiler.h" -#include "serialize.h" -#include "store-buffer.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/assembler.h" +#include "src/base/once.h" +#include "src/base/platform/platform.h" +#include "src/bootstrapper.h" +#include "src/compiler/pipeline.h" +#include "src/debug.h" +#include "src/deoptimizer.h" +#include "src/elements.h" +#include "src/frames.h" +#include "src/heap/store-buffer.h" +#include "src/heap-profiler.h" +#include "src/hydrogen.h" +#include "src/isolate.h" +#include "src/lithium-allocator.h" +#include "src/objects.h" +#include "src/runtime-profiler.h" +#include "src/sampler.h" +#include "src/serialize.h" + namespace v8 { namespace internal { V8_DECLARE_ONCE(init_once); -List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL; v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL; v8::Platform* V8::platform_ = NULL; bool V8::Initialize(Deserializer* des) { InitializeOncePerProcess(); - - // The current thread may not yet had entered an isolate to run. - // Note the Isolate::Current() may be non-null because for various - // initialization purposes an initializing thread may be assigned an isolate - // but not actually enter it. 
- if (i::Isolate::CurrentPerIsolateThreadData() == NULL) { - i::Isolate::EnterDefaultIsolate(); - } - - ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL); - ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id().Equals( - i::ThreadId::Current())); - ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() == - i::Isolate::Current()); - - Isolate* isolate = Isolate::Current(); + Isolate* isolate = Isolate::UncheckedCurrent(); + if (isolate == NULL) return true; if (isolate->IsDead()) return false; if (isolate->IsInitialized()) return true; -#ifdef V8_USE_DEFAULT_PLATFORM - DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_); - platform->SetThreadPoolSize(isolate->max_available_threads()); - // We currently only start the threads early, if we know that we'll use them. - if (FLAG_job_based_sweeping) platform->EnsureInitialized(); -#endif - return isolate->Init(des); } void V8::TearDown() { - Isolate* isolate = Isolate::Current(); - ASSERT(isolate->IsDefaultIsolate()); - if (!isolate->IsInitialized()) return; - - // The isolate has to be torn down before clearing the LOperand - // caches so that the optimizing compiler thread (if running) - // doesn't see an inconsistent view of the lithium instructions. - isolate->TearDown(); - delete isolate; - Bootstrapper::TearDownExtensions(); ElementsAccessor::TearDown(); LOperand::TearDownCaches(); + compiler::Pipeline::TearDown(); ExternalReference::TearDownMathExpData(); RegisteredExtension::UnregisterAll(); Isolate::GlobalTearDown(); - delete call_completed_callbacks_; - call_completed_callbacks_ = NULL; - Sampler::TearDown(); - -#ifdef V8_USE_DEFAULT_PLATFORM - DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_); - platform_ = NULL; - delete platform; -#endif } @@ -127,63 +63,6 @@ } -void V8::AddCallCompletedCallback(CallCompletedCallback callback) { - if (call_completed_callbacks_ == NULL) { // Lazy init. 
- call_completed_callbacks_ = new List<CallCompletedCallback>(); - } - for (int i = 0; i < call_completed_callbacks_->length(); i++) { - if (callback == call_completed_callbacks_->at(i)) return; - } - call_completed_callbacks_->Add(callback); -} - - -void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) { - if (call_completed_callbacks_ == NULL) return; - for (int i = 0; i < call_completed_callbacks_->length(); i++) { - if (callback == call_completed_callbacks_->at(i)) { - call_completed_callbacks_->Remove(i); - } - } -} - - -void V8::FireCallCompletedCallback(Isolate* isolate) { - bool has_call_completed_callbacks = call_completed_callbacks_ != NULL; - bool run_microtasks = isolate->autorun_microtasks() && - isolate->microtask_pending(); - if (!has_call_completed_callbacks && !run_microtasks) return; - - HandleScopeImplementer* handle_scope_implementer = - isolate->handle_scope_implementer(); - if (!handle_scope_implementer->CallDepthIsZero()) return; - // Fire callbacks. Increase call depth to prevent recursive callbacks. - handle_scope_implementer->IncrementCallDepth(); - if (run_microtasks) Execution::RunMicrotasks(isolate); - if (has_call_completed_callbacks) { - for (int i = 0; i < call_completed_callbacks_->length(); i++) { - call_completed_callbacks_->at(i)(); - } - } - handle_scope_implementer->DecrementCallDepth(); -} - - -void V8::RunMicrotasks(Isolate* isolate) { - if (!isolate->microtask_pending()) - return; - - HandleScopeImplementer* handle_scope_implementer = - isolate->handle_scope_implementer(); - ASSERT(handle_scope_implementer->CallDepthIsZero()); - - // Increase call depth to prevent recursive callbacks. 
- handle_scope_implementer->IncrementCallDepth(); - Execution::RunMicrotasks(isolate); - handle_scope_implementer->DecrementCallDepth(); -} - - void V8::InitializeOncePerProcessImpl() { FlagList::EnforceFlagImplications(); @@ -195,17 +74,23 @@ if (FLAG_stress_compaction) { FLAG_force_marking_deque_overflows = true; FLAG_gc_global = true; - FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2; + FLAG_max_semi_space_size = 1; } -#ifdef V8_USE_DEFAULT_PLATFORM - platform_ = new DefaultPlatform; -#endif + base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap); + Sampler::SetUp(); - CPU::SetUp(); - OS::PostSetUp(); + CpuFeatures::Probe(false); + init_memcopy_functions(); + // The custom exp implementation needs 16KB of lookup data; initialize it + // on demand. + init_fast_sqrt_function(); +#ifdef _WIN64 + init_modulo_function(); +#endif ElementsAccessor::InitializeOncePerProcess(); LOperand::SetUpCaches(); + compiler::Pipeline::SetUp(); SetUpJSCallerSavedCodeData(); ExternalReference::SetUp(); Bootstrapper::InitializeOncePerProcess(); @@ -213,25 +98,25 @@ void V8::InitializeOncePerProcess() { - CallOnce(&init_once, &InitializeOncePerProcessImpl); + base::CallOnce(&init_once, &InitializeOncePerProcessImpl); } void V8::InitializePlatform(v8::Platform* platform) { - ASSERT(!platform_); - ASSERT(platform); + CHECK(!platform_); + CHECK(platform); platform_ = platform; } void V8::ShutdownPlatform() { - ASSERT(platform_); + CHECK(platform_); platform_ = NULL; } v8::Platform* V8::GetCurrentPlatform() { - ASSERT(platform_); + DCHECK(platform_); return platform_; } diff -Nru nodejs-0.11.13/deps/v8/src/v8checks.h nodejs-0.11.15/deps/v8/src/v8checks.h --- nodejs-0.11.13/deps/v8/src/v8checks.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8checks.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#ifndef V8_V8CHECKS_H_ -#define V8_V8CHECKS_H_ - -#include "checks.h" - -namespace v8 { - class Value; - template <class T> class Handle; - -namespace internal { - intptr_t HeapObjectTagMask(); - -} } // namespace v8::internal - - -void CheckNonEqualsHelper(const char* file, - int line, - const char* unexpected_source, - v8::Handle<v8::Value> unexpected, - const char* value_source, - v8::Handle<v8::Value> value); - -void CheckEqualsHelper(const char* file, - int line, - const char* expected_source, - v8::Handle<v8::Value> expected, - const char* value_source, - v8::Handle<v8::Value> value); - -#define ASSERT_TAG_ALIGNED(address) \ - ASSERT((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0) - -#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0) - -#endif // V8_V8CHECKS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/v8conversions.cc nodejs-0.11.15/deps/v8/src/v8conversions.cc --- nodejs-0.11.13/deps/v8/src/v8conversions.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8conversions.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include <stdarg.h> -#include <limits.h> - -#include "v8.h" - -#include "conversions-inl.h" -#include "v8conversions.h" -#include "dtoa.h" -#include "factory.h" -#include "strtod.h" - -namespace v8 { -namespace internal { - -namespace { - -// C++-style iterator adaptor for StringCharacterStream -// (unlike C++ iterators the end-marker has different type). -class StringCharacterStreamIterator { - public: - class EndMarker {}; - - explicit StringCharacterStreamIterator(StringCharacterStream* stream); - - uint16_t operator*() const; - void operator++(); - bool operator==(EndMarker const&) const { return end_; } - bool operator!=(EndMarker const& m) const { return !end_; } - - private: - StringCharacterStream* const stream_; - uint16_t current_; - bool end_; -}; - - -StringCharacterStreamIterator::StringCharacterStreamIterator( - StringCharacterStream* stream) : stream_(stream) { - ++(*this); -} - -uint16_t StringCharacterStreamIterator::operator*() const { - return current_; -} - - -void StringCharacterStreamIterator::operator++() { - end_ = !stream_->HasMore(); - if (!end_) { - current_ = stream_->GetNext(); - } -} -} // End anonymous namespace. 
- - -double StringToDouble(UnicodeCache* unicode_cache, - String* str, int flags, double empty_string_val) { - StringShape shape(str); - // TODO(dcarney): Use a Visitor here. - if (shape.IsSequentialAscii()) { - const uint8_t* begin = SeqOneByteString::cast(str)->GetChars(); - const uint8_t* end = begin + str->length(); - return InternalStringToDouble(unicode_cache, begin, end, flags, - empty_string_val); - } else if (shape.IsSequentialTwoByte()) { - const uc16* begin = SeqTwoByteString::cast(str)->GetChars(); - const uc16* end = begin + str->length(); - return InternalStringToDouble(unicode_cache, begin, end, flags, - empty_string_val); - } else { - ConsStringIteratorOp op; - StringCharacterStream stream(str, &op); - return InternalStringToDouble(unicode_cache, - StringCharacterStreamIterator(&stream), - StringCharacterStreamIterator::EndMarker(), - flags, - empty_string_val); - } -} - - -double StringToInt(UnicodeCache* unicode_cache, - String* str, - int radix) { - StringShape shape(str); - // TODO(dcarney): Use a Visitor here. 
- if (shape.IsSequentialAscii()) { - const uint8_t* begin = SeqOneByteString::cast(str)->GetChars(); - const uint8_t* end = begin + str->length(); - return InternalStringToInt(unicode_cache, begin, end, radix); - } else if (shape.IsSequentialTwoByte()) { - const uc16* begin = SeqTwoByteString::cast(str)->GetChars(); - const uc16* end = begin + str->length(); - return InternalStringToInt(unicode_cache, begin, end, radix); - } else { - ConsStringIteratorOp op; - StringCharacterStream stream(str, &op); - return InternalStringToInt(unicode_cache, - StringCharacterStreamIterator(&stream), - StringCharacterStreamIterator::EndMarker(), - radix); - } -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/v8conversions.h nodejs-0.11.15/deps/v8/src/v8conversions.h --- nodejs-0.11.13/deps/v8/src/v8conversions.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8conversions.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_V8CONVERSIONS_H_ -#define V8_V8CONVERSIONS_H_ - -#include "conversions.h" - -namespace v8 { -namespace internal { - - -static inline bool IsMinusZero(double value) { - static const DoubleRepresentation minus_zero(-0.0); - return DoubleRepresentation(value) == minus_zero; -} - - -// Integer32 is an integer that can be represented as a signed 32-bit -// integer. It has to be in the range [-2^31, 2^31 - 1]. -// We also have to check for negative 0 as it is not an Integer32. -static inline bool IsInt32Double(double value) { - return !IsMinusZero(value) && - value >= kMinInt && - value <= kMaxInt && - value == FastI2D(FastD2I(value)); -} - - -// Convert from Number object to C integer. -inline int32_t NumberToInt32(Object* number) { - if (number->IsSmi()) return Smi::cast(number)->value(); - return DoubleToInt32(number->Number()); -} - - -inline uint32_t NumberToUint32(Object* number) { - if (number->IsSmi()) return Smi::cast(number)->value(); - return DoubleToUint32(number->Number()); -} - - -// Converts a string into a double value according to ECMA-262 9.3.1 -double StringToDouble(UnicodeCache* unicode_cache, - String* str, - int flags, - double empty_string_val = 0); - -// Converts a string into an integer. 
-double StringToInt(UnicodeCache* unicode_cache, String* str, int radix); - -inline bool TryNumberToSize(Isolate* isolate, - Object* number, size_t* result) { - SealHandleScope shs(isolate); - if (number->IsSmi()) { - int value = Smi::cast(number)->value(); - ASSERT( - static_cast<unsigned>(Smi::kMaxValue) - <= std::numeric_limits<size_t>::max()); - if (value >= 0) { - *result = static_cast<size_t>(value); - return true; - } - return false; - } else { - ASSERT(number->IsHeapNumber()); - double value = HeapNumber::cast(number)->value(); - if (value >= 0 && - value <= std::numeric_limits<size_t>::max()) { - *result = static_cast<size_t>(value); - return true; - } else { - return false; - } - } -} - -// Converts a number into size_t. -inline size_t NumberToSize(Isolate* isolate, - Object* number) { - size_t result = 0; - bool is_valid = TryNumberToSize(isolate, number, &result); - CHECK(is_valid); - return result; -} - -} } // namespace v8::internal - -#endif // V8_V8CONVERSIONS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/v8-counters.cc nodejs-0.11.15/deps/v8/src/v8-counters.cc --- nodejs-0.11.13/deps/v8/src/v8-counters.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8-counters.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "v8-counters.h" - -namespace v8 { -namespace internal { - -Counters::Counters(Isolate* isolate) { -#define HT(name, caption) \ - name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate); - HISTOGRAM_TIMER_LIST(HT) -#undef HT - -#define HP(name, caption) \ - name##_ = Histogram(#caption, 0, 101, 100, isolate); - HISTOGRAM_PERCENTAGE_LIST(HP) -#undef HP - -#define HM(name, caption) \ - name##_ = Histogram(#caption, 1000, 500000, 50, isolate); - HISTOGRAM_MEMORY_LIST(HM) -#undef HM - -#define SC(name, caption) \ - name##_ = StatsCounter(isolate, "c:" #caption); - - STATS_COUNTER_LIST_1(SC) - STATS_COUNTER_LIST_2(SC) -#undef SC - -#define SC(name) \ - count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \ - size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name); - INSTANCE_TYPE_LIST(SC) -#undef SC - -#define SC(name) \ - count_of_CODE_TYPE_##name##_ = \ - StatsCounter(isolate, "c:" "V8.CountOf_CODE_TYPE-" #name); \ - size_of_CODE_TYPE_##name##_ = \ - 
StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name); - CODE_KIND_LIST(SC) -#undef SC - -#define SC(name) \ - count_of_FIXED_ARRAY_##name##_ = \ - StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \ - size_of_FIXED_ARRAY_##name##_ = \ - StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name); - FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) -#undef SC - -#define SC(name) \ - count_of_CODE_AGE_##name##_ = \ - StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \ - size_of_CODE_AGE_##name##_ = \ - StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name); - CODE_AGE_LIST_COMPLETE(SC) -#undef SC -} - - -void Counters::ResetHistograms() { -#define HT(name, caption) name##_.Reset(); - HISTOGRAM_TIMER_LIST(HT) -#undef HT - -#define HP(name, caption) name##_.Reset(); - HISTOGRAM_PERCENTAGE_LIST(HP) -#undef HP - -#define HM(name, caption) name##_.Reset(); - HISTOGRAM_MEMORY_LIST(HM) -#undef HM -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/v8-counters.h nodejs-0.11.15/deps/v8/src/v8-counters.h --- nodejs-0.11.13/deps/v8/src/v8-counters.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8-counters.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,435 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_V8_COUNTERS_H_ -#define V8_V8_COUNTERS_H_ - -#include "allocation.h" -#include "counters.h" -#include "objects.h" -#include "v8globals.h" - -namespace v8 { -namespace internal { - -#define HISTOGRAM_TIMER_LIST(HT) \ - /* Garbage collection timers. */ \ - HT(gc_compactor, V8.GCCompactor) \ - HT(gc_scavenger, V8.GCScavenger) \ - HT(gc_context, V8.GCContext) /* GC context cleanup time */ \ - /* Parsing timers. */ \ - HT(parse, V8.Parse) \ - HT(parse_lazy, V8.ParseLazy) \ - HT(pre_parse, V8.PreParse) \ - /* Total compilation times. */ \ - HT(compile, V8.Compile) \ - HT(compile_eval, V8.CompileEval) \ - HT(compile_lazy, V8.CompileLazy) - -#define HISTOGRAM_PERCENTAGE_LIST(HP) \ - /* Heap fragmentation. 
*/ \ - HP(external_fragmentation_total, \ - V8.MemoryExternalFragmentationTotal) \ - HP(external_fragmentation_old_pointer_space, \ - V8.MemoryExternalFragmentationOldPointerSpace) \ - HP(external_fragmentation_old_data_space, \ - V8.MemoryExternalFragmentationOldDataSpace) \ - HP(external_fragmentation_code_space, \ - V8.MemoryExternalFragmentationCodeSpace) \ - HP(external_fragmentation_map_space, \ - V8.MemoryExternalFragmentationMapSpace) \ - HP(external_fragmentation_cell_space, \ - V8.MemoryExternalFragmentationCellSpace) \ - HP(external_fragmentation_property_cell_space, \ - V8.MemoryExternalFragmentationPropertyCellSpace) \ - HP(external_fragmentation_lo_space, \ - V8.MemoryExternalFragmentationLoSpace) \ - /* Percentages of heap committed to each space. */ \ - HP(heap_fraction_new_space, \ - V8.MemoryHeapFractionNewSpace) \ - HP(heap_fraction_old_pointer_space, \ - V8.MemoryHeapFractionOldPointerSpace) \ - HP(heap_fraction_old_data_space, \ - V8.MemoryHeapFractionOldDataSpace) \ - HP(heap_fraction_code_space, \ - V8.MemoryHeapFractionCodeSpace) \ - HP(heap_fraction_map_space, \ - V8.MemoryHeapFractionMapSpace) \ - HP(heap_fraction_cell_space, \ - V8.MemoryHeapFractionCellSpace) \ - HP(heap_fraction_property_cell_space, \ - V8.MemoryHeapFractionPropertyCellSpace) \ - HP(heap_fraction_lo_space, \ - V8.MemoryHeapFractionLoSpace) \ - /* Percentage of crankshafted codegen. 
*/ \ - HP(codegen_fraction_crankshaft, \ - V8.CodegenFractionCrankshaft) \ - - -#define HISTOGRAM_MEMORY_LIST(HM) \ - HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \ - HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \ - HM(heap_sample_map_space_committed, \ - V8.MemoryHeapSampleMapSpaceCommitted) \ - HM(heap_sample_cell_space_committed, \ - V8.MemoryHeapSampleCellSpaceCommitted) \ - HM(heap_sample_property_cell_space_committed, \ - V8.MemoryHeapSamplePropertyCellSpaceCommitted) \ - HM(heap_sample_code_space_committed, \ - V8.MemoryHeapSampleCodeSpaceCommitted) \ - HM(heap_sample_maximum_committed, \ - V8.MemoryHeapSampleMaximumCommitted) \ - - -// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC -// Intellisense to crash. It was broken into two macros (each of length 40 -// lines) rather than one macro (of length about 80 lines) to work around -// this problem. Please avoid using recursive macros of this length when -// possible. -#define STATS_COUNTER_LIST_1(SC) \ - /* Global Handle Count*/ \ - SC(global_handles, V8.GlobalHandles) \ - /* OS Memory allocated */ \ - SC(memory_allocated, V8.OsMemoryAllocated) \ - SC(normalized_maps, V8.NormalizedMaps) \ - SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \ - SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \ - SC(alive_after_last_gc, V8.AliveAfterLastGC) \ - SC(objs_since_last_young, V8.ObjsSinceLastYoung) \ - SC(objs_since_last_full, V8.ObjsSinceLastFull) \ - SC(string_table_capacity, V8.StringTableCapacity) \ - SC(number_of_symbols, V8.NumberOfSymbols) \ - SC(script_wrappers, V8.ScriptWrappers) \ - SC(call_initialize_stubs, V8.CallInitializeStubs) \ - SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \ - SC(call_normal_stubs, V8.CallNormalStubs) \ - SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \ - SC(arguments_adaptors, V8.ArgumentsAdaptors) \ - SC(compilation_cache_hits, V8.CompilationCacheHits) \ - SC(compilation_cache_misses, 
V8.CompilationCacheMisses) \ - SC(string_ctor_calls, V8.StringConstructorCalls) \ - SC(string_ctor_conversions, V8.StringConstructorConversions) \ - SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \ - SC(string_ctor_string_value, V8.StringConstructorStringValue) \ - SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \ - /* Amount of evaled source code. */ \ - SC(total_eval_size, V8.TotalEvalSize) \ - /* Amount of loaded source code. */ \ - SC(total_load_size, V8.TotalLoadSize) \ - /* Amount of parsed source code. */ \ - SC(total_parse_size, V8.TotalParseSize) \ - /* Amount of source code skipped over using preparsing. */ \ - SC(total_preparse_skipped, V8.TotalPreparseSkipped) \ - /* Number of symbol lookups skipped using preparsing */ \ - SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \ - /* Amount of compiled source code. */ \ - SC(total_compile_size, V8.TotalCompileSize) \ - /* Amount of source code compiled with the full codegen. */ \ - SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \ - /* Number of contexts created from scratch. */ \ - SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \ - /* Number of contexts created by partial snapshot. */ \ - SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \ - /* Number of code objects found from pc. */ \ - SC(pc_to_code, V8.PcToCode) \ - SC(pc_to_code_cached, V8.PcToCodeCached) \ - /* The store-buffer implementation of the write barrier. */ \ - SC(store_buffer_compactions, V8.StoreBufferCompactions) \ - SC(store_buffer_overflows, V8.StoreBufferOverflows) - - -#define STATS_COUNTER_LIST_2(SC) \ - /* Number of code stubs. */ \ - SC(code_stubs, V8.CodeStubs) \ - /* Amount of stub code. */ \ - SC(total_stubs_code_size, V8.TotalStubsCodeSize) \ - /* Amount of (JS) compiled code. 
*/ \ - SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \ - SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \ - SC(gc_compactor_caused_by_promoted_data, \ - V8.GCCompactorCausedByPromotedData) \ - SC(gc_compactor_caused_by_oldspace_exhaustion, \ - V8.GCCompactorCausedByOldspaceExhaustion) \ - SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \ - SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \ - /* How is the generic keyed-load stub used? */ \ - SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \ - SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \ - SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \ - SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \ - SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \ - SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \ - /* How is the generic keyed-call stub used? */ \ - SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \ - SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \ - SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \ - SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \ - SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \ - SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \ - SC(named_load_global_stub, V8.NamedLoadGlobalStub) \ - SC(named_store_global_inline, V8.NamedStoreGlobalInline) \ - SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \ - SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \ - SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \ - SC(store_normal_miss, V8.StoreNormalMiss) \ - SC(store_normal_hit, V8.StoreNormalHit) \ - SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \ - SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \ - SC(cow_arrays_converted, V8.COWArraysConverted) \ - SC(call_miss, V8.CallMiss) \ - SC(keyed_call_miss, V8.KeyedCallMiss) \ - SC(load_miss, 
V8.LoadMiss) \ - SC(keyed_load_miss, V8.KeyedLoadMiss) \ - SC(call_const, V8.CallConst) \ - SC(call_const_fast_api, V8.CallConstFastApi) \ - SC(call_const_interceptor, V8.CallConstInterceptor) \ - SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \ - SC(call_global_inline, V8.CallGlobalInline) \ - SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \ - SC(constructed_objects, V8.ConstructedObjects) \ - SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \ - SC(negative_lookups, V8.NegativeLookups) \ - SC(negative_lookups_miss, V8.NegativeLookupsMiss) \ - SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \ - SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \ - SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \ - SC(array_function_runtime, V8.ArrayFunctionRuntime) \ - SC(array_function_native, V8.ArrayFunctionNative) \ - SC(for_in, V8.ForIn) \ - SC(enum_cache_hits, V8.EnumCacheHits) \ - SC(enum_cache_misses, V8.EnumCacheMisses) \ - SC(zone_segment_bytes, V8.ZoneSegmentBytes) \ - SC(fast_new_closure_total, V8.FastNewClosureTotal) \ - SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \ - SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \ - SC(string_add_runtime, V8.StringAddRuntime) \ - SC(string_add_native, V8.StringAddNative) \ - SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \ - SC(sub_string_runtime, V8.SubStringRuntime) \ - SC(sub_string_native, V8.SubStringNative) \ - SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \ - SC(string_compare_native, V8.StringCompareNative) \ - SC(string_compare_runtime, V8.StringCompareRuntime) \ - SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \ - SC(regexp_entry_native, V8.RegExpEntryNative) \ - SC(number_to_string_native, V8.NumberToStringNative) \ - SC(number_to_string_runtime, V8.NumberToStringRuntime) \ - SC(math_acos, V8.MathAcos) \ - SC(math_asin, V8.MathAsin) \ - SC(math_atan, 
V8.MathAtan) \ - SC(math_atan2, V8.MathAtan2) \ - SC(math_exp, V8.MathExp) \ - SC(math_floor, V8.MathFloor) \ - SC(math_log, V8.MathLog) \ - SC(math_pow, V8.MathPow) \ - SC(math_round, V8.MathRound) \ - SC(math_sqrt, V8.MathSqrt) \ - SC(stack_interrupts, V8.StackInterrupts) \ - SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \ - SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \ - SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \ - SC(soft_deopts_requested, V8.SoftDeoptsRequested) \ - SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \ - SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \ - /* Number of write barriers in generated code. */ \ - SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \ - SC(write_barriers_static, V8.WriteBarriersStatic) \ - SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \ - SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \ - SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \ - SC(old_pointer_space_bytes_available, \ - V8.MemoryOldPointerSpaceBytesAvailable) \ - SC(old_pointer_space_bytes_committed, \ - V8.MemoryOldPointerSpaceBytesCommitted) \ - SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \ - SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \ - SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \ - SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \ - SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \ - SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \ - SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \ - SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \ - SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \ - SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \ - SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \ - SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \ - SC(cell_space_bytes_used, 
V8.MemoryCellSpaceBytesUsed) \ - SC(property_cell_space_bytes_available, \ - V8.MemoryPropertyCellSpaceBytesAvailable) \ - SC(property_cell_space_bytes_committed, \ - V8.MemoryPropertyCellSpaceBytesCommitted) \ - SC(property_cell_space_bytes_used, \ - V8.MemoryPropertyCellSpaceBytesUsed) \ - SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \ - SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \ - SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed) - - -// This file contains all the v8 counters that are in use. -class Counters { - public: -#define HT(name, caption) \ - HistogramTimer* name() { return &name##_; } - HISTOGRAM_TIMER_LIST(HT) -#undef HT - -#define HP(name, caption) \ - Histogram* name() { return &name##_; } - HISTOGRAM_PERCENTAGE_LIST(HP) -#undef HP - -#define HM(name, caption) \ - Histogram* name() { return &name##_; } - HISTOGRAM_MEMORY_LIST(HM) -#undef HM - -#define SC(name, caption) \ - StatsCounter* name() { return &name##_; } - STATS_COUNTER_LIST_1(SC) - STATS_COUNTER_LIST_2(SC) -#undef SC - -#define SC(name) \ - StatsCounter* count_of_##name() { return &count_of_##name##_; } \ - StatsCounter* size_of_##name() { return &size_of_##name##_; } - INSTANCE_TYPE_LIST(SC) -#undef SC - -#define SC(name) \ - StatsCounter* count_of_CODE_TYPE_##name() \ - { return &count_of_CODE_TYPE_##name##_; } \ - StatsCounter* size_of_CODE_TYPE_##name() \ - { return &size_of_CODE_TYPE_##name##_; } - CODE_KIND_LIST(SC) -#undef SC - -#define SC(name) \ - StatsCounter* count_of_FIXED_ARRAY_##name() \ - { return &count_of_FIXED_ARRAY_##name##_; } \ - StatsCounter* size_of_FIXED_ARRAY_##name() \ - { return &size_of_FIXED_ARRAY_##name##_; } - FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) -#undef SC - -#define SC(name) \ - StatsCounter* count_of_CODE_AGE_##name() \ - { return &count_of_CODE_AGE_##name##_; } \ - StatsCounter* size_of_CODE_AGE_##name() \ - { return &size_of_CODE_AGE_##name##_; } - CODE_AGE_LIST_COMPLETE(SC) -#undef SC - - enum Id { -#define 
RATE_ID(name, caption) k_##name, - HISTOGRAM_TIMER_LIST(RATE_ID) -#undef RATE_ID -#define PERCENTAGE_ID(name, caption) k_##name, - HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID) -#undef PERCENTAGE_ID -#define MEMORY_ID(name, caption) k_##name, - HISTOGRAM_MEMORY_LIST(MEMORY_ID) -#undef MEMORY_ID -#define COUNTER_ID(name, caption) k_##name, - STATS_COUNTER_LIST_1(COUNTER_ID) - STATS_COUNTER_LIST_2(COUNTER_ID) -#undef COUNTER_ID -#define COUNTER_ID(name) kCountOf##name, kSizeOf##name, - INSTANCE_TYPE_LIST(COUNTER_ID) -#undef COUNTER_ID -#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \ - kSizeOfCODE_TYPE_##name, - CODE_KIND_LIST(COUNTER_ID) -#undef COUNTER_ID -#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \ - kSizeOfFIXED_ARRAY__##name, - FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID) -#undef COUNTER_ID -#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \ - kSizeOfCODE_AGE__##name, - CODE_AGE_LIST_COMPLETE(COUNTER_ID) -#undef COUNTER_ID - stats_counter_count - }; - - void ResetHistograms(); - - private: -#define HT(name, caption) \ - HistogramTimer name##_; - HISTOGRAM_TIMER_LIST(HT) -#undef HT - -#define HP(name, caption) \ - Histogram name##_; - HISTOGRAM_PERCENTAGE_LIST(HP) -#undef HP - -#define HM(name, caption) \ - Histogram name##_; - HISTOGRAM_MEMORY_LIST(HM) -#undef HM - -#define SC(name, caption) \ - StatsCounter name##_; - STATS_COUNTER_LIST_1(SC) - STATS_COUNTER_LIST_2(SC) -#undef SC - -#define SC(name) \ - StatsCounter size_of_##name##_; \ - StatsCounter count_of_##name##_; - INSTANCE_TYPE_LIST(SC) -#undef SC - -#define SC(name) \ - StatsCounter size_of_CODE_TYPE_##name##_; \ - StatsCounter count_of_CODE_TYPE_##name##_; - CODE_KIND_LIST(SC) -#undef SC - -#define SC(name) \ - StatsCounter size_of_FIXED_ARRAY_##name##_; \ - StatsCounter count_of_FIXED_ARRAY_##name##_; - FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) -#undef SC - -#define SC(name) \ - StatsCounter size_of_CODE_AGE_##name##_; \ - StatsCounter count_of_CODE_AGE_##name##_; - 
CODE_AGE_LIST_COMPLETE(SC) -#undef SC - - friend class Isolate; - - explicit Counters(Isolate* isolate); - - DISALLOW_IMPLICIT_CONSTRUCTORS(Counters); -}; - -} } // namespace v8::internal - -#endif // V8_V8_COUNTERS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/v8dll-main.cc nodejs-0.11.15/deps/v8/src/v8dll-main.cc --- nodejs-0.11.13/deps/v8/src/v8dll-main.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8dll-main.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,37 +1,14 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // The GYP based build ends up defining USING_V8_SHARED when compiling this // file. #undef USING_V8_SHARED -#include "../include/v8.h" +#include "include/v8.h" #if V8_OS_WIN -#include "win32-headers.h" +#include "src/base/win32-headers.h" extern "C" { BOOL WINAPI DllMain(HANDLE hinstDLL, diff -Nru nodejs-0.11.13/deps/v8/src/v8globals.h nodejs-0.11.15/deps/v8/src/v8globals.h --- nodejs-0.11.13/deps/v8/src/v8globals.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8globals.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,568 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_V8GLOBALS_H_ -#define V8_V8GLOBALS_H_ - -#include "globals.h" -#include "checks.h" - -namespace v8 { -namespace internal { - -// This file contains constants and global declarations related to the -// V8 system. - -// Mask for the sign bit in a smi. -const intptr_t kSmiSignMask = kIntptrSignBit; - -const int kObjectAlignmentBits = kPointerSizeLog2; -const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits; -const intptr_t kObjectAlignmentMask = kObjectAlignment - 1; - -// Desired alignment for pointers. -const intptr_t kPointerAlignment = (1 << kPointerSizeLog2); -const intptr_t kPointerAlignmentMask = kPointerAlignment - 1; - -// Desired alignment for double values. -const intptr_t kDoubleAlignment = 8; -const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1; - -// Desired alignment for generated code is 32 bytes (to improve cache line -// utilization). -const int kCodeAlignmentBits = 5; -const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits; -const intptr_t kCodeAlignmentMask = kCodeAlignment - 1; - -// Tag information for Failure. 
-const int kFailureTag = 3; -const int kFailureTagSize = 2; -const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1; - - -// Zap-value: The value used for zapping dead objects. -// Should be a recognizable hex value tagged as a failure. -#ifdef V8_HOST_ARCH_64_BIT -const Address kZapValue = - reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef)); -const Address kHandleZapValue = - reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf)); -const Address kGlobalHandleZapValue = - reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf)); -const Address kFromSpaceZapValue = - reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf)); -const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb); -const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef); -const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf; -#else -const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef); -const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf); -const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf); -const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf); -const uint32_t kSlotsZapValue = 0xbeefdeef; -const uint32_t kDebugZapValue = 0xbadbaddb; -const uint32_t kFreeListZapValue = 0xfeed1eaf; -#endif - -const int kCodeZapValue = 0xbadc0de; - -// Number of bits to represent the page size for paged spaces. The value of 20 -// gives 1Mb bytes per page. -const int kPageSizeBits = 20; - -// On Intel architecture, cache line size is 64 bytes. -// On ARM it may be less (32 bytes), but as far this constant is -// used for aligning data, it doesn't hurt to align on a greater value. -#define PROCESSOR_CACHE_LINE_SIZE 64 - -// Constants relevant to double precision floating point numbers. -// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30. 
-const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32); - - -// ----------------------------------------------------------------------------- -// Forward declarations for frequently used classes - -class AccessorInfo; -class Allocation; -class Arguments; -class Assembler; -class Code; -class CodeGenerator; -class CodeStub; -class Context; -class Debug; -class Debugger; -class DebugInfo; -class Descriptor; -class DescriptorArray; -class TransitionArray; -class ExternalReference; -class FixedArray; -class FunctionTemplateInfo; -class MemoryChunk; -class SeededNumberDictionary; -class UnseededNumberDictionary; -class NameDictionary; -template <typename T> class Handle; -class Heap; -class HeapObject; -class IC; -class InterceptorInfo; -class Isolate; -class JSReceiver; -class JSArray; -class JSFunction; -class JSObject; -class LargeObjectSpace; -class LookupResult; -class MacroAssembler; -class Map; -class MapSpace; -class MarkCompactCollector; -class NewSpace; -class Object; -class MaybeObject; -class OldSpace; -class Foreign; -class Scope; -class ScopeInfo; -class Script; -class Smi; -template <typename Config, class Allocator = FreeStoreAllocationPolicy> - class SplayTree; -class String; -class Name; -class Struct; -class Variable; -class RelocInfo; -class Deserializer; -class MessageLocation; -class VirtualMemory; -class Mutex; -class RecursiveMutex; - -typedef bool (*WeakSlotCallback)(Object** pointer); - -typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer); - -// ----------------------------------------------------------------------------- -// Miscellaneous - -// NOTE: SpaceIterator depends on AllocationSpace enumeration values being -// consecutive. -enum AllocationSpace { - NEW_SPACE, // Semispaces collected with copying collector. - OLD_POINTER_SPACE, // May contain pointers to new space. - OLD_DATA_SPACE, // Must not have pointers to new space. - CODE_SPACE, // No pointers to new space, marked executable. 
- MAP_SPACE, // Only and all map objects. - CELL_SPACE, // Only and all cell objects. - PROPERTY_CELL_SPACE, // Only and all global property cell objects. - LO_SPACE, // Promoted large objects. - - FIRST_SPACE = NEW_SPACE, - LAST_SPACE = LO_SPACE, - FIRST_PAGED_SPACE = OLD_POINTER_SPACE, - LAST_PAGED_SPACE = PROPERTY_CELL_SPACE -}; -const int kSpaceTagSize = 3; -const int kSpaceTagMask = (1 << kSpaceTagSize) - 1; - - -// A flag that indicates whether objects should be pretenured when -// allocated (allocated directly into the old generation) or not -// (allocated in the young generation if the object size and type -// allows). -enum PretenureFlag { NOT_TENURED, TENURED }; - -enum MinimumCapacity { - USE_DEFAULT_MINIMUM_CAPACITY, - USE_CUSTOM_MINIMUM_CAPACITY -}; - -enum GarbageCollector { SCAVENGER, MARK_COMPACTOR }; - -enum Executability { NOT_EXECUTABLE, EXECUTABLE }; - -enum VisitMode { - VISIT_ALL, - VISIT_ALL_IN_SCAVENGE, - VISIT_ALL_IN_SWEEP_NEWSPACE, - VISIT_ONLY_STRONG -}; - -// Flag indicating whether code is built into the VM (one of the natives files). -enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE }; - - -// A CodeDesc describes a buffer holding instructions and relocation -// information. The instructions start at the beginning of the buffer -// and grow forward, the relocation information starts at the end of -// the buffer and grows backward. -// -// |<--------------- buffer_size ---------------->| -// |<-- instr_size -->| |<-- reloc_size -->| -// +==================+========+==================+ -// | instructions | free | reloc info | -// +==================+========+==================+ -// ^ -// | -// buffer - -struct CodeDesc { - byte* buffer; - int buffer_size; - int instr_size; - int reloc_size; - Assembler* origin; -}; - - -// Callback function used for iterating objects in heap spaces, -// for example, scanning heap objects. 
-typedef int (*HeapObjectCallback)(HeapObject* obj); - - -// Callback function used for checking constraints when copying/relocating -// objects. Returns true if an object can be copied/relocated from its -// old_addr to a new_addr. -typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr); - - -// Callback function on inline caches, used for iterating over inline caches -// in compiled code. -typedef void (*InlineCacheCallback)(Code* code, Address ic); - - -// State for inline cache call sites. Aliased as IC::State. -enum InlineCacheState { - // Has never been executed. - UNINITIALIZED, - // Has been executed but monomorhic state has been delayed. - PREMONOMORPHIC, - // Has been executed and only one receiver type has been seen. - MONOMORPHIC, - // Like MONOMORPHIC but check failed due to prototype. - MONOMORPHIC_PROTOTYPE_FAILURE, - // Multiple receiver types have been seen. - POLYMORPHIC, - // Many receiver types have been seen. - MEGAMORPHIC, - // A generic handler is installed and no extra typefeedback is recorded. - GENERIC, - // Special state for debug break or step in prepare stubs. - DEBUG_STUB -}; - - -enum CallFunctionFlags { - NO_CALL_FUNCTION_FLAGS, - // The call target is cached in the instruction stream. - RECORD_CALL_TARGET, - CALL_AS_METHOD, - // Always wrap the receiver and call to the JSFunction. Only use this flag - // both the receiver type and the target method are statically known. - WRAP_AND_CALL -}; - - -enum InlineCacheHolderFlag { - OWN_MAP, // For fast properties objects. - PROTOTYPE_MAP // For slow properties objects (except GlobalObjects). -}; - - -// The Store Buffer (GC). -typedef enum { - kStoreBufferFullEvent, - kStoreBufferStartScanningPagesEvent, - kStoreBufferScanningPageEvent -} StoreBufferEvent; - - -typedef void (*StoreBufferCallback)(Heap* heap, - MemoryChunk* page, - StoreBufferEvent event); - - -// Union used for fast testing of specific double values. 
-union DoubleRepresentation { - double value; - int64_t bits; - DoubleRepresentation(double x) { value = x; } - bool operator==(const DoubleRepresentation& other) const { - return bits == other.bits; - } -}; - - -// Union used for customized checking of the IEEE double types -// inlined within v8 runtime, rather than going to the underlying -// platform headers and libraries -union IeeeDoubleLittleEndianArchType { - double d; - struct { - unsigned int man_low :32; - unsigned int man_high :20; - unsigned int exp :11; - unsigned int sign :1; - } bits; -}; - - -union IeeeDoubleBigEndianArchType { - double d; - struct { - unsigned int sign :1; - unsigned int exp :11; - unsigned int man_high :20; - unsigned int man_low :32; - } bits; -}; - - -// AccessorCallback -struct AccessorDescriptor { - MaybeObject* (*getter)(Isolate* isolate, Object* object, void* data); - MaybeObject* (*setter)( - Isolate* isolate, JSObject* object, Object* value, void* data); - void* data; -}; - - -// Logging and profiling. A StateTag represents a possible state of -// the VM. The logger maintains a stack of these. Creating a VMState -// object enters a state by pushing on the stack, and destroying a -// VMState object leaves a state by popping the current state from the -// stack. - -enum StateTag { - JS, - GC, - COMPILER, - OTHER, - EXTERNAL, - IDLE -}; - - -// ----------------------------------------------------------------------------- -// Macros - -// Testers for test. - -#define HAS_SMI_TAG(value) \ - ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag) - -#define HAS_FAILURE_TAG(value) \ - ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag) - -// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer -#define OBJECT_POINTER_ALIGN(value) \ - (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask) - -// POINTER_SIZE_ALIGN returns the value aligned as a pointer. 
-#define POINTER_SIZE_ALIGN(value) \ - (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask) - -// CODE_POINTER_ALIGN returns the value aligned as a generated code segment. -#define CODE_POINTER_ALIGN(value) \ - (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask) - -// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk") -// inside a C++ class and new and delete will be overloaded so logging is -// performed. -// This file (globals.h) is included before log.h, so we use direct calls to -// the Logger rather than the LOG macro. -#ifdef DEBUG -#define TRACK_MEMORY(name) \ - void* operator new(size_t size) { \ - void* result = ::operator new(size); \ - Logger::NewEventStatic(name, result, size); \ - return result; \ - } \ - void operator delete(void* object) { \ - Logger::DeleteEventStatic(name, object); \ - ::operator delete(object); \ - } -#else -#define TRACK_MEMORY(name) -#endif - - -// Feature flags bit positions. They are mostly based on the CPUID spec. -// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX. -enum CpuFeature { SSE4_1 = 32 + 19, // x86 - SSE3 = 32 + 0, // x86 - SSE2 = 26, // x86 - CMOV = 15, // x86 - VFP3 = 1, // ARM - ARMv7 = 2, // ARM - SUDIV = 3, // ARM - UNALIGNED_ACCESSES = 4, // ARM - MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM - VFP32DREGS = 6, // ARM - NEON = 7, // ARM - SAHF = 0, // x86 - FPU = 1}; // MIPS - - -// Used to specify if a macro instruction must perform a smi check on tagged -// values. -enum SmiCheckType { - DONT_DO_SMI_CHECK, - DO_SMI_CHECK -}; - - -enum ScopeType { - EVAL_SCOPE, // The top-level scope for an eval source. - FUNCTION_SCOPE, // The top-level scope for a function. - MODULE_SCOPE, // The scope introduced by a module literal - GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval. - CATCH_SCOPE, // The scope introduced by catch. - BLOCK_SCOPE, // The scope introduced by a new block. - WITH_SCOPE // The scope introduced by with. 
-}; - - -const uint32_t kHoleNanUpper32 = 0x7FFFFFFF; -const uint32_t kHoleNanLower32 = 0xFFFFFFFF; -const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000; - -const uint64_t kHoleNanInt64 = - (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32; -const uint64_t kLastNonNaNInt64 = - (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32); - - -// The order of this enum has to be kept in sync with the predicates below. -enum VariableMode { - // User declared variables: - VAR, // declared via 'var', and 'function' declarations - - CONST_LEGACY, // declared via legacy 'const' declarations - - LET, // declared via 'let' declarations (first lexical) - - CONST, // declared via 'const' declarations - - MODULE, // declared via 'module' declaration (last lexical) - - // Variables introduced by the compiler: - INTERNAL, // like VAR, but not user-visible (may or may not - // be in a context) - - TEMPORARY, // temporary variables (not user-visible), stack-allocated - // unless the scope as a whole has forced context allocation - - DYNAMIC, // always require dynamic lookup (we don't know - // the declaration) - - DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the - // variable is global unless it has been shadowed - // by an eval-introduced variable - - DYNAMIC_LOCAL // requires dynamic lookup, but we know that the - // variable is local and where it is unless it - // has been shadowed by an eval-introduced - // variable -}; - - -inline bool IsDynamicVariableMode(VariableMode mode) { - return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL; -} - - -inline bool IsDeclaredVariableMode(VariableMode mode) { - return mode >= VAR && mode <= MODULE; -} - - -inline bool IsLexicalVariableMode(VariableMode mode) { - return mode >= LET && mode <= MODULE; -} - - -inline bool IsImmutableVariableMode(VariableMode mode) { - return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY; -} - - -// ES6 Draft Rev3 10.2 specifies declarative environment records 
with mutable -// and immutable bindings that can be in two states: initialized and -// uninitialized. In ES5 only immutable bindings have these two states. When -// accessing a binding, it needs to be checked for initialization. However in -// the following cases the binding is initialized immediately after creation -// so the initialization check can always be skipped: -// 1. Var declared local variables. -// var foo; -// 2. A local variable introduced by a function declaration. -// function foo() {} -// 3. Parameters -// function x(foo) {} -// 4. Catch bound variables. -// try {} catch (foo) {} -// 6. Function variables of named function expressions. -// var x = function foo() {} -// 7. Implicit binding of 'this'. -// 8. Implicit binding of 'arguments' in functions. -// -// ES5 specified object environment records which are introduced by ES elements -// such as Program and WithStatement that associate identifier bindings with the -// properties of some object. In the specification only mutable bindings exist -// (which may be non-writable) and have no distinct initialization step. However -// V8 allows const declarations in global code with distinct creation and -// initialization steps which are represented by non-writable properties in the -// global object. As a result also these bindings need to be checked for -// initialization. -// -// The following enum specifies a flag that indicates if the binding needs a -// distinct initialization step (kNeedsInitialization) or if the binding is -// immediately initialized upon creation (kCreatedInitialized). 
-enum InitializationFlag { - kNeedsInitialization, - kCreatedInitialized -}; - - -enum ClearExceptionFlag { - KEEP_EXCEPTION, - CLEAR_EXCEPTION -}; - - -enum MinusZeroMode { - TREAT_MINUS_ZERO_AS_ZERO, - FAIL_ON_MINUS_ZERO -}; - -} } // namespace v8::internal - -#endif // V8_V8GLOBALS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/v8.h nodejs-0.11.15/deps/v8/src/v8.h --- nodejs-0.11.13/deps/v8/src/v8.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // // Top include for all V8 .cc files. @@ -49,24 +26,25 @@ #endif // Basic includes -#include "../include/v8.h" -#include "../include/v8-platform.h" -#include "v8globals.h" -#include "v8checks.h" -#include "allocation.h" -#include "assert-scope.h" -#include "v8utils.h" -#include "flags.h" +#include "include/v8.h" +#include "include/v8-platform.h" +#include "src/checks.h" // NOLINT +#include "src/allocation.h" // NOLINT +#include "src/assert-scope.h" // NOLINT +#include "src/utils.h" // NOLINT +#include "src/flags.h" // NOLINT +#include "src/globals.h" // NOLINT // Objects & heap -#include "objects-inl.h" -#include "spaces-inl.h" -#include "heap-inl.h" -#include "incremental-marking-inl.h" -#include "mark-compact-inl.h" -#include "log-inl.h" -#include "handles-inl.h" -#include "zone-inl.h" +#include "src/objects-inl.h" // NOLINT +#include "src/heap/spaces-inl.h" // NOLINT +#include "src/heap/heap-inl.h" // NOLINT +#include "src/heap/incremental-marking-inl.h" // NOLINT +#include "src/heap/mark-compact-inl.h" // NOLINT +#include "src/log-inl.h" // NOLINT +#include "src/handles-inl.h" // NOLINT +#include "src/types-inl.h" // NOLINT +#include "src/zone-inl.h" // NOLINT namespace v8 { namespace internal { @@ -97,12 +75,6 @@ // Support for entry hooking JITed code. 
static void SetFunctionEntryHook(FunctionEntryHook entry_hook); - static void AddCallCompletedCallback(CallCompletedCallback callback); - static void RemoveCallCompletedCallback(CallCompletedCallback callback); - static void FireCallCompletedCallback(Isolate* isolate); - - static void RunMicrotasks(Isolate* isolate); - static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() { return array_buffer_allocator_; } @@ -120,8 +92,6 @@ static void InitializeOncePerProcessImpl(); static void InitializeOncePerProcess(); - // List of callbacks when a Call completes. - static List<CallCompletedCallback>* call_completed_callbacks_; // Allocator for external array buffers. static v8::ArrayBuffer::Allocator* array_buffer_allocator_; // v8::Platform to use. @@ -135,6 +105,4 @@ } } // namespace v8::internal -namespace i = v8::internal; - #endif // V8_V8_H_ diff -Nru nodejs-0.11.13/deps/v8/src/v8memory.h nodejs-0.11.15/deps/v8/src/v8memory.h --- nodejs-0.11.13/deps/v8/src/v8memory.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8memory.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_MEMORY_H_ #define V8_MEMORY_H_ diff -Nru nodejs-0.11.13/deps/v8/src/v8natives.js nodejs-0.11.15/deps/v8/src/v8natives.js --- nodejs-0.11.13/deps/v8/src/v8natives.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8natives.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // This file relies on the fact that the following declarations have been made // in runtime.js: @@ -51,7 +28,7 @@ var f = functions[i + 1]; %FunctionSetName(f, key); %FunctionRemovePrototype(f); - %SetProperty(object, key, f, attributes); + %AddNamedProperty(object, key, f, attributes); %SetNativeFlag(f); } %ToFastProperties(object); @@ -62,7 +39,7 @@ function InstallGetter(object, name, getter) { %FunctionSetName(getter, name); %FunctionRemovePrototype(getter); - %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM); + %DefineAccessorPropertyUnchecked(object, name, getter, null, DONT_ENUM); %SetNativeFlag(getter); } @@ -73,7 +50,7 @@ %FunctionSetName(setter, name); %FunctionRemovePrototype(getter); %FunctionRemovePrototype(setter); - %DefineOrRedefineAccessorProperty(object, name, getter, setter, DONT_ENUM); + %DefineAccessorPropertyUnchecked(object, name, getter, setter, DONT_ENUM); %SetNativeFlag(getter); %SetNativeFlag(setter); } @@ -88,7 +65,7 @@ for (var i = 0; i < constants.length; i += 2) { var name = constants[i]; var k = constants[i + 1]; - 
%SetProperty(object, name, k, attributes); + %AddNamedProperty(object, name, k, attributes); } %ToFastProperties(object); } @@ -109,16 +86,17 @@ } if (fields) { for (var i = 0; i < fields.length; i++) { - %SetProperty(prototype, fields[i], UNDEFINED, DONT_ENUM | DONT_DELETE); + %AddNamedProperty(prototype, fields[i], + UNDEFINED, DONT_ENUM | DONT_DELETE); } } for (var i = 0; i < methods.length; i += 2) { var key = methods[i]; var f = methods[i + 1]; - %SetProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY); + %AddNamedProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY); %SetNativeFlag(f); } - %SetPrototype(prototype, null); + %InternalSetPrototype(prototype, null); %ToFastProperties(prototype); } @@ -195,12 +173,12 @@ 'be the global object from which eval originated'); } - var global_receiver = %GlobalReceiver(global); + var global_proxy = %GlobalProxy(global); var f = %CompileString(x, false); if (!IS_FUNCTION(f)) return f; - return %_CallFunction(global_receiver, f); + return %_CallFunction(global_proxy, f); } @@ -213,13 +191,13 @@ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY; // ECMA 262 - 15.1.1.1. - %SetProperty(global, "NaN", NAN, attributes); + %AddNamedProperty(global, "NaN", NAN, attributes); // ECMA-262 - 15.1.1.2. - %SetProperty(global, "Infinity", INFINITY, attributes); + %AddNamedProperty(global, "Infinity", INFINITY, attributes); // ECMA-262 - 15.1.1.3. - %SetProperty(global, "undefined", UNDEFINED, attributes); + %AddNamedProperty(global, "undefined", UNDEFINED, attributes); // Set up non-enumerable function on the global object. InstallFunctions(global, DONT_ENUM, $Array( @@ -267,7 +245,7 @@ var handler = %GetHandler(this); return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, ToName(V)); } - return %HasLocalProperty(TO_OBJECT_INLINE(this), ToName(V)); + return %HasOwnProperty(TO_OBJECT_INLINE(this), ToName(V)); } @@ -286,7 +264,7 @@ // TODO(rossberg): adjust once there is a story for symbols vs proxies. 
if (IS_SYMBOL(V)) return false; - var desc = GetOwnProperty(this, P); + var desc = GetOwnPropertyJS(this, P); return IS_UNDEFINED(desc) ? false : desc.isEnumerable(); } return %IsPropertyEnumerable(ToObject(this), P); @@ -297,7 +275,7 @@ function ObjectDefineGetter(name, fun) { var receiver = this; if (receiver == null && !IS_UNDETECTABLE(receiver)) { - receiver = %GlobalReceiver(global); + receiver = %GlobalProxy(global); } if (!IS_SPEC_FUNCTION(fun)) { throw new $TypeError( @@ -314,7 +292,7 @@ function ObjectLookupGetter(name) { var receiver = this; if (receiver == null && !IS_UNDETECTABLE(receiver)) { - receiver = %GlobalReceiver(global); + receiver = %GlobalProxy(global); } return %LookupAccessor(ToObject(receiver), ToName(name), GETTER); } @@ -323,7 +301,7 @@ function ObjectDefineSetter(name, fun) { var receiver = this; if (receiver == null && !IS_UNDETECTABLE(receiver)) { - receiver = %GlobalReceiver(global); + receiver = %GlobalProxy(global); } if (!IS_SPEC_FUNCTION(fun)) { throw new $TypeError( @@ -340,7 +318,7 @@ function ObjectLookupSetter(name) { var receiver = this; if (receiver == null && !IS_UNDETECTABLE(receiver)) { - receiver = %GlobalReceiver(global); + receiver = %GlobalProxy(global); } return %LookupAccessor(ToObject(receiver), ToName(name), SETTER); } @@ -355,7 +333,7 @@ var names = CallTrap0(handler, "keys", DerivedKeysTrap); return ToNameArray(names, "keys", false); } - return %LocalKeys(obj); + return %OwnKeys(obj); } @@ -409,24 +387,22 @@ var obj = new $Object(); if (desc.hasValue()) { - %IgnoreAttributesAndSetProperty(obj, "value", desc.getValue(), NONE); + %AddNamedProperty(obj, "value", desc.getValue(), NONE); } if (desc.hasWritable()) { - %IgnoreAttributesAndSetProperty(obj, "writable", desc.isWritable(), NONE); + %AddNamedProperty(obj, "writable", desc.isWritable(), NONE); } if (desc.hasGetter()) { - %IgnoreAttributesAndSetProperty(obj, "get", desc.getGet(), NONE); + %AddNamedProperty(obj, "get", desc.getGet(), NONE); } if 
(desc.hasSetter()) { - %IgnoreAttributesAndSetProperty(obj, "set", desc.getSet(), NONE); + %AddNamedProperty(obj, "set", desc.getSet(), NONE); } if (desc.hasEnumerable()) { - %IgnoreAttributesAndSetProperty(obj, "enumerable", - desc.isEnumerable(), NONE); + %AddNamedProperty(obj, "enumerable", desc.isEnumerable(), NONE); } if (desc.hasConfigurable()) { - %IgnoreAttributesAndSetProperty(obj, "configurable", - desc.isConfigurable(), NONE); + %AddNamedProperty(obj, "configurable", desc.isConfigurable(), NONE); } return obj; } @@ -650,7 +626,7 @@ // ES5 section 8.12.1. -function GetOwnProperty(obj, v) { +function GetOwnPropertyJS(obj, v) { var p = ToName(v); if (%IsJSProxy(obj)) { // TODO(rossberg): adjust once there is a story for symbols vs proxies. @@ -682,7 +658,7 @@ // ES5 section 8.12.7. function Delete(obj, p, should_throw) { - var desc = GetOwnProperty(obj, p); + var desc = GetOwnPropertyJS(obj, p); if (IS_UNDEFINED(desc)) return true; if (desc.isConfigurable()) { %DeleteProperty(obj, p, 0); @@ -856,7 +832,7 @@ value = current.getValue(); } - %DefineOrRedefineDataProperty(obj, p, value, flag); + %DefineDataPropertyUnchecked(obj, p, value, flag); } else { // There are 3 cases that lead here: // Step 4b - defining a new accessor property. @@ -866,7 +842,7 @@ // descriptor. var getter = desc.hasGetter() ? desc.getGet() : null; var setter = desc.hasSetter() ? 
desc.getSet() : null; - %DefineOrRedefineAccessorProperty(obj, p, getter, setter, flag); + %DefineAccessorPropertyUnchecked(obj, p, getter, setter, flag); } return true; } @@ -889,7 +865,7 @@ if (new_length != ToNumber(desc.getValue())) { throw new $RangeError('defineProperty() array length out of range'); } - var length_desc = GetOwnProperty(obj, "length"); + var length_desc = GetOwnPropertyJS(obj, "length"); if (new_length != length && !length_desc.isWritable()) { if (should_throw) { throw MakeTypeError("redefine_disallowed", [p]); @@ -911,7 +887,7 @@ while (new_length < length--) { var index = ToString(length); if (emit_splice) { - var deletedDesc = GetOwnProperty(obj, index); + var deletedDesc = GetOwnPropertyJS(obj, index); if (deletedDesc && deletedDesc.hasValue()) removed[length - new_length] = deletedDesc.getValue(); } @@ -924,7 +900,7 @@ // Make sure the below call to DefineObjectProperty() doesn't overwrite // any magic "length" property by removing the value. // TODO(mstarzinger): This hack should be removed once we have addressed the - // respective TODO in Runtime_DefineOrRedefineDataProperty. + // respective TODO in Runtime_DefineDataPropertyUnchecked. // For the time being, we need a hack to prevent Object.observe from // generating two change records. obj.length = new_length; @@ -949,34 +925,36 @@ } // Step 4 - Special handling for array index. 
- var index = ToUint32(p); - var emit_splice = false; - if (ToString(index) == p && index != 4294967295) { - var length = obj.length; - if (index >= length && %IsObserved(obj)) { - emit_splice = true; - BeginPerformSplice(obj); - } + if (!IS_SYMBOL(p)) { + var index = ToUint32(p); + var emit_splice = false; + if (ToString(index) == p && index != 4294967295) { + var length = obj.length; + if (index >= length && %IsObserved(obj)) { + emit_splice = true; + BeginPerformSplice(obj); + } - var length_desc = GetOwnProperty(obj, "length"); - if ((index >= length && !length_desc.isWritable()) || - !DefineObjectProperty(obj, p, desc, true)) { - if (emit_splice) + var length_desc = GetOwnPropertyJS(obj, "length"); + if ((index >= length && !length_desc.isWritable()) || + !DefineObjectProperty(obj, p, desc, true)) { + if (emit_splice) + EndPerformSplice(obj); + if (should_throw) { + throw MakeTypeError("define_disallowed", [p]); + } else { + return false; + } + } + if (index >= length) { + obj.length = index + 1; + } + if (emit_splice) { EndPerformSplice(obj); - if (should_throw) { - throw MakeTypeError("define_disallowed", [p]); - } else { - return false; + EnqueueSpliceRecord(obj, length, [], index + 1 - length); } + return true; } - if (index >= length) { - obj.length = index + 1; - } - if (emit_splice) { - EndPerformSplice(obj); - EnqueueSpliceRecord(obj, length, [], index + 1 - length); - } - return true; } // Step 5 - Fallback to default implementation. @@ -1030,7 +1008,7 @@ throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyDescriptor"]); } - var desc = GetOwnProperty(obj, p); + var desc = GetOwnPropertyJS(obj, p); return FromPropertyDescriptor(desc); } @@ -1048,7 +1026,7 @@ var s = ToName(obj[index]); // TODO(rossberg): adjust once there is a story for symbols vs proxies. 
if (IS_SYMBOL(s) && !includeSymbols) continue; - if (%HasLocalProperty(names, s)) { + if (%HasOwnProperty(names, s)) { throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]); } array[index] = s; @@ -1068,13 +1046,13 @@ // Find all the indexed properties. - // Only get the local element names if we want to include string keys. + // Only get own element names if we want to include string keys. if (!symbolsOnly) { - var localElementNames = %GetLocalElementNames(obj); - for (var i = 0; i < localElementNames.length; ++i) { - localElementNames[i] = %_NumberToString(localElementNames[i]); + var ownElementNames = %GetOwnElementNames(obj); + for (var i = 0; i < ownElementNames.length; ++i) { + ownElementNames[i] = %_NumberToString(ownElementNames[i]); } - nameArrays.push(localElementNames); + nameArrays.push(ownElementNames); // Get names for indexed interceptor properties. var interceptorInfo = %GetInterceptorInfo(obj); @@ -1088,8 +1066,8 @@ // Find all the named properties. - // Get the local property names. - nameArrays.push(%GetLocalPropertyNames(obj, filter)); + // Get own property names. + nameArrays.push(%GetOwnPropertyNames(obj, filter)); // Get names for named interceptor properties if any. 
if ((interceptorInfo & 2) != 0) { @@ -1149,7 +1127,8 @@ if (!IS_SPEC_OBJECT(proto) && proto !== null) { throw MakeTypeError("proto_object_or_null", [proto]); } - var obj = { __proto__: proto }; + var obj = {}; + %InternalSetPrototype(obj, proto); if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties); return obj; } @@ -1179,8 +1158,8 @@ {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0}; for (var i = 0; i < names.length; i++) { var N = names[i]; - if (!(%HasLocalProperty(standardNames, N))) { - var attr = GetOwnProperty(attributes, N); + if (!(%HasOwnProperty(standardNames, N))) { + var attr = GetOwnPropertyJS(attributes, N); DefineOwnProperty(descObj, N, attr, true); } } @@ -1196,13 +1175,24 @@ } -function GetOwnEnumerablePropertyNames(properties) { +function GetOwnEnumerablePropertyNames(object) { var names = new InternalArray(); - for (var key in properties) { - if (%HasLocalProperty(properties, key)) { + for (var key in object) { + if (%HasOwnProperty(object, key)) { names.push(key); } } + + var filter = PROPERTY_ATTRIBUTES_STRING | PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL; + var symbols = %GetOwnPropertyNames(object, filter); + for (var i = 0; i < symbols.length; ++i) { + var symbol = symbols[i]; + if (IS_SYMBOL(symbol)) { + var desc = ObjectGetOwnPropertyDescriptor(object, symbol); + if (desc.enumerable) names.push(symbol); + } + } + return names; } @@ -1265,7 +1255,7 @@ var names = ObjectGetOwnPropertyNames(obj); for (var i = 0; i < names.length; i++) { var name = names[i]; - var desc = GetOwnProperty(obj, name); + var desc = GetOwnPropertyJS(obj, name); if (desc.isConfigurable()) { desc.setConfigurable(false); DefineOwnProperty(obj, name, desc, true); @@ -1277,7 +1267,7 @@ // ES5 section 15.2.3.9. 
-function ObjectFreeze(obj) { +function ObjectFreezeJS(obj) { if (!IS_SPEC_OBJECT(obj)) { throw MakeTypeError("called_on_non_object", ["Object.freeze"]); } @@ -1289,7 +1279,7 @@ var names = ObjectGetOwnPropertyNames(obj); for (var i = 0; i < names.length; i++) { var name = names[i]; - var desc = GetOwnProperty(obj, name); + var desc = GetOwnPropertyJS(obj, name); if (desc.isWritable() || desc.isConfigurable()) { if (IsDataDescriptor(desc)) desc.setWritable(false); desc.setConfigurable(false); @@ -1333,8 +1323,10 @@ var names = ObjectGetOwnPropertyNames(obj); for (var i = 0; i < names.length; i++) { var name = names[i]; - var desc = GetOwnProperty(obj, name); - if (desc.isConfigurable()) return false; + var desc = GetOwnPropertyJS(obj, name); + if (desc.isConfigurable()) { + return false; + } } return true; } @@ -1354,7 +1346,7 @@ var names = ObjectGetOwnPropertyNames(obj); for (var i = 0; i < names.length; i++) { var name = names[i]; - var desc = GetOwnProperty(obj, name); + var desc = GetOwnPropertyJS(obj, name); if (IsDataDescriptor(desc) && desc.isWritable()) return false; if (desc.isConfigurable()) return false; } @@ -1419,9 +1411,8 @@ %SetNativeFlag($Object); %SetCode($Object, ObjectConstructor); - %SetExpectedNumberOfProperties($Object, 4); - %SetProperty($Object.prototype, "constructor", $Object, DONT_ENUM); + %AddNamedProperty($Object.prototype, "constructor", $Object, DONT_ENUM); // Set up non-enumerable functions on the Object.prototype object. 
InstallFunctions($Object.prototype, DONT_ENUM, $Array( @@ -1445,7 +1436,7 @@ "create", ObjectCreate, "defineProperty", ObjectDefineProperty, "defineProperties", ObjectDefineProperties, - "freeze", ObjectFreeze, + "freeze", ObjectFreezeJS, "getPrototypeOf", ObjectGetPrototypeOf, "setPrototypeOf", ObjectSetPrototypeOf, "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor, @@ -1508,7 +1499,7 @@ %SetCode($Boolean, BooleanConstructor); %FunctionSetPrototype($Boolean, new $Boolean(false)); - %SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM); + %AddNamedProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM); InstallFunctions($Boolean.prototype, DONT_ENUM, $Array( "toString", BooleanToString, @@ -1577,7 +1568,7 @@ // ECMA-262 section 15.7.4.5 -function NumberToFixed(fractionDigits) { +function NumberToFixedJS(fractionDigits) { var x = this; if (!IS_NUMBER(this)) { if (!IS_NUMBER_WRAPPER(this)) { @@ -1602,7 +1593,7 @@ // ECMA-262 section 15.7.4.6 -function NumberToExponential(fractionDigits) { +function NumberToExponentialJS(fractionDigits) { var x = this; if (!IS_NUMBER(this)) { if (!IS_NUMBER_WRAPPER(this)) { @@ -1628,7 +1619,7 @@ // ECMA-262 section 15.7.4.7 -function NumberToPrecision(precision) { +function NumberToPrecisionJS(precision) { var x = this; if (!IS_NUMBER(this)) { if (!IS_NUMBER_WRAPPER(this)) { @@ -1691,7 +1682,7 @@ %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8); // Set up the constructor property on the Number prototype object. - %SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM); + %AddNamedProperty($Number.prototype, "constructor", $Number, DONT_ENUM); InstallConstants($Number, $Array( // ECMA-262 section 15.7.3.1. 
@@ -1717,9 +1708,9 @@ "toString", NumberToString, "toLocaleString", NumberToLocaleString, "valueOf", NumberValueOf, - "toFixed", NumberToFixed, - "toExponential", NumberToExponential, - "toPrecision", NumberToPrecision + "toFixed", NumberToFixedJS, + "toExponential", NumberToExponentialJS, + "toPrecision", NumberToPrecisionJS )); // Harmony Number constructor additions @@ -1759,6 +1750,10 @@ } } + if (%FunctionIsArrow(func)) { + return source; + } + var name = %FunctionNameShouldPrintAsAnonymous(func) ? 'anonymous' : %FunctionGetName(func); @@ -1805,19 +1800,15 @@ return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc); }; - %FunctionRemovePrototype(boundFunction); var new_length = 0; - if (%_ClassOf(this) == "Function") { - // Function or FunctionProxy. - var old_length = this.length; - // FunctionProxies might provide a non-UInt32 value. If so, ignore it. - if ((typeof old_length === "number") && - ((old_length >>> 0) === old_length)) { - var argc = %_ArgumentsLength(); - if (argc > 0) argc--; // Don't count the thisArg as parameter. - new_length = old_length - argc; - if (new_length < 0) new_length = 0; - } + var old_length = this.length; + // FunctionProxies might provide a non-UInt32 value. If so, ignore it. + if ((typeof old_length === "number") && + ((old_length >>> 0) === old_length)) { + var argc = %_ArgumentsLength(); + if (argc > 0) argc--; // Don't count the thisArg as parameter. + new_length = old_length - argc; + if (new_length < 0) new_length = 0; } // This runtime function finds any remaining arguments on the stack, // so we don't pass the arguments object. @@ -1846,7 +1837,7 @@ // If the formal parameters string include ) - an illegal // character - it may make the combined function expression // compile. We avoid this problem by checking for this early on. 
- if (%_CallFunction(p, ')', StringIndexOf) != -1) { + if (%_CallFunction(p, ')', StringIndexOfJS) != -1) { throw MakeSyntaxError('paren_in_arg_string', []); } // If the formal parameters include an unbalanced block comment, the @@ -1861,10 +1852,12 @@ function FunctionConstructor(arg1) { // length == 1 var source = NewFunctionString(arguments, 'function'); - var global_receiver = %GlobalReceiver(global); + var global_proxy = %GlobalProxy(global); // Compile the string in the constructor and not a helper so that errors // appear to come from here. - var f = %_CallFunction(global_receiver, %CompileString(source, true)); + var f = %CompileString(source, true); + if (!IS_FUNCTION(f)) return f; + f = %_CallFunction(global_proxy, f); %FunctionMarkNameShouldPrintAsAnonymous(f); return f; } @@ -1876,7 +1869,7 @@ %CheckIsBootstrapping(); %SetCode($Function, FunctionConstructor); - %SetProperty($Function.prototype, "constructor", $Function, DONT_ENUM); + %AddNamedProperty($Function.prototype, "constructor", $Function, DONT_ENUM); InstallFunctions($Function.prototype, DONT_ENUM, $Array( "bind", FunctionBind, @@ -1887,36 +1880,31 @@ SetUpFunction(); -//---------------------------------------------------------------------------- - -// TODO(rossberg): very simple abstraction for generic microtask queue. -// Eventually, we should move to a real event queue that allows to maintain -// relative ordering of different kinds of tasks. +// ---------------------------------------------------------------------------- +// Iterator related spec functions. 
-function GetMicrotaskQueue() { - var microtaskState = %GetMicrotaskState(); - if (IS_UNDEFINED(microtaskState.queue)) { - microtaskState.queue = new InternalArray; +// ES6 rev 26, 2014-07-18 +// 7.4.1 CheckIterable ( obj ) +function ToIterable(obj) { + if (!IS_SPEC_OBJECT(obj)) { + return UNDEFINED; } - return microtaskState.queue; + return obj[symbolIterator]; } -function RunMicrotasks() { - while (%SetMicrotaskPending(false)) { - var microtaskState = %GetMicrotaskState(); - if (IS_UNDEFINED(microtaskState.queue)) - return; - - var microtasks = microtaskState.queue; - microtaskState.queue = new InternalArray; - for (var i = 0; i < microtasks.length; i++) { - microtasks[i](); - } +// ES6 rev 26, 2014-07-18 +// 7.4.2 GetIterator ( obj, method ) +function GetIterator(obj, method) { + if (IS_UNDEFINED(method)) { + method = ToIterable(obj); } -} - -function EnqueueExternalMicrotask(fn) { - GetMicrotaskQueue().push(fn); - %SetMicrotaskPending(true); + if (!IS_SPEC_FUNCTION(method)) { + throw MakeTypeError('not_iterable', [obj]); + } + var iterator = %_CallFunction(obj, method); + if (!IS_SPEC_OBJECT(iterator)) { + throw MakeTypeError('not_an_iterator', [iterator]); + } + return iterator; } diff -Nru nodejs-0.11.13/deps/v8/src/v8threads.cc nodejs-0.11.15/deps/v8/src/v8threads.cc --- nodejs-0.11.13/deps/v8/src/v8threads.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8threads.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "api.h" -#include "bootstrapper.h" -#include "debug.h" -#include "execution.h" -#include "v8threads.h" -#include "regexp-stack.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/api.h" +#include "src/bootstrapper.h" +#include "src/debug.h" +#include "src/execution.h" +#include "src/regexp-stack.h" +#include "src/v8threads.h" namespace v8 { @@ -45,7 +22,7 @@ // Once the Locker is initialized, the current thread will be guaranteed to have // the lock for a given isolate. 
void Locker::Initialize(v8::Isolate* isolate) { - ASSERT(isolate != NULL); + DCHECK(isolate != NULL); has_lock_= false; top_level_ = true; isolate_ = reinterpret_cast<i::Isolate*>(isolate); @@ -74,17 +51,13 @@ isolate_->stack_guard()->ClearThread(access); isolate_->stack_guard()->InitThread(access); } - if (isolate_->IsDefaultIsolate()) { - // This only enters if not yet entered. - internal::Isolate::EnterDefaultIsolate(); - } } - ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread()); + DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread()); } bool Locker::IsLocked(v8::Isolate* isolate) { - ASSERT(isolate != NULL); + DCHECK(isolate != NULL); i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); return internal_isolate->thread_manager()->IsLockedByCurrentThread(); } @@ -96,11 +69,8 @@ Locker::~Locker() { - ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread()); + DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread()); if (has_lock_) { - if (isolate_->IsDefaultIsolate()) { - isolate_->Exit(); - } if (top_level_) { isolate_->thread_manager()->FreeThreadResources(); } else { @@ -112,24 +82,18 @@ void Unlocker::Initialize(v8::Isolate* isolate) { - ASSERT(isolate != NULL); + DCHECK(isolate != NULL); isolate_ = reinterpret_cast<i::Isolate*>(isolate); - ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread()); - if (isolate_->IsDefaultIsolate()) { - isolate_->Exit(); - } + DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread()); isolate_->thread_manager()->ArchiveThread(); isolate_->thread_manager()->Unlock(); } Unlocker::~Unlocker() { - ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread()); + DCHECK(!isolate_->thread_manager()->IsLockedByCurrentThread()); isolate_->thread_manager()->Lock(); isolate_->thread_manager()->RestoreThread(); - if (isolate_->IsDefaultIsolate()) { - isolate_->Enter(); - } } @@ -137,7 +101,7 @@ bool ThreadManager::RestoreThread() { - ASSERT(IsLockedByCurrentThread()); + 
DCHECK(IsLockedByCurrentThread()); // First check whether the current thread has been 'lazily archived', i.e. // not archived at all. If that is the case we put the state storage we // had prepared back in the free list, since we didn't need it after all. @@ -145,8 +109,8 @@ lazily_archived_thread_ = ThreadId::Invalid(); Isolate::PerIsolateThreadData* per_thread = isolate_->FindPerThreadDataForThisThread(); - ASSERT(per_thread != NULL); - ASSERT(per_thread->thread_state() == lazily_archived_thread_state_); + DCHECK(per_thread != NULL); + DCHECK(per_thread->thread_state() == lazily_archived_thread_state_); lazily_archived_thread_state_->set_id(ThreadId::Invalid()); lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST); lazily_archived_thread_state_ = NULL; @@ -175,15 +139,13 @@ from = isolate_->handle_scope_implementer()->RestoreThread(from); from = isolate_->RestoreThread(from); from = Relocatable::RestoreState(isolate_, from); -#ifdef ENABLE_DEBUGGER_SUPPORT from = isolate_->debug()->RestoreDebug(from); -#endif from = isolate_->stack_guard()->RestoreStackGuard(from); from = isolate_->regexp_stack()->RestoreStack(from); from = isolate_->bootstrapper()->RestoreState(from); per_thread->set_thread_state(NULL); if (state->terminate_on_restore()) { - isolate_->stack_guard()->TerminateExecution(); + isolate_->stack_guard()->RequestTerminateExecution(); state->set_terminate_on_restore(false); } state->set_id(ThreadId::Invalid()); @@ -196,7 +158,7 @@ void ThreadManager::Lock() { mutex_.Lock(); mutex_owner_ = ThreadId::Current(); - ASSERT(IsLockedByCurrentThread()); + DCHECK(IsLockedByCurrentThread()); } @@ -209,9 +171,7 @@ static int ArchiveSpacePerThread() { return HandleScopeImplementer::ArchiveSpacePerThread() + Isolate::ArchiveSpacePerThread() + -#ifdef ENABLE_DEBUGGER_SUPPORT Debug::ArchiveSpacePerThread() + -#endif StackGuard::ArchiveSpacePerThread() + RegExpStack::ArchiveSpacePerThread() + Bootstrapper::ArchiveSpacePerThread() + @@ -311,9 +271,9 @@ void 
ThreadManager::ArchiveThread() { - ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid())); - ASSERT(!IsArchived()); - ASSERT(IsLockedByCurrentThread()); + DCHECK(lazily_archived_thread_.Equals(ThreadId::Invalid())); + DCHECK(!IsArchived()); + DCHECK(IsLockedByCurrentThread()); ThreadState* state = GetFreeThreadState(); state->Unlink(); Isolate::PerIsolateThreadData* per_thread = @@ -321,14 +281,14 @@ per_thread->set_thread_state(state); lazily_archived_thread_ = ThreadId::Current(); lazily_archived_thread_state_ = state; - ASSERT(state->id().Equals(ThreadId::Invalid())); + DCHECK(state->id().Equals(ThreadId::Invalid())); state->set_id(CurrentId()); - ASSERT(!state->id().Equals(ThreadId::Invalid())); + DCHECK(!state->id().Equals(ThreadId::Invalid())); } void ThreadManager::EagerlyArchiveThread() { - ASSERT(IsLockedByCurrentThread()); + DCHECK(IsLockedByCurrentThread()); ThreadState* state = lazily_archived_thread_state_; state->LinkInto(ThreadState::IN_USE_LIST); char* to = state->data(); @@ -337,9 +297,7 @@ to = isolate_->handle_scope_implementer()->ArchiveThread(to); to = isolate_->ArchiveThread(to); to = Relocatable::ArchiveState(isolate_, to); -#ifdef ENABLE_DEBUGGER_SUPPORT to = isolate_->debug()->ArchiveDebug(to); -#endif to = isolate_->stack_guard()->ArchiveStackGuard(to); to = isolate_->regexp_stack()->ArchiveStack(to); to = isolate_->bootstrapper()->ArchiveState(to); @@ -351,9 +309,7 @@ void ThreadManager::FreeThreadResources() { isolate_->handle_scope_implementer()->FreeThreadResources(); isolate_->FreeThreadResources(); -#ifdef ENABLE_DEBUGGER_SUPPORT isolate_->debug()->FreeThreadResources(); -#endif isolate_->stack_guard()->FreeThreadResources(); isolate_->regexp_stack()->FreeThreadResources(); isolate_->bootstrapper()->FreeThreadResources(); diff -Nru nodejs-0.11.13/deps/v8/src/v8threads.h nodejs-0.11.15/deps/v8/src/v8threads.h --- nodejs-0.11.13/deps/v8/src/v8threads.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8threads.h 
2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_V8THREADS_H_ #define V8_V8THREADS_H_ @@ -119,7 +96,7 @@ void EagerlyArchiveThread(); - Mutex mutex_; + base::Mutex mutex_; ThreadId mutex_owner_; ThreadId lazily_archived_thread_; ThreadState* lazily_archived_thread_state_; diff -Nru nodejs-0.11.13/deps/v8/src/v8utils.cc nodejs-0.11.15/deps/v8/src/v8utils.cc --- nodejs-0.11.13/deps/v8/src/v8utils.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8utils.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,276 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include <stdarg.h> - -#include "v8.h" - -#include "platform.h" - -#include "sys/stat.h" - -namespace v8 { -namespace internal { - - -void PrintF(const char* format, ...) { - va_list arguments; - va_start(arguments, format); - OS::VPrint(format, arguments); - va_end(arguments); -} - - -void PrintF(FILE* out, const char* format, ...) { - va_list arguments; - va_start(arguments, format); - OS::VFPrint(out, format, arguments); - va_end(arguments); -} - - -void PrintPID(const char* format, ...) { - OS::Print("[%d] ", OS::GetCurrentProcessId()); - va_list arguments; - va_start(arguments, format); - OS::VPrint(format, arguments); - va_end(arguments); -} - - -void Flush(FILE* out) { - fflush(out); -} - - -char* ReadLine(const char* prompt) { - char* result = NULL; - char line_buf[256]; - int offset = 0; - bool keep_going = true; - fprintf(stdout, "%s", prompt); - fflush(stdout); - while (keep_going) { - if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) { - // fgets got an error. Just give up. - if (result != NULL) { - DeleteArray(result); - } - return NULL; - } - int len = StrLength(line_buf); - if (len > 1 && - line_buf[len - 2] == '\\' && - line_buf[len - 1] == '\n') { - // When we read a line that ends with a "\" we remove the escape and - // append the remainder. 
- line_buf[len - 2] = '\n'; - line_buf[len - 1] = 0; - len -= 1; - } else if ((len > 0) && (line_buf[len - 1] == '\n')) { - // Since we read a new line we are done reading the line. This - // will exit the loop after copying this buffer into the result. - keep_going = false; - } - if (result == NULL) { - // Allocate the initial result and make room for the terminating '\0' - result = NewArray<char>(len + 1); - } else { - // Allocate a new result with enough room for the new addition. - int new_len = offset + len + 1; - char* new_result = NewArray<char>(new_len); - // Copy the existing input into the new array and set the new - // array as the result. - OS::MemCopy(new_result, result, offset * kCharSize); - DeleteArray(result); - result = new_result; - } - // Copy the newly read line into the result. - OS::MemCopy(result + offset, line_buf, len * kCharSize); - offset += len; - } - ASSERT(result != NULL); - result[offset] = '\0'; - return result; -} - - -char* ReadCharsFromFile(FILE* file, - int* size, - int extra_space, - bool verbose, - const char* filename) { - if (file == NULL || fseek(file, 0, SEEK_END) != 0) { - if (verbose) { - OS::PrintError("Cannot read from file %s.\n", filename); - } - return NULL; - } - - // Get the size of the file and rewind it. 
- *size = ftell(file); - rewind(file); - - char* result = NewArray<char>(*size + extra_space); - for (int i = 0; i < *size && feof(file) == 0;) { - int read = static_cast<int>(fread(&result[i], 1, *size - i, file)); - if (read != (*size - i) && ferror(file) != 0) { - fclose(file); - DeleteArray(result); - return NULL; - } - i += read; - } - return result; -} - - -char* ReadCharsFromFile(const char* filename, - int* size, - int extra_space, - bool verbose) { - FILE* file = OS::FOpen(filename, "rb"); - char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename); - if (file != NULL) fclose(file); - return result; -} - - -byte* ReadBytes(const char* filename, int* size, bool verbose) { - char* chars = ReadCharsFromFile(filename, size, 0, verbose); - return reinterpret_cast<byte*>(chars); -} - - -static Vector<const char> SetVectorContents(char* chars, - int size, - bool* exists) { - if (!chars) { - *exists = false; - return Vector<const char>::empty(); - } - chars[size] = '\0'; - *exists = true; - return Vector<const char>(chars, size); -} - - -Vector<const char> ReadFile(const char* filename, - bool* exists, - bool verbose) { - int size; - char* result = ReadCharsFromFile(filename, &size, 1, verbose); - return SetVectorContents(result, size, exists); -} - - -Vector<const char> ReadFile(FILE* file, - bool* exists, - bool verbose) { - int size; - char* result = ReadCharsFromFile(file, &size, 1, verbose, ""); - return SetVectorContents(result, size, exists); -} - - -int WriteCharsToFile(const char* str, int size, FILE* f) { - int total = 0; - while (total < size) { - int write = static_cast<int>(fwrite(str, 1, size - total, f)); - if (write == 0) { - return total; - } - total += write; - str += write; - } - return total; -} - - -int AppendChars(const char* filename, - const char* str, - int size, - bool verbose) { - FILE* f = OS::FOpen(filename, "ab"); - if (f == NULL) { - if (verbose) { - OS::PrintError("Cannot open file %s for writing.\n", filename); - 
} - return 0; - } - int written = WriteCharsToFile(str, size, f); - fclose(f); - return written; -} - - -int WriteChars(const char* filename, - const char* str, - int size, - bool verbose) { - FILE* f = OS::FOpen(filename, "wb"); - if (f == NULL) { - if (verbose) { - OS::PrintError("Cannot open file %s for writing.\n", filename); - } - return 0; - } - int written = WriteCharsToFile(str, size, f); - fclose(f); - return written; -} - - -int WriteBytes(const char* filename, - const byte* bytes, - int size, - bool verbose) { - const char* str = reinterpret_cast<const char*>(bytes); - return WriteChars(filename, str, size, verbose); -} - - - -void StringBuilder::AddFormatted(const char* format, ...) { - va_list arguments; - va_start(arguments, format); - AddFormattedList(format, arguments); - va_end(arguments); -} - - -void StringBuilder::AddFormattedList(const char* format, va_list list) { - ASSERT(!is_finalized() && position_ <= buffer_.length()); - int n = OS::VSNPrintF(buffer_ + position_, format, list); - if (n < 0 || n >= (buffer_.length() - position_)) { - position_ = buffer_.length(); - } else { - position_ += n; - } -} - -} } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/v8utils.h nodejs-0.11.15/deps/v8/src/v8utils.h --- nodejs-0.11.13/deps/v8/src/v8utils.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/v8utils.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,464 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_V8UTILS_H_ -#define V8_V8UTILS_H_ - -#include "utils.h" -#include "platform.h" // For va_list on Solaris. - -namespace v8 { -namespace internal { - -// ---------------------------------------------------------------------------- -// I/O support. - -#if __GNUC__ >= 4 -// On gcc we can ask the compiler to check the types of %d-style format -// specifiers and their associated arguments. TODO(erikcorry) fix this -// so it works on MacOSX. -#if defined(__MACH__) && defined(__APPLE__) -#define PRINTF_CHECKING -#define FPRINTF_CHECKING -#else // MacOsX. 
-#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2))) -#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3))) -#endif -#else -#define PRINTF_CHECKING -#define FPRINTF_CHECKING -#endif - -// Our version of printf(). -void PRINTF_CHECKING PrintF(const char* format, ...); -void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...); - -// Prepends the current process ID to the output. -void PRINTF_CHECKING PrintPID(const char* format, ...); - -// Our version of fflush. -void Flush(FILE* out); - -inline void Flush() { - Flush(stdout); -} - - -// Read a line of characters after printing the prompt to stdout. The resulting -// char* needs to be disposed off with DeleteArray by the caller. -char* ReadLine(const char* prompt); - - -// Read and return the raw bytes in a file. the size of the buffer is returned -// in size. -// The returned buffer must be freed by the caller. -byte* ReadBytes(const char* filename, int* size, bool verbose = true); - - -// Append size chars from str to the file given by filename. -// The file is overwritten. Returns the number of chars written. -int AppendChars(const char* filename, - const char* str, - int size, - bool verbose = true); - - -// Write size chars from str to the file given by filename. -// The file is overwritten. Returns the number of chars written. -int WriteChars(const char* filename, - const char* str, - int size, - bool verbose = true); - - -// Write size bytes to the file given by filename. -// The file is overwritten. Returns the number of bytes written. -int WriteBytes(const char* filename, - const byte* bytes, - int size, - bool verbose = true); - - -// Write the C code -// const char* <varname> = "<str>"; -// const int <varname>_len = <len>; -// to the file given by filename. Only the first len chars are written. 
-int WriteAsCFile(const char* filename, const char* varname, - const char* str, int size, bool verbose = true); - - -// ---------------------------------------------------------------------------- -// Data structures - -template <typename T> -inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms, - int length) { - return Vector< Handle<Object> >( - reinterpret_cast<v8::internal::Handle<Object>*>(elms), length); -} - - -// ---------------------------------------------------------------------------- -// Memory - -// Copies words from |src| to |dst|. The data spans must not overlap. -template <typename T> -inline void CopyWords(T* dst, const T* src, size_t num_words) { - STATIC_ASSERT(sizeof(T) == kPointerSize); - ASSERT(Min(dst, const_cast<T*>(src)) + num_words <= - Max(dst, const_cast<T*>(src))); - ASSERT(num_words > 0); - - // Use block copying OS::MemCopy if the segment we're copying is - // enough to justify the extra call/setup overhead. - static const size_t kBlockCopyLimit = 16; - - if (num_words < kBlockCopyLimit) { - do { - num_words--; - *dst++ = *src++; - } while (num_words > 0); - } else { - OS::MemCopy(dst, src, num_words * kPointerSize); - } -} - - -// Copies words from |src| to |dst|. No restrictions. -template <typename T> -inline void MoveWords(T* dst, const T* src, size_t num_words) { - STATIC_ASSERT(sizeof(T) == kPointerSize); - ASSERT(num_words > 0); - - // Use block copying OS::MemCopy if the segment we're copying is - // enough to justify the extra call/setup overhead. - static const size_t kBlockCopyLimit = 16; - - if (num_words < kBlockCopyLimit && - ((dst < src) || (dst >= (src + num_words * kPointerSize)))) { - T* end = dst + num_words; - do { - num_words--; - *dst++ = *src++; - } while (num_words > 0); - } else { - OS::MemMove(dst, src, num_words * kPointerSize); - } -} - - -// Copies data from |src| to |dst|. The data spans must not overlap. 
-template <typename T> -inline void CopyBytes(T* dst, const T* src, size_t num_bytes) { - STATIC_ASSERT(sizeof(T) == 1); - ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <= - Max(dst, const_cast<T*>(src))); - if (num_bytes == 0) return; - - // Use block copying OS::MemCopy if the segment we're copying is - // enough to justify the extra call/setup overhead. - static const int kBlockCopyLimit = OS::kMinComplexMemCopy; - - if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) { - do { - num_bytes--; - *dst++ = *src++; - } while (num_bytes > 0); - } else { - OS::MemCopy(dst, src, num_bytes); - } -} - - -template <typename T, typename U> -inline void MemsetPointer(T** dest, U* value, int counter) { -#ifdef DEBUG - T* a = NULL; - U* b = NULL; - a = b; // Fake assignment to check assignability. - USE(a); -#endif // DEBUG -#if V8_HOST_ARCH_IA32 -#define STOS "stosl" -#elif V8_HOST_ARCH_X64 -#define STOS "stosq" -#endif -#if defined(__native_client__) - // This STOS sequence does not validate for x86_64 Native Client. - // Here we #undef STOS to force use of the slower C version. - // TODO(bradchen): Profile V8 and implement a faster REP STOS - // here if the profile indicates it matters. -#undef STOS -#endif - -#if defined(__GNUC__) && defined(STOS) - asm volatile( - "cld;" - "rep ; " STOS - : "+&c" (counter), "+&D" (dest) - : "a" (value) - : "memory", "cc"); -#else - for (int i = 0; i < counter; i++) { - dest[i] = value; - } -#endif - -#undef STOS -} - - -// Simple wrapper that allows an ExternalString to refer to a -// Vector<const char>. Doesn't assume ownership of the data. -class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource { - public: - explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {} - - virtual const char* data() const { return data_.start(); } - - virtual size_t length() const { return data_.length(); } - - private: - Vector<const char> data_; -}; - - -// Simple support to read a file into a 0-terminated C-string. 
-// The returned buffer must be freed by the caller. -// On return, *exits tells whether the file existed. -Vector<const char> ReadFile(const char* filename, - bool* exists, - bool verbose = true); -Vector<const char> ReadFile(FILE* file, - bool* exists, - bool verbose = true); - - -template <typename sourcechar, typename sinkchar> -INLINE(static void CopyCharsUnsigned(sinkchar* dest, - const sourcechar* src, - int chars)); -#if defined(V8_HOST_ARCH_ARM) -INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars)); -INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars)); -INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); -#elif defined(V8_HOST_ARCH_MIPS) -INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars)); -INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); -#endif - -// Copy from ASCII/16bit chars to ASCII/16bit chars. -template <typename sourcechar, typename sinkchar> -INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars)); - -template<typename sourcechar, typename sinkchar> -void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { - ASSERT(sizeof(sourcechar) <= 2); - ASSERT(sizeof(sinkchar) <= 2); - if (sizeof(sinkchar) == 1) { - if (sizeof(sourcechar) == 1) { - CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), - reinterpret_cast<const uint8_t*>(src), - chars); - } else { - CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), - reinterpret_cast<const uint16_t*>(src), - chars); - } - } else { - if (sizeof(sourcechar) == 1) { - CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), - reinterpret_cast<const uint8_t*>(src), - chars); - } else { - CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), - reinterpret_cast<const uint16_t*>(src), - chars); - } - } -} - -template <typename sourcechar, typename sinkchar> -void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) { - sinkchar* limit = dest + 
chars; -#ifdef V8_HOST_CAN_READ_UNALIGNED - if (sizeof(*dest) == sizeof(*src)) { - if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) { - OS::MemCopy(dest, src, chars * sizeof(*dest)); - return; - } - // Number of characters in a uintptr_t. - static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT - ASSERT(dest + kStepSize > dest); // Check for overflow. - while (dest + kStepSize <= limit) { - *reinterpret_cast<uintptr_t*>(dest) = - *reinterpret_cast<const uintptr_t*>(src); - dest += kStepSize; - src += kStepSize; - } - } -#endif - while (dest < limit) { - *dest++ = static_cast<sinkchar>(*src++); - } -} - - -#if defined(V8_HOST_ARCH_ARM) -void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { - switch (static_cast<unsigned>(chars)) { - case 0: - break; - case 1: - *dest = *src; - break; - case 2: - memcpy(dest, src, 2); - break; - case 3: - memcpy(dest, src, 3); - break; - case 4: - memcpy(dest, src, 4); - break; - case 5: - memcpy(dest, src, 5); - break; - case 6: - memcpy(dest, src, 6); - break; - case 7: - memcpy(dest, src, 7); - break; - case 8: - memcpy(dest, src, 8); - break; - case 9: - memcpy(dest, src, 9); - break; - case 10: - memcpy(dest, src, 10); - break; - case 11: - memcpy(dest, src, 11); - break; - case 12: - memcpy(dest, src, 12); - break; - case 13: - memcpy(dest, src, 13); - break; - case 14: - memcpy(dest, src, 14); - break; - case 15: - memcpy(dest, src, 15); - break; - default: - OS::MemCopy(dest, src, chars); - break; - } -} - - -void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) { - if (chars >= OS::kMinComplexConvertMemCopy) { - OS::MemCopyUint16Uint8(dest, src, chars); - } else { - OS::MemCopyUint16Uint8Wrapper(dest, src, chars); - } -} - - -void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { - switch (static_cast<unsigned>(chars)) { - case 0: - break; - case 1: - *dest = *src; - break; - case 2: - memcpy(dest, src, 4); - break; - case 3: - 
memcpy(dest, src, 6); - break; - case 4: - memcpy(dest, src, 8); - break; - case 5: - memcpy(dest, src, 10); - break; - case 6: - memcpy(dest, src, 12); - break; - case 7: - memcpy(dest, src, 14); - break; - default: - OS::MemCopy(dest, src, chars * sizeof(*dest)); - break; - } -} - - -#elif defined(V8_HOST_ARCH_MIPS) -void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { - if (chars < OS::kMinComplexMemCopy) { - memcpy(dest, src, chars); - } else { - OS::MemCopy(dest, src, chars); - } -} - -void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { - if (chars < OS::kMinComplexMemCopy) { - memcpy(dest, src, chars * sizeof(*dest)); - } else { - OS::MemCopy(dest, src, chars * sizeof(*dest)); - } -} -#endif - - -class StringBuilder : public SimpleStringBuilder { - public: - explicit StringBuilder(int size) : SimpleStringBuilder(size) { } - StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { } - - // Add formatted contents to the builder just like printf(). - void AddFormatted(const char* format, ...); - - // Add formatted contents like printf based on a va_list. - void AddFormattedList(const char* format, va_list list); - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); -}; - -} } // namespace v8::internal - -#endif // V8_V8UTILS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/variables.cc nodejs-0.11.15/deps/v8/src/variables.cc --- nodejs-0.11.13/deps/v8/src/variables.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/variables.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "ast.h" -#include "scopes.h" -#include "variables.h" +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#include "src/ast.h" +#include "src/scopes.h" +#include "src/variables.h" namespace v8 { namespace internal { @@ -55,30 +32,26 @@ } -Variable::Variable(Scope* scope, - Handle<String> name, - VariableMode mode, - bool is_valid_LHS, - Kind kind, +Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode, + bool is_valid_ref, Kind kind, InitializationFlag initialization_flag, - Interface* interface) - : scope_(scope), - name_(name), - mode_(mode), - kind_(kind), - location_(UNALLOCATED), - index_(-1), - initializer_position_(RelocInfo::kNoPosition), - local_if_not_shadowed_(NULL), - is_valid_LHS_(is_valid_LHS), - force_context_allocation_(false), - is_used_(false), - initialization_flag_(initialization_flag), - interface_(interface) { - // Names must be canonicalized for fast equality checks. - ASSERT(name->IsInternalizedString()); + MaybeAssignedFlag maybe_assigned_flag, Interface* interface) + : scope_(scope), + name_(name), + mode_(mode), + kind_(kind), + location_(UNALLOCATED), + index_(-1), + initializer_position_(RelocInfo::kNoPosition), + local_if_not_shadowed_(NULL), + is_valid_ref_(is_valid_ref), + force_context_allocation_(false), + is_used_(false), + initialization_flag_(initialization_flag), + maybe_assigned_(maybe_assigned_flag), + interface_(interface) { // Var declared variables never need initialization. - ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization)); + DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization)); } diff -Nru nodejs-0.11.13/deps/v8/src/variables.h nodejs-0.11.15/deps/v8/src/variables.h --- nodejs-0.11.13/deps/v8/src/variables.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/variables.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_VARIABLES_H_ #define V8_VARIABLES_H_ -#include "zone.h" -#include "interface.h" +#include "src/ast-value-factory.h" +#include "src/interface.h" +#include "src/zone.h" namespace v8 { namespace internal { @@ -74,18 +52,15 @@ LOOKUP }; - Variable(Scope* scope, - Handle<String> name, - VariableMode mode, - bool is_valid_lhs, - Kind kind, - InitializationFlag initialization_flag, + Variable(Scope* scope, const AstRawString* name, VariableMode mode, + bool is_valid_ref, Kind kind, InitializationFlag initialization_flag, + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, Interface* interface = Interface::NewValue()); // Printing support static const char* Mode2String(VariableMode mode); - bool IsValidLeftHandSide() { return is_valid_LHS_; } + bool IsValidReference() { return is_valid_ref_; } // The source code for an eval() call may refer to a variable that is // in an outer scope about which we don't know anything (it may not @@ -93,17 +68,20 @@ // scope is only used to follow the context chain length. 
Scope* scope() const { return scope_; } - Handle<String> name() const { return name_; } + Handle<String> name() const { return name_->string(); } + const AstRawString* raw_name() const { return name_; } VariableMode mode() const { return mode_; } bool has_forced_context_allocation() const { return force_context_allocation_; } void ForceContextAllocation() { - ASSERT(mode_ != TEMPORARY); + DCHECK(mode_ != TEMPORARY); force_context_allocation_ = true; } bool is_used() { return is_used_; } - void set_is_used(bool flag) { is_used_ = flag; } + void set_is_used() { is_used_ = true; } + MaybeAssignedFlag maybe_assigned() const { return maybe_assigned_; } + void set_maybe_assigned() { maybe_assigned_ = kMaybeAssigned; } int initializer_position() { return initializer_position_; } void set_initializer_position(int pos) { initializer_position_ = pos; } @@ -135,7 +113,7 @@ } Variable* local_if_not_shadowed() const { - ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL); + DCHECK(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL); return local_if_not_shadowed_; } @@ -159,7 +137,7 @@ private: Scope* scope_; - Handle<String> name_; + const AstRawString* name_; VariableMode mode_; Kind kind_; Location location_; @@ -172,13 +150,14 @@ // binding scope (exclusive). Variable* local_if_not_shadowed_; - // Valid as a LHS? (const and this are not valid LHS, for example) - bool is_valid_LHS_; + // Valid as a reference? (const and this are not valid, for example) + bool is_valid_ref_; // Usage info. bool force_context_allocation_; // set by variable resolver bool is_used_; InitializationFlag initialization_flag_; + MaybeAssignedFlag maybe_assigned_; // Module type info. 
Interface* interface_; diff -Nru nodejs-0.11.13/deps/v8/src/vector.h nodejs-0.11.15/deps/v8/src/vector.h --- nodejs-0.11.13/deps/v8/src/vector.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/vector.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,182 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_VECTOR_H_ +#define V8_VECTOR_H_ + +#include <string.h> +#include <algorithm> + +#include "src/allocation.h" +#include "src/checks.h" +#include "src/globals.h" + +namespace v8 { +namespace internal { + + +template <typename T> +class Vector { + public: + Vector() : start_(NULL), length_(0) {} + Vector(T* data, int length) : start_(data), length_(length) { + DCHECK(length == 0 || (length > 0 && data != NULL)); + } + + static Vector<T> New(int length) { + return Vector<T>(NewArray<T>(length), length); + } + + // Returns a vector using the same backing storage as this one, + // spanning from and including 'from', to but not including 'to'. + Vector<T> SubVector(int from, int to) { + SLOW_DCHECK(to <= length_); + SLOW_DCHECK(from < to); + DCHECK(0 <= from); + return Vector<T>(start() + from, to - from); + } + + // Returns the length of the vector. + int length() const { return length_; } + + // Returns whether or not the vector is empty. + bool is_empty() const { return length_ == 0; } + + // Returns the pointer to the start of the data in the vector. + T* start() const { return start_; } + + // Access individual vector elements - checks bounds in debug mode. + T& operator[](int index) const { + DCHECK(0 <= index && index < length_); + return start_[index]; + } + + const T& at(int index) const { return operator[](index); } + + T& first() { return start_[0]; } + + T& last() { return start_[length_ - 1]; } + + // Returns a clone of this vector with a new backing store. 
+ Vector<T> Clone() const { + T* result = NewArray<T>(length_); + for (int i = 0; i < length_; i++) result[i] = start_[i]; + return Vector<T>(result, length_); + } + + void Sort(int (*cmp)(const T*, const T*)) { + std::sort(start(), start() + length(), RawComparer(cmp)); + } + + void Sort() { + std::sort(start(), start() + length()); + } + + void Truncate(int length) { + DCHECK(length <= length_); + length_ = length; + } + + // Releases the array underlying this vector. Once disposed the + // vector is empty. + void Dispose() { + DeleteArray(start_); + start_ = NULL; + length_ = 0; + } + + inline Vector<T> operator+(int offset) { + DCHECK(offset < length_); + return Vector<T>(start_ + offset, length_ - offset); + } + + // Factory method for creating empty vectors. + static Vector<T> empty() { return Vector<T>(NULL, 0); } + + template<typename S> + static Vector<T> cast(Vector<S> input) { + return Vector<T>(reinterpret_cast<T*>(input.start()), + input.length() * sizeof(S) / sizeof(T)); + } + + bool operator==(const Vector<T>& other) const { + if (length_ != other.length_) return false; + if (start_ == other.start_) return true; + for (int i = 0; i < length_; ++i) { + if (start_[i] != other.start_[i]) { + return false; + } + } + return true; + } + + protected: + void set_start(T* start) { start_ = start; } + + private: + T* start_; + int length_; + + class RawComparer { + public: + explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {} + bool operator()(const T& a, const T& b) { + return cmp_(&a, &b) < 0; + } + + private: + int (*cmp_)(const T*, const T*); + }; +}; + + +template <typename T> +class ScopedVector : public Vector<T> { + public: + explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { } + ~ScopedVector() { + DeleteArray(this->start()); + } + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector); +}; + + +inline int StrLength(const char* string) { + size_t length = strlen(string); + DCHECK(length == 
static_cast<size_t>(static_cast<int>(length))); + return static_cast<int>(length); +} + + +#define STATIC_ASCII_VECTOR(x) \ + v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \ + ARRAY_SIZE(x)-1) + +inline Vector<const char> CStrVector(const char* data) { + return Vector<const char>(data, StrLength(data)); +} + +inline Vector<const uint8_t> OneByteVector(const char* data, int length) { + return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length); +} + +inline Vector<const uint8_t> OneByteVector(const char* data) { + return OneByteVector(data, StrLength(data)); +} + +inline Vector<char> MutableCStrVector(char* data) { + return Vector<char>(data, StrLength(data)); +} + +inline Vector<char> MutableCStrVector(char* data, int max) { + int length = StrLength(data); + return Vector<char>(data, (length < max) ? length : max); +} + + +} } // namespace v8::internal + +#endif // V8_VECTOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/version.cc nodejs-0.11.15/deps/v8/src/version.cc --- nodejs-0.11.13/deps/v8/src/version.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/version.cc 2015-01-20 21:22:17.000000000 +0000 @@ -25,16 +25,16 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" +#include "src/v8.h" -#include "version.h" +#include "src/version.h" // These macros define the version number for the current version. // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define MAJOR_VERSION 3 -#define MINOR_VERSION 25 -#define BUILD_NUMBER 30 +#define MINOR_VERSION 28 +#define BUILD_NUMBER 73 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
@@ -84,13 +84,13 @@ const char* is_simulator = ""; #endif // USE_SIMULATOR if (GetPatch() > 0) { - OS::SNPrintF(str, "%d.%d.%d.%d%s%s", - GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate, - is_simulator); + SNPrintF(str, "%d.%d.%d.%d%s%s", + GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate, + is_simulator); } else { - OS::SNPrintF(str, "%d.%d.%d%s%s", - GetMajor(), GetMinor(), GetBuild(), candidate, - is_simulator); + SNPrintF(str, "%d.%d.%d%s%s", + GetMajor(), GetMinor(), GetBuild(), candidate, + is_simulator); } } @@ -101,15 +101,15 @@ // Generate generic SONAME if no specific SONAME is defined. const char* candidate = IsCandidate() ? "-candidate" : ""; if (GetPatch() > 0) { - OS::SNPrintF(str, "libv8-%d.%d.%d.%d%s.so", - GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate); + SNPrintF(str, "libv8-%d.%d.%d.%d%s.so", + GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate); } else { - OS::SNPrintF(str, "libv8-%d.%d.%d%s.so", - GetMajor(), GetMinor(), GetBuild(), candidate); + SNPrintF(str, "libv8-%d.%d.%d%s.so", + GetMajor(), GetMinor(), GetBuild(), candidate); } } else { // Use specific SONAME. - OS::SNPrintF(str, "%s", soname_); + SNPrintF(str, "%s", soname_); } } diff -Nru nodejs-0.11.13/deps/v8/src/version.h nodejs-0.11.15/deps/v8/src/version.h --- nodejs-0.11.13/deps/v8/src/version.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/version.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_VERSION_H_ #define V8_VERSION_H_ @@ -39,6 +16,7 @@ static int GetBuild() { return build_; } static int GetPatch() { return patch_; } static bool IsCandidate() { return candidate_; } + static int Hash() { return (major_ << 20) ^ (minor_ << 10) ^ patch_; } // Calculate the V8 version string. static void GetString(Vector<char> str); diff -Nru nodejs-0.11.13/deps/v8/src/vm-state.h nodejs-0.11.15/deps/v8/src/vm-state.h --- nodejs-0.11.13/deps/v8/src/vm-state.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/vm-state.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2010 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_VM_STATE_H_ #define V8_VM_STATE_H_ -#include "allocation.h" -#include "isolate.h" +#include "src/allocation.h" +#include "src/isolate.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/vm-state-inl.h nodejs-0.11.15/deps/v8/src/vm-state-inl.h --- nodejs-0.11.13/deps/v8/src/vm-state-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/vm-state-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_VM_STATE_INL_H_ #define V8_VM_STATE_INL_H_ -#include "vm-state.h" -#include "log.h" -#include "simulator.h" +#include "src/vm-state.h" +#include "src/log.h" +#include "src/simulator.h" namespace v8 { namespace internal { @@ -63,8 +40,7 @@ VMState<Tag>::VMState(Isolate* isolate) : isolate_(isolate), previous_tag_(isolate->current_vm_state()) { if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) { - LOG(isolate_, - TimerEvent(Logger::START, Logger::TimerEventScope::v8_external)); + LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name())); } isolate_->set_current_vm_state(Tag); } @@ -73,8 +49,7 @@ template <StateTag Tag> VMState<Tag>::~VMState() { if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) { - LOG(isolate_, - TimerEvent(Logger::END, Logger::TimerEventScope::v8_external)); + LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name())); } isolate_->set_current_vm_state(previous_tag_); } diff -Nru nodejs-0.11.13/deps/v8/src/weak_collection.js nodejs-0.11.15/deps/v8/src/weak_collection.js --- nodejs-0.11.13/deps/v8/src/weak_collection.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/weak_collection.js 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
"use strict"; @@ -38,12 +15,36 @@ // ------------------------------------------------------------------- // Harmony WeakMap -function WeakMapConstructor() { - if (%_IsConstructCall()) { - %WeakCollectionInitialize(this); - } else { +function WeakMapConstructor(iterable) { + if (!%_IsConstructCall()) { throw MakeTypeError('constructor_not_function', ['WeakMap']); } + + var iter, adder; + + if (!IS_NULL_OR_UNDEFINED(iterable)) { + iter = GetIterator(iterable); + adder = this.set; + if (!IS_SPEC_FUNCTION(adder)) { + throw MakeTypeError('property_not_function', ['set', this]); + } + } + + %WeakCollectionInitialize(this); + + if (IS_UNDEFINED(iter)) return; + + var next, done, nextItem; + while (!(next = iter.next()).done) { + if (!IS_SPEC_OBJECT(next)) { + throw MakeTypeError('iterator_result_not_an_object', [next]); + } + nextItem = next.value; + if (!IS_SPEC_OBJECT(nextItem)) { + throw MakeTypeError('iterator_value_not_an_object', [nextItem]); + } + %_CallFunction(this, nextItem[0], nextItem[1], adder); + } } @@ -112,7 +113,7 @@ %SetCode($WeakMap, WeakMapConstructor); %FunctionSetPrototype($WeakMap, new $Object()); - %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM); + %AddNamedProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM); // Set up the non-enumerable functions on the WeakMap prototype object. 
InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array( @@ -130,12 +131,32 @@ // ------------------------------------------------------------------- // Harmony WeakSet -function WeakSetConstructor() { - if (%_IsConstructCall()) { - %WeakCollectionInitialize(this); - } else { +function WeakSetConstructor(iterable) { + if (!%_IsConstructCall()) { throw MakeTypeError('constructor_not_function', ['WeakSet']); } + + var iter, adder; + + if (!IS_NULL_OR_UNDEFINED(iterable)) { + iter = GetIterator(iterable); + adder = this.add; + if (!IS_SPEC_FUNCTION(adder)) { + throw MakeTypeError('property_not_function', ['add', this]); + } + } + + %WeakCollectionInitialize(this); + + if (IS_UNDEFINED(iter)) return; + + var next, done; + while (!(next = iter.next()).done) { + if (!IS_SPEC_OBJECT(next)) { + throw MakeTypeError('iterator_result_not_an_object', [next]); + } + %_CallFunction(this, next.value, adder); + } } @@ -192,7 +213,7 @@ %SetCode($WeakSet, WeakSetConstructor); %FunctionSetPrototype($WeakSet, new $Object()); - %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM); + %AddNamedProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM); // Set up the non-enumerable functions on the WeakSet prototype object. InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array( diff -Nru nodejs-0.11.13/deps/v8/src/win32-headers.h nodejs-0.11.15/deps/v8/src/win32-headers.h --- nodejs-0.11.13/deps/v8/src/win32-headers.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/win32-headers.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_WIN32_HEADERS_H_ -#define V8_WIN32_HEADERS_H_ - -#ifndef WIN32_LEAN_AND_MEAN -// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI. -#define WIN32_LEAN_AND_MEAN -#endif -#ifndef NOMINMAX -#define NOMINMAX -#endif -#ifndef NOKERNEL -#define NOKERNEL -#endif -#ifndef NOUSER -#define NOUSER -#endif -#ifndef NOSERVICE -#define NOSERVICE -#endif -#ifndef NOSOUND -#define NOSOUND -#endif -#ifndef NOMCX -#define NOMCX -#endif -// Require Windows XP or higher (this is required for the RtlCaptureContext -// function to be present). -#ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x501 -#endif - -#include <windows.h> - -#include <signal.h> // For raise(). -#include <time.h> // For LocalOffset() implementation. -#include <mmsystem.h> // For timeGetTime(). 
-#ifdef __MINGW32__ -// Require Windows XP or higher when compiling with MinGW. This is for MinGW -// header files to expose getaddrinfo. -#undef _WIN32_WINNT -#define _WIN32_WINNT 0x501 -#endif // __MINGW32__ -#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) -#include <dbghelp.h> // For SymLoadModule64 and al. -#include <errno.h> // For STRUNCATE -#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) -#include <limits.h> // For INT_MAX and al. -#include <tlhelp32.h> // For Module32First and al. - -// These additional WIN32 includes have to be right here as the #undef's below -// makes it impossible to have them elsewhere. -#include <winsock2.h> -#include <ws2tcpip.h> -#ifndef __MINGW32__ -#include <wspiapi.h> -#endif // __MINGW32__ -#include <process.h> // For _beginthreadex(). -#include <stdlib.h> - -#undef VOID -#undef DELETE -#undef IN -#undef THIS -#undef CONST -#undef NAN -#undef UNKNOWN -#undef NONE -#undef ANY -#undef IGNORE -#undef STRICT -#undef GetObject -#undef CreateSemaphore -#undef Yield - -#endif // V8_WIN32_HEADERS_H_ diff -Nru nodejs-0.11.13/deps/v8/src/win32-math.cc nodejs-0.11.15/deps/v8/src/win32-math.cc --- nodejs-0.11.13/deps/v8/src/win32-math.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/win32-math.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please -// refer to The Open Group Base Specification for specification of the correct -// semantics for these functions. -// (http://www.opengroup.org/onlinepubs/000095399/) -#if defined(_MSC_VER) && (_MSC_VER < 1800) - -#include "win32-headers.h" -#include <limits.h> // Required for INT_MAX etc. -#include <float.h> // Required for DBL_MAX and on Win32 for finite() -#include <cmath> -#include "win32-math.h" - -#include "checks.h" - - -namespace std { - -// Test for a NaN (not a number) value - usually defined in math.h -int isnan(double x) { - return _isnan(x); -} - - -// Test for infinity - usually defined in math.h -int isinf(double x) { - return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0; -} - - -// Test for finite value - usually defined in math.h -int isfinite(double x) { - return _finite(x); -} - - -// Test if x is less than y and both nominal - usually defined in math.h -int isless(double x, double y) { - return isnan(x) || isnan(y) ? 
0 : x < y; -} - - -// Test if x is greater than y and both nominal - usually defined in math.h -int isgreater(double x, double y) { - return isnan(x) || isnan(y) ? 0 : x > y; -} - - -// Classify floating point number - usually defined in math.h -int fpclassify(double x) { - // Use the MS-specific _fpclass() for classification. - int flags = _fpclass(x); - - // Determine class. We cannot use a switch statement because - // the _FPCLASS_ constants are defined as flags. - if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL; - if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO; - if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL; - if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE; - - // All cases should be covered by the code above. - ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN)); - return FP_NAN; -} - - -// Test sign - usually defined in math.h -int signbit(double x) { - // We need to take care of the special case of both positive - // and negative versions of zero. - if (x == 0) - return _fpclass(x) & _FPCLASS_NZ; - else - return x < 0; -} - -} // namespace std - -#endif // _MSC_VER diff -Nru nodejs-0.11.13/deps/v8/src/win32-math.h nodejs-0.11.15/deps/v8/src/win32-math.h --- nodejs-0.11.13/deps/v8/src/win32-math.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/win32-math.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please -// refer to The Open Group Base Specification for specification of the correct -// semantics for these functions. -// (http://www.opengroup.org/onlinepubs/000095399/) - -#ifndef V8_WIN32_MATH_H_ -#define V8_WIN32_MATH_H_ - -#ifndef _MSC_VER -#error Wrong environment, expected MSVC. -#endif // _MSC_VER - -// MSVC 2013+ provides implementations of all standard math functions. 
-#if (_MSC_VER < 1800) -enum { - FP_NAN, - FP_INFINITE, - FP_ZERO, - FP_SUBNORMAL, - FP_NORMAL -}; - - -namespace std { - -int isfinite(double x); -int isinf(double x); -int isnan(double x); -int isless(double x, double y); -int isgreater(double x, double y); -int fpclassify(double x); -int signbit(double x); - -} // namespace std - -#endif // _MSC_VER < 1800 - -#endif // V8_WIN32_MATH_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x64/assembler-x64.cc nodejs-0.11.15/deps/v8/src/x64/assembler-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/assembler-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/assembler-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "macro-assembler.h" -#include "serialize.h" +#include "src/macro-assembler.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -38,58 +15,23 @@ // ----------------------------------------------------------------------------- // Implementation of CpuFeatures +void CpuFeatures::ProbeImpl(bool cross_compile) { + base::CPU cpu; + CHECK(cpu.has_sse2()); // SSE2 support is mandatory. + CHECK(cpu.has_cmov()); // CMOV support is mandatory. -#ifdef DEBUG -bool CpuFeatures::initialized_ = false; -#endif -uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures; -uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; -uint64_t CpuFeatures::cross_compile_ = 0; + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; -ExternalReference ExternalReference::cpu_features() { - ASSERT(CpuFeatures::initialized_); - return ExternalReference(&CpuFeatures::supported_); + if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1; + if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3; + // SAHF is not generally available in long mode. 
+ if (cpu.has_sahf() && FLAG_enable_sahf) supported_|= 1u << SAHF; } -void CpuFeatures::Probe() { - ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures); -#ifdef DEBUG - initialized_ = true; -#endif - supported_ = kDefaultCpuFeatures; - if (Serializer::enabled()) { - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - return; // No features if we might serialize. - } - - uint64_t probed_features = 0; - CPU cpu; - if (cpu.has_sse41()) { - probed_features |= static_cast<uint64_t>(1) << SSE4_1; - } - if (cpu.has_sse3()) { - probed_features |= static_cast<uint64_t>(1) << SSE3; - } - - // SSE2 must be available on every x64 CPU. - ASSERT(cpu.has_sse2()); - probed_features |= static_cast<uint64_t>(1) << SSE2; - - // CMOV must be available on every x64 CPU. - ASSERT(cpu.has_cmov()); - probed_features |= static_cast<uint64_t>(1) << CMOV; - - // SAHF is not generally available in long mode. - if (cpu.has_sahf()) { - probed_features |= static_cast<uint64_t>(1) << SAHF; - } - - uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); - supported_ = probed_features | platform_features; - found_by_runtime_probing_only_ - = probed_features & ~kDefaultCpuFeatures & ~platform_features; -} +void CpuFeatures::PrintTarget() { } +void CpuFeatures::PrintFeatures() { } // ----------------------------------------------------------------------------- @@ -115,7 +57,7 @@ patcher.masm()->call(kScratchRegister); // Check that the size of the code generated is as expected. - ASSERT_EQ(Assembler::kCallSequenceLength, + DCHECK_EQ(Assembler::kCallSequenceLength, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize)); // Add the requested number of int3 instructions after the call. @@ -132,7 +74,7 @@ } // Indicate that code has changed. 
- CPU::FlushICache(pc_, instruction_count); + CpuFeatures::FlushICache(pc_, instruction_count); } @@ -176,7 +118,7 @@ Register index, ScaleFactor scale, int32_t disp) : rex_(0) { - ASSERT(!index.is(rsp)); + DCHECK(!index.is(rsp)); len_ = 1; set_sib(scale, index, base); if (disp == 0 && !base.is(rbp) && !base.is(r13)) { @@ -196,7 +138,7 @@ Operand::Operand(Register index, ScaleFactor scale, int32_t disp) : rex_(0) { - ASSERT(!index.is(rsp)); + DCHECK(!index.is(rsp)); len_ = 1; set_modrm(0, rsp); set_sib(scale, index, rbp); @@ -205,10 +147,10 @@ Operand::Operand(const Operand& operand, int32_t offset) { - ASSERT(operand.len_ >= 1); + DCHECK(operand.len_ >= 1); // Operand encodes REX ModR/M [SIB] [Disp]. byte modrm = operand.buf_[0]; - ASSERT(modrm < 0xC0); // Disallow mode 3 (register target). + DCHECK(modrm < 0xC0); // Disallow mode 3 (register target). bool has_sib = ((modrm & 0x07) == 0x04); byte mode = modrm & 0xC0; int disp_offset = has_sib ? 2 : 1; @@ -226,7 +168,7 @@ } // Write new operand with same registers, but with modified displacement. - ASSERT(offset >= 0 ? disp_value + offset > disp_value + DCHECK(offset >= 0 ? disp_value + offset > disp_value : disp_value + offset < disp_value); // No overflow. disp_value += offset; rex_ = operand.rex_; @@ -253,7 +195,7 @@ bool Operand::AddressUsesRegister(Register reg) const { int code = reg.code(); - ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand. + DCHECK((buf_[0] & 0xC0) != 0xC0); // Always a memory operand. // Start with only low three bits of base register. Initial decoding doesn't // distinguish on the REX.B bit. int base_code = buf_[0] & 0x07; @@ -310,12 +252,12 @@ void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). - ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. // Set up code descriptor. 
desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); - ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system. + DCHECK(desc->instr_size > 0); // Zero-size code objects upset the system. desc->reloc_size = static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos()); desc->origin = this; @@ -323,7 +265,7 @@ void Assembler::Align(int m) { - ASSERT(IsPowerOf2(m)); + DCHECK(IsPowerOf2(m)); int delta = (m - (pc_offset() & (m - 1))) & (m - 1); Nop(delta); } @@ -344,8 +286,8 @@ void Assembler::bind_to(Label* L, int pos) { - ASSERT(!L->is_bound()); // Label may only be bound once. - ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid. + DCHECK(!L->is_bound()); // Label may only be bound once. + DCHECK(0 <= pos && pos <= pc_offset()); // Position must be valid. if (L->is_linked()) { int current = L->pos(); int next = long_at(current); @@ -364,7 +306,7 @@ int fixup_pos = L->near_link_pos(); int offset_to_next = static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos))); - ASSERT(offset_to_next <= 0); + DCHECK(offset_to_next <= 0); int disp = pos - (fixup_pos + sizeof(int8_t)); CHECK(is_int8(disp)); set_byte_at(fixup_pos, disp); @@ -384,16 +326,13 @@ void Assembler::GrowBuffer() { - ASSERT(buffer_overflow()); + DCHECK(buffer_overflow()); if (!own_buffer_) FATAL("external code buffer is too small"); // Compute new buffer size. CodeDesc desc; // the new buffer - if (buffer_size_ < 4*KB) { - desc.buffer_size = 4*KB; - } else { - desc.buffer_size = 2*buffer_size_; - } + desc.buffer_size = 2 * buffer_size_; + // Some internal data structures overflow for very large buffers, // they must ensure that kMaximalBufferSize is not too large. 
if ((desc.buffer_size > kMaximalBufferSize) || @@ -417,18 +356,12 @@ intptr_t pc_delta = desc.buffer - buffer_; intptr_t rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); - OS::MemMove(desc.buffer, buffer_, desc.instr_size); - OS::MemMove(rc_delta + reloc_info_writer.pos(), - reloc_info_writer.pos(), desc.reloc_size); + MemMove(desc.buffer, buffer_, desc.instr_size); + MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(), + desc.reloc_size); // Switch buffers. - if (isolate() != NULL && - isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } + DeleteArray(buffer_); buffer_ = desc.buffer; buffer_size_ = desc.buffer_size; pc_ += pc_delta; @@ -446,17 +379,17 @@ } } - ASSERT(!buffer_overflow()); + DCHECK(!buffer_overflow()); } void Assembler::emit_operand(int code, const Operand& adr) { - ASSERT(is_uint3(code)); + DCHECK(is_uint3(code)); const unsigned length = adr.len_; - ASSERT(length > 0); + DCHECK(length > 0); // Emit updated ModR/M byte containing the given register. - ASSERT((adr.buf_[0] & 0x38) == 0); + DCHECK((adr.buf_[0] & 0x38) == 0); pc_[0] = adr.buf_[0] | code << 3; // Emit the rest of the encoded operand. @@ -467,24 +400,30 @@ // Assembler Instruction implementations. -void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) { +void Assembler::arithmetic_op(byte opcode, + Register reg, + const Operand& op, + int size) { EnsureSpace ensure_space(this); - emit_rex_64(reg, op); + emit_rex(reg, op, size); emit(opcode); emit_operand(reg, op); } -void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) { +void Assembler::arithmetic_op(byte opcode, + Register reg, + Register rm_reg, + int size) { EnsureSpace ensure_space(this); - ASSERT((opcode & 0xC6) == 2); + DCHECK((opcode & 0xC6) == 2); if (rm_reg.low_bits() == 4) { // Forces SIB byte. 
// Swap reg and rm_reg and change opcode operand order. - emit_rex_64(rm_reg, reg); + emit_rex(rm_reg, reg, size); emit(opcode ^ 0x02); emit_modrm(rm_reg, reg); } else { - emit_rex_64(reg, rm_reg); + emit_rex(reg, rm_reg, size); emit(opcode); emit_modrm(reg, rm_reg); } @@ -493,7 +432,7 @@ void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) { EnsureSpace ensure_space(this); - ASSERT((opcode & 0xC6) == 2); + DCHECK((opcode & 0xC6) == 2); if (rm_reg.low_bits() == 4) { // Forces SIB byte. // Swap reg and rm_reg and change opcode operand order. emit(0x66); @@ -520,37 +459,45 @@ } -void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) { +void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) { EnsureSpace ensure_space(this); - ASSERT((opcode & 0xC6) == 2); - if (rm_reg.low_bits() == 4) { // Forces SIB byte. + if (!reg.is_byte_register()) { + // Register is not one of al, bl, cl, dl. Its encoding needs REX. + emit_rex_32(reg); + } + emit(opcode); + emit_operand(reg, op); +} + + +void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) { + EnsureSpace ensure_space(this); + DCHECK((opcode & 0xC6) == 2); + if (rm_reg.low_bits() == 4) { // Forces SIB byte. // Swap reg and rm_reg and change opcode operand order. - emit_optional_rex_32(rm_reg, reg); - emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD. + if (!rm_reg.is_byte_register() || !reg.is_byte_register()) { + // Register is not one of al, bl, cl, dl. Its encoding needs REX. + emit_rex_32(rm_reg, reg); + } + emit(opcode ^ 0x02); emit_modrm(rm_reg, reg); } else { - emit_optional_rex_32(reg, rm_reg); + if (!reg.is_byte_register() || !rm_reg.is_byte_register()) { + // Register is not one of al, bl, cl, dl. Its encoding needs REX. 
+ emit_rex_32(reg, rm_reg); + } emit(opcode); emit_modrm(reg, rm_reg); } } -void Assembler::arithmetic_op_32(byte opcode, - Register reg, - const Operand& rm_reg) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(reg, rm_reg); - emit(opcode); - emit_operand(reg, rm_reg); -} - - void Assembler::immediate_arithmetic_op(byte subcode, Register dst, - Immediate src) { + Immediate src, + int size) { EnsureSpace ensure_space(this); - emit_rex_64(dst); + emit_rex(dst, size); if (is_int8(src.value_)) { emit(0x83); emit_modrm(subcode, dst); @@ -567,9 +514,10 @@ void Assembler::immediate_arithmetic_op(byte subcode, const Operand& dst, - Immediate src) { + Immediate src, + int size) { EnsureSpace ensure_space(this); - emit_rex_64(dst); + emit_rex(dst, size); if (is_int8(src.value_)) { emit(0x83); emit_operand(subcode, dst); @@ -621,49 +569,12 @@ } -void Assembler::immediate_arithmetic_op_32(byte subcode, - Register dst, - Immediate src) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst); - if (is_int8(src.value_)) { - emit(0x83); - emit_modrm(subcode, dst); - emit(src.value_); - } else if (dst.is(rax)) { - emit(0x05 | (subcode << 3)); - emitl(src.value_); - } else { - emit(0x81); - emit_modrm(subcode, dst); - emitl(src.value_); - } -} - - -void Assembler::immediate_arithmetic_op_32(byte subcode, - const Operand& dst, - Immediate src) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst); - if (is_int8(src.value_)) { - emit(0x83); - emit_operand(subcode, dst); - emit(src.value_); - } else { - emit(0x81); - emit_operand(subcode, dst); - emitl(src.value_); - } -} - - void Assembler::immediate_arithmetic_op_8(byte subcode, const Operand& dst, Immediate src) { EnsureSpace ensure_space(this); emit_optional_rex_32(dst); - ASSERT(is_int8(src.value_) || is_uint8(src.value_)); + DCHECK(is_int8(src.value_) || is_uint8(src.value_)); emit(0x80); emit_operand(subcode, dst); emit(src.value_); @@ -675,25 +586,29 @@ Immediate src) { EnsureSpace 
ensure_space(this); if (!dst.is_byte_register()) { - // Use 64-bit mode byte registers. - emit_rex_64(dst); + // Register is not one of al, bl, cl, dl. Its encoding needs REX. + emit_rex_32(dst); } - ASSERT(is_int8(src.value_) || is_uint8(src.value_)); + DCHECK(is_int8(src.value_) || is_uint8(src.value_)); emit(0x80); emit_modrm(subcode, dst); emit(src.value_); } -void Assembler::shift(Register dst, Immediate shift_amount, int subcode) { +void Assembler::shift(Register dst, + Immediate shift_amount, + int subcode, + int size) { EnsureSpace ensure_space(this); - ASSERT(is_uint6(shift_amount.value_)); // illegal shift count + DCHECK(size == kInt64Size ? is_uint6(shift_amount.value_) + : is_uint5(shift_amount.value_)); if (shift_amount.value_ == 1) { - emit_rex_64(dst); + emit_rex(dst, size); emit(0xD1); emit_modrm(subcode, dst); } else { - emit_rex_64(dst); + emit_rex(dst, size); emit(0xC1); emit_modrm(subcode, dst); emit(shift_amount.value_); @@ -701,38 +616,14 @@ } -void Assembler::shift(Register dst, int subcode) { +void Assembler::shift(Register dst, int subcode, int size) { EnsureSpace ensure_space(this); - emit_rex_64(dst); - emit(0xD3); - emit_modrm(subcode, dst); -} - - -void Assembler::shift_32(Register dst, int subcode) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst); + emit_rex(dst, size); emit(0xD3); emit_modrm(subcode, dst); } -void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) { - EnsureSpace ensure_space(this); - ASSERT(is_uint5(shift_amount.value_)); // illegal shift count - if (shift_amount.value_ == 1) { - emit_optional_rex_32(dst); - emit(0xD1); - emit_modrm(subcode, dst); - } else { - emit_optional_rex_32(dst); - emit(0xC1); - emit_modrm(subcode, dst); - emit(shift_amount.value_); - } -} - - void Assembler::bt(const Operand& dst, Register src) { EnsureSpace ensure_space(this); emit_rex_64(src, dst); @@ -767,13 +658,13 @@ emit(0xE8); if (L->is_bound()) { int offset = L->pos() - pc_offset() - sizeof(int32_t); 
- ASSERT(offset <= 0); + DCHECK(offset <= 0); emitl(offset); } else if (L->is_linked()) { emitl(L->pos()); L->link_to(pc_offset() - sizeof(int32_t)); } else { - ASSERT(L->is_unused()); + DCHECK(L->is_unused()); int32_t current = pc_offset(); emitl(current); L->link_to(current); @@ -782,7 +673,7 @@ void Assembler::call(Address entry, RelocInfo::Mode rmode) { - ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + DCHECK(RelocInfo::IsRuntimeEntry(rmode)); positions_recorder()->WriteRecordedPositions(); EnsureSpace ensure_space(this); // 1110 1000 #32-bit disp. @@ -833,7 +724,7 @@ emit(0xE8); Address source = pc_ + 4; intptr_t displacement = target - source; - ASSERT(is_int32(displacement)); + DCHECK(is_int32(displacement)); emitl(static_cast<int32_t>(displacement)); } @@ -864,7 +755,7 @@ } // No need to check CpuInfo for CMOV support, it's a required part of the // 64-bit architecture. - ASSERT(cc >= 0); // Use mov for unconditional moves. + DCHECK(cc >= 0); // Use mov for unconditional moves. EnsureSpace ensure_space(this); // Opcode: REX.W 0f 40 + cc /r. emit_rex_64(dst, src); @@ -880,7 +771,7 @@ } else if (cc == never) { return; } - ASSERT(cc >= 0); + DCHECK(cc >= 0); EnsureSpace ensure_space(this); // Opcode: REX.W 0f 40 + cc /r. emit_rex_64(dst, src); @@ -896,7 +787,7 @@ } else if (cc == never) { return; } - ASSERT(cc >= 0); + DCHECK(cc >= 0); EnsureSpace ensure_space(this); // Opcode: 0f 40 + cc /r. emit_optional_rex_32(dst, src); @@ -912,7 +803,7 @@ } else if (cc == never) { return; } - ASSERT(cc >= 0); + DCHECK(cc >= 0); EnsureSpace ensure_space(this); // Opcode: 0f 40 + cc /r. 
emit_optional_rex_32(dst, src); @@ -923,7 +814,7 @@ void Assembler::cmpb_al(Immediate imm8) { - ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_)); + DCHECK(is_int8(imm8.value_) || is_uint8(imm8.value_)); EnsureSpace ensure_space(this); emit(0x3c); emit(imm8.value_); @@ -1001,6 +892,14 @@ } +void Assembler::emit_div(Register src, int size) { + EnsureSpace ensure_space(this); + emit_rex(src, size); + emit(0xF7); + emit_modrm(0x6, src); +} + + void Assembler::emit_imul(Register src, int size) { EnsureSpace ensure_space(this); emit_rex(src, size); @@ -1072,12 +971,12 @@ return; } EnsureSpace ensure_space(this); - ASSERT(is_uint4(cc)); + DCHECK(is_uint4(cc)); if (L->is_bound()) { const int short_size = 2; const int long_size = 6; int offs = L->pos() - pc_offset(); - ASSERT(offs <= 0); + DCHECK(offs <= 0); // Determine whether we can use 1-byte offsets for backwards branches, // which have a max range of 128 bytes. @@ -1103,7 +1002,7 @@ byte disp = 0x00; if (L->is_near_linked()) { int offset = L->near_link_pos() - pc_offset(); - ASSERT(is_int8(offset)); + DCHECK(is_int8(offset)); disp = static_cast<byte>(offset & 0xFF); } L->link_to(pc_offset(), Label::kNear); @@ -1115,7 +1014,7 @@ emitl(L->pos()); L->link_to(pc_offset() - sizeof(int32_t)); } else { - ASSERT(L->is_unused()); + DCHECK(L->is_unused()); emit(0x0F); emit(0x80 | cc); int32_t current = pc_offset(); @@ -1126,9 +1025,9 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) { - ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + DCHECK(RelocInfo::IsRuntimeEntry(rmode)); EnsureSpace ensure_space(this); - ASSERT(is_uint4(cc)); + DCHECK(is_uint4(cc)); emit(0x0F); emit(0x80 | cc); emit_runtime_entry(entry, rmode); @@ -1139,7 +1038,7 @@ Handle<Code> target, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); - ASSERT(is_uint4(cc)); + DCHECK(is_uint4(cc)); // 0000 1111 1000 tttn #32-bit disp. 
emit(0x0F); emit(0x80 | cc); @@ -1153,7 +1052,7 @@ const int long_size = sizeof(int32_t); if (L->is_bound()) { int offs = L->pos() - pc_offset() - 1; - ASSERT(offs <= 0); + DCHECK(offs <= 0); if (is_int8(offs - short_size) && !predictable_code_size()) { // 1110 1011 #8-bit disp. emit(0xEB); @@ -1168,7 +1067,7 @@ byte disp = 0x00; if (L->is_near_linked()) { int offset = L->near_link_pos() - pc_offset(); - ASSERT(is_int8(offset)); + DCHECK(is_int8(offset)); disp = static_cast<byte>(offset & 0xFF); } L->link_to(pc_offset(), Label::kNear); @@ -1180,7 +1079,7 @@ L->link_to(pc_offset() - long_size); } else { // 1110 1001 #32-bit disp. - ASSERT(L->is_unused()); + DCHECK(L->is_unused()); emit(0xE9); int32_t current = pc_offset(); emitl(current); @@ -1198,9 +1097,9 @@ void Assembler::jmp(Address entry, RelocInfo::Mode rmode) { - ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + DCHECK(RelocInfo::IsRuntimeEntry(rmode)); EnsureSpace ensure_space(this); - ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + DCHECK(RelocInfo::IsRuntimeEntry(rmode)); emit(0xE9); emit_runtime_entry(entry, rmode); } @@ -1239,7 +1138,7 @@ emit(0xA1); emitp(value, mode); } else { - ASSERT(kPointerSize == kInt32Size); + DCHECK(kPointerSize == kInt32Size); emit(0xA1); emitp(value, mode); // In 64-bit mode, need to zero extend the operand to 8 bytes. 
@@ -1371,7 +1270,7 @@ emit(0xC7); emit_modrm(0x0, dst); } else { - ASSERT(size == kInt32Size); + DCHECK(size == kInt32Size); emit(0xB8 + dst.low_bits()); } emit(value); @@ -1417,13 +1316,13 @@ emit_operand(0, dst); if (src->is_bound()) { int offset = src->pos() - pc_offset() - sizeof(int32_t); - ASSERT(offset <= 0); + DCHECK(offset <= 0); emitl(offset); } else if (src->is_linked()) { emitl(src->pos()); src->link_to(pc_offset() - sizeof(int32_t)); } else { - ASSERT(src->is_unused()); + DCHECK(src->is_unused()); int32_t current = pc_offset(); emitl(current); src->link_to(current); @@ -1431,6 +1330,15 @@ } +void Assembler::movsxbl(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0xBE); + emit_operand(dst, src); +} + + void Assembler::movsxbq(Register dst, const Operand& src) { EnsureSpace ensure_space(this); emit_rex_64(dst, src); @@ -1440,6 +1348,15 @@ } +void Assembler::movsxwl(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0xBF); + emit_operand(dst, src); +} + + void Assembler::movsxwq(Register dst, const Operand& src) { EnsureSpace ensure_space(this); emit_rex_64(dst, src); @@ -1476,6 +1393,17 @@ } +void Assembler::emit_movzxb(Register dst, Register src, int size) { + EnsureSpace ensure_space(this); + // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore + // there is no need to make this a 64 bit operation. + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0xB6); + emit_modrm(dst, src); +} + + void Assembler::emit_movzxw(Register dst, const Operand& src, int size) { EnsureSpace ensure_space(this); // 32 bit operations zero the top 32 bits of 64 bit registers. 
Therefore @@ -1707,7 +1635,7 @@ void Assembler::ret(int imm16) { EnsureSpace ensure_space(this); - ASSERT(is_uint16(imm16)); + DCHECK(is_uint16(imm16)); if (imm16 == 0) { emit(0xC3); } else { @@ -1724,7 +1652,7 @@ return; } EnsureSpace ensure_space(this); - ASSERT(is_uint4(cc)); + DCHECK(is_uint4(cc)); if (!reg.is_byte_register()) { // Use x64 byte registers, where different. emit_rex_32(reg); } @@ -1770,6 +1698,14 @@ } +void Assembler::emit_xchg(Register dst, const Operand& src, int size) { + EnsureSpace ensure_space(this); + emit_rex(dst, src, size); + emit(0x87); + emit_operand(dst, src); +} + + void Assembler::store_rax(void* dst, RelocInfo::Mode mode) { EnsureSpace ensure_space(this); if (kPointerSize == kInt64Size) { @@ -1777,7 +1713,7 @@ emit(0xA3); emitp(dst, mode); } else { - ASSERT(kPointerSize == kInt32Size); + DCHECK(kPointerSize == kInt32Size); emit(0xA3); emitp(dst, mode); // In 64-bit mode, need to zero extend the operand to 8 bytes. @@ -1811,7 +1747,7 @@ void Assembler::testb(Register reg, Immediate mask) { - ASSERT(is_int8(mask.value_) || is_uint8(mask.value_)); + DCHECK(is_int8(mask.value_) || is_uint8(mask.value_)); EnsureSpace ensure_space(this); if (reg.is(rax)) { emit(0xA8); @@ -1829,7 +1765,7 @@ void Assembler::testb(const Operand& op, Immediate mask) { - ASSERT(is_int8(mask.value_) || is_uint8(mask.value_)); + DCHECK(is_int8(mask.value_) || is_uint8(mask.value_)); EnsureSpace ensure_space(this); emit_optional_rex_32(rax, op); emit(0xF6); @@ -1977,7 +1913,7 @@ void Assembler::fstp(int index) { - ASSERT(is_uint3(index)); + DCHECK(is_uint3(index)); EnsureSpace ensure_space(this); emit_farith(0xDD, 0xD8, index); } @@ -2008,7 +1944,7 @@ void Assembler::fisttp_s(const Operand& adr) { - ASSERT(IsEnabled(SSE3)); + DCHECK(IsEnabled(SSE3)); EnsureSpace ensure_space(this); emit_optional_rex_32(adr); emit(0xDB); @@ -2017,7 +1953,7 @@ void Assembler::fisttp_d(const Operand& adr) { - ASSERT(IsEnabled(SSE3)); + DCHECK(IsEnabled(SSE3)); EnsureSpace 
ensure_space(this); emit_optional_rex_32(adr); emit(0xDD); @@ -2270,14 +2206,15 @@ void Assembler::sahf() { // TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf // in 64-bit mode. Test CpuID. + DCHECK(IsEnabled(SAHF)); EnsureSpace ensure_space(this); emit(0x9E); } void Assembler::emit_farith(int b1, int b2, int i) { - ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode - ASSERT(is_uint3(i)); // illegal stack offset + DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode + DCHECK(is_uint3(i)); // illegal stack offset emit(b1); emit(b2 + i); } @@ -2513,8 +2450,8 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { - ASSERT(IsEnabled(SSE4_1)); - ASSERT(is_uint8(imm8)); + DCHECK(IsEnabled(SSE4_1)); + DCHECK(is_uint8(imm8)); EnsureSpace ensure_space(this); emit(0x66); emit_optional_rex_32(src, dst); @@ -2574,7 +2511,7 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) { - ASSERT(is_uint8(imm8)); + DCHECK(is_uint8(imm8)); EnsureSpace ensure_space(this); emit_optional_rex_32(src, dst); emit(0x0F); @@ -2873,6 +2810,16 @@ } +void Assembler::sqrtsd(XMMRegister dst, const Operand& src) { + EnsureSpace ensure_space(this); + emit(0xF2); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0x51); + emit_sse_operand(dst, src); +} + + void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { EnsureSpace ensure_space(this); emit(0x66); @@ -2906,7 +2853,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, Assembler::RoundingMode mode) { - ASSERT(IsEnabled(SSE4_1)); + DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); emit(0x66); emit_optional_rex_32(dst, src); @@ -2974,17 +2921,11 @@ // Relocation information implementations. void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - ASSERT(!RelocInfo::IsNone(rmode)); - if (rmode == RelocInfo::EXTERNAL_REFERENCE) { - // Don't record external references unless the heap will be serialized. 
-#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - if (!Serializer::enabled() && !emit_debug_code()) { - return; - } + DCHECK(!RelocInfo::IsNone(rmode)); + // Don't record external references unless the heap will be serialized. + if (rmode == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; } else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) { // Don't record psuedo relocation info for code age sequence mode. return; @@ -3016,16 +2957,17 @@ } -MaybeObject* Assembler::AllocateConstantPool(Heap* heap) { +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { // No out-of-line constant pool support. - UNREACHABLE(); - return NULL; + DCHECK(!FLAG_enable_ool_constant_pool); + return isolate->factory()->empty_constant_pool_array(); } void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { // No out-of-line constant pool support. - UNREACHABLE(); + DCHECK(!FLAG_enable_ool_constant_pool); + return; } diff -Nru nodejs-0.11.13/deps/v8/src/x64/assembler-x64.h nodejs-0.11.15/deps/v8/src/x64/assembler-x64.h --- nodejs-0.11.13/deps/v8/src/x64/assembler-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/assembler-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -37,7 +37,7 @@ #ifndef V8_X64_ASSEMBLER_X64_H_ #define V8_X64_ASSEMBLER_X64_H_ -#include "serialize.h" +#include "src/serialize.h" namespace v8 { namespace internal { @@ -84,13 +84,13 @@ } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); Register result = { kRegisterCodeByAllocationIndex[index] }; return result; } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "rax", "rbx", @@ -116,7 +116,7 @@ // rax, 
rbx, rcx and rdx are byte registers, the rest are not. bool is_byte_register() const { return code_ <= 3; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } int bit() const { @@ -201,18 +201,18 @@ } static int ToAllocationIndex(XMMRegister reg) { - ASSERT(reg.code() != 0); + DCHECK(reg.code() != 0); return reg.code() - 1; } static XMMRegister FromAllocationIndex(int index) { - ASSERT(0 <= index && index < kMaxNumAllocatableRegisters); + DCHECK(0 <= index && index < kMaxNumAllocatableRegisters); XMMRegister result = { index + 1 }; return result; } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "xmm1", "xmm2", @@ -234,15 +234,15 @@ } static XMMRegister from_code(int code) { - ASSERT(code >= 0); - ASSERT(code < kMaxNumRegisters); + DCHECK(code >= 0); + DCHECK(code < kMaxNumRegisters); XMMRegister r = { code }; return r; } bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; } bool is(XMMRegister reg) const { return code_ == reg.code_; } int code() const { - ASSERT(is_valid()); + DCHECK(is_valid()); return code_; } @@ -326,8 +326,8 @@ } -// Corresponds to transposing the operands of a comparison. -inline Condition ReverseCondition(Condition cc) { +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cc) { switch (cc) { case below: return above; @@ -347,7 +347,7 @@ return greater_equal; default: return cc; - }; + } } @@ -357,6 +357,10 @@ class Immediate BASE_EMBEDDED { public: explicit Immediate(int32_t value) : value_(value) {} + explicit Immediate(Smi* value) { + DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI. 
+ value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value)); + } private: int32_t value_; @@ -433,103 +437,40 @@ }; -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a CpuFeatureScope before use. -// Example: -// if (assembler->IsSupported(SSE3)) { -// CpuFeatureScope fscope(assembler, SSE3); -// // Generate SSE3 floating point code. -// } else { -// // Generate standard SSE2 floating point code. -// } -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Check whether a feature is supported by the target CPU. - static bool IsSupported(CpuFeature f) { - if (Check(f, cross_compile_)) return true; - ASSERT(initialized_); - if (f == SSE3 && !FLAG_enable_sse3) return false; - if (f == SSE4_1 && !FLAG_enable_sse4_1) return false; - if (f == CMOV && !FLAG_enable_cmov) return false; - if (f == SAHF && !FLAG_enable_sahf) return false; - return Check(f, supported_); - } - - static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { - ASSERT(initialized_); - return Check(f, found_by_runtime_probing_only_); - } - - static bool IsSafeForSnapshot(CpuFeature f) { - return Check(f, cross_compile_) || - (IsSupported(f) && - (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); - } - - static bool VerifyCrossCompiling() { - return cross_compile_ == 0; - } - - static bool VerifyCrossCompiling(CpuFeature f) { - uint64_t mask = flag2set(f); - return cross_compile_ == 0 || - (cross_compile_ & mask) == mask; - } - - private: - static bool Check(CpuFeature f, uint64_t set) { - return (set & flag2set(f)) != 0; - } - - static uint64_t flag2set(CpuFeature f) { - return static_cast<uint64_t>(1) << f; - } - - // Safe defaults include CMOV for X64. It is always available, if - // anyone checks, but they shouldn't need to check. 
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1): - // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall - static const uint64_t kDefaultCpuFeatures = (1 << CMOV); - -#ifdef DEBUG - static bool initialized_; -#endif - static uint64_t supported_; - static uint64_t found_by_runtime_probing_only_; - - static uint64_t cross_compile_; - - friend class ExternalReference; - friend class PlatformFeatureScope; - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -}; +#define ASSEMBLER_INSTRUCTION_LIST(V) \ + V(add) \ + V(and) \ + V(cmp) \ + V(dec) \ + V(idiv) \ + V(div) \ + V(imul) \ + V(inc) \ + V(lea) \ + V(mov) \ + V(movzxb) \ + V(movzxw) \ + V(neg) \ + V(not) \ + V(or) \ + V(repmovs) \ + V(sbb) \ + V(sub) \ + V(test) \ + V(xchg) \ + V(xor) -#define ASSEMBLER_INSTRUCTION_LIST(V) \ - V(add) \ - V(and) \ - V(cmp) \ - V(dec) \ - V(idiv) \ - V(imul) \ - V(inc) \ - V(lea) \ - V(mov) \ - V(movzxb) \ - V(movzxw) \ - V(neg) \ - V(not) \ - V(or) \ - V(repmovs) \ - V(sbb) \ - V(sub) \ - V(test) \ - V(xchg) \ - V(xor) +// Shift instructions on operands/registers with kPointerSize, kInt32Size and +// kInt64Size. +#define SHIFT_INSTRUCTION_LIST(V) \ + V(rol, 0x0) \ + V(ror, 0x1) \ + V(rcl, 0x2) \ + V(rcr, 0x3) \ + V(shl, 0x4) \ + V(shr, 0x5) \ + V(sar, 0x7) \ class Assembler : public AssemblerBase { @@ -578,22 +519,29 @@ ConstantPoolArray* constant_pool); static inline void set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target); + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED) ; static inline Address target_address_at(Address pc, Code* code) { ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; return target_address_at(pc, constant_pool); } static inline void set_target_address_at(Address pc, Code* code, - Address target) { + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED) { ConstantPoolArray* constant_pool = code ? 
code->constant_pool() : NULL; - set_target_address_at(pc, constant_pool, target); + set_target_address_at(pc, constant_pool, target, icache_flush_mode); } // Return the code target address at a call site from the return address // of that call in the instruction stream. static inline Address target_address_from_return_address(Address pc); + // Return the code target address of the patch debug break slot + inline static Address break_address_from_return_address(Address pc); + // This sets the branch destination (which is in the instruction on x64). // This is for calls and branches within generated code. inline static void deserialization_set_special_target_at( @@ -605,7 +553,7 @@ if (kPointerSize == kInt64Size) { return RelocInfo::NONE64; } else { - ASSERT(kPointerSize == kInt32Size); + DCHECK(kPointerSize == kInt32Size); return RelocInfo::NONE32; } } @@ -680,6 +628,8 @@ // - Instructions on 64-bit (quadword) operands/registers use 'q'. // - Instructions on operands/registers with pointer size use 'p'. 
+ STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size); + #define DECLARE_INSTRUCTION(instruction) \ template<class P1> \ void instruction##p(P1 p1) { \ @@ -776,7 +726,9 @@ void movq(Register dst, int64_t value); void movq(Register dst, uint64_t value); + void movsxbl(Register dst, const Operand& src); void movsxbq(Register dst, const Operand& src); + void movsxwl(Register dst, const Operand& src); void movsxwq(Register dst, const Operand& src); void movsxlq(Register dst, Register src); void movsxlq(Register dst, const Operand& src); @@ -806,15 +758,15 @@ void cmpb_al(Immediate src); void cmpb(Register dst, Register src) { - arithmetic_op(0x3A, dst, src); + arithmetic_op_8(0x3A, dst, src); } void cmpb(Register dst, const Operand& src) { - arithmetic_op(0x3A, dst, src); + arithmetic_op_8(0x3A, dst, src); } void cmpb(const Operand& dst, Register src) { - arithmetic_op(0x38, src, dst); + arithmetic_op_8(0x38, src, dst); } void cmpb(const Operand& dst, Immediate src) { @@ -856,33 +808,32 @@ // Multiply rax by src, put the result in rdx:rax. 
void mul(Register src); - void rcl(Register dst, Immediate imm8) { - shift(dst, imm8, 0x2); - } - - void rol(Register dst, Immediate imm8) { - shift(dst, imm8, 0x0); - } - - void roll(Register dst, Immediate imm8) { - shift_32(dst, imm8, 0x0); - } - - void rcr(Register dst, Immediate imm8) { - shift(dst, imm8, 0x3); - } - - void ror(Register dst, Immediate imm8) { - shift(dst, imm8, 0x1); - } - - void rorl(Register dst, Immediate imm8) { - shift_32(dst, imm8, 0x1); - } - - void rorl_cl(Register dst) { - shift_32(dst, 0x1); +#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \ + void instruction##p(Register dst, Immediate imm8) { \ + shift(dst, imm8, subcode, kPointerSize); \ + } \ + \ + void instruction##l(Register dst, Immediate imm8) { \ + shift(dst, imm8, subcode, kInt32Size); \ + } \ + \ + void instruction##q(Register dst, Immediate imm8) { \ + shift(dst, imm8, subcode, kInt64Size); \ + } \ + \ + void instruction##p_cl(Register dst) { \ + shift(dst, subcode, kPointerSize); \ + } \ + \ + void instruction##l_cl(Register dst) { \ + shift(dst, subcode, kInt32Size); \ + } \ + \ + void instruction##q_cl(Register dst) { \ + shift(dst, subcode, kInt64Size); \ } + SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION) +#undef DECLARE_SHIFT_INSTRUCTION // Shifts dst:src left by cl bits, affecting only dst. void shld(Register dst, Register src); @@ -890,60 +841,6 @@ // Shifts src:dst right by cl bits, affecting only dst. void shrd(Register dst, Register src); - // Shifts dst right, duplicating sign bit, by shift_amount bits. - // Shifting by 1 is handled efficiently. - void sar(Register dst, Immediate shift_amount) { - shift(dst, shift_amount, 0x7); - } - - // Shifts dst right, duplicating sign bit, by shift_amount bits. - // Shifting by 1 is handled efficiently. - void sarl(Register dst, Immediate shift_amount) { - shift_32(dst, shift_amount, 0x7); - } - - // Shifts dst right, duplicating sign bit, by cl % 64 bits. 
- void sar_cl(Register dst) { - shift(dst, 0x7); - } - - // Shifts dst right, duplicating sign bit, by cl % 64 bits. - void sarl_cl(Register dst) { - shift_32(dst, 0x7); - } - - void shl(Register dst, Immediate shift_amount) { - shift(dst, shift_amount, 0x4); - } - - void shl_cl(Register dst) { - shift(dst, 0x4); - } - - void shll_cl(Register dst) { - shift_32(dst, 0x4); - } - - void shll(Register dst, Immediate shift_amount) { - shift_32(dst, shift_amount, 0x4); - } - - void shr(Register dst, Immediate shift_amount) { - shift(dst, shift_amount, 0x5); - } - - void shr_cl(Register dst) { - shift(dst, 0x5); - } - - void shrl_cl(Register dst) { - shift_32(dst, 0x5); - } - - void shrl(Register dst, Immediate shift_amount) { - shift_32(dst, shift_amount, 0x5); - } - void store_rax(void* dst, RelocInfo::Mode mode); void store_rax(ExternalReference ref); @@ -1176,6 +1073,7 @@ void orpd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src); + void sqrtsd(XMMRegister dst, const Operand& src); void ucomisd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, const Operand& src); @@ -1214,7 +1112,7 @@ void RecordComment(const char* msg, bool force = false); // Allocate a constant pool of the correct size for the generated code. - MaybeObject* AllocateConstantPool(Heap* heap); + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); // Generate the constant pool for the generated code. 
void PopulateConstantPool(ConstantPoolArray* constant_pool); @@ -1363,7 +1261,7 @@ if (size == kInt64Size) { emit_rex_64(); } else { - ASSERT(size == kInt32Size); + DCHECK(size == kInt32Size); } } @@ -1372,7 +1270,7 @@ if (size == kInt64Size) { emit_rex_64(p1); } else { - ASSERT(size == kInt32Size); + DCHECK(size == kInt32Size); emit_optional_rex_32(p1); } } @@ -1382,7 +1280,7 @@ if (size == kInt64Size) { emit_rex_64(p1, p2); } else { - ASSERT(size == kInt32Size); + DCHECK(size == kInt32Size); emit_optional_rex_32(p1, p2); } } @@ -1408,7 +1306,7 @@ // Emit a ModR/M byte with an operation subcode in the reg field and // a register in the rm_reg field. void emit_modrm(int code, Register rm_reg) { - ASSERT(is_uint3(code)); + DCHECK(is_uint3(code)); emit(0xC0 | code << 3 | rm_reg.low_bits()); } @@ -1425,14 +1323,16 @@ // AND, OR, XOR, or CMP. The encodings of these operations are all // similar, differing just in the opcode or in the reg field of the // ModR/M byte. + void arithmetic_op_8(byte opcode, Register reg, Register rm_reg); + void arithmetic_op_8(byte opcode, Register reg, const Operand& rm_reg); void arithmetic_op_16(byte opcode, Register reg, Register rm_reg); void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg); - void arithmetic_op_32(byte opcode, Register reg, Register rm_reg); - void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg); - void arithmetic_op(byte opcode, Register reg, Register rm_reg); - void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg); - void immediate_arithmetic_op(byte subcode, Register dst, Immediate src); - void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src); + // Operate on operands/registers with pointer size, 32-bit or 64-bit size. + void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size); + void arithmetic_op(byte opcode, + Register reg, + const Operand& rm_reg, + int size); // Operate on a byte in memory or register. 
void immediate_arithmetic_op_8(byte subcode, Register dst, @@ -1447,20 +1347,20 @@ void immediate_arithmetic_op_16(byte subcode, const Operand& dst, Immediate src); - // Operate on a 32-bit word in memory or register. - void immediate_arithmetic_op_32(byte subcode, - Register dst, - Immediate src); - void immediate_arithmetic_op_32(byte subcode, - const Operand& dst, - Immediate src); + // Operate on operands/registers with pointer size, 32-bit or 64-bit size. + void immediate_arithmetic_op(byte subcode, + Register dst, + Immediate src, + int size); + void immediate_arithmetic_op(byte subcode, + const Operand& dst, + Immediate src, + int size); // Emit machine code for a shift operation. - void shift(Register dst, Immediate shift_amount, int subcode); - void shift_32(Register dst, Immediate shift_amount, int subcode); + void shift(Register dst, Immediate shift_amount, int subcode, int size); // Shift dst by cl % 64 bits. - void shift(Register dst, int subcode); - void shift_32(Register dst, int subcode); + void shift(Register dst, int subcode, int size); void emit_farith(int b1, int b2, int i); @@ -1473,138 +1373,63 @@ // Arithmetics void emit_add(Register dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x03, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x03, dst, src); - } + arithmetic_op(0x03, dst, src, size); } void emit_add(Register dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x0, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x0, dst, src); - } + immediate_arithmetic_op(0x0, dst, src, size); } void emit_add(Register dst, const Operand& src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x03, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x03, dst, src); - } + arithmetic_op(0x03, dst, src, size); } void emit_add(const Operand& dst, Register src, int size) { - if (size == kInt64Size) { - 
arithmetic_op(0x1, src, dst); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x1, src, dst); - } + arithmetic_op(0x1, src, dst, size); } void emit_add(const Operand& dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x0, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x0, dst, src); - } + immediate_arithmetic_op(0x0, dst, src, size); } void emit_and(Register dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x23, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x23, dst, src); - } + arithmetic_op(0x23, dst, src, size); } void emit_and(Register dst, const Operand& src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x23, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x23, dst, src); - } + arithmetic_op(0x23, dst, src, size); } void emit_and(const Operand& dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x21, src, dst); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x21, src, dst); - } + arithmetic_op(0x21, src, dst, size); } void emit_and(Register dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x4, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x4, dst, src); - } + immediate_arithmetic_op(0x4, dst, src, size); } void emit_and(const Operand& dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x4, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x4, dst, src); - } + immediate_arithmetic_op(0x4, dst, src, size); } void emit_cmp(Register dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x3B, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x3B, dst, src); - } + arithmetic_op(0x3B, dst, src, size); } void emit_cmp(Register dst, const Operand& src, int size) { - if (size == 
kInt64Size) { - arithmetic_op(0x3B, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x3B, dst, src); - } + arithmetic_op(0x3B, dst, src, size); } void emit_cmp(const Operand& dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x39, src, dst); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x39, src, dst); - } + arithmetic_op(0x39, src, dst, size); } void emit_cmp(Register dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x7, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x7, dst, src); - } + immediate_arithmetic_op(0x7, dst, src, size); } void emit_cmp(const Operand& dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x7, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x7, dst, src); - } + immediate_arithmetic_op(0x7, dst, src, size); } void emit_dec(Register dst, int size); @@ -1614,6 +1439,7 @@ // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx // when size is 32. void emit_idiv(Register src, int size); + void emit_div(Register src, int size); // Signed multiply instructions. // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32. 
@@ -1634,6 +1460,7 @@ void emit_mov(const Operand& dst, Immediate value, int size); void emit_movzxb(Register dst, const Operand& src, int size); + void emit_movzxb(Register dst, Register src, int size); void emit_movzxw(Register dst, const Operand& src, int size); void emit_movzxw(Register dst, Register src, int size); @@ -1644,156 +1471,86 @@ void emit_not(const Operand& dst, int size); void emit_or(Register dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x0B, dst, src); - } else { - arithmetic_op_32(0x0B, dst, src); - } + arithmetic_op(0x0B, dst, src, size); } void emit_or(Register dst, const Operand& src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x0B, dst, src); - } else { - arithmetic_op_32(0x0B, dst, src); - } + arithmetic_op(0x0B, dst, src, size); } void emit_or(const Operand& dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x9, src, dst); - } else { - arithmetic_op_32(0x9, src, dst); - } + arithmetic_op(0x9, src, dst, size); } void emit_or(Register dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x1, dst, src); - } else { - immediate_arithmetic_op_32(0x1, dst, src); - } + immediate_arithmetic_op(0x1, dst, src, size); } void emit_or(const Operand& dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x1, dst, src); - } else { - immediate_arithmetic_op_32(0x1, dst, src); - } + immediate_arithmetic_op(0x1, dst, src, size); } void emit_repmovs(int size); void emit_sbb(Register dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x1b, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x1b, dst, src); - } + arithmetic_op(0x1b, dst, src, size); } void emit_sub(Register dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x2B, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x2B, dst, src); - } + arithmetic_op(0x2B, dst, src, size); } 
void emit_sub(Register dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x5, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x5, dst, src); - } + immediate_arithmetic_op(0x5, dst, src, size); } void emit_sub(Register dst, const Operand& src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x2B, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x2B, dst, src); - } + arithmetic_op(0x2B, dst, src, size); } void emit_sub(const Operand& dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x29, src, dst); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x29, src, dst); - } + arithmetic_op(0x29, src, dst, size); } void emit_sub(const Operand& dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x5, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x5, dst, src); - } + immediate_arithmetic_op(0x5, dst, src, size); } void emit_test(Register dst, Register src, int size); void emit_test(Register reg, Immediate mask, int size); void emit_test(const Operand& op, Register reg, int size); void emit_test(const Operand& op, Immediate mask, int size); + void emit_test(Register reg, const Operand& op, int size) { + return emit_test(op, reg, size); + } - // Exchange two registers void emit_xchg(Register dst, Register src, int size); + void emit_xchg(Register dst, const Operand& src, int size); void emit_xor(Register dst, Register src, int size) { - if (size == kInt64Size) { - if (dst.code() == src.code()) { - arithmetic_op_32(0x33, dst, src); - } else { - arithmetic_op(0x33, dst, src); - } + if (size == kInt64Size && dst.code() == src.code()) { + // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore + // there is no need to make this a 64 bit operation. 
+ arithmetic_op(0x33, dst, src, kInt32Size); } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x33, dst, src); + arithmetic_op(0x33, dst, src, size); } } void emit_xor(Register dst, const Operand& src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x33, dst, src); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x33, dst, src); - } + arithmetic_op(0x33, dst, src, size); } void emit_xor(Register dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x6, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x6, dst, src); - } + immediate_arithmetic_op(0x6, dst, src, size); } void emit_xor(const Operand& dst, Immediate src, int size) { - if (size == kInt64Size) { - immediate_arithmetic_op(0x6, dst, src); - } else { - ASSERT(size == kInt32Size); - immediate_arithmetic_op_32(0x6, dst, src); - } + immediate_arithmetic_op(0x6, dst, src, size); } void emit_xor(const Operand& dst, Register src, int size) { - if (size == kInt64Size) { - arithmetic_op(0x31, src, dst); - } else { - ASSERT(size == kInt32Size); - arithmetic_op_32(0x31, src, dst); - } + arithmetic_op(0x31, src, dst, size); } friend class CodePatcher; @@ -1826,7 +1583,7 @@ #ifdef DEBUG ~EnsureSpace() { int bytes_generated = space_before_ - assembler_->available_space(); - ASSERT(bytes_generated < assembler_->kGap); + DCHECK(bytes_generated < assembler_->kGap); } #endif diff -Nru nodejs-0.11.13/deps/v8/src/x64/assembler-x64-inl.h nodejs-0.11.15/deps/v8/src/x64/assembler-x64-inl.h --- nodejs-0.11.13/deps/v8/src/x64/assembler-x64-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/assembler-x64-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,49 +1,29 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_X64_ASSEMBLER_X64_INL_H_ #define V8_X64_ASSEMBLER_X64_INL_H_ -#include "x64/assembler-x64.h" +#include "src/x64/assembler-x64.h" -#include "cpu.h" -#include "debug.h" -#include "v8memory.h" +#include "src/base/cpu.h" +#include "src/debug.h" +#include "src/v8memory.h" namespace v8 { namespace internal { +bool CpuFeatures::SupportsCrankshaft() { return true; } + // ----------------------------------------------------------------------------- // Implementation of Assembler static const byte kCallOpcode = 0xE8; -static const int kNoCodeAgeSequenceLength = 6; +// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi). +static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17; void Assembler::emitl(uint32_t x) { @@ -77,7 +57,7 @@ void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode, TypeFeedbackId ast_id) { - ASSERT(RelocInfo::IsCodeTarget(rmode) || + DCHECK(RelocInfo::IsCodeTarget(rmode) || rmode == RelocInfo::CODE_AGE_SEQUENCE); if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt()); @@ -96,8 +76,7 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) { - ASSERT(RelocInfo::IsRuntimeEntry(rmode)); - ASSERT(isolate()->code_range()->exists()); + DCHECK(RelocInfo::IsRuntimeEntry(rmode)); RecordRelocInfo(rmode); emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start())); } @@ -129,7 +108,7 @@ void Assembler::emit_rex_64(Register rm_reg) { - ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code()); + DCHECK_EQ(rm_reg.code() & 0xf, rm_reg.code()); emit(0x48 | rm_reg.high_bit()); } @@ -213,9 +192,12 @@ void Assembler::set_target_address_at(Address pc, ConstantPoolArray* constant_pool, - Address target) { + Address target, + ICacheFlushMode icache_flush_mode) { Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4); - CPU::FlushICache(pc, sizeof(int32_t)); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + 
CpuFeatures::FlushICache(pc, sizeof(int32_t)); + } } @@ -224,13 +206,17 @@ } +Address Assembler::break_address_from_return_address(Address pc) { + return pc - Assembler::kPatchDebugBreakSlotReturnOffset; +} + + Handle<Object> Assembler::code_target_object_handle_at(Address pc) { return code_targets_[Memory::int32_at(pc)]; } Address Assembler::runtime_entry_at(Address pc) { - ASSERT(isolate()->code_range()->exists()); return Memory::int32_at(pc) + isolate()->code_range()->start(); } @@ -238,32 +224,33 @@ // Implementation of RelocInfo // The modes possibly affected by apply must be in kApplyMask. -void RelocInfo::apply(intptr_t delta) { +void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { + bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH; if (IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. Memory::Address_at(pc_) += static_cast<int32_t>(delta); - CPU::FlushICache(pc_, sizeof(Address)); + if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) { Memory::int32_at(pc_) -= static_cast<int32_t>(delta); - CPU::FlushICache(pc_, sizeof(int32_t)); + if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(int32_t)); } else if (rmode_ == CODE_AGE_SEQUENCE) { if (*pc_ == kCallOpcode) { int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); *p -= static_cast<int32_t>(delta); // Relocate entry. 
- CPU::FlushICache(p, sizeof(uint32_t)); + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); } } } Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_, host_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); return reinterpret_cast<Address>(pc_); @@ -285,10 +272,13 @@ } -void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); - Assembler::set_target_address_at(pc_, host_, target); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && + IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( host(), this, HeapObject::cast(target_code)); @@ -297,13 +287,13 @@ Object* RelocInfo::target_object() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Memory::Object_at(pc_); } Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); if (rmode_ == EMBEDDED_OBJECT) { return Memory::Object_Handle_at(pc_); } else { @@ -313,17 +303,20 @@ Address RelocInfo::target_reference() { - ASSERT(rmode_ == 
RelocInfo::EXTERNAL_REFERENCE); + DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE); return Memory::Address_at(pc_); } -void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - ASSERT(!target->IsConsString()); +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Memory::Object_at(pc_) = target; - CPU::FlushICache(pc_, sizeof(Address)); - if (mode == UPDATE_WRITE_BARRIER && + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { host()->GetHeap()->incremental_marking()->RecordWrite( @@ -333,37 +326,44 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) { - ASSERT(IsRuntimeEntry(rmode_)); + DCHECK(IsRuntimeEntry(rmode_)); return origin->runtime_entry_at(pc_); } void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode mode) { - ASSERT(IsRuntimeEntry(rmode_)); - if (target_address() != target) set_target_address(target, mode); + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) { + set_target_address(target, write_barrier_mode, icache_flush_mode); + } } Handle<Cell> RelocInfo::target_cell_handle() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); Address address = Memory::Address_at(pc_); return Handle<Cell>(reinterpret_cast<Cell**>(address)); } Cell* RelocInfo::target_cell() { - ASSERT(rmode_ == RelocInfo::CELL); + DCHECK(rmode_ == RelocInfo::CELL); return Cell::FromValueAddress(Memory::Address_at(pc_)); } -void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { - ASSERT(rmode_ == RelocInfo::CELL); +void RelocInfo::set_target_cell(Cell* cell, + 
WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CELL); Address address = cell->address() + Cell::kValueOffset; Memory::Address_at(pc_) = address; - CPU::FlushICache(pc_, sizeof(Address)); - if (mode == UPDATE_WRITE_BARRIER && + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) { // TODO(1550) We are passing NULL as a slot because cell can never be on // evacuation candidate. @@ -392,12 +392,8 @@ // movq(rsp, rbp); pop(rbp); ret(n); int3 *6 // The 11th byte is int3 (0xCC) in the return sequence and // REX.WB (0x48+register bit) for the call sequence. -#ifdef ENABLE_DEBUGGER_SUPPORT return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] != 0xCC; -#else - return false; -#endif } @@ -407,29 +403,31 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - ASSERT(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(*pc_ == kCallOpcode); return origin->code_target_object_handle_at(pc_ + 1); } Code* RelocInfo::code_age_stub() { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - ASSERT(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(*pc_ == kCallOpcode); return Code::GetCodeFromTargetAddress( Assembler::target_address_at(pc_ + 1, host_)); } -void RelocInfo::set_code_age_stub(Code* stub) { - ASSERT(*pc_ == kCallOpcode); - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start()); +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(), + icache_flush_mode); } Address RelocInfo::call_address() { - 
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return Memory::Address_at( pc_ + Assembler::kRealPatchReturnSequenceAddressOffset); @@ -437,12 +435,12 @@ void RelocInfo::set_call_address(Address target) { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) = target; - CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, - sizeof(Address)); + CpuFeatures::FlushICache( + pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, sizeof(Address)); if (host() != NULL) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( @@ -462,7 +460,7 @@ Object** RelocInfo::call_object_address() { - ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); return reinterpret_cast<Object**>( pc_ + Assembler::kPatchReturnSequenceAddressOffset); @@ -473,24 +471,22 @@ RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { visitor->VisitEmbeddedPointer(this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::CELL) { visitor->VisitCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeAgeSequence(mode)) { visitor->VisitCodeAgeSequence(this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if 
(((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence())) && isolate->debug()->has_break_points()) { visitor->VisitDebugTarget(this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } @@ -502,24 +498,22 @@ RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { StaticVisitor::VisitEmbeddedPointer(heap, this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::CELL) { StaticVisitor::VisitCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); - CPU::FlushICache(pc_, sizeof(Address)); + CpuFeatures::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeAgeSequence(mode)) { StaticVisitor::VisitCodeAgeSequence(heap, this); -#ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); -#endif } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } @@ -530,7 +524,7 @@ // Implementation of Operand void Operand::set_modrm(int mod, Register rm_reg) { - ASSERT(is_uint2(mod)); + DCHECK(is_uint2(mod)); buf_[0] = mod << 6 | rm_reg.low_bits(); // Set REX.B to the high bit of rm.code(). rex_ |= rm_reg.high_bit(); @@ -538,26 +532,26 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) { - ASSERT(len_ == 1); - ASSERT(is_uint2(scale)); + DCHECK(len_ == 1); + DCHECK(is_uint2(scale)); // Use SIB with no index register only for base rsp or r12. Otherwise we // would skip the SIB byte entirely. 
- ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12)); + DCHECK(!index.is(rsp) || base.is(rsp) || base.is(r12)); buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits(); rex_ |= index.high_bit() << 1 | base.high_bit(); len_ = 2; } void Operand::set_disp8(int disp) { - ASSERT(is_int8(disp)); - ASSERT(len_ == 1 || len_ == 2); + DCHECK(is_int8(disp)); + DCHECK(len_ == 1 || len_ == 2); int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]); *p = disp; len_ += sizeof(int8_t); } void Operand::set_disp32(int disp) { - ASSERT(len_ == 1 || len_ == 2); + DCHECK(len_ == 1 || len_ == 2); int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]); *p = disp; len_ += sizeof(int32_t); diff -Nru nodejs-0.11.13/deps/v8/src/x64/builtins-x64.cc nodejs-0.11.15/deps/v8/src/x64/builtins-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/builtins-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/builtins-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -64,7 +41,7 @@ __ Push(rdi); __ PushReturnAddressFrom(kScratchRegister); } else { - ASSERT(extra_args == NO_EXTRA_ARGUMENTS); + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); } // JumpToExternalReference expects rax to contain the number of arguments @@ -114,7 +91,7 @@ __ CompareRoot(rsp, Heap::kStackLimitRootIndex); __ j(above_equal, &ok); - CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode); + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); GenerateTailCallToReturnedCode(masm); __ bind(&ok); @@ -124,7 +101,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function, - bool count_constructions, bool create_memento) { // ----------- S t a t e ------------- // -- rax: number of arguments @@ -132,14 
+108,8 @@ // -- rbx: allocation site or undefined // ----------------------------------- - // Should never count constructions for api objects. - ASSERT(!is_api_function || !count_constructions);\ - // Should never create mementos for api functions. - ASSERT(!is_api_function || !create_memento); - - // Should never create mementos before slack tracking is finished. - ASSERT(!count_constructions || !create_memento); + DCHECK(!is_api_function || !create_memento); // Enter a construct frame. { @@ -163,20 +133,18 @@ if (FLAG_inline_new) { Label undo_allocation; -#ifdef ENABLE_DEBUGGER_SUPPORT ExternalReference debug_step_in_fp = ExternalReference::debug_step_in_fp_address(masm->isolate()); __ Move(kScratchRegister, debug_step_in_fp); __ cmpp(Operand(kScratchRegister, 0), Immediate(0)); __ j(not_equal, &rt_call); -#endif // Verified that the constructor is a JSFunction. // Load the initial map and verify that it is in fact a map. // rdi: constructor __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); // Will both indicate a NULL and a Smi - ASSERT(kSmiTag == 0); + DCHECK(kSmiTag == 0); __ JumpIfSmi(rax, &rt_call); // rdi: constructor // rax: initial map (if proven valid below) @@ -191,30 +159,39 @@ __ CmpInstanceType(rax, JS_FUNCTION_TYPE); __ j(equal, &rt_call); - if (count_constructions) { + if (!is_api_function) { Label allocate; + // The code below relies on these assumptions. + STATIC_ASSERT(JSFunction::kNoSlackTracking == 0); + STATIC_ASSERT(Map::ConstructionCount::kShift + + Map::ConstructionCount::kSize == 32); + // Check if slack tracking is enabled. + __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset)); + __ shrl(rsi, Immediate(Map::ConstructionCount::kShift)); + __ j(zero, &allocate); // JSFunction::kNoSlackTracking // Decrease generous allocation count. 
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ decb(FieldOperand(rcx, - SharedFunctionInfo::kConstructionCountOffset)); - __ j(not_zero, &allocate); + __ subl(FieldOperand(rax, Map::kBitField3Offset), + Immediate(1 << Map::ConstructionCount::kShift)); + + __ cmpl(rsi, Immediate(JSFunction::kFinishSlackTracking)); + __ j(not_equal, &allocate); __ Push(rax); __ Push(rdi); __ Push(rdi); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1); + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); __ Pop(rdi); __ Pop(rax); + __ xorl(rsi, rsi); // JSFunction::kNoSlackTracking __ bind(&allocate); } // Now allocate the JSObject on the heap. __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); - __ shl(rdi, Immediate(kPointerSizeLog2)); + __ shlp(rdi, Immediate(kPointerSizeLog2)); if (create_memento) { __ addp(rdi, Immediate(AllocationMemento::kSize)); } @@ -238,9 +215,17 @@ // rax: initial map // rbx: JSObject // rdi: start of next object (including memento if create_memento) + // rsi: slack tracking counter (non-API function case) __ leap(rcx, Operand(rbx, JSObject::kHeaderSize)); __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); - if (count_constructions) { + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. + __ cmpl(rsi, Immediate(JSFunction::kNoSlackTracking)); + __ j(equal, &no_inobject_slack_tracking); + + // Allocate object with a slack. __ movzxbp(rsi, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); __ leap(rsi, @@ -253,20 +238,21 @@ } __ InitializeFieldsWithFiller(rcx, rsi, rdx); __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex); - __ InitializeFieldsWithFiller(rcx, rdi, rdx); - } else if (create_memento) { + // Fill the remaining fields with one pointer filler map. 
+ + __ bind(&no_inobject_slack_tracking); + } + if (create_memento) { __ leap(rsi, Operand(rdi, -AllocationMemento::kSize)); __ InitializeFieldsWithFiller(rcx, rsi, rdx); // Fill in memento fields if necessary. // rsi: points to the allocated but uninitialized memento. - Handle<Map> allocation_memento_map = factory->allocation_memento_map(); __ Move(Operand(rsi, AllocationMemento::kMapOffset), - allocation_memento_map); + factory->allocation_memento_map()); // Get the cell or undefined. __ movp(rdx, Operand(rsp, kPointerSize*2)); - __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), - rdx); + __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), rdx); } else { __ InitializeFieldsWithFiller(rcx, rdi, rdx); } @@ -369,13 +355,14 @@ offset = kPointerSize; } - // Must restore rdi (constructor) before calling runtime. + // Must restore rsi (context) and rdi (constructor) before calling runtime. + __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); __ movp(rdi, Operand(rsp, offset)); __ Push(rdi); if (create_memento) { - __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2); + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); } else { - __ CallRuntime(Runtime::kHiddenNewObject, 1); + __ CallRuntime(Runtime::kNewObject, 1); } __ movp(rbx, rax); // store result in rbx @@ -441,7 +428,7 @@ } // Store offset of return address for deoptimizer. 
- if (!is_api_function && !count_constructions) { + if (!is_api_function) { masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); } @@ -484,18 +471,13 @@ } -void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, true, false); -} - - void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new); + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); } void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, true, false, false); + Generate_JSConstructStubHelper(masm, true, false); } @@ -600,7 +582,7 @@ // No type feedback cell is available __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); // Expects rdi to hold function pointer. - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); __ CallStub(&stub); } else { ParameterCount actual(rax); @@ -628,7 +610,7 @@ void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized); + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); GenerateTailCallToReturnedCode(masm); } @@ -643,7 +625,7 @@ // Whether to compile in a background thread. __ Push(masm->isolate()->factory()->ToBoolean(concurrent)); - __ CallRuntime(Runtime::kHiddenCompileOptimized, 2); + __ CallRuntime(Runtime::kCompileOptimized, 2); // Restore receiver. __ Pop(rdi); } @@ -744,12 +726,12 @@ // stubs that tail call the runtime on deopts passing their parameters in // registers. __ Pushad(); - __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); __ Popad(); // Tear down internal frame. 
} - __ Pop(MemOperand(rsp, 0)); // Ignore state offset + __ DropUnderReturnAddress(1); // Ignore state offset __ ret(0); // Return to IC Miss stub, continuation still on stack. } @@ -773,7 +755,7 @@ // Pass the deoptimization type to the runtime system. __ Push(Smi::FromInt(static_cast<int>(type))); - __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); // Tear down internal frame. } @@ -846,7 +828,7 @@ // 3a. Patch the first argument if necessary when calling a function. Label shift_arguments; __ Set(rdx, 0); // indicate regular JS_FUNCTION - { Label convert_to_object, use_global_receiver, patch_receiver; + { Label convert_to_object, use_global_proxy, patch_receiver; // Change context eagerly in case we need the global receiver. __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); @@ -867,9 +849,9 @@ __ JumpIfSmi(rbx, &convert_to_object, Label::kNear); __ CompareRoot(rbx, Heap::kNullValueRootIndex); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx); @@ -895,10 +877,10 @@ __ movp(rdi, args.GetReceiverOperand()); __ jmp(&patch_receiver, Label::kNear); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ movp(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset)); + __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset)); __ bind(&patch_receiver); __ movp(args.GetArgumentOperand(1), rbx); @@ -926,12 +908,13 @@ __ bind(&shift_arguments); { Label loop; __ movp(rcx, rax); + StackArgumentsAccessor args(rsp, rcx); __ bind(&loop); - __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0)); - __ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx); + __ 
movp(rbx, args.GetArgumentOperand(1)); + __ movp(args.GetArgumentOperand(0), rbx); __ decp(rcx); - __ j(not_sign, &loop); // While non-negative (to copy return address). - __ popq(rbx); // Discard copy of return address. + __ j(not_zero, &loop); // While non-zero. + __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address. __ decp(rax); // One fewer argument (first argument is new receiver). } @@ -963,9 +946,8 @@ // expected arguments matches what we're providing. If so, jump // (tail-call) to the code in register edx without checking arguments. __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ movsxlq(rbx, - FieldOperand(rdx, - SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadSharedFunctionInfoSpecialField(rbx, rdx, + SharedFunctionInfo::kFormalParameterCountOffset); __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); __ cmpp(rax, rbx); __ j(not_equal, @@ -1018,7 +1000,7 @@ // Out of stack space. __ Push(Operand(rbp, kFunctionOffset)); __ Push(rax); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); __ bind(&okay); // End of stack check. @@ -1042,7 +1024,7 @@ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; + Label call_to_object, use_global_proxy; __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset), Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); @@ -1056,9 +1038,9 @@ // Compute the receiver in sloppy mode. 
__ JumpIfSmi(rbx, &call_to_object, Label::kNear); __ CompareRoot(rbx, Heap::kNullValueRootIndex); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); - __ j(equal, &use_global_receiver); + __ j(equal, &use_global_proxy); // If given receiver is already a JavaScript object then there's no // reason for converting it. @@ -1073,10 +1055,10 @@ __ movp(rbx, rax); __ jmp(&push_receiver, Label::kNear); - __ bind(&use_global_receiver); + __ bind(&use_global_proxy); __ movp(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset)); + __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset)); // Push the receiver. __ bind(&push_receiver); @@ -1084,12 +1066,17 @@ // Copy all arguments from the array to the stack. Label entry, loop; - __ movp(rax, Operand(rbp, kIndexOffset)); + Register receiver = LoadIC::ReceiverRegister(); + Register key = LoadIC::NameRegister(); + __ movp(key, Operand(rbp, kIndexOffset)); __ jmp(&entry); __ bind(&loop); - __ movp(rdx, Operand(rbp, kArgumentsOffset)); // load arguments + __ movp(receiver, Operand(rbp, kArgumentsOffset)); // load arguments // Use inline caching to speed up access to arguments. + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), Smi::FromInt(0)); + } Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); __ Call(ic, RelocInfo::CODE_TARGET); @@ -1101,19 +1088,19 @@ // Push the nth argument. __ Push(rax); - // Update the index on the stack and in register rax. - __ movp(rax, Operand(rbp, kIndexOffset)); - __ SmiAddConstant(rax, rax, Smi::FromInt(1)); - __ movp(Operand(rbp, kIndexOffset), rax); + // Update the index on the stack and in register key. 
+ __ movp(key, Operand(rbp, kIndexOffset)); + __ SmiAddConstant(key, key, Smi::FromInt(1)); + __ movp(Operand(rbp, kIndexOffset), key); __ bind(&entry); - __ cmpp(rax, Operand(rbp, kLimitOffset)); + __ cmpp(key, Operand(rbp, kLimitOffset)); __ j(not_equal, &loop); // Call the function. Label call_proxy; ParameterCount actual(rax); - __ SmiToInteger32(rax, rax); + __ SmiToInteger32(rax, key); __ movp(rdi, Operand(rbp, kFunctionOffset)); __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); __ j(not_equal, &call_proxy); @@ -1323,6 +1310,32 @@ } +static void ArgumentsAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- rax : actual number of arguments + // -- rbx : expected number of arguments + // -- rdi: function (passed through to callee) + // ----------------------------------- + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex); + __ movp(rcx, rsp); + // Make rcx the space we have left. The stack might already be overflowed + // here which will cause rcx to become negative. + __ subp(rcx, rdx); + // Make rdx the space we need for the array when it is unrolled onto the + // stack. + __ movp(rdx, rbx); + __ shlp(rdx, Immediate(kPointerSizeLog2)); + // Check if the arguments will overflow the stack. + __ cmpp(rcx, rdx); + __ j(less_equal, stack_overflow); // Signed comparison. 
+} + + static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ pushq(rbp); __ movp(rbp, rsp); @@ -1368,6 +1381,9 @@ Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->arguments_adaptors(), 1); + Label stack_overflow; + ArgumentsAdaptorStackCheck(masm, &stack_overflow); + Label enough, too_few; __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); __ cmpp(rax, rbx); @@ -1440,6 +1456,14 @@ // ------------------------------------------- __ bind(&dont_adapt_arguments); __ jmp(rdx); + + __ bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ int3(); + } } @@ -1486,7 +1510,7 @@ __ j(above_equal, &ok); { FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kHiddenStackGuard, 0); + __ CallRuntime(Runtime::kStackGuard, 0); } __ jmp(masm->isolate()->builtins()->OnStackReplacement(), RelocInfo::CODE_TARGET); diff -Nru nodejs-0.11.13/deps/v8/src/x64/codegen-x64.cc nodejs-0.11.15/deps/v8/src/x64/codegen-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/codegen-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/codegen-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "codegen.h" -#include "macro-assembler.h" +#include "src/codegen.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { @@ -40,14 +17,14 @@ void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); + DCHECK(!masm->has_frame()); masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); + DCHECK(masm->has_frame()); masm->set_has_frame(false); } @@ -58,7 +35,8 @@ UnaryMathFunction CreateExpFunction() { if (!FLAG_fast_math) return &std::exp; size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::exp; ExternalReference::InitializeMathExpData(); @@ -78,10 +56,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<UnaryMathFunction>(buffer); } @@ -89,9 +67,8 @@ UnaryMathFunction CreateSqrtFunction() { size_t actual_size; // Allocate buffer in executable space. 
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, - &actual_size, - true)); + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return &std::sqrt; MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); @@ -102,10 +79,10 @@ CodeDesc desc; masm.GetCode(&desc); - ASSERT(!RelocInfo::RequiresRelocation(desc)); + DCHECK(!RelocInfo::RequiresRelocation(desc)); - CPU::FlushICache(buffer, actual_size); - OS::ProtectCode(buffer, actual_size); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<UnaryMathFunction>(buffer); } @@ -115,9 +92,8 @@ // Define custom fmod implementation. ModuloFunction CreateModuloFunction() { size_t actual_size; - byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize, - &actual_size, - true)); + byte* buffer = static_cast<byte*>( + base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true)); CHECK(buffer); Assembler masm(NULL, buffer, static_cast<int>(actual_size)); // Generated code is put into a fixed, unmovable, buffer, and not into @@ -193,7 +169,7 @@ CodeDesc desc; masm.GetCode(&desc); - OS::ProtectCode(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); // Call the function from C++ through this pointer. return FUNCTION_CAST<ModuloFunction>(buffer); } @@ -208,26 +184,29 @@ #define __ ACCESS_MASM(masm) void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm, AllocationSiteMode mode, + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, Label* allocation_memento_found) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rbx : target map - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // Return address is on the stack. 
+ Register scratch = rdi; + DCHECK(!AreAliased(receiver, key, value, target_map, scratch)); + if (mode == TRACK_ALLOCATION_SITE) { - ASSERT(allocation_memento_found != NULL); - __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found); + DCHECK(allocation_memento_found != NULL); + __ JumpIfJSArrayHasAllocationMemento( + receiver, scratch, allocation_memento_found); } // Set transitioned map. - __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx); - __ RecordWriteField(rdx, + __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map); + __ RecordWriteField(receiver, HeapObject::kMapOffset, - rbx, - rdi, + target_map, + scratch, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -235,14 +214,19 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rbx : target map - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Return address is on the stack. + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); + DCHECK(value.is(rax)); + DCHECK(target_map.is(rbx)); + // The fail label is not actually used since we do not allocate. Label allocated, new_backing_store, only_change_map, done; @@ -256,12 +240,20 @@ __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex); __ j(equal, &only_change_map); - // Check backing store for COW-ness. For COW arrays we have to - // allocate a new backing store. __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset)); - __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset), - Heap::kFixedCOWArrayMapRootIndex); - __ j(equal, &new_backing_store); + if (kPointerSize == kDoubleSize) { + // Check backing store for COW-ness. 
For COW arrays we have to + // allocate a new backing store. + __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset), + Heap::kFixedCOWArrayMapRootIndex); + __ j(equal, &new_backing_store); + } else { + // For x32 port we have to allocate a new backing store as SMI size is + // not equal with double size. + DCHECK(kDoubleSize == 2 * kPointerSize); + __ jmp(&new_backing_store); + } + // Check if the backing store is in new-space. If not, we need to allocate // a new one since the old one is in pointer-space. // If in new space, we can reuse the old backing store because it is @@ -361,14 +353,19 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rbx : target map - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Return address is on the stack. + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); + DCHECK(value.is(rax)); + DCHECK(target_map.is(rbx)); + Label loop, entry, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { @@ -533,7 +530,7 @@ __ Assert(zero, kExternalStringExpectedButNotFound); } // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ testb(result, Immediate(kShortExternalStringTag)); __ j(not_zero, call_runtime); // Check encoding. 
@@ -583,11 +580,12 @@ XMMRegister double_scratch, Register temp1, Register temp2) { - ASSERT(!input.is(result)); - ASSERT(!input.is(double_scratch)); - ASSERT(!result.is(double_scratch)); - ASSERT(!temp1.is(temp2)); - ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!input.is(result)); + DCHECK(!input.is(double_scratch)); + DCHECK(!result.is(double_scratch)); + DCHECK(!temp1.is(temp2)); + DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); + DCHECK(!masm->serializer_enabled()); // External references not serializable. Label done; @@ -608,10 +606,10 @@ __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); __ leaq(temp1, Operand(temp2, 0x1ff800)); __ andq(temp2, Immediate(0x7ff)); - __ shr(temp1, Immediate(11)); + __ shrq(temp1, Immediate(11)); __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); __ Move(kScratchRegister, ExternalReference::math_exp_log_table()); - __ shl(temp1, Immediate(52)); + __ shlq(temp1, Immediate(52)); __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0)); __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); __ subsd(double_scratch, input); @@ -631,37 +629,36 @@ #undef __ -static byte* GetNoCodeAgeSequence(uint32_t* length) { - static bool initialized = false; - static byte sequence[kNoCodeAgeSequenceLength]; - *length = kNoCodeAgeSequenceLength; - if (!initialized) { - // The sequence of instructions that is patched out for aging code is the - // following boilerplate stack-building prologue that is found both in - // FUNCTION and OPTIMIZED_FUNCTION code: - CodePatcher patcher(sequence, kNoCodeAgeSequenceLength); - patcher.masm()->pushq(rbp); - patcher.masm()->movp(rbp, rsp); - patcher.masm()->Push(rsi); - patcher.masm()->Push(rdi); - initialized = true; - } - return sequence; +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + // The sequence of instructions that is patched out for aging code is the 
+ // following boilerplate stack-building prologue that is found both in + // FUNCTION and OPTIMIZED_FUNCTION code: + CodePatcher patcher(young_sequence_.start(), young_sequence_.length()); + patcher.masm()->pushq(rbp); + patcher.masm()->movp(rbp, rsp); + patcher.masm()->Push(rsi); + patcher.masm()->Push(rdi); } -bool Code::IsYoungSequence(byte* sequence) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); - bool result = (!memcmp(sequence, young_sequence, young_length)); - ASSERT(result || *sequence == kCallOpcode); +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return *candidate == kCallOpcode; +} +#endif + + +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); return result; } -void Code::GetCodeAgeAndParity(byte* sequence, Age* age, +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity) { - if (IsYoungSequence(sequence)) { + if (IsYoungSequence(isolate, sequence)) { *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { @@ -678,11 +675,10 @@ byte* sequence, Code::Age age, MarkingParity parity) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { - CopyBytes(sequence, young_sequence, young_length); - CPU::FlushICache(sequence, young_length); + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); + CpuFeatures::FlushICache(sequence, young_length); } else { Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length); @@ -694,7 +690,7 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) { - ASSERT(index >= 0); + DCHECK(index >= 0); int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 
1 : 0; int displacement_to_last_argument = base_reg_.is(rsp) ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize; @@ -702,7 +698,7 @@ if (argument_count_reg_.is(no_reg)) { // argument[0] is at base_reg_ + displacement_to_last_argument + // (argument_count_immediate_ + receiver - 1) * kPointerSize. - ASSERT(argument_count_immediate_ + receiver > 0); + DCHECK(argument_count_immediate_ + receiver > 0); return Operand(base_reg_, displacement_to_last_argument + (argument_count_immediate_ + receiver - 1 - index) * kPointerSize); } else { diff -Nru nodejs-0.11.13/deps/v8/src/x64/codegen-x64.h nodejs-0.11.15/deps/v8/src/x64/codegen-x64.h --- nodejs-0.11.13/deps/v8/src/x64/codegen-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/codegen-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,35 +1,12 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_X64_CODEGEN_X64_H_ #define V8_X64_CODEGEN_X64_H_ -#include "ast.h" -#include "ic-inl.h" +#include "src/ast.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { @@ -119,7 +96,7 @@ Operand GetArgumentOperand(int index); Operand GetReceiverOperand() { - ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER); + DCHECK(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER); return GetArgumentOperand(0); } diff -Nru nodejs-0.11.13/deps/v8/src/x64/code-stubs-x64.cc nodejs-0.11.15/deps/v8/src/x64/code-stubs-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/code-stubs-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/code-stubs-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,392 +1,267 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "bootstrapper.h" -#include "code-stubs.h" -#include "regexp-macro-assembler.h" -#include "stub-cache.h" -#include "runtime.h" +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/regexp-macro-assembler.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { void FastNewClosureStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rbx }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry; + Register registers[] = { rsi, rbx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); } void FastNewContextStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdi }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { rsi, rdi }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void ToNumberStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { rsi, rax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void NumberToStringStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - 
descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; + Register registers[] = { rsi, rax }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); } void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax, rbx, rcx }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId( - Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; + Register registers[] = { rsi, rax, rbx, rcx }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Smi(), + Representation::Tagged() }; + + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); } void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax, rbx, rcx, rdx }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry; + Register registers[] = { rsi, rax, rbx, rcx, rdx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); } void CreateAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rbx, rdx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { rsi, rbx, rdx }; + descriptor->Initialize(MajorKey(), 
ARRAY_SIZE(registers), registers); } -void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallFunctionStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rax }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + Register registers[] = {rsi, rdi}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } -void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void CallConstructStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rax }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); + // rax : number of arguments + // rbx : feedback vector + // rdx : (only if rbx is not the megamorphic symbol) slot in feedback + // vector (Smi) + // rdi : constructor function + // TODO(turbofan): So far we don't gather type feedback and hence skip the + // slot parameter, but ArrayConstructStub needs the vector to be undefined. 
+ Register registers[] = {rsi, rax, rdi, rbx}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); } void RegExpConstructResultStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rcx, rbx, rax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry; -} - - -void LoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedLoadFieldStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void StringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax, rcx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; -} - - -void KeyedStringLengthStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rax }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = NULL; + Register registers[] = { rsi, rcx, rbx, rax }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); } -void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( - Isolate* isolate, +void 
TransitionElementsKindStub::InitializeInterfaceDescriptor( CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rcx, rax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); + Register registers[] = { rsi, rax, rbx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry); } -void TransitionElementsKindStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax, rbx }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; -} +const Register InterfaceDescriptor::ContextRegister() { return rsi; } static void InitializeArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state // rax -- number of arguments // rdi -- function // rbx -- allocation site with elements kind - static Register registers_variable_args[] = { rdi, rbx, rax }; - static Register registers_no_args[] = { rdi, rbx }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_no_args; + Register registers[] = { rsi, rdi, rbx }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = 
rax; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { rsi, rdi, rbx, rax }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, rax, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry; } static void InitializeInternalArrayConstructorDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor, + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, int constant_stack_parameter_count) { // register state + // rsi -- context // rax -- number of arguments // rdi -- constructor function - static Register registers_variable_args[] = { rdi, rax }; - static Register registers_no_args[] = { rdi }; + Address deopt_handler = Runtime::FunctionForId( + Runtime::kInternalArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers_no_args; + Register registers[] = { rsi, rdi }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); } else { // stack param count needs (constructor pointer, and single argument) - descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; - descriptor->stack_parameter_count_ = rax; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers_variable_args; + Register registers[] = { rsi, rdi, rax }; + Representation representations[] = { + Representation::Tagged(), + 
Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, rax, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); } - - descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; - descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry; } void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); } void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); } void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* 
descriptor) { - InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); } void CompareNilICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(CompareNilIC_Miss); + Register registers[] = { rsi, rax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); } void ToBooleanStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax }; - descriptor->register_param_count_ = 1; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ToBooleanIC_Miss); + Register registers[] = { rsi, rax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); -} - - -void StoreGlobalStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rcx, rax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(StoreIC_MissFromStubFailure); -} - - -void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( - Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rax, rbx, rcx, rdx }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - 
descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); } void BinaryOpICStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rax }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + Register registers[] = { rsi, rdx, rax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); descriptor->SetMissHandler( - ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); } void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rcx, rdx, rax }; - descriptor->register_param_count_ = 3; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite); + Register registers[] = { rsi, rcx, rdx, rax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); } void StringAddStub::InitializeInterfaceDescriptor( - Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - static Register registers[] = { rdx, rax }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry; + Register registers[] = { rsi, rdx, rax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); } @@ -394,82 +269,72 @@ { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ArgumentAdaptorCall); - static Register registers[] = { rdi, 
// JSFunction - rsi, // context - rax, // actual number of arguments - rbx, // expected number of arguments + Register registers[] = { rsi, // context + rdi, // JSFunction + rax, // actual number of arguments + rbx, // expected number of arguments }; - static Representation representations[] = { - Representation::Tagged(), // JSFunction + Representation representations[] = { Representation::Tagged(), // context + Representation::Tagged(), // JSFunction Representation::Integer32(), // actual number of arguments Representation::Integer32(), // expected number of arguments }; - descriptor->register_param_count_ = 4; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::KeyedCall); - static Register registers[] = { rsi, // context - rcx, // key + Register registers[] = { rsi, // context + rcx, // key }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // key }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::NamedCall); - static Register registers[] = { rsi, // context - rcx, // name + Register registers[] = { rsi, // context + rcx, // name }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // name }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { 
CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::CallHandler); - static Register registers[] = { rsi, // context - rdx, // receiver + Register registers[] = { rsi, // context + rdx, // receiver }; - static Representation representations[] = { + Representation representations[] = { Representation::Tagged(), // context Representation::Tagged(), // receiver }; - descriptor->register_param_count_ = 2; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } { CallInterfaceDescriptor* descriptor = isolate->call_descriptor(Isolate::ApiFunctionCall); - static Register registers[] = { rax, // callee - rbx, // call_data - rcx, // holder - rdx, // api_function_address - rsi, // context + Register registers[] = { rsi, // context + rax, // callee + rbx, // call_data + rcx, // holder + rdx, // api_function_address }; - static Representation representations[] = { + Representation representations[] = { + Representation::Tagged(), // context Representation::Tagged(), // callee Representation::Tagged(), // call_data Representation::Tagged(), // holder Representation::External(), // api_function_address - Representation::Tagged(), // context }; - descriptor->register_param_count_ = 5; - descriptor->register_params_ = registers; - descriptor->param_representations_ = representations; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); } } @@ -479,22 +344,22 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. 
- Isolate* isolate = masm->isolate(); - isolate->counters()->code_stubs()->Increment(); + isolate()->counters()->code_stubs()->Increment(); - CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); - int param_count = descriptor->register_param_count_; + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); { // Call the runtime system in a fresh internal frame. FrameScope scope(masm, StackFrame::INTERNAL); - ASSERT(descriptor->register_param_count_ == 0 || - rax.is(descriptor->register_params_[param_count - 1])); + DCHECK(param_count == 0 || + rax.is(descriptor->GetEnvironmentParameterRegister( + param_count - 1))); // Push arguments for (int i = 0; i < param_count; ++i) { - __ Push(descriptor->register_params_[i]); + __ Push(descriptor->GetEnvironmentParameterRegister(i)); } ExternalReference miss = descriptor->miss_handler(); - __ CallExternalReference(miss, descriptor->register_param_count_); + __ CallExternalReference(miss, param_count); } __ Ret(); @@ -506,11 +371,11 @@ const int argument_count = 1; __ PrepareCallCFunction(argument_count); __ LoadAddress(arg_reg_1, - ExternalReference::isolate_address(masm->isolate())); + ExternalReference::isolate_address(isolate())); AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), + ExternalReference::store_buffer_overflow_function(isolate()), argument_count); __ PopCallerSaved(save_doubles_); __ ret(0); @@ -535,7 +400,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { Register input_reg = this->source(); Register final_result_reg = this->destination(); - ASSERT(is_truncating()); + DCHECK(is_truncating()); Label check_negative, process_64_bits, done; @@ -607,7 +472,7 @@ __ addp(rsp, Immediate(kDoubleSize)); } if (!final_result_reg.is(result_reg)) { - ASSERT(final_result_reg.is(rcx)); + DCHECK(final_result_reg.is(rcx)); __ 
movl(final_result_reg, result_reg); } __ popq(save_reg); @@ -871,11 +736,11 @@ __ Cvtlsi2sd(double_exponent, exponent); // Returning or bailing out. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); if (exponent_type_ == ON_STACK) { // The arguments are still on the stack. __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); // The stub is called from non-optimized code, which expects the result // as heap number in rax. @@ -888,12 +753,12 @@ __ bind(&call_runtime); // Move base to the correct argument register. Exponent is already in xmm1. __ movsd(xmm0, double_base); - ASSERT(double_exponent.is(xmm1)); + DCHECK(double_exponent.is(xmm1)); { AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(2); __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), 2); + ExternalReference::power_double_double_function(isolate()), 2); } // Return value is in xmm0. 
__ movsd(double_result, xmm0); @@ -907,30 +772,13 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { Label miss; - Register receiver; - if (kind() == Code::KEYED_LOAD_IC) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - __ Cmp(rax, masm->isolate()->factory()->prototype_string()); - __ j(not_equal, &miss); - receiver = rdx; - } else { - ASSERT(kind() == Code::LOAD_IC); - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - receiver = rax; - } + Register receiver = LoadIC::ReceiverRegister(); - StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss); + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, + r9, &miss); __ bind(&miss); - StubCompiler::TailCallBuiltin( - masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); } @@ -1000,7 +848,7 @@ // rbx: the mapped parameter count (untagged) // rax: the allocated object (tagged). - Factory* factory = masm->isolate()->factory(); + Factory* factory = isolate()->factory(); StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER); __ SmiToInteger64(rbx, args.GetArgumentOperand(2)); @@ -1058,35 +906,35 @@ // rax = address of new object(s) (tagged) // rcx = argument count (untagged) - // Get the arguments boilerplate from the current native context into rdi. - Label has_mapped_parameters, copy; + // Get the arguments map from the current native context into rdi. 
+ Label has_mapped_parameters, instantiate; __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset)); __ testp(rbx, rbx); __ j(not_zero, &has_mapped_parameters, Label::kNear); - const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX; + const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX; __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex))); - __ jmp(©, Label::kNear); + __ jmp(&instantiate, Label::kNear); - const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX; + const int kAliasedIndex = Context::ALIASED_ARGUMENTS_MAP_INDEX; __ bind(&has_mapped_parameters); __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex))); - __ bind(©); + __ bind(&instantiate); // rax = address of new object (tagged) // rbx = mapped parameter count (untagged) // rcx = argument count (untagged) - // rdi = address of boilerplate object (tagged) - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ movp(rdx, FieldOperand(rdi, i)); - __ movp(FieldOperand(rax, i), rdx); - } + // rdi = address of arguments map (tagged) + __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi); + __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex); + __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister); + __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister); // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ movp(rdx, args.GetArgumentOperand(0)); + __ AssertNotSmi(rdx); __ movp(FieldOperand(rax, JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize), rdx); @@ -1204,7 +1052,7 @@ __ bind(&runtime); __ Integer32ToSmi(rcx, rcx); __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count. 
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -1231,7 +1079,7 @@ __ movp(args.GetArgumentOperand(1), rdx); __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); } @@ -1276,18 +1124,16 @@ // Do the allocation of both objects in one go. __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); - // Get the arguments boilerplate from the current native context. + // Get the arguments map from the current native context. __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset)); - const int offset = - Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX); + const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX); __ movp(rdi, Operand(rdi, offset)); - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ movp(rbx, FieldOperand(rdi, i)); - __ movp(FieldOperand(rax, i), rbx); - } + __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi); + __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex); + __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister); + __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister); // Get the length (smi tagged) and set that as an in-object property too. STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); @@ -1332,7 +1178,7 @@ // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); } @@ -1341,7 +1187,7 @@ // time or if regexp entry in generated code is turned off runtime switch or // at compilation. 
#ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. @@ -1363,11 +1209,10 @@ ARGUMENTS_DONT_CONTAIN_RECEIVER); Label runtime; // Ensure that a RegExp stack is allocated. - Isolate* isolate = masm->isolate(); ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(isolate); + ExternalReference::address_of_regexp_stack_memory_address(isolate()); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(isolate); + ExternalReference::address_of_regexp_stack_memory_size(isolate()); __ Load(kScratchRegister, address_of_regexp_stack_memory_size); __ testp(kScratchRegister, kScratchRegister); __ j(zero, &runtime); @@ -1481,8 +1326,8 @@ // (5b) Is subject external? If yes, go to (8). __ testb(rbx, Immediate(kStringRepresentationMask)); // The underlying external string is never a short external string. - STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); - STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); __ j(not_zero, &external_string); // Go to (8) // (6) One byte sequential. Load regexp code for one byte. @@ -1519,7 +1364,7 @@ // rcx: encoding of subject string (1 if ASCII 0 if two_byte); // r11: code // All checks done. Now push arguments for native regexp code. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->regexp_entry_native(), 1); // Isolates: note we add an additional parameter here (isolate pointer). @@ -1530,7 +1375,7 @@ // Argument 9: Pass current isolate address. 
__ LoadAddress(kScratchRegister, - ExternalReference::isolate_address(masm->isolate())); + ExternalReference::isolate_address(isolate())); __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize), kScratchRegister); @@ -1556,8 +1401,8 @@ #endif // Argument 5: static offsets vector buffer. - __ LoadAddress(r8, - ExternalReference::address_of_static_offsets_vector(isolate)); + __ LoadAddress( + r8, ExternalReference::address_of_static_offsets_vector(isolate())); // Argument 5 passed in r8 on Linux and on the stack on Windows. #ifdef _WIN64 __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8); @@ -1682,8 +1527,8 @@ kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. - __ LoadAddress(rcx, - ExternalReference::address_of_static_offsets_vector(isolate)); + __ LoadAddress( + rcx, ExternalReference::address_of_static_offsets_vector(isolate())); // rbx: last_match_info backing store (FixedArray) // rcx: offsets vector @@ -1716,7 +1561,7 @@ // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. ExternalReference pending_exception_address( - Isolate::kPendingExceptionAddress, isolate); + Isolate::kPendingExceptionAddress, isolate()); Operand pending_exception_operand = masm->ExternalOperand(pending_exception_address, rbx); __ movp(rax, pending_exception_operand); @@ -1735,7 +1580,7 @@ // Do the runtime call to execute the regexp. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); // Deferred code for string handling. // (7) Not a long external string? If yes, go to (10). 
@@ -1787,8 +1632,8 @@ static int NegativeComparisonResult(Condition cc) { - ASSERT(cc != equal); - ASSERT((cc == less) || (cc == less_equal) + DCHECK(cc != equal); + DCHECK((cc == less) || (cc == less_equal) || (cc == greater) || (cc == greater_equal)); return (cc == greater || cc == greater_equal) ? LESS : GREATER; } @@ -1829,7 +1674,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { Label check_unequal_objects, done; Condition cc = GetCondition(); - Factory* factory = masm->isolate()->factory(); + Factory* factory = isolate()->factory(); Label miss; CheckInputType(masm, rdx, left_, &miss); @@ -1978,7 +1823,7 @@ // If one of the numbers was NaN, then the result is always false. // The cc is never not-equal. __ bind(&unordered); - ASSERT(cc != not_equal); + DCHECK(cc != not_equal); if (cc == less || cc == less_equal) { __ Set(rax, 1); } else { @@ -2163,7 +2008,7 @@ __ Push(rdx); __ Push(rbx); - CreateAllocationSiteStub create_stub; + CreateAllocationSiteStub create_stub(isolate); __ CallStub(&create_stub); __ Pop(rbx); @@ -2197,55 +2042,100 @@ } -void CallFunctionStub::Generate(MacroAssembler* masm) { - // rbx : feedback vector - // rdx : (only if rbx is not the megamorphic symbol) slot in feedback - // vector (Smi) +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + // Do not transform the receiver for strict mode functions. + __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset), + Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); + __ j(not_equal, cont); + + // Do not transform the receiver for natives. + // SharedFunctionInfo is already loaded into rcx. 
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset), + Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); + __ j(not_equal, cont); +} + + +static void EmitSlowCase(Isolate* isolate, + MacroAssembler* masm, + StackArgumentsAccessor* args, + int argc, + Label* non_function) { + // Check for function proxy. + __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, non_function); + __ PopReturnAddressTo(rcx); + __ Push(rdi); // put proxy as additional argument under return address + __ PushReturnAddressFrom(rcx); + __ Set(rax, argc + 1); + __ Set(rbx, 0); + __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ jmp(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ bind(non_function); + __ movp(args->GetReceiverOperand(), rdi); + __ Set(rax, argc); + __ Set(rbx, 0); + __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); + Handle<Code> adaptor = + isolate->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, + StackArgumentsAccessor* args, + Label* cont) { + // Wrap the receiver and patch it back onto the stack. + { FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ Push(rdi); + __ Push(rax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ Pop(rdi); + } + __ movp(args->GetReceiverOperand(), rax); + __ jmp(cont); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { // rdi : the function to call + + // wrap_and_call can only be true if we are compiling a monomorphic method. 
Isolate* isolate = masm->isolate(); Label slow, non_function, wrap, cont; - StackArgumentsAccessor args(rsp, argc_); + StackArgumentsAccessor args(rsp, argc); - if (NeedsChecks()) { + if (needs_checks) { // Check that the function really is a JavaScript function. __ JumpIfSmi(rdi, &non_function); // Goto slow case if we do not have a function. __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); __ j(not_equal, &slow); - - if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); - // Type information was updated. Because we may call Array, which - // expects either undefined or an AllocationSite in rbx we need - // to set rbx to undefined. - __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); - } } // Fast-case: Just invoke the function. - ParameterCount actual(argc_); + ParameterCount actual(argc); - if (CallAsMethod()) { - if (NeedsChecks()) { - // Do not transform the receiver for strict mode functions. - __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset), - Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); - __ j(not_equal, &cont); - - // Do not transform the receiver for natives. - // SharedFunctionInfo is already loaded into rcx. - __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset), - Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); - __ j(not_equal, &cont); + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); } - // Load the receiver from the stack. __ movp(rax, args.GetReceiverOperand()); - if (NeedsChecks()) { + if (needs_checks) { __ JumpIfSmi(rax, &wrap); __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); @@ -2256,63 +2146,27 @@ __ bind(&cont); } + __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper()); - if (NeedsChecks()) { + if (needs_checks) { // Slow-case: Non-function called. 
__ bind(&slow); - if (RecordCallTarget()) { - // If there is a call target cache, mark it megamorphic in the - // non-function case. MegamorphicSentinel is an immortal immovable - // object (megamorphic symbol) so no write barrier is needed. - __ SmiToInteger32(rdx, rdx); - __ Move(FieldOperand(rbx, rdx, times_pointer_size, - FixedArray::kHeaderSize), - TypeFeedbackInfo::MegamorphicSentinel(isolate)); - __ Integer32ToSmi(rdx, rdx); - } - // Check for function proxy. - __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE); - __ j(not_equal, &non_function); - __ PopReturnAddressTo(rcx); - __ Push(rdi); // put proxy as additional argument under return address - __ PushReturnAddressFrom(rcx); - __ Set(rax, argc_ + 1); - __ Set(rbx, 0); - __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); - { - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); - __ jmp(adaptor, RelocInfo::CODE_TARGET); - } - - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). - __ bind(&non_function); - __ movp(args.GetReceiverOperand(), rdi); - __ Set(rax, argc_); - __ Set(rbx, 0); - __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); - Handle<Code> adaptor = - isolate->builtins()->ArgumentsAdaptorTrampoline(); - __ Jump(adaptor, RelocInfo::CODE_TARGET); + EmitSlowCase(isolate, masm, &args, argc, &non_function); } - if (CallAsMethod()) { + if (call_as_method) { __ bind(&wrap); - // Wrap the receiver and patch it back onto the stack. 
- { FrameScope frame_scope(masm, StackFrame::INTERNAL); - __ Push(rdi); - __ Push(rax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ Pop(rdi); - } - __ movp(args.GetReceiverOperand(), rax); - __ jmp(&cont); + EmitWrapCase(masm, &args, &cont); } } +void CallFunctionStub::Generate(MacroAssembler* masm) { + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + void CallConstructStub::Generate(MacroAssembler* masm) { // rax : number of arguments // rbx : feedback vector @@ -2374,11 +2228,170 @@ __ bind(&do_call); // Set expected number of arguments to zero (not changing rax). __ Set(rbx, 0); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); } +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset)); + __ movp(vector, FieldOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // rdi - function + // rdx - slot id (as integer) + Label miss; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, rbx); + __ SmiToInteger32(rdx, rdx); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx); + __ cmpp(rdi, rcx); + __ j(not_equal, &miss); + + __ movp(rax, Immediate(arg_count())); + __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size, + FixedArray::kHeaderSize)); + // Verify that ecx contains an AllocationSite + Factory* factory = masm->isolate()->factory(); + __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), + factory->allocation_site_map()); + __ j(not_equal, &miss); + + __ movp(rbx, rcx); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, 
IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + // Unreachable. + __ int3(); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // rdi - function + // rbx - vector + // rdx - slot id + Isolate* isolate = masm->isolate(); + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + StackArgumentsAccessor args(rsp, argc); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, rbx); + + // The checks. First, does rdi match the recorded monomorphic target? + __ SmiToInteger32(rdx, rdx); + __ cmpp(rdi, FieldOperand(rbx, rdx, times_pointer_size, + FixedArray::kHeaderSize)); + __ j(not_equal, &extra_checks_or_miss); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + + // Load the receiver from the stack. + __ movp(rax, args.GetReceiverOperand()); + + __ JumpIfSmi(rax, &wrap); + + __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); + __ j(below, &wrap); + + __ bind(&cont); + } + + __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(isolate, masm, &args, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, &args, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size, + FixedArray::kHeaderSize)); + __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate)); + __ j(equal, &slow_start); + __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate)); + __ j(equal, &miss); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. 
+ __ AssertNotSmi(rcx); + __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &miss); + __ Move(FieldOperand(rbx, rdx, times_pointer_size, + FixedArray::kHeaderSize), + TypeFeedbackInfo::MegamorphicSentinel(isolate)); + __ jmp(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + // Check that function is not a smi. + __ JumpIfSmi(rdi, &non_function); + // Check that function is a JSFunction. + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &slow); + __ jmp(&have_js_function); + + // Unreachable + __ int3(); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ movp(rcx, Operand(rsp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ Push(rcx); + __ Push(rdi); + __ Push(rbx); + __ Integer32ToSmi(rdx, rdx); + __ Push(rdx); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to edi and exit the internal frame. + __ movp(rdi, rax); + } +} + + bool CEntryStub::NeedsImmovableCode() { return false; } @@ -2401,26 +2414,35 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { - CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode(isolate); - CEntryStub save_doubles(1, kSaveFPRegs); - save_doubles.GetCode(isolate); + CEntryStub stub(isolate, 1, kDontSaveFPRegs); + stub.GetCode(); + CEntryStub save_doubles(isolate, 1, kSaveFPRegs); + save_doubles.GetCode(); } -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - bool do_gc, - bool always_allocate_scope) { - // rax: result parameter for PerformGC, if any. 
- // rbx: pointer to C function (C callee-saved). - // rbp: frame pointer (restored after C call). - // rsp: stack pointer (restored after C call). +void CEntryStub::Generate(MacroAssembler* masm) { + // rax: number of arguments including receiver + // rbx: pointer to C function (C callee-saved) + // rbp: frame pointer of calling JS frame (restored after C call) + // rsp: stack pointer (restored after C call) + // rsi: current context (restored) + + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Enter the exit frame that transitions from JavaScript to C++. +#ifdef _WIN64 + int arg_stack_space = (result_size_ < 2 ? 2 : 4); +#else + int arg_stack_space = 0; +#endif + __ EnterExitFrame(arg_stack_space, save_doubles_); + + // rbx: pointer to builtin function (C callee-saved). + // rbp: frame pointer of exit frame (restored after C call). + // rsp: stack pointer (restored after C call). // r14: number of arguments including receiver (C callee-saved). - // r15: pointer to the first argument (C callee-saved). - // This pointer is reused in LeaveExitFrame(), so it is stored in a - // callee-saved register. + // r15: argv pointer (C callee-saved). // Simple results returned in rax (both AMD64 and Win64 calling conventions). // Complex results must be written to address passed as first argument. @@ -2431,25 +2453,6 @@ __ CheckStackAlignment(); } - if (do_gc) { - // Pass failure code returned from last attempt as first argument to - // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the - // stack is known to be aligned. This function takes one argument which is - // passed in register. 
- __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate())); - __ movp(arg_reg_1, rax); - __ Move(kScratchRegister, - ExternalReference::perform_gc_function(masm->isolate())); - __ call(kScratchRegister); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); - if (always_allocate_scope) { - Operand scope_depth_operand = masm->ExternalOperand(scope_depth); - __ incl(scope_depth_operand); - } - // Call C function. #ifdef _WIN64 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. @@ -2460,38 +2463,30 @@ // Return result in single register (rax). __ movp(rcx, r14); // argc. __ movp(rdx, r15); // argv. - __ Move(r8, ExternalReference::isolate_address(masm->isolate())); + __ Move(r8, ExternalReference::isolate_address(isolate())); } else { - ASSERT_EQ(2, result_size_); + DCHECK_EQ(2, result_size_); // Pass a pointer to the result location as the first argument. __ leap(rcx, StackSpaceOperand(2)); // Pass a pointer to the Arguments object as the second argument. __ movp(rdx, r14); // argc. __ movp(r8, r15); // argv. - __ Move(r9, ExternalReference::isolate_address(masm->isolate())); + __ Move(r9, ExternalReference::isolate_address(isolate())); } #else // _WIN64 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9. __ movp(rdi, r14); // argc. __ movp(rsi, r15); // argv. - __ Move(rdx, ExternalReference::isolate_address(masm->isolate())); + __ Move(rdx, ExternalReference::isolate_address(isolate())); #endif __ call(rbx); // Result is in rax - do not destroy this register! - if (always_allocate_scope) { - Operand scope_depth_operand = masm->ExternalOperand(scope_depth); - __ decl(scope_depth_operand); - } - - // Check for failure result. - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); #ifdef _WIN64 // If return value is on the stack, pop it to registers. 
if (result_size_ > 1) { - ASSERT_EQ(2, result_size_); + DCHECK_EQ(2, result_size_); // Read result values stored on stack. Result is stored // above the four argument mirror slots and the two // Arguments object slots. @@ -2499,121 +2494,65 @@ __ movq(rdx, Operand(rsp, 7 * kRegisterSize)); } #endif - __ leap(rcx, Operand(rax, 1)); - // Lower 2 bits of rcx are 0 iff rax has failure tag. - __ testl(rcx, Immediate(kFailureTagMask)); - __ j(zero, &failure_returned); + + // Runtime functions should not return 'the hole'. Allowing it to escape may + // lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); + __ j(not_equal, &okay, Label::kNear); + __ int3(); + __ bind(&okay); + } + + // Check result for exception sentinel. + Label exception_returned; + __ CompareRoot(rax, Heap::kExceptionRootIndex); + __ j(equal, &exception_returned); + + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + Label okay; + __ LoadRoot(r14, Heap::kTheHoleValueRootIndex); + Operand pending_exception_operand = + masm->ExternalOperand(pending_exception_address); + __ cmpp(r14, pending_exception_operand); + __ j(equal, &okay, Label::kNear); + __ int3(); + __ bind(&okay); + } // Exit the JavaScript to C++ exit frame. __ LeaveExitFrame(save_doubles_); __ ret(0); - // Handling of failure. - __ bind(&failure_returned); - - Label retry; - // If the returned exception is RETRY_AFTER_GC continue at retry label - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ j(zero, &retry, Label::kNear); + // Handling of exception. + __ bind(&exception_returned); // Retrieve the pending exception. 
- ExternalReference pending_exception_address( - Isolate::kPendingExceptionAddress, masm->isolate()); Operand pending_exception_operand = masm->ExternalOperand(pending_exception_address); __ movp(rax, pending_exception_operand); // Clear the pending exception. - pending_exception_operand = - masm->ExternalOperand(pending_exception_address); __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex); __ movp(pending_exception_operand, rdx); // Special handling of termination exceptions which are uncatchable // by javascript code. + Label throw_termination_exception; __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex); - __ j(equal, throw_termination_exception); + __ j(equal, &throw_termination_exception); // Handle normal exception. - __ jmp(throw_normal_exception); - - // Retry. - __ bind(&retry); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // rax: number of arguments including receiver - // rbx: pointer to C function (C callee-saved) - // rbp: frame pointer of calling JS frame (restored after C call) - // rsp: stack pointer (restored after C call) - // rsi: current context (restored) - - // NOTE: Invocations of builtins may return failure objects - // instead of a proper result. The builtin entry handles - // this by performing a garbage collection and retrying the - // builtin once. - - ProfileEntryHookStub::MaybeCallEntryHook(masm); - - // Enter the exit frame that transitions from JavaScript to C++. -#ifdef _WIN64 - int arg_stack_space = (result_size_ < 2 ? 2 : 4); -#else - int arg_stack_space = 0; -#endif - __ EnterExitFrame(arg_stack_space, save_doubles_); - - // rax: Holds the context at this point, but should not be used. - // On entry to code generated by GenerateCore, it must hold - // a failure result if the collect_garbage argument to GenerateCore - // is true. This failure result can be the result of code - // generated by a previous call to GenerateCore. The value - // of rax is then passed to Runtime::PerformGC. 
- // rbx: pointer to builtin function (C callee-saved). - // rbp: frame pointer of exit frame (restored after C call). - // rsp: stack pointer (restored after C call). - // r14: number of arguments including receiver (C callee-saved). - // r15: argv pointer (C callee-saved). - - Label throw_normal_exception; - Label throw_termination_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ Move(rax, failure, Assembler::RelocInfoNone()); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - true, - true); - - { FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(0); - __ CallCFunction( - ExternalReference::out_of_memory_function(masm->isolate()), 0); - } + __ Throw(rax); __ bind(&throw_termination_exception); __ ThrowUncatchable(rax); - - __ bind(&throw_normal_exception); - __ Throw(rax); } @@ -2669,17 +2608,15 @@ __ InitializeRootRegister(); } - Isolate* isolate = masm->isolate(); - // Save copies of the top frame descriptor on the stack. - ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate); + ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate()); { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp); __ Push(c_entry_fp_operand); } // If this is the outermost JS call, set js_entry_sp value. 
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); __ Load(rax, js_entry_sp); __ testp(rax, rax); __ j(not_zero, ¬_outermost_js); @@ -2700,9 +2637,9 @@ // Caught exception: Store result (exception) in the pending exception // field in the JSEnv and return a failure sentinel. ExternalReference pending_exception(Isolate::kPendingExceptionAddress, - isolate); + isolate()); __ Store(pending_exception, rax); - __ Move(rax, Failure::Exception(), Assembler::RelocInfoNone()); + __ LoadRoot(rax, Heap::kExceptionRootIndex); __ jmp(&exit); // Invoke: Link this frame into the handler chain. There's only one @@ -2724,10 +2661,10 @@ // at the time this code is generated. if (is_construct) { ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, - isolate); + isolate()); __ Load(rax, construct_entry); } else { - ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); + ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); __ Load(rax, entry); } __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); @@ -2799,97 +2736,109 @@ // is and instance of the function and anything else to // indicate that the value is not an instance. + // Fixed register usage throughout the stub. + Register object = rax; // Object (lhs). + Register map = rbx; // Map of the object. + Register function = rdx; // Function (rhs). + Register prototype = rdi; // Prototype of the function. + Register scratch = rcx; + static const int kOffsetToMapCheckValue = 2; - static const int kOffsetToResultValue = 18; + static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14; // The last 4 bytes of the instruction sequence - // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset)) + // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset)) // Move(kScratchRegister, Factory::the_hole_value()) // in front of the hole value address. 
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78; + static const unsigned int kWordBeforeMapCheckValue = + kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78; // The last 4 bytes of the instruction sequence // __ j(not_equal, &cache_miss); // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex); // before the offset of the hole value in the root array. - static const unsigned int kWordBeforeResultValue = 0x458B4906; - // Only the inline check flag is supported on X64. - ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck()); + static const unsigned int kWordBeforeResultValue = + kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106; + int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0; - // Get the object - go slow case if it's a smi. + DCHECK_EQ(object.code(), InstanceofStub::left().code()); + DCHECK_EQ(function.code(), InstanceofStub::right().code()); + + // Get the object and function - they are always both needed. + // Go slow case if the object is a smi. Label slow; StackArgumentsAccessor args(rsp, 2 + extra_argument_offset, ARGUMENTS_DONT_CONTAIN_RECEIVER); - __ movp(rax, args.GetArgumentOperand(0)); - __ JumpIfSmi(rax, &slow); + if (!HasArgsInRegisters()) { + __ movp(object, args.GetArgumentOperand(0)); + __ movp(function, args.GetArgumentOperand(1)); + } + __ JumpIfSmi(object, &slow); // Check that the left hand is a JS object. Leave its map in rax. - __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax); + __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map); __ j(below, &slow); - __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE); + __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE); __ j(above, &slow); - // Get the prototype of the function. - __ movp(rdx, args.GetArgumentOperand(1)); - // rdx is function, rax is map. - // If there is a call site cache don't look in the global cache, but do the // real lookup and update the call site cache. 
- if (!HasCallSiteInlineCheck()) { + if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) { // Look up the function and the map in the instanceof cache. Label miss; - __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); + __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ j(not_equal, &miss, Label::kNear); - __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); + __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex); __ j(not_equal, &miss, Label::kNear); __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); __ bind(&miss); } - __ TryGetFunctionPrototype(rdx, rbx, &slow, true); + // Get the prototype of the function. + __ TryGetFunctionPrototype(function, prototype, &slow, true); // Check that the function prototype is a JS object. - __ JumpIfSmi(rbx, &slow); - __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); + __ JumpIfSmi(prototype, &slow); + __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); __ j(below, &slow); __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE); __ j(above, &slow); - // Register mapping: - // rax is object map. - // rdx is function. - // rbx is function prototype. + // Update the global instanceof or call site inlined cache with the current + // map and function. The cached answer will be set when it is known below. if (!HasCallSiteInlineCheck()) { - __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); - __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); + __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); } else { + // The constants for the code patching are based on push instructions + // at the call site. + DCHECK(!HasArgsInRegisters()); // Get return address and delta to inlined map check. 
__ movq(kScratchRegister, StackOperandForReturnAddress(0)); __ subp(kScratchRegister, args.GetArgumentOperand(2)); if (FLAG_debug_code) { - __ movl(rdi, Immediate(kWordBeforeMapCheckValue)); - __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi); + __ movl(scratch, Immediate(kWordBeforeMapCheckValue)); + __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch); __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck); } __ movp(kScratchRegister, Operand(kScratchRegister, kOffsetToMapCheckValue)); - __ movp(Operand(kScratchRegister, 0), rax); + __ movp(Operand(kScratchRegister, 0), map); } - __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset)); - // Loop through the prototype chain looking for the function prototype. + __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset)); Label loop, is_instance, is_not_instance; __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); __ bind(&loop); - __ cmpp(rcx, rbx); + __ cmpp(scratch, prototype); __ j(equal, &is_instance, Label::kNear); - __ cmpp(rcx, kScratchRegister); + __ cmpp(scratch, kScratchRegister); // The code at is_not_instance assumes that kScratchRegister contains a // non-zero GCable value (the null object in this case). __ j(equal, &is_not_instance, Label::kNear); - __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); - __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); + __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); + __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); __ jmp(&loop); __ bind(&is_instance); @@ -2898,12 +2847,15 @@ // Store bitwise zero in the cache. This is a Smi in GC terms. STATIC_ASSERT(kSmiTag == 0); __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ LoadRoot(rax, Heap::kTrueValueRootIndex); + } } else { // Store offset of true in the root array at the inline check site. 
int true_offset = 0x100 + (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; // Assert it is a 1-byte signed value. - ASSERT(true_offset >= 0 && true_offset < 0x100); + DCHECK(true_offset >= 0 && true_offset < 0x100); __ movl(rax, Immediate(true_offset)); __ movq(kScratchRegister, StackOperandForReturnAddress(0)); __ subp(kScratchRegister, args.GetArgumentOperand(2)); @@ -2913,20 +2865,26 @@ __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax); __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov); } - __ Set(rax, 0); + if (!ReturnTrueFalseObject()) { + __ Set(rax, 0); + } } - __ ret((2 + extra_argument_offset) * kPointerSize); + __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) * + kPointerSize); __ bind(&is_not_instance); if (!HasCallSiteInlineCheck()) { // We have to store a non-zero value in the cache. __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ LoadRoot(rax, Heap::kFalseValueRootIndex); + } } else { // Store offset of false in the root array at the inline check site. int false_offset = 0x100 + (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; // Assert it is a 1-byte signed value. - ASSERT(false_offset >= 0 && false_offset < 0x100); + DCHECK(false_offset >= 0 && false_offset < 0x100); __ movl(rax, Immediate(false_offset)); __ movq(kScratchRegister, StackOperandForReturnAddress(0)); __ subp(kScratchRegister, args.GetArgumentOperand(2)); @@ -2937,25 +2895,48 @@ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov); } } - __ ret((2 + extra_argument_offset) * kPointerSize); + __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) * + kPointerSize); // Slow-case: Go through the JavaScript implementation. __ bind(&slow); - if (HasCallSiteInlineCheck()) { - // Remove extra value from the stack. 
- __ PopReturnAddressTo(rcx); - __ Pop(rax); - __ PushReturnAddressFrom(rcx); + if (!ReturnTrueFalseObject()) { + // Tail call the builtin which returns 0 or 1. + DCHECK(!HasArgsInRegisters()); + if (HasCallSiteInlineCheck()) { + // Remove extra value from the stack. + __ PopReturnAddressTo(rcx); + __ Pop(rax); + __ PushReturnAddressFrom(rcx); + } + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); + } else { + // Call the builtin and convert 0/1 to true/false. + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(object); + __ Push(function); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } + Label true_value, done; + __ testq(rax, rax); + __ j(zero, &true_value, Label::kNear); + __ LoadRoot(rax, Heap::kFalseValueRootIndex); + __ jmp(&done, Label::kNear); + __ bind(&true_value); + __ LoadRoot(rax, Heap::kTrueValueRootIndex); + __ bind(&done); + __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) * + kPointerSize); } - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } // Passing arguments in registers is not supported. -Register InstanceofStub::left() { return no_reg; } +Register InstanceofStub::left() { return rax; } -Register InstanceofStub::right() { return no_reg; } +Register InstanceofStub::right() { return rdx; } // ------------------------------------------------------------------------- @@ -3014,9 +2995,9 @@ if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); // NumberToSmi discards numbers that are not exact integers. 
- __ CallRuntime(Runtime::kHiddenNumberToSmi, 1); + __ CallRuntime(Runtime::kNumberToSmi, 1); } if (!index_.is(rax)) { // Save the conversion result before the pop instructions below @@ -3041,7 +3022,7 @@ __ Push(object_); __ Integer32ToSmi(index_, index_); __ Push(index_); - __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); if (!result_.is(rax)) { __ movp(result_, rax); } @@ -3090,49 +3071,22 @@ } -void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, - Register src, - Register count, - bool ascii) { - // Copy characters using rep movs of doublewords. Align destination on 4 byte - // boundary before starting rep movs. Copy remaining characters after running - // rep movs. - // Count is positive int32, dest and src are character pointers. - ASSERT(dest.is(rdi)); // rep movs destination - ASSERT(src.is(rsi)); // rep movs source - ASSERT(count.is(rcx)); // rep movs count - +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + String::Encoding encoding) { // Nothing to do for zero characters. Label done; __ testl(count, count); __ j(zero, &done, Label::kNear); // Make count the number of bytes to copy. - if (!ascii) { + if (encoding == String::TWO_BYTE_ENCODING) { STATIC_ASSERT(2 == sizeof(uc16)); __ addl(count, count); } - // Don't enter the rep movs if there are less than 4 bytes to copy. - Label last_bytes; - __ testl(count, Immediate(~(kPointerSize - 1))); - __ j(zero, &last_bytes, Label::kNear); - - // Copy from edi to esi using rep movs instruction. - __ movl(kScratchRegister, count); - __ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy. - __ repmovsp(); - - // Find number of bytes left. - __ movl(count, kScratchRegister); - __ andp(count, Immediate(kPointerSize - 1)); - - // Check if there are more bytes to copy. 
- __ bind(&last_bytes); - __ testl(count, count); - __ j(zero, &done, Label::kNear); - // Copy remaining characters. Label loop; __ bind(&loop); @@ -3248,7 +3202,7 @@ // Longer than original string's length or negative: unsafe arguments. __ j(above, &runtime); // Return original string. - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1); __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); __ bind(¬_original_string); @@ -3352,7 +3306,7 @@ // Handle external string. // Rule out short external strings. - STATIC_CHECK(kShortExternalStringTag != 0); + STATIC_ASSERT(kShortExternalStringTag != 0); __ testb(rbx, Immediate(kShortExternalStringMask)); __ j(not_zero, &runtime); __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset)); @@ -3370,10 +3324,9 @@ // rax: result string // rcx: result string length - __ movp(r14, rsi); // esi used by following code. { // Locate character of sub string start. SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1); - __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, + __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale, SeqOneByteString::kHeaderSize - kHeapObjectTag)); } // Locate first character of result. @@ -3381,11 +3334,10 @@ // rax: result string // rcx: result length - // rdi: first character of result + // r14: first character of result // rsi: character of sub string start - // r14: original value of rsi - StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true); - __ movp(rsi, r14); // Restore rsi. + StringHelper::GenerateCopyCharacters( + masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING); __ IncrementCounter(counters->sub_string_native(), 1); __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); @@ -3395,10 +3347,9 @@ // rax: result string // rcx: result string length - __ movp(r14, rsi); // esi used by following code. { // Locate character of sub string start. 
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2); - __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, + __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale, SeqOneByteString::kHeaderSize - kHeapObjectTag)); } // Locate first character of result. @@ -3407,16 +3358,15 @@ // rax: result string // rcx: result length // rdi: first character of result - // rsi: character of sub string start - // r14: original value of rsi - StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false); - __ movp(rsi, r14); // Restore esi. + // r14: character of sub string start + StringHelper::GenerateCopyCharacters( + masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING); __ IncrementCounter(counters->sub_string_native(), 1); __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); // Just jump to runtime to create the sub string. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubString, 3, 1); __ bind(&single_char); // rax: string @@ -3593,7 +3543,7 @@ __ cmpp(rdx, rax); __ j(not_equal, ¬_same, Label::kNear); __ Move(rax, Smi::FromInt(EQUAL)); - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->string_compare_native(), 1); __ ret(2 * kPointerSize); @@ -3613,203 +3563,7 @@ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ bind(&runtime); - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); -} - - -void ArrayPushStub::Generate(MacroAssembler* masm) { - int argc = arguments_count(); - - StackArgumentsAccessor args(rsp, argc); - if (argc == 0) { - // Noop, return the length. 
- __ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset)); - __ ret((argc + 1) * kPointerSize); - return; - } - - Isolate* isolate = masm->isolate(); - - if (argc != 1) { - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - Label call_builtin, attempt_to_grow_elements, with_write_barrier; - - // Get the elements array of the object. - __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check that the elements are in fast mode and writable. - __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset), - isolate->factory()->fixed_array_map()); - __ j(not_equal, &call_builtin); - } - - // Get the array's length into rax and calculate new length. - __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset)); - STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue); - __ addl(rax, Immediate(argc)); - - // Get the elements' length into rcx. - __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset)); - - // Check if we could survive without allocation. - __ cmpl(rax, rcx); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - __ j(greater, &attempt_to_grow_elements); - - // Check if value is a smi. - __ movp(rcx, args.GetArgumentOperand(1)); - __ JumpIfNotSmi(rcx, &with_write_barrier); - - // Store the value. - __ movp(FieldOperand(rdi, - rax, - times_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize), - rcx); - } else { - __ j(greater, &call_builtin); - - __ movp(rcx, args.GetArgumentOperand(1)); - __ StoreNumberToDoubleElements( - rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize); - } - - // Save new length. - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); - - __ Integer32ToSmi(rax, rax); // Return new length as smi. 
- __ ret((argc + 1) * kPointerSize); - - if (IsFastDoubleElementsKind(elements_kind())) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ bind(&with_write_barrier); - - if (IsFastSmiElementsKind(elements_kind())) { - if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); - - __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), - isolate->factory()->heap_number_map()); - __ j(equal, &call_builtin); - - ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) - ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - __ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX)); - __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset)); - __ movp(rbx, ContextOperand(rbx, Context::JS_ARRAY_MAPS_INDEX)); - const int header_size = FixedArrayBase::kHeaderSize; - // Verify that the object can be transitioned in place. - const int origin_offset = header_size + elements_kind() * kPointerSize; - __ movp(rdi, FieldOperand(rbx, origin_offset)); - __ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset)); - __ j(not_equal, &call_builtin); - - const int target_offset = header_size + target_kind * kPointerSize; - __ movp(rbx, FieldOperand(rbx, target_offset)); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - masm, DONT_TRACK_ALLOCATION_SITE, NULL); - __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); - } - - // Save new length. - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); - - // Store the value. - __ leap(rdx, FieldOperand(rdi, - rax, times_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize)); - __ movp(Operand(rdx, 0), rcx); - - __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - - __ Integer32ToSmi(rax, rax); // Return new length as smi. 
- __ ret((argc + 1) * kPointerSize); - - __ bind(&attempt_to_grow_elements); - if (!FLAG_inline_new) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ movp(rbx, args.GetArgumentOperand(1)); - // Growing elements that are SMI-only requires special handling in case the - // new element is non-Smi. For now, delegate to the builtin. - Label no_fast_elements_check; - __ JumpIfSmi(rbx, &no_fast_elements_check); - __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset)); - __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar); - __ bind(&no_fast_elements_check); - - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); - - const int kAllocationDelta = 4; - ASSERT(kAllocationDelta >= argc); - // Load top. - __ Load(rcx, new_space_allocation_top); - - // Check if it's the end of elements. - __ leap(rdx, FieldOperand(rdi, - rax, times_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize)); - __ cmpp(rdx, rcx); - __ j(not_equal, &call_builtin); - __ addp(rcx, Immediate(kAllocationDelta * kPointerSize)); - Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit); - __ cmpp(rcx, limit_operand); - __ j(above, &call_builtin); - - // We fit and could grow elements. - __ Store(new_space_allocation_top, rcx); - - // Push the argument... - __ movp(Operand(rdx, 0), rbx); - // ... and fill the rest with holes. 
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); - for (int i = 1; i < kAllocationDelta; i++) { - __ movp(Operand(rdx, i * kPointerSize), kScratchRegister); - } - - if (IsFastObjectElementsKind(elements_kind())) { - // We know the elements array is in new space so we don't need the - // remembered set, but we just pushed a value onto it so we may have to tell - // the incremental marker to rescan the object that we just grew. We don't - // need to worry about the holes because they are in old space and already - // marked black. - __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET); - } - - // Restore receiver to rdx as finish sequence assumes it's here. - __ movp(rdx, args.GetReceiverOperand()); - - // Increment element's and array's sizes. - __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset), - Smi::FromInt(kAllocationDelta)); - - // Make new length a smi before returning it. - __ Integer32ToSmi(rax, rax); - __ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax); - - __ ret((argc + 1) * kPointerSize); - - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } @@ -3819,31 +3573,30 @@ // -- rax : right // -- rsp[0] : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); // Load rcx with the allocation site. We stick an undefined dummy value here // and replace it with the real allocation site later when we instantiate this // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ Move(rcx, handle(isolate->heap()->undefined_value())); + __ Move(rcx, handle(isolate()->heap()->undefined_value())); // Make sure that we actually patched the allocation site. 
if (FLAG_debug_code) { __ testb(rcx, Immediate(kSmiTagMask)); __ Assert(not_equal, kExpectedAllocationSite); __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), - isolate->factory()->allocation_site_map()); + isolate()->factory()->allocation_site_map()); __ Assert(equal, kExpectedAllocationSite); } // Tail call into the stub that handles binary operations with allocation // sites. - BinaryOpWithAllocationSiteStub stub(state_); + BinaryOpWithAllocationSiteStub stub(isolate(), state_); __ TailCallStub(&stub); } void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMI); + DCHECK(state_ == CompareIC::SMI); Label miss; __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear); @@ -3867,7 +3620,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::NUMBER); + DCHECK(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; @@ -3883,7 +3636,7 @@ // Load left and right operand. Label done, left, left_smi, right_smi; __ JumpIfSmi(rax, &right_smi, Label::kNear); - __ CompareMap(rax, masm->isolate()->factory()->heap_number_map()); + __ CompareMap(rax, isolate()->factory()->heap_number_map()); __ j(not_equal, &maybe_undefined1, Label::kNear); __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); __ jmp(&left, Label::kNear); @@ -3893,7 +3646,7 @@ __ bind(&left); __ JumpIfSmi(rdx, &left_smi, Label::kNear); - __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map()); + __ CompareMap(rdx, isolate()->factory()->heap_number_map()); __ j(not_equal, &maybe_undefined2, Label::kNear); __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); __ jmp(&done); @@ -3918,13 +3671,13 @@ __ bind(&unordered); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, CompareIC::GENERIC); - __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + __ 
jmp(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { - __ Cmp(rax, masm->isolate()->factory()->undefined_value()); + __ Cmp(rax, isolate()->factory()->undefined_value()); __ j(not_equal, &miss); __ JumpIfSmi(rdx, &unordered); __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx); @@ -3934,7 +3687,7 @@ __ bind(&maybe_undefined2); if (Token::IsOrderedRelationalCompareOp(op_)) { - __ Cmp(rdx, masm->isolate()->factory()->undefined_value()); + __ Cmp(rdx, isolate()->factory()->undefined_value()); __ j(equal, &unordered); } @@ -3944,8 +3697,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::INTERNALIZED_STRING); - ASSERT(GetCondition() == equal); + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); + DCHECK(GetCondition() == equal); // Registers containing left and right operands respectively. Register left = rdx; @@ -3973,7 +3726,7 @@ __ cmpp(left, right); // Make sure rax is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(rax)); + DCHECK(right.is(rax)); __ j(not_equal, &done, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -3987,8 +3740,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::UNIQUE_NAME); - ASSERT(GetCondition() == equal); + DCHECK(state_ == CompareIC::UNIQUE_NAME); + DCHECK(GetCondition() == equal); // Registers containing left and right operands respectively. Register left = rdx; @@ -4016,7 +3769,7 @@ __ cmpp(left, right); // Make sure rax is non-zero. At this point input operands are // guaranteed to be non-zero. 
- ASSERT(right.is(rax)); + DCHECK(right.is(rax)); __ j(not_equal, &done, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -4030,7 +3783,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + DCHECK(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -4081,7 +3834,7 @@ __ j(not_zero, &do_compare, Label::kNear); // Make sure rax is non-zero. At this point input operands are // guaranteed to be non-zero. - ASSERT(right.is(rax)); + DCHECK(right.is(rax)); __ ret(0); __ bind(&do_compare); } @@ -4108,7 +3861,7 @@ if (equality) { __ TailCallRuntime(Runtime::kStringEquals, 2, 1); } else { - __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } __ bind(&miss); @@ -4117,7 +3870,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + DCHECK(state_ == CompareIC::OBJECT); Label miss; Condition either_smi = masm->CheckEitherSmi(rdx, rax); __ j(either_smi, &miss, Label::kNear); @@ -4127,7 +3880,7 @@ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx); __ j(not_equal, &miss, Label::kNear); - ASSERT(GetCondition() == equal); + DCHECK(GetCondition() == equal); __ subp(rax, rdx); __ ret(0); @@ -4160,7 +3913,7 @@ { // Call the runtime system in a fresh internal frame. 
ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); FrameScope scope(masm, StackFrame::INTERNAL); __ Push(rdx); @@ -4187,7 +3940,7 @@ Register properties, Handle<Name> name, Register r0) { - ASSERT(name->IsUniqueName()); + DCHECK(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -4204,12 +3957,12 @@ Immediate(name->Hash() + NameDictionary::GetProbeOffset(i))); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ leap(index, Operand(index, index, times_2, 0)); // index *= 3. Register entity_name = r0; // Having undefined at this place means the name is not contained. - ASSERT_EQ(kSmiTagSize, 1); + DCHECK_EQ(kSmiTagSize, 1); __ movp(entity_name, Operand(properties, index, times_pointer_size, @@ -4233,7 +3986,8 @@ __ bind(&good); } - NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0, + NEGATIVE_LOOKUP); __ Push(Handle<Object>(name)); __ Push(Immediate(name->Hash())); __ CallStub(&stub); @@ -4254,10 +4008,10 @@ Register name, Register r0, Register r1) { - ASSERT(!elements.is(r0)); - ASSERT(!elements.is(r1)); - ASSERT(!name.is(r0)); - ASSERT(!name.is(r1)); + DCHECK(!elements.is(r0)); + DCHECK(!elements.is(r1)); + DCHECK(!name.is(r0)); + DCHECK(!name.is(r1)); __ AssertName(name); @@ -4274,7 +4028,7 @@ __ andp(r1, r0); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 // Check if the key is identical to the name. 
@@ -4283,7 +4037,8 @@ __ j(equal, done); } - NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1, + POSITIVE_LOOKUP); __ Push(name); __ movl(r0, FieldOperand(name, Name::kHashFieldOffset)); __ shrl(r0, Immediate(Name::kHashShift)); @@ -4335,7 +4090,7 @@ __ andp(scratch, Operand(rsp, 0)); // Scale the index by multiplying by the entry size. - ASSERT(NameDictionary::kEntrySize == 3); + DCHECK(NameDictionary::kEntrySize == 3); __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. // Having undefined at this place means the name is not contained. @@ -4344,7 +4099,7 @@ times_pointer_size, kElementsStartOffset - kHeapObjectTag)); - __ Cmp(scratch, masm->isolate()->factory()->undefined_value()); + __ Cmp(scratch, isolate()->factory()->undefined_value()); __ j(equal, ¬_in_dictionary); // Stop if found the property. @@ -4387,15 +4142,10 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); - StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); -} - - -bool CodeStub::CanUseFPRegisters() { - return true; // Always have SSE2 on x64. + StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); + stub1.GetCode(); + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); } @@ -4482,21 +4232,20 @@ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); Register address = arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address(); - ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(arg_reg_1)); + DCHECK(!address.is(regs_.object())); + DCHECK(!address.is(arg_reg_1)); __ Move(address, regs_.address()); __ Move(arg_reg_1, regs_.object()); // TODO(gc) Can we just set address arg2 in the beginning? 
__ Move(arg_reg_2, address); __ LoadAddress(arg_reg_3, - ExternalReference::isolate_address(masm->isolate())); + ExternalReference::isolate_address(isolate())); int argument_count = 3; AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count); __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), + ExternalReference::incremental_marking_record_write_function(isolate()), argument_count); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); } @@ -4674,8 +4423,8 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); - __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1, kSaveFPRegs); + __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; __ movp(rbx, MemOperand(rbp, parameter_count_offset)); @@ -4691,7 +4440,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { - ProfileEntryHookStub stub; + ProfileEntryHookStub stub(masm->isolate()); masm->CallStub(&stub); } } @@ -4716,7 +4465,7 @@ masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2); // Call the entry hook function. 
- __ Move(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()), + __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()), Assembler::RelocInfoNone()); AllowExternalCallThatCantCauseGC scope(masm); @@ -4738,7 +4487,7 @@ static void CreateArrayDispatch(MacroAssembler* masm, AllocationSiteOverrideMode mode) { if (mode == DISABLE_ALLOCATION_SITES) { - T stub(GetInitialFastElementsKind(), mode); + T stub(masm->isolate(), GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { int last_index = GetSequenceIndexFromFastElementsKind( @@ -4748,7 +4497,7 @@ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmpl(rdx, Immediate(kind)); __ j(not_equal, &next); - T stub(kind); + T stub(masm->isolate(), kind); __ TailCallStub(&stub); __ bind(&next); } @@ -4775,12 +4524,12 @@ Label normal_sequence; if (mode == DONT_OVERRIDE) { - ASSERT(FAST_SMI_ELEMENTS == 0); - ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - ASSERT(FAST_ELEMENTS == 2); - ASSERT(FAST_HOLEY_ELEMENTS == 3); - ASSERT(FAST_DOUBLE_ELEMENTS == 4); - ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + DCHECK(FAST_SMI_ELEMENTS == 0); + DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); + DCHECK(FAST_ELEMENTS == 2); + DCHECK(FAST_HOLEY_ELEMENTS == 3); + DCHECK(FAST_DOUBLE_ELEMENTS == 4); + DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); // is the low bit set? If so, we are holey and that is good. 
__ testb(rdx, Immediate(1)); @@ -4797,12 +4546,14 @@ ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); - ArraySingleArgumentConstructorStub stub_holey(holey_initial, + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); __ bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(initial, + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -4832,7 +4583,7 @@ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmpl(rdx, Immediate(kind)); __ j(not_equal, &next); - ArraySingleArgumentConstructorStub stub(kind); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); __ TailCallStub(&stub); __ bind(&next); } @@ -4851,11 +4602,11 @@ TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); - stub.GetCode(isolate); + T stub(isolate, kind); + stub.GetCode(); if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { - T stub1(kind, DISABLE_ALLOCATION_SITES); - stub1.GetCode(isolate); + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); } } } @@ -4876,12 +4627,12 @@ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things - InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); - stubh1.GetCode(isolate); - InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); - stubh2.GetCode(isolate); - InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); - stubh3.GetCode(isolate); + InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub 
stubh3(isolate, kinds[i]); + stubh3.GetCode(); } } @@ -4964,7 +4715,7 @@ __ testp(rax, rax); __ j(not_zero, ¬_zero_case); - InternalArrayNoArgumentConstructorStub stub0(kind); + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); __ TailCallStub(&stub0); __ bind(¬_zero_case); @@ -4980,16 +4731,16 @@ __ j(zero, &normal_sequence); InternalArraySingleArgumentConstructorStub - stub1_holey(GetHoleyElementsKind(kind)); + stub1_holey(isolate(), GetHoleyElementsKind(kind)); __ TailCallStub(&stub1_holey); } __ bind(&normal_sequence); - InternalArraySingleArgumentConstructorStub stub1(kind); + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); __ TailCallStub(&stub1); __ bind(¬_one_case); - InternalArrayNArgumentsConstructorStub stubN(kind); + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); __ TailCallStub(&stubN); } @@ -5023,8 +4774,7 @@ // but the following masking takes care of that anyway. __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. - __ andp(rcx, Immediate(Map::kElementsKindMask)); - __ shr(rcx, Immediate(Map::kElementsKindShift)); + __ DecodeField<Map::ElementsKindBits>(rcx); if (FLAG_debug_code) { Label done; @@ -5105,7 +4855,7 @@ __ Push(scratch); // isolate __ Move(scratch, - ExternalReference::isolate_address(masm->isolate())); + ExternalReference::isolate_address(isolate())); __ Push(scratch); // holder __ Push(holder); @@ -5138,12 +4888,13 @@ // It's okay if api_function_address == callback_arg // but not arguments_arg - ASSERT(!api_function_address.is(arguments_arg)); + DCHECK(!api_function_address.is(arguments_arg)); // v8::InvocationCallback's argument. __ leap(arguments_arg, StackSpaceOperand(0)); - Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); // Accessor for FunctionCallbackInfo and first js arg. 
StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1, @@ -5155,7 +4906,7 @@ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset); __ CallApiFunctionAndReturn( api_function_address, - thunk_address, + thunk_ref, callback_arg, argc + FCA::kArgsLength + 1, return_value_operand, @@ -5202,11 +4953,12 @@ // could be used to pass arguments. __ leap(accessor_info_arg, StackSpaceOperand(0)); - Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); // It's okay if api_function_address == getter_arg // but not accessor_info_arg or name_arg - ASSERT(!api_function_address.is(accessor_info_arg) && + DCHECK(!api_function_address.is(accessor_info_arg) && !api_function_address.is(name_arg)); // The name handler is counted as an argument. @@ -5215,7 +4967,7 @@ PropertyCallbackArguments::kArgsLength - 1 - PropertyCallbackArguments::kReturnValueOffset); __ CallApiFunctionAndReturn(api_function_address, - thunk_address, + thunk_ref, getter_arg, kStackSpace, return_value_operand, diff -Nru nodejs-0.11.13/deps/v8/src/x64/code-stubs-x64.h nodejs-0.11.15/deps/v8/src/x64/code-stubs-x64.h --- nodejs-0.11.13/deps/v8/src/x64/code-stubs-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/code-stubs-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_X64_CODE_STUBS_X64_H_ #define V8_X64_CODE_STUBS_X64_H_ -#include "ic-inl.h" +#include "src/ic-inl.h" namespace v8 { namespace internal { @@ -38,8 +15,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub { public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) { } void Generate(MacroAssembler* masm); @@ -49,8 +26,8 @@ private: SaveFPRegsMode save_doubles_; - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } }; @@ -59,11 +36,11 @@ // Generate code for copying characters using the rep movs instruction. 
// Copies rcx characters from rsi to rdi. Copying of overlapping regions is // not supported. - static void GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, // Must be rdi. - Register src, // Must be rsi. - Register count, // Must be rcx. - bool ascii); + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + String::Encoding encoding); // Generate string hash. @@ -86,11 +63,11 @@ class SubStringStub: public PlatformCodeStub { public: - SubStringStub() {} + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } void Generate(MacroAssembler* masm); }; @@ -98,7 +75,7 @@ class StringCompareStub: public PlatformCodeStub { public: - StringCompareStub() {} + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {} // Compares two flat ASCII strings and returns result in rax. 
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, @@ -118,8 +95,8 @@ Register scratch2); private: - virtual Major MajorKey() { return StringCompare; } - virtual int MinorKey() { return 0; } + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } virtual void Generate(MacroAssembler* masm); static void GenerateAsciiCharsCompareLoop( @@ -137,11 +114,16 @@ public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - NameDictionaryLookupStub(Register dictionary, + NameDictionaryLookupStub(Isolate* isolate, + Register dictionary, Register result, Register index, LookupMode mode) - : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { } + : PlatformCodeStub(isolate), + dictionary_(dictionary), + result_(result), + index_(index), + mode_(mode) { } void Generate(MacroAssembler* masm); @@ -174,9 +156,9 @@ NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return NameDictionaryLookup; } + Major MajorKey() const { return NameDictionaryLookup; } - int MinorKey() { + int MinorKey() const { return DictionaryBits::encode(dictionary_.code()) | ResultBits::encode(result_.code()) | IndexBits::encode(index_.code()) | @@ -197,12 +179,14 @@ class RecordWriteStub: public PlatformCodeStub { public: - RecordWriteStub(Register object, + RecordWriteStub(Isolate* isolate, + Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) - : object_(object), + : PlatformCodeStub(isolate), + object_(object), value_(value), address_(address), remembered_set_action_(remembered_set_action), @@ -234,13 +218,13 @@ return INCREMENTAL; } - ASSERT(first_instruction == kTwoByteNopInstruction); + DCHECK(first_instruction == kTwoByteNopInstruction); if (second_instruction == kFiveByteJumpInstruction) { return INCREMENTAL_COMPACTION; } - ASSERT(second_instruction == kFiveByteNopInstruction); + 
DCHECK(second_instruction == kFiveByteNopInstruction); return STORE_BUFFER_ONLY; } @@ -248,23 +232,23 @@ static void Patch(Code* stub, Mode mode) { switch (mode) { case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || + DCHECK(GetMode(stub) == INCREMENTAL || GetMode(stub) == INCREMENTAL_COMPACTION); stub->instruction_start()[0] = kTwoByteNopInstruction; stub->instruction_start()[2] = kFiveByteNopInstruction; break; case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); stub->instruction_start()[0] = kTwoByteJumpInstruction; break; case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); stub->instruction_start()[0] = kTwoByteNopInstruction; stub->instruction_start()[2] = kFiveByteJumpInstruction; break; } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 7); + DCHECK(GetMode(stub) == mode); + CpuFeatures::FlushICache(stub->instruction_start(), 7); } private: @@ -282,7 +266,7 @@ object_(object), address_(address), scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); + DCHECK(!AreAliased(scratch0, object, address, no_reg)); scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_); if (scratch0.is(rcx)) { scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_); @@ -293,15 +277,15 @@ if (address.is(rcx)) { address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_); } - ASSERT(!AreAliased(scratch0_, object_, address_, rcx)); + DCHECK(!AreAliased(scratch0_, object_, address_, rcx)); } void Save(MacroAssembler* masm) { - ASSERT(!address_orig_.is(object_)); - ASSERT(object_.is(object_orig_) || address_.is(address_orig_)); - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); - ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); - ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); + DCHECK(!address_orig_.is(object_)); + 
DCHECK(object_.is(object_orig_) || address_.is(address_orig_)); + DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); // We don't have to save scratch0_orig_ because it was given to us as // a scratch register. But if we had to switch to a different reg then // we should save the new scratch0_. @@ -403,9 +387,9 @@ Mode mode); void InformIncrementalMarker(MacroAssembler* masm); - Major MajorKey() { return RecordWrite; } + Major MajorKey() const { return RecordWrite; } - int MinorKey() { + int MinorKey() const { return ObjectBits::encode(object_.code()) | ValueBits::encode(value_.code()) | AddressBits::encode(address_.code()) | diff -Nru nodejs-0.11.13/deps/v8/src/x64/cpu-x64.cc nodejs-0.11.15/deps/v8/src/x64/cpu-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/cpu-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/cpu-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,57 +1,24 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // CPU specific code for x64 independent of OS goes here. #if defined(__GNUC__) && !defined(__MINGW64__) -#include "third_party/valgrind/valgrind.h" +#include "src/third_party/valgrind/valgrind.h" #endif -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "cpu.h" -#include "macro-assembler.h" +#include "src/assembler.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { -void CPU::SetUp() { - CpuFeatures::Probe(); -} - - -bool CPU::SupportsCrankshaft() { - return true; // Yay! -} - - -void CPU::FlushICache(void* start, size_t size) { +void CpuFeatures::FlushICache(void* start, size_t size) { // No need to flush the instruction cache on Intel. On Intel instruction // cache flushing is only necessary when multiple cores running the same // code simultaneously. 
V8 (and JavaScript) is single threaded and when code diff -Nru nodejs-0.11.13/deps/v8/src/x64/debug-x64.cc nodejs-0.11.15/deps/v8/src/x64/debug-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/debug-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/debug-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,44 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "assembler.h" -#include "codegen.h" -#include "debug.h" +#include "src/assembler.h" +#include "src/codegen.h" +#include "src/debug.h" namespace v8 { namespace internal { -#ifdef ENABLE_DEBUGGER_SUPPORT - bool BreakLocationIterator::IsDebugBreakAtReturn() { return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -48,9 +23,9 @@ // CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc // for the precise return instructions sequence. void BreakLocationIterator::SetDebugBreakAtReturn() { - ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength); + DCHECK(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength); rinfo()->PatchCodeWithCall( - debug_info_->GetIsolate()->debug()->debug_break_return()->entry(), + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(), Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength); } @@ -65,33 +40,31 @@ // A debug break in the frame exit code is identified by the JS frame exit code // having been patched with a call instruction. bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) { - ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); return rinfo->IsPatchedReturnSequence(); } bool BreakLocationIterator::IsDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); // Check whether the debug break slot instructions have been patched. 
- return !Assembler::IsNop(rinfo()->pc()); + return rinfo()->IsPatchedDebugBreakSlotSequence(); } void BreakLocationIterator::SetDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); rinfo()->PatchCodeWithCall( - debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(), + debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(), Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength); } void BreakLocationIterator::ClearDebugBreakAtSlot() { - ASSERT(IsDebugBreakSlot()); + DCHECK(IsDebugBreakSlot()); rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength); } -const bool Debug::FramePaddingLayout::kIsSupported = true; - #define __ ACCESS_MASM(masm) @@ -105,26 +78,26 @@ FrameScope scope(masm, StackFrame::INTERNAL); // Load padding words on stack. - for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) { - __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue)); + for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) { + __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue)); } - __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)); + __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)); // Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values // are stored as as two smis causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); + DCHECK((object_regs & ~kJSCallerSaved) == 0); + DCHECK((non_object_regs & ~kJSCallerSaved) == 0); + DCHECK((object_regs & non_object_regs) == 0); for (int i = 0; i < kNumJSCallerSaved; i++) { int r = JSCallerSavedCode(i); Register reg = { r }; - ASSERT(!reg.is(kScratchRegister)); + DCHECK(!reg.is(kScratchRegister)); if ((object_regs & (1 << r)) != 0) { __ Push(reg); } if ((non_object_regs & (1 << r)) != 0) { - __ PushInt64AsTwoSmis(reg); + __ PushRegisterAsTwoSmis(reg); } } @@ -134,7 +107,7 @@ __ Set(rax, 0); // No arguments (argc == 0). __ Move(rbx, ExternalReference::debug_break(masm->isolate())); - CEntryStub ceb(1); + CEntryStub ceb(masm->isolate(), 1); __ CallStub(&ceb); // Restore the register values from the expression stack. @@ -149,7 +122,7 @@ } // Reconstruct the 64-bit value from two smis. if ((non_object_regs & (1 << r)) != 0) { - __ PopInt64AsTwoSmis(reg); + __ PopRegisterAsTwoSmis(reg); } } @@ -171,57 +144,57 @@ // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. ExternalReference after_break_target = - ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate()); + ExternalReference::debug_after_break_target_address(masm->isolate()); __ Move(kScratchRegister, after_break_target); __ Jump(Operand(kScratchRegister, 0)); } -void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { - // Register state for IC load call (from ic-x64.cc). 
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name + // -- rdx : type feedback slot (smi) + // -- rdi : function // ----------------------------------- - Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false); + Generate_DebugBreakCallHelper(masm, rdx.bit() | rdi.bit(), 0, false); +} + + +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + // Register state for IC load call (from ic-x64.cc). + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false); } -void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { // Register state for IC store call (from ic-x64.cc). - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // ----------------------------------- + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); Generate_DebugBreakCallHelper( - masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false); + masm, receiver.bit() | name.bit() | value.bit(), 0, false); } -void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { // Register state for keyed IC load call (from ic-x64.cc). - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false); + GenerateLoadICDebugBreak(masm); } -void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { - // Register state for keyed IC load call (from ic-x64.cc). 
- // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // ----------------------------------- +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Register state for keyed IC store call (from ic-x64.cc). + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); Generate_DebugBreakCallHelper( - masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false); + masm, receiver.bit() | name.bit() | value.bit(), 0, false); } -void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { // Register state for CompareNil IC // ----------- S t a t e ------------- // -- rax : value @@ -230,16 +203,7 @@ } -void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { - // Register state for IC call call (from ic-x64.cc) - // ----------- S t a t e ------------- - // -- rcx: function name - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false); -} - - -void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { // Register state just before return from JS function (from codegen-x64.cc). // ----------- S t a t e ------------- // -- rax: return value @@ -248,7 +212,7 @@ } -void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { // Register state for CallFunctionStub (from code-stubs-x64.cc). // ----------- S t a t e ------------- // -- rdi : function @@ -257,19 +221,7 @@ } -void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) { - // Register state for CallFunctionStub (from code-stubs-x64.cc). 
- // ----------- S t a t e ------------- - // -- rdi : function - // -- rbx: feedback array - // -- rdx: slot in feedback array - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(), - 0, false); -} - - -void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { // Register state for CallConstructStub (from code-stubs-x64.cc). // rax is the actual number of arguments not encoded as a smi, see comment // above IC call. @@ -281,7 +233,8 @@ } -void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { // Register state for CallConstructStub (from code-stubs-x64.cc). // rax is the actual number of arguments not encoded as a smi, see comment // above IC call. @@ -296,33 +249,33 @@ } -void Debug::GenerateSlot(MacroAssembler* masm) { +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { // Generate enough nop's to make space for a call instruction. Label check_codesize; __ bind(&check_codesize); __ RecordDebugBreakSlot(); __ Nop(Assembler::kDebugBreakSlotLength); - ASSERT_EQ(Assembler::kDebugBreakSlotLength, + DCHECK_EQ(Assembler::kDebugBreakSlotLength, masm->SizeOfCodeGeneratedSince(&check_codesize)); } -void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
Generate_DebugBreakCallHelper(masm, 0, 0, true); } -void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { masm->ret(0); } -void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { ExternalReference restarter_frame_function_slot = - ExternalReference(Debug_Address::RestarterFrameFunctionPointer(), - masm->isolate()); + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); __ Move(rax, restarter_frame_function_slot); __ movp(Operand(rax, 0), Immediate(0)); @@ -344,12 +297,10 @@ __ jmp(rdx); } -const bool Debug::kFrameDropperSupported = true; +const bool LiveEdit::kFrameDropperSupported = true; #undef __ -#endif // ENABLE_DEBUGGER_SUPPORT - } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff -Nru nodejs-0.11.13/deps/v8/src/x64/deoptimizer-x64.cc nodejs-0.11.15/deps/v8/src/x64/deoptimizer-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/deoptimizer-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/deoptimizer-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "codegen.h" -#include "deoptimizer.h" -#include "full-codegen.h" -#include "safepoint-table.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" namespace v8 { namespace internal { @@ -83,9 +60,6 @@ #endif DeoptimizationInputData* deopt_data = DeoptimizationInputData::cast(code->deoptimization_data()); - SharedFunctionInfo* shared = - SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); - shared->EvictFromOptimizedCodeMap(code, "deoptimized code"); deopt_data->SetSharedFunctionInfo(Smi::FromInt(0)); // For each LLazyBailout instruction insert a call to the corresponding // deoptimization entry. 
@@ -98,9 +72,9 @@ CodePatcher patcher(call_address, Assembler::kCallSequenceLength); patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY), Assembler::RelocInfoNone()); - ASSERT(prev_call_address == NULL || + DCHECK(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); - ASSERT(call_address + patch_size() <= code->instruction_end()); + DCHECK(call_address + patch_size() <= code->instruction_end()); #ifdef DEBUG prev_call_address = call_address; #endif @@ -131,7 +105,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { intptr_t handler = - reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); + reinterpret_cast<intptr_t>(descriptor->deoptimization_handler()); int params = descriptor->GetHandlerParameterCount(); output_frame->SetRegister(rax.code(), params); output_frame->SetRegister(rbx.code(), handler); @@ -152,11 +126,6 @@ } -Code* Deoptimizer::NotifyStubFailureBuiltin() { - return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles); -} - - #define __ masm()-> void Deoptimizer::EntryGenerator::Generate() { @@ -230,7 +199,7 @@ // Fill in the input registers. for (int i = kNumberOfRegisters -1; i >= 0; i--) { int offset = (i * kPointerSize) + FrameDescription::registers_offset(); - __ Pop(Operand(rbx, offset)); + __ PopQuad(Operand(rbx, offset)); } // Fill in the double input registers. @@ -307,13 +276,13 @@ // Push state, pc, and continuation from the last output frame. __ Push(Operand(rbx, FrameDescription::state_offset())); - __ Push(Operand(rbx, FrameDescription::pc_offset())); - __ Push(Operand(rbx, FrameDescription::continuation_offset())); + __ PushQuad(Operand(rbx, FrameDescription::pc_offset())); + __ PushQuad(Operand(rbx, FrameDescription::continuation_offset())); // Push the registers from the last output frame. 
for (int i = 0; i < kNumberOfRegisters; i++) { int offset = (i * kPointerSize) + FrameDescription::registers_offset(); - __ Push(Operand(rbx, offset)); + __ PushQuad(Operand(rbx, offset)); } // Restore the registers from the stack. @@ -322,7 +291,7 @@ // Do not restore rsp, simply pop the value into the next register // and overwrite this afterwards. if (r.is(rsp)) { - ASSERT(i > 0); + DCHECK(i > 0); r = Register::from_code(i - 1); } __ popq(r); @@ -345,18 +314,26 @@ USE(start); __ pushq_imm32(i); __ jmp(&done); - ASSERT(masm()->pc_offset() - start == table_entry_size_); + DCHECK(masm()->pc_offset() - start == table_entry_size_); } __ bind(&done); } void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + if (kPCOnStackSize == 2 * kPointerSize) { + // Zero out the high-32 bit of PC for x32 port. + SetFrameSlot(offset + kPointerSize, 0); + } SetFrameSlot(offset, value); } void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + if (kFPOnStackSize == 2 * kPointerSize) { + // Zero out the high-32 bit of FP for x32 port. + SetFrameSlot(offset + kPointerSize, 0); + } SetFrameSlot(offset, value); } diff -Nru nodejs-0.11.13/deps/v8/src/x64/disasm-x64.cc nodejs-0.11.15/deps/v8/src/x64/disasm-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/disasm-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/disasm-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,40 +1,17 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#include <assert.h> -#include <stdio.h> #include <stdarg.h> +#include <stdio.h> -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "disasm.h" -#include "lazy-instance.h" +#include "src/base/lazy-instance.h" +#include "src/disasm.h" namespace disasm { @@ -239,7 +216,7 @@ OperandType op_order = bm[i].op_order_; id->op_order_ = static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG); - ASSERT_EQ(NO_INSTR, id->type); // Information not already entered + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered id->type = type; id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0); } @@ -253,7 +230,7 @@ const char* mnem) { for (byte b = start; b <= end; b++) { InstructionDesc* id = &instructions_[b]; - ASSERT_EQ(NO_INSTR, id->type); // Information not already entered + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered id->mnem = mnem; id->type = type; id->byte_size_operation = byte_size; @@ -264,14 +241,14 @@ void InstructionTable::AddJumpConditionalShort() { for (byte b = 0x70; b <= 0x7F; b++) { InstructionDesc* id = &instructions_[b]; - ASSERT_EQ(NO_INSTR, id->type); // Information not already entered + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered id->mnem = NULL; // Computed depending on condition code. 
id->type = JUMP_CONDITIONAL_SHORT_INSTR; } } -static v8::internal::LazyInstance<InstructionTable>::type instruction_table = +static v8::base::LazyInstance<InstructionTable>::type instruction_table = LAZY_INSTANCE_INITIALIZER; @@ -351,7 +328,7 @@ const InstructionTable* const instruction_table_; void setRex(byte rex) { - ASSERT_EQ(0x40, rex & 0xF0); + DCHECK_EQ(0x40, rex & 0xF0); rex_ = rex; } @@ -453,7 +430,7 @@ v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_; va_list args; va_start(args, format); - int result = v8::internal::OS::VSNPrintF(buf, format, args); + int result = v8::internal::VSNPrintF(buf, format, args); va_end(args); tmp_buffer_pos_ += result; } @@ -684,7 +661,7 @@ // Returns number of bytes used, including *data. int DisassemblerX64::F6F7Instruction(byte* data) { - ASSERT(*data == 0xF7 || *data == 0xF6); + DCHECK(*data == 0xF7 || *data == 0xF6); byte modrm = *(data + 1); int mod, regop, rm; get_modrm(modrm, &mod, ®op, &rm); @@ -703,6 +680,9 @@ case 5: mnem = "imul"; break; + case 6: + mnem = "div"; + break; case 7: mnem = "idiv"; break; @@ -770,7 +750,7 @@ UnimplementedInstruction(); return num_bytes; } - ASSERT_NE(NULL, mnem); + DCHECK_NE(NULL, mnem); if (op == 0xD0) { imm8 = 1; } else if (op == 0xC0) { @@ -793,7 +773,7 @@ // Returns number of bytes used, including *data. int DisassemblerX64::JumpShort(byte* data) { - ASSERT_EQ(0xEB, *data); + DCHECK_EQ(0xEB, *data); byte b = *(data + 1); byte* dest = data + static_cast<int8_t>(b) + 2; AppendToBuffer("jmp %s", NameOfAddress(dest)); @@ -803,7 +783,7 @@ // Returns number of bytes used, including *data. int DisassemblerX64::JumpConditional(byte* data) { - ASSERT_EQ(0x0F, *data); + DCHECK_EQ(0x0F, *data); byte cond = *(data + 1) & 0x0F; byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6; const char* mnem = conditional_code_suffix[cond]; @@ -825,7 +805,7 @@ // Returns number of bytes used, including *data. 
int DisassemblerX64::SetCC(byte* data) { - ASSERT_EQ(0x0F, *data); + DCHECK_EQ(0x0F, *data); byte cond = *(data + 1) & 0x0F; const char* mnem = conditional_code_suffix[cond]; AppendToBuffer("set%s%c ", mnem, operand_size_code()); @@ -837,7 +817,7 @@ // Returns number of bytes used, including *data. int DisassemblerX64::FPUInstruction(byte* data) { byte escape_opcode = *data; - ASSERT_EQ(0xD8, escape_opcode & 0xF8); + DCHECK_EQ(0xD8, escape_opcode & 0xF8); byte modrm_byte = *(data+1); if (modrm_byte >= 0xC0) { @@ -1091,7 +1071,7 @@ current += PrintRightXMMOperand(current); } else if (opcode == 0x73) { current += 1; - ASSERT(regop == 6); + DCHECK(regop == 6); AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f); current += 1; } else { @@ -1811,19 +1791,19 @@ } int instr_len = static_cast<int>(data - instr); - ASSERT(instr_len > 0); // Ensure progress. + DCHECK(instr_len > 0); // Ensure progress. int outp = 0; // Instruction bytes. for (byte* bp = instr; bp < data; bp++) { - outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp); + outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp); } for (int i = 6 - instr_len; i >= 0; i--) { - outp += v8::internal::OS::SNPrintF(out_buffer + outp, " "); + outp += v8::internal::SNPrintF(out_buffer + outp, " "); } - outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s", - tmp_buffer_.start()); + outp += v8::internal::SNPrintF(out_buffer + outp, " %s", + tmp_buffer_.start()); return instr_len; } @@ -1850,7 +1830,7 @@ const char* NameConverter::NameOfAddress(byte* addr) const { - v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr); + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); return tmp_buffer_.start(); } diff -Nru nodejs-0.11.13/deps/v8/src/x64/frames-x64.cc nodejs-0.11.15/deps/v8/src/x64/frames-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/frames-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/frames-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 
@@ // Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "assembler.h" -#include "assembler-x64.h" -#include "assembler-x64-inl.h" -#include "frames.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/x64/assembler-x64-inl.h" +#include "src/x64/assembler-x64.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/x64/frames-x64.h nodejs-0.11.15/deps/v8/src/x64/frames-x64.h --- nodejs-0.11.13/deps/v8/src/x64/frames-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/frames-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_X64_FRAMES_X64_H_ #define V8_X64_FRAMES_X64_H_ @@ -41,8 +18,6 @@ const int kNumJSCallerSaved = 5; -typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved]; - // Number of registers for which space is reserved in safepoints. const int kNumSafepointRegisters = 16; @@ -56,11 +31,12 @@ static const int kXMMRegistersBlockSize = kXMMRegisterSize * kCalleeSaveXMMRegisters; static const int kCallerFPOffset = - -10 * kPointerSize - kXMMRegistersBlockSize; + -3 * kPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize; #else - static const int kCallerFPOffset = -8 * kPointerSize; + // We have 3 Push and 5 pushq in the JSEntryStub::GenerateBody. + static const int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize; #endif - static const int kArgvOffset = 6 * kPointerSize; + static const int kArgvOffset = 6 * kPointerSize; }; @@ -132,6 +108,10 @@ inline void StackHandler::SetFp(Address slot, Address fp) { + if (kFPOnStackSize == 2 * kPointerSize) { + // Zero out the high-32 bit of FP for x32 port. 
+ Memory::Address_at(slot + kPointerSize) = 0; + } Memory::Address_at(slot) = fp; } diff -Nru nodejs-0.11.13/deps/v8/src/x64/full-codegen-x64.cc nodejs-0.11.15/deps/v8/src/x64/full-codegen-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/full-codegen-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/full-codegen-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,20 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "code-stubs.h" -#include "codegen.h" -#include "compiler.h" -#include "debug.h" -#include "full-codegen.h" -#include "isolate-inl.h" -#include "parser.h" -#include "scopes.h" -#include "stub-cache.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -54,7 +31,7 @@ } ~JumpPatchSite() { - ASSERT(patch_site_.is_bound() == info_emitted_); + DCHECK(patch_site_.is_bound() == info_emitted_); } void EmitJumpIfNotSmi(Register reg, @@ -74,7 +51,7 @@ void EmitPatchInfo() { if (patch_site_.is_bound()) { int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_); - ASSERT(is_int8(delta_to_patch_site)); + DCHECK(is_uint8(delta_to_patch_site)); __ testl(rax, Immediate(delta_to_patch_site)); #ifdef DEBUG info_emitted_ = true; @@ -87,8 +64,8 @@ private: // jc will be patched with jz, jnc will become jnz. 
void EmitJump(Condition cc, Label* target, Label::Distance near_jump) { - ASSERT(!patch_site_.is_bound() && !info_emitted_); - ASSERT(cc == carry || cc == not_carry); + DCHECK(!patch_site_.is_bound() && !info_emitted_); + DCHECK(cc == carry || cc == not_carry); __ bind(&patch_site_); __ j(cc, target, near_jump); } @@ -101,23 +78,6 @@ }; -static void EmitStackCheck(MacroAssembler* masm_, - int pointers = 0, - Register scratch = rsp) { - Isolate* isolate = masm_->isolate(); - Label ok; - ASSERT(scratch.is(rsp) == (pointers == 0)); - if (pointers != 0) { - __ movq(scratch, rsp); - __ subq(scratch, Immediate(pointers * kPointerSize)); - } - __ CompareRoot(scratch, Heap::kStackLimitRootIndex); - __ j(above_equal, &ok, Label::kNear); - __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET); - __ bind(&ok); -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right, with the // return address on top of them. 
The actual argument count matches the @@ -136,8 +96,6 @@ handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); - InitializeFeedbackVector(); - profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); @@ -165,7 +123,7 @@ __ j(not_equal, &ok, Label::kNear); __ movp(rcx, GlobalObjectOperand()); - __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset)); + __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset)); __ movp(args.GetReceiverOperand(), rcx); @@ -178,24 +136,30 @@ FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - __ Prologue(BUILD_FUNCTION_FRAME); + __ Prologue(info->IsCodePreAgingActive()); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = info->scope()->num_stack_slots(); // Generators allocate locals, if any, in context slots. - ASSERT(!info->function()->is_generator() || locals_count == 0); + DCHECK(!info->function()->is_generator() || locals_count == 0); if (locals_count == 1) { __ PushRoot(Heap::kUndefinedValueRootIndex); } else if (locals_count > 1) { if (locals_count >= 128) { - EmitStackCheck(masm_, locals_count, rcx); + Label ok; + __ movp(rcx, rsp); + __ subp(rcx, Immediate(locals_count * kPointerSize)); + __ CompareRoot(rcx, Heap::kRealStackLimitRootIndex); + __ j(above_equal, &ok, Label::kNear); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&ok); } __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); const int kMaxPushes = 32; if (locals_count >= kMaxPushes) { int loop_iterations = locals_count / kMaxPushes; - __ movq(rcx, Immediate(loop_iterations)); + __ movp(rcx, Immediate(loop_iterations)); Label loop_header; __ bind(&loop_header); // Do pushes. @@ -203,7 +167,7 @@ __ Push(rdx); } // Continue loop if not done. 
- __ decq(rcx); + __ decp(rcx); __ j(not_zero, &loop_header, Label::kNear); } int remaining = locals_count % kMaxPushes; @@ -220,17 +184,20 @@ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is still in rdi. if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { __ Push(rdi); __ Push(info->scope()->GetScopeInfo()); - __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2); + __ CallRuntime(Runtime::kNewGlobalContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ Push(rdi); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } function_in_register = false; // Context is returned in rax. It replaces the context passed to us. @@ -251,8 +218,15 @@ int context_offset = Context::SlotOffset(var->index()); __ movp(Operand(rsi, context_offset), rax); // Update the write barrier. This clobbers rax and rbx. - __ RecordWriteContextSlot( - rsi, context_offset, rax, rbx, kDontSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot( + rsi, context_offset, rax, rbx, kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } } @@ -287,7 +261,7 @@ } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(isolate(), type); __ CallStub(&stub); SetVar(arguments, rax, rbx, rdx); @@ -310,9 +284,9 @@ // constant. 
if (scope()->is_function_scope() && scope()->function() != NULL) { VariableDeclaration* function = scope()->function(); - ASSERT(function->proxy()->var()->mode() == CONST || + DCHECK(function->proxy()->var()->mode() == CONST || function->proxy()->var()->mode() == CONST_LEGACY); - ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); @@ -320,13 +294,17 @@ { Comment cmnt(masm_, "[ Stack check"); PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); - EmitStackCheck(masm_); + Label ok; + __ CompareRoot(rsp, Heap::kStackLimitRootIndex); + __ j(above_equal, &ok, Label::kNear); + __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET); + __ bind(&ok); } { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + DCHECK(loop_depth() == 0); } } @@ -359,27 +337,35 @@ } +static const byte kJnsOffset = kPointerSize == kInt64Size ? 0x1d : 0x14; + + void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, Label* back_edge_target) { Comment cmnt(masm_, "[ Back edge bookkeeping"); Label ok; - ASSERT(back_edge_target->is_bound()); + DCHECK(back_edge_target->is_bound()); int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier)); EmitProfilingCounterDecrement(weight); - __ j(positive, &ok, Label::kNear); - __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET); - - // Record a mapping of this PC offset to the OSR id. This is used to find - // the AST id from the unoptimized code in order to use it as a key into - // the deoptimization input data found in the optimized code. 
- RecordBackEdge(stmt->OsrEntryId()); - EmitProfilingCounterReset(); + __ j(positive, &ok, Label::kNear); + { + PredictableCodeSizeScope predictible_code_size_scope(masm_, kJnsOffset); + DontEmitDebugCodeScope dont_emit_debug_code_scope(masm_); + __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET); + + // Record a mapping of this PC offset to the OSR id. This is used to find + // the AST id from the unoptimized code in order to use it as a key into + // the deoptimization input data found in the optimized code. + RecordBackEdge(stmt->OsrEntryId()); + EmitProfilingCounterReset(); + } __ bind(&ok); + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); // Record a mapping of the OSR id to this PC. This is used if the OSR // entry becomes the target of a bailout. We don't expect it to be, but @@ -432,37 +418,37 @@ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize; __ Ret(arguments_bytes, rcx); -#ifdef ENABLE_DEBUGGER_SUPPORT // Add padding that will be overwritten by a debugger breakpoint. We - // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k" - // (3 + 1 + 3). - const int kPadding = Assembler::kJSReturnSequenceLength - 7; + // have just generated at least 7 bytes: "movp rsp, rbp; pop rbp; ret k" + // (3 + 1 + 3) for x64 and at least 6 (2 + 1 + 3) bytes for x32. + const int kPadding = Assembler::kJSReturnSequenceLength - + kPointerSize == kInt64Size ? 7 : 6; for (int i = 0; i < kPadding; ++i) { masm_->int3(); } // Check that the size of the code used for returning is large enough // for the debugger's requirements. 
- ASSERT(Assembler::kJSReturnSequenceLength <= + DCHECK(Assembler::kJSReturnSequenceLength <= masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); -#endif + info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); } } void FullCodeGenerator::EffectContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); } void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); codegen()->GetVar(result_register(), var); } void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); MemOperand operand = codegen()->VarOperand(var, result_register()); __ Push(operand); } @@ -537,7 +523,7 @@ true, true_label_, false_label_); - ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. 
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { if (false_label_ != fall_through_) __ jmp(false_label_); } else if (lit->IsTrue() || lit->IsJSObject()) { @@ -564,7 +550,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); } @@ -572,7 +558,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); __ Drop(count); __ Move(result_register(), reg); } @@ -580,7 +566,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); if (count > 1) __ Drop(count - 1); __ movp(Operand(rsp, 0), reg); } @@ -588,7 +574,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, Register reg) const { - ASSERT(count > 0); + DCHECK(count > 0); // For simplicity we always test the accumulator register. __ Drop(count); __ Move(result_register(), reg); @@ -599,7 +585,7 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == materialize_false); + DCHECK(materialize_true == materialize_false); __ bind(materialize_true); } @@ -632,8 +618,8 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true, Label* materialize_false) const { - ASSERT(materialize_true == true_label_); - ASSERT(materialize_false == false_label_); + DCHECK(materialize_true == true_label_); + DCHECK(materialize_false == false_label_); } @@ -696,7 +682,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) { - ASSERT(var->IsStackAllocated()); + DCHECK(var->IsStackAllocated()); // Offset is negative because higher indexes are at lower addresses. int offset = -var->index() * kPointerSize; // Adjust by a (parameter or local) base offset. 
@@ -711,7 +697,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); if (var->IsContextSlot()) { int context_chain_length = scope()->ContextChainLength(var->scope()); __ LoadContext(scratch, context_chain_length); @@ -723,7 +709,7 @@ void FullCodeGenerator::GetVar(Register dest, Variable* var) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); MemOperand location = VarOperand(var, dest); __ movp(dest, location); } @@ -733,10 +719,10 @@ Register src, Register scratch0, Register scratch1) { - ASSERT(var->IsContextSlot() || var->IsStackAllocated()); - ASSERT(!scratch0.is(src)); - ASSERT(!scratch0.is(scratch1)); - ASSERT(!scratch1.is(src)); + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!scratch0.is(src)); + DCHECK(!scratch0.is(scratch1)); + DCHECK(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ movp(location, src); @@ -770,7 +756,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { // The variable in the declaration always resides in the current context. - ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset)); @@ -825,7 +811,7 @@ __ Push(rsi); __ Push(variable->name()); // Declaration nodes are always introduced in one of four modes. - ASSERT(IsDeclaredVariableMode(mode)); + DCHECK(IsDeclaredVariableMode(mode)); PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY : NONE; __ Push(Smi::FromInt(attr)); @@ -838,7 +824,7 @@ } else { __ Push(Smi::FromInt(0)); // Indicates no initial value. 
} - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -853,7 +839,7 @@ case Variable::UNALLOCATED: { globals_->Add(variable->name(), zone()); Handle<SharedFunctionInfo> function = - Compiler::BuildFunctionInfo(declaration->fun(), script()); + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); // Check for stack-overflow exception. if (function.is_null()) return SetStackOverflow(); globals_->Add(function, zone()); @@ -892,7 +878,7 @@ __ Push(variable->name()); __ Push(Smi::FromInt(NONE)); VisitForStackValue(declaration->fun()); - __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); break; } } @@ -901,8 +887,8 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { Variable* variable = declaration->proxy()->var(); - ASSERT(variable->location() == Variable::CONTEXT); - ASSERT(variable->interface()->IsFrozen()); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); Comment cmnt(masm_, "[ ModuleDeclaration"); EmitDebugCheckDeclarationContext(variable); @@ -962,7 +948,7 @@ __ Push(rsi); // The context is the first argument. __ Push(pairs); __ Push(Smi::FromInt(DeclareGlobalsFlags())); - __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3); + __ CallRuntime(Runtime::kDeclareGlobals, 3); // Return value is ignored. } @@ -970,7 +956,7 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { // Call the runtime to declare the modules. __ Push(descriptions); - __ CallRuntime(Runtime::kHiddenDeclareModules, 1); + __ CallRuntime(Runtime::kDeclareModules, 1); // Return value is ignored. 
} @@ -1159,15 +1145,10 @@ Label non_proxy; __ bind(&fixed_array); - Handle<Object> feedback = Handle<Object>( - Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), - isolate()); - StoreFeedbackVectorSlot(slot, feedback); - // No need for a write barrier, we are storing a Smi in the feedback vector. __ Move(rbx, FeedbackVector()); __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)), - Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)); + TypeFeedbackInfo::MegamorphicSentinel(isolate())); __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check __ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); @@ -1260,24 +1241,8 @@ Iteration loop_statement(this, stmt); increment_loop_depth(); - // var iterator = iterable[@@iterator]() - VisitForAccumulatorValue(stmt->assign_iterator()); - - // As with for-in, skip the loop if the iterator is null or undefined. - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - __ j(equal, loop_statement.break_label()); - __ CompareRoot(rax, Heap::kNullValueRootIndex); - __ j(equal, loop_statement.break_label()); - - // Convert the iterator to a JS object. - Label convert, done_convert; - __ JumpIfSmi(rax, &convert); - __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); - __ j(above_equal, &done_convert); - __ bind(&convert); - __ Push(rax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ bind(&done_convert); + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); // Loop entry. __ bind(loop_statement.continue_label()); @@ -1324,7 +1289,9 @@ !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode(), info->is_generator()); + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); __ Move(rbx, info); __ CallStub(&stub); } else { @@ -1333,7 +1300,7 @@ __ Push(pretenure ? 
isolate()->factory()->true_value() : isolate()->factory()->false_value()); - __ CallRuntime(Runtime::kHiddenNewClosure, 3); + __ CallRuntime(Runtime::kNewClosure, 3); } context()->Plug(rax); } @@ -1345,7 +1312,7 @@ } -void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, TypeofState typeof_state, Label* slow) { Register context = rsi; @@ -1396,8 +1363,13 @@ // All extension objects were empty and it is safe to use a global // load IC call. - __ movp(rax, GlobalObjectOperand()); - __ Move(rcx, var->name()); + __ movp(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ Move(LoadIC::NameRegister(), proxy->var()->name()); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), + Smi::FromInt(proxy->VariableFeedbackSlot())); + } + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL : CONTEXTUAL; @@ -1407,7 +1379,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, Label* slow) { - ASSERT(var->IsContextSlot()); + DCHECK(var->IsContextSlot()); Register context = rsi; Register temp = rbx; @@ -1435,7 +1407,7 @@ } -void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofState typeof_state, Label* slow, Label* done) { @@ -1444,8 +1416,9 @@ // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. 
+ Variable* var = proxy->var(); if (var->mode() == DYNAMIC_GLOBAL) { - EmitLoadGlobalCheckExtensions(var, typeof_state, slow); + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); __ jmp(done); } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); @@ -1458,7 +1431,7 @@ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); } else { // LET || CONST __ Push(var->name()); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } } __ jmp(done); @@ -1476,10 +1449,12 @@ switch (var->location()) { case Variable::UNALLOCATED: { Comment cmnt(masm_, "[ Global variable"); - // Use inline caching. Variable name is passed in rcx and the global - // object on the stack. - __ Move(rcx, var->name()); - __ movp(rax, GlobalObjectOperand()); + __ Move(LoadIC::NameRegister(), var->name()); + __ movp(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), + Smi::FromInt(proxy->VariableFeedbackSlot())); + } CallLoadIC(CONTEXTUAL); context()->Plug(rax); break; @@ -1496,7 +1471,7 @@ // always looked up dynamically, i.e. in that case // var->location() == LOOKUP. // always holds. - ASSERT(var->scope() != NULL); + DCHECK(var->scope() != NULL); // Check if the binding really needs an initialization check. The check // can be skipped in the following situation: we have a LET or CONST @@ -1519,8 +1494,8 @@ skip_init_check = false; } else { // Check that we always have valid source position. - ASSERT(var->initializer_position() != RelocInfo::kNoPosition); - ASSERT(proxy->position() != RelocInfo::kNoPosition); + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); skip_init_check = var->mode() != CONST_LEGACY && var->initializer_position() < proxy->position(); } @@ -1535,10 +1510,10 @@ // Throw a reference error when using an uninitialized let/const // binding in harmony mode. 
__ Push(var->name()); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); + __ CallRuntime(Runtime::kThrowReferenceError, 1); } else { // Uninitalized const bindings outside of harmony mode are unholed. - ASSERT(var->mode() == CONST_LEGACY); + DCHECK(var->mode() == CONST_LEGACY); __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); } __ bind(&done); @@ -1555,11 +1530,11 @@ Label done, slow; // Generate code for loading from variables potentially shadowed // by eval-introduced variables. - EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ Push(rsi); // Context. __ Push(var->name()); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ bind(&done); context()->Plug(rax); break; @@ -1590,7 +1565,7 @@ __ Push(Smi::FromInt(expr->literal_index())); __ Push(expr->pattern()); __ Push(expr->flags()); - __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); __ movp(rbx, rax); __ bind(&materialized); @@ -1602,7 +1577,7 @@ __ bind(&runtime_allocate); __ Push(rbx); __ Push(Smi::FromInt(size)); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ Pop(rbx); __ bind(&allocated); @@ -1643,22 +1618,22 @@ ? 
ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; int properties_count = constant_properties->length() / 2; - if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() || - flags != ObjectLiteral::kFastElements || + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset)); __ Push(Smi::FromInt(expr->literal_index())); __ Push(constant_properties); __ Push(Smi::FromInt(flags)); - __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset)); __ Move(rbx, Smi::FromInt(expr->literal_index())); __ Move(rcx, constant_properties); __ Move(rdx, Smi::FromInt(flags)); - FastCloneShallowObjectStub stub(properties_count); + FastCloneShallowObjectStub stub(isolate(), properties_count); __ CallStub(&stub); } @@ -1686,14 +1661,15 @@ case ObjectLiteral::Property::CONSTANT: UNREACHABLE(); case ObjectLiteral::Property::MATERIALIZED_LITERAL: - ASSERT(!CompileTimeValue::IsCompileTimeValue(value)); + DCHECK(!CompileTimeValue::IsCompileTimeValue(value)); // Fall through. 
case ObjectLiteral::Property::COMPUTED: if (key->value()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); - __ Move(rcx, key->value()); - __ movp(rdx, Operand(rsp, 0)); + DCHECK(StoreIC::ValueRegister().is(rax)); + __ Move(StoreIC::NameRegister(), key->value()); + __ movp(StoreIC::ReceiverRegister(), Operand(rsp, 0)); CallStoreIC(key->LiteralFeedbackId()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { @@ -1705,7 +1681,7 @@ VisitForStackValue(key); VisitForStackValue(value); if (property->emit_store()) { - __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ Push(Smi::FromInt(SLOPPY)); // Strict mode __ CallRuntime(Runtime::kSetProperty, 4); } else { __ Drop(3); @@ -1739,11 +1715,11 @@ EmitAccessor(it->second->getter); EmitAccessor(it->second->setter); __ Push(Smi::FromInt(NONE)); - __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); } if (expr->has_function()) { - ASSERT(result_saved); + DCHECK(result_saved); __ Push(Operand(rsp, 0)); __ CallRuntime(Runtime::kToFastProperties, 1); } @@ -1767,7 +1743,7 @@ ZoneList<Expression*>* subexprs = expr->values(); int length = subexprs->length(); Handle<FixedArray> constant_elements = expr->constant_elements(); - ASSERT_EQ(2, constant_elements->length()); + DCHECK_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); bool has_constant_fast_elements = @@ -1782,46 +1758,19 @@ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; } - Heap* heap = isolate()->heap(); - if (has_constant_fast_elements && - constant_elements_values->map() == heap->fixed_cow_array_map()) { - // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot - // change, so it's possible to specialize the stub in advance. 
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1); - __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); - __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset)); - __ Move(rbx, Smi::FromInt(expr->literal_index())); - __ Move(rcx, constant_elements); - FastCloneShallowArrayStub stub( - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, - allocation_site_mode, - length); - __ CallStub(&stub); - } else if (expr->depth() > 1 || Serializer::enabled() || - length > FastCloneShallowArrayStub::kMaximumClonedLength) { + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset)); __ Push(Smi::FromInt(expr->literal_index())); __ Push(constant_elements); __ Push(Smi::FromInt(flags)); - __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); } else { - ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || - FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = - FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - - // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot - // change, so it's possible to specialize the stub in advance. - if (has_constant_fast_elements) { - mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; - } - __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset)); __ Move(rbx, Smi::FromInt(expr->literal_index())); __ Move(rcx, constant_elements); - FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); __ CallStub(&stub); } @@ -1858,7 +1807,7 @@ } else { // Store the subexpression value in the array's elements. 
__ Move(rcx, Smi::FromInt(i)); - StoreArrayLiteralElementStub stub; + StoreArrayLiteralElementStub stub(isolate()); __ CallStub(&stub); } @@ -1875,7 +1824,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { - ASSERT(expr->target()->IsValidLeftHandSide()); + DCHECK(expr->target()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ Assignment"); @@ -1897,9 +1846,9 @@ break; case NAMED_PROPERTY: if (expr->is_compound()) { - // We need the receiver both on the stack and in the accumulator. - VisitForAccumulatorValue(property->obj()); - __ Push(result_register()); + // We need the receiver both on the stack and in the register. + VisitForStackValue(property->obj()); + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0)); } else { VisitForStackValue(property->obj()); } @@ -1907,9 +1856,9 @@ case KEYED_PROPERTY: { if (expr->is_compound()) { VisitForStackValue(property->obj()); - VisitForAccumulatorValue(property->key()); - __ movp(rdx, Operand(rsp, 0)); - __ Push(rax); + VisitForStackValue(property->key()); + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, kPointerSize)); + __ movp(LoadIC::NameRegister(), Operand(rsp, 0)); } else { VisitForStackValue(property->obj()); VisitForStackValue(property->key()); @@ -2005,7 +1954,7 @@ __ bind(&suspend); VisitForAccumulatorValue(expr->generator_object()); - ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); + DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset), Smi::FromInt(continuation.pos())); __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi); @@ -2016,7 +1965,7 @@ __ cmpp(rsp, rbx); __ j(equal, &post_runtime); __ Push(rax); // generator object - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ movp(context_register(), Operand(rbp, StandardFrameConstants::kContextOffset)); __ bind(&post_runtime); @@ -2050,6 +1999,9 @@ Label 
l_catch, l_try, l_suspend, l_continuation, l_resume; Label l_next, l_call, l_loop; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + // Initial send value is undefined. __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); __ jmp(&l_next); @@ -2057,10 +2009,10 @@ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } __ bind(&l_catch); handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); - __ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw" - __ Push(rcx); - __ Push(Operand(rsp, 2 * kPointerSize)); // iter - __ Push(rax); // exception + __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw" + __ Push(load_name); + __ Push(Operand(rsp, 2 * kPointerSize)); // iter + __ Push(rax); // exception __ jmp(&l_call); // try { received = %yield result } @@ -2078,14 +2030,14 @@ const int generator_object_depth = kPointerSize + handler_size; __ movp(rax, Operand(rsp, generator_object_depth)); __ Push(rax); // g - ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); + DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset), Smi::FromInt(l_continuation.pos())); __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi); __ movp(rcx, rsi); __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx, kDontSaveFPRegs); - __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); __ movp(context_register(), Operand(rbp, StandardFrameConstants::kContextOffset)); __ Pop(rax); // result @@ -2095,20 +2047,24 @@ // receiver = iter; f = 'next'; arg = received; __ bind(&l_next); - __ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next" - __ Push(rcx); - __ Push(Operand(rsp, 2 * kPointerSize)); // iter - __ Push(rax); // received + + __ LoadRoot(load_name, Heap::knext_stringRootIndex); + __ Push(load_name); // "next" + __ 
Push(Operand(rsp, 2 * kPointerSize)); // iter + __ Push(rax); // received // result = receiver[f](arg); __ bind(&l_call); - __ movp(rdx, Operand(rsp, kPointerSize)); - __ movp(rax, Operand(rsp, 2 * kPointerSize)); + __ movp(load_receiver, Operand(rsp, kPointerSize)); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), + Smi::FromInt(expr->KeyedLoadFeedbackSlot())); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallIC(ic, TypeFeedbackId::None()); __ movp(rdi, rax); __ movp(Operand(rsp, 2 * kPointerSize), rdi); - CallFunctionStub stub(1, CALL_AS_METHOD); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); __ CallStub(&stub); __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); @@ -2116,17 +2072,25 @@ // if (!result.done) goto l_try; __ bind(&l_loop); - __ Push(rax); // save result - __ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done" - CallLoadIC(NOT_CONTEXTUAL); // result.done in rax + __ Move(load_receiver, rax); + __ Push(load_receiver); // save result + __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), Smi::FromInt(expr->DoneFeedbackSlot())); + } + CallLoadIC(NOT_CONTEXTUAL); // rax=result.done Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); CallIC(bool_ic); __ testp(result_register(), result_register()); __ j(zero, &l_try); // result.value - __ Pop(rax); // result - __ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value" + __ Pop(load_receiver); // result + __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), + Smi::FromInt(expr->ValueFeedbackSlot())); + } CallLoadIC(NOT_CONTEXTUAL); // result.value in rax context()->DropAndPlug(2, rax); // drop iter and g break; @@ -2139,7 +2103,7 @@ Expression *value, JSGeneratorObject::ResumeMode resume_mode) { // The value stays in rax, and is ultimately read by the resumed generator, as - // if 
CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it // is read to throw the value when the resumed generator is already closed. // rbx will hold the generator object until the activation has been resumed. VisitForStackValue(generator); @@ -2164,9 +2128,8 @@ // Push holes for arguments to generator function. __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ movsxlq(rdx, - FieldOperand(rdx, - SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadSharedFunctionInfoSpecialField(rdx, rdx, + SharedFunctionInfo::kFormalParameterCountOffset); __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); Label push_argument_holes, push_frame; __ bind(&push_argument_holes); @@ -2220,7 +2183,7 @@ __ Push(rbx); __ Push(result_register()); __ Push(Smi::FromInt(resume_mode)); - __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); // Not reached: the runtime call returns elsewhere. __ Abort(kGeneratorFailedToResume); @@ -2234,14 +2197,14 @@ } else { // Throw the provided value. __ Push(rax); - __ CallRuntime(Runtime::kHiddenThrow, 1); + __ CallRuntime(Runtime::kThrow, 1); } __ jmp(&done); // Throw error if we attempt to operate on a running generator. 
__ bind(&wrong_state); __ Push(rbx); - __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); __ bind(&done); context()->Plug(result_register()); @@ -2252,14 +2215,14 @@ Label gc_required; Label allocated; - Handle<Map> map(isolate()->native_context()->generator_result_map()); + Handle<Map> map(isolate()->native_context()->iterator_result_map()); __ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT); __ jmp(&allocated); __ bind(&gc_required); __ Push(Smi::FromInt(map->instance_size())); - __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); __ movp(context_register(), Operand(rbp, StandardFrameConstants::kContextOffset)); @@ -2267,7 +2230,7 @@ __ Move(rbx, map); __ Pop(rcx); __ Move(rdx, isolate()->factory()->ToBoolean(done)); - ASSERT_EQ(map->instance_size(), 5 * kPointerSize); + DCHECK_EQ(map->instance_size(), 5 * kPointerSize); __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx); __ Move(FieldOperand(rax, JSObject::kPropertiesOffset), isolate()->factory()->empty_fixed_array()); @@ -2288,15 +2251,25 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); - __ Move(rcx, key->value()); - CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + __ Move(LoadIC::NameRegister(), key->value()); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), Smi::FromInt(prop->PropertyFeedbackSlot())); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } } void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - CallIC(ic, prop->PropertyFeedbackId()); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), Smi::FromInt(prop->PropertyFeedbackSlot())); + CallIC(ic); + } else { + 
CallIC(ic, prop->PropertyFeedbackId()); + } } @@ -2317,8 +2290,8 @@ __ bind(&stub_call); __ movp(rax, rcx); - BinaryOpICStub stub(op, mode); - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + BinaryOpICStub stub(isolate(), op, mode); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done, Label::kNear); @@ -2328,7 +2301,7 @@ __ SmiShiftArithmeticRight(rax, rdx, rcx); break; case Token::SHL: - __ SmiShiftLeft(rax, rdx, rcx); + __ SmiShiftLeft(rax, rdx, rcx, &stub_call); break; case Token::SHR: __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call); @@ -2365,16 +2338,16 @@ Token::Value op, OverwriteMode mode) { __ Pop(rdx); - BinaryOpICStub stub(op, mode); + BinaryOpICStub stub(isolate(), op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(rax); } void FullCodeGenerator::EmitAssignment(Expression* expr) { - ASSERT(expr->IsValidLeftHandSide()); + DCHECK(expr->IsValidReferenceExpression()); // Left-hand side can only be a property, a global or a (parameter or local) // slot. @@ -2397,9 +2370,9 @@ case NAMED_PROPERTY: { __ Push(rax); // Preserve value. VisitForAccumulatorValue(prop->obj()); - __ movp(rdx, rax); - __ Pop(rax); // Restore value. - __ Move(rcx, prop->key()->AsLiteral()->value()); + __ Move(StoreIC::ReceiverRegister(), rax); + __ Pop(StoreIC::ValueRegister()); // Restore value. + __ Move(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); CallStoreIC(); break; } @@ -2407,9 +2380,9 @@ __ Push(rax); // Preserve value. VisitForStackValue(prop->obj()); VisitForAccumulatorValue(prop->key()); - __ movp(rcx, rax); - __ Pop(rdx); - __ Pop(rax); // Restore value. 
+ __ Move(KeyedStoreIC::NameRegister(), rax); + __ Pop(KeyedStoreIC::ReceiverRegister()); + __ Pop(KeyedStoreIC::ValueRegister()); // Restore value. Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -2432,34 +2405,24 @@ } -void FullCodeGenerator::EmitCallStoreContextSlot( - Handle<String> name, StrictMode strict_mode) { - __ Push(rax); // Value. - __ Push(rsi); // Context. - __ Push(name); - __ Push(Smi::FromInt(strict_mode)); - __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4); -} - - void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { if (var->IsUnallocated()) { // Global var, const, or let. - __ Move(rcx, var->name()); - __ movp(rdx, GlobalObjectOperand()); + __ Move(StoreIC::NameRegister(), var->name()); + __ movp(StoreIC::ReceiverRegister(), GlobalObjectOperand()); CallStoreIC(); } else if (op == Token::INIT_CONST_LEGACY) { // Const initializers need a write barrier. - ASSERT(!var->IsParameter()); // No const parameters. + DCHECK(!var->IsParameter()); // No const parameters. if (var->IsLookupSlot()) { __ Push(rax); __ Push(rsi); __ Push(var->name()); - __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3); + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); } else { - ASSERT(var->IsStackLocal() || var->IsContextSlot()); + DCHECK(var->IsStackLocal() || var->IsContextSlot()); Label skip; MemOperand location = VarOperand(var, rcx); __ movp(rdx, location); @@ -2471,28 +2434,30 @@ } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. 
- if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); - } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); - Label assign; - MemOperand location = VarOperand(var, rcx); - __ movp(rdx, location); - __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex); - __ j(not_equal, &assign, Label::kNear); - __ Push(var->name()); - __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1); - __ bind(&assign); - EmitStoreToStackLocalOrContextSlot(var, location); - } + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, rcx); + __ movp(rdx, location); + __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex); + __ j(not_equal, &assign, Label::kNear); + __ Push(var->name()); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + __ bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); } else if (!var->is_const_mode() || op == Token::INIT_CONST) { - // Assignment to var or initializing assignment to let/const - // in harmony mode. if (var->IsLookupSlot()) { - EmitCallStoreContextSlot(var->name(), strict_mode()); + // Assignment to var. + __ Push(rax); // Value. + __ Push(rsi); // Context. + __ Push(var->name()); + __ Push(Smi::FromInt(strict_mode())); + __ CallRuntime(Runtime::kStoreLookupSlot, 4); } else { - ASSERT(var->IsStackAllocated() || var->IsContextSlot()); + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); MemOperand location = VarOperand(var, rcx); if (generate_debug_code_ && op == Token::INIT_LET) { // Check for an uninitialized let binding. @@ -2510,13 +2475,13 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { // Assignment to a property, using a named store IC. 
Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); - ASSERT(prop->key()->AsLiteral() != NULL); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); // Record source code position before IC call. SetSourcePosition(expr->position()); - __ Move(rcx, prop->key()->AsLiteral()->value()); - __ Pop(rdx); + __ Move(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + __ Pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->AssignmentFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); @@ -2527,8 +2492,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { // Assignment to a property, using a keyed store IC. - __ Pop(rcx); - __ Pop(rdx); + __ Pop(KeyedStoreIC::NameRegister()); // Key. + __ Pop(KeyedStoreIC::ReceiverRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(rax)); // Record source code position before IC call. SetSourcePosition(expr->position()); Handle<Code> ic = strict_mode() == SLOPPY @@ -2547,13 +2513,16 @@ if (key->IsPropertyName()) { VisitForAccumulatorValue(expr->obj()); + DCHECK(!rax.is(LoadIC::ReceiverRegister())); + __ movp(LoadIC::ReceiverRegister(), rax); EmitNamedPropertyLoad(expr); PrepareForBailoutForId(expr->LoadId(), TOS_REG); context()->Plug(rax); } else { VisitForStackValue(expr->obj()); VisitForAccumulatorValue(expr->key()); - __ Pop(rdx); + __ Move(LoadIC::NameRegister(), rax); + __ Pop(LoadIC::ReceiverRegister()); EmitKeyedPropertyLoad(expr); context()->Plug(rax); } @@ -2568,14 +2537,14 @@ // Code common for calls using the IC. -void FullCodeGenerator::EmitCallWithIC(Call* expr) { +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); - CallFunctionFlags flags; - // Get the target function; - if (callee->IsVariableProxy()) { + CallIC::CallType call_type = callee->IsVariableProxy() + ? 
CallIC::FUNCTION + : CallIC::METHOD; + // Get the target function. + if (call_type == CallIC::FUNCTION) { { StackValueContext context(this); EmitVariableLoad(callee->AsVariableProxy()); PrepareForBailout(callee, NO_REGISTERS); @@ -2583,54 +2552,33 @@ // Push undefined as receiver. This is patched in the method prologue if it // is a sloppy mode method. __ Push(isolate()->factory()->undefined_value()); - flags = NO_CALL_FUNCTION_FLAGS; } else { // Load the function from the receiver. - ASSERT(callee->IsProperty()); - __ movp(rax, Operand(rsp, 0)); + DCHECK(callee->IsProperty()); + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0)); EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); // Push the target function under the receiver. __ Push(Operand(rsp, 0)); __ movp(Operand(rsp, kPointerSize), rax); - flags = CALL_AS_METHOD; } - // Load the arguments. - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, flags); - __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - - // Restore context register. - __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, rax); + EmitCall(expr, call_type); } // Common code for calls using the IC. -void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, - Expression* key) { +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { // Load the key. VisitForAccumulatorValue(key); Expression* callee = expr->expression(); - ZoneList<Expression*>* args = expr->arguments(); - int arg_count = args->length(); // Load the function from the receiver. 
- ASSERT(callee->IsProperty()); - __ movp(rdx, Operand(rsp, 0)); + DCHECK(callee->IsProperty()); + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0)); + __ Move(LoadIC::NameRegister(), rax); EmitKeyedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); @@ -2638,29 +2586,12 @@ __ Push(Operand(rsp, 0)); __ movp(Operand(rsp, kPointerSize), rax); - // Load the arguments. - { PreservePositionScope scope(masm()->positions_recorder()); - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } - } - - // Record source position for debugger. - SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, CALL_AS_METHOD); - __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); - - RecordJSReturnSite(expr); - // Restore context register. - __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - - context()->DropAndPlug(1, rax); + EmitCall(expr, CallIC::METHOD); } -void FullCodeGenerator::EmitCallWithStub(Call* expr) { - // Code common for calls using the call stub. +void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); { PreservePositionScope scope(masm()->positions_recorder()); @@ -2668,20 +2599,19 @@ VisitForStackValue(args->at(i)); } } - // Record source position for debugger. - SetSourcePosition(expr->position()); - Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); - __ Move(rbx, FeedbackVector()); + // Record source position of the IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot())); - - // Record call targets in unoptimized code. 
- CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); + RecordJSReturnSite(expr); + // Restore context register. __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); // Discard the function left on TOS. @@ -2708,7 +2638,7 @@ __ Push(Smi::FromInt(scope()->start_position())); // Do the runtime call. - __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); } @@ -2750,7 +2680,7 @@ } // Record source position for debugger. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); RecordJSReturnSite(expr); @@ -2758,7 +2688,7 @@ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, rax); } else if (call_type == Call::GLOBAL_CALL) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else if (call_type == Call::LOOKUP_SLOT_CALL) { // Call to a lookup slot (dynamically introduced variable). @@ -2768,14 +2698,14 @@ { PreservePositionScope scope(masm()->positions_recorder()); // Generate code for loading from variables potentially shadowed by // eval-introduced variables. - EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); } __ bind(&slow); // Call the runtime to find the function to call (returned in rax) and // the object holding it (returned in rdx). __ Push(context_register()); __ Push(proxy->name()); - __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); __ Push(rax); // Function. __ Push(rdx); // Receiver. 
@@ -2795,31 +2725,31 @@ // The receiver is either the global receiver or an object found by // LoadContextSlot. - EmitCallWithStub(expr); + EmitCall(expr); } else if (call_type == Call::PROPERTY_CALL) { Property* property = callee->AsProperty(); { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(property->obj()); } if (property->key()->IsPropertyName()) { - EmitCallWithIC(expr); + EmitCallWithLoadIC(expr); } else { - EmitKeyedCallWithIC(expr, property->key()); + EmitKeyedCallWithLoadIC(expr, property->key()); } } else { - ASSERT(call_type == Call::OTHER_CALL); + DCHECK(call_type == Call::OTHER_CALL); // Call to an arbitrary expression not handled specially above. { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(callee); } __ PushRoot(Heap::kUndefinedValueRootIndex); // Emit function call. - EmitCallWithStub(expr); + EmitCall(expr); } #ifdef DEBUG // RecordJSReturnSite should have been called. - ASSERT(expr->return_is_recorded_); + DCHECK(expr->return_is_recorded_); #endif } @@ -2851,21 +2781,17 @@ __ movp(rdi, Operand(rsp, arg_count * kPointerSize)); // Record call targets in unoptimized code, but not in the snapshot. 
- Handle<Object> uninitialized = - TypeFeedbackInfo::UninitializedSentinel(isolate()); - StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); if (FLAG_pretenuring_call_new) { - StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(), - isolate()->factory()->NewAllocationSite()); - ASSERT(expr->AllocationSiteFeedbackSlot() == + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == expr->CallNewFeedbackSlot() + 1); } __ Move(rbx, FeedbackVector()); __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot())); - CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(rax); } @@ -2873,7 +2799,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2894,7 +2820,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2915,7 +2841,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2947,7 +2873,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2969,7 +2895,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + 
DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -2994,7 +2920,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3035,10 +2961,8 @@ // rcx: valid entries in the descriptor array. // Calculate the end of the descriptor array. __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize)); - SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2); __ leap(rcx, - Operand( - r8, index.reg, index.scale, DescriptorArray::kFirstOffset)); + Operand(r8, rcx, times_pointer_size, DescriptorArray::kFirstOffset)); // Calculate location of the first key name. __ addp(r8, Immediate(DescriptorArray::kFirstOffset)); // Loop through all the keys in the descriptor array. If one of these is the @@ -3080,7 +3004,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3102,7 +3026,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3129,7 +3053,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3151,7 +3075,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3173,7 +3097,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + 
DCHECK(expr->arguments()->length() == 0); Label materialize_true, materialize_false; Label* if_true = NULL; @@ -3205,7 +3129,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); // Load the two objects into registers and perform the comparison. VisitForStackValue(args->at(0)); @@ -3229,21 +3153,21 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); // ArgumentsAccessStub expects the key in rdx and the formal // parameter count in rax. VisitForAccumulatorValue(args->at(0)); __ movp(rdx, rax); __ Move(rax, Smi::FromInt(info_->scope()->num_parameters())); - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); __ CallStub(&stub); context()->Plug(rax); } void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { - ASSERT(expr->arguments()->length() == 0); + DCHECK(expr->arguments()->length() == 0); Label exit; // Get the number of formal parameters. @@ -3267,7 +3191,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); Label done, null, function, non_function_constructor; VisitForAccumulatorValue(args->at(0)); @@ -3326,32 +3250,11 @@ } -void FullCodeGenerator::EmitLog(CallRuntime* expr) { - // Conditionally generate a log call. - // Args: - // 0 (literal string): The type of logging (corresponds to the flags). - // This is used to determine whether or not to generate the log call. - // 1 (string): Format string. Access the string at argument index 2 - // with '%2s' (see Logger::LogRuntime for all the formats). - // 2 (array): Arguments to the format string. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 3); - if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) { - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - __ CallRuntime(Runtime::kHiddenLog, 2); - } - // Finally, we're expected to leave a value on the top of the stack. - __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); - context()->Plug(rax); -} - - void FullCodeGenerator::EmitSubString(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - SubStringStub stub; + SubStringStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3362,9 +3265,9 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { // Load the arguments on the stack and call the stub. - RegExpExecStub stub; + RegExpExecStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 4); + DCHECK(args->length() == 4); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); @@ -3376,7 +3279,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -3395,8 +3298,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); - ASSERT_NE(NULL, args->at(1)->AsLiteral()); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -3434,7 +3337,7 @@ } __ bind(¬_date_object); - __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0); + __ CallRuntime(Runtime::kThrowNotDateError, 0); __ bind(&done); context()->Plug(rax); } @@ -3442,7 +3345,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = rax; Register index = rbx; @@ -3475,7 +3378,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(3, args->length()); + DCHECK_EQ(3, args->length()); Register string = rax; Register index = rbx; @@ -3509,10 +3412,10 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - MathPowStub stub(MathPowStub::ON_STACK); + MathPowStub stub(isolate(), MathPowStub::ON_STACK); __ CallStub(&stub); context()->Plug(rax); } @@ -3520,7 +3423,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); // Load the object. VisitForAccumulatorValue(args->at(1)); // Load the value. @@ -3548,12 +3451,12 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(args->length(), 1); + DCHECK_EQ(args->length(), 1); // Load the argument into rax and call the stub. 
VisitForAccumulatorValue(args->at(0)); - NumberToStringStub stub; + NumberToStringStub stub(isolate()); __ CallStub(&stub); context()->Plug(rax); } @@ -3561,7 +3464,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3580,7 +3483,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3626,7 +3529,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); @@ -3674,12 +3577,12 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForAccumulatorValue(args->at(1)); __ Pop(rdx); - StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED); + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); __ CallStub(&stub); context()->Plug(rax); } @@ -3687,40 +3590,20 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - StringCompareStub stub; + StringCompareStub stub(isolate()); __ CallStub(&stub); context()->Plug(rax); } -void FullCodeGenerator::EmitMathLog(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. 
- ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_log, 1); - context()->Plug(rax); -} - - -void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) { - // Load the argument on the stack and call the runtime function. - ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); - VisitForStackValue(args->at(0)); - __ CallRuntime(Runtime::kMath_sqrt, 1); - context()->Plug(rax); -} - - void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() >= 2); + DCHECK(args->length() >= 2); int arg_count = args->length() - 2; // 2 ~ receiver and function. for (int i = 0; i < arg_count + 1; i++) { @@ -3751,9 +3634,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { - RegExpConstructResultStub stub; + RegExpConstructResultStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 3); + DCHECK(args->length() == 3); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForAccumulatorValue(args->at(2)); @@ -3766,9 +3649,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT_EQ(2, args->length()); + DCHECK_EQ(2, args->length()); - ASSERT_NE(NULL, args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle<FixedArray> jsfunction_result_caches( @@ -3814,7 +3697,7 @@ // Call runtime to perform the lookup. 
__ Push(cache); __ Push(key); - __ CallRuntime(Runtime::kHiddenGetFromCache, 2); + __ CallRuntime(Runtime::kGetFromCache, 2); __ bind(&done); context()->Plug(rax); @@ -3823,7 +3706,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); @@ -3846,13 +3729,13 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 1); + DCHECK(args->length() == 1); VisitForAccumulatorValue(args->at(0)); __ AssertString(rax); __ movl(rax, FieldOperand(rax, String::kHashFieldOffset)); - ASSERT(String::kHashShift >= kSmiTagSize); + DCHECK(String::kHashShift >= kSmiTagSize); __ IndexFromHash(rax, rax); context()->Plug(rax); @@ -3864,7 +3747,7 @@ non_trivial_array, not_size_one_array, loop, loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry; ZoneList<Expression*>* args = expr->arguments(); - ASSERT(args->length() == 2); + DCHECK(args->length() == 2); // We will leave the separator on the stack until the end of the function. VisitForStackValue(args->at(1)); // Load this to rax (= array) @@ -4144,6 +4027,17 @@ } +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ Move(kScratchRegister, debug_is_active); + __ movzxbp(rax, Operand(kScratchRegister, 0)); + __ Integer32ToSmi(rax, rax); + context()->Plug(rax); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->function() != NULL && expr->function()->intrinsic_type == Runtime::INLINE) { @@ -4162,9 +4056,15 @@ __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset)); // Load the function from the receiver. 
- __ movp(rax, Operand(rsp, 0)); - __ Move(rcx, expr->name()); - CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0)); + __ Move(LoadIC::NameRegister(), expr->name()); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), + Smi::FromInt(expr->CallRuntimeFeedbackSlot())); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } // Push the target function under the receiver. __ Push(Operand(rsp, 0)); @@ -4177,7 +4077,7 @@ // Record source position of the IC call. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); @@ -4215,7 +4115,7 @@ Variable* var = proxy->var(); // Delete of an unqualified identifier is disallowed in strict mode // but "delete this" is allowed. - ASSERT(strict_mode() == SLOPPY || var->is_this()); + DCHECK(strict_mode() == SLOPPY || var->is_this()); if (var->IsUnallocated()) { __ Push(GlobalObjectOperand()); __ Push(var->name()); @@ -4232,7 +4132,7 @@ // context where the variable was introduced. __ Push(context_register()); __ Push(var->name()); - __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); context()->Plug(rax); } } else { @@ -4270,7 +4170,7 @@ // for control and plugging the control flow into the context, // because we need to prepare a pair of extra administrative AST ids // for the optimizing compiler. 
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue()); + DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); Label materialize_true, materialize_false, done; VisitForControl(expr->expression(), &materialize_false, @@ -4313,7 +4213,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { - ASSERT(expr->expression()->IsValidLeftHandSide()); + DCHECK(expr->expression()->IsValidReferenceExpression()); Comment cmnt(masm_, "[ CountOperation"); SetSourcePosition(expr->position()); @@ -4332,7 +4232,7 @@ // Evaluate expression and get value. if (assign_type == VARIABLE) { - ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { @@ -4341,14 +4241,16 @@ __ Push(Smi::FromInt(0)); } if (assign_type == NAMED_PROPERTY) { - VisitForAccumulatorValue(prop->obj()); - __ Push(rax); // Copy of receiver, needed for later store. + VisitForStackValue(prop->obj()); + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, 0)); EmitNamedPropertyLoad(prop); } else { VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack - __ Push(rax); // Copy of key, needed for later store. + VisitForStackValue(prop->key()); + // Leave receiver on stack + __ movp(LoadIC::ReceiverRegister(), Operand(rsp, kPointerSize)); + // Copy of key, needed for later store. + __ movp(LoadIC::NameRegister(), Operand(rsp, 0)); EmitKeyedPropertyLoad(prop); } } @@ -4400,7 +4302,7 @@ __ bind(&slow); } - ToNumberStub convert_stub; + ToNumberStub convert_stub(isolate()); __ CallStub(&convert_stub); // Save result for postfix expressions. 
@@ -4430,8 +4332,8 @@ __ bind(&stub_call); __ movp(rdx, rax); __ Move(rax, Smi::FromInt(1)); - BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE); - CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); + BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4460,8 +4362,8 @@ } break; case NAMED_PROPERTY: { - __ Move(rcx, prop->key()->AsLiteral()->value()); - __ Pop(rdx); + __ Move(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + __ Pop(StoreIC::ReceiverRegister()); CallStoreIC(expr->CountStoreFeedbackId()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { @@ -4474,8 +4376,8 @@ break; } case KEYED_PROPERTY: { - __ Pop(rcx); - __ Pop(rdx); + __ Pop(KeyedStoreIC::NameRegister()); + __ Pop(KeyedStoreIC::ReceiverRegister()); Handle<Code> ic = strict_mode() == SLOPPY ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); @@ -4496,13 +4398,17 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { VariableProxy* proxy = expr->AsVariableProxy(); - ASSERT(!context()->IsEffect()); - ASSERT(!context()->IsTest()); + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); if (proxy != NULL && proxy->var()->IsUnallocated()) { Comment cmnt(masm_, "[ Global variable"); - __ Move(rcx, proxy->name()); - __ movp(rax, GlobalObjectOperand()); + __ Move(LoadIC::NameRegister(), proxy->name()); + __ movp(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + if (FLAG_vector_ics) { + __ Move(LoadIC::SlotRegister(), + Smi::FromInt(proxy->VariableFeedbackSlot())); + } // Use a regular load, not a contextual load, to avoid a reference // error. CallLoadIC(NOT_CONTEXTUAL); @@ -4514,12 +4420,12 @@ // Generate code for loading from variables potentially shadowed // by eval-introduced variables. 
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done); + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); __ bind(&slow); __ Push(rsi); __ Push(proxy->name()); - __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); PrepareForBailout(expr, TOS_REG); __ bind(&done); @@ -4546,12 +4452,13 @@ } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { __ JumpIfSmi(rax, if_true); __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_string())) { + } else if (String::Equals(check, factory->string_string())) { __ JumpIfSmi(rax, if_false); // Check for undetectable objects => false. 
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx); @@ -4559,20 +4466,16 @@ __ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); Split(zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->symbol_string())) { + } else if (String::Equals(check, factory->symbol_string())) { __ JumpIfSmi(rax, if_false); __ CmpObjectType(rax, SYMBOL_TYPE, rdx); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_string())) { + } else if (String::Equals(check, factory->boolean_string())) { __ CompareRoot(rax, Heap::kTrueValueRootIndex); __ j(equal, if_true); __ CompareRoot(rax, Heap::kFalseValueRootIndex); Split(equal, if_true, if_false, fall_through); - } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_string())) { - __ CompareRoot(rax, Heap::kNullValueRootIndex); - Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_string())) { + } else if (String::Equals(check, factory->undefined_string())) { __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); __ j(equal, if_true); __ JumpIfSmi(rax, if_false); @@ -4581,19 +4484,17 @@ __ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); Split(not_zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_string())) { + } else if (String::Equals(check, factory->function_string())) { __ JumpIfSmi(rax, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx); __ j(equal, if_true); __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_string())) { + } else if (String::Equals(check, factory->object_string())) { __ JumpIfSmi(rax, if_false); - if (!FLAG_harmony_typeof) { - __ CompareRoot(rax, Heap::kNullValueRootIndex); - __ j(equal, if_true); - } + __ 
CompareRoot(rax, Heap::kNullValueRootIndex); + __ j(equal, if_true); __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx); __ j(below, if_false); __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); @@ -4639,7 +4540,7 @@ case Token::INSTANCEOF: { VisitForStackValue(expr->right()); - InstanceofStub stub(InstanceofStub::kNoFlags); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); __ CallStub(&stub); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); __ testp(rax, rax); @@ -4728,7 +4629,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { - ASSERT(IsAligned(frame_offset, kPointerSize)); + DCHECK(IsAligned(frame_offset, kPointerSize)); __ movp(Operand(rbp, frame_offset), value); } @@ -4753,7 +4654,7 @@ // code. Fetch it from the context. __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX)); } else { - ASSERT(declaration_scope->is_function_scope()); + DCHECK(declaration_scope->is_function_scope()); __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); } } @@ -4764,8 +4665,8 @@ void FullCodeGenerator::EnterFinallyBlock() { - ASSERT(!result_register().is(rdx)); - ASSERT(!result_register().is(rcx)); + DCHECK(!result_register().is(rdx)); + DCHECK(!result_register().is(rcx)); // Cook return address on top of stack (smi encoded Code* delta) __ PopReturnAddressTo(rdx); __ Move(rcx, masm_->CodeObject()); @@ -4796,8 +4697,8 @@ void FullCodeGenerator::ExitFinallyBlock() { - ASSERT(!result_register().is(rdx)); - ASSERT(!result_register().is(rcx)); + DCHECK(!result_register().is(rdx)); + DCHECK(!result_register().is(rcx)); // Restore pending message from stack. 
__ Pop(rdx); ExternalReference pending_message_script = @@ -4859,7 +4760,6 @@ static const byte kJnsInstruction = 0x79; -static const byte kJnsOffset = 0x1d; static const byte kNopByteOne = 0x66; static const byte kNopByteTwo = 0x90; #ifdef DEBUG @@ -4910,18 +4810,18 @@ Address pc) { Address call_target_address = pc - kIntSize; Address jns_instr_address = call_target_address - 3; - ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); + DCHECK_EQ(kCallInstruction, *(call_target_address - 1)); if (*jns_instr_address == kJnsInstruction) { - ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); - ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(), + DCHECK_EQ(kJnsOffset, *(call_target_address - 2)); + DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(), Assembler::target_address_at(call_target_address, unoptimized_code)); return INTERRUPT; } - ASSERT_EQ(kNopByteOne, *jns_instr_address); - ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); + DCHECK_EQ(kNopByteOne, *jns_instr_address); + DCHECK_EQ(kNopByteTwo, *(call_target_address - 2)); if (Assembler::target_address_at(call_target_address, unoptimized_code) == @@ -4929,7 +4829,7 @@ return ON_STACK_REPLACEMENT; } - ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), + DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), Assembler::target_address_at(call_target_address, unoptimized_code)); return OSR_AFTER_STACK_CHECK; diff -Nru nodejs-0.11.13/deps/v8/src/x64/ic-x64.cc nodejs-0.11.15/deps/v8/src/x64/ic-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/ic-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/ic-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "codegen.h" -#include "ic-inl.h" -#include "runtime.h" -#include "stub-cache.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -58,46 +35,6 @@ } -// Generated code falls through if the receiver is a regular non-global -// JS object with slow properties and no interceptors. -static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register r0, - Register r1, - Label* miss) { - // Register usage: - // receiver: holds the receiver on entry and is unchanged. - // r0: used to hold receiver instance type. - // Holds the property dictionary on fall through. - // r1: used to hold receivers map. - - __ JumpIfSmi(receiver, miss); - - // Check that the receiver is a valid JS object. - __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset)); - __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset)); - __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE)); - __ j(below, miss); - - // If this assert fails, we have to check upper bound too. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); - - GenerateGlobalInstanceTypeCheck(masm, r0, miss); - - // Check for non-global object that requires access check. - __ testb(FieldOperand(r1, Map::kBitFieldOffset), - Immediate((1 << Map::kIsAccessCheckNeeded) | - (1 << Map::kHasNamedInterceptor))); - __ j(not_zero, miss); - - __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset)); - __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset), - Heap::kHashTableMapRootIndex); - __ j(not_equal, miss); -} - - - // Helper function used to load a property from a dictionary backing storage. // This function may return false negatives, so miss_label // must always call a backup property load that is complete. 
@@ -243,7 +180,7 @@ // In the case that the object is a value-wrapper object, // we enter the runtime system to make sure that indexing // into string objects work as intended. - ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); + DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); __ CmpObjectType(receiver, JS_OBJECT_TYPE, map); __ j(below, slow); @@ -350,30 +287,31 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); + // Check that the key is a smi. - __ JumpIfNotSmi(rax, &check_name); + __ JumpIfNotSmi(key, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. GenerateKeyedLoadReceiverCheck( - masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow); + masm, receiver, rax, Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. - __ CheckFastElements(rcx, &check_number_dictionary); + __ CheckFastElements(rax, &check_number_dictionary); GenerateFastArrayLoad(masm, - rdx, + receiver, + key, rax, - rcx, rbx, rax, NULL, @@ -383,50 +321,46 @@ __ ret(0); __ bind(&check_number_dictionary); - __ SmiToInteger32(rbx, rax); - __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ SmiToInteger32(rbx, key); + __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset)); // Check whether the elements is a number dictionary. 
- // rdx: receiver - // rax: key // rbx: key as untagged int32 - // rcx: elements - __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset), + // rax: elements + __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), Heap::kHashTableMapRootIndex); __ j(not_equal, &slow); - __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax); + __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax); __ ret(0); __ bind(&slow); // Slow case: Jump to runtime. - // rdx: receiver - // rax: key __ IncrementCounter(counters->keyed_load_generic_slow(), 1); GenerateRuntimeGetProperty(masm); __ bind(&check_name); - GenerateKeyNameCheck(masm, rax, rcx, rbx, &index_name, &slow); + GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow); GenerateKeyedLoadReceiverCheck( - masm, rdx, rcx, Map::kHasNamedInterceptor, &slow); + masm, receiver, rax, Map::kHasNamedInterceptor, &slow); // If the receiver is a fast-case object, check the keyed lookup - // cache. Otherwise probe the dictionary leaving result in rcx. - __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset)); + // cache. Otherwise probe the dictionary leaving result in key. + __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset)); __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), Heap::kHashTableMapRootIndex); __ j(equal, &probe_dictionary); // Load the map of the receiver, compute the keyed lookup cache hash // based on 32 bits of the map pointer and the string hash. 
- __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); - __ movl(rcx, rbx); - __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift)); - __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset)); - __ shr(rdi, Immediate(String::kHashShift)); - __ xorp(rcx, rdi); + __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset)); + __ movl(rax, rbx); + __ shrl(rax, Immediate(KeyedLookupCache::kMapHashShift)); + __ movl(rdi, FieldOperand(key, String::kHashFieldOffset)); + __ shrl(rdi, Immediate(String::kHashShift)); + __ xorp(rax, rdi); int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); - __ andp(rcx, Immediate(mask)); + __ andp(rax, Immediate(mask)); // Load the key (consisting of map and internalized string) from the cache and // check for match. @@ -438,13 +372,13 @@ for (int i = 0; i < kEntriesPerBucket - 1; i++) { Label try_next_entry; - __ movp(rdi, rcx); - __ shl(rdi, Immediate(kPointerSizeLog2 + 1)); + __ movp(rdi, rax); + __ shlp(rdi, Immediate(kPointerSizeLog2 + 1)); __ LoadAddress(kScratchRegister, cache_keys); int off = kPointerSize * i * 2; __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off)); __ j(not_equal, &try_next_entry); - __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize)); + __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize)); __ j(equal, &hit_on_nth_entry[i]); __ bind(&try_next_entry); } @@ -452,7 +386,7 @@ int off = kPointerSize * (kEntriesPerBucket - 1) * 2; __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off)); __ j(not_equal, &slow); - __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize)); + __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize)); __ j(not_equal, &slow); // Get field offset, which is a 32-bit integer. 
@@ -463,12 +397,12 @@ for (int i = kEntriesPerBucket - 1; i >= 0; i--) { __ bind(&hit_on_nth_entry[i]); if (i != 0) { - __ addl(rcx, Immediate(i)); + __ addl(rax, Immediate(i)); } __ LoadAddress(kScratchRegister, cache_field_offsets); - __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); - __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); - __ subp(rdi, rcx); + __ movl(rdi, Operand(kScratchRegister, rax, times_4, 0)); + __ movzxbp(rax, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); + __ subp(rdi, rax); __ j(above_equal, &property_array_property); if (i != 0) { __ jmp(&load_in_object_property); @@ -477,15 +411,15 @@ // Load in-object property. __ bind(&load_in_object_property); - __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset)); - __ addp(rcx, rdi); - __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0)); + __ movzxbp(rax, FieldOperand(rbx, Map::kInstanceSizeOffset)); + __ addp(rax, rdi); + __ movp(rax, FieldOperand(receiver, rax, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); // Load property array property. __ bind(&property_array_property); - __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset)); + __ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset)); __ movp(rax, FieldOperand(rax, rdi, times_pointer_size, FixedArray::kHeaderSize)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); @@ -494,36 +428,31 @@ // Do a quick inline probe of the receiver's dictionary, if it // exists. 
__ bind(&probe_dictionary); - // rdx: receiver - // rax: key // rbx: elements - __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset)); - __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset)); - GenerateGlobalInstanceTypeCheck(masm, rcx, &slow); + __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset)); + __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset)); + GenerateGlobalInstanceTypeCheck(masm, rax, &slow); - GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax); + GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax); __ IncrementCounter(counters->keyed_load_generic_symbol(), 1); __ ret(0); __ bind(&index_name); - __ IndexFromHash(rbx, rax); + __ IndexFromHash(rbx, key); __ jmp(&index_smi); } void KeyedLoadIC::GenerateString(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // Return address is on the stack. Label miss; - Register receiver = rdx; - Register index = rax; - Register scratch = rcx; + Register receiver = ReceiverRegister(); + Register index = NameRegister(); + Register scratch = rbx; Register result = rax; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); StringCharAtGenerator char_at_generator(receiver, index, @@ -545,42 +474,42 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // Return address is on the stack. Label slow; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch = rax; + DCHECK(!scratch.is(receiver) && !scratch.is(key)); + // Check that the receiver isn't a smi. - __ JumpIfSmi(rdx, &slow); + __ JumpIfSmi(receiver, &slow); // Check that the key is an array index, that is Uint32. 
STATIC_ASSERT(kSmiValueSize <= 32); - __ JumpUnlessNonNegativeSmi(rax, &slow); + __ JumpUnlessNonNegativeSmi(key, &slow); // Get the map of the receiver. - __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); // Check that it has indexed interceptor and access checks // are not enabled for this object. - __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset)); - __ andb(rcx, Immediate(kSlowCaseBitFieldMask)); - __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor)); + __ movb(scratch, FieldOperand(scratch, Map::kBitFieldOffset)); + __ andb(scratch, Immediate(kSlowCaseBitFieldMask)); + __ cmpb(scratch, Immediate(1 << Map::kHasIndexedInterceptor)); __ j(not_zero, &slow); // Everything is fine, call runtime. - __ PopReturnAddressTo(rcx); - __ Push(rdx); // receiver - __ Push(rax); // key - __ PushReturnAddressFrom(rcx); + __ PopReturnAddressTo(scratch); + __ Push(receiver); // receiver + __ Push(key); // key + __ PushReturnAddressFrom(scratch); // Perform tail call to the entry. __ TailCallExternalReference( - ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor), + ExternalReference(IC_Utility(kLoadElementWithInterceptor), masm->isolate()), - 2, - 1); + 2, 1); __ bind(&slow); GenerateMiss(masm); @@ -597,12 +526,16 @@ Label transition_smi_elements; Label finish_object_store, non_double_value, transition_double_elements; Label fast_double_without_map_check; + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register key = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); + DCHECK(value.is(rax)); // Fast case: Do the store, could be either Object or double. __ bind(fast_object); - // rax: value // rbx: receiver's elements array (a FixedArray) - // rcx: index - // rdx: receiver (a JSArray) + // receiver is a JSArray. 
// r9: map of receiver if (check_map == kCheckMap) { __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); @@ -615,26 +548,26 @@ // there may be a callback on the element Label holecheck_passed1; __ movp(kScratchRegister, FieldOperand(rbx, - rcx, + key, times_pointer_size, FixedArray::kHeaderSize)); __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ j(not_equal, &holecheck_passed1); - __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow); + __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow); __ bind(&holecheck_passed1); // Smi stores don't require further checks. Label non_smi_value; - __ JumpIfNotSmi(rax, &non_smi_value); + __ JumpIfNotSmi(value, &non_smi_value); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ leal(rdi, Operand(rcx, 1)); - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); + __ leal(rdi, Operand(key, 1)); + __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi); } // It's irrelevant whether array is smi-only or not when writing a smi. - __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize), - rax); + __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize), + value); __ ret(0); __ bind(&non_smi_value); @@ -645,14 +578,14 @@ __ bind(&finish_object_store); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ leal(rdi, Operand(rcx, 1)); - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); + __ leal(rdi, Operand(key, 1)); + __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi); } - __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize), - rax); - __ movp(rdx, rax); // Preserve the value which is returned. + __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize), + value); + __ movp(rdx, value); // Preserve the value which is returned. 
__ RecordWriteArray( - rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ ret(0); __ bind(fast_double); @@ -668,25 +601,25 @@ // We have to see if the double version of the hole is present. If so // go to the runtime. uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ cmpl(FieldOperand(rbx, rcx, times_8, offset), Immediate(kHoleNanUpper32)); + __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32)); __ j(not_equal, &fast_double_without_map_check); - __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow); + __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow); __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, + __ StoreNumberToDoubleElements(value, rbx, key, xmm0, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ leal(rdi, Operand(rcx, 1)); - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); + __ leal(rdi, Operand(key, 1)); + __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi); } __ ret(0); __ bind(&transition_smi_elements); - __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset)); // Transition the array appropriately depending on the value type. 
- __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset)); + __ movp(r9, FieldOperand(value, HeapObject::kMapOffset)); __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex); __ j(not_equal, &non_double_value); @@ -699,8 +632,9 @@ slow); AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); - __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, rbx, mode, slow); + __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); @@ -711,52 +645,52 @@ rdi, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, - slow); - __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, rbx, mode, slow); + __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); __ bind(&transition_double_elements); // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a // HeapNumber. 
Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS - __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset)); __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, rbx, rdi, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); - __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + ElementsTransitionGenerator::GenerateDoubleToObject( + masm, receiver, key, value, rbx, mode, slow); + __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // Return address is on the stack. Label slow, slow_with_tagged_index, fast_object, fast_object_grow; Label fast_double, fast_double_grow; Label array, extra, check_if_double_array; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); // Check that the object isn't a smi. - __ JumpIfSmi(rdx, &slow_with_tagged_index); + __ JumpIfSmi(receiver, &slow_with_tagged_index); // Get the map from the receiver. - __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks and is not observed. // The generic stub does not perform map checks or handle observed objects. __ testb(FieldOperand(r9, Map::kBitFieldOffset), Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); __ j(not_zero, &slow_with_tagged_index); // Check that the key is a smi. 
- __ JumpIfNotSmi(rcx, &slow_with_tagged_index); - __ SmiToInteger32(rcx, rcx); + __ JumpIfNotSmi(key, &slow_with_tagged_index); + __ SmiToInteger32(key, key); __ CmpInstanceType(r9, JS_ARRAY_TYPE); __ j(equal, &array); @@ -765,20 +699,15 @@ __ j(below, &slow); // Object case: Check key against length in the elements array. - // rax: value - // rdx: JSObject - // rcx: index - __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); // Check array bounds. - __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); - // rax: value + __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key); // rbx: FixedArray - // rcx: index __ j(above, &fast_object); // Slow case: call runtime. __ bind(&slow); - __ Integer32ToSmi(rcx, rcx); + __ Integer32ToSmi(key, key); __ bind(&slow_with_tagged_index); GenerateRuntimeSetProperty(masm, strict_mode); // Never returns to here. @@ -787,13 +716,11 @@ // perform the store and update the length. Used for adding one // element to the array by writing to array[array.length]. __ bind(&extra); - // rax: value - // rdx: receiver (a JSArray) + // receiver is a JSArray. // rbx: receiver's elements array (a FixedArray) - // rcx: index - // flags: smicompare (rdx.length(), rbx) + // flags: smicompare (receiver.length(), rbx) __ j(not_equal, &slow); // do not leave holes in the array - __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); + __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key); __ j(below_equal, &slow); // Increment index to get new length. __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); @@ -811,14 +738,12 @@ // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. __ bind(&array); - // rax: value - // rdx: receiver (a JSArray) - // rcx: index - __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + // receiver is a JSArray. 
+ __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); // Check the key against the length in the array, compute the // address to store into and fall through to fast case. - __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx); + __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key); __ j(below_equal, &extra); KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, @@ -910,21 +835,22 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(key.is(rcx)); + Label slow, notin; Operand mapped_location = GenerateMappedArgumentsLookup( - masm, rdx, rax, rbx, rcx, rdi, ¬in, &slow); + masm, receiver, key, rbx, rax, rdi, ¬in, &slow); __ movp(rax, mapped_location); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in rbx. Operand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow); + GenerateUnmappedArgumentsLookup(masm, key, rbx, rax, &slow); __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex); __ j(equal, &slow); __ movp(rax, unmapped_location); @@ -935,18 +861,20 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. 
Label slow, notin; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(name.is(rcx)); + DCHECK(value.is(rax)); + Operand mapped_location = GenerateMappedArgumentsLookup( - masm, rdx, rcx, rbx, rdi, r8, ¬in, &slow); - __ movp(mapped_location, rax); + masm, receiver, name, rbx, rdi, r8, ¬in, &slow); + __ movp(mapped_location, value); __ leap(r9, mapped_location); - __ movp(r8, rax); + __ movp(r8, value); __ RecordWrite(rbx, r9, r8, @@ -957,10 +885,10 @@ __ bind(¬in); // The unmapped lookup expects that the parameter map is in rbx. Operand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow); - __ movp(unmapped_location, rax); + GenerateUnmappedArgumentsLookup(masm, name, rbx, rdi, &slow); + __ movp(unmapped_location, value); __ leap(r9, unmapped_location); - __ movp(r8, rax); + __ movp(r8, value); __ RecordWrite(rbx, r9, r8, @@ -974,56 +902,60 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(rdx)); + DCHECK(name.is(rcx)); // Probe the stub cache. 
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, rax, rcx, rbx, rdx); + masm, flags, receiver, name, rbx, rax); GenerateMiss(masm); } void LoadIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; + Register dictionary = rax; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); - GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss); + Label slow; - // rdx: elements - // Search the dictionary placing the result in rax. - GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax); + __ movp(dictionary, + FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), rbx, rdi, + rax); __ ret(0); - // Cache miss: Jump to runtime. - __ bind(&miss); - GenerateMiss(masm); + // Dictionary load failed, go slow (but don't miss). + __ bind(&slow); + GenerateRuntimeGetProperty(masm); +} + + +// A register that isn't one of the parameters to the load ic. +static const Register LoadIC_TempRegister() { return rbx; } + + +static const Register KeyedLoadIC_TempRegister() { + return rbx; } void LoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. 
Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->load_miss(), 1); - __ PopReturnAddressTo(rbx); - __ Push(rax); // receiver - __ Push(rcx); // name - __ PushReturnAddressFrom(rbx); + __ PopReturnAddressTo(LoadIC_TempRegister()); + __ Push(ReceiverRegister()); // receiver + __ Push(NameRegister()); // name + __ PushReturnAddressFrom(LoadIC_TempRegister()); // Perform tail call to the entry. ExternalReference ref = @@ -1033,16 +965,12 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. - __ PopReturnAddressTo(rbx); - __ Push(rax); // receiver - __ Push(rcx); // name - __ PushReturnAddressFrom(rbx); + __ PopReturnAddressTo(LoadIC_TempRegister()); + __ Push(ReceiverRegister()); // receiver + __ Push(NameRegister()); // name + __ PushReturnAddressFrom(LoadIC_TempRegister()); // Perform tail call to the entry. __ TailCallRuntime(Runtime::kGetProperty, 2, 1); @@ -1050,19 +978,14 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - + // The return address is on the stack. Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->keyed_load_miss(), 1); - __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rax); // name - __ PushReturnAddressFrom(rbx); + __ PopReturnAddressTo(KeyedLoadIC_TempRegister()); + __ Push(ReceiverRegister()); // receiver + __ Push(NameRegister()); // name + __ PushReturnAddressFrom(KeyedLoadIC_TempRegister()); // Perform tail call to the entry. 
ExternalReference ref = @@ -1071,17 +994,40 @@ } +// IC register specifications +const Register LoadIC::ReceiverRegister() { return rdx; } +const Register LoadIC::NameRegister() { return rcx; } + + +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return rax; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return rbx; +} + + +const Register StoreIC::ReceiverRegister() { return rdx; } +const Register StoreIC::NameRegister() { return rcx; } +const Register StoreIC::ValueRegister() { return rax; } + + +const Register KeyedStoreIC::MapRegister() { + return rbx; +} + + void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. - __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rax); // name - __ PushReturnAddressFrom(rbx); + __ PopReturnAddressTo(KeyedLoadIC_TempRegister()); + __ Push(ReceiverRegister()); // receiver + __ Push(NameRegister()); // name + __ PushReturnAddressFrom(KeyedLoadIC_TempRegister()); // Perform tail call to the entry. __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); @@ -1089,36 +1035,37 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // The return address is on the stack. // Get the receiver from the stack and probe the stub cache. - Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC); + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, rdx, rcx, rbx, no_reg); + masm, flags, ReceiverRegister(), NameRegister(), rbx, no_reg); // Cache miss: Jump to runtime. 
GenerateMiss(masm); } -void StoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- +static void StoreIC_PushArgs(MacroAssembler* masm) { + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + + DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value)); __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rcx); // name - __ Push(rax); // value + __ Push(receiver); + __ Push(name); + __ Push(value); __ PushReturnAddressFrom(rbx); +} + + +void StoreIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. + StoreIC_PushArgs(masm); // Perform tail call to the entry. ExternalReference ref = @@ -1128,18 +1075,15 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + Register dictionary = rbx; Label miss; - GenerateNameDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss); - - GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9); + __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); + GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1); __ ret(0); @@ -1152,60 +1096,43 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // Return address is on 
the stack. + DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) && + !rbx.is(ValueRegister())); + __ PopReturnAddressTo(rbx); - __ Push(rdx); - __ Push(rcx); - __ Push(rax); - __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ Push(ReceiverRegister()); + __ Push(NameRegister()); + __ Push(ValueRegister()); __ Push(Smi::FromInt(strict_mode)); __ PushReturnAddressFrom(rbx); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + // Return address is on the stack. + DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) && + !rbx.is(ValueRegister())); __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rcx); // key - __ Push(rax); // value - __ Push(Smi::FromInt(NONE)); // PropertyAttributes + __ Push(ReceiverRegister()); + __ Push(NameRegister()); + __ Push(ValueRegister()); __ Push(Smi::FromInt(strict_mode)); // Strict mode. __ PushReturnAddressFrom(rbx); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 5, 1); + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); } void StoreIC::GenerateSlow(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - - __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rcx); // key - __ Push(rax); // value - __ PushReturnAddressFrom(rbx); + // Return address is on the stack. + StoreIC_PushArgs(masm); // Do tail-call to runtime routine. 
ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate()); @@ -1214,18 +1141,8 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - - __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rcx); // key - __ Push(rax); // value - __ PushReturnAddressFrom(rbx); + // Return address is on the stack. + StoreIC_PushArgs(masm); // Do tail-call to runtime routine. ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); @@ -1234,18 +1151,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - - __ PopReturnAddressTo(rbx); - __ Push(rdx); // receiver - __ Push(rcx); // key - __ Push(rax); // value - __ PushReturnAddressFrom(rbx); + // Return address is on the stack. + StoreIC_PushArgs(masm); // Do tail-call to runtime routine. ExternalReference ref = @@ -1296,14 +1203,14 @@ // If the instruction following the call is not a test al, nothing // was inlined. if (*test_instruction_address != Assembler::kTestAlByte) { - ASSERT(*test_instruction_address == Assembler::kNopByte); + DCHECK(*test_instruction_address == Assembler::kNopByte); return; } Address delta_address = test_instruction_address + 1; // The delta to the start of the map check instruction and the // condition code uses at the patched jump. - int8_t delta = *reinterpret_cast<int8_t*>(delta_address); + uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address); if (FLAG_trace_ic) { PrintF("[ patching ic at %p, test=%p, delta=%d\n", address, test_instruction_address, delta); @@ -1313,7 +1220,7 @@ // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the // reverse operation of that. 
Address jmp_address = test_instruction_address - delta; - ASSERT((check == ENABLE_INLINED_SMI_CHECK) + DCHECK((check == ENABLE_INLINED_SMI_CHECK) ? (*jmp_address == Assembler::kJncShortOpcode || *jmp_address == Assembler::kJcShortOpcode) : (*jmp_address == Assembler::kJnzShortOpcode || diff -Nru nodejs-0.11.13/deps/v8/src/x64/lithium-codegen-x64.cc nodejs-0.11.15/deps/v8/src/x64/lithium-codegen-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/lithium-codegen-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/lithium-codegen-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "x64/lithium-codegen-x64.h" -#include "code-stubs.h" -#include "stub-cache.h" -#include "hydrogen-osr.h" +#include "src/code-stubs.h" +#include "src/hydrogen-osr.h" +#include "src/stub-cache.h" +#include "src/x64/lithium-codegen-x64.h" namespace v8 { namespace internal { @@ -67,7 +44,7 @@ bool LCodeGen::GenerateCode() { LPhase phase("Z_Code generation", chunk()); - ASSERT(is_unused()); + DCHECK(is_unused()); status_ = GENERATING; // Open a frame scope to indicate that there is a frame on the stack. 
The @@ -84,18 +61,11 @@ void LCodeGen::FinishCode(Handle<Code> code) { - ASSERT(is_done()); + DCHECK(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); PopulateDeoptimizationData(code); - info()->CommitDependencies(code); -} - - -void LChunkBuilder::Abort(BailoutReason reason) { - info()->set_bailout_reason(reason); - status_ = ABORTED; } @@ -110,8 +80,8 @@ void LCodeGen::SaveCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -126,8 +96,8 @@ void LCodeGen::RestoreCallerDoubles() { - ASSERT(info()->saves_caller_doubles()); - ASSERT(NeedsEagerFrame()); + DCHECK(info()->saves_caller_doubles()); + DCHECK(NeedsEagerFrame()); Comment(";;; Restore clobbered callee double registers"); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -142,7 +112,7 @@ bool LCodeGen::GeneratePrologue() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (info()->IsOptimizing()) { ProfileEntryHookStub::MaybeCallEntryHook(masm_); @@ -167,7 +137,7 @@ __ j(not_equal, &ok, Label::kNear); __ movp(rcx, GlobalObjectOperand()); - __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset)); + __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset)); __ movp(args.GetReceiverOperand(), rcx); @@ -177,9 +147,13 @@ info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - ASSERT(!frame_is_built_); + DCHECK(!frame_is_built_); frame_is_built_ = true; - __ Prologue(info()->IsStub() ? 
BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } info()->AddNoFrameRange(0, masm_->pc_offset()); } @@ -193,7 +167,7 @@ #endif __ Push(rax); __ Set(rax, slots); - __ movq(kScratchRegister, kSlotsZapValue); + __ Set(kScratchRegister, kSlotsZapValue); Label loop; __ bind(&loop); __ movp(MemOperand(rsp, rax, times_pointer_size, 0), @@ -217,13 +191,16 @@ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); + bool need_write_barrier = true; // Argument to NewContext is the function, which is still in rdi. if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; } else { __ Push(rdi); - __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewFunctionContext, 1); } RecordSafepoint(Safepoint::kNoLazyDeopt); // Context is returned in rax. It replaces the context passed to us. @@ -244,7 +221,14 @@ int context_offset = Context::SlotOffset(var->index()); __ movp(Operand(rsi, context_offset), rax); // Update the write barrier. This clobbers rax and rbx. - __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs); + if (need_write_barrier) { + __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } } } Comment(";;; End allocate local context"); @@ -268,7 +252,7 @@ // Adjust the frame size, subsuming the unoptimized frame into the // optimized frame. 
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); - ASSERT(slots >= 0); + DCHECK(slots >= 0); __ subp(rsp, Immediate(slots * kPointerSize)); } @@ -284,13 +268,24 @@ void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { + if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() && + instr->hydrogen_value()->representation().IsInteger32() && + instr->result()->IsRegister()) { + __ AssertZeroExtended(ToRegister(instr->result())); + } + if (instr->HasResult() && instr->MustSignExtendResult(chunk())) { + // We sign extend the dehoisted key at the definition point when the pointer + // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use + // points and MustSignExtendResult is always false. We can't use + // STATIC_ASSERT here as the pointer size is 32-bit for x32. + DCHECK(kPointerSize == kInt64Size); if (instr->result()->IsRegister()) { Register result_reg = ToRegister(instr->result()); __ movsxlq(result_reg, result_reg); } else { // Sign extend the 32bit result in the stack slots. - ASSERT(instr->result()->IsStackSlot()); + DCHECK(instr->result()->IsStackSlot()); Operand src = ToOperand(instr->result()); __ movsxlq(kScratchRegister, src); __ movq(src, kScratchRegister); @@ -315,7 +310,7 @@ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); } if (jump_table_[i].needs_frame) { - ASSERT(!info()->saves_caller_doubles()); + DCHECK(!info()->saves_caller_doubles()); __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry)); if (needs_frame.is_bound()) { __ jmp(&needs_frame); @@ -328,7 +323,7 @@ // This variant of deopt can only be used with stubs. Since we don't // have a function pointer to install in the stack frame that we're // building, install a special marker there instead. 
- ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); __ Move(rsi, Smi::FromInt(StackFrame::STUB)); __ Push(rsi); __ movp(rsi, MemOperand(rsp, kPointerSize)); @@ -336,7 +331,7 @@ } } else { if (info()->saves_caller_doubles()) { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); RestoreCallerDoubles(); } __ call(entry, RelocInfo::RUNTIME_ENTRY); @@ -347,7 +342,7 @@ bool LCodeGen::GenerateDeferredCode() { - ASSERT(is_generating()); + DCHECK(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; @@ -365,8 +360,8 @@ __ bind(code->entry()); if (NeedsDeferredFrame()) { Comment(";;; Build frame"); - ASSERT(!frame_is_built_); - ASSERT(info()->IsStub()); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); frame_is_built_ = true; // Build the frame in such a way that esi isn't trashed. __ pushq(rbp); // Caller's frame pointer. @@ -379,7 +374,7 @@ if (NeedsDeferredFrame()) { __ bind(code->done()); Comment(";;; Destroy frame"); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); frame_is_built_ = false; __ movp(rsp, rbp); __ popq(rbp); @@ -396,7 +391,7 @@ bool LCodeGen::GenerateSafepointTable() { - ASSERT(is_done()); + DCHECK(is_done()); safepoints_.Emit(masm(), GetStackSlotCount()); return !is_aborted(); } @@ -413,13 +408,13 @@ Register LCodeGen::ToRegister(LOperand* op) const { - ASSERT(op->IsRegister()); + DCHECK(op->IsRegister()); return ToRegister(op->index()); } XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { - ASSERT(op->IsDoubleRegister()); + DCHECK(op->IsDoubleRegister()); return ToDoubleRegister(op->index()); } @@ -441,8 +436,17 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { + return ToRepresentation(op, Representation::Integer32()); +} + + +int32_t LCodeGen::ToRepresentation(LConstantOperand* op, + const Representation& r) const { HConstant* constant = chunk_->LookupConstant(op); - return constant->Integer32Value(); + int32_t value = 
constant->Integer32Value(); + if (r.IsInteger32()) return value; + DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged()); + return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value))); } @@ -454,27 +458,27 @@ double LCodeGen::ToDouble(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasDoubleValue()); + DCHECK(constant->HasDoubleValue()); return constant->DoubleValue(); } ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(constant->HasExternalReferenceValue()); + DCHECK(constant->HasExternalReferenceValue()); return constant->ExternalReferenceValue(); } Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); return constant->handle(isolate()); } static int ArgumentsOffsetWithoutFrame(int index) { - ASSERT(index < 0); + DCHECK(index < 0); return -(index + 1) * kPointerSize + kPCOnStackSize; } @@ -482,7 +486,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const { // Does not handle registers. In X64 assembler, plain registers are not // representable as an Operand. 
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); if (NeedsEagerFrame()) { return Operand(rbp, StackSlotOffset(op->index())); } else { @@ -517,13 +521,13 @@ translation->BeginConstructStubFrame(closure_id, translation_size); break; case JS_GETTER: - ASSERT(translation_size == 1); - ASSERT(height == 0); + DCHECK(translation_size == 1); + DCHECK(height == 0); translation->BeginGetterStubFrame(closure_id); break; case JS_SETTER: - ASSERT(translation_size == 2); - ASSERT(height == 0); + DCHECK(translation_size == 2); + DCHECK(height == 0); translation->BeginSetterStubFrame(closure_id); break; case ARGUMENTS_ADAPTOR: @@ -622,7 +626,7 @@ LInstruction* instr, SafepointMode safepoint_mode, int argc) { - ASSERT(instr != NULL); + DCHECK(instr != NULL); __ call(code, mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc); @@ -646,8 +650,8 @@ int num_arguments, LInstruction* instr, SaveFPRegsMode save_doubles) { - ASSERT(instr != NULL); - ASSERT(instr->HasPointerMap()); + DCHECK(instr != NULL); + DCHECK(instr->HasPointerMap()); __ CallRuntime(function, num_arguments, save_doubles); @@ -687,6 +691,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); if (!environment->HasBeenRegistered()) { // Physical stack frame layout: // -x ............. -4 0 ..................................... 
y @@ -725,9 +730,9 @@ LEnvironment* environment, Deoptimizer::BailoutType bailout_type) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); - ASSERT(environment->HasBeenRegistered()); + DCHECK(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - ASSERT(info()->IsOptimizing() || info()->IsStub()); + DCHECK(info()->IsOptimizing() || info()->IsStub()); Address entry = Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { @@ -739,7 +744,7 @@ ExternalReference count = ExternalReference::stress_deopt_count(isolate()); Label no_deopt; __ pushfq(); - __ Push(rax); + __ pushq(rax); Operand count_operand = masm()->ExternalOperand(count, kScratchRegister); __ movl(rax, count_operand); __ subl(rax, Immediate(1)); @@ -747,13 +752,13 @@ if (FLAG_trap_on_deopt) __ int3(); __ movl(rax, Immediate(FLAG_deopt_every_n_times)); __ movl(count_operand, rax); - __ Pop(rax); + __ popq(rax); __ popfq(); - ASSERT(frame_is_built_); + DCHECK(frame_is_built_); __ call(entry, RelocInfo::RUNTIME_ENTRY); __ bind(&no_deopt); __ movl(count_operand, rax); - __ Pop(rax); + __ popq(rax); __ popfq(); } @@ -766,7 +771,7 @@ __ bind(&done); } - ASSERT(info()->IsStub() || frame_is_built_); + DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. 
if (cc == no_condition && frame_is_built_ && @@ -806,7 +811,7 @@ int length = deoptimizations_.length(); if (length == 0) return; Handle<DeoptimizationInputData> data = - factory()->NewDeoptimizationInputData(length, TENURED); + DeoptimizationInputData::New(isolate(), length, 0, TENURED); Handle<ByteArray> translations = translations_.CreateByteArray(isolate()->factory()); @@ -857,7 +862,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { - ASSERT(deoptimization_literals_.length() == 0); + DCHECK(deoptimization_literals_.length() == 0); const ZoneList<Handle<JSFunction> >* inlined_closures = chunk()->inlined_closures(); @@ -877,7 +882,7 @@ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); } else { - ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS); + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS); RecordSafepointWithRegisters( instr->pointer_map(), argc, Safepoint::kLazyDeopt); } @@ -889,7 +894,7 @@ Safepoint::Kind kind, int arguments, Safepoint::DeoptMode deopt_mode) { - ASSERT(kind == expected_safepoint_kind_); + DCHECK(kind == expected_safepoint_kind_); const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); @@ -978,22 +983,22 @@ void LCodeGen::DoCallStub(LCallStub* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->result()).is(rax)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpExec: { - RegExpExecStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + RegExpExecStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { - SubStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } case 
CodeStub::StringCompare: { - StringCompareStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } default: @@ -1010,7 +1015,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister(instr->result()))); + DCHECK(dividend.is(ToRegister(instr->result()))); // Theoretically, a variation of the branch-free code for integer division by // a power of 2 (calculating the remainder via an additional multiplication @@ -1043,7 +1048,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->result()).is(rax)); if (divisor == 0) { DeoptimizeIf(no_condition, instr->environment()); @@ -1071,12 +1076,12 @@ HMod* hmod = instr->hydrogen(); Register left_reg = ToRegister(instr->left()); - ASSERT(left_reg.is(rax)); + DCHECK(left_reg.is(rax)); Register right_reg = ToRegister(instr->right()); - ASSERT(!right_reg.is(rax)); - ASSERT(!right_reg.is(rdx)); + DCHECK(!right_reg.is(rax)); + DCHECK(!right_reg.is(rdx)); Register result_reg = ToRegister(instr->result()); - ASSERT(result_reg.is(rdx)); + DCHECK(result_reg.is(rdx)); Label done; // Check for x % 0, idiv would signal a divide error. We have to @@ -1126,7 +1131,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(dividend.is(ToRegister(instr->result()))); + DCHECK(dividend.is(ToRegister(instr->result()))); // If the divisor is positive, things are easy: There can be no deopts and we // can simply do an arithmetic right shift. 
@@ -1138,22 +1143,29 @@ } // If the divisor is negative, we have to negate and handle edge cases. - Label not_kmin_int, done; __ negl(dividend); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(zero, instr->environment()); } - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - // Note that we could emit branch-free code, but that would need one more - // register. - __ j(no_overflow, ¬_kmin_int, Label::kNear); - if (divisor == -1) { - DeoptimizeIf(no_condition, instr->environment()); - } else { - __ movl(dividend, Immediate(kMinInt / divisor)); - __ jmp(&done, Label::kNear); + + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + DeoptimizeIf(overflow, instr->environment()); } + return; + } + + // If the negation could not overflow, simply shifting is OK. + if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ sarl(dividend, Immediate(shift)); + return; } + + Label not_kmin_int, done; + __ j(no_overflow, ¬_kmin_int, Label::kNear); + __ movl(dividend, Immediate(kMinInt / divisor)); + __ jmp(&done, Label::kNear); __ bind(¬_kmin_int); __ sarl(dividend, Immediate(shift)); __ bind(&done); @@ -1163,7 +1175,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(ToRegister(instr->result()).is(rdx)); + DCHECK(ToRegister(instr->result()).is(rdx)); if (divisor == 0) { DeoptimizeIf(no_condition, instr->environment()); @@ -1189,7 +1201,7 @@ // In the general case we may need to adjust before and after the truncating // division to get a flooring division. Register temp = ToRegister(instr->temp3()); - ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx)); + DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx)); Label needs_adjustment, done; __ cmpl(dividend, Immediate(0)); __ j(divisor > 0 ? 
less : greater, &needs_adjustment, Label::kNear); @@ -1205,12 +1217,65 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. +void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + Register remainder = ToRegister(instr->temp()); + Register result = ToRegister(instr->result()); + DCHECK(dividend.is(rax)); + DCHECK(remainder.is(rdx)); + DCHECK(result.is(rax)); + DCHECK(!divisor.is(rax)); + DCHECK(!divisor.is(rdx)); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + __ testl(divisor, divisor); + DeoptimizeIf(zero, instr->environment()); + } + + // Check for (0 / -x) that will produce negative zero. + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label dividend_not_zero; + __ testl(dividend, dividend); + __ j(not_zero, ÷nd_not_zero, Label::kNear); + __ testl(divisor, divisor); + DeoptimizeIf(sign, instr->environment()); + __ bind(÷nd_not_zero); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow)) { + Label dividend_not_min_int; + __ cmpl(dividend, Immediate(kMinInt)); + __ j(not_zero, ÷nd_not_min_int, Label::kNear); + __ cmpl(divisor, Immediate(-1)); + DeoptimizeIf(zero, instr->environment()); + __ bind(÷nd_not_min_int); + } + + // Sign extend to rdx (= remainder). 
+ __ cdq(); + __ idivl(divisor); + + Label done; + __ testl(remainder, remainder); + __ j(zero, &done, Label::kNear); + __ xorl(remainder, divisor); + __ sarl(remainder, Immediate(31)); + __ addl(result, remainder); + __ bind(&done); +} + + void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); Register result = ToRegister(instr->result()); - ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); - ASSERT(!result.is(dividend)); + DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); // Check for (0 / -x) that will produce negative zero. HDiv* hdiv = instr->hydrogen(); @@ -1246,7 +1311,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { Register dividend = ToRegister(instr->dividend()); int32_t divisor = instr->divisor(); - ASSERT(ToRegister(instr->result()).is(rdx)); + DCHECK(ToRegister(instr->result()).is(rdx)); if (divisor == 0) { DeoptimizeIf(no_condition, instr->environment()); @@ -1261,7 +1326,7 @@ } __ TruncatingDiv(dividend, Abs(divisor)); - if (divisor < 0) __ negp(rdx); + if (divisor < 0) __ negl(rdx); if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { __ movl(rax, rdx); @@ -1272,17 +1337,17 @@ } +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 
void LCodeGen::DoDivI(LDivI* instr) { HBinaryOperation* hdiv = instr->hydrogen(); - Register dividend = ToRegister(instr->left()); - Register divisor = ToRegister(instr->right()); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); Register remainder = ToRegister(instr->temp()); - Register result = ToRegister(instr->result()); - ASSERT(dividend.is(rax)); - ASSERT(remainder.is(rdx)); - ASSERT(result.is(rax)); - ASSERT(!divisor.is(rax)); - ASSERT(!divisor.is(rdx)); + DCHECK(dividend.is(rax)); + DCHECK(remainder.is(rdx)); + DCHECK(ToRegister(instr->result()).is(rax)); + DCHECK(!divisor.is(rax)); + DCHECK(!divisor.is(rdx)); // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { @@ -1314,15 +1379,7 @@ __ cdq(); __ idivl(divisor); - if (hdiv->IsMathFloorOfDiv()) { - Label done; - __ testl(remainder, remainder); - __ j(zero, &done, Label::kNear); - __ xorl(remainder, divisor); - __ sarl(remainder, Immediate(31)); - __ addl(result, remainder); - __ bind(&done); - } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { // Deoptimize if remainder is not 0. __ testl(remainder, remainder); DeoptimizeIf(not_zero, instr->environment()); @@ -1415,8 +1472,11 @@ } __ j(not_zero, &done, Label::kNear); if (right->IsConstantOperand()) { - // Constant can't be represented as Smi due to immediate size limit. - ASSERT(!instr->hydrogen_value()->representation().IsSmi()); + // Constant can't be represented as 32-bit Smi due to immediate size + // limit. + DCHECK(SmiValuesAre32Bits() + ? 
!instr->hydrogen_value()->representation().IsSmi() + : SmiValuesAre31Bits()); if (ToInteger32(LConstantOperand::cast(right)) < 0) { DeoptimizeIf(no_condition, instr->environment()); } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { @@ -1447,11 +1507,13 @@ void LCodeGen::DoBitI(LBitI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); - ASSERT(left->IsRegister()); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); if (right->IsConstantOperand()) { - int32_t right_operand = ToInteger32(LConstantOperand::cast(right)); + int32_t right_operand = + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->right()->representation()); switch (instr->op()) { case Token::BIT_AND: __ andl(ToRegister(left), Immediate(right_operand)); @@ -1473,29 +1535,53 @@ } else if (right->IsStackSlot()) { switch (instr->op()) { case Token::BIT_AND: - __ andp(ToRegister(left), ToOperand(right)); + if (instr->IsInteger32()) { + __ andl(ToRegister(left), ToOperand(right)); + } else { + __ andp(ToRegister(left), ToOperand(right)); + } break; case Token::BIT_OR: - __ orp(ToRegister(left), ToOperand(right)); + if (instr->IsInteger32()) { + __ orl(ToRegister(left), ToOperand(right)); + } else { + __ orp(ToRegister(left), ToOperand(right)); + } break; case Token::BIT_XOR: - __ xorp(ToRegister(left), ToOperand(right)); + if (instr->IsInteger32()) { + __ xorl(ToRegister(left), ToOperand(right)); + } else { + __ xorp(ToRegister(left), ToOperand(right)); + } break; default: UNREACHABLE(); break; } } else { - ASSERT(right->IsRegister()); + DCHECK(right->IsRegister()); switch (instr->op()) { case Token::BIT_AND: - __ andp(ToRegister(left), ToRegister(right)); + if (instr->IsInteger32()) { + __ andl(ToRegister(left), ToRegister(right)); + } else { + __ andp(ToRegister(left), ToRegister(right)); + } break; case Token::BIT_OR: - __ orp(ToRegister(left), ToRegister(right)); + if (instr->IsInteger32()) 
{ + __ orl(ToRegister(left), ToRegister(right)); + } else { + __ orp(ToRegister(left), ToRegister(right)); + } break; case Token::BIT_XOR: - __ xorp(ToRegister(left), ToRegister(right)); + if (instr->IsInteger32()) { + __ xorl(ToRegister(left), ToRegister(right)); + } else { + __ xorp(ToRegister(left), ToRegister(right)); + } break; default: UNREACHABLE(); @@ -1508,10 +1594,10 @@ void LCodeGen::DoShiftI(LShiftI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); - ASSERT(left->IsRegister()); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); if (right->IsRegister()) { - ASSERT(ToRegister(right).is(rcx)); + DCHECK(ToRegister(right).is(rcx)); switch (instr->op()) { case Token::ROR: @@ -1549,17 +1635,30 @@ } break; case Token::SHR: - if (shift_count == 0 && instr->can_deopt()) { + if (shift_count != 0) { + __ shrl(ToRegister(left), Immediate(shift_count)); + } else if (instr->can_deopt()) { __ testl(ToRegister(left), ToRegister(left)); DeoptimizeIf(negative, instr->environment()); - } else { - __ shrl(ToRegister(left), Immediate(shift_count)); } break; case Token::SHL: if (shift_count != 0) { if (instr->hydrogen_value()->representation().IsSmi()) { - __ shl(ToRegister(left), Immediate(shift_count)); + if (SmiValuesAre32Bits()) { + __ shlp(ToRegister(left), Immediate(shift_count)); + } else { + DCHECK(SmiValuesAre31Bits()); + if (instr->can_deopt()) { + if (shift_count != 1) { + __ shll(ToRegister(left), Immediate(shift_count - 1)); + } + __ Integer32ToSmi(ToRegister(left), ToRegister(left)); + DeoptimizeIf(overflow, instr->environment()); + } else { + __ shll(ToRegister(left), Immediate(shift_count)); + } + } } else { __ shll(ToRegister(left), Immediate(shift_count)); } @@ -1576,11 +1675,13 @@ void LCodeGen::DoSubI(LSubI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); + DCHECK(left->Equals(instr->result())); if 
(right->IsConstantOperand()) { - __ subl(ToRegister(left), - Immediate(ToInteger32(LConstantOperand::cast(right)))); + int32_t right_operand = + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->right()->representation()); + __ subl(ToRegister(left), Immediate(right_operand)); } else if (right->IsRegister()) { if (instr->hydrogen_value()->representation().IsSmi()) { __ subp(ToRegister(left), ToRegister(right)); @@ -1602,7 +1703,12 @@ void LCodeGen::DoConstantI(LConstantI* instr) { - __ Set(ToRegister(instr->result()), instr->value()); + Register dst = ToRegister(instr->result()); + if (instr->value() == 0) { + __ xorl(dst, dst); + } else { + __ movl(dst, Immediate(instr->value())); + } } @@ -1612,7 +1718,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { - ASSERT(instr->result()->IsDoubleRegister()); + DCHECK(instr->result()->IsDoubleRegister()); XMMRegister res = ToDoubleRegister(instr->result()); double v = instr->value(); uint64_t int_val = BitCast<uint64_t, double>(v); @@ -1634,8 +1740,9 @@ void LCodeGen::DoConstantT(LConstantT* instr) { - Handle<Object> value = instr->value(isolate()); - __ Move(ToRegister(instr->result()), value); + Handle<Object> object = instr->value(isolate()); + AllowDeferredHandleDereference smi_check; + __ Move(ToRegister(instr->result()), object); } @@ -1651,8 +1758,8 @@ Register result = ToRegister(instr->result()); Smi* index = instr->index(); Label runtime, done, not_date_object; - ASSERT(object.is(result)); - ASSERT(object.is(rax)); + DCHECK(object.is(result)); + DCHECK(object.is(rax)); Condition cc = masm()->CheckSmi(object); DeoptimizeIf(cc, instr->environment()); @@ -1747,12 +1854,12 @@ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); if (instr->value()->IsConstantOperand()) { int value = ToInteger32(LConstantOperand::cast(instr->value())); - ASSERT_LE(0, value); + DCHECK_LE(0, value); if (encoding == String::ONE_BYTE_ENCODING) { - ASSERT_LE(value, String::kMaxOneByteCharCode); + 
DCHECK_LE(value, String::kMaxOneByteCharCode); __ movb(operand, Immediate(value)); } else { - ASSERT_LE(value, String::kMaxUtf16CodeUnit); + DCHECK_LE(value, String::kMaxUtf16CodeUnit); __ movw(operand, Immediate(value)); } } else { @@ -1775,7 +1882,11 @@ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { if (right->IsConstantOperand()) { - int32_t offset = ToInteger32(LConstantOperand::cast(right)); + // No support for smi-immediates for 32-bit SMI. + DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits()); + int32_t offset = + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->right()->representation()); if (is_p) { __ leap(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); @@ -1793,12 +1904,15 @@ } } else { if (right->IsConstantOperand()) { + // No support for smi-immediates for 32-bit SMI. + DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits()); + int32_t right_operand = + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->right()->representation()); if (is_p) { - __ addp(ToRegister(left), - Immediate(ToInteger32(LConstantOperand::cast(right)))); + __ addp(ToRegister(left), Immediate(right_operand)); } else { - __ addl(ToRegister(left), - Immediate(ToInteger32(LConstantOperand::cast(right)))); + __ addl(ToRegister(left), Immediate(right_operand)); } } else if (right->IsRegister()) { if (is_p) { @@ -1823,7 +1937,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - ASSERT(left->Equals(instr->result())); + DCHECK(left->Equals(instr->result())); HMathMinMax::Operation operation = instr->hydrogen()->operation(); if (instr->hydrogen()->representation().IsSmiOrInteger32()) { Label return_left; @@ -1832,9 +1946,12 @@ : greater_equal; Register left_reg = ToRegister(left); if (right->IsConstantOperand()) { - Immediate right_imm = - Immediate(ToInteger32(LConstantOperand::cast(right))); - 
ASSERT(!instr->hydrogen_value()->representation().IsSmi()); + Immediate right_imm = Immediate( + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->right()->representation())); + DCHECK(SmiValuesAre32Bits() + ? !instr->hydrogen()->representation().IsSmi() + : SmiValuesAre31Bits()); __ cmpl(left_reg, right_imm); __ j(condition, &return_left, Label::kNear); __ movp(left_reg, right_imm); @@ -1859,7 +1976,7 @@ } __ bind(&return_left); } else { - ASSERT(instr->hydrogen()->representation().IsDouble()); + DCHECK(instr->hydrogen()->representation().IsDouble()); Label check_nan_left, check_zero, return_left, return_right; Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; XMMRegister left_reg = ToDoubleRegister(left); @@ -1900,7 +2017,7 @@ XMMRegister right = ToDoubleRegister(instr->right()); XMMRegister result = ToDoubleRegister(instr->result()); // All operations except MOD are computed in-place. - ASSERT(instr->op() == Token::MOD || left.is(result)); + DCHECK(instr->op() == Token::MOD || left.is(result)); switch (instr->op()) { case Token::ADD: __ addsd(left, right); @@ -1921,7 +2038,7 @@ XMMRegister xmm_scratch = double_scratch0(); __ PrepareCallCFunction(2); __ movaps(xmm_scratch, left); - ASSERT(right.is(xmm1)); + DCHECK(right.is(xmm1)); __ CallCFunction( ExternalReference::mod_two_doubles_operation(isolate()), 2); __ movaps(result, xmm_scratch); @@ -1935,13 +2052,13 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->left()).is(rdx)); - ASSERT(ToRegister(instr->right()).is(rax)); - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->left()).is(rdx)); + DCHECK(ToRegister(instr->right()).is(rax)); + DCHECK(ToRegister(instr->result()).is(rax)); - BinaryOpICStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + BinaryOpICStub 
stub(isolate(), instr->op(), NO_OVERWRITE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -1982,45 +2099,45 @@ void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); Register reg = ToRegister(instr->value()); __ testl(reg, reg); EmitBranch(instr, not_zero); } else if (r.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); Register reg = ToRegister(instr->value()); __ testp(reg, reg); EmitBranch(instr, not_zero); } else if (r.IsDouble()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); XMMRegister reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(reg, xmm_scratch); EmitBranch(instr, not_equal); } else { - ASSERT(r.IsTagged()); + DCHECK(r.IsTagged()); Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ CompareRoot(reg, Heap::kTrueValueRootIndex); EmitBranch(instr, equal); } else if (type.IsSmi()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ SmiCompare(reg, Smi::FromInt(0)); EmitBranch(instr, not_equal); } else if (type.IsJSArray()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); EmitBranch(instr, no_condition); } else if (type.IsHeapNumber()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); EmitBranch(instr, not_equal); } else if (type.IsString()) { - ASSERT(!info()->IsStub()); + DCHECK(!info()->IsStub()); __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); EmitBranch(instr, not_equal); } else { @@ -2163,7 +2280,11 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* 
instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); - Condition cc = TokenToCondition(instr->op(), instr->is_double()); + bool is_unsigned = + instr->is_double() || + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cc = TokenToCondition(instr->op(), is_unsigned); if (left->IsConstantOperand() && right->IsConstantOperand()) { // We can statically evaluate the comparison. @@ -2200,8 +2321,8 @@ } else { __ cmpl(ToOperand(right), Immediate(value)); } - // We transposed the operands. Reverse the condition. - cc = ReverseCondition(cc); + // We commuted the operands, so commute the condition. + cc = CommuteCondition(cc); } else if (instr->hydrogen_value()->representation().IsSmi()) { if (right->IsRegister()) { __ cmpp(ToRegister(left), ToRegister(right)); @@ -2259,7 +2380,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { Representation rep = instr->hydrogen()->value()->representation(); - ASSERT(!rep.IsInteger32()); + DCHECK(!rep.IsInteger32()); if (rep.IsDouble()) { XMMRegister value = ToDoubleRegister(instr->value()); @@ -2287,7 +2408,7 @@ Condition LCodeGen::EmitIsObject(Register input, Label* is_not_object, Label* is_object) { - ASSERT(!input.is(kScratchRegister)); + DCHECK(!input.is(kScratchRegister)); __ JumpIfSmi(input, is_not_object); @@ -2338,7 +2459,7 @@ Register temp = ToRegister(instr->temp()); SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; Condition true_cond = EmitIsString( @@ -2365,7 +2486,7 @@ Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } __ movp(temp, FieldOperand(input, HeapObject::kMapOffset)); @@ -2376,7 +2497,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2393,7 +2514,7 @@ InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; - ASSERT(from == to || to == LAST_TYPE); + DCHECK(from == to || to == LAST_TYPE); return from; } @@ -2412,7 +2533,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { Register input = ToRegister(instr->value()); - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ JumpIfSmi(input, instr->FalseLabel(chunk_)); } @@ -2428,7 +2549,7 @@ __ AssertString(input); __ movl(result, FieldOperand(input, String::kHashFieldOffset)); - ASSERT(String::kHashShift >= kSmiTagSize); + DCHECK(String::kHashShift >= kSmiTagSize); __ IndexFromHash(result, result); } @@ -2451,9 +2572,9 @@ Register input, Register temp, Register temp2) { - ASSERT(!input.is(temp)); - ASSERT(!input.is(temp2)); - ASSERT(!temp.is(temp2)); + DCHECK(!input.is(temp)); + DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); __ JumpIfSmi(input, is_false); @@ -2505,7 +2626,7 @@ // classes and it doesn't have to because you can't access it with natives // syntax. Since both sides are internalized it is sufficient to use an // identity comparison. 
- ASSERT(class_name->IsInternalizedString()); + DCHECK(class_name->IsInternalizedString()); __ Cmp(temp, class_name); // End with the answer in the z flag. } @@ -2533,11 +2654,11 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - InstanceofStub stub(InstanceofStub::kNoFlags); + DCHECK(ToRegister(instr->context()).is(rsi)); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); __ Push(ToRegister(instr->left())); __ Push(ToRegister(instr->right())); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); Label true_value, done; __ testp(rax, rax); __ j(zero, &true_value, Label::kNear); @@ -2565,7 +2686,7 @@ Label map_check_; }; - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); DeferredInstanceOfKnownGlobal* deferred; deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); @@ -2593,7 +2714,7 @@ // Check that the code size between patch label and patch sites is invariant. Label end_of_patched_code; __ bind(&end_of_patched_code); - ASSERT(true); + DCHECK(true); #endif __ jmp(&done, Label::kNear); @@ -2620,27 +2741,27 @@ PushSafepointRegistersScope scope(this); InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>( InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck); - InstanceofStub stub(flags); + InstanceofStub stub(isolate(), flags); __ Push(ToRegister(instr->value())); __ Push(instr->function()); - static const int kAdditionalDelta = 10; + static const int kAdditionalDelta = kPointerSize == kInt64Size ? 
10 : 16; int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; - ASSERT(delta >= 0); + DCHECK(delta >= 0); __ PushImm32(delta); // We are pushing three values on the stack but recording a // safepoint with two arguments because stub is going to // remove the third argument from the stack before jumping // to instanceof builtin on the slow path. - CallCodeGeneric(stub.GetCode(isolate()), + CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS, 2); - ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check)); + DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check)); LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); // Move result to a register that survives the end of the @@ -2660,7 +2781,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2706,7 +2827,7 @@ __ SmiToInteger32(reg, reg); Register return_addr_reg = reg.is(rcx) ? 
rbx : rcx; __ PopReturnAddressTo(return_addr_reg); - __ shl(reg, Immediate(kPointerSizeLog2)); + __ shlp(reg, Immediate(kPointerSizeLog2)); __ addp(rsp, reg); __ jmp(return_addr_reg); } @@ -2727,11 +2848,19 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->global_object()).is(rax)); - ASSERT(ToRegister(instr->result()).is(rax)); - - __ Move(rcx, instr->name()); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(rax)); + + __ Move(LoadIC::NameRegister(), instr->name()); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Move(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(rax)); + __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot())); + } ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -2749,7 +2878,7 @@ if (instr->hydrogen()->RequiresHoleCheck()) { // We have a temp because CompareRoot might clobber kScratchRegister. Register cell = ToRegister(instr->temp()); - ASSERT(!value.is(cell)); + DCHECK(!value.is(cell)); __ Move(cell, cell_handle, RelocInfo::CELL); __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); DeoptimizeIf(equal, instr->environment()); @@ -2801,7 +2930,7 @@ if (instr->hydrogen()->NeedsWriteBarrier()) { SmiCheck check_needed = - instr->hydrogen()->value()->IsHeapObject() + instr->hydrogen()->value()->type().IsHeapObject() ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; int offset = Context::SlotOffset(instr->slot_index()); Register scratch = ToRegister(instr->temp()); @@ -2825,7 +2954,7 @@ if (access.IsExternalMemory()) { Register result = ToRegister(instr->result()); if (instr->object()->IsConstantOperand()) { - ASSERT(result.is(rax)); + DCHECK(result.is(rax)); __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object()))); } else { Register object = ToRegister(instr->object()); @@ -2848,17 +2977,17 @@ } Representation representation = access.representation(); - if (representation.IsSmi() && + if (representation.IsSmi() && SmiValuesAre32Bits() && instr->hydrogen()->representation().IsInteger32()) { -#ifdef DEBUG - Register scratch = kScratchRegister; - __ Load(scratch, FieldOperand(object, offset), representation); - __ AssertSmi(scratch); -#endif + if (FLAG_debug_code) { + Register scratch = kScratchRegister; + __ Load(scratch, FieldOperand(object, offset), representation); + __ AssertSmi(scratch); + } // Read int value directly from upper half of the smi. STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + DCHECK(kSmiTagSize + kSmiShiftSize == 32); offset += kPointerSize / 2; representation = Representation::Integer32(); } @@ -2867,11 +2996,19 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->object()).is(rax)); - ASSERT(ToRegister(instr->result()).is(rax)); - - __ Move(rcx, instr->name()); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(rax)); + + __ Move(LoadIC::NameRegister(), instr->name()); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Move(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. 
+ DCHECK(LoadIC::SlotRegister().is(rax)); + __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot())); + } Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2881,16 +3018,6 @@ Register function = ToRegister(instr->function()); Register result = ToRegister(instr->result()); - // Check that the function really is a function. - __ CmpObjectType(function, JS_FUNCTION_TYPE, result); - DeoptimizeIf(not_equal, instr->environment()); - - // Check whether the function has an instance prototype. - Label non_instance; - __ testb(FieldOperand(result, Map::kBitFieldOffset), - Immediate(1 << Map::kHasNonInstancePrototype)); - __ j(not_zero, &non_instance, Label::kNear); - // Get the prototype or initial map from the function. __ movp(result, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); @@ -2906,12 +3033,6 @@ // Get the prototype from the initial map. __ movp(result, FieldOperand(result, Map::kPrototypeOffset)); - __ jmp(&done, Label::kNear); - - // Non-instance prototype: Fetch prototype from constructor field - // in the function's map. - __ bind(&non_instance); - __ movp(result, FieldOperand(result, Map::kConstructorOffset)); // All done. __ bind(&done); @@ -2958,15 +3079,24 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); - int base_offset = instr->is_fixed_typed_array() - ? 
FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; + if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { + Register key_reg = ToRegister(key); + Representation key_representation = + instr->hydrogen()->key()->representation(); + if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) { + __ SmiToInteger64(key_reg, key_reg); + } else if (instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(key_reg, key_reg); + } + } Operand operand(BuildFastArrayOperand( instr->elements(), key, + instr->hydrogen()->key()->representation(), elements_kind, - base_offset, - instr->additional_index())); + instr->base_offset())); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS) { @@ -2981,25 +3111,25 @@ switch (elements_kind) { case EXTERNAL_INT8_ELEMENTS: case INT8_ELEMENTS: - __ movsxbq(result, operand); + __ movsxbl(result, operand); break; case EXTERNAL_UINT8_ELEMENTS: case EXTERNAL_UINT8_CLAMPED_ELEMENTS: case UINT8_ELEMENTS: case UINT8_CLAMPED_ELEMENTS: - __ movzxbp(result, operand); + __ movzxbl(result, operand); break; case EXTERNAL_INT16_ELEMENTS: case INT16_ELEMENTS: - __ movsxwq(result, operand); + __ movsxwl(result, operand); break; case EXTERNAL_UINT16_ELEMENTS: case UINT16_ELEMENTS: - __ movzxwp(result, operand); + __ movzxwl(result, operand); break; case EXTERNAL_INT32_ELEMENTS: case INT32_ELEMENTS: - __ movsxlq(result, operand); + __ movl(result, operand); break; case EXTERNAL_UINT32_ELEMENTS: case UINT32_ELEMENTS: @@ -3031,15 +3161,19 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { XMMRegister result(ToDoubleRegister(instr->result())); LOperand* key = instr->key(); + if (kPointerSize == kInt32Size && !key->IsConstantOperand() && + instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address 
computation happens in 64 bits + __ movsxlq(ToRegister(key), ToRegister(key)); + } if (instr->hydrogen()->RequiresHoleCheck()) { - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); Operand hole_check_operand = BuildFastArrayOperand( instr->elements(), key, + instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, - offset, - instr->additional_index()); + instr->base_offset() + sizeof(kHoleNanLower32)); __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); DeoptimizeIf(equal, instr->environment()); } @@ -3047,9 +3181,9 @@ Operand double_load_operand = BuildFastArrayOperand( instr->elements(), key, + instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); + instr->base_offset()); __ movsd(result, double_load_operand); } @@ -3059,35 +3193,41 @@ Register result = ToRegister(instr->result()); LOperand* key = instr->key(); bool requires_hole_check = hinstr->RequiresHoleCheck(); - int offset = FixedArray::kHeaderSize - kHeapObjectTag; Representation representation = hinstr->representation(); + int offset = instr->base_offset(); - if (representation.IsInteger32() && + if (kPointerSize == kInt32Size && !key->IsConstantOperand() && + instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(ToRegister(key), ToRegister(key)); + } + if (representation.IsInteger32() && SmiValuesAre32Bits() && hinstr->elements_kind() == FAST_SMI_ELEMENTS) { - ASSERT(!requires_hole_check); -#ifdef DEBUG - Register scratch = kScratchRegister; - __ Load(scratch, - BuildFastArrayOperand(instr->elements(), - key, - FAST_ELEMENTS, - offset, - instr->additional_index()), - Representation::Smi()); - __ AssertSmi(scratch); -#endif + DCHECK(!requires_hole_check); + if (FLAG_debug_code) { + Register scratch = kScratchRegister; + __ Load(scratch, + 
BuildFastArrayOperand(instr->elements(), + key, + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, + offset), + Representation::Smi()); + __ AssertSmi(scratch); + } // Read int value directly from upper half of the smi. STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + DCHECK(kSmiTagSize + kSmiShiftSize == 32); offset += kPointerSize / 2; } __ Load(result, BuildFastArrayOperand(instr->elements(), key, + instr->hydrogen()->key()->representation(), FAST_ELEMENTS, - offset, - instr->additional_index()), + offset), representation); // Check for the hole value. @@ -3117,9 +3257,9 @@ Operand LCodeGen::BuildFastArrayOperand( LOperand* elements_pointer, LOperand* key, + Representation key_representation, ElementsKind elements_kind, - uint32_t offset, - uint32_t additional_index) { + uint32_t offset) { Register elements_pointer_reg = ToRegister(elements_pointer); int shift_size = ElementsKindToShiftSize(elements_kind); if (key->IsConstantOperand()) { @@ -3128,22 +3268,35 @@ Abort(kArrayIndexConstantValueTooBig); } return Operand(elements_pointer_reg, - ((constant_value + additional_index) << shift_size) - + offset); + (constant_value << shift_size) + offset); } else { + // Take the tag bit into account while computing the shift size. 
+ if (key_representation.IsSmi() && (shift_size >= 1)) { + DCHECK(SmiValuesAre31Bits()); + shift_size -= kSmiTagSize; + } ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); return Operand(elements_pointer_reg, ToRegister(key), scale_factor, - offset + (additional_index << shift_size)); + offset); } } void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->object()).is(rdx)); - ASSERT(ToRegister(instr->key()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ Move(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(rax)); + __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot())); + } Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr); @@ -3248,8 +3401,7 @@ __ movp(receiver, Operand(receiver, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ movp(receiver, - FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset)); + __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset)); __ bind(&receiver_ok); } @@ -3260,9 +3412,9 @@ Register function = ToRegister(instr->function()); Register length = ToRegister(instr->length()); Register elements = ToRegister(instr->elements()); - ASSERT(receiver.is(rax)); // Used for parameter count. - ASSERT(function.is(rdi)); // Required by InvokeFunction. - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(receiver.is(rax)); // Used for parameter count. + DCHECK(function.is(rdi)); // Required by InvokeFunction. 
+ DCHECK(ToRegister(instr->result()).is(rax)); // Copy the arguments to this function possibly from the // adaptor frame below it. @@ -3288,7 +3440,7 @@ // Invoke the function. __ bind(&invoke); - ASSERT(instr->HasPointerMap()); + DCHECK(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); @@ -3320,17 +3472,17 @@ __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset)); } else { // If there is no frame, the context must be in rsi. - ASSERT(result.is(rsi)); + DCHECK(result.is(rsi)); } } void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); __ Push(rsi); // The context is the first argument. __ Push(instr->hydrogen()->pairs()); __ Push(Smi::FromInt(instr->hydrogen()->flags())); - CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); } @@ -3381,7 +3533,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->result()).is(rax)); LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -3392,7 +3544,7 @@ generator.BeforeCall(__ CallSize(code)); __ call(code, RelocInfo::CODE_TARGET); } else { - ASSERT(instr->target()->IsRegister()); + DCHECK(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(target)); __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); @@ -3403,8 +3555,8 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { - ASSERT(ToRegister(instr->function()).is(rdi)); - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->function()).is(rdi)); + DCHECK(ToRegister(instr->result()).is(rax)); if (instr->hydrogen()->pass_argument_count()) { __ Set(rax, instr->arity()); 
@@ -3462,7 +3614,7 @@ // Slow case: Call the runtime system to do the number allocation. __ bind(&slow); CallRuntimeFromDeferred( - Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context()); + Runtime::kAllocateHeapNumber, 0, instr, instr->context()); // Set the pointer to the new heap number in tmp. if (!tmp.is(rax)) __ movp(tmp, rax); // Restore input_reg after call to runtime. @@ -3470,8 +3622,8 @@ __ bind(&allocated); __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ shl(tmp2, Immediate(1)); - __ shr(tmp2, Immediate(1)); + __ shlq(tmp2, Immediate(1)); + __ shrq(tmp2, Immediate(1)); __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); __ StoreToSafepointRegisterSlot(input_reg, tmp); @@ -3515,7 +3667,7 @@ LMathAbs* instr_; }; - ASSERT(instr->value()->Equals(instr->result())); + DCHECK(instr->value()->Equals(instr->result())); Representation r = instr->hydrogen()->value()->representation(); if (r.IsDouble()) { @@ -3573,7 +3725,7 @@ __ testq(output_reg, Immediate(1)); DeoptimizeIf(not_zero, instr->environment()); __ Set(output_reg, 0); - __ jmp(&done, Label::kNear); + __ jmp(&done); __ bind(&positive_sign); } @@ -3660,17 +3812,30 @@ } -void LCodeGen::DoMathSqrt(LMathSqrt* instr) { +void LCodeGen::DoMathFround(LMathFround* instr) { XMMRegister input_reg = ToDoubleRegister(instr->value()); - ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); - __ sqrtsd(input_reg, input_reg); + XMMRegister output_reg = ToDoubleRegister(instr->result()); + __ cvtsd2ss(output_reg, input_reg); + __ cvtss2sd(output_reg, output_reg); +} + + +void LCodeGen::DoMathSqrt(LMathSqrt* instr) { + XMMRegister output = ToDoubleRegister(instr->result()); + if (instr->value()->IsDoubleRegister()) { + XMMRegister input = ToDoubleRegister(instr->value()); + __ sqrtsd(output, input); + } else { + Operand input = ToOperand(instr->value()); + __ sqrtsd(output, input); + } } void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { XMMRegister xmm_scratch = 
double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); - ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); + DCHECK(ToDoubleRegister(instr->result()).is(input_reg)); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity @@ -3705,15 +3870,15 @@ // Just make sure that the input/output registers are the expected ones. Register exponent = rdx; - ASSERT(!instr->right()->IsRegister() || + DCHECK(!instr->right()->IsRegister() || ToRegister(instr->right()).is(exponent)); - ASSERT(!instr->right()->IsDoubleRegister() || + DCHECK(!instr->right()->IsDoubleRegister() || ToDoubleRegister(instr->right()).is(xmm1)); - ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); - ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); + DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); + DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); if (exponent_type.IsSmi()) { - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; @@ -3721,14 +3886,14 @@ __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx); DeoptimizeIf(not_equal, instr->environment()); __ bind(&no_deopt); - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { - MathPowStub stub(MathPowStub::INTEGER); + MathPowStub stub(isolate(), MathPowStub::INTEGER); __ CallStub(&stub); } else { - ASSERT(exponent_type.IsDouble()); - MathPowStub stub(MathPowStub::DOUBLE); + DCHECK(exponent_type.IsDouble()); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStub(&stub); } } @@ -3746,7 +3911,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) { - ASSERT(instr->value()->Equals(instr->result())); + DCHECK(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); Label positive, done, zero; @@ -3793,9 
+3958,9 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->function()).is(rdi)); - ASSERT(instr->HasPointerMap()); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->function()).is(rdi)); + DCHECK(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { @@ -3814,33 +3979,33 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->function()).is(rdi)); - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->function()).is(rdi)); + DCHECK(ToRegister(instr->result()).is(rax)); int arity = instr->arity(); - CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->constructor()).is(rdi)); - ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->constructor()).is(rdi)); + DCHECK(ToRegister(instr->result()).is(rax)); __ Set(rax, instr->arity()); // No cell in ebx for construct type feedback in optimized code __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } void LCodeGen::DoCallNewArray(LCallNewArray* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->constructor()).is(rdi)); - 
ASSERT(ToRegister(instr->result()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->constructor()).is(rdi)); + DCHECK(ToRegister(instr->result()).is(rax)); __ Set(rax, instr->arity()); __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); @@ -3851,8 +4016,8 @@ : DONT_OVERRIDE; if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { @@ -3864,24 +4029,26 @@ __ j(zero, &packed_case, Label::kNear); ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ jmp(&done, Label::kNear); __ bind(&packed_case); } - ArraySingleArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ bind(&done); } else { - ArrayNArgumentsConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } } void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); } @@ -3915,10 +4082,10 @@ int offset = access.offset(); if 
(access.IsExternalMemory()) { - ASSERT(!hinstr->NeedsWriteBarrier()); + DCHECK(!hinstr->NeedsWriteBarrier()); Register value = ToRegister(instr->value()); if (instr->object()->IsConstantOperand()) { - ASSERT(value.is(rax)); + DCHECK(value.is(rax)); LConstantOperand* object = LConstantOperand::cast(instr->object()); __ store_rax(ToExternalReference(object)); } else { @@ -3929,39 +4096,23 @@ } Register object = ToRegister(instr->object()); - Handle<Map> transition = instr->transition(); - SmiCheck check_needed = hinstr->value()->IsHeapObject() - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - - ASSERT(!(representation.IsSmi() && - instr->value()->IsConstantOperand() && - !IsInteger32Constant(LConstantOperand::cast(instr->value())))); - if (representation.IsHeapObject()) { - if (instr->value()->IsConstantOperand()) { - LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); - if (chunk_->LookupConstant(operand_value)->HasSmiValue()) { - DeoptimizeIf(no_condition, instr->environment()); - } - } else { - if (!hinstr->value()->type().IsHeapObject()) { - Register value = ToRegister(instr->value()); - Condition cc = masm()->CheckSmi(value); - DeoptimizeIf(cc, instr->environment()); + __ AssertNotSmi(object); - // We know that value is a smi now, so we can omit the check below. 
- check_needed = OMIT_SMI_CHECK; - } - } - } else if (representation.IsDouble()) { - ASSERT(transition.is_null()); - ASSERT(access.IsInobject()); - ASSERT(!hinstr->NeedsWriteBarrier()); + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsInteger32Constant(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!hinstr->has_transition()); + DCHECK(!hinstr->NeedsWriteBarrier()); XMMRegister value = ToDoubleRegister(instr->value()); __ movsd(FieldOperand(object, offset), value); return; } - if (!transition.is_null()) { + if (hinstr->has_transition()) { + Handle<Map> transition = hinstr->transition_map(); + AddDeprecationDependency(transition); if (!hinstr->NeedsWriteBarrierForMap()) { __ Move(FieldOperand(object, HeapObject::kMapOffset), transition); } else { @@ -3969,13 +4120,10 @@ __ Move(kScratchRegister, transition); __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister); // Update the write barrier for the map field. 
- __ RecordWriteField(object, - HeapObject::kMapOffset, - kScratchRegister, - temp, - kSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteForMap(object, + kScratchRegister, + temp, + kSaveFPRegs); } } @@ -3986,17 +4134,17 @@ __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); } - if (representation.IsSmi() && + if (representation.IsSmi() && SmiValuesAre32Bits() && hinstr->value()->representation().IsInteger32()) { - ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); -#ifdef DEBUG - Register scratch = kScratchRegister; - __ Load(scratch, FieldOperand(write_register, offset), representation); - __ AssertSmi(scratch); -#endif + DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); + if (FLAG_debug_code) { + Register scratch = kScratchRegister; + __ Load(scratch, FieldOperand(write_register, offset), representation); + __ AssertSmi(scratch); + } // Store int value directly to upper half of the smi. STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + DCHECK(kSmiTagSize + kSmiShiftSize == 32); offset += kPointerSize / 2; representation = Representation::Integer32(); } @@ -4009,7 +4157,7 @@ } else { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); if (IsInteger32Constant(operand_value)) { - ASSERT(!hinstr->NeedsWriteBarrier()); + DCHECK(!hinstr->NeedsWriteBarrier()); int32_t value = ToInteger32(operand_value); if (representation.IsSmi()) { __ Move(operand, Smi::FromInt(value)); @@ -4020,7 +4168,7 @@ } else { Handle<Object> handle_value = ToHandle(operand_value); - ASSERT(!hinstr->NeedsWriteBarrier()); + DCHECK(!hinstr->NeedsWriteBarrier()); __ Move(operand, handle_value); } } @@ -4035,96 +4183,105 @@ temp, kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + hinstr->SmiCheckForWriteBarrier(), + hinstr->PointersToHereCheckForValue()); } } void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - 
ASSERT(ToRegister(instr->object()).is(rdx)); - ASSERT(ToRegister(instr->value()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); - __ Move(rcx, instr->hydrogen()->name()); + __ Move(StoreIC::NameRegister(), instr->hydrogen()->name()); Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); CallCode(ic, RelocInfo::CODE_TARGET, instr); } -void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { - if (FLAG_debug_code && check->hydrogen()->skip_check()) { - Label done; - __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); - __ bind(&done); - } else { - DeoptimizeIf(cc, check->environment()); - } -} - - void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - HBoundsCheck* hinstr = instr->hydrogen(); - if (hinstr->skip_check()) return; - - Representation representation = hinstr->length()->representation(); - ASSERT(representation.Equals(hinstr->index()->representation())); - ASSERT(representation.IsSmiOrInteger32()); - - if (instr->length()->IsRegister()) { - Register reg = ToRegister(instr->length()); - - if (instr->index()->IsConstantOperand()) { - int32_t constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); + Representation representation = instr->hydrogen()->length()->representation(); + DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); + DCHECK(representation.IsSmiOrInteger32()); + + Condition cc = instr->hydrogen()->allow_equality() ? 
below : below_equal; + if (instr->length()->IsConstantOperand()) { + int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); + Register index = ToRegister(instr->index()); + if (representation.IsSmi()) { + __ Cmp(index, Smi::FromInt(length)); + } else { + __ cmpl(index, Immediate(length)); + } + cc = CommuteCondition(cc); + } else if (instr->index()->IsConstantOperand()) { + int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); + if (instr->length()->IsRegister()) { + Register length = ToRegister(instr->length()); if (representation.IsSmi()) { - __ Cmp(reg, Smi::FromInt(constant_index)); + __ Cmp(length, Smi::FromInt(index)); } else { - __ cmpl(reg, Immediate(constant_index)); + __ cmpl(length, Immediate(index)); } } else { - Register reg2 = ToRegister(instr->index()); + Operand length = ToOperand(instr->length()); if (representation.IsSmi()) { - __ cmpp(reg, reg2); + __ Cmp(length, Smi::FromInt(index)); } else { - __ cmpl(reg, reg2); + __ cmpl(length, Immediate(index)); } } } else { - Operand length = ToOperand(instr->length()); - if (instr->index()->IsConstantOperand()) { - int32_t constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); + Register index = ToRegister(instr->index()); + if (instr->length()->IsRegister()) { + Register length = ToRegister(instr->length()); if (representation.IsSmi()) { - __ Cmp(length, Smi::FromInt(constant_index)); + __ cmpp(length, index); } else { - __ cmpl(length, Immediate(constant_index)); + __ cmpl(length, index); } } else { + Operand length = ToOperand(instr->length()); if (representation.IsSmi()) { - __ cmpp(length, ToRegister(instr->index())); + __ cmpp(length, index); } else { - __ cmpl(length, ToRegister(instr->index())); + __ cmpl(length, index); } } } - Condition condition = hinstr->allow_equality() ? 
below : below_equal; - ApplyCheckIf(condition, instr); + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { + Label done; + __ j(NegateCondition(cc), &done, Label::kNear); + __ int3(); + __ bind(&done); + } else { + DeoptimizeIf(cc, instr->environment()); + } } void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); - int base_offset = instr->is_fixed_typed_array() - ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag - : 0; + if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { + Register key_reg = ToRegister(key); + Representation key_representation = + instr->hydrogen()->key()->representation(); + if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) { + __ SmiToInteger64(key_reg, key_reg); + } else if (instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(key_reg, key_reg); + } + } Operand operand(BuildFastArrayOperand( instr->elements(), key, + instr->hydrogen()->key()->representation(), elements_kind, - base_offset, - instr->additional_index())); + instr->base_offset())); if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || elements_kind == FLOAT32_ELEMENTS) { @@ -4179,6 +4336,12 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { XMMRegister value = ToDoubleRegister(instr->value()); LOperand* key = instr->key(); + if (kPointerSize == kInt32Size && !key->IsConstantOperand() && + instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(ToRegister(key), ToRegister(key)); + } if (instr->NeedsCanonicalization()) { Label have_value; @@ -4195,9 +4358,9 @@ Operand double_store_operand = BuildFastArrayOperand( instr->elements(), key, + instr->hydrogen()->key()->representation(), 
FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); + instr->base_offset()); __ movsd(double_store_operand, value); } @@ -4206,36 +4369,41 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { HStoreKeyed* hinstr = instr->hydrogen(); LOperand* key = instr->key(); - int offset = FixedArray::kHeaderSize - kHeapObjectTag; + int offset = instr->base_offset(); Representation representation = hinstr->value()->representation(); - if (representation.IsInteger32()) { - ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); - ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS); -#ifdef DEBUG - Register scratch = kScratchRegister; - __ Load(scratch, - BuildFastArrayOperand(instr->elements(), - key, - FAST_ELEMENTS, - offset, - instr->additional_index()), - Representation::Smi()); - __ AssertSmi(scratch); -#endif + if (kPointerSize == kInt32Size && !key->IsConstantOperand() && + instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(ToRegister(key), ToRegister(key)); + } + if (representation.IsInteger32() && SmiValuesAre32Bits()) { + DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); + DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); + if (FLAG_debug_code) { + Register scratch = kScratchRegister; + __ Load(scratch, + BuildFastArrayOperand(instr->elements(), + key, + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, + offset), + Representation::Smi()); + __ AssertSmi(scratch); + } // Store int value directly to upper half of the smi. 
STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); + DCHECK(kSmiTagSize + kSmiShiftSize == 32); offset += kPointerSize / 2; } Operand operand = BuildFastArrayOperand(instr->elements(), key, + instr->hydrogen()->key()->representation(), FAST_ELEMENTS, - offset, - instr->additional_index()); - + offset); if (instr->value()->IsRegister()) { __ Store(operand, ToRegister(instr->value()), representation); } else { @@ -4256,10 +4424,10 @@ if (hinstr->NeedsWriteBarrier()) { Register elements = ToRegister(instr->elements()); - ASSERT(instr->value()->IsRegister()); + DCHECK(instr->value()->IsRegister()); Register value = ToRegister(instr->value()); - ASSERT(!key->IsConstantOperand()); - SmiCheck check_needed = hinstr->value()->IsHeapObject() + DCHECK(!key->IsConstantOperand()); + SmiCheck check_needed = hinstr->value()->type().IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. Register key_reg(ToRegister(key)); @@ -4269,7 +4437,8 @@ value, kSaveFPRegs, EMIT_REMEMBERED_SET, - check_needed); + check_needed, + hinstr->PointersToHereCheckForValue()); } } @@ -4286,10 +4455,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->object()).is(rdx)); - ASSERT(ToRegister(instr->key()).is(rcx)); - ASSERT(ToRegister(instr->value()).is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); Handle<Code> ic = instr->strict_mode() == STRICT ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() @@ -4314,21 +4483,17 @@ __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); // Write barrier. 
- ASSERT_NE(instr->temp(), NULL); - __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, - ToRegister(instr->temp()), kDontSaveFPRegs); + __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()), + kDontSaveFPRegs); } else { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(object_reg.is(rax)); + DCHECK(ToRegister(instr->context()).is(rsi)); PushSafepointRegistersScope scope(this); - if (!object_reg.is(rax)) { - __ movp(rax, object_reg); - } __ Move(rbx, to_map); bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; - TransitionElementsKindStub stub(from_kind, to_kind, is_js_array); + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); __ CallStub(&stub); - RecordSafepointWithRegisters( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); } __ bind(¬_applicable); } @@ -4345,12 +4510,13 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); - ASSERT(ToRegister(instr->left()).is(rdx)); - ASSERT(ToRegister(instr->right()).is(rax)); - StringAddStub stub(instr->hydrogen()->flags(), + DCHECK(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->left()).is(rdx)); + DCHECK(ToRegister(instr->right()).is(rax)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -4402,7 +4568,7 @@ __ Push(index); } CallRuntimeFromDeferred( - Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context()); + Runtime::kStringCharCodeAtRT, 2, instr, instr->context()); __ AssertSmi(rax); __ SmiToInteger32(rax, rax); __ StoreToSafepointRegisterSlot(result, rax); @@ -4425,10 +4591,10 @@ DeferredStringCharFromCode* deferred = new(zone()) DeferredStringCharFromCode(this, instr); - 
ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); Register result = ToRegister(instr->result()); - ASSERT(!char_code.is(result)); + DCHECK(!char_code.is(result)); __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); __ j(above, deferred->entry()); @@ -4462,9 +4628,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister() || input->IsStackSlot()); + DCHECK(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); - ASSERT(output->IsDoubleRegister()); + DCHECK(output->IsDoubleRegister()); if (input->IsRegister()) { __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); } else { @@ -4476,20 +4642,38 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { LOperand* input = instr->value(); LOperand* output = instr->result(); - LOperand* temp = instr->temp(); - __ LoadUint32(ToDoubleRegister(output), - ToRegister(input), - ToDoubleRegister(temp)); + __ LoadUint32(ToDoubleRegister(output), ToRegister(input)); } void LCodeGen::DoNumberTagI(LNumberTagI* instr) { + class DeferredNumberTagI V8_FINAL : public LDeferredCode { + public: + DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), + instr_->temp2(), SIGNED_INT32); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LNumberTagI* instr_; + }; + LOperand* input = instr->value(); - ASSERT(input->IsRegister() && input->Equals(instr->result())); + DCHECK(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); - __ Integer32ToSmi(reg, reg); + if (SmiValuesAre32Bits()) { + __ Integer32ToSmi(reg, reg); + } else { + DCHECK(SmiValuesAre31Bits()); + 
DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); + __ Integer32ToSmi(reg, reg); + __ j(overflow, deferred->entry()); + __ bind(deferred->exit()); + } } @@ -4499,7 +4683,8 @@ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { - codegen()->DoDeferredNumberTagU(instr_); + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), + instr_->temp2(), UNSIGNED_INT32); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: @@ -4507,7 +4692,7 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister() && input->Equals(instr->result())); + DCHECK(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); @@ -4518,21 +4703,35 @@ } -void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) { +void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp1, + LOperand* temp2, + IntegerSignedness signedness) { Label done, slow; - Register reg = ToRegister(instr->value()); - Register tmp = ToRegister(instr->temp1()); - XMMRegister temp_xmm = ToDoubleRegister(instr->temp2()); + Register reg = ToRegister(value); + Register tmp = ToRegister(temp1); + XMMRegister temp_xmm = ToDoubleRegister(temp2); // Load value into temp_xmm which will be preserved across potential call to // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable // XMM registers on x64). - XMMRegister xmm_scratch = double_scratch0(); - __ LoadUint32(temp_xmm, reg, xmm_scratch); + if (signedness == SIGNED_INT32) { + DCHECK(SmiValuesAre31Bits()); + // There was overflow, so bits 30 and 31 of the original integer + // disagree. Try to allocate a heap number in new space and store + // the value in there. If that fails, call the runtime system. 
+ __ SmiToInteger32(reg, reg); + __ xorl(reg, Immediate(0x80000000)); + __ cvtlsi2sd(temp_xmm, reg); + } else { + DCHECK(signedness == UNSIGNED_INT32); + __ LoadUint32(temp_xmm, reg); + } if (FLAG_inline_new) { __ AllocateHeapNumber(reg, tmp, &slow); - __ jmp(&done, Label::kNear); + __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar); } // Slow case: Call the runtime system to do the number allocation. @@ -4546,13 +4745,13 @@ // Preserve the value of all registers. PushSafepointRegistersScope scope(this); - // NumberTagU uses the context from the frame, rather than + // NumberTagIU uses the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(reg, rax); @@ -4604,11 +4803,11 @@ PushSafepointRegistersScope scope(this); // NumberTagD uses the context from the frame, rather than // the environment's HContext or HInlinedContext value. - // They only call Runtime::kHiddenAllocateHeapNumber. + // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. 
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ movp(kScratchRegister, rax); @@ -4623,8 +4822,8 @@ Register output = ToRegister(instr->result()); if (hchange->CheckFlag(HValue::kCanOverflow) && hchange->value()->CheckFlag(HValue::kUint32)) { - __ testl(input, input); - DeoptimizeIf(sign, instr->environment()); + Condition is_smi = __ CheckUInteger32ValidSmiValue(input); + DeoptimizeIf(NegateCondition(is_smi), instr->environment()); } __ Integer32ToSmi(output, input); if (hchange->CheckFlag(HValue::kCanOverflow) && @@ -4635,7 +4834,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - ASSERT(instr->value()->Equals(instr->result())); + DCHECK(instr->value()->Equals(instr->result())); Register input = ToRegister(instr->value()); if (instr->needs_check()) { Condition is_smi = __ CheckSmi(input); @@ -4696,7 +4895,7 @@ __ jmp(&done, Label::kNear); } } else { - ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); } // Smi to XMM conversion @@ -4767,8 +4966,8 @@ }; LOperand* input = instr->value(); - ASSERT(input->IsRegister()); - ASSERT(input->Equals(instr->result())); + DCHECK(input->IsRegister()); + DCHECK(input->Equals(instr->result())); Register input_reg = ToRegister(input); if (instr->hydrogen()->value()->representation().IsSmi()) { @@ -4784,9 +4983,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); LOperand* result = instr->result(); - ASSERT(result->IsDoubleRegister()); + DCHECK(result->IsDoubleRegister()); Register input_reg = ToRegister(input); XMMRegister result_reg = ToDoubleRegister(result); @@ -4805,9 +5004,9 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { LOperand* input = instr->value(); - 
ASSERT(input->IsDoubleRegister()); + DCHECK(input->IsDoubleRegister()); LOperand* result = instr->result(); - ASSERT(result->IsRegister()); + DCHECK(result->IsRegister()); XMMRegister input_reg = ToDoubleRegister(input); Register result_reg = ToRegister(result); @@ -4830,9 +5029,9 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { LOperand* input = instr->value(); - ASSERT(input->IsDoubleRegister()); + DCHECK(input->IsDoubleRegister()); LOperand* result = instr->result(); - ASSERT(result->IsRegister()); + DCHECK(result->IsRegister()); XMMRegister input_reg = ToDoubleRegister(input); Register result_reg = ToRegister(result); @@ -4860,7 +5059,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - if (!instr->hydrogen()->value()->IsHeapObject()) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { LOperand* input = instr->value(); Condition cc = masm()->CheckSmi(ToRegister(input)); DeoptimizeIf(cc, instr->environment()); @@ -4899,7 +5098,7 @@ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); if (IsPowerOf2(mask)) { - ASSERT(tag == 0 || IsPowerOf2(tag)); + DCHECK(tag == 0 || IsPowerOf2(tag)); __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), Immediate(mask)); DeoptimizeIf(tag == 0 ? 
not_zero : zero, instr->environment()); @@ -4954,29 +5153,35 @@ Register object_; }; - if (instr->hydrogen()->CanOmitMapChecks()) return; + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } LOperand* input = instr->value(); - ASSERT(input->IsRegister()); + DCHECK(input->IsRegister()); Register reg = ToRegister(input); DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg); __ bind(deferred->check_maps()); } - UniqueSet<Map> map_set = instr->hydrogen()->map_set(); + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); Label success; - for (int i = 0; i < map_set.size() - 1; i++) { - Handle<Map> map = map_set.at(i).handle(); + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); __ CompareMap(reg, map); __ j(equal, &success, Label::kNear); } - Handle<Map> map = map_set.at(map_set.size() - 1).handle(); + Handle<Map> map = maps->at(maps->size() - 1).handle(); __ CompareMap(reg, map); - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { __ j(not_equal, deferred->entry()); } else { DeoptimizeIf(not_equal, instr->environment()); @@ -4995,14 +5200,14 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - ASSERT(instr->unclamped()->Equals(instr->result())); + DCHECK(instr->unclamped()->Equals(instr->result())); Register value_reg = ToRegister(instr->result()); __ ClampUint8(value_reg); } void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - ASSERT(instr->unclamped()->Equals(instr->result())); + DCHECK(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); XMMRegister xmm_scratch 
= double_scratch0(); @@ -5042,7 +5247,7 @@ Register result_reg = ToRegister(instr->result()); if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { __ movq(result_reg, value_reg); - __ shr(result_reg, Immediate(32)); + __ shrq(result_reg, Immediate(32)); } else { __ movd(result_reg, value_reg); } @@ -5086,11 +5291,11 @@ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); } if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); } @@ -5114,7 +5319,7 @@ __ movl(temp, Immediate((size / kPointerSize) - 1)); } else { temp = ToRegister(instr->size()); - __ sar(temp, Immediate(kPointerSizeLog2)); + __ sarp(temp, Immediate(kPointerSizeLog2)); __ decl(temp); } Label loop; @@ -5138,7 +5343,7 @@ PushSafepointRegistersScope scope(this); if (instr->size()->IsRegister()) { Register size = ToRegister(instr->size()); - ASSERT(!size.is(result)); + DCHECK(!size.is(result)); __ Integer32ToSmi(size, size); __ Push(size); } else { @@ -5148,11 +5353,11 @@ int flags = 0; if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { - ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); + 
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); } else { flags = AllocateTargetSpace::update(flags, NEW_SPACE); @@ -5160,20 +5365,20 @@ __ Push(Smi::FromInt(flags)); CallRuntimeFromDeferred( - Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context()); + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); __ StoreToSafepointRegisterSlot(result, rax); } void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - ASSERT(ToRegister(instr->value()).is(rax)); + DCHECK(ToRegister(instr->value()).is(rax)); __ Push(rax); CallRuntime(Runtime::kToFastProperties, 1, instr); } void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); Label materialized; // Registers will be used as follows: // rcx = literals array. @@ -5192,7 +5397,7 @@ __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); __ Push(instr->hydrogen()->pattern()); __ Push(instr->hydrogen()->flags()); - CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); __ movp(rbx, rax); __ bind(&materialized); @@ -5204,7 +5409,7 @@ __ bind(&runtime_allocate); __ Push(rbx); __ Push(Smi::FromInt(size)); - CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); __ Pop(rbx); __ bind(&allocated); @@ -5224,27 +5429,28 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. 
bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && instr->hydrogen()->has_no_literals()) { - FastNewClosureStub stub(instr->hydrogen()->strict_mode(), + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), instr->hydrogen()->is_generator()); __ Move(rbx, instr->hydrogen()->shared_info()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ Push(rsi); __ Push(instr->hydrogen()->shared_info()); __ PushRoot(pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex); - CallRuntime(Runtime::kHiddenNewClosure, 3, instr); + CallRuntime(Runtime::kNewClosure, 3, instr); } } void LCodeGen::DoTypeof(LTypeof* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); LOperand* input = instr->value(); EmitPushTaggedOperand(input); CallRuntime(Runtime::kTypeof, 1, instr); @@ -5252,7 +5458,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { - ASSERT(!operand->IsDoubleRegister()); + DCHECK(!operand->IsDoubleRegister()); if (operand->IsConstantOperand()) { __ Push(ToHandle(LConstantOperand::cast(operand))); } else if (operand->IsRegister()) { @@ -5285,14 +5491,15 @@ Label::Distance false_distance = right_block == next_block ? 
Label::kNear : Label::kFar; Condition final_branch_condition = no_condition; - if (type_name->Equals(heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(type_name, factory->number_string())) { __ JumpIfSmi(input, true_label, true_distance); __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), Heap::kHeapNumberMapRootIndex); final_branch_condition = equal; - } else if (type_name->Equals(heap()->string_string())) { + } else if (String::Equals(type_name, factory->string_string())) { __ JumpIfSmi(input, false_label, false_distance); __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); __ j(above_equal, false_label, false_distance); @@ -5300,22 +5507,18 @@ Immediate(1 << Map::kIsUndetectable)); final_branch_condition = zero; - } else if (type_name->Equals(heap()->symbol_string())) { + } else if (String::Equals(type_name, factory->symbol_string())) { __ JumpIfSmi(input, false_label, false_distance); __ CmpObjectType(input, SYMBOL_TYPE, input); final_branch_condition = equal; - } else if (type_name->Equals(heap()->boolean_string())) { + } else if (String::Equals(type_name, factory->boolean_string())) { __ CompareRoot(input, Heap::kTrueValueRootIndex); __ j(equal, true_label, true_distance); __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = equal; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - final_branch_condition = equal; - - } else if (type_name->Equals(heap()->undefined_string())) { + } else if (String::Equals(type_name, factory->undefined_string())) { __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ j(equal, true_label, true_distance); __ JumpIfSmi(input, false_label, false_distance); @@ -5325,7 +5528,7 @@ Immediate(1 << Map::kIsUndetectable)); final_branch_condition = not_zero; - } else if (type_name->Equals(heap()->function_string())) { + } else if (String::Equals(type_name, 
factory->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label, false_distance); __ CmpObjectType(input, JS_FUNCTION_TYPE, input); @@ -5333,12 +5536,10 @@ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); final_branch_condition = equal; - } else if (type_name->Equals(heap()->object_string())) { + } else if (String::Equals(type_name, factory->object_string())) { __ JumpIfSmi(input, false_label, false_distance); - if (!FLAG_harmony_typeof) { - __ CompareRoot(input, Heap::kNullValueRootIndex); - __ j(equal, true_label, true_distance); - } + __ CompareRoot(input, Heap::kNullValueRootIndex); + __ j(equal, true_label, true_distance); __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); __ j(below, false_label, false_distance); __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); @@ -5398,7 +5599,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { last_lazy_deopt_pc_ = masm()->pc_offset(); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); @@ -5433,9 +5634,9 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { PushSafepointRegistersScope scope(this); __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); } @@ -5454,7 +5655,7 @@ LStackCheck* instr_; }; - ASSERT(instr->HasEnvironment()); + DCHECK(instr->HasEnvironment()); LEnvironment* env = instr->environment(); // There is no LLazyBailout instruction for 
stack-checks. We have to // prepare for lazy deoptimization explicitly here. @@ -5464,14 +5665,14 @@ __ CompareRoot(rsp, Heap::kStackLimitRootIndex); __ j(above_equal, &done, Label::kNear); - ASSERT(instr->context()->IsRegister()); - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(rsi)); CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, instr); __ bind(&done); } else { - ASSERT(instr->hydrogen()->is_backwards_branch()); + DCHECK(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping. DeferredStackCheck* deferred_stack_check = new(zone()) DeferredStackCheck(this, instr); @@ -5496,7 +5697,7 @@ // If the environment were already registered, we would have no way of // backpatching it with the spill slot operands. - ASSERT(!environment->HasBeenRegistered()); + DCHECK(!environment->HasBeenRegistered()); RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); GenerateOsrPrologue(); @@ -5504,7 +5705,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { - ASSERT(ToRegister(instr->context()).is(rsi)); + DCHECK(ToRegister(instr->context()).is(rsi)); __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); DeoptimizeIf(equal, instr->environment()); @@ -5567,11 +5768,55 @@ } +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ Push(object); + __ Push(index); + __ xorp(rsi, rsi); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(object, rax); +} + + void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register 
object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register object_; + Register index_; + }; + Register object = ToRegister(instr->object()); Register index = ToRegister(instr->index()); + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index); + Label out_of_object, done; + __ Move(kScratchRegister, Smi::FromInt(1)); + __ testp(index, kScratchRegister); + __ j(not_zero, deferred->entry()); + + __ sarp(index, Immediate(1)); + __ SmiToInteger32(index, index); __ cmpl(index, Immediate(0)); __ j(less, &out_of_object, Label::kNear); @@ -5589,10 +5834,26 @@ index, times_pointer_size, FixedArray::kHeaderSize - kPointerSize)); + __ bind(deferred->exit()); __ bind(&done); } +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ Push(scope_info); + __ Push(ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + #undef __ } } // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/src/x64/lithium-codegen-x64.h nodejs-0.11.15/deps/v8/src/x64/lithium-codegen-x64.h --- nodejs-0.11.13/deps/v8/src/x64/lithium-codegen-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/lithium-codegen-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_ #define V8_X64_LITHIUM_CODEGEN_X64_H_ -#include "x64/lithium-x64.h" +#include "src/x64/lithium-x64.h" -#include "checks.h" -#include "deoptimizer.h" -#include "lithium-codegen.h" -#include "safepoint-table.h" -#include "scopes.h" -#include "v8utils.h" -#include "x64/lithium-gap-resolver-x64.h" +#include "src/base/logging.h" +#include "src/deoptimizer.h" +#include "src/lithium-codegen.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" +#include "src/x64/lithium-gap-resolver-x64.h" namespace v8 { namespace internal { @@ -88,6 +65,7 @@ bool IsInteger32Constant(LConstantOperand* op) const; bool IsDehoistedKeyConstant(LConstantOperand* op) const; bool IsSmiConstant(LConstantOperand* op) const; + int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; int32_t ToInteger32(LConstantOperand* op) const; Smi* ToSmi(LConstantOperand* op) const; double ToDouble(LConstantOperand* op) const; @@ -106,7 +84,14 @@ // Deferred code support. void DoDeferredNumberTagD(LNumberTagD* instr); - void DoDeferredNumberTagU(LNumberTagU* instr); + + enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; + void DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp1, + LOperand* temp2, + IntegerSignedness signedness); + void DoDeferredTaggedToI(LTaggedToI* instr, Label* done); void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); void DoDeferredStackCheck(LStackCheck* instr); @@ -116,6 +101,9 @@ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index); // Parallel move support. 
void DoParallelMove(LParallelMove* move); @@ -147,8 +135,6 @@ int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - void Abort(BailoutReason reason); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } @@ -224,7 +210,6 @@ LEnvironment* environment, Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition cc, LEnvironment* environment); - void ApplyCheckIf(Condition cc, LBoundsCheck* check); bool DeoptEveryNTimes() { return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); @@ -247,9 +232,9 @@ Operand BuildFastArrayOperand( LOperand* elements_pointer, LOperand* key, + Representation key_representation, ElementsKind elements_kind, - uint32_t offset, - uint32_t additional_index = 0); + uint32_t base_offset); Operand BuildSeqStringOperand(Register string, LOperand* index, @@ -360,14 +345,14 @@ public: explicit PushSafepointRegistersScope(LCodeGen* codegen) : codegen_(codegen) { - ASSERT(codegen_->info()->is_calling()); - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + DCHECK(codegen_->info()->is_calling()); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->masm_->PushSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; } ~PushSafepointRegistersScope() { - ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); codegen_->masm_->PopSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kSimple; } diff -Nru nodejs-0.11.13/deps/v8/src/x64/lithium-gap-resolver-x64.cc nodejs-0.11.15/deps/v8/src/x64/lithium-gap-resolver-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/lithium-gap-resolver-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/lithium-gap-resolver-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "x64/lithium-gap-resolver-x64.h" -#include "x64/lithium-codegen-x64.h" +#include "src/x64/lithium-codegen-x64.h" +#include "src/x64/lithium-gap-resolver-x64.h" namespace v8 { namespace internal { @@ -40,7 +17,7 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) { - ASSERT(moves_.is_empty()); + DCHECK(moves_.is_empty()); // Build up a worklist of moves. BuildInitialMoveList(parallel_move); @@ -57,7 +34,7 @@ // Perform the moves with constant sources. for (int i = 0; i < moves_.length(); ++i) { if (!moves_[i].IsEliminated()) { - ASSERT(moves_[i].source()->IsConstantOperand()); + DCHECK(moves_[i].source()->IsConstantOperand()); EmitMove(i); } } @@ -88,13 +65,13 @@ // which means that a call to PerformMove could change any source operand // in the move graph. - ASSERT(!moves_[index].IsPending()); - ASSERT(!moves_[index].IsRedundant()); + DCHECK(!moves_[index].IsPending()); + DCHECK(!moves_[index].IsRedundant()); // Clear this move's destination to indicate a pending move. The actual // destination is saved in a stack-allocated local. Recursion may allow // multiple moves to be pending. - ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. LOperand* destination = moves_[index].destination(); moves_[index].set_destination(NULL); @@ -135,7 +112,7 @@ for (int i = 0; i < moves_.length(); ++i) { LMoveOperands other_move = moves_[i]; if (other_move.Blocks(destination)) { - ASSERT(other_move.IsPending()); + DCHECK(other_move.IsPending()); EmitSwap(index); return; } @@ -147,12 +124,12 @@ void LGapResolver::Verify() { -#ifdef ENABLE_SLOW_ASSERTS +#ifdef ENABLE_SLOW_DCHECKS // No operand should be the destination for more than one move. 
for (int i = 0; i < moves_.length(); ++i) { LOperand* destination = moves_[i].destination(); for (int j = i + 1; j < moves_.length(); ++j) { - SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); } } #endif @@ -174,7 +151,7 @@ Register dst = cgen_->ToRegister(destination); __ movp(dst, src); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); Operand dst = cgen_->ToOperand(destination); __ movp(dst, src); } @@ -185,7 +162,7 @@ Register dst = cgen_->ToRegister(destination); __ movp(dst, src); } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); Operand dst = cgen_->ToOperand(destination); __ movp(kScratchRegister, src); __ movp(dst, kScratchRegister); @@ -220,7 +197,7 @@ __ movq(dst, kScratchRegister); } } else { - ASSERT(destination->IsStackSlot()); + DCHECK(destination->IsStackSlot()); Operand dst = cgen_->ToOperand(destination); if (cgen_->IsSmiConstant(constant_source)) { __ Move(dst, cgen_->ToSmi(constant_source)); @@ -238,7 +215,7 @@ if (destination->IsDoubleRegister()) { __ movaps(cgen_->ToDoubleRegister(destination), src); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); __ movsd(cgen_->ToOperand(destination), src); } } else if (source->IsDoubleStackSlot()) { @@ -246,7 +223,7 @@ if (destination->IsDoubleRegister()) { __ movsd(cgen_->ToDoubleRegister(destination), src); } else { - ASSERT(destination->IsDoubleStackSlot()); + DCHECK(destination->IsDoubleStackSlot()); __ movsd(xmm0, src); __ movsd(cgen_->ToOperand(destination), xmm0); } @@ -301,13 +278,13 @@ } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { // Swap a double register and a double stack slot. 
- ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) || + DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) || (source->IsDoubleStackSlot() && destination->IsDoubleRegister())); XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister() ? source : destination); LOperand* other = source->IsDoubleRegister() ? destination : source; - ASSERT(other->IsDoubleStackSlot()); + DCHECK(other->IsDoubleStackSlot()); Operand other_operand = cgen_->ToOperand(other); __ movsd(xmm0, other_operand); __ movsd(other_operand, reg); diff -Nru nodejs-0.11.13/deps/v8/src/x64/lithium-gap-resolver-x64.h nodejs-0.11.15/deps/v8/src/x64/lithium-gap-resolver-x64.h --- nodejs-0.11.13/deps/v8/src/x64/lithium-gap-resolver-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/lithium-gap-resolver-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_ #define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_ -#include "v8.h" +#include "src/v8.h" -#include "lithium.h" +#include "src/lithium.h" namespace v8 { namespace internal { diff -Nru nodejs-0.11.13/deps/v8/src/x64/lithium-x64.cc nodejs-0.11.15/deps/v8/src/x64/lithium-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/lithium-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/lithium-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "lithium-allocator-inl.h" -#include "x64/lithium-x64.h" -#include "x64/lithium-codegen-x64.h" -#include "hydrogen-osr.h" +#include "src/hydrogen-osr.h" +#include "src/lithium-inl.h" +#include "src/x64/lithium-codegen-x64.h" namespace v8 { namespace internal { @@ -51,17 +27,17 @@ // outputs because all registers are blocked by the calling convention. // Inputs operands must use a fixed register or use-at-start policy or // a non-register policy. 
- ASSERT(Output() == NULL || + DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || !LUnallocated::cast(Output())->HasRegisterPolicy()); for (UseIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() || + DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart()); } for (TempIterator it(this); !it.Done(); it.Advance()) { LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); } } #endif @@ -179,12 +155,9 @@ bool LTemplateResultInstruction<R>::MustSignExtendResult( LPlatformChunk* chunk) const { HValue* hvalue = this->hydrogen_value(); - - if (hvalue == NULL) return false; - if (!hvalue->representation().IsInteger32()) return false; - if (hvalue->HasRange() && !hvalue->range()->CanBeNegative()) return false; - - return chunk->GetDehoistedKeyIds()->Contains(hvalue->id()); + return hvalue != NULL && + hvalue->representation().IsInteger32() && + chunk->GetDehoistedKeyIds()->Contains(hvalue->id()); } @@ -357,6 +330,16 @@ int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { + if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) { + // Skip a slot if for a double-width slot for x32 port. + spill_slot_count_++; + // The spill slot's address is at rbp - (index + 1) * kPointerSize - + // StandardFrameConstants::kFixedFrameSizeFromFp. kFixedFrameSizeFromFp is + // 2 * kPointerSize, if rbp is aligned at 8-byte boundary, the below "|= 1" + // will make sure the spilled doubles are aligned at 8-byte boundary. + // TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port. 
+ spill_slot_count_ |= 1; + } return spill_slot_count_++; } @@ -369,7 +352,7 @@ if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { - ASSERT(kind == GENERAL_REGISTERS); + DCHECK(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } @@ -377,8 +360,9 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); - hydrogen()->access().PrintTo(stream); - stream->Add(" <- "); + OStringStream os; + os << hydrogen()->access() << " <- "; + stream->Add(os.c_str()); value()->PrintTo(stream); } @@ -397,7 +381,7 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", additional_index()); + stream->Add(" + %d]", base_offset()); } else { stream->Add("]"); } @@ -409,13 +393,13 @@ stream->Add("["); key()->PrintTo(stream); if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", additional_index()); + stream->Add(" + %d] <-", base_offset()); } else { stream->Add("] <- "); } if (value() == NULL) { - ASSERT(hydrogen()->IsConstantHoleStore() && + DCHECK(hydrogen()->IsConstantHoleStore() && hydrogen()->value()->representation().IsDouble()); stream->Add("<the hole(nan)>"); } else { @@ -440,7 +424,7 @@ LPlatformChunk* LChunkBuilder::Build() { - ASSERT(is_unused()); + DCHECK(is_unused()); chunk_ = new(zone()) LPlatformChunk(info(), graph()); LPhase phase("L_Building chunk", chunk_); status_ = BUILDING; @@ -465,7 +449,7 @@ } -void LCodeGen::Abort(BailoutReason reason) { +void LChunkBuilder::Abort(BailoutReason reason) { info()->set_bailout_reason(reason); status_ = ABORTED; } @@ -652,6 +636,8 @@ !hinstr->HasObservableSideEffects(); if (needs_environment && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. 
+ instr->environment()->set_has_been_used(); } return instr; @@ -659,7 +645,7 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { - ASSERT(!instr->HasPointerMap()); + DCHECK(!instr->HasPointerMap()); instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -680,14 +666,14 @@ LOperand* LChunkBuilder::FixedTemp(Register reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) { LUnallocated* operand = ToUnallocated(reg); - ASSERT(operand->HasFixedPolicy()); + DCHECK(operand->HasFixedPolicy()); return operand; } @@ -716,24 +702,30 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); HValue* right_value = instr->right(); LOperand* right = NULL; int constant_value = 0; + bool does_deopt = false; if (right_value->IsConstant()) { HConstant* constant = HConstant::cast(right_value); right = chunk_->DefineConstantOperand(constant); constant_value = constant->Integer32Value() & 0x1f; + if (SmiValuesAre31Bits() && instr->representation().IsSmi() && + constant_value > 0) { + // Left shift can deoptimize if we shift by > 0 and the result + // cannot be truncated to smi. + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + } } else { right = UseFixed(right_value, rcx); } // Shift operations can only deoptimize if we do a logical shift by 0 and // the result cannot be truncated to int32. 
- bool does_deopt = false; if (op == Token::SHR && constant_value == 0) { if (FLAG_opt_safe_uint32_operations) { does_deopt = !instr->CheckFlag(HInstruction::kUint32); @@ -753,9 +745,9 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); if (op == Token::MOD) { LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1); @@ -774,8 +766,8 @@ HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); - ASSERT(left->representation().IsTagged()); - ASSERT(right->representation().IsTagged()); + DCHECK(left->representation().IsTagged()); + DCHECK(right->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), rsi); LOperand* left_operand = UseFixed(left, rdx); LOperand* right_operand = UseFixed(right, rax); @@ -786,7 +778,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { - ASSERT(is_building()); + DCHECK(is_building()); current_block_ = block; next_block_ = next_block; if (block->IsStartBlock()) { @@ -795,13 +787,13 @@ } else if (block->predecessors()->length() == 1) { // We have a single predecessor => copy environment and outgoing // argument count from the predecessor. - ASSERT(block->phis()->length() == 0); + DCHECK(block->phis()->length() == 0); HBasicBlock* pred = block->predecessors()->at(0); HEnvironment* last_environment = pred->last_environment(); - ASSERT(last_environment != NULL); + DCHECK(last_environment != NULL); // Only copy the environment, if it is later used again. 
if (pred->end()->SecondSuccessor() == NULL) { - ASSERT(pred->end()->FirstSuccessor() == block); + DCHECK(pred->end()->FirstSuccessor() == block); } else { if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || pred->end()->SecondSuccessor()->block_id() > block->block_id()) { @@ -809,7 +801,7 @@ } } block->UpdateEnvironment(last_environment); - ASSERT(pred->argument_count() >= 0); + DCHECK(pred->argument_count() >= 0); argument_count_ = pred->argument_count(); } else { // We are at a state join => process phis. @@ -861,7 +853,7 @@ if (current->OperandCount() == 0) { instr = DefineAsRegister(new(zone()) LDummy()); } else { - ASSERT(!current->OperandAt(0)->IsControlInstruction()); + DCHECK(!current->OperandAt(0)->IsControlInstruction()); instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(current->OperandAt(0)))); } @@ -873,75 +865,90 @@ chunk_->AddInstruction(dummy, current_block_); } } else { - instr = current->CompileToLithium(this); + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } } argument_count_ += current->argument_delta(); - ASSERT(argument_count_ >= 0); + DCHECK(argument_count_ >= 0); if (instr != NULL) { - // Associate the hydrogen instruction first, since we may need it for - // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. - instr->set_hydrogen_value(current); + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
+ instr->set_hydrogen_value(hydrogen_val); #if DEBUG - // Make sure that the lithium instruction has either no fixed register - // constraints in temps or the result OR no uses that are only used at - // start. If this invariant doesn't hold, the register allocator can decide - // to insert a split of a range immediately before the instruction due to an - // already allocated register needing to be used for the instruction's fixed - // register constraint. In this case, The register allocator won't see an - // interference between the split child and the use-at-start (it would if - // the it was just a plain use), so it is free to move the split child into - // the same register that is used for the use-at-start. - // See https://code.google.com/p/chromium/issues/detail?id=201590 - if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) { - int fixed = 0; - int used_at_start = 0; - for (UseIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->IsUsedAtStart()) ++used_at_start; - } - if (instr->Output() != NULL) { - if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; - } - for (TempIterator it(instr); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - if (operand->HasFixedPolicy()) ++fixed; - } - ASSERT(fixed == 0 || used_at_start == 0); + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. 
In this case, The register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; } + DCHECK(fixed == 0 || used_at_start == 0); + } #endif - if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { - instr = AssignPointerMap(instr); - } - if (FLAG_stress_environments && !instr->HasEnvironment()) { - instr = AssignEnvironment(instr); - } - chunk_->AddInstruction(instr, current_block_); + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + chunk_->AddInstruction(instr, current_block_); - if (instr->IsCall()) { - HValue* hydrogen_value_for_lazy_bailout = current; - LInstruction* instruction_needing_environment = NULL; - if (current->HasObservableSideEffects()) { - HSimulate* sim = HSimulate::cast(current->next()); - instruction_needing_environment = instr; - sim->ReplayEnvironment(current_block_->last_environment()); - hydrogen_value_for_lazy_bailout = sim; - } - LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); - bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); - 
chunk_->AddInstruction(bailout, current_block_); - if (instruction_needing_environment != NULL) { - // Store the lazy deopt environment with the instruction if needed. - // Right now it is only used for LInstanceOfKnownGlobal. - instruction_needing_environment-> - SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); - } + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. 
+ instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); } } - current_instruction_ = old_current; } @@ -956,9 +963,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - HValue* value = instr->value(); Representation r = value->representation(); HType type = value->type(); @@ -978,10 +982,7 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LCmpMapAndBranch(value); } @@ -1039,9 +1040,13 @@ } -LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - LOperand* argument = UseOrConstant(instr->argument()); - return new(zone()) LPushArgument(argument); +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + for (int i = 0; i < argc; ++i) { + LOperand* argument = UseOrConstant(instr->argument(i)); + AddInstruction(new(zone()) LPushArgument(argument), instr); + } + return NULL; } @@ -1098,7 +1103,7 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor( HCallWithDescriptor* instr) { - const CallInterfaceDescriptor* descriptor = instr->descriptor(); + const InterfaceDescriptor* descriptor = instr->descriptor(); LOperand* target = UseRegisterOrConstantAtStart(instr->target()); ZoneList<LOperand*> ops(instr->OperandCount(), zone()); @@ -1125,14 +1130,24 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { switch (instr->op()) { - case kMathFloor: return DoMathFloor(instr); - case kMathRound: return DoMathRound(instr); - case kMathAbs: return DoMathAbs(instr); - case kMathLog: return DoMathLog(instr); - case 
kMathExp: return DoMathExp(instr); - case kMathSqrt: return DoMathSqrt(instr); - case kMathPowHalf: return DoMathPowHalf(instr); - case kMathClz32: return DoMathClz32(instr); + case kMathFloor: + return DoMathFloor(instr); + case kMathRound: + return DoMathRound(instr); + case kMathFround: + return DoMathFround(instr); + case kMathAbs: + return DoMathAbs(instr); + case kMathLog: + return DoMathLog(instr); + case kMathExp: + return DoMathExp(instr); + case kMathSqrt: + return DoMathSqrt(instr); + case kMathPowHalf: + return DoMathPowHalf(instr); + case kMathClz32: + return DoMathClz32(instr); default: UNREACHABLE(); return NULL; @@ -1155,6 +1170,13 @@ } +LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathFround* result = new (zone()) LMathFround(input); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { LOperand* context = UseAny(instr->context()); LOperand* input = UseRegisterAtStart(instr->value()); @@ -1168,8 +1190,8 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* input = UseRegisterAtStart(instr->value()); return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr); } @@ -1183,8 +1205,8 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); LOperand* value = UseTempRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); @@ -1194,9 +1216,8 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { - LOperand* input = 
UseRegisterAtStart(instr->value()); - LMathSqrt* result = new(zone()) LMathSqrt(input); - return DefineSameAsFirst(result); + LOperand* input = UseAtStart(instr->value()); + return DefineAsRegister(new(zone()) LMathSqrt(input)); } @@ -1260,9 +1281,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); @@ -1274,9 +1295,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( @@ -1292,9 +1313,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + 
DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp1 = FixedTemp(rax); @@ -1310,10 +1331,10 @@ } -LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); +LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseFixed(instr->left(), rax); LOperand* divisor = UseRegister(instr->right()); LOperand* temp = FixedTemp(rdx); @@ -1322,8 +1343,7 @@ if (instr->CheckFlag(HValue::kCanBeDivByZero) || instr->CheckFlag(HValue::kBailoutOnMinusZero) || instr->CheckFlag(HValue::kCanOverflow) || - (!instr->IsMathFloorOfDiv() && - !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { result = AssignEnvironment(result); } return result; @@ -1361,9 +1381,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp1 = FixedTemp(rax); @@ -1387,26 +1407,45 @@ } +LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) 
{ + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseFixed(instr->left(), rax); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = FixedTemp(rdx); + LInstruction* result = DefineFixed(new(zone()) LFlooringDivI( + dividend, divisor, temp), rax); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; +} + + LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { if (instr->RightIsPowerOf2()) { return DoFlooringDivByPowerOf2I(instr); } else if (instr->right()->IsConstant()) { return DoFlooringDivByConstI(instr); } else { - return DoDivI(instr); + return DoFlooringDivI(instr); } } LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegisterAtStart(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( dividend, divisor)); - if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { result = AssignEnvironment(result); } return result; @@ -1414,9 +1453,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - 
ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); int32_t divisor = instr->right()->GetInteger32Constant(); LOperand* temp1 = FixedTemp(rax); @@ -1431,9 +1470,9 @@ LInstruction* LChunkBuilder::DoModI(HMod* instr) { - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseFixed(instr->left(), rax); LOperand* divisor = UseRegister(instr->right()); LOperand* temp = FixedTemp(rdx); @@ -1466,8 +1505,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstant(instr->BetterRightOperand()); LMulI* mul = new(zone()) LMulI(left, right); @@ -1486,8 +1525,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + 
DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new(zone()) LSubI(left, right); @@ -1511,26 +1550,31 @@ // are multiple uses of the add's inputs, so using a 3-register add will // preserve all input values for later uses. bool use_lea = LAddI::UseLea(instr); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); HValue* right_candidate = instr->BetterRightOperand(); - LOperand* right = use_lea - ? UseRegisterOrConstantAtStart(right_candidate) - : UseOrConstantAtStart(right_candidate); + LOperand* right; + if (SmiValuesAre32Bits() && instr->representation().IsSmi()) { + // We cannot add a tagged immediate to a tagged value, + // so we request it in a register. + right = UseRegisterAtStart(right_candidate); + } else { + right = use_lea ? UseRegisterOrConstantAtStart(right_candidate) + : UseOrConstantAtStart(right_candidate); + } LAddI* add = new(zone()) LAddI(left, right); bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); - LInstruction* result = use_lea - ? DefineAsRegister(add) - : DefineSameAsFirst(add); + LInstruction* result = use_lea ? 
DefineAsRegister(add) + : DefineSameAsFirst(add); if (can_overflow) { result = AssignEnvironment(result); } return result; } else if (instr->representation().IsExternal()) { - ASSERT(instr->left()->representation().IsExternal()); - ASSERT(instr->right()->representation().IsInteger32()); - ASSERT(!instr->CheckFlag(HValue::kCanOverflow)); + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); bool use_lea = LAddI::UseLea(instr); LOperand* left = UseRegisterAtStart(instr->left()); HValue* right_candidate = instr->right(); @@ -1554,8 +1598,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { LOperand* left = NULL; LOperand* right = NULL; - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); if (instr->representation().IsSmi()) { left = UseRegisterAtStart(instr->BetterLeftOperand()); right = UseAtStart(instr->BetterRightOperand()); @@ -1563,7 +1607,7 @@ left = UseRegisterAtStart(instr->BetterLeftOperand()); right = UseOrConstantAtStart(instr->BetterRightOperand()); } else { - ASSERT(instr->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); } @@ -1573,11 +1617,11 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { - ASSERT(instr->representation().IsDouble()); + DCHECK(instr->representation().IsDouble()); // We call a C function for double power. It can't trigger a GC. // We need to use fixed result register for the call. 
Representation exponent_type = instr->right()->representation(); - ASSERT(instr->left()->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); LOperand* left = UseFixedDouble(instr->left(), xmm2); LOperand* right = exponent_type.IsDouble() ? UseFixedDouble(instr->right(), xmm1) : UseFixed(instr->right(), rdx); @@ -1588,8 +1632,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), rsi); LOperand* left = UseFixed(instr->left(), rdx); LOperand* right = UseFixed(instr->right(), rax); @@ -1602,15 +1646,15 @@ HCompareNumericAndBranch* instr) { Representation r = instr->representation(); if (r.IsSmiOrInteger32()) { - ASSERT(instr->left()->representation().Equals(r)); - ASSERT(instr->right()->representation().Equals(r)); + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); LOperand* left = UseRegisterOrConstantAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); return new(zone()) LCompareNumericAndBranch(left, right); } else { - ASSERT(r.IsDouble()); - ASSERT(instr->left()->representation().IsDouble()); - ASSERT(instr->right()->representation().IsDouble()); + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); LOperand* left; LOperand* right; if (instr->left()->IsConstant() && instr->right()->IsConstant()) { @@ -1627,8 +1671,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = 
UseRegisterOrConstantAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1644,21 +1686,19 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( HCompareMinusZeroAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; LOperand* value = UseRegister(instr->value()); return new(zone()) LCompareMinusZeroAndBranch(value); } LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value())); } LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LIsStringAndBranch(value, temp); @@ -1666,14 +1706,14 @@ LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); return new(zone()) LIsSmiAndBranch(Use(instr->value())); } LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( HIsUndetectableAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); return new(zone()) LIsUndetectableAndBranch(value, temp); @@ -1683,8 +1723,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch( HStringCompareAndBranch* instr) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); LOperand* context = 
UseFixed(instr->context(), rsi); LOperand* left = UseFixed(instr->left(), rdx); LOperand* right = UseFixed(instr->right(), rax); @@ -1697,7 +1737,7 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( HHasInstanceTypeAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LHasInstanceTypeAndBranch(value); } @@ -1705,7 +1745,7 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex( HGetCachedArrayIndex* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); @@ -1714,7 +1754,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( HHasCachedArrayIndexAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); return new(zone()) LHasCachedArrayIndexAndBranch(value); } @@ -1768,9 +1808,16 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { - LOperand* value = UseRegisterOrConstantAtStart(instr->index()); - LOperand* length = Use(instr->length()); - return AssignEnvironment(new(zone()) LBoundsCheck(value, length)); + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? 
UseOrConstantAtStart(instr->length()) + : UseAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } + return result; } @@ -1804,105 +1851,92 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + HValue* val = instr->value(); if (from.IsSmi()) { if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LDummyUse(value)); } from = Representation::Tagged(); } - // Only mark conversions that might need to allocate as calling rather than - // all changes. This makes simple, non-allocating conversion not have to force - // building a stack frame. if (from.IsTagged()) { if (to.IsDouble()) { - LOperand* value = UseRegister(instr->value()); - LInstruction* res = DefineAsRegister(new(zone()) LNumberUntagD(value)); - if (!instr->value()->representation().IsSmi()) { - res = AssignEnvironment(res); - } - return res; + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); if (val->type().IsSmi()) { return DefineSameAsFirst(new(zone()) LDummyUse(value)); } return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); } else { - ASSERT(to.IsInteger32()); - HValue* val = instr->value(); - LOperand* value = UseRegister(val); + DCHECK(to.IsInteger32()); if (val->type().IsSmi() || val->representation().IsSmi()) { + LOperand* value = UseRegister(val); return DefineSameAsFirst(new(zone()) LSmiUntag(value, false)); } else { + LOperand* value = UseRegister(val); bool truncating = instr->CanTruncateToInt32(); LOperand* xmm_temp = truncating ? 
NULL : FixedTemp(xmm1); - LInstruction* res = + LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp)); - if (!instr->value()->representation().IsSmi()) { - // Note: Only deopts in deferred code. - res = AssignEnvironment(res); - } - return res; + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; } } } else if (from.IsDouble()) { if (to.IsTagged()) { info()->MarkAsDeferredCalling(); - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); LOperand* temp = TempRegister(); - - // Make sure that temp and result_temp are different registers. LUnallocated* result_temp = TempRegister(); LNumberTagD* result = new(zone()) LNumberTagD(value, temp); return AssignPointerMap(Define(result, result_temp)); } else if (to.IsSmi()) { - LOperand* value = UseRegister(instr->value()); + LOperand* value = UseRegister(val); return AssignEnvironment( DefineAsRegister(new(zone()) LDoubleToSmi(value))); } else { - ASSERT(to.IsInteger32()); - LOperand* value = UseRegister(instr->value()); + DCHECK(to.IsInteger32()); + LOperand* value = UseRegister(val); LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); - if (!instr->CanTruncateToInt32()) { - result = AssignEnvironment(result); - } + if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result); return result; } } else if (from.IsInteger32()) { info()->MarkAsDeferredCalling(); if (to.IsTagged()) { - HValue* val = instr->value(); - LOperand* value = UseRegister(val); if (!instr->CheckFlag(HValue::kCanOverflow)) { + LOperand* value = UseRegister(val); return DefineAsRegister(new(zone()) LSmiTag(value)); } else if (val->CheckFlag(HInstruction::kUint32)) { + LOperand* value = UseRegister(val); LOperand* temp1 = TempRegister(); LOperand* temp2 = FixedTemp(xmm1); LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2); return AssignPointerMap(DefineSameAsFirst(result)); } else { - LNumberTagI* result = new(zone()) 
LNumberTagI(value); + LOperand* value = UseRegister(val); + LOperand* temp1 = SmiValuesAre32Bits() ? NULL : TempRegister(); + LOperand* temp2 = SmiValuesAre32Bits() ? NULL : FixedTemp(xmm1); + LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2); return AssignPointerMap(DefineSameAsFirst(result)); } } else if (to.IsSmi()) { - HValue* val = instr->value(); LOperand* value = UseRegister(val); LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value)); if (instr->CheckFlag(HValue::kCanOverflow)) { - ASSERT(val->CheckFlag(HValue::kUint32)); result = AssignEnvironment(result); } return result; } else { - if (instr->value()->CheckFlag(HInstruction::kUint32)) { - LOperand* temp = FixedTemp(xmm1); - return DefineAsRegister( - new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp)); + DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { + return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); } else { - ASSERT(to.IsDouble()); - LOperand* value = Use(instr->value()); + LOperand* value = Use(val); return DefineAsRegister(new(zone()) LInteger32ToDouble(value)); } } @@ -1914,7 +1948,11 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckNonSmi(value)); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = AssignEnvironment(result); + } + return result; } @@ -1938,16 +1976,12 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { - LOperand* value = NULL; - if (!instr->CanOmitMapChecks()) { - value = UseRegisterAtStart(instr->value()); - if (instr->has_migration_target()) info()->MarkAsDeferredCalling(); - } - LCheckMaps* result = new(zone()) LCheckMaps(value); - if (!instr->CanOmitMapChecks()) { - // Note: Only deopts in deferred code. 
- AssignEnvironment(result); - if (instr->has_migration_target()) return AssignPointerMap(result); + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); } return result; } @@ -1962,7 +1996,7 @@ } else if (input_rep.IsInteger32()) { return DefineSameAsFirst(new(zone()) LClampIToUint8(reg)); } else { - ASSERT(input_rep.IsSmiOrTagged()); + DCHECK(input_rep.IsSmiOrTagged()); // Register allocator doesn't (yet) support allocation of double // temps. Reserve xmm1 explicitly. LClampTToUint8* result = new(zone()) LClampTToUint8(reg, @@ -1974,7 +2008,7 @@ LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { HValue* value = instr->value(); - ASSERT(value->representation().IsDouble()); + DCHECK(value->representation().IsDouble()); return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); } @@ -2024,9 +2058,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LOperand* context = UseFixed(instr->context(), rsi); - LOperand* global_object = UseFixed(instr->global_object(), rax); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadGlobalGeneric* result = - new(zone()) LLoadGlobalGeneric(context, global_object); + new(zone()) LLoadGlobalGeneric(context, global_object, vector); return MarkAsCall(DefineFixed(result, rax), instr); } @@ -2091,8 +2131,13 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), rsi); - LOperand* object = UseFixed(instr->object(), rax); - LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object); + LOperand* object = 
UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric( + context, object, vector); return MarkAsCall(DefineFixed(result, rax), instr); } @@ -2110,6 +2155,11 @@ void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) { + // We sign extend the dehoisted key at the definition point when the pointer + // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use + // points and should not invoke this function. We can't use STATIC_ASSERT + // here as the pointer size is 32-bit for x32. + DCHECK(kPointerSize == kInt64Size); BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds(); if (dehoisted_key_ids->Contains(candidate->id())) return; dehoisted_key_ids->Add(candidate->id()); @@ -2121,12 +2171,25 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { - ASSERT(instr->key()->representation().IsInteger32()); + DCHECK((kPointerSize == kInt64Size && + instr->key()->representation().IsInteger32()) || + (kPointerSize == kInt32Size && + instr->key()->representation().IsSmiOrInteger32())); ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* key = NULL; LInstruction* result = NULL; - if (instr->IsDehoisted()) { + if (kPointerSize == kInt64Size) { + key = UseRegisterOrConstantAtStart(instr->key()); + } else { + bool clobbers_key = ExternalArrayOpRequiresTemp( + instr->key()->representation(), elements_kind); + key = clobbers_key + ? 
UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } + + if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) { FindDehoistedKeyDefinitions(instr->key()); } @@ -2134,7 +2197,7 @@ LOperand* obj = UseRegisterAtStart(instr->elements()); result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key)); } else { - ASSERT( + DCHECK( (instr->representation().IsInteger32() && !(IsDoubleOrFloatElementsKind(elements_kind))) || (instr->representation().IsDouble() && @@ -2159,11 +2222,15 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), rsi); - LOperand* object = UseFixed(instr->object(), rdx); - LOperand* key = UseFixed(instr->key(), rax); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } LLoadKeyedGeneric* result = - new(zone()) LLoadKeyedGeneric(context, object, key); + new(zone()) LLoadKeyedGeneric(context, object, key, vector); return MarkAsCall(DefineFixed(result, rax), instr); } @@ -2171,12 +2238,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); - if (instr->IsDehoisted()) { + if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) { FindDehoistedKeyDefinitions(instr->key()); } if (!instr->is_typed_elements()) { - ASSERT(instr->elements()->representation().IsTagged()); + DCHECK(instr->elements()->representation().IsTagged()); bool needs_write_barrier = instr->NeedsWriteBarrier(); LOperand* object = NULL; LOperand* key = NULL; @@ -2188,7 +2255,7 @@ val = UseRegisterAtStart(instr->value()); key = UseRegisterOrConstantAtStart(instr->key()); } else { - ASSERT(value_representation.IsSmiOrTagged() || + DCHECK(value_representation.IsSmiOrTagged() || value_representation.IsInteger32()); if 
(needs_write_barrier) { object = UseTempRegister(instr->elements()); @@ -2204,12 +2271,12 @@ return new(zone()) LStoreKeyed(object, key, val); } - ASSERT( + DCHECK( (instr->value()->representation().IsInteger32() && !IsDoubleOrFloatElementsKind(elements_kind)) || (instr->value()->representation().IsDouble() && IsDoubleOrFloatElementsKind(elements_kind))); - ASSERT((instr->is_fixed_typed_array() && + DCHECK((instr->is_fixed_typed_array() && instr->elements()->representation().IsTagged()) || (instr->is_external() && instr->elements()->representation().IsExternal())); @@ -2219,7 +2286,16 @@ elements_kind == FLOAT32_ELEMENTS; LOperand* val = val_is_temp_register ? UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* key = NULL; + if (kPointerSize == kInt64Size) { + key = UseRegisterOrConstantAtStart(instr->key()); + } else { + bool clobbers_key = ExternalArrayOpRequiresTemp( + instr->key()->representation(), elements_kind); + key = clobbers_key + ? 
UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } LOperand* backing_store = UseRegister(instr->elements()); return new(zone()) LStoreKeyed(backing_store, key, val); } @@ -2227,13 +2303,14 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), rsi); - LOperand* object = UseFixed(instr->object(), rdx); - LOperand* key = UseFixed(instr->key(), rcx); - LOperand* value = UseFixed(instr->value(), rax); - - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsTagged()); - ASSERT(instr->value()->representation().IsTagged()); + LOperand* object = UseFixed(instr->object(), + KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); LStoreKeyedGeneric* result = new(zone()) LStoreKeyedGeneric(context, object, key, value); @@ -2243,7 +2320,6 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - LOperand* object = UseRegister(instr->object()); if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); @@ -2252,10 +2328,11 @@ object, NULL, new_map_reg, temp_reg); return result; } else { + LOperand* object = UseFixed(instr->object(), rax); LOperand* context = UseFixed(instr->context(), rsi); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, context, NULL, NULL); - return AssignPointerMap(result); + return MarkAsCall(result, instr); } } @@ -2284,9 +2361,9 @@ ? 
UseRegister(instr->object()) : UseTempRegister(instr->object()); } else if (is_external_location) { - ASSERT(!is_in_object); - ASSERT(!needs_write_barrier); - ASSERT(!needs_write_barrier_for_map); + DCHECK(!is_in_object); + DCHECK(!needs_write_barrier); + DCHECK(!needs_write_barrier_for_map); obj = UseRegisterOrConstant(instr->object()); } else { obj = needs_write_barrier_for_map @@ -2318,22 +2395,14 @@ LOperand* temp = (!is_in_object || needs_write_barrier || needs_write_barrier_for_map) ? TempRegister() : NULL; - LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp); - if (!instr->access().IsExternalMemory() && - instr->field_representation().IsHeapObject() && - (val->IsConstantOperand() - ? HConstant::cast(instr->value())->HasSmiValue() - : !instr->value()->type().IsHeapObject())) { - result = AssignEnvironment(result); - } - return result; + return new(zone()) LStoreNamedField(obj, val, temp); } LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LOperand* context = UseFixed(instr->context(), rsi); - LOperand* object = UseFixed(instr->object(), rdx); - LOperand* value = UseFixed(instr->value(), rax); + LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister()); LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(context, object, value); @@ -2396,7 +2465,7 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - ASSERT(argument_count_ == 0); + DCHECK(argument_count_ == 0); allocator_->MarkAsOsrEntry(); current_block_->last_environment()->set_ast_id(instr->ast_id()); return AssignEnvironment(new(zone()) LOsrEntry); @@ -2409,11 +2478,11 @@ int spill_index = chunk()->GetParameterStackSlot(instr->index()); return DefineAsSpilled(result, spill_index); } else { - ASSERT(info()->IsStub()); + DCHECK(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = - info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + 
info()->code_stub()->GetInterfaceDescriptor(); int index = static_cast<int>(instr->index()); - Register reg = descriptor->GetParameterRegister(index); + Register reg = descriptor->GetEnvironmentParameterRegister(index); return DefineFixed(result, reg); } } @@ -2493,9 +2562,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - LInstruction* goto_instr = CheckElideControlInstruction(instr); - if (goto_instr != NULL) return goto_instr; - return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); } @@ -2518,7 +2584,7 @@ LOperand* context = UseFixed(instr->context(), rsi); return MarkAsCall(new(zone()) LStackCheck(context), instr); } else { - ASSERT(instr->is_backwards_branch()); + DCHECK(instr->is_backwards_branch()); LOperand* context = UseAny(instr->context()); return AssignEnvironment( AssignPointerMap(new(zone()) LStackCheck(context))); @@ -2528,6 +2594,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), instr->arguments_count(), @@ -2553,7 +2620,7 @@ if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - ASSERT(instr->argument_delta() == -argument_count); + DCHECK(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> @@ -2589,7 +2656,25 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { LOperand* object = UseRegister(instr->object()); LOperand* index = UseTempRegister(instr->index()); - return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); + LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); 
+} + + +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), rsi); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, rsi), instr); } diff -Nru nodejs-0.11.13/deps/v8/src/x64/lithium-x64.h nodejs-0.11.15/deps/v8/src/x64/lithium-x64.h --- nodejs-0.11.13/deps/v8/src/x64/lithium-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/lithium-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_X64_LITHIUM_X64_H_ #define V8_X64_LITHIUM_X64_H_ -#include "hydrogen.h" -#include "lithium-allocator.h" -#include "lithium.h" -#include "safepoint-table.h" -#include "utils.h" +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -40,144 +17,148 @@ // Forward declarations. 
class LCodeGen; -#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ - V(AccessArgumentsAt) \ - V(AddI) \ - V(Allocate) \ - V(ApplyArguments) \ - V(ArgumentsElements) \ - V(ArgumentsLength) \ - V(ArithmeticD) \ - V(ArithmeticT) \ - V(BitI) \ - V(BoundsCheck) \ - V(Branch) \ - V(CallJSFunction) \ - V(CallWithDescriptor) \ - V(CallFunction) \ - V(CallNew) \ - V(CallNewArray) \ - V(CallRuntime) \ - V(CallStub) \ - V(CheckInstanceType) \ - V(CheckMaps) \ - V(CheckMapValue) \ - V(CheckNonSmi) \ - V(CheckSmi) \ - V(CheckValue) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ - V(ClassOfTestAndBranch) \ - V(CompareMinusZeroAndBranch) \ - V(CompareNumericAndBranch) \ - V(CmpObjectEqAndBranch) \ - V(CmpHoleAndBranch) \ - V(CmpMapAndBranch) \ - V(CmpT) \ - V(ConstantD) \ - V(ConstantE) \ - V(ConstantI) \ - V(ConstantS) \ - V(ConstantT) \ - V(ConstructDouble) \ - V(Context) \ - V(DateField) \ - V(DebugBreak) \ - V(DeclareGlobals) \ - V(Deoptimize) \ - V(DivByConstI) \ - V(DivByPowerOf2I) \ - V(DivI) \ - V(DoubleBits) \ - V(DoubleToI) \ - V(DoubleToSmi) \ - V(Drop) \ - V(DummyUse) \ - V(Dummy) \ - V(FlooringDivByConstI) \ - V(FlooringDivByPowerOf2I) \ - V(ForInCacheArray) \ - V(ForInPrepareMap) \ - V(FunctionLiteral) \ - V(GetCachedArrayIndex) \ - V(Goto) \ - V(HasCachedArrayIndexAndBranch) \ - V(HasInstanceTypeAndBranch) \ - V(InnerAllocatedObject) \ - V(InstanceOf) \ - V(InstanceOfKnownGlobal) \ - V(InstructionGap) \ - V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsConstructCallAndBranch) \ - V(IsObjectAndBranch) \ - V(IsStringAndBranch) \ - V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ - V(Label) \ - V(LazyBailout) \ - V(LoadContextSlot) \ - V(LoadRoot) \ - V(LoadFieldByIndex) \ - V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ - V(LoadGlobalGeneric) \ - V(LoadKeyed) \ - V(LoadKeyedGeneric) \ - V(LoadNamedField) \ - V(LoadNamedGeneric) \ - V(MapEnumLength) \ - V(MathAbs) \ - V(MathClz32) \ - V(MathExp) \ - V(MathFloor) \ - V(MathLog) \ - V(MathMinMax) \ 
- V(MathPowHalf) \ - V(MathRound) \ - V(MathSqrt) \ - V(ModByConstI) \ - V(ModByPowerOf2I) \ - V(ModI) \ - V(MulI) \ - V(NumberTagD) \ - V(NumberTagI) \ - V(NumberTagU) \ - V(NumberUntagD) \ - V(OsrEntry) \ - V(Parameter) \ - V(Power) \ - V(PushArgument) \ - V(RegExpLiteral) \ - V(Return) \ - V(SeqStringGetChar) \ - V(SeqStringSetChar) \ - V(ShiftI) \ - V(SmiTag) \ - V(SmiUntag) \ - V(StackCheck) \ - V(StoreCodeEntry) \ - V(StoreContextSlot) \ - V(StoreGlobalCell) \ - V(StoreKeyed) \ - V(StoreKeyedGeneric) \ - V(StoreNamedField) \ - V(StoreNamedGeneric) \ - V(StringAdd) \ - V(StringCharCodeAt) \ - V(StringCharFromCode) \ - V(StringCompareAndBranch) \ - V(SubI) \ - V(TaggedToI) \ - V(ThisFunction) \ - V(ToFastProperties) \ - V(TransitionElementsKind) \ - V(TrapAllocationMemento) \ - V(Typeof) \ - V(TypeofIsAndBranch) \ - V(Uint32ToDouble) \ - V(UnknownOSRValue) \ +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddI) \ + V(Allocate) \ + V(AllocateBlockContext) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallJSFunction) \ + V(CallWithDescriptor) \ + V(CallFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CheckInstanceType) \ + V(CheckMaps) \ + V(CheckMapValue) \ + V(CheckNonSmi) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8) \ + V(ClassOfTestAndBranch) \ + V(CompareMinusZeroAndBranch) \ + V(CompareNumericAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpHoleAndBranch) \ + V(CmpMapAndBranch) \ + V(CmpT) \ + V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleBits) \ + V(DoubleToI) \ + V(DoubleToSmi) \ + V(Drop) \ + V(DummyUse) \ + V(Dummy) \ 
+ V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsStringAndBranch) \ + V(IsSmiAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadRoot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyed) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(MapEnumLength) \ + V(MathAbs) \ + V(MathClz32) \ + V(MathExp) \ + V(MathFloor) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulI) \ + V(NumberTagD) \ + V(NumberTagI) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PushArgument) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyed) \ + V(StoreKeyedGeneric) \ + V(StoreNamedField) \ + V(StoreNamedGeneric) \ + V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ V(WrapReceiver) @@ -190,7 +171,7 @@ return mnemonic; \ } \ static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ + DCHECK(instr->Is##type()); \ return 
reinterpret_cast<L##type*>(instr); \ } @@ -239,6 +220,9 @@ virtual bool IsControl() const { return false; } + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + void set_environment(LEnvironment* env) { environment_ = env; } LEnvironment* environment() const { return environment_; } bool HasEnvironment() const { return environment_ != NULL; } @@ -256,7 +240,9 @@ // Interface to the register allocator and iterators. bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - virtual bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall(); + } virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } @@ -279,11 +265,12 @@ void VerifyCall(); #endif + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + private: // Iterator support. friend class InputIterator; - virtual int InputCount() = 0; - virtual LOperand* InputAt(int i) = 0; friend class TempIterator; virtual int TempCount() = 0; @@ -351,7 +338,7 @@ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; static LGap* cast(LInstruction* instr) { - ASSERT(instr->IsGap()); + DCHECK(instr->IsGap()); return reinterpret_cast<LGap*>(instr); } @@ -432,7 +419,7 @@ class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { public: - explicit LDummy() { } + LDummy() {} DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") }; @@ -448,6 +435,7 @@ class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: + virtual bool IsControl() const V8_OVERRIDE { return true; } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") DECLARE_HYDROGEN_ACCESSOR(Deoptimize) }; @@ -732,14 +720,14 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LDivI(LOperand* left, LOperand* right, LOperand* temp) { - inputs_[0] = left; - inputs_[1] = 
right; + LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; temps_[0] = temp; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") @@ -794,6 +782,23 @@ }; +class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LMulI(LOperand* left, LOperand* right) { @@ -847,7 +852,7 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> { public: - explicit LMathRound(LOperand* value, LOperand* temp) { + LMathRound(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } @@ -860,6 +865,16 @@ }; +class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFround(LOperand* value) { inputs_[0] = value; } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") +}; + + class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: explicit LMathAbs(LOperand* context, LOperand* value) { @@ -1228,6 +1243,9 @@ LOperand* right() { return inputs_[1]; } Token::Value op() const { return hydrogen()->op(); } + bool IsInteger32() const { + return hydrogen()->representation().IsInteger32(); + } DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") DECLARE_HYDROGEN_ACCESSOR(Bitwise) @@ -1544,7 +1562,7 @@ return 
parameter_count()->IsConstantOperand(); } LConstantOperand* constant_parameter_count() { - ASSERT(has_constant_parameter_count()); + DCHECK(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } LOperand* parameter_count() { return inputs_[2]; } @@ -1567,11 +1585,13 @@ }; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - explicit LLoadNamedGeneric(LOperand* context, LOperand* object) { + explicit LLoadNamedGeneric(LOperand* context, LOperand* object, + LOperand* vector) { inputs_[0] = context; inputs_[1] = object; + temps_[0] = vector; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") @@ -1579,6 +1599,8 @@ LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } + Handle<Object> name() const { return hydrogen()->name(); } }; @@ -1605,6 +1627,22 @@ }; +inline static bool ExternalArrayOpRequiresTemp( + Representation key_representation, + ElementsKind elements_kind) { + // Operations that require the key to be divided by two to be converted into + // an index cannot fold the scale operation into a load and need an extra + // temp register to do the work. 
+ return SmiValuesAre31Bits() && key_representation.IsSmi() && + (elements_kind == EXTERNAL_INT8_ELEMENTS || + elements_kind == EXTERNAL_UINT8_ELEMENTS || + elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS || + elements_kind == UINT8_ELEMENTS || + elements_kind == INT8_ELEMENTS || + elements_kind == UINT8_CLAMPED_ELEMENTS); +} + + class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: LLoadKeyed(LOperand* elements, LOperand* key) { @@ -1627,26 +1665,30 @@ LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } }; -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { public: - LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) { + LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key, + LOperand* vector) { inputs_[0] = context; inputs_[1] = obj; inputs_[2] = key; + temps_[0] = vector; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } }; @@ -1657,11 +1699,13 @@ }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object, + LOperand* vector) { inputs_[0] = context; inputs_[1] = global_object; + temps_[0] = vector; } 
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") @@ -1669,6 +1713,8 @@ LOperand* context() { return inputs_[0]; } LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } + Handle<Object> name() const { return hydrogen()->name(); } bool for_typeof() const { return hydrogen()->for_typeof(); } }; @@ -1752,15 +1798,15 @@ }; -class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> { +class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> { public: LStoreCodeEntry(LOperand* function, LOperand* code_object) { inputs_[0] = function; - temps_[0] = code_object; + inputs_[1] = code_object; } LOperand* function() { return inputs_[0]; } - LOperand* code_object() { return temps_[0]; } + LOperand* code_object() { return inputs_[1]; } virtual void PrintDataTo(StringStream* stream); @@ -1831,11 +1877,11 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { public: - LCallWithDescriptor(const CallInterfaceDescriptor* descriptor, - ZoneList<LOperand*>& operands, + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, Zone* zone) - : inputs_(descriptor->environment_length() + 1, zone) { - ASSERT(descriptor->environment_length() + 1 == operands.length()); + : inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); inputs_.AddAll(operands, zone); } @@ -1944,7 +1990,7 @@ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { return save_doubles() == kDontSaveFPRegs; } @@ -1966,27 +2012,29 @@ }; -class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> { +class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: - explicit 
LUint32ToDouble(LOperand* value, LOperand* temp) { + explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") }; -class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> { public: - explicit LNumberTagI(LOperand* value) { + LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) { inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; } LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") }; @@ -2130,7 +2178,6 @@ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> transition() const { return hydrogen()->transition_map(); } Representation representation() const { return hydrogen()->field_representation(); } @@ -2184,7 +2231,7 @@ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } + uint32_t base_offset() const { return hydrogen()->base_offset(); } }; @@ -2339,7 +2386,7 @@ class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMaps(LOperand* value) { + explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; } @@ -2629,6 +2676,35 @@ }; +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* 
function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + class LChunkBuilder; class LPlatformChunk V8_FINAL : public LChunk { public: @@ -2661,11 +2737,11 @@ next_block_(NULL), allocator_(allocator) { } + Isolate* isolate() const { return graph_->isolate(); } + // Build the sequence for the graph. LPlatformChunk* Build(); - LInstruction* CheckElideControlInstruction(HControlInstruction* instr); - // Declare methods that deal with the individual node types. #define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2673,6 +2749,7 @@ LInstruction* DoMathFloor(HUnaryMathOperation* instr); LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathFround(HUnaryMathOperation* instr); LInstruction* DoMathAbs(HUnaryMathOperation* instr); LInstruction* DoMathLog(HUnaryMathOperation* instr); LInstruction* DoMathExp(HUnaryMathOperation* instr); @@ -2681,12 +2758,13 @@ LInstruction* DoMathClz32(HUnaryMathOperation* instr); LInstruction* DoDivByPowerOf2I(HDiv* instr); LInstruction* DoDivByConstI(HDiv* instr); - LInstruction* DoDivI(HBinaryOperation* instr); + LInstruction* DoDivI(HDiv* instr); LInstruction* DoModByPowerOf2I(HMod* instr); LInstruction* DoModByConstI(HMod* instr); LInstruction* DoModI(HMod* instr); LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); private: enum Status { @@ -2788,6 +2866,7 @@ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* 
current); void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); diff -Nru nodejs-0.11.13/deps/v8/src/x64/macro-assembler-x64.cc nodejs-0.11.15/deps/v8/src/x64/macro-assembler-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/macro-assembler-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/macro-assembler-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,43 +1,20 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "bootstrapper.h" -#include "codegen.h" -#include "cpu-profiler.h" -#include "assembler-x64.h" -#include "macro-assembler-x64.h" -#include "serialize.h" -#include "debug.h" -#include "heap.h" -#include "isolate-inl.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/heap/heap.h" +#include "src/isolate-inl.h" +#include "src/serialize.h" +#include "src/x64/assembler-x64.h" +#include "src/x64/macro-assembler-x64.h" namespace v8 { namespace internal { @@ -54,10 +31,10 @@ } -static const int kInvalidRootRegisterDelta = -1; +static const int64_t kInvalidRootRegisterDelta = -1; -intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) { +int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) { if (predictable_code_size() && (other.address() < reinterpret_cast<Address>(isolate()) || other.address() >= reinterpret_cast<Address>(isolate() + 1))) { @@ -65,17 +42,27 @@ } Address roots_register_value = kRootRegisterBias + reinterpret_cast<Address>(isolate()->heap()->roots_array_start()); - intptr_t delta = other.address() - roots_register_value; + + int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization. 
+ if (kPointerSize == kInt64Size) { + delta = other.address() - roots_register_value; + } else { + // For x32, zero extend the address to 64-bit and calculate the delta. + uint64_t o = static_cast<uint32_t>( + reinterpret_cast<intptr_t>(other.address())); + uint64_t r = static_cast<uint32_t>( + reinterpret_cast<intptr_t>(roots_register_value)); + delta = o - r; + } return delta; } Operand MacroAssembler::ExternalOperand(ExternalReference target, Register scratch) { - if (root_array_available_ && !Serializer::enabled()) { - intptr_t delta = RootRegisterDelta(target); + if (root_array_available_ && !serializer_enabled()) { + int64_t delta = RootRegisterDelta(target); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { - Serializer::TooLateToEnableNow(); return Operand(kRootRegister, static_cast<int32_t>(delta)); } } @@ -85,10 +72,9 @@ void MacroAssembler::Load(Register destination, ExternalReference source) { - if (root_array_available_ && !Serializer::enabled()) { - intptr_t delta = RootRegisterDelta(source); + if (root_array_available_ && !serializer_enabled()) { + int64_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { - Serializer::TooLateToEnableNow(); movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); return; } @@ -104,10 +90,9 @@ void MacroAssembler::Store(ExternalReference destination, Register source) { - if (root_array_available_ && !Serializer::enabled()) { - intptr_t delta = RootRegisterDelta(destination); + if (root_array_available_ && !serializer_enabled()) { + int64_t delta = RootRegisterDelta(destination); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { - Serializer::TooLateToEnableNow(); movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source); return; } @@ -124,10 +109,9 @@ void MacroAssembler::LoadAddress(Register destination, ExternalReference source) { - if (root_array_available_ && !Serializer::enabled()) { - intptr_t delta = 
RootRegisterDelta(source); + if (root_array_available_ && !serializer_enabled()) { + int64_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { - Serializer::TooLateToEnableNow(); leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); return; } @@ -138,13 +122,12 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) { - if (root_array_available_ && !Serializer::enabled()) { + if (root_array_available_ && !serializer_enabled()) { // This calculation depends on the internals of LoadAddress. // It's correctness is ensured by the asserts in the Call // instruction below. - intptr_t delta = RootRegisterDelta(source); + int64_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { - Serializer::TooLateToEnableNow(); // Operand is leap(scratch, Operand(kRootRegister, delta)); // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7. int size = 4; @@ -161,7 +144,7 @@ void MacroAssembler::PushAddress(ExternalReference source) { int64_t address = reinterpret_cast<int64_t>(source.address()); - if (is_int32(address) && !Serializer::enabled()) { + if (is_int32(address) && !serializer_enabled()) { if (emit_debug_code()) { Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone()); } @@ -174,7 +157,7 @@ void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { - ASSERT(root_array_available_); + DCHECK(root_array_available_); movp(destination, Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)); } @@ -183,7 +166,7 @@ void MacroAssembler::LoadRootIndexed(Register destination, Register variable_offset, int fixed_offset) { - ASSERT(root_array_available_); + DCHECK(root_array_available_); movp(destination, Operand(kRootRegister, variable_offset, times_pointer_size, @@ -192,20 +175,20 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) { - ASSERT(root_array_available_); + DCHECK(root_array_available_); 
movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias), source); } void MacroAssembler::PushRoot(Heap::RootListIndex index) { - ASSERT(root_array_available_); + DCHECK(root_array_available_); Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)); } void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { - ASSERT(root_array_available_); + DCHECK(root_array_available_); cmpp(with, Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)); } @@ -213,8 +196,8 @@ void MacroAssembler::CompareRoot(const Operand& with, Heap::RootListIndex index) { - ASSERT(root_array_available_); - ASSERT(!with.AddressUsesRegister(kScratchRegister)); + DCHECK(root_array_available_); + DCHECK(!with.AddressUsesRegister(kScratchRegister)); LoadRoot(kScratchRegister, index); cmpp(with, kScratchRegister); } @@ -249,16 +232,16 @@ ret(0); bind(&buffer_overflowed); } else { - ASSERT(and_then == kFallThroughAtEnd); + DCHECK(and_then == kFallThroughAtEnd); j(equal, &done, Label::kNear); } StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(save_fp); + StoreBufferOverflowStub(isolate(), save_fp); CallStub(&store_buffer_overflow); if (and_then == kReturnAtEnd) { ret(0); } else { - ASSERT(and_then == kFallThroughAtEnd); + DCHECK(and_then == kFallThroughAtEnd); bind(&done); } } @@ -269,7 +252,7 @@ Condition cc, Label* branch, Label::Distance distance) { - if (Serializer::enabled()) { + if (serializer_enabled()) { // Can't do arithmetic on external references if it might get serialized. // The mask isn't really an address. We load it as an external reference in // case the size of the new space is different between the snapshot maker @@ -285,7 +268,9 @@ cmpp(scratch, kScratchRegister); j(cc, branch, distance); } else { - ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))); + DCHECK(kPointerSize == kInt64Size + ? 
is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())) + : kPointerSize == kInt32Size); intptr_t new_space_start = reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart()); Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start), @@ -309,7 +294,8 @@ Register dst, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; @@ -321,7 +307,7 @@ // Although the object register is tagged, the offset is relative to the start // of the object, so so offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); + DCHECK(IsAligned(offset, kPointerSize)); leap(dst, FieldOperand(object, offset)); if (emit_debug_code()) { @@ -332,8 +318,8 @@ bind(&ok); } - RecordWrite( - object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + RecordWrite(object, dst, value, save_fp, remembered_set_action, + OMIT_SMI_CHECK, pointers_to_here_check_for_value); bind(&done); @@ -346,12 +332,14 @@ } -void MacroAssembler::RecordWriteArray(Register object, - Register value, - Register index, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { +void MacroAssembler::RecordWriteArray( + Register object, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. 
Label done; @@ -366,8 +354,8 @@ leap(dst, Operand(object, index, times_pointer_size, FixedArray::kHeaderSize - kHeapObjectTag)); - RecordWrite( - object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + RecordWrite(object, dst, value, save_fp, remembered_set_action, + OMIT_SMI_CHECK, pointers_to_here_check_for_value); bind(&done); @@ -380,34 +368,103 @@ } -void MacroAssembler::RecordWrite(Register object, - Register address, - Register value, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!object.is(value)); - ASSERT(!object.is(address)); - ASSERT(!value.is(address)); +void MacroAssembler::RecordWriteForMap(Register object, + Register map, + Register dst, + SaveFPRegsMode fp_mode) { + DCHECK(!object.is(kScratchRegister)); + DCHECK(!object.is(map)); + DCHECK(!object.is(dst)); + DCHECK(!map.is(dst)); AssertNotSmi(object); - if (remembered_set_action == OMIT_REMEMBERED_SET && - !FLAG_incremental_marking) { + if (emit_debug_code()) { + Label ok; + if (map.is(kScratchRegister)) pushq(map); + CompareMap(map, isolate()->factory()->meta_map()); + if (map.is(kScratchRegister)) popq(map); + j(equal, &ok, Label::kNear); + int3(); + bind(&ok); + } + + if (!FLAG_incremental_marking) { return; } if (emit_debug_code()) { Label ok; - cmpp(value, Operand(address, 0)); + if (map.is(kScratchRegister)) pushq(map); + cmpp(map, FieldOperand(object, HeapObject::kMapOffset)); + if (map.is(kScratchRegister)) popq(map); j(equal, &ok, Label::kNear); int3(); bind(&ok); } + // Compute the address. + leap(dst, FieldOperand(object, HeapObject::kMapOffset)); + + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. + Label done; + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. 
This optimization + // relies on the fact that maps can never be in new space. + CheckPageFlag(map, + map, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + + RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, + fp_mode); + CallStub(&stub); + + bind(&done); + // Count number of write barriers in generated code. isolate()->counters()->write_barriers_static()->Increment(); IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + Move(dst, kZapValue, Assembler::RelocInfoNone()); + Move(map, kZapValue, Assembler::RelocInfoNone()); + } +} + + +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!object.is(value)); + DCHECK(!object.is(address)); + DCHECK(!value.is(address)); + AssertNotSmi(object); + + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + Label ok; + cmpp(value, Operand(address, 0)); + j(equal, &ok, Label::kNear); + int3(); + bind(&ok); + } + // First, check if a write barrier is even needed. The tests below // catch stores of smis and stores into the young generation. Label done; @@ -417,12 +474,14 @@ JumpIfSmi(value, &done); } - CheckPageFlag(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - zero, - &done, - Label::kNear); + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + } CheckPageFlag(object, value, // Used as scratch. 
@@ -431,11 +490,16 @@ &done, Label::kNear); - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); CallStub(&stub); bind(&done); + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { @@ -478,10 +542,10 @@ void MacroAssembler::CheckStackAlignment() { - int frame_alignment = OS::ActivationFrameAlignment(); + int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); Label alignment_as_expected; testp(rsp, Immediate(frame_alignment_mask)); j(zero, &alignment_as_expected, Label::kNear); @@ -518,7 +582,6 @@ } #endif - Push(rax); Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)), Assembler::RelocInfoNone()); Push(kScratchRegister); @@ -537,18 +600,18 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { - ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs - Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); + DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs + Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } void MacroAssembler::TailCallStub(CodeStub* stub) { - Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); + Jump(stub->GetCode(), RelocInfo::CODE_TARGET); } void MacroAssembler::StubReturn(int argc) { - ASSERT(argc >= 1 && generating_stub()); + DCHECK(argc >= 1 && generating_stub()); ret((argc - 1) * kPointerSize); } @@ -558,30 +621,16 @@ } -void MacroAssembler::IllegalOperation(int num_arguments) { - if (num_arguments > 0) { - 
addp(rsp, Immediate(num_arguments * kPointerSize)); - } - LoadRoot(rax, Heap::kUndefinedValueRootIndex); -} - - void MacroAssembler::IndexFromHash(Register hash, Register index) { // The assert checks that the constants for the maximum number of digits // for an array index cached in the hash field and the number of bits // reserved for it does not conflict. - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. Even if we subsequently go to - // the slow case, converting the key to a smi is always valid. - // key: string key - // hash: key's hash field, including its array index value. - andp(hash, Immediate(String::kArrayIndexValueMask)); - shr(hash, Immediate(String::kHashShift)); - // Here we actually clobber the key which will be used if calling into - // runtime later. However as the new key is the numeric value of a string key - // there is no difference in using either key. - Integer32ToSmi(index, hash); + if (!hash.is(index)) { + movl(index, hash); + } + DecodeFieldToSmi<String::ArrayIndexValueBits>(index); } @@ -591,10 +640,7 @@ // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. - if (f->nargs >= 0 && f->nargs != num_arguments) { - IllegalOperation(num_arguments); - return; - } + CHECK(f->nargs < 0 || f->nargs == num_arguments); // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we @@ -602,7 +648,7 @@ // smarter. 
Set(rax, num_arguments); LoadAddress(rbx, ExternalReference(f, isolate())); - CEntryStub ces(f->result_size, save_doubles); + CEntryStub ces(isolate(), f->result_size, save_doubles); CallStub(&ces); } @@ -612,7 +658,7 @@ Set(rax, num_arguments); LoadAddress(rbx, ext); - CEntryStub stub(1); + CEntryStub stub(isolate(), 1); CallStub(&stub); } @@ -648,7 +694,7 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) { int64_t offset = (ref0.address() - ref1.address()); // Check that fits into int. - ASSERT(static_cast<int>(offset) == offset); + DCHECK(static_cast<int>(offset) == offset); return static_cast<int>(offset); } @@ -660,7 +706,7 @@ void MacroAssembler::CallApiFunctionAndReturn( Register function_address, - Address thunk_address, + ExternalReference thunk_ref, Register thunk_last_arg, int stack_space, Operand return_value_operand, @@ -685,7 +731,7 @@ ExternalReference scheduled_exception_address = ExternalReference::scheduled_exception_address(isolate()); - ASSERT(rdx.is(function_address) || r8.is(function_address)); + DCHECK(rdx.is(function_address) || r8.is(function_address)); // Allocate HandleScope in callee-save registers. Register prev_next_address_reg = r14; Register prev_limit_reg = rbx; @@ -707,16 +753,13 @@ Label profiler_disabled; Label end_profiler_check; - bool* is_profiling_flag = - isolate()->cpu_profiler()->is_profiling_address(); - STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); - Move(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE); + Move(rax, ExternalReference::is_profiling_address(isolate())); cmpb(Operand(rax, 0), Immediate(0)); j(zero, &profiler_disabled); // Third parameter is the address of the actual getter function. 
Move(thunk_last_arg, function_address); - Move(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE); + Move(rax, thunk_ref); jmp(&end_profiler_check); bind(&profiler_disabled); @@ -800,7 +843,7 @@ bind(&promote_scheduled_exception); { FrameScope frame(this, StackFrame::INTERNAL); - CallRuntime(Runtime::kHiddenPromoteScheduledException, 0); + CallRuntime(Runtime::kPromoteScheduledException, 0); } jmp(&exception_handled); @@ -821,8 +864,8 @@ int result_size) { // Set the entry point and jump to the C entry runtime stub. LoadAddress(rbx, ext); - CEntryStub ces(result_size); - jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), result_size); + jmp(ces.GetCode(), RelocInfo::CODE_TARGET); } @@ -830,7 +873,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Rely on the assertion to check that the number of provided // arguments match the expected number of arguments. Fake a @@ -852,7 +895,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(rdi)); + DCHECK(!target.is(rdi)); // Load the JavaScript builtin function from the builtins object. 
GetBuiltinFunction(rdi, id); movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); @@ -928,7 +971,7 @@ void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8()) { movsxbq(dst, src); } else if (r.IsUInteger8()) { @@ -946,7 +989,7 @@ void MacroAssembler::Store(const Operand& dst, Register src, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { movb(dst, src); } else if (r.IsInteger16() || r.IsUInteger16()) { @@ -954,6 +997,11 @@ } else if (r.IsInteger32()) { movl(dst, src); } else { + if (r.IsHeapObject()) { + AssertNotSmi(src); + } else if (r.IsSmi()) { + AssertSmi(src); + } movp(dst, src); } } @@ -981,7 +1029,6 @@ movp(dst, kScratchRegister); } } else { - ASSERT(kPointerSize == kInt32Size); movp(dst, Immediate(static_cast<int32_t>(x))); } } @@ -997,12 +1044,19 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) { - ASSERT(!dst.is(kScratchRegister)); - ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi. + DCHECK(!dst.is(kScratchRegister)); if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { - Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); - Move(kScratchRegister, Smi::FromInt(jit_cookie())); - xorq(dst, kScratchRegister); + if (SmiValuesAre32Bits()) { + // JIT cookie can be converted to Smi. + Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); + Move(kScratchRegister, Smi::FromInt(jit_cookie())); + xorp(dst, kScratchRegister); + } else { + DCHECK(SmiValuesAre31Bits()); + int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src)); + movp(dst, Immediate(value ^ jit_cookie())); + xorp(dst, Immediate(jit_cookie())); + } } else { Move(dst, src); } @@ -1010,11 +1064,18 @@ void MacroAssembler::SafePush(Smi* src) { - ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi. 
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { - Push(Smi::FromInt(src->value() ^ jit_cookie())); - Move(kScratchRegister, Smi::FromInt(jit_cookie())); - xorq(Operand(rsp, 0), kScratchRegister); + if (SmiValuesAre32Bits()) { + // JIT cookie can be converted to Smi. + Push(Smi::FromInt(src->value() ^ jit_cookie())); + Move(kScratchRegister, Smi::FromInt(jit_cookie())); + xorp(Operand(rsp, 0), kScratchRegister); + } else { + DCHECK(SmiValuesAre31Bits()); + int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src)); + Push(Immediate(value ^ jit_cookie())); + xorp(Operand(rsp, 0), Immediate(jit_cookie())); + } } else { Push(src); } @@ -1039,7 +1100,7 @@ if (emit_debug_code()) { Move(dst, Smi::FromInt(kSmiConstantRegisterValue), Assembler::RelocInfoNone()); - cmpq(dst, kSmiConstantRegister); + cmpp(dst, kSmiConstantRegister); Assert(equal, kUninitializedKSmiConstantRegister); } int value = source->value(); @@ -1096,7 +1157,7 @@ if (!dst.is(src)) { movl(dst, src); } - shl(dst, Immediate(kSmiShift)); + shlp(dst, Immediate(kSmiShift)); } @@ -1108,8 +1169,15 @@ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation); bind(&ok); } - ASSERT(kSmiShift % kBitsPerByte == 0); - movl(Operand(dst, kSmiShift / kBitsPerByte), src); + + if (SmiValuesAre32Bits()) { + DCHECK(kSmiShift % kBitsPerByte == 0); + movl(Operand(dst, kSmiShift / kBitsPerByte), src); + } else { + DCHECK(SmiValuesAre31Bits()); + Integer32ToSmi(kScratchRegister, src); + movp(dst, kScratchRegister); + } } @@ -1121,7 +1189,7 @@ } else { leal(dst, Operand(src, constant)); } - shl(dst, Immediate(kSmiShift)); + shlp(dst, Immediate(kSmiShift)); } @@ -1130,12 +1198,24 @@ if (!dst.is(src)) { movp(dst, src); } - shr(dst, Immediate(kSmiShift)); + + if (SmiValuesAre32Bits()) { + shrp(dst, Immediate(kSmiShift)); + } else { + DCHECK(SmiValuesAre31Bits()); + sarl(dst, Immediate(kSmiShift)); + } } void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { - movl(dst, Operand(src, kSmiShift / 
kBitsPerByte)); + if (SmiValuesAre32Bits()) { + movl(dst, Operand(src, kSmiShift / kBitsPerByte)); + } else { + DCHECK(SmiValuesAre31Bits()); + movl(dst, src); + sarl(dst, Immediate(kSmiShift)); + } } @@ -1144,12 +1224,22 @@ if (!dst.is(src)) { movp(dst, src); } - sar(dst, Immediate(kSmiShift)); + sarp(dst, Immediate(kSmiShift)); + if (kPointerSize == kInt32Size) { + // Sign extend to 64-bit. + movsxlq(dst, dst); + } } void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { - movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); + if (SmiValuesAre32Bits()) { + movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); + } else { + DCHECK(SmiValuesAre31Bits()); + movp(dst, src); + SmiToInteger64(dst, dst); + } } @@ -1173,7 +1263,7 @@ void MacroAssembler::Cmp(Register dst, Smi* src) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); if (src->value() == 0) { testp(dst, dst); } else { @@ -1199,28 +1289,39 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { AssertSmi(dst); - cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); + if (SmiValuesAre32Bits()) { + cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); + } else { + DCHECK(SmiValuesAre31Bits()); + cmpl(dst, Immediate(src)); + } } void MacroAssembler::Cmp(const Operand& dst, Smi* src) { // The Operand cannot use the smi register. 
Register smi_reg = GetSmiConstant(src); - ASSERT(!dst.AddressUsesRegister(smi_reg)); + DCHECK(!dst.AddressUsesRegister(smi_reg)); cmpp(dst, smi_reg); } void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { - cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); + if (SmiValuesAre32Bits()) { + cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); + } else { + DCHECK(SmiValuesAre31Bits()); + SmiToInteger32(kScratchRegister, dst); + cmpl(kScratchRegister, src); + } } void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, Register src, int power) { - ASSERT(power >= 0); - ASSERT(power < 64); + DCHECK(power >= 0); + DCHECK(power < 64); if (power == 0) { SmiToInteger64(dst, src); return; @@ -1229,9 +1330,9 @@ movp(dst, src); } if (power < kSmiShift) { - sar(dst, Immediate(kSmiShift - power)); + sarp(dst, Immediate(kSmiShift - power)); } else if (power > kSmiShift) { - shl(dst, Immediate(power - kSmiShift)); + shlp(dst, Immediate(power - kSmiShift)); } } @@ -1239,9 +1340,9 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, Register src, int power) { - ASSERT((0 <= power) && (power < 32)); + DCHECK((0 <= power) && (power < 32)); if (dst.is(src)) { - shr(dst, Immediate(power + kSmiShift)); + shrp(dst, Immediate(power + kSmiShift)); } else { UNIMPLEMENTED(); // Not used. } @@ -1252,8 +1353,8 @@ Label* on_not_smis, Label::Distance near_jump) { if (dst.is(src1) || dst.is(src2)) { - ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); movp(kScratchRegister, src1); orp(kScratchRegister, src2); JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump); @@ -1284,7 +1385,7 @@ STATIC_ASSERT(kSmiTag == 0); // Test that both bits of the mask 0x8000000000000001 are zero. 
movp(kScratchRegister, src); - rol(kScratchRegister, Immediate(1)); + rolp(kScratchRegister, Immediate(1)); testb(kScratchRegister, Immediate(3)); return zero; } @@ -1295,8 +1396,15 @@ return CheckSmi(first); } STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); - leal(kScratchRegister, Operand(first, second, times_1, 0)); - testb(kScratchRegister, Immediate(0x03)); + if (SmiValuesAre32Bits()) { + leal(kScratchRegister, Operand(first, second, times_1, 0)); + testb(kScratchRegister, Immediate(0x03)); + } else { + DCHECK(SmiValuesAre31Bits()); + movl(kScratchRegister, first); + orl(kScratchRegister, second); + testb(kScratchRegister, Immediate(kSmiTagMask)); + } return zero; } @@ -1308,7 +1416,7 @@ } movp(kScratchRegister, first); orp(kScratchRegister, second); - rol(kScratchRegister, Immediate(1)); + rolp(kScratchRegister, Immediate(1)); testl(kScratchRegister, Immediate(3)); return zero; } @@ -1334,7 +1442,7 @@ Condition MacroAssembler::CheckIsMinSmi(Register src) { - ASSERT(!src.is(kScratchRegister)); + DCHECK(!src.is(kScratchRegister)); // If we overflow by subtracting one, it's the minimal smi value. cmpp(src, kSmiConstantRegister); return overflow; @@ -1342,16 +1450,28 @@ Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { - // A 32-bit integer value can always be converted to a smi. - return always; + if (SmiValuesAre32Bits()) { + // A 32-bit integer value can always be converted to a smi. + return always; + } else { + DCHECK(SmiValuesAre31Bits()); + cmpl(src, Immediate(0xc0000000)); + return positive; + } } Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { - // An unsigned 32-bit integer value is valid as long as the high bit - // is not set. - testl(src, src); - return positive; + if (SmiValuesAre32Bits()) { + // An unsigned 32-bit integer value is valid as long as the high bit + // is not set. 
+ testl(src, src); + return positive; + } else { + DCHECK(SmiValuesAre31Bits()); + testl(src, Immediate(0xc0000000)); + return zero; + } } @@ -1376,6 +1496,14 @@ } +void MacroAssembler::JumpIfValidSmiValue(Register src, + Label* on_valid, + Label::Distance near_jump) { + Condition is_valid = CheckInteger32ValidSmiValue(src); + j(is_valid, on_valid, near_jump); +} + + void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid, Label::Distance near_jump) { @@ -1384,6 +1512,14 @@ } +void MacroAssembler::JumpIfUIntValidSmiValue(Register src, + Label* on_valid, + Label::Distance near_jump) { + Condition is_valid = CheckUInteger32ValidSmiValue(src); + j(is_valid, on_valid, near_jump); +} + + void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid, Label::Distance near_jump) { @@ -1450,7 +1586,7 @@ } return; } else if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); switch (constant->value()) { case 1: addp(dst, kSmiConstantRegister); @@ -1494,7 +1630,13 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { if (constant->value() != 0) { - addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); + if (SmiValuesAre32Bits()) { + addl(Operand(dst, kSmiShift / kBitsPerByte), + Immediate(constant->value())); + } else { + DCHECK(SmiValuesAre31Bits()); + addp(dst, Immediate(constant)); + } } } @@ -1510,12 +1652,12 @@ movp(dst, src); } } else if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); LoadSmiConstant(kScratchRegister, constant); addp(dst, kScratchRegister); if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { j(no_overflow, bailout_label, near_jump); - ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); + DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); subp(dst, kScratchRegister); } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { @@ -1532,8 +1674,8 @@ CHECK(mode.IsEmpty()); } 
} else { - ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); - ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); + DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); + DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW)); LoadSmiConstant(dst, constant); addp(dst, src); j(overflow, bailout_label, near_jump); @@ -1547,7 +1689,7 @@ movp(dst, src); } } else if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); subp(dst, constant_reg); } else { @@ -1576,12 +1718,12 @@ movp(dst, src); } } else if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); LoadSmiConstant(kScratchRegister, constant); subp(dst, kScratchRegister); if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { j(no_overflow, bailout_label, near_jump); - ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); + DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); addp(dst, kScratchRegister); } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { @@ -1598,10 +1740,10 @@ CHECK(mode.IsEmpty()); } } else { - ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); - ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); + DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); + DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW)); if (constant->value() == Smi::kMinValue) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); movp(dst, src); LoadSmiConstant(kScratchRegister, constant); subp(dst, kScratchRegister); @@ -1621,7 +1763,7 @@ Label* on_smi_result, Label::Distance near_jump) { if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); movp(kScratchRegister, src); negp(dst); // Low 32 bits are retained as zero by negation. // Test if result is zero or Smi::kMinValue. 
@@ -1666,8 +1808,8 @@ Register src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT_NOT_NULL(on_not_smi_result); - ASSERT(!dst.is(src2)); + DCHECK_NOT_NULL(on_not_smi_result); + DCHECK(!dst.is(src2)); SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1677,8 +1819,8 @@ const Operand& src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT_NOT_NULL(on_not_smi_result); - ASSERT(!src2.AddressUsesRegister(dst)); + DCHECK_NOT_NULL(on_not_smi_result); + DCHECK(!src2.AddressUsesRegister(dst)); SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1730,8 +1872,8 @@ Register src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT_NOT_NULL(on_not_smi_result); - ASSERT(!dst.is(src2)); + DCHECK_NOT_NULL(on_not_smi_result); + DCHECK(!dst.is(src2)); SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1741,8 +1883,8 @@ const Operand& src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT_NOT_NULL(on_not_smi_result); - ASSERT(!src2.AddressUsesRegister(dst)); + DCHECK_NOT_NULL(on_not_smi_result); + DCHECK(!src2.AddressUsesRegister(dst)); SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1763,7 +1905,7 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { - ASSERT(!dst.is(src2)); + DCHECK(!dst.is(src2)); SmiSubNoOverflowHelper<Register>(this, dst, src1, src2); } @@ -1780,10 +1922,10 @@ Register src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT(!dst.is(src2)); - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); + DCHECK(!dst.is(src2)); + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); if (dst.is(src1)) { Label failure, zero_correct_result; @@ -1835,12 +1977,12 @@ Register src2, Label* on_not_smi_result, Label::Distance near_jump) { - 
ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src2.is(rax)); - ASSERT(!src2.is(rdx)); - ASSERT(!src1.is(rdx)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src2.is(rax)); + DCHECK(!src2.is(rdx)); + DCHECK(!src1.is(rdx)); // Check for 0 divisor (result is +/-Infinity). testp(src2, src2); @@ -1858,7 +2000,7 @@ // We overshoot a little and go to slow case if we divide min-value // by any negative value, not just -1. Label safe_div; - testl(rax, Immediate(0x7fffffff)); + testl(rax, Immediate(~Smi::kMinValue)); j(not_zero, &safe_div, Label::kNear); testp(src2, src2); if (src1.is(rax)) { @@ -1898,13 +2040,13 @@ Register src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); - ASSERT(!src2.is(rax)); - ASSERT(!src2.is(rdx)); - ASSERT(!src1.is(rdx)); - ASSERT(!src1.is(src2)); + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); + DCHECK(!src2.is(rax)); + DCHECK(!src2.is(rdx)); + DCHECK(!src1.is(rdx)); + DCHECK(!src1.is(src2)); testp(src2, src2); j(zero, on_not_smi_result, near_jump); @@ -1950,10 +2092,16 @@ void MacroAssembler::SmiNot(Register dst, Register src) { - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src.is(kScratchRegister)); - // Set tag and padding bits before negating, so that they are zero afterwards. - movl(kScratchRegister, Immediate(~0)); + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src.is(kScratchRegister)); + if (SmiValuesAre32Bits()) { + // Set tag and padding bits before negating, so that they are zero + // afterwards. 
+ movl(kScratchRegister, Immediate(~0)); + } else { + DCHECK(SmiValuesAre31Bits()); + movl(kScratchRegister, Immediate(1)); + } if (dst.is(src)) { xorp(dst, kScratchRegister); } else { @@ -1964,7 +2112,7 @@ void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { - ASSERT(!dst.is(src2)); + DCHECK(!dst.is(src2)); if (!dst.is(src1)) { movp(dst, src1); } @@ -1976,7 +2124,7 @@ if (constant->value() == 0) { Set(dst, 0); } else if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); andp(dst, constant_reg); } else { @@ -1988,7 +2136,7 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { if (!dst.is(src1)) { - ASSERT(!src1.is(src2)); + DCHECK(!src1.is(src2)); movp(dst, src1); } orp(dst, src2); @@ -1997,7 +2145,7 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); orp(dst, constant_reg); } else { @@ -2009,7 +2157,7 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { if (!dst.is(src1)) { - ASSERT(!src1.is(src2)); + DCHECK(!src1.is(src2)); movp(dst, src1); } xorp(dst, src2); @@ -2018,7 +2166,7 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { if (dst.is(src)) { - ASSERT(!dst.is(kScratchRegister)); + DCHECK(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); xorp(dst, constant_reg); } else { @@ -2031,11 +2179,11 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, Register src, int shift_value) { - ASSERT(is_uint5(shift_value)); + DCHECK(is_uint5(shift_value)); if (shift_value > 0) { if (dst.is(src)) { - sar(dst, Immediate(shift_value + kSmiShift)); - shl(dst, Immediate(kSmiShift)); + sarp(dst, Immediate(shift_value + kSmiShift)); + shlp(dst, Immediate(kSmiShift)); } else { 
UNIMPLEMENTED(); // Not used. } @@ -2045,12 +2193,27 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst, Register src, - int shift_value) { - if (!dst.is(src)) { - movp(dst, src); - } - if (shift_value > 0) { - shl(dst, Immediate(shift_value)); + int shift_value, + Label* on_not_smi_result, + Label::Distance near_jump) { + if (SmiValuesAre32Bits()) { + if (!dst.is(src)) { + movp(dst, src); + } + if (shift_value > 0) { + // Shift amount specified by lower 5 bits, not six as the shl opcode. + shlq(dst, Immediate(shift_value & 0x1f)); + } + } else { + DCHECK(SmiValuesAre31Bits()); + if (dst.is(src)) { + UNIMPLEMENTED(); // Not used. + } else { + SmiToInteger32(dst, src); + shll(dst, Immediate(shift_value)); + JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump); + Integer32ToSmi(dst, dst); + } } } @@ -2062,29 +2225,73 @@ if (dst.is(src)) { UNIMPLEMENTED(); // Not used. } else { - movp(dst, src); if (shift_value == 0) { - testp(dst, dst); + testp(src, src); j(negative, on_not_smi_result, near_jump); } - shr(dst, Immediate(shift_value + kSmiShift)); - shl(dst, Immediate(kSmiShift)); + if (SmiValuesAre32Bits()) { + movp(dst, src); + shrp(dst, Immediate(shift_value + kSmiShift)); + shlp(dst, Immediate(kSmiShift)); + } else { + DCHECK(SmiValuesAre31Bits()); + SmiToInteger32(dst, src); + shrp(dst, Immediate(shift_value)); + JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump); + Integer32ToSmi(dst, dst); + } } } void MacroAssembler::SmiShiftLeft(Register dst, Register src1, - Register src2) { - ASSERT(!dst.is(rcx)); - // Untag shift amount. - if (!dst.is(src1)) { - movq(dst, src1); + Register src2, + Label* on_not_smi_result, + Label::Distance near_jump) { + if (SmiValuesAre32Bits()) { + DCHECK(!dst.is(rcx)); + if (!dst.is(src1)) { + movp(dst, src1); + } + // Untag shift amount. + SmiToInteger32(rcx, src2); + // Shift amount specified by lower 5 bits, not six as the shl opcode. 
+ andp(rcx, Immediate(0x1f)); + shlq_cl(dst); + } else { + DCHECK(SmiValuesAre31Bits()); + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); + DCHECK(!dst.is(src2)); + DCHECK(!dst.is(rcx)); + + if (src1.is(rcx) || src2.is(rcx)) { + movq(kScratchRegister, rcx); + } + if (dst.is(src1)) { + UNIMPLEMENTED(); // Not used. + } else { + Label valid_result; + SmiToInteger32(dst, src1); + SmiToInteger32(rcx, src2); + shll_cl(dst); + JumpIfValidSmiValue(dst, &valid_result, Label::kNear); + // As src1 or src2 could not be dst, we do not need to restore them for + // clobbering dst. + if (src1.is(rcx) || src2.is(rcx)) { + if (src1.is(rcx)) { + movq(src1, kScratchRegister); + } else { + movq(src2, kScratchRegister); + } + } + jmp(on_not_smi_result, near_jump); + bind(&valid_result); + Integer32ToSmi(dst, dst); + } } - SmiToInteger32(rcx, src2); - // Shift amount specified by lower 5 bits, not six as the shl opcode. - andq(rcx, Immediate(0x1f)); - shl_cl(dst); } @@ -2093,36 +2300,34 @@ Register src2, Label* on_not_smi_result, Label::Distance near_jump) { - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); - ASSERT(!dst.is(rcx)); - // dst and src1 can be the same, because the one case that bails out - // is a shift by 0, which leaves dst, and therefore src1, unchanged. + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); + DCHECK(!dst.is(src2)); + DCHECK(!dst.is(rcx)); if (src1.is(rcx) || src2.is(rcx)) { movq(kScratchRegister, rcx); } - if (!dst.is(src1)) { - movq(dst, src1); - } - SmiToInteger32(rcx, src2); - orl(rcx, Immediate(kSmiShift)); - shr_cl(dst); // Shift is rcx modulo 0x1f + 32. 
- shl(dst, Immediate(kSmiShift)); - testq(dst, dst); - if (src1.is(rcx) || src2.is(rcx)) { - Label positive_result; - j(positive, &positive_result, Label::kNear); - if (src1.is(rcx)) { - movq(src1, kScratchRegister); - } else { - movq(src2, kScratchRegister); - } - jmp(on_not_smi_result, near_jump); - bind(&positive_result); + if (dst.is(src1)) { + UNIMPLEMENTED(); // Not used. } else { - // src2 was zero and src1 negative. - j(negative, on_not_smi_result, near_jump); + Label valid_result; + SmiToInteger32(dst, src1); + SmiToInteger32(rcx, src2); + shrl_cl(dst); + JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear); + // As src1 or src2 could not be dst, we do not need to restore them for + // clobbering dst. + if (src1.is(rcx) || src2.is(rcx)) { + if (src1.is(rcx)) { + movq(src1, kScratchRegister); + } else { + movq(src2, kScratchRegister); + } + } + jmp(on_not_smi_result, near_jump); + bind(&valid_result); + Integer32ToSmi(dst, dst); } } @@ -2130,27 +2335,18 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst, Register src1, Register src2) { - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); - ASSERT(!dst.is(rcx)); - if (src1.is(rcx)) { - movp(kScratchRegister, src1); - } else if (src2.is(rcx)) { - movp(kScratchRegister, src2); - } + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); + DCHECK(!dst.is(rcx)); + + SmiToInteger32(rcx, src2); if (!dst.is(src1)) { movp(dst, src1); } - SmiToInteger32(rcx, src2); - orl(rcx, Immediate(kSmiShift)); - sar_cl(dst); // Shift 32 + original rcx & 0x1f. 
- shl(dst, Immediate(kSmiShift)); - if (src1.is(rcx)) { - movp(src1, kScratchRegister); - } else if (src2.is(rcx)) { - movp(src2, kScratchRegister); - } + SmiToInteger32(dst, dst); + sarl_cl(dst); + Integer32ToSmi(dst, dst); } @@ -2159,18 +2355,18 @@ Register src2, Label* on_not_smis, Label::Distance near_jump) { - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src1.is(kScratchRegister)); - ASSERT(!src2.is(kScratchRegister)); - ASSERT(!dst.is(src1)); - ASSERT(!dst.is(src2)); + DCHECK(!dst.is(kScratchRegister)); + DCHECK(!src1.is(kScratchRegister)); + DCHECK(!src2.is(kScratchRegister)); + DCHECK(!dst.is(src1)); + DCHECK(!dst.is(src2)); // Both operands must not be smis. #ifdef DEBUG Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi); #endif STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); movl(kScratchRegister, Immediate(kSmiTagMask)); andp(kScratchRegister, src1); testl(kScratchRegister, src2); @@ -2178,7 +2374,7 @@ j(not_zero, on_not_smis, near_jump); // Exactly one operand is a smi. - ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); + DCHECK_EQ(1, static_cast<int>(kSmiTagMask)); // kScratchRegister still holds src1 & kSmiTag, which is either zero or one. subp(kScratchRegister, Immediate(1)); // If src1 is a smi, then scratch register all 1s, else it is all 0s. @@ -2194,41 +2390,78 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) { - ASSERT(is_uint6(shift)); - // There is a possible optimization if shift is in the range 60-63, but that - // will (and must) never happen. - if (!dst.is(src)) { - movq(dst, src); - } - if (shift < kSmiShift) { - sar(dst, Immediate(kSmiShift - shift)); + if (SmiValuesAre32Bits()) { + DCHECK(is_uint6(shift)); + // There is a possible optimization if shift is in the range 60-63, but that + // will (and must) never happen. 
+ if (!dst.is(src)) { + movp(dst, src); + } + if (shift < kSmiShift) { + sarp(dst, Immediate(kSmiShift - shift)); + } else { + shlp(dst, Immediate(shift - kSmiShift)); + } + return SmiIndex(dst, times_1); } else { - shl(dst, Immediate(shift - kSmiShift)); + DCHECK(SmiValuesAre31Bits()); + DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1)); + if (!dst.is(src)) { + movp(dst, src); + } + // We have to sign extend the index register to 64-bit as the SMI might + // be negative. + movsxlq(dst, dst); + if (shift == times_1) { + sarq(dst, Immediate(kSmiShift)); + return SmiIndex(dst, times_1); + } + return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); } - return SmiIndex(dst, times_1); } + SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, Register src, int shift) { - // Register src holds a positive smi. - ASSERT(is_uint6(shift)); - if (!dst.is(src)) { - movq(dst, src); - } - negq(dst); - if (shift < kSmiShift) { - sar(dst, Immediate(kSmiShift - shift)); + if (SmiValuesAre32Bits()) { + // Register src holds a positive smi. 
+ DCHECK(is_uint6(shift)); + if (!dst.is(src)) { + movp(dst, src); + } + negp(dst); + if (shift < kSmiShift) { + sarp(dst, Immediate(kSmiShift - shift)); + } else { + shlp(dst, Immediate(shift - kSmiShift)); + } + return SmiIndex(dst, times_1); } else { - shl(dst, Immediate(shift - kSmiShift)); + DCHECK(SmiValuesAre31Bits()); + DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1)); + if (!dst.is(src)) { + movp(dst, src); + } + negq(dst); + if (shift == times_1) { + sarq(dst, Immediate(kSmiShift)); + return SmiIndex(dst, times_1); + } + return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); } - return SmiIndex(dst, times_1); } void MacroAssembler::AddSmiField(Register dst, const Operand& src) { - ASSERT_EQ(0, kSmiShift % kBitsPerByte); - addl(dst, Operand(src, kSmiShift / kBitsPerByte)); + if (SmiValuesAre32Bits()) { + DCHECK_EQ(0, kSmiShift % kBitsPerByte); + addl(dst, Operand(src, kSmiShift / kBitsPerByte)); + } else { + DCHECK(SmiValuesAre31Bits()); + SmiToInteger32(kScratchRegister, src); + addl(dst, kScratchRegister); + } } @@ -2243,32 +2476,39 @@ } -void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) { +void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) { + DCHECK(!src.is(scratch)); movp(scratch, src); // High bits. - shr(src, Immediate(64 - kSmiShift)); - shl(src, Immediate(kSmiShift)); + shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift)); + shlp(src, Immediate(kSmiShift)); Push(src); // Low bits. - shl(scratch, Immediate(kSmiShift)); + shlp(scratch, Immediate(kSmiShift)); Push(scratch); } -void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) { +void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) { + DCHECK(!dst.is(scratch)); Pop(scratch); // Low bits. - shr(scratch, Immediate(kSmiShift)); + shrp(scratch, Immediate(kSmiShift)); Pop(dst); - shr(dst, Immediate(kSmiShift)); + shrp(dst, Immediate(kSmiShift)); // High bits. 
- shl(dst, Immediate(64 - kSmiShift)); + shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift)); orp(dst, scratch); } void MacroAssembler::Test(const Operand& src, Smi* source) { - testl(Operand(src, kIntSize), Immediate(source->value())); + if (SmiValuesAre32Bits()) { + testl(Operand(src, kIntSize), Immediate(source->value())); + } else { + DCHECK(SmiValuesAre31Bits()); + testl(src, Immediate(source)); + } } @@ -2315,7 +2555,7 @@ // but times_twice_pointer_size (multiplication by 16) scale factor // is not supported by addrmode on x64 platform. // So we have to premultiply entry index before lookup. - shl(scratch, Immediate(kPointerSizeLog2 + 1)); + shlp(scratch, Immediate(kPointerSizeLog2 + 1)); Register index = scratch; Register probe = mask; @@ -2338,7 +2578,7 @@ // but times_twice_pointer_size (multiplication by 16) scale factor // is not supported by addrmode on x64 platform. // So we have to premultiply entry index before lookup. - shl(scratch, Immediate(kPointerSizeLog2 + 1)); + shlp(scratch, Immediate(kPointerSizeLog2 + 1)); // Check if the entry is the smi we are looking for. cmpp(object, @@ -2388,7 +2628,7 @@ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); // Check that both are flat ASCII strings. - ASSERT(kNotStringTag != 0); + DCHECK(kNotStringTag != 0); const int kFlatAsciiStringMask = kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; const int kFlatAsciiStringTag = @@ -2397,7 +2637,7 @@ andl(scratch1, Immediate(kFlatAsciiStringMask)); andl(scratch2, Immediate(kFlatAsciiStringMask)); // Interleave the bits to check both scratch1 and scratch2 in one test. 
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); + DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); cmpl(scratch1, Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); @@ -2435,7 +2675,7 @@ movp(scratch2, second_object_instance_type); // Check that both are flat ASCII strings. - ASSERT(kNotStringTag != 0); + DCHECK(kNotStringTag != 0); const int kFlatAsciiStringMask = kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; const int kFlatAsciiStringTag = @@ -2444,7 +2684,7 @@ andl(scratch1, Immediate(kFlatAsciiStringMask)); andl(scratch2, Immediate(kFlatAsciiStringMask)); // Interleave the bits to check both scratch1 and scratch2 in one test. - ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); + DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); cmpl(scratch1, Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); @@ -2547,7 +2787,7 @@ void MacroAssembler::MoveHeapObject(Register result, Handle<Object> object) { AllowDeferredHandleDereference using_raw_address; - ASSERT(object->IsHeapObject()); + DCHECK(object->IsHeapObject()); if (isolate()->heap()->InNewSpace(*object)) { Handle<Cell> cell = isolate()->factory()->NewCell(object); Move(result, cell, RelocInfo::CELL); @@ -2576,13 +2816,26 @@ } +void MacroAssembler::DropUnderReturnAddress(int stack_elements, + Register scratch) { + DCHECK(stack_elements > 0); + if (kPointerSize == kInt64Size && stack_elements == 1) { + popq(MemOperand(rsp, 0)); + return; + } + + PopReturnAddressTo(scratch); + Drop(stack_elements); + PushReturnAddressFrom(scratch); +} + + void MacroAssembler::Push(Register src) { if (kPointerSize == kInt64Size) { pushq(src); } else { - ASSERT(kPointerSize == kInt32Size); // x32 uses 64-bit push for rbp in the prologue. 
- ASSERT(src.code() != rbp.code()); + DCHECK(src.code() != rbp.code()); leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), src); } @@ -2593,7 +2846,6 @@ if (kPointerSize == kInt64Size) { pushq(src); } else { - ASSERT(kPointerSize == kInt32Size); movp(kScratchRegister, src); leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), kScratchRegister); @@ -2601,11 +2853,20 @@ } +void MacroAssembler::PushQuad(const Operand& src) { + if (kPointerSize == kInt64Size) { + pushq(src); + } else { + movp(kScratchRegister, src); + pushq(kScratchRegister); + } +} + + void MacroAssembler::Push(Immediate value) { if (kPointerSize == kInt64Size) { pushq(value); } else { - ASSERT(kPointerSize == kInt32Size); leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), value); } @@ -2616,7 +2877,6 @@ if (kPointerSize == kInt64Size) { pushq_imm32(imm32); } else { - ASSERT(kPointerSize == kInt32Size); leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), Immediate(imm32)); } @@ -2627,9 +2887,8 @@ if (kPointerSize == kInt64Size) { popq(dst); } else { - ASSERT(kPointerSize == kInt32Size); // x32 uses 64-bit pop for rbp in the epilogue. - ASSERT(dst.code() != rbp.code()); + DCHECK(dst.code() != rbp.code()); movp(dst, Operand(rsp, 0)); leal(rsp, Operand(rsp, 4)); } @@ -2640,7 +2899,6 @@ if (kPointerSize == kInt64Size) { popq(dst); } else { - ASSERT(kPointerSize == kInt32Size); Register scratch = dst.AddressUsesRegister(kScratchRegister) ? 
kSmiConstantRegister : kScratchRegister; movp(scratch, Operand(rsp, 0)); @@ -2656,10 +2914,44 @@ } -void MacroAssembler::TestBit(const Operand& src, int bits) { +void MacroAssembler::PopQuad(const Operand& dst) { + if (kPointerSize == kInt64Size) { + popq(dst); + } else { + popq(kScratchRegister); + movp(dst, kScratchRegister); + } +} + + +void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst, + Register base, + int offset) { + DCHECK(offset > SharedFunctionInfo::kLengthOffset && + offset <= SharedFunctionInfo::kSize && + (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1)); + if (kPointerSize == kInt64Size) { + movsxlq(dst, FieldOperand(base, offset)); + } else { + movp(dst, FieldOperand(base, offset)); + SmiToInteger32(dst, dst); + } +} + + +void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base, + int offset, + int bits) { + DCHECK(offset > SharedFunctionInfo::kLengthOffset && + offset <= SharedFunctionInfo::kSize && + (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1)); + if (kPointerSize == kInt32Size) { + // On x32, this field is represented by SMI. 
+ bits += kSmiShift; + } int byte_offset = bits / kBitsPerByte; int bit_in_byte = bits & (kBitsPerByte - 1); - testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); + testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte)); } @@ -2673,7 +2965,6 @@ if (kPointerSize == kInt64Size) { jmp(op); } else { - ASSERT(kPointerSize == kInt32Size); movp(kScratchRegister, op); jmp(kScratchRegister); } @@ -2715,7 +3006,6 @@ if (kPointerSize == kInt64Size) { call(op); } else { - ASSERT(kPointerSize == kInt32Size); movp(kScratchRegister, op); call(kScratchRegister); } @@ -2740,7 +3030,7 @@ #ifdef DEBUG int end_position = pc_offset() + CallSize(code_object); #endif - ASSERT(RelocInfo::IsCodeTarget(rmode) || + DCHECK(RelocInfo::IsCodeTarget(rmode) || rmode == RelocInfo::CODE_AGE_SEQUENCE); call(code_object, rmode, ast_id); #ifdef DEBUG @@ -2893,7 +3183,7 @@ // a fixed array of (smi-tagged) code offsets. // rax = exception, rdi = code object, rdx = state. movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset)); - shr(rdx, Immediate(StackHandler::kKindWidth)); + shrp(rdx, Immediate(StackHandler::kKindWidth)); movp(rdx, FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)); SmiToInteger64(rdx, rdx); @@ -3175,8 +3465,7 @@ void MacroAssembler::LoadUint32(XMMRegister dst, - Register src, - XMMRegister scratch) { + Register src) { if (FLAG_debug_code) { cmpq(src, Immediate(0xffffffff)); Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared); @@ -3188,8 +3477,8 @@ void MacroAssembler::SlowTruncateToI(Register result_reg, Register input_reg, int offset) { - DoubleToIStub stub(input_reg, result_reg, offset, true); - call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); + DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); + call(stub.GetCode(), RelocInfo::CODE_TARGET); } @@ -3212,6 +3501,8 @@ } bind(&done); + // Keep our invariant that the upper 32 bits are zero. 
+ movl(result_reg, result_reg); } @@ -3228,6 +3519,8 @@ addp(rsp, Immediate(kDoubleSize)); bind(&done); + // Keep our invariant that the upper 32 bits are zero. + movl(result_reg, result_reg); } @@ -3266,7 +3559,7 @@ Label* lost_precision, Label::Distance dst) { Label done; - ASSERT(!temp.is(xmm0)); + DCHECK(!temp.is(xmm0)); // Heap number map check. CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), @@ -3292,39 +3585,6 @@ } -void MacroAssembler::Throw(BailoutReason reason) { -#ifdef DEBUG - const char* msg = GetBailoutReason(reason); - if (msg != NULL) { - RecordComment("Throw message: "); - RecordComment(msg); - } -#endif - - Push(rax); - Push(Smi::FromInt(reason)); - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } else { - CallRuntime(Runtime::kHiddenThrowMessage, 1); - } - // Control will not return here. 
- int3(); -} - - -void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) { - Label L; - j(NegateCondition(cc), &L); - Throw(reason); - // will not return here - bind(&L); -} - - void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset)); @@ -3332,16 +3592,16 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { - movp(dst, FieldOperand(map, Map::kBitField3Offset)); + movl(dst, FieldOperand(map, Map::kBitField3Offset)); DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); } void MacroAssembler::EnumLength(Register dst, Register map) { STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); - movp(dst, FieldOperand(map, Map::kBitField3Offset)); - Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask)); - andp(dst, kScratchRegister); + movl(dst, FieldOperand(map, Map::kBitField3Offset)); + andl(dst, Immediate(Map::EnumLengthBits::kMask)); + Integer32ToSmi(dst, dst); } @@ -3400,7 +3660,7 @@ void MacroAssembler::AssertZeroExtended(Register int32_register) { if (emit_debug_code()) { - ASSERT(!int32_register.is(kScratchRegister)); + DCHECK(!int32_register.is(kScratchRegister)); movq(kScratchRegister, V8_INT64_C(0x0000000100000000)); cmpq(kScratchRegister, int32_register); Check(above_equal, k32BitValueInRegisterIsNotZeroExtended); @@ -3451,7 +3711,7 @@ Heap::RootListIndex root_value_index, BailoutReason reason) { if (emit_debug_code()) { - ASSERT(!src.is(kScratchRegister)); + DCHECK(!src.is(kScratchRegister)); LoadRoot(kScratchRegister, root_value_index); cmpp(src, kScratchRegister); Check(equal, reason); @@ -3485,30 +3745,30 @@ Register result, Label* miss, bool miss_on_bound_function) { - // Check that the receiver isn't a smi. - testl(function, Immediate(kSmiTagMask)); - j(zero, miss); - - // Check that the function really is a function. 
- CmpObjectType(function, JS_FUNCTION_TYPE, result); - j(not_equal, miss); - + Label non_instance; if (miss_on_bound_function) { + // Check that the receiver isn't a smi. + testl(function, Immediate(kSmiTagMask)); + j(zero, miss); + + // Check that the function really is a function. + CmpObjectType(function, JS_FUNCTION_TYPE, result); + j(not_equal, miss); + movp(kScratchRegister, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte // field). - TestBit(FieldOperand(kScratchRegister, - SharedFunctionInfo::kCompilerHintsOffset), - SharedFunctionInfo::kBoundFunction); + TestBitSharedFunctionInfoSpecialField(kScratchRegister, + SharedFunctionInfo::kCompilerHintsOffset, + SharedFunctionInfo::kBoundFunction); j(not_zero, miss); - } - // Make sure that the function has an instance prototype. - Label non_instance; - testb(FieldOperand(result, Map::kBitFieldOffset), - Immediate(1 << Map::kHasNonInstancePrototype)); - j(not_zero, &non_instance, Label::kNear); + // Make sure that the function has an instance prototype. + testb(FieldOperand(result, Map::kBitFieldOffset), + Immediate(1 << Map::kHasNonInstancePrototype)); + j(not_zero, &non_instance, Label::kNear); + } // Get the prototype or initial map from the function. movp(result, @@ -3527,12 +3787,15 @@ // Get the prototype from the initial map. movp(result, FieldOperand(result, Map::kPrototypeOffset)); - jmp(&done, Label::kNear); - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. - bind(&non_instance); - movp(result, FieldOperand(result, Map::kConstructorOffset)); + if (miss_on_bound_function) { + jmp(&done, Label::kNear); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + movp(result, FieldOperand(result, Map::kConstructorOffset)); + } // All done. 
bind(&done); @@ -3548,7 +3811,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { Operand counter_operand = ExternalOperand(ExternalReference(counter)); if (value == 1) { @@ -3561,7 +3824,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { Operand counter_operand = ExternalOperand(ExternalReference(counter)); if (value == 1) { @@ -3573,15 +3836,13 @@ } -#ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { Set(rax, 0); // No arguments. LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate())); - CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); + CEntryStub ces(isolate(), 1); + DCHECK(AllowThisStubCall(&ces)); + Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } -#endif // ENABLE_DEBUGGER_SUPPORT void MacroAssembler::InvokeCode(Register code, @@ -3590,7 +3851,7 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); Label done; bool definitely_mismatches = false; @@ -3609,7 +3870,7 @@ call(code); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); jmp(code); } bind(&done); @@ -3622,13 +3883,13 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. 
- ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); - ASSERT(function.is(rdi)); + DCHECK(function.is(rdi)); movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); - movsxlq(rbx, - FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); + LoadSharedFunctionInfoSpecialField(rbx, rdx, + SharedFunctionInfo::kFormalParameterCountOffset); // Advances rdx to the end of the Code object header, to the start of // the executable code. movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); @@ -3644,9 +3905,9 @@ InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); - ASSERT(function.is(rdi)); + DCHECK(function.is(rdi)); movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); // Advances rdx to the end of the Code object header, to the start of // the executable code. @@ -3679,7 +3940,7 @@ *definitely_mismatches = false; Label invoke; if (expected.is_immediate()) { - ASSERT(actual.is_immediate()); + DCHECK(actual.is_immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; } else { @@ -3703,15 +3964,15 @@ // IC mechanism. cmpp(expected.reg(), Immediate(actual.immediate())); j(equal, &invoke, Label::kNear); - ASSERT(expected.reg().is(rbx)); + DCHECK(expected.reg().is(rbx)); Set(rax, actual.immediate()); } else if (!expected.reg().is(actual.reg())) { // Both expected and actual are in (different) registers. This // is the case when we invoke functions using call and apply. 
cmpp(expected.reg(), actual.reg()); j(equal, &invoke, Label::kNear); - ASSERT(actual.reg().is(rax)); - ASSERT(expected.reg().is(rbx)); + DCHECK(actual.reg().is(rax)); + DCHECK(expected.reg().is(rbx)); } } @@ -3739,26 +4000,27 @@ } -void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { - if (frame_mode == BUILD_STUB_FRAME) { +void MacroAssembler::StubPrologue() { pushq(rbp); // Caller's frame pointer. movp(rbp, rsp); Push(rsi); // Callee's context. Push(Smi::FromInt(StackFrame::STUB)); +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + PredictableCodeSizeScope predictible_code_size_scope(this, + kNoCodeAgeSequenceLength); + if (code_pre_aging) { + // Pre-age the code. + Call(isolate()->builtins()->MarkCodeAsExecutedOnce(), + RelocInfo::CODE_AGE_SEQUENCE); + Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength); } else { - PredictableCodeSizeScope predictible_code_size_scope(this, - kNoCodeAgeSequenceLength); - if (isolate()->IsCodePreAgingActive()) { - // Pre-age the code. - Call(isolate()->builtins()->MarkCodeAsExecutedOnce(), - RelocInfo::CODE_AGE_SEQUENCE); - Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength); - } else { - pushq(rbp); // Caller's frame pointer. - movp(rbp, rsp); - Push(rsi); // Callee's context. - Push(rdi); // Callee's JS function. - } + pushq(rbp); // Caller's frame pointer. + movp(rbp, rsp); + Push(rsi); // Callee's context. + Push(rdi); // Callee's JS function. } } @@ -3794,15 +4056,15 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) { // Set up the frame structure on the stack. // All constants are relative to the frame pointer of the exit frame. 
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == + DCHECK(ExitFrameConstants::kCallerSPDisplacement == kFPOnStackSize + kPCOnStackSize); - ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize); - ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); + DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize); + DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); pushq(rbp); movp(rbp, rsp); // Reserve room for entry stack pointer and push the code object. - ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); + DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize); Push(Immediate(0)); // Saved entry sp, patched before call. Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); Push(kScratchRegister); // Accessed from EditFrame::code_slot. @@ -3838,10 +4100,10 @@ } // Get the required frame alignment for the OS. - const int kFrameAlignment = OS::ActivationFrameAlignment(); + const int kFrameAlignment = base::OS::ActivationFrameAlignment(); if (kFrameAlignment > 0) { - ASSERT(IsPowerOf2(kFrameAlignment)); - ASSERT(is_int8(kFrameAlignment)); + DCHECK(IsPowerOf2(kFrameAlignment)); + DCHECK(is_int8(kFrameAlignment)); andp(rsp, Immediate(-kFrameAlignment)); } @@ -3924,8 +4186,8 @@ Label* miss) { Label same_contexts; - ASSERT(!holder_reg.is(scratch)); - ASSERT(!scratch.is(kScratchRegister)); + DCHECK(!holder_reg.is(scratch)); + DCHECK(!scratch.is(kScratchRegister)); // Load current lexical context from the stack frame. movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset)); @@ -3985,7 +4247,7 @@ // Compute the hash code from the untagged key. This must be kept in sync with -// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in // code-stub-hydrogen.cc void MacroAssembler::GetNumberHash(Register r0, Register scratch) { // First of all we assign the hash seed to scratch. 
@@ -4071,7 +4333,7 @@ andp(r2, r1); // Scale the index by multiplying by the entry size. - ASSERT(SeededNumberDictionary::kEntrySize == 3); + DCHECK(SeededNumberDictionary::kEntrySize == 3); leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 // Check if the key matches. @@ -4090,7 +4352,7 @@ // Check that the value is a normal propety. const int kDetailsOffset = SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; - ASSERT_EQ(NORMAL, 0); + DCHECK_EQ(NORMAL, 0); Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), Smi::FromInt(PropertyDetails::TypeField::kMask)); j(not_zero, miss); @@ -4111,7 +4373,7 @@ // Just return if allocation top is already known. if ((flags & RESULT_CONTAINS_TOP) != 0) { // No use of scratch if allocation top is provided. - ASSERT(!scratch.is_valid()); + DCHECK(!scratch.is_valid()); #ifdef DEBUG // Assert that result actually contains top on entry. Operand top_operand = ExternalOperand(allocation_top); @@ -4132,6 +4394,41 @@ } +void MacroAssembler::MakeSureDoubleAlignedHelper(Register result, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + if (kPointerSize == kDoubleSize) { + if (FLAG_debug_code) { + testl(result, Immediate(kDoubleAlignmentMask)); + Check(zero, kAllocationIsNotDoubleAligned); + } + } else { + // Align the next allocation. Storing the filler map without checking top + // is safe in new-space because the limit of the heap is aligned there. + DCHECK(kPointerSize * 2 == kDoubleSize); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); + // Make sure scratch is not clobbered by this function as it might be + // used in UpdateAllocationTopHelper later. 
+ DCHECK(!scratch.is(kScratchRegister)); + Label aligned; + testl(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + cmpp(result, ExternalOperand(allocation_limit)); + j(above_equal, gc_required); + } + LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); + movp(Operand(result, 0), kScratchRegister); + addp(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } +} + + void MacroAssembler::UpdateAllocationTopHelper(Register result_end, Register scratch, AllocationFlags flags) { @@ -4159,8 +4456,8 @@ Register scratch, Label* gc_required, AllocationFlags flags) { - ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); + DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -4175,16 +4472,13 @@ jmp(gc_required); return; } - ASSERT(!result.is(result_end)); + DCHECK(!result.is(result_end)); // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); - // Align the next allocation. Storing the filler map without checking top is - // safe in new-space because the limit of the heap is aligned there. - if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { - testq(result, Immediate(kDoubleAlignmentMask)); - Check(zero, kAllocationIsNotDoubleAligned); + if ((flags & DOUBLE_ALIGNMENT) != 0) { + MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags); } // Calculate new top and bail out if new space is exhausted. @@ -4214,7 +4508,7 @@ } } else if (tag_result) { // Tag the result if requested. 
- ASSERT(kHeapObjectTag == 1); + DCHECK(kHeapObjectTag == 1); incp(result); } } @@ -4228,7 +4522,7 @@ Register scratch, Label* gc_required, AllocationFlags flags) { - ASSERT((flags & SIZE_IN_WORDS) == 0); + DCHECK((flags & SIZE_IN_WORDS) == 0); leap(result_end, Operand(element_count, element_size, header_size)); Allocate(result_end, result, result_end, scratch, gc_required, flags); } @@ -4240,7 +4534,7 @@ Register scratch, Label* gc_required, AllocationFlags flags) { - ASSERT((flags & SIZE_IN_WORDS) == 0); + DCHECK((flags & SIZE_IN_WORDS) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -4254,16 +4548,13 @@ jmp(gc_required); return; } - ASSERT(!result.is(result_end)); + DCHECK(!result.is(result_end)); // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); - // Align the next allocation. Storing the filler map without checking top is - // safe in new-space because the limit of the heap is aligned there. - if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { - testq(result, Immediate(kDoubleAlignmentMask)); - Check(zero, kAllocationIsNotDoubleAligned); + if ((flags & DOUBLE_ALIGNMENT) != 0) { + MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags); } // Calculate new top and bail out if new space is exhausted. @@ -4305,12 +4596,17 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch, - Label* gc_required) { + Label* gc_required, + MutableMode mode) { // Allocate heap number in new space. Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT); + Heap::RootListIndex map_index = mode == MUTABLE + ? Heap::kMutableHeapNumberMapRootIndex + : Heap::kHeapNumberMapRootIndex; + // Set the map. 
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); + LoadRoot(kScratchRegister, map_index); movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); } @@ -4325,7 +4621,7 @@ // observing object alignment. const int kHeaderAlignment = SeqTwoByteString::kHeaderSize & kObjectAlignmentMask; - ASSERT(kShortSize == 2); + DCHECK(kShortSize == 2); // scratch1 = length * 2 + kObjectAlignmentMask. leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + kHeaderAlignment)); @@ -4365,7 +4661,7 @@ const int kHeaderAlignment = SeqOneByteString::kHeaderSize & kObjectAlignmentMask; movl(scratch1, length); - ASSERT(kCharSize == 1); + DCHECK(kCharSize == 1); addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); andp(scratch1, Immediate(~kObjectAlignmentMask)); if (kHeaderAlignment > 0) { @@ -4410,33 +4706,12 @@ Register scratch1, Register scratch2, Label* gc_required) { - Label allocate_new_space, install_map; - AllocationFlags flags = TAG_OBJECT; - - ExternalReference high_promotion_mode = ExternalReference:: - new_space_high_promotion_mode_active_address(isolate()); - - Load(scratch1, high_promotion_mode); - testb(scratch1, Immediate(1)); - j(zero, &allocate_new_space); Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); - - jmp(&install_map); - - bind(&allocate_new_space); - Allocate(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - flags); - - bind(&install_map); + TAG_OBJECT); // Set the map. The other fields are left uninitialized. 
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex); @@ -4484,7 +4759,7 @@ Register length, int min_length, Register scratch) { - ASSERT(min_length >= 0); + DCHECK(min_length >= 0); if (emit_debug_code()) { cmpl(length, Immediate(min_length)); Assert(greater_equal, kInvalidMinLength); @@ -4497,9 +4772,9 @@ j(below, &short_string, Label::kNear); } - ASSERT(source.is(rsi)); - ASSERT(destination.is(rdi)); - ASSERT(length.is(rcx)); + DCHECK(source.is(rsi)); + DCHECK(destination.is(rdi)); + DCHECK(length.is(rcx)); if (min_length <= kLongStringLimit) { cmpl(length, Immediate(2 * kPointerSize)); @@ -4664,7 +4939,7 @@ // arguments. // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers // and the caller does not reserve stack slots for them. - ASSERT(num_arguments >= 0); + DCHECK(num_arguments >= 0); #ifdef _WIN64 const int kMinimumStackSlots = kRegisterPassedArguments; if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots; @@ -4710,13 +4985,13 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) { - int frame_alignment = OS::ActivationFrameAlignment(); - ASSERT(frame_alignment != 0); - ASSERT(num_arguments >= 0); + int frame_alignment = base::OS::ActivationFrameAlignment(); + DCHECK(frame_alignment != 0); + DCHECK(num_arguments >= 0); // Make stack end at alignment and allocate space for arguments and old rsp. movp(kScratchRegister, rsp); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); int argument_slots_on_stack = ArgumentStackSlotsForCFunctionCall(num_arguments); subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); @@ -4733,30 +5008,48 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) { - ASSERT(has_frame()); + DCHECK(has_frame()); // Check stack alignment. 
if (emit_debug_code()) { CheckStackAlignment(); } call(function); - ASSERT(OS::ActivationFrameAlignment() != 0); - ASSERT(num_arguments >= 0); + DCHECK(base::OS::ActivationFrameAlignment() != 0); + DCHECK(num_arguments >= 0); int argument_slots_on_stack = ArgumentStackSlotsForCFunctionCall(num_arguments); movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize)); } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6, + Register reg7, + Register reg8) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + if (reg7.is_valid()) regs |= reg7.bit(); + if (reg8.is_valid()) regs |= reg8.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; } +#endif CodePatcher::CodePatcher(byte* address, int size) @@ -4766,17 +5059,17 @@ // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } CodePatcher::~CodePatcher() { // Indicate that code has changed. 
- CPU::FlushICache(address_, size_); + CpuFeatures::FlushICache(address_, size_); // Check that the code was patched as expected. - ASSERT(masm_.pc_ == address_ + size_); - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); + DCHECK(masm_.pc_ == address_ + size_); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } @@ -4787,7 +5080,7 @@ Condition cc, Label* condition_met, Label::Distance condition_met_distance) { - ASSERT(cc == zero || cc == not_zero); + DCHECK(cc == zero || cc == not_zero); if (scratch.is(object)) { andp(scratch, Immediate(~Page::kPageAlignmentMask)); } else { @@ -4809,9 +5102,8 @@ Label* if_deprecated) { if (map->CanBeDeprecated()) { Move(scratch, map); - movp(scratch, FieldOperand(scratch, Map::kBitField3Offset)); - SmiToInteger32(scratch, scratch); - andp(scratch, Immediate(Map::Deprecated::kMask)); + movl(scratch, FieldOperand(scratch, Map::kBitField3Offset)); + andl(scratch, Immediate(Map::Deprecated::kMask)); j(not_zero, if_deprecated); } } @@ -4822,10 +5114,10 @@ Register mask_scratch, Label* on_black, Label::Distance on_black_distance) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx)); GetMarkBits(object, bitmap_scratch, mask_scratch); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); // The mask_scratch register contains a 1 at the position of the first bit // and a 0 at all other positions, including the position of the second bit. 
movp(rcx, mask_scratch); @@ -4851,8 +5143,8 @@ movp(scratch, FieldOperand(value, HeapObject::kMapOffset)); CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); j(equal, &is_data_object, Label::kNear); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. testb(FieldOperand(scratch, Map::kInstanceTypeOffset), @@ -4865,7 +5157,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); + DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); movp(bitmap_reg, addr_reg); // Sign extended 32 bit immediate. andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); @@ -4882,7 +5174,7 @@ shrl(rcx, Immediate(kPointerSizeLog2)); andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); movl(mask_reg, Immediate(1)); - shl_cl(mask_reg); + shlp_cl(mask_reg); } @@ -4892,14 +5184,14 @@ Register mask_scratch, Label* value_is_white_and_not_data, Label::Distance distance) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. 
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); Label done; @@ -4937,8 +5229,8 @@ bind(¬_heap_number); // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. Register instance_type = rcx; @@ -4951,8 +5243,8 @@ Label not_external; // External strings are the only ones with the kExternalStringTag bit // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); testb(instance_type, Immediate(kExternalStringTag)); j(zero, ¬_external, Label::kNear); movp(length, Immediate(ExternalString::kSize)); @@ -4960,13 +5252,13 @@ bind(¬_external); // Sequential string, either ASCII or UC16. - ASSERT(kOneByteStringTag == 0x04); + DCHECK(kOneByteStringTag == 0x04); andp(length, Immediate(kStringEncodingMask)); xorp(length, Immediate(kStringEncodingMask)); addp(length, Immediate(0x04)); // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. 
imulp(length, FieldOperand(value, String::kLengthOffset)); - shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); + shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); andp(length, Immediate(~kObjectAlignmentMask)); @@ -5053,8 +5345,8 @@ Register scratch0, Register scratch1, Label* found) { - ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister))); - ASSERT(!scratch1.is(scratch0)); + DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister))); + DCHECK(!scratch1.is(scratch0)); Register current = scratch0; Label loop_again; @@ -5064,8 +5356,7 @@ bind(&loop_again); movp(current, FieldOperand(current, HeapObject::kMapOffset)); movp(scratch1, FieldOperand(current, Map::kBitField2Offset)); - andp(scratch1, Immediate(Map::kElementsKindMask)); - shr(scratch1, Immediate(Map::kElementsKindShift)); + DecodeField<Map::ElementsKindBits>(scratch1); cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS)); j(equal, found); movp(current, FieldOperand(current, Map::kPrototypeOffset)); @@ -5075,8 +5366,8 @@ void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { - ASSERT(!dividend.is(rax)); - ASSERT(!dividend.is(rdx)); + DCHECK(!dividend.is(rax)); + DCHECK(!dividend.is(rdx)); MultiplierAndShift ms(divisor); movl(rax, Immediate(ms.multiplier())); imull(dividend); diff -Nru nodejs-0.11.13/deps/v8/src/x64/macro-assembler-x64.h nodejs-0.11.15/deps/v8/src/x64/macro-assembler-x64.h --- nodejs-0.11.13/deps/v8/src/x64/macro-assembler-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/macro-assembler-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,36 +1,13 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_ #define V8_X64_MACRO_ASSEMBLER_X64_H_ -#include "assembler.h" -#include "frames.h" -#include "v8globals.h" +#include "src/assembler.h" +#include "src/frames.h" +#include "src/globals.h" namespace v8 { namespace internal { @@ -52,6 +29,10 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; enum SmiOperationConstraint { PRESERVE_SOURCE_REGISTER, @@ -69,7 +50,16 @@ : EnumSet<SmiOperationConstraint, byte>(bits) { } }; -bool AreAliased(Register r1, Register r2, Register r3, Register r4); +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg, + Register reg7 = no_reg, + Register reg8 = no_reg); +#endif // Forward declaration. class JumpTarget; @@ -243,7 +233,9 @@ Register scratch, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); // As above, but the offset has the tag presubtracted. For use with // Operand(reg, off). @@ -254,14 +246,17 @@ Register scratch, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp, remembered_set_action, - smi_check); + smi_check, + pointers_to_here_check_for_value); } // Notify the garbage collector that we wrote a pointer into a fixed array. 
@@ -276,7 +271,15 @@ Register index, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + void RecordWriteForMap( + Register object, + Register map, + Register dst, + SaveFPRegsMode save_fp); // For page containing |object| mark region covering |address| // dirty. |object| is the object being stored into, |value| is the @@ -289,17 +292,18 @@ Register value, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); -#ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support void DebugBreak(); -#endif // Generates function and stub prologue code. - void Prologue(PrologueFrameMode frame_mode); + void StubPrologue(); + void Prologue(bool code_pre_aging); // Enter specific kind of exit frame; either in normal or // debug mode. Expects the number of arguments in register rax and @@ -494,10 +498,18 @@ // Test-and-jump functions. Typically combines a check function // above with a conditional jump. + // Jump if the value can be represented by a smi. + void JumpIfValidSmiValue(Register src, Label* on_valid, + Label::Distance near_jump = Label::kFar); + // Jump if the value cannot be represented by a smi. void JumpIfNotValidSmiValue(Register src, Label* on_invalid, Label::Distance near_jump = Label::kFar); + // Jump if the unsigned integer value can be represented by a smi. + void JumpIfUIntValidSmiValue(Register src, Label* on_valid, + Label::Distance near_jump = Label::kFar); + // Jump if the unsigned integer value cannot be represented by a smi. 
void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid, Label::Distance near_jump = Label::kFar); @@ -655,12 +667,14 @@ void SmiShiftLeftConstant(Register dst, Register src, - int shift_value); + int shift_value, + Label* on_not_smi_result = NULL, + Label::Distance near_jump = Label::kFar); void SmiShiftLogicalRightConstant(Register dst, - Register src, - int shift_value, - Label* on_not_smi_result, - Label::Distance near_jump = Label::kFar); + Register src, + int shift_value, + Label* on_not_smi_result, + Label::Distance near_jump = Label::kFar); void SmiShiftArithmeticRightConstant(Register dst, Register src, int shift_value); @@ -669,7 +683,9 @@ // Uses and clobbers rcx, so dst may not be rcx. void SmiShiftLeft(Register dst, Register src1, - Register src2); + Register src2, + Label* on_not_smi_result = NULL, + Label::Distance near_jump = Label::kFar); // Shifts a smi value to the right, shifting in zero bits at the top, and // returns the unsigned intepretation of the result if that is a smi. // Uses and clobbers rcx, so dst may not be rcx. @@ -726,12 +742,12 @@ void Push(Smi* smi); - // Save away a 64-bit integer on the stack as two 32-bit integers + // Save away a raw integer with pointer size on the stack as two integers // masquerading as smis so that the garbage collector skips visiting them. - void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister); - // Reconstruct a 64-bit integer from two 32-bit integers masquerading as - // smis on the top of stack. - void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister); + void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister); + // Reconstruct a raw integer with pointer size from two integers masquerading + // as smis on the top of stack. + void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister); void Test(const Operand& dst, Smi* source); @@ -813,8 +829,13 @@ // Move if the registers are not identical. 
void Move(Register target, Register source); - // Bit-field support. - void TestBit(const Operand& dst, int bit_index); + // TestBit and Load SharedFunctionInfo special field. + void TestBitSharedFunctionInfoSpecialField(Register base, + int offset, + int bit_index); + void LoadSharedFunctionInfoSpecialField(Register dst, + Register base, + int offset); // Handle support void Move(Register dst, Handle<Object> source); @@ -835,14 +856,21 @@ // Emit code to discard a non-negative number of pointer-sized elements // from the stack, clobbering only the rsp register. void Drop(int stack_elements); + // Emit code to discard a positive number of pointer-sized elements + // from the stack under the return address which remains on the top, + // clobbering the rsp register. + void DropUnderReturnAddress(int stack_elements, + Register scratch = kScratchRegister); void Call(Label* target) { call(target); } void Push(Register src); void Push(const Operand& src); + void PushQuad(const Operand& src); void Push(Immediate value); void PushImm32(int32_t imm32); void Pop(Register dst); void Pop(const Operand& dst); + void PopQuad(const Operand& dst); void PushReturnAddressFrom(Register src) { pushq(src); } void PopReturnAddressTo(Register dst) { popq(dst); } void Move(Register dst, ExternalReference ext) { @@ -854,15 +882,15 @@ void Move(Register dst, void* ptr, RelocInfo::Mode rmode) { // This method must not be used with heap object references. The stored // address is not GC safe. Use the handle version instead. 
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM); + DCHECK(rmode > RelocInfo::LAST_GCED_ENUM); movp(dst, ptr, rmode); } void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) { AllowDeferredHandleDereference using_raw_address; - ASSERT(!RelocInfo::IsNone(rmode)); - ASSERT(value->IsHeapObject()); - ASSERT(!isolate()->heap()->InNewSpace(*value)); + DCHECK(!RelocInfo::IsNone(rmode)); + DCHECK(value->IsHeapObject()); + DCHECK(!isolate()->heap()->InNewSpace(*value)); movp(dst, reinterpret_cast<void*>(value.location()), rmode); } @@ -1016,7 +1044,7 @@ MinusZeroMode minus_zero_mode, Label* lost_precision, Label::Distance dst = Label::kFar); - void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch); + void LoadUint32(XMMRegister dst, Register src); void LoadInstanceDescriptors(Register map, Register descriptors); void EnumLength(Register dst, Register map); @@ -1024,11 +1052,32 @@ template<typename Field> void DecodeField(Register reg) { - static const int shift = Field::kShift + kSmiShift; + static const int shift = Field::kShift; static const int mask = Field::kMask >> Field::kShift; - shr(reg, Immediate(shift)); + if (shift != 0) { + shrp(reg, Immediate(shift)); + } andp(reg, Immediate(mask)); - shl(reg, Immediate(kSmiShift)); + } + + template<typename Field> + void DecodeFieldToSmi(Register reg) { + if (SmiValuesAre32Bits()) { + andp(reg, Immediate(Field::kMask)); + shlp(reg, Immediate(kSmiShift - Field::kShift)); + } else { + static const int shift = Field::kShift; + static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize; + DCHECK(SmiValuesAre31Bits()); + DCHECK(kSmiShift == kSmiTagSize); + DCHECK((mask & 0x80000000u) == 0); + if (shift < kSmiShift) { + shlp(reg, Immediate(kSmiShift - shift)); + } else if (shift > kSmiShift) { + sarp(reg, Immediate(shift - kSmiShift)); + } + andp(reg, Immediate(mask)); + } } // Abort execution if argument is not a number, enabled via --debug-code. 
@@ -1077,12 +1126,6 @@ // Propagate an uncatchable exception out of the current JS stack. void ThrowUncatchable(Register value); - // Throw a message string as an exception. - void Throw(BailoutReason reason); - - // Throw a message string as an exception if a condition is not true. - void ThrowIf(Condition cc, BailoutReason reason); - // --------------------------------------------------------------------------- // Inline caching support @@ -1152,7 +1195,8 @@ // space is full. void AllocateHeapNumber(Register result, Register scratch, - Label* gc_required); + Label* gc_required, + MutableMode mode = IMMUTABLE); // Allocate a sequential string. All the header fields of the string object // are initialized. @@ -1218,10 +1262,6 @@ Label* miss, bool miss_on_bound_function = false); - // Generates code for reporting that an illegal operation has - // occurred. - void IllegalOperation(int num_arguments); - // Picks out an array index from the hash field. // Register use: // hash - holds the index's hash. Clobbered. @@ -1310,7 +1350,7 @@ // caller-save registers. Restores context. On return removes // stack_space * kPointerSize (GCed). void CallApiFunctionAndReturn(Register function_address, - Address thunk_address, + ExternalReference thunk_ref, Register thunk_last_arg, int stack_space, Operand return_value_operand, @@ -1347,7 +1387,7 @@ void Ret(int bytes_dropped, Register scratch); Handle<Object> CodeObject() { - ASSERT(!code_object_.is_null()); + DCHECK(!code_object_.is_null()); return code_object_; } @@ -1460,7 +1500,7 @@ // modified. It may be the "smi 1 constant" register. Register GetSmiConstant(Smi* value); - intptr_t RootRegisterDelta(ExternalReference other); + int64_t RootRegisterDelta(ExternalReference other); // Moves the smi value to the destination register. 
void LoadSmiConstant(Register dst, Smi* value); @@ -1495,19 +1535,17 @@ Register scratch, AllocationFlags flags); + void MakeSureDoubleAlignedHelper(Register result, + Register scratch, + Label* gc_required, + AllocationFlags flags); + // Update allocation top with value in result_end register. // If scratch is valid, it contains the address of the allocation top. void UpdateAllocationTopHelper(Register result_end, Register scratch, AllocationFlags flags); - // Helper for PopHandleScope. Allowed to perform a GC and returns - // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and - // possibly returns a failure object indicating an allocation failure. - Object* PopHandleScopeHelper(Register saved, - Register scratch, - bool gc_allowed); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. void InNewSpace(Register object, Register scratch, diff -Nru nodejs-0.11.13/deps/v8/src/x64/regexp-macro-assembler-x64.cc nodejs-0.11.15/deps/v8/src/x64/regexp-macro-assembler-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/regexp-macro-assembler-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/regexp-macro-assembler-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "cpu-profiler.h" -#include "serialize.h" -#include "unicode.h" -#include "log.h" -#include "regexp-stack.h" -#include "macro-assembler.h" -#include "regexp-macro-assembler.h" -#include "x64/regexp-macro-assembler-x64.h" +#include "src/cpu-profiler.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/serialize.h" +#include "src/unicode.h" +#include "src/x64/regexp-macro-assembler-x64.h" namespace v8 { namespace internal { @@ -132,7 +109,7 @@ success_label_(), backtrack_label_(), exit_label_() { - ASSERT_EQ(0, registers_to_save % 2); + DCHECK_EQ(0, registers_to_save % 2); __ jmp(&entry_label_); // We'll write the entry code when we know more. __ bind(&start_label_); // And then continue from here. 
} @@ -163,8 +140,8 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) { - ASSERT(reg >= 0); - ASSERT(reg < num_registers_); + DCHECK(reg >= 0); + DCHECK(reg < num_registers_); if (by != 0) { __ addp(register_location(reg), Immediate(by)); } @@ -241,8 +218,8 @@ int start_reg, Label* on_no_match) { Label fallthrough; - __ movq(rdx, register_location(start_reg)); // Offset of start of capture - __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture + ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture + ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture __ subp(rbx, rdx); // Length of capture. // ----------------------- @@ -318,7 +295,7 @@ __ movp(rdi, r11); __ subq(rdi, rsi); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); // Save important/volatile registers before calling C function. #ifndef _WIN64 // Caller save on Linux and callee save in Windows. @@ -390,8 +367,8 @@ Label fallthrough; // Find length of back-referenced capture. - __ movq(rdx, register_location(start_reg)); - __ movq(rax, register_location(start_reg + 1)); + ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture + ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture __ subp(rax, rdx); // Length to check. // Fail on partial or illegal capture (start of capture after end of capture). 
@@ -427,7 +404,7 @@ __ movzxbl(rax, Operand(rdx, 0)); __ cmpb(rax, Operand(rbx, 0)); } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); __ movzxwl(rax, Operand(rdx, 0)); __ cmpw(rax, Operand(rbx, 0)); } @@ -488,7 +465,7 @@ uc16 minus, uc16 mask, Label* on_not_equal) { - ASSERT(minus < String::kMaxUtf16CodeUnit); + DCHECK(minus < String::kMaxUtf16CodeUnit); __ leap(rax, Operand(current_character(), -minus)); __ andp(rax, Immediate(mask)); __ cmpl(rax, Immediate(c)); @@ -619,7 +596,7 @@ BranchOrBacktrack(above, on_no_match); } __ Move(rbx, ExternalReference::re_word_character_map()); - ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. + DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char. __ testb(Operand(rbx, current_character(), times_1, 0), current_character()); BranchOrBacktrack(zero, on_no_match); @@ -633,7 +610,7 @@ __ j(above, &done); } __ Move(rbx, ExternalReference::re_word_character_map()); - ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. + DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char. __ testb(Operand(rbx, current_character(), times_1, 0), current_character()); BranchOrBacktrack(not_zero, on_no_match); @@ -692,12 +669,12 @@ #else // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack). // Push register parameters on stack for reference. 
- ASSERT_EQ(kInputString, -1 * kPointerSize); - ASSERT_EQ(kStartIndex, -2 * kPointerSize); - ASSERT_EQ(kInputStart, -3 * kPointerSize); - ASSERT_EQ(kInputEnd, -4 * kPointerSize); - ASSERT_EQ(kRegisterOutput, -5 * kPointerSize); - ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize); + DCHECK_EQ(kInputString, -1 * kRegisterSize); + DCHECK_EQ(kStartIndex, -2 * kRegisterSize); + DCHECK_EQ(kInputStart, -3 * kRegisterSize); + DCHECK_EQ(kInputEnd, -4 * kRegisterSize); + DCHECK_EQ(kRegisterOutput, -5 * kRegisterSize); + DCHECK_EQ(kNumOutputRegisters, -6 * kRegisterSize); __ pushq(rdi); __ pushq(rsi); __ pushq(rdx); @@ -747,7 +724,7 @@ // Load input position. __ movp(rdi, Operand(rbp, kInputStart)); // Set up rdi to be negative offset from string end. - __ subp(rdi, rsi); + __ subq(rdi, rsi); // Set rax to address of char before start of the string // (effectively string position -1). __ movp(rbx, Operand(rbp, kStartIndex)); @@ -831,14 +808,14 @@ __ addp(rcx, rdx); } for (int i = 0; i < num_saved_registers_; i++) { - __ movq(rax, register_location(i)); + __ movp(rax, register_location(i)); if (i == 0 && global_with_zero_length_check()) { // Keep capture start in rdx for the zero-length check later. __ movp(rdx, rax); } __ addp(rax, rcx); // Convert to index from start, not end. if (mode_ == UC16) { - __ sar(rax, Immediate(1)); // Convert byte index to character index. + __ sarp(rax, Immediate(1)); // Convert byte index to character index. } __ movl(Operand(rbx, i * kIntSize), rax); } @@ -1045,8 +1022,8 @@ Label* on_end_of_input, bool check_bounds, int characters) { - ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. - ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works) + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset < (1<<30)); // Be sane! 
(And ensure negation works) if (check_bounds) { CheckPosition(cp_offset + characters - 1, on_end_of_input); } @@ -1084,13 +1061,31 @@ } +STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size); + + void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) { - __ movq(rdi, register_location(reg)); + if (kPointerSize == kInt64Size) { + __ movq(rdi, register_location(reg)); + } else { + // Need sign extension for x32 as rdi might be used as an index register. + __ movsxlq(rdi, register_location(reg)); + } +} + + +void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) { + if (kPointerSize == kInt64Size) { + __ movq(dst, register_location(reg)); + } else { + // Need sign extension for x32 as dst might be used as an index register. + __ movsxlq(dst, register_location(reg)); + } } void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) { - __ movq(backtrack_stackpointer(), register_location(reg)); + __ movp(backtrack_stackpointer(), register_location(reg)); __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd)); } @@ -1109,7 +1104,7 @@ void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) { - ASSERT(register_index >= num_saved_registers_); // Reserved for positions! + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! 
__ movp(register_location(register_index), Immediate(to)); } @@ -1132,7 +1127,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) { - ASSERT(reg_from <= reg_to); + DCHECK(reg_from <= reg_to); __ movp(rax, Operand(rbp, kInputStartMinusOne)); for (int reg = reg_from; reg <= reg_to; reg++) { __ movp(register_location(reg), rax); @@ -1188,7 +1183,8 @@ Code* re_code, Address re_frame) { Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); - if (isolate->stack_guard()->IsStackOverflow()) { + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { isolate->StackOverflow(); return EXCEPTION; } @@ -1211,11 +1207,11 @@ // Current string. bool is_ascii = subject->IsOneByteRepresentationUnderneath(); - ASSERT(re_code->instruction_start() <= *return_address); - ASSERT(*return_address <= + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= re_code->instruction_start() + re_code->instruction_size()); - MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate); + Object* result = isolate->stack_guard()->HandleInterrupts(); if (*code_handle != re_code) { // Return address no longer valid intptr_t delta = code_handle->address() - re_code->address(); @@ -1251,7 +1247,7 @@ // be a sequential or external string with the same content. // Update the start and end pointers in the stack frame to the current // location (whether it has actually moved or not). - ASSERT(StringShape(*subject_tmp).IsSequential() || + DCHECK(StringShape(*subject_tmp).IsSequential() || StringShape(*subject_tmp).IsExternal()); // The original start address of the characters to match. 
@@ -1283,7 +1279,7 @@ Operand RegExpMacroAssemblerX64::register_location(int register_index) { - ASSERT(register_index < (1<<30)); + DCHECK(register_index < (1<<30)); if (num_registers_ <= register_index) { num_registers_ = register_index + 1; } @@ -1334,7 +1330,7 @@ void RegExpMacroAssemblerX64::Push(Register source) { - ASSERT(!source.is(backtrack_stackpointer())); + DCHECK(!source.is(backtrack_stackpointer())); // Notice: This updates flags, unlike normal Push. __ subp(backtrack_stackpointer(), Immediate(kIntSize)); __ movl(Operand(backtrack_stackpointer(), 0), source); @@ -1374,7 +1370,7 @@ void RegExpMacroAssemblerX64::Pop(Register target) { - ASSERT(!target.is(backtrack_stackpointer())); + DCHECK(!target.is(backtrack_stackpointer())); __ movsxlq(target, Operand(backtrack_stackpointer(), 0)); // Notice: This updates flags, unlike normal Pop. __ addp(backtrack_stackpointer(), Immediate(kIntSize)); @@ -1423,16 +1419,16 @@ } else if (characters == 2) { __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset)); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset)); } } else { - ASSERT(mode_ == UC16); + DCHECK(mode_ == UC16); if (characters == 2) { __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16))); } else { - ASSERT(characters == 1); + DCHECK(characters == 1); __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16))); } diff -Nru nodejs-0.11.13/deps/v8/src/x64/regexp-macro-assembler-x64.h nodejs-0.11.15/deps/v8/src/x64/regexp-macro-assembler-x64.h --- nodejs-0.11.13/deps/v8/src/x64/regexp-macro-assembler-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/regexp-macro-assembler-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,14 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_ #define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_ -#include "x64/assembler-x64.h" -#include "x64/assembler-x64-inl.h" -#include "macro-assembler.h" -#include "code.h" -#include "x64/macro-assembler-x64.h" +#include "src/macro-assembler.h" +#include "src/x64/assembler-x64-inl.h" +#include "src/x64/assembler-x64.h" +#include "src/x64/macro-assembler-x64.h" namespace v8 { namespace internal { @@ -135,8 +111,8 @@ // Offsets from rbp of function parameters and stored registers. static const int kFramePointer = 0; // Above the frame pointer - function parameters and return address. - static const int kReturn_eip = kFramePointer + kPointerSize; - static const int kFrameAlign = kReturn_eip + kPointerSize; + static const int kReturn_eip = kFramePointer + kRegisterSize; + static const int kFrameAlign = kReturn_eip + kRegisterSize; #ifdef _WIN64 // Parameters (first four passed as registers, but with room on stack). @@ -145,49 +121,50 @@ // use this space to store the register passed parameters. static const int kInputString = kFrameAlign; // StartIndex is passed as 32 bit int. - static const int kStartIndex = kInputString + kPointerSize; - static const int kInputStart = kStartIndex + kPointerSize; - static const int kInputEnd = kInputStart + kPointerSize; - static const int kRegisterOutput = kInputEnd + kPointerSize; + static const int kStartIndex = kInputString + kRegisterSize; + static const int kInputStart = kStartIndex + kRegisterSize; + static const int kInputEnd = kInputStart + kRegisterSize; + static const int kRegisterOutput = kInputEnd + kRegisterSize; // For the case of global regular expression, we have room to store at least // one set of capture results. For the case of non-global regexp, we ignore // this value. NumOutputRegisters is passed as 32-bit value. The upper // 32 bit of this 64-bit stack slot may contain garbage. 
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; - static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput + kRegisterSize; + static const int kStackHighEnd = kNumOutputRegisters + kRegisterSize; // DirectCall is passed as 32 bit int (values 0 or 1). - static const int kDirectCall = kStackHighEnd + kPointerSize; - static const int kIsolate = kDirectCall + kPointerSize; + static const int kDirectCall = kStackHighEnd + kRegisterSize; + static const int kIsolate = kDirectCall + kRegisterSize; #else // In AMD64 ABI Calling Convention, the first six integer parameters // are passed as registers, and caller must allocate space on the stack // if it wants them stored. We push the parameters after the frame pointer. - static const int kInputString = kFramePointer - kPointerSize; - static const int kStartIndex = kInputString - kPointerSize; - static const int kInputStart = kStartIndex - kPointerSize; - static const int kInputEnd = kInputStart - kPointerSize; - static const int kRegisterOutput = kInputEnd - kPointerSize; + static const int kInputString = kFramePointer - kRegisterSize; + static const int kStartIndex = kInputString - kRegisterSize; + static const int kInputStart = kStartIndex - kRegisterSize; + static const int kInputEnd = kInputStart - kRegisterSize; + static const int kRegisterOutput = kInputEnd - kRegisterSize; + // For the case of global regular expression, we have room to store at least // one set of capture results. For the case of non-global regexp, we ignore // this value. 
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize; static const int kStackHighEnd = kFrameAlign; - static const int kDirectCall = kStackHighEnd + kPointerSize; - static const int kIsolate = kDirectCall + kPointerSize; + static const int kDirectCall = kStackHighEnd + kRegisterSize; + static const int kIsolate = kDirectCall + kRegisterSize; #endif #ifdef _WIN64 // Microsoft calling convention has three callee-saved registers // (that we are using). We push these after the frame pointer. - static const int kBackup_rsi = kFramePointer - kPointerSize; - static const int kBackup_rdi = kBackup_rsi - kPointerSize; - static const int kBackup_rbx = kBackup_rdi - kPointerSize; + static const int kBackup_rsi = kFramePointer - kRegisterSize; + static const int kBackup_rdi = kBackup_rsi - kRegisterSize; + static const int kBackup_rbx = kBackup_rdi - kRegisterSize; static const int kLastCalleeSaveRegister = kBackup_rbx; #else // AMD64 Calling Convention has only one callee-save register that // we use. We push this after the frame pointer (and after the // parameters). - static const int kBackup_rbx = kNumOutputRegisters - kPointerSize; + static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize; static const int kLastCalleeSaveRegister = kBackup_rbx; #endif @@ -268,6 +245,8 @@ // Increments the stack pointer (rcx) by a word size. inline void Drop(); + inline void ReadPositionFromRegister(Register dst, int reg); + Isolate* isolate() const { return masm_.isolate(); } MacroAssembler masm_; diff -Nru nodejs-0.11.13/deps/v8/src/x64/simulator-x64.cc nodejs-0.11.15/deps/v8/src/x64/simulator-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/simulator-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/simulator-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,26 +1,3 @@ // Copyright 2009 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. diff -Nru nodejs-0.11.13/deps/v8/src/x64/simulator-x64.h nodejs-0.11.15/deps/v8/src/x64/simulator-x64.h --- nodejs-0.11.13/deps/v8/src/x64/simulator-x64.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/simulator-x64.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. 
-// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_X64_SIMULATOR_X64_H_ #define V8_X64_SIMULATOR_X64_H_ -#include "allocation.h" +#include "src/allocation.h" namespace v8 { namespace internal { @@ -47,9 +24,6 @@ #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8)) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - (reinterpret_cast<TryCatch*>(try_catch_address)) - // The stack limit beyond which we will throw stack overflow errors in // generated code. Because generated code on x64 uses the C stack, we // just use the C stack limit. diff -Nru nodejs-0.11.13/deps/v8/src/x64/stub-cache-x64.cc nodejs-0.11.15/deps/v8/src/x64/stub-cache-x64.cc --- nodejs-0.11.13/deps/v8/src/x64/stub-cache-x64.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x64/stub-cache-x64.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,38 +1,15 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_X64 -#include "arguments.h" -#include "ic-inl.h" -#include "codegen.h" -#include "stub-cache.h" +#include "src/arguments.h" +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" namespace v8 { namespace internal { @@ -47,16 +24,16 @@ Register receiver, Register name, // The offset is scaled by 4, based on - // kHeapObjectTagSize, which is two bits + // kCacheIndexShift, which is two bits Register offset) { // We need to scale up the pointer by 2 when the offset is scaled by less // than the pointer size. - ASSERT(kPointerSize == kInt64Size - ? kPointerSizeLog2 == kHeapObjectTagSize + 1 - : kPointerSizeLog2 == kHeapObjectTagSize); + DCHECK(kPointerSize == kInt64Size + ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1 + : kPointerSizeLog2 == StubCache::kCacheIndexShift); ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; - ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry)); + DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry)); // The offset register holds the entry offset times four (due to masking // and shifting optimizations). 
ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); @@ -109,14 +86,11 @@ } -void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, - Label* miss_label, - Register receiver, - Handle<Name> name, - Register scratch0, - Register scratch1) { - ASSERT(name->IsUniqueName()); - ASSERT(!receiver.is(scratch0)); +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(name->IsUniqueName()); + DCHECK(!receiver.is(scratch0)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1); __ IncrementCounter(counters->negative_lookups_miss(), 1); @@ -171,19 +145,19 @@ USE(extra3); // The register extra2 is not used on the X64 platform. // Make sure that code is valid. The multiplying code relies on the // entry size being 3 * kPointerSize. - ASSERT(sizeof(Entry) == 3 * kPointerSize); + DCHECK(sizeof(Entry) == 3 * kPointerSize); // Make sure the flags do not name a specific type. - ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); // Make sure that there are no register conflicts. - ASSERT(!scratch.is(receiver)); - ASSERT(!scratch.is(name)); + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); // Check scratch register is valid, extra and extra2 are unused. - ASSERT(!scratch.is(no_reg)); - ASSERT(extra2.is(no_reg)); - ASSERT(extra3.is(no_reg)); + DCHECK(!scratch.is(no_reg)); + DCHECK(extra2.is(no_reg)); + DCHECK(extra3.is(no_reg)); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); @@ -198,7 +172,7 @@ __ xorp(scratch, Immediate(flags)); // We mask out the last two bits because they are not part of the hash and // they are always 01 for maps. Also in the two 'and' instructions below. 
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); + __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); // Probe the primary table. ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch); @@ -207,10 +181,10 @@ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ xorp(scratch, Immediate(flags)); - __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); + __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); __ subl(scratch, name); __ addl(scratch, Immediate(flags)); - __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize)); + __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift)); // Probe the secondary table. ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch); @@ -222,30 +196,8 @@ } -void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, - int index, - Register prototype) { - // Load the global or builtins object from the current context. - __ movp(prototype, - Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - // Load the native context from the global or builtins object. - __ movp(prototype, - FieldOperand(prototype, GlobalObject::kNativeContextOffset)); - // Load the function from the native context. - __ movp(prototype, Operand(prototype, Context::SlotOffset(index))); - // Load the initial map. The global functions all have initial maps. - __ movp(prototype, - FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); - // Load the prototype from the initial map. 
- __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset)); -} - - -void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( - MacroAssembler* masm, - int index, - Register prototype, - Label* miss) { +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); // Get the global function with the given index. Handle<JSFunction> function( @@ -266,65 +218,28 @@ } -void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, - Register receiver, - Register scratch, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss_label); - - // Check that the object is a JS array. - __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); - __ j(not_equal, miss_label); - - // Load length directly from the JS array. - __ movp(rax, FieldOperand(receiver, JSArray::kLengthOffset)); - __ ret(0); -} - - -void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, - Register receiver, - Register result, - Register scratch, - Label* miss_label) { +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register result, Register scratch, + Label* miss_label) { __ TryGetFunctionPrototype(receiver, result, miss_label); if (!result.is(rax)) __ movp(rax, result); __ ret(0); } -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - bool inobject, - int index, - Representation representation) { - ASSERT(!representation.IsDouble()); - int offset = index * kPointerSize; - if (!inobject) { - // Calculate the offset into the properties array. 
- offset = offset + FixedArray::kHeaderSize; - __ movp(dst, FieldOperand(src, JSObject::kPropertiesOffset)); - src = dst; - } - __ movp(dst, FieldOperand(src, offset)); -} - - static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj) { - STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); - STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); - STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); - STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); - STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); __ Push(name); Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); - ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); __ Move(kScratchRegister, interceptor); __ Push(kScratchRegister); __ Push(receiver); @@ -340,22 +255,17 @@ Handle<JSObject> holder_obj, IC::UtilityId id) { PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallExternalReference( - ExternalReference(IC_Utility(id), masm->isolate()), - StubCache::kInterceptorArgsLength); + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); } // Generate call to api function. 
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, - const CallOptimization& optimization, - Handle<Map> receiver_map, - Register receiver, - Register scratch_in, - bool is_store, - int argc, - Register* values) { - ASSERT(optimization.is_simple_api_call()); +void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch_in, + bool is_store, int argc, Register* values) { + DCHECK(optimization.is_simple_api_call()); __ PopReturnAddressTo(scratch_in); // receiver @@ -363,8 +273,8 @@ // Write the arguments to stack frame. for (int i = 0; i < argc; i++) { Register arg = values[argc-1-i]; - ASSERT(!receiver.is(arg)); - ASSERT(!scratch_in.is(arg)); + DCHECK(!receiver.is(arg)); + DCHECK(!scratch_in.is(arg)); __ Push(arg); } __ PushReturnAddressFrom(scratch_in); @@ -420,29 +330,17 @@ api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE); // Jump to stub. - CallApiFunctionStub stub(is_store, call_data_undefined, argc); + CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); __ TailCallStub(&stub); } -void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { - if (!label->is_unused()) { - __ bind(label); - __ Move(this->name(), name); - } -} - - -void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, - Handle<JSGlobalObject> global, - Handle<Name> name, - Register scratch, - Label* miss) { +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name); - ASSERT(cell->value()->IsTheHole()); + DCHECK(cell->value()->IsTheHole()); __ Move(scratch, cell); __ Cmp(FieldOperand(scratch, Cell::kValueOffset), masm->isolate()->factory()->the_hole_value()); @@ -450,54 +348,63 @@ } -void 
StoreStubCompiler::GenerateNegativeHolderLookup( - MacroAssembler* masm, - Handle<JSObject> holder, - Register holder_reg, - Handle<Name> name, - Label* miss) { - if (holder->IsJSGlobalObject()) { - GenerateCheckPropertyCell( - masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); - } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { - GenerateDictionaryNegativeLookup( - masm, miss, holder_reg, name, scratch1(), scratch2()); +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ jmp(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM((masm())) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ Move(this->name(), name); } } // Receiver_reg is preserved on jumps to miss_label, but may be destroyed if // store is successful. -void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register unused, - Label* miss_label, - Label* slow) { +void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register unused, Label* miss_label, Label* slow) { int descriptor = transition->LastAdded(); DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); Representation representation = details.representation(); - ASSERT(!representation.IsNone()); + DCHECK(!representation.IsNone()); if (details.type() == CONSTANT) { - Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); + Handle<Object> 
constant(descriptors->GetValue(descriptor), isolate()); __ Cmp(value_reg, constant); __ j(not_equal, miss_label); } else if (representation.IsSmi()) { __ JumpIfNotSmi(value_reg, miss_label); } else if (representation.IsHeapObject()) { __ JumpIfSmi(value_reg, miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + if (!it.Done()) { + Label do_store; + while (true) { + __ CompareMap(value_reg, it.Current()); + it.Advance(); + if (it.Done()) { + __ j(not_equal, miss_label); + break; + } + __ j(equal, &do_store, Label::kNear); + } + __ bind(&do_store); + } } else if (representation.IsDouble()) { Label do_store, heap_number; - __ AllocateHeapNumber(storage_reg, scratch1, slow); + __ AllocateHeapNumber(storage_reg, scratch1, slow, MUTABLE); __ JumpIfNotSmi(value_reg, &heap_number); __ SmiToInteger32(scratch1, value_reg); @@ -505,21 +412,20 @@ __ jmp(&do_store); __ bind(&heap_number); - __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(), - miss_label, DONT_DO_SMI_CHECK); + __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label, + DONT_DO_SMI_CHECK); __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); __ bind(&do_store); __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0); } - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + // Stub never generated for objects that require access checks. + DCHECK(!transition->is_access_check_needed()); // Perform map transition for the receiver if necessary. if (details.type() == FIELD && - object->map()->unused_property_fields() == 0) { + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { // The properties must be extended before we can store the value. // We jump to a runtime call that extends the properties array. 
__ PopReturnAddressTo(scratch1); @@ -529,9 +435,8 @@ __ PushReturnAddressFrom(scratch1); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), - masm->isolate()), - 3, - 1); + isolate()), + 3, 1); return; } @@ -549,7 +454,7 @@ OMIT_SMI_CHECK); if (details.type() == CONSTANT) { - ASSERT(value_reg.is(rax)); + DCHECK(value_reg.is(rax)); __ ret(0); return; } @@ -560,14 +465,14 @@ // Adjust for the number of properties stored in the object. Even in the // face of a transition we can use the old map here because the size of the // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); + index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. SmiCheck smi_check = representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); + int offset = transition->instance_size() + (index * kPointerSize); if (representation.IsDouble()) { __ movp(FieldOperand(receiver_reg, offset), storage_reg); } else { @@ -606,132 +511,44 @@ } // Return the value (register rax). - ASSERT(value_reg.is(rax)); + DCHECK(value_reg.is(rax)); __ ret(0); } -// Both name_reg and receiver_reg are preserved on jumps to miss_label, -// but may be destroyed if store is successful. -void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { - // Stub never generated for non-global objects that require access - // checks. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - int index = lookup->GetFieldIndex().field_index(); - - // Adjust for the number of properties stored in the object. 
Even in the - // face of a transition we can use the old map here because the size of the - // object and the number of in-object properties is not going to change. - index -= object->map()->inobject_properties(); - - Representation representation = lookup->representation(); - ASSERT(!representation.IsNone()); - if (representation.IsSmi()) { - __ JumpIfNotSmi(value_reg, miss_label); - } else if (representation.IsHeapObject()) { - __ JumpIfSmi(value_reg, miss_label); - } else if (representation.IsDouble()) { - // Load the double storage. - if (index < 0) { - int offset = object->map()->instance_size() + (index * kPointerSize); - __ movp(scratch1, FieldOperand(receiver_reg, offset)); - } else { - __ movp(scratch1, - FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); - int offset = index * kPointerSize + FixedArray::kHeaderSize; - __ movp(scratch1, FieldOperand(scratch1, offset)); - } - - // Store the value into the storage. - Label do_store, heap_number; - __ JumpIfNotSmi(value_reg, &heap_number); - __ SmiToInteger32(scratch2, value_reg); - __ Cvtlsi2sd(xmm0, scratch2); - __ jmp(&do_store); - - __ bind(&heap_number); - __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(), - miss_label, DONT_DO_SMI_CHECK); - __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); - __ bind(&do_store); - __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0); - // Return the value (register rax). - ASSERT(value_reg.is(rax)); - __ ret(0); - return; - } - - // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; - if (index < 0) { - // Set the property straight into the object. - int offset = object->map()->instance_size() + (index * kPointerSize); - __ movp(FieldOperand(receiver_reg, offset), value_reg); - - if (!representation.IsSmi()) { - // Update the write barrier for the array address. - // Pass the value being stored in the now unused name_reg. 
- __ movp(name_reg, value_reg); - __ RecordWriteField( - receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, smi_check); - } - } else { - // Write to the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - // Get the properties array (optimistically). - __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ movp(FieldOperand(scratch1, offset), value_reg); - - if (!representation.IsSmi()) { - // Update the write barrier for the array address. - // Pass the value being stored in the now unused name_reg. - __ movp(name_reg, value_reg); - __ RecordWriteField( - scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, smi_check); +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + Label do_store; + while (true) { + __ CompareMap(value_reg, it.Current()); + it.Advance(); + if (it.Done()) { + __ j(not_equal, miss_label); + break; } + __ j(equal, &do_store, Label::kNear); } + __ bind(&do_store); - // Return the value (register rax). 
- ASSERT(value_reg.is(rax)); - __ ret(0); + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); } -void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { - __ jmp(code, RelocInfo::CODE_TARGET); -} - - -#undef __ -#define __ ACCESS_MASM((masm())) - - -Register StubCompiler::CheckPrototypes(Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Register holder_reg, - Register scratch1, - Register scratch2, - Handle<Name> name, - Label* miss, - PrototypeCheckType check) { - Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); // Make sure there's no overlap between holder and object registers. - ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && !scratch2.is(scratch1)); // Keep track of the current object in register reg. On the first @@ -741,10 +558,12 @@ int depth = 0; Handle<JSObject> current = Handle<JSObject>::null(); - if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant()); + if (type()->IsConstant()) { + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); + } Handle<JSObject> prototype = Handle<JSObject>::null(); Handle<Map> current_map = receiver_map; - Handle<Map> holder_map(holder->map()); + Handle<Map> holder_map(holder()->map()); // Traverse the prototype chain and check the maps in the prototype chain for // fast and global objects or do negative lookup for normal objects. 
while (!current_map.is_identical_to(holder_map)) { @@ -752,19 +571,19 @@ // Only global objects and objects that do not require access // checks are allowed in stubs. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); prototype = handle(JSObject::cast(current_map->prototype())); if (current_map->is_dictionary_map() && - !current_map->IsJSGlobalObjectMap() && - !current_map->IsJSGlobalProxyMap()) { + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. if (!name->IsUniqueName()) { - ASSERT(name->IsString()); + DCHECK(name->IsString()); name = factory()->InternalizeString(Handle<String>::cast(name)); } - ASSERT(current.is_null() || - current->property_dictionary()->FindEntry(*name) == + DCHECK(current.is_null() || + current->property_dictionary()->FindEntry(name) == NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, @@ -775,7 +594,12 @@ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); } else { bool in_new_space = heap()->InNewSpace(*prototype); - if (in_new_space) { + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. + // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = in_new_space || depth == 1; + if (load_prototype_from_map) { // Save the map in scratch1 for later. __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); } @@ -786,6 +610,9 @@ // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). 
if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch2, miss); } else if (current_map->IsJSGlobalObjectMap()) { @@ -795,12 +622,9 @@ } reg = holder_reg; // From now on the object will be in holder_reg. - if (in_new_space) { - // The prototype is in new space; we cannot store a reference to it - // in the code. Load it from the map. + if (load_prototype_from_map) { __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); } else { - // The prototype is in old space; load it directly. __ Move(reg, prototype); } } @@ -819,7 +643,7 @@ } // Perform security check for access to the global object. - ASSERT(current_map->IsJSGlobalProxyMap() || + DCHECK(current_map->IsJSGlobalProxyMap() || !current_map->is_access_check_needed()); if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, miss); @@ -830,7 +654,7 @@ } -void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ jmp(&success); @@ -841,91 +665,21 @@ } -void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { if (!miss->is_unused()) { Label success; __ jmp(&success); - GenerateRestoreName(masm(), miss, name); + GenerateRestoreName(miss, name); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } } -Register LoadStubCompiler::CallbackHandlerFrontend( - Handle<HeapType> type, - Register object_reg, - Handle<JSObject> holder, - Handle<Name> name, - Handle<Object> callback) { - Label miss; - - Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); - - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - ASSERT(!reg.is(scratch2())); - ASSERT(!reg.is(scratch3())); - ASSERT(!reg.is(scratch4())); - - // Load the properties dictionary. 
- Register dictionary = scratch4(); - __ movp(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - NameDictionaryLookupStub::GeneratePositiveLookup(masm(), - &miss, - &probe_done, - dictionary, - this->name(), - scratch2(), - scratch3()); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // index into the dictionary. Check that the value is the callback. - Register index = scratch3(); - const int kElementsStartOffset = - NameDictionary::kHeaderSize + - NameDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ movp(scratch2(), - Operand(dictionary, index, times_pointer_size, - kValueOffset - kHeapObjectTag)); - __ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT); - __ cmpp(scratch2(), scratch3()); - __ j(not_equal, &miss); - } - - HandlerFrontendFooter(name, &miss); - return reg; -} - - -void LoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - if (!reg.is(receiver())) __ movp(receiver(), reg); - if (kind() == Code::LOAD_IC) { - LoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } else { - KeyedLoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - GenerateTailCall(masm(), stub.GetCode(isolate())); - } -} - - -void LoadStubCompiler::GenerateLoadCallback( - Register reg, - Handle<ExecutableAccessorInfo> callback) { +void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { // Insert additional parameters into the stack frame above return address. 
- ASSERT(!scratch4().is(reg)); + DCHECK(!scratch4().is(reg)); __ PopReturnAddressTo(scratch4()); STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); @@ -937,14 +691,14 @@ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); __ Push(receiver()); // receiver if (heap()->InNewSpace(callback->data())) { - ASSERT(!scratch2().is(reg)); + DCHECK(!scratch2().is(reg)); __ Move(scratch2(), callback); __ Push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset)); // data } else { __ Push(Handle<Object>(callback->data(), isolate())); } - ASSERT(!kScratchRegister.is(reg)); + DCHECK(!kScratchRegister.is(reg)); __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); __ Push(kScratchRegister); // return value __ Push(kScratchRegister); // return value default @@ -961,26 +715,23 @@ Address getter_address = v8::ToCData<Address>(callback->getter()); __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE); - CallApiGetterStub stub; + CallApiGetterStub stub(isolate()); __ TailCallStub(&stub); } -void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. 
__ Move(rax, value); __ ret(0); } -void LoadStubCompiler::GenerateLoadInterceptor( - Register holder_reg, - Handle<Object> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Handle<Name> name) { - ASSERT(interceptor_holder->HasNamedInterceptor()); - ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added @@ -991,10 +742,12 @@ compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { - ExecutableAccessorInfo* callback = - ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); - compile_followup_inline = callback->getter() != NULL && - callback->IsCompatibleReceiver(*object); + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); } } @@ -1002,13 +755,13 @@ // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. 
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); + bool must_perfrom_prototype_check = *holder() != lookup->holder(); bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); @@ -1027,7 +780,7 @@ // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor( - masm(), receiver(), holder_reg, this->name(), interceptor_holder, + masm(), receiver(), holder_reg, this->name(), holder(), IC::kLoadPropertyWithInterceptorOnly); // Check if interceptor provided a value for property. If it's @@ -1048,40 +801,27 @@ // Leave the internal frame. } - GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); + GenerateLoadPostInterceptor(holder_reg, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. __ PopReturnAddressTo(scratch2()); - PushInterceptorArguments(masm(), receiver(), holder_reg, - this->name(), interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); __ PushReturnAddressFrom(scratch2()); ExternalReference ref = ExternalReference( - IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate()); - __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); + IC_Utility(IC::kLoadPropertyWithInterceptor), isolate()); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); } } -void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { - Label success; - // Check that the object is a boolean. 
- __ Cmp(object, factory()->true_value()); - __ j(equal, &success); - __ Cmp(object, factory()->false_value()); - __ j(not_equal, miss); - __ bind(&success); -} - - -Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, Handle<ExecutableAccessorInfo> callback) { - Register holder_reg = HandlerFrontend( - IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); + Register holder_reg = Frontend(receiver(), name); __ PopReturnAddressTo(scratch1()); __ Push(receiver()); @@ -1105,10 +845,8 @@ #define __ ACCESS_MASM(masm) -void StoreStubCompiler::GenerateStoreViaSetter( - MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, Handle<JSFunction> setter) { // ----------- S t a t e ------------- // -- rsp[0] : return address @@ -1124,7 +862,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. __ movp(receiver, - FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ Push(receiver); __ Push(value()); @@ -1152,8 +890,7 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> object, +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( Handle<Name> name) { __ PopReturnAddressTo(scratch1()); __ Push(receiver()); @@ -1162,8 +899,8 @@ __ PushReturnAddressFrom(scratch1()); // Do tail-call to the runtime system. 
- ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); __ TailCallExternalReference(store_ic_property, 3, 1); // Return the generated code. @@ -1171,23 +908,8 @@ } -void StoreStubCompiler::GenerateStoreArrayLength() { - // Prepare tail call to StoreIC_ArrayLength. - __ PopReturnAddressTo(scratch1()); - __ Push(receiver()); - __ Push(value()); - __ PushReturnAddressFrom(scratch1()); - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), - masm()->isolate()); - __ TailCallExternalReference(ref, 2, 1); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { Label miss; __ JumpIfSmi(receiver(), &miss, Label::kNear); @@ -1215,67 +937,39 @@ TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetICCode( - kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, - Handle<JSObject> last, - Handle<Name> name) { - NonexistentHandlerFrontend(type, last, name); - - // Return undefined if maps of the full prototype chain are still the - // same and no global property with this name contains a value. - __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); - __ ret(0); - - // Return the generated code. - return GetCode(kind(), Code::FAST, name); -} - - -Register* LoadStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3, scratch4. 
- static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 }; - return registers; -} - - -Register* KeyedLoadStubCompiler::registers() { +Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4. - static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 }; + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, rax, rbx, rdi, r8 }; return registers; } -Register StoreStubCompiler::value() { - return rax; -} - - -Register* StoreStubCompiler::registers() { +Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { rdx, rcx, rbx, rdi, r8 }; + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + DCHECK(rbx.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, rbx, rdi, r8 }; return registers; } -Register* KeyedStoreStubCompiler::registers() { - // receiver, name, scratch1, scratch2, scratch3. - static Register registers[] = { rdx, rcx, rbx, rdi, r8 }; - return registers; -} +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } #undef __ #define __ ACCESS_MASM(masm) -void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, - Handle<HeapType> type, - Register receiver, - Handle<JSFunction> getter) { +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { // ----------- S t a t e ------------- // -- rax : receiver // -- rcx : name @@ -1289,7 +983,7 @@ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { // Swap in the global receiver. 
__ movp(receiver, - FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); + FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); } __ Push(receiver); ParameterCount actual(0); @@ -1313,62 +1007,63 @@ #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadGlobal( - Handle<HeapType> type, - Handle<GlobalObject> global, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete) { +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { Label miss; - // TODO(verwaest): Directly store to rax. Currently we cannot do this, since - // rax is used as receiver(), which we would otherwise clobber before a - // potential miss. - HandlerFrontendHeader(type, receiver(), global, name, &miss); + FrontendHeader(receiver(), name, &miss); // Get the value from the cell. - __ Move(rbx, cell); - __ movp(rbx, FieldOperand(rbx, PropertyCell::kValueOffset)); + Register result = StoreIC::ValueRegister(); + __ Move(result, cell); + __ movp(result, FieldOperand(result, PropertyCell::kValueOffset)); // Check for deleted property if property can actually be deleted. - if (!is_dont_delete) { - __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex); + if (is_configurable) { + __ CompareRoot(result, Heap::kTheHoleValueRootIndex); __ j(equal, &miss); } else if (FLAG_debug_code) { - __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex); + __ CompareRoot(result, Heap::kTheHoleValueRootIndex); __ Check(not_equal, kDontDeleteCellsCannotContainTheHole); } Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1); - __ movp(rax, rbx); __ ret(0); - HandlerFrontendFooter(name, &miss); + FrontendFooter(name, &miss); // Return the generated code. 
return GetCode(kind(), Code::NORMAL, name); } -Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( - TypeHandleList* types, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; if (check == PROPERTY && (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - __ Cmp(this->name(), name); - __ j(not_equal, &miss); + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ Cmp(this->name(), name); + __ j(not_equal, &miss); + } } Label number_case; Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; __ JumpIfSmi(receiver(), smi_target); + // Polymorphic keyed stores may use the map register Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); int receiver_count = types->length(); int number_of_handled_maps = 0; @@ -1380,13 +1075,13 @@ // Check map and tail call if there's a match __ Cmp(map_reg, map); if (type->Is(HeapType::Number())) { - ASSERT(!number_case.is_unused()); + DCHECK(!number_case.is_unused()); __ bind(&number_case); } __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET); } } - ASSERT(number_of_handled_maps > 0); + DCHECK(number_of_handled_maps > 0); __ bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); @@ -1394,7 +1089,7 @@ // Return the generated code. InlineCacheState state = number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetICCode(kind(), type, name, state); + return GetCode(kind(), type, name, state); } @@ -1402,33 +1097,35 @@ #define __ ACCESS_MASM(masm) -void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( +void ElementHandlerCompiler::GenerateLoadDictionaryElement( MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- rax : key + // -- rcx : key // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- + DCHECK(rdx.is(LoadIC::ReceiverRegister())); + DCHECK(rcx.is(LoadIC::NameRegister())); Label slow, miss; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - __ JumpIfNotSmi(rax, &miss); - __ SmiToInteger32(rbx, rax); - __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ JumpIfNotSmi(rcx, &miss); + __ SmiToInteger32(rbx, rcx); + __ movp(rax, FieldOperand(rdx, JSObject::kElementsOffset)); // Check whether the elements is a number dictionary. 
// rdx: receiver - // rax: key + // rcx: key // rbx: key as untagged int32 - // rcx: elements - __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax); + // rax: elements + __ LoadFromNumberDictionary(&slow, rax, rcx, rbx, r9, rdi, rax); __ ret(0); __ bind(&slow); // ----------- S t a t e ------------- - // -- rax : key + // -- rcx : key // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- @@ -1436,7 +1133,7 @@ __ bind(&miss); // ----------- S t a t e ------------- - // -- rax : key + // -- rcx : key // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- diff -Nru nodejs-0.11.13/deps/v8/src/x87/assembler-x87.cc nodejs-0.11.15/deps/v8/src/x87/assembler-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/assembler-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/assembler-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2053 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been modified +// significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/base/cpu.h" +#include "src/disassembler.h" +#include "src/macro-assembler.h" +#include "src/serialize.h" + +namespace v8 { +namespace internal { + +// ----------------------------------------------------------------------------- +// Implementation of CpuFeatures + +void CpuFeatures::ProbeImpl(bool cross_compile) { + base::CPU cpu; + + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; +} + + +void CpuFeatures::PrintTarget() { } +void CpuFeatures::PrintFeatures() { } + + +// ----------------------------------------------------------------------------- +// Implementation of Displacement + +void Displacement::init(Label* L, Type type) { + DCHECK(!L->is_bound()); + int next = 0; + if (L->is_linked()) { + next = L->pos(); + DCHECK(next > 0); // Displacements must be at positions > 0 + } + // Ensure that we _never_ overflow the next field. 
+ DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize)); + data_ = NextField::encode(next) | TypeField::encode(type); +} + + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo + + +const int RelocInfo::kApplyMask = + RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY | + 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE | + 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE; + + +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially coded. Being + // specially coded on IA32 means that it is a relative address, as used by + // branch instructions. These are also the ones that need changing when a + // code object moves. + return (1 << rmode_) & kApplyMask; +} + + +bool RelocInfo::IsInConstantPool() { + return false; +} + + +void RelocInfo::PatchCode(byte* instructions, int instruction_count) { + // Patch the code at the current address with the supplied instructions. + for (int i = 0; i < instruction_count; i++) { + *(pc_ + i) = *(instructions + i); + } + + // Indicate that code has changed. + CpuFeatures::FlushICache(pc_, instruction_count); +} + + +// Patch the code at the current PC with a call to the target address. +// Additional guard int3 instructions can be added if required. +void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { + // Call instruction takes up 5 bytes and int3 takes up one byte. + static const int kCallCodeSize = 5; + int code_size = kCallCodeSize + guard_bytes; + + // Create a code patcher. + CodePatcher patcher(pc_, code_size); + + // Add a label for checking the size of the code used for returning. +#ifdef DEBUG + Label check_codesize; + patcher.masm()->bind(&check_codesize); +#endif + + // Patch the code. + patcher.masm()->call(target, RelocInfo::NONE32); + + // Check that the size of the code generated is as expected. 
+ DCHECK_EQ(kCallCodeSize, + patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize)); + + // Add the requested number of int3 instructions after the call. + DCHECK_GE(guard_bytes, 0); + for (int i = 0; i < guard_bytes; i++) { + patcher.masm()->int3(); + } +} + + +// ----------------------------------------------------------------------------- +// Implementation of Operand + +Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) { + // [base + disp/r] + if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) { + // [base] + set_modrm(0, base); + if (base.is(esp)) set_sib(times_1, esp, base); + } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) { + // [base + disp8] + set_modrm(1, base); + if (base.is(esp)) set_sib(times_1, esp, base); + set_disp8(disp); + } else { + // [base + disp/r] + set_modrm(2, base); + if (base.is(esp)) set_sib(times_1, esp, base); + set_dispr(disp, rmode); + } +} + + +Operand::Operand(Register base, + Register index, + ScaleFactor scale, + int32_t disp, + RelocInfo::Mode rmode) { + DCHECK(!index.is(esp)); // illegal addressing mode + // [base + index*scale + disp/r] + if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) { + // [base + index*scale] + set_modrm(0, esp); + set_sib(scale, index, base); + } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) { + // [base + index*scale + disp8] + set_modrm(1, esp); + set_sib(scale, index, base); + set_disp8(disp); + } else { + // [base + index*scale + disp/r] + set_modrm(2, esp); + set_sib(scale, index, base); + set_dispr(disp, rmode); + } +} + + +Operand::Operand(Register index, + ScaleFactor scale, + int32_t disp, + RelocInfo::Mode rmode) { + DCHECK(!index.is(esp)); // illegal addressing mode + // [index*scale + disp/r] + set_modrm(0, esp); + set_sib(scale, index, ebp); + set_dispr(disp, rmode); +} + + +bool Operand::is_reg(Register reg) const { + return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only. 
+ && ((buf_[0] & 0x07) == reg.code()); // register codes match. +} + + +bool Operand::is_reg_only() const { + return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only. +} + + +Register Operand::reg() const { + DCHECK(is_reg_only()); + return Register::from_code(buf_[0] & 0x07); +} + + +// ----------------------------------------------------------------------------- +// Implementation of Assembler. + +// Emit a single byte. Must always be inlined. +#define EMIT(x) \ + *pc_++ = (x) + + +#ifdef GENERATED_CODE_COVERAGE +static void InitCoverageLog(); +#endif + +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), + positions_recorder_(this) { + // Clear the buffer in debug mode unless it was provided by the + // caller in which case we can't be sure it's okay to overwrite + // existing code in it; see CodePatcher::CodePatcher(...). +#ifdef DEBUG + if (own_buffer_) { + memset(buffer_, 0xCC, buffer_size_); // int3 + } +#endif + + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); + +#ifdef GENERATED_CODE_COVERAGE + InitCoverageLog(); +#endif +} + + +void Assembler::GetCode(CodeDesc* desc) { + // Finalize code (at this point overflow() may be true, but the gap ensures + // that we are still not overlapping instructions and relocation info). + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. + // Set up code descriptor. 
+ desc->buffer = buffer_; + desc->buffer_size = buffer_size_; + desc->instr_size = pc_offset(); + desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); + desc->origin = this; +} + + +void Assembler::Align(int m) { + DCHECK(IsPowerOf2(m)); + int mask = m - 1; + int addr = pc_offset(); + Nop((m - (addr & mask)) & mask); +} + + +bool Assembler::IsNop(Address addr) { + Address a = addr; + while (*a == 0x66) a++; + if (*a == 0x90) return true; + if (a[0] == 0xf && a[1] == 0x1f) return true; + return false; +} + + +void Assembler::Nop(int bytes) { + EnsureSpace ensure_space(this); + + // Older CPUs that do not support SSE2 may not support multibyte NOP + // instructions. + for (; bytes > 0; bytes--) { + EMIT(0x90); + } + return; +} + + +void Assembler::CodeTargetAlign() { + Align(16); // Preferred alignment of jump targets on ia32. +} + + +void Assembler::cpuid() { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xA2); +} + + +void Assembler::pushad() { + EnsureSpace ensure_space(this); + EMIT(0x60); +} + + +void Assembler::popad() { + EnsureSpace ensure_space(this); + EMIT(0x61); +} + + +void Assembler::pushfd() { + EnsureSpace ensure_space(this); + EMIT(0x9C); +} + + +void Assembler::popfd() { + EnsureSpace ensure_space(this); + EMIT(0x9D); +} + + +void Assembler::push(const Immediate& x) { + EnsureSpace ensure_space(this); + if (x.is_int8()) { + EMIT(0x6a); + EMIT(x.x_); + } else { + EMIT(0x68); + emit(x); + } +} + + +void Assembler::push_imm32(int32_t imm32) { + EnsureSpace ensure_space(this); + EMIT(0x68); + emit(imm32); +} + + +void Assembler::push(Register src) { + EnsureSpace ensure_space(this); + EMIT(0x50 | src.code()); +} + + +void Assembler::push(const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0xFF); + emit_operand(esi, src); +} + + +void Assembler::pop(Register dst) { + DCHECK(reloc_info_writer.last_pc() != NULL); + EnsureSpace ensure_space(this); + EMIT(0x58 | dst.code()); +} + + +void Assembler::pop(const Operand& 
dst) { + EnsureSpace ensure_space(this); + EMIT(0x8F); + emit_operand(eax, dst); +} + + +void Assembler::enter(const Immediate& size) { + EnsureSpace ensure_space(this); + EMIT(0xC8); + emit_w(size); + EMIT(0); +} + + +void Assembler::leave() { + EnsureSpace ensure_space(this); + EMIT(0xC9); +} + + +void Assembler::mov_b(Register dst, const Operand& src) { + CHECK(dst.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0x8A); + emit_operand(dst, src); +} + + +void Assembler::mov_b(const Operand& dst, int8_t imm8) { + EnsureSpace ensure_space(this); + EMIT(0xC6); + emit_operand(eax, dst); + EMIT(imm8); +} + + +void Assembler::mov_b(const Operand& dst, Register src) { + CHECK(src.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0x88); + emit_operand(src, dst); +} + + +void Assembler::mov_w(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x8B); + emit_operand(dst, src); +} + + +void Assembler::mov_w(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x89); + emit_operand(src, dst); +} + + +void Assembler::mov_w(const Operand& dst, int16_t imm16) { + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0xC7); + emit_operand(eax, dst); + EMIT(static_cast<int8_t>(imm16 & 0xff)); + EMIT(static_cast<int8_t>(imm16 >> 8)); +} + + +void Assembler::mov(Register dst, int32_t imm32) { + EnsureSpace ensure_space(this); + EMIT(0xB8 | dst.code()); + emit(imm32); +} + + +void Assembler::mov(Register dst, const Immediate& x) { + EnsureSpace ensure_space(this); + EMIT(0xB8 | dst.code()); + emit(x); +} + + +void Assembler::mov(Register dst, Handle<Object> handle) { + EnsureSpace ensure_space(this); + EMIT(0xB8 | dst.code()); + emit(handle); +} + + +void Assembler::mov(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x8B); + emit_operand(dst, src); +} + + +void Assembler::mov(Register dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x89); + 
EMIT(0xC0 | src.code() << 3 | dst.code()); +} + + +void Assembler::mov(const Operand& dst, const Immediate& x) { + EnsureSpace ensure_space(this); + EMIT(0xC7); + emit_operand(eax, dst); + emit(x); +} + + +void Assembler::mov(const Operand& dst, Handle<Object> handle) { + EnsureSpace ensure_space(this); + EMIT(0xC7); + emit_operand(eax, dst); + emit(handle); +} + + +void Assembler::mov(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x89); + emit_operand(src, dst); +} + + +void Assembler::movsx_b(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xBE); + emit_operand(dst, src); +} + + +void Assembler::movsx_w(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xBF); + emit_operand(dst, src); +} + + +void Assembler::movzx_b(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xB6); + emit_operand(dst, src); +} + + +void Assembler::movzx_w(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xB7); + emit_operand(dst, src); +} + + +void Assembler::cld() { + EnsureSpace ensure_space(this); + EMIT(0xFC); +} + + +void Assembler::rep_movs() { + EnsureSpace ensure_space(this); + EMIT(0xF3); + EMIT(0xA5); +} + + +void Assembler::rep_stos() { + EnsureSpace ensure_space(this); + EMIT(0xF3); + EMIT(0xAB); +} + + +void Assembler::stos() { + EnsureSpace ensure_space(this); + EMIT(0xAB); +} + + +void Assembler::xchg(Register dst, Register src) { + EnsureSpace ensure_space(this); + if (src.is(eax) || dst.is(eax)) { // Single-byte encoding. + EMIT(0x90 | (src.is(eax) ? 
dst.code() : src.code())); + } else { + EMIT(0x87); + EMIT(0xC0 | src.code() << 3 | dst.code()); + } +} + + +void Assembler::xchg(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x87); + emit_operand(dst, src); +} + + +void Assembler::adc(Register dst, int32_t imm32) { + EnsureSpace ensure_space(this); + emit_arith(2, Operand(dst), Immediate(imm32)); +} + + +void Assembler::adc(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x13); + emit_operand(dst, src); +} + + +void Assembler::add(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x03); + emit_operand(dst, src); +} + + +void Assembler::add(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x01); + emit_operand(src, dst); +} + + +void Assembler::add(const Operand& dst, const Immediate& x) { + DCHECK(reloc_info_writer.last_pc() != NULL); + EnsureSpace ensure_space(this); + emit_arith(0, dst, x); +} + + +void Assembler::and_(Register dst, int32_t imm32) { + and_(dst, Immediate(imm32)); +} + + +void Assembler::and_(Register dst, const Immediate& x) { + EnsureSpace ensure_space(this); + emit_arith(4, Operand(dst), x); +} + + +void Assembler::and_(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x23); + emit_operand(dst, src); +} + + +void Assembler::and_(const Operand& dst, const Immediate& x) { + EnsureSpace ensure_space(this); + emit_arith(4, dst, x); +} + + +void Assembler::and_(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x21); + emit_operand(src, dst); +} + + +void Assembler::cmpb(const Operand& op, int8_t imm8) { + EnsureSpace ensure_space(this); + if (op.is_reg(eax)) { + EMIT(0x3C); + } else { + EMIT(0x80); + emit_operand(edi, op); // edi == 7 + } + EMIT(imm8); +} + + +void Assembler::cmpb(const Operand& op, Register reg) { + CHECK(reg.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0x38); + emit_operand(reg, op); +} + + 
+void Assembler::cmpb(Register reg, const Operand& op) { + CHECK(reg.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0x3A); + emit_operand(reg, op); +} + + +void Assembler::cmpw(const Operand& op, Immediate imm16) { + DCHECK(imm16.is_int16()); + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x81); + emit_operand(edi, op); + emit_w(imm16); +} + + +void Assembler::cmp(Register reg, int32_t imm32) { + EnsureSpace ensure_space(this); + emit_arith(7, Operand(reg), Immediate(imm32)); +} + + +void Assembler::cmp(Register reg, Handle<Object> handle) { + EnsureSpace ensure_space(this); + emit_arith(7, Operand(reg), Immediate(handle)); +} + + +void Assembler::cmp(Register reg, const Operand& op) { + EnsureSpace ensure_space(this); + EMIT(0x3B); + emit_operand(reg, op); +} + + +void Assembler::cmp(const Operand& op, const Immediate& imm) { + EnsureSpace ensure_space(this); + emit_arith(7, op, imm); +} + + +void Assembler::cmp(const Operand& op, Handle<Object> handle) { + EnsureSpace ensure_space(this); + emit_arith(7, op, Immediate(handle)); +} + + +void Assembler::cmpb_al(const Operand& op) { + EnsureSpace ensure_space(this); + EMIT(0x38); // CMP r/m8, r8 + emit_operand(eax, op); // eax has same code as register al. +} + + +void Assembler::cmpw_ax(const Operand& op) { + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x39); // CMP r/m16, r16 + emit_operand(eax, op); // eax has same code as register ax. 
+} + + +void Assembler::dec_b(Register dst) { + CHECK(dst.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0xFE); + EMIT(0xC8 | dst.code()); +} + + +void Assembler::dec_b(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xFE); + emit_operand(ecx, dst); +} + + +void Assembler::dec(Register dst) { + EnsureSpace ensure_space(this); + EMIT(0x48 | dst.code()); +} + + +void Assembler::dec(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xFF); + emit_operand(ecx, dst); +} + + +void Assembler::cdq() { + EnsureSpace ensure_space(this); + EMIT(0x99); +} + + +void Assembler::idiv(const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(edi, src); +} + + +void Assembler::div(const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(esi, src); +} + + +void Assembler::imul(Register reg) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + EMIT(0xE8 | reg.code()); +} + + +void Assembler::imul(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xAF); + emit_operand(dst, src); +} + + +void Assembler::imul(Register dst, Register src, int32_t imm32) { + imul(dst, Operand(src), imm32); +} + + +void Assembler::imul(Register dst, const Operand& src, int32_t imm32) { + EnsureSpace ensure_space(this); + if (is_int8(imm32)) { + EMIT(0x6B); + emit_operand(dst, src); + EMIT(imm32); + } else { + EMIT(0x69); + emit_operand(dst, src); + emit(imm32); + } +} + + +void Assembler::inc(Register dst) { + EnsureSpace ensure_space(this); + EMIT(0x40 | dst.code()); +} + + +void Assembler::inc(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xFF); + emit_operand(eax, dst); +} + + +void Assembler::lea(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x8D); + emit_operand(dst, src); +} + + +void Assembler::mul(Register src) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + EMIT(0xE0 | src.code()); +} + + +void 
Assembler::neg(Register dst) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + EMIT(0xD8 | dst.code()); +} + + +void Assembler::neg(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(ebx, dst); +} + + +void Assembler::not_(Register dst) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + EMIT(0xD0 | dst.code()); +} + + +void Assembler::not_(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(edx, dst); +} + + +void Assembler::or_(Register dst, int32_t imm32) { + EnsureSpace ensure_space(this); + emit_arith(1, Operand(dst), Immediate(imm32)); +} + + +void Assembler::or_(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0B); + emit_operand(dst, src); +} + + +void Assembler::or_(const Operand& dst, const Immediate& x) { + EnsureSpace ensure_space(this); + emit_arith(1, dst, x); +} + + +void Assembler::or_(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x09); + emit_operand(src, dst); +} + + +void Assembler::rcl(Register dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + DCHECK(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + EMIT(0xD0 | dst.code()); + } else { + EMIT(0xC1); + EMIT(0xD0 | dst.code()); + EMIT(imm8); + } +} + + +void Assembler::rcr(Register dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + DCHECK(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + EMIT(0xD8 | dst.code()); + } else { + EMIT(0xC1); + EMIT(0xD8 | dst.code()); + EMIT(imm8); + } +} + + +void Assembler::ror(Register dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + DCHECK(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + EMIT(0xC8 | dst.code()); + } else { + EMIT(0xC1); + EMIT(0xC8 | dst.code()); + EMIT(imm8); + } +} + + +void Assembler::ror_cl(Register dst) { + EnsureSpace ensure_space(this); + EMIT(0xD3); + EMIT(0xC8 | dst.code()); +} + + +void Assembler::sar(const Operand& 
dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + DCHECK(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + emit_operand(edi, dst); + } else { + EMIT(0xC1); + emit_operand(edi, dst); + EMIT(imm8); + } +} + + +void Assembler::sar_cl(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xD3); + emit_operand(edi, dst); +} + + +void Assembler::sbb(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x1B); + emit_operand(dst, src); +} + + +void Assembler::shld(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xA5); + emit_operand(dst, src); +} + + +void Assembler::shl(const Operand& dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + DCHECK(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + emit_operand(esp, dst); + } else { + EMIT(0xC1); + emit_operand(esp, dst); + EMIT(imm8); + } +} + + +void Assembler::shl_cl(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xD3); + emit_operand(esp, dst); +} + + +void Assembler::shrd(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xAD); + emit_operand(dst, src); +} + + +void Assembler::shr(const Operand& dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + DCHECK(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + emit_operand(ebp, dst); + } else { + EMIT(0xC1); + emit_operand(ebp, dst); + EMIT(imm8); + } +} + + +void Assembler::shr_cl(const Operand& dst) { + EnsureSpace ensure_space(this); + EMIT(0xD3); + emit_operand(ebp, dst); +} + + +void Assembler::sub(const Operand& dst, const Immediate& x) { + EnsureSpace ensure_space(this); + emit_arith(5, dst, x); +} + + +void Assembler::sub(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x2B); + emit_operand(dst, src); +} + + +void Assembler::sub(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x29); + 
emit_operand(src, dst); +} + + +void Assembler::test(Register reg, const Immediate& imm) { + if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) { + test_b(reg, imm.x_); + return; + } + + EnsureSpace ensure_space(this); + // This is not using emit_arith because test doesn't support + // sign-extension of 8-bit operands. + if (reg.is(eax)) { + EMIT(0xA9); + } else { + EMIT(0xF7); + EMIT(0xC0 | reg.code()); + } + emit(imm); +} + + +void Assembler::test(Register reg, const Operand& op) { + EnsureSpace ensure_space(this); + EMIT(0x85); + emit_operand(reg, op); +} + + +void Assembler::test_b(Register reg, const Operand& op) { + CHECK(reg.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0x84); + emit_operand(reg, op); +} + + +void Assembler::test(const Operand& op, const Immediate& imm) { + if (op.is_reg_only()) { + test(op.reg(), imm); + return; + } + if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) { + return test_b(op, imm.x_); + } + EnsureSpace ensure_space(this); + EMIT(0xF7); + emit_operand(eax, op); + emit(imm); +} + + +void Assembler::test_b(Register reg, uint8_t imm8) { + EnsureSpace ensure_space(this); + // Only use test against byte for registers that have a byte + // variant: eax, ebx, ecx, and edx. 
+ if (reg.is(eax)) { + EMIT(0xA8); + EMIT(imm8); + } else if (reg.is_byte_register()) { + emit_arith_b(0xF6, 0xC0, reg, imm8); + } else { + EMIT(0xF7); + EMIT(0xC0 | reg.code()); + emit(imm8); + } +} + + +void Assembler::test_b(const Operand& op, uint8_t imm8) { + if (op.is_reg_only()) { + test_b(op.reg(), imm8); + return; + } + EnsureSpace ensure_space(this); + EMIT(0xF6); + emit_operand(eax, op); + EMIT(imm8); +} + + +void Assembler::xor_(Register dst, int32_t imm32) { + EnsureSpace ensure_space(this); + emit_arith(6, Operand(dst), Immediate(imm32)); +} + + +void Assembler::xor_(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x33); + emit_operand(dst, src); +} + + +void Assembler::xor_(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x31); + emit_operand(src, dst); +} + + +void Assembler::xor_(const Operand& dst, const Immediate& x) { + EnsureSpace ensure_space(this); + emit_arith(6, dst, x); +} + + +void Assembler::bt(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xA3); + emit_operand(src, dst); +} + + +void Assembler::bts(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xAB); + emit_operand(src, dst); +} + + +void Assembler::bsr(Register dst, const Operand& src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xBD); + emit_operand(dst, src); +} + + +void Assembler::hlt() { + EnsureSpace ensure_space(this); + EMIT(0xF4); +} + + +void Assembler::int3() { + EnsureSpace ensure_space(this); + EMIT(0xCC); +} + + +void Assembler::nop() { + EnsureSpace ensure_space(this); + EMIT(0x90); +} + + +void Assembler::ret(int imm16) { + EnsureSpace ensure_space(this); + DCHECK(is_uint16(imm16)); + if (imm16 == 0) { + EMIT(0xC3); + } else { + EMIT(0xC2); + EMIT(imm16 & 0xFF); + EMIT((imm16 >> 8) & 0xFF); + } +} + + +// Labels refer to positions in the (to be) generated code. +// There are bound, linked, and unused labels. 
+// +// Bound labels refer to known positions in the already +// generated code. pos() is the position the label refers to. +// +// Linked labels refer to unknown positions in the code +// to be generated; pos() is the position of the 32bit +// Displacement of the last instruction using the label. + + +void Assembler::print(Label* L) { + if (L->is_unused()) { + PrintF("unused label\n"); + } else if (L->is_bound()) { + PrintF("bound label to %d\n", L->pos()); + } else if (L->is_linked()) { + Label l = *L; + PrintF("unbound label"); + while (l.is_linked()) { + Displacement disp = disp_at(&l); + PrintF("@ %d ", l.pos()); + disp.print(); + PrintF("\n"); + disp.next(&l); + } + } else { + PrintF("label in inconsistent state (pos = %d)\n", L->pos_); + } +} + + +void Assembler::bind_to(Label* L, int pos) { + EnsureSpace ensure_space(this); + DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position + while (L->is_linked()) { + Displacement disp = disp_at(L); + int fixup_pos = L->pos(); + if (disp.type() == Displacement::CODE_RELATIVE) { + // Relative to Code* heap object pointer. + long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag); + } else { + if (disp.type() == Displacement::UNCONDITIONAL_JUMP) { + DCHECK(byte_at(fixup_pos - 1) == 0xE9); // jmp expected + } + // Relative address, relative to point after address. + int imm32 = pos - (fixup_pos + sizeof(int32_t)); + long_at_put(fixup_pos, imm32); + } + disp.next(L); + } + while (L->is_near_linked()) { + int fixup_pos = L->near_link_pos(); + int offset_to_next = + static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos))); + DCHECK(offset_to_next <= 0); + // Relative address, relative to point after address. 
+ int disp = pos - fixup_pos - sizeof(int8_t); + CHECK(0 <= disp && disp <= 127); + set_byte_at(fixup_pos, disp); + if (offset_to_next < 0) { + L->link_to(fixup_pos + offset_to_next, Label::kNear); + } else { + L->UnuseNear(); + } + } + L->bind_to(pos); +} + + +void Assembler::bind(Label* L) { + EnsureSpace ensure_space(this); + DCHECK(!L->is_bound()); // label can only be bound once + bind_to(L, pc_offset()); +} + + +void Assembler::call(Label* L) { + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + if (L->is_bound()) { + const int long_size = 5; + int offs = L->pos() - pc_offset(); + DCHECK(offs <= 0); + // 1110 1000 #32-bit disp. + EMIT(0xE8); + emit(offs - long_size); + } else { + // 1110 1000 #32-bit disp. + EMIT(0xE8); + emit_disp(L, Displacement::OTHER); + } +} + + +void Assembler::call(byte* entry, RelocInfo::Mode rmode) { + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); + EMIT(0xE8); + if (RelocInfo::IsRuntimeEntry(rmode)) { + emit(reinterpret_cast<uint32_t>(entry), rmode); + } else { + emit(entry - (pc_ + sizeof(int32_t)), rmode); + } +} + + +int Assembler::CallSize(const Operand& adr) { + // Call size is 1 (opcode) + adr.len_ (operand). 
+ return 1 + adr.len_; +} + + +void Assembler::call(const Operand& adr) { + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + EMIT(0xFF); + emit_operand(edx, adr); +} + + +int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) { + return 1 /* EMIT */ + sizeof(uint32_t) /* emit */; +} + + +void Assembler::call(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId ast_id) { + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + DCHECK(RelocInfo::IsCodeTarget(rmode) + || rmode == RelocInfo::CODE_AGE_SEQUENCE); + EMIT(0xE8); + emit(code, rmode, ast_id); +} + + +void Assembler::jmp(Label* L, Label::Distance distance) { + EnsureSpace ensure_space(this); + if (L->is_bound()) { + const int short_size = 2; + const int long_size = 5; + int offs = L->pos() - pc_offset(); + DCHECK(offs <= 0); + if (is_int8(offs - short_size)) { + // 1110 1011 #8-bit disp. + EMIT(0xEB); + EMIT((offs - short_size) & 0xFF); + } else { + // 1110 1001 #32-bit disp. + EMIT(0xE9); + emit(offs - long_size); + } + } else if (distance == Label::kNear) { + EMIT(0xEB); + emit_near_disp(L); + } else { + // 1110 1001 #32-bit disp. 
+ EMIT(0xE9); + emit_disp(L, Displacement::UNCONDITIONAL_JUMP); + } +} + + +void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) { + EnsureSpace ensure_space(this); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); + EMIT(0xE9); + if (RelocInfo::IsRuntimeEntry(rmode)) { + emit(reinterpret_cast<uint32_t>(entry), rmode); + } else { + emit(entry - (pc_ + sizeof(int32_t)), rmode); + } +} + + +void Assembler::jmp(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xFF); + emit_operand(esp, adr); +} + + +void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) { + EnsureSpace ensure_space(this); + DCHECK(RelocInfo::IsCodeTarget(rmode)); + EMIT(0xE9); + emit(code, rmode); +} + + +void Assembler::j(Condition cc, Label* L, Label::Distance distance) { + EnsureSpace ensure_space(this); + DCHECK(0 <= cc && static_cast<int>(cc) < 16); + if (L->is_bound()) { + const int short_size = 2; + const int long_size = 6; + int offs = L->pos() - pc_offset(); + DCHECK(offs <= 0); + if (is_int8(offs - short_size)) { + // 0111 tttn #8-bit disp + EMIT(0x70 | cc); + EMIT((offs - short_size) & 0xFF); + } else { + // 0000 1111 1000 tttn #32-bit disp + EMIT(0x0F); + EMIT(0x80 | cc); + emit(offs - long_size); + } + } else if (distance == Label::kNear) { + EMIT(0x70 | cc); + emit_near_disp(L); + } else { + // 0000 1111 1000 tttn #32-bit disp + // Note: could eliminate cond. jumps to this jump if condition + // is the same however, seems to be rather unlikely case. + EMIT(0x0F); + EMIT(0x80 | cc); + emit_disp(L, Displacement::OTHER); + } +} + + +void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) { + EnsureSpace ensure_space(this); + DCHECK((0 <= cc) && (static_cast<int>(cc) < 16)); + // 0000 1111 1000 tttn #32-bit disp. 
+ EMIT(0x0F); + EMIT(0x80 | cc); + if (RelocInfo::IsRuntimeEntry(rmode)) { + emit(reinterpret_cast<uint32_t>(entry), rmode); + } else { + emit(entry - (pc_ + sizeof(int32_t)), rmode); + } +} + + +void Assembler::j(Condition cc, Handle<Code> code) { + EnsureSpace ensure_space(this); + // 0000 1111 1000 tttn #32-bit disp + EMIT(0x0F); + EMIT(0x80 | cc); + emit(code, RelocInfo::CODE_TARGET); +} + + +// FPU instructions. + +void Assembler::fld(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xD9, 0xC0, i); +} + + +void Assembler::fstp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDD, 0xD8, i); +} + + +void Assembler::fld1() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xE8); +} + + +void Assembler::fldpi() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xEB); +} + + +void Assembler::fldz() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xEE); +} + + +void Assembler::fldln2() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xED); +} + + +void Assembler::fld_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xD9); + emit_operand(eax, adr); +} + + +void Assembler::fld_d(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDD); + emit_operand(eax, adr); +} + + +void Assembler::fstp_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xD9); + emit_operand(ebx, adr); +} + + +void Assembler::fst_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xD9); + emit_operand(edx, adr); +} + + +void Assembler::fstp_d(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDD); + emit_operand(ebx, adr); +} + + +void Assembler::fst_d(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDD); + emit_operand(edx, adr); +} + + +void Assembler::fild_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDB); + emit_operand(eax, adr); +} + + +void Assembler::fild_d(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDF); + 
emit_operand(ebp, adr); +} + + +void Assembler::fistp_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDB); + emit_operand(ebx, adr); +} + + +void Assembler::fisttp_s(const Operand& adr) { + DCHECK(IsEnabled(SSE3)); + EnsureSpace ensure_space(this); + EMIT(0xDB); + emit_operand(ecx, adr); +} + + +void Assembler::fisttp_d(const Operand& adr) { + DCHECK(IsEnabled(SSE3)); + EnsureSpace ensure_space(this); + EMIT(0xDD); + emit_operand(ecx, adr); +} + + +void Assembler::fist_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDB); + emit_operand(edx, adr); +} + + +void Assembler::fistp_d(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDF); + emit_operand(edi, adr); +} + + +void Assembler::fabs() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xE1); +} + + +void Assembler::fchs() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xE0); +} + + +void Assembler::fcos() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xFF); +} + + +void Assembler::fsin() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xFE); +} + + +void Assembler::fptan() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xF2); +} + + +void Assembler::fyl2x() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xF1); +} + + +void Assembler::f2xm1() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xF0); +} + + +void Assembler::fscale() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xFD); +} + + +void Assembler::fninit() { + EnsureSpace ensure_space(this); + EMIT(0xDB); + EMIT(0xE3); +} + + +void Assembler::fadd(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDC, 0xC0, i); +} + + +void Assembler::fadd_i(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xD8, 0xC0, i); +} + + +void Assembler::fsub(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDC, 0xE8, i); +} + + +void Assembler::fsub_i(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xD8, 0xE0, i); +} + + 
+void Assembler::fisub_s(const Operand& adr) { + EnsureSpace ensure_space(this); + EMIT(0xDA); + emit_operand(esp, adr); +} + + +void Assembler::fmul_i(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xD8, 0xC8, i); +} + + +void Assembler::fmul(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDC, 0xC8, i); +} + + +void Assembler::fdiv(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDC, 0xF8, i); +} + + +void Assembler::fdiv_i(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xD8, 0xF0, i); +} + + +void Assembler::faddp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDE, 0xC0, i); +} + + +void Assembler::fsubp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDE, 0xE8, i); +} + + +void Assembler::fsubrp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDE, 0xE0, i); +} + + +void Assembler::fmulp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDE, 0xC8, i); +} + + +void Assembler::fdivp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDE, 0xF8, i); +} + + +void Assembler::fprem() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xF8); +} + + +void Assembler::fprem1() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xF5); +} + + +void Assembler::fxch(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xD9, 0xC8, i); +} + + +void Assembler::fincstp() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xF7); +} + + +void Assembler::ffree(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDD, 0xC0, i); +} + + +void Assembler::ftst() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xE4); +} + + +void Assembler::fucomp(int i) { + EnsureSpace ensure_space(this); + emit_farith(0xDD, 0xE8, i); +} + + +void Assembler::fucompp() { + EnsureSpace ensure_space(this); + EMIT(0xDA); + EMIT(0xE9); +} + + +void Assembler::fucomi(int i) { + EnsureSpace ensure_space(this); + EMIT(0xDB); + EMIT(0xE8 + i); +} + + +void Assembler::fucomip() { + EnsureSpace 
ensure_space(this); + EMIT(0xDF); + EMIT(0xE9); +} + + +void Assembler::fcompp() { + EnsureSpace ensure_space(this); + EMIT(0xDE); + EMIT(0xD9); +} + + +void Assembler::fnstsw_ax() { + EnsureSpace ensure_space(this); + EMIT(0xDF); + EMIT(0xE0); +} + + +void Assembler::fwait() { + EnsureSpace ensure_space(this); + EMIT(0x9B); +} + + +void Assembler::frndint() { + EnsureSpace ensure_space(this); + EMIT(0xD9); + EMIT(0xFC); +} + + +void Assembler::fnclex() { + EnsureSpace ensure_space(this); + EMIT(0xDB); + EMIT(0xE2); +} + + +void Assembler::sahf() { + EnsureSpace ensure_space(this); + EMIT(0x9E); +} + + +void Assembler::setcc(Condition cc, Register reg) { + DCHECK(reg.is_byte_register()); + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0x90 | cc); + EMIT(0xC0 | reg.code()); +} + + +void Assembler::Print() { + Disassembler::Decode(isolate(), stdout, buffer_, pc_); +} + + +void Assembler::RecordJSReturn() { + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + RecordRelocInfo(RelocInfo::JS_RETURN); +} + + +void Assembler::RecordDebugBreakSlot() { + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); +} + + +void Assembler::RecordComment(const char* msg, bool force) { + if (FLAG_code_comments || force) { + EnsureSpace ensure_space(this); + RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); + } +} + + +void Assembler::GrowBuffer() { + DCHECK(buffer_overflow()); + if (!own_buffer_) FATAL("external code buffer is too small"); + + // Compute new buffer size. + CodeDesc desc; // the new buffer + desc.buffer_size = 2 * buffer_size_; + + // Some internal data structures overflow for very large buffers, + // they must ensure that kMaximalBufferSize is not too large. 
+ if ((desc.buffer_size > kMaximalBufferSize) || + (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) { + V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); + } + + // Set up new buffer. + desc.buffer = NewArray<byte>(desc.buffer_size); + desc.instr_size = pc_offset(); + desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos()); + + // Clear the buffer in debug mode. Use 'int3' instructions to make + // sure to get into problems if we ever run uninitialized code. +#ifdef DEBUG + memset(desc.buffer, 0xCC, desc.buffer_size); +#endif + + // Copy the data. + int pc_delta = desc.buffer - buffer_; + int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); + MemMove(desc.buffer, buffer_, desc.instr_size); + MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(), + desc.reloc_size); + + DeleteArray(buffer_); + buffer_ = desc.buffer; + buffer_size_ = desc.buffer_size; + pc_ += pc_delta; + reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.last_pc() + pc_delta); + + // Relocate runtime entries. + for (RelocIterator it(desc); !it.done(); it.next()) { + RelocInfo::Mode rmode = it.rinfo()->rmode(); + if (rmode == RelocInfo::INTERNAL_REFERENCE) { + int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc()); + if (*p != 0) { // 0 means uninitialized. + *p += pc_delta; + } + } + } + + DCHECK(!buffer_overflow()); +} + + +void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { + DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode + DCHECK(is_uint8(imm8)); + DCHECK((op1 & 0x01) == 0); // should be 8bit operation + EMIT(op1); + EMIT(op2 | dst.code()); + EMIT(imm8); +} + + +void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) { + DCHECK((0 <= sel) && (sel <= 7)); + Register ireg = { sel }; + if (x.is_int8()) { + EMIT(0x83); // using a sign-extended 8-bit immediate. 
+ emit_operand(ireg, dst); + EMIT(x.x_ & 0xFF); + } else if (dst.is_reg(eax)) { + EMIT((sel << 3) | 0x05); // short form if the destination is eax. + emit(x); + } else { + EMIT(0x81); // using a literal 32-bit immediate. + emit_operand(ireg, dst); + emit(x); + } +} + + +void Assembler::emit_operand(Register reg, const Operand& adr) { + const unsigned length = adr.len_; + DCHECK(length > 0); + + // Emit updated ModRM byte containing the given register. + pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3); + + // Emit the rest of the encoded operand. + for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i]; + pc_ += length; + + // Emit relocation information if necessary. + if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) { + pc_ -= sizeof(int32_t); // pc_ must be *at* disp32 + RecordRelocInfo(adr.rmode_); + pc_ += sizeof(int32_t); + } +} + + +void Assembler::emit_farith(int b1, int b2, int i) { + DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode + DCHECK(0 <= i && i < 8); // illegal stack offset + EMIT(b1); + EMIT(b2 + i); +} + + +void Assembler::db(uint8_t data) { + EnsureSpace ensure_space(this); + EMIT(data); +} + + +void Assembler::dd(uint32_t data) { + EnsureSpace ensure_space(this); + emit(data); +} + + +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { + DCHECK(!RelocInfo::IsNone(rmode)); + // Don't record external references unless the heap will be serialized. + if (rmode == RelocInfo::EXTERNAL_REFERENCE && + !serializer_enabled() && !emit_debug_code()) { + return; + } + RelocInfo rinfo(pc_, rmode, data, NULL); + reloc_info_writer.Write(&rinfo); +} + + +Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { + // No out-of-line constant pool support. + DCHECK(!FLAG_enable_ool_constant_pool); + return isolate->factory()->empty_constant_pool_array(); +} + + +void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { + // No out-of-line constant pool support. 
+ DCHECK(!FLAG_enable_ool_constant_pool); + return; +} + + +#ifdef GENERATED_CODE_COVERAGE +static FILE* coverage_log = NULL; + + +static void InitCoverageLog() { + char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG"); + if (file_name != NULL) { + coverage_log = fopen(file_name, "aw+"); + } +} + + +void LogGeneratedCodeCoverage(const char* file_line) { + const char* return_address = (&file_line)[-1]; + char* push_insn = const_cast<char*>(return_address - 12); + push_insn[0] = 0xeb; // Relative branch insn. + push_insn[1] = 13; // Skip over coverage insns. + if (coverage_log != NULL) { + fprintf(coverage_log, "%s\n", file_line); + fflush(coverage_log); + } +} + +#endif + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/assembler-x87.h nodejs-0.11.15/deps/v8/src/x87/assembler-x87.h --- nodejs-0.11.13/deps/v8/src/x87/assembler-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/assembler-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1053 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2011 the V8 project authors. All rights reserved. + +// A light-weight IA32 Assembler. + +#ifndef V8_X87_ASSEMBLER_X87_H_ +#define V8_X87_ASSEMBLER_X87_H_ + +#include "src/isolate.h" +#include "src/serialize.h" + +namespace v8 { +namespace internal { + +// CPU Registers. +// +// 1) We would prefer to use an enum, but enum values are assignment- +// compatible with int, which has caused code-generation bugs. +// +// 2) We would prefer to use a class instead of a struct but we don't like +// the register initialization to depend on the particular initialization +// order (which appears to be different on OS X, Linux, and Windows for the +// installed versions of C++ we tried). Using a struct permits C-style +// "initialization". Also, the Register objects cannot be const as this +// forces initialization stubs in MSVC, making us dependent on initialization +// order. 
+// +// 3) By not using an enum, we are possibly preventing the compiler from +// doing certain constant folds, which may significantly reduce the +// code generated for some assembly instructions (because they boil down +// to a few constants). If this is a problem, we could change the code +// such that we use an enum in optimized mode, and the struct in debug +// mode. This way we get the compile-time error checking in debug mode +// and best performance in optimized code. +// +struct Register { + static const int kMaxNumAllocatableRegisters = 6; + static int NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; + } + static const int kNumRegisters = 8; + + static inline const char* AllocationIndexToString(int index); + + static inline int ToAllocationIndex(Register reg); + + static inline Register FromAllocationIndex(int index); + + static Register from_code(int code) { + DCHECK(code >= 0); + DCHECK(code < kNumRegisters); + Register r = { code }; + return r; + } + bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is(Register reg) const { return code_ == reg.code_; } + // eax, ebx, ecx and edx are byte registers, the rest are not. + bool is_byte_register() const { return code_ <= 3; } + int code() const { + DCHECK(is_valid()); + return code_; + } + int bit() const { + DCHECK(is_valid()); + return 1 << code_; + } + + // Unfortunately we can't make this private in a struct. 
+ int code_; +}; + +const int kRegister_eax_Code = 0; +const int kRegister_ecx_Code = 1; +const int kRegister_edx_Code = 2; +const int kRegister_ebx_Code = 3; +const int kRegister_esp_Code = 4; +const int kRegister_ebp_Code = 5; +const int kRegister_esi_Code = 6; +const int kRegister_edi_Code = 7; +const int kRegister_no_reg_Code = -1; + +const Register eax = { kRegister_eax_Code }; +const Register ecx = { kRegister_ecx_Code }; +const Register edx = { kRegister_edx_Code }; +const Register ebx = { kRegister_ebx_Code }; +const Register esp = { kRegister_esp_Code }; +const Register ebp = { kRegister_ebp_Code }; +const Register esi = { kRegister_esi_Code }; +const Register edi = { kRegister_edi_Code }; +const Register no_reg = { kRegister_no_reg_Code }; + + +inline const char* Register::AllocationIndexToString(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + // This is the mapping of allocation indices to registers. + const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" }; + return kNames[index]; +} + + +inline int Register::ToAllocationIndex(Register reg) { + DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp)); + return (reg.code() >= 6) ? reg.code() - 2 : reg.code(); +} + + +inline Register Register::FromAllocationIndex(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + return (index >= 4) ? 
from_code(index + 2) : from_code(index); +} + + +struct X87Register { + static const int kMaxNumAllocatableRegisters = 8; + static const int kMaxNumRegisters = 8; + static int NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; + } + + static int ToAllocationIndex(X87Register reg) { + return reg.code_; + } + + static const char* AllocationIndexToString(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + const char* const names[] = { + "stX_0", "stX_1", "stX_2", "stX_3", "stX_4", + "stX_5", "stX_6", "stX_7" + }; + return names[index]; + } + + static X87Register FromAllocationIndex(int index) { + DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); + X87Register result; + result.code_ = index; + return result; + } + + bool is_valid() const { + return 0 <= code_ && code_ < kMaxNumRegisters; + } + + int code() const { + DCHECK(is_valid()); + return code_; + } + + bool is(X87Register reg) const { + return code_ == reg.code_; + } + + int code_; +}; + + +typedef X87Register DoubleRegister; + + +const X87Register stX_0 = { 0 }; +const X87Register stX_1 = { 1 }; +const X87Register stX_2 = { 2 }; +const X87Register stX_3 = { 3 }; +const X87Register stX_4 = { 4 }; +const X87Register stX_5 = { 5 }; +const X87Register stX_6 = { 6 }; +const X87Register stX_7 = { 7 }; + + +enum Condition { + // any value < 0 is considered no_condition + no_condition = -1, + + overflow = 0, + no_overflow = 1, + below = 2, + above_equal = 3, + equal = 4, + not_equal = 5, + below_equal = 6, + above = 7, + negative = 8, + positive = 9, + parity_even = 10, + parity_odd = 11, + less = 12, + greater_equal = 13, + less_equal = 14, + greater = 15, + + // aliases + carry = below, + not_carry = above_equal, + zero = equal, + not_zero = not_equal, + sign = negative, + not_sign = positive +}; + + +// Returns the equivalent of !cc. +// Negation of the default no_condition (-1) results in a non-default +// no_condition value (-2). 
As long as tests for no_condition check +// for condition < 0, this will work as expected. +inline Condition NegateCondition(Condition cc) { + return static_cast<Condition>(cc ^ 1); +} + + +// Commute a condition such that {a cond b == b cond' a}. +inline Condition CommuteCondition(Condition cc) { + switch (cc) { + case below: + return above; + case above: + return below; + case above_equal: + return below_equal; + case below_equal: + return above_equal; + case less: + return greater; + case greater: + return less; + case greater_equal: + return less_equal; + case less_equal: + return greater_equal; + default: + return cc; + } +} + + +// ----------------------------------------------------------------------------- +// Machine instruction Immediates + +class Immediate BASE_EMBEDDED { + public: + inline explicit Immediate(int x); + inline explicit Immediate(const ExternalReference& ext); + inline explicit Immediate(Handle<Object> handle); + inline explicit Immediate(Smi* value); + inline explicit Immediate(Address addr); + + static Immediate CodeRelativeOffset(Label* label) { + return Immediate(label); + } + + bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); } + bool is_int8() const { + return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_); + } + bool is_int16() const { + return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_); + } + + private: + inline explicit Immediate(Label* value); + + int x_; + RelocInfo::Mode rmode_; + + friend class Operand; + friend class Assembler; + friend class MacroAssembler; +}; + + +// ----------------------------------------------------------------------------- +// Machine instruction Operands + +enum ScaleFactor { + times_1 = 0, + times_2 = 1, + times_4 = 2, + times_8 = 3, + times_int_size = times_4, + times_half_pointer_size = times_2, + times_pointer_size = times_4, + times_twice_pointer_size = times_8 +}; + + +class Operand BASE_EMBEDDED { + public: + // reg + INLINE(explicit Operand(Register reg)); 
+ + // [disp/r] + INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode)); + + // [disp/r] + INLINE(explicit Operand(Immediate imm)); + + // [base + disp/r] + explicit Operand(Register base, int32_t disp, + RelocInfo::Mode rmode = RelocInfo::NONE32); + + // [base + index*scale + disp/r] + explicit Operand(Register base, + Register index, + ScaleFactor scale, + int32_t disp, + RelocInfo::Mode rmode = RelocInfo::NONE32); + + // [index*scale + disp/r] + explicit Operand(Register index, + ScaleFactor scale, + int32_t disp, + RelocInfo::Mode rmode = RelocInfo::NONE32); + + static Operand StaticVariable(const ExternalReference& ext) { + return Operand(reinterpret_cast<int32_t>(ext.address()), + RelocInfo::EXTERNAL_REFERENCE); + } + + static Operand StaticArray(Register index, + ScaleFactor scale, + const ExternalReference& arr) { + return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()), + RelocInfo::EXTERNAL_REFERENCE); + } + + static Operand ForCell(Handle<Cell> cell) { + AllowDeferredHandleDereference embedding_raw_address; + return Operand(reinterpret_cast<int32_t>(cell.location()), + RelocInfo::CELL); + } + + static Operand ForRegisterPlusImmediate(Register base, Immediate imm) { + return Operand(base, imm.x_, imm.rmode_); + } + + // Returns true if this Operand is a wrapper for the specified register. + bool is_reg(Register reg) const; + + // Returns true if this Operand is a wrapper for one register. + bool is_reg_only() const; + + // Asserts that this Operand is a wrapper for one register and returns the + // register. + Register reg() const; + + private: + // Set the ModRM byte without an encoded 'reg' register. The + // register is encoded later as part of the emit_operand operation. 
+ inline void set_modrm(int mod, Register rm); + + inline void set_sib(ScaleFactor scale, Register index, Register base); + inline void set_disp8(int8_t disp); + inline void set_dispr(int32_t disp, RelocInfo::Mode rmode); + + byte buf_[6]; + // The number of bytes in buf_. + unsigned int len_; + // Only valid if len_ > 4. + RelocInfo::Mode rmode_; + + friend class Assembler; + friend class MacroAssembler; +}; + + +// ----------------------------------------------------------------------------- +// A Displacement describes the 32bit immediate field of an instruction which +// may be used together with a Label in order to refer to a yet unknown code +// position. Displacements stored in the instruction stream are used to describe +// the instruction and to chain a list of instructions using the same Label. +// A Displacement contains 2 different fields: +// +// next field: position of next displacement in the chain (0 = end of list) +// type field: instruction type +// +// A next value of null (0) indicates the end of a chain (note that there can +// be no displacement at position zero, because there is always at least one +// instruction byte before the displacement). +// +// Displacement _data field layout +// +// |31.....2|1......0| +// [ next | type | + +class Displacement BASE_EMBEDDED { + public: + enum Type { + UNCONDITIONAL_JUMP, + CODE_RELATIVE, + OTHER + }; + + int data() const { return data_; } + Type type() const { return TypeField::decode(data_); } + void next(Label* L) const { + int n = NextField::decode(data_); + n > 0 ? L->link_to(n) : L->Unuse(); + } + void link_to(Label* L) { init(L, type()); } + + explicit Displacement(int data) { data_ = data; } + + Displacement(Label* L, Type type) { init(L, type); } + + void print() { + PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? 
"jmp" : "[other]"), + NextField::decode(data_)); + } + + private: + int data_; + + class TypeField: public BitField<Type, 0, 2> {}; + class NextField: public BitField<int, 2, 32-2> {}; + + void init(Label* L, Type type); +}; + + +class Assembler : public AssemblerBase { + private: + // We check before assembling an instruction that there is sufficient + // space to write an instruction and its relocation information. + // The relocation writer's position must be kGap bytes above the end of + // the generated instructions. This leaves enough space for the + // longest possible ia32 instruction, 15 bytes, and the longest possible + // relocation information encoding, RelocInfoWriter::kMaxLength == 16. + // (There is a 15 byte limit on ia32 instruction length that rules out some + // otherwise valid instructions.) + // This allows for a single, fast space check per instruction. + static const int kGap = 32; + + public: + // Create an assembler. Instructions and relocation information are emitted + // into a buffer, with the instructions starting from the beginning and the + // relocation information starting from the end of the buffer. See CodeDesc + // for a detailed comment on the layout (globals.h). + // + // If the provided buffer is NULL, the assembler allocates and grows its own + // buffer, and buffer_size determines the initial buffer size. The buffer is + // owned by the assembler and deallocated upon destruction of the assembler. + // + // If the provided buffer is not NULL, the assembler uses the provided buffer + // for code generation and assumes its size to be buffer_size. If the buffer + // is too small, a fatal error occurs. No deallocation of the buffer is done + // upon destruction of the assembler. + // TODO(vitalyr): the assembler does not need an isolate. + Assembler(Isolate* isolate, void* buffer, int buffer_size); + virtual ~Assembler() { } + + // GetCode emits any pending (non-emitted) code and fills the descriptor + // desc. 
GetCode() is idempotent; it returns the same result if no other + // Assembler functions are invoked in between GetCode() calls. + void GetCode(CodeDesc* desc); + + // Read/Modify the code target in the branch/call instruction at pc. + inline static Address target_address_at(Address pc, + ConstantPoolArray* constant_pool); + inline static void set_target_address_at(Address pc, + ConstantPoolArray* constant_pool, + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED); + static inline Address target_address_at(Address pc, Code* code) { + ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; + return target_address_at(pc, constant_pool); + } + static inline void set_target_address_at(Address pc, + Code* code, + Address target, + ICacheFlushMode icache_flush_mode = + FLUSH_ICACHE_IF_NEEDED) { + ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; + set_target_address_at(pc, constant_pool, target); + } + + // Return the code target address at a call site from the return address + // of that call in the instruction stream. + inline static Address target_address_from_return_address(Address pc); + + // Return the code target address of the patch debug break slot + inline static Address break_address_from_return_address(Address pc); + + // This sets the branch destination (which is in the instruction on x86). + // This is for calls and branches within generated code. + inline static void deserialization_set_special_target_at( + Address instruction_payload, Code* code, Address target) { + set_target_address_at(instruction_payload, code, target); + } + + static const int kSpecialTargetSize = kPointerSize; + + // Distance between the address of the code target in the call instruction + // and the return address + static const int kCallTargetAddressOffset = kPointerSize; + // Distance between start of patched return sequence and the emitted address + // to jump to. 
+ static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32. + + // Distance between start of patched debug break slot and the emitted address + // to jump to. + static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32. + + static const int kCallInstructionLength = 5; + static const int kPatchDebugBreakSlotReturnOffset = kPointerSize; + static const int kJSReturnSequenceLength = 6; + + // The debug break slot must be able to contain a call instruction. + static const int kDebugBreakSlotLength = kCallInstructionLength; + + // One byte opcode for test al, 0xXX. + static const byte kTestAlByte = 0xA8; + // One byte opcode for nop. + static const byte kNopByte = 0x90; + + // One byte opcode for a short unconditional jump. + static const byte kJmpShortOpcode = 0xEB; + // One byte prefix for a short conditional jump. + static const byte kJccShortPrefix = 0x70; + static const byte kJncShortOpcode = kJccShortPrefix | not_carry; + static const byte kJcShortOpcode = kJccShortPrefix | carry; + static const byte kJnzShortOpcode = kJccShortPrefix | not_zero; + static const byte kJzShortOpcode = kJccShortPrefix | zero; + + + // --------------------------------------------------------------------------- + // Code generation + // + // - function names correspond one-to-one to ia32 instruction mnemonics + // - unless specified otherwise, instructions operate on 32bit operands + // - instructions on 8bit (byte) operands/registers have a trailing '_b' + // - instructions on 16bit (word) operands/registers have a trailing '_w' + // - naming conflicts with C++ keywords are resolved via a trailing '_' + + // NOTE ON INTERFACE: Currently, the interface is not very consistent + // in the sense that some operations (e.g. mov()) can be called in more + // the one way to generate the same instruction: The Register argument + // can in some cases be replaced with an Operand(Register) argument. + // This should be cleaned up and made more orthogonal. 
The questions + // is: should we always use Operands instead of Registers where an + // Operand is possible, or should we have a Register (overloaded) form + // instead? We must be careful to make sure that the selected instruction + // is obvious from the parameters to avoid hard-to-find code generation + // bugs. + + // Insert the smallest number of nop instructions + // possible to align the pc offset to a multiple + // of m. m must be a power of 2. + void Align(int m); + void Nop(int bytes = 1); + // Aligns code to something that's optimal for a jump target for the platform. + void CodeTargetAlign(); + + // Stack + void pushad(); + void popad(); + + void pushfd(); + void popfd(); + + void push(const Immediate& x); + void push_imm32(int32_t imm32); + void push(Register src); + void push(const Operand& src); + + void pop(Register dst); + void pop(const Operand& dst); + + void enter(const Immediate& size); + void leave(); + + // Moves + void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); } + void mov_b(Register dst, const Operand& src); + void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); } + void mov_b(const Operand& dst, int8_t imm8); + void mov_b(const Operand& dst, Register src); + + void mov_w(Register dst, const Operand& src); + void mov_w(const Operand& dst, Register src); + void mov_w(const Operand& dst, int16_t imm16); + + void mov(Register dst, int32_t imm32); + void mov(Register dst, const Immediate& x); + void mov(Register dst, Handle<Object> handle); + void mov(Register dst, const Operand& src); + void mov(Register dst, Register src); + void mov(const Operand& dst, const Immediate& x); + void mov(const Operand& dst, Handle<Object> handle); + void mov(const Operand& dst, Register src); + + void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); } + void movsx_b(Register dst, const Operand& src); + + void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); } + void movsx_w(Register dst, const 
Operand& src); + + void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); } + void movzx_b(Register dst, const Operand& src); + + void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); } + void movzx_w(Register dst, const Operand& src); + + // Flag management. + void cld(); + + // Repetitive string instructions. + void rep_movs(); + void rep_stos(); + void stos(); + + // Exchange + void xchg(Register dst, Register src); + void xchg(Register dst, const Operand& src); + + // Arithmetics + void adc(Register dst, int32_t imm32); + void adc(Register dst, const Operand& src); + + void add(Register dst, Register src) { add(dst, Operand(src)); } + void add(Register dst, const Operand& src); + void add(const Operand& dst, Register src); + void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); } + void add(const Operand& dst, const Immediate& x); + + void and_(Register dst, int32_t imm32); + void and_(Register dst, const Immediate& x); + void and_(Register dst, Register src) { and_(dst, Operand(src)); } + void and_(Register dst, const Operand& src); + void and_(const Operand& dst, Register src); + void and_(const Operand& dst, const Immediate& x); + + void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); } + void cmpb(const Operand& op, int8_t imm8); + void cmpb(Register reg, const Operand& op); + void cmpb(const Operand& op, Register reg); + void cmpb_al(const Operand& op); + void cmpw_ax(const Operand& op); + void cmpw(const Operand& op, Immediate imm16); + void cmp(Register reg, int32_t imm32); + void cmp(Register reg, Handle<Object> handle); + void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); } + void cmp(Register reg, const Operand& op); + void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); } + void cmp(const Operand& op, const Immediate& imm); + void cmp(const Operand& op, Handle<Object> handle); + + void dec_b(Register dst); + void dec_b(const Operand& dst); + + void 
dec(Register dst); + void dec(const Operand& dst); + + void cdq(); + + void idiv(Register src) { idiv(Operand(src)); } + void idiv(const Operand& src); + void div(Register src) { div(Operand(src)); } + void div(const Operand& src); + + // Signed multiply instructions. + void imul(Register src); // edx:eax = eax * src. + void imul(Register dst, Register src) { imul(dst, Operand(src)); } + void imul(Register dst, const Operand& src); // dst = dst * src. + void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32. + void imul(Register dst, const Operand& src, int32_t imm32); + + void inc(Register dst); + void inc(const Operand& dst); + + void lea(Register dst, const Operand& src); + + // Unsigned multiply instruction. + void mul(Register src); // edx:eax = eax * reg. + + void neg(Register dst); + void neg(const Operand& dst); + + void not_(Register dst); + void not_(const Operand& dst); + + void or_(Register dst, int32_t imm32); + void or_(Register dst, Register src) { or_(dst, Operand(src)); } + void or_(Register dst, const Operand& src); + void or_(const Operand& dst, Register src); + void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); } + void or_(const Operand& dst, const Immediate& x); + + void rcl(Register dst, uint8_t imm8); + void rcr(Register dst, uint8_t imm8); + void ror(Register dst, uint8_t imm8); + void ror_cl(Register dst); + + void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); } + void sar(const Operand& dst, uint8_t imm8); + void sar_cl(Register dst) { sar_cl(Operand(dst)); } + void sar_cl(const Operand& dst); + + void sbb(Register dst, const Operand& src); + + void shld(Register dst, Register src) { shld(dst, Operand(src)); } + void shld(Register dst, const Operand& src); + + void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); } + void shl(const Operand& dst, uint8_t imm8); + void shl_cl(Register dst) { shl_cl(Operand(dst)); } + void shl_cl(const Operand& dst); + + void shrd(Register dst, 
Register src) { shrd(dst, Operand(src)); } + void shrd(Register dst, const Operand& src); + + void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); } + void shr(const Operand& dst, uint8_t imm8); + void shr_cl(Register dst) { shr_cl(Operand(dst)); } + void shr_cl(const Operand& dst); + + void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); } + void sub(const Operand& dst, const Immediate& x); + void sub(Register dst, Register src) { sub(dst, Operand(src)); } + void sub(Register dst, const Operand& src); + void sub(const Operand& dst, Register src); + + void test(Register reg, const Immediate& imm); + void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); } + void test(Register reg, const Operand& op); + void test_b(Register reg, const Operand& op); + void test(const Operand& op, const Immediate& imm); + void test_b(Register reg, uint8_t imm8); + void test_b(const Operand& op, uint8_t imm8); + + void xor_(Register dst, int32_t imm32); + void xor_(Register dst, Register src) { xor_(dst, Operand(src)); } + void xor_(Register dst, const Operand& src); + void xor_(const Operand& dst, Register src); + void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); } + void xor_(const Operand& dst, const Immediate& x); + + // Bit operations. + void bt(const Operand& dst, Register src); + void bts(Register dst, Register src) { bts(Operand(dst), src); } + void bts(const Operand& dst, Register src); + void bsr(Register dst, Register src) { bsr(dst, Operand(src)); } + void bsr(Register dst, const Operand& src); + + // Miscellaneous + void hlt(); + void int3(); + void nop(); + void ret(int imm16); + + // Label operations & relative jumps (PPUM Appendix D) + // + // Takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. 
Usage: + // + // Label L; // unbound label + // j(cc, &L); // forward branch to unbound label + // bind(&L); // bind label to the current pc + // j(cc, &L); // backward branch to bound label + // bind(&L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + + void bind(Label* L); // binds an unbound label L to the current code position + + // Calls + void call(Label* L); + void call(byte* entry, RelocInfo::Mode rmode); + int CallSize(const Operand& adr); + void call(Register reg) { call(Operand(reg)); } + void call(const Operand& adr); + int CallSize(Handle<Code> code, RelocInfo::Mode mode); + void call(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId id = TypeFeedbackId::None()); + + // Jumps + // unconditional jump to L + void jmp(Label* L, Label::Distance distance = Label::kFar); + void jmp(byte* entry, RelocInfo::Mode rmode); + void jmp(Register reg) { jmp(Operand(reg)); } + void jmp(const Operand& adr); + void jmp(Handle<Code> code, RelocInfo::Mode rmode); + + // Conditional jumps + void j(Condition cc, + Label* L, + Label::Distance distance = Label::kFar); + void j(Condition cc, byte* entry, RelocInfo::Mode rmode); + void j(Condition cc, Handle<Code> code); + + // Floating-point operations + void fld(int i); + void fstp(int i); + + void fld1(); + void fldz(); + void fldpi(); + void fldln2(); + + void fld_s(const Operand& adr); + void fld_d(const Operand& adr); + + void fstp_s(const Operand& adr); + void fst_s(const Operand& adr); + void fstp_d(const Operand& adr); + void fst_d(const Operand& adr); + + void fild_s(const Operand& adr); + void fild_d(const Operand& adr); + + void fist_s(const Operand& adr); + + void fistp_s(const Operand& adr); + void fistp_d(const Operand& adr); + + // The fisttp instructions require SSE3. 
+ void fisttp_s(const Operand& adr); + void fisttp_d(const Operand& adr); + + void fabs(); + void fchs(); + void fcos(); + void fsin(); + void fptan(); + void fyl2x(); + void f2xm1(); + void fscale(); + void fninit(); + + void fadd(int i); + void fadd_i(int i); + void fsub(int i); + void fsub_i(int i); + void fmul(int i); + void fmul_i(int i); + void fdiv(int i); + void fdiv_i(int i); + + void fisub_s(const Operand& adr); + + void faddp(int i = 1); + void fsubp(int i = 1); + void fsubrp(int i = 1); + void fmulp(int i = 1); + void fdivp(int i = 1); + void fprem(); + void fprem1(); + + void fxch(int i = 1); + void fincstp(); + void ffree(int i = 0); + + void ftst(); + void fucomp(int i); + void fucompp(); + void fucomi(int i); + void fucomip(); + void fcompp(); + void fnstsw_ax(); + void fwait(); + void fnclex(); + + void frndint(); + + void sahf(); + void setcc(Condition cc, Register reg); + + void cpuid(); + + // TODO(lrn): Need SFENCE for movnt? + + // Debugging + void Print(); + + // Check the code size generated from label to here. + int SizeOfCodeGeneratedSince(Label* label) { + return pc_offset() - label->pos(); + } + + // Mark address of the ExitJSFrame code. + void RecordJSReturn(); + + // Mark address of a debug break slot. + void RecordDebugBreakSlot(); + + // Record a comment relocation entry that can be used by a disassembler. + // Use --code-comments to enable, or provide "force = true" flag to always + // write a comment. + void RecordComment(const char* msg, bool force = false); + + // Writes a single byte or word of data in the code stream. Used for + // inline tables, e.g., jump-tables. + void db(uint8_t data); + void dd(uint32_t data); + + // Check if there is less than kGap bytes available in the buffer. + // If this is the case, we need to grow the buffer before emitting + // an instruction or relocation information. 
+ inline bool buffer_overflow() const { + return pc_ >= reloc_info_writer.pos() - kGap; + } + + // Get the number of bytes available in the buffer. + inline int available_space() const { return reloc_info_writer.pos() - pc_; } + + static bool IsNop(Address addr); + + PositionsRecorder* positions_recorder() { return &positions_recorder_; } + + int relocation_writer_size() { + return (buffer_ + buffer_size_) - reloc_info_writer.pos(); + } + + // Avoid overflows for displacements etc. + static const int kMaximalBufferSize = 512*MB; + + byte byte_at(int pos) { return buffer_[pos]; } + void set_byte_at(int pos, byte value) { buffer_[pos] = value; } + + // Allocate a constant pool of the correct size for the generated code. + Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); + + // Generate the constant pool for the generated code. + void PopulateConstantPool(ConstantPoolArray* constant_pool); + + protected: + byte* addr_at(int pos) { return buffer_ + pos; } + + + private: + uint32_t long_at(int pos) { + return *reinterpret_cast<uint32_t*>(addr_at(pos)); + } + void long_at_put(int pos, uint32_t x) { + *reinterpret_cast<uint32_t*>(addr_at(pos)) = x; + } + + // code emission + void GrowBuffer(); + inline void emit(uint32_t x); + inline void emit(Handle<Object> handle); + inline void emit(uint32_t x, + RelocInfo::Mode rmode, + TypeFeedbackId id = TypeFeedbackId::None()); + inline void emit(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId id = TypeFeedbackId::None()); + inline void emit(const Immediate& x); + inline void emit_w(const Immediate& x); + + // Emit the code-object-relative offset of the label's position + inline void emit_code_relative_offset(Label* label); + + // instruction generation + void emit_arith_b(int op1, int op2, Register dst, int imm8); + + // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81) + // with a given destination expression and an immediate operand. 
It attempts + // to use the shortest encoding possible. + // sel specifies the /n in the modrm byte (see the Intel PRM). + void emit_arith(int sel, Operand dst, const Immediate& x); + + void emit_operand(Register reg, const Operand& adr); + + void emit_farith(int b1, int b2, int i); + + // labels + void print(Label* L); + void bind_to(Label* L, int pos); + + // displacements + inline Displacement disp_at(Label* L); + inline void disp_at_put(Label* L, Displacement disp); + inline void emit_disp(Label* L, Displacement::Type type); + inline void emit_near_disp(Label* L); + + // record reloc info for current pc_ + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + + friend class CodePatcher; + friend class EnsureSpace; + + // code generation + RelocInfoWriter reloc_info_writer; + + PositionsRecorder positions_recorder_; + friend class PositionsRecorder; +}; + + +// Helper class that ensures that there is enough space for generating +// instructions and relocation information. The constructor makes +// sure that there is enough space and (in debug mode) the destructor +// checks that we did not generate too much. 
+class EnsureSpace BASE_EMBEDDED { + public: + explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) { + if (assembler_->buffer_overflow()) assembler_->GrowBuffer(); +#ifdef DEBUG + space_before_ = assembler_->available_space(); +#endif + } + +#ifdef DEBUG + ~EnsureSpace() { + int bytes_generated = space_before_ - assembler_->available_space(); + DCHECK(bytes_generated < assembler_->kGap); + } +#endif + + private: + Assembler* assembler_; +#ifdef DEBUG + int space_before_; +#endif +}; + +} } // namespace v8::internal + +#endif // V8_X87_ASSEMBLER_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/assembler-x87-inl.h nodejs-0.11.15/deps/v8/src/x87/assembler-x87-inl.h --- nodejs-0.11.13/deps/v8/src/x87/assembler-x87-inl.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/assembler-x87-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,571 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +// A light-weight IA32 Assembler. + +#ifndef V8_X87_ASSEMBLER_X87_INL_H_ +#define V8_X87_ASSEMBLER_X87_INL_H_ + +#include "src/x87/assembler-x87.h" + +#include "src/assembler.h" +#include "src/debug.h" + +namespace v8 { +namespace internal { + +bool CpuFeatures::SupportsCrankshaft() { return false; } + + +static const byte kCallOpcode = 0xE8; +static const int kNoCodeAgeSequenceLength = 5; + + +// The modes possibly affected by apply must be in kApplyMask. +void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { + bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH; + if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) { + int32_t* p = reinterpret_cast<int32_t*>(pc_); + *p -= delta; // Relocate entry. + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); + } else if (rmode_ == CODE_AGE_SEQUENCE) { + if (*pc_ == kCallOpcode) { + int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); + *p -= delta; // Relocate entry. + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); + } + } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) { + // Special handling of js_return when a break point is set (call + // instruction has been inserted). 
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); + *p -= delta; // Relocate entry. + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); + } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) { + // Special handling of a debug break slot when a break point is set (call + // instruction has been inserted). + int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); + *p -= delta; // Relocate entry. + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); + } else if (IsInternalReference(rmode_)) { + // absolute code pointer inside code object moves with the code object. + int32_t* p = reinterpret_cast<int32_t*>(pc_); + *p += delta; // Relocate entry. + if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t)); + } +} + + +Address RelocInfo::target_address() { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + return Assembler::target_address_at(pc_, host_); +} + + +Address RelocInfo::target_address_address() { + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) + || rmode_ == EMBEDDED_OBJECT + || rmode_ == EXTERNAL_REFERENCE); + return reinterpret_cast<Address>(pc_); +} + + +Address RelocInfo::constant_pool_entry_address() { + UNREACHABLE(); + return NULL; +} + + +int RelocInfo::target_address_size() { + return Assembler::kSpecialTargetSize; +} + + +void RelocInfo::set_target_address(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode); + Assembler::set_target_address_at(pc_, host_, target); + DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && + IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } +} + + +Object* RelocInfo::target_object() { + DCHECK(IsCodeTarget(rmode_) 
|| rmode_ == EMBEDDED_OBJECT); + return Memory::Object_at(pc_); +} + + +Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + return Memory::Object_Handle_at(pc_); +} + + +void RelocInfo::set_target_object(Object* target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + Memory::Object_at(pc_) = target; + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && + host() != NULL && + target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } +} + + +Address RelocInfo::target_reference() { + DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE); + return Memory::Address_at(pc_); +} + + +Address RelocInfo::target_runtime_entry(Assembler* origin) { + DCHECK(IsRuntimeEntry(rmode_)); + return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_)); +} + + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) { + set_target_address(target, write_barrier_mode, icache_flush_mode); + } +} + + +Handle<Cell> RelocInfo::target_cell_handle() { + DCHECK(rmode_ == RelocInfo::CELL); + Address address = Memory::Address_at(pc_); + return Handle<Cell>(reinterpret_cast<Cell**>(address)); +} + + +Cell* RelocInfo::target_cell() { + DCHECK(rmode_ == RelocInfo::CELL); + return Cell::FromValueAddress(Memory::Address_at(pc_)); +} + + +void RelocInfo::set_target_cell(Cell* cell, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::CELL); + Address address = cell->address() + Cell::kValueOffset; + Memory::Address_at(pc_) = address; + if 
(icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } +} + + +Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(*pc_ == kCallOpcode); + return Memory::Object_Handle_at(pc_ + 1); +} + + +Code* RelocInfo::code_age_stub() { + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + DCHECK(*pc_ == kCallOpcode); + return Code::GetCodeFromTargetAddress( + Assembler::target_address_at(pc_ + 1, host_)); +} + + +void RelocInfo::set_code_age_stub(Code* stub, + ICacheFlushMode icache_flush_mode) { + DCHECK(*pc_ == kCallOpcode); + DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(), + icache_flush_mode); +} + + +Address RelocInfo::call_address() { + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + return Assembler::target_address_at(pc_ + 1, host_); +} + + +void RelocInfo::set_call_address(Address target) { + DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + Assembler::set_target_address_at(pc_ + 1, host_, target); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } +} + + +Object* RelocInfo::call_object() { + return *call_object_address(); +} + + +void RelocInfo::set_call_object(Object* target) { + *call_object_address() = target; +} + + +Object** RelocInfo::call_object_address() { + DCHECK((IsJSReturn(rmode()) && 
IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + return reinterpret_cast<Object**>(pc_ + 1); +} + + +void RelocInfo::WipeOut() { + if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) { + Memory::Address_at(pc_) = NULL; + } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) { + // Effectively write zero into the relocation. + Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t)); + } else { + UNREACHABLE(); + } +} + + +bool RelocInfo::IsPatchedReturnSequence() { + return *pc_ == kCallOpcode; +} + + +bool RelocInfo::IsPatchedDebugBreakSlotSequence() { + return !Assembler::IsNop(pc()); +} + + +void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + visitor->VisitEmbeddedPointer(this); + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeTarget(mode)) { + visitor->VisitCodeTarget(this); + } else if (mode == RelocInfo::CELL) { + visitor->VisitCell(this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + visitor->VisitExternalReference(this); + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); + } else if (((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence())) && + isolate->debug()->has_break_points()) { + visitor->VisitDebugTarget(this); + } else if (IsRuntimeEntry(mode)) { + visitor->VisitRuntimeEntry(this); + } +} + + +template<typename StaticVisitor> +void RelocInfo::Visit(Heap* heap) { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + StaticVisitor::VisitEmbeddedPointer(heap, this); + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeTarget(mode)) { + StaticVisitor::VisitCodeTarget(heap, this); + } else if (mode == RelocInfo::CELL) { + 
StaticVisitor::VisitCell(heap, this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + StaticVisitor::VisitExternalReference(this); + CpuFeatures::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); + } else if (heap->isolate()->debug()->has_break_points() && + ((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence()))) { + StaticVisitor::VisitDebugTarget(heap, this); + } else if (IsRuntimeEntry(mode)) { + StaticVisitor::VisitRuntimeEntry(this); + } +} + + + +Immediate::Immediate(int x) { + x_ = x; + rmode_ = RelocInfo::NONE32; +} + + +Immediate::Immediate(const ExternalReference& ext) { + x_ = reinterpret_cast<int32_t>(ext.address()); + rmode_ = RelocInfo::EXTERNAL_REFERENCE; +} + + +Immediate::Immediate(Label* internal_offset) { + x_ = reinterpret_cast<int32_t>(internal_offset); + rmode_ = RelocInfo::INTERNAL_REFERENCE; +} + + +Immediate::Immediate(Handle<Object> handle) { + AllowDeferredHandleDereference using_raw_address; + // Verify all Objects referred by code are NOT in new space. 
+ Object* obj = *handle; + if (obj->IsHeapObject()) { + DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); + x_ = reinterpret_cast<intptr_t>(handle.location()); + rmode_ = RelocInfo::EMBEDDED_OBJECT; + } else { + // no relocation needed + x_ = reinterpret_cast<intptr_t>(obj); + rmode_ = RelocInfo::NONE32; + } +} + + +Immediate::Immediate(Smi* value) { + x_ = reinterpret_cast<intptr_t>(value); + rmode_ = RelocInfo::NONE32; +} + + +Immediate::Immediate(Address addr) { + x_ = reinterpret_cast<int32_t>(addr); + rmode_ = RelocInfo::NONE32; +} + + +void Assembler::emit(uint32_t x) { + *reinterpret_cast<uint32_t*>(pc_) = x; + pc_ += sizeof(uint32_t); +} + + +void Assembler::emit(Handle<Object> handle) { + AllowDeferredHandleDereference heap_object_check; + // Verify all Objects referred by code are NOT in new space. + Object* obj = *handle; + DCHECK(!isolate()->heap()->InNewSpace(obj)); + if (obj->IsHeapObject()) { + emit(reinterpret_cast<intptr_t>(handle.location()), + RelocInfo::EMBEDDED_OBJECT); + } else { + // no relocation needed + emit(reinterpret_cast<intptr_t>(obj)); + } +} + + +void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) { + if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) { + RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt()); + } else if (!RelocInfo::IsNone(rmode) + && rmode != RelocInfo::CODE_AGE_SEQUENCE) { + RecordRelocInfo(rmode); + } + emit(x); +} + + +void Assembler::emit(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId id) { + AllowDeferredHandleDereference embedding_raw_address; + emit(reinterpret_cast<intptr_t>(code.location()), rmode, id); +} + + +void Assembler::emit(const Immediate& x) { + if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) { + Label* label = reinterpret_cast<Label*>(x.x_); + emit_code_relative_offset(label); + return; + } + if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_); + emit(x.x_); +} + + +void Assembler::emit_code_relative_offset(Label* label) { + if 
(label->is_bound()) { + int32_t pos; + pos = label->pos() + Code::kHeaderSize - kHeapObjectTag; + emit(pos); + } else { + emit_disp(label, Displacement::CODE_RELATIVE); + } +} + + +void Assembler::emit_w(const Immediate& x) { + DCHECK(RelocInfo::IsNone(x.rmode_)); + uint16_t value = static_cast<uint16_t>(x.x_); + reinterpret_cast<uint16_t*>(pc_)[0] = value; + pc_ += sizeof(uint16_t); +} + + +Address Assembler::target_address_at(Address pc, + ConstantPoolArray* constant_pool) { + return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc); +} + + +void Assembler::set_target_address_at(Address pc, + ConstantPoolArray* constant_pool, + Address target, + ICacheFlushMode icache_flush_mode) { + int32_t* p = reinterpret_cast<int32_t*>(pc); + *p = target - (pc + sizeof(int32_t)); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + CpuFeatures::FlushICache(p, sizeof(int32_t)); + } +} + + +Address Assembler::target_address_from_return_address(Address pc) { + return pc - kCallTargetAddressOffset; +} + + +Address Assembler::break_address_from_return_address(Address pc) { + return pc - Assembler::kPatchDebugBreakSlotReturnOffset; +} + + +Displacement Assembler::disp_at(Label* L) { + return Displacement(long_at(L->pos())); +} + + +void Assembler::disp_at_put(Label* L, Displacement disp) { + long_at_put(L->pos(), disp.data()); +} + + +void Assembler::emit_disp(Label* L, Displacement::Type type) { + Displacement disp(L, type); + L->link_to(pc_offset()); + emit(static_cast<int>(disp.data())); +} + + +void Assembler::emit_near_disp(Label* L) { + byte disp = 0x00; + if (L->is_near_linked()) { + int offset = L->near_link_pos() - pc_offset(); + DCHECK(is_int8(offset)); + disp = static_cast<byte>(offset & 0xFF); + } + L->link_to(pc_offset(), Label::kNear); + *pc_++ = disp; +} + + +void Operand::set_modrm(int mod, Register rm) { + DCHECK((mod & -4) == 0); + buf_[0] = mod << 6 | rm.code(); + len_ = 1; +} + + +void Operand::set_sib(ScaleFactor scale, Register index, Register base) { + 
DCHECK(len_ == 1); + DCHECK((scale & -4) == 0); + // Use SIB with no index register only for base esp. + DCHECK(!index.is(esp) || base.is(esp)); + buf_[1] = scale << 6 | index.code() << 3 | base.code(); + len_ = 2; +} + + +void Operand::set_disp8(int8_t disp) { + DCHECK(len_ == 1 || len_ == 2); + *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp; +} + + +void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) { + DCHECK(len_ == 1 || len_ == 2); + int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]); + *p = disp; + len_ += sizeof(int32_t); + rmode_ = rmode; +} + +Operand::Operand(Register reg) { + // reg + set_modrm(3, reg); +} + + +Operand::Operand(int32_t disp, RelocInfo::Mode rmode) { + // [disp/r] + set_modrm(0, ebp); + set_dispr(disp, rmode); +} + + +Operand::Operand(Immediate imm) { + // [disp/r] + set_modrm(0, ebp); + set_dispr(imm.x_, imm.rmode_); +} +} } // namespace v8::internal + +#endif // V8_X87_ASSEMBLER_X87_INL_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/builtins-x87.cc nodejs-0.11.15/deps/v8/src/x87/builtins-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/builtins-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/builtins-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1457 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + + +void Builtins::Generate_Adaptor(MacroAssembler* masm, + CFunctionId id, + BuiltinExtraArguments extra_args) { + // ----------- S t a t e ------------- + // -- eax : number of arguments excluding receiver + // -- edi : called function (only guaranteed when + // extra_args requires it) + // -- esi : context + // -- esp[0] : return address + // -- esp[4] : last argument + // -- ... + // -- esp[4 * argc] : first argument (argc == eax) + // -- esp[4 * (argc +1)] : receiver + // ----------------------------------- + + // Insert extra arguments. + int num_extra_args = 0; + if (extra_args == NEEDS_CALLED_FUNCTION) { + num_extra_args = 1; + Register scratch = ebx; + __ pop(scratch); // Save return address. + __ push(edi); + __ push(scratch); // Restore return address. + } else { + DCHECK(extra_args == NO_EXTRA_ARGUMENTS); + } + + // JumpToExternalReference expects eax to contain the number of arguments + // including the receiver and the extra arguments. + __ add(eax, Immediate(num_extra_args + 1)); + __ JumpToExternalReference(ExternalReference(id, masm->isolate())); +} + + +static void CallRuntimePassFunction( + MacroAssembler* masm, Runtime::FunctionId function_id) { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the function. + __ push(edi); + // Function is also the parameter to the runtime call. + __ push(edi); + + __ CallRuntime(function_id, 1); + // Restore receiver. 
+ __ pop(edi); +} + + +static void GenerateTailCallToSharedCode(MacroAssembler* masm) { + __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset)); + __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); + __ jmp(eax); +} + + +static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { + __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); + __ jmp(eax); +} + + +void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { + // Checking whether the queued function is ready for install is optional, + // since we come across interrupts and stack checks elsewhere. However, + // not checking may delay installing ready functions, and always checking + // would be quite expensive. A good compromise is to first check against + // stack limit as a cue for an interrupt signal. + Label ok; + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(masm->isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); + GenerateTailCallToReturnedCode(masm); + + __ bind(&ok); + GenerateTailCallToSharedCode(masm); +} + + +static void Generate_JSConstructStubHelper(MacroAssembler* masm, + bool is_api_function, + bool create_memento) { + // ----------- S t a t e ------------- + // -- eax: number of arguments + // -- edi: constructor function + // -- ebx: allocation site or undefined + // ----------------------------------- + + // Should never create mementos for api functions. + DCHECK(!is_api_function || !create_memento); + + // Enter a construct frame. + { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + if (create_memento) { + __ AssertUndefinedOrAllocationSite(ebx); + __ push(ebx); + } + + // Store a smi-tagged arguments count on the stack. + __ SmiTag(eax); + __ push(eax); + + // Push the function to invoke on the stack. 
+ __ push(edi); + + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(masm->isolate()); + __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0)); + __ j(not_equal, &rt_call); + + // Verified that the constructor is a JSFunction. + // Load the initial map and verify that it is in fact a map. + // edi: constructor + __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi + __ JumpIfSmi(eax, &rt_call); + // edi: constructor + // eax: initial map (if proven valid below) + __ CmpObjectType(eax, MAP_TYPE, ebx); + __ j(not_equal, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // edi: constructor + // eax: initial map + __ CmpInstanceType(eax, JS_FUNCTION_TYPE); + __ j(equal, &rt_call); + + if (!is_api_function) { + Label allocate; + // The code below relies on these assumptions. + STATIC_ASSERT(JSFunction::kNoSlackTracking == 0); + STATIC_ASSERT(Map::ConstructionCount::kShift + + Map::ConstructionCount::kSize == 32); + // Check if slack tracking is enabled. + __ mov(esi, FieldOperand(eax, Map::kBitField3Offset)); + __ shr(esi, Map::ConstructionCount::kShift); + __ j(zero, &allocate); // JSFunction::kNoSlackTracking + // Decrease generous allocation count. 
+ __ sub(FieldOperand(eax, Map::kBitField3Offset), + Immediate(1 << Map::ConstructionCount::kShift)); + + __ cmp(esi, JSFunction::kFinishSlackTracking); + __ j(not_equal, &allocate); + + __ push(eax); + __ push(edi); + + __ push(edi); // constructor + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(edi); + __ pop(eax); + __ xor_(esi, esi); // JSFunction::kNoSlackTracking + + __ bind(&allocate); + } + + // Now allocate the JSObject on the heap. + // edi: constructor + // eax: initial map + __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); + __ shl(edi, kPointerSizeLog2); + if (create_memento) { + __ add(edi, Immediate(AllocationMemento::kSize)); + } + + __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS); + + Factory* factory = masm->isolate()->factory(); + + // Allocated the JSObject, now initialize the fields. + // eax: initial map + // ebx: JSObject + // edi: start of next object (including memento if create_memento) + __ mov(Operand(ebx, JSObject::kMapOffset), eax); + __ mov(ecx, factory->empty_fixed_array()); + __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx); + __ mov(Operand(ebx, JSObject::kElementsOffset), ecx); + // Set extra fields in the newly allocated object. + // eax: initial map + // ebx: JSObject + // edi: start of next object (including memento if create_memento) + // esi: slack tracking counter (non-API function case) + __ mov(edx, factory->undefined_value()); + __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); + if (!is_api_function) { + Label no_inobject_slack_tracking; + + // Check if slack tracking is enabled. + __ cmp(esi, JSFunction::kNoSlackTracking); + __ j(equal, &no_inobject_slack_tracking); + + // Allocate object with a slack. 
+ __ movzx_b(esi, + FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); + __ lea(esi, + Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize)); + // esi: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmp(esi, edi); + __ Assert(less_equal, + kUnexpectedNumberOfPreAllocatedPropertyFields); + } + __ InitializeFieldsWithFiller(ecx, esi, edx); + __ mov(edx, factory->one_pointer_filler_map()); + // Fill the remaining fields with one pointer filler map. + + __ bind(&no_inobject_slack_tracking); + } + + if (create_memento) { + __ lea(esi, Operand(edi, -AllocationMemento::kSize)); + __ InitializeFieldsWithFiller(ecx, esi, edx); + + // Fill in memento fields if necessary. + // esi: points to the allocated but uninitialized memento. + __ mov(Operand(esi, AllocationMemento::kMapOffset), + factory->allocation_memento_map()); + // Get the cell or undefined. + __ mov(edx, Operand(esp, kPointerSize*2)); + __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset), + edx); + } else { + __ InitializeFieldsWithFiller(ecx, edi, edx); + } + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ or_(ebx, Immediate(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. + // Allocate and initialize a FixedArray if it is. + // eax: initial map + // ebx: JSObject + // edi: start of next object + // Calculate the total number of properties described by the map. + __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset)); + __ movzx_b(ecx, + FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); + __ add(edx, ecx); + // Calculate unused properties past the end of the in-object properties. 
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset)); + __ sub(edx, ecx); + // Done if no extra properties are to be allocated. + __ j(zero, &allocated); + __ Assert(positive, kPropertyAllocationCountFailed); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // ebx: JSObject + // edi: start of next object (will be start of FixedArray) + // edx: number of elements in properties array + __ Allocate(FixedArray::kHeaderSize, + times_pointer_size, + edx, + REGISTER_VALUE_IS_INT32, + edi, + ecx, + no_reg, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. + // ebx: JSObject + // edi: FixedArray + // edx: number of elements + // ecx: start of next object + __ mov(eax, factory->fixed_array_map()); + __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map + __ SmiTag(edx); + __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length + + // Initialize the fields to undefined. + // ebx: JSObject + // edi: FixedArray + // ecx: start of next object + { Label loop, entry; + __ mov(edx, factory->undefined_value()); + __ lea(eax, Operand(edi, FixedArray::kHeaderSize)); + __ jmp(&entry); + __ bind(&loop); + __ mov(Operand(eax, 0), edx); + __ add(eax, Immediate(kPointerSize)); + __ bind(&entry); + __ cmp(eax, ecx); + __ j(below, &loop); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject + // ebx: JSObject + // edi: FixedArray + __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag + __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi); + + + // Continue with JSObject being successfully allocated + // ebx: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. 
+ // ebx: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(ebx); + } + + // Allocate the new receiver object using the runtime call. + __ bind(&rt_call); + int offset = 0; + if (create_memento) { + // Get the cell or allocation site. + __ mov(edi, Operand(esp, kPointerSize * 2)); + __ push(edi); + offset = kPointerSize; + } + + // Must restore esi (context) and edi (constructor) before calling runtime. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ mov(edi, Operand(esp, offset)); + // edi: function (constructor) + __ push(edi); + if (create_memento) { + __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); + } else { + __ CallRuntime(Runtime::kNewObject, 1); + } + __ mov(ebx, eax); // store result in ebx + + // If we ended up using the runtime, and we want a memento, then the + // runtime call made it for us, and we shouldn't do create count + // increment. + Label count_incremented; + if (create_memento) { + __ jmp(&count_incremented); + } + + // New object allocated. + // ebx: newly allocated object + __ bind(&allocated); + + if (create_memento) { + __ mov(ecx, Operand(esp, kPointerSize * 2)); + __ cmp(ecx, masm->isolate()->factory()->undefined_value()); + __ j(equal, &count_incremented); + // ecx is an AllocationSite. We are creating a memento from it, so we + // need to increment the memento create count. + __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset), + Immediate(Smi::FromInt(1))); + __ bind(&count_incremented); + } + + // Retrieve the function from the stack. + __ pop(edi); + + // Retrieve smi-tagged arguments count from the stack. + __ mov(eax, Operand(esp, 0)); + __ SmiUntag(eax); + + // Push the allocated receiver to the stack. We need two copies + // because we may have to return the original one and the calling + // conventions dictate that the called function pops the receiver. + __ push(ebx); + __ push(ebx); + + // Set up pointer to last argument. 
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); + + // Copy arguments and receiver to the expression stack. + Label loop, entry; + __ mov(ecx, eax); + __ jmp(&entry); + __ bind(&loop); + __ push(Operand(ebx, ecx, times_4, 0)); + __ bind(&entry); + __ dec(ecx); + __ j(greater_equal, &loop); + + // Call the function. + if (is_api_function) { + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + __ call(code, RelocInfo::CODE_TARGET); + } else { + ParameterCount actual(eax); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper()); + } + + // Store offset of return address for deoptimizer. + if (!is_api_function) { + masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); + } + + // Restore context from the frame. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + __ JumpIfSmi(eax, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &exit); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ mov(eax, Operand(esp, 0)); + + // Restore the arguments count and leave the construct frame. + __ bind(&exit); + __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count. + + // Leave construct frame. + } + + // Remove caller arguments from the stack and return. 
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ pop(ecx); + __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver + __ push(ecx); + __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1); + __ ret(0); +} + + +void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); +} + + +void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, true, false); +} + + +static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, + bool is_construct) { + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Clear the context before we push it when entering the internal frame. + __ Move(esi, Immediate(0)); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Load the previous frame pointer (ebx) to access C arguments + __ mov(ebx, Operand(ebp, 0)); + + // Get the function from the frame and setup the context. + __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); + __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset)); + + // Push the function and the receiver onto the stack. + __ push(ecx); + __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset)); + + // Load the number of arguments and setup pointer to the arguments. + __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset)); + __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset)); + + // Copy arguments to the stack in a loop. + Label loop, entry; + __ Move(ecx, Immediate(0)); + __ jmp(&entry); + __ bind(&loop); + __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv + __ push(Operand(edx, 0)); // dereference handle + __ inc(ecx); + __ bind(&entry); + __ cmp(ecx, eax); + __ j(not_equal, &loop); + + // Get the function from the stack and call it. + // kPointerSize for the receiver. + __ mov(edi, Operand(esp, eax, times_4, kPointerSize)); + + // Invoke the code. 
+ if (is_construct) { + // No type feedback cell is available + __ mov(ebx, masm->isolate()->factory()->undefined_value()); + CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + __ CallStub(&stub); + } else { + ParameterCount actual(eax); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper()); + } + + // Exit the internal frame. Notice that this also removes the empty. + // context and the function left on the stack by the code + // invocation. + } + __ ret(kPointerSize); // Remove receiver. +} + + +void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, false); +} + + +void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, true); +} + + +void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); + GenerateTailCallToReturnedCode(masm); +} + + + +static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the function. + __ push(edi); + // Function is also the parameter to the runtime call. + __ push(edi); + // Whether to compile in a background thread. + __ Push(masm->isolate()->factory()->ToBoolean(concurrent)); + + __ CallRuntime(Runtime::kCompileOptimized, 2); + // Restore receiver. 
+ __ pop(edi); +} + + +void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { + CallCompileOptimized(masm, false); + GenerateTailCallToReturnedCode(masm); +} + + +void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { + CallCompileOptimized(masm, true); + GenerateTailCallToReturnedCode(masm); +} + + +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. + + // Re-execute the code that was patched back to the young age when + // the stub returns. + __ sub(Operand(esp, 0), Immediate(5)); + __ pushad(); + __ mov(eax, Operand(esp, 8 * kPointerSize)); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(2, ebx); + __ mov(Operand(esp, 1 * kPointerSize), + Immediate(ExternalReference::isolate_address(masm->isolate()))); + __ mov(Operand(esp, 0), eax); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 2); + } + __ popad(); + __ ret(0); +} + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact + // that make_code_young doesn't do any garbage collection which allows us to + // save/restore the 
registers without worrying about which of them contain + // pointers. + __ pushad(); + __ mov(eax, Operand(esp, 8 * kPointerSize)); + __ sub(eax, Immediate(Assembler::kCallInstructionLength)); + { // NOLINT + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(2, ebx); + __ mov(Operand(esp, 1 * kPointerSize), + Immediate(ExternalReference::isolate_address(masm->isolate()))); + __ mov(Operand(esp, 0), eax); + __ CallCFunction( + ExternalReference::get_mark_code_as_executed_function(masm->isolate()), + 2); + } + __ popad(); + + // Perform prologue operations usually performed by the young code stub. + __ pop(eax); // Pop return address into scratch register. + __ push(ebp); // Caller's frame pointer. + __ mov(ebp, esp); + __ push(esi); // Callee's context. + __ push(edi); // Callee's JS Function. + __ push(eax); // Push return address after frame prologue. + + // Jump to point after the code-age stub. + __ ret(0); +} + + +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { + GenerateMakeCodeYoungAgainCommon(masm); +} + + +static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ pushad(); + __ CallRuntime(Runtime::kNotifyStubFailure, 0); + __ popad(); + // Tear down internal frame. + } + + __ pop(MemOperand(esp, 0)); // Ignore state offset + __ ret(0); // Return to IC Miss stub, continuation still on stack. 
+} + + +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { + Generate_NotifyStubFailureHelper(masm); +} + + +void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { + // SaveDoubles is meanless for X87, just used by deoptimizer.cc + Generate_NotifyStubFailureHelper(masm); +} + + +static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, + Deoptimizer::BailoutType type) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Pass deoptimization type to the runtime system. + __ push(Immediate(Smi::FromInt(static_cast<int>(type)))); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + + // Tear down internal frame. + } + + // Get the full codegen state from the stack and untag it. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ SmiUntag(ecx); + + // Switch on the state. + Label not_no_registers, not_tos_eax; + __ cmp(ecx, FullCodeGenerator::NO_REGISTERS); + __ j(not_equal, ¬_no_registers, Label::kNear); + __ ret(1 * kPointerSize); // Remove state. + + __ bind(¬_no_registers); + __ mov(eax, Operand(esp, 2 * kPointerSize)); + __ cmp(ecx, FullCodeGenerator::TOS_REG); + __ j(not_equal, ¬_tos_eax, Label::kNear); + __ ret(2 * kPointerSize); // Remove state, eax. + + __ bind(¬_tos_eax); + __ Abort(kNoCasesLeft); +} + + +void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); +} + + +void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); +} + + +void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); +} + + +void Builtins::Generate_FunctionCall(MacroAssembler* masm) { + Factory* factory = masm->isolate()->factory(); + + // 1. Make sure we have at least one argument. 
+ { Label done; + __ test(eax, eax); + __ j(not_zero, &done); + __ pop(ebx); + __ push(Immediate(factory->undefined_value())); + __ push(ebx); + __ inc(eax); + __ bind(&done); + } + + // 2. Get the function to call (passed as receiver) from the stack, check + // if it is a function. + Label slow, non_function; + // 1 ~ return address. + __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize)); + __ JumpIfSmi(edi, &non_function); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &slow); + + + // 3a. Patch the first argument if necessary when calling a function. + Label shift_arguments; + __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION + { Label convert_to_object, use_global_proxy, patch_receiver; + // Change context eagerly in case we need the global receiver. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + // Do not transform the receiver for strict mode functions. + __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &shift_arguments); + + // Do not transform the receiver for natives (shared already in ebx). + __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, &shift_arguments); + + // Compute the receiver in sloppy mode. + __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument. + + // Call ToObject on the receiver if it is not an object, or use the + // global object if it is null or undefined. 
+ __ JumpIfSmi(ebx, &convert_to_object); + __ cmp(ebx, factory->null_value()); + __ j(equal, &use_global_proxy); + __ cmp(ebx, factory->undefined_value()); + __ j(equal, &use_global_proxy); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &shift_arguments); + + __ bind(&convert_to_object); + + { // In order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(eax); + __ push(eax); + + __ push(ebx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(ebx, eax); + __ Move(edx, Immediate(0)); // restore + + __ pop(eax); + __ SmiUntag(eax); + } + + // Restore the function to edi. + __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize)); + __ jmp(&patch_receiver); + + __ bind(&use_global_proxy); + __ mov(ebx, + Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset)); + + __ bind(&patch_receiver); + __ mov(Operand(esp, eax, times_4, 0), ebx); + + __ jmp(&shift_arguments); + } + + // 3b. Check for function proxy. + __ bind(&slow); + __ Move(edx, Immediate(1)); // indicate function proxy + __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); + __ j(equal, &shift_arguments); + __ bind(&non_function); + __ Move(edx, Immediate(2)); // indicate non-function + + // 3c. Patch the first argument when calling a non-function. The + // CALL_NON_FUNCTION builtin expects the non-function callee as + // receiver, so overwrite the first argument which will ultimately + // become the receiver. + __ mov(Operand(esp, eax, times_4, 0), edi); + + // 4. Shift arguments and return address one slot down on the stack + // (overwriting the original receiver). Adjust argument count to make + // the original first argument the new receiver. 
+ __ bind(&shift_arguments); + { Label loop; + __ mov(ecx, eax); + __ bind(&loop); + __ mov(ebx, Operand(esp, ecx, times_4, 0)); + __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx); + __ dec(ecx); + __ j(not_sign, &loop); // While non-negative (to copy return address). + __ pop(ebx); // Discard copy of return address. + __ dec(eax); // One fewer argument (first argument is new receiver). + } + + // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, + // or a function proxy via CALL_FUNCTION_PROXY. + { Label function, non_proxy; + __ test(edx, edx); + __ j(zero, &function); + __ Move(ebx, Immediate(0)); + __ cmp(edx, Immediate(1)); + __ j(not_equal, &non_proxy); + + __ pop(edx); // return address + __ push(edi); // re-add proxy object as additional argument + __ push(edx); + __ inc(eax); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); + __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + __ bind(&non_proxy); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); + __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + __ bind(&function); + } + + // 5b. Get the code to call from the function and check that the number of + // expected arguments matches what we're providing. If so, jump + // (tail-call) to the code in register edx without checking arguments. 
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(ebx, + FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); + __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); + __ SmiUntag(ebx); + __ cmp(eax, ebx); + __ j(not_equal, + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline()); + + ParameterCount expected(0); + __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper()); +} + + +void Builtins::Generate_FunctionApply(MacroAssembler* masm) { + static const int kArgumentsOffset = 2 * kPointerSize; + static const int kReceiverOffset = 3 * kPointerSize; + static const int kFunctionOffset = 4 * kPointerSize; + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + + __ push(Operand(ebp, kFunctionOffset)); // push this + __ push(Operand(ebp, kArgumentsOffset)); // push arguments + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + ExternalReference real_stack_limit = + ExternalReference::address_of_real_stack_limit(masm->isolate()); + __ mov(edi, Operand::StaticVariable(real_stack_limit)); + // Make ecx the space we have left. The stack might already be overflowed + // here which will cause ecx to become negative. + __ mov(ecx, esp); + __ sub(ecx, edi); + // Make edx the space we need for the array when it is unrolled onto the + // stack. + __ mov(edx, eax); + __ shl(edx, kPointerSizeLog2 - kSmiTagSize); + // Check if the arguments will overflow the stack. + __ cmp(ecx, edx); + __ j(greater, &okay); // Signed comparison. + + // Out of stack space. + __ push(Operand(ebp, 4 * kPointerSize)); // push this + __ push(eax); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&okay); + // End of stack check. + + // Push current index and limit. 
+ const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; + const int kIndexOffset = kLimitOffset - 1 * kPointerSize; + __ push(eax); // limit + __ push(Immediate(0)); // index + + // Get the receiver. + __ mov(ebx, Operand(ebp, kReceiverOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver, use_global_proxy; + __ mov(edi, Operand(ebp, kFunctionOffset)); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &push_receiver); + + // Change context eagerly to get the right global object if necessary. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object; + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &push_receiver); + + Factory* factory = masm->isolate()->factory(); + + // Do not transform the receiver for natives (shared already in ecx). + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, &push_receiver); + + // Compute the receiver in sloppy mode. + // Call ToObject on the receiver if it is not an object, or use the + // global object if it is null or undefined. 
+ __ JumpIfSmi(ebx, &call_to_object); + __ cmp(ebx, factory->null_value()); + __ j(equal, &use_global_proxy); + __ cmp(ebx, factory->undefined_value()); + __ j(equal, &use_global_proxy); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &push_receiver); + + __ bind(&call_to_object); + __ push(ebx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(ebx, eax); + __ jmp(&push_receiver); + + __ bind(&use_global_proxy); + __ mov(ebx, + Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset)); + + // Push the receiver. + __ bind(&push_receiver); + __ push(ebx); + + // Copy all arguments from the array to the stack. + Label entry, loop; + Register receiver = LoadIC::ReceiverRegister(); + Register key = LoadIC::NameRegister(); + __ mov(key, Operand(ebp, kIndexOffset)); + __ jmp(&entry); + __ bind(&loop); + __ mov(receiver, Operand(ebp, kArgumentsOffset)); // load arguments + + // Use inline caching to speed up access to arguments. + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), Immediate(Smi::FromInt(0))); + } + Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); + __ call(ic, RelocInfo::CODE_TARGET); + // It is important that we do not have a test instruction after the + // call. A test instruction after the call is used to indicate that + // we have generated an inline version of the keyed load. In this + // case, we know that we are not generating a test instruction next. + + // Push the nth argument. + __ push(eax); + + // Update the index on the stack and in register key. + __ mov(key, Operand(ebp, kIndexOffset)); + __ add(key, Immediate(1 << kSmiTagSize)); + __ mov(Operand(ebp, kIndexOffset), key); + + __ bind(&entry); + __ cmp(key, Operand(ebp, kLimitOffset)); + __ j(not_equal, &loop); + + // Call the function. 
+ Label call_proxy; + ParameterCount actual(eax); + __ Move(eax, key); + __ SmiUntag(eax); + __ mov(edi, Operand(ebp, kFunctionOffset)); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &call_proxy); + __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper()); + + frame_scope.GenerateLeaveFrame(); + __ ret(3 * kPointerSize); // remove this, receiver, and arguments + + // Call the function proxy. + __ bind(&call_proxy); + __ push(edi); // add function proxy as last argument + __ inc(eax); + __ Move(ebx, Immediate(0)); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); + __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + // Leave internal frame. + } + __ ret(3 * kPointerSize); // remove this, receiver, and arguments +} + + +void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : argc + // -- esp[0] : return address + // -- esp[4] : last argument + // ----------------------------------- + Label generic_array_code; + + // Get the InternalArray function. + __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi); + + if (FLAG_debug_code) { + // Initial map for the builtin InternalArray function should be a map. + __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ test(ebx, Immediate(kSmiTagMask)); + __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction); + __ CmpObjectType(ebx, MAP_TYPE, ecx); + __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction); + } + + // Run the native code for the InternalArray function called as a normal + // function. 
+ // tail call a stub + InternalArrayConstructorStub stub(masm->isolate()); + __ TailCallStub(&stub); +} + + +void Builtins::Generate_ArrayCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : argc + // -- esp[0] : return address + // -- esp[4] : last argument + // ----------------------------------- + Label generic_array_code; + + // Get the Array function. + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi); + + if (FLAG_debug_code) { + // Initial map for the builtin Array function should be a map. + __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ test(ebx, Immediate(kSmiTagMask)); + __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction); + __ CmpObjectType(ebx, MAP_TYPE, ecx); + __ Assert(equal, kUnexpectedInitialMapForArrayFunction); + } + + // Run the native code for the Array function called as a normal function. + // tail call a stub + __ mov(ebx, masm->isolate()->factory()->undefined_value()); + ArrayConstructorStub stub(masm->isolate()); + __ TailCallStub(&stub); +} + + +void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : number of arguments + // -- edi : constructor function + // -- esp[0] : return address + // -- esp[(argc - n) * 4] : arg[n] (zero-based) + // -- esp[(argc + 1) * 4] : receiver + // ----------------------------------- + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->string_ctor_calls(), 1); + + if (FLAG_debug_code) { + __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx); + __ cmp(edi, ecx); + __ Assert(equal, kUnexpectedStringFunction); + } + + // Load the first argument into eax and get rid of the rest + // (including the receiver). 
+ Label no_arguments; + __ test(eax, eax); + __ j(zero, &no_arguments); + __ mov(ebx, Operand(esp, eax, times_pointer_size, 0)); + __ pop(ecx); + __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize)); + __ push(ecx); + __ mov(eax, ebx); + + // Lookup the argument in the number to string cache. + Label not_cached, argument_is_string; + __ LookupNumberStringCache(eax, // Input. + ebx, // Result. + ecx, // Scratch 1. + edx, // Scratch 2. + ¬_cached); + __ IncrementCounter(counters->string_ctor_cached_number(), 1); + __ bind(&argument_is_string); + // ----------- S t a t e ------------- + // -- ebx : argument converted to string + // -- edi : constructor function + // -- esp[0] : return address + // ----------------------------------- + + // Allocate a JSValue and put the tagged pointer into eax. + Label gc_required; + __ Allocate(JSValue::kSize, + eax, // Result. + ecx, // New allocation top (we ignore it). + no_reg, + &gc_required, + TAG_OBJECT); + + // Set the map. + __ LoadGlobalFunctionInitialMap(edi, ecx); + if (FLAG_debug_code) { + __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset), + JSValue::kSize >> kPointerSizeLog2); + __ Assert(equal, kUnexpectedStringWrapperInstanceSize); + __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0); + __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper); + } + __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx); + + // Set properties and elements. + Factory* factory = masm->isolate()->factory(); + __ Move(ecx, Immediate(factory->empty_fixed_array())); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx); + + // Set the value. + __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx); + + // Ensure the object is fully initialized. + STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); + + // We're done. Return. + __ ret(0); + + // The argument was not found in the number to string cache. 
Check + // if it's a string already before calling the conversion builtin. + Label convert_argument; + __ bind(¬_cached); + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(eax, &convert_argument); + Condition is_string = masm->IsObjectStringType(eax, ebx, ecx); + __ j(NegateCondition(is_string), &convert_argument); + __ mov(ebx, eax); + __ IncrementCounter(counters->string_ctor_string_value(), 1); + __ jmp(&argument_is_string); + + // Invoke the conversion builtin and put the result into ebx. + __ bind(&convert_argument); + __ IncrementCounter(counters->string_ctor_conversions(), 1); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edi); // Preserve the function. + __ push(eax); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + __ pop(edi); + } + __ mov(ebx, eax); + __ jmp(&argument_is_string); + + // Load the empty string into ebx, remove the receiver from the + // stack, and jump back to the case where the argument is a string. + __ bind(&no_arguments); + __ Move(ebx, Immediate(factory->empty_string())); + __ pop(ecx); + __ lea(esp, Operand(esp, kPointerSize)); + __ push(ecx); + __ jmp(&argument_is_string); + + // At this point the argument is already a string. Call runtime to + // create a string wrapper. + __ bind(&gc_required); + __ IncrementCounter(counters->string_ctor_gc_required(), 1); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(ebx); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } + __ ret(0); +} + + +static void ArgumentsAdaptorStackCheck(MacroAssembler* masm, + Label* stack_overflow) { + // ----------- S t a t e ------------- + // -- eax : actual number of arguments + // -- ebx : expected number of arguments + // -- edi : function (passed through to callee) + // ----------------------------------- + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. 
+ ExternalReference real_stack_limit = + ExternalReference::address_of_real_stack_limit(masm->isolate()); + __ mov(edx, Operand::StaticVariable(real_stack_limit)); + // Make ecx the space we have left. The stack might already be overflowed + // here which will cause ecx to become negative. + __ mov(ecx, esp); + __ sub(ecx, edx); + // Make edx the space we need for the array when it is unrolled onto the + // stack. + __ mov(edx, ebx); + __ shl(edx, kPointerSizeLog2); + // Check if the arguments will overflow the stack. + __ cmp(ecx, edx); + __ j(less_equal, stack_overflow); // Signed comparison. +} + + +static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { + __ push(ebp); + __ mov(ebp, esp); + + // Store the arguments adaptor context sentinel. + __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Push the function on the stack. + __ push(edi); + + // Preserve the number of arguments on the stack. Must preserve eax, + // ebx and ecx because these registers are used when copying the + // arguments and the receiver. + STATIC_ASSERT(kSmiTagSize == 1); + __ lea(edi, Operand(eax, eax, times_1, kSmiTag)); + __ push(edi); +} + + +static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { + // Retrieve the number of arguments from the stack. + __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset)); + + // Leave the frame. + __ leave(); + + // Remove caller arguments from the stack. 
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ pop(ecx); + __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver + __ push(ecx); +} + + +void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : actual number of arguments + // -- ebx : expected number of arguments + // -- edi : function (passed through to callee) + // ----------------------------------- + + Label invoke, dont_adapt_arguments; + __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1); + + Label stack_overflow; + ArgumentsAdaptorStackCheck(masm, &stack_overflow); + + Label enough, too_few; + __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); + __ cmp(eax, ebx); + __ j(less, &too_few); + __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel); + __ j(equal, &dont_adapt_arguments); + + { // Enough parameters: Actual >= expected. + __ bind(&enough); + EnterArgumentsAdaptorFrame(masm); + + // Copy receiver and all expected arguments. + const int offset = StandardFrameConstants::kCallerSPOffset; + __ lea(eax, Operand(ebp, eax, times_4, offset)); + __ mov(edi, -1); // account for receiver + + Label copy; + __ bind(©); + __ inc(edi); + __ push(Operand(eax, 0)); + __ sub(eax, Immediate(kPointerSize)); + __ cmp(edi, ebx); + __ j(less, ©); + __ jmp(&invoke); + } + + { // Too few parameters: Actual < expected. + __ bind(&too_few); + EnterArgumentsAdaptorFrame(masm); + + // Copy receiver and all actual arguments. + const int offset = StandardFrameConstants::kCallerSPOffset; + __ lea(edi, Operand(ebp, eax, times_4, offset)); + // ebx = expected - actual. + __ sub(ebx, eax); + // eax = -actual - 1 + __ neg(eax); + __ sub(eax, Immediate(1)); + + Label copy; + __ bind(©); + __ inc(eax); + __ push(Operand(edi, 0)); + __ sub(edi, Immediate(kPointerSize)); + __ test(eax, eax); + __ j(not_zero, ©); + + // Fill remaining expected arguments with undefined values. 
+ Label fill; + __ bind(&fill); + __ inc(eax); + __ push(Immediate(masm->isolate()->factory()->undefined_value())); + __ cmp(eax, ebx); + __ j(less, &fill); + } + + // Call the entry point. + __ bind(&invoke); + // Restore function pointer. + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ call(edx); + + // Store offset of return address for deoptimizer. + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); + + // Leave frame and return. + LeaveArgumentsAdaptorFrame(masm); + __ ret(0); + + // ------------------------------------------- + // Dont adapt arguments. + // ------------------------------------------- + __ bind(&dont_adapt_arguments); + __ jmp(edx); + + __ bind(&stack_overflow); + { + FrameScope frame(masm, StackFrame::MANUAL); + EnterArgumentsAdaptorFrame(masm); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ int3(); + } +} + + +void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { + // Lookup the function in the JavaScript frame. + __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Pass function as argument. + __ push(eax); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } + + Label skip; + // If the code object is null, just return to the unoptimized code. + __ cmp(eax, Immediate(0)); + __ j(not_equal, &skip, Label::kNear); + __ ret(0); + + __ bind(&skip); + + // Load deoptimization data from the code object. + __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag)); + + // Load the OSR entrypoint offset from the deoptimization data. 
+ __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt( + DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); + __ SmiUntag(ebx); + + // Compute the target address = code_obj + header_size + osr_offset + __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag)); + + // Overwrite the return address on the stack. + __ mov(Operand(esp, 0), eax); + + // And "return" to the OSR entry point of the function. + __ ret(0); +} + + +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { + // We check the stack limit as indicator that recompilation might be done. + Label ok; + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(masm->isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kStackGuard, 0); + } + __ jmp(masm->isolate()->builtins()->OnStackReplacement(), + RelocInfo::CODE_TARGET); + + __ bind(&ok); + __ ret(0); +} + +#undef __ +} +} // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/codegen-x87.cc nodejs-0.11.15/deps/v8/src/x87/codegen-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/codegen-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/codegen-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,645 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/codegen.h" +#include "src/heap/heap.h" +#include "src/macro-assembler.h" + +namespace v8 { +namespace internal { + + +// ------------------------------------------------------------------------- +// Platform-specific RuntimeCallHelper functions. 
+ +void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { + masm->EnterFrame(StackFrame::INTERNAL); + DCHECK(!masm->has_frame()); + masm->set_has_frame(true); +} + + +void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { + masm->LeaveFrame(StackFrame::INTERNAL); + DCHECK(masm->has_frame()); + masm->set_has_frame(false); +} + + +#define __ masm. + + +UnaryMathFunction CreateExpFunction() { + // No SSE2 support + return &std::exp; +} + + +UnaryMathFunction CreateSqrtFunction() { + // No SSE2 support + return &std::sqrt; +} + + +// Helper functions for CreateMemMoveFunction. +#undef __ +#define __ ACCESS_MASM(masm) + +enum Direction { FORWARD, BACKWARD }; +enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED }; + + +void MemMoveEmitPopAndReturn(MacroAssembler* masm) { + __ pop(esi); + __ pop(edi); + __ ret(0); +} + + +#undef __ +#define __ masm. + + +class LabelConverter { + public: + explicit LabelConverter(byte* buffer) : buffer_(buffer) {} + int32_t address(Label* l) const { + return reinterpret_cast<int32_t>(buffer_) + l->pos(); + } + private: + byte* buffer_; +}; + + +MemMoveFunction CreateMemMoveFunction() { + size_t actual_size; + // Allocate buffer in executable space. + byte* buffer = + static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return NULL; + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + LabelConverter conv(buffer); + + // Generated code is put into a fixed, unmovable buffer, and not into + // the V8 heap. We can't, and don't, refer to any relocatable addresses + // (e.g. the JavaScript nan-object). + + // 32-bit C declaration function calls pass arguments on stack. + + // Stack layout: + // esp[12]: Third argument, size. + // esp[8]: Second argument, source pointer. + // esp[4]: First argument, destination pointer. 
+ // esp[0]: return address + + const int kDestinationOffset = 1 * kPointerSize; + const int kSourceOffset = 2 * kPointerSize; + const int kSizeOffset = 3 * kPointerSize; + + int stack_offset = 0; // Update if we change the stack height. + + Label backward, backward_much_overlap; + Label forward_much_overlap, small_size, medium_size, pop_and_return; + __ push(edi); + __ push(esi); + stack_offset += 2 * kPointerSize; + Register dst = edi; + Register src = esi; + Register count = ecx; + __ mov(dst, Operand(esp, stack_offset + kDestinationOffset)); + __ mov(src, Operand(esp, stack_offset + kSourceOffset)); + __ mov(count, Operand(esp, stack_offset + kSizeOffset)); + + __ cmp(dst, src); + __ j(equal, &pop_and_return); + + // No SSE2. + Label forward; + __ cmp(count, 0); + __ j(equal, &pop_and_return); + __ cmp(dst, src); + __ j(above, &backward); + __ jmp(&forward); + { + // Simple forward copier. + Label forward_loop_1byte, forward_loop_4byte; + __ bind(&forward_loop_4byte); + __ mov(eax, Operand(src, 0)); + __ sub(count, Immediate(4)); + __ add(src, Immediate(4)); + __ mov(Operand(dst, 0), eax); + __ add(dst, Immediate(4)); + __ bind(&forward); // Entry point. + __ cmp(count, 3); + __ j(above, &forward_loop_4byte); + __ bind(&forward_loop_1byte); + __ cmp(count, 0); + __ j(below_equal, &pop_and_return); + __ mov_b(eax, Operand(src, 0)); + __ dec(count); + __ inc(src); + __ mov_b(Operand(dst, 0), eax); + __ inc(dst); + __ jmp(&forward_loop_1byte); + } + { + // Simple backward copier. 
+ Label backward_loop_1byte, backward_loop_4byte, entry_shortcut; + __ bind(&backward); + __ add(src, count); + __ add(dst, count); + __ cmp(count, 3); + __ j(below_equal, &entry_shortcut); + + __ bind(&backward_loop_4byte); + __ sub(src, Immediate(4)); + __ sub(count, Immediate(4)); + __ mov(eax, Operand(src, 0)); + __ sub(dst, Immediate(4)); + __ mov(Operand(dst, 0), eax); + __ cmp(count, 3); + __ j(above, &backward_loop_4byte); + __ bind(&backward_loop_1byte); + __ cmp(count, 0); + __ j(below_equal, &pop_and_return); + __ bind(&entry_shortcut); + __ dec(src); + __ dec(count); + __ mov_b(eax, Operand(src, 0)); + __ dec(dst); + __ mov_b(Operand(dst, 0), eax); + __ jmp(&backward_loop_1byte); + } + + __ bind(&pop_and_return); + MemMoveEmitPopAndReturn(&masm); + + CodeDesc desc; + masm.GetCode(&desc); + DCHECK(!RelocInfo::RequiresRelocation(desc)); + CpuFeatures::FlushICache(buffer, actual_size); + base::OS::ProtectCode(buffer, actual_size); + // TODO(jkummerow): It would be nice to register this code creation event + // with the PROFILE / GDBJIT system. + return FUNCTION_CAST<MemMoveFunction>(buffer); +} + + +#undef __ + +// ------------------------------------------------------------------------- +// Code generators + +#define __ ACCESS_MASM(masm) + + +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* allocation_memento_found) { + Register scratch = edi; + DCHECK(!AreAliased(receiver, key, value, target_map, scratch)); + + if (mode == TRACK_ALLOCATION_SITE) { + DCHECK(allocation_memento_found != NULL); + __ JumpIfJSArrayHasAllocationMemento( + receiver, scratch, allocation_memento_found); + } + + // Set transitioned map. 
+ __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map); + __ RecordWriteField(receiver, + HeapObject::kMapOffset, + target_map, + scratch, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); +} + + +void ElementsTransitionGenerator::GenerateSmiToDouble( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Return address is on the stack. + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + DCHECK(value.is(eax)); + DCHECK(target_map.is(ebx)); + + Label loop, entry, convert_hole, gc_required, only_change_map; + + if (mode == TRACK_ALLOCATION_SITE) { + __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail); + } + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); + __ j(equal, &only_change_map); + + __ push(eax); + __ push(ebx); + + __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset)); + + // Allocate new FixedDoubleArray. + // edx: receiver + // edi: length of source FixedArray (smi-tagged) + AllocationFlags flags = + static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT); + __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi, + REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags); + + // eax: destination FixedDoubleArray + // edi: number of elements + // edx: receiver + __ mov(FieldOperand(eax, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->fixed_double_array_map())); + __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi); + __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset)); + // Replace receiver's backing store with newly created FixedDoubleArray. 
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); + __ mov(ebx, eax); + __ RecordWriteField(edx, + JSObject::kElementsOffset, + ebx, + edi, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset)); + + // Prepare for conversion loop. + ExternalReference canonical_the_hole_nan_reference = + ExternalReference::address_of_the_hole_nan(); + __ jmp(&entry); + + // Call into runtime if GC is required. + __ bind(&gc_required); + // Restore registers before jumping into runtime. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ pop(ebx); + __ pop(eax); + __ jmp(fail); + + // Convert and copy elements + // esi: source FixedArray + __ bind(&loop); + __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize)); + // ebx: current element from source + // edi: index of current element + __ JumpIfNotSmi(ebx, &convert_hole); + + // Normal smi, convert it to double and store. + __ SmiUntag(ebx); + __ push(ebx); + __ fild_s(Operand(esp, 0)); + __ pop(ebx); + __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); + __ jmp(&entry); + + // Found hole, store hole_nan_as_double instead. + __ bind(&convert_hole); + + if (FLAG_debug_code) { + __ cmp(ebx, masm->isolate()->factory()->the_hole_value()); + __ Assert(equal, kObjectFoundInSmiOnlyArray); + } + + __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference)); + __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); + + __ bind(&entry); + __ sub(edi, Immediate(Smi::FromInt(1))); + __ j(not_sign, &loop); + + __ pop(ebx); + __ pop(eax); + + // Restore esi. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + + __ bind(&only_change_map); + // eax: value + // ebx: target map + // Set transitioned map. 
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); +} + + +void ElementsTransitionGenerator::GenerateDoubleToObject( + MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register target_map, + AllocationSiteMode mode, + Label* fail) { + // Return address is on the stack. + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + DCHECK(value.is(eax)); + DCHECK(target_map.is(ebx)); + + Label loop, entry, convert_hole, gc_required, only_change_map, success; + + if (mode == TRACK_ALLOCATION_SITE) { + __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail); + } + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); + __ j(equal, &only_change_map); + + __ push(eax); + __ push(edx); + __ push(ebx); + + __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset)); + + // Allocate new FixedArray. + // ebx: length of source FixedDoubleArray (smi-tagged) + __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize)); + __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT); + + // eax: destination FixedArray + // ebx: number of elements + __ mov(FieldOperand(eax, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->fixed_array_map())); + __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx); + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + + __ jmp(&entry); + + // ebx: target map + // edx: receiver + // Set transitioned map. + __ bind(&only_change_map); + __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ jmp(&success); + + // Call into runtime if GC is required. 
+ __ bind(&gc_required); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ pop(ebx); + __ pop(edx); + __ pop(eax); + __ jmp(fail); + + // Box doubles into heap numbers. + // edi: source FixedDoubleArray + // eax: destination FixedArray + __ bind(&loop); + // ebx: index of current element (smi-tagged) + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32)); + __ j(equal, &convert_hole); + + // Non-hole double, copy value into a heap number. + __ AllocateHeapNumber(edx, esi, no_reg, &gc_required); + // edx: new heap number + __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); + __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi); + __ mov(esi, FieldOperand(edi, ebx, times_4, offset)); + __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi); + __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx); + __ mov(esi, ebx); + __ RecordWriteArray(eax, + edx, + esi, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ jmp(&entry, Label::kNear); + + // Replace the-hole NaN with the-hole pointer. + __ bind(&convert_hole); + __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), + masm->isolate()->factory()->the_hole_value()); + + __ bind(&entry); + __ sub(ebx, Immediate(Smi::FromInt(1))); + __ j(not_sign, &loop); + + __ pop(ebx); + __ pop(edx); + // ebx: target map + // edx: receiver + // Set transitioned map. + __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + // Replace receiver's backing store with newly created and filled FixedArray. + __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); + __ RecordWriteField(edx, + JSObject::kElementsOffset, + eax, + edi, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + // Restore registers. 
+ __ pop(eax); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + + __ bind(&success); +} + + +void StringCharLoadGenerator::Generate(MacroAssembler* masm, + Factory* factory, + Register string, + Register index, + Register result, + Label* call_runtime) { + // Fetch the instance type of the receiver into result register. + __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for indirect strings. + Label check_sequential; + __ test(result, Immediate(kIsIndirectStringMask)); + __ j(zero, &check_sequential, Label::kNear); + + // Dispatch on the indirect string shape: slice or cons. + Label cons_string; + __ test(result, Immediate(kSlicedNotConsMask)); + __ j(zero, &cons_string, Label::kNear); + + // Handle slices. + Label indirect_string_loaded; + __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset)); + __ SmiUntag(result); + __ add(index, result); + __ mov(string, FieldOperand(string, SlicedString::kParentOffset)); + __ jmp(&indirect_string_loaded, Label::kNear); + + // Handle cons strings. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ bind(&cons_string); + __ cmp(FieldOperand(string, ConsString::kSecondOffset), + Immediate(factory->empty_string())); + __ j(not_equal, call_runtime); + __ mov(string, FieldOperand(string, ConsString::kFirstOffset)); + + __ bind(&indirect_string_loaded); + __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); + + // Distinguish sequential and external strings. Only these two string + // representations can reach here (slices and flat cons strings have been + // reduced to the underlying sequential or external string). 
+ Label seq_string; + __ bind(&check_sequential); + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result, Immediate(kStringRepresentationMask)); + __ j(zero, &seq_string, Label::kNear); + + // Handle external strings. + Label ascii_external, done; + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. + __ test(result, Immediate(kIsIndirectStringMask)); + __ Assert(zero, kExternalStringExpectedButNotFound); + } + // Rule out short external strings. + STATIC_ASSERT(kShortExternalStringTag != 0); + __ test_b(result, kShortExternalStringMask); + __ j(not_zero, call_runtime); + // Check encoding. + STATIC_ASSERT(kTwoByteStringTag == 0); + __ test_b(result, kStringEncodingMask); + __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset)); + __ j(not_equal, &ascii_external, Label::kNear); + // Two-byte string. + __ movzx_w(result, Operand(result, index, times_2, 0)); + __ jmp(&done, Label::kNear); + __ bind(&ascii_external); + // Ascii string. + __ movzx_b(result, Operand(result, index, times_1, 0)); + __ jmp(&done, Label::kNear); + + // Dispatch on the encoding: ASCII or two-byte. + Label ascii; + __ bind(&seq_string); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); + __ test(result, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii, Label::kNear); + + // Two-byte string. + // Load the two-byte character code into the result register. + __ movzx_w(result, FieldOperand(string, + index, + times_2, + SeqTwoByteString::kHeaderSize)); + __ jmp(&done, Label::kNear); + + // Ascii string. + // Load the byte into the result register. 
+ __ bind(&ascii); + __ movzx_b(result, FieldOperand(string, + index, + times_1, + SeqOneByteString::kHeaderSize)); + __ bind(&done); +} + + +#undef __ + + +CodeAgingHelper::CodeAgingHelper() { + DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); + CodePatcher patcher(young_sequence_.start(), young_sequence_.length()); + patcher.masm()->push(ebp); + patcher.masm()->mov(ebp, esp); + patcher.masm()->push(esi); + patcher.masm()->push(edi); +} + + +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return *candidate == kCallOpcode; +} +#endif + + +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); + return result; +} + + +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, + MarkingParity* parity) { + if (IsYoungSequence(isolate, sequence)) { + *age = kNoAgeCodeAge; + *parity = NO_MARKING_PARITY; + } else { + sequence++; // Skip the kCallOpcode byte + Address target_address = sequence + *reinterpret_cast<int*>(sequence) + + Assembler::kCallTargetAddressOffset; + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(Isolate* isolate, + byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); + if (age == kNoAgeCodeAge) { + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); + CpuFeatures::FlushICache(sequence, young_length); + } else { + Code* stub = GetCodeAgeStub(isolate, age, parity); + CodePatcher patcher(sequence, young_length); + patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); + } +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/codegen-x87.h nodejs-0.11.15/deps/v8/src/x87/codegen-x87.h --- 
nodejs-0.11.13/deps/v8/src/x87/codegen-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/codegen-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_CODEGEN_X87_H_ +#define V8_X87_CODEGEN_X87_H_ + +#include "src/ast.h" +#include "src/ic-inl.h" + +namespace v8 { +namespace internal { + + +class StringCharLoadGenerator : public AllStatic { + public: + // Generates the code for handling different string types and loading the + // indexed character into |result|. We expect |index| as untagged input and + // |result| as untagged output. + static void Generate(MacroAssembler* masm, + Factory* factory, + Register string, + Register index, + Register result, + Label* call_runtime); + + private: + DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); +}; + +} } // namespace v8::internal + +#endif // V8_X87_CODEGEN_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/code-stubs-x87.cc nodejs-0.11.15/deps/v8/src/x87/code-stubs-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/code-stubs-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/code-stubs-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4654 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/bootstrapper.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/isolate.h" +#include "src/jsregexp.h" +#include "src/regexp-macro-assembler.h" +#include "src/runtime.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + + +void FastNewClosureStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, ebx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry); +} + + +void FastNewContextStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, edi }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void ToNumberStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + // ToNumberStub invokes a function, and therefore needs a context. 
+ Register registers[] = { esi, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void NumberToStringStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, eax }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry); +} + + +void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, eax, ebx, ecx }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Smi(), + Representation::Tagged() }; + + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry, + representations); +} + + +void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, eax, ebx, ecx, edx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry); +} + + +void CreateAllocationSiteStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, ebx, edx }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void CallFunctionStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = {esi, edi}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void CallConstructStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + // eax : number of arguments + // ebx : feedback vector + // edx : (only if ebx is not the megamorphic symbol) slot in feedback + // vector (Smi) + // edi : constructor function + // TODO(turbofan): So far we don't gather type feedback and hence skip 
the + // slot parameter, but ArrayConstructStub needs the vector to be undefined. + Register registers[] = {esi, eax, edi, ebx}; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers); +} + + +void RegExpConstructResultStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, ecx, ebx, eax }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry); +} + + +void TransitionElementsKindStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, eax, ebx }; + descriptor->Initialize( + MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry); +} + + +const Register InterfaceDescriptor::ContextRegister() { return esi; } + + +static void InitializeArrayConstructorDescriptor( + Isolate* isolate, CodeStub::Major major, + CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { + // register state + // eax -- number of arguments + // edi -- function + // ebx -- allocation site with elements kind + Address deopt_handler = Runtime::FunctionForId( + Runtime::kArrayConstructor)->entry; + + if (constant_stack_parameter_count == 0) { + Register registers[] = { esi, edi, ebx }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); + } else { + // stack param count needs (constructor pointer, and single argument) + Register registers[] = { esi, edi, ebx, eax }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); + } +} + + 
+static void InitializeInternalArrayConstructorDescriptor( + CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { + // register state + // eax -- number of arguments + // edi -- constructor function + Address deopt_handler = Runtime::FunctionForId( + Runtime::kInternalArrayConstructor)->entry; + + if (constant_stack_parameter_count == 0) { + Register registers[] = { esi, edi }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, + deopt_handler, NULL, constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE); + } else { + // stack param count needs (constructor pointer, and single argument) + Register registers[] = { esi, edi, eax }; + Representation representations[] = { + Representation::Tagged(), + Representation::Tagged(), + Representation::Integer32() }; + descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax, + deopt_handler, representations, + constant_stack_parameter_count, + JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS); + } +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 0); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 1); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, -1); +} + + +void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0); +} + + +void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + 
InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1); +} + + +void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1); +} + + +void CompareNilICStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(CompareNilIC_Miss)); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate())); +} + +void ToBooleanStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(ToBooleanIC_Miss)); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate())); +} + + +void BinaryOpICStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, edx, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_Miss)); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate())); +} + + +void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, ecx, edx, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite)); +} + + +void StringAddStub::InitializeInterfaceDescriptor( + CodeStubInterfaceDescriptor* descriptor) { + Register registers[] = { esi, edx, eax }; + descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, + Runtime::FunctionForId(Runtime::kStringAdd)->entry); +} + + +void CallDescriptors::InitializeForIsolate(Isolate* isolate) { + { + CallInterfaceDescriptor* 
descriptor = + isolate->call_descriptor(Isolate::ArgumentAdaptorCall); + Register registers[] = { esi, // context + edi, // JSFunction + eax, // actual number of arguments + ebx, // expected number of arguments + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // JSFunction + Representation::Integer32(), // actual number of arguments + Representation::Integer32(), // expected number of arguments + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::KeyedCall); + Register registers[] = { esi, // context + ecx, // key + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // key + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::NamedCall); + Register registers[] = { esi, // context + ecx, // name + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // name + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::CallHandler); + Register registers[] = { esi, // context + edx, // name + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // receiver + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } + { + CallInterfaceDescriptor* descriptor = + isolate->call_descriptor(Isolate::ApiFunctionCall); + Register registers[] = { esi, // context + eax, // callee + ebx, // call_data + ecx, // holder + edx, // api_function_address + }; + Representation representations[] = { + Representation::Tagged(), // context + Representation::Tagged(), // callee + Representation::Tagged(), // 
call_data + Representation::Tagged(), // holder + Representation::External(), // api_function_address + }; + descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); + } +} + + +#define __ ACCESS_MASM(masm) + + +void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { + // Update the static counter each time a new code stub is generated. + isolate()->counters()->code_stubs()->Increment(); + + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); + int param_count = descriptor->GetEnvironmentParameterCount(); + { + // Call the runtime system in a fresh internal frame. + FrameScope scope(masm, StackFrame::INTERNAL); + DCHECK(param_count == 0 || + eax.is(descriptor->GetEnvironmentParameterRegister( + param_count - 1))); + // Push arguments + for (int i = 0; i < param_count; ++i) { + __ push(descriptor->GetEnvironmentParameterRegister(i)); + } + ExternalReference miss = descriptor->miss_handler(); + __ CallExternalReference(miss, param_count); + } + + __ ret(0); +} + + +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ pushad(); + const int argument_count = 1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, ecx); + __ mov(Operand(esp, 0 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(isolate()), + argument_count); + __ popad(); + __ ret(0); +} + + +class FloatingPointHelper : public AllStatic { + public: + enum ArgLocation { + ARGS_ON_STACK, + ARGS_IN_REGISTERS + }; + + // Code pattern for loading a floating point value. Input value must + // be either a smi or a heap number object (fp value). Requirements: + // operand in register number. 
Returns operand as floating point number + // on FPU stack. + static void LoadFloatOperand(MacroAssembler* masm, Register number); + + // Test if operands are smi or number objects (fp). Requirements: + // operand_1 in eax, operand_2 in edx; falls through on float + // operands, jumps to the non_float label otherwise. + static void CheckFloatOperands(MacroAssembler* masm, + Label* non_float, + Register scratch); +}; + + +void DoubleToIStub::Generate(MacroAssembler* masm) { + Register input_reg = this->source(); + Register final_result_reg = this->destination(); + DCHECK(is_truncating()); + + Label check_negative, process_64_bits, done, done_no_stash; + + int double_offset = offset(); + + // Account for return address and saved regs if input is esp. + if (input_reg.is(esp)) double_offset += 3 * kPointerSize; + + MemOperand mantissa_operand(MemOperand(input_reg, double_offset)); + MemOperand exponent_operand(MemOperand(input_reg, + double_offset + kDoubleSize / 2)); + + Register scratch1; + { + Register scratch_candidates[3] = { ebx, edx, edi }; + for (int i = 0; i < 3; i++) { + scratch1 = scratch_candidates[i]; + if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break; + } + } + // Since we must use ecx for shifts below, use some other register (eax) + // to calculate the result if ecx is the requested return register. + Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg; + // Save ecx if it isn't the return register and therefore volatile, or if it + // is the return register, then save the temp register we use in its stead for + // the result. + Register save_reg = final_result_reg.is(ecx) ? 
eax : ecx; + __ push(scratch1); + __ push(save_reg); + + bool stash_exponent_copy = !input_reg.is(esp); + __ mov(scratch1, mantissa_operand); + __ mov(ecx, exponent_operand); + if (stash_exponent_copy) __ push(ecx); + + __ and_(ecx, HeapNumber::kExponentMask); + __ shr(ecx, HeapNumber::kExponentShift); + __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias)); + __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits)); + __ j(below, &process_64_bits); + + // Result is entirely in lower 32-bits of mantissa + int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize; + __ sub(ecx, Immediate(delta)); + __ xor_(result_reg, result_reg); + __ cmp(ecx, Immediate(31)); + __ j(above, &done); + __ shl_cl(scratch1); + __ jmp(&check_negative); + + __ bind(&process_64_bits); + // Result must be extracted from shifted 32-bit mantissa + __ sub(ecx, Immediate(delta)); + __ neg(ecx); + if (stash_exponent_copy) { + __ mov(result_reg, MemOperand(esp, 0)); + } else { + __ mov(result_reg, exponent_operand); + } + __ and_(result_reg, + Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32))); + __ add(result_reg, + Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32))); + __ shrd(result_reg, scratch1); + __ shr_cl(result_reg); + __ test(ecx, Immediate(32)); + { + Label skip_mov; + __ j(equal, &skip_mov, Label::kNear); + __ mov(scratch1, result_reg); + __ bind(&skip_mov); + } + + // If the double was negative, negate the integer result. 
+ __ bind(&check_negative); + __ mov(result_reg, scratch1); + __ neg(result_reg); + if (stash_exponent_copy) { + __ cmp(MemOperand(esp, 0), Immediate(0)); + } else { + __ cmp(exponent_operand, Immediate(0)); + } + { + Label skip_mov; + __ j(less_equal, &skip_mov, Label::kNear); + __ mov(result_reg, scratch1); + __ bind(&skip_mov); + } + + // Restore registers + __ bind(&done); + if (stash_exponent_copy) { + __ add(esp, Immediate(kDoubleSize / 2)); + } + __ bind(&done_no_stash); + if (!final_result_reg.is(result_reg)) { + DCHECK(final_result_reg.is(ecx)); + __ mov(final_result_reg, result_reg); + } + __ pop(save_reg); + __ pop(scratch1); + __ ret(0); +} + + +void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, + Register number) { + Label load_smi, done; + + __ JumpIfSmi(number, &load_smi, Label::kNear); + __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); + __ jmp(&done, Label::kNear); + + __ bind(&load_smi); + __ SmiUntag(number); + __ push(number); + __ fild_s(Operand(esp, 0)); + __ pop(number); + + __ bind(&done); +} + + +void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, + Label* non_float, + Register scratch) { + Label test_other, done; + // Test if both operands are floats or smi -> scratch=k_is_float; + // Otherwise scratch = k_not_float. + __ JumpIfSmi(edx, &test_other, Label::kNear); + __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); + Factory* factory = masm->isolate()->factory(); + __ cmp(scratch, factory->heap_number_map()); + __ j(not_equal, non_float); // argument in edx is not a number -> NaN + + __ bind(&test_other); + __ JumpIfSmi(eax, &done, Label::kNear); + __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(scratch, factory->heap_number_map()); + __ j(not_equal, non_float); // argument in eax is not a number -> NaN + + // Fall-through: Both operands are numbers. 
+ __ bind(&done); +} + + +void MathPowStub::Generate(MacroAssembler* masm) { + // No SSE2 support + UNREACHABLE(); +} + + +void FunctionPrototypeStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver = LoadIC::ReceiverRegister(); + + NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax, + ebx, &miss); + __ bind(&miss); + PropertyAccessCompiler::TailCallBuiltin( + masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); +} + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The key is in edx and the parameter count is in eax. + + // The displacement is used for skipping the frame pointer on the + // stack. It is the offset of the last parameter (if any) relative + // to the frame pointer. + static const int kDisplacement = 1 * kPointerSize; + + // Check that the key is a smi. + Label slow; + __ JumpIfNotSmi(edx, &slow, Label::kNear); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor, Label::kNear); + + // Check index against formal parameters count limit passed in + // through register eax. Use unsigned comparison to get negative + // check for free. + __ cmp(edx, eax); + __ j(above_equal, &slow, Label::kNear); + + // Read the argument from the stack and return it. + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. + __ lea(ebx, Operand(ebp, eax, times_2, 0)); + __ neg(edx); + __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); + __ ret(0); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. 
+ __ bind(&adaptor); + __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmp(edx, ecx); + __ j(above_equal, &slow, Label::kNear); + + // Read the argument from the stack and return it. + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. + __ lea(ebx, Operand(ebx, ecx, times_2, 0)); + __ neg(edx); + __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); + __ ret(0); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ pop(ebx); // Return address. + __ push(edx); + __ push(ebx); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { + // esp[0] : return address + // esp[4] : number of parameters + // esp[8] : receiver displacement + // esp[12] : function + + // Check if the calling frame is an arguments adaptor frame. + Label runtime; + __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &runtime, Label::kNear); + + // Patch the arguments.length and the parameters pointer. + __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(Operand(esp, 1 * kPointerSize), ecx); + __ lea(edx, Operand(edx, ecx, times_2, + StandardFrameConstants::kCallerSPOffset)); + __ mov(Operand(esp, 2 * kPointerSize), edx); + + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); +} + + +void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { + // esp[0] : return address + // esp[4] : number of parameters (tagged) + // esp[8] : receiver displacement + // esp[12] : function + + // ebx = parameter count (tagged) + __ mov(ebx, Operand(esp, 1 * kPointerSize)); + + // Check if the calling frame is an arguments adaptor frame. 
+ // TODO(rossberg): Factor out some of the bits that are shared with the other + // Generate* functions. + Label runtime; + Label adaptor_frame, try_allocate; + __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor_frame, Label::kNear); + + // No adaptor, parameter count = argument count. + __ mov(ecx, ebx); + __ jmp(&try_allocate, Label::kNear); + + // We have an adaptor frame. Patch the parameters pointer. + __ bind(&adaptor_frame); + __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ lea(edx, Operand(edx, ecx, times_2, + StandardFrameConstants::kCallerSPOffset)); + __ mov(Operand(esp, 2 * kPointerSize), edx); + + // ebx = parameter count (tagged) + // ecx = argument count (smi-tagged) + // esp[4] = parameter count (tagged) + // esp[8] = address of receiver argument + // Compute the mapped parameter count = min(ebx, ecx) in ebx. + __ cmp(ebx, ecx); + __ j(less_equal, &try_allocate, Label::kNear); + __ mov(ebx, ecx); + + __ bind(&try_allocate); + + // Save mapped parameter count. + __ push(ebx); + + // Compute the sizes of backing store, parameter map, and arguments object. + // 1. Parameter map, has 2 extra words containing context and backing store. + const int kParameterMapHeaderSize = + FixedArray::kHeaderSize + 2 * kPointerSize; + Label no_parameter_map; + __ test(ebx, ebx); + __ j(zero, &no_parameter_map, Label::kNear); + __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize)); + __ bind(&no_parameter_map); + + // 2. Backing store. + __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); + + // 3. Arguments object. + __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize)); + + // Do the allocation of all three objects in one go. 
+ __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT); + + // eax = address of new object(s) (tagged) + // ecx = argument count (smi-tagged) + // esp[0] = mapped parameter count (tagged) + // esp[8] = parameter count (tagged) + // esp[12] = address of receiver argument + // Get the arguments map from the current native context into edi. + Label has_mapped_parameters, instantiate; + __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset)); + __ mov(ebx, Operand(esp, 0 * kPointerSize)); + __ test(ebx, ebx); + __ j(not_zero, &has_mapped_parameters, Label::kNear); + __ mov( + edi, + Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX))); + __ jmp(&instantiate, Label::kNear); + + __ bind(&has_mapped_parameters); + __ mov( + edi, + Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX))); + __ bind(&instantiate); + + // eax = address of new object (tagged) + // ebx = mapped parameter count (tagged) + // ecx = argument count (smi-tagged) + // edi = address of arguments map (tagged) + // esp[0] = mapped parameter count (tagged) + // esp[8] = parameter count (tagged) + // esp[12] = address of receiver argument + // Copy the JS object part. + __ mov(FieldOperand(eax, JSObject::kMapOffset), edi); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), + masm->isolate()->factory()->empty_fixed_array()); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), + masm->isolate()->factory()->empty_fixed_array()); + + // Set up the callee in-object property. + STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); + __ mov(edx, Operand(esp, 4 * kPointerSize)); + __ AssertNotSmi(edx); + __ mov(FieldOperand(eax, JSObject::kHeaderSize + + Heap::kArgumentsCalleeIndex * kPointerSize), + edx); + + // Use the length (smi tagged) and set that as an in-object property too. 
+ __ AssertSmi(ecx); + STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); + __ mov(FieldOperand(eax, JSObject::kHeaderSize + + Heap::kArgumentsLengthIndex * kPointerSize), + ecx); + + // Set up the elements pointer in the allocated arguments object. + // If we allocated a parameter map, edi will point there, otherwise to the + // backing store. + __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize)); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); + + // eax = address of new object (tagged) + // ebx = mapped parameter count (tagged) + // ecx = argument count (tagged) + // edi = address of parameter map or backing store (tagged) + // esp[0] = mapped parameter count (tagged) + // esp[8] = parameter count (tagged) + // esp[12] = address of receiver argument + // Free a register. + __ push(eax); + + // Initialize parameter map. If there are no mapped arguments, we're done. + Label skip_parameter_map; + __ test(ebx, ebx); + __ j(zero, &skip_parameter_map); + + __ mov(FieldOperand(edi, FixedArray::kMapOffset), + Immediate(isolate()->factory()->sloppy_arguments_elements_map())); + __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2)))); + __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax); + __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi); + __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize)); + __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax); + + // Copy the parameter slots and the holes in the arguments. + // We need to fill in mapped_parameter_count slots. They index the context, + // where parameters are stored in reverse order, at + // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 + // The mapped parameter thus need to get indices + // MIN_CONTEXT_SLOTS+parameter_count-1 .. + // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count + // We loop from right to left. 
+ Label parameters_loop, parameters_test; + __ push(ecx); + __ mov(eax, Operand(esp, 2 * kPointerSize)); + __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); + __ add(ebx, Operand(esp, 4 * kPointerSize)); + __ sub(ebx, eax); + __ mov(ecx, isolate()->factory()->the_hole_value()); + __ mov(edx, edi); + __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize)); + // eax = loop variable (tagged) + // ebx = mapping index (tagged) + // ecx = the hole value + // edx = address of parameter map (tagged) + // edi = address of backing store (tagged) + // esp[0] = argument count (tagged) + // esp[4] = address of new object (tagged) + // esp[8] = mapped parameter count (tagged) + // esp[16] = parameter count (tagged) + // esp[20] = address of receiver argument + __ jmp(&parameters_test, Label::kNear); + + __ bind(&parameters_loop); + __ sub(eax, Immediate(Smi::FromInt(1))); + __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx); + __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx); + __ add(ebx, Immediate(Smi::FromInt(1))); + __ bind(&parameters_test); + __ test(eax, eax); + __ j(not_zero, &parameters_loop, Label::kNear); + __ pop(ecx); + + __ bind(&skip_parameter_map); + + // ecx = argument count (tagged) + // edi = address of backing store (tagged) + // esp[0] = address of new object (tagged) + // esp[4] = mapped parameter count (tagged) + // esp[12] = parameter count (tagged) + // esp[16] = address of receiver argument + // Copy arguments header and remaining slots (if there are any). + __ mov(FieldOperand(edi, FixedArray::kMapOffset), + Immediate(isolate()->factory()->fixed_array_map())); + __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); + + Label arguments_loop, arguments_test; + __ mov(ebx, Operand(esp, 1 * kPointerSize)); + __ mov(edx, Operand(esp, 4 * kPointerSize)); + __ sub(edx, ebx); // Is there a smarter way to do negative scaling? 
+ __ sub(edx, ebx); + __ jmp(&arguments_test, Label::kNear); + + __ bind(&arguments_loop); + __ sub(edx, Immediate(kPointerSize)); + __ mov(eax, Operand(edx, 0)); + __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax); + __ add(ebx, Immediate(Smi::FromInt(1))); + + __ bind(&arguments_test); + __ cmp(ebx, ecx); + __ j(less, &arguments_loop, Label::kNear); + + // Restore. + __ pop(eax); // Address of arguments object. + __ pop(ebx); // Parameter count. + + // Return and remove the on-stack parameters. + __ ret(3 * kPointerSize); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ pop(eax); // Remove saved parameter count. + __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count. + __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); +} + + +void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { + // esp[0] : return address + // esp[4] : number of parameters + // esp[8] : receiver displacement + // esp[12] : function + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor_frame, Label::kNear); + + // Get the length from the frame. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ jmp(&try_allocate, Label::kNear); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(Operand(esp, 1 * kPointerSize), ecx); + __ lea(edx, Operand(edx, ecx, times_2, + StandardFrameConstants::kCallerSPOffset)); + __ mov(Operand(esp, 2 * kPointerSize), edx); + + // Try the new space allocation. Start out with computing the size of + // the arguments object and the elements array. 
+ Label add_arguments_object; + __ bind(&try_allocate); + __ test(ecx, ecx); + __ j(zero, &add_arguments_object, Label::kNear); + __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); + __ bind(&add_arguments_object); + __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize)); + + // Do the allocation of both objects in one go. + __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); + + // Get the arguments map from the current native context. + __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset)); + const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX); + __ mov(edi, Operand(edi, offset)); + + __ mov(FieldOperand(eax, JSObject::kMapOffset), edi); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), + masm->isolate()->factory()->empty_fixed_array()); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), + masm->isolate()->factory()->empty_fixed_array()); + + // Get the length (smi tagged) and set that as an in-object property too. + STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ AssertSmi(ecx); + __ mov(FieldOperand(eax, JSObject::kHeaderSize + + Heap::kArgumentsLengthIndex * kPointerSize), + ecx); + + // If there are no actual arguments, we're done. + Label done; + __ test(ecx, ecx); + __ j(zero, &done, Label::kNear); + + // Get the parameters pointer from the stack. + __ mov(edx, Operand(esp, 2 * kPointerSize)); + + // Set up the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize)); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); + __ mov(FieldOperand(edi, FixedArray::kMapOffset), + Immediate(isolate()->factory()->fixed_array_map())); + + __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); + // Untag the length for the loop below. 
+ __ SmiUntag(ecx); + + // Copy the fixed array slots. + Label loop; + __ bind(&loop); + __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. + __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); + __ add(edi, Immediate(kPointerSize)); + __ sub(edx, Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &loop); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ ret(3 * kPointerSize); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); +#else // V8_INTERPRETED_REGEXP + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: last_match_info (expected JSArray) + // esp[8]: previous index + // esp[12]: subject string + // esp[16]: JSRegExp object + + static const int kLastMatchInfoOffset = 1 * kPointerSize; + static const int kPreviousIndexOffset = 2 * kPointerSize; + static const int kSubjectOffset = 3 * kPointerSize; + static const int kJSRegExpOffset = 4 * kPointerSize; + + Label runtime; + Factory* factory = isolate()->factory(); + + // Ensure that a RegExp stack is allocated. + ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(isolate()); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(isolate()); + __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); + __ test(ebx, ebx); + __ j(zero, &runtime); + + // Check that the first argument is a JSRegExp object. 
+ __ mov(eax, Operand(esp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(eax, &runtime); + __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); + __ j(not_equal, &runtime); + + // Check that the RegExp has been compiled (data contains a fixed array). + __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + __ test(ecx, Immediate(kSmiTagMask)); + __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected); + __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx); + __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected); + } + + // ecx: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); + __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); + __ j(not_equal, &runtime); + + // ecx: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); + // Check (number_of_captures + 1) * 2 <= offsets vector size + // Or number_of_captures * 2 <= offsets vector size - 2 + // Multiplying by 2 comes for free since edx is smi-tagged. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); + __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); + __ j(above, &runtime); + + // Reset offset for possibly sliced string. + __ Move(edi, Immediate(0)); + __ mov(eax, Operand(esp, kSubjectOffset)); + __ JumpIfSmi(eax, &runtime); + __ mov(edx, eax); // Make a copy of the original subject string. 
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + + // eax: subject string + // edx: subject string + // ebx: subject string instance type + // ecx: RegExp data (FixedArray) + // Handle subject string according to its encoding and representation: + // (1) Sequential two byte? If yes, go to (9). + // (2) Sequential one byte? If yes, go to (6). + // (3) Anything but sequential or cons? If yes, go to (7). + // (4) Cons string. If the string is flat, replace subject with first string. + // Otherwise bailout. + // (5a) Is subject sequential two byte? If yes, go to (9). + // (5b) Is subject external? If yes, go to (8). + // (6) One byte sequential. Load regexp code for one byte. + // (E) Carry on. + /// [...] + + // Deferred code at the end of the stub: + // (7) Not a long external string? If yes, go to (10). + // (8) External string. Make it, offset-wise, look like a sequential string. + // (8a) Is the external string one byte? If yes, go to (6). + // (9) Two byte sequential. Load regexp code for one byte. Go to (E). + // (10) Short external string or not a string? If yes, bail out to runtime. + // (11) Sliced string. Replace subject with parent. Go to (5a). + + Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */, + external_string /* 8 */, check_underlying /* 5a */, + not_seq_nor_cons /* 7 */, check_code /* E */, + not_long_external /* 10 */; + + // (1) Sequential two byte? If yes, go to (9). + __ and_(ebx, kIsNotStringMask | + kStringRepresentationMask | + kStringEncodingMask | + kShortExternalStringMask); + STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); // Go to (9). + + // (2) Sequential one byte? If yes, go to (6). + // Any other sequential string must be one byte. 
+ __ and_(ebx, Immediate(kIsNotStringMask | + kStringRepresentationMask | + kShortExternalStringMask)); + __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6). + + // (3) Anything but sequential or cons? If yes, go to (7). + // We check whether the subject string is a cons, since sequential strings + // have already been covered. + STATIC_ASSERT(kConsStringTag < kExternalStringTag); + STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); + STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); + STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); + __ cmp(ebx, Immediate(kExternalStringTag)); + __ j(greater_equal, ¬_seq_nor_cons); // Go to (7). + + // (4) Cons string. Check that it's flat. + // Replace subject with first string and reload instance type. + __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string()); + __ j(not_equal, &runtime); + __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); + __ bind(&check_underlying); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + + // (5a) Is subject sequential two byte? If yes, go to (9). + __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask); + STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); // Go to (9). + // (5b) Is subject external? If yes, go to (8). + __ test_b(ebx, kStringRepresentationMask); + // The underlying external string is never a short external string. + STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); + __ j(not_zero, &external_string); // Go to (8). + + // eax: sequential subject string (or look-alike, external string) + // edx: original subject string + // ecx: RegExp data (FixedArray) + // (6) One byte sequential. Load regexp code for one byte. 
+ __ bind(&seq_one_byte_string); + // Load previous index and check range before edx is overwritten. We have + // to use edx instead of eax here because it might have been only made to + // look like a sequential string when it actually is an external string. + __ mov(ebx, Operand(esp, kPreviousIndexOffset)); + __ JumpIfNotSmi(ebx, &runtime); + __ cmp(ebx, FieldOperand(edx, String::kLengthOffset)); + __ j(above_equal, &runtime); + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); + __ Move(ecx, Immediate(1)); // Type is one byte. + + // (E) Carry on. String handling is done. + __ bind(&check_code); + // edx: irregexp code + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // a smi (code flushing support). + __ JumpIfSmi(edx, &runtime); + + // eax: subject string + // ebx: previous index (smi) + // edx: code + // ecx: encoding of subject string (1 if ASCII, 0 if two_byte); + // All checks done. Now push arguments for native regexp code. + Counters* counters = isolate()->counters(); + __ IncrementCounter(counters->regexp_entry_native(), 1); + + // Isolates: note we add an additional parameter here (isolate pointer). + static const int kRegExpExecuteArguments = 9; + __ EnterApiExitFrame(kRegExpExecuteArguments); + + // Argument 9: Pass current isolate address. + __ mov(Operand(esp, 8 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + + // Argument 8: Indicate that this is a direct call from JavaScript. + __ mov(Operand(esp, 7 * kPointerSize), Immediate(1)); + + // Argument 7: Start (high end) of backtracking stack memory area. 
+ __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address)); + __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size)); + __ mov(Operand(esp, 6 * kPointerSize), esi); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(Operand(esp, 5 * kPointerSize), Immediate(0)); + + // Argument 5: static offsets vector buffer. + __ mov(Operand(esp, 4 * kPointerSize), + Immediate(ExternalReference::address_of_static_offsets_vector( + isolate()))); + + // Argument 2: Previous index. + __ SmiUntag(ebx); + __ mov(Operand(esp, 1 * kPointerSize), ebx); + + // Argument 1: Original subject string. + // The original subject is in the previous stack frame. Therefore we have to + // use ebp, which points exactly to one pointer size below the previous esp. + // (Because creating a new stack frame pushes the previous ebp onto the stack + // and thereby moves up esp by one kPointerSize.) + __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize)); + __ mov(Operand(esp, 0 * kPointerSize), esi); + + // esi: original subject string + // eax: underlying subject string + // ebx: previous index + // ecx: encoding of subject string (1 if ASCII 0 if two_byte); + // edx: code + // Argument 4: End of string data + // Argument 3: Start of string data + // Prepare start and end index of the input. + // Load the length from the original sliced string if that is the case. + __ mov(esi, FieldOperand(esi, String::kLengthOffset)); + __ add(esi, edi); // Calculate input end wrt offset. + __ SmiUntag(edi); + __ add(ebx, edi); // Calculate input start wrt offset. 
+ + // ebx: start index of the input string + // esi: end index of the input string + Label setup_two_byte, setup_rest; + __ test(ecx, ecx); + __ j(zero, &setup_two_byte, Label::kNear); + __ SmiUntag(esi); + __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize)); + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. + __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize)); + __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. + __ jmp(&setup_rest, Label::kNear); + + __ bind(&setup_two_byte); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); // esi is smi (powered by 2). + __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize)); + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. + __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize)); + __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. + + __ bind(&setup_rest); + + // Locate the code entry and call it. + __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ call(edx); + + // Drop arguments and come back to JS mode. + __ LeaveApiExitFrame(true); + + // Check the result. + Label success; + __ cmp(eax, 1); + // We expect exactly one result since we force the called regexp to behave + // as non-global. + __ j(equal, &success); + Label failure; + __ cmp(eax, NativeRegExpMacroAssembler::FAILURE); + __ j(equal, &failure); + __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION); + // If not exception it can only be retry. Handle that in the runtime system. + __ j(not_equal, &runtime); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. + // TODO(592): Rerunning the RegExp to get the stack overflow exception. 
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress, + isolate()); + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); + __ mov(eax, Operand::StaticVariable(pending_exception)); + __ cmp(edx, eax); + __ j(equal, &runtime); + // For exception, throw the exception again. + + // Clear the pending exception variable. + __ mov(Operand::StaticVariable(pending_exception), edx); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ cmp(eax, factory->termination_exception()); + Label throw_termination_exception; + __ j(equal, &throw_termination_exception, Label::kNear); + + // Handle normal exception by following handler chain. + __ Throw(eax); + + __ bind(&throw_termination_exception); + __ ThrowUncatchable(eax); + + __ bind(&failure); + // For failure to match, return null. + __ mov(eax, factory->null_value()); + __ ret(4 * kPointerSize); + + // Load RegExp data. + __ bind(&success); + __ mov(eax, Operand(esp, kJSRegExpOffset)); + __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); + __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(edx, Immediate(2)); // edx was a smi. + + // edx: Number of capture registers + // Load last_match_info which is still known to be a fast case JSArray. + // Check that the fourth object is a JSArray object. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ JumpIfSmi(eax, &runtime); + __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); + __ j(not_equal, &runtime); + // Check that the JSArray is in fast case. 
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); + __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(eax, factory->fixed_array_map()); + __ j(not_equal, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. + __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ SmiUntag(eax); + __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmp(edx, eax); + __ j(greater, &runtime); + + // ebx: last_match_info backing store (FixedArray) + // edx: number of capture registers + // Store the capture count. + __ SmiTag(edx); // Number of capture registers to smi. + __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); + __ SmiUntag(edx); // Number of capture registers back from smi. + // Store last subject and last input. + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(ecx, eax); + __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); + __ RecordWriteField(ebx, + RegExpImpl::kLastSubjectOffset, + eax, + edi); + __ mov(eax, ecx); + __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); + __ RecordWriteField(ebx, + RegExpImpl::kLastInputOffset, + eax, + edi); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(isolate()); + __ mov(ecx, Immediate(address_of_static_offsets_vector)); + + // ebx: last_match_info backing store (FixedArray) + // ecx: offsets vector + // edx: number of capture registers + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ bind(&next_capture); + __ sub(edx, Immediate(1)); + __ j(negative, &done, Label::kNear); + // Read the value from the static offsets vector buffer. + __ mov(edi, Operand(ecx, edx, times_int_size, 0)); + __ SmiTag(edi); + // Store the smi value in the last match info. 
+ __ mov(FieldOperand(ebx, + edx, + times_pointer_size, + RegExpImpl::kFirstCaptureOffset), + edi); + __ jmp(&next_capture); + __ bind(&done); + + // Return last match info. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ ret(4 * kPointerSize); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); + + // Deferred code for string handling. + // (7) Not a long external string? If yes, go to (10). + __ bind(¬_seq_nor_cons); + // Compare flags are still set from (3). + __ j(greater, ¬_long_external, Label::kNear); // Go to (10). + + // (8) External string. Short external strings have been ruled out. + __ bind(&external_string); + // Reload instance type. + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. + __ test_b(ebx, kIsIndirectStringMask); + __ Assert(zero, kExternalStringExpectedButNotFound); + } + __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset)); + // Move the pointer so that offset-wise, it looks like a sequential string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kTwoByteStringTag == 0); + // (8a) Is the external string one byte? If yes, go to (6). + __ test_b(ebx, kStringEncodingMask); + __ j(not_zero, &seq_one_byte_string); // Goto (6). + + // eax: sequential subject string (or look-alike, external string) + // edx: original subject string + // ecx: RegExp data (FixedArray) + // (9) Two byte sequential. Load regexp code for one byte. Go to (E). + __ bind(&seq_two_byte_string); + // Load previous index and check range before edx is overwritten. 
We have + // to use edx instead of eax here because it might have been only made to + // look like a sequential string when it actually is an external string. + __ mov(ebx, Operand(esp, kPreviousIndexOffset)); + __ JumpIfNotSmi(ebx, &runtime); + __ cmp(ebx, FieldOperand(edx, String::kLengthOffset)); + __ j(above_equal, &runtime); + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); + __ Move(ecx, Immediate(0)); // Type is two byte. + __ jmp(&check_code); // Go to (E). + + // (10) Not a string or a short external string? If yes, bail out to runtime. + __ bind(¬_long_external); + // Catch non-string subject or short external string. + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag)); + __ j(not_zero, &runtime); + + // (11) Sliced string. Replace subject with parent. Go to (5a). + // Load offset into edi and replace subject string with parent. + __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset)); + __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset)); + __ jmp(&check_underlying); // Go to (5a). +#endif // V8_INTERPRETED_REGEXP +} + + +static int NegativeComparisonResult(Condition cc) { + DCHECK(cc != equal); + DCHECK((cc == less) || (cc == less_equal) + || (cc == greater) || (cc == greater_equal)); + return (cc == greater || cc == greater_equal) ? LESS : GREATER; +} + + +static void CheckInputType(MacroAssembler* masm, + Register input, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::NUMBER) { + __ JumpIfSmi(input, &ok); + __ cmp(FieldOperand(input, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->heap_number_map())); + __ j(not_equal, fail); + } + // We could be strict about internalized/non-internalized here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. 
+ __ bind(&ok); +} + + +static void BranchIfNotInternalizedString(MacroAssembler* masm, + Label* label, + Register object, + Register scratch) { + __ JumpIfSmi(object, label); + __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); + __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); + __ j(not_zero, label); +} + + +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { + Label check_unequal_objects; + Condition cc = GetCondition(); + + Label miss; + CheckInputType(masm, edx, left_, &miss); + CheckInputType(masm, eax, right_, &miss); + + // Compare two smis. + Label non_smi, smi_done; + __ mov(ecx, edx); + __ or_(ecx, eax); + __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); + __ sub(edx, eax); // Return on the result of the subtraction. + __ j(no_overflow, &smi_done, Label::kNear); + __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. + __ bind(&smi_done); + __ mov(eax, edx); + __ ret(0); + __ bind(&non_smi); + + // NOTICE! This code is only reached after a smi-fast-case check, so + // it is certain that at least one operand isn't a smi. + + // Identical objects can be compared fast, but there are some tricky cases + // for NaN and undefined. + Label generic_heap_number_comparison; + { + Label not_identical; + __ cmp(eax, edx); + __ j(not_equal, ¬_identical); + + if (cc != equal) { + // Check for undefined. undefined OP undefined is false even though + // undefined == undefined. + Label check_for_nan; + __ cmp(edx, isolate()->factory()->undefined_value()); + __ j(not_equal, &check_for_nan, Label::kNear); + __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); + __ ret(0); + __ bind(&check_for_nan); + } + + // Test for NaN. Compare heap numbers in a general way, + // to hanlde NaNs correctly. 
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + Immediate(isolate()->factory()->heap_number_map())); + __ j(equal, &generic_heap_number_comparison, Label::kNear); + if (cc != equal) { + // Call runtime on identical JSObjects. Otherwise return equal. + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, ¬_identical); + } + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + + + __ bind(¬_identical); + } + + // Strict equality can quickly decide whether objects are equal. + // Non-strict object equality is slower, so it is handled later in the stub. + if (cc == equal && strict()) { + Label slow; // Fallthrough label. + Label not_smis; + // If we're doing a strict equality comparison, we don't have to do + // type conversion, so we generate code to do fast comparison for objects + // and oddballs. Non-smi numbers and strings still go through the usual + // slow-case code. + // If either is a Smi (we know that not both are), then they can only + // be equal if the other is a HeapNumber. If so, use the slow case. + STATIC_ASSERT(kSmiTag == 0); + DCHECK_EQ(0, Smi::FromInt(0)); + __ mov(ecx, Immediate(kSmiTagMask)); + __ and_(ecx, eax); + __ test(ecx, edx); + __ j(not_zero, ¬_smis, Label::kNear); + // One operand is a smi. + + // Check whether the non-smi is a heap number. + STATIC_ASSERT(kSmiTagMask == 1); + // ecx still holds eax & kSmiTag, which is either zero or one. + __ sub(ecx, Immediate(0x01)); + __ mov(ebx, edx); + __ xor_(ebx, eax); + __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx. + __ xor_(ebx, eax); + // if eax was smi, ebx is now edx, else eax. + + // Check if the non-smi operand is a heap number. + __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), + Immediate(isolate()->factory()->heap_number_map())); + // If heap number, handle it in the slow case. 
+ __ j(equal, &slow, Label::kNear); + // Return non-equal (ebx is not zero) + __ mov(eax, ebx); + __ ret(0); + + __ bind(¬_smis); + // If either operand is a JSObject or an oddball value, then they are not + // equal since their pointers are different + // There is no test for undetectability in strict equality. + + // Get the type of the first operand. + // If the first object is a JS object, we have done pointer comparison. + Label first_non_object; + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(below, &first_non_object, Label::kNear); + + // Return non-zero (eax is not zero) + Label return_not_equal; + STATIC_ASSERT(kHeapObjectTag != 0); + __ bind(&return_not_equal); + __ ret(0); + + __ bind(&first_non_object); + // Check for oddballs: true, false, null, undefined. + __ CmpInstanceType(ecx, ODDBALL_TYPE); + __ j(equal, &return_not_equal); + + __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &return_not_equal); + + // Check for oddballs: true, false, null, undefined. + __ CmpInstanceType(ecx, ODDBALL_TYPE); + __ j(equal, &return_not_equal); + + // Fall through to the general case. + __ bind(&slow); + } + + // Generate the number comparison code. + Label non_number_comparison; + Label unordered; + __ bind(&generic_heap_number_comparison); + FloatingPointHelper::CheckFloatOperands( + masm, &non_number_comparison, ebx); + FloatingPointHelper::LoadFloatOperand(masm, eax); + FloatingPointHelper::LoadFloatOperand(masm, edx); + __ FCmp(); + + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, Label::kNear); + + Label below_label, above_label; + // Return a result of -1, 0, or 1, based on EFLAGS. 
+ __ j(below, &below_label, Label::kNear); + __ j(above, &above_label, Label::kNear); + + __ Move(eax, Immediate(0)); + __ ret(0); + + __ bind(&below_label); + __ mov(eax, Immediate(Smi::FromInt(-1))); + __ ret(0); + + __ bind(&above_label); + __ mov(eax, Immediate(Smi::FromInt(1))); + __ ret(0); + + // If one of the numbers was NaN, then the result is always false. + // The cc is never not-equal. + __ bind(&unordered); + DCHECK(cc != not_equal); + if (cc == less || cc == less_equal) { + __ mov(eax, Immediate(Smi::FromInt(1))); + } else { + __ mov(eax, Immediate(Smi::FromInt(-1))); + } + __ ret(0); + + // The number comparison code did not provide a valid result. + __ bind(&non_number_comparison); + + // Fast negative check for internalized-to-internalized equality. + Label check_for_strings; + if (cc == equal) { + BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx); + BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx); + + // We've already checked for object identity, so if both operands + // are internalized they aren't equal. Register eax already holds a + // non-zero value, which indicates not equal, so just return. + __ ret(0); + } + + __ bind(&check_for_strings); + + __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, + &check_unequal_objects); + + // Inline comparison of ASCII strings. + if (cc == equal) { + StringCompareStub::GenerateFlatAsciiStringEquals(masm, + edx, + eax, + ecx, + ebx); + } else { + StringCompareStub::GenerateCompareFlatAsciiStrings(masm, + edx, + eax, + ecx, + ebx, + edi); + } +#ifdef DEBUG + __ Abort(kUnexpectedFallThroughFromStringComparison); +#endif + + __ bind(&check_unequal_objects); + if (cc == equal && !strict()) { + // Non-strict equality. Objects are unequal if + // they are both JSObjects and not undetectable, + // and their pointers are different. + Label not_both_objects; + Label return_unequal; + // At most one is a smi, so we can test for smi by adding the two. 
+ // A smi plus a heap object has the low bit set, a heap object plus + // a heap object has the low bit clear. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagMask == 1); + __ lea(ecx, Operand(eax, edx, times_1, 0)); + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, ¬_both_objects, Label::kNear); + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(below, ¬_both_objects, Label::kNear); + __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx); + __ j(below, ¬_both_objects, Label::kNear); + // We do not bail out after this point. Both are JSObjects, and + // they are equal if and only if both are undetectable. + // The and of the undetectable flags is 1 if and only if they are equal. + __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + __ j(zero, &return_unequal, Label::kNear); + __ test_b(FieldOperand(ebx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + __ j(zero, &return_unequal, Label::kNear); + // The objects are both undetectable, so they both compare as the value + // undefined, and are equal. + __ Move(eax, Immediate(EQUAL)); + __ bind(&return_unequal); + // Return non-equal by returning the non-zero object pointer in eax, + // or return equal if we fell through to here. + __ ret(0); // rax, rdx were pushed + __ bind(¬_both_objects); + } + + // Push arguments below the return address. + __ pop(ecx); + __ push(edx); + __ push(eax); + + // Figure out which native to call and setup the arguments. + Builtins::JavaScript builtin; + if (cc == equal) { + builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + } else { + builtin = Builtins::COMPARE; + __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); + } + + // Restore return address on the stack. + __ push(ecx); + + // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. 
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION); + + __ bind(&miss); + GenerateMiss(masm); +} + + +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a feedback vector slot. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // eax : number of arguments to the construct function + // ebx : Feedback vector + // edx : slot in feedback vector (Smi) + // edi : the function to call + Isolate* isolate = masm->isolate(); + Label initialize, done, miss, megamorphic, not_array_function; + + // Load the cache state into ecx. + __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmp(ecx, edi); + __ j(equal, &done, Label::kFar); + __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); + __ j(equal, &done, Label::kFar); + + if (!FLAG_pretenuring_call_new) { + // If we came here, we need to see if we are the array function. + // If we didn't have a matching function, and we didn't find the megamorph + // sentinel, then we have in the slot either some other function or an + // AllocationSite. Do a map check on the object in ecx. + Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map(); + __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map)); + __ j(not_equal, &miss); + + // Make sure the function is the Array() function + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx); + __ cmp(edi, ecx); + __ j(not_equal, &megamorphic); + __ jmp(&done, Label::kFar); + } + + __ bind(&miss); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate))); + __ j(equal, &initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. 
+ __ bind(&megamorphic); + __ mov(FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize), + Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); + __ jmp(&done, Label::kFar); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. + __ bind(&initialize); + if (!FLAG_pretenuring_call_new) { + // Make sure the function is the Array() function + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx); + __ cmp(edi, ecx); + __ j(not_equal, ¬_array_function); + + // The target function is the Array constructor, + // Create an AllocationSite if we don't already have it, store it in the + // slot. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Arguments register must be smi-tagged to call out. + __ SmiTag(eax); + __ push(eax); + __ push(edi); + __ push(edx); + __ push(ebx); + + CreateAllocationSiteStub create_stub(isolate); + __ CallStub(&create_stub); + + __ pop(ebx); + __ pop(edx); + __ pop(edi); + __ pop(eax); + __ SmiUntag(eax); + } + __ jmp(&done); + + __ bind(¬_array_function); + } + + __ mov(FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize), + edi); + // We won't need edx or ebx anymore, just save edi + __ push(edi); + __ push(ebx); + __ push(edx); + __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ pop(edx); + __ pop(ebx); + __ pop(edi); + + __ bind(&done); +} + + +static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { + // Do not transform the receiver for strict mode functions. + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, cont); + + // Do not transform the receiver for natives (shared already in ecx). 
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, cont); +} + + +static void EmitSlowCase(Isolate* isolate, + MacroAssembler* masm, + int argc, + Label* non_function) { + // Check for function proxy. + __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, non_function); + __ pop(ecx); + __ push(edi); // put proxy as additional argument under return address + __ push(ecx); + __ Move(eax, Immediate(argc + 1)); + __ Move(ebx, Immediate(0)); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); + { + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); + __ jmp(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ bind(non_function); + __ mov(Operand(esp, (argc + 1) * kPointerSize), edi); + __ Move(eax, Immediate(argc)); + __ Move(ebx, Immediate(0)); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); + __ jmp(adaptor, RelocInfo::CODE_TARGET); +} + + +static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { + // Wrap the receiver and patch it back onto the stack. + { FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ push(edi); + __ push(eax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ pop(edi); + } + __ mov(Operand(esp, (argc + 1) * kPointerSize), eax); + __ jmp(cont); +} + + +static void CallFunctionNoFeedback(MacroAssembler* masm, + int argc, bool needs_checks, + bool call_as_method) { + // edi : the function to call + Label slow, non_function, wrap, cont; + + if (needs_checks) { + // Check that the function really is a JavaScript function. + __ JumpIfSmi(edi, &non_function); + + // Goto slow case if we do not have a function. 
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &slow); + } + + // Fast-case: Just invoke the function. + ParameterCount actual(argc); + + if (call_as_method) { + if (needs_checks) { + EmitContinueIfStrictOrNative(masm, &cont); + } + + // Load the receiver from the stack. + __ mov(eax, Operand(esp, (argc + 1) * kPointerSize)); + + if (needs_checks) { + __ JumpIfSmi(eax, &wrap); + + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(below, &wrap); + } else { + __ jmp(&wrap); + } + + __ bind(&cont); + } + + __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper()); + + if (needs_checks) { + // Slow-case: Non-function called. + __ bind(&slow); + // (non_function is bound in EmitSlowCase) + EmitSlowCase(masm->isolate(), masm, argc, &non_function); + } + + if (call_as_method) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } +} + + +void CallFunctionStub::Generate(MacroAssembler* masm) { + CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); +} + + +void CallConstructStub::Generate(MacroAssembler* masm) { + // eax : number of arguments + // ebx : feedback vector + // edx : (only if ebx is not the megamorphic symbol) slot in feedback + // vector (Smi) + // edi : constructor function + Label slow, non_function_call; + + // Check that function is not a smi. + __ JumpIfSmi(edi, &non_function_call); + // Check that function is a JSFunction. + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &slow); + + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + + if (FLAG_pretenuring_call_new) { + // Put the AllocationSite from the feedback vector into ebx. + // By adding kPointerSize we encode that we know the AllocationSite + // entry is at the feedback vector slot given by edx + 1. 
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize + kPointerSize)); + } else { + Label feedback_register_initialized; + // Put the AllocationSite from the feedback vector into ebx, or undefined. + __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + Handle<Map> allocation_site_map = + isolate()->factory()->allocation_site_map(); + __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map)); + __ j(equal, &feedback_register_initialized); + __ mov(ebx, isolate()->factory()->undefined_value()); + __ bind(&feedback_register_initialized); + } + + __ AssertUndefinedOrAllocationSite(ebx); + } + + // Jump to the function-specific construct stub. + Register jmp_reg = ecx; + __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(jmp_reg, FieldOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); + __ jmp(jmp_reg); + + // edi: called object + // eax: number of arguments + // ecx: object map + Label do_call; + __ bind(&slow); + __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, &non_function_call); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + + __ bind(&non_function_call); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); + // Set expected number of arguments to zero (not changing eax). 
+ __ Move(ebx, Immediate(0)); + Handle<Code> arguments_adaptor = + isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET); +} + + +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset)); + __ mov(vector, FieldOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallIC_ArrayStub::Generate(MacroAssembler* masm) { + // edi - function + // edx - slot id + Label miss; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, ebx); + + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx); + __ cmp(edi, ecx); + __ j(not_equal, &miss); + + __ mov(eax, arg_count()); + __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + + // Verify that ecx contains an AllocationSite + Factory* factory = masm->isolate()->factory(); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), + factory->allocation_site_map()); + __ j(not_equal, &miss); + + __ mov(ebx, ecx); + ArrayConstructorStub stub(masm->isolate(), arg_count()); + __ TailCallStub(&stub); + + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Customization_Miss); + + // The slow case, we need this no matter what to complete a call after a miss. + CallFunctionNoFeedback(masm, + arg_count(), + true, + CallAsMethod()); + + // Unreachable. + __ int3(); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // edi - function + // edx - slot id + Isolate* isolate = masm->isolate(); + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, ebx); + + // The checks. First, does edi match the recorded monomorphic target? 
+ __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ j(not_equal, &extra_checks_or_miss); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + + // Load the receiver from the stack. + __ mov(eax, Operand(esp, (argc + 1) * kPointerSize)); + + __ JumpIfSmi(eax, &wrap); + + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(below, &wrap); + + __ bind(&cont); + } + + __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(isolate, masm, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); + __ j(equal, &slow_start); + __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate))); + __ j(equal, &miss); + + if (!FLAG_trace_ic) { + // We are going megamorphic. If the feedback is a JSFunction, it is fine + // to handle it here. More complex cases are dealt with in the runtime. + __ AssertNotSmi(ecx); + __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &miss); + __ mov(FieldOperand(ebx, edx, times_half_pointer_size, + FixedArray::kHeaderSize), + Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate))); + __ jmp(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm, IC::kCallIC_Miss); + + // the slow case + __ bind(&slow_start); + + // Check that the function really is a JavaScript function. + __ JumpIfSmi(edi, &non_function); + + // Goto slow case if we do not have a function. 
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &slow); + __ jmp(&have_js_function); + + // Unreachable + __ int3(); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ push(ecx); + __ push(edi); + __ push(ebx); + __ push(edx); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(id), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to edi and exit the internal frame. + __ mov(edi, eax); + } +} + + +bool CEntryStub::NeedsImmovableCode() { + return false; +} + + +void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { + CEntryStub::GenerateAheadOfTime(isolate); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); + StubFailureTrampolineStub::GenerateAheadOfTime(isolate); + // It is important that the store buffer overflow stubs are generated first. + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + CreateAllocationSiteStub::GenerateAheadOfTime(isolate); + BinaryOpICStub::GenerateAheadOfTime(isolate); + BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); +} + + +void CodeStub::GenerateFPStubs(Isolate* isolate) { + // Do nothing. 
+} + + +void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { + CEntryStub stub(isolate, 1); + stub.GetCode(); +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // eax: number of arguments including receiver + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // esi: current context (C callee-saved) + // edi: JS function of the caller (C callee-saved) + + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(); + + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // edi: number of arguments including receiver (C callee-saved) + // esi: pointer to the first argument (C callee-saved) + + // Result returned in eax, or eax+edx if result_size_ is 2. + + // Check stack alignment. + if (FLAG_debug_code) { + __ CheckStackAlignment(); + } + + // Call C function. + __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. + __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. + __ mov(Operand(esp, 2 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + __ call(ebx); + // Result is in eax or edx:eax - do not destroy these registers! + + // Runtime functions should not return 'the hole'. Allowing it to escape may + // lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ cmp(eax, isolate()->factory()->the_hole_value()); + __ j(not_equal, &okay, Label::kNear); + __ int3(); + __ bind(&okay); + } + + // Check result for exception sentinel. 
+ Label exception_returned; + __ cmp(eax, isolate()->factory()->exception()); + __ j(equal, &exception_returned); + + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + __ push(edx); + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); + Label okay; + __ cmp(edx, Operand::StaticVariable(pending_exception_address)); + // Cannot use check here as it attempts to generate call into runtime. + __ j(equal, &okay, Label::kNear); + __ int3(); + __ bind(&okay); + __ pop(edx); + } + + // Exit the JavaScript to C++ exit frame. + __ LeaveExitFrame(); + __ ret(0); + + // Handling of exception. + __ bind(&exception_returned); + + // Retrieve the pending exception. + __ mov(eax, Operand::StaticVariable(pending_exception_address)); + + // Clear the pending exception. + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); + __ mov(Operand::StaticVariable(pending_exception_address), edx); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + Label throw_termination_exception; + __ cmp(eax, isolate()->factory()->termination_exception()); + __ j(equal, &throw_termination_exception); + + // Handle normal exception. + __ Throw(eax); + + __ bind(&throw_termination_exception); + __ ThrowUncatchable(eax); +} + + +void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { + Label invoke, handler_entry, exit; + Label not_outermost_js, not_outermost_js_2; + + ProfileEntryHookStub::MaybeCallEntryHook(masm); + + // Set up frame. + __ push(ebp); + __ mov(ebp, esp); + + // Push marker in two places. + int marker = is_construct ? 
StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ push(Immediate(Smi::FromInt(marker))); // context slot + __ push(Immediate(Smi::FromInt(marker))); // function slot + // Save callee-saved registers (C calling conventions). + __ push(edi); + __ push(esi); + __ push(ebx); + + // Save copies of the top frame descriptor on the stack. + ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate()); + __ push(Operand::StaticVariable(c_entry_fp)); + + // If this is the outermost JS call, set js_entry_sp value. + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); + __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0)); + __ j(not_equal, ¬_outermost_js, Label::kNear); + __ mov(Operand::StaticVariable(js_entry_sp), ebp); + __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + __ jmp(&invoke, Label::kNear); + __ bind(¬_outermost_js); + __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); + + // Jump to a faked try block that does the invoke, with a faked catch + // block that sets the pending exception. + __ jmp(&invoke); + __ bind(&handler_entry); + handler_offset_ = handler_entry.pos(); + // Caught exception: Store result (exception) in the pending exception + // field in the JSEnv and return a failure sentinel. + ExternalReference pending_exception(Isolate::kPendingExceptionAddress, + isolate()); + __ mov(Operand::StaticVariable(pending_exception), eax); + __ mov(eax, Immediate(isolate()->factory()->exception())); + __ jmp(&exit); + + // Invoke: Link this frame into the handler chain. There's only one + // handler block in this code object, so its index is 0. + __ bind(&invoke); + __ PushTryHandler(StackHandler::JS_ENTRY, 0); + + // Clear any pending exceptions. + __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); + __ mov(Operand::StaticVariable(pending_exception), edx); + + // Fake a receiver (NULL). 
+ __ push(Immediate(0)); // receiver + + // Invoke the function by calling through JS entry trampoline builtin and + // pop the faked function when we return. Notice that we cannot store a + // reference to the trampoline code directly in this stub, because the + // builtin stubs may not have been generated yet. + if (is_construct) { + ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, + isolate()); + __ mov(edx, Immediate(construct_entry)); + } else { + ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); + __ mov(edx, Immediate(entry)); + } + __ mov(edx, Operand(edx, 0)); // deref address + __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); + __ call(edx); + + // Unlink this frame from the handler chain. + __ PopTryHandler(); + + __ bind(&exit); + // Check if the current stack frame is marked as the outermost JS frame. + __ pop(ebx); + __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + __ j(not_equal, ¬_outermost_js_2); + __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); + __ bind(¬_outermost_js_2); + + // Restore the top frame descriptor from the stack. + __ pop(Operand::StaticVariable(ExternalReference( + Isolate::kCEntryFPAddress, isolate()))); + + // Restore callee-saved registers (C calling conventions). + __ pop(ebx); + __ pop(esi); + __ pop(edi); + __ add(esp, Immediate(2 * kPointerSize)); // remove markers + + // Restore frame pointer and return. + __ pop(ebp); + __ ret(0); +} + + +// Generate stub code for instanceof. +// This code can patch a call site inlined cache of the instance of check, +// which looks like this. +// +// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map> +// 75 0a jne <some near label> +// b8 XX XX XX XX mov eax, <the hole, patched to either true or false> +// +// If call site patching is requested the stack will have the delta from the +// return address to the cmp instruction just below the return address. 
This +// also means that call site patching can only take place with arguments in +// registers. TOS looks like this when call site patching is requested +// +// esp[0] : return address +// esp[4] : delta from return address to cmp instruction +// +void InstanceofStub::Generate(MacroAssembler* masm) { + // Call site inlining and patching implies arguments in registers. + DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); + + // Fixed register usage throughout the stub. + Register object = eax; // Object (lhs). + Register map = ebx; // Map of the object. + Register function = edx; // Function (rhs). + Register prototype = edi; // Prototype of the function. + Register scratch = ecx; + + // Constants describing the call site code to patch. + static const int kDeltaToCmpImmediate = 2; + static const int kDeltaToMov = 8; + static const int kDeltaToMovImmediate = 9; + static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b); + static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d); + static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8); + + DCHECK_EQ(object.code(), InstanceofStub::left().code()); + DCHECK_EQ(function.code(), InstanceofStub::right().code()); + + // Get the object and function - they are always both needed. + Label slow, not_js_object; + if (!HasArgsInRegisters()) { + __ mov(object, Operand(esp, 2 * kPointerSize)); + __ mov(function, Operand(esp, 1 * kPointerSize)); + } + + // Check that the left hand is a JS object. + __ JumpIfSmi(object, ¬_js_object); + __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); + + // If there is a call site cache don't look in the global cache, but do the + // real lookup and update the call site cache. + if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) { + // Look up the function and the map in the instanceof cache. 
+ Label miss; + __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex); + __ j(not_equal, &miss, Label::kNear); + __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex); + __ j(not_equal, &miss, Label::kNear); + __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex); + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + __ bind(&miss); + } + + // Get the prototype of the function. + __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true); + + // Check that the function prototype is a JS object. + __ JumpIfSmi(prototype, &slow); + __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); + + // Update the global instanceof or call site inlined cache with the current + // map and function. The cached answer will be set when it is known below. + if (!HasCallSiteInlineCheck()) { + __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex); + __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex); + } else { + // The constants for the code patching are based on no push instructions + // at the call site. + DCHECK(HasArgsInRegisters()); + // Get return address and delta to inlined map check. + __ mov(scratch, Operand(esp, 0 * kPointerSize)); + __ sub(scratch, Operand(esp, 1 * kPointerSize)); + if (FLAG_debug_code) { + __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1); + __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1); + __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2); + __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2); + } + __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate)); + __ mov(Operand(scratch, 0), map); + } + + // Loop through the prototype chain of the object looking for the function + // prototype. 
+ __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset)); + Label loop, is_instance, is_not_instance; + __ bind(&loop); + __ cmp(scratch, prototype); + __ j(equal, &is_instance, Label::kNear); + Factory* factory = isolate()->factory(); + __ cmp(scratch, Immediate(factory->null_value())); + __ j(equal, &is_not_instance, Label::kNear); + __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); + __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); + __ jmp(&loop); + + __ bind(&is_instance); + if (!HasCallSiteInlineCheck()) { + __ mov(eax, Immediate(0)); + __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->true_value()); + } + } else { + // Get return address and delta to inlined map check. + __ mov(eax, factory->true_value()); + __ mov(scratch, Operand(esp, 0 * kPointerSize)); + __ sub(scratch, Operand(esp, 1 * kPointerSize)); + if (FLAG_debug_code) { + __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte); + __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov); + } + __ mov(Operand(scratch, kDeltaToMovImmediate), eax); + if (!ReturnTrueFalseObject()) { + __ Move(eax, Immediate(0)); + } + } + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + + __ bind(&is_not_instance); + if (!HasCallSiteInlineCheck()) { + __ mov(eax, Immediate(Smi::FromInt(1))); + __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } + } else { + // Get return address and delta to inlined map check. 
+ __ mov(eax, factory->false_value()); + __ mov(scratch, Operand(esp, 0 * kPointerSize)); + __ sub(scratch, Operand(esp, 1 * kPointerSize)); + if (FLAG_debug_code) { + __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte); + __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov); + } + __ mov(Operand(scratch, kDeltaToMovImmediate), eax); + if (!ReturnTrueFalseObject()) { + __ Move(eax, Immediate(Smi::FromInt(1))); + } + } + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + + Label object_not_null, object_not_null_or_smi; + __ bind(¬_js_object); + // Before null, smi and string value checks, check that the rhs is a function + // as for a non-function rhs an exception needs to be thrown. + __ JumpIfSmi(function, &slow, Label::kNear); + __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch); + __ j(not_equal, &slow, Label::kNear); + + // Null is not instance of anything. + __ cmp(object, factory->null_value()); + __ j(not_equal, &object_not_null, Label::kNear); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } else { + __ Move(eax, Immediate(Smi::FromInt(1))); + } + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + + __ bind(&object_not_null); + // Smi values is not instance of anything. + __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } else { + __ Move(eax, Immediate(Smi::FromInt(1))); + } + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + + __ bind(&object_not_null_or_smi); + // String values is not instance of anything. + Condition is_string = masm->IsObjectStringType(object, scratch, scratch); + __ j(NegateCondition(is_string), &slow, Label::kNear); + if (ReturnTrueFalseObject()) { + __ mov(eax, factory->false_value()); + } else { + __ Move(eax, Immediate(Smi::FromInt(1))); + } + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + + // Slow-case: Go through the JavaScript implementation. 
+ __ bind(&slow); + if (!ReturnTrueFalseObject()) { + // Tail call the builtin which returns 0 or 1. + if (HasArgsInRegisters()) { + // Push arguments below return address. + __ pop(scratch); + __ push(object); + __ push(function); + __ push(scratch); + } + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); + } else { + // Call the builtin and convert 0/1 to true/false. + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(object); + __ push(function); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } + Label true_value, done; + __ test(eax, eax); + __ j(zero, &true_value, Label::kNear); + __ mov(eax, factory->false_value()); + __ jmp(&done, Label::kNear); + __ bind(&true_value); + __ mov(eax, factory->true_value()); + __ bind(&done); + __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); + } +} + + +Register InstanceofStub::left() { return eax; } + + +Register InstanceofStub::right() { return edx; } + + +// ------------------------------------------------------------------------- +// StringCharCodeAtGenerator + +void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { + // If the receiver is a smi trigger the non-string case. + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(object_, receiver_not_string_); + + // Fetch the instance type of the receiver into result register. + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + // If the receiver is not a string trigger the non-string case. + __ test(result_, Immediate(kIsNotStringMask)); + __ j(not_zero, receiver_not_string_); + + // If the index is non-smi trigger the non-smi case. + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfNotSmi(index_, &index_not_smi_); + __ bind(&got_smi_index_); + + // Check for index out of range. 
+ __ cmp(index_, FieldOperand(object_, String::kLengthOffset)); + __ j(above_equal, index_out_of_range_); + + __ SmiUntag(index_); + + Factory* factory = masm->isolate()->factory(); + StringCharLoadGenerator::Generate( + masm, factory, object_, index_, result_, &call_runtime_); + + __ SmiTag(result_); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, + masm->isolate()->factory()->heap_number_map(), + index_not_number_, + DONT_DO_SMI_CHECK); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + if (!index_.is(eax)) { + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + __ mov(index_, eax); + } + __ pop(object_); + // Reload the instance type. + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfNotSmi(index_, index_out_of_range_); + // Otherwise, return to the fast path. + __ jmp(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). 
+ __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ push(object_); + __ SmiTag(index_); + __ push(index_); + __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); + if (!result_.is(eax)) { + __ mov(result_, eax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiShiftSize == 0); + DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); + __ test(code_, + Immediate(kSmiTagMask | + ((~String::kMaxOneByteCharCode) << kSmiTagSize))); + __ j(not_zero, &slow_case_); + + Factory* factory = masm->isolate()->factory(); + __ Move(result_, Immediate(factory->single_character_string_cache())); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiShiftSize == 0); + // At this point code register contains smi tagged ASCII char code. 
+ __ mov(result_, FieldOperand(result_, + code_, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(result_, factory->undefined_value()); + __ j(equal, &slow_case_); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + if (!result_.is(eax)) { + __ mov(result_, eax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); +} + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding) { + DCHECK(!scratch.is(dest)); + DCHECK(!scratch.is(src)); + DCHECK(!scratch.is(count)); + + // Nothing to do for zero characters. + Label done; + __ test(count, count); + __ j(zero, &done); + + // Make count the number of bytes to copy. 
+ if (encoding == String::TWO_BYTE_ENCODING) { + __ shl(count, 1); + } + + Label loop; + __ bind(&loop); + __ mov_b(scratch, Operand(src, 0)); + __ mov_b(Operand(dest, 0), scratch); + __ inc(src); + __ inc(dest); + __ dec(count); + __ j(not_zero, &loop); + + __ bind(&done); +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash = (seed + character) + ((seed + character) << 10); + if (masm->serializer_enabled()) { + __ LoadRoot(scratch, Heap::kHashSeedRootIndex); + __ SmiUntag(scratch); + __ add(scratch, character); + __ mov(hash, scratch); + __ shl(scratch, 10); + __ add(hash, scratch); + } else { + int32_t seed = masm->isolate()->heap()->HashSeed(); + __ lea(scratch, Operand(character, seed)); + __ shl(scratch, 10); + __ lea(hash, Operand(scratch, character, times_1, seed)); + } + // hash ^= hash >> 6; + __ mov(scratch, hash); + __ shr(scratch, 6); + __ xor_(hash, scratch); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash += character; + __ add(hash, character); + // hash += hash << 10; + __ mov(scratch, hash); + __ shl(scratch, 10); + __ add(hash, scratch); + // hash ^= hash >> 6; + __ mov(scratch, hash); + __ shr(scratch, 6); + __ xor_(hash, scratch); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch) { + // hash += hash << 3; + __ mov(scratch, hash); + __ shl(scratch, 3); + __ add(hash, scratch); + // hash ^= hash >> 11; + __ mov(scratch, hash); + __ shr(scratch, 11); + __ xor_(hash, scratch); + // hash += hash << 15; + __ mov(scratch, hash); + __ shl(scratch, 15); + __ add(hash, scratch); + + __ and_(hash, String::kHashBitMask); + + // if (hash == 0) hash = 27; + Label hash_not_zero; + __ j(not_zero, &hash_not_zero, Label::kNear); + __ mov(hash, Immediate(StringHasher::kZeroHash)); + __ bind(&hash_not_zero); +} + + +void 
SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: to + // esp[8]: from + // esp[12]: string + + // Make sure first argument is a string. + __ mov(eax, Operand(esp, 3 * kPointerSize)); + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(eax, &runtime); + Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); + __ j(NegateCondition(is_string), &runtime); + + // eax: string + // ebx: instance type + + // Calculate length of sub string using the smi values. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. + __ JumpIfNotSmi(ecx, &runtime); + __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. + __ JumpIfNotSmi(edx, &runtime); + __ sub(ecx, edx); + __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); + Label not_original_string; + // Shorter than original string's length: an actual substring. + __ j(below, ¬_original_string, Label::kNear); + // Longer than original string's length or negative: unsafe arguments. + __ j(above, &runtime); + // Return original string. + Counters* counters = isolate()->counters(); + __ IncrementCounter(counters->sub_string_native(), 1); + __ ret(3 * kPointerSize); + __ bind(¬_original_string); + + Label single_char; + __ cmp(ecx, Immediate(Smi::FromInt(1))); + __ j(equal, &single_char); + + // eax: string + // ebx: instance type + // ecx: sub string length (smi) + // edx: from index (smi) + // Deal with different string types: update the index if necessary + // and put the underlying string into edi. + Label underlying_unpacked, sliced_string, seq_or_external_string; + // If the string is not indirect, it can only be sequential or external. 
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); + STATIC_ASSERT(kIsIndirectStringMask != 0); + __ test(ebx, Immediate(kIsIndirectStringMask)); + __ j(zero, &seq_or_external_string, Label::kNear); + + Factory* factory = isolate()->factory(); + __ test(ebx, Immediate(kSlicedNotConsMask)); + __ j(not_zero, &sliced_string, Label::kNear); + // Cons string. Check whether it is flat, then fetch first part. + // Flat cons strings have an empty second part. + __ cmp(FieldOperand(eax, ConsString::kSecondOffset), + factory->empty_string()); + __ j(not_equal, &runtime); + __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset)); + // Update instance type. + __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + __ jmp(&underlying_unpacked, Label::kNear); + + __ bind(&sliced_string); + // Sliced string. Fetch parent and adjust start index by offset. + __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset)); + __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset)); + // Update instance type. + __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + __ jmp(&underlying_unpacked, Label::kNear); + + __ bind(&seq_or_external_string); + // Sequential or external string. Just move string to the expected register. + __ mov(edi, eax); + + __ bind(&underlying_unpacked); + + if (FLAG_string_slices) { + Label copy_routine; + // edi: underlying subject string + // ebx: instance type of underlying subject string + // edx: adjusted start index (smi) + // ecx: length (smi) + __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength))); + // Short slice. Copy instead of slicing. + __ j(less, ©_routine); + // Allocate new sliced string. At this point we do not reload the instance + // type including the string encoding because we simply rely on the info + // provided by the original string. 
It does not matter if the original + // string's encoding is wrong because we always have to recheck encoding of + // the newly created string's parent anyways due to externalized strings. + Label two_byte_slice, set_slice_header; + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); + __ test(ebx, Immediate(kStringEncodingMask)); + __ j(zero, &two_byte_slice, Label::kNear); + __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime); + __ jmp(&set_slice_header, Label::kNear); + __ bind(&two_byte_slice); + __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime); + __ bind(&set_slice_header); + __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx); + __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset), + Immediate(String::kEmptyHashField)); + __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi); + __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx); + __ IncrementCounter(counters->sub_string_native(), 1); + __ ret(3 * kPointerSize); + + __ bind(©_routine); + } + + // edi: underlying subject string + // ebx: instance type of underlying subject string + // edx: adjusted start index (smi) + // ecx: length (smi) + // The subject string can only be external or sequential string of either + // encoding at this point. + Label two_byte_sequential, runtime_drop_two, sequential_string; + STATIC_ASSERT(kExternalStringTag != 0); + STATIC_ASSERT(kSeqStringTag == 0); + __ test_b(ebx, kExternalStringTag); + __ j(zero, &sequential_string); + + // Handle external string. + // Rule out short external strings. + STATIC_ASSERT(kShortExternalStringTag != 0); + __ test_b(ebx, kShortExternalStringMask); + __ j(not_zero, &runtime); + __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset)); + // Move the pointer so that offset-wise, it looks like a sequential string. 
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + __ bind(&sequential_string); + // Stash away (adjusted) index and (underlying) string. + __ push(edx); + __ push(edi); + __ SmiUntag(ecx); + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); + __ test_b(ebx, kStringEncodingMask); + __ j(zero, &two_byte_sequential); + + // Sequential ASCII string. Allocate the result. + __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two); + + // eax: result string + // ecx: result string length + // Locate first character of result. + __ mov(edi, eax); + __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + // Load string argument and locate character of sub string start. + __ pop(edx); + __ pop(ebx); + __ SmiUntag(ebx); + __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize)); + + // eax: result string + // ecx: result length + // edi: first character of result + // edx: character of sub string start + StringHelper::GenerateCopyCharacters( + masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING); + __ IncrementCounter(counters->sub_string_native(), 1); + __ ret(3 * kPointerSize); + + __ bind(&two_byte_sequential); + // Sequential two-byte string. Allocate the result. + __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two); + + // eax: result string + // ecx: result string length + // Locate first character of result. + __ mov(edi, eax); + __ add(edi, + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Load string argument and locate character of sub string start. + __ pop(edx); + __ pop(ebx); + // As from is a smi it is 2 times the value which matches the size of a two + // byte character. 
+ STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize)); + + // eax: result string + // ecx: result length + // edi: first character of result + // edx: character of sub string start + StringHelper::GenerateCopyCharacters( + masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING); + __ IncrementCounter(counters->sub_string_native(), 1); + __ ret(3 * kPointerSize); + + // Drop pushed values on the stack before tail call. + __ bind(&runtime_drop_two); + __ Drop(2); + + // Just jump to runtime to create the sub string. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kSubString, 3, 1); + + __ bind(&single_char); + // eax: string + // ebx: instance type + // ecx: sub string length (smi) + // edx: from index (smi) + StringCharAtGenerator generator( + eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ ret(3 * kPointerSize); + generator.SkipSlow(masm, &runtime); +} + + +void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2) { + Register length = scratch1; + + // Compare lengths. + Label strings_not_equal, check_zero_length; + __ mov(length, FieldOperand(left, String::kLengthOffset)); + __ cmp(length, FieldOperand(right, String::kLengthOffset)); + __ j(equal, &check_zero_length, Label::kNear); + __ bind(&strings_not_equal); + __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL))); + __ ret(0); + + // Check if the length is zero. + Label compare_chars; + __ bind(&check_zero_length); + STATIC_ASSERT(kSmiTag == 0); + __ test(length, length); + __ j(not_zero, &compare_chars, Label::kNear); + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + + // Compare characters. 
+ __ bind(&compare_chars); + GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2, + &strings_not_equal, Label::kNear); + + // Characters are equal. + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); +} + + +void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3) { + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->string_compare_native(), 1); + + // Find minimum length. + Label left_shorter; + __ mov(scratch1, FieldOperand(left, String::kLengthOffset)); + __ mov(scratch3, scratch1); + __ sub(scratch3, FieldOperand(right, String::kLengthOffset)); + + Register length_delta = scratch3; + + __ j(less_equal, &left_shorter, Label::kNear); + // Right string is shorter. Change scratch1 to be length of right string. + __ sub(scratch1, length_delta); + __ bind(&left_shorter); + + Register min_length = scratch1; + + // If either length is zero, just compare lengths. + Label compare_lengths; + __ test(min_length, min_length); + __ j(zero, &compare_lengths, Label::kNear); + + // Compare characters. + Label result_not_equal; + GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2, + &result_not_equal, Label::kNear); + + // Compare lengths - strings up to min-length are equal. + __ bind(&compare_lengths); + __ test(length_delta, length_delta); + Label length_not_equal; + __ j(not_zero, &length_not_equal, Label::kNear); + + // Result is EQUAL. + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + + Label result_greater; + Label result_less; + __ bind(&length_not_equal); + __ j(greater, &result_greater, Label::kNear); + __ jmp(&result_less, Label::kNear); + __ bind(&result_not_equal); + __ j(above, &result_greater, Label::kNear); + __ bind(&result_less); + + // Result is LESS. 
+ __ Move(eax, Immediate(Smi::FromInt(LESS))); + __ ret(0); + + // Result is GREATER. + __ bind(&result_greater); + __ Move(eax, Immediate(Smi::FromInt(GREATER))); + __ ret(0); +} + + +void StringCompareStub::GenerateAsciiCharsCompareLoop( + MacroAssembler* masm, + Register left, + Register right, + Register length, + Register scratch, + Label* chars_not_equal, + Label::Distance chars_not_equal_near) { + // Change index to run from -length to -1 by adding length to string + // start. This means that loop ends when index reaches zero, which + // doesn't need an additional compare. + __ SmiUntag(length); + __ lea(left, + FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize)); + __ lea(right, + FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize)); + __ neg(length); + Register index = length; // index = -length; + + // Compare loop. + Label loop; + __ bind(&loop); + __ mov_b(scratch, Operand(left, index, times_1, 0)); + __ cmpb(scratch, Operand(right, index, times_1, 0)); + __ j(not_equal, chars_not_equal, chars_not_equal_near); + __ inc(index); + __ j(not_zero, &loop); +} + + +void StringCompareStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: right string + // esp[8]: left string + + __ mov(edx, Operand(esp, 2 * kPointerSize)); // left + __ mov(eax, Operand(esp, 1 * kPointerSize)); // right + + Label not_same; + __ cmp(edx, eax); + __ j(not_equal, ¬_same, Label::kNear); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ IncrementCounter(isolate()->counters()->string_compare_native(), 1); + __ ret(2 * kPointerSize); + + __ bind(¬_same); + + // Check that both objects are sequential ASCII strings. + __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime); + + // Compare flat ASCII strings. + // Drop arguments from the stack. 
+ __ pop(ecx); + __ add(esp, Immediate(2 * kPointerSize)); + __ push(ecx); + GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); + + // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); +} + + +void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- edx : left + // -- eax : right + // -- esp[0] : return address + // ----------------------------------- + + // Load ecx with the allocation site. We stick an undefined dummy value here + // and replace it with the real allocation site later when we instantiate this + // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). + __ mov(ecx, handle(isolate()->heap()->undefined_value())); + + // Make sure that we actually patched the allocation site. + if (FLAG_debug_code) { + __ test(ecx, Immediate(kSmiTagMask)); + __ Assert(not_equal, kExpectedAllocationSite); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), + isolate()->factory()->allocation_site_map()); + __ Assert(equal, kExpectedAllocationSite); + } + + // Tail call into the stub that handles binary operations with allocation + // sites. + BinaryOpWithAllocationSiteStub stub(isolate(), state_); + __ TailCallStub(&stub); +} + + +void ICCompareStub::GenerateSmis(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::SMI); + Label miss; + __ mov(ecx, edx); + __ or_(ecx, eax); + __ JumpIfNotSmi(ecx, &miss, Label::kNear); + + if (GetCondition() == equal) { + // For equality we do not care about the sign of the result. + __ sub(eax, edx); + } else { + Label done; + __ sub(edx, eax); + __ j(no_overflow, &done, Label::kNear); + // Correct sign of result in case of overflow. 
+ __ not_(edx); + __ bind(&done); + __ mov(eax, edx); + } + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::NUMBER); + + Label generic_stub; + Label unordered, maybe_undefined1, maybe_undefined2; + Label miss; + + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(edx, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(eax, &miss); + } + + // Inlining the double comparison and falling back to the general compare + // stub if NaN is involved or SSE2 or CMOV is unsupported. + __ mov(ecx, edx); + __ and_(ecx, eax); + __ JumpIfSmi(ecx, &generic_stub, Label::kNear); + + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined1, Label::kNear); + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined2, Label::kNear); + + __ bind(&unordered); + __ bind(&generic_stub); + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); + __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); + + __ bind(&maybe_undefined1); + if (Token::IsOrderedRelationalCompareOp(op_)) { + __ cmp(eax, Immediate(isolate()->factory()->undefined_value())); + __ j(not_equal, &miss); + __ JumpIfSmi(edx, &unordered); + __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); + __ j(not_equal, &maybe_undefined2, Label::kNear); + __ jmp(&unordered); + } + + __ bind(&maybe_undefined2); + if (Token::IsOrderedRelationalCompareOp(op_)) { + __ cmp(edx, Immediate(isolate()->factory()->undefined_value())); + __ j(equal, &unordered); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::INTERNALIZED_STRING); + DCHECK(GetCondition() == equal); + + // Registers containing left and right operands respectively. 
+ Register left = edx; + Register right = eax; + Register tmp1 = ecx; + Register tmp2 = ebx; + + // Check that both operands are heap objects. + Label miss; + __ mov(tmp1, left); + STATIC_ASSERT(kSmiTag == 0); + __ and_(tmp1, right); + __ JumpIfSmi(tmp1, &miss, Label::kNear); + + // Check that both operands are internalized strings. + __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset)); + __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset)); + __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ or_(tmp1, tmp2); + __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); + __ j(not_zero, &miss, Label::kNear); + + // Internalized strings are compared by identity. + Label done; + __ cmp(left, right); + // Make sure eax is non-zero. At this point input operands are + // guaranteed to be non-zero. + DCHECK(right.is(eax)); + __ j(not_equal, &done, Label::kNear); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ bind(&done); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::UNIQUE_NAME); + DCHECK(GetCondition() == equal); + + // Registers containing left and right operands respectively. + Register left = edx; + Register right = eax; + Register tmp1 = ecx; + Register tmp2 = ebx; + + // Check that both operands are heap objects. + Label miss; + __ mov(tmp1, left); + STATIC_ASSERT(kSmiTag == 0); + __ and_(tmp1, right); + __ JumpIfSmi(tmp1, &miss, Label::kNear); + + // Check that both operands are unique names. This leaves the instance + // types loaded in tmp1 and tmp2. 
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset)); + __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset)); + __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + + __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear); + __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear); + + // Unique names are compared by identity. + Label done; + __ cmp(left, right); + // Make sure eax is non-zero. At this point input operands are + // guaranteed to be non-zero. + DCHECK(right.is(eax)); + __ j(not_equal, &done, Label::kNear); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ bind(&done); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateStrings(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::STRING); + Label miss; + + bool equality = Token::IsEqualityOp(op_); + + // Registers containing left and right operands respectively. + Register left = edx; + Register right = eax; + Register tmp1 = ecx; + Register tmp2 = ebx; + Register tmp3 = edi; + + // Check that both operands are heap objects. + __ mov(tmp1, left); + STATIC_ASSERT(kSmiTag == 0); + __ and_(tmp1, right); + __ JumpIfSmi(tmp1, &miss); + + // Check that both operands are strings. This leaves the instance + // types loaded in tmp1 and tmp2. + __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset)); + __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset)); + __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + __ mov(tmp3, tmp1); + STATIC_ASSERT(kNotStringTag != 0); + __ or_(tmp3, tmp2); + __ test(tmp3, Immediate(kIsNotStringMask)); + __ j(not_zero, &miss); + + // Fast check for identical strings. 
+ Label not_same; + __ cmp(left, right); + __ j(not_equal, ¬_same, Label::kNear); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Move(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + + // Handle not identical strings. + __ bind(¬_same); + + // Check that both strings are internalized. If they are, we're done + // because we already know they are not identical. But in the case of + // non-equality compare, we still need to determine the order. We + // also know they are both strings. + if (equality) { + Label do_compare; + STATIC_ASSERT(kInternalizedTag == 0); + __ or_(tmp1, tmp2); + __ test(tmp1, Immediate(kIsNotInternalizedMask)); + __ j(not_zero, &do_compare, Label::kNear); + // Make sure eax is non-zero. At this point input operands are + // guaranteed to be non-zero. + DCHECK(right.is(eax)); + __ ret(0); + __ bind(&do_compare); + } + + // Check that both strings are sequential ASCII. + Label runtime; + __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime); + + // Compare flat ASCII strings. Returns when done. + if (equality) { + StringCompareStub::GenerateFlatAsciiStringEquals( + masm, left, right, tmp1, tmp2); + } else { + StringCompareStub::GenerateCompareFlatAsciiStrings( + masm, left, right, tmp1, tmp2, tmp3); + } + + // Handle more complex cases in runtime. + __ bind(&runtime); + __ pop(tmp1); // Return address. 
+ __ push(left); + __ push(right); + __ push(tmp1); + if (equality) { + __ TailCallRuntime(Runtime::kStringEquals, 2, 1); + } else { + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateObjects(MacroAssembler* masm) { + DCHECK(state_ == CompareIC::OBJECT); + Label miss; + __ mov(ecx, edx); + __ and_(ecx, eax); + __ JumpIfSmi(ecx, &miss, Label::kNear); + + __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx); + __ j(not_equal, &miss, Label::kNear); + __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx); + __ j(not_equal, &miss, Label::kNear); + + DCHECK(GetCondition() == equal); + __ sub(eax, edx); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { + Label miss; + __ mov(ecx, edx); + __ and_(ecx, eax); + __ JumpIfSmi(ecx, &miss, Label::kNear); + + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); + __ cmp(ecx, known_map_); + __ j(not_equal, &miss, Label::kNear); + __ cmp(ebx, known_map_); + __ j(not_equal, &miss, Label::kNear); + + __ sub(eax, edx); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateMiss(MacroAssembler* masm) { + { + // Call the runtime system in a fresh internal frame. + ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), + isolate()); + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edx); // Preserve edx and eax. + __ push(eax); + __ push(edx); // And also use them as the arguments. + __ push(eax); + __ push(Immediate(Smi::FromInt(op_))); + __ CallExternalReference(miss, 3); + // Compute the entry point of the rewritten stub. + __ lea(edi, FieldOperand(eax, Code::kHeaderSize)); + __ pop(eax); + __ pop(edx); + } + + // Do a tail call to the rewritten stub. + __ jmp(edi); +} + + +// Helper function used to check that the dictionary doesn't contain +// the property. 
This function may return false negatives, so miss_label +// must always call a backup property check that is complete. +// This function is safe to call if the receiver has fast properties. +// Name must be a unique name and receiver must be a heap object. +void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register properties, + Handle<Name> name, + Register r0) { + DCHECK(name->IsUniqueName()); + + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the hole value). + for (int i = 0; i < kInlinedProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + Register index = r0; + // Capacity is smi 2^n. + __ mov(index, FieldOperand(properties, kCapacityOffset)); + __ dec(index); + __ and_(index, + Immediate(Smi::FromInt(name->Hash() + + NameDictionary::GetProbeOffset(i)))); + + // Scale the index by multiplying by the entry size. + DCHECK(NameDictionary::kEntrySize == 3); + __ lea(index, Operand(index, index, times_2, 0)); // index *= 3. + Register entity_name = r0; + // Having undefined at this place means the name is not contained. + DCHECK_EQ(kSmiTagSize, 1); + __ mov(entity_name, Operand(properties, index, times_half_pointer_size, + kElementsStartOffset - kHeapObjectTag)); + __ cmp(entity_name, masm->isolate()->factory()->undefined_value()); + __ j(equal, done); + + // Stop if found the property. + __ cmp(entity_name, Handle<Name>(name)); + __ j(equal, miss); + + Label good; + // Check for the hole and skip. + __ cmp(entity_name, masm->isolate()->factory()->the_hole_value()); + __ j(equal, &good, Label::kNear); + + // Check if the entry name is not a unique name. 
+ __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset)); + __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset), + miss); + __ bind(&good); + } + + NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0, + NEGATIVE_LOOKUP); + __ push(Immediate(Handle<Object>(name))); + __ push(Immediate(name->Hash())); + __ CallStub(&stub); + __ test(r0, r0); + __ j(not_zero, miss); + __ jmp(done); +} + + +// Probe the name dictionary in the |elements| register. Jump to the +// |done| label if a property with the given name is found leaving the +// index into the dictionary in |r0|. Jump to the |miss| label +// otherwise. +void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register r0, + Register r1) { + DCHECK(!elements.is(r0)); + DCHECK(!elements.is(r1)); + DCHECK(!name.is(r0)); + DCHECK(!name.is(r1)); + + __ AssertName(name); + + __ mov(r1, FieldOperand(elements, kCapacityOffset)); + __ shr(r1, kSmiTagSize); // convert smi to int + __ dec(r1); + + // Generate an unrolled loop that performs a few probes before + // giving up. Measurements done on Gmail indicate that 2 probes + // cover ~93% of loads from dictionaries. + for (int i = 0; i < kInlinedProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); + __ shr(r0, Name::kHashShift); + if (i > 0) { + __ add(r0, Immediate(NameDictionary::GetProbeOffset(i))); + } + __ and_(r0, r1); + + // Scale the index by multiplying by the entry size. + DCHECK(NameDictionary::kEntrySize == 3); + __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3 + + // Check if the key is identical to the name. 
+ __ cmp(name, Operand(elements, + r0, + times_4, + kElementsStartOffset - kHeapObjectTag)); + __ j(equal, done); + } + + NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0, + POSITIVE_LOOKUP); + __ push(name); + __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); + __ shr(r0, Name::kHashShift); + __ push(r0); + __ CallStub(&stub); + + __ test(r1, r1); + __ j(zero, miss); + __ jmp(done); +} + + +void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. + // Stack frame on entry: + // esp[0 * kPointerSize]: return address. + // esp[1 * kPointerSize]: key's hash. + // esp[2 * kPointerSize]: key. + // Registers: + // dictionary_: NameDictionary to probe. + // result_: used as scratch. + // index_: will hold an index of entry if lookup is successful. + // might alias with result_. + // Returns: + // result_ is zero if lookup failed, non zero otherwise. + + Label in_dictionary, maybe_in_dictionary, not_in_dictionary; + + Register scratch = result_; + + __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset)); + __ dec(scratch); + __ SmiUntag(scratch); + __ push(scratch); + + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the null value). + for (int i = kInlinedProbes; i < kTotalProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + __ mov(scratch, Operand(esp, 2 * kPointerSize)); + if (i > 0) { + __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i))); + } + __ and_(scratch, Operand(esp, 0)); + + // Scale the index by multiplying by the entry size. 
+ DCHECK(NameDictionary::kEntrySize == 3); + __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. + + // Having undefined at this place means the name is not contained. + DCHECK_EQ(kSmiTagSize, 1); + __ mov(scratch, Operand(dictionary_, + index_, + times_pointer_size, + kElementsStartOffset - kHeapObjectTag)); + __ cmp(scratch, isolate()->factory()->undefined_value()); + __ j(equal, ¬_in_dictionary); + + // Stop if found the property. + __ cmp(scratch, Operand(esp, 3 * kPointerSize)); + __ j(equal, &in_dictionary); + + if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { + // If we hit a key that is not a unique name during negative + // lookup we have to bailout as this key might be equal to the + // key we are looking for. + + // Check if the entry name is not a unique name. + __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); + __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset), + &maybe_in_dictionary); + } + } + + __ bind(&maybe_in_dictionary); + // If we are doing negative lookup then probing failure should be + // treated as a lookup success. For positive lookup probing failure + // should be treated as lookup failure. + if (mode_ == POSITIVE_LOOKUP) { + __ mov(result_, Immediate(0)); + __ Drop(1); + __ ret(2 * kPointerSize); + } + + __ bind(&in_dictionary); + __ mov(result_, Immediate(1)); + __ Drop(1); + __ ret(2 * kPointerSize); + + __ bind(¬_in_dictionary); + __ mov(result_, Immediate(0)); + __ Drop(1); + __ ret(2 * kPointerSize); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { + StoreBufferOverflowStub stub(isolate); + stub.GetCode(); +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. 
+void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + __ jmp(&skip_to_incremental_noncompacting, Label::kNear); + __ jmp(&skip_to_incremental_compacting, Label::kFar); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + masm->set_byte_at(0, kTwoByteNopInstruction); + masm->set_byte_at(2, kFiveByteNopInstruction); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. 
+ CheckNeedsToInformIncrementalMarker( + masm, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, + mode); + InformIncrementalMarker(masm); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, + kReturnOnNoNeedToInformIncrementalMarker, + mode); + InformIncrementalMarker(masm); + regs_.Restore(masm); + __ ret(0); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { + regs_.SaveCallerSaveRegisters(masm); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); + __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. + __ mov(Operand(esp, 2 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function(isolate()), + argument_count); + + regs_.RestoreCallerSaveRegisters(masm); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label object_is_black, need_incremental, need_incremental_pop_object; + + __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask)); + __ and_(regs_.scratch0(), regs_.object()); + __ mov(regs_.scratch1(), + Operand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ sub(regs_.scratch1(), Immediate(1)); + __ mov(Operand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset), + regs_.scratch1()); + __ j(negative, &need_incremental); + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. 
+ __ JumpIfBlack(regs_.object(), + regs_.scratch0(), + regs_.scratch1(), + &object_is_black, + Label::kNear); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&object_is_black); + + // Get the value from the slot. + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + zero, + &ensure_not_white, + Label::kNear); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + not_zero, + &ensure_not_white, + Label::kNear); + + __ jmp(&need_incremental); + + __ bind(&ensure_not_white); + } + + // We need an extra register for this, so we push the object register + // temporarily. + __ push(regs_.object()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + &need_incremental_pop_object, + Label::kNear); + __ pop(regs_.object()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&need_incremental_pop_object); + __ pop(regs_.object()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. 
+} + + +void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : element value to store + // -- ecx : element index as smi + // -- esp[0] : return address + // -- esp[4] : array literal index in function + // -- esp[8] : array literal + // clobbers ebx, edx, edi + // ----------------------------------- + + Label element_done; + Label double_elements; + Label smi_element; + Label slow_elements; + Label slow_elements_from_double; + Label fast_elements; + + // Get array literal index, array literal and its map. + __ mov(edx, Operand(esp, 1 * kPointerSize)); + __ mov(ebx, Operand(esp, 2 * kPointerSize)); + __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset)); + + __ CheckFastElements(edi, &double_elements); + + // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements + __ JumpIfSmi(eax, &smi_element); + __ CheckFastSmiElements(edi, &fast_elements, Label::kNear); + + // Store into the array literal requires a elements transition. Call into + // the runtime. + + __ bind(&slow_elements); + __ pop(edi); // Pop return address and remember to put back later for tail + // call. + __ push(ebx); + __ push(ecx); + __ push(eax); + __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset)); + __ push(edx); + __ push(edi); // Return return address so that tail call returns to right + // place. + __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); + + __ bind(&slow_elements_from_double); + __ pop(edx); + __ jmp(&slow_elements); + + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. + __ bind(&fast_elements); + __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); + __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size, + FixedArrayBase::kHeaderSize)); + __ mov(Operand(ecx, 0), eax); + // Update the write barrier for the array store. 
+ __ RecordWrite(ebx, ecx, eax, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ ret(0); + + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. + __ bind(&smi_element); + __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); + __ mov(FieldOperand(ebx, ecx, times_half_pointer_size, + FixedArrayBase::kHeaderSize), eax); + __ ret(0); + + // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. + __ bind(&double_elements); + + __ push(edx); + __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset)); + __ StoreNumberToDoubleElements(eax, + edx, + ecx, + edi, + &slow_elements_from_double, + false); + __ pop(edx); + __ ret(0); +} + + +void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { + CEntryStub ces(isolate(), 1); + __ call(ces.GetCode(), RelocInfo::CODE_TARGET); + int parameter_count_offset = + StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; + __ mov(ebx, MemOperand(ebp, parameter_count_offset)); + masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); + __ pop(ecx); + int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE + ? kPointerSize + : 0; + __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset)); + __ jmp(ecx); // Return to IC Miss stub, continuation still on stack. +} + + +void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { + if (masm->isolate()->function_entry_hook() != NULL) { + ProfileEntryHookStub stub(masm->isolate()); + masm->CallStub(&stub); + } +} + + +void ProfileEntryHookStub::Generate(MacroAssembler* masm) { + // Save volatile registers. + const int kNumSavedRegisters = 3; + __ push(eax); + __ push(ecx); + __ push(edx); + + // Calculate and push the original stack pointer. + __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize)); + __ push(eax); + + // Retrieve our return address and use it to calculate the calling + // function's address. 
+ __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize)); + __ sub(eax, Immediate(Assembler::kCallInstructionLength)); + __ push(eax); + + // Call the entry hook. + DCHECK(isolate()->function_entry_hook() != NULL); + __ call(FUNCTION_ADDR(isolate()->function_entry_hook()), + RelocInfo::RUNTIME_ENTRY); + __ add(esp, Immediate(2 * kPointerSize)); + + // Restore ecx. + __ pop(edx); + __ pop(ecx); + __ pop(eax); + + __ ret(0); +} + + +template<class T> +static void CreateArrayDispatch(MacroAssembler* masm, + AllocationSiteOverrideMode mode) { + if (mode == DISABLE_ALLOCATION_SITES) { + T stub(masm->isolate(), + GetInitialFastElementsKind(), + mode); + __ TailCallStub(&stub); + } else if (mode == DONT_OVERRIDE) { + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmp(edx, kind); + __ j(not_equal, &next); + T stub(masm->isolate(), kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort(kUnexpectedElementsKindInArrayConstructor); + } else { + UNREACHABLE(); + } +} + + +static void CreateArrayDispatchOneArgument(MacroAssembler* masm, + AllocationSiteOverrideMode mode) { + // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES) + // edx - kind (if mode != DISABLE_ALLOCATION_SITES) + // eax - number of arguments + // edi - constructor? + // esp[0] - return address + // esp[4] - last argument + Label normal_sequence; + if (mode == DONT_OVERRIDE) { + DCHECK(FAST_SMI_ELEMENTS == 0); + DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); + DCHECK(FAST_ELEMENTS == 2); + DCHECK(FAST_HOLEY_ELEMENTS == 3); + DCHECK(FAST_DOUBLE_ELEMENTS == 4); + DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + + // is the low bit set? If so, we are holey and that is good. 
+ __ test_b(edx, 1); + __ j(not_zero, &normal_sequence); + } + + // look at the first argument + __ mov(ecx, Operand(esp, kPointerSize)); + __ test(ecx, ecx); + __ j(zero, &normal_sequence); + + if (mode == DISABLE_ALLOCATION_SITES) { + ElementsKind initial = GetInitialFastElementsKind(); + ElementsKind holey_initial = GetHoleyElementsKind(initial); + + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, + DISABLE_ALLOCATION_SITES); + __ TailCallStub(&stub_holey); + + __ bind(&normal_sequence); + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, + DISABLE_ALLOCATION_SITES); + __ TailCallStub(&stub); + } else if (mode == DONT_OVERRIDE) { + // We are going to create a holey array, but our kind is non-holey. + // Fix kind and retry. + __ inc(edx); + + if (FLAG_debug_code) { + Handle<Map> allocation_site_map = + masm->isolate()->factory()->allocation_site_map(); + __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map)); + __ Assert(equal, kExpectedAllocationSite); + } + + // Save the resulting elements kind in type info. We can't just store r3 + // in the AllocationSite::transition_info field because elements kind is + // restricted to a portion of the field...upper bits need to be left alone. + STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); + __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset), + Immediate(Smi::FromInt(kFastElementsKindPackedToHoley))); + + __ bind(&normal_sequence); + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmp(edx, kind); + __ j(not_equal, &next); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. 
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ 
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); + } else if (argument_count_ == ONE) { + CreateArrayDispatchOneArgument(masm, mode); + } else if (argument_count_ == MORE_THAN_ONE) { + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); + } else { + UNREACHABLE(); + } +} + + +void ArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : argc (only if argument_count_ == ANY) + // -- ebx : AllocationSite or undefined + // -- edi : constructor + // -- esp[0] : return address + // -- esp[4] : last argument + // ----------------------------------- + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ test(ecx, Immediate(kSmiTagMask)); + __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction); + __ CmpObjectType(ecx, MAP_TYPE, ecx); + __ Assert(equal, kUnexpectedInitialMapForArrayFunction); + + // We should either have undefined in ebx or a valid AllocationSite + __ AssertUndefinedOrAllocationSite(ebx); + } + + Label no_info; + // If the feedback vector is the undefined value call an array constructor + // that doesn't use AllocationSites. + __ cmp(ebx, isolate()->factory()->undefined_value()); + __ j(equal, &no_info); + + // Only look at the lower 16 bits of the transition info. 
+ __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(edx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi. 
+ __ test(ecx, Immediate(kSmiTagMask)); + __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction); + __ CmpObjectType(ecx, MAP_TYPE, ecx); + __ Assert(equal, kUnexpectedInitialMapForArrayFunction); + } + + // Figure out the right elements kind + __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + + // Load the map's "bit field 2" into |result|. We only need the first byte, + // but the following masking takes care of that anyway. + __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset)); + // Retrieve elements_kind from bit field 2. + __ DecodeField<Map::ElementsKindBits>(ecx); + + if (FLAG_debug_code) { + Label done; + __ cmp(ecx, Immediate(FAST_ELEMENTS)); + __ j(equal, &done); + __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS)); + __ Assert(equal, + kInvalidElementsKindForInternalArrayOrInternalPackedArray); + __ bind(&done); + } + + Label fast_elements_case; + __ cmp(ecx, Immediate(FAST_ELEMENTS)); + __ j(equal, &fast_elements_case); + GenerateCase(masm, FAST_HOLEY_ELEMENTS); + + __ bind(&fast_elements_case); + GenerateCase(masm, FAST_ELEMENTS); +} + + +void CallApiFunctionStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : callee + // -- ebx : call_data + // -- ecx : holder + // -- edx : api_function_address + // -- esi : context + // -- + // -- esp[0] : return address + // -- esp[4] : last argument + // -- ... 
+ // -- esp[argc * 4] : first argument + // -- esp[(argc + 1) * 4] : receiver + // ----------------------------------- + + Register callee = eax; + Register call_data = ebx; + Register holder = ecx; + Register api_function_address = edx; + Register return_address = edi; + Register context = esi; + + int argc = ArgumentBits::decode(bit_field_); + bool is_store = IsStoreBits::decode(bit_field_); + bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_); + + typedef FunctionCallbackArguments FCA; + + STATIC_ASSERT(FCA::kContextSaveIndex == 6); + STATIC_ASSERT(FCA::kCalleeIndex == 5); + STATIC_ASSERT(FCA::kDataIndex == 4); + STATIC_ASSERT(FCA::kReturnValueOffset == 3); + STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(FCA::kIsolateIndex == 1); + STATIC_ASSERT(FCA::kHolderIndex == 0); + STATIC_ASSERT(FCA::kArgsLength == 7); + + __ pop(return_address); + + // context save + __ push(context); + // load context from callee + __ mov(context, FieldOperand(callee, JSFunction::kContextOffset)); + + // callee + __ push(callee); + + // call data + __ push(call_data); + + Register scratch = call_data; + if (!call_data_undefined) { + // return value + __ push(Immediate(isolate()->factory()->undefined_value())); + // return value default + __ push(Immediate(isolate()->factory()->undefined_value())); + } else { + // return value + __ push(scratch); + // return value default + __ push(scratch); + } + // isolate + __ push(Immediate(reinterpret_cast<int>(isolate()))); + // holder + __ push(holder); + + __ mov(scratch, esp); + + // return address + __ push(return_address); + + // API function gets reference to the v8::Arguments. If CPU profiler + // is enabled wrapper function will be called and we need to pass + // address of the callback as additional parameter, always allocate + // space for it. + const int kApiArgc = 1 + 1; + + // Allocate the v8::Arguments structure in the arguments' space since + // it's not controlled by GC. 
+ const int kApiStackSpace = 4; + + __ PrepareCallApiFunction(kApiArgc + kApiStackSpace); + + // FunctionCallbackInfo::implicit_args_. + __ mov(ApiParameterOperand(2), scratch); + __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize)); + // FunctionCallbackInfo::values_. + __ mov(ApiParameterOperand(3), scratch); + // FunctionCallbackInfo::length_. + __ Move(ApiParameterOperand(4), Immediate(argc)); + // FunctionCallbackInfo::is_construct_call_. + __ Move(ApiParameterOperand(5), Immediate(0)); + + // v8::InvocationCallback's argument. + __ lea(scratch, ApiParameterOperand(2)); + __ mov(ApiParameterOperand(0), scratch); + + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); + + Operand context_restore_operand(ebp, + (2 + FCA::kContextSaveIndex) * kPointerSize); + // Stores return the first js argument + int return_value_offset = 0; + if (is_store) { + return_value_offset = 2 + FCA::kArgsLength; + } else { + return_value_offset = 2 + FCA::kReturnValueOffset; + } + Operand return_value_operand(ebp, return_value_offset * kPointerSize); + __ CallApiFunctionAndReturn(api_function_address, + thunk_ref, + ApiParameterOperand(1), + argc + FCA::kArgsLength + 1, + return_value_operand, + &context_restore_operand); +} + + +void CallApiGetterStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- esp[0] : return address + // -- esp[4] : name + // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object + // -- ... + // -- edx : api_function_address + // ----------------------------------- + + // array for v8::Arguments::values_, handler for name and pointer + // to the values (it considered as smi in GC). + const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2; + // Allocate space for opional callback address parameter in case + // CPU profiler is active. 
+ const int kApiArgc = 2 + 1; + + Register api_function_address = edx; + Register scratch = ebx; + + // load address of name + __ lea(scratch, Operand(esp, 1 * kPointerSize)); + + __ PrepareCallApiFunction(kApiArgc); + __ mov(ApiParameterOperand(0), scratch); // name. + __ add(scratch, Immediate(kPointerSize)); + __ mov(ApiParameterOperand(1), scratch); // arguments pointer. + + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); + + __ CallApiFunctionAndReturn(api_function_address, + thunk_ref, + ApiParameterOperand(2), + kStackSpace, + Operand(ebp, 7 * kPointerSize), + NULL); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/code-stubs-x87.h nodejs-0.11.15/deps/v8/src/x87/code-stubs-x87.h --- nodejs-0.11.13/deps/v8/src/x87/code-stubs-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/code-stubs-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,413 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_X87_CODE_STUBS_X87_H_ +#define V8_X87_CODE_STUBS_X87_H_ + +#include "src/ic-inl.h" +#include "src/macro-assembler.h" + +namespace v8 { +namespace internal { + + +void ArrayNativeCode(MacroAssembler* masm, + bool construct_call, + Label* call_generic_code); + + +class StoreBufferOverflowStub: public PlatformCodeStub { + public: + explicit StoreBufferOverflowStub(Isolate* isolate) + : PlatformCodeStub(isolate) { } + + void Generate(MacroAssembler* masm); + + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + Major MajorKey() const { return StoreBufferOverflow; } + int MinorKey() const { return 0; } +}; + + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using the rep movs instruction. + // Copies ecx characters from esi to edi. Copying of overlapping regions is + // not supported. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + String::Encoding encoding); + + // Generate string hash. 
+ static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +class SubStringStub: public PlatformCodeStub { + public: + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} + + private: + Major MajorKey() const { return SubString; } + int MinorKey() const { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class StringCompareStub: public PlatformCodeStub { + public: + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } + + // Compares two flat ASCII strings and returns result in eax. + static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3); + + // Compares two flat ASCII strings for equality and returns result + // in eax. 
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2); + + private: + virtual Major MajorKey() const { return StringCompare; } + virtual int MinorKey() const { return 0; } + virtual void Generate(MacroAssembler* masm); + + static void GenerateAsciiCharsCompareLoop( + MacroAssembler* masm, + Register left, + Register right, + Register length, + Register scratch, + Label* chars_not_equal, + Label::Distance chars_not_equal_near = Label::kFar); +}; + + +class NameDictionaryLookupStub: public PlatformCodeStub { + public: + enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; + + NameDictionaryLookupStub(Isolate* isolate, + Register dictionary, + Register result, + Register index, + LookupMode mode) + : PlatformCodeStub(isolate), + dictionary_(dictionary), result_(result), index_(index), mode_(mode) { } + + void Generate(MacroAssembler* masm); + + static void GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register properties, + Handle<Name> name, + Register r0); + + static void GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register r0, + Register r1); + + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + static const int kInlinedProbes = 4; + static const int kTotalProbes = 20; + + static const int kCapacityOffset = + NameDictionary::kHeaderSize + + NameDictionary::kCapacityIndex * kPointerSize; + + static const int kElementsStartOffset = + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + + Major MajorKey() const { return NameDictionaryLookup; } + + int MinorKey() const { + return DictionaryBits::encode(dictionary_.code()) | + ResultBits::encode(result_.code()) | + IndexBits::encode(index_.code()) | + LookupModeBits::encode(mode_); + } + + class DictionaryBits: public BitField<int, 0, 3> {}; + class ResultBits: public 
BitField<int, 3, 3> {}; + class IndexBits: public BitField<int, 6, 3> {}; + class LookupModeBits: public BitField<LookupMode, 9, 1> {}; + + Register dictionary_; + Register result_; + Register index_; + LookupMode mode_; +}; + + +class RecordWriteStub: public PlatformCodeStub { + public: + RecordWriteStub(Isolate* isolate, + Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action) + : PlatformCodeStub(isolate), + object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. + } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool SometimesSetsUpAFrame() { return false; } + + static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. + static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8. + + static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32. + static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32. 
+ + static Mode GetMode(Code* stub) { + byte first_instruction = stub->instruction_start()[0]; + byte second_instruction = stub->instruction_start()[2]; + + if (first_instruction == kTwoByteJumpInstruction) { + return INCREMENTAL; + } + + DCHECK(first_instruction == kTwoByteNopInstruction); + + if (second_instruction == kFiveByteJumpInstruction) { + return INCREMENTAL_COMPACTION; + } + + DCHECK(second_instruction == kFiveByteNopInstruction); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + switch (mode) { + case STORE_BUFFER_ONLY: + DCHECK(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteNopInstruction; + break; + case INCREMENTAL: + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteJumpInstruction; + break; + case INCREMENTAL_COMPACTION: + DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteJumpInstruction; + break; + } + DCHECK(GetMode(stub) == mode); + CpuFeatures::FlushICache(stub->instruction_start(), 7); + } + + private: + // This is a helper class for freeing up 3 scratch registers, where the third + // is always ecx (needed for shift operations). The input is two registers + // that must be preserved and one scratch register provided by the caller. 
+ class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_orig_(object), + address_orig_(address), + scratch0_orig_(scratch0), + object_(object), + address_(address), + scratch0_(scratch0) { + DCHECK(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_); + if (scratch0.is(ecx)) { + scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_); + } + if (object.is(ecx)) { + object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_); + } + if (address.is(ecx)) { + address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_); + } + DCHECK(!AreAliased(scratch0_, object_, address_, ecx)); + } + + void Save(MacroAssembler* masm) { + DCHECK(!address_orig_.is(object_)); + DCHECK(object_.is(object_orig_) || address_.is(address_orig_)); + DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); + DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); + // We don't have to save scratch0_orig_ because it was given to us as + // a scratch register. But if we had to switch to a different reg then + // we should save the new scratch0_. + if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_); + if (!ecx.is(scratch0_orig_) && + !ecx.is(object_orig_) && + !ecx.is(address_orig_)) { + masm->push(ecx); + } + masm->push(scratch1_); + if (!address_.is(address_orig_)) { + masm->push(address_); + masm->mov(address_, address_orig_); + } + if (!object_.is(object_orig_)) { + masm->push(object_); + masm->mov(object_, object_orig_); + } + } + + void Restore(MacroAssembler* masm) { + // These will have been preserved the entire time, so we just need to move + // them back. Only in one case is the orig_ reg different from the plain + // one, since only one of them can alias with ecx. 
+ if (!object_.is(object_orig_)) { + masm->mov(object_orig_, object_); + masm->pop(object_); + } + if (!address_.is(address_orig_)) { + masm->mov(address_orig_, address_); + masm->pop(address_); + } + masm->pop(scratch1_); + if (!ecx.is(scratch0_orig_) && + !ecx.is(object_orig_) && + !ecx.is(address_orig_)) { + masm->pop(ecx); + } + if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The caller saved + // registers are eax, ecx and edx. The three scratch registers (incl. ecx) + // will be restored by other means so we don't bother pushing them here. + void SaveCallerSaveRegisters(MacroAssembler* masm) { + if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); + if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm) { + if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx); + if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_orig_; + Register address_orig_; + Register scratch0_orig_; + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + // Third scratch register is always ecx. 
+ + Register GetRegThatIsNotEcxOr(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(ecx)) continue; + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + } +; + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm); + + Major MajorKey() const { return RecordWrite; } + + int MinorKey() const { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_); + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 3> {}; + class ValueBits: public BitField<int, 3, 3> {}; + class AddressBits: public BitField<int, 6, 3> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + RegisterAllocation regs_; +}; + + +} } // namespace v8::internal + +#endif // V8_X87_CODE_STUBS_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/cpu-x87.cc nodejs-0.11.15/deps/v8/src/x87/cpu-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/cpu-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/cpu-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,44 @@ +// Copyright 2011 the V8 
project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// CPU specific code for ia32 independent of OS goes here. + +#ifdef __GNUC__ +#include "src/third_party/valgrind/valgrind.h" +#endif + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/assembler.h" +#include "src/macro-assembler.h" + +namespace v8 { +namespace internal { + +void CpuFeatures::FlushICache(void* start, size_t size) { + // No need to flush the instruction cache on Intel. On Intel instruction + // cache flushing is only necessary when multiple cores running the same + // code simultaneously. V8 (and JavaScript) is single threaded and when code + // is patched on an intel CPU the core performing the patching will have its + // own instruction cache updated automatically. + + // If flushing of the instruction cache becomes necessary Windows has the + // API function FlushInstructionCache. + + // By default, valgrind only checks the stack for writes that might need to + // invalidate already cached translated code. This leads to random + // instability when code patches or moves are sometimes unnoticed. One + // solution is to run valgrind with --smc-check=all, but this comes at a big + // performance cost. We can notify valgrind to invalidate its cache. +#ifdef VALGRIND_DISCARD_TRANSLATIONS + unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size); + USE(res); +#endif +} + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/debug-x87.cc nodejs-0.11.15/deps/v8/src/x87/debug-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/debug-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/debug-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,326 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/codegen.h" +#include "src/debug.h" + + +namespace v8 { +namespace internal { + +bool BreakLocationIterator::IsDebugBreakAtReturn() { + return Debug::IsDebugBreakAtReturn(rinfo()); +} + + +// Patch the JS frame exit code with a debug break call. See +// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc +// for the precise return instructions sequence. +void BreakLocationIterator::SetDebugBreakAtReturn() { + DCHECK(Assembler::kJSReturnSequenceLength >= + Assembler::kCallInstructionLength); + rinfo()->PatchCodeWithCall( + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(), + Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength); +} + + +// Restore the JS frame exit code. +void BreakLocationIterator::ClearDebugBreakAtReturn() { + rinfo()->PatchCode(original_rinfo()->pc(), + Assembler::kJSReturnSequenceLength); +} + + +// A debug break in the frame exit code is identified by the JS frame exit code +// having been patched with a call instruction. +bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { + DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); + return rinfo->IsPatchedReturnSequence(); +} + + +bool BreakLocationIterator::IsDebugBreakAtSlot() { + DCHECK(IsDebugBreakSlot()); + // Check whether the debug break slot instructions have been patched. 
+ return rinfo()->IsPatchedDebugBreakSlotSequence(); +} + + +void BreakLocationIterator::SetDebugBreakAtSlot() { + DCHECK(IsDebugBreakSlot()); + Isolate* isolate = debug_info_->GetIsolate(); + rinfo()->PatchCodeWithCall( + isolate->builtins()->Slot_DebugBreak()->entry(), + Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength); +} + + +void BreakLocationIterator::ClearDebugBreakAtSlot() { + DCHECK(IsDebugBreakSlot()); + rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength); +} + + +#define __ ACCESS_MASM(masm) + +static void Generate_DebugBreakCallHelper(MacroAssembler* masm, + RegList object_regs, + RegList non_object_regs, + bool convert_call_to_jmp) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Load padding words on stack. + for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) { + __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue))); + } + __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize))); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + DCHECK((object_regs & ~kJSCallerSaved) == 0); + DCHECK((non_object_regs & ~kJSCallerSaved) == 0); + DCHECK((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((object_regs & (1 << r)) != 0) { + __ push(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ test(reg, Immediate(0xc0000000)); + __ Assert(zero, kUnableToEncodeValueAsSmi); + } + __ SmiTag(reg); + __ push(reg); + } + } + +#ifdef DEBUG + __ RecordComment("// Calling from debug break to runtime - come in - over"); +#endif + __ Move(eax, Immediate(0)); // No arguments. 
+ __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(masm->isolate(), 1); + __ CallStub(&ceb); + + // Automatically find register that could be used after register restore. + // We need one register for padding skip instructions. + Register unused_reg = { -1 }; + + // Restore the register values containing object pointers from the + // expression stack. + for (int i = kNumJSCallerSaved; --i >= 0;) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if (FLAG_debug_code) { + __ Move(reg, Immediate(kDebugZapValue)); + } + bool taken = reg.code() == esi.code(); + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + taken = true; + } + if ((non_object_regs & (1 << r)) != 0) { + __ pop(reg); + __ SmiUntag(reg); + taken = true; + } + if (!taken) { + unused_reg = reg; + } + } + + DCHECK(unused_reg.code() != -1); + + // Read current padding counter and skip corresponding number of words. + __ pop(unused_reg); + // We divide stored value by 2 (untagging) and multiply it by word's size. + STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0); + __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0)); + + // Get rid of the internal frame. + } + + // If this call did not replace a call but patched other code then there will + // be an unwanted return address left on the stack. Here we get rid of that. + if (convert_call_to_jmp) { + __ add(esp, Immediate(kPointerSize)); + } + + // Now that the break point has been handled, resume normal execution by + // jumping to the target address intended by the caller and that was + // overwritten by the address of DebugBreakXXX. 
+ ExternalReference after_break_target = + ExternalReference::debug_after_break_target_address(masm->isolate()); + __ jmp(Operand::StaticVariable(after_break_target)); +} + + +void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub + // ----------- S t a t e ------------- + // -- edx : type feedback slot (smi) + // -- edi : function + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(), + 0, false); +} + + +void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { + // Register state for IC load call (from ic-x87.cc). + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false); +} + + +void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { + // Register state for IC store call (from ic-x87.cc). + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0, false); +} + + +void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { + // Register state for keyed IC load call (from ic-x87.cc). + GenerateLoadICDebugBreak(masm); +} + + +void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { + // Register state for keyed IC store call (from ic-x87.cc). 
+ Register receiver = KeyedStoreIC::ReceiverRegister(); + Register name = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + Generate_DebugBreakCallHelper( + masm, receiver.bit() | name.bit() | value.bit(), 0, false); +} + + +void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { + // Register state for CompareNil IC + // ----------- S t a t e ------------- + // -- eax : value + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false); +} + + +void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) { + // Register state just before return from JS function (from codegen-x87.cc). + // ----------- S t a t e ------------- + // -- eax: return value + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true); +} + + +void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { + // Register state for CallFunctionStub (from code-stubs-x87.cc). + // ----------- S t a t e ------------- + // -- edi: function + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false); +} + + +void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { + // Register state for CallConstructStub (from code-stubs-x87.cc). + // eax is the actual number of arguments not encoded as a smi see comment + // above IC call. + // ----------- S t a t e ------------- + // -- eax: number of arguments (not smi) + // -- edi: constructor function + // ----------------------------------- + // The number of arguments in eax is not smi encoded. + Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false); +} + + +void DebugCodegen::GenerateCallConstructStubRecordDebugBreak( + MacroAssembler* masm) { + // Register state for CallConstructStub (from code-stubs-x87.cc). + // eax is the actual number of arguments not encoded as a smi see comment + // above IC call. 
+ // ----------- S t a t e ------------- + // -- eax: number of arguments (not smi) + // -- ebx: feedback array + // -- edx: feedback slot (smi) + // -- edi: constructor function + // ----------------------------------- + // The number of arguments in eax is not smi encoded. + Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(), + eax.bit(), false); +} + + +void DebugCodegen::GenerateSlot(MacroAssembler* masm) { + // Generate enough nop's to make space for a call instruction. + Label check_codesize; + __ bind(&check_codesize); + __ RecordDebugBreakSlot(); + __ Nop(Assembler::kDebugBreakSlotLength); + DCHECK_EQ(Assembler::kDebugBreakSlotLength, + masm->SizeOfCodeGeneratedSince(&check_codesize)); +} + + +void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) { + // In the places where a debug break slot is inserted no registers can contain + // object pointers. + Generate_DebugBreakCallHelper(masm, 0, 0, true); +} + + +void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + masm->ret(0); +} + + +void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + ExternalReference restarter_frame_function_slot = + ExternalReference::debug_restarter_frame_function_pointer_address( + masm->isolate()); + __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0)); + + // We do not know our frame height, but set esp based on ebp. + __ lea(esp, Operand(ebp, -1 * kPointerSize)); + + __ pop(edi); // Function. + __ pop(ebp); + + // Load context from the function. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + // Get function code. + __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); + __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); + + // Re-run JSFunction, edi is function, esi is context. 
+ __ jmp(edx); +} + + +const bool LiveEdit::kFrameDropperSupported = true; + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/deoptimizer-x87.cc nodejs-0.11.15/deps/v8/src/x87/deoptimizer-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/deoptimizer-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/deoptimizer-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,403 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/full-codegen.h" +#include "src/safepoint-table.h" + +namespace v8 { +namespace internal { + +const int Deoptimizer::table_entry_size_ = 10; + + +int Deoptimizer::patch_size() { + return Assembler::kCallInstructionLength; +} + + +void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { + Isolate* isolate = code->GetIsolate(); + HandleScope scope(isolate); + + // Compute the size of relocation information needed for the code + // patching in Deoptimizer::DeoptimizeFunction. + int min_reloc_size = 0; + int prev_pc_offset = 0; + DeoptimizationInputData* deopt_data = + DeoptimizationInputData::cast(code->deoptimization_data()); + for (int i = 0; i < deopt_data->DeoptCount(); i++) { + int pc_offset = deopt_data->Pc(i)->value(); + if (pc_offset == -1) continue; + DCHECK_GE(pc_offset, prev_pc_offset); + int pc_delta = pc_offset - prev_pc_offset; + // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes + // if encodable with small pc delta encoding and up to 6 bytes + // otherwise. 
+ if (pc_delta <= RelocInfo::kMaxSmallPCDelta) { + min_reloc_size += 2; + } else { + min_reloc_size += 6; + } + prev_pc_offset = pc_offset; + } + + // If the relocation information is not big enough we create a new + // relocation info object that is padded with comments to make it + // big enough for lazy doptimization. + int reloc_length = code->relocation_info()->length(); + if (min_reloc_size > reloc_length) { + int comment_reloc_size = RelocInfo::kMinRelocCommentSize; + // Padding needed. + int min_padding = min_reloc_size - reloc_length; + // Number of comments needed to take up at least that much space. + int additional_comments = + (min_padding + comment_reloc_size - 1) / comment_reloc_size; + // Actual padding size. + int padding = additional_comments * comment_reloc_size; + // Allocate new relocation info and copy old relocation to the end + // of the new relocation info array because relocation info is + // written and read backwards. + Factory* factory = isolate->factory(); + Handle<ByteArray> new_reloc = + factory->NewByteArray(reloc_length + padding, TENURED); + MemCopy(new_reloc->GetDataStartAddress() + padding, + code->relocation_info()->GetDataStartAddress(), reloc_length); + // Create a relocation writer to write the comments in the padding + // space. Use position 0 for everything to ensure short encoding. + RelocInfoWriter reloc_info_writer( + new_reloc->GetDataStartAddress() + padding, 0); + intptr_t comment_string + = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString); + RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL); + for (int i = 0; i < additional_comments; ++i) { +#ifdef DEBUG + byte* pos_before = reloc_info_writer.pos(); +#endif + reloc_info_writer.Write(&rinfo); + DCHECK(RelocInfo::kMinRelocCommentSize == + pos_before - reloc_info_writer.pos()); + } + // Replace relocation information on the code object. 
+ code->set_relocation_info(*new_reloc); + } +} + + +void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { + Address code_start_address = code->instruction_start(); + + if (FLAG_zap_code_space) { + // Fail hard and early if we enter this code object again. + byte* pointer = code->FindCodeAgeSequence(); + if (pointer != NULL) { + pointer += kNoCodeAgeSequenceLength; + } else { + pointer = code->instruction_start(); + } + CodePatcher patcher(pointer, 1); + patcher.masm()->int3(); + + DeoptimizationInputData* data = + DeoptimizationInputData::cast(code->deoptimization_data()); + int osr_offset = data->OsrPcOffset()->value(); + if (osr_offset > 0) { + CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1); + osr_patcher.masm()->int3(); + } + } + + // We will overwrite the code's relocation info in-place. Relocation info + // is written backward. The relocation info is the payload of a byte + // array. Later on we will slide this to the start of the byte array and + // create a filler object in the remaining space. + ByteArray* reloc_info = code->relocation_info(); + Address reloc_end_address = reloc_info->address() + reloc_info->Size(); + RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address); + + // Since the call is a relative encoding, write new + // reloc info. We do not need any of the existing reloc info because the + // existing code will not be used again (we zap it in debug builds). + // + // Emit call to lazy deoptimization at all lazy deopt points. + DeoptimizationInputData* deopt_data = + DeoptimizationInputData::cast(code->deoptimization_data()); +#ifdef DEBUG + Address prev_call_address = NULL; +#endif + // For each LLazyBailout instruction insert a call to the corresponding + // deoptimization entry. + for (int i = 0; i < deopt_data->DeoptCount(); i++) { + if (deopt_data->Pc(i)->value() == -1) continue; + // Patch lazy deoptimization entry. 
+ Address call_address = code_start_address + deopt_data->Pc(i)->value(); + CodePatcher patcher(call_address, patch_size()); + Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); + patcher.masm()->call(deopt_entry, RelocInfo::NONE32); + // We use RUNTIME_ENTRY for deoptimization bailouts. + RelocInfo rinfo(call_address + 1, // 1 after the call opcode. + RelocInfo::RUNTIME_ENTRY, + reinterpret_cast<intptr_t>(deopt_entry), + NULL); + reloc_info_writer.Write(&rinfo); + DCHECK_GE(reloc_info_writer.pos(), + reloc_info->address() + ByteArray::kHeaderSize); + DCHECK(prev_call_address == NULL || + call_address >= prev_call_address + patch_size()); + DCHECK(call_address + patch_size() <= code->instruction_end()); +#ifdef DEBUG + prev_call_address = call_address; +#endif + } + + // Move the relocation info to the beginning of the byte array. + int new_reloc_size = reloc_end_address - reloc_info_writer.pos(); + MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size); + + // The relocation info is in place, update the size. + reloc_info->set_length(new_reloc_size); + + // Handle the junk part after the new relocation info. We will create + // a non-live object in the extra space at the end of the former reloc info. + Address junk_address = reloc_info->address() + reloc_info->Size(); + DCHECK(junk_address <= reloc_end_address); + isolate->heap()->CreateFillerObjectAt(junk_address, + reloc_end_address - junk_address); +} + + +void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { + // Set the register values. The values are not important as there are no + // callee saved registers in JavaScript frames, so all registers are + // spilled. Registers ebp and esp are set to the correct values though. 
+ + for (int i = 0; i < Register::kNumRegisters; i++) { + input_->SetRegister(i, i * 4); + } + input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp())); + input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp())); + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { + input_->SetDoubleRegister(i, 0.0); + } + + // Fill the frame content from the actual data on the frame. + for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { + input_->SetFrameSlot(i, Memory::uint32_at(tos + i)); + } +} + + +void Deoptimizer::SetPlatformCompiledStubRegisters( + FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { + intptr_t handler = + reinterpret_cast<intptr_t>(descriptor->deoptimization_handler()); + int params = descriptor->GetHandlerParameterCount(); + output_frame->SetRegister(eax.code(), params); + output_frame->SetRegister(ebx.code(), handler); +} + + +void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + // Do nothing for X87. + return; +} + + +bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { + int parameter_count = function->shared()->formal_parameter_count() + 1; + unsigned input_frame_size = input_->GetFrameSize(); + unsigned alignment_state_offset = + input_frame_size - parameter_count * kPointerSize - + StandardFrameConstants::kFixedFrameSize - + kPointerSize; + DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset == + JavaScriptFrameConstants::kLocal0Offset); + int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset); + return (alignment_state == kAlignmentPaddingPushed); +} + + +#define __ masm()-> + +void Deoptimizer::EntryGenerator::Generate() { + GeneratePrologue(); + + // Save all general purpose registers before messing with them. + const int kNumberOfRegisters = Register::kNumRegisters; + __ pushad(); + + const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize; + + // Get the bailout id from the stack. 
+ __ mov(ebx, Operand(esp, kSavedRegistersAreaSize)); + + // Get the address of the location in the code object + // and compute the fp-to-sp delta in register edx. + __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); + __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize)); + + __ sub(edx, ebp); + __ neg(edx); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(6, eax); + __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(Operand(esp, 0 * kPointerSize), eax); // Function. + __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type. + __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id. + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0. + __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. + __ mov(Operand(esp, 5 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); + } + + // Preserve deoptimizer object in register eax and get the input + // frame descriptor pointer. + __ mov(ebx, Operand(eax, Deoptimizer::input_offset())); + + // Fill in the input registers. + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + __ pop(Operand(ebx, offset)); + } + + // Clear FPU all exceptions. + // TODO(ulan): Find out why the TOP register is not zero here in some cases, + // and check that the generated code never deoptimizes with unbalanced stack. + __ fnclex(); + + // Remove the bailout id, return address and the double registers. + __ add(esp, Immediate(2 * kPointerSize)); + + // Compute a pointer to the unwinding limit in register ecx; that is + // the first stack slot not part of the input frame. 
+ __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); + __ add(ecx, esp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset())); + Label pop_loop_header; + __ jmp(&pop_loop_header); + Label pop_loop; + __ bind(&pop_loop); + __ pop(Operand(edx, 0)); + __ add(edx, Immediate(sizeof(uint32_t))); + __ bind(&pop_loop_header); + __ cmp(ecx, esp); + __ j(not_equal, &pop_loop); + + // Compute the output frame in the deoptimizer. + __ push(eax); + __ PrepareCallCFunction(1, ebx); + __ mov(Operand(esp, 0 * kPointerSize), eax); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate()), 1); + } + __ pop(eax); + + // If frame was dynamically aligned, pop padding. + Label no_padding; + __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()), + Immediate(0)); + __ j(equal, &no_padding); + __ pop(ecx); + if (FLAG_debug_code) { + __ cmp(ecx, Immediate(kAlignmentZapValue)); + __ Assert(equal, kAlignmentMarkerExpected); + } + __ bind(&no_padding); + + // Replace the current frame with the output frames. + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; + // Outer loop state: eax = current FrameDescription**, edx = one past the + // last FrameDescription**. + __ mov(edx, Operand(eax, Deoptimizer::output_count_offset())); + __ mov(eax, Operand(eax, Deoptimizer::output_offset())); + __ lea(edx, Operand(eax, edx, times_4, 0)); + __ jmp(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: ebx = current FrameDescription*, ecx = loop index. 
+ __ mov(ebx, Operand(eax, 0)); + __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); + __ bind(&inner_push_loop); + __ sub(ecx, Immediate(sizeof(uint32_t))); + __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); + __ bind(&inner_loop_header); + __ test(ecx, ecx); + __ j(not_zero, &inner_push_loop); + __ add(eax, Immediate(kPointerSize)); + __ bind(&outer_loop_header); + __ cmp(eax, edx); + __ j(below, &outer_push_loop); + + // Push state, pc, and continuation from the last output frame. + __ push(Operand(ebx, FrameDescription::state_offset())); + __ push(Operand(ebx, FrameDescription::pc_offset())); + __ push(Operand(ebx, FrameDescription::continuation_offset())); + + + // Push the registers from the last output frame. + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + __ push(Operand(ebx, offset)); + } + + // Restore the registers from the stack. + __ popad(); + + // Return to the continuation point. + __ ret(0); +} + + +void Deoptimizer::TableEntryGenerator::GeneratePrologue() { + // Create a sequence of deoptimization entries. + Label done; + for (int i = 0; i < count(); i++) { + int start = masm()->pc_offset(); + USE(start); + __ push_imm32(i); + __ jmp(&done); + DCHECK(masm()->pc_offset() - start == table_entry_size_); + } + __ bind(&done); +} + + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No out-of-line constant pool support. 
+ UNREACHABLE(); +} + + +#undef __ + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/disasm-x87.cc nodejs-0.11.15/deps/v8/src/x87/disasm-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/disasm-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/disasm-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1775 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <assert.h> +#include <stdarg.h> +#include <stdio.h> + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/disasm.h" + +namespace disasm { + +enum OperandOrder { + UNSET_OP_ORDER = 0, + REG_OPER_OP_ORDER, + OPER_REG_OP_ORDER +}; + + +//------------------------------------------------------------------ +// Tables +//------------------------------------------------------------------ +struct ByteMnemonic { + int b; // -1 terminates, otherwise must be in range (0..255) + const char* mnem; + OperandOrder op_order_; +}; + + +static const ByteMnemonic two_operands_instr[] = { + {0x01, "add", OPER_REG_OP_ORDER}, + {0x03, "add", REG_OPER_OP_ORDER}, + {0x09, "or", OPER_REG_OP_ORDER}, + {0x0B, "or", REG_OPER_OP_ORDER}, + {0x1B, "sbb", REG_OPER_OP_ORDER}, + {0x21, "and", OPER_REG_OP_ORDER}, + {0x23, "and", REG_OPER_OP_ORDER}, + {0x29, "sub", OPER_REG_OP_ORDER}, + {0x2A, "subb", REG_OPER_OP_ORDER}, + {0x2B, "sub", REG_OPER_OP_ORDER}, + {0x31, "xor", OPER_REG_OP_ORDER}, + {0x33, "xor", REG_OPER_OP_ORDER}, + {0x38, "cmpb", OPER_REG_OP_ORDER}, + {0x3A, "cmpb", REG_OPER_OP_ORDER}, + {0x3B, "cmp", REG_OPER_OP_ORDER}, + {0x84, "test_b", REG_OPER_OP_ORDER}, + {0x85, "test", REG_OPER_OP_ORDER}, + {0x87, "xchg", REG_OPER_OP_ORDER}, + {0x8A, "mov_b", REG_OPER_OP_ORDER}, + {0x8B, "mov", REG_OPER_OP_ORDER}, + {0x8D, "lea", REG_OPER_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + +static const ByteMnemonic 
zero_operands_instr[] = { + {0xC3, "ret", UNSET_OP_ORDER}, + {0xC9, "leave", UNSET_OP_ORDER}, + {0x90, "nop", UNSET_OP_ORDER}, + {0xF4, "hlt", UNSET_OP_ORDER}, + {0xCC, "int3", UNSET_OP_ORDER}, + {0x60, "pushad", UNSET_OP_ORDER}, + {0x61, "popad", UNSET_OP_ORDER}, + {0x9C, "pushfd", UNSET_OP_ORDER}, + {0x9D, "popfd", UNSET_OP_ORDER}, + {0x9E, "sahf", UNSET_OP_ORDER}, + {0x99, "cdq", UNSET_OP_ORDER}, + {0x9B, "fwait", UNSET_OP_ORDER}, + {0xFC, "cld", UNSET_OP_ORDER}, + {0xAB, "stos", UNSET_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + +static const ByteMnemonic call_jump_instr[] = { + {0xE8, "call", UNSET_OP_ORDER}, + {0xE9, "jmp", UNSET_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + +static const ByteMnemonic short_immediate_instr[] = { + {0x05, "add", UNSET_OP_ORDER}, + {0x0D, "or", UNSET_OP_ORDER}, + {0x15, "adc", UNSET_OP_ORDER}, + {0x25, "and", UNSET_OP_ORDER}, + {0x2D, "sub", UNSET_OP_ORDER}, + {0x35, "xor", UNSET_OP_ORDER}, + {0x3D, "cmp", UNSET_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + +// Generally we don't want to generate these because they are subject to partial +// register stalls. They are included for completeness and because the cmp +// variant is used by the RecordWrite stub. Because it does not update the +// register it is not subject to partial register stalls. 
+static ByteMnemonic byte_immediate_instr[] = { + {0x0c, "or", UNSET_OP_ORDER}, + {0x24, "and", UNSET_OP_ORDER}, + {0x34, "xor", UNSET_OP_ORDER}, + {0x3c, "cmp", UNSET_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + +static const char* const jump_conditional_mnem[] = { + /*0*/ "jo", "jno", "jc", "jnc", + /*4*/ "jz", "jnz", "jna", "ja", + /*8*/ "js", "jns", "jpe", "jpo", + /*12*/ "jl", "jnl", "jng", "jg" +}; + + +static const char* const set_conditional_mnem[] = { + /*0*/ "seto", "setno", "setc", "setnc", + /*4*/ "setz", "setnz", "setna", "seta", + /*8*/ "sets", "setns", "setpe", "setpo", + /*12*/ "setl", "setnl", "setng", "setg" +}; + + +static const char* const conditional_move_mnem[] = { + /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc", + /*4*/ "cmovz", "cmovnz", "cmovna", "cmova", + /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo", + /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg" +}; + + +enum InstructionType { + NO_INSTR, + ZERO_OPERANDS_INSTR, + TWO_OPERANDS_INSTR, + JUMP_CONDITIONAL_SHORT_INSTR, + REGISTER_INSTR, + MOVE_REG_INSTR, + CALL_JUMP_INSTR, + SHORT_IMMEDIATE_INSTR, + BYTE_IMMEDIATE_INSTR +}; + + +struct InstructionDesc { + const char* mnem; + InstructionType type; + OperandOrder op_order_; +}; + + +class InstructionTable { + public: + InstructionTable(); + const InstructionDesc& Get(byte x) const { return instructions_[x]; } + static InstructionTable* get_instance() { + static InstructionTable table; + return &table; + } + + private: + InstructionDesc instructions_[256]; + void Clear(); + void Init(); + void CopyTable(const ByteMnemonic bm[], InstructionType type); + void SetTableRange(InstructionType type, + byte start, + byte end, + const char* mnem); + void AddJumpConditionalShort(); +}; + + +InstructionTable::InstructionTable() { + Clear(); + Init(); +} + + +void InstructionTable::Clear() { + for (int i = 0; i < 256; i++) { + instructions_[i].mnem = ""; + instructions_[i].type = NO_INSTR; + instructions_[i].op_order_ = UNSET_OP_ORDER; + } +} + + +void 
InstructionTable::Init() { + CopyTable(two_operands_instr, TWO_OPERANDS_INSTR); + CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR); + CopyTable(call_jump_instr, CALL_JUMP_INSTR); + CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR); + CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR); + AddJumpConditionalShort(); + SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc"); + SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec"); + SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push"); + SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop"); + SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop. + SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov"); +} + + +void InstructionTable::CopyTable(const ByteMnemonic bm[], + InstructionType type) { + for (int i = 0; bm[i].b >= 0; i++) { + InstructionDesc* id = &instructions_[bm[i].b]; + id->mnem = bm[i].mnem; + id->op_order_ = bm[i].op_order_; + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered. + id->type = type; + } +} + + +void InstructionTable::SetTableRange(InstructionType type, + byte start, + byte end, + const char* mnem) { + for (byte b = start; b <= end; b++) { + InstructionDesc* id = &instructions_[b]; + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered. + id->mnem = mnem; + id->type = type; + } +} + + +void InstructionTable::AddJumpConditionalShort() { + for (byte b = 0x70; b <= 0x7F; b++) { + InstructionDesc* id = &instructions_[b]; + DCHECK_EQ(NO_INSTR, id->type); // Information not already entered. + id->mnem = jump_conditional_mnem[b & 0x0F]; + id->type = JUMP_CONDITIONAL_SHORT_INSTR; + } +} + + +// The X87 disassembler implementation. 
+class DisassemblerX87 { + public: + DisassemblerX87(const NameConverter& converter, + bool abort_on_unimplemented = true) + : converter_(converter), + instruction_table_(InstructionTable::get_instance()), + tmp_buffer_pos_(0), + abort_on_unimplemented_(abort_on_unimplemented) { + tmp_buffer_[0] = '\0'; + } + + virtual ~DisassemblerX87() {} + + // Writes one disassembled instruction into 'buffer' (0-terminated). + // Returns the length of the disassembled machine instruction in bytes. + int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction); + + private: + const NameConverter& converter_; + InstructionTable* instruction_table_; + v8::internal::EmbeddedVector<char, 128> tmp_buffer_; + unsigned int tmp_buffer_pos_; + bool abort_on_unimplemented_; + + enum { + eax = 0, + ecx = 1, + edx = 2, + ebx = 3, + esp = 4, + ebp = 5, + esi = 6, + edi = 7 + }; + + + enum ShiftOpcodeExtension { + kROL = 0, + kROR = 1, + kRCL = 2, + kRCR = 3, + kSHL = 4, + KSHR = 5, + kSAR = 7 + }; + + + const char* NameOfCPURegister(int reg) const { + return converter_.NameOfCPURegister(reg); + } + + + const char* NameOfByteCPURegister(int reg) const { + return converter_.NameOfByteCPURegister(reg); + } + + + const char* NameOfXMMRegister(int reg) const { + return converter_.NameOfXMMRegister(reg); + } + + + const char* NameOfAddress(byte* addr) const { + return converter_.NameOfAddress(addr); + } + + + // Disassembler helper functions. 
+ static void get_modrm(byte data, int* mod, int* regop, int* rm) { + *mod = (data >> 6) & 3; + *regop = (data & 0x38) >> 3; + *rm = data & 7; + } + + + static void get_sib(byte data, int* scale, int* index, int* base) { + *scale = (data >> 6) & 3; + *index = (data >> 3) & 7; + *base = data & 7; + } + + typedef const char* (DisassemblerX87::*RegisterNameMapping)(int reg) const; + + int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name); + int PrintRightOperand(byte* modrmp); + int PrintRightByteOperand(byte* modrmp); + int PrintRightXMMOperand(byte* modrmp); + int PrintOperands(const char* mnem, OperandOrder op_order, byte* data); + int PrintImmediateOp(byte* data); + int F7Instruction(byte* data); + int D1D3C1Instruction(byte* data); + int JumpShort(byte* data); + int JumpConditional(byte* data, const char* comment); + int JumpConditionalShort(byte* data, const char* comment); + int SetCC(byte* data); + int CMov(byte* data); + int FPUInstruction(byte* data); + int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start); + int RegisterFPUInstruction(int escape_opcode, byte modrm_byte); + void AppendToBuffer(const char* format, ...); + + + void UnimplementedInstruction() { + if (abort_on_unimplemented_) { + UNIMPLEMENTED(); + } else { + AppendToBuffer("'Unimplemented Instruction'"); + } + } +}; + + +void DisassemblerX87::AppendToBuffer(const char* format, ...) { + v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_; + va_list args; + va_start(args, format); + int result = v8::internal::VSNPrintF(buf, format, args); + va_end(args); + tmp_buffer_pos_ += result; +} + +int DisassemblerX87::PrintRightOperandHelper( + byte* modrmp, + RegisterNameMapping direct_register_name) { + int mod, regop, rm; + get_modrm(*modrmp, &mod, ®op, &rm); + RegisterNameMapping register_name = (mod == 3) ? 
direct_register_name : + &DisassemblerX87::NameOfCPURegister; + switch (mod) { + case 0: + if (rm == ebp) { + int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1); + AppendToBuffer("[0x%x]", disp); + return 5; + } else if (rm == esp) { + byte sib = *(modrmp + 1); + int scale, index, base; + get_sib(sib, &scale, &index, &base); + if (index == esp && base == esp && scale == 0 /*times_1*/) { + AppendToBuffer("[%s]", (this->*register_name)(rm)); + return 2; + } else if (base == ebp) { + int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2); + AppendToBuffer("[%s*%d%s0x%x]", + (this->*register_name)(index), + 1 << scale, + disp < 0 ? "-" : "+", + disp < 0 ? -disp : disp); + return 6; + } else if (index != esp && base != ebp) { + // [base+index*scale] + AppendToBuffer("[%s+%s*%d]", + (this->*register_name)(base), + (this->*register_name)(index), + 1 << scale); + return 2; + } else { + UnimplementedInstruction(); + return 1; + } + } else { + AppendToBuffer("[%s]", (this->*register_name)(rm)); + return 1; + } + break; + case 1: // fall through + case 2: + if (rm == esp) { + byte sib = *(modrmp + 1); + int scale, index, base; + get_sib(sib, &scale, &index, &base); + int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) + : *reinterpret_cast<int8_t*>(modrmp + 2); + if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) { + AppendToBuffer("[%s%s0x%x]", + (this->*register_name)(rm), + disp < 0 ? "-" : "+", + disp < 0 ? -disp : disp); + } else { + AppendToBuffer("[%s+%s*%d%s0x%x]", + (this->*register_name)(base), + (this->*register_name)(index), + 1 << scale, + disp < 0 ? "-" : "+", + disp < 0 ? -disp : disp); + } + return mod == 2 ? 6 : 3; + } else { + // No sib. + int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) + : *reinterpret_cast<int8_t*>(modrmp + 1); + AppendToBuffer("[%s%s0x%x]", + (this->*register_name)(rm), + disp < 0 ? "-" : "+", + disp < 0 ? -disp : disp); + return mod == 2 ? 
5 : 2; + } + break; + case 3: + AppendToBuffer("%s", (this->*register_name)(rm)); + return 1; + default: + UnimplementedInstruction(); + return 1; + } + UNREACHABLE(); +} + + +int DisassemblerX87::PrintRightOperand(byte* modrmp) { + return PrintRightOperandHelper(modrmp, &DisassemblerX87::NameOfCPURegister); +} + + +int DisassemblerX87::PrintRightByteOperand(byte* modrmp) { + return PrintRightOperandHelper(modrmp, + &DisassemblerX87::NameOfByteCPURegister); +} + + +int DisassemblerX87::PrintRightXMMOperand(byte* modrmp) { + return PrintRightOperandHelper(modrmp, + &DisassemblerX87::NameOfXMMRegister); +} + + +// Returns number of bytes used including the current *data. +// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'. +int DisassemblerX87::PrintOperands(const char* mnem, + OperandOrder op_order, + byte* data) { + byte modrm = *data; + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + int advance = 0; + switch (op_order) { + case REG_OPER_OP_ORDER: { + AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop)); + advance = PrintRightOperand(data); + break; + } + case OPER_REG_OP_ORDER: { + AppendToBuffer("%s ", mnem); + advance = PrintRightOperand(data); + AppendToBuffer(",%s", NameOfCPURegister(regop)); + break; + } + default: + UNREACHABLE(); + break; + } + return advance; +} + + +// Returns number of bytes used by machine instruction, including *data byte. +// Writes immediate instructions to 'tmp_buffer_'. 
+int DisassemblerX87::PrintImmediateOp(byte* data) { + bool sign_extension_bit = (*data & 0x02) != 0; + byte modrm = *(data+1); + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + const char* mnem = "Imm???"; + switch (regop) { + case 0: mnem = "add"; break; + case 1: mnem = "or"; break; + case 2: mnem = "adc"; break; + case 4: mnem = "and"; break; + case 5: mnem = "sub"; break; + case 6: mnem = "xor"; break; + case 7: mnem = "cmp"; break; + default: UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data+1); + if (sign_extension_bit) { + AppendToBuffer(",0x%x", *(data + 1 + count)); + return 1 + count + 1 /*int8*/; + } else { + AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count)); + return 1 + count + 4 /*int32_t*/; + } +} + + +// Returns number of bytes used, including *data. +int DisassemblerX87::F7Instruction(byte* data) { + DCHECK_EQ(0xF7, *data); + byte modrm = *++data; + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + const char* mnem = NULL; + switch (regop) { + case 0: + mnem = "test"; + break; + case 2: + mnem = "not"; + break; + case 3: + mnem = "neg"; + break; + case 4: + mnem = "mul"; + break; + case 5: + mnem = "imul"; + break; + case 6: + mnem = "div"; + break; + case 7: + mnem = "idiv"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data); + if (regop == 0) { + AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count)); + count += 4; + } + return 1 + count; +} + + +int DisassemblerX87::D1D3C1Instruction(byte* data) { + byte op = *data; + DCHECK(op == 0xD1 || op == 0xD3 || op == 0xC1); + byte modrm = *++data; + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + int imm8 = -1; + const char* mnem = NULL; + switch (regop) { + case kROL: + mnem = "rol"; + break; + case kROR: + mnem = "ror"; + break; + case kRCL: + mnem = "rcl"; + break; + case kRCR: + mnem = "rcr"; + break; + case kSHL: + 
mnem = "shl"; + break; + case KSHR: + mnem = "shr"; + break; + case kSAR: + mnem = "sar"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data); + if (op == 0xD1) { + imm8 = 1; + } else if (op == 0xC1) { + imm8 = *(data + 1); + count++; + } else if (op == 0xD3) { + // Shift/rotate by cl. + } + if (imm8 >= 0) { + AppendToBuffer(",%d", imm8); + } else { + AppendToBuffer(",cl"); + } + return 1 + count; +} + + +// Returns number of bytes used, including *data. +int DisassemblerX87::JumpShort(byte* data) { + DCHECK_EQ(0xEB, *data); + byte b = *(data+1); + byte* dest = data + static_cast<int8_t>(b) + 2; + AppendToBuffer("jmp %s", NameOfAddress(dest)); + return 2; +} + + +// Returns number of bytes used, including *data. +int DisassemblerX87::JumpConditional(byte* data, const char* comment) { + DCHECK_EQ(0x0F, *data); + byte cond = *(data+1) & 0x0F; + byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6; + const char* mnem = jump_conditional_mnem[cond]; + AppendToBuffer("%s %s", mnem, NameOfAddress(dest)); + if (comment != NULL) { + AppendToBuffer(", %s", comment); + } + return 6; // includes 0x0F +} + + +// Returns number of bytes used, including *data. +int DisassemblerX87::JumpConditionalShort(byte* data, const char* comment) { + byte cond = *data & 0x0F; + byte b = *(data+1); + byte* dest = data + static_cast<int8_t>(b) + 2; + const char* mnem = jump_conditional_mnem[cond]; + AppendToBuffer("%s %s", mnem, NameOfAddress(dest)); + if (comment != NULL) { + AppendToBuffer(", %s", comment); + } + return 2; +} + + +// Returns number of bytes used, including *data. +int DisassemblerX87::SetCC(byte* data) { + DCHECK_EQ(0x0F, *data); + byte cond = *(data+1) & 0x0F; + const char* mnem = set_conditional_mnem[cond]; + AppendToBuffer("%s ", mnem); + PrintRightByteOperand(data+2); + return 3; // Includes 0x0F. +} + + +// Returns number of bytes used, including *data. 
+int DisassemblerX87::CMov(byte* data) { + DCHECK_EQ(0x0F, *data); + byte cond = *(data + 1) & 0x0F; + const char* mnem = conditional_move_mnem[cond]; + int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2); + return 2 + op_size; // includes 0x0F +} + + +// Returns number of bytes used, including *data. +int DisassemblerX87::FPUInstruction(byte* data) { + byte escape_opcode = *data; + DCHECK_EQ(0xD8, escape_opcode & 0xF8); + byte modrm_byte = *(data+1); + + if (modrm_byte >= 0xC0) { + return RegisterFPUInstruction(escape_opcode, modrm_byte); + } else { + return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1); + } +} + +int DisassemblerX87::MemoryFPUInstruction(int escape_opcode, + int modrm_byte, + byte* modrm_start) { + const char* mnem = "?"; + int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte. + switch (escape_opcode) { + case 0xD9: switch (regop) { + case 0: mnem = "fld_s"; break; + case 2: mnem = "fst_s"; break; + case 3: mnem = "fstp_s"; break; + case 7: mnem = "fstcw"; break; + default: UnimplementedInstruction(); + } + break; + + case 0xDB: switch (regop) { + case 0: mnem = "fild_s"; break; + case 1: mnem = "fisttp_s"; break; + case 2: mnem = "fist_s"; break; + case 3: mnem = "fistp_s"; break; + default: UnimplementedInstruction(); + } + break; + + case 0xDD: switch (regop) { + case 0: mnem = "fld_d"; break; + case 1: mnem = "fisttp_d"; break; + case 2: mnem = "fst_d"; break; + case 3: mnem = "fstp_d"; break; + default: UnimplementedInstruction(); + } + break; + + case 0xDF: switch (regop) { + case 5: mnem = "fild_d"; break; + case 7: mnem = "fistp_d"; break; + default: UnimplementedInstruction(); + } + break; + + default: UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(modrm_start); + return count + 1; +} + +int DisassemblerX87::RegisterFPUInstruction(int escape_opcode, + byte modrm_byte) { + bool has_register = false; // Is the FPU register encoded in modrm_byte? 
+ const char* mnem = "?"; + + switch (escape_opcode) { + case 0xD8: + has_register = true; + switch (modrm_byte & 0xF8) { + case 0xC0: mnem = "fadd_i"; break; + case 0xE0: mnem = "fsub_i"; break; + case 0xC8: mnem = "fmul_i"; break; + case 0xF0: mnem = "fdiv_i"; break; + default: UnimplementedInstruction(); + } + break; + + case 0xD9: + switch (modrm_byte & 0xF8) { + case 0xC0: + mnem = "fld"; + has_register = true; + break; + case 0xC8: + mnem = "fxch"; + has_register = true; + break; + default: + switch (modrm_byte) { + case 0xE0: mnem = "fchs"; break; + case 0xE1: mnem = "fabs"; break; + case 0xE4: mnem = "ftst"; break; + case 0xE8: mnem = "fld1"; break; + case 0xEB: mnem = "fldpi"; break; + case 0xED: mnem = "fldln2"; break; + case 0xEE: mnem = "fldz"; break; + case 0xF0: mnem = "f2xm1"; break; + case 0xF1: mnem = "fyl2x"; break; + case 0xF4: mnem = "fxtract"; break; + case 0xF5: mnem = "fprem1"; break; + case 0xF7: mnem = "fincstp"; break; + case 0xF8: mnem = "fprem"; break; + case 0xFC: mnem = "frndint"; break; + case 0xFD: mnem = "fscale"; break; + case 0xFE: mnem = "fsin"; break; + case 0xFF: mnem = "fcos"; break; + default: UnimplementedInstruction(); + } + } + break; + + case 0xDA: + if (modrm_byte == 0xE9) { + mnem = "fucompp"; + } else { + UnimplementedInstruction(); + } + break; + + case 0xDB: + if ((modrm_byte & 0xF8) == 0xE8) { + mnem = "fucomi"; + has_register = true; + } else if (modrm_byte == 0xE2) { + mnem = "fclex"; + } else if (modrm_byte == 0xE3) { + mnem = "fninit"; + } else { + UnimplementedInstruction(); + } + break; + + case 0xDC: + has_register = true; + switch (modrm_byte & 0xF8) { + case 0xC0: mnem = "fadd"; break; + case 0xE8: mnem = "fsub"; break; + case 0xC8: mnem = "fmul"; break; + case 0xF8: mnem = "fdiv"; break; + default: UnimplementedInstruction(); + } + break; + + case 0xDD: + has_register = true; + switch (modrm_byte & 0xF8) { + case 0xC0: mnem = "ffree"; break; + case 0xD0: mnem = "fst"; break; + case 0xD8: mnem = "fstp"; 
break; + default: UnimplementedInstruction(); + } + break; + + case 0xDE: + if (modrm_byte == 0xD9) { + mnem = "fcompp"; + } else { + has_register = true; + switch (modrm_byte & 0xF8) { + case 0xC0: mnem = "faddp"; break; + case 0xE8: mnem = "fsubp"; break; + case 0xC8: mnem = "fmulp"; break; + case 0xF8: mnem = "fdivp"; break; + default: UnimplementedInstruction(); + } + } + break; + + case 0xDF: + if (modrm_byte == 0xE0) { + mnem = "fnstsw_ax"; + } else if ((modrm_byte & 0xF8) == 0xE8) { + mnem = "fucomip"; + has_register = true; + } + break; + + default: UnimplementedInstruction(); + } + + if (has_register) { + AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7); + } else { + AppendToBuffer("%s", mnem); + } + return 2; +} + + +// Mnemonics for instructions 0xF0 byte. +// Returns NULL if the instruction is not handled here. +static const char* F0Mnem(byte f0byte) { + switch (f0byte) { + case 0x18: return "prefetch"; + case 0xA2: return "cpuid"; + case 0xBE: return "movsx_b"; + case 0xBF: return "movsx_w"; + case 0xB6: return "movzx_b"; + case 0xB7: return "movzx_w"; + case 0xAF: return "imul"; + case 0xA5: return "shld"; + case 0xAD: return "shrd"; + case 0xAC: return "shrd"; // 3-operand version. + case 0xAB: return "bts"; + case 0xBD: return "bsr"; + default: return NULL; + } +} + + +// Disassembled instruction '*instr' and writes it into 'out_buffer'. +int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer, + byte* instr) { + tmp_buffer_pos_ = 0; // starting to write as position 0 + byte* data = instr; + // Check for hints. + const char* branch_hint = NULL; + // We use these two prefixes only with branch prediction + if (*data == 0x3E /*ds*/) { + branch_hint = "predicted taken"; + data++; + } else if (*data == 0x2E /*cs*/) { + branch_hint = "predicted not taken"; + data++; + } + bool processed = true; // Will be set to false if the current instruction + // is not in 'instructions' table. 
+ const InstructionDesc& idesc = instruction_table_->Get(*data); + switch (idesc.type) { + case ZERO_OPERANDS_INSTR: + AppendToBuffer(idesc.mnem); + data++; + break; + + case TWO_OPERANDS_INSTR: + data++; + data += PrintOperands(idesc.mnem, idesc.op_order_, data); + break; + + case JUMP_CONDITIONAL_SHORT_INSTR: + data += JumpConditionalShort(data, branch_hint); + break; + + case REGISTER_INSTR: + AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07)); + data++; + break; + + case MOVE_REG_INSTR: { + byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1)); + AppendToBuffer("mov %s,%s", + NameOfCPURegister(*data & 0x07), + NameOfAddress(addr)); + data += 5; + break; + } + + case CALL_JUMP_INSTR: { + byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5; + AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr)); + data += 5; + break; + } + + case SHORT_IMMEDIATE_INSTR: { + byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1)); + AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr)); + data += 5; + break; + } + + case BYTE_IMMEDIATE_INSTR: { + AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]); + data += 2; + break; + } + + case NO_INSTR: + processed = false; + break; + + default: + UNIMPLEMENTED(); // This type is not implemented. 
+ } + //---------------------------- + if (!processed) { + switch (*data) { + case 0xC2: + AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1)); + data += 3; + break; + + case 0x6B: { + data++; + data += PrintOperands("imul", REG_OPER_OP_ORDER, data); + AppendToBuffer(",%d", *data); + data++; + } break; + + case 0x69: { + data++; + data += PrintOperands("imul", REG_OPER_OP_ORDER, data); + AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data)); + data += 4; + } + break; + + case 0xF6: + { data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (regop == eax) { + AppendToBuffer("test_b "); + data += PrintRightByteOperand(data); + int32_t imm = *data; + AppendToBuffer(",0x%x", imm); + data++; + } else { + UnimplementedInstruction(); + } + } + break; + + case 0x81: // fall through + case 0x83: // 0x81 with sign extension bit set + data += PrintImmediateOp(data); + break; + + case 0x0F: + { byte f0byte = data[1]; + const char* f0mnem = F0Mnem(f0byte); + if (f0byte == 0x18) { + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + const char* suffix[] = {"nta", "1", "2", "3"}; + AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]); + data += PrintRightOperand(data); + } else if (f0byte == 0x1F && data[2] == 0) { + AppendToBuffer("nop"); // 3 byte nop. + data += 3; + } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) { + AppendToBuffer("nop"); // 4 byte nop. + data += 4; + } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 && + data[4] == 0) { + AppendToBuffer("nop"); // 5 byte nop. + data += 5; + } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 && + data[4] == 0 && data[5] == 0 && data[6] == 0) { + AppendToBuffer("nop"); // 7 byte nop. + data += 7; + } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 && + data[4] == 0 && data[5] == 0 && data[6] == 0 && + data[7] == 0) { + AppendToBuffer("nop"); // 8 byte nop. 
+ data += 8; + } else if (f0byte == 0xA2 || f0byte == 0x31) { + AppendToBuffer("%s", f0mnem); + data += 2; + } else if (f0byte == 0x28) { + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movaps %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (f0byte >= 0x53 && f0byte <= 0x5F) { + const char* const pseudo_op[] = { + "rcpps", + "andps", + "andnps", + "orps", + "xorps", + "addps", + "mulps", + "cvtps2pd", + "cvtdq2ps", + "subps", + "minps", + "divps", + "maxps", + }; + + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("%s %s,", + pseudo_op[f0byte - 0x53], + NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (f0byte == 0x50) { + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movmskps %s,%s", + NameOfCPURegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (f0byte== 0xC6) { + // shufps xmm, xmm/m128, imm8 + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + AppendToBuffer("shufps %s,%s,%d", + NameOfXMMRegister(rm), + NameOfXMMRegister(regop), + static_cast<int>(imm8)); + data += 2; + } else if ((f0byte & 0xF0) == 0x80) { + data += JumpConditional(data, branch_hint); + } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || + f0byte == 0xB7 || f0byte == 0xAF) { + data += 2; + data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data); + } else if ((f0byte & 0xF0) == 0x90) { + data += SetCC(data); + } else if ((f0byte & 0xF0) == 0x40) { + data += CMov(data); + } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) { + // shrd, shld, bts + data += 2; + AppendToBuffer("%s ", f0mnem); + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + data += PrintRightOperand(data); + if (f0byte == 0xAB) { + AppendToBuffer(",%s", NameOfCPURegister(regop)); + } else { + AppendToBuffer(",%s,cl", 
NameOfCPURegister(regop)); + } + } else if (f0byte == 0xBD) { + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop)); + data += PrintRightOperand(data); + } else { + UnimplementedInstruction(); + } + } + break; + + case 0x8F: + { data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (regop == eax) { + AppendToBuffer("pop "); + data += PrintRightOperand(data); + } + } + break; + + case 0xFF: + { data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + const char* mnem = NULL; + switch (regop) { + case esi: mnem = "push"; break; + case eax: mnem = "inc"; break; + case ecx: mnem = "dec"; break; + case edx: mnem = "call"; break; + case esp: mnem = "jmp"; break; + default: mnem = "???"; + } + AppendToBuffer("%s ", mnem); + data += PrintRightOperand(data); + } + break; + + case 0xC7: // imm32, fall through + case 0xC6: // imm8 + { bool is_byte = *data == 0xC6; + data++; + if (is_byte) { + AppendToBuffer("%s ", "mov_b"); + data += PrintRightByteOperand(data); + int32_t imm = *data; + AppendToBuffer(",0x%x", imm); + data++; + } else { + AppendToBuffer("%s ", "mov"); + data += PrintRightOperand(data); + int32_t imm = *reinterpret_cast<int32_t*>(data); + AppendToBuffer(",0x%x", imm); + data += 4; + } + } + break; + + case 0x80: + { data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + const char* mnem = NULL; + switch (regop) { + case 5: mnem = "subb"; break; + case 7: mnem = "cmpb"; break; + default: UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + data += PrintRightByteOperand(data); + int32_t imm = *data; + AppendToBuffer(",0x%x", imm); + data++; + } + break; + + case 0x88: // 8bit, fall through + case 0x89: // 32bit + { bool is_byte = *data == 0x88; + int mod, regop, rm; + data++; + get_modrm(*data, &mod, ®op, &rm); + if (is_byte) { + AppendToBuffer("%s ", "mov_b"); + data += PrintRightByteOperand(data); + AppendToBuffer(",%s", 
NameOfByteCPURegister(regop)); + } else { + AppendToBuffer("%s ", "mov"); + data += PrintRightOperand(data); + AppendToBuffer(",%s", NameOfCPURegister(regop)); + } + } + break; + + case 0x66: // prefix + while (*data == 0x66) data++; + if (*data == 0xf && data[1] == 0x1f) { + AppendToBuffer("nop"); // 0x66 prefix + } else if (*data == 0x90) { + AppendToBuffer("nop"); // 0x66 prefix + } else if (*data == 0x8B) { + data++; + data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data); + } else if (*data == 0x89) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("mov_w "); + data += PrintRightOperand(data); + AppendToBuffer(",%s", NameOfCPURegister(regop)); + } else if (*data == 0xC7) { + data++; + AppendToBuffer("%s ", "mov_w"); + data += PrintRightOperand(data); + int imm = *reinterpret_cast<int16_t*>(data); + AppendToBuffer(",0x%x", imm); + data += 2; + } else if (*data == 0x0F) { + data++; + if (*data == 0x38) { + data++; + if (*data == 0x17) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("ptest %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x2A) { + // movntdqa + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop)); + data += PrintRightOperand(data); + } else { + UnimplementedInstruction(); + } + } else if (*data == 0x3A) { + data++; + if (*data == 0x0B) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + AppendToBuffer("roundsd %s,%s,%d", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm), + static_cast<int>(imm8)); + data += 2; + } else if (*data == 0x16) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + AppendToBuffer("pextrd %s,%s,%d", + NameOfCPURegister(regop), + NameOfXMMRegister(rm), + static_cast<int>(imm8)); + data += 2; + } else 
if (*data == 0x17) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + AppendToBuffer("extractps %s,%s,%d", + NameOfCPURegister(rm), + NameOfXMMRegister(regop), + static_cast<int>(imm8)); + data += 2; + } else if (*data == 0x22) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + AppendToBuffer("pinsrd %s,%s,%d", + NameOfXMMRegister(regop), + NameOfCPURegister(rm), + static_cast<int>(imm8)); + data += 2; + } else { + UnimplementedInstruction(); + } + } else if (*data == 0x2E || *data == 0x2F) { + const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd"; + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (mod == 0x3) { + AppendToBuffer("%s %s,%s", mnem, + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else { + AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop)); + data += PrintRightOperand(data); + } + } else if (*data == 0x50) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movmskpd %s,%s", + NameOfCPURegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x54) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("andpd %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x56) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("orpd %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x57) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("xorpd %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x6E) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movd %s,", NameOfXMMRegister(regop)); + data += PrintRightOperand(data); + } else if (*data == 0x6F) { + data++; + 
int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (*data == 0x70) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + AppendToBuffer("pshufd %s,%s,%d", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm), + static_cast<int>(imm8)); + data += 2; + } else if (*data == 0x76) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("pcmpeqd %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x90) { + data++; + AppendToBuffer("nop"); // 2 byte nop. + } else if (*data == 0xF3) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("psllq %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x73) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + int8_t imm8 = static_cast<int8_t>(data[1]); + DCHECK(regop == esi || regop == edx); + AppendToBuffer("%s %s,%d", + (regop == esi) ? 
"psllq" : "psrlq", + NameOfXMMRegister(rm), + static_cast<int>(imm8)); + data += 2; + } else if (*data == 0xD3) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("psrlq %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0x7F) { + AppendToBuffer("movdqa "); + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + data += PrintRightXMMOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else if (*data == 0x7E) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movd "); + data += PrintRightOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else if (*data == 0xDB) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("pand %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0xE7) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (mod == 3) { + AppendToBuffer("movntdq "); + data += PrintRightOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else { + UnimplementedInstruction(); + } + } else if (*data == 0xEF) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("pxor %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else if (*data == 0xEB) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("por %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } else { + UnimplementedInstruction(); + } + } else { + UnimplementedInstruction(); + } + break; + + case 0xFE: + { data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (regop == ecx) { + AppendToBuffer("dec_b "); + data += PrintRightOperand(data); + } else { + UnimplementedInstruction(); + } + } + break; + + case 0x68: + AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1)); + data += 5; + break; + + 
case 0x6A: + AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1)); + data += 2; + break; + + case 0xA8: + AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1)); + data += 2; + break; + + case 0xA9: + AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1)); + data += 5; + break; + + case 0xD1: // fall through + case 0xD3: // fall through + case 0xC1: + data += D1D3C1Instruction(data); + break; + + case 0xD8: // fall through + case 0xD9: // fall through + case 0xDA: // fall through + case 0xDB: // fall through + case 0xDC: // fall through + case 0xDD: // fall through + case 0xDE: // fall through + case 0xDF: + data += FPUInstruction(data); + break; + + case 0xEB: + data += JumpShort(data); + break; + + case 0xF2: + if (*(data+1) == 0x0F) { + byte b2 = *(data+2); + if (b2 == 0x11) { + AppendToBuffer("movsd "); + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + data += PrintRightXMMOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else if (b2 == 0x10) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movsd %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (b2 == 0x5A) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else { + const char* mnem = "?"; + switch (b2) { + case 0x2A: mnem = "cvtsi2sd"; break; + case 0x2C: mnem = "cvttsd2si"; break; + case 0x2D: mnem = "cvtsd2si"; break; + case 0x51: mnem = "sqrtsd"; break; + case 0x58: mnem = "addsd"; break; + case 0x59: mnem = "mulsd"; break; + case 0x5C: mnem = "subsd"; break; + case 0x5E: mnem = "divsd"; break; + } + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (b2 == 0x2A) { + AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop)); + data += PrintRightOperand(data); + } else if (b2 == 0x2C || b2 == 0x2D) { + 
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop)); + data += PrintRightXMMOperand(data); + } else if (b2 == 0xC2) { + // Intel manual 2A, Table 3-18. + const char* const pseudo_op[] = { + "cmpeqsd", + "cmpltsd", + "cmplesd", + "cmpunordsd", + "cmpneqsd", + "cmpnltsd", + "cmpnlesd", + "cmpordsd" + }; + AppendToBuffer("%s %s,%s", + pseudo_op[data[1]], + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data += 2; + } else { + AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } + } + } else { + UnimplementedInstruction(); + } + break; + + case 0xF3: + if (*(data+1) == 0x0F) { + byte b2 = *(data+2); + if (b2 == 0x11) { + AppendToBuffer("movss "); + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + data += PrintRightXMMOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else if (b2 == 0x10) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movss %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (b2 == 0x2C) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop)); + data += PrintRightXMMOperand(data); + } else if (b2 == 0x5A) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (b2 == 0x6F) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (b2 == 0x7F) { + AppendToBuffer("movdqu "); + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + data += PrintRightXMMOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else { + UnimplementedInstruction(); + } + } else if (*(data+1) == 0xA5) { + data += 2; + AppendToBuffer("rep_movs"); + } else if 
(*(data+1) == 0xAB) { + data += 2; + AppendToBuffer("rep_stos"); + } else { + UnimplementedInstruction(); + } + break; + + case 0xF7: + data += F7Instruction(data); + break; + + default: + UnimplementedInstruction(); + } + } + + if (tmp_buffer_pos_ < sizeof tmp_buffer_) { + tmp_buffer_[tmp_buffer_pos_] = '\0'; + } + + int instr_len = data - instr; + if (instr_len == 0) { + printf("%02x", *data); + } + DCHECK(instr_len > 0); // Ensure progress. + + int outp = 0; + // Instruction bytes. + for (byte* bp = instr; bp < data; bp++) { + outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp); + } + for (int i = 6 - instr_len; i >= 0; i--) { + outp += v8::internal::SNPrintF(out_buffer + outp, " "); + } + + outp += v8::internal::SNPrintF(out_buffer + outp, " %s", tmp_buffer_.start()); + return instr_len; +} // NOLINT (function is too long) + + +//------------------------------------------------------------------------------ + + +static const char* cpu_regs[8] = { + "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi" +}; + + +static const char* byte_cpu_regs[8] = { + "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh" +}; + + +static const char* xmm_regs[8] = { + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" +}; + + +const char* NameConverter::NameOfAddress(byte* addr) const { + v8::internal::SNPrintF(tmp_buffer_, "%p", addr); + return tmp_buffer_.start(); +} + + +const char* NameConverter::NameOfConstant(byte* addr) const { + return NameOfAddress(addr); +} + + +const char* NameConverter::NameOfCPURegister(int reg) const { + if (0 <= reg && reg < 8) return cpu_regs[reg]; + return "noreg"; +} + + +const char* NameConverter::NameOfByteCPURegister(int reg) const { + if (0 <= reg && reg < 8) return byte_cpu_regs[reg]; + return "noreg"; +} + + +const char* NameConverter::NameOfXMMRegister(int reg) const { + if (0 <= reg && reg < 8) return xmm_regs[reg]; + return "noxmmreg"; +} + + +const char* NameConverter::NameInCode(byte* addr) const { + // X87 does not 
embed debug strings at the moment. + UNREACHABLE(); + return ""; +} + + +//------------------------------------------------------------------------------ + +Disassembler::Disassembler(const NameConverter& converter) + : converter_(converter) {} + + +Disassembler::~Disassembler() {} + + +int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer, + byte* instruction) { + DisassemblerX87 d(converter_, false /*do not crash if unimplemented*/); + return d.InstructionDecode(buffer, instruction); +} + + +// The IA-32 assembler does not currently use constant pools. +int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; } + + +/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) { + NameConverter converter; + Disassembler d(converter); + for (byte* pc = begin; pc < end;) { + v8::internal::EmbeddedVector<char, 128> buffer; + buffer[0] = '\0'; + byte* prev_pc = pc; + pc += d.InstructionDecode(buffer, pc); + fprintf(f, "%p", prev_pc); + fprintf(f, " "); + + for (byte* bp = prev_pc; bp < pc; bp++) { + fprintf(f, "%02x", *bp); + } + for (int i = 6 - (pc - prev_pc); i >= 0; i--) { + fprintf(f, " "); + } + fprintf(f, " %s\n", buffer.start()); + } +} + + +} // namespace disasm + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/frames-x87.cc nodejs-0.11.15/deps/v8/src/x87/frames-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/frames-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/frames-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/assembler.h" +#include "src/frames.h" +#include "src/x87/assembler-x87-inl.h" +#include "src/x87/assembler-x87.h" + +namespace v8 { +namespace internal { + + +Register JavaScriptFrame::fp_register() { return ebp; } +Register JavaScriptFrame::context_register() { return esi; } +Register JavaScriptFrame::constant_pool_pointer_register() { + UNREACHABLE(); + return no_reg; +} + + +Register StubFailureTrampolineFrame::fp_register() { return ebp; } +Register StubFailureTrampolineFrame::context_register() { return esi; } +Register StubFailureTrampolineFrame::constant_pool_pointer_register() { + UNREACHABLE(); + return no_reg; +} + + +Object*& ExitFrame::constant_pool_slot() const { + UNREACHABLE(); + return Memory::Object_at(NULL); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/frames-x87.h nodejs-0.11.15/deps/v8/src/x87/frames-x87.h --- nodejs-0.11.13/deps/v8/src/x87/frames-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/frames-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,125 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_FRAMES_X87_H_ +#define V8_X87_FRAMES_X87_H_ + +namespace v8 { +namespace internal { + + +// Register lists +// Note that the bit values must match those used in actual instruction encoding +const int kNumRegs = 8; + + +// Caller-saved registers +const RegList kJSCallerSaved = + 1 << 0 | // eax + 1 << 1 | // ecx + 1 << 2 | // edx + 1 << 3 | // ebx - used as a caller-saved register in JavaScript code + 1 << 7; // edi - callee function + +const int kNumJSCallerSaved = 5; + + +// Number of registers for which space is reserved in safepoints. 
+const int kNumSafepointRegisters = 8; + +const int kNoAlignmentPadding = 0; +const int kAlignmentPaddingPushed = 2; +const int kAlignmentZapValue = 0x12345678; // Not heap object tagged. + +// ---------------------------------------------------- + + +class EntryFrameConstants : public AllStatic { + public: + static const int kCallerFPOffset = -6 * kPointerSize; + + static const int kFunctionArgOffset = +3 * kPointerSize; + static const int kReceiverArgOffset = +4 * kPointerSize; + static const int kArgcOffset = +5 * kPointerSize; + static const int kArgvOffset = +6 * kPointerSize; +}; + + +class ExitFrameConstants : public AllStatic { + public: + static const int kFrameSize = 2 * kPointerSize; + + static const int kCodeOffset = -2 * kPointerSize; + static const int kSPOffset = -1 * kPointerSize; + + static const int kCallerFPOffset = 0 * kPointerSize; + static const int kCallerPCOffset = +1 * kPointerSize; + + // FP-relative displacement of the caller's SP. It points just + // below the saved PC. + static const int kCallerSPDisplacement = +2 * kPointerSize; + + static const int kConstantPoolOffset = 0; // Not used +}; + + +class JavaScriptFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset; + static const int kLastParameterOffset = +2 * kPointerSize; + static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; + + // Caller SP-relative. + static const int kParam0Offset = -2 * kPointerSize; + static const int kReceiverOffset = -1 * kPointerSize; + + static const int kDynamicAlignmentStateOffset = kLocal0Offset; +}; + + +class ArgumentsAdaptorFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + kPointerSize; +}; + + +class ConstructFrameConstants : public AllStatic { + public: + // FP-relative. 
+ static const int kImplicitReceiverOffset = -5 * kPointerSize; + static const int kConstructorOffset = kMinInt; + static const int kLengthOffset = -4 * kPointerSize; + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize; +}; + + +class InternalFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; +}; + + +inline Object* JavaScriptFrame::function_slot_object() const { + const int offset = JavaScriptFrameConstants::kFunctionOffset; + return Memory::Object_at(fp() + offset); +} + + +inline void StackHandler::SetFp(Address slot, Address fp) { + Memory::Address_at(slot) = fp; +} + + +} } // namespace v8::internal + +#endif // V8_X87_FRAMES_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/full-codegen-x87.cc nodejs-0.11.15/deps/v8/src/x87/full-codegen-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/full-codegen-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/full-codegen-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4827 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/compiler.h" +#include "src/debug.h" +#include "src/full-codegen.h" +#include "src/isolate-inl.h" +#include "src/parser.h" +#include "src/scopes.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm_) + + +class JumpPatchSite BASE_EMBEDDED { + public: + explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) { +#ifdef DEBUG + info_emitted_ = false; +#endif + } + + ~JumpPatchSite() { + DCHECK(patch_site_.is_bound() == info_emitted_); + } + + void EmitJumpIfNotSmi(Register reg, + Label* target, + Label::Distance distance = Label::kFar) { + __ test(reg, Immediate(kSmiTagMask)); + EmitJump(not_carry, target, distance); // Always taken before patched. + } + + void EmitJumpIfSmi(Register reg, + Label* target, + Label::Distance distance = Label::kFar) { + __ test(reg, Immediate(kSmiTagMask)); + EmitJump(carry, target, distance); // Never taken before patched. + } + + void EmitPatchInfo() { + if (patch_site_.is_bound()) { + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_); + DCHECK(is_uint8(delta_to_patch_site)); + __ test(eax, Immediate(delta_to_patch_site)); +#ifdef DEBUG + info_emitted_ = true; +#endif + } else { + __ nop(); // Signals no inlined code. + } + } + + private: + // jc will be patched with jz, jnc will become jnz. + void EmitJump(Condition cc, Label* target, Label::Distance distance) { + DCHECK(!patch_site_.is_bound() && !info_emitted_); + DCHECK(cc == carry || cc == not_carry); + __ bind(&patch_site_); + __ j(cc, target, distance); + } + + MacroAssembler* masm_; + Label patch_site_; +#ifdef DEBUG + bool info_emitted_; +#endif +}; + + +// Generate code for a JS function. On entry to the function the receiver +// and arguments have been pushed on the stack left to right, with the +// return address on top of them. 
The actual argument count matches the +// formal parameter count expected by the function. +// +// The live registers are: +// o edi: the JS function object being called (i.e. ourselves) +// o esi: our context +// o ebp: our caller's frame pointer +// o esp: stack pointer (pointing to return address) +// +// The function builds a JS frame. Please see JavaScriptFrameConstants in +// frames-x87.h for its layout. +void FullCodeGenerator::Generate() { + CompilationInfo* info = info_; + handler_table_ = + isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); + + profiling_counter_ = isolate()->factory()->NewCell( + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); + SetFunctionPosition(function()); + Comment cmnt(masm_, "[ function compiled by full code generator"); + + ProfileEntryHookStub::MaybeCallEntryHook(masm_); + +#ifdef DEBUG + if (strlen(FLAG_stop_at) > 0 && + info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ int3(); + } +#endif + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + if (info->strict_mode() == SLOPPY && !info->is_native()) { + Label ok; + // +1 for return address. + int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize; + __ mov(ecx, Operand(esp, receiver_offset)); + + __ cmp(ecx, isolate()->factory()->undefined_value()); + __ j(not_equal, &ok, Label::kNear); + + __ mov(ecx, GlobalObjectOperand()); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset)); + + __ mov(Operand(esp, receiver_offset), ecx); + + __ bind(&ok); + } + + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). 
+ FrameScope frame_scope(masm_, StackFrame::MANUAL); + + info->set_prologue_offset(masm_->pc_offset()); + __ Prologue(info->IsCodePreAgingActive()); + info->AddNoFrameRange(0, masm_->pc_offset()); + + { Comment cmnt(masm_, "[ Allocate locals"); + int locals_count = info->scope()->num_stack_slots(); + // Generators allocate locals, if any, in context slots. + DCHECK(!info->function()->is_generator() || locals_count == 0); + if (locals_count == 1) { + __ push(Immediate(isolate()->factory()->undefined_value())); + } else if (locals_count > 1) { + if (locals_count >= 128) { + Label ok; + __ mov(ecx, esp); + __ sub(ecx, Immediate(locals_count * kPointerSize)); + ExternalReference stack_limit = + ExternalReference::address_of_real_stack_limit(isolate()); + __ cmp(ecx, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + __ bind(&ok); + } + __ mov(eax, Immediate(isolate()->factory()->undefined_value())); + const int kMaxPushes = 32; + if (locals_count >= kMaxPushes) { + int loop_iterations = locals_count / kMaxPushes; + __ mov(ecx, loop_iterations); + Label loop_header; + __ bind(&loop_header); + // Do pushes. + for (int i = 0; i < kMaxPushes; i++) { + __ push(eax); + } + __ dec(ecx); + __ j(not_zero, &loop_header, Label::kNear); + } + int remaining = locals_count % kMaxPushes; + // Emit the remaining pushes. + for (int i = 0; i < remaining; i++) { + __ push(eax); + } + } + } + + bool function_in_register = true; + + // Possibly allocate a local context. + int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment cmnt(masm_, "[ Allocate context"); + bool need_write_barrier = true; + // Argument to NewContext is the function, which is still in edi. 
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { + __ push(edi); + __ Push(info->scope()->GetScopeInfo()); + __ CallRuntime(Runtime::kNewGlobalContext, 2); + } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(isolate(), heap_slots); + __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; + } else { + __ push(edi); + __ CallRuntime(Runtime::kNewFunctionContext, 1); + } + function_in_register = false; + // Context is returned in eax. It replaces the context passed to us. + // It's saved in the stack and kept live in esi. + __ mov(esi, eax); + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax); + + // Copy parameters into context if necessary. + int num_parameters = info->scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Variable* var = scope()->parameter(i); + if (var->IsContextSlot()) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ mov(eax, Operand(ebp, parameter_offset)); + // Store it in the context. + int context_offset = Context::SlotOffset(var->index()); + __ mov(Operand(esi, context_offset), eax); + // Update the write barrier. This clobbers eax and ebx. + if (need_write_barrier) { + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } + } + } + } + + Variable* arguments = scope()->arguments(); + if (arguments != NULL) { + // Function uses arguments object. + Comment cmnt(masm_, "[ Allocate arguments object"); + if (function_in_register) { + __ push(edi); + } else { + __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + } + // Receiver is just before the parameters on the caller's stack. 
+ int num_parameters = info->scope()->num_parameters(); + int offset = num_parameters * kPointerSize; + __ lea(edx, + Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset)); + __ push(edx); + __ push(Immediate(Smi::FromInt(num_parameters))); + // Arguments to ArgumentsAccessStub: + // function, receiver address, parameter count. + // The stub will rewrite receiver and parameter count if the previous + // stack frame was an arguments adapter frame. + ArgumentsAccessStub::Type type; + if (strict_mode() == STRICT) { + type = ArgumentsAccessStub::NEW_STRICT; + } else if (function()->has_duplicate_parameters()) { + type = ArgumentsAccessStub::NEW_SLOPPY_SLOW; + } else { + type = ArgumentsAccessStub::NEW_SLOPPY_FAST; + } + ArgumentsAccessStub stub(isolate(), type); + __ CallStub(&stub); + + SetVar(arguments, eax, ebx, edx); + } + + if (FLAG_trace) { + __ CallRuntime(Runtime::kTraceEnter, 0); + } + + // Visit the declarations and body unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); + + } else { + PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS); + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. 
+ if (scope()->is_function_scope() && scope()->function() != NULL) { + VariableDeclaration* function = scope()->function(); + DCHECK(function->proxy()->var()->mode() == CONST || + function->proxy()->var()->mode() == CONST_LEGACY); + DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED); + VisitVariableDeclaration(function); + } + VisitDeclarations(scope()->declarations()); + } + + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); + Label ok; + ExternalReference stack_limit + = ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + DCHECK(loop_depth() == 0); + VisitStatements(function()->body()); + DCHECK(loop_depth() == 0); + } + } + + // Always emit a 'return undefined' in case control fell off the end of + // the body. 
+ { Comment cmnt(masm_, "[ return <undefined>;"); + __ mov(eax, isolate()->factory()->undefined_value()); + EmitReturnSequence(); + } +} + + +void FullCodeGenerator::ClearAccumulator() { + __ Move(eax, Immediate(Smi::FromInt(0))); +} + + +void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) { + __ mov(ebx, Immediate(profiling_counter_)); + __ sub(FieldOperand(ebx, Cell::kValueOffset), + Immediate(Smi::FromInt(delta))); +} + + +void FullCodeGenerator::EmitProfilingCounterReset() { + int reset_value = FLAG_interrupt_budget; + __ mov(ebx, Immediate(profiling_counter_)); + __ mov(FieldOperand(ebx, Cell::kValueOffset), + Immediate(Smi::FromInt(reset_value))); +} + + +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { + Comment cmnt(masm_, "[ Back edge bookkeeping"); + Label ok; + + DCHECK(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + int weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kCodeSizeMultiplier)); + EmitProfilingCounterDecrement(weight); + __ j(positive, &ok, Label::kNear); + __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET); + + // Record a mapping of this PC offset to the OSR id. This is used to find + // the AST id from the unoptimized code in order to use it as a key into + // the deoptimization input data found in the optimized code. + RecordBackEdge(stmt->OsrEntryId()); + + EmitProfilingCounterReset(); + + __ bind(&ok); + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); + // Record a mapping of the OSR id to this PC. This is used if the OSR + // entry becomes the target of a bailout. We don't expect it to be, but + // we want it to work if it is. 
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); +} + + +void FullCodeGenerator::EmitReturnSequence() { + Comment cmnt(masm_, "[ Return sequence"); + if (return_label_.is_bound()) { + __ jmp(&return_label_); + } else { + // Common return label + __ bind(&return_label_); + if (FLAG_trace) { + __ push(eax); + __ CallRuntime(Runtime::kTraceExit, 1); + } + // Pretend that the exit is a backwards jump to the entry. + int weight = 1; + if (info_->ShouldSelfOptimize()) { + weight = FLAG_interrupt_budget / FLAG_self_opt_count; + } else { + int distance = masm_->pc_offset(); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kCodeSizeMultiplier)); + } + EmitProfilingCounterDecrement(weight); + Label ok; + __ j(positive, &ok, Label::kNear); + __ push(eax); + __ call(isolate()->builtins()->InterruptCheck(), + RelocInfo::CODE_TARGET); + __ pop(eax); + EmitProfilingCounterReset(); + __ bind(&ok); +#ifdef DEBUG + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); +#endif + SetSourcePosition(function()->end_position() - 1); + __ RecordJSReturn(); + // Do not use the leave instruction here because it is too short to + // patch with the code required by the debugger. + __ mov(esp, ebp); + int no_frame_start = masm_->pc_offset(); + __ pop(ebp); + + int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize; + __ Ret(arguments_bytes, ecx); + // Check that the size of the code used for returning is large enough + // for the debugger's requirements. 
+ DCHECK(Assembler::kJSReturnSequenceLength <= + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); + info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); + } +} + + +void FullCodeGenerator::EffectContext::Plug(Variable* var) const { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + codegen()->GetVar(result_register(), var); +} + + +void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + MemOperand operand = codegen()->VarOperand(var, result_register()); + // Memory operands can be pushed directly. + __ push(operand); +} + + +void FullCodeGenerator::TestContext::Plug(Variable* var) const { + // For simplicity we always test the accumulator register. + codegen()->GetVar(result_register(), var); + codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); + codegen()->DoTest(this); +} + + +void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const { + UNREACHABLE(); // Not used on X87. +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug( + Heap::RootListIndex index) const { + UNREACHABLE(); // Not used on X87. +} + + +void FullCodeGenerator::StackValueContext::Plug( + Heap::RootListIndex index) const { + UNREACHABLE(); // Not used on X87. +} + + +void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const { + UNREACHABLE(); // Not used on X87. 
+} + + +void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const { +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug( + Handle<Object> lit) const { + if (lit->IsSmi()) { + __ SafeMove(result_register(), Immediate(lit)); + } else { + __ Move(result_register(), Immediate(lit)); + } +} + + +void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const { + if (lit->IsSmi()) { + __ SafePush(Immediate(lit)); + } else { + __ push(Immediate(lit)); + } +} + + +void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const { + codegen()->PrepareForBailoutBeforeSplit(condition(), + true, + true_label_, + false_label_); + DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals. + if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { + if (false_label_ != fall_through_) __ jmp(false_label_); + } else if (lit->IsTrue() || lit->IsJSObject()) { + if (true_label_ != fall_through_) __ jmp(true_label_); + } else if (lit->IsString()) { + if (String::cast(*lit)->length() == 0) { + if (false_label_ != fall_through_) __ jmp(false_label_); + } else { + if (true_label_ != fall_through_) __ jmp(true_label_); + } + } else if (lit->IsSmi()) { + if (Smi::cast(*lit)->value() == 0) { + if (false_label_ != fall_through_) __ jmp(false_label_); + } else { + if (true_label_ != fall_through_) __ jmp(true_label_); + } + } else { + // For simplicity we always test the accumulator register. 
+ __ mov(result_register(), lit); + codegen()->DoTest(this); + } +} + + +void FullCodeGenerator::EffectContext::DropAndPlug(int count, + Register reg) const { + DCHECK(count > 0); + __ Drop(count); +} + + +void FullCodeGenerator::AccumulatorValueContext::DropAndPlug( + int count, + Register reg) const { + DCHECK(count > 0); + __ Drop(count); + __ Move(result_register(), reg); +} + + +void FullCodeGenerator::StackValueContext::DropAndPlug(int count, + Register reg) const { + DCHECK(count > 0); + if (count > 1) __ Drop(count - 1); + __ mov(Operand(esp, 0), reg); +} + + +void FullCodeGenerator::TestContext::DropAndPlug(int count, + Register reg) const { + DCHECK(count > 0); + // For simplicity we always test the accumulator register. + __ Drop(count); + __ Move(result_register(), reg); + codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); + codegen()->DoTest(this); +} + + +void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, + Label* materialize_false) const { + DCHECK(materialize_true == materialize_false); + __ bind(materialize_true); +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug( + Label* materialize_true, + Label* materialize_false) const { + Label done; + __ bind(materialize_true); + __ mov(result_register(), isolate()->factory()->true_value()); + __ jmp(&done, Label::kNear); + __ bind(materialize_false); + __ mov(result_register(), isolate()->factory()->false_value()); + __ bind(&done); +} + + +void FullCodeGenerator::StackValueContext::Plug( + Label* materialize_true, + Label* materialize_false) const { + Label done; + __ bind(materialize_true); + __ push(Immediate(isolate()->factory()->true_value())); + __ jmp(&done, Label::kNear); + __ bind(materialize_false); + __ push(Immediate(isolate()->factory()->false_value())); + __ bind(&done); +} + + +void FullCodeGenerator::TestContext::Plug(Label* materialize_true, + Label* materialize_false) const { + DCHECK(materialize_true == true_label_); + 
DCHECK(materialize_false == false_label_); +} + + +void FullCodeGenerator::EffectContext::Plug(bool flag) const { +} + + +void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const { + Handle<Object> value = flag + ? isolate()->factory()->true_value() + : isolate()->factory()->false_value(); + __ mov(result_register(), value); +} + + +void FullCodeGenerator::StackValueContext::Plug(bool flag) const { + Handle<Object> value = flag + ? isolate()->factory()->true_value() + : isolate()->factory()->false_value(); + __ push(Immediate(value)); +} + + +void FullCodeGenerator::TestContext::Plug(bool flag) const { + codegen()->PrepareForBailoutBeforeSplit(condition(), + true, + true_label_, + false_label_); + if (flag) { + if (true_label_ != fall_through_) __ jmp(true_label_); + } else { + if (false_label_ != fall_through_) __ jmp(false_label_); + } +} + + +void FullCodeGenerator::DoTest(Expression* condition, + Label* if_true, + Label* if_false, + Label* fall_through) { + Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate()); + CallIC(ic, condition->test_id()); + __ test(result_register(), result_register()); + // The stub returns nonzero for true. + Split(not_zero, if_true, if_false, fall_through); +} + + +void FullCodeGenerator::Split(Condition cc, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (if_false == fall_through) { + __ j(cc, if_true); + } else if (if_true == fall_through) { + __ j(NegateCondition(cc), if_false); + } else { + __ j(cc, if_true); + __ jmp(if_false); + } +} + + +MemOperand FullCodeGenerator::StackOperand(Variable* var) { + DCHECK(var->IsStackAllocated()); + // Offset is negative because higher indexes are at lower addresses. + int offset = -var->index() * kPointerSize; + // Adjust by a (parameter or local) base offset. 
+ if (var->IsParameter()) { + offset += (info_->scope()->num_parameters() + 1) * kPointerSize; + } else { + offset += JavaScriptFrameConstants::kLocal0Offset; + } + return Operand(ebp, offset); +} + + +MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + if (var->IsContextSlot()) { + int context_chain_length = scope()->ContextChainLength(var->scope()); + __ LoadContext(scratch, context_chain_length); + return ContextOperand(scratch, var->index()); + } else { + return StackOperand(var); + } +} + + +void FullCodeGenerator::GetVar(Register dest, Variable* var) { + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + MemOperand location = VarOperand(var, dest); + __ mov(dest, location); +} + + +void FullCodeGenerator::SetVar(Variable* var, + Register src, + Register scratch0, + Register scratch1) { + DCHECK(var->IsContextSlot() || var->IsStackAllocated()); + DCHECK(!scratch0.is(src)); + DCHECK(!scratch0.is(scratch1)); + DCHECK(!scratch1.is(src)); + MemOperand location = VarOperand(var, scratch0); + __ mov(location, src); + + // Emit the write barrier code if the location is in the heap. + if (var->IsContextSlot()) { + int offset = Context::SlotOffset(var->index()); + DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); + __ RecordWriteContextSlot(scratch0, offset, src, scratch1); + } +} + + +void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, + bool should_normalize, + Label* if_true, + Label* if_false) { + // Only prepare for bailouts before splits if we're in a test + // context. Otherwise, we let the Visit function deal with the + // preparation to avoid preparing with the same AST id twice. 
+ if (!context()->IsTest() || !info_->IsOptimizable()) return; + + Label skip; + if (should_normalize) __ jmp(&skip, Label::kNear); + PrepareForBailout(expr, TOS_REG); + if (should_normalize) { + __ cmp(eax, isolate()->factory()->true_value()); + Split(equal, if_true, if_false, NULL); + __ bind(&skip); + } +} + + +void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { + // The variable in the declaration always resides in the current context. + DCHECK_EQ(0, scope()->ContextChainLength(variable->scope())); + if (generate_debug_code_) { + // Check that we're not inside a with or catch context. + __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset)); + __ cmp(ebx, isolate()->factory()->with_context_map()); + __ Check(not_equal, kDeclarationInWithContext); + __ cmp(ebx, isolate()->factory()->catch_context_map()); + __ Check(not_equal, kDeclarationInCatchContext); + } +} + + +void FullCodeGenerator::VisitVariableDeclaration( + VariableDeclaration* declaration) { + // If it was not possible to allocate the variable at compile time, we + // need to "declare" it at runtime to make sure it actually exists in the + // local context. + VariableProxy* proxy = declaration->proxy(); + VariableMode mode = declaration->mode(); + Variable* variable = proxy->var(); + bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY; + switch (variable->location()) { + case Variable::UNALLOCATED: + globals_->Add(variable->name(), zone()); + globals_->Add(variable->binding_needs_init() + ? 
isolate()->factory()->the_hole_value() + : isolate()->factory()->undefined_value(), zone()); + break; + + case Variable::PARAMETER: + case Variable::LOCAL: + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); + __ mov(StackOperand(variable), + Immediate(isolate()->factory()->the_hole_value())); + } + break; + + case Variable::CONTEXT: + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); + EmitDebugCheckDeclarationContext(variable); + __ mov(ContextOperand(esi, variable->index()), + Immediate(isolate()->factory()->the_hole_value())); + // No write barrier since the hole value is in old space. + PrepareForBailoutForId(proxy->id(), NO_REGISTERS); + } + break; + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ VariableDeclaration"); + __ push(esi); + __ push(Immediate(variable->name())); + // VariableDeclaration nodes are always introduced in one of four modes. + DCHECK(IsDeclaredVariableMode(mode)); + PropertyAttributes attr = + IsImmutableVariableMode(mode) ? READ_ONLY : NONE; + __ push(Immediate(Smi::FromInt(attr))); + // Push initial value, if any. + // Note: For variables we must not push an initial value (such as + // 'undefined') because we may have a (legal) redeclaration and we + // must not destroy the current value. + if (hole_init) { + __ push(Immediate(isolate()->factory()->the_hole_value())); + } else { + __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value. + } + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); + break; + } + } +} + + +void FullCodeGenerator::VisitFunctionDeclaration( + FunctionDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + globals_->Add(variable->name(), zone()); + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(declaration->fun(), script(), info_); + // Check for stack-overflow exception. 
+ if (function.is_null()) return SetStackOverflow(); + globals_->Add(function, zone()); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + VisitForAccumulatorValue(declaration->fun()); + __ mov(StackOperand(variable), result_register()); + break; + } + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + EmitDebugCheckDeclarationContext(variable); + VisitForAccumulatorValue(declaration->fun()); + __ mov(ContextOperand(esi, variable->index()), result_register()); + // We know that we have written a function, which is not a smi. + __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + result_register(), + ecx, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(proxy->id(), NO_REGISTERS); + break; + } + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + __ push(esi); + __ push(Immediate(variable->name())); + __ push(Immediate(Smi::FromInt(NONE))); + VisitForStackValue(declaration->fun()); + __ CallRuntime(Runtime::kDeclareLookupSlot, 4); + break; + } + } +} + + +void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { + Variable* variable = declaration->proxy()->var(); + DCHECK(variable->location() == Variable::CONTEXT); + DCHECK(variable->interface()->IsFrozen()); + + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); + + // Load instance object. + __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope())); + __ mov(eax, ContextOperand(eax, variable->interface()->Index())); + __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX)); + + // Assign it. + __ mov(ContextOperand(esi, variable->index()), eax); + // We know that we have written a module, which is not a smi. 
+ __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + eax, + ecx, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); +} + + +void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: + // TODO(rossberg) + break; + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ ImportDeclaration"); + EmitDebugCheckDeclarationContext(variable); + // TODO(rossberg) + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::LOOKUP: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) { + // TODO(rossberg) +} + + +void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { + // Call the runtime to declare the globals. + __ push(esi); // The context is the first argument. + __ Push(pairs); + __ Push(Smi::FromInt(DeclareGlobalsFlags())); + __ CallRuntime(Runtime::kDeclareGlobals, 3); + // Return value is ignored. +} + + +void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. +} + + +void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + Comment cmnt(masm_, "[ SwitchStatement"); + Breakable nested_statement(this, stmt); + SetStatementPosition(stmt); + + // Keep the switch value on the stack until a case matches. + VisitForStackValue(stmt->tag()); + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); + + ZoneList<CaseClause*>* clauses = stmt->cases(); + CaseClause* default_clause = NULL; // Can occur anywhere in the list. + + Label next_test; // Recycled for each test. 
+ // Compile all the tests with branches to their bodies. + for (int i = 0; i < clauses->length(); i++) { + CaseClause* clause = clauses->at(i); + clause->body_target()->Unuse(); + + // The default is not a test, but remember it as final fall through. + if (clause->is_default()) { + default_clause = clause; + continue; + } + + Comment cmnt(masm_, "[ Case comparison"); + __ bind(&next_test); + next_test.Unuse(); + + // Compile the label expression. + VisitForAccumulatorValue(clause->label()); + + // Perform the comparison as if via '==='. + __ mov(edx, Operand(esp, 0)); // Switch value. + bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); + JumpPatchSite patch_site(masm_); + if (inline_smi_code) { + Label slow_case; + __ mov(ecx, edx); + __ or_(ecx, eax); + patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); + + __ cmp(edx, eax); + __ j(not_equal, &next_test); + __ Drop(1); // Switch value is no longer needed. + __ jmp(clause->body_target()); + __ bind(&slow_case); + } + + // Record position before stub call for type feedback. + SetSourcePosition(clause->position()); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); + CallIC(ic, clause->CompareId()); + patch_site.EmitPatchInfo(); + + Label skip; + __ jmp(&skip, Label::kNear); + PrepareForBailout(clause, TOS_REG); + __ cmp(eax, isolate()->factory()->true_value()); + __ j(not_equal, &next_test); + __ Drop(1); + __ jmp(clause->body_target()); + __ bind(&skip); + + __ test(eax, eax); + __ j(not_equal, &next_test); + __ Drop(1); // Switch value is no longer needed. + __ jmp(clause->body_target()); + } + + // Discard the test value and jump to the default if present, otherwise to + // the end of the statement. + __ bind(&next_test); + __ Drop(1); // Switch value is no longer needed. + if (default_clause == NULL) { + __ jmp(nested_statement.break_label()); + } else { + __ jmp(default_clause->body_target()); + } + + // Compile all the case bodies. 
+ for (int i = 0; i < clauses->length(); i++) { + Comment cmnt(masm_, "[ Case body"); + CaseClause* clause = clauses->at(i); + __ bind(clause->body_target()); + PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS); + VisitStatements(clause->statements()); + } + + __ bind(nested_statement.break_label()); + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); +} + + +void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { + Comment cmnt(masm_, "[ ForInStatement"); + int slot = stmt->ForInFeedbackSlot(); + + SetStatementPosition(stmt); + + Label loop, exit; + ForIn loop_statement(this, stmt); + increment_loop_depth(); + + // Get the object to enumerate over. If the object is null or undefined, skip + // over the loop. See ECMA-262 version 5, section 12.6.4. + VisitForAccumulatorValue(stmt->enumerable()); + __ cmp(eax, isolate()->factory()->undefined_value()); + __ j(equal, &exit); + __ cmp(eax, isolate()->factory()->null_value()); + __ j(equal, &exit); + + PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); + + // Convert the object to a JS object. + Label convert, done_convert; + __ JumpIfSmi(eax, &convert, Label::kNear); + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &done_convert, Label::kNear); + __ bind(&convert); + __ push(eax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ bind(&done_convert); + __ push(eax); + + // Check for proxies. + Label call_runtime, use_cache, fixed_array; + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); + __ j(below_equal, &call_runtime); + + // Check cache validity in generated code. This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. 
+ __ CheckEnumCache(&call_runtime); + + __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); + __ jmp(&use_cache, Label::kNear); + + // Get the set of properties to enumerate. + __ bind(&call_runtime); + __ push(eax); + __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->meta_map()); + __ j(not_equal, &fixed_array); + + + // We got a map in register eax. Get the enumeration cache from it. + Label no_descriptors; + __ bind(&use_cache); + + __ EnumLength(edx, eax); + __ cmp(edx, Immediate(Smi::FromInt(0))); + __ j(equal, &no_descriptors); + + __ LoadInstanceDescriptors(eax, ecx); + __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset)); + __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset)); + + // Set up the four remaining stack slots. + __ push(eax); // Map. + __ push(ecx); // Enumeration cache. + __ push(edx); // Number of valid entries for the map in the enum cache. + __ push(Immediate(Smi::FromInt(0))); // Initial index. + __ jmp(&loop); + + __ bind(&no_descriptors); + __ add(esp, Immediate(kPointerSize)); + __ jmp(&exit); + + // We got a fixed array in register eax. Iterate through that. + Label non_proxy; + __ bind(&fixed_array); + + // No need for a write barrier, we are storing a Smi in the feedback vector. 
+ __ LoadHeapObject(ebx, FeedbackVector()); + __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)), + Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); + + __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check + __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx); + __ j(above, &non_proxy); + __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy + __ bind(&non_proxy); + __ push(ebx); // Smi + __ push(eax); // Array + __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset)); + __ push(eax); // Fixed array length (as smi). + __ push(Immediate(Smi::FromInt(0))); // Initial index. + + // Generate code for doing the condition check. + PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); + __ bind(&loop); + __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index. + __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length. + __ j(above_equal, loop_statement.break_label()); + + // Get the current entry of the array into register ebx. + __ mov(ebx, Operand(esp, 2 * kPointerSize)); + __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize)); + + // Get the expected map from the stack or a smi in the + // permanent slow case into register edx. + __ mov(edx, Operand(esp, 3 * kPointerSize)); + + // Check if the expected map still matches that of the enumerable. + // If not, we may have to filter the key. + Label update_each; + __ mov(ecx, Operand(esp, 4 * kPointerSize)); + __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset)); + __ j(equal, &update_each, Label::kNear); + + // For proxies, no filtering is done. + // TODO(rossberg): What if only a prototype is a proxy? Not specified yet. + DCHECK(Smi::FromInt(0) == 0); + __ test(edx, edx); + __ j(zero, &update_each); + + // Convert the entry to a string or null if it isn't a property + // anymore. 
If the property has been removed while iterating, we + // just skip it. + __ push(ecx); // Enumerable. + __ push(ebx); // Current entry. + __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); + __ test(eax, eax); + __ j(equal, loop_statement.continue_label()); + __ mov(ebx, eax); + + // Update the 'each' property or variable from the possibly filtered + // entry in register ebx. + __ bind(&update_each); + __ mov(result_register(), ebx); + // Perform the assignment as if via '='. + { EffectContext context(this); + EmitAssignment(stmt->each()); + } + + // Generate code for the body of the loop. + Visit(stmt->body()); + + // Generate code for going to the next element by incrementing the + // index (smi) stored on top of the stack. + __ bind(loop_statement.continue_label()); + __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1))); + + EmitBackEdgeBookkeeping(stmt, &loop); + __ jmp(&loop); + + // Remove the pointers stored on the stack. + __ bind(loop_statement.break_label()); + __ add(esp, Immediate(5 * kPointerSize)); + + // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); + __ bind(&exit); + decrement_loop_depth(); +} + + +void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { + Comment cmnt(masm_, "[ ForOfStatement"); + SetStatementPosition(stmt); + + Iteration loop_statement(this, stmt); + increment_loop_depth(); + + // var iterator = iterable[Symbol.iterator](); + VisitForEffect(stmt->assign_iterator()); + + // Loop entry. + __ bind(loop_statement.continue_label()); + + // result = iterator.next() + VisitForEffect(stmt->next_result()); + + // if (result.done) break; + Label result_not_done; + VisitForControl(stmt->result_done(), + loop_statement.break_label(), + &result_not_done, + &result_not_done); + __ bind(&result_not_done); + + // each = result.value + VisitForEffect(stmt->assign_each()); + + // Generate code for the body of the loop. 
+ Visit(stmt->body()); + + // Check stack before looping. + PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS); + EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label()); + __ jmp(loop_statement.continue_label()); + + // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); + __ bind(loop_statement.break_label()); + decrement_loop_depth(); +} + + +void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, + bool pretenure) { + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. If + // we're running with the --always-opt or the --prepare-always-opt + // flag, we need to use the runtime function so that the new function + // we are creating here gets a chance to have its code optimized and + // doesn't just get a copy of the existing unoptimized code. + if (!FLAG_always_opt && + !FLAG_prepare_always_opt && + !pretenure && + scope()->is_function_scope() && + info->num_literals() == 0) { + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); + __ mov(ebx, Immediate(info)); + __ CallStub(&stub); + } else { + __ push(esi); + __ push(Immediate(info)); + __ push(Immediate(pretenure + ? isolate()->factory()->true_value() + : isolate()->factory()->false_value())); + __ CallRuntime(Runtime::kNewClosure, 3); + } + context()->Plug(eax); +} + + +void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { + Comment cmnt(masm_, "[ VariableProxy"); + EmitVariableLoad(expr); +} + + +void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, + TypeofState typeof_state, + Label* slow) { + Register context = esi; + Register temp = edx; + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_sloppy_eval()) { + // Check that extension is NULL. 
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + __ j(not_equal, slow); + } + // Load next context in chain. + __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX)); + // Walk the rest of the chain without clobbering esi. + context = temp; + } + // If no outer scope calls eval, we do not need to check more + // context extensions. If we have reached an eval scope, we check + // all extensions from this point. + if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s != NULL && s->is_eval_scope()) { + // Loop up the context chain. There is no frame effect so it is + // safe to use raw labels here. + Label next, fast; + if (!context.is(temp)) { + __ mov(temp, context); + } + __ bind(&next); + // Terminate at native context. + __ cmp(FieldOperand(temp, HeapObject::kMapOffset), + Immediate(isolate()->factory()->native_context_map())); + __ j(equal, &fast, Label::kNear); + // Check that extension is NULL. + __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0)); + __ j(not_equal, slow); + // Load next context in chain. + __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX)); + __ jmp(&next); + __ bind(&fast); + } + + // All extension objects were empty and it is safe to use a global + // load IC call. + __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), proxy->var()->name()); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + + ContextualMode mode = (typeof_state == INSIDE_TYPEOF) + ? 
NOT_CONTEXTUAL + : CONTEXTUAL; + + CallLoadIC(mode); +} + + +MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, + Label* slow) { + DCHECK(var->IsContextSlot()); + Register context = esi; + Register temp = ebx; + + for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_sloppy_eval()) { + // Check that extension is NULL. + __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + __ j(not_equal, slow); + } + __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX)); + // Walk the rest of the chain without clobbering esi. + context = temp; + } + } + // Check that last extension is NULL. + __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); + __ j(not_equal, slow); + + // This function is used only for loads, not stores, so it's safe to + // return an esi-based operand (the write barrier cannot be allowed to + // destroy the esi register). + return ContextOperand(context, var->index()); +} + + +void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy, + TypeofState typeof_state, + Label* slow, + Label* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. 
+ Variable* var = proxy->var(); + if (var->mode() == DYNAMIC_GLOBAL) { + EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow); + __ jmp(done); + } else if (var->mode() == DYNAMIC_LOCAL) { + Variable* local = var->local_if_not_shadowed(); + __ mov(eax, ContextSlotOperandCheckExtensions(local, slow)); + if (local->mode() == LET || local->mode() == CONST || + local->mode() == CONST_LEGACY) { + __ cmp(eax, isolate()->factory()->the_hole_value()); + __ j(not_equal, done); + if (local->mode() == CONST_LEGACY) { + __ mov(eax, isolate()->factory()->undefined_value()); + } else { // LET || CONST + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } + } + __ jmp(done); + } +} + + +void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { + // Record position before possible IC call. + SetSourcePosition(proxy->position()); + Variable* var = proxy->var(); + + // Three cases: global variables, lookup variables, and all other types of + // variables. + switch (var->location()) { + case Variable::UNALLOCATED: { + Comment cmnt(masm_, "[ Global variable"); + __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), var->name()); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + CallLoadIC(CONTEXTUAL); + context()->Plug(eax); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::CONTEXT: { + Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable" + : "[ Stack variable"); + if (var->binding_needs_init()) { + // var->scope() may be NULL when the proxy is located in eval code and + // refers to a potential outside binding. Currently those bindings are + // always looked up dynamically, i.e. in that case + // var->location() == LOOKUP. + // always holds. + DCHECK(var->scope() != NULL); + + // Check if the binding really needs an initialization check. 
The check + // can be skipped in the following situation: we have a LET or CONST + // binding in harmony mode, both the Variable and the VariableProxy have + // the same declaration scope (i.e. they are both in global code, in the + // same function or in the same eval code) and the VariableProxy is in + // the source physically located after the initializer of the variable. + // + // We cannot skip any initialization checks for CONST in non-harmony + // mode because const variables may be declared but never initialized: + // if (false) { const x; }; var y = x; + // + // The condition on the declaration scopes is a conservative check for + // nested functions that access a binding and are called before the + // binding is initialized: + // function() { f(); let x = 1; function f() { x = 2; } } + // + bool skip_init_check; + if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) { + skip_init_check = false; + } else { + // Check that we always have valid source position. + DCHECK(var->initializer_position() != RelocInfo::kNoPosition); + DCHECK(proxy->position() != RelocInfo::kNoPosition); + skip_init_check = var->mode() != CONST_LEGACY && + var->initializer_position() < proxy->position(); + } + + if (!skip_init_check) { + // Let and const need a read barrier. + Label done; + GetVar(eax, var); + __ cmp(eax, isolate()->factory()->the_hole_value()); + __ j(not_equal, &done, Label::kNear); + if (var->mode() == LET || var->mode() == CONST) { + // Throw a reference error when using an uninitialized let/const + // binding in harmony mode. + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } else { + // Uninitalized const bindings outside of harmony mode are unholed. 
+ DCHECK(var->mode() == CONST_LEGACY); + __ mov(eax, isolate()->factory()->undefined_value()); + } + __ bind(&done); + context()->Plug(eax); + break; + } + } + context()->Plug(var); + break; + } + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ Lookup variable"); + Label done, slow; + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); + __ bind(&slow); + __ push(esi); // Context. + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); + __ bind(&done); + context()->Plug(eax); + break; + } + } +} + + +void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { + Comment cmnt(masm_, "[ RegExpLiteral"); + Label materialized; + // Registers will be used as follows: + // edi = JS function. + // ecx = literals array. + // ebx = regexp literal. + // eax = regexp literal clone. + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset)); + int literal_offset = + FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; + __ mov(ebx, FieldOperand(ecx, literal_offset)); + __ cmp(ebx, isolate()->factory()->undefined_value()); + __ j(not_equal, &materialized, Label::kNear); + + // Create regexp literal using runtime function + // Result will be in eax. 
+ __ push(ecx); + __ push(Immediate(Smi::FromInt(expr->literal_index()))); + __ push(Immediate(expr->pattern())); + __ push(Immediate(expr->flags())); + __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); + __ mov(ebx, eax); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ push(ebx); + __ push(Immediate(Smi::FromInt(size))); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + __ pop(ebx); + + __ bind(&allocated); + // Copy the content into the newly allocated memory. + // (Unroll copy loop once for better throughput). + for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { + __ mov(edx, FieldOperand(ebx, i)); + __ mov(ecx, FieldOperand(ebx, i + kPointerSize)); + __ mov(FieldOperand(eax, i), edx); + __ mov(FieldOperand(eax, i + kPointerSize), ecx); + } + if ((size % (2 * kPointerSize)) != 0) { + __ mov(edx, FieldOperand(ebx, size - kPointerSize)); + __ mov(FieldOperand(eax, size - kPointerSize), edx); + } + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitAccessor(Expression* expression) { + if (expression == NULL) { + __ push(Immediate(isolate()->factory()->null_value())); + } else { + VisitForStackValue(expression); + } +} + + +void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { + Comment cmnt(masm_, "[ ObjectLiteral"); + + expr->BuildConstantProperties(isolate()); + Handle<FixedArray> constant_properties = expr->constant_properties(); + int flags = expr->fast_elements() + ? ObjectLiteral::kFastElements + : ObjectLiteral::kNoFlags; + flags |= expr->has_function() + ? 
ObjectLiteral::kHasFunction + : ObjectLiteral::kNoFlags; + int properties_count = constant_properties->length() / 2; + if (expr->may_store_doubles() || expr->depth() > 1 || + masm()->serializer_enabled() || + flags != ObjectLiteral::kFastElements || + properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(edi, JSFunction::kLiteralsOffset)); + __ push(Immediate(Smi::FromInt(expr->literal_index()))); + __ push(Immediate(constant_properties)); + __ push(Immediate(Smi::FromInt(flags))); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); + } else { + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset)); + __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index()))); + __ mov(ecx, Immediate(constant_properties)); + __ mov(edx, Immediate(Smi::FromInt(flags))); + FastCloneShallowObjectStub stub(isolate(), properties_count); + __ CallStub(&stub); + } + + // If result_saved is true the result is on top of the stack. If + // result_saved is false the result is in eax. + bool result_saved = false; + + // Mark all computed expressions that are bound to a key that + // is shadowed by a later occurrence of the same key. For the + // marked expressions, no store code is emitted. 
+ expr->CalculateEmitStore(zone()); + + AccessorTable accessor_table(zone()); + for (int i = 0; i < expr->properties()->length(); i++) { + ObjectLiteral::Property* property = expr->properties()->at(i); + if (property->IsCompileTimeValue()) continue; + + Literal* key = property->key(); + Expression* value = property->value(); + if (!result_saved) { + __ push(eax); // Save result on the stack + result_saved = true; + } + switch (property->kind()) { + case ObjectLiteral::Property::CONSTANT: + UNREACHABLE(); + case ObjectLiteral::Property::MATERIALIZED_LITERAL: + DCHECK(!CompileTimeValue::IsCompileTimeValue(value)); + // Fall through. + case ObjectLiteral::Property::COMPUTED: + if (key->value()->IsInternalizedString()) { + if (property->emit_store()) { + VisitForAccumulatorValue(value); + DCHECK(StoreIC::ValueRegister().is(eax)); + __ mov(StoreIC::NameRegister(), Immediate(key->value())); + __ mov(StoreIC::ReceiverRegister(), Operand(esp, 0)); + CallStoreIC(key->LiteralFeedbackId()); + PrepareForBailoutForId(key->id(), NO_REGISTERS); + } else { + VisitForEffect(value); + } + break; + } + __ push(Operand(esp, 0)); // Duplicate receiver. + VisitForStackValue(key); + VisitForStackValue(value); + if (property->emit_store()) { + __ push(Immediate(Smi::FromInt(SLOPPY))); // Strict mode + __ CallRuntime(Runtime::kSetProperty, 4); + } else { + __ Drop(3); + } + break; + case ObjectLiteral::Property::PROTOTYPE: + __ push(Operand(esp, 0)); // Duplicate receiver. + VisitForStackValue(value); + if (property->emit_store()) { + __ CallRuntime(Runtime::kSetPrototype, 2); + } else { + __ Drop(2); + } + break; + case ObjectLiteral::Property::GETTER: + accessor_table.lookup(key)->second->getter = value; + break; + case ObjectLiteral::Property::SETTER: + accessor_table.lookup(key)->second->setter = value; + break; + } + } + + // Emit code to define accessors, using only a single call to the runtime for + // each pair of corresponding getters and setters. 
+ for (AccessorTable::Iterator it = accessor_table.begin(); + it != accessor_table.end(); + ++it) { + __ push(Operand(esp, 0)); // Duplicate receiver. + VisitForStackValue(it->first); + EmitAccessor(it->second->getter); + EmitAccessor(it->second->setter); + __ push(Immediate(Smi::FromInt(NONE))); + __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5); + } + + if (expr->has_function()) { + DCHECK(result_saved); + __ push(Operand(esp, 0)); + __ CallRuntime(Runtime::kToFastProperties, 1); + } + + if (result_saved) { + context()->PlugTOS(); + } else { + context()->Plug(eax); + } +} + + +void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { + Comment cmnt(masm_, "[ ArrayLiteral"); + + expr->BuildConstantElements(isolate()); + int flags = expr->depth() == 1 + ? ArrayLiteral::kShallowElements + : ArrayLiteral::kNoFlags; + + ZoneList<Expression*>* subexprs = expr->values(); + int length = subexprs->length(); + Handle<FixedArray> constant_elements = expr->constant_elements(); + DCHECK_EQ(2, constant_elements->length()); + ElementsKind constant_elements_kind = + static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); + bool has_constant_fast_elements = + IsFastObjectElementsKind(constant_elements_kind); + Handle<FixedArrayBase> constant_elements_values( + FixedArrayBase::cast(constant_elements->get(1))); + + AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE; + if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) { + // If the only customer of allocation sites is transitioning, then + // we can turn it off if we don't have anywhere else to transition to. 
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; + } + + if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { + __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset)); + __ push(Immediate(Smi::FromInt(expr->literal_index()))); + __ push(Immediate(constant_elements)); + __ push(Immediate(Smi::FromInt(flags))); + __ CallRuntime(Runtime::kCreateArrayLiteral, 4); + } else { + __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset)); + __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index()))); + __ mov(ecx, Immediate(constant_elements)); + FastCloneShallowArrayStub stub(isolate(), allocation_site_mode); + __ CallStub(&stub); + } + + bool result_saved = false; // Is the result saved to the stack? + + // Emit code to evaluate all the non-constant subexpressions and to store + // them into the newly cloned array. + for (int i = 0; i < length; i++) { + Expression* subexpr = subexprs->at(i); + // If the subexpression is a literal or a simple materialized literal it + // is already set in the cloned array. + if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; + + if (!result_saved) { + __ push(eax); // array literal. + __ push(Immediate(Smi::FromInt(expr->literal_index()))); + result_saved = true; + } + VisitForAccumulatorValue(subexpr); + + if (IsFastObjectElementsKind(constant_elements_kind)) { + // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they + // cannot transition and don't need to call the runtime stub. + int offset = FixedArray::kHeaderSize + (i * kPointerSize); + __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal. + __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); + // Store the subexpression value in the array's elements. + __ mov(FieldOperand(ebx, offset), result_register()); + // Update the write barrier for the array store. 
+ __ RecordWriteField(ebx, offset, result_register(), ecx, + EMIT_REMEMBERED_SET, + INLINE_SMI_CHECK); + } else { + // Store the subexpression value in the array's elements. + __ mov(ecx, Immediate(Smi::FromInt(i))); + StoreArrayLiteralElementStub stub(isolate()); + __ CallStub(&stub); + } + + PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); + } + + if (result_saved) { + __ add(esp, Immediate(kPointerSize)); // literal index + context()->PlugTOS(); + } else { + context()->Plug(eax); + } +} + + +void FullCodeGenerator::VisitAssignment(Assignment* expr) { + DCHECK(expr->target()->IsValidReferenceExpression()); + + Comment cmnt(masm_, "[ Assignment"); + + // Left-hand side can only be a property, a global or a (parameter or local) + // slot. + enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* property = expr->target()->AsProperty(); + if (property != NULL) { + assign_type = (property->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; + } + + // Evaluate LHS expression. + switch (assign_type) { + case VARIABLE: + // Nothing to do here. + break; + case NAMED_PROPERTY: + if (expr->is_compound()) { + // We need the receiver both on the stack and in the register. + VisitForStackValue(property->obj()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + } else { + VisitForStackValue(property->obj()); + } + break; + case KEYED_PROPERTY: { + if (expr->is_compound()) { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, kPointerSize)); + __ mov(LoadIC::NameRegister(), Operand(esp, 0)); + } else { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + } + break; + } + } + + // For compound assignments we need another deoptimization point after the + // variable/property load. 
+ if (expr->is_compound()) { + AccumulatorValueContext result_context(this); + { AccumulatorValueContext left_operand_context(this); + switch (assign_type) { + case VARIABLE: + EmitVariableLoad(expr->target()->AsVariableProxy()); + PrepareForBailout(expr->target(), TOS_REG); + break; + case NAMED_PROPERTY: + EmitNamedPropertyLoad(property); + PrepareForBailoutForId(property->LoadId(), TOS_REG); + break; + case KEYED_PROPERTY: + EmitKeyedPropertyLoad(property); + PrepareForBailoutForId(property->LoadId(), TOS_REG); + break; + } + } + + Token::Value op = expr->binary_op(); + __ push(eax); // Left operand goes on the stack. + VisitForAccumulatorValue(expr->value()); + + OverwriteMode mode = expr->value()->ResultOverwriteAllowed() + ? OVERWRITE_RIGHT + : NO_OVERWRITE; + SetSourcePosition(expr->position() + 1); + if (ShouldInlineSmiCase(op)) { + EmitInlineSmiBinaryOp(expr->binary_operation(), + op, + mode, + expr->target(), + expr->value()); + } else { + EmitBinaryOp(expr->binary_operation(), op, mode); + } + + // Deoptimization point in case the binary operation may have side effects. + PrepareForBailout(expr->binary_operation(), TOS_REG); + } else { + VisitForAccumulatorValue(expr->value()); + } + + // Record source position before possible IC call. + SetSourcePosition(expr->position()); + + // Store the value. + switch (assign_type) { + case VARIABLE: + EmitVariableAssignment(expr->target()->AsVariableProxy()->var(), + expr->op()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(eax); + break; + case NAMED_PROPERTY: + EmitNamedPropertyAssignment(expr); + break; + case KEYED_PROPERTY: + EmitKeyedPropertyAssignment(expr); + break; + } +} + + +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. 
+ VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::SUSPEND: + // Pop value from top-of-stack slot; box result into result register. + EmitCreateIteratorResult(false); + __ push(result_register()); + // Fall through. + case Yield::INITIAL: { + Label suspend, continuation, post_runtime, resume; + + __ jmp(&suspend); + + __ bind(&continuation); + __ jmp(&resume); + + __ bind(&suspend); + VisitForAccumulatorValue(expr->generator_object()); + DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); + __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(continuation.pos()))); + __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi); + __ mov(ecx, esi); + __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx); + __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset)); + __ cmp(esp, ebx); + __ j(equal, &post_runtime); + __ push(eax); // generator object + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ mov(context_register(), + Operand(ebp, StandardFrameConstants::kContextOffset)); + __ bind(&post_runtime); + __ pop(result_register()); + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ mov(FieldOperand(result_register(), + JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); + // Pop value from top-of-stack slot, box result into result register. 
+ EmitCreateIteratorResult(true); + EmitUnwindBeforeReturn(); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: { + VisitForStackValue(expr->generator_object()); + + // Initial stack layout is as follows: + // [sp + 1 * kPointerSize] iter + // [sp + 0 * kPointerSize] g + + Label l_catch, l_try, l_suspend, l_continuation, l_resume; + Label l_next, l_call, l_loop; + Register load_receiver = LoadIC::ReceiverRegister(); + Register load_name = LoadIC::NameRegister(); + + // Initial send value is undefined. + __ mov(eax, isolate()->factory()->undefined_value()); + __ jmp(&l_next); + + // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } + __ bind(&l_catch); + handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); + __ mov(load_name, isolate()->factory()->throw_string()); // "throw" + __ push(load_name); // "throw" + __ push(Operand(esp, 2 * kPointerSize)); // iter + __ push(eax); // exception + __ jmp(&l_call); + + // try { received = %yield result } + // Shuffle the received result above a try handler and yield it without + // re-boxing. 
+ __ bind(&l_try); + __ pop(eax); // result + __ PushTryHandler(StackHandler::CATCH, expr->index()); + const int handler_size = StackHandlerConstants::kSize; + __ push(eax); // result + __ jmp(&l_suspend); + __ bind(&l_continuation); + __ jmp(&l_resume); + __ bind(&l_suspend); + const int generator_object_depth = kPointerSize + handler_size; + __ mov(eax, Operand(esp, generator_object_depth)); + __ push(eax); // g + DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); + __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(l_continuation.pos()))); + __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi); + __ mov(ecx, esi); + __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ mov(context_register(), + Operand(ebp, StandardFrameConstants::kContextOffset)); + __ pop(eax); // result + EmitReturnSequence(); + __ bind(&l_resume); // received in eax + __ PopTryHandler(); + + // receiver = iter; f = iter.next; arg = received; + __ bind(&l_next); + + __ mov(load_name, isolate()->factory()->next_string()); + __ push(load_name); // "next" + __ push(Operand(esp, 2 * kPointerSize)); // iter + __ push(eax); // received + + // result = receiver[f](arg); + __ bind(&l_call); + __ mov(load_receiver, Operand(esp, kPointerSize)); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->KeyedLoadFeedbackSlot()))); + } + Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + CallIC(ic, TypeFeedbackId::None()); + __ mov(edi, eax); + __ mov(Operand(esp, 2 * kPointerSize), edi); + CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); + __ CallStub(&stub); + + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ Drop(1); // The function is still on the stack; drop it. 
+ + // if (!result.done) goto l_try; + __ bind(&l_loop); + __ push(eax); // save result + __ Move(load_receiver, eax); // result + __ mov(load_name, + isolate()->factory()->done_string()); // "done" + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->DoneFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // result.done in eax + Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); + CallIC(bool_ic); + __ test(eax, eax); + __ j(zero, &l_try); + + // result.value + __ pop(load_receiver); // result + __ mov(load_name, + isolate()->factory()->value_string()); // "value" + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->ValueFeedbackSlot()))); + } + CallLoadIC(NOT_CONTEXTUAL); // result.value in eax + context()->DropAndPlug(2, eax); // drop iter and g + break; + } + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in eax, and is ultimately read by the resumed generator, as + // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it + // is read to throw the value when the resumed generator is already closed. + // ebx will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(ebx); + + // Check generator state. + Label wrong_state, closed_state, done; + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0); + __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(0))); + __ j(equal, &closed_state); + __ j(less, &wrong_state); + + // Load suspended function and context. + __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset)); + __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset)); + + // Push receiver. 
+ __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset)); + + // Push holes for arguments to generator function. + __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(edx, + FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); + __ mov(ecx, isolate()->factory()->the_hole_value()); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ sub(edx, Immediate(Smi::FromInt(1))); + __ j(carry, &push_frame); + __ push(ecx); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. + Label resume_frame; + __ bind(&push_frame); + __ call(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + __ push(ebp); // Caller's frame pointer. + __ mov(ebp, esp); + __ push(esi); // Callee's context. + __ push(edi); // Callee's JS Function. + + // Load the operand stack size. + __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset)); + __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset)); + __ SmiUntag(edx); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. + if (resume_mode == JSGeneratorObject::NEXT) { + Label slow_resume; + __ cmp(edx, Immediate(0)); + __ j(not_zero, &slow_resume); + __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); + __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset)); + __ SmiUntag(ecx); + __ add(edx, ecx); + __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); + __ jmp(edx); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. 
+ Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ sub(edx, Immediate(1)); + __ j(carry, &call_resume); + __ push(ecx); + __ jmp(&push_operand_holes); + __ bind(&call_resume); + __ push(ebx); + __ push(result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ Abort(kGeneratorFailedToResume); + + // Reach here when generator is closed. + __ bind(&closed_state); + if (resume_mode == JSGeneratorObject::NEXT) { + // Return completed iterator result when generator is closed. + __ push(Immediate(isolate()->factory()->undefined_value())); + // Pop value from top-of-stack slot; box result into result register. + EmitCreateIteratorResult(true); + } else { + // Throw the provided value. + __ push(eax); + __ CallRuntime(Runtime::kThrow, 1); + } + __ jmp(&done); + + // Throw error if we attempt to operate on a running generator. + __ bind(&wrong_state); + __ push(ebx); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + +void FullCodeGenerator::EmitCreateIteratorResult(bool done) { + Label gc_required; + Label allocated; + + Handle<Map> map(isolate()->native_context()->iterator_result_map()); + + __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&gc_required); + __ Push(Smi::FromInt(map->instance_size())); + __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + __ mov(context_register(), + Operand(ebp, StandardFrameConstants::kContextOffset)); + + __ bind(&allocated); + __ mov(ebx, map); + __ pop(ecx); + __ mov(edx, isolate()->factory()->ToBoolean(done)); + DCHECK_EQ(map->instance_size(), 5 * kPointerSize); + __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), + isolate()->factory()->empty_fixed_array()); + __ mov(FieldOperand(eax, 
JSObject::kElementsOffset), + isolate()->factory()->empty_fixed_array()); + __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx); + __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx); + + // Only the value field needs a write barrier, as the other values are in the + // root set. + __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, + ecx, edx); +} + + +void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { + SetSourcePosition(prop->position()); + Literal* key = prop->key()->AsLiteral(); + DCHECK(!key->value()->IsSmi()); + __ mov(LoadIC::NameRegister(), Immediate(key->value())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); + } +} + + +void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { + SetSourcePosition(prop->position()); + Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(prop->PropertyFeedbackSlot()))); + CallIC(ic); + } else { + CallIC(ic, prop->PropertyFeedbackId()); + } +} + + +void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, + Token::Value op, + OverwriteMode mode, + Expression* left, + Expression* right) { + // Do combined smi check of the operands. Left operand is on the + // stack. Right operand is in eax. + Label smi_case, done, stub_call; + __ pop(edx); + __ mov(ecx, eax); + __ or_(eax, edx); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear); + + __ bind(&stub_call); + __ mov(eax, ecx); + BinaryOpICStub stub(isolate(), op, mode); + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); + patch_site.EmitPatchInfo(); + __ jmp(&done, Label::kNear); + + // Smi case. 
+ __ bind(&smi_case); + __ mov(eax, edx); // Copy left operand in case of a stub call. + + switch (op) { + case Token::SAR: + __ SmiUntag(ecx); + __ sar_cl(eax); // No checks of result necessary + __ and_(eax, Immediate(~kSmiTagMask)); + break; + case Token::SHL: { + Label result_ok; + __ SmiUntag(eax); + __ SmiUntag(ecx); + __ shl_cl(eax); + // Check that the *signed* result fits in a smi. + __ cmp(eax, 0xc0000000); + __ j(positive, &result_ok); + __ SmiTag(ecx); + __ jmp(&stub_call); + __ bind(&result_ok); + __ SmiTag(eax); + break; + } + case Token::SHR: { + Label result_ok; + __ SmiUntag(eax); + __ SmiUntag(ecx); + __ shr_cl(eax); + __ test(eax, Immediate(0xc0000000)); + __ j(zero, &result_ok); + __ SmiTag(ecx); + __ jmp(&stub_call); + __ bind(&result_ok); + __ SmiTag(eax); + break; + } + case Token::ADD: + __ add(eax, ecx); + __ j(overflow, &stub_call); + break; + case Token::SUB: + __ sub(eax, ecx); + __ j(overflow, &stub_call); + break; + case Token::MUL: { + __ SmiUntag(eax); + __ imul(eax, ecx); + __ j(overflow, &stub_call); + __ test(eax, eax); + __ j(not_zero, &done, Label::kNear); + __ mov(ebx, edx); + __ or_(ebx, ecx); + __ j(negative, &stub_call); + break; + } + case Token::BIT_OR: + __ or_(eax, ecx); + break; + case Token::BIT_AND: + __ and_(eax, ecx); + break; + case Token::BIT_XOR: + __ xor_(eax, ecx); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, + Token::Value op, + OverwriteMode mode) { + __ pop(edx); + BinaryOpICStub stub(isolate(), op, mode); + JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. + CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); + patch_site.EmitPatchInfo(); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitAssignment(Expression* expr) { + DCHECK(expr->IsValidReferenceExpression()); + + // Left-hand side can only be a property, a global or a (parameter or local) + // slot. 
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* prop = expr->AsProperty(); + if (prop != NULL) { + assign_type = (prop->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; + } + + switch (assign_type) { + case VARIABLE: { + Variable* var = expr->AsVariableProxy()->var(); + EffectContext context(this); + EmitVariableAssignment(var, Token::ASSIGN); + break; + } + case NAMED_PROPERTY: { + __ push(eax); // Preserve value. + VisitForAccumulatorValue(prop->obj()); + __ Move(StoreIC::ReceiverRegister(), eax); + __ pop(StoreIC::ValueRegister()); // Restore value. + __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + CallStoreIC(); + break; + } + case KEYED_PROPERTY: { + __ push(eax); // Preserve value. + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ Move(KeyedStoreIC::NameRegister(), eax); + __ pop(KeyedStoreIC::ReceiverRegister()); // Receiver. + __ pop(KeyedStoreIC::ValueRegister()); // Restore value. + Handle<Code> ic = strict_mode() == SLOPPY + ? isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + CallIC(ic); + break; + } + } + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot( + Variable* var, MemOperand location) { + __ mov(location, eax); + if (var->IsContextSlot()) { + __ mov(edx, eax); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot(ecx, offset, edx, ebx); + } +} + + +void FullCodeGenerator::EmitVariableAssignment(Variable* var, + Token::Value op) { + if (var->IsUnallocated()) { + // Global var, const, or let. + __ mov(StoreIC::NameRegister(), var->name()); + __ mov(StoreIC::ReceiverRegister(), GlobalObjectOperand()); + CallStoreIC(); + + } else if (op == Token::INIT_CONST_LEGACY) { + // Const initializers need a write barrier. + DCHECK(!var->IsParameter()); // No const parameters. 
+ if (var->IsLookupSlot()) { + __ push(eax); + __ push(esi); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); + } else { + DCHECK(var->IsStackLocal() || var->IsContextSlot()); + Label skip; + MemOperand location = VarOperand(var, ecx); + __ mov(edx, location); + __ cmp(edx, isolate()->factory()->the_hole_value()); + __ j(not_equal, &skip, Label::kNear); + EmitStoreToStackLocalOrContextSlot(var, location); + __ bind(&skip); + } + + } else if (var->mode() == LET && op != Token::INIT_LET) { + // Non-initializing assignment to let variable needs a write barrier. + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label assign; + MemOperand location = VarOperand(var, ecx); + __ mov(edx, location); + __ cmp(edx, isolate()->factory()->the_hole_value()); + __ j(not_equal, &assign, Label::kNear); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + __ bind(&assign); + EmitStoreToStackLocalOrContextSlot(var, location); + + } else if (!var->is_const_mode() || op == Token::INIT_CONST) { + if (var->IsLookupSlot()) { + // Assignment to var. + __ push(eax); // Value. + __ push(esi); // Context. + __ push(Immediate(var->name())); + __ push(Immediate(Smi::FromInt(strict_mode()))); + __ CallRuntime(Runtime::kStoreLookupSlot, 4); + } else { + // Assignment to var or initializing assignment to let/const in harmony + // mode. + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + MemOperand location = VarOperand(var, ecx); + if (generate_debug_code_ && op == Token::INIT_LET) { + // Check for an uninitialized let binding. + __ mov(edx, location); + __ cmp(edx, isolate()->factory()->the_hole_value()); + __ Check(equal, kLetBindingReInitialization); + } + EmitStoreToStackLocalOrContextSlot(var, location); + } + } + // Non-initializing assignments to consts are ignored. 
+} + + +void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a named store IC. + // eax : value + // esp[0] : receiver + + Property* prop = expr->target()->AsProperty(); + DCHECK(prop != NULL); + DCHECK(prop->key()->IsLiteral()); + + // Record source code position before IC call. + SetSourcePosition(expr->position()); + __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + __ pop(StoreIC::ReceiverRegister()); + CallStoreIC(expr->AssignmentFeedbackId()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a keyed store IC. + // eax : value + // esp[0] : key + // esp[kPointerSize] : receiver + + __ pop(KeyedStoreIC::NameRegister()); // Key. + __ pop(KeyedStoreIC::ReceiverRegister()); + DCHECK(KeyedStoreIC::ValueRegister().is(eax)); + // Record source code position before IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = strict_mode() == SLOPPY + ? isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + CallIC(ic, expr->AssignmentFeedbackId()); + + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(eax); +} + + +void FullCodeGenerator::VisitProperty(Property* expr) { + Comment cmnt(masm_, "[ Property"); + Expression* key = expr->key(); + + if (key->IsPropertyName()) { + VisitForAccumulatorValue(expr->obj()); + __ Move(LoadIC::ReceiverRegister(), result_register()); + EmitNamedPropertyLoad(expr); + PrepareForBailoutForId(expr->LoadId(), TOS_REG); + context()->Plug(eax); + } else { + VisitForStackValue(expr->obj()); + VisitForAccumulatorValue(expr->key()); + __ pop(LoadIC::ReceiverRegister()); // Object. + __ Move(LoadIC::NameRegister(), result_register()); // Key. 
+ EmitKeyedPropertyLoad(expr); + context()->Plug(eax); + } +} + + +void FullCodeGenerator::CallIC(Handle<Code> code, + TypeFeedbackId ast_id) { + ic_total_count_++; + __ call(code, RelocInfo::CODE_TARGET, ast_id); +} + + +// Code common for calls using the IC. +void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { + Expression* callee = expr->expression(); + + CallIC::CallType call_type = callee->IsVariableProxy() + ? CallIC::FUNCTION + : CallIC::METHOD; + // Get the target function. + if (call_type == CallIC::FUNCTION) { + { StackValueContext context(this); + EmitVariableLoad(callee->AsVariableProxy()); + PrepareForBailout(callee, NO_REGISTERS); + } + // Push undefined as receiver. This is patched in the method prologue if it + // is a sloppy mode method. + __ push(Immediate(isolate()->factory()->undefined_value())); + } else { + // Load the function from the receiver. + DCHECK(callee->IsProperty()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + EmitNamedPropertyLoad(callee->AsProperty()); + PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); + // Push the target function under the receiver. + __ push(Operand(esp, 0)); + __ mov(Operand(esp, kPointerSize), eax); + } + + EmitCall(expr, call_type); +} + + +// Code common for calls using the IC. +void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, + Expression* key) { + // Load the key. + VisitForAccumulatorValue(key); + + Expression* callee = expr->expression(); + + // Load the function from the receiver. + DCHECK(callee->IsProperty()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + __ mov(LoadIC::NameRegister(), eax); + EmitKeyedPropertyLoad(callee->AsProperty()); + PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); + + // Push the target function under the receiver. 
+ __ push(Operand(esp, 0)); + __ mov(Operand(esp, kPointerSize), eax); + + EmitCall(expr, CallIC::METHOD); +} + + +void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { + // Load the arguments. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + { PreservePositionScope scope(masm()->positions_recorder()); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + } + + // Record source position of the IC call. + SetSourcePosition(expr->position()); + Handle<Code> ic = CallIC::initialize_stub( + isolate(), arg_count, call_type); + __ Move(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot()))); + __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); + // Don't assign a type feedback id to the IC, since type feedback is provided + // by the vector above. + CallIC(ic); + + RecordJSReturnSite(expr); + + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + + context()->DropAndPlug(1, eax); +} + + +void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) { + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ push(Operand(esp, arg_count * kPointerSize)); + } else { + __ push(Immediate(isolate()->factory()->undefined_value())); + } + + // Push the receiver of the enclosing function. + __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize)); + // Push the language mode. + __ push(Immediate(Smi::FromInt(strict_mode()))); + + // Push the start position of the scope the calls resides in. + __ push(Immediate(Smi::FromInt(scope()->start_position()))); + + // Do the runtime call. + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); +} + + +void FullCodeGenerator::VisitCall(Call* expr) { +#ifdef DEBUG + // We want to verify that RecordJSReturnSite gets called on all paths + // through this function. Avoid early returns. 
+ expr->return_is_recorded_ = false; +#endif + + Comment cmnt(masm_, "[ Call"); + Expression* callee = expr->expression(); + Call::CallType call_type = expr->GetCallType(isolate()); + + if (call_type == Call::POSSIBLY_EVAL_CALL) { + // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval + // to resolve the function we need to call and the receiver of the call. + // Then we call the resolved function using the given arguments. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + { PreservePositionScope pos_scope(masm()->positions_recorder()); + VisitForStackValue(callee); + // Reserved receiver slot. + __ push(Immediate(isolate()->factory()->undefined_value())); + // Push the arguments. + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Push a copy of the function (found below the arguments) and + // resolve eval. + __ push(Operand(esp, (arg_count + 1) * kPointerSize)); + EmitResolvePossiblyDirectEval(arg_count); + + // The runtime call returns a pair of values in eax (function) and + // edx (receiver). Touch up the stack with the right values. + __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx); + __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax); + } + // Record source position for debugger. + SetSourcePosition(expr->position()); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); + __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); + __ CallStub(&stub); + RecordJSReturnSite(expr); + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + context()->DropAndPlug(1, eax); + + } else if (call_type == Call::GLOBAL_CALL) { + EmitCallWithLoadIC(expr); + + } else if (call_type == Call::LOOKUP_SLOT_CALL) { + // Call to a lookup slot (dynamically introduced variable). 
+ VariableProxy* proxy = callee->AsVariableProxy(); + Label slow, done; + { PreservePositionScope scope(masm()->positions_recorder()); + // Generate code for loading from variables potentially shadowed by + // eval-introduced variables. + EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done); + } + __ bind(&slow); + // Call the runtime to find the function to call (returned in eax) and + // the object holding it (returned in edx). + __ push(context_register()); + __ push(Immediate(proxy->name())); + __ CallRuntime(Runtime::kLoadLookupSlot, 2); + __ push(eax); // Function. + __ push(edx); // Receiver. + + // If fast case code has been generated, emit code to push the function + // and receiver and have the slow path jump around this code. + if (done.is_linked()) { + Label call; + __ jmp(&call, Label::kNear); + __ bind(&done); + // Push function. + __ push(eax); + // The receiver is implicitly the global receiver. Indicate this by + // passing the hole to the call function stub. + __ push(Immediate(isolate()->factory()->undefined_value())); + __ bind(&call); + } + + // The receiver is either the global receiver or an object found by + // LoadContextSlot. + EmitCall(expr); + + } else if (call_type == Call::PROPERTY_CALL) { + Property* property = callee->AsProperty(); + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(property->obj()); + } + if (property->key()->IsPropertyName()) { + EmitCallWithLoadIC(expr); + } else { + EmitKeyedCallWithLoadIC(expr, property->key()); + } + + } else { + DCHECK(call_type == Call::OTHER_CALL); + // Call to an arbitrary expression not handled specially above. + { PreservePositionScope scope(masm()->positions_recorder()); + VisitForStackValue(callee); + } + __ push(Immediate(isolate()->factory()->undefined_value())); + // Emit function call. + EmitCall(expr); + } + +#ifdef DEBUG + // RecordJSReturnSite should have been called. 
+ DCHECK(expr->return_is_recorded_); +#endif +} + + +void FullCodeGenerator::VisitCallNew(CallNew* expr) { + Comment cmnt(masm_, "[ CallNew"); + // According to ECMA-262, section 11.2.2, page 44, the function + // expression in new calls must be evaluated before the + // arguments. + + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. + VisitForStackValue(expr->expression()); + + // Push the arguments ("left-to-right") on the stack. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Call the construct call builtin that handles allocation and + // constructor invocation. + SetSourcePosition(expr->position()); + + // Load function and argument count into edi and eax. + __ Move(eax, Immediate(arg_count)); + __ mov(edi, Operand(esp, arg_count * kPointerSize)); + + // Record call targets in unoptimized code. 
+ if (FLAG_pretenuring_call_new) { + EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot()); + DCHECK(expr->AllocationSiteFeedbackSlot() == + expr->CallNewFeedbackSlot() + 1); + } + + __ LoadHeapObject(ebx, FeedbackVector()); + __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot()))); + + CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + PrepareForBailoutForId(expr->ReturnId(), TOS_REG); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ test(eax, Immediate(kSmiTagMask)); + Split(zero, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ test(eax, Immediate(kSmiTagMask | 0x80000000)); + Split(zero, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + 
Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(eax, if_false); + __ cmp(eax, isolate()->factory()->null_value()); + __ j(equal, if_true); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined when tested with typeof. + __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset)); + __ test(ecx, Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, if_false); + __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + __ j(below, if_false); + __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(below_equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(above_equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, 
&fall_through); + + __ JumpIfSmi(eax, if_false); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset)); + __ test(ebx, Immediate(1 << Map::kIsUndetectable)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(not_zero, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( + CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false, skip_lookup; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ AssertNotSmi(eax); + + // Check whether this map has already been checked to be safe for default + // valueOf. + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ test_b(FieldOperand(ebx, Map::kBitField2Offset), + 1 << Map::kStringWrapperSafeForDefaultValueOf); + __ j(not_zero, &skip_lookup); + + // Check for fast case object. Return false for slow case objects. + __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset)); + __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); + __ cmp(ecx, isolate()->factory()->hash_table_map()); + __ j(equal, if_false); + + // Look for valueOf string in the descriptor array, and indicate false if + // found. Since we omit an enumeration index check, if it is added via a + // transition that shares its descriptor array, this is a false positive. + Label entry, loop, done; + + // Skip loop if no descriptors are valid. + __ NumberOfOwnDescriptors(ecx, ebx); + __ cmp(ecx, 0); + __ j(equal, &done); + + __ LoadInstanceDescriptors(ebx, ebx); + // ebx: descriptor array. + // ecx: valid entries in the descriptor array. + // Calculate the end of the descriptor array. 
+ STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kPointerSize == 4); + __ imul(ecx, ecx, DescriptorArray::kDescriptorSize); + __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset)); + // Calculate location of the first key name. + __ add(ebx, Immediate(DescriptorArray::kFirstOffset)); + // Loop through all the keys in the descriptor array. If one of these is the + // internalized string "valueOf" the result is false. + __ jmp(&entry); + __ bind(&loop); + __ mov(edx, FieldOperand(ebx, 0)); + __ cmp(edx, isolate()->factory()->value_of_string()); + __ j(equal, if_false); + __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize)); + __ bind(&entry); + __ cmp(ebx, ecx); + __ j(not_equal, &loop); + + __ bind(&done); + + // Reload map as register ebx was used as temporary above. + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + + // Set the bit in the map to indicate that there is no local valueOf field. + __ or_(FieldOperand(ebx, Map::kBitField2Offset), + Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); + + __ bind(&skip_lookup); + + // If a valueOf property is not found on the object check that its + // prototype is the un-modified String prototype. If not result is false. 
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset)); + __ JumpIfSmi(ecx, if_false); + __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); + __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ mov(edx, + FieldOperand(edx, GlobalObject::kNativeContextOffset)); + __ cmp(ecx, + ContextOperand(edx, + Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); + __ CheckMap(eax, map, if_false, DO_SMI_CHECK); + // Check if the exponent half is 0x80000000. Comparing against 1 and + // checking for overflow is the shortest possible encoding. 
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1)); + __ j(no_overflow, if_false); + __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + + +void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + + +void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + // Get 
the frame pointer for the calling frame. + __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ cmp(Operand(eax, StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &check_frame_marker); + __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + // Load the two objects into registers and perform the comparison. + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ pop(ebx); + __ cmp(eax, ebx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitArguments(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + // ArgumentsAccessStub expects the key in edx and the formal + // parameter count in eax. 
+ VisitForAccumulatorValue(args->at(0)); + __ mov(edx, eax); + __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters()))); + ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + + Label exit; + // Get the number of formal parameters. + __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters()))); + + // Check if the calling frame is an arguments adaptor frame. + __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &exit); + + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. + __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + + __ bind(&exit); + __ AssertSmi(eax); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + Label done, null, function, non_function_constructor; + + VisitForAccumulatorValue(args->at(0)); + + // If the object is a smi, we return null. + __ JumpIfSmi(eax, &null); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax); + // Map is now in eax. 
+ __ j(below, &null); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ j(equal, &function); + + __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ j(equal, &function); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. + __ mov(eax, FieldOperand(eax, Map::kConstructorOffset)); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); + __ j(not_equal, &non_function_constructor); + + // eax now contains the constructor function. Grab the + // instance class name from there. + __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset)); + __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset)); + __ jmp(&done); + + // Functions have class 'Function'. + __ bind(&function); + __ mov(eax, isolate()->factory()->function_class_string()); + __ jmp(&done); + + // Objects with a non-function constructor have class 'Object'. + __ bind(&non_function_constructor); + __ mov(eax, isolate()->factory()->Object_string()); + __ jmp(&done); + + // Non-JS objects have class null. + __ bind(&null); + __ mov(eax, isolate()->factory()->null_value()); + + // All done. + __ bind(&done); + + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitSubString(CallRuntime* expr) { + // Load the arguments on the stack and call the stub. + SubStringStub stub(isolate()); + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 3); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + VisitForStackValue(args->at(2)); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { + // Load the arguments on the stack and call the stub. 
+ RegExpExecStub stub(isolate()); + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 4); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + VisitForStackValue(args->at(2)); + VisitForStackValue(args->at(3)); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); // Load the object. + + Label done; + // If the object is a smi return the object. + __ JumpIfSmi(eax, &done, Label::kNear); + // If the object is not a value type, return the object. + __ CmpObjectType(eax, JS_VALUE_TYPE, ebx); + __ j(not_equal, &done, Label::kNear); + __ mov(eax, FieldOperand(eax, JSValue::kValueOffset)); + + __ bind(&done); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitDateField(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); + Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); + + VisitForAccumulatorValue(args->at(0)); // Load the object. 
+ + Label runtime, done, not_date_object; + Register object = eax; + Register result = eax; + Register scratch = ecx; + + __ JumpIfSmi(object, ¬_date_object); + __ CmpObjectType(object, JS_DATE_TYPE, scratch); + __ j(not_equal, ¬_date_object); + + if (index->value() == 0) { + __ mov(result, FieldOperand(object, JSDate::kValueOffset)); + __ jmp(&done); + } else { + if (index->value() < JSDate::kFirstUncachedField) { + ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); + __ mov(scratch, Operand::StaticVariable(stamp)); + __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); + __ j(not_equal, &runtime, Label::kNear); + __ mov(result, FieldOperand(object, JSDate::kValueOffset + + kPointerSize * index->value())); + __ jmp(&done); + } + __ bind(&runtime); + __ PrepareCallCFunction(2, scratch); + __ mov(Operand(esp, 0), object); + __ mov(Operand(esp, 1 * kPointerSize), Immediate(index)); + __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); + __ jmp(&done); + } + + __ bind(¬_date_object); + __ CallRuntime(Runtime::kThrowNotDateError, 0); + __ bind(&done); + context()->Plug(result); +} + + +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(3, args->length()); + + Register string = eax; + Register index = ebx; + Register value = ecx; + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + VisitForAccumulatorValue(args->at(0)); // string + + __ pop(value); + __ pop(index); + + if (FLAG_debug_code) { + __ test(value, Immediate(kSmiTagMask)); + __ Check(zero, kNonSmiValue); + __ test(index, Immediate(kSmiTagMask)); + __ Check(zero, kNonSmiValue); + } + + __ SmiUntag(value); + __ SmiUntag(index); + + if (FLAG_debug_code) { + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type); + } + + __ 
mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize), + value); + context()->Plug(string); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(3, args->length()); + + Register string = eax; + Register index = ebx; + Register value = ecx; + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + VisitForAccumulatorValue(args->at(0)); // string + __ pop(value); + __ pop(index); + + if (FLAG_debug_code) { + __ test(value, Immediate(kSmiTagMask)); + __ Check(zero, kNonSmiValue); + __ test(index, Immediate(kSmiTagMask)); + __ Check(zero, kNonSmiValue); + __ SmiUntag(index); + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type); + __ SmiTag(index); + } + + __ SmiUntag(value); + // No need to untag a smi for two-byte addressing. + __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize), + value); + context()->Plug(string); +} + + +void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { + // Load the arguments on the stack and call the runtime function. + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + + __ CallRuntime(Runtime::kMathPowSlow, 2); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + VisitForStackValue(args->at(0)); // Load the object. + VisitForAccumulatorValue(args->at(1)); // Load the value. + __ pop(ebx); // eax = value. ebx = object. + + Label done; + // If the object is a smi, return the value. + __ JumpIfSmi(ebx, &done, Label::kNear); + + // If the object is not a value type, return the value. 
+ __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx); + __ j(not_equal, &done, Label::kNear); + + // Store the value. + __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax); + + // Update the write barrier. Save the value as it will be + // overwritten by the write barrier code and is needed afterward. + __ mov(edx, eax); + __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx); + + __ bind(&done); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(args->length(), 1); + + // Load the argument into eax and call the stub. + VisitForAccumulatorValue(args->at(0)); + + NumberToStringStub stub(isolate()); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label done; + StringCharFromCodeGenerator generator(eax, ebx); + generator.GenerateFast(masm_); + __ jmp(&done); + + NopRuntimeCallHelper call_helper; + generator.GenerateSlow(masm_, call_helper); + + __ bind(&done); + context()->Plug(ebx); +} + + +void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + + Register object = ebx; + Register index = eax; + Register result = edx; + + __ pop(object); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharCodeAtGenerator generator(object, + index, + result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm_); + __ jmp(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // NaN. 
+ __ Move(result, Immediate(isolate()->factory()->nan_value())); + __ jmp(&done); + + __ bind(&need_conversion); + // Move the undefined value into the result register, which will + // trigger conversion. + __ Move(result, Immediate(isolate()->factory()->undefined_value())); + __ jmp(&done); + + NopRuntimeCallHelper call_helper; + generator.GenerateSlow(masm_, call_helper); + + __ bind(&done); + context()->Plug(result); +} + + +void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + + Register object = ebx; + Register index = eax; + Register scratch = edx; + Register result = eax; + + __ pop(object); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharAtGenerator generator(object, + index, + scratch, + result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm_); + __ jmp(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // the empty string. + __ Move(result, Immediate(isolate()->factory()->empty_string())); + __ jmp(&done); + + __ bind(&need_conversion); + // Move smi zero into the result register, which will trigger + // conversion. 
+ __ Move(result, Immediate(Smi::FromInt(0))); + __ jmp(&done); + + NopRuntimeCallHelper call_helper; + generator.GenerateSlow(masm_, call_helper); + + __ bind(&done); + context()->Plug(result); +} + + +void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(2, args->length()); + VisitForStackValue(args->at(0)); + VisitForAccumulatorValue(args->at(1)); + + __ pop(edx); + StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(2, args->length()); + + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + + StringCompareStub stub(isolate()); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() >= 2); + + int arg_count = args->length() - 2; // 2 ~ receiver and function. + for (int i = 0; i < arg_count + 1; ++i) { + VisitForStackValue(args->at(i)); + } + VisitForAccumulatorValue(args->last()); // Function. + + Label runtime, done; + // Check for non-function argument (including proxy). + __ JumpIfSmi(eax, &runtime); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); + __ j(not_equal, &runtime); + + // InvokeFunction requires the function in edi. Move it in there. + __ mov(edi, result_register()); + ParameterCount count(arg_count); + __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper()); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ jmp(&done); + + __ bind(&runtime); + __ push(eax); + __ CallRuntime(Runtime::kCall, args->length()); + __ bind(&done); + + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { + // Load the arguments on the stack and call the stub. 
+ RegExpConstructResultStub stub(isolate()); + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 3); + VisitForStackValue(args->at(0)); + VisitForStackValue(args->at(1)); + VisitForAccumulatorValue(args->at(2)); + __ pop(ebx); + __ pop(ecx); + __ CallStub(&stub); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK_EQ(2, args->length()); + + DCHECK_NE(NULL, args->at(0)->AsLiteral()); + int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); + + Handle<FixedArray> jsfunction_result_caches( + isolate()->native_context()->jsfunction_result_caches()); + if (jsfunction_result_caches->length() <= cache_id) { + __ Abort(kAttemptToUseUndefinedCache); + __ mov(eax, isolate()->factory()->undefined_value()); + context()->Plug(eax); + return; + } + + VisitForAccumulatorValue(args->at(1)); + + Register key = eax; + Register cache = ebx; + Register tmp = ecx; + __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX)); + __ mov(cache, + FieldOperand(cache, GlobalObject::kNativeContextOffset)); + __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); + __ mov(cache, + FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); + + Label done, not_found; + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset)); + // tmp now holds finger offset as a smi. + __ cmp(key, FixedArrayElementOperand(cache, tmp)); + __ j(not_equal, ¬_found); + + __ mov(eax, FixedArrayElementOperand(cache, tmp, 1)); + __ jmp(&done); + + __ bind(¬_found); + // Call runtime to perform the lookup. 
+ __ push(cache); + __ push(key); + __ CallRuntime(Runtime::kGetFromCache, 2); + + __ bind(&done); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + __ AssertString(eax); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ test(FieldOperand(eax, String::kHashFieldOffset), + Immediate(String::kContainsCachedArrayIndexMask)); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(zero, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 1); + VisitForAccumulatorValue(args->at(0)); + + __ AssertString(eax); + + __ mov(eax, FieldOperand(eax, String::kHashFieldOffset)); + __ IndexFromHash(eax, eax); + + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { + Label bailout, done, one_char_separator, long_separator, + non_trivial_array, not_size_one_array, loop, + loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry; + + ZoneList<Expression*>* args = expr->arguments(); + DCHECK(args->length() == 2); + // We will leave the separator on the stack until the end of the function. + VisitForStackValue(args->at(1)); + // Load this to eax (= array) + VisitForAccumulatorValue(args->at(0)); + // All aliases of the same register have disjoint lifetimes. + Register array = eax; + Register elements = no_reg; // Will be eax. 
+ + Register index = edx; + + Register string_length = ecx; + + Register string = esi; + + Register scratch = ebx; + + Register array_length = edi; + Register result_pos = no_reg; // Will be edi. + + // Separator operand is already pushed. + Operand separator_operand = Operand(esp, 2 * kPointerSize); + Operand result_operand = Operand(esp, 1 * kPointerSize); + Operand array_length_operand = Operand(esp, 0); + __ sub(esp, Immediate(2 * kPointerSize)); + __ cld(); + // Check that the array is a JSArray + __ JumpIfSmi(array, &bailout); + __ CmpObjectType(array, JS_ARRAY_TYPE, scratch); + __ j(not_equal, &bailout); + + // Check that the array has fast elements. + __ CheckFastElements(scratch, &bailout); + + // If the array has length zero, return the empty string. + __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset)); + __ SmiUntag(array_length); + __ j(not_zero, &non_trivial_array); + __ mov(result_operand, isolate()->factory()->empty_string()); + __ jmp(&done); + + // Save the array length. + __ bind(&non_trivial_array); + __ mov(array_length_operand, array_length); + + // Save the FixedArray containing array's elements. + // End of array's live range. + elements = array; + __ mov(elements, FieldOperand(array, JSArray::kElementsOffset)); + array = no_reg; + + + // Check that all array elements are sequential ASCII strings, and + // accumulate the sum of their lengths, as a smi-encoded value. + __ Move(index, Immediate(0)); + __ Move(string_length, Immediate(0)); + // Loop condition: while (index < length). + // Live loop registers: index, array_length, string, + // scratch, string_length, elements. 
+ if (generate_debug_code_) { + __ cmp(index, array_length); + __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin); + } + __ bind(&loop); + __ mov(string, FieldOperand(elements, + index, + times_pointer_size, + FixedArray::kHeaderSize)); + __ JumpIfSmi(string, &bailout); + __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); + __ and_(scratch, Immediate( + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask)); + __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag); + __ j(not_equal, &bailout); + __ add(string_length, + FieldOperand(string, SeqOneByteString::kLengthOffset)); + __ j(overflow, &bailout); + __ add(index, Immediate(1)); + __ cmp(index, array_length); + __ j(less, &loop); + + // If array_length is 1, return elements[0], a string. + __ cmp(array_length, 1); + __ j(not_equal, ¬_size_one_array); + __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize)); + __ mov(result_operand, scratch); + __ jmp(&done); + + __ bind(¬_size_one_array); + + // End of array_length live range. + result_pos = array_length; + array_length = no_reg; + + // Live registers: + // string_length: Sum of string lengths, as a smi. + // elements: FixedArray of strings. + + // Check that the separator is a flat ASCII string. + __ mov(string, separator_operand); + __ JumpIfSmi(string, &bailout); + __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); + __ and_(scratch, Immediate( + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask)); + __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag); + __ j(not_equal, &bailout); + + // Add (separator length times array_length) - separator length + // to string_length. 
+ __ mov(scratch, separator_operand); + __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset)); + __ sub(string_length, scratch); // May be negative, temporarily. + __ imul(scratch, array_length_operand); + __ j(overflow, &bailout); + __ add(string_length, scratch); + __ j(overflow, &bailout); + + __ shr(string_length, 1); + // Live registers and stack values: + // string_length + // elements + __ AllocateAsciiString(result_pos, string_length, scratch, + index, string, &bailout); + __ mov(result_operand, result_pos); + __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize)); + + + __ mov(string, separator_operand); + __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset), + Immediate(Smi::FromInt(1))); + __ j(equal, &one_char_separator); + __ j(greater, &long_separator); + + + // Empty separator case + __ mov(index, Immediate(0)); + __ jmp(&loop_1_condition); + // Loop condition: while (index < length). + __ bind(&loop_1); + // Each iteration of the loop concatenates one string to the result. + // Live values in registers: + // index: which element of the elements array we are adding to the result. + // result_pos: the position to which we are currently copying characters. + // elements: the FixedArray of strings we are joining. + + // Get string = array[index]. + __ mov(string, FieldOperand(elements, index, + times_pointer_size, + FixedArray::kHeaderSize)); + __ mov(string_length, + FieldOperand(string, String::kLengthOffset)); + __ shr(string_length, 1); + __ lea(string, + FieldOperand(string, SeqOneByteString::kHeaderSize)); + __ CopyBytes(string, result_pos, string_length, scratch); + __ add(index, Immediate(1)); + __ bind(&loop_1_condition); + __ cmp(index, array_length_operand); + __ j(less, &loop_1); // End while (index < length). + __ jmp(&done); + + + + // One-character separator case + __ bind(&one_char_separator); + // Replace separator with its ASCII character value. 
+ __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize)); + __ mov_b(separator_operand, scratch); + + __ Move(index, Immediate(0)); + // Jump into the loop after the code that copies the separator, so the first + // element is not preceded by a separator + __ jmp(&loop_2_entry); + // Loop condition: while (index < length). + __ bind(&loop_2); + // Each iteration of the loop concatenates one string to the result. + // Live values in registers: + // index: which element of the elements array we are adding to the result. + // result_pos: the position to which we are currently copying characters. + + // Copy the separator character to the result. + __ mov_b(scratch, separator_operand); + __ mov_b(Operand(result_pos, 0), scratch); + __ inc(result_pos); + + __ bind(&loop_2_entry); + // Get string = array[index]. + __ mov(string, FieldOperand(elements, index, + times_pointer_size, + FixedArray::kHeaderSize)); + __ mov(string_length, + FieldOperand(string, String::kLengthOffset)); + __ shr(string_length, 1); + __ lea(string, + FieldOperand(string, SeqOneByteString::kHeaderSize)); + __ CopyBytes(string, result_pos, string_length, scratch); + __ add(index, Immediate(1)); + + __ cmp(index, array_length_operand); + __ j(less, &loop_2); // End while (index < length). + __ jmp(&done); + + + // Long separator case (separator is more than one character). + __ bind(&long_separator); + + __ Move(index, Immediate(0)); + // Jump into the loop after the code that copies the separator, so the first + // element is not preceded by a separator + __ jmp(&loop_3_entry); + // Loop condition: while (index < length). + __ bind(&loop_3); + // Each iteration of the loop concatenates one string to the result. + // Live values in registers: + // index: which element of the elements array we are adding to the result. + // result_pos: the position to which we are currently copying characters. + + // Copy the separator to the result. 
+ __ mov(string, separator_operand); + __ mov(string_length, + FieldOperand(string, String::kLengthOffset)); + __ shr(string_length, 1); + __ lea(string, + FieldOperand(string, SeqOneByteString::kHeaderSize)); + __ CopyBytes(string, result_pos, string_length, scratch); + + __ bind(&loop_3_entry); + // Get string = array[index]. + __ mov(string, FieldOperand(elements, index, + times_pointer_size, + FixedArray::kHeaderSize)); + __ mov(string_length, + FieldOperand(string, String::kLengthOffset)); + __ shr(string_length, 1); + __ lea(string, + FieldOperand(string, SeqOneByteString::kHeaderSize)); + __ CopyBytes(string, result_pos, string_length, scratch); + __ add(index, Immediate(1)); + + __ cmp(index, array_length_operand); + __ j(less, &loop_3); // End while (index < length). + __ jmp(&done); + + + __ bind(&bailout); + __ mov(result_operand, isolate()->factory()->undefined_value()); + __ bind(&done); + __ mov(eax, result_operand); + // Drop temp values from the stack, and restore context register. + __ add(esp, Immediate(3 * kPointerSize)); + + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { + DCHECK(expr->arguments()->length() == 0); + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(isolate()); + __ movzx_b(eax, Operand::StaticVariable(debug_is_active)); + __ SmiTag(eax); + context()->Plug(eax); +} + + +void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { + if (expr->function() != NULL && + expr->function()->intrinsic_type == Runtime::INLINE) { + Comment cmnt(masm_, "[ InlineRuntimeCall"); + EmitInlineRuntimeCall(expr); + return; + } + + Comment cmnt(masm_, "[ CallRuntime"); + ZoneList<Expression*>* args = expr->arguments(); + + if (expr->is_jsruntime()) { + // Push the builtins object as receiver. 
+ __ mov(eax, GlobalObjectOperand()); + __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset)); + + // Load the function from the receiver. + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + __ mov(LoadIC::NameRegister(), Immediate(expr->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(expr->CallRuntimeFeedbackSlot()))); + CallLoadIC(NOT_CONTEXTUAL); + } else { + CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); + } + + // Push the target function under the receiver. + __ push(Operand(esp, 0)); + __ mov(Operand(esp, kPointerSize), eax); + + // Code common for calls using the IC. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Record source position of the IC call. + SetSourcePosition(expr->position()); + CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); + __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize)); + __ CallStub(&stub); + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + context()->DropAndPlug(1, eax); + + } else { + // Push the arguments ("left-to-right"). + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } + + // Call the C runtime function. 
+ __ CallRuntime(expr->function(), arg_count); + + context()->Plug(eax); + } +} + + +void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { + switch (expr->op()) { + case Token::DELETE: { + Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); + Property* property = expr->expression()->AsProperty(); + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + + if (property != NULL) { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + __ push(Immediate(Smi::FromInt(strict_mode()))); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + context()->Plug(eax); + } else if (proxy != NULL) { + Variable* var = proxy->var(); + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is allowed. + DCHECK(strict_mode() == SLOPPY || var->is_this()); + if (var->IsUnallocated()) { + __ push(GlobalObjectOperand()); + __ push(Immediate(var->name())); + __ push(Immediate(Smi::FromInt(SLOPPY))); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + context()->Plug(eax); + } else if (var->IsStackAllocated() || var->IsContextSlot()) { + // Result of deleting non-global variables is false. 'this' is + // not really a variable, though we implement it as one. The + // subexpression does not have side effects. + context()->Plug(var->is_this()); + } else { + // Non-global variable. Call the runtime to try to delete from the + // context where the variable was introduced. + __ push(context_register()); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kDeleteLookupSlot, 2); + context()->Plug(eax); + } + } else { + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. 
+ VisitForEffect(expr->expression()); + context()->Plug(true); + } + break; + } + + case Token::VOID: { + Comment cmnt(masm_, "[ UnaryOperation (VOID)"); + VisitForEffect(expr->expression()); + context()->Plug(isolate()->factory()->undefined_value()); + break; + } + + case Token::NOT: { + Comment cmnt(masm_, "[ UnaryOperation (NOT)"); + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else if (context()->IsTest()) { + const TestContext* test = TestContext::cast(context()); + // The labels are swapped for the recursive call. + VisitForControl(expr->expression(), + test->false_label(), + test->true_label(), + test->fall_through()); + context()->Plug(test->true_label(), test->false_label()); + } else { + // We handle value contexts explicitly rather than simply visiting + // for control and plugging the control flow into the context, + // because we need to prepare a pair of extra administrative AST ids + // for the optimizing compiler. 
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue()); + Label materialize_true, materialize_false, done; + VisitForControl(expr->expression(), + &materialize_false, + &materialize_true, + &materialize_true); + __ bind(&materialize_true); + PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS); + if (context()->IsAccumulatorValue()) { + __ mov(eax, isolate()->factory()->true_value()); + } else { + __ Push(isolate()->factory()->true_value()); + } + __ jmp(&done, Label::kNear); + __ bind(&materialize_false); + PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS); + if (context()->IsAccumulatorValue()) { + __ mov(eax, isolate()->factory()->false_value()); + } else { + __ Push(isolate()->factory()->false_value()); + } + __ bind(&done); + } + break; + } + + case Token::TYPEOF: { + Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); + { StackValueContext context(this); + VisitForTypeofValue(expr->expression()); + } + __ CallRuntime(Runtime::kTypeof, 1); + context()->Plug(eax); + break; + } + + default: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { + DCHECK(expr->expression()->IsValidReferenceExpression()); + + Comment cmnt(masm_, "[ CountOperation"); + SetSourcePosition(expr->position()); + + // Expression can only be a property, a global or a (parameter or local) + // slot. + enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* prop = expr->expression()->AsProperty(); + // In case of a property we use the uninitialized expression context + // of the key to detect a named property. + if (prop != NULL) { + assign_type = + (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY; + } + + // Evaluate expression and get value. 
+ if (assign_type == VARIABLE) { + DCHECK(expr->expression()->AsVariableProxy()->var() != NULL); + AccumulatorValueContext context(this); + EmitVariableLoad(expr->expression()->AsVariableProxy()); + } else { + // Reserve space for result of postfix operation. + if (expr->is_postfix() && !context()->IsEffect()) { + __ push(Immediate(Smi::FromInt(0))); + } + if (assign_type == NAMED_PROPERTY) { + // Put the object both on the stack and in the register. + VisitForStackValue(prop->obj()); + __ mov(LoadIC::ReceiverRegister(), Operand(esp, 0)); + EmitNamedPropertyLoad(prop); + } else { + VisitForStackValue(prop->obj()); + VisitForStackValue(prop->key()); + __ mov(LoadIC::ReceiverRegister(), + Operand(esp, kPointerSize)); // Object. + __ mov(LoadIC::NameRegister(), Operand(esp, 0)); // Key. + EmitKeyedPropertyLoad(prop); + } + } + + // We need a second deoptimization point after loading the value + // in case evaluating the property load my have a side effect. + if (assign_type == VARIABLE) { + PrepareForBailout(expr->expression(), TOS_REG); + } else { + PrepareForBailoutForId(prop->LoadId(), TOS_REG); + } + + // Inline smi case if we are in a loop. + Label done, stub_call; + JumpPatchSite patch_site(masm_); + if (ShouldInlineSmiCase(expr->op())) { + Label slow; + patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear); + + // Save result for postfix expressions. + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + // Save the result on the stack. If we have a named or keyed property + // we store the result under the receiver that is currently on top + // of the stack. 
+ switch (assign_type) { + case VARIABLE: + __ push(eax); + break; + case NAMED_PROPERTY: + __ mov(Operand(esp, kPointerSize), eax); + break; + case KEYED_PROPERTY: + __ mov(Operand(esp, 2 * kPointerSize), eax); + break; + } + } + } + + if (expr->op() == Token::INC) { + __ add(eax, Immediate(Smi::FromInt(1))); + } else { + __ sub(eax, Immediate(Smi::FromInt(1))); + } + __ j(no_overflow, &done, Label::kNear); + // Call stub. Undo operation first. + if (expr->op() == Token::INC) { + __ sub(eax, Immediate(Smi::FromInt(1))); + } else { + __ add(eax, Immediate(Smi::FromInt(1))); + } + __ jmp(&stub_call, Label::kNear); + __ bind(&slow); + } + ToNumberStub convert_stub(isolate()); + __ CallStub(&convert_stub); + + // Save result for postfix expressions. + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + // Save the result on the stack. If we have a named or keyed property + // we store the result under the receiver that is currently on top + // of the stack. + switch (assign_type) { + case VARIABLE: + __ push(eax); + break; + case NAMED_PROPERTY: + __ mov(Operand(esp, kPointerSize), eax); + break; + case KEYED_PROPERTY: + __ mov(Operand(esp, 2 * kPointerSize), eax); + break; + } + } + } + + // Record position before stub call. + SetSourcePosition(expr->position()); + + // Call stub for +1/-1. + __ bind(&stub_call); + __ mov(edx, eax); + __ mov(eax, Immediate(Smi::FromInt(1))); + BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE); + CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); + patch_site.EmitPatchInfo(); + __ bind(&done); + + // Store the value returned in eax. + switch (assign_type) { + case VARIABLE: + if (expr->is_postfix()) { + // Perform the assignment as if via '='. 
+ { EffectContext context(this); + EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context.Plug(eax); + } + // For all contexts except EffectContext We have the result on + // top of the stack. + if (!context()->IsEffect()) { + context()->PlugTOS(); + } + } else { + // Perform the assignment as if via '='. + EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + context()->Plug(eax); + } + break; + case NAMED_PROPERTY: { + __ mov(StoreIC::NameRegister(), prop->key()->AsLiteral()->value()); + __ pop(StoreIC::ReceiverRegister()); + CallStoreIC(expr->CountStoreFeedbackId()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + if (expr->is_postfix()) { + if (!context()->IsEffect()) { + context()->PlugTOS(); + } + } else { + context()->Plug(eax); + } + break; + } + case KEYED_PROPERTY: { + __ pop(KeyedStoreIC::NameRegister()); + __ pop(KeyedStoreIC::ReceiverRegister()); + Handle<Code> ic = strict_mode() == SLOPPY + ? 
isolate()->builtins()->KeyedStoreIC_Initialize() + : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); + CallIC(ic, expr->CountStoreFeedbackId()); + PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); + if (expr->is_postfix()) { + // Result is on the stack + if (!context()->IsEffect()) { + context()->PlugTOS(); + } + } else { + context()->Plug(eax); + } + break; + } + } +} + + +void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { + VariableProxy* proxy = expr->AsVariableProxy(); + DCHECK(!context()->IsEffect()); + DCHECK(!context()->IsTest()); + + if (proxy != NULL && proxy->var()->IsUnallocated()) { + Comment cmnt(masm_, "[ Global variable"); + __ mov(LoadIC::ReceiverRegister(), GlobalObjectOperand()); + __ mov(LoadIC::NameRegister(), Immediate(proxy->name())); + if (FLAG_vector_ics) { + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(proxy->VariableFeedbackSlot()))); + } + // Use a regular load, not a contextual load, to avoid a reference + // error. + CallLoadIC(NOT_CONTEXTUAL); + PrepareForBailout(expr, TOS_REG); + context()->Plug(eax); + } else if (proxy != NULL && proxy->var()->IsLookupSlot()) { + Comment cmnt(masm_, "[ Lookup slot"); + Label done, slow; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done); + + __ bind(&slow); + __ push(esi); + __ push(Immediate(proxy->name())); + __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2); + PrepareForBailout(expr, TOS_REG); + __ bind(&done); + + context()->Plug(eax); + } else { + // This expression cannot throw a reference error at the top level. 
+ VisitInDuplicateContext(expr); + } +} + + +void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, + Expression* sub_expr, + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + { AccumulatorValueContext context(this); + VisitForTypeofValue(sub_expr); + } + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + + Factory* factory = isolate()->factory(); + if (String::Equals(check, factory->number_string())) { + __ JumpIfSmi(eax, if_true); + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + Split(equal, if_true, if_false, fall_through); + } else if (String::Equals(check, factory->string_string())) { + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx); + __ j(above_equal, if_false); + // Check for undetectable objects => false. + __ test_b(FieldOperand(edx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + Split(zero, if_true, if_false, fall_through); + } else if (String::Equals(check, factory->symbol_string())) { + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, SYMBOL_TYPE, edx); + Split(equal, if_true, if_false, fall_through); + } else if (String::Equals(check, factory->boolean_string())) { + __ cmp(eax, isolate()->factory()->true_value()); + __ j(equal, if_true); + __ cmp(eax, isolate()->factory()->false_value()); + Split(equal, if_true, if_false, fall_through); + } else if (String::Equals(check, factory->undefined_string())) { + __ cmp(eax, isolate()->factory()->undefined_value()); + __ j(equal, if_true); + __ JumpIfSmi(eax, if_false); + // Check for undetectable objects => true. 
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); + __ test(ecx, Immediate(1 << Map::kIsUndetectable)); + Split(not_zero, if_true, if_false, fall_through); + } else if (String::Equals(check, factory->function_string())) { + __ JumpIfSmi(eax, if_false); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); + __ j(equal, if_true); + __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE); + Split(equal, if_true, if_false, fall_through); + } else if (String::Equals(check, factory->object_string())) { + __ JumpIfSmi(eax, if_false); + __ cmp(eax, isolate()->factory()->null_value()); + __ j(equal, if_true); + __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx); + __ j(below, if_false); + __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); + __ j(above, if_false); + // Check for undetectable objects => false. + __ test_b(FieldOperand(edx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + Split(zero, if_true, if_false, fall_through); + } else { + if (if_false != fall_through) __ jmp(if_false); + } + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { + Comment cmnt(masm_, "[ CompareOperation"); + SetSourcePosition(expr->position()); + + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + + // Always perform the comparison for its control flow. Pack the result + // into the expression's context after the comparison is performed. 
+ Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + Token::Value op = expr->op(); + VisitForStackValue(expr->left()); + switch (op) { + case Token::IN: + VisitForStackValue(expr->right()); + __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); + PrepareForBailoutBeforeSplit(expr, false, NULL, NULL); + __ cmp(eax, isolate()->factory()->true_value()); + Split(equal, if_true, if_false, fall_through); + break; + + case Token::INSTANCEOF: { + VisitForStackValue(expr->right()); + InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); + __ CallStub(&stub); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ test(eax, eax); + // The stub returns 0 for true. + Split(zero, if_true, if_false, fall_through); + break; + } + + default: { + VisitForAccumulatorValue(expr->right()); + Condition cc = CompareIC::ComputeCondition(op); + __ pop(edx); + + bool inline_smi_code = ShouldInlineSmiCase(op); + JumpPatchSite patch_site(masm_); + if (inline_smi_code) { + Label slow_case; + __ mov(ecx, edx); + __ or_(ecx, eax); + patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); + __ cmp(edx, eax); + Split(cc, if_true, if_false, NULL); + __ bind(&slow_case); + } + + // Record position and call the compare IC. + SetSourcePosition(expr->position()); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); + CallIC(ic, expr->CompareOperationFeedbackId()); + patch_site.EmitPatchInfo(); + + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + __ test(eax, eax); + Split(cc, if_true, if_false, fall_through); + } + } + + // Convert the result of the comparison into one expected for this + // expression's context. 
+ context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + VisitForAccumulatorValue(sub_expr); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + + Handle<Object> nil_value = nil == kNullValue + ? isolate()->factory()->null_value() + : isolate()->factory()->undefined_value(); + if (expr->op() == Token::EQ_STRICT) { + __ cmp(eax, nil_value); + Split(equal, if_true, if_false, fall_through); + } else { + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil); + CallIC(ic, expr->CompareOperationFeedbackId()); + __ test(eax, eax); + Split(not_zero, if_true, if_false, fall_through); + } + context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) { + __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + context()->Plug(eax); +} + + +Register FullCodeGenerator::result_register() { + return eax; +} + + +Register FullCodeGenerator::context_register() { + return esi; +} + + +void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { + DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); + __ mov(Operand(ebp, frame_offset), value); +} + + +void FullCodeGenerator::LoadContextField(Register dst, int context_index) { + __ mov(dst, ContextOperand(esi, context_index)); +} + + +void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { + Scope* declaration_scope = scope()->DeclarationScope(); + if (declaration_scope->is_global_scope() || + declaration_scope->is_module_scope()) { + // Contexts nested in the native context have a canonical empty function + // as their closure, not the anonymous closure containing the global + // 
code. Pass a smi sentinel and let the runtime look up the empty + // function. + __ push(Immediate(Smi::FromInt(0))); + } else if (declaration_scope->is_eval_scope()) { + // Contexts nested inside eval code have the same closure as the context + // calling eval, not the anonymous closure containing the eval code. + // Fetch it from the context. + __ push(ContextOperand(esi, Context::CLOSURE_INDEX)); + } else { + DCHECK(declaration_scope->is_function_scope()); + __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + } +} + + +// ---------------------------------------------------------------------------- +// Non-local control flow support. + +void FullCodeGenerator::EnterFinallyBlock() { + // Cook return address on top of stack (smi encoded Code* delta) + DCHECK(!result_register().is(edx)); + __ pop(edx); + __ sub(edx, Immediate(masm_->CodeObject())); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ SmiTag(edx); + __ push(edx); + + // Store result register while executing finally block. + __ push(result_register()); + + // Store pending message while executing finally block. + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ mov(edx, Operand::StaticVariable(pending_message_obj)); + __ push(edx); + + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ mov(edx, Operand::StaticVariable(has_pending_message)); + __ SmiTag(edx); + __ push(edx); + + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ mov(edx, Operand::StaticVariable(pending_message_script)); + __ push(edx); +} + + +void FullCodeGenerator::ExitFinallyBlock() { + DCHECK(!result_register().is(edx)); + // Restore pending message from stack. 
+ __ pop(edx); + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ mov(Operand::StaticVariable(pending_message_script), edx); + + __ pop(edx); + __ SmiUntag(edx); + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ mov(Operand::StaticVariable(has_pending_message), edx); + + __ pop(edx); + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ mov(Operand::StaticVariable(pending_message_obj), edx); + + // Restore result register from stack. + __ pop(result_register()); + + // Uncook return address. + __ pop(edx); + __ SmiUntag(edx); + __ add(edx, Immediate(masm_->CodeObject())); + __ jmp(edx); +} + + +#undef __ + +#define __ ACCESS_MASM(masm()) + +FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( + int* stack_depth, + int* context_length) { + // The macros used here must preserve the result register. + + // Because the handler block contains the context of the finally + // code, we can restore it directly from there for the finally code + // rather than iteratively unwinding contexts via their previous + // links. + __ Drop(*stack_depth); // Down to the handler block. + if (*context_length > 0) { + // Restore the context to its dedicated register and the stack. 
+ __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset)); + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); + } + __ PopTryHandler(); + __ call(finally_entry_); + + *stack_depth = 0; + *context_length = 0; + return previous_; +} + +#undef __ + + +static const byte kJnsInstruction = 0x79; +static const byte kJnsOffset = 0x11; +static const byte kNopByteOne = 0x66; +static const byte kNopByteTwo = 0x90; +#ifdef DEBUG +static const byte kCallInstruction = 0xe8; +#endif + + +void BackEdgeTable::PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code) { + Address call_target_address = pc - kIntSize; + Address jns_instr_address = call_target_address - 3; + Address jns_offset_address = call_target_address - 2; + + switch (target_state) { + case INTERRUPT: + // sub <profiling_counter>, <delta> ;; Not changed + // jns ok + // call <interrupt stub> + // ok: + *jns_instr_address = kJnsInstruction; + *jns_offset_address = kJnsOffset; + break; + case ON_STACK_REPLACEMENT: + case OSR_AFTER_STACK_CHECK: + // sub <profiling_counter>, <delta> ;; Not changed + // nop + // nop + // call <on-stack replacment> + // ok: + *jns_instr_address = kNopByteOne; + *jns_offset_address = kNopByteTwo; + break; + } + + Assembler::set_target_address_at(call_target_address, + unoptimized_code, + replacement_code->entry()); + unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, call_target_address, replacement_code); +} + + +BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( + Isolate* isolate, + Code* unoptimized_code, + Address pc) { + Address call_target_address = pc - kIntSize; + Address jns_instr_address = call_target_address - 3; + DCHECK_EQ(kCallInstruction, *(call_target_address - 1)); + + if (*jns_instr_address == kJnsInstruction) { + DCHECK_EQ(kJnsOffset, *(call_target_address - 2)); + DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(), + 
Assembler::target_address_at(call_target_address, + unoptimized_code)); + return INTERRUPT; + } + + DCHECK_EQ(kNopByteOne, *jns_instr_address); + DCHECK_EQ(kNopByteTwo, *(call_target_address - 2)); + + if (Assembler::target_address_at(call_target_address, unoptimized_code) == + isolate->builtins()->OnStackReplacement()->entry()) { + return ON_STACK_REPLACEMENT; + } + + DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), + Assembler::target_address_at(call_target_address, + unoptimized_code)); + return OSR_AFTER_STACK_CHECK; +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/ic-x87.cc nodejs-0.11.15/deps/v8/src/x87/ic-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/ic-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/ic-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1211 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/runtime.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + +// ---------------------------------------------------------------------------- +// Static IC stub generators. +// + +#define __ ACCESS_MASM(masm) + + +static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, + Register type, + Label* global_object) { + // Register usage: + // type: holds the receiver instance type on entry. + __ cmp(type, JS_GLOBAL_OBJECT_TYPE); + __ j(equal, global_object); + __ cmp(type, JS_BUILTINS_OBJECT_TYPE); + __ j(equal, global_object); + __ cmp(type, JS_GLOBAL_PROXY_TYPE); + __ j(equal, global_object); +} + + +// Helper function used to load a property from a dictionary backing +// storage. 
This function may fail to load a property even though it is +// in the dictionary, so code at miss_label must always call a backup +// property load that is complete. This function is safe to call if +// name is not internalized, and will jump to the miss_label in that +// case. The generated code assumes that the receiver has slow +// properties, is not a global object and does not have interceptors. +static void GenerateDictionaryLoad(MacroAssembler* masm, + Label* miss_label, + Register elements, + Register name, + Register r0, + Register r1, + Register result) { + // Register use: + // + // elements - holds the property dictionary on entry and is unchanged. + // + // name - holds the name of the property on entry and is unchanged. + // + // Scratch registers: + // + // r0 - used for the index into the property dictionary + // + // r1 - used to hold the capacity of the property dictionary. + // + // result - holds the result on exit. + + Label done; + + // Probe the dictionary. + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss_label, + &done, + elements, + name, + r0, + r1); + + // If probing finds an entry in the dictionary, r0 contains the + // index into the dictionary. Check that the value is a normal + // property. + __ bind(&done); + const int kElementsStartOffset = + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; + __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag), + Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); + __ j(not_zero, miss_label); + + // Get the value at the masked, scaled index. + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag)); +} + + +// Helper function used to store a property to a dictionary backing +// storage. 
This function may fail to store a property eventhough it +// is in the dictionary, so code at miss_label must always call a +// backup property store that is complete. This function is safe to +// call if name is not internalized, and will jump to the miss_label in +// that case. The generated code assumes that the receiver has slow +// properties, is not a global object and does not have interceptors. +static void GenerateDictionaryStore(MacroAssembler* masm, + Label* miss_label, + Register elements, + Register name, + Register value, + Register r0, + Register r1) { + // Register use: + // + // elements - holds the property dictionary on entry and is clobbered. + // + // name - holds the name of the property on entry and is unchanged. + // + // value - holds the value to store and is unchanged. + // + // r0 - used for index into the property dictionary and is clobbered. + // + // r1 - used to hold the capacity of the property dictionary and is clobbered. + Label done; + + + // Probe the dictionary. + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss_label, + &done, + elements, + name, + r0, + r1); + + // If probing finds an entry in the dictionary, r0 contains the + // index into the dictionary. Check that the value is a normal + // property that is not read only. + __ bind(&done); + const int kElementsStartOffset = + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; + const int kTypeAndReadOnlyMask = + (PropertyDetails::TypeField::kMask | + PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; + __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag), + Immediate(kTypeAndReadOnlyMask)); + __ j(not_zero, miss_label); + + // Store the value at the masked, scaled index. 
+ const int kValueOffset = kElementsStartOffset + kPointerSize; + __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag)); + __ mov(Operand(r0, 0), value); + + // Update write barrier. Make sure not to clobber the value. + __ mov(r1, value); + __ RecordWrite(elements, r0, r1); +} + + +// Checks the receiver for special cases (value type, slow case bits). +// Falls through for regular JS object. +static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, + Register receiver, + Register map, + int interceptor_bit, + Label* slow) { + // Register use: + // receiver - holds the receiver and is unchanged. + // Scratch registers: + // map - used to hold the map of the receiver. + + // Check that the object isn't a smi. + __ JumpIfSmi(receiver, slow); + + // Get the map of the receiver. + __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset)); + + // Check bit field. + __ test_b(FieldOperand(map, Map::kBitFieldOffset), + (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)); + __ j(not_zero, slow); + // Check that the object is some kind of JS object EXCEPT JS Value type. + // In the case that the object is a value-wrapper object, + // we enter the runtime system to make sure that indexing + // into string objects works as intended. + DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); + + __ CmpInstanceType(map, JS_OBJECT_TYPE); + __ j(below, slow); +} + + +// Loads an indexed element from a fast case array. +// If not_fast_array is NULL, doesn't perform the elements map check. +static void GenerateFastArrayLoad(MacroAssembler* masm, + Register receiver, + Register key, + Register scratch, + Register result, + Label* not_fast_array, + Label* out_of_range) { + // Register use: + // receiver - holds the receiver and is unchanged. + // key - holds the key and is unchanged (must be a smi). + // Scratch registers: + // scratch - used to hold elements of the receiver and the loaded value. 
+ // result - holds the result on exit if the load succeeds and + // we fall through. + + __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset)); + if (not_fast_array != NULL) { + // Check that the object is in fast mode and writable. + __ CheckMap(scratch, + masm->isolate()->factory()->fixed_array_map(), + not_fast_array, + DONT_DO_SMI_CHECK); + } else { + __ AssertFastElements(scratch); + } + // Check that the key (index) is within bounds. + __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset)); + __ j(above_equal, out_of_range); + // Fast case: Do the load. + STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); + __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize)); + __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value())); + // In case the loaded value is the_hole we have to consult GetProperty + // to ensure the prototype chain is searched. + __ j(equal, out_of_range); + if (!result.is(scratch)) { + __ mov(result, scratch); + } +} + + +// Checks whether a key is an array index string or a unique name. +// Falls through if the key is a unique name. +static void GenerateKeyNameCheck(MacroAssembler* masm, + Register key, + Register map, + Register hash, + Label* index_string, + Label* not_unique) { + // Register use: + // key - holds the key and is unchanged. Assumed to be non-smi. + // Scratch registers: + // map - used to hold the map of the key. + // hash - used to hold the hash of the key. + Label unique; + __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map); + __ j(above, not_unique); + STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + __ j(equal, &unique); + + // Is the string an array index, with cached numeric value? + __ mov(hash, FieldOperand(key, Name::kHashFieldOffset)); + __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask)); + __ j(zero, index_string); + + // Is the string internalized? 
We already know it's a string so a single + // bit test is enough. + STATIC_ASSERT(kNotInternalizedTag != 0); + __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), + kIsNotInternalizedMask); + __ j(not_zero, not_unique); + + __ bind(&unique); +} + + +static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, + Register object, + Register key, + Register scratch1, + Register scratch2, + Label* unmapped_case, + Label* slow_case) { + Heap* heap = masm->isolate()->heap(); + Factory* factory = masm->isolate()->factory(); + + // Check that the receiver is a JSObject. Because of the elements + // map check later, we do not need to check for interceptors or + // whether it requires access checks. + __ JumpIfSmi(object, slow_case); + // Check that the object is some kind of JSObject. + __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1); + __ j(below, slow_case); + + // Check that the key is a positive smi. + __ test(key, Immediate(0x80000001)); + __ j(not_zero, slow_case); + + // Load the elements into scratch1 and check its map. + Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); + __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset)); + __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK); + + // Check if element is in the range of mapped arguments. If not, jump + // to the unmapped lookup with the parameter map in scratch1. + __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset)); + __ sub(scratch2, Immediate(Smi::FromInt(2))); + __ cmp(key, scratch2); + __ j(above_equal, unmapped_case); + + // Load element index and check whether it is the hole. + const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize; + __ mov(scratch2, FieldOperand(scratch1, + key, + times_half_pointer_size, + kHeaderSize)); + __ cmp(scratch2, factory->the_hole_value()); + __ j(equal, unmapped_case); + + // Load value from context and return it. 
We can reuse scratch1 because + // we do not jump to the unmapped lookup (which requires the parameter + // map in scratch1). + const int kContextOffset = FixedArray::kHeaderSize; + __ mov(scratch1, FieldOperand(scratch1, kContextOffset)); + return FieldOperand(scratch1, + scratch2, + times_half_pointer_size, + Context::kHeaderSize); +} + + +static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, + Register key, + Register parameter_map, + Register scratch, + Label* slow_case) { + // Element is in arguments backing store, which is referenced by the + // second element of the parameter_map. + const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; + Register backing_store = parameter_map; + __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset)); + Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); + __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK); + __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset)); + __ cmp(key, scratch); + __ j(greater_equal, slow_case); + return FieldOperand(backing_store, + key, + times_half_pointer_size, + FixedArray::kHeaderSize); +} + + +void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { + // The return address is on the stack. + Label slow, check_name, index_smi, index_name, property_array_property; + Label probe_dictionary, check_number_dictionary; + + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + + // Check that the key is a smi. + __ JumpIfNotSmi(key, &check_name); + __ bind(&index_smi); + // Now the key is known to be a smi. This place is also jumped to from + // where a numeric string is converted to a smi. + + GenerateKeyedLoadReceiverCheck( + masm, receiver, eax, Map::kHasIndexedInterceptor, &slow); + + // Check the receiver's map to see if it has fast elements. 
+ __ CheckFastElements(eax, &check_number_dictionary); + + GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow); + Isolate* isolate = masm->isolate(); + Counters* counters = isolate->counters(); + __ IncrementCounter(counters->keyed_load_generic_smi(), 1); + __ ret(0); + + __ bind(&check_number_dictionary); + __ mov(ebx, key); + __ SmiUntag(ebx); + __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset)); + + // Check whether the elements is a number dictionary. + // ebx: untagged index + // eax: elements + __ CheckMap(eax, + isolate->factory()->hash_table_map(), + &slow, + DONT_DO_SMI_CHECK); + Label slow_pop_receiver; + // Push receiver on the stack to free up a register for the dictionary + // probing. + __ push(receiver); + __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax); + // Pop receiver before returning. + __ pop(receiver); + __ ret(0); + + __ bind(&slow_pop_receiver); + // Pop the receiver from the stack and jump to runtime. + __ pop(receiver); + + __ bind(&slow); + // Slow case: jump to runtime. + __ IncrementCounter(counters->keyed_load_generic_slow(), 1); + GenerateRuntimeGetProperty(masm); + + __ bind(&check_name); + GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow); + + GenerateKeyedLoadReceiverCheck( + masm, receiver, eax, Map::kHasNamedInterceptor, &slow); + + // If the receiver is a fast-case object, check the keyed lookup + // cache. Otherwise probe the dictionary. + __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset)); + __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), + Immediate(isolate->factory()->hash_table_map())); + __ j(equal, &probe_dictionary); + + // The receiver's map is still in eax, compute the keyed lookup cache hash + // based on 32 bits of the map pointer and the string hash. + if (FLAG_debug_code) { + __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset)); + __ Check(equal, kMapIsNoLongerInEax); + } + __ mov(ebx, eax); // Keep the map around for later. 
+ __ shr(eax, KeyedLookupCache::kMapHashShift); + __ mov(edi, FieldOperand(key, String::kHashFieldOffset)); + __ shr(edi, String::kHashShift); + __ xor_(eax, edi); + __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); + + // Load the key (consisting of map and internalized string) from the cache and + // check for match. + Label load_in_object_property; + static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; + Label hit_on_nth_entry[kEntriesPerBucket]; + ExternalReference cache_keys = + ExternalReference::keyed_lookup_cache_keys(masm->isolate()); + + for (int i = 0; i < kEntriesPerBucket - 1; i++) { + Label try_next_entry; + __ mov(edi, eax); + __ shl(edi, kPointerSizeLog2 + 1); + if (i != 0) { + __ add(edi, Immediate(kPointerSize * i * 2)); + } + __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); + __ j(not_equal, &try_next_entry); + __ add(edi, Immediate(kPointerSize)); + __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys)); + __ j(equal, &hit_on_nth_entry[i]); + __ bind(&try_next_entry); + } + + __ lea(edi, Operand(eax, 1)); + __ shl(edi, kPointerSizeLog2 + 1); + __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2)); + __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); + __ j(not_equal, &slow); + __ add(edi, Immediate(kPointerSize)); + __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys)); + __ j(not_equal, &slow); + + // Get field offset. + // ebx : receiver's map + // eax : lookup cache index + ExternalReference cache_field_offsets = + ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate()); + + // Hit on nth entry. 
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) { + __ bind(&hit_on_nth_entry[i]); + if (i != 0) { + __ add(eax, Immediate(i)); + } + __ mov(edi, + Operand::StaticArray(eax, times_pointer_size, cache_field_offsets)); + __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); + __ sub(edi, eax); + __ j(above_equal, &property_array_property); + if (i != 0) { + __ jmp(&load_in_object_property); + } + } + + // Load in-object property. + __ bind(&load_in_object_property); + __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset)); + __ add(eax, edi); + __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0)); + __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); + __ ret(0); + + // Load property array property. + __ bind(&property_array_property); + __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset)); + __ mov(eax, FieldOperand(eax, edi, times_pointer_size, + FixedArray::kHeaderSize)); + __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); + __ ret(0); + + // Do a quick inline probe of the receiver's dictionary, if it + // exists. + __ bind(&probe_dictionary); + + __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset)); + __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset)); + GenerateGlobalInstanceTypeCheck(masm, eax, &slow); + + GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax); + __ IncrementCounter(counters->keyed_load_generic_symbol(), 1); + __ ret(0); + + __ bind(&index_name); + __ IndexFromHash(ebx, key); + // Now jump to the place where smi keys are handled. + __ jmp(&index_smi); +} + + +void KeyedLoadIC::GenerateString(MacroAssembler* masm) { + // Return address is on the stack. 
+ Label miss; + + Register receiver = ReceiverRegister(); + Register index = NameRegister(); + Register scratch = ebx; + DCHECK(!scratch.is(receiver) && !scratch.is(index)); + Register result = eax; + DCHECK(!result.is(scratch)); + + StringCharAtGenerator char_at_generator(receiver, + index, + scratch, + result, + &miss, // When not a string. + &miss, // When not a number. + &miss, // When index out of range. + STRING_INDEX_IS_ARRAY_INDEX); + char_at_generator.GenerateFast(masm); + __ ret(0); + + StubRuntimeCallHelper call_helper; + char_at_generator.GenerateSlow(masm, call_helper); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { + // Return address is on the stack. + Label slow; + + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + Register scratch = eax; + DCHECK(!scratch.is(receiver) && !scratch.is(key)); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &slow); + + // Check that the key is an array index, that is Uint32. + __ test(key, Immediate(kSmiTagMask | kSmiSignMask)); + __ j(not_zero, &slow); + + // Get the map of the receiver. + __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + + // Check that it has indexed interceptor and access checks + // are not enabled for this object. + __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset)); + __ and_(scratch, Immediate(kSlowCaseBitFieldMask)); + __ cmp(scratch, Immediate(1 << Map::kHasIndexedInterceptor)); + __ j(not_zero, &slow); + + // Everything is fine, call runtime. + __ pop(scratch); + __ push(receiver); // receiver + __ push(key); // key + __ push(scratch); // return address + + // Perform tail call to the entry. 
+ ExternalReference ref = ExternalReference( + IC_Utility(kLoadElementWithInterceptor), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); + + __ bind(&slow); + GenerateMiss(masm); +} + + +void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { + // The return address is on the stack. + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + + Label slow, notin; + Factory* factory = masm->isolate()->factory(); + Operand mapped_location = + GenerateMappedArgumentsLookup( + masm, receiver, key, ebx, eax, ¬in, &slow); + __ mov(eax, mapped_location); + __ Ret(); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in ebx. + Operand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow); + __ cmp(unmapped_location, factory->the_hole_value()); + __ j(equal, &slow); + __ mov(eax, unmapped_location); + __ Ret(); + __ bind(&slow); + GenerateMiss(masm); +} + + +void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { + // Return address is on the stack. + Label slow, notin; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + DCHECK(receiver.is(edx)); + DCHECK(name.is(ecx)); + DCHECK(value.is(eax)); + + Operand mapped_location = + GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, ¬in, + &slow); + __ mov(mapped_location, value); + __ lea(ecx, mapped_location); + __ mov(edx, value); + __ RecordWrite(ebx, ecx, edx); + __ Ret(); + __ bind(¬in); + // The unmapped lookup expects that the parameter map is in ebx. 
+ Operand unmapped_location = + GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow); + __ mov(unmapped_location, value); + __ lea(edi, unmapped_location); + __ mov(edx, value); + __ RecordWrite(ebx, edi, edx); + __ Ret(); + __ bind(&slow); + GenerateMiss(masm); +} + + +static void KeyedStoreGenerateGenericHelper( + MacroAssembler* masm, + Label* fast_object, + Label* fast_double, + Label* slow, + KeyedStoreCheckMap check_map, + KeyedStoreIncrementLength increment_length) { + Label transition_smi_elements; + Label finish_object_store, non_double_value, transition_double_elements; + Label fast_double_without_map_check; + Register receiver = KeyedStoreIC::ReceiverRegister(); + Register key = KeyedStoreIC::NameRegister(); + Register value = KeyedStoreIC::ValueRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + DCHECK(value.is(eax)); + // key is a smi. + // ebx: FixedArray receiver->elements + // edi: receiver map + // Fast case: Do the store, could either Object or double. + __ bind(fast_object); + if (check_map == kCheckMap) { + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); + __ j(not_equal, fast_double); + } + + // HOLECHECK: guards "A[i] = V" + // We have to go to the runtime if the current value is the hole because + // there may be a callback on the element + Label holecheck_passed1; + __ cmp(FixedArrayElementOperand(ebx, key), + masm->isolate()->factory()->the_hole_value()); + __ j(not_equal, &holecheck_passed1); + __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + + __ bind(&holecheck_passed1); + + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(value, &non_smi_value); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. 
+ __ add(FieldOperand(receiver, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + } + // It's irrelevant whether array is smi-only or not when writing a smi. + __ mov(FixedArrayElementOperand(ebx, key), value); + __ ret(0); + + __ bind(&non_smi_value); + // Escape to elements kind transition case. + __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastObjectElements(edi, &transition_smi_elements); + + // Fast elements array, store the value to the elements backing store. + __ bind(&finish_object_store); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ add(FieldOperand(receiver, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + } + __ mov(FixedArrayElementOperand(ebx, key), value); + // Update write barrier for the elements array address. + __ mov(edx, value); // Preserve the value which is returned. + __ RecordWriteArray( + ebx, edx, key, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ ret(0); + + __ bind(fast_double); + if (check_map == kCheckMap) { + // Check for fast double array case. If this fails, call through to the + // runtime. + __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map()); + __ j(not_equal, slow); + // If the value is a number, store it as a double in the FastDoubleElements + // array. + } + + // HOLECHECK: guards "A[i] double hole?" + // We have to see if the double version of the hole is present. If so + // go to the runtime. 
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32)); + __ j(not_equal, &fast_double_without_map_check); + __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + + __ bind(&fast_double_without_map_check); + __ StoreNumberToDoubleElements(value, ebx, key, edi, + &transition_double_elements, false); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ add(FieldOperand(receiver, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + } + __ ret(0); + + __ bind(&transition_smi_elements); + __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset)); + + // Transition the array appropriately depending on the value type. + __ CheckMap(value, + masm->isolate()->factory()->heap_number_map(), + &non_double_value, + DONT_DO_SMI_CHECK); + + // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS + // and complete the store. 
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS, + ebx, + edi, + slow); + AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble( + masm, receiver, key, value, ebx, mode, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&fast_double_without_map_check); + + __ bind(&non_double_value); + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_ELEMENTS, + ebx, + edi, + slow); + mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + masm, receiver, key, value, ebx, mode, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&finish_object_store); + + __ bind(&transition_double_elements); + // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a + // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and + // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS + __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset)); + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS, + ebx, + edi, + slow); + mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject( + masm, receiver, key, value, ebx, mode, slow); + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&finish_object_store); +} + + +void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, + StrictMode strict_mode) { + // Return address is on the stack. 
+ Label slow, fast_object, fast_object_grow; + Label fast_double, fast_double_grow; + Label array, extra, check_if_double_array; + Register receiver = ReceiverRegister(); + Register key = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(key.is(ecx)); + + // Check that the object isn't a smi. + __ JumpIfSmi(receiver, &slow); + // Get the map from the receiver. + __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset)); + // Check that the receiver does not require access checks and is not observed. + // The generic stub does not perform map checks or handle observed objects. + __ test_b(FieldOperand(edi, Map::kBitFieldOffset), + 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved); + __ j(not_zero, &slow); + // Check that the key is a smi. + __ JumpIfNotSmi(key, &slow); + __ CmpInstanceType(edi, JS_ARRAY_TYPE); + __ j(equal, &array); + // Check that the object is some kind of JSObject. + __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE); + __ j(below, &slow); + + // Object case: Check key against length in the elements array. + // Key is a smi. + // edi: receiver map + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + // Check array bounds. Both the key and the length of FixedArray are smis. + __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ j(below, &fast_object); + + // Slow case: call runtime. + __ bind(&slow); + GenerateRuntimeSetProperty(masm, strict_mode); + + // Extra capacity case: Check if there is extra capacity to + // perform the store and update the length. Used for adding one + // element to the array by writing to array[array.length]. + __ bind(&extra); + // receiver is a JSArray. + // key is a smi. 
+ // ebx: receiver->elements, a FixedArray + // edi: receiver map + // flags: compare (key, receiver.length()) + // do not leave holes in the array: + __ j(not_equal, &slow); + __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ j(above_equal, &slow); + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); + __ j(not_equal, &check_if_double_array); + __ jmp(&fast_object_grow); + + __ bind(&check_if_double_array); + __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map()); + __ j(not_equal, &slow); + __ jmp(&fast_double_grow); + + // Array case: Get the length and the elements array from the JS + // array. Check that the array is in fast mode (and writable); if it + // is the length is always a smi. + __ bind(&array); + // receiver is a JSArray. + // key is a smi. + // edi: receiver map + __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); + + // Check the key against the length in the array and fall through to the + // common store code. + __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis. + __ j(above_equal, &extra); + + KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, + &slow, kCheckMap, kDontIncrementLength); + KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, + &slow, kDontCheckMap, kIncrementLength); +} + + +void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { + // The return address is on the stack. + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + DCHECK(receiver.is(edx)); + DCHECK(name.is(ecx)); + + // Probe the stub cache. + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); + masm->isolate()->stub_cache()->GenerateProbe( + masm, flags, receiver, name, ebx, eax); + + // Cache miss: Jump to runtime. 
+ GenerateMiss(masm); +} + + +void LoadIC::GenerateNormal(MacroAssembler* masm) { + Register dictionary = eax; + DCHECK(!dictionary.is(ReceiverRegister())); + DCHECK(!dictionary.is(NameRegister())); + + Label slow; + + __ mov(dictionary, + FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); + GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), edi, ebx, + eax); + __ ret(0); + + // Dictionary load failed, go slow (but don't miss). + __ bind(&slow); + GenerateRuntimeGetProperty(masm); +} + + +static void LoadIC_PushArgs(MacroAssembler* masm) { + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + DCHECK(!ebx.is(receiver) && !ebx.is(name)); + + __ pop(ebx); + __ push(receiver); + __ push(name); + __ push(ebx); +} + + +void LoadIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. + __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1); + + LoadIC_PushArgs(masm); + + // Perform tail call to the entry. + ExternalReference ref = + ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); +} + + +void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // Return address is on the stack. + LoadIC_PushArgs(masm); + + // Perform tail call to the entry. + __ TailCallRuntime(Runtime::kGetProperty, 2, 1); +} + + +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. + __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1); + + LoadIC_PushArgs(masm); + + // Perform tail call to the entry. 
+ ExternalReference ref = + ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); +} + + +// IC register specifications +const Register LoadIC::ReceiverRegister() { return edx; } +const Register LoadIC::NameRegister() { return ecx; } + + +const Register LoadIC::SlotRegister() { + DCHECK(FLAG_vector_ics); + return eax; +} + + +const Register LoadIC::VectorRegister() { + DCHECK(FLAG_vector_ics); + return ebx; +} + + +const Register StoreIC::ReceiverRegister() { return edx; } +const Register StoreIC::NameRegister() { return ecx; } +const Register StoreIC::ValueRegister() { return eax; } + + +const Register KeyedStoreIC::MapRegister() { + return ebx; +} + + +void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // Return address is on the stack. + LoadIC_PushArgs(masm); + + // Perform tail call to the entry. + __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); +} + + +void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { + // Return address is on the stack. + Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); + masm->isolate()->stub_cache()->GenerateProbe( + masm, flags, ReceiverRegister(), NameRegister(), + ebx, no_reg); + + // Cache miss: Jump to runtime. + GenerateMiss(masm); +} + + +static void StoreIC_PushArgs(MacroAssembler* masm) { + Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + Register value = StoreIC::ValueRegister(); + + DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); + + __ pop(ebx); + __ push(receiver); + __ push(name); + __ push(value); + __ push(ebx); +} + + +void StoreIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. + StoreIC_PushArgs(masm); + + // Perform tail call to the entry. 
+ ExternalReference ref = + ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +void StoreIC::GenerateNormal(MacroAssembler* masm) { + Label restore_miss; + Register receiver = ReceiverRegister(); + Register name = NameRegister(); + Register value = ValueRegister(); + Register dictionary = ebx; + + __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); + + // A lot of registers are needed for storing to slow case + // objects. Push and restore receiver but rely on + // GenerateDictionaryStore preserving the value and name. + __ push(receiver); + GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value, + receiver, edi); + __ Drop(1); + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->store_normal_hit(), 1); + __ ret(0); + + __ bind(&restore_miss); + __ pop(receiver); + __ IncrementCounter(counters->store_normal_miss(), 1); + GenerateMiss(masm); +} + + +void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictMode strict_mode) { + // Return address is on the stack. + DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) && + !ebx.is(ValueRegister())); + __ pop(ebx); + __ push(ReceiverRegister()); + __ push(NameRegister()); + __ push(ValueRegister()); + __ push(Immediate(Smi::FromInt(strict_mode))); + __ push(ebx); // return address + + // Do tail-call to runtime routine. + __ TailCallRuntime(Runtime::kSetProperty, 4, 1); +} + + +void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, + StrictMode strict_mode) { + // Return address is on the stack. + DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) && + !ebx.is(ValueRegister())); + __ pop(ebx); + __ push(ReceiverRegister()); + __ push(NameRegister()); + __ push(ValueRegister()); + __ push(Immediate(Smi::FromInt(strict_mode))); + __ push(ebx); // return address + + // Do tail-call to runtime routine. 
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1); +} + + +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { + // Return address is on the stack. + StoreIC_PushArgs(masm); + + // Do tail-call to runtime routine. + ExternalReference ref = + ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +void StoreIC::GenerateSlow(MacroAssembler* masm) { + // Return address is on the stack. + StoreIC_PushArgs(masm); + + // Do tail-call to runtime routine. + ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { + // Return address is on the stack. + StoreIC_PushArgs(masm); + + // Do tail-call to runtime routine. + ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); + __ TailCallExternalReference(ref, 3, 1); +} + + +#undef __ + + +Condition CompareIC::ComputeCondition(Token::Value op) { + switch (op) { + case Token::EQ_STRICT: + case Token::EQ: + return equal; + case Token::LT: + return less; + case Token::GT: + return greater; + case Token::LTE: + return less_equal; + case Token::GTE: + return greater_equal; + default: + UNREACHABLE(); + return no_condition; + } +} + + +bool CompareIC::HasInlinedSmiCode(Address address) { + // The address of the instruction following the call. + Address test_instruction_address = + address + Assembler::kCallTargetAddressOffset; + + // If the instruction following the call is not a test al, nothing + // was inlined. + return *test_instruction_address == Assembler::kTestAlByte; +} + + +void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { + // The address of the instruction following the call. + Address test_instruction_address = + address + Assembler::kCallTargetAddressOffset; + + // If the instruction following the call is not a test al, nothing + // was inlined. 
+ if (*test_instruction_address != Assembler::kTestAlByte) { + DCHECK(*test_instruction_address == Assembler::kNopByte); + return; + } + + Address delta_address = test_instruction_address + 1; + // The delta to the start of the map check instruction and the + // condition code uses at the patched jump. + uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address); + if (FLAG_trace_ic) { + PrintF("[ patching ic at %p, test=%p, delta=%d\n", + address, test_instruction_address, delta); + } + + // Patch with a short conditional jump. Enabling means switching from a short + // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the + // reverse operation of that. + Address jmp_address = test_instruction_address - delta; + DCHECK((check == ENABLE_INLINED_SMI_CHECK) + ? (*jmp_address == Assembler::kJncShortOpcode || + *jmp_address == Assembler::kJcShortOpcode) + : (*jmp_address == Assembler::kJnzShortOpcode || + *jmp_address == Assembler::kJzShortOpcode)); + Condition cc = (check == ENABLE_INLINED_SMI_CHECK) + ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero) + : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry); + *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/lithium-codegen-x87.cc nodejs-0.11.15/deps/v8/src/x87/lithium-codegen-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/lithium-codegen-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/lithium-codegen-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5717 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/deoptimizer.h" +#include "src/hydrogen-osr.h" +#include "src/ic.h" +#include "src/stub-cache.h" +#include "src/x87/lithium-codegen-x87.h" + +namespace v8 { +namespace internal { + + +// When invoking builtins, we need to record the safepoint in the middle of +// the invoke instruction sequence generated by the macro assembler. +class SafepointGenerator V8_FINAL : public CallWrapper { + public: + SafepointGenerator(LCodeGen* codegen, + LPointerMap* pointers, + Safepoint::DeoptMode mode) + : codegen_(codegen), + pointers_(pointers), + deopt_mode_(mode) {} + virtual ~SafepointGenerator() {} + + virtual void BeforeCall(int call_size) const V8_OVERRIDE {} + + virtual void AfterCall() const V8_OVERRIDE { + codegen_->RecordSafepoint(pointers_, deopt_mode_); + } + + private: + LCodeGen* codegen_; + LPointerMap* pointers_; + Safepoint::DeoptMode deopt_mode_; +}; + + +#define __ masm()-> + +bool LCodeGen::GenerateCode() { + LPhase phase("Z_Code generation", chunk()); + DCHECK(is_unused()); + status_ = GENERATING; + + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). 
+ FrameScope frame_scope(masm_, StackFrame::MANUAL); + + support_aligned_spilled_doubles_ = info()->IsOptimizing(); + + dynamic_frame_alignment_ = info()->IsOptimizing() && + ((chunk()->num_double_slots() > 2 && + !chunk()->graph()->is_recursive()) || + !info()->osr_ast_id().IsNone()); + + return GeneratePrologue() && + GenerateBody() && + GenerateDeferredCode() && + GenerateJumpTable() && + GenerateSafepointTable(); +} + + +void LCodeGen::FinishCode(Handle<Code> code) { + DCHECK(is_done()); + code->set_stack_slots(GetStackSlotCount()); + code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); + PopulateDeoptimizationData(code); + if (!info()->IsStub()) { + Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); + } +} + + +#ifdef _MSC_VER +void LCodeGen::MakeSureStackPagesMapped(int offset) { + const int kPageSize = 4 * KB; + for (offset -= kPageSize; offset > 0; offset -= kPageSize) { + __ mov(Operand(esp, offset), eax); + } +} +#endif + + +bool LCodeGen::GeneratePrologue() { + DCHECK(is_generating()); + + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); + +#ifdef DEBUG + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ int3(); + } +#endif + + // Sloppy mode functions and builtins need to replace the receiver with the + // global proxy when called as functions (without an explicit receiver + // object). + if (info_->this_has_uses() && + info_->strict_mode() == SLOPPY && + !info_->is_native()) { + Label ok; + // +1 for return address. 
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; + __ mov(ecx, Operand(esp, receiver_offset)); + + __ cmp(ecx, isolate()->factory()->undefined_value()); + __ j(not_equal, &ok, Label::kNear); + + __ mov(ecx, GlobalObjectOperand()); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset)); + + __ mov(Operand(esp, receiver_offset), ecx); + + __ bind(&ok); + } + + if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) { + // Move state of dynamic frame alignment into edx. + __ Move(edx, Immediate(kNoAlignmentPadding)); + + Label do_not_pad, align_loop; + STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); + // Align esp + 4 to a multiple of 2 * kPointerSize. + __ test(esp, Immediate(kPointerSize)); + __ j(not_zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + __ mov(edx, Immediate(kAlignmentPaddingPushed)); + // Copy arguments, receiver, and return address. + __ mov(ecx, Immediate(scope()->num_parameters() + 2)); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); + __ bind(&do_not_pad); + } + } + + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + DCHECK(!frame_is_built_); + frame_is_built_ = true; + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } + info()->AddNoFrameRange(0, masm_->pc_offset()); + } + + if (info()->IsOptimizing() && + dynamic_frame_alignment_ && + FLAG_debug_code) { + __ test(esp, Immediate(kPointerSize)); + __ Assert(zero, kFrameIsExpectedToBeAligned); + } + + // Reserve space for the stack slots needed by the code. 
+ int slots = GetStackSlotCount(); + DCHECK(slots != 0 || !info()->IsOptimizing()); + if (slots > 0) { + if (slots == 1) { + if (dynamic_frame_alignment_) { + __ push(edx); + } else { + __ push(Immediate(kNoAlignmentPadding)); + } + } else { + if (FLAG_debug_code) { + __ sub(Operand(esp), Immediate(slots * kPointerSize)); +#ifdef _MSC_VER + MakeSureStackPagesMapped(slots * kPointerSize); +#endif + __ push(eax); + __ mov(Operand(eax), Immediate(slots)); + Label loop; + __ bind(&loop); + __ mov(MemOperand(esp, eax, times_4, 0), + Immediate(kSlotsZapValue)); + __ dec(eax); + __ j(not_zero, &loop); + __ pop(eax); + } else { + __ sub(Operand(esp), Immediate(slots * kPointerSize)); +#ifdef _MSC_VER + MakeSureStackPagesMapped(slots * kPointerSize); +#endif + } + + if (support_aligned_spilled_doubles_) { + Comment(";;; Store dynamic frame alignment tag for spilled doubles"); + // Store dynamic frame alignment state in the first local. + int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; + if (dynamic_frame_alignment_) { + __ mov(Operand(ebp, offset), edx); + } else { + __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); + } + } + } + } + + // Possibly allocate a local context. + int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + bool need_write_barrier = true; + // Argument to NewContext is the function, which is still in edi. + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(isolate(), heap_slots); + __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; + } else { + __ push(edi); + __ CallRuntime(Runtime::kNewFunctionContext, 1); + } + RecordSafepoint(Safepoint::kNoLazyDeopt); + // Context is returned in eax. It replaces the context passed to us. + // It's saved in the stack and kept live in esi. 
+ __ mov(esi, eax); + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax); + + // Copy parameters into context if necessary. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Variable* var = scope()->parameter(i); + if (var->IsContextSlot()) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ mov(eax, Operand(ebp, parameter_offset)); + // Store it in the context. + int context_offset = Context::SlotOffset(var->index()); + __ mov(Operand(esi, context_offset), eax); + // Update the write barrier. This clobbers eax and ebx. + if (need_write_barrier) { + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } + } + } + Comment(";;; End allocate local context"); + } + + // Trace the call. + if (FLAG_trace && info()->IsOptimizing()) { + // We have not executed any compiled code yet, so esi still holds the + // incoming context. + __ CallRuntime(Runtime::kTraceEnter, 0); + } + return !is_aborted(); +} + + +void LCodeGen::GenerateOsrPrologue() { + // Generate the OSR entry prologue at the first unknown OSR value, or if there + // are none, at the OSR entrypoint instruction. + if (osr_pc_offset_ >= 0) return; + + osr_pc_offset_ = masm()->pc_offset(); + + // Move state of dynamic frame alignment into edx. + __ Move(edx, Immediate(kNoAlignmentPadding)); + + if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) { + Label do_not_pad, align_loop; + // Align ebp + 4 to a multiple of 2 * kPointerSize. + __ test(ebp, Immediate(kPointerSize)); + __ j(zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + __ mov(edx, Immediate(kAlignmentPaddingPushed)); + + // Move all parts of the frame over one word. 
The frame consists of: + // unoptimized frame slots, alignment state, context, frame pointer, return + // address, receiver, and the arguments. + __ mov(ecx, Immediate(scope()->num_parameters() + + 5 + graph()->osr()->UnoptimizedFrameSlots())); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); + __ sub(Operand(ebp), Immediate(kPointerSize)); + __ bind(&do_not_pad); + } + + // Save the first local, which is overwritten by the alignment state. + Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize); + __ push(alignment_loc); + + // Set the dynamic frame alignment state. + __ mov(alignment_loc, edx); + + // Adjust the frame size, subsuming the unoptimized frame into the + // optimized frame. + int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); + DCHECK(slots >= 1); + __ sub(esp, Immediate((slots - 1) * kPointerSize)); +} + + +void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { + if (instr->IsCall()) { + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + } + if (!instr->IsLazyBailout() && !instr->IsGap()) { + safepoints_.BumpLastLazySafepointIndex(); + } + FlushX87StackIfNecessary(instr); +} + + +void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { + if (instr->IsGoto()) { + x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); + } else if (FLAG_debug_code && FLAG_enable_slow_asserts && + !instr->IsGap() && !instr->IsReturn()) { + if (instr->ClobbersDoubleRegisters(isolate())) { + if (instr->HasDoubleRegisterResult()) { + DCHECK_EQ(1, x87_stack_.depth()); + } else { + DCHECK_EQ(0, x87_stack_.depth()); + } + } + __ VerifyX87StackDepth(x87_stack_.depth()); + } +} + + +bool LCodeGen::GenerateJumpTable() { + Label needs_frame; + if (jump_table_.length() > 0) { + Comment(";;; 
-------------------- Jump table --------------------"); + } + for (int i = 0; i < jump_table_.length(); i++) { + __ bind(&jump_table_[i].label); + Address entry = jump_table_[i].address; + Deoptimizer::BailoutType type = jump_table_[i].bailout_type; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); + } else { + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + } + if (jump_table_[i].needs_frame) { + DCHECK(!info()->saves_caller_doubles()); + __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); + if (needs_frame.is_bound()) { + __ jmp(&needs_frame); + } else { + __ bind(&needs_frame); + __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + DCHECK(info()->IsStub()); + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + // Push a PC inside the function so that the deopt code can find where + // the deopt comes from. It doesn't have to be the precise return + // address of a "calling" LAZY deopt, it only has to be somewhere + // inside the code body. + Label push_approx_pc; + __ call(&push_approx_pc); + __ bind(&push_approx_pc); + // Push the continuation which was stashed were the ebp should + // be. Replace it with the saved ebp. + __ push(MemOperand(esp, 3 * kPointerSize)); + __ mov(MemOperand(esp, 4 * kPointerSize), ebp); + __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); + __ ret(0); // Call the continuation without clobbering registers. 
+ } + } else { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } + } + return !is_aborted(); +} + + +bool LCodeGen::GenerateDeferredCode() { + DCHECK(is_generating()); + if (deferred_.length() > 0) { + for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { + LDeferredCode* code = deferred_[i]; + X87Stack copy(code->x87_stack()); + x87_stack_ = copy; + + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition( + chunk()->graph()->SourcePositionToScriptPosition(value->position())); + + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); + __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Build frame"); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); + frame_is_built_ = true; + // Build the frame in such a way that esi isn't trashed. + __ push(ebp); // Caller's frame pointer. + __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + __ lea(ebp, Operand(esp, 2 * kPointerSize)); + Comment(";;; Deferred code"); + } + code->Generate(); + if (NeedsDeferredFrame()) { + __ bind(code->done()); + Comment(";;; Destroy frame"); + DCHECK(frame_is_built_); + frame_is_built_ = false; + __ mov(esp, ebp); + __ pop(ebp); + } + __ jmp(code->exit()); + } + } + + // Deferred code is the last part of the instruction sequence. Mark + // the generated code as done unless we bailed out. + if (!is_aborted()) status_ = DONE; + return !is_aborted(); +} + + +bool LCodeGen::GenerateSafepointTable() { + DCHECK(is_done()); + if (!info()->IsStub()) { + // For lazy deoptimization we need space to patch a call after every call. + // Ensure there is always space for such patching, even if the code ends + // in a call. 
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); + while (masm()->pc_offset() < target_offset) { + masm()->nop(); + } + } + safepoints_.Emit(masm(), GetStackSlotCount()); + return !is_aborted(); +} + + +Register LCodeGen::ToRegister(int index) const { + return Register::FromAllocationIndex(index); +} + + +X87Register LCodeGen::ToX87Register(int index) const { + return X87Register::FromAllocationIndex(index); +} + + +void LCodeGen::X87LoadForUsage(X87Register reg) { + DCHECK(x87_stack_.Contains(reg)); + x87_stack_.Fxch(reg); + x87_stack_.pop(); +} + + +void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { + DCHECK(x87_stack_.Contains(reg1)); + DCHECK(x87_stack_.Contains(reg2)); + x87_stack_.Fxch(reg1, 1); + x87_stack_.Fxch(reg2); + x87_stack_.pop(); + x87_stack_.pop(); +} + + +void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { + DCHECK(is_mutable_); + DCHECK(Contains(reg) && stack_depth_ > other_slot); + int i = ArrayIndex(reg); + int st = st2idx(i); + if (st != other_slot) { + int other_i = st2idx(other_slot); + X87Register other = stack_[other_i]; + stack_[other_i] = reg; + stack_[i] = other; + if (st == 0) { + __ fxch(other_slot); + } else if (other_slot == 0) { + __ fxch(st); + } else { + __ fxch(st); + __ fxch(other_slot); + __ fxch(st); + } + } +} + + +int LCodeGen::X87Stack::st2idx(int pos) { + return stack_depth_ - pos - 1; +} + + +int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { + for (int i = 0; i < stack_depth_; i++) { + if (stack_[i].is(reg)) return i; + } + UNREACHABLE(); + return -1; +} + + +bool LCodeGen::X87Stack::Contains(X87Register reg) { + for (int i = 0; i < stack_depth_; i++) { + if (stack_[i].is(reg)) return true; + } + return false; +} + + +void LCodeGen::X87Stack::Free(X87Register reg) { + DCHECK(is_mutable_); + DCHECK(Contains(reg)); + int i = ArrayIndex(reg); + int st = st2idx(i); + if (st > 0) { + // keep track of how fstp(i) changes the order of elements + int tos_i = st2idx(0); + 
stack_[i] = stack_[tos_i]; + } + pop(); + __ fstp(st); +} + + +void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { + if (x87_stack_.Contains(dst)) { + x87_stack_.Fxch(dst); + __ fstp(0); + } else { + x87_stack_.push(dst); + } + X87Fld(src, opts); +} + + +void LCodeGen::X87Fld(Operand src, X87OperandType opts) { + DCHECK(!src.is_reg_only()); + switch (opts) { + case kX87DoubleOperand: + __ fld_d(src); + break; + case kX87FloatOperand: + __ fld_s(src); + break; + case kX87IntOperand: + __ fild_s(src); + break; + default: + UNREACHABLE(); + } +} + + +void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { + DCHECK(!dst.is_reg_only()); + x87_stack_.Fxch(src); + switch (opts) { + case kX87DoubleOperand: + __ fst_d(dst); + break; + case kX87IntOperand: + __ fist_s(dst); + break; + default: + UNREACHABLE(); + } +} + + +void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { + DCHECK(is_mutable_); + if (Contains(reg)) { + Free(reg); + } + // Mark this register as the next register to write to + stack_[stack_depth_] = reg; +} + + +void LCodeGen::X87Stack::CommitWrite(X87Register reg) { + DCHECK(is_mutable_); + // Assert the reg is prepared to write, but not on the virtual stack yet + DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) && + stack_depth_ < X87Register::kMaxNumAllocatableRegisters); + stack_depth_++; +} + + +void LCodeGen::X87PrepareBinaryOp( + X87Register left, X87Register right, X87Register result) { + // You need to use DefineSameAsFirst for x87 instructions + DCHECK(result.is(left)); + x87_stack_.Fxch(right, 1); + x87_stack_.Fxch(left); +} + + +void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { + if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { + bool double_inputs = instr->HasDoubleRegisterInput(); + + // Flush stack from tos down, since FreeX87() will mess with tos + for (int i = stack_depth_-1; i >= 0; i--) { + X87Register reg = stack_[i]; + // Skip 
registers which contain the inputs for the next instruction + // when flushing the stack + if (double_inputs && instr->IsDoubleInput(reg, cgen)) { + continue; + } + Free(reg); + if (i < stack_depth_-1) i++; + } + } + if (instr->IsReturn()) { + while (stack_depth_ > 0) { + __ fstp(0); + stack_depth_--; + } + if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); + } +} + + +void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) { + DCHECK(stack_depth_ <= 1); + // If ever used for new stubs producing two pairs of doubles joined into two + // phis this assert hits. That situation is not handled, since the two stacks + // might have st0 and st1 swapped. + if (current_block_id + 1 != goto_instr->block_id()) { + // If we have a value on the x87 stack on leaving a block, it must be a + // phi input. If the next block we compile is not the join block, we have + // to discard the stack state. + stack_depth_ = 0; + } +} + + +void LCodeGen::EmitFlushX87ForDeopt() { + // The deoptimizer does not support X87 Registers. But as long as we + // deopt from a stub its not a problem, since we will re-materialize the + // original stub inputs, which can't be double registers. 
+ DCHECK(info()->IsStub()); + if (FLAG_debug_code && FLAG_enable_slow_asserts) { + __ pushfd(); + __ VerifyX87StackDepth(x87_stack_.depth()); + __ popfd(); + } + for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); +} + + +Register LCodeGen::ToRegister(LOperand* op) const { + DCHECK(op->IsRegister()); + return ToRegister(op->index()); +} + + +X87Register LCodeGen::ToX87Register(LOperand* op) const { + DCHECK(op->IsDoubleRegister()); + return ToX87Register(op->index()); +} + + +int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { + return ToRepresentation(op, Representation::Integer32()); +} + + +int32_t LCodeGen::ToRepresentation(LConstantOperand* op, + const Representation& r) const { + HConstant* constant = chunk_->LookupConstant(op); + int32_t value = constant->Integer32Value(); + if (r.IsInteger32()) return value; + DCHECK(r.IsSmiOrTagged()); + return reinterpret_cast<int32_t>(Smi::FromInt(value)); +} + + +Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + return constant->handle(isolate()); +} + + +double LCodeGen::ToDouble(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(constant->HasDoubleValue()); + return constant->DoubleValue(); +} + + +ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(constant->HasExternalReferenceValue()); + return constant->ExternalReferenceValue(); +} + + +bool LCodeGen::IsInteger32(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); +} + + +bool LCodeGen::IsSmi(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmi(); +} + + +static int ArgumentsOffsetWithoutFrame(int index) { + DCHECK(index < 0); + return -(index + 1) * kPointerSize + kPCOnStackSize; +} + + +Operand 
LCodeGen::ToOperand(LOperand* op) const { + if (op->IsRegister()) return Operand(ToRegister(op)); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + if (NeedsEagerFrame()) { + return Operand(ebp, StackSlotOffset(op->index())); + } else { + // Retrieve parameter without eager stack-frame relative to the + // stack-pointer. + return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); + } +} + + +Operand LCodeGen::HighOperand(LOperand* op) { + DCHECK(op->IsDoubleStackSlot()); + if (NeedsEagerFrame()) { + return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize); + } else { + // Retrieve parameter without eager stack-frame relative to the + // stack-pointer. + return Operand( + esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); + } +} + + +void LCodeGen::WriteTranslation(LEnvironment* environment, + Translation* translation) { + if (environment == NULL) return; + + // The translation includes one command per value in the environment. + int translation_size = environment->translation_size(); + // The output frame height does not include the parameters. + int height = translation_size - environment->parameter_count(); + + WriteTranslation(environment->outer(), translation); + bool has_closure_id = !info()->closure().is_null() && + !info()->closure().is_identical_to(environment->closure()); + int closure_id = has_closure_id + ? 
DefineDeoptimizationLiteral(environment->closure()) + : Translation::kSelfLiteralId; + switch (environment->frame_type()) { + case JS_FUNCTION: + translation->BeginJSFrame(environment->ast_id(), closure_id, height); + break; + case JS_CONSTRUCT: + translation->BeginConstructStubFrame(closure_id, translation_size); + break; + case JS_GETTER: + DCHECK(translation_size == 1); + DCHECK(height == 0); + translation->BeginGetterStubFrame(closure_id); + break; + case JS_SETTER: + DCHECK(translation_size == 2); + DCHECK(height == 0); + translation->BeginSetterStubFrame(closure_id); + break; + case ARGUMENTS_ADAPTOR: + translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); + break; + case STUB: + translation->BeginCompiledStubFrame(); + break; + default: + UNREACHABLE(); + } + + int object_index = 0; + int dematerialized_index = 0; + for (int i = 0; i < translation_size; ++i) { + LOperand* value = environment->values()->at(i); + AddToTranslation(environment, + translation, + value, + environment->HasTaggedValueAt(i), + environment->HasUint32ValueAt(i), + &object_index, + &dematerialized_index); + } +} + + +void LCodeGen::AddToTranslation(LEnvironment* environment, + Translation* translation, + LOperand* op, + bool is_tagged, + bool is_uint32, + int* object_index_pointer, + int* dematerialized_index_pointer) { + if (op == LEnvironment::materialization_marker()) { + int object_index = (*object_index_pointer)++; + if (environment->ObjectIsDuplicateAt(object_index)) { + int dupe_of = environment->ObjectDuplicateOfAt(object_index); + translation->DuplicateObject(dupe_of); + return; + } + int object_length = environment->ObjectLengthAt(object_index); + if (environment->ObjectIsArgumentsAt(object_index)) { + translation->BeginArgumentsObject(object_length); + } else { + translation->BeginCapturedObject(object_length); + } + int dematerialized_index = *dematerialized_index_pointer; + int env_offset = environment->translation_size() + dematerialized_index; + 
*dematerialized_index_pointer += object_length; + for (int i = 0; i < object_length; ++i) { + LOperand* value = environment->values()->at(env_offset + i); + AddToTranslation(environment, + translation, + value, + environment->HasTaggedValueAt(env_offset + i), + environment->HasUint32ValueAt(env_offset + i), + object_index_pointer, + dematerialized_index_pointer); + } + return; + } + + if (op->IsStackSlot()) { + if (is_tagged) { + translation->StoreStackSlot(op->index()); + } else if (is_uint32) { + translation->StoreUint32StackSlot(op->index()); + } else { + translation->StoreInt32StackSlot(op->index()); + } + } else if (op->IsDoubleStackSlot()) { + translation->StoreDoubleStackSlot(op->index()); + } else if (op->IsRegister()) { + Register reg = ToRegister(op); + if (is_tagged) { + translation->StoreRegister(reg); + } else if (is_uint32) { + translation->StoreUint32Register(reg); + } else { + translation->StoreInt32Register(reg); + } + } else if (op->IsConstantOperand()) { + HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); + int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); + translation->StoreLiteral(src_index); + } else { + UNREACHABLE(); + } +} + + +void LCodeGen::CallCodeGeneric(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr, + SafepointMode safepoint_mode) { + DCHECK(instr != NULL); + __ call(code, mode); + RecordSafepointWithLazyDeopt(instr, safepoint_mode); + + // Signal that we don't inline smi code before these stubs in the + // optimizing code generator. 
+ if (code->kind() == Code::BINARY_OP_IC || + code->kind() == Code::COMPARE_IC) { + __ nop(); + } +} + + +void LCodeGen::CallCode(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr) { + CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::CallRuntime(const Runtime::Function* fun, + int argc, + LInstruction* instr) { + DCHECK(instr != NULL); + DCHECK(instr->HasPointerMap()); + + __ CallRuntime(fun, argc); + + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + + DCHECK(info()->is_calling()); +} + + +void LCodeGen::LoadContextFromDeferred(LOperand* context) { + if (context->IsRegister()) { + if (!ToRegister(context).is(esi)) { + __ mov(esi, ToRegister(context)); + } + } else if (context->IsStackSlot()) { + __ mov(esi, ToOperand(context)); + } else if (context->IsConstantOperand()) { + HConstant* constant = + chunk_->LookupConstant(LConstantOperand::cast(context)); + __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); + } else { + UNREACHABLE(); + } +} + +void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, + int argc, + LInstruction* instr, + LOperand* context) { + LoadContextFromDeferred(context); + + __ CallRuntime(id); + RecordSafepointWithRegisters( + instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); + + DCHECK(info()->is_calling()); +} + + +void LCodeGen::RegisterEnvironmentForDeoptimization( + LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); + if (!environment->HasBeenRegistered()) { + // Physical stack frame layout: + // -x ............. -4 0 ..................................... y + // [incoming arguments] [spill slots] [pushed outgoing arguments] + + // Layout of the environment: + // 0 ..................................................... size-1 + // [parameters] [locals] [expression stack including arguments] + + // Layout of the translation: + // 0 ........................................................ 
size - 1 + 4 + // [expression stack including arguments] [locals] [4 words] [parameters] + // |>------------ translation_size ------------<| + + int frame_count = 0; + int jsframe_count = 0; + for (LEnvironment* e = environment; e != NULL; e = e->outer()) { + ++frame_count; + if (e->frame_type() == JS_FUNCTION) { + ++jsframe_count; + } + } + Translation translation(&translations_, frame_count, jsframe_count, zone()); + WriteTranslation(environment, &translation); + int deoptimization_index = deoptimizations_.length(); + int pc_offset = masm()->pc_offset(); + environment->Register(deoptimization_index, + translation.index(), + (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); + deoptimizations_.Add(environment, zone()); + } +} + + +void LCodeGen::DeoptimizeIf(Condition cc, + LEnvironment* environment, + Deoptimizer::BailoutType bailout_type) { + RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); + DCHECK(environment->HasBeenRegistered()); + int id = environment->deoptimization_index(); + DCHECK(info()->IsOptimizing() || info()->IsStub()); + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); + if (entry == NULL) { + Abort(kBailoutWasNotPrepared); + return; + } + + if (DeoptEveryNTimes()) { + ExternalReference count = ExternalReference::stress_deopt_count(isolate()); + Label no_deopt; + __ pushfd(); + __ push(eax); + __ mov(eax, Operand::StaticVariable(count)); + __ sub(eax, Immediate(1)); + __ j(not_zero, &no_deopt, Label::kNear); + if (FLAG_trap_on_deopt) __ int3(); + __ mov(eax, Immediate(FLAG_deopt_every_n_times)); + __ mov(Operand::StaticVariable(count), eax); + __ pop(eax); + __ popfd(); + DCHECK(frame_is_built_); + __ call(entry, RelocInfo::RUNTIME_ENTRY); + __ bind(&no_deopt); + __ mov(Operand::StaticVariable(count), eax); + __ pop(eax); + __ popfd(); + } + + // Before Instructions which can deopt, we normally flush the x87 stack. 
But + // we can have inputs or outputs of the current instruction on the stack, + // thus we need to flush them here from the physical stack to leave it in a + // consistent state. + if (x87_stack_.depth() > 0) { + Label done; + if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); + EmitFlushX87ForDeopt(); + __ bind(&done); + } + + if (info()->ShouldTrapOnDeopt()) { + Label done; + if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); + __ int3(); + __ bind(&done); + } + + DCHECK(info()->IsStub() || frame_is_built_); + if (cc == no_condition && frame_is_built_) { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } else { + // We often have several deopts to the same entry, reuse the last + // jump entry if this is the case. + if (jump_table_.is_empty() || + jump_table_.last().address != entry || + jump_table_.last().needs_frame != !frame_is_built_ || + jump_table_.last().bailout_type != bailout_type) { + Deoptimizer::JumpTableEntry table_entry(entry, + bailout_type, + !frame_is_built_); + jump_table_.Add(table_entry, zone()); + } + if (cc == no_condition) { + __ jmp(&jump_table_.last().label); + } else { + __ j(cc, &jump_table_.last().label); + } + } +} + + +void LCodeGen::DeoptimizeIf(Condition cc, + LEnvironment* environment) { + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? 
Deoptimizer::LAZY + : Deoptimizer::EAGER; + DeoptimizeIf(cc, environment, bailout_type); +} + + +void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { + int length = deoptimizations_.length(); + if (length == 0) return; + Handle<DeoptimizationInputData> data = + DeoptimizationInputData::New(isolate(), length, 0, TENURED); + + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); + data->SetTranslationByteArray(*translations); + data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); + data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); + if (info_->IsOptimizing()) { + // Reference to shared function info does not change between phases. + AllowDeferredHandleDereference allow_handle_dereference; + data->SetSharedFunctionInfo(*info_->shared_info()); + } else { + data->SetSharedFunctionInfo(Smi::FromInt(0)); + } + + Handle<FixedArray> literals = + factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); + { AllowDeferredHandleDereference copy_handles; + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); + } + + data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); + data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); + + // Populate the deoptimization entries. 
+ for (int i = 0; i < length; i++) { + LEnvironment* env = deoptimizations_[i]; + data->SetAstId(i, env->ast_id()); + data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); + data->SetArgumentsStackHeight(i, + Smi::FromInt(env->arguments_stack_height())); + data->SetPc(i, Smi::FromInt(env->pc_offset())); + } + code->set_deoptimization_data(*data); +} + + +int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { + int result = deoptimization_literals_.length(); + for (int i = 0; i < deoptimization_literals_.length(); ++i) { + if (deoptimization_literals_[i].is_identical_to(literal)) return i; + } + deoptimization_literals_.Add(literal, zone()); + return result; +} + + +void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { + DCHECK(deoptimization_literals_.length() == 0); + + const ZoneList<Handle<JSFunction> >* inlined_closures = + chunk()->inlined_closures(); + + for (int i = 0, length = inlined_closures->length(); + i < length; + i++) { + DefineDeoptimizationLiteral(inlined_closures->at(i)); + } + + inlined_function_count_ = deoptimization_literals_.length(); +} + + +void LCodeGen::RecordSafepointWithLazyDeopt( + LInstruction* instr, SafepointMode safepoint_mode) { + if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { + RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); + } else { + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); + } +} + + +void LCodeGen::RecordSafepoint( + LPointerMap* pointers, + Safepoint::Kind kind, + int arguments, + Safepoint::DeoptMode deopt_mode) { + DCHECK(kind == expected_safepoint_kind_); + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); + Safepoint safepoint = + safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); + for (int i = 0; i < operands->length(); i++) { + LOperand* pointer = operands->at(i); + if (pointer->IsStackSlot()) { + 
safepoint.DefinePointerSlot(pointer->index(), zone()); + } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { + safepoint.DefinePointerRegister(ToRegister(pointer), zone()); + } + } +} + + +void LCodeGen::RecordSafepoint(LPointerMap* pointers, + Safepoint::DeoptMode mode) { + RecordSafepoint(pointers, Safepoint::kSimple, 0, mode); +} + + +void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) { + LPointerMap empty_pointers(zone()); + RecordSafepoint(&empty_pointers, mode); +} + + +void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, + int arguments, + Safepoint::DeoptMode mode) { + RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode); +} + + +void LCodeGen::RecordAndWritePosition(int position) { + if (position == RelocInfo::kNoPosition) return; + masm()->positions_recorder()->RecordPosition(position); + masm()->positions_recorder()->WriteRecordedPositions(); +} + + +static const char* LabelType(LLabel* label) { + if (label->is_loop_header()) return " (loop header)"; + if (label->is_osr_entry()) return " (OSR entry)"; + return ""; +} + + +void LCodeGen::DoLabel(LLabel* label) { + Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", + current_instruction_, + label->hydrogen_value()->id(), + label->block_id(), + LabelType(label)); + __ bind(label->label()); + current_block_ = label->block_id(); + DoGap(label); +} + + +void LCodeGen::DoParallelMove(LParallelMove* move) { + resolver_.Resolve(move); +} + + +void LCodeGen::DoGap(LGap* gap) { + for (int i = LGap::FIRST_INNER_POSITION; + i <= LGap::LAST_INNER_POSITION; + i++) { + LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); + LParallelMove* move = gap->GetParallelMove(inner_pos); + if (move != NULL) DoParallelMove(move); + } +} + + +void LCodeGen::DoInstructionGap(LInstructionGap* instr) { + DoGap(instr); +} + + +void LCodeGen::DoParameter(LParameter* instr) { + // Nothing to do. 
+} + + +void LCodeGen::DoCallStub(LCallStub* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->result()).is(eax)); + switch (instr->hydrogen()->major_key()) { + case CodeStub::RegExpExec: { + RegExpExecStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + case CodeStub::SubString: { + SubStringStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + case CodeStub::StringCompare: { + StringCompareStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + default: + UNREACHABLE(); + } +} + + +void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { + GenerateOsrPrologue(); +} + + +void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + DCHECK(dividend.is(ToRegister(instr->result()))); + + // Theoretically, a variation of the branch-free code for integer division by + // a power of 2 (calculating the remainder via an additional multiplication + // (which gets simplified to an 'and') and subtraction) should be faster, and + // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to + // indicate that positive dividends are heavily favored, so the branching + // version performs better. + HMod* hmod = instr->hydrogen(); + int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); + Label dividend_is_not_negative, done; + if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { + __ test(dividend, dividend); + __ j(not_sign, ÷nd_is_not_negative, Label::kNear); + // Note that this is correct even for kMinInt operands. 
+ __ neg(dividend); + __ and_(dividend, mask); + __ neg(dividend); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(zero, instr->environment()); + } + __ jmp(&done, Label::kNear); + } + + __ bind(÷nd_is_not_negative); + __ and_(dividend, mask); + __ bind(&done); +} + + +void LCodeGen::DoModByConstI(LModByConstI* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + DCHECK(ToRegister(instr->result()).is(eax)); + + if (divisor == 0) { + DeoptimizeIf(no_condition, instr->environment()); + return; + } + + __ TruncatingDiv(dividend, Abs(divisor)); + __ imul(edx, edx, Abs(divisor)); + __ mov(eax, dividend); + __ sub(eax, edx); + + // Check for negative zero. + HMod* hmod = instr->hydrogen(); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label remainder_not_zero; + __ j(not_zero, &remainder_not_zero, Label::kNear); + __ cmp(dividend, Immediate(0)); + DeoptimizeIf(less, instr->environment()); + __ bind(&remainder_not_zero); + } +} + + +void LCodeGen::DoModI(LModI* instr) { + HMod* hmod = instr->hydrogen(); + + Register left_reg = ToRegister(instr->left()); + DCHECK(left_reg.is(eax)); + Register right_reg = ToRegister(instr->right()); + DCHECK(!right_reg.is(eax)); + DCHECK(!right_reg.is(edx)); + Register result_reg = ToRegister(instr->result()); + DCHECK(result_reg.is(edx)); + + Label done; + // Check for x % 0, idiv would signal a divide error. We have to + // deopt in this case because we can't return a NaN. + if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { + __ test(right_reg, Operand(right_reg)); + DeoptimizeIf(zero, instr->environment()); + } + + // Check for kMinInt % -1, idiv would signal a divide error. We + // have to deopt if we care about -0, because we can't return that. 
+ if (hmod->CheckFlag(HValue::kCanOverflow)) { + Label no_overflow_possible; + __ cmp(left_reg, kMinInt); + __ j(not_equal, &no_overflow_possible, Label::kNear); + __ cmp(right_reg, -1); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(equal, instr->environment()); + } else { + __ j(not_equal, &no_overflow_possible, Label::kNear); + __ Move(result_reg, Immediate(0)); + __ jmp(&done, Label::kNear); + } + __ bind(&no_overflow_possible); + } + + // Sign extend dividend in eax into edx:eax. + __ cdq(); + + // If we care about -0, test if the dividend is <0 and the result is 0. + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label positive_left; + __ test(left_reg, Operand(left_reg)); + __ j(not_sign, &positive_left, Label::kNear); + __ idiv(right_reg); + __ test(result_reg, Operand(result_reg)); + DeoptimizeIf(zero, instr->environment()); + __ jmp(&done, Label::kNear); + __ bind(&positive_left); + } + __ idiv(right_reg); + __ bind(&done); +} + + +void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + Register result = ToRegister(instr->result()); + DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); + DCHECK(!result.is(dividend)); + + // Check for (0 / -x) that will produce negative zero. + HDiv* hdiv = instr->hydrogen(); + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { + __ test(dividend, dividend); + DeoptimizeIf(zero, instr->environment()); + } + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { + __ cmp(dividend, kMinInt); + DeoptimizeIf(zero, instr->environment()); + } + // Deoptimize if remainder will not be 0. + if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && + divisor != 1 && divisor != -1) { + int32_t mask = divisor < 0 ? 
-(divisor + 1) : (divisor - 1); + __ test(dividend, Immediate(mask)); + DeoptimizeIf(not_zero, instr->environment()); + } + __ Move(result, dividend); + int32_t shift = WhichPowerOf2Abs(divisor); + if (shift > 0) { + // The arithmetic shift is always OK, the 'if' is an optimization only. + if (shift > 1) __ sar(result, 31); + __ shr(result, 32 - shift); + __ add(result, dividend); + __ sar(result, shift); + } + if (divisor < 0) __ neg(result); +} + + +void LCodeGen::DoDivByConstI(LDivByConstI* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + DCHECK(ToRegister(instr->result()).is(edx)); + + if (divisor == 0) { + DeoptimizeIf(no_condition, instr->environment()); + return; + } + + // Check for (0 / -x) that will produce negative zero. + HDiv* hdiv = instr->hydrogen(); + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { + __ test(dividend, dividend); + DeoptimizeIf(zero, instr->environment()); + } + + __ TruncatingDiv(dividend, Abs(divisor)); + if (divisor < 0) __ neg(edx); + + if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { + __ mov(eax, edx); + __ imul(eax, eax, divisor); + __ sub(eax, dividend); + DeoptimizeIf(not_equal, instr->environment()); + } +} + + +// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. +void LCodeGen::DoDivI(LDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + Register remainder = ToRegister(instr->temp()); + DCHECK(dividend.is(eax)); + DCHECK(remainder.is(edx)); + DCHECK(ToRegister(instr->result()).is(eax)); + DCHECK(!divisor.is(eax)); + DCHECK(!divisor.is(edx)); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + __ test(divisor, divisor); + DeoptimizeIf(zero, instr->environment()); + } + + // Check for (0 / -x) that will produce negative zero. 
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label dividend_not_zero; + __ test(dividend, dividend); + __ j(not_zero, ÷nd_not_zero, Label::kNear); + __ test(divisor, divisor); + DeoptimizeIf(sign, instr->environment()); + __ bind(÷nd_not_zero); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow)) { + Label dividend_not_min_int; + __ cmp(dividend, kMinInt); + __ j(not_zero, ÷nd_not_min_int, Label::kNear); + __ cmp(divisor, -1); + DeoptimizeIf(zero, instr->environment()); + __ bind(÷nd_not_min_int); + } + + // Sign extend to edx (= remainder). + __ cdq(); + __ idiv(divisor); + + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + // Deoptimize if remainder is not 0. + __ test(remainder, remainder); + DeoptimizeIf(not_zero, instr->environment()); + } +} + + +void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + DCHECK(dividend.is(ToRegister(instr->result()))); + + // If the divisor is positive, things are easy: There can be no deopts and we + // can simply do an arithmetic right shift. + if (divisor == 1) return; + int32_t shift = WhichPowerOf2Abs(divisor); + if (divisor > 1) { + __ sar(dividend, shift); + return; + } + + // If the divisor is negative, we have to negate and handle edge cases. + __ neg(dividend); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(zero, instr->environment()); + } + + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + DeoptimizeIf(overflow, instr->environment()); + } + return; + } + + // If the negation could not overflow, simply shifting is OK. 
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { + __ sar(dividend, shift); + return; + } + + Label not_kmin_int, done; + __ j(no_overflow, ¬_kmin_int, Label::kNear); + __ mov(dividend, Immediate(kMinInt / divisor)); + __ jmp(&done, Label::kNear); + __ bind(¬_kmin_int); + __ sar(dividend, shift); + __ bind(&done); +} + + +void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { + Register dividend = ToRegister(instr->dividend()); + int32_t divisor = instr->divisor(); + DCHECK(ToRegister(instr->result()).is(edx)); + + if (divisor == 0) { + DeoptimizeIf(no_condition, instr->environment()); + return; + } + + // Check for (0 / -x) that will produce negative zero. + HMathFloorOfDiv* hdiv = instr->hydrogen(); + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { + __ test(dividend, dividend); + DeoptimizeIf(zero, instr->environment()); + } + + // Easy case: We need no dynamic check for the dividend and the flooring + // division is the same as the truncating division. + if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || + (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { + __ TruncatingDiv(dividend, Abs(divisor)); + if (divisor < 0) __ neg(edx); + return; + } + + // In the general case we may need to adjust before and after the truncating + // division to get a flooring division. + Register temp = ToRegister(instr->temp3()); + DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx)); + Label needs_adjustment, done; + __ cmp(dividend, Immediate(0)); + __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear); + __ TruncatingDiv(dividend, Abs(divisor)); + if (divisor < 0) __ neg(edx); + __ jmp(&done, Label::kNear); + __ bind(&needs_adjustment); + __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1)); + __ TruncatingDiv(temp, Abs(divisor)); + if (divisor < 0) __ neg(edx); + __ dec(edx); + __ bind(&done); +} + + +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register dividend = ToRegister(instr->dividend()); + Register divisor = ToRegister(instr->divisor()); + Register remainder = ToRegister(instr->temp()); + Register result = ToRegister(instr->result()); + DCHECK(dividend.is(eax)); + DCHECK(remainder.is(edx)); + DCHECK(result.is(eax)); + DCHECK(!divisor.is(eax)); + DCHECK(!divisor.is(edx)); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + __ test(divisor, divisor); + DeoptimizeIf(zero, instr->environment()); + } + + // Check for (0 / -x) that will produce negative zero. + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label dividend_not_zero; + __ test(dividend, dividend); + __ j(not_zero, ÷nd_not_zero, Label::kNear); + __ test(divisor, divisor); + DeoptimizeIf(sign, instr->environment()); + __ bind(÷nd_not_zero); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow)) { + Label dividend_not_min_int; + __ cmp(dividend, kMinInt); + __ j(not_zero, ÷nd_not_min_int, Label::kNear); + __ cmp(divisor, -1); + DeoptimizeIf(zero, instr->environment()); + __ bind(÷nd_not_min_int); + } + + // Sign extend to edx (= remainder). + __ cdq(); + __ idiv(divisor); + + Label done; + __ test(remainder, remainder); + __ j(zero, &done, Label::kNear); + __ xor_(remainder, divisor); + __ sar(remainder, 31); + __ add(result, remainder); + __ bind(&done); +} + + +void LCodeGen::DoMulI(LMulI* instr) { + Register left = ToRegister(instr->left()); + LOperand* right = instr->right(); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ mov(ToRegister(instr->temp()), left); + } + + if (right->IsConstantOperand()) { + // Try strength reductions on the multiplication. + // All replacement instructions are at most as long as the imul + // and have better latency. 
+ int constant = ToInteger32(LConstantOperand::cast(right)); + if (constant == -1) { + __ neg(left); + } else if (constant == 0) { + __ xor_(left, Operand(left)); + } else if (constant == 2) { + __ add(left, Operand(left)); + } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + // If we know that the multiplication can't overflow, it's safe to + // use instructions that don't set the overflow flag for the + // multiplication. + switch (constant) { + case 1: + // Do nothing. + break; + case 3: + __ lea(left, Operand(left, left, times_2, 0)); + break; + case 4: + __ shl(left, 2); + break; + case 5: + __ lea(left, Operand(left, left, times_4, 0)); + break; + case 8: + __ shl(left, 3); + break; + case 9: + __ lea(left, Operand(left, left, times_8, 0)); + break; + case 16: + __ shl(left, 4); + break; + default: + __ imul(left, left, constant); + break; + } + } else { + __ imul(left, left, constant); + } + } else { + if (instr->hydrogen()->representation().IsSmi()) { + __ SmiUntag(left); + } + __ imul(left, ToOperand(right)); + } + + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + DeoptimizeIf(overflow, instr->environment()); + } + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + // Bail out if the result is supposed to be negative zero. + Label done; + __ test(left, Operand(left)); + __ j(not_zero, &done, Label::kNear); + if (right->IsConstantOperand()) { + if (ToInteger32(LConstantOperand::cast(right)) < 0) { + DeoptimizeIf(no_condition, instr->environment()); + } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { + __ cmp(ToRegister(instr->temp()), Immediate(0)); + DeoptimizeIf(less, instr->environment()); + } + } else { + // Test the non-zero operand for negative sign. 
+ __ or_(ToRegister(instr->temp()), ToOperand(right)); + DeoptimizeIf(sign, instr->environment()); + } + __ bind(&done); + } +} + + +void LCodeGen::DoBitI(LBitI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); + + if (right->IsConstantOperand()) { + int32_t right_operand = + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->representation()); + switch (instr->op()) { + case Token::BIT_AND: + __ and_(ToRegister(left), right_operand); + break; + case Token::BIT_OR: + __ or_(ToRegister(left), right_operand); + break; + case Token::BIT_XOR: + if (right_operand == int32_t(~0)) { + __ not_(ToRegister(left)); + } else { + __ xor_(ToRegister(left), right_operand); + } + break; + default: + UNREACHABLE(); + break; + } + } else { + switch (instr->op()) { + case Token::BIT_AND: + __ and_(ToRegister(left), ToOperand(right)); + break; + case Token::BIT_OR: + __ or_(ToRegister(left), ToOperand(right)); + break; + case Token::BIT_XOR: + __ xor_(ToRegister(left), ToOperand(right)); + break; + default: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoShiftI(LShiftI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); + if (right->IsRegister()) { + DCHECK(ToRegister(right).is(ecx)); + + switch (instr->op()) { + case Token::ROR: + __ ror_cl(ToRegister(left)); + if (instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr->environment()); + } + break; + case Token::SAR: + __ sar_cl(ToRegister(left)); + break; + case Token::SHR: + __ shr_cl(ToRegister(left)); + if (instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr->environment()); + } + break; + case Token::SHL: + __ shl_cl(ToRegister(left)); + break; + default: + UNREACHABLE(); + break; + } + } else { + int value = 
ToInteger32(LConstantOperand::cast(right)); + uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); + switch (instr->op()) { + case Token::ROR: + if (shift_count == 0 && instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr->environment()); + } else { + __ ror(ToRegister(left), shift_count); + } + break; + case Token::SAR: + if (shift_count != 0) { + __ sar(ToRegister(left), shift_count); + } + break; + case Token::SHR: + if (shift_count != 0) { + __ shr(ToRegister(left), shift_count); + } else if (instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr->environment()); + } + break; + case Token::SHL: + if (shift_count != 0) { + if (instr->hydrogen_value()->representation().IsSmi() && + instr->can_deopt()) { + if (shift_count != 1) { + __ shl(ToRegister(left), shift_count - 1); + } + __ SmiTag(ToRegister(left)); + DeoptimizeIf(overflow, instr->environment()); + } else { + __ shl(ToRegister(left), shift_count); + } + } + break; + default: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoSubI(LSubI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + + if (right->IsConstantOperand()) { + __ sub(ToOperand(left), + ToImmediate(right, instr->hydrogen()->representation())); + } else { + __ sub(ToRegister(left), ToOperand(right)); + } + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + DeoptimizeIf(overflow, instr->environment()); + } +} + + +void LCodeGen::DoConstantI(LConstantI* instr) { + __ Move(ToRegister(instr->result()), Immediate(instr->value())); +} + + +void LCodeGen::DoConstantS(LConstantS* instr) { + __ Move(ToRegister(instr->result()), Immediate(instr->value())); +} + + +void LCodeGen::DoConstantD(LConstantD* instr) { + double v = instr->value(); + uint64_t int_val = BitCast<uint64_t, double>(v); + int32_t lower = static_cast<int32_t>(int_val); + int32_t upper = 
static_cast<int32_t>(int_val >> (kBitsPerInt)); + DCHECK(instr->result()->IsDoubleRegister()); + + __ push(Immediate(upper)); + __ push(Immediate(lower)); + X87Register reg = ToX87Register(instr->result()); + X87Mov(reg, Operand(esp, 0)); + __ add(Operand(esp), Immediate(kDoubleSize)); +} + + +void LCodeGen::DoConstantE(LConstantE* instr) { + __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); +} + + +void LCodeGen::DoConstantT(LConstantT* instr) { + Register reg = ToRegister(instr->result()); + Handle<Object> object = instr->value(isolate()); + AllowDeferredHandleDereference smi_check; + __ LoadObject(reg, object); +} + + +void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { + Register result = ToRegister(instr->result()); + Register map = ToRegister(instr->value()); + __ EnumLength(result, map); +} + + +void LCodeGen::DoDateField(LDateField* instr) { + Register object = ToRegister(instr->date()); + Register result = ToRegister(instr->result()); + Register scratch = ToRegister(instr->temp()); + Smi* index = instr->index(); + Label runtime, done; + DCHECK(object.is(result)); + DCHECK(object.is(eax)); + + __ test(object, Immediate(kSmiTagMask)); + DeoptimizeIf(zero, instr->environment()); + __ CmpObjectType(object, JS_DATE_TYPE, scratch); + DeoptimizeIf(not_equal, instr->environment()); + + if (index->value() == 0) { + __ mov(result, FieldOperand(object, JSDate::kValueOffset)); + } else { + if (index->value() < JSDate::kFirstUncachedField) { + ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); + __ mov(scratch, Operand::StaticVariable(stamp)); + __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); + __ j(not_equal, &runtime, Label::kNear); + __ mov(result, FieldOperand(object, JSDate::kValueOffset + + kPointerSize * index->value())); + __ jmp(&done, Label::kNear); + } + __ bind(&runtime); + __ PrepareCallCFunction(2, scratch); + __ mov(Operand(esp, 0), object); + __ mov(Operand(esp, 1 * kPointerSize), 
Immediate(index)); + __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); + __ bind(&done); + } +} + + +Operand LCodeGen::BuildSeqStringOperand(Register string, + LOperand* index, + String::Encoding encoding) { + if (index->IsConstantOperand()) { + int offset = ToRepresentation(LConstantOperand::cast(index), + Representation::Integer32()); + if (encoding == String::TWO_BYTE_ENCODING) { + offset *= kUC16Size; + } + STATIC_ASSERT(kCharSize == 1); + return FieldOperand(string, SeqString::kHeaderSize + offset); + } + return FieldOperand( + string, ToRegister(index), + encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, + SeqString::kHeaderSize); +} + + +void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { + String::Encoding encoding = instr->hydrogen()->encoding(); + Register result = ToRegister(instr->result()); + Register string = ToRegister(instr->string()); + + if (FLAG_debug_code) { + __ push(string); + __ mov(string, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset)); + + __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING + ? 
one_byte_seq_type : two_byte_seq_type)); + __ Check(equal, kUnexpectedStringType); + __ pop(string); + } + + Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); + if (encoding == String::ONE_BYTE_ENCODING) { + __ movzx_b(result, operand); + } else { + __ movzx_w(result, operand); + } +} + + +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + String::Encoding encoding = instr->hydrogen()->encoding(); + Register string = ToRegister(instr->string()); + + if (FLAG_debug_code) { + Register value = ToRegister(instr->value()); + Register index = ToRegister(instr->index()); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + int encoding_mask = + instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type; + __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); + } + + Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); + if (instr->value()->IsConstantOperand()) { + int value = ToRepresentation(LConstantOperand::cast(instr->value()), + Representation::Integer32()); + DCHECK_LE(0, value); + if (encoding == String::ONE_BYTE_ENCODING) { + DCHECK_LE(value, String::kMaxOneByteCharCode); + __ mov_b(operand, static_cast<int8_t>(value)); + } else { + DCHECK_LE(value, String::kMaxUtf16CodeUnit); + __ mov_w(operand, static_cast<int16_t>(value)); + } + } else { + Register value = ToRegister(instr->value()); + if (encoding == String::ONE_BYTE_ENCODING) { + __ mov_b(operand, value); + } else { + __ mov_w(operand, value); + } + } +} + + +void LCodeGen::DoAddI(LAddI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + + if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { + if (right->IsConstantOperand()) { + int32_t offset = ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->representation()); + __ 
lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); + } else { + Operand address(ToRegister(left), ToRegister(right), times_1, 0); + __ lea(ToRegister(instr->result()), address); + } + } else { + if (right->IsConstantOperand()) { + __ add(ToOperand(left), + ToImmediate(right, instr->hydrogen()->representation())); + } else { + __ add(ToRegister(left), ToOperand(right)); + } + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + DeoptimizeIf(overflow, instr->environment()); + } + } +} + + +void LCodeGen::DoMathMinMax(LMathMinMax* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + HMathMinMax::Operation operation = instr->hydrogen()->operation(); + if (instr->hydrogen()->representation().IsSmiOrInteger32()) { + Label return_left; + Condition condition = (operation == HMathMinMax::kMathMin) + ? less_equal + : greater_equal; + if (right->IsConstantOperand()) { + Operand left_op = ToOperand(left); + Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), + instr->hydrogen()->representation()); + __ cmp(left_op, immediate); + __ j(condition, &return_left, Label::kNear); + __ mov(left_op, immediate); + } else { + Register left_reg = ToRegister(left); + Operand right_op = ToOperand(right); + __ cmp(left_reg, right_op); + __ j(condition, &return_left, Label::kNear); + __ mov(left_reg, right_op); + } + __ bind(&return_left); + } else { + // TODO(weiliang) use X87 for double representation. 
+ UNIMPLEMENTED(); + } +} + + +void LCodeGen::DoArithmeticD(LArithmeticD* instr) { + X87Register left = ToX87Register(instr->left()); + X87Register right = ToX87Register(instr->right()); + X87Register result = ToX87Register(instr->result()); + if (instr->op() != Token::MOD) { + X87PrepareBinaryOp(left, right, result); + } + switch (instr->op()) { + case Token::ADD: + __ fadd_i(1); + break; + case Token::SUB: + __ fsub_i(1); + break; + case Token::MUL: + __ fmul_i(1); + break; + case Token::DIV: + __ fdiv_i(1); + break; + case Token::MOD: { + // Pass two doubles as arguments on the stack. + __ PrepareCallCFunction(4, eax); + X87Mov(Operand(esp, 1 * kDoubleSize), right); + X87Mov(Operand(esp, 0), left); + X87Free(right); + DCHECK(left.is(result)); + X87PrepareToWrite(result); + __ CallCFunction( + ExternalReference::mod_two_doubles_operation(isolate()), + 4); + + // Return value is in st(0) on ia32. + X87CommitWrite(result); + break; + } + default: + UNREACHABLE(); + break; + } +} + + +void LCodeGen::DoArithmeticT(LArithmeticT* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + DCHECK(ToRegister(instr->result()).is(eax)); + + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +template<class InstrType> +void LCodeGen::EmitBranch(InstrType instr, Condition cc) { + int left_block = instr->TrueDestination(chunk_); + int right_block = instr->FalseDestination(chunk_); + + int next_block = GetNextEmittedBlock(); + + if (right_block == left_block || cc == no_condition) { + EmitGoto(left_block); + } else if (left_block == next_block) { + __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); + } else if (right_block == next_block) { + __ j(cc, chunk_->GetAssemblyLabel(left_block)); + } else { + __ j(cc, chunk_->GetAssemblyLabel(left_block)); + __ jmp(chunk_->GetAssemblyLabel(right_block)); + } 
+} + + +template<class InstrType> +void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { + int false_block = instr->FalseDestination(chunk_); + if (cc == no_condition) { + __ jmp(chunk_->GetAssemblyLabel(false_block)); + } else { + __ j(cc, chunk_->GetAssemblyLabel(false_block)); + } +} + + +void LCodeGen::DoBranch(LBranch* instr) { + Representation r = instr->hydrogen()->value()->representation(); + if (r.IsSmiOrInteger32()) { + Register reg = ToRegister(instr->value()); + __ test(reg, Operand(reg)); + EmitBranch(instr, not_zero); + } else if (r.IsDouble()) { + UNREACHABLE(); + } else { + DCHECK(r.IsTagged()); + Register reg = ToRegister(instr->value()); + HType type = instr->hydrogen()->value()->type(); + if (type.IsBoolean()) { + DCHECK(!info()->IsStub()); + __ cmp(reg, factory()->true_value()); + EmitBranch(instr, equal); + } else if (type.IsSmi()) { + DCHECK(!info()->IsStub()); + __ test(reg, Operand(reg)); + EmitBranch(instr, not_equal); + } else if (type.IsJSArray()) { + DCHECK(!info()->IsStub()); + EmitBranch(instr, no_condition); + } else if (type.IsHeapNumber()) { + UNREACHABLE(); + } else if (type.IsString()) { + DCHECK(!info()->IsStub()); + __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); + EmitBranch(instr, not_equal); + } else { + ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + if (expected.Contains(ToBooleanStub::UNDEFINED)) { + // undefined -> false. + __ cmp(reg, factory()->undefined_value()); + __ j(equal, instr->FalseLabel(chunk_)); + } + if (expected.Contains(ToBooleanStub::BOOLEAN)) { + // true -> true. + __ cmp(reg, factory()->true_value()); + __ j(equal, instr->TrueLabel(chunk_)); + // false -> false. + __ cmp(reg, factory()->false_value()); + __ j(equal, instr->FalseLabel(chunk_)); + } + if (expected.Contains(ToBooleanStub::NULL_TYPE)) { + // 'null' -> false. 
+ __ cmp(reg, factory()->null_value()); + __ j(equal, instr->FalseLabel(chunk_)); + } + + if (expected.Contains(ToBooleanStub::SMI)) { + // Smis: 0 -> false, all other -> true. + __ test(reg, Operand(reg)); + __ j(equal, instr->FalseLabel(chunk_)); + __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); + } else if (expected.NeedsMap()) { + // If we need a map later and have a Smi -> deopt. + __ test(reg, Immediate(kSmiTagMask)); + DeoptimizeIf(zero, instr->environment()); + } + + Register map = no_reg; // Keep the compiler happy. + if (expected.NeedsMap()) { + map = ToRegister(instr->temp()); + DCHECK(!map.is(reg)); + __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); + + if (expected.CanBeUndetectable()) { + // Undetectable -> false. + __ test_b(FieldOperand(map, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + __ j(not_zero, instr->FalseLabel(chunk_)); + } + } + + if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { + // spec object -> true. + __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); + __ j(above_equal, instr->TrueLabel(chunk_)); + } + + if (expected.Contains(ToBooleanStub::STRING)) { + // String value -> false iff empty. + Label not_string; + __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); + __ j(above_equal, &not_string, Label::kNear); + __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); + __ j(not_zero, instr->TrueLabel(chunk_)); + __ jmp(instr->FalseLabel(chunk_)); + __ bind(&not_string); + } + + if (expected.Contains(ToBooleanStub::SYMBOL)) { + // Symbol value -> true. + __ CmpInstanceType(map, SYMBOL_TYPE); + __ j(equal, instr->TrueLabel(chunk_)); + } + + if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { + // heap number -> false iff +0, -0, or NaN. 
+ Label not_heap_number; + __ cmp(FieldOperand(reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + __ j(not_equal, &not_heap_number, Label::kNear); + __ fldz(); + __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); + __ FCmp(); + __ j(zero, instr->FalseLabel(chunk_)); + __ jmp(instr->TrueLabel(chunk_)); + __ bind(&not_heap_number); + } + + if (!expected.IsGeneric()) { + // We've seen something for the first time -> deopt. + // This can only happen if we are not generic already. + DeoptimizeIf(no_condition, instr->environment()); + } + } + } +} + + +void LCodeGen::EmitGoto(int block) { + if (!IsNextEmittedBlock(block)) { + __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); + } +} + + +void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { +} + + +void LCodeGen::DoGoto(LGoto* instr) { + EmitGoto(instr->block_id()); +} + + +Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { + Condition cond = no_condition; + switch (op) { + case Token::EQ: + case Token::EQ_STRICT: + cond = equal; + break; + case Token::NE: + case Token::NE_STRICT: + cond = not_equal; + break; + case Token::LT: + cond = is_unsigned ? below : less; + break; + case Token::GT: + cond = is_unsigned ? above : greater; + break; + case Token::LTE: + cond = is_unsigned ? below_equal : less_equal; + break; + case Token::GTE: + cond = is_unsigned ? 
above_equal : greater_equal; + break; + case Token::IN: + case Token::INSTANCEOF: + default: + UNREACHABLE(); + } + return cond; +} + + +void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + bool is_unsigned = + instr->is_double() || + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cc = TokenToCondition(instr->op(), is_unsigned); + + if (left->IsConstantOperand() && right->IsConstantOperand()) { + // We can statically evaluate the comparison. + double left_val = ToDouble(LConstantOperand::cast(left)); + double right_val = ToDouble(LConstantOperand::cast(right)); + int next_block = EvalComparison(instr->op(), left_val, right_val) ? + instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); + EmitGoto(next_block); + } else { + if (instr->is_double()) { + X87LoadForUsage(ToX87Register(right), ToX87Register(left)); + __ FCmp(); + // Don't base result on EFLAGS when a NaN is involved. Instead + // jump to the false block. + __ j(parity_even, instr->FalseLabel(chunk_)); + } else { + if (right->IsConstantOperand()) { + __ cmp(ToOperand(left), + ToImmediate(right, instr->hydrogen()->representation())); + } else if (left->IsConstantOperand()) { + __ cmp(ToOperand(right), + ToImmediate(left, instr->hydrogen()->representation())); + // We commuted the operands, so commute the condition. 
+ cc = CommuteCondition(cc); + } else { + __ cmp(ToRegister(left), ToOperand(right)); + } + } + EmitBranch(instr, cc); + } +} + + +void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { + Register left = ToRegister(instr->left()); + + if (instr->right()->IsConstantOperand()) { + Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right())); + __ CmpObject(left, right); + } else { + Operand right = ToOperand(instr->right()); + __ cmp(left, right); + } + EmitBranch(instr, equal); +} + + +void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { + if (instr->hydrogen()->representation().IsTagged()) { + Register input_reg = ToRegister(instr->object()); + __ cmp(input_reg, factory()->the_hole_value()); + EmitBranch(instr, equal); + return; + } + + // Put the value to the top of stack + X87Register src = ToX87Register(instr->object()); + X87LoadForUsage(src); + __ fld(0); + __ fld(0); + __ FCmp(); + Label ok; + __ j(parity_even, &ok, Label::kNear); + __ fstp(0); + EmitFalseBranch(instr, no_condition); + __ bind(&ok); + + + __ sub(esp, Immediate(kDoubleSize)); + __ fstp_d(MemOperand(esp, 0)); + + __ add(esp, Immediate(kDoubleSize)); + int offset = sizeof(kHoleNanUpper32); + __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); + EmitBranch(instr, equal); +} + + +void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { + Representation rep = instr->hydrogen()->value()->representation(); + DCHECK(!rep.IsInteger32()); + + if (rep.IsDouble()) { + UNREACHABLE(); + } else { + Register value = ToRegister(instr->value()); + Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); + __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); + __ cmp(FieldOperand(value, HeapNumber::kExponentOffset), + Immediate(0x1)); + EmitFalseBranch(instr, no_overflow); + __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset), + Immediate(0x00000000)); + EmitBranch(instr, equal); + } +} + + +Condition 
LCodeGen::EmitIsObject(Register input, + Register temp1, + Label* is_not_object, + Label* is_object) { + __ JumpIfSmi(input, is_not_object); + + __ cmp(input, isolate()->factory()->null_value()); + __ j(equal, is_object); + + __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined. + __ test_b(FieldOperand(temp1, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + __ j(not_zero, is_not_object); + + __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset)); + __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + __ j(below, is_not_object); + __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); + return below_equal; +} + + +void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { + Register reg = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + Condition true_cond = EmitIsObject( + reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_)); + + EmitBranch(instr, true_cond); +} + + +Condition LCodeGen::EmitIsString(Register input, + Register temp1, + Label* is_not_string, + SmiCheck check_needed = INLINE_SMI_CHECK) { + if (check_needed == INLINE_SMI_CHECK) { + __ JumpIfSmi(input, is_not_string); + } + + Condition cond = masm_->IsObjectStringType(input, temp1, temp1); + + return cond; +} + + +void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { + Register reg = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; + + Condition true_cond = EmitIsString( + reg, temp, instr->FalseLabel(chunk_), check_needed); + + EmitBranch(instr, true_cond); +} + + +void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { + Operand input = ToOperand(instr->value()); + + __ test(input, Immediate(kSmiTagMask)); + EmitBranch(instr, zero); +} + + +void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(input, instr->FalseLabel(chunk_)); + } + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ test_b(FieldOperand(temp, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + EmitBranch(instr, not_zero); +} + + +static Condition ComputeCompareCondition(Token::Value op) { + switch (op) { + case Token::EQ_STRICT: + case Token::EQ: + return equal; + case Token::LT: + return less; + case Token::GT: + return greater; + case Token::LTE: + return less_equal; + case Token::GTE: + return greater_equal; + default: + UNREACHABLE(); + return no_condition; + } +} + + +void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { + Token::Value op = instr->op(); + + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + Condition condition = ComputeCompareCondition(op); + __ test(eax, Operand(eax)); + + EmitBranch(instr, condition); +} + + +static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == FIRST_TYPE) return to; + DCHECK(from == to || to == LAST_TYPE); + return from; +} + + +static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == to) return equal; + if (to == LAST_TYPE) return above_equal; 
+ if (from == FIRST_TYPE) return below_equal; + UNREACHABLE(); + return equal; +} + + +void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + __ JumpIfSmi(input, instr->FalseLabel(chunk_)); + } + + __ CmpObjectType(input, TestType(instr->hydrogen()), temp); + EmitBranch(instr, BranchCondition(instr->hydrogen())); +} + + +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + + __ AssertString(input); + + __ mov(result, FieldOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(result, result); +} + + +void LCodeGen::DoHasCachedArrayIndexAndBranch( + LHasCachedArrayIndexAndBranch* instr) { + Register input = ToRegister(instr->value()); + + __ test(FieldOperand(input, String::kHashFieldOffset), + Immediate(String::kContainsCachedArrayIndexMask)); + EmitBranch(instr, equal); +} + + +// Branches to a label or falls through with the answer in the z flag. Trashes +// the temp registers, but not the input. +void LCodeGen::EmitClassOfTest(Label* is_true, + Label* is_false, + Handle<String>class_name, + Register input, + Register temp, + Register temp2) { + DCHECK(!input.is(temp)); + DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); + __ JumpIfSmi(input, is_false); + + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. 
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); + __ j(below, is_false); + __ j(equal, is_true); + __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); + __ j(equal, is_true); + } else { + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ j(above, is_false); + } + + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. + // Check if the constructor in the map is a function. + __ mov(temp, FieldOperand(temp, Map::kConstructorOffset)); + // Objects with a non-function constructor have class 'Object'. + __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { + __ j(not_equal, is_true); + } else { + __ j(not_equal, is_false); + } + + // temp now contains the constructor function. Grab the + // instance class name from there. + __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); + __ mov(temp, FieldOperand(temp, + SharedFunctionInfo::kInstanceClassNameOffset)); + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. This routine isn't expected to work for random API-created + // classes and it doesn't have to because you can't access it with natives + // syntax. 
Since both sides are internalized it is sufficient to use an + // identity comparison. + __ cmp(temp, class_name); + // End with the answer in the z flag. +} + + +void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + Register temp2 = ToRegister(instr->temp2()); + + Handle<String> class_name = instr->hydrogen()->class_name(); + + EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), + class_name, input, temp, temp2); + + EmitBranch(instr, equal); +} + + +void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { + Register reg = ToRegister(instr->value()); + __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); + EmitBranch(instr, equal); +} + + +void LCodeGen::DoInstanceOf(LInstanceOf* instr) { + // Object and function are in fixed registers defined by the stub. + DCHECK(ToRegister(instr->context()).is(esi)); + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + + Label true_value, done; + __ test(eax, Operand(eax)); + __ j(zero, &true_value, Label::kNear); + __ mov(ToRegister(instr->result()), factory()->false_value()); + __ jmp(&done, Label::kNear); + __ bind(&true_value); + __ mov(ToRegister(instr->result()), factory()->true_value()); + __ bind(&done); +} + + +void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { + class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { + public: + DeferredInstanceOfKnownGlobal(LCodeGen* codegen, + LInstanceOfKnownGlobal* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + Label* map_check() { return &map_check_; } + private: + LInstanceOfKnownGlobal* instr_; + Label 
map_check_; + }; + + DeferredInstanceOfKnownGlobal* deferred; + deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_); + + Label done, false_result; + Register object = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + // A Smi is not an instance of anything. + __ JumpIfSmi(object, &false_result, Label::kNear); + + // This is the inlined call site instanceof cache. The two occurences of the + // hole value will be patched to the last map/result pair generated by the + // instanceof stub. + Label cache_miss; + Register map = ToRegister(instr->temp()); + __ mov(map, FieldOperand(object, HeapObject::kMapOffset)); + __ bind(deferred->map_check()); // Label for calculating code patching. + Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value()); + __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map. + __ j(not_equal, &cache_miss, Label::kNear); + __ mov(eax, factory()->the_hole_value()); // Patched to either true or false. + __ jmp(&done, Label::kNear); + + // The inlined call site cache did not match. Check for null and string + // before calling the deferred code. + __ bind(&cache_miss); + // Null is not an instance of anything. + __ cmp(object, factory()->null_value()); + __ j(equal, &false_result, Label::kNear); + + // String values are not instances of anything. + Condition is_string = masm_->IsObjectStringType(object, temp, temp); + __ j(is_string, &false_result, Label::kNear); + + // Go to the deferred code. + __ jmp(deferred->entry()); + + __ bind(&false_result); + __ mov(ToRegister(instr->result()), factory()->false_value()); + + // Here result has either true or false. Deferred code also produces true or + // false object. 
+ __ bind(deferred->exit()); + __ bind(&done); +} + + +void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, + Label* map_check) { + PushSafepointRegistersScope scope(this); + + InstanceofStub::Flags flags = InstanceofStub::kNoFlags; + flags = static_cast<InstanceofStub::Flags>( + flags | InstanceofStub::kArgsInRegisters); + flags = static_cast<InstanceofStub::Flags>( + flags | InstanceofStub::kCallSiteInlineCheck); + flags = static_cast<InstanceofStub::Flags>( + flags | InstanceofStub::kReturnTrueFalseObject); + InstanceofStub stub(isolate(), flags); + + // Get the temp register reserved by the instruction. This needs to be a + // register which is pushed last by PushSafepointRegisters as top of the + // stack is used to pass the offset to the location of the map check to + // the stub. + Register temp = ToRegister(instr->temp()); + DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0); + __ LoadHeapObject(InstanceofStub::right(), instr->function()); + static const int kAdditionalDelta = 13; + int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; + __ mov(temp, Immediate(delta)); + __ StoreToSafepointRegisterSlot(temp, temp); + CallCodeGeneric(stub.GetCode(), + RelocInfo::CODE_TARGET, + instr, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + // Get the deoptimization index of the LLazyBailout-environment that + // corresponds to this instruction. + LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); + + // Put the result value into the eax slot and restore all registers. 
+ __ StoreToSafepointRegisterSlot(eax, eax); +} + + +void LCodeGen::DoCmpT(LCmpT* instr) { + Token::Value op = instr->op(); + + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + Condition condition = ComputeCompareCondition(op); + Label true_value, done; + __ test(eax, Operand(eax)); + __ j(condition, &true_value, Label::kNear); + __ mov(ToRegister(instr->result()), factory()->false_value()); + __ jmp(&done, Label::kNear); + __ bind(&true_value); + __ mov(ToRegister(instr->result()), factory()->true_value()); + __ bind(&done); +} + + +void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) { + int extra_value_count = dynamic_frame_alignment ? 2 : 1; + + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + if (dynamic_frame_alignment && FLAG_debug_code) { + __ cmp(Operand(esp, + (parameter_count + extra_value_count) * kPointerSize), + Immediate(kAlignmentZapValue)); + __ Assert(equal, kExpectedAlignmentMarker); + } + __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); + } else { + Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiUntag(reg); + Register return_addr_reg = reg.is(ecx) ? 
ebx : ecx; + if (dynamic_frame_alignment && FLAG_debug_code) { + DCHECK(extra_value_count == 2); + __ cmp(Operand(esp, reg, times_pointer_size, + extra_value_count * kPointerSize), + Immediate(kAlignmentZapValue)); + __ Assert(equal, kExpectedAlignmentMarker); + } + + // emit code to restore stack based on instr->parameter_count() + __ pop(return_addr_reg); // save return address + if (dynamic_frame_alignment) { + __ inc(reg); // 1 more for alignment + } + __ shl(reg, kPointerSizeLog2); + __ add(esp, reg); + __ jmp(return_addr_reg); + } +} + + +void LCodeGen::DoReturn(LReturn* instr) { + if (FLAG_trace && info()->IsOptimizing()) { + // Preserve the return value on the stack and rely on the runtime call + // to return the value in the same register. We're leaving the code + // managed by the register allocator and tearing down the frame, it's + // safe to write to the context register. + __ push(eax); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kTraceExit, 1); + } + if (dynamic_frame_alignment_) { + // Fetch the state of the dynamic frame alignment. 
+ __ mov(edx, Operand(ebp, + JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); + } + int no_frame_start = -1; + if (NeedsEagerFrame()) { + __ mov(esp, ebp); + __ pop(ebp); + no_frame_start = masm_->pc_offset(); + } + if (dynamic_frame_alignment_) { + Label no_padding; + __ cmp(edx, Immediate(kNoAlignmentPadding)); + __ j(equal, &no_padding, Label::kNear); + + EmitReturn(instr, true); + __ bind(&no_padding); + } + + EmitReturn(instr, false); + if (no_frame_start != -1) { + info()->AddNoFrameRange(no_frame_start, masm_->pc_offset()); + } +} + + +void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { + Register result = ToRegister(instr->result()); + __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle())); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(result, factory()->the_hole_value()); + DeoptimizeIf(equal, instr->environment()); + } +} + + +void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(LoadIC::NameRegister(), instr->name()); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(eax)); + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(instr->hydrogen()->slot()))); + } + ContextualMode mode = instr->for_typeof() ? 
NOT_CONTEXTUAL : CONTEXTUAL; + Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { + Register value = ToRegister(instr->value()); + Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle(); + + // If the cell we are storing to contains the hole it could have + // been deleted from the property dictionary. In that case, we need + // to update the property details in the property dictionary to mark + // it as no longer deleted. We deoptimize in that case. + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value()); + DeoptimizeIf(equal, instr->environment()); + } + + // Store the value. + __ mov(Operand::ForCell(cell_handle), value); + // Cells are always rescanned, so no write barrier here. +} + + +void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ mov(result, ContextOperand(context, instr->slot_index())); + + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(result, factory()->the_hole_value()); + if (instr->hydrogen()->DeoptimizesOnHole()) { + DeoptimizeIf(equal, instr->environment()); + } else { + Label is_not_hole; + __ j(not_equal, &is_not_hole, Label::kNear); + __ mov(result, factory()->undefined_value()); + __ bind(&is_not_hole); + } + } +} + + +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register value = ToRegister(instr->value()); + + Label skip_assignment; + + Operand target = ContextOperand(context, instr->slot_index()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(target, factory()->the_hole_value()); + if (instr->hydrogen()->DeoptimizesOnHole()) { + DeoptimizeIf(equal, instr->environment()); + } else { + __ j(not_equal, &skip_assignment, Label::kNear); + } + } + + 
__ mov(target, value); + if (instr->hydrogen()->NeedsWriteBarrier()) { + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + Register temp = ToRegister(instr->temp()); + int offset = Context::SlotOffset(instr->slot_index()); + __ RecordWriteContextSlot(context, + offset, + value, + temp, + EMIT_REMEMBERED_SET, + check_needed); + } + + __ bind(&skip_assignment); +} + + +void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); + + if (access.IsExternalMemory()) { + Register result = ToRegister(instr->result()); + MemOperand operand = instr->object()->IsConstantOperand() + ? MemOperand::StaticVariable(ToExternalReference( + LConstantOperand::cast(instr->object()))) + : MemOperand(ToRegister(instr->object()), offset); + __ Load(result, operand, access.representation()); + return; + } + + Register object = ToRegister(instr->object()); + if (instr->hydrogen()->representation().IsDouble()) { + X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); + return; + } + + Register result = ToRegister(instr->result()); + if (!access.IsInobject()) { + __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); + object = result; + } + __ Load(result, FieldOperand(object, offset), access.representation()); +} + + +void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { + DCHECK(!operand->IsDoubleRegister()); + if (operand->IsConstantOperand()) { + Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); + AllowDeferredHandleDereference smi_check; + if (object->IsSmi()) { + __ Push(Handle<Smi>::cast(object)); + } else { + __ PushHeapObject(Handle<HeapObject>::cast(object)); + } + } else if (operand->IsRegister()) { + __ push(ToRegister(operand)); + } else { + __ push(ToOperand(operand)); + } +} + + +void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { + 
DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(LoadIC::NameRegister(), instr->name()); + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(eax)); + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(instr->hydrogen()->slot()))); + } + Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { + Register function = ToRegister(instr->function()); + Register temp = ToRegister(instr->temp()); + Register result = ToRegister(instr->result()); + + // Get the prototype or initial map from the function. + __ mov(result, + FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + + // Check that the function has a prototype or an initial map. + __ cmp(Operand(result), Immediate(factory()->the_hole_value())); + DeoptimizeIf(equal, instr->environment()); + + // If the function does not have an initial map, we're done. + Label done; + __ CmpObjectType(result, MAP_TYPE, temp); + __ j(not_equal, &done, Label::kNear); + + // Get the prototype from the initial map. + __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); + + // All done. 
+ __ bind(&done); +} + + +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + +void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { + Register arguments = ToRegister(instr->arguments()); + Register result = ToRegister(instr->result()); + if (instr->length()->IsConstantOperand() && + instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + int index = (const_length - const_index) + 1; + __ mov(result, Operand(arguments, index * kPointerSize)); + } else { + Register length = ToRegister(instr->length()); + Operand index = ToOperand(instr->index()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them add one more. + __ sub(length, index); + __ mov(result, Operand(arguments, length, times_4, kPointerSize)); + } +} + + +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); + LOperand* key = instr->key(); + if (!key->IsConstantOperand() && + ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), + elements_kind)) { + __ SmiUntag(ToRegister(key)); + } + Operand operand(BuildFastArrayOperand( + instr->elements(), + key, + instr->hydrogen()->key()->representation(), + elements_kind, + instr->base_offset())); + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || + elements_kind == FLOAT32_ELEMENTS) { + X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); + } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || + elements_kind == FLOAT64_ELEMENTS) { + X87Mov(ToX87Register(instr->result()), operand); + } else { + Register result(ToRegister(instr->result())); + switch (elements_kind) { + case EXTERNAL_INT8_ELEMENTS: + case INT8_ELEMENTS: + __ movsx_b(result, operand); + break; + case 
EXTERNAL_UINT8_CLAMPED_ELEMENTS: + case EXTERNAL_UINT8_ELEMENTS: + case UINT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + __ movzx_b(result, operand); + break; + case EXTERNAL_INT16_ELEMENTS: + case INT16_ELEMENTS: + __ movsx_w(result, operand); + break; + case EXTERNAL_UINT16_ELEMENTS: + case UINT16_ELEMENTS: + __ movzx_w(result, operand); + break; + case EXTERNAL_INT32_ELEMENTS: + case INT32_ELEMENTS: + __ mov(result, operand); + break; + case EXTERNAL_UINT32_ELEMENTS: + case UINT32_ELEMENTS: + __ mov(result, operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + __ test(result, Operand(result)); + DeoptimizeIf(negative, instr->environment()); + } + break; + case EXTERNAL_FLOAT32_ELEMENTS: + case EXTERNAL_FLOAT64_ELEMENTS: + case FLOAT32_ELEMENTS: + case FLOAT64_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case SLOPPY_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { + if (instr->hydrogen()->RequiresHoleCheck()) { + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset() + sizeof(kHoleNanLower32)); + __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); + } + + Operand double_load_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset()); + X87Mov(ToX87Register(instr->result()), double_load_operand); +} + + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { + Register result = ToRegister(instr->result()); + + // Load the result. 
+ __ mov(result, + BuildFastArrayOperand(instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, + instr->base_offset())); + + // Check for the hole value. + if (instr->hydrogen()->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ test(result, Immediate(kSmiTagMask)); + DeoptimizeIf(not_equal, instr->environment()); + } else { + __ cmp(result, factory()->the_hole_value()); + DeoptimizeIf(equal, instr->environment()); + } + } +} + + +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_typed_elements()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); + } else { + DoLoadKeyedFixedArray(instr); + } +} + + +Operand LCodeGen::BuildFastArrayOperand( + LOperand* elements_pointer, + LOperand* key, + Representation key_representation, + ElementsKind elements_kind, + uint32_t base_offset) { + Register elements_pointer_reg = ToRegister(elements_pointer); + int element_shift_size = ElementsKindToShiftSize(elements_kind); + int shift_size = element_shift_size; + if (key->IsConstantOperand()) { + int constant_value = ToInteger32(LConstantOperand::cast(key)); + if (constant_value & 0xF0000000) { + Abort(kArrayIndexConstantValueTooBig); + } + return Operand(elements_pointer_reg, + ((constant_value) << shift_size) + + base_offset); + } else { + // Take the tag bit into account while computing the shift size. 
+ if (key_representation.IsSmi() && (shift_size >= 1)) { + shift_size -= kSmiTagSize; + } + ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); + return Operand(elements_pointer_reg, + ToRegister(key), + scale_factor, + base_offset); + } +} + + +void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); + + if (FLAG_vector_ics) { + Register vector = ToRegister(instr->temp_vector()); + DCHECK(vector.is(LoadIC::VectorRegister())); + __ mov(vector, instr->hydrogen()->feedback_vector()); + // No need to allocate this register. + DCHECK(LoadIC::SlotRegister().is(eax)); + __ mov(LoadIC::SlotRegister(), + Immediate(Smi::FromInt(instr->hydrogen()->slot()))); + } + + Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { + Register result = ToRegister(instr->result()); + + if (instr->hydrogen()->from_inlined()) { + __ lea(result, Operand(esp, -2 * kPointerSize)); + } else { + // Check for arguments adapter frame. + Label done, adapted; + __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(result, Operand(result, StandardFrameConstants::kContextOffset)); + __ cmp(Operand(result), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adapted, Label::kNear); + + // No arguments adaptor frame. + __ mov(result, Operand(ebp)); + __ jmp(&done, Label::kNear); + + // Arguments adaptor frame present. + __ bind(&adapted); + __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Result is the frame pointer for the frame if not adapted and for the real + // frame below the adaptor frame if adapted. 
+ __ bind(&done); + } +} + + +void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { + Operand elem = ToOperand(instr->elements()); + Register result = ToRegister(instr->result()); + + Label done; + + // If no arguments adaptor frame the number of arguments is fixed. + __ cmp(ebp, elem); + __ mov(result, Immediate(scope()->num_parameters())); + __ j(equal, &done, Label::kNear); + + // Arguments adaptor frame present. Get argument length from there. + __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(result, Operand(result, + ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiUntag(result); + + // Argument length is in result register. + __ bind(&done); +} + + +void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + + // If the receiver is null or undefined, we have to pass the global + // object as a receiver to normal functions. Values have to be + // passed unchanged to builtins and strict-mode functions. + Label receiver_ok, global_object; + Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; + Register scratch = ToRegister(instr->temp()); + + if (!instr->hydrogen()->known_function()) { + // Do not transform the receiver to object for strict mode + // functions. + __ mov(scratch, + FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &receiver_ok, dist); + + // Do not transform the receiver to object for builtins. + __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, &receiver_ok, dist); + } + + // Normal function. Replace undefined or null with global receiver. 
+ __ cmp(receiver, factory()->null_value()); + __ j(equal, &global_object, Label::kNear); + __ cmp(receiver, factory()->undefined_value()); + __ j(equal, &global_object, Label::kNear); + + // The receiver should be a JS object. + __ test(receiver, Immediate(kSmiTagMask)); + DeoptimizeIf(equal, instr->environment()); + __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch); + DeoptimizeIf(below, instr->environment()); + + __ jmp(&receiver_ok, Label::kNear); + __ bind(&global_object); + __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); + const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); + __ mov(receiver, Operand(receiver, global_offset)); + const int proxy_offset = GlobalObject::kGlobalProxyOffset; + __ mov(receiver, FieldOperand(receiver, proxy_offset)); + __ bind(&receiver_ok); +} + + +void LCodeGen::DoApplyArguments(LApplyArguments* instr) { + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + Register length = ToRegister(instr->length()); + Register elements = ToRegister(instr->elements()); + DCHECK(receiver.is(eax)); // Used for parameter count. + DCHECK(function.is(edi)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).is(eax)); + + // Copy the arguments to this function possibly from the + // adaptor frame below it. + const uint32_t kArgumentsLimit = 1 * KB; + __ cmp(length, kArgumentsLimit); + DeoptimizeIf(above, instr->environment()); + + __ push(receiver); + __ mov(receiver, length); + + // Loop through the arguments pushing them onto the execution + // stack. + Label invoke, loop; + // length is a small non-negative integer, due to the test above. + __ test(length, Operand(length)); + __ j(zero, &invoke, Label::kNear); + __ bind(&loop); + __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); + __ dec(length); + __ j(not_zero, &loop); + + // Invoke the function. 
+ __ bind(&invoke); + DCHECK(instr->HasPointerMap()); + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator safepoint_generator( + this, pointers, Safepoint::kLazyDeopt); + ParameterCount actual(eax); + __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); +} + + +void LCodeGen::DoDebugBreak(LDebugBreak* instr) { + __ int3(); +} + + +void LCodeGen::DoPushArgument(LPushArgument* instr) { + LOperand* argument = instr->value(); + EmitPushTaggedOperand(argument); +} + + +void LCodeGen::DoDrop(LDrop* instr) { + __ Drop(instr->count()); +} + + +void LCodeGen::DoThisFunction(LThisFunction* instr) { + Register result = ToRegister(instr->result()); + __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); +} + + +void LCodeGen::DoContext(LContext* instr) { + Register result = ToRegister(instr->result()); + if (info()->IsOptimizing()) { + __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); + } else { + // If there is no frame, the context must be in esi. + DCHECK(result.is(esi)); + } +} + + +void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + __ push(esi); // The context is the first argument. + __ push(Immediate(instr->hydrogen()->pairs())); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); +} + + +void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, + int arity, + LInstruction* instr, + EDIState edi_state) { + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; + + if (can_invoke_directly) { + if (edi_state == EDI_UNINITIALIZED) { + __ LoadHeapObject(edi, function); + } + + // Change context. 
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + // Set eax to arguments count if adaption is not needed. Assumes that eax + // is available to write to at this point. + if (dont_adapt_arguments) { + __ mov(eax, arity); + } + + // Invoke function directly. + if (function.is_identical_to(info()->closure())) { + __ CallSelf(); + } else { + __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); + } + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + } else { + // We need to adapt arguments. + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator( + this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(arity); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); + } +} + + +void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { + DCHECK(ToRegister(instr->result()).is(eax)); + + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + + if (instr->target()->IsConstantOperand()) { + LConstantOperand* target = LConstantOperand::cast(instr->target()); + Handle<Code> code = Handle<Code>::cast(ToHandle(target)); + generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); + __ call(code, RelocInfo::CODE_TARGET); + } else { + DCHECK(instr->target()->IsRegister()); + Register target = ToRegister(instr->target()); + generator.BeforeCall(__ CallSize(Operand(target))); + __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ call(target); + } + generator.AfterCall(); +} + + +void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); + + if (instr->hydrogen()->pass_argument_count()) { + __ mov(eax, instr->arity()); + } + + // Change context. 
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + bool is_self_call = false; + if (instr->hydrogen()->function()->IsConstant()) { + HConstant* fun_const = HConstant::cast(instr->hydrogen()->function()); + Handle<JSFunction> jsfun = + Handle<JSFunction>::cast(fun_const->handle(isolate())); + is_self_call = jsfun.is_identical_to(info()->closure()); + } + + if (is_self_call) { + __ CallSelf(); + } else { + __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); + } + + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { + Register input_reg = ToRegister(instr->value()); + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + DeoptimizeIf(not_equal, instr->environment()); + + Label slow, allocated, done; + Register tmp = input_reg.is(eax) ? ecx : eax; + Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx; + + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + // Check the sign of the argument. If the argument is positive, just + // return it. We do not need to patch the stack since |input| and + // |result| are the same register and |input| will be restored + // unchanged by popping safepoint registers. + __ test(tmp, Immediate(HeapNumber::kSignMask)); + __ j(zero, &done, Label::kNear); + + __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); + __ jmp(&allocated, Label::kNear); + + // Slow case: Call the runtime system to do the number allocation. + __ bind(&slow); + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, + instr, instr->context()); + // Set the pointer to the new heap number in tmp. + if (!tmp.is(eax)) __ mov(tmp, eax); + // Restore input_reg after call to runtime. 
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg); + + __ bind(&allocated); + __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + __ and_(tmp2, ~HeapNumber::kSignMask); + __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); + __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); + __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); + __ StoreToSafepointRegisterSlot(input_reg, tmp); + + __ bind(&done); +} + + +void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { + Register input_reg = ToRegister(instr->value()); + __ test(input_reg, Operand(input_reg)); + Label is_positive; + __ j(not_sign, &is_positive, Label::kNear); + __ neg(input_reg); // Sets flags. + DeoptimizeIf(negative, instr->environment()); + __ bind(&is_positive); +} + + +void LCodeGen::DoMathAbs(LMathAbs* instr) { + // Class for deferred case. + class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { + public: + DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, + LMathAbs* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LMathAbs* instr_; + }; + + DCHECK(instr->value()->Equals(instr->result())); + Representation r = instr->hydrogen()->value()->representation(); + + if (r.IsDouble()) { + UNIMPLEMENTED(); + } else if (r.IsSmiOrInteger32()) { + EmitIntegerMathAbs(instr); + } else { // Tagged case. + DeferredMathAbsTaggedHeapNumber* deferred = + new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); + Register input_reg = ToRegister(instr->value()); + // Smi check. 
+ __ JumpIfNotSmi(input_reg, deferred->entry()); + EmitIntegerMathAbs(instr); + __ bind(deferred->exit()); + } +} + + +void LCodeGen::DoMathFloor(LMathFloor* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathRound(LMathRound* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathFround(LMathFround* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathSqrt(LMathSqrt* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoPower(LPower* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathLog(LMathLog* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathClz32(LMathClz32* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoMathExp(LMathExp* instr) { + UNIMPLEMENTED(); +} + + +void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(instr->HasPointerMap()); + + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator( + this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(instr->arity()); + __ InvokeFunction(edi, count, CALL_FUNCTION, generator); + } else { + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), + instr->arity(), + instr, + EDI_CONTAINS_TARGET); + } +} + + +void LCodeGen::DoCallFunction(LCallFunction* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); + + int arity = instr->arity(); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoCallNew(LCallNew* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->constructor()).is(edi)); + 
DCHECK(ToRegister(instr->result()).is(eax)); + + // No cell in ebx for construct type feedback in optimized code + __ mov(ebx, isolate()->factory()->undefined_value()); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + __ Move(eax, Immediate(instr->arity())); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->constructor()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ Move(eax, Immediate(instr->arity())); + __ mov(ebx, isolate()->factory()->undefined_value()); + ElementsKind kind = instr->hydrogen()->elements_kind(); + AllocationSiteOverrideMode override_mode = + (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) + ? DISABLE_ALLOCATION_SITES + : DONT_OVERRIDE; + + if (instr->arity() == 0) { + ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + } else if (instr->arity() == 1) { + Label done; + if (IsFastPackedElementsKind(kind)) { + Label packed_case; + // We might need a change here + // look at the first argument + __ mov(ecx, Operand(esp, 0)); + __ test(ecx, ecx); + __ j(zero, &packed_case, Label::kNear); + + ElementsKind holey_kind = GetHoleyElementsKind(kind); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ jmp(&done, Label::kNear); + __ bind(&packed_case); + } + + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ bind(&done); + } else { + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + } +} + + +void LCodeGen::DoCallRuntime(LCallRuntime* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + 
CallRuntime(instr->function(), instr->arity(), instr); +} + + +void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { + Register function = ToRegister(instr->function()); + Register code_object = ToRegister(instr->code_object()); + __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); + __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); +} + + +void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { + Register result = ToRegister(instr->result()); + Register base = ToRegister(instr->base_object()); + if (instr->offset()->IsConstantOperand()) { + LConstantOperand* offset = LConstantOperand::cast(instr->offset()); + __ lea(result, Operand(base, ToInteger32(offset))); + } else { + Register offset = ToRegister(instr->offset()); + __ lea(result, Operand(base, offset, times_1, 0)); + } +} + + +void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { + Representation representation = instr->hydrogen()->field_representation(); + + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); + + if (access.IsExternalMemory()) { + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + MemOperand operand = instr->object()->IsConstantOperand() + ? 
MemOperand::StaticVariable( + ToExternalReference(LConstantOperand::cast(instr->object()))) + : MemOperand(ToRegister(instr->object()), offset); + if (instr->value()->IsConstantOperand()) { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + __ mov(operand, Immediate(ToInteger32(operand_value))); + } else { + Register value = ToRegister(instr->value()); + __ Store(value, operand, representation); + } + return; + } + + Register object = ToRegister(instr->object()); + __ AssertNotSmi(object); + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsSmi(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + X87Register value = ToX87Register(instr->value()); + X87Mov(FieldOperand(object, offset), value); + return; + } + + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); + __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); + if (instr->hydrogen()->NeedsWriteBarrierForMap()) { + Register temp = ToRegister(instr->temp()); + Register temp_map = ToRegister(instr->temp_map()); + __ mov(temp_map, transition); + __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); + // Update the write barrier for the map field. + __ RecordWriteForMap(object, transition, temp_map, temp); + } + } + + // Do the store. 
+ Register write_register = object; + if (!access.IsInobject()) { + write_register = ToRegister(instr->temp()); + __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); + } + + MemOperand operand = FieldOperand(write_register, offset); + if (instr->value()->IsConstantOperand()) { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (operand_value->IsRegister()) { + Register value = ToRegister(operand_value); + __ Store(value, operand, representation); + } else if (representation.IsInteger32()) { + Immediate immediate = ToImmediate(operand_value, representation); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + __ mov(operand, immediate); + } else { + Handle<Object> handle_value = ToHandle(operand_value); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + __ mov(operand, handle_value); + } + } else { + Register value = ToRegister(instr->value()); + __ Store(value, operand, representation); + } + + if (instr->hydrogen()->NeedsWriteBarrier()) { + Register value = ToRegister(instr->value()); + Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; + // Update the write barrier for the object for in-object properties. + __ RecordWriteField(write_register, + offset, + value, + temp, + EMIT_REMEMBERED_SET, + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); + } +} + + +void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); + + __ mov(StoreIC::NameRegister(), instr->name()); + Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? 
above : above_equal; + if (instr->index()->IsConstantOperand()) { + __ cmp(ToOperand(instr->length()), + ToImmediate(LConstantOperand::cast(instr->index()), + instr->hydrogen()->length()->representation())); + cc = CommuteCondition(cc); + } else if (instr->length()->IsConstantOperand()) { + __ cmp(ToOperand(instr->index()), + ToImmediate(LConstantOperand::cast(instr->length()), + instr->hydrogen()->index()->representation())); + } else { + __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { + Label done; + __ j(NegateCondition(cc), &done, Label::kNear); + __ int3(); + __ bind(&done); + } else { + DeoptimizeIf(cc, instr->environment()); + } +} + + +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); + LOperand* key = instr->key(); + if (!key->IsConstantOperand() && + ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), + elements_kind)) { + __ SmiUntag(ToRegister(key)); + } + Operand operand(BuildFastArrayOperand( + instr->elements(), + key, + instr->hydrogen()->key()->representation(), + elements_kind, + instr->base_offset())); + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || + elements_kind == FLOAT32_ELEMENTS) { + __ fld(0); + __ fstp_s(operand); + } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || + elements_kind == FLOAT64_ELEMENTS) { + X87Mov(operand, ToX87Register(instr->value())); + } else { + Register value = ToRegister(instr->value()); + switch (elements_kind) { + case EXTERNAL_UINT8_CLAMPED_ELEMENTS: + case EXTERNAL_UINT8_ELEMENTS: + case EXTERNAL_INT8_ELEMENTS: + case UINT8_ELEMENTS: + case INT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + __ mov_b(operand, value); + break; + case EXTERNAL_INT16_ELEMENTS: + case EXTERNAL_UINT16_ELEMENTS: + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + __ mov_w(operand, value); + break; + case EXTERNAL_INT32_ELEMENTS: + case EXTERNAL_UINT32_ELEMENTS: + 
case UINT32_ELEMENTS: + case INT32_ELEMENTS: + __ mov(operand, value); + break; + case EXTERNAL_FLOAT32_ELEMENTS: + case EXTERNAL_FLOAT64_ELEMENTS: + case FLOAT32_ELEMENTS: + case FLOAT64_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case SLOPPY_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + ExternalReference canonical_nan_reference = + ExternalReference::address_of_canonical_non_hole_nan(); + Operand double_store_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset()); + + // Can't use SSE2 in the serializer + if (instr->hydrogen()->IsConstantHoleStore()) { + // This means we should store the (double) hole. No floating point + // registers required. 
+ double nan_double = FixedDoubleArray::hole_nan_as_double(); + uint64_t int_val = BitCast<uint64_t, double>(nan_double); + int32_t lower = static_cast<int32_t>(int_val); + int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); + + __ mov(double_store_operand, Immediate(lower)); + Operand double_store_operand2 = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset() + kPointerSize); + __ mov(double_store_operand2, Immediate(upper)); + } else { + Label no_special_nan_handling; + X87Register value = ToX87Register(instr->value()); + X87Fxch(value); + + if (instr->NeedsCanonicalization()) { + __ fld(0); + __ fld(0); + __ FCmp(); + + __ j(parity_odd, &no_special_nan_handling, Label::kNear); + __ sub(esp, Immediate(kDoubleSize)); + __ fst_d(MemOperand(esp, 0)); + __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), + Immediate(kHoleNanUpper32)); + __ add(esp, Immediate(kDoubleSize)); + Label canonicalize; + __ j(not_equal, &canonicalize, Label::kNear); + __ jmp(&no_special_nan_handling, Label::kNear); + __ bind(&canonicalize); + __ fstp(0); + __ fld_d(Operand::StaticVariable(canonical_nan_reference)); + } + + __ bind(&no_special_nan_handling); + __ fst_d(double_store_operand); + } +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { + Register elements = ToRegister(instr->elements()); + Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; + + Operand operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, + instr->base_offset()); + if (instr->value()->IsRegister()) { + __ mov(operand, ToRegister(instr->value())); + } else { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (IsSmi(operand_value)) { + Immediate immediate = ToImmediate(operand_value, Representation::Smi()); + __ mov(operand, immediate); + } else { + DCHECK(!IsInteger32(operand_value)); + Handle<Object> handle_value = ToHandle(operand_value); + __ mov(operand, handle_value); + } + } + + if (instr->hydrogen()->NeedsWriteBarrier()) { + DCHECK(instr->value()->IsRegister()); + Register value = ToRegister(instr->value()); + DCHECK(!instr->key()->IsConstantOperand()); + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. + __ lea(key, operand); + __ RecordWrite(elements, + key, + value, + EMIT_REMEMBERED_SET, + check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); + } +} + + +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases...external, fast-double, fast + if (instr->is_typed_elements()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); + } +} + + +void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); + DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); + + Handle<Code> ic = instr->strict_mode() == STRICT + ? 
isolate()->builtins()->KeyedStoreIC_Initialize_Strict() + : isolate()->builtins()->KeyedStoreIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + Label no_memento_found; + __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); + DeoptimizeIf(equal, instr->environment()); + __ bind(&no_memento_found); +} + + +void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { + Register object_reg = ToRegister(instr->object()); + + Handle<Map> from_map = instr->original_map(); + Handle<Map> to_map = instr->transitioned_map(); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); + + Label not_applicable; + bool is_simple_map_transition = + IsSimpleMapChangeTransition(from_kind, to_kind); + Label::Distance branch_distance = + is_simple_map_transition ? Label::kNear : Label::kFar; + __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); + __ j(not_equal, ¬_applicable, branch_distance); + if (is_simple_map_transition) { + Register new_map_reg = ToRegister(instr->new_map_temp()); + __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), + Immediate(to_map)); + // Write barrier. 
+ DCHECK_NE(instr->temp(), NULL); + __ RecordWriteForMap(object_reg, to_map, new_map_reg, + ToRegister(instr->temp())); + } else { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(object_reg.is(eax)); + PushSafepointRegistersScope scope(this); + __ mov(ebx, to_map); + bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); + __ CallStub(&stub); + RecordSafepointWithLazyDeopt(instr, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + } + __ bind(¬_applicable); +} + + +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { + class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { + public: + DeferredStringCharCodeAt(LCodeGen* codegen, + LStringCharCodeAt* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredStringCharCodeAt(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LStringCharCodeAt* instr_; + }; + + DeferredStringCharCodeAt* deferred = + new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); + + StringCharLoadGenerator::Generate(masm(), + factory(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->result()), + deferred->entry()); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ Move(result, Immediate(0)); + + PushSafepointRegistersScope scope(this); + __ push(string); + // Push the index as a smi. This is safe because of the checks in + // DoStringCharCodeAt above. 
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (instr->index()->IsConstantOperand()) { + Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), + Representation::Smi()); + __ push(immediate); + } else { + Register index = ToRegister(instr->index()); + __ SmiTag(index); + __ push(index); + } + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, + instr, instr->context()); + __ AssertSmi(eax); + __ SmiUntag(eax); + __ StoreToSafepointRegisterSlot(result, eax); +} + + +void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { + class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { + public: + DeferredStringCharFromCode(LCodeGen* codegen, + LStringCharFromCode* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredStringCharFromCode(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LStringCharFromCode* instr_; + }; + + DeferredStringCharFromCode* deferred = + new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); + + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); + Register char_code = ToRegister(instr->char_code()); + Register result = ToRegister(instr->result()); + DCHECK(!char_code.is(result)); + + __ cmp(char_code, String::kMaxOneByteCharCode); + __ j(above, deferred->entry()); + __ Move(result, Immediate(factory()->single_character_string_cache())); + __ mov(result, FieldOperand(result, + char_code, times_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(result, factory()->undefined_value()); + __ j(equal, deferred->entry()); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { + Register char_code = ToRegister(instr->char_code()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. 
For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ Move(result, Immediate(0)); + + PushSafepointRegistersScope scope(this); + __ SmiTag(char_code); + __ push(char_code); + CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); + __ StoreToSafepointRegisterSlot(result, eax); +} + + +void LCodeGen::DoStringAdd(LStringAdd* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), + instr->hydrogen()->pretenure_flag()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { + LOperand* input = instr->value(); + LOperand* output = instr->result(); + DCHECK(input->IsRegister() || input->IsStackSlot()); + DCHECK(output->IsDoubleRegister()); + if (input->IsRegister()) { + Register input_reg = ToRegister(input); + __ push(input_reg); + X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); + __ pop(input_reg); + } else { + X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); + } +} + + +void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + LOperand* input = instr->value(); + LOperand* output = instr->result(); + X87Register res = ToX87Register(output); + X87PrepareToWrite(res); + __ LoadUint32NoSSE2(ToRegister(input)); + X87CommitWrite(res); +} + + +void LCodeGen::DoNumberTagI(LNumberTagI* instr) { + class DeferredNumberTagI V8_FINAL : public LDeferredCode { + public: + DeferredNumberTagI(LCodeGen* codegen, + LNumberTagI* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), + SIGNED_INT32); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 
+ private: + LNumberTagI* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister() && input->Equals(instr->result())); + Register reg = ToRegister(input); + + DeferredNumberTagI* deferred = + new(zone()) DeferredNumberTagI(this, instr, x87_stack_); + __ SmiTag(reg); + __ j(overflow, deferred->entry()); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoNumberTagU(LNumberTagU* instr) { + class DeferredNumberTagU V8_FINAL : public LDeferredCode { + public: + DeferredNumberTagU(LCodeGen* codegen, + LNumberTagU* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), + UNSIGNED_INT32); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LNumberTagU* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister() && input->Equals(instr->result())); + Register reg = ToRegister(input); + + DeferredNumberTagU* deferred = + new(zone()) DeferredNumberTagU(this, instr, x87_stack_); + __ cmp(reg, Immediate(Smi::kMaxValue)); + __ j(above, deferred->entry()); + __ SmiTag(reg); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp, + IntegerSignedness signedness) { + Label done, slow; + Register reg = ToRegister(value); + Register tmp = ToRegister(temp); + + if (signedness == SIGNED_INT32) { + // There was overflow, so bits 30 and 31 of the original integer + // disagree. Try to allocate a heap number in new space and store + // the value in there. If that fails, call the runtime system. + __ SmiUntag(reg); + __ xor_(reg, 0x80000000); + __ push(reg); + __ fild_s(Operand(esp, 0)); + __ pop(reg); + } else { + // There's no fild variant for unsigned values, so zero-extend to a 64-bit + // int manually. 
+ __ push(Immediate(0)); + __ push(reg); + __ fild_d(Operand(esp, 0)); + __ pop(reg); + __ pop(reg); + } + + if (FLAG_inline_new) { + __ AllocateHeapNumber(reg, tmp, no_reg, &slow); + __ jmp(&done, Label::kNear); + } + + // Slow case: Call the runtime system to do the number allocation. + __ bind(&slow); + { + // TODO(3095996): Put a valid pointer value in the stack slot where the + // result register is stored, as this register is in the pointer map, but + // contains an integer value. + __ Move(reg, Immediate(0)); + + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(reg, eax); + } + + __ bind(&done); + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); +} + + +void LCodeGen::DoNumberTagD(LNumberTagD* instr) { + class DeferredNumberTagD V8_FINAL : public LDeferredCode { + public: + DeferredNumberTagD(LCodeGen* codegen, + LNumberTagD* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredNumberTagD(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LNumberTagD* instr_; + }; + + Register reg = ToRegister(instr->result()); + + // Put the value to the top of stack + X87Register src = ToX87Register(instr->value()); + X87LoadForUsage(src); + + DeferredNumberTagD* deferred = + new(zone()) DeferredNumberTagD(this, instr, x87_stack_); + if (FLAG_inline_new) { + 
Register tmp = ToRegister(instr->temp()); + __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); + } else { + __ jmp(deferred->entry()); + } + __ bind(deferred->exit()); + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); +} + + +void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + Register reg = ToRegister(instr->result()); + __ Move(reg, Immediate(0)); + + PushSafepointRegistersScope scope(this); + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(reg, eax); +} + + +void LCodeGen::DoSmiTag(LSmiTag* instr) { + HChange* hchange = instr->hydrogen(); + Register input = ToRegister(instr->value()); + if (hchange->CheckFlag(HValue::kCanOverflow) && + hchange->value()->CheckFlag(HValue::kUint32)) { + __ test(input, Immediate(0xc0000000)); + DeoptimizeIf(not_zero, instr->environment()); + } + __ SmiTag(input); + if (hchange->CheckFlag(HValue::kCanOverflow) && + !hchange->value()->CheckFlag(HValue::kUint32)) { + DeoptimizeIf(overflow, instr->environment()); + } +} + + +void LCodeGen::DoSmiUntag(LSmiUntag* instr) { + LOperand* input = instr->value(); + Register result = ToRegister(input); + DCHECK(input->IsRegister() && input->Equals(instr->result())); + if (instr->needs_check()) { + __ test(result, Immediate(kSmiTagMask)); + DeoptimizeIf(not_zero, instr->environment()); + } else { + __ 
AssertSmi(result); + } + __ SmiUntag(result); +} + + +void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, + Register temp_reg, + X87Register res_reg, + bool can_convert_undefined_to_nan, + bool deoptimize_on_minus_zero, + LEnvironment* env, + NumberUntagDMode mode) { + Label load_smi, done; + + X87PrepareToWrite(res_reg); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ JumpIfSmi(input_reg, &load_smi, Label::kNear); + + // Heap number map check. + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + if (!can_convert_undefined_to_nan) { + DeoptimizeIf(not_equal, env); + } else { + Label heap_number, convert; + __ j(equal, &heap_number, Label::kNear); + + // Convert undefined (or hole) to NaN. + __ cmp(input_reg, factory()->undefined_value()); + DeoptimizeIf(not_equal, env); + + __ bind(&convert); + ExternalReference nan = + ExternalReference::address_of_canonical_non_hole_nan(); + __ fld_d(Operand::StaticVariable(nan)); + __ jmp(&done, Label::kNear); + + __ bind(&heap_number); + } + // Heap number to x87 conversion. + __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + __ fldz(); + __ FCmp(); + __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ j(not_zero, &done, Label::kNear); + + // Use general purpose registers to check if we have -0.0 + __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + __ test(temp_reg, Immediate(HeapNumber::kSignMask)); + __ j(zero, &done, Label::kNear); + + // Pop FPU stack before deoptimizing. + __ fstp(0); + DeoptimizeIf(not_zero, env); + } + __ jmp(&done, Label::kNear); + } else { + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); + } + + __ bind(&load_smi); + // Clobbering a temp is faster than re-tagging the + // input register since we avoid dependencies. + __ mov(temp_reg, input_reg); + __ SmiUntag(temp_reg); // Untag smi before converting to float. 
+ __ push(temp_reg); + __ fild_s(Operand(esp, 0)); + __ add(esp, Immediate(kPointerSize)); + __ bind(&done); + X87CommitWrite(res_reg); +} + + +void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { + Register input_reg = ToRegister(instr->value()); + + // The input was optimistically untagged; revert it. + STATIC_ASSERT(kSmiTagSize == 1); + __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); + + if (instr->truncating()) { + Label no_heap_number, check_bools, check_false; + + // Heap number map check. + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + __ j(not_equal, &no_heap_number, Label::kNear); + __ TruncateHeapNumberToI(input_reg, input_reg); + __ jmp(done); + + __ bind(&no_heap_number); + // Check for Oddballs. Undefined/False is converted to zero and True to one + // for truncating conversions. + __ cmp(input_reg, factory()->undefined_value()); + __ j(not_equal, &check_bools, Label::kNear); + __ Move(input_reg, Immediate(0)); + __ jmp(done); + + __ bind(&check_bools); + __ cmp(input_reg, factory()->true_value()); + __ j(not_equal, &check_false, Label::kNear); + __ Move(input_reg, Immediate(1)); + __ jmp(done); + + __ bind(&check_false); + __ cmp(input_reg, factory()->false_value()); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(not_equal, instr->environment()); + __ Move(input_reg, Immediate(0)); + } else { + Label bailout; + __ TaggedToI(input_reg, input_reg, + instr->hydrogen()->GetMinusZeroMode(), &bailout); + __ jmp(done); + __ bind(&bailout); + DeoptimizeIf(no_condition, instr->environment()); + } +} + + +void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI V8_FINAL : public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, + LTaggedToI* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredTaggedToI(instr_, done()); + } + 
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LTaggedToI* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + Register input_reg = ToRegister(input); + DCHECK(input_reg.is(ToRegister(instr->result()))); + + if (instr->hydrogen()->value()->representation().IsSmi()) { + __ SmiUntag(input_reg); + } else { + DeferredTaggedToI* deferred = + new(zone()) DeferredTaggedToI(this, instr, x87_stack_); + // Optimistically untag the input. + // If the input is a HeapObject, SmiUntag will set the carry flag. + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ SmiUntag(input_reg); + // Branch to deferred code if the input was tagged. + // The deferred code will take care of restoring the tag. + __ j(carry, deferred->entry()); + __ bind(deferred->exit()); + } +} + + +void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + LOperand* temp = instr->temp(); + DCHECK(temp->IsRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsDoubleRegister()); + + Register input_reg = ToRegister(input); + bool deoptimize_on_minus_zero = + instr->hydrogen()->deoptimize_on_minus_zero(); + Register temp_reg = ToRegister(temp); + + HValue* value = instr->hydrogen()->value(); + NumberUntagDMode mode = value->representation().IsSmi() + ? 
NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; + + EmitNumberUntagDNoSSE2(input_reg, + temp_reg, + ToX87Register(result), + instr->hydrogen()->can_convert_undefined_to_nan(), + deoptimize_on_minus_zero, + instr->environment(), + mode); +} + + +void LCodeGen::DoDoubleToI(LDoubleToI* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsDoubleRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsRegister()); + Register result_reg = ToRegister(result); + + if (instr->truncating()) { + X87Register input_reg = ToX87Register(input); + X87Fxch(input_reg); + __ TruncateX87TOSToI(result_reg); + } else { + Label bailout, done; + X87Register input_reg = ToX87Register(input); + X87Fxch(input_reg); + __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), + &bailout, Label::kNear); + __ jmp(&done, Label::kNear); + __ bind(&bailout); + DeoptimizeIf(no_condition, instr->environment()); + __ bind(&done); + } +} + + +void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsDoubleRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsRegister()); + Register result_reg = ToRegister(result); + + Label bailout, done; + X87Register input_reg = ToX87Register(input); + X87Fxch(input_reg); + __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), + &bailout, Label::kNear); + __ jmp(&done, Label::kNear); + __ bind(&bailout); + DeoptimizeIf(no_condition, instr->environment()); + __ bind(&done); + + __ SmiTag(result_reg); + DeoptimizeIf(overflow, instr->environment()); +} + + +void LCodeGen::DoCheckSmi(LCheckSmi* instr) { + LOperand* input = instr->value(); + __ test(ToOperand(input), Immediate(kSmiTagMask)); + DeoptimizeIf(not_zero, instr->environment()); +} + + +void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + LOperand* input = instr->value(); + __ test(ToOperand(input), Immediate(kSmiTagMask)); + 
DeoptimizeIf(zero, instr->environment()); + } +} + + +void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + + if (instr->hydrogen()->is_interval_check()) { + InstanceType first; + InstanceType last; + instr->hydrogen()->GetCheckInterval(&first, &last); + + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), + static_cast<int8_t>(first)); + + // If there is only one type in the interval check for equality. + if (first == last) { + DeoptimizeIf(not_equal, instr->environment()); + } else { + DeoptimizeIf(below, instr->environment()); + // Omit check for the last type. + if (last != LAST_TYPE) { + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), + static_cast<int8_t>(last)); + DeoptimizeIf(above, instr->environment()); + } + } + } else { + uint8_t mask; + uint8_t tag; + instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); + + if (IsPowerOf2(mask)) { + DCHECK(tag == 0 || IsPowerOf2(tag)); + __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask); + DeoptimizeIf(tag == 0 ? 
not_zero : zero, instr->environment()); + } else { + __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ and_(temp, mask); + __ cmp(temp, tag); + DeoptimizeIf(not_equal, instr->environment()); + } + } +} + + +void LCodeGen::DoCheckValue(LCheckValue* instr) { + Handle<HeapObject> object = instr->hydrogen()->object().handle(); + if (instr->hydrogen()->object_in_new_space()) { + Register reg = ToRegister(instr->value()); + Handle<Cell> cell = isolate()->factory()->NewCell(object); + __ cmp(reg, Operand::ForCell(cell)); + } else { + Operand operand = ToOperand(instr->value()); + __ cmp(operand, object); + } + DeoptimizeIf(not_equal, instr->environment()); +} + + +void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { + { + PushSafepointRegistersScope scope(this); + __ push(object); + __ xor_(esi, esi); + __ CallRuntime(Runtime::kTryMigrateInstance); + RecordSafepointWithRegisters( + instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); + + __ test(eax, Immediate(kSmiTagMask)); + } + DeoptimizeIf(zero, instr->environment()); +} + + +void LCodeGen::DoCheckMaps(LCheckMaps* instr) { + class DeferredCheckMaps V8_FINAL : public LDeferredCode { + public: + DeferredCheckMaps(LCodeGen* codegen, + LCheckMaps* instr, + Register object, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { + SetExit(check_maps()); + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredInstanceMigration(instr_, object_); + } + Label* check_maps() { return &check_maps_; } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LCheckMaps* instr_; + Label check_maps_; + Register object_; + }; + + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } + + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + Register 
reg = ToRegister(input); + + DeferredCheckMaps* deferred = NULL; + if (instr->hydrogen()->HasMigrationTarget()) { + deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); + __ bind(deferred->check_maps()); + } + + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + Label success; + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); + __ CompareMap(reg, map); + __ j(equal, &success, Label::kNear); + } + + Handle<Map> map = maps->at(maps->size() - 1).handle(); + __ CompareMap(reg, map); + if (instr->hydrogen()->HasMigrationTarget()) { + __ j(not_equal, deferred->entry()); + } else { + DeoptimizeIf(not_equal, instr->environment()); + } + + __ bind(&success); +} + + +void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { + UNREACHABLE(); +} + + +void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { + DCHECK(instr->unclamped()->Equals(instr->result())); + Register value_reg = ToRegister(instr->result()); + __ ClampUint8(value_reg); +} + + +void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { + Register input_reg = ToRegister(instr->unclamped()); + Register result_reg = ToRegister(instr->result()); + Register scratch = ToRegister(instr->scratch()); + Register scratch2 = ToRegister(instr->scratch2()); + Register scratch3 = ToRegister(instr->scratch3()); + Label is_smi, done, heap_number, valid_exponent, + largest_value, zero_result, maybe_nan_or_infinity; + + __ JumpIfSmi(input_reg, &is_smi); + + // Check for heap number + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + __ j(equal, &heap_number, Label::kNear); + + // Check for undefined. Undefined is converted to zero for clamping + // conversions. 
+ __ cmp(input_reg, factory()->undefined_value()); + DeoptimizeIf(not_equal, instr->environment()); + __ jmp(&zero_result, Label::kNear); + + // Heap number + __ bind(&heap_number); + + // Surprisingly, all of the hand-crafted bit-manipulations below are much + // faster than the x86 FPU built-in instruction, especially since "banker's + // rounding" would be additionally very expensive + + // Get exponent word. + __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); + + // Test for negative values --> clamp to zero + __ test(scratch, scratch); + __ j(negative, &zero_result, Label::kNear); + + // Get exponent alone in scratch2. + __ mov(scratch2, scratch); + __ and_(scratch2, HeapNumber::kExponentMask); + __ shr(scratch2, HeapNumber::kExponentShift); + __ j(zero, &zero_result, Label::kNear); + __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); + __ j(negative, &zero_result, Label::kNear); + + const uint32_t non_int8_exponent = 7; + __ cmp(scratch2, Immediate(non_int8_exponent + 1)); + // If the exponent is too big, check for special values. + __ j(greater, &maybe_nan_or_infinity, Label::kNear); + + __ bind(&valid_exponent); + // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent + // < 7. The shift bias is the number of bits to shift the mantissa such that + // with an exponent of 7 such the that top-most one is in bit 30, allowing + // detection the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to + // 1). + int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; + __ lea(result_reg, MemOperand(scratch2, shift_bias)); + // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the + // top bits of the mantissa. 
+ __ and_(scratch, HeapNumber::kMantissaMask); + // Put back the implicit 1 of the mantissa + __ or_(scratch, 1 << HeapNumber::kExponentShift); + // Shift up to round + __ shl_cl(scratch); + // Use "banker's rounding" to spec: If fractional part of number is 0.5, then + // use the bit in the "ones" place and add it to the "halves" place, which has + // the effect of rounding to even. + __ mov(scratch2, scratch); + const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; + const uint32_t one_bit_shift = one_half_bit_shift + 1; + __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); + __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); + Label no_round; + __ j(less, &no_round, Label::kNear); + Label round_up; + __ mov(scratch2, Immediate(1 << one_half_bit_shift)); + __ j(greater, &round_up, Label::kNear); + __ test(scratch3, scratch3); + __ j(not_zero, &round_up, Label::kNear); + __ mov(scratch2, scratch); + __ and_(scratch2, Immediate(1 << one_bit_shift)); + __ shr(scratch2, 1); + __ bind(&round_up); + __ add(scratch, scratch2); + __ j(overflow, &largest_value, Label::kNear); + __ bind(&no_round); + __ shr(scratch, 23); + __ mov(result_reg, scratch); + __ jmp(&done, Label::kNear); + + __ bind(&maybe_nan_or_infinity); + // Check for NaN/Infinity, all other values map to 255 + __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); + __ j(not_equal, &largest_value, Label::kNear); + + // Check for NaN, which differs from Infinity in that at least one mantissa + // bit is set. + __ and_(scratch, HeapNumber::kMantissaMask); + __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); + __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN + // Infinity -> Fall through to map to 255. 
+ + __ bind(&largest_value); + __ mov(result_reg, Immediate(255)); + __ jmp(&done, Label::kNear); + + __ bind(&zero_result); + __ xor_(result_reg, result_reg); + __ jmp(&done, Label::kNear); + + // smi + __ bind(&is_smi); + if (!input_reg.is(result_reg)) { + __ mov(result_reg, input_reg); + } + __ SmiUntag(result_reg); + __ ClampUint8(result_reg); + __ bind(&done); +} + + +void LCodeGen::DoDoubleBits(LDoubleBits* instr) { + UNREACHABLE(); +} + + +void LCodeGen::DoConstructDouble(LConstructDouble* instr) { + UNREACHABLE(); +} + + +void LCodeGen::DoAllocate(LAllocate* instr) { + class DeferredAllocate V8_FINAL : public LDeferredCode { + public: + DeferredAllocate(LCodeGen* codegen, + LAllocate* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredAllocate(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LAllocate* instr_; + }; + + DeferredAllocate* deferred = + new(zone()) DeferredAllocate(this, instr, x87_stack_); + + Register result = ToRegister(instr->result()); + Register temp = ToRegister(instr->temp()); + + // Allocate memory for the object. 
+ AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); + } + + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (size <= Page::kMaxRegularHeapObjectSize) { + __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); + } else { + __ jmp(deferred->entry()); + } + } else { + Register size = ToRegister(instr->size()); + __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); + } + + __ bind(deferred->exit()); + + if (instr->hydrogen()->MustPrefillWithFiller()) { + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + __ mov(temp, (size / kPointerSize) - 1); + } else { + temp = ToRegister(instr->size()); + __ shr(temp, kPointerSizeLog2); + __ dec(temp); + } + Label loop; + __ bind(&loop); + __ mov(FieldOperand(result, temp, times_pointer_size, 0), + isolate()->factory()->one_pointer_filler_map()); + __ dec(temp); + __ j(not_zero, &loop); + } +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. 
+ __ Move(result, Immediate(Smi::FromInt(0))); + + PushSafepointRegistersScope scope(this); + if (instr->size()->IsRegister()) { + Register size = ToRegister(instr->size()); + DCHECK(!size.is(result)); + __ SmiTag(ToRegister(instr->size())); + __ push(size); + } else { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (size >= 0 && size <= Smi::kMaxValue) { + __ push(Immediate(Smi::FromInt(size))); + } else { + // We should never get here at runtime => abort + __ int3(); + return; + } + } + + int flags = AllocateDoubleAlignFlag::encode( + instr->hydrogen()->MustAllocateDoubleAligned()); + if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); + } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); + } else { + flags = AllocateTargetSpace::update(flags, NEW_SPACE); + } + __ push(Immediate(Smi::FromInt(flags))); + + CallRuntimeFromDeferred( + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); + __ StoreToSafepointRegisterSlot(result, eax); +} + + +void LCodeGen::DoToFastProperties(LToFastProperties* instr) { + DCHECK(ToRegister(instr->value()).is(eax)); + __ push(eax); + CallRuntime(Runtime::kToFastProperties, 1, instr); +} + + +void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + Label materialized; + // Registers will be used as follows: + // ecx = literals array. + // ebx = regexp literal. + // eax = regexp literal clone. + // esi = context. 
+ int literal_offset = + FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); + __ LoadHeapObject(ecx, instr->hydrogen()->literals()); + __ mov(ebx, FieldOperand(ecx, literal_offset)); + __ cmp(ebx, factory()->undefined_value()); + __ j(not_equal, &materialized, Label::kNear); + + // Create regexp literal using runtime function + // Result will be in eax. + __ push(ecx); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ push(Immediate(instr->hydrogen()->pattern())); + __ push(Immediate(instr->hydrogen()->flags())); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); + __ mov(ebx, eax); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated, Label::kNear); + + __ bind(&runtime_allocate); + __ push(ebx); + __ push(Immediate(Smi::FromInt(size))); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); + __ pop(ebx); + + __ bind(&allocated); + // Copy the content into the newly allocated memory. + // (Unroll copy loop once for better throughput). + for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { + __ mov(edx, FieldOperand(ebx, i)); + __ mov(ecx, FieldOperand(ebx, i + kPointerSize)); + __ mov(FieldOperand(eax, i), edx); + __ mov(FieldOperand(eax, i + kPointerSize), ecx); + } + if ((size % (2 * kPointerSize)) != 0) { + __ mov(edx, FieldOperand(ebx, size - kPointerSize)); + __ mov(FieldOperand(eax, size - kPointerSize), edx); + } +} + + +void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. 
+ bool pretenure = instr->hydrogen()->pretenure(); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), + instr->hydrogen()->is_generator()); + __ mov(ebx, Immediate(instr->hydrogen()->shared_info())); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + } else { + __ push(esi); + __ push(Immediate(instr->hydrogen()->shared_info())); + __ push(Immediate(pretenure ? factory()->true_value() + : factory()->false_value())); + CallRuntime(Runtime::kNewClosure, 3, instr); + } +} + + +void LCodeGen::DoTypeof(LTypeof* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + LOperand* input = instr->value(); + EmitPushTaggedOperand(input); + CallRuntime(Runtime::kTypeof, 1, instr); +} + + +void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { + Register input = ToRegister(instr->value()); + Condition final_branch_condition = EmitTypeofIs(instr, input); + if (final_branch_condition != no_condition) { + EmitBranch(instr, final_branch_condition); + } +} + + +Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) { + Label* true_label = instr->TrueLabel(chunk_); + Label* false_label = instr->FalseLabel(chunk_); + Handle<String> type_name = instr->type_literal(); + int left_block = instr->TrueDestination(chunk_); + int right_block = instr->FalseDestination(chunk_); + int next_block = GetNextEmittedBlock(); + + Label::Distance true_distance = left_block == next_block ? Label::kNear + : Label::kFar; + Label::Distance false_distance = right_block == next_block ? 
Label::kNear + : Label::kFar; + Condition final_branch_condition = no_condition; + if (String::Equals(type_name, factory()->number_string())) { + __ JumpIfSmi(input, true_label, true_distance); + __ cmp(FieldOperand(input, HeapObject::kMapOffset), + factory()->heap_number_map()); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->string_string())) { + __ JumpIfSmi(input, false_label, false_distance); + __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); + __ j(above_equal, false_label, false_distance); + __ test_b(FieldOperand(input, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + final_branch_condition = zero; + + } else if (String::Equals(type_name, factory()->symbol_string())) { + __ JumpIfSmi(input, false_label, false_distance); + __ CmpObjectType(input, SYMBOL_TYPE, input); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->boolean_string())) { + __ cmp(input, factory()->true_value()); + __ j(equal, true_label, true_distance); + __ cmp(input, factory()->false_value()); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->undefined_string())) { + __ cmp(input, factory()->undefined_value()); + __ j(equal, true_label, true_distance); + __ JumpIfSmi(input, false_label, false_distance); + // Check for undetectable objects => true. 
+ __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); + __ test_b(FieldOperand(input, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + final_branch_condition = not_zero; + + } else if (String::Equals(type_name, factory()->function_string())) { + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ JumpIfSmi(input, false_label, false_distance); + __ CmpObjectType(input, JS_FUNCTION_TYPE, input); + __ j(equal, true_label, true_distance); + __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->object_string())) { + __ JumpIfSmi(input, false_label, false_distance); + __ cmp(input, factory()->null_value()); + __ j(equal, true_label, true_distance); + __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); + __ j(below, false_label, false_distance); + __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); + __ j(above, false_label, false_distance); + // Check for undetectable objects => false. + __ test_b(FieldOperand(input, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + final_branch_condition = zero; + + } else { + __ jmp(false_label, false_distance); + } + return final_branch_condition; +} + + +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { + Register temp = ToRegister(instr->temp()); + + EmitIsConstructCall(temp); + EmitBranch(instr, equal); +} + + +void LCodeGen::EmitIsConstructCall(Register temp) { + // Get the frame pointer for the calling frame. + __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ cmp(Operand(temp, StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &check_frame_marker, Label::kNear); + __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. 
+ __ bind(&check_frame_marker); + __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); +} + + +void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { + if (!info()->IsStub()) { + // Ensure that we have enough space after the previous lazy-bailout + // instruction for patching the code here. + int current_pc = masm()->pc_offset(); + if (current_pc < last_lazy_deopt_pc_ + space_needed) { + int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; + __ Nop(padding_size); + } + } + last_lazy_deopt_pc_ = masm()->pc_offset(); +} + + +void LCodeGen::DoLazyBailout(LLazyBailout* instr) { + last_lazy_deopt_pc_ = masm()->pc_offset(); + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); +} + + +void LCodeGen::DoDeoptimize(LDeoptimize* instr) { + Deoptimizer::BailoutType type = instr->hydrogen()->type(); + // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the + // needed return address), even though the implementation of LAZY and EAGER is + // now identical. When LAZY is eventually completely folded into EAGER, remove + // the special case below. + if (info()->IsStub() && type == Deoptimizer::EAGER) { + type = Deoptimizer::LAZY; + } + Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); + DeoptimizeIf(no_condition, instr->environment(), type); +} + + +void LCodeGen::DoDummy(LDummy* instr) { + // Nothing to see here, move on! +} + + +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! 
+} + + +void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { + PushSafepointRegistersScope scope(this); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kStackGuard); + RecordSafepointWithLazyDeopt( + instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); +} + + +void LCodeGen::DoStackCheck(LStackCheck* instr) { + class DeferredStackCheck V8_FINAL : public LDeferredCode { + public: + DeferredStackCheck(LCodeGen* codegen, + LStackCheck* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredStackCheck(instr_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LStackCheck* instr_; + }; + + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + // There is no LLazyBailout instruction for stack-checks. We have to + // prepare for lazy deoptimization explicitly here. + if (instr->hydrogen()->is_function_entry()) { + // Perform stack overflow check. + Label done; + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &done, Label::kNear); + + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(esi)); + CallCode(isolate()->builtins()->StackCheck(), + RelocInfo::CODE_TARGET, + instr); + __ bind(&done); + } else { + DCHECK(instr->hydrogen()->is_backwards_branch()); + // Perform stack overflow check if this goto needs it before jumping. 
+ DeferredStackCheck* deferred_stack_check = + new(zone()) DeferredStackCheck(this, instr, x87_stack_); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(below, deferred_stack_check->entry()); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + __ bind(instr->done_label()); + deferred_stack_check->SetExit(instr->done_label()); + RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); + // Don't record a deoptimization index for the safepoint here. + // This will be done explicitly when emitting call and the safepoint in + // the deferred code. + } +} + + +void LCodeGen::DoOsrEntry(LOsrEntry* instr) { + // This is a pseudo-instruction that ensures that the environment here is + // properly registered for deoptimization and records the assembler's PC + // offset. + LEnvironment* environment = instr->environment(); + + // If the environment were already registered, we would have no way of + // backpatching it with the spill slot operands. + DCHECK(!environment->HasBeenRegistered()); + RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); + + GenerateOsrPrologue(); +} + + +void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + __ cmp(eax, isolate()->factory()->undefined_value()); + DeoptimizeIf(equal, instr->environment()); + + __ cmp(eax, isolate()->factory()->null_value()); + DeoptimizeIf(equal, instr->environment()); + + __ test(eax, Immediate(kSmiTagMask)); + DeoptimizeIf(zero, instr->environment()); + + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); + DeoptimizeIf(below_equal, instr->environment()); + + Label use_cache, call_runtime; + __ CheckEnumCache(&call_runtime); + + __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); + __ jmp(&use_cache, Label::kNear); + + // Get the set of properties to enumerate. 
+ __ bind(&call_runtime); + __ push(eax); + CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); + + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->meta_map()); + DeoptimizeIf(not_equal, instr->environment()); + __ bind(&use_cache); +} + + +void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { + Register map = ToRegister(instr->map()); + Register result = ToRegister(instr->result()); + Label load_cache, done; + __ EnumLength(result, map); + __ cmp(result, Immediate(Smi::FromInt(0))); + __ j(not_equal, &load_cache, Label::kNear); + __ mov(result, isolate()->factory()->empty_fixed_array()); + __ jmp(&done, Label::kNear); + + __ bind(&load_cache); + __ LoadInstanceDescriptors(map, result); + __ mov(result, + FieldOperand(result, DescriptorArray::kEnumCacheOffset)); + __ mov(result, + FieldOperand(result, FixedArray::SizeFor(instr->idx()))); + __ bind(&done); + __ test(result, result); + DeoptimizeIf(equal, instr->environment()); +} + + +void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { + Register object = ToRegister(instr->value()); + __ cmp(ToRegister(instr->map()), + FieldOperand(object, HeapObject::kMapOffset)); + DeoptimizeIf(not_equal, instr->environment()); +} + + +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ push(object); + __ push(index); + __ xor_(esi, esi); + __ CallRuntime(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(object, eax); +} + + +void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register object, + Register index, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), + instr_(instr), + object_(object), + index_(index) { 
+ } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register object_; + Register index_; + }; + + Register object = ToRegister(instr->object()); + Register index = ToRegister(instr->index()); + + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, object, index, x87_stack_); + + Label out_of_object, done; + __ test(index, Immediate(Smi::FromInt(1))); + __ j(not_zero, deferred->entry()); + + __ sar(index, 1); + + __ cmp(index, Immediate(0)); + __ j(less, &out_of_object, Label::kNear); + __ mov(object, FieldOperand(object, + index, + times_half_pointer_size, + JSObject::kHeaderSize)); + __ jmp(&done, Label::kNear); + + __ bind(&out_of_object); + __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset)); + __ neg(index); + // Index is now equal to out of object property index plus 1. 
+ __ mov(object, FieldOperand(object, + index, + times_half_pointer_size, + FixedArray::kHeaderSize - kPointerSize)); + __ bind(deferred->exit()); + __ bind(&done); +} + + +void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { + Register context = ToRegister(instr->context()); + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context); +} + + +void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { + Handle<ScopeInfo> scope_info = instr->scope_info(); + __ Push(scope_info); + __ push(ToRegister(instr->function())); + CallRuntime(Runtime::kPushBlockContext, 2, instr); + RecordSafepoint(Safepoint::kNoLazyDeopt); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/lithium-codegen-x87.h nodejs-0.11.15/deps/v8/src/x87/lithium-codegen-x87.h --- nodejs-0.11.13/deps/v8/src/x87/lithium-codegen-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/lithium-codegen-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,504 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_ +#define V8_X87_LITHIUM_CODEGEN_X87_H_ + +#include "src/x87/lithium-x87.h" + +#include "src/base/logging.h" +#include "src/deoptimizer.h" +#include "src/lithium-codegen.h" +#include "src/safepoint-table.h" +#include "src/scopes.h" +#include "src/utils.h" +#include "src/x87/lithium-gap-resolver-x87.h" + +namespace v8 { +namespace internal { + +// Forward declarations. 
+class LDeferredCode; +class LGapNode; +class SafepointGenerator; + +class LCodeGen: public LCodeGenBase { + public: + LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) + : LCodeGenBase(chunk, assembler, info), + deoptimizations_(4, info->zone()), + jump_table_(4, info->zone()), + deoptimization_literals_(8, info->zone()), + inlined_function_count_(0), + scope_(info->scope()), + translations_(info->zone()), + deferred_(8, info->zone()), + dynamic_frame_alignment_(false), + support_aligned_spilled_doubles_(false), + osr_pc_offset_(-1), + frame_is_built_(false), + x87_stack_(assembler), + safepoints_(info->zone()), + resolver_(this), + expected_safepoint_kind_(Safepoint::kSimple) { + PopulateDeoptimizationLiteralsWithInlinedFunctions(); + } + + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub() || + info()->requires_frame(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + + // Support for converting LOperands to assembler types. + Operand ToOperand(LOperand* op) const; + Register ToRegister(LOperand* op) const; + X87Register ToX87Register(LOperand* op) const; + + bool IsInteger32(LConstantOperand* op) const; + bool IsSmi(LConstantOperand* op) const; + Immediate ToImmediate(LOperand* op, const Representation& r) const { + return Immediate(ToRepresentation(LConstantOperand::cast(op), r)); + } + double ToDouble(LConstantOperand* op) const; + + // Support for non-sse2 (x87) floating point stack handling. + // These functions maintain the mapping of physical stack registers to our + // virtual registers between instructions. 
+ enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand }; + + void X87Mov(X87Register reg, Operand src, + X87OperandType operand = kX87DoubleOperand); + void X87Mov(Operand src, X87Register reg, + X87OperandType operand = kX87DoubleOperand); + + void X87PrepareBinaryOp( + X87Register left, X87Register right, X87Register result); + + void X87LoadForUsage(X87Register reg); + void X87LoadForUsage(X87Register reg1, X87Register reg2); + void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); } + void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); } + + void X87Fxch(X87Register reg, int other_slot = 0) { + x87_stack_.Fxch(reg, other_slot); + } + void X87Free(X87Register reg) { + x87_stack_.Free(reg); + } + + + bool X87StackEmpty() { + return x87_stack_.depth() == 0; + } + + Handle<Object> ToHandle(LConstantOperand* op) const; + + // The operand denoting the second word (the one with a higher address) of + // a double stack slot. + Operand HighOperand(LOperand* op); + + // Try to generate code for the entire chunk, but it may fail if the + // chunk contains constructs we cannot handle. Returns true if the + // code generation attempt succeeded. + bool GenerateCode(); + + // Finish the code by setting stack height, safepoint, and bailout + // information on it. + void FinishCode(Handle<Code> code); + + // Deferred code support. 
+ void DoDeferredNumberTagD(LNumberTagD* instr); + + enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; + void DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp, + IntegerSignedness signedness); + + void DoDeferredTaggedToI(LTaggedToI* instr, Label* done); + void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); + void DoDeferredStackCheck(LStackCheck* instr); + void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); + void DoDeferredStringCharFromCode(LStringCharFromCode* instr); + void DoDeferredAllocate(LAllocate* instr); + void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, + Label* map_check); + void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index); + + // Parallel move support. + void DoParallelMove(LParallelMove* move); + void DoGap(LGap* instr); + + // Emit frame translation commands for an environment. + void WriteTranslation(LEnvironment* environment, Translation* translation); + + void EnsureRelocSpaceForDeoptimization(); + + // Declare methods that deal with the individual node types. +#define DECLARE_DO(type) void Do##type(L##type* node); + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) +#undef DECLARE_DO + + private: + StrictMode strict_mode() const { return info()->strict_mode(); } + + Scope* scope() const { return scope_; } + + void EmitClassOfTest(Label* if_true, + Label* if_false, + Handle<String> class_name, + Register input, + Register temporary, + Register temporary2); + + int GetStackSlotCount() const { return chunk()->spill_slot_count(); } + + void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } + + // Code generation passes. Returns true if code generation should + // continue. 
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE; + void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE; + bool GeneratePrologue(); + bool GenerateDeferredCode(); + bool GenerateJumpTable(); + bool GenerateSafepointTable(); + + // Generates the custom OSR entrypoint and sets the osr_pc_offset. + void GenerateOsrPrologue(); + + enum SafepointMode { + RECORD_SIMPLE_SAFEPOINT, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS + }; + + void CallCode(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr); + + void CallCodeGeneric(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr, + SafepointMode safepoint_mode); + + void CallRuntime(const Runtime::Function* fun, + int argc, + LInstruction* instr); + + void CallRuntime(Runtime::FunctionId id, + int argc, + LInstruction* instr) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, argc, instr); + } + + void CallRuntimeFromDeferred(Runtime::FunctionId id, + int argc, + LInstruction* instr, + LOperand* context); + + void LoadContextFromDeferred(LOperand* context); + + enum EDIState { + EDI_UNINITIALIZED, + EDI_CONTAINS_TARGET + }; + + // Generate a direct call to a known function. Expects the function + // to be in edi. 
+ void CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, + int arity, + LInstruction* instr, + EDIState edi_state); + + void RecordSafepointWithLazyDeopt(LInstruction* instr, + SafepointMode safepoint_mode); + + void RegisterEnvironmentForDeoptimization(LEnvironment* environment, + Safepoint::DeoptMode mode); + void DeoptimizeIf(Condition cc, + LEnvironment* environment, + Deoptimizer::BailoutType bailout_type); + void DeoptimizeIf(Condition cc, LEnvironment* environment); + + bool DeoptEveryNTimes() { + return FLAG_deopt_every_n_times != 0 && !info()->IsStub(); + } + + void AddToTranslation(LEnvironment* environment, + Translation* translation, + LOperand* op, + bool is_tagged, + bool is_uint32, + int* object_index_pointer, + int* dematerialized_index_pointer); + void PopulateDeoptimizationData(Handle<Code> code); + int DefineDeoptimizationLiteral(Handle<Object> literal); + + void PopulateDeoptimizationLiteralsWithInlinedFunctions(); + + Register ToRegister(int index) const; + X87Register ToX87Register(int index) const; + int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; + int32_t ToInteger32(LConstantOperand* op) const; + ExternalReference ToExternalReference(LConstantOperand* op) const; + + Operand BuildFastArrayOperand(LOperand* elements_pointer, + LOperand* key, + Representation key_representation, + ElementsKind elements_kind, + uint32_t base_offset); + + Operand BuildSeqStringOperand(Register string, + LOperand* index, + String::Encoding encoding); + + void EmitIntegerMathAbs(LMathAbs* instr); + + // Support for recording safepoint and position information. 
+ void RecordSafepoint(LPointerMap* pointers, + Safepoint::Kind kind, + int arguments, + Safepoint::DeoptMode mode); + void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); + void RecordSafepoint(Safepoint::DeoptMode mode); + void RecordSafepointWithRegisters(LPointerMap* pointers, + int arguments, + Safepoint::DeoptMode mode); + + void RecordAndWritePosition(int position) V8_OVERRIDE; + + static Condition TokenToCondition(Token::Value op, bool is_unsigned); + void EmitGoto(int block); + + // EmitBranch expects to be the last instruction of a block. + template<class InstrType> + void EmitBranch(InstrType instr, Condition cc); + template<class InstrType> + void EmitFalseBranch(InstrType instr, Condition cc); + void EmitNumberUntagDNoSSE2( + Register input, + Register temp, + X87Register res_reg, + bool allow_undefined_as_nan, + bool deoptimize_on_minus_zero, + LEnvironment* env, + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED); + + // Emits optimized code for typeof x == "y". Modifies input register. + // Returns the condition on which a final split to + // true and false label should be made, to optimize fallthrough. + Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input); + + // Emits optimized code for %_IsObject(x). Preserves input register. + // Returns the condition on which a final split to + // true and false label should be made, to optimize fallthrough. + Condition EmitIsObject(Register input, + Register temp1, + Label* is_not_object, + Label* is_object); + + // Emits optimized code for %_IsString(x). Preserves input register. + // Returns the condition on which a final split to + // true and false label should be made, to optimize fallthrough. + Condition EmitIsString(Register input, + Register temp1, + Label* is_not_string, + SmiCheck check_needed); + + // Emits optimized code for %_IsConstructCall(). + // Caller should branch on equal condition. 
+ void EmitIsConstructCall(Register temp); + + // Emits optimized code to deep-copy the contents of statically known + // object graphs (e.g. object literal boilerplate). + void EmitDeepCopy(Handle<JSObject> object, + Register result, + Register source, + int* offset, + AllocationSiteMode mode); + + void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); + + void EmitReturn(LReturn* instr, bool dynamic_frame_alignment); + + // Emits code for pushing either a tagged constant, a (non-double) + // register, or a stack slot operand. + void EmitPushTaggedOperand(LOperand* operand); + + void X87Fld(Operand src, X87OperandType opts); + + void EmitFlushX87ForDeopt(); + void FlushX87StackIfNecessary(LInstruction* instr) { + x87_stack_.FlushIfNecessary(instr, this); + } + friend class LGapResolver; + +#ifdef _MSC_VER + // On windows, you may not access the stack more than one page below + // the most recently mapped page. To make the allocated area randomly + // accessible, we write an arbitrary value to each page in range + // esp + offset - page_size .. esp in turn. 
+ void MakeSureStackPagesMapped(int offset); +#endif + + ZoneList<LEnvironment*> deoptimizations_; + ZoneList<Deoptimizer::JumpTableEntry> jump_table_; + ZoneList<Handle<Object> > deoptimization_literals_; + int inlined_function_count_; + Scope* const scope_; + TranslationBuffer translations_; + ZoneList<LDeferredCode*> deferred_; + bool dynamic_frame_alignment_; + bool support_aligned_spilled_doubles_; + int osr_pc_offset_; + bool frame_is_built_; + + class X87Stack { + public: + explicit X87Stack(MacroAssembler* masm) + : stack_depth_(0), is_mutable_(true), masm_(masm) { } + explicit X87Stack(const X87Stack& other) + : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(masm()) { + for (int i = 0; i < stack_depth_; i++) { + stack_[i] = other.stack_[i]; + } + } + bool operator==(const X87Stack& other) const { + if (stack_depth_ != other.stack_depth_) return false; + for (int i = 0; i < stack_depth_; i++) { + if (!stack_[i].is(other.stack_[i])) return false; + } + return true; + } + bool Contains(X87Register reg); + void Fxch(X87Register reg, int other_slot = 0); + void Free(X87Register reg); + void PrepareToWrite(X87Register reg); + void CommitWrite(X87Register reg); + void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen); + void LeavingBlock(int current_block_id, LGoto* goto_instr); + int depth() const { return stack_depth_; } + void pop() { + DCHECK(is_mutable_); + stack_depth_--; + } + void push(X87Register reg) { + DCHECK(is_mutable_); + DCHECK(stack_depth_ < X87Register::kMaxNumAllocatableRegisters); + stack_[stack_depth_] = reg; + stack_depth_++; + } + + MacroAssembler* masm() const { return masm_; } + Isolate* isolate() const { return masm_->isolate(); } + + private: + int ArrayIndex(X87Register reg); + int st2idx(int pos); + + X87Register stack_[X87Register::kMaxNumAllocatableRegisters]; + int stack_depth_; + bool is_mutable_; + MacroAssembler* masm_; + }; + X87Stack x87_stack_; + + // Builder that keeps track of safepoints in the code. 
The table + // itself is emitted at the end of the generated code. + SafepointTableBuilder safepoints_; + + // Compiler from a set of parallel moves to a sequential list of moves. + LGapResolver resolver_; + + Safepoint::Kind expected_safepoint_kind_; + + class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { + public: + explicit PushSafepointRegistersScope(LCodeGen* codegen) + : codegen_(codegen) { + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); + codegen_->masm_->PushSafepointRegisters(); + codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; + DCHECK(codegen_->info()->is_calling()); + } + + ~PushSafepointRegistersScope() { + DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters); + codegen_->masm_->PopSafepointRegisters(); + codegen_->expected_safepoint_kind_ = Safepoint::kSimple; + } + + private: + LCodeGen* codegen_; + }; + + friend class LDeferredCode; + friend class LEnvironment; + friend class SafepointGenerator; + DISALLOW_COPY_AND_ASSIGN(LCodeGen); +}; + + +class LDeferredCode : public ZoneObject { + public: + explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack) + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_), + x87_stack_(x87_stack) { + codegen->AddDeferredCode(this); + } + + virtual ~LDeferredCode() {} + virtual void Generate() = 0; + virtual LInstruction* instr() = 0; + + void SetExit(Label* exit) { external_exit_ = exit; } + Label* entry() { return &entry_; } + Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } + Label* done() { return codegen_->NeedsDeferredFrame() ? 
&done_ : exit(); } + int instruction_index() const { return instruction_index_; } + const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; } + + protected: + LCodeGen* codegen() const { return codegen_; } + MacroAssembler* masm() const { return codegen_->masm(); } + + private: + LCodeGen* codegen_; + Label entry_; + Label exit_; + Label* external_exit_; + Label done_; + int instruction_index_; + LCodeGen::X87Stack x87_stack_; +}; + +} } // namespace v8::internal + +#endif // V8_X87_LITHIUM_CODEGEN_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/lithium-gap-resolver-x87.cc nodejs-0.11.15/deps/v8/src/x87/lithium-gap-resolver-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/lithium-gap-resolver-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/lithium-gap-resolver-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,445 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/x87/lithium-codegen-x87.h" +#include "src/x87/lithium-gap-resolver-x87.h" + +namespace v8 { +namespace internal { + +LGapResolver::LGapResolver(LCodeGen* owner) + : cgen_(owner), + moves_(32, owner->zone()), + source_uses_(), + destination_uses_(), + spilled_register_(-1) {} + + +void LGapResolver::Resolve(LParallelMove* parallel_move) { + DCHECK(HasBeenReset()); + // Build up a worklist of moves. + BuildInitialMoveList(parallel_move); + + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands move = moves_[i]; + // Skip constants to perform them last. They don't block other moves + // and skipping such moves with register destinations keeps those + // registers free for the whole algorithm. + if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { + PerformMove(i); + } + } + + // Perform the moves with constant sources. 
+ for (int i = 0; i < moves_.length(); ++i) { + if (!moves_[i].IsEliminated()) { + DCHECK(moves_[i].source()->IsConstantOperand()); + EmitMove(i); + } + } + + Finish(); + DCHECK(HasBeenReset()); +} + + +void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { + // Perform a linear sweep of the moves to add them to the initial list of + // moves to perform, ignoring any move that is redundant (the source is + // the same as the destination, the destination is ignored and + // unallocated, or the move was already eliminated). + const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); + for (int i = 0; i < moves->length(); ++i) { + LMoveOperands move = moves->at(i); + if (!move.IsRedundant()) AddMove(move); + } + Verify(); +} + + +void LGapResolver::PerformMove(int index) { + // Each call to this function performs a move and deletes it from the move + // graph. We first recursively perform any move blocking this one. We + // mark a move as "pending" on entry to PerformMove in order to detect + // cycles in the move graph. We use operand swaps to resolve cycles, + // which means that a call to PerformMove could change any source operand + // in the move graph. + + DCHECK(!moves_[index].IsPending()); + DCHECK(!moves_[index].IsRedundant()); + + // Clear this move's destination to indicate a pending move. The actual + // destination is saved on the side. + DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. + LOperand* destination = moves_[index].destination(); + moves_[index].set_destination(NULL); + + // Perform a depth-first traversal of the move graph to resolve + // dependencies. Any unperformed, unpending move with a source the same + // as this one's destination blocks this one so recursively perform all + // such moves. 
+ for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(destination) && !other_move.IsPending()) { + // Though PerformMove can change any source operand in the move graph, + // this call cannot create a blocking move via a swap (this loop does + // not miss any). Assume there is a non-blocking move with source A + // and this move is blocked on source B and there is a swap of A and + // B. Then A and B must be involved in the same cycle (or they would + // not be swapped). Since this move's destination is B and there is + // only a single incoming edge to an operand, this move must also be + // involved in the same cycle. In that case, the blocking move will + // be created but will be "pending" when we return from PerformMove. + PerformMove(i); + } + } + + // We are about to resolve this move and don't need it marked as + // pending, so restore its destination. + moves_[index].set_destination(destination); + + // This move's source may have changed due to swaps to resolve cycles and + // so it may now be the last move in the cycle. If so remove it. + if (moves_[index].source()->Equals(destination)) { + RemoveMove(index); + return; + } + + // The move may be blocked on a (at most one) pending move, in which case + // we have a cycle. Search for such a blocking move and perform a swap to + // resolve it. + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(destination)) { + DCHECK(other_move.IsPending()); + EmitSwap(index); + return; + } + } + + // This move is not blocked. 
+ EmitMove(index); +} + + +void LGapResolver::AddMove(LMoveOperands move) { + LOperand* source = move.source(); + if (source->IsRegister()) ++source_uses_[source->index()]; + + LOperand* destination = move.destination(); + if (destination->IsRegister()) ++destination_uses_[destination->index()]; + + moves_.Add(move, cgen_->zone()); +} + + +void LGapResolver::RemoveMove(int index) { + LOperand* source = moves_[index].source(); + if (source->IsRegister()) { + --source_uses_[source->index()]; + DCHECK(source_uses_[source->index()] >= 0); + } + + LOperand* destination = moves_[index].destination(); + if (destination->IsRegister()) { + --destination_uses_[destination->index()]; + DCHECK(destination_uses_[destination->index()] >= 0); + } + + moves_[index].Eliminate(); +} + + +int LGapResolver::CountSourceUses(LOperand* operand) { + int count = 0; + for (int i = 0; i < moves_.length(); ++i) { + if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { + ++count; + } + } + return count; +} + + +Register LGapResolver::GetFreeRegisterNot(Register reg) { + int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg); + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { + if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) { + return Register::FromAllocationIndex(i); + } + } + return no_reg; +} + + +bool LGapResolver::HasBeenReset() { + if (!moves_.is_empty()) return false; + if (spilled_register_ >= 0) return false; + + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { + if (source_uses_[i] != 0) return false; + if (destination_uses_[i] != 0) return false; + } + return true; +} + + +void LGapResolver::Verify() { +#ifdef ENABLE_SLOW_DCHECKS + // No operand should be the destination for more than one move. 
+ for (int i = 0; i < moves_.length(); ++i) { + LOperand* destination = moves_[i].destination(); + for (int j = i + 1; j < moves_.length(); ++j) { + SLOW_DCHECK(!destination->Equals(moves_[j].destination())); + } + } +#endif +} + + +#define __ ACCESS_MASM(cgen_->masm()) + +void LGapResolver::Finish() { + if (spilled_register_ >= 0) { + __ pop(Register::FromAllocationIndex(spilled_register_)); + spilled_register_ = -1; + } + moves_.Rewind(0); +} + + +void LGapResolver::EnsureRestored(LOperand* operand) { + if (operand->IsRegister() && operand->index() == spilled_register_) { + __ pop(Register::FromAllocationIndex(spilled_register_)); + spilled_register_ = -1; + } +} + + +Register LGapResolver::EnsureTempRegister() { + // 1. We may have already spilled to create a temp register. + if (spilled_register_ >= 0) { + return Register::FromAllocationIndex(spilled_register_); + } + + // 2. We may have a free register that we can use without spilling. + Register free = GetFreeRegisterNot(no_reg); + if (!free.is(no_reg)) return free; + + // 3. Prefer to spill a register that is not used in any remaining move + // because it will not need to be restored until the end. + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { + if (source_uses_[i] == 0 && destination_uses_[i] == 0) { + Register scratch = Register::FromAllocationIndex(i); + __ push(scratch); + spilled_register_ = i; + return scratch; + } + } + + // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. + Register scratch = Register::FromAllocationIndex(0); + __ push(scratch); + spilled_register_ = 0; + return scratch; +} + + +void LGapResolver::EmitMove(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + EnsureRestored(source); + EnsureRestored(destination); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. 
+ if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = cgen_->ToRegister(source); + Operand dst = cgen_->ToOperand(destination); + __ mov(dst, src); + + } else if (source->IsStackSlot()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Operand src = cgen_->ToOperand(source); + if (destination->IsRegister()) { + Register dst = cgen_->ToRegister(destination); + __ mov(dst, src); + } else { + // Spill on demand to use a temporary register for memory-to-memory + // moves. + Register tmp = EnsureTempRegister(); + Operand dst = cgen_->ToOperand(destination); + __ mov(tmp, src); + __ mov(dst, tmp); + } + + } else if (source->IsConstantOperand()) { + LConstantOperand* constant_source = LConstantOperand::cast(source); + if (destination->IsRegister()) { + Register dst = cgen_->ToRegister(destination); + Representation r = cgen_->IsSmi(constant_source) + ? Representation::Smi() : Representation::Integer32(); + if (cgen_->IsInteger32(constant_source)) { + __ Move(dst, cgen_->ToImmediate(constant_source, r)); + } else { + __ LoadObject(dst, cgen_->ToHandle(constant_source)); + } + } else if (destination->IsDoubleRegister()) { + double v = cgen_->ToDouble(constant_source); + uint64_t int_val = BitCast<uint64_t, double>(v); + int32_t lower = static_cast<int32_t>(int_val); + int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt); + __ push(Immediate(upper)); + __ push(Immediate(lower)); + X87Register dst = cgen_->ToX87Register(destination); + cgen_->X87Mov(dst, MemOperand(esp, 0)); + __ add(esp, Immediate(kDoubleSize)); + } else { + DCHECK(destination->IsStackSlot()); + Operand dst = cgen_->ToOperand(destination); + Representation r = cgen_->IsSmi(constant_source) + ? 
Representation::Smi() : Representation::Integer32(); + if (cgen_->IsInteger32(constant_source)) { + __ Move(dst, cgen_->ToImmediate(constant_source, r)); + } else { + Register tmp = EnsureTempRegister(); + __ LoadObject(tmp, cgen_->ToHandle(constant_source)); + __ mov(dst, tmp); + } + } + + } else if (source->IsDoubleRegister()) { + // load from the register onto the stack, store in destination, which must + // be a double stack slot in the non-SSE2 case. + DCHECK(destination->IsDoubleStackSlot()); + Operand dst = cgen_->ToOperand(destination); + X87Register src = cgen_->ToX87Register(source); + cgen_->X87Mov(dst, src); + } else if (source->IsDoubleStackSlot()) { + // load from the stack slot on top of the floating point stack, and then + // store in destination. If destination is a double register, then it + // represents the top of the stack and nothing needs to be done. + if (destination->IsDoubleStackSlot()) { + Register tmp = EnsureTempRegister(); + Operand src0 = cgen_->ToOperand(source); + Operand src1 = cgen_->HighOperand(source); + Operand dst0 = cgen_->ToOperand(destination); + Operand dst1 = cgen_->HighOperand(destination); + __ mov(tmp, src0); // Then use tmp to copy source to destination. + __ mov(dst0, tmp); + __ mov(tmp, src1); + __ mov(dst1, tmp); + } else { + Operand src = cgen_->ToOperand(source); + X87Register dst = cgen_->ToX87Register(destination); + cgen_->X87Mov(dst, src); + } + } else { + UNREACHABLE(); + } + + RemoveMove(index); +} + + +void LGapResolver::EmitSwap(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + EnsureRestored(source); + EnsureRestored(destination); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister() && destination->IsRegister()) { + // Register-register. 
+ Register src = cgen_->ToRegister(source); + Register dst = cgen_->ToRegister(destination); + __ xchg(dst, src); + + } else if ((source->IsRegister() && destination->IsStackSlot()) || + (source->IsStackSlot() && destination->IsRegister())) { + // Register-memory. Use a free register as a temp if possible. Do not + // spill on demand because the simple spill implementation cannot avoid + // spilling src at this point. + Register tmp = GetFreeRegisterNot(no_reg); + Register reg = + cgen_->ToRegister(source->IsRegister() ? source : destination); + Operand mem = + cgen_->ToOperand(source->IsRegister() ? destination : source); + if (tmp.is(no_reg)) { + __ xor_(reg, mem); + __ xor_(mem, reg); + __ xor_(reg, mem); + } else { + __ mov(tmp, mem); + __ mov(mem, reg); + __ mov(reg, tmp); + } + + } else if (source->IsStackSlot() && destination->IsStackSlot()) { + // Memory-memory. Spill on demand to use a temporary. If there is a + // free register after that, use it as a second temporary. + Register tmp0 = EnsureTempRegister(); + Register tmp1 = GetFreeRegisterNot(tmp0); + Operand src = cgen_->ToOperand(source); + Operand dst = cgen_->ToOperand(destination); + if (tmp1.is(no_reg)) { + // Only one temp register available to us. + __ mov(tmp0, dst); + __ xor_(tmp0, src); + __ xor_(src, tmp0); + __ xor_(tmp0, src); + __ mov(dst, tmp0); + } else { + __ mov(tmp0, dst); + __ mov(tmp1, src); + __ mov(dst, tmp1); + __ mov(src, tmp0); + } + } else { + // No other combinations are possible. + UNREACHABLE(); + } + + // The swap of source and destination has executed a move from source to + // destination. + RemoveMove(index); + + // Any unperformed (including pending) move with a source of either + // this move's source or destination needs to have their source + // changed to reflect the state of affairs after the swap. 
+ for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(source)) { + moves_[i].set_source(destination); + } else if (other_move.Blocks(destination)) { + moves_[i].set_source(source); + } + } + + // In addition to swapping the actual uses as sources, we need to update + // the use counts. + if (source->IsRegister() && destination->IsRegister()) { + int temp = source_uses_[source->index()]; + source_uses_[source->index()] = source_uses_[destination->index()]; + source_uses_[destination->index()] = temp; + } else if (source->IsRegister()) { + // We don't have use counts for non-register operands like destination. + // Compute those counts now. + source_uses_[source->index()] = CountSourceUses(source); + } else if (destination->IsRegister()) { + source_uses_[destination->index()] = CountSourceUses(destination); + } +} + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/lithium-gap-resolver-x87.h nodejs-0.11.15/deps/v8/src/x87/lithium-gap-resolver-x87.h --- nodejs-0.11.13/deps/v8/src/x87/lithium-gap-resolver-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/lithium-gap-resolver-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,87 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_ +#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_ + +#include "src/v8.h" + +#include "src/lithium.h" + +namespace v8 { +namespace internal { + +class LCodeGen; +class LGapResolver; + +class LGapResolver V8_FINAL BASE_EMBEDDED { + public: + explicit LGapResolver(LCodeGen* owner); + + // Resolve a set of parallel moves, emitting assembler instructions. + void Resolve(LParallelMove* parallel_move); + + private: + // Build the initial list of moves. 
+ void BuildInitialMoveList(LParallelMove* parallel_move); + + // Perform the move at the moves_ index in question (possibly requiring + // other moves to satisfy dependencies). + void PerformMove(int index); + + // Emit any code necessary at the end of a gap move. + void Finish(); + + // Add or delete a move from the move graph without emitting any code. + // Used to build up the graph and remove trivial moves. + void AddMove(LMoveOperands move); + void RemoveMove(int index); + + // Report the count of uses of operand as a source in a not-yet-performed + // move. Used to rebuild use counts. + int CountSourceUses(LOperand* operand); + + // Emit a move and remove it from the move graph. + void EmitMove(int index); + + // Execute a move by emitting a swap of two operands. The move from + // source to destination is removed from the move graph. + void EmitSwap(int index); + + // Ensure that the given operand is not spilled. + void EnsureRestored(LOperand* operand); + + // Return a register that can be used as a temp register, spilling + // something if necessary. + Register EnsureTempRegister(); + + // Return a known free register different from the given one (which could + // be no_reg---returning any free register), or no_reg if there is no such + // register. + Register GetFreeRegisterNot(Register reg); + + // Verify that the state is the initial one, ready to resolve a single + // parallel move. + bool HasBeenReset(); + + // Verify the move list before performing moves. + void Verify(); + + LCodeGen* cgen_; + + // List of moves not yet resolved. + ZoneList<LMoveOperands> moves_; + + // Source and destination use counts for the general purpose registers. + int source_uses_[Register::kMaxNumAllocatableRegisters]; + int destination_uses_[Register::kMaxNumAllocatableRegisters]; + + // If we had to spill on demand, the currently spilled register's + // allocation index. 
+ int spilled_register_; +}; + +} } // namespace v8::internal + +#endif // V8_X87_LITHIUM_GAP_RESOLVER_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/lithium-x87.cc nodejs-0.11.15/deps/v8/src/x87/lithium-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/lithium-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/lithium-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2683 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/hydrogen-osr.h" +#include "src/lithium-inl.h" +#include "src/x87/lithium-codegen-x87.h" + +namespace v8 { +namespace internal { + +#define DEFINE_COMPILE(type) \ + void L##type::CompileToNative(LCodeGen* generator) { \ + generator->Do##type(this); \ + } +LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) +#undef DEFINE_COMPILE + + +#ifdef DEBUG +void LInstruction::VerifyCall() { + // Call instructions can use only fixed registers as temporaries and + // outputs because all registers are blocked by the calling convention. + // Inputs operands must use a fixed register or use-at-start policy or + // a non-register policy. 
+ DCHECK(Output() == NULL || + LUnallocated::cast(Output())->HasFixedPolicy() || + !LUnallocated::cast(Output())->HasRegisterPolicy()); + for (UseIterator it(this); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + DCHECK(operand->HasFixedPolicy() || + operand->IsUsedAtStart()); + } + for (TempIterator it(this); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + } +} +#endif + + +bool LInstruction::HasDoubleRegisterResult() { + return HasResult() && result()->IsDoubleRegister(); +} + + +bool LInstruction::HasDoubleRegisterInput() { + for (int i = 0; i < InputCount(); i++) { + LOperand* op = InputAt(i); + if (op != NULL && op->IsDoubleRegister()) { + return true; + } + } + return false; +} + + +bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) { + for (int i = 0; i < InputCount(); i++) { + LOperand* op = InputAt(i); + if (op != NULL && op->IsDoubleRegister()) { + if (cgen->ToX87Register(op).is(reg)) return true; + } + } + return false; +} + + +void LInstruction::PrintTo(StringStream* stream) { + stream->Add("%s ", this->Mnemonic()); + + PrintOutputOperandTo(stream); + + PrintDataTo(stream); + + if (HasEnvironment()) { + stream->Add(" "); + environment()->PrintTo(stream); + } + + if (HasPointerMap()) { + stream->Add(" "); + pointer_map()->PrintTo(stream); + } +} + + +void LInstruction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + for (int i = 0; i < InputCount(); i++) { + if (i > 0) stream->Add(" "); + if (InputAt(i) == NULL) { + stream->Add("NULL"); + } else { + InputAt(i)->PrintTo(stream); + } + } +} + + +void LInstruction::PrintOutputOperandTo(StringStream* stream) { + if (HasResult()) result()->PrintTo(stream); +} + + +void LLabel::PrintDataTo(StringStream* stream) { + LGap::PrintDataTo(stream); + LLabel* rep = replacement(); + if (rep != NULL) { + stream->Add(" Dead block replaced with 
B%d", rep->block_id()); + } +} + + +bool LGap::IsRedundant() const { + for (int i = 0; i < 4; i++) { + if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) { + return false; + } + } + + return true; +} + + +void LGap::PrintDataTo(StringStream* stream) { + for (int i = 0; i < 4; i++) { + stream->Add("("); + if (parallel_moves_[i] != NULL) { + parallel_moves_[i]->PrintDataTo(stream); + } + stream->Add(") "); + } +} + + +const char* LArithmeticD::Mnemonic() const { + switch (op()) { + case Token::ADD: return "add-d"; + case Token::SUB: return "sub-d"; + case Token::MUL: return "mul-d"; + case Token::DIV: return "div-d"; + case Token::MOD: return "mod-d"; + default: + UNREACHABLE(); + return NULL; + } +} + + +const char* LArithmeticT::Mnemonic() const { + switch (op()) { + case Token::ADD: return "add-t"; + case Token::SUB: return "sub-t"; + case Token::MUL: return "mul-t"; + case Token::MOD: return "mod-t"; + case Token::DIV: return "div-t"; + case Token::BIT_AND: return "bit-and-t"; + case Token::BIT_OR: return "bit-or-t"; + case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; + case Token::SHL: return "sal-t"; + case Token::SAR: return "sar-t"; + case Token::SHR: return "shr-t"; + default: + UNREACHABLE(); + return NULL; + } +} + + +bool LGoto::HasInterestingComment(LCodeGen* gen) const { + return !gen->IsNextEmittedBlock(block_id()); +} + + +void LGoto::PrintDataTo(StringStream* stream) { + stream->Add("B%d", block_id()); +} + + +void LBranch::PrintDataTo(StringStream* stream) { + stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); + value()->PrintTo(stream); +} + + +void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if "); + left()->PrintTo(stream); + stream->Add(" %s ", Token::String(op())); + right()->PrintTo(stream); + stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsObjectAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if 
is_object("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsStringAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_string("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_smi("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if is_undetectable("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if string_compare("); + left()->PrintTo(stream); + right()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if has_instance_type("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if has_cached_array_index("); + value()->PrintTo(stream); + stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); +} + + +void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if class_of_test("); + value()->PrintTo(stream); + stream->Add(", \"%o\") then B%d else B%d", + *hydrogen()->class_name(), + true_block_id(), + false_block_id()); +} + + +void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { + stream->Add("if typeof "); + value()->PrintTo(stream); + stream->Add(" == \"%s\" then B%d else B%d", + hydrogen()->type_literal()->ToCString().get(), + true_block_id(), false_block_id()); +} + + +void LStoreCodeEntry::PrintDataTo(StringStream* stream) { + stream->Add(" 
= "); + function()->PrintTo(stream); + stream->Add(".code_entry = "); + code_object()->PrintTo(stream); +} + + +void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + base_object()->PrintTo(stream); + stream->Add(" + "); + offset()->PrintTo(stream); +} + + +void LCallJSFunction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + function()->PrintTo(stream); + stream->Add("#%d / ", arity()); +} + + +void LCallWithDescriptor::PrintDataTo(StringStream* stream) { + for (int i = 0; i < InputCount(); i++) { + InputAt(i)->PrintTo(stream); + stream->Add(" "); + } + stream->Add("#%d / ", arity()); +} + + +void LLoadContextSlot::PrintDataTo(StringStream* stream) { + context()->PrintTo(stream); + stream->Add("[%d]", slot_index()); +} + + +void LStoreContextSlot::PrintDataTo(StringStream* stream) { + context()->PrintTo(stream); + stream->Add("[%d] <- ", slot_index()); + value()->PrintTo(stream); +} + + +void LInvokeFunction::PrintDataTo(StringStream* stream) { + stream->Add("= "); + context()->PrintTo(stream); + stream->Add(" "); + function()->PrintTo(stream); + stream->Add(" #%d / ", arity()); +} + + +void LCallNew::PrintDataTo(StringStream* stream) { + stream->Add("= "); + context()->PrintTo(stream); + stream->Add(" "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); +} + + +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + context()->PrintTo(stream); + stream->Add(" "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ElementsKind kind = hydrogen()->elements_kind(); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + +void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { + arguments()->PrintTo(stream); + + stream->Add(" length "); + length()->PrintTo(stream); + + stream->Add(" index "); + index()->PrintTo(stream); +} + + +int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { + // Skip a slot if for a double-width slot. 
+ if (kind == DOUBLE_REGISTERS) { + spill_slot_count_++; + spill_slot_count_ |= 1; + num_double_slots_++; + } + return spill_slot_count_++; +} + + +LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { + int index = GetNextSpillIndex(kind); + if (kind == DOUBLE_REGISTERS) { + return LDoubleStackSlot::Create(index, zone()); + } else { + DCHECK(kind == GENERAL_REGISTERS); + return LStackSlot::Create(index, zone()); + } +} + + +void LStoreNamedField::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + OStringStream os; + os << hydrogen()->access() << " <- "; + stream->Add(os.c_str()); + value()->PrintTo(stream); +} + + +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(String::cast(*name())->ToCString().get()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", base_offset()); + } else { + stream->Add("]"); + } +} + + +void LStoreKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", base_offset()); + } else { + stream->Add("] <- "); + } + + if (value() == NULL) { + DCHECK(hydrogen()->IsConstantHoleStore() && + hydrogen()->value()->representation().IsDouble()); + stream->Add("<the hole(nan)>"); + } else { + value()->PrintTo(stream); + } +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +void LTransitionElementsKind::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add(" %p -> %p", *original_map(), *transitioned_map()); +} + + +LPlatformChunk* LChunkBuilder::Build() { + 
DCHECK(is_unused()); + chunk_ = new(zone()) LPlatformChunk(info(), graph()); + LPhase phase("L_Building chunk", chunk_); + status_ = BUILDING; + + // Reserve the first spill slot for the state of dynamic alignment. + if (info()->IsOptimizing()) { + int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS); + DCHECK_EQ(alignment_state_index, 0); + USE(alignment_state_index); + } + + // If compiling for OSR, reserve space for the unoptimized frame, + // which will be subsumed into this frame. + if (graph()->has_osr()) { + for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { + chunk_->GetNextSpillIndex(GENERAL_REGISTERS); + } + } + + const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); + for (int i = 0; i < blocks->length(); i++) { + HBasicBlock* next = NULL; + if (i < blocks->length() - 1) next = blocks->at(i + 1); + DoBasicBlock(blocks->at(i), next); + if (is_aborted()) return NULL; + } + status_ = DONE; + return chunk_; +} + + +void LChunkBuilder::Abort(BailoutReason reason) { + info()->set_bailout_reason(reason); + status_ = ABORTED; +} + + +LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { + return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER, + Register::ToAllocationIndex(reg)); +} + + +LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { + return Use(value, ToUnallocated(fixed_register)); +} + + +LOperand* LChunkBuilder::UseRegister(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); +} + + +LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) { + return Use(value, + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER, + LUnallocated::USED_AT_START)); +} + + +LOperand* LChunkBuilder::UseTempRegister(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER)); +} + + +LOperand* LChunkBuilder::Use(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::NONE)); +} + + 
+LOperand* LChunkBuilder::UseAtStart(HValue* value) { + return Use(value, new(zone()) LUnallocated(LUnallocated::NONE, + LUnallocated::USED_AT_START)); +} + + +static inline bool CanBeImmediateConstant(HValue* value) { + return value->IsConstant() && HConstant::cast(value)->NotInNewSpace(); +} + + +LOperand* LChunkBuilder::UseOrConstant(HValue* value) { + return CanBeImmediateConstant(value) + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : Use(value); +} + + +LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { + return CanBeImmediateConstant(value) + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseAtStart(value); +} + + +LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value, + Register fixed_register) { + return CanBeImmediateConstant(value) + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseFixed(value, fixed_register); +} + + +LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { + return CanBeImmediateConstant(value) + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseRegister(value); +} + + +LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { + return CanBeImmediateConstant(value) + ? chunk_->DefineConstantOperand(HConstant::cast(value)) + : UseRegisterAtStart(value); +} + + +LOperand* LChunkBuilder::UseConstant(HValue* value) { + return chunk_->DefineConstantOperand(HConstant::cast(value)); +} + + +LOperand* LChunkBuilder::UseAny(HValue* value) { + return value->IsConstant() + ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) + : Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); +} + + +LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) { + if (value->EmitAtUses()) { + HInstruction* instr = HInstruction::cast(value); + VisitInstruction(instr); + } + operand->set_virtual_register(value->id()); + return operand; +} + + +LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr, + LUnallocated* result) { + result->set_virtual_register(current_instruction_->id()); + instr->set_result(result); + return instr; +} + + +LInstruction* LChunkBuilder::DefineAsRegister( + LTemplateResultInstruction<1>* instr) { + return Define(instr, + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER)); +} + + +LInstruction* LChunkBuilder::DefineAsSpilled( + LTemplateResultInstruction<1>* instr, + int index) { + return Define(instr, + new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index)); +} + + +LInstruction* LChunkBuilder::DefineSameAsFirst( + LTemplateResultInstruction<1>* instr) { + return Define(instr, + new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT)); +} + + +LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr, + Register reg) { + return Define(instr, ToUnallocated(reg)); +} + + +LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { + HEnvironment* hydrogen_env = current_block_->last_environment(); + int argument_index_accumulator = 0; + ZoneList<HValue*> objects_to_materialize(0, zone()); + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator, + &objects_to_materialize)); + return instr; +} + + +LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, + HInstruction* hinstr, + CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); + +#ifdef DEBUG + instr->VerifyCall(); +#endif + instr->MarkAsCall(); + instr = AssignPointerMap(instr); + + // If instruction does not have side-effects lazy 
deoptimization + // after the call will try to deoptimize to the point before the call. + // Thus we still need to attach environment to this call even if + // call sequence can not deoptimize eagerly. + bool needs_environment = + (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || + !hinstr->HasObservableSideEffects(); + if (needs_environment && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + // We can't really figure out if the environment is needed or not. + instr->environment()->set_has_been_used(); + } + + return instr; +} + + +LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { + DCHECK(!instr->HasPointerMap()); + instr->set_pointer_map(new(zone()) LPointerMap(zone())); + return instr; +} + + +LUnallocated* LChunkBuilder::TempRegister() { + LUnallocated* operand = + new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister); + vreg = 0; + } + operand->set_virtual_register(vreg); + return operand; +} + + +LOperand* LChunkBuilder::FixedTemp(Register reg) { + LUnallocated* operand = ToUnallocated(reg); + DCHECK(operand->HasFixedPolicy()); + return operand; +} + + +LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { + return new(zone()) LLabel(instr->block()); +} + + +LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { + return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); +} + + +LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { + return AssignEnvironment(new(zone()) LDeoptimize); +} + + +LInstruction* LChunkBuilder::DoShift(Token::Value op, + HBitwiseBinaryOperation* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + 
DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); + + HValue* right_value = instr->right(); + LOperand* right = NULL; + int constant_value = 0; + bool does_deopt = false; + if (right_value->IsConstant()) { + HConstant* constant = HConstant::cast(right_value); + right = chunk_->DefineConstantOperand(constant); + constant_value = constant->Integer32Value() & 0x1f; + // Left shifts can deoptimize if we shift by > 0 and the result cannot be + // truncated to smi. + if (instr->representation().IsSmi() && constant_value > 0) { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + } + } else { + right = UseFixed(right_value, ecx); + } + + // Shift operations can only deoptimize if we do a logical shift by 0 and + // the result cannot be truncated to int32. + if (op == Token::SHR && constant_value == 0) { + if (FLAG_opt_safe_uint32_operations) { + does_deopt = !instr->CheckFlag(HInstruction::kUint32); + } else { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + } + } + + LInstruction* result = + DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); + return does_deopt ? 
AssignEnvironment(result) : result; + } else { + return DoArithmeticT(op, instr); + } +} + + +LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, + HArithmeticBinaryOperation* instr) { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + if (op == Token::MOD) { + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return MarkAsCall(DefineSameAsFirst(result), instr); + } else { + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return DefineSameAsFirst(result); + } +} + + +LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, + HBinaryOperation* instr) { + HValue* left = instr->left(); + HValue* right = instr->right(); + DCHECK(left->representation().IsTagged()); + DCHECK(right->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), esi); + LOperand* left_operand = UseFixed(left, edx); + LOperand* right_operand = UseFixed(right, eax); + LArithmeticT* result = + new(zone()) LArithmeticT(op, context, left_operand, right_operand); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { + DCHECK(is_building()); + current_block_ = block; + next_block_ = next_block; + if (block->IsStartBlock()) { + block->UpdateEnvironment(graph_->start_environment()); + argument_count_ = 0; + } else if (block->predecessors()->length() == 1) { + // We have a single predecessor => copy environment and outgoing + // argument count from the predecessor. 
+ DCHECK(block->phis()->length() == 0); + HBasicBlock* pred = block->predecessors()->at(0); + HEnvironment* last_environment = pred->last_environment(); + DCHECK(last_environment != NULL); + // Only copy the environment, if it is later used again. + if (pred->end()->SecondSuccessor() == NULL) { + DCHECK(pred->end()->FirstSuccessor() == block); + } else { + if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || + pred->end()->SecondSuccessor()->block_id() > block->block_id()) { + last_environment = last_environment->Copy(); + } + } + block->UpdateEnvironment(last_environment); + DCHECK(pred->argument_count() >= 0); + argument_count_ = pred->argument_count(); + } else { + // We are at a state join => process phis. + HBasicBlock* pred = block->predecessors()->at(0); + // No need to copy the environment, it cannot be used later. + HEnvironment* last_environment = pred->last_environment(); + for (int i = 0; i < block->phis()->length(); ++i) { + HPhi* phi = block->phis()->at(i); + if (phi->HasMergedIndex()) { + last_environment->SetValueAt(phi->merged_index(), phi); + } + } + for (int i = 0; i < block->deleted_phis()->length(); ++i) { + if (block->deleted_phis()->at(i) < last_environment->length()) { + last_environment->SetValueAt(block->deleted_phis()->at(i), + graph_->GetConstantUndefined()); + } + } + block->UpdateEnvironment(last_environment); + // Pick up the outgoing argument count of one of the predecessors. + argument_count_ = pred->argument_count(); + } + HInstruction* current = block->first(); + int start = chunk_->instructions()->length(); + while (current != NULL && !is_aborted()) { + // Code for constants in registers is generated lazily. 
+ if (!current->EmitAtUses()) { + VisitInstruction(current); + } + current = current->next(); + } + int end = chunk_->instructions()->length() - 1; + if (end >= start) { + block->set_first_instruction_index(start); + block->set_last_instruction_index(end); + } + block->set_argument_count(argument_count_); + next_block_ = NULL; + current_block_ = NULL; +} + + +void LChunkBuilder::VisitInstruction(HInstruction* current) { + HInstruction* old_current = current_instruction_; + current_instruction_ = current; + + LInstruction* instr = NULL; + if (current->CanReplaceWithDummyUses()) { + if (current->OperandCount() == 0) { + instr = DefineAsRegister(new(zone()) LDummy()); + } else { + DCHECK(!current->OperandAt(0)->IsControlInstruction()); + instr = DefineAsRegister(new(zone()) + LDummyUse(UseAny(current->OperandAt(0)))); + } + for (int i = 1; i < current->OperandCount(); ++i) { + if (current->OperandAt(i)->IsControlInstruction()) continue; + LInstruction* dummy = + new(zone()) LDummyUse(UseAny(current->OperandAt(i))); + dummy->set_hydrogen_value(current); + chunk_->AddInstruction(dummy, current_block_); + } + } else { + HBasicBlock* successor; + if (current->IsControlInstruction() && + HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && + successor != NULL) { + instr = new(zone()) LGoto(successor); + } else { + instr = current->CompileToLithium(this); + } + } + + argument_count_ += current->argument_delta(); + DCHECK(argument_count_ >= 0); + + if (instr != NULL) { + AddInstruction(instr, current); + } + + current_instruction_ = old_current; +} + + +void LChunkBuilder::AddInstruction(LInstruction* instr, + HInstruction* hydrogen_val) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
+ instr->set_hydrogen_value(hydrogen_val); + +#if DEBUG + // Make sure that the lithium instruction has either no fixed register + // constraints in temps or the result OR no uses that are only used at + // start. If this invariant doesn't hold, the register allocator can decide + // to insert a split of a range immediately before the instruction due to an + // already allocated register needing to be used for the instruction's fixed + // register constraint. In this case, The register allocator won't see an + // interference between the split child and the use-at-start (it would if + // the it was just a plain use), so it is free to move the split child into + // the same register that is used for the use-at-start. + // See https://code.google.com/p/chromium/issues/detail?id=201590 + if (!(instr->ClobbersRegisters() && + instr->ClobbersDoubleRegisters(isolate()))) { + int fixed = 0; + int used_at_start = 0; + for (UseIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->IsUsedAtStart()) ++used_at_start; + } + if (instr->Output() != NULL) { + if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed; + } + for (TempIterator it(instr); !it.Done(); it.Advance()) { + LUnallocated* operand = LUnallocated::cast(it.Current()); + if (operand->HasFixedPolicy()) ++fixed; + } + DCHECK(fixed == 0 || used_at_start == 0); + } +#endif + + if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { + instr = AssignPointerMap(instr); + } + if (FLAG_stress_environments && !instr->HasEnvironment()) { + instr = AssignEnvironment(instr); + } + if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) { + // TODO(olivf) Since phis of spilled values are joined as registers + // (not in the stack slot), we need to allow the goto gaps to keep one + // x87 register alive. To ensure all other values are still spilled, we + // insert a fpu register barrier right before. 
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate()); + clobber->set_hydrogen_value(hydrogen_val); + chunk_->AddInstruction(clobber, current_block_); + } + chunk_->AddInstruction(instr, current_block_); + + if (instr->IsCall()) { + HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; + LInstruction* instruction_needing_environment = NULL; + if (hydrogen_val->HasObservableSideEffects()) { + HSimulate* sim = HSimulate::cast(hydrogen_val->next()); + instruction_needing_environment = instr; + sim->ReplayEnvironment(current_block_->last_environment()); + hydrogen_value_for_lazy_bailout = sim; + } + LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); + bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); + chunk_->AddInstruction(bailout, current_block_); + if (instruction_needing_environment != NULL) { + // Store the lazy deopt environment with the instruction if needed. + // Right now it is only used for LInstanceOfKnownGlobal. + instruction_needing_environment-> + SetDeferredLazyDeoptimizationEnvironment(bailout->environment()); + } + } +} + + +LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { + return new(zone()) LGoto(instr->FirstSuccessor()); +} + + +LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { + HValue* value = instr->value(); + Representation r = value->representation(); + HType type = value->type(); + ToBooleanStub::Types expected = instr->expected_input_types(); + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() || + type.IsJSArray() || type.IsHeapNumber() || type.IsString(); + LOperand* temp = !easy_case && expected.NeedsMap() ? 
TempRegister() : NULL; + LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp); + if (!easy_case && + ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) || + !expected.IsGeneric())) { + branch = AssignEnvironment(branch); + } + return branch; +} + + +LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { + return new(zone()) LDebugBreak(); +} + + +LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LCmpMapAndBranch(value); +} + + +LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { + info()->MarkAsRequiresFrame(); + return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value()))); +} + + +LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { + info()->MarkAsRequiresFrame(); + return DefineAsRegister(new(zone()) LArgumentsElements); +} + + +LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { + LOperand* left = UseFixed(instr->left(), InstanceofStub::left()); + LOperand* right = UseFixed(instr->right(), InstanceofStub::right()); + LOperand* context = UseFixed(instr->context(), esi); + LInstanceOf* result = new(zone()) LInstanceOf(context, left, right); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( + HInstanceOfKnownGlobal* instr) { + LInstanceOfKnownGlobal* result = + new(zone()) LInstanceOfKnownGlobal( + UseFixed(instr->context(), esi), + UseFixed(instr->left(), InstanceofStub::left()), + FixedTemp(edi)); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { + LOperand* receiver = UseRegister(instr->receiver()); + LOperand* function = UseRegister(instr->function()); + LOperand* temp = TempRegister(); + LWrapReceiver* result = + new(zone()) LWrapReceiver(receiver, function, 
temp); + return AssignEnvironment(DefineSameAsFirst(result)); +} + + +LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { + LOperand* function = UseFixed(instr->function(), edi); + LOperand* receiver = UseFixed(instr->receiver(), eax); + LOperand* length = UseFixed(instr->length(), ebx); + LOperand* elements = UseFixed(instr->elements(), ecx); + LApplyArguments* result = new(zone()) LApplyArguments(function, + receiver, + length, + elements); + return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) { + int argc = instr->OperandCount(); + for (int i = 0; i < argc; ++i) { + LOperand* argument = UseAny(instr->argument(i)); + AddInstruction(new(zone()) LPushArgument(argument), instr); + } + return NULL; +} + + +LInstruction* LChunkBuilder::DoStoreCodeEntry( + HStoreCodeEntry* store_code_entry) { + LOperand* function = UseRegister(store_code_entry->function()); + LOperand* code_object = UseTempRegister(store_code_entry->code_object()); + return new(zone()) LStoreCodeEntry(function, code_object); +} + + +LInstruction* LChunkBuilder::DoInnerAllocatedObject( + HInnerAllocatedObject* instr) { + LOperand* base_object = UseRegisterAtStart(instr->base_object()); + LOperand* offset = UseRegisterOrConstantAtStart(instr->offset()); + return DefineAsRegister( + new(zone()) LInnerAllocatedObject(base_object, offset)); +} + + +LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { + return instr->HasNoUses() + ? 
NULL + : DefineAsRegister(new(zone()) LThisFunction); +} + + +LInstruction* LChunkBuilder::DoContext(HContext* instr) { + if (instr->HasNoUses()) return NULL; + + if (info()->IsStub()) { + return DefineFixed(new(zone()) LContext, esi); + } + + return DefineAsRegister(new(zone()) LContext); +} + + +LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { + LOperand* context = UseFixed(instr->context(), esi); + return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); +} + + +LInstruction* LChunkBuilder::DoCallJSFunction( + HCallJSFunction* instr) { + LOperand* function = UseFixed(instr->function(), edi); + + LCallJSFunction* result = new(zone()) LCallJSFunction(function); + + return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoCallWithDescriptor( + HCallWithDescriptor* instr) { + const InterfaceDescriptor* descriptor = instr->descriptor(); + LOperand* target = UseRegisterOrConstantAtStart(instr->target()); + ZoneList<LOperand*> ops(instr->OperandCount(), zone()); + ops.Add(target, zone()); + for (int i = 1; i < instr->OperandCount(); i++) { + LOperand* op = UseFixed(instr->OperandAt(i), + descriptor->GetParameterRegister(i - 1)); + ops.Add(op, zone()); + } + + LCallWithDescriptor* result = new(zone()) LCallWithDescriptor( + descriptor, ops, zone()); + return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* function = UseFixed(instr->function(), edi); + LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); + return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { + switch (instr->op()) { + case kMathFloor: return DoMathFloor(instr); + case kMathRound: return DoMathRound(instr); + case 
kMathFround: return DoMathFround(instr); + case kMathAbs: return DoMathAbs(instr); + case kMathLog: return DoMathLog(instr); + case kMathExp: return DoMathExp(instr); + case kMathSqrt: return DoMathSqrt(instr); + case kMathPowHalf: return DoMathPowHalf(instr); + case kMathClz32: return DoMathClz32(instr); + default: + UNREACHABLE(); + return NULL; + } +} + + +LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + LMathFloor* result = new(zone()) LMathFloor(input); + return AssignEnvironment(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { + // Crankshaft is turned off for nosse2. + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + LMathFround* result = new (zone()) LMathFround(input); + return AssignEnvironment(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { + LOperand* context = UseAny(instr->context()); // Deferred use. 
+ LOperand* input = UseRegisterAtStart(instr->value()); + LInstruction* result = + DefineSameAsFirst(new(zone()) LMathAbs(context, input)); + Representation r = instr->value()->representation(); + if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result); + if (!r.IsDouble()) result = AssignEnvironment(result); + return result; +} + + +LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); + LOperand* input = UseRegisterAtStart(instr->value()); + return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr); +} + + +LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + LMathClz32* result = new(zone()) LMathClz32(input); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->value()->representation().IsDouble()); + LOperand* value = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LMathExp* result = new(zone()) LMathExp(value, temp1, temp2); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + LMathSqrt* result = new(zone()) LMathSqrt(input); + return DefineSameAsFirst(result); +} + + +LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + LOperand* temp = TempRegister(); + LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); + return DefineSameAsFirst(result); +} + + +LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* constructor = UseFixed(instr->constructor(), edi); + LCallNew* result = new(zone()) 
LCallNew(context, constructor); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* constructor = UseFixed(instr->constructor(), edi); + LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* function = UseFixed(instr->function(), edi); + LCallFunction* call = new(zone()) LCallFunction(context, function); + return MarkAsCall(DefineFixed(call, eax), instr); +} + + +LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { + LOperand* context = UseFixed(instr->context(), esi); + return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr); +} + + +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + +LInstruction* LChunkBuilder::DoShr(HShr* instr) { + return DoShift(Token::SHR, instr); +} + + +LInstruction* LChunkBuilder::DoSar(HSar* instr) { + return DoShift(Token::SAR, instr); +} + + +LInstruction* LChunkBuilder::DoShl(HShl* instr) { + return DoShift(Token::SHL, instr); +} + + +LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32)); + + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); + return DefineSameAsFirst(new(zone()) LBitI(left, right)); + } else { + return DoArithmeticT(instr->op(), instr); + } +} + + +LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) { + 
DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I( + dividend, divisor)); + if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || + (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) || + (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && + divisor != 1 && divisor != -1)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) { + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LOperand* temp1 = FixedTemp(eax); + LOperand* temp2 = FixedTemp(edx); + LInstruction* result = DefineFixed(new(zone()) LDivByConstI( + dividend, divisor, temp1, temp2), edx); + if (divisor == 0 || + (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || + !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseFixed(instr->left(), eax); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = FixedTemp(edx); + LInstruction* result = DefineFixed(new(zone()) LDivI( + dividend, divisor, temp), eax); + if 
(instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + instr->CheckFlag(HValue::kCanOverflow) || + !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { + if (instr->representation().IsSmiOrInteger32()) { + if (instr->RightIsPowerOf2()) { + return DoDivByPowerOf2I(instr); + } else if (instr->right()->IsConstant()) { + return DoDivByConstI(instr); + } else { + return DoDivI(instr); + } + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::DIV, instr); + } else { + return DoArithmeticT(Token::DIV, instr); + } +} + + +LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) { + LOperand* dividend = UseRegisterAtStart(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I( + dividend, divisor)); + if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) || + (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LOperand* temp1 = FixedTemp(eax); + LOperand* temp2 = FixedTemp(edx); + LOperand* temp3 = + ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) || + (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ? 
+ NULL : TempRegister(); + LInstruction* result = + DefineFixed(new(zone()) LFlooringDivByConstI(dividend, + divisor, + temp1, + temp2, + temp3), + edx); + if (divisor == 0 || + (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseFixed(instr->left(), eax); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = FixedTemp(edx); + LInstruction* result = DefineFixed(new(zone()) LFlooringDivI( + dividend, divisor, temp), eax); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero) || + instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { + if (instr->RightIsPowerOf2()) { + return DoFlooringDivByPowerOf2I(instr); + } else if (instr->right()->IsConstant()) { + return DoFlooringDivByConstI(instr); + } else { + return DoFlooringDivI(instr); + } +} + + +LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegisterAtStart(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I( + dividend, divisor)); + if (instr->CheckFlag(HValue::kLeftCanBeNegative) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* 
LChunkBuilder::DoModByConstI(HMod* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseRegister(instr->left()); + int32_t divisor = instr->right()->GetInteger32Constant(); + LOperand* temp1 = FixedTemp(eax); + LOperand* temp2 = FixedTemp(edx); + LInstruction* result = DefineFixed(new(zone()) LModByConstI( + dividend, divisor, temp1, temp2), eax); + if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoModI(HMod* instr) { + DCHECK(instr->representation().IsSmiOrInteger32()); + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* dividend = UseFixed(instr->left(), eax); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = FixedTemp(edx); + LInstruction* result = DefineFixed(new(zone()) LModI( + dividend, divisor, temp), edx); + if (instr->CheckFlag(HValue::kCanBeDivByZero) || + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoMod(HMod* instr) { + if (instr->representation().IsSmiOrInteger32()) { + if (instr->RightIsPowerOf2()) { + return DoModByPowerOf2I(instr); + } else if (instr->right()->IsConstant()) { + return DoModByConstI(instr); + } else { + return DoModI(instr); + } + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::MOD, instr); + } else { + return DoArithmeticT(Token::MOD, instr); + } +} + + +LInstruction* LChunkBuilder::DoMul(HMul* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + 
DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseOrConstant(instr->BetterRightOperand()); + LOperand* temp = NULL; + if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + temp = TempRegister(); + } + LMulI* mul = new(zone()) LMulI(left, right, temp); + if (instr->CheckFlag(HValue::kCanOverflow) || + instr->CheckFlag(HValue::kBailoutOnMinusZero)) { + AssignEnvironment(mul); + } + return DefineSameAsFirst(mul); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::MUL, instr); + } else { + return DoArithmeticT(Token::MUL, instr); + } +} + + +LInstruction* LChunkBuilder::DoSub(HSub* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseOrConstantAtStart(instr->right()); + LSubI* sub = new(zone()) LSubI(left, right); + LInstruction* result = DefineSameAsFirst(sub); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::SUB, instr); + } else { + return DoArithmeticT(Token::SUB, instr); + } +} + + +LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + // Check to see if it would be advantageous to use an lea instruction rather + // than an add. This is the case when no overflow check is needed and there + // are multiple uses of the add's inputs, so using a 3-register add will + // preserve all input values for later uses. 
+ bool use_lea = LAddI::UseLea(instr); + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + HValue* right_candidate = instr->BetterRightOperand(); + LOperand* right = use_lea + ? UseRegisterOrConstantAtStart(right_candidate) + : UseOrConstantAtStart(right_candidate); + LAddI* add = new(zone()) LAddI(left, right); + bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); + LInstruction* result = use_lea + ? DefineAsRegister(add) + : DefineSameAsFirst(add); + if (can_overflow) { + result = AssignEnvironment(result); + } + return result; + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::ADD, instr); + } else if (instr->representation().IsExternal()) { + DCHECK(instr->left()->representation().IsExternal()); + DCHECK(instr->right()->representation().IsInteger32()); + DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); + bool use_lea = LAddI::UseLea(instr); + LOperand* left = UseRegisterAtStart(instr->left()); + HValue* right_candidate = instr->right(); + LOperand* right = use_lea + ? UseRegisterOrConstantAtStart(right_candidate) + : UseOrConstantAtStart(right_candidate); + LAddI* add = new(zone()) LAddI(left, right); + LInstruction* result = use_lea + ? 
DefineAsRegister(add) + : DefineSameAsFirst(add); + return result; + } else { + return DoArithmeticT(Token::ADD, instr); + } +} + + +LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) { + LOperand* left = NULL; + LOperand* right = NULL; + if (instr->representation().IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(instr->representation())); + DCHECK(instr->right()->representation().Equals(instr->representation())); + left = UseRegisterAtStart(instr->BetterLeftOperand()); + right = UseOrConstantAtStart(instr->BetterRightOperand()); + } else { + DCHECK(instr->representation().IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + left = UseRegisterAtStart(instr->left()); + right = UseRegisterAtStart(instr->right()); + } + LMathMinMax* minmax = new(zone()) LMathMinMax(left, right); + return DefineSameAsFirst(minmax); +} + + +LInstruction* LChunkBuilder::DoPower(HPower* instr) { + // Crankshaft is turned off for nosse2. 
+ UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { + DCHECK(instr->left()->representation().IsSmiOrTagged()); + DCHECK(instr->right()->representation().IsSmiOrTagged()); + LOperand* context = UseFixed(instr->context(), esi); + LOperand* left = UseFixed(instr->left(), edx); + LOperand* right = UseFixed(instr->right(), eax); + LCmpT* result = new(zone()) LCmpT(context, left, right); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoCompareNumericAndBranch( + HCompareNumericAndBranch* instr) { + Representation r = instr->representation(); + if (r.IsSmiOrInteger32()) { + DCHECK(instr->left()->representation().Equals(r)); + DCHECK(instr->right()->representation().Equals(r)); + LOperand* left = UseRegisterOrConstantAtStart(instr->left()); + LOperand* right = UseOrConstantAtStart(instr->right()); + return new(zone()) LCompareNumericAndBranch(left, right); + } else { + DCHECK(r.IsDouble()); + DCHECK(instr->left()->representation().IsDouble()); + DCHECK(instr->right()->representation().IsDouble()); + LOperand* left; + LOperand* right; + if (CanBeImmediateConstant(instr->left()) && + CanBeImmediateConstant(instr->right())) { + // The code generator requires either both inputs to be constant + // operands, or neither. 
+ left = UseConstant(instr->left()); + right = UseConstant(instr->right()); + } else { + left = UseRegisterAtStart(instr->left()); + right = UseRegisterAtStart(instr->right()); + } + return new(zone()) LCompareNumericAndBranch(left, right); + } +} + + +LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( + HCompareObjectEqAndBranch* instr) { + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseOrConstantAtStart(instr->right()); + return new(zone()) LCmpObjectEqAndBranch(left, right); +} + + +LInstruction* LChunkBuilder::DoCompareHoleAndBranch( + HCompareHoleAndBranch* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LCmpHoleAndBranch(value); +} + + +LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch( + HCompareMinusZeroAndBranch* instr) { + LOperand* value = UseRegister(instr->value()); + LOperand* scratch = TempRegister(); + return new(zone()) LCompareMinusZeroAndBranch(value, scratch); +} + + +LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { + DCHECK(instr->value()->representation().IsSmiOrTagged()); + LOperand* temp = TempRegister(); + return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp); +} + + +LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* temp = TempRegister(); + return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp); +} + + +LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LIsSmiAndBranch(Use(instr->value())); +} + + +LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( + HIsUndetectableAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LIsUndetectableAndBranch( + UseRegisterAtStart(instr->value()), TempRegister()); +} + + +LInstruction* LChunkBuilder::DoStringCompareAndBranch( + 
HStringCompareAndBranch* instr) { + DCHECK(instr->left()->representation().IsTagged()); + DCHECK(instr->right()->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), esi); + LOperand* left = UseFixed(instr->left(), edx); + LOperand* right = UseFixed(instr->right(), eax); + + LStringCompareAndBranch* result = new(zone()) + LStringCompareAndBranch(context, left, right); + + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( + HHasInstanceTypeAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LHasInstanceTypeAndBranch( + UseRegisterAtStart(instr->value()), + TempRegister()); +} + + +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + DCHECK(instr->value()->representation().IsTagged()); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value)); +} + + +LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( + HHasCachedArrayIndexAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LHasCachedArrayIndexAndBranch( + UseRegisterAtStart(instr->value())); +} + + +LInstruction* LChunkBuilder::DoClassOfTestAndBranch( + HClassOfTestAndBranch* instr) { + DCHECK(instr->value()->representation().IsTagged()); + return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()), + TempRegister(), + TempRegister()); +} + + +LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) { + LOperand* map = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new(zone()) LMapEnumLength(map)); +} + + +LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { + LOperand* date = UseFixed(instr->value(), eax); + LDateField* result = + new(zone()) LDateField(date, FixedTemp(ecx), instr->index()); + return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* 
LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { + LOperand* string = UseRegisterAtStart(instr->string()); + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index)); +} + + +LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) { + if (instr->encoding() == String::ONE_BYTE_ENCODING) { + if (FLAG_debug_code) { + return UseFixed(instr->value(), eax); + } else { + return UseFixedOrConstant(instr->value(), eax); + } + } else { + if (FLAG_debug_code) { + return UseRegisterAtStart(instr->value()); + } else { + return UseRegisterOrConstantAtStart(instr->value()); + } + } +} + + +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegisterAtStart(instr->string()); + LOperand* index = FLAG_debug_code + ? UseRegisterAtStart(instr->index()) + : UseRegisterOrConstantAtStart(instr->index()); + LOperand* value = GetSeqStringSetCharOperand(instr); + LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL; + LInstruction* result = new(zone()) LSeqStringSetChar(context, string, + index, value); + if (FLAG_debug_code) { + result = MarkAsCall(result, instr); + } + return result; +} + + +LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { + if (!FLAG_debug_code && instr->skip_check()) return NULL; + LOperand* index = UseRegisterOrConstantAtStart(instr->index()); + LOperand* length = !index->IsConstantOperand() + ? 
UseOrConstantAtStart(instr->length()) + : UseAtStart(instr->length()); + LInstruction* result = new(zone()) LBoundsCheck(index, length); + if (!FLAG_debug_code || !instr->skip_check()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( + HBoundsCheckBaseIndexInformation* instr) { + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { + // The control instruction marking the end of a block that completed + // abruptly (e.g., threw an exception). There is nothing specific to do. + return NULL; +} + + +LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { + return NULL; +} + + +LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { + // All HForceRepresentation instructions should be eliminated in the + // representation change phase of Hydrogen. + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoChange(HChange* instr) { + Representation from = instr->from(); + Representation to = instr->to(); + HValue* val = instr->value(); + if (from.IsSmi()) { + if (to.IsTagged()) { + LOperand* value = UseRegister(val); + return DefineSameAsFirst(new(zone()) LDummyUse(value)); + } + from = Representation::Tagged(); + } + if (from.IsTagged()) { + if (to.IsDouble()) { + LOperand* value = UseRegister(val); + LOperand* temp = TempRegister(); + LInstruction* result = + DefineAsRegister(new(zone()) LNumberUntagD(value, temp)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; + } else if (to.IsSmi()) { + LOperand* value = UseRegister(val); + if (val->type().IsSmi()) { + return DefineSameAsFirst(new(zone()) LDummyUse(value)); + } + return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); + } else { + DCHECK(to.IsInteger32()); + if (val->type().IsSmi() || val->representation().IsSmi()) { + LOperand* value = UseRegister(val); + return 
DefineSameAsFirst(new(zone()) LSmiUntag(value, false)); + } else { + LOperand* value = UseRegister(val); + LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value)); + if (!val->representation().IsSmi()) result = AssignEnvironment(result); + return result; + } + } + } else if (from.IsDouble()) { + if (to.IsTagged()) { + info()->MarkAsDeferredCalling(); + LOperand* value = UseRegisterAtStart(val); + LOperand* temp = FLAG_inline_new ? TempRegister() : NULL; + LUnallocated* result_temp = TempRegister(); + LNumberTagD* result = new(zone()) LNumberTagD(value, temp); + return AssignPointerMap(Define(result, result_temp)); + } else if (to.IsSmi()) { + LOperand* value = UseRegister(val); + return AssignEnvironment( + DefineAsRegister(new(zone()) LDoubleToSmi(value))); + } else { + DCHECK(to.IsInteger32()); + bool truncating = instr->CanTruncateToInt32(); + LOperand* value = UseRegister(val); + LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value)); + if (!truncating) result = AssignEnvironment(result); + return result; + } + } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); + if (to.IsTagged()) { + if (!instr->CheckFlag(HValue::kCanOverflow)) { + LOperand* value = UseRegister(val); + return DefineSameAsFirst(new(zone()) LSmiTag(value)); + } else if (val->CheckFlag(HInstruction::kUint32)) { + LOperand* value = UseRegister(val); + LOperand* temp = TempRegister(); + LNumberTagU* result = new(zone()) LNumberTagU(value, temp); + return AssignPointerMap(DefineSameAsFirst(result)); + } else { + LOperand* value = UseRegister(val); + LOperand* temp = TempRegister(); + LNumberTagI* result = new(zone()) LNumberTagI(value, temp); + return AssignPointerMap(DefineSameAsFirst(result)); + } + } else if (to.IsSmi()) { + LOperand* value = UseRegister(val); + LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value)); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; + } else { 
+ DCHECK(to.IsDouble()); + if (val->CheckFlag(HInstruction::kUint32)) { + return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val))); + } else { + return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val))); + } + } + } + UNREACHABLE(); + return NULL; +} + + +LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { + LOperand* value = UseAtStart(instr->value()); + LInstruction* result = new(zone()) LCheckNonSmi(value); + if (!instr->value()->type().IsHeapObject()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckSmi(value)); +} + + +LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* temp = TempRegister(); + LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp); + return AssignEnvironment(result); +} + + +LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { + // If the object is in new space, we'll emit a global cell compare and so + // want the value in a register. If the object gets promoted before we + // emit code, we will still get the register but will do an immediate + // compare instead of the cell compare. This is safe. + LOperand* value = instr->object_in_new_space() + ? 
UseRegisterAtStart(instr->value()) : UseAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckValue(value)); +} + + +LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { + if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps; + LOperand* value = UseRegisterAtStart(instr->value()); + LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value)); + if (instr->HasMigrationTarget()) { + info()->MarkAsDeferredCalling(); + result = AssignPointerMap(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { + HValue* value = instr->value(); + Representation input_rep = value->representation(); + if (input_rep.IsDouble()) { + UNREACHABLE(); + return NULL; + } else if (input_rep.IsInteger32()) { + LOperand* reg = UseFixed(value, eax); + return DefineFixed(new(zone()) LClampIToUint8(reg), eax); + } else { + DCHECK(input_rep.IsSmiOrTagged()); + LOperand* value = UseRegister(instr->value()); + LClampTToUint8NoSSE2* res = + new(zone()) LClampTToUint8NoSSE2(value, TempRegister(), + TempRegister(), TempRegister()); + return AssignEnvironment(DefineFixed(res, ecx)); + } +} + + +LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) { + HValue* value = instr->value(); + DCHECK(value->representation().IsDouble()); + return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value))); +} + + +LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) { + LOperand* lo = UseRegister(instr->lo()); + LOperand* hi = UseRegister(instr->hi()); + return DefineAsRegister(new(zone()) LConstructDouble(hi, lo)); +} + + +LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { + LOperand* context = info()->IsStub() ? 
UseFixed(instr->context(), esi) : NULL; + LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); + return new(zone()) LReturn( + UseFixed(instr->value(), eax), context, parameter_count); +} + + +LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { + Representation r = instr->representation(); + if (r.IsSmi()) { + return DefineAsRegister(new(zone()) LConstantS); + } else if (r.IsInteger32()) { + return DefineAsRegister(new(zone()) LConstantI); + } else if (r.IsDouble()) { + double value = instr->DoubleValue(); + bool value_is_zero = BitCast<uint64_t, double>(value) == 0; + LOperand* temp = value_is_zero ? NULL : TempRegister(); + return DefineAsRegister(new(zone()) LConstantD(temp)); + } else if (r.IsExternal()) { + return DefineAsRegister(new(zone()) LConstantE); + } else if (r.IsTagged()) { + return DefineAsRegister(new(zone()) LConstantT); + } else { + UNREACHABLE(); + return NULL; + } +} + + +LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { + LLoadGlobalCell* result = new(zone()) LLoadGlobalCell; + return instr->RequiresHoleCheck() + ? AssignEnvironment(DefineAsRegister(result)) + : DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* global_object = UseFixed(instr->global_object(), + LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + + LLoadGlobalGeneric* result = + new(zone()) LLoadGlobalGeneric(context, global_object, vector); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { + LStoreGlobalCell* result = + new(zone()) LStoreGlobalCell(UseRegister(instr->value())); + return instr->RequiresHoleCheck() ? 
AssignEnvironment(result) : result; +} + + +LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { + LOperand* context = UseRegisterAtStart(instr->value()); + LInstruction* result = + DefineAsRegister(new(zone()) LLoadContextSlot(context)); + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { + LOperand* value; + LOperand* temp; + LOperand* context = UseRegister(instr->context()); + if (instr->NeedsWriteBarrier()) { + value = UseTempRegister(instr->value()); + temp = TempRegister(); + } else { + value = UseRegister(instr->value()); + temp = NULL; + } + LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp); + if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { + LOperand* obj = (instr->access().IsExternalMemory() && + instr->access().offset() == 0) + ? 
UseRegisterOrConstantAtStart(instr->object()) + : UseRegisterAtStart(instr->object()); + return DefineAsRegister(new(zone()) LLoadNamedField(obj)); +} + + +LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric( + context, object, vector); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoLoadFunctionPrototype( + HLoadFunctionPrototype* instr) { + return AssignEnvironment(DefineAsRegister( + new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()), + TempRegister()))); +} + + +LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { + return DefineAsRegister(new(zone()) LLoadRoot); +} + + +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { + DCHECK(instr->key()->representation().IsSmiOrInteger32()); + ElementsKind elements_kind = instr->elements_kind(); + bool clobbers_key = ExternalArrayOpRequiresTemp( + instr->key()->representation(), elements_kind); + LOperand* key = clobbers_key + ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + LInstruction* result = NULL; + + if (!instr->is_typed_elements()) { + LOperand* obj = UseRegisterAtStart(instr->elements()); + result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key)); + } else { + DCHECK( + (instr->representation().IsInteger32() && + !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) || + (instr->representation().IsDouble() && + (IsDoubleOrFloatElementsKind(instr->elements_kind())))); + LOperand* backing_store = UseRegister(instr->elements()); + result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key)); + } + + if ((instr->is_external() || instr->is_fixed_typed_array()) ? 
+ // see LCodeGen::DoLoadKeyedExternalArray + ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS || + instr->elements_kind() == UINT32_ELEMENTS) && + !instr->CheckFlag(HInstruction::kUint32)) : + // see LCodeGen::DoLoadKeyedFixedDoubleArray and + // LCodeGen::DoLoadKeyedFixedArray + instr->RequiresHoleCheck()) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); + LOperand* vector = NULL; + if (FLAG_vector_ics) { + vector = FixedTemp(LoadIC::VectorRegister()); + } + LLoadKeyedGeneric* result = + new(zone()) LLoadKeyedGeneric(context, object, key, vector); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); + + // Determine if we need a byte register in this case for the value. 
+ bool val_is_fixed_register = + elements_kind == EXTERNAL_INT8_ELEMENTS || + elements_kind == EXTERNAL_UINT8_ELEMENTS || + elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS || + elements_kind == UINT8_ELEMENTS || + elements_kind == INT8_ELEMENTS || + elements_kind == UINT8_CLAMPED_ELEMENTS; + if (val_is_fixed_register) { + return UseFixed(instr->value(), eax); + } + + if (IsDoubleOrFloatElementsKind(elements_kind)) { + return UseRegisterAtStart(instr->value()); + } + + return UseRegister(instr->value()); +} + + +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + if (!instr->is_typed_elements()) { + DCHECK(instr->elements()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsSmi()); + + if (instr->value()->representation().IsDouble()) { + LOperand* object = UseRegisterAtStart(instr->elements()); + LOperand* val = NULL; + val = UseRegisterAtStart(instr->value()); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + return new(zone()) LStoreKeyed(object, key, val); + } else { + DCHECK(instr->value()->representation().IsSmiOrTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + + LOperand* obj = UseRegister(instr->elements()); + LOperand* val; + LOperand* key; + if (needs_write_barrier) { + val = UseTempRegister(instr->value()); + key = UseTempRegister(instr->key()); + } else { + val = UseRegisterOrConstantAtStart(instr->value()); + key = UseRegisterOrConstantAtStart(instr->key()); + } + return new(zone()) LStoreKeyed(obj, key, val); + } + } + + ElementsKind elements_kind = instr->elements_kind(); + DCHECK( + (instr->value()->representation().IsInteger32() && + !IsDoubleOrFloatElementsKind(elements_kind)) || + (instr->value()->representation().IsDouble() && + IsDoubleOrFloatElementsKind(elements_kind))); + DCHECK((instr->is_fixed_typed_array() && + instr->elements()->representation().IsTagged()) || + (instr->is_external() && + 
instr->elements()->representation().IsExternal())); + + LOperand* backing_store = UseRegister(instr->elements()); + LOperand* val = GetStoreKeyedValueOperand(instr); + bool clobbers_key = ExternalArrayOpRequiresTemp( + instr->key()->representation(), elements_kind); + LOperand* key = clobbers_key + ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + return new(zone()) LStoreKeyed(backing_store, key, val); +} + + +LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* object = UseFixed(instr->object(), + KeyedStoreIC::ReceiverRegister()); + LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); + LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); + + DCHECK(instr->object()->representation().IsTagged()); + DCHECK(instr->key()->representation().IsTagged()); + DCHECK(instr->value()->representation().IsTagged()); + + LStoreKeyedGeneric* result = + new(zone()) LStoreKeyedGeneric(context, object, key, value); + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoTransitionElementsKind( + HTransitionElementsKind* instr) { + if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { + LOperand* object = UseRegister(instr->object()); + LOperand* new_map_reg = TempRegister(); + LOperand* temp_reg = TempRegister(); + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, NULL, + new_map_reg, temp_reg); + return result; + } else { + LOperand* object = UseFixed(instr->object(), eax); + LOperand* context = UseFixed(instr->context(), esi); + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, context, NULL, NULL); + return MarkAsCall(result, instr); + } +} + + +LInstruction* LChunkBuilder::DoTrapAllocationMemento( + HTrapAllocationMemento* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* temp = TempRegister(); + 
LTrapAllocationMemento* result = + new(zone()) LTrapAllocationMemento(object, temp); + return AssignEnvironment(result); +} + + +LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { + bool is_in_object = instr->access().IsInobject(); + bool is_external_location = instr->access().IsExternalMemory() && + instr->access().offset() == 0; + bool needs_write_barrier = instr->NeedsWriteBarrier(); + bool needs_write_barrier_for_map = instr->has_transition() && + instr->NeedsWriteBarrierForMap(); + + LOperand* obj; + if (needs_write_barrier) { + obj = is_in_object + ? UseRegister(instr->object()) + : UseTempRegister(instr->object()); + } else if (is_external_location) { + DCHECK(!is_in_object); + DCHECK(!needs_write_barrier); + DCHECK(!needs_write_barrier_for_map); + obj = UseRegisterOrConstant(instr->object()); + } else { + obj = needs_write_barrier_for_map + ? UseRegister(instr->object()) + : UseRegisterAtStart(instr->object()); + } + + bool can_be_constant = instr->value()->IsConstant() && + HConstant::cast(instr->value())->NotInNewSpace() && + !instr->field_representation().IsDouble(); + + LOperand* val; + if (instr->field_representation().IsInteger8() || + instr->field_representation().IsUInteger8()) { + // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx). + // Just force the value to be in eax and we're safe here. + val = UseFixed(instr->value(), eax); + } else if (needs_write_barrier) { + val = UseTempRegister(instr->value()); + } else if (can_be_constant) { + val = UseRegisterOrConstant(instr->value()); + } else if (instr->field_representation().IsSmi()) { + val = UseTempRegister(instr->value()); + } else if (instr->field_representation().IsDouble()) { + val = UseRegisterAtStart(instr->value()); + } else { + val = UseRegister(instr->value()); + } + + // We only need a scratch register if we have a write barrier or we + // have a store into the properties array (not in-object-property). 
+ LOperand* temp = (!is_in_object || needs_write_barrier || + needs_write_barrier_for_map) ? TempRegister() : NULL; + + // We need a temporary register for write barrier of the map field. + LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL; + + return new(zone()) LStoreNamedField(obj, val, temp, temp_map); +} + + +LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister()); + LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister()); + + LStoreNamedGeneric* result = + new(zone()) LStoreNamedGeneric(context, object, value); + return MarkAsCall(result, instr); +} + + +LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* left = UseFixed(instr->left(), edx); + LOperand* right = UseFixed(instr->right(), eax); + LStringAdd* string_add = new(zone()) LStringAdd(context, left, right); + return MarkAsCall(DefineFixed(string_add, eax), instr); +} + + +LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { + LOperand* string = UseTempRegister(instr->string()); + LOperand* index = UseTempRegister(instr->index()); + LOperand* context = UseAny(instr->context()); + LStringCharCodeAt* result = + new(zone()) LStringCharCodeAt(context, string, index); + return AssignPointerMap(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { + LOperand* char_code = UseRegister(instr->value()); + LOperand* context = UseAny(instr->context()); + LStringCharFromCode* result = + new(zone()) LStringCharFromCode(context, char_code); + return AssignPointerMap(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { + info()->MarkAsDeferredCalling(); + LOperand* context = UseAny(instr->context()); + LOperand* size = 
instr->size()->IsConstant() + ? UseConstant(instr->size()) + : UseTempRegister(instr->size()); + LOperand* temp = TempRegister(); + LAllocate* result = new(zone()) LAllocate(context, size, temp); + return AssignPointerMap(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { + LOperand* context = UseFixed(instr->context(), esi); + return MarkAsCall( + DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr); +} + + +LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { + LOperand* context = UseFixed(instr->context(), esi); + return MarkAsCall( + DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr); +} + + +LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { + DCHECK(argument_count_ == 0); + allocator_->MarkAsOsrEntry(); + current_block_->last_environment()->set_ast_id(instr->ast_id()); + return AssignEnvironment(new(zone()) LOsrEntry); +} + + +LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { + LParameter* result = new(zone()) LParameter; + if (instr->kind() == HParameter::STACK_PARAMETER) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + DCHECK(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + info()->code_stub()->GetInterfaceDescriptor(); + int index = static_cast<int>(instr->index()); + Register reg = descriptor->GetEnvironmentParameterRegister(index); + return DefineFixed(result, reg); + } +} + + +LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { + // Use an index that corresponds to the location in the unoptimized frame, + // which the optimized frame will subsume. 
+ int env_index = instr->index(); + int spill_index = 0; + if (instr->environment()->is_parameter_index(env_index)) { + spill_index = chunk()->GetParameterStackSlot(env_index); + } else { + spill_index = env_index - instr->environment()->first_local_index(); + if (spill_index > LUnallocated::kMaxFixedSlotIndex) { + Abort(kNotEnoughSpillSlotsForOsr); + spill_index = 0; + } + if (spill_index == 0) { + // The dynamic frame alignment state overwrites the first local. + // The first local is saved at the end of the unoptimized frame. + spill_index = graph()->osr()->UnoptimizedFrameSlots(); + } + } + return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); +} + + +LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LCallStub* result = new(zone()) LCallStub(context); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. + return NULL; +} + + +LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) { + instr->ReplayEnvironment(current_block_->last_environment()); + + // There are no real uses of a captured object. 
+ return NULL; +} + + +LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { + info()->MarkAsRequiresFrame(); + LOperand* args = UseRegister(instr->arguments()); + LOperand* length; + LOperand* index; + if (instr->length()->IsConstant() && instr->index()->IsConstant()) { + length = UseRegisterOrConstant(instr->length()); + index = UseOrConstant(instr->index()); + } else { + length = UseTempRegister(instr->length()); + index = Use(instr->index()); + } + return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); +} + + +LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) { + LOperand* object = UseFixed(instr->value(), eax); + LToFastProperties* result = new(zone()) LToFastProperties(object); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* value = UseAtStart(instr->value()); + LTypeof* result = new(zone()) LTypeof(context, value); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + +LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { + return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value())); +} + + +LInstruction* LChunkBuilder::DoIsConstructCallAndBranch( + HIsConstructCallAndBranch* instr) { + return new(zone()) LIsConstructCallAndBranch(TempRegister()); +} + + +LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { + instr->ReplayEnvironment(current_block_->last_environment()); + return NULL; +} + + +LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { + info()->MarkAsDeferredCalling(); + if (instr->is_function_entry()) { + LOperand* context = UseFixed(instr->context(), esi); + return MarkAsCall(new(zone()) LStackCheck(context), instr); + } else { + DCHECK(instr->is_backwards_branch()); + LOperand* context = UseAny(instr->context()); + return AssignEnvironment( + AssignPointerMap(new(zone()) 
LStackCheck(context))); + } +} + + +LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { + HEnvironment* outer = current_block_->last_environment(); + outer->set_ast_id(instr->ReturnId()); + HConstant* undefined = graph()->GetConstantUndefined(); + HEnvironment* inner = outer->CopyForInlining(instr->closure(), + instr->arguments_count(), + instr->function(), + undefined, + instr->inlining_kind()); + // Only replay binding of arguments object if it wasn't removed from graph. + if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { + inner->Bind(instr->arguments_var(), instr->arguments_object()); + } + inner->set_entry(instr); + current_block_->UpdateEnvironment(inner); + chunk_->AddInlinedClosure(instr->closure()); + return NULL; +} + + +LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { + LInstruction* pop = NULL; + + HEnvironment* env = current_block_->last_environment(); + + if (env->entry()->arguments_pushed()) { + int argument_count = env->arguments_environment()->parameter_count(); + pop = new(zone()) LDrop(argument_count); + DCHECK(instr->argument_delta() == -argument_count); + } + + HEnvironment* outer = current_block_->last_environment()-> + DiscardInlined(false); + current_block_->UpdateEnvironment(outer); + return pop; +} + + +LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* object = UseFixed(instr->enumerable(), eax); + LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); + return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { + LOperand* map = UseRegister(instr->map()); + return AssignEnvironment(DefineAsRegister( + new(zone()) LForInCacheArray(map))); +} + + +LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + 
LOperand* map = UseRegisterAtStart(instr->map()); + return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); +} + + +LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* index = UseTempRegister(instr->index()); + LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index); + LInstruction* result = DefineSameAsFirst(load); + return AssignPointerMap(result); +} + + +LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { + LOperand* context = UseRegisterAtStart(instr->context()); + return new(zone()) LStoreFrameContext(context); +} + + +LInstruction* LChunkBuilder::DoAllocateBlockContext( + HAllocateBlockContext* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* function = UseRegisterAtStart(instr->function()); + LAllocateBlockContext* result = + new(zone()) LAllocateBlockContext(context, function); + return MarkAsCall(DefineFixed(result, esi), instr); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/lithium-x87.h nodejs-0.11.15/deps/v8/src/x87/lithium-x87.h --- nodejs-0.11.13/deps/v8/src/x87/lithium-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/lithium-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2917 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_LITHIUM_X87_H_ +#define V8_X87_LITHIUM_X87_H_ + +#include "src/hydrogen.h" +#include "src/lithium.h" +#include "src/lithium-allocator.h" +#include "src/safepoint-table.h" +#include "src/utils.h" + +namespace v8 { +namespace internal { + +namespace compiler { +class RCodeVisualizer; +} + +// Forward declarations. 
+class LCodeGen; + +#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ + V(AccessArgumentsAt) \ + V(AddI) \ + V(AllocateBlockContext) \ + V(Allocate) \ + V(ApplyArguments) \ + V(ArgumentsElements) \ + V(ArgumentsLength) \ + V(ArithmeticD) \ + V(ArithmeticT) \ + V(BitI) \ + V(BoundsCheck) \ + V(Branch) \ + V(CallJSFunction) \ + V(CallWithDescriptor) \ + V(CallFunction) \ + V(CallNew) \ + V(CallNewArray) \ + V(CallRuntime) \ + V(CallStub) \ + V(CheckInstanceType) \ + V(CheckMaps) \ + V(CheckMapValue) \ + V(CheckNonSmi) \ + V(CheckSmi) \ + V(CheckValue) \ + V(ClampDToUint8) \ + V(ClampIToUint8) \ + V(ClampTToUint8NoSSE2) \ + V(ClassOfTestAndBranch) \ + V(ClobberDoubles) \ + V(CompareMinusZeroAndBranch) \ + V(CompareNumericAndBranch) \ + V(CmpObjectEqAndBranch) \ + V(CmpHoleAndBranch) \ + V(CmpMapAndBranch) \ + V(CmpT) \ + V(ConstantD) \ + V(ConstantE) \ + V(ConstantI) \ + V(ConstantS) \ + V(ConstantT) \ + V(ConstructDouble) \ + V(Context) \ + V(DateField) \ + V(DebugBreak) \ + V(DeclareGlobals) \ + V(Deoptimize) \ + V(DivByConstI) \ + V(DivByPowerOf2I) \ + V(DivI) \ + V(DoubleBits) \ + V(DoubleToI) \ + V(DoubleToSmi) \ + V(Drop) \ + V(Dummy) \ + V(DummyUse) \ + V(FlooringDivByConstI) \ + V(FlooringDivByPowerOf2I) \ + V(FlooringDivI) \ + V(ForInCacheArray) \ + V(ForInPrepareMap) \ + V(FunctionLiteral) \ + V(GetCachedArrayIndex) \ + V(Goto) \ + V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceTypeAndBranch) \ + V(InnerAllocatedObject) \ + V(InstanceOf) \ + V(InstanceOfKnownGlobal) \ + V(InstructionGap) \ + V(Integer32ToDouble) \ + V(InvokeFunction) \ + V(IsConstructCallAndBranch) \ + V(IsObjectAndBranch) \ + V(IsStringAndBranch) \ + V(IsSmiAndBranch) \ + V(IsUndetectableAndBranch) \ + V(Label) \ + V(LazyBailout) \ + V(LoadContextSlot) \ + V(LoadFieldByIndex) \ + V(LoadFunctionPrototype) \ + V(LoadGlobalCell) \ + V(LoadGlobalGeneric) \ + V(LoadKeyed) \ + V(LoadKeyedGeneric) \ + V(LoadNamedField) \ + V(LoadNamedGeneric) \ + V(LoadRoot) \ + V(MapEnumLength) \ + V(MathAbs) \ + 
V(MathClz32) \ + V(MathExp) \ + V(MathFloor) \ + V(MathFround) \ + V(MathLog) \ + V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSqrt) \ + V(ModByConstI) \ + V(ModByPowerOf2I) \ + V(ModI) \ + V(MulI) \ + V(NumberTagD) \ + V(NumberTagI) \ + V(NumberTagU) \ + V(NumberUntagD) \ + V(OsrEntry) \ + V(Parameter) \ + V(Power) \ + V(PushArgument) \ + V(RegExpLiteral) \ + V(Return) \ + V(SeqStringGetChar) \ + V(SeqStringSetChar) \ + V(ShiftI) \ + V(SmiTag) \ + V(SmiUntag) \ + V(StackCheck) \ + V(StoreCodeEntry) \ + V(StoreContextSlot) \ + V(StoreFrameContext) \ + V(StoreGlobalCell) \ + V(StoreKeyed) \ + V(StoreKeyedGeneric) \ + V(StoreNamedField) \ + V(StoreNamedGeneric) \ + V(StringAdd) \ + V(StringCharCodeAt) \ + V(StringCharFromCode) \ + V(StringCompareAndBranch) \ + V(SubI) \ + V(TaggedToI) \ + V(ThisFunction) \ + V(ToFastProperties) \ + V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ + V(Typeof) \ + V(TypeofIsAndBranch) \ + V(Uint32ToDouble) \ + V(UnknownOSRValue) \ + V(WrapReceiver) + + +#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ + virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \ + return LInstruction::k##type; \ + } \ + virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \ + virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \ + return mnemonic; \ + } \ + static L##type* cast(LInstruction* instr) { \ + DCHECK(instr->Is##type()); \ + return reinterpret_cast<L##type*>(instr); \ + } + + +#define DECLARE_HYDROGEN_ACCESSOR(type) \ + H##type* hydrogen() const { \ + return H##type::cast(hydrogen_value()); \ + } + + +class LInstruction : public ZoneObject { + public: + LInstruction() + : environment_(NULL), + hydrogen_value_(NULL), + bit_field_(IsCallBits::encode(false)) { + } + + virtual ~LInstruction() {} + + virtual void CompileToNative(LCodeGen* generator) = 0; + virtual const char* Mnemonic() const = 0; + virtual void PrintTo(StringStream* stream); + virtual void PrintDataTo(StringStream* stream); + 
virtual void PrintOutputOperandTo(StringStream* stream); + + enum Opcode { + // Declare a unique enum value for each instruction. +#define DECLARE_OPCODE(type) k##type, + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter, + kNumberOfInstructions +#undef DECLARE_OPCODE + }; + + virtual Opcode opcode() const = 0; + + // Declare non-virtual type testers for all leaf IR classes. +#define DECLARE_PREDICATE(type) \ + bool Is##type() const { return opcode() == k##type; } + LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) +#undef DECLARE_PREDICATE + + // Declare virtual predicates for instructions that don't have + // an opcode. + virtual bool IsGap() const { return false; } + + virtual bool IsControl() const { return false; } + + // Try deleting this instruction if possible. + virtual bool TryDelete() { return false; } + + void set_environment(LEnvironment* env) { environment_ = env; } + LEnvironment* environment() const { return environment_; } + bool HasEnvironment() const { return environment_ != NULL; } + + void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); } + LPointerMap* pointer_map() const { return pointer_map_.get(); } + bool HasPointerMap() const { return pointer_map_.is_set(); } + + void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } + HValue* hydrogen_value() const { return hydrogen_value_; } + + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } + + void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); } + bool IsCall() const { return IsCallBits::decode(bit_field_); } + + // Interface to the register allocator and iterators. + bool ClobbersTemps() const { return IsCall(); } + bool ClobbersRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { + return IsCall() || + // We only have rudimentary X87Stack tracking, thus in general + // cannot handle phi-nodes. 
+ (IsControl()); + } + + virtual bool HasResult() const = 0; + virtual LOperand* result() const = 0; + + bool HasDoubleRegisterResult(); + bool HasDoubleRegisterInput(); + bool IsDoubleInput(X87Register reg, LCodeGen* cgen); + + LOperand* FirstInput() { return InputAt(0); } + LOperand* Output() { return HasResult() ? result() : NULL; } + + virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } + +#ifdef DEBUG + void VerifyCall(); +#endif + + virtual int InputCount() = 0; + virtual LOperand* InputAt(int i) = 0; + + private: + // Iterator support. + friend class InputIterator; + + friend class TempIterator; + virtual int TempCount() = 0; + virtual LOperand* TempAt(int i) = 0; + + class IsCallBits: public BitField<bool, 0, 1> {}; + + LEnvironment* environment_; + SetOncePointer<LPointerMap> pointer_map_; + HValue* hydrogen_value_; + int bit_field_; +}; + + +// R = number of result operands (0 or 1). +template<int R> +class LTemplateResultInstruction : public LInstruction { + public: + // Allow 0 or 1 output operands. + STATIC_ASSERT(R == 0 || R == 1); + virtual bool HasResult() const V8_FINAL V8_OVERRIDE { + return R != 0 && result() != NULL; + } + void set_result(LOperand* operand) { results_[0] = operand; } + LOperand* result() const { return results_[0]; } + + protected: + EmbeddedContainer<LOperand*, R> results_; +}; + + +// R = number of result operands (0 or 1). +// I = number of input operands. +// T = number of temporary operands. +template<int R, int I, int T> +class LTemplateInstruction : public LTemplateResultInstruction<R> { + protected: + EmbeddedContainer<LOperand*, I> inputs_; + EmbeddedContainer<LOperand*, T> temps_; + + private: + // Iterator support. 
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; } + virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; } + + virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; } + virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; } +}; + + +class LGap : public LTemplateInstruction<0, 0, 0> { + public: + explicit LGap(HBasicBlock* block) : block_(block) { + parallel_moves_[BEFORE] = NULL; + parallel_moves_[START] = NULL; + parallel_moves_[END] = NULL; + parallel_moves_[AFTER] = NULL; + } + + // Can't use the DECLARE-macro here because of sub-classes. + virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; } + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + static LGap* cast(LInstruction* instr) { + DCHECK(instr->IsGap()); + return reinterpret_cast<LGap*>(instr); + } + + bool IsRedundant() const; + + HBasicBlock* block() const { return block_; } + + enum InnerPosition { + BEFORE, + START, + END, + AFTER, + FIRST_INNER_POSITION = BEFORE, + LAST_INNER_POSITION = AFTER + }; + + LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { + if (parallel_moves_[pos] == NULL) { + parallel_moves_[pos] = new(zone) LParallelMove(zone); + } + return parallel_moves_[pos]; + } + + LParallelMove* GetParallelMove(InnerPosition pos) { + return parallel_moves_[pos]; + } + + private: + LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1]; + HBasicBlock* block_; +}; + + +class LInstructionGap V8_FINAL : public LGap { + public: + explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return !IsRedundant(); + } + + DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") +}; + + +class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + explicit LClobberDoubles(Isolate* isolate) { } + + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { + return true; + } + + 
DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d") +}; + + +class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + explicit LGoto(HBasicBlock* block) : block_(block) { } + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE; + DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual bool IsControl() const V8_OVERRIDE { return true; } + + int block_id() const { return block_->block_id(); } + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { + return false; + } + + bool jumps_to_join() const { return block_->predecessors()->length() > 1; } + + private: + HBasicBlock* block_; +}; + + +class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout") +}; + + +class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + LDummy() {} + DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy") +}; + + +class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDummyUse(LOperand* value) { + inputs_[0] = value; + } + DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") +}; + + +class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + virtual bool IsControl() const V8_OVERRIDE { return true; } + DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") + DECLARE_HYDROGEN_ACCESSOR(Deoptimize) +}; + + +class LLabel V8_FINAL : public LGap { + public: + explicit LLabel(HBasicBlock* block) + : LGap(block), replacement_(NULL) { } + + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(Label, "label") + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int block_id() const { return block()->block_id(); } + bool is_loop_header() const { return block()->IsLoopHeader(); } + bool is_osr_entry() const { return block()->is_osr_entry(); } + Label* label() { 
return &label_; } + LLabel* replacement() const { return replacement_; } + void set_replacement(LLabel* label) { replacement_ = label; } + bool HasReplacement() const { return replacement_ != NULL; } + + private: + Label label_; + LLabel* replacement_; +}; + + +class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") +}; + + +class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallStub(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub") + DECLARE_HYDROGEN_ACCESSOR(CallStub) +}; + + +class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") +}; + + +template<int I, int T> +class LControlInstruction: public LTemplateInstruction<0, I, T> { + public: + LControlInstruction() : false_label_(NULL), true_label_(NULL) { } + + virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; } + + int SuccessorCount() { return hydrogen()->SuccessorCount(); } + HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } + + int TrueDestination(LChunk* chunk) { + return chunk->LookupDestination(true_block_id()); + } + int FalseDestination(LChunk* chunk) { + return chunk->LookupDestination(false_block_id()); + } + + Label* TrueLabel(LChunk* chunk) { + if (true_label_ == NULL) { + true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk)); + } + return true_label_; + } + Label* FalseLabel(LChunk* chunk) { + if (false_label_ == NULL) { + false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk)); + } + return false_label_; + } + + protected: + int true_block_id() { return 
SuccessorAt(0)->block_id(); } + int false_block_id() { return SuccessorAt(1)->block_id(); } + + private: + HControlInstruction* hydrogen() { + return HControlInstruction::cast(this->hydrogen_value()); + } + + Label* false_label_; + Label* true_label_; +}; + + +class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LWrapReceiver(LOperand* receiver, + LOperand* function, + LOperand* temp) { + inputs_[0] = receiver; + inputs_[1] = function; + temps_[0] = temp; + } + + LOperand* receiver() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver") + DECLARE_HYDROGEN_ACCESSOR(WrapReceiver) +}; + + +class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> { + public: + LApplyArguments(LOperand* function, + LOperand* receiver, + LOperand* length, + LOperand* elements) { + inputs_[0] = function; + inputs_[1] = receiver; + inputs_[2] = length; + inputs_[3] = elements; + } + + LOperand* function() { return inputs_[0]; } + LOperand* receiver() { return inputs_[1]; } + LOperand* length() { return inputs_[2]; } + LOperand* elements() { return inputs_[3]; } + + DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments") +}; + + +class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) { + inputs_[0] = arguments; + inputs_[1] = length; + inputs_[2] = index; + } + + LOperand* arguments() { return inputs_[0]; } + LOperand* length() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at") + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LArgumentsLength(LOperand* elements) { + inputs_[0] = elements; + } + + LOperand* 
elements() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") +}; + + +class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") + DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) +}; + + +class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break") +}; + + +class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LModByPowerOf2I(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i") + DECLARE_HYDROGEN_ACCESSOR(Mod) + + private: + int32_t divisor_; +}; + + +class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LModByConstI(LOperand* dividend, + int32_t divisor, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = dividend; + divisor_ = divisor; + temps_[0] = temp1; + temps_[1] = temp2; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i") + DECLARE_HYDROGEN_ACCESSOR(Mod) + + private: + int32_t divisor_; +}; + + +class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LModI(LOperand* left, LOperand* right, LOperand* temp) { + inputs_[0] = left; + inputs_[1] = right; + temps_[0] = temp; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") + DECLARE_HYDROGEN_ACCESSOR(Mod) +}; + + +class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + 
LDivByPowerOf2I(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i") + DECLARE_HYDROGEN_ACCESSOR(Div) + + private: + int32_t divisor_; +}; + + +class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LDivByConstI(LOperand* dividend, + int32_t divisor, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = dividend; + divisor_ = divisor; + temps_[0] = temp1; + temps_[1] = temp2; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i") + DECLARE_HYDROGEN_ACCESSOR(Div) + + private: + int32_t divisor_; +}; + + +class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") + DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) +}; + + +class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) { + inputs_[0] = dividend; + divisor_ = divisor; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I, + "flooring-div-by-power-of-2-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) + + private: + int32_t divisor_; +}; + + +class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> { + public: + LFlooringDivByConstI(LOperand* dividend, + int32_t 
divisor, + LOperand* temp1, + LOperand* temp2, + LOperand* temp3) { + inputs_[0] = dividend; + divisor_ = divisor; + temps_[0] = temp1; + temps_[1] = temp2; + temps_[2] = temp3; + } + + LOperand* dividend() { return inputs_[0]; } + int32_t divisor() const { return divisor_; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + LOperand* temp3() { return temps_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) + + private: + int32_t divisor_; +}; + + +class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { + inputs_[0] = dividend; + inputs_[1] = divisor; + temps_[0] = temp; + } + + LOperand* dividend() { return inputs_[0]; } + LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + +class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LMulI(LOperand* left, LOperand* right, LOperand* temp) { + inputs_[0] = left; + inputs_[1] = right; + temps_[0] = temp; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") + DECLARE_HYDROGEN_ACCESSOR(Mul) +}; + + +class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> { + public: + LCompareNumericAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch, + "compare-numeric-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch) + + Token::Value op() const { return hydrogen()->token(); } + bool is_double() const { + return 
hydrogen()->representation().IsDouble(); + } + + virtual void PrintDataTo(StringStream* stream); +}; + + +class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFloor(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathRound(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathFround(LOperand* value) { inputs_[0] = value; } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround") +}; + + +class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LMathAbs(LOperand* context, LOperand* value) { + inputs_[1] = context; + inputs_[0] = value; + } + + LOperand* context() { return inputs_[1]; } + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathLog(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") +}; + + +class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathClz32(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32") +}; + + +class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> { + public: + LMathExp(LOperand* value, + LOperand* temp1, + LOperand* temp2) { + 
inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") +}; + + +class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathSqrt(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") +}; + + +class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LMathPowHalf(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") +}; + + +class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> { + public: + LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") +}; + + +class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LCmpHoleAndBranch(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch) +}; + + +class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch, + "cmp-minus-zero-and-branch") + 
DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch) +}; + + +class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LIsObjectAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch") + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LIsStringAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LIsSmiAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, + "is-undetectable-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> { + public: + LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { + 
inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[1]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, + "string-compare-and-branch") + DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + Token::Value op() const { return hydrogen()->token(); } +}; + + +class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, + "has-instance-type-and-branch") + DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + +class LHasCachedArrayIndexAndBranch V8_FINAL + : public LControlInstruction<1, 0> { + public: + explicit LHasCachedArrayIndexAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch, + "has-cached-array-index-and-branch") + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> { + public: + explicit LIsConstructCallAndBranch(LOperand* temp) { + temps_[0] = temp; + } + + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch, + 
"is-construct-call-and-branch") +}; + + +class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> { + public: + LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp; + temps_[1] = temp2; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, + "class-of-test-and-branch") + DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LCmpT(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") + DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) + + LOperand* context() { return inputs_[0]; } + Token::Value op() const { return hydrogen()->token(); } +}; + + +class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LInstanceOf(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of") +}; + + +class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = value; + temps_[0] = temp; + } + + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, + "instance-of-known-global") + DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal) + + Handle<JSFunction> function() const { return hydrogen()->function(); } + LEnvironment* GetDeferredLazyDeoptimizationEnvironment() { + return 
lazy_deopt_env_; + } + virtual void SetDeferredLazyDeoptimizationEnvironment( + LEnvironment* env) V8_OVERRIDE { + lazy_deopt_env_ = env; + } + + private: + LEnvironment* lazy_deopt_env_; +}; + + +class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> { + public: + LBoundsCheck(LOperand* index, LOperand* length) { + inputs_[0] = index; + inputs_[1] = length; + } + + LOperand* index() { return inputs_[0]; } + LOperand* length() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check") + DECLARE_HYDROGEN_ACCESSOR(BoundsCheck) +}; + + +class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LBitI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") + DECLARE_HYDROGEN_ACCESSOR(Bitwise) + + Token::Value op() const { return hydrogen()->op(); } +}; + + +class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt) + : op_(op), can_deopt_(can_deopt) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") + + Token::Value op() const { return op_; } + bool can_deopt() const { return can_deopt_; } + + private: + Token::Value op_; + bool can_deopt_; +}; + + +class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LSubI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") + DECLARE_HYDROGEN_ACCESSOR(Sub) +}; + + +class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") + 
DECLARE_HYDROGEN_ACCESSOR(Constant) + + int32_t value() const { return hydrogen()->Integer32Value(); } +}; + + +class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } +}; + + +class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> { + public: + explicit LConstantD(LOperand* temp) { + temps_[0] = temp; + } + + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + double value() const { return hydrogen()->DoubleValue(); } +}; + + +class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + ExternalReference value() const { + return hydrogen()->ExternalReferenceValue(); + } +}; + + +class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + Handle<Object> value(Isolate* isolate) const { + return hydrogen()->handle(isolate); + } +}; + + +class LBranch V8_FINAL : public LControlInstruction<1, 1> { + public: + LBranch(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") + DECLARE_HYDROGEN_ACCESSOR(Branch) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LCmpMapAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") + DECLARE_HYDROGEN_ACCESSOR(CompareMap) + + Handle<Map> 
map() const { return hydrogen()->map().handle(); } +}; + + +class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LMapEnumLength(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length") +}; + + +class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LDateField(LOperand* date, LOperand* temp, Smi* index) + : index_(index) { + inputs_[0] = date; + temps_[0] = temp; + } + + LOperand* date() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field") + DECLARE_HYDROGEN_ACCESSOR(DateField) + + Smi* index() const { return index_; } + + private: + Smi* index_; +}; + + +class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LSeqStringGetChar(LOperand* string, LOperand* index) { + inputs_[0] = string; + inputs_[1] = index; + } + + LOperand* string() const { return inputs_[0]; } + LOperand* index() const { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar) +}; + + +class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> { + public: + LSeqStringSetChar(LOperand* context, + LOperand* string, + LOperand* index, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = string; + inputs_[2] = index; + inputs_[3] = value; + } + + LOperand* string() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } + LOperand* value() { return inputs_[3]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) +}; + + +class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LAddI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; 
} + + static bool UseLea(HAdd* add) { + return !add->CheckFlag(HValue::kCanOverflow) && + add->BetterLeftOperand()->UseCount() > 1; + } + + DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") + DECLARE_HYDROGEN_ACCESSOR(Add) +}; + + +class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LMathMinMax(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") + DECLARE_HYDROGEN_ACCESSOR(MathMinMax) +}; + + +class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LPower(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Power, "power") + DECLARE_HYDROGEN_ACCESSOR(Power) +}; + + +class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LArithmeticD(Token::Value op, LOperand* left, LOperand* right) + : op_(op) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + Token::Value op() const { return op_; } + + virtual Opcode opcode() const V8_OVERRIDE { + return LInstruction::kArithmeticD; + } + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE; + virtual const char* Mnemonic() const V8_OVERRIDE; + + private: + Token::Value op_; +}; + + +class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LArithmeticT(Token::Value op, + LOperand* context, + LOperand* left, + LOperand* right) + : op_(op) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + virtual Opcode opcode() const V8_OVERRIDE { + return LInstruction::kArithmeticT; + } 
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE; + virtual const char* Mnemonic() const V8_OVERRIDE; + + Token::Value op() const { return op_; } + + private: + Token::Value op_; +}; + + +class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> { + public: + explicit LReturn(LOperand* value, + LOperand* context, + LOperand* parameter_count) { + inputs_[0] = value; + inputs_[1] = context; + inputs_[2] = parameter_count; + } + + bool has_constant_parameter_count() { + return parameter_count()->IsConstantOperand(); + } + LConstantOperand* constant_parameter_count() { + DCHECK(has_constant_parameter_count()); + return LConstantOperand::cast(parameter_count()); + } + LOperand* parameter_count() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(Return, "return") + DECLARE_HYDROGEN_ACCESSOR(Return) +}; + + +class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LLoadNamedField(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") + DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) +}; + + +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) { + inputs_[0] = context; + inputs_[1] = object; + temps_[0] = vector; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) + + Handle<Object> name() const { return hydrogen()->name(); } +}; + + +class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LLoadFunctionPrototype(LOperand* function, LOperand* temp) { + inputs_[0] = function; + temps_[0] = temp; + } + + LOperand* function() { return inputs_[0]; } + LOperand* temp() { 
return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") + DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) +}; + + +class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") + DECLARE_HYDROGEN_ACCESSOR(LoadRoot) + + Heap::RootListIndex index() const { return hydrogen()->index(); } +}; + + +class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LLoadKeyed(LOperand* elements, LOperand* key) { + inputs_[0] = elements; + inputs_[1] = key; + } + LOperand* elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } + bool is_external() const { + return hydrogen()->is_external(); + } + bool is_fixed_typed_array() const { + return hydrogen()->is_fixed_typed_array(); + } + bool is_typed_elements() const { + return is_external() || is_fixed_typed_array(); + } + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + uint32_t base_offset() const { return hydrogen()->base_offset(); } + bool key_is_smi() { + return hydrogen()->key()->representation().IsTagged(); + } +}; + + +inline static bool ExternalArrayOpRequiresTemp( + Representation key_representation, + ElementsKind elements_kind) { + // Operations that require the key to be divided by two to be converted into + // an index cannot fold the scale operation into a load and need an extra + // temp register to do the work. 
+ return key_representation.IsSmi() && + (elements_kind == EXTERNAL_INT8_ELEMENTS || + elements_kind == EXTERNAL_UINT8_ELEMENTS || + elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS || + elements_kind == UINT8_ELEMENTS || + elements_kind == INT8_ELEMENTS || + elements_kind == UINT8_CLAMPED_ELEMENTS); +} + + +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> { + public: + LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key, + LOperand* vector) { + inputs_[0] = context; + inputs_[1] = obj; + inputs_[2] = key; + temps_[0] = vector; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } + LOperand* temp_vector() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric) +}; + + +class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell") + DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell) +}; + + +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LLoadGlobalGeneric(LOperand* context, LOperand* global_object, + LOperand* vector) { + inputs_[0] = context; + inputs_[1] = global_object; + temps_[0] = vector; + } + + LOperand* context() { return inputs_[0]; } + LOperand* global_object() { return inputs_[1]; } + LOperand* temp_vector() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") + DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) + + Handle<Object> name() const { return hydrogen()->name(); } + bool for_typeof() const { return hydrogen()->for_typeof(); } +}; + + +class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreGlobalCell(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, 
"store-global-cell") + DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell) +}; + + +class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LLoadContextSlot(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") + DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) + + int slot_index() { return hydrogen()->slot_index(); } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> { + public: + LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = value; + temps_[0] = temp; + } + + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") + DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) + + int slot_index() { return hydrogen()->slot_index(); } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LPushArgument(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") +}; + + +class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + explicit LDrop(int count) : count_(count) { } + + int count() const { return count_; } + + DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") + + private: + int count_; +}; + + +class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> { + public: + LStoreCodeEntry(LOperand* function, LOperand* code_object) { + inputs_[0] = function; + inputs_[1] = code_object; + } + + LOperand* function() { return inputs_[0]; } + LOperand* code_object() { return inputs_[1]; } + + virtual void PrintDataTo(StringStream* stream); 
+ + DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry") + DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry) +}; + + +class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> { + public: + LInnerAllocatedObject(LOperand* base_object, LOperand* offset) { + inputs_[0] = base_object; + inputs_[1] = offset; + } + + LOperand* base_object() const { return inputs_[0]; } + LOperand* offset() const { return inputs_[1]; } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object") +}; + + +class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") + DECLARE_HYDROGEN_ACCESSOR(ThisFunction) +}; + + +class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(Context, "context") + DECLARE_HYDROGEN_ACCESSOR(Context) +}; + + +class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LDeclareGlobals(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") + DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) +}; + + +class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallJSFunction(LOperand* function) { + inputs_[0] = function; + } + + LOperand* function() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function") + DECLARE_HYDROGEN_ACCESSOR(CallJSFunction) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> { + public: + LCallWithDescriptor(const InterfaceDescriptor* descriptor, + const ZoneList<LOperand*>& operands, + Zone* zone) + : inputs_(descriptor->GetRegisterParameterCount() + 1, zone) { + 
DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length()); + inputs_.AddAll(operands, zone); + } + + LOperand* target() const { return inputs_[0]; } + + private: + DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor") + DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } + + ZoneList<LOperand*> inputs_; + + // Iterator support. + virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); } + virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; } + + virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; } + virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; } +}; + + +class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LInvokeFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") + DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + explicit LCallFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") + DECLARE_HYDROGEN_ACCESSOR(CallFunction) + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LCallNew(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; + } + + 
LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") + DECLARE_HYDROGEN_ACCESSOR(CallNew) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LCallNewArray(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; + } + + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + +class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallRuntime(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") + DECLARE_HYDROGEN_ACCESSOR(CallRuntime) + + virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE { + return true; + } + + const Runtime::Function* function() const { return hydrogen()->function(); } + int arity() const { return hydrogen()->argument_count(); } +}; + + +class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LInteger32ToDouble(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") +}; + + +class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + explicit LUint32ToDouble(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") +}; + + +class LNumberTagI 
V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LNumberTagI(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") +}; + + +class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LNumberTagU(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u") +}; + + +class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + LNumberTagD(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + +// Sometimes truncating conversion from a tagged value to an int32. +class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDoubleToI(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") + DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + + bool truncating() { return hydrogen()->CanTruncateToInt32(); } +}; + + +class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDoubleToSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") + DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) +}; + + +// Truncating conversion from a tagged value to an int32. 
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LTaggedToI(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") + DECLARE_HYDROGEN_ACCESSOR(Change) + + bool truncating() { return hydrogen()->CanTruncateToInt32(); } +}; + + +class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LSmiTag(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + +class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> { + public: + explicit LNumberUntagD(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") + DECLARE_HYDROGEN_ACCESSOR(Change); +}; + + +class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + LSmiUntag(LOperand* value, bool needs_check) + : needs_check_(needs_check) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") + + bool needs_check() const { return needs_check_; } + + private: + bool needs_check_; +}; + + +class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> { + public: + LStoreNamedField(LOperand* obj, + LOperand* val, + LOperand* temp, + LOperand* temp_map) { + inputs_[0] = obj; + inputs_[1] = val; + temps_[0] = temp; + temps_[1] = temp_map; + } + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp_map() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + + virtual void PrintDataTo(StringStream* 
stream) V8_OVERRIDE; +}; + + +class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { + public: + LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = value; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + Handle<Object> name() const { return hydrogen()->name(); } + StrictMode strict_mode() { return hydrogen()->strict_mode(); } +}; + + +class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> { + public: + LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = key; + inputs_[2] = val; + } + + bool is_external() const { return hydrogen()->is_external(); } + bool is_fixed_typed_array() const { + return hydrogen()->is_fixed_typed_array(); + } + bool is_typed_elements() const { + return is_external() || is_fixed_typed_array(); + } + LOperand* elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } + + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + uint32_t base_offset() const { return hydrogen()->base_offset(); } + bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } +}; + + +class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> { + public: + LStoreKeyedGeneric(LOperand* context, + LOperand* object, + LOperand* key, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = key; + inputs_[3] = value; + } + + LOperand* context() { 
return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } + LOperand* value() { return inputs_[3]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + StrictMode strict_mode() { return hydrogen()->strict_mode(); } +}; + + +class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> { + public: + LTransitionElementsKind(LOperand* object, + LOperand* context, + LOperand* new_map_temp, + LOperand* temp) { + inputs_[0] = object; + inputs_[1] = context; + temps_[0] = new_map_temp; + temps_[1] = temp; + } + + LOperand* context() { return inputs_[1]; } + LOperand* object() { return inputs_[0]; } + LOperand* new_map_temp() { return temps_[0]; } + LOperand* temp() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, + "transition-elements-kind") + DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + Handle<Map> original_map() { return hydrogen()->original_map().handle(); } + Handle<Map> transitioned_map() { + return hydrogen()->transitioned_map().handle(); + } + ElementsKind from_kind() { return hydrogen()->from_kind(); } + ElementsKind to_kind() { return hydrogen()->to_kind(); } +}; + + +class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> { + public: + LTrapAllocationMemento(LOperand* object, + LOperand* temp) { + inputs_[0] = object; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, + "trap-allocation-memento") +}; + + +class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LStringAdd(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; + } + + 
LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") + DECLARE_HYDROGEN_ACCESSOR(StringAdd) +}; + + +class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> { + public: + LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { + inputs_[0] = context; + inputs_[1] = string; + inputs_[2] = index; + } + + LOperand* context() { return inputs_[0]; } + LOperand* string() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) +}; + + +class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LStringCharFromCode(LOperand* context, LOperand* char_code) { + inputs_[0] = context; + inputs_[1] = char_code; + } + + LOperand* context() { return inputs_[0]; } + LOperand* char_code() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") + DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) +}; + + +class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckValue(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value") + DECLARE_HYDROGEN_ACCESSOR(CheckValue) +}; + + +class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> { + public: + LCheckInstanceType(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") + DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) +}; + + +class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckMaps(LOperand* value = NULL) { + 
inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") + DECLARE_HYDROGEN_ACCESSOR(CheckMaps) +}; + + +class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LCheckSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") +}; + + +class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LClampDToUint8(LOperand* value) { + inputs_[0] = value; + } + + LOperand* unclamped() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") +}; + + +class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LClampIToUint8(LOperand* value) { + inputs_[0] = value; + } + + LOperand* unclamped() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") +}; + + +// Truncating conversion from a tagged value to an int32. 
+class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> { + public: + LClampTToUint8NoSSE2(LOperand* unclamped, + LOperand* temp1, + LOperand* temp2, + LOperand* temp3) { + inputs_[0] = unclamped; + temps_[0] = temp1; + temps_[1] = temp2; + temps_[2] = temp3; + } + + LOperand* unclamped() { return inputs_[0]; } + LOperand* scratch() { return temps_[0]; } + LOperand* scratch2() { return temps_[1]; } + LOperand* scratch3() { return temps_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2, + "clamp-t-to-uint8-nosse2") + DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) +}; + + +class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LCheckNonSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") + DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject) +}; + + +class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LDoubleBits(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits") + DECLARE_HYDROGEN_ACCESSOR(DoubleBits) +}; + + +class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LConstructDouble(LOperand* hi, LOperand* lo) { + inputs_[0] = hi; + inputs_[1] = lo; + } + + LOperand* hi() { return inputs_[0]; } + LOperand* lo() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double") +}; + + +class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> { + public: + LAllocate(LOperand* context, LOperand* size, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = size; + temps_[0] = temp; + } + + LOperand* context() { return inputs_[0]; } + LOperand* size() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") + DECLARE_HYDROGEN_ACCESSOR(Allocate) +}; + + +class 
LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LRegExpLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") + DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral) +}; + + +class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LFunctionLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") + DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) +}; + + +class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LToFastProperties(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties") + DECLARE_HYDROGEN_ACCESSOR(ToFastProperties) +}; + + +class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LTypeof(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; + } + + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") +}; + + +class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> { + public: + explicit LTypeofIsAndBranch(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") + DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) + + Handle<String> type_literal() { return hydrogen()->type_literal(); } + + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; +}; + + +class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> { + public: + virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE { + return false; + } + DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") +}; + + 
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> { + public: + explicit LStackCheck(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") + DECLARE_HYDROGEN_ACCESSOR(StackCheck) + + Label* done_label() { return &done_label_; } + + private: + Label done_label_; +}; + + +class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LForInPrepareMap(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") +}; + + +class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInCacheArray(LOperand* map) { + inputs_[0] = map; + } + + LOperand* map() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") + + int idx() { + return HForInCacheArray::cast(this->hydrogen_value())->idx(); + } +}; + + +class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> { + public: + LCheckMapValue(LOperand* value, LOperand* map) { + inputs_[0] = value; + inputs_[1] = map; + } + + LOperand* value() { return inputs_[0]; } + LOperand* map() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") +}; + + +class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> { + public: + LLoadFieldByIndex(LOperand* object, LOperand* index) { + inputs_[0] = object; + inputs_[1] = index; + } + + LOperand* object() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") +}; + + +class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> { + public: + explicit LStoreFrameContext(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { 
return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context") +}; + + +class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> { + public: + LAllocateBlockContext(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; + } + + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } + + Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); } + + DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context") + DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext) +}; + + +class LChunkBuilder; +class LPlatformChunk V8_FINAL : public LChunk { + public: + LPlatformChunk(CompilationInfo* info, HGraph* graph) + : LChunk(info, graph), + num_double_slots_(0) { } + + int GetNextSpillIndex(RegisterKind kind); + LOperand* GetNextSpillSlot(RegisterKind kind); + + int num_double_slots() const { return num_double_slots_; } + + private: + int num_double_slots_; +}; + + +class LChunkBuilder V8_FINAL : public LChunkBuilderBase { + public: + LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) + : LChunkBuilderBase(graph->zone()), + chunk_(NULL), + info_(info), + graph_(graph), + status_(UNUSED), + current_instruction_(NULL), + current_block_(NULL), + next_block_(NULL), + allocator_(allocator) { } + + Isolate* isolate() const { return graph_->isolate(); } + + // Build the sequence for the graph. + LPlatformChunk* Build(); + + // Declare methods that deal with the individual node types. 
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); + HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) +#undef DECLARE_DO + + LInstruction* DoMathFloor(HUnaryMathOperation* instr); + LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathFround(HUnaryMathOperation* instr); + LInstruction* DoMathAbs(HUnaryMathOperation* instr); + LInstruction* DoMathLog(HUnaryMathOperation* instr); + LInstruction* DoMathExp(HUnaryMathOperation* instr); + LInstruction* DoMathSqrt(HUnaryMathOperation* instr); + LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); + LInstruction* DoMathClz32(HUnaryMathOperation* instr); + LInstruction* DoDivByPowerOf2I(HDiv* instr); + LInstruction* DoDivByConstI(HDiv* instr); + LInstruction* DoDivI(HDiv* instr); + LInstruction* DoModByPowerOf2I(HMod* instr); + LInstruction* DoModByConstI(HMod* instr); + LInstruction* DoModI(HMod* instr); + LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr); + LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr); + + private: + enum Status { + UNUSED, + BUILDING, + DONE, + ABORTED + }; + + LPlatformChunk* chunk() const { return chunk_; } + CompilationInfo* info() const { return info_; } + HGraph* graph() const { return graph_; } + + bool is_unused() const { return status_ == UNUSED; } + bool is_building() const { return status_ == BUILDING; } + bool is_done() const { return status_ == DONE; } + bool is_aborted() const { return status_ == ABORTED; } + + void Abort(BailoutReason reason); + + // Methods for getting operands for Use / Define / Temp. + LUnallocated* ToUnallocated(Register reg); + LUnallocated* ToUnallocated(X87Register reg); + + // Methods for setting up define-use relationships. + MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); + MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register); + + // A value that is guaranteed to be allocated to a register. 
+ // Operand created by UseRegister is guaranteed to be live until the end of + // instruction. This means that register allocator will not reuse it's + // register for any other operand inside instruction. + // Operand created by UseRegisterAtStart is guaranteed to be live only at + // instruction start. Register allocator is free to assign the same register + // to some other operand used inside instruction (i.e. temporary or + // output). + MUST_USE_RESULT LOperand* UseRegister(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value); + + // An input operand in a register that may be trashed. + MUST_USE_RESULT LOperand* UseTempRegister(HValue* value); + + // An input operand in a register or stack slot. + MUST_USE_RESULT LOperand* Use(HValue* value); + MUST_USE_RESULT LOperand* UseAtStart(HValue* value); + + // An input operand in a register, stack slot or a constant operand. + MUST_USE_RESULT LOperand* UseOrConstant(HValue* value); + MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value); + + // An input operand in a fixed register or a constant operand. + MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value, + Register fixed_register); + + // An input operand in a register or a constant operand. + MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value); + MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value); + + // An input operand in a constant operand. + MUST_USE_RESULT LOperand* UseConstant(HValue* value); + + // An input operand in register, stack slot or a constant operand. + // Will not be moved to a register even if one is freely available. + virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE; + + // Temporary operand that must be in a register. + MUST_USE_RESULT LUnallocated* TempRegister(); + MUST_USE_RESULT LOperand* FixedTemp(Register reg); + + // Methods for setting up define-use relationships. + // Return the same instruction that they are passed. 
+ LInstruction* Define(LTemplateResultInstruction<1>* instr, + LUnallocated* result); + LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr); + LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr, + int index); + LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr); + LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, + Register reg); + LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr); + // Assigns an environment to an instruction. An instruction which can + // deoptimize must have an environment. + LInstruction* AssignEnvironment(LInstruction* instr); + // Assigns a pointer map to an instruction. An instruction which can + // trigger a GC or a lazy deoptimization must have a pointer map. + LInstruction* AssignPointerMap(LInstruction* instr); + + enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY }; + + LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr); + + // Marks a call for the register allocator. Assigns a pointer map to + // support GC and lazy deoptimization. Assigns an environment to support + // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY. 
+ LInstruction* MarkAsCall( + LInstruction* instr, + HInstruction* hinstr, + CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); + + void VisitInstruction(HInstruction* current); + void AddInstruction(LInstruction* instr, HInstruction* current); + + void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); + LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr); + LInstruction* DoArithmeticD(Token::Value op, + HArithmeticBinaryOperation* instr); + LInstruction* DoArithmeticT(Token::Value op, + HBinaryOperation* instr); + + LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr); + + LPlatformChunk* chunk_; + CompilationInfo* info_; + HGraph* const graph_; + Status status_; + HInstruction* current_instruction_; + HBasicBlock* current_block_; + HBasicBlock* next_block_; + LAllocator* allocator_; + + DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); +}; + +#undef DECLARE_HYDROGEN_ACCESSOR +#undef DECLARE_CONCRETE_INSTRUCTION + +} } // namespace v8::internal + +#endif // V8_X87_LITHIUM_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/macro-assembler-x87.cc nodejs-0.11.15/deps/v8/src/x87/macro-assembler-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/macro-assembler-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/macro-assembler-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3327 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" +#include "src/serialize.h" + +namespace v8 { +namespace internal { + +// ------------------------------------------------------------------------- +// MacroAssembler implementation. 
+ +MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) + : Assembler(arg_isolate, buffer, size), + generating_stub_(false), + has_frame_(false) { + if (isolate() != NULL) { + // TODO(titzer): should we just use a null handle here instead? + code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), + isolate()); + } +} + + +void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { + DCHECK(!r.IsDouble()); + if (r.IsInteger8()) { + movsx_b(dst, src); + } else if (r.IsUInteger8()) { + movzx_b(dst, src); + } else if (r.IsInteger16()) { + movsx_w(dst, src); + } else if (r.IsUInteger16()) { + movzx_w(dst, src); + } else { + mov(dst, src); + } +} + + +void MacroAssembler::Store(Register src, const Operand& dst, Representation r) { + DCHECK(!r.IsDouble()); + if (r.IsInteger8() || r.IsUInteger8()) { + mov_b(dst, src); + } else if (r.IsInteger16() || r.IsUInteger16()) { + mov_w(dst, src); + } else { + if (r.IsHeapObject()) { + AssertNotSmi(src); + } else if (r.IsSmi()) { + AssertSmi(src); + } + mov(dst, src); + } +} + + +void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { + if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) { + Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); + mov(destination, value); + return; + } + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + mov(destination, Immediate(index)); + mov(destination, Operand::StaticArray(destination, + times_pointer_size, + roots_array_start)); +} + + +void MacroAssembler::StoreRoot(Register source, + Register scratch, + Heap::RootListIndex index) { + DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + mov(scratch, Immediate(index)); + mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start), + source); +} + + +void MacroAssembler::CompareRoot(Register 
with, + Register scratch, + Heap::RootListIndex index) { + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + mov(scratch, Immediate(index)); + cmp(with, Operand::StaticArray(scratch, + times_pointer_size, + roots_array_start)); +} + + +void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { + DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index)); + Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); + cmp(with, value); +} + + +void MacroAssembler::CompareRoot(const Operand& with, + Heap::RootListIndex index) { + DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index)); + Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); + cmp(with, value); +} + + +void MacroAssembler::InNewSpace( + Register object, + Register scratch, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + DCHECK(cc == equal || cc == not_equal); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + mov(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); + } + // Check that we can use a test_b. + DCHECK(MemoryChunk::IN_FROM_SPACE < 8); + DCHECK(MemoryChunk::IN_TO_SPACE < 8); + int mask = (1 << MemoryChunk::IN_FROM_SPACE) + | (1 << MemoryChunk::IN_TO_SPACE); + // If non-zero, the page belongs to new-space. + test_b(Operand(scratch, MemoryChunk::kFlagsOffset), + static_cast<uint8_t>(mask)); + j(cc, condition_met, condition_met_distance); +} + + +void MacroAssembler::RememberedSetHelper( + Register object, // Only used for debug checks. + Register addr, + Register scratch, + MacroAssembler::RememberedSetFinalAction and_then) { + Label done; + if (emit_debug_code()) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); + int3(); + bind(&ok); + } + // Load store buffer top. 
+ ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + mov(scratch, Operand::StaticVariable(store_buffer)); + // Store pointer to buffer. + mov(Operand(scratch, 0), addr); + // Increment buffer top. + add(scratch, Immediate(kPointerSize)); + // Write back new top of buffer. + mov(Operand::StaticVariable(store_buffer), scratch); + // Call stub on end of buffer. + // Check for end of buffer. + test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kReturnAtEnd) { + Label buffer_overflowed; + j(not_equal, &buffer_overflowed, Label::kNear); + ret(0); + bind(&buffer_overflowed); + } else { + DCHECK(and_then == kFallThroughAtEnd); + j(equal, &done, Label::kNear); + } + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(isolate()); + CallStub(&store_buffer_overflow); + if (and_then == kReturnAtEnd) { + ret(0); + } else { + DCHECK(and_then == kFallThroughAtEnd); + bind(&done); + } +} + + +void MacroAssembler::ClampUint8(Register reg) { + Label done; + test(reg, Immediate(0xFFFFFF00)); + j(zero, &done, Label::kNear); + setcc(negative, reg); // 1 if negative, 0 if positive. + dec_b(reg); // 0 if negative, 255 if positive. 
+ bind(&done); +} + + +void MacroAssembler::SlowTruncateToI(Register result_reg, + Register input_reg, + int offset) { + DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); + call(stub.GetCode(), RelocInfo::CODE_TARGET); +} + + +void MacroAssembler::TruncateX87TOSToI(Register result_reg) { + sub(esp, Immediate(kDoubleSize)); + fst_d(MemOperand(esp, 0)); + SlowTruncateToI(result_reg, esp, 0); + add(esp, Immediate(kDoubleSize)); +} + + +void MacroAssembler::X87TOSToI(Register result_reg, + MinusZeroMode minus_zero_mode, + Label* conversion_failed, + Label::Distance dst) { + Label done; + sub(esp, Immediate(kPointerSize)); + fld(0); + fist_s(MemOperand(esp, 0)); + fild_s(MemOperand(esp, 0)); + pop(result_reg); + FCmp(); + j(not_equal, conversion_failed, dst); + j(parity_even, conversion_failed, dst); + if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { + test(result_reg, Operand(result_reg)); + j(not_zero, &done, Label::kNear); + // To check for minus zero, we load the value again as float, and check + // if that is still 0. + sub(esp, Immediate(kPointerSize)); + fst_s(MemOperand(esp, 0)); + pop(result_reg); + test(result_reg, Operand(result_reg)); + j(not_zero, conversion_failed, dst); + } + bind(&done); +} + + +void MacroAssembler::TruncateHeapNumberToI(Register result_reg, + Register input_reg) { + Label done, slow_case; + + SlowTruncateToI(result_reg, input_reg); + bind(&done); +} + + +void MacroAssembler::TaggedToI(Register result_reg, + Register input_reg, + MinusZeroMode minus_zero_mode, + Label* lost_precision) { + Label done; + + cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + j(not_equal, lost_precision, Label::kNear); + + // TODO(olivf) Converting a number on the fpu is actually quite slow. We + // should first try a fast conversion and then bailout to this slow case. + Label lost_precision_pop, zero_check; + Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO) + ? 
&lost_precision_pop : lost_precision; + sub(esp, Immediate(kPointerSize)); + fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0); + fist_s(MemOperand(esp, 0)); + fild_s(MemOperand(esp, 0)); + FCmp(); + pop(result_reg); + j(not_equal, lost_precision_int, Label::kNear); + j(parity_even, lost_precision_int, Label::kNear); // NaN. + if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { + test(result_reg, Operand(result_reg)); + j(zero, &zero_check, Label::kNear); + fstp(0); + jmp(&done, Label::kNear); + bind(&zero_check); + // To check for minus zero, we load the value again as float, and check + // if that is still 0. + sub(esp, Immediate(kPointerSize)); + fstp_s(Operand(esp, 0)); + pop(result_reg); + test(result_reg, Operand(result_reg)); + j(zero, &done, Label::kNear); + jmp(lost_precision, Label::kNear); + + bind(&lost_precision_pop); + fstp(0); + jmp(lost_precision, Label::kNear); + } + bind(&done); +} + + +void MacroAssembler::LoadUint32NoSSE2(Register src) { + Label done; + push(src); + fild_s(Operand(esp, 0)); + cmp(src, Immediate(0)); + j(not_sign, &done, Label::kNear); + ExternalReference uint32_bias = + ExternalReference::address_of_uint32_bias(); + fld_d(Operand::StaticVariable(uint32_bias)); + faddp(1); + bind(&done); + add(esp, Immediate(kPointerSize)); +} + + +void MacroAssembler::RecordWriteArray( + Register object, + Register value, + Register index, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. + Label done; + + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + DCHECK_EQ(0, kSmiTag); + test(value, Immediate(kSmiTagMask)); + j(zero, &done); + } + + // Array access: calculate the destination address in the same manner as + // KeyedStoreIC::GenerateGeneric. 
Multiply a smi by 2 to get an offset + // into an array of words. + Register dst = index; + lea(dst, Operand(object, index, times_half_pointer_size, + FixedArray::kHeaderSize - kHeapObjectTag)); + + RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK, + pointers_to_here_check_for_value); + + bind(&done); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + mov(value, Immediate(BitCast<int32_t>(kZapValue))); + mov(index, Immediate(BitCast<int32_t>(kZapValue))); + } +} + + +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. + Label done; + + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done, Label::kNear); + } + + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + DCHECK(IsAligned(offset, kPointerSize)); + + lea(dst, FieldOperand(object, offset)); + if (emit_debug_code()) { + Label ok; + test_b(dst, (1 << kPointerSizeLog2) - 1); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); + } + + RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK, + pointers_to_here_check_for_value); + + bind(&done); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. 
+ if (emit_debug_code()) { + mov(value, Immediate(BitCast<int32_t>(kZapValue))); + mov(dst, Immediate(BitCast<int32_t>(kZapValue))); + } +} + + +void MacroAssembler::RecordWriteForMap( + Register object, + Handle<Map> map, + Register scratch1, + Register scratch2) { + Label done; + + Register address = scratch1; + Register value = scratch2; + if (emit_debug_code()) { + Label ok; + lea(address, FieldOperand(object, HeapObject::kMapOffset)); + test_b(address, (1 << kPointerSizeLog2) - 1); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); + } + + DCHECK(!object.is(value)); + DCHECK(!object.is(address)); + DCHECK(!value.is(address)); + AssertNotSmi(object); + + if (!FLAG_incremental_marking) { + return; + } + + // Compute the address. + lea(address, FieldOperand(object, HeapObject::kMapOffset)); + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. This optimization + // relies on the fact that maps can never be in new space. + DCHECK(!isolate()->heap()->InNewSpace(*map)); + CheckPageFlagForMap(map, + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + + RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET); + CallStub(&stub); + + bind(&done); + + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. 
+ if (emit_debug_code()) { + mov(value, Immediate(BitCast<int32_t>(kZapValue))); + mov(scratch1, Immediate(BitCast<int32_t>(kZapValue))); + mov(scratch2, Immediate(BitCast<int32_t>(kZapValue))); + } +} + + +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!object.is(value)); + DCHECK(!object.is(address)); + DCHECK(!value.is(address)); + AssertNotSmi(object); + + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + Label ok; + cmp(value, Operand(address, 0)); + j(equal, &ok, Label::kNear); + int3(); + bind(&ok); + } + + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis and stores into young gen. + Label done; + + if (smi_check == INLINE_SMI_CHECK) { + // Skip barrier if writing a smi. + JumpIfSmi(value, &done, Label::kNear); + } + + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + } + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + zero, + &done, + Label::kNear); + + RecordWriteStub stub(isolate(), object, value, address, + remembered_set_action); + CallStub(&stub); + + bind(&done); + + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. 
+ if (emit_debug_code()) { + mov(address, Immediate(BitCast<int32_t>(kZapValue))); + mov(value, Immediate(BitCast<int32_t>(kZapValue))); + } +} + + +void MacroAssembler::DebugBreak() { + Move(eax, Immediate(0)); + mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate()))); + CEntryStub ces(isolate(), 1); + call(ces.GetCode(), RelocInfo::DEBUG_BREAK); +} + + +bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) { + static const int kMaxImmediateBits = 17; + if (!RelocInfo::IsNone(x.rmode_)) return false; + return !is_intn(x.x_, kMaxImmediateBits); +} + + +void MacroAssembler::SafeMove(Register dst, const Immediate& x) { + if (IsUnsafeImmediate(x) && jit_cookie() != 0) { + Move(dst, Immediate(x.x_ ^ jit_cookie())); + xor_(dst, jit_cookie()); + } else { + Move(dst, x); + } +} + + +void MacroAssembler::SafePush(const Immediate& x) { + if (IsUnsafeImmediate(x) && jit_cookie() != 0) { + push(Immediate(x.x_ ^ jit_cookie())); + xor_(Operand(esp, 0), Immediate(jit_cookie())); + } else { + push(x); + } +} + + +void MacroAssembler::CmpObjectType(Register heap_object, + InstanceType type, + Register map) { + mov(map, FieldOperand(heap_object, HeapObject::kMapOffset)); + CmpInstanceType(map, type); +} + + +void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { + cmpb(FieldOperand(map, Map::kInstanceTypeOffset), + static_cast<int8_t>(type)); +} + + +void MacroAssembler::CheckFastElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastHoleyElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + 
STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastHoleySmiElementValue); + j(below_equal, fail, distance); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastHoleyElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::CheckFastSmiElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastHoleySmiElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::StoreNumberToDoubleElements( + Register maybe_number, + Register elements, + Register key, + Register scratch, + Label* fail, + int elements_offset) { + Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; + JumpIfSmi(maybe_number, &smi_value, Label::kNear); + + CheckMap(maybe_number, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Double value, canonicalize NaN. + uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); + cmp(FieldOperand(maybe_number, offset), + Immediate(kNaNOrInfinityLowerBoundUpper32)); + j(greater_equal, &maybe_nan, Label::kNear); + + bind(¬_nan); + ExternalReference canonical_nan_reference = + ExternalReference::address_of_canonical_non_hole_nan(); + fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + fstp_d(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset)); + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. 
+ j(greater, &is_nan, Label::kNear); + cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); + j(zero, ¬_nan); + bind(&is_nan); + fld_d(Operand::StaticVariable(canonical_nan_reference)); + jmp(&have_double_value, Label::kNear); + + bind(&smi_value); + // Value is a smi. Convert to a double and store. + // Preserve original value. + mov(scratch, maybe_number); + SmiUntag(scratch); + push(scratch); + fild_s(Operand(esp, 0)); + pop(scratch); + fstp_d(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset)); + bind(&done); +} + + +void MacroAssembler::CompareMap(Register obj, Handle<Map> map) { + cmp(FieldOperand(obj, HeapObject::kMapOffset), map); +} + + +void MacroAssembler::CheckMap(Register obj, + Handle<Map> map, + Label* fail, + SmiCheckType smi_check_type) { + if (smi_check_type == DO_SMI_CHECK) { + JumpIfSmi(obj, fail); + } + + CompareMap(obj, map); + j(not_equal, fail); +} + + +void MacroAssembler::DispatchMap(Register obj, + Register unused, + Handle<Map> map, + Handle<Code> success, + SmiCheckType smi_check_type) { + Label fail; + if (smi_check_type == DO_SMI_CHECK) { + JumpIfSmi(obj, &fail); + } + cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map)); + j(equal, success); + + bind(&fail); +} + + +Condition MacroAssembler::IsObjectStringType(Register heap_object, + Register map, + Register instance_type) { + mov(map, FieldOperand(heap_object, HeapObject::kMapOffset)); + movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kNotStringTag != 0); + test(instance_type, Immediate(kIsNotStringMask)); + return zero; +} + + +Condition MacroAssembler::IsObjectNameType(Register heap_object, + Register map, + Register instance_type) { + mov(map, FieldOperand(heap_object, HeapObject::kMapOffset)); + movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); + cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE)); + return below_equal; +} + + +void 
MacroAssembler::IsObjectJSObjectType(Register heap_object, + Register map, + Register scratch, + Label* fail) { + mov(map, FieldOperand(heap_object, HeapObject::kMapOffset)); + IsInstanceJSObjectType(map, scratch, fail); +} + + +void MacroAssembler::IsInstanceJSObjectType(Register map, + Register scratch, + Label* fail) { + movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset)); + sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + cmp(scratch, + LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + j(above, fail); +} + + +void MacroAssembler::FCmp() { + fucompp(); + push(eax); + fnstsw_ax(); + sahf(); + pop(eax); +} + + +void MacroAssembler::AssertNumber(Register object) { + if (emit_debug_code()) { + Label ok; + JumpIfSmi(object, &ok); + cmp(FieldOperand(object, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + Check(equal, kOperandNotANumber); + bind(&ok); + } +} + + +void MacroAssembler::AssertSmi(Register object) { + if (emit_debug_code()) { + test(object, Immediate(kSmiTagMask)); + Check(equal, kOperandIsNotASmi); + } +} + + +void MacroAssembler::AssertString(Register object) { + if (emit_debug_code()) { + test(object, Immediate(kSmiTagMask)); + Check(not_equal, kOperandIsASmiAndNotAString); + push(object); + mov(object, FieldOperand(object, HeapObject::kMapOffset)); + CmpInstanceType(object, FIRST_NONSTRING_TYPE); + pop(object); + Check(below, kOperandIsNotAString); + } +} + + +void MacroAssembler::AssertName(Register object) { + if (emit_debug_code()) { + test(object, Immediate(kSmiTagMask)); + Check(not_equal, kOperandIsASmiAndNotAName); + push(object); + mov(object, FieldOperand(object, HeapObject::kMapOffset)); + CmpInstanceType(object, LAST_NAME_TYPE); + pop(object); + Check(below_equal, kOperandIsNotAName); + } +} + + +void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { + if (emit_debug_code()) { + Label done_checking; + AssertNotSmi(object); + cmp(object, 
isolate()->factory()->undefined_value()); + j(equal, &done_checking); + cmp(FieldOperand(object, 0), + Immediate(isolate()->factory()->allocation_site_map())); + Assert(equal, kExpectedUndefinedOrCell); + bind(&done_checking); + } +} + + +void MacroAssembler::AssertNotSmi(Register object) { + if (emit_debug_code()) { + test(object, Immediate(kSmiTagMask)); + Check(not_equal, kOperandIsASmi); + } +} + + +void MacroAssembler::StubPrologue() { + push(ebp); // Caller's frame pointer. + mov(ebp, esp); + push(esi); // Callee's context. + push(Immediate(Smi::FromInt(StackFrame::STUB))); +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + PredictableCodeSizeScope predictible_code_size_scope(this, + kNoCodeAgeSequenceLength); + if (code_pre_aging) { + // Pre-age the code. + call(isolate()->builtins()->MarkCodeAsExecutedOnce(), + RelocInfo::CODE_AGE_SEQUENCE); + Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength); + } else { + push(ebp); // Caller's frame pointer. + mov(ebp, esp); + push(esi); // Callee's context. + push(edi); // Callee's JS function. + } +} + + +void MacroAssembler::EnterFrame(StackFrame::Type type) { + push(ebp); + mov(ebp, esp); + push(esi); + push(Immediate(Smi::FromInt(type))); + push(Immediate(CodeObject())); + if (emit_debug_code()) { + cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value())); + Check(not_equal, kCodeObjectNotProperlyPatched); + } +} + + +void MacroAssembler::LeaveFrame(StackFrame::Type type) { + if (emit_debug_code()) { + cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(type))); + Check(equal, kStackFrameTypesMustMatch); + } + leave(); +} + + +void MacroAssembler::EnterExitFramePrologue() { + // Set up the frame structure on the stack. 
+ DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); + DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); + DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); + push(ebp); + mov(ebp, esp); + + // Reserve room for entry stack pointer and push the code object. + DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize); + push(Immediate(0)); // Saved entry sp, patched before call. + push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot. + + // Save the frame pointer and the context in top. + ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate()); + ExternalReference context_address(Isolate::kContextAddress, isolate()); + mov(Operand::StaticVariable(c_entry_fp_address), ebp); + mov(Operand::StaticVariable(context_address), esi); +} + + +void MacroAssembler::EnterExitFrameEpilogue(int argc) { + sub(esp, Immediate(argc * kPointerSize)); + + // Get the required frame alignment for the OS. + const int kFrameAlignment = base::OS::ActivationFrameAlignment(); + if (kFrameAlignment > 0) { + DCHECK(IsPowerOf2(kFrameAlignment)); + and_(esp, -kFrameAlignment); + } + + // Patch the saved entry sp. + mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp); +} + + +void MacroAssembler::EnterExitFrame() { + EnterExitFramePrologue(); + + // Set up argc and argv in callee-saved registers. + int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; + mov(edi, eax); + lea(esi, Operand(ebp, eax, times_4, offset)); + + // Reserve space for argc, argv and isolate. + EnterExitFrameEpilogue(3); +} + + +void MacroAssembler::EnterApiExitFrame(int argc) { + EnterExitFramePrologue(); + EnterExitFrameEpilogue(argc); +} + + +void MacroAssembler::LeaveExitFrame() { + // Get the return address from the stack and restore the frame pointer. + mov(ecx, Operand(ebp, 1 * kPointerSize)); + mov(ebp, Operand(ebp, 0 * kPointerSize)); + + // Pop the arguments and the receiver from the caller stack. 
+ lea(esp, Operand(esi, 1 * kPointerSize)); + + // Push the return address to get ready to return. + push(ecx); + + LeaveExitFrameEpilogue(true); +} + + +void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) { + // Restore current context from top and clear it in debug mode. + ExternalReference context_address(Isolate::kContextAddress, isolate()); + if (restore_context) { + mov(esi, Operand::StaticVariable(context_address)); + } +#ifdef DEBUG + mov(Operand::StaticVariable(context_address), Immediate(0)); +#endif + + // Clear the top frame. + ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, + isolate()); + mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0)); +} + + +void MacroAssembler::LeaveApiExitFrame(bool restore_context) { + mov(esp, ebp); + pop(ebp); + + LeaveExitFrameEpilogue(restore_context); +} + + +void MacroAssembler::PushTryHandler(StackHandler::Kind kind, + int handler_index) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); + + // We will build up the handler from the bottom by pushing on the stack. + // First push the frame pointer and context. + if (kind == StackHandler::JS_ENTRY) { + // The frame pointer does not point to a JS frame so we save NULL for + // ebp. We expect the code throwing an exception to check ebp before + // dereferencing it to restore the context. + push(Immediate(0)); // NULL frame pointer. + push(Immediate(Smi::FromInt(0))); // No context. + } else { + push(ebp); + push(esi); + } + // Push the state and the code object. 
+ unsigned state = + StackHandler::IndexField::encode(handler_index) | + StackHandler::KindField::encode(kind); + push(Immediate(state)); + Push(CodeObject()); + + // Link the current handler as the next handler. + ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); + push(Operand::StaticVariable(handler_address)); + // Set this new handler as the current one. + mov(Operand::StaticVariable(handler_address), esp); +} + + +void MacroAssembler::PopTryHandler() { + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); + pop(Operand::StaticVariable(handler_address)); + add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize)); +} + + +void MacroAssembler::JumpToHandlerEntry() { + // Compute the handler entry address and jump to it. The handler table is + // a fixed array of (smi-tagged) code offsets. + // eax = exception, edi = code object, edx = state. + mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset)); + shr(edx, StackHandler::kKindWidth); + mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize)); + SmiUntag(edx); + lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize)); + jmp(edi); +} + + +void MacroAssembler::Throw(Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); + + // The exception is expected in eax. + if (!value.is(eax)) { + mov(eax, value); + } + // Drop the stack pointer to the top of the top handler. 
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); + mov(esp, Operand::StaticVariable(handler_address)); + // Restore the next handler. + pop(Operand::StaticVariable(handler_address)); + + // Remove the code object and state, compute the handler address in edi. + pop(edi); // Code object. + pop(edx); // Index and state. + + // Restore the context and frame pointer. + pop(esi); // Context. + pop(ebp); // Frame pointer. + + // If the handler is a JS frame, restore the context to the frame. + // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either + // ebp or esi. + Label skip; + test(esi, esi); + j(zero, &skip, Label::kNear); + mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); + bind(&skip); + + JumpToHandlerEntry(); +} + + +void MacroAssembler::ThrowUncatchable(Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); + + // The exception is expected in eax. + if (!value.is(eax)) { + mov(eax, value); + } + // Drop the stack pointer to the top of the top stack handler. + ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); + mov(esp, Operand::StaticVariable(handler_address)); + + // Unwind the handlers until the top ENTRY handler is found. 
+ Label fetch_next, check_kind; + jmp(&check_kind, Label::kNear); + bind(&fetch_next); + mov(esp, Operand(esp, StackHandlerConstants::kNextOffset)); + + bind(&check_kind); + STATIC_ASSERT(StackHandler::JS_ENTRY == 0); + test(Operand(esp, StackHandlerConstants::kStateOffset), + Immediate(StackHandler::KindField::kMask)); + j(not_zero, &fetch_next); + + // Set the top handler address to next handler past the top ENTRY handler. + pop(Operand::StaticVariable(handler_address)); + + // Remove the code object and state, compute the handler address in edi. + pop(edi); // Code object. + pop(edx); // Index and state. + + // Clear the context pointer and frame pointer (0 was saved in the handler). + pop(esi); + pop(ebp); + + JumpToHandlerEntry(); +} + + +void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, + Register scratch1, + Register scratch2, + Label* miss) { + Label same_contexts; + + DCHECK(!holder_reg.is(scratch1)); + DCHECK(!holder_reg.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + + // Load current lexical context from the stack frame. + mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); + + // When generating debug code, make sure the lexical context is set. + if (emit_debug_code()) { + cmp(scratch1, Immediate(0)); + Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext); + } + // Load the native context of the current context. + int offset = + Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; + mov(scratch1, FieldOperand(scratch1, offset)); + mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset)); + + // Check the context is a native context. + if (emit_debug_code()) { + // Read the first word and compare to native_context_map. + cmp(FieldOperand(scratch1, HeapObject::kMapOffset), + isolate()->factory()->native_context_map()); + Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext); + } + + // Check if both contexts are the same. 
+ cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); + j(equal, &same_contexts); + + // Compare security tokens, save holder_reg on the stack so we can use it + // as a temporary register. + // + // Check that the security token in the calling global object is + // compatible with the security token in the receiving global + // object. + mov(scratch2, + FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); + + // Check the context is a native context. + if (emit_debug_code()) { + cmp(scratch2, isolate()->factory()->null_value()); + Check(not_equal, kJSGlobalProxyContextShouldNotBeNull); + + // Read the first word and compare to native_context_map(), + cmp(FieldOperand(scratch2, HeapObject::kMapOffset), + isolate()->factory()->native_context_map()); + Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext); + } + + int token_offset = Context::kHeaderSize + + Context::SECURITY_TOKEN_INDEX * kPointerSize; + mov(scratch1, FieldOperand(scratch1, token_offset)); + cmp(scratch1, FieldOperand(scratch2, token_offset)); + j(not_equal, miss); + + bind(&same_contexts); +} + + +// Compute the hash code from the untagged key. This must be kept in sync with +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in +// code-stub-hydrogen.cc +// +// Note: r0 will contain hash code +void MacroAssembler::GetNumberHash(Register r0, Register scratch) { + // Xor original key with a seed. 
+ if (serializer_enabled()) { + ExternalReference roots_array_start = + ExternalReference::roots_array_start(isolate()); + mov(scratch, Immediate(Heap::kHashSeedRootIndex)); + mov(scratch, + Operand::StaticArray(scratch, times_pointer_size, roots_array_start)); + SmiUntag(scratch); + xor_(r0, scratch); + } else { + int32_t seed = isolate()->heap()->HashSeed(); + xor_(r0, Immediate(seed)); + } + + // hash = ~hash + (hash << 15); + mov(scratch, r0); + not_(r0); + shl(scratch, 15); + add(r0, scratch); + // hash = hash ^ (hash >> 12); + mov(scratch, r0); + shr(scratch, 12); + xor_(r0, scratch); + // hash = hash + (hash << 2); + lea(r0, Operand(r0, r0, times_4, 0)); + // hash = hash ^ (hash >> 4); + mov(scratch, r0); + shr(scratch, 4); + xor_(r0, scratch); + // hash = hash * 2057; + imul(r0, r0, 2057); + // hash = hash ^ (hash >> 16); + mov(scratch, r0); + shr(scratch, 16); + xor_(r0, scratch); +} + + + +void MacroAssembler::LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register r0, + Register r1, + Register r2, + Register result) { + // Register use: + // + // elements - holds the slow-case elements of the receiver and is unchanged. + // + // key - holds the smi key on entry and is unchanged. + // + // Scratch registers: + // + // r0 - holds the untagged key on entry and holds the hash once computed. + // + // r1 - used to hold the capacity mask of the dictionary + // + // r2 - used for the index into the dictionary. + // + // result - holds the result on exit if the load succeeds and we fall through. + + Label done; + + GetNumberHash(r0, r1); + + // Compute capacity mask. + mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset)); + shr(r1, kSmiTagSize); // convert smi to int + dec(r1); + + // Generate an unrolled loop that performs a few probes before giving up. + for (int i = 0; i < kNumberDictionaryProbes; i++) { + // Use r2 for index calculations and keep the hash intact in r0. 
+ mov(r2, r0); + // Compute the masked index: (hash + i + i * i) & mask. + if (i > 0) { + add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); + } + and_(r2, r1); + + // Scale the index by multiplying by the entry size. + DCHECK(SeededNumberDictionary::kEntrySize == 3); + lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 + + // Check if the key matches. + cmp(key, FieldOperand(elements, + r2, + times_pointer_size, + SeededNumberDictionary::kElementsStartOffset)); + if (i != (kNumberDictionaryProbes - 1)) { + j(equal, &done); + } else { + j(not_equal, miss); + } + } + + bind(&done); + // Check that the value is a normal propety. + const int kDetailsOffset = + SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; + DCHECK_EQ(NORMAL, 0); + test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), + Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); + j(not_zero, miss); + + // Get the value at the masked, scaled index. + const int kValueOffset = + SeededNumberDictionary::kElementsStartOffset + kPointerSize; + mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); +} + + +void MacroAssembler::LoadAllocationTopHelper(Register result, + Register scratch, + AllocationFlags flags) { + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); + + // Just return if allocation top is already known. + if ((flags & RESULT_CONTAINS_TOP) != 0) { + // No use of scratch if allocation top is provided. + DCHECK(scratch.is(no_reg)); +#ifdef DEBUG + // Assert that result actually contains top on entry. + cmp(result, Operand::StaticVariable(allocation_top)); + Check(equal, kUnexpectedAllocationTop); +#endif + return; + } + + // Move address of new object to result. Use scratch register if available. 
+ if (scratch.is(no_reg)) { + mov(result, Operand::StaticVariable(allocation_top)); + } else { + mov(scratch, Immediate(allocation_top)); + mov(result, Operand(scratch, 0)); + } +} + + +void MacroAssembler::UpdateAllocationTopHelper(Register result_end, + Register scratch, + AllocationFlags flags) { + if (emit_debug_code()) { + test(result_end, Immediate(kObjectAlignmentMask)); + Check(zero, kUnalignedAllocationInNewSpace); + } + + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); + + // Update new top. Use scratch if available. + if (scratch.is(no_reg)) { + mov(Operand::StaticVariable(allocation_top), result_end); + } else { + mov(Operand(scratch, 0), result_end); + } +} + + +void MacroAssembler::Allocate(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); + if (!FLAG_inline_new) { + if (emit_debug_code()) { + // Trash the registers to simulate an allocation failure. + mov(result, Immediate(0x7091)); + if (result_end.is_valid()) { + mov(result_end, Immediate(0x7191)); + } + if (scratch.is_valid()) { + mov(scratch, Immediate(0x7291)); + } + } + jmp(gc_required); + return; + } + DCHECK(!result.is(result_end)); + + // Load address of new object into result. + LoadAllocationTopHelper(result, scratch, flags); + + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + + // Align the next allocation. Storing the filler map without checking top is + // safe in new-space because the limit of the heap is aligned there. 
+ if ((flags & DOUBLE_ALIGNMENT) != 0) { + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); + Label aligned; + test(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { + cmp(result, Operand::StaticVariable(allocation_limit)); + j(above_equal, gc_required); + } + mov(Operand(result, 0), + Immediate(isolate()->factory()->one_pointer_filler_map())); + add(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } + + // Calculate new top and bail out if space is exhausted. + Register top_reg = result_end.is_valid() ? result_end : result; + if (!top_reg.is(result)) { + mov(top_reg, result); + } + add(top_reg, Immediate(object_size)); + j(carry, gc_required); + cmp(top_reg, Operand::StaticVariable(allocation_limit)); + j(above, gc_required); + + // Update allocation top. + UpdateAllocationTopHelper(top_reg, scratch, flags); + + // Tag result if requested. + bool tag_result = (flags & TAG_OBJECT) != 0; + if (top_reg.is(result)) { + if (tag_result) { + sub(result, Immediate(object_size - kHeapObjectTag)); + } else { + sub(result, Immediate(object_size)); + } + } else if (tag_result) { + DCHECK(kHeapObjectTag == 1); + inc(result); + } +} + + +void MacroAssembler::Allocate(int header_size, + ScaleFactor element_size, + Register element_count, + RegisterValueType element_count_type, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + DCHECK((flags & SIZE_IN_WORDS) == 0); + if (!FLAG_inline_new) { + if (emit_debug_code()) { + // Trash the registers to simulate an allocation failure. + mov(result, Immediate(0x7091)); + mov(result_end, Immediate(0x7191)); + if (scratch.is_valid()) { + mov(scratch, Immediate(0x7291)); + } + // Register element_count is not modified by the function. 
+ } + jmp(gc_required); + return; + } + DCHECK(!result.is(result_end)); + + // Load address of new object into result. + LoadAllocationTopHelper(result, scratch, flags); + + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + + // Align the next allocation. Storing the filler map without checking top is + // safe in new-space because the limit of the heap is aligned there. + if ((flags & DOUBLE_ALIGNMENT) != 0) { + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); + Label aligned; + test(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { + cmp(result, Operand::StaticVariable(allocation_limit)); + j(above_equal, gc_required); + } + mov(Operand(result, 0), + Immediate(isolate()->factory()->one_pointer_filler_map())); + add(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } + + // Calculate new top and bail out if space is exhausted. + // We assume that element_count*element_size + header_size does not + // overflow. + if (element_count_type == REGISTER_VALUE_IS_SMI) { + STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1); + STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2); + STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4); + DCHECK(element_size >= times_2); + DCHECK(kSmiTagSize == 1); + element_size = static_cast<ScaleFactor>(element_size - 1); + } else { + DCHECK(element_count_type == REGISTER_VALUE_IS_INT32); + } + lea(result_end, Operand(element_count, element_size, header_size)); + add(result_end, result); + j(carry, gc_required); + cmp(result_end, Operand::StaticVariable(allocation_limit)); + j(above, gc_required); + + if ((flags & TAG_OBJECT) != 0) { + DCHECK(kHeapObjectTag == 1); + inc(result); + } + + // Update allocation top. 
+ UpdateAllocationTopHelper(result_end, scratch, flags); +} + + +void MacroAssembler::Allocate(Register object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + if (!FLAG_inline_new) { + if (emit_debug_code()) { + // Trash the registers to simulate an allocation failure. + mov(result, Immediate(0x7091)); + mov(result_end, Immediate(0x7191)); + if (scratch.is_valid()) { + mov(scratch, Immediate(0x7291)); + } + // object_size is left unchanged by this function. + } + jmp(gc_required); + return; + } + DCHECK(!result.is(result_end)); + + // Load address of new object into result. + LoadAllocationTopHelper(result, scratch, flags); + + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + + // Align the next allocation. Storing the filler map without checking top is + // safe in new-space because the limit of the heap is aligned there. + if ((flags & DOUBLE_ALIGNMENT) != 0) { + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); + Label aligned; + test(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { + cmp(result, Operand::StaticVariable(allocation_limit)); + j(above_equal, gc_required); + } + mov(Operand(result, 0), + Immediate(isolate()->factory()->one_pointer_filler_map())); + add(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } + + // Calculate new top and bail out if space is exhausted. + if (!object_size.is(result_end)) { + mov(result_end, object_size); + } + add(result_end, result); + j(carry, gc_required); + cmp(result_end, Operand::StaticVariable(allocation_limit)); + j(above, gc_required); + + // Tag result if requested. + if ((flags & TAG_OBJECT) != 0) { + DCHECK(kHeapObjectTag == 1); + inc(result); + } + + // Update allocation top. 
+ UpdateAllocationTopHelper(result_end, scratch, flags); +} + + +void MacroAssembler::UndoAllocationInNewSpace(Register object) { + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + + // Make sure the object has no tag before resetting top. + and_(object, Immediate(~kHeapObjectTagMask)); +#ifdef DEBUG + cmp(object, Operand::StaticVariable(new_space_allocation_top)); + Check(below, kUndoAllocationOfNonAllocatedMemory); +#endif + mov(Operand::StaticVariable(new_space_allocation_top), object); +} + + +void MacroAssembler::AllocateHeapNumber(Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + MutableMode mode) { + // Allocate heap number in new space. + Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + + Handle<Map> map = mode == MUTABLE + ? isolate()->factory()->mutable_heap_number_map() + : isolate()->factory()->heap_number_map(); + + // Set the map. + mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map)); +} + + +void MacroAssembler::AllocateTwoByteString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required) { + // Calculate the number of bytes needed for the characters in the string while + // observing object alignment. + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK(kShortSize == 2); + // scratch1 = length * 2 + kObjectAlignmentMask. + lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); + + // Allocate two byte string in new space. + Allocate(SeqTwoByteString::kHeaderSize, + times_1, + scratch1, + REGISTER_VALUE_IS_INT32, + result, + scratch2, + scratch3, + gc_required, + TAG_OBJECT); + + // Set the map, length and hash field. 
+ mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->string_map())); + mov(scratch1, length); + SmiTag(scratch1); + mov(FieldOperand(result, String::kLengthOffset), scratch1); + mov(FieldOperand(result, String::kHashFieldOffset), + Immediate(String::kEmptyHashField)); +} + + +void MacroAssembler::AllocateAsciiString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required) { + // Calculate the number of bytes needed for the characters in the string while + // observing object alignment. + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + mov(scratch1, length); + DCHECK(kCharSize == 1); + add(scratch1, Immediate(kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); + + // Allocate ASCII string in new space. + Allocate(SeqOneByteString::kHeaderSize, + times_1, + scratch1, + REGISTER_VALUE_IS_INT32, + result, + scratch2, + scratch3, + gc_required, + TAG_OBJECT); + + // Set the map, length and hash field. + mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->ascii_string_map())); + mov(scratch1, length); + SmiTag(scratch1); + mov(FieldOperand(result, String::kLengthOffset), scratch1); + mov(FieldOperand(result, String::kHashFieldOffset), + Immediate(String::kEmptyHashField)); +} + + +void MacroAssembler::AllocateAsciiString(Register result, + int length, + Register scratch1, + Register scratch2, + Label* gc_required) { + DCHECK(length > 0); + + // Allocate ASCII string in new space. + Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, + gc_required, TAG_OBJECT); + + // Set the map, length and hash field. 
+ mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->ascii_string_map())); + mov(FieldOperand(result, String::kLengthOffset), + Immediate(Smi::FromInt(length))); + mov(FieldOperand(result, String::kHashFieldOffset), + Immediate(String::kEmptyHashField)); +} + + +void MacroAssembler::AllocateTwoByteConsString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required) { + // Allocate heap number in new space. + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + + // Set the map. The other fields are left uninitialized. + mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->cons_string_map())); +} + + +void MacroAssembler::AllocateAsciiConsString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required) { + Allocate(ConsString::kSize, + result, + scratch1, + scratch2, + gc_required, + TAG_OBJECT); + + // Set the map. The other fields are left uninitialized. + mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->cons_ascii_string_map())); +} + + +void MacroAssembler::AllocateTwoByteSlicedString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required) { + // Allocate heap number in new space. + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + + // Set the map. The other fields are left uninitialized. + mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->sliced_string_map())); +} + + +void MacroAssembler::AllocateAsciiSlicedString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required) { + // Allocate heap number in new space. + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); + + // Set the map. The other fields are left uninitialized. 
+ mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(isolate()->factory()->sliced_ascii_string_map())); +} + + +// Copy memory, byte-by-byte, from source to destination. Not optimized for +// long or aligned copies. The contents of scratch and length are destroyed. +// Source and destination are incremented by length. +// Many variants of movsb, loop unrolling, word moves, and indexed operands +// have been tried here already, and this is fastest. +// A simpler loop is faster on small copies, but 30% slower on large ones. +// The cld() instruction must have been emitted, to set the direction flag(), +// before calling this function. +void MacroAssembler::CopyBytes(Register source, + Register destination, + Register length, + Register scratch) { + Label short_loop, len4, len8, len12, done, short_string; + DCHECK(source.is(esi)); + DCHECK(destination.is(edi)); + DCHECK(length.is(ecx)); + cmp(length, Immediate(4)); + j(below, &short_string, Label::kNear); + + // Because source is 4-byte aligned in our uses of this function, + // we keep source aligned for the rep_movs call by copying the odd bytes + // at the end of the ranges. 
+ mov(scratch, Operand(source, length, times_1, -4)); + mov(Operand(destination, length, times_1, -4), scratch); + + cmp(length, Immediate(8)); + j(below_equal, &len4, Label::kNear); + cmp(length, Immediate(12)); + j(below_equal, &len8, Label::kNear); + cmp(length, Immediate(16)); + j(below_equal, &len12, Label::kNear); + + mov(scratch, ecx); + shr(ecx, 2); + rep_movs(); + and_(scratch, Immediate(0x3)); + add(destination, scratch); + jmp(&done, Label::kNear); + + bind(&len12); + mov(scratch, Operand(source, 8)); + mov(Operand(destination, 8), scratch); + bind(&len8); + mov(scratch, Operand(source, 4)); + mov(Operand(destination, 4), scratch); + bind(&len4); + mov(scratch, Operand(source, 0)); + mov(Operand(destination, 0), scratch); + add(destination, length); + jmp(&done, Label::kNear); + + bind(&short_string); + test(length, length); + j(zero, &done, Label::kNear); + + bind(&short_loop); + mov_b(scratch, Operand(source, 0)); + mov_b(Operand(destination, 0), scratch); + inc(source); + inc(destination); + dec(length); + j(not_zero, &short_loop); + + bind(&done); +} + + +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + jmp(&entry); + bind(&loop); + mov(Operand(start_offset, 0), filler); + add(start_offset, Immediate(kPointerSize)); + bind(&entry); + cmp(start_offset, end_offset); + j(less, &loop); +} + + +void MacroAssembler::BooleanBitTest(Register object, + int field_offset, + int bit_index) { + bit_index += kSmiTagSize + kSmiShiftSize; + DCHECK(IsPowerOf2(kBitsPerByte)); + int byte_index = bit_index / kBitsPerByte; + int byte_bit_index = bit_index & (kBitsPerByte - 1); + test_b(FieldOperand(object, field_offset + byte_index), + static_cast<byte>(1 << byte_bit_index)); +} + + + +void MacroAssembler::NegativeZeroTest(Register result, + Register op, + Label* then_label) { + Label ok; + test(result, result); + j(not_zero, &ok); + test(op, op); + j(sign, then_label); + bind(&ok); 
+} + + +void MacroAssembler::NegativeZeroTest(Register result, + Register op1, + Register op2, + Register scratch, + Label* then_label) { + Label ok; + test(result, result); + j(not_zero, &ok); + mov(scratch, op1); + or_(scratch, op2); + j(sign, then_label); + bind(&ok); +} + + +void MacroAssembler::TryGetFunctionPrototype(Register function, + Register result, + Register scratch, + Label* miss, + bool miss_on_bound_function) { + Label non_instance; + if (miss_on_bound_function) { + // Check that the receiver isn't a smi. + JumpIfSmi(function, miss); + + // Check that the function really is a function. + CmpObjectType(function, JS_FUNCTION_TYPE, result); + j(not_equal, miss); + + // If a bound function, go to miss label. + mov(scratch, + FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); + BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset, + SharedFunctionInfo::kBoundFunction); + j(not_zero, miss); + + // Make sure that the function has an instance prototype. + movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset)); + test(scratch, Immediate(1 << Map::kHasNonInstancePrototype)); + j(not_zero, &non_instance); + } + + // Get the prototype or initial map from the function. + mov(result, + FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + + // If the prototype or initial map is the hole, don't return it and + // simply miss the cache instead. This will allow us to allocate a + // prototype object on-demand in the runtime system. + cmp(result, Immediate(isolate()->factory()->the_hole_value())); + j(equal, miss); + + // If the function does not have an initial map, we're done. + Label done; + CmpObjectType(result, MAP_TYPE, scratch); + j(not_equal, &done); + + // Get the prototype from the initial map. + mov(result, FieldOperand(result, Map::kPrototypeOffset)); + + if (miss_on_bound_function) { + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. 
+ bind(&non_instance); + mov(result, FieldOperand(result, Map::kConstructorOffset)); + } + + // All done. + bind(&done); +} + + +void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { + DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. + call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); +} + + +void MacroAssembler::TailCallStub(CodeStub* stub) { + jmp(stub->GetCode(), RelocInfo::CODE_TARGET); +} + + +void MacroAssembler::StubReturn(int argc) { + DCHECK(argc >= 1 && generating_stub()); + ret((argc - 1) * kPointerSize); +} + + +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + return has_frame_ || !stub->SometimesSetsUpAFrame(); +} + + +void MacroAssembler::IndexFromHash(Register hash, Register index) { + // The assert checks that the constants for the maximum number of digits + // for an array index cached in the hash field and the number of bits + // reserved for it does not conflict. + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < + (1 << String::kArrayIndexValueBits)); + if (!index.is(hash)) { + mov(index, hash); + } + DecodeFieldToSmi<String::ArrayIndexValueBits>(index); +} + + +void MacroAssembler::CallRuntime(const Runtime::Function* f, + int num_arguments) { + // If the expected number of arguments of the runtime function is + // constant, we check that the actual number of arguments match the + // expectation. + CHECK(f->nargs < 0 || f->nargs == num_arguments); + + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. 
+ Move(eax, Immediate(num_arguments)); + mov(ebx, Immediate(ExternalReference(f, isolate()))); + CEntryStub ces(isolate(), 1); + CallStub(&ces); +} + + +void MacroAssembler::CallExternalReference(ExternalReference ref, + int num_arguments) { + mov(eax, Immediate(num_arguments)); + mov(ebx, Immediate(ref)); + + CEntryStub stub(isolate(), 1); + CallStub(&stub); +} + + +void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, + int num_arguments, + int result_size) { + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. + Move(eax, Immediate(num_arguments)); + JumpToExternalReference(ext); +} + + +void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, + int num_arguments, + int result_size) { + TailCallExternalReference(ExternalReference(fid, isolate()), + num_arguments, + result_size); +} + + +Operand ApiParameterOperand(int index) { + return Operand(esp, index * kPointerSize); +} + + +void MacroAssembler::PrepareCallApiFunction(int argc) { + EnterApiExitFrame(argc); + if (emit_debug_code()) { + mov(esi, Immediate(BitCast<int32_t>(kZapValue))); + } +} + + +void MacroAssembler::CallApiFunctionAndReturn( + Register function_address, + ExternalReference thunk_ref, + Operand thunk_last_arg, + int stack_space, + Operand return_value_operand, + Operand* context_restore_operand) { + ExternalReference next_address = + ExternalReference::handle_scope_next_address(isolate()); + ExternalReference limit_address = + ExternalReference::handle_scope_limit_address(isolate()); + ExternalReference level_address = + ExternalReference::handle_scope_level_address(isolate()); + + DCHECK(edx.is(function_address)); + // Allocate HandleScope in callee-save registers. 
+ mov(ebx, Operand::StaticVariable(next_address)); + mov(edi, Operand::StaticVariable(limit_address)); + add(Operand::StaticVariable(level_address), Immediate(1)); + + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(1, eax); + mov(Operand(esp, 0), + Immediate(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); + PopSafepointRegisters(); + } + + + Label profiler_disabled; + Label end_profiler_check; + mov(eax, Immediate(ExternalReference::is_profiling_address(isolate()))); + cmpb(Operand(eax, 0), 0); + j(zero, &profiler_disabled); + + // Additional parameter is the address of the actual getter function. + mov(thunk_last_arg, function_address); + // Call the api function. + mov(eax, Immediate(thunk_ref)); + call(eax); + jmp(&end_profiler_check); + + bind(&profiler_disabled); + // Call the api function. + call(function_address); + bind(&end_profiler_check); + + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(1, eax); + mov(Operand(esp, 0), + Immediate(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); + PopSafepointRegisters(); + } + + Label prologue; + // Load the value from ReturnValue + mov(eax, return_value_operand); + + Label promote_scheduled_exception; + Label exception_handled; + Label delete_allocated_handles; + Label leave_exit_frame; + + bind(&prologue); + // No more valid handles (the result handle was the last one). Restore + // previous handle scope. 
+ mov(Operand::StaticVariable(next_address), ebx); + sub(Operand::StaticVariable(level_address), Immediate(1)); + Assert(above_equal, kInvalidHandleScopeLevel); + cmp(edi, Operand::StaticVariable(limit_address)); + j(not_equal, &delete_allocated_handles); + bind(&leave_exit_frame); + + // Check if the function scheduled an exception. + ExternalReference scheduled_exception_address = + ExternalReference::scheduled_exception_address(isolate()); + cmp(Operand::StaticVariable(scheduled_exception_address), + Immediate(isolate()->factory()->the_hole_value())); + j(not_equal, &promote_scheduled_exception); + bind(&exception_handled); + +#if ENABLE_EXTRA_CHECKS + // Check if the function returned a valid JavaScript value. + Label ok; + Register return_value = eax; + Register map = ecx; + + JumpIfSmi(return_value, &ok, Label::kNear); + mov(map, FieldOperand(return_value, HeapObject::kMapOffset)); + + CmpInstanceType(map, FIRST_NONSTRING_TYPE); + j(below, &ok, Label::kNear); + + CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); + j(above_equal, &ok, Label::kNear); + + cmp(map, isolate()->factory()->heap_number_map()); + j(equal, &ok, Label::kNear); + + cmp(return_value, isolate()->factory()->undefined_value()); + j(equal, &ok, Label::kNear); + + cmp(return_value, isolate()->factory()->true_value()); + j(equal, &ok, Label::kNear); + + cmp(return_value, isolate()->factory()->false_value()); + j(equal, &ok, Label::kNear); + + cmp(return_value, isolate()->factory()->null_value()); + j(equal, &ok, Label::kNear); + + Abort(kAPICallReturnedInvalidObject); + + bind(&ok); +#endif + + bool restore_context = context_restore_operand != NULL; + if (restore_context) { + mov(esi, *context_restore_operand); + } + LeaveApiExitFrame(!restore_context); + ret(stack_space * kPointerSize); + + bind(&promote_scheduled_exception); + { + FrameScope frame(this, StackFrame::INTERNAL); + CallRuntime(Runtime::kPromoteScheduledException, 0); + } + jmp(&exception_handled); + + // HandleScope limit has 
changed. Delete allocated extensions. + ExternalReference delete_extensions = + ExternalReference::delete_handle_scope_extensions(isolate()); + bind(&delete_allocated_handles); + mov(Operand::StaticVariable(limit_address), edi); + mov(edi, eax); + mov(Operand(esp, 0), + Immediate(ExternalReference::isolate_address(isolate()))); + mov(eax, Immediate(delete_extensions)); + call(eax); + mov(eax, edi); + jmp(&leave_exit_frame); +} + + +void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) { + // Set the entry point and jump to the C entry runtime stub. + mov(ebx, Immediate(ext)); + CEntryStub ces(isolate(), 1); + jmp(ces.GetCode(), RelocInfo::CODE_TARGET); +} + + +void MacroAssembler::InvokePrologue(const ParameterCount& expected, + const ParameterCount& actual, + Handle<Code> code_constant, + const Operand& code_operand, + Label* done, + bool* definitely_mismatches, + InvokeFlag flag, + Label::Distance done_near, + const CallWrapper& call_wrapper) { + bool definitely_matches = false; + *definitely_mismatches = false; + Label invoke; + if (expected.is_immediate()) { + DCHECK(actual.is_immediate()); + if (expected.immediate() == actual.immediate()) { + definitely_matches = true; + } else { + mov(eax, actual.immediate()); + const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; + if (expected.immediate() == sentinel) { + // Don't worry about adapting arguments for builtins that + // don't want that done. Skip adaption code by making it look + // like we have a match between expected and actual number of + // arguments. + definitely_matches = true; + } else { + *definitely_mismatches = true; + mov(ebx, expected.immediate()); + } + } + } else { + if (actual.is_immediate()) { + // Expected is in register, actual is immediate. This is the + // case when we invoke function values without going through the + // IC mechanism. 
+ cmp(expected.reg(), actual.immediate()); + j(equal, &invoke); + DCHECK(expected.reg().is(ebx)); + mov(eax, actual.immediate()); + } else if (!expected.reg().is(actual.reg())) { + // Both expected and actual are in (different) registers. This + // is the case when we invoke functions using call and apply. + cmp(expected.reg(), actual.reg()); + j(equal, &invoke); + DCHECK(actual.reg().is(eax)); + DCHECK(expected.reg().is(ebx)); + } + } + + if (!definitely_matches) { + Handle<Code> adaptor = + isolate()->builtins()->ArgumentsAdaptorTrampoline(); + if (!code_constant.is_null()) { + mov(edx, Immediate(code_constant)); + add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); + } else if (!code_operand.is_reg(edx)) { + mov(edx, code_operand); + } + + if (flag == CALL_FUNCTION) { + call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET)); + call(adaptor, RelocInfo::CODE_TARGET); + call_wrapper.AfterCall(); + if (!*definitely_mismatches) { + jmp(done, done_near); + } + } else { + jmp(adaptor, RelocInfo::CODE_TARGET); + } + bind(&invoke); + } +} + + +void MacroAssembler::InvokeCode(const Operand& code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a function without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + Label done; + bool definitely_mismatches = false; + InvokePrologue(expected, actual, Handle<Code>::null(), code, + &done, &definitely_mismatches, flag, Label::kNear, + call_wrapper); + if (!definitely_mismatches) { + if (flag == CALL_FUNCTION) { + call_wrapper.BeforeCall(CallSize(code)); + call(code); + call_wrapper.AfterCall(); + } else { + DCHECK(flag == JUMP_FUNCTION); + jmp(code); + } + bind(&done); + } +} + + +void MacroAssembler::InvokeFunction(Register fun, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a function without a valid frame. 
+ DCHECK(flag == JUMP_FUNCTION || has_frame()); + + DCHECK(fun.is(edi)); + mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); + SmiUntag(ebx); + + ParameterCount expected(ebx); + InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), + expected, actual, flag, call_wrapper); +} + + +void MacroAssembler::InvokeFunction(Register fun, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a function without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + DCHECK(fun.is(edi)); + mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), + expected, actual, flag, call_wrapper); +} + + +void MacroAssembler::InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + LoadHeapObject(edi, function); + InvokeFunction(edi, expected, actual, flag, call_wrapper); +} + + +void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + // You can't call a builtin without a valid frame. + DCHECK(flag == JUMP_FUNCTION || has_frame()); + + // Rely on the assertion to check that the number of provided + // arguments match the expected number of arguments. Fake a + // parameter count to avoid emitting code to do the check. + ParameterCount expected(0); + GetBuiltinFunction(edi, id); + InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), + expected, expected, flag, call_wrapper); +} + + +void MacroAssembler::GetBuiltinFunction(Register target, + Builtins::JavaScript id) { + // Load the JavaScript builtin function from the builtins object. 
+ mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); + mov(target, FieldOperand(target, + JSBuiltinsObject::OffsetOfFunctionWithId(id))); +} + + +void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { + DCHECK(!target.is(edi)); + // Load the JavaScript builtin function from the builtins object. + GetBuiltinFunction(edi, id); + // Load the code entry point from the function into the target register. + mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset)); +} + + +void MacroAssembler::LoadContext(Register dst, int context_chain_length) { + if (context_chain_length > 0) { + // Move up the chain of contexts to the context containing the slot. + mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX))); + for (int i = 1; i < context_chain_length; i++) { + mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); + } + } else { + // Slot is in the current function context. Move it into the + // destination register in case we store into it (the write barrier + // cannot be allowed to destroy the context in esi). + mov(dst, esi); + } + + // We should not have found a with context by walking the context chain + // (i.e., the static scope chain and runtime context chain do not agree). + // A variable occurring in such a scope should have slot type LOOKUP and + // not CONTEXT. + if (emit_debug_code()) { + cmp(FieldOperand(dst, HeapObject::kMapOffset), + isolate()->factory()->with_context_map()); + Check(not_equal, kVariableResolvedToWithContext); + } +} + + +void MacroAssembler::LoadTransitionedArrayMapConditional( + ElementsKind expected_kind, + ElementsKind transitioned_kind, + Register map_in_out, + Register scratch, + Label* no_map_match) { + // Load the global or builtins object from the current context. 
+ mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset)); + + // Check that the function's map is the same as the expected cached map. + mov(scratch, Operand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + cmp(map_in_out, FieldOperand(scratch, offset)); + j(not_equal, no_map_match); + + // Use the transitioned cached map. + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + mov(map_in_out, FieldOperand(scratch, offset)); +} + + +void MacroAssembler::LoadGlobalFunction(int index, Register function) { + // Load the global or builtins object from the current context. + mov(function, + Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + // Load the native context from the global or builtins object. + mov(function, + FieldOperand(function, GlobalObject::kNativeContextOffset)); + // Load the function from the native context. + mov(function, Operand(function, Context::SlotOffset(index))); +} + + +void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, + Register map) { + // Load the initial map. The global functions all have initial maps. + mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + if (emit_debug_code()) { + Label ok, fail; + CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK); + jmp(&ok); + bind(&fail); + Abort(kGlobalFunctionsMustHaveInitialMap); + bind(&ok); + } +} + + +// Store the value in register src in the safepoint register stack +// slot for register dst. 
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) { + mov(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) { + mov(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + mov(dst, SafepointRegisterSlot(src)); +} + + +Operand MacroAssembler::SafepointRegisterSlot(Register reg) { + return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + +int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { + // The registers are pushed starting with the lowest encoding, + // which means that lowest encodings are furthest away from + // the stack pointer. + DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters); + return kNumSafepointRegisters - reg_code - 1; +} + + +void MacroAssembler::LoadHeapObject(Register result, + Handle<HeapObject> object) { + AllowDeferredHandleDereference embedding_raw_address; + if (isolate()->heap()->InNewSpace(*object)) { + Handle<Cell> cell = isolate()->factory()->NewCell(object); + mov(result, Operand::ForCell(cell)); + } else { + mov(result, object); + } +} + + +void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) { + AllowDeferredHandleDereference using_raw_address; + if (isolate()->heap()->InNewSpace(*object)) { + Handle<Cell> cell = isolate()->factory()->NewCell(object); + cmp(reg, Operand::ForCell(cell)); + } else { + cmp(reg, object); + } +} + + +void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { + AllowDeferredHandleDereference using_raw_address; + if (isolate()->heap()->InNewSpace(*object)) { + Handle<Cell> cell = isolate()->factory()->NewCell(object); + push(Operand::ForCell(cell)); + } else { + Push(object); + } +} + + +void MacroAssembler::Ret() { + ret(0); +} + + +void MacroAssembler::Ret(int bytes_dropped, Register scratch) { + if (is_uint16(bytes_dropped)) { + ret(bytes_dropped); + } else { + 
pop(scratch); + add(esp, Immediate(bytes_dropped)); + push(scratch); + ret(0); + } +} + + +void MacroAssembler::VerifyX87StackDepth(uint32_t depth) { + // Make sure the floating point stack is either empty or has depth items. + DCHECK(depth <= 7); + // This is very expensive. + DCHECK(FLAG_debug_code && FLAG_enable_slow_asserts); + + // The top-of-stack (tos) is 7 if there is one item pushed. + int tos = (8 - depth) % 8; + const int kTopMask = 0x3800; + push(eax); + fwait(); + fnstsw_ax(); + and_(eax, kTopMask); + shr(eax, 11); + cmp(eax, Immediate(tos)); + Check(equal, kUnexpectedFPUStackDepthAfterInstruction); + fnclex(); + pop(eax); +} + + +void MacroAssembler::Drop(int stack_elements) { + if (stack_elements > 0) { + add(esp, Immediate(stack_elements * kPointerSize)); + } +} + + +void MacroAssembler::Move(Register dst, Register src) { + if (!dst.is(src)) { + mov(dst, src); + } +} + + +void MacroAssembler::Move(Register dst, const Immediate& x) { + if (x.is_zero()) { + xor_(dst, dst); // Shorter than mov of 32-bit immediate 0. 
+ } else { + mov(dst, x); + } +} + + +void MacroAssembler::Move(const Operand& dst, const Immediate& x) { + mov(dst, x); +} + + +void MacroAssembler::SetCounter(StatsCounter* counter, int value) { + if (FLAG_native_code_counters && counter->Enabled()) { + mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value)); + } +} + + +void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { + DCHECK(value > 0); + if (FLAG_native_code_counters && counter->Enabled()) { + Operand operand = Operand::StaticVariable(ExternalReference(counter)); + if (value == 1) { + inc(operand); + } else { + add(operand, Immediate(value)); + } + } +} + + +void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { + DCHECK(value > 0); + if (FLAG_native_code_counters && counter->Enabled()) { + Operand operand = Operand::StaticVariable(ExternalReference(counter)); + if (value == 1) { + dec(operand); + } else { + sub(operand, Immediate(value)); + } + } +} + + +void MacroAssembler::IncrementCounter(Condition cc, + StatsCounter* counter, + int value) { + DCHECK(value > 0); + if (FLAG_native_code_counters && counter->Enabled()) { + Label skip; + j(NegateCondition(cc), &skip); + pushfd(); + IncrementCounter(counter, value); + popfd(); + bind(&skip); + } +} + + +void MacroAssembler::DecrementCounter(Condition cc, + StatsCounter* counter, + int value) { + DCHECK(value > 0); + if (FLAG_native_code_counters && counter->Enabled()) { + Label skip; + j(NegateCondition(cc), &skip); + pushfd(); + DecrementCounter(counter, value); + popfd(); + bind(&skip); + } +} + + +void MacroAssembler::Assert(Condition cc, BailoutReason reason) { + if (emit_debug_code()) Check(cc, reason); +} + + +void MacroAssembler::AssertFastElements(Register elements) { + if (emit_debug_code()) { + Factory* factory = isolate()->factory(); + Label ok; + cmp(FieldOperand(elements, HeapObject::kMapOffset), + Immediate(factory->fixed_array_map())); + j(equal, &ok); + cmp(FieldOperand(elements, 
HeapObject::kMapOffset), + Immediate(factory->fixed_double_array_map())); + j(equal, &ok); + cmp(FieldOperand(elements, HeapObject::kMapOffset), + Immediate(factory->fixed_cow_array_map())); + j(equal, &ok); + Abort(kJSObjectWithFastElementsMapHasSlowElements); + bind(&ok); + } +} + + +void MacroAssembler::Check(Condition cc, BailoutReason reason) { + Label L; + j(cc, &L); + Abort(reason); + // will not return here + bind(&L); +} + + +void MacroAssembler::CheckStackAlignment() { + int frame_alignment = base::OS::ActivationFrameAlignment(); + int frame_alignment_mask = frame_alignment - 1; + if (frame_alignment > kPointerSize) { + DCHECK(IsPowerOf2(frame_alignment)); + Label alignment_as_expected; + test(esp, Immediate(frame_alignment_mask)); + j(zero, &alignment_as_expected); + // Abort if stack is not aligned. + int3(); + bind(&alignment_as_expected); + } +} + + +void MacroAssembler::Abort(BailoutReason reason) { +#ifdef DEBUG + const char* msg = GetBailoutReason(reason); + if (msg != NULL) { + RecordComment("Abort message: "); + RecordComment(msg); + } + + if (FLAG_trap_on_abort) { + int3(); + return; + } +#endif + + push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason)))); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. 
+ FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 1); + } else { + CallRuntime(Runtime::kAbort, 1); + } + // will not return here + int3(); +} + + +void MacroAssembler::LoadInstanceDescriptors(Register map, + Register descriptors) { + mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset)); +} + + +void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { + mov(dst, FieldOperand(map, Map::kBitField3Offset)); + DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); +} + + +void MacroAssembler::LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch1; + Register scratch = scratch2; + + // Load the number string cache. + LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); + shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. + sub(mask, Immediate(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. 
+  Label smi_hash_calculated;
+  Label load_result_from_cache;
+  Label not_smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfNotSmi(object, &not_smi, Label::kNear);
+  mov(scratch, object);
+  SmiUntag(scratch);
+  jmp(&smi_hash_calculated, Label::kNear);
+  bind(&not_smi);
+  cmp(FieldOperand(object, HeapObject::kMapOffset),
+      isolate()->factory()->heap_number_map());
+  j(not_equal, not_found);
+  STATIC_ASSERT(8 == kDoubleSize);
+  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+  // Object is heap number and hash is now in scratch. Calculate cache index.
+  and_(scratch, mask);
+  Register index = scratch;
+  Register probe = mask;
+  mov(probe,
+      FieldOperand(number_string_cache,
+                   index,
+                   times_twice_pointer_size,
+                   FixedArray::kHeaderSize));
+  JumpIfSmi(probe, not_found);
+  fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+  fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+  FCmp();
+  j(parity_even, not_found);  // Bail out if NaN is involved.
+  j(not_equal, not_found);  // The cache did not contain this value.
+  jmp(&load_result_from_cache, Label::kNear);
+
+  bind(&smi_hash_calculated);
+  // Object is smi and hash is now in scratch. Calculate cache index.
+  and_(scratch, mask);
+  // Check if the entry is the smi we are looking for.
+  cmp(object,
+      FieldOperand(number_string_cache,
+                   index,
+                   times_twice_pointer_size,
+                   FixedArray::kHeaderSize));
+  j(not_equal, not_found);
+
+  // Get the result from the cache.
+ bind(&load_result_from_cache); + mov(result, + FieldOperand(number_string_cache, + index, + times_twice_pointer_size, + FixedArray::kHeaderSize + kPointerSize)); + IncrementCounter(isolate()->counters()->number_to_string_native(), 1); +} + + +void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( + Register instance_type, + Register scratch, + Label* failure) { + if (!scratch.is(instance_type)) { + mov(scratch, instance_type); + } + and_(scratch, + kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); + cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag); + j(not_equal, failure); +} + + +void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1, + Register object2, + Register scratch1, + Register scratch2, + Label* failure) { + // Check that both objects are not smis. + STATIC_ASSERT(kSmiTag == 0); + mov(scratch1, object1); + and_(scratch1, object2); + JumpIfSmi(scratch1, failure); + + // Load instance type for both strings. + mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset)); + mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset)); + movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); + movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); + + // Check that both are flat ASCII strings. + const int kFlatAsciiStringMask = + kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; + const int kFlatAsciiStringTag = + kStringTag | kOneByteStringTag | kSeqStringTag; + // Interleave bits from both instance types and compare them in one check. 
+ DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); + and_(scratch1, kFlatAsciiStringMask); + and_(scratch2, kFlatAsciiStringMask); + lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); + cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3)); + j(not_equal, failure); +} + + +void MacroAssembler::JumpIfNotUniqueName(Operand operand, + Label* not_unique_name, + Label::Distance distance) { + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + Label succeed; + test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); + j(zero, &succeed); + cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE)); + j(not_equal, not_unique_name, distance); + + bind(&succeed); +} + + +void MacroAssembler::EmitSeqStringSetCharCheck(Register string, + Register index, + Register value, + uint32_t encoding_mask) { + Label is_object; + JumpIfNotSmi(string, &is_object, Label::kNear); + Abort(kNonObject); + bind(&is_object); + + push(value); + mov(value, FieldOperand(string, HeapObject::kMapOffset)); + movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset)); + + and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask)); + cmp(value, Immediate(encoding_mask)); + pop(value); + Check(equal, kUnexpectedStringType); + + // The index is assumed to be untagged coming in, tag it to compare with the + // string length without using a temp register, it is restored at the end of + // this function. 
+ SmiTag(index); + Check(no_overflow, kIndexIsTooLarge); + + cmp(index, FieldOperand(string, String::kLengthOffset)); + Check(less, kIndexIsTooLarge); + + cmp(index, Immediate(Smi::FromInt(0))); + Check(greater_equal, kIndexIsNegative); + + // Restore the index + SmiUntag(index); +} + + +void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { + int frame_alignment = base::OS::ActivationFrameAlignment(); + if (frame_alignment != 0) { + // Make stack end at alignment and make room for num_arguments words + // and the original value of esp. + mov(scratch, esp); + sub(esp, Immediate((num_arguments + 1) * kPointerSize)); + DCHECK(IsPowerOf2(frame_alignment)); + and_(esp, -frame_alignment); + mov(Operand(esp, num_arguments * kPointerSize), scratch); + } else { + sub(esp, Immediate(num_arguments * kPointerSize)); + } +} + + +void MacroAssembler::CallCFunction(ExternalReference function, + int num_arguments) { + // Trashing eax is ok as it will be the return value. + mov(eax, Immediate(function)); + CallCFunction(eax, num_arguments); +} + + +void MacroAssembler::CallCFunction(Register function, + int num_arguments) { + DCHECK(has_frame()); + // Check stack alignment. 
+ if (emit_debug_code()) { + CheckStackAlignment(); + } + + call(function); + if (base::OS::ActivationFrameAlignment() != 0) { + mov(esp, Operand(esp, num_arguments * kPointerSize)); + } else { + add(esp, Immediate(num_arguments * kPointerSize)); + } +} + + +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6, + Register reg7, + Register reg8) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + if (reg7.is_valid()) regs |= reg7.bit(); + if (reg8.is_valid()) regs |= reg8.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; +} +#endif + + +CodePatcher::CodePatcher(byte* address, int size) + : address_(address), + size_(size), + masm_(NULL, address, size + Assembler::kGap) { + // Create a new macro assembler pointing to the address of the code to patch. + // The size is adjusted with kGap on order for the assembler to generate size + // bytes of instructions without failing with buffer size constraints. + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); +} + + +CodePatcher::~CodePatcher() { + // Indicate that code has changed. + CpuFeatures::FlushICache(address_, size_); + + // Check that the code was patched as expected. 
+ DCHECK(masm_.pc_ == address_ + size_); + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); +} + + +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + DCHECK(cc == zero || cc == not_zero); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + mov(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); + } + if (mask < (1 << kBitsPerByte)) { + test_b(Operand(scratch, MemoryChunk::kFlagsOffset), + static_cast<uint8_t>(mask)); + } else { + test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); + } + j(cc, condition_met, condition_met_distance); +} + + +void MacroAssembler::CheckPageFlagForMap( + Handle<Map> map, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + DCHECK(cc == zero || cc == not_zero); + Page* page = Page::FromAddress(map->address()); + DCHECK(!serializer_enabled()); // Serializer cannot match page_flags. + ExternalReference reference(ExternalReference::page_flags(page)); + // The inlined static address check of the page's flags relies + // on maps never being compacted. 
+ DCHECK(!isolate()->heap()->mark_compact_collector()-> + IsOnEvacuationCandidate(*map)); + if (mask < (1 << kBitsPerByte)) { + test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask)); + } else { + test(Operand::StaticVariable(reference), Immediate(mask)); + } + j(cc, condition_met, condition_met_distance); +} + + +void MacroAssembler::CheckMapDeprecated(Handle<Map> map, + Register scratch, + Label* if_deprecated) { + if (map->CanBeDeprecated()) { + mov(scratch, map); + mov(scratch, FieldOperand(scratch, Map::kBitField3Offset)); + and_(scratch, Immediate(Map::Deprecated::kMask)); + j(not_zero, if_deprecated); + } +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_near) { + HasColor(object, scratch0, scratch1, + on_black, on_black_near, + 1, 0); // kBlackBitPattern. + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + Label::Distance has_color_distance, + int first_bit, + int second_bit) { + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color, word_boundary; + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); + add(mask_scratch, mask_scratch); // Shift left 1 by adding. + j(zero, &word_boundary, Label::kNear); + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); + jmp(&other_color, Label::kNear); + + bind(&word_boundary); + test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1); + + j(second_bit == 1 ? 
not_zero : zero, has_color, has_color_distance); + bind(&other_color); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); + mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); + and_(bitmap_reg, addr_reg); + mov(ecx, addr_reg); + int shift = + Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; + shr(ecx, shift); + and_(ecx, + (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1)); + + add(bitmap_reg, ecx); + mov(ecx, addr_reg); + shr(ecx, kPointerSizeLog2); + and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1); + mov(mask_reg, Immediate(1)); + shl_cl(mask_reg); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Label* value_is_white_and_not_data, + Label::Distance distance) { + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(not_zero, &done, Label::kNear); + + if (emit_debug_code()) { + // Check for impossible bit pattern. + Label ok; + push(mask_scratch); + // shl. May overflow making the check conservative. + add(mask_scratch, mask_scratch); + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); + pop(mask_scratch); + } + + // Value is white. We check whether it is data that doesn't need scanning. 
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = ecx;  // Holds map while checking type.
+  Register length = ecx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number
+  mov(map, FieldOperand(value, HeapObject::kMapOffset));
+  cmp(map, isolate()->factory()->heap_number_map());
+  j(not_equal, &not_heap_number, Label::kNear);
+  mov(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = ecx;
+  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
+  test_b(instance_type, kExternalStringTag);
+  j(zero, &not_external, Label::kNear);
+  mov(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  DCHECK(kOneByteStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  add(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+  // by 2. If we multiply the string length as smi by this, it still
+  // won't overflow a 32-bit value.
+ DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); + DCHECK(SeqOneByteString::kMaxSize <= + static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); + imul(length, FieldOperand(value, String::kLengthOffset)); + shr(length, 2 + kSmiTagSize + kSmiShiftSize); + add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); + and_(length, Immediate(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); + + and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); + add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), + length); + if (emit_debug_code()) { + mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset)); + Check(less_equal, kLiveBytesCountOverflowChunkSize); + } + + bind(&done); +} + + +void MacroAssembler::EnumLength(Register dst, Register map) { + STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); + mov(dst, FieldOperand(map, Map::kBitField3Offset)); + and_(dst, Immediate(Map::EnumLengthBits::kMask)); + SmiTag(dst); +} + + +void MacroAssembler::CheckEnumCache(Label* call_runtime) { + Label next, start; + mov(ecx, eax); + + // Check if the enum length field is properly initialized, indicating that + // there is an enum cache. + mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset)); + + EnumLength(edx, ebx); + cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel))); + j(equal, call_runtime); + + jmp(&start); + + bind(&next); + mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset)); + + // For all objects but the receiver, check that the cache is empty. + EnumLength(edx, ebx); + cmp(edx, Immediate(Smi::FromInt(0))); + j(not_equal, call_runtime); + + bind(&start); + + // Check that there are no elements. 
Register rcx contains the current JS + // object we've reached through the prototype chain. + Label no_elements; + mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset)); + cmp(ecx, isolate()->factory()->empty_fixed_array()); + j(equal, &no_elements); + + // Second chance, the object may be using the empty slow element dictionary. + cmp(ecx, isolate()->factory()->empty_slow_element_dictionary()); + j(not_equal, call_runtime); + + bind(&no_elements); + mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset)); + cmp(ecx, isolate()->factory()->null_value()); + j(not_equal, &next); +} + + +void MacroAssembler::TestJSArrayForAllocationMemento( + Register receiver_reg, + Register scratch_reg, + Label* no_memento_found) { + ExternalReference new_space_start = + ExternalReference::new_space_start(isolate()); + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + + lea(scratch_reg, Operand(receiver_reg, + JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); + cmp(scratch_reg, Immediate(new_space_start)); + j(less, no_memento_found); + cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top)); + j(greater, no_memento_found); + cmp(MemOperand(scratch_reg, -AllocationMemento::kSize), + Immediate(isolate()->factory()->allocation_memento_map())); +} + + +void MacroAssembler::JumpIfDictionaryInPrototypeChain( + Register object, + Register scratch0, + Register scratch1, + Label* found) { + DCHECK(!scratch1.is(scratch0)); + Factory* factory = isolate()->factory(); + Register current = scratch0; + Label loop_again; + + // scratch contained elements pointer. + mov(current, object); + + // Loop based on the map going up the prototype chain. 
+ bind(&loop_again); + mov(current, FieldOperand(current, HeapObject::kMapOffset)); + mov(scratch1, FieldOperand(current, Map::kBitField2Offset)); + DecodeField<Map::ElementsKindBits>(scratch1); + cmp(scratch1, Immediate(DICTIONARY_ELEMENTS)); + j(equal, found); + mov(current, FieldOperand(current, Map::kPrototypeOffset)); + cmp(current, Immediate(factory->null_value())); + j(not_equal, &loop_again); +} + + +void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { + DCHECK(!dividend.is(eax)); + DCHECK(!dividend.is(edx)); + MultiplierAndShift ms(divisor); + mov(eax, Immediate(ms.multiplier())); + imul(dividend); + if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend); + if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend); + if (ms.shift() > 0) sar(edx, ms.shift()); + mov(eax, dividend); + shr(eax, 31); + add(edx, eax); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/macro-assembler-x87.h nodejs-0.11.15/deps/v8/src/x87/macro-assembler-x87.h --- nodejs-0.11.13/deps/v8/src/x87/macro-assembler-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/macro-assembler-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1100 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_MACRO_ASSEMBLER_X87_H_ +#define V8_X87_MACRO_ASSEMBLER_X87_H_ + +#include "src/assembler.h" +#include "src/frames.h" +#include "src/globals.h" + +namespace v8 { +namespace internal { + +// Convenience for platform-independent signatures. We do not normally +// distinguish memory operands from other operands on ia32. 
+typedef Operand MemOperand; + +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum PointersToHereCheck { + kPointersToHereMaybeInteresting, + kPointersToHereAreAlwaysInteresting +}; + + +enum RegisterValueType { + REGISTER_VALUE_IS_SMI, + REGISTER_VALUE_IS_INT32 +}; + + +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg, + Register reg7 = no_reg, + Register reg8 = no_reg); +#endif + + +// MacroAssembler implements a collection of frequently used macros. +class MacroAssembler: public Assembler { + public: + // The isolate parameter can be NULL if the macro assembler should + // not use isolate-dependent functionality. In this case, it's the + // responsibility of the caller to never invoke such function on the + // macro assembler. + MacroAssembler(Isolate* isolate, void* buffer, int size); + + void Load(Register dst, const Operand& src, Representation r); + void Store(Register src, const Operand& dst, Representation r); + + // Operations on roots in the root-array. + void LoadRoot(Register destination, Heap::RootListIndex index); + void StoreRoot(Register source, Register scratch, Heap::RootListIndex index); + void CompareRoot(Register with, Register scratch, Heap::RootListIndex index); + // These methods can only be used with constant roots (i.e. non-writable + // and not in new space). + void CompareRoot(Register with, Heap::RootListIndex index); + void CompareRoot(const Operand& with, Heap::RootListIndex index); + + // --------------------------------------------------------------------------- + // GC Support + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. 
+ void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + void CheckPageFlagForMap( + Handle<Map> map, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + void CheckMapDeprecated(Handle<Map> map, + Register scratch, + Label* if_deprecated); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, zero, branch, distance); + } + + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, not_zero, branch, distance); + } + + // Check if an object has a given incremental marking color. Also uses ecx! + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + Label::Distance has_color_distance, + int first_bit, + int second_bit); + + void JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_distance = Label::kFar); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. 
+ void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Label* object_is_white_and_not_data, + Label::Distance distance); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + // As above, but the offset has the tag presubtracted. For use with + // Operand(reg, off). + void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + remembered_set_action, + smi_check, + pointers_to_here_check_for_value); + } + + // Notify the garbage collector that we wrote a pointer into a fixed array. + // |array| is the array being stored into, |value| is the + // object being stored. |index| is the array index represented as a + // Smi. All registers are clobbered by the operation RecordWriteArray + // filters out smis so it does not update the write barrier if the + // value is a smi. 
+ void RecordWriteArray( + Register array, + Register value, + Register index, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + // For page containing |object| mark region covering |address| + // dirty. |object| is the object being stored into, |value| is the + // object being stored. The address and value registers are clobbered by the + // operation. RecordWrite filters out smis so it does not update the + // write barrier if the value is a smi. + void RecordWrite( + Register object, + Register address, + Register value, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + // For page containing |object| mark the region covering the object's map + // dirty. |object| is the object being stored into, |map| is the Map object + // that was stored. + void RecordWriteForMap( + Register object, + Handle<Map> map, + Register scratch1, + Register scratch2); + + // --------------------------------------------------------------------------- + // Debugger Support + + void DebugBreak(); + + // Generates function and stub prologue code. + void StubPrologue(); + void Prologue(bool code_pre_aging); + + // Enter specific kind of exit frame. Expects the number of + // arguments in register eax and sets up the number of arguments in + // register edi and the pointer to the first argument in register + // esi. + void EnterExitFrame(); + + void EnterApiExitFrame(int argc); + + // Leave the current exit frame. Expects the return value in + // register eax:edx (untouched) and the pointer to the first + // argument in register esi. + void LeaveExitFrame(); + + // Leave the current exit frame. Expects the return value in + // register eax (untouched). 
+ void LeaveApiExitFrame(bool restore_context); + + // Find the function context up the context chain. + void LoadContext(Register dst, int context_chain_length); + + // Conditionally load the cached Array transitioned map of type + // transitioned_kind from the native context if the map in register + // map_in_out is the cached Array map in the native context of + // expected_kind. + void LoadTransitionedArrayMapConditional( + ElementsKind expected_kind, + ElementsKind transitioned_kind, + Register map_in_out, + Register scratch, + Label* no_map_match); + + // Load the global function with the given index. + void LoadGlobalFunction(int index, Register function); + + // Load the initial map from the global function. The registers + // function and map can be the same. + void LoadGlobalFunctionInitialMap(Register function, Register map); + + // Push and pop the registers that can hold pointers. + void PushSafepointRegisters() { pushad(); } + void PopSafepointRegisters() { popad(); } + // Store the value in register/immediate src in the safepoint + // register stack slot for register dst. 
+ void StoreToSafepointRegisterSlot(Register dst, Register src); + void StoreToSafepointRegisterSlot(Register dst, Immediate src); + void LoadFromSafepointRegisterSlot(Register dst, Register src); + + void LoadHeapObject(Register result, Handle<HeapObject> object); + void CmpHeapObject(Register reg, Handle<HeapObject> object); + void PushHeapObject(Handle<HeapObject> object); + + void LoadObject(Register result, Handle<Object> object) { + AllowDeferredHandleDereference heap_object_check; + if (object->IsHeapObject()) { + LoadHeapObject(result, Handle<HeapObject>::cast(object)); + } else { + Move(result, Immediate(object)); + } + } + + void CmpObject(Register reg, Handle<Object> object) { + AllowDeferredHandleDereference heap_object_check; + if (object->IsHeapObject()) { + CmpHeapObject(reg, Handle<HeapObject>::cast(object)); + } else { + cmp(reg, Immediate(object)); + } + } + + // --------------------------------------------------------------------------- + // JavaScript invokes + + // Invoke the JavaScript function code by either calling or jumping. + void InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { + InvokeCode(Operand(code), expected, actual, flag, call_wrapper); + } + + void InvokeCode(const Operand& code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + // Invoke the JavaScript function in the given register. Changes the + // current context to the context in the function before invoking. 
+ void InvokeFunction(Register function, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + void InvokeFunction(Register function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + void InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper); + + // Invoke specified builtin JavaScript function. Adds an entry to + // the unresolved list if the name does not resolve. + void InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + const CallWrapper& call_wrapper = NullCallWrapper()); + + // Store the function for the given builtin in the target register. + void GetBuiltinFunction(Register target, Builtins::JavaScript id); + + // Store the code object for the given builtin in the target register. + void GetBuiltinEntry(Register target, Builtins::JavaScript id); + + // Expression support + // Support for constant splitting. + bool IsUnsafeImmediate(const Immediate& x); + void SafeMove(Register dst, const Immediate& x); + void SafePush(const Immediate& x); + + // Compare object type for heap object. + // Incoming register is heap_object and outgoing register is map. + void CmpObjectType(Register heap_object, InstanceType type, Register map); + + // Compare instance type for map. + void CmpInstanceType(Register map, InstanceType type); + + // Check if a map for a JSObject indicates that the object has fast elements. + // Jump to the specified label if it does not. + void CheckFastElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. 
+ void CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. Jump to the specified label if it does not. + void CheckFastSmiElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. + void StoreNumberToDoubleElements(Register maybe_number, + Register elements, + Register key, + Register scratch, + Label* fail, + int offset = 0); + + // Compare an object's map with the specified map. + void CompareMap(Register obj, Handle<Map> map); + + // Check if the map of an object is equal to a specified map and branch to + // label if not. Skip the smi check if not required (object is known to be a + // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match + // against maps that are ElementsKind transition maps of the specified map. + void CheckMap(Register obj, + Handle<Map> map, + Label* fail, + SmiCheckType smi_check_type); + + // Check if the map of an object is equal to a specified map and branch to a + // specified target if equal. Skip the smi check if not required (object is + // known to be a heap object) + void DispatchMap(Register obj, + Register unused, + Handle<Map> map, + Handle<Code> success, + SmiCheckType smi_check_type); + + // Check if the object in register heap_object is a string. Afterwards the + // register map contains the object map and the register instance_type + // contains the instance_type. The registers map and instance_type can be the + // same in which case it contains the instance type afterwards. Either of the + // registers map and instance_type can be the same as heap_object. 
+ Condition IsObjectStringType(Register heap_object, + Register map, + Register instance_type); + + // Check if the object in register heap_object is a name. Afterwards the + // register map contains the object map and the register instance_type + // contains the instance_type. The registers map and instance_type can be the + // same in which case it contains the instance type afterwards. Either of the + // registers map and instance_type can be the same as heap_object. + Condition IsObjectNameType(Register heap_object, + Register map, + Register instance_type); + + // Check if a heap object's type is in the JSObject range, not including + // JSFunction. The object's map will be loaded in the map register. + // Any or all of the three registers may be the same. + // The contents of the scratch register will always be overwritten. + void IsObjectJSObjectType(Register heap_object, + Register map, + Register scratch, + Label* fail); + + // The contents of the scratch register will be overwritten. + void IsInstanceJSObjectType(Register map, Register scratch, Label* fail); + + // FCmp is similar to integer cmp, but requires unsigned + // jcc instructions (je, ja, jae, jb, jbe, je, and jz). + void FCmp(); + + void ClampUint8(Register reg); + + void SlowTruncateToI(Register result_reg, Register input_reg, + int offset = HeapNumber::kValueOffset - kHeapObjectTag); + + void TruncateHeapNumberToI(Register result_reg, Register input_reg); + void TruncateX87TOSToI(Register result_reg); + + void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode, + Label* conversion_failed, Label::Distance dst = Label::kFar); + + void TaggedToI(Register result_reg, Register input_reg, + MinusZeroMode minus_zero_mode, Label* lost_precision); + + // Smi tagging support. 
+ void SmiTag(Register reg) { + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + add(reg, reg); + } + void SmiUntag(Register reg) { + sar(reg, kSmiTagSize); + } + + // Modifies the register even if it does not contain a Smi! + void SmiUntag(Register reg, Label* is_smi) { + STATIC_ASSERT(kSmiTagSize == 1); + sar(reg, kSmiTagSize); + STATIC_ASSERT(kSmiTag == 0); + j(not_carry, is_smi); + } + + void LoadUint32NoSSE2(Register src); + + // Jump the register contains a smi. + inline void JumpIfSmi(Register value, + Label* smi_label, + Label::Distance distance = Label::kFar) { + test(value, Immediate(kSmiTagMask)); + j(zero, smi_label, distance); + } + // Jump if the operand is a smi. + inline void JumpIfSmi(Operand value, + Label* smi_label, + Label::Distance distance = Label::kFar) { + test(value, Immediate(kSmiTagMask)); + j(zero, smi_label, distance); + } + // Jump if register contain a non-smi. + inline void JumpIfNotSmi(Register value, + Label* not_smi_label, + Label::Distance distance = Label::kFar) { + test(value, Immediate(kSmiTagMask)); + j(not_zero, not_smi_label, distance); + } + + void LoadInstanceDescriptors(Register map, Register descriptors); + void EnumLength(Register dst, Register map); + void NumberOfOwnDescriptors(Register dst, Register map); + + template<typename Field> + void DecodeField(Register reg) { + static const int shift = Field::kShift; + static const int mask = Field::kMask >> Field::kShift; + if (shift != 0) { + sar(reg, shift); + } + and_(reg, Immediate(mask)); + } + + template<typename Field> + void DecodeFieldToSmi(Register reg) { + static const int shift = Field::kShift; + static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize; + STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); + STATIC_ASSERT(kSmiTag == 0); + if (shift < kSmiTagSize) { + shl(reg, kSmiTagSize - shift); + } else if (shift > kSmiTagSize) { + sar(reg, shift - kSmiTagSize); + } + and_(reg, Immediate(mask)); + } + + // Abort 
execution if argument is not a number, enabled via --debug-code. + void AssertNumber(Register object); + + // Abort execution if argument is not a smi, enabled via --debug-code. + void AssertSmi(Register object); + + // Abort execution if argument is a smi, enabled via --debug-code. + void AssertNotSmi(Register object); + + // Abort execution if argument is not a string, enabled via --debug-code. + void AssertString(Register object); + + // Abort execution if argument is not a name, enabled via --debug-code. + void AssertName(Register object); + + // Abort execution if argument is not undefined or an AllocationSite, enabled + // via --debug-code. + void AssertUndefinedOrAllocationSite(Register object); + + // --------------------------------------------------------------------------- + // Exception handling + + // Push a new try handler and link it into try handler chain. + void PushTryHandler(StackHandler::Kind kind, int handler_index); + + // Unlink the stack handler on top of the stack from the try handler chain. + void PopTryHandler(); + + // Throw to the top handler in the try hander chain. + void Throw(Register value); + + // Throw past all JS frames to the top JS entry frame. + void ThrowUncatchable(Register value); + + // --------------------------------------------------------------------------- + // Inline caching support + + // Generate code for checking access rights - used for security checks + // on access to global objects across environments. The holder register + // is left untouched, but the scratch register is clobbered. 
+ void CheckAccessGlobalProxy(Register holder_reg, + Register scratch1, + Register scratch2, + Label* miss); + + void GetNumberHash(Register r0, Register scratch); + + void LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register r0, + Register r1, + Register r2, + Register result); + + + // --------------------------------------------------------------------------- + // Allocation support + + // Allocate an object in new space or old pointer space. If the given space + // is exhausted control continues at the gc_required label. The allocated + // object is returned in result and end of the new object is returned in + // result_end. The register scratch can be passed as no_reg in which case + // an additional object reference will be added to the reloc info. The + // returned pointers in result and result_end have not yet been tagged as + // heap objects. If result_contains_top_on_entry is true the content of + // result is known to be the allocation top on entry (could be result_end + // from a previous call). If result_contains_top_on_entry is true scratch + // should be no_reg as it is never used. + void Allocate(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags); + + void Allocate(int header_size, + ScaleFactor element_size, + Register element_count, + RegisterValueType element_count_type, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags); + + void Allocate(Register object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags); + + // Undo allocation in new space. The object passed and objects allocated after + // it will no longer be allocated. Make sure that no pointers are left to the + // object(s) no longer allocated as they would be invalid when allocation is + // un-done. 
+ void UndoAllocationInNewSpace(Register object); + + // Allocate a heap number in new space with undefined value. The + // register scratch2 can be passed as no_reg; the others must be + // valid registers. Returns tagged pointer in result register, or + // jumps to gc_required if new space is full. + void AllocateHeapNumber(Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + MutableMode mode = IMMUTABLE); + + // Allocate a sequential string. All the header fields of the string object + // are initialized. + void AllocateTwoByteString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required); + void AllocateAsciiString(Register result, + Register length, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required); + void AllocateAsciiString(Register result, + int length, + Register scratch1, + Register scratch2, + Label* gc_required); + + // Allocate a raw cons string object. Only the map field of the result is + // initialized. + void AllocateTwoByteConsString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required); + void AllocateAsciiConsString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required); + + // Allocate a raw sliced string object. Only the map field of the result is + // initialized. + void AllocateTwoByteSlicedString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required); + void AllocateAsciiSlicedString(Register result, + Register scratch1, + Register scratch2, + Label* gc_required); + + // Copy memory, byte-by-byte, from source to destination. Not optimized for + // long or aligned copies. + // The contents of index and scratch are destroyed. + void CopyBytes(Register source, + Register destination, + Register length, + Register scratch); + + // Initialize fields with filler values. 
Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + + // --------------------------------------------------------------------------- + // Support functions. + + // Check a boolean-bit of a Smi field. + void BooleanBitTest(Register object, int field_offset, int bit_index); + + // Check if result is zero and op is negative. + void NegativeZeroTest(Register result, Register op, Label* then_label); + + // Check if result is zero and any of op1 and op2 are negative. + // Register scratch is destroyed, and it must be different from op2. + void NegativeZeroTest(Register result, Register op1, Register op2, + Register scratch, Label* then_label); + + // Try to get function prototype of a function and puts the value in + // the result register. Checks that the function really is a + // function and jumps to the miss label if the fast checks fail. The + // function register will be untouched; the other registers may be + // clobbered. + void TryGetFunctionPrototype(Register function, + Register result, + Register scratch, + Label* miss, + bool miss_on_bound_function = false); + + // Picks out an array index from the hash field. + // Register use: + // hash - holds the index's hash. Clobbered. + // index - holds the overwritten index on exit. + void IndexFromHash(Register hash, Register index); + + // --------------------------------------------------------------------------- + // Runtime calls + + // Call a code stub. Generate the code if necessary. + void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None()); + + // Tail call a code stub (jump). Generate the code if necessary. + void TailCallStub(CodeStub* stub); + + // Return from a code stub after popping its arguments. 
+ void StubReturn(int argc); + + // Call a runtime routine. + void CallRuntime(const Runtime::Function* f, int num_arguments); + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId id) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, function->nargs); + } + void CallRuntime(Runtime::FunctionId id, int num_arguments) { + CallRuntime(Runtime::FunctionForId(id), num_arguments); + } + + // Convenience function: call an external reference. + void CallExternalReference(ExternalReference ref, int num_arguments); + + // Tail call of a runtime routine (jump). + // Like JumpToExternalReference, but also takes care of passing the number + // of parameters. + void TailCallExternalReference(const ExternalReference& ext, + int num_arguments, + int result_size); + + // Convenience function: tail call a runtime routine (jump). + void TailCallRuntime(Runtime::FunctionId fid, + int num_arguments, + int result_size); + + // Before calling a C-function from generated code, align arguments on stack. + // After aligning the frame, arguments must be stored in esp[0], esp[4], + // etc., not pushed. The argument count assumes all arguments are word sized. + // Some compilers/platforms require the stack to be aligned when calling + // C++ code. + // Needs a scratch register to do some arithmetic. This register will be + // trashed. + void PrepareCallCFunction(int num_arguments, Register scratch); + + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). + void CallCFunction(ExternalReference function, int num_arguments); + void CallCFunction(Register function, int num_arguments); + + // Prepares stack to put arguments (aligns and so on). 
Reserves + // space for return value if needed (assumes the return value is a handle). + // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1) + // etc. Saves context (esi). If space was reserved for return value then + // stores the pointer to the reserved slot into esi. + void PrepareCallApiFunction(int argc); + + // Calls an API function. Allocates HandleScope, extracts returned value + // from handle and propagates exceptions. Clobbers ebx, edi and + // caller-save registers. Restores context. On return removes + // stack_space * kPointerSize (GCed). + void CallApiFunctionAndReturn(Register function_address, + ExternalReference thunk_ref, + Operand thunk_last_arg, + int stack_space, + Operand return_value_operand, + Operand* context_restore_operand); + + // Jump to a runtime routine. + void JumpToExternalReference(const ExternalReference& ext); + + // --------------------------------------------------------------------------- + // Utilities + + void Ret(); + + // Return and drop arguments from stack, where the number of arguments + // may be bigger than 2^16 - 1. Requires a scratch register. + void Ret(int bytes_dropped, Register scratch); + + // Emit code to discard a non-negative number of pointer-sized elements + // from the stack, clobbering only the esp register. + void Drop(int element_count); + + void Call(Label* target) { call(target); } + void Push(Register src) { push(src); } + void Pop(Register dst) { pop(dst); } + + // Emit call to the code we are currently generating. + void CallSelf() { + Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location())); + call(self, RelocInfo::CODE_TARGET); + } + + // Move if the registers are not identical. + void Move(Register target, Register source); + + // Move a constant into a destination using the most efficient encoding. + void Move(Register dst, const Immediate& x); + void Move(const Operand& dst, const Immediate& x); + + // Push a handle value. 
+ void Push(Handle<Object> handle) { push(Immediate(handle)); } + void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } + + Handle<Object> CodeObject() { + DCHECK(!code_object_.is_null()); + return code_object_; + } + + // Insert code to verify that the x87 stack has the specified depth (0-7) + void VerifyX87StackDepth(uint32_t depth); + + // Emit code for a truncating division by a constant. The dividend register is + // unchanged, the result is in edx, and eax gets clobbered. + void TruncatingDiv(Register dividend, int32_t divisor); + + // --------------------------------------------------------------------------- + // StatsCounter support + + void SetCounter(StatsCounter* counter, int value); + void IncrementCounter(StatsCounter* counter, int value); + void DecrementCounter(StatsCounter* counter, int value); + void IncrementCounter(Condition cc, StatsCounter* counter, int value); + void DecrementCounter(Condition cc, StatsCounter* counter, int value); + + + // --------------------------------------------------------------------------- + // Debugging + + // Calls Abort(msg) if the condition cc is not satisfied. + // Use --debug_code to enable. + void Assert(Condition cc, BailoutReason reason); + + void AssertFastElements(Register elements); + + // Like Assert(), but always enabled. + void Check(Condition cc, BailoutReason reason); + + // Print a message to stdout and abort execution. + void Abort(BailoutReason reason); + + // Check that the stack is aligned. + void CheckStackAlignment(); + + // Verify restrictions about code generated in stubs. + void set_generating_stub(bool value) { generating_stub_ = value; } + bool generating_stub() { return generating_stub_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); + + // --------------------------------------------------------------------------- + // String utilities. 
+ + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + void LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Label* not_found); + + // Check whether the instance type represents a flat ASCII string. Jump to the + // label if not. If the instance type can be scratched specify same register + // for both instance type and scratch. + void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type, + Register scratch, + Label* on_not_flat_ascii_string); + + // Checks if both objects are sequential ASCII strings, and jumps to label + // if either is not. + void JumpIfNotBothSequentialAsciiStrings(Register object1, + Register object2, + Register scratch1, + Register scratch2, + Label* on_not_flat_ascii_strings); + + // Checks if the given register or operand is a unique name + void JumpIfNotUniqueName(Register reg, Label* not_unique_name, + Label::Distance distance = Label::kFar) { + JumpIfNotUniqueName(Operand(reg), not_unique_name, distance); + } + + void JumpIfNotUniqueName(Operand operand, Label* not_unique_name, + Label::Distance distance = Label::kFar); + + void EmitSeqStringSetCharCheck(Register string, + Register index, + Register value, + uint32_t encoding_mask); + + static int SafepointRegisterStackIndex(Register reg) { + return SafepointRegisterStackIndex(reg.code()); + } + + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + + // Expects object in eax and returns map with validated enum cache + // in eax. Assumes that any other register can be used as a scratch. 
+ void CheckEnumCache(Label* call_runtime); + + // AllocationMemento support. Arrays may have an associated + // AllocationMemento object that can be checked for in order to pretransition + // to another type. + // On entry, receiver_reg should point to the array object. + // scratch_reg gets clobbered. + // If allocation info is present, conditional code is set to equal. + void TestJSArrayForAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* no_memento_found); + + void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* memento_found) { + Label no_memento_found; + TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, + &no_memento_found); + j(equal, memento_found); + bind(&no_memento_found); + } + + // Jumps to found label if a prototype map has dictionary elements. + void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, + Register scratch1, Label* found); + + private: + bool generating_stub_; + bool has_frame_; + // This handle will be patched with the code object on installation. + Handle<Object> code_object_; + + // Helper functions for generating invokes. + void InvokePrologue(const ParameterCount& expected, + const ParameterCount& actual, + Handle<Code> code_constant, + const Operand& code_operand, + Label* done, + bool* definitely_mismatches, + InvokeFlag flag, + Label::Distance done_distance, + const CallWrapper& call_wrapper = NullCallWrapper()); + + void EnterExitFramePrologue(); + void EnterExitFrameEpilogue(int argc); + + void LeaveExitFrameEpilogue(bool restore_context); + + // Allocation support helpers. + void LoadAllocationTopHelper(Register result, + Register scratch, + AllocationFlags flags); + + void UpdateAllocationTopHelper(Register result_end, + Register scratch, + AllocationFlags flags); + + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. 
+ void InNewSpace(Register object, + Register scratch, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Uses ecx as scratch and leaves addr_reg + // unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); + + // Helper for throwing exceptions. Compute a handler address and jump to + // it. See the implementation for register usage. + void JumpToHandlerEntry(); + + // Compute memory operands for safepoint stack slots. + Operand SafepointRegisterSlot(Register reg); + static int SafepointRegisterStackIndex(int reg_code); + + // Needs access to SafepointRegisterStackIndex for compiled frame + // traversal. + friend class StandardFrame; +}; + + +// The code patcher is used to patch (typically) small parts of code e.g. for +// debugging and other types of instrumentation. When using the code patcher +// the exact number of bytes specified must be emitted. Is not legal to emit +// relocation information. If any of these constraints are violated it causes +// an assertion. +class CodePatcher { + public: + CodePatcher(byte* address, int size); + virtual ~CodePatcher(); + + // Macro assembler to emit code. + MacroAssembler* masm() { return &masm_; } + + private: + byte* address_; // The address of the code being patched. + int size_; // Number of bytes of the expected patch size. + MacroAssembler masm_; // Macro assembler used to generate the code. +}; + + +// ----------------------------------------------------------------------------- +// Static helper functions. + +// Generate an Operand for loading a field from an object. 
+inline Operand FieldOperand(Register object, int offset) { + return Operand(object, offset - kHeapObjectTag); +} + + +// Generate an Operand for loading an indexed field from an object. +inline Operand FieldOperand(Register object, + Register index, + ScaleFactor scale, + int offset) { + return Operand(object, index, scale, offset - kHeapObjectTag); +} + + +inline Operand FixedArrayElementOperand(Register array, + Register index_as_smi, + int additional_offset = 0) { + int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize; + return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); +} + + +inline Operand ContextOperand(Register context, int index) { + return Operand(context, Context::SlotOffset(index)); +} + + +inline Operand GlobalObjectOperand() { + return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX); +} + + +// Generates an Operand for saving parameters after PrepareCallApiFunction. +Operand ApiParameterOperand(int index); + + +#ifdef GENERATED_CODE_COVERAGE +extern void LogGeneratedCodeCoverage(const char* file_line); +#define CODE_COVERAGE_STRINGIFY(x) #x +#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) +#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) +#define ACCESS_MASM(masm) { \ + byte* ia32_coverage_function = \ + reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \ + masm->pushfd(); \ + masm->pushad(); \ + masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \ + masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \ + masm->pop(eax); \ + masm->popad(); \ + masm->popfd(); \ + } \ + masm-> +#else +#define ACCESS_MASM(masm) masm-> +#endif + + +} } // namespace v8::internal + +#endif // V8_X87_MACRO_ASSEMBLER_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/OWNERS nodejs-0.11.15/deps/v8/src/x87/OWNERS --- nodejs-0.11.13/deps/v8/src/x87/OWNERS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/OWNERS 2015-01-20 21:22:17.000000000 +0000 
@@ -0,0 +1 @@ +weiliang.lin@intel.com diff -Nru nodejs-0.11.13/deps/v8/src/x87/regexp-macro-assembler-x87.cc nodejs-0.11.15/deps/v8/src/x87/regexp-macro-assembler-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/regexp-macro-assembler-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/regexp-macro-assembler-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1309 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/cpu-profiler.h" +#include "src/log.h" +#include "src/macro-assembler.h" +#include "src/regexp-macro-assembler.h" +#include "src/regexp-stack.h" +#include "src/unicode.h" +#include "src/x87/regexp-macro-assembler-x87.h" + +namespace v8 { +namespace internal { + +#ifndef V8_INTERPRETED_REGEXP +/* + * This assembler uses the following register assignment convention + * - edx : Current character. Must be loaded using LoadCurrentCharacter + * before using any of the dispatch methods. Temporarily stores the + * index of capture start after a matching pass for a global regexp. + * - edi : Current position in input, as negative offset from end of string. + * Please notice that this is the byte offset, not the character offset! + * - esi : end of input (points to byte after last character in input). + * - ebp : Frame pointer. Used to access arguments, local variables and + * RegExp registers. + * - esp : Points to tip of C stack. + * - ecx : Points to tip of backtrack stack + * + * The registers eax and ebx are free to use for computations. + * + * Each call to a public method should retain this convention. 
+ * The stack will have the following structure: + * - Isolate* isolate (address of the current isolate) + * - direct_call (if 1, direct call from JavaScript code, if 0 + * call through the runtime system) + * - stack_area_base (high end of the memory area to use as + * backtracking stack) + * - capture array size (may fit multiple sets of matches) + * - int* capture_array (int[num_saved_registers_], for output). + * - end of input (address of end of string) + * - start of input (address of first character in string) + * - start index (character index of start) + * - String* input_string (location of a handle containing the string) + * --- frame alignment (if applicable) --- + * - return address + * ebp-> - old ebp + * - backup of caller esi + * - backup of caller edi + * - backup of caller ebx + * - success counter (only for global regexps to count matches). + * - Offset of location before start of input (effectively character + * position -1). Used to initialize capture registers to a non-position. + * - register 0 ebp[-4] (only positions must be stored in the first + * - register 1 ebp[-8] num_saved_registers_ registers) + * - ... + * + * The first num_saved_registers_ registers are initialized to point to + * "character -1" in the string (i.e., char_size() bytes before the first + * character of the string). The remaining registers starts out as garbage. 
+ * + * The data up to the return address must be placed there by the calling + * code, by calling the code entry as cast to a function with the signature: + * int (*match)(String* input_string, + * int start_index, + * Address start, + * Address end, + * int* capture_output_array, + * bool at_start, + * byte* stack_area_base, + * bool direct_call) + */ + +#define __ ACCESS_MASM(masm_) + +RegExpMacroAssemblerX87::RegExpMacroAssemblerX87( + Mode mode, + int registers_to_save, + Zone* zone) + : NativeRegExpMacroAssembler(zone), + masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)), + mode_(mode), + num_registers_(registers_to_save), + num_saved_registers_(registers_to_save), + entry_label_(), + start_label_(), + success_label_(), + backtrack_label_(), + exit_label_() { + DCHECK_EQ(0, registers_to_save % 2); + __ jmp(&entry_label_); // We'll write the entry code later. + __ bind(&start_label_); // And then continue from here. +} + + +RegExpMacroAssemblerX87::~RegExpMacroAssemblerX87() { + delete masm_; + // Unuse labels in case we throw away the assembler without calling GetCode. + entry_label_.Unuse(); + start_label_.Unuse(); + success_label_.Unuse(); + backtrack_label_.Unuse(); + exit_label_.Unuse(); + check_preempt_label_.Unuse(); + stack_overflow_label_.Unuse(); +} + + +int RegExpMacroAssemblerX87::stack_limit_slack() { + return RegExpStack::kStackLimitSlack; +} + + +void RegExpMacroAssemblerX87::AdvanceCurrentPosition(int by) { + if (by != 0) { + __ add(edi, Immediate(by * char_size())); + } +} + + +void RegExpMacroAssemblerX87::AdvanceRegister(int reg, int by) { + DCHECK(reg >= 0); + DCHECK(reg < num_registers_); + if (by != 0) { + __ add(register_location(reg), Immediate(by)); + } +} + + +void RegExpMacroAssemblerX87::Backtrack() { + CheckPreemption(); + // Pop Code* offset from backtrack stack, add Code* and jump to location. 
+ Pop(ebx); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); +} + + +void RegExpMacroAssemblerX87::Bind(Label* label) { + __ bind(label); +} + + +void RegExpMacroAssemblerX87::CheckCharacter(uint32_t c, Label* on_equal) { + __ cmp(current_character(), c); + BranchOrBacktrack(equal, on_equal); +} + + +void RegExpMacroAssemblerX87::CheckCharacterGT(uc16 limit, Label* on_greater) { + __ cmp(current_character(), limit); + BranchOrBacktrack(greater, on_greater); +} + + +void RegExpMacroAssemblerX87::CheckAtStart(Label* on_at_start) { + Label not_at_start; + // Did we start the match at the start of the string at all? + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + BranchOrBacktrack(not_equal, ¬_at_start); + // If we did, are we still at the start of the input? + __ lea(eax, Operand(esi, edi, times_1, 0)); + __ cmp(eax, Operand(ebp, kInputStart)); + BranchOrBacktrack(equal, on_at_start); + __ bind(¬_at_start); +} + + +void RegExpMacroAssemblerX87::CheckNotAtStart(Label* on_not_at_start) { + // Did we start the match at the start of the string at all? + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + BranchOrBacktrack(not_equal, on_not_at_start); + // If we did, are we still at the start of the input? + __ lea(eax, Operand(esi, edi, times_1, 0)); + __ cmp(eax, Operand(ebp, kInputStart)); + BranchOrBacktrack(not_equal, on_not_at_start); +} + + +void RegExpMacroAssemblerX87::CheckCharacterLT(uc16 limit, Label* on_less) { + __ cmp(current_character(), limit); + BranchOrBacktrack(less, on_less); +} + + +void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) { + Label fallthrough; + __ cmp(edi, Operand(backtrack_stackpointer(), 0)); + __ j(not_equal, &fallthrough); + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop. 
+ BranchOrBacktrack(no_condition, on_equal); + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase( + int start_reg, + Label* on_no_match) { + Label fallthrough; + __ mov(edx, register_location(start_reg)); // Index of start of capture + __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture + __ sub(ebx, edx); // Length of capture. + + // The length of a capture should not be negative. This can only happen + // if the end of the capture is unrecorded, or at a point earlier than + // the start of the capture. + BranchOrBacktrack(less, on_no_match); + + // If length is zero, either the capture is empty or it is completely + // uncaptured. In either case succeed immediately. + __ j(equal, &fallthrough); + + // Check that there are sufficient characters left in the input. + __ mov(eax, edi); + __ add(eax, ebx); + BranchOrBacktrack(greater, on_no_match); + + if (mode_ == ASCII) { + Label success; + Label fail; + Label loop_increment; + // Save register contents to make the registers available below. + __ push(edi); + __ push(backtrack_stackpointer()); + // After this, the eax, ecx, and edi registers are available. + + __ add(edx, esi); // Start of capture + __ add(edi, esi); // Start of text to match against capture. + __ add(ebx, edi); // End of text to match against capture. + + Label loop; + __ bind(&loop); + __ movzx_b(eax, Operand(edi, 0)); + __ cmpb_al(Operand(edx, 0)); + __ j(equal, &loop_increment); + + // Mismatch, try case-insensitive match (converting letters to lower-case). + __ or_(eax, 0x20); // Convert match character to lower-case. + __ lea(ecx, Operand(eax, -'a')); + __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter? + Label convert_capture; + __ j(below_equal, &convert_capture); // In range 'a'-'z'. + // Latin-1: Check for values in range [224,254] but not 247. 
+ __ sub(ecx, Immediate(224 - 'a')); + __ cmp(ecx, Immediate(254 - 224)); + __ j(above, &fail); // Weren't Latin-1 letters. + __ cmp(ecx, Immediate(247 - 224)); // Check for 247. + __ j(equal, &fail); + __ bind(&convert_capture); + // Also convert capture character. + __ movzx_b(ecx, Operand(edx, 0)); + __ or_(ecx, 0x20); + + __ cmp(eax, ecx); + __ j(not_equal, &fail); + + __ bind(&loop_increment); + // Increment pointers into match and capture strings. + __ add(edx, Immediate(1)); + __ add(edi, Immediate(1)); + // Compare to end of match, and loop if not done. + __ cmp(edi, ebx); + __ j(below, &loop); + __ jmp(&success); + + __ bind(&fail); + // Restore original values before failing. + __ pop(backtrack_stackpointer()); + __ pop(edi); + BranchOrBacktrack(no_condition, on_no_match); + + __ bind(&success); + // Restore original value before continuing. + __ pop(backtrack_stackpointer()); + // Drop original value of character position. + __ add(esp, Immediate(kPointerSize)); + // Compute new value of character position after the matched part. + __ sub(edi, esi); + } else { + DCHECK(mode_ == UC16); + // Save registers before calling C function. + __ push(esi); + __ push(edi); + __ push(backtrack_stackpointer()); + __ push(ebx); + + static const int argument_count = 4; + __ PrepareCallCFunction(argument_count, ecx); + // Put arguments into allocated stack area, last argument highest on stack. + // Parameters are + // Address byte_offset1 - Address captured substring's start. + // Address byte_offset2 - Address of current character position. + // size_t byte_length - length of capture in bytes(!) + // Isolate* isolate + + // Set isolate. + __ mov(Operand(esp, 3 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + // Set byte_length. + __ mov(Operand(esp, 2 * kPointerSize), ebx); + // Set byte_offset2. + // Found by adding negative string-end offset of current position (edi) + // to end of string. 
+ __ add(edi, esi); + __ mov(Operand(esp, 1 * kPointerSize), edi); + // Set byte_offset1. + // Start of capture, where edx already holds string-end negative offset. + __ add(edx, esi); + __ mov(Operand(esp, 0 * kPointerSize), edx); + + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(isolate()); + __ CallCFunction(compare, argument_count); + } + // Pop original values before reacting on result value. + __ pop(ebx); + __ pop(backtrack_stackpointer()); + __ pop(edi); + __ pop(esi); + + // Check if function returned non-zero for success or zero for failure. + __ or_(eax, eax); + BranchOrBacktrack(zero, on_no_match); + // On success, increment position by length of capture. + __ add(edi, ebx); + } + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerX87::CheckNotBackReference( + int start_reg, + Label* on_no_match) { + Label fallthrough; + Label success; + Label fail; + + // Find length of back-referenced capture. + __ mov(edx, register_location(start_reg)); + __ mov(eax, register_location(start_reg + 1)); + __ sub(eax, edx); // Length to check. + // Fail on partial or illegal capture (start of capture after end of capture). + BranchOrBacktrack(less, on_no_match); + // Succeed on empty capture (including no capture) + __ j(equal, &fallthrough); + + // Check that there are sufficient characters left in the input. + __ mov(ebx, edi); + __ add(ebx, eax); + BranchOrBacktrack(greater, on_no_match); + + // Save register to make it available below. + __ push(backtrack_stackpointer()); + + // Compute pointers to match string and capture string + __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match. + __ add(edx, esi); // Start of capture. 
+ __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match + + Label loop; + __ bind(&loop); + if (mode_ == ASCII) { + __ movzx_b(eax, Operand(edx, 0)); + __ cmpb_al(Operand(ebx, 0)); + } else { + DCHECK(mode_ == UC16); + __ movzx_w(eax, Operand(edx, 0)); + __ cmpw_ax(Operand(ebx, 0)); + } + __ j(not_equal, &fail); + // Increment pointers into capture and match string. + __ add(edx, Immediate(char_size())); + __ add(ebx, Immediate(char_size())); + // Check if we have reached end of match area. + __ cmp(ebx, ecx); + __ j(below, &loop); + __ jmp(&success); + + __ bind(&fail); + // Restore backtrack stackpointer. + __ pop(backtrack_stackpointer()); + BranchOrBacktrack(no_condition, on_no_match); + + __ bind(&success); + // Move current character position to position after match. + __ mov(edi, ecx); + __ sub(edi, esi); + // Restore backtrack stackpointer. + __ pop(backtrack_stackpointer()); + + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerX87::CheckNotCharacter(uint32_t c, + Label* on_not_equal) { + __ cmp(current_character(), c); + BranchOrBacktrack(not_equal, on_not_equal); +} + + +void RegExpMacroAssemblerX87::CheckCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_equal) { + if (c == 0) { + __ test(current_character(), Immediate(mask)); + } else { + __ mov(eax, mask); + __ and_(eax, current_character()); + __ cmp(eax, c); + } + BranchOrBacktrack(equal, on_equal); +} + + +void RegExpMacroAssemblerX87::CheckNotCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_not_equal) { + if (c == 0) { + __ test(current_character(), Immediate(mask)); + } else { + __ mov(eax, mask); + __ and_(eax, current_character()); + __ cmp(eax, c); + } + BranchOrBacktrack(not_equal, on_not_equal); +} + + +void RegExpMacroAssemblerX87::CheckNotCharacterAfterMinusAnd( + uc16 c, + uc16 minus, + uc16 mask, + Label* on_not_equal) { + DCHECK(minus < String::kMaxUtf16CodeUnit); + __ lea(eax, Operand(current_character(), -minus)); + if (c == 0) { + __ test(eax, 
Immediate(mask)); + } else { + __ and_(eax, mask); + __ cmp(eax, c); + } + BranchOrBacktrack(not_equal, on_not_equal); +} + + +void RegExpMacroAssemblerX87::CheckCharacterInRange( + uc16 from, + uc16 to, + Label* on_in_range) { + __ lea(eax, Operand(current_character(), -from)); + __ cmp(eax, to - from); + BranchOrBacktrack(below_equal, on_in_range); +} + + +void RegExpMacroAssemblerX87::CheckCharacterNotInRange( + uc16 from, + uc16 to, + Label* on_not_in_range) { + __ lea(eax, Operand(current_character(), -from)); + __ cmp(eax, to - from); + BranchOrBacktrack(above, on_not_in_range); +} + + +void RegExpMacroAssemblerX87::CheckBitInTable( + Handle<ByteArray> table, + Label* on_bit_set) { + __ mov(eax, Immediate(table)); + Register index = current_character(); + if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { + __ mov(ebx, kTableSize - 1); + __ and_(ebx, current_character()); + index = ebx; + } + __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0); + BranchOrBacktrack(not_equal, on_bit_set); +} + + +bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type, + Label* on_no_match) { + // Range checks (c in min..max) are generally implemented by an unsigned + // (c - min) <= (max - min) check + switch (type) { + case 's': + // Match space-characters + if (mode_ == ASCII) { + // One byte space characters are '\t'..'\r', ' ' and \u00a0. + Label success; + __ cmp(current_character(), ' '); + __ j(equal, &success, Label::kNear); + // Check range 0x09..0x0d + __ lea(eax, Operand(current_character(), -'\t')); + __ cmp(eax, '\r' - '\t'); + __ j(below_equal, &success, Label::kNear); + // \u00a0 (NBSP). + __ cmp(eax, 0x00a0 - '\t'); + BranchOrBacktrack(not_equal, on_no_match); + __ bind(&success); + return true; + } + return false; + case 'S': + // The emitted code for generic character classes is good enough. 
+ return false; + case 'd': + // Match ASCII digits ('0'..'9') + __ lea(eax, Operand(current_character(), -'0')); + __ cmp(eax, '9' - '0'); + BranchOrBacktrack(above, on_no_match); + return true; + case 'D': + // Match non ASCII-digits + __ lea(eax, Operand(current_character(), -'0')); + __ cmp(eax, '9' - '0'); + BranchOrBacktrack(below_equal, on_no_match); + return true; + case '.': { + // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); + // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c + __ sub(eax, Immediate(0x0b)); + __ cmp(eax, 0x0c - 0x0b); + BranchOrBacktrack(below_equal, on_no_match); + if (mode_ == UC16) { + // Compare original value to 0x2028 and 0x2029, using the already + // computed (current_char ^ 0x01 - 0x0b). I.e., check for + // 0x201d (0x2028 - 0x0b) or 0x201e. + __ sub(eax, Immediate(0x2028 - 0x0b)); + __ cmp(eax, 0x2029 - 0x2028); + BranchOrBacktrack(below_equal, on_no_match); + } + return true; + } + case 'w': { + if (mode_ != ASCII) { + // Table is 128 entries, so all ASCII characters can be tested. + __ cmp(current_character(), Immediate('z')); + BranchOrBacktrack(above, on_no_match); + } + DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char. + ExternalReference word_map = ExternalReference::re_word_character_map(); + __ test_b(current_character(), + Operand::StaticArray(current_character(), times_1, word_map)); + BranchOrBacktrack(zero, on_no_match); + return true; + } + case 'W': { + Label done; + if (mode_ != ASCII) { + // Table is 128 entries, so all ASCII characters can be tested. + __ cmp(current_character(), Immediate('z')); + __ j(above, &done); + } + DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char. 
+ ExternalReference word_map = ExternalReference::re_word_character_map(); + __ test_b(current_character(), + Operand::StaticArray(current_character(), times_1, word_map)); + BranchOrBacktrack(not_zero, on_no_match); + if (mode_ != ASCII) { + __ bind(&done); + } + return true; + } + // Non-standard classes (with no syntactic shorthand) used internally. + case '*': + // Match any character. + return true; + case 'n': { + // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029). + // The opposite of '.'. + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); + // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c + __ sub(eax, Immediate(0x0b)); + __ cmp(eax, 0x0c - 0x0b); + if (mode_ == ASCII) { + BranchOrBacktrack(above, on_no_match); + } else { + Label done; + BranchOrBacktrack(below_equal, &done); + DCHECK_EQ(UC16, mode_); + // Compare original value to 0x2028 and 0x2029, using the already + // computed (current_char ^ 0x01 - 0x0b). I.e., check for + // 0x201d (0x2028 - 0x0b) or 0x201e. + __ sub(eax, Immediate(0x2028 - 0x0b)); + __ cmp(eax, 1); + BranchOrBacktrack(above, on_no_match); + __ bind(&done); + } + return true; + } + // No custom implementation (yet): s(UC16), S(UC16). + default: + return false; + } +} + + +void RegExpMacroAssemblerX87::Fail() { + STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero. + if (!global()) { + __ Move(eax, Immediate(FAILURE)); + } + __ jmp(&exit_label_); +} + + +Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) { + Label return_eax; + // Finalize code - write the entry point code now we know how many + // registers we need. + + // Entry code: + __ bind(&entry_label_); + + // Tell the system that we have a stack frame. Because the type is MANUAL, no + // code is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. + __ push(ebp); + __ mov(ebp, esp); + // Save callee-save registers. 
Order here should correspond to order of + // kBackup_ebx etc. + __ push(esi); + __ push(edi); + __ push(ebx); // Callee-save on MacOS. + __ push(Immediate(0)); // Number of successful matches in a global regexp. + __ push(Immediate(0)); // Make room for "input start - 1" constant. + + // Check if we have space on the stack for registers. + Label stack_limit_hit; + Label stack_ok; + + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ mov(ecx, esp); + __ sub(ecx, Operand::StaticVariable(stack_limit)); + // Handle it if the stack pointer is already below the stack limit. + __ j(below_equal, &stack_limit_hit); + // Check if there is room for the variable number of registers above + // the stack limit. + __ cmp(ecx, num_registers_ * kPointerSize); + __ j(above_equal, &stack_ok); + // Exit with OutOfMemory exception. There is not enough space on the stack + // for our working registers. + __ mov(eax, EXCEPTION); + __ jmp(&return_eax); + + __ bind(&stack_limit_hit); + CallCheckStackGuardState(ebx); + __ or_(eax, eax); + // If returned value is non-zero, we exit with the returned value as result. + __ j(not_zero, &return_eax); + + __ bind(&stack_ok); + // Load start index for later use. + __ mov(ebx, Operand(ebp, kStartIndex)); + + // Allocate space on stack for registers. + __ sub(esp, Immediate(num_registers_ * kPointerSize)); + // Load string length. + __ mov(esi, Operand(ebp, kInputEnd)); + // Load input position. + __ mov(edi, Operand(ebp, kInputStart)); + // Set up edi to be negative offset from string end. + __ sub(edi, esi); + + // Set eax to address of char before start of the string. + // (effectively string position -1). + __ neg(ebx); + if (mode_ == UC16) { + __ lea(eax, Operand(edi, ebx, times_2, -char_size())); + } else { + __ lea(eax, Operand(edi, ebx, times_1, -char_size())); + } + // Store this value in a local variable, for use when clearing + // position registers. 
+ __ mov(Operand(ebp, kInputStartMinusOne), eax); + +#if V8_OS_WIN + // Ensure that we write to each stack page, in order. Skipping a page + // on Windows can cause segmentation faults. Assuming page size is 4k. + const int kPageSize = 4096; + const int kRegistersPerPage = kPageSize / kPointerSize; + for (int i = num_saved_registers_ + kRegistersPerPage - 1; + i < num_registers_; + i += kRegistersPerPage) { + __ mov(register_location(i), eax); // One write every page. + } +#endif // V8_OS_WIN + + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + __ j(not_equal, &load_char_start_regexp, Label::kNear); + __ mov(current_character(), '\n'); + __ jmp(&start_regexp, Label::kNear); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. + if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. + // Fill saved registers with initial value = start offset - 1 + // Fill in stack push order, to avoid accessing across an unwritten + // page (a problem on Windows). + if (num_saved_registers_ > 8) { + __ mov(ecx, kRegisterZero); + Label init_loop; + __ bind(&init_loop); + __ mov(Operand(ebp, ecx, times_1, 0), eax); + __ sub(ecx, Immediate(kPointerSize)); + __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize); + __ j(greater, &init_loop); + } else { // Unroll the loop. + for (int i = 0; i < num_saved_registers_; i++) { + __ mov(register_location(i), eax); + } + } + } + + // Initialize backtrack stack pointer. + __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd)); + + __ jmp(&start_label_); + + // Exit code: + if (success_label_.is_linked()) { + // Save captures when successful. 
+ __ bind(&success_label_); + if (num_saved_registers_ > 0) { + // copy captures to output + __ mov(ebx, Operand(ebp, kRegisterOutput)); + __ mov(ecx, Operand(ebp, kInputEnd)); + __ mov(edx, Operand(ebp, kStartIndex)); + __ sub(ecx, Operand(ebp, kInputStart)); + if (mode_ == UC16) { + __ lea(ecx, Operand(ecx, edx, times_2, 0)); + } else { + __ add(ecx, edx); + } + for (int i = 0; i < num_saved_registers_; i++) { + __ mov(eax, register_location(i)); + if (i == 0 && global_with_zero_length_check()) { + // Keep capture start in edx for the zero-length check later. + __ mov(edx, eax); + } + // Convert to index from start of string, not end. + __ add(eax, ecx); + if (mode_ == UC16) { + __ sar(eax, 1); // Convert byte index to character index. + } + __ mov(Operand(ebx, i * kPointerSize), eax); + } + } + + if (global()) { + // Restart matching if the regular expression is flagged as global. + // Increment success counter. + __ inc(Operand(ebp, kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ mov(ecx, Operand(ebp, kNumOutputRegisters)); + __ sub(ecx, Immediate(num_saved_registers_)); + // Check whether we have enough room for another set of capture results. + __ cmp(ecx, Immediate(num_saved_registers_)); + __ j(less, &exit_label_); + + __ mov(Operand(ebp, kNumOutputRegisters), ecx); + // Advance the location for output. + __ add(Operand(ebp, kRegisterOutput), + Immediate(num_saved_registers_ * kPointerSize)); + + // Prepare eax to initialize registers with its value in the next run. + __ mov(eax, Operand(ebp, kInputStartMinusOne)); + + if (global_with_zero_length_check()) { + // Special case for zero-length matches. + // edx: capture start index + __ cmp(edi, edx); + // Not a zero-length match, restart. + __ j(not_equal, &load_char_start_regexp); + // edi (offset from the end) is zero if we already reached the end. 
+ __ test(edi, edi); + __ j(zero, &exit_label_, Label::kNear); + // Advance current position after a zero-length match. + if (mode_ == UC16) { + __ add(edi, Immediate(2)); + } else { + __ inc(edi); + } + } + + __ jmp(&load_char_start_regexp); + } else { + __ mov(eax, Immediate(SUCCESS)); + } + } + + __ bind(&exit_label_); + if (global()) { + // Return the number of successful captures. + __ mov(eax, Operand(ebp, kSuccessfulCaptures)); + } + + __ bind(&return_eax); + // Skip esp past regexp registers. + __ lea(esp, Operand(ebp, kBackup_ebx)); + // Restore callee-save registers. + __ pop(ebx); + __ pop(edi); + __ pop(esi); + // Exit function frame, restore previous one. + __ pop(ebp); + __ ret(0); + + // Backtrack code (branch target for conditional backtracks). + if (backtrack_label_.is_linked()) { + __ bind(&backtrack_label_); + Backtrack(); + } + + Label exit_with_exception; + + // Preempt-code + if (check_preempt_label_.is_linked()) { + SafeCallTarget(&check_preempt_label_); + + __ push(backtrack_stackpointer()); + __ push(edi); + + CallCheckStackGuardState(ebx); + __ or_(eax, eax); + // If returning non-zero, we should end execution with the given + // result as return value. + __ j(not_zero, &return_eax); + + __ pop(edi); + __ pop(backtrack_stackpointer()); + // String might have moved: Reload esi from frame. + __ mov(esi, Operand(ebp, kInputEnd)); + SafeReturn(); + } + + // Backtrack stack overflow code. + if (stack_overflow_label_.is_linked()) { + SafeCallTarget(&stack_overflow_label_); + // Reached if the backtrack-stack limit has been hit. 
+ + Label grow_failed; + // Save registers before calling C function + __ push(esi); + __ push(edi); + + // Call GrowStack(backtrack_stackpointer()) + static const int num_arguments = 3; + __ PrepareCallCFunction(num_arguments, ebx); + __ mov(Operand(esp, 2 * kPointerSize), + Immediate(ExternalReference::isolate_address(isolate()))); + __ lea(eax, Operand(ebp, kStackHighEnd)); + __ mov(Operand(esp, 1 * kPointerSize), eax); + __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer()); + ExternalReference grow_stack = + ExternalReference::re_grow_stack(isolate()); + __ CallCFunction(grow_stack, num_arguments); + // If return NULL, we have failed to grow the stack, and + // must exit with a stack-overflow exception. + __ or_(eax, eax); + __ j(equal, &exit_with_exception); + // Otherwise use return value as new stack pointer. + __ mov(backtrack_stackpointer(), eax); + // Restore saved registers and continue. + __ pop(edi); + __ pop(esi); + SafeReturn(); + } + + if (exit_with_exception.is_linked()) { + // If any of the code above needed to exit with an exception. + __ bind(&exit_with_exception); + // Exit with Result EXCEPTION(-1) to signal thrown exception. 
+ __ mov(eax, EXCEPTION); + __ jmp(&return_eax); + } + + CodeDesc code_desc; + masm_->GetCode(&code_desc); + Handle<Code> code = + isolate()->factory()->NewCode(code_desc, + Code::ComputeFlags(Code::REGEXP), + masm_->CodeObject()); + PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source)); + return Handle<HeapObject>::cast(code); +} + + +void RegExpMacroAssemblerX87::GoTo(Label* to) { + BranchOrBacktrack(no_condition, to); +} + + +void RegExpMacroAssemblerX87::IfRegisterGE(int reg, + int comparand, + Label* if_ge) { + __ cmp(register_location(reg), Immediate(comparand)); + BranchOrBacktrack(greater_equal, if_ge); +} + + +void RegExpMacroAssemblerX87::IfRegisterLT(int reg, + int comparand, + Label* if_lt) { + __ cmp(register_location(reg), Immediate(comparand)); + BranchOrBacktrack(less, if_lt); +} + + +void RegExpMacroAssemblerX87::IfRegisterEqPos(int reg, + Label* if_eq) { + __ cmp(edi, register_location(reg)); + BranchOrBacktrack(equal, if_eq); +} + + +RegExpMacroAssembler::IrregexpImplementation + RegExpMacroAssemblerX87::Implementation() { + return kX87Implementation; +} + + +void RegExpMacroAssemblerX87::LoadCurrentCharacter(int cp_offset, + Label* on_end_of_input, + bool check_bounds, + int characters) { + DCHECK(cp_offset >= -1); // ^ and \b can look behind one character. + DCHECK(cp_offset < (1<<30)); // Be sane! 
(And ensure negation works) + if (check_bounds) { + CheckPosition(cp_offset + characters - 1, on_end_of_input); + } + LoadCurrentCharacterUnchecked(cp_offset, characters); +} + + +void RegExpMacroAssemblerX87::PopCurrentPosition() { + Pop(edi); +} + + +void RegExpMacroAssemblerX87::PopRegister(int register_index) { + Pop(eax); + __ mov(register_location(register_index), eax); +} + + +void RegExpMacroAssemblerX87::PushBacktrack(Label* label) { + Push(Immediate::CodeRelativeOffset(label)); + CheckStackLimit(); +} + + +void RegExpMacroAssemblerX87::PushCurrentPosition() { + Push(edi); +} + + +void RegExpMacroAssemblerX87::PushRegister(int register_index, + StackCheckFlag check_stack_limit) { + __ mov(eax, register_location(register_index)); + Push(eax); + if (check_stack_limit) CheckStackLimit(); +} + + +void RegExpMacroAssemblerX87::ReadCurrentPositionFromRegister(int reg) { + __ mov(edi, register_location(reg)); +} + + +void RegExpMacroAssemblerX87::ReadStackPointerFromRegister(int reg) { + __ mov(backtrack_stackpointer(), register_location(reg)); + __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd)); +} + +void RegExpMacroAssemblerX87::SetCurrentPositionFromEnd(int by) { + Label after_position; + __ cmp(edi, -by * char_size()); + __ j(greater_equal, &after_position, Label::kNear); + __ mov(edi, -by * char_size()); + // On RegExp code entry (where this operation is used), the character before + // the current position is expected to be already loaded. + // We have advanced the position, so it's safe to read backwards. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&after_position); +} + + +void RegExpMacroAssemblerX87::SetRegister(int register_index, int to) { + DCHECK(register_index >= num_saved_registers_); // Reserved for positions! 
+ __ mov(register_location(register_index), Immediate(to)); +} + + +bool RegExpMacroAssemblerX87::Succeed() { + __ jmp(&success_label_); + return global(); +} + + +void RegExpMacroAssemblerX87::WriteCurrentPositionToRegister(int reg, + int cp_offset) { + if (cp_offset == 0) { + __ mov(register_location(reg), edi); + } else { + __ lea(eax, Operand(edi, cp_offset * char_size())); + __ mov(register_location(reg), eax); + } +} + + +void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) { + DCHECK(reg_from <= reg_to); + __ mov(eax, Operand(ebp, kInputStartMinusOne)); + for (int reg = reg_from; reg <= reg_to; reg++) { + __ mov(register_location(reg), eax); + } +} + + +void RegExpMacroAssemblerX87::WriteStackPointerToRegister(int reg) { + __ mov(eax, backtrack_stackpointer()); + __ sub(eax, Operand(ebp, kStackHighEnd)); + __ mov(register_location(reg), eax); +} + + +// Private methods: + +void RegExpMacroAssemblerX87::CallCheckStackGuardState(Register scratch) { + static const int num_arguments = 3; + __ PrepareCallCFunction(num_arguments, scratch); + // RegExp code frame pointer. + __ mov(Operand(esp, 2 * kPointerSize), ebp); + // Code* of self. + __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject())); + // Next address on the stack (will be address of return address). + __ lea(eax, Operand(esp, -kPointerSize)); + __ mov(Operand(esp, 0 * kPointerSize), eax); + ExternalReference check_stack_guard = + ExternalReference::re_check_stack_guard_state(isolate()); + __ CallCFunction(check_stack_guard, num_arguments); +} + + +// Helper function for reading a value out of a stack frame. 
+template <typename T> +static T& frame_entry(Address re_frame, int frame_offset) { + return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset)); +} + + +int RegExpMacroAssemblerX87::CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame) { + Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); + StackLimitCheck check(isolate); + if (check.JsHasOverflowed()) { + isolate->StackOverflow(); + return EXCEPTION; + } + + // If not real stack overflow the stack guard was used to interrupt + // execution for another purpose. + + // If this is a direct call from JavaScript retry the RegExp forcing the call + // through the runtime system. Currently the direct call cannot handle a GC. + if (frame_entry<int>(re_frame, kDirectCall) == 1) { + return RETRY; + } + + // Prepare for possible GC. + HandleScope handles(isolate); + Handle<Code> code_handle(re_code); + + Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); + + // Current string. + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); + + DCHECK(re_code->instruction_start() <= *return_address); + DCHECK(*return_address <= + re_code->instruction_start() + re_code->instruction_size()); + + Object* result = isolate->stack_guard()->HandleInterrupts(); + + if (*code_handle != re_code) { // Return address no longer valid + int delta = code_handle->address() - re_code->address(); + // Overwrite the return address on the stack. + *return_address += delta; + } + + if (result->IsException()) { + return EXCEPTION; + } + + Handle<String> subject_tmp = subject; + int slice_offset = 0; + + // Extract the underlying string and the slice offset. 
+ if (StringShape(*subject_tmp).IsCons()) { + subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first()); + } else if (StringShape(*subject_tmp).IsSliced()) { + SlicedString* slice = SlicedString::cast(*subject_tmp); + subject_tmp = Handle<String>(slice->parent()); + slice_offset = slice->offset(); + } + + // String might have changed. + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { + // If we changed between an ASCII and an UC16 string, the specialized + // code cannot be used, and we need to restart regexp matching from + // scratch (including, potentially, compiling a new version of the code). + return RETRY; + } + + // Otherwise, the content of the string might have moved. It must still + // be a sequential or external string with the same content. + // Update the start and end pointers in the stack frame to the current + // location (whether it has actually moved or not). + DCHECK(StringShape(*subject_tmp).IsSequential() || + StringShape(*subject_tmp).IsExternal()); + + // The original start address of the characters to match. + const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart); + + // Find the current start address of the same character at the current string + // position. + int start_index = frame_entry<int>(re_frame, kStartIndex); + const byte* new_address = StringCharacterPosition(*subject_tmp, + start_index + slice_offset); + + if (start_address != new_address) { + // If there is a difference, update the object pointer and start and end + // addresses in the RegExp stack frame to match the new value. 
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd); + int byte_length = static_cast<int>(end_address - start_address); + frame_entry<const String*>(re_frame, kInputString) = *subject; + frame_entry<const byte*>(re_frame, kInputStart) = new_address; + frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length; + } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) { + // Subject string might have been a ConsString that underwent + // short-circuiting during GC. That will not change start_address but + // will change pointer inside the subject handle. + frame_entry<const String*>(re_frame, kInputString) = *subject; + } + + return 0; +} + + +Operand RegExpMacroAssemblerX87::register_location(int register_index) { + DCHECK(register_index < (1<<30)); + if (num_registers_ <= register_index) { + num_registers_ = register_index + 1; + } + return Operand(ebp, kRegisterZero - register_index * kPointerSize); +} + + +void RegExpMacroAssemblerX87::CheckPosition(int cp_offset, + Label* on_outside_input) { + __ cmp(edi, -cp_offset * char_size()); + BranchOrBacktrack(greater_equal, on_outside_input); +} + + +void RegExpMacroAssemblerX87::BranchOrBacktrack(Condition condition, + Label* to) { + if (condition < 0) { // No condition + if (to == NULL) { + Backtrack(); + return; + } + __ jmp(to); + return; + } + if (to == NULL) { + __ j(condition, &backtrack_label_); + return; + } + __ j(condition, to); +} + + +void RegExpMacroAssemblerX87::SafeCall(Label* to) { + Label return_to; + __ push(Immediate::CodeRelativeOffset(&return_to)); + __ jmp(to); + __ bind(&return_to); +} + + +void RegExpMacroAssemblerX87::SafeReturn() { + __ pop(ebx); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); +} + + +void RegExpMacroAssemblerX87::SafeCallTarget(Label* name) { + __ bind(name); +} + + +void RegExpMacroAssemblerX87::Push(Register source) { + DCHECK(!source.is(backtrack_stackpointer())); + // Notice: This updates flags, 
unlike normal Push. + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); + __ mov(Operand(backtrack_stackpointer(), 0), source); +} + + +void RegExpMacroAssemblerX87::Push(Immediate value) { + // Notice: This updates flags, unlike normal Push. + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); + __ mov(Operand(backtrack_stackpointer(), 0), value); +} + + +void RegExpMacroAssemblerX87::Pop(Register target) { + DCHECK(!target.is(backtrack_stackpointer())); + __ mov(target, Operand(backtrack_stackpointer(), 0)); + // Notice: This updates flags, unlike normal Pop. + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); +} + + +void RegExpMacroAssemblerX87::CheckPreemption() { + // Check for preemption. + Label no_preempt; + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above, &no_preempt); + + SafeCall(&check_preempt_label_); + + __ bind(&no_preempt); +} + + +void RegExpMacroAssemblerX87::CheckStackLimit() { + Label no_stack_overflow; + ExternalReference stack_limit = + ExternalReference::address_of_regexp_stack_limit(isolate()); + __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit)); + __ j(above, &no_stack_overflow); + + SafeCall(&stack_overflow_label_); + + __ bind(&no_stack_overflow); +} + + +void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset, + int characters) { + if (mode_ == ASCII) { + if (characters == 4) { + __ mov(current_character(), Operand(esi, edi, times_1, cp_offset)); + } else if (characters == 2) { + __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset)); + } else { + DCHECK(characters == 1); + __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset)); + } + } else { + DCHECK(mode_ == UC16); + if (characters == 2) { + __ mov(current_character(), + Operand(esi, edi, times_1, cp_offset * sizeof(uc16))); + } else { + DCHECK(characters == 1); + __ 
movzx_w(current_character(), + Operand(esi, edi, times_1, cp_offset * sizeof(uc16))); + } + } +} + + +#undef __ + +#endif // V8_INTERPRETED_REGEXP + +}} // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/x87/regexp-macro-assembler-x87.h nodejs-0.11.15/deps/v8/src/x87/regexp-macro-assembler-x87.h --- nodejs-0.11.13/deps/v8/src/x87/regexp-macro-assembler-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/regexp-macro-assembler-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,200 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_ +#define V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_ + +#include "src/macro-assembler.h" +#include "src/x87/assembler-x87-inl.h" +#include "src/x87/assembler-x87.h" + +namespace v8 { +namespace internal { + +#ifndef V8_INTERPRETED_REGEXP +class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler { + public: + RegExpMacroAssemblerX87(Mode mode, int registers_to_save, Zone* zone); + virtual ~RegExpMacroAssemblerX87(); + virtual int stack_limit_slack(); + virtual void AdvanceCurrentPosition(int by); + virtual void AdvanceRegister(int reg, int by); + virtual void Backtrack(); + virtual void Bind(Label* label); + virtual void CheckAtStart(Label* on_at_start); + virtual void CheckCharacter(uint32_t c, Label* on_equal); + virtual void CheckCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_equal); + virtual void CheckCharacterGT(uc16 limit, Label* on_greater); + virtual void CheckCharacterLT(uc16 limit, Label* on_less); + // A "greedy loop" is a loop that is both greedy and with a simple + // body. It has a particularly simple implementation. 
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position); + virtual void CheckNotAtStart(Label* on_not_at_start); + virtual void CheckNotBackReference(int start_reg, Label* on_no_match); + virtual void CheckNotBackReferenceIgnoreCase(int start_reg, + Label* on_no_match); + virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal); + virtual void CheckNotCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_not_equal); + virtual void CheckNotCharacterAfterMinusAnd(uc16 c, + uc16 minus, + uc16 mask, + Label* on_not_equal); + virtual void CheckCharacterInRange(uc16 from, + uc16 to, + Label* on_in_range); + virtual void CheckCharacterNotInRange(uc16 from, + uc16 to, + Label* on_not_in_range); + virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set); + + // Checks whether the given offset from the current position is before + // the end of the string. + virtual void CheckPosition(int cp_offset, Label* on_outside_input); + virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match); + virtual void Fail(); + virtual Handle<HeapObject> GetCode(Handle<String> source); + virtual void GoTo(Label* label); + virtual void IfRegisterGE(int reg, int comparand, Label* if_ge); + virtual void IfRegisterLT(int reg, int comparand, Label* if_lt); + virtual void IfRegisterEqPos(int reg, Label* if_eq); + virtual IrregexpImplementation Implementation(); + virtual void LoadCurrentCharacter(int cp_offset, + Label* on_end_of_input, + bool check_bounds = true, + int characters = 1); + virtual void PopCurrentPosition(); + virtual void PopRegister(int register_index); + virtual void PushBacktrack(Label* label); + virtual void PushCurrentPosition(); + virtual void PushRegister(int register_index, + StackCheckFlag check_stack_limit); + virtual void ReadCurrentPositionFromRegister(int reg); + virtual void ReadStackPointerFromRegister(int reg); + virtual void SetCurrentPositionFromEnd(int by); + virtual void SetRegister(int register_index, int 
to); + virtual bool Succeed(); + virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); + virtual void ClearRegisters(int reg_from, int reg_to); + virtual void WriteStackPointerToRegister(int reg); + + // Called from RegExp if the stack-guard is triggered. + // If the code object is relocated, the return address is fixed before + // returning. + static int CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame); + + private: + // Offsets from ebp of function parameters and stored registers. + static const int kFramePointer = 0; + // Above the frame pointer - function parameters and return address. + static const int kReturn_eip = kFramePointer + kPointerSize; + static const int kFrameAlign = kReturn_eip + kPointerSize; + // Parameters. + static const int kInputString = kFrameAlign; + static const int kStartIndex = kInputString + kPointerSize; + static const int kInputStart = kStartIndex + kPointerSize; + static const int kInputEnd = kInputStart + kPointerSize; + static const int kRegisterOutput = kInputEnd + kPointerSize; + // For the case of global regular expression, we have room to store at least + // one set of capture results. For the case of non-global regexp, we ignore + // this value. + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; + static const int kDirectCall = kStackHighEnd + kPointerSize; + static const int kIsolate = kDirectCall + kPointerSize; + // Below the frame pointer - local stack variables. + // When adding local variables remember to push space for them in + // the frame in GetCode. 
+ static const int kBackup_esi = kFramePointer - kPointerSize; + static const int kBackup_edi = kBackup_esi - kPointerSize; + static const int kBackup_ebx = kBackup_edi - kPointerSize; + static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; + // First register address. Following registers are below it on the stack. + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; + + // Initial size of code buffer. + static const size_t kRegExpCodeSize = 1024; + + // Load a number of characters at the given offset from the + // current position, into the current-character register. + void LoadCurrentCharacterUnchecked(int cp_offset, int character_count); + + // Check whether preemption has been requested. + void CheckPreemption(); + + // Check whether we are exceeding the stack limit on the backtrack stack. + void CheckStackLimit(); + + // Generate a call to CheckStackGuardState. + void CallCheckStackGuardState(Register scratch); + + // The ebp-relative location of a regexp register. + Operand register_location(int register_index); + + // The register containing the current character after LoadCurrentCharacter. + inline Register current_character() { return edx; } + + // The register containing the backtrack stack top. Provides a meaningful + // name to the register. + inline Register backtrack_stackpointer() { return ecx; } + + // Byte size of chars in the string to match (decided by the Mode argument) + inline int char_size() { return static_cast<int>(mode_); } + + // Equivalent to a conditional branch to the label, unless the label + // is NULL, in which case it is a conditional Backtrack. 
+ void BranchOrBacktrack(Condition condition, Label* to); + + // Call and return internally in the generated code in a way that + // is GC-safe (i.e., doesn't leave absolute code addresses on the stack) + inline void SafeCall(Label* to); + inline void SafeReturn(); + inline void SafeCallTarget(Label* name); + + // Pushes the value of a register on the backtrack stack. Decrements the + // stack pointer (ecx) by a word size and stores the register's value there. + inline void Push(Register source); + + // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx) + // by a word size and stores the value there. + inline void Push(Immediate value); + + // Pops a value from the backtrack stack. Reads the word at the stack pointer + // (ecx) and increments it by a word size. + inline void Pop(Register target); + + Isolate* isolate() const { return masm_->isolate(); } + + MacroAssembler* masm_; + + // Which mode to generate code for (ASCII or UC16). + Mode mode_; + + // One greater than maximal register index actually used. + int num_registers_; + + // Number of registers to output at the end (the saved registers + // are always 0..num_saved_registers_-1) + int num_saved_registers_; + + // Labels used internally. + Label entry_label_; + Label start_label_; + Label success_label_; + Label backtrack_label_; + Label exit_label_; + Label check_preempt_label_; + Label stack_overflow_label_; +}; +#endif // V8_INTERPRETED_REGEXP + +}} // namespace v8::internal + +#endif // V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/simulator-x87.cc nodejs-0.11.15/deps/v8/src/x87/simulator-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/simulator-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/simulator-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ +// Copyright 2008 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + +// Since there is no simulator for the ia32 architecture this file is empty. diff -Nru nodejs-0.11.13/deps/v8/src/x87/simulator-x87.h nodejs-0.11.15/deps/v8/src/x87/simulator-x87.h --- nodejs-0.11.13/deps/v8/src/x87/simulator-x87.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/simulator-x87.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,48 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_X87_SIMULATOR_X87_H_ +#define V8_X87_SIMULATOR_X87_H_ + +#include "src/allocation.h" + +namespace v8 { +namespace internal { + +// Since there is no simulator for the ia32 architecture the only thing we can +// do is to call the entry directly. +#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ + (entry(p0, p1, p2, p3, p4)) + + +typedef int (*regexp_matcher)(String*, int, const byte*, + const byte*, int*, int, Address, int, Isolate*); + +// Call the generated regexp code directly. The code at the entry address should +// expect eight int/pointer sized arguments and return an int. +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8)) + + +// The stack limit beyond which we will throw stack overflow errors in +// generated code. Because generated code on ia32 uses the C stack, we +// just use the C stack limit. 
+class SimulatorStack : public v8::internal::AllStatic { + public: + static inline uintptr_t JsLimitFromCLimit(Isolate* isolate, + uintptr_t c_limit) { + USE(isolate); + return c_limit; + } + + static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { + return try_catch_address; + } + + static inline void UnregisterCTryCatch() { } +}; + +} } // namespace v8::internal + +#endif // V8_X87_SIMULATOR_X87_H_ diff -Nru nodejs-0.11.13/deps/v8/src/x87/stub-cache-x87.cc nodejs-0.11.15/deps/v8/src/x87/stub-cache-x87.cc --- nodejs-0.11.13/deps/v8/src/x87/stub-cache-x87.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/x87/stub-cache-x87.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1201 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X87 + +#include "src/codegen.h" +#include "src/ic-inl.h" +#include "src/stub-cache.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + + +static void ProbeTable(Isolate* isolate, + MacroAssembler* masm, + Code::Flags flags, + StubCache::Table table, + Register name, + Register receiver, + // Number of the cache entry pointer-size scaled. + Register offset, + Register extra) { + ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); + ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); + ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); + + Label miss; + + // Multiply by 3 because there are 3 fields per entry (name, code, map). + __ lea(offset, Operand(offset, offset, times_2, 0)); + + if (extra.is_valid()) { + // Get the code entry from the cache. + __ mov(extra, Operand::StaticArray(offset, times_1, value_offset)); + + // Check that the key in the entry matches the name. 
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset)); + __ j(not_equal, &miss); + + // Check the map matches. + __ mov(offset, Operand::StaticArray(offset, times_1, map_offset)); + __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ j(not_equal, &miss); + + // Check that the flags match what we're looking for. + __ mov(offset, FieldOperand(extra, Code::kFlagsOffset)); + __ and_(offset, ~Code::kFlagsNotUsedInLookup); + __ cmp(offset, flags); + __ j(not_equal, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Jump to the first instruction in the code stub. + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(extra); + + __ bind(&miss); + } else { + // Save the offset on the stack. + __ push(offset); + + // Check that the key in the entry matches the name. + __ cmp(name, Operand::StaticArray(offset, times_1, key_offset)); + __ j(not_equal, &miss); + + // Check the map matches. + __ mov(offset, Operand::StaticArray(offset, times_1, map_offset)); + __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ j(not_equal, &miss); + + // Restore offset register. + __ mov(offset, Operand(esp, 0)); + + // Get the code entry from the cache. + __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); + + // Check that the flags match what we're looking for. + __ mov(offset, FieldOperand(offset, Code::kFlagsOffset)); + __ and_(offset, ~Code::kFlagsNotUsedInLookup); + __ cmp(offset, flags); + __ j(not_equal, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Restore offset and re-load code entry from cache. 
+ __ pop(offset); + __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); + + // Jump to the first instruction in the code stub. + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(offset); + + // Pop at miss. + __ bind(&miss); + __ pop(offset); + } +} + + +void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( + MacroAssembler* masm, Label* miss_label, Register receiver, + Handle<Name> name, Register scratch0, Register scratch1) { + DCHECK(name->IsUniqueName()); + DCHECK(!receiver.is(scratch0)); + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->negative_lookups(), 1); + __ IncrementCounter(counters->negative_lookups_miss(), 1); + + __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset)); + + const int kInterceptorOrAccessCheckNeededMask = + (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); + + // Bail out if the receiver has a named interceptor or requires access checks. + __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset), + kInterceptorOrAccessCheckNeededMask); + __ j(not_zero, miss_label); + + // Check that receiver is a JSObject. + __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE); + __ j(below, miss_label); + + // Load properties array. + Register properties = scratch0; + __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset)); + + // Check that the properties array is a dictionary. 
+ __ cmp(FieldOperand(properties, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->hash_table_map())); + __ j(not_equal, miss_label); + + Label done; + NameDictionaryLookupStub::GenerateNegativeLookup(masm, + miss_label, + &done, + properties, + name, + scratch1); + __ bind(&done); + __ DecrementCounter(counters->negative_lookups_miss(), 1); +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, + Code::Flags flags, + Register receiver, + Register name, + Register scratch, + Register extra, + Register extra2, + Register extra3) { + Label miss; + + // Assert that code is valid. The multiplying code relies on the entry size + // being 12. + DCHECK(sizeof(Entry) == 12); + + // Assert the flags do not name a specific type. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + + // Assert that there are no register conflicts. + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); + + // Assert scratch and extra registers are valid, and extra2/3 are unused. + DCHECK(!scratch.is(no_reg)); + DCHECK(extra2.is(no_reg)); + DCHECK(extra3.is(no_reg)); + + Register offset = scratch; + scratch = no_reg; + + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Get the map of the receiver and compute the hash. + __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); + __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(offset, flags); + // We mask out the last two bits because they are not part of the hash and + // they are always 01 for maps. Also in the two 'and' instructions below. + __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); + // ProbeTable expects the offset to be pointer scaled, which it is, because + // the heap object tag size is 2 and the pointer size log 2 is also 2. 
+ DCHECK(kCacheIndexShift == kPointerSizeLog2); + + // Probe the primary table. + ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra); + + // Primary miss: Compute hash for secondary probe. + __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); + __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(offset, flags); + __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); + __ sub(offset, name); + __ add(offset, Immediate(flags)); + __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift); + + // Probe the secondary table. + ProbeTable( + isolate(), masm, flags, kSecondary, name, receiver, offset, extra); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. + __ bind(&miss); + __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); +} + + +void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( + MacroAssembler* masm, int index, Register prototype, Label* miss) { + // Get the global function with the given index. + Handle<JSFunction> function( + JSFunction::cast(masm->isolate()->native_context()->get(index))); + // Check we're still in the same context. + Register scratch = prototype; + const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); + __ mov(scratch, Operand(esi, offset)); + __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset)); + __ cmp(Operand(scratch, Context::SlotOffset(index)), function); + __ j(not_equal, miss); + + // Load its initial map. The global functions all have initial maps. + __ Move(prototype, Immediate(Handle<Map>(function->initial_map()))); + // Load the prototype from the initial map. 
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset)); +} + + +void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( + MacroAssembler* masm, Register receiver, Register scratch1, + Register scratch2, Label* miss_label) { + __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); + __ mov(eax, scratch1); + __ ret(0); +} + + +static void PushInterceptorArguments(MacroAssembler* masm, + Register receiver, + Register holder, + Register name, + Handle<JSObject> holder_obj) { + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3); + STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4); + __ push(name); + Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); + DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor)); + Register scratch = name; + __ mov(scratch, Immediate(interceptor)); + __ push(scratch); + __ push(receiver); + __ push(holder); +} + + +static void CompileCallLoadPropertyWithInterceptor( + MacroAssembler* masm, + Register receiver, + Register holder, + Register name, + Handle<JSObject> holder_obj, + IC::UtilityId id) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), + NamedLoadHandlerCompiler::kInterceptorArgsLength); +} + + +// Generate call to api function. +// This function uses push() to generate smaller, faster code than +// the version above. It is an optimization that should will be removed +// when api call ICs are generated in hydrogen. 
+void PropertyHandlerCompiler::GenerateFastApiCall( + MacroAssembler* masm, const CallOptimization& optimization, + Handle<Map> receiver_map, Register receiver, Register scratch_in, + bool is_store, int argc, Register* values) { + // Copy return value. + __ pop(scratch_in); + // receiver + __ push(receiver); + // Write the arguments to stack frame. + for (int i = 0; i < argc; i++) { + Register arg = values[argc-1-i]; + DCHECK(!receiver.is(arg)); + DCHECK(!scratch_in.is(arg)); + __ push(arg); + } + __ push(scratch_in); + // Stack now matches JSFunction abi. + DCHECK(optimization.is_simple_api_call()); + + // Abi for CallApiFunctionStub. + Register callee = eax; + Register call_data = ebx; + Register holder = ecx; + Register api_function_address = edx; + Register scratch = edi; // scratch_in is no longer valid. + + // Put holder in place. + CallOptimization::HolderLookup holder_lookup; + Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( + receiver_map, + &holder_lookup); + switch (holder_lookup) { + case CallOptimization::kHolderIsReceiver: + __ Move(holder, receiver); + break; + case CallOptimization::kHolderFound: + __ LoadHeapObject(holder, api_holder); + break; + case CallOptimization::kHolderNotFound: + UNREACHABLE(); + break; + } + + Isolate* isolate = masm->isolate(); + Handle<JSFunction> function = optimization.constant_function(); + Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); + Handle<Object> call_data_obj(api_call_info->data(), isolate); + + // Put callee in place. + __ LoadHeapObject(callee, function); + + bool call_data_undefined = false; + // Put call_data in place. 
+ if (isolate->heap()->InNewSpace(*call_data_obj)) { + __ mov(scratch, api_call_info); + __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset)); + } else if (call_data_obj->IsUndefined()) { + call_data_undefined = true; + __ mov(call_data, Immediate(isolate->factory()->undefined_value())); + } else { + __ mov(call_data, call_data_obj); + } + + // Put api_function_address in place. + Address function_address = v8::ToCData<Address>(api_call_info->callback()); + __ mov(api_function_address, Immediate(function_address)); + + // Jump to stub. + CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); + __ TailCallStub(&stub); +} + + +// Generate code to check that a global property cell is empty. Create +// the property cell at compilation time if no cell exists for the +// property. +void PropertyHandlerCompiler::GenerateCheckPropertyCell( + MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, + Register scratch, Label* miss) { + Handle<PropertyCell> cell = + JSGlobalObject::EnsurePropertyCell(global, name); + DCHECK(cell->value()->IsTheHole()); + Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value(); + if (masm->serializer_enabled()) { + __ mov(scratch, Immediate(cell)); + __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset), + Immediate(the_hole)); + } else { + __ cmp(Operand::ForCell(cell), Immediate(the_hole)); + } + __ j(not_equal, miss); +} + + +void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm, + Handle<Code> code) { + __ jmp(code, RelocInfo::CODE_TARGET); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ mov(this->name(), Immediate(name)); + } +} + + +// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if +// store is successful. 
+void NamedStoreHandlerCompiler::GenerateStoreTransition( + Handle<Map> transition, Handle<Name> name, Register receiver_reg, + Register storage_reg, Register value_reg, Register scratch1, + Register scratch2, Register unused, Label* miss_label, Label* slow) { + int descriptor = transition->LastAdded(); + DescriptorArray* descriptors = transition->instance_descriptors(); + PropertyDetails details = descriptors->GetDetails(descriptor); + Representation representation = details.representation(); + DCHECK(!representation.IsNone()); + + if (details.type() == CONSTANT) { + Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); + __ CmpObject(value_reg, constant); + __ j(not_equal, miss_label); + } else if (representation.IsSmi()) { + __ JumpIfNotSmi(value_reg, miss_label); + } else if (representation.IsHeapObject()) { + __ JumpIfSmi(value_reg, miss_label); + HeapType* field_type = descriptors->GetFieldType(descriptor); + HeapType::Iterator<Map> it = field_type->Classes(); + if (!it.Done()) { + Label do_store; + while (true) { + __ CompareMap(value_reg, it.Current()); + it.Advance(); + if (it.Done()) { + __ j(not_equal, miss_label); + break; + } + __ j(equal, &do_store, Label::kNear); + } + __ bind(&do_store); + } + } else if (representation.IsDouble()) { + Label do_store, heap_number; + __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE); + + __ JumpIfNotSmi(value_reg, &heap_number); + __ SmiUntag(value_reg); + __ push(value_reg); + __ fild_s(Operand(esp, 0)); + __ pop(value_reg); + __ SmiTag(value_reg); + __ jmp(&do_store); + + __ bind(&heap_number); + __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label, + DONT_DO_SMI_CHECK); + __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset)); + + __ bind(&do_store); + __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset)); + } + + // Stub never generated for objects that require access checks. 
+ DCHECK(!transition->is_access_check_needed()); + + // Perform map transition for the receiver if necessary. + if (details.type() == FIELD && + Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { + // The properties must be extended before we can store the value. + // We jump to a runtime call that extends the properties array. + __ pop(scratch1); // Return address. + __ push(receiver_reg); + __ push(Immediate(transition)); + __ push(value_reg); + __ push(scratch1); + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), + isolate()), + 3, 1); + return; + } + + // Update the map of the object. + __ mov(scratch1, Immediate(transition)); + __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1); + + // Update the write barrier for the map field. + __ RecordWriteField(receiver_reg, + HeapObject::kMapOffset, + scratch1, + scratch2, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + if (details.type() == CONSTANT) { + DCHECK(value_reg.is(eax)); + __ ret(0); + return; + } + + int index = transition->instance_descriptors()->GetFieldIndex( + transition->LastAdded()); + + // Adjust for the number of properties stored in the object. Even in the + // face of a transition we can use the old map here because the size of the + // object and the number of in-object properties is not going to change. + index -= transition->inobject_properties(); + + SmiCheck smi_check = representation.IsTagged() + ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; + // TODO(verwaest): Share this code as a code stub. + if (index < 0) { + // Set the property straight into the object. + int offset = transition->instance_size() + (index * kPointerSize); + if (representation.IsDouble()) { + __ mov(FieldOperand(receiver_reg, offset), storage_reg); + } else { + __ mov(FieldOperand(receiver_reg, offset), value_reg); + } + + if (!representation.IsSmi()) { + // Update the write barrier for the array address. 
+ if (!representation.IsDouble()) { + __ mov(storage_reg, value_reg); + } + __ RecordWriteField(receiver_reg, + offset, + storage_reg, + scratch1, + EMIT_REMEMBERED_SET, + smi_check); + } + } else { + // Write to the properties array. + int offset = index * kPointerSize + FixedArray::kHeaderSize; + // Get the properties array (optimistically). + __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); + if (representation.IsDouble()) { + __ mov(FieldOperand(scratch1, offset), storage_reg); + } else { + __ mov(FieldOperand(scratch1, offset), value_reg); + } + + if (!representation.IsSmi()) { + // Update the write barrier for the array address. + if (!representation.IsDouble()) { + __ mov(storage_reg, value_reg); + } + __ RecordWriteField(scratch1, + offset, + storage_reg, + receiver_reg, + EMIT_REMEMBERED_SET, + smi_check); + } + } + + // Return the value (register eax). + DCHECK(value_reg.is(eax)); + __ ret(0); +} + + +void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, + Register value_reg, + Label* miss_label) { + DCHECK(lookup->representation().IsHeapObject()); + __ JumpIfSmi(value_reg, miss_label); + HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); + Label do_store; + while (true) { + __ CompareMap(value_reg, it.Current()); + it.Advance(); + if (it.Done()) { + __ j(not_equal, miss_label); + break; + } + __ j(equal, &do_store, Label::kNear); + } + __ bind(&do_store); + + StoreFieldStub stub(isolate(), lookup->GetFieldIndex(), + lookup->representation()); + GenerateTailCall(masm(), stub.GetCode()); +} + + +Register PropertyHandlerCompiler::CheckPrototypes( + Register object_reg, Register holder_reg, Register scratch1, + Register scratch2, Handle<Name> name, Label* miss, + PrototypeCheckType check) { + Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); + + // Make sure there's no overlap between holder and object registers. 
+ DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + && !scratch2.is(scratch1)); + + // Keep track of the current object in register reg. + Register reg = object_reg; + int depth = 0; + + Handle<JSObject> current = Handle<JSObject>::null(); + if (type()->IsConstant()) + current = Handle<JSObject>::cast(type()->AsConstant()->Value()); + Handle<JSObject> prototype = Handle<JSObject>::null(); + Handle<Map> current_map = receiver_map; + Handle<Map> holder_map(holder()->map()); + // Traverse the prototype chain and check the maps in the prototype chain for + // fast and global objects or do negative lookup for normal objects. + while (!current_map.is_identical_to(holder_map)) { + ++depth; + + // Only global objects and objects that do not require access + // checks are allowed in stubs. + DCHECK(current_map->IsJSGlobalProxyMap() || + !current_map->is_access_check_needed()); + + prototype = handle(JSObject::cast(current_map->prototype())); + if (current_map->is_dictionary_map() && + !current_map->IsJSGlobalObjectMap()) { + DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. + if (!name->IsUniqueName()) { + DCHECK(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); + } + DCHECK(current.is_null() || + current->property_dictionary()->FindEntry(name) == + NameDictionary::kNotFound); + + GenerateDictionaryNegativeLookup(masm(), miss, reg, name, + scratch1, scratch2); + + __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); + reg = holder_reg; // From now on the object will be in holder_reg. + __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); + } else { + bool in_new_space = heap()->InNewSpace(*prototype); + // Two possible reasons for loading the prototype from the map: + // (1) Can't store references to new space in code. 
+ // (2) Handler is shared for all receivers with the same prototype + // map (but not necessarily the same prototype instance). + bool load_prototype_from_map = in_new_space || depth == 1; + if (depth != 1 || check == CHECK_ALL_MAPS) { + __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK); + } + + // Check access rights to the global object. This has to happen after + // the map check so that we know that the object is actually a global + // object. + // This allows us to install generated handlers for accesses to the + // global proxy (as opposed to using slow ICs). See corresponding code + // in LookupForRead(). + if (current_map->IsJSGlobalProxyMap()) { + __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss); + } else if (current_map->IsJSGlobalObjectMap()) { + GenerateCheckPropertyCell( + masm(), Handle<JSGlobalObject>::cast(current), name, + scratch2, miss); + } + + if (load_prototype_from_map) { + // Save the map in scratch1 for later. + __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); + } + + reg = holder_reg; // From now on the object will be in holder_reg. + + if (load_prototype_from_map) { + __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset)); + } else { + __ mov(reg, prototype); + } + } + + // Go to the next object in the prototype chain. + current = prototype; + current_map = handle(current->map()); + } + + // Log the check depth. + LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); + + if (depth != 0 || check == CHECK_ALL_MAPS) { + // Check the holder map. + __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK); + } + + // Perform security check for access to the global object. + DCHECK(current_map->IsJSGlobalProxyMap() || + !current_map->is_access_check_needed()); + if (current_map->IsJSGlobalProxyMap()) { + __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss); + } + + // Return the register containing the holder. 
+ return reg; +} + + +void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { + if (!miss->is_unused()) { + Label success; + __ jmp(&success); + __ bind(miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + __ bind(&success); + } +} + + +void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { + if (!miss->is_unused()) { + Label success; + __ jmp(&success); + GenerateRestoreName(miss, name); + TailCallBuiltin(masm(), MissBuiltin(kind())); + __ bind(&success); + } +} + + +void NamedLoadHandlerCompiler::GenerateLoadCallback( + Register reg, Handle<ExecutableAccessorInfo> callback) { + // Insert additional parameters into the stack frame above return address. + DCHECK(!scratch3().is(reg)); + __ pop(scratch3()); // Get return address to place it below. + + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); + __ push(receiver()); // receiver + // Push data from ExecutableAccessorInfo. + if (isolate()->heap()->InNewSpace(callback->data())) { + DCHECK(!scratch2().is(reg)); + __ mov(scratch2(), Immediate(callback)); + __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset)); + } else { + __ push(Immediate(Handle<Object>(callback->data(), isolate()))); + } + __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue + // ReturnValue default value + __ push(Immediate(isolate()->factory()->undefined_value())); + __ push(Immediate(reinterpret_cast<int>(isolate()))); + __ push(reg); // holder + + // Save a pointer to where we pushed the arguments. This will be + // passed as the const PropertyAccessorInfo& to the C++ callback. 
+ __ push(esp); + + __ push(name()); // name + + __ push(scratch3()); // Restore return address. + + // Abi for CallApiGetter + Register getter_address = edx; + Address function_address = v8::ToCData<Address>(callback->getter()); + __ mov(getter_address, Immediate(function_address)); + + CallApiGetterStub stub(isolate()); + __ TailCallStub(&stub); +} + + +void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { + // Return the constant value. + __ LoadObject(eax, value); + __ ret(0); +} + + +void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg, + LookupResult* lookup, + Handle<Name> name) { + DCHECK(holder()->HasNamedInterceptor()); + DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); + + // So far the most popular follow ups for interceptor loads are FIELD + // and CALLBACKS, so inline only them, other cases may be added + // later. + bool compile_followup_inline = false; + if (lookup->IsFound() && lookup->IsCacheable()) { + if (lookup->IsField()) { + compile_followup_inline = true; + } else if (lookup->type() == CALLBACKS && + lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + compile_followup_inline = + callback->getter() != NULL && + ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback, + type()); + } + } + + if (compile_followup_inline) { + // Compile the interceptor call, followed by inline code to load the + // property from further up the prototype chain if the call fails. + // Check that the maps haven't changed. + DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1())); + + // Preserve the receiver register explicitly whenever it is different from + // the holder and it is needed should the interceptor return without any + // result. The CALLBACKS case needs the receiver to be passed into C++ code, + // the FIELD case might cause a miss during the prototype check. 
+ bool must_perfrom_prototype_check = *holder() != lookup->holder(); + bool must_preserve_receiver_reg = !receiver().is(holder_reg) && + (lookup->type() == CALLBACKS || must_perfrom_prototype_check); + + // Save necessary data before invoking an interceptor. + // Requires a frame to make GC aware of pushed pointers. + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); + + if (must_preserve_receiver_reg) { + __ push(receiver()); + } + __ push(holder_reg); + __ push(this->name()); + + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor( + masm(), receiver(), holder_reg, this->name(), holder(), + IC::kLoadPropertyWithInterceptorOnly); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ cmp(eax, factory()->no_interceptor_result_sentinel()); + __ j(equal, &interceptor_failed); + frame_scope.GenerateLeaveFrame(); + __ ret(0); + + // Clobber registers when generating debug-code to provoke errors. + __ bind(&interceptor_failed); + if (FLAG_debug_code) { + __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue))); + __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue))); + __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue))); + } + + __ pop(this->name()); + __ pop(holder_reg); + if (must_preserve_receiver_reg) { + __ pop(receiver()); + } + + // Leave the internal frame. + } + + GenerateLoadPostInterceptor(holder_reg, name, lookup); + } else { // !compile_followup_inline + // Call the runtime system to load the interceptor. + // Check that the maps haven't changed. 
+ __ pop(scratch2()); // save old return address + PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), + holder()); + __ push(scratch2()); // restore old return address + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor), + isolate()); + __ TailCallExternalReference( + ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + } +} + + +Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( + Handle<JSObject> object, Handle<Name> name, + Handle<ExecutableAccessorInfo> callback) { + Register holder_reg = Frontend(receiver(), name); + + __ pop(scratch1()); // remove the return address + __ push(receiver()); + __ push(holder_reg); + __ Push(callback); + __ Push(name); + __ push(value()); + __ push(scratch1()); // restore return address + + // Do tail-call to the runtime system. + ExternalReference store_callback_property = + ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); + __ TailCallExternalReference(store_callback_property, 5, 1); + + // Return the generated code. + return GetCode(kind(), Code::FAST, name); +} + + +#undef __ +#define __ ACCESS_MASM(masm) + + +void NamedStoreHandlerCompiler::GenerateStoreViaSetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> setter) { + // ----------- S t a t e ------------- + // -- esp[0] : return address + // ----------------------------------- + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Save value register, so we can restore it later. + __ push(value()); + + if (!setter.is_null()) { + // Call the JavaScript setter with receiver and value on the stack. + if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { + // Swap in the global receiver. 
+ __ mov(receiver, + FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); + } + __ push(receiver); + __ push(value()); + ParameterCount actual(1); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper()); + } else { + // If we generate a global code snippet for deoptimization only, remember + // the place to continue after deoptimization. + masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); + } + + // We have to return the passed value, not the return value of the setter. + __ pop(eax); + + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + } + __ ret(0); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( + Handle<Name> name) { + __ pop(scratch1()); // remove the return address + __ push(receiver()); + __ push(this->name()); + __ push(value()); + __ push(scratch1()); // restore return address + + // Do tail-call to the runtime system. + ExternalReference store_ic_property = ExternalReference( + IC_Utility(IC::kStorePropertyWithInterceptor), isolate()); + __ TailCallExternalReference(store_ic_property, 3, 1); + + // Return the generated code. 
+ return GetCode(kind(), Code::FAST, name); +} + + +Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( + MapHandleList* receiver_maps, CodeHandleList* handler_stubs, + MapHandleList* transitioned_maps) { + Label miss; + __ JumpIfSmi(receiver(), &miss, Label::kNear); + __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset)); + for (int i = 0; i < receiver_maps->length(); ++i) { + __ cmp(scratch1(), receiver_maps->at(i)); + if (transitioned_maps->at(i).is_null()) { + __ j(equal, handler_stubs->at(i)); + } else { + Label next_map; + __ j(not_equal, &next_map, Label::kNear); + __ mov(transition_map(), Immediate(transitioned_maps->at(i))); + __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); + __ bind(&next_map); + } + } + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. + return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); +} + + +Register* PropertyAccessCompiler::load_calling_convention() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + Register receiver = LoadIC::ReceiverRegister(); + Register name = LoadIC::NameRegister(); + static Register registers[] = { receiver, name, ebx, eax, edi, no_reg }; + return registers; +} + + +Register* PropertyAccessCompiler::store_calling_convention() { + // receiver, name, scratch1, scratch2, scratch3. 
+ Register receiver = StoreIC::ReceiverRegister(); + Register name = StoreIC::NameRegister(); + DCHECK(ebx.is(KeyedStoreIC::MapRegister())); + static Register registers[] = { receiver, name, ebx, edi, no_reg }; + return registers; +} + + +Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } + + +#undef __ +#define __ ACCESS_MASM(masm) + + +void NamedLoadHandlerCompiler::GenerateLoadViaGetter( + MacroAssembler* masm, Handle<HeapType> type, Register receiver, + Handle<JSFunction> getter) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + if (!getter.is_null()) { + // Call the JavaScript getter with the receiver on the stack. + if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { + // Swap in the global receiver. + __ mov(receiver, + FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); + } + __ push(receiver); + ParameterCount actual(0); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper()); + } else { + // If we generate a global code snippet for deoptimization only, remember + // the place to continue after deoptimization. + masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); + } + + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + } + __ ret(0); +} + + +#undef __ +#define __ ACCESS_MASM(masm()) + + +Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( + Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) { + Label miss; + + FrontendHeader(receiver(), name, &miss); + // Get the value from the cell. + Register result = StoreIC::ValueRegister(); + if (masm()->serializer_enabled()) { + __ mov(result, Immediate(cell)); + __ mov(result, FieldOperand(result, PropertyCell::kValueOffset)); + } else { + __ mov(result, Operand::ForCell(cell)); + } + + // Check for deleted property if property can actually be deleted. 
+ if (is_configurable) { + __ cmp(result, factory()->the_hole_value()); + __ j(equal, &miss); + } else if (FLAG_debug_code) { + __ cmp(result, factory()->the_hole_value()); + __ Check(not_equal, kDontDeleteCellsCannotContainTheHole); + } + + Counters* counters = isolate()->counters(); + __ IncrementCounter(counters->named_load_global_stub(), 1); + // The code above already loads the result into the return register. + __ ret(0); + + FrontendFooter(name, &miss); + + // Return the generated code. + return GetCode(kind(), Code::NORMAL, name); +} + + +Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { + Label miss; + + if (check == PROPERTY && + (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { + // In case we are compiling an IC for dictionary loads and stores, just + // check whether the name is unique. + if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { + __ JumpIfNotUniqueName(this->name(), &miss); + } else { + __ cmp(this->name(), Immediate(name)); + __ j(not_equal, &miss); + } + } + + Label number_case; + Label* smi_target = IncludesNumberType(types) ? 
&number_case : &miss; + __ JumpIfSmi(receiver(), smi_target); + + // Polymorphic keyed stores may use the map register + Register map_reg = scratch1(); + DCHECK(kind() != Code::KEYED_STORE_IC || + map_reg.is(KeyedStoreIC::MapRegister())); + __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); + int receiver_count = types->length(); + int number_of_handled_maps = 0; + for (int current = 0; current < receiver_count; ++current) { + Handle<HeapType> type = types->at(current); + Handle<Map> map = IC::TypeToMap(*type, isolate()); + if (!map->is_deprecated()) { + number_of_handled_maps++; + __ cmp(map_reg, map); + if (type->Is(HeapType::Number())) { + DCHECK(!number_case.is_unused()); + __ bind(&number_case); + } + __ j(equal, handlers->at(current)); + } + } + DCHECK(number_of_handled_maps != 0); + + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. + InlineCacheState state = + number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC; + return GetCode(kind(), type, name, state); +} + + +#undef __ +#define __ ACCESS_MASM(masm) + + +void ElementHandlerCompiler::GenerateLoadDictionaryElement( + MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + DCHECK(edx.is(LoadIC::ReceiverRegister())); + DCHECK(ecx.is(LoadIC::NameRegister())); + Label slow, miss; + + // This stub is meant to be tail-jumped to, the receiver must already + // have been verified by the caller to not be a smi. + __ JumpIfNotSmi(ecx, &miss); + __ mov(ebx, ecx); + __ SmiUntag(ebx); + __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset)); + + // Push receiver on the stack to free up a register for the dictionary + // probing. + __ push(edx); + __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax); + // Pop receiver before returning. 
+ __ pop(edx); + __ ret(0); + + __ bind(&slow); + __ pop(edx); + + // ----------- S t a t e ------------- + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); + + __ bind(&miss); + // ----------- S t a t e ------------- + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_X87 diff -Nru nodejs-0.11.13/deps/v8/src/zone-allocator.h nodejs-0.11.15/deps/v8/src/zone-allocator.h --- nodejs-0.11.13/deps/v8/src/zone-allocator.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/zone-allocator.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,13 @@ // Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ZONE_ALLOCATOR_H_ #define V8_ZONE_ALLOCATOR_H_ -#include "zone.h" +#include <limits> + +#include "src/zone.h" namespace v8 { namespace internal { @@ -57,16 +36,14 @@ pointer address(reference x) const {return &x;} const_pointer address(const_reference x) const {return &x;} - pointer allocate(size_type count, const void* hint = 0) { - size_t size = count * sizeof(value_type); - size = RoundUp(size, kPointerSize); - return static_cast<pointer>(zone_->New(size)); + pointer allocate(size_type n, const void* hint = 0) { + return static_cast<pointer>(zone_->NewArray<value_type>( + static_cast<int>(n))); } void deallocate(pointer p, size_type) { /* noop for Zones */ } size_type max_size() const throw() { - size_type max = static_cast<size_type>(-1) / sizeof(T); - return (max > 0 ? 
max : 1); + return std::numeric_limits<int>::max() / sizeof(value_type); } void construct(pointer p, const T& val) { new(static_cast<void*>(p)) T(val); @@ -85,6 +62,8 @@ Zone* zone_; }; +typedef zone_allocator<bool> ZoneBoolAllocator; +typedef zone_allocator<int> ZoneIntAllocator; } } // namespace v8::internal #endif // V8_ZONE_ALLOCATOR_H_ diff -Nru nodejs-0.11.13/deps/v8/src/zone.cc nodejs-0.11.15/deps/v8/src/zone.cc --- nodejs-0.11.13/deps/v8/src/zone.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/zone.cc 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <string.h> -#include "v8.h" -#include "zone-inl.h" +#include "src/v8.h" +#include "src/zone-inl.h" namespace v8 { namespace internal { @@ -81,7 +58,48 @@ DeleteAll(); DeleteKeptSegment(); - ASSERT(segment_bytes_allocated_ == 0); + DCHECK(segment_bytes_allocated_ == 0); +} + + +void* Zone::New(int size) { + // Round up the requested size to fit the alignment. + size = RoundUp(size, kAlignment); + + // If the allocation size is divisible by 8 then we return an 8-byte aligned + // address. + if (kPointerSize == 4 && kAlignment == 4) { + position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); + } else { + DCHECK(kAlignment >= kPointerSize); + } + + // Check if the requested size is available without expanding. + Address result = position_; + + int size_with_redzone = +#ifdef V8_USE_ADDRESS_SANITIZER + size + kASanRedzoneBytes; +#else + size; +#endif + + if (size_with_redzone > limit_ - position_) { + result = NewExpand(size_with_redzone); + } else { + position_ += size_with_redzone; + } + +#ifdef V8_USE_ADDRESS_SANITIZER + Address redzone_position = result + size; + DCHECK(redzone_position + kASanRedzoneBytes == position_); + ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); +#endif + + // Check that the result has the proper alignment and return it. 
+ DCHECK(IsAddressAligned(result, kAlignment, 0)); + allocation_size_ += size; + return reinterpret_cast<void*>(result); } @@ -143,7 +161,7 @@ static const unsigned char kZapDeadByte = 0xcd; #endif - ASSERT(segment_head_ == NULL || segment_head_->next() == NULL); + DCHECK(segment_head_ == NULL || segment_head_->next() == NULL); if (segment_head_ != NULL) { int size = segment_head_->size(); #ifdef DEBUG @@ -156,7 +174,7 @@ segment_head_ = NULL; } - ASSERT(segment_bytes_allocated_ == 0); + DCHECK(segment_bytes_allocated_ == 0); } @@ -183,8 +201,8 @@ Address Zone::NewExpand(int size) { // Make sure the requested size is already properly aligned and that // there isn't enough room in the Zone to satisfy the request. - ASSERT(size == RoundDown(size, kAlignment)); - ASSERT(size > limit_ - position_); + DCHECK(size == RoundDown(size, kAlignment)); + DCHECK(size > limit_ - position_); // Compute the new segment size. We use a 'high water mark' // strategy, where we increase the segment size every time we expand @@ -233,7 +251,7 @@ return NULL; } limit_ = segment->end(); - ASSERT(position_ <= limit_); + DCHECK(position_ <= limit_); return result; } diff -Nru nodejs-0.11.13/deps/v8/src/zone-containers.h nodejs-0.11.15/deps/v8/src/zone-containers.h --- nodejs-0.11.13/deps/v8/src/zone-containers.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/zone-containers.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,42 +1,19 @@ // Copyright 2014 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ZONE_CONTAINERS_H_ #define V8_ZONE_CONTAINERS_H_ #include <vector> -#include <set> -#include "zone.h" +#include "src/zone-allocator.h" namespace v8 { namespace internal { -typedef zone_allocator<int> ZoneIntAllocator; +typedef std::vector<bool, ZoneBoolAllocator> BoolVector; + typedef std::vector<int, ZoneIntAllocator> IntVector; typedef IntVector::iterator IntVectorIter; typedef IntVector::reverse_iterator IntVectorRIter; diff -Nru nodejs-0.11.13/deps/v8/src/zone.h nodejs-0.11.15/deps/v8/src/zone.h --- nodejs-0.11.13/deps/v8/src/zone.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/zone.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,48 +1,22 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ZONE_H_ #define V8_ZONE_H_ -#include "allocation.h" -#include "checks.h" -#include "hashmap.h" -#include "globals.h" -#include "list.h" -#include "splay-tree.h" +#include <limits> + +#include "src/allocation.h" +#include "src/base/logging.h" +#include "src/globals.h" +#include "src/hashmap.h" +#include "src/list.h" +#include "src/splay-tree.h" namespace v8 { namespace internal { -#if defined(__has_feature) - #if __has_feature(address_sanitizer) - #define V8_USE_ADDRESS_SANITIZER - #endif -#endif class Segment; class Isolate; @@ -66,10 +40,14 @@ ~Zone(); // Allocate 'size' bytes of memory in the Zone; expands the Zone by // allocating new segments of memory on demand using malloc(). - inline void* New(int size); + void* New(int size); template <typename T> - inline T* NewArray(int length); + T* NewArray(int length) { + CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) > + length); + return static_cast<T*>(New(length * sizeof(T))); + } // Deletes all objects and free all memory allocated in the Zone. Keeps one // small (size <= kMaximumKeptSegmentSize) segment around if it finds one. 
diff -Nru nodejs-0.11.13/deps/v8/src/zone-inl.h nodejs-0.11.15/deps/v8/src/zone-inl.h --- nodejs-0.11.13/deps/v8/src/zone-inl.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/src/zone-inl.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,34 +1,11 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
#ifndef V8_ZONE_INL_H_ #define V8_ZONE_INL_H_ -#include "zone.h" +#include "src/zone.h" #ifdef V8_USE_ADDRESS_SANITIZER #include <sanitizer/asan_interface.h> @@ -36,10 +13,9 @@ #define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0) #endif -#include "counters.h" -#include "isolate.h" -#include "utils.h" -#include "v8-counters.h" +#include "src/counters.h" +#include "src/isolate.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -48,54 +24,6 @@ static const int kASanRedzoneBytes = 24; // Must be a multiple of 8. -inline void* Zone::New(int size) { - // Round up the requested size to fit the alignment. - size = RoundUp(size, kAlignment); - - // If the allocation size is divisible by 8 then we return an 8-byte aligned - // address. - if (kPointerSize == 4 && kAlignment == 4) { - position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); - } else { - ASSERT(kAlignment >= kPointerSize); - } - - // Check if the requested size is available without expanding. - Address result = position_; - - int size_with_redzone = -#ifdef V8_USE_ADDRESS_SANITIZER - size + kASanRedzoneBytes; -#else - size; -#endif - - if (size_with_redzone > limit_ - position_) { - result = NewExpand(size_with_redzone); - } else { - position_ += size_with_redzone; - } - -#ifdef V8_USE_ADDRESS_SANITIZER - Address redzone_position = result + size; - ASSERT(redzone_position + kASanRedzoneBytes == position_); - ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); -#endif - - // Check that the result has the proper alignment and return it. 
- ASSERT(IsAddressAligned(result, kAlignment, 0)); - allocation_size_ += size; - return reinterpret_cast<void*>(result); -} - - -template <typename T> -T* Zone::NewArray(int length) { - CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) > length); - return static_cast<T*>(New(length * sizeof(T))); -} - - bool Zone::excess_allocation() { return segment_bytes_allocated_ > kExcessLimit; } @@ -121,7 +49,7 @@ } inline void* ZoneAllocationPolicy::New(size_t size) { - ASSERT(zone_); + DCHECK(zone_); return zone_->New(static_cast<int>(size)); } diff -Nru nodejs-0.11.13/deps/v8/testing/gmock.gyp nodejs-0.11.15/deps/v8/testing/gmock.gyp --- nodejs-0.11.13/deps/v8/testing/gmock.gyp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/testing/gmock.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,62 @@ +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +{ + 'targets': [ + { + 'target_name': 'gmock', + 'type': 'static_library', + 'dependencies': [ + 'gtest.gyp:gtest', + ], + 'sources': [ + # Sources based on files in r173 of gmock. 
+ 'gmock/include/gmock/gmock-actions.h', + 'gmock/include/gmock/gmock-cardinalities.h', + 'gmock/include/gmock/gmock-generated-actions.h', + 'gmock/include/gmock/gmock-generated-function-mockers.h', + 'gmock/include/gmock/gmock-generated-matchers.h', + 'gmock/include/gmock/gmock-generated-nice-strict.h', + 'gmock/include/gmock/gmock-matchers.h', + 'gmock/include/gmock/gmock-spec-builders.h', + 'gmock/include/gmock/gmock.h', + 'gmock/include/gmock/internal/gmock-generated-internal-utils.h', + 'gmock/include/gmock/internal/gmock-internal-utils.h', + 'gmock/include/gmock/internal/gmock-port.h', + 'gmock/src/gmock-all.cc', + 'gmock/src/gmock-cardinalities.cc', + 'gmock/src/gmock-internal-utils.cc', + 'gmock/src/gmock-matchers.cc', + 'gmock/src/gmock-spec-builders.cc', + 'gmock/src/gmock.cc', + 'gmock_mutant.h', # gMock helpers + ], + 'sources!': [ + 'gmock/src/gmock-all.cc', # Not needed by our build. + ], + 'include_dirs': [ + 'gmock', + 'gmock/include', + ], + 'direct_dependent_settings': { + 'include_dirs': [ + 'gmock/include', # So that gmock headers can find themselves. + ], + }, + 'export_dependent_settings': [ + 'gtest.gyp:gtest', + ], + }, + { + 'target_name': 'gmock_main', + 'type': 'static_library', + 'dependencies': [ + 'gmock', + ], + 'sources': [ + 'gmock/src/gmock_main.cc', + ], + }, + ], +} diff -Nru nodejs-0.11.13/deps/v8/testing/gtest.gyp nodejs-0.11.15/deps/v8/testing/gtest.gyp --- nodejs-0.11.13/deps/v8/testing/gtest.gyp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/testing/gtest.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,159 @@ +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +{ + 'targets': [ + { + 'target_name': 'gtest', + 'toolsets': ['host', 'target'], + 'type': 'static_library', + 'sources': [ + 'gtest/include/gtest/gtest-death-test.h', + 'gtest/include/gtest/gtest-message.h', + 'gtest/include/gtest/gtest-param-test.h', + 'gtest/include/gtest/gtest-printers.h', + 'gtest/include/gtest/gtest-spi.h', + 'gtest/include/gtest/gtest-test-part.h', + 'gtest/include/gtest/gtest-typed-test.h', + 'gtest/include/gtest/gtest.h', + 'gtest/include/gtest/gtest_pred_impl.h', + 'gtest/include/gtest/internal/gtest-death-test-internal.h', + 'gtest/include/gtest/internal/gtest-filepath.h', + 'gtest/include/gtest/internal/gtest-internal.h', + 'gtest/include/gtest/internal/gtest-linked_ptr.h', + 'gtest/include/gtest/internal/gtest-param-util-generated.h', + 'gtest/include/gtest/internal/gtest-param-util.h', + 'gtest/include/gtest/internal/gtest-port.h', + 'gtest/include/gtest/internal/gtest-string.h', + 'gtest/include/gtest/internal/gtest-tuple.h', + 'gtest/include/gtest/internal/gtest-type-util.h', + 'gtest/src/gtest-all.cc', + 'gtest/src/gtest-death-test.cc', + 'gtest/src/gtest-filepath.cc', + 'gtest/src/gtest-internal-inl.h', + 'gtest/src/gtest-port.cc', + 'gtest/src/gtest-printers.cc', + 'gtest/src/gtest-test-part.cc', + 'gtest/src/gtest-typed-test.cc', + 'gtest/src/gtest.cc', + 'gtest-type-names.h', + ], + 'sources!': [ + 'gtest/src/gtest-all.cc', # Not needed by our build. + ], + 'include_dirs': [ + 'gtest', + 'gtest/include', + ], + 'dependencies': [ + 'gtest_prod', + ], + 'defines': [ + # In order to allow regex matches in gtest to be shared between Windows + # and other systems, we tell gtest to always use it's internal engine. + 'GTEST_HAS_POSIX_RE=0', + # Chrome doesn't support / require C++11, yet. 
+ 'GTEST_LANG_CXX11=0', + ], + 'all_dependent_settings': { + 'defines': [ + 'GTEST_HAS_POSIX_RE=0', + 'GTEST_LANG_CXX11=0', + ], + }, + 'conditions': [ + ['os_posix == 1', { + 'defines': [ + # gtest isn't able to figure out when RTTI is disabled for gcc + # versions older than 4.3.2, and assumes it's enabled. Our Mac + # and Linux builds disable RTTI, and cannot guarantee that the + # compiler will be 4.3.2. or newer. The Mac, for example, uses + # 4.2.1 as that is the latest available on that platform. gtest + # must be instructed that RTTI is disabled here, and for any + # direct dependents that might include gtest headers. + 'GTEST_HAS_RTTI=0', + ], + 'direct_dependent_settings': { + 'defines': [ + 'GTEST_HAS_RTTI=0', + ], + }, + }], + ['OS=="android"', { + 'defines': [ + 'GTEST_HAS_CLONE=0', + ], + 'direct_dependent_settings': { + 'defines': [ + 'GTEST_HAS_CLONE=0', + ], + }, + }], + ['OS=="android"', { + # We want gtest features that use tr1::tuple, but we currently + # don't support the variadic templates used by libstdc++'s + # implementation. gtest supports this scenario by providing its + # own implementation but we must opt in to it. + 'defines': [ + 'GTEST_USE_OWN_TR1_TUPLE=1', + # GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set. + # gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0 + # automatically on android, so it has to be set explicitly here. + 'GTEST_HAS_TR1_TUPLE=1', + ], + 'direct_dependent_settings': { + 'defines': [ + 'GTEST_USE_OWN_TR1_TUPLE=1', + 'GTEST_HAS_TR1_TUPLE=1', + ], + }, + }], + ], + 'direct_dependent_settings': { + 'defines': [ + 'UNIT_TEST', + ], + 'include_dirs': [ + 'gtest/include', # So that gtest headers can find themselves. 
+ ], + 'target_conditions': [ + ['_type=="executable"', { + 'test': 1, + 'conditions': [ + ['OS=="mac"', { + 'run_as': { + 'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'], + }, + }], + ['OS=="win"', { + 'run_as': { + 'action????': ['$(TargetPath)', '--gtest_print_time'], + }, + }], + ], + }], + ], + 'msvs_disabled_warnings': [4800], + }, + }, + { + 'target_name': 'gtest_main', + 'type': 'static_library', + 'dependencies': [ + 'gtest', + ], + 'sources': [ + 'gtest/src/gtest_main.cc', + ], + }, + { + 'target_name': 'gtest_prod', + 'toolsets': ['host', 'target'], + 'type': 'none', + 'sources': [ + 'gtest/include/gtest/gtest_prod.h', + ], + }, + ], +} diff -Nru nodejs-0.11.13/deps/v8/testing/gtest-type-names.h nodejs-0.11.15/deps/v8/testing/gtest-type-names.h --- nodejs-0.11.13/deps/v8/testing/gtest-type-names.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/testing/gtest-type-names.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,34 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_TESTING_GTEST_TYPE_NAMES_H_ +#define V8_TESTING_GTEST_TYPE_NAMES_H_ + +#include "include/v8stdint.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace testing { +namespace internal { + +#define GET_TYPE_NAME(type) \ + template <> \ + std::string GetTypeName<type>() { \ + return #type; \ + } +GET_TYPE_NAME(int8_t) +GET_TYPE_NAME(uint8_t) +GET_TYPE_NAME(int16_t) +GET_TYPE_NAME(uint16_t) +GET_TYPE_NAME(int32_t) +GET_TYPE_NAME(uint32_t) +GET_TYPE_NAME(int64_t) +GET_TYPE_NAME(uint64_t) +GET_TYPE_NAME(float) +GET_TYPE_NAME(double) +#undef GET_TYPE_NAME + +} // namespace internal +} // namespace testing + +#endif // V8_TESTING_GTEST_TYPE_NAMES_H_ diff -Nru nodejs-0.11.13/deps/v8/third_party/fdlibm/fdlibm.cc nodejs-0.11.15/deps/v8/third_party/fdlibm/fdlibm.cc --- nodejs-0.11.13/deps/v8/third_party/fdlibm/fdlibm.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/third_party/fdlibm/fdlibm.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,273 @@ +// The following is adapted from fdlibm (http://www.netlib.org/fdlibm). +// +// ==================================================== +// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +// +// Developed at SunSoft, a Sun Microsystems, Inc. business. +// Permission to use, copy, modify, and distribute this +// software is freely granted, provided that this notice +// is preserved. +// ==================================================== +// +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2014 the V8 project authors. All rights reserved. 
+ +#include "src/v8.h" + +#include "src/double.h" +#include "third_party/fdlibm/fdlibm.h" + + +namespace v8 { +namespace fdlibm { + +#ifdef _MSC_VER +inline double scalbn(double x, int y) { return _scalb(x, y); } +#endif // _MSC_VER + +const double MathConstants::constants[] = { + 6.36619772367581382433e-01, // invpio2 0 + 1.57079632673412561417e+00, // pio2_1 1 + 6.07710050650619224932e-11, // pio2_1t 2 + 6.07710050630396597660e-11, // pio2_2 3 + 2.02226624879595063154e-21, // pio2_2t 4 + 2.02226624871116645580e-21, // pio2_3 5 + 8.47842766036889956997e-32, // pio2_3t 6 + -1.66666666666666324348e-01, // S1 7 + 8.33333333332248946124e-03, // 8 + -1.98412698298579493134e-04, // 9 + 2.75573137070700676789e-06, // 10 + -2.50507602534068634195e-08, // 11 + 1.58969099521155010221e-10, // S6 12 + 4.16666666666666019037e-02, // C1 13 + -1.38888888888741095749e-03, // 14 + 2.48015872894767294178e-05, // 15 + -2.75573143513906633035e-07, // 16 + 2.08757232129817482790e-09, // 17 + -1.13596475577881948265e-11, // C6 18 + 3.33333333333334091986e-01, // T0 19 + 1.33333333333201242699e-01, // 20 + 5.39682539762260521377e-02, // 21 + 2.18694882948595424599e-02, // 22 + 8.86323982359930005737e-03, // 23 + 3.59207910759131235356e-03, // 24 + 1.45620945432529025516e-03, // 25 + 5.88041240820264096874e-04, // 26 + 2.46463134818469906812e-04, // 27 + 7.81794442939557092300e-05, // 28 + 7.14072491382608190305e-05, // 29 + -1.85586374855275456654e-05, // 30 + 2.59073051863633712884e-05, // T12 31 + 7.85398163397448278999e-01, // pio4 32 + 3.06161699786838301793e-17, // pio4lo 33 + 6.93147180369123816490e-01, // ln2_hi 34 + 1.90821492927058770002e-10, // ln2_lo 35 + 1.80143985094819840000e+16, // 2^54 36 + 6.666666666666666666e-01, // 2/3 37 + 6.666666666666735130e-01, // LP1 38 + 3.999999999940941908e-01, // 39 + 2.857142874366239149e-01, // 40 + 2.222219843214978396e-01, // 41 + 1.818357216161805012e-01, // 42 + 1.531383769920937332e-01, // 43 + 1.479819860511658591e-01, // LP7 44 +}; 
+ + +// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi +static const int two_over_pi[] = { + 0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C, + 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649, + 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 0xA73EE8, 0x8235F5, 0x2EBB44, + 0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C, 0x845F8B, + 0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, + 0x367ECF, 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5, + 0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330, + 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 0x91615E, 0xE61B08, + 0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA, + 0x73A8C9, 0x60E27B, 0xC08C6B}; + +static const double zero = 0.0; +static const double two24 = 1.6777216e+07; +static const double one = 1.0; +static const double twon24 = 5.9604644775390625e-08; + +static const double PIo2[] = { + 1.57079625129699707031e+00, // 0x3FF921FB, 0x40000000 + 7.54978941586159635335e-08, // 0x3E74442D, 0x00000000 + 5.39030252995776476554e-15, // 0x3CF84698, 0x80000000 + 3.28200341580791294123e-22, // 0x3B78CC51, 0x60000000 + 1.27065575308067607349e-29, // 0x39F01B83, 0x80000000 + 1.22933308981111328932e-36, // 0x387A2520, 0x40000000 + 2.73370053816464559624e-44, // 0x36E38222, 0x80000000 + 2.16741683877804819444e-51 // 0x3569F31D, 0x00000000 +}; + + +int __kernel_rem_pio2(double* x, double* y, int e0, int nx) { + static const int32_t jk = 3; + double fw; + int32_t jx = nx - 1; + int32_t jv = (e0 - 3) / 24; + if (jv < 0) jv = 0; + int32_t q0 = e0 - 24 * (jv + 1); + int32_t m = jx + jk; + + double f[10]; + for (int i = 0, j = jv - jx; i <= m; i++, j++) { + f[i] = (j < 0) ? 
zero : static_cast<double>(two_over_pi[j]); + } + + double q[10]; + for (int i = 0; i <= jk; i++) { + fw = 0.0; + for (int j = 0; j <= jx; j++) fw += x[j] * f[jx + i - j]; + q[i] = fw; + } + + int32_t jz = jk; + +recompute: + + int32_t iq[10]; + double z = q[jz]; + for (int i = 0, j = jz; j > 0; i++, j--) { + fw = static_cast<double>(static_cast<int32_t>(twon24 * z)); + iq[i] = static_cast<int32_t>(z - two24 * fw); + z = q[j - 1] + fw; + } + + z = scalbn(z, q0); + z -= 8.0 * std::floor(z * 0.125); + int32_t n = static_cast<int32_t>(z); + z -= static_cast<double>(n); + int32_t ih = 0; + if (q0 > 0) { + int32_t i = (iq[jz - 1] >> (24 - q0)); + n += i; + iq[jz - 1] -= i << (24 - q0); + ih = iq[jz - 1] >> (23 - q0); + } else if (q0 == 0) { + ih = iq[jz - 1] >> 23; + } else if (z >= 0.5) { + ih = 2; + } + + if (ih > 0) { + n += 1; + int32_t carry = 0; + for (int i = 0; i < jz; i++) { + int32_t j = iq[i]; + if (carry == 0) { + if (j != 0) { + carry = 1; + iq[i] = 0x1000000 - j; + } + } else { + iq[i] = 0xffffff - j; + } + } + if (q0 == 1) { + iq[jz - 1] &= 0x7fffff; + } else if (q0 == 2) { + iq[jz - 1] &= 0x3fffff; + } + if (ih == 2) { + z = one - z; + if (carry != 0) z -= scalbn(one, q0); + } + } + + if (z == zero) { + int32_t j = 0; + for (int i = jz - 1; i >= jk; i--) j |= iq[i]; + if (j == 0) { + int32_t k = 1; + while (iq[jk - k] == 0) k++; + for (int i = jz + 1; i <= jz + k; i++) { + f[jx + i] = static_cast<double>(two_over_pi[jv + i]); + for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j]; + q[i] = fw; + } + jz += k; + goto recompute; + } + } + + if (z == 0.0) { + jz -= 1; + q0 -= 24; + while (iq[jz] == 0) { + jz--; + q0 -= 24; + } + } else { + z = scalbn(z, -q0); + if (z >= two24) { + fw = static_cast<double>(static_cast<int32_t>(twon24 * z)); + iq[jz] = static_cast<int32_t>(z - two24 * fw); + jz += 1; + q0 += 24; + iq[jz] = static_cast<int32_t>(fw); + } else { + iq[jz] = static_cast<int32_t>(z); + } + } + + fw = scalbn(one, q0); + for (int i = jz; i 
>= 0; i--) { + q[i] = fw * static_cast<double>(iq[i]); + fw *= twon24; + } + + double fq[10]; + for (int i = jz; i >= 0; i--) { + fw = 0.0; + for (int k = 0; k <= jk && k <= jz - i; k++) fw += PIo2[k] * q[i + k]; + fq[jz - i] = fw; + } + + fw = 0.0; + for (int i = jz; i >= 0; i--) fw += fq[i]; + y[0] = (ih == 0) ? fw : -fw; + fw = fq[0] - fw; + for (int i = 1; i <= jz; i++) fw += fq[i]; + y[1] = (ih == 0) ? fw : -fw; + return n & 7; +} + + +int rempio2(double x, double* y) { + int32_t hx = static_cast<int32_t>(internal::double_to_uint64(x) >> 32); + int32_t ix = hx & 0x7fffffff; + + if (ix >= 0x7ff00000) { + *y = base::OS::nan_value(); + return 0; + } + + int32_t e0 = (ix >> 20) - 1046; + uint64_t zi = internal::double_to_uint64(x) & 0xFFFFFFFFu; + zi |= static_cast<uint64_t>(ix - (e0 << 20)) << 32; + double z = internal::uint64_to_double(zi); + + double tx[3]; + for (int i = 0; i < 2; i++) { + tx[i] = static_cast<double>(static_cast<int32_t>(z)); + z = (z - tx[i]) * two24; + } + tx[2] = z; + + int nx = 3; + while (tx[nx - 1] == zero) nx--; + int n = __kernel_rem_pio2(tx, y, e0, nx); + if (hx < 0) { + y[0] = -y[0]; + y[1] = -y[1]; + return -n; + } + return n; +} +} +} // namespace v8::internal diff -Nru nodejs-0.11.13/deps/v8/third_party/fdlibm/fdlibm.h nodejs-0.11.15/deps/v8/third_party/fdlibm/fdlibm.h --- nodejs-0.11.13/deps/v8/third_party/fdlibm/fdlibm.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/third_party/fdlibm/fdlibm.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,31 @@ +// The following is adapted from fdlibm (http://www.netlib.org/fdlibm). +// +// ==================================================== +// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +// +// Developed at SunSoft, a Sun Microsystems, Inc. business. +// Permission to use, copy, modify, and distribute this +// software is freely granted, provided that this notice +// is preserved. 
+// ==================================================== +// +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2014 the V8 project authors. All rights reserved. + +#ifndef V8_FDLIBM_H_ +#define V8_FDLIBM_H_ + +namespace v8 { +namespace fdlibm { + +int rempio2(double x, double* y); + +// Constants to be exposed to builtins via Float64Array. +struct MathConstants { + static const double constants[45]; +}; +} +} // namespace v8::internal + +#endif // V8_FDLIBM_H_ diff -Nru nodejs-0.11.13/deps/v8/third_party/fdlibm/fdlibm.js nodejs-0.11.15/deps/v8/third_party/fdlibm/fdlibm.js --- nodejs-0.11.13/deps/v8/third_party/fdlibm/fdlibm.js 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/third_party/fdlibm/fdlibm.js 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,518 @@ +// The following is adapted from fdlibm (http://www.netlib.org/fdlibm), +// +// ==================================================== +// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +// +// Developed at SunSoft, a Sun Microsystems, Inc. business. +// Permission to use, copy, modify, and distribute this +// software is freely granted, provided that this notice +// is preserved. +// ==================================================== +// +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2014 the V8 project authors. All rights reserved. +// +// The following is a straightforward translation of fdlibm routines +// by Raymond Toy (rtoy@google.com). + + +var kMath; // Initialized to a Float64Array during genesis and is not writable. 
+ +const INVPIO2 = kMath[0]; +const PIO2_1 = kMath[1]; +const PIO2_1T = kMath[2]; +const PIO2_2 = kMath[3]; +const PIO2_2T = kMath[4]; +const PIO2_3 = kMath[5]; +const PIO2_3T = kMath[6]; +const PIO4 = kMath[32]; +const PIO4LO = kMath[33]; + +// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For +// precision, r is returned as two values y0 and y1 such that r = y0 + y1 +// to more than double precision. +macro REMPIO2(X) + var n, y0, y1; + var hx = %_DoubleHi(X); + var ix = hx & 0x7fffffff; + + if (ix < 0x4002d97c) { + // |X| ~< 3*pi/4, special case with n = +/- 1 + if (hx > 0) { + var z = X - PIO2_1; + if (ix != 0x3ff921fb) { + // 33+53 bit pi is good enough + y0 = z - PIO2_1T; + y1 = (z - y0) - PIO2_1T; + } else { + // near pi/2, use 33+33+53 bit pi + z -= PIO2_2; + y0 = z - PIO2_2T; + y1 = (z - y0) - PIO2_2T; + } + n = 1; + } else { + // Negative X + var z = X + PIO2_1; + if (ix != 0x3ff921fb) { + // 33+53 bit pi is good enough + y0 = z + PIO2_1T; + y1 = (z - y0) + PIO2_1T; + } else { + // near pi/2, use 33+33+53 bit pi + z += PIO2_2; + y0 = z + PIO2_2T; + y1 = (z - y0) + PIO2_2T; + } + n = -1; + } + } else if (ix <= 0x413921fb) { + // |X| ~<= 2^19*(pi/2), medium size + var t = MathAbs(X); + n = (t * INVPIO2 + 0.5) | 0; + var r = t - n * PIO2_1; + var w = n * PIO2_1T; + // First round good to 85 bit + y0 = r - w; + if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x1000000) { + // 2nd iteration needed, good to 118 + t = r; + w = n * PIO2_2; + r = t - w; + w = n * PIO2_2T - ((t - r) - w); + y0 = r - w; + if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x3100000) { + // 3rd iteration needed. 151 bits accuracy + t = r; + w = n * PIO2_3; + r = t - w; + w = n * PIO2_3T - ((t - r) - w); + y0 = r - w; + } + } + y1 = (r - y0) - w; + if (hx < 0) { + n = -n; + y0 = -y0; + y1 = -y1; + } + } else { + // Need to do full Payne-Hanek reduction here. 
+ var r = %RemPiO2(X); + n = r[0]; + y0 = r[1]; + y1 = r[2]; + } +endmacro + + +// __kernel_sin(X, Y, IY) +// kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854 +// Input X is assumed to be bounded by ~pi/4 in magnitude. +// Input Y is the tail of X so that x = X + Y. +// +// Algorithm +// 1. Since ieee_sin(-x) = -ieee_sin(x), we need only to consider positive x. +// 2. ieee_sin(x) is approximated by a polynomial of degree 13 on +// [0,pi/4] +// 3 13 +// sin(x) ~ x + S1*x + ... + S6*x +// where +// +// |ieee_sin(x) 2 4 6 8 10 12 | -58 +// |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2 +// | x | +// +// 3. ieee_sin(X+Y) = ieee_sin(X) + sin'(X')*Y +// ~ ieee_sin(X) + (1-X*X/2)*Y +// For better accuracy, let +// 3 2 2 2 2 +// r = X *(S2+X *(S3+X *(S4+X *(S5+X *S6)))) +// then 3 2 +// sin(x) = X + (S1*X + (X *(r-Y/2)+Y)) +// +macro KSIN(x) +kMath[7+x] +endmacro + +macro RETURN_KERNELSIN(X, Y, SIGN) + var z = X * X; + var v = z * X; + var r = KSIN(1) + z * (KSIN(2) + z * (KSIN(3) + + z * (KSIN(4) + z * KSIN(5)))); + return (X - ((z * (0.5 * Y - v * r) - Y) - v * KSIN(0))) SIGN; +endmacro + +// __kernel_cos(X, Y) +// kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164 +// Input X is assumed to be bounded by ~pi/4 in magnitude. +// Input Y is the tail of X so that x = X + Y. +// +// Algorithm +// 1. Since ieee_cos(-x) = ieee_cos(x), we need only to consider positive x. +// 2. ieee_cos(x) is approximated by a polynomial of degree 14 on +// [0,pi/4] +// 4 14 +// cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x +// where the remez error is +// +// | 2 4 6 8 10 12 14 | -58 +// |ieee_cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2 +// | | +// +// 4 6 8 10 12 14 +// 3. 
let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then +// ieee_cos(x) = 1 - x*x/2 + r +// since ieee_cos(X+Y) ~ ieee_cos(X) - ieee_sin(X)*Y +// ~ ieee_cos(X) - X*Y, +// a correction term is necessary in ieee_cos(x) and hence +// cos(X+Y) = 1 - (X*X/2 - (r - X*Y)) +// For better accuracy when x > 0.3, let qx = |x|/4 with +// the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125. +// Then +// cos(X+Y) = (1-qx) - ((X*X/2-qx) - (r-X*Y)). +// Note that 1-qx and (X*X/2-qx) is EXACT here, and the +// magnitude of the latter is at least a quarter of X*X/2, +// thus, reducing the rounding error in the subtraction. +// +macro KCOS(x) +kMath[13+x] +endmacro + +macro RETURN_KERNELCOS(X, Y, SIGN) + var ix = %_DoubleHi(X) & 0x7fffffff; + var z = X * X; + var r = z * (KCOS(0) + z * (KCOS(1) + z * (KCOS(2)+ + z * (KCOS(3) + z * (KCOS(4) + z * KCOS(5)))))); + if (ix < 0x3fd33333) { // |x| ~< 0.3 + return (1 - (0.5 * z - (z * r - X * Y))) SIGN; + } else { + var qx; + if (ix > 0x3fe90000) { // |x| > 0.78125 + qx = 0.28125; + } else { + qx = %_ConstructDouble(%_DoubleHi(0.25 * X), 0); + } + var hz = 0.5 * z - qx; + return (1 - qx - (hz - (z * r - X * Y))) SIGN; + } +endmacro + + +// kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854 +// Input x is assumed to be bounded by ~pi/4 in magnitude. +// Input y is the tail of x. +// Input k indicates whether ieee_tan (if k = 1) or -1/tan (if k = -1) +// is returned. +// +// Algorithm +// 1. Since ieee_tan(-x) = -ieee_tan(x), we need only to consider positive x. +// 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0. +// 3. ieee_tan(x) is approximated by a odd polynomial of degree 27 on +// [0,0.67434] +// 3 27 +// tan(x) ~ x + T1*x + ... + T13*x +// where +// +// |ieee_tan(x) 2 4 26 | -59.2 +// |----- - (1+T1*x +T2*x +.... 
+T13*x )| <= 2 +// | x | +// +// Note: ieee_tan(x+y) = ieee_tan(x) + tan'(x)*y +// ~ ieee_tan(x) + (1+x*x)*y +// Therefore, for better accuracy in computing ieee_tan(x+y), let +// 3 2 2 2 2 +// r = x *(T2+x *(T3+x *(...+x *(T12+x *T13)))) +// then +// 3 2 +// tan(x+y) = x + (T1*x + (x *(r+y)+y)) +// +// 4. For x in [0.67434,pi/4], let y = pi/4 - x, then +// tan(x) = ieee_tan(pi/4-y) = (1-ieee_tan(y))/(1+ieee_tan(y)) +// = 1 - 2*(ieee_tan(y) - (ieee_tan(y)^2)/(1+ieee_tan(y))) +// +// Set returnTan to 1 for tan; -1 for cot. Anything else is illegal +// and will cause incorrect results. +// +macro KTAN(x) +kMath[19+x] +endmacro + +function KernelTan(x, y, returnTan) { + var z; + var w; + var hx = %_DoubleHi(x); + var ix = hx & 0x7fffffff; + + if (ix < 0x3e300000) { // |x| < 2^-28 + if (((ix | %_DoubleLo(x)) | (returnTan + 1)) == 0) { + // x == 0 && returnTan = -1 + return 1 / MathAbs(x); + } else { + if (returnTan == 1) { + return x; + } else { + // Compute -1/(x + y) carefully + var w = x + y; + var z = %_ConstructDouble(%_DoubleHi(w), 0); + var v = y - (z - x); + var a = -1 / w; + var t = %_ConstructDouble(%_DoubleHi(a), 0); + var s = 1 + t * z; + return t + a * (s + t * v); + } + } + } + if (ix >= 0x3fe59429) { // |x| > .6744 + if (x < 0) { + x = -x; + y = -y; + } + z = PIO4 - x; + w = PIO4LO - y; + x = z + w; + y = 0; + } + z = x * x; + w = z * z; + + // Break x^5 * (T1 + x^2*T2 + ...) into + // x^5 * (T1 + x^4*T3 + ... + x^20*T11) + + // x^5 * (x^2 * (T2 + x^4*T4 + ... 
+ x^22*T12)) + var r = KTAN(1) + w * (KTAN(3) + w * (KTAN(5) + + w * (KTAN(7) + w * (KTAN(9) + w * KTAN(11))))); + var v = z * (KTAN(2) + w * (KTAN(4) + w * (KTAN(6) + + w * (KTAN(8) + w * (KTAN(10) + w * KTAN(12)))))); + var s = z * x; + r = y + z * (s * (r + v) + y); + r = r + KTAN(0) * s; + w = x + r; + if (ix >= 0x3fe59428) { + return (1 - ((hx >> 30) & 2)) * + (returnTan - 2.0 * (x - (w * w / (w + returnTan) - r))); + } + if (returnTan == 1) { + return w; + } else { + z = %_ConstructDouble(%_DoubleHi(w), 0); + v = r - (z - x); + var a = -1 / w; + var t = %_ConstructDouble(%_DoubleHi(a), 0); + s = 1 + t * z; + return t + a * (s + t * v); + } +} + +function MathSinSlow(x) { + REMPIO2(x); + var sign = 1 - (n & 2); + if (n & 1) { + RETURN_KERNELCOS(y0, y1, * sign); + } else { + RETURN_KERNELSIN(y0, y1, * sign); + } +} + +function MathCosSlow(x) { + REMPIO2(x); + if (n & 1) { + var sign = (n & 2) - 1; + RETURN_KERNELSIN(y0, y1, * sign); + } else { + var sign = 1 - (n & 2); + RETURN_KERNELCOS(y0, y1, * sign); + } +} + +// ECMA 262 - 15.8.2.16 +function MathSin(x) { + x = x * 1; // Convert to number. + if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) { + // |x| < pi/4, approximately. No reduction needed. + RETURN_KERNELSIN(x, 0, /* empty */); + } + return MathSinSlow(x); +} + +// ECMA 262 - 15.8.2.7 +function MathCos(x) { + x = x * 1; // Convert to number. + if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) { + // |x| < pi/4, approximately. No reduction needed. + RETURN_KERNELCOS(x, 0, /* empty */); + } + return MathCosSlow(x); +} + +// ECMA 262 - 15.8.2.18 +function MathTan(x) { + x = x * 1; // Convert to number. + if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) { + // |x| < pi/4, approximately. No reduction needed. + return KernelTan(x, 0, 1); + } + REMPIO2(x); + return KernelTan(y0, y1, (n & 1) ? -1 : 1); +} + +// ES6 draft 09-27-13, section 20.2.2.20. +// Math.log1p +// +// Method : +// 1. 
Argument Reduction: find k and f such that +// 1+x = 2^k * (1+f), +// where sqrt(2)/2 < 1+f < sqrt(2) . +// +// Note. If k=0, then f=x is exact. However, if k!=0, then f +// may not be representable exactly. In that case, a correction +// term is need. Let u=1+x rounded. Let c = (1+x)-u, then +// log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u), +// and add back the correction term c/u. +// (Note: when x > 2**53, one can simply return log(x)) +// +// 2. Approximation of log1p(f). +// Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s) +// = 2s + 2/3 s**3 + 2/5 s**5 + ....., +// = 2s + s*R +// We use a special Reme algorithm on [0,0.1716] to generate +// a polynomial of degree 14 to approximate R The maximum error +// of this polynomial approximation is bounded by 2**-58.45. In +// other words, +// 2 4 6 8 10 12 14 +// R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s +Lp6*s +Lp7*s +// (the values of Lp1 to Lp7 are listed in the program) +// and +// | 2 14 | -58.45 +// | Lp1*s +...+Lp7*s - R(z) | <= 2 +// | | +// Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2. +// In order to guarantee error in log below 1ulp, we compute log +// by +// log1p(f) = f - (hfsq - s*(hfsq+R)). +// +// 3. Finally, log1p(x) = k*ln2 + log1p(f). +// = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo))) +// Here ln2 is split into two floating point number: +// ln2_hi + ln2_lo, +// where n*ln2_hi is always exact for |n| < 2000. +// +// Special cases: +// log1p(x) is NaN with signal if x < -1 (including -INF) ; +// log1p(+INF) is +INF; log1p(-1) is -INF with signal; +// log1p(NaN) is that NaN with no signal. +// +// Accuracy: +// according to an error analysis, the error is always less than +// 1 ulp (unit in the last place). +// +// Constants: +// The hexadecimal values are the intended ones for the following +// constants. 
The decimal values may be used, provided that the +// compiler will convert from decimal to binary accurately enough +// to produce the hexadecimal values shown. +// +// Note: Assuming log() return accurate answer, the following +// algorithm can be used to compute log1p(x) to within a few ULP: +// +// u = 1+x; +// if (u==1.0) return x ; else +// return log(u)*(x/(u-1.0)); +// +// See HP-15C Advanced Functions Handbook, p.193. +// +const LN2_HI = kMath[34]; +const LN2_LO = kMath[35]; +const TWO54 = kMath[36]; +const TWO_THIRD = kMath[37]; +macro KLOGP1(x) +(kMath[38+x]) +endmacro + +function MathLog1p(x) { + x = x * 1; // Convert to number. + var hx = %_DoubleHi(x); + var ax = hx & 0x7fffffff; + var k = 1; + var f = x; + var hu = 1; + var c = 0; + var u = x; + + if (hx < 0x3fda827a) { + // x < 0.41422 + if (ax >= 0x3ff00000) { // |x| >= 1 + if (x === -1) { + return -INFINITY; // log1p(-1) = -inf + } else { + return NAN; // log1p(x<-1) = NaN + } + } else if (ax < 0x3c900000) { + // For |x| < 2^-54 we can return x. + return x; + } else if (ax < 0x3e200000) { + // For |x| < 2^-29 we can use a simple two-term Taylor series. + return x - x * x * 0.5; + } + + if ((hx > 0) || (hx <= -0x402D413D)) { // (int) 0xbfd2bec3 = -0x402d413d + // -.2929 < x < 0.41422 + k = 0; + } + } + + // Handle Infinity and NAN + if (hx >= 0x7ff00000) return x; + + if (k !== 0) { + if (hx < 0x43400000) { + // x < 2^53 + u = 1 + x; + hu = %_DoubleHi(u); + k = (hu >> 20) - 1023; + c = (k > 0) ? 1 - (u - x) : x - (u - 1); + c = c / u; + } else { + hu = %_DoubleHi(u); + k = (hu >> 20) - 1023; + } + hu = hu & 0xfffff; + if (hu < 0x6a09e) { + u = %_ConstructDouble(hu | 0x3ff00000, %_DoubleLo(u)); // Normalize u. + } else { + ++k; + u = %_ConstructDouble(hu | 0x3fe00000, %_DoubleLo(u)); // Normalize u/2. 
+ hu = (0x00100000 - hu) >> 2; + } + f = u - 1; + } + + var hfsq = 0.5 * f * f; + if (hu === 0) { + // |f| < 2^-20; + if (f === 0) { + if (k === 0) { + return 0.0; + } else { + return k * LN2_HI + (c + k * LN2_LO); + } + } + var R = hfsq * (1 - TWO_THIRD * f); + if (k === 0) { + return f - R; + } else { + return k * LN2_HI - ((R - (k * LN2_LO + c)) - f); + } + } + + var s = f / (2 + f); + var z = s * s; + var R = z * (KLOGP1(0) + z * (KLOGP1(1) + z * + (KLOGP1(2) + z * (KLOGP1(3) + z * + (KLOGP1(4) + z * (KLOGP1(5) + z * KLOGP1(6))))))); + if (k === 0) { + return f - (hfsq - s * (hfsq + R)); + } else { + return k * LN2_HI - ((hfsq - (s * (hfsq + R) + (k * LN2_LO + c))) - f); + } +} diff -Nru nodejs-0.11.13/deps/v8/third_party/fdlibm/LICENSE nodejs-0.11.15/deps/v8/third_party/fdlibm/LICENSE --- nodejs-0.11.13/deps/v8/third_party/fdlibm/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/third_party/fdlibm/LICENSE 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ +Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + +Developed at SunSoft, a Sun Microsystems, Inc. business. +Permission to use, copy, modify, and distribute this +software is freely granted, provided that this notice +is preserved. diff -Nru nodejs-0.11.13/deps/v8/third_party/fdlibm/README.v8 nodejs-0.11.15/deps/v8/third_party/fdlibm/README.v8 --- nodejs-0.11.13/deps/v8/third_party/fdlibm/README.v8 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/third_party/fdlibm/README.v8 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +Name: Freely Distributable LIBM +Short Name: fdlibm +URL: http://www.netlib.org/fdlibm/ +Version: 5.3 +License: Freely Distributable. +License File: LICENSE. +Security Critical: yes. +License Android Compatible: yes. + +Description: +This is used to provide a accurate implementation for trigonometric functions +used in V8. + +Local Modifications: +For the use in V8, fdlibm has been reduced to include only sine, cosine and +tangent. 
To make inlining into generated code possible, a large portion of +that has been translated to Javascript. The rest remains in C, but has been +refactored and reformatted to interoperate with the rest of V8. diff -Nru nodejs-0.11.13/deps/v8/tools/blink_tests/TestExpectations nodejs-0.11.15/deps/v8/tools/blink_tests/TestExpectations --- nodejs-0.11.13/deps/v8/tools/blink_tests/TestExpectations 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/blink_tests/TestExpectations 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,4 @@ # Tests that sometimes fail only on the V8 waterfall: -[ Linux Release x86 ] fast/js/JSON-stringify.html [ Pass Failure Slow ] [ Linux Release x86 ] fast/text/atsui-multiple-renderers.html [ Pass Failure Slow ] [ Linux Release x86 ] fast/text/international/complex-joining-using-gpos.html [ Pass Failure Slow ] [ Linux Release x86 ] fast/text/international/danda-space.html [ Pass Failure Slow ] diff -Nru nodejs-0.11.13/deps/v8/tools/check-static-initializers.sh nodejs-0.11.15/deps/v8/tools/check-static-initializers.sh --- nodejs-0.11.13/deps/v8/tools/check-static-initializers.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/check-static-initializers.sh 2015-01-20 21:22:17.000000000 +0000 @@ -32,8 +32,7 @@ # Allow: # - _GLOBAL__I__ZN2v810LineEditor6first_E # - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE -# - _GLOBAL__I__ZN2v88internal8ThreadId18highest_thread_id_E -expected_static_init_count=3 +expected_static_init_count=2 v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../) diff -Nru nodejs-0.11.13/deps/v8/tools/common-includes.sh nodejs-0.11.15/deps/v8/tools/common-includes.sh --- nodejs-0.11.13/deps/v8/tools/common-includes.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/common-includes.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,198 +0,0 @@ -# Copyright 2012 the V8 project authors. All rights reserved. 
-# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# This file contains common function definitions for various other shell -# scripts in this directory. It is not meant to be executed by itself. 
- -# Important: before including this file, the following variables must be set: -# - BRANCHNAME -# - PERSISTFILE_BASENAME - -TEMP_BRANCH=$BRANCHNAME-temporary-branch-created-by-script -VERSION_FILE="src/version.cc" -CHANGELOG_ENTRY_FILE="$PERSISTFILE_BASENAME-changelog-entry" -PATCH_FILE="$PERSISTFILE_BASENAME-patch" -COMMITMSG_FILE="$PERSISTFILE_BASENAME-commitmsg" -TRUNK_REVISION_FILE="$PERSISTFILE_BASENAME-trunkrevision" -START_STEP=0 -CURRENT_STEP=0 - -die() { - [[ -n "$1" ]] && echo "Error: $1" - echo "Exiting." - exit 1 -} - -confirm() { - echo -n "$1 [Y/n] " - read ANSWER - if [[ -z "$ANSWER" || "$ANSWER" == "Y" || "$ANSWER" == "y" ]] ; then - return 0 - else - return 1 - fi -} - -delete_branch() { - local MATCH=$(git branch | grep "$1" | awk '{print $NF}' | grep -x $1) - if [ "$MATCH" == "$1" ] ; then - confirm "Branch $1 exists, do you want to delete it?" - if [ $? -eq 0 ] ; then - git branch -D $1 || die "Deleting branch '$1' failed." - echo "Branch $1 deleted." - else - die "Can't continue. Please delete branch $1 and try again." - fi - fi -} - -# Persist and restore variables to support canceling/resuming execution -# of this script. -persist() { - local VARNAME=$1 - local FILE="$PERSISTFILE_BASENAME-$VARNAME" - local VALUE="${!VARNAME}" - if [ -z "$VALUE" ] ; then - VALUE="__EMPTY__" - fi - echo "$VALUE" > $FILE -} - -restore() { - local VARNAME=$1 - local FILE="$PERSISTFILE_BASENAME-$VARNAME" - local VALUE="$(cat $FILE)" - [[ -z "$VALUE" ]] && die "Variable '$VARNAME' could not be restored." - if [ "$VALUE" == "__EMPTY__" ] ; then - VALUE="" - fi - eval "$VARNAME=\"$VALUE\"" -} - -restore_if_unset() { - local VARNAME=$1 - [[ -z "${!VARNAME}" ]] && restore "$VARNAME" -} - -initial_environment_checks() { - # Cancel if this is not a git checkout. - [[ -d .git ]] \ - || die "This is not a git checkout, this script won't work for you." - - # Cancel if EDITOR is unset or not executable. 
- [[ -n "$EDITOR" && -x "$(which $EDITOR)" ]] \ - || die "Please set your EDITOR environment variable, you'll need it." -} - -common_prepare() { - # Check for a clean workdir. - [[ -z "$(git status -s -uno)" ]] \ - || die "Workspace is not clean. Please commit or undo your changes." - - # Persist current branch. - CURRENT_BRANCH=$(git status -s -b -uno | grep "^##" | awk '{print $2}') - persist "CURRENT_BRANCH" - - # Fetch unfetched revisions. - git svn fetch || die "'git svn fetch' failed." - - # Get ahold of a safe temporary branch and check it out. - if [ "$CURRENT_BRANCH" != "$TEMP_BRANCH" ] ; then - delete_branch $TEMP_BRANCH - git checkout -b $TEMP_BRANCH - fi - - # Delete the branch that will be created later if it exists already. - delete_branch $BRANCHNAME -} - -common_cleanup() { - restore_if_unset "CURRENT_BRANCH" - git checkout -f $CURRENT_BRANCH - [[ "$TEMP_BRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TEMP_BRANCH - [[ "$BRANCHNAME" != "$CURRENT_BRANCH" ]] && git branch -D $BRANCHNAME - # Clean up all temporary files. - rm -f "$PERSISTFILE_BASENAME"* -} - -# These two functions take a prefix for the variable names as first argument. -read_and_persist_version() { - for v in MAJOR_VERSION MINOR_VERSION BUILD_NUMBER PATCH_LEVEL; do - VARNAME="$1${v%%_*}" - VALUE=$(grep "#define $v" "$VERSION_FILE" | awk '{print $NF}') - eval "$VARNAME=\"$VALUE\"" - persist "$VARNAME" - done -} -restore_version_if_unset() { - for v in MAJOR MINOR BUILD PATCH; do - restore_if_unset "$1$v" - done -} - -upload_step() { - let CURRENT_STEP+=1 - if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Upload for code review." - echo -n "Please enter the email address of a V8 reviewer for your patch: " - read REVIEWER - git cl upload -r "$REVIEWER" --send-mail \ - || die "'git cl upload' failed, please try again." - fi -} - -wait_for_lgtm() { - echo "Please wait for an LGTM, then type \"LGTM<Return>\" to commit your \ -change. 
(If you need to iterate on the patch or double check that it's \ -sane, do so in another shell, but remember to not change the headline of \ -the uploaded CL." - unset ANSWER - while [ "$ANSWER" != "LGTM" ] ; do - [[ -n "$ANSWER" ]] && echo "That was not 'LGTM'." - echo -n "> " - read ANSWER - done -} - -wait_for_resolving_conflicts() { - echo "Applying the patch \"$1\" failed. Either type \"ABORT<Return>\", or \ -resolve the conflicts, stage *all* touched files with 'git add', and \ -type \"RESOLVED<Return>\"" - unset ANSWER - while [ "$ANSWER" != "RESOLVED" ] ; do - [[ "$ANSWER" == "ABORT" ]] && die "Applying the patch failed." - [[ -n "$ANSWER" ]] && echo "That was not 'RESOLVED' or 'ABORT'." - echo -n "> " - read ANSWER - done -} - -# Takes a file containing the patch to apply as first argument. -apply_patch() { - git apply --index --reject $REVERSE_PATCH "$1" || \ - wait_for_resolving_conflicts "$1"; -} diff -Nru nodejs-0.11.13/deps/v8/tools/concatenate-files.py nodejs-0.11.15/deps/v8/tools/concatenate-files.py --- nodejs-0.11.13/deps/v8/tools/concatenate-files.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/concatenate-files.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# +# Copyright 2014 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This utility concatenates several files into one. On Unix-like systems +# it is equivalent to: +# cat file1 file2 file3 ...files... > target +# +# The reason for writing a seperate utility is that 'cat' is not available +# on all supported build platforms, but Python is, and hence this provides +# us with an easy and uniform way of doing this on all platforms. + +import optparse + + +def Concatenate(filenames): + """Concatenate files. + + Args: + files: Array of file names. + The last name is the target; all earlier ones are sources. + + Returns: + True, if the operation was successful. + """ + if len(filenames) < 2: + print "An error occured generating %s:\nNothing to do." 
% filenames[-1] + return False + + try: + with open(filenames[-1], "wb") as target: + for filename in filenames[:-1]: + with open(filename, "rb") as current: + target.write(current.read()) + return True + except IOError as e: + print "An error occured when writing %s:\n%s" % (filenames[-1], e) + return False + + +def main(): + parser = optparse.OptionParser() + parser.set_usage("""Concatenate several files into one. + Equivalent to: cat file1 ... > target.""") + (options, args) = parser.parse_args() + exit(0 if Concatenate(args) else 1) + + +if __name__ == "__main__": + main() diff -Nru nodejs-0.11.13/deps/v8/tools/DEPS nodejs-0.11.15/deps/v8/tools/DEPS --- nodejs-0.11.13/deps/v8/tools/DEPS 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/DEPS 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +include_rules = [ + "+src", +] + +# checkdeps.py shouldn't check for includes in these directories: +skip_child_includes = [ + "gcmole", +] diff -Nru nodejs-0.11.13/deps/v8/tools/disasm.py nodejs-0.11.15/deps/v8/tools/disasm.py --- nodejs-0.11.13/deps/v8/tools/disasm.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/disasm.py 2015-01-20 21:22:17.000000000 +0000 @@ -49,7 +49,8 @@ "ia32": "-m i386", "x64": "-m i386 -M x86-64", "arm": "-m arm", # Not supported by our objdump build. - "mips": "-m mips" # Not supported by our objdump build. + "mips": "-m mips", # Not supported by our objdump build. + "arm64": "-m aarch64" } diff -Nru nodejs-0.11.13/deps/v8/tools/external-reference-check.py nodejs-0.11.15/deps/v8/tools/external-reference-check.py --- nodejs-0.11.13/deps/v8/tools/external-reference-check.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/external-reference-check.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import re +import os +import sys + +DECLARE_FILE = "src/assembler.h" +REGISTER_FILE = "src/serialize.cc" +DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(") +REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(") + +WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")) + +# Ignore those. +BLACKLISTED = [ + "page_flags", + "math_exp_constants", + "math_exp_log_table", + "ForDeoptEntry", +] + +def Find(filename, re): + references = [] + with open(filename, "r") as f: + for line in f: + match = re.match(line) + if match: + references.append(match.group(1)) + return references + +def Main(): + declarations = Find(DECLARE_FILE, DECLARE_RE) + registrations = Find(REGISTER_FILE, REGISTER_RE) + difference = list(set(declarations) - set(registrations) - set(BLACKLISTED)) + for reference in difference: + print("Declared but not registered: ExternalReference::%s" % reference) + return len(difference) > 0 + +if __name__ == "__main__": + sys.exit(Main()) diff -Nru nodejs-0.11.13/deps/v8/tools/fuzz-harness.sh nodejs-0.11.15/deps/v8/tools/fuzz-harness.sh --- nodejs-0.11.13/deps/v8/tools/fuzz-harness.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/fuzz-harness.sh 2015-01-20 21:22:17.000000000 +0000 @@ -85,7 +85,7 @@ "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js" exit_code=$(cat w* | grep " looking good" -c) exit_code=$((100-exit_code)) -tar -cjf fuzz-results-$(date +%y%m%d).tar.bz2 err-* w* +tar -cjf fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2 err-* w* rm -f err-* w* echo "Total failures: $exit_code" diff -Nru nodejs-0.11.13/deps/v8/tools/gcmole/bootstrap.sh nodejs-0.11.15/deps/v8/tools/gcmole/bootstrap.sh --- nodejs-0.11.13/deps/v8/tools/gcmole/bootstrap.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gcmole/bootstrap.sh 2015-01-20 21:22:17.000000000 +0000 @@ -27,9 +27,12 @@ 
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# This script will build libgcmole.so. +# This script will build libgcmole.so. Building a recent clang needs a +# recent GCC, so if you explicitly want to use GCC 4.8, use: +# +# CC=gcc-4.8 CPP=cpp-4.8 CXX=g++-4.8 CXXFLAGS=-static-libstdc++ CXXCPP=cpp-4.8 ./bootstrap.sh -CLANG_RELEASE=2.9 +CLANG_RELEASE=3.5 THIS_DIR="$(dirname "${0}")" LLVM_DIR="${THIS_DIR}/../../third_party/llvm" @@ -110,7 +113,7 @@ # See http://crbug.com/256342 STRIP_FLAGS=-x fi -strip ${STRIP_FLAGS} Release/bin/clang +strip ${STRIP_FLAGS} Release+Asserts/bin/clang cd - # Build libgcmole.so @@ -122,5 +125,5 @@ echo echo You can now run gcmole using this command: echo -echo CLANG_BIN=\"third_party/llvm/Release/bin\" lua tools/gcmole/gcmole.lua +echo CLANG_BIN=\"third_party/llvm/Release+Asserts/bin\" lua tools/gcmole/gcmole.lua echo diff -Nru nodejs-0.11.13/deps/v8/tools/gcmole/gcmole.cc nodejs-0.11.15/deps/v8/tools/gcmole/gcmole.cc --- nodejs-0.11.13/deps/v8/tools/gcmole/gcmole.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gcmole/gcmole.cc 2015-01-20 21:22:17.000000000 +0000 @@ -51,8 +51,8 @@ static bool GetMangledName(clang::MangleContext* ctx, const clang::NamedDecl* decl, MangledName* result) { - if (!isa<clang::CXXConstructorDecl>(decl) && - !isa<clang::CXXDestructorDecl>(decl)) { + if (!llvm::isa<clang::CXXConstructorDecl>(decl) && + !llvm::isa<clang::CXXDestructorDecl>(decl)) { llvm::SmallVector<char, 512> output; llvm::raw_svector_ostream out(output); ctx->mangleName(decl, out); @@ -74,7 +74,7 @@ static bool IsExternalVMState(const clang::ValueDecl* var) { const clang::EnumConstantDecl* enum_constant = - dyn_cast<clang::EnumConstantDecl>(var); + llvm::dyn_cast<clang::EnumConstantDecl>(var); if (enum_constant != NULL && enum_constant->getNameAsString() == EXTERNAL) { clang::QualType type = enum_constant->getType(); return 
(type.getAsString() == STATE_TAG); @@ -109,11 +109,10 @@ clang::DeclContext::lookup_result result = decl_ctx_->lookup(ResolveName(n)); - clang::DeclContext::lookup_iterator end = result.second; - for (clang::DeclContext::lookup_iterator i = result.first; - i != end; + clang::DeclContext::lookup_iterator end = result.end(); + for (clang::DeclContext::lookup_iterator i = result.begin(); i != end; i++) { - if (isa<T>(*i)) return cast<T>(*i); + if (llvm::isa<T>(*i)) return llvm::cast<T>(*i); } return NULL; @@ -208,13 +207,13 @@ : public clang::ASTConsumer, public clang::RecursiveASTVisitor<FunctionDeclarationFinder> { public: - explicit FunctionDeclarationFinder(clang::Diagnostic& d, + explicit FunctionDeclarationFinder(clang::DiagnosticsEngine& d, clang::SourceManager& sm, const std::vector<std::string>& args) - : d_(d), sm_(sm) { } + : d_(d), sm_(sm) {} virtual void HandleTranslationUnit(clang::ASTContext &ctx) { - mangle_context_ = clang::createItaniumMangleContext(ctx, d_); + mangle_context_ = clang::ItaniumMangleContext::create(ctx, d_); callees_printer_ = new CalleesPrinter(mangle_context_); TraverseDecl(ctx.getTranslationUnitDecl()); @@ -228,7 +227,7 @@ } private: - clang::Diagnostic& d_; + clang::DiagnosticsEngine& d_; clang::SourceManager& sm_; clang::MangleContext* mangle_context_; @@ -508,10 +507,8 @@ FunctionAnalyzer(clang::MangleContext* ctx, clang::DeclarationName handle_decl_name, clang::CXXRecordDecl* object_decl, - clang::CXXRecordDecl* smi_decl, - clang::Diagnostic& d, - clang::SourceManager& sm, - bool dead_vars_analysis) + clang::CXXRecordDecl* smi_decl, clang::DiagnosticsEngine& d, + clang::SourceManager& sm, bool dead_vars_analysis) : ctx_(ctx), handle_decl_name_(handle_decl_name), object_decl_(object_decl), @@ -519,8 +516,7 @@ d_(d), sm_(sm), block_(NULL), - dead_vars_analysis_(dead_vars_analysis) { - } + dead_vars_analysis_(dead_vars_analysis) {} // -------------------------------------------------------------------------- @@ -528,19 +524,18 @@ 
// -------------------------------------------------------------------------- ExprEffect VisitExpr(clang::Expr* expr, const Environment& env) { -#define VISIT(type) do { \ - clang::type* concrete_expr = dyn_cast_or_null<clang::type>(expr); \ - if (concrete_expr != NULL) { \ - return Visit##type (concrete_expr, env); \ - } \ - } while(0); +#define VISIT(type) \ + do { \ + clang::type* concrete_expr = llvm::dyn_cast_or_null<clang::type>(expr); \ + if (concrete_expr != NULL) { \ + return Visit##type(concrete_expr, env); \ + } \ + } while (0); VISIT(AbstractConditionalOperator); VISIT(AddrLabelExpr); VISIT(ArraySubscriptExpr); VISIT(BinaryOperator); - VISIT(BinaryTypeTraitExpr); - VISIT(BlockDeclRefExpr); VISIT(BlockExpr); VISIT(CallExpr); VISIT(CastExpr); @@ -587,8 +582,8 @@ VISIT(StmtExpr); VISIT(StringLiteral); VISIT(SubstNonTypeTemplateParmPackExpr); + VISIT(TypeTraitExpr); VISIT(UnaryOperator); - VISIT(UnaryTypeTraitExpr); VISIT(VAArgExpr); #undef VISIT @@ -604,7 +599,6 @@ } IGNORE_EXPR(AddrLabelExpr); - IGNORE_EXPR(BinaryTypeTraitExpr); IGNORE_EXPR(BlockExpr); IGNORE_EXPR(CharacterLiteral); IGNORE_EXPR(ChooseExpr); @@ -633,7 +627,7 @@ IGNORE_EXPR(StmtExpr); IGNORE_EXPR(StringLiteral); IGNORE_EXPR(SubstNonTypeTemplateParmPackExpr); - IGNORE_EXPR(UnaryTypeTraitExpr); + IGNORE_EXPR(TypeTraitExpr); IGNORE_EXPR(VAArgExpr); IGNORE_EXPR(GNUNullExpr); IGNORE_EXPR(OverloadExpr); @@ -654,12 +648,9 @@ } bool IsRawPointerVar(clang::Expr* expr, std::string* var_name) { - if (isa<clang::BlockDeclRefExpr>(expr)) { - *var_name = cast<clang::BlockDeclRefExpr>(expr)->getDecl()-> - getNameAsString(); - return true; - } else if (isa<clang::DeclRefExpr>(expr)) { - *var_name = cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString(); + if (llvm::isa<clang::DeclRefExpr>(expr)) { + *var_name = + llvm::cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString(); return true; } return false; @@ -707,12 +698,7 @@ return VisitExpr(expr->getArgument(), env); } - 
DECL_VISIT_EXPR(CXXNewExpr) { - return Par(expr, - expr->getNumConstructorArgs(), - expr->getConstructorArgs(), - env); - } + DECL_VISIT_EXPR(CXXNewExpr) { return VisitExpr(expr->getInitializer(), env); } DECL_VISIT_EXPR(ExprWithCleanups) { return VisitExpr(expr->getSubExpr(), env); @@ -766,10 +752,6 @@ return Use(expr, expr->getDecl(), env); } - DECL_VISIT_EXPR(BlockDeclRefExpr) { - return Use(expr, expr->getDecl(), env); - } - ExprEffect Par(clang::Expr* parent, int n, clang::Expr** exprs, @@ -844,7 +826,7 @@ CallProps props; clang::CXXMemberCallExpr* memcall = - dyn_cast_or_null<clang::CXXMemberCallExpr>(call); + llvm::dyn_cast_or_null<clang::CXXMemberCallExpr>(call); if (memcall != NULL) { clang::Expr* receiver = memcall->getImplicitObjectArgument(); props.SetEffect(0, VisitExpr(receiver, env)); @@ -870,14 +852,15 @@ // -------------------------------------------------------------------------- Environment VisitStmt(clang::Stmt* stmt, const Environment& env) { -#define VISIT(type) do { \ - clang::type* concrete_stmt = dyn_cast_or_null<clang::type>(stmt); \ - if (concrete_stmt != NULL) { \ - return Visit##type (concrete_stmt, env); \ - } \ - } while(0); +#define VISIT(type) \ + do { \ + clang::type* concrete_stmt = llvm::dyn_cast_or_null<clang::type>(stmt); \ + if (concrete_stmt != NULL) { \ + return Visit##type(concrete_stmt, env); \ + } \ + } while (0); - if (clang::Expr* expr = dyn_cast_or_null<clang::Expr>(stmt)) { + if (clang::Expr* expr = llvm::dyn_cast_or_null<clang::Expr>(stmt)) { return env.ApplyEffect(VisitExpr(expr, env)); } @@ -1078,11 +1061,12 @@ const clang::TagType* ToTagType(const clang::Type* t) { if (t == NULL) { return NULL; - } else if (isa<clang::TagType>(t)) { - return cast<clang::TagType>(t); - } else if (isa<clang::SubstTemplateTypeParmType>(t)) { - return ToTagType(cast<clang::SubstTemplateTypeParmType>(t)-> - getReplacementType().getTypePtr()); + } else if (llvm::isa<clang::TagType>(t)) { + return llvm::cast<clang::TagType>(t); + } else 
if (llvm::isa<clang::SubstTemplateTypeParmType>(t)) { + return ToTagType(llvm::cast<clang::SubstTemplateTypeParmType>(t) + ->getReplacementType() + .getTypePtr()); } else { return NULL; } @@ -1095,7 +1079,7 @@ bool IsRawPointerType(clang::QualType qtype) { const clang::PointerType* type = - dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull()); + llvm::dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull()); if (type == NULL) return false; const clang::TagType* pointee = @@ -1103,7 +1087,7 @@ if (pointee == NULL) return false; clang::CXXRecordDecl* record = - dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl()); + llvm::dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl()); if (record == NULL) return false; if (!InV8Namespace(record)) return false; @@ -1117,7 +1101,7 @@ } Environment VisitDecl(clang::Decl* decl, const Environment& env) { - if (clang::VarDecl* var = dyn_cast<clang::VarDecl>(decl)) { + if (clang::VarDecl* var = llvm::dyn_cast<clang::VarDecl>(decl)) { Environment out = var->hasInit() ? 
VisitStmt(var->getInit(), env) : env; if (IsRawPointerType(var->getType())) { @@ -1177,7 +1161,8 @@ private: void ReportUnsafe(const clang::Expr* expr, const std::string& msg) { d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_), - d_.getCustomDiagID(clang::Diagnostic::Warning, msg)); + d_.getCustomDiagID(clang::DiagnosticsEngine::Warning, "%0")) + << msg; } @@ -1186,7 +1171,7 @@ clang::CXXRecordDecl* object_decl_; clang::CXXRecordDecl* smi_decl_; - clang::Diagnostic& d_; + clang::DiagnosticsEngine& d_; clang::SourceManager& sm_; Block* block_; @@ -1197,8 +1182,7 @@ class ProblemsFinder : public clang::ASTConsumer, public clang::RecursiveASTVisitor<ProblemsFinder> { public: - ProblemsFinder(clang::Diagnostic& d, - clang::SourceManager& sm, + ProblemsFinder(clang::DiagnosticsEngine& d, clang::SourceManager& sm, const std::vector<std::string>& args) : d_(d), sm_(sm), dead_vars_analysis_(false) { for (unsigned i = 0; i < args.size(); ++i) { @@ -1224,14 +1208,9 @@ if (smi_decl != NULL) smi_decl = smi_decl->getDefinition(); if (object_decl != NULL && smi_decl != NULL) { - function_analyzer_ = - new FunctionAnalyzer(clang::createItaniumMangleContext(ctx, d_), - r.ResolveName("Handle"), - object_decl, - smi_decl, - d_, - sm_, - dead_vars_analysis_); + function_analyzer_ = new FunctionAnalyzer( + clang::ItaniumMangleContext::create(ctx, d_), r.ResolveName("Handle"), + object_decl, smi_decl, d_, sm_, dead_vars_analysis_); TraverseDecl(ctx.getTranslationUnitDecl()); } else { if (object_decl == NULL) { @@ -1249,7 +1228,7 @@ } private: - clang::Diagnostic& d_; + clang::DiagnosticsEngine& d_; clang::SourceManager& sm_; bool dead_vars_analysis_; diff -Nru nodejs-0.11.13/deps/v8/tools/gcmole/gcmole.lua nodejs-0.11.15/deps/v8/tools/gcmole/gcmole.lua --- nodejs-0.11.13/deps/v8/tools/gcmole/gcmole.lua 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gcmole/gcmole.lua 2015-01-20 21:22:17.000000000 +0000 @@ -93,18 +93,20 @@ local function 
MakeClangCommandLine(plugin, plugin_args, triple, arch_define) if plugin_args then for i = 1, #plugin_args do - plugin_args[i] = "-plugin-arg-" .. plugin .. " " .. plugin_args[i] + plugin_args[i] = "-Xclang -plugin-arg-" .. plugin + .. " -Xclang " .. plugin_args[i] end plugin_args = " " .. table.concat(plugin_args, " ") end - return CLANG_BIN .. "/clang -cc1 -load " .. CLANG_PLUGINS .. "/libgcmole.so" - .. " -plugin " .. plugin + return CLANG_BIN .. "/clang++ -std=c++11 -c " + .. " -Xclang -load -Xclang " .. CLANG_PLUGINS .. "/libgcmole.so" + .. " -Xclang -plugin -Xclang " .. plugin .. (plugin_args or "") - .. " -triple " .. triple + .. " -Xclang -triple -Xclang " .. triple .. " -D" .. arch_define .. " -DENABLE_DEBUGGER_SUPPORT" .. " -DV8_I18N_SUPPORT" - .. " -Isrc" + .. " -I./" .. " -Ithird_party/icu/source/common" .. " -Ithird_party/icu/source/i18n" end @@ -116,7 +118,7 @@ cfg.arch_define) for _, filename in ipairs(filenames) do log("-- %s", filename) - local action = cmd_line .. " src/" .. filename .. " 2>&1" + local action = cmd_line .. " " .. filename .. " 2>&1" if FLAGS.verbose then print('popen ', action) end local pipe = io.popen(action) func(filename, pipe:lines()) @@ -129,19 +131,26 @@ -- GYP file parsing local function ParseGYPFile() - local f = assert(io.open("tools/gyp/v8.gyp"), "failed to open GYP file") - local gyp = f:read('*a') - f:close() + local gyp = "" + local gyp_files = { "tools/gyp/v8.gyp", "test/cctest/cctest.gyp" } + for i = 1, #gyp_files do + local f = assert(io.open(gyp_files[i]), "failed to open GYP file") + local t = f:read('*a') + gyp = gyp .. t + f:close() + end local result = {} for condition, sources in gyp:gmatch "'sources': %[.-### gcmole%((.-)%) ###(.-)%]" do - local files = {} + if result[condition] == nil then result[condition] = {} end for file in sources:gmatch "'%.%./%.%./src/([^']-%.cc)'" do - table.insert(files, file) + table.insert(result[condition], "src/" .. 
file) + end + for file in sources:gmatch "'(test-[^']-%.cc)'" do + table.insert(result[condition], "test/cctest/" .. file) end - result[condition] = files end return result diff -Nru nodejs-0.11.13/deps/v8/tools/gcmole/Makefile nodejs-0.11.15/deps/v8/tools/gcmole/Makefile --- nodejs-0.11.13/deps/v8/tools/gcmole/Makefile 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gcmole/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -31,13 +31,12 @@ CLANG_INCLUDE:=$(LLVM_SRC_ROOT)/tools/clang/include libgcmole.so: gcmole.cc - g++ -I$(LLVM_INCLUDE) -I$(CLANG_INCLUDE) -I. -D_DEBUG -D_GNU_SOURCE \ - -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -O3 \ - -fomit-frame-pointer -fno-exceptions -fno-rtti -fPIC \ - -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing \ - -pedantic -Wno-long-long -Wall \ - -W -Wno-unused-parameter -Wwrite-strings \ - -shared -o libgcmole.so gcmole.cc + $(CXX) -I$(LLVM_INCLUDE) -I$(CLANG_INCLUDE) -I. -D_DEBUG \ + -D_GNU_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS \ + -D__STDC_LIMIT_MACROS -O3 -fomit-frame-pointer -fno-exceptions \ + -fno-rtti -fPIC -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing \ + -pedantic -Wno-long-long -Wall -W -Wno-unused-parameter \ + -Wwrite-strings -std=c++0x -shared -o libgcmole.so gcmole.cc clean: - rm -f libgcmole.so + $(RM) libgcmole.so diff -Nru nodejs-0.11.13/deps/v8/tools/gdbinit nodejs-0.11.15/deps/v8/tools/gdbinit --- nodejs-0.11.13/deps/v8/tools/gdbinit 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gdbinit 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Print HeapObjects. +define job +print ((v8::internal::HeapObject*)($arg0))->Print() +end +document job +Print a v8 JavaScript object +Usage: job tagged_ptr +end + +# Print Code objects containing given PC. 
+define jco +job (v8::internal::Isolate::Current()->FindCodeObject((v8::internal::Address)$arg0)) +end +document jco +Print a v8 Code object from an internal code address +Usage: jco pc +end + +# Print JavaScript stack trace. +define jst +print v8::internal::Isolate::Current()->PrintStack(stdout) +end +document jst +Print the current JavaScript stack trace +Usage: jst +end + +set disassembly-flavor intel +set disable-randomization off diff -Nru nodejs-0.11.13/deps/v8/tools/generate-runtime-tests.py nodejs-0.11.15/deps/v8/tools/generate-runtime-tests.py --- nodejs-0.11.13/deps/v8/tools/generate-runtime-tests.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/generate-runtime-tests.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1412 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import itertools +import js2c +import multiprocessing +import optparse +import os +import random +import re +import shutil +import signal +import string +import subprocess +import sys +import time + +FILENAME = "src/runtime.cc" +HEADERFILENAME = "src/runtime.h" +FUNCTION = re.compile("^RUNTIME_FUNCTION\(Runtime_(\w+)") +ARGSLENGTH = re.compile(".*DCHECK\(.*args\.length\(\) == (\d+)\);") +FUNCTIONEND = "}\n" +MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$") +FIRST_WORD = re.compile("^\s*(.*?)[\s({\[]") + +WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")) +BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen") +THIS_SCRIPT = os.path.relpath(sys.argv[0]) + +# Expand these macros, they define further runtime functions. 
+EXPAND_MACROS = [ + "BUFFER_VIEW_GETTER", + "DATA_VIEW_GETTER", + "DATA_VIEW_SETTER", + "RUNTIME_UNARY_MATH", +] +# TODO(jkummerow): We could also whitelist the following macros, but the +# functions they define are so trivial that it's unclear how much benefit +# that would provide: +# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION +# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION +# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION + +# Counts of functions in each detection state. These are used to assert +# that the parser doesn't bit-rot. Change the values as needed when you add, +# remove or change runtime functions, but make sure we don't lose our ability +# to parse them! +EXPECTED_FUNCTION_COUNT = 429 +EXPECTED_FUZZABLE_COUNT = 332 +EXPECTED_CCTEST_COUNT = 7 +EXPECTED_UNKNOWN_COUNT = 16 +EXPECTED_BUILTINS_COUNT = 808 + + +# Don't call these at all. +BLACKLISTED = [ + "Abort", # Kills the process. + "AbortJS", # Kills the process. + "CompileForOnStackReplacement", # Riddled with DCHECK. + "IS_VAR", # Not implemented in the runtime. + "ListNatives", # Not available in Release mode. + "SetAllocationTimeout", # Too slow for fuzzing. + "SystemBreak", # Kills (int3) the process. + + # These are weird. They violate some invariants when called after + # bootstrapping. + "DisableAccessChecks", + "EnableAccessChecks", + + # The current LiveEdit implementation relies on and messes with internals + # in ways that makes it fundamentally unfuzzable :-( + "DebugGetLoadedScripts", + "DebugSetScriptSource", + "LiveEditFindSharedFunctionInfosForScript", + "LiveEditFunctionSourceUpdated", + "LiveEditGatherCompileInfo", + "LiveEditPatchFunctionPositions", + "LiveEditReplaceFunctionCode", + "LiveEditReplaceRefToNestedFunction", + "LiveEditReplaceScript", + "LiveEditRestartFrame", + "SetScriptBreakPoint", + + # TODO(jkummerow): Fix these and un-blacklist them! 
+ "CreateDateTimeFormat",
+ "CreateNumberFormat",
+
+ # TODO(danno): Fix these internal function that are only callable form stubs
+ # and un-blacklist them!
+ "NumberToString",
+ "RegExpConstructResult",
+ "RegExpExec",
+ "StringAdd",
+ "SubString",
+ "StringCompare",
+ "StringCharCodeAt",
+ "GetFromCache",
+
+ # Compilation
+ "CompileUnoptimized",
+ "CompileOptimized",
+ "TryInstallOptimizedCode",
+ "NotifyDeoptimized",
+ "NotifyStubFailure",
+
+ # Utilities
+ "AllocateInNewSpace",
+ "AllocateInTargetSpace",
+ "AllocateHeapNumber",
+ "NumberToSmi",
+ "NumberToStringSkipCache",
+
+ "NewSloppyArguments",
+ "NewStrictArguments",
+
+ # Harmony
+ "CreateJSGeneratorObject",
+ "SuspendJSGeneratorObject",
+ "ResumeJSGeneratorObject",
+ "ThrowGeneratorStateError",
+
+ # Arrays
+ "ArrayConstructor",
+ "InternalArrayConstructor",
+ "NormalizeElements",
+
+ # Literals
+ "MaterializeRegExpLiteral",
+ "CreateObjectLiteral",
+ "CreateArrayLiteral",
+ "CreateArrayLiteralStubBailout",
+
+ # Statements
+ "NewClosure",
+ "NewClosureFromStubFailure",
+ "NewObject",
+ "NewObjectWithAllocationSite",
+ "FinalizeInstanceSize",
+ "Throw",
+ "ReThrow",
+ "ThrowReferenceError",
+ "ThrowNotDateError",
+ "StackGuard",
+ "Interrupt",
+ "PromoteScheduledException",
+
+ # Contexts
+ "NewGlobalContext",
+ "NewFunctionContext",
+ "PushWithContext",
+ "PushCatchContext",
+ "PushBlockContext",
+ "PushModuleContext",
+ "DeleteLookupSlot",
+ "LoadLookupSlot",
+ "LoadLookupSlotNoReferenceError",
+ "StoreLookupSlot",
+
+ # Declarations
+ "DeclareGlobals",
+ "DeclareModules",
+ "DeclareContextSlot",
+ "InitializeConstGlobal",
+ "InitializeConstContextSlot",
+
+ # Eval
+ "ResolvePossiblyDirectEval",
+
+ # Maths
+ "MathPowSlow",
+ "MathPowRT"
+]
+
+
+# These will always throw.
+THROWS = [
+ "CheckExecutionState", # Needs to hit a break point.
+ "CheckIsBootstrapping", # Needs to be bootstrapping.
+ "DebugEvaluate", # Needs to hit a break point.
+ "DebugEvaluateGlobal", # Needs to hit a break point.
+ "DebugIndexedInterceptorElementValue", # Needs an indexed interceptor. + "DebugNamedInterceptorPropertyValue", # Needs a named interceptor. + "DebugSetScriptSource", # Checks compilation state of script. + "GetAllScopesDetails", # Needs to hit a break point. + "GetFrameCount", # Needs to hit a break point. + "GetFrameDetails", # Needs to hit a break point. + "GetRootNaN", # Needs to be bootstrapping. + "GetScopeCount", # Needs to hit a break point. + "GetScopeDetails", # Needs to hit a break point. + "GetStepInPositions", # Needs to hit a break point. + "GetTemplateField", # Needs a {Function,Object}TemplateInfo. + "GetThreadCount", # Needs to hit a break point. + "GetThreadDetails", # Needs to hit a break point. + "IsAccessAllowedForObserver", # Needs access-check-required object. + "UnblockConcurrentRecompilation" # Needs --block-concurrent-recompilation. +] + + +# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below. +_BREAK_ITERATOR = ( + "%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())") +_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))" +_DATETIME_FORMAT = ( + "%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))") +_NUMBER_FORMAT = ( + "%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))") + + +# Custom definitions for function input that does not throw. +# Format: "FunctionName": ["arg0", "arg1", ..., argslength]. +# None means "fall back to autodetected value". 
+CUSTOM_KNOWN_GOOD_INPUT = { + "AddNamedProperty": [None, "\"bla\"", None, None, None], + "AddPropertyForTemplate": [None, 10, None, None, None], + "Apply": ["function() {}", None, None, None, None, None], + "ArrayBufferSliceImpl": [None, None, 0, None], + "ArrayConcat": ["[1, 'a']", None], + "BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None], + "BreakIteratorBreakType": [_BREAK_ITERATOR, None], + "BreakIteratorCurrent": [_BREAK_ITERATOR, None], + "BreakIteratorFirst": [_BREAK_ITERATOR, None], + "BreakIteratorNext": [_BREAK_ITERATOR, None], + "CompileString": [None, "false", None], + "CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None], + "CreateJSFunctionProxy": [None, "function() {}", None, None, None], + "CreatePrivateSymbol": ["\"foo\"", None], + "CreatePrivateOwnSymbol": ["\"foo\"", None], + "CreateSymbol": ["\"foo\"", None], + "DateParseString": [None, "new Array(8)", None], + "DefineAccessorPropertyUnchecked": [None, None, "function() {}", + "function() {}", 2, None], + "FunctionBindArguments": [None, None, "undefined", None, None], + "GetBreakLocations": [None, 0, None], + "GetDefaultReceiver": ["function() {}", None], + "GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None], + "InternalCompare": [_COLLATOR, None, None, None], + "InternalDateFormat": [_DATETIME_FORMAT, None, None], + "InternalDateParse": [_DATETIME_FORMAT, None, None], + "InternalNumberFormat": [_NUMBER_FORMAT, None, None], + "InternalNumberParse": [_NUMBER_FORMAT, None, None], + "IsSloppyModeFunction": ["function() {}", None], + "LoadMutableDouble": ["{foo: 1.2}", None, None], + "NewObjectFromBound": ["(function() {}).bind({})", None], + "NumberToRadixString": [None, "2", None], + "ParseJson": ["\"{}\"", 1], + "RegExpExecMultiple": [None, None, "['a']", "['a']", None], + "DefineApiAccessorProperty": [None, None, "undefined", "undefined", None, None], + "SetIteratorInitialize": [None, None, "2", None], + "SetDebugEventListener": ["undefined", None, 
None], + "SetFunctionBreakPoint": [None, 218, None, None], + "StringBuilderConcat": ["[1, 2, 3]", 3, None, None], + "StringBuilderJoin": ["['a', 'b']", 4, None, None], + "StringMatch": [None, None, "['a', 'b']", None], + "StringNormalize": [None, 2, None], + "StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None], + "TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None], + "TypedArrayInitializeFromArrayLike": [None, 6, None, None, None], + "TypedArraySetFastCases": [None, None, "0", None], + "FunctionIsArrow": ["() => null", None], +} + + +# Types of arguments that cannot be generated in a JavaScript testcase. +NON_JS_TYPES = [ + "Code", "Context", "FixedArray", "FunctionTemplateInfo", + "JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo", + "SharedFunctionInfo"] + + +class Generator(object): + + def RandomVariable(self, varname, vartype, simple): + if simple: + return self._Variable(varname, self.GENERATORS[vartype][0]) + return self.GENERATORS[vartype][1](self, varname, + self.DEFAULT_RECURSION_BUDGET) + + @staticmethod + def IsTypeSupported(typename): + return typename in Generator.GENERATORS + + USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__", + "prototype", "0", "1", "-1"] + DEFAULT_RECURSION_BUDGET = 2 + PROXY_TRAPS = """{ + getOwnPropertyDescriptor: function(name) { + return {value: function() {}, configurable: true, writable: true, + enumerable: true}; + }, + getPropertyDescriptor: function(name) { + return {value: function() {}, configurable: true, writable: true, + enumerable: true}; + }, + getOwnPropertyNames: function() { return []; }, + getPropertyNames: function() { return []; }, + defineProperty: function(name, descriptor) {}, + delete: function(name) { return true; }, + fix: function() {} + }""" + + def _Variable(self, name, value, fallback=None): + args = { "name": name, "value": value, "fallback": fallback } + if fallback: + wrapper = "try { %%s } catch(e) { var %(name)s = 
%(fallback)s; }" % args + else: + wrapper = "%s" + return [wrapper % ("var %(name)s = %(value)s;" % args)] + + def _Boolean(self, name, recursion_budget): + return self._Variable(name, random.choice(["true", "false"])) + + def _Oddball(self, name, recursion_budget): + return self._Variable(name, + random.choice(["true", "false", "undefined", "null"])) + + def _StrictMode(self, name, recursion_budget): + return self._Variable(name, random.choice([0, 1])) + + def _Int32(self, name, recursion_budget=0): + die = random.random() + if die < 0.5: + value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff, + 0x40000000, -0x40000000, -0x80000000]) + elif die < 0.75: + value = random.randint(-1000, 1000) + else: + value = random.randint(-0x80000000, 0x7fffffff) + return self._Variable(name, value) + + def _Uint32(self, name, recursion_budget=0): + die = random.random() + if die < 0.5: + value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000, + 0x7fffffff, 0xffffffff]) + elif die < 0.75: + value = random.randint(0, 1000) + else: + value = random.randint(0, 0xffffffff) + return self._Variable(name, value) + + def _Smi(self, name, recursion_budget): + die = random.random() + if die < 0.5: + value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000]) + elif die < 0.75: + value = random.randint(-1000, 1000) + else: + value = random.randint(-0x40000000, 0x3fffffff) + return self._Variable(name, value) + + def _Number(self, name, recursion_budget): + die = random.random() + if die < 0.5: + return self._Smi(name, recursion_budget) + elif die < 0.6: + value = random.choice(["Infinity", "-Infinity", "NaN", "-0", + "1.7976931348623157e+308", # Max value. + "2.2250738585072014e-308", # Min value. + "4.9406564584124654e-324"]) # Min subnormal. 
+ else: + value = random.lognormvariate(0, 15) + return self._Variable(name, value) + + def _RawRandomString(self, minlength=0, maxlength=100, + alphabet=string.ascii_letters): + length = random.randint(minlength, maxlength) + result = "" + for i in xrange(length): + result += random.choice(alphabet) + return result + + def _SeqString(self, name, recursion_budget): + s1 = self._RawRandomString(1, 5) + s2 = self._RawRandomString(1, 5) + # 'foo' + 'bar' + return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2)) + + def _SeqTwoByteString(self, name): + s1 = self._RawRandomString(1, 5) + s2 = self._RawRandomString(1, 5) + # 'foo' + unicode + 'bar' + return self._Variable(name, "\"%s\" + \"\\2082\" + \"%s\"" % (s1, s2)) + + def _SlicedString(self, name): + s = self._RawRandomString(20, 30) + # 'ffoo12345678901234567890'.substr(1) + return self._Variable(name, "\"%s\".substr(1)" % s) + + def _ConsString(self, name): + s1 = self._RawRandomString(8, 15) + s2 = self._RawRandomString(8, 15) + # 'foo12345' + (function() { return 'bar12345';})() + return self._Variable(name, + "\"%s\" + (function() { return \"%s\";})()" % (s1, s2)) + + def _InternalizedString(self, name): + return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20)) + + def _String(self, name, recursion_budget): + die = random.random() + if die < 0.5: + string = random.choice(self.USUAL_SUSPECT_PROPERTIES) + return self._Variable(name, "\"%s\"" % string) + elif die < 0.6: + number_name = name + "_number" + result = self._Number(number_name, recursion_budget) + return result + self._Variable(name, "\"\" + %s" % number_name) + elif die < 0.7: + return self._SeqString(name, recursion_budget) + elif die < 0.8: + return self._ConsString(name) + elif die < 0.9: + return self._InternalizedString(name) + else: + return self._SlicedString(name) + + def _Symbol(self, name, recursion_budget): + raw_string_name = name + "_1" + result = self._String(raw_string_name, recursion_budget) + return result + 
self._Variable(name, "Symbol(%s)" % raw_string_name) + + def _Name(self, name, recursion_budget): + if random.random() < 0.2: + return self._Symbol(name, recursion_budget) + return self._String(name, recursion_budget) + + def _JSValue(self, name, recursion_budget): + die = random.random() + raw_name = name + "_1" + if die < 0.33: + result = self._String(raw_name, recursion_budget) + return result + self._Variable(name, "new String(%s)" % raw_name) + elif die < 0.66: + result = self._Boolean(raw_name, recursion_budget) + return result + self._Variable(name, "new Boolean(%s)" % raw_name) + else: + result = self._Number(raw_name, recursion_budget) + return result + self._Variable(name, "new Number(%s)" % raw_name) + + def _RawRandomPropertyName(self): + if random.random() < 0.5: + return random.choice(self.USUAL_SUSPECT_PROPERTIES) + return self._RawRandomString(0, 10) + + def _AddProperties(self, name, result, recursion_budget): + propcount = random.randint(0, 3) + propname = None + for i in range(propcount): + die = random.random() + if die < 0.5: + propname = "%s_prop%d" % (name, i) + result += self._Name(propname, recursion_budget - 1) + else: + propname = "\"%s\"" % self._RawRandomPropertyName() + propvalue_name = "%s_val%d" % (name, i) + result += self._Object(propvalue_name, recursion_budget - 1) + result.append("try { %s[%s] = %s; } catch (e) {}" % + (name, propname, propvalue_name)) + if random.random() < 0.2 and propname: + # Force the object to slow mode. 
+ result.append("delete %s[%s];" % (name, propname)) + + def _RandomElementIndex(self, element_name, result): + if random.random() < 0.5: + return random.randint(-1000, 1000) + result += self._Smi(element_name, 0) + return element_name + + def _AddElements(self, name, result, recursion_budget): + elementcount = random.randint(0, 3) + for i in range(elementcount): + element_name = "%s_idx%d" % (name, i) + index = self._RandomElementIndex(element_name, result) + value_name = "%s_elt%d" % (name, i) + result += self._Object(value_name, recursion_budget - 1) + result.append("try { %s[%s] = %s; } catch(e) {}" % + (name, index, value_name)) + + def _AddAccessors(self, name, result, recursion_budget): + accessorcount = random.randint(0, 3) + for i in range(accessorcount): + propname = self._RawRandomPropertyName() + what = random.choice(["get", "set"]) + function_name = "%s_access%d" % (name, i) + result += self._PlainFunction(function_name, recursion_budget - 1) + result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } " + "catch (e) {}" % (name, propname, what, function_name)) + + def _PlainArray(self, name, recursion_budget): + die = random.random() + if die < 0.5: + literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]", + "['a', 'b', 1, true]"]) + return self._Variable(name, literal) + else: + new = random.choice(["", "new "]) + length = random.randint(0, 101000) + return self._Variable(name, "%sArray(%d)" % (new, length)) + + def _PlainObject(self, name, recursion_budget): + die = random.random() + if die < 0.67: + literal_propcount = random.randint(0, 3) + properties = [] + result = [] + for i in range(literal_propcount): + propname = self._RawRandomPropertyName() + propvalue_name = "%s_lit%d" % (name, i) + result += self._Object(propvalue_name, recursion_budget - 1) + properties.append("\"%s\": %s" % (propname, propvalue_name)) + return result + self._Variable(name, "{%s}" % ", ".join(properties)) + else: + return self._Variable(name, "new Object()") + + 
def _JSArray(self, name, recursion_budget): + result = self._PlainArray(name, recursion_budget) + self._AddAccessors(name, result, recursion_budget) + self._AddProperties(name, result, recursion_budget) + self._AddElements(name, result, recursion_budget) + return result + + def _RawRandomBufferLength(self): + if random.random() < 0.2: + return random.choice([0, 1, 8, 0x40000000, 0x80000000]) + return random.randint(0, 1000) + + def _JSArrayBuffer(self, name, recursion_budget): + length = self._RawRandomBufferLength() + return self._Variable(name, "new ArrayBuffer(%d)" % length) + + def _JSDataView(self, name, recursion_budget): + buffer_name = name + "_buffer" + result = self._JSArrayBuffer(buffer_name, recursion_budget) + args = [buffer_name] + die = random.random() + if die < 0.67: + offset = self._RawRandomBufferLength() + args.append("%d" % offset) + if die < 0.33: + length = self._RawRandomBufferLength() + args.append("%d" % length) + result += self._Variable(name, "new DataView(%s)" % ", ".join(args), + fallback="new DataView(new ArrayBuffer(8))") + return result + + def _JSDate(self, name, recursion_budget): + die = random.random() + if die < 0.25: + return self._Variable(name, "new Date()") + elif die < 0.5: + ms_name = name + "_ms" + result = self._Number(ms_name, recursion_budget) + return result + self._Variable(name, "new Date(%s)" % ms_name) + elif die < 0.75: + str_name = name + "_str" + month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", + "Aug", "Sep", "Oct", "Nov", "Dec"]) + day = random.randint(1, 28) + year = random.randint(1900, 2100) + hour = random.randint(0, 23) + minute = random.randint(0, 59) + second = random.randint(0, 59) + str_value = ("\"%s %s, %s %s:%s:%s\"" % + (month, day, year, hour, minute, second)) + result = self._Variable(str_name, str_value) + return result + self._Variable(name, "new Date(%s)" % str_name) + else: + components = tuple(map(lambda x: "%s_%s" % (name, x), + ["y", "m", "d", "h", "min", "s", 
"ms"])) + return ([j for i in map(self._Int32, components) for j in i] + + self._Variable(name, "new Date(%s)" % ", ".join(components))) + + def _PlainFunction(self, name, recursion_budget): + result_name = "result" + body = ["function() {"] + body += self._Object(result_name, recursion_budget - 1) + body.append("return result;\n}") + return self._Variable(name, "%s" % "\n".join(body)) + + def _JSFunction(self, name, recursion_budget): + result = self._PlainFunction(name, recursion_budget) + self._AddAccessors(name, result, recursion_budget) + self._AddProperties(name, result, recursion_budget) + self._AddElements(name, result, recursion_budget) + return result + + def _JSFunctionProxy(self, name, recursion_budget): + # TODO(jkummerow): Revisit this as the Proxy implementation evolves. + return self._Variable(name, "Proxy.createFunction(%s, function() {})" % + self.PROXY_TRAPS) + + def _JSGeneratorObject(self, name, recursion_budget): + # TODO(jkummerow): Be more creative here? + return self._Variable(name, "(function*() { yield 1; })()") + + def _JSMap(self, name, recursion_budget, weak=""): + result = self._Variable(name, "new %sMap()" % weak) + num_entries = random.randint(0, 3) + for i in range(num_entries): + key_name = "%s_k%d" % (name, i) + value_name = "%s_v%d" % (name, i) + if weak: + result += self._JSObject(key_name, recursion_budget - 1) + else: + result += self._Object(key_name, recursion_budget - 1) + result += self._Object(value_name, recursion_budget - 1) + result.append("%s.set(%s, %s)" % (name, key_name, value_name)) + return result + + def _JSMapIterator(self, name, recursion_budget): + map_name = name + "_map" + result = self._JSMap(map_name, recursion_budget) + iterator_type = random.choice(['keys', 'values', 'entries']) + return (result + self._Variable(name, "%s.%s()" % + (map_name, iterator_type))) + + def _JSProxy(self, name, recursion_budget): + # TODO(jkummerow): Revisit this as the Proxy implementation evolves. 
+ return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS) + + def _JSRegExp(self, name, recursion_budget): + flags = random.choice(["", "g", "i", "m", "gi"]) + string = "a(b|c)*a" # TODO(jkummerow): Be more creative here? + ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"]) + return self._Variable(name, ctor % (string, flags)) + + def _JSSet(self, name, recursion_budget, weak=""): + result = self._Variable(name, "new %sSet()" % weak) + num_entries = random.randint(0, 3) + for i in range(num_entries): + element_name = "%s_e%d" % (name, i) + if weak: + result += self._JSObject(element_name, recursion_budget - 1) + else: + result += self._Object(element_name, recursion_budget - 1) + result.append("%s.add(%s)" % (name, element_name)) + return result + + def _JSSetIterator(self, name, recursion_budget): + set_name = name + "_set" + result = self._JSSet(set_name, recursion_budget) + iterator_type = random.choice(['values', 'entries']) + return (result + self._Variable(name, "%s.%s()" % + (set_name, iterator_type))) + + def _JSTypedArray(self, name, recursion_budget): + arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16", + "Uint32", "Float32", "Float64", "Uint8Clamped"]) + ctor_type = random.randint(0, 3) + if ctor_type == 0: + length = random.randint(0, 1000) + return self._Variable(name, "new %sArray(%d)" % (arraytype, length), + fallback="new %sArray(8)" % arraytype) + elif ctor_type == 1: + input_name = name + "_typedarray" + result = self._JSTypedArray(input_name, recursion_budget - 1) + return (result + + self._Variable(name, "new %sArray(%s)" % (arraytype, input_name), + fallback="new %sArray(8)" % arraytype)) + elif ctor_type == 2: + arraylike_name = name + "_arraylike" + result = self._JSObject(arraylike_name, recursion_budget - 1) + length = random.randint(0, 1000) + result.append("try { %s.length = %d; } catch(e) {}" % + (arraylike_name, length)) + return (result + + self._Variable(name, + "new %sArray(%s)" % 
(arraytype, arraylike_name), + fallback="new %sArray(8)" % arraytype)) + else: + die = random.random() + buffer_name = name + "_buffer" + args = [buffer_name] + result = self._JSArrayBuffer(buffer_name, recursion_budget) + if die < 0.67: + offset_name = name + "_offset" + args.append(offset_name) + result += self._Int32(offset_name) + if die < 0.33: + length_name = name + "_length" + args.append(length_name) + result += self._Int32(length_name) + return (result + + self._Variable(name, + "new %sArray(%s)" % (arraytype, ", ".join(args)), + fallback="new %sArray(8)" % arraytype)) + + def _JSArrayBufferView(self, name, recursion_budget): + if random.random() < 0.4: + return self._JSDataView(name, recursion_budget) + else: + return self._JSTypedArray(name, recursion_budget) + + def _JSWeakCollection(self, name, recursion_budget): + ctor = random.choice([self._JSMap, self._JSSet]) + return ctor(name, recursion_budget, weak="Weak") + + def _PropertyDetails(self, name, recursion_budget): + # TODO(jkummerow): Be more clever here? + return self._Int32(name) + + def _JSObject(self, name, recursion_budget): + die = random.random() + if die < 0.4: + function = random.choice([self._PlainObject, self._PlainArray, + self._PlainFunction]) + elif die < 0.5: + return self._Variable(name, "this") # Global object. 
+ else: + function = random.choice([self._JSArrayBuffer, self._JSDataView, + self._JSDate, self._JSFunctionProxy, + self._JSGeneratorObject, self._JSMap, + self._JSMapIterator, self._JSRegExp, + self._JSSet, self._JSSetIterator, + self._JSTypedArray, self._JSValue, + self._JSWeakCollection]) + result = function(name, recursion_budget) + self._AddAccessors(name, result, recursion_budget) + self._AddProperties(name, result, recursion_budget) + self._AddElements(name, result, recursion_budget) + return result + + def _JSReceiver(self, name, recursion_budget): + if random.random() < 0.9: return self._JSObject(name, recursion_budget) + return self._JSProxy(name, recursion_budget) + + def _HeapObject(self, name, recursion_budget): + die = random.random() + if die < 0.9: return self._JSReceiver(name, recursion_budget) + elif die < 0.95: return self._Oddball(name, recursion_budget) + else: return self._Name(name, recursion_budget) + + def _Object(self, name, recursion_budget): + if recursion_budget <= 0: + function = random.choice([self._Oddball, self._Number, self._Name, + self._JSValue, self._JSRegExp]) + return function(name, recursion_budget) + if random.random() < 0.2: + return self._Smi(name, recursion_budget) + return self._HeapObject(name, recursion_budget) + + GENERATORS = { + "Boolean": ["true", _Boolean], + "HeapObject": ["new Object()", _HeapObject], + "Int32": ["32", _Int32], + "JSArray": ["new Array()", _JSArray], + "JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer], + "JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView], + "JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView], + "JSDate": ["new Date()", _JSDate], + "JSFunction": ["function() {}", _JSFunction], + "JSFunctionProxy": ["Proxy.createFunction({}, function() {})", + _JSFunctionProxy], + "JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject], + "JSMap": ["new Map()", _JSMap], + "JSMapIterator": ["new Map().entries()", _JSMapIterator], + "JSObject": ["new 
Object()", _JSObject], + "JSProxy": ["Proxy.create({})", _JSProxy], + "JSReceiver": ["new Object()", _JSReceiver], + "JSRegExp": ["/ab/g", _JSRegExp], + "JSSet": ["new Set()", _JSSet], + "JSSetIterator": ["new Set().values()", _JSSetIterator], + "JSTypedArray": ["new Int32Array(2)", _JSTypedArray], + "JSValue": ["new String('foo')", _JSValue], + "JSWeakCollection": ["new WeakMap()", _JSWeakCollection], + "Name": ["\"name\"", _Name], + "Number": ["1.5", _Number], + "Object": ["new Object()", _Object], + "PropertyDetails": ["513", _PropertyDetails], + "SeqOneByteString": ["\"seq 1-byte\"", _SeqString], + "SeqString": ["\"seqstring\"", _SeqString], + "SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString], + "Smi": ["1", _Smi], + "StrictMode": ["1", _StrictMode], + "String": ["\"foo\"", _String], + "Symbol": ["Symbol(\"symbol\")", _Symbol], + "Uint32": ["32", _Uint32], + } + + +class ArgParser(object): + def __init__(self, regex, ctor): + self.regex = regex + self.ArgCtor = ctor + + +class Arg(object): + def __init__(self, typename, varname, index): + self.type = typename + self.name = "_%s" % varname + self.index = index + + +class Function(object): + def __init__(self, match): + self.name = match.group(1) + self.argslength = -1 + self.args = {} + self.inline = "" + + handle_arg_parser = ArgParser( + re.compile("^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"), + lambda match: Arg(match.group(1), match.group(2), int(match.group(3)))) + + plain_arg_parser = ArgParser( + re.compile("^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"), + lambda match: Arg(match.group(1), match.group(2), int(match.group(3)))) + + number_handle_arg_parser = ArgParser( + re.compile("^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"), + lambda match: Arg("Number", match.group(1), int(match.group(2)))) + + smi_arg_parser = ArgParser( + re.compile("^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"), + lambda match: Arg("Smi", match.group(1), int(match.group(2)))) + + 
double_arg_parser = ArgParser( + re.compile("^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"), + lambda match: Arg("Number", match.group(1), int(match.group(2)))) + + number_arg_parser = ArgParser( + re.compile( + "^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"), + lambda match: Arg(match.group(2), match.group(1), int(match.group(3)))) + + strict_mode_arg_parser = ArgParser( + re.compile("^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"), + lambda match: Arg("StrictMode", match.group(1), int(match.group(2)))) + + boolean_arg_parser = ArgParser( + re.compile("^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"), + lambda match: Arg("Boolean", match.group(1), int(match.group(2)))) + + property_details_parser = ArgParser( + re.compile("^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"), + lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2)))) + + arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser, + smi_arg_parser, + double_arg_parser, number_arg_parser, strict_mode_arg_parser, + boolean_arg_parser, property_details_parser] + + def SetArgsLength(self, match): + self.argslength = int(match.group(1)) + + def TryParseArg(self, line): + for parser in Function.arg_parsers: + match = parser.regex.match(line) + if match: + arg = parser.ArgCtor(match) + self.args[arg.index] = arg + return True + return False + + def Filename(self): + return "%s.js" % self.name.lower() + + def __str__(self): + s = [self.name, "("] + argcount = self.argslength + if argcount < 0: + print("WARNING: unknown argslength for function %s" % self.name) + if self.args: + argcount = max([self.args[i].index + 1 for i in self.args]) + else: + argcount = 0 + for i in range(argcount): + if i > 0: s.append(", ") + s.append(self.args[i].type if i in self.args else "<unknown>") + s.append(")") + return "".join(s) + + +class Macro(object): + def __init__(self, match): + self.name = match.group(1) + self.args = [s.strip() for s in 
match.group(2).split(",")] + self.lines = [] + self.indentation = 0 + self.AddLine(match.group(3)) + + def AddLine(self, line): + if not line: return + if not self.lines: + # This is the first line, detect indentation. + self.indentation = len(line) - len(line.lstrip()) + line = line.rstrip("\\\n ") + if not line: return + assert len(line[:self.indentation].strip()) == 0, \ + ("expected whitespace: '%s', full line: '%s'" % + (line[:self.indentation], line)) + line = line[self.indentation:] + if not line: return + self.lines.append(line + "\n") + + def Finalize(self): + for arg in self.args: + pattern = re.compile(r"(##|\b)%s(##|\b)" % arg) + for i in range(len(self.lines)): + self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i]) + + def FillIn(self, arg_values): + filler = {} + assert len(arg_values) == len(self.args) + for i in range(len(self.args)): + filler[self.args[i]] = arg_values[i] + result = [] + for line in self.lines: + result.append(line % filler) + return result + + +# Parses HEADERFILENAME to find out which runtime functions are "inline". 
+def FindInlineRuntimeFunctions(): + inline_functions = [] + with open(HEADERFILENAME, "r") as f: + inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n" + inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?") + mode = "SEARCHING" + for line in f: + if mode == "ACTIVE": + match = inline_function.match(line) + if match: + inline_functions.append(match.group(1)) + if not line.endswith("\\\n"): + mode = "SEARCHING" + elif mode == "SEARCHING": + if line == inline_list: + mode = "ACTIVE" + return inline_functions + + +def ReadFileAndExpandMacros(filename): + found_macros = {} + expanded_lines = [] + with open(filename, "r") as f: + found_macro = None + for line in f: + if found_macro is not None: + found_macro.AddLine(line) + if not line.endswith("\\\n"): + found_macro.Finalize() + found_macro = None + continue + + match = MACRO.match(line) + if match: + found_macro = Macro(match) + if found_macro.name in EXPAND_MACROS: + found_macros[found_macro.name] = found_macro + else: + found_macro = None + continue + + match = FIRST_WORD.match(line) + if match: + first_word = match.group(1) + if first_word in found_macros: + MACRO_CALL = re.compile("%s\(([^)]*)\)" % first_word) + match = MACRO_CALL.match(line) + assert match + args = [s.strip() for s in match.group(1).split(",")] + expanded_lines += found_macros[first_word].FillIn(args) + continue + + expanded_lines.append(line) + return expanded_lines + + +# Detects runtime functions by parsing FILENAME. +def FindRuntimeFunctions(): + inline_functions = FindInlineRuntimeFunctions() + functions = [] + expanded_lines = ReadFileAndExpandMacros(FILENAME) + function = None + partial_line = "" + for line in expanded_lines: + # Multi-line definition support, ignoring macros. 
+ if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"): + if line.endswith("\\\n"): continue + partial_line = line.rstrip() + continue + if partial_line: + partial_line += " " + line.strip() + if partial_line.endswith("{"): + line = partial_line + partial_line = "" + else: + continue + + match = FUNCTION.match(line) + if match: + function = Function(match) + if function.name in inline_functions: + function.inline = "_" + continue + if function is None: continue + + match = ARGSLENGTH.match(line) + if match: + function.SetArgsLength(match) + continue + + if function.TryParseArg(line): + continue + + if line == FUNCTIONEND: + if function is not None: + functions.append(function) + function = None + return functions + + +# Hack: This must have the same fields as class Function above, because the +# two are used polymorphically in RunFuzzer(). We could use inheritance... +class Builtin(object): + def __init__(self, match): + self.name = match.group(1) + args = match.group(2) + self.argslength = 0 if args == "" else args.count(",") + 1 + self.inline = "" + self.args = {} + if self.argslength > 0: + args = args.split(",") + for i in range(len(args)): + # a = args[i].strip() # TODO: filter out /* comments */ first. 
+ a = "" + self.args[i] = Arg("Object", a, i) + + def __str__(self): + return "%s(%d)" % (self.name, self.argslength) + + +def FindJSBuiltins(): + PATH = "src" + fileslist = [] + for (root, dirs, files) in os.walk(PATH): + for f in files: + if f.endswith(".js"): + fileslist.append(os.path.join(root, f)) + builtins = [] + regexp = re.compile("^function (\w+)\s*\((.*?)\) {") + matches = 0 + for filename in fileslist: + with open(filename, "r") as f: + file_contents = f.read() + file_contents = js2c.ExpandInlineMacros(file_contents) + lines = file_contents.split("\n") + partial_line = "" + for line in lines: + if line.startswith("function") and not '{' in line: + partial_line += line.rstrip() + continue + if partial_line: + partial_line += " " + line.strip() + if '{' in line: + line = partial_line + partial_line = "" + else: + continue + match = regexp.match(line) + if match: + builtins.append(Builtin(match)) + return builtins + + +# Classifies runtime functions. +def ClassifyFunctions(functions): + # Can be fuzzed with a JavaScript testcase. + js_fuzzable_functions = [] + # We have enough information to fuzz these, but they need inputs that + # cannot be created or passed around in JavaScript. + cctest_fuzzable_functions = [] + # This script does not have enough information about these. + unknown_functions = [] + + types = {} + for f in functions: + if f.name in BLACKLISTED: + continue + decision = js_fuzzable_functions + custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None) + if f.argslength < 0: + # Unknown length -> give up unless there's a custom definition. + if custom and custom[-1] is not None: + f.argslength = custom[-1] + assert len(custom) == f.argslength + 1, \ + ("%s: last custom definition must be argslength" % f.name) + else: + decision = unknown_functions + else: + if custom: + # Any custom definitions must match the known argslength. 
+ assert len(custom) == f.argslength + 1, \ + ("%s should have %d custom definitions but has %d" % + (f.name, f.argslength + 1, len(custom))) + for i in range(f.argslength): + if custom and custom[i] is not None: + # All good, there's a custom definition. + pass + elif not i in f.args: + # No custom definition and no parse result -> give up. + decision = unknown_functions + else: + t = f.args[i].type + if t in NON_JS_TYPES: + decision = cctest_fuzzable_functions + else: + assert Generator.IsTypeSupported(t), \ + ("type generator not found for %s, function: %s" % (t, f)) + decision.append(f) + return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) + + +def _GetKnownGoodArgs(function, generator): + custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None) + definitions = [] + argslist = [] + for i in range(function.argslength): + if custom_input and custom_input[i] is not None: + name = "arg%d" % i + definitions.append("var %s = %s;" % (name, custom_input[i])) + else: + arg = function.args[i] + name = arg.name + definitions += generator.RandomVariable(name, arg.type, simple=True) + argslist.append(name) + return (definitions, argslist) + + +def _GenerateTestcase(function, definitions, argslist, throws): + s = ["// Copyright 2014 the V8 project authors. 
All rights reserved.", + "// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY", + "// Flags: --allow-natives-syntax --harmony --harmony-proxies" + ] + definitions + call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist)) + if throws: + s.append("try {") + s.append(call); + s.append("} catch(e) {}") + else: + s.append(call) + testcase = "\n".join(s) + return testcase + + +def GenerateJSTestcaseForFunction(function): + gen = Generator() + (definitions, argslist) = _GetKnownGoodArgs(function, gen) + testcase = _GenerateTestcase(function, definitions, argslist, + function.name in THROWS) + path = os.path.join(BASEPATH, function.Filename()) + with open(path, "w") as f: + f.write("%s\n" % testcase) + + +def GenerateTestcases(functions): + shutil.rmtree(BASEPATH) # Re-generate everything. + os.makedirs(BASEPATH) + for f in functions: + GenerateJSTestcaseForFunction(f) + + +def _SaveFileName(save_path, process_id, save_file_index): + return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index) + + +def _GetFuzzableRuntimeFunctions(): + functions = FindRuntimeFunctions() + (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \ + ClassifyFunctions(functions) + return js_fuzzable_functions + + +FUZZ_TARGET_LISTS = { + "runtime": _GetFuzzableRuntimeFunctions, + "builtins": FindJSBuiltins, +} + + +def RunFuzzer(process_id, options, stop_running): + MAX_SLEEP_TIME = 0.1 + INITIAL_SLEEP_TIME = 0.001 + SLEEP_TIME_FACTOR = 1.25 + base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id + test_file_name = "%s.js" % base_file_name + stderr_file_name = "%s.out" % base_file_name + save_file_index = 0 + while os.path.exists(_SaveFileName(options.save_path, process_id, + save_file_index)): + save_file_index += 1 + + targets = FUZZ_TARGET_LISTS[options.fuzz_target]() + try: + for i in range(options.num_tests): + if stop_running.is_set(): break + function = None + while function is None or function.argslength == 0: + function 
= random.choice(targets) + args = [] + definitions = [] + gen = Generator() + for i in range(function.argslength): + arg = function.args[i] + argname = "arg%d%s" % (i, arg.name) + args.append(argname) + definitions += gen.RandomVariable(argname, arg.type, simple=False) + testcase = _GenerateTestcase(function, definitions, args, True) + with open(test_file_name, "w") as f: + f.write("%s\n" % testcase) + with open("/dev/null", "w") as devnull: + with open(stderr_file_name, "w") as stderr: + process = subprocess.Popen( + [options.binary, "--allow-natives-syntax", "--harmony", + "--harmony-proxies", "--enable-slow-asserts", test_file_name], + stdout=devnull, stderr=stderr) + end_time = time.time() + options.timeout + timed_out = False + exit_code = None + sleep_time = INITIAL_SLEEP_TIME + while exit_code is None: + if time.time() >= end_time: + # Kill the process and wait for it to exit. + os.kill(process.pid, signal.SIGTERM) + exit_code = process.wait() + timed_out = True + else: + exit_code = process.poll() + time.sleep(sleep_time) + sleep_time = sleep_time * SLEEP_TIME_FACTOR + if sleep_time > MAX_SLEEP_TIME: + sleep_time = MAX_SLEEP_TIME + if exit_code != 0 and not timed_out: + oom = False + with open(stderr_file_name, "r") as stderr: + for line in stderr: + if line.strip() == "# Allocation failed - process out of memory": + oom = True + break + if oom: continue + save_name = _SaveFileName(options.save_path, process_id, + save_file_index) + shutil.copyfile(test_file_name, save_name) + save_file_index += 1 + except KeyboardInterrupt: + stop_running.set() + finally: + if os.path.exists(test_file_name): + os.remove(test_file_name) + if os.path.exists(stderr_file_name): + os.remove(stderr_file_name) + + +def BuildOptionParser(): + usage = """Usage: %%prog [options] ACTION + +where ACTION can be: + +info Print diagnostic info. +check Check that runtime functions can be parsed as expected, and that + test cases exist. 
+generate Parse source code for runtime functions, and auto-generate + test cases for them. Warning: this will nuke and re-create + %(path)s. +fuzz Generate fuzz tests, run them, save those that crashed (see options). +""" % {"path": os.path.relpath(BASEPATH)} + + o = optparse.OptionParser(usage=usage) + o.add_option("--binary", default="out/x64.debug/d8", + help="d8 binary used for running fuzz tests (default: %default)") + o.add_option("--fuzz-target", default="runtime", + help="Set of functions targeted by fuzzing. Allowed values: " + "%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS)) + o.add_option("-n", "--num-tests", default=1000, type="int", + help="Number of fuzz tests to generate per worker process" + " (default: %default)") + o.add_option("--save-path", default="~/runtime_fuzz_output", + help="Path to directory where failing tests will be stored" + " (default: %default)") + o.add_option("--timeout", default=20, type="int", + help="Timeout for each fuzz test (in seconds, default:" + "%default)") + return o + + +def ProcessOptions(options, args): + options.save_path = os.path.expanduser(options.save_path) + if options.fuzz_target not in FUZZ_TARGET_LISTS: + print("Invalid fuzz target: %s" % options.fuzz_target) + return False + if len(args) != 1 or args[0] == "help": + return False + return True + + +def Main(): + parser = BuildOptionParser() + (options, args) = parser.parse_args() + + if not ProcessOptions(options, args): + parser.print_help() + return 1 + action = args[0] + + functions = FindRuntimeFunctions() + (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \ + ClassifyFunctions(functions) + builtins = FindJSBuiltins() + + if action == "test": + print("put your temporary debugging code here") + return 0 + + if action == "info": + print("%d functions total; js_fuzzable_functions: %d, " + "cctest_fuzzable_functions: %d, unknown_functions: %d" + % (len(functions), len(js_fuzzable_functions), + len(cctest_fuzzable_functions), 
len(unknown_functions))) + print("%d JavaScript builtins" % len(builtins)) + print("unknown functions:") + for f in unknown_functions: + print(f) + return 0 + + if action == "check": + errors = 0 + + def CheckCount(actual, expected, description): + if len(actual) != expected: + print("Expected to detect %d %s, but found %d." % ( + expected, description, len(actual))) + print("If this change is intentional, please update the expectations" + " at the top of %s." % THIS_SCRIPT) + return 1 + return 0 + + errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT, + "functions in total") + errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT, + "JavaScript-fuzzable functions") + errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT, + "cctest-fuzzable functions") + errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT, + "functions with incomplete type information") + errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT, + "JavaScript builtins") + + def CheckTestcasesExisting(functions): + errors = 0 + for f in functions: + if not os.path.isfile(os.path.join(BASEPATH, f.Filename())): + print("Missing testcase for %s, please run '%s generate'" % + (f.name, THIS_SCRIPT)) + errors += 1 + files = filter(lambda filename: not filename.startswith("."), + os.listdir(BASEPATH)) + if (len(files) != len(functions)): + unexpected_files = set(files) - set([f.Filename() for f in functions]) + for f in unexpected_files: + print("Unexpected testcase: %s" % os.path.join(BASEPATH, f)) + errors += 1 + print("Run '%s generate' to automatically clean these up." 
+ % THIS_SCRIPT) + return errors + + errors += CheckTestcasesExisting(js_fuzzable_functions) + + def CheckNameClashes(runtime_functions, builtins): + errors = 0 + runtime_map = {} + for f in runtime_functions: + runtime_map[f.name] = 1 + for b in builtins: + if b.name in runtime_map: + print("Builtin/Runtime_Function name clash: %s" % b.name) + errors += 1 + return errors + + errors += CheckNameClashes(functions, builtins) + + if errors > 0: + return 1 + print("Generated runtime tests: all good.") + return 0 + + if action == "generate": + GenerateTestcases(js_fuzzable_functions) + return 0 + + if action == "fuzz": + processes = [] + if not os.path.isdir(options.save_path): + os.makedirs(options.save_path) + stop_running = multiprocessing.Event() + for i in range(multiprocessing.cpu_count()): + args = (i, options, stop_running) + p = multiprocessing.Process(target=RunFuzzer, args=args) + p.start() + processes.append(p) + try: + for i in range(len(processes)): + processes[i].join() + except KeyboardInterrupt: + stop_running.set() + for i in range(len(processes)): + processes[i].join() + return 0 + +if __name__ == "__main__": + sys.exit(Main()) diff -Nru nodejs-0.11.13/deps/v8/tools/generate-trig-table.py nodejs-0.11.15/deps/v8/tools/generate-trig-table.py --- nodejs-0.11.13/deps/v8/tools/generate-trig-table.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/generate-trig-table.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# This is a utility for populating the lookup table for the -# approximation of trigonometric functions. - -import sys, math - -SAMPLES = 1800 - -TEMPLATE = """\ -// Copyright 2013 Google Inc. All Rights Reserved. - -// This file was generated from a python script. 
- -#include "v8.h" -#include "trig-table.h" - -namespace v8 { -namespace internal { - - const double TrigonometricLookupTable::kSinTable[] = - { %(sine_table)s }; - const double TrigonometricLookupTable::kCosXIntervalTable[] = - { %(cosine_table)s }; - const int TrigonometricLookupTable::kSamples = %(samples)i; - const int TrigonometricLookupTable::kTableSize = %(table_size)i; - const double TrigonometricLookupTable::kSamplesOverPiHalf = - %(samples_over_pi_half)s; - -} } // v8::internal -""" - -def main(): - pi_half = math.pi / 2 - interval = pi_half / SAMPLES - sin = [] - cos_times_interval = [] - table_size = SAMPLES + 2 - - for i in range(0, table_size): - sample = i * interval - sin.append(repr(math.sin(sample))) - cos_times_interval.append(repr(math.cos(sample) * interval)) - - output_file = sys.argv[1] - output = open(str(output_file), "w") - output.write(TEMPLATE % { - 'sine_table': ','.join(sin), - 'cosine_table': ','.join(cos_times_interval), - 'samples': SAMPLES, - 'table_size': table_size, - 'samples_over_pi_half': repr(SAMPLES / pi_half) - }) - -if __name__ == "__main__": - main() diff -Nru nodejs-0.11.13/deps/v8/tools/gen-postmortem-metadata.py nodejs-0.11.15/deps/v8/tools/gen-postmortem-metadata.py --- nodejs-0.11.13/deps/v8/tools/gen-postmortem-metadata.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gen-postmortem-metadata.py 2015-01-20 21:22:17.000000000 +0000 @@ -70,8 +70,6 @@ { 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' }, { 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' }, - { 'name': 'FailureTag', 'value': 'kFailureTag' }, - { 'name': 'FailureTagMask', 'value': 'kFailureTagMask' }, { 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' }, { 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' }, { 'name': 'SmiTag', 'value': 'kSmiTag' }, @@ -80,14 +78,26 @@ { 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' }, { 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' }, + { 'name': 
'OddballFalse', 'value': 'Oddball::kFalse' }, + { 'name': 'OddballTrue', 'value': 'Oddball::kTrue' }, + { 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' }, + { 'name': 'OddballNull', 'value': 'Oddball::kNull' }, + { 'name': 'OddballArgumentMarker', 'value': 'Oddball::kArgumentMarker' }, + { 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' }, + { 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' }, + { 'name': 'OddballOther', 'value': 'Oddball::kOther' }, + { 'name': 'OddballException', 'value': 'Oddball::kException' }, + { 'name': 'prop_idx_first', 'value': 'DescriptorArray::kFirstIndex' }, { 'name': 'prop_type_field', 'value': 'FIELD' }, - { 'name': 'prop_type_first_phantom', - 'value': 'TRANSITION' }, { 'name': 'prop_type_mask', 'value': 'PropertyDetails::TypeField::kMask' }, + { 'name': 'prop_index_mask', + 'value': 'PropertyDetails::FieldIndexField::kMask' }, + { 'name': 'prop_index_shift', + 'value': 'PropertyDetails::FieldIndexField::kShift' }, { 'name': 'prop_desc_key', 'value': 'DescriptorArray::kDescriptorKey' }, @@ -98,6 +108,20 @@ { 'name': 'prop_desc_size', 'value': 'DescriptorArray::kDescriptorSize' }, + { 'name': 'elements_fast_holey_elements', + 'value': 'FAST_HOLEY_ELEMENTS' }, + { 'name': 'elements_fast_elements', + 'value': 'FAST_ELEMENTS' }, + { 'name': 'elements_dictionary_elements', + 'value': 'DICTIONARY_ELEMENTS' }, + + { 'name': 'bit_field2_elements_kind_mask', + 'value': 'Map::ElementsKindBits::kMask' }, + { 'name': 'bit_field2_elements_kind_shift', + 'value': 'Map::ElementsKindBits::kShift' }, + { 'name': 'bit_field3_dictionary_map_shift', + 'value': 'Map::DictionaryMap::kShift' }, + { 'name': 'off_fp_context', 'value': 'StandardFrameConstants::kContextOffset' }, { 'name': 'off_fp_constant_pool', @@ -120,6 +144,16 @@ 'Map, instance_attributes, int, kInstanceAttributesOffset', 'Map, inobject_properties, int, kInObjectPropertiesOffset', 'Map, instance_size, int, kInstanceSizeOffset', + 'Map, bit_field, 
char, kBitFieldOffset', + 'Map, bit_field2, char, kBitField2Offset', + 'Map, bit_field3, SMI, kBitField3Offset', + 'Map, prototype, Object, kPrototypeOffset', + 'NameDictionaryShape, prefix_size, int, kPrefixSize', + 'NameDictionaryShape, entry_size, int, kEntrySize', + 'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize', + 'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize', + 'NumberDictionaryShape, entry_size, int, kEntrySize', + 'Oddball, kind_offset, int, kKindOffset', 'HeapNumber, value, double, kValueOffset', 'ConsString, first, String, kFirstOffset', 'ConsString, second, String, kSecondOffset', @@ -158,9 +192,9 @@ * This file is generated by %s. Do not edit directly. */ -#include "v8.h" -#include "frames.h" -#include "frames-inl.h" /* for architecture-specific frame constants */ +#include "src/v8.h" +#include "src/frames.h" +#include "src/frames-inl.h" /* for architecture-specific frame constants */ using namespace v8::internal; @@ -361,7 +395,7 @@ 'value': '%s::%s' % (klass, offset) }); - assert(kind == 'SMI_ACCESSORS'); + assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI'); klass = args[0]; field = args[1]; offset = args[2]; @@ -385,7 +419,8 @@ # may span multiple lines and may contain nested parentheses. We also # call parse_field() to pick apart the invocation. 
# - prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE', 'SMI_ACCESSORS' ]; + prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE', + 'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ]; current = ''; opens = 0; diff -Nru nodejs-0.11.13/deps/v8/tools/grokdump.py nodejs-0.11.15/deps/v8/tools/grokdump.py --- nodejs-0.11.13/deps/v8/tools/grokdump.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/grokdump.py 2015-01-20 21:22:17.000000000 +0000 @@ -3103,15 +3103,18 @@ frame_pointer = reader.ExceptionFP() print "Annotated stack (from exception.esp to bottom):" for slot in xrange(stack_top, stack_bottom, reader.PointerSize()): + ascii_content = [c if c >= '\x20' and c < '\x7f' else '.' + for c in reader.ReadBytes(slot, reader.PointerSize())] maybe_address = reader.ReadUIntPtr(slot) heap_object = heap.FindObject(maybe_address) maybe_symbol = reader.FindSymbol(maybe_address) if slot == frame_pointer: maybe_symbol = "<---- frame pointer" frame_pointer = maybe_address - print "%s: %s %s" % (reader.FormatIntPtr(slot), - reader.FormatIntPtr(maybe_address), - maybe_symbol or "") + print "%s: %s %s %s" % (reader.FormatIntPtr(slot), + reader.FormatIntPtr(maybe_address), + "".join(ascii_content), + maybe_symbol or "") if heap_object: heap_object.Print(Printer()) print diff -Nru nodejs-0.11.13/deps/v8/tools/gyp/v8.gyp nodejs-0.11.15/deps/v8/tools/gyp/v8.gyp --- nodejs-0.11.13/deps/v8/tools/gyp/v8.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/gyp/v8.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -42,20 +42,24 @@ }, { 'toolsets': ['target'], }], - ['v8_use_snapshot=="true"', { + + ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', { # The dependency on v8_base should come from a transitive # dependency however the Android toolchain requires libv8_base.a # to appear before libv8_snapshot.a so it's listed explicitly. 
- 'dependencies': ['v8_base.<(v8_target_arch)', 'v8_snapshot'], - }, - { + 'dependencies': ['v8_base', 'v8_snapshot'], + }], + ['v8_use_snapshot!="true" and v8_use_external_startup_data==0', { # The dependency on v8_base should come from a transitive # dependency however the Android toolchain requires libv8_base.a # to appear before libv8_snapshot.a so it's listed explicitly. - 'dependencies': [ - 'v8_base.<(v8_target_arch)', - 'v8_nosnapshot.<(v8_target_arch)', - ], + 'dependencies': ['v8_base', 'v8_nosnapshot'], + }], + ['v8_use_external_startup_data==1 and want_separate_host_toolset==0', { + 'dependencies': ['v8_base', 'v8_external_snapshot'], + }], + ['v8_use_external_startup_data==1 and want_separate_host_toolset==1', { + 'dependencies': ['v8_base', 'v8_external_snapshot#host'], }], ['component=="shared_library"', { 'type': '<(component)', @@ -64,6 +68,9 @@ # has some sources to link into the component. '../../src/v8dll-main.cc', ], + 'include_dirs': [ + '../..', + ], 'defines': [ 'V8_SHARED', 'BUILDING_V8_SHARED', @@ -112,16 +119,14 @@ ['want_separate_host_toolset==1', { 'toolsets': ['host', 'target'], 'dependencies': [ - 'mksnapshot.<(v8_target_arch)#host', + 'mksnapshot#host', 'js2c#host', - 'generate_trig_table#host', ], }, { 'toolsets': ['target'], 'dependencies': [ - 'mksnapshot.<(v8_target_arch)', + 'mksnapshot', 'js2c', - 'generate_trig_table', ], }], ['component=="shared_library"', { @@ -138,22 +143,22 @@ }], ], 'dependencies': [ - 'v8_base.<(v8_target_arch)', + 'v8_base', ], 'include_dirs+': [ - '../../src', + '../..', ], 'sources': [ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc', - '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc', '<(INTERMEDIATE_DIR)/snapshot.cc', + '../../src/snapshot-common.cc', ], 'actions': [ { 'action_name': 'run_mksnapshot', 'inputs': [ - '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot.<(v8_target_arch)<(EXECUTABLE_SUFFIX)', + 
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)', ], 'outputs': [ '<(INTERMEDIATE_DIR)/snapshot.cc', @@ -172,33 +177,33 @@ 'action': [ '<@(_inputs)', '<@(mksnapshot_flags)', - '<@(_outputs)' + '<@(INTERMEDIATE_DIR)/snapshot.cc' ], }, ], }, { - 'target_name': 'v8_nosnapshot.<(v8_target_arch)', + 'target_name': 'v8_nosnapshot', 'type': 'static_library', 'dependencies': [ - 'v8_base.<(v8_target_arch)', + 'v8_base', ], 'include_dirs+': [ - '../../src', + '../..', ], 'sources': [ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc', - '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc', + '../../src/snapshot-common.cc', '../../src/snapshot-empty.cc', ], 'conditions': [ ['want_separate_host_toolset==1', { 'toolsets': ['host', 'target'], - 'dependencies': ['js2c#host', 'generate_trig_table#host'], + 'dependencies': ['js2c#host'], }, { 'toolsets': ['target'], - 'dependencies': ['js2c', 'generate_trig_table'], + 'dependencies': ['js2c'], }], ['component=="shared_library"', { 'defines': [ @@ -208,40 +213,88 @@ }], ] }, - { 'target_name': 'generate_trig_table', - 'type': 'none', + { + 'target_name': 'v8_external_snapshot', + 'type': 'static_library', 'conditions': [ ['want_separate_host_toolset==1', { 'toolsets': ['host'], - }, { + 'dependencies': [ + 'mksnapshot#host', + 'js2c#host', + 'natives_blob#host', + ]}, { 'toolsets': ['target'], + 'dependencies': [ + 'mksnapshot', + 'js2c', + 'natives_blob', + ], + }], + ['component=="shared_library"', { + 'defines': [ + 'V8_SHARED', + 'BUILDING_V8_SHARED', + ], + 'direct_dependent_settings': { + 'defines': [ + 'V8_SHARED', + 'USING_V8_SHARED', + ], + }, }], ], + 'dependencies': [ + 'v8_base', + ], + 'include_dirs+': [ + '../..', + ], + 'sources': [ + '../../src/natives-external.cc', + '../../src/snapshot-external.cc', + ], 'actions': [ { - 'action_name': 'generate', + 'action_name': 'run_mksnapshot (external)', 'inputs': [ - '../../tools/generate-trig-table.py', + 
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)', ], 'outputs': [ - '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc', + '<(INTERMEDIATE_DIR)/snapshot.cc', + '<(PRODUCT_DIR)/snapshot_blob.bin', ], + 'variables': { + 'mksnapshot_flags': [ + '--log-snapshot-positions', + '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log', + ], + 'conditions': [ + ['v8_random_seed!=0', { + 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'], + }], + ], + }, 'action': [ - 'python', - '../../tools/generate-trig-table.py', - '<@(_outputs)', + '<@(_inputs)', + '<@(mksnapshot_flags)', + '<@(INTERMEDIATE_DIR)/snapshot.cc', + '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin', ], }, - ] + ], }, { - 'target_name': 'v8_base.<(v8_target_arch)', + 'target_name': 'v8_base', 'type': 'static_library', + 'dependencies': [ + 'v8_libbase', + ], 'variables': { 'optimize': 'max', }, 'include_dirs+': [ - '../../src', + '../..', ], 'sources': [ ### gcmole(all) ### '../../src/accessors.cc', @@ -260,10 +313,10 @@ '../../src/assembler.h', '../../src/assert-scope.h', '../../src/assert-scope.cc', + '../../src/ast-value-factory.cc', + '../../src/ast-value-factory.h', '../../src/ast.cc', '../../src/ast.h', - '../../src/atomicops.h', - '../../src/atomicops_internals_x86_gcc.cc', '../../src/bignum-dtoa.cc', '../../src/bignum-dtoa.h', '../../src/bignum.cc', @@ -289,6 +342,98 @@ '../../src/codegen.h', '../../src/compilation-cache.cc', '../../src/compilation-cache.h', + '../../src/compiler/ast-graph-builder.cc', + '../../src/compiler/ast-graph-builder.h', + '../../src/compiler/change-lowering.cc', + '../../src/compiler/change-lowering.h', + '../../src/compiler/code-generator-impl.h', + '../../src/compiler/code-generator.cc', + '../../src/compiler/code-generator.h', + '../../src/compiler/common-node-cache.h', + '../../src/compiler/common-operator.h', + '../../src/compiler/control-builders.cc', + '../../src/compiler/control-builders.h', + '../../src/compiler/frame.h', + 
'../../src/compiler/gap-resolver.cc', + '../../src/compiler/gap-resolver.h', + '../../src/compiler/generic-algorithm-inl.h', + '../../src/compiler/generic-algorithm.h', + '../../src/compiler/generic-graph.h', + '../../src/compiler/generic-node-inl.h', + '../../src/compiler/generic-node.h', + '../../src/compiler/graph-builder.cc', + '../../src/compiler/graph-builder.h', + '../../src/compiler/graph-inl.h', + '../../src/compiler/graph-reducer.cc', + '../../src/compiler/graph-reducer.h', + '../../src/compiler/graph-replay.cc', + '../../src/compiler/graph-replay.h', + '../../src/compiler/graph-visualizer.cc', + '../../src/compiler/graph-visualizer.h', + '../../src/compiler/graph.cc', + '../../src/compiler/graph.h', + '../../src/compiler/instruction-codes.h', + '../../src/compiler/instruction-selector-impl.h', + '../../src/compiler/instruction-selector.cc', + '../../src/compiler/instruction-selector.h', + '../../src/compiler/instruction.cc', + '../../src/compiler/instruction.h', + '../../src/compiler/js-context-specialization.cc', + '../../src/compiler/js-context-specialization.h', + '../../src/compiler/js-generic-lowering.cc', + '../../src/compiler/js-generic-lowering.h', + '../../src/compiler/js-graph.cc', + '../../src/compiler/js-graph.h', + '../../src/compiler/js-operator.h', + '../../src/compiler/js-typed-lowering.cc', + '../../src/compiler/js-typed-lowering.h', + '../../src/compiler/linkage-impl.h', + '../../src/compiler/linkage.cc', + '../../src/compiler/linkage.h', + '../../src/compiler/lowering-builder.cc', + '../../src/compiler/lowering-builder.h', + '../../src/compiler/machine-node-factory.h', + '../../src/compiler/machine-operator-reducer.cc', + '../../src/compiler/machine-operator-reducer.h', + '../../src/compiler/machine-operator.h', + '../../src/compiler/machine-type.h', + '../../src/compiler/node-aux-data-inl.h', + '../../src/compiler/node-aux-data.h', + '../../src/compiler/node-cache.cc', + '../../src/compiler/node-cache.h', + 
'../../src/compiler/node-matchers.h', + '../../src/compiler/node-properties-inl.h', + '../../src/compiler/node-properties.h', + '../../src/compiler/node.cc', + '../../src/compiler/node.h', + '../../src/compiler/opcodes.h', + '../../src/compiler/operator-properties-inl.h', + '../../src/compiler/operator-properties.h', + '../../src/compiler/operator.h', + '../../src/compiler/phi-reducer.h', + '../../src/compiler/pipeline.cc', + '../../src/compiler/pipeline.h', + '../../src/compiler/raw-machine-assembler.cc', + '../../src/compiler/raw-machine-assembler.h', + '../../src/compiler/register-allocator.cc', + '../../src/compiler/register-allocator.h', + '../../src/compiler/representation-change.h', + '../../src/compiler/schedule.cc', + '../../src/compiler/schedule.h', + '../../src/compiler/scheduler.cc', + '../../src/compiler/scheduler.h', + '../../src/compiler/simplified-lowering.cc', + '../../src/compiler/simplified-lowering.h', + '../../src/compiler/simplified-node-factory.h', + '../../src/compiler/simplified-operator.h', + '../../src/compiler/source-position.cc', + '../../src/compiler/source-position.h', + '../../src/compiler/structured-machine-assembler.cc', + '../../src/compiler/structured-machine-assembler.h', + '../../src/compiler/typer.cc', + '../../src/compiler/typer.h', + '../../src/compiler/verifier.cc', + '../../src/compiler/verifier.h', '../../src/compiler.cc', '../../src/compiler.h', '../../src/contexts.cc', @@ -301,8 +446,6 @@ '../../src/cpu-profiler-inl.h', '../../src/cpu-profiler.cc', '../../src/cpu-profiler.h', - '../../src/cpu.cc', - '../../src/cpu.h', '../../src/data-flow.cc', '../../src/data-flow.h', '../../src/date.cc', @@ -310,8 +453,6 @@ '../../src/dateparser-inl.h', '../../src/dateparser.cc', '../../src/dateparser.h', - '../../src/debug-agent.cc', - '../../src/debug-agent.h', '../../src/debug.cc', '../../src/debug.h', '../../src/deoptimizer.cc', @@ -346,6 +487,9 @@ '../../src/fast-dtoa.cc', '../../src/fast-dtoa.h', '../../src/feedback-slots.h', + 
'../../src/field-index.cc', + '../../src/field-index.h', + '../../src/field-index-inl.h', '../../src/fixed-dtoa.cc', '../../src/fixed-dtoa.h', '../../src/flag-definitions.h', @@ -367,14 +511,33 @@ '../../src/handles.cc', '../../src/handles.h', '../../src/hashmap.h', - '../../src/heap-inl.h', '../../src/heap-profiler.cc', '../../src/heap-profiler.h', '../../src/heap-snapshot-generator-inl.h', '../../src/heap-snapshot-generator.cc', '../../src/heap-snapshot-generator.h', - '../../src/heap.cc', - '../../src/heap.h', + '../../src/heap/gc-tracer.cc', + '../../src/heap/gc-tracer.h', + '../../src/heap/heap-inl.h', + '../../src/heap/heap.cc', + '../../src/heap/heap.h', + '../../src/heap/incremental-marking-inl.h', + '../../src/heap/incremental-marking.cc', + '../../src/heap/incremental-marking.h', + '../../src/heap/mark-compact-inl.h', + '../../src/heap/mark-compact.cc', + '../../src/heap/mark-compact.h', + '../../src/heap/objects-visiting-inl.h', + '../../src/heap/objects-visiting.cc', + '../../src/heap/objects-visiting.h', + '../../src/heap/spaces-inl.h', + '../../src/heap/spaces.cc', + '../../src/heap/spaces.h', + '../../src/heap/store-buffer-inl.h', + '../../src/heap/store-buffer.cc', + '../../src/heap/store-buffer.h', + '../../src/heap/sweeper-thread.h', + '../../src/heap/sweeper-thread.cc', '../../src/hydrogen-alias-analysis.h', '../../src/hydrogen-bce.cc', '../../src/hydrogen-bce.h', @@ -423,6 +586,8 @@ '../../src/hydrogen-sce.h', '../../src/hydrogen-store-elimination.cc', '../../src/hydrogen-store-elimination.h', + '../../src/hydrogen-types.cc', + '../../src/hydrogen-types.h', '../../src/hydrogen-uint32-analysis.cc', '../../src/hydrogen-uint32-analysis.h', '../../src/i18n.cc', @@ -432,8 +597,6 @@ '../../src/ic-inl.h', '../../src/ic.cc', '../../src/ic.h', - '../../src/incremental-marking.cc', - '../../src/incremental-marking.h', '../../src/interface.cc', '../../src/interface.h', '../../src/interpreter-irregexp.cc', @@ -445,14 +608,6 @@ '../../src/jsregexp-inl.h', 
'../../src/jsregexp.cc', '../../src/jsregexp.h', - '../../src/lazy-instance.h', - # TODO(jochen): move libplatform/ files to their own target. - '../../src/libplatform/default-platform.cc', - '../../src/libplatform/default-platform.h', - '../../src/libplatform/task-queue.cc', - '../../src/libplatform/task-queue.h', - '../../src/libplatform/worker-thread.cc', - '../../src/libplatform/worker-thread.h', '../../src/list-inl.h', '../../src/list.h', '../../src/lithium-allocator-inl.h', @@ -462,6 +617,7 @@ '../../src/lithium-codegen.h', '../../src/lithium.cc', '../../src/lithium.h', + '../../src/lithium-inl.h', '../../src/liveedit.cc', '../../src/liveedit.h', '../../src/log-inl.h', @@ -469,37 +625,27 @@ '../../src/log-utils.h', '../../src/log.cc', '../../src/log.h', + '../../src/lookup-inl.h', + '../../src/lookup.cc', + '../../src/lookup.h', '../../src/macro-assembler.h', - '../../src/mark-compact.cc', - '../../src/mark-compact.h', '../../src/messages.cc', '../../src/messages.h', + '../../src/msan.h', '../../src/natives.h', '../../src/objects-debug.cc', '../../src/objects-inl.h', '../../src/objects-printer.cc', - '../../src/objects-visiting.cc', - '../../src/objects-visiting.h', '../../src/objects.cc', '../../src/objects.h', - '../../src/once.cc', - '../../src/once.h', - '../../src/optimizing-compiler-thread.h', '../../src/optimizing-compiler-thread.cc', + '../../src/optimizing-compiler-thread.h', + '../../src/ostreams.cc', + '../../src/ostreams.h', '../../src/parser.cc', '../../src/parser.h', - '../../src/platform/elapsed-timer.h', - '../../src/platform/time.cc', - '../../src/platform/time.h', - '../../src/platform.h', - '../../src/platform/condition-variable.cc', - '../../src/platform/condition-variable.h', - '../../src/platform/mutex.cc', - '../../src/platform/mutex.h', - '../../src/platform/semaphore.cc', - '../../src/platform/semaphore.h', - '../../src/platform/socket.cc', - '../../src/platform/socket.h', + '../../src/perf-jit.cc', + '../../src/perf-jit.h', 
'../../src/preparse-data-format.h', '../../src/preparse-data.cc', '../../src/preparse-data.h', @@ -513,6 +659,7 @@ '../../src/property-details.h', '../../src/property.cc', '../../src/property.h', + '../../src/prototype.h', '../../src/regexp-macro-assembler-irregexp-inl.h', '../../src/regexp-macro-assembler-irregexp.cc', '../../src/regexp-macro-assembler-irregexp.h', @@ -544,14 +691,9 @@ '../../src/serialize.h', '../../src/small-pointer-list.h', '../../src/smart-pointers.h', - '../../src/snapshot-common.cc', '../../src/snapshot.h', - '../../src/spaces-inl.h', - '../../src/spaces.cc', - '../../src/spaces.h', - '../../src/store-buffer-inl.h', - '../../src/store-buffer.cc', - '../../src/store-buffer.h', + '../../src/snapshot-source-sink.cc', + '../../src/snapshot-source-sink.h', '../../src/string-search.cc', '../../src/string-search.h', '../../src/string-stream.cc', @@ -560,8 +702,6 @@ '../../src/strtod.h', '../../src/stub-cache.cc', '../../src/stub-cache.h', - '../../src/sweeper-thread.h', - '../../src/sweeper-thread.cc', '../../src/token.cc', '../../src/token.h', '../../src/transitions-inl.h', @@ -569,6 +709,7 @@ '../../src/transitions.h', '../../src/type-info.cc', '../../src/type-info.h', + '../../src/types-inl.h', '../../src/types.cc', '../../src/types.h', '../../src/typing.cc', @@ -583,23 +724,14 @@ '../../src/utils-inl.h', '../../src/utils.cc', '../../src/utils.h', - '../../src/utils/random-number-generator.cc', - '../../src/utils/random-number-generator.h', - '../../src/v8-counters.cc', - '../../src/v8-counters.h', '../../src/v8.cc', '../../src/v8.h', - '../../src/v8checks.h', - '../../src/v8conversions.cc', - '../../src/v8conversions.h', - '../../src/v8globals.h', '../../src/v8memory.h', '../../src/v8threads.cc', '../../src/v8threads.h', - '../../src/v8utils.cc', - '../../src/v8utils.h', '../../src/variables.cc', '../../src/variables.h', + '../../src/vector.h', '../../src/version.cc', '../../src/version.h', '../../src/vm-state-inl.h', @@ -607,6 +739,8 @@ 
'../../src/zone-inl.h', '../../src/zone.cc', '../../src/zone.h', + '../../third_party/fdlibm/fdlibm.cc', + '../../third_party/fdlibm/fdlibm.h', ], 'conditions': [ ['want_separate_host_toolset==1', { @@ -646,6 +780,10 @@ '../../src/arm/regexp-macro-assembler-arm.h', '../../src/arm/simulator-arm.cc', '../../src/arm/stub-cache-arm.cc', + '../../src/compiler/arm/code-generator-arm.cc', + '../../src/compiler/arm/instruction-codes-arm.h', + '../../src/compiler/arm/instruction-selector-arm.cc', + '../../src/compiler/arm/linkage-arm.cc', ], }], ['v8_target_arch=="arm64"', { @@ -660,11 +798,13 @@ '../../src/arm64/code-stubs-arm64.h', '../../src/arm64/constants-arm64.h', '../../src/arm64/cpu-arm64.cc', - '../../src/arm64/cpu-arm64.h', '../../src/arm64/debug-arm64.cc', '../../src/arm64/decoder-arm64.cc', '../../src/arm64/decoder-arm64.h', '../../src/arm64/decoder-arm64-inl.h', + '../../src/arm64/delayed-masm-arm64.cc', + '../../src/arm64/delayed-masm-arm64.h', + '../../src/arm64/delayed-masm-arm64-inl.h', '../../src/arm64/deoptimizer-arm64.cc', '../../src/arm64/disasm-arm64.cc', '../../src/arm64/disasm-arm64.h', @@ -692,9 +832,13 @@ '../../src/arm64/stub-cache-arm64.cc', '../../src/arm64/utils-arm64.cc', '../../src/arm64/utils-arm64.h', + '../../src/compiler/arm64/code-generator-arm64.cc', + '../../src/compiler/arm64/instruction-codes-arm64.h', + '../../src/compiler/arm64/instruction-selector-arm64.cc', + '../../src/compiler/arm64/linkage-arm64.cc', ], }], - ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', { + ['v8_target_arch=="ia32"', { 'sources': [ ### gcmole(arch:ia32) ### '../../src/ia32/assembler-ia32-inl.h', '../../src/ia32/assembler-ia32.cc', @@ -723,9 +867,44 @@ '../../src/ia32/regexp-macro-assembler-ia32.cc', '../../src/ia32/regexp-macro-assembler-ia32.h', '../../src/ia32/stub-cache-ia32.cc', + '../../src/compiler/ia32/code-generator-ia32.cc', + '../../src/compiler/ia32/instruction-codes-ia32.h', + 
'../../src/compiler/ia32/instruction-selector-ia32.cc', + '../../src/compiler/ia32/linkage-ia32.cc', + ], + }], + ['v8_target_arch=="x87"', { + 'sources': [ ### gcmole(arch:x87) ### + '../../src/x87/assembler-x87-inl.h', + '../../src/x87/assembler-x87.cc', + '../../src/x87/assembler-x87.h', + '../../src/x87/builtins-x87.cc', + '../../src/x87/code-stubs-x87.cc', + '../../src/x87/code-stubs-x87.h', + '../../src/x87/codegen-x87.cc', + '../../src/x87/codegen-x87.h', + '../../src/x87/cpu-x87.cc', + '../../src/x87/debug-x87.cc', + '../../src/x87/deoptimizer-x87.cc', + '../../src/x87/disasm-x87.cc', + '../../src/x87/frames-x87.cc', + '../../src/x87/frames-x87.h', + '../../src/x87/full-codegen-x87.cc', + '../../src/x87/ic-x87.cc', + '../../src/x87/lithium-codegen-x87.cc', + '../../src/x87/lithium-codegen-x87.h', + '../../src/x87/lithium-gap-resolver-x87.cc', + '../../src/x87/lithium-gap-resolver-x87.h', + '../../src/x87/lithium-x87.cc', + '../../src/x87/lithium-x87.h', + '../../src/x87/macro-assembler-x87.cc', + '../../src/x87/macro-assembler-x87.h', + '../../src/x87/regexp-macro-assembler-x87.cc', + '../../src/x87/regexp-macro-assembler-x87.h', + '../../src/x87/stub-cache-x87.cc', ], }], - ['v8_target_arch=="mipsel"', { + ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', { 'sources': [ ### gcmole(arch:mipsel) ### '../../src/mips/assembler-mips.cc', '../../src/mips/assembler-mips.h', @@ -759,7 +938,41 @@ '../../src/mips/stub-cache-mips.cc', ], }], - ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', { + ['v8_target_arch=="mips64el"', { + 'sources': [ ### gcmole(arch:mips64el) ### + '../../src/mips64/assembler-mips64.cc', + '../../src/mips64/assembler-mips64.h', + '../../src/mips64/assembler-mips64-inl.h', + '../../src/mips64/builtins-mips64.cc', + '../../src/mips64/codegen-mips64.cc', + '../../src/mips64/codegen-mips64.h', + '../../src/mips64/code-stubs-mips64.cc', + '../../src/mips64/code-stubs-mips64.h', + '../../src/mips64/constants-mips64.cc', + 
'../../src/mips64/constants-mips64.h', + '../../src/mips64/cpu-mips64.cc', + '../../src/mips64/debug-mips64.cc', + '../../src/mips64/deoptimizer-mips64.cc', + '../../src/mips64/disasm-mips64.cc', + '../../src/mips64/frames-mips64.cc', + '../../src/mips64/frames-mips64.h', + '../../src/mips64/full-codegen-mips64.cc', + '../../src/mips64/ic-mips64.cc', + '../../src/mips64/lithium-codegen-mips64.cc', + '../../src/mips64/lithium-codegen-mips64.h', + '../../src/mips64/lithium-gap-resolver-mips64.cc', + '../../src/mips64/lithium-gap-resolver-mips64.h', + '../../src/mips64/lithium-mips64.cc', + '../../src/mips64/lithium-mips64.h', + '../../src/mips64/macro-assembler-mips64.cc', + '../../src/mips64/macro-assembler-mips64.h', + '../../src/mips64/regexp-macro-assembler-mips64.cc', + '../../src/mips64/regexp-macro-assembler-mips64.h', + '../../src/mips64/simulator-mips64.cc', + '../../src/mips64/stub-cache-mips64.cc', + ], + }], + ['v8_target_arch=="x64" or v8_target_arch=="x32"', { 'sources': [ ### gcmole(arch:x64) ### '../../src/x64/assembler-x64-inl.h', '../../src/x64/assembler-x64.cc', @@ -788,6 +1001,10 @@ '../../src/x64/regexp-macro-assembler-x64.cc', '../../src/x64/regexp-macro-assembler-x64.h', '../../src/x64/stub-cache-x64.cc', + '../../src/compiler/x64/code-generator-x64.cc', + '../../src/compiler/x64/instruction-codes-x64.h', + '../../src/compiler/x64/instruction-selector-x64.cc', + '../../src/compiler/x64/linkage-x64.cc', ], }], ['OS=="linux"', { @@ -799,33 +1016,133 @@ ] }], ], + }, + } + ], + ['OS=="win"', { + 'variables': { + 'gyp_generators': '<!(echo $GYP_GENERATORS)', + }, + 'msvs_disabled_warnings': [4351, 4355, 4800], + }], + ['component=="shared_library"', { + 'defines': [ + 'BUILDING_V8_SHARED', + 'V8_SHARED', + ], + }], + ['v8_postmortem_support=="true"', { + 'sources': [ + '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc', + ] + }], + ['v8_enable_i18n_support==1', { + 'dependencies': [ + '<(icu_gyp_path):icui18n', + '<(icu_gyp_path):icuuc', + ] + }, { # 
v8_enable_i18n_support==0 + 'sources!': [ + '../../src/i18n.cc', + '../../src/i18n.h', + ], + }], + ['OS=="win" and v8_enable_i18n_support==1', { + 'dependencies': [ + '<(icu_gyp_path):icudata', + ], + }], + ['icu_use_data_file_flag==1', { + 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'], + }, { # else icu_use_data_file_flag !=1 + 'conditions': [ + ['OS=="win"', { + 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'], + }, { + 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'], + }], + ], + }], + ], + }, + { + 'target_name': 'v8_libbase', + 'type': 'static_library', + 'variables': { + 'optimize': 'max', + }, + 'include_dirs+': [ + '../..', + ], + 'sources': [ + '../../src/base/atomicops.h', + '../../src/base/atomicops_internals_arm64_gcc.h', + '../../src/base/atomicops_internals_arm_gcc.h', + '../../src/base/atomicops_internals_atomicword_compat.h', + '../../src/base/atomicops_internals_mac.h', + '../../src/base/atomicops_internals_mips_gcc.h', + '../../src/base/atomicops_internals_tsan.h', + '../../src/base/atomicops_internals_x86_gcc.cc', + '../../src/base/atomicops_internals_x86_gcc.h', + '../../src/base/atomicops_internals_x86_msvc.h', + '../../src/base/build_config.h', + '../../src/base/cpu.cc', + '../../src/base/cpu.h', + '../../src/base/lazy-instance.h', + '../../src/base/logging.cc', + '../../src/base/logging.h', + '../../src/base/macros.h', + '../../src/base/once.cc', + '../../src/base/once.h', + '../../src/base/platform/elapsed-timer.h', + '../../src/base/platform/time.cc', + '../../src/base/platform/time.h', + '../../src/base/platform/condition-variable.cc', + '../../src/base/platform/condition-variable.h', + '../../src/base/platform/mutex.cc', + '../../src/base/platform/mutex.h', + '../../src/base/platform/platform.h', + '../../src/base/platform/semaphore.cc', + '../../src/base/platform/semaphore.h', + '../../src/base/safe_conversions.h', + '../../src/base/safe_conversions_impl.h', + '../../src/base/safe_math.h', + 
'../../src/base/safe_math_impl.h', + '../../src/base/utils/random-number-generator.cc', + '../../src/base/utils/random-number-generator.h', + ], + 'conditions': [ + ['want_separate_host_toolset==1', { + 'toolsets': ['host', 'target'], + }, { + 'toolsets': ['target'], + }], + ['OS=="linux"', { + 'link_settings': { 'libraries': [ '-lrt' ] }, - 'sources': [ ### gcmole(os:linux) ### - '../../src/platform-linux.cc', - '../../src/platform-posix.cc' + 'sources': [ + '../../src/base/platform/platform-linux.cc', + '../../src/base/platform/platform-posix.cc' ], } ], ['OS=="android"', { - 'defines': [ - 'CAN_USE_VFP_INSTRUCTIONS', - ], 'sources': [ - '../../src/platform-posix.cc' + '../../src/base/platform/platform-posix.cc' ], 'conditions': [ ['host_os=="mac"', { 'target_conditions': [ ['_toolset=="host"', { 'sources': [ - '../../src/platform-macos.cc' + '../../src/base/platform/platform-macos.cc' ] }, { 'sources': [ - '../../src/platform-linux.cc' + '../../src/base/platform/platform-linux.cc' ] }], ], @@ -853,7 +1170,7 @@ }], ], 'sources': [ - '../../src/platform-linux.cc' + '../../src/base/platform/platform-linux.cc' ] }], ], @@ -869,28 +1186,29 @@ }], ['_toolset=="target"', { 'libraries': [ - '-lbacktrace', '-lsocket' + '-lbacktrace' ], }], ], }, 'sources': [ - '../../src/platform-posix.cc', + '../../src/base/platform/platform-posix.cc', + '../../src/base/qnx-math.h', ], 'target_conditions': [ ['_toolset=="host" and host_os=="linux"', { 'sources': [ - '../../src/platform-linux.cc' + '../../src/base/platform/platform-linux.cc' ], }], ['_toolset=="host" and host_os=="mac"', { 'sources': [ - '../../src/platform-macos.cc' + '../../src/base/platform/platform-macos.cc' ], }], ['_toolset=="target"', { 'sources': [ - '../../src/platform-qnx.cc' + '../../src/base/platform/platform-qnx.cc' ], }], ], @@ -902,8 +1220,8 @@ '-L/usr/local/lib -lexecinfo', ]}, 'sources': [ - '../../src/platform-freebsd.cc', - '../../src/platform-posix.cc' + '../../src/base/platform/platform-freebsd.cc', 
+ '../../src/base/platform/platform-posix.cc' ], } ], @@ -913,8 +1231,8 @@ '-L/usr/local/lib -lexecinfo', ]}, 'sources': [ - '../../src/platform-openbsd.cc', - '../../src/platform-posix.cc' + '../../src/base/platform/platform-openbsd.cc', + '../../src/base/platform/platform-posix.cc' ], } ], @@ -924,26 +1242,26 @@ '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo', ]}, 'sources': [ - '../../src/platform-openbsd.cc', - '../../src/platform-posix.cc' + '../../src/base/platform/platform-openbsd.cc', + '../../src/base/platform/platform-posix.cc' ], } ], ['OS=="solaris"', { 'link_settings': { 'libraries': [ - '-lsocket -lnsl', + '-lnsl -lrt', ]}, 'sources': [ - '../../src/platform-solaris.cc', - '../../src/platform-posix.cc' + '../../src/base/platform/platform-solaris.cc', + '../../src/base/platform/platform-posix.cc' ], } ], ['OS=="mac"', { 'sources': [ - '../../src/platform-macos.cc', - '../../src/platform-posix.cc' + '../../src/base/platform/platform-macos.cc', + '../../src/base/platform/platform-posix.cc' ]}, ], ['OS=="win"', { @@ -961,14 +1279,15 @@ 'conditions': [ ['build_env=="Cygwin"', { 'sources': [ - '../../src/platform-cygwin.cc', - '../../src/platform-posix.cc' + '../../src/base/platform/platform-cygwin.cc', + '../../src/base/platform/platform-posix.cc' ], }, { 'sources': [ - '../../src/platform-win32.cc', - '../../src/win32-math.cc', - '../../src/win32-math.h' + '../../src/base/platform/platform-win32.cc', + '../../src/base/win32-headers.h', + '../../src/base/win32-math.cc', + '../../src/base/win32-math.h' ], }], ], @@ -977,9 +1296,10 @@ }, }, { 'sources': [ - '../../src/platform-win32.cc', - '../../src/win32-math.cc', - '../../src/win32-math.h' + '../../src/base/platform/platform-win32.cc', + '../../src/base/win32-headers.h', + '../../src/base/win32-math.cc', + '../../src/base/win32-math.h' ], 'msvs_disabled_warnings': [4351, 4355, 4800], 'link_settings': { @@ -988,51 +1308,62 @@ }], ], }], - ['component=="shared_library"', { - 'defines': [ - 
'BUILDING_V8_SHARED', - 'V8_SHARED', - ], - }], - ['v8_postmortem_support=="true"', { - 'sources': [ - '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc', - ] - }], - ['v8_enable_i18n_support==1', { - 'dependencies': [ - '<(icu_gyp_path):icui18n', - '<(icu_gyp_path):icuuc', - ] - }, { # v8_enable_i18n_support==0 - 'sources!': [ - '../../src/i18n.cc', - '../../src/i18n.h', - ], - }], - ['OS=="win" and v8_enable_i18n_support==1', { - 'dependencies': [ - '<(icu_gyp_path):icudata', - ], + ], + }, + { + 'target_name': 'v8_libplatform', + 'type': 'static_library', + 'variables': { + 'optimize': 'max', + }, + 'dependencies': [ + 'v8_libbase', + ], + 'include_dirs+': [ + '../..', + ], + 'sources': [ + '../../include/libplatform/libplatform.h', + '../../src/libplatform/default-platform.cc', + '../../src/libplatform/default-platform.h', + '../../src/libplatform/task-queue.cc', + '../../src/libplatform/task-queue.h', + '../../src/libplatform/worker-thread.cc', + '../../src/libplatform/worker-thread.h', + ], + 'conditions': [ + ['want_separate_host_toolset==1', { + 'toolsets': ['host', 'target'], + }, { + 'toolsets': ['target'], }], - ['v8_use_default_platform==0', { - 'sources!': [ - '../../src/default-platform.cc', - '../../src/default-platform.h', - ], + ], + }, + { + 'target_name': 'natives_blob', + 'type': 'none', + 'conditions': [ + [ 'v8_use_external_startup_data==1', { + 'dependencies': ['js2c'], + 'actions': [{ + 'action_name': 'concatenate_natives_blob', + 'inputs': [ + '../../tools/concatenate-files.py', + '<(SHARED_INTERMEDIATE_DIR)/libraries.bin', + '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin', + ], + 'outputs': [ + '<(PRODUCT_DIR)/natives_blob.bin', + ], + 'action': ['python', '<@(_inputs)', '<@(_outputs)'], + }], }], - ['icu_use_data_file_flag==1', { - 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'], - }, { # else icu_use_data_file_flag !=1 - 'conditions': [ - ['OS=="win"', { - 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'], - }, { - 
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'], - }], - ], + ['want_separate_host_toolset==1', { + 'toolsets': ['host'], + }, { + 'toolsets': ['target'], }], - ], + ] }, { 'target_name': 'js2c', @@ -1059,9 +1390,11 @@ 'library_files': [ '../../src/runtime.js', '../../src/v8natives.js', + '../../src/symbol.js', '../../src/array.js', '../../src/string.js', '../../src/uri.js', + '../../third_party/fdlibm/fdlibm.js', '../../src/math.js', '../../src/messages.js', '../../src/apinatives.js', @@ -1076,19 +1409,21 @@ '../../src/weak_collection.js', '../../src/promise.js', '../../src/object-observe.js', + '../../src/collection.js', + '../../src/collection-iterator.js', '../../src/macros.py', + '../../src/array-iterator.js', + '../../src/string-iterator.js' ], 'experimental_library_files': [ '../../src/macros.py', - '../../src/symbol.js', '../../src/proxy.js', - '../../src/collection.js', '../../src/generator.js', - '../../src/array-iterator.js', '../../src/harmony-string.js', '../../src/harmony-array.js', - '../../src/harmony-math.js' ], + 'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin', + 'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin', }, 'actions': [ { @@ -1104,12 +1439,20 @@ 'action': [ 'python', '../../tools/js2c.py', - '<@(_outputs)', + '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', 'CORE', '<(v8_compress_startup_data)', '<@(library_files)', '<@(i18n_library_files)', ], + 'conditions': [ + [ 'v8_use_external_startup_data==1', { + 'outputs': ['<@(libraries_bin_file)'], + 'action': [ + '--startup_blob', '<@(libraries_bin_file)', + ], + }], + ], }, { 'action_name': 'js2c_experimental', @@ -1123,11 +1466,19 @@ 'action': [ 'python', '../../tools/js2c.py', - '<@(_outputs)', + '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc', 'EXPERIMENTAL', '<(v8_compress_startup_data)', '<@(experimental_library_files)' ], + 'conditions': [ + [ 'v8_use_external_startup_data==1', { + 'outputs': 
['<@(libraries_experimental_bin_file)'], + 'action': [ + '--startup_blob', '<@(libraries_experimental_bin_file)' + ], + }], + ], }, ], }, @@ -1160,14 +1511,11 @@ ] }, { - 'target_name': 'mksnapshot.<(v8_target_arch)', + 'target_name': 'mksnapshot', 'type': 'executable', - 'dependencies': [ - 'v8_base.<(v8_target_arch)', - 'v8_nosnapshot.<(v8_target_arch)', - ], + 'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'], 'include_dirs+': [ - '../../src', + '../..', ], 'sources': [ '../../src/mksnapshot.cc', diff -Nru nodejs-0.11.13/deps/v8/tools/js2c.py nodejs-0.11.15/deps/v8/tools/js2c.py --- nodejs-0.11.13/deps/v8/tools/js2c.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/js2c.py 2015-01-20 21:22:17.000000000 +0000 @@ -32,24 +32,23 @@ # library. import os, re, sys, string +import optparse import jsmin import bz2 +import textwrap -def ToCAsciiArray(lines): - result = [] - for chr in lines: - value = ord(chr) - assert value < 128 - result.append(str(value)) - return ", ".join(result) +class Error(Exception): + def __init__(self, msg): + Exception.__init__(self, msg) -def ToCArray(lines): +def ToCArray(byte_sequence): result = [] - for chr in lines: + for chr in byte_sequence: result.append(str(ord(chr))) - return ", ".join(result) + joined = ", ".join(result) + return textwrap.fill(joined, 80) def RemoveCommentsAndTrailingWhitespace(lines): @@ -68,46 +67,19 @@ return lines -def ReadLines(filename): - result = [] - for line in open(filename, "rt"): - if '#' in line: - line = line[:line.index('#')] - line = line.strip() - if len(line) > 0: - result.append(line) - return result - - -def LoadConfigFrom(name): - import ConfigParser - config = ConfigParser.ConfigParser() - config.read(name) - return config - - -def ParseValue(string): - string = string.strip() - if string.startswith('[') and string.endswith(']'): - return string.lstrip('[').rstrip(']').split() - else: - return string - - EVAL_PATTERN = re.compile(r'\beval\s*\(') WITH_PATTERN 
= re.compile(r'\bwith\s*\(') - -def Validate(lines, file): - lines = RemoveCommentsAndTrailingWhitespace(lines) +def Validate(lines): # Because of simplified context setup, eval and with is not # allowed in the natives files. - eval_match = EVAL_PATTERN.search(lines) - if eval_match: - raise ("Eval disallowed in natives: %s" % file) - with_match = WITH_PATTERN.search(lines) - if with_match: - raise ("With statements disallowed in natives: %s" % file) + if EVAL_PATTERN.search(lines): + raise Error("Eval disallowed in natives.") + if WITH_PATTERN.search(lines): + raise Error("With statements disallowed in natives.") + + # Pass lines through unchanged. + return lines def ExpandConstants(lines, constants): @@ -187,7 +159,7 @@ def ReadMacros(lines): constants = [] macros = [] - for line in lines: + for line in lines.split('\n'): hash = line.find('#') if hash != -1: line = line[:hash] line = line.strip() @@ -213,13 +185,13 @@ fun = eval("lambda " + ",".join(args) + ': ' + body) macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun))) else: - raise ("Illegal line: " + line) + raise Error("Illegal line: " + line) return (constants, macros) INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n') INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n') -def ExpandInlineMacros(lines, filename): +def ExpandInlineMacros(lines): pos = 0 while True: macro_match = INLINE_MACRO_PATTERN.search(lines, pos) @@ -230,7 +202,7 @@ args = [match.strip() for match in macro_match.group(2).split(',')] end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end()); if end_macro_match is None: - raise ("Macro %s unclosed in %s" % (name, filename)) + raise Error("Macro %s unclosed" % name) body = lines[macro_match.end():end_macro_match.start()] # remove macro definition @@ -245,6 +217,28 @@ return s lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander) + +INLINE_CONSTANT_PATTERN = 
re.compile(r'const\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+)[;\n]') + +def ExpandInlineConstants(lines): + pos = 0 + while True: + const_match = INLINE_CONSTANT_PATTERN.search(lines, pos) + if const_match is None: + # no more constants + return lines + name = const_match.group(1) + replacement = const_match.group(2) + name_pattern = re.compile("\\b%s\\b" % name) + + # remove constant definition and replace + lines = (lines[:const_match.start()] + + re.sub(name_pattern, replacement, lines[const_match.end():])) + + # advance position to where the constant defintion was + pos = const_match.start() + + HEADER_TEMPLATE = """\ // Copyright 2011 Google Inc. All Rights Reserved. @@ -252,14 +246,14 @@ // want to make changes to this file you should either change the // javascript source files or the GYP script. -#include "v8.h" -#include "natives.h" -#include "utils.h" +#include "src/v8.h" +#include "src/natives.h" +#include "src/utils.h" namespace v8 { namespace internal { - static const byte sources[] = { %(sources_data)s }; +%(sources_declaration)s\ %(raw_sources_declaration)s\ @@ -303,7 +297,7 @@ template <> void NativesCollection<%(type)s>::SetRawScriptsSource(Vector<const char> raw_source) { - ASSERT(%(raw_total_length)i == raw_source.length()); + DCHECK(%(raw_total_length)i == raw_source.length()); raw_sources = raw_source.start(); } @@ -311,6 +305,10 @@ } // v8 """ +SOURCES_DECLARATION = """\ + static const byte sources[] = { %s }; +""" + RAW_SOURCES_COMPRESSION_DECLARATION = """\ static const char* raw_sources = NULL; @@ -336,97 +334,245 @@ if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i); """ -def JS2C(source, target, env): - ids = [] - debugger_ids = [] - modules = [] - # Locate the macros file name. 
- consts = [] - macros = [] - for s in source: - if 'macros.py' == (os.path.split(str(s))[1]): - (consts, macros) = ReadMacros(ReadLines(str(s))) - else: - modules.append(s) - minifier = jsmin.JavaScriptMinifier() +def BuildFilterChain(macro_filename): + """Build the chain of filter functions to be applied to the sources. - module_offset = 0 - all_sources = [] - for module in modules: - filename = str(module) - debugger = filename.endswith('-debugger.js') - lines = ReadFile(filename) - lines = ExpandConstants(lines, consts) - lines = ExpandMacros(lines, macros) - lines = RemoveCommentsAndTrailingWhitespace(lines) - lines = ExpandInlineMacros(lines, filename) - Validate(lines, filename) - lines = minifier.JSMinify(lines) - id = (os.path.split(filename)[1])[:-3] - if debugger: id = id[:-9] - raw_length = len(lines) - if debugger: - debugger_ids.append((id, raw_length, module_offset)) - else: - ids.append((id, raw_length, module_offset)) - all_sources.append(lines) - module_offset += raw_length - total_length = raw_total_length = module_offset - - if env['COMPRESSION'] == 'off': - raw_sources_declaration = RAW_SOURCES_DECLARATION - sources_data = ToCAsciiArray("".join(all_sources)) + Args: + macro_filename: Name of the macro file, if any. + + Returns: + A function (string -> string) that reads a source file and processes it. 
+ """ + filter_chain = [ReadFile] + + if macro_filename: + (consts, macros) = ReadMacros(ReadFile(macro_filename)) + filter_chain.append(lambda l: ExpandConstants(l, consts)) + filter_chain.append(lambda l: ExpandMacros(l, macros)) + + filter_chain.extend([ + RemoveCommentsAndTrailingWhitespace, + ExpandInlineMacros, + ExpandInlineConstants, + Validate, + jsmin.JavaScriptMinifier().JSMinify + ]) + + def chain(f1, f2): + return lambda x: f2(f1(x)) + + return reduce(chain, filter_chain) + + +class Sources: + def __init__(self): + self.names = [] + self.modules = [] + self.is_debugger_id = [] + + +def IsDebuggerFile(filename): + return filename.endswith("-debugger.js") + +def IsMacroFile(filename): + return filename.endswith("macros.py") + + +def PrepareSources(source_files): + """Read, prepare and assemble the list of source files. + + Args: + sources: List of Javascript-ish source files. A file named macros.py + will be treated as a list of macros. + + Returns: + An instance of Sources. + """ + macro_file = None + macro_files = filter(IsMacroFile, source_files) + assert len(macro_files) in [0, 1] + if macro_files: + source_files.remove(macro_files[0]) + macro_file = macro_files[0] + + filters = BuildFilterChain(macro_file) + + # Sort 'debugger' sources first. + source_files = sorted(source_files, + lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l)) + + result = Sources() + for source in source_files: + try: + lines = filters(source) + except Error as e: + raise Error("In file %s:\n%s" % (source, str(e))) + + result.modules.append(lines); + + is_debugger = IsDebuggerFile(source) + result.is_debugger_id.append(is_debugger); + + name = os.path.basename(source)[:-3] + result.names.append(name if not is_debugger else name[:-9]); + return result + + +def BuildMetadata(sources, source_bytes, native_type): + """Build the meta data required to generate a libaries file. + + Args: + sources: A Sources instance with the prepared sources. + source_bytes: A list of source bytes. 
+ (The concatenation of all sources; might be compressed.) + native_type: The parameter for the NativesCollection template. + + Returns: + A dictionary for use with HEADER_TEMPLATE. + """ + total_length = len(source_bytes) + raw_sources = "".join(sources.modules) + + # The sources are expected to be ASCII-only. + assert not filter(lambda value: ord(value) >= 128, raw_sources) + + # Loop over modules and build up indices into the source blob: + get_index_cases = [] + get_script_name_cases = [] + get_raw_script_source_cases = [] + offset = 0 + for i in xrange(len(sources.modules)): + native_name = "native %s.js" % sources.names[i] + d = { + "i": i, + "id": sources.names[i], + "name": native_name, + "length": len(native_name), + "offset": offset, + "raw_length": len(sources.modules[i]), + } + get_index_cases.append(GET_INDEX_CASE % d) + get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d) + get_raw_script_source_cases.append(GET_RAW_SCRIPT_SOURCE_CASE % d) + offset += len(sources.modules[i]) + assert offset == len(raw_sources) + + # If we have the raw sources we can declare them accordingly. + have_raw_sources = source_bytes == raw_sources + raw_sources_declaration = (RAW_SOURCES_DECLARATION + if have_raw_sources else RAW_SOURCES_COMPRESSION_DECLARATION) + + metadata = { + "builtin_count": len(sources.modules), + "debugger_count": sum(sources.is_debugger_id), + "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes), + "raw_sources_declaration": raw_sources_declaration, + "raw_total_length": sum(map(len, sources.modules)), + "total_length": total_length, + "get_index_cases": "".join(get_index_cases), + "get_raw_script_source_cases": "".join(get_raw_script_source_cases), + "get_script_name_cases": "".join(get_script_name_cases), + "type": native_type, + } + return metadata + + +def CompressMaybe(sources, compression_type): + """Take the prepared sources and generate a sequence of bytes. + + Args: + sources: A Sources instance with the prepared sourced. 
+ compression_type: string, describing the desired compression. + + Returns: + A sequence of bytes. + """ + sources_bytes = "".join(sources.modules) + if compression_type == "off": + return sources_bytes + elif compression_type == "bz2": + return bz2.compress(sources_bytes) else: - raw_sources_declaration = RAW_SOURCES_COMPRESSION_DECLARATION - if env['COMPRESSION'] == 'bz2': - all_sources = bz2.compress("".join(all_sources)) - total_length = len(all_sources) - sources_data = ToCArray(all_sources) - - # Build debugger support functions - get_index_cases = [ ] - get_raw_script_source_cases = [ ] - get_script_name_cases = [ ] - - i = 0 - for (id, raw_length, module_offset) in debugger_ids + ids: - native_name = "native %s.js" % id - get_index_cases.append(GET_INDEX_CASE % { 'id': id, 'i': i }) - get_raw_script_source_cases.append(GET_RAW_SCRIPT_SOURCE_CASE % { - 'offset': module_offset, - 'raw_length': raw_length, - 'i': i - }) - get_script_name_cases.append(GET_SCRIPT_NAME_CASE % { - 'name': native_name, - 'length': len(native_name), - 'i': i - }) - i = i + 1 - - # Emit result - output = open(str(target[0]), "w") - output.write(HEADER_TEMPLATE % { - 'builtin_count': len(ids) + len(debugger_ids), - 'debugger_count': len(debugger_ids), - 'sources_data': sources_data, - 'raw_sources_declaration': raw_sources_declaration, - 'raw_total_length': raw_total_length, - 'total_length': total_length, - 'get_index_cases': "".join(get_index_cases), - 'get_raw_script_source_cases': "".join(get_raw_script_source_cases), - 'get_script_name_cases': "".join(get_script_name_cases), - 'type': env['TYPE'] - }) + raise Error("Unknown compression type %s." 
% compression_type) + + +def PutInt(blob_file, value): + assert(value >= 0 and value < (1 << 20)) + size = 1 if (value < 1 << 6) else (2 if (value < 1 << 14) else 3) + value_with_length = (value << 2) | size + + byte_sequence = bytearray() + for i in xrange(size): + byte_sequence.append(value_with_length & 255) + value_with_length >>= 8; + blob_file.write(byte_sequence) + + +def PutStr(blob_file, value): + PutInt(blob_file, len(value)); + blob_file.write(value); + + +def WriteStartupBlob(sources, startup_blob): + """Write a startup blob, as expected by V8 Initialize ... + TODO(vogelheim): Add proper method name. + + Args: + sources: A Sources instance with the prepared sources. + startup_blob_file: Name of file to write the blob to. + """ + output = open(startup_blob, "wb") + + debug_sources = sum(sources.is_debugger_id); + PutInt(output, debug_sources) + for i in xrange(debug_sources): + PutStr(output, sources.names[i]); + PutStr(output, sources.modules[i]); + + PutInt(output, len(sources.names) - debug_sources) + for i in xrange(debug_sources, len(sources.names)): + PutStr(output, sources.names[i]); + PutStr(output, sources.modules[i]); + + output.close() + + +def JS2C(source, target, native_type, compression_type, raw_file, startup_blob): + sources = PrepareSources(source) + sources_bytes = CompressMaybe(sources, compression_type) + metadata = BuildMetadata(sources, sources_bytes, native_type) + + # Optionally emit raw file. + if raw_file: + output = open(raw_file, "w") + output.write(sources_bytes) + output.close() + + if startup_blob: + WriteStartupBlob(sources, startup_blob); + + # Emit resulting source file. 
+ output = open(target, "w") + output.write(HEADER_TEMPLATE % metadata) output.close() + def main(): - natives = sys.argv[1] - type = sys.argv[2] - compression = sys.argv[3] - source_files = sys.argv[4:] - JS2C(source_files, [natives], { 'TYPE': type, 'COMPRESSION': compression }) + parser = optparse.OptionParser() + parser.add_option("--raw", action="store", + help="file to write the processed sources array to.") + parser.add_option("--startup_blob", action="store", + help="file to write the startup blob to.") + parser.set_usage("""js2c out.cc type compression sources.js ... + out.cc: C code to be generated. + type: type parameter for NativesCollection template. + compression: type of compression used. [off|bz2] + sources.js: JS internal sources or macros.py.""") + (options, args) = parser.parse_args() + + JS2C(args[3:], args[0], args[1], args[2], options.raw, options.startup_blob) + if __name__ == "__main__": main() diff -Nru nodejs-0.11.13/deps/v8/tools/lexer-shell.cc nodejs-0.11.15/deps/v8/tools/lexer-shell.cc --- nodejs-0.11.13/deps/v8/tools/lexer-shell.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/lexer-shell.cc 2015-01-20 21:22:17.000000000 +0000 @@ -31,17 +31,18 @@ #include <stdlib.h> #include <string> #include <vector> -#include "v8.h" +#include "src/v8.h" -#include "api.h" -#include "messages.h" -#include "platform.h" -#include "runtime.h" -#include "scanner-character-streams.h" -#include "scopeinfo.h" -#include "shell-utils.h" -#include "string-stream.h" -#include "scanner.h" +#include "include/libplatform/libplatform.h" +#include "src/api.h" +#include "src/base/platform/platform.h" +#include "src/messages.h" +#include "src/runtime.h" +#include "src/scanner-character-streams.h" +#include "src/scopeinfo.h" +#include "tools/shell-utils.h" +#include "src/string-stream.h" +#include "src/scanner.h" using namespace v8::internal; @@ -52,7 +53,7 @@ BaselineScanner(const char* fname, Isolate* isolate, Encoding encoding, - ElapsedTimer* 
timer, + v8::base::ElapsedTimer* timer, int repeat) : stream_(NULL) { int length = 0; @@ -67,16 +68,14 @@ Handle<String> result = isolate->factory()->NewStringFromTwoByte( Vector<const uint16_t>( reinterpret_cast<const uint16_t*>(source_), - length / 2)); - CHECK_NOT_EMPTY_HANDLE(isolate, result); + length / 2)).ToHandleChecked(); stream_ = new GenericStringUtf16CharacterStream(result, 0, result->length()); break; } case LATIN1: { Handle<String> result = isolate->factory()->NewStringFromOneByte( - Vector<const uint8_t>(source_, length)); - CHECK_NOT_EMPTY_HANDLE(isolate, result); + Vector<const uint8_t>(source_, length)).ToHandleChecked(); stream_ = new GenericStringUtf16CharacterStream(result, 0, result->length()); break; @@ -129,13 +128,11 @@ }; -TimeDelta RunBaselineScanner(const char* fname, - Isolate* isolate, - Encoding encoding, - bool dump_tokens, - std::vector<TokenWithLocation>* tokens, - int repeat) { - ElapsedTimer timer; +v8::base::TimeDelta RunBaselineScanner(const char* fname, Isolate* isolate, + Encoding encoding, bool dump_tokens, + std::vector<TokenWithLocation>* tokens, + int repeat) { + v8::base::ElapsedTimer timer; BaselineScanner scanner(fname, isolate, encoding, &timer, repeat); Token::Value token; int beg, end; @@ -160,7 +157,7 @@ } -TimeDelta ProcessFile( +v8::base::TimeDelta ProcessFile( const char* fname, Encoding encoding, Isolate* isolate, @@ -171,7 +168,7 @@ } HandleScope handle_scope(isolate); std::vector<TokenWithLocation> baseline_tokens; - TimeDelta baseline_time; + v8::base::TimeDelta baseline_time; baseline_time = RunBaselineScanner( fname, isolate, encoding, print_tokens, &baseline_tokens, repeat); @@ -184,6 +181,8 @@ int main(int argc, char* argv[]) { v8::V8::InitializeICU(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); v8::V8::SetFlagsFromCommandLine(&argc, argv, true); Encoding encoding = LATIN1; bool print_tokens = false; @@ -208,19 +207,20 @@ 
fnames.push_back(std::string(argv[i])); } } - v8::Isolate* isolate = v8::Isolate::GetCurrent(); + v8::Isolate* isolate = v8::Isolate::New(); { + v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate); v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global); - ASSERT(!context.IsEmpty()); + DCHECK(!context.IsEmpty()); { v8::Context::Scope scope(context); - Isolate* isolate = Isolate::Current(); double baseline_total = 0; for (size_t i = 0; i < fnames.size(); i++) { - TimeDelta time; - time = ProcessFile(fnames[i].c_str(), encoding, isolate, print_tokens, + v8::base::TimeDelta time; + time = ProcessFile(fnames[i].c_str(), encoding, + reinterpret_cast<Isolate*>(isolate), print_tokens, repeat); baseline_total += time.InMillisecondsF(); } @@ -229,5 +229,7 @@ } } v8::V8::Dispose(); + v8::V8::ShutdownPlatform(); + delete platform; return 0; } diff -Nru nodejs-0.11.13/deps/v8/tools/lexer-shell.gyp nodejs-0.11.15/deps/v8/tools/lexer-shell.gyp --- nodejs-0.11.13/deps/v8/tools/lexer-shell.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/lexer-shell.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -37,6 +37,7 @@ 'type': 'executable', 'dependencies': [ '../tools/gyp/v8.gyp:v8', + '../tools/gyp/v8.gyp:v8_libplatform', ], 'conditions': [ ['v8_enable_i18n_support==1', { @@ -47,7 +48,7 @@ }], ], 'include_dirs+': [ - '../src', + '..', ], 'sources': [ 'lexer-shell.cc', @@ -59,6 +60,7 @@ 'type': 'executable', 'dependencies': [ '../tools/gyp/v8.gyp:v8', + '../tools/gyp/v8.gyp:v8_libplatform', ], 'conditions': [ ['v8_enable_i18n_support==1', { @@ -69,7 +71,7 @@ }], ], 'include_dirs+': [ - '../src', + '..', ], 'sources': [ 'parser-shell.cc', diff -Nru nodejs-0.11.13/deps/v8/tools/ll_prof.py nodejs-0.11.15/deps/v8/tools/ll_prof.py --- nodejs-0.11.13/deps/v8/tools/ll_prof.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/ll_prof.py 
2015-01-20 21:22:17.000000000 +0000 @@ -351,7 +351,8 @@ "ia32": ctypes.c_uint32, "arm": ctypes.c_uint32, "mips": ctypes.c_uint32, - "x64": ctypes.c_uint64 + "x64": ctypes.c_uint64, + "arm64": ctypes.c_uint64 } _CODE_CREATE_TAG = "C" diff -Nru nodejs-0.11.13/deps/v8/tools/merge-to-branch.sh nodejs-0.11.15/deps/v8/tools/merge-to-branch.sh --- nodejs-0.11.13/deps/v8/tools/merge-to-branch.sh 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/merge-to-branch.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,342 +0,0 @@ -#!/bin/bash -# Copyright 2012 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -########## Global variable definitions - -BRANCHNAME=prepare-merge -PERSISTFILE_BASENAME=/tmp/v8-merge-to-branch-tempfile -ALREADY_MERGING_SENTINEL_FILE="$PERSISTFILE_BASENAME-already-merging" -COMMIT_HASHES_FILE="$PERSISTFILE_BASENAME-PATCH_COMMIT_HASHES" -TEMPORARY_PATCH_FILE="$PERSISTFILE_BASENAME-temporary-patch" - -########## Function definitions - -source $(dirname $BASH_SOURCE)/common-includes.sh - -usage() { -cat << EOF -usage: $0 [OPTIONS]... [BRANCH] [REVISION]... - -Performs the necessary steps to merge revisions from bleeding_edge -to other branches, including trunk. - -OPTIONS: - -h Show this message - -s Specify the step where to start work. Default: 0. - -p Specify a patch file to apply as part of the merge - -m Specify a commit message for the patch - -r Reverse specified patches -EOF -} - -persist_patch_commit_hashes() { - echo "PATCH_COMMIT_HASHES=( ${PATCH_COMMIT_HASHES[@]} )" > $COMMIT_HASHES_FILE -} - -restore_patch_commit_hashes() { - source $COMMIT_HASHES_FILE -} - -restore_patch_commit_hashes_if_unset() { - [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && restore_patch_commit_hashes - [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && [[ -z "$EXTRA_PATCH" ]] && \ - die "Variable PATCH_COMMIT_HASHES could not be restored." 
-} - -########## Option parsing -REVERT_FROM_BLEEDING_EDGE=0 - -while getopts ":hs:fp:rm:R" OPTION ; do - case $OPTION in - h) usage - exit 0 - ;; - p) EXTRA_PATCH=$OPTARG - ;; - f) rm -f "$ALREADY_MERGING_SENTINEL_FILE" - ;; - r) REVERSE_PATCH="--reverse" - ;; - m) NEW_COMMIT_MSG=$OPTARG - ;; - s) START_STEP=$OPTARG - ;; - R) REVERSE_PATCH="--reverse" - REVERT_FROM_BLEEDING_EDGE=1 - ;; - ?) echo "Illegal option: -$OPTARG" - usage - exit 1 - ;; - esac -done -let OPTION_COUNT=$OPTIND-1 -shift $OPTION_COUNT - -########## Regular workflow - -# If there is a merge in progress, abort. -[[ -e "$ALREADY_MERGING_SENTINEL_FILE" ]] && [[ $START_STEP -eq 0 ]] \ - && die "A merge is already in progress" -touch "$ALREADY_MERGING_SENTINEL_FILE" - -initial_environment_checks - -if [ $START_STEP -le $CURRENT_STEP ] ; then - let MIN_EXPECTED_ARGS=2-$REVERT_FROM_BLEEDING_EDGE - if [ ${#@} -lt $MIN_EXPECTED_ARGS ] ; then - if [ -z "$EXTRA_PATCH" ] ; then - die "Either a patch file or revision numbers must be specified" - fi - if [ -z "$NEW_COMMIT_MSG" ] ; then - die "You must specify a merge comment if no patches are specified" - fi - fi - echo ">>> Step $CURRENT_STEP: Preparation" - if [ $REVERT_FROM_BLEEDING_EDGE -eq 1 ] ; then - MERGE_TO_BRANCH="bleeding_edge" - else - MERGE_TO_BRANCH=$1 - [[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to" - shift - fi - persist "MERGE_TO_BRANCH" - common_prepare -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Create a fresh branch for the patch." - restore_if_unset "MERGE_TO_BRANCH" - git checkout -b $BRANCHNAME svn/$MERGE_TO_BRANCH \ - || die "Creating branch $BRANCHNAME failed." -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Search for corresponding architecture ports." - for REVISION in "$@" ; do - # Add the revision to the array if it isn't already added. - if [[ ! 
"${FULL_REVISION_LIST[@]}" =~ (^| )$REVISION($| ) ]] ; then - FULL_REVISION_LIST=("${FULL_REVISION_LIST[@]}" "$REVISION") - fi - # Search for commits which matches the "Port rXXX" pattern. - GIT_HASHES=$(git log svn/bleeding_edge --reverse \ - --format=%H --grep="Port r$REVISION") - if [ -n "$GIT_HASHES" ]; then - while read -r NEXT_GIT_HASH; do - NEXT_SVN_REVISION=$(git svn find-rev $NEXT_GIT_HASH svn/bleeding_edge) - [[ -n "$NEXT_SVN_REVISION" ]] \ - || die "Cannot determine svn revision for $NEXT_GIT_HASH" - FULL_REVISION_LIST=("${FULL_REVISION_LIST[@]}" "$NEXT_SVN_REVISION") - REVISION_TITLE=$(git log -1 --format=%s $NEXT_GIT_HASH) - # Is this revision included in the original revision list? - if [[ $@ =~ (^| )$NEXT_SVN_REVISION($| ) ]] ; then - echo "Found port of r$REVISION -> \ -r$NEXT_SVN_REVISION (already included): $REVISION_TITLE" - else - echo "Found port of r$REVISION -> \ -r$NEXT_SVN_REVISION: $REVISION_TITLE" - PORT_REVISION_LIST=("${PORT_REVISION_LIST[@]}" "$NEXT_SVN_REVISION") - fi - done <<< "$GIT_HASHES" - fi - done - # Next step expects a list, not an array. - FULL_REVISION_LIST="${FULL_REVISION_LIST[@]}" - # Do we find any port? - if [ ${#PORT_REVISION_LIST[@]} -ne 0 ] ; then - confirm "Automatically add corresponding ports (${PORT_REVISION_LIST[*]})?" - #: 'n': Restore the original revision list. - if [ $? -ne 0 ] ; then - FULL_REVISION_LIST="$@" - fi - fi - persist "FULL_REVISION_LIST" -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Find the git \ -revisions associated with the patches." 
- restore_if_unset "FULL_REVISION_LIST" - current=0 - for REVISION in $FULL_REVISION_LIST ; do - NEXT_HASH=$(git svn find-rev "r$REVISION" svn/bleeding_edge) - [[ -n "$NEXT_HASH" ]] \ - || die "Cannot determine git hash for r$REVISION" - PATCH_COMMIT_HASHES[$current]="$NEXT_HASH" - [[ -n "$REVISION_LIST" ]] && REVISION_LIST="$REVISION_LIST," - REVISION_LIST="$REVISION_LIST r$REVISION" - let current+=1 - done - if [ -n "$REVISION_LIST" ] ; then - if [ -n "$REVERSE_PATCH" ] ; then - if [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then - NEW_COMMIT_MSG="Rollback of$REVISION_LIST in $MERGE_TO_BRANCH branch." - else - NEW_COMMIT_MSG="Revert$REVISION_LIST." - fi - else - NEW_COMMIT_MSG="Merged$REVISION_LIST into $MERGE_TO_BRANCH branch." - fi; - fi; - - echo "$NEW_COMMIT_MSG" > $COMMITMSG_FILE - echo "" >> $COMMITMSG_FILE - for HASH in ${PATCH_COMMIT_HASHES[@]} ; do - PATCH_MERGE_DESCRIPTION=$(git log -1 --format=%s $HASH) - echo "$PATCH_MERGE_DESCRIPTION" >> $COMMITMSG_FILE - echo "" >> $COMMITMSG_FILE - done - for HASH in ${PATCH_COMMIT_HASHES[@]} ; do - BUG=$(git log -1 $HASH | grep "BUG=" | awk -F '=' '{print $NF}') - if [ -n "$BUG" ] ; then - [[ -n "$BUG_AGGREGATE" ]] && BUG_AGGREGATE="$BUG_AGGREGATE," - BUG_AGGREGATE="$BUG_AGGREGATE$BUG" - fi - done - if [ -n "$BUG_AGGREGATE" ] ; then - echo "BUG=$BUG_AGGREGATE" >> $COMMITMSG_FILE - echo "LOG=N" >> $COMMITMSG_FILE - fi - persist "NEW_COMMIT_MSG" - persist "REVISION_LIST" - persist_patch_commit_hashes -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Apply patches for selected revisions." - restore_if_unset "MERGE_TO_BRANCH" - restore_patch_commit_hashes_if_unset "PATCH_COMMIT_HASHES" - for HASH in ${PATCH_COMMIT_HASHES[@]} ; do - echo "Applying patch for $HASH to $MERGE_TO_BRANCH..." 
- git log -1 -p $HASH > "$TEMPORARY_PATCH_FILE" - apply_patch "$TEMPORARY_PATCH_FILE" - done - if [ -n "$EXTRA_PATCH" ] ; then - apply_patch "$EXTRA_PATCH" - fi -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then - echo ">>> Step $CURRENT_STEP: Prepare $VERSION_FILE." - # These version numbers are used again for creating the tag - read_and_persist_version -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then - echo ">>> Step $CURRENT_STEP: Increment version number." - restore_if_unset "PATCH" - NEWPATCH=$(($PATCH + 1)) - confirm "Automatically increment PATCH_LEVEL? (Saying 'n' will fire up \ -your EDITOR on $VERSION_FILE so you can make arbitrary changes. When \ -you're done, save the file and exit your EDITOR.)" - if [ $? -eq 0 ] ; then - echo $NEWPATCH $VERSION_FILE - sed -e "/#define PATCH_LEVEL/s/[0-9]*$/$NEWPATCH/" \ - -i.bak "$VERSION_FILE" || die "Could not increment patch level" - else - $EDITOR "$VERSION_FILE" - fi - read_and_persist_version "NEW" -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Commit to local branch." - git commit -a -F "$COMMITMSG_FILE" \ - || die "'git commit -a' failed." -fi - -upload_step - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Commit to the repository." 
- restore_if_unset "MERGE_TO_BRANCH" - git checkout $BRANCHNAME \ - || die "cannot ensure that the current branch is $BRANCHNAME" - wait_for_lgtm - PRESUBMIT_TREE_CHECK="skip" git cl presubmit \ - || die "presubmit failed" - PRESUBMIT_TREE_CHECK="skip" git cl dcommit --bypass-hooks \ - || die "failed to commit to $MERGE_TO_BRANCH" -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then - echo ">>> Step $CURRENT_STEP: Determine svn commit revision" - restore_if_unset "NEW_COMMIT_MSG" - restore_if_unset "MERGE_TO_BRANCH" - git svn fetch || die "'git svn fetch' failed." - COMMIT_HASH=$(git log -1 --format=%H --grep="$NEW_COMMIT_MSG" \ - svn/$MERGE_TO_BRANCH) - [[ -z "$COMMIT_HASH" ]] && die "Unable to map git commit to svn revision" - SVN_REVISION=$(git svn find-rev $COMMIT_HASH) - echo "subversion revision number is r$SVN_REVISION" - persist "SVN_REVISION" -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then - echo ">>> Step $CURRENT_STEP: Create the tag." - restore_if_unset "SVN_REVISION" - restore_version_if_unset "NEW" - echo "Creating tag svn/tags/$NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH" - if [ "$MERGE_TO_BRANCH" == "trunk" ] ; then - TO_URL="$MERGE_TO_BRANCH" - else - TO_URL="branches/$MERGE_TO_BRANCH" - fi - svn copy -r $SVN_REVISION \ - https://v8.googlecode.com/svn/$TO_URL \ - https://v8.googlecode.com/svn/tags/$NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH \ - -m "Tagging version $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH" - persist "TO_URL" -fi - -let CURRENT_STEP+=1 -if [ $START_STEP -le $CURRENT_STEP ] ; then - echo ">>> Step $CURRENT_STEP: Cleanup." 
- restore_if_unset "SVN_REVISION" - restore_if_unset "TO_URL" - restore_if_unset "REVISION_LIST" - restore_version_if_unset "NEW" - common_cleanup - if [ $REVERT_FROM_BLEEDING_EDGE==0 ] ; then - echo "*** SUMMARY ***" - echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH" - echo "branch: $TO_URL" - echo "svn revision: $SVN_REVISION" - [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST" - fi -fi diff -Nru nodejs-0.11.13/deps/v8/tools/oom_dump/oom_dump.cc nodejs-0.11.15/deps/v8/tools/oom_dump/oom_dump.cc --- nodejs-0.11.13/deps/v8/tools/oom_dump/oom_dump.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/oom_dump/oom_dump.cc 2015-01-20 21:22:17.000000000 +0000 @@ -32,8 +32,6 @@ #include <google_breakpad/processor/minidump.h> -#define ENABLE_DEBUGGER_SUPPORT - #include <v8.h> namespace { diff -Nru nodejs-0.11.13/deps/v8/tools/parser-shell.cc nodejs-0.11.15/deps/v8/tools/parser-shell.cc --- nodejs-0.11.13/deps/v8/tools/parser-shell.cc 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/parser-shell.cc 2015-01-20 21:22:17.000000000 +0000 @@ -31,28 +31,35 @@ #include <stdlib.h> #include <string> #include <vector> -#include "v8.h" +#include "src/v8.h" -#include "api.h" -#include "compiler.h" -#include "scanner-character-streams.h" -#include "shell-utils.h" -#include "parser.h" -#include "preparse-data-format.h" -#include "preparse-data.h" -#include "preparser.h" +#include "include/libplatform/libplatform.h" +#include "src/api.h" +#include "src/compiler.h" +#include "src/scanner-character-streams.h" +#include "tools/shell-utils.h" +#include "src/parser.h" +#include "src/preparse-data-format.h" +#include "src/preparse-data.h" +#include "src/preparser.h" using namespace v8::internal; -enum TestMode { - PreParseAndParse, - PreParse, - Parse +class StringResource8 : public v8::String::ExternalAsciiStringResource { + public: + StringResource8(const char* data, int length) + : data_(data), length_(length) { } + virtual size_t 
length() const { return length_; } + virtual const char* data() const { return data_; } + + private: + const char* data_; + int length_; }; -std::pair<TimeDelta, TimeDelta> RunBaselineParser( +std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser( const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate, - v8::Handle<v8::Context> context, TestMode test_mode) { + v8::Handle<v8::Context> context) { int length = 0; const byte* source = ReadFileAndRepeat(fname, &length, repeat); v8::Handle<v8::String> source_handle; @@ -69,54 +76,58 @@ break; } case LATIN1: { - source_handle = v8::String::NewFromOneByte(isolate, source); + StringResource8* string_resource = + new StringResource8(reinterpret_cast<const char*>(source), length); + source_handle = v8::String::NewExternal(isolate, string_resource); break; } } - v8::ScriptData* cached_data = NULL; - TimeDelta preparse_time, parse_time; - if (test_mode == PreParseAndParse || test_mode == PreParse) { - ElapsedTimer timer; + v8::base::TimeDelta parse_time1, parse_time2; + Handle<Script> script = Isolate::Current()->factory()->NewScript( + v8::Utils::OpenHandle(*source_handle)); + i::ScriptData* cached_data_impl = NULL; + // First round of parsing (produce data to cache). + { + CompilationInfoWithZone info(script); + info.MarkAsGlobal(); + info.SetCachedData(&cached_data_impl, + v8::ScriptCompiler::kProduceParserCache); + v8::base::ElapsedTimer timer; timer.Start(); - cached_data = v8::ScriptData::PreCompile(source_handle); - preparse_time = timer.Elapsed(); - if (cached_data == NULL || cached_data->HasError()) { - fprintf(stderr, "Preparsing failed\n"); - return std::make_pair(TimeDelta(), TimeDelta()); + // Allow lazy parsing; otherwise we won't produce cached data. 
+ bool success = Parser::Parse(&info, true); + parse_time1 = timer.Elapsed(); + if (!success) { + fprintf(stderr, "Parsing failed\n"); + return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta()); } } - if (test_mode == PreParseAndParse || test_mode == Parse) { - Handle<String> str = v8::Utils::OpenHandle(*source_handle); - i::Isolate* internal_isolate = str->GetIsolate(); - Handle<Script> script = internal_isolate->factory()->NewScript(str); + // Second round of parsing (consume cached data). + { CompilationInfoWithZone info(script); info.MarkAsGlobal(); - i::ScriptDataImpl* cached_data_impl = - static_cast<i::ScriptDataImpl*>(cached_data); - if (test_mode == PreParseAndParse) { - info.SetCachedData(&cached_data_impl, - i::CONSUME_CACHED_DATA); - } - info.SetContext(v8::Utils::OpenHandle(*context)); - ElapsedTimer timer; + info.SetCachedData(&cached_data_impl, + v8::ScriptCompiler::kConsumeParserCache); + v8::base::ElapsedTimer timer; timer.Start(); - // Allow lazy parsing; otherwise the preparse data won't help. + // Allow lazy parsing; otherwise cached data won't help. 
bool success = Parser::Parse(&info, true); - parse_time = timer.Elapsed(); + parse_time2 = timer.Elapsed(); if (!success) { fprintf(stderr, "Parsing failed\n"); - return std::make_pair(TimeDelta(), TimeDelta()); + return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta()); } } - return std::make_pair(preparse_time, parse_time); + return std::make_pair(parse_time1, parse_time2); } int main(int argc, char* argv[]) { v8::V8::InitializeICU(); + v8::Platform* platform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(platform); v8::V8::SetFlagsFromCommandLine(&argc, argv, true); Encoding encoding = LATIN1; - TestMode test_mode = PreParseAndParse; std::vector<std::string> fnames; std::string benchmark; int repeat = 1; @@ -127,12 +138,6 @@ encoding = UTF8; } else if (strcmp(argv[i], "--utf16") == 0) { encoding = UTF16; - } else if (strcmp(argv[i], "--preparse-and-parse") == 0) { - test_mode = PreParseAndParse; - } else if (strcmp(argv[i], "--preparse") == 0) { - test_mode = PreParse; - } else if (strcmp(argv[i], "--parse") == 0) { - test_mode = Parse; } else if (strncmp(argv[i], "--benchmark=", 12) == 0) { benchmark = std::string(argv[i]).substr(12); } else if (strncmp(argv[i], "--repeat=", 9) == 0) { @@ -142,30 +147,33 @@ fnames.push_back(std::string(argv[i])); } } - v8::Isolate* isolate = v8::Isolate::GetCurrent(); + v8::Isolate* isolate = v8::Isolate::New(); { + v8::Isolate::Scope isolate_scope(isolate); v8::HandleScope handle_scope(isolate); v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate); v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global); - ASSERT(!context.IsEmpty()); + DCHECK(!context.IsEmpty()); { v8::Context::Scope scope(context); - double preparse_total = 0; - double parse_total = 0; + double first_parse_total = 0; + double second_parse_total = 0; for (size_t i = 0; i < fnames.size(); i++) { - std::pair<TimeDelta, TimeDelta> time = RunBaselineParser( - fnames[i].c_str(), encoding, 
repeat, isolate, context, test_mode); - preparse_total += time.first.InMillisecondsF(); - parse_total += time.second.InMillisecondsF(); + std::pair<v8::base::TimeDelta, v8::base::TimeDelta> time = + RunBaselineParser(fnames[i].c_str(), encoding, repeat, isolate, + context); + first_parse_total += time.first.InMillisecondsF(); + second_parse_total += time.second.InMillisecondsF(); } if (benchmark.empty()) benchmark = "Baseline"; - printf("%s(PreParseRunTime): %.f ms\n", benchmark.c_str(), - preparse_total); - printf("%s(ParseRunTime): %.f ms\n", benchmark.c_str(), parse_total); - printf("%s(RunTime): %.f ms\n", benchmark.c_str(), - preparse_total + parse_total); + printf("%s(FirstParseRunTime): %.f ms\n", benchmark.c_str(), + first_parse_total); + printf("%s(SecondParseRunTime): %.f ms\n", benchmark.c_str(), + second_parse_total); } } v8::V8::Dispose(); + v8::V8::ShutdownPlatform(); + delete platform; return 0; } diff -Nru nodejs-0.11.13/deps/v8/tools/plot-timer-events nodejs-0.11.15/deps/v8/tools/plot-timer-events --- nodejs-0.11.13/deps/v8/tools/plot-timer-events 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/plot-timer-events 2015-01-20 21:22:17.000000000 +0000 @@ -10,29 +10,43 @@ done tools_path=`cd $(dirname "$0");pwd` -if [ ! "$D8_PATH" ]; then +if test ! "$D8_PATH"; then d8_public=`which d8` - if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi + if test -x "$d8_public"; then D8_PATH=$(dirname "$d8_public"); fi fi -[ -n "$D8_PATH" ] || D8_PATH=$tools_path/.. + +if test ! -n "$D8_PATH"; then + D8_PATH=$tools_path/.. +fi + d8_exec=$D8_PATH/d8 -if [ ! -x "$d8_exec" ]; then +if test ! -x "$d8_exec"; then D8_PATH=`pwd`/out/native d8_exec=$D8_PATH/d8 fi -if [ ! -x "$d8_exec" ]; then +if test ! -x "$d8_exec"; then d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'` fi -if [ ! -x "$d8_exec" ]; then +if test ! 
-x "$d8_exec"; then echo "d8 shell not found in $D8_PATH" echo "To build, execute 'make native' from the V8 directory" exit 1 fi -if [[ "$@" != *--distortion* ]]; then + +contains=0; +for arg in "$@"; do + `echo "$arg" | grep -q "^--distortion"` + if test $? -eq 0; then + contains=1 + break + fi +done + +if test "$contains" -eq 0; then # Try to find out how much the instrumentation overhead is. calibration_log=calibration.log calibration_script="for (var i = 0; i < 1000000; i++) print();" @@ -70,7 +84,7 @@ -- $@ $options 2>/dev/null > timer-events.plot success=$? -if [[ $success != 0 ]] ; then +if test $success -ne 0; then cat timer-events.plot else cat timer-events.plot | gnuplot > timer-events.png diff -Nru nodejs-0.11.13/deps/v8/tools/presubmit.py nodejs-0.11.15/deps/v8/tools/presubmit.py --- nodejs-0.11.13/deps/v8/tools/presubmit.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/presubmit.py 2015-01-20 21:22:17.000000000 +0000 @@ -54,6 +54,7 @@ build/deprecated build/endif_comment build/forward_decl +build/include_alpha build/include_order build/printf_format build/storage_class @@ -61,7 +62,6 @@ readability/boost readability/braces readability/casting -readability/check readability/constructors readability/fn_size readability/function @@ -80,7 +80,6 @@ runtime/nonconf runtime/printf runtime/printf_format -runtime/references runtime/rtti runtime/sizeof runtime/string @@ -101,6 +100,7 @@ whitespace/todo """.split() +# TODO(bmeurer): Fix and re-enable readability/check LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing') @@ -200,7 +200,8 @@ def IgnoreDir(self, name): return (name.startswith('.') or - name in ('data', 'kraken', 'octane', 'sunspider')) + name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken', + 'octane', 'sunspider')) def IgnoreFile(self, name): return name.startswith('.') @@ -235,7 +236,10 @@ or (name in CppLintProcessor.IGNORE_LINT)) def GetPathsToSearch(self): - return ['src', 'include', 'samples', 
join('test', 'cctest')] + return ['src', 'include', 'samples', + join('test', 'base-unittests'), + join('test', 'cctest'), + join('test', 'compiler-unittests')] def GetCpplintScript(self, prio_path): for path in [prio_path] + os.environ["PATH"].split(os.pathsep): @@ -305,7 +309,8 @@ if self.IgnoreDir(dir_part): break else: - if self.IsRelevant(file) and not self.IgnoreFile(file): + if (self.IsRelevant(file) and os.path.exists(file) + and not self.IgnoreFile(file)): result.append(join(path, file)) if output.wait() == 0: return result @@ -415,6 +420,19 @@ return success +def CheckGeneratedRuntimeTests(workspace): + code = subprocess.call( + [sys.executable, join(workspace, "tools", "generate-runtime-tests.py"), + "check"]) + return code == 0 + + +def CheckExternalReferenceRegistration(workspace): + code = subprocess.call( + [sys.executable, join(workspace, "tools", "external-reference-check.py")]) + return code == 0 + + def GetOptions(): result = optparse.OptionParser() result.add_option('--no-lint', help="Do not run cpplint", default=False, @@ -433,6 +451,8 @@ print "Running copyright header, trailing whitespaces and " \ "two empty lines between declarations check..." success = SourceProcessor().Run(workspace) and success + success = CheckGeneratedRuntimeTests(workspace) and success + success = CheckExternalReferenceRegistration(workspace) and success if success: return 0 else: diff -Nru nodejs-0.11.13/deps/v8/tools/profile_view.js nodejs-0.11.15/deps/v8/tools/profile_view.js --- nodejs-0.11.13/deps/v8/tools/profile_view.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/profile_view.js 2015-01-20 21:22:17.000000000 +0000 @@ -169,24 +169,6 @@ /** - * Returns a share of the function's total time in application's total time. - */ -ProfileView.Node.prototype.__defineGetter__( - 'totalPercent', - function() { return this.totalTime / - (this.head ? 
this.head.totalTime : this.totalTime) * 100.0; }); - - -/** - * Returns a share of the function's self time in application's total time. - */ -ProfileView.Node.prototype.__defineGetter__( - 'selfPercent', - function() { return this.selfTime / - (this.head ? this.head.totalTime : this.totalTime) * 100.0; }); - - -/** * Returns a share of the function's total time in its parent's total time. */ ProfileView.Node.prototype.__defineGetter__( diff -Nru nodejs-0.11.13/deps/v8/tools/profviz/composer.js nodejs-0.11.15/deps/v8/tools/profviz/composer.js --- nodejs-0.11.13/deps/v8/tools/profviz/composer.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/profviz/composer.js 2015-01-20 21:22:17.000000000 +0000 @@ -43,6 +43,7 @@ var kY1Offset = 11; // Offset for stack frame vs. event lines. var kDeoptRow = 7; // Row displaying deopts. + var kGetTimeHeight = 0.5; // Height of marker displaying timed part. var kMaxDeoptLength = 4; // Draw size of the largest deopt. var kPauseLabelPadding = 5; // Padding for pause time labels. var kNumPauseLabels = 7; // Number of biggest pauses to label. @@ -105,6 +106,8 @@ new TimerEvent("recompile async", "#CC4499", false, 1), 'V8.CompileEval': new TimerEvent("compile eval", "#CC4400", true, 0), + 'V8.IcMiss': + new TimerEvent("ic miss", "#CC9900", false, 0), 'V8.Parse': new TimerEvent("parse", "#00CC00", true, 0), 'V8.PreParse': @@ -134,6 +137,7 @@ var code_map = new CodeMap(); var execution_pauses = []; var deopts = []; + var gettime = []; var event_stack = []; var last_time_stamp = []; for (var i = 0; i < kNumThreads; i++) { @@ -272,6 +276,10 @@ deopts.push(new Deopt(time, size)); } + var processCurrentTimeEvent = function(time) { + gettime.push(time); + } + var processSharedLibrary = function(name, start, end) { var code_entry = new CodeMap.CodeEntry(end - start, name); code_entry.kind = -3; // External code kind. 
@@ -314,6 +322,8 @@ processor: processCodeDeleteEvent }, 'code-deopt': { parsers: [parseTimeStamp, parseInt], processor: processCodeDeoptEvent }, + 'current-time': { parsers: [parseTimeStamp], + processor: processCurrentTimeEvent }, 'tick': { parsers: [parseInt, parseTimeStamp, null, null, parseInt, 'var-args'], processor: processTickEvent } @@ -389,12 +399,15 @@ output("set xtics out nomirror"); output("unset key"); - function DrawBarBase(color, start, end, top, bottom) { + function DrawBarBase(color, start, end, top, bottom, transparency) { obj_index++; command = "set object " + obj_index + " rect"; command += " from " + start + ", " + top; command += " to " + end + ", " + bottom; command += " fc rgb \"" + color + "\""; + if (transparency) { + command += " fs transparent solid " + transparency; + } output(command); } @@ -411,7 +424,6 @@ for (var name in TimerEvents) { var event = TimerEvents[name]; var ranges = RestrictRangesTo(event.ranges, range_start, range_end); - ranges = MergeRanges(ranges); var sum = ranges.map(function(range) { return range.duration(); }) .reduce(function(a, b) { return a + b; }, 0); @@ -429,6 +441,13 @@ deopt.size / max_deopt_size * kMaxDeoptLength); } + // Plot current time polls. + if (gettime.length > 1) { + var start = gettime[0]; + var end = gettime.pop(); + DrawBarBase("#0000BB", start, end, kGetTimeHeight, 0, 0.2); + } + // Name Y-axis. var ytics = []; for (name in TimerEvents) { @@ -502,7 +521,8 @@ execution_pauses.sort( function(a, b) { return b.duration() - a.duration(); }); - var max_pause_time = execution_pauses[0].duration(); + var max_pause_time = execution_pauses.length > 0 + ? 
execution_pauses[0].duration() : 0; padding = kPauseLabelPadding * (range_end - range_start) / kResX; var y_scale = kY1Offset / max_pause_time / 2; for (var i = 0; i < execution_pauses.length && i < kNumPauseLabels; i++) { diff -Nru nodejs-0.11.13/deps/v8/tools/profviz/stdio.js nodejs-0.11.15/deps/v8/tools/profviz/stdio.js --- nodejs-0.11.13/deps/v8/tools/profviz/stdio.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/profviz/stdio.js 2015-01-20 21:22:17.000000000 +0000 @@ -43,7 +43,7 @@ if (!isNaN(range_end)) range_end_override = range_end; var kResX = 1600; -var kResY = 600; +var kResY = 700; function log_error(text) { print(text); quit(1); diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/auto_push.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/auto_push.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/auto_push.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/auto_push.py 2015-01-20 21:22:17.000000000 +0000 @@ -39,7 +39,7 @@ SETTINGS_LOCATION = "SETTINGS_LOCATION" CONFIG = { - PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile", + PERSISTFILE_BASENAME: "/tmp/v8-auto-push-tempfile", DOT_GIT_LOCATION: ".git", SETTINGS_LOCATION: "~/.auto-roll", } diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/auto_roll.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/auto_roll.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/auto_roll.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/auto_roll.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,139 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import argparse +import json +import os +import sys +import urllib + +from common_includes import * +import chromium_roll + +CLUSTERFUZZ_API_KEY_FILE = "CLUSTERFUZZ_API_KEY_FILE" + +CONFIG = { + PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile", + CLUSTERFUZZ_API_KEY_FILE: ".cf_api_key", +} + +CR_DEPS_URL = 'http://src.chromium.org/svn/trunk/src/DEPS' + +class CheckActiveRoll(Step): + MESSAGE = "Check active roll." + + @staticmethod + def ContainsChromiumRoll(changes): + for change in changes: + if change["subject"].startswith("Update V8 to"): + return True + return False + + def RunStep(self): + params = { + "closed": 3, + "owner": self._options.author, + "limit": 30, + "format": "json", + } + params = urllib.urlencode(params) + search_url = "https://codereview.chromium.org/search" + result = self.ReadURL(search_url, params, wait_plan=[5, 20]) + if self.ContainsChromiumRoll(json.loads(result)["results"]): + print "Stop due to existing Chromium roll." + return True + + +class DetectLastPush(Step): + MESSAGE = "Detect commit ID of the last push to trunk." + + def RunStep(self): + push_hash = self.FindLastTrunkPush(include_patches=True) + self["last_push"] = self.GitSVNFindSVNRev(push_hash) + + +class DetectLastRoll(Step): + MESSAGE = "Detect commit ID of the last Chromium roll." + + def RunStep(self): + # Interpret the DEPS file to retrieve the v8 revision. + Var = lambda var: '%s' + exec(self.ReadURL(CR_DEPS_URL)) + last_roll = vars['v8_revision'] + if last_roll >= self["last_push"]: + print("There is no newer v8 revision than the one in Chromium (%s)." + % last_roll) + return True + + +class CheckClusterFuzz(Step): + MESSAGE = "Check ClusterFuzz api for new problems." + + def RunStep(self): + if not os.path.exists(self.Config(CLUSTERFUZZ_API_KEY_FILE)): + print "Skipping ClusterFuzz check. No api key file found." + return False + api_key = FileToText(self.Config(CLUSTERFUZZ_API_KEY_FILE)) + # Check for open, reproducible issues that have no associated bug. 
+ result = self._side_effect_handler.ReadClusterFuzzAPI( + api_key, job_type="linux_asan_d8_dbg", reproducible="True", + open="True", bug_information="", + revision_greater_or_equal=str(self["last_push"])) + if result: + print "Stop due to pending ClusterFuzz issues." + return True + + +class RollChromium(Step): + MESSAGE = "Roll V8 into Chromium." + + def RunStep(self): + if self._options.roll: + args = [ + "--author", self._options.author, + "--reviewer", self._options.reviewer, + "--chromium", self._options.chromium, + "--force", + "--use-commit-queue", + ] + if self._options.sheriff: + args.extend([ + "--sheriff", "--googlers-mapping", self._options.googlers_mapping]) + R = chromium_roll.ChromiumRoll + self._side_effect_handler.Call( + R(chromium_roll.CONFIG, self._side_effect_handler).Run, + args) + + +class AutoRoll(ScriptsBase): + def _PrepareOptions(self, parser): + parser.add_argument("-c", "--chromium", required=True, + help=("The path to your Chromium src/ " + "directory to automate the V8 roll.")) + parser.add_argument("--roll", + help="Make Chromium roll. Dry run if unspecified.", + default=False, action="store_true") + + def _ProcessOptions(self, options): # pragma: no cover + if not options.reviewer: + print "A reviewer (-r) is required." + return False + if not options.author: + print "An author (-a) is required." + return False + return True + + def _Steps(self): + return [ + CheckActiveRoll, + DetectLastPush, + DetectLastRoll, + CheckClusterFuzz, + RollChromium, + ] + + +if __name__ == "__main__": # pragma: no cover + sys.exit(AutoRoll(CONFIG).Run()) diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/auto_tag.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/auto_tag.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/auto_tag.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/auto_tag.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,200 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. 
All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import argparse +import sys + +from common_includes import * + +CONFIG = { + BRANCHNAME: "auto-tag-v8", + PERSISTFILE_BASENAME: "/tmp/v8-auto-tag-tempfile", + DOT_GIT_LOCATION: ".git", + VERSION_FILE: "src/version.cc", +} + + +class Preparation(Step): + MESSAGE = "Preparation." + + def RunStep(self): + self.CommonPrepare() + self.PrepareBranch() + self.GitCheckout("master") + self.GitSVNRebase() + + +class GetTags(Step): + MESSAGE = "Get all V8 tags." + + def RunStep(self): + self.GitCreateBranch(self._config[BRANCHNAME]) + + # Get remote tags. + tags = filter(lambda s: re.match(r"^svn/tags/[\d+\.]+$", s), + self.GitRemotes()) + + # Remove 'svn/tags/' prefix. + self["tags"] = map(lambda s: s[9:], tags) + + +class GetOldestUntaggedVersion(Step): + MESSAGE = "Check if there's a version on bleeding edge without a tag." + + def RunStep(self): + tags = set(self["tags"]) + self["candidate"] = None + self["candidate_version"] = None + self["next"] = None + self["next_version"] = None + + # Iterate backwards through all automatic version updates. + for git_hash in self.GitLog( + format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines(): + + # Get the version. + if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash): + continue + + self.ReadAndPersistVersion() + version = self.ArrayToVersion("") + + # Strip off trailing patch level (tags don't include tag level 0). + if version.endswith(".0"): + version = version[:-2] + + # Clean up checked-out version file. + self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD") + + if version in tags: + if self["candidate"]: + # Revision "git_hash" is tagged already and "candidate" was the next + # newer revision without a tag. + break + else: + print("Stop as %s is the latest version and it has been tagged." 
% + version) + self.CommonCleanup() + return True + else: + # This is the second oldest version without a tag. + self["next"] = self["candidate"] + self["next_version"] = self["candidate_version"] + + # This is the oldest version without a tag. + self["candidate"] = git_hash + self["candidate_version"] = version + + if not self["candidate"] or not self["candidate_version"]: + print "Nothing found to tag." + self.CommonCleanup() + return True + + print("Candidate for tagging is %s with version %s" % + (self["candidate"], self["candidate_version"])) + + +class GetLKGRs(Step): + MESSAGE = "Get the last lkgrs." + + def RunStep(self): + revision_url = "https://v8-status.appspot.com/revisions?format=json" + status_json = self.ReadURL(revision_url, wait_plan=[5, 20]) + self["lkgrs"] = [entry["revision"] + for entry in json.loads(status_json) if entry["status"]] + + +class CalculateTagRevision(Step): + MESSAGE = "Calculate the revision to tag." + + def LastLKGR(self, min_rev, max_rev): + """Finds the newest lkgr between min_rev (inclusive) and max_rev + (exclusive). + """ + for lkgr in self["lkgrs"]: + # LKGRs are reverse sorted. + if int(min_rev) <= int(lkgr) and int(lkgr) < int(max_rev): + return lkgr + return None + + def RunStep(self): + # Get the lkgr after the tag candidate and before the next tag candidate. + candidate_svn = self.GitSVNFindSVNRev(self["candidate"]) + if self["next"]: + next_svn = self.GitSVNFindSVNRev(self["next"]) + else: + # Don't include the version change commit itself if there is no upper + # limit yet. + candidate_svn = str(int(candidate_svn) + 1) + next_svn = sys.maxint + lkgr_svn = self.LastLKGR(candidate_svn, next_svn) + + if not lkgr_svn: + print "There is no lkgr since the candidate version yet." + self.CommonCleanup() + return True + + # Let's check if the lkgr is at least three hours old. 
+ self["lkgr"] = self.GitSVNFindGitHash(lkgr_svn) + if not self["lkgr"]: + print "Couldn't find git hash for lkgr %s" % lkgr_svn + self.CommonCleanup() + return True + + lkgr_utc_time = int(self.GitLog(n=1, format="%at", git_hash=self["lkgr"])) + current_utc_time = self._side_effect_handler.GetUTCStamp() + + if current_utc_time < lkgr_utc_time + 10800: + print "Candidate lkgr %s is too recent for tagging." % lkgr_svn + self.CommonCleanup() + return True + + print "Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"]) + + +class MakeTag(Step): + MESSAGE = "Tag the version." + + def RunStep(self): + if not self._options.dry_run: + self.GitReset(self["lkgr"]) + self.GitSVNTag(self["candidate_version"]) + + +class CleanUp(Step): + MESSAGE = "Clean up." + + def RunStep(self): + self.CommonCleanup() + + +class AutoTag(ScriptsBase): + def _PrepareOptions(self, parser): + parser.add_argument("--dry_run", help="Don't tag the new version.", + default=False, action="store_true") + + def _ProcessOptions(self, options): # pragma: no cover + if not options.dry_run and not options.author: + print "Specify your chromium.org email with -a" + return False + options.wait_for_lgtm = False + options.force_readline_defaults = True + options.force_upload = True + return True + + def _Steps(self): + return [ + Preparation, + GetTags, + GetOldestUntaggedVersion, + GetLKGRs, + CalculateTagRevision, + MakeTag, + CleanUp, + ] + + +if __name__ == "__main__": # pragma: no cover + sys.exit(AutoTag(CONFIG).Run()) diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/bump_up_version.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/bump_up_version.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/bump_up_version.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/bump_up_version.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,241 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Script for auto-increasing the version on bleeding_edge. + +The script can be run regularly by a cron job. It will increase the build +level of the version on bleeding_edge if: +- the lkgr version is smaller than the version of the latest revision, +- the lkgr version is not a version change itself, +- the tree is not closed for maintenance. + +The new version will be the maximum of the bleeding_edge and trunk versions +1. +E.g. latest bleeding_edge version: 3.22.11.0 and latest trunk 3.23.0.0 gives +the new version 3.23.1.0. + +This script requires a depot tools git checkout. I.e. 'fetch v8'. +""" + +import argparse +import os +import sys + +from common_includes import * + +CONFIG = { + PERSISTFILE_BASENAME: "/tmp/v8-bump-up-version-tempfile", + VERSION_FILE: "src/version.cc", +} + +VERSION_BRANCH = "auto-bump-up-version" + + +class Preparation(Step): + MESSAGE = "Preparation." + + def RunStep(self): + # Check for a clean workdir. + if not self.GitIsWorkdirClean(): # pragma: no cover + # This is in case a developer runs this script on a dirty tree. + self.GitStash() + + # TODO(machenbach): This should be called master after the git switch. + self.GitCheckout("bleeding_edge") + + self.GitPull() + + # Ensure a clean version branch. + self.DeleteBranch(VERSION_BRANCH) + + +class GetCurrentBleedingEdgeVersion(Step): + MESSAGE = "Get latest bleeding edge version." + + def RunStep(self): + # TODO(machenbach): This should be called master after the git switch. + self.GitCheckout("bleeding_edge") + + # Store latest version and revision. + self.ReadAndPersistVersion() + self["latest_version"] = self.ArrayToVersion("") + self["latest"] = self.GitLog(n=1, format="%H") + print "Bleeding edge version: %s" % self["latest_version"] + + +# This step is pure paranoia. It forbids the script to continue if the last +# commit changed version.cc. 
Just in case the other bailout has a bug, this +# prevents the script from continuously commiting version changes. +class LastChangeBailout(Step): + MESSAGE = "Stop script if the last change modified the version." + + def RunStep(self): + if self._config[VERSION_FILE] in self.GitChangedFiles(self["latest"]): + print "Stop due to recent version change." + return True + + +# TODO(machenbach): Implement this for git. +class FetchLKGR(Step): + MESSAGE = "Fetching V8 LKGR." + + def RunStep(self): + lkgr_url = "https://v8-status.appspot.com/lkgr" + self["lkgr_svn"] = self.ReadURL(lkgr_url, wait_plan=[5]) + + +# TODO(machenbach): Implement this for git. With a git lkgr we could simply +# checkout that revision. With svn, we have to search backwards until that +# revision is found. +class GetLKGRVersion(Step): + MESSAGE = "Get bleeding edge lkgr version." + + def RunStep(self): + self.GitCheckout("bleeding_edge") + # If the commit was made from svn, there is a mapping entry in the commit + # message. + self["lkgr"] = self.GitLog( + grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" % self["lkgr_svn"], + format="%H") + + # FIXME(machenbach): http://crbug.com/391712 can lead to svn lkgrs on the + # trunk branch (rarely). + if not self["lkgr"]: # pragma: no cover + self.Die("No git hash found for svn lkgr.") + + self.GitCreateBranch(VERSION_BRANCH, self["lkgr"]) + self.ReadAndPersistVersion("lkgr_") + self["lkgr_version"] = self.ArrayToVersion("lkgr_") + print "LKGR version: %s" % self["lkgr_version"] + + # Ensure a clean version branch. + self.GitCheckout("bleeding_edge") + self.DeleteBranch(VERSION_BRANCH) + + +class LKGRVersionUpToDateBailout(Step): + MESSAGE = "Stop script if the lkgr has a renewed version." + + def RunStep(self): + # If a version-change commit becomes the lkgr, don't bump up the version + # again. + if self._config[VERSION_FILE] in self.GitChangedFiles(self["lkgr"]): + print "Stop because the lkgr is a version change itself." 
+ return True + + # Don't bump up the version if it got updated already after the lkgr. + if SortingKey(self["lkgr_version"]) < SortingKey(self["latest_version"]): + print("Stop because the latest version already changed since the lkgr " + "version.") + return True + + +class GetTrunkVersion(Step): + MESSAGE = "Get latest trunk version." + + def RunStep(self): + # TODO(machenbach): This should be called trunk after the git switch. + self.GitCheckout("master") + self.GitPull() + self.ReadAndPersistVersion("trunk_") + self["trunk_version"] = self.ArrayToVersion("trunk_") + print "Trunk version: %s" % self["trunk_version"] + + +class CalculateVersion(Step): + MESSAGE = "Calculate the new version." + + def RunStep(self): + if self["lkgr_build"] == "9999": # pragma: no cover + # If version control on bleeding edge was switched off, just use the last + # trunk version. + self["lkgr_version"] = self["trunk_version"] + + # The new version needs to be greater than the max on bleeding edge and + # trunk. + max_version = max(self["trunk_version"], + self["lkgr_version"], + key=SortingKey) + + # Strip off possible leading zeros. + self["new_major"], self["new_minor"], self["new_build"], _ = ( + map(str, map(int, max_version.split(".")))) + + self["new_build"] = str(int(self["new_build"]) + 1) + self["new_patch"] = "0" + + self["new_version"] = ("%s.%s.%s.0" % + (self["new_major"], self["new_minor"], self["new_build"])) + print "New version is %s" % self["new_version"] + + if self._options.dry_run: # pragma: no cover + print "Dry run, skipping version change." + return True + + +class CheckTreeStatus(Step): + MESSAGE = "Checking v8 tree status message." 
+ + def RunStep(self): + status_url = "https://v8-status.appspot.com/current?format=json" + status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300]) + message = json.loads(status_json)["message"] + if re.search(r"maintenance|no commits", message, flags=re.I): + print "Skip version change by tree status: \"%s\"" % message + return True + + +class ChangeVersion(Step): + MESSAGE = "Bump up the version." + + def RunStep(self): + self.GitCreateBranch(VERSION_BRANCH, "bleeding_edge") + + self.SetVersion(self.Config(VERSION_FILE), "new_") + + try: + self.GitCommit("[Auto-roll] Bump up version to %s\n\nTBR=%s" % + (self["new_version"], self._options.author)) + self.GitUpload(author=self._options.author, + force=self._options.force_upload, + bypass_hooks=True) + self.GitDCommit() + print "Successfully changed the version." + finally: + # Clean up. + self.GitCheckout("bleeding_edge") + self.DeleteBranch(VERSION_BRANCH) + + +class BumpUpVersion(ScriptsBase): + def _PrepareOptions(self, parser): + parser.add_argument("--dry_run", help="Don't commit the new version.", + default=False, action="store_true") + + def _ProcessOptions(self, options): # pragma: no cover + if not options.dry_run and not options.author: + print "Specify your chromium.org email with -a" + return False + options.wait_for_lgtm = False + options.force_readline_defaults = True + options.force_upload = True + return True + + def _Steps(self): + return [ + Preparation, + GetCurrentBleedingEdgeVersion, + LastChangeBailout, + FetchLKGR, + GetLKGRVersion, + LKGRVersionUpToDateBailout, + GetTrunkVersion, + CalculateVersion, + CheckTreeStatus, + ChangeVersion, + ] + +if __name__ == "__main__": # pragma: no cover + sys.exit(BumpUpVersion(CONFIG).Run()) diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/chromium_roll.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/chromium_roll.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/chromium_roll.py 2014-05-02 00:49:51.000000000 +0000 +++ 
nodejs-0.11.15/deps/v8/tools/push-to-trunk/chromium_roll.py 2015-01-20 21:22:17.000000000 +0000 @@ -30,7 +30,8 @@ MESSAGE = "Detect commit ID of last push to trunk." def RunStep(self): - self["last_push"] = self._options.last_push or self.FindLastTrunkPush() + self["last_push"] = self._options.last_push or self.FindLastTrunkPush( + include_patches=True) self["trunk_revision"] = self.GitSVNFindSVNRev(self["last_push"]) self["push_title"] = self.GitLog(n=1, format="%s", git_hash=self["last_push"]) @@ -89,7 +90,7 @@ deps) TextToFile(deps, self.Config(DEPS_FILE)) - if self._options.reviewer: + if self._options.reviewer and not self._options.manual: print "Using account %s for review." % self._options.reviewer rev = self._options.reviewer else: @@ -98,9 +99,14 @@ rev = self.ReadLine() commit_title = "Update V8 to %s." % self["push_title"].lower() - self.GitCommit("%s\n\nTBR=%s" % (commit_title, rev)) + sheriff = "" + if self["sheriff"]: + sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems." + % self["sheriff"]) + self.GitCommit("%s%s\n\nTBR=%s" % (commit_title, sheriff, rev)) self.GitUpload(author=self._options.author, - force=self._options.force_upload) + force=self._options.force_upload, + cq=self._options.use_commit_queue) print "CL uploaded." 
@@ -138,6 +144,9 @@ "directory to automate the V8 roll.")) parser.add_argument("-l", "--last-push", help="The git commit ID of the last push to trunk.") + parser.add_argument("--use-commit-queue", + help="Check the CQ bit on upload.", + default=False, action="store_true") def _ProcessOptions(self, options): # pragma: no cover if not options.manual and not options.reviewer: @@ -158,6 +167,7 @@ Preparation, DetectLastPush, CheckChromium, + DetermineV8Sheriff, SwitchChromium, UpdateChromiumCheckout, UploadCL, diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/common_includes.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/common_includes.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/common_includes.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/common_includes.py 2015-01-20 21:22:17.000000000 +0000 @@ -28,6 +28,8 @@ import argparse import datetime +import httplib +import imp import json import os import re @@ -35,12 +37,13 @@ import sys import textwrap import time +import urllib import urllib2 from git_recipes import GitRecipesMixin +from git_recipes import GitFailedException PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME" -TEMP_BRANCH = "TEMP_BRANCH" BRANCHNAME = "BRANCHNAME" DOT_GIT_LOCATION = "DOT_GIT_LOCATION" VERSION_FILE = "VERSION_FILE" @@ -168,12 +171,23 @@ return "" +def SortingKey(version): + """Key for sorting version number strings: '3.11' > '3.2.1.1'""" + version_keys = map(int, version.split(".")) + # Fill up to full version numbers to normalize comparison. + while len(version_keys) < 4: # pragma: no cover + version_keys.append(0) + # Fill digits. + return ".".join(map("{0:04d}".format, version_keys)) + + # Some commands don't like the pipe, e.g. calling vi from within the script or # from subscripts like git cl upload. def Command(cmd, args="", prefix="", pipe=True): # TODO(machenbach): Use timeout. 
cmd_line = "%s %s %s" % (prefix, cmd, args) print "Command: %s" % cmd_line + sys.stdout.flush() try: if pipe: return subprocess.check_output(cmd_line, shell=True) @@ -181,6 +195,9 @@ return subprocess.check_call(cmd_line, shell=True) except subprocess.CalledProcessError: return None + finally: + sys.stdout.flush() + sys.stderr.flush() # Wrapper for side effects. @@ -202,12 +219,34 @@ finally: url_fh.close() + def ReadClusterFuzzAPI(self, api_key, **params): + params["api_key"] = api_key.strip() + params = urllib.urlencode(params) + + headers = {"Content-type": "application/x-www-form-urlencoded"} + + conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com") + conn.request("POST", "/_api/", params, headers) + + response = conn.getresponse() + data = response.read() + + try: + return json.loads(data) + except: + print data + print "ERROR: Could not read response. Is your key valid?" + raise + def Sleep(self, seconds): time.sleep(seconds) def GetDate(self): return datetime.date.today().strftime("%Y-%m-%d") + def GetUTCStamp(self): + return time.mktime(datetime.datetime.utcnow().timetuple()) + DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler() @@ -215,10 +254,6 @@ pass -class GitFailedException(Exception): - pass - - class Step(GitRecipesMixin): def __init__(self, text, requires, number, config, state, options, handler): self._text = text @@ -257,10 +292,11 @@ return print ">>> Step %d: %s" % (self._number, self._text) - self.RunStep() - - # Persist state. - TextToFile(json.dumps(self._state), state_file) + try: + return self.RunStep() + finally: + # Persist state. + TextToFile(json.dumps(self._state), state_file) def RunStep(self): # pragma: no cover raise NotImplementedError @@ -346,7 +382,7 @@ def DeleteBranch(self, name): for line in self.GitBranch().splitlines(): - if re.match(r".*\s+%s$" % name, line): + if re.match(r"\*?\s*%s$" % re.escape(name), line): msg = "Branch %s exists, do you want to delete it?" 
% name if self.Confirm(msg): self.GitDeleteBranch(name) @@ -377,18 +413,11 @@ self.GitSVNFetch() def PrepareBranch(self): - # Get ahold of a safe temporary branch and check it out. - if self["current_branch"] != self._config[TEMP_BRANCH]: - self.DeleteBranch(self._config[TEMP_BRANCH]) - self.GitCreateBranch(self._config[TEMP_BRANCH]) - # Delete the branch that will be created later if it exists already. self.DeleteBranch(self._config[BRANCHNAME]) def CommonCleanup(self): self.GitCheckout(self["current_branch"]) - if self._config[TEMP_BRANCH] != self["current_branch"]: - self.GitDeleteBranch(self._config[TEMP_BRANCH]) if self._config[BRANCHNAME] != self["current_branch"]: self.GitDeleteBranch(self._config[BRANCHNAME]) @@ -441,12 +470,35 @@ except GitFailedException: self.WaitForResolvingConflicts(patch_file) - def FindLastTrunkPush(self, parent_hash=""): - push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based" + def FindLastTrunkPush(self, parent_hash="", include_patches=False): + push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*" + if not include_patches: + # Non-patched versions only have three numbers followed by the "(based + # on...) comment." 
+ push_pattern += " (based" branch = "" if parent_hash else "svn/trunk" return self.GitLog(n=1, format="%H", grep=push_pattern, parent_hash=parent_hash, branch=branch) + def ArrayToVersion(self, prefix): + return ".".join([self[prefix + "major"], + self[prefix + "minor"], + self[prefix + "build"], + self[prefix + "patch"]]) + + def SetVersion(self, version_file, prefix): + output = "" + for line in FileToText(version_file).splitlines(): + if line.startswith("#define MAJOR_VERSION"): + line = re.sub("\d+$", self[prefix + "major"], line) + elif line.startswith("#define MINOR_VERSION"): + line = re.sub("\d+$", self[prefix + "minor"], line) + elif line.startswith("#define BUILD_NUMBER"): + line = re.sub("\d+$", self[prefix + "build"], line) + elif line.startswith("#define PATCH_LEVEL"): + line = re.sub("\d+$", self[prefix + "patch"], line) + output += "%s\n" % line + TextToFile(output, version_file) class UploadStep(Step): MESSAGE = "Upload for code review." @@ -462,6 +514,40 @@ self.GitUpload(reviewer, self._options.author, self._options.force_upload) +class DetermineV8Sheriff(Step): + MESSAGE = "Determine the V8 sheriff for code review." + + def RunStep(self): + self["sheriff"] = None + if not self._options.sheriff: # pragma: no cover + return + + try: + # The googlers mapping maps @google.com accounts to @chromium.org + # accounts. + googlers = imp.load_source('googlers_mapping', + self._options.googlers_mapping) + googlers = googlers.list_to_dict(googlers.get_list()) + except: # pragma: no cover + print "Skip determining sheriff without googler mapping." + return + + # The sheriff determined by the rotation on the waterfall has a + # @google.com account. + url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js" + match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url)) + + # If "channel is sheriff", we can't match an account. 
+ if match: + g_name = match.group(1) + self["sheriff"] = googlers.get(g_name + "@google.com", + g_name + "@chromium.org") + self._options.reviewer = self["sheriff"] + print "Found active sheriff: %s" % self["sheriff"] + else: + print "No active sheriff found." + + def MakeStep(step_class=Step, number=0, state=None, config=None, options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER): # Allow to pass in empty dictionaries. @@ -506,11 +592,17 @@ parser = argparse.ArgumentParser(description=self._Description()) parser.add_argument("-a", "--author", default="", help="The author email used for rietveld.") + parser.add_argument("-g", "--googlers-mapping", + help="Path to the script mapping google accounts.") parser.add_argument("-r", "--reviewer", default="", help="The account name to be used for reviews.") + parser.add_argument("--sheriff", default=False, action="store_true", + help=("Determine current sheriff to review CLs. On " + "success, this will overwrite the reviewer " + "option.")) parser.add_argument("-s", "--step", - help="Specify the step where to start work. Default: 0.", - default=0, type=int) + help="Specify the step where to start work. Default: 0.", + default=0, type=int) self._PrepareOptions(parser) @@ -524,6 +616,10 @@ print "Bad step number %d" % options.step parser.print_help() return None + if options.sheriff and not options.googlers_mapping: # pragma: no cover + print "To determine the current sheriff, requires the googler mapping" + parser.print_help() + return None # Defaults for options, common to all scripts. 
options.manual = getattr(options, "manual", True) @@ -555,7 +651,8 @@ steps.append(MakeStep(step_class, number, self._state, self._config, options, self._side_effect_handler)) for step in steps[options.step:]: - step.Run() + if step.Run(): + return 1 return 0 def Run(self, args=None): diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/git_recipes.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/git_recipes.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/git_recipes.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/git_recipes.py 2015-01-20 21:22:17.000000000 +0000 @@ -28,6 +28,11 @@ import re + +class GitFailedException(Exception): + pass + + def Strip(f): def new_f(*args, **kwargs): return f(*args, **kwargs).strip() @@ -59,6 +64,16 @@ assert name self.Git(MakeArgs(["branch -D", name])) + def GitReset(self, name): + assert name + self.Git(MakeArgs(["reset --hard", name])) + + def GitStash(self): + self.Git(MakeArgs(["stash"])) + + def GitRemotes(self): + return map(str.strip, self.Git(MakeArgs(["branch -r"])).splitlines()) + def GitCheckout(self, name): assert name self.Git(MakeArgs(["checkout -f", name])) @@ -68,6 +83,26 @@ assert branch_or_hash self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name])) + def GitCheckoutFileSafe(self, name, branch_or_hash): + try: + self.GitCheckoutFile(name, branch_or_hash) + except GitFailedException: # pragma: no cover + # The file doesn't exist in that revision. + return False + return True + + def GitChangedFiles(self, git_hash): + assert git_hash + try: + files = self.Git(MakeArgs(["diff --name-only", + git_hash, + "%s^" % git_hash])) + return map(str.strip, files.splitlines()) + except GitFailedException: # pragma: no cover + # Git fails using "^" at branch roots. 
+ return [] + + @Strip def GitCurrentBranch(self): for line in self.Git("status -s -b -uno").strip().splitlines(): @@ -85,7 +120,7 @@ if format: args.append("--format=%s" % format) if grep: - args.append("--grep=\"%s\"" % grep) + args.append("--grep=\"%s\"" % grep.replace("\"", "\\\"")) if reverse: args.append("--reverse") if git_hash: @@ -99,6 +134,7 @@ assert git_hash return self.Git(MakeArgs(["log", "-1", "-p", git_hash])) + # TODO(machenbach): Unused? Remove. def GitAdd(self, name): assert name self.Git(MakeArgs(["add", Quoted(name)])) @@ -111,7 +147,8 @@ args.append(Quoted(patch_file)) self.Git(MakeArgs(args)) - def GitUpload(self, reviewer="", author="", force=False): + def GitUpload(self, reviewer="", author="", force=False, cq=False, + bypass_hooks=False): args = ["cl upload --send-mail"] if author: args += ["--email", Quoted(author)] @@ -119,6 +156,10 @@ args += ["-r", Quoted(reviewer)] if force: args.append("-f") + if cq: + args.append("--use-commit-queue") + if bypass_hooks: + args.append("--bypass-hooks") # TODO(machenbach): Check output in forced mode. Verify that all required # base files were uploaded, if not retry. self.Git(MakeArgs(args), pipe=False) @@ -147,6 +188,10 @@ def GitSVNFetch(self): self.Git("svn fetch") + def GitSVNRebase(self): + self.Git("svn rebase") + + # TODO(machenbach): Unused? Remove. 
@Strip def GitSVNLog(self): return self.Git("svn log -1 --oneline") diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/merge_to_branch.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/merge_to_branch.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/merge_to_branch.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/merge_to_branch.py 2015-01-20 21:22:17.000000000 +0000 @@ -41,7 +41,6 @@ PERSISTFILE_BASENAME: "/tmp/v8-merge-to-branch-tempfile", ALREADY_MERGING_SENTINEL_FILE: "/tmp/v8-merge-to-branch-tempfile-already-merging", - TEMP_BRANCH: "prepare-merge-temporary-branch-created-by-script", DOT_GIT_LOCATION: ".git", VERSION_FILE: "src/version.cc", TEMPORARY_PATCH_FILE: "/tmp/v8-prepare-merge-tempfile-temporary-patch", @@ -134,16 +133,8 @@ if not self["revision_list"]: # pragma: no cover self.Die("Revision list is empty.") - if self._options.revert: - if not self._options.revert_bleeding_edge: - self["new_commit_msg"] = ("Rollback of %s in %s branch." - % (self["revision_list"], self["merge_to_branch"])) - else: - self["new_commit_msg"] = "Revert %s." % self["revision_list"] - else: - self["new_commit_msg"] = ("Merged %s into %s branch." - % (self["revision_list"], self["merge_to_branch"])) - self["new_commit_msg"] += "\n\n" + # The commit message title is added below after the version is specified. 
+ self["new_commit_msg"] = "" for commit_hash in self["patch_commit_hashes"]: patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash) @@ -155,10 +146,9 @@ for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M): bugs.extend(map(lambda s: s.strip(), bug.split(","))) - bug_aggregate = ",".join(sorted(bugs)) + bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs))) if bug_aggregate: self["new_commit_msg"] += "BUG=%s\nLOG=N\n" % bug_aggregate - TextToFile(self["new_commit_msg"], self.Config(COMMITMSG_FILE)) class ApplyPatches(Step): @@ -181,7 +171,7 @@ def RunStep(self): if self._options.revert_bleeding_edge: return - # These version numbers are used again for creating the tag + # This is used to calculate the patch level increment. self.ReadAndPersistVersion() @@ -204,12 +194,28 @@ else: self.Editor(self.Config(VERSION_FILE)) self.ReadAndPersistVersion("new_") + self["version"] = "%s.%s.%s.%s" % (self["new_major"], + self["new_minor"], + self["new_build"], + self["new_patch"]) class CommitLocal(Step): MESSAGE = "Commit to local branch." def RunStep(self): + # Add a commit message title. + if self._options.revert: + if not self._options.revert_bleeding_edge: + title = ("Version %s (rollback of %s)" + % (self["version"], self["revision_list"])) + else: + title = "Revert %s." 
% self["revision_list"] + else: + title = ("Version %s (merged %s)" + % (self["version"], self["revision_list"])) + self["new_commit_msg"] = "%s\n\n%s" % (title, self["new_commit_msg"]) + TextToFile(self["new_commit_msg"], self.Config(COMMITMSG_FILE)) self.GitCommit(file_name=self.Config(COMMITMSG_FILE)) @@ -244,10 +250,6 @@ def RunStep(self): if self._options.revert_bleeding_edge: return - self["version"] = "%s.%s.%s.%s" % (self["new_major"], - self["new_minor"], - self["new_build"], - self["new_patch"]) print "Creating tag svn/tags/%s" % self["version"] if self["merge_to_branch"] == "trunk": self["to_url"] = "trunk" diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/push_to_trunk.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/push_to_trunk.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/push_to_trunk.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/push_to_trunk.py 2015-01-20 21:22:17.000000000 +0000 @@ -39,7 +39,6 @@ BRANCHNAME: "prepare-push", TRUNKBRANCH: "trunk-push", PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile", - TEMP_BRANCH: "prepare-push-temporary-branch-created-by-script", DOT_GIT_LOCATION: ".git", VERSION_FILE: "src/version.cc", CHANGELOG_FILE: "ChangeLog", @@ -58,6 +57,11 @@ def RunStep(self): self.InitialEnvironmentChecks() self.CommonPrepare() + + if(self["current_branch"] == self.Config(TRUNKBRANCH) + or self["current_branch"] == self.Config(BRANCHNAME)): + print "Warning: Script started on branch %s" % self["current_branch"] + self.PrepareBranch() self.DeleteBranch(self.Config(TRUNKBRANCH)) @@ -120,6 +124,20 @@ self["last_push_bleeding_edge"] = last_push_bleeding_edge +# TODO(machenbach): Code similarities with bump_up_version.py. Merge after +# turning this script into a pure git script. +class GetCurrentBleedingEdgeVersion(Step): + MESSAGE = "Get latest bleeding edge version." + + def RunStep(self): + self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/bleeding_edge") + + # Store latest version. 
+ self.ReadAndPersistVersion("latest_") + self["latest_version"] = self.ArrayToVersion("latest_") + print "Bleeding edge version: %s" % self["latest_version"] + + class IncrementVersion(Step): MESSAGE = "Increment version number." @@ -127,11 +145,23 @@ # Retrieve current version from last trunk push. self.GitCheckoutFile(self.Config(VERSION_FILE), self["last_push_trunk"]) self.ReadAndPersistVersion() + self["trunk_version"] = self.ArrayToVersion("") + + if self["latest_build"] == "9999": # pragma: no cover + # If version control on bleeding edge was switched off, just use the last + # trunk version. + self["latest_version"] = self["trunk_version"] + + if SortingKey(self["trunk_version"]) < SortingKey(self["latest_version"]): + # If the version on bleeding_edge is newer than on trunk, use it. + self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/bleeding_edge") + self.ReadAndPersistVersion() if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will " "fire up your EDITOR on %s so you can make arbitrary " "changes. When you're done, save the file and exit your " "EDITOR.)" % self.Config(VERSION_FILE))): + text = FileToText(self.Config(VERSION_FILE)) text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$", r"\g<space>%s" % str(int(self["build"]) + 1), @@ -143,6 +173,10 @@ # Variables prefixed with 'new_' contain the new version numbers for the # ongoing trunk push. self.ReadAndPersistVersion("new_") + + # Make sure patch level is 0 in a new push. + self["new_patch"] = "0" + self["version"] = "%s.%s.%s" % (self["new_major"], self["new_minor"], self["new_build"]) @@ -303,20 +337,7 @@ # The version file has been modified by the patch. Reset it to the version # on trunk and apply the correct version. 
self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/trunk") - output = "" - for line in FileToText(self.Config(VERSION_FILE)).splitlines(): - if line.startswith("#define MAJOR_VERSION"): - line = re.sub("\d+$", self["new_major"], line) - elif line.startswith("#define MINOR_VERSION"): - line = re.sub("\d+$", self["new_minor"], line) - elif line.startswith("#define BUILD_NUMBER"): - line = re.sub("\d+$", self["new_build"], line) - elif line.startswith("#define PATCH_LEVEL"): - line = re.sub("\d+$", "0", line) - elif line.startswith("#define IS_CANDIDATE_VERSION"): - line = re.sub("\d+$", "0", line) - output += "%s\n" % line - TextToFile(output, self.Config(VERSION_FILE)) + self.SetVersion(self.Config(VERSION_FILE), "new_") class CommitTrunk(Step): @@ -424,6 +445,7 @@ FreshBranch, PreparePushRevision, DetectLastPush, + GetCurrentBleedingEdgeVersion, IncrementVersion, PrepareChangeLog, EditChangeLog, diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/releases.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/releases.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/releases.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/releases.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,481 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This script retrieves the history of all V8 branches and trunk revisions and +# their corresponding Chromium revisions. 
+ +# Requires a chromium checkout with branch heads: +# gclient sync --with_branch_heads +# gclient fetch + +import argparse +import csv +import itertools +import json +import os +import re +import sys + +from common_includes import * + +DEPS_FILE = "DEPS_FILE" +CHROMIUM = "CHROMIUM" + +CONFIG = { + BRANCHNAME: "retrieve-v8-releases", + PERSISTFILE_BASENAME: "/tmp/v8-releases-tempfile", + DOT_GIT_LOCATION: ".git", + VERSION_FILE: "src/version.cc", + DEPS_FILE: "DEPS", +} + +# Expression for retrieving the bleeding edge revision from a commit message. +PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$") + +# Expression for retrieving the merged patches from a merge commit message +# (old and new format). +MERGE_MESSAGE_RE = re.compile(r"^.*[M|m]erged (.+)(\)| into).*$", re.M) + +# Expression for retrieving reverted patches from a commit message (old and +# new format). +ROLLBACK_MESSAGE_RE = re.compile(r"^.*[R|r]ollback of (.+)(\)| in).*$", re.M) + +# Expression for retrieving the code review link. +REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M) + +# Expression with three versions (historical) for extracting the v8 revision +# from the chromium DEPS file. +DEPS_RE = re.compile(r'^\s*(?:"v8_revision": "' + '|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@' + '|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)' + '([0-9]+)".*$', re.M) + +# Expression to pick tag and revision for bleeding edge tags. To be used with +# output of 'svn log'. +BLEEDING_EDGE_TAGS_RE = re.compile( + r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)") + + +def SortBranches(branches): + """Sort branches with version number names.""" + return sorted(branches, key=SortingKey, reverse=True) + + +def FilterDuplicatesAndReverse(cr_releases): + """Returns the chromium releases in reverse order filtered by v8 revision + duplicates. + + cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev. 
+ """ + last = "" + result = [] + for release in reversed(cr_releases): + if last == release[1]: + continue + last = release[1] + result.append(release) + return result + + +def BuildRevisionRanges(cr_releases): + """Returns a mapping of v8 revision -> chromium ranges. + The ranges are comma-separated, each range has the form R1:R2. The newest + entry is the only one of the form R1, as there is no end range. + + cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev. + cr_rev either refers to a chromium svn revision or a chromium branch number. + """ + range_lists = {} + cr_releases = FilterDuplicatesAndReverse(cr_releases) + + # Visit pairs of cr releases from oldest to newest. + for cr_from, cr_to in itertools.izip( + cr_releases, itertools.islice(cr_releases, 1, None)): + + # Assume the chromium revisions are all different. + assert cr_from[0] != cr_to[0] + + # TODO(machenbach): Subtraction is not git friendly. + ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1) + + # Collect the ranges in lists per revision. + range_lists.setdefault(cr_from[1], []).append(ran) + + # Add the newest revision. + if cr_releases: + range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0]) + + # Stringify and comma-separate the range lists. + return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems()) + + +def MatchSafe(match): + if match: + return match.group(1) + else: + return "" + + +class Preparation(Step): + MESSAGE = "Preparation." + + def RunStep(self): + self.CommonPrepare() + self.PrepareBranch() + + +class RetrieveV8Releases(Step): + MESSAGE = "Retrieve all V8 releases." 
+ + def ExceedsMax(self, releases): + return (self._options.max_releases > 0 + and len(releases) > self._options.max_releases) + + def GetBleedingEdgeFromPush(self, title): + return MatchSafe(PUSH_MESSAGE_RE.match(title)) + + def GetMergedPatches(self, body): + patches = MatchSafe(MERGE_MESSAGE_RE.search(body)) + if not patches: + patches = MatchSafe(ROLLBACK_MESSAGE_RE.search(body)) + if patches: + # Indicate reverted patches with a "-". + patches = "-%s" % patches + return patches + + def GetReleaseDict( + self, git_hash, bleeding_edge_rev, branch, version, patches, cl_body): + revision = self.GitSVNFindSVNRev(git_hash) + return { + # The SVN revision on the branch. + "revision": revision, + # The SVN revision on bleeding edge (only for newer trunk pushes). + "bleeding_edge": bleeding_edge_rev, + # The branch name. + "branch": branch, + # The version for displaying in the form 3.26.3 or 3.26.3.12. + "version": version, + # The date of the commit. + "date": self.GitLog(n=1, format="%ci", git_hash=git_hash), + # Merged patches if available in the form 'r1234, r2345'. + "patches_merged": patches, + # Default for easier output formatting. + "chromium_revision": "", + # Default for easier output formatting. + "chromium_branch": "", + # Link to the CL on code review. Trunk pushes are not uploaded, so this + # field will be populated below with the recent roll CL link. + "review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)), + # Link to the commit message on google code. 
+ "revision_link": ("https://code.google.com/p/v8/source/detail?r=%s" + % revision), + } + + def GetRelease(self, git_hash, branch): + self.ReadAndPersistVersion() + base_version = [self["major"], self["minor"], self["build"]] + version = ".".join(base_version) + body = self.GitLog(n=1, format="%B", git_hash=git_hash) + + patches = "" + if self["patch"] != "0": + version += ".%s" % self["patch"] + patches = self.GetMergedPatches(body) + + title = self.GitLog(n=1, format="%s", git_hash=git_hash) + return self.GetReleaseDict( + git_hash, self.GetBleedingEdgeFromPush(title), branch, version, + patches, body), self["patch"] + + def GetReleasesFromBleedingEdge(self): + tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20") + releases = [] + for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text): + git_hash = self.GitSVNFindGitHash(revision) + + # Add bleeding edge release. It does not contain patches or a code + # review link, as tags are not uploaded. + releases.append(self.GetReleaseDict( + git_hash, revision, "bleeding_edge", tag, "", "")) + return releases + + def GetReleasesFromBranch(self, branch): + self.GitReset("svn/%s" % branch) + if branch == 'bleeding_edge': + return self.GetReleasesFromBleedingEdge() + + releases = [] + try: + for git_hash in self.GitLog(format="%H").splitlines(): + if self._config[VERSION_FILE] not in self.GitChangedFiles(git_hash): + continue + if self.ExceedsMax(releases): + break # pragma: no cover + if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash): + break # pragma: no cover + + release, patch_level = self.GetRelease(git_hash, branch) + releases.append(release) + + # Follow branches only until their creation point. + # TODO(machenbach): This omits patches if the version file wasn't + # manipulated correctly. Find a better way to detect the point where + # the parent of the branch head leads to the trunk branch. 
+ if branch != "trunk" and patch_level == "0": + break + + # Allow Ctrl-C interrupt. + except (KeyboardInterrupt, SystemExit): # pragma: no cover + pass + + # Clean up checked-out version file. + self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD") + return releases + + def RunStep(self): + self.GitCreateBranch(self._config[BRANCHNAME]) + # Get relevant remote branches, e.g. "svn/3.25". + branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s), + self.GitRemotes()) + # Remove 'svn/' prefix. + branches = map(lambda s: s[4:], branches) + + releases = [] + if self._options.branch == 'recent': + # Get only recent development on trunk, beta and stable. + if self._options.max_releases == 0: # pragma: no cover + self._options.max_releases = 10 + beta, stable = SortBranches(branches)[0:2] + releases += self.GetReleasesFromBranch(stable) + releases += self.GetReleasesFromBranch(beta) + releases += self.GetReleasesFromBranch("trunk") + releases += self.GetReleasesFromBranch("bleeding_edge") + elif self._options.branch == 'all': # pragma: no cover + # Retrieve the full release history. + for branch in branches: + releases += self.GetReleasesFromBranch(branch) + releases += self.GetReleasesFromBranch("trunk") + releases += self.GetReleasesFromBranch("bleeding_edge") + else: # pragma: no cover + # Retrieve history for a specified branch. + assert self._options.branch in branches + ["trunk", "bleeding_edge"] + releases += self.GetReleasesFromBranch(self._options.branch) + + self["releases"] = sorted(releases, + key=lambda r: SortingKey(r["version"]), + reverse=True) + + +# TODO(machenbach): Parts of the Chromium setup are c/p from the chromium_roll +# script -> unify. +class CheckChromium(Step): + MESSAGE = "Check the chromium checkout." + + def Run(self): + self["chrome_path"] = self._options.chromium + + +class SwitchChromium(Step): + MESSAGE = "Switch to Chromium checkout." 
+ REQUIRES = "chrome_path" + + def RunStep(self): + self["v8_path"] = os.getcwd() + os.chdir(self["chrome_path"]) + # Check for a clean workdir. + if not self.GitIsWorkdirClean(): # pragma: no cover + self.Die("Workspace is not clean. Please commit or undo your changes.") + # Assert that the DEPS file is there. + if not os.path.exists(self.Config(DEPS_FILE)): # pragma: no cover + self.Die("DEPS file not present.") + + +class UpdateChromiumCheckout(Step): + MESSAGE = "Update the checkout and create a new branch." + REQUIRES = "chrome_path" + + def RunStep(self): + os.chdir(self["chrome_path"]) + self.GitCheckout("master") + self.GitPull() + self.GitCreateBranch(self.Config(BRANCHNAME)) + + +class RetrieveChromiumV8Releases(Step): + MESSAGE = "Retrieve V8 releases from Chromium DEPS." + REQUIRES = "chrome_path" + + def RunStep(self): + os.chdir(self["chrome_path"]) + + trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"]) + if not trunk_releases: # pragma: no cover + print "No trunk releases detected. Skipping chromium history." + return True + + oldest_v8_rev = int(trunk_releases[-1]["revision"]) + + cr_releases = [] + try: + for git_hash in self.GitLog(format="%H", grep="V8").splitlines(): + if self._config[DEPS_FILE] not in self.GitChangedFiles(git_hash): + continue + if not self.GitCheckoutFileSafe(self._config[DEPS_FILE], git_hash): + break # pragma: no cover + deps = FileToText(self.Config(DEPS_FILE)) + match = DEPS_RE.search(deps) + if match: + svn_rev = self.GitSVNFindSVNRev(git_hash) + v8_rev = match.group(1) + cr_releases.append([svn_rev, v8_rev]) + + # Stop after reaching beyond the last v8 revision we want to update. + # We need a small buffer for possible revert/reland frenzies. + # TODO(machenbach): Subtraction is not git friendly. + if int(v8_rev) < oldest_v8_rev - 100: + break # pragma: no cover + + # Allow Ctrl-C interrupt. + except (KeyboardInterrupt, SystemExit): # pragma: no cover + pass + + # Clean up. 
+ self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD") + + # Add the chromium ranges to the v8 trunk releases. + all_ranges = BuildRevisionRanges(cr_releases) + trunk_dict = dict((r["revision"], r) for r in trunk_releases) + for revision, ranges in all_ranges.iteritems(): + trunk_dict.get(revision, {})["chromium_revision"] = ranges + + +# TODO(machenbach): Unify common code with method above. +class RietrieveChromiumBranches(Step): + MESSAGE = "Retrieve Chromium branch information." + REQUIRES = "chrome_path" + + def RunStep(self): + os.chdir(self["chrome_path"]) + + trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"]) + if not trunk_releases: # pragma: no cover + print "No trunk releases detected. Skipping chromium history." + return True + + oldest_v8_rev = int(trunk_releases[-1]["revision"]) + + # Filter out irrelevant branches. + branches = filter(lambda r: re.match(r"branch-heads/\d+", r), + self.GitRemotes()) + + # Transform into pure branch numbers. + branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)), + branches) + + branches = sorted(branches, reverse=True) + + cr_branches = [] + try: + for branch in branches: + if not self.GitCheckoutFileSafe(self._config[DEPS_FILE], + "branch-heads/%d" % branch): + break # pragma: no cover + deps = FileToText(self.Config(DEPS_FILE)) + match = DEPS_RE.search(deps) + if match: + v8_rev = match.group(1) + cr_branches.append([str(branch), v8_rev]) + + # Stop after reaching beyond the last v8 revision we want to update. + # We need a small buffer for possible revert/reland frenzies. + # TODO(machenbach): Subtraction is not git friendly. + if int(v8_rev) < oldest_v8_rev - 100: + break # pragma: no cover + + # Allow Ctrl-C interrupt. + except (KeyboardInterrupt, SystemExit): # pragma: no cover + pass + + # Clean up. + self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD") + + # Add the chromium branches to the v8 trunk releases. 
+ all_ranges = BuildRevisionRanges(cr_branches) + trunk_dict = dict((r["revision"], r) for r in trunk_releases) + for revision, ranges in all_ranges.iteritems(): + trunk_dict.get(revision, {})["chromium_branch"] = ranges + + +class SwitchV8(Step): + MESSAGE = "Returning to V8 checkout." + REQUIRES = "chrome_path" + + def RunStep(self): + self.GitCheckout("master") + self.GitDeleteBranch(self.Config(BRANCHNAME)) + os.chdir(self["v8_path"]) + + +class CleanUp(Step): + MESSAGE = "Clean up." + + def RunStep(self): + self.CommonCleanup() + + +class WriteOutput(Step): + MESSAGE = "Print output." + + def Run(self): + if self._options.csv: + with open(self._options.csv, "w") as f: + writer = csv.DictWriter(f, + ["version", "branch", "revision", + "chromium_revision", "patches_merged"], + restval="", + extrasaction="ignore") + for release in self["releases"]: + writer.writerow(release) + if self._options.json: + with open(self._options.json, "w") as f: + f.write(json.dumps(self["releases"])) + if not self._options.csv and not self._options.json: + print self["releases"] # pragma: no cover + + +class Releases(ScriptsBase): + def _PrepareOptions(self, parser): + parser.add_argument("-b", "--branch", default="recent", + help=("The branch to analyze. If 'all' is specified, " + "analyze all branches. 
If 'recent' (default) " + "is specified, track beta, stable and trunk.")) + parser.add_argument("-c", "--chromium", + help=("The path to your Chromium src/ " + "directory to automate the V8 roll.")) + parser.add_argument("--csv", help="Path to a CSV file for export.") + parser.add_argument("-m", "--max-releases", type=int, default=0, + help="The maximum number of releases to track.") + parser.add_argument("--json", help="Path to a JSON file for export.") + + def _ProcessOptions(self, options): # pragma: no cover + return True + + def _Steps(self): + return [ + Preparation, + RetrieveV8Releases, + CheckChromium, + SwitchChromium, + UpdateChromiumCheckout, + RetrieveChromiumV8Releases, + RietrieveChromiumBranches, + SwitchV8, + CleanUp, + WriteOutput, + ] + + +if __name__ == "__main__": # pragma: no cover + sys.exit(Releases(CONFIG).Run()) diff -Nru nodejs-0.11.13/deps/v8/tools/push-to-trunk/test_scripts.py nodejs-0.11.15/deps/v8/tools/push-to-trunk/test_scripts.py --- nodejs-0.11.13/deps/v8/tools/push-to-trunk/test_scripts.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/push-to-trunk/test_scripts.py 2015-01-20 21:22:17.000000000 +0000 @@ -34,6 +34,8 @@ import auto_push from auto_push import CheckLastPush from auto_push import SETTINGS_LOCATION +import auto_roll +from auto_roll import CLUSTERFUZZ_API_KEY_FILE import common_includes from common_includes import * import merge_to_branch @@ -44,13 +46,19 @@ from chromium_roll import CHROMIUM from chromium_roll import DEPS_FILE from chromium_roll import ChromiumRoll +import releases +from releases import Releases +import bump_up_version +from bump_up_version import BumpUpVersion +from bump_up_version import LastChangeBailout +from bump_up_version import LKGRVersionUpToDateBailout +from auto_tag import AutoTag TEST_CONFIG = { BRANCHNAME: "test-prepare-push", TRUNKBRANCH: "test-trunk-push", PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile", - TEMP_BRANCH: 
"test-prepare-push-temporary-branch-created-by-script", DOT_GIT_LOCATION: None, VERSION_FILE: None, CHANGELOG_FILE: None, @@ -64,6 +72,7 @@ "/tmp/test-merge-to-branch-tempfile-already-merging", COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES", TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch", + CLUSTERFUZZ_API_KEY_FILE: "/tmp/test-fake-cf-api-key", } @@ -74,6 +83,38 @@ class ToplevelTest(unittest.TestCase): + def testSortBranches(self): + S = releases.SortBranches + self.assertEquals(["3.1", "2.25"], S(["2.25", "3.1"])[0:2]) + self.assertEquals(["3.0", "2.25"], S(["2.25", "3.0", "2.24"])[0:2]) + self.assertEquals(["3.11", "3.2"], S(["3.11", "3.2", "2.24"])[0:2]) + + def testFilterDuplicatesAndReverse(self): + F = releases.FilterDuplicatesAndReverse + self.assertEquals([], F([])) + self.assertEquals([["100", "10"]], F([["100", "10"]])) + self.assertEquals([["99", "9"], ["100", "10"]], + F([["100", "10"], ["99", "9"]])) + self.assertEquals([["98", "9"], ["100", "10"]], + F([["100", "10"], ["99", "9"], ["98", "9"]])) + self.assertEquals([["98", "9"], ["99", "10"]], + F([["100", "10"], ["99", "10"], ["98", "9"]])) + + def testBuildRevisionRanges(self): + B = releases.BuildRevisionRanges + self.assertEquals({}, B([])) + self.assertEquals({"10": "100"}, B([["100", "10"]])) + self.assertEquals({"10": "100", "9": "99:99"}, + B([["100", "10"], ["99", "9"]])) + self.assertEquals({"10": "100", "9": "97:99"}, + B([["100", "10"], ["98", "9"], ["97", "9"]])) + self.assertEquals({"10": "100", "9": "99:99", "3": "91:98"}, + B([["100", "10"], ["99", "9"], ["91", "3"]])) + self.assertEquals({"13": "101", "12": "100:100", "9": "94:97", + "3": "91:93, 98:99"}, + B([["101", "13"], ["100", "12"], ["98", "3"], + ["94", "9"], ["91", "3"]])) + def testMakeComment(self): self.assertEquals("# Line 1\n# Line 2\n#", MakeComment(" Line 1\n Line 2\n")) @@ -261,7 +302,7 @@ # arguments. 
if len(args) > len(expected_call['args']): raise NoRetryException("When calling %s with arguments, the " - "expectations must consist of at least as many arguments.") + "expectations must consist of at least as many arguments." % name) # Compare expected and actual arguments. for (expected_arg, actual_arg) in zip(expected_call['args'], args): @@ -296,14 +337,14 @@ self._tmp_files.append(name) return name - def WriteFakeVersionFile(self, build=4): + def WriteFakeVersionFile(self, minor=22, build=4, patch=0): with open(TEST_CONFIG[VERSION_FILE], "w") as f: f.write(" // Some line...\n") f.write("\n") f.write("#define MAJOR_VERSION 3\n") - f.write("#define MINOR_VERSION 22\n") + f.write("#define MINOR_VERSION %s\n" % minor) f.write("#define BUILD_NUMBER %s\n" % build) - f.write("#define PATCH_LEVEL 0\n") + f.write("#define PATCH_LEVEL %s\n" % patch) f.write(" // Some line...\n") f.write("#define IS_CANDIDATE_VERSION 0\n") @@ -316,7 +357,7 @@ def RunStep(self, script=PushToTrunk, step_class=Step, args=None): """Convenience wrapper.""" - args = args or ["-m"] + args = args if args is not None else ["-m"] return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args) def GitMock(self, cmd, args="", pipe=True): @@ -350,12 +391,20 @@ else: return self._url_mock.Call("readurl", url) + def ReadClusterFuzzAPI(self, api_key, **params): + # TODO(machenbach): Use a mock for this and add a test that stops rolling + # due to clustefuzz results. 
+ return [] + def Sleep(self, seconds): pass def GetDate(self): return "1999-07-31" + def GetUTCStamp(self): + return "100000" + def ExpectGit(self, *args): """Convenience wrapper.""" self._git_mock.Expect(*args) @@ -400,10 +449,8 @@ Git("status -s -uno", ""), Git("status -s -b -uno", "## some_branch"), Git("svn fetch", ""), - Git("branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]), - Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""), - Git("checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""), - Git("branch", ""), + Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]), + Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""), ]) self.ExpectReadline([RL("Y")]) self.MakeStep().CommonPrepare() @@ -415,7 +462,7 @@ Git("status -s -uno", ""), Git("status -s -b -uno", "## some_branch"), Git("svn fetch", ""), - Git("branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]), + Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]), ]) self.ExpectReadline([RL("n")]) self.MakeStep().CommonPrepare() @@ -427,8 +474,8 @@ Git("status -s -uno", ""), Git("status -s -b -uno", "## some_branch"), Git("svn fetch", ""), - Git("branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]), - Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], None), + Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]), + Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], None), ]) self.ExpectReadline([RL("Y")]) self.MakeStep().CommonPrepare() @@ -558,13 +605,19 @@ self.assertEquals("New\n Lines", FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])) + # Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6. + # Make sure that the increment is 3.22.7.0. 
def testIncrementVersion(self): TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() self.WriteFakeVersionFile() self._state["last_push_trunk"] = "hash1" + self._state["latest_build"] = "6" + self._state["latest_version"] = "3.22.6.0" self.ExpectGit([ - Git("checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], "") + Git("checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], ""), + Git("checkout -f svn/bleeding_edge -- %s" % TEST_CONFIG[VERSION_FILE], + "", cb=lambda: self.WriteFakeVersionFile(22, 6)), ]) self.ExpectReadline([ @@ -575,7 +628,7 @@ self.assertEquals("3", self._state["new_major"]) self.assertEquals("22", self._state["new_minor"]) - self.assertEquals("5", self._state["new_build"]) + self.assertEquals("7", self._state["new_build"]) self.assertEquals("0", self._state["new_patch"]) def _TestSquashCommits(self, change_log, expected_msg): @@ -692,8 +745,6 @@ Git("status -s -b -uno", "## some_branch\n"), Git("svn fetch", ""), Git("branch", " branch1\n* branch2\n"), - Git("checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""), - Git("branch", " branch1\n* branch2\n"), Git("branch", " branch1\n* branch2\n"), Git("checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""), Git("svn find-rev r123455", "push_hash\n"), @@ -704,6 +755,8 @@ Git("log -1 --format=%s hash2", "Version 3.4.5 (based on bleeding_edge revision r1234)\n"), Git("svn find-rev r1234", "hash3\n"), + Git("checkout -f svn/bleeding_edge -- %s" % TEST_CONFIG[VERSION_FILE], + "", cb=self.WriteFakeVersionFile), Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "", cb=self.WriteFakeVersionFile), Git("log --format=%H hash3..push_hash", "rev1\n"), @@ -726,7 +779,6 @@ Git("svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"), Git("svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""), Git("checkout -f some_branch", ""), - Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""), Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""), Git("branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""), ]) @@ 
-768,8 +820,15 @@ def testPushToTrunkForced(self): self._PushToTrunk(force=True) - def _ChromiumRoll(self, force=False, manual=False): + googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG[PERSISTFILE_BASENAME] + with open(googlers_mapping_py, "w") as f: + f.write(""" +def list_to_dict(entries): + return {"g_name@google.com": "c_name@chromium.org"} +def get_list(): + pass""") + TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile() if not os.path.exists(TEST_CONFIG[CHROMIUM]): os.makedirs(TEST_CONFIG[CHROMIUM]) @@ -783,7 +842,7 @@ Git("status -s -b -uno", "## some_branch\n"), Git("svn fetch", ""), Git(("log -1 --format=%H --grep=" - "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" " + "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" " "svn/trunk"), "push_hash\n"), Git("svn find-rev push_hash", "123455\n"), Git("log -1 --format=%s push_hash", @@ -794,23 +853,30 @@ Git("checkout -b v8-roll-123455", ""), Git(("commit -am \"Update V8 to version 3.22.5 " "(based on bleeding_edge revision r123454).\n\n" - "TBR=reviewer@chromium.org\""), + "Please reply to the V8 sheriff c_name@chromium.org in " + "case of problems.\n\nTBR=c_name@chromium.org\""), ""), Git(("cl upload --send-mail --email \"author@chromium.org\"%s" % force_flag), ""), ]) + self.ExpectReadURL([ + URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js", + "document.write('g_name')"), + ]) + # Expected keyboard input in manual mode: if manual: self.ExpectReadline([ - RL("reviewer@chromium.org"), # Chromium reviewer. + RL("c_name@chromium.org"), # Chromium reviewer. 
]) # Expected keyboard input in semi-automatic mode and forced mode: if not manual: self.ExpectReadline([]) - args = ["-a", "author@chromium.org", "-c", TEST_CONFIG[CHROMIUM]] + args = ["-a", "author@chromium.org", "-c", TEST_CONFIG[CHROMIUM], + "--sheriff", "--googlers-mapping", googlers_mapping_py] if force: args.append("-f") if manual: args.append("-m") else: args += ["-r", "reviewer@chromium.org"] @@ -908,6 +974,72 @@ auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS) self.assertRaises(Exception, RunAutoPush) + def testAutoRollExistingRoll(self): + self.ExpectReadURL([ + URL("https://codereview.chromium.org/search", + "owner=author%40chromium.org&limit=30&closed=3&format=json", + ("{\"results\": [{\"subject\": \"different\"}," + "{\"subject\": \"Update V8 to Version...\"}]}")), + ]) + + result = auto_roll.AutoRoll(TEST_CONFIG, self).Run( + AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]]) + self.assertEquals(1, result) + + # Snippet from the original DEPS file. + FAKE_DEPS = """ +vars = { + "v8_revision": "123455", +} +deps = { + "src/v8": + (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" + + Var("v8_revision"), +} +""" + + def testAutoRollUpToDate(self): + self.ExpectReadURL([ + URL("https://codereview.chromium.org/search", + "owner=author%40chromium.org&limit=30&closed=3&format=json", + ("{\"results\": [{\"subject\": \"different\"}]}")), + URL("http://src.chromium.org/svn/trunk/src/DEPS", + self.FAKE_DEPS), + ]) + + self.ExpectGit([ + Git(("log -1 --format=%H --grep=" + "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" " + "svn/trunk"), "push_hash\n"), + Git("svn find-rev push_hash", "123455\n"), + ]) + + result = auto_roll.AutoRoll(TEST_CONFIG, self).Run( + AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]]) + self.assertEquals(1, result) + + def testAutoRoll(self): + TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE] = self.MakeEmptyTempFile() + TextToFile("fake key", TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE]) + self.ExpectReadURL([ + 
URL("https://codereview.chromium.org/search", + "owner=author%40chromium.org&limit=30&closed=3&format=json", + ("{\"results\": [{\"subject\": \"different\"}]}")), + URL("http://src.chromium.org/svn/trunk/src/DEPS", + self.FAKE_DEPS), + ]) + + self.ExpectGit([ + Git(("log -1 --format=%H --grep=" + "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" " + "svn/trunk"), "push_hash\n"), + Git("svn find-rev push_hash", "123456\n"), + ]) + + result = auto_roll.AutoRoll(TEST_CONFIG, self).Run( + AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM], "--roll"]) + self.assertEquals(0, result) + def testMergeToBranch(self): TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile() TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile() @@ -920,7 +1052,7 @@ return lambda: self.assertEquals(patch, FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE])) - msg = """Merged r12345, r23456, r34567, r45678, r56789 into trunk branch. + msg = """Version 3.22.5.1 (merged r12345, r23456, r34567, r45678, r56789) Title4 @@ -930,7 +1062,7 @@ Title1 -Title5 +Revert "Something" BUG=123,234,345,456,567,v8:123 LOG=N @@ -950,8 +1082,6 @@ Git("status -s -b -uno", "## some_branch\n"), Git("svn fetch", ""), Git("branch", " branch1\n* branch2\n"), - Git("checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""), - Git("branch", " branch1\n* branch2\n"), Git("checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""), Git("log --format=%H --grep=\"Port r12345\" --reverse svn/bleeding_edge", "hash1\nhash2"), @@ -978,12 +1108,12 @@ Git("log -1 --format=%s hash2", "Title2"), Git("log -1 --format=%s hash3", "Title3"), Git("log -1 --format=%s hash1", "Title1"), - Git("log -1 --format=%s hash5", "Title5"), + Git("log -1 --format=%s hash5", "Revert \"Something\""), Git("log -1 hash4", "Title4\nBUG=123\nBUG=234"), Git("log -1 hash2", "Title2\n BUG = v8:123,345"), Git("log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"), - Git("log -1 hash1", "Title1"), - Git("log -1 hash5", "Title5"), + Git("log -1 hash1", "Title1\nBUG="), + 
Git("log -1 hash5", "Revert \"Something\"\nBUG=none"), Git("log -1 -p hash4", "patch4"), Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE], "", cb=VerifyPatch("patch4")), @@ -1006,13 +1136,13 @@ Git("cl presubmit", "Presubmit successfull\n"), Git("cl dcommit -f --bypass-hooks", "Closing issue\n", cb=VerifySVNCommit), Git("svn fetch", ""), - Git("log -1 --format=%%H --grep=\"%s\" svn/trunk" % msg, "hash6"), + Git(("log -1 --format=%%H --grep=\"%s\" svn/trunk" + % msg.replace("\"", "\\\"")), "hash6"), Git("svn find-rev hash6", "1324"), Git(("copy -r 1324 https://v8.googlecode.com/svn/trunk " "https://v8.googlecode.com/svn/tags/3.22.5.1 -m " "\"Tagging version 3.22.5.1\""), ""), Git("checkout -f some_branch", ""), - Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""), Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""), ]) @@ -1036,6 +1166,306 @@ args += ["-s", "3"] MergeToBranch(TEST_CONFIG, self).Run(args) + def testReleases(self): + tag_response_text = """ +------------------------------------------------------------------------ +r22631 | author1@chromium.org | 2014-07-28 02:05:29 +0200 (Mon, 28 Jul 2014) +Changed paths: + A /tags/3.28.43 (from /trunk:22630) + +Tagging version 3.28.43 +------------------------------------------------------------------------ +r22629 | author2@chromium.org | 2014-07-26 05:09:29 +0200 (Sat, 26 Jul 2014) +Changed paths: + A /tags/3.28.41 (from /branches/bleeding_edge:22626) + +Tagging version 3.28.41 +------------------------------------------------------------------------ +r22556 | author3@chromium.org | 2014-07-23 13:31:59 +0200 (Wed, 23 Jul 2014) +Changed paths: + A /tags/3.27.34.7 (from /branches/3.27:22555) + +Tagging version 3.27.34.7 +------------------------------------------------------------------------ +r22627 | author4@chromium.org | 2014-07-26 01:39:15 +0200 (Sat, 26 Jul 2014) +Changed paths: + A /tags/3.28.40 (from /branches/bleeding_edge:22624) + +Tagging version 3.28.40 
+------------------------------------------------------------------------ +""" + json_output = self.MakeEmptyTempFile() + csv_output = self.MakeEmptyTempFile() + TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() + self.WriteFakeVersionFile() + + TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile() + if not os.path.exists(TEST_CONFIG[CHROMIUM]): + os.makedirs(TEST_CONFIG[CHROMIUM]) + def WriteDEPS(revision): + TextToFile("Line\n \"v8_revision\": \"%s\",\n line\n" % revision, + TEST_CONFIG[DEPS_FILE]) + WriteDEPS(567) + + def ResetVersion(minor, build, patch=0): + return lambda: self.WriteFakeVersionFile(minor=minor, + build=build, + patch=patch) + + def ResetDEPS(revision): + return lambda: WriteDEPS(revision) + + self.ExpectGit([ + Git("status -s -uno", ""), + Git("status -s -b -uno", "## some_branch\n"), + Git("svn fetch", ""), + Git("branch", " branch1\n* branch2\n"), + Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""), + Git("branch -r", " svn/3.21\n svn/3.3\n"), + Git("reset --hard svn/3.3", ""), + Git("log --format=%H", "hash1\nhash2"), + Git("diff --name-only hash1 hash1^", ""), + Git("diff --name-only hash2 hash2^", TEST_CONFIG[VERSION_FILE]), + Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(3, 1, 1)), + Git("log -1 --format=%B hash2", + "Version 3.3.1.1 (merged 12)\n\nReview URL: fake.com\n"), + Git("log -1 --format=%s hash2", ""), + Git("svn find-rev hash2", "234"), + Git("log -1 --format=%ci hash2", "18:15"), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(22, 5)), + Git("reset --hard svn/3.21", ""), + Git("log --format=%H", "hash3\nhash4\nhash5\n"), + Git("diff --name-only hash3 hash3^", TEST_CONFIG[VERSION_FILE]), + Git("checkout -f hash3 -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(21, 2)), + Git("log -1 --format=%B hash3", ""), + Git("log -1 --format=%s hash3", ""), + Git("svn find-rev hash3", "123"), + Git("log -1 --format=%ci hash3", "03:15"), + Git("checkout 
-f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(22, 5)), + Git("reset --hard svn/trunk", ""), + Git("log --format=%H", "hash6\n"), + Git("diff --name-only hash6 hash6^", TEST_CONFIG[VERSION_FILE]), + Git("checkout -f hash6 -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(22, 3)), + Git("log -1 --format=%B hash6", ""), + Git("log -1 --format=%s hash6", ""), + Git("svn find-rev hash6", "345"), + Git("log -1 --format=%ci hash6", ""), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(22, 5)), + Git("reset --hard svn/bleeding_edge", ""), + Git("log https://v8.googlecode.com/svn/tags -v --limit 20", + tag_response_text), + Git("svn find-rev r22626", "hash_22626"), + Git("svn find-rev hash_22626", "22626"), + Git("log -1 --format=%ci hash_22626", "01:23"), + Git("svn find-rev r22624", "hash_22624"), + Git("svn find-rev hash_22624", "22624"), + Git("log -1 --format=%ci hash_22624", "02:34"), + Git("status -s -uno", ""), + Git("checkout -f master", ""), + Git("pull", ""), + Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""), + Git("log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\n"), + Git("diff --name-only c_hash1 c_hash1^", ""), + Git("diff --name-only c_hash2 c_hash2^", TEST_CONFIG[DEPS_FILE]), + Git("checkout -f c_hash2 -- %s" % TEST_CONFIG[DEPS_FILE], "", + cb=ResetDEPS(345)), + Git("svn find-rev c_hash2", "4567"), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "", + cb=ResetDEPS(567)), + Git("branch -r", " weird/123\n branch-heads/7\n"), + Git("checkout -f branch-heads/7 -- %s" % TEST_CONFIG[DEPS_FILE], "", + cb=ResetDEPS(345)), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "", + cb=ResetDEPS(567)), + Git("checkout -f master", ""), + Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""), + Git("checkout -f some_branch", ""), + Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""), + ]) + + args = ["-c", TEST_CONFIG[CHROMIUM], + "--json", json_output, + "--csv", csv_output, + "--max-releases", 
"1"] + Releases(TEST_CONFIG, self).Run(args) + + # Check expected output. + csv = ("3.28.41,bleeding_edge,22626,,\r\n" + "3.28.40,bleeding_edge,22624,,\r\n" + "3.22.3,trunk,345,4567,\r\n" + "3.21.2,3.21,123,,\r\n" + "3.3.1.1,3.3,234,,12\r\n") + self.assertEquals(csv, FileToText(csv_output)) + + expected_json = [ + {"bleeding_edge": "22626", "patches_merged": "", "version": "3.28.41", + "chromium_revision": "", "branch": "bleeding_edge", "revision": "22626", + "review_link": "", "date": "01:23", "chromium_branch": "", + "revision_link": "https://code.google.com/p/v8/source/detail?r=22626"}, + {"bleeding_edge": "22624", "patches_merged": "", "version": "3.28.40", + "chromium_revision": "", "branch": "bleeding_edge", "revision": "22624", + "review_link": "", "date": "02:34", "chromium_branch": "", + "revision_link": "https://code.google.com/p/v8/source/detail?r=22624"}, + {"bleeding_edge": "", "patches_merged": "", "version": "3.22.3", + "chromium_revision": "4567", "branch": "trunk", "revision": "345", + "review_link": "", "date": "", "chromium_branch": "7", + "revision_link": "https://code.google.com/p/v8/source/detail?r=345"}, + {"patches_merged": "", "bleeding_edge": "", "version": "3.21.2", + "chromium_revision": "", "branch": "3.21", "revision": "123", + "review_link": "", "date": "03:15", "chromium_branch": "", + "revision_link": "https://code.google.com/p/v8/source/detail?r=123"}, + {"patches_merged": "12", "bleeding_edge": "", "version": "3.3.1.1", + "chromium_revision": "", "branch": "3.3", "revision": "234", + "review_link": "fake.com", "date": "18:15", "chromium_branch": "", + "revision_link": "https://code.google.com/p/v8/source/detail?r=234"}, + ] + self.assertEquals(expected_json, json.loads(FileToText(json_output))) + + + def testBumpUpVersion(self): + TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() + self.WriteFakeVersionFile() + + def ResetVersion(minor, build, patch=0): + return lambda: self.WriteFakeVersionFile(minor=minor, + build=build, + 
patch=patch) + + self.ExpectGit([ + Git("status -s -uno", ""), + Git("checkout -f bleeding_edge", "", cb=ResetVersion(11, 4)), + Git("pull", ""), + Git("branch", ""), + Git("checkout -f bleeding_edge", ""), + Git("log -1 --format=%H", "latest_hash"), + Git("diff --name-only latest_hash latest_hash^", ""), + Git("checkout -f bleeding_edge", ""), + Git("log --format=%H --grep=\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\"", + "lkgr_hash"), + Git("checkout -b auto-bump-up-version lkgr_hash", ""), + Git("checkout -f bleeding_edge", ""), + Git("branch", ""), + Git("diff --name-only lkgr_hash lkgr_hash^", ""), + Git("checkout -f master", "", cb=ResetVersion(11, 5)), + Git("pull", ""), + Git("checkout -b auto-bump-up-version bleeding_edge", "", + cb=ResetVersion(11, 4)), + Git("commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n" + "TBR=author@chromium.org\"", ""), + Git("cl upload --send-mail --email \"author@chromium.org\" -f " + "--bypass-hooks", ""), + Git("cl dcommit -f --bypass-hooks", ""), + Git("checkout -f bleeding_edge", ""), + Git("branch", "auto-bump-up-version\n* bleeding_edge"), + Git("branch -D auto-bump-up-version", ""), + ]) + + self.ExpectReadURL([ + URL("https://v8-status.appspot.com/lkgr", "12345"), + URL("https://v8-status.appspot.com/current?format=json", + "{\"message\": \"Tree is open\"}"), + ]) + + BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"]) + + def testAutoTag(self): + TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() + self.WriteFakeVersionFile() + + def ResetVersion(minor, build, patch=0): + return lambda: self.WriteFakeVersionFile(minor=minor, + build=build, + patch=patch) + + self.ExpectGit([ + Git("status -s -uno", ""), + Git("status -s -b -uno", "## some_branch\n"), + Git("svn fetch", ""), + Git("branch", " branch1\n* branch2\n"), + Git("checkout -f master", ""), + Git("svn rebase", ""), + Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], "", + cb=ResetVersion(4, 5)), + Git("branch -r", 
"svn/tags/3.4.2\nsvn/tags/3.2.1.0\nsvn/branches/3.4"), + Git("log --format=%H --grep=\"\\[Auto\\-roll\\] Bump up version to\"", + "hash125\nhash118\nhash111\nhash101"), + Git("checkout -f hash125 -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(4, 4)), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(4, 5)), + Git("checkout -f hash118 -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(4, 3)), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(4, 5)), + Git("checkout -f hash111 -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(4, 2)), + Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "", + cb=ResetVersion(4, 5)), + Git("svn find-rev hash118", "118"), + Git("svn find-rev hash125", "125"), + Git("svn find-rev r123", "hash123"), + Git("log -1 --format=%at hash123", "1"), + Git("reset --hard hash123", ""), + Git("svn tag 3.4.3 -m \"Tagging version 3.4.3\"", ""), + Git("checkout -f some_branch", ""), + Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""), + ]) + + self.ExpectReadURL([ + URL("https://v8-status.appspot.com/revisions?format=json", + "[{\"revision\": \"126\", \"status\": true}," + "{\"revision\": \"123\", \"status\": true}," + "{\"revision\": \"112\", \"status\": true}]"), + ]) + + AutoTag(TEST_CONFIG, self).Run(["-a", "author@chromium.org"]) + + # Test that we bail out if the last change was a version change. + def testBumpUpVersionBailout1(self): + TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() + self._state["latest"] = "latest_hash" + + self.ExpectGit([ + Git("diff --name-only latest_hash latest_hash^", + TEST_CONFIG[VERSION_FILE]), + ]) + + self.assertEquals(1, + self.RunStep(BumpUpVersion, LastChangeBailout, ["--dry_run"])) + + # Test that we bail out if the lkgr was a version change. 
+ def testBumpUpVersionBailout2(self): + TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() + self._state["lkgr"] = "lkgr_hash" + + self.ExpectGit([ + Git("diff --name-only lkgr_hash lkgr_hash^", TEST_CONFIG[VERSION_FILE]), + ]) + + self.assertEquals(1, + self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"])) + + # Test that we bail out if the last version is already newer than the lkgr's + # version. + def testBumpUpVersionBailout3(self): + TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile() + self._state["lkgr"] = "lkgr_hash" + self._state["lkgr_version"] = "3.22.4.0" + self._state["latest_version"] = "3.22.5.0" + + self.ExpectGit([ + Git("diff --name-only lkgr_hash lkgr_hash^", ""), + ]) + + self.assertEquals(1, + self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"])) + class SystemTest(unittest.TestCase): def testReload(self): diff -Nru nodejs-0.11.13/deps/v8/tools/run_benchmarks.py nodejs-0.11.15/deps/v8/tools/run_benchmarks.py --- nodejs-0.11.13/deps/v8/tools/run_benchmarks.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/run_benchmarks.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,421 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Performance runner for d8. + +Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json + +The suite json format is expected to be: +{ + "path": <relative path chunks to benchmark resources and main file>, + "name": <optional suite name, file name is default>, + "archs": [<architecture name for which this suite is run>, ...], + "binary": <name of binary to run, default "d8">, + "flags": [<flag to d8>, ...], + "run_count": <how often will this suite run (optional)>, + "run_count_XXX": <how often will this suite run for arch XXX (optional)>, + "resources": [<js file to be loaded before main>, ...] 
+ "main": <main js benchmark runner file>, + "results_regexp": <optional regexp>, + "results_processor": <optional python results processor script>, + "units": <the unit specification for the performance dashboard>, + "benchmarks": [ + { + "name": <name of the benchmark>, + "results_regexp": <optional more specific regexp>, + "results_processor": <optional python results processor script>, + "units": <the unit specification for the performance dashboard>, + }, ... + ] +} + +The benchmarks field can also nest other suites in arbitrary depth. A suite +with a "main" file is a leaf suite that can contain one more level of +benchmarks. + +A suite's results_regexp is expected to have one string place holder +"%s" for the benchmark name. A benchmark's results_regexp overwrites suite +defaults. + +A suite's results_processor may point to an optional python script. If +specified, it is called after running the benchmarks like this (with a path +relatve to the suite level's path): +<results_processor file> <same flags as for d8> <suite level name> <output> + +The <output> is a temporary file containing d8 output. The results_regexp will +be applied to the output of this script. + +A suite without "benchmarks" is considered a benchmark itself. 
+ +Full example (suite with one runner): +{ + "path": ["."], + "flags": ["--expose-gc"], + "archs": ["ia32", "x64"], + "run_count": 5, + "run_count_ia32": 3, + "main": "run.js", + "results_regexp": "^%s: (.+)$", + "units": "score", + "benchmarks": [ + {"name": "Richards"}, + {"name": "DeltaBlue"}, + {"name": "NavierStokes", + "results_regexp": "^NavierStokes: (.+)$"} + ] +} + +Full example (suite with several runners): +{ + "path": ["."], + "flags": ["--expose-gc"], + "archs": ["ia32", "x64"], + "run_count": 5, + "units": "score", + "benchmarks": [ + {"name": "Richards", + "path": ["richards"], + "main": "run.js", + "run_count": 3, + "results_regexp": "^Richards: (.+)$"}, + {"name": "NavierStokes", + "path": ["navier_stokes"], + "main": "run.js", + "results_regexp": "^NavierStokes: (.+)$"} + ] +} + +Path pieces are concatenated. D8 is always run with the suite's path as cwd. +""" + +import json +import optparse +import os +import re +import sys + +from testrunner.local import commands +from testrunner.local import utils + +ARCH_GUESS = utils.DefaultArch() +SUPPORTED_ARCHS = ["android_arm", + "android_arm64", + "android_ia32", + "arm", + "ia32", + "mips", + "mipsel", + "nacl_ia32", + "nacl_x64", + "x64", + "arm64"] + + +class Results(object): + """Place holder for result traces.""" + def __init__(self, traces=None, errors=None): + self.traces = traces or [] + self.errors = errors or [] + + def ToDict(self): + return {"traces": self.traces, "errors": self.errors} + + def WriteToFile(self, file_name): + with open(file_name, "w") as f: + f.write(json.dumps(self.ToDict())) + + def __add__(self, other): + self.traces += other.traces + self.errors += other.errors + return self + + def __str__(self): # pragma: no cover + return str(self.ToDict()) + + +class Node(object): + """Represents a node in the benchmark suite tree structure.""" + def __init__(self, *args): + self._children = [] + + def AppendChild(self, child): + self._children.append(child) + + +class 
DefaultSentinel(Node): + """Fake parent node with all default values.""" + def __init__(self): + super(DefaultSentinel, self).__init__() + self.binary = "d8" + self.run_count = 10 + self.path = [] + self.graphs = [] + self.flags = [] + self.resources = [] + self.results_regexp = None + self.stddev_regexp = None + self.units = "score" + + +class Graph(Node): + """Represents a benchmark suite definition. + + Can either be a leaf or an inner node that provides default values. + """ + def __init__(self, suite, parent, arch): + super(Graph, self).__init__() + self._suite = suite + + assert isinstance(suite.get("path", []), list) + assert isinstance(suite["name"], basestring) + assert isinstance(suite.get("flags", []), list) + assert isinstance(suite.get("resources", []), list) + + # Accumulated values. + self.path = parent.path[:] + suite.get("path", []) + self.graphs = parent.graphs[:] + [suite["name"]] + self.flags = parent.flags[:] + suite.get("flags", []) + self.resources = parent.resources[:] + suite.get("resources", []) + + # Descrete values (with parent defaults). + self.binary = suite.get("binary", parent.binary) + self.run_count = suite.get("run_count", parent.run_count) + self.run_count = suite.get("run_count_%s" % arch, self.run_count) + self.units = suite.get("units", parent.units) + + # A regular expression for results. If the parent graph provides a + # regexp and the current suite has none, a string place holder for the + # suite name is expected. + # TODO(machenbach): Currently that makes only sense for the leaf level. + # Multiple place holders for multiple levels are not supported. + if parent.results_regexp: + regexp_default = parent.results_regexp % re.escape(suite["name"]) + else: + regexp_default = None + self.results_regexp = suite.get("results_regexp", regexp_default) + + # A similar regular expression for the standard deviation (optional). 
+ if parent.stddev_regexp: + stddev_default = parent.stddev_regexp % re.escape(suite["name"]) + else: + stddev_default = None + self.stddev_regexp = suite.get("stddev_regexp", stddev_default) + + +class Trace(Graph): + """Represents a leaf in the benchmark suite tree structure. + + Handles collection of measurements. + """ + def __init__(self, suite, parent, arch): + super(Trace, self).__init__(suite, parent, arch) + assert self.results_regexp + self.results = [] + self.errors = [] + self.stddev = "" + + def ConsumeOutput(self, stdout): + try: + self.results.append( + re.search(self.results_regexp, stdout, re.M).group(1)) + except: + self.errors.append("Regexp \"%s\" didn't match for benchmark %s." + % (self.results_regexp, self.graphs[-1])) + + try: + if self.stddev_regexp and self.stddev: + self.errors.append("Benchmark %s should only run once since a stddev " + "is provided by the benchmark." % self.graphs[-1]) + if self.stddev_regexp: + self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1) + except: + self.errors.append("Regexp \"%s\" didn't match for benchmark %s." + % (self.stddev_regexp, self.graphs[-1])) + + def GetResults(self): + return Results([{ + "graphs": self.graphs, + "units": self.units, + "results": self.results, + "stddev": self.stddev, + }], self.errors) + + +class Runnable(Graph): + """Represents a runnable benchmark suite definition (i.e. has a main file). + """ + @property + def main(self): + return self._suite["main"] + + def ChangeCWD(self, suite_path): + """Changes the cwd to to path defined in the current graph. + + The benchmarks are supposed to be relative to the suite configuration. + """ + suite_dir = os.path.abspath(os.path.dirname(suite_path)) + bench_dir = os.path.normpath(os.path.join(*self.path)) + os.chdir(os.path.join(suite_dir, bench_dir)) + + def GetCommand(self, shell_dir): + # TODO(machenbach): This requires +.exe if run on windows. 
+ return ( + [os.path.join(shell_dir, self.binary)] + + self.flags + + self.resources + + [self.main] + ) + + def Run(self, runner): + """Iterates over several runs and handles the output for all traces.""" + for stdout in runner(): + for trace in self._children: + trace.ConsumeOutput(stdout) + return reduce(lambda r, t: r + t.GetResults(), self._children, Results()) + + +class RunnableTrace(Trace, Runnable): + """Represents a runnable benchmark suite definition that is a leaf.""" + def __init__(self, suite, parent, arch): + super(RunnableTrace, self).__init__(suite, parent, arch) + + def Run(self, runner): + """Iterates over several runs and handles the output.""" + for stdout in runner(): + self.ConsumeOutput(stdout) + return self.GetResults() + + +def MakeGraph(suite, arch, parent): + """Factory method for making graph objects.""" + if isinstance(parent, Runnable): + # Below a runnable can only be traces. + return Trace(suite, parent, arch) + elif suite.get("main"): + # A main file makes this graph runnable. + if suite.get("benchmarks"): + # This graph has subbenchmarks (traces). + return Runnable(suite, parent, arch) + else: + # This graph has no subbenchmarks, it's a leaf. + return RunnableTrace(suite, parent, arch) + elif suite.get("benchmarks"): + # This is neither a leaf nor a runnable. + return Graph(suite, parent, arch) + else: # pragma: no cover + raise Exception("Invalid benchmark suite configuration.") + + +def BuildGraphs(suite, arch, parent=None): + """Builds a tree structure of graph objects that corresponds to the suite + configuration. + """ + parent = parent or DefaultSentinel() + + # TODO(machenbach): Implement notion of cpu type? 
+ if arch not in suite.get("archs", ["ia32", "x64"]): + return None + + graph = MakeGraph(suite, arch, parent) + for subsuite in suite.get("benchmarks", []): + BuildGraphs(subsuite, arch, graph) + parent.AppendChild(graph) + return graph + + +def FlattenRunnables(node): + """Generator that traverses the tree structure and iterates over all + runnables. + """ + if isinstance(node, Runnable): + yield node + elif isinstance(node, Node): + for child in node._children: + for result in FlattenRunnables(child): + yield result + else: # pragma: no cover + raise Exception("Invalid benchmark suite configuration.") + + +# TODO: Implement results_processor. +def Main(args): + parser = optparse.OptionParser() + parser.add_option("--arch", + help=("The architecture to run tests for, " + "'auto' or 'native' for auto-detect"), + default="x64") + parser.add_option("--buildbot", + help="Adapt to path structure used on buildbots", + default=False, action="store_true") + parser.add_option("--json-test-results", + help="Path to a file for storing json results.") + parser.add_option("--outdir", help="Base directory with compile output", + default="out") + (options, args) = parser.parse_args(args) + + if len(args) == 0: # pragma: no cover + parser.print_help() + return 1 + + if options.arch in ["auto", "native"]: # pragma: no cover + options.arch = ARCH_GUESS + + if not options.arch in SUPPORTED_ARCHS: # pragma: no cover + print "Unknown architecture %s" % options.arch + return 1 + + workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + + if options.buildbot: + shell_dir = os.path.join(workspace, options.outdir, "Release") + else: + shell_dir = os.path.join(workspace, options.outdir, + "%s.release" % options.arch) + + results = Results() + for path in args: + path = os.path.abspath(path) + + if not os.path.exists(path): # pragma: no cover + results.errors.append("Benchmark file %s does not exist." 
% path) + continue + + with open(path) as f: + suite = json.loads(f.read()) + + # If no name is given, default to the file name without .json. + suite.setdefault("name", os.path.splitext(os.path.basename(path))[0]) + + for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)): + print ">>> Running suite: %s" % "/".join(runnable.graphs) + runnable.ChangeCWD(path) + + def Runner(): + """Output generator that reruns several times.""" + for i in xrange(0, max(1, runnable.run_count)): + # TODO(machenbach): Make timeout configurable in the suite definition. + # Allow timeout per arch like with run_count per arch. + output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60) + print ">>> Stdout (#%d):" % (i + 1) + print output.stdout + if output.stderr: # pragma: no cover + # Print stderr for debugging. + print ">>> Stderr (#%d):" % (i + 1) + print output.stderr + yield output.stdout + + # Let runnable iterate over all runs and handle output. + results += runnable.Run(Runner) + + if options.json_test_results: + results.WriteToFile(options.json_test_results) + else: # pragma: no cover + print results + + return min(1, len(results.errors)) + +if __name__ == "__main__": # pragma: no cover + sys.exit(Main(sys.argv[1:])) diff -Nru nodejs-0.11.13/deps/v8/tools/run-deopt-fuzzer.py nodejs-0.11.15/deps/v8/tools/run-deopt-fuzzer.py --- nodejs-0.11.13/deps/v8/tools/run-deopt-fuzzer.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/run-deopt-fuzzer.py 2015-01-20 21:22:17.000000000 +0000 @@ -213,6 +213,8 @@ default= -1, type="int") result.add_option("-v", "--verbose", help="Verbose output", default=False, action="store_true") + result.add_option("--random-seed", default=0, dest="random_seed", + help="Default seed for initializing random generator") return result @@ -242,6 +244,8 @@ options.extra_flags = shlex.split(options.extra_flags) if options.j == 0: options.j = multiprocessing.cpu_count() + while options.random_seed == 0: + 
options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647) if not options.distribution_mode in DISTRIBUTION_MODES: print "Unknown distribution mode %s" % options.distribution_mode return False @@ -315,8 +319,11 @@ for mode in options.mode: for arch in options.arch: - code = Execute(arch, mode, args, options, suites, workspace) - exit_code = exit_code or code + try: + code = Execute(arch, mode, args, options, suites, workspace) + exit_code = exit_code or code + except KeyboardInterrupt: + return 2 return exit_code @@ -362,7 +369,12 @@ timeout, options.isolates, options.command_prefix, options.extra_flags, - False) + False, # Keep i18n on by default. + options.random_seed, + True, # No sorting of test cases. + 0, # Don't rerun failing tests. + 0, # No use of a rerun-failing-tests maximum. + False) # No predictable mode. # Find available test suites and read test cases from them. variables = { @@ -373,8 +385,10 @@ "isolates": options.isolates, "mode": mode, "no_i18n": False, + "no_snap": False, "simulator": utils.UseSimulator(arch), "system": utils.GuessOS(), + "tsan": False, } all_tests = [] num_tests = 0 @@ -403,17 +417,11 @@ print "No tests to run." return 0 - try: - print(">>> Collection phase") - progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() - runner = execution.Runner(suites, progress_indicator, ctx) - - exit_code = runner.Run(options.j) - if runner.terminate: - return exit_code + print(">>> Collection phase") + progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() + runner = execution.Runner(suites, progress_indicator, ctx) - except KeyboardInterrupt: - return 1 + exit_code = runner.Run(options.j) print(">>> Analysis phase") num_tests = 0 @@ -456,19 +464,12 @@ print "No tests to run." 
return 0 - try: - print(">>> Deopt fuzzing phase (%d test cases)" % num_tests) - progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() - runner = execution.Runner(suites, progress_indicator, ctx) - - exit_code = runner.Run(options.j) - if runner.terminate: - return exit_code - - except KeyboardInterrupt: - return 1 + print(">>> Deopt fuzzing phase (%d test cases)" % num_tests) + progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() + runner = execution.Runner(suites, progress_indicator, ctx) - return exit_code + code = runner.Run(options.j) + return exit_code or code if __name__ == "__main__": diff -Nru nodejs-0.11.13/deps/v8/tools/run.py nodejs-0.11.15/deps/v8/tools/run.py --- nodejs-0.11.13/deps/v8/tools/run.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/run.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,12 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""This program wraps an arbitrary command since gn currently can only execute +scripts.""" + +import subprocess +import sys + +sys.exit(subprocess.call(sys.argv[1:])) diff -Nru nodejs-0.11.13/deps/v8/tools/run-tests.py nodejs-0.11.15/deps/v8/tools/run-tests.py --- nodejs-0.11.13/deps/v8/tools/run-tests.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/run-tests.py 2015-01-20 21:22:17.000000000 +0000 @@ -34,6 +34,7 @@ import os from os.path import join import platform +import random import shlex import subprocess import sys @@ -49,7 +50,8 @@ ARCH_GUESS = utils.DefaultArch() -DEFAULT_TESTS = ["mjsunit", "cctest", "message", "preparser"] +DEFAULT_TESTS = ["mjsunit", "fuzz-natives", "base-unittests", + "cctest", "compiler-unittests", "message", "preparser"] TIMEOUT_DEFAULT = 60 TIMEOUT_SCALEFACTOR = {"debug" : 4, "release" : 1 } @@ -58,9 +60,10 @@ VARIANT_FLAGS = { "default": [], "stress": ["--stress-opt", "--always-opt"], + "turbofan": ["--turbo-filter=*", "--always-opt"], "nocrankshaft": ["--nocrankshaft"]} -VARIANTS = ["default", "stress", "nocrankshaft"] +VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"] MODE_FLAGS = { "debug" : ["--nohard-abort", "--nodead-code-elimination", @@ -79,19 +82,26 @@ "android_ia32", "arm", "ia32", + "x87", + "mips", "mipsel", + "mips64el", "nacl_ia32", "nacl_x64", "x64", + "x32", "arm64"] # Double the timeout for these: SLOW_ARCHS = ["android_arm", "android_arm64", "android_ia32", "arm", + "mips", "mipsel", + "mips64el", "nacl_ia32", "nacl_x64", + "x87", "arm64"] @@ -149,6 +159,12 @@ result.add_option("--no-presubmit", "--nopresubmit", help='Skip presubmit checks', default=False, dest="no_presubmit", action="store_true") + result.add_option("--no-snap", "--nosnap", + help='Test a build compiled without snapshot.', + default=False, dest="no_snap", action="store_true") + result.add_option("--no-sorting", "--nosorting", + help="Don't sort tests according to duration of last run.", + default=False, 
dest="no_sorting", action="store_true") result.add_option("--no-stress", "--nostress", help="Don't run crankshaft --always-opt --stress-op test", default=False, dest="no_stress", action="store_true") @@ -159,6 +175,9 @@ help="Comma-separated list of testing variants") result.add_option("--outdir", help="Base directory with compile output", default="out") + result.add_option("--predictable", + help="Compare output of several reruns of each test", + default=False, action="store_true") result.add_option("-p", "--progress", help=("The style of progress indicator" " (verbose, dots, color, mono)"), @@ -167,6 +186,15 @@ help=("Quick check mode (skip slow/flaky tests)")) result.add_option("--report", help="Print a summary of the tests to be run", default=False, action="store_true") + result.add_option("--json-test-results", + help="Path to a file for storing json results.") + result.add_option("--rerun-failures-count", + help=("Number of times to rerun each failing test case. " + "Very slow tests will be rerun only once."), + default=0, type="int") + result.add_option("--rerun-failures-max", + help="Maximum number of failing test cases to rerun.", + default=100, type="int") result.add_option("--shard-count", help="Split testsuites into this number of shards", default=1, type="int") @@ -187,6 +215,9 @@ default=False, action="store_true") result.add_option("-t", "--timeout", help="Timeout in seconds", default= -1, type="int") + result.add_option("--tsan", + help="Regard test expectations for TSAN", + default=False, action="store_true") result.add_option("-v", "--verbose", help="Verbose output", default=False, action="store_true") result.add_option("--valgrind", help="Run tests through valgrind", @@ -197,6 +228,8 @@ result.add_option("--junittestsuite", help="The testsuite name in the JUnit output file", default="v8tests") + result.add_option("--random-seed", default=0, dest="random_seed", + help="Default seed for initializing random generator") return result @@ -244,9 
+277,18 @@ if options.gc_stress: options.extra_flags += GC_STRESS_FLAGS + if options.asan: + options.extra_flags.append("--invoke-weak-callbacks") + + if options.tsan: + VARIANTS = ["default"] + if options.j == 0: options.j = multiprocessing.cpu_count() + while options.random_seed == 0: + options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647) + def excl(*args): """Returns true if zero or one of multiple arguments are true.""" return reduce(lambda x, y: x + y, args) <= 1 @@ -272,6 +314,11 @@ options.flaky_tests = "skip" options.slow_tests = "skip" options.pass_fail_tests = "skip" + if options.predictable: + VARIANTS = ["default"] + options.extra_flags.append("--predictable") + options.extra_flags.append("--verify_predictable") + options.extra_flags.append("--no-inline-new") if not options.shell_dir: if options.shell: @@ -325,9 +372,8 @@ workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), "..")) if not options.no_presubmit: print ">>> running presubmit tests" - code = subprocess.call( + exit_code = subprocess.call( [sys.executable, join(workspace, "tools", "presubmit.py")]) - exit_code = code suite_paths = utils.GetSuitePaths(join(workspace, "test")) @@ -388,12 +434,22 @@ timeout = TIMEOUT_DEFAULT; timeout *= TIMEOUT_SCALEFACTOR[mode] + + if options.predictable: + # Predictable mode is slower. + timeout *= 2 + ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose, timeout, options.isolates, options.command_prefix, options.extra_flags, - options.no_i18n) + options.no_i18n, + options.random_seed, + options.no_sorting, + options.rerun_failures_count, + options.rerun_failures_max, + options.predictable) # TODO(all): Combine "simulator" and "simulator_run". 
simulator_run = not options.dont_skip_simulator_slow_tests and \ @@ -407,9 +463,11 @@ "isolates": options.isolates, "mode": mode, "no_i18n": options.no_i18n, + "no_snap": options.no_snap, "simulator_run": simulator_run, "simulator": utils.UseSimulator(arch), "system": utils.GuessOS(), + "tsan": options.tsan, } all_tests = [] num_tests = 0 @@ -446,44 +504,42 @@ return 0 # Run the tests, either locally or distributed on the network. - try: - start_time = time.time() - progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() - if options.junitout: - progress_indicator = progress.JUnitTestProgressIndicator( - progress_indicator, options.junitout, options.junittestsuite) - - run_networked = not options.no_network - if not run_networked: - print("Network distribution disabled, running tests locally.") - elif utils.GuessOS() != "linux": - print("Network distribution is only supported on Linux, sorry!") + start_time = time.time() + progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() + if options.junitout: + progress_indicator = progress.JUnitTestProgressIndicator( + progress_indicator, options.junitout, options.junittestsuite) + if options.json_test_results: + progress_indicator = progress.JsonTestProgressIndicator( + progress_indicator, options.json_test_results, arch, mode) + + run_networked = not options.no_network + if not run_networked: + print("Network distribution disabled, running tests locally.") + elif utils.GuessOS() != "linux": + print("Network distribution is only supported on Linux, sorry!") + run_networked = False + peers = [] + if run_networked: + peers = network_execution.GetPeers() + if not peers: + print("No connection to distribution server; running tests locally.") run_networked = False - peers = [] - if run_networked: - peers = network_execution.GetPeers() - if not peers: - print("No connection to distribution server; running tests locally.") - run_networked = False - elif len(peers) == 1: - print("No other peers on the 
network; running tests locally.") - run_networked = False - elif num_tests <= 100: - print("Less than 100 tests, running them locally.") - run_networked = False - - if run_networked: - runner = network_execution.NetworkedRunner(suites, progress_indicator, - ctx, peers, workspace) - else: - runner = execution.Runner(suites, progress_indicator, ctx) + elif len(peers) == 1: + print("No other peers on the network; running tests locally.") + run_networked = False + elif num_tests <= 100: + print("Less than 100 tests, running them locally.") + run_networked = False + + if run_networked: + runner = network_execution.NetworkedRunner(suites, progress_indicator, + ctx, peers, workspace) + else: + runner = execution.Runner(suites, progress_indicator, ctx) - exit_code = runner.Run(options.j) - if runner.terminate: - return exit_code - overall_duration = time.time() - start_time - except KeyboardInterrupt: - raise + exit_code = runner.Run(options.j) + overall_duration = time.time() - start_time if options.time: verbose.PrintTestDurations(suites, overall_duration) diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/commands.py nodejs-0.11.15/deps/v8/tools/testrunner/local/commands.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/commands.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/commands.py 2015-01-20 21:22:17.000000000 +0000 @@ -64,49 +64,46 @@ def RunProcess(verbose, timeout, args, **rest): - try: - if verbose: print "#", " ".join(args) - popen_args = args - prev_error_mode = SEM_INVALID_VALUE - if utils.IsWindows(): - popen_args = subprocess.list2cmdline(args) - # Try to change the error mode to avoid dialogs on fatal errors. Don't - # touch any existing error mode flags by merging the existing error mode. - # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx. 
- error_mode = SEM_NOGPFAULTERRORBOX - prev_error_mode = Win32SetErrorMode(error_mode) - Win32SetErrorMode(error_mode | prev_error_mode) - process = subprocess.Popen( - shell=utils.IsWindows(), - args=popen_args, - **rest - ) - if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE): - Win32SetErrorMode(prev_error_mode) - # Compute the end time - if the process crosses this limit we - # consider it timed out. - if timeout is None: end_time = None - else: end_time = time.time() + timeout - timed_out = False - # Repeatedly check the exit code from the process in a - # loop and keep track of whether or not it times out. - exit_code = None - sleep_time = INITIAL_SLEEP_TIME - while exit_code is None: - if (not end_time is None) and (time.time() >= end_time): - # Kill the process and wait for it to exit. - KillProcessWithID(process.pid) - exit_code = process.wait() - timed_out = True - else: - exit_code = process.poll() - time.sleep(sleep_time) - sleep_time = sleep_time * SLEEP_TIME_FACTOR - if sleep_time > MAX_SLEEP_TIME: - sleep_time = MAX_SLEEP_TIME - return (exit_code, timed_out) - except KeyboardInterrupt: - raise + if verbose: print "#", " ".join(args) + popen_args = args + prev_error_mode = SEM_INVALID_VALUE + if utils.IsWindows(): + popen_args = subprocess.list2cmdline(args) + # Try to change the error mode to avoid dialogs on fatal errors. Don't + # touch any existing error mode flags by merging the existing error mode. + # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx. + error_mode = SEM_NOGPFAULTERRORBOX + prev_error_mode = Win32SetErrorMode(error_mode) + Win32SetErrorMode(error_mode | prev_error_mode) + process = subprocess.Popen( + shell=utils.IsWindows(), + args=popen_args, + **rest + ) + if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE): + Win32SetErrorMode(prev_error_mode) + # Compute the end time - if the process crosses this limit we + # consider it timed out. 
+ if timeout is None: end_time = None + else: end_time = time.time() + timeout + timed_out = False + # Repeatedly check the exit code from the process in a + # loop and keep track of whether or not it times out. + exit_code = None + sleep_time = INITIAL_SLEEP_TIME + while exit_code is None: + if (not end_time is None) and (time.time() >= end_time): + # Kill the process and wait for it to exit. + KillProcessWithID(process.pid) + exit_code = process.wait() + timed_out = True + else: + exit_code = process.poll() + time.sleep(sleep_time) + sleep_time = sleep_time * SLEEP_TIME_FACTOR + if sleep_time > MAX_SLEEP_TIME: + sleep_time = MAX_SLEEP_TIME + return (exit_code, timed_out) def PrintError(string): @@ -142,11 +139,9 @@ stdout=fd_out, stderr=fd_err ) - except KeyboardInterrupt: - raise - except: - raise finally: + # TODO(machenbach): A keyboard interrupt before the assignment to + # fd_out|err can lead to reference errors here. os.close(fd_out) os.close(fd_err) out = file(outname).read() diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/execution.py nodejs-0.11.15/deps/v8/tools/testrunner/local/execution.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/execution.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/execution.py 2015-01-20 21:22:17.000000000 +0000 @@ -26,19 +26,16 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import multiprocessing import os -import threading +import shutil import time +from pool import Pool from . import commands +from . import perfdata from . 
import utils -BREAK_NOW = -1 -EXCEPTION = -2 - - class Job(object): def __init__(self, command, dep_command, test_id, timeout, verbose): self.command = command @@ -49,29 +46,31 @@ def RunTest(job): - try: - start_time = time.time() - if job.dep_command is not None: - dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout) - # TODO(jkummerow): We approximate the test suite specific function - # IsFailureOutput() by just checking the exit code here. Currently - # only cctests define dependencies, for which this simplification is - # correct. - if dep_output.exit_code != 0: - return (job.id, dep_output, time.time() - start_time) - output = commands.Execute(job.command, job.verbose, job.timeout) - return (job.id, output, time.time() - start_time) - except KeyboardInterrupt: - return (-1, BREAK_NOW, 0) - except Exception, e: - print(">>> EXCEPTION: %s" % e) - return (-1, EXCEPTION, 0) - + start_time = time.time() + if job.dep_command is not None: + dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout) + # TODO(jkummerow): We approximate the test suite specific function + # IsFailureOutput() by just checking the exit code here. Currently + # only cctests define dependencies, for which this simplification is + # correct. 
+ if dep_output.exit_code != 0: + return (job.id, dep_output, time.time() - start_time) + output = commands.Execute(job.command, job.verbose, job.timeout) + return (job.id, output, time.time() - start_time) class Runner(object): def __init__(self, suites, progress_indicator, context): + self.datapath = os.path.join("out", "testrunner_data") + self.perf_data_manager = perfdata.PerfDataManager(self.datapath) + self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode) + self.perf_failures = False + self.printed_allocations = False self.tests = [ t for s in suites for t in s.tests ] + if not context.no_sorting: + for t in self.tests: + t.duration = self.perfdata.FetchPerfData(t) or 1.0 + self.tests.sort(key=lambda t: t.duration, reverse=True) self._CommonInit(len(self.tests), progress_indicator, context) def _CommonInit(self, num_tests, progress_indicator, context): @@ -83,8 +82,119 @@ self.remaining = num_tests self.failed = [] self.crashed = 0 - self.terminate = False - self.lock = threading.Lock() + self.reran_tests = 0 + + def _RunPerfSafe(self, fun): + try: + fun() + except Exception, e: + print("PerfData exception: %s" % e) + self.perf_failures = True + + def _GetJob(self, test): + command = self.GetCommand(test) + timeout = self.context.timeout + if ("--stress-opt" in test.flags or + "--stress-opt" in self.context.mode_flags or + "--stress-opt" in self.context.extra_flags): + timeout *= 4 + if test.dependency is not None: + dep_command = [ c.replace(test.path, test.dependency) for c in command ] + else: + dep_command = None + return Job(command, dep_command, test.id, timeout, self.context.verbose) + + def _MaybeRerun(self, pool, test): + if test.run <= self.context.rerun_failures_count: + # Possibly rerun this test if its run count is below the maximum per + # test. <= as the flag controls reruns not including the first run. + if test.run == 1: + # Count the overall number of reran tests on the first rerun. 
+ if self.reran_tests < self.context.rerun_failures_max: + self.reran_tests += 1 + else: + # Don't rerun this if the overall number of rerun tests has been + # reached. + return + if test.run >= 2 and test.duration > self.context.timeout / 20.0: + # Rerun slow tests at most once. + return + + # Rerun this test. + test.duration = None + test.output = None + test.run += 1 + pool.add([self._GetJob(test)]) + self.remaining += 1 + + def _ProcessTestNormal(self, test, result, pool): + self.indicator.AboutToRun(test) + test.output = result[1] + test.duration = result[2] + has_unexpected_output = test.suite.HasUnexpectedOutput(test) + if has_unexpected_output: + self.failed.append(test) + if test.output.HasCrashed(): + self.crashed += 1 + else: + self.succeeded += 1 + self.remaining -= 1 + # For the indicator, everything that happens after the first run is treated + # as unexpected even if it flakily passes in order to include it in the + # output. + self.indicator.HasRun(test, has_unexpected_output or test.run > 1) + if has_unexpected_output: + # Rerun test failures after the indicator has processed the results. + self._MaybeRerun(pool, test) + # Update the perf database if the test succeeded. + return not has_unexpected_output + + def _ProcessTestPredictable(self, test, result, pool): + def HasDifferentAllocations(output1, output2): + def AllocationStr(stdout): + for line in reversed((stdout or "").splitlines()): + if line.startswith("### Allocations = "): + self.printed_allocations = True + return line + return "" + return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout)) + + # Always pass the test duration for the database update. + test.duration = result[2] + if test.run == 1 and result[1].HasTimedOut(): + # If we get a timeout in the first run, we are already in an + # unpredictable state. Just report it as a failure and don't rerun. 
+ self.indicator.AboutToRun(test) + test.output = result[1] + self.remaining -= 1 + self.failed.append(test) + self.indicator.HasRun(test, True) + if test.run > 1 and HasDifferentAllocations(test.output, result[1]): + # From the second run on, check for different allocations. If a + # difference is found, call the indicator twice to report both tests. + # All runs of each test are counted as one for the statistic. + self.indicator.AboutToRun(test) + self.remaining -= 1 + self.failed.append(test) + self.indicator.HasRun(test, True) + self.indicator.AboutToRun(test) + test.output = result[1] + self.indicator.HasRun(test, True) + elif test.run >= 3: + # No difference on the third run -> report a success. + self.indicator.AboutToRun(test) + self.remaining -= 1 + self.succeeded += 1 + test.output = result[1] + self.indicator.HasRun(test, False) + else: + # No difference yet and less than three runs -> add another run and + # remember the output for comparison. + test.run += 1 + test.output = result[1] + pool.add([self._GetJob(test)]) + # Always update the perf database. + return True def Run(self, jobs): self.indicator.Starting() @@ -95,71 +205,46 @@ return 0 def _RunInternal(self, jobs): - pool = multiprocessing.Pool(processes=jobs) + pool = Pool(jobs) test_map = {} + # TODO(machenbach): Instead of filling the queue completely before + # pool.imap_unordered, make this a generator that already starts testing + # while the queue is filled. queue = [] queued_exception = None for test in self.tests: assert test.id >= 0 test_map[test.id] = test try: - command = self.GetCommand(test) + queue.append([self._GetJob(test)]) except Exception, e: # If this failed, save the exception and re-raise it later (after # all other tests have had a chance to run). 
queued_exception = e continue - timeout = self.context.timeout - if ("--stress-opt" in test.flags or - "--stress-opt" in self.context.mode_flags or - "--stress-opt" in self.context.extra_flags): - timeout *= 4 - if test.dependency is not None: - dep_command = [ c.replace(test.path, test.dependency) for c in command ] - else: - dep_command = None - job = Job(command, dep_command, test.id, timeout, self.context.verbose) - queue.append(job) try: - kChunkSize = 1 - it = pool.imap_unordered(RunTest, queue, kChunkSize) + it = pool.imap_unordered(RunTest, queue) for result in it: - test_id = result[0] - if test_id < 0: - if result[1] == BREAK_NOW: - self.terminate = True - else: - continue - if self.terminate: - pool.terminate() - pool.join() - raise BreakNowException("User pressed Ctrl+C or IO went wrong") - test = test_map[test_id] - self.indicator.AboutToRun(test) - test.output = result[1] - test.duration = result[2] - has_unexpected_output = test.suite.HasUnexpectedOutput(test) - if has_unexpected_output: - self.failed.append(test) - if test.output.HasCrashed(): - self.crashed += 1 + test = test_map[result[0]] + if self.context.predictable: + update_perf = self._ProcessTestPredictable(test, result, pool) else: - self.succeeded += 1 - self.remaining -= 1 - self.indicator.HasRun(test, has_unexpected_output) - except KeyboardInterrupt: - pool.terminate() - pool.join() - raise - except Exception, e: - print("Exception: %s" % e) + update_perf = self._ProcessTestNormal(test, result, pool) + if update_perf: + self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test)) + finally: pool.terminate() - pool.join() - raise + self._RunPerfSafe(lambda: self.perf_data_manager.close()) + if self.perf_failures: + # Nuke perf data in case of failures. This might not work on windows as + # some files might still be open. + print "Deleting perf test data due to db corruption." 
+ shutil.rmtree(self.datapath) if queued_exception: raise queued_exception - return + # Make sure that any allocations were printed in predictable mode. + assert not self.context.predictable or self.printed_allocations def GetCommand(self, test): d8testflag = [] @@ -171,6 +256,7 @@ cmd = (self.context.command_prefix + [os.path.abspath(os.path.join(self.context.shell_dir, shell))] + d8testflag + + ["--random-seed=%s" % self.context.random_seed] + test.suite.GetFlagsForTestCase(test, self.context) + self.context.extra_flags) return cmd diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/perfdata.py nodejs-0.11.15/deps/v8/tools/testrunner/local/perfdata.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/perfdata.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/perfdata.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,120 @@ +# Copyright 2012 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import os +import shelve +import threading + + +class PerfDataEntry(object): + def __init__(self): + self.avg = 0.0 + self.count = 0 + + def AddResult(self, result): + kLearnRateLimiter = 99 # Greater value means slower learning. + # We use an approximation of the average of the last 100 results here: + # The existing average is weighted with kLearnRateLimiter (or less + # if there are fewer data points). + effective_count = min(self.count, kLearnRateLimiter) + self.avg = self.avg * effective_count + result + self.count = effective_count + 1 + self.avg /= self.count + + +class PerfDataStore(object): + def __init__(self, datadir, arch, mode): + filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode)) + self.database = shelve.open(filename, protocol=2) + self.closed = False + self.lock = threading.Lock() + + def __del__(self): + self.close() + + def close(self): + if self.closed: return + self.database.close() + self.closed = True + + def GetKey(self, test): + """Computes the key used to access data for the given testcase.""" + flags = "".join(test.flags) + return str("%s.%s.%s" % (test.suitename(), test.path, flags)) + + def FetchPerfData(self, test): + """Returns the observed duration for |test| as read from the store.""" + key = self.GetKey(test) + if key in self.database: + return self.database[key].avg + return None + + def UpdatePerfData(self, test): + """Updates the persisted value in the store with test.duration.""" + 
testkey = self.GetKey(test) + self.RawUpdatePerfData(testkey, test.duration) + + def RawUpdatePerfData(self, testkey, duration): + with self.lock: + if testkey in self.database: + entry = self.database[testkey] + else: + entry = PerfDataEntry() + entry.AddResult(duration) + self.database[testkey] = entry + + +class PerfDataManager(object): + def __init__(self, datadir): + self.datadir = os.path.abspath(datadir) + if not os.path.exists(self.datadir): + os.makedirs(self.datadir) + self.stores = {} # Keyed by arch, then mode. + self.closed = False + self.lock = threading.Lock() + + def __del__(self): + self.close() + + def close(self): + if self.closed: return + for arch in self.stores: + modes = self.stores[arch] + for mode in modes: + store = modes[mode] + store.close() + self.closed = True + + def GetStore(self, arch, mode): + with self.lock: + if not arch in self.stores: + self.stores[arch] = {} + modes = self.stores[arch] + if not mode in modes: + modes[mode] = PerfDataStore(self.datadir, arch, mode) + return modes[mode] diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/pool.py nodejs-0.11.15/deps/v8/tools/testrunner/local/pool.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/pool.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/pool.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +from multiprocessing import Event, Process, Queue + +class NormalResult(): + def __init__(self, result): + self.result = result + self.exception = False + self.break_now = False + + +class ExceptionResult(): + def __init__(self): + self.exception = True + self.break_now = False + + +class BreakResult(): + def __init__(self): + self.exception = False + self.break_now = True + + +def Worker(fn, work_queue, done_queue, done): + """Worker to be run in a child process. + The worker stops on two conditions. 1. When the poison pill "STOP" is + reached or 2. when the event "done" is set.""" + try: + for args in iter(work_queue.get, "STOP"): + if done.is_set(): + break + try: + done_queue.put(NormalResult(fn(*args))) + except Exception, e: + print(">>> EXCEPTION: %s" % e) + done_queue.put(ExceptionResult()) + except KeyboardInterrupt: + done_queue.put(BreakResult()) + + +class Pool(): + """Distributes tasks to a number of worker processes. + New tasks can be added dynamically even after the workers have been started. + Requirement: Tasks can only be added from the parent process, e.g. while + consuming the results generator.""" + + # Factor to calculate the maximum number of items in the work/done queue. + # Necessary to not overflow the queue's pipe if a keyboard interrupt happens. + BUFFER_FACTOR = 4 + + def __init__(self, num_workers): + self.num_workers = num_workers + self.processes = [] + self.terminated = False + + # Invariant: count >= #work_queue + #done_queue. It is greater when a + # worker takes an item from the work_queue and before the result is + # submitted to the done_queue. It is equal when no worker is working, + # e.g. when all workers have finished, and when no results are processed. + # Count is only accessed by the parent process. Only the parent process is + # allowed to remove items from the done_queue and to add items to the + # work_queue. 
+ self.count = 0 + self.work_queue = Queue() + self.done_queue = Queue() + self.done = Event() + + def imap_unordered(self, fn, gen): + """Maps function "fn" to items in generator "gen" on the worker processes + in an arbitrary order. The items are expected to be lists of arguments to + the function. Returns a results iterator.""" + try: + gen = iter(gen) + self.advance = self._advance_more + + for w in xrange(self.num_workers): + p = Process(target=Worker, args=(fn, + self.work_queue, + self.done_queue, + self.done)) + self.processes.append(p) + p.start() + + self.advance(gen) + while self.count > 0: + result = self.done_queue.get() + self.count -= 1 + if result.exception: + # Ignore items with unexpected exceptions. + continue + elif result.break_now: + # A keyboard interrupt happened in one of the worker processes. + raise KeyboardInterrupt + else: + yield result.result + self.advance(gen) + finally: + self.terminate() + + def _advance_more(self, gen): + while self.count < self.num_workers * self.BUFFER_FACTOR: + try: + self.work_queue.put(gen.next()) + self.count += 1 + except StopIteration: + self.advance = self._advance_empty + break + + def _advance_empty(self, gen): + pass + + def add(self, args): + """Adds an item to the work queue. Can be called dynamically while + processing the results from imap_unordered.""" + self.work_queue.put(args) + self.count += 1 + + def terminate(self): + if self.terminated: + return + self.terminated = True + + # For exceptional tear down set the "done" event to stop the workers before + # they empty the queue buffer. + self.done.set() + + for p in self.processes: + # During normal tear down the workers block on get(). Feed a poison pill + # per worker to make them stop. + self.work_queue.put("STOP") + + for p in self.processes: + p.join() + + # Drain the queues to prevent failures when queues are garbage collected. 
+ try: + while True: self.work_queue.get(False) + except: + pass + try: + while True: self.done_queue.get(False) + except: + pass diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/pool_unittest.py nodejs-0.11.15/deps/v8/tools/testrunner/local/pool_unittest.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/pool_unittest.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/pool_unittest.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import unittest + +from pool import Pool + +def Run(x): + if x == 10: + raise Exception("Expected exception triggered by test.") + return x + +class PoolTest(unittest.TestCase): + def testNormal(self): + results = set() + pool = Pool(3) + for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]): + results.add(result) + self.assertEquals(set(range(0, 10)), results) + + def testException(self): + results = set() + pool = Pool(3) + for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]): + # Item 10 will not appear in results due to an internal exception. 
+ results.add(result) + expect = set(range(0, 12)) + expect.remove(10) + self.assertEquals(expect, results) + + def testAdd(self): + results = set() + pool = Pool(3) + for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]): + results.add(result) + if result < 30: + pool.add([result + 20]) + self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)), + results) diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/progress.py nodejs-0.11.15/deps/v8/tools/testrunner/local/progress.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/progress.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/progress.py 2015-01-20 21:22:17.000000000 +0000 @@ -26,11 +26,17 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import json +import os import sys import time from . import junit_output + +ABS_PATH_PREFIX = os.getcwd() + os.sep + + def EscapeCommand(command): parts = [] for part in command: @@ -277,6 +283,59 @@ fail_text) +class JsonTestProgressIndicator(ProgressIndicator): + + def __init__(self, progress_indicator, json_test_results, arch, mode): + self.progress_indicator = progress_indicator + self.json_test_results = json_test_results + self.arch = arch + self.mode = mode + self.results = [] + + def Starting(self): + self.progress_indicator.runner = self.runner + self.progress_indicator.Starting() + + def Done(self): + self.progress_indicator.Done() + complete_results = [] + if os.path.exists(self.json_test_results): + with open(self.json_test_results, "r") as f: + # Buildbot might start out with an empty file. 
+ complete_results = json.loads(f.read() or "[]") + + complete_results.append({ + "arch": self.arch, + "mode": self.mode, + "results": self.results, + }) + + with open(self.json_test_results, "w") as f: + f.write(json.dumps(complete_results)) + + def AboutToRun(self, test): + self.progress_indicator.AboutToRun(test) + + def HasRun(self, test, has_unexpected_output): + self.progress_indicator.HasRun(test, has_unexpected_output) + if not has_unexpected_output: + # Omit tests that run as expected. Passing tests of reruns after failures + # will have unexpected_output to be reported here has well. + return + + self.results.append({ + "name": test.GetLabel(), + "flags": test.flags, + "command": EscapeCommand(self.runner.GetCommand(test)).replace( + ABS_PATH_PREFIX, ""), + "run": test.run, + "stdout": test.output.stdout, + "stderr": test.output.stderr, + "exit_code": test.output.exit_code, + "result": test.suite.GetOutcome(test), + }) + + PROGRESS_INDICATORS = { 'verbose': VerboseProgressIndicator, 'dots': DotsProgressIndicator, diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/statusfile.py nodejs-0.11.15/deps/v8/tools/testrunner/local/statusfile.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/statusfile.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/statusfile.py 2015-01-20 21:22:17.000000000 +0000 @@ -52,9 +52,9 @@ # Support arches, modes to be written as keywords instead of strings. 
VARIABLES = {ALWAYS: True} -for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", - "arm", "arm64", "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64", - "macos", "windows", "linux"]: +for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87", + "arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32", + "nacl_x64", "macos", "windows", "linux"]: VARIABLES[var] = var diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/testsuite.py nodejs-0.11.15/deps/v8/tools/testrunner/local/testsuite.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/testsuite.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/testsuite.py 2015-01-20 21:22:17.000000000 +0000 @@ -190,18 +190,19 @@ else: return execution_failed - def HasUnexpectedOutput(self, testcase): + def GetOutcome(self, testcase): if testcase.output.HasCrashed(): - outcome = statusfile.CRASH + return statusfile.CRASH elif testcase.output.HasTimedOut(): - outcome = statusfile.TIMEOUT + return statusfile.TIMEOUT elif self.HasFailed(testcase): - outcome = statusfile.FAIL + return statusfile.FAIL else: - outcome = statusfile.PASS - if not testcase.outcomes: - return outcome != statusfile.PASS - return not outcome in testcase.outcomes + return statusfile.PASS + + def HasUnexpectedOutput(self, testcase): + outcome = self.GetOutcome(testcase) + return not outcome in (testcase.outcomes or [statusfile.PASS]) def StripOutputForTransmit(self, testcase): if not self.HasUnexpectedOutput(testcase): diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/local/utils.py nodejs-0.11.15/deps/v8/tools/testrunner/local/utils.py --- nodejs-0.11.13/deps/v8/tools/testrunner/local/utils.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/local/utils.py 2015-01-20 21:22:17.000000000 +0000 @@ -32,6 +32,7 @@ from os.path import join import platform import re +import urllib2 def GetSuitePaths(test_root): 
@@ -113,3 +114,10 @@ def IsWindows(): return GuessOS() == 'windows' + + +def URLRetrieve(source, destination): + """urllib is broken for SSL connections via a proxy therefore we + can't use urllib.urlretrieve().""" + with open(destination, 'w') as f: + f.write(urllib2.urlopen(source).read()) diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/network/network_execution.py nodejs-0.11.15/deps/v8/tools/testrunner/network/network_execution.py --- nodejs-0.11.13/deps/v8/tools/testrunner/network/network_execution.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/network/network_execution.py 2015-01-20 21:22:17.000000000 +0000 @@ -33,8 +33,8 @@ import time from . import distro -from . import perfdata from ..local import execution +from ..local import perfdata from ..objects import peer from ..objects import workpacket from ..server import compression @@ -54,6 +54,8 @@ self.suites = suites num_tests = 0 datapath = os.path.join("out", "testrunner_data") + # TODO(machenbach): These fields should exist now in the superclass. + # But there is no super constructor call. Check if this is a problem. self.perf_data_manager = perfdata.PerfDataManager(datapath) self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode) for s in suites: diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/network/perfdata.py nodejs-0.11.15/deps/v8/tools/testrunner/network/perfdata.py --- nodejs-0.11.13/deps/v8/tools/testrunner/network/perfdata.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/network/perfdata.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -# Copyright 2012 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -import os -import shelve -import threading - - -class PerfDataEntry(object): - def __init__(self): - self.avg = 0.0 - self.count = 0 - - def AddResult(self, result): - kLearnRateLimiter = 99 # Greater value means slower learning. - # We use an approximation of the average of the last 100 results here: - # The existing average is weighted with kLearnRateLimiter (or less - # if there are fewer data points). 
- effective_count = min(self.count, kLearnRateLimiter) - self.avg = self.avg * effective_count + result - self.count = effective_count + 1 - self.avg /= self.count - - -class PerfDataStore(object): - def __init__(self, datadir, arch, mode): - filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode)) - self.database = shelve.open(filename, protocol=2) - self.closed = False - self.lock = threading.Lock() - - def __del__(self): - self.close() - - def close(self): - if self.closed: return - self.database.close() - self.closed = True - - def GetKey(self, test): - """Computes the key used to access data for the given testcase.""" - flags = "".join(test.flags) - return str("%s.%s.%s" % (test.suitename(), test.path, flags)) - - def FetchPerfData(self, test): - """Returns the observed duration for |test| as read from the store.""" - key = self.GetKey(test) - if key in self.database: - return self.database[key].avg - return None - - def UpdatePerfData(self, test): - """Updates the persisted value in the store with test.duration.""" - testkey = self.GetKey(test) - self.RawUpdatePerfData(testkey, test.duration) - - def RawUpdatePerfData(self, testkey, duration): - with self.lock: - if testkey in self.database: - entry = self.database[testkey] - else: - entry = PerfDataEntry() - entry.AddResult(duration) - self.database[testkey] = entry - - -class PerfDataManager(object): - def __init__(self, datadir): - self.datadir = os.path.abspath(datadir) - if not os.path.exists(self.datadir): - os.makedirs(self.datadir) - self.stores = {} # Keyed by arch, then mode. 
- self.closed = False - self.lock = threading.Lock() - - def __del__(self): - self.close() - - def close(self): - if self.closed: return - for arch in self.stores: - modes = self.stores[arch] - for mode in modes: - store = modes[mode] - store.close() - self.closed = True - - def GetStore(self, arch, mode): - with self.lock: - if not arch in self.stores: - self.stores[arch] = {} - modes = self.stores[arch] - if not mode in modes: - modes[mode] = PerfDataStore(self.datadir, arch, mode) - return modes[mode] diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/objects/context.py nodejs-0.11.15/deps/v8/tools/testrunner/objects/context.py --- nodejs-0.11.13/deps/v8/tools/testrunner/objects/context.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/objects/context.py 2015-01-20 21:22:17.000000000 +0000 @@ -28,7 +28,9 @@ class Context(): def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout, - isolates, command_prefix, extra_flags, noi18n): + isolates, command_prefix, extra_flags, noi18n, random_seed, + no_sorting, rerun_failures_count, rerun_failures_max, + predictable): self.arch = arch self.mode = mode self.shell_dir = shell_dir @@ -39,13 +41,21 @@ self.command_prefix = command_prefix self.extra_flags = extra_flags self.noi18n = noi18n + self.random_seed = random_seed + self.no_sorting = no_sorting + self.rerun_failures_count = rerun_failures_count + self.rerun_failures_max = rerun_failures_max + self.predictable = predictable def Pack(self): return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates, - self.command_prefix, self.extra_flags, self.noi18n] + self.command_prefix, self.extra_flags, self.noi18n, + self.random_seed, self.no_sorting, self.rerun_failures_count, + self.rerun_failures_max, self.predictable] @staticmethod def Unpack(packed): # For the order of the fields, refer to Pack() above. 
return Context(packed[0], packed[1], None, packed[2], False, - packed[3], packed[4], packed[5], packed[6], packed[7]) + packed[3], packed[4], packed[5], packed[6], packed[7], + packed[8], packed[9], packed[10], packed[11], packed[12]) diff -Nru nodejs-0.11.13/deps/v8/tools/testrunner/objects/testcase.py nodejs-0.11.15/deps/v8/tools/testrunner/objects/testcase.py --- nodejs-0.11.13/deps/v8/tools/testrunner/objects/testcase.py 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/testrunner/objects/testcase.py 2015-01-20 21:22:17.000000000 +0000 @@ -38,6 +38,7 @@ self.output = None self.id = None # int, used to map result back to TestCase instance self.duration = None # assigned during execution + self.run = 1 # The nth time this test is executed. def CopyAddingFlags(self, flags): copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency) @@ -60,6 +61,7 @@ test = TestCase(str(task[0]), task[1], task[2], task[3]) test.outcomes = set(task[4]) test.id = task[5] + test.run = 1 return test def SetSuiteObject(self, suites): diff -Nru nodejs-0.11.13/deps/v8/tools/tickprocessor.js nodejs-0.11.15/deps/v8/tools/tickprocessor.js --- nodejs-0.11.13/deps/v8/tools/tickprocessor.js 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/tickprocessor.js 2015-01-20 21:22:17.000000000 +0000 @@ -441,12 +441,6 @@ if (this.ticks_.total == 0) return; - // Print the unknown ticks percentage if they are not ignored. - if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) { - this.printHeader('Unknown'); - this.printCounter(this.ticks_.unaccounted, this.ticks_.total); - } - var flatProfile = this.profile_.getFlatProfile(); var flatView = this.viewBuilder_.buildView(flatProfile); // Sort by self time, desc, then by name, desc. @@ -457,33 +451,39 @@ if (this.ignoreUnknown_) { totalTicks -= this.ticks_.unaccounted; } - // Our total time contains all the ticks encountered, - // while profile only knows about the filtered ticks. 
- flatView.head.totalTime = totalTicks; // Count library ticks var flatViewNodes = flatView.head.children; var self = this; + var libraryTicks = 0; - this.processProfile(flatViewNodes, + this.printHeader('Shared libraries'); + this.printEntries(flatViewNodes, totalTicks, null, function(name) { return self.isSharedLibrary(name); }, function(rec) { libraryTicks += rec.selfTime; }); var nonLibraryTicks = totalTicks - libraryTicks; - this.printHeader('Shared libraries'); - this.printEntries(flatViewNodes, null, - function(name) { return self.isSharedLibrary(name); }); - + var jsTicks = 0; this.printHeader('JavaScript'); - this.printEntries(flatViewNodes, nonLibraryTicks, - function(name) { return self.isJsCode(name); }); + this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks, + function(name) { return self.isJsCode(name); }, + function(rec) { jsTicks += rec.selfTime; }); + var cppTicks = 0; this.printHeader('C++'); - this.printEntries(flatViewNodes, nonLibraryTicks, - function(name) { return self.isCppCode(name); }); - - this.printHeader('GC'); - this.printCounter(this.ticks_.gc, totalTicks); + this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks, + function(name) { return self.isCppCode(name); }, + function(rec) { cppTicks += rec.selfTime; }); + + this.printHeader('Summary'); + this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks); + this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks); + this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks); + this.printLine('Shared libraries', libraryTicks, totalTicks, null); + if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) { + this.printLine('Unaccounted', this.ticks_.unaccounted, + this.ticks_.total, null); + } this.printHeavyProfHeader(); var heavyProfile = this.profile_.getBottomUpProfile(); @@ -517,6 +517,18 @@ }; +TickProcessor.prototype.printLine = function( + entry, ticks, totalTicks, nonLibTicks) { + var pct = ticks * 100 / totalTicks; + var nonLibPct = nonLibTicks 
!= null + ? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '% ' + : ' '; + print(' ' + padLeft(ticks, 5) + ' ' + + padLeft(pct.toFixed(1), 5) + '% ' + + nonLibPct + + entry); +} + TickProcessor.prototype.printHeavyProfHeader = function() { print('\n [Bottom up (heavy) profile]:'); print(' Note: percentage shows a share of a particular caller in the ' + @@ -529,12 +541,6 @@ }; -TickProcessor.prototype.printCounter = function(ticksCount, totalTicksCount) { - var pct = ticksCount * 100.0 / totalTicksCount; - print(' ' + padLeft(ticksCount, 5) + ' ' + padLeft(pct.toFixed(1), 5) + '%'); -}; - - TickProcessor.prototype.processProfile = function( profile, filterP, func) { for (var i = 0, n = profile.length; i < n; ++i) { @@ -580,18 +586,13 @@ }; TickProcessor.prototype.printEntries = function( - profile, nonLibTicks, filterP) { + profile, totalTicks, nonLibTicks, filterP, callback) { var that = this; this.processProfile(profile, filterP, function (rec) { if (rec.selfTime == 0) return; - var nonLibPct = nonLibTicks != null ? - rec.selfTime * 100.0 / nonLibTicks : 0.0; + callback(rec); var funcName = that.formatFunctionName(rec.internalFuncName); - - print(' ' + padLeft(rec.selfTime, 5) + ' ' + - padLeft(rec.selfPercent.toFixed(1), 5) + '% ' + - padLeft(nonLibPct.toFixed(1), 5) + '% ' + - funcName); + that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks); }); }; diff -Nru nodejs-0.11.13/deps/v8/tools/unittests/run_benchmarks_test.py nodejs-0.11.15/deps/v8/tools/unittests/run_benchmarks_test.py --- nodejs-0.11.13/deps/v8/tools/unittests/run_benchmarks_test.py 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/unittests/run_benchmarks_test.py 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,297 @@ +#!/usr/bin/env python +# Copyright 2014 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +from collections import namedtuple +import coverage +import json +from mock import DEFAULT +from mock import MagicMock +import os +from os import path, sys +import shutil +import tempfile +import unittest + +# Requires python-coverage and python-mock. Native python coverage +# version >= 3.7.1 should be installed to get the best speed. + +TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks") + +V8_JSON = { + "path": ["."], + "binary": "d7", + "flags": ["--flag"], + "main": "run.js", + "run_count": 1, + "results_regexp": "^%s: (.+)$", + "benchmarks": [ + {"name": "Richards"}, + {"name": "DeltaBlue"}, + ] +} + +V8_NESTED_SUITES_JSON = { + "path": ["."], + "flags": ["--flag"], + "run_count": 1, + "units": "score", + "benchmarks": [ + {"name": "Richards", + "path": ["richards"], + "binary": "d7", + "main": "run.js", + "resources": ["file1.js", "file2.js"], + "run_count": 2, + "results_regexp": "^Richards: (.+)$"}, + {"name": "Sub", + "path": ["sub"], + "benchmarks": [ + {"name": "Leaf", + "path": ["leaf"], + "run_count_x64": 3, + "units": "ms", + "main": "run.js", + "results_regexp": "^Simple: (.+) ms.$"}, + ] + }, + {"name": "DeltaBlue", + "path": ["delta_blue"], + "main": "run.js", + "flags": ["--flag2"], + "results_regexp": "^DeltaBlue: (.+)$"}, + {"name": "ShouldntRun", + "path": ["."], + "archs": ["arm"], + "main": "run.js"}, + ] +} + +Output = namedtuple("Output", "stdout, stderr") + +class BenchmarksTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.base = path.dirname(path.dirname(path.abspath(__file__))) + sys.path.append(cls.base) + cls._cov = coverage.coverage( + include=([os.path.join(cls.base, "run_benchmarks.py")])) + cls._cov.start() + import run_benchmarks + from testrunner.local import commands + global commands + global run_benchmarks + + @classmethod + def tearDownClass(cls): + cls._cov.stop() + print "" + print cls._cov.report() + + def setUp(self): + self.maxDiff = None + if path.exists(TEST_WORKSPACE): + 
shutil.rmtree(TEST_WORKSPACE) + os.makedirs(TEST_WORKSPACE) + + def tearDown(self): + if path.exists(TEST_WORKSPACE): + shutil.rmtree(TEST_WORKSPACE) + + def _WriteTestInput(self, json_content): + self._test_input = path.join(TEST_WORKSPACE, "test.json") + with open(self._test_input, "w") as f: + f.write(json.dumps(json_content)) + + def _MockCommand(self, *args): + # Fake output for each benchmark run. + benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]] + def execute(*args, **kwargs): + return benchmark_outputs.pop() + commands.Execute = MagicMock(side_effect=execute) + + # Check that d8 is called from the correct cwd for each benchmark run. + dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]] + def chdir(*args, **kwargs): + self.assertEquals(dirs.pop(), args[0]) + os.chdir = MagicMock(side_effect=chdir) + + def _CallMain(self, *args): + self._test_output = path.join(TEST_WORKSPACE, "results.json") + all_args=[ + "--json-test-results", + self._test_output, + self._test_input, + ] + all_args += args + return run_benchmarks.Main(all_args) + + def _LoadResults(self): + with open(self._test_output) as f: + return json.load(f) + + def _VerifyResults(self, suite, units, traces): + self.assertEquals([ + {"units": units, + "graphs": [suite, trace["name"]], + "results": trace["results"], + "stddev": trace["stddev"]} for trace in traces], + self._LoadResults()["traces"]) + + def _VerifyErrors(self, errors): + self.assertEquals(errors, self._LoadResults()["errors"]) + + def _VerifyMock(self, binary, *args): + arg = [path.join(path.dirname(self.base), binary)] + arg += args + commands.Execute.assert_called_with(arg, timeout=60) + + def _VerifyMockMultiple(self, *args): + expected = [] + for arg in args: + a = [path.join(path.dirname(self.base), arg[0])] + a += arg[1:] + expected.append(((a,), {"timeout": 60})) + self.assertEquals(expected, commands.Execute.call_args_list) + + def testOneRun(self): + self._WriteTestInput(V8_JSON) + 
self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"]) + self.assertEquals(0, self._CallMain()) + self._VerifyResults("test", "score", [ + {"name": "Richards", "results": ["1.234"], "stddev": ""}, + {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, + ]) + self._VerifyErrors([]) + self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") + + def testTwoRuns_Units_SuiteName(self): + test_input = dict(V8_JSON) + test_input["run_count"] = 2 + test_input["name"] = "v8" + test_input["units"] = "ms" + self._WriteTestInput(test_input) + self._MockCommand([".", "."], + ["Richards: 100\nDeltaBlue: 200\n", + "Richards: 50\nDeltaBlue: 300\n"]) + self.assertEquals(0, self._CallMain()) + self._VerifyResults("v8", "ms", [ + {"name": "Richards", "results": ["50", "100"], "stddev": ""}, + {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""}, + ]) + self._VerifyErrors([]) + self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") + + def testTwoRuns_SubRegexp(self): + test_input = dict(V8_JSON) + test_input["run_count"] = 2 + del test_input["results_regexp"] + test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$" + test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$" + self._WriteTestInput(test_input) + self._MockCommand([".", "."], + ["Richards: 100\nDeltaBlue: 200\n", + "Richards: 50\nDeltaBlue: 300\n"]) + self.assertEquals(0, self._CallMain()) + self._VerifyResults("test", "score", [ + {"name": "Richards", "results": ["50", "100"], "stddev": ""}, + {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""}, + ]) + self._VerifyErrors([]) + self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") + + def testNestedSuite(self): + self._WriteTestInput(V8_NESTED_SUITES_JSON) + self._MockCommand(["delta_blue", "sub/leaf", "richards"], + ["DeltaBlue: 200\n", + "Simple: 1 ms.\n", + "Simple: 2 ms.\n", + "Simple: 3 ms.\n", + "Richards: 100\n", + "Richards: 
50\n"]) + self.assertEquals(0, self._CallMain()) + self.assertEquals([ + {"units": "score", + "graphs": ["test", "Richards"], + "results": ["50", "100"], + "stddev": ""}, + {"units": "ms", + "graphs": ["test", "Sub", "Leaf"], + "results": ["3", "2", "1"], + "stddev": ""}, + {"units": "score", + "graphs": ["test", "DeltaBlue"], + "results": ["200"], + "stddev": ""}, + ], self._LoadResults()["traces"]) + self._VerifyErrors([]) + self._VerifyMockMultiple( + (path.join("out", "x64.release", "d7"), "--flag", "file1.js", + "file2.js", "run.js"), + (path.join("out", "x64.release", "d7"), "--flag", "file1.js", + "file2.js", "run.js"), + (path.join("out", "x64.release", "d8"), "--flag", "run.js"), + (path.join("out", "x64.release", "d8"), "--flag", "run.js"), + (path.join("out", "x64.release", "d8"), "--flag", "run.js"), + (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js")) + + def testOneRunStdDevRegExp(self): + test_input = dict(V8_JSON) + test_input["stddev_regexp"] = "^%s\-stddev: (.+)$" + self._WriteTestInput(test_input) + self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n" + "DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"]) + self.assertEquals(0, self._CallMain()) + self._VerifyResults("test", "score", [ + {"name": "Richards", "results": ["1.234"], "stddev": "0.23"}, + {"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"}, + ]) + self._VerifyErrors([]) + self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") + + def testTwoRunsStdDevRegExp(self): + test_input = dict(V8_JSON) + test_input["stddev_regexp"] = "^%s\-stddev: (.+)$" + test_input["run_count"] = 2 + self._WriteTestInput(test_input) + self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n" + "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n", + "Richards: 2\nRichards-stddev: 0.5\n" + "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"]) + self.assertEquals(1, self._CallMain()) + self._VerifyResults("test", "score", [ + {"name": "Richards", "results": ["2", 
"3"], "stddev": "0.7"}, + {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"}, + ]) + self._VerifyErrors( + ["Benchmark Richards should only run once since a stddev is provided " + "by the benchmark.", + "Benchmark DeltaBlue should only run once since a stddev is provided " + "by the benchmark.", + "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark " + "DeltaBlue."]) + self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") + + def testBuildbot(self): + self._WriteTestInput(V8_JSON) + self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"]) + self.assertEquals(0, self._CallMain("--buildbot")) + self._VerifyResults("test", "score", [ + {"name": "Richards", "results": ["1.234"], "stddev": ""}, + {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, + ]) + self._VerifyErrors([]) + self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js") + + def testRegexpNoMatch(self): + self._WriteTestInput(V8_JSON) + self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"]) + self.assertEquals(1, self._CallMain()) + self._VerifyResults("test", "score", [ + {"name": "Richards", "results": [], "stddev": ""}, + {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, + ]) + self._VerifyErrors( + ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."]) + self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") diff -Nru nodejs-0.11.13/deps/v8/tools/whitespace.txt nodejs-0.11.15/deps/v8/tools/whitespace.txt --- nodejs-0.11.13/deps/v8/tools/whitespace.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/v8/tools/whitespace.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +You can modify this file to create no-op changelists. + +Try to write something funny. And please don't add trailing whitespace. + +A Smi walks into a bar and says: +"I'm so deoptimized today!" +The doubles heard this and started to unbox. 
+The Smi looked at them and...................... diff -Nru nodejs-0.11.13/deps/zlib/adler32.c nodejs-0.11.15/deps/zlib/adler32.c --- nodejs-0.11.13/deps/zlib/adler32.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/adler32.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,14 +1,17 @@ /* adler32.c -- compute the Adler-32 checksum of a data stream - * Copyright (C) 1995-2004 Mark Adler + * Copyright (C) 1995-2011 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: adler32.c,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ -#define ZLIB_INTERNAL -#include "zlib.h" +#include "zutil.h" -#define BASE 65521UL /* largest prime smaller than 65536 */ +#define local static + +local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2)); + +#define BASE 65521 /* largest prime smaller than 65536 */ #define NMAX 5552 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ @@ -18,39 +21,44 @@ #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); -/* use NO_DIVIDE if your processor does not do division in hardware */ +/* use NO_DIVIDE if your processor does not do division in hardware -- + try it both ways to see which is faster */ #ifdef NO_DIVIDE -# define MOD(a) \ +/* note that this assumes BASE is 65521, where 65536 % 65521 == 15 + (thank you to John Reiser for pointing this out) */ +# define CHOP(a) \ do { \ - if (a >= (BASE << 16)) a -= (BASE << 16); \ - if (a >= (BASE << 15)) a -= (BASE << 15); \ - if (a >= (BASE << 14)) a -= (BASE << 14); \ - if (a >= (BASE << 13)) a -= (BASE << 13); \ - if (a >= (BASE << 12)) a -= (BASE << 12); \ - if (a >= (BASE << 11)) a -= (BASE << 11); \ - if (a >= (BASE << 10)) a -= (BASE << 10); \ - if (a >= (BASE << 9)) a -= (BASE << 9); \ - if (a >= (BASE << 8)) a -= (BASE << 8); \ - if (a >= (BASE << 7)) a -= (BASE << 7); \ - if (a >= (BASE << 6)) a -= (BASE << 6); \ - if (a >= (BASE << 5)) a -= 
(BASE << 5); \ - if (a >= (BASE << 4)) a -= (BASE << 4); \ - if (a >= (BASE << 3)) a -= (BASE << 3); \ - if (a >= (BASE << 2)) a -= (BASE << 2); \ - if (a >= (BASE << 1)) a -= (BASE << 1); \ + unsigned long tmp = a >> 16; \ + a &= 0xffffUL; \ + a += (tmp << 4) - tmp; \ + } while (0) +# define MOD28(a) \ + do { \ + CHOP(a); \ if (a >= BASE) a -= BASE; \ } while (0) -# define MOD4(a) \ +# define MOD(a) \ do { \ - if (a >= (BASE << 4)) a -= (BASE << 4); \ - if (a >= (BASE << 3)) a -= (BASE << 3); \ - if (a >= (BASE << 2)) a -= (BASE << 2); \ - if (a >= (BASE << 1)) a -= (BASE << 1); \ + CHOP(a); \ + MOD28(a); \ + } while (0) +# define MOD63(a) \ + do { /* this assumes a is not negative */ \ + z_off64_t tmp = a >> 32; \ + a &= 0xffffffffL; \ + a += (tmp << 8) - (tmp << 5) + tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ if (a >= BASE) a -= BASE; \ } while (0) #else # define MOD(a) a %= BASE -# define MOD4(a) a %= BASE +# define MOD28(a) a %= BASE +# define MOD63(a) a %= BASE #endif /* ========================================================================= */ @@ -89,7 +97,7 @@ } if (adler >= BASE) adler -= BASE; - MOD4(sum2); /* only added so many BASE's */ + MOD28(sum2); /* only added so many BASE's */ return adler | (sum2 << 16); } @@ -125,25 +133,47 @@ } /* ========================================================================= */ -uLong ZEXPORT adler32_combine(adler1, adler2, len2) +local uLong adler32_combine_(adler1, adler2, len2) uLong adler1; uLong adler2; - z_off_t len2; + z_off64_t len2; { unsigned long sum1; unsigned long sum2; unsigned rem; + /* for negative len, return invalid adler32 as a clue for debugging */ + if (len2 < 0) + return 0xffffffffUL; + /* the derivation of this formula is left as an exercise for the reader */ - rem = (unsigned)(len2 % BASE); + MOD63(len2); /* assumes len2 >= 0 */ + rem = (unsigned)len2; sum1 = adler1 & 0xffff; sum2 = rem * sum1; 
MOD(sum2); sum1 += (adler2 & 0xffff) + BASE - 1; sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; - if (sum1 > BASE) sum1 -= BASE; - if (sum1 > BASE) sum1 -= BASE; - if (sum2 > (BASE << 1)) sum2 -= (BASE << 1); - if (sum2 > BASE) sum2 -= BASE; + if (sum1 >= BASE) sum1 -= BASE; + if (sum1 >= BASE) sum1 -= BASE; + if (sum2 >= (BASE << 1)) sum2 -= (BASE << 1); + if (sum2 >= BASE) sum2 -= BASE; return sum1 | (sum2 << 16); } + +/* ========================================================================= */ +uLong ZEXPORT adler32_combine(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} + +uLong ZEXPORT adler32_combine64(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} diff -Nru nodejs-0.11.13/deps/zlib/amiga/Makefile.pup nodejs-0.11.15/deps/zlib/amiga/Makefile.pup --- nodejs-0.11.13/deps/zlib/amiga/Makefile.pup 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/amiga/Makefile.pup 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,69 @@ +# Amiga powerUP (TM) Makefile +# makefile for libpng and SAS C V6.58/7.00 PPC compiler +# Copyright (C) 1998 by Andreas R. 
Kleinert + +LIBNAME = libzip.a + +CC = scppc +CFLAGS = NOSTKCHK NOSINT OPTIMIZE OPTGO OPTPEEP OPTINLOCAL OPTINL \ + OPTLOOP OPTRDEP=8 OPTDEP=8 OPTCOMP=8 NOVER +AR = ppc-amigaos-ar cr +RANLIB = ppc-amigaos-ranlib +LD = ppc-amigaos-ld -r +LDFLAGS = -o +LDLIBS = LIB:scppc.a LIB:end.o +RM = delete quiet + +OBJS = adler32.o compress.o crc32.o gzclose.o gzlib.o gzread.o gzwrite.o \ + uncompr.o deflate.o trees.o zutil.o inflate.o infback.o inftrees.o inffast.o + +TEST_OBJS = example.o minigzip.o + +all: example minigzip + +check: test +test: all + example + echo hello world | minigzip | minigzip -d + +$(LIBNAME): $(OBJS) + $(AR) $@ $(OBJS) + -$(RANLIB) $@ + +example: example.o $(LIBNAME) + $(LD) $(LDFLAGS) $@ LIB:c_ppc.o $@.o $(LIBNAME) $(LDLIBS) + +minigzip: minigzip.o $(LIBNAME) + $(LD) $(LDFLAGS) $@ LIB:c_ppc.o $@.o $(LIBNAME) $(LDLIBS) + +mostlyclean: clean +clean: + $(RM) *.o example minigzip $(LIBNAME) foo.gz + +zip: + zip -ul9 zlib README ChangeLog Makefile Make????.??? Makefile.?? \ + descrip.mms *.[ch] + +tgz: + cd ..; tar cfz zlib/zlib.tgz zlib/README zlib/ChangeLog zlib/Makefile \ + zlib/Make????.??? zlib/Makefile.?? zlib/descrip.mms zlib/*.[ch] + +# DO NOT DELETE THIS LINE -- make depend depends on it. 
+ +adler32.o: zlib.h zconf.h +compress.o: zlib.h zconf.h +crc32.o: crc32.h zlib.h zconf.h +deflate.o: deflate.h zutil.h zlib.h zconf.h +example.o: zlib.h zconf.h +gzclose.o: zlib.h zconf.h gzguts.h +gzlib.o: zlib.h zconf.h gzguts.h +gzread.o: zlib.h zconf.h gzguts.h +gzwrite.o: zlib.h zconf.h gzguts.h +inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +infback.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inftrees.o: zutil.h zlib.h zconf.h inftrees.h +minigzip.o: zlib.h zconf.h +trees.o: deflate.h zutil.h zlib.h zconf.h trees.h +uncompr.o: zlib.h zconf.h +zutil.o: zutil.h zlib.h zconf.h diff -Nru nodejs-0.11.13/deps/zlib/amiga/Makefile.sas nodejs-0.11.15/deps/zlib/amiga/Makefile.sas --- nodejs-0.11.13/deps/zlib/amiga/Makefile.sas 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/amiga/Makefile.sas 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +# SMakefile for zlib +# Modified from the standard UNIX Makefile Copyright Jean-loup Gailly +# Osma Ahvenlampi <Osma.Ahvenlampi@hut.fi> +# Amiga, SAS/C 6.56 & Smake + +CC=sc +CFLAGS=OPT +#CFLAGS=OPT CPU=68030 +#CFLAGS=DEBUG=LINE +LDFLAGS=LIB z.lib + +SCOPTIONS=OPTSCHED OPTINLINE OPTALIAS OPTTIME OPTINLOCAL STRMERGE \ + NOICONS PARMS=BOTH NOSTACKCHECK UTILLIB NOVERSION ERRORREXX \ + DEF=POSTINC + +OBJS = adler32.o compress.o crc32.o gzclose.o gzlib.o gzread.o gzwrite.o \ + uncompr.o deflate.o trees.o zutil.o inflate.o infback.o inftrees.o inffast.o + +TEST_OBJS = example.o minigzip.o + +all: SCOPTIONS example minigzip + +check: test +test: all + example + echo hello world | minigzip | minigzip -d + +install: z.lib + copy clone zlib.h zconf.h INCLUDE: + copy clone z.lib LIB: + +z.lib: $(OBJS) + oml z.lib r $(OBJS) + +example: example.o z.lib + $(CC) $(CFLAGS) LINK TO $@ example.o $(LDFLAGS) + +minigzip: minigzip.o z.lib + $(CC) $(CFLAGS) LINK TO $@ minigzip.o $(LDFLAGS) + +mostlyclean: clean +clean: + -delete force 
quiet example minigzip *.o z.lib foo.gz *.lnk SCOPTIONS + +SCOPTIONS: Makefile.sas + copy to $@ <from < +$(SCOPTIONS) +< + +# DO NOT DELETE THIS LINE -- make depend depends on it. + +adler32.o: zlib.h zconf.h +compress.o: zlib.h zconf.h +crc32.o: crc32.h zlib.h zconf.h +deflate.o: deflate.h zutil.h zlib.h zconf.h +example.o: zlib.h zconf.h +gzclose.o: zlib.h zconf.h gzguts.h +gzlib.o: zlib.h zconf.h gzguts.h +gzread.o: zlib.h zconf.h gzguts.h +gzwrite.o: zlib.h zconf.h gzguts.h +inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +infback.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inftrees.o: zutil.h zlib.h zconf.h inftrees.h +minigzip.o: zlib.h zconf.h +trees.o: deflate.h zutil.h zlib.h zconf.h trees.h +uncompr.o: zlib.h zconf.h +zutil.o: zutil.h zlib.h zconf.h diff -Nru nodejs-0.11.13/deps/zlib/as400/bndsrc nodejs-0.11.15/deps/zlib/as400/bndsrc --- nodejs-0.11.13/deps/zlib/as400/bndsrc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/as400/bndsrc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,215 @@ +STRPGMEXP PGMLVL(*CURRENT) SIGNATURE('ZLIB') + +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ +/* Version 1.1.3 entry points. 
*/ +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ + +/********************************************************************/ +/* *MODULE ADLER32 ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("adler32") + +/********************************************************************/ +/* *MODULE COMPRESS ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("compress") + EXPORT SYMBOL("compress2") + +/********************************************************************/ +/* *MODULE CRC32 ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("crc32") + EXPORT SYMBOL("get_crc_table") + +/********************************************************************/ +/* *MODULE DEFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("deflate") + EXPORT SYMBOL("deflateEnd") + EXPORT SYMBOL("deflateSetDictionary") + EXPORT SYMBOL("deflateCopy") + EXPORT SYMBOL("deflateReset") + EXPORT SYMBOL("deflateParams") + EXPORT SYMBOL("deflatePrime") + EXPORT SYMBOL("deflateInit_") + EXPORT SYMBOL("deflateInit2_") + +/********************************************************************/ +/* *MODULE GZIO ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("gzopen") + EXPORT SYMBOL("gzdopen") + EXPORT SYMBOL("gzsetparams") + EXPORT SYMBOL("gzread") + EXPORT SYMBOL("gzwrite") + EXPORT SYMBOL("gzprintf") + EXPORT SYMBOL("gzputs") + EXPORT SYMBOL("gzgets") + EXPORT SYMBOL("gzputc") + EXPORT SYMBOL("gzgetc") + EXPORT SYMBOL("gzflush") + EXPORT SYMBOL("gzseek") + EXPORT SYMBOL("gzrewind") + EXPORT SYMBOL("gztell") + EXPORT SYMBOL("gzeof") + EXPORT SYMBOL("gzclose") + EXPORT SYMBOL("gzerror") + 
+/********************************************************************/ +/* *MODULE INFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("inflate") + EXPORT SYMBOL("inflateEnd") + EXPORT SYMBOL("inflateSetDictionary") + EXPORT SYMBOL("inflateSync") + EXPORT SYMBOL("inflateReset") + EXPORT SYMBOL("inflateInit_") + EXPORT SYMBOL("inflateInit2_") + EXPORT SYMBOL("inflateSyncPoint") + +/********************************************************************/ +/* *MODULE UNCOMPR ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("uncompress") + +/********************************************************************/ +/* *MODULE ZUTIL ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("zlibVersion") + EXPORT SYMBOL("zError") + +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ +/* Version 1.2.1 additional entry points. 
*/ +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ + +/********************************************************************/ +/* *MODULE COMPRESS ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("compressBound") + +/********************************************************************/ +/* *MODULE DEFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("deflateBound") + +/********************************************************************/ +/* *MODULE GZIO ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("gzungetc") + EXPORT SYMBOL("gzclearerr") + +/********************************************************************/ +/* *MODULE INFBACK ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("inflateBack") + EXPORT SYMBOL("inflateBackEnd") + EXPORT SYMBOL("inflateBackInit_") + +/********************************************************************/ +/* *MODULE INFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("inflateCopy") + +/********************************************************************/ +/* *MODULE ZUTIL ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("zlibCompileFlags") + +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ +/* Version 1.2.5 additional entry points. 
*/ +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ + +/********************************************************************/ +/* *MODULE ADLER32 ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("adler32_combine") + EXPORT SYMBOL("adler32_combine64") + +/********************************************************************/ +/* *MODULE CRC32 ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("crc32_combine") + EXPORT SYMBOL("crc32_combine64") + +/********************************************************************/ +/* *MODULE GZLIB ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("gzbuffer") + EXPORT SYMBOL("gzoffset") + EXPORT SYMBOL("gzoffset64") + EXPORT SYMBOL("gzopen64") + EXPORT SYMBOL("gzseek64") + EXPORT SYMBOL("gztell64") + +/********************************************************************/ +/* *MODULE GZREAD ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("gzclose_r") + +/********************************************************************/ +/* *MODULE GZWRITE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("gzclose_w") + +/********************************************************************/ +/* *MODULE INFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("inflateMark") + EXPORT SYMBOL("inflatePrime") + EXPORT SYMBOL("inflateReset2") + EXPORT SYMBOL("inflateUndermine") + +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ +/* Version 1.2.6 additional entry points. 
*/ +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ + +/********************************************************************/ +/* *MODULE DEFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("deflateResetKeep") + EXPORT SYMBOL("deflatePending") + +/********************************************************************/ +/* *MODULE GZWRITE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("gzgetc_") + +/********************************************************************/ +/* *MODULE INFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("inflateResetKeep") + +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ +/* Version 1.2.8 additional entry points. */ +/*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@*/ + +/********************************************************************/ +/* *MODULE INFLATE ZLIB 01/02/01 00:15:09 */ +/********************************************************************/ + + EXPORT SYMBOL("inflateGetDictionary") + +ENDPGMEXP diff -Nru nodejs-0.11.13/deps/zlib/as400/compile.clp nodejs-0.11.15/deps/zlib/as400/compile.clp --- nodejs-0.11.13/deps/zlib/as400/compile.clp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/as400/compile.clp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,110 @@ +/******************************************************************************/ +/* */ +/* ZLIB */ +/* */ +/* Compile sources into modules and link them into a service program. */ +/* */ +/******************************************************************************/ + + PGM + +/* Configuration adjustable parameters. */ + + DCL VAR(&SRCLIB) TYPE(*CHAR) LEN(10) + + VALUE('ZLIB') /* Source library. */ + DCL VAR(&SRCFILE) TYPE(*CHAR) LEN(10) + + VALUE('SOURCES') /* Source member file. 
*/ + DCL VAR(&CTLFILE) TYPE(*CHAR) LEN(10) + + VALUE('TOOLS') /* Control member file. */ + + DCL VAR(&MODLIB) TYPE(*CHAR) LEN(10) + + VALUE('ZLIB') /* Module library. */ + + DCL VAR(&SRVLIB) TYPE(*CHAR) LEN(10) + + VALUE('LGPL') /* Service program library. */ + + DCL VAR(&CFLAGS) TYPE(*CHAR) + + VALUE('OPTIMIZE(40)') /* Compile options. */ + + DCL VAR(&TGTRLS) TYPE(*CHAR) + + VALUE('V5R3M0') /* Target release. */ + + +/* Working storage. */ + + DCL VAR(&CMDLEN) TYPE(*DEC) LEN(15 5) VALUE(300) /* Command length. */ + DCL VAR(&CMD) TYPE(*CHAR) LEN(512) + DCL VAR(&FIXDCMD) TYPE(*CHAR) LEN(512) + + +/* Compile sources into modules. */ + + CHGVAR VAR(&FIXDCMD) VALUE('CRTCMOD' *BCAT &CFLAGS *BCAT + + 'SYSIFCOPT(*IFS64IO)' *BCAT + + 'DEFINE(''_LARGEFILE64_SOURCE''' *BCAT + + '''_LFS64_LARGEFILE=1'') TGTRLS(' *TCAT &TGTRLS *TCAT + + ') SRCFILE(' *TCAT &SRCLIB *TCAT '/' *TCAT + + &SRCFILE *TCAT ') MODULE(' *TCAT &MODLIB *TCAT '/') + + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'ADLER32)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'COMPRESS)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'CRC32)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'DEFLATE)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'GZCLOSE)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'GZLIB)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'GZREAD)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'GZWRITE)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'INFBACK)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'INFFAST)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'INFLATE)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 
'INFTREES)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'TREES)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'UNCOMPR)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + CHGVAR VAR(&CMD) VALUE(&FIXDCMD *TCAT 'ZUTIL)') + CALL PGM(QCMDEXC) PARM(&CMD &CMDLEN) + + +/* Link modules into a service program. */ + + CRTSRVPGM SRVPGM(&SRVLIB/ZLIB) + + MODULE(&MODLIB/ADLER32 &MODLIB/COMPRESS + + &MODLIB/CRC32 &MODLIB/DEFLATE + + &MODLIB/GZCLOSE &MODLIB/GZLIB + + &MODLIB/GZREAD &MODLIB/GZWRITE + + &MODLIB/INFBACK &MODLIB/INFFAST + + &MODLIB/INFLATE &MODLIB/INFTREES + + &MODLIB/TREES &MODLIB/UNCOMPR + + &MODLIB/ZUTIL) + + SRCFILE(&SRCLIB/&CTLFILE) SRCMBR(BNDSRC) + + TEXT('ZLIB 1.2.8') TGTRLS(&TGTRLS) + + ENDPGM diff -Nru nodejs-0.11.13/deps/zlib/as400/readme.txt nodejs-0.11.15/deps/zlib/as400/readme.txt --- nodejs-0.11.13/deps/zlib/as400/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/as400/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,115 @@ + ZLIB version 1.2.8 for AS400 installation instructions + +I) From an AS400 *SAVF file: + +1) Unpacking archive to an AS400 save file + +On the AS400: + +_ Create the ZLIB AS400 library: + + CRTLIB LIB(ZLIB) TYPE(*PROD) TEXT('ZLIB compression API library') + +_ Create a work save file, for example: + + CRTSAVF FILE(ZLIB/ZLIBSAVF) + +On a PC connected to the target AS400: + +_ Unpack the save file image to a PC file "ZLIBSAVF" +_ Upload this file into the save file on the AS400, for example + using ftp in BINARY mode. + + +2) Populating the ZLIB AS400 source library + +On the AS400: + +_ Extract the saved objects into the ZLIB AS400 library using: + +RSTOBJ OBJ(*ALL) SAVLIB(ZLIB) DEV(*SAVF) SAVF(ZLIB/ZLIBSAVF) RSTLIB(ZLIB) + + +3) Customize installation: + +_ Edit CL member ZLIB/TOOLS(COMPILE) and change parameters if needed, + according to the comments. 
+ +_ Compile this member with: + + CRTCLPGM PGM(ZLIB/COMPILE) SRCFILE(ZLIB/TOOLS) SRCMBR(COMPILE) + + +4) Compile and generate the service program: + +_ This can now be done by executing: + + CALL PGM(ZLIB/COMPILE) + + + +II) From the original source distribution: + +1) On the AS400, create the source library: + + CRTLIB LIB(ZLIB) TYPE(*PROD) TEXT('ZLIB compression API library') + +2) Create the source files: + + CRTSRCPF FILE(ZLIB/SOURCES) RCDLEN(112) TEXT('ZLIB library modules') + CRTSRCPF FILE(ZLIB/H) RCDLEN(112) TEXT('ZLIB library includes') + CRTSRCPF FILE(ZLIB/TOOLS) RCDLEN(112) TEXT('ZLIB library control utilities') + +3) From the machine hosting the distribution files, upload them (with + FTP in text mode, for example) according to the following table: + + Original AS400 AS400 AS400 AS400 + file file member type description + SOURCES Original ZLIB C subprogram sources + adler32.c ADLER32 C ZLIB - Compute the Adler-32 checksum of a dta strm + compress.c COMPRESS C ZLIB - Compress a memory buffer + crc32.c CRC32 C ZLIB - Compute the CRC-32 of a data stream + deflate.c DEFLATE C ZLIB - Compress data using the deflation algorithm + gzclose.c GZCLOSE C ZLIB - Close .gz files + gzlib.c GZLIB C ZLIB - Miscellaneous .gz files IO support + gzread.c GZREAD C ZLIB - Read .gz files + gzwrite.c GZWRITE C ZLIB - Write .gz files + infback.c INFBACK C ZLIB - Inflate using a callback interface + inffast.c INFFAST C ZLIB - Fast proc. 
literals & length/distance pairs + inflate.c INFLATE C ZLIB - Interface to inflate modules + inftrees.c INFTREES C ZLIB - Generate Huffman trees for efficient decode + trees.c TREES C ZLIB - Output deflated data using Huffman coding + uncompr.c UNCOMPR C ZLIB - Decompress a memory buffer + zutil.c ZUTIL C ZLIB - Target dependent utility functions + H Original ZLIB C and ILE/RPG include files + crc32.h CRC32 C ZLIB - CRC32 tables + deflate.h DEFLATE C ZLIB - Internal compression state + gzguts.h GZGUTS C ZLIB - Definitions for the gzclose module + inffast.h INFFAST C ZLIB - Header to use inffast.c + inffixed.h INFFIXED C ZLIB - Table for decoding fixed codes + inflate.h INFLATE C ZLIB - Internal inflate state definitions + inftrees.h INFTREES C ZLIB - Header to use inftrees.c + trees.h TREES C ZLIB - Created automatically with -DGEN_TREES_H + zconf.h ZCONF C ZLIB - Compression library configuration + zlib.h ZLIB C ZLIB - Compression library C user interface + as400/zlib.inc ZLIB.INC RPGLE ZLIB - Compression library ILE RPG user interface + zutil.h ZUTIL C ZLIB - Internal interface and configuration + TOOLS Building source software & AS/400 README + as400/bndsrc BNDSRC Entry point exportation list + as400/compile.clp COMPILE CLP Compile sources & generate service program + as400/readme.txt README TXT Installation instructions + +4) Continue as in I)3). + + + + +Notes: For AS400 ILE RPG programmers, a /copy member defining the ZLIB + API prototypes for ILE RPG can be found in ZLIB/H(ZLIB.INC). + Please read comments in this member for more information. + + Remember that most foreign textual data are ASCII coded: this + implementation does not handle conversion from/to ASCII, so + text data code conversions must be done explicitely. + + Mainly for the reason above, always open zipped files in binary mode. 
diff -Nru nodejs-0.11.13/deps/zlib/as400/zlib.inc nodejs-0.11.15/deps/zlib/as400/zlib.inc --- nodejs-0.11.13/deps/zlib/as400/zlib.inc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/as400/zlib.inc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,451 @@ + * ZLIB.INC - Interface to the general purpose compression library + * + * ILE RPG400 version by Patrick Monnerat, DATASPHERE. + * Version 1.2.8 + * + * + * WARNING: + * Procedures inflateInit(), inflateInit2(), deflateInit(), + * deflateInit2() and inflateBackInit() need to be called with + * two additional arguments: + * the package version string and the stream control structure. + * size. This is needed because RPG lacks some macro feature. + * Call these procedures as: + * inflateInit(...: ZLIB_VERSION: %size(z_stream)) + * + /if not defined(ZLIB_H_) + /define ZLIB_H_ + * + ************************************************************************** + * Constants + ************************************************************************** + * + * Versioning information. + * + D ZLIB_VERSION C '1.2.8' + D ZLIB_VERNUM C X'1280' + D ZLIB_VER_MAJOR C 1 + D ZLIB_VER_MINOR C 2 + D ZLIB_VER_REVISION... + D C 8 + D ZLIB_VER_SUBREVISION... + D C 0 + * + * Other equates. + * + D Z_NO_FLUSH C 0 + D Z_PARTIAL_FLUSH... + D C 1 + D Z_SYNC_FLUSH C 2 + D Z_FULL_FLUSH C 3 + D Z_FINISH C 4 + D Z_BLOCK C 5 + D Z_TREES C 6 + * + D Z_OK C 0 + D Z_STREAM_END C 1 + D Z_NEED_DICT C 2 + D Z_ERRNO C -1 + D Z_STREAM_ERROR C -2 + D Z_DATA_ERROR C -3 + D Z_MEM_ERROR C -4 + D Z_BUF_ERROR C -5 + DZ_VERSION_ERROR C -6 + * + D Z_NO_COMPRESSION... + D C 0 + D Z_BEST_SPEED C 1 + D Z_BEST_COMPRESSION... + D C 9 + D Z_DEFAULT_COMPRESSION... + D C -1 + * + D Z_FILTERED C 1 + D Z_HUFFMAN_ONLY C 2 + D Z_RLE C 3 + D Z_DEFAULT_STRATEGY... 
+ D C 0 + * + D Z_BINARY C 0 + D Z_ASCII C 1 + D Z_UNKNOWN C 2 + * + D Z_DEFLATED C 8 + * + D Z_NULL C 0 + * + ************************************************************************** + * Types + ************************************************************************** + * + D z_streamp S * Stream struct ptr + D gzFile S * File pointer + D z_off_t S 10i 0 Stream offsets + D z_off64_t S 20i 0 Stream offsets + * + ************************************************************************** + * Structures + ************************************************************************** + * + * The GZIP encode/decode stream support structure. + * + D z_stream DS align based(z_streamp) + D zs_next_in * Next input byte + D zs_avail_in 10U 0 Byte cnt at next_in + D zs_total_in 10U 0 Total bytes read + D zs_next_out * Output buffer ptr + D zs_avail_out 10U 0 Room left @ next_out + D zs_total_out 10U 0 Total bytes written + D zs_msg * Last errmsg or null + D zs_state * Internal state + D zs_zalloc * procptr Int. state allocator + D zs_free * procptr Int. state dealloc. + D zs_opaque * Private alloc. data + D zs_data_type 10i 0 ASC/BIN best guess + D zs_adler 10u 0 Uncompr. adler32 val + D 10U 0 Reserved + D 10U 0 Ptr. 
alignment + * + ************************************************************************** + * Utility function prototypes + ************************************************************************** + * + D compress PR 10I 0 extproc('compress') + D dest 65535 options(*varsize) Destination buffer + D destLen 10U 0 Destination length + D source 65535 const options(*varsize) Source buffer + D sourceLen 10u 0 value Source length + * + D compress2 PR 10I 0 extproc('compress2') + D dest 65535 options(*varsize) Destination buffer + D destLen 10U 0 Destination length + D source 65535 const options(*varsize) Source buffer + D sourceLen 10U 0 value Source length + D level 10I 0 value Compression level + * + D compressBound PR 10U 0 extproc('compressBound') + D sourceLen 10U 0 value + * + D uncompress PR 10I 0 extproc('uncompress') + D dest 65535 options(*varsize) Destination buffer + D destLen 10U 0 Destination length + D source 65535 const options(*varsize) Source buffer + D sourceLen 10U 0 value Source length + * + /if not defined(LARGE_FILES) + D gzopen PR extproc('gzopen') + D like(gzFile) + D path * value options(*string) File pathname + D mode * value options(*string) Open mode + /else + D gzopen PR extproc('gzopen64') + D like(gzFile) + D path * value options(*string) File pathname + D mode * value options(*string) Open mode + * + D gzopen64 PR extproc('gzopen64') + D like(gzFile) + D path * value options(*string) File pathname + D mode * value options(*string) Open mode + /endif + * + D gzdopen PR extproc('gzdopen') + D like(gzFile) + D fd 10I 0 value File descriptor + D mode * value options(*string) Open mode + * + D gzbuffer PR 10I 0 extproc('gzbuffer') + D file value like(gzFile) File pointer + D size 10U 0 value + * + D gzsetparams PR 10I 0 extproc('gzsetparams') + D file value like(gzFile) File pointer + D level 10I 0 value + D strategy 10I 0 value + * + D gzread PR 10I 0 extproc('gzread') + D file value like(gzFile) File pointer + D buf 65535 options(*varsize) 
Buffer + D len 10u 0 value Buffer length + * + D gzwrite PR 10I 0 extproc('gzwrite') + D file value like(gzFile) File pointer + D buf 65535 const options(*varsize) Buffer + D len 10u 0 value Buffer length + * + D gzputs PR 10I 0 extproc('gzputs') + D file value like(gzFile) File pointer + D s * value options(*string) String to output + * + D gzgets PR * extproc('gzgets') + D file value like(gzFile) File pointer + D buf 65535 options(*varsize) Read buffer + D len 10i 0 value Buffer length + * + D gzputc PR 10i 0 extproc('gzputc') + D file value like(gzFile) File pointer + D c 10I 0 value Character to write + * + D gzgetc PR 10i 0 extproc('gzgetc') + D file value like(gzFile) File pointer + * + D gzgetc_ PR 10i 0 extproc('gzgetc_') + D file value like(gzFile) File pointer + * + D gzungetc PR 10i 0 extproc('gzungetc') + D c 10I 0 value Character to push + D file value like(gzFile) File pointer + * + D gzflush PR 10i 0 extproc('gzflush') + D file value like(gzFile) File pointer + D flush 10I 0 value Type of flush + * + /if not defined(LARGE_FILES) + D gzseek PR extproc('gzseek') + D like(z_off_t) + D file value like(gzFile) File pointer + D offset value like(z_off_t) Offset + D whence 10i 0 value Origin + /else + D gzseek PR extproc('gzseek64') + D like(z_off_t) + D file value like(gzFile) File pointer + D offset value like(z_off_t) Offset + D whence 10i 0 value Origin + * + D gzseek64 PR extproc('gzseek64') + D like(z_off64_t) + D file value like(gzFile) File pointer + D offset value like(z_off64_t) Offset + D whence 10i 0 value Origin + /endif + * + D gzrewind PR 10i 0 extproc('gzrewind') + D file value like(gzFile) File pointer + * + /if not defined(LARGE_FILES) + D gztell PR extproc('gztell') + D like(z_off_t) + D file value like(gzFile) File pointer + /else + D gztell PR extproc('gztell64') + D like(z_off_t) + D file value like(gzFile) File pointer + * + D gztell64 PR extproc('gztell64') + D like(z_off64_t) + D file value like(gzFile) File pointer + /endif + * + 
/if not defined(LARGE_FILES) + D gzoffset PR extproc('gzoffset') + D like(z_off_t) + D file value like(gzFile) File pointer + /else + D gzoffset PR extproc('gzoffset64') + D like(z_off_t) + D file value like(gzFile) File pointer + * + D gzoffset64 PR extproc('gzoffset64') + D like(z_off64_t) + D file value like(gzFile) File pointer + /endif + * + D gzeof PR 10i 0 extproc('gzeof') + D file value like(gzFile) File pointer + * + D gzclose_r PR 10i 0 extproc('gzclose_r') + D file value like(gzFile) File pointer + * + D gzclose_w PR 10i 0 extproc('gzclose_w') + D file value like(gzFile) File pointer + * + D gzclose PR 10i 0 extproc('gzclose') + D file value like(gzFile) File pointer + * + D gzerror PR * extproc('gzerror') Error string + D file value like(gzFile) File pointer + D errnum 10I 0 Error code + * + D gzclearerr PR extproc('gzclearerr') + D file value like(gzFile) File pointer + * + ************************************************************************** + * Basic function prototypes + ************************************************************************** + * + D zlibVersion PR * extproc('zlibVersion') Version string + * + D deflateInit PR 10I 0 extproc('deflateInit_') Init. compression + D strm like(z_stream) Compression stream + D level 10I 0 value Compression level + D version * value options(*string) Version string + D stream_size 10i 0 value Stream struct. size + * + D deflate PR 10I 0 extproc('deflate') Compress data + D strm like(z_stream) Compression stream + D flush 10I 0 value Flush type required + * + D deflateEnd PR 10I 0 extproc('deflateEnd') Termin. compression + D strm like(z_stream) Compression stream + * + D inflateInit PR 10I 0 extproc('inflateInit_') Init. expansion + D strm like(z_stream) Expansion stream + D version * value options(*string) Version string + D stream_size 10i 0 value Stream struct. 
size + * + D inflate PR 10I 0 extproc('inflate') Expand data + D strm like(z_stream) Expansion stream + D flush 10I 0 value Flush type required + * + D inflateEnd PR 10I 0 extproc('inflateEnd') Termin. expansion + D strm like(z_stream) Expansion stream + * + ************************************************************************** + * Advanced function prototypes + ************************************************************************** + * + D deflateInit2 PR 10I 0 extproc('deflateInit2_') Init. compression + D strm like(z_stream) Compression stream + D level 10I 0 value Compression level + D method 10I 0 value Compression method + D windowBits 10I 0 value log2(window size) + D memLevel 10I 0 value Mem/cmpress tradeoff + D strategy 10I 0 value Compression stategy + D version * value options(*string) Version string + D stream_size 10i 0 value Stream struct. size + * + D deflateSetDictionary... + D PR 10I 0 extproc('deflateSetDictionary') Init. dictionary + D strm like(z_stream) Compression stream + D dictionary 65535 const options(*varsize) Dictionary bytes + D dictLength 10U 0 value Dictionary length + * + D deflateCopy PR 10I 0 extproc('deflateCopy') Compress strm 2 strm + D dest like(z_stream) Destination stream + D source like(z_stream) Source stream + * + D deflateReset PR 10I 0 extproc('deflateReset') End and init. 
stream + D strm like(z_stream) Compression stream + * + D deflateParams PR 10I 0 extproc('deflateParams') Change level & strat + D strm like(z_stream) Compression stream + D level 10I 0 value Compression level + D strategy 10I 0 value Compression stategy + * + D deflateBound PR 10U 0 extproc('deflateBound') Change level & strat + D strm like(z_stream) Compression stream + D sourcelen 10U 0 value Compression level + * + D deflatePending PR 10I 0 extproc('deflatePending') Change level & strat + D strm like(z_stream) Compression stream + D pending 10U 0 Pending bytes + D bits 10I 0 Pending bits + * + D deflatePrime PR 10I 0 extproc('deflatePrime') Change level & strat + D strm like(z_stream) Compression stream + D bits 10I 0 value # of bits to insert + D value 10I 0 value Bits to insert + * + D inflateInit2 PR 10I 0 extproc('inflateInit2_') Init. expansion + D strm like(z_stream) Expansion stream + D windowBits 10I 0 value log2(window size) + D version * value options(*string) Version string + D stream_size 10i 0 value Stream struct. size + * + D inflateSetDictionary... + D PR 10I 0 extproc('inflateSetDictionary') Init. dictionary + D strm like(z_stream) Expansion stream + D dictionary 65535 const options(*varsize) Dictionary bytes + D dictLength 10U 0 value Dictionary length + * + D inflateGetDictionary... + D PR 10I 0 extproc('inflateGetDictionary') Get dictionary + D strm like(z_stream) Expansion stream + D dictionary 65535 options(*varsize) Dictionary bytes + D dictLength 10U 0 Dictionary length + * + D inflateSync PR 10I 0 extproc('inflateSync') Sync. expansion + D strm like(z_stream) Expansion stream + * + D inflateCopy PR 10I 0 extproc('inflateCopy') + D dest like(z_stream) Destination stream + D source like(z_stream) Source stream + * + D inflateReset PR 10I 0 extproc('inflateReset') End and init. stream + D strm like(z_stream) Expansion stream + * + D inflateReset2 PR 10I 0 extproc('inflateReset2') End and init. 
stream + D strm like(z_stream) Expansion stream + D windowBits 10I 0 value Log2(buffer size) + * + D inflatePrime PR 10I 0 extproc('inflatePrime') Insert bits + D strm like(z_stream) Expansion stream + D bits 10I 0 value Bit count + D value 10I 0 value Bits to insert + * + D inflateMark PR 10I 0 extproc('inflateMark') Get inflate info + D strm like(z_stream) Expansion stream + * + D inflateBackInit... + D PR 10I 0 extproc('inflateBackInit_') + D strm like(z_stream) Expansion stream + D windowBits 10I 0 value Log2(buffer size) + D window 65535 options(*varsize) Buffer + D version * value options(*string) Version string + D stream_size 10i 0 value Stream struct. size + * + D inflateBack PR 10I 0 extproc('inflateBack') + D strm like(z_stream) Expansion stream + D in * value procptr Input function + D in_desc * value Input descriptor + D out * value procptr Output function + D out_desc * value Output descriptor + * + D inflateBackEnd PR 10I 0 extproc('inflateBackEnd') + D strm like(z_stream) Expansion stream + * + D zlibCompileFlags... + D PR 10U 0 extproc('zlibCompileFlags') + * + ************************************************************************** + * Checksum function prototypes + ************************************************************************** + * + D adler32 PR 10U 0 extproc('adler32') New checksum + D adler 10U 0 value Old checksum + D buf 65535 const options(*varsize) Bytes to accumulate + D len 10U 0 value Buffer length + * + D crc32 PR 10U 0 extproc('crc32') New checksum + D crc 10U 0 value Old checksum + D buf 65535 const options(*varsize) Bytes to accumulate + D len 10U 0 value Buffer length + * + ************************************************************************** + * Miscellaneous function prototypes + ************************************************************************** + * + D zError PR * extproc('zError') Error string + D err 10I 0 value Error code + * + D inflateSyncPoint... 
+ D PR 10I 0 extproc('inflateSyncPoint') + D strm like(z_stream) Expansion stream + * + D get_crc_table PR * extproc('get_crc_table') Ptr to ulongs + * + D inflateUndermine... + D PR 10I 0 extproc('inflateUndermine') + D strm like(z_stream) Expansion stream + D arg 10I 0 value Error code + * + D inflateResetKeep... + D PR 10I 0 extproc('inflateResetKeep') End and init. stream + D strm like(z_stream) Expansion stream + * + D deflateResetKeep... + D PR 10I 0 extproc('deflateResetKeep') End and init. stream + D strm like(z_stream) Expansion stream + * + /endif diff -Nru nodejs-0.11.13/deps/zlib/ChangeLog nodejs-0.11.15/deps/zlib/ChangeLog --- nodejs-0.11.13/deps/zlib/ChangeLog 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/ChangeLog 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1472 @@ + + ChangeLog file for zlib + +Changes in 1.2.8 (28 Apr 2013) +- Update contrib/minizip/iowin32.c for Windows RT [Vollant] +- Do not force Z_CONST for C++ +- Clean up contrib/vstudio [Ro] +- Correct spelling error in zlib.h +- Fix mixed line endings in contrib/vstudio + +Changes in 1.2.7.3 (13 Apr 2013) +- Fix version numbers and DLL names in contrib/vstudio/*/zlib.rc + +Changes in 1.2.7.2 (13 Apr 2013) +- Change check for a four-byte type back to hexadecimal +- Fix typo in win32/Makefile.msc +- Add casts in gzwrite.c for pointer differences + +Changes in 1.2.7.1 (24 Mar 2013) +- Replace use of unsafe string functions with snprintf if available +- Avoid including stddef.h on Windows for Z_SOLO compile [Niessink] +- Fix gzgetc undefine when Z_PREFIX set [Turk] +- Eliminate use of mktemp in Makefile (not always available) +- Fix bug in 'F' mode for gzopen() +- Add inflateGetDictionary() function +- Correct comment in deflate.h +- Use _snprintf for snprintf in Microsoft C +- On Darwin, only use /usr/bin/libtool if libtool is not Apple +- Delete "--version" file if created by "ar --version" [Richard G.] 
+- Fix configure check for veracity of compiler error return codes +- Fix CMake compilation of static lib for MSVC2010 x64 +- Remove unused variable in infback9.c +- Fix argument checks in gzlog_compress() and gzlog_write() +- Clean up the usage of z_const and respect const usage within zlib +- Clean up examples/gzlog.[ch] comparisons of different types +- Avoid shift equal to bits in type (caused endless loop) +- Fix unintialized value bug in gzputc() introduced by const patches +- Fix memory allocation error in examples/zran.c [Nor] +- Fix bug where gzopen(), gzclose() would write an empty file +- Fix bug in gzclose() when gzwrite() runs out of memory +- Check for input buffer malloc failure in examples/gzappend.c +- Add note to contrib/blast to use binary mode in stdio +- Fix comparisons of differently signed integers in contrib/blast +- Check for invalid code length codes in contrib/puff +- Fix serious but very rare decompression bug in inftrees.c +- Update inflateBack() comments, since inflate() can be faster +- Use underscored I/O function names for WINAPI_FAMILY +- Add _tr_flush_bits to the external symbols prefixed by --zprefix +- Add contrib/vstudio/vc10 pre-build step for static only +- Quote --version-script argument in CMakeLists.txt +- Don't specify --version-script on Apple platforms in CMakeLists.txt +- Fix casting error in contrib/testzlib/testzlib.c +- Fix types in contrib/minizip to match result of get_crc_table() +- Simplify contrib/vstudio/vc10 with 'd' suffix +- Add TOP support to win32/Makefile.msc +- Suport i686 and amd64 assembler builds in CMakeLists.txt +- Fix typos in the use of _LARGEFILE64_SOURCE in zconf.h +- Add vc11 and vc12 build files to contrib/vstudio +- Add gzvprintf() as an undocumented function in zlib +- Fix configure for Sun shell +- Remove runtime check in configure for four-byte integer type +- Add casts and consts to ease user conversion to C++ +- Add man pages for minizip and miniunzip +- In Makefile uninstall, don't rm 
if preceding cd fails +- Do not return Z_BUF_ERROR if deflateParam() has nothing to write + +Changes in 1.2.7 (2 May 2012) +- Replace use of memmove() with a simple copy for portability +- Test for existence of strerror +- Restore gzgetc_ for backward compatibility with 1.2.6 +- Fix build with non-GNU make on Solaris +- Require gcc 4.0 or later on Mac OS X to use the hidden attribute +- Include unistd.h for Watcom C +- Use __WATCOMC__ instead of __WATCOM__ +- Do not use the visibility attribute if NO_VIZ defined +- Improve the detection of no hidden visibility attribute +- Avoid using __int64 for gcc or solo compilation +- Cast to char * in gzprintf to avoid warnings [Zinser] +- Fix make_vms.com for VAX [Zinser] +- Don't use library or built-in byte swaps +- Simplify test and use of gcc hidden attribute +- Fix bug in gzclose_w() when gzwrite() fails to allocate memory +- Add "x" (O_EXCL) and "e" (O_CLOEXEC) modes support to gzopen() +- Fix bug in test/minigzip.c for configure --solo +- Fix contrib/vstudio project link errors [Mohanathas] +- Add ability to choose the builder in make_vms.com [Schweda] +- Add DESTDIR support to mingw32 win32/Makefile.gcc +- Fix comments in win32/Makefile.gcc for proper usage +- Allow overriding the default install locations for cmake +- Generate and install the pkg-config file with cmake +- Build both a static and a shared version of zlib with cmake +- Include version symbols for cmake builds +- If using cmake with MSVC, add the source directory to the includes +- Remove unneeded EXTRA_CFLAGS from win32/Makefile.gcc [Truta] +- Move obsolete emx makefile to old [Truta] +- Allow the use of -Wundef when compiling or using zlib +- Avoid the use of the -u option with mktemp +- Improve inflate() documentation on the use of Z_FINISH +- Recognize clang as gcc +- Add gzopen_w() in Windows for wide character path names +- Rename zconf.h in CMakeLists.txt to move it out of the way +- Add source directory in CMakeLists.txt for building examples 
+- Look in build directory for zlib.pc in CMakeLists.txt +- Remove gzflags from zlibvc.def in vc9 and vc10 +- Fix contrib/minizip compilation in the MinGW environment +- Update ./configure for Solaris, support --64 [Mooney] +- Remove -R. from Solaris shared build (possible security issue) +- Avoid race condition for parallel make (-j) running example +- Fix type mismatch between get_crc_table() and crc_table +- Fix parsing of version with "-" in CMakeLists.txt [Snider, Ziegler] +- Fix the path to zlib.map in CMakeLists.txt +- Force the native libtool in Mac OS X to avoid GNU libtool [Beebe] +- Add instructions to win32/Makefile.gcc for shared install [Torri] + +Changes in 1.2.6.1 (12 Feb 2012) +- Avoid the use of the Objective-C reserved name "id" +- Include io.h in gzguts.h for Microsoft compilers +- Fix problem with ./configure --prefix and gzgetc macro +- Include gz_header definition when compiling zlib solo +- Put gzflags() functionality back in zutil.c +- Avoid library header include in crc32.c for Z_SOLO +- Use name in GCC_CLASSIC as C compiler for coverage testing, if set +- Minor cleanup in contrib/minizip/zip.c [Vollant] +- Update make_vms.com [Zinser] +- Remove unnecessary gzgetc_ function +- Use optimized byte swap operations for Microsoft and GNU [Snyder] +- Fix minor typo in zlib.h comments [Rzesniowiecki] + +Changes in 1.2.6 (29 Jan 2012) +- Update the Pascal interface in contrib/pascal +- Fix function numbers for gzgetc_ in zlibvc.def files +- Fix configure.ac for contrib/minizip [Schiffer] +- Fix large-entry detection in minizip on 64-bit systems [Schiffer] +- Have ./configure use the compiler return code for error indication +- Fix CMakeLists.txt for cross compilation [McClure] +- Fix contrib/minizip/zip.c for 64-bit architectures [Dalsnes] +- Fix compilation of contrib/minizip on FreeBSD [Marquez] +- Correct suggested usages in win32/Makefile.msc [Shachar, Horvath] +- Include io.h for Turbo C / Borland C on all platforms [Truta] +- Make version 
explicit in contrib/minizip/configure.ac [Bosmans] +- Avoid warning for no encryption in contrib/minizip/zip.c [Vollant] +- Minor cleanup up contrib/minizip/unzip.c [Vollant] +- Fix bug when compiling minizip with C++ [Vollant] +- Protect for long name and extra fields in contrib/minizip [Vollant] +- Avoid some warnings in contrib/minizip [Vollant] +- Add -I../.. -L../.. to CFLAGS for minizip and miniunzip +- Add missing libs to minizip linker command +- Add support for VPATH builds in contrib/minizip +- Add an --enable-demos option to contrib/minizip/configure +- Add the generation of configure.log by ./configure +- Exit when required parameters not provided to win32/Makefile.gcc +- Have gzputc return the character written instead of the argument +- Use the -m option on ldconfig for BSD systems [Tobias] +- Correct in zlib.map when deflateResetKeep was added + +Changes in 1.2.5.3 (15 Jan 2012) +- Restore gzgetc function for binary compatibility +- Do not use _lseeki64 under Borland C++ [Truta] +- Update win32/Makefile.msc to build test/*.c [Truta] +- Remove old/visualc6 given CMakefile and other alternatives +- Update AS400 build files and documentation [Monnerat] +- Update win32/Makefile.gcc to build test/*.c [Truta] +- Permit stronger flushes after Z_BLOCK flushes +- Avoid extraneous empty blocks when doing empty flushes +- Permit Z_NULL arguments to deflatePending +- Allow deflatePrime() to insert bits in the middle of a stream +- Remove second empty static block for Z_PARTIAL_FLUSH +- Write out all of the available bits when using Z_BLOCK +- Insert the first two strings in the hash table after a flush + +Changes in 1.2.5.2 (17 Dec 2011) +- fix ld error: unable to find version dependency 'ZLIB_1.2.5' +- use relative symlinks for shared libs +- Avoid searching past window for Z_RLE strategy +- Assure that high-water mark initialization is always applied in deflate +- Add assertions to fill_window() in deflate.c to match comments +- Update python link in README +- 
Correct spelling error in gzread.c +- Fix bug in gzgets() for a concatenated empty gzip stream +- Correct error in comment for gz_make() +- Change gzread() and related to ignore junk after gzip streams +- Allow gzread() and related to continue after gzclearerr() +- Allow gzrewind() and gzseek() after a premature end-of-file +- Simplify gzseek() now that raw after gzip is ignored +- Change gzgetc() to a macro for speed (~40% speedup in testing) +- Fix gzclose() to return the actual error last encountered +- Always add large file support for windows +- Include zconf.h for windows large file support +- Include zconf.h.cmakein for windows large file support +- Update zconf.h.cmakein on make distclean +- Merge vestigial vsnprintf determination from zutil.h to gzguts.h +- Clarify how gzopen() appends in zlib.h comments +- Correct documentation of gzdirect() since junk at end now ignored +- Add a transparent write mode to gzopen() when 'T' is in the mode +- Update python link in zlib man page +- Get inffixed.h and MAKEFIXED result to match +- Add a ./config --solo option to make zlib subset with no libary use +- Add undocumented inflateResetKeep() function for CAB file decoding +- Add --cover option to ./configure for gcc coverage testing +- Add #define ZLIB_CONST option to use const in the z_stream interface +- Add comment to gzdopen() in zlib.h to use dup() when using fileno() +- Note behavior of uncompress() to provide as much data as it can +- Add files in contrib/minizip to aid in building libminizip +- Split off AR options in Makefile.in and configure +- Change ON macro to Z_ARG to avoid application conflicts +- Facilitate compilation with Borland C++ for pragmas and vsnprintf +- Include io.h for Turbo C / Borland C++ +- Move example.c and minigzip.c to test/ +- Simplify incomplete code table filling in inflate_table() +- Remove code from inflate.c and infback.c that is impossible to execute +- Test the inflate code with full coverage +- Allow deflateSetDictionary, 
inflateSetDictionary at any time (in raw) +- Add deflateResetKeep and fix inflateResetKeep to retain dictionary +- Fix gzwrite.c to accommodate reduced memory zlib compilation +- Have inflate() with Z_FINISH avoid the allocation of a window +- Do not set strm->adler when doing raw inflate +- Fix gzeof() to behave just like feof() when read is not past end of file +- Fix bug in gzread.c when end-of-file is reached +- Avoid use of Z_BUF_ERROR in gz* functions except for premature EOF +- Document gzread() capability to read concurrently written files +- Remove hard-coding of resource compiler in CMakeLists.txt [Blammo] + +Changes in 1.2.5.1 (10 Sep 2011) +- Update FAQ entry on shared builds (#13) +- Avoid symbolic argument to chmod in Makefile.in +- Fix bug and add consts in contrib/puff [Oberhumer] +- Update contrib/puff/zeros.raw test file to have all block types +- Add full coverage test for puff in contrib/puff/Makefile +- Fix static-only-build install in Makefile.in +- Fix bug in unzGetCurrentFileInfo() in contrib/minizip [Kuno] +- Add libz.a dependency to shared in Makefile.in for parallel builds +- Spell out "number" (instead of "nb") in zlib.h for total_in, total_out +- Replace $(...) with `...` in configure for non-bash sh [Bowler] +- Add darwin* to Darwin* and solaris* to SunOS\ 5* in configure [Groffen] +- Add solaris* to Linux* in configure to allow gcc use [Groffen] +- Add *bsd* to Linux* case in configure [Bar-Lev] +- Add inffast.obj to dependencies in win32/Makefile.msc +- Correct spelling error in deflate.h [Kohler] +- Change libzdll.a again to libz.dll.a (!) 
in win32/Makefile.gcc +- Add test to configure for GNU C looking for gcc in output of $cc -v +- Add zlib.pc generation to win32/Makefile.gcc [Weigelt] +- Fix bug in zlib.h for _FILE_OFFSET_BITS set and _LARGEFILE64_SOURCE not +- Add comment in zlib.h that adler32_combine with len2 < 0 makes no sense +- Make NO_DIVIDE option in adler32.c much faster (thanks to John Reiser) +- Make stronger test in zconf.h to include unistd.h for LFS +- Apply Darwin patches for 64-bit file offsets to contrib/minizip [Slack] +- Fix zlib.h LFS support when Z_PREFIX used +- Add updated as400 support (removed from old) [Monnerat] +- Avoid deflate sensitivity to volatile input data +- Avoid division in adler32_combine for NO_DIVIDE +- Clarify the use of Z_FINISH with deflateBound() amount of space +- Set binary for output file in puff.c +- Use u4 type for crc_table to avoid conversion warnings +- Apply casts in zlib.h to avoid conversion warnings +- Add OF to prototypes for adler32_combine_ and crc32_combine_ [Miller] +- Improve inflateSync() documentation to note indeterminancy +- Add deflatePending() function to return the amount of pending output +- Correct the spelling of "specification" in FAQ [Randers-Pehrson] +- Add a check in configure for stdarg.h, use for gzprintf() +- Check that pointers fit in ints when gzprint() compiled old style +- Add dummy name before $(SHAREDLIBV) in Makefile [Bar-Lev, Bowler] +- Delete line in configure that adds -L. libz.a to LDFLAGS [Weigelt] +- Add debug records in assmebler code [Londer] +- Update RFC references to use http://tools.ietf.org/html/... 
[Li] +- Add --archs option, use of libtool to configure for Mac OS X [Borstel] + +Changes in 1.2.5 (19 Apr 2010) +- Disable visibility attribute in win32/Makefile.gcc [Bar-Lev] +- Default to libdir as sharedlibdir in configure [Nieder] +- Update copyright dates on modified source files +- Update trees.c to be able to generate modified trees.h +- Exit configure for MinGW, suggesting win32/Makefile.gcc +- Check for NULL path in gz_open [Homurlu] + +Changes in 1.2.4.5 (18 Apr 2010) +- Set sharedlibdir in configure [Torok] +- Set LDFLAGS in Makefile.in [Bar-Lev] +- Avoid mkdir objs race condition in Makefile.in [Bowler] +- Add ZLIB_INTERNAL in front of internal inter-module functions and arrays +- Define ZLIB_INTERNAL to hide internal functions and arrays for GNU C +- Don't use hidden attribute when it is a warning generator (e.g. Solaris) + +Changes in 1.2.4.4 (18 Apr 2010) +- Fix CROSS_PREFIX executable testing, CHOST extract, mingw* [Torok] +- Undefine _LARGEFILE64_SOURCE in zconf.h if it is zero, but not if empty +- Try to use bash or ksh regardless of functionality of /bin/sh +- Fix configure incompatibility with NetBSD sh +- Remove attempt to run under bash or ksh since have better NetBSD fix +- Fix win32/Makefile.gcc for MinGW [Bar-Lev] +- Add diagnostic messages when using CROSS_PREFIX in configure +- Added --sharedlibdir option to configure [Weigelt] +- Use hidden visibility attribute when available [Frysinger] + +Changes in 1.2.4.3 (10 Apr 2010) +- Only use CROSS_PREFIX in configure for ar and ranlib if they exist +- Use CROSS_PREFIX for nm [Bar-Lev] +- Assume _LARGEFILE64_SOURCE defined is equivalent to true +- Avoid use of undefined symbols in #if with && and || +- Make *64 prototypes in gzguts.h consistent with functions +- Add -shared load option for MinGW in configure [Bowler] +- Move z_off64_t to public interface, use instead of off64_t +- Remove ! 
from shell test in configure (not portable to Solaris) +- Change +0 macro tests to -0 for possibly increased portability + +Changes in 1.2.4.2 (9 Apr 2010) +- Add consistent carriage returns to readme.txt's in masmx86 and masmx64 +- Really provide prototypes for *64 functions when building without LFS +- Only define unlink() in minigzip.c if unistd.h not included +- Update README to point to contrib/vstudio project files +- Move projects/vc6 to old/ and remove projects/ +- Include stdlib.h in minigzip.c for setmode() definition under WinCE +- Clean up assembler builds in win32/Makefile.msc [Rowe] +- Include sys/types.h for Microsoft for off_t definition +- Fix memory leak on error in gz_open() +- Symbolize nm as $NM in configure [Weigelt] +- Use TEST_LDSHARED instead of LDSHARED to link test programs [Weigelt] +- Add +0 to _FILE_OFFSET_BITS and _LFS64_LARGEFILE in case not defined +- Fix bug in gzeof() to take into account unused input data +- Avoid initialization of structures with variables in puff.c +- Updated win32/README-WIN32.txt [Rowe] + +Changes in 1.2.4.1 (28 Mar 2010) +- Remove the use of [a-z] constructs for sed in configure [gentoo 310225] +- Remove $(SHAREDLIB) from LIBS in Makefile.in [Creech] +- Restore "for debugging" comment on sprintf() in gzlib.c +- Remove fdopen for MVS from gzguts.h +- Put new README-WIN32.txt in win32 [Rowe] +- Add check for shell to configure and invoke another shell if needed +- Fix big fat stinking bug in gzseek() on uncompressed files +- Remove vestigial F_OPEN64 define in zutil.h +- Set and check the value of _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE +- Avoid errors on non-LFS systems when applications define LFS macros +- Set EXE to ".exe" in configure for MINGW [Kahle] +- Match crc32() in crc32.c exactly to the prototype in zlib.h [Sherrill] +- Add prefix for cross-compilation in win32/makefile.gcc [Bar-Lev] +- Add DLL install in win32/makefile.gcc [Bar-Lev] +- Allow Linux* or linux* from uname in configure [Bar-Lev] +- 
Allow ldconfig to be redefined in configure and Makefile.in [Bar-Lev] +- Add cross-compilation prefixes to configure [Bar-Lev] +- Match type exactly in gz_load() invocation in gzread.c +- Match type exactly of zcalloc() in zutil.c to zlib.h alloc_func +- Provide prototypes for *64 functions when building zlib without LFS +- Don't use -lc when linking shared library on MinGW +- Remove errno.h check in configure and vestigial errno code in zutil.h + +Changes in 1.2.4 (14 Mar 2010) +- Fix VER3 extraction in configure for no fourth subversion +- Update zlib.3, add docs to Makefile.in to make .pdf out of it +- Add zlib.3.pdf to distribution +- Don't set error code in gzerror() if passed pointer is NULL +- Apply destination directory fixes to CMakeLists.txt [Lowman] +- Move #cmakedefine's to a new zconf.in.cmakein +- Restore zconf.h for builds that don't use configure or cmake +- Add distclean to dummy Makefile for convenience +- Update and improve INDEX, README, and FAQ +- Update CMakeLists.txt for the return of zconf.h [Lowman] +- Update contrib/vstudio/vc9 and vc10 [Vollant] +- Change libz.dll.a back to libzdll.a in win32/Makefile.gcc +- Apply license and readme changes to contrib/asm686 [Raiter] +- Check file name lengths and add -c option in minigzip.c [Li] +- Update contrib/amd64 and contrib/masmx86/ [Vollant] +- Avoid use of "eof" parameter in trees.c to not shadow library variable +- Update make_vms.com for removal of zlibdefs.h [Zinser] +- Update assembler code and vstudio projects in contrib [Vollant] +- Remove outdated assembler code contrib/masm686 and contrib/asm586 +- Remove old vc7 and vc8 from contrib/vstudio +- Update win32/Makefile.msc, add ZLIB_VER_SUBREVISION [Rowe] +- Fix memory leaks in gzclose_r() and gzclose_w(), file leak in gz_open() +- Add contrib/gcc_gvmat64 for longest_match and inflate_fast [Vollant] +- Remove *64 functions from win32/zlib.def (they're not 64-bit yet) +- Fix bug in void-returning vsprintf() case in gzwrite.c +- Fix name 
change from inflate.h in contrib/inflate86/inffas86.c +- Check if temporary file exists before removing in make_vms.com [Zinser] +- Fix make install and uninstall for --static option +- Fix usage of _MSC_VER in gzguts.h and zutil.h [Truta] +- Update readme.txt in contrib/masmx64 and masmx86 to assemble + +Changes in 1.2.3.9 (21 Feb 2010) +- Expunge gzio.c +- Move as400 build information to old +- Fix updates in contrib/minizip and contrib/vstudio +- Add const to vsnprintf test in configure to avoid warnings [Weigelt] +- Delete zconf.h (made by configure) [Weigelt] +- Change zconf.in.h to zconf.h.in per convention [Weigelt] +- Check for NULL buf in gzgets() +- Return empty string for gzgets() with len == 1 (like fgets()) +- Fix description of gzgets() in zlib.h for end-of-file, NULL return +- Update minizip to 1.1 [Vollant] +- Avoid MSVC loss of data warnings in gzread.c, gzwrite.c +- Note in zlib.h that gzerror() should be used to distinguish from EOF +- Remove use of snprintf() from gzlib.c +- Fix bug in gzseek() +- Update contrib/vstudio, adding vc9 and vc10 [Kuno, Vollant] +- Fix zconf.h generation in CMakeLists.txt [Lowman] +- Improve comments in zconf.h where modified by configure + +Changes in 1.2.3.8 (13 Feb 2010) +- Clean up text files (tabs, trailing whitespace, etc.) 
[Oberhumer] +- Use z_off64_t in gz_zero() and gz_skip() to match state->skip +- Avoid comparison problem when sizeof(int) == sizeof(z_off64_t) +- Revert to Makefile.in from 1.2.3.6 (live with the clutter) +- Fix missing error return in gzflush(), add zlib.h note +- Add *64 functions to zlib.map [Levin] +- Fix signed/unsigned comparison in gz_comp() +- Use SFLAGS when testing shared linking in configure +- Add --64 option to ./configure to use -m64 with gcc +- Fix ./configure --help to correctly name options +- Have make fail if a test fails [Levin] +- Avoid buffer overrun in contrib/masmx64/gvmat64.asm [Simpson] +- Remove assembler object files from contrib + +Changes in 1.2.3.7 (24 Jan 2010) +- Always gzopen() with O_LARGEFILE if available +- Fix gzdirect() to work immediately after gzopen() or gzdopen() +- Make gzdirect() more precise when the state changes while reading +- Improve zlib.h documentation in many places +- Catch memory allocation failure in gz_open() +- Complete close operation if seek forward in gzclose_w() fails +- Return Z_ERRNO from gzclose_r() if close() fails +- Return Z_STREAM_ERROR instead of EOF for gzclose() being passed NULL +- Return zero for gzwrite() errors to match zlib.h description +- Return -1 on gzputs() error to match zlib.h description +- Add zconf.in.h to allow recovery from configure modification [Weigelt] +- Fix static library permissions in Makefile.in [Weigelt] +- Avoid warnings in configure tests that hide functionality [Weigelt] +- Add *BSD and DragonFly to Linux case in configure [gentoo 123571] +- Change libzdll.a to libz.dll.a in win32/Makefile.gcc [gentoo 288212] +- Avoid access of uninitialized data for first inflateReset2 call [Gomes] +- Keep object files in subdirectories to reduce the clutter somewhat +- Remove default Makefile and zlibdefs.h, add dummy Makefile +- Add new external functions to Z_PREFIX, remove duplicates, z_z_ -> z_ +- Remove zlibdefs.h completely -- modify zconf.h instead + +Changes in 1.2.3.6 
(17 Jan 2010) +- Avoid void * arithmetic in gzread.c and gzwrite.c +- Make compilers happier with const char * for gz_error message +- Avoid unused parameter warning in inflate.c +- Avoid signed-unsigned comparison warning in inflate.c +- Indent #pragma's for traditional C +- Fix usage of strwinerror() in glib.c, change to gz_strwinerror() +- Correct email address in configure for system options +- Update make_vms.com and add make_vms.com to contrib/minizip [Zinser] +- Update zlib.map [Brown] +- Fix Makefile.in for Solaris 10 make of example64 and minizip64 [Torok] +- Apply various fixes to CMakeLists.txt [Lowman] +- Add checks on len in gzread() and gzwrite() +- Add error message for no more room for gzungetc() +- Remove zlib version check in gzwrite() +- Defer compression of gzprintf() result until need to +- Use snprintf() in gzdopen() if available +- Remove USE_MMAP configuration determination (only used by minigzip) +- Remove examples/pigz.c (available separately) +- Update examples/gun.c to 1.6 + +Changes in 1.2.3.5 (8 Jan 2010) +- Add space after #if in zutil.h for some compilers +- Fix relatively harmless bug in deflate_fast() [Exarevsky] +- Fix same problem in deflate_slow() +- Add $(SHAREDLIBV) to LIBS in Makefile.in [Brown] +- Add deflate_rle() for faster Z_RLE strategy run-length encoding +- Add deflate_huff() for faster Z_HUFFMAN_ONLY encoding +- Change name of "write" variable in inffast.c to avoid library collisions +- Fix premature EOF from gzread() in gzio.c [Brown] +- Use zlib header window size if windowBits is 0 in inflateInit2() +- Remove compressBound() call in deflate.c to avoid linking compress.o +- Replace use of errno in gz* with functions, support WinCE [Alves] +- Provide alternative to perror() in minigzip.c for WinCE [Alves] +- Don't use _vsnprintf on later versions of MSVC [Lowman] +- Add CMake build script and input file [Lowman] +- Update contrib/minizip to 1.1 [Svensson, Vollant] +- Moved nintendods directory from contrib to . 
+- Replace gzio.c with a new set of routines with the same functionality +- Add gzbuffer(), gzoffset(), gzclose_r(), gzclose_w() as part of above +- Update contrib/minizip to 1.1b +- Change gzeof() to return 0 on error instead of -1 to agree with zlib.h + +Changes in 1.2.3.4 (21 Dec 2009) +- Use old school .SUFFIXES in Makefile.in for FreeBSD compatibility +- Update comments in configure and Makefile.in for default --shared +- Fix test -z's in configure [Marquess] +- Build examplesh and minigzipsh when not testing +- Change NULL's to Z_NULL's in deflate.c and in comments in zlib.h +- Import LDFLAGS from the environment in configure +- Fix configure to populate SFLAGS with discovered CFLAGS options +- Adapt make_vms.com to the new Makefile.in [Zinser] +- Add zlib2ansi script for C++ compilation [Marquess] +- Add _FILE_OFFSET_BITS=64 test to make test (when applicable) +- Add AMD64 assembler code for longest match to contrib [Teterin] +- Include options from $SFLAGS when doing $LDSHARED +- Simplify 64-bit file support by introducing z_off64_t type +- Make shared object files in objs directory to work around old Sun cc +- Use only three-part version number for Darwin shared compiles +- Add rc option to ar in Makefile.in for when ./configure not run +- Add -WI,-rpath,. 
to LDFLAGS for OSF 1 V4* +- Set LD_LIBRARYN32_PATH for SGI IRIX shared compile +- Protect against _FILE_OFFSET_BITS being defined when compiling zlib +- Rename Makefile.in targets allstatic to static and allshared to shared +- Fix static and shared Makefile.in targets to be independent +- Correct error return bug in gz_open() by setting state [Brown] +- Put spaces before ;;'s in configure for better sh compatibility +- Add pigz.c (parallel implementation of gzip) to examples/ +- Correct constant in crc32.c to UL [Leventhal] +- Reject negative lengths in crc32_combine() +- Add inflateReset2() function to work like inflateEnd()/inflateInit2() +- Include sys/types.h for _LARGEFILE64_SOURCE [Brown] +- Correct typo in doc/algorithm.txt [Janik] +- Fix bug in adler32_combine() [Zhu] +- Catch missing-end-of-block-code error in all inflates and in puff + Assures that random input to inflate eventually results in an error +- Added enough.c (calculation of ENOUGH for inftrees.h) to examples/ +- Update ENOUGH and its usage to reflect discovered bounds +- Fix gzerror() error report on empty input file [Brown] +- Add ush casts in trees.c to avoid pedantic runtime errors +- Fix typo in zlib.h uncompress() description [Reiss] +- Correct inflate() comments with regard to automatic header detection +- Remove deprecation comment on Z_PARTIAL_FLUSH (it stays) +- Put new version of gzlog (2.0) in examples with interruption recovery +- Add puff compile option to permit invalid distance-too-far streams +- Add puff TEST command options, ability to read piped input +- Prototype the *64 functions in zlib.h when _FILE_OFFSET_BITS == 64, but + _LARGEFILE64_SOURCE not defined +- Fix Z_FULL_FLUSH to truly erase the past by resetting s->strstart +- Fix deflateSetDictionary() to use all 32K for output consistency +- Remove extraneous #define MIN_LOOKAHEAD in deflate.c (in deflate.h) +- Clear bytes after deflate lookahead to avoid use of uninitialized data +- Change a limit in inftrees.c to be 
more transparent to Coverity Prevent +- Update win32/zlib.def with exported symbols from zlib.h +- Correct spelling errors in zlib.h [Willem, Sobrado] +- Allow Z_BLOCK for deflate() to force a new block +- Allow negative bits in inflatePrime() to delete existing bit buffer +- Add Z_TREES flush option to inflate() to return at end of trees +- Add inflateMark() to return current state information for random access +- Add Makefile for NintendoDS to contrib [Costa] +- Add -w in configure compile tests to avoid spurious warnings [Beucler] +- Fix typos in zlib.h comments for deflateSetDictionary() +- Fix EOF detection in transparent gzread() [Maier] + +Changes in 1.2.3.3 (2 October 2006) +- Make --shared the default for configure, add a --static option +- Add compile option to permit invalid distance-too-far streams +- Add inflateUndermine() function which is required to enable above +- Remove use of "this" variable name for C++ compatibility [Marquess] +- Add testing of shared library in make test, if shared library built +- Use ftello() and fseeko() if available instead of ftell() and fseek() +- Provide two versions of all functions that use the z_off_t type for + binary compatibility -- a normal version and a 64-bit offset version, + per the Large File Support Extension when _LARGEFILE64_SOURCE is + defined; use the 64-bit versions by default when _FILE_OFFSET_BITS + is defined to be 64 +- Add a --uname= option to configure to perhaps help with cross-compiling + +Changes in 1.2.3.2 (3 September 2006) +- Turn off silly Borland warnings [Hay] +- Use off64_t and define _LARGEFILE64_SOURCE when present +- Fix missing dependency on inffixed.h in Makefile.in +- Rig configure --shared to build both shared and static [Teredesai, Truta] +- Remove zconf.in.h and instead create a new zlibdefs.h file +- Fix contrib/minizip/unzip.c non-encrypted after encrypted [Vollant] +- Add treebuild.xml (see http://treebuild.metux.de/) [Weigelt] + +Changes in 1.2.3.1 (16 August 2006) +- Add 
watcom directory with OpenWatcom make files [Daniel] +- Remove #undef of FAR in zconf.in.h for MVS [Fedtke] +- Update make_vms.com [Zinser] +- Use -fPIC for shared build in configure [Teredesai, Nicholson] +- Use only major version number for libz.so on IRIX and OSF1 [Reinholdtsen] +- Use fdopen() (not _fdopen()) for Interix in zutil.h [Bck] +- Add some FAQ entries about the contrib directory +- Update the MVS question in the FAQ +- Avoid extraneous reads after EOF in gzio.c [Brown] +- Correct spelling of "successfully" in gzio.c [Randers-Pehrson] +- Add comments to zlib.h about gzerror() usage [Brown] +- Set extra flags in gzip header in gzopen() like deflate() does +- Make configure options more compatible with double-dash conventions + [Weigelt] +- Clean up compilation under Solaris SunStudio cc [Rowe, Reinholdtsen] +- Fix uninstall target in Makefile.in [Truta] +- Add pkgconfig support [Weigelt] +- Use $(DESTDIR) macro in Makefile.in [Reinholdtsen, Weigelt] +- Replace set_data_type() with a more accurate detect_data_type() in + trees.c, according to the txtvsbin.txt document [Truta] +- Swap the order of #include <stdio.h> and #include "zlib.h" in + gzio.c, example.c and minigzip.c [Truta] +- Shut up annoying VS2005 warnings about standard C deprecation [Rowe, + Truta] (where?) 
+- Fix target "clean" from win32/Makefile.bor [Truta] +- Create .pdb and .manifest files in win32/makefile.msc [Ziegler, Rowe] +- Update zlib www home address in win32/DLL_FAQ.txt [Truta] +- Update contrib/masmx86/inffas32.asm for VS2005 [Vollant, Van Wassenhove] +- Enable browse info in the "Debug" and "ASM Debug" configurations in + the Visual C++ 6 project, and set (non-ASM) "Debug" as default [Truta] +- Add pkgconfig support [Weigelt] +- Add ZLIB_VER_MAJOR, ZLIB_VER_MINOR and ZLIB_VER_REVISION in zlib.h, + for use in win32/zlib1.rc [Polushin, Rowe, Truta] +- Add a document that explains the new text detection scheme to + doc/txtvsbin.txt [Truta] +- Add rfc1950.txt, rfc1951.txt and rfc1952.txt to doc/ [Truta] +- Move algorithm.txt into doc/ [Truta] +- Synchronize FAQ with website +- Fix compressBound(), was low for some pathological cases [Fearnley] +- Take into account wrapper variations in deflateBound() +- Set examples/zpipe.c input and output to binary mode for Windows +- Update examples/zlib_how.html with new zpipe.c (also web site) +- Fix some warnings in examples/gzlog.c and examples/zran.c (it seems + that gcc became pickier in 4.0) +- Add zlib.map for Linux: "All symbols from zlib-1.1.4 remain + un-versioned, the patch adds versioning only for symbols introduced in + zlib-1.2.0 or later. It also declares as local those symbols which are + not designed to be exported." 
[Levin] +- Update Z_PREFIX list in zconf.in.h, add --zprefix option to configure +- Do not initialize global static by default in trees.c, add a response + NO_INIT_GLOBAL_POINTERS to initialize them if needed [Marquess] +- Don't use strerror() in gzio.c under WinCE [Yakimov] +- Don't use errno.h in zutil.h under WinCE [Yakimov] +- Move arguments for AR to its usage to allow replacing ar [Marot] +- Add HAVE_VISIBILITY_PRAGMA in zconf.in.h for Mozilla [Randers-Pehrson] +- Improve inflateInit() and inflateInit2() documentation +- Fix structure size comment in inflate.h +- Change configure help option from --h* to --help [Santos] + +Changes in 1.2.3 (18 July 2005) +- Apply security vulnerability fixes to contrib/infback9 as well +- Clean up some text files (carriage returns, trailing space) +- Update testzlib, vstudio, masmx64, and masmx86 in contrib [Vollant] + +Changes in 1.2.2.4 (11 July 2005) +- Add inflatePrime() function for starting inflation at bit boundary +- Avoid some Visual C warnings in deflate.c +- Avoid more silly Visual C warnings in inflate.c and inftrees.c for 64-bit + compile +- Fix some spelling errors in comments [Betts] +- Correct inflateInit2() error return documentation in zlib.h +- Add zran.c example of compressed data random access to examples + directory, shows use of inflatePrime() +- Fix cast for assignments to strm->state in inflate.c and infback.c +- Fix zlibCompileFlags() in zutil.c to use 1L for long shifts [Oberhumer] +- Move declarations of gf2 functions to right place in crc32.c [Oberhumer] +- Add cast in trees.c t avoid a warning [Oberhumer] +- Avoid some warnings in fitblk.c, gun.c, gzjoin.c in examples [Oberhumer] +- Update make_vms.com [Zinser] +- Initialize state->write in inflateReset() since copied in inflate_fast() +- Be more strict on incomplete code sets in inflate_table() and increase + ENOUGH and MAXD -- this repairs a possible security vulnerability for + invalid inflate input. 
Thanks to Tavis Ormandy and Markus Oberhumer for + discovering the vulnerability and providing test cases. +- Add ia64 support to configure for HP-UX [Smith] +- Add error return to gzread() for format or i/o error [Levin] +- Use malloc.h for OS/2 [Necasek] + +Changes in 1.2.2.3 (27 May 2005) +- Replace 1U constants in inflate.c and inftrees.c for 64-bit compile +- Typecast fread() return values in gzio.c [Vollant] +- Remove trailing space in minigzip.c outmode (VC++ can't deal with it) +- Fix crc check bug in gzread() after gzungetc() [Heiner] +- Add the deflateTune() function to adjust internal compression parameters +- Add a fast gzip decompressor, gun.c, to examples (use of inflateBack) +- Remove an incorrect assertion in examples/zpipe.c +- Add C++ wrapper in infback9.h [Donais] +- Fix bug in inflateCopy() when decoding fixed codes +- Note in zlib.h how much deflateSetDictionary() actually uses +- Remove USE_DICT_HEAD in deflate.c (would mess up inflate if used) +- Add _WIN32_WCE to define WIN32 in zconf.in.h [Spencer] +- Don't include stderr.h or errno.h for _WIN32_WCE in zutil.h [Spencer] +- Add gzdirect() function to indicate transparent reads +- Update contrib/minizip [Vollant] +- Fix compilation of deflate.c when both ASMV and FASTEST [Oberhumer] +- Add casts in crc32.c to avoid warnings [Oberhumer] +- Add contrib/masmx64 [Vollant] +- Update contrib/asm586, asm686, masmx86, testzlib, vstudio [Vollant] + +Changes in 1.2.2.2 (30 December 2004) +- Replace structure assignments in deflate.c and inflate.c with zmemcpy to + avoid implicit memcpy calls (portability for no-library compilation) +- Increase sprintf() buffer size in gzdopen() to allow for large numbers +- Add INFLATE_STRICT to check distances against zlib header +- Improve WinCE errno handling and comments [Chang] +- Remove comment about no gzip header processing in FAQ +- Add Z_FIXED strategy option to deflateInit2() to force fixed trees +- Add updated make_vms.com [Coghlan], update README +- Create 
a new "examples" directory, move gzappend.c there, add zpipe.c, + fitblk.c, gzlog.[ch], gzjoin.c, and zlib_how.html. +- Add FAQ entry and comments in deflate.c on uninitialized memory access +- Add Solaris 9 make options in configure [Gilbert] +- Allow strerror() usage in gzio.c for STDC +- Fix DecompressBuf in contrib/delphi/ZLib.pas [ManChesTer] +- Update contrib/masmx86/inffas32.asm and gvmat32.asm [Vollant] +- Use z_off_t for adler32_combine() and crc32_combine() lengths +- Make adler32() much faster for small len +- Use OS_CODE in deflate() default gzip header + +Changes in 1.2.2.1 (31 October 2004) +- Allow inflateSetDictionary() call for raw inflate +- Fix inflate header crc check bug for file names and comments +- Add deflateSetHeader() and gz_header structure for custom gzip headers +- Add inflateGetheader() to retrieve gzip headers +- Add crc32_combine() and adler32_combine() functions +- Add alloc_func, free_func, in_func, out_func to Z_PREFIX list +- Use zstreamp consistently in zlib.h (inflate_back functions) +- Remove GUNZIP condition from definition of inflate_mode in inflate.h + and in contrib/inflate86/inffast.S [Truta, Anderson] +- Add support for AMD64 in contrib/inflate86/inffas86.c [Anderson] +- Update projects/README.projects and projects/visualc6 [Truta] +- Update win32/DLL_FAQ.txt [Truta] +- Avoid warning under NO_GZCOMPRESS in gzio.c; fix typo [Truta] +- Deprecate Z_ASCII; use Z_TEXT instead [Truta] +- Use a new algorithm for setting strm->data_type in trees.c [Truta] +- Do not define an exit() prototype in zutil.c unless DEBUG defined +- Remove prototype of exit() from zutil.c, example.c, minigzip.c [Truta] +- Add comment in zlib.h for Z_NO_FLUSH parameter to deflate() +- Fix Darwin build version identification [Peterson] + +Changes in 1.2.2 (3 October 2004) +- Update zlib.h comments on gzip in-memory processing +- Set adler to 1 in inflateReset() to support Java test suite [Walles] +- Add contrib/dotzlib [Ravn] +- Update win32/DLL_FAQ.txt 
[Truta] +- Update contrib/minizip [Vollant] +- Move contrib/visual-basic.txt to old/ [Truta] +- Fix assembler builds in projects/visualc6/ [Truta] + +Changes in 1.2.1.2 (9 September 2004) +- Update INDEX file +- Fix trees.c to update strm->data_type (no one ever noticed!) +- Fix bug in error case in inflate.c, infback.c, and infback9.c [Brown] +- Add "volatile" to crc table flag declaration (for DYNAMIC_CRC_TABLE) +- Add limited multitasking protection to DYNAMIC_CRC_TABLE +- Add NO_vsnprintf for VMS in zutil.h [Mozilla] +- Don't declare strerror() under VMS [Mozilla] +- Add comment to DYNAMIC_CRC_TABLE to use get_crc_table() to initialize +- Update contrib/ada [Anisimkov] +- Update contrib/minizip [Vollant] +- Fix configure to not hardcode directories for Darwin [Peterson] +- Fix gzio.c to not return error on empty files [Brown] +- Fix indentation; update version in contrib/delphi/ZLib.pas and + contrib/pascal/zlibpas.pas [Truta] +- Update mkasm.bat in contrib/masmx86 [Truta] +- Update contrib/untgz [Truta] +- Add projects/README.projects [Truta] +- Add project for MS Visual C++ 6.0 in projects/visualc6 [Cadieux, Truta] +- Update win32/DLL_FAQ.txt [Truta] +- Update list of Z_PREFIX symbols in zconf.h [Randers-Pehrson, Truta] +- Remove an unnecessary assignment to curr in inftrees.c [Truta] +- Add OS/2 to exe builds in configure [Poltorak] +- Remove err dummy parameter in zlib.h [Kientzle] + +Changes in 1.2.1.1 (9 January 2004) +- Update email address in README +- Several FAQ updates +- Fix a big fat bug in inftrees.c that prevented decoding valid + dynamic blocks with only literals and no distance codes -- + Thanks to "Hot Emu" for the bug report and sample file +- Add a note to puff.c on no distance codes case. 
+ +Changes in 1.2.1 (17 November 2003) +- Remove a tab in contrib/gzappend/gzappend.c +- Update some interfaces in contrib for new zlib functions +- Update zlib version number in some contrib entries +- Add Windows CE definition for ptrdiff_t in zutil.h [Mai, Truta] +- Support shared libraries on Hurd and KFreeBSD [Brown] +- Fix error in NO_DIVIDE option of adler32.c + +Changes in 1.2.0.8 (4 November 2003) +- Update version in contrib/delphi/ZLib.pas and contrib/pascal/zlibpas.pas +- Add experimental NO_DIVIDE #define in adler32.c + - Possibly faster on some processors (let me know if it is) +- Correct Z_BLOCK to not return on first inflate call if no wrap +- Fix strm->data_type on inflate() return to correctly indicate EOB +- Add deflatePrime() function for appending in the middle of a byte +- Add contrib/gzappend for an example of appending to a stream +- Update win32/DLL_FAQ.txt [Truta] +- Delete Turbo C comment in README [Truta] +- Improve some indentation in zconf.h [Truta] +- Fix infinite loop on bad input in configure script [Church] +- Fix gzeof() for concatenated gzip files [Johnson] +- Add example to contrib/visual-basic.txt [Michael B.] 
+- Add -p to mkdir's in Makefile.in [vda] +- Fix configure to properly detect presence or lack of printf functions +- Add AS400 support [Monnerat] +- Add a little Cygwin support [Wilson] + +Changes in 1.2.0.7 (21 September 2003) +- Correct some debug formats in contrib/infback9 +- Cast a type in a debug statement in trees.c +- Change search and replace delimiter in configure from % to # [Beebe] +- Update contrib/untgz to 0.2 with various fixes [Truta] +- Add build support for Amiga [Nikl] +- Remove some directories in old that have been updated to 1.2 +- Add dylib building for Mac OS X in configure and Makefile.in +- Remove old distribution stuff from Makefile +- Update README to point to DLL_FAQ.txt, and add comment on Mac OS X +- Update links in README + +Changes in 1.2.0.6 (13 September 2003) +- Minor FAQ updates +- Update contrib/minizip to 1.00 [Vollant] +- Remove test of gz functions in example.c when GZ_COMPRESS defined [Truta] +- Update POSTINC comment for 68060 [Nikl] +- Add contrib/infback9 with deflate64 decoding (unsupported) +- For MVS define NO_vsnprintf and undefine FAR [van Burik] +- Add pragma for fdopen on MVS [van Burik] + +Changes in 1.2.0.5 (8 September 2003) +- Add OF to inflateBackEnd() declaration in zlib.h +- Remember start when using gzdopen in the middle of a file +- Use internal off_t counters in gz* functions to properly handle seeks +- Perform more rigorous check for distance-too-far in inffast.c +- Add Z_BLOCK flush option to return from inflate at block boundary +- Set strm->data_type on return from inflate + - Indicate bits unused, if at block boundary, and if in last block +- Replace size_t with ptrdiff_t in crc32.c, and check for correct size +- Add condition so old NO_DEFLATE define still works for compatibility +- FAQ update regarding the Windows DLL [Truta] +- INDEX update: add qnx entry, remove aix entry [Truta] +- Install zlib.3 into mandir [Wilson] +- Move contrib/zlib_dll_FAQ.txt to win32/DLL_FAQ.txt; update [Truta] +- 
Adapt the zlib interface to the new DLL convention guidelines [Truta] +- Introduce ZLIB_WINAPI macro to allow the export of functions using + the WINAPI calling convention, for Visual Basic [Vollant, Truta] +- Update msdos and win32 scripts and makefiles [Truta] +- Export symbols by name, not by ordinal, in win32/zlib.def [Truta] +- Add contrib/ada [Anisimkov] +- Move asm files from contrib/vstudio/vc70_32 to contrib/asm386 [Truta] +- Rename contrib/asm386 to contrib/masmx86 [Truta, Vollant] +- Add contrib/masm686 [Truta] +- Fix offsets in contrib/inflate86 and contrib/masmx86/inffas32.asm + [Truta, Vollant] +- Update contrib/delphi; rename to contrib/pascal; add example [Truta] +- Remove contrib/delphi2; add a new contrib/delphi [Truta] +- Avoid inclusion of the nonstandard <memory.h> in contrib/iostream, + and fix some method prototypes [Truta] +- Fix the ZCR_SEED2 constant to avoid warnings in contrib/minizip + [Truta] +- Avoid the use of backslash (\) in contrib/minizip [Vollant] +- Fix file time handling in contrib/untgz; update makefiles [Truta] +- Update contrib/vstudio/vc70_32 to comply with the new DLL guidelines + [Vollant] +- Remove contrib/vstudio/vc15_16 [Vollant] +- Rename contrib/vstudio/vc70_32 to contrib/vstudio/vc7 [Truta] +- Update README.contrib [Truta] +- Invert the assignment order of match_head and s->prev[...] 
in + INSERT_STRING [Truta] +- Compare TOO_FAR with 32767 instead of 32768, to avoid 16-bit warnings + [Truta] +- Compare function pointers with 0, not with NULL or Z_NULL [Truta] +- Fix prototype of syncsearch in inflate.c [Truta] +- Introduce ASMINF macro to be enabled when using an ASM implementation + of inflate_fast [Truta] +- Change NO_DEFLATE to NO_GZCOMPRESS [Truta] +- Modify test_gzio in example.c to take a single file name as a + parameter [Truta] +- Exit the example.c program if gzopen fails [Truta] +- Add type casts around strlen in example.c [Truta] +- Remove casting to sizeof in minigzip.c; give a proper type + to the variable compared with SUFFIX_LEN [Truta] +- Update definitions of STDC and STDC99 in zconf.h [Truta] +- Synchronize zconf.h with the new Windows DLL interface [Truta] +- Use SYS16BIT instead of __32BIT__ to distinguish between + 16- and 32-bit platforms [Truta] +- Use far memory allocators in small 16-bit memory models for + Turbo C [Truta] +- Add info about the use of ASMV, ASMINF and ZLIB_WINAPI in + zlibCompileFlags [Truta] +- Cygwin has vsnprintf [Wilson] +- In Windows16, OS_CODE is 0, as in MSDOS [Truta] +- In Cygwin, OS_CODE is 3 (Unix), not 11 (Windows32) [Wilson] + +Changes in 1.2.0.4 (10 August 2003) +- Minor FAQ updates +- Be more strict when checking inflateInit2's windowBits parameter +- Change NO_GUNZIP compile option to NO_GZIP to cover deflate as well +- Add gzip wrapper option to deflateInit2 using windowBits +- Add updated QNX rule in configure and qnx directory [Bonnefoy] +- Make inflate distance-too-far checks more rigorous +- Clean up FAR usage in inflate +- Add casting to sizeof() in gzio.c and minigzip.c + +Changes in 1.2.0.3 (19 July 2003) +- Fix silly error in gzungetc() implementation [Vollant] +- Update contrib/minizip and contrib/vstudio [Vollant] +- Fix printf format in example.c +- Correct cdecl support in zconf.in.h [Anisimkov] +- Minor FAQ updates + +Changes in 1.2.0.2 (13 July 2003) +- Add ZLIB_VERNUM in 
zlib.h for numerical preprocessor comparisons +- Attempt to avoid warnings in crc32.c for pointer-int conversion +- Add AIX to configure, remove aix directory [Bakker] +- Add some casts to minigzip.c +- Improve checking after insecure sprintf() or vsprintf() calls +- Remove #elif's from crc32.c +- Change leave label to inf_leave in inflate.c and infback.c to avoid + library conflicts +- Remove inflate gzip decoding by default--only enable gzip decoding by + special request for stricter backward compatibility +- Add zlibCompileFlags() function to return compilation information +- More typecasting in deflate.c to avoid warnings +- Remove leading underscore from _Capital #defines [Truta] +- Fix configure to link shared library when testing +- Add some Windows CE target adjustments [Mai] +- Remove #define ZLIB_DLL in zconf.h [Vollant] +- Add zlib.3 [Rodgers] +- Update RFC URL in deflate.c and algorithm.txt [Mai] +- Add zlib_dll_FAQ.txt to contrib [Truta] +- Add UL to some constants [Truta] +- Update minizip and vstudio [Vollant] +- Remove vestigial NEED_DUMMY_RETURN from zconf.in.h +- Expand use of NO_DUMMY_DECL to avoid all dummy structures +- Added iostream3 to contrib [Schwardt] +- Replace rewind() with fseek() for WinCE [Truta] +- Improve setting of zlib format compression level flags + - Report 0 for huffman and rle strategies and for level == 0 or 1 + - Report 2 only for level == 6 +- Only deal with 64K limit when necessary at compile time [Truta] +- Allow TOO_FAR check to be turned off at compile time [Truta] +- Add gzclearerr() function [Souza] +- Add gzungetc() function + +Changes in 1.2.0.1 (17 March 2003) +- Add Z_RLE strategy for run-length encoding [Truta] + - When Z_RLE requested, restrict matches to distance one + - Update zlib.h, minigzip.c, gzopen(), gzdopen() for Z_RLE +- Correct FASTEST compilation to allow level == 0 +- Clean up what gets compiled for FASTEST +- Incorporate changes to zconf.in.h [Vollant] + - Refine detection of Turbo C need for 
dummy returns + - Refine ZLIB_DLL compilation + - Include additional header file on VMS for off_t typedef +- Try to use _vsnprintf where it supplants vsprintf [Vollant] +- Add some casts in inffast.c +- Enchance comments in zlib.h on what happens if gzprintf() tries to + write more than 4095 bytes before compression +- Remove unused state from inflateBackEnd() +- Remove exit(0) from minigzip.c, example.c +- Get rid of all those darn tabs +- Add "check" target to Makefile.in that does the same thing as "test" +- Add "mostlyclean" and "maintainer-clean" targets to Makefile.in +- Update contrib/inflate86 [Anderson] +- Update contrib/testzlib, contrib/vstudio, contrib/minizip [Vollant] +- Add msdos and win32 directories with makefiles [Truta] +- More additions and improvements to the FAQ + +Changes in 1.2.0 (9 March 2003) +- New and improved inflate code + - About 20% faster + - Does not allocate 32K window unless and until needed + - Automatically detects and decompresses gzip streams + - Raw inflate no longer needs an extra dummy byte at end + - Added inflateBack functions using a callback interface--even faster + than inflate, useful for file utilities (gzip, zip) + - Added inflateCopy() function to record state for random access on + externally generated deflate streams (e.g. 
in gzip files) + - More readable code (I hope) +- New and improved crc32() + - About 50% faster, thanks to suggestions from Rodney Brown +- Add deflateBound() and compressBound() functions +- Fix memory leak in deflateInit2() +- Permit setting dictionary for raw deflate (for parallel deflate) +- Fix const declaration for gzwrite() +- Check for some malloc() failures in gzio.c +- Fix bug in gzopen() on single-byte file 0x1f +- Fix bug in gzread() on concatenated file with 0x1f at end of buffer + and next buffer doesn't start with 0x8b +- Fix uncompress() to return Z_DATA_ERROR on truncated input +- Free memory at end of example.c +- Remove MAX #define in trees.c (conflicted with some libraries) +- Fix static const's in deflate.c, gzio.c, and zutil.[ch] +- Declare malloc() and free() in gzio.c if STDC not defined +- Use malloc() instead of calloc() in zutil.c if int big enough +- Define STDC for AIX +- Add aix/ with approach for compiling shared library on AIX +- Add HP-UX support for shared libraries in configure +- Add OpenUNIX support for shared libraries in configure +- Use $cc instead of gcc to build shared library +- Make prefix directory if needed when installing +- Correct Macintosh avoidance of typedef Byte in zconf.h +- Correct Turbo C memory allocation when under Linux +- Use libz.a instead of -lz in Makefile (assure use of compiled library) +- Update configure to check for snprintf or vsnprintf functions and their + return value, warn during make if using an insecure function +- Fix configure problem with compile-time knowledge of HAVE_UNISTD_H that + is lost when library is used--resolution is to build new zconf.h +- Documentation improvements (in zlib.h): + - Document raw deflate and inflate + - Update RFCs URL + - Point out that zlib and gzip formats are different + - Note that Z_BUF_ERROR is not fatal + - Document string limit for gzprintf() and possible buffer overflow + - Note requirement on avail_out when flushing + - Note permitted values of flush 
parameter of inflate() +- Add some FAQs (and even answers) to the FAQ +- Add contrib/inflate86/ for x86 faster inflate +- Add contrib/blast/ for PKWare Data Compression Library decompression +- Add contrib/puff/ simple inflate for deflate format description + +Changes in 1.1.4 (11 March 2002) +- ZFREE was repeated on same allocation on some error conditions. + This creates a security problem described in + http://www.zlib.org/advisory-2002-03-11.txt +- Returned incorrect error (Z_MEM_ERROR) on some invalid data +- Avoid accesses before window for invalid distances with inflate window + less than 32K. +- force windowBits > 8 to avoid a bug in the encoder for a window size + of 256 bytes. (A complete fix will be available in 1.1.5). + +Changes in 1.1.3 (9 July 1998) +- fix "an inflate input buffer bug that shows up on rare but persistent + occasions" (Mark) +- fix gzread and gztell for concatenated .gz files (Didier Le Botlan) +- fix gzseek(..., SEEK_SET) in write mode +- fix crc check after a gzeek (Frank Faubert) +- fix miniunzip when the last entry in a zip file is itself a zip file + (J Lillge) +- add contrib/asm586 and contrib/asm686 (Brian Raiter) + See http://www.muppetlabs.com/~breadbox/software/assembly.html +- add support for Delphi 3 in contrib/delphi (Bob Dellaca) +- add support for C++Builder 3 and Delphi 3 in contrib/delphi2 (Davide Moretti) +- do not exit prematurely in untgz if 0 at start of block (Magnus Holmgren) +- use macro EXTERN instead of extern to support DLL for BeOS (Sander Stoks) +- added a FAQ file + +- Support gzdopen on Mac with Metrowerks (Jason Linhart) +- Do not redefine Byte on Mac (Brad Pettit & Jason Linhart) +- define SEEK_END too if SEEK_SET is not defined (Albert Chin-A-Young) +- avoid some warnings with Borland C (Tom Tanner) +- fix a problem in contrib/minizip/zip.c for 16-bit MSDOS (Gilles Vollant) +- emulate utime() for WIN32 in contrib/untgz (Gilles Vollant) +- allow several arguments to configure (Tim Mooney, Frodo 
Looijaard) +- use libdir and includedir in Makefile.in (Tim Mooney) +- support shared libraries on OSF1 V4 (Tim Mooney) +- remove so_locations in "make clean" (Tim Mooney) +- fix maketree.c compilation error (Glenn, Mark) +- Python interface to zlib now in Python 1.5 (Jeremy Hylton) +- new Makefile.riscos (Rich Walker) +- initialize static descriptors in trees.c for embedded targets (Nick Smith) +- use "foo-gz" in example.c for RISCOS and VMS (Nick Smith) +- add the OS/2 files in Makefile.in too (Andrew Zabolotny) +- fix fdopen and halloc macros for Microsoft C 6.0 (Tom Lane) +- fix maketree.c to allow clean compilation of inffixed.h (Mark) +- fix parameter check in deflateCopy (Gunther Nikl) +- cleanup trees.c, use compressed_len only in debug mode (Christian Spieler) +- Many portability patches by Christian Spieler: + . zutil.c, zutil.h: added "const" for zmem* + . Make_vms.com: fixed some typos + . Make_vms.com: msdos/Makefile.*: removed zutil.h from some dependency lists + . msdos/Makefile.msc: remove "default rtl link library" info from obj files + . msdos/Makefile.*: use model-dependent name for the built zlib library + . 
msdos/Makefile.emx, nt/Makefile.emx, nt/Makefile.gcc: + new makefiles, for emx (DOS/OS2), emx&rsxnt and mingw32 (Windows 9x / NT) +- use define instead of typedef for Bytef also for MSC small/medium (Tom Lane) +- replace __far with _far for better portability (Christian Spieler, Tom Lane) +- fix test for errno.h in configure (Tim Newsham) + +Changes in 1.1.2 (19 March 98) +- added contrib/minzip, mini zip and unzip based on zlib (Gilles Vollant) + See http://www.winimage.com/zLibDll/unzip.html +- preinitialize the inflate tables for fixed codes, to make the code + completely thread safe (Mark) +- some simplifications and slight speed-up to the inflate code (Mark) +- fix gzeof on non-compressed files (Allan Schrum) +- add -std1 option in configure for OSF1 to fix gzprintf (Martin Mokrejs) +- use default value of 4K for Z_BUFSIZE for 16-bit MSDOS (Tim Wegner + Glenn) +- added os2/Makefile.def and os2/zlib.def (Andrew Zabolotny) +- add shared lib support for UNIX_SV4.2MP (MATSUURA Takanori) +- do not wrap extern "C" around system includes (Tom Lane) +- mention zlib binding for TCL in README (Andreas Kupries) +- added amiga/Makefile.pup for Amiga powerUP SAS/C PPC (Andreas Kleinert) +- allow "make install prefix=..." even after configure (Glenn Randers-Pehrson) +- allow "configure --prefix $HOME" (Tim Mooney) +- remove warnings in example.c and gzio.c (Glenn Randers-Pehrson) +- move Makefile.sas to amiga/Makefile.sas + +Changes in 1.1.1 (27 Feb 98) +- fix macros _tr_tally_* in deflate.h for debug mode (Glenn Randers-Pehrson) +- remove block truncation heuristic which had very marginal effect for zlib + (smaller lit_bufsize than in gzip 1.2.4) and degraded a little the + compression ratio on some files. This also allows inlining _tr_tally for + matches in deflate_slow. 
+- added msdos/Makefile.w32 for WIN32 Microsoft Visual C++ (Bob Frazier) + +Changes in 1.1.0 (24 Feb 98) +- do not return STREAM_END prematurely in inflate (John Bowler) +- revert to the zlib 1.0.8 inflate to avoid the gcc 2.8.0 bug (Jeremy Buhler) +- compile with -DFASTEST to get compression code optimized for speed only +- in minigzip, try mmap'ing the input file first (Miguel Albrecht) +- increase size of I/O buffers in minigzip.c and gzio.c (not a big gain + on Sun but significant on HP) + +- add a pointer to experimental unzip library in README (Gilles Vollant) +- initialize variable gcc in configure (Chris Herborth) + +Changes in 1.0.9 (17 Feb 1998) +- added gzputs and gzgets functions +- do not clear eof flag in gzseek (Mark Diekhans) +- fix gzseek for files in transparent mode (Mark Diekhans) +- do not assume that vsprintf returns the number of bytes written (Jens Krinke) +- replace EXPORT with ZEXPORT to avoid conflict with other programs +- added compress2 in zconf.h, zlib.def, zlib.dnt +- new asm code from Gilles Vollant in contrib/asm386 +- simplify the inflate code (Mark): + . Replace ZALLOC's in huft_build() with single ZALLOC in inflate_blocks_new() + . ZALLOC the length list in inflate_trees_fixed() instead of using stack + . ZALLOC the value area for huft_build() instead of using stack + . 
Simplify Z_FINISH check in inflate() + +- Avoid gcc 2.8.0 comparison bug a little differently than zlib 1.0.8 +- in inftrees.c, avoid cc -O bug on HP (Farshid Elahi) +- in zconf.h move the ZLIB_DLL stuff earlier to avoid problems with + the declaration of FAR (Gilles VOllant) +- install libz.so* with mode 755 (executable) instead of 644 (Marc Lehmann) +- read_buf buf parameter of type Bytef* instead of charf* +- zmemcpy parameters are of type Bytef*, not charf* (Joseph Strout) +- do not redeclare unlink in minigzip.c for WIN32 (John Bowler) +- fix check for presence of directories in "make install" (Ian Willis) + +Changes in 1.0.8 (27 Jan 1998) +- fixed offsets in contrib/asm386/gvmat32.asm (Gilles Vollant) +- fix gzgetc and gzputc for big endian systems (Markus Oberhumer) +- added compress2() to allow setting the compression level +- include sys/types.h to get off_t on some systems (Marc Lehmann & QingLong) +- use constant arrays for the static trees in trees.c instead of computing + them at run time (thanks to Ken Raeburn for this suggestion). To create + trees.h, compile with GEN_TREES_H and run "make test". 
+- check return code of example in "make test" and display result +- pass minigzip command line options to file_compress +- simplifying code of inflateSync to avoid gcc 2.8 bug + +- support CC="gcc -Wall" in configure -s (QingLong) +- avoid a flush caused by ftell in gzopen for write mode (Ken Raeburn) +- fix test for shared library support to avoid compiler warnings +- zlib.lib -> zlib.dll in msdos/zlib.rc (Gilles Vollant) +- check for TARGET_OS_MAC in addition to MACOS (Brad Pettit) +- do not use fdopen for Metrowerks on Mac (Brad Pettit)) +- add checks for gzputc and gzputc in example.c +- avoid warnings in gzio.c and deflate.c (Andreas Kleinert) +- use const for the CRC table (Ken Raeburn) +- fixed "make uninstall" for shared libraries +- use Tracev instead of Trace in infblock.c +- in example.c use correct compressed length for test_sync +- suppress +vnocompatwarnings in configure for HPUX (not always supported) + +Changes in 1.0.7 (20 Jan 1998) +- fix gzseek which was broken in write mode +- return error for gzseek to negative absolute position +- fix configure for Linux (Chun-Chung Chen) +- increase stack space for MSC (Tim Wegner) +- get_crc_table and inflateSyncPoint are EXPORTed (Gilles Vollant) +- define EXPORTVA for gzprintf (Gilles Vollant) +- added man page zlib.3 (Rick Rodgers) +- for contrib/untgz, fix makedir() and improve Makefile + +- check gzseek in write mode in example.c +- allocate extra buffer for seeks only if gzseek is actually called +- avoid signed/unsigned comparisons (Tim Wegner, Gilles Vollant) +- add inflateSyncPoint in zconf.h +- fix list of exported functions in nt/zlib.dnt and mdsos/zlib.def + +Changes in 1.0.6 (19 Jan 1998) +- add functions gzprintf, gzputc, gzgetc, gztell, gzeof, gzseek, gzrewind and + gzsetparams (thanks to Roland Giersig and Kevin Ruland for some of this code) +- Fix a deflate bug occurring only with compression level 0 (thanks to + Andy Buckler for finding this one). 
+- In minigzip, pass transparently also the first byte for .Z files. +- return Z_BUF_ERROR instead of Z_OK if output buffer full in uncompress() +- check Z_FINISH in inflate (thanks to Marc Schluper) +- Implement deflateCopy (thanks to Adam Costello) +- make static libraries by default in configure, add --shared option. +- move MSDOS or Windows specific files to directory msdos +- suppress the notion of partial flush to simplify the interface + (but the symbol Z_PARTIAL_FLUSH is kept for compatibility with 1.0.4) +- suppress history buffer provided by application to simplify the interface + (this feature was not implemented anyway in 1.0.4) +- next_in and avail_in must be initialized before calling inflateInit or + inflateInit2 +- add EXPORT in all exported functions (for Windows DLL) +- added Makefile.nt (thanks to Stephen Williams) +- added the unsupported "contrib" directory: + contrib/asm386/ by Gilles Vollant <info@winimage.com> + 386 asm code replacing longest_match(). + contrib/iostream/ by Kevin Ruland <kevin@rodin.wustl.edu> + A C++ I/O streams interface to the zlib gz* functions + contrib/iostream2/ by Tyge Lvset <Tyge.Lovset@cmr.no> + Another C++ I/O streams interface + contrib/untgz/ by "Pedro A. Aranda Guti\irrez" <paag@tid.es> + A very simple tar.gz file extractor using zlib + contrib/visual-basic.txt by Carlos Rios <c_rios@sonda.cl> + How to use compress(), uncompress() and the gz* functions from VB. +- pass params -f (filtered data), -h (huffman only), -1 to -9 (compression + level) in minigzip (thanks to Tom Lane) + +- use const for rommable constants in deflate +- added test for gzseek and gztell in example.c +- add undocumented function inflateSyncPoint() (hack for Paul Mackerras) +- add undocumented function zError to convert error code to string + (for Tim Smithers) +- Allow compilation of gzio with -DNO_DEFLATE to avoid the compression code. +- Use default memcpy for Symantec MSDOS compiler. 
+- Add EXPORT keyword for check_func (needed for Windows DLL) +- add current directory to LD_LIBRARY_PATH for "make test" +- create also a link for libz.so.1 +- added support for FUJITSU UXP/DS (thanks to Toshiaki Nomura) +- use $(SHAREDLIB) instead of libz.so in Makefile.in (for HPUX) +- added -soname for Linux in configure (Chun-Chung Chen, +- assign numbers to the exported functions in zlib.def (for Windows DLL) +- add advice in zlib.h for best usage of deflateSetDictionary +- work around compiler bug on Atari (cast Z_NULL in call of s->checkfn) +- allow compilation with ANSI keywords only enabled for TurboC in large model +- avoid "versionString"[0] (Borland bug) +- add NEED_DUMMY_RETURN for Borland +- use variable z_verbose for tracing in debug mode (L. Peter Deutsch). +- allow compilation with CC +- defined STDC for OS/2 (David Charlap) +- limit external names to 8 chars for MVS (Thomas Lund) +- in minigzip.c, use static buffers only for 16-bit systems +- fix suffix check for "minigzip -d foo.gz" +- do not return an error for the 2nd of two consecutive gzflush() (Felix Lee) +- use _fdopen instead of fdopen for MSC >= 6.0 (Thomas Fanslau) +- added makelcc.bat for lcc-win32 (Tom St Denis) +- in Makefile.dj2, use copy and del instead of install and rm (Frank Donahoe) +- Avoid expanded $Id$. Use "rcs -kb" or "cvs admin -kb" to avoid Id expansion. 
+- check for unistd.h in configure (for off_t) +- remove useless check parameter in inflate_blocks_free +- avoid useless assignment of s->check to itself in inflate_blocks_new +- do not flush twice in gzclose (thanks to Ken Raeburn) +- rename FOPEN as F_OPEN to avoid clash with /usr/include/sys/file.h +- use NO_ERRNO_H instead of enumeration of operating systems with errno.h +- work around buggy fclose on pipes for HP/UX +- support zlib DLL with BORLAND C++ 5.0 (thanks to Glenn Randers-Pehrson) +- fix configure if CC is already equal to gcc + +Changes in 1.0.5 (3 Jan 98) +- Fix inflate to terminate gracefully when fed corrupted or invalid data +- Use const for rommable constants in inflate +- Eliminate memory leaks on error conditions in inflate +- Removed some vestigial code in inflate +- Update web address in README + +Changes in 1.0.4 (24 Jul 96) +- In very rare conditions, deflate(s, Z_FINISH) could fail to produce an EOF + bit, so the decompressor could decompress all the correct data but went + on to attempt decompressing extra garbage data. This affected minigzip too. +- zlibVersion and gzerror return const char* (needed for DLL) +- port to RISCOS (no fdopen, no multiple dots, no unlink, no fileno) +- use z_error only for DEBUG (avoid problem with DLLs) + +Changes in 1.0.3 (2 Jul 96) +- use z_streamp instead of z_stream *, which is now a far pointer in MSDOS + small and medium models; this makes the library incompatible with previous + versions for these models. (No effect in large model or on other systems.) +- return OK instead of BUF_ERROR if previous deflate call returned with + avail_out as zero but there is nothing to do +- added memcmp for non STDC compilers +- define NO_DUMMY_DECL for more Mac compilers (.h files merged incorrectly) +- define __32BIT__ if __386__ or i386 is defined (pb. 
with Watcom and SCO) +- better check for 16-bit mode MSC (avoids problem with Symantec) + +Changes in 1.0.2 (23 May 96) +- added Windows DLL support +- added a function zlibVersion (for the DLL support) +- fixed declarations using Bytef in infutil.c (pb with MSDOS medium model) +- Bytef is define's instead of typedef'd only for Borland C +- avoid reading uninitialized memory in example.c +- mention in README that the zlib format is now RFC1950 +- updated Makefile.dj2 +- added algorithm.doc + +Changes in 1.0.1 (20 May 96) [1.0 skipped to avoid confusion] +- fix array overlay in deflate.c which sometimes caused bad compressed data +- fix inflate bug with empty stored block +- fix MSDOS medium model which was broken in 0.99 +- fix deflateParams() which could generated bad compressed data. +- Bytef is define'd instead of typedef'ed (work around Borland bug) +- added an INDEX file +- new makefiles for DJGPP (Makefile.dj2), 32-bit Borland (Makefile.b32), + Watcom (Makefile.wat), Amiga SAS/C (Makefile.sas) +- speed up adler32 for modern machines without auto-increment +- added -ansi for IRIX in configure +- static_init_done in trees.c is an int +- define unlink as delete for VMS +- fix configure for QNX +- add configure branch for SCO and HPUX +- avoid many warnings (unused variables, dead assignments, etc...) +- no fdopen for BeOS +- fix the Watcom fix for 32 bit mode (define FAR as empty) +- removed redefinition of Byte for MKWERKS +- work around an MWKERKS bug (incorrect merge of all .h files) + +Changes in 0.99 (27 Jan 96) +- allow preset dictionary shared between compressor and decompressor +- allow compression level 0 (no compression) +- add deflateParams in zlib.h: allow dynamic change of compression level + and compression strategy. 
+- test large buffers and deflateParams in example.c +- add optional "configure" to build zlib as a shared library +- suppress Makefile.qnx, use configure instead +- fixed deflate for 64-bit systems (detected on Cray) +- fixed inflate_blocks for 64-bit systems (detected on Alpha) +- declare Z_DEFLATED in zlib.h (possible parameter for deflateInit2) +- always return Z_BUF_ERROR when deflate() has nothing to do +- deflateInit and inflateInit are now macros to allow version checking +- prefix all global functions and types with z_ with -DZ_PREFIX +- make falloc completely reentrant (inftrees.c) +- fixed very unlikely race condition in ct_static_init +- free in reverse order of allocation to help memory manager +- use zlib-1.0/* instead of zlib/* inside the tar.gz +- make zlib warning-free with "gcc -O3 -Wall -Wwrite-strings -Wpointer-arith + -Wconversion -Wstrict-prototypes -Wmissing-prototypes" +- allow gzread on concatenated .gz files +- deflateEnd now returns Z_DATA_ERROR if it was premature +- deflate is finally (?) 
fully deterministic (no matches beyond end of input) +- Document Z_SYNC_FLUSH +- add uninstall in Makefile +- Check for __cpluplus in zlib.h +- Better test in ct_align for partial flush +- avoid harmless warnings for Borland C++ +- initialize hash_head in deflate.c +- avoid warning on fdopen (gzio.c) for HP cc -Aa +- include stdlib.h for STDC compilers +- include errno.h for Cray +- ignore error if ranlib doesn't exist +- call ranlib twice for NeXTSTEP +- use exec_prefix instead of prefix for libz.a +- renamed ct_* as _tr_* to avoid conflict with applications +- clear z->msg in inflateInit2 before any error return +- initialize opaque in example.c, gzio.c, deflate.c and inflate.c +- fixed typo in zconf.h (_GNUC__ => __GNUC__) +- check for WIN32 in zconf.h and zutil.c (avoid farmalloc in 32-bit mode) +- fix typo in Make_vms.com (f$trnlnm -> f$getsyi) +- in fcalloc, normalize pointer if size > 65520 bytes +- don't use special fcalloc for 32 bit Borland C++ +- use STDC instead of __GO32__ to avoid redeclaring exit, calloc, etc... +- use Z_BINARY instead of BINARY +- document that gzclose after gzdopen will close the file +- allow "a" as mode in gzopen. +- fix error checking in gzread +- allow skipping .gz extra-field on pipes +- added reference to Perl interface in README +- put the crc table in FAR data (I dislike more and more the medium model :) +- added get_crc_table +- added a dimension to all arrays (Borland C can't count). +- workaround Borland C bug in declaration of inflate_codes_new & inflate_fast +- guard against multiple inclusion of *.h (for precompiled header on Mac) +- Watcom C pretends to be Microsoft C small model even in 32 bit mode. +- don't use unsized arrays to avoid silly warnings by Visual C++: + warning C4746: 'inflate_mask' : unsized array treated as '__far' + (what's wrong with far data in far model?). 
+- define enum out of inflate_blocks_state to allow compilation with C++ + +Changes in 0.95 (16 Aug 95) +- fix MSDOS small and medium model (now easier to adapt to any compiler) +- inlined send_bits +- fix the final (:-) bug for deflate with flush (output was correct but + not completely flushed in rare occasions). +- default window size is same for compression and decompression + (it's now sufficient to set MAX_WBITS in zconf.h). +- voidp -> voidpf and voidnp -> voidp (for consistency with other + typedefs and because voidnp was not near in large model). + +Changes in 0.94 (13 Aug 95) +- support MSDOS medium model +- fix deflate with flush (could sometimes generate bad output) +- fix deflateReset (zlib header was incorrectly suppressed) +- added support for VMS +- allow a compression level in gzopen() +- gzflush now calls fflush +- For deflate with flush, flush even if no more input is provided. +- rename libgz.a as libz.a +- avoid complex expression in infcodes.c triggering Turbo C bug +- work around a problem with gcc on Alpha (in INSERT_STRING) +- don't use inline functions (problem with some gcc versions) +- allow renaming of Byte, uInt, etc... with #define. +- avoid warning about (unused) pointer before start of array in deflate.c +- avoid various warnings in gzio.c, example.c, infblock.c, adler32.c, zutil.c +- avoid reserved word 'new' in trees.c + +Changes in 0.93 (25 June 95) +- temporarily disable inline functions +- make deflate deterministic +- give enough lookahead for PARTIAL_FLUSH +- Set binary mode for stdin/stdout in minigzip.c for OS/2 +- don't even use signed char in inflate (not portable enough) +- fix inflate memory leak for segmented architectures + +Changes in 0.92 (3 May 95) +- don't assume that char is signed (problem on SGI) +- Clear bit buffer when starting a stored block +- no memcpy on Pyramid +- suppressed inftest.c +- optimized fill_window, put longest_match inline for gcc +- optimized inflate on stored blocks. 
+- untabify all sources to simplify patches + +Changes in 0.91 (2 May 95) +- Default MEM_LEVEL is 8 (not 9 for Unix) as documented in zlib.h +- Document the memory requirements in zconf.h +- added "make install" +- fix sync search logic in inflateSync +- deflate(Z_FULL_FLUSH) now works even if output buffer too short +- after inflateSync, don't scare people with just "lo world" +- added support for DJGPP + +Changes in 0.9 (1 May 95) +- don't assume that zalloc clears the allocated memory (the TurboC bug + was Mark's bug after all :) +- let again gzread copy uncompressed data unchanged (was working in 0.71) +- deflate(Z_FULL_FLUSH), inflateReset and inflateSync are now fully implemented +- added a test of inflateSync in example.c +- moved MAX_WBITS to zconf.h because users might want to change that. +- document explicitly that zalloc(64K) on MSDOS must return a normalized + pointer (zero offset) +- added Makefiles for Microsoft C, Turbo C, Borland C++ +- faster crc32() + +Changes in 0.8 (29 April 95) +- added fast inflate (inffast.c) +- deflate(Z_FINISH) now returns Z_STREAM_END when done. Warning: this + is incompatible with previous versions of zlib which returned Z_OK. +- work around a TurboC compiler bug (bad code for b << 0, see infutil.h) + (actually that was not a compiler bug, see 0.81 above) +- gzread no longer reads one extra byte in certain cases +- In gzio destroy(), don't reference a freed structure +- avoid many warnings for MSDOS +- avoid the ERROR symbol which is used by MS Windows + +Changes in 0.71 (14 April 95) +- Fixed more MSDOS compilation problems :( There is still a bug with + TurboC large model. + +Changes in 0.7 (14 April 95) +- Added full inflate support. +- Simplified the crc32() interface. The pre- and post-conditioning + (one's complement) is now done inside crc32(). WARNING: this is + incompatible with previous versions; see zlib.h for the new usage. + +Changes in 0.61 (12 April 95) +- workaround for a bug in TurboC. 
example and minigzip now work on MSDOS. + +Changes in 0.6 (11 April 95) +- added minigzip.c +- added gzdopen to reopen a file descriptor as gzFile +- added transparent reading of non-gziped files in gzread. +- fixed bug in gzread (don't read crc as data) +- fixed bug in destroy (gzio.c) (don't return Z_STREAM_END for gzclose). +- don't allocate big arrays in the stack (for MSDOS) +- fix some MSDOS compilation problems + +Changes in 0.5: +- do real compression in deflate.c. Z_PARTIAL_FLUSH is supported but + not yet Z_FULL_FLUSH. +- support decompression but only in a single step (forced Z_FINISH) +- added opaque object for zalloc and zfree. +- added deflateReset and inflateReset +- added a variable zlib_version for consistency checking. +- renamed the 'filter' parameter of deflateInit2 as 'strategy'. + Added Z_FILTERED and Z_HUFFMAN_ONLY constants. + +Changes in 0.4: +- avoid "zip" everywhere, use zlib instead of ziplib. +- suppress Z_BLOCK_FLUSH, interpret Z_PARTIAL_FLUSH as block flush + if compression method == 8. +- added adler32 and crc32 +- renamed deflateOptions as deflateInit2, call one or the other but not both +- added the method parameter for deflateInit2. +- added inflateInit2 +- simplied considerably deflateInit and inflateInit by not supporting + user-provided history buffer. This is supported only in deflateInit2 + and inflateInit2. + +Changes in 0.3: +- prefix all macro names with Z_ +- use Z_FINISH instead of deflateEnd to finish compression. 
+- added Z_HUFFMAN_ONLY +- added gzerror() diff -Nru nodejs-0.11.13/deps/zlib/CMakeLists.txt nodejs-0.11.15/deps/zlib/CMakeLists.txt --- nodejs-0.11.13/deps/zlib/CMakeLists.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/CMakeLists.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,249 @@ +cmake_minimum_required(VERSION 2.4.4) +set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS ON) + +project(zlib C) + +set(VERSION "1.2.8") + +option(ASM686 "Enable building i686 assembly implementation") +option(AMD64 "Enable building amd64 assembly implementation") + +set(INSTALL_BIN_DIR "${CMAKE_INSTALL_PREFIX}/bin" CACHE PATH "Installation directory for executables") +set(INSTALL_LIB_DIR "${CMAKE_INSTALL_PREFIX}/lib" CACHE PATH "Installation directory for libraries") +set(INSTALL_INC_DIR "${CMAKE_INSTALL_PREFIX}/include" CACHE PATH "Installation directory for headers") +set(INSTALL_MAN_DIR "${CMAKE_INSTALL_PREFIX}/share/man" CACHE PATH "Installation directory for manual pages") +set(INSTALL_PKGCONFIG_DIR "${CMAKE_INSTALL_PREFIX}/share/pkgconfig" CACHE PATH "Installation directory for pkgconfig (.pc) files") + +include(CheckTypeSize) +include(CheckFunctionExists) +include(CheckIncludeFile) +include(CheckCSourceCompiles) +enable_testing() + +check_include_file(sys/types.h HAVE_SYS_TYPES_H) +check_include_file(stdint.h HAVE_STDINT_H) +check_include_file(stddef.h HAVE_STDDEF_H) + +# +# Check to see if we have large file support +# +set(CMAKE_REQUIRED_DEFINITIONS -D_LARGEFILE64_SOURCE=1) +# We add these other definitions here because CheckTypeSize.cmake +# in CMake 2.4.x does not automatically do so and we want +# compatibility with CMake 2.4.x. 
+if(HAVE_SYS_TYPES_H) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_SYS_TYPES_H) +endif() +if(HAVE_STDINT_H) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDINT_H) +endif() +if(HAVE_STDDEF_H) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDDEF_H) +endif() +check_type_size(off64_t OFF64_T) +if(HAVE_OFF64_T) + add_definitions(-D_LARGEFILE64_SOURCE=1) +endif() +set(CMAKE_REQUIRED_DEFINITIONS) # clear variable + +# +# Check for fseeko +# +check_function_exists(fseeko HAVE_FSEEKO) +if(NOT HAVE_FSEEKO) + add_definitions(-DNO_FSEEKO) +endif() + +# +# Check for unistd.h +# +check_include_file(unistd.h Z_HAVE_UNISTD_H) + +if(MSVC) + set(CMAKE_DEBUG_POSTFIX "d") + add_definitions(-D_CRT_SECURE_NO_DEPRECATE) + add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +endif() + +if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) + # If we're doing an out of source build and the user has a zconf.h + # in their source tree... + if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h) + message(STATUS "Renaming") + message(STATUS " ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h") + message(STATUS "to 'zconf.h.included' because this file is included with zlib") + message(STATUS "but CMake generates it automatically in the build directory.") + file(RENAME ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h.included) + endif() +endif() + +set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/zlib.pc.cmakein + ${ZLIB_PC} @ONLY) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h.cmakein + ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY) +include_directories(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_SOURCE_DIR}) + + +#============================================================================ +# zlib +#============================================================================ + +set(ZLIB_PUBLIC_HDRS + ${CMAKE_CURRENT_BINARY_DIR}/zconf.h + zlib.h +) +set(ZLIB_PRIVATE_HDRS + crc32.h + deflate.h 
+ gzguts.h + inffast.h + inffixed.h + inflate.h + inftrees.h + trees.h + zutil.h +) +set(ZLIB_SRCS + adler32.c + compress.c + crc32.c + deflate.c + gzclose.c + gzlib.c + gzread.c + gzwrite.c + inflate.c + infback.c + inftrees.c + inffast.c + trees.c + uncompr.c + zutil.c +) + +if(NOT MINGW) + set(ZLIB_DLL_SRCS + win32/zlib1.rc # If present will override custom build rule below. + ) +endif() + +if(CMAKE_COMPILER_IS_GNUCC) + if(ASM686) + set(ZLIB_ASMS contrib/asm686/match.S) + elseif (AMD64) + set(ZLIB_ASMS contrib/amd64/amd64-match.S) + endif () + + if(ZLIB_ASMS) + add_definitions(-DASMV) + set_source_files_properties(${ZLIB_ASMS} PROPERTIES LANGUAGE C COMPILE_FLAGS -DNO_UNDERLINE) + endif() +endif() + +if(MSVC) + if(ASM686) + ENABLE_LANGUAGE(ASM_MASM) + set(ZLIB_ASMS + contrib/masmx86/inffas32.asm + contrib/masmx86/match686.asm + ) + elseif (AMD64) + ENABLE_LANGUAGE(ASM_MASM) + set(ZLIB_ASMS + contrib/masmx64/gvmat64.asm + contrib/masmx64/inffasx64.asm + ) + endif() + + if(ZLIB_ASMS) + add_definitions(-DASMV -DASMINF) + endif() +endif() + +# parse the full version number from zlib.h and include in ZLIB_FULL_VERSION +file(READ ${CMAKE_CURRENT_SOURCE_DIR}/zlib.h _zlib_h_contents) +string(REGEX REPLACE ".*#define[ \t]+ZLIB_VERSION[ \t]+\"([-0-9A-Za-z.]+)\".*" + "\\1" ZLIB_FULL_VERSION ${_zlib_h_contents}) + +if(MINGW) + # This gets us DLL resource information when compiling on MinGW. 
+ if(NOT CMAKE_RC_COMPILER) + set(CMAKE_RC_COMPILER windres.exe) + endif() + + add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj + COMMAND ${CMAKE_RC_COMPILER} + -D GCC_WINDRES + -I ${CMAKE_CURRENT_SOURCE_DIR} + -I ${CMAKE_CURRENT_BINARY_DIR} + -o ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj + -i ${CMAKE_CURRENT_SOURCE_DIR}/win32/zlib1.rc) + set(ZLIB_DLL_SRCS ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj) +endif(MINGW) + +add_library(zlib SHARED ${ZLIB_SRCS} ${ZLIB_ASMS} ${ZLIB_DLL_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS}) +add_library(zlibstatic STATIC ${ZLIB_SRCS} ${ZLIB_ASMS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS}) +set_target_properties(zlib PROPERTIES DEFINE_SYMBOL ZLIB_DLL) +set_target_properties(zlib PROPERTIES SOVERSION 1) + +if(NOT CYGWIN) + # This property causes shared libraries on Linux to have the full version + # encoded into their final filename. We disable this on Cygwin because + # it causes cygz-${ZLIB_FULL_VERSION}.dll to be created when cygz.dll + # seems to be the default. 
+ # + # This has no effect with MSVC, on that platform the version info for + # the DLL comes from the resource file win32/zlib1.rc + set_target_properties(zlib PROPERTIES VERSION ${ZLIB_FULL_VERSION}) +endif() + +if(UNIX) + # On unix-like platforms the library is almost always called libz + set_target_properties(zlib zlibstatic PROPERTIES OUTPUT_NAME z) + if(NOT APPLE) + set_target_properties(zlib PROPERTIES LINK_FLAGS "-Wl,--version-script,\"${CMAKE_CURRENT_SOURCE_DIR}/zlib.map\"") + endif() +elseif(BUILD_SHARED_LIBS AND WIN32) + # Creates zlib1.dll when building shared library version + set_target_properties(zlib PROPERTIES SUFFIX "1.dll") +endif() + +if(NOT SKIP_INSTALL_LIBRARIES AND NOT SKIP_INSTALL_ALL ) + install(TARGETS zlib zlibstatic + RUNTIME DESTINATION "${INSTALL_BIN_DIR}" + ARCHIVE DESTINATION "${INSTALL_LIB_DIR}" + LIBRARY DESTINATION "${INSTALL_LIB_DIR}" ) +endif() +if(NOT SKIP_INSTALL_HEADERS AND NOT SKIP_INSTALL_ALL ) + install(FILES ${ZLIB_PUBLIC_HDRS} DESTINATION "${INSTALL_INC_DIR}") +endif() +if(NOT SKIP_INSTALL_FILES AND NOT SKIP_INSTALL_ALL ) + install(FILES zlib.3 DESTINATION "${INSTALL_MAN_DIR}/man3") +endif() +if(NOT SKIP_INSTALL_FILES AND NOT SKIP_INSTALL_ALL ) + install(FILES ${ZLIB_PC} DESTINATION "${INSTALL_PKGCONFIG_DIR}") +endif() + +#============================================================================ +# Example binaries +#============================================================================ + +add_executable(example test/example.c) +target_link_libraries(example zlib) +add_test(example example) + +add_executable(minigzip test/minigzip.c) +target_link_libraries(minigzip zlib) + +if(HAVE_OFF64_T) + add_executable(example64 test/example.c) + target_link_libraries(example64 zlib) + set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") + add_test(example64 example64) + + add_executable(minigzip64 test/minigzip.c) + target_link_libraries(minigzip64 zlib) + set_target_properties(minigzip64 
PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") +endif() diff -Nru nodejs-0.11.13/deps/zlib/compress.c nodejs-0.11.15/deps/zlib/compress.c --- nodejs-0.11.13/deps/zlib/compress.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/compress.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,9 @@ /* compress.c -- compress a memory buffer - * Copyright (C) 1995-2003 Jean-loup Gailly. + * Copyright (C) 1995-2005 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: compress.c,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #define ZLIB_INTERNAL #include "zlib.h" @@ -29,7 +29,7 @@ z_stream stream; int err; - stream.next_in = (Bytef*)source; + stream.next_in = (z_const Bytef *)source; stream.avail_in = (uInt)sourceLen; #ifdef MAXSEG_64K /* Check for source > 64K on 16-bit machine: */ @@ -75,5 +75,6 @@ uLong ZEXPORT compressBound (sourceLen) uLong sourceLen; { - return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + 11; + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13; } diff -Nru nodejs-0.11.13/deps/zlib/configure nodejs-0.11.15/deps/zlib/configure --- nodejs-0.11.13/deps/zlib/configure 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/configure 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,831 @@ +#!/bin/sh +# configure script for zlib. +# +# Normally configure builds both a static and a shared library. +# If you want to build just a static library, use: ./configure --static +# +# To impose specific compiler or flags or install directory, use for example: +# prefix=$HOME CC=cc CFLAGS="-O4" ./configure +# or for csh/tcsh users: +# (setenv prefix $HOME; setenv CC cc; setenv CFLAGS "-O4"; ./configure) + +# Incorrect settings of CC or CFLAGS may prevent creating a shared library. +# If you have problems, try without defining CC and CFLAGS before reporting +# an error. 
+ +# start off configure.log +echo -------------------- >> configure.log +echo $0 $* >> configure.log +date >> configure.log + +# set command prefix for cross-compilation +if [ -n "${CHOST}" ]; then + uname="`echo "${CHOST}" | sed -e 's/^[^-]*-\([^-]*\)$/\1/' -e 's/^[^-]*-[^-]*-\([^-]*\)$/\1/' -e 's/^[^-]*-[^-]*-\([^-]*\)-.*$/\1/'`" + CROSS_PREFIX="${CHOST}-" +fi + +# destination name for static library +STATICLIB=libz.a + +# extract zlib version numbers from zlib.h +VER=`sed -n -e '/VERSION "/s/.*"\(.*\)".*/\1/p' < zlib.h` +VER3=`sed -n -e '/VERSION "/s/.*"\([0-9]*\\.[0-9]*\\.[0-9]*\).*/\1/p' < zlib.h` +VER2=`sed -n -e '/VERSION "/s/.*"\([0-9]*\\.[0-9]*\)\\..*/\1/p' < zlib.h` +VER1=`sed -n -e '/VERSION "/s/.*"\([0-9]*\)\\..*/\1/p' < zlib.h` + +# establish commands for library building +if "${CROSS_PREFIX}ar" --version >/dev/null 2>/dev/null || test $? -lt 126; then + AR=${AR-"${CROSS_PREFIX}ar"} + test -n "${CROSS_PREFIX}" && echo Using ${AR} | tee -a configure.log +else + AR=${AR-"ar"} + test -n "${CROSS_PREFIX}" && echo Using ${AR} | tee -a configure.log +fi +ARFLAGS=${ARFLAGS-"rc"} +if "${CROSS_PREFIX}ranlib" --version >/dev/null 2>/dev/null || test $? -lt 126; then + RANLIB=${RANLIB-"${CROSS_PREFIX}ranlib"} + test -n "${CROSS_PREFIX}" && echo Using ${RANLIB} | tee -a configure.log +else + RANLIB=${RANLIB-"ranlib"} +fi +if "${CROSS_PREFIX}nm" --version >/dev/null 2>/dev/null || test $? 
-lt 126; then + NM=${NM-"${CROSS_PREFIX}nm"} + test -n "${CROSS_PREFIX}" && echo Using ${NM} | tee -a configure.log +else + NM=${NM-"nm"} +fi + +# set defaults before processing command line options +LDCONFIG=${LDCONFIG-"ldconfig"} +LDSHAREDLIBC="${LDSHAREDLIBC--lc}" +ARCHS= +prefix=${prefix-/usr/local} +exec_prefix=${exec_prefix-'${prefix}'} +libdir=${libdir-'${exec_prefix}/lib'} +sharedlibdir=${sharedlibdir-'${libdir}'} +includedir=${includedir-'${prefix}/include'} +mandir=${mandir-'${prefix}/share/man'} +shared_ext='.so' +shared=1 +solo=0 +cover=0 +zprefix=0 +zconst=0 +build64=0 +gcc=0 +old_cc="$CC" +old_cflags="$CFLAGS" +OBJC='$(OBJZ) $(OBJG)' +PIC_OBJC='$(PIC_OBJZ) $(PIC_OBJG)' + +# leave this script, optionally in a bad way +leave() +{ + if test "$*" != "0"; then + echo "** $0 aborting." | tee -a configure.log + fi + rm -f $test.[co] $test $test$shared_ext $test.gcno ./--version + echo -------------------- >> configure.log + echo >> configure.log + echo >> configure.log + exit $1 +} + +# process command line options +while test $# -ge 1 +do +case "$1" in + -h* | --help) + echo 'usage:' | tee -a configure.log + echo ' configure [--const] [--zprefix] [--prefix=PREFIX] [--eprefix=EXPREFIX]' | tee -a configure.log + echo ' [--static] [--64] [--libdir=LIBDIR] [--sharedlibdir=LIBDIR]' | tee -a configure.log + echo ' [--includedir=INCLUDEDIR] [--archs="-arch i386 -arch x86_64"]' | tee -a configure.log + exit 0 ;; + -p*=* | --prefix=*) prefix=`echo $1 | sed 's/.*=//'`; shift ;; + -e*=* | --eprefix=*) exec_prefix=`echo $1 | sed 's/.*=//'`; shift ;; + -l*=* | --libdir=*) libdir=`echo $1 | sed 's/.*=//'`; shift ;; + --sharedlibdir=*) sharedlibdir=`echo $1 | sed 's/.*=//'`; shift ;; + -i*=* | --includedir=*) includedir=`echo $1 | sed 's/.*=//'`;shift ;; + -u*=* | --uname=*) uname=`echo $1 | sed 's/.*=//'`;shift ;; + -p* | --prefix) prefix="$2"; shift; shift ;; + -e* | --eprefix) exec_prefix="$2"; shift; shift ;; + -l* | --libdir) libdir="$2"; shift; shift ;; + -i* | 
--includedir) includedir="$2"; shift; shift ;; + -s* | --shared | --enable-shared) shared=1; shift ;; + -t | --static) shared=0; shift ;; + --solo) solo=1; shift ;; + --cover) cover=1; shift ;; + -z* | --zprefix) zprefix=1; shift ;; + -6* | --64) build64=1; shift ;; + -a*=* | --archs=*) ARCHS=`echo $1 | sed 's/.*=//'`; shift ;; + --sysconfdir=*) echo "ignored option: --sysconfdir" | tee -a configure.log; shift ;; + --localstatedir=*) echo "ignored option: --localstatedir" | tee -a configure.log; shift ;; + -c* | --const) zconst=1; shift ;; + *) + echo "unknown option: $1" | tee -a configure.log + echo "$0 --help for help" | tee -a configure.log + leave 1;; + esac +done + +# temporary file name +test=ztest$$ + +# put arguments in log, also put test file in log if used in arguments +show() +{ + case "$*" in + *$test.c*) + echo === $test.c === >> configure.log + cat $test.c >> configure.log + echo === >> configure.log;; + esac + echo $* >> configure.log +} + +# check for gcc vs. cc and set compile and link flags based on the system identified by uname +cat > $test.c <<EOF +extern int getchar(); +int hello() {return getchar();} +EOF + +test -z "$CC" && echo Checking for ${CROSS_PREFIX}gcc... | tee -a configure.log +cc=${CC-${CROSS_PREFIX}gcc} +cflags=${CFLAGS-"-O3"} +# to force the asm version use: CFLAGS="-O3 -DASMV" ./configure +case "$cc" in + *gcc*) gcc=1 ;; + *clang*) gcc=1 ;; +esac +case `$cc -v 2>&1` in + *gcc*) gcc=1 ;; +esac + +show $cc -c $test.c +if test "$gcc" -eq 1 && ($cc -c $test.c) >> configure.log 2>&1; then + echo ... 
using gcc >> configure.log + CC="$cc" + CFLAGS="${CFLAGS--O3} ${ARCHS}" + SFLAGS="${CFLAGS--O3} -fPIC" + LDFLAGS="${LDFLAGS} ${ARCHS}" + if test $build64 -eq 1; then + CFLAGS="${CFLAGS} -m64" + SFLAGS="${SFLAGS} -m64" + fi + if test "${ZLIBGCCWARN}" = "YES"; then + if test "$zconst" -eq 1; then + CFLAGS="${CFLAGS} -Wall -Wextra -Wcast-qual -pedantic -DZLIB_CONST" + else + CFLAGS="${CFLAGS} -Wall -Wextra -pedantic" + fi + fi + if test -z "$uname"; then + uname=`(uname -s || echo unknown) 2>/dev/null` + fi + case "$uname" in + Linux* | linux* | GNU | GNU/* | solaris*) + LDSHARED=${LDSHARED-"$cc -shared -Wl,-soname,libz.so.1,--version-script,zlib.map"} ;; + *BSD | *bsd* | DragonFly) + LDSHARED=${LDSHARED-"$cc -shared -Wl,-soname,libz.so.1,--version-script,zlib.map"} + LDCONFIG="ldconfig -m" ;; + CYGWIN* | Cygwin* | cygwin* | OS/2*) + EXE='.exe' ;; + MINGW* | mingw*) +# temporary bypass + rm -f $test.[co] $test $test$shared_ext + echo "Please use win32/Makefile.gcc instead." | tee -a configure.log + leave 1 + LDSHARED=${LDSHARED-"$cc -shared"} + LDSHAREDLIBC="" + EXE='.exe' ;; + QNX*) # This is for QNX6. I suppose that the QNX rule below is for QNX2,QNX4 + # (alain.bonnefoy@icbt.com) + LDSHARED=${LDSHARED-"$cc -shared -Wl,-hlibz.so.1"} ;; + HP-UX*) + LDSHARED=${LDSHARED-"$cc -shared $SFLAGS"} + case `(uname -m || echo unknown) 2>/dev/null` in + ia64) + shared_ext='.so' + SHAREDLIB='libz.so' ;; + *) + shared_ext='.sl' + SHAREDLIB='libz.sl' ;; + esac ;; + Darwin* | darwin*) + shared_ext='.dylib' + SHAREDLIB=libz$shared_ext + SHAREDLIBV=libz.$VER$shared_ext + SHAREDLIBM=libz.$VER1$shared_ext + LDSHARED=${LDSHARED-"$cc -dynamiclib -install_name $libdir/$SHAREDLIBM -compatibility_version $VER1 -current_version $VER3"} + if libtool -V 2>&1 | grep Apple > /dev/null; then + AR="libtool" + else + AR="/usr/bin/libtool" + fi + ARFLAGS="-o" ;; + *) LDSHARED=${LDSHARED-"$cc -shared"} ;; + esac +else + # find system name and corresponding cc options + CC=${CC-cc} + gcc=0 + echo ... 
using $CC >> configure.log + if test -z "$uname"; then + uname=`(uname -sr || echo unknown) 2>/dev/null` + fi + case "$uname" in + HP-UX*) SFLAGS=${CFLAGS-"-O +z"} + CFLAGS=${CFLAGS-"-O"} +# LDSHARED=${LDSHARED-"ld -b +vnocompatwarnings"} + LDSHARED=${LDSHARED-"ld -b"} + case `(uname -m || echo unknown) 2>/dev/null` in + ia64) + shared_ext='.so' + SHAREDLIB='libz.so' ;; + *) + shared_ext='.sl' + SHAREDLIB='libz.sl' ;; + esac ;; + IRIX*) SFLAGS=${CFLAGS-"-ansi -O2 -rpath ."} + CFLAGS=${CFLAGS-"-ansi -O2"} + LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so.1"} ;; + OSF1\ V4*) SFLAGS=${CFLAGS-"-O -std1"} + CFLAGS=${CFLAGS-"-O -std1"} + LDFLAGS="${LDFLAGS} -Wl,-rpath,." + LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so -Wl,-msym -Wl,-rpath,$(libdir) -Wl,-set_version,${VER}:1.0"} ;; + OSF1*) SFLAGS=${CFLAGS-"-O -std1"} + CFLAGS=${CFLAGS-"-O -std1"} + LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so.1"} ;; + QNX*) SFLAGS=${CFLAGS-"-4 -O"} + CFLAGS=${CFLAGS-"-4 -O"} + LDSHARED=${LDSHARED-"cc"} + RANLIB=${RANLIB-"true"} + AR="cc" + ARFLAGS="-A" ;; + SCO_SV\ 3.2*) SFLAGS=${CFLAGS-"-O3 -dy -KPIC "} + CFLAGS=${CFLAGS-"-O3"} + LDSHARED=${LDSHARED-"cc -dy -KPIC -G"} ;; + SunOS\ 5* | solaris*) + LDSHARED=${LDSHARED-"cc -G -h libz$shared_ext.$VER1"} + SFLAGS=${CFLAGS-"-fast -KPIC"} + CFLAGS=${CFLAGS-"-fast"} + if test $build64 -eq 1; then + # old versions of SunPRO/Workshop/Studio don't support -m64, + # but newer ones do. Check for it. 
+ flag64=`$CC -flags | egrep -- '^-m64'` + if test x"$flag64" != x"" ; then + CFLAGS="${CFLAGS} -m64" + SFLAGS="${SFLAGS} -m64" + else + case `(uname -m || echo unknown) 2>/dev/null` in + i86*) + SFLAGS="$SFLAGS -xarch=amd64" + CFLAGS="$CFLAGS -xarch=amd64" ;; + *) + SFLAGS="$SFLAGS -xarch=v9" + CFLAGS="$CFLAGS -xarch=v9" ;; + esac + fi + fi + ;; + SunOS\ 4*) SFLAGS=${CFLAGS-"-O2 -PIC"} + CFLAGS=${CFLAGS-"-O2"} + LDSHARED=${LDSHARED-"ld"} ;; + SunStudio\ 9*) SFLAGS=${CFLAGS-"-fast -xcode=pic32 -xtarget=ultra3 -xarch=v9b"} + CFLAGS=${CFLAGS-"-fast -xtarget=ultra3 -xarch=v9b"} + LDSHARED=${LDSHARED-"cc -xarch=v9b"} ;; + UNIX_System_V\ 4.2.0) + SFLAGS=${CFLAGS-"-KPIC -O"} + CFLAGS=${CFLAGS-"-O"} + LDSHARED=${LDSHARED-"cc -G"} ;; + UNIX_SV\ 4.2MP) + SFLAGS=${CFLAGS-"-Kconform_pic -O"} + CFLAGS=${CFLAGS-"-O"} + LDSHARED=${LDSHARED-"cc -G"} ;; + OpenUNIX\ 5) + SFLAGS=${CFLAGS-"-KPIC -O"} + CFLAGS=${CFLAGS-"-O"} + LDSHARED=${LDSHARED-"cc -G"} ;; + AIX*) # Courtesy of dbakker@arrayasolutions.com + SFLAGS=${CFLAGS-"-O -qmaxmem=8192"} + CFLAGS=${CFLAGS-"-O -qmaxmem=8192"} + LDSHARED=${LDSHARED-"xlc -G"} ;; + # send working options for other systems to zlib@gzip.org + *) SFLAGS=${CFLAGS-"-O"} + CFLAGS=${CFLAGS-"-O"} + LDSHARED=${LDSHARED-"cc -shared"} ;; + esac +fi + +# destination names for shared library if not defined above +SHAREDLIB=${SHAREDLIB-"libz$shared_ext"} +SHAREDLIBV=${SHAREDLIBV-"libz$shared_ext.$VER"} +SHAREDLIBM=${SHAREDLIBM-"libz$shared_ext.$VER1"} + +echo >> configure.log + +# define functions for testing compiler and library characteristics and logging the results + +cat > $test.c <<EOF +#error error +EOF +if ($CC -c $CFLAGS $test.c) 2>/dev/null; then + try() + { + show $* + test "`( $* ) 2>&1 | tee -a configure.log`" = "" + } + echo - using any output from compiler to indicate an error >> configure.log +else +try() +{ + show $* + ( $* ) >> configure.log 2>&1 + ret=$? 
+ if test $ret -ne 0; then + echo "(exit code "$ret")" >> configure.log + fi + return $ret +} +fi + +tryboth() +{ + show $* + got=`( $* ) 2>&1` + ret=$? + printf %s "$got" >> configure.log + if test $ret -ne 0; then + return $ret + fi + test "$got" = "" +} + +cat > $test.c << EOF +int foo() { return 0; } +EOF +echo "Checking for obsessive-compulsive compiler options..." >> configure.log +if try $CC -c $CFLAGS $test.c; then + : +else + echo "Compiler error reporting is too harsh for $0 (perhaps remove -Werror)." | tee -a configure.log + leave 1 +fi + +echo >> configure.log + +# see if shared library build supported +cat > $test.c <<EOF +extern int getchar(); +int hello() {return getchar();} +EOF +if test $shared -eq 1; then + echo Checking for shared library support... | tee -a configure.log + # we must test in two steps (cc then ld), required at least on SunOS 4.x + if try $CC -w -c $SFLAGS $test.c && + try $LDSHARED $SFLAGS -o $test$shared_ext $test.o; then + echo Building shared library $SHAREDLIBV with $CC. | tee -a configure.log + elif test -z "$old_cc" -a -z "$old_cflags"; then + echo No shared library support. | tee -a configure.log + shared=0; + else + echo 'No shared library support; try without defining CC and CFLAGS' | tee -a configure.log + shared=0; + fi +fi +if test $shared -eq 0; then + LDSHARED="$CC" + ALL="static" + TEST="all teststatic" + SHAREDLIB="" + SHAREDLIBV="" + SHAREDLIBM="" + echo Building static library $STATICLIB version $VER with $CC. | tee -a configure.log +else + ALL="static shared" + TEST="all teststatic testshared" +fi + +# check for underscores in external names for use by assembler code +CPP=${CPP-"$CC -E"} +case $CFLAGS in + *ASMV*) + echo >> configure.log + show "$NM $test.o | grep _hello" + if test "`$NM $test.o | grep _hello | tee -a configure.log`" = ""; then + CPP="$CPP -DNO_UNDERLINE" + echo Checking for underline in external names... No. | tee -a configure.log + else + echo Checking for underline in external names... Yes. 
| tee -a configure.log + fi ;; +esac + +echo >> configure.log + +# check for large file support, and if none, check for fseeko() +cat > $test.c <<EOF +#include <sys/types.h> +off64_t dummy = 0; +EOF +if try $CC -c $CFLAGS -D_LARGEFILE64_SOURCE=1 $test.c; then + CFLAGS="${CFLAGS} -D_LARGEFILE64_SOURCE=1" + SFLAGS="${SFLAGS} -D_LARGEFILE64_SOURCE=1" + ALL="${ALL} all64" + TEST="${TEST} test64" + echo "Checking for off64_t... Yes." | tee -a configure.log + echo "Checking for fseeko... Yes." | tee -a configure.log +else + echo "Checking for off64_t... No." | tee -a configure.log + echo >> configure.log + cat > $test.c <<EOF +#include <stdio.h> +int main(void) { + fseeko(NULL, 0, 0); + return 0; +} +EOF + if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for fseeko... Yes." | tee -a configure.log + else + CFLAGS="${CFLAGS} -DNO_FSEEKO" + SFLAGS="${SFLAGS} -DNO_FSEEKO" + echo "Checking for fseeko... No." | tee -a configure.log + fi +fi + +echo >> configure.log + +# check for strerror() for use by gz* functions +cat > $test.c <<EOF +#include <string.h> +#include <errno.h> +int main() { return strlen(strerror(errno)); } +EOF +if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for strerror... Yes." | tee -a configure.log +else + CFLAGS="${CFLAGS} -DNO_STRERROR" + SFLAGS="${SFLAGS} -DNO_STRERROR" + echo "Checking for strerror... No." | tee -a configure.log +fi + +# copy clean zconf.h for subsequent edits +cp -p zconf.h.in zconf.h + +echo >> configure.log + +# check for unistd.h and save result in zconf.h +cat > $test.c <<EOF +#include <unistd.h> +int main() { return 0; } +EOF +if try $CC -c $CFLAGS $test.c; then + sed < zconf.h "/^#ifdef HAVE_UNISTD_H.* may be/s/def HAVE_UNISTD_H\(.*\) may be/ 1\1 was/" > zconf.temp.h + mv zconf.temp.h zconf.h + echo "Checking for unistd.h... Yes." | tee -a configure.log +else + echo "Checking for unistd.h... No." 
| tee -a configure.log +fi + +echo >> configure.log + +# check for stdarg.h and save result in zconf.h +cat > $test.c <<EOF +#include <stdarg.h> +int main() { return 0; } +EOF +if try $CC -c $CFLAGS $test.c; then + sed < zconf.h "/^#ifdef HAVE_STDARG_H.* may be/s/def HAVE_STDARG_H\(.*\) may be/ 1\1 was/" > zconf.temp.h + mv zconf.temp.h zconf.h + echo "Checking for stdarg.h... Yes." | tee -a configure.log +else + echo "Checking for stdarg.h... No." | tee -a configure.log +fi + +# if the z_ prefix was requested, save that in zconf.h +if test $zprefix -eq 1; then + sed < zconf.h "/#ifdef Z_PREFIX.* may be/s/def Z_PREFIX\(.*\) may be/ 1\1 was/" > zconf.temp.h + mv zconf.temp.h zconf.h + echo >> configure.log + echo "Using z_ prefix on all symbols." | tee -a configure.log +fi + +# if --solo compilation was requested, save that in zconf.h and remove gz stuff from object lists +if test $solo -eq 1; then + sed '/#define ZCONF_H/a\ +#define Z_SOLO + +' < zconf.h > zconf.temp.h + mv zconf.temp.h zconf.h +OBJC='$(OBJZ)' +PIC_OBJC='$(PIC_OBJZ)' +fi + +# if code coverage testing was requested, use older gcc if defined, e.g. "gcc-4.2" on Mac OS X +if test $cover -eq 1; then + CFLAGS="${CFLAGS} -fprofile-arcs -ftest-coverage" + if test -n "$GCC_CLASSIC"; then + CC=$GCC_CLASSIC + fi +fi + +echo >> configure.log + +# conduct a series of tests to resolve eight possible cases of using "vs" or "s" printf functions +# (using stdarg or not), with or without "n" (proving size of buffer), and with or without a +# return value. The most secure result is vsnprintf() with a return value. snprintf() with a +# return value is secure as well, but then gzprintf() will be limited to 20 arguments. +cat > $test.c <<EOF +#include <stdio.h> +#include <stdarg.h> +#include "zconf.h" +int main() +{ +#ifndef STDC + choke me +#endif + return 0; +} +EOF +if try $CC -c $CFLAGS $test.c; then + echo "Checking whether to use vs[n]printf() or s[n]printf()... using vs[n]printf()." 
| tee -a configure.log + + echo >> configure.log + cat > $test.c <<EOF +#include <stdio.h> +#include <stdarg.h> +int mytest(const char *fmt, ...) +{ + char buf[20]; + va_list ap; + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + return 0; +} +int main() +{ + return (mytest("Hello%d\n", 1)); +} +EOF + if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for vsnprintf() in stdio.h... Yes." | tee -a configure.log + + echo >> configure.log + cat >$test.c <<EOF +#include <stdio.h> +#include <stdarg.h> +int mytest(const char *fmt, ...) +{ + int n; + char buf[20]; + va_list ap; + va_start(ap, fmt); + n = vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + return n; +} +int main() +{ + return (mytest("Hello%d\n", 1)); +} +EOF + + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of vsnprintf()... Yes." | tee -a configure.log + else + CFLAGS="$CFLAGS -DHAS_vsnprintf_void" + SFLAGS="$SFLAGS -DHAS_vsnprintf_void" + echo "Checking for return value of vsnprintf()... No." | tee -a configure.log + echo " WARNING: apparently vsnprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + fi + else + CFLAGS="$CFLAGS -DNO_vsnprintf" + SFLAGS="$SFLAGS -DNO_vsnprintf" + echo "Checking for vsnprintf() in stdio.h... No." | tee -a configure.log + echo " WARNING: vsnprintf() not found, falling back to vsprintf(). zlib" | tee -a configure.log + echo " can build but will be open to possible buffer-overflow security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + + echo >> configure.log + cat >$test.c <<EOF +#include <stdio.h> +#include <stdarg.h> +int mytest(const char *fmt, ...) 
+{ + int n; + char buf[20]; + va_list ap; + va_start(ap, fmt); + n = vsprintf(buf, fmt, ap); + va_end(ap); + return n; +} +int main() +{ + return (mytest("Hello%d\n", 1)); +} +EOF + + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of vsprintf()... Yes." | tee -a configure.log + else + CFLAGS="$CFLAGS -DHAS_vsprintf_void" + SFLAGS="$SFLAGS -DHAS_vsprintf_void" + echo "Checking for return value of vsprintf()... No." | tee -a configure.log + echo " WARNING: apparently vsprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + fi + fi +else + echo "Checking whether to use vs[n]printf() or s[n]printf()... using s[n]printf()." | tee -a configure.log + + echo >> configure.log + cat >$test.c <<EOF +#include <stdio.h> +int mytest() +{ + char buf[20]; + snprintf(buf, sizeof(buf), "%s", "foo"); + return 0; +} +int main() +{ + return (mytest()); +} +EOF + + if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for snprintf() in stdio.h... Yes." | tee -a configure.log + + echo >> configure.log + cat >$test.c <<EOF +#include <stdio.h> +int mytest() +{ + char buf[20]; + return snprintf(buf, sizeof(buf), "%s", "foo"); +} +int main() +{ + return (mytest()); +} +EOF + + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of snprintf()... Yes." | tee -a configure.log + else + CFLAGS="$CFLAGS -DHAS_snprintf_void" + SFLAGS="$SFLAGS -DHAS_snprintf_void" + echo "Checking for return value of snprintf()... No." | tee -a configure.log + echo " WARNING: apparently snprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + fi + else + CFLAGS="$CFLAGS -DNO_snprintf" + SFLAGS="$SFLAGS -DNO_snprintf" + echo "Checking for snprintf() in stdio.h... No." 
| tee -a configure.log + echo " WARNING: snprintf() not found, falling back to sprintf(). zlib" | tee -a configure.log + echo " can build but will be open to possible buffer-overflow security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + + echo >> configure.log + cat >$test.c <<EOF +#include <stdio.h> +int mytest() +{ + char buf[20]; + return sprintf(buf, "%s", "foo"); +} +int main() +{ + return (mytest()); +} +EOF + + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of sprintf()... Yes." | tee -a configure.log + else + CFLAGS="$CFLAGS -DHAS_sprintf_void" + SFLAGS="$SFLAGS -DHAS_sprintf_void" + echo "Checking for return value of sprintf()... No." | tee -a configure.log + echo " WARNING: apparently sprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + fi + fi +fi + +# see if we can hide zlib internal symbols that are linked between separate source files +if test "$gcc" -eq 1; then + echo >> configure.log + cat > $test.c <<EOF +#define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +int ZLIB_INTERNAL foo; +int main() +{ + return 0; +} +EOF + if tryboth $CC -c $CFLAGS $test.c; then + CFLAGS="$CFLAGS -DHAVE_HIDDEN" + SFLAGS="$SFLAGS -DHAVE_HIDDEN" + echo "Checking for attribute(visibility) support... Yes." | tee -a configure.log + else + echo "Checking for attribute(visibility) support... No." 
| tee -a configure.log + fi +fi + +# show the results in the log +echo >> configure.log +echo ALL = $ALL >> configure.log +echo AR = $AR >> configure.log +echo ARFLAGS = $ARFLAGS >> configure.log +echo CC = $CC >> configure.log +echo CFLAGS = $CFLAGS >> configure.log +echo CPP = $CPP >> configure.log +echo EXE = $EXE >> configure.log +echo LDCONFIG = $LDCONFIG >> configure.log +echo LDFLAGS = $LDFLAGS >> configure.log +echo LDSHARED = $LDSHARED >> configure.log +echo LDSHAREDLIBC = $LDSHAREDLIBC >> configure.log +echo OBJC = $OBJC >> configure.log +echo PIC_OBJC = $PIC_OBJC >> configure.log +echo RANLIB = $RANLIB >> configure.log +echo SFLAGS = $SFLAGS >> configure.log +echo SHAREDLIB = $SHAREDLIB >> configure.log +echo SHAREDLIBM = $SHAREDLIBM >> configure.log +echo SHAREDLIBV = $SHAREDLIBV >> configure.log +echo STATICLIB = $STATICLIB >> configure.log +echo TEST = $TEST >> configure.log +echo VER = $VER >> configure.log +echo Z_U4 = $Z_U4 >> configure.log +echo exec_prefix = $exec_prefix >> configure.log +echo includedir = $includedir >> configure.log +echo libdir = $libdir >> configure.log +echo mandir = $mandir >> configure.log +echo prefix = $prefix >> configure.log +echo sharedlibdir = $sharedlibdir >> configure.log +echo uname = $uname >> configure.log + +# udpate Makefile with the configure results +sed < Makefile.in " +/^CC *=/s#=.*#=$CC# +/^CFLAGS *=/s#=.*#=$CFLAGS# +/^SFLAGS *=/s#=.*#=$SFLAGS# +/^LDFLAGS *=/s#=.*#=$LDFLAGS# +/^LDSHARED *=/s#=.*#=$LDSHARED# +/^CPP *=/s#=.*#=$CPP# +/^STATICLIB *=/s#=.*#=$STATICLIB# +/^SHAREDLIB *=/s#=.*#=$SHAREDLIB# +/^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV# +/^SHAREDLIBM *=/s#=.*#=$SHAREDLIBM# +/^AR *=/s#=.*#=$AR# +/^ARFLAGS *=/s#=.*#=$ARFLAGS# +/^RANLIB *=/s#=.*#=$RANLIB# +/^LDCONFIG *=/s#=.*#=$LDCONFIG# +/^LDSHAREDLIBC *=/s#=.*#=$LDSHAREDLIBC# +/^EXE *=/s#=.*#=$EXE# +/^prefix *=/s#=.*#=$prefix# +/^exec_prefix *=/s#=.*#=$exec_prefix# +/^libdir *=/s#=.*#=$libdir# +/^sharedlibdir *=/s#=.*#=$sharedlibdir# +/^includedir 
*=/s#=.*#=$includedir# +/^mandir *=/s#=.*#=$mandir# +/^OBJC *=/s#=.*#= $OBJC# +/^PIC_OBJC *=/s#=.*#= $PIC_OBJC# +/^all: */s#:.*#: $ALL# +/^test: */s#:.*#: $TEST# +" > Makefile + +# create zlib.pc with the configure results +sed < zlib.pc.in " +/^CC *=/s#=.*#=$CC# +/^CFLAGS *=/s#=.*#=$CFLAGS# +/^CPP *=/s#=.*#=$CPP# +/^LDSHARED *=/s#=.*#=$LDSHARED# +/^STATICLIB *=/s#=.*#=$STATICLIB# +/^SHAREDLIB *=/s#=.*#=$SHAREDLIB# +/^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV# +/^SHAREDLIBM *=/s#=.*#=$SHAREDLIBM# +/^AR *=/s#=.*#=$AR# +/^ARFLAGS *=/s#=.*#=$ARFLAGS# +/^RANLIB *=/s#=.*#=$RANLIB# +/^EXE *=/s#=.*#=$EXE# +/^prefix *=/s#=.*#=$prefix# +/^exec_prefix *=/s#=.*#=$exec_prefix# +/^libdir *=/s#=.*#=$libdir# +/^sharedlibdir *=/s#=.*#=$sharedlibdir# +/^includedir *=/s#=.*#=$includedir# +/^mandir *=/s#=.*#=$mandir# +/^LDFLAGS *=/s#=.*#=$LDFLAGS# +" | sed -e " +s/\@VERSION\@/$VER/g; +" > zlib.pc + +# done +leave 0 diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/buffer_demo.adb nodejs-0.11.15/deps/zlib/contrib/ada/buffer_demo.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/buffer_demo.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/buffer_demo.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,106 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2004 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- +-- +-- $Id: buffer_demo.adb,v 1.3 2004/09/06 06:55:35 vagul Exp $ + +-- This demo program provided by Dr Steve Sangwine <sjs@essex.ac.uk> +-- +-- Demonstration of a problem with Zlib-Ada (already fixed) when a buffer +-- of exactly the correct size is used for decompressed data, and the last +-- few bytes passed in to Zlib are checksum bytes. 
+ +-- This program compresses a string of text, and then decompresses the +-- compressed text into a buffer of the same size as the original text. + +with Ada.Streams; use Ada.Streams; +with Ada.Text_IO; + +with ZLib; use ZLib; + +procedure Buffer_Demo is + EOL : Character renames ASCII.LF; + Text : constant String + := "Four score and seven years ago our fathers brought forth," & EOL & + "upon this continent, a new nation, conceived in liberty," & EOL & + "and dedicated to the proposition that `all men are created equal'."; + + Source : Stream_Element_Array (1 .. Text'Length); + for Source'Address use Text'Address; + +begin + Ada.Text_IO.Put (Text); + Ada.Text_IO.New_Line; + Ada.Text_IO.Put_Line + ("Uncompressed size : " & Positive'Image (Text'Length) & " bytes"); + + declare + Compressed_Data : Stream_Element_Array (1 .. Text'Length); + L : Stream_Element_Offset; + begin + Compress : declare + Compressor : Filter_Type; + I : Stream_Element_Offset; + begin + Deflate_Init (Compressor); + + -- Compress the whole of T at once. + + Translate (Compressor, Source, I, Compressed_Data, L, Finish); + pragma Assert (I = Source'Last); + + Close (Compressor); + + Ada.Text_IO.Put_Line + ("Compressed size : " + & Stream_Element_Offset'Image (L) & " bytes"); + end Compress; + + -- Now we decompress the data, passing short blocks of data to Zlib + -- (because this demonstrates the problem - the last block passed will + -- contain checksum information and there will be no output, only a + -- check inside Zlib that the checksum is correct). + + Decompress : declare + Decompressor : Filter_Type; + + Uncompressed_Data : Stream_Element_Array (1 .. Text'Length); + + Block_Size : constant := 4; + -- This makes sure that the last block contains + -- only Adler checksum data. + + P : Stream_Element_Offset := Compressed_Data'First - 1; + O : Stream_Element_Offset; + begin + Inflate_Init (Decompressor); + + loop + Translate + (Decompressor, + Compressed_Data + (P + 1 .. 
Stream_Element_Offset'Min (P + Block_Size, L)), + P, + Uncompressed_Data + (Total_Out (Decompressor) + 1 .. Uncompressed_Data'Last), + O, + No_Flush); + + Ada.Text_IO.Put_Line + ("Total in : " & Count'Image (Total_In (Decompressor)) & + ", out : " & Count'Image (Total_Out (Decompressor))); + + exit when P = L; + end loop; + + Ada.Text_IO.New_Line; + Ada.Text_IO.Put_Line + ("Decompressed text matches original text : " + & Boolean'Image (Uncompressed_Data = Source)); + end Decompress; + end; +end Buffer_Demo; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/mtest.adb nodejs-0.11.15/deps/zlib/contrib/ada/mtest.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/mtest.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/mtest.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,156 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- +-- Continuous test for ZLib multithreading. If the test would fail +-- we should provide thread safe allocation routines for the Z_Stream. +-- +-- $Id: mtest.adb,v 1.4 2004/07/23 07:49:54 vagul Exp $ + +with ZLib; +with Ada.Streams; +with Ada.Numerics.Discrete_Random; +with Ada.Text_IO; +with Ada.Exceptions; +with Ada.Task_Identification; + +procedure MTest is + use Ada.Streams; + use ZLib; + + Stop : Boolean := False; + + pragma Atomic (Stop); + + subtype Visible_Symbols is Stream_Element range 16#20# .. 16#7E#; + + package Random_Elements is + new Ada.Numerics.Discrete_Random (Visible_Symbols); + + task type Test_Task; + + task body Test_Task is + Buffer : Stream_Element_Array (1 .. 
100_000); + Gen : Random_Elements.Generator; + + Buffer_First : Stream_Element_Offset; + Compare_First : Stream_Element_Offset; + + Deflate : Filter_Type; + Inflate : Filter_Type; + + procedure Further (Item : in Stream_Element_Array); + + procedure Read_Buffer + (Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset); + + ------------- + -- Further -- + ------------- + + procedure Further (Item : in Stream_Element_Array) is + + procedure Compare (Item : in Stream_Element_Array); + + ------------- + -- Compare -- + ------------- + + procedure Compare (Item : in Stream_Element_Array) is + Next_First : Stream_Element_Offset := Compare_First + Item'Length; + begin + if Buffer (Compare_First .. Next_First - 1) /= Item then + raise Program_Error; + end if; + + Compare_First := Next_First; + end Compare; + + procedure Compare_Write is new ZLib.Write (Write => Compare); + begin + Compare_Write (Inflate, Item, No_Flush); + end Further; + + ----------------- + -- Read_Buffer -- + ----------------- + + procedure Read_Buffer + (Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset) + is + Buff_Diff : Stream_Element_Offset := Buffer'Last - Buffer_First; + Next_First : Stream_Element_Offset; + begin + if Item'Length <= Buff_Diff then + Last := Item'Last; + + Next_First := Buffer_First + Item'Length; + + Item := Buffer (Buffer_First .. Next_First - 1); + + Buffer_First := Next_First; + else + Last := Item'First + Buff_Diff; + Item (Item'First .. Last) := Buffer (Buffer_First .. 
Buffer'Last); + Buffer_First := Buffer'Last + 1; + end if; + end Read_Buffer; + + procedure Translate is new Generic_Translate + (Data_In => Read_Buffer, + Data_Out => Further); + + begin + Random_Elements.Reset (Gen); + + Buffer := (others => 20); + + Main : loop + for J in Buffer'Range loop + Buffer (J) := Random_Elements.Random (Gen); + + Deflate_Init (Deflate); + Inflate_Init (Inflate); + + Buffer_First := Buffer'First; + Compare_First := Buffer'First; + + Translate (Deflate); + + if Compare_First /= Buffer'Last + 1 then + raise Program_Error; + end if; + + Ada.Text_IO.Put_Line + (Ada.Task_Identification.Image + (Ada.Task_Identification.Current_Task) + & Stream_Element_Offset'Image (J) + & ZLib.Count'Image (Total_Out (Deflate))); + + Close (Deflate); + Close (Inflate); + + exit Main when Stop; + end loop; + end loop Main; + exception + when E : others => + Ada.Text_IO.Put_Line (Ada.Exceptions.Exception_Information (E)); + Stop := True; + end Test_Task; + + Test : array (1 .. 4) of Test_Task; + + pragma Unreferenced (Test); + + Dummy : Character; + +begin + Ada.Text_IO.Get_Immediate (Dummy); + Stop := True; +end MTest; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/read.adb nodejs-0.11.15/deps/zlib/contrib/ada/read.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/read.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/read.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,156 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- + +-- $Id: read.adb,v 1.8 2004/05/31 10:53:40 vagul Exp $ + +-- Test/demo program for the generic read interface. 
+ +with Ada.Numerics.Discrete_Random; +with Ada.Streams; +with Ada.Text_IO; + +with ZLib; + +procedure Read is + + use Ada.Streams; + + ------------------------------------ + -- Test configuration parameters -- + ------------------------------------ + + File_Size : Stream_Element_Offset := 100_000; + + Continuous : constant Boolean := False; + -- If this constant is True, the test would be repeated again and again, + -- with increment File_Size for every iteration. + + Header : constant ZLib.Header_Type := ZLib.Default; + -- Do not use Header other than Default in ZLib versions 1.1.4 and older. + + Init_Random : constant := 8; + -- We are using the same random sequence, in case of we catch bug, + -- so we would be able to reproduce it. + + -- End -- + + Pack_Size : Stream_Element_Offset; + Offset : Stream_Element_Offset; + + Filter : ZLib.Filter_Type; + + subtype Visible_Symbols + is Stream_Element range 16#20# .. 16#7E#; + + package Random_Elements is new + Ada.Numerics.Discrete_Random (Visible_Symbols); + + Gen : Random_Elements.Generator; + Period : constant Stream_Element_Offset := 200; + -- Period constant variable for random generator not to be very random. + -- Bigger period, harder random. + + Read_Buffer : Stream_Element_Array (1 .. 2048); + Read_First : Stream_Element_Offset; + Read_Last : Stream_Element_Offset; + + procedure Reset; + + procedure Read + (Item : out Stream_Element_Array; + Last : out Stream_Element_Offset); + -- this procedure is for generic instantiation of + -- ZLib.Read + -- reading data from the File_In. + + procedure Read is new ZLib.Read + (Read, + Read_Buffer, + Rest_First => Read_First, + Rest_Last => Read_Last); + + ---------- + -- Read -- + ---------- + + procedure Read + (Item : out Stream_Element_Array; + Last : out Stream_Element_Offset) is + begin + Last := Stream_Element_Offset'Min + (Item'Last, + Item'First + File_Size - Offset); + + for J in Item'First .. 
Last loop + if J < Item'First + Period then + Item (J) := Random_Elements.Random (Gen); + else + Item (J) := Item (J - Period); + end if; + + Offset := Offset + 1; + end loop; + end Read; + + ----------- + -- Reset -- + ----------- + + procedure Reset is + begin + Random_Elements.Reset (Gen, Init_Random); + Pack_Size := 0; + Offset := 1; + Read_First := Read_Buffer'Last + 1; + Read_Last := Read_Buffer'Last; + end Reset; + +begin + Ada.Text_IO.Put_Line ("ZLib " & ZLib.Version); + + loop + for Level in ZLib.Compression_Level'Range loop + + Ada.Text_IO.Put ("Level =" + & ZLib.Compression_Level'Image (Level)); + + -- Deflate using generic instantiation. + + ZLib.Deflate_Init + (Filter, + Level, + Header => Header); + + Reset; + + Ada.Text_IO.Put + (Stream_Element_Offset'Image (File_Size) & " ->"); + + loop + declare + Buffer : Stream_Element_Array (1 .. 1024); + Last : Stream_Element_Offset; + begin + Read (Filter, Buffer, Last); + + Pack_Size := Pack_Size + Last - Buffer'First + 1; + + exit when Last < Buffer'Last; + end; + end loop; + + Ada.Text_IO.Put_Line (Stream_Element_Offset'Image (Pack_Size)); + + ZLib.Close (Filter); + end loop; + + exit when not Continuous; + + File_Size := File_Size + 1; + end loop; +end Read; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/readme.txt nodejs-0.11.15/deps/zlib/contrib/ada/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/ada/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,65 @@ + ZLib for Ada thick binding (ZLib.Ada) + Release 1.3 + +ZLib.Ada is a thick binding interface to the popular ZLib data +compression library, available at http://www.gzip.org/zlib/. +It provides Ada-style access to the ZLib C library. + + + Here are the main changes since ZLib.Ada 1.2: + +- Attension: ZLib.Read generic routine have a initialization requirement + for Read_Last parameter now. 
It is a bit incompartible with previous version, + but extends functionality, we could use new parameters Allow_Read_Some and + Flush now. + +- Added Is_Open routines to ZLib and ZLib.Streams packages. + +- Add pragma Assert to check Stream_Element is 8 bit. + +- Fix extraction to buffer with exact known decompressed size. Error reported by + Steve Sangwine. + +- Fix definition of ULong (changed to unsigned_long), fix regression on 64 bits + computers. Patch provided by Pascal Obry. + +- Add Status_Error exception definition. + +- Add pragma Assertion that Ada.Streams.Stream_Element size is 8 bit. + + + How to build ZLib.Ada under GNAT + +You should have the ZLib library already build on your computer, before +building ZLib.Ada. Make the directory of ZLib.Ada sources current and +issue the command: + + gnatmake test -largs -L<directory where libz.a is> -lz + +Or use the GNAT project file build for GNAT 3.15 or later: + + gnatmake -Pzlib.gpr -L<directory where libz.a is> + + + How to build ZLib.Ada under Aonix ObjectAda for Win32 7.2.2 + +1. Make a project with all *.ads and *.adb files from the distribution. +2. Build the libz.a library from the ZLib C sources. +3. Rename libz.a to z.lib. +4. Add the library z.lib to the project. +5. Add the libc.lib library from the ObjectAda distribution to the project. +6. Build the executable using test.adb as a main procedure. + + + How to use ZLib.Ada + +The source files test.adb and read.adb are small demo programs that show +the main functionality of ZLib.Ada. + +The routines from the package specifications are commented. 
+ + +Homepage: http://zlib-ada.sourceforge.net/ +Author: Dmitriy Anisimkov <anisimkov@yahoo.com> + +Contributors: Pascal Obry <pascal@obry.org>, Steve Sangwine <sjs@essex.ac.uk> diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/test.adb nodejs-0.11.15/deps/zlib/contrib/ada/test.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/test.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/test.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,463 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- + +-- $Id: test.adb,v 1.17 2003/08/12 12:13:30 vagul Exp $ + +-- The program has a few aims. +-- 1. Test ZLib.Ada95 thick binding functionality. +-- 2. Show the example of use main functionality of the ZLib.Ada95 binding. +-- 3. Build this program automatically compile all ZLib.Ada95 packages under +-- GNAT Ada95 compiler. + +with ZLib.Streams; +with Ada.Streams.Stream_IO; +with Ada.Numerics.Discrete_Random; + +with Ada.Text_IO; + +with Ada.Calendar; + +procedure Test is + + use Ada.Streams; + use Stream_IO; + + ------------------------------------ + -- Test configuration parameters -- + ------------------------------------ + + File_Size : Count := 100_000; + Continuous : constant Boolean := False; + + Header : constant ZLib.Header_Type := ZLib.Default; + -- ZLib.None; + -- ZLib.Auto; + -- ZLib.GZip; + -- Do not use Header other then Default in ZLib versions 1.1.4 + -- and older. + + Strategy : constant ZLib.Strategy_Type := ZLib.Default_Strategy; + Init_Random : constant := 10; + + -- End -- + + In_File_Name : constant String := "testzlib.in"; + -- Name of the input file + + Z_File_Name : constant String := "testzlib.zlb"; + -- Name of the compressed file. 
+ + Out_File_Name : constant String := "testzlib.out"; + -- Name of the decompressed file. + + File_In : File_Type; + File_Out : File_Type; + File_Back : File_Type; + File_Z : ZLib.Streams.Stream_Type; + + Filter : ZLib.Filter_Type; + + Time_Stamp : Ada.Calendar.Time; + + procedure Generate_File; + -- Generate file of spetsified size with some random data. + -- The random data is repeatable, for the good compression. + + procedure Compare_Streams + (Left, Right : in out Root_Stream_Type'Class); + -- The procedure compearing data in 2 streams. + -- It is for compare data before and after compression/decompression. + + procedure Compare_Files (Left, Right : String); + -- Compare files. Based on the Compare_Streams. + + procedure Copy_Streams + (Source, Target : in out Root_Stream_Type'Class; + Buffer_Size : in Stream_Element_Offset := 1024); + -- Copying data from one stream to another. It is for test stream + -- interface of the library. + + procedure Data_In + (Item : out Stream_Element_Array; + Last : out Stream_Element_Offset); + -- this procedure is for generic instantiation of + -- ZLib.Generic_Translate. + -- reading data from the File_In. + + procedure Data_Out (Item : in Stream_Element_Array); + -- this procedure is for generic instantiation of + -- ZLib.Generic_Translate. + -- writing data to the File_Out. + + procedure Stamp; + -- Store the timestamp to the local variable. + + procedure Print_Statistic (Msg : String; Data_Size : ZLib.Count); + -- Print the time statistic with the message. + + procedure Translate is new ZLib.Generic_Translate + (Data_In => Data_In, + Data_Out => Data_Out); + -- This procedure is moving data from File_In to File_Out + -- with compression or decompression, depend on initialization of + -- Filter parameter. 
+ + ------------------- + -- Compare_Files -- + ------------------- + + procedure Compare_Files (Left, Right : String) is + Left_File, Right_File : File_Type; + begin + Open (Left_File, In_File, Left); + Open (Right_File, In_File, Right); + Compare_Streams (Stream (Left_File).all, Stream (Right_File).all); + Close (Left_File); + Close (Right_File); + end Compare_Files; + + --------------------- + -- Compare_Streams -- + --------------------- + + procedure Compare_Streams + (Left, Right : in out Ada.Streams.Root_Stream_Type'Class) + is + Left_Buffer, Right_Buffer : Stream_Element_Array (0 .. 16#FFF#); + Left_Last, Right_Last : Stream_Element_Offset; + begin + loop + Read (Left, Left_Buffer, Left_Last); + Read (Right, Right_Buffer, Right_Last); + + if Left_Last /= Right_Last then + Ada.Text_IO.Put_Line ("Compare error :" + & Stream_Element_Offset'Image (Left_Last) + & " /= " + & Stream_Element_Offset'Image (Right_Last)); + + raise Constraint_Error; + + elsif Left_Buffer (0 .. Left_Last) + /= Right_Buffer (0 .. Right_Last) + then + Ada.Text_IO.Put_Line ("ERROR: IN and OUT files is not equal."); + raise Constraint_Error; + + end if; + + exit when Left_Last < Left_Buffer'Last; + end loop; + end Compare_Streams; + + ------------------ + -- Copy_Streams -- + ------------------ + + procedure Copy_Streams + (Source, Target : in out Ada.Streams.Root_Stream_Type'Class; + Buffer_Size : in Stream_Element_Offset := 1024) + is + Buffer : Stream_Element_Array (1 .. Buffer_Size); + Last : Stream_Element_Offset; + begin + loop + Read (Source, Buffer, Last); + Write (Target, Buffer (1 .. 
Last)); + + exit when Last < Buffer'Last; + end loop; + end Copy_Streams; + + ------------- + -- Data_In -- + ------------- + + procedure Data_In + (Item : out Stream_Element_Array; + Last : out Stream_Element_Offset) is + begin + Read (File_In, Item, Last); + end Data_In; + + -------------- + -- Data_Out -- + -------------- + + procedure Data_Out (Item : in Stream_Element_Array) is + begin + Write (File_Out, Item); + end Data_Out; + + ------------------- + -- Generate_File -- + ------------------- + + procedure Generate_File is + subtype Visible_Symbols is Stream_Element range 16#20# .. 16#7E#; + + package Random_Elements is + new Ada.Numerics.Discrete_Random (Visible_Symbols); + + Gen : Random_Elements.Generator; + Buffer : Stream_Element_Array := (1 .. 77 => 16#20#) & 10; + + Buffer_Count : constant Count := File_Size / Buffer'Length; + -- Number of same buffers in the packet. + + Density : constant Count := 30; -- from 0 to Buffer'Length - 2; + + procedure Fill_Buffer (J, D : in Count); + -- Change the part of the buffer. + + ----------------- + -- Fill_Buffer -- + ----------------- + + procedure Fill_Buffer (J, D : in Count) is + begin + for K in 0 .. D loop + Buffer + (Stream_Element_Offset ((J + K) mod (Buffer'Length - 1) + 1)) + := Random_Elements.Random (Gen); + + end loop; + end Fill_Buffer; + + begin + Random_Elements.Reset (Gen, Init_Random); + + Create (File_In, Out_File, In_File_Name); + + Fill_Buffer (1, Buffer'Length - 2); + + for J in 1 .. Buffer_Count loop + Write (File_In, Buffer); + + Fill_Buffer (J, Density); + end loop; + + -- fill remain size. + + Write + (File_In, + Buffer + (1 .. 
Stream_Element_Offset + (File_Size - Buffer'Length * Buffer_Count))); + + Flush (File_In); + Close (File_In); + end Generate_File; + + --------------------- + -- Print_Statistic -- + --------------------- + + procedure Print_Statistic (Msg : String; Data_Size : ZLib.Count) is + use Ada.Calendar; + use Ada.Text_IO; + + package Count_IO is new Integer_IO (ZLib.Count); + + Curr_Dur : Duration := Clock - Time_Stamp; + begin + Put (Msg); + + Set_Col (20); + Ada.Text_IO.Put ("size ="); + + Count_IO.Put + (Data_Size, + Width => Stream_IO.Count'Image (File_Size)'Length); + + Put_Line (" duration =" & Duration'Image (Curr_Dur)); + end Print_Statistic; + + ----------- + -- Stamp -- + ----------- + + procedure Stamp is + begin + Time_Stamp := Ada.Calendar.Clock; + end Stamp; + +begin + Ada.Text_IO.Put_Line ("ZLib " & ZLib.Version); + + loop + Generate_File; + + for Level in ZLib.Compression_Level'Range loop + + Ada.Text_IO.Put_Line ("Level =" + & ZLib.Compression_Level'Image (Level)); + + -- Test generic interface. + Open (File_In, In_File, In_File_Name); + Create (File_Out, Out_File, Z_File_Name); + + Stamp; + + -- Deflate using generic instantiation. + + ZLib.Deflate_Init + (Filter => Filter, + Level => Level, + Strategy => Strategy, + Header => Header); + + Translate (Filter); + Print_Statistic ("Generic compress", ZLib.Total_Out (Filter)); + ZLib.Close (Filter); + + Close (File_In); + Close (File_Out); + + Open (File_In, In_File, Z_File_Name); + Create (File_Out, Out_File, Out_File_Name); + + Stamp; + + -- Inflate using generic instantiation. + + ZLib.Inflate_Init (Filter, Header => Header); + + Translate (Filter); + Print_Statistic ("Generic decompress", ZLib.Total_Out (Filter)); + + ZLib.Close (Filter); + + Close (File_In); + Close (File_Out); + + Compare_Files (In_File_Name, Out_File_Name); + + -- Test stream interface. + + -- Compress to the back stream. 
+ + Open (File_In, In_File, In_File_Name); + Create (File_Back, Out_File, Z_File_Name); + + Stamp; + + ZLib.Streams.Create + (Stream => File_Z, + Mode => ZLib.Streams.Out_Stream, + Back => ZLib.Streams.Stream_Access + (Stream (File_Back)), + Back_Compressed => True, + Level => Level, + Strategy => Strategy, + Header => Header); + + Copy_Streams + (Source => Stream (File_In).all, + Target => File_Z); + + -- Flushing internal buffers to the back stream. + + ZLib.Streams.Flush (File_Z, ZLib.Finish); + + Print_Statistic ("Write compress", + ZLib.Streams.Write_Total_Out (File_Z)); + + ZLib.Streams.Close (File_Z); + + Close (File_In); + Close (File_Back); + + -- Compare reading from original file and from + -- decompression stream. + + Open (File_In, In_File, In_File_Name); + Open (File_Back, In_File, Z_File_Name); + + ZLib.Streams.Create + (Stream => File_Z, + Mode => ZLib.Streams.In_Stream, + Back => ZLib.Streams.Stream_Access + (Stream (File_Back)), + Back_Compressed => True, + Header => Header); + + Stamp; + Compare_Streams (Stream (File_In).all, File_Z); + + Print_Statistic ("Read decompress", + ZLib.Streams.Read_Total_Out (File_Z)); + + ZLib.Streams.Close (File_Z); + Close (File_In); + Close (File_Back); + + -- Compress by reading from compression stream. + + Open (File_Back, In_File, In_File_Name); + Create (File_Out, Out_File, Z_File_Name); + + ZLib.Streams.Create + (Stream => File_Z, + Mode => ZLib.Streams.In_Stream, + Back => ZLib.Streams.Stream_Access + (Stream (File_Back)), + Back_Compressed => False, + Level => Level, + Strategy => Strategy, + Header => Header); + + Stamp; + Copy_Streams + (Source => File_Z, + Target => Stream (File_Out).all); + + Print_Statistic ("Read compress", + ZLib.Streams.Read_Total_Out (File_Z)); + + ZLib.Streams.Close (File_Z); + + Close (File_Out); + Close (File_Back); + + -- Decompress to decompression stream. 
+ + Open (File_In, In_File, Z_File_Name); + Create (File_Back, Out_File, Out_File_Name); + + ZLib.Streams.Create + (Stream => File_Z, + Mode => ZLib.Streams.Out_Stream, + Back => ZLib.Streams.Stream_Access + (Stream (File_Back)), + Back_Compressed => False, + Header => Header); + + Stamp; + + Copy_Streams + (Source => Stream (File_In).all, + Target => File_Z); + + Print_Statistic ("Write decompress", + ZLib.Streams.Write_Total_Out (File_Z)); + + ZLib.Streams.Close (File_Z); + Close (File_In); + Close (File_Back); + + Compare_Files (In_File_Name, Out_File_Name); + end loop; + + Ada.Text_IO.Put_Line (Count'Image (File_Size) & " Ok."); + + exit when not Continuous; + + File_Size := File_Size + 1; + end loop; +end Test; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib.adb nodejs-0.11.15/deps/zlib/contrib/ada/zlib.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,701 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2004 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. 
-- +---------------------------------------------------------------- + +-- $Id: zlib.adb,v 1.31 2004/09/06 06:53:19 vagul Exp $ + +with Ada.Exceptions; +with Ada.Unchecked_Conversion; +with Ada.Unchecked_Deallocation; + +with Interfaces.C.Strings; + +with ZLib.Thin; + +package body ZLib is + + use type Thin.Int; + + type Z_Stream is new Thin.Z_Stream; + + type Return_Code_Enum is + (OK, + STREAM_END, + NEED_DICT, + ERRNO, + STREAM_ERROR, + DATA_ERROR, + MEM_ERROR, + BUF_ERROR, + VERSION_ERROR); + + type Flate_Step_Function is access + function (Strm : in Thin.Z_Streamp; Flush : in Thin.Int) return Thin.Int; + pragma Convention (C, Flate_Step_Function); + + type Flate_End_Function is access + function (Ctrm : in Thin.Z_Streamp) return Thin.Int; + pragma Convention (C, Flate_End_Function); + + type Flate_Type is record + Step : Flate_Step_Function; + Done : Flate_End_Function; + end record; + + subtype Footer_Array is Stream_Element_Array (1 .. 8); + + Simple_GZip_Header : constant Stream_Element_Array (1 .. 10) + := (16#1f#, 16#8b#, -- Magic header + 16#08#, -- Z_DEFLATED + 16#00#, -- Flags + 16#00#, 16#00#, 16#00#, 16#00#, -- Time + 16#00#, -- XFlags + 16#03# -- OS code + ); + -- The simplest gzip header is not for informational, but just for + -- gzip format compatibility. + -- Note that some code below is using assumption + -- Simple_GZip_Header'Last > Footer_Array'Last, so do not make + -- Simple_GZip_Header'Last <= Footer_Array'Last. 
+ + Return_Code : constant array (Thin.Int range <>) of Return_Code_Enum + := (0 => OK, + 1 => STREAM_END, + 2 => NEED_DICT, + -1 => ERRNO, + -2 => STREAM_ERROR, + -3 => DATA_ERROR, + -4 => MEM_ERROR, + -5 => BUF_ERROR, + -6 => VERSION_ERROR); + + Flate : constant array (Boolean) of Flate_Type + := (True => (Step => Thin.Deflate'Access, + Done => Thin.DeflateEnd'Access), + False => (Step => Thin.Inflate'Access, + Done => Thin.InflateEnd'Access)); + + Flush_Finish : constant array (Boolean) of Flush_Mode + := (True => Finish, False => No_Flush); + + procedure Raise_Error (Stream : in Z_Stream); + pragma Inline (Raise_Error); + + procedure Raise_Error (Message : in String); + pragma Inline (Raise_Error); + + procedure Check_Error (Stream : in Z_Stream; Code : in Thin.Int); + + procedure Free is new Ada.Unchecked_Deallocation + (Z_Stream, Z_Stream_Access); + + function To_Thin_Access is new Ada.Unchecked_Conversion + (Z_Stream_Access, Thin.Z_Streamp); + + procedure Translate_GZip + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode); + -- Separate translate routine for make gzip header. + + procedure Translate_Auto + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode); + -- translate routine without additional headers. 
+ + ----------------- + -- Check_Error -- + ----------------- + + procedure Check_Error (Stream : in Z_Stream; Code : in Thin.Int) is + use type Thin.Int; + begin + if Code /= Thin.Z_OK then + Raise_Error + (Return_Code_Enum'Image (Return_Code (Code)) + & ": " & Last_Error_Message (Stream)); + end if; + end Check_Error; + + ----------- + -- Close -- + ----------- + + procedure Close + (Filter : in out Filter_Type; + Ignore_Error : in Boolean := False) + is + Code : Thin.Int; + begin + if not Ignore_Error and then not Is_Open (Filter) then + raise Status_Error; + end if; + + Code := Flate (Filter.Compression).Done (To_Thin_Access (Filter.Strm)); + + if Ignore_Error or else Code = Thin.Z_OK then + Free (Filter.Strm); + else + declare + Error_Message : constant String + := Last_Error_Message (Filter.Strm.all); + begin + Free (Filter.Strm); + Ada.Exceptions.Raise_Exception + (ZLib_Error'Identity, + Return_Code_Enum'Image (Return_Code (Code)) + & ": " & Error_Message); + end; + end if; + end Close; + + ----------- + -- CRC32 -- + ----------- + + function CRC32 + (CRC : in Unsigned_32; + Data : in Ada.Streams.Stream_Element_Array) + return Unsigned_32 + is + use Thin; + begin + return Unsigned_32 (crc32 (ULong (CRC), + Data'Address, + Data'Length)); + end CRC32; + + procedure CRC32 + (CRC : in out Unsigned_32; + Data : in Ada.Streams.Stream_Element_Array) is + begin + CRC := CRC32 (CRC, Data); + end CRC32; + + ------------------ + -- Deflate_Init -- + ------------------ + + procedure Deflate_Init + (Filter : in out Filter_Type; + Level : in Compression_Level := Default_Compression; + Strategy : in Strategy_Type := Default_Strategy; + Method : in Compression_Method := Deflated; + Window_Bits : in Window_Bits_Type := Default_Window_Bits; + Memory_Level : in Memory_Level_Type := Default_Memory_Level; + Header : in Header_Type := Default) + is + use type Thin.Int; + Win_Bits : Thin.Int := Thin.Int (Window_Bits); + begin + if Is_Open (Filter) then + raise Status_Error; + end 
if; + + -- We allow ZLib to make header only in case of default header type. + -- Otherwise we would either do header by ourselfs, or do not do + -- header at all. + + if Header = None or else Header = GZip then + Win_Bits := -Win_Bits; + end if; + + -- For the GZip CRC calculation and make headers. + + if Header = GZip then + Filter.CRC := 0; + Filter.Offset := Simple_GZip_Header'First; + else + Filter.Offset := Simple_GZip_Header'Last + 1; + end if; + + Filter.Strm := new Z_Stream; + Filter.Compression := True; + Filter.Stream_End := False; + Filter.Header := Header; + + if Thin.Deflate_Init + (To_Thin_Access (Filter.Strm), + Level => Thin.Int (Level), + method => Thin.Int (Method), + windowBits => Win_Bits, + memLevel => Thin.Int (Memory_Level), + strategy => Thin.Int (Strategy)) /= Thin.Z_OK + then + Raise_Error (Filter.Strm.all); + end if; + end Deflate_Init; + + ----------- + -- Flush -- + ----------- + + procedure Flush + (Filter : in out Filter_Type; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode) + is + No_Data : Stream_Element_Array := (1 .. 0 => 0); + Last : Stream_Element_Offset; + begin + Translate (Filter, No_Data, Last, Out_Data, Out_Last, Flush); + end Flush; + + ----------------------- + -- Generic_Translate -- + ----------------------- + + procedure Generic_Translate + (Filter : in out ZLib.Filter_Type; + In_Buffer_Size : in Integer := Default_Buffer_Size; + Out_Buffer_Size : in Integer := Default_Buffer_Size) + is + In_Buffer : Stream_Element_Array + (1 .. Stream_Element_Offset (In_Buffer_Size)); + Out_Buffer : Stream_Element_Array + (1 .. 
Stream_Element_Offset (Out_Buffer_Size)); + Last : Stream_Element_Offset; + In_Last : Stream_Element_Offset; + In_First : Stream_Element_Offset; + Out_Last : Stream_Element_Offset; + begin + Main : loop + Data_In (In_Buffer, Last); + + In_First := In_Buffer'First; + + loop + Translate + (Filter => Filter, + In_Data => In_Buffer (In_First .. Last), + In_Last => In_Last, + Out_Data => Out_Buffer, + Out_Last => Out_Last, + Flush => Flush_Finish (Last < In_Buffer'First)); + + if Out_Buffer'First <= Out_Last then + Data_Out (Out_Buffer (Out_Buffer'First .. Out_Last)); + end if; + + exit Main when Stream_End (Filter); + + -- The end of in buffer. + + exit when In_Last = Last; + + In_First := In_Last + 1; + end loop; + end loop Main; + + end Generic_Translate; + + ------------------ + -- Inflate_Init -- + ------------------ + + procedure Inflate_Init + (Filter : in out Filter_Type; + Window_Bits : in Window_Bits_Type := Default_Window_Bits; + Header : in Header_Type := Default) + is + use type Thin.Int; + Win_Bits : Thin.Int := Thin.Int (Window_Bits); + + procedure Check_Version; + -- Check the latest header types compatibility. + + procedure Check_Version is + begin + if Version <= "1.1.4" then + Raise_Error + ("Inflate header type " & Header_Type'Image (Header) + & " incompatible with ZLib version " & Version); + end if; + end Check_Version; + + begin + if Is_Open (Filter) then + raise Status_Error; + end if; + + case Header is + when None => + Check_Version; + + -- Inflate data without headers determined + -- by negative Win_Bits. + + Win_Bits := -Win_Bits; + when GZip => + Check_Version; + + -- Inflate gzip data defined by flag 16. + + Win_Bits := Win_Bits + 16; + when Auto => + Check_Version; + + -- Inflate with automatic detection + -- of gzip or native header defined by flag 32. 
+ + Win_Bits := Win_Bits + 32; + when Default => null; + end case; + + Filter.Strm := new Z_Stream; + Filter.Compression := False; + Filter.Stream_End := False; + Filter.Header := Header; + + if Thin.Inflate_Init + (To_Thin_Access (Filter.Strm), Win_Bits) /= Thin.Z_OK + then + Raise_Error (Filter.Strm.all); + end if; + end Inflate_Init; + + ------------- + -- Is_Open -- + ------------- + + function Is_Open (Filter : in Filter_Type) return Boolean is + begin + return Filter.Strm /= null; + end Is_Open; + + ----------------- + -- Raise_Error -- + ----------------- + + procedure Raise_Error (Message : in String) is + begin + Ada.Exceptions.Raise_Exception (ZLib_Error'Identity, Message); + end Raise_Error; + + procedure Raise_Error (Stream : in Z_Stream) is + begin + Raise_Error (Last_Error_Message (Stream)); + end Raise_Error; + + ---------- + -- Read -- + ---------- + + procedure Read + (Filter : in out Filter_Type; + Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode := No_Flush) + is + In_Last : Stream_Element_Offset; + Item_First : Ada.Streams.Stream_Element_Offset := Item'First; + V_Flush : Flush_Mode := Flush; + + begin + pragma Assert (Rest_First in Buffer'First .. Buffer'Last + 1); + pragma Assert (Rest_Last in Buffer'First - 1 .. Buffer'Last); + + loop + if Rest_Last = Buffer'First - 1 then + V_Flush := Finish; + + elsif Rest_First > Rest_Last then + Read (Buffer, Rest_Last); + Rest_First := Buffer'First; + + if Rest_Last < Buffer'First then + V_Flush := Finish; + end if; + end if; + + Translate + (Filter => Filter, + In_Data => Buffer (Rest_First .. Rest_Last), + In_Last => In_Last, + Out_Data => Item (Item_First .. 
Item'Last), + Out_Last => Last, + Flush => V_Flush); + + Rest_First := In_Last + 1; + + exit when Stream_End (Filter) + or else Last = Item'Last + or else (Last >= Item'First and then Allow_Read_Some); + + Item_First := Last + 1; + end loop; + end Read; + + ---------------- + -- Stream_End -- + ---------------- + + function Stream_End (Filter : in Filter_Type) return Boolean is + begin + if Filter.Header = GZip and Filter.Compression then + return Filter.Stream_End + and then Filter.Offset = Footer_Array'Last + 1; + else + return Filter.Stream_End; + end if; + end Stream_End; + + -------------- + -- Total_In -- + -------------- + + function Total_In (Filter : in Filter_Type) return Count is + begin + return Count (Thin.Total_In (To_Thin_Access (Filter.Strm).all)); + end Total_In; + + --------------- + -- Total_Out -- + --------------- + + function Total_Out (Filter : in Filter_Type) return Count is + begin + return Count (Thin.Total_Out (To_Thin_Access (Filter.Strm).all)); + end Total_Out; + + --------------- + -- Translate -- + --------------- + + procedure Translate + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode) is + begin + if Filter.Header = GZip and then Filter.Compression then + Translate_GZip + (Filter => Filter, + In_Data => In_Data, + In_Last => In_Last, + Out_Data => Out_Data, + Out_Last => Out_Last, + Flush => Flush); + else + Translate_Auto + (Filter => Filter, + In_Data => In_Data, + In_Last => In_Last, + Out_Data => Out_Data, + Out_Last => Out_Last, + Flush => Flush); + end if; + end Translate; + + -------------------- + -- Translate_Auto -- + -------------------- + + procedure Translate_Auto + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out 
Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode) + is + use type Thin.Int; + Code : Thin.Int; + + begin + if not Is_Open (Filter) then + raise Status_Error; + end if; + + if Out_Data'Length = 0 and then In_Data'Length = 0 then + raise Constraint_Error; + end if; + + Set_Out (Filter.Strm.all, Out_Data'Address, Out_Data'Length); + Set_In (Filter.Strm.all, In_Data'Address, In_Data'Length); + + Code := Flate (Filter.Compression).Step + (To_Thin_Access (Filter.Strm), + Thin.Int (Flush)); + + if Code = Thin.Z_STREAM_END then + Filter.Stream_End := True; + else + Check_Error (Filter.Strm.all, Code); + end if; + + In_Last := In_Data'Last + - Stream_Element_Offset (Avail_In (Filter.Strm.all)); + Out_Last := Out_Data'Last + - Stream_Element_Offset (Avail_Out (Filter.Strm.all)); + end Translate_Auto; + + -------------------- + -- Translate_GZip -- + -------------------- + + procedure Translate_GZip + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode) + is + Out_First : Stream_Element_Offset; + + procedure Add_Data (Data : in Stream_Element_Array); + -- Add data to stream from the Filter.Offset till necessary, + -- used for add gzip headr/footer. 
+ + procedure Put_32 + (Item : in out Stream_Element_Array; + Data : in Unsigned_32); + pragma Inline (Put_32); + + -------------- + -- Add_Data -- + -------------- + + procedure Add_Data (Data : in Stream_Element_Array) is + Data_First : Stream_Element_Offset renames Filter.Offset; + Data_Last : Stream_Element_Offset; + Data_Len : Stream_Element_Offset; -- -1 + Out_Len : Stream_Element_Offset; -- -1 + begin + Out_First := Out_Last + 1; + + if Data_First > Data'Last then + return; + end if; + + Data_Len := Data'Last - Data_First; + Out_Len := Out_Data'Last - Out_First; + + if Data_Len <= Out_Len then + Out_Last := Out_First + Data_Len; + Data_Last := Data'Last; + else + Out_Last := Out_Data'Last; + Data_Last := Data_First + Out_Len; + end if; + + Out_Data (Out_First .. Out_Last) := Data (Data_First .. Data_Last); + + Data_First := Data_Last + 1; + Out_First := Out_Last + 1; + end Add_Data; + + ------------ + -- Put_32 -- + ------------ + + procedure Put_32 + (Item : in out Stream_Element_Array; + Data : in Unsigned_32) + is + D : Unsigned_32 := Data; + begin + for J in Item'First .. Item'First + 3 loop + Item (J) := Stream_Element (D and 16#FF#); + D := Shift_Right (D, 8); + end loop; + end Put_32; + + begin + Out_Last := Out_Data'First - 1; + + if not Filter.Stream_End then + Add_Data (Simple_GZip_Header); + + Translate_Auto + (Filter => Filter, + In_Data => In_Data, + In_Last => In_Last, + Out_Data => Out_Data (Out_First .. Out_Data'Last), + Out_Last => Out_Last, + Flush => Flush); + + CRC32 (Filter.CRC, In_Data (In_Data'First .. In_Last)); + end if; + + if Filter.Stream_End and then Out_Last <= Out_Data'Last then + -- This detection method would work only when + -- Simple_GZip_Header'Last > Footer_Array'Last + + if Filter.Offset = Simple_GZip_Header'Last + 1 then + Filter.Offset := Footer_Array'First; + end if; + + declare + Footer : Footer_Array; + begin + Put_32 (Footer, Filter.CRC); + Put_32 (Footer (Footer'First + 4 .. 
Footer'Last), + Unsigned_32 (Total_In (Filter))); + Add_Data (Footer); + end; + end if; + end Translate_GZip; + + ------------- + -- Version -- + ------------- + + function Version return String is + begin + return Interfaces.C.Strings.Value (Thin.zlibVersion); + end Version; + + ----------- + -- Write -- + ----------- + + procedure Write + (Filter : in out Filter_Type; + Item : in Ada.Streams.Stream_Element_Array; + Flush : in Flush_Mode := No_Flush) + is + Buffer : Stream_Element_Array (1 .. Buffer_Size); + In_Last : Stream_Element_Offset; + Out_Last : Stream_Element_Offset; + In_First : Stream_Element_Offset := Item'First; + begin + if Item'Length = 0 and Flush = No_Flush then + return; + end if; + + loop + Translate + (Filter => Filter, + In_Data => Item (In_First .. Item'Last), + In_Last => In_Last, + Out_Data => Buffer, + Out_Last => Out_Last, + Flush => Flush); + + if Out_Last >= Buffer'First then + Write (Buffer (1 .. Out_Last)); + end if; + + exit when In_Last = Item'Last or Stream_End (Filter); + + In_First := In_Last + 1; + end loop; + end Write; + +end ZLib; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib.ads nodejs-0.11.15/deps/zlib/contrib/ada/zlib.ads --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib.ads 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib.ads 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,328 @@ +------------------------------------------------------------------------------ +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2004 Dmitriy Anisimkov -- +-- -- +-- This library is free software; you can redistribute it and/or modify -- +-- it under the terms of the GNU General Public License as published by -- +-- the Free Software Foundation; either version 2 of the License, or (at -- +-- your option) any later version. 
-- +-- -- +-- This library is distributed in the hope that it will be useful, but -- +-- WITHOUT ANY WARRANTY; without even the implied warranty of -- +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- +-- General Public License for more details. -- +-- -- +-- You should have received a copy of the GNU General Public License -- +-- along with this library; if not, write to the Free Software Foundation, -- +-- Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -- +-- -- +-- As a special exception, if other files instantiate generics from this -- +-- unit, or you link this unit with other files to produce an executable, -- +-- this unit does not by itself cause the resulting executable to be -- +-- covered by the GNU General Public License. This exception does not -- +-- however invalidate any other reasons why the executable file might be -- +-- covered by the GNU Public License. -- +------------------------------------------------------------------------------ + +-- $Id: zlib.ads,v 1.26 2004/09/06 06:53:19 vagul Exp $ + +with Ada.Streams; + +with Interfaces; + +package ZLib is + + ZLib_Error : exception; + Status_Error : exception; + + type Compression_Level is new Integer range -1 .. 9; + + type Flush_Mode is private; + + type Compression_Method is private; + + type Window_Bits_Type is new Integer range 8 .. 15; + + type Memory_Level_Type is new Integer range 1 .. 9; + + type Unsigned_32 is new Interfaces.Unsigned_32; + + type Strategy_Type is private; + + type Header_Type is (None, Auto, Default, GZip); + -- Header type usage have a some limitation for inflate. + -- See comment for Inflate_Init. 
+ + subtype Count is Ada.Streams.Stream_Element_Count; + + Default_Memory_Level : constant Memory_Level_Type := 8; + Default_Window_Bits : constant Window_Bits_Type := 15; + + ---------------------------------- + -- Compression method constants -- + ---------------------------------- + + Deflated : constant Compression_Method; + -- Only one method allowed in this ZLib version + + --------------------------------- + -- Compression level constants -- + --------------------------------- + + No_Compression : constant Compression_Level := 0; + Best_Speed : constant Compression_Level := 1; + Best_Compression : constant Compression_Level := 9; + Default_Compression : constant Compression_Level := -1; + + -------------------------- + -- Flush mode constants -- + -------------------------- + + No_Flush : constant Flush_Mode; + -- Regular way for compression, no flush + + Partial_Flush : constant Flush_Mode; + -- Will be removed, use Z_SYNC_FLUSH instead + + Sync_Flush : constant Flush_Mode; + -- All pending output is flushed to the output buffer and the output + -- is aligned on a byte boundary, so that the decompressor can get all + -- input data available so far. (In particular avail_in is zero after the + -- call if enough output space has been provided before the call.) + -- Flushing may degrade compression for some compression algorithms and so + -- it should be used only when necessary. + + Block_Flush : constant Flush_Mode; + -- Z_BLOCK requests that inflate() stop + -- if and when it get to the next deflate block boundary. When decoding the + -- zlib or gzip format, this will cause inflate() to return immediately + -- after the header and before the first block. When doing a raw inflate, + -- inflate() will go ahead and process the first block, and will return + -- when it gets to the end of that block, or when it runs out of data. 
+ + Full_Flush : constant Flush_Mode; + -- All output is flushed as with SYNC_FLUSH, and the compression state + -- is reset so that decompression can restart from this point if previous + -- compressed data has been damaged or if random access is desired. Using + -- Full_Flush too often can seriously degrade the compression. + + Finish : constant Flush_Mode; + -- Just for tell the compressor that input data is complete. + + ------------------------------------ + -- Compression strategy constants -- + ------------------------------------ + + -- RLE stategy could be used only in version 1.2.0 and later. + + Filtered : constant Strategy_Type; + Huffman_Only : constant Strategy_Type; + RLE : constant Strategy_Type; + Default_Strategy : constant Strategy_Type; + + Default_Buffer_Size : constant := 4096; + + type Filter_Type is tagged limited private; + -- The filter is for compression and for decompression. + -- The usage of the type is depend of its initialization. + + function Version return String; + pragma Inline (Version); + -- Return string representation of the ZLib version. + + procedure Deflate_Init + (Filter : in out Filter_Type; + Level : in Compression_Level := Default_Compression; + Strategy : in Strategy_Type := Default_Strategy; + Method : in Compression_Method := Deflated; + Window_Bits : in Window_Bits_Type := Default_Window_Bits; + Memory_Level : in Memory_Level_Type := Default_Memory_Level; + Header : in Header_Type := Default); + -- Compressor initialization. + -- When Header parameter is Auto or Default, then default zlib header + -- would be provided for compressed data. + -- When Header is GZip, then gzip header would be set instead of + -- default header. + -- When Header is None, no header would be set for compressed data. + + procedure Inflate_Init + (Filter : in out Filter_Type; + Window_Bits : in Window_Bits_Type := Default_Window_Bits; + Header : in Header_Type := Default); + -- Decompressor initialization. 
+ -- Default header type mean that ZLib default header is expecting in the + -- input compressed stream. + -- Header type None mean that no header is expecting in the input stream. + -- GZip header type mean that GZip header is expecting in the + -- input compressed stream. + -- Auto header type mean that header type (GZip or Native) would be + -- detected automatically in the input stream. + -- Note that header types parameter values None, GZip and Auto are + -- supported for inflate routine only in ZLib versions 1.2.0.2 and later. + -- Deflate_Init is supporting all header types. + + function Is_Open (Filter : in Filter_Type) return Boolean; + pragma Inline (Is_Open); + -- Is the filter opened for compression or decompression. + + procedure Close + (Filter : in out Filter_Type; + Ignore_Error : in Boolean := False); + -- Closing the compression or decompressor. + -- If stream is closing before the complete and Ignore_Error is False, + -- The exception would be raised. + + generic + with procedure Data_In + (Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset); + with procedure Data_Out + (Item : in Ada.Streams.Stream_Element_Array); + procedure Generic_Translate + (Filter : in out Filter_Type; + In_Buffer_Size : in Integer := Default_Buffer_Size; + Out_Buffer_Size : in Integer := Default_Buffer_Size); + -- Compress/decompress data fetch from Data_In routine and pass the result + -- to the Data_Out routine. User should provide Data_In and Data_Out + -- for compression/decompression data flow. + -- Compression or decompression depend on Filter initialization. 
+ + function Total_In (Filter : in Filter_Type) return Count; + pragma Inline (Total_In); + -- Returns total number of input bytes read so far + + function Total_Out (Filter : in Filter_Type) return Count; + pragma Inline (Total_Out); + -- Returns total number of bytes output so far + + function CRC32 + (CRC : in Unsigned_32; + Data : in Ada.Streams.Stream_Element_Array) + return Unsigned_32; + pragma Inline (CRC32); + -- Compute CRC32, it could be necessary for make gzip format + + procedure CRC32 + (CRC : in out Unsigned_32; + Data : in Ada.Streams.Stream_Element_Array); + pragma Inline (CRC32); + -- Compute CRC32, it could be necessary for make gzip format + + ------------------------------------------------- + -- Below is more complex low level routines. -- + ------------------------------------------------- + + procedure Translate + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode); + -- Compress/decompress the In_Data buffer and place the result into + -- Out_Data. In_Last is the index of last element from In_Data accepted by + -- the Filter. Out_Last is the last element of the received data from + -- Filter. To tell the filter that incoming data are complete put the + -- Flush parameter to Finish. + + function Stream_End (Filter : in Filter_Type) return Boolean; + pragma Inline (Stream_End); + -- Return the true when the stream is complete. + + procedure Flush + (Filter : in out Filter_Type; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode); + pragma Inline (Flush); + -- Flushing the data from the compressor. + + generic + with procedure Write + (Item : in Ada.Streams.Stream_Element_Array); + -- User should provide this routine for accept + -- compressed/decompressed data. 
+ + Buffer_Size : in Ada.Streams.Stream_Element_Offset + := Default_Buffer_Size; + -- Buffer size for Write user routine. + + procedure Write + (Filter : in out Filter_Type; + Item : in Ada.Streams.Stream_Element_Array; + Flush : in Flush_Mode := No_Flush); + -- Compress/Decompress data from Item to the generic parameter procedure + -- Write. Output buffer size could be set in Buffer_Size generic parameter. + + generic + with procedure Read + (Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset); + -- User should provide data for compression/decompression + -- thru this routine. + + Buffer : in out Ada.Streams.Stream_Element_Array; + -- Buffer for keep remaining data from the previous + -- back read. + + Rest_First, Rest_Last : in out Ada.Streams.Stream_Element_Offset; + -- Rest_First have to be initialized to Buffer'Last + 1 + -- Rest_Last have to be initialized to Buffer'Last + -- before usage. + + Allow_Read_Some : in Boolean := False; + -- Is it allowed to return Last < Item'Last before end of data. + + procedure Read + (Filter : in out Filter_Type; + Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode := No_Flush); + -- Compress/Decompress data from generic parameter procedure Read to the + -- Item. User should provide Buffer and initialized Rest_First, Rest_Last + -- indicators. If Allow_Read_Some is True, Read routines could return + -- Last < Item'Last only at end of stream. + +private + + use Ada.Streams; + + pragma Assert (Ada.Streams.Stream_Element'Size = 8); + pragma Assert (Ada.Streams.Stream_Element'Modulus = 2**8); + + type Flush_Mode is new Integer range 0 .. 5; + + type Compression_Method is new Integer range 8 .. 8; + + type Strategy_Type is new Integer range 0 .. 
3; + + No_Flush : constant Flush_Mode := 0; + Partial_Flush : constant Flush_Mode := 1; + Sync_Flush : constant Flush_Mode := 2; + Full_Flush : constant Flush_Mode := 3; + Finish : constant Flush_Mode := 4; + Block_Flush : constant Flush_Mode := 5; + + Filtered : constant Strategy_Type := 1; + Huffman_Only : constant Strategy_Type := 2; + RLE : constant Strategy_Type := 3; + Default_Strategy : constant Strategy_Type := 0; + + Deflated : constant Compression_Method := 8; + + type Z_Stream; + + type Z_Stream_Access is access all Z_Stream; + + type Filter_Type is tagged limited record + Strm : Z_Stream_Access; + Compression : Boolean; + Stream_End : Boolean; + Header : Header_Type; + CRC : Unsigned_32; + Offset : Stream_Element_Offset; + -- Offset for gzip header/footer output. + end record; + +end ZLib; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib.gpr nodejs-0.11.15/deps/zlib/contrib/ada/zlib.gpr --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib.gpr 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib.gpr 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,20 @@ +project Zlib is + + for Languages use ("Ada"); + for Source_Dirs use ("."); + for Object_Dir use "."; + for Main use ("test.adb", "mtest.adb", "read.adb", "buffer_demo"); + + package Compiler is + for Default_Switches ("ada") use ("-gnatwcfilopru", "-gnatVcdfimorst", "-gnatyabcefhiklmnoprst"); + end Compiler; + + package Linker is + for Default_Switches ("ada") use ("-lz"); + end Linker; + + package Builder is + for Default_Switches ("ada") use ("-s", "-gnatQ"); + end Builder; + +end Zlib; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib-streams.adb nodejs-0.11.15/deps/zlib/contrib/ada/zlib-streams.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib-streams.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib-streams.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,225 @@ +---------------------------------------------------------------- +-- ZLib 
for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- + +-- $Id: zlib-streams.adb,v 1.10 2004/05/31 10:53:40 vagul Exp $ + +with Ada.Unchecked_Deallocation; + +package body ZLib.Streams is + + ----------- + -- Close -- + ----------- + + procedure Close (Stream : in out Stream_Type) is + procedure Free is new Ada.Unchecked_Deallocation + (Stream_Element_Array, Buffer_Access); + begin + if Stream.Mode = Out_Stream or Stream.Mode = Duplex then + -- We should flush the data written by the writer. + + Flush (Stream, Finish); + + Close (Stream.Writer); + end if; + + if Stream.Mode = In_Stream or Stream.Mode = Duplex then + Close (Stream.Reader); + Free (Stream.Buffer); + end if; + end Close; + + ------------ + -- Create -- + ------------ + + procedure Create + (Stream : out Stream_Type; + Mode : in Stream_Mode; + Back : in Stream_Access; + Back_Compressed : in Boolean; + Level : in Compression_Level := Default_Compression; + Strategy : in Strategy_Type := Default_Strategy; + Header : in Header_Type := Default; + Read_Buffer_Size : in Ada.Streams.Stream_Element_Offset + := Default_Buffer_Size; + Write_Buffer_Size : in Ada.Streams.Stream_Element_Offset + := Default_Buffer_Size) + is + + subtype Buffer_Subtype is Stream_Element_Array (1 .. 
Read_Buffer_Size); + + procedure Init_Filter + (Filter : in out Filter_Type; + Compress : in Boolean); + + ----------------- + -- Init_Filter -- + ----------------- + + procedure Init_Filter + (Filter : in out Filter_Type; + Compress : in Boolean) is + begin + if Compress then + Deflate_Init + (Filter, Level, Strategy, Header => Header); + else + Inflate_Init (Filter, Header => Header); + end if; + end Init_Filter; + + begin + Stream.Back := Back; + Stream.Mode := Mode; + + if Mode = Out_Stream or Mode = Duplex then + Init_Filter (Stream.Writer, Back_Compressed); + Stream.Buffer_Size := Write_Buffer_Size; + else + Stream.Buffer_Size := 0; + end if; + + if Mode = In_Stream or Mode = Duplex then + Init_Filter (Stream.Reader, not Back_Compressed); + + Stream.Buffer := new Buffer_Subtype; + Stream.Rest_First := Stream.Buffer'Last + 1; + Stream.Rest_Last := Stream.Buffer'Last; + end if; + end Create; + + ----------- + -- Flush -- + ----------- + + procedure Flush + (Stream : in out Stream_Type; + Mode : in Flush_Mode := Sync_Flush) + is + Buffer : Stream_Element_Array (1 .. Stream.Buffer_Size); + Last : Stream_Element_Offset; + begin + loop + Flush (Stream.Writer, Buffer, Last, Mode); + + Ada.Streams.Write (Stream.Back.all, Buffer (1 .. 
Last)); + + exit when Last < Buffer'Last; + end loop; + end Flush; + + ------------- + -- Is_Open -- + ------------- + + function Is_Open (Stream : Stream_Type) return Boolean is + begin + return Is_Open (Stream.Reader) or else Is_Open (Stream.Writer); + end Is_Open; + + ---------- + -- Read -- + ---------- + + procedure Read + (Stream : in out Stream_Type; + Item : out Stream_Element_Array; + Last : out Stream_Element_Offset) + is + + procedure Read + (Item : out Stream_Element_Array; + Last : out Stream_Element_Offset); + + ---------- + -- Read -- + ---------- + + procedure Read + (Item : out Stream_Element_Array; + Last : out Stream_Element_Offset) is + begin + Ada.Streams.Read (Stream.Back.all, Item, Last); + end Read; + + procedure Read is new ZLib.Read + (Read => Read, + Buffer => Stream.Buffer.all, + Rest_First => Stream.Rest_First, + Rest_Last => Stream.Rest_Last); + + begin + Read (Stream.Reader, Item, Last); + end Read; + + ------------------- + -- Read_Total_In -- + ------------------- + + function Read_Total_In (Stream : in Stream_Type) return Count is + begin + return Total_In (Stream.Reader); + end Read_Total_In; + + -------------------- + -- Read_Total_Out -- + -------------------- + + function Read_Total_Out (Stream : in Stream_Type) return Count is + begin + return Total_Out (Stream.Reader); + end Read_Total_Out; + + ----------- + -- Write -- + ----------- + + procedure Write + (Stream : in out Stream_Type; + Item : in Stream_Element_Array) + is + + procedure Write (Item : in Stream_Element_Array); + + ----------- + -- Write -- + ----------- + + procedure Write (Item : in Stream_Element_Array) is + begin + Ada.Streams.Write (Stream.Back.all, Item); + end Write; + + procedure Write is new ZLib.Write + (Write => Write, + Buffer_Size => Stream.Buffer_Size); + + begin + Write (Stream.Writer, Item, No_Flush); + end Write; + + -------------------- + -- Write_Total_In -- + -------------------- + + function Write_Total_In (Stream : in Stream_Type) return 
Count is + begin + return Total_In (Stream.Writer); + end Write_Total_In; + + --------------------- + -- Write_Total_Out -- + --------------------- + + function Write_Total_Out (Stream : in Stream_Type) return Count is + begin + return Total_Out (Stream.Writer); + end Write_Total_Out; + +end ZLib.Streams; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib-streams.ads nodejs-0.11.15/deps/zlib/contrib/ada/zlib-streams.ads --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib-streams.ads 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib-streams.ads 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,114 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- + +-- $Id: zlib-streams.ads,v 1.12 2004/05/31 10:53:40 vagul Exp $ + +package ZLib.Streams is + + type Stream_Mode is (In_Stream, Out_Stream, Duplex); + + type Stream_Access is access all Ada.Streams.Root_Stream_Type'Class; + + type Stream_Type is + new Ada.Streams.Root_Stream_Type with private; + + procedure Read + (Stream : in out Stream_Type; + Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset); + + procedure Write + (Stream : in out Stream_Type; + Item : in Ada.Streams.Stream_Element_Array); + + procedure Flush + (Stream : in out Stream_Type; + Mode : in Flush_Mode := Sync_Flush); + -- Flush the written data to the back stream, + -- all data placed to the compressor is flushing to the Back stream. + -- Should not be used untill necessary, becouse it is decreasing + -- compression. + + function Read_Total_In (Stream : in Stream_Type) return Count; + pragma Inline (Read_Total_In); + -- Return total number of bytes read from back stream so far. 
+ + function Read_Total_Out (Stream : in Stream_Type) return Count; + pragma Inline (Read_Total_Out); + -- Return total number of bytes read so far. + + function Write_Total_In (Stream : in Stream_Type) return Count; + pragma Inline (Write_Total_In); + -- Return total number of bytes written so far. + + function Write_Total_Out (Stream : in Stream_Type) return Count; + pragma Inline (Write_Total_Out); + -- Return total number of bytes written to the back stream. + + procedure Create + (Stream : out Stream_Type; + Mode : in Stream_Mode; + Back : in Stream_Access; + Back_Compressed : in Boolean; + Level : in Compression_Level := Default_Compression; + Strategy : in Strategy_Type := Default_Strategy; + Header : in Header_Type := Default; + Read_Buffer_Size : in Ada.Streams.Stream_Element_Offset + := Default_Buffer_Size; + Write_Buffer_Size : in Ada.Streams.Stream_Element_Offset + := Default_Buffer_Size); + -- Create the Comression/Decompression stream. + -- If mode is In_Stream then Write operation is disabled. + -- If mode is Out_Stream then Read operation is disabled. + + -- If Back_Compressed is true then + -- Data written to the Stream is compressing to the Back stream + -- and data read from the Stream is decompressed data from the Back stream. + + -- If Back_Compressed is false then + -- Data written to the Stream is decompressing to the Back stream + -- and data read from the Stream is compressed data from the Back stream. + + -- !!! When the Need_Header is False ZLib-Ada is using undocumented + -- ZLib 1.1.4 functionality to do not create/wait for ZLib headers. 
+ + function Is_Open (Stream : Stream_Type) return Boolean; + + procedure Close (Stream : in out Stream_Type); + +private + + use Ada.Streams; + + type Buffer_Access is access all Stream_Element_Array; + + type Stream_Type + is new Root_Stream_Type with + record + Mode : Stream_Mode; + + Buffer : Buffer_Access; + Rest_First : Stream_Element_Offset; + Rest_Last : Stream_Element_Offset; + -- Buffer for Read operation. + -- We need to have this buffer in the record + -- becouse not all read data from back stream + -- could be processed during the read operation. + + Buffer_Size : Stream_Element_Offset; + -- Buffer size for write operation. + -- We do not need to have this buffer + -- in the record becouse all data could be + -- processed in the write operation. + + Back : Stream_Access; + Reader : Filter_Type; + Writer : Filter_Type; + end record; + +end ZLib.Streams; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib-thin.adb nodejs-0.11.15/deps/zlib/contrib/ada/zlib-thin.adb --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib-thin.adb 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib-thin.adb 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,141 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. 
-- +---------------------------------------------------------------- + +-- $Id: zlib-thin.adb,v 1.8 2003/12/14 18:27:31 vagul Exp $ + +package body ZLib.Thin is + + ZLIB_VERSION : constant Chars_Ptr := zlibVersion; + + Z_Stream_Size : constant Int := Z_Stream'Size / System.Storage_Unit; + + -------------- + -- Avail_In -- + -------------- + + function Avail_In (Strm : in Z_Stream) return UInt is + begin + return Strm.Avail_In; + end Avail_In; + + --------------- + -- Avail_Out -- + --------------- + + function Avail_Out (Strm : in Z_Stream) return UInt is + begin + return Strm.Avail_Out; + end Avail_Out; + + ------------------ + -- Deflate_Init -- + ------------------ + + function Deflate_Init + (strm : Z_Streamp; + level : Int; + method : Int; + windowBits : Int; + memLevel : Int; + strategy : Int) + return Int is + begin + return deflateInit2 + (strm, + level, + method, + windowBits, + memLevel, + strategy, + ZLIB_VERSION, + Z_Stream_Size); + end Deflate_Init; + + ------------------ + -- Inflate_Init -- + ------------------ + + function Inflate_Init (strm : Z_Streamp; windowBits : Int) return Int is + begin + return inflateInit2 (strm, windowBits, ZLIB_VERSION, Z_Stream_Size); + end Inflate_Init; + + ------------------------ + -- Last_Error_Message -- + ------------------------ + + function Last_Error_Message (Strm : in Z_Stream) return String is + use Interfaces.C.Strings; + begin + if Strm.msg = Null_Ptr then + return ""; + else + return Value (Strm.msg); + end if; + end Last_Error_Message; + + ------------ + -- Set_In -- + ------------ + + procedure Set_In + (Strm : in out Z_Stream; + Buffer : in Voidp; + Size : in UInt) is + begin + Strm.Next_In := Buffer; + Strm.Avail_In := Size; + end Set_In; + + ------------------ + -- Set_Mem_Func -- + ------------------ + + procedure Set_Mem_Func + (Strm : in out Z_Stream; + Opaque : in Voidp; + Alloc : in alloc_func; + Free : in free_func) is + begin + Strm.opaque := Opaque; + Strm.zalloc := Alloc; + Strm.zfree := Free; 
+ end Set_Mem_Func; + + ------------- + -- Set_Out -- + ------------- + + procedure Set_Out + (Strm : in out Z_Stream; + Buffer : in Voidp; + Size : in UInt) is + begin + Strm.Next_Out := Buffer; + Strm.Avail_Out := Size; + end Set_Out; + + -------------- + -- Total_In -- + -------------- + + function Total_In (Strm : in Z_Stream) return ULong is + begin + return Strm.Total_In; + end Total_In; + + --------------- + -- Total_Out -- + --------------- + + function Total_Out (Strm : in Z_Stream) return ULong is + begin + return Strm.Total_Out; + end Total_Out; + +end ZLib.Thin; diff -Nru nodejs-0.11.13/deps/zlib/contrib/ada/zlib-thin.ads nodejs-0.11.15/deps/zlib/contrib/ada/zlib-thin.ads --- nodejs-0.11.13/deps/zlib/contrib/ada/zlib-thin.ads 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/ada/zlib-thin.ads 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,450 @@ +---------------------------------------------------------------- +-- ZLib for Ada thick binding. -- +-- -- +-- Copyright (C) 2002-2003 Dmitriy Anisimkov -- +-- -- +-- Open source license information is in the zlib.ads file. -- +---------------------------------------------------------------- + +-- $Id: zlib-thin.ads,v 1.11 2004/07/23 06:33:11 vagul Exp $ + +with Interfaces.C.Strings; + +with System; + +private package ZLib.Thin is + + -- From zconf.h + + MAX_MEM_LEVEL : constant := 9; -- zconf.h:105 + -- zconf.h:105 + MAX_WBITS : constant := 15; -- zconf.h:115 + -- 32K LZ77 window + -- zconf.h:115 + SEEK_SET : constant := 8#0000#; -- zconf.h:244 + -- Seek from beginning of file. + -- zconf.h:244 + SEEK_CUR : constant := 1; -- zconf.h:245 + -- Seek from current position. 
+ -- zconf.h:245 + SEEK_END : constant := 2; -- zconf.h:246 + -- Set file pointer to EOF plus "offset" + -- zconf.h:246 + + type Byte is new Interfaces.C.unsigned_char; -- 8 bits + -- zconf.h:214 + type UInt is new Interfaces.C.unsigned; -- 16 bits or more + -- zconf.h:216 + type Int is new Interfaces.C.int; + + type ULong is new Interfaces.C.unsigned_long; -- 32 bits or more + -- zconf.h:217 + subtype Chars_Ptr is Interfaces.C.Strings.chars_ptr; + + type ULong_Access is access ULong; + type Int_Access is access Int; + + subtype Voidp is System.Address; -- zconf.h:232 + + subtype Byte_Access is Voidp; + + Nul : constant Voidp := System.Null_Address; + -- end from zconf + + Z_NO_FLUSH : constant := 8#0000#; -- zlib.h:125 + -- zlib.h:125 + Z_PARTIAL_FLUSH : constant := 1; -- zlib.h:126 + -- will be removed, use + -- Z_SYNC_FLUSH instead + -- zlib.h:126 + Z_SYNC_FLUSH : constant := 2; -- zlib.h:127 + -- zlib.h:127 + Z_FULL_FLUSH : constant := 3; -- zlib.h:128 + -- zlib.h:128 + Z_FINISH : constant := 4; -- zlib.h:129 + -- zlib.h:129 + Z_OK : constant := 8#0000#; -- zlib.h:132 + -- zlib.h:132 + Z_STREAM_END : constant := 1; -- zlib.h:133 + -- zlib.h:133 + Z_NEED_DICT : constant := 2; -- zlib.h:134 + -- zlib.h:134 + Z_ERRNO : constant := -1; -- zlib.h:135 + -- zlib.h:135 + Z_STREAM_ERROR : constant := -2; -- zlib.h:136 + -- zlib.h:136 + Z_DATA_ERROR : constant := -3; -- zlib.h:137 + -- zlib.h:137 + Z_MEM_ERROR : constant := -4; -- zlib.h:138 + -- zlib.h:138 + Z_BUF_ERROR : constant := -5; -- zlib.h:139 + -- zlib.h:139 + Z_VERSION_ERROR : constant := -6; -- zlib.h:140 + -- zlib.h:140 + Z_NO_COMPRESSION : constant := 8#0000#; -- zlib.h:145 + -- zlib.h:145 + Z_BEST_SPEED : constant := 1; -- zlib.h:146 + -- zlib.h:146 + Z_BEST_COMPRESSION : constant := 9; -- zlib.h:147 + -- zlib.h:147 + Z_DEFAULT_COMPRESSION : constant := -1; -- zlib.h:148 + -- zlib.h:148 + Z_FILTERED : constant := 1; -- zlib.h:151 + -- zlib.h:151 + Z_HUFFMAN_ONLY : constant := 2; -- zlib.h:152 + -- 
zlib.h:152 + Z_DEFAULT_STRATEGY : constant := 8#0000#; -- zlib.h:153 + -- zlib.h:153 + Z_BINARY : constant := 8#0000#; -- zlib.h:156 + -- zlib.h:156 + Z_ASCII : constant := 1; -- zlib.h:157 + -- zlib.h:157 + Z_UNKNOWN : constant := 2; -- zlib.h:158 + -- zlib.h:158 + Z_DEFLATED : constant := 8; -- zlib.h:161 + -- zlib.h:161 + Z_NULL : constant := 8#0000#; -- zlib.h:164 + -- for initializing zalloc, zfree, opaque + -- zlib.h:164 + type gzFile is new Voidp; -- zlib.h:646 + + type Z_Stream is private; + + type Z_Streamp is access all Z_Stream; -- zlib.h:89 + + type alloc_func is access function + (Opaque : Voidp; + Items : UInt; + Size : UInt) + return Voidp; -- zlib.h:63 + + type free_func is access procedure (opaque : Voidp; address : Voidp); + + function zlibVersion return Chars_Ptr; + + function Deflate (strm : Z_Streamp; flush : Int) return Int; + + function DeflateEnd (strm : Z_Streamp) return Int; + + function Inflate (strm : Z_Streamp; flush : Int) return Int; + + function InflateEnd (strm : Z_Streamp) return Int; + + function deflateSetDictionary + (strm : Z_Streamp; + dictionary : Byte_Access; + dictLength : UInt) + return Int; + + function deflateCopy (dest : Z_Streamp; source : Z_Streamp) return Int; + -- zlib.h:478 + + function deflateReset (strm : Z_Streamp) return Int; -- zlib.h:495 + + function deflateParams + (strm : Z_Streamp; + level : Int; + strategy : Int) + return Int; -- zlib.h:506 + + function inflateSetDictionary + (strm : Z_Streamp; + dictionary : Byte_Access; + dictLength : UInt) + return Int; -- zlib.h:548 + + function inflateSync (strm : Z_Streamp) return Int; -- zlib.h:565 + + function inflateReset (strm : Z_Streamp) return Int; -- zlib.h:580 + + function compress + (dest : Byte_Access; + destLen : ULong_Access; + source : Byte_Access; + sourceLen : ULong) + return Int; -- zlib.h:601 + + function compress2 + (dest : Byte_Access; + destLen : ULong_Access; + source : Byte_Access; + sourceLen : ULong; + level : Int) + return Int; -- 
zlib.h:615 + + function uncompress + (dest : Byte_Access; + destLen : ULong_Access; + source : Byte_Access; + sourceLen : ULong) + return Int; + + function gzopen (path : Chars_Ptr; mode : Chars_Ptr) return gzFile; + + function gzdopen (fd : Int; mode : Chars_Ptr) return gzFile; + + function gzsetparams + (file : gzFile; + level : Int; + strategy : Int) + return Int; + + function gzread + (file : gzFile; + buf : Voidp; + len : UInt) + return Int; + + function gzwrite + (file : in gzFile; + buf : in Voidp; + len : in UInt) + return Int; + + function gzprintf (file : in gzFile; format : in Chars_Ptr) return Int; + + function gzputs (file : in gzFile; s : in Chars_Ptr) return Int; + + function gzgets + (file : gzFile; + buf : Chars_Ptr; + len : Int) + return Chars_Ptr; + + function gzputc (file : gzFile; char : Int) return Int; + + function gzgetc (file : gzFile) return Int; + + function gzflush (file : gzFile; flush : Int) return Int; + + function gzseek + (file : gzFile; + offset : Int; + whence : Int) + return Int; + + function gzrewind (file : gzFile) return Int; + + function gztell (file : gzFile) return Int; + + function gzeof (file : gzFile) return Int; + + function gzclose (file : gzFile) return Int; + + function gzerror (file : gzFile; errnum : Int_Access) return Chars_Ptr; + + function adler32 + (adler : ULong; + buf : Byte_Access; + len : UInt) + return ULong; + + function crc32 + (crc : ULong; + buf : Byte_Access; + len : UInt) + return ULong; + + function deflateInit + (strm : Z_Streamp; + level : Int; + version : Chars_Ptr; + stream_size : Int) + return Int; + + function deflateInit2 + (strm : Z_Streamp; + level : Int; + method : Int; + windowBits : Int; + memLevel : Int; + strategy : Int; + version : Chars_Ptr; + stream_size : Int) + return Int; + + function Deflate_Init + (strm : Z_Streamp; + level : Int; + method : Int; + windowBits : Int; + memLevel : Int; + strategy : Int) + return Int; + pragma Inline (Deflate_Init); + + function inflateInit + 
(strm : Z_Streamp; + version : Chars_Ptr; + stream_size : Int) + return Int; + + function inflateInit2 + (strm : in Z_Streamp; + windowBits : in Int; + version : in Chars_Ptr; + stream_size : in Int) + return Int; + + function inflateBackInit + (strm : in Z_Streamp; + windowBits : in Int; + window : in Byte_Access; + version : in Chars_Ptr; + stream_size : in Int) + return Int; + -- Size of window have to be 2**windowBits. + + function Inflate_Init (strm : Z_Streamp; windowBits : Int) return Int; + pragma Inline (Inflate_Init); + + function zError (err : Int) return Chars_Ptr; + + function inflateSyncPoint (z : Z_Streamp) return Int; + + function get_crc_table return ULong_Access; + + -- Interface to the available fields of the z_stream structure. + -- The application must update next_in and avail_in when avail_in has + -- dropped to zero. It must update next_out and avail_out when avail_out + -- has dropped to zero. The application must initialize zalloc, zfree and + -- opaque before calling the init function. 
+ + procedure Set_In + (Strm : in out Z_Stream; + Buffer : in Voidp; + Size : in UInt); + pragma Inline (Set_In); + + procedure Set_Out + (Strm : in out Z_Stream; + Buffer : in Voidp; + Size : in UInt); + pragma Inline (Set_Out); + + procedure Set_Mem_Func + (Strm : in out Z_Stream; + Opaque : in Voidp; + Alloc : in alloc_func; + Free : in free_func); + pragma Inline (Set_Mem_Func); + + function Last_Error_Message (Strm : in Z_Stream) return String; + pragma Inline (Last_Error_Message); + + function Avail_Out (Strm : in Z_Stream) return UInt; + pragma Inline (Avail_Out); + + function Avail_In (Strm : in Z_Stream) return UInt; + pragma Inline (Avail_In); + + function Total_In (Strm : in Z_Stream) return ULong; + pragma Inline (Total_In); + + function Total_Out (Strm : in Z_Stream) return ULong; + pragma Inline (Total_Out); + + function inflateCopy + (dest : in Z_Streamp; + Source : in Z_Streamp) + return Int; + + function compressBound (Source_Len : in ULong) return ULong; + + function deflateBound + (Strm : in Z_Streamp; + Source_Len : in ULong) + return ULong; + + function gzungetc (C : in Int; File : in gzFile) return Int; + + function zlibCompileFlags return ULong; + +private + + type Z_Stream is record -- zlib.h:68 + Next_In : Voidp := Nul; -- next input byte + Avail_In : UInt := 0; -- number of bytes available at next_in + Total_In : ULong := 0; -- total nb of input bytes read so far + Next_Out : Voidp := Nul; -- next output byte should be put there + Avail_Out : UInt := 0; -- remaining free space at next_out + Total_Out : ULong := 0; -- total nb of bytes output so far + msg : Chars_Ptr; -- last error message, NULL if no error + state : Voidp; -- not visible by applications + zalloc : alloc_func := null; -- used to allocate the internal state + zfree : free_func := null; -- used to free the internal state + opaque : Voidp; -- private data object passed to + -- zalloc and zfree + data_type : Int; -- best guess about the data type: + -- ascii or binary + adler : 
ULong; -- adler32 value of the uncompressed + -- data + reserved : ULong; -- reserved for future use + end record; + + pragma Convention (C, Z_Stream); + + pragma Import (C, zlibVersion, "zlibVersion"); + pragma Import (C, Deflate, "deflate"); + pragma Import (C, DeflateEnd, "deflateEnd"); + pragma Import (C, Inflate, "inflate"); + pragma Import (C, InflateEnd, "inflateEnd"); + pragma Import (C, deflateSetDictionary, "deflateSetDictionary"); + pragma Import (C, deflateCopy, "deflateCopy"); + pragma Import (C, deflateReset, "deflateReset"); + pragma Import (C, deflateParams, "deflateParams"); + pragma Import (C, inflateSetDictionary, "inflateSetDictionary"); + pragma Import (C, inflateSync, "inflateSync"); + pragma Import (C, inflateReset, "inflateReset"); + pragma Import (C, compress, "compress"); + pragma Import (C, compress2, "compress2"); + pragma Import (C, uncompress, "uncompress"); + pragma Import (C, gzopen, "gzopen"); + pragma Import (C, gzdopen, "gzdopen"); + pragma Import (C, gzsetparams, "gzsetparams"); + pragma Import (C, gzread, "gzread"); + pragma Import (C, gzwrite, "gzwrite"); + pragma Import (C, gzprintf, "gzprintf"); + pragma Import (C, gzputs, "gzputs"); + pragma Import (C, gzgets, "gzgets"); + pragma Import (C, gzputc, "gzputc"); + pragma Import (C, gzgetc, "gzgetc"); + pragma Import (C, gzflush, "gzflush"); + pragma Import (C, gzseek, "gzseek"); + pragma Import (C, gzrewind, "gzrewind"); + pragma Import (C, gztell, "gztell"); + pragma Import (C, gzeof, "gzeof"); + pragma Import (C, gzclose, "gzclose"); + pragma Import (C, gzerror, "gzerror"); + pragma Import (C, adler32, "adler32"); + pragma Import (C, crc32, "crc32"); + pragma Import (C, deflateInit, "deflateInit_"); + pragma Import (C, inflateInit, "inflateInit_"); + pragma Import (C, deflateInit2, "deflateInit2_"); + pragma Import (C, inflateInit2, "inflateInit2_"); + pragma Import (C, zError, "zError"); + pragma Import (C, inflateSyncPoint, "inflateSyncPoint"); + pragma Import (C, 
get_crc_table, "get_crc_table"); + + -- since zlib 1.2.0: + + pragma Import (C, inflateCopy, "inflateCopy"); + pragma Import (C, compressBound, "compressBound"); + pragma Import (C, deflateBound, "deflateBound"); + pragma Import (C, gzungetc, "gzungetc"); + pragma Import (C, zlibCompileFlags, "zlibCompileFlags"); + + pragma Import (C, inflateBackInit, "inflateBackInit_"); + + -- I stopped binding the inflateBack routines, becouse realize that + -- it does not support zlib and gzip headers for now, and have no + -- symmetric deflateBack routines. + -- ZLib-Ada is symmetric regarding deflate/inflate data transformation + -- and has a similar generic callback interface for the + -- deflate/inflate transformation based on the regular Deflate/Inflate + -- routines. + + -- pragma Import (C, inflateBack, "inflateBack"); + -- pragma Import (C, inflateBackEnd, "inflateBackEnd"); + +end ZLib.Thin; diff -Nru nodejs-0.11.13/deps/zlib/contrib/amd64/amd64-match.S nodejs-0.11.15/deps/zlib/contrib/amd64/amd64-match.S --- nodejs-0.11.13/deps/zlib/contrib/amd64/amd64-match.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/amd64/amd64-match.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,452 @@ +/* + * match.S -- optimized version of longest_match() + * based on the similar work by Gilles Vollant, and Brian Raiter, written 1998 + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the BSD License. Use by owners of Che Guevarra + * parafernalia is prohibited, where possible, and highly discouraged + * elsewhere. 
+ */ + +#ifndef NO_UNDERLINE +# define match_init _match_init +# define longest_match _longest_match +#endif + +#define scanend ebx +#define scanendw bx +#define chainlenwmask edx /* high word: current chain len low word: s->wmask */ +#define curmatch rsi +#define curmatchd esi +#define windowbestlen r8 +#define scanalign r9 +#define scanalignd r9d +#define window r10 +#define bestlen r11 +#define bestlend r11d +#define scanstart r12d +#define scanstartw r12w +#define scan r13 +#define nicematch r14d +#define limit r15 +#define limitd r15d +#define prev rcx + +/* + * The 258 is a "magic number, not a parameter -- changing it + * breaks the hell loose + */ +#define MAX_MATCH (258) +#define MIN_MATCH (3) +#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1) +#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7) + +/* stack frame offsets */ +#define LocalVarsSize (112) +#define _chainlenwmask ( 8-LocalVarsSize)(%rsp) +#define _windowbestlen (16-LocalVarsSize)(%rsp) +#define save_r14 (24-LocalVarsSize)(%rsp) +#define save_rsi (32-LocalVarsSize)(%rsp) +#define save_rbx (40-LocalVarsSize)(%rsp) +#define save_r12 (56-LocalVarsSize)(%rsp) +#define save_r13 (64-LocalVarsSize)(%rsp) +#define save_r15 (80-LocalVarsSize)(%rsp) + + +.globl match_init, longest_match + +/* + * On AMD64 the first argument of a function (in our case -- the pointer to + * deflate_state structure) is passed in %rdi, hence our offsets below are + * all off of that. 
+ */ + +/* you can check the structure offset by running + +#include <stdlib.h> +#include <stdio.h> +#include "deflate.h" + +void print_depl() +{ +deflate_state ds; +deflate_state *s=&ds; +printf("size pointer=%u\n",(int)sizeof(void*)); + +printf("#define dsWSize (%3u)(%%rdi)\n",(int)(((char*)&(s->w_size))-((char*)s))); +printf("#define dsWMask (%3u)(%%rdi)\n",(int)(((char*)&(s->w_mask))-((char*)s))); +printf("#define dsWindow (%3u)(%%rdi)\n",(int)(((char*)&(s->window))-((char*)s))); +printf("#define dsPrev (%3u)(%%rdi)\n",(int)(((char*)&(s->prev))-((char*)s))); +printf("#define dsMatchLen (%3u)(%%rdi)\n",(int)(((char*)&(s->match_length))-((char*)s))); +printf("#define dsPrevMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_match))-((char*)s))); +printf("#define dsStrStart (%3u)(%%rdi)\n",(int)(((char*)&(s->strstart))-((char*)s))); +printf("#define dsMatchStart (%3u)(%%rdi)\n",(int)(((char*)&(s->match_start))-((char*)s))); +printf("#define dsLookahead (%3u)(%%rdi)\n",(int)(((char*)&(s->lookahead))-((char*)s))); +printf("#define dsPrevLen (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_length))-((char*)s))); +printf("#define dsMaxChainLen (%3u)(%%rdi)\n",(int)(((char*)&(s->max_chain_length))-((char*)s))); +printf("#define dsGoodMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->good_match))-((char*)s))); +printf("#define dsNiceMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->nice_match))-((char*)s))); +} + +*/ + + +/* + to compile for XCode 3.2 on MacOSX x86_64 + - run "gcc -g -c -DXCODE_MAC_X64_STRUCTURE amd64-match.S" + */ + + +#ifndef CURRENT_LINX_XCODE_MAC_X64_STRUCTURE +#define dsWSize ( 68)(%rdi) +#define dsWMask ( 76)(%rdi) +#define dsWindow ( 80)(%rdi) +#define dsPrev ( 96)(%rdi) +#define dsMatchLen (144)(%rdi) +#define dsPrevMatch (148)(%rdi) +#define dsStrStart (156)(%rdi) +#define dsMatchStart (160)(%rdi) +#define dsLookahead (164)(%rdi) +#define dsPrevLen (168)(%rdi) +#define dsMaxChainLen (172)(%rdi) +#define dsGoodMatch (188)(%rdi) +#define dsNiceMatch (192)(%rdi) + +#else + 
+#ifndef STRUCT_OFFSET +# define STRUCT_OFFSET (0) +#endif + + +#define dsWSize ( 56 + STRUCT_OFFSET)(%rdi) +#define dsWMask ( 64 + STRUCT_OFFSET)(%rdi) +#define dsWindow ( 72 + STRUCT_OFFSET)(%rdi) +#define dsPrev ( 88 + STRUCT_OFFSET)(%rdi) +#define dsMatchLen (136 + STRUCT_OFFSET)(%rdi) +#define dsPrevMatch (140 + STRUCT_OFFSET)(%rdi) +#define dsStrStart (148 + STRUCT_OFFSET)(%rdi) +#define dsMatchStart (152 + STRUCT_OFFSET)(%rdi) +#define dsLookahead (156 + STRUCT_OFFSET)(%rdi) +#define dsPrevLen (160 + STRUCT_OFFSET)(%rdi) +#define dsMaxChainLen (164 + STRUCT_OFFSET)(%rdi) +#define dsGoodMatch (180 + STRUCT_OFFSET)(%rdi) +#define dsNiceMatch (184 + STRUCT_OFFSET)(%rdi) + +#endif + + + + +.text + +/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */ + +longest_match: +/* + * Retrieve the function arguments. %curmatch will hold cur_match + * throughout the entire function (passed via rsi on amd64). + * rdi will hold the pointer to the deflate_state (first arg on amd64) + */ + mov %rsi, save_rsi + mov %rbx, save_rbx + mov %r12, save_r12 + mov %r13, save_r13 + mov %r14, save_r14 + mov %r15, save_r15 + +/* uInt wmask = s->w_mask; */ +/* unsigned chain_length = s->max_chain_length; */ +/* if (s->prev_length >= s->good_match) { */ +/* chain_length >>= 2; */ +/* } */ + + movl dsPrevLen, %eax + movl dsGoodMatch, %ebx + cmpl %ebx, %eax + movl dsWMask, %eax + movl dsMaxChainLen, %chainlenwmask + jl LastMatchGood + shrl $2, %chainlenwmask +LastMatchGood: + +/* chainlen is decremented once beforehand so that the function can */ +/* use the sign flag instead of the zero flag for the exit test. */ +/* It is then shifted into the high word, to make room for the wmask */ +/* value, which it will always accompany. 
*/ + + decl %chainlenwmask + shll $16, %chainlenwmask + orl %eax, %chainlenwmask + +/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */ + + movl dsNiceMatch, %eax + movl dsLookahead, %ebx + cmpl %eax, %ebx + jl LookaheadLess + movl %eax, %ebx +LookaheadLess: movl %ebx, %nicematch + +/* register Bytef *scan = s->window + s->strstart; */ + + mov dsWindow, %window + movl dsStrStart, %limitd + lea (%limit, %window), %scan + +/* Determine how many bytes the scan ptr is off from being */ +/* dword-aligned. */ + + mov %scan, %scanalign + negl %scanalignd + andl $3, %scanalignd + +/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */ +/* s->strstart - (IPos)MAX_DIST(s) : NIL; */ + + movl dsWSize, %eax + subl $MIN_LOOKAHEAD, %eax + xorl %ecx, %ecx + subl %eax, %limitd + cmovng %ecx, %limitd + +/* int best_len = s->prev_length; */ + + movl dsPrevLen, %bestlend + +/* Store the sum of s->window + best_len in %windowbestlen locally, and in memory. */ + + lea (%window, %bestlen), %windowbestlen + mov %windowbestlen, _windowbestlen + +/* register ush scan_start = *(ushf*)scan; */ +/* register ush scan_end = *(ushf*)(scan+best_len-1); */ +/* Posf *prev = s->prev; */ + + movzwl (%scan), %scanstart + movzwl -1(%scan, %bestlen), %scanend + mov dsPrev, %prev + +/* Jump into the main loop. */ + + movl %chainlenwmask, _chainlenwmask + jmp LoopEntry + +.balign 16 + +/* do { + * match = s->window + cur_match; + * if (*(ushf*)(match+best_len-1) != scan_end || + * *(ushf*)match != scan_start) continue; + * [...] + * } while ((cur_match = prev[cur_match & wmask]) > limit + * && --chain_length != 0); + * + * Here is the inner loop of the function. The function will spend the + * majority of its time in this loop, and majority of that time will + * be spent in the first ten instructions. 
+ */ +LookupLoop: + andl %chainlenwmask, %curmatchd + movzwl (%prev, %curmatch, 2), %curmatchd + cmpl %limitd, %curmatchd + jbe LeaveNow + subl $0x00010000, %chainlenwmask + js LeaveNow +LoopEntry: cmpw -1(%windowbestlen, %curmatch), %scanendw + jne LookupLoop + cmpw %scanstartw, (%window, %curmatch) + jne LookupLoop + +/* Store the current value of chainlen. */ + movl %chainlenwmask, _chainlenwmask + +/* %scan is the string under scrutiny, and %prev to the string we */ +/* are hoping to match it up with. In actuality, %esi and %edi are */ +/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */ +/* initialized to -(MAX_MATCH_8 - scanalign). */ + + mov $(-MAX_MATCH_8), %rdx + lea (%curmatch, %window), %windowbestlen + lea MAX_MATCH_8(%windowbestlen, %scanalign), %windowbestlen + lea MAX_MATCH_8(%scan, %scanalign), %prev + +/* the prefetching below makes very little difference... */ + prefetcht1 (%windowbestlen, %rdx) + prefetcht1 (%prev, %rdx) + +/* + * Test the strings for equality, 8 bytes at a time. At the end, + * adjust %rdx so that it is offset to the exact byte that mismatched. + * + * It should be confessed that this loop usually does not represent + * much of the total running time. Replacing it with a more + * straightforward "rep cmpsb" would not drastically degrade + * performance -- unrolling it, for example, makes no difference. + */ + +#undef USE_SSE /* works, but is 6-7% slower, than non-SSE... 
*/ + +LoopCmps: +#ifdef USE_SSE + /* Preload the SSE registers */ + movdqu (%windowbestlen, %rdx), %xmm1 + movdqu (%prev, %rdx), %xmm2 + pcmpeqb %xmm2, %xmm1 + movdqu 16(%windowbestlen, %rdx), %xmm3 + movdqu 16(%prev, %rdx), %xmm4 + pcmpeqb %xmm4, %xmm3 + movdqu 32(%windowbestlen, %rdx), %xmm5 + movdqu 32(%prev, %rdx), %xmm6 + pcmpeqb %xmm6, %xmm5 + movdqu 48(%windowbestlen, %rdx), %xmm7 + movdqu 48(%prev, %rdx), %xmm8 + pcmpeqb %xmm8, %xmm7 + + /* Check the comparisions' results */ + pmovmskb %xmm1, %rax + notw %ax + bsfw %ax, %ax + jnz LeaveLoopCmps + + /* this is the only iteration of the loop with a possibility of having + incremented rdx by 0x108 (each loop iteration add 16*4 = 0x40 + and (0x40*4)+8=0x108 */ + add $8, %rdx + jz LenMaximum + add $8, %rdx + + + pmovmskb %xmm3, %rax + notw %ax + bsfw %ax, %ax + jnz LeaveLoopCmps + + + add $16, %rdx + + + pmovmskb %xmm5, %rax + notw %ax + bsfw %ax, %ax + jnz LeaveLoopCmps + + add $16, %rdx + + + pmovmskb %xmm7, %rax + notw %ax + bsfw %ax, %ax + jnz LeaveLoopCmps + + add $16, %rdx + + jmp LoopCmps +LeaveLoopCmps: add %rax, %rdx +#else + mov (%windowbestlen, %rdx), %rax + xor (%prev, %rdx), %rax + jnz LeaveLoopCmps + + mov 8(%windowbestlen, %rdx), %rax + xor 8(%prev, %rdx), %rax + jnz LeaveLoopCmps8 + + mov 16(%windowbestlen, %rdx), %rax + xor 16(%prev, %rdx), %rax + jnz LeaveLoopCmps16 + + add $24, %rdx + jnz LoopCmps + jmp LenMaximum +# if 0 +/* + * This three-liner is tantalizingly simple, but bsf is a slow instruction, + * and the complicated alternative down below is quite a bit faster. Sad... 
+ */ + +LeaveLoopCmps: bsf %rax, %rax /* find the first non-zero bit */ + shrl $3, %eax /* divide by 8 to get the byte */ + add %rax, %rdx +# else +LeaveLoopCmps16: + add $8, %rdx +LeaveLoopCmps8: + add $8, %rdx +LeaveLoopCmps: testl $0xFFFFFFFF, %eax /* Check the first 4 bytes */ + jnz Check16 + add $4, %rdx + shr $32, %rax +Check16: testw $0xFFFF, %ax + jnz LenLower + add $2, %rdx + shrl $16, %eax +LenLower: subb $1, %al + adc $0, %rdx +# endif +#endif + +/* Calculate the length of the match. If it is longer than MAX_MATCH, */ +/* then automatically accept it as the best possible match and leave. */ + + lea (%prev, %rdx), %rax + sub %scan, %rax + cmpl $MAX_MATCH, %eax + jge LenMaximum + +/* If the length of the match is not longer than the best match we */ +/* have so far, then forget it and return to the lookup loop. */ + + cmpl %bestlend, %eax + jg LongerMatch + mov _windowbestlen, %windowbestlen + mov dsPrev, %prev + movl _chainlenwmask, %edx + jmp LookupLoop + +/* s->match_start = cur_match; */ +/* best_len = len; */ +/* if (len >= nice_match) break; */ +/* scan_end = *(ushf*)(scan+best_len-1); */ + +LongerMatch: + movl %eax, %bestlend + movl %curmatchd, dsMatchStart + cmpl %nicematch, %eax + jge LeaveNow + + lea (%window, %bestlen), %windowbestlen + mov %windowbestlen, _windowbestlen + + movzwl -1(%scan, %rax), %scanend + mov dsPrev, %prev + movl _chainlenwmask, %chainlenwmask + jmp LookupLoop + +/* Accept the current string, with the maximum possible length. */ + +LenMaximum: + movl $MAX_MATCH, %bestlend + movl %curmatchd, dsMatchStart + +/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */ +/* return s->lookahead; */ + +LeaveNow: + movl dsLookahead, %eax + cmpl %eax, %bestlend + cmovngl %bestlend, %eax +LookaheadRet: + +/* Restore the registers and return from whence we came. 
*/ + + mov save_rsi, %rsi + mov save_rbx, %rbx + mov save_r12, %r12 + mov save_r13, %r13 + mov save_r14, %r14 + mov save_r15, %r15 + + ret + +match_init: ret diff -Nru nodejs-0.11.13/deps/zlib/contrib/asm686/match.S nodejs-0.11.15/deps/zlib/contrib/asm686/match.S --- nodejs-0.11.13/deps/zlib/contrib/asm686/match.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/asm686/match.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,357 @@ +/* match.S -- x86 assembly version of the zlib longest_match() function. + * Optimized for the Intel 686 chips (PPro and later). + * + * Copyright (C) 1998, 2007 Brian Raiter <breadbox@muppetlabs.com> + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the author be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. 
+ */ + +#ifndef NO_UNDERLINE +#define match_init _match_init +#define longest_match _longest_match +#endif + +#define MAX_MATCH (258) +#define MIN_MATCH (3) +#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1) +#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7) + +/* stack frame offsets */ + +#define chainlenwmask 0 /* high word: current chain len */ + /* low word: s->wmask */ +#define window 4 /* local copy of s->window */ +#define windowbestlen 8 /* s->window + bestlen */ +#define scanstart 16 /* first two bytes of string */ +#define scanend 12 /* last two bytes of string */ +#define scanalign 20 /* dword-misalignment of string */ +#define nicematch 24 /* a good enough match size */ +#define bestlen 28 /* size of best match so far */ +#define scan 32 /* ptr to string wanting match */ + +#define LocalVarsSize (36) +/* saved ebx 36 */ +/* saved edi 40 */ +/* saved esi 44 */ +/* saved ebp 48 */ +/* return address 52 */ +#define deflatestate 56 /* the function arguments */ +#define curmatch 60 + +/* All the +zlib1222add offsets are due to the addition of fields + * in zlib in the deflate_state structure since the asm code was first written + * (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)"). + * (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0"). + * if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8"). 
+ */ + +#define zlib1222add (8) + +#define dsWSize (36+zlib1222add) +#define dsWMask (44+zlib1222add) +#define dsWindow (48+zlib1222add) +#define dsPrev (56+zlib1222add) +#define dsMatchLen (88+zlib1222add) +#define dsPrevMatch (92+zlib1222add) +#define dsStrStart (100+zlib1222add) +#define dsMatchStart (104+zlib1222add) +#define dsLookahead (108+zlib1222add) +#define dsPrevLen (112+zlib1222add) +#define dsMaxChainLen (116+zlib1222add) +#define dsGoodMatch (132+zlib1222add) +#define dsNiceMatch (136+zlib1222add) + + +.file "match.S" + +.globl match_init, longest_match + +.text + +/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */ +.cfi_sections .debug_frame + +longest_match: + +.cfi_startproc +/* Save registers that the compiler may be using, and adjust %esp to */ +/* make room for our stack frame. */ + + pushl %ebp + .cfi_def_cfa_offset 8 + .cfi_offset ebp, -8 + pushl %edi + .cfi_def_cfa_offset 12 + pushl %esi + .cfi_def_cfa_offset 16 + pushl %ebx + .cfi_def_cfa_offset 20 + subl $LocalVarsSize, %esp + .cfi_def_cfa_offset LocalVarsSize+20 + +/* Retrieve the function arguments. %ecx will hold cur_match */ +/* throughout the entire function. %edx will hold the pointer to the */ +/* deflate_state structure during the function's setup (before */ +/* entering the main loop). */ + + movl deflatestate(%esp), %edx + movl curmatch(%esp), %ecx + +/* uInt wmask = s->w_mask; */ +/* unsigned chain_length = s->max_chain_length; */ +/* if (s->prev_length >= s->good_match) { */ +/* chain_length >>= 2; */ +/* } */ + + movl dsPrevLen(%edx), %eax + movl dsGoodMatch(%edx), %ebx + cmpl %ebx, %eax + movl dsWMask(%edx), %eax + movl dsMaxChainLen(%edx), %ebx + jl LastMatchGood + shrl $2, %ebx +LastMatchGood: + +/* chainlen is decremented once beforehand so that the function can */ +/* use the sign flag instead of the zero flag for the exit test. */ +/* It is then shifted into the high word, to make room for the wmask */ +/* value, which it will always accompany. 
*/ + + decl %ebx + shll $16, %ebx + orl %eax, %ebx + movl %ebx, chainlenwmask(%esp) + +/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */ + + movl dsNiceMatch(%edx), %eax + movl dsLookahead(%edx), %ebx + cmpl %eax, %ebx + jl LookaheadLess + movl %eax, %ebx +LookaheadLess: movl %ebx, nicematch(%esp) + +/* register Bytef *scan = s->window + s->strstart; */ + + movl dsWindow(%edx), %esi + movl %esi, window(%esp) + movl dsStrStart(%edx), %ebp + lea (%esi,%ebp), %edi + movl %edi, scan(%esp) + +/* Determine how many bytes the scan ptr is off from being */ +/* dword-aligned. */ + + movl %edi, %eax + negl %eax + andl $3, %eax + movl %eax, scanalign(%esp) + +/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */ +/* s->strstart - (IPos)MAX_DIST(s) : NIL; */ + + movl dsWSize(%edx), %eax + subl $MIN_LOOKAHEAD, %eax + subl %eax, %ebp + jg LimitPositive + xorl %ebp, %ebp +LimitPositive: + +/* int best_len = s->prev_length; */ + + movl dsPrevLen(%edx), %eax + movl %eax, bestlen(%esp) + +/* Store the sum of s->window + best_len in %esi locally, and in %esi. */ + + addl %eax, %esi + movl %esi, windowbestlen(%esp) + +/* register ush scan_start = *(ushf*)scan; */ +/* register ush scan_end = *(ushf*)(scan+best_len-1); */ +/* Posf *prev = s->prev; */ + + movzwl (%edi), %ebx + movl %ebx, scanstart(%esp) + movzwl -1(%edi,%eax), %ebx + movl %ebx, scanend(%esp) + movl dsPrev(%edx), %edi + +/* Jump into the main loop. */ + + movl chainlenwmask(%esp), %edx + jmp LoopEntry + +.balign 16 + +/* do { + * match = s->window + cur_match; + * if (*(ushf*)(match+best_len-1) != scan_end || + * *(ushf*)match != scan_start) continue; + * [...] + * } while ((cur_match = prev[cur_match & wmask]) > limit + * && --chain_length != 0); + * + * Here is the inner loop of the function. The function will spend the + * majority of its time in this loop, and majority of that time will + * be spent in the first ten instructions. 
+ * + * Within this loop: + * %ebx = scanend + * %ecx = curmatch + * %edx = chainlenwmask - i.e., ((chainlen << 16) | wmask) + * %esi = windowbestlen - i.e., (window + bestlen) + * %edi = prev + * %ebp = limit + */ +LookupLoop: + andl %edx, %ecx + movzwl (%edi,%ecx,2), %ecx + cmpl %ebp, %ecx + jbe LeaveNow + subl $0x00010000, %edx + js LeaveNow +LoopEntry: movzwl -1(%esi,%ecx), %eax + cmpl %ebx, %eax + jnz LookupLoop + movl window(%esp), %eax + movzwl (%eax,%ecx), %eax + cmpl scanstart(%esp), %eax + jnz LookupLoop + +/* Store the current value of chainlen. */ + + movl %edx, chainlenwmask(%esp) + +/* Point %edi to the string under scrutiny, and %esi to the string we */ +/* are hoping to match it up with. In actuality, %esi and %edi are */ +/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */ +/* initialized to -(MAX_MATCH_8 - scanalign). */ + + movl window(%esp), %esi + movl scan(%esp), %edi + addl %ecx, %esi + movl scanalign(%esp), %eax + movl $(-MAX_MATCH_8), %edx + lea MAX_MATCH_8(%edi,%eax), %edi + lea MAX_MATCH_8(%esi,%eax), %esi + +/* Test the strings for equality, 8 bytes at a time. At the end, + * adjust %edx so that it is offset to the exact byte that mismatched. + * + * We already know at this point that the first three bytes of the + * strings match each other, and they can be safely passed over before + * starting the compare loop. So what this code does is skip over 0-3 + * bytes, as much as necessary in order to dword-align the %edi + * pointer. (%esi will still be misaligned three times out of four.) + * + * It should be confessed that this loop usually does not represent + * much of the total running time. Replacing it with a more + * straightforward "rep cmpsb" would not drastically degrade + * performance. 
+ */ +LoopCmps: + movl (%esi,%edx), %eax + xorl (%edi,%edx), %eax + jnz LeaveLoopCmps + movl 4(%esi,%edx), %eax + xorl 4(%edi,%edx), %eax + jnz LeaveLoopCmps4 + addl $8, %edx + jnz LoopCmps + jmp LenMaximum +LeaveLoopCmps4: addl $4, %edx +LeaveLoopCmps: testl $0x0000FFFF, %eax + jnz LenLower + addl $2, %edx + shrl $16, %eax +LenLower: subb $1, %al + adcl $0, %edx + +/* Calculate the length of the match. If it is longer than MAX_MATCH, */ +/* then automatically accept it as the best possible match and leave. */ + + lea (%edi,%edx), %eax + movl scan(%esp), %edi + subl %edi, %eax + cmpl $MAX_MATCH, %eax + jge LenMaximum + +/* If the length of the match is not longer than the best match we */ +/* have so far, then forget it and return to the lookup loop. */ + + movl deflatestate(%esp), %edx + movl bestlen(%esp), %ebx + cmpl %ebx, %eax + jg LongerMatch + movl windowbestlen(%esp), %esi + movl dsPrev(%edx), %edi + movl scanend(%esp), %ebx + movl chainlenwmask(%esp), %edx + jmp LookupLoop + +/* s->match_start = cur_match; */ +/* best_len = len; */ +/* if (len >= nice_match) break; */ +/* scan_end = *(ushf*)(scan+best_len-1); */ + +LongerMatch: movl nicematch(%esp), %ebx + movl %eax, bestlen(%esp) + movl %ecx, dsMatchStart(%edx) + cmpl %ebx, %eax + jge LeaveNow + movl window(%esp), %esi + addl %eax, %esi + movl %esi, windowbestlen(%esp) + movzwl -1(%edi,%eax), %ebx + movl dsPrev(%edx), %edi + movl %ebx, scanend(%esp) + movl chainlenwmask(%esp), %edx + jmp LookupLoop + +/* Accept the current string, with the maximum possible length. */ + +LenMaximum: movl deflatestate(%esp), %edx + movl $MAX_MATCH, bestlen(%esp) + movl %ecx, dsMatchStart(%edx) + +/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */ +/* return s->lookahead; */ + +LeaveNow: + movl deflatestate(%esp), %edx + movl bestlen(%esp), %ebx + movl dsLookahead(%edx), %eax + cmpl %eax, %ebx + jg LookaheadRet + movl %ebx, %eax +LookaheadRet: + +/* Restore the stack and return from whence we came. 
*/ + + addl $LocalVarsSize, %esp + .cfi_def_cfa_offset 20 + popl %ebx + .cfi_def_cfa_offset 16 + popl %esi + .cfi_def_cfa_offset 12 + popl %edi + .cfi_def_cfa_offset 8 + popl %ebp + .cfi_def_cfa_offset 4 +.cfi_endproc +match_init: ret diff -Nru nodejs-0.11.13/deps/zlib/contrib/asm686/README.686 nodejs-0.11.15/deps/zlib/contrib/asm686/README.686 --- nodejs-0.11.13/deps/zlib/contrib/asm686/README.686 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/asm686/README.686 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +This is a patched version of zlib, modified to use +Pentium-Pro-optimized assembly code in the deflation algorithm. The +files changed/added by this patch are: + +README.686 +match.S + +The speedup that this patch provides varies, depending on whether the +compiler used to build the original version of zlib falls afoul of the +PPro's speed traps. My own tests show a speedup of around 10-20% at +the default compression level, and 20-30% using -9, against a version +compiled using gcc 2.7.2.3. Your mileage may vary. + +Note that this code has been tailored for the PPro/PII in particular, +and will not perform particuarly well on a Pentium. + +If you are using an assembler other than GNU as, you will have to +translate match.S to use your assembler's syntax. (Have fun.) + +Brian Raiter +breadbox@muppetlabs.com +April, 1998 + + +Added for zlib 1.1.3: + +The patches come from +http://www.muppetlabs.com/~breadbox/software/assembly.html + +To compile zlib with this asm file, copy match.S to the zlib directory +then do: + +CFLAGS="-O3 -DASMV" ./configure +make OBJA=match.o + + +Update: + +I've been ignoring these assembly routines for years, believing that +gcc's generated code had caught up with it sometime around gcc 2.95 +and the major rearchitecting of the Pentium 4. However, I recently +learned that, despite what I believed, this code still has some life +in it. 
On the Pentium 4 and AMD64 chips, it continues to run about 8% +faster than the code produced by gcc 4.1. + +In acknowledgement of its continuing usefulness, I've altered the +license to match that of the rest of zlib. Share and Enjoy! + +Brian Raiter +breadbox@muppetlabs.com +April, 2007 diff -Nru nodejs-0.11.13/deps/zlib/contrib/blast/blast.c nodejs-0.11.15/deps/zlib/contrib/blast/blast.c --- nodejs-0.11.13/deps/zlib/contrib/blast/blast.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/blast/blast.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,446 @@ +/* blast.c + * Copyright (C) 2003, 2012 Mark Adler + * For conditions of distribution and use, see copyright notice in blast.h + * version 1.2, 24 Oct 2012 + * + * blast.c decompresses data compressed by the PKWare Compression Library. + * This function provides functionality similar to the explode() function of + * the PKWare library, hence the name "blast". + * + * This decompressor is based on the excellent format description provided by + * Ben Rudiak-Gould in comp.compression on August 13, 2001. Interestingly, the + * example Ben provided in the post is incorrect. The distance 110001 should + * instead be 111000. When corrected, the example byte stream becomes: + * + * 00 04 82 24 25 8f 80 7f + * + * which decompresses to "AIAIAIAIAIAIA" (without the quotes). 
+ */ + +/* + * Change history: + * + * 1.0 12 Feb 2003 - First version + * 1.1 16 Feb 2003 - Fixed distance check for > 4 GB uncompressed data + * 1.2 24 Oct 2012 - Add note about using binary mode in stdio + * - Fix comparisons of differently signed integers + */ + +#include <setjmp.h> /* for setjmp(), longjmp(), and jmp_buf */ +#include "blast.h" /* prototype for blast() */ + +#define local static /* for local function definitions */ +#define MAXBITS 13 /* maximum code length */ +#define MAXWIN 4096 /* maximum window size */ + +/* input and output state */ +struct state { + /* input state */ + blast_in infun; /* input function provided by user */ + void *inhow; /* opaque information passed to infun() */ + unsigned char *in; /* next input location */ + unsigned left; /* available input at in */ + int bitbuf; /* bit buffer */ + int bitcnt; /* number of bits in bit buffer */ + + /* input limit error return state for bits() and decode() */ + jmp_buf env; + + /* output state */ + blast_out outfun; /* output function provided by user */ + void *outhow; /* opaque information passed to outfun() */ + unsigned next; /* index of next write location in out[] */ + int first; /* true to check distances (for first 4K) */ + unsigned char out[MAXWIN]; /* output buffer and sliding window */ +}; + +/* + * Return need bits from the input stream. This always leaves less than + * eight bits in the buffer. bits() works properly for need == 0. + * + * Format notes: + * + * - Bits are stored in bytes from the least significant bit to the most + * significant bit. Therefore bits are dropped from the bottom of the bit + * buffer, using shift right, and new bytes are appended to the top of the + * bit buffer, using shift left. 
+ */ +local int bits(struct state *s, int need) +{ + int val; /* bit accumulator */ + + /* load at least need bits into val */ + val = s->bitbuf; + while (s->bitcnt < need) { + if (s->left == 0) { + s->left = s->infun(s->inhow, &(s->in)); + if (s->left == 0) longjmp(s->env, 1); /* out of input */ + } + val |= (int)(*(s->in)++) << s->bitcnt; /* load eight bits */ + s->left--; + s->bitcnt += 8; + } + + /* drop need bits and update buffer, always zero to seven bits left */ + s->bitbuf = val >> need; + s->bitcnt -= need; + + /* return need bits, zeroing the bits above that */ + return val & ((1 << need) - 1); +} + +/* + * Huffman code decoding tables. count[1..MAXBITS] is the number of symbols of + * each length, which for a canonical code are stepped through in order. + * symbol[] are the symbol values in canonical order, where the number of + * entries is the sum of the counts in count[]. The decoding process can be + * seen in the function decode() below. + */ +struct huffman { + short *count; /* number of symbols of each length */ + short *symbol; /* canonically ordered symbols */ +}; + +/* + * Decode a code from the stream s using huffman table h. Return the symbol or + * a negative value if there is an error. If all of the lengths are zero, i.e. + * an empty code, or if the code is incomplete and an invalid code is received, + * then -9 is returned after reading MAXBITS bits. + * + * Format notes: + * + * - The codes as stored in the compressed data are bit-reversed relative to + * a simple integer ordering of codes of the same lengths. Hence below the + * bits are pulled from the compressed data one at a time and used to + * build the code value reversed from what is in the stream in order to + * permit simple integer comparisons for decoding. + * + * - The first code for the shortest length is all ones. Subsequent codes of + * the same length are simply integer decrements of the previous code. When + * moving up a length, a one bit is appended to the code. 
For a complete + * code, the last code of the longest length will be all zeros. To support + * this ordering, the bits pulled during decoding are inverted to apply the + * more "natural" ordering starting with all zeros and incrementing. + */ +local int decode(struct state *s, struct huffman *h) +{ + int len; /* current number of bits in code */ + int code; /* len bits being decoded */ + int first; /* first code of length len */ + int count; /* number of codes of length len */ + int index; /* index of first code of length len in symbol table */ + int bitbuf; /* bits from stream */ + int left; /* bits left in next or left to process */ + short *next; /* next number of codes */ + + bitbuf = s->bitbuf; + left = s->bitcnt; + code = first = index = 0; + len = 1; + next = h->count + 1; + while (1) { + while (left--) { + code |= (bitbuf & 1) ^ 1; /* invert code */ + bitbuf >>= 1; + count = *next++; + if (code < first + count) { /* if length len, return symbol */ + s->bitbuf = bitbuf; + s->bitcnt = (s->bitcnt - len) & 7; + return h->symbol[index + (code - first)]; + } + index += count; /* else update for next length */ + first += count; + first <<= 1; + code <<= 1; + len++; + } + left = (MAXBITS+1) - len; + if (left == 0) break; + if (s->left == 0) { + s->left = s->infun(s->inhow, &(s->in)); + if (s->left == 0) longjmp(s->env, 1); /* out of input */ + } + bitbuf = *(s->in)++; + s->left--; + if (left > 8) left = 8; + } + return -9; /* ran out of codes */ +} + +/* + * Given a list of repeated code lengths rep[0..n-1], where each byte is a + * count (high four bits + 1) and a code length (low four bits), generate the + * list of code lengths. This compaction reduces the size of the object code. + * Then given the list of code lengths length[0..n-1] representing a canonical + * Huffman code for n symbols, construct the tables required to decode those + * codes. 
Those tables are the number of codes of each length, and the symbols + * sorted by length, retaining their original order within each length. The + * return value is zero for a complete code set, negative for an over- + * subscribed code set, and positive for an incomplete code set. The tables + * can be used if the return value is zero or positive, but they cannot be used + * if the return value is negative. If the return value is zero, it is not + * possible for decode() using that table to return an error--any stream of + * enough bits will resolve to a symbol. If the return value is positive, then + * it is possible for decode() using that table to return an error for received + * codes past the end of the incomplete lengths. + */ +local int construct(struct huffman *h, const unsigned char *rep, int n) +{ + int symbol; /* current symbol when stepping through length[] */ + int len; /* current length when stepping through h->count[] */ + int left; /* number of possible codes left of current length */ + short offs[MAXBITS+1]; /* offsets in symbol table for each length */ + short length[256]; /* code lengths */ + + /* convert compact repeat counts into symbol bit length list */ + symbol = 0; + do { + len = *rep++; + left = (len >> 4) + 1; + len &= 15; + do { + length[symbol++] = len; + } while (--left); + } while (--n); + n = symbol; + + /* count number of codes of each length */ + for (len = 0; len <= MAXBITS; len++) + h->count[len] = 0; + for (symbol = 0; symbol < n; symbol++) + (h->count[length[symbol]])++; /* assumes lengths are within bounds */ + if (h->count[0] == n) /* no codes! 
*/ + return 0; /* complete, but decode() will fail */ + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; /* one possible code of zero length */ + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; /* one more bit, double codes left */ + left -= h->count[len]; /* deduct count from possible codes */ + if (left < 0) return left; /* over-subscribed--return negative */ + } /* left > 0 means incomplete */ + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) + offs[len + 1] = offs[len] + h->count[len]; + + /* + * put symbols in table sorted by length, by symbol order within each + * length + */ + for (symbol = 0; symbol < n; symbol++) + if (length[symbol] != 0) + h->symbol[offs[length[symbol]]++] = symbol; + + /* return zero for complete set, positive for incomplete set */ + return left; +} + +/* + * Decode PKWare Compression Library stream. + * + * Format notes: + * + * - First byte is 0 if literals are uncoded or 1 if they are coded. Second + * byte is 4, 5, or 6 for the number of extra bits in the distance code. + * This is the base-2 logarithm of the dictionary size minus six. + * + * - Compressed data is a combination of literals and length/distance pairs + * terminated by an end code. Literals are either Huffman coded or + * uncoded bytes. A length/distance pair is a coded length followed by a + * coded distance to represent a string that occurs earlier in the + * uncompressed data that occurs again at the current location. + * + * - A bit preceding a literal or length/distance pair indicates which comes + * next, 0 for literals, 1 for length/distance. + * + * - If literals are uncoded, then the next eight bits are the literal, in the + * normal bit order in th stream, i.e. no bit-reversal is needed. Similarly, + * no bit reversal is needed for either the length extra bits or the distance + * extra bits. + * + * - Literal bytes are simply written to the output. 
A length/distance pair is + * an instruction to copy previously uncompressed bytes to the output. The + * copy is from distance bytes back in the output stream, copying for length + * bytes. + * + * - Distances pointing before the beginning of the output data are not + * permitted. + * + * - Overlapped copies, where the length is greater than the distance, are + * allowed and common. For example, a distance of one and a length of 518 + * simply copies the last byte 518 times. A distance of four and a length of + * twelve copies the last four bytes three times. A simple forward copy + * ignoring whether the length is greater than the distance or not implements + * this correctly. + */ +local int decomp(struct state *s) +{ + int lit; /* true if literals are coded */ + int dict; /* log2(dictionary size) - 6 */ + int symbol; /* decoded symbol, extra bits for distance */ + int len; /* length for copy */ + unsigned dist; /* distance for copy */ + int copy; /* copy counter */ + unsigned char *from, *to; /* copy pointers */ + static int virgin = 1; /* build tables once */ + static short litcnt[MAXBITS+1], litsym[256]; /* litcode memory */ + static short lencnt[MAXBITS+1], lensym[16]; /* lencode memory */ + static short distcnt[MAXBITS+1], distsym[64]; /* distcode memory */ + static struct huffman litcode = {litcnt, litsym}; /* length code */ + static struct huffman lencode = {lencnt, lensym}; /* length code */ + static struct huffman distcode = {distcnt, distsym};/* distance code */ + /* bit lengths of literal codes */ + static const unsigned char litlen[] = { + 11, 124, 8, 7, 28, 7, 188, 13, 76, 4, 10, 8, 12, 10, 12, 10, 8, 23, 8, + 9, 7, 6, 7, 8, 7, 6, 55, 8, 23, 24, 12, 11, 7, 9, 11, 12, 6, 7, 22, 5, + 7, 24, 6, 11, 9, 6, 7, 22, 7, 11, 38, 7, 9, 8, 25, 11, 8, 11, 9, 12, + 8, 12, 5, 38, 5, 38, 5, 11, 7, 5, 6, 21, 6, 10, 53, 8, 7, 24, 10, 27, + 44, 253, 253, 253, 252, 252, 252, 13, 12, 45, 12, 45, 12, 61, 12, 45, + 44, 173}; + /* bit lengths of length codes 0..15 */ + 
static const unsigned char lenlen[] = {2, 35, 36, 53, 38, 23}; + /* bit lengths of distance codes 0..63 */ + static const unsigned char distlen[] = {2, 20, 53, 230, 247, 151, 248}; + static const short base[16] = { /* base for length codes */ + 3, 2, 4, 5, 6, 7, 8, 9, 10, 12, 16, 24, 40, 72, 136, 264}; + static const char extra[16] = { /* extra bits for length codes */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8}; + + /* set up decoding tables (once--might not be thread-safe) */ + if (virgin) { + construct(&litcode, litlen, sizeof(litlen)); + construct(&lencode, lenlen, sizeof(lenlen)); + construct(&distcode, distlen, sizeof(distlen)); + virgin = 0; + } + + /* read header */ + lit = bits(s, 8); + if (lit > 1) return -1; + dict = bits(s, 8); + if (dict < 4 || dict > 6) return -2; + + /* decode literals and length/distance pairs */ + do { + if (bits(s, 1)) { + /* get length */ + symbol = decode(s, &lencode); + len = base[symbol] + bits(s, extra[symbol]); + if (len == 519) break; /* end code */ + + /* get distance */ + symbol = len == 2 ? 2 : dict; + dist = decode(s, &distcode) << symbol; + dist += bits(s, symbol); + dist++; + if (s->first && dist > s->next) + return -3; /* distance too far back */ + + /* copy length bytes from distance bytes back */ + do { + to = s->out + s->next; + from = to - dist; + copy = MAXWIN; + if (s->next < dist) { + from += copy; + copy = dist; + } + copy -= s->next; + if (copy > len) copy = len; + len -= copy; + s->next += copy; + do { + *to++ = *from++; + } while (--copy); + if (s->next == MAXWIN) { + if (s->outfun(s->outhow, s->out, s->next)) return 1; + s->next = 0; + s->first = 0; + } + } while (len != 0); + } + else { + /* get literal and write it */ + symbol = lit ? 
decode(s, &litcode) : bits(s, 8); + s->out[s->next++] = symbol; + if (s->next == MAXWIN) { + if (s->outfun(s->outhow, s->out, s->next)) return 1; + s->next = 0; + s->first = 0; + } + } + } while (1); + return 0; +} + +/* See comments in blast.h */ +int blast(blast_in infun, void *inhow, blast_out outfun, void *outhow) +{ + struct state s; /* input/output state */ + int err; /* return value */ + + /* initialize input state */ + s.infun = infun; + s.inhow = inhow; + s.left = 0; + s.bitbuf = 0; + s.bitcnt = 0; + + /* initialize output state */ + s.outfun = outfun; + s.outhow = outhow; + s.next = 0; + s.first = 1; + + /* return if bits() or decode() tries to read past available input */ + if (setjmp(s.env) != 0) /* if came back here via longjmp(), */ + err = 2; /* then skip decomp(), return error */ + else + err = decomp(&s); /* decompress */ + + /* write any leftover output and update the error code if needed */ + if (err != 1 && s.next && s.outfun(s.outhow, s.out, s.next) && err == 0) + err = 1; + return err; +} + +#ifdef TEST +/* Example of how to use blast() */ +#include <stdio.h> +#include <stdlib.h> + +#define CHUNK 16384 + +local unsigned inf(void *how, unsigned char **buf) +{ + static unsigned char hold[CHUNK]; + + *buf = hold; + return fread(hold, 1, CHUNK, (FILE *)how); +} + +local int outf(void *how, unsigned char *buf, unsigned len) +{ + return fwrite(buf, 1, len, (FILE *)how) != len; +} + +/* Decompress a PKWare Compression Library stream from stdin to stdout */ +int main(void) +{ + int ret, n; + + /* decompress to stdout */ + ret = blast(inf, stdin, outf, stdout); + if (ret != 0) fprintf(stderr, "blast error: %d\n", ret); + + /* see if there are any leftover bytes */ + n = 0; + while (getchar() != EOF) n++; + if (n) fprintf(stderr, "blast warning: %d unused bytes of input\n", n); + + /* return blast() error code */ + return ret; +} +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/blast/blast.h nodejs-0.11.15/deps/zlib/contrib/blast/blast.h --- 
nodejs-0.11.13/deps/zlib/contrib/blast/blast.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/blast/blast.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,75 @@ +/* blast.h -- interface for blast.c + Copyright (C) 2003, 2012 Mark Adler + version 1.2, 24 Oct 2012 + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Mark Adler madler@alumni.caltech.edu + */ + + +/* + * blast() decompresses the PKWare Data Compression Library (DCL) compressed + * format. It provides the same functionality as the explode() function in + * that library. (Note: PKWare overused the "implode" verb, and the format + * used by their library implode() function is completely different and + * incompatible with the implode compression method supported by PKZIP.) + * + * The binary mode for stdio functions should be used to assure that the + * compressed data is not corrupted when read or written. For example: + * fopen(..., "rb") and fopen(..., "wb"). + */ + + +typedef unsigned (*blast_in)(void *how, unsigned char **buf); +typedef int (*blast_out)(void *how, unsigned char *buf, unsigned len); +/* Definitions for input/output functions passed to blast(). 
See below for + * what the provided functions need to do. + */ + + +int blast(blast_in infun, void *inhow, blast_out outfun, void *outhow); +/* Decompress input to output using the provided infun() and outfun() calls. + * On success, the return value of blast() is zero. If there is an error in + * the source data, i.e. it is not in the proper format, then a negative value + * is returned. If there is not enough input available or there is not enough + * output space, then a positive error is returned. + * + * The input function is invoked: len = infun(how, &buf), where buf is set by + * infun() to point to the input buffer, and infun() returns the number of + * available bytes there. If infun() returns zero, then blast() returns with + * an input error. (blast() only asks for input if it needs it.) inhow is for + * use by the application to pass an input descriptor to infun(), if desired. + * + * The output function is invoked: err = outfun(how, buf, len), where the bytes + * to be written are buf[0..len-1]. If err is not zero, then blast() returns + * with an output error. outfun() is always called with len <= 4096. outhow + * is for use by the application to pass an output descriptor to outfun(), if + * desired. + * + * The return codes are: + * + * 2: ran out of input before completing decompression + * 1: output error before completing decompression + * 0: successful decompression + * -1: literal flag not zero or one + * -2: dictionary size not in 4..6 + * -3: distance is too far back + * + * At the bottom of blast.c is an example program that uses blast() that can be + * compiled to produce a command-line decompression filter by defining TEST. 
+ */ diff -Nru nodejs-0.11.13/deps/zlib/contrib/blast/Makefile nodejs-0.11.15/deps/zlib/contrib/blast/Makefile --- nodejs-0.11.13/deps/zlib/contrib/blast/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/blast/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,8 @@ +blast: blast.c blast.h + cc -DTEST -o blast blast.c + +test: blast + blast < test.pk | cmp - test.txt + +clean: + rm -f blast blast.o diff -Nru nodejs-0.11.13/deps/zlib/contrib/blast/README nodejs-0.11.15/deps/zlib/contrib/blast/README --- nodejs-0.11.13/deps/zlib/contrib/blast/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/blast/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,4 @@ +Read blast.h for purpose and usage. + +Mark Adler +madler@alumni.caltech.edu Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/zlib/contrib/blast/test.pk and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/zlib/contrib/blast/test.pk differ diff -Nru nodejs-0.11.13/deps/zlib/contrib/blast/test.txt nodejs-0.11.15/deps/zlib/contrib/blast/test.txt --- nodejs-0.11.13/deps/zlib/contrib/blast/test.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/blast/test.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +AIAIAIAIAIAIA \ No newline at end of file diff -Nru nodejs-0.11.13/deps/zlib/contrib/delphi/readme.txt nodejs-0.11.15/deps/zlib/contrib/delphi/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/delphi/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/delphi/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,76 @@ + +Overview +======== + +This directory contains an update to the ZLib interface unit, +distributed by Borland as a Delphi supplemental component. + +The original ZLib unit is Copyright (c) 1997,99 Borland Corp., +and is based on zlib version 1.0.4. 
There are a series of bugs +and security problems associated with that old zlib version, and +we recommend the users to update their ZLib unit. + + +Summary of modifications +======================== + +- Improved makefile, adapted to zlib version 1.2.1. + +- Some field types from TZStreamRec are changed from Integer to + Longint, for consistency with the zlib.h header, and for 64-bit + readiness. + +- The zlib_version constant is updated. + +- The new Z_RLE strategy has its corresponding symbolic constant. + +- The allocation and deallocation functions and function types + (TAlloc, TFree, zlibAllocMem and zlibFreeMem) are now cdecl, + and _malloc and _free are added as C RTL stubs. As a result, + the original C sources of zlib can be compiled out of the box, + and linked to the ZLib unit. + + +Suggestions for improvements +============================ + +Currently, the ZLib unit provides only a limited wrapper around +the zlib library, and much of the original zlib functionality is +missing. Handling compressed file formats like ZIP/GZIP or PNG +cannot be implemented without having this functionality. +Applications that handle these formats are either using their own, +duplicated code, or not using the ZLib unit at all. + +Here are a few suggestions: + +- Checksum class wrappers around adler32() and crc32(), similar + to the Java classes that implement the java.util.zip.Checksum + interface. + +- The ability to read and write raw deflate streams, without the + zlib stream header and trailer. Raw deflate streams are used + in the ZIP file format. + +- The ability to read and write gzip streams, used in the GZIP + file format, and normally produced by the gzip program. + +- The ability to select a different compression strategy, useful + to PNG and MNG image compression, and to multimedia compression + in general. 
Besides the compression level + + TCompressionLevel = (clNone, clFastest, clDefault, clMax); + + which, in fact, could have used the 'z' prefix and avoided + TColor-like symbols + + TCompressionLevel = (zcNone, zcFastest, zcDefault, zcMax); + + there could be a compression strategy + + TCompressionStrategy = (zsDefault, zsFiltered, zsHuffmanOnly, zsRle); + +- ZIP and GZIP stream handling via TStreams. + + +-- +Cosmin Truta <cosmint@cs.ubbcluj.ro> diff -Nru nodejs-0.11.13/deps/zlib/contrib/delphi/ZLibConst.pas nodejs-0.11.15/deps/zlib/contrib/delphi/ZLibConst.pas --- nodejs-0.11.13/deps/zlib/contrib/delphi/ZLibConst.pas 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/delphi/ZLibConst.pas 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,11 @@ +unit ZLibConst; + +interface + +resourcestring + sTargetBufferTooSmall = 'ZLib error: target buffer may be too small'; + sInvalidStreamOp = 'Invalid stream operation'; + +implementation + +end. diff -Nru nodejs-0.11.13/deps/zlib/contrib/delphi/zlibd32.mak nodejs-0.11.15/deps/zlib/contrib/delphi/zlibd32.mak --- nodejs-0.11.13/deps/zlib/contrib/delphi/zlibd32.mak 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/delphi/zlibd32.mak 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,99 @@ +# Makefile for zlib +# For use with Delphi and C++ Builder under Win32 +# Updated for zlib 1.2.x by Cosmin Truta + +# ------------ Borland C++ ------------ + +# This project uses the Delphi (fastcall/register) calling convention: +LOC = -DZEXPORT=__fastcall -DZEXPORTVA=__cdecl + +CC = bcc32 +LD = bcc32 +AR = tlib +# do not use "-pr" in CFLAGS +CFLAGS = -a -d -k- -O2 $(LOC) +LDFLAGS = + + +# variables +ZLIB_LIB = zlib.lib + +OBJ1 = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj +OBJ2 = gzwrite.obj infback.obj inffast.obj inflate.obj inftrees.obj trees.obj uncompr.obj zutil.obj +OBJP1 = +adler32.obj+compress.obj+crc32.obj+deflate.obj+gzclose.obj+gzlib.obj+gzread.obj +OBJP2 = 
+gzwrite.obj+infback.obj+inffast.obj+inflate.obj+inftrees.obj+trees.obj+uncompr.obj+zutil.obj + + +# targets +all: $(ZLIB_LIB) example.exe minigzip.exe + +.c.obj: + $(CC) -c $(CFLAGS) $*.c + +adler32.obj: adler32.c zlib.h zconf.h + +compress.obj: compress.c zlib.h zconf.h + +crc32.obj: crc32.c zlib.h zconf.h crc32.h + +deflate.obj: deflate.c deflate.h zutil.h zlib.h zconf.h + +gzclose.obj: gzclose.c zlib.h zconf.h gzguts.h + +gzlib.obj: gzlib.c zlib.h zconf.h gzguts.h + +gzread.obj: gzread.c zlib.h zconf.h gzguts.h + +gzwrite.obj: gzwrite.c zlib.h zconf.h gzguts.h + +infback.obj: infback.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inffast.obj: inffast.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h + +inflate.obj: inflate.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inftrees.obj: inftrees.c zutil.h zlib.h zconf.h inftrees.h + +trees.obj: trees.c zutil.h zlib.h zconf.h deflate.h trees.h + +uncompr.obj: uncompr.c zlib.h zconf.h + +zutil.obj: zutil.c zutil.h zlib.h zconf.h + +example.obj: test/example.c zlib.h zconf.h + +minigzip.obj: test/minigzip.c zlib.h zconf.h + + +# For the sake of the old Borland make, +# the command line is cut to fit in the MS-DOS 128 byte limit: +$(ZLIB_LIB): $(OBJ1) $(OBJ2) + -del $(ZLIB_LIB) + $(AR) $(ZLIB_LIB) $(OBJP1) + $(AR) $(ZLIB_LIB) $(OBJP2) + + +# testing +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +example.exe: example.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) example.obj $(ZLIB_LIB) + +minigzip.exe: minigzip.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) minigzip.obj $(ZLIB_LIB) + + +# cleanup +clean: + -del *.obj + -del *.exe + -del *.lib + -del *.tds + -del zlib.bak + -del foo.gz + diff -Nru nodejs-0.11.13/deps/zlib/contrib/delphi/ZLib.pas nodejs-0.11.15/deps/zlib/contrib/delphi/ZLib.pas --- nodejs-0.11.13/deps/zlib/contrib/delphi/ZLib.pas 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/delphi/ZLib.pas 
2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,557 @@ +{*******************************************************} +{ } +{ Borland Delphi Supplemental Components } +{ ZLIB Data Compression Interface Unit } +{ } +{ Copyright (c) 1997,99 Borland Corporation } +{ } +{*******************************************************} + +{ Updated for zlib 1.2.x by Cosmin Truta <cosmint@cs.ubbcluj.ro> } + +unit ZLib; + +interface + +uses SysUtils, Classes; + +type + TAlloc = function (AppData: Pointer; Items, Size: Integer): Pointer; cdecl; + TFree = procedure (AppData, Block: Pointer); cdecl; + + // Internal structure. Ignore. + TZStreamRec = packed record + next_in: PChar; // next input byte + avail_in: Integer; // number of bytes available at next_in + total_in: Longint; // total nb of input bytes read so far + + next_out: PChar; // next output byte should be put here + avail_out: Integer; // remaining free space at next_out + total_out: Longint; // total nb of bytes output so far + + msg: PChar; // last error message, NULL if no error + internal: Pointer; // not visible by applications + + zalloc: TAlloc; // used to allocate the internal state + zfree: TFree; // used to free the internal state + AppData: Pointer; // private data object passed to zalloc and zfree + + data_type: Integer; // best guess about the data type: ascii or binary + adler: Longint; // adler32 value of the uncompressed data + reserved: Longint; // reserved for future use + end; + + // Abstract ancestor class + TCustomZlibStream = class(TStream) + private + FStrm: TStream; + FStrmPos: Integer; + FOnProgress: TNotifyEvent; + FZRec: TZStreamRec; + FBuffer: array [Word] of Char; + protected + procedure Progress(Sender: TObject); dynamic; + property OnProgress: TNotifyEvent read FOnProgress write FOnProgress; + constructor Create(Strm: TStream); + end; + +{ TCompressionStream compresses data on the fly as data is written to it, and + stores the compressed data to another stream. 
+ + TCompressionStream is write-only and strictly sequential. Reading from the + stream will raise an exception. Using Seek to move the stream pointer + will raise an exception. + + Output data is cached internally, written to the output stream only when + the internal output buffer is full. All pending output data is flushed + when the stream is destroyed. + + The Position property returns the number of uncompressed bytes of + data that have been written to the stream so far. + + CompressionRate returns the on-the-fly percentage by which the original + data has been compressed: (1 - (CompressedBytes / UncompressedBytes)) * 100 + If raw data size = 100 and compressed data size = 25, the CompressionRate + is 75% + + The OnProgress event is called each time the output buffer is filled and + written to the output stream. This is useful for updating a progress + indicator when you are writing a large chunk of data to the compression + stream in a single call.} + + + TCompressionLevel = (clNone, clFastest, clDefault, clMax); + + TCompressionStream = class(TCustomZlibStream) + private + function GetCompressionRate: Single; + public + constructor Create(CompressionLevel: TCompressionLevel; Dest: TStream); + destructor Destroy; override; + function Read(var Buffer; Count: Longint): Longint; override; + function Write(const Buffer; Count: Longint): Longint; override; + function Seek(Offset: Longint; Origin: Word): Longint; override; + property CompressionRate: Single read GetCompressionRate; + property OnProgress; + end; + +{ TDecompressionStream decompresses data on the fly as data is read from it. + + Compressed data comes from a separate source stream. TDecompressionStream + is read-only and unidirectional; you can seek forward in the stream, but not + backwards. The special case of setting the stream position to zero is + allowed. Seeking forward decompresses data until the requested position in + the uncompressed data has been reached. 
Seeking backwards, seeking relative + to the end of the stream, requesting the size of the stream, and writing to + the stream will raise an exception. + + The Position property returns the number of bytes of uncompressed data that + have been read from the stream so far. + + The OnProgress event is called each time the internal input buffer of + compressed data is exhausted and the next block is read from the input stream. + This is useful for updating a progress indicator when you are reading a + large chunk of data from the decompression stream in a single call.} + + TDecompressionStream = class(TCustomZlibStream) + public + constructor Create(Source: TStream); + destructor Destroy; override; + function Read(var Buffer; Count: Longint): Longint; override; + function Write(const Buffer; Count: Longint): Longint; override; + function Seek(Offset: Longint; Origin: Word): Longint; override; + property OnProgress; + end; + + + +{ CompressBuf compresses data, buffer to buffer, in one call. + In: InBuf = ptr to compressed data + InBytes = number of bytes in InBuf + Out: OutBuf = ptr to newly allocated buffer containing decompressed data + OutBytes = number of bytes in OutBuf } +procedure CompressBuf(const InBuf: Pointer; InBytes: Integer; + out OutBuf: Pointer; out OutBytes: Integer); + + +{ DecompressBuf decompresses data, buffer to buffer, in one call. + In: InBuf = ptr to compressed data + InBytes = number of bytes in InBuf + OutEstimate = zero, or est. size of the decompressed data + Out: OutBuf = ptr to newly allocated buffer containing decompressed data + OutBytes = number of bytes in OutBuf } +procedure DecompressBuf(const InBuf: Pointer; InBytes: Integer; + OutEstimate: Integer; out OutBuf: Pointer; out OutBytes: Integer); + +{ DecompressToUserBuf decompresses data, buffer to buffer, in one call. 
+ In: InBuf = ptr to compressed data + InBytes = number of bytes in InBuf + Out: OutBuf = ptr to user-allocated buffer to contain decompressed data + BufSize = number of bytes in OutBuf } +procedure DecompressToUserBuf(const InBuf: Pointer; InBytes: Integer; + const OutBuf: Pointer; BufSize: Integer); + +const + zlib_version = '1.2.8'; + +type + EZlibError = class(Exception); + ECompressionError = class(EZlibError); + EDecompressionError = class(EZlibError); + +implementation + +uses ZLibConst; + +const + Z_NO_FLUSH = 0; + Z_PARTIAL_FLUSH = 1; + Z_SYNC_FLUSH = 2; + Z_FULL_FLUSH = 3; + Z_FINISH = 4; + + Z_OK = 0; + Z_STREAM_END = 1; + Z_NEED_DICT = 2; + Z_ERRNO = (-1); + Z_STREAM_ERROR = (-2); + Z_DATA_ERROR = (-3); + Z_MEM_ERROR = (-4); + Z_BUF_ERROR = (-5); + Z_VERSION_ERROR = (-6); + + Z_NO_COMPRESSION = 0; + Z_BEST_SPEED = 1; + Z_BEST_COMPRESSION = 9; + Z_DEFAULT_COMPRESSION = (-1); + + Z_FILTERED = 1; + Z_HUFFMAN_ONLY = 2; + Z_RLE = 3; + Z_DEFAULT_STRATEGY = 0; + + Z_BINARY = 0; + Z_ASCII = 1; + Z_UNKNOWN = 2; + + Z_DEFLATED = 8; + + +{$L adler32.obj} +{$L compress.obj} +{$L crc32.obj} +{$L deflate.obj} +{$L infback.obj} +{$L inffast.obj} +{$L inflate.obj} +{$L inftrees.obj} +{$L trees.obj} +{$L uncompr.obj} +{$L zutil.obj} + +procedure adler32; external; +procedure compressBound; external; +procedure crc32; external; +procedure deflateInit2_; external; +procedure deflateParams; external; + +function _malloc(Size: Integer): Pointer; cdecl; +begin + Result := AllocMem(Size); +end; + +procedure _free(Block: Pointer); cdecl; +begin + FreeMem(Block); +end; + +procedure _memset(P: Pointer; B: Byte; count: Integer); cdecl; +begin + FillChar(P^, count, B); +end; + +procedure _memcpy(dest, source: Pointer; count: Integer); cdecl; +begin + Move(source^, dest^, count); +end; + + + +// deflate compresses data +function deflateInit_(var strm: TZStreamRec; level: Integer; version: PChar; + recsize: Integer): Integer; external; +function deflate(var strm: TZStreamRec; flush: 
Integer): Integer; external; +function deflateEnd(var strm: TZStreamRec): Integer; external; + +// inflate decompresses data +function inflateInit_(var strm: TZStreamRec; version: PChar; + recsize: Integer): Integer; external; +function inflate(var strm: TZStreamRec; flush: Integer): Integer; external; +function inflateEnd(var strm: TZStreamRec): Integer; external; +function inflateReset(var strm: TZStreamRec): Integer; external; + + +function zlibAllocMem(AppData: Pointer; Items, Size: Integer): Pointer; cdecl; +begin +// GetMem(Result, Items*Size); + Result := AllocMem(Items * Size); +end; + +procedure zlibFreeMem(AppData, Block: Pointer); cdecl; +begin + FreeMem(Block); +end; + +{function zlibCheck(code: Integer): Integer; +begin + Result := code; + if code < 0 then + raise EZlibError.Create('error'); //!! +end;} + +function CCheck(code: Integer): Integer; +begin + Result := code; + if code < 0 then + raise ECompressionError.Create('error'); //!! +end; + +function DCheck(code: Integer): Integer; +begin + Result := code; + if code < 0 then + raise EDecompressionError.Create('error'); //!! 
+end; + +procedure CompressBuf(const InBuf: Pointer; InBytes: Integer; + out OutBuf: Pointer; out OutBytes: Integer); +var + strm: TZStreamRec; + P: Pointer; +begin + FillChar(strm, sizeof(strm), 0); + strm.zalloc := zlibAllocMem; + strm.zfree := zlibFreeMem; + OutBytes := ((InBytes + (InBytes div 10) + 12) + 255) and not 255; + GetMem(OutBuf, OutBytes); + try + strm.next_in := InBuf; + strm.avail_in := InBytes; + strm.next_out := OutBuf; + strm.avail_out := OutBytes; + CCheck(deflateInit_(strm, Z_BEST_COMPRESSION, zlib_version, sizeof(strm))); + try + while CCheck(deflate(strm, Z_FINISH)) <> Z_STREAM_END do + begin + P := OutBuf; + Inc(OutBytes, 256); + ReallocMem(OutBuf, OutBytes); + strm.next_out := PChar(Integer(OutBuf) + (Integer(strm.next_out) - Integer(P))); + strm.avail_out := 256; + end; + finally + CCheck(deflateEnd(strm)); + end; + ReallocMem(OutBuf, strm.total_out); + OutBytes := strm.total_out; + except + FreeMem(OutBuf); + raise + end; +end; + + +procedure DecompressBuf(const InBuf: Pointer; InBytes: Integer; + OutEstimate: Integer; out OutBuf: Pointer; out OutBytes: Integer); +var + strm: TZStreamRec; + P: Pointer; + BufInc: Integer; +begin + FillChar(strm, sizeof(strm), 0); + strm.zalloc := zlibAllocMem; + strm.zfree := zlibFreeMem; + BufInc := (InBytes + 255) and not 255; + if OutEstimate = 0 then + OutBytes := BufInc + else + OutBytes := OutEstimate; + GetMem(OutBuf, OutBytes); + try + strm.next_in := InBuf; + strm.avail_in := InBytes; + strm.next_out := OutBuf; + strm.avail_out := OutBytes; + DCheck(inflateInit_(strm, zlib_version, sizeof(strm))); + try + while DCheck(inflate(strm, Z_NO_FLUSH)) <> Z_STREAM_END do + begin + P := OutBuf; + Inc(OutBytes, BufInc); + ReallocMem(OutBuf, OutBytes); + strm.next_out := PChar(Integer(OutBuf) + (Integer(strm.next_out) - Integer(P))); + strm.avail_out := BufInc; + end; + finally + DCheck(inflateEnd(strm)); + end; + ReallocMem(OutBuf, strm.total_out); + OutBytes := strm.total_out; + except + FreeMem(OutBuf); 
+ raise + end; +end; + +procedure DecompressToUserBuf(const InBuf: Pointer; InBytes: Integer; + const OutBuf: Pointer; BufSize: Integer); +var + strm: TZStreamRec; +begin + FillChar(strm, sizeof(strm), 0); + strm.zalloc := zlibAllocMem; + strm.zfree := zlibFreeMem; + strm.next_in := InBuf; + strm.avail_in := InBytes; + strm.next_out := OutBuf; + strm.avail_out := BufSize; + DCheck(inflateInit_(strm, zlib_version, sizeof(strm))); + try + if DCheck(inflate(strm, Z_FINISH)) <> Z_STREAM_END then + raise EZlibError.CreateRes(@sTargetBufferTooSmall); + finally + DCheck(inflateEnd(strm)); + end; +end; + +// TCustomZlibStream + +constructor TCustomZLibStream.Create(Strm: TStream); +begin + inherited Create; + FStrm := Strm; + FStrmPos := Strm.Position; + FZRec.zalloc := zlibAllocMem; + FZRec.zfree := zlibFreeMem; +end; + +procedure TCustomZLibStream.Progress(Sender: TObject); +begin + if Assigned(FOnProgress) then FOnProgress(Sender); +end; + + +// TCompressionStream + +constructor TCompressionStream.Create(CompressionLevel: TCompressionLevel; + Dest: TStream); +const + Levels: array [TCompressionLevel] of ShortInt = + (Z_NO_COMPRESSION, Z_BEST_SPEED, Z_DEFAULT_COMPRESSION, Z_BEST_COMPRESSION); +begin + inherited Create(Dest); + FZRec.next_out := FBuffer; + FZRec.avail_out := sizeof(FBuffer); + CCheck(deflateInit_(FZRec, Levels[CompressionLevel], zlib_version, sizeof(FZRec))); +end; + +destructor TCompressionStream.Destroy; +begin + FZRec.next_in := nil; + FZRec.avail_in := 0; + try + if FStrm.Position <> FStrmPos then FStrm.Position := FStrmPos; + while (CCheck(deflate(FZRec, Z_FINISH)) <> Z_STREAM_END) + and (FZRec.avail_out = 0) do + begin + FStrm.WriteBuffer(FBuffer, sizeof(FBuffer)); + FZRec.next_out := FBuffer; + FZRec.avail_out := sizeof(FBuffer); + end; + if FZRec.avail_out < sizeof(FBuffer) then + FStrm.WriteBuffer(FBuffer, sizeof(FBuffer) - FZRec.avail_out); + finally + deflateEnd(FZRec); + end; + inherited Destroy; +end; + +function TCompressionStream.Read(var 
Buffer; Count: Longint): Longint; +begin + raise ECompressionError.CreateRes(@sInvalidStreamOp); +end; + +function TCompressionStream.Write(const Buffer; Count: Longint): Longint; +begin + FZRec.next_in := @Buffer; + FZRec.avail_in := Count; + if FStrm.Position <> FStrmPos then FStrm.Position := FStrmPos; + while (FZRec.avail_in > 0) do + begin + CCheck(deflate(FZRec, 0)); + if FZRec.avail_out = 0 then + begin + FStrm.WriteBuffer(FBuffer, sizeof(FBuffer)); + FZRec.next_out := FBuffer; + FZRec.avail_out := sizeof(FBuffer); + FStrmPos := FStrm.Position; + Progress(Self); + end; + end; + Result := Count; +end; + +function TCompressionStream.Seek(Offset: Longint; Origin: Word): Longint; +begin + if (Offset = 0) and (Origin = soFromCurrent) then + Result := FZRec.total_in + else + raise ECompressionError.CreateRes(@sInvalidStreamOp); +end; + +function TCompressionStream.GetCompressionRate: Single; +begin + if FZRec.total_in = 0 then + Result := 0 + else + Result := (1.0 - (FZRec.total_out / FZRec.total_in)) * 100.0; +end; + + +// TDecompressionStream + +constructor TDecompressionStream.Create(Source: TStream); +begin + inherited Create(Source); + FZRec.next_in := FBuffer; + FZRec.avail_in := 0; + DCheck(inflateInit_(FZRec, zlib_version, sizeof(FZRec))); +end; + +destructor TDecompressionStream.Destroy; +begin + FStrm.Seek(-FZRec.avail_in, 1); + inflateEnd(FZRec); + inherited Destroy; +end; + +function TDecompressionStream.Read(var Buffer; Count: Longint): Longint; +begin + FZRec.next_out := @Buffer; + FZRec.avail_out := Count; + if FStrm.Position <> FStrmPos then FStrm.Position := FStrmPos; + while (FZRec.avail_out > 0) do + begin + if FZRec.avail_in = 0 then + begin + FZRec.avail_in := FStrm.Read(FBuffer, sizeof(FBuffer)); + if FZRec.avail_in = 0 then + begin + Result := Count - FZRec.avail_out; + Exit; + end; + FZRec.next_in := FBuffer; + FStrmPos := FStrm.Position; + Progress(Self); + end; + CCheck(inflate(FZRec, 0)); + end; + Result := Count; +end; + +function 
TDecompressionStream.Write(const Buffer; Count: Longint): Longint; +begin + raise EDecompressionError.CreateRes(@sInvalidStreamOp); +end; + +function TDecompressionStream.Seek(Offset: Longint; Origin: Word): Longint; +var + I: Integer; + Buf: array [0..4095] of Char; +begin + if (Offset = 0) and (Origin = soFromBeginning) then + begin + DCheck(inflateReset(FZRec)); + FZRec.next_in := FBuffer; + FZRec.avail_in := 0; + FStrm.Position := 0; + FStrmPos := 0; + end + else if ( (Offset >= 0) and (Origin = soFromCurrent)) or + ( ((Offset - FZRec.total_out) > 0) and (Origin = soFromBeginning)) then + begin + if Origin = soFromBeginning then Dec(Offset, FZRec.total_out); + if Offset > 0 then + begin + for I := 1 to Offset div sizeof(Buf) do + ReadBuffer(Buf, sizeof(Buf)); + ReadBuffer(Buf, Offset mod sizeof(Buf)); + end; + end + else + raise EDecompressionError.CreateRes(@sInvalidStreamOp); + Result := FZRec.total_out; +end; + + +end. diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/AssemblyInfo.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/AssemblyInfo.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/AssemblyInfo.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/AssemblyInfo.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,58 @@ +using System.Reflection; +using System.Runtime.CompilerServices; + +// +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. 
+// +[assembly: AssemblyTitle("DotZLib")] +[assembly: AssemblyDescription(".Net bindings for ZLib compression dll 1.2.x")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("Henrik Ravn")] +[assembly: AssemblyProduct("")] +[assembly: AssemblyCopyright("(c) 2004 by Henrik Ravn")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Revision and Build Numbers +// by using the '*' as shown below: + +[assembly: AssemblyVersion("1.0.*")] + +// +// In order to sign your assembly you must specify a key to use. Refer to the +// Microsoft .NET Framework documentation for more information on assembly signing. +// +// Use the attributes below to control which key is used for signing. +// +// Notes: +// (*) If no key is specified, the assembly is not signed. +// (*) KeyName refers to a key that has been installed in the Crypto Service +// Provider (CSP) on your machine. KeyFile refers to a file which contains +// a key. +// (*) If the KeyFile and the KeyName values are both specified, the +// following processing occurs: +// (1) If the KeyName can be found in the CSP, that key is used. +// (2) If the KeyName does not exist and the KeyFile does exist, the key +// in the KeyFile is installed into the CSP and used. +// (*) In order to create a KeyFile, you can use the sn.exe (Strong Name) utility. +// When specifying the KeyFile, the location of the KeyFile should be +// relative to the project output directory which is +// %Project Directory%\obj\<configuration>. 
For example, if your KeyFile is +// located in the project directory, you would specify the AssemblyKeyFile +// attribute as [assembly: AssemblyKeyFile("..\\..\\mykey.snk")] +// (*) Delay Signing is an advanced option - see the Microsoft .NET Framework +// documentation for more information on this. +// +[assembly: AssemblyDelaySign(false)] +[assembly: AssemblyKeyFile("")] +[assembly: AssemblyKeyName("")] diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/ChecksumImpl.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/ChecksumImpl.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/ChecksumImpl.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/ChecksumImpl.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,202 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Runtime.InteropServices; +using System.Text; + + +namespace DotZLib +{ + #region ChecksumGeneratorBase + /// <summary> + /// Implements the common functionality needed for all <see cref="ChecksumGenerator"/>s + /// </summary> + /// <example></example> + public abstract class ChecksumGeneratorBase : ChecksumGenerator + { + /// <summary> + /// The value of the current checksum + /// </summary> + protected uint _current; + + /// <summary> + /// Initializes a new instance of the checksum generator base - the current checksum is + /// set to zero + /// </summary> + public ChecksumGeneratorBase() + { + _current = 0; + } + + /// <summary> + /// Initializes a new instance of the checksum generator basewith a specified value + /// </summary> + /// <param name="initialValue">The value to set the current checksum to</param> + public ChecksumGeneratorBase(uint initialValue) + { + _current = initialValue; + } + + /// <summary> + /// Resets the current checksum 
to zero + /// </summary> + public void Reset() { _current = 0; } + + /// <summary> + /// Gets the current checksum value + /// </summary> + public uint Value { get { return _current; } } + + /// <summary> + /// Updates the current checksum with part of an array of bytes + /// </summary> + /// <param name="data">The data to update the checksum with</param> + /// <param name="offset">Where in <c>data</c> to start updating</param> + /// <param name="count">The number of bytes from <c>data</c> to use</param> + /// <exception cref="ArgumentException">The sum of offset and count is larger than the length of <c>data</c></exception> + /// <exception cref="NullReferenceException"><c>data</c> is a null reference</exception> + /// <exception cref="ArgumentOutOfRangeException">Offset or count is negative.</exception> + /// <remarks>All the other <c>Update</c> methods are implmeneted in terms of this one. + /// This is therefore the only method a derived class has to implement</remarks> + public abstract void Update(byte[] data, int offset, int count); + + /// <summary> + /// Updates the current checksum with an array of bytes. 
+ /// </summary> + /// <param name="data">The data to update the checksum with</param> + public void Update(byte[] data) + { + Update(data, 0, data.Length); + } + + /// <summary> + /// Updates the current checksum with the data from a string + /// </summary> + /// <param name="data">The string to update the checksum with</param> + /// <remarks>The characters in the string are converted by the UTF-8 encoding</remarks> + public void Update(string data) + { + Update(Encoding.UTF8.GetBytes(data)); + } + + /// <summary> + /// Updates the current checksum with the data from a string, using a specific encoding + /// </summary> + /// <param name="data">The string to update the checksum with</param> + /// <param name="encoding">The encoding to use</param> + public void Update(string data, Encoding encoding) + { + Update(encoding.GetBytes(data)); + } + + } + #endregion + + #region CRC32 + /// <summary> + /// Implements a CRC32 checksum generator + /// </summary> + public sealed class CRC32Checksum : ChecksumGeneratorBase + { + #region DLL imports + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern uint crc32(uint crc, int data, uint length); + + #endregion + + /// <summary> + /// Initializes a new instance of the CRC32 checksum generator + /// </summary> + public CRC32Checksum() : base() {} + + /// <summary> + /// Initializes a new instance of the CRC32 checksum generator with a specified value + /// </summary> + /// <param name="initialValue">The value to set the current checksum to</param> + public CRC32Checksum(uint initialValue) : base(initialValue) {} + + /// <summary> + /// Updates the current checksum with part of an array of bytes + /// </summary> + /// <param name="data">The data to update the checksum with</param> + /// <param name="offset">Where in <c>data</c> to start updating</param> + /// <param name="count">The number of bytes from <c>data</c> to use</param> + /// <exception cref="ArgumentException">The sum of offset 
and count is larger than the length of <c>data</c></exception> + /// <exception cref="NullReferenceException"><c>data</c> is a null reference</exception> + /// <exception cref="ArgumentOutOfRangeException">Offset or count is negative.</exception> + public override void Update(byte[] data, int offset, int count) + { + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > data.Length) throw new ArgumentException(); + GCHandle hData = GCHandle.Alloc(data, GCHandleType.Pinned); + try + { + _current = crc32(_current, hData.AddrOfPinnedObject().ToInt32()+offset, (uint)count); + } + finally + { + hData.Free(); + } + } + + } + #endregion + + #region Adler + /// <summary> + /// Implements a checksum generator that computes the Adler checksum on data + /// </summary> + public sealed class AdlerChecksum : ChecksumGeneratorBase + { + #region DLL imports + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern uint adler32(uint adler, int data, uint length); + + #endregion + + /// <summary> + /// Initializes a new instance of the Adler checksum generator + /// </summary> + public AdlerChecksum() : base() {} + + /// <summary> + /// Initializes a new instance of the Adler checksum generator with a specified value + /// </summary> + /// <param name="initialValue">The value to set the current checksum to</param> + public AdlerChecksum(uint initialValue) : base(initialValue) {} + + /// <summary> + /// Updates the current checksum with part of an array of bytes + /// </summary> + /// <param name="data">The data to update the checksum with</param> + /// <param name="offset">Where in <c>data</c> to start updating</param> + /// <param name="count">The number of bytes from <c>data</c> to use</param> + /// <exception cref="ArgumentException">The sum of offset and count is larger than the length of <c>data</c></exception> + /// <exception cref="NullReferenceException"><c>data</c> is a null reference</exception> + 
/// <exception cref="ArgumentOutOfRangeException">Offset or count is negative.</exception> + public override void Update(byte[] data, int offset, int count) + { + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > data.Length) throw new ArgumentException(); + GCHandle hData = GCHandle.Alloc(data, GCHandleType.Pinned); + try + { + _current = adler32(_current, hData.AddrOfPinnedObject().ToInt32()+offset, (uint)count); + } + finally + { + hData.Free(); + } + } + + } + #endregion + +} \ No newline at end of file diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/CircularBuffer.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/CircularBuffer.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/CircularBuffer.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/CircularBuffer.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Diagnostics; + +namespace DotZLib +{ + + /// <summary> + /// This class implements a circular buffer + /// </summary> + internal class CircularBuffer + { + #region Private data + private int _capacity; + private int _head; + private int _tail; + private int _size; + private byte[] _buffer; + #endregion + + public CircularBuffer(int capacity) + { + Debug.Assert( capacity > 0 ); + _buffer = new byte[capacity]; + _capacity = capacity; + _head = 0; + _tail = 0; + _size = 0; + } + + public int Size { get { return _size; } } + + public int Put(byte[] source, int offset, int count) + { + Debug.Assert( count > 0 ); + int trueCount = Math.Min(count, _capacity - Size); + for (int i = 0; i < trueCount; ++i) + _buffer[(_tail+i) % _capacity] = source[offset+i]; + _tail += trueCount; + _tail %= _capacity; + _size += trueCount; + return trueCount; + } + + public bool Put(byte b) + { + if (Size == _capacity) // no room + return false; + _buffer[_tail++] = b; + _tail %= _capacity; + ++_size; + return true; + } + + public int Get(byte[] destination, int offset, int count) + { + int trueCount = Math.Min(count,Size); + for (int i = 0; i < trueCount; ++i) + destination[offset + i] = _buffer[(_head+i) % _capacity]; + _head += trueCount; + _head %= _capacity; + _size -= trueCount; + return trueCount; + } + + public int Get() + { + if (Size == 0) + return -1; + + int result = (int)_buffer[_head++ % _capacity]; + --_size; + return result; + } + + } +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/CodecBase.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/CodecBase.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/CodecBase.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/CodecBase.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,198 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and 
distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Runtime.InteropServices; + +namespace DotZLib +{ + /// <summary> + /// Implements the common functionality needed for all <see cref="Codec"/>s + /// </summary> + public abstract class CodecBase : Codec, IDisposable + { + + #region Data members + + /// <summary> + /// Instance of the internal zlib buffer structure that is + /// passed to all functions in the zlib dll + /// </summary> + internal ZStream _ztream = new ZStream(); + + /// <summary> + /// True if the object instance has been disposed, false otherwise + /// </summary> + protected bool _isDisposed = false; + + /// <summary> + /// The size of the internal buffers + /// </summary> + protected const int kBufferSize = 16384; + + private byte[] _outBuffer = new byte[kBufferSize]; + private byte[] _inBuffer = new byte[kBufferSize]; + + private GCHandle _hInput; + private GCHandle _hOutput; + + private uint _checksum = 0; + + #endregion + + /// <summary> + /// Initializes a new instance of the <c>CodeBase</c> class. + /// </summary> + public CodecBase() + { + try + { + _hInput = GCHandle.Alloc(_inBuffer, GCHandleType.Pinned); + _hOutput = GCHandle.Alloc(_outBuffer, GCHandleType.Pinned); + } + catch (Exception) + { + CleanUp(false); + throw; + } + } + + + #region Codec Members + + /// <summary> + /// Occurs when more processed data are available. + /// </summary> + public event DataAvailableHandler DataAvailable; + + /// <summary> + /// Fires the <see cref="DataAvailable"/> event + /// </summary> + protected void OnDataAvailable() + { + if (_ztream.total_out > 0) + { + if (DataAvailable != null) + DataAvailable( _outBuffer, 0, (int)_ztream.total_out); + resetOutput(); + } + } + + /// <summary> + /// Adds more data to the codec to be processed. 
+ /// </summary> + /// <param name="data">Byte array containing the data to be added to the codec</param> + /// <remarks>Adding data may, or may not, raise the <c>DataAvailable</c> event</remarks> + public void Add(byte[] data) + { + Add(data,0,data.Length); + } + + /// <summary> + /// Adds more data to the codec to be processed. + /// </summary> + /// <param name="data">Byte array containing the data to be added to the codec</param> + /// <param name="offset">The index of the first byte to add from <c>data</c></param> + /// <param name="count">The number of bytes to add</param> + /// <remarks>Adding data may, or may not, raise the <c>DataAvailable</c> event</remarks> + /// <remarks>This must be implemented by a derived class</remarks> + public abstract void Add(byte[] data, int offset, int count); + + /// <summary> + /// Finishes up any pending data that needs to be processed and handled. + /// </summary> + /// <remarks>This must be implemented by a derived class</remarks> + public abstract void Finish(); + + /// <summary> + /// Gets the checksum of the data that has been added so far + /// </summary> + public uint Checksum { get { return _checksum; } } + + #endregion + + #region Destructor & IDisposable stuff + + /// <summary> + /// Destroys this instance + /// </summary> + ~CodecBase() + { + CleanUp(false); + } + + /// <summary> + /// Releases any unmanaged resources and calls the <see cref="CleanUp()"/> method of the derived class + /// </summary> + public void Dispose() + { + CleanUp(true); + } + + /// <summary> + /// Performs any codec specific cleanup + /// </summary> + /// <remarks>This must be implemented by a derived class</remarks> + protected abstract void CleanUp(); + + // performs the release of the handles and calls the dereived CleanUp() + private void CleanUp(bool isDisposing) + { + if (!_isDisposed) + { + CleanUp(); + if (_hInput.IsAllocated) + _hInput.Free(); + if (_hOutput.IsAllocated) + _hOutput.Free(); + + _isDisposed = true; + } + } + + + 
#endregion + + #region Helper methods + + /// <summary> + /// Copies a number of bytes to the internal codec buffer - ready for proccesing + /// </summary> + /// <param name="data">The byte array that contains the data to copy</param> + /// <param name="startIndex">The index of the first byte to copy</param> + /// <param name="count">The number of bytes to copy from <c>data</c></param> + protected void copyInput(byte[] data, int startIndex, int count) + { + Array.Copy(data, startIndex, _inBuffer,0, count); + _ztream.next_in = _hInput.AddrOfPinnedObject(); + _ztream.total_in = 0; + _ztream.avail_in = (uint)count; + + } + + /// <summary> + /// Resets the internal output buffers to a known state - ready for processing + /// </summary> + protected void resetOutput() + { + _ztream.total_out = 0; + _ztream.avail_out = kBufferSize; + _ztream.next_out = _hOutput.AddrOfPinnedObject(); + } + + /// <summary> + /// Updates the running checksum property + /// </summary> + /// <param name="newSum">The new checksum value</param> + protected void setChecksum(uint newSum) + { + _checksum = newSum; + } + #endregion + + } +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/Deflater.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/Deflater.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/Deflater.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/Deflater.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,106 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Diagnostics; +using System.Runtime.InteropServices; + +namespace DotZLib +{ + + /// <summary> + /// Implements a data compressor, using the deflate algorithm in the ZLib dll + /// </summary> + public sealed class Deflater : CodecBase + { + #region Dll imports + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl, CharSet=CharSet.Ansi)] + private static extern int deflateInit_(ref ZStream sz, int level, string vs, int size); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int deflate(ref ZStream sz, int flush); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int deflateReset(ref ZStream sz); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int deflateEnd(ref ZStream sz); + #endregion + + /// <summary> + /// Constructs an new instance of the <c>Deflater</c> + /// </summary> + /// <param name="level">The compression level to use for this <c>Deflater</c></param> + public Deflater(CompressLevel level) : base() + { + int retval = deflateInit_(ref _ztream, (int)level, Info.Version, Marshal.SizeOf(_ztream)); + if (retval != 0) + throw new ZLibException(retval, "Could not initialize deflater"); + + resetOutput(); + } + + /// <summary> + /// Adds more data to the codec to be processed. 
+ /// </summary> + /// <param name="data">Byte array containing the data to be added to the codec</param> + /// <param name="offset">The index of the first byte to add from <c>data</c></param> + /// <param name="count">The number of bytes to add</param> + /// <remarks>Adding data may, or may not, raise the <c>DataAvailable</c> event</remarks> + public override void Add(byte[] data, int offset, int count) + { + if (data == null) throw new ArgumentNullException(); + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > data.Length) throw new ArgumentException(); + + int total = count; + int inputIndex = offset; + int err = 0; + + while (err >= 0 && inputIndex < total) + { + copyInput(data, inputIndex, Math.Min(total - inputIndex, kBufferSize)); + while (err >= 0 && _ztream.avail_in > 0) + { + err = deflate(ref _ztream, (int)FlushTypes.None); + if (err == 0) + while (_ztream.avail_out == 0) + { + OnDataAvailable(); + err = deflate(ref _ztream, (int)FlushTypes.None); + } + inputIndex += (int)_ztream.total_in; + } + } + setChecksum( _ztream.adler ); + } + + + /// <summary> + /// Finishes up any pending data that needs to be processed and handled. 
+ /// </summary> + public override void Finish() + { + int err; + do + { + err = deflate(ref _ztream, (int)FlushTypes.Finish); + OnDataAvailable(); + } + while (err == 0); + setChecksum( _ztream.adler ); + deflateReset(ref _ztream); + resetOutput(); + } + + /// <summary> + /// Closes the internal zlib deflate stream + /// </summary> + protected override void CleanUp() { deflateEnd(ref _ztream); } + + } +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,288 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.IO; +using System.Runtime.InteropServices; +using System.Text; + + +namespace DotZLib +{ + + #region Internal types + + /// <summary> + /// Defines constants for the various flush types used with zlib + /// </summary> + internal enum FlushTypes + { + None, Partial, Sync, Full, Finish, Block + } + + #region ZStream structure + // internal mapping of the zlib zstream structure for marshalling + [StructLayoutAttribute(LayoutKind.Sequential, Pack=4, Size=0, CharSet=CharSet.Ansi)] + internal struct ZStream + { + public IntPtr next_in; + public uint avail_in; + public uint total_in; + + public IntPtr next_out; + public uint avail_out; + public uint total_out; + + [MarshalAs(UnmanagedType.LPStr)] + string msg; + uint state; + + uint zalloc; + uint zfree; + uint opaque; + + int data_type; + public uint adler; + uint reserved; + } + + #endregion + + #endregion + + #region Public enums + /// <summary> + /// Defines constants for the available compression levels in zlib + /// 
</summary> + public enum CompressLevel : int + { + /// <summary> + /// The default compression level with a reasonable compromise between compression and speed + /// </summary> + Default = -1, + /// <summary> + /// No compression at all. The data are passed straight through. + /// </summary> + None = 0, + /// <summary> + /// The maximum compression rate available. + /// </summary> + Best = 9, + /// <summary> + /// The fastest available compression level. + /// </summary> + Fastest = 1 + } + #endregion + + #region Exception classes + /// <summary> + /// The exception that is thrown when an error occurs on the zlib dll + /// </summary> + public class ZLibException : ApplicationException + { + /// <summary> + /// Initializes a new instance of the <see cref="ZLibException"/> class with a specified + /// error message and error code + /// </summary> + /// <param name="errorCode">The zlib error code that caused the exception</param> + /// <param name="msg">A message that (hopefully) describes the error</param> + public ZLibException(int errorCode, string msg) : base(String.Format("ZLib error {0} {1}", errorCode, msg)) + { + } + + /// <summary> + /// Initializes a new instance of the <see cref="ZLibException"/> class with a specified + /// error code + /// </summary> + /// <param name="errorCode">The zlib error code that caused the exception</param> + public ZLibException(int errorCode) : base(String.Format("ZLib error {0}", errorCode)) + { + } + } + #endregion + + #region Interfaces + + /// <summary> + /// Declares methods and properties that enables a running checksum to be calculated + /// </summary> + public interface ChecksumGenerator + { + /// <summary> + /// Gets the current value of the checksum + /// </summary> + uint Value { get; } + + /// <summary> + /// Clears the current checksum to 0 + /// </summary> + void Reset(); + + /// <summary> + /// Updates the current checksum with an array of bytes + /// </summary> + /// <param name="data">The data to update the 
checksum with</param> + void Update(byte[] data); + + /// <summary> + /// Updates the current checksum with part of an array of bytes + /// </summary> + /// <param name="data">The data to update the checksum with</param> + /// <param name="offset">Where in <c>data</c> to start updating</param> + /// <param name="count">The number of bytes from <c>data</c> to use</param> + /// <exception cref="ArgumentException">The sum of offset and count is larger than the length of <c>data</c></exception> + /// <exception cref="ArgumentNullException"><c>data</c> is a null reference</exception> + /// <exception cref="ArgumentOutOfRangeException">Offset or count is negative.</exception> + void Update(byte[] data, int offset, int count); + + /// <summary> + /// Updates the current checksum with the data from a string + /// </summary> + /// <param name="data">The string to update the checksum with</param> + /// <remarks>The characters in the string are converted by the UTF-8 encoding</remarks> + void Update(string data); + + /// <summary> + /// Updates the current checksum with the data from a string, using a specific encoding + /// </summary> + /// <param name="data">The string to update the checksum with</param> + /// <param name="encoding">The encoding to use</param> + void Update(string data, Encoding encoding); + } + + + /// <summary> + /// Represents the method that will be called from a codec when new data + /// are available. + /// </summary> + /// <paramref name="data">The byte array containing the processed data</paramref> + /// <paramref name="startIndex">The index of the first processed byte in <c>data</c></paramref> + /// <paramref name="count">The number of processed bytes available</paramref> + /// <remarks>On return from this method, the data may be overwritten, so grab it while you can. + /// You cannot assume that startIndex will be zero. 
+ /// </remarks> + public delegate void DataAvailableHandler(byte[] data, int startIndex, int count); + + /// <summary> + /// Declares methods and events for implementing compressors/decompressors + /// </summary> + public interface Codec + { + /// <summary> + /// Occurs when more processed data are available. + /// </summary> + event DataAvailableHandler DataAvailable; + + /// <summary> + /// Adds more data to the codec to be processed. + /// </summary> + /// <param name="data">Byte array containing the data to be added to the codec</param> + /// <remarks>Adding data may, or may not, raise the <c>DataAvailable</c> event</remarks> + void Add(byte[] data); + + /// <summary> + /// Adds more data to the codec to be processed. + /// </summary> + /// <param name="data">Byte array containing the data to be added to the codec</param> + /// <param name="offset">The index of the first byte to add from <c>data</c></param> + /// <param name="count">The number of bytes to add</param> + /// <remarks>Adding data may, or may not, raise the <c>DataAvailable</c> event</remarks> + void Add(byte[] data, int offset, int count); + + /// <summary> + /// Finishes up any pending data that needs to be processed and handled. 
+ /// </summary> + void Finish(); + + /// <summary> + /// Gets the checksum of the data that has been added so far + /// </summary> + uint Checksum { get; } + + + } + + #endregion + + #region Classes + /// <summary> + /// Encapsulates general information about the ZLib library + /// </summary> + public class Info + { + #region DLL imports + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern uint zlibCompileFlags(); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern string zlibVersion(); + #endregion + + #region Private stuff + private uint _flags; + + // helper function that unpacks a bitsize mask + private static int bitSize(uint bits) + { + switch (bits) + { + case 0: return 16; + case 1: return 32; + case 2: return 64; + } + return -1; + } + #endregion + + /// <summary> + /// Constructs an instance of the <c>Info</c> class. + /// </summary> + public Info() + { + _flags = zlibCompileFlags(); + } + + /// <summary> + /// True if the library is compiled with debug info + /// </summary> + public bool HasDebugInfo { get { return 0 != (_flags & 0x100); } } + + /// <summary> + /// True if the library is compiled with assembly optimizations + /// </summary> + public bool UsesAssemblyCode { get { return 0 != (_flags & 0x200); } } + + /// <summary> + /// Gets the size of the unsigned int that was compiled into Zlib + /// </summary> + public int SizeOfUInt { get { return bitSize(_flags & 3); } } + + /// <summary> + /// Gets the size of the unsigned long that was compiled into Zlib + /// </summary> + public int SizeOfULong { get { return bitSize((_flags >> 2) & 3); } } + + /// <summary> + /// Gets the size of the pointers that were compiled into Zlib + /// </summary> + public int SizeOfPointer { get { return bitSize((_flags >> 4) & 3); } } + + /// <summary> + /// Gets the size of the z_off_t type that was compiled into Zlib + /// </summary> + public int SizeOfOffset { get { return 
bitSize((_flags >> 6) & 3); } } + + /// <summary> + /// Gets the version of ZLib as a string, e.g. "1.2.1" + /// </summary> + public static string Version { get { return zlibVersion(); } } + } + + #endregion + +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.csproj nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.csproj --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.csproj 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/DotZLib.csproj 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,141 @@ +<VisualStudioProject> + <CSHARP + ProjectType = "Local" + ProductVersion = "7.10.3077" + SchemaVersion = "2.0" + ProjectGuid = "{BB1EE0B1-1808-46CB-B786-949D91117FC5}" + > + <Build> + <Settings + ApplicationIcon = "" + AssemblyKeyContainerName = "" + AssemblyName = "DotZLib" + AssemblyOriginatorKeyFile = "" + DefaultClientScript = "JScript" + DefaultHTMLPageLayout = "Grid" + DefaultTargetSchema = "IE50" + DelaySign = "false" + OutputType = "Library" + PreBuildEvent = "" + PostBuildEvent = "" + RootNamespace = "DotZLib" + RunPostBuildEvent = "OnBuildSuccess" + StartupObject = "" + > + <Config + Name = "Debug" + AllowUnsafeBlocks = "false" + BaseAddress = "285212672" + CheckForOverflowUnderflow = "false" + ConfigurationOverrideFile = "" + DefineConstants = "DEBUG;TRACE" + DocumentationFile = "docs\DotZLib.xml" + DebugSymbols = "true" + FileAlignment = "4096" + IncrementalBuild = "false" + NoStdLib = "false" + NoWarn = "1591" + Optimize = "false" + OutputPath = "bin\Debug\" + RegisterForComInterop = "false" + RemoveIntegerChecks = "false" + TreatWarningsAsErrors = "false" + WarningLevel = "4" + /> + <Config + Name = "Release" + AllowUnsafeBlocks = "false" + BaseAddress = "285212672" + CheckForOverflowUnderflow = "false" + ConfigurationOverrideFile = "" + DefineConstants = "TRACE" + DocumentationFile = "docs\DotZLib.xml" + DebugSymbols = "false" + FileAlignment = "4096" + IncrementalBuild = "false" + 
NoStdLib = "false" + NoWarn = "" + Optimize = "true" + OutputPath = "bin\Release\" + RegisterForComInterop = "false" + RemoveIntegerChecks = "false" + TreatWarningsAsErrors = "false" + WarningLevel = "4" + /> + </Settings> + <References> + <Reference + Name = "System" + AssemblyName = "System" + HintPath = "C:\WINNT\Microsoft.NET\Framework\v1.1.4322\System.dll" + /> + <Reference + Name = "System.Data" + AssemblyName = "System.Data" + HintPath = "C:\WINNT\Microsoft.NET\Framework\v1.1.4322\System.Data.dll" + /> + <Reference + Name = "System.XML" + AssemblyName = "System.Xml" + HintPath = "C:\WINNT\Microsoft.NET\Framework\v1.1.4322\System.XML.dll" + /> + <Reference + Name = "nunit.framework" + AssemblyName = "nunit.framework" + HintPath = "E:\apps\NUnit V2.1\\bin\nunit.framework.dll" + AssemblyFolderKey = "hklm\dn\nunit.framework" + /> + </References> + </Build> + <Files> + <Include> + <File + RelPath = "AssemblyInfo.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "ChecksumImpl.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "CircularBuffer.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "CodecBase.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "Deflater.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "DotZLib.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "GZipStream.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "Inflater.cs" + SubType = "Code" + BuildAction = "Compile" + /> + <File + RelPath = "UnitTests.cs" + SubType = "Code" + BuildAction = "Compile" + /> + </Include> + </Files> + </CSHARP> +</VisualStudioProject> + diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/GZipStream.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/GZipStream.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/GZipStream.cs 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/GZipStream.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,301 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.IO; +using System.Runtime.InteropServices; + +namespace DotZLib +{ + /// <summary> + /// Implements a compressed <see cref="Stream"/>, in GZip (.gz) format. + /// </summary> + public class GZipStream : Stream, IDisposable + { + #region Dll Imports + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl, CharSet=CharSet.Ansi)] + private static extern IntPtr gzopen(string name, string mode); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int gzclose(IntPtr gzFile); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int gzwrite(IntPtr gzFile, int data, int length); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int gzread(IntPtr gzFile, int data, int length); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int gzgetc(IntPtr gzFile); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int gzputc(IntPtr gzFile, int c); + + #endregion + + #region Private data + private IntPtr _gzFile; + private bool _isDisposed = false; + private bool _isWriting; + #endregion + + #region Constructors + /// <summary> + /// Creates a new file as a writeable GZipStream + /// </summary> + /// <param name="fileName">The name of the compressed file to create</param> + /// <param name="level">The compression level to use when adding data</param> + /// <exception cref="ZLibException">If an error occurred in the internal zlib function</exception> + public GZipStream(string fileName, 
CompressLevel level) + { + _isWriting = true; + _gzFile = gzopen(fileName, String.Format("wb{0}", (int)level)); + if (_gzFile == IntPtr.Zero) + throw new ZLibException(-1, "Could not open " + fileName); + } + + /// <summary> + /// Opens an existing file as a readable GZipStream + /// </summary> + /// <param name="fileName">The name of the file to open</param> + /// <exception cref="ZLibException">If an error occurred in the internal zlib function</exception> + public GZipStream(string fileName) + { + _isWriting = false; + _gzFile = gzopen(fileName, "rb"); + if (_gzFile == IntPtr.Zero) + throw new ZLibException(-1, "Could not open " + fileName); + + } + #endregion + + #region Access properties + /// <summary> + /// Returns true of this stream can be read from, false otherwise + /// </summary> + public override bool CanRead + { + get + { + return !_isWriting; + } + } + + + /// <summary> + /// Returns false. + /// </summary> + public override bool CanSeek + { + get + { + return false; + } + } + + /// <summary> + /// Returns true if this tsream is writeable, false otherwise + /// </summary> + public override bool CanWrite + { + get + { + return _isWriting; + } + } + #endregion + + #region Destructor & IDispose stuff + + /// <summary> + /// Destroys this instance + /// </summary> + ~GZipStream() + { + cleanUp(false); + } + + /// <summary> + /// Closes the external file handle + /// </summary> + public void Dispose() + { + cleanUp(true); + } + + // Does the actual closing of the file handle. + private void cleanUp(bool isDisposing) + { + if (!_isDisposed) + { + gzclose(_gzFile); + _isDisposed = true; + } + } + #endregion + + #region Basic reading and writing + /// <summary> + /// Attempts to read a number of bytes from the stream. 
+ /// </summary> + /// <param name="buffer">The destination data buffer</param> + /// <param name="offset">The index of the first destination byte in <c>buffer</c></param> + /// <param name="count">The number of bytes requested</param> + /// <returns>The number of bytes read</returns> + /// <exception cref="ArgumentNullException">If <c>buffer</c> is null</exception> + /// <exception cref="ArgumentOutOfRangeException">If <c>count</c> or <c>offset</c> are negative</exception> + /// <exception cref="ArgumentException">If <c>offset</c> + <c>count</c> is > buffer.Length</exception> + /// <exception cref="NotSupportedException">If this stream is not readable.</exception> + /// <exception cref="ObjectDisposedException">If this stream has been disposed.</exception> + public override int Read(byte[] buffer, int offset, int count) + { + if (!CanRead) throw new NotSupportedException(); + if (buffer == null) throw new ArgumentNullException(); + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > buffer.Length) throw new ArgumentException(); + if (_isDisposed) throw new ObjectDisposedException("GZipStream"); + + GCHandle h = GCHandle.Alloc(buffer, GCHandleType.Pinned); + int result; + try + { + result = gzread(_gzFile, h.AddrOfPinnedObject().ToInt32() + offset, count); + if (result < 0) + throw new IOException(); + } + finally + { + h.Free(); + } + return result; + } + + /// <summary> + /// Attempts to read a single byte from the stream. 
+ /// </summary> + /// <returns>The byte that was read, or -1 in case of error or End-Of-File</returns> + public override int ReadByte() + { + if (!CanRead) throw new NotSupportedException(); + if (_isDisposed) throw new ObjectDisposedException("GZipStream"); + return gzgetc(_gzFile); + } + + /// <summary> + /// Writes a number of bytes to the stream + /// </summary> + /// <param name="buffer"></param> + /// <param name="offset"></param> + /// <param name="count"></param> + /// <exception cref="ArgumentNullException">If <c>buffer</c> is null</exception> + /// <exception cref="ArgumentOutOfRangeException">If <c>count</c> or <c>offset</c> are negative</exception> + /// <exception cref="ArgumentException">If <c>offset</c> + <c>count</c> is > buffer.Length</exception> + /// <exception cref="NotSupportedException">If this stream is not writeable.</exception> + /// <exception cref="ObjectDisposedException">If this stream has been disposed.</exception> + public override void Write(byte[] buffer, int offset, int count) + { + if (!CanWrite) throw new NotSupportedException(); + if (buffer == null) throw new ArgumentNullException(); + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > buffer.Length) throw new ArgumentException(); + if (_isDisposed) throw new ObjectDisposedException("GZipStream"); + + GCHandle h = GCHandle.Alloc(buffer, GCHandleType.Pinned); + try + { + int result = gzwrite(_gzFile, h.AddrOfPinnedObject().ToInt32() + offset, count); + if (result < 0) + throw new IOException(); + } + finally + { + h.Free(); + } + } + + /// <summary> + /// Writes a single byte to the stream + /// </summary> + /// <param name="value">The byte to add to the stream.</param> + /// <exception cref="NotSupportedException">If this stream is not writeable.</exception> + /// <exception cref="ObjectDisposedException">If this stream has been disposed.</exception> + public override void WriteByte(byte value) + { + if (!CanWrite) throw new 
NotSupportedException(); + if (_isDisposed) throw new ObjectDisposedException("GZipStream"); + + int result = gzputc(_gzFile, (int)value); + if (result < 0) + throw new IOException(); + } + #endregion + + #region Position & length stuff + /// <summary> + /// Not supported. + /// </summary> + /// <param name="value"></param> + /// <exception cref="NotSupportedException">Always thrown</exception> + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + /// <summary> + /// Not suppported. + /// </summary> + /// <param name="offset"></param> + /// <param name="origin"></param> + /// <returns></returns> + /// <exception cref="NotSupportedException">Always thrown</exception> + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + /// <summary> + /// Flushes the <c>GZipStream</c>. + /// </summary> + /// <remarks>In this implementation, this method does nothing. This is because excessive + /// flushing may degrade the achievable compression rates.</remarks> + public override void Flush() + { + // left empty on purpose + } + + /// <summary> + /// Gets/sets the current position in the <c>GZipStream</c>. Not suppported. + /// </summary> + /// <remarks>In this implementation this property is not supported</remarks> + /// <exception cref="NotSupportedException">Always thrown</exception> + public override long Position + { + get + { + throw new NotSupportedException(); + } + set + { + throw new NotSupportedException(); + } + } + + /// <summary> + /// Gets the size of the stream. Not suppported. 
+ /// </summary> + /// <remarks>In this implementation this property is not supported</remarks> + /// <exception cref="NotSupportedException">Always thrown</exception> + public override long Length + { + get + { + throw new NotSupportedException(); + } + } + #endregion + } +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/Inflater.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/Inflater.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/Inflater.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/Inflater.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,105 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Diagnostics; +using System.Runtime.InteropServices; + +namespace DotZLib +{ + + /// <summary> + /// Implements a data decompressor, using the inflate algorithm in the ZLib dll + /// </summary> + public class Inflater : CodecBase + { + #region Dll imports + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl, CharSet=CharSet.Ansi)] + private static extern int inflateInit_(ref ZStream sz, string vs, int size); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int inflate(ref ZStream sz, int flush); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int inflateReset(ref ZStream sz); + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern int inflateEnd(ref ZStream sz); + #endregion + + /// <summary> + /// Constructs an new instance of the <c>Inflater</c> + /// </summary> + public Inflater() : base() + { + int retval = inflateInit_(ref _ztream, Info.Version, Marshal.SizeOf(_ztream)); + if (retval != 0) + throw new ZLibException(retval, "Could not 
initialize inflater"); + + resetOutput(); + } + + + /// <summary> + /// Adds more data to the codec to be processed. + /// </summary> + /// <param name="data">Byte array containing the data to be added to the codec</param> + /// <param name="offset">The index of the first byte to add from <c>data</c></param> + /// <param name="count">The number of bytes to add</param> + /// <remarks>Adding data may, or may not, raise the <c>DataAvailable</c> event</remarks> + public override void Add(byte[] data, int offset, int count) + { + if (data == null) throw new ArgumentNullException(); + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > data.Length) throw new ArgumentException(); + + int total = count; + int inputIndex = offset; + int err = 0; + + while (err >= 0 && inputIndex < total) + { + copyInput(data, inputIndex, Math.Min(total - inputIndex, kBufferSize)); + err = inflate(ref _ztream, (int)FlushTypes.None); + if (err == 0) + while (_ztream.avail_out == 0) + { + OnDataAvailable(); + err = inflate(ref _ztream, (int)FlushTypes.None); + } + + inputIndex += (int)_ztream.total_in; + } + setChecksum( _ztream.adler ); + } + + + /// <summary> + /// Finishes up any pending data that needs to be processed and handled. 
+ /// </summary> + public override void Finish() + { + int err; + do + { + err = inflate(ref _ztream, (int)FlushTypes.Finish); + OnDataAvailable(); + } + while (err == 0); + setChecksum( _ztream.adler ); + inflateReset(ref _ztream); + resetOutput(); + } + + /// <summary> + /// Closes the internal zlib inflate stream + /// </summary> + protected override void CleanUp() { inflateEnd(ref _ztream); } + + + } +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/UnitTests.cs nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/UnitTests.cs --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib/UnitTests.cs 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib/UnitTests.cs 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,274 @@ +// +// © Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Collections; +using System.IO; + +// uncomment the define below to include unit tests +//#define nunit +#if nunit +using NUnit.Framework; + +// Unit tests for the DotZLib class library +// ---------------------------------------- +// +// Use this with NUnit 2 from http://www.nunit.org +// + +namespace DotZLibTests +{ + using DotZLib; + + // helper methods + internal class Utils + { + public static bool byteArrEqual( byte[] lhs, byte[] rhs ) + { + if (lhs.Length != rhs.Length) + return false; + for (int i = lhs.Length-1; i >= 0; --i) + if (lhs[i] != rhs[i]) + return false; + return true; + } + + } + + + [TestFixture] + public class CircBufferTests + { + #region Circular buffer tests + [Test] + public void SinglePutGet() + { + CircularBuffer buf = new CircularBuffer(10); + Assert.AreEqual( 0, buf.Size ); + Assert.AreEqual( -1, buf.Get() ); + + Assert.IsTrue(buf.Put( 1 )); + Assert.AreEqual( 1, buf.Size ); + Assert.AreEqual( 1, buf.Get() ); + 
Assert.AreEqual( 0, buf.Size ); + Assert.AreEqual( -1, buf.Get() ); + } + + [Test] + public void BlockPutGet() + { + CircularBuffer buf = new CircularBuffer(10); + byte[] arr = {1,2,3,4,5,6,7,8,9,10}; + Assert.AreEqual( 10, buf.Put(arr,0,10) ); + Assert.AreEqual( 10, buf.Size ); + Assert.IsFalse( buf.Put(11) ); + Assert.AreEqual( 1, buf.Get() ); + Assert.IsTrue( buf.Put(11) ); + + byte[] arr2 = (byte[])arr.Clone(); + Assert.AreEqual( 9, buf.Get(arr2,1,9) ); + Assert.IsTrue( Utils.byteArrEqual(arr,arr2) ); + } + + #endregion + } + + [TestFixture] + public class ChecksumTests + { + #region CRC32 Tests + [Test] + public void CRC32_Null() + { + CRC32Checksum crc32 = new CRC32Checksum(); + Assert.AreEqual( 0, crc32.Value ); + + crc32 = new CRC32Checksum(1); + Assert.AreEqual( 1, crc32.Value ); + + crc32 = new CRC32Checksum(556); + Assert.AreEqual( 556, crc32.Value ); + } + + [Test] + public void CRC32_Data() + { + CRC32Checksum crc32 = new CRC32Checksum(); + byte[] data = { 1,2,3,4,5,6,7 }; + crc32.Update(data); + Assert.AreEqual( 0x70e46888, crc32.Value ); + + crc32 = new CRC32Checksum(); + crc32.Update("penguin"); + Assert.AreEqual( 0x0e5c1a120, crc32.Value ); + + crc32 = new CRC32Checksum(1); + crc32.Update("penguin"); + Assert.AreEqual(0x43b6aa94, crc32.Value); + + } + #endregion + + #region Adler tests + + [Test] + public void Adler_Null() + { + AdlerChecksum adler = new AdlerChecksum(); + Assert.AreEqual(0, adler.Value); + + adler = new AdlerChecksum(1); + Assert.AreEqual( 1, adler.Value ); + + adler = new AdlerChecksum(556); + Assert.AreEqual( 556, adler.Value ); + } + + [Test] + public void Adler_Data() + { + AdlerChecksum adler = new AdlerChecksum(1); + byte[] data = { 1,2,3,4,5,6,7 }; + adler.Update(data); + Assert.AreEqual( 0x5b001d, adler.Value ); + + adler = new AdlerChecksum(); + adler.Update("penguin"); + Assert.AreEqual(0x0bcf02f6, adler.Value ); + + adler = new AdlerChecksum(1); + adler.Update("penguin"); + Assert.AreEqual(0x0bd602f7, adler.Value); + + 
} + #endregion + } + + [TestFixture] + public class InfoTests + { + #region Info tests + [Test] + public void Info_Version() + { + Info info = new Info(); + Assert.AreEqual("1.2.8", Info.Version); + Assert.AreEqual(32, info.SizeOfUInt); + Assert.AreEqual(32, info.SizeOfULong); + Assert.AreEqual(32, info.SizeOfPointer); + Assert.AreEqual(32, info.SizeOfOffset); + } + #endregion + } + + [TestFixture] + public class DeflateInflateTests + { + #region Deflate tests + [Test] + public void Deflate_Init() + { + using (Deflater def = new Deflater(CompressLevel.Default)) + { + } + } + + private ArrayList compressedData = new ArrayList(); + private uint adler1; + + private ArrayList uncompressedData = new ArrayList(); + private uint adler2; + + public void CDataAvail(byte[] data, int startIndex, int count) + { + for (int i = 0; i < count; ++i) + compressedData.Add(data[i+startIndex]); + } + + [Test] + public void Deflate_Compress() + { + compressedData.Clear(); + + byte[] testData = new byte[35000]; + for (int i = 0; i < testData.Length; ++i) + testData[i] = 5; + + using (Deflater def = new Deflater((CompressLevel)5)) + { + def.DataAvailable += new DataAvailableHandler(CDataAvail); + def.Add(testData); + def.Finish(); + adler1 = def.Checksum; + } + } + #endregion + + #region Inflate tests + [Test] + public void Inflate_Init() + { + using (Inflater inf = new Inflater()) + { + } + } + + private void DDataAvail(byte[] data, int startIndex, int count) + { + for (int i = 0; i < count; ++i) + uncompressedData.Add(data[i+startIndex]); + } + + [Test] + public void Inflate_Expand() + { + uncompressedData.Clear(); + + using (Inflater inf = new Inflater()) + { + inf.DataAvailable += new DataAvailableHandler(DDataAvail); + inf.Add((byte[])compressedData.ToArray(typeof(byte))); + inf.Finish(); + adler2 = inf.Checksum; + } + Assert.AreEqual( adler1, adler2 ); + } + #endregion + } + + [TestFixture] + public class GZipStreamTests + { + #region GZipStream test + [Test] + public void 
GZipStream_WriteRead() + { + using (GZipStream gzOut = new GZipStream("gzstream.gz", CompressLevel.Best)) + { + BinaryWriter writer = new BinaryWriter(gzOut); + writer.Write("hi there"); + writer.Write(Math.PI); + writer.Write(42); + } + + using (GZipStream gzIn = new GZipStream("gzstream.gz")) + { + BinaryReader reader = new BinaryReader(gzIn); + string s = reader.ReadString(); + Assert.AreEqual("hi there",s); + double d = reader.ReadDouble(); + Assert.AreEqual(Math.PI, d); + int i = reader.ReadInt32(); + Assert.AreEqual(42,i); + } + + } + #endregion + } +} + +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib.build nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib.build --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib.build 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib.build 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,33 @@ +<?xml version="1.0" encoding="utf-8" ?> +<project name="DotZLib" default="build" basedir="./DotZLib"> + <description>A .Net wrapper library around ZLib1.dll</description> + + <property name="nunit.location" value="c:/program files/NUnit V2.1/bin" /> + <property name="build.root" value="bin" /> + + <property name="debug" value="true" /> + <property name="nunit" value="true" /> + + <property name="build.folder" value="${build.root}/debug/" if="${debug}" /> + <property name="build.folder" value="${build.root}/release/" unless="${debug}" /> + + <target name="clean" description="Remove all generated files"> + <delete dir="${build.root}" failonerror="false" /> + </target> + + <target name="build" description="compiles the source code"> + + <mkdir dir="${build.folder}" /> + <csc target="library" output="${build.folder}DotZLib.dll" debug="${debug}"> + <references basedir="${nunit.location}"> + <includes if="${nunit}" name="nunit.framework.dll" /> + </references> + <sources> + <includes name="*.cs" /> + <excludes name="UnitTests.cs" unless="${nunit}" /> + </sources> + <arg 
value="/d:nunit" if="${nunit}" /> + </csc> + </target> + +</project> \ No newline at end of file Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/zlib/contrib/dotzlib/DotZLib.chm and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/zlib/contrib/dotzlib/DotZLib.chm differ diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/LICENSE_1_0.txt nodejs-0.11.15/deps/zlib/contrib/dotzlib/LICENSE_1_0.txt --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/LICENSE_1_0.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/LICENSE_1_0.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,23 @@ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff -Nru nodejs-0.11.13/deps/zlib/contrib/dotzlib/readme.txt nodejs-0.11.15/deps/zlib/contrib/dotzlib/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/dotzlib/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/dotzlib/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,58 @@ +This directory contains a .Net wrapper class library for the ZLib1.dll + +The wrapper includes support for inflating/deflating memory buffers, +.Net streaming wrappers for the gz streams part of zlib, and wrappers +for the checksum parts of zlib. See DotZLib/UnitTests.cs for examples. + +Directory structure: +-------------------- + +LICENSE_1_0.txt - License file. +readme.txt - This file. +DotZLib.chm - Class library documentation +DotZLib.build - NAnt build file +DotZLib.sln - Microsoft Visual Studio 2003 solution file + +DotZLib\*.cs - Source files for the class library + +Unit tests: +----------- +The file DotZLib/UnitTests.cs contains unit tests for use with NUnit 2.1 or higher. +To include unit tests in the build, define nunit before building. + + +Build instructions: +------------------- + +1. Using Visual Studio.Net 2003: + Open DotZLib.sln in VS.Net and build from there. Output file (DotZLib.dll) + will be found ./DotZLib/bin/release or ./DotZLib/bin/debug, depending on + you are building the release or debug version of the library. Check + DotZLib/UnitTests.cs for instructions on how to include unit tests in the + build. + +2. Using NAnt: + Open a command prompt with access to the build environment and run nant + in the same directory as the DotZLib.build file. + You can define 2 properties on the nant command-line to control the build: + debug={true|false} to toggle between release/debug builds (default=true). + nunit={true|false} to include or esclude unit tests (default=true). + Also the target clean will remove binaries. 
+ Output file (DotZLib.dll) will be found in either ./DotZLib/bin/release + or ./DotZLib/bin/debug, depending on whether you are building the release + or debug version of the library. + + Examples: + nant -D:debug=false -D:nunit=false + will build a release mode version of the library without unit tests. + nant + will build a debug version of the library with unit tests + nant clean + will remove all previously built files. + + +--------------------------------- +Copyright (c) Henrik Ravn 2004 + +Use, modification and distribution are subject to the Boost Software License, Version 1.0. +(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) diff -Nru nodejs-0.11.13/deps/zlib/contrib/gcc_gvmat64/gvmat64.S nodejs-0.11.15/deps/zlib/contrib/gcc_gvmat64/gvmat64.S --- nodejs-0.11.13/deps/zlib/contrib/gcc_gvmat64/gvmat64.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/gcc_gvmat64/gvmat64.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,574 @@ +/* +;uInt longest_match_x64( +; deflate_state *s, +; IPos cur_match); // current match + +; gvmat64.S -- Asm portion of the optimized longest_match for 32 bits x86_64 +; (AMD64 on Athlon 64, Opteron, Phenom +; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7) +; this file is translation from gvmat64.asm to GCC 4.x (for Linux, Mac XCode) +; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant. +; +; File written by Gilles Vollant, by converting to assembly the longest_match +; from Jean-loup Gailly in deflate.c of zLib and infoZip zip. +; and by taking inspiration on asm686 with masm, optimised assembly code +; from Brian Raiter, written 1998 +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. 
+; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software +; 3. This notice may not be removed or altered from any source distribution. +; +; http://www.zlib.net +; http://www.winimage.com/zLibDll +; http://www.muppetlabs.com/~breadbox/software/assembly.html +; +; to compile this file for zLib, I use option: +; gcc -c -arch x86_64 gvmat64.S + + +;uInt longest_match(s, cur_match) +; deflate_state *s; +; IPos cur_match; // current match / +; +; with XCode for Mac, I had strange error with some jump on intel syntax +; this is why BEFORE_JMP and AFTER_JMP are used + */ + + +#define BEFORE_JMP .att_syntax +#define AFTER_JMP .intel_syntax noprefix + +#ifndef NO_UNDERLINE +# define match_init _match_init +# define longest_match _longest_match +#endif + +.intel_syntax noprefix + +.globl match_init, longest_match +.text +longest_match: + + + +#define LocalVarsSize 96 +/* +; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12 +; free register : r14,r15 +; register can be saved : rsp +*/ + +#define chainlenwmask (rsp + 8 - LocalVarsSize) +#define nicematch (rsp + 16 - LocalVarsSize) + +#define save_rdi (rsp + 24 - LocalVarsSize) +#define save_rsi (rsp + 32 - LocalVarsSize) +#define save_rbx (rsp + 40 - LocalVarsSize) +#define save_rbp (rsp + 48 - LocalVarsSize) +#define save_r12 (rsp + 56 - LocalVarsSize) +#define save_r13 (rsp + 64 - LocalVarsSize) +#define save_r14 (rsp + 72 - LocalVarsSize) +#define save_r15 (rsp + 80 - LocalVarsSize) + + +/* +; all 
the +4 offsets are due to the addition of pending_buf_size (in zlib +; in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, remove the +4). +; Note : these value are good with a 8 bytes boundary pack structure +*/ + +#define MAX_MATCH 258 +#define MIN_MATCH 3 +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) + +/* +;;; Offsets for fields in the deflate_state structure. These numbers +;;; are calculated from the definition of deflate_state, with the +;;; assumption that the compiler will dword-align the fields. (Thus, +;;; changing the definition of deflate_state could easily cause this +;;; program to crash horribly, without so much as a warning at +;;; compile time. Sigh.) + +; all the +zlib1222add offsets are due to the addition of fields +; in zlib in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)"). +; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0"). +; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8"). 
+*/ + + + +/* you can check the structure offset by running + +#include <stdlib.h> +#include <stdio.h> +#include "deflate.h" + +void print_depl() +{ +deflate_state ds; +deflate_state *s=&ds; +printf("size pointer=%u\n",(int)sizeof(void*)); + +printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s))); +printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s))); +printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s))); +printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s))); +printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s))); +printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s))); +printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s))); +printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s))); +printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s))); +printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s))); +printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s))); +printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s))); +printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s))); +} +*/ + +#define dsWSize 68 +#define dsWMask 76 +#define dsWindow 80 +#define dsPrev 96 +#define dsMatchLen 144 +#define dsPrevMatch 148 +#define dsStrStart 156 +#define dsMatchStart 160 +#define dsLookahead 164 +#define dsPrevLen 168 +#define dsMaxChainLen 172 +#define dsGoodMatch 188 +#define dsNiceMatch 192 + +#define window_size [ rcx + dsWSize] +#define WMask [ rcx + dsWMask] +#define window_ad [ rcx + dsWindow] +#define prev_ad [ rcx + dsPrev] +#define strstart [ rcx + dsStrStart] +#define match_start [ rcx + dsMatchStart] +#define Lookahead [ rcx + dsLookahead] //; 0ffffffffh on infozip +#define prev_length [ rcx + dsPrevLen] +#define max_chain_length [ rcx + dsMaxChainLen] +#define good_match [ rcx + dsGoodMatch] 
+#define nice_match [ rcx + dsNiceMatch] + +/* +; windows: +; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match) + +; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and +; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp +; +; All registers must be preserved across the call, except for +; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch. + +; +; gcc on macosx-linux: +; see http://www.x86-64.org/documentation/abi-0.99.pdf +; param 1 in rdi, param 2 in rsi +; rbx, rsp, rbp, r12 to r15 must be preserved + +;;; Save registers that the compiler may be using, and adjust esp to +;;; make room for our stack frame. + + +;;; Retrieve the function arguments. r8d will hold cur_match +;;; throughout the entire function. edx will hold the pointer to the +;;; deflate_state structure during the function's setup (before +;;; entering the main loop. + +; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match) +; mac: param 1 in rdi, param 2 rsi +; this clear high 32 bits of r8, which can be garbage in both r8 and rdx +*/ + mov [save_rbx],rbx + mov [save_rbp],rbp + + + mov rcx,rdi + + mov r8d,esi + + + mov [save_r12],r12 + mov [save_r13],r13 + mov [save_r14],r14 + mov [save_r15],r15 + + +//;;; uInt wmask = s->w_mask; +//;;; unsigned chain_length = s->max_chain_length; +//;;; if (s->prev_length >= s->good_match) { +//;;; chain_length >>= 2; +//;;; } + + + mov edi, prev_length + mov esi, good_match + mov eax, WMask + mov ebx, max_chain_length + cmp edi, esi + jl LastMatchGood + shr ebx, 2 +LastMatchGood: + +//;;; chainlen is decremented once beforehand so that the function can +//;;; use the sign flag instead of the zero flag for the exit test. +//;;; It is then shifted into the high word, to make room for the wmask +//;;; value, which it will always accompany. 
+ + dec ebx + shl ebx, 16 + or ebx, eax + +//;;; on zlib only +//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + + + + mov eax, nice_match + mov [chainlenwmask], ebx + mov r10d, Lookahead + cmp r10d, eax + cmovnl r10d, eax + mov [nicematch],r10d + + + +//;;; register Bytef *scan = s->window + s->strstart; + mov r10, window_ad + mov ebp, strstart + lea r13, [r10 + rbp] + +//;;; Determine how many bytes the scan ptr is off from being +//;;; dword-aligned. + + mov r9,r13 + neg r13 + and r13,3 + +//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ? +//;;; s->strstart - (IPos)MAX_DIST(s) : NIL; + + + mov eax, window_size + sub eax, MIN_LOOKAHEAD + + + xor edi,edi + sub ebp, eax + + mov r11d, prev_length + + cmovng ebp,edi + +//;;; int best_len = s->prev_length; + + +//;;; Store the sum of s->window + best_len in esi locally, and in esi. + + lea rsi,[r10+r11] + +//;;; register ush scan_start = *(ushf*)scan; +//;;; register ush scan_end = *(ushf*)(scan+best_len-1); +//;;; Posf *prev = s->prev; + + movzx r12d,word ptr [r9] + movzx ebx, word ptr [r9 + r11 - 1] + + mov rdi, prev_ad + +//;;; Jump into the main loop. 
+ + mov edx, [chainlenwmask] + + cmp bx,word ptr [rsi + r8 - 1] + jz LookupLoopIsZero + + + +LookupLoop1: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + jbe LeaveNow + + + + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry1: + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jz LookupLoopIsZero + AFTER_JMP + +LookupLoop2: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + BEFORE_JMP + jbe LeaveNow + AFTER_JMP + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry2: + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jz LookupLoopIsZero + AFTER_JMP + +LookupLoop4: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + BEFORE_JMP + jbe LeaveNow + AFTER_JMP + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry4: + + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jnz LookupLoop1 + jmp LookupLoopIsZero + AFTER_JMP +/* +;;; do { +;;; match = s->window + cur_match; +;;; if (*(ushf*)(match+best_len-1) != scan_end || +;;; *(ushf*)match != scan_start) continue; +;;; [...] +;;; } while ((cur_match = prev[cur_match & wmask]) > limit +;;; && --chain_length != 0); +;;; +;;; Here is the inner loop of the function. The function will spend the +;;; majority of its time in this loop, and majority of that time will +;;; be spent in the first ten instructions. 
+;;; +;;; Within this loop: +;;; ebx = scanend +;;; r8d = curmatch +;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask) +;;; esi = windowbestlen - i.e., (window + bestlen) +;;; edi = prev +;;; ebp = limit +*/ +.balign 16 +LookupLoop: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + BEFORE_JMP + jbe LeaveNow + AFTER_JMP + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry: + + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jnz LookupLoop1 + AFTER_JMP +LookupLoopIsZero: + cmp r12w, word ptr [r10 + r8] + BEFORE_JMP + jnz LookupLoop1 + AFTER_JMP + + +//;;; Store the current value of chainlen. + mov [chainlenwmask], edx +/* +;;; Point edi to the string under scrutiny, and esi to the string we +;;; are hoping to match it up with. In actuality, esi and edi are +;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is +;;; initialized to -(MAX_MATCH_8 - scanalign). +*/ + lea rsi,[r8+r10] + mov rdx, 0xfffffffffffffef8 //; -(MAX_MATCH_8) + lea rsi, [rsi + r13 + 0x0108] //;MAX_MATCH_8] + lea rdi, [r9 + r13 + 0x0108] //;MAX_MATCH_8] + + prefetcht1 [rsi+rdx] + prefetcht1 [rdi+rdx] + +/* +;;; Test the strings for equality, 8 bytes at a time. At the end, +;;; adjust rdx so that it is offset to the exact byte that mismatched. +;;; +;;; We already know at this point that the first three bytes of the +;;; strings match each other, and they can be safely passed over before +;;; starting the compare loop. So what this code does is skip over 0-3 +;;; bytes, as much as necessary in order to dword-align the edi +;;; pointer. (rsi will still be misaligned three times out of four.) +;;; +;;; It should be confessed that this loop usually does not represent +;;; much of the total running time. Replacing it with a more +;;; straightforward "rep cmpsb" would not drastically degrade +;;; performance. 
+*/ + +LoopCmps: + mov rax, [rsi + rdx] + xor rax, [rdi + rdx] + jnz LeaveLoopCmps + + mov rax, [rsi + rdx + 8] + xor rax, [rdi + rdx + 8] + jnz LeaveLoopCmps8 + + + mov rax, [rsi + rdx + 8+8] + xor rax, [rdi + rdx + 8+8] + jnz LeaveLoopCmps16 + + add rdx,8+8+8 + + BEFORE_JMP + jnz LoopCmps + jmp LenMaximum + AFTER_JMP + +LeaveLoopCmps16: add rdx,8 +LeaveLoopCmps8: add rdx,8 +LeaveLoopCmps: + + test eax, 0x0000FFFF + jnz LenLower + + test eax,0xffffffff + + jnz LenLower32 + + add rdx,4 + shr rax,32 + or ax,ax + BEFORE_JMP + jnz LenLower + AFTER_JMP + +LenLower32: + shr eax,16 + add rdx,2 + +LenLower: + sub al, 1 + adc rdx, 0 +//;;; Calculate the length of the match. If it is longer than MAX_MATCH, +//;;; then automatically accept it as the best possible match and leave. + + lea rax, [rdi + rdx] + sub rax, r9 + cmp eax, MAX_MATCH + BEFORE_JMP + jge LenMaximum + AFTER_JMP +/* +;;; If the length of the match is not longer than the best match we +;;; have so far, then forget it and return to the lookup loop. +;/////////////////////////////////// +*/ + cmp eax, r11d + jg LongerMatch + + lea rsi,[r10+r11] + + mov rdi, prev_ad + mov edx, [chainlenwmask] + BEFORE_JMP + jmp LookupLoop + AFTER_JMP +/* +;;; s->match_start = cur_match; +;;; best_len = len; +;;; if (len >= nice_match) break; +;;; scan_end = *(ushf*)(scan+best_len-1); +*/ +LongerMatch: + mov r11d, eax + mov match_start, r8d + cmp eax, [nicematch] + BEFORE_JMP + jge LeaveNow + AFTER_JMP + + lea rsi,[r10+rax] + + movzx ebx, word ptr [r9 + rax - 1] + mov rdi, prev_ad + mov edx, [chainlenwmask] + BEFORE_JMP + jmp LookupLoop + AFTER_JMP + +//;;; Accept the current string, with the maximum possible length. + +LenMaximum: + mov r11d,MAX_MATCH + mov match_start, r8d + +//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len; +//;;; return s->lookahead; + +LeaveNow: + mov eax, Lookahead + cmp r11d, eax + cmovng eax, r11d + + + +//;;; Restore the stack and return from whence we came. 
+ + +// mov rsi,[save_rsi] +// mov rdi,[save_rdi] + mov rbx,[save_rbx] + mov rbp,[save_rbp] + mov r12,[save_r12] + mov r13,[save_r13] + mov r14,[save_r14] + mov r15,[save_r15] + + + ret 0 +//; please don't remove this string ! +//; Your can freely use gvmat64 in any free or commercial app +//; but it is far better don't remove the string in the binary! + // db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0 + + +match_init: + ret 0 + + diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/infback9.c nodejs-0.11.15/deps/zlib/contrib/infback9/infback9.c --- nodejs-0.11.13/deps/zlib/contrib/infback9/infback9.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/infback9.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,615 @@ +/* infback9.c -- inflate deflate64 data using a call-back interface + * Copyright (C) 1995-2008 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "infback9.h" +#include "inftree9.h" +#include "inflate9.h" + +#define WSIZE 65536UL + +/* + strm provides memory allocation functions in zalloc and zfree, or + Z_NULL to use the library memory allocation functions. + + window is a user-supplied window and output buffer that is 64K bytes. 
+ */ +int ZEXPORT inflateBack9Init_(strm, window, version, stream_size) +z_stream FAR *strm; +unsigned char FAR *window; +const char *version; +int stream_size; +{ + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL || window == Z_NULL) + return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; + } + if (strm->zfree == (free_func)0) strm->zfree = zcfree; + state = (struct inflate_state FAR *)ZALLOC(strm, 1, + sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (voidpf)state; + state->window = window; + return Z_OK; +} + +/* + Build and output length and distance decoding tables for fixed code + decoding. + */ +#ifdef MAKEFIXED +#include <stdio.h> + +void makefixed9(void) +{ + unsigned sym, bits, low, size; + code *next, *lenfix, *distfix; + struct inflate_state state; + code fixed[544]; + + /* literal/length table */ + sym = 0; + while (sym < 144) state.lens[sym++] = 8; + while (sym < 256) state.lens[sym++] = 9; + while (sym < 280) state.lens[sym++] = 7; + while (sym < 288) state.lens[sym++] = 8; + next = fixed; + lenfix = next; + bits = 9; + inflate_table9(LENS, state.lens, 288, &(next), &(bits), state.work); + + /* distance table */ + sym = 0; + while (sym < 32) state.lens[sym++] = 5; + distfix = next; + bits = 5; + inflate_table9(DISTS, state.lens, 32, &(next), &(bits), state.work); + + /* write tables */ + puts(" /* inffix9.h -- table for decoding deflate64 fixed codes"); + puts(" * Generated automatically by makefixed9()."); + puts(" */"); + puts(""); + puts(" /* WARNING: this file should *not* be used by applications."); + puts(" It is part of the implementation of this library and is"); + puts(" subject to change. 
Applications should only use zlib.h."); + puts(" */"); + puts(""); + size = 1U << 9; + printf(" static const code lenfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 6) == 0) printf("\n "); + printf("{%u,%u,%d}", lenfix[low].op, lenfix[low].bits, + lenfix[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); + size = 1U << 5; + printf("\n static const code distfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 5) == 0) printf("\n "); + printf("{%u,%u,%d}", distfix[low].op, distfix[low].bits, + distfix[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); +} +#endif /* MAKEFIXED */ + +/* Macros for inflateBack(): */ + +/* Clear the input bit accumulator */ +#define INITBITS() \ + do { \ + hold = 0; \ + bits = 0; \ + } while (0) + +/* Assure that some input is available. If input is requested, but denied, + then return a Z_BUF_ERROR from inflateBack(). */ +#define PULL() \ + do { \ + if (have == 0) { \ + have = in(in_desc, &next); \ + if (have == 0) { \ + next = Z_NULL; \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* Get a byte of input into the bit accumulator, or return from inflateBack() + with an error if there is no input available. */ +#define PULLBYTE() \ + do { \ + PULL(); \ + have--; \ + hold += (unsigned long)(*next++) << bits; \ + bits += 8; \ + } while (0) + +/* Assure that there are at least n bits in the bit accumulator. If there is + not enough available input to do that, then return from inflateBack() with + an error. 
*/ +#define NEEDBITS(n) \ + do { \ + while (bits < (unsigned)(n)) \ + PULLBYTE(); \ + } while (0) + +/* Return the low n bits of the bit accumulator (n <= 16) */ +#define BITS(n) \ + ((unsigned)hold & ((1U << (n)) - 1)) + +/* Remove n bits from the bit accumulator */ +#define DROPBITS(n) \ + do { \ + hold >>= (n); \ + bits -= (unsigned)(n); \ + } while (0) + +/* Remove zero to seven bits as needed to go to a byte boundary */ +#define BYTEBITS() \ + do { \ + hold >>= bits & 7; \ + bits -= bits & 7; \ + } while (0) + +/* Assure that some output space is available, by writing out the window + if it's full. If the write fails, return from inflateBack() with a + Z_BUF_ERROR. */ +#define ROOM() \ + do { \ + if (left == 0) { \ + put = window; \ + left = WSIZE; \ + wrap = 1; \ + if (out(out_desc, put, (unsigned)left)) { \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* + strm provides the memory allocation functions and window buffer on input, + and provides information on the unused input on return. For Z_DATA_ERROR + returns, strm will also provide an error message. + + in() and out() are the call-back input and output functions. When + inflateBack() needs more input, it calls in(). When inflateBack() has + filled the window with output, or when it completes with data in the + window, it calls out() to write out the data. The application must not + change the provided input until in() is called again or inflateBack() + returns. The application must not change the window/output buffer until + inflateBack() returns. + + in() and out() are called with a descriptor parameter provided in the + inflateBack() call. This parameter can be a structure that provides the + information required to do the read or write, as well as accumulated + information on the input and output such as totals and check values. + + in() should return zero on failure. out() should return non-zero on + failure. 
If either in() or out() fails, than inflateBack() returns a + Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it + was in() or out() that caused in the error. Otherwise, inflateBack() + returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format + error, or Z_MEM_ERROR if it could not allocate memory for the state. + inflateBack() can also return Z_STREAM_ERROR if the input parameters + are not correct, i.e. strm is Z_NULL or the state was not initialized. + */ +int ZEXPORT inflateBack9(strm, in, in_desc, out, out_desc) +z_stream FAR *strm; +in_func in; +void FAR *in_desc; +out_func out; +void FAR *out_desc; +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *next; /* next input */ + unsigned char FAR *put; /* next output */ + unsigned have; /* available input */ + unsigned long left; /* available output */ + inflate_mode mode; /* current inflate mode */ + int lastblock; /* true if processing last block */ + int wrap; /* true if the window has wrapped */ + unsigned char FAR *window; /* allocated sliding window, if needed */ + unsigned long hold; /* bit buffer */ + unsigned bits; /* bits in bit buffer */ + unsigned extra; /* extra bits needed */ + unsigned long length; /* literal or length of data to copy */ + unsigned long offset; /* distance back to copy string from */ + unsigned long copy; /* number of stored or match bytes to copy */ + unsigned char FAR *from; /* where to copy match bytes from */ + code const FAR *lencode; /* starting table for length/literal codes */ + code const FAR *distcode; /* starting table for distance codes */ + unsigned lenbits; /* index bits for lencode */ + unsigned distbits; /* index bits for distcode */ + code here; /* current decoding table entry */ + code last; /* parent table entry */ + unsigned len; /* length to copy for repeats, bits to drop */ + int ret; /* return code */ + static const unsigned short order[19] = /* permutation of code lengths */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 
11, 4, 12, 3, 13, 2, 14, 1, 15}; +#include "inffix9.h" + + /* Check that the strm exists and that the state was initialized */ + if (strm == Z_NULL || strm->state == Z_NULL) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* Reset the state */ + strm->msg = Z_NULL; + mode = TYPE; + lastblock = 0; + wrap = 0; + window = state->window; + next = strm->next_in; + have = next != Z_NULL ? strm->avail_in : 0; + hold = 0; + bits = 0; + put = window; + left = WSIZE; + lencode = Z_NULL; + distcode = Z_NULL; + + /* Inflate until end of block marked as last */ + for (;;) + switch (mode) { + case TYPE: + /* determine and dispatch block type */ + if (lastblock) { + BYTEBITS(); + mode = DONE; + break; + } + NEEDBITS(3); + lastblock = BITS(1); + DROPBITS(1); + switch (BITS(2)) { + case 0: /* stored block */ + Tracev((stderr, "inflate: stored block%s\n", + lastblock ? " (last)" : "")); + mode = STORED; + break; + case 1: /* fixed block */ + lencode = lenfix; + lenbits = 9; + distcode = distfix; + distbits = 5; + Tracev((stderr, "inflate: fixed codes block%s\n", + lastblock ? " (last)" : "")); + mode = LEN; /* decode codes */ + break; + case 2: /* dynamic block */ + Tracev((stderr, "inflate: dynamic codes block%s\n", + lastblock ? 
" (last)" : "")); + mode = TABLE; + break; + case 3: + strm->msg = (char *)"invalid block type"; + mode = BAD; + } + DROPBITS(2); + break; + + case STORED: + /* get and verify stored block length */ + BYTEBITS(); /* go to byte boundary */ + NEEDBITS(32); + if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { + strm->msg = (char *)"invalid stored block lengths"; + mode = BAD; + break; + } + length = (unsigned)hold & 0xffff; + Tracev((stderr, "inflate: stored length %lu\n", + length)); + INITBITS(); + + /* copy stored block from input to output */ + while (length != 0) { + copy = length; + PULL(); + ROOM(); + if (copy > have) copy = have; + if (copy > left) copy = left; + zmemcpy(put, next, copy); + have -= copy; + next += copy; + left -= copy; + put += copy; + length -= copy; + } + Tracev((stderr, "inflate: stored end\n")); + mode = TYPE; + break; + + case TABLE: + /* get dynamic table entries descriptor */ + NEEDBITS(14); + state->nlen = BITS(5) + 257; + DROPBITS(5); + state->ndist = BITS(5) + 1; + DROPBITS(5); + state->ncode = BITS(4) + 4; + DROPBITS(4); + if (state->nlen > 286) { + strm->msg = (char *)"too many length symbols"; + mode = BAD; + break; + } + Tracev((stderr, "inflate: table sizes ok\n")); + + /* get code length code lengths (not a typo) */ + state->have = 0; + while (state->have < state->ncode) { + NEEDBITS(3); + state->lens[order[state->have++]] = (unsigned short)BITS(3); + DROPBITS(3); + } + while (state->have < 19) + state->lens[order[state->have++]] = 0; + state->next = state->codes; + lencode = (code const FAR *)(state->next); + lenbits = 7; + ret = inflate_table9(CODES, state->lens, 19, &(state->next), + &(lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid code lengths set"; + mode = BAD; + break; + } + Tracev((stderr, "inflate: code lengths ok\n")); + + /* get length and distance code code lengths */ + state->have = 0; + while (state->have < state->nlen + state->ndist) { + for (;;) { + here = lencode[BITS(lenbits)]; + if 
((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.val < 16) { + NEEDBITS(here.bits); + DROPBITS(here.bits); + state->lens[state->have++] = here.val; + } + else { + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); + if (state->have == 0) { + strm->msg = (char *)"invalid bit length repeat"; + mode = BAD; + break; + } + len = (unsigned)(state->lens[state->have - 1]); + copy = 3 + BITS(2); + DROPBITS(2); + } + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); + len = 0; + copy = 3 + BITS(3); + DROPBITS(3); + } + else { + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); + len = 0; + copy = 11 + BITS(7); + DROPBITS(7); + } + if (state->have + copy > state->nlen + state->ndist) { + strm->msg = (char *)"invalid bit length repeat"; + mode = BAD; + break; + } + while (copy--) + state->lens[state->have++] = (unsigned short)len; + } + } + + /* handle error breaks in while */ + if (mode == BAD) break; + + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftree9.h + concerning the ENOUGH constants, which depend on those values */ + state->next = state->codes; + lencode = (code const FAR *)(state->next); + lenbits = 9; + ret = inflate_table9(LENS, state->lens, state->nlen, + &(state->next), &(lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid literal/lengths set"; + mode = BAD; + break; + } + distcode = (code const FAR *)(state->next); + distbits = 6; + ret = inflate_table9(DISTS, state->lens + state->nlen, + state->ndist, &(state->next), &(distbits), + state->work); + if (ret) { + strm->msg = (char *)"invalid distances set"; + mode = BAD; + break; + } + Tracev((stderr, "inflate: codes ok\n")); + mode = LEN; + + case LEN: + /* get a literal, length, 
or end-of-block code */ + for (;;) { + here = lencode[BITS(lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.op && (here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = lencode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + length = (unsigned)here.val; + + /* process literal */ + if (here.op == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? + "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + ROOM(); + *put++ = (unsigned char)(length); + left--; + mode = LEN; + break; + } + + /* process end of block */ + if (here.op & 32) { + Tracevv((stderr, "inflate: end of block\n")); + mode = TYPE; + break; + } + + /* invalid code */ + if (here.op & 64) { + strm->msg = (char *)"invalid literal/length code"; + mode = BAD; + break; + } + + /* length code -- get extra bits, if any */ + extra = (unsigned)(here.op) & 31; + if (extra != 0) { + NEEDBITS(extra); + length += BITS(extra); + DROPBITS(extra); + } + Tracevv((stderr, "inflate: length %lu\n", length)); + + /* get distance code */ + for (;;) { + here = distcode[BITS(distbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if ((here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = distcode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + if (here.op & 64) { + strm->msg = (char *)"invalid distance code"; + mode = BAD; + break; + } + offset = (unsigned)here.val; + + /* get distance extra bits, if any */ + extra = (unsigned)(here.op) & 15; + if (extra != 0) { + NEEDBITS(extra); + offset += BITS(extra); + DROPBITS(extra); + } + if (offset > WSIZE - (wrap ? 
0: left)) { + strm->msg = (char *)"invalid distance too far back"; + mode = BAD; + break; + } + Tracevv((stderr, "inflate: distance %lu\n", offset)); + + /* copy match from window to output */ + do { + ROOM(); + copy = WSIZE - offset; + if (copy < left) { + from = put + copy; + copy = left - copy; + } + else { + from = put - offset; + copy = left; + } + if (copy > length) copy = length; + length -= copy; + left -= copy; + do { + *put++ = *from++; + } while (--copy); + } while (length != 0); + break; + + case DONE: + /* inflate stream terminated properly -- write leftover output */ + ret = Z_STREAM_END; + if (left < WSIZE) { + if (out(out_desc, window, (unsigned)(WSIZE - left))) + ret = Z_BUF_ERROR; + } + goto inf_leave; + + case BAD: + ret = Z_DATA_ERROR; + goto inf_leave; + + default: /* can't happen, but makes compilers happy */ + ret = Z_STREAM_ERROR; + goto inf_leave; + } + + /* Return unused input */ + inf_leave: + strm->next_in = next; + strm->avail_in = have; + return ret; +} + +int ZEXPORT inflateBack9End(strm) +z_stream FAR *strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) + return Z_STREAM_ERROR; + ZFREE(strm, strm->state); + strm->state = Z_NULL; + Tracev((stderr, "inflate: end\n")); + return Z_OK; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/infback9.h nodejs-0.11.15/deps/zlib/contrib/infback9/infback9.h --- nodejs-0.11.13/deps/zlib/contrib/infback9/infback9.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/infback9.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,37 @@ +/* infback9.h -- header for using inflateBack9 functions + * Copyright (C) 2003 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * This header file and associated patches provide a decoder for PKWare's + * undocumented deflate64 compression method (method 9). Use with infback9.c, + * inftree9.h, inftree9.c, and inffix9.h. These patches are not supported. 
+ * This should be compiled with zlib, since it uses zutil.h and zutil.o. + * This code has not yet been tested on 16-bit architectures. See the + * comments in zlib.h for inflateBack() usage. These functions are used + * identically, except that there is no windowBits parameter, and a 64K + * window must be provided. Also if int's are 16 bits, then a zero for + * the third parameter of the "out" function actually means 65536UL. + * zlib.h must be included before this header file. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +ZEXTERN int ZEXPORT inflateBack9 OF((z_stream FAR *strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc)); +ZEXTERN int ZEXPORT inflateBack9End OF((z_stream FAR *strm)); +ZEXTERN int ZEXPORT inflateBack9Init_ OF((z_stream FAR *strm, + unsigned char FAR *window, + const char *version, + int stream_size)); +#define inflateBack9Init(strm, window) \ + inflateBack9Init_((strm), (window), \ + ZLIB_VERSION, sizeof(z_stream)) + +#ifdef __cplusplus +} +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/inffix9.h nodejs-0.11.15/deps/zlib/contrib/infback9/inffix9.h --- nodejs-0.11.13/deps/zlib/contrib/infback9/inffix9.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/inffix9.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,107 @@ + /* inffix9.h -- table for decoding deflate64 fixed codes + * Generated automatically by makefixed9(). + */ + + /* WARNING: this file should *not* be used by applications. + It is part of the implementation of this library and is + subject to change. Applications should only use zlib.h. 
+ */ + + static const code lenfix[512] = { + {96,7,0},{0,8,80},{0,8,16},{132,8,115},{130,7,31},{0,8,112}, + {0,8,48},{0,9,192},{128,7,10},{0,8,96},{0,8,32},{0,9,160}, + {0,8,0},{0,8,128},{0,8,64},{0,9,224},{128,7,6},{0,8,88}, + {0,8,24},{0,9,144},{131,7,59},{0,8,120},{0,8,56},{0,9,208}, + {129,7,17},{0,8,104},{0,8,40},{0,9,176},{0,8,8},{0,8,136}, + {0,8,72},{0,9,240},{128,7,4},{0,8,84},{0,8,20},{133,8,227}, + {131,7,43},{0,8,116},{0,8,52},{0,9,200},{129,7,13},{0,8,100}, + {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232}, + {128,7,8},{0,8,92},{0,8,28},{0,9,152},{132,7,83},{0,8,124}, + {0,8,60},{0,9,216},{130,7,23},{0,8,108},{0,8,44},{0,9,184}, + {0,8,12},{0,8,140},{0,8,76},{0,9,248},{128,7,3},{0,8,82}, + {0,8,18},{133,8,163},{131,7,35},{0,8,114},{0,8,50},{0,9,196}, + {129,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},{0,8,130}, + {0,8,66},{0,9,228},{128,7,7},{0,8,90},{0,8,26},{0,9,148}, + {132,7,67},{0,8,122},{0,8,58},{0,9,212},{130,7,19},{0,8,106}, + {0,8,42},{0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244}, + {128,7,5},{0,8,86},{0,8,22},{65,8,0},{131,7,51},{0,8,118}, + {0,8,54},{0,9,204},{129,7,15},{0,8,102},{0,8,38},{0,9,172}, + {0,8,6},{0,8,134},{0,8,70},{0,9,236},{128,7,9},{0,8,94}, + {0,8,30},{0,9,156},{132,7,99},{0,8,126},{0,8,62},{0,9,220}, + {130,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, + {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{133,8,131}, + {130,7,31},{0,8,113},{0,8,49},{0,9,194},{128,7,10},{0,8,97}, + {0,8,33},{0,9,162},{0,8,1},{0,8,129},{0,8,65},{0,9,226}, + {128,7,6},{0,8,89},{0,8,25},{0,9,146},{131,7,59},{0,8,121}, + {0,8,57},{0,9,210},{129,7,17},{0,8,105},{0,8,41},{0,9,178}, + {0,8,9},{0,8,137},{0,8,73},{0,9,242},{128,7,4},{0,8,85}, + {0,8,21},{144,8,3},{131,7,43},{0,8,117},{0,8,53},{0,9,202}, + {129,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133}, + {0,8,69},{0,9,234},{128,7,8},{0,8,93},{0,8,29},{0,9,154}, + {132,7,83},{0,8,125},{0,8,61},{0,9,218},{130,7,23},{0,8,109}, + 
{0,8,45},{0,9,186},{0,8,13},{0,8,141},{0,8,77},{0,9,250}, + {128,7,3},{0,8,83},{0,8,19},{133,8,195},{131,7,35},{0,8,115}, + {0,8,51},{0,9,198},{129,7,11},{0,8,99},{0,8,35},{0,9,166}, + {0,8,3},{0,8,131},{0,8,67},{0,9,230},{128,7,7},{0,8,91}, + {0,8,27},{0,9,150},{132,7,67},{0,8,123},{0,8,59},{0,9,214}, + {130,7,19},{0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139}, + {0,8,75},{0,9,246},{128,7,5},{0,8,87},{0,8,23},{77,8,0}, + {131,7,51},{0,8,119},{0,8,55},{0,9,206},{129,7,15},{0,8,103}, + {0,8,39},{0,9,174},{0,8,7},{0,8,135},{0,8,71},{0,9,238}, + {128,7,9},{0,8,95},{0,8,31},{0,9,158},{132,7,99},{0,8,127}, + {0,8,63},{0,9,222},{130,7,27},{0,8,111},{0,8,47},{0,9,190}, + {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80}, + {0,8,16},{132,8,115},{130,7,31},{0,8,112},{0,8,48},{0,9,193}, + {128,7,10},{0,8,96},{0,8,32},{0,9,161},{0,8,0},{0,8,128}, + {0,8,64},{0,9,225},{128,7,6},{0,8,88},{0,8,24},{0,9,145}, + {131,7,59},{0,8,120},{0,8,56},{0,9,209},{129,7,17},{0,8,104}, + {0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},{0,9,241}, + {128,7,4},{0,8,84},{0,8,20},{133,8,227},{131,7,43},{0,8,116}, + {0,8,52},{0,9,201},{129,7,13},{0,8,100},{0,8,36},{0,9,169}, + {0,8,4},{0,8,132},{0,8,68},{0,9,233},{128,7,8},{0,8,92}, + {0,8,28},{0,9,153},{132,7,83},{0,8,124},{0,8,60},{0,9,217}, + {130,7,23},{0,8,108},{0,8,44},{0,9,185},{0,8,12},{0,8,140}, + {0,8,76},{0,9,249},{128,7,3},{0,8,82},{0,8,18},{133,8,163}, + {131,7,35},{0,8,114},{0,8,50},{0,9,197},{129,7,11},{0,8,98}, + {0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, + {128,7,7},{0,8,90},{0,8,26},{0,9,149},{132,7,67},{0,8,122}, + {0,8,58},{0,9,213},{130,7,19},{0,8,106},{0,8,42},{0,9,181}, + {0,8,10},{0,8,138},{0,8,74},{0,9,245},{128,7,5},{0,8,86}, + {0,8,22},{65,8,0},{131,7,51},{0,8,118},{0,8,54},{0,9,205}, + {129,7,15},{0,8,102},{0,8,38},{0,9,173},{0,8,6},{0,8,134}, + {0,8,70},{0,9,237},{128,7,9},{0,8,94},{0,8,30},{0,9,157}, + {132,7,99},{0,8,126},{0,8,62},{0,9,221},{130,7,27},{0,8,110}, + 
{0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253}, + {96,7,0},{0,8,81},{0,8,17},{133,8,131},{130,7,31},{0,8,113}, + {0,8,49},{0,9,195},{128,7,10},{0,8,97},{0,8,33},{0,9,163}, + {0,8,1},{0,8,129},{0,8,65},{0,9,227},{128,7,6},{0,8,89}, + {0,8,25},{0,9,147},{131,7,59},{0,8,121},{0,8,57},{0,9,211}, + {129,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},{0,8,137}, + {0,8,73},{0,9,243},{128,7,4},{0,8,85},{0,8,21},{144,8,3}, + {131,7,43},{0,8,117},{0,8,53},{0,9,203},{129,7,13},{0,8,101}, + {0,8,37},{0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235}, + {128,7,8},{0,8,93},{0,8,29},{0,9,155},{132,7,83},{0,8,125}, + {0,8,61},{0,9,219},{130,7,23},{0,8,109},{0,8,45},{0,9,187}, + {0,8,13},{0,8,141},{0,8,77},{0,9,251},{128,7,3},{0,8,83}, + {0,8,19},{133,8,195},{131,7,35},{0,8,115},{0,8,51},{0,9,199}, + {129,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, + {0,8,67},{0,9,231},{128,7,7},{0,8,91},{0,8,27},{0,9,151}, + {132,7,67},{0,8,123},{0,8,59},{0,9,215},{130,7,19},{0,8,107}, + {0,8,43},{0,9,183},{0,8,11},{0,8,139},{0,8,75},{0,9,247}, + {128,7,5},{0,8,87},{0,8,23},{77,8,0},{131,7,51},{0,8,119}, + {0,8,55},{0,9,207},{129,7,15},{0,8,103},{0,8,39},{0,9,175}, + {0,8,7},{0,8,135},{0,8,71},{0,9,239},{128,7,9},{0,8,95}, + {0,8,31},{0,9,159},{132,7,99},{0,8,127},{0,8,63},{0,9,223}, + {130,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143}, + {0,8,79},{0,9,255} + }; + + static const code distfix[32] = { + {128,5,1},{135,5,257},{131,5,17},{139,5,4097},{129,5,5}, + {137,5,1025},{133,5,65},{141,5,16385},{128,5,3},{136,5,513}, + {132,5,33},{140,5,8193},{130,5,9},{138,5,2049},{134,5,129}, + {142,5,32769},{128,5,2},{135,5,385},{131,5,25},{139,5,6145}, + {129,5,7},{137,5,1537},{133,5,97},{141,5,24577},{128,5,4}, + {136,5,769},{132,5,49},{140,5,12289},{130,5,13},{138,5,3073}, + {134,5,193},{142,5,49153} + }; diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/inflate9.h nodejs-0.11.15/deps/zlib/contrib/infback9/inflate9.h --- nodejs-0.11.13/deps/zlib/contrib/infback9/inflate9.h 1970-01-01 
00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/inflate9.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,47 @@ +/* inflate9.h -- internal inflate state definition + * Copyright (C) 1995-2003 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* Possible inflate modes between inflate() calls */ +typedef enum { + TYPE, /* i: waiting for type bits, including last-flag bit */ + STORED, /* i: waiting for stored size (length and complement) */ + TABLE, /* i: waiting for dynamic block table lengths */ + LEN, /* i: waiting for length/lit code */ + DONE, /* finished check, done -- remain here until reset */ + BAD /* got a data error -- remain here until reset */ +} inflate_mode; + +/* + State transitions between above modes - + + (most modes can go to the BAD mode -- not shown for clarity) + + Read deflate blocks: + TYPE -> STORED or TABLE or LEN or DONE + STORED -> TYPE + TABLE -> LENLENS -> CODELENS -> LEN + Read deflate codes: + LEN -> LEN or TYPE + */ + +/* state maintained between inflate() calls. Approximately 7K bytes. 
*/ +struct inflate_state { + /* sliding window */ + unsigned char FAR *window; /* allocated sliding window, if needed */ + /* dynamic table building */ + unsigned ncode; /* number of code length code lengths */ + unsigned nlen; /* number of length code lengths */ + unsigned ndist; /* number of distance code lengths */ + unsigned have; /* number of code lengths in lens[] */ + code FAR *next; /* next available space in codes[] */ + unsigned short lens[320]; /* temporary storage for code lengths */ + unsigned short work[288]; /* work area for code table building */ + code codes[ENOUGH]; /* space for code tables */ +}; diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/inftree9.c nodejs-0.11.15/deps/zlib/contrib/infback9/inftree9.c --- nodejs-0.11.13/deps/zlib/contrib/infback9/inftree9.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/inftree9.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,324 @@ +/* inftree9.c -- generate Huffman trees for efficient decoding + * Copyright (C) 1995-2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "inftree9.h" + +#define MAXBITS 15 + +const char inflate9_copyright[] = + " inflate9 1.2.8 Copyright 1995-2013 Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* + Build a set of tables to decode the provided canonical Huffman code. + The code lengths are lens[0..codes-1]. The result starts at *table, + whose indices are 0..2^bits-1. work is a writable array of at least + lens shorts, which is used as a work area. type is the type of code + to be generated, CODES, LENS, or DISTS. On return, zero is success, + -1 is an invalid code, and +1 means that ENOUGH isn't enough. 
table + on return points to the next available entry's address. bits is the + requested root table index bits, and on return it is the actual root + table index bits. It will differ if the request is greater than the + longest code or if it is less than the shortest code. + */ +int inflate_table9(type, lens, codes, table, bits, work) +codetype type; +unsigned short FAR *lens; +unsigned codes; +code FAR * FAR *table; +unsigned FAR *bits; +unsigned short FAR *work; +{ + unsigned len; /* a code's length in bits */ + unsigned sym; /* index of code symbols */ + unsigned min, max; /* minimum and maximum code lengths */ + unsigned root; /* number of index bits for root table */ + unsigned curr; /* number of index bits for current table */ + unsigned drop; /* code bits to drop for sub-table */ + int left; /* number of prefix codes available */ + unsigned used; /* code entries in table used */ + unsigned huff; /* Huffman code */ + unsigned incr; /* for incrementing code, index */ + unsigned fill; /* index for replicating entries */ + unsigned low; /* low bits for current root entry */ + unsigned mask; /* mask for low root bits */ + code this; /* table entry for duplication */ + code FAR *next; /* next available space in table */ + const unsigned short FAR *base; /* base value table to use */ + const unsigned short FAR *extra; /* extra bits table to use */ + int end; /* use base and extra for symbol > end */ + unsigned short count[MAXBITS+1]; /* number of codes of each length */ + unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ + static const unsigned short lbase[31] = { /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, + 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, + 131, 163, 195, 227, 3, 0, 0}; + static const unsigned short lext[31] = { /* Length codes 257..285 extra */ + 128, 128, 128, 128, 128, 128, 128, 128, 129, 129, 129, 129, + 130, 130, 130, 130, 131, 131, 131, 131, 132, 132, 132, 132, + 133, 133, 133, 133, 144, 
72, 78}; + static const unsigned short dbase[32] = { /* Distance codes 0..31 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, + 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, + 4097, 6145, 8193, 12289, 16385, 24577, 32769, 49153}; + static const unsigned short dext[32] = { /* Distance codes 0..31 extra */ + 128, 128, 128, 128, 129, 129, 130, 130, 131, 131, 132, 132, + 133, 133, 134, 134, 135, 135, 136, 136, 137, 137, 138, 138, + 139, 139, 140, 140, 141, 141, 142, 142}; + + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. + + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. + + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. 
finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. + */ + + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) + count[len] = 0; + for (sym = 0; sym < codes; sym++) + count[lens[sym]]++; + + /* bound code lengths, force root to be within code lengths */ + root = *bits; + for (max = MAXBITS; max >= 1; max--) + if (count[max] != 0) break; + if (root > max) root = max; + if (max == 0) return -1; /* no codes! */ + for (min = 1; min <= MAXBITS; min++) + if (count[min] != 0) break; + if (root < min) root = min; + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) return -1; /* over-subscribed */ + } + if (left > 0 && (type == CODES || max != 1)) + return -1; /* incomplete set */ + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) + offs[len + 1] = offs[len] + count[len]; + + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) + if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; + + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. + + root is the number of index bits for the root table. When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. 
This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. + + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftree9.h + for more information. + + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. 
+ */ + + /* set up for code type */ + switch (type) { + case CODES: + base = extra = work; /* dummy value--not used */ + end = 19; + break; + case LENS: + base = lbase; + base -= 257; + extra = lext; + extra -= 257; + end = 256; + break; + default: /* DISTS */ + base = dbase; + extra = dext; + end = -1; + } + + /* initialize state for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = *table; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = (unsigned)(-1); /* trigger new sub-table when len > root */ + used = 1U << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ + + /* check available table space */ + if ((type == LENS && used >= ENOUGH_LENS) || + (type == DISTS && used >= ENOUGH_DISTS)) + return 1; + + /* process all codes and make table entries */ + for (;;) { + /* create table entry */ + this.bits = (unsigned char)(len - drop); + if ((int)(work[sym]) < end) { + this.op = (unsigned char)0; + this.val = work[sym]; + } + else if ((int)(work[sym]) > end) { + this.op = (unsigned char)(extra[work[sym]]); + this.val = base[work[sym]]; + } + else { + this.op = (unsigned char)(32 + 64); /* end of block */ + this.val = 0; + } + + /* replicate for those indices with low len bits equal to huff */ + incr = 1U << (len - drop); + fill = 1U << curr; + do { + fill -= incr; + next[(huff >> drop) + fill] = this; + } while (fill != 0); + + /* backwards increment the len-bit code huff */ + incr = 1U << (len - 1); + while (huff & incr) + incr >>= 1; + if (incr != 0) { + huff &= incr - 1; + huff += incr; + } + else + huff = 0; + + /* go to next symbol, update count, len */ + sym++; + if (--(count[len]) == 0) { + if (len == max) break; + len = lens[work[sym]]; + } + + /* create new sub-table if needed */ + if (len > root && (huff & mask) != low) { + /* if first time, transition to 
sub-tables */ + if (drop == 0) + drop = root; + + /* increment past last table */ + next += 1U << curr; + + /* determine length of next table */ + curr = len - drop; + left = (int)(1 << curr); + while (curr + drop < max) { + left -= count[curr + drop]; + if (left <= 0) break; + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1U << curr; + if ((type == LENS && used >= ENOUGH_LENS) || + (type == DISTS && used >= ENOUGH_DISTS)) + return 1; + + /* point entry in root table to sub-table */ + low = huff & mask; + (*table)[low].op = (unsigned char)curr; + (*table)[low].bits = (unsigned char)root; + (*table)[low].val = (unsigned short)(next - *table); + } + } + + /* + Fill in rest of table for incomplete codes. This loop is similar to the + loop above in incrementing huff for table indices. It is assumed that + len is equal to curr + drop, so there is no loop needed to increment + through high index bits. When the current sub-table is filled, the loop + drops back to the root table to fill in any remaining entries there. 
+ */ + this.op = (unsigned char)64; /* invalid code marker */ + this.bits = (unsigned char)(len - drop); + this.val = (unsigned short)0; + while (huff != 0) { + /* when done with sub-table, drop back to root table */ + if (drop != 0 && (huff & mask) != low) { + drop = 0; + len = root; + next = *table; + curr = root; + this.bits = (unsigned char)len; + } + + /* put invalid code marker in table */ + next[huff >> drop] = this; + + /* backwards increment the len-bit code huff */ + incr = 1U << (len - 1); + while (huff & incr) + incr >>= 1; + if (incr != 0) { + huff &= incr - 1; + huff += incr; + } + else + huff = 0; + } + + /* set return parameters */ + *table += used; + *bits = root; + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/inftree9.h nodejs-0.11.15/deps/zlib/contrib/infback9/inftree9.h --- nodejs-0.11.13/deps/zlib/contrib/infback9/inftree9.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/inftree9.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,61 @@ +/* inftree9.h -- header to use inftree9.c + * Copyright (C) 1995-2008 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* Structure for decoding tables. Each entry provides either the + information needed to do the operation requested by the code that + indexed that table entry, or it provides a pointer to another + table that indexes more bits of the code. op indicates whether + the entry is a pointer to another table, a literal, a length or + distance, an end-of-block, or an invalid code. For a table + pointer, the low four bits of op is the number of index bits of + that table. For a length or distance, the low four bits of op + is the number of extra bits to get after the code. 
bits is + the number of bits in this code or part of the code to drop off + of the bit buffer. val is the actual byte to output in the case + of a literal, the base length or distance, or the offset from + the current table to the next table. Each entry is four bytes. */ +typedef struct { + unsigned char op; /* operation, extra bits, table bits */ + unsigned char bits; /* bits in this part of the code */ + unsigned short val; /* offset in table or code value */ +} code; + +/* op values as set by inflate_table(): + 00000000 - literal + 0000tttt - table link, tttt != 0 is the number of table index bits + 100eeeee - length or distance, eeee is the number of extra bits + 01100000 - end of block + 01000000 - invalid code + */ + +/* Maximum size of the dynamic table. The maximum number of code structures is + 1446, which is the sum of 852 for literal/length codes and 594 for distance + codes. These values were found by exhaustive searches using the program + examples/enough.c found in the zlib distribtution. The arguments to that + program are the number of symbols, the initial root table size, and the + maximum bit length of a code. "enough 286 9 15" for literal/length codes + returns returns 852, and "enough 32 6 15" for distance codes returns 594. + The initial root table size (9 or 6) is found in the fifth argument of the + inflate_table() calls in infback9.c. If the root table size is changed, + then these maximum sizes would be need to be recalculated and updated. 
*/ +#define ENOUGH_LENS 852 +#define ENOUGH_DISTS 594 +#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) + +/* Type of code to build for inflate_table9() */ +typedef enum { + CODES, + LENS, + DISTS +} codetype; + +extern int inflate_table9 OF((codetype type, unsigned short FAR *lens, + unsigned codes, code FAR * FAR *table, + unsigned FAR *bits, unsigned short FAR *work)); diff -Nru nodejs-0.11.13/deps/zlib/contrib/infback9/README nodejs-0.11.15/deps/zlib/contrib/infback9/README --- nodejs-0.11.13/deps/zlib/contrib/infback9/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/infback9/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1 @@ +See infback9.h for what this is and how to use it. diff -Nru nodejs-0.11.13/deps/zlib/contrib/inflate86/inffas86.c nodejs-0.11.15/deps/zlib/contrib/inflate86/inffas86.c --- nodejs-0.11.13/deps/zlib/contrib/inflate86/inffas86.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/inflate86/inffas86.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1157 @@ +/* inffas86.c is a hand tuned assembler version of + * + * inffast.c -- fast decoding + * Copyright (C) 1995-2003 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + * + * Copyright (C) 2003 Chris Anderson <christop@charm.net> + * Please use the copyright conditions above. + * + * Dec-29-2003 -- I added AMD64 inflate asm support. This version is also + * slightly quicker on x86 systems because, instead of using rep movsb to copy + * data, it uses rep movsw, which moves data in 2-byte chunks instead of single + * bytes. I've tested the AMD64 code on a Fedora Core 1 + the x86_64 updates + * from http://fedora.linux.duke.edu/fc1_x86_64 + * which is running on an Athlon 64 3000+ / Gigabyte GA-K8VT800M system with + * 1GB ram. The 64-bit version is about 4% faster than the 32-bit version, + * when decompressing mozilla-source-1.3.tar.gz. 
+ * + * Mar-13-2003 -- Most of this is derived from inffast.S which is derived from + * the gcc -S output of zlib-1.2.0/inffast.c. Zlib-1.2.0 is in beta release at + * the moment. I have successfully compiled and tested this code with gcc2.96, + * gcc3.2, icc5.0, msvc6.0. It is very close to the speed of inffast.S + * compiled with gcc -DNO_MMX, but inffast.S is still faster on the P3 with MMX + * enabled. I will attempt to merge the MMX code into this version. Newer + * versions of this and inffast.S can be found at + * http://www.eetbeetee.com/zlib/ and http://www.charm.net/~christop/zlib/ + */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +/* Mark Adler's comments from inffast.c: */ + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. + + Entry assumptions: + + state->mode == LEN + strm->avail_in >= 6 + strm->avail_out >= 258 + start >= strm->avail_out + state->bits < 8 + + On return, state->mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm->avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. 
+ + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm->avail_out >= 258 for each loop to avoid checking for + output space. + */ +void inflate_fast(strm, start) +z_streamp strm; +unsigned start; /* inflate()'s starting value for strm->avail_out */ +{ + struct inflate_state FAR *state; + struct inffast_ar { +/* 64 32 x86 x86_64 */ +/* ar offset register */ +/* 0 0 */ void *esp; /* esp save */ +/* 8 4 */ void *ebp; /* ebp save */ +/* 16 8 */ unsigned char FAR *in; /* esi rsi local strm->next_in */ +/* 24 12 */ unsigned char FAR *last; /* r9 while in < last */ +/* 32 16 */ unsigned char FAR *out; /* edi rdi local strm->next_out */ +/* 40 20 */ unsigned char FAR *beg; /* inflate()'s init next_out */ +/* 48 24 */ unsigned char FAR *end; /* r10 while out < end */ +/* 56 28 */ unsigned char FAR *window;/* size of window, wsize!=0 */ +/* 64 32 */ code const FAR *lcode; /* ebp rbp local strm->lencode */ +/* 72 36 */ code const FAR *dcode; /* r11 local strm->distcode */ +/* 80 40 */ unsigned long hold; /* edx rdx local strm->hold */ +/* 88 44 */ unsigned bits; /* ebx rbx local strm->bits */ +/* 92 48 */ unsigned wsize; /* window size */ +/* 96 52 */ unsigned write; /* window write index */ +/*100 56 */ unsigned lmask; /* r12 mask for lcode */ +/*104 60 */ unsigned dmask; /* r13 mask for dcode */ +/*108 64 */ unsigned len; /* r14 match length */ +/*112 68 */ unsigned dist; /* r15 match distance */ +/*116 72 */ unsigned status; /* set when state chng*/ + } ar; + +#if defined( __GNUC__ ) && defined( __amd64__ ) && ! 
defined( __i386 ) +#define PAD_AVAIL_IN 6 +#define PAD_AVAIL_OUT 258 +#else +#define PAD_AVAIL_IN 5 +#define PAD_AVAIL_OUT 257 +#endif + + /* copy state to local variables */ + state = (struct inflate_state FAR *)strm->state; + ar.in = strm->next_in; + ar.last = ar.in + (strm->avail_in - PAD_AVAIL_IN); + ar.out = strm->next_out; + ar.beg = ar.out - (start - strm->avail_out); + ar.end = ar.out + (strm->avail_out - PAD_AVAIL_OUT); + ar.wsize = state->wsize; + ar.write = state->wnext; + ar.window = state->window; + ar.hold = state->hold; + ar.bits = state->bits; + ar.lcode = state->lencode; + ar.dcode = state->distcode; + ar.lmask = (1U << state->lenbits) - 1; + ar.dmask = (1U << state->distbits) - 1; + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + + /* align in on 1/2 hold size boundary */ + while (((unsigned long)(void *)ar.in & (sizeof(ar.hold) / 2 - 1)) != 0) { + ar.hold += (unsigned long)*ar.in++ << ar.bits; + ar.bits += 8; + } + +#if defined( __GNUC__ ) && defined( __amd64__ ) && ! 
defined( __i386 ) + __asm__ __volatile__ ( +" leaq %0, %%rax\n" +" movq %%rbp, 8(%%rax)\n" /* save regs rbp and rsp */ +" movq %%rsp, (%%rax)\n" +" movq %%rax, %%rsp\n" /* make rsp point to &ar */ +" movq 16(%%rsp), %%rsi\n" /* rsi = in */ +" movq 32(%%rsp), %%rdi\n" /* rdi = out */ +" movq 24(%%rsp), %%r9\n" /* r9 = last */ +" movq 48(%%rsp), %%r10\n" /* r10 = end */ +" movq 64(%%rsp), %%rbp\n" /* rbp = lcode */ +" movq 72(%%rsp), %%r11\n" /* r11 = dcode */ +" movq 80(%%rsp), %%rdx\n" /* rdx = hold */ +" movl 88(%%rsp), %%ebx\n" /* ebx = bits */ +" movl 100(%%rsp), %%r12d\n" /* r12d = lmask */ +" movl 104(%%rsp), %%r13d\n" /* r13d = dmask */ + /* r14d = len */ + /* r15d = dist */ +" cld\n" +" cmpq %%rdi, %%r10\n" +" je .L_one_time\n" /* if only one decode left */ +" cmpq %%rsi, %%r9\n" +" je .L_one_time\n" +" jmp .L_do_loop\n" + +".L_one_time:\n" +" movq %%r12, %%r8\n" /* r8 = lmask */ +" cmpb $32, %%bl\n" +" ja .L_get_length_code_one_time\n" + +" lodsl\n" /* eax = *(uint *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $32, %%bl\n" /* bits += 32 */ +" shlq %%cl, %%rax\n" +" orq %%rax, %%rdx\n" /* hold |= *((uint *)in)++ << bits */ +" jmp .L_get_length_code_one_time\n" + +".align 32,0x90\n" +".L_while_test:\n" +" cmpq %%rdi, %%r10\n" +" jbe .L_break_loop\n" +" cmpq %%rsi, %%r9\n" +" jbe .L_break_loop\n" + +".L_do_loop:\n" +" movq %%r12, %%r8\n" /* r8 = lmask */ +" cmpb $32, %%bl\n" +" ja .L_get_length_code\n" /* if (32 < bits) */ + +" lodsl\n" /* eax = *(uint *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $32, %%bl\n" /* bits += 32 */ +" shlq %%cl, %%rax\n" +" orq %%rax, %%rdx\n" /* hold |= *((uint *)in)++ << bits */ + +".L_get_length_code:\n" +" andq %%rdx, %%r8\n" /* r8 &= hold */ +" movl (%%rbp,%%r8,4), %%eax\n" /* eax = lcode[hold & lmask] */ + +" movb %%ah, %%cl\n" /* cl = this.bits */ +" subb %%ah, %%bl\n" /* bits -= this.bits */ +" shrq %%cl, %%rdx\n" /* hold >>= this.bits */ + +" testb %%al, 
%%al\n" +" jnz .L_test_for_length_base\n" /* if (op != 0) 45.7% */ + +" movq %%r12, %%r8\n" /* r8 = lmask */ +" shrl $16, %%eax\n" /* output this.val char */ +" stosb\n" + +".L_get_length_code_one_time:\n" +" andq %%rdx, %%r8\n" /* r8 &= hold */ +" movl (%%rbp,%%r8,4), %%eax\n" /* eax = lcode[hold & lmask] */ + +".L_dolen:\n" +" movb %%ah, %%cl\n" /* cl = this.bits */ +" subb %%ah, %%bl\n" /* bits -= this.bits */ +" shrq %%cl, %%rdx\n" /* hold >>= this.bits */ + +" testb %%al, %%al\n" +" jnz .L_test_for_length_base\n" /* if (op != 0) 45.7% */ + +" shrl $16, %%eax\n" /* output this.val char */ +" stosb\n" +" jmp .L_while_test\n" + +".align 32,0x90\n" +".L_test_for_length_base:\n" +" movl %%eax, %%r14d\n" /* len = this */ +" shrl $16, %%r14d\n" /* len = this.val */ +" movb %%al, %%cl\n" + +" testb $16, %%al\n" +" jz .L_test_for_second_level_length\n" /* if ((op & 16) == 0) 8% */ +" andb $15, %%cl\n" /* op &= 15 */ +" jz .L_decode_distance\n" /* if (!op) */ + +".L_add_bits_to_len:\n" +" subb %%cl, %%bl\n" +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" +" andl %%edx, %%eax\n" /* eax &= hold */ +" shrq %%cl, %%rdx\n" +" addl %%eax, %%r14d\n" /* len += hold & mask[op] */ + +".L_decode_distance:\n" +" movq %%r13, %%r8\n" /* r8 = dmask */ +" cmpb $32, %%bl\n" +" ja .L_get_distance_code\n" /* if (32 < bits) */ + +" lodsl\n" /* eax = *(uint *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $32, %%bl\n" /* bits += 32 */ +" shlq %%cl, %%rax\n" +" orq %%rax, %%rdx\n" /* hold |= *((uint *)in)++ << bits */ + +".L_get_distance_code:\n" +" andq %%rdx, %%r8\n" /* r8 &= hold */ +" movl (%%r11,%%r8,4), %%eax\n" /* eax = dcode[hold & dmask] */ + +".L_dodist:\n" +" movl %%eax, %%r15d\n" /* dist = this */ +" shrl $16, %%r15d\n" /* dist = this.val */ +" movb %%ah, %%cl\n" +" subb %%ah, %%bl\n" /* bits -= this.bits */ +" shrq %%cl, %%rdx\n" /* hold >>= this.bits */ +" movb %%al, %%cl\n" /* cl = this.op */ + +" testb $16, 
%%al\n" /* if ((op & 16) == 0) */ +" jz .L_test_for_second_level_dist\n" +" andb $15, %%cl\n" /* op &= 15 */ +" jz .L_check_dist_one\n" + +".L_add_bits_to_dist:\n" +" subb %%cl, %%bl\n" +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" /* (1 << op) - 1 */ +" andl %%edx, %%eax\n" /* eax &= hold */ +" shrq %%cl, %%rdx\n" +" addl %%eax, %%r15d\n" /* dist += hold & ((1 << op) - 1) */ + +".L_check_window:\n" +" movq %%rsi, %%r8\n" /* save in so from can use it's reg */ +" movq %%rdi, %%rax\n" +" subq 40(%%rsp), %%rax\n" /* nbytes = out - beg */ + +" cmpl %%r15d, %%eax\n" +" jb .L_clip_window\n" /* if (dist > nbytes) 4.2% */ + +" movl %%r14d, %%ecx\n" /* ecx = len */ +" movq %%rdi, %%rsi\n" +" subq %%r15, %%rsi\n" /* from = out - dist */ + +" sarl %%ecx\n" +" jnc .L_copy_two\n" /* if len % 2 == 0 */ + +" rep movsw\n" +" movb (%%rsi), %%al\n" +" movb %%al, (%%rdi)\n" +" incq %%rdi\n" + +" movq %%r8, %%rsi\n" /* move in back to %rsi, toss from */ +" jmp .L_while_test\n" + +".L_copy_two:\n" +" rep movsw\n" +" movq %%r8, %%rsi\n" /* move in back to %rsi, toss from */ +" jmp .L_while_test\n" + +".align 32,0x90\n" +".L_check_dist_one:\n" +" cmpl $1, %%r15d\n" /* if dist 1, is a memset */ +" jne .L_check_window\n" +" cmpq %%rdi, 40(%%rsp)\n" /* if out == beg, outside window */ +" je .L_check_window\n" + +" movl %%r14d, %%ecx\n" /* ecx = len */ +" movb -1(%%rdi), %%al\n" +" movb %%al, %%ah\n" + +" sarl %%ecx\n" +" jnc .L_set_two\n" +" movb %%al, (%%rdi)\n" +" incq %%rdi\n" + +".L_set_two:\n" +" rep stosw\n" +" jmp .L_while_test\n" + +".align 32,0x90\n" +".L_test_for_second_level_length:\n" +" testb $64, %%al\n" +" jnz .L_test_for_end_of_block\n" /* if ((op & 64) != 0) */ + +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" +" andl %%edx, %%eax\n" /* eax &= hold */ +" addl %%r14d, %%eax\n" /* eax += len */ +" movl (%%rbp,%%rax,4), %%eax\n" /* eax = lcode[val+(hold&mask[op])]*/ +" jmp .L_dolen\n" + +".align 32,0x90\n" 
+".L_test_for_second_level_dist:\n" +" testb $64, %%al\n" +" jnz .L_invalid_distance_code\n" /* if ((op & 64) != 0) */ + +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" +" andl %%edx, %%eax\n" /* eax &= hold */ +" addl %%r15d, %%eax\n" /* eax += dist */ +" movl (%%r11,%%rax,4), %%eax\n" /* eax = dcode[val+(hold&mask[op])]*/ +" jmp .L_dodist\n" + +".align 32,0x90\n" +".L_clip_window:\n" +" movl %%eax, %%ecx\n" /* ecx = nbytes */ +" movl 92(%%rsp), %%eax\n" /* eax = wsize, prepare for dist cmp */ +" negl %%ecx\n" /* nbytes = -nbytes */ + +" cmpl %%r15d, %%eax\n" +" jb .L_invalid_distance_too_far\n" /* if (dist > wsize) */ + +" addl %%r15d, %%ecx\n" /* nbytes = dist - nbytes */ +" cmpl $0, 96(%%rsp)\n" +" jne .L_wrap_around_window\n" /* if (write != 0) */ + +" movq 56(%%rsp), %%rsi\n" /* from = window */ +" subl %%ecx, %%eax\n" /* eax -= nbytes */ +" addq %%rax, %%rsi\n" /* from += wsize - nbytes */ + +" movl %%r14d, %%eax\n" /* eax = len */ +" cmpl %%ecx, %%r14d\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* eax -= nbytes */ +" rep movsb\n" +" movq %%rdi, %%rsi\n" +" subq %%r15, %%rsi\n" /* from = &out[ -dist ] */ +" jmp .L_do_copy\n" + +".align 32,0x90\n" +".L_wrap_around_window:\n" +" movl 96(%%rsp), %%eax\n" /* eax = write */ +" cmpl %%eax, %%ecx\n" +" jbe .L_contiguous_in_window\n" /* if (write >= nbytes) */ + +" movl 92(%%rsp), %%esi\n" /* from = wsize */ +" addq 56(%%rsp), %%rsi\n" /* from += window */ +" addq %%rax, %%rsi\n" /* from += write */ +" subq %%rcx, %%rsi\n" /* from -= nbytes */ +" subl %%eax, %%ecx\n" /* nbytes -= write */ + +" movl %%r14d, %%eax\n" /* eax = len */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* len -= nbytes */ +" rep movsb\n" +" movq 56(%%rsp), %%rsi\n" /* from = window */ +" movl 96(%%rsp), %%ecx\n" /* nbytes = write */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* 
len -= nbytes */ +" rep movsb\n" +" movq %%rdi, %%rsi\n" +" subq %%r15, %%rsi\n" /* from = out - dist */ +" jmp .L_do_copy\n" + +".align 32,0x90\n" +".L_contiguous_in_window:\n" +" movq 56(%%rsp), %%rsi\n" /* rsi = window */ +" addq %%rax, %%rsi\n" +" subq %%rcx, %%rsi\n" /* from += write - nbytes */ + +" movl %%r14d, %%eax\n" /* eax = len */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* len -= nbytes */ +" rep movsb\n" +" movq %%rdi, %%rsi\n" +" subq %%r15, %%rsi\n" /* from = out - dist */ +" jmp .L_do_copy\n" /* if (nbytes >= len) */ + +".align 32,0x90\n" +".L_do_copy:\n" +" movl %%eax, %%ecx\n" /* ecx = len */ +" rep movsb\n" + +" movq %%r8, %%rsi\n" /* move in back to %esi, toss from */ +" jmp .L_while_test\n" + +".L_test_for_end_of_block:\n" +" testb $32, %%al\n" +" jz .L_invalid_literal_length_code\n" +" movl $1, 116(%%rsp)\n" +" jmp .L_break_loop_with_status\n" + +".L_invalid_literal_length_code:\n" +" movl $2, 116(%%rsp)\n" +" jmp .L_break_loop_with_status\n" + +".L_invalid_distance_code:\n" +" movl $3, 116(%%rsp)\n" +" jmp .L_break_loop_with_status\n" + +".L_invalid_distance_too_far:\n" +" movl $4, 116(%%rsp)\n" +" jmp .L_break_loop_with_status\n" + +".L_break_loop:\n" +" movl $0, 116(%%rsp)\n" + +".L_break_loop_with_status:\n" +/* put in, out, bits, and hold back into ar and pop esp */ +" movq %%rsi, 16(%%rsp)\n" /* in */ +" movq %%rdi, 32(%%rsp)\n" /* out */ +" movl %%ebx, 88(%%rsp)\n" /* bits */ +" movq %%rdx, 80(%%rsp)\n" /* hold */ +" movq (%%rsp), %%rax\n" /* restore rbp and rsp */ +" movq 8(%%rsp), %%rbp\n" +" movq %%rax, %%rsp\n" + : + : "m" (ar) + : "memory", "%rax", "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" + ); +#elif ( defined( __GNUC__ ) || defined( __ICC ) ) && defined( __i386 ) + __asm__ __volatile__ ( +" leal %0, %%eax\n" +" movl %%esp, (%%eax)\n" /* save esp, ebp */ +" movl %%ebp, 4(%%eax)\n" +" movl %%eax, %%esp\n" +" movl 
8(%%esp), %%esi\n" /* esi = in */ +" movl 16(%%esp), %%edi\n" /* edi = out */ +" movl 40(%%esp), %%edx\n" /* edx = hold */ +" movl 44(%%esp), %%ebx\n" /* ebx = bits */ +" movl 32(%%esp), %%ebp\n" /* ebp = lcode */ + +" cld\n" +" jmp .L_do_loop\n" + +".align 32,0x90\n" +".L_while_test:\n" +" cmpl %%edi, 24(%%esp)\n" /* out < end */ +" jbe .L_break_loop\n" +" cmpl %%esi, 12(%%esp)\n" /* in < last */ +" jbe .L_break_loop\n" + +".L_do_loop:\n" +" cmpb $15, %%bl\n" +" ja .L_get_length_code\n" /* if (15 < bits) */ + +" xorl %%eax, %%eax\n" +" lodsw\n" /* al = *(ushort *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $16, %%bl\n" /* bits += 16 */ +" shll %%cl, %%eax\n" +" orl %%eax, %%edx\n" /* hold |= *((ushort *)in)++ << bits */ + +".L_get_length_code:\n" +" movl 56(%%esp), %%eax\n" /* eax = lmask */ +" andl %%edx, %%eax\n" /* eax &= hold */ +" movl (%%ebp,%%eax,4), %%eax\n" /* eax = lcode[hold & lmask] */ + +".L_dolen:\n" +" movb %%ah, %%cl\n" /* cl = this.bits */ +" subb %%ah, %%bl\n" /* bits -= this.bits */ +" shrl %%cl, %%edx\n" /* hold >>= this.bits */ + +" testb %%al, %%al\n" +" jnz .L_test_for_length_base\n" /* if (op != 0) 45.7% */ + +" shrl $16, %%eax\n" /* output this.val char */ +" stosb\n" +" jmp .L_while_test\n" + +".align 32,0x90\n" +".L_test_for_length_base:\n" +" movl %%eax, %%ecx\n" /* len = this */ +" shrl $16, %%ecx\n" /* len = this.val */ +" movl %%ecx, 64(%%esp)\n" /* save len */ +" movb %%al, %%cl\n" + +" testb $16, %%al\n" +" jz .L_test_for_second_level_length\n" /* if ((op & 16) == 0) 8% */ +" andb $15, %%cl\n" /* op &= 15 */ +" jz .L_decode_distance\n" /* if (!op) */ +" cmpb %%cl, %%bl\n" +" jae .L_add_bits_to_len\n" /* if (op <= bits) */ + +" movb %%cl, %%ch\n" /* stash op in ch, freeing cl */ +" xorl %%eax, %%eax\n" +" lodsw\n" /* al = *(ushort *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $16, %%bl\n" /* bits += 16 */ +" shll %%cl, %%eax\n" +" orl %%eax, %%edx\n" /* hold |= 
*((ushort *)in)++ << bits */ +" movb %%ch, %%cl\n" /* move op back to ecx */ + +".L_add_bits_to_len:\n" +" subb %%cl, %%bl\n" +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" +" andl %%edx, %%eax\n" /* eax &= hold */ +" shrl %%cl, %%edx\n" +" addl %%eax, 64(%%esp)\n" /* len += hold & mask[op] */ + +".L_decode_distance:\n" +" cmpb $15, %%bl\n" +" ja .L_get_distance_code\n" /* if (15 < bits) */ + +" xorl %%eax, %%eax\n" +" lodsw\n" /* al = *(ushort *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $16, %%bl\n" /* bits += 16 */ +" shll %%cl, %%eax\n" +" orl %%eax, %%edx\n" /* hold |= *((ushort *)in)++ << bits */ + +".L_get_distance_code:\n" +" movl 60(%%esp), %%eax\n" /* eax = dmask */ +" movl 36(%%esp), %%ecx\n" /* ecx = dcode */ +" andl %%edx, %%eax\n" /* eax &= hold */ +" movl (%%ecx,%%eax,4), %%eax\n"/* eax = dcode[hold & dmask] */ + +".L_dodist:\n" +" movl %%eax, %%ebp\n" /* dist = this */ +" shrl $16, %%ebp\n" /* dist = this.val */ +" movb %%ah, %%cl\n" +" subb %%ah, %%bl\n" /* bits -= this.bits */ +" shrl %%cl, %%edx\n" /* hold >>= this.bits */ +" movb %%al, %%cl\n" /* cl = this.op */ + +" testb $16, %%al\n" /* if ((op & 16) == 0) */ +" jz .L_test_for_second_level_dist\n" +" andb $15, %%cl\n" /* op &= 15 */ +" jz .L_check_dist_one\n" +" cmpb %%cl, %%bl\n" +" jae .L_add_bits_to_dist\n" /* if (op <= bits) 97.6% */ + +" movb %%cl, %%ch\n" /* stash op in ch, freeing cl */ +" xorl %%eax, %%eax\n" +" lodsw\n" /* al = *(ushort *)in++ */ +" movb %%bl, %%cl\n" /* cl = bits, needs it for shifting */ +" addb $16, %%bl\n" /* bits += 16 */ +" shll %%cl, %%eax\n" +" orl %%eax, %%edx\n" /* hold |= *((ushort *)in)++ << bits */ +" movb %%ch, %%cl\n" /* move op back to ecx */ + +".L_add_bits_to_dist:\n" +" subb %%cl, %%bl\n" +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" /* (1 << op) - 1 */ +" andl %%edx, %%eax\n" /* eax &= hold */ +" shrl %%cl, %%edx\n" +" addl %%eax, %%ebp\n" /* dist 
+= hold & ((1 << op) - 1) */ + +".L_check_window:\n" +" movl %%esi, 8(%%esp)\n" /* save in so from can use it's reg */ +" movl %%edi, %%eax\n" +" subl 20(%%esp), %%eax\n" /* nbytes = out - beg */ + +" cmpl %%ebp, %%eax\n" +" jb .L_clip_window\n" /* if (dist > nbytes) 4.2% */ + +" movl 64(%%esp), %%ecx\n" /* ecx = len */ +" movl %%edi, %%esi\n" +" subl %%ebp, %%esi\n" /* from = out - dist */ + +" sarl %%ecx\n" +" jnc .L_copy_two\n" /* if len % 2 == 0 */ + +" rep movsw\n" +" movb (%%esi), %%al\n" +" movb %%al, (%%edi)\n" +" incl %%edi\n" + +" movl 8(%%esp), %%esi\n" /* move in back to %esi, toss from */ +" movl 32(%%esp), %%ebp\n" /* ebp = lcode */ +" jmp .L_while_test\n" + +".L_copy_two:\n" +" rep movsw\n" +" movl 8(%%esp), %%esi\n" /* move in back to %esi, toss from */ +" movl 32(%%esp), %%ebp\n" /* ebp = lcode */ +" jmp .L_while_test\n" + +".align 32,0x90\n" +".L_check_dist_one:\n" +" cmpl $1, %%ebp\n" /* if dist 1, is a memset */ +" jne .L_check_window\n" +" cmpl %%edi, 20(%%esp)\n" +" je .L_check_window\n" /* out == beg, if outside window */ + +" movl 64(%%esp), %%ecx\n" /* ecx = len */ +" movb -1(%%edi), %%al\n" +" movb %%al, %%ah\n" + +" sarl %%ecx\n" +" jnc .L_set_two\n" +" movb %%al, (%%edi)\n" +" incl %%edi\n" + +".L_set_two:\n" +" rep stosw\n" +" movl 32(%%esp), %%ebp\n" /* ebp = lcode */ +" jmp .L_while_test\n" + +".align 32,0x90\n" +".L_test_for_second_level_length:\n" +" testb $64, %%al\n" +" jnz .L_test_for_end_of_block\n" /* if ((op & 64) != 0) */ + +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" +" andl %%edx, %%eax\n" /* eax &= hold */ +" addl 64(%%esp), %%eax\n" /* eax += len */ +" movl (%%ebp,%%eax,4), %%eax\n" /* eax = lcode[val+(hold&mask[op])]*/ +" jmp .L_dolen\n" + +".align 32,0x90\n" +".L_test_for_second_level_dist:\n" +" testb $64, %%al\n" +" jnz .L_invalid_distance_code\n" /* if ((op & 64) != 0) */ + +" xorl %%eax, %%eax\n" +" incl %%eax\n" +" shll %%cl, %%eax\n" +" decl %%eax\n" +" andl %%edx, %%eax\n" /* 
eax &= hold */ +" addl %%ebp, %%eax\n" /* eax += dist */ +" movl 36(%%esp), %%ecx\n" /* ecx = dcode */ +" movl (%%ecx,%%eax,4), %%eax\n" /* eax = dcode[val+(hold&mask[op])]*/ +" jmp .L_dodist\n" + +".align 32,0x90\n" +".L_clip_window:\n" +" movl %%eax, %%ecx\n" +" movl 48(%%esp), %%eax\n" /* eax = wsize */ +" negl %%ecx\n" /* nbytes = -nbytes */ +" movl 28(%%esp), %%esi\n" /* from = window */ + +" cmpl %%ebp, %%eax\n" +" jb .L_invalid_distance_too_far\n" /* if (dist > wsize) */ + +" addl %%ebp, %%ecx\n" /* nbytes = dist - nbytes */ +" cmpl $0, 52(%%esp)\n" +" jne .L_wrap_around_window\n" /* if (write != 0) */ + +" subl %%ecx, %%eax\n" +" addl %%eax, %%esi\n" /* from += wsize - nbytes */ + +" movl 64(%%esp), %%eax\n" /* eax = len */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* len -= nbytes */ +" rep movsb\n" +" movl %%edi, %%esi\n" +" subl %%ebp, %%esi\n" /* from = out - dist */ +" jmp .L_do_copy\n" + +".align 32,0x90\n" +".L_wrap_around_window:\n" +" movl 52(%%esp), %%eax\n" /* eax = write */ +" cmpl %%eax, %%ecx\n" +" jbe .L_contiguous_in_window\n" /* if (write >= nbytes) */ + +" addl 48(%%esp), %%esi\n" /* from += wsize */ +" addl %%eax, %%esi\n" /* from += write */ +" subl %%ecx, %%esi\n" /* from -= nbytes */ +" subl %%eax, %%ecx\n" /* nbytes -= write */ + +" movl 64(%%esp), %%eax\n" /* eax = len */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* len -= nbytes */ +" rep movsb\n" +" movl 28(%%esp), %%esi\n" /* from = window */ +" movl 52(%%esp), %%ecx\n" /* nbytes = write */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* len -= nbytes */ +" rep movsb\n" +" movl %%edi, %%esi\n" +" subl %%ebp, %%esi\n" /* from = out - dist */ +" jmp .L_do_copy\n" + +".align 32,0x90\n" +".L_contiguous_in_window:\n" +" addl %%eax, %%esi\n" +" subl %%ecx, %%esi\n" /* from += write - nbytes */ + +" movl 64(%%esp), %%eax\n" /* eax 
= len */ +" cmpl %%ecx, %%eax\n" +" jbe .L_do_copy\n" /* if (nbytes >= len) */ + +" subl %%ecx, %%eax\n" /* len -= nbytes */ +" rep movsb\n" +" movl %%edi, %%esi\n" +" subl %%ebp, %%esi\n" /* from = out - dist */ +" jmp .L_do_copy\n" /* if (nbytes >= len) */ + +".align 32,0x90\n" +".L_do_copy:\n" +" movl %%eax, %%ecx\n" +" rep movsb\n" + +" movl 8(%%esp), %%esi\n" /* move in back to %esi, toss from */ +" movl 32(%%esp), %%ebp\n" /* ebp = lcode */ +" jmp .L_while_test\n" + +".L_test_for_end_of_block:\n" +" testb $32, %%al\n" +" jz .L_invalid_literal_length_code\n" +" movl $1, 72(%%esp)\n" +" jmp .L_break_loop_with_status\n" + +".L_invalid_literal_length_code:\n" +" movl $2, 72(%%esp)\n" +" jmp .L_break_loop_with_status\n" + +".L_invalid_distance_code:\n" +" movl $3, 72(%%esp)\n" +" jmp .L_break_loop_with_status\n" + +".L_invalid_distance_too_far:\n" +" movl 8(%%esp), %%esi\n" +" movl $4, 72(%%esp)\n" +" jmp .L_break_loop_with_status\n" + +".L_break_loop:\n" +" movl $0, 72(%%esp)\n" + +".L_break_loop_with_status:\n" +/* put in, out, bits, and hold back into ar and pop esp */ +" movl %%esi, 8(%%esp)\n" /* save in */ +" movl %%edi, 16(%%esp)\n" /* save out */ +" movl %%ebx, 44(%%esp)\n" /* save bits */ +" movl %%edx, 40(%%esp)\n" /* save hold */ +" movl 4(%%esp), %%ebp\n" /* restore esp, ebp */ +" movl (%%esp), %%esp\n" + : + : "m" (ar) + : "memory", "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi" + ); +#elif defined( _MSC_VER ) && ! 
defined( _M_AMD64 ) + __asm { + lea eax, ar + mov [eax], esp /* save esp, ebp */ + mov [eax+4], ebp + mov esp, eax + mov esi, [esp+8] /* esi = in */ + mov edi, [esp+16] /* edi = out */ + mov edx, [esp+40] /* edx = hold */ + mov ebx, [esp+44] /* ebx = bits */ + mov ebp, [esp+32] /* ebp = lcode */ + + cld + jmp L_do_loop + +ALIGN 4 +L_while_test: + cmp [esp+24], edi + jbe L_break_loop + cmp [esp+12], esi + jbe L_break_loop + +L_do_loop: + cmp bl, 15 + ja L_get_length_code /* if (15 < bits) */ + + xor eax, eax + lodsw /* al = *(ushort *)in++ */ + mov cl, bl /* cl = bits, needs it for shifting */ + add bl, 16 /* bits += 16 */ + shl eax, cl + or edx, eax /* hold |= *((ushort *)in)++ << bits */ + +L_get_length_code: + mov eax, [esp+56] /* eax = lmask */ + and eax, edx /* eax &= hold */ + mov eax, [ebp+eax*4] /* eax = lcode[hold & lmask] */ + +L_dolen: + mov cl, ah /* cl = this.bits */ + sub bl, ah /* bits -= this.bits */ + shr edx, cl /* hold >>= this.bits */ + + test al, al + jnz L_test_for_length_base /* if (op != 0) 45.7% */ + + shr eax, 16 /* output this.val char */ + stosb + jmp L_while_test + +ALIGN 4 +L_test_for_length_base: + mov ecx, eax /* len = this */ + shr ecx, 16 /* len = this.val */ + mov [esp+64], ecx /* save len */ + mov cl, al + + test al, 16 + jz L_test_for_second_level_length /* if ((op & 16) == 0) 8% */ + and cl, 15 /* op &= 15 */ + jz L_decode_distance /* if (!op) */ + cmp bl, cl + jae L_add_bits_to_len /* if (op <= bits) */ + + mov ch, cl /* stash op in ch, freeing cl */ + xor eax, eax + lodsw /* al = *(ushort *)in++ */ + mov cl, bl /* cl = bits, needs it for shifting */ + add bl, 16 /* bits += 16 */ + shl eax, cl + or edx, eax /* hold |= *((ushort *)in)++ << bits */ + mov cl, ch /* move op back to ecx */ + +L_add_bits_to_len: + sub bl, cl + xor eax, eax + inc eax + shl eax, cl + dec eax + and eax, edx /* eax &= hold */ + shr edx, cl + add [esp+64], eax /* len += hold & mask[op] */ + +L_decode_distance: + cmp bl, 15 + ja L_get_distance_code /* if 
(15 < bits) */ + + xor eax, eax + lodsw /* al = *(ushort *)in++ */ + mov cl, bl /* cl = bits, needs it for shifting */ + add bl, 16 /* bits += 16 */ + shl eax, cl + or edx, eax /* hold |= *((ushort *)in)++ << bits */ + +L_get_distance_code: + mov eax, [esp+60] /* eax = dmask */ + mov ecx, [esp+36] /* ecx = dcode */ + and eax, edx /* eax &= hold */ + mov eax, [ecx+eax*4]/* eax = dcode[hold & dmask] */ + +L_dodist: + mov ebp, eax /* dist = this */ + shr ebp, 16 /* dist = this.val */ + mov cl, ah + sub bl, ah /* bits -= this.bits */ + shr edx, cl /* hold >>= this.bits */ + mov cl, al /* cl = this.op */ + + test al, 16 /* if ((op & 16) == 0) */ + jz L_test_for_second_level_dist + and cl, 15 /* op &= 15 */ + jz L_check_dist_one + cmp bl, cl + jae L_add_bits_to_dist /* if (op <= bits) 97.6% */ + + mov ch, cl /* stash op in ch, freeing cl */ + xor eax, eax + lodsw /* al = *(ushort *)in++ */ + mov cl, bl /* cl = bits, needs it for shifting */ + add bl, 16 /* bits += 16 */ + shl eax, cl + or edx, eax /* hold |= *((ushort *)in)++ << bits */ + mov cl, ch /* move op back to ecx */ + +L_add_bits_to_dist: + sub bl, cl + xor eax, eax + inc eax + shl eax, cl + dec eax /* (1 << op) - 1 */ + and eax, edx /* eax &= hold */ + shr edx, cl + add ebp, eax /* dist += hold & ((1 << op) - 1) */ + +L_check_window: + mov [esp+8], esi /* save in so from can use it's reg */ + mov eax, edi + sub eax, [esp+20] /* nbytes = out - beg */ + + cmp eax, ebp + jb L_clip_window /* if (dist > nbytes) 4.2% */ + + mov ecx, [esp+64] /* ecx = len */ + mov esi, edi + sub esi, ebp /* from = out - dist */ + + sar ecx, 1 + jnc L_copy_two + + rep movsw + mov al, [esi] + mov [edi], al + inc edi + + mov esi, [esp+8] /* move in back to %esi, toss from */ + mov ebp, [esp+32] /* ebp = lcode */ + jmp L_while_test + +L_copy_two: + rep movsw + mov esi, [esp+8] /* move in back to %esi, toss from */ + mov ebp, [esp+32] /* ebp = lcode */ + jmp L_while_test + +ALIGN 4 +L_check_dist_one: + cmp ebp, 1 /* if dist 1, is a memset 
*/ + jne L_check_window + cmp [esp+20], edi + je L_check_window /* out == beg, if outside window */ + + mov ecx, [esp+64] /* ecx = len */ + mov al, [edi-1] + mov ah, al + + sar ecx, 1 + jnc L_set_two + mov [edi], al /* memset out with from[-1] */ + inc edi + +L_set_two: + rep stosw + mov ebp, [esp+32] /* ebp = lcode */ + jmp L_while_test + +ALIGN 4 +L_test_for_second_level_length: + test al, 64 + jnz L_test_for_end_of_block /* if ((op & 64) != 0) */ + + xor eax, eax + inc eax + shl eax, cl + dec eax + and eax, edx /* eax &= hold */ + add eax, [esp+64] /* eax += len */ + mov eax, [ebp+eax*4] /* eax = lcode[val+(hold&mask[op])]*/ + jmp L_dolen + +ALIGN 4 +L_test_for_second_level_dist: + test al, 64 + jnz L_invalid_distance_code /* if ((op & 64) != 0) */ + + xor eax, eax + inc eax + shl eax, cl + dec eax + and eax, edx /* eax &= hold */ + add eax, ebp /* eax += dist */ + mov ecx, [esp+36] /* ecx = dcode */ + mov eax, [ecx+eax*4] /* eax = dcode[val+(hold&mask[op])]*/ + jmp L_dodist + +ALIGN 4 +L_clip_window: + mov ecx, eax + mov eax, [esp+48] /* eax = wsize */ + neg ecx /* nbytes = -nbytes */ + mov esi, [esp+28] /* from = window */ + + cmp eax, ebp + jb L_invalid_distance_too_far /* if (dist > wsize) */ + + add ecx, ebp /* nbytes = dist - nbytes */ + cmp dword ptr [esp+52], 0 + jne L_wrap_around_window /* if (write != 0) */ + + sub eax, ecx + add esi, eax /* from += wsize - nbytes */ + + mov eax, [esp+64] /* eax = len */ + cmp eax, ecx + jbe L_do_copy /* if (nbytes >= len) */ + + sub eax, ecx /* len -= nbytes */ + rep movsb + mov esi, edi + sub esi, ebp /* from = out - dist */ + jmp L_do_copy + +ALIGN 4 +L_wrap_around_window: + mov eax, [esp+52] /* eax = write */ + cmp ecx, eax + jbe L_contiguous_in_window /* if (write >= nbytes) */ + + add esi, [esp+48] /* from += wsize */ + add esi, eax /* from += write */ + sub esi, ecx /* from -= nbytes */ + sub ecx, eax /* nbytes -= write */ + + mov eax, [esp+64] /* eax = len */ + cmp eax, ecx + jbe L_do_copy /* if (nbytes >= len) 
*/ + + sub eax, ecx /* len -= nbytes */ + rep movsb + mov esi, [esp+28] /* from = window */ + mov ecx, [esp+52] /* nbytes = write */ + cmp eax, ecx + jbe L_do_copy /* if (nbytes >= len) */ + + sub eax, ecx /* len -= nbytes */ + rep movsb + mov esi, edi + sub esi, ebp /* from = out - dist */ + jmp L_do_copy + +ALIGN 4 +L_contiguous_in_window: + add esi, eax + sub esi, ecx /* from += write - nbytes */ + + mov eax, [esp+64] /* eax = len */ + cmp eax, ecx + jbe L_do_copy /* if (nbytes >= len) */ + + sub eax, ecx /* len -= nbytes */ + rep movsb + mov esi, edi + sub esi, ebp /* from = out - dist */ + jmp L_do_copy + +ALIGN 4 +L_do_copy: + mov ecx, eax + rep movsb + + mov esi, [esp+8] /* move in back to %esi, toss from */ + mov ebp, [esp+32] /* ebp = lcode */ + jmp L_while_test + +L_test_for_end_of_block: + test al, 32 + jz L_invalid_literal_length_code + mov dword ptr [esp+72], 1 + jmp L_break_loop_with_status + +L_invalid_literal_length_code: + mov dword ptr [esp+72], 2 + jmp L_break_loop_with_status + +L_invalid_distance_code: + mov dword ptr [esp+72], 3 + jmp L_break_loop_with_status + +L_invalid_distance_too_far: + mov esi, [esp+4] + mov dword ptr [esp+72], 4 + jmp L_break_loop_with_status + +L_break_loop: + mov dword ptr [esp+72], 0 + +L_break_loop_with_status: +/* put in, out, bits, and hold back into ar and pop esp */ + mov [esp+8], esi /* save in */ + mov [esp+16], edi /* save out */ + mov [esp+44], ebx /* save bits */ + mov [esp+40], edx /* save hold */ + mov ebp, [esp+4] /* restore esp, ebp */ + mov esp, [esp] + } +#else +#error "x86 architecture not defined" +#endif + + if (ar.status > 1) { + if (ar.status == 2) + strm->msg = "invalid literal/length code"; + else if (ar.status == 3) + strm->msg = "invalid distance code"; + else + strm->msg = "invalid distance too far back"; + state->mode = BAD; + } + else if ( ar.status == 1 ) { + state->mode = TYPE; + } + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + ar.len = ar.bits >> 3; + 
ar.in -= ar.len; + ar.bits -= ar.len << 3; + ar.hold &= (1U << ar.bits) - 1; + + /* update state and return */ + strm->next_in = ar.in; + strm->next_out = ar.out; + strm->avail_in = (unsigned)(ar.in < ar.last ? + PAD_AVAIL_IN + (ar.last - ar.in) : + PAD_AVAIL_IN - (ar.in - ar.last)); + strm->avail_out = (unsigned)(ar.out < ar.end ? + PAD_AVAIL_OUT + (ar.end - ar.out) : + PAD_AVAIL_OUT - (ar.out - ar.end)); + state->hold = ar.hold; + state->bits = ar.bits; + return; +} + diff -Nru nodejs-0.11.13/deps/zlib/contrib/inflate86/inffast.S nodejs-0.11.15/deps/zlib/contrib/inflate86/inffast.S --- nodejs-0.11.13/deps/zlib/contrib/inflate86/inffast.S 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/inflate86/inffast.S 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1368 @@ +/* + * inffast.S is a hand tuned assembler version of: + * + * inffast.c -- fast decoding + * Copyright (C) 1995-2003 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + * + * Copyright (C) 2003 Chris Anderson <christop@charm.net> + * Please use the copyright conditions above. + * + * This version (Jan-23-2003) of inflate_fast was coded and tested under + * GNU/Linux on a pentium 3, using the gcc-3.2 compiler distribution. On that + * machine, I found that gzip style archives decompressed about 20% faster than + * the gcc-3.2 -O3 -fomit-frame-pointer compiled version. Your results will + * depend on how large of a buffer is used for z_stream.next_in & next_out + * (8K-32K worked best for my 256K cpu cache) and how much overhead there is in + * stream processing I/O and crc32/addler32. In my case, this routine used + * 70% of the cpu time and crc32 used 20%. + * + * I am confident that this version will work in the general case, but I have + * not tested a wide variety of datasets or a wide variety of platforms. + * + * Jan-24-2003 -- Added -DUSE_MMX define for slightly faster inflating. 
+ * It should be a runtime flag instead of compile time flag... + * + * Jan-26-2003 -- Added runtime check for MMX support with cpuid instruction. + * With -DUSE_MMX, only MMX code is compiled. With -DNO_MMX, only non-MMX code + * is compiled. Without either option, runtime detection is enabled. Runtime + * detection should work on all modern cpus and the recomended algorithm (flip + * ID bit on eflags and then use the cpuid instruction) is used in many + * multimedia applications. Tested under win2k with gcc-2.95 and gas-2.12 + * distributed with cygwin3. Compiling with gcc-2.95 -c inffast.S -o + * inffast.obj generates a COFF object which can then be linked with MSVC++ + * compiled code. Tested under FreeBSD 4.7 with gcc-2.95. + * + * Jan-28-2003 -- Tested Athlon XP... MMX mode is slower than no MMX (and + * slower than compiler generated code). Adjusted cpuid check to use the MMX + * code only for Pentiums < P4 until I have more data on the P4. Speed + * improvment is only about 15% on the Athlon when compared with code generated + * with MSVC++. Not sure yet, but I think the P4 will also be slower using the + * MMX mode because many of it's x86 ALU instructions execute in .5 cycles and + * have less latency than MMX ops. Added code to buffer the last 11 bytes of + * the input stream since the MMX code grabs bits in chunks of 32, which + * differs from the inffast.c algorithm. I don't think there would have been + * read overruns where a page boundary was crossed (a segfault), but there + * could have been overruns when next_in ends on unaligned memory (unintialized + * memory read). + * + * Mar-13-2003 -- P4 MMX is slightly slower than P4 NO_MMX. I created a C + * version of the non-MMX code so that it doesn't depend on zstrm and zstate + * structure offsets which are hard coded in this file. 
This was last tested + * with zlib-1.2.0 which is currently in beta testing, newer versions of this + * and inffas86.c can be found at http://www.eetbeetee.com/zlib/ and + * http://www.charm.net/~christop/zlib/ + */ + + +/* + * if you have underscore linking problems (_inflate_fast undefined), try + * using -DGAS_COFF + */ +#if ! defined( GAS_COFF ) && ! defined( GAS_ELF ) + +#if defined( WIN32 ) || defined( __CYGWIN__ ) +#define GAS_COFF /* windows object format */ +#else +#define GAS_ELF +#endif + +#endif /* ! GAS_COFF && ! GAS_ELF */ + + +#if defined( GAS_COFF ) + +/* coff externals have underscores */ +#define inflate_fast _inflate_fast +#define inflate_fast_use_mmx _inflate_fast_use_mmx + +#endif /* GAS_COFF */ + + +.file "inffast.S" + +.globl inflate_fast + +.text +.align 4,0 +.L_invalid_literal_length_code_msg: +.string "invalid literal/length code" + +.align 4,0 +.L_invalid_distance_code_msg: +.string "invalid distance code" + +.align 4,0 +.L_invalid_distance_too_far_msg: +.string "invalid distance too far back" + +#if ! 
defined( NO_MMX ) +.align 4,0 +.L_mask: /* mask[N] = ( 1 << N ) - 1 */ +.long 0 +.long 1 +.long 3 +.long 7 +.long 15 +.long 31 +.long 63 +.long 127 +.long 255 +.long 511 +.long 1023 +.long 2047 +.long 4095 +.long 8191 +.long 16383 +.long 32767 +.long 65535 +.long 131071 +.long 262143 +.long 524287 +.long 1048575 +.long 2097151 +.long 4194303 +.long 8388607 +.long 16777215 +.long 33554431 +.long 67108863 +.long 134217727 +.long 268435455 +.long 536870911 +.long 1073741823 +.long 2147483647 +.long 4294967295 +#endif /* NO_MMX */ + +.text + +/* + * struct z_stream offsets, in zlib.h + */ +#define next_in_strm 0 /* strm->next_in */ +#define avail_in_strm 4 /* strm->avail_in */ +#define next_out_strm 12 /* strm->next_out */ +#define avail_out_strm 16 /* strm->avail_out */ +#define msg_strm 24 /* strm->msg */ +#define state_strm 28 /* strm->state */ + +/* + * struct inflate_state offsets, in inflate.h + */ +#define mode_state 0 /* state->mode */ +#define wsize_state 32 /* state->wsize */ +#define write_state 40 /* state->write */ +#define window_state 44 /* state->window */ +#define hold_state 48 /* state->hold */ +#define bits_state 52 /* state->bits */ +#define lencode_state 68 /* state->lencode */ +#define distcode_state 72 /* state->distcode */ +#define lenbits_state 76 /* state->lenbits */ +#define distbits_state 80 /* state->distbits */ + +/* + * inflate_fast's activation record + */ +#define local_var_size 64 /* how much local space for vars */ +#define strm_sp 88 /* first arg: z_stream * (local_var_size + 24) */ +#define start_sp 92 /* second arg: unsigned int (local_var_size + 28) */ + +/* + * offsets for local vars on stack + */ +#define out 60 /* unsigned char* */ +#define window 56 /* unsigned char* */ +#define wsize 52 /* unsigned int */ +#define write 48 /* unsigned int */ +#define in 44 /* unsigned char* */ +#define beg 40 /* unsigned char* */ +#define buf 28 /* char[ 12 ] */ +#define len 24 /* unsigned int */ +#define last 20 /* unsigned char* */ +#define 
end 16 /* unsigned char* */ +#define dcode 12 /* code* */ +#define lcode 8 /* code* */ +#define dmask 4 /* unsigned int */ +#define lmask 0 /* unsigned int */ + +/* + * typedef enum inflate_mode consts, in inflate.h + */ +#define INFLATE_MODE_TYPE 11 /* state->mode flags enum-ed in inflate.h */ +#define INFLATE_MODE_BAD 26 + + +#if ! defined( USE_MMX ) && ! defined( NO_MMX ) + +#define RUN_TIME_MMX + +#define CHECK_MMX 1 +#define DO_USE_MMX 2 +#define DONT_USE_MMX 3 + +.globl inflate_fast_use_mmx + +.data + +.align 4,0 +inflate_fast_use_mmx: /* integer flag for run time control 1=check,2=mmx,3=no */ +.long CHECK_MMX + +#if defined( GAS_ELF ) +/* elf info */ +.type inflate_fast_use_mmx,@object +.size inflate_fast_use_mmx,4 +#endif + +#endif /* RUN_TIME_MMX */ + +#if defined( GAS_COFF ) +/* coff info: scl 2 = extern, type 32 = function */ +.def inflate_fast; .scl 2; .type 32; .endef +#endif + +.text + +.align 32,0x90 +inflate_fast: + pushl %edi + pushl %esi + pushl %ebp + pushl %ebx + pushf /* save eflags (strm_sp, state_sp assumes this is 32 bits) */ + subl $local_var_size, %esp + cld + +#define strm_r %esi +#define state_r %edi + + movl strm_sp(%esp), strm_r + movl state_strm(strm_r), state_r + + /* in = strm->next_in; + * out = strm->next_out; + * last = in + strm->avail_in - 11; + * beg = out - (start - strm->avail_out); + * end = out + (strm->avail_out - 257); + */ + movl avail_in_strm(strm_r), %edx + movl next_in_strm(strm_r), %eax + + addl %eax, %edx /* avail_in += next_in */ + subl $11, %edx /* avail_in -= 11 */ + + movl %eax, in(%esp) + movl %edx, last(%esp) + + movl start_sp(%esp), %ebp + movl avail_out_strm(strm_r), %ecx + movl next_out_strm(strm_r), %ebx + + subl %ecx, %ebp /* start -= avail_out */ + negl %ebp /* start = -start */ + addl %ebx, %ebp /* start += next_out */ + + subl $257, %ecx /* avail_out -= 257 */ + addl %ebx, %ecx /* avail_out += out */ + + movl %ebx, out(%esp) + movl %ebp, beg(%esp) + movl %ecx, end(%esp) + + /* wsize = state->wsize; + 
* write = state->write; + * window = state->window; + * hold = state->hold; + * bits = state->bits; + * lcode = state->lencode; + * dcode = state->distcode; + * lmask = ( 1 << state->lenbits ) - 1; + * dmask = ( 1 << state->distbits ) - 1; + */ + + movl lencode_state(state_r), %eax + movl distcode_state(state_r), %ecx + + movl %eax, lcode(%esp) + movl %ecx, dcode(%esp) + + movl $1, %eax + movl lenbits_state(state_r), %ecx + shll %cl, %eax + decl %eax + movl %eax, lmask(%esp) + + movl $1, %eax + movl distbits_state(state_r), %ecx + shll %cl, %eax + decl %eax + movl %eax, dmask(%esp) + + movl wsize_state(state_r), %eax + movl write_state(state_r), %ecx + movl window_state(state_r), %edx + + movl %eax, wsize(%esp) + movl %ecx, write(%esp) + movl %edx, window(%esp) + + movl hold_state(state_r), %ebp + movl bits_state(state_r), %ebx + +#undef strm_r +#undef state_r + +#define in_r %esi +#define from_r %esi +#define out_r %edi + + movl in(%esp), in_r + movl last(%esp), %ecx + cmpl in_r, %ecx + ja .L_align_long /* if in < last */ + + addl $11, %ecx /* ecx = &in[ avail_in ] */ + subl in_r, %ecx /* ecx = avail_in */ + movl $12, %eax + subl %ecx, %eax /* eax = 12 - avail_in */ + leal buf(%esp), %edi + rep movsb /* memcpy( buf, in, avail_in ) */ + movl %eax, %ecx + xorl %eax, %eax + rep stosb /* memset( &buf[ avail_in ], 0, 12 - avail_in ) */ + leal buf(%esp), in_r /* in = buf */ + movl in_r, last(%esp) /* last = in, do just one iteration */ + jmp .L_is_aligned + + /* align in_r on long boundary */ +.L_align_long: + testl $3, in_r + jz .L_is_aligned + xorl %eax, %eax + movb (in_r), %al + incl in_r + movl %ebx, %ecx + addl $8, %ebx + shll %cl, %eax + orl %eax, %ebp + jmp .L_align_long + +.L_is_aligned: + movl out(%esp), out_r + +#if defined( NO_MMX ) + jmp .L_do_loop +#endif + +#if defined( USE_MMX ) + jmp .L_init_mmx +#endif + +/*** Runtime MMX check ***/ + +#if defined( RUN_TIME_MMX ) +.L_check_mmx: + cmpl $DO_USE_MMX, inflate_fast_use_mmx + je .L_init_mmx + ja .L_do_loop /* 
> 2 */ + + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + pushf + movl (%esp), %eax /* copy eflags to eax */ + xorl $0x200000, (%esp) /* try toggling ID bit of eflags (bit 21) + * to see if cpu supports cpuid... + * ID bit method not supported by NexGen but + * bios may load a cpuid instruction and + * cpuid may be disabled on Cyrix 5-6x86 */ + popf + pushf + popl %edx /* copy new eflags to edx */ + xorl %eax, %edx /* test if ID bit is flipped */ + jz .L_dont_use_mmx /* not flipped if zero */ + xorl %eax, %eax + cpuid + cmpl $0x756e6547, %ebx /* check for GenuineIntel in ebx,ecx,edx */ + jne .L_dont_use_mmx + cmpl $0x6c65746e, %ecx + jne .L_dont_use_mmx + cmpl $0x49656e69, %edx + jne .L_dont_use_mmx + movl $1, %eax + cpuid /* get cpu features */ + shrl $8, %eax + andl $15, %eax + cmpl $6, %eax /* check for Pentium family, is 0xf for P4 */ + jne .L_dont_use_mmx + testl $0x800000, %edx /* test if MMX feature is set (bit 23) */ + jnz .L_use_mmx + jmp .L_dont_use_mmx +.L_use_mmx: + movl $DO_USE_MMX, inflate_fast_use_mmx + jmp .L_check_mmx_pop +.L_dont_use_mmx: + movl $DONT_USE_MMX, inflate_fast_use_mmx +.L_check_mmx_pop: + popl %edx + popl %ecx + popl %ebx + popl %eax + jmp .L_check_mmx +#endif + + +/*** Non-MMX code ***/ + +#if defined ( NO_MMX ) || defined( RUN_TIME_MMX ) + +#define hold_r %ebp +#define bits_r %bl +#define bitslong_r %ebx + +.align 32,0x90 +.L_while_test: + /* while (in < last && out < end) + */ + cmpl out_r, end(%esp) + jbe .L_break_loop /* if (out >= end) */ + + cmpl in_r, last(%esp) + jbe .L_break_loop + +.L_do_loop: + /* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out + * + * do { + * if (bits < 15) { + * hold |= *((unsigned short *)in)++ << bits; + * bits += 16 + * } + * this = lcode[hold & lmask] + */ + cmpb $15, bits_r + ja .L_get_length_code /* if (15 < bits) */ + + xorl %eax, %eax + lodsw /* al = *(ushort *)in++ */ + movb bits_r, %cl /* cl = bits, needs it for shifting */ + addb $16, bits_r /* bits += 16 */ + shll %cl, %eax + orl 
%eax, hold_r /* hold |= *((ushort *)in)++ << bits */ + +.L_get_length_code: + movl lmask(%esp), %edx /* edx = lmask */ + movl lcode(%esp), %ecx /* ecx = lcode */ + andl hold_r, %edx /* edx &= hold */ + movl (%ecx,%edx,4), %eax /* eax = lcode[hold & lmask] */ + +.L_dolen: + /* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out + * + * dolen: + * bits -= this.bits; + * hold >>= this.bits + */ + movb %ah, %cl /* cl = this.bits */ + subb %ah, bits_r /* bits -= this.bits */ + shrl %cl, hold_r /* hold >>= this.bits */ + + /* check if op is a literal + * if (op == 0) { + * PUP(out) = this.val; + * } + */ + testb %al, %al + jnz .L_test_for_length_base /* if (op != 0) 45.7% */ + + shrl $16, %eax /* output this.val char */ + stosb + jmp .L_while_test + +.L_test_for_length_base: + /* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = len + * + * else if (op & 16) { + * len = this.val + * op &= 15 + * if (op) { + * if (op > bits) { + * hold |= *((unsigned short *)in)++ << bits; + * bits += 16 + * } + * len += hold & mask[op]; + * bits -= op; + * hold >>= op; + * } + */ +#define len_r %edx + movl %eax, len_r /* len = this */ + shrl $16, len_r /* len = this.val */ + movb %al, %cl + + testb $16, %al + jz .L_test_for_second_level_length /* if ((op & 16) == 0) 8% */ + andb $15, %cl /* op &= 15 */ + jz .L_save_len /* if (!op) */ + cmpb %cl, bits_r + jae .L_add_bits_to_len /* if (op <= bits) */ + + movb %cl, %ch /* stash op in ch, freeing cl */ + xorl %eax, %eax + lodsw /* al = *(ushort *)in++ */ + movb bits_r, %cl /* cl = bits, needs it for shifting */ + addb $16, bits_r /* bits += 16 */ + shll %cl, %eax + orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */ + movb %ch, %cl /* move op back to ecx */ + +.L_add_bits_to_len: + movl $1, %eax + shll %cl, %eax + decl %eax + subb %cl, bits_r + andl hold_r, %eax /* eax &= hold */ + shrl %cl, hold_r + addl %eax, len_r /* len += hold & mask[op] */ + +.L_save_len: + movl len_r, len(%esp) /* save len */ +#undef len_r + 
+.L_decode_distance: + /* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = dist + * + * if (bits < 15) { + * hold |= *((unsigned short *)in)++ << bits; + * bits += 16 + * } + * this = dcode[hold & dmask]; + * dodist: + * bits -= this.bits; + * hold >>= this.bits; + * op = this.op; + */ + + cmpb $15, bits_r + ja .L_get_distance_code /* if (15 < bits) */ + + xorl %eax, %eax + lodsw /* al = *(ushort *)in++ */ + movb bits_r, %cl /* cl = bits, needs it for shifting */ + addb $16, bits_r /* bits += 16 */ + shll %cl, %eax + orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */ + +.L_get_distance_code: + movl dmask(%esp), %edx /* edx = dmask */ + movl dcode(%esp), %ecx /* ecx = dcode */ + andl hold_r, %edx /* edx &= hold */ + movl (%ecx,%edx,4), %eax /* eax = dcode[hold & dmask] */ + +#define dist_r %edx +.L_dodist: + movl %eax, dist_r /* dist = this */ + shrl $16, dist_r /* dist = this.val */ + movb %ah, %cl + subb %ah, bits_r /* bits -= this.bits */ + shrl %cl, hold_r /* hold >>= this.bits */ + + /* if (op & 16) { + * dist = this.val + * op &= 15 + * if (op > bits) { + * hold |= *((unsigned short *)in)++ << bits; + * bits += 16 + * } + * dist += hold & mask[op]; + * bits -= op; + * hold >>= op; + */ + movb %al, %cl /* cl = this.op */ + + testb $16, %al /* if ((op & 16) == 0) */ + jz .L_test_for_second_level_dist + andb $15, %cl /* op &= 15 */ + jz .L_check_dist_one + cmpb %cl, bits_r + jae .L_add_bits_to_dist /* if (op <= bits) 97.6% */ + + movb %cl, %ch /* stash op in ch, freeing cl */ + xorl %eax, %eax + lodsw /* al = *(ushort *)in++ */ + movb bits_r, %cl /* cl = bits, needs it for shifting */ + addb $16, bits_r /* bits += 16 */ + shll %cl, %eax + orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */ + movb %ch, %cl /* move op back to ecx */ + +.L_add_bits_to_dist: + movl $1, %eax + shll %cl, %eax + decl %eax /* (1 << op) - 1 */ + subb %cl, bits_r + andl hold_r, %eax /* eax &= hold */ + shrl %cl, hold_r + addl %eax, dist_r /* dist += hold & ((1 << op) 
- 1) */ + jmp .L_check_window + +.L_check_window: + /* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist + * %ecx = nbytes + * + * nbytes = out - beg; + * if (dist <= nbytes) { + * from = out - dist; + * do { + * PUP(out) = PUP(from); + * } while (--len > 0) { + * } + */ + + movl in_r, in(%esp) /* save in so from can use it's reg */ + movl out_r, %eax + subl beg(%esp), %eax /* nbytes = out - beg */ + + cmpl dist_r, %eax + jb .L_clip_window /* if (dist > nbytes) 4.2% */ + + movl len(%esp), %ecx + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + + subl $3, %ecx + movb (from_r), %al + movb %al, (out_r) + movb 1(from_r), %al + movb 2(from_r), %dl + addl $3, from_r + movb %al, 1(out_r) + movb %dl, 2(out_r) + addl $3, out_r + rep movsb + + movl in(%esp), in_r /* move in back to %esi, toss from */ + jmp .L_while_test + +.align 16,0x90 +.L_check_dist_one: + cmpl $1, dist_r + jne .L_check_window + cmpl out_r, beg(%esp) + je .L_check_window + + decl out_r + movl len(%esp), %ecx + movb (out_r), %al + subl $3, %ecx + + movb %al, 1(out_r) + movb %al, 2(out_r) + movb %al, 3(out_r) + addl $4, out_r + rep stosb + + jmp .L_while_test + +.align 16,0x90 +.L_test_for_second_level_length: + /* else if ((op & 64) == 0) { + * this = lcode[this.val + (hold & mask[op])]; + * } + */ + testb $64, %al + jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */ + + movl $1, %eax + shll %cl, %eax + decl %eax + andl hold_r, %eax /* eax &= hold */ + addl %edx, %eax /* eax += this.val */ + movl lcode(%esp), %edx /* edx = lcode */ + movl (%edx,%eax,4), %eax /* eax = lcode[val + (hold&mask[op])] */ + jmp .L_dolen + +.align 16,0x90 +.L_test_for_second_level_dist: + /* else if ((op & 64) == 0) { + * this = dcode[this.val + (hold & mask[op])]; + * } + */ + testb $64, %al + jnz .L_invalid_distance_code /* if ((op & 64) != 0) */ + + movl $1, %eax + shll %cl, %eax + decl %eax + andl hold_r, %eax /* eax &= hold */ + addl %edx, %eax /* eax += this.val */ + movl dcode(%esp), 
%edx /* edx = dcode */ + movl (%edx,%eax,4), %eax /* eax = dcode[val + (hold&mask[op])] */ + jmp .L_dodist + +.align 16,0x90 +.L_clip_window: + /* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist + * %ecx = nbytes + * + * else { + * if (dist > wsize) { + * invalid distance + * } + * from = window; + * nbytes = dist - nbytes; + * if (write == 0) { + * from += wsize - nbytes; + */ +#define nbytes_r %ecx + movl %eax, nbytes_r + movl wsize(%esp), %eax /* prepare for dist compare */ + negl nbytes_r /* nbytes = -nbytes */ + movl window(%esp), from_r /* from = window */ + + cmpl dist_r, %eax + jb .L_invalid_distance_too_far /* if (dist > wsize) */ + + addl dist_r, nbytes_r /* nbytes = dist - nbytes */ + cmpl $0, write(%esp) + jne .L_wrap_around_window /* if (write != 0) */ + + subl nbytes_r, %eax + addl %eax, from_r /* from += wsize - nbytes */ + + /* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist + * %ecx = nbytes, %eax = len + * + * if (nbytes < len) { + * len -= nbytes; + * do { + * PUP(out) = PUP(from); + * } while (--nbytes); + * from = out - dist; + * } + * } + */ +#define len_r %eax + movl len(%esp), len_r + cmpl nbytes_r, len_r + jbe .L_do_copy1 /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + jmp .L_do_copy1 + + cmpl nbytes_r, len_r + jbe .L_do_copy1 /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + jmp .L_do_copy1 + +.L_wrap_around_window: + /* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist + * %ecx = nbytes, %eax = write, %eax = len + * + * else if (write < nbytes) { + * from += wsize + write - nbytes; + * nbytes -= write; + * if (nbytes < len) { + * len -= nbytes; + * do { + * PUP(out) = PUP(from); + * } while (--nbytes); + * from = window; + * nbytes = write; + * if (nbytes < len) { + * len -= 
nbytes; + * do { + * PUP(out) = PUP(from); + * } while(--nbytes); + * from = out - dist; + * } + * } + * } + */ +#define write_r %eax + movl write(%esp), write_r + cmpl write_r, nbytes_r + jbe .L_contiguous_in_window /* if (write >= nbytes) */ + + addl wsize(%esp), from_r + addl write_r, from_r + subl nbytes_r, from_r /* from += wsize + write - nbytes */ + subl write_r, nbytes_r /* nbytes -= write */ +#undef write_r + + movl len(%esp), len_r + cmpl nbytes_r, len_r + jbe .L_do_copy1 /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl window(%esp), from_r /* from = window */ + movl write(%esp), nbytes_r /* nbytes = write */ + cmpl nbytes_r, len_r + jbe .L_do_copy1 /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + jmp .L_do_copy1 + +.L_contiguous_in_window: + /* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist + * %ecx = nbytes, %eax = write, %eax = len + * + * else { + * from += write - nbytes; + * if (nbytes < len) { + * len -= nbytes; + * do { + * PUP(out) = PUP(from); + * } while (--nbytes); + * from = out - dist; + * } + * } + */ +#define write_r %eax + addl write_r, from_r + subl nbytes_r, from_r /* from += write - nbytes */ +#undef write_r + + movl len(%esp), len_r + cmpl nbytes_r, len_r + jbe .L_do_copy1 /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + +.L_do_copy1: + /* regs: %esi = from, %esi = in, %ebp = hold, %bl = bits, %edi = out + * %eax = len + * + * while (len > 0) { + * PUP(out) = PUP(from); + * len--; + * } + * } + * } while (in < last && out < end); + */ +#undef nbytes_r +#define in_r %esi + movl len_r, %ecx + rep movsb + + movl in(%esp), in_r /* move in back to %esi, toss from */ + jmp .L_while_test + +#undef len_r +#undef dist_r + +#endif /* NO_MMX || RUN_TIME_MMX */ + + +/*** MMX code ***/ + 
+#if defined( USE_MMX ) || defined( RUN_TIME_MMX ) + +.align 32,0x90 +.L_init_mmx: + emms + +#undef bits_r +#undef bitslong_r +#define bitslong_r %ebp +#define hold_mm %mm0 + movd %ebp, hold_mm + movl %ebx, bitslong_r + +#define used_mm %mm1 +#define dmask2_mm %mm2 +#define lmask2_mm %mm3 +#define lmask_mm %mm4 +#define dmask_mm %mm5 +#define tmp_mm %mm6 + + movd lmask(%esp), lmask_mm + movq lmask_mm, lmask2_mm + movd dmask(%esp), dmask_mm + movq dmask_mm, dmask2_mm + pxor used_mm, used_mm + movl lcode(%esp), %ebx /* ebx = lcode */ + jmp .L_do_loop_mmx + +.align 32,0x90 +.L_while_test_mmx: + /* while (in < last && out < end) + */ + cmpl out_r, end(%esp) + jbe .L_break_loop /* if (out >= end) */ + + cmpl in_r, last(%esp) + jbe .L_break_loop + +.L_do_loop_mmx: + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + + cmpl $32, bitslong_r + ja .L_get_length_code_mmx /* if (32 < bits) */ + + movd bitslong_r, tmp_mm + movd (in_r), %mm7 + addl $4, in_r + psllq tmp_mm, %mm7 + addl $32, bitslong_r + por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */ + +.L_get_length_code_mmx: + pand hold_mm, lmask_mm + movd lmask_mm, %eax + movq lmask2_mm, lmask_mm + movl (%ebx,%eax,4), %eax /* eax = lcode[hold & lmask] */ + +.L_dolen_mmx: + movzbl %ah, %ecx /* ecx = this.bits */ + movd %ecx, used_mm + subl %ecx, bitslong_r /* bits -= this.bits */ + + testb %al, %al + jnz .L_test_for_length_base_mmx /* if (op != 0) 45.7% */ + + shrl $16, %eax /* output this.val char */ + stosb + jmp .L_while_test_mmx + +.L_test_for_length_base_mmx: +#define len_r %edx + movl %eax, len_r /* len = this */ + shrl $16, len_r /* len = this.val */ + + testb $16, %al + jz .L_test_for_second_level_length_mmx /* if ((op & 16) == 0) 8% */ + andl $15, %eax /* op &= 15 */ + jz .L_decode_distance_mmx /* if (!op) */ + + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + movd %eax, used_mm + movd hold_mm, %ecx + subl %eax, bitslong_r + andl .L_mask(,%eax,4), %ecx + addl %ecx, len_r /* len += hold & 
mask[op] */ + +.L_decode_distance_mmx: + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + + cmpl $32, bitslong_r + ja .L_get_dist_code_mmx /* if (32 < bits) */ + + movd bitslong_r, tmp_mm + movd (in_r), %mm7 + addl $4, in_r + psllq tmp_mm, %mm7 + addl $32, bitslong_r + por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */ + +.L_get_dist_code_mmx: + movl dcode(%esp), %ebx /* ebx = dcode */ + pand hold_mm, dmask_mm + movd dmask_mm, %eax + movq dmask2_mm, dmask_mm + movl (%ebx,%eax,4), %eax /* eax = dcode[hold & lmask] */ + +.L_dodist_mmx: +#define dist_r %ebx + movzbl %ah, %ecx /* ecx = this.bits */ + movl %eax, dist_r + shrl $16, dist_r /* dist = this.val */ + subl %ecx, bitslong_r /* bits -= this.bits */ + movd %ecx, used_mm + + testb $16, %al /* if ((op & 16) == 0) */ + jz .L_test_for_second_level_dist_mmx + andl $15, %eax /* op &= 15 */ + jz .L_check_dist_one_mmx + +.L_add_bits_to_dist_mmx: + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + movd %eax, used_mm /* save bit length of current op */ + movd hold_mm, %ecx /* get the next bits on input stream */ + subl %eax, bitslong_r /* bits -= op bits */ + andl .L_mask(,%eax,4), %ecx /* ecx = hold & mask[op] */ + addl %ecx, dist_r /* dist += hold & mask[op] */ + +.L_check_window_mmx: + movl in_r, in(%esp) /* save in so from can use it's reg */ + movl out_r, %eax + subl beg(%esp), %eax /* nbytes = out - beg */ + + cmpl dist_r, %eax + jb .L_clip_window_mmx /* if (dist > nbytes) 4.2% */ + + movl len_r, %ecx + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + + subl $3, %ecx + movb (from_r), %al + movb %al, (out_r) + movb 1(from_r), %al + movb 2(from_r), %dl + addl $3, from_r + movb %al, 1(out_r) + movb %dl, 2(out_r) + addl $3, out_r + rep movsb + + movl in(%esp), in_r /* move in back to %esi, toss from */ + movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */ + jmp .L_while_test_mmx + +.align 16,0x90 +.L_check_dist_one_mmx: + cmpl $1, dist_r + jne .L_check_window_mmx + 
cmpl out_r, beg(%esp) + je .L_check_window_mmx + + decl out_r + movl len_r, %ecx + movb (out_r), %al + subl $3, %ecx + + movb %al, 1(out_r) + movb %al, 2(out_r) + movb %al, 3(out_r) + addl $4, out_r + rep stosb + + movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */ + jmp .L_while_test_mmx + +.align 16,0x90 +.L_test_for_second_level_length_mmx: + testb $64, %al + jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */ + + andl $15, %eax + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + movd hold_mm, %ecx + andl .L_mask(,%eax,4), %ecx + addl len_r, %ecx + movl (%ebx,%ecx,4), %eax /* eax = lcode[hold & lmask] */ + jmp .L_dolen_mmx + +.align 16,0x90 +.L_test_for_second_level_dist_mmx: + testb $64, %al + jnz .L_invalid_distance_code /* if ((op & 64) != 0) */ + + andl $15, %eax + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + movd hold_mm, %ecx + andl .L_mask(,%eax,4), %ecx + movl dcode(%esp), %eax /* ecx = dcode */ + addl dist_r, %ecx + movl (%eax,%ecx,4), %eax /* eax = lcode[hold & lmask] */ + jmp .L_dodist_mmx + +.align 16,0x90 +.L_clip_window_mmx: +#define nbytes_r %ecx + movl %eax, nbytes_r + movl wsize(%esp), %eax /* prepare for dist compare */ + negl nbytes_r /* nbytes = -nbytes */ + movl window(%esp), from_r /* from = window */ + + cmpl dist_r, %eax + jb .L_invalid_distance_too_far /* if (dist > wsize) */ + + addl dist_r, nbytes_r /* nbytes = dist - nbytes */ + cmpl $0, write(%esp) + jne .L_wrap_around_window_mmx /* if (write != 0) */ + + subl nbytes_r, %eax + addl %eax, from_r /* from += wsize - nbytes */ + + cmpl nbytes_r, len_r + jbe .L_do_copy1_mmx /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + jmp .L_do_copy1_mmx + + cmpl nbytes_r, len_r + jbe .L_do_copy1_mmx /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + jmp 
.L_do_copy1_mmx + +.L_wrap_around_window_mmx: +#define write_r %eax + movl write(%esp), write_r + cmpl write_r, nbytes_r + jbe .L_contiguous_in_window_mmx /* if (write >= nbytes) */ + + addl wsize(%esp), from_r + addl write_r, from_r + subl nbytes_r, from_r /* from += wsize + write - nbytes */ + subl write_r, nbytes_r /* nbytes -= write */ +#undef write_r + + cmpl nbytes_r, len_r + jbe .L_do_copy1_mmx /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl window(%esp), from_r /* from = window */ + movl write(%esp), nbytes_r /* nbytes = write */ + cmpl nbytes_r, len_r + jbe .L_do_copy1_mmx /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + jmp .L_do_copy1_mmx + +.L_contiguous_in_window_mmx: +#define write_r %eax + addl write_r, from_r + subl nbytes_r, from_r /* from += write - nbytes */ +#undef write_r + + cmpl nbytes_r, len_r + jbe .L_do_copy1_mmx /* if (nbytes >= len) */ + + subl nbytes_r, len_r /* len -= nbytes */ + rep movsb + movl out_r, from_r + subl dist_r, from_r /* from = out - dist */ + +.L_do_copy1_mmx: +#undef nbytes_r +#define in_r %esi + movl len_r, %ecx + rep movsb + + movl in(%esp), in_r /* move in back to %esi, toss from */ + movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */ + jmp .L_while_test_mmx + +#undef hold_r +#undef bitslong_r + +#endif /* USE_MMX || RUN_TIME_MMX */ + + +/*** USE_MMX, NO_MMX, and RUNTIME_MMX from here on ***/ + +.L_invalid_distance_code: + /* else { + * strm->msg = "invalid distance code"; + * state->mode = BAD; + * } + */ + movl $.L_invalid_distance_code_msg, %ecx + movl $INFLATE_MODE_BAD, %edx + jmp .L_update_stream_state + +.L_test_for_end_of_block: + /* else if (op & 32) { + * state->mode = TYPE; + * break; + * } + */ + testb $32, %al + jz .L_invalid_literal_length_code /* if ((op & 32) == 0) */ + + movl $0, %ecx + movl $INFLATE_MODE_TYPE, %edx + jmp .L_update_stream_state + 
+.L_invalid_literal_length_code: + /* else { + * strm->msg = "invalid literal/length code"; + * state->mode = BAD; + * } + */ + movl $.L_invalid_literal_length_code_msg, %ecx + movl $INFLATE_MODE_BAD, %edx + jmp .L_update_stream_state + +.L_invalid_distance_too_far: + /* strm->msg = "invalid distance too far back"; + * state->mode = BAD; + */ + movl in(%esp), in_r /* from_r has in's reg, put in back */ + movl $.L_invalid_distance_too_far_msg, %ecx + movl $INFLATE_MODE_BAD, %edx + jmp .L_update_stream_state + +.L_update_stream_state: + /* set strm->msg = %ecx, strm->state->mode = %edx */ + movl strm_sp(%esp), %eax + testl %ecx, %ecx /* if (msg != NULL) */ + jz .L_skip_msg + movl %ecx, msg_strm(%eax) /* strm->msg = msg */ +.L_skip_msg: + movl state_strm(%eax), %eax /* state = strm->state */ + movl %edx, mode_state(%eax) /* state->mode = edx (BAD | TYPE) */ + jmp .L_break_loop + +.align 32,0x90 +.L_break_loop: + +/* + * Regs: + * + * bits = %ebp when mmx, and in %ebx when non-mmx + * hold = %hold_mm when mmx, and in %ebp when non-mmx + * in = %esi + * out = %edi + */ + +#if defined( USE_MMX ) || defined( RUN_TIME_MMX ) + +#if defined( RUN_TIME_MMX ) + + cmpl $DO_USE_MMX, inflate_fast_use_mmx + jne .L_update_next_in + +#endif /* RUN_TIME_MMX */ + + movl %ebp, %ebx + +.L_update_next_in: + +#endif + +#define strm_r %eax +#define state_r %edx + + /* len = bits >> 3; + * in -= len; + * bits -= len << 3; + * hold &= (1U << bits) - 1; + * state->hold = hold; + * state->bits = bits; + * strm->next_in = in; + * strm->next_out = out; + */ + movl strm_sp(%esp), strm_r + movl %ebx, %ecx + movl state_strm(strm_r), state_r + shrl $3, %ecx + subl %ecx, in_r + shll $3, %ecx + subl %ecx, %ebx + movl out_r, next_out_strm(strm_r) + movl %ebx, bits_state(state_r) + movl %ebx, %ecx + + leal buf(%esp), %ebx + cmpl %ebx, last(%esp) + jne .L_buf_not_used /* if buf != last */ + + subl %ebx, in_r /* in -= buf */ + movl next_in_strm(strm_r), %ebx + movl %ebx, last(%esp) /* last = strm->next_in 
*/ + addl %ebx, in_r /* in += strm->next_in */ + movl avail_in_strm(strm_r), %ebx + subl $11, %ebx + addl %ebx, last(%esp) /* last = &strm->next_in[ avail_in - 11 ] */ + +.L_buf_not_used: + movl in_r, next_in_strm(strm_r) + + movl $1, %ebx + shll %cl, %ebx + decl %ebx + +#if defined( USE_MMX ) || defined( RUN_TIME_MMX ) + +#if defined( RUN_TIME_MMX ) + + cmpl $DO_USE_MMX, inflate_fast_use_mmx + jne .L_update_hold + +#endif /* RUN_TIME_MMX */ + + psrlq used_mm, hold_mm /* hold_mm >>= last bit length */ + movd hold_mm, %ebp + + emms + +.L_update_hold: + +#endif /* USE_MMX || RUN_TIME_MMX */ + + andl %ebx, %ebp + movl %ebp, hold_state(state_r) + +#define last_r %ebx + + /* strm->avail_in = in < last ? 11 + (last - in) : 11 - (in - last) */ + movl last(%esp), last_r + cmpl in_r, last_r + jbe .L_last_is_smaller /* if (in >= last) */ + + subl in_r, last_r /* last -= in */ + addl $11, last_r /* last += 11 */ + movl last_r, avail_in_strm(strm_r) + jmp .L_fixup_out +.L_last_is_smaller: + subl last_r, in_r /* in -= last */ + negl in_r /* in = -in */ + addl $11, in_r /* in += 11 */ + movl in_r, avail_in_strm(strm_r) + +#undef last_r +#define end_r %ebx + +.L_fixup_out: + /* strm->avail_out = out < end ? 
257 + (end - out) : 257 - (out - end)*/ + movl end(%esp), end_r + cmpl out_r, end_r + jbe .L_end_is_smaller /* if (out >= end) */ + + subl out_r, end_r /* end -= out */ + addl $257, end_r /* end += 257 */ + movl end_r, avail_out_strm(strm_r) + jmp .L_done +.L_end_is_smaller: + subl end_r, out_r /* out -= end */ + negl out_r /* out = -out */ + addl $257, out_r /* out += 257 */ + movl out_r, avail_out_strm(strm_r) + +#undef end_r +#undef strm_r +#undef state_r + +.L_done: + addl $local_var_size, %esp + popf + popl %ebx + popl %ebp + popl %esi + popl %edi + ret + +#if defined( GAS_ELF ) +/* elf info */ +.type inflate_fast,@function +.size inflate_fast,.-inflate_fast +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream/test.cpp nodejs-0.11.15/deps/zlib/contrib/iostream/test.cpp --- nodejs-0.11.13/deps/zlib/contrib/iostream/test.cpp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream/test.cpp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,24 @@ + +#include "zfstream.h" + +int main() { + + // Construct a stream object with this filebuffer. Anything sent + // to this stream will go to standard out. + gzofstream os( 1, ios::out ); + + // This text is getting compressed and sent to stdout. + // To prove this, run 'test | zcat'. + os << "Hello, Mommy" << endl; + + os << setcompressionlevel( Z_NO_COMPRESSION ); + os << "hello, hello, hi, ho!" 
<< endl; + + setcompressionlevel( os, Z_DEFAULT_COMPRESSION ) + << "I'm compressing again" << endl; + + os.close(); + + return 0; + +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream/zfstream.cpp nodejs-0.11.15/deps/zlib/contrib/iostream/zfstream.cpp --- nodejs-0.11.13/deps/zlib/contrib/iostream/zfstream.cpp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream/zfstream.cpp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,329 @@ + +#include "zfstream.h" + +gzfilebuf::gzfilebuf() : + file(NULL), + mode(0), + own_file_descriptor(0) +{ } + +gzfilebuf::~gzfilebuf() { + + sync(); + if ( own_file_descriptor ) + close(); + +} + +gzfilebuf *gzfilebuf::open( const char *name, + int io_mode ) { + + if ( is_open() ) + return NULL; + + char char_mode[10]; + char *p = char_mode; + + if ( io_mode & ios::in ) { + mode = ios::in; + *p++ = 'r'; + } else if ( io_mode & ios::app ) { + mode = ios::app; + *p++ = 'a'; + } else { + mode = ios::out; + *p++ = 'w'; + } + + if ( io_mode & ios::binary ) { + mode |= ios::binary; + *p++ = 'b'; + } + + // Hard code the compression level + if ( io_mode & (ios::out|ios::app )) { + *p++ = '9'; + } + + // Put the end-of-string indicator + *p = '\0'; + + if ( (file = gzopen(name, char_mode)) == NULL ) + return NULL; + + own_file_descriptor = 1; + + return this; + +} + +gzfilebuf *gzfilebuf::attach( int file_descriptor, + int io_mode ) { + + if ( is_open() ) + return NULL; + + char char_mode[10]; + char *p = char_mode; + + if ( io_mode & ios::in ) { + mode = ios::in; + *p++ = 'r'; + } else if ( io_mode & ios::app ) { + mode = ios::app; + *p++ = 'a'; + } else { + mode = ios::out; + *p++ = 'w'; + } + + if ( io_mode & ios::binary ) { + mode |= ios::binary; + *p++ = 'b'; + } + + // Hard code the compression level + if ( io_mode & (ios::out|ios::app )) { + *p++ = '9'; + } + + // Put the end-of-string indicator + *p = '\0'; + + if ( (file = gzdopen(file_descriptor, char_mode)) == NULL ) + return NULL; + + own_file_descriptor = 
0; + + return this; + +} + +gzfilebuf *gzfilebuf::close() { + + if ( is_open() ) { + + sync(); + gzclose( file ); + file = NULL; + + } + + return this; + +} + +int gzfilebuf::setcompressionlevel( int comp_level ) { + + return gzsetparams(file, comp_level, -2); + +} + +int gzfilebuf::setcompressionstrategy( int comp_strategy ) { + + return gzsetparams(file, -2, comp_strategy); + +} + + +streampos gzfilebuf::seekoff( streamoff off, ios::seek_dir dir, int which ) { + + return streampos(EOF); + +} + +int gzfilebuf::underflow() { + + // If the file hasn't been opened for reading, error. + if ( !is_open() || !(mode & ios::in) ) + return EOF; + + // if a buffer doesn't exists, allocate one. + if ( !base() ) { + + if ( (allocate()) == EOF ) + return EOF; + setp(0,0); + + } else { + + if ( in_avail() ) + return (unsigned char) *gptr(); + + if ( out_waiting() ) { + if ( flushbuf() == EOF ) + return EOF; + } + + } + + // Attempt to fill the buffer. + + int result = fillbuf(); + if ( result == EOF ) { + // disable get area + setg(0,0,0); + return EOF; + } + + return (unsigned char) *gptr(); + +} + +int gzfilebuf::overflow( int c ) { + + if ( !is_open() || !(mode & ios::out) ) + return EOF; + + if ( !base() ) { + if ( allocate() == EOF ) + return EOF; + setg(0,0,0); + } else { + if (in_avail()) { + return EOF; + } + if (out_waiting()) { + if (flushbuf() == EOF) + return EOF; + } + } + + int bl = blen(); + setp( base(), base() + bl); + + if ( c != EOF ) { + + *pptr() = c; + pbump(1); + + } + + return 0; + +} + +int gzfilebuf::sync() { + + if ( !is_open() ) + return EOF; + + if ( out_waiting() ) + return flushbuf(); + + return 0; + +} + +int gzfilebuf::flushbuf() { + + int n; + char *q; + + q = pbase(); + n = pptr() - q; + + if ( gzwrite( file, q, n) < n ) + return EOF; + + setp(0,0); + + return 0; + +} + +int gzfilebuf::fillbuf() { + + int required; + char *p; + + p = base(); + + required = blen(); + + int t = gzread( file, p, required ); + + if ( t <= 0) return EOF; + + setg( 
base(), base(), base()+t); + + return t; + +} + +gzfilestream_common::gzfilestream_common() : + ios( gzfilestream_common::rdbuf() ) +{ } + +gzfilestream_common::~gzfilestream_common() +{ } + +void gzfilestream_common::attach( int fd, int io_mode ) { + + if ( !buffer.attach( fd, io_mode) ) + clear( ios::failbit | ios::badbit ); + else + clear(); + +} + +void gzfilestream_common::open( const char *name, int io_mode ) { + + if ( !buffer.open( name, io_mode ) ) + clear( ios::failbit | ios::badbit ); + else + clear(); + +} + +void gzfilestream_common::close() { + + if ( !buffer.close() ) + clear( ios::failbit | ios::badbit ); + +} + +gzfilebuf *gzfilestream_common::rdbuf() +{ + return &buffer; +} + +gzifstream::gzifstream() : + ios( gzfilestream_common::rdbuf() ) +{ + clear( ios::badbit ); +} + +gzifstream::gzifstream( const char *name, int io_mode ) : + ios( gzfilestream_common::rdbuf() ) +{ + gzfilestream_common::open( name, io_mode ); +} + +gzifstream::gzifstream( int fd, int io_mode ) : + ios( gzfilestream_common::rdbuf() ) +{ + gzfilestream_common::attach( fd, io_mode ); +} + +gzifstream::~gzifstream() { } + +gzofstream::gzofstream() : + ios( gzfilestream_common::rdbuf() ) +{ + clear( ios::badbit ); +} + +gzofstream::gzofstream( const char *name, int io_mode ) : + ios( gzfilestream_common::rdbuf() ) +{ + gzfilestream_common::open( name, io_mode ); +} + +gzofstream::gzofstream( int fd, int io_mode ) : + ios( gzfilestream_common::rdbuf() ) +{ + gzfilestream_common::attach( fd, io_mode ); +} + +gzofstream::~gzofstream() { } diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream/zfstream.h nodejs-0.11.15/deps/zlib/contrib/iostream/zfstream.h --- nodejs-0.11.13/deps/zlib/contrib/iostream/zfstream.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream/zfstream.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,128 @@ + +#ifndef zfstream_h +#define zfstream_h + +#include <fstream.h> +#include "zlib.h" + +class gzfilebuf : public streambuf { + 
+public: + + gzfilebuf( ); + virtual ~gzfilebuf(); + + gzfilebuf *open( const char *name, int io_mode ); + gzfilebuf *attach( int file_descriptor, int io_mode ); + gzfilebuf *close(); + + int setcompressionlevel( int comp_level ); + int setcompressionstrategy( int comp_strategy ); + + inline int is_open() const { return (file !=NULL); } + + virtual streampos seekoff( streamoff, ios::seek_dir, int ); + + virtual int sync(); + +protected: + + virtual int underflow(); + virtual int overflow( int = EOF ); + +private: + + gzFile file; + short mode; + short own_file_descriptor; + + int flushbuf(); + int fillbuf(); + +}; + +class gzfilestream_common : virtual public ios { + + friend class gzifstream; + friend class gzofstream; + friend gzofstream &setcompressionlevel( gzofstream &, int ); + friend gzofstream &setcompressionstrategy( gzofstream &, int ); + +public: + virtual ~gzfilestream_common(); + + void attach( int fd, int io_mode ); + void open( const char *name, int io_mode ); + void close(); + +protected: + gzfilestream_common(); + +private: + gzfilebuf *rdbuf(); + + gzfilebuf buffer; + +}; + +class gzifstream : public gzfilestream_common, public istream { + +public: + + gzifstream(); + gzifstream( const char *name, int io_mode = ios::in ); + gzifstream( int fd, int io_mode = ios::in ); + + virtual ~gzifstream(); + +}; + +class gzofstream : public gzfilestream_common, public ostream { + +public: + + gzofstream(); + gzofstream( const char *name, int io_mode = ios::out ); + gzofstream( int fd, int io_mode = ios::out ); + + virtual ~gzofstream(); + +}; + +template<class T> class gzomanip { + friend gzofstream &operator<<(gzofstream &, const gzomanip<T> &); +public: + gzomanip(gzofstream &(*f)(gzofstream &, T), T v) : func(f), val(v) { } +private: + gzofstream &(*func)(gzofstream &, T); + T val; +}; + +template<class T> gzofstream &operator<<(gzofstream &s, const gzomanip<T> &m) +{ + return (*m.func)(s, m.val); +} + +inline gzofstream &setcompressionlevel( gzofstream 
&s, int l ) +{ + (s.rdbuf())->setcompressionlevel(l); + return s; +} + +inline gzofstream &setcompressionstrategy( gzofstream &s, int l ) +{ + (s.rdbuf())->setcompressionstrategy(l); + return s; +} + +inline gzomanip<int> setcompressionlevel(int l) +{ + return gzomanip<int>(&setcompressionlevel,l); +} + +inline gzomanip<int> setcompressionstrategy(int l) +{ + return gzomanip<int>(&setcompressionstrategy,l); +} + +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream2/zstream.h nodejs-0.11.15/deps/zlib/contrib/iostream2/zstream.h --- nodejs-0.11.13/deps/zlib/contrib/iostream2/zstream.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream2/zstream.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,307 @@ +/* + * + * Copyright (c) 1997 + * Christian Michelsen Research AS + * Advanced Computing + * Fantoftvegen 38, 5036 BERGEN, Norway + * http://www.cmr.no + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Christian Michelsen Research AS makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
+ * + */ + +#ifndef ZSTREAM__H +#define ZSTREAM__H + +/* + * zstream.h - C++ interface to the 'zlib' general purpose compression library + * $Id: zstream.h 1.1 1997-06-25 12:00:56+02 tyge Exp tyge $ + */ + +#include <strstream.h> +#include <string.h> +#include <stdio.h> +#include "zlib.h" + +#if defined(_WIN32) +# include <fcntl.h> +# include <io.h> +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +class zstringlen { +public: + zstringlen(class izstream&); + zstringlen(class ozstream&, const char*); + size_t value() const { return val.word; } +private: + struct Val { unsigned char byte; size_t word; } val; +}; + +// ----------------------------- izstream ----------------------------- + +class izstream +{ + public: + izstream() : m_fp(0) {} + izstream(FILE* fp) : m_fp(0) { open(fp); } + izstream(const char* name) : m_fp(0) { open(name); } + ~izstream() { close(); } + + /* Opens a gzip (.gz) file for reading. + * open() can be used to read a file which is not in gzip format; + * in this case read() will directly read from the file without + * decompression. errno can be checked to distinguish two error + * cases (if errno is zero, the zlib error is Z_MEM_ERROR). + */ + void open(const char* name) { + if (m_fp) close(); + m_fp = ::gzopen(name, "rb"); + } + + void open(FILE* fp) { + SET_BINARY_MODE(fp); + if (m_fp) close(); + m_fp = ::gzdopen(fileno(fp), "rb"); + } + + /* Flushes all pending input if necessary, closes the compressed file + * and deallocates all the (de)compression state. The return value is + * the zlib error number (see function error() below). + */ + int close() { + int r = ::gzclose(m_fp); + m_fp = 0; return r; + } + + /* Binary read the given number of bytes from the compressed file. + */ + int read(void* buf, size_t len) { + return ::gzread(m_fp, buf, len); + } + + /* Returns the error message for the last error which occurred on the + * given compressed file. 
errnum is set to zlib error number. If an + * error occurred in the file system and not in the compression library, + * errnum is set to Z_ERRNO and the application may consult errno + * to get the exact error code. + */ + const char* error(int* errnum) { + return ::gzerror(m_fp, errnum); + } + + gzFile fp() { return m_fp; } + + private: + gzFile m_fp; +}; + +/* + * Binary read the given (array of) object(s) from the compressed file. + * If the input file was not in gzip format, read() copies the objects number + * of bytes into the buffer. + * returns the number of uncompressed bytes actually read + * (0 for end of file, -1 for error). + */ +template <class T, class Items> +inline int read(izstream& zs, T* x, Items items) { + return ::gzread(zs.fp(), x, items*sizeof(T)); +} + +/* + * Binary input with the '>' operator. + */ +template <class T> +inline izstream& operator>(izstream& zs, T& x) { + ::gzread(zs.fp(), &x, sizeof(T)); + return zs; +} + + +inline zstringlen::zstringlen(izstream& zs) { + zs > val.byte; + if (val.byte == 255) zs > val.word; + else val.word = val.byte; +} + +/* + * Read length of string + the string with the '>' operator. + */ +inline izstream& operator>(izstream& zs, char* x) { + zstringlen len(zs); + ::gzread(zs.fp(), x, len.value()); + x[len.value()] = '\0'; + return zs; +} + +inline char* read_string(izstream& zs) { + zstringlen len(zs); + char* x = new char[len.value()+1]; + ::gzread(zs.fp(), x, len.value()); + x[len.value()] = '\0'; + return x; +} + +// ----------------------------- ozstream ----------------------------- + +class ozstream +{ + public: + ozstream() : m_fp(0), m_os(0) { + } + ozstream(FILE* fp, int level = Z_DEFAULT_COMPRESSION) + : m_fp(0), m_os(0) { + open(fp, level); + } + ozstream(const char* name, int level = Z_DEFAULT_COMPRESSION) + : m_fp(0), m_os(0) { + open(name, level); + } + ~ozstream() { + close(); + } + + /* Opens a gzip (.gz) file for writing. 
+ * The compression level parameter should be in 0..9 + * errno can be checked to distinguish two error cases + * (if errno is zero, the zlib error is Z_MEM_ERROR). + */ + void open(const char* name, int level = Z_DEFAULT_COMPRESSION) { + char mode[4] = "wb\0"; + if (level != Z_DEFAULT_COMPRESSION) mode[2] = '0'+level; + if (m_fp) close(); + m_fp = ::gzopen(name, mode); + } + + /* open from a FILE pointer. + */ + void open(FILE* fp, int level = Z_DEFAULT_COMPRESSION) { + SET_BINARY_MODE(fp); + char mode[4] = "wb\0"; + if (level != Z_DEFAULT_COMPRESSION) mode[2] = '0'+level; + if (m_fp) close(); + m_fp = ::gzdopen(fileno(fp), mode); + } + + /* Flushes all pending output if necessary, closes the compressed file + * and deallocates all the (de)compression state. The return value is + * the zlib error number (see function error() below). + */ + int close() { + if (m_os) { + ::gzwrite(m_fp, m_os->str(), m_os->pcount()); + delete[] m_os->str(); delete m_os; m_os = 0; + } + int r = ::gzclose(m_fp); m_fp = 0; return r; + } + + /* Binary write the given number of bytes into the compressed file. + */ + int write(const void* buf, size_t len) { + return ::gzwrite(m_fp, (voidp) buf, len); + } + + /* Flushes all pending output into the compressed file. The parameter + * _flush is as in the deflate() function. The return value is the zlib + * error number (see function gzerror below). flush() returns Z_OK if + * the flush_ parameter is Z_FINISH and all output could be flushed. + * flush() should be called only when strictly necessary because it can + * degrade compression. + */ + int flush(int _flush) { + os_flush(); + return ::gzflush(m_fp, _flush); + } + + /* Returns the error message for the last error which occurred on the + * given compressed file. errnum is set to zlib error number. If an + * error occurred in the file system and not in the compression library, + * errnum is set to Z_ERRNO and the application may consult errno + * to get the exact error code. 
+ */ + const char* error(int* errnum) { + return ::gzerror(m_fp, errnum); + } + + gzFile fp() { return m_fp; } + + ostream& os() { + if (m_os == 0) m_os = new ostrstream; + return *m_os; + } + + void os_flush() { + if (m_os && m_os->pcount()>0) { + ostrstream* oss = new ostrstream; + oss->fill(m_os->fill()); + oss->flags(m_os->flags()); + oss->precision(m_os->precision()); + oss->width(m_os->width()); + ::gzwrite(m_fp, m_os->str(), m_os->pcount()); + delete[] m_os->str(); delete m_os; m_os = oss; + } + } + + private: + gzFile m_fp; + ostrstream* m_os; +}; + +/* + * Binary write the given (array of) object(s) into the compressed file. + * returns the number of uncompressed bytes actually written + * (0 in case of error). + */ +template <class T, class Items> +inline int write(ozstream& zs, const T* x, Items items) { + return ::gzwrite(zs.fp(), (voidp) x, items*sizeof(T)); +} + +/* + * Binary output with the '<' operator. + */ +template <class T> +inline ozstream& operator<(ozstream& zs, const T& x) { + ::gzwrite(zs.fp(), (voidp) &x, sizeof(T)); + return zs; +} + +inline zstringlen::zstringlen(ozstream& zs, const char* x) { + val.byte = 255; val.word = ::strlen(x); + if (val.word < 255) zs < (val.byte = val.word); + else zs < val; +} + +/* + * Write length of string + the string with the '<' operator. 
+ */ +inline ozstream& operator<(ozstream& zs, const char* x) { + zstringlen len(zs, x); + ::gzwrite(zs.fp(), (voidp) x, len.value()); + return zs; +} + +#ifdef _MSC_VER +inline ozstream& operator<(ozstream& zs, char* const& x) { + return zs < (const char*) x; +} +#endif + +/* + * Ascii write with the << operator; + */ +template <class T> +inline ostream& operator<<(ozstream& zs, const T& x) { + zs.os_flush(); + return zs.os() << x; +} + +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream2/zstream_test.cpp nodejs-0.11.15/deps/zlib/contrib/iostream2/zstream_test.cpp --- nodejs-0.11.13/deps/zlib/contrib/iostream2/zstream_test.cpp 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream2/zstream_test.cpp 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,25 @@ +#include "zstream.h" +#include <math.h> +#include <stdlib.h> +#include <iomanip.h> + +void main() { + char h[256] = "Hello"; + char* g = "Goodbye"; + ozstream out("temp.gz"); + out < "This works well" < h < g; + out.close(); + + izstream in("temp.gz"); // read it back + char *x = read_string(in), *y = new char[256], z[256]; + in > y > z; + in.close(); + cout << x << endl << y << endl << z << endl; + + out.open("temp.gz"); // try ascii output; zcat temp.gz to see the results + out << setw(50) << setfill('#') << setprecision(20) << x << endl << y << endl << z << endl; + out << z << endl << y << endl << x << endl; + out << 1.1234567890123456789 << endl; + + delete[] x; delete[] y; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream3/README nodejs-0.11.15/deps/zlib/contrib/iostream3/README --- nodejs-0.11.13/deps/zlib/contrib/iostream3/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream3/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +These classes provide a C++ stream interface to the zlib library. 
It allows you +to do things like: + + gzofstream outf("blah.gz"); + outf << "These go into the gzip file " << 123 << endl; + +It does this by deriving a specialized stream buffer for gzipped files, which is +the way Stroustrup would have done it. :-> + +The gzifstream and gzofstream classes were originally written by Kevin Ruland +and made available in the zlib contrib/iostream directory. The older version still +compiles under gcc 2.xx, but not under gcc 3.xx, which sparked the development of +this version. + +The new classes are as standard-compliant as possible, closely following the +approach of the standard library's fstream classes. It compiles under gcc versions +3.2 and 3.3, but not under gcc 2.xx. This is mainly due to changes in the standard +library naming scheme. The new version of gzifstream/gzofstream/gzfilebuf differs +from the previous one in the following respects: +- added showmanyc +- added setbuf, with support for unbuffered output via setbuf(0,0) +- a few bug fixes of stream behavior +- gzipped output file opened with default compression level instead of maximum level +- setcompressionlevel()/strategy() members replaced by single setcompression() + +The code is provided "as is", with the permission to use, copy, modify, distribute +and sell it for any purpose without fee. 
+ +Ludwig Schwardt +<schwardt@sun.ac.za> + +DSP Lab +Electrical & Electronic Engineering Department +University of Stellenbosch +South Africa diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream3/test.cc nodejs-0.11.15/deps/zlib/contrib/iostream3/test.cc --- nodejs-0.11.13/deps/zlib/contrib/iostream3/test.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream3/test.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,50 @@ +/* + * Test program for gzifstream and gzofstream + * + * by Ludwig Schwardt <schwardt@sun.ac.za> + * original version by Kevin Ruland <kevin@rodin.wustl.edu> + */ + +#include "zfstream.h" +#include <iostream> // for cout + +int main() { + + gzofstream outf; + gzifstream inf; + char buf[80]; + + outf.open("test1.txt.gz"); + outf << "The quick brown fox sidestepped the lazy canine\n" + << 1.3 << "\nPlan " << 9 << std::endl; + outf.close(); + std::cout << "Wrote the following message to 'test1.txt.gz' (check with zcat or zless):\n" + << "The quick brown fox sidestepped the lazy canine\n" + << 1.3 << "\nPlan " << 9 << std::endl; + + std::cout << "\nReading 'test1.txt.gz' (buffered) produces:\n"; + inf.open("test1.txt.gz"); + while (inf.getline(buf,80,'\n')) { + std::cout << buf << "\t(" << inf.rdbuf()->in_avail() << " chars left in buffer)\n"; + } + inf.close(); + + outf.rdbuf()->pubsetbuf(0,0); + outf.open("test2.txt.gz"); + outf << setcompression(Z_NO_COMPRESSION) + << "The quick brown fox sidestepped the lazy canine\n" + << 1.3 << "\nPlan " << 9 << std::endl; + outf.close(); + std::cout << "\nWrote the same message to 'test2.txt.gz' in uncompressed form"; + + std::cout << "\nReading 'test2.txt.gz' (unbuffered) produces:\n"; + inf.rdbuf()->pubsetbuf(0,0); + inf.open("test2.txt.gz"); + while (inf.getline(buf,80,'\n')) { + std::cout << buf << "\t(" << inf.rdbuf()->in_avail() << " chars left in buffer)\n"; + } + inf.close(); + + return 0; + +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream3/TODO 
nodejs-0.11.15/deps/zlib/contrib/iostream3/TODO --- nodejs-0.11.13/deps/zlib/contrib/iostream3/TODO 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream3/TODO 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +Possible upgrades to gzfilebuf: + +- The ability to do putback (e.g. putbackfail) + +- The ability to seek (zlib supports this, but could be slow/tricky) + +- Simultaneous read/write access (does it make sense?) + +- Support for ios_base::ate open mode + +- Locale support? + +- Check public interface to see which calls give problems + (due to dependence on library internals) + +- Override operator<<(ostream&, gzfilebuf*) to allow direct copying + of stream buffer to stream ( i.e. os << is.rdbuf(); ) diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream3/zfstream.cc nodejs-0.11.15/deps/zlib/contrib/iostream3/zfstream.cc --- nodejs-0.11.13/deps/zlib/contrib/iostream3/zfstream.cc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream3/zfstream.cc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,479 @@ +/* + * A C++ I/O streams interface to the zlib gz* functions + * + * by Ludwig Schwardt <schwardt@sun.ac.za> + * original version by Kevin Ruland <kevin@rodin.wustl.edu> + * + * This version is standard-compliant and compatible with gcc 3.x. + */ + +#include "zfstream.h" +#include <cstring> // for strcpy, strcat, strlen (mode strings) +#include <cstdio> // for BUFSIZ + +// Internal buffer sizes (default and "unbuffered" versions) +#define BIGBUFSIZE BUFSIZ +#define SMALLBUFSIZE 1 + +/*****************************************************************************/ + +// Default constructor +gzfilebuf::gzfilebuf() +: file(NULL), io_mode(std::ios_base::openmode(0)), own_fd(false), + buffer(NULL), buffer_size(BIGBUFSIZE), own_buffer(true) +{ + // No buffers to start with + this->disable_buffer(); +} + +// Destructor +gzfilebuf::~gzfilebuf() +{ + // Sync output buffer and close only if responsible for file + // (i.e. 
attached streams should be left open at this stage) + this->sync(); + if (own_fd) + this->close(); + // Make sure internal buffer is deallocated + this->disable_buffer(); +} + +// Set compression level and strategy +int +gzfilebuf::setcompression(int comp_level, + int comp_strategy) +{ + return gzsetparams(file, comp_level, comp_strategy); +} + +// Open gzipped file +gzfilebuf* +gzfilebuf::open(const char *name, + std::ios_base::openmode mode) +{ + // Fail if file already open + if (this->is_open()) + return NULL; + // Don't support simultaneous read/write access (yet) + if ((mode & std::ios_base::in) && (mode & std::ios_base::out)) + return NULL; + + // Build mode string for gzopen and check it [27.8.1.3.2] + char char_mode[6] = "\0\0\0\0\0"; + if (!this->open_mode(mode, char_mode)) + return NULL; + + // Attempt to open file + if ((file = gzopen(name, char_mode)) == NULL) + return NULL; + + // On success, allocate internal buffer and set flags + this->enable_buffer(); + io_mode = mode; + own_fd = true; + return this; +} + +// Attach to gzipped file +gzfilebuf* +gzfilebuf::attach(int fd, + std::ios_base::openmode mode) +{ + // Fail if file already open + if (this->is_open()) + return NULL; + // Don't support simultaneous read/write access (yet) + if ((mode & std::ios_base::in) && (mode & std::ios_base::out)) + return NULL; + + // Build mode string for gzdopen and check it [27.8.1.3.2] + char char_mode[6] = "\0\0\0\0\0"; + if (!this->open_mode(mode, char_mode)) + return NULL; + + // Attempt to attach to file + if ((file = gzdopen(fd, char_mode)) == NULL) + return NULL; + + // On success, allocate internal buffer and set flags + this->enable_buffer(); + io_mode = mode; + own_fd = false; + return this; +} + +// Close gzipped file +gzfilebuf* +gzfilebuf::close() +{ + // Fail immediately if no file is open + if (!this->is_open()) + return NULL; + // Assume success + gzfilebuf* retval = this; + // Attempt to sync and close gzipped file + if (this->sync() == -1) + retval 
= NULL; + if (gzclose(file) < 0) + retval = NULL; + // File is now gone anyway (postcondition [27.8.1.3.8]) + file = NULL; + own_fd = false; + // Destroy internal buffer if it exists + this->disable_buffer(); + return retval; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// Convert int open mode to mode string +bool +gzfilebuf::open_mode(std::ios_base::openmode mode, + char* c_mode) const +{ + bool testb = mode & std::ios_base::binary; + bool testi = mode & std::ios_base::in; + bool testo = mode & std::ios_base::out; + bool testt = mode & std::ios_base::trunc; + bool testa = mode & std::ios_base::app; + + // Check for valid flag combinations - see [27.8.1.3.2] (Table 92) + // Original zfstream hardcoded the compression level to maximum here... + // Double the time for less than 1% size improvement seems + // excessive though - keeping it at the default level + // To change back, just append "9" to the next three mode strings + if (!testi && testo && !testt && !testa) + strcpy(c_mode, "w"); + if (!testi && testo && !testt && testa) + strcpy(c_mode, "a"); + if (!testi && testo && testt && !testa) + strcpy(c_mode, "w"); + if (testi && !testo && !testt && !testa) + strcpy(c_mode, "r"); + // No read/write mode yet +// if (testi && testo && !testt && !testa) +// strcpy(c_mode, "r+"); +// if (testi && testo && testt && !testa) +// strcpy(c_mode, "w+"); + + // Mode string should be empty for invalid combination of flags + if (strlen(c_mode) == 0) + return false; + if (testb) + strcat(c_mode, "b"); + return true; +} + +// Determine number of characters in internal get buffer +std::streamsize +gzfilebuf::showmanyc() +{ + // Calls to underflow will fail if file not opened for reading + if (!this->is_open() || !(io_mode & std::ios_base::in)) + return -1; + // Make sure get area is in use + if (this->gptr() && (this->gptr() < this->egptr())) + return std::streamsize(this->egptr() - this->gptr()); + else + return 0; +} + +// Fill get 
area from gzipped file +gzfilebuf::int_type +gzfilebuf::underflow() +{ + // If something is left in the get area by chance, return it + // (this shouldn't normally happen, as underflow is only supposed + // to be called when gptr >= egptr, but it serves as error check) + if (this->gptr() && (this->gptr() < this->egptr())) + return traits_type::to_int_type(*(this->gptr())); + + // If the file hasn't been opened for reading, produce error + if (!this->is_open() || !(io_mode & std::ios_base::in)) + return traits_type::eof(); + + // Attempt to fill internal buffer from gzipped file + // (buffer must be guaranteed to exist...) + int bytes_read = gzread(file, buffer, buffer_size); + // Indicates error or EOF + if (bytes_read <= 0) + { + // Reset get area + this->setg(buffer, buffer, buffer); + return traits_type::eof(); + } + // Make all bytes read from file available as get area + this->setg(buffer, buffer, buffer + bytes_read); + + // Return next character in get area + return traits_type::to_int_type(*(this->gptr())); +} + +// Write put area to gzipped file +gzfilebuf::int_type +gzfilebuf::overflow(int_type c) +{ + // Determine whether put area is in use + if (this->pbase()) + { + // Double-check pointer range + if (this->pptr() > this->epptr() || this->pptr() < this->pbase()) + return traits_type::eof(); + // Add extra character to buffer if not EOF + if (!traits_type::eq_int_type(c, traits_type::eof())) + { + *(this->pptr()) = traits_type::to_char_type(c); + this->pbump(1); + } + // Number of characters to write to file + int bytes_to_write = this->pptr() - this->pbase(); + // Overflow doesn't fail if nothing is to be written + if (bytes_to_write > 0) + { + // If the file hasn't been opened for writing, produce error + if (!this->is_open() || !(io_mode & std::ios_base::out)) + return traits_type::eof(); + // If gzipped file won't accept all bytes written to it, fail + if (gzwrite(file, this->pbase(), bytes_to_write) != bytes_to_write) + return traits_type::eof(); + 
// Reset next pointer to point to pbase on success + this->pbump(-bytes_to_write); + } + } + // Write extra character to file if not EOF + else if (!traits_type::eq_int_type(c, traits_type::eof())) + { + // If the file hasn't been opened for writing, produce error + if (!this->is_open() || !(io_mode & std::ios_base::out)) + return traits_type::eof(); + // Impromptu char buffer (allows "unbuffered" output) + char_type last_char = traits_type::to_char_type(c); + // If gzipped file won't accept this character, fail + if (gzwrite(file, &last_char, 1) != 1) + return traits_type::eof(); + } + + // If you got here, you have succeeded (even if c was EOF) + // The return value should therefore be non-EOF + if (traits_type::eq_int_type(c, traits_type::eof())) + return traits_type::not_eof(c); + else + return c; +} + +// Assign new buffer +std::streambuf* +gzfilebuf::setbuf(char_type* p, + std::streamsize n) +{ + // First make sure stuff is sync'ed, for safety + if (this->sync() == -1) + return NULL; + // If buffering is turned off on purpose via setbuf(0,0), still allocate one... + // "Unbuffered" only really refers to put [27.8.1.4.10], while get needs at + // least a buffer of size 1 (very inefficient though, therefore make it bigger?) + // This follows from [27.5.2.4.3]/12 (gptr needs to point at something, it seems) + if (!p || !n) + { + // Replace existing buffer (if any) with small internal buffer + this->disable_buffer(); + buffer = NULL; + buffer_size = 0; + own_buffer = true; + this->enable_buffer(); + } + else + { + // Replace existing buffer (if any) with external buffer + this->disable_buffer(); + buffer = p; + buffer_size = n; + own_buffer = false; + this->enable_buffer(); + } + return this; +} + +// Write put area to gzipped file (i.e. ensures that put area is empty) +int +gzfilebuf::sync() +{ + return traits_type::eq_int_type(this->overflow(), traits_type::eof()) ? 
-1 : 0; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// Allocate internal buffer +void +gzfilebuf::enable_buffer() +{ + // If internal buffer required, allocate one + if (own_buffer && !buffer) + { + // Check for buffered vs. "unbuffered" + if (buffer_size > 0) + { + // Allocate internal buffer + buffer = new char_type[buffer_size]; + // Get area starts empty and will be expanded by underflow as need arises + this->setg(buffer, buffer, buffer); + // Setup entire internal buffer as put area. + // The one-past-end pointer actually points to the last element of the buffer, + // so that overflow(c) can safely add the extra character c to the sequence. + // These pointers remain in place for the duration of the buffer + this->setp(buffer, buffer + buffer_size - 1); + } + else + { + // Even in "unbuffered" case, (small?) get buffer is still required + buffer_size = SMALLBUFSIZE; + buffer = new char_type[buffer_size]; + this->setg(buffer, buffer, buffer); + // "Unbuffered" means no put buffer + this->setp(0, 0); + } + } + else + { + // If buffer already allocated, reset buffer pointers just to make sure no + // stale chars are lying around + this->setg(buffer, buffer, buffer); + this->setp(buffer, buffer + buffer_size - 1); + } +} + +// Destroy internal buffer +void +gzfilebuf::disable_buffer() +{ + // If internal buffer exists, deallocate it + if (own_buffer && buffer) + { + // Preserve unbuffered status by zeroing size + if (!this->pbase()) + buffer_size = 0; + delete[] buffer; + buffer = NULL; + this->setg(0, 0, 0); + this->setp(0, 0); + } + else + { + // Reset buffer pointers to initial state if external buffer exists + this->setg(buffer, buffer, buffer); + if (buffer) + this->setp(buffer, buffer + buffer_size - 1); + else + this->setp(0, 0); + } +} + +/*****************************************************************************/ + +// Default constructor initializes stream buffer +gzifstream::gzifstream() +: 
std::istream(NULL), sb() +{ this->init(&sb); } + +// Initialize stream buffer and open file +gzifstream::gzifstream(const char* name, + std::ios_base::openmode mode) +: std::istream(NULL), sb() +{ + this->init(&sb); + this->open(name, mode); +} + +// Initialize stream buffer and attach to file +gzifstream::gzifstream(int fd, + std::ios_base::openmode mode) +: std::istream(NULL), sb() +{ + this->init(&sb); + this->attach(fd, mode); +} + +// Open file and go into fail() state if unsuccessful +void +gzifstream::open(const char* name, + std::ios_base::openmode mode) +{ + if (!sb.open(name, mode | std::ios_base::in)) + this->setstate(std::ios_base::failbit); + else + this->clear(); +} + +// Attach to file and go into fail() state if unsuccessful +void +gzifstream::attach(int fd, + std::ios_base::openmode mode) +{ + if (!sb.attach(fd, mode | std::ios_base::in)) + this->setstate(std::ios_base::failbit); + else + this->clear(); +} + +// Close file +void +gzifstream::close() +{ + if (!sb.close()) + this->setstate(std::ios_base::failbit); +} + +/*****************************************************************************/ + +// Default constructor initializes stream buffer +gzofstream::gzofstream() +: std::ostream(NULL), sb() +{ this->init(&sb); } + +// Initialize stream buffer and open file +gzofstream::gzofstream(const char* name, + std::ios_base::openmode mode) +: std::ostream(NULL), sb() +{ + this->init(&sb); + this->open(name, mode); +} + +// Initialize stream buffer and attach to file +gzofstream::gzofstream(int fd, + std::ios_base::openmode mode) +: std::ostream(NULL), sb() +{ + this->init(&sb); + this->attach(fd, mode); +} + +// Open file and go into fail() state if unsuccessful +void +gzofstream::open(const char* name, + std::ios_base::openmode mode) +{ + if (!sb.open(name, mode | std::ios_base::out)) + this->setstate(std::ios_base::failbit); + else + this->clear(); +} + +// Attach to file and go into fail() state if unsuccessful +void +gzofstream::attach(int fd, + 
std::ios_base::openmode mode) +{ + if (!sb.attach(fd, mode | std::ios_base::out)) + this->setstate(std::ios_base::failbit); + else + this->clear(); +} + +// Close file +void +gzofstream::close() +{ + if (!sb.close()) + this->setstate(std::ios_base::failbit); +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/iostream3/zfstream.h nodejs-0.11.15/deps/zlib/contrib/iostream3/zfstream.h --- nodejs-0.11.13/deps/zlib/contrib/iostream3/zfstream.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/iostream3/zfstream.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,466 @@ +/* + * A C++ I/O streams interface to the zlib gz* functions + * + * by Ludwig Schwardt <schwardt@sun.ac.za> + * original version by Kevin Ruland <kevin@rodin.wustl.edu> + * + * This version is standard-compliant and compatible with gcc 3.x. + */ + +#ifndef ZFSTREAM_H +#define ZFSTREAM_H + +#include <istream> // not iostream, since we don't need cin/cout +#include <ostream> +#include "zlib.h" + +/*****************************************************************************/ + +/** + * @brief Gzipped file stream buffer class. + * + * This class implements basic_filebuf for gzipped files. It doesn't yet support + * seeking (allowed by zlib but slow/limited), putback and read/write access + * (tricky). Otherwise, it attempts to be a drop-in replacement for the standard + * file streambuf. +*/ +class gzfilebuf : public std::streambuf +{ +public: + // Default constructor. + gzfilebuf(); + + // Destructor. + virtual + ~gzfilebuf(); + + /** + * @brief Set compression level and strategy on the fly. + * @param comp_level Compression level (see zlib.h for allowed values) + * @param comp_strategy Compression strategy (see zlib.h for allowed values) + * @return Z_OK on success, Z_STREAM_ERROR otherwise. + * + * Unfortunately, these parameters cannot be modified separately, as the + * previous zfstream version assumed. 
Since the strategy is seldom changed, + * it can default and setcompression(level) then becomes like the old + * setcompressionlevel(level). + */ + int + setcompression(int comp_level, + int comp_strategy = Z_DEFAULT_STRATEGY); + + /** + * @brief Check if file is open. + * @return True if file is open. + */ + bool + is_open() const { return (file != NULL); } + + /** + * @brief Open gzipped file. + * @param name File name. + * @param mode Open mode flags. + * @return @c this on success, NULL on failure. + */ + gzfilebuf* + open(const char* name, + std::ios_base::openmode mode); + + /** + * @brief Attach to already open gzipped file. + * @param fd File descriptor. + * @param mode Open mode flags. + * @return @c this on success, NULL on failure. + */ + gzfilebuf* + attach(int fd, + std::ios_base::openmode mode); + + /** + * @brief Close gzipped file. + * @return @c this on success, NULL on failure. + */ + gzfilebuf* + close(); + +protected: + /** + * @brief Convert ios open mode int to mode string used by zlib. + * @return True if valid mode flag combination. + */ + bool + open_mode(std::ios_base::openmode mode, + char* c_mode) const; + + /** + * @brief Number of characters available in stream buffer. + * @return Number of characters. + * + * This indicates number of characters in get area of stream buffer. + * These characters can be read without accessing the gzipped file. + */ + virtual std::streamsize + showmanyc(); + + /** + * @brief Fill get area from gzipped file. + * @return First character in get area on success, EOF on error. + * + * This actually reads characters from gzipped file to stream + * buffer. Always buffered. + */ + virtual int_type + underflow(); + + /** + * @brief Write put area to gzipped file. + * @param c Extra character to add to buffer contents. + * @return Non-EOF on success, EOF on error. + * + * This actually writes characters in stream buffer to + * gzipped file. With unbuffered output this is done one + * character at a time. 
+ */ + virtual int_type + overflow(int_type c = traits_type::eof()); + + /** + * @brief Installs external stream buffer. + * @param p Pointer to char buffer. + * @param n Size of external buffer. + * @return @c this on success, NULL on failure. + * + * Call setbuf(0,0) to enable unbuffered output. + */ + virtual std::streambuf* + setbuf(char_type* p, + std::streamsize n); + + /** + * @brief Flush stream buffer to file. + * @return 0 on success, -1 on error. + * + * This calls underflow(EOF) to do the job. + */ + virtual int + sync(); + +// +// Some future enhancements +// +// virtual int_type uflow(); +// virtual int_type pbackfail(int_type c = traits_type::eof()); +// virtual pos_type +// seekoff(off_type off, +// std::ios_base::seekdir way, +// std::ios_base::openmode mode = std::ios_base::in|std::ios_base::out); +// virtual pos_type +// seekpos(pos_type sp, +// std::ios_base::openmode mode = std::ios_base::in|std::ios_base::out); + +private: + /** + * @brief Allocate internal buffer. + * + * This function is safe to call multiple times. It will ensure + * that a proper internal buffer exists if it is required. If the + * buffer already exists or is external, the buffer pointers will be + * reset to their original state. + */ + void + enable_buffer(); + + /** + * @brief Destroy internal buffer. + * + * This function is safe to call multiple times. It will ensure + * that the internal buffer is deallocated if it exists. In any + * case, it will also reset the buffer pointers. + */ + void + disable_buffer(); + + /** + * Underlying file pointer. + */ + gzFile file; + + /** + * Mode in which file was opened. + */ + std::ios_base::openmode io_mode; + + /** + * @brief True if this object owns file descriptor. + * + * This makes the class responsible for closing the file + * upon destruction. + */ + bool own_fd; + + /** + * @brief Stream buffer. 
+ * + * For simplicity this remains allocated on the free store for the + * entire life span of the gzfilebuf object, unless replaced by setbuf. + */ + char_type* buffer; + + /** + * @brief Stream buffer size. + * + * Defaults to system default buffer size (typically 8192 bytes). + * Modified by setbuf. + */ + std::streamsize buffer_size; + + /** + * @brief True if this object owns stream buffer. + * + * This makes the class responsible for deleting the buffer + * upon destruction. + */ + bool own_buffer; +}; + +/*****************************************************************************/ + +/** + * @brief Gzipped file input stream class. + * + * This class implements ifstream for gzipped files. Seeking and putback + * is not supported yet. +*/ +class gzifstream : public std::istream +{ +public: + // Default constructor + gzifstream(); + + /** + * @brief Construct stream on gzipped file to be opened. + * @param name File name. + * @param mode Open mode flags (forced to contain ios::in). + */ + explicit + gzifstream(const char* name, + std::ios_base::openmode mode = std::ios_base::in); + + /** + * @brief Construct stream on already open gzipped file. + * @param fd File descriptor. + * @param mode Open mode flags (forced to contain ios::in). + */ + explicit + gzifstream(int fd, + std::ios_base::openmode mode = std::ios_base::in); + + /** + * Obtain underlying stream buffer. + */ + gzfilebuf* + rdbuf() const + { return const_cast<gzfilebuf*>(&sb); } + + /** + * @brief Check if file is open. + * @return True if file is open. + */ + bool + is_open() { return sb.is_open(); } + + /** + * @brief Open gzipped file. + * @param name File name. + * @param mode Open mode flags (forced to contain ios::in). + * + * Stream will be in state good() if file opens successfully; + * otherwise in state fail(). 
This differs from the behavior of + * ifstream, which never sets the state to good() and therefore + * won't allow you to reuse the stream for a second file unless + * you manually clear() the state. The choice is a matter of + * convenience. + */ + void + open(const char* name, + std::ios_base::openmode mode = std::ios_base::in); + + /** + * @brief Attach to already open gzipped file. + * @param fd File descriptor. + * @param mode Open mode flags (forced to contain ios::in). + * + * Stream will be in state good() if attach succeeded; otherwise + * in state fail(). + */ + void + attach(int fd, + std::ios_base::openmode mode = std::ios_base::in); + + /** + * @brief Close gzipped file. + * + * Stream will be in state fail() if close failed. + */ + void + close(); + +private: + /** + * Underlying stream buffer. + */ + gzfilebuf sb; +}; + +/*****************************************************************************/ + +/** + * @brief Gzipped file output stream class. + * + * This class implements ofstream for gzipped files. Seeking and putback + * is not supported yet. +*/ +class gzofstream : public std::ostream +{ +public: + // Default constructor + gzofstream(); + + /** + * @brief Construct stream on gzipped file to be opened. + * @param name File name. + * @param mode Open mode flags (forced to contain ios::out). + */ + explicit + gzofstream(const char* name, + std::ios_base::openmode mode = std::ios_base::out); + + /** + * @brief Construct stream on already open gzipped file. + * @param fd File descriptor. + * @param mode Open mode flags (forced to contain ios::out). + */ + explicit + gzofstream(int fd, + std::ios_base::openmode mode = std::ios_base::out); + + /** + * Obtain underlying stream buffer. + */ + gzfilebuf* + rdbuf() const + { return const_cast<gzfilebuf*>(&sb); } + + /** + * @brief Check if file is open. + * @return True if file is open. + */ + bool + is_open() { return sb.is_open(); } + + /** + * @brief Open gzipped file. + * @param name File name. 
+ * @param mode Open mode flags (forced to contain ios::out). + * + * Stream will be in state good() if file opens successfully; + * otherwise in state fail(). This differs from the behavior of + * ofstream, which never sets the state to good() and therefore + * won't allow you to reuse the stream for a second file unless + * you manually clear() the state. The choice is a matter of + * convenience. + */ + void + open(const char* name, + std::ios_base::openmode mode = std::ios_base::out); + + /** + * @brief Attach to already open gzipped file. + * @param fd File descriptor. + * @param mode Open mode flags (forced to contain ios::out). + * + * Stream will be in state good() if attach succeeded; otherwise + * in state fail(). + */ + void + attach(int fd, + std::ios_base::openmode mode = std::ios_base::out); + + /** + * @brief Close gzipped file. + * + * Stream will be in state fail() if close failed. + */ + void + close(); + +private: + /** + * Underlying stream buffer. + */ + gzfilebuf sb; +}; + +/*****************************************************************************/ + +/** + * @brief Gzipped file output stream manipulator class. + * + * This class defines a two-argument manipulator for gzofstream. It is used + * as base for the setcompression(int,int) manipulator. 
+*/ +template<typename T1, typename T2> + class gzomanip2 + { + public: + // Allows insertor to peek at internals + template <typename Ta, typename Tb> + friend gzofstream& + operator<<(gzofstream&, + const gzomanip2<Ta,Tb>&); + + // Constructor + gzomanip2(gzofstream& (*f)(gzofstream&, T1, T2), + T1 v1, + T2 v2); + private: + // Underlying manipulator function + gzofstream& + (*func)(gzofstream&, T1, T2); + + // Arguments for manipulator function + T1 val1; + T2 val2; + }; + +/*****************************************************************************/ + +// Manipulator function thunks through to stream buffer +inline gzofstream& +setcompression(gzofstream &gzs, int l, int s = Z_DEFAULT_STRATEGY) +{ + (gzs.rdbuf())->setcompression(l, s); + return gzs; +} + +// Manipulator constructor stores arguments +template<typename T1, typename T2> + inline + gzomanip2<T1,T2>::gzomanip2(gzofstream &(*f)(gzofstream &, T1, T2), + T1 v1, + T2 v2) + : func(f), val1(v1), val2(v2) + { } + +// Insertor applies underlying manipulator function to stream +template<typename T1, typename T2> + inline gzofstream& + operator<<(gzofstream& s, const gzomanip2<T1,T2>& m) + { return (*m.func)(s, m.val1, m.val2); } + +// Insert this onto stream to simplify setting of compression level +inline gzomanip2<int,int> +setcompression(int l, int s = Z_DEFAULT_STRATEGY) +{ return gzomanip2<int,int>(&setcompression, l, s); } + +#endif // ZFSTREAM_H diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx64/bld_ml64.bat nodejs-0.11.15/deps/zlib/contrib/masmx64/bld_ml64.bat --- nodejs-0.11.13/deps/zlib/contrib/masmx64/bld_ml64.bat 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx64/bld_ml64.bat 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +ml64.exe /Flinffasx64 /c /Zi inffasx64.asm +ml64.exe /Flgvmat64 /c /Zi gvmat64.asm diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx64/gvmat64.asm nodejs-0.11.15/deps/zlib/contrib/masmx64/gvmat64.asm --- 
nodejs-0.11.13/deps/zlib/contrib/masmx64/gvmat64.asm 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx64/gvmat64.asm 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,553 @@ +;uInt longest_match_x64( +; deflate_state *s, +; IPos cur_match); /* current match */ + +; gvmat64.asm -- Asm portion of the optimized longest_match for 32 bits x86_64 +; (AMD64 on Athlon 64, Opteron, Phenom +; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7) +; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant. +; +; File written by Gilles Vollant, by converting to assembly the longest_match +; from Jean-loup Gailly in deflate.c of zLib and infoZip zip. +; +; and by taking inspiration on asm686 with masm, optimised assembly code +; from Brian Raiter, written 1998 +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software +; 3. This notice may not be removed or altered from any source distribution. 
+; +; +; +; http://www.zlib.net +; http://www.winimage.com/zLibDll +; http://www.muppetlabs.com/~breadbox/software/assembly.html +; +; to compile this file for infozip Zip, I use option: +; ml64.exe /Flgvmat64 /c /Zi /DINFOZIP gvmat64.asm +; +; to compile this file for zLib, I use option: +; ml64.exe /Flgvmat64 /c /Zi gvmat64.asm +; Be carrefull to adapt zlib1222add below to your version of zLib +; (if you use a version of zLib before 1.0.4 or after 1.2.2.2, change +; value of zlib1222add later) +; +; This file compile with Microsoft Macro Assembler (x64) for AMD64 +; +; ml64.exe is given with Visual Studio 2005/2008/2010 and Windows WDK +; +; (you can get Windows WDK with ml64 for AMD64 from +; http://www.microsoft.com/whdc/Devtools/wdk/default.mspx for low price) +; + + +;uInt longest_match(s, cur_match) +; deflate_state *s; +; IPos cur_match; /* current match */ +.code +longest_match PROC + + +;LocalVarsSize equ 88 + LocalVarsSize equ 72 + +; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12 +; free register : r14,r15 +; register can be saved : rsp + + chainlenwmask equ rsp + 8 - LocalVarsSize ; high word: current chain len + ; low word: s->wmask +;window equ rsp + xx - LocalVarsSize ; local copy of s->window ; stored in r10 +;windowbestlen equ rsp + xx - LocalVarsSize ; s->window + bestlen , use r10+r11 +;scanstart equ rsp + xx - LocalVarsSize ; first two bytes of string ; stored in r12w +;scanend equ rsp + xx - LocalVarsSize ; last two bytes of string use ebx +;scanalign equ rsp + xx - LocalVarsSize ; dword-misalignment of string r13 +;bestlen equ rsp + xx - LocalVarsSize ; size of best match so far -> r11d +;scan equ rsp + xx - LocalVarsSize ; ptr to string wanting match -> r9 +IFDEF INFOZIP +ELSE + nicematch equ (rsp + 16 - LocalVarsSize) ; a good enough match size +ENDIF + +save_rdi equ rsp + 24 - LocalVarsSize +save_rsi equ rsp + 32 - LocalVarsSize +save_rbx equ rsp + 40 - LocalVarsSize +save_rbp equ rsp + 48 - LocalVarsSize +save_r12 equ rsp + 56 
- LocalVarsSize +save_r13 equ rsp + 64 - LocalVarsSize +;save_r14 equ rsp + 72 - LocalVarsSize +;save_r15 equ rsp + 80 - LocalVarsSize + + +; summary of register usage +; scanend ebx +; scanendw bx +; chainlenwmask edx +; curmatch rsi +; curmatchd esi +; windowbestlen r8 +; scanalign r9 +; scanalignd r9d +; window r10 +; bestlen r11 +; bestlend r11d +; scanstart r12d +; scanstartw r12w +; scan r13 +; nicematch r14d +; limit r15 +; limitd r15d +; prev rcx + +; all the +4 offsets are due to the addition of pending_buf_size (in zlib +; in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, remove the +4). +; Note : these value are good with a 8 bytes boundary pack structure + + + MAX_MATCH equ 258 + MIN_MATCH equ 3 + MIN_LOOKAHEAD equ (MAX_MATCH+MIN_MATCH+1) + + +;;; Offsets for fields in the deflate_state structure. These numbers +;;; are calculated from the definition of deflate_state, with the +;;; assumption that the compiler will dword-align the fields. (Thus, +;;; changing the definition of deflate_state could easily cause this +;;; program to crash horribly, without so much as a warning at +;;; compile time. Sigh.) + +; all the +zlib1222add offsets are due to the addition of fields +; in zlib in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)"). +; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0"). +; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8"). 
+ + +IFDEF INFOZIP + +_DATA SEGMENT +COMM window_size:DWORD +; WMask ; 7fff +COMM window:BYTE:010040H +COMM prev:WORD:08000H +; MatchLen : unused +; PrevMatch : unused +COMM strstart:DWORD +COMM match_start:DWORD +; Lookahead : ignore +COMM prev_length:DWORD ; PrevLen +COMM max_chain_length:DWORD +COMM good_match:DWORD +COMM nice_match:DWORD +prev_ad equ OFFSET prev +window_ad equ OFFSET window +nicematch equ nice_match +_DATA ENDS +WMask equ 07fffh + +ELSE + + IFNDEF zlib1222add + zlib1222add equ 8 + ENDIF +dsWSize equ 56+zlib1222add+(zlib1222add/2) +dsWMask equ 64+zlib1222add+(zlib1222add/2) +dsWindow equ 72+zlib1222add +dsPrev equ 88+zlib1222add +dsMatchLen equ 128+zlib1222add +dsPrevMatch equ 132+zlib1222add +dsStrStart equ 140+zlib1222add +dsMatchStart equ 144+zlib1222add +dsLookahead equ 148+zlib1222add +dsPrevLen equ 152+zlib1222add +dsMaxChainLen equ 156+zlib1222add +dsGoodMatch equ 172+zlib1222add +dsNiceMatch equ 176+zlib1222add + +window_size equ [ rcx + dsWSize] +WMask equ [ rcx + dsWMask] +window_ad equ [ rcx + dsWindow] +prev_ad equ [ rcx + dsPrev] +strstart equ [ rcx + dsStrStart] +match_start equ [ rcx + dsMatchStart] +Lookahead equ [ rcx + dsLookahead] ; 0ffffffffh on infozip +prev_length equ [ rcx + dsPrevLen] +max_chain_length equ [ rcx + dsMaxChainLen] +good_match equ [ rcx + dsGoodMatch] +nice_match equ [ rcx + dsNiceMatch] +ENDIF + +; parameter 1 in r8(deflate state s), param 2 in rdx (cur match) + +; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and +; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp +; +; All registers must be preserved across the call, except for +; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch. + + + +;;; Save registers that the compiler may be using, and adjust esp to +;;; make room for our stack frame. + + +;;; Retrieve the function arguments. r8d will hold cur_match +;;; throughout the entire function. 
edx will hold the pointer to the +;;; deflate_state structure during the function's setup (before +;;; entering the main loop. + +; parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match) + +; this clear high 32 bits of r8, which can be garbage in both r8 and rdx + + mov [save_rdi],rdi + mov [save_rsi],rsi + mov [save_rbx],rbx + mov [save_rbp],rbp +IFDEF INFOZIP + mov r8d,ecx +ELSE + mov r8d,edx +ENDIF + mov [save_r12],r12 + mov [save_r13],r13 +; mov [save_r14],r14 +; mov [save_r15],r15 + + +;;; uInt wmask = s->w_mask; +;;; unsigned chain_length = s->max_chain_length; +;;; if (s->prev_length >= s->good_match) { +;;; chain_length >>= 2; +;;; } + + mov edi, prev_length + mov esi, good_match + mov eax, WMask + mov ebx, max_chain_length + cmp edi, esi + jl LastMatchGood + shr ebx, 2 +LastMatchGood: + +;;; chainlen is decremented once beforehand so that the function can +;;; use the sign flag instead of the zero flag for the exit test. +;;; It is then shifted into the high word, to make room for the wmask +;;; value, which it will always accompany. + + dec ebx + shl ebx, 16 + or ebx, eax + +;;; on zlib only +;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + +IFDEF INFOZIP + mov [chainlenwmask], ebx +; on infozip nice_match = [nice_match] +ELSE + mov eax, nice_match + mov [chainlenwmask], ebx + mov r10d, Lookahead + cmp r10d, eax + cmovnl r10d, eax + mov [nicematch],r10d +ENDIF + +;;; register Bytef *scan = s->window + s->strstart; + mov r10, window_ad + mov ebp, strstart + lea r13, [r10 + rbp] + +;;; Determine how many bytes the scan ptr is off from being +;;; dword-aligned. + + mov r9,r13 + neg r13 + and r13,3 + +;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ? 
+;;; s->strstart - (IPos)MAX_DIST(s) : NIL; +IFDEF INFOZIP + mov eax,07efah ; MAX_DIST = (WSIZE-MIN_LOOKAHEAD) (0x8000-(3+8+1)) +ELSE + mov eax, window_size + sub eax, MIN_LOOKAHEAD +ENDIF + xor edi,edi + sub ebp, eax + + mov r11d, prev_length + + cmovng ebp,edi + +;;; int best_len = s->prev_length; + + +;;; Store the sum of s->window + best_len in esi locally, and in esi. + + lea rsi,[r10+r11] + +;;; register ush scan_start = *(ushf*)scan; +;;; register ush scan_end = *(ushf*)(scan+best_len-1); +;;; Posf *prev = s->prev; + + movzx r12d,word ptr [r9] + movzx ebx, word ptr [r9 + r11 - 1] + + mov rdi, prev_ad + +;;; Jump into the main loop. + + mov edx, [chainlenwmask] + + cmp bx,word ptr [rsi + r8 - 1] + jz LookupLoopIsZero + +LookupLoop1: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + jbe LeaveNow + sub edx, 00010000h + js LeaveNow + +LoopEntry1: + cmp bx,word ptr [rsi + r8 - 1] + jz LookupLoopIsZero + +LookupLoop2: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + jbe LeaveNow + sub edx, 00010000h + js LeaveNow + +LoopEntry2: + cmp bx,word ptr [rsi + r8 - 1] + jz LookupLoopIsZero + +LookupLoop4: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + jbe LeaveNow + sub edx, 00010000h + js LeaveNow + +LoopEntry4: + + cmp bx,word ptr [rsi + r8 - 1] + jnz LookupLoop1 + jmp LookupLoopIsZero + + +;;; do { +;;; match = s->window + cur_match; +;;; if (*(ushf*)(match+best_len-1) != scan_end || +;;; *(ushf*)match != scan_start) continue; +;;; [...] +;;; } while ((cur_match = prev[cur_match & wmask]) > limit +;;; && --chain_length != 0); +;;; +;;; Here is the inner loop of the function. The function will spend the +;;; majority of its time in this loop, and majority of that time will +;;; be spent in the first ten instructions. 
+;;; +;;; Within this loop: +;;; ebx = scanend +;;; r8d = curmatch +;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask) +;;; esi = windowbestlen - i.e., (window + bestlen) +;;; edi = prev +;;; ebp = limit + +LookupLoop: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + jbe LeaveNow + sub edx, 00010000h + js LeaveNow + +LoopEntry: + + cmp bx,word ptr [rsi + r8 - 1] + jnz LookupLoop1 +LookupLoopIsZero: + cmp r12w, word ptr [r10 + r8] + jnz LookupLoop1 + + +;;; Store the current value of chainlen. + mov [chainlenwmask], edx + +;;; Point edi to the string under scrutiny, and esi to the string we +;;; are hoping to match it up with. In actuality, esi and edi are +;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is +;;; initialized to -(MAX_MATCH_8 - scanalign). + + lea rsi,[r8+r10] + mov rdx, 0fffffffffffffef8h; -(MAX_MATCH_8) + lea rsi, [rsi + r13 + 0108h] ;MAX_MATCH_8] + lea rdi, [r9 + r13 + 0108h] ;MAX_MATCH_8] + + prefetcht1 [rsi+rdx] + prefetcht1 [rdi+rdx] + + +;;; Test the strings for equality, 8 bytes at a time. At the end, +;;; adjust rdx so that it is offset to the exact byte that mismatched. +;;; +;;; We already know at this point that the first three bytes of the +;;; strings match each other, and they can be safely passed over before +;;; starting the compare loop. So what this code does is skip over 0-3 +;;; bytes, as much as necessary in order to dword-align the edi +;;; pointer. (rsi will still be misaligned three times out of four.) +;;; +;;; It should be confessed that this loop usually does not represent +;;; much of the total running time. Replacing it with a more +;;; straightforward "rep cmpsb" would not drastically degrade +;;; performance. 
+ + +LoopCmps: + mov rax, [rsi + rdx] + xor rax, [rdi + rdx] + jnz LeaveLoopCmps + + mov rax, [rsi + rdx + 8] + xor rax, [rdi + rdx + 8] + jnz LeaveLoopCmps8 + + + mov rax, [rsi + rdx + 8+8] + xor rax, [rdi + rdx + 8+8] + jnz LeaveLoopCmps16 + + add rdx,8+8+8 + + jnz short LoopCmps + jmp short LenMaximum +LeaveLoopCmps16: add rdx,8 +LeaveLoopCmps8: add rdx,8 +LeaveLoopCmps: + + test eax, 0000FFFFh + jnz LenLower + + test eax,0ffffffffh + + jnz LenLower32 + + add rdx,4 + shr rax,32 + or ax,ax + jnz LenLower + +LenLower32: + shr eax,16 + add rdx,2 +LenLower: sub al, 1 + adc rdx, 0 +;;; Calculate the length of the match. If it is longer than MAX_MATCH, +;;; then automatically accept it as the best possible match and leave. + + lea rax, [rdi + rdx] + sub rax, r9 + cmp eax, MAX_MATCH + jge LenMaximum + +;;; If the length of the match is not longer than the best match we +;;; have so far, then forget it and return to the lookup loop. +;/////////////////////////////////// + + cmp eax, r11d + jg LongerMatch + + lea rsi,[r10+r11] + + mov rdi, prev_ad + mov edx, [chainlenwmask] + jmp LookupLoop + +;;; s->match_start = cur_match; +;;; best_len = len; +;;; if (len >= nice_match) break; +;;; scan_end = *(ushf*)(scan+best_len-1); + +LongerMatch: + mov r11d, eax + mov match_start, r8d + cmp eax, [nicematch] + jge LeaveNow + + lea rsi,[r10+rax] + + movzx ebx, word ptr [r9 + rax - 1] + mov rdi, prev_ad + mov edx, [chainlenwmask] + jmp LookupLoop + +;;; Accept the current string, with the maximum possible length. + +LenMaximum: + mov r11d,MAX_MATCH + mov match_start, r8d + +;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len; +;;; return s->lookahead; + +LeaveNow: +IFDEF INFOZIP + mov eax,r11d +ELSE + mov eax, Lookahead + cmp r11d, eax + cmovng eax, r11d +ENDIF + +;;; Restore the stack and return from whence we came. 
+ + + mov rsi,[save_rsi] + mov rdi,[save_rdi] + mov rbx,[save_rbx] + mov rbp,[save_rbp] + mov r12,[save_r12] + mov r13,[save_r13] +; mov r14,[save_r14] +; mov r15,[save_r15] + + + ret 0 +; please don't remove this string ! +; Your can freely use gvmat64 in any free or commercial app +; but it is far better don't remove the string in the binary! + db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0 +longest_match ENDP + +match_init PROC + ret 0 +match_init ENDP + + +END diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx64/inffas8664.c nodejs-0.11.15/deps/zlib/contrib/masmx64/inffas8664.c --- nodejs-0.11.13/deps/zlib/contrib/masmx64/inffas8664.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx64/inffas8664.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,186 @@ +/* inffas8664.c is a hand tuned assembler version of inffast.c - fast decoding + * version for AMD64 on Windows using Microsoft C compiler + * + * Copyright (C) 1995-2003 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + * + * Copyright (C) 2003 Chris Anderson <christop@charm.net> + * Please use the copyright conditions above. + * + * 2005 - Adaptation to Microsoft C Compiler for AMD64 by Gilles Vollant + * + * inffas8664.c call function inffas8664fnc in inffasx64.asm + * inffasx64.asm is automatically convert from AMD64 portion of inffas86.c + * + * Dec-29-2003 -- I added AMD64 inflate asm support. This version is also + * slightly quicker on x86 systems because, instead of using rep movsb to copy + * data, it uses rep movsw, which moves data in 2-byte chunks instead of single + * bytes. I've tested the AMD64 code on a Fedora Core 1 + the x86_64 updates + * from http://fedora.linux.duke.edu/fc1_x86_64 + * which is running on an Athlon 64 3000+ / Gigabyte GA-K8VT800M system with + * 1GB ram. 
The 64-bit version is about 4% faster than the 32-bit version, + * when decompressing mozilla-source-1.3.tar.gz. + * + * Mar-13-2003 -- Most of this is derived from inffast.S which is derived from + * the gcc -S output of zlib-1.2.0/inffast.c. Zlib-1.2.0 is in beta release at + * the moment. I have successfully compiled and tested this code with gcc2.96, + * gcc3.2, icc5.0, msvc6.0. It is very close to the speed of inffast.S + * compiled with gcc -DNO_MMX, but inffast.S is still faster on the P3 with MMX + * enabled. I will attempt to merge the MMX code into this version. Newer + * versions of this and inffast.S can be found at + * http://www.eetbeetee.com/zlib/ and http://www.charm.net/~christop/zlib/ + * + */ + +#include <stdio.h> +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +/* Mark Adler's comments from inffast.c: */ + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. + + Entry assumptions: + + state->mode == LEN + strm->avail_in >= 6 + strm->avail_out >= 258 + start >= strm->avail_out + state->bits < 8 + + On return, state->mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. 
+ Therefore if strm->avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. + + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm->avail_out >= 258 for each loop to avoid checking for + output space. + */ + + + + typedef struct inffast_ar { +/* 64 32 x86 x86_64 */ +/* ar offset register */ +/* 0 0 */ void *esp; /* esp save */ +/* 8 4 */ void *ebp; /* ebp save */ +/* 16 8 */ unsigned char FAR *in; /* esi rsi local strm->next_in */ +/* 24 12 */ unsigned char FAR *last; /* r9 while in < last */ +/* 32 16 */ unsigned char FAR *out; /* edi rdi local strm->next_out */ +/* 40 20 */ unsigned char FAR *beg; /* inflate()'s init next_out */ +/* 48 24 */ unsigned char FAR *end; /* r10 while out < end */ +/* 56 28 */ unsigned char FAR *window;/* size of window, wsize!=0 */ +/* 64 32 */ code const FAR *lcode; /* ebp rbp local strm->lencode */ +/* 72 36 */ code const FAR *dcode; /* r11 local strm->distcode */ +/* 80 40 */ size_t /*unsigned long */hold; /* edx rdx local strm->hold */ +/* 88 44 */ unsigned bits; /* ebx rbx local strm->bits */ +/* 92 48 */ unsigned wsize; /* window size */ +/* 96 52 */ unsigned write; /* window write index */ +/*100 56 */ unsigned lmask; /* r12 mask for lcode */ +/*104 60 */ unsigned dmask; /* r13 mask for dcode */ +/*108 64 */ unsigned len; /* r14 match length */ +/*112 68 */ unsigned dist; /* r15 match distance */ +/*116 72 */ unsigned status; /* set when state chng*/ + } type_ar; +#ifdef ASMINF + +void inflate_fast(strm, start) +z_streamp strm; +unsigned start; /* inflate()'s starting value for strm->avail_out */ +{ + struct inflate_state FAR *state; + type_ar ar; + void inffas8664fnc(struct inffast_ar * par); + + + +#if (defined( __GNUC__ ) && defined( __amd64__ ) && ! 
defined( __i386 )) || (defined(_MSC_VER) && defined(_M_AMD64)) +#define PAD_AVAIL_IN 6 +#define PAD_AVAIL_OUT 258 +#else +#define PAD_AVAIL_IN 5 +#define PAD_AVAIL_OUT 257 +#endif + + /* copy state to local variables */ + state = (struct inflate_state FAR *)strm->state; + + ar.in = strm->next_in; + ar.last = ar.in + (strm->avail_in - PAD_AVAIL_IN); + ar.out = strm->next_out; + ar.beg = ar.out - (start - strm->avail_out); + ar.end = ar.out + (strm->avail_out - PAD_AVAIL_OUT); + ar.wsize = state->wsize; + ar.write = state->wnext; + ar.window = state->window; + ar.hold = state->hold; + ar.bits = state->bits; + ar.lcode = state->lencode; + ar.dcode = state->distcode; + ar.lmask = (1U << state->lenbits) - 1; + ar.dmask = (1U << state->distbits) - 1; + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + + /* align in on 1/2 hold size boundary */ + while (((size_t)(void *)ar.in & (sizeof(ar.hold) / 2 - 1)) != 0) { + ar.hold += (unsigned long)*ar.in++ << ar.bits; + ar.bits += 8; + } + + inffas8664fnc(&ar); + + if (ar.status > 1) { + if (ar.status == 2) + strm->msg = "invalid literal/length code"; + else if (ar.status == 3) + strm->msg = "invalid distance code"; + else + strm->msg = "invalid distance too far back"; + state->mode = BAD; + } + else if ( ar.status == 1 ) { + state->mode = TYPE; + } + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + ar.len = ar.bits >> 3; + ar.in -= ar.len; + ar.bits -= ar.len << 3; + ar.hold &= (1U << ar.bits) - 1; + + /* update state and return */ + strm->next_in = ar.in; + strm->next_out = ar.out; + strm->avail_in = (unsigned)(ar.in < ar.last ? + PAD_AVAIL_IN + (ar.last - ar.in) : + PAD_AVAIL_IN - (ar.in - ar.last)); + strm->avail_out = (unsigned)(ar.out < ar.end ? 
+ PAD_AVAIL_OUT + (ar.end - ar.out) : + PAD_AVAIL_OUT - (ar.out - ar.end)); + state->hold = (unsigned long)ar.hold; + state->bits = ar.bits; + return; +} + +#endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx64/inffasx64.asm nodejs-0.11.15/deps/zlib/contrib/masmx64/inffasx64.asm --- nodejs-0.11.13/deps/zlib/contrib/masmx64/inffasx64.asm 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx64/inffasx64.asm 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,396 @@ +; inffasx64.asm is a hand tuned assembler version of inffast.c - fast decoding +; version for AMD64 on Windows using Microsoft C compiler +; +; inffasx64.asm is automatically convert from AMD64 portion of inffas86.c +; inffasx64.asm is called by inffas8664.c, which contain more info. + + +; to compile this file, I use option +; ml64.exe /Flinffasx64 /c /Zi inffasx64.asm +; with Microsoft Macro Assembler (x64) for AMD64 +; + +; This file compile with Microsoft Macro Assembler (x64) for AMD64 +; +; ml64.exe is given with Visual Studio 2005/2008/2010 and Windows WDK +; +; (you can get Windows WDK with ml64 for AMD64 from +; http://www.microsoft.com/whdc/Devtools/wdk/default.mspx for low price) +; + + +.code +inffas8664fnc PROC + +; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and +; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp +; +; All registers must be preserved across the call, except for +; rax, rcx, rdx, r8, r-9, r10, and r11, which are scratch. 
+ + + mov [rsp-8],rsi + mov [rsp-16],rdi + mov [rsp-24],r12 + mov [rsp-32],r13 + mov [rsp-40],r14 + mov [rsp-48],r15 + mov [rsp-56],rbx + + mov rax,rcx + + mov [rax+8], rbp ; /* save regs rbp and rsp */ + mov [rax], rsp + + mov rsp, rax ; /* make rsp point to &ar */ + + mov rsi, [rsp+16] ; /* rsi = in */ + mov rdi, [rsp+32] ; /* rdi = out */ + mov r9, [rsp+24] ; /* r9 = last */ + mov r10, [rsp+48] ; /* r10 = end */ + mov rbp, [rsp+64] ; /* rbp = lcode */ + mov r11, [rsp+72] ; /* r11 = dcode */ + mov rdx, [rsp+80] ; /* rdx = hold */ + mov ebx, [rsp+88] ; /* ebx = bits */ + mov r12d, [rsp+100] ; /* r12d = lmask */ + mov r13d, [rsp+104] ; /* r13d = dmask */ + ; /* r14d = len */ + ; /* r15d = dist */ + + + cld + cmp r10, rdi + je L_one_time ; /* if only one decode left */ + cmp r9, rsi + + jne L_do_loop + + +L_one_time: + mov r8, r12 ; /* r8 = lmask */ + cmp bl, 32 + ja L_get_length_code_one_time + + lodsd ; /* eax = *(uint *)in++ */ + mov cl, bl ; /* cl = bits, needs it for shifting */ + add bl, 32 ; /* bits += 32 */ + shl rax, cl + or rdx, rax ; /* hold |= *((uint *)in)++ << bits */ + jmp L_get_length_code_one_time + +ALIGN 4 +L_while_test: + cmp r10, rdi + jbe L_break_loop + cmp r9, rsi + jbe L_break_loop + +L_do_loop: + mov r8, r12 ; /* r8 = lmask */ + cmp bl, 32 + ja L_get_length_code ; /* if (32 < bits) */ + + lodsd ; /* eax = *(uint *)in++ */ + mov cl, bl ; /* cl = bits, needs it for shifting */ + add bl, 32 ; /* bits += 32 */ + shl rax, cl + or rdx, rax ; /* hold |= *((uint *)in)++ << bits */ + +L_get_length_code: + and r8, rdx ; /* r8 &= hold */ + mov eax, [rbp+r8*4] ; /* eax = lcode[hold & lmask] */ + + mov cl, ah ; /* cl = this.bits */ + sub bl, ah ; /* bits -= this.bits */ + shr rdx, cl ; /* hold >>= this.bits */ + + test al, al + jnz L_test_for_length_base ; /* if (op != 0) 45.7% */ + + mov r8, r12 ; /* r8 = lmask */ + shr eax, 16 ; /* output this.val char */ + stosb + +L_get_length_code_one_time: + and r8, rdx ; /* r8 &= hold */ + mov eax, [rbp+r8*4] ; /* 
eax = lcode[hold & lmask] */ + +L_dolen: + mov cl, ah ; /* cl = this.bits */ + sub bl, ah ; /* bits -= this.bits */ + shr rdx, cl ; /* hold >>= this.bits */ + + test al, al + jnz L_test_for_length_base ; /* if (op != 0) 45.7% */ + + shr eax, 16 ; /* output this.val char */ + stosb + jmp L_while_test + +ALIGN 4 +L_test_for_length_base: + mov r14d, eax ; /* len = this */ + shr r14d, 16 ; /* len = this.val */ + mov cl, al + + test al, 16 + jz L_test_for_second_level_length ; /* if ((op & 16) == 0) 8% */ + and cl, 15 ; /* op &= 15 */ + jz L_decode_distance ; /* if (!op) */ + +L_add_bits_to_len: + sub bl, cl + xor eax, eax + inc eax + shl eax, cl + dec eax + and eax, edx ; /* eax &= hold */ + shr rdx, cl + add r14d, eax ; /* len += hold & mask[op] */ + +L_decode_distance: + mov r8, r13 ; /* r8 = dmask */ + cmp bl, 32 + ja L_get_distance_code ; /* if (32 < bits) */ + + lodsd ; /* eax = *(uint *)in++ */ + mov cl, bl ; /* cl = bits, needs it for shifting */ + add bl, 32 ; /* bits += 32 */ + shl rax, cl + or rdx, rax ; /* hold |= *((uint *)in)++ << bits */ + +L_get_distance_code: + and r8, rdx ; /* r8 &= hold */ + mov eax, [r11+r8*4] ; /* eax = dcode[hold & dmask] */ + +L_dodist: + mov r15d, eax ; /* dist = this */ + shr r15d, 16 ; /* dist = this.val */ + mov cl, ah + sub bl, ah ; /* bits -= this.bits */ + shr rdx, cl ; /* hold >>= this.bits */ + mov cl, al ; /* cl = this.op */ + + test al, 16 ; /* if ((op & 16) == 0) */ + jz L_test_for_second_level_dist + and cl, 15 ; /* op &= 15 */ + jz L_check_dist_one + +L_add_bits_to_dist: + sub bl, cl + xor eax, eax + inc eax + shl eax, cl + dec eax ; /* (1 << op) - 1 */ + and eax, edx ; /* eax &= hold */ + shr rdx, cl + add r15d, eax ; /* dist += hold & ((1 << op) - 1) */ + +L_check_window: + mov r8, rsi ; /* save in so from can use it's reg */ + mov rax, rdi + sub rax, [rsp+40] ; /* nbytes = out - beg */ + + cmp eax, r15d + jb L_clip_window ; /* if (dist > nbytes) 4.2% */ + + mov ecx, r14d ; /* ecx = len */ + mov rsi, rdi + sub rsi, 
r15 ; /* from = out - dist */ + + sar ecx, 1 + jnc L_copy_two ; /* if len % 2 == 0 */ + + rep movsw + mov al, [rsi] + mov [rdi], al + inc rdi + + mov rsi, r8 ; /* move in back to %rsi, toss from */ + jmp L_while_test + +L_copy_two: + rep movsw + mov rsi, r8 ; /* move in back to %rsi, toss from */ + jmp L_while_test + +ALIGN 4 +L_check_dist_one: + cmp r15d, 1 ; /* if dist 1, is a memset */ + jne L_check_window + cmp [rsp+40], rdi ; /* if out == beg, outside window */ + je L_check_window + + mov ecx, r14d ; /* ecx = len */ + mov al, [rdi-1] + mov ah, al + + sar ecx, 1 + jnc L_set_two + mov [rdi], al + inc rdi + +L_set_two: + rep stosw + jmp L_while_test + +ALIGN 4 +L_test_for_second_level_length: + test al, 64 + jnz L_test_for_end_of_block ; /* if ((op & 64) != 0) */ + + xor eax, eax + inc eax + shl eax, cl + dec eax + and eax, edx ; /* eax &= hold */ + add eax, r14d ; /* eax += len */ + mov eax, [rbp+rax*4] ; /* eax = lcode[val+(hold&mask[op])]*/ + jmp L_dolen + +ALIGN 4 +L_test_for_second_level_dist: + test al, 64 + jnz L_invalid_distance_code ; /* if ((op & 64) != 0) */ + + xor eax, eax + inc eax + shl eax, cl + dec eax + and eax, edx ; /* eax &= hold */ + add eax, r15d ; /* eax += dist */ + mov eax, [r11+rax*4] ; /* eax = dcode[val+(hold&mask[op])]*/ + jmp L_dodist + +ALIGN 4 +L_clip_window: + mov ecx, eax ; /* ecx = nbytes */ + mov eax, [rsp+92] ; /* eax = wsize, prepare for dist cmp */ + neg ecx ; /* nbytes = -nbytes */ + + cmp eax, r15d + jb L_invalid_distance_too_far ; /* if (dist > wsize) */ + + add ecx, r15d ; /* nbytes = dist - nbytes */ + cmp dword ptr [rsp+96], 0 + jne L_wrap_around_window ; /* if (write != 0) */ + + mov rsi, [rsp+56] ; /* from = window */ + sub eax, ecx ; /* eax -= nbytes */ + add rsi, rax ; /* from += wsize - nbytes */ + + mov eax, r14d ; /* eax = len */ + cmp r14d, ecx + jbe L_do_copy ; /* if (nbytes >= len) */ + + sub eax, ecx ; /* eax -= nbytes */ + rep movsb + mov rsi, rdi + sub rsi, r15 ; /* from = &out[ -dist ] */ + jmp L_do_copy 
+ +ALIGN 4 +L_wrap_around_window: + mov eax, [rsp+96] ; /* eax = write */ + cmp ecx, eax + jbe L_contiguous_in_window ; /* if (write >= nbytes) */ + + mov esi, [rsp+92] ; /* from = wsize */ + add rsi, [rsp+56] ; /* from += window */ + add rsi, rax ; /* from += write */ + sub rsi, rcx ; /* from -= nbytes */ + sub ecx, eax ; /* nbytes -= write */ + + mov eax, r14d ; /* eax = len */ + cmp eax, ecx + jbe L_do_copy ; /* if (nbytes >= len) */ + + sub eax, ecx ; /* len -= nbytes */ + rep movsb + mov rsi, [rsp+56] ; /* from = window */ + mov ecx, [rsp+96] ; /* nbytes = write */ + cmp eax, ecx + jbe L_do_copy ; /* if (nbytes >= len) */ + + sub eax, ecx ; /* len -= nbytes */ + rep movsb + mov rsi, rdi + sub rsi, r15 ; /* from = out - dist */ + jmp L_do_copy + +ALIGN 4 +L_contiguous_in_window: + mov rsi, [rsp+56] ; /* rsi = window */ + add rsi, rax + sub rsi, rcx ; /* from += write - nbytes */ + + mov eax, r14d ; /* eax = len */ + cmp eax, ecx + jbe L_do_copy ; /* if (nbytes >= len) */ + + sub eax, ecx ; /* len -= nbytes */ + rep movsb + mov rsi, rdi + sub rsi, r15 ; /* from = out - dist */ + jmp L_do_copy ; /* if (nbytes >= len) */ + +ALIGN 4 +L_do_copy: + mov ecx, eax ; /* ecx = len */ + rep movsb + + mov rsi, r8 ; /* move in back to %esi, toss from */ + jmp L_while_test + +L_test_for_end_of_block: + test al, 32 + jz L_invalid_literal_length_code + mov dword ptr [rsp+116], 1 + jmp L_break_loop_with_status + +L_invalid_literal_length_code: + mov dword ptr [rsp+116], 2 + jmp L_break_loop_with_status + +L_invalid_distance_code: + mov dword ptr [rsp+116], 3 + jmp L_break_loop_with_status + +L_invalid_distance_too_far: + mov dword ptr [rsp+116], 4 + jmp L_break_loop_with_status + +L_break_loop: + mov dword ptr [rsp+116], 0 + +L_break_loop_with_status: +; /* put in, out, bits, and hold back into ar and pop esp */ + mov [rsp+16], rsi ; /* in */ + mov [rsp+32], rdi ; /* out */ + mov [rsp+88], ebx ; /* bits */ + mov [rsp+80], rdx ; /* hold */ + + mov rax, [rsp] ; /* restore rbp and 
rsp */ + mov rbp, [rsp+8] + mov rsp, rax + + + + mov rsi,[rsp-8] + mov rdi,[rsp-16] + mov r12,[rsp-24] + mov r13,[rsp-32] + mov r14,[rsp-40] + mov r15,[rsp-48] + mov rbx,[rsp-56] + + ret 0 +; : +; : "m" (ar) +; : "memory", "%rax", "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", +; "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" +; ); + +inffas8664fnc ENDP +;_TEXT ENDS +END diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx64/readme.txt nodejs-0.11.15/deps/zlib/contrib/masmx64/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/masmx64/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx64/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,31 @@ +Summary +------- +This directory contains ASM implementations of the functions +longest_match() and inflate_fast(), for 64 bits x86 (both AMD64 and Intel EM64t), +for use with Microsoft Macro Assembler (x64) for AMD64 and Microsoft C++ 64 bits. + +gvmat64.asm is written by Gilles Vollant (2005), by using Brian Raiter 686/32 bits + assembly optimized version from Jean-loup Gailly original longest_match function + +inffasx64.asm and inffas8664.c were written by Chris Anderson, by optimizing + original function from Mark Adler + +Use instructions +---------------- +Assemble the .asm files using MASM and put the object files into the zlib source +directory. You can also get object files here: + + http://www.winimage.com/zLibDll/zlib124_masm_obj.zip + +define ASMV and ASMINF in your project. Include inffas8664.c in your source tree, +and inffasx64.obj and gvmat64.obj as object to link. 
+ + +Build instructions +------------------ +run bld_64.bat with Microsoft Macro Assembler (x64) for AMD64 (ml64.exe) + +ml64.exe is given with Visual Studio 2005, Windows 2003 server DDK + +You can get Windows 2003 server DDK with ml64 and cl for AMD64 from + http://www.microsoft.com/whdc/devtools/ddk/default.mspx for low price) diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx86/bld_ml32.bat nodejs-0.11.15/deps/zlib/contrib/masmx86/bld_ml32.bat --- nodejs-0.11.13/deps/zlib/contrib/masmx86/bld_ml32.bat 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx86/bld_ml32.bat 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,2 @@ +ml /coff /Zi /c /Flmatch686.lst match686.asm +ml /coff /Zi /c /Flinffas32.lst inffas32.asm diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx86/inffas32.asm nodejs-0.11.15/deps/zlib/contrib/masmx86/inffas32.asm --- nodejs-0.11.13/deps/zlib/contrib/masmx86/inffas32.asm 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx86/inffas32.asm 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1080 @@ +;/* inffas32.asm is a hand tuned assembler version of inffast.c -- fast decoding +; * +; * inffas32.asm is derivated from inffas86.c, with translation of assembly code +; * +; * Copyright (C) 1995-2003 Mark Adler +; * For conditions of distribution and use, see copyright notice in zlib.h +; * +; * Copyright (C) 2003 Chris Anderson <christop@charm.net> +; * Please use the copyright conditions above. +; * +; * Mar-13-2003 -- Most of this is derived from inffast.S which is derived from +; * the gcc -S output of zlib-1.2.0/inffast.c. Zlib-1.2.0 is in beta release at +; * the moment. I have successfully compiled and tested this code with gcc2.96, +; * gcc3.2, icc5.0, msvc6.0. It is very close to the speed of inffast.S +; * compiled with gcc -DNO_MMX, but inffast.S is still faster on the P3 with MMX +; * enabled. I will attempt to merge the MMX code into this version. 
Newer +; * versions of this and inffast.S can be found at +; * http://www.eetbeetee.com/zlib/ and http://www.charm.net/~christop/zlib/ +; * +; * 2005 : modification by Gilles Vollant +; */ +; For Visual C++ 4.x and higher and ML 6.x and higher +; ml.exe is in directory \MASM611C of Win95 DDK +; ml.exe is also distributed in http://www.masm32.com/masmdl.htm +; and in VC++2003 toolkit at http://msdn.microsoft.com/visualc/vctoolkit2003/ +; +; +; compile with command line option +; ml /coff /Zi /c /Flinffas32.lst inffas32.asm + +; if you define NO_GZIP (see inflate.h), compile with +; ml /coff /Zi /c /Flinffas32.lst /DNO_GUNZIP inffas32.asm + + +; zlib122sup is 0 fort zlib 1.2.2.1 and lower +; zlib122sup is 8 fort zlib 1.2.2.2 and more (with addition of dmax and head +; in inflate_state in inflate.h) +zlib1222sup equ 8 + + +IFDEF GUNZIP + INFLATE_MODE_TYPE equ 11 + INFLATE_MODE_BAD equ 26 +ELSE + IFNDEF NO_GUNZIP + INFLATE_MODE_TYPE equ 11 + INFLATE_MODE_BAD equ 26 + ELSE + INFLATE_MODE_TYPE equ 3 + INFLATE_MODE_BAD equ 17 + ENDIF +ENDIF + + +; 75 "inffast.S" +;FILE "inffast.S" + +;;;GLOBAL _inflate_fast + +;;;SECTION .text + + + + .586p + .mmx + + name inflate_fast_x86 + .MODEL FLAT + +_DATA segment +inflate_fast_use_mmx: + dd 1 + + +_TEXT segment + + + +ALIGN 4 + db 'Fast decoding Code from Chris Anderson' + db 0 + +ALIGN 4 +invalid_literal_length_code_msg: + db 'invalid literal/length code' + db 0 + +ALIGN 4 +invalid_distance_code_msg: + db 'invalid distance code' + db 0 + +ALIGN 4 +invalid_distance_too_far_msg: + db 'invalid distance too far back' + db 0 + + +ALIGN 4 +inflate_fast_mask: +dd 0 +dd 1 +dd 3 +dd 7 +dd 15 +dd 31 +dd 63 +dd 127 +dd 255 +dd 511 +dd 1023 +dd 2047 +dd 4095 +dd 8191 +dd 16383 +dd 32767 +dd 65535 +dd 131071 +dd 262143 +dd 524287 +dd 1048575 +dd 2097151 +dd 4194303 +dd 8388607 +dd 16777215 +dd 33554431 +dd 67108863 +dd 134217727 +dd 268435455 +dd 536870911 +dd 1073741823 +dd 2147483647 +dd 4294967295 + + +mode_state equ 0 ;/* state->mode */ 
+wsize_state equ (32+zlib1222sup) ;/* state->wsize */ +write_state equ (36+4+zlib1222sup) ;/* state->write */ +window_state equ (40+4+zlib1222sup) ;/* state->window */ +hold_state equ (44+4+zlib1222sup) ;/* state->hold */ +bits_state equ (48+4+zlib1222sup) ;/* state->bits */ +lencode_state equ (64+4+zlib1222sup) ;/* state->lencode */ +distcode_state equ (68+4+zlib1222sup) ;/* state->distcode */ +lenbits_state equ (72+4+zlib1222sup) ;/* state->lenbits */ +distbits_state equ (76+4+zlib1222sup) ;/* state->distbits */ + + +;;SECTION .text +; 205 "inffast.S" +;GLOBAL inflate_fast_use_mmx + +;SECTION .data + + +; GLOBAL inflate_fast_use_mmx:object +;.size inflate_fast_use_mmx, 4 +; 226 "inffast.S" +;SECTION .text + +ALIGN 4 +_inflate_fast proc near +.FPO (16, 4, 0, 0, 1, 0) + push edi + push esi + push ebp + push ebx + pushfd + sub esp,64 + cld + + + + + mov esi, [esp+88] + mov edi, [esi+28] + + + + + + + + mov edx, [esi+4] + mov eax, [esi+0] + + add edx,eax + sub edx,11 + + mov [esp+44],eax + mov [esp+20],edx + + mov ebp, [esp+92] + mov ecx, [esi+16] + mov ebx, [esi+12] + + sub ebp,ecx + neg ebp + add ebp,ebx + + sub ecx,257 + add ecx,ebx + + mov [esp+60],ebx + mov [esp+40],ebp + mov [esp+16],ecx +; 285 "inffast.S" + mov eax, [edi+lencode_state] + mov ecx, [edi+distcode_state] + + mov [esp+8],eax + mov [esp+12],ecx + + mov eax,1 + mov ecx, [edi+lenbits_state] + shl eax,cl + dec eax + mov [esp+0],eax + + mov eax,1 + mov ecx, [edi+distbits_state] + shl eax,cl + dec eax + mov [esp+4],eax + + mov eax, [edi+wsize_state] + mov ecx, [edi+write_state] + mov edx, [edi+window_state] + + mov [esp+52],eax + mov [esp+48],ecx + mov [esp+56],edx + + mov ebp, [edi+hold_state] + mov ebx, [edi+bits_state] +; 321 "inffast.S" + mov esi, [esp+44] + mov ecx, [esp+20] + cmp ecx,esi + ja L_align_long + + add ecx,11 + sub ecx,esi + mov eax,12 + sub eax,ecx + lea edi, [esp+28] + rep movsb + mov ecx,eax + xor eax,eax + rep stosb + lea esi, [esp+28] + mov [esp+20],esi + jmp L_is_aligned + + 
+L_align_long: + test esi,3 + jz L_is_aligned + xor eax,eax + mov al, [esi] + inc esi + mov ecx,ebx + add ebx,8 + shl eax,cl + or ebp,eax + jmp L_align_long + +L_is_aligned: + mov edi, [esp+60] +; 366 "inffast.S" +L_check_mmx: + cmp dword ptr [inflate_fast_use_mmx],2 + je L_init_mmx + ja L_do_loop + + push eax + push ebx + push ecx + push edx + pushfd + mov eax, [esp] + xor dword ptr [esp],0200000h + + + + + popfd + pushfd + pop edx + xor edx,eax + jz L_dont_use_mmx + xor eax,eax + cpuid + cmp ebx,0756e6547h + jne L_dont_use_mmx + cmp ecx,06c65746eh + jne L_dont_use_mmx + cmp edx,049656e69h + jne L_dont_use_mmx + mov eax,1 + cpuid + shr eax,8 + and eax,15 + cmp eax,6 + jne L_dont_use_mmx + test edx,0800000h + jnz L_use_mmx + jmp L_dont_use_mmx +L_use_mmx: + mov dword ptr [inflate_fast_use_mmx],2 + jmp L_check_mmx_pop +L_dont_use_mmx: + mov dword ptr [inflate_fast_use_mmx],3 +L_check_mmx_pop: + pop edx + pop ecx + pop ebx + pop eax + jmp L_check_mmx +; 426 "inffast.S" +ALIGN 4 +L_do_loop: +; 437 "inffast.S" + cmp bl,15 + ja L_get_length_code + + xor eax,eax + lodsw + mov cl,bl + add bl,16 + shl eax,cl + or ebp,eax + +L_get_length_code: + mov edx, [esp+0] + mov ecx, [esp+8] + and edx,ebp + mov eax, [ecx+edx*4] + +L_dolen: + + + + + + + mov cl,ah + sub bl,ah + shr ebp,cl + + + + + + + test al,al + jnz L_test_for_length_base + + shr eax,16 + stosb + +L_while_test: + + + cmp [esp+16],edi + jbe L_break_loop + + cmp [esp+20],esi + ja L_do_loop + jmp L_break_loop + +L_test_for_length_base: +; 502 "inffast.S" + mov edx,eax + shr edx,16 + mov cl,al + + test al,16 + jz L_test_for_second_level_length + and cl,15 + jz L_save_len + cmp bl,cl + jae L_add_bits_to_len + + mov ch,cl + xor eax,eax + lodsw + mov cl,bl + add bl,16 + shl eax,cl + or ebp,eax + mov cl,ch + +L_add_bits_to_len: + mov eax,1 + shl eax,cl + dec eax + sub bl,cl + and eax,ebp + shr ebp,cl + add edx,eax + +L_save_len: + mov [esp+24],edx + + +L_decode_distance: +; 549 "inffast.S" + cmp bl,15 + ja 
L_get_distance_code + + xor eax,eax + lodsw + mov cl,bl + add bl,16 + shl eax,cl + or ebp,eax + +L_get_distance_code: + mov edx, [esp+4] + mov ecx, [esp+12] + and edx,ebp + mov eax, [ecx+edx*4] + + +L_dodist: + mov edx,eax + shr edx,16 + mov cl,ah + sub bl,ah + shr ebp,cl +; 584 "inffast.S" + mov cl,al + + test al,16 + jz L_test_for_second_level_dist + and cl,15 + jz L_check_dist_one + cmp bl,cl + jae L_add_bits_to_dist + + mov ch,cl + xor eax,eax + lodsw + mov cl,bl + add bl,16 + shl eax,cl + or ebp,eax + mov cl,ch + +L_add_bits_to_dist: + mov eax,1 + shl eax,cl + dec eax + sub bl,cl + and eax,ebp + shr ebp,cl + add edx,eax + jmp L_check_window + +L_check_window: +; 625 "inffast.S" + mov [esp+44],esi + mov eax,edi + sub eax, [esp+40] + + cmp eax,edx + jb L_clip_window + + mov ecx, [esp+24] + mov esi,edi + sub esi,edx + + sub ecx,3 + mov al, [esi] + mov [edi],al + mov al, [esi+1] + mov dl, [esi+2] + add esi,3 + mov [edi+1],al + mov [edi+2],dl + add edi,3 + rep movsb + + mov esi, [esp+44] + jmp L_while_test + +ALIGN 4 +L_check_dist_one: + cmp edx,1 + jne L_check_window + cmp [esp+40],edi + je L_check_window + + dec edi + mov ecx, [esp+24] + mov al, [edi] + sub ecx,3 + + mov [edi+1],al + mov [edi+2],al + mov [edi+3],al + add edi,4 + rep stosb + + jmp L_while_test + +ALIGN 4 +L_test_for_second_level_length: + + + + + test al,64 + jnz L_test_for_end_of_block + + mov eax,1 + shl eax,cl + dec eax + and eax,ebp + add eax,edx + mov edx, [esp+8] + mov eax, [edx+eax*4] + jmp L_dolen + +ALIGN 4 +L_test_for_second_level_dist: + + + + + test al,64 + jnz L_invalid_distance_code + + mov eax,1 + shl eax,cl + dec eax + and eax,ebp + add eax,edx + mov edx, [esp+12] + mov eax, [edx+eax*4] + jmp L_dodist + +ALIGN 4 +L_clip_window: +; 721 "inffast.S" + mov ecx,eax + mov eax, [esp+52] + neg ecx + mov esi, [esp+56] + + cmp eax,edx + jb L_invalid_distance_too_far + + add ecx,edx + cmp dword ptr [esp+48],0 + jne L_wrap_around_window + + sub eax,ecx + add esi,eax +; 749 "inffast.S" + mov 
eax, [esp+24] + cmp eax,ecx + jbe L_do_copy1 + + sub eax,ecx + rep movsb + mov esi,edi + sub esi,edx + jmp L_do_copy1 + + cmp eax,ecx + jbe L_do_copy1 + + sub eax,ecx + rep movsb + mov esi,edi + sub esi,edx + jmp L_do_copy1 + +L_wrap_around_window: +; 793 "inffast.S" + mov eax, [esp+48] + cmp ecx,eax + jbe L_contiguous_in_window + + add esi, [esp+52] + add esi,eax + sub esi,ecx + sub ecx,eax + + + mov eax, [esp+24] + cmp eax,ecx + jbe L_do_copy1 + + sub eax,ecx + rep movsb + mov esi, [esp+56] + mov ecx, [esp+48] + cmp eax,ecx + jbe L_do_copy1 + + sub eax,ecx + rep movsb + mov esi,edi + sub esi,edx + jmp L_do_copy1 + +L_contiguous_in_window: +; 836 "inffast.S" + add esi,eax + sub esi,ecx + + + mov eax, [esp+24] + cmp eax,ecx + jbe L_do_copy1 + + sub eax,ecx + rep movsb + mov esi,edi + sub esi,edx + +L_do_copy1: +; 862 "inffast.S" + mov ecx,eax + rep movsb + + mov esi, [esp+44] + jmp L_while_test +; 878 "inffast.S" +ALIGN 4 +L_init_mmx: + emms + + + + + + movd mm0,ebp + mov ebp,ebx +; 896 "inffast.S" + movd mm4,dword ptr [esp+0] + movq mm3,mm4 + movd mm5,dword ptr [esp+4] + movq mm2,mm5 + pxor mm1,mm1 + mov ebx, [esp+8] + jmp L_do_loop_mmx + +ALIGN 4 +L_do_loop_mmx: + psrlq mm0,mm1 + + cmp ebp,32 + ja L_get_length_code_mmx + + movd mm6,ebp + movd mm7,dword ptr [esi] + add esi,4 + psllq mm7,mm6 + add ebp,32 + por mm0,mm7 + +L_get_length_code_mmx: + pand mm4,mm0 + movd eax,mm4 + movq mm4,mm3 + mov eax, [ebx+eax*4] + +L_dolen_mmx: + movzx ecx,ah + movd mm1,ecx + sub ebp,ecx + + test al,al + jnz L_test_for_length_base_mmx + + shr eax,16 + stosb + +L_while_test_mmx: + + + cmp [esp+16],edi + jbe L_break_loop + + cmp [esp+20],esi + ja L_do_loop_mmx + jmp L_break_loop + +L_test_for_length_base_mmx: + + mov edx,eax + shr edx,16 + + test al,16 + jz L_test_for_second_level_length_mmx + and eax,15 + jz L_decode_distance_mmx + + psrlq mm0,mm1 + movd mm1,eax + movd ecx,mm0 + sub ebp,eax + and ecx, [inflate_fast_mask+eax*4] + add edx,ecx + +L_decode_distance_mmx: + psrlq mm0,mm1 + 
+ cmp ebp,32 + ja L_get_dist_code_mmx + + movd mm6,ebp + movd mm7,dword ptr [esi] + add esi,4 + psllq mm7,mm6 + add ebp,32 + por mm0,mm7 + +L_get_dist_code_mmx: + mov ebx, [esp+12] + pand mm5,mm0 + movd eax,mm5 + movq mm5,mm2 + mov eax, [ebx+eax*4] + +L_dodist_mmx: + + movzx ecx,ah + mov ebx,eax + shr ebx,16 + sub ebp,ecx + movd mm1,ecx + + test al,16 + jz L_test_for_second_level_dist_mmx + and eax,15 + jz L_check_dist_one_mmx + +L_add_bits_to_dist_mmx: + psrlq mm0,mm1 + movd mm1,eax + movd ecx,mm0 + sub ebp,eax + and ecx, [inflate_fast_mask+eax*4] + add ebx,ecx + +L_check_window_mmx: + mov [esp+44],esi + mov eax,edi + sub eax, [esp+40] + + cmp eax,ebx + jb L_clip_window_mmx + + mov ecx,edx + mov esi,edi + sub esi,ebx + + sub ecx,3 + mov al, [esi] + mov [edi],al + mov al, [esi+1] + mov dl, [esi+2] + add esi,3 + mov [edi+1],al + mov [edi+2],dl + add edi,3 + rep movsb + + mov esi, [esp+44] + mov ebx, [esp+8] + jmp L_while_test_mmx + +ALIGN 4 +L_check_dist_one_mmx: + cmp ebx,1 + jne L_check_window_mmx + cmp [esp+40],edi + je L_check_window_mmx + + dec edi + mov ecx,edx + mov al, [edi] + sub ecx,3 + + mov [edi+1],al + mov [edi+2],al + mov [edi+3],al + add edi,4 + rep stosb + + mov ebx, [esp+8] + jmp L_while_test_mmx + +ALIGN 4 +L_test_for_second_level_length_mmx: + test al,64 + jnz L_test_for_end_of_block + + and eax,15 + psrlq mm0,mm1 + movd ecx,mm0 + and ecx, [inflate_fast_mask+eax*4] + add ecx,edx + mov eax, [ebx+ecx*4] + jmp L_dolen_mmx + +ALIGN 4 +L_test_for_second_level_dist_mmx: + test al,64 + jnz L_invalid_distance_code + + and eax,15 + psrlq mm0,mm1 + movd ecx,mm0 + and ecx, [inflate_fast_mask+eax*4] + mov eax, [esp+12] + add ecx,ebx + mov eax, [eax+ecx*4] + jmp L_dodist_mmx + +ALIGN 4 +L_clip_window_mmx: + + mov ecx,eax + mov eax, [esp+52] + neg ecx + mov esi, [esp+56] + + cmp eax,ebx + jb L_invalid_distance_too_far + + add ecx,ebx + cmp dword ptr [esp+48],0 + jne L_wrap_around_window_mmx + + sub eax,ecx + add esi,eax + + cmp edx,ecx + jbe L_do_copy1_mmx + + 
sub edx,ecx + rep movsb + mov esi,edi + sub esi,ebx + jmp L_do_copy1_mmx + + cmp edx,ecx + jbe L_do_copy1_mmx + + sub edx,ecx + rep movsb + mov esi,edi + sub esi,ebx + jmp L_do_copy1_mmx + +L_wrap_around_window_mmx: + + mov eax, [esp+48] + cmp ecx,eax + jbe L_contiguous_in_window_mmx + + add esi, [esp+52] + add esi,eax + sub esi,ecx + sub ecx,eax + + + cmp edx,ecx + jbe L_do_copy1_mmx + + sub edx,ecx + rep movsb + mov esi, [esp+56] + mov ecx, [esp+48] + cmp edx,ecx + jbe L_do_copy1_mmx + + sub edx,ecx + rep movsb + mov esi,edi + sub esi,ebx + jmp L_do_copy1_mmx + +L_contiguous_in_window_mmx: + + add esi,eax + sub esi,ecx + + + cmp edx,ecx + jbe L_do_copy1_mmx + + sub edx,ecx + rep movsb + mov esi,edi + sub esi,ebx + +L_do_copy1_mmx: + + + mov ecx,edx + rep movsb + + mov esi, [esp+44] + mov ebx, [esp+8] + jmp L_while_test_mmx +; 1174 "inffast.S" +L_invalid_distance_code: + + + + + + mov ecx, invalid_distance_code_msg + mov edx,INFLATE_MODE_BAD + jmp L_update_stream_state + +L_test_for_end_of_block: + + + + + + test al,32 + jz L_invalid_literal_length_code + + mov ecx,0 + mov edx,INFLATE_MODE_TYPE + jmp L_update_stream_state + +L_invalid_literal_length_code: + + + + + + mov ecx, invalid_literal_length_code_msg + mov edx,INFLATE_MODE_BAD + jmp L_update_stream_state + +L_invalid_distance_too_far: + + + + mov esi, [esp+44] + mov ecx, invalid_distance_too_far_msg + mov edx,INFLATE_MODE_BAD + jmp L_update_stream_state + +L_update_stream_state: + + mov eax, [esp+88] + test ecx,ecx + jz L_skip_msg + mov [eax+24],ecx +L_skip_msg: + mov eax, [eax+28] + mov [eax+mode_state],edx + jmp L_break_loop + +ALIGN 4 +L_break_loop: +; 1243 "inffast.S" + cmp dword ptr [inflate_fast_use_mmx],2 + jne L_update_next_in + + + + mov ebx,ebp + +L_update_next_in: +; 1266 "inffast.S" + mov eax, [esp+88] + mov ecx,ebx + mov edx, [eax+28] + shr ecx,3 + sub esi,ecx + shl ecx,3 + sub ebx,ecx + mov [eax+12],edi + mov [edx+bits_state],ebx + mov ecx,ebx + + lea ebx, [esp+28] + cmp [esp+20],ebx + jne 
L_buf_not_used + + sub esi,ebx + mov ebx, [eax+0] + mov [esp+20],ebx + add esi,ebx + mov ebx, [eax+4] + sub ebx,11 + add [esp+20],ebx + +L_buf_not_used: + mov [eax+0],esi + + mov ebx,1 + shl ebx,cl + dec ebx + + + + + + cmp dword ptr [inflate_fast_use_mmx],2 + jne L_update_hold + + + + psrlq mm0,mm1 + movd ebp,mm0 + + emms + +L_update_hold: + + + + and ebp,ebx + mov [edx+hold_state],ebp + + + + + mov ebx, [esp+20] + cmp ebx,esi + jbe L_last_is_smaller + + sub ebx,esi + add ebx,11 + mov [eax+4],ebx + jmp L_fixup_out +L_last_is_smaller: + sub esi,ebx + neg esi + add esi,11 + mov [eax+4],esi + + + + +L_fixup_out: + + mov ebx, [esp+16] + cmp ebx,edi + jbe L_end_is_smaller + + sub ebx,edi + add ebx,257 + mov [eax+16],ebx + jmp L_done +L_end_is_smaller: + sub edi,ebx + neg edi + add edi,257 + mov [eax+16],edi + + + + + +L_done: + add esp,64 + popfd + pop ebx + pop ebp + pop esi + pop edi + ret +_inflate_fast endp + +_TEXT ends +end diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx86/match686.asm nodejs-0.11.15/deps/zlib/contrib/masmx86/match686.asm --- nodejs-0.11.13/deps/zlib/contrib/masmx86/match686.asm 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx86/match686.asm 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,479 @@ +; match686.asm -- Asm portion of the optimized longest_match for 32 bits x86 +; Copyright (C) 1995-1996 Jean-loup Gailly, Brian Raiter and Gilles Vollant. +; File written by Gilles Vollant, by converting match686.S from Brian Raiter +; for MASM. 
This is as assembly version of longest_match +; from Jean-loup Gailly in deflate.c +; +; http://www.zlib.net +; http://www.winimage.com/zLibDll +; http://www.muppetlabs.com/~breadbox/software/assembly.html +; +; For Visual C++ 4.x and higher and ML 6.x and higher +; ml.exe is distributed in +; http://www.microsoft.com/downloads/details.aspx?FamilyID=7a1c9da0-0510-44a2-b042-7ef370530c64 +; +; this file contain two implementation of longest_match +; +; this longest_match was written by Brian raiter (1998), optimized for Pentium Pro +; (and the faster known version of match_init on modern Core 2 Duo and AMD Phenom) +; +; for using an assembly version of longest_match, you need define ASMV in project +; +; compile the asm file running +; ml /coff /Zi /c /Flmatch686.lst match686.asm +; and do not include match686.obj in your project +; +; note: contrib of zLib 1.2.3 and earlier contained both a deprecated version for +; Pentium (prior Pentium Pro) and this version for Pentium Pro and modern processor +; with autoselect (with cpu detection code) +; if you want support the old pentium optimization, you can still use these version +; +; this file is not optimized for old pentium, but it compatible with all x86 32 bits +; processor (starting 80386) +; +; +; see below : zlib1222add must be adjuster if you use a zlib version < 1.2.2.2 + +;uInt longest_match(s, cur_match) +; deflate_state *s; +; IPos cur_match; /* current match */ + + NbStack equ 76 + cur_match equ dword ptr[esp+NbStack-0] + str_s equ dword ptr[esp+NbStack-4] +; 5 dword on top (ret,ebp,esi,edi,ebx) + adrret equ dword ptr[esp+NbStack-8] + pushebp equ dword ptr[esp+NbStack-12] + pushedi equ dword ptr[esp+NbStack-16] + pushesi equ dword ptr[esp+NbStack-20] + pushebx equ dword ptr[esp+NbStack-24] + + chain_length equ dword ptr [esp+NbStack-28] + limit equ dword ptr [esp+NbStack-32] + best_len equ dword ptr [esp+NbStack-36] + window equ dword ptr [esp+NbStack-40] + prev equ dword ptr [esp+NbStack-44] + scan_start 
equ word ptr [esp+NbStack-48] + wmask equ dword ptr [esp+NbStack-52] + match_start_ptr equ dword ptr [esp+NbStack-56] + nice_match equ dword ptr [esp+NbStack-60] + scan equ dword ptr [esp+NbStack-64] + + windowlen equ dword ptr [esp+NbStack-68] + match_start equ dword ptr [esp+NbStack-72] + strend equ dword ptr [esp+NbStack-76] + NbStackAdd equ (NbStack-24) + + .386p + + name gvmatch + .MODEL FLAT + + + +; all the +zlib1222add offsets are due to the addition of fields +; in zlib in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)"). +; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0"). +; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8"). + + zlib1222add equ 8 + +; Note : these value are good with a 8 bytes boundary pack structure + dep_chain_length equ 74h+zlib1222add + dep_window equ 30h+zlib1222add + dep_strstart equ 64h+zlib1222add + dep_prev_length equ 70h+zlib1222add + dep_nice_match equ 88h+zlib1222add + dep_w_size equ 24h+zlib1222add + dep_prev equ 38h+zlib1222add + dep_w_mask equ 2ch+zlib1222add + dep_good_match equ 84h+zlib1222add + dep_match_start equ 68h+zlib1222add + dep_lookahead equ 6ch+zlib1222add + + +_TEXT segment + +IFDEF NOUNDERLINE + public longest_match + public match_init +ELSE + public _longest_match + public _match_init +ENDIF + + MAX_MATCH equ 258 + MIN_MATCH equ 3 + MIN_LOOKAHEAD equ (MAX_MATCH+MIN_MATCH+1) + + + +MAX_MATCH equ 258 +MIN_MATCH equ 3 +MIN_LOOKAHEAD equ (MAX_MATCH + MIN_MATCH + 1) +MAX_MATCH_8_ equ ((MAX_MATCH + 7) AND 0FFF0h) + + +;;; stack frame offsets + +chainlenwmask equ esp + 0 ; high word: current chain len + ; low word: s->wmask +window equ esp + 4 ; local copy of s->window +windowbestlen equ esp + 8 ; s->window + bestlen +scanstart equ esp + 16 ; first two bytes of string +scanend equ esp + 12 ; last two bytes of string +scanalign equ esp + 20 ; dword-misalignment of string +nicematch 
equ esp + 24 ; a good enough match size +bestlen equ esp + 28 ; size of best match so far +scan equ esp + 32 ; ptr to string wanting match + +LocalVarsSize equ 36 +; saved ebx byte esp + 36 +; saved edi byte esp + 40 +; saved esi byte esp + 44 +; saved ebp byte esp + 48 +; return address byte esp + 52 +deflatestate equ esp + 56 ; the function arguments +curmatch equ esp + 60 + +;;; Offsets for fields in the deflate_state structure. These numbers +;;; are calculated from the definition of deflate_state, with the +;;; assumption that the compiler will dword-align the fields. (Thus, +;;; changing the definition of deflate_state could easily cause this +;;; program to crash horribly, without so much as a warning at +;;; compile time. Sigh.) + +dsWSize equ 36+zlib1222add +dsWMask equ 44+zlib1222add +dsWindow equ 48+zlib1222add +dsPrev equ 56+zlib1222add +dsMatchLen equ 88+zlib1222add +dsPrevMatch equ 92+zlib1222add +dsStrStart equ 100+zlib1222add +dsMatchStart equ 104+zlib1222add +dsLookahead equ 108+zlib1222add +dsPrevLen equ 112+zlib1222add +dsMaxChainLen equ 116+zlib1222add +dsGoodMatch equ 132+zlib1222add +dsNiceMatch equ 136+zlib1222add + + +;;; match686.asm -- Pentium-Pro-optimized version of longest_match() +;;; Written for zlib 1.1.2 +;;; Copyright (C) 1998 Brian Raiter <breadbox@muppetlabs.com> +;;; You can look at http://www.muppetlabs.com/~breadbox/software/assembly.html +;;; +;; +;; This software is provided 'as-is', without any express or implied +;; warranty. In no event will the authors be held liable for any damages +;; arising from the use of this software. +;; +;; Permission is granted to anyone to use this software for any purpose, +;; including commercial applications, and to alter it and redistribute it +;; freely, subject to the following restrictions: +;; +;; 1. The origin of this software must not be misrepresented; you must not +;; claim that you wrote the original software. 
If you use this software +;; in a product, an acknowledgment in the product documentation would be +;; appreciated but is not required. +;; 2. Altered source versions must be plainly marked as such, and must not be +;; misrepresented as being the original software +;; 3. This notice may not be removed or altered from any source distribution. +;; + +;GLOBAL _longest_match, _match_init + + +;SECTION .text + +;;; uInt longest_match(deflate_state *deflatestate, IPos curmatch) + +;_longest_match: + IFDEF NOUNDERLINE + longest_match proc near + ELSE + _longest_match proc near + ENDIF +.FPO (9, 4, 0, 0, 1, 0) + +;;; Save registers that the compiler may be using, and adjust esp to +;;; make room for our stack frame. + + push ebp + push edi + push esi + push ebx + sub esp, LocalVarsSize + +;;; Retrieve the function arguments. ecx will hold cur_match +;;; throughout the entire function. edx will hold the pointer to the +;;; deflate_state structure during the function's setup (before +;;; entering the main loop. + + mov edx, [deflatestate] + mov ecx, [curmatch] + +;;; uInt wmask = s->w_mask; +;;; unsigned chain_length = s->max_chain_length; +;;; if (s->prev_length >= s->good_match) { +;;; chain_length >>= 2; +;;; } + + mov eax, [edx + dsPrevLen] + mov ebx, [edx + dsGoodMatch] + cmp eax, ebx + mov eax, [edx + dsWMask] + mov ebx, [edx + dsMaxChainLen] + jl LastMatchGood + shr ebx, 2 +LastMatchGood: + +;;; chainlen is decremented once beforehand so that the function can +;;; use the sign flag instead of the zero flag for the exit test. +;;; It is then shifted into the high word, to make room for the wmask +;;; value, which it will always accompany. 
+ + dec ebx + shl ebx, 16 + or ebx, eax + mov [chainlenwmask], ebx + +;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + + mov eax, [edx + dsNiceMatch] + mov ebx, [edx + dsLookahead] + cmp ebx, eax + jl LookaheadLess + mov ebx, eax +LookaheadLess: mov [nicematch], ebx + +;;; register Bytef *scan = s->window + s->strstart; + + mov esi, [edx + dsWindow] + mov [window], esi + mov ebp, [edx + dsStrStart] + lea edi, [esi + ebp] + mov [scan], edi + +;;; Determine how many bytes the scan ptr is off from being +;;; dword-aligned. + + mov eax, edi + neg eax + and eax, 3 + mov [scanalign], eax + +;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ? +;;; s->strstart - (IPos)MAX_DIST(s) : NIL; + + mov eax, [edx + dsWSize] + sub eax, MIN_LOOKAHEAD + sub ebp, eax + jg LimitPositive + xor ebp, ebp +LimitPositive: + +;;; int best_len = s->prev_length; + + mov eax, [edx + dsPrevLen] + mov [bestlen], eax + +;;; Store the sum of s->window + best_len in esi locally, and in esi. + + add esi, eax + mov [windowbestlen], esi + +;;; register ush scan_start = *(ushf*)scan; +;;; register ush scan_end = *(ushf*)(scan+best_len-1); +;;; Posf *prev = s->prev; + + movzx ebx, word ptr [edi] + mov [scanstart], ebx + movzx ebx, word ptr [edi + eax - 1] + mov [scanend], ebx + mov edi, [edx + dsPrev] + +;;; Jump into the main loop. + + mov edx, [chainlenwmask] + jmp short LoopEntry + +align 4 + +;;; do { +;;; match = s->window + cur_match; +;;; if (*(ushf*)(match+best_len-1) != scan_end || +;;; *(ushf*)match != scan_start) continue; +;;; [...] +;;; } while ((cur_match = prev[cur_match & wmask]) > limit +;;; && --chain_length != 0); +;;; +;;; Here is the inner loop of the function. The function will spend the +;;; majority of its time in this loop, and majority of that time will +;;; be spent in the first ten instructions. 
+;;; +;;; Within this loop: +;;; ebx = scanend +;;; ecx = curmatch +;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask) +;;; esi = windowbestlen - i.e., (window + bestlen) +;;; edi = prev +;;; ebp = limit + +LookupLoop: + and ecx, edx + movzx ecx, word ptr [edi + ecx*2] + cmp ecx, ebp + jbe LeaveNow + sub edx, 00010000h + js LeaveNow +LoopEntry: movzx eax, word ptr [esi + ecx - 1] + cmp eax, ebx + jnz LookupLoop + mov eax, [window] + movzx eax, word ptr [eax + ecx] + cmp eax, [scanstart] + jnz LookupLoop + +;;; Store the current value of chainlen. + + mov [chainlenwmask], edx + +;;; Point edi to the string under scrutiny, and esi to the string we +;;; are hoping to match it up with. In actuality, esi and edi are +;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is +;;; initialized to -(MAX_MATCH_8 - scanalign). + + mov esi, [window] + mov edi, [scan] + add esi, ecx + mov eax, [scanalign] + mov edx, 0fffffef8h; -(MAX_MATCH_8) + lea edi, [edi + eax + 0108h] ;MAX_MATCH_8] + lea esi, [esi + eax + 0108h] ;MAX_MATCH_8] + +;;; Test the strings for equality, 8 bytes at a time. At the end, +;;; adjust edx so that it is offset to the exact byte that mismatched. +;;; +;;; We already know at this point that the first three bytes of the +;;; strings match each other, and they can be safely passed over before +;;; starting the compare loop. So what this code does is skip over 0-3 +;;; bytes, as much as necessary in order to dword-align the edi +;;; pointer. (esi will still be misaligned three times out of four.) +;;; +;;; It should be confessed that this loop usually does not represent +;;; much of the total running time. Replacing it with a more +;;; straightforward "rep cmpsb" would not drastically degrade +;;; performance. 
+ +LoopCmps: + mov eax, [esi + edx] + xor eax, [edi + edx] + jnz LeaveLoopCmps + mov eax, [esi + edx + 4] + xor eax, [edi + edx + 4] + jnz LeaveLoopCmps4 + add edx, 8 + jnz LoopCmps + jmp short LenMaximum +LeaveLoopCmps4: add edx, 4 +LeaveLoopCmps: test eax, 0000FFFFh + jnz LenLower + add edx, 2 + shr eax, 16 +LenLower: sub al, 1 + adc edx, 0 + +;;; Calculate the length of the match. If it is longer than MAX_MATCH, +;;; then automatically accept it as the best possible match and leave. + + lea eax, [edi + edx] + mov edi, [scan] + sub eax, edi + cmp eax, MAX_MATCH + jge LenMaximum + +;;; If the length of the match is not longer than the best match we +;;; have so far, then forget it and return to the lookup loop. + + mov edx, [deflatestate] + mov ebx, [bestlen] + cmp eax, ebx + jg LongerMatch + mov esi, [windowbestlen] + mov edi, [edx + dsPrev] + mov ebx, [scanend] + mov edx, [chainlenwmask] + jmp LookupLoop + +;;; s->match_start = cur_match; +;;; best_len = len; +;;; if (len >= nice_match) break; +;;; scan_end = *(ushf*)(scan+best_len-1); + +LongerMatch: mov ebx, [nicematch] + mov [bestlen], eax + mov [edx + dsMatchStart], ecx + cmp eax, ebx + jge LeaveNow + mov esi, [window] + add esi, eax + mov [windowbestlen], esi + movzx ebx, word ptr [edi + eax - 1] + mov edi, [edx + dsPrev] + mov [scanend], ebx + mov edx, [chainlenwmask] + jmp LookupLoop + +;;; Accept the current string, with the maximum possible length. + +LenMaximum: mov edx, [deflatestate] + mov dword ptr [bestlen], MAX_MATCH + mov [edx + dsMatchStart], ecx + +;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len; +;;; return s->lookahead; + +LeaveNow: + mov edx, [deflatestate] + mov ebx, [bestlen] + mov eax, [edx + dsLookahead] + cmp ebx, eax + jg LookaheadRet + mov eax, ebx +LookaheadRet: + +;;; Restore the stack and return from whence we came. + + add esp, LocalVarsSize + pop ebx + pop esi + pop edi + pop ebp + + ret +; please don't remove this string ! 
+; Your can freely use match686 in any free or commercial app if you don't remove the string in the binary! + db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998",0dh,0ah + + + IFDEF NOUNDERLINE + longest_match endp + ELSE + _longest_match endp + ENDIF + + IFDEF NOUNDERLINE + match_init proc near + ret + match_init endp + ELSE + _match_init proc near + ret + _match_init endp + ENDIF + + +_TEXT ends +end diff -Nru nodejs-0.11.13/deps/zlib/contrib/masmx86/readme.txt nodejs-0.11.15/deps/zlib/contrib/masmx86/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/masmx86/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/masmx86/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,27 @@ + +Summary +------- +This directory contains ASM implementations of the functions +longest_match() and inflate_fast(). + + +Use instructions +---------------- +Assemble using MASM, and copy the object files into the zlib source +directory, then run the appropriate makefile, as suggested below. 
You can +donwload MASM from here: + + http://www.microsoft.com/downloads/details.aspx?displaylang=en&FamilyID=7a1c9da0-0510-44a2-b042-7ef370530c64 + +You can also get objects files here: + + http://www.winimage.com/zLibDll/zlib124_masm_obj.zip + +Build instructions +------------------ +* With Microsoft C and MASM: +nmake -f win32/Makefile.msc LOC="-DASMV -DASMINF" OBJA="match686.obj inffas32.obj" + +* With Borland C and TASM: +make -f win32/Makefile.bor LOCAL_ZLIB="-DASMV -DASMINF" OBJA="match686.obj inffas32.obj" OBJPA="+match686c.obj+match686.obj+inffas32.obj" + diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/configure.ac nodejs-0.11.15/deps/zlib/contrib/minizip/configure.ac --- nodejs-0.11.13/deps/zlib/contrib/minizip/configure.ac 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/configure.ac 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. + +AC_INIT([minizip], [1.2.8], [bugzilla.redhat.com]) +AC_CONFIG_SRCDIR([minizip.c]) +AM_INIT_AUTOMAKE([foreign]) +LT_INIT + +AC_MSG_CHECKING([whether to build example programs]) +AC_ARG_ENABLE([demos], AC_HELP_STRING([--enable-demos], [build example programs])) +AM_CONDITIONAL([COND_DEMOS], [test "$enable_demos" = yes]) +if test "$enable_demos" = yes +then + AC_MSG_RESULT([yes]) +else + AC_MSG_RESULT([no]) +fi + +case "${host}" in + *-mingw* | mingw*) + WIN32="yes" + ;; + *) + ;; +esac +AM_CONDITIONAL([WIN32], [test "${WIN32}" = "yes"]) + + +AC_SUBST([HAVE_UNISTD_H], [0]) +AC_CHECK_HEADER([unistd.h], [HAVE_UNISTD_H=1], []) +AC_CONFIG_FILES([Makefile minizip.pc]) +AC_OUTPUT diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/crypt.h nodejs-0.11.15/deps/zlib/contrib/minizip/crypt.h --- nodejs-0.11.13/deps/zlib/contrib/minizip/crypt.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/crypt.h 2015-01-20 21:22:17.000000000 +0000 @@ -32,7 +32,7 @@ 
/*********************************************************************** * Return the next byte in the pseudo-random sequence */ -static int decrypt_byte(unsigned long* pkeys, const unsigned long* pcrc_32_tab) +static int decrypt_byte(unsigned long* pkeys, const z_crc_t* pcrc_32_tab) { unsigned temp; /* POTENTIAL BUG: temp*(temp^1) may overflow in an * unpredictable manner on 16-bit systems; not a problem @@ -45,7 +45,7 @@ /*********************************************************************** * Update the encryption keys with the next byte of plain text */ -static int update_keys(unsigned long* pkeys,const unsigned long* pcrc_32_tab,int c) +static int update_keys(unsigned long* pkeys,const z_crc_t* pcrc_32_tab,int c) { (*(pkeys+0)) = CRC32((*(pkeys+0)), c); (*(pkeys+1)) += (*(pkeys+0)) & 0xff; @@ -62,7 +62,7 @@ * Initialize the encryption keys and the random header according to * the given password. */ -static void init_keys(const char* passwd,unsigned long* pkeys,const unsigned long* pcrc_32_tab) +static void init_keys(const char* passwd,unsigned long* pkeys,const z_crc_t* pcrc_32_tab) { *(pkeys+0) = 305419896L; *(pkeys+1) = 591751049L; @@ -87,13 +87,12 @@ # define ZCR_SEED2 3141592654UL /* use PI as default pattern */ # endif -static int crypthead(passwd, buf, bufSize, pkeys, pcrc_32_tab, crcForCrypting) - const char *passwd; /* password string */ - unsigned char *buf; /* where to write header */ - int bufSize; - unsigned long* pkeys; - const unsigned long* pcrc_32_tab; - unsigned long crcForCrypting; +static int crypthead(const char* passwd, /* password string */ + unsigned char* buf, /* where to write header */ + int bufSize, + unsigned long* pkeys, + const z_crc_t* pcrc_32_tab, + unsigned long crcForCrypting) { int n; /* index in random header */ int t; /* temporary */ @@ -124,8 +123,8 @@ { buf[n] = (unsigned char)zencode(pkeys, pcrc_32_tab, header[n], t); } - buf[n++] = zencode(pkeys, pcrc_32_tab, (int)(crcForCrypting >> 16) & 0xff, t); - buf[n++] = 
zencode(pkeys, pcrc_32_tab, (int)(crcForCrypting >> 24) & 0xff, t); + buf[n++] = (unsigned char)zencode(pkeys, pcrc_32_tab, (int)(crcForCrypting >> 16) & 0xff, t); + buf[n++] = (unsigned char)zencode(pkeys, pcrc_32_tab, (int)(crcForCrypting >> 24) & 0xff, t); return n; } diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/ioapi.c nodejs-0.11.15/deps/zlib/contrib/minizip/ioapi.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/ioapi.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/ioapi.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,77 +1,98 @@ -/* ioapi.c -- IO base function header for compress/uncompress .zip - files using zlib + zip or unzip API +/* ioapi.h -- IO base function header for compress/uncompress .zip + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Version 1.01e, February 12th, 2005 + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt - Copyright (C) 1998-2005 Gilles Vollant */ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> +#if defined(_WIN32) && (!(defined(_CRT_SECURE_NO_WARNINGS))) + #define _CRT_SECURE_NO_WARNINGS +#endif -#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> +#if defined(__APPLE__) || defined(IOAPI_NO_64) +// In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions +#define FOPEN_FUNC(filename, mode) fopen(filename, mode) +#define FTELLO_FUNC(stream) ftello(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko(stream, offset, origin) #else -#include "zlib.h" +#define FOPEN_FUNC(filename, mode) fopen64(filename, mode) +#define FTELLO_FUNC(stream) ftello64(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko64(stream, offset, origin) #endif + #include "ioapi.h" -/* I've found an old Unix (a SunOS 4.1.3_U1) 
without all SEEK_* defined.... */ +voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode) +{ + if (pfilefunc->zfile_func64.zopen64_file != NULL) + return (*(pfilefunc->zfile_func64.zopen64_file)) (pfilefunc->zfile_func64.opaque,filename,mode); + else + { + return (*(pfilefunc->zopen32_file))(pfilefunc->zfile_func64.opaque,(const char*)filename,mode); + } +} -#ifndef SEEK_CUR -#define SEEK_CUR 1 -#endif +long call_zseek64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin) +{ + if (pfilefunc->zfile_func64.zseek64_file != NULL) + return (*(pfilefunc->zfile_func64.zseek64_file)) (pfilefunc->zfile_func64.opaque,filestream,offset,origin); + else + { + uLong offsetTruncated = (uLong)offset; + if (offsetTruncated != offset) + return -1; + else + return (*(pfilefunc->zseek32_file))(pfilefunc->zfile_func64.opaque,filestream,offsetTruncated,origin); + } +} -#ifndef SEEK_END -#define SEEK_END 2 -#endif +ZPOS64_T call_ztell64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream) +{ + if (pfilefunc->zfile_func64.zseek64_file != NULL) + return (*(pfilefunc->zfile_func64.ztell64_file)) (pfilefunc->zfile_func64.opaque,filestream); + else + { + uLong tell_uLong = (*(pfilefunc->ztell32_file))(pfilefunc->zfile_func64.opaque,filestream); + if ((tell_uLong) == MAXU32) + return (ZPOS64_T)-1; + else + return tell_uLong; + } +} + +void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32) +{ + p_filefunc64_32->zfile_func64.zopen64_file = NULL; + p_filefunc64_32->zopen32_file = p_filefunc32->zopen_file; + p_filefunc64_32->zfile_func64.zerror_file = p_filefunc32->zerror_file; + p_filefunc64_32->zfile_func64.zread_file = p_filefunc32->zread_file; + p_filefunc64_32->zfile_func64.zwrite_file = p_filefunc32->zwrite_file; + p_filefunc64_32->zfile_func64.ztell64_file = NULL; + p_filefunc64_32->zfile_func64.zseek64_file = NULL; + 
p_filefunc64_32->zfile_func64.zclose_file = p_filefunc32->zclose_file; + p_filefunc64_32->zfile_func64.zerror_file = p_filefunc32->zerror_file; + p_filefunc64_32->zfile_func64.opaque = p_filefunc32->opaque; + p_filefunc64_32->zseek32_file = p_filefunc32->zseek_file; + p_filefunc64_32->ztell32_file = p_filefunc32->ztell_file; +} -#ifndef SEEK_SET -#define SEEK_SET 0 -#endif -voidpf ZCALLBACK fopen_file_func OF(( - voidpf opaque, - const char* filename, - int mode)); - -uLong ZCALLBACK fread_file_func OF(( - voidpf opaque, - voidpf stream, - void* buf, - uLong size)); - -uLong ZCALLBACK fwrite_file_func OF(( - voidpf opaque, - voidpf stream, - const void* buf, - uLong size)); - -long ZCALLBACK ftell_file_func OF(( - voidpf opaque, - voidpf stream)); - -long ZCALLBACK fseek_file_func OF(( - voidpf opaque, - voidpf stream, - uLong offset, - int origin)); - -int ZCALLBACK fclose_file_func OF(( - voidpf opaque, - voidpf stream)); - -int ZCALLBACK ferror_file_func OF(( - voidpf opaque, - voidpf stream)); - - -voidpf ZCALLBACK fopen_file_func (opaque, filename, mode) - voidpf opaque; - const char* filename; - int mode; + +static voidpf ZCALLBACK fopen_file_func OF((voidpf opaque, const char* filename, int mode)); +static uLong ZCALLBACK fread_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); +static uLong ZCALLBACK fwrite_file_func OF((voidpf opaque, voidpf stream, const void* buf,uLong size)); +static ZPOS64_T ZCALLBACK ftell64_file_func OF((voidpf opaque, voidpf stream)); +static long ZCALLBACK fseek64_file_func OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); +static int ZCALLBACK fclose_file_func OF((voidpf opaque, voidpf stream)); +static int ZCALLBACK ferror_file_func OF((voidpf opaque, voidpf stream)); + +static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, int mode) { FILE* file = NULL; const char* mode_fopen = NULL; @@ -89,44 +110,78 @@ return file; } +static voidpf ZCALLBACK fopen64_file_func (voidpf 
opaque, const void* filename, int mode) +{ + FILE* file = NULL; + const char* mode_fopen = NULL; + if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) + mode_fopen = "rb"; + else + if (mode & ZLIB_FILEFUNC_MODE_EXISTING) + mode_fopen = "r+b"; + else + if (mode & ZLIB_FILEFUNC_MODE_CREATE) + mode_fopen = "wb"; + + if ((filename!=NULL) && (mode_fopen != NULL)) + file = FOPEN_FUNC((const char*)filename, mode_fopen); + return file; +} + -uLong ZCALLBACK fread_file_func (opaque, stream, buf, size) - voidpf opaque; - voidpf stream; - void* buf; - uLong size; +static uLong ZCALLBACK fread_file_func (voidpf opaque, voidpf stream, void* buf, uLong size) { uLong ret; ret = (uLong)fread(buf, 1, (size_t)size, (FILE *)stream); return ret; } - -uLong ZCALLBACK fwrite_file_func (opaque, stream, buf, size) - voidpf opaque; - voidpf stream; - const void* buf; - uLong size; +static uLong ZCALLBACK fwrite_file_func (voidpf opaque, voidpf stream, const void* buf, uLong size) { uLong ret; ret = (uLong)fwrite(buf, 1, (size_t)size, (FILE *)stream); return ret; } -long ZCALLBACK ftell_file_func (opaque, stream) - voidpf opaque; - voidpf stream; +static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream) { long ret; ret = ftell((FILE *)stream); return ret; } -long ZCALLBACK fseek_file_func (opaque, stream, offset, origin) - voidpf opaque; - voidpf stream; - uLong offset; - int origin; + +static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque, voidpf stream) +{ + ZPOS64_T ret; + ret = FTELLO_FUNC((FILE *)stream); + return ret; +} + +static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offset, int origin) +{ + int fseek_origin=0; + long ret; + switch (origin) + { + case ZLIB_FILEFUNC_SEEK_CUR : + fseek_origin = SEEK_CUR; + break; + case ZLIB_FILEFUNC_SEEK_END : + fseek_origin = SEEK_END; + break; + case ZLIB_FILEFUNC_SEEK_SET : + fseek_origin = SEEK_SET; + break; + default: return -1; + } + ret = 0; + if (fseek((FILE *)stream, offset, 
fseek_origin) != 0) + ret = -1; + return ret; +} + +static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T offset, int origin) { int fseek_origin=0; long ret; @@ -144,22 +199,22 @@ default: return -1; } ret = 0; - fseek((FILE *)stream, offset, fseek_origin); + + if(FSEEKO_FUNC((FILE *)stream, offset, fseek_origin) != 0) + ret = -1; + return ret; } -int ZCALLBACK fclose_file_func (opaque, stream) - voidpf opaque; - voidpf stream; + +static int ZCALLBACK fclose_file_func (voidpf opaque, voidpf stream) { int ret; ret = fclose((FILE *)stream); return ret; } -int ZCALLBACK ferror_file_func (opaque, stream) - voidpf opaque; - voidpf stream; +static int ZCALLBACK ferror_file_func (voidpf opaque, voidpf stream) { int ret; ret = ferror((FILE *)stream); @@ -177,4 +232,16 @@ pzlib_filefunc_def->zclose_file = fclose_file_func; pzlib_filefunc_def->zerror_file = ferror_file_func; pzlib_filefunc_def->opaque = NULL; +} + +void fill_fopen64_filefunc (zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = fopen64_file_func; + pzlib_filefunc_def->zread_file = fread_file_func; + pzlib_filefunc_def->zwrite_file = fwrite_file_func; + pzlib_filefunc_def->ztell64_file = ftell64_file_func; + pzlib_filefunc_def->zseek64_file = fseek64_file_func; + pzlib_filefunc_def->zclose_file = fclose_file_func; + pzlib_filefunc_def->zerror_file = ferror_file_func; + pzlib_filefunc_def->opaque = NULL; } diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/ioapi.h nodejs-0.11.15/deps/zlib/contrib/minizip/ioapi.h --- nodejs-0.11.13/deps/zlib/contrib/minizip/ioapi.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/ioapi.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,13 +1,112 @@ /* ioapi.h -- IO base function header for compress/uncompress .zip - files using zlib + zip or unzip API + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Version 1.01e, February 12th, 2005 + Copyright (C) 1998-2010 Gilles 
Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + Changes + + Oct-2009 - Defined ZPOS64_T to fpos_t on windows and u_int64_t on linux. (might need to find a better why for this) + Oct-2009 - Change to fseeko64, ftello64 and fopen64 so large files would work on linux. + More if/def section may be needed to support other platforms + Oct-2009 - Defined fxxxx64 calls to normal fopen/ftell/fseek so they would compile on windows. + (but you should use iowin32.c for windows instead) - Copyright (C) 1998-2005 Gilles Vollant */ -#ifndef _ZLIBIOAPI_H -#define _ZLIBIOAPI_H +#ifndef _ZLIBIOAPI64_H +#define _ZLIBIOAPI64_H + +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) + + // Linux needs this to support file operation on files larger then 4+GB + // But might need better if/def to select just the platforms that needs them. 
+ + #ifndef __USE_FILE_OFFSET64 + #define __USE_FILE_OFFSET64 + #endif + #ifndef __USE_LARGEFILE64 + #define __USE_LARGEFILE64 + #endif + #ifndef _LARGEFILE64_SOURCE + #define _LARGEFILE64_SOURCE + #endif + #ifndef _FILE_OFFSET_BIT + #define _FILE_OFFSET_BIT 64 + #endif + +#endif + +#include <stdio.h> +#include <stdlib.h> +#include "zlib.h" + +#if defined(USE_FILE32API) +#define fopen64 fopen +#define ftello64 ftell +#define fseeko64 fseek +#else +#ifdef __FreeBSD__ +#define fopen64 fopen +#define ftello64 ftello +#define fseeko64 fseeko +#endif +#ifdef _MSC_VER + #define fopen64 fopen + #if (_MSC_VER >= 1400) && (!(defined(NO_MSCVER_FILE64_FUNC))) + #define ftello64 _ftelli64 + #define fseeko64 _fseeki64 + #else // old MSC + #define ftello64 ftell + #define fseeko64 fseek + #endif +#endif +#endif + +/* +#ifndef ZPOS64_T + #ifdef _WIN32 + #define ZPOS64_T fpos_t + #else + #include <stdint.h> + #define ZPOS64_T uint64_t + #endif +#endif +*/ + +#ifdef HAVE_MINIZIP64_CONF_H +#include "mz64conf.h" +#endif + +/* a type choosen by DEFINE */ +#ifdef HAVE_64BIT_INT_CUSTOM +typedef 64BIT_INT_CUSTOM_TYPE ZPOS64_T; +#else +#ifdef HAS_STDINT_H +#include "stdint.h" +typedef uint64_t ZPOS64_T; +#else + +/* Maximum unsigned 32-bit value used as placeholder for zip64 */ +#define MAXU32 0xffffffff + +#if defined(_MSC_VER) || defined(__BORLANDC__) +typedef unsigned __int64 ZPOS64_T; +#else +typedef unsigned long long int ZPOS64_T; +#endif +#endif +#endif + + + +#ifdef __cplusplus +extern "C" { +#endif #define ZLIB_FILEFUNC_SEEK_CUR (1) @@ -23,26 +122,27 @@ #ifndef ZCALLBACK - -#if (defined(WIN32) || defined (WINDOWS) || defined (_WINDOWS)) && defined(CALLBACK) && defined (USEWINDOWS_CALLBACK) -#define ZCALLBACK CALLBACK -#else -#define ZCALLBACK -#endif + #if (defined(WIN32) || defined(_WIN32) || defined (WINDOWS) || defined (_WINDOWS)) && defined(CALLBACK) && defined (USEWINDOWS_CALLBACK) + #define ZCALLBACK CALLBACK + #else + #define ZCALLBACK + #endif #endif -#ifdef __cplusplus 
-extern "C" { -#endif -typedef voidpf (ZCALLBACK *open_file_func) OF((voidpf opaque, const char* filename, int mode)); -typedef uLong (ZCALLBACK *read_file_func) OF((voidpf opaque, voidpf stream, void* buf, uLong size)); -typedef uLong (ZCALLBACK *write_file_func) OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); -typedef long (ZCALLBACK *tell_file_func) OF((voidpf opaque, voidpf stream)); -typedef long (ZCALLBACK *seek_file_func) OF((voidpf opaque, voidpf stream, uLong offset, int origin)); -typedef int (ZCALLBACK *close_file_func) OF((voidpf opaque, voidpf stream)); -typedef int (ZCALLBACK *testerror_file_func) OF((voidpf opaque, voidpf stream)); + +typedef voidpf (ZCALLBACK *open_file_func) OF((voidpf opaque, const char* filename, int mode)); +typedef uLong (ZCALLBACK *read_file_func) OF((voidpf opaque, voidpf stream, void* buf, uLong size)); +typedef uLong (ZCALLBACK *write_file_func) OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); +typedef int (ZCALLBACK *close_file_func) OF((voidpf opaque, voidpf stream)); +typedef int (ZCALLBACK *testerror_file_func) OF((voidpf opaque, voidpf stream)); + +typedef long (ZCALLBACK *tell_file_func) OF((voidpf opaque, voidpf stream)); +typedef long (ZCALLBACK *seek_file_func) OF((voidpf opaque, voidpf stream, uLong offset, int origin)); + + +/* here is the "old" 32 bits structure structure */ typedef struct zlib_filefunc_def_s { open_file_func zopen_file; @@ -55,21 +155,54 @@ voidpf opaque; } zlib_filefunc_def; +typedef ZPOS64_T (ZCALLBACK *tell64_file_func) OF((voidpf opaque, voidpf stream)); +typedef long (ZCALLBACK *seek64_file_func) OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); +typedef voidpf (ZCALLBACK *open64_file_func) OF((voidpf opaque, const void* filename, int mode)); +typedef struct zlib_filefunc64_def_s +{ + open64_file_func zopen64_file; + read_file_func zread_file; + write_file_func zwrite_file; + tell64_file_func ztell64_file; + seek64_file_func zseek64_file; + 
close_file_func zclose_file; + testerror_file_func zerror_file; + voidpf opaque; +} zlib_filefunc64_def; +void fill_fopen64_filefunc OF((zlib_filefunc64_def* pzlib_filefunc_def)); void fill_fopen_filefunc OF((zlib_filefunc_def* pzlib_filefunc_def)); -#define ZREAD(filefunc,filestream,buf,size) ((*((filefunc).zread_file))((filefunc).opaque,filestream,buf,size)) -#define ZWRITE(filefunc,filestream,buf,size) ((*((filefunc).zwrite_file))((filefunc).opaque,filestream,buf,size)) -#define ZTELL(filefunc,filestream) ((*((filefunc).ztell_file))((filefunc).opaque,filestream)) -#define ZSEEK(filefunc,filestream,pos,mode) ((*((filefunc).zseek_file))((filefunc).opaque,filestream,pos,mode)) -#define ZCLOSE(filefunc,filestream) ((*((filefunc).zclose_file))((filefunc).opaque,filestream)) -#define ZERROR(filefunc,filestream) ((*((filefunc).zerror_file))((filefunc).opaque,filestream)) - +/* now internal definition, only for zip.c and unzip.h */ +typedef struct zlib_filefunc64_32_def_s +{ + zlib_filefunc64_def zfile_func64; + open_file_func zopen32_file; + tell_file_func ztell32_file; + seek_file_func zseek32_file; +} zlib_filefunc64_32_def; + + +#define ZREAD64(filefunc,filestream,buf,size) ((*((filefunc).zfile_func64.zread_file)) ((filefunc).zfile_func64.opaque,filestream,buf,size)) +#define ZWRITE64(filefunc,filestream,buf,size) ((*((filefunc).zfile_func64.zwrite_file)) ((filefunc).zfile_func64.opaque,filestream,buf,size)) +//#define ZTELL64(filefunc,filestream) ((*((filefunc).ztell64_file)) ((filefunc).opaque,filestream)) +//#define ZSEEK64(filefunc,filestream,pos,mode) ((*((filefunc).zseek64_file)) ((filefunc).opaque,filestream,pos,mode)) +#define ZCLOSE64(filefunc,filestream) ((*((filefunc).zfile_func64.zclose_file)) ((filefunc).zfile_func64.opaque,filestream)) +#define ZERROR64(filefunc,filestream) ((*((filefunc).zfile_func64.zerror_file)) ((filefunc).zfile_func64.opaque,filestream)) + +voidpf call_zopen64 OF((const zlib_filefunc64_32_def* pfilefunc,const void*filename,int 
mode)); +long call_zseek64 OF((const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin)); +ZPOS64_T call_ztell64 OF((const zlib_filefunc64_32_def* pfilefunc,voidpf filestream)); + +void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32); + +#define ZOPEN64(filefunc,filename,mode) (call_zopen64((&(filefunc)),(filename),(mode))) +#define ZTELL64(filefunc,filestream) (call_ztell64((&(filefunc)),(filestream))) +#define ZSEEK64(filefunc,filestream,pos,mode) (call_zseek64((&(filefunc)),(filestream),(pos),(mode))) #ifdef __cplusplus } #endif #endif - diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/iowin32.c nodejs-0.11.15/deps/zlib/contrib/minizip/iowin32.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/iowin32.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/iowin32.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,14 @@ /* iowin32.c -- IO base function header for compress/uncompress .zip - files using zlib + zip or unzip API - This IO API version uses the Win32 API (for Microsoft Windows) + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Version 1.01e, February 12th, 2005 + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt - Copyright (C) 1998-2005 Gilles Vollant */ #include <stdlib.h> @@ -21,40 +25,20 @@ #define INVALID_SET_FILE_POINTER ((DWORD)-1) #endif -voidpf ZCALLBACK win32_open_file_func OF(( - voidpf opaque, - const char* filename, - int mode)); - -uLong ZCALLBACK win32_read_file_func OF(( - voidpf opaque, - voidpf stream, - void* buf, - uLong size)); - -uLong ZCALLBACK win32_write_file_func OF(( - voidpf opaque, - voidpf stream, - const void* buf, - uLong size)); - -long 
ZCALLBACK win32_tell_file_func OF(( - voidpf opaque, - voidpf stream)); - -long ZCALLBACK win32_seek_file_func OF(( - voidpf opaque, - voidpf stream, - uLong offset, - int origin)); - -int ZCALLBACK win32_close_file_func OF(( - voidpf opaque, - voidpf stream)); - -int ZCALLBACK win32_error_file_func OF(( - voidpf opaque, - voidpf stream)); + +#if defined(WINAPI_FAMILY_PARTITION) && (!(defined(IOWIN32_USING_WINRT_API))) +#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) +#define IOWIN32_USING_WINRT_API 1 +#endif +#endif + +voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode)); +uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); +uLong ZCALLBACK win32_write_file_func OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); +ZPOS64_T ZCALLBACK win32_tell64_file_func OF((voidpf opaque, voidpf stream)); +long ZCALLBACK win32_seek64_file_func OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); +int ZCALLBACK win32_close_file_func OF((voidpf opaque, voidpf stream)); +int ZCALLBACK win32_error_file_func OF((voidpf opaque, voidpf stream)); typedef struct { @@ -62,69 +46,163 @@ int error; } WIN32FILE_IOWIN; -voidpf ZCALLBACK win32_open_file_func (opaque, filename, mode) - voidpf opaque; - const char* filename; - int mode; -{ - const char* mode_fopen = NULL; - DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; - HANDLE hFile = 0; - voidpf ret=NULL; - dwDesiredAccess = dwShareMode = dwFlagsAndAttributes = 0; +static void win32_translate_open_mode(int mode, + DWORD* lpdwDesiredAccess, + DWORD* lpdwCreationDisposition, + DWORD* lpdwShareMode, + DWORD* lpdwFlagsAndAttributes) +{ + *lpdwDesiredAccess = *lpdwShareMode = *lpdwFlagsAndAttributes = *lpdwCreationDisposition = 0; if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) { - dwDesiredAccess = GENERIC_READ; - dwCreationDisposition = OPEN_EXISTING; - dwShareMode = 
FILE_SHARE_READ; + *lpdwDesiredAccess = GENERIC_READ; + *lpdwCreationDisposition = OPEN_EXISTING; + *lpdwShareMode = FILE_SHARE_READ; } - else - if (mode & ZLIB_FILEFUNC_MODE_EXISTING) + else if (mode & ZLIB_FILEFUNC_MODE_EXISTING) { - dwDesiredAccess = GENERIC_WRITE | GENERIC_READ; - dwCreationDisposition = OPEN_EXISTING; + *lpdwDesiredAccess = GENERIC_WRITE | GENERIC_READ; + *lpdwCreationDisposition = OPEN_EXISTING; } - else - if (mode & ZLIB_FILEFUNC_MODE_CREATE) + else if (mode & ZLIB_FILEFUNC_MODE_CREATE) { - dwDesiredAccess = GENERIC_WRITE | GENERIC_READ; - dwCreationDisposition = CREATE_ALWAYS; + *lpdwDesiredAccess = GENERIC_WRITE | GENERIC_READ; + *lpdwCreationDisposition = CREATE_ALWAYS; } +} - if ((filename!=NULL) && (dwDesiredAccess != 0)) - hFile = CreateFile((LPCTSTR)filename, dwDesiredAccess, dwShareMode, NULL, - dwCreationDisposition, dwFlagsAndAttributes, NULL); - - if (hFile == INVALID_HANDLE_VALUE) - hFile = NULL; +static voidpf win32_build_iowin(HANDLE hFile) +{ + voidpf ret=NULL; - if (hFile != NULL) + if ((hFile != NULL) && (hFile != INVALID_HANDLE_VALUE)) { WIN32FILE_IOWIN w32fiow; w32fiow.hf = hFile; w32fiow.error = 0; ret = malloc(sizeof(WIN32FILE_IOWIN)); + if (ret==NULL) CloseHandle(hFile); - else *((WIN32FILE_IOWIN*)ret) = w32fiow; + else + *((WIN32FILE_IOWIN*)ret) = w32fiow; } return ret; } +voidpf ZCALLBACK win32_open64_file_func (voidpf opaque,const void* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API +#ifdef UNICODE + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile2((LPCTSTR)filename, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + { + WCHAR filenameW[FILENAME_MAX + 0x200 + 1]; + 
MultiByteToWideChar(CP_ACP,0,(const char*)filename,-1,filenameW,FILENAME_MAX + 0x200); + hFile = CreateFile2(filenameW, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); + } +#endif +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile((LPCTSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +voidpf ZCALLBACK win32_open64_file_funcA (voidpf opaque,const void* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API + if ((filename!=NULL) && (dwDesiredAccess != 0)) + { + WCHAR filenameW[FILENAME_MAX + 0x200 + 1]; + MultiByteToWideChar(CP_ACP,0,(const char*)filename,-1,filenameW,FILENAME_MAX + 0x200); + hFile = CreateFile2(filenameW, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); + } +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFileA((LPCSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + -uLong ZCALLBACK win32_read_file_func (opaque, stream, buf, size) - voidpf opaque; - voidpf stream; - void* buf; - uLong size; +voidpf ZCALLBACK win32_open64_file_funcW (voidpf opaque,const void* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile2((LPCWSTR)filename, dwDesiredAccess, dwShareMode, dwCreationDisposition,NULL); +#else + if 
((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFileW((LPCWSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +voidpf ZCALLBACK win32_open_file_func (voidpf opaque,const char* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API +#ifdef UNICODE + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile2((LPCTSTR)filename, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + { + WCHAR filenameW[FILENAME_MAX + 0x200 + 1]; + MultiByteToWideChar(CP_ACP,0,(const char*)filename,-1,filenameW,FILENAME_MAX + 0x200); + hFile = CreateFile2(filenameW, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); + } +#endif +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile((LPCTSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +uLong ZCALLBACK win32_read_file_func (voidpf opaque, voidpf stream, void* buf,uLong size) { uLong ret=0; HANDLE hFile = NULL; if (stream!=NULL) hFile = ((WIN32FILE_IOWIN*)stream) -> hf; + if (hFile != NULL) + { if (!ReadFile(hFile, buf, size, &ret, NULL)) { DWORD dwErr = GetLastError(); @@ -132,23 +210,21 @@ dwErr = 0; ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; } + } return ret; } -uLong ZCALLBACK win32_write_file_func (opaque, stream, buf, size) - voidpf opaque; - voidpf stream; - const void* buf; - uLong size; +uLong ZCALLBACK win32_write_file_func (voidpf opaque,voidpf stream,const void* buf,uLong size) { uLong ret=0; HANDLE hFile = NULL; if (stream!=NULL) hFile = 
((WIN32FILE_IOWIN*)stream) -> hf; - if (hFile !=NULL) + if (hFile != NULL) + { if (!WriteFile(hFile, buf, size, &ret, NULL)) { DWORD dwErr = GetLastError(); @@ -156,13 +232,32 @@ dwErr = 0; ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; } + } return ret; } -long ZCALLBACK win32_tell_file_func (opaque, stream) - voidpf opaque; - voidpf stream; +static BOOL MySetFilePointerEx(HANDLE hFile, LARGE_INTEGER pos, LARGE_INTEGER *newPos, DWORD dwMoveMethod) +{ +#ifdef IOWIN32_USING_WINRT_API + return SetFilePointerEx(hFile, pos, newPos, dwMoveMethod); +#else + LONG lHigh = pos.HighPart; + DWORD dwNewPos = SetFilePointer(hFile, pos.LowPart, &lHigh, FILE_CURRENT); + BOOL fOk = TRUE; + if (dwNewPos == 0xFFFFFFFF) + if (GetLastError() != NO_ERROR) + fOk = FALSE; + if ((newPos != NULL) && (fOk)) + { + newPos->LowPart = dwNewPos; + newPos->HighPart = lHigh; + } + return fOk; +#endif +} + +long ZCALLBACK win32_tell_file_func (voidpf opaque,voidpf stream) { long ret=-1; HANDLE hFile = NULL; @@ -170,24 +265,47 @@ hFile = ((WIN32FILE_IOWIN*)stream) -> hf; if (hFile != NULL) { - DWORD dwSet = SetFilePointer(hFile, 0, NULL, FILE_CURRENT); - if (dwSet == INVALID_SET_FILE_POINTER) + LARGE_INTEGER pos; + pos.QuadPart = 0; + + if (!MySetFilePointerEx(hFile, pos, &pos, FILE_CURRENT)) { DWORD dwErr = GetLastError(); ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; ret = -1; } else - ret=(long)dwSet; + ret=(long)pos.LowPart; } return ret; } -long ZCALLBACK win32_seek_file_func (opaque, stream, offset, origin) - voidpf opaque; - voidpf stream; - uLong offset; - int origin; +ZPOS64_T ZCALLBACK win32_tell64_file_func (voidpf opaque, voidpf stream) +{ + ZPOS64_T ret= (ZPOS64_T)-1; + HANDLE hFile = NULL; + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream)->hf; + + if (hFile) + { + LARGE_INTEGER pos; + pos.QuadPart = 0; + + if (!MySetFilePointerEx(hFile, pos, &pos, FILE_CURRENT)) + { + DWORD dwErr = GetLastError(); + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + ret = (ZPOS64_T)-1; + } + 
else + ret=pos.QuadPart; + } + return ret; +} + + +long ZCALLBACK win32_seek_file_func (voidpf opaque,voidpf stream,uLong offset,int origin) { DWORD dwMoveMethod=0xFFFFFFFF; HANDLE hFile = NULL; @@ -211,8 +329,9 @@ if (hFile != NULL) { - DWORD dwSet = SetFilePointer(hFile, offset, NULL, dwMoveMethod); - if (dwSet == INVALID_SET_FILE_POINTER) + LARGE_INTEGER pos; + pos.QuadPart = offset; + if (!MySetFilePointerEx(hFile, pos, NULL, dwMoveMethod)) { DWORD dwErr = GetLastError(); ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; @@ -224,9 +343,46 @@ return ret; } -int ZCALLBACK win32_close_file_func (opaque, stream) - voidpf opaque; - voidpf stream; +long ZCALLBACK win32_seek64_file_func (voidpf opaque, voidpf stream,ZPOS64_T offset,int origin) +{ + DWORD dwMoveMethod=0xFFFFFFFF; + HANDLE hFile = NULL; + long ret=-1; + + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream)->hf; + + switch (origin) + { + case ZLIB_FILEFUNC_SEEK_CUR : + dwMoveMethod = FILE_CURRENT; + break; + case ZLIB_FILEFUNC_SEEK_END : + dwMoveMethod = FILE_END; + break; + case ZLIB_FILEFUNC_SEEK_SET : + dwMoveMethod = FILE_BEGIN; + break; + default: return -1; + } + + if (hFile) + { + LARGE_INTEGER pos; + pos.QuadPart = offset; + if (!MySetFilePointerEx(hFile, pos, NULL, FILE_CURRENT)) + { + DWORD dwErr = GetLastError(); + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + ret = -1; + } + else + ret=0; + } + return ret; +} + +int ZCALLBACK win32_close_file_func (voidpf opaque, voidpf stream) { int ret=-1; @@ -244,9 +400,7 @@ return ret; } -int ZCALLBACK win32_error_file_func (opaque, stream) - voidpf opaque; - voidpf stream; +int ZCALLBACK win32_error_file_func (voidpf opaque,voidpf stream) { int ret=-1; if (stream!=NULL) @@ -256,8 +410,7 @@ return ret; } -void fill_win32_filefunc (pzlib_filefunc_def) - zlib_filefunc_def* pzlib_filefunc_def; +void fill_win32_filefunc (zlib_filefunc_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen_file = win32_open_file_func; pzlib_filefunc_def->zread_file = 
win32_read_file_func; @@ -266,5 +419,43 @@ pzlib_filefunc_def->zseek_file = win32_seek_file_func; pzlib_filefunc_def->zclose_file = win32_close_file_func; pzlib_filefunc_def->zerror_file = win32_error_file_func; - pzlib_filefunc_def->opaque=NULL; + pzlib_filefunc_def->opaque = NULL; +} + +void fill_win32_filefunc64(zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = win32_open64_file_func; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell64_file = win32_tell64_file_func; + pzlib_filefunc_def->zseek64_file = win32_seek64_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; +} + + +void fill_win32_filefunc64A(zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = win32_open64_file_funcA; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell64_file = win32_tell64_file_func; + pzlib_filefunc_def->zseek64_file = win32_seek64_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; +} + + +void fill_win32_filefunc64W(zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = win32_open64_file_funcW; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell64_file = win32_tell64_file_func; + pzlib_filefunc_def->zseek64_file = win32_seek64_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; } diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/iowin32.h nodejs-0.11.15/deps/zlib/contrib/minizip/iowin32.h 
--- nodejs-0.11.13/deps/zlib/contrib/minizip/iowin32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/iowin32.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,14 @@ /* iowin32.h -- IO base function header for compress/uncompress .zip - files using zlib + zip or unzip API - This IO API version uses the Win32 API (for Microsoft Windows) + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Version 1.01e, February 12th, 2005 + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt - Copyright (C) 1998-2005 Gilles Vollant */ #include <windows.h> @@ -15,6 +19,9 @@ #endif void fill_win32_filefunc OF((zlib_filefunc_def* pzlib_filefunc_def)); +void fill_win32_filefunc64 OF((zlib_filefunc64_def* pzlib_filefunc_def)); +void fill_win32_filefunc64A OF((zlib_filefunc64_def* pzlib_filefunc_def)); +void fill_win32_filefunc64W OF((zlib_filefunc64_def* pzlib_filefunc_def)); #ifdef __cplusplus } diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/Makefile.am nodejs-0.11.15/deps/zlib/contrib/minizip/Makefile.am --- nodejs-0.11.13/deps/zlib/contrib/minizip/Makefile.am 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/Makefile.am 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,45 @@ +lib_LTLIBRARIES = libminizip.la + +if COND_DEMOS +bin_PROGRAMS = miniunzip minizip +endif + +zlib_top_srcdir = $(top_srcdir)/../.. +zlib_top_builddir = $(top_builddir)/../.. 
+ +AM_CPPFLAGS = -I$(zlib_top_srcdir) +AM_LDFLAGS = -L$(zlib_top_builddir) + +if WIN32 +iowin32_src = iowin32.c +iowin32_h = iowin32.h +endif + +libminizip_la_SOURCES = \ + ioapi.c \ + mztools.c \ + unzip.c \ + zip.c \ + ${iowin32_src} + +libminizip_la_LDFLAGS = $(AM_LDFLAGS) -version-info 1:0:0 -lz + +minizip_includedir = $(includedir)/minizip +minizip_include_HEADERS = \ + crypt.h \ + ioapi.h \ + mztools.h \ + unzip.h \ + zip.h \ + ${iowin32_h} + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = minizip.pc + +EXTRA_PROGRAMS = miniunzip minizip + +miniunzip_SOURCES = miniunz.c +miniunzip_LDADD = libminizip.la + +minizip_SOURCES = minizip.c +minizip_LDADD = libminizip.la -lz diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/make_vms.com nodejs-0.11.15/deps/zlib/contrib/minizip/make_vms.com --- nodejs-0.11.13/deps/zlib/contrib/minizip/make_vms.com 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/make_vms.com 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,25 @@ +$ if f$search("ioapi.h_orig") .eqs. 
"" then copy ioapi.h ioapi.h_orig +$ open/write zdef vmsdefs.h +$ copy sys$input: zdef +$ deck +#define unix +#define fill_zlib_filefunc64_32_def_from_filefunc32 fillzffunc64from +#define Write_Zip64EndOfCentralDirectoryLocator Write_Zip64EoDLocator +#define Write_Zip64EndOfCentralDirectoryRecord Write_Zip64EoDRecord +#define Write_EndOfCentralDirectoryRecord Write_EoDRecord +$ eod +$ close zdef +$ copy vmsdefs.h,ioapi.h_orig ioapi.h +$ cc/include=[--]/prefix=all ioapi.c +$ cc/include=[--]/prefix=all miniunz.c +$ cc/include=[--]/prefix=all unzip.c +$ cc/include=[--]/prefix=all minizip.c +$ cc/include=[--]/prefix=all zip.c +$ link miniunz,unzip,ioapi,[--]libz.olb/lib +$ link minizip,zip,ioapi,[--]libz.olb/lib +$ mcr []minizip test minizip_info.txt +$ mcr []miniunz -l test.zip +$ rename minizip_info.txt; minizip_info.txt_old +$ mcr []miniunz test.zip +$ delete test.zip;* +$exit diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/miniunz.c nodejs-0.11.15/deps/zlib/contrib/minizip/miniunz.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/miniunz.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/miniunz.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,43 @@ /* miniunz.c - Version 1.01e, February 12th, 2005 + Version 1.1, February 14h, 2010 + sample part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Copyright (C) 1998-2005 Gilles Vollant + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) */ +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) + #ifndef __USE_FILE_OFFSET64 + #define __USE_FILE_OFFSET64 + #endif + #ifndef __USE_LARGEFILE64 + #define __USE_LARGEFILE64 + #endif + #ifndef _LARGEFILE64_SOURCE + #define _LARGEFILE64_SOURCE + #endif + 
#ifndef _FILE_OFFSET_BIT + #define _FILE_OFFSET_BIT 64 + #endif +#endif + +#ifdef __APPLE__ +// In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions +#define FOPEN_FUNC(filename, mode) fopen(filename, mode) +#define FTELLO_FUNC(stream) ftello(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko(stream, offset, origin) +#else +#define FOPEN_FUNC(filename, mode) fopen64(filename, mode) +#define FTELLO_FUNC(stream) ftello64(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko64(stream, offset, origin) +#endif + #include <stdio.h> #include <stdlib.h> @@ -13,21 +46,22 @@ #include <errno.h> #include <fcntl.h> -#ifdef unix -# include <unistd.h> -# include <utime.h> -#else +#ifdef _WIN32 # include <direct.h> # include <io.h> +#else +# include <unistd.h> +# include <utime.h> #endif + #include "unzip.h" #define CASESENSITIVITY (0) #define WRITEBUFFERSIZE (8192) #define MAXFILENAME (256) -#ifdef WIN32 +#ifdef _WIN32 #define USEWIN32IOAPI #include "iowin32.h" #endif @@ -51,11 +85,11 @@ uLong dosdate; tm_unz tmu_date; { -#ifdef WIN32 +#ifdef _WIN32 HANDLE hFile; FILETIME ftm,ftLocal,ftCreate,ftLastAcc,ftLastWrite; - hFile = CreateFile(filename,GENERIC_READ | GENERIC_WRITE, + hFile = CreateFileA(filename,GENERIC_READ | GENERIC_WRITE, 0,NULL,OPEN_EXISTING,0,NULL); GetFileTime(hFile,&ftCreate,&ftLastAcc,&ftLastWrite); DosDateTimeToFileTime((WORD)(dosdate>>16),(WORD)dosdate,&ftLocal); @@ -63,7 +97,7 @@ SetFileTime(hFile,&ftm,&ftLastAcc,&ftm); CloseHandle(hFile); #else -#ifdef unix +#ifdef unix || __APPLE__ struct utimbuf ut; struct tm newdate; newdate.tm_sec = tmu_date.tm_sec; @@ -91,12 +125,12 @@ const char* dirname; { int ret=0; -#ifdef WIN32 - ret = mkdir(dirname); -#else -#ifdef unix +#ifdef _WIN32 + ret = _mkdir(dirname); +#elif unix + ret = mkdir (dirname,0775); +#elif __APPLE__ ret = mkdir (dirname,0775); -#endif #endif return ret; } @@ -112,6 +146,11 @@ return 0; buffer = (char*)malloc(len+1); + if 
(buffer==NULL) + { + printf("Error allocating memory\n"); + return UNZ_INTERNALERROR; + } strcpy(buffer,newdir); if (buffer[len-1] == '/') { @@ -164,34 +203,61 @@ " -p extract crypted file using password\n\n"); } +void Display64BitsSize(ZPOS64_T n, int size_char) +{ + /* to avoid compatibility problem , we do here the conversion */ + char number[21]; + int offset=19; + int pos_string = 19; + number[20]=0; + for (;;) { + number[offset]=(char)((n%10)+'0'); + if (number[offset] != '0') + pos_string=offset; + n/=10; + if (offset==0) + break; + offset--; + } + { + int size_display_string = 19-pos_string; + while (size_char > size_display_string) + { + size_char--; + printf(" "); + } + } + + printf("%s",&number[pos_string]); +} int do_list(uf) unzFile uf; { uLong i; - unz_global_info gi; + unz_global_info64 gi; int err; - err = unzGetGlobalInfo (uf,&gi); + err = unzGetGlobalInfo64(uf,&gi); if (err!=UNZ_OK) printf("error %d with zipfile in unzGetGlobalInfo \n",err); - printf(" Length Method Size Ratio Date Time CRC-32 Name\n"); - printf(" ------ ------ ---- ----- ---- ---- ------ ----\n"); + printf(" Length Method Size Ratio Date Time CRC-32 Name\n"); + printf(" ------ ------ ---- ----- ---- ---- ------ ----\n"); for (i=0;i<gi.number_entry;i++) { char filename_inzip[256]; - unz_file_info file_info; + unz_file_info64 file_info; uLong ratio=0; const char *string_method; char charCrypt=' '; - err = unzGetCurrentFileInfo(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0); + err = unzGetCurrentFileInfo64(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0); if (err!=UNZ_OK) { printf("error %d with zipfile in unzGetCurrentFileInfo\n",err); break; } if (file_info.uncompressed_size>0) - ratio = (file_info.compressed_size*100)/file_info.uncompressed_size; + ratio = (uLong)((file_info.compressed_size*100)/file_info.uncompressed_size); /* display a '*' if the file is crypted */ if ((file_info.flag & 1) != 0) @@ -211,12 +277,17 @@ string_method="Defl:F"; 
/* 2:fast , 3 : extra fast*/ } else + if (file_info.compression_method==Z_BZIP2ED) + { + string_method="BZip2 "; + } + else string_method="Unkn. "; - printf("%7lu %6s%c%7lu %3lu%% %2.2lu-%2.2lu-%2.2lu %2.2lu:%2.2lu %8.8lx %s\n", - file_info.uncompressed_size,string_method, - charCrypt, - file_info.compressed_size, + Display64BitsSize(file_info.uncompressed_size,7); + printf(" %6s%c",string_method,charCrypt); + Display64BitsSize(file_info.compressed_size,7); + printf(" %3lu%% %2.2lu-%2.2lu-%2.2lu %2.2lu:%2.2lu %8.8lx %s\n", ratio, (uLong)file_info.tmu_date.tm_mon + 1, (uLong)file_info.tmu_date.tm_mday, @@ -252,9 +323,9 @@ void* buf; uInt size_buf; - unz_file_info file_info; + unz_file_info64 file_info; uLong ratio=0; - err = unzGetCurrentFileInfo(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0); + err = unzGetCurrentFileInfo64(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0); if (err!=UNZ_OK) { @@ -306,7 +377,7 @@ { char rep=0; FILE* ftestexist; - ftestexist = fopen(write_filename,"rb"); + ftestexist = FOPEN_FUNC(write_filename,"rb"); if (ftestexist!=NULL) { fclose(ftestexist); @@ -317,7 +388,7 @@ printf("The file %s exists. Overwrite ? 
[y]es, [n]o, [A]ll: ",write_filename); ret = scanf("%1s",answer); - if (ret != 1) + if (ret != 1) { exit(EXIT_FAILURE); } @@ -337,8 +408,7 @@ if ((skip==0) && (err==UNZ_OK)) { - fout=fopen(write_filename,"wb"); - + fout=FOPEN_FUNC(write_filename,"wb"); /* some zipfile don't contain directory alone before file */ if ((fout==NULL) && ((*popt_extract_without_path)==0) && (filename_withoutpath!=(char*)filename_inzip)) @@ -347,7 +417,7 @@ *(filename_withoutpath-1)='\0'; makedir(write_filename); *(filename_withoutpath-1)=c; - fout=fopen(write_filename,"wb"); + fout=FOPEN_FUNC(write_filename,"wb"); } if (fout==NULL) @@ -409,11 +479,11 @@ const char* password; { uLong i; - unz_global_info gi; + unz_global_info64 gi; int err; FILE* fout=NULL; - err = unzGetGlobalInfo (uf,&gi); + err = unzGetGlobalInfo64(uf,&gi); if (err!=UNZ_OK) printf("error %d with zipfile in unzGetGlobalInfo \n",err); @@ -470,6 +540,7 @@ const char *password=NULL; char filename_try[MAXFILENAME+16] = ""; int i; + int ret_value=0; int opt_do_list=0; int opt_do_extract=1; int opt_do_extract_withoutpath=0; @@ -532,7 +603,7 @@ { # ifdef USEWIN32IOAPI - zlib_filefunc_def ffunc; + zlib_filefunc64_def ffunc; # endif strncpy(filename_try, zipfilename,MAXFILENAME-1); @@ -540,18 +611,18 @@ filename_try[ MAXFILENAME ] = '\0'; # ifdef USEWIN32IOAPI - fill_win32_filefunc(&ffunc); - uf = unzOpen2(zipfilename,&ffunc); + fill_win32_filefunc64A(&ffunc); + uf = unzOpen2_64(zipfilename,&ffunc); # else - uf = unzOpen(zipfilename); + uf = unzOpen64(zipfilename); # endif if (uf==NULL) { strcat(filename_try,".zip"); # ifdef USEWIN32IOAPI - uf = unzOpen2(filename_try,&ffunc); + uf = unzOpen2_64(filename_try,&ffunc); # else - uf = unzOpen(filename_try); + uf = unzOpen64(filename_try); # endif } } @@ -564,22 +635,26 @@ printf("%s opened\n",filename_try); if (opt_do_list==1) - return do_list(uf); + ret_value = do_list(uf); else if (opt_do_extract==1) { - if (opt_extractdir && chdir(dirname)) +#ifdef _WIN32 + if (opt_extractdir && 
_chdir(dirname)) +#else + if (opt_extractdir && chdir(dirname)) +#endif { printf("Error changing into %s, aborting\n", dirname); exit(-1); } if (filename_to_extract == NULL) - return do_extract(uf,opt_do_extract_withoutpath,opt_overwrite,password); + ret_value = do_extract(uf, opt_do_extract_withoutpath, opt_overwrite, password); else - return do_extract_onefile(uf,filename_to_extract, - opt_do_extract_withoutpath,opt_overwrite,password); + ret_value = do_extract_onefile(uf, filename_to_extract, opt_do_extract_withoutpath, opt_overwrite, password); } - unzCloseCurrentFile(uf); - return 0; + unzClose(uf); + + return ret_value; } diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/miniunzip.1 nodejs-0.11.15/deps/zlib/contrib/minizip/miniunzip.1 --- nodejs-0.11.13/deps/zlib/contrib/minizip/miniunzip.1 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/miniunzip.1 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,63 @@ +.\" Hey, EMACS: -*- nroff -*- +.TH miniunzip 1 "Nov 7, 2001" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp <n> insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +miniunzip - uncompress and examine ZIP archives +.SH SYNOPSIS +.B miniunzip +.RI [ -exvlo ] +zipfile [ files_to_extract ] [-d tempdir] +.SH DESCRIPTION +.B minizip +is a simple tool which allows the extraction of compressed file +archives in the ZIP format used by the MS-DOS utility PKZIP. It was +written as a demonstration of the +.IR zlib (3) +library and therefore lack many of the features of the +.IR unzip (1) +program. +.SH OPTIONS +A number of options are supported. 
With the exception of +.BI \-d\ tempdir +these must be supplied before any +other arguments and are: +.TP +.BI \-l\ ,\ \-\-v +List the files in the archive without extracting them. +.TP +.B \-o +Overwrite files without prompting for confirmation. +.TP +.B \-x +Extract files (default). +.PP +The +.I zipfile +argument is the name of the archive to process. The next argument can be used +to specify a single file to extract from the archive. + +Lastly, the following option can be specified at the end of the command-line: +.TP +.BI \-d\ tempdir +Extract the archive in the directory +.I tempdir +rather than the current directory. +.SH SEE ALSO +.BR minizip (1), +.BR zlib (3), +.BR unzip (1). +.SH AUTHOR +This program was written by Gilles Vollant. This manual page was +written by Mark Brown <broonie@sirena.org.uk>. The -d tempdir option +was added by Dirk Eddelbuettel <edd@debian.org>. diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/minizip.1 nodejs-0.11.15/deps/zlib/contrib/minizip/minizip.1 --- nodejs-0.11.13/deps/zlib/contrib/minizip/minizip.1 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/minizip.1 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,46 @@ +.\" Hey, EMACS: -*- nroff -*- +.TH minizip 1 "May 2, 2001" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp <n> insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +minizip - create ZIP archives +.SH SYNOPSIS +.B minizip +.RI [ -o ] +zipfile [ " files" ... ] +.SH DESCRIPTION +.B minizip +is a simple tool which allows the creation of compressed file archives +in the ZIP format used by the MS-DOS utility PKZIP. 
It was written as +a demonstration of the +.IR zlib (3) +library and therefore lack many of the features of the +.IR zip (1) +program. +.SH OPTIONS +The first argument supplied is the name of the ZIP archive to create or +.RI -o +in which case it is ignored and the second argument treated as the +name of the ZIP file. If the ZIP file already exists it will be +overwritten. +.PP +Subsequent arguments specify a list of files to place in the ZIP +archive. If none are specified then an empty archive will be created. +.SH SEE ALSO +.BR miniunzip (1), +.BR zlib (3), +.BR zip (1). +.SH AUTHOR +This program was written by Gilles Vollant. This manual page was +written by Mark Brown <broonie@sirena.org.uk>. + diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/MiniZip64_Changes.txt nodejs-0.11.15/deps/zlib/contrib/minizip/MiniZip64_Changes.txt --- nodejs-0.11.13/deps/zlib/contrib/minizip/MiniZip64_Changes.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/MiniZip64_Changes.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,6 @@ + +MiniZip 1.1 was derrived from MiniZip at version 1.01f + +Change in 1.0 (Okt 2009) + - **TODO - Add history** + diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/MiniZip64_info.txt nodejs-0.11.15/deps/zlib/contrib/minizip/MiniZip64_info.txt --- nodejs-0.11.13/deps/zlib/contrib/minizip/MiniZip64_info.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/MiniZip64_info.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,74 @@ +MiniZip - Copyright (c) 1998-2010 - by Gilles Vollant - version 1.1 64 bits from Mathias Svensson + +Introduction +--------------------- +MiniZip 1.1 is built from MiniZip 1.0 by Gilles Vollant ( http://www.winimage.com/zLibDll/minizip.html ) + +When adding ZIP64 support into minizip it would result into risk of breaking compatibility with minizip 1.0. +All possible work was done for compatibility. 
+ + +Background +--------------------- +When adding ZIP64 support Mathias Svensson found that Even Rouault have added ZIP64 +support for unzip.c into minizip for a open source project called gdal ( http://www.gdal.org/ ) + +That was used as a starting point. And after that ZIP64 support was added to zip.c +some refactoring and code cleanup was also done. + + +Changed from MiniZip 1.0 to MiniZip 1.1 +--------------------------------------- +* Added ZIP64 support for unzip ( by Even Rouault ) +* Added ZIP64 support for zip ( by Mathias Svensson ) +* Reverted some changed that Even Rouault did. +* Bunch of patches received from Gulles Vollant that he received for MiniZip from various users. +* Added unzip patch for BZIP Compression method (patch create by Daniel Borca) +* Added BZIP Compress method for zip +* Did some refactoring and code cleanup + + +Credits + + Gilles Vollant - Original MiniZip author + Even Rouault - ZIP64 unzip Support + Daniel Borca - BZip Compression method support in unzip + Mathias Svensson - ZIP64 zip support + Mathias Svensson - BZip Compression method support in zip + + Resources + + ZipLayout http://result42.com/projects/ZipFileLayout + Command line tool for Windows that shows the layout and information of the headers in a zip archive. + Used when debugging and validating the creation of zip files using MiniZip64 + + + ZIP App Note http://www.pkware.com/documents/casestudies/APPNOTE.TXT + Zip File specification + + +Notes. + * To be able to use BZip compression method in zip64.c or unzip64.c the BZIP2 lib is needed and HAVE_BZIP2 need to be defined. + +License +---------------------------------------------------------- + Condition of use and distribution are the same than zlib : + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. 
+ + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + +---------------------------------------------------------- + diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/minizip.c nodejs-0.11.15/deps/zlib/contrib/minizip/minizip.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/minizip.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/minizip.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,10 +1,46 @@ /* minizip.c - Version 1.01e, February 12th, 2005 + Version 1.1, February 14h, 2010 + sample part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Copyright (C) 1998-2005 Gilles Vollant + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) */ + +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) + #ifndef __USE_FILE_OFFSET64 + #define __USE_FILE_OFFSET64 + #endif + #ifndef __USE_LARGEFILE64 + #define __USE_LARGEFILE64 + #endif + #ifndef _LARGEFILE64_SOURCE + #define _LARGEFILE64_SOURCE + #endif + #ifndef _FILE_OFFSET_BIT + #define _FILE_OFFSET_BIT 64 + #endif +#endif + +#ifdef __APPLE__ +// In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need 
for specific 64 bit functions +#define FOPEN_FUNC(filename, mode) fopen(filename, mode) +#define FTELLO_FUNC(stream) ftello(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko(stream, offset, origin) +#else +#define FOPEN_FUNC(filename, mode) fopen64(filename, mode) +#define FTELLO_FUNC(stream) ftello64(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko64(stream, offset, origin) +#endif + + + #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -12,21 +48,21 @@ #include <errno.h> #include <fcntl.h> -#ifdef unix +#ifdef _WIN32 +# include <direct.h> +# include <io.h> +#else # include <unistd.h> # include <utime.h> # include <sys/types.h> # include <sys/stat.h> -#else -# include <direct.h> -# include <io.h> #endif #include "zip.h" -#ifdef WIN32 -#define USEWIN32IOAPI -#include "iowin32.h" +#ifdef _WIN32 + #define USEWIN32IOAPI + #include "iowin32.h" #endif @@ -34,7 +70,7 @@ #define WRITEBUFFERSIZE (16384) #define MAXFILENAME (256) -#ifdef WIN32 +#ifdef _WIN32 uLong filetime(f, tmzip, dt) char *f; /* name of file to get info on */ tm_zip *tmzip; /* return value: access, modific. and creation times */ @@ -44,9 +80,9 @@ { FILETIME ftLocal; HANDLE hFind; - WIN32_FIND_DATA ff32; + WIN32_FIND_DATAA ff32; - hFind = FindFirstFile(f,&ff32); + hFind = FindFirstFileA(f,&ff32); if (hFind != INVALID_HANDLE_VALUE) { FileTimeToLocalFileTime(&(ff32.ftLastWriteTime),&ftLocal); @@ -58,7 +94,7 @@ return ret; } #else -#ifdef unix +#ifdef unix || __APPLE__ uLong filetime(f, tmzip, dt) char *f; /* name of file to get info on */ tm_zip *tmzip; /* return value: access, modific. 
and creation times */ @@ -119,7 +155,7 @@ { FILE* ftestexist; int ret = 1; - ftestexist = fopen(filename,"rb"); + ftestexist = FOPEN_FUNC(filename,"rb"); if (ftestexist==NULL) ret = 0; else @@ -129,18 +165,19 @@ void do_banner() { - printf("MiniZip 1.01b, demo of zLib + Zip package written by Gilles Vollant\n"); - printf("more info at http://www.winimage.com/zLibDll/unzip.html\n\n"); + printf("MiniZip 1.1, demo of zLib + MiniZip64 package, written by Gilles Vollant\n"); + printf("more info on MiniZip at http://www.winimage.com/zLibDll/minizip.html\n\n"); } void do_help() { - printf("Usage : minizip [-o] [-a] [-0 to -9] [-p password] file.zip [files_to_add]\n\n" \ + printf("Usage : minizip [-o] [-a] [-0 to -9] [-p password] [-j] file.zip [files_to_add]\n\n" \ " -o Overwrite existing file.zip\n" \ " -a Append to existing file.zip\n" \ " -0 Store only\n" \ " -1 Compress faster\n" \ - " -9 Compress better\n\n"); + " -9 Compress better\n\n" \ + " -j exclude path. store only the file name.\n\n"); } /* calculate the CRC32 of a file, @@ -149,7 +186,8 @@ { unsigned long calculate_crc=0; int err=ZIP_OK; - FILE * fin = fopen(filenameinzip,"rb"); + FILE * fin = FOPEN_FUNC(filenameinzip,"rb"); + unsigned long size_read = 0; unsigned long total_read = 0; if (fin==NULL) @@ -179,10 +217,32 @@ fclose(fin); *result_crc=calculate_crc; - printf("file %s crc %x\n",filenameinzip,calculate_crc); + printf("file %s crc %lx\n", filenameinzip, calculate_crc); return err; } +int isLargeFile(const char* filename) +{ + int largeFile = 0; + ZPOS64_T pos = 0; + FILE* pFile = FOPEN_FUNC(filename, "rb"); + + if(pFile != NULL) + { + int n = FSEEKO_FUNC(pFile, 0, SEEK_END); + pos = FTELLO_FUNC(pFile); + + printf("File : %s is %lld bytes\n", filename, pos); + + if(pos >= 0xffffffff) + largeFile = 1; + + fclose(pFile); + } + + return largeFile; +} + int main(argc,argv) int argc; char *argv[]; @@ -190,6 +250,7 @@ int i; int opt_overwrite=0; int opt_compress_level=Z_DEFAULT_COMPRESSION; + int 
opt_exclude_path=0; int zipfilenamearg = 0; char filename_try[MAXFILENAME+16]; int zipok; @@ -222,6 +283,8 @@ opt_overwrite = 2; if ((c>='0') && (c<='9')) opt_compress_level = c-'0'; + if ((c=='j') || (c=='J')) + opt_exclude_path = 1; if (((c=='p') || (c=='P')) && (i+1<argc)) { @@ -231,8 +294,12 @@ } } else + { if (zipfilenamearg == 0) + { zipfilenamearg = i ; + } + } } } @@ -245,7 +312,9 @@ } if (zipfilenamearg==0) + { zipok=0; + } else { int i,len; @@ -302,11 +371,11 @@ zipFile zf; int errclose; # ifdef USEWIN32IOAPI - zlib_filefunc_def ffunc; - fill_win32_filefunc(&ffunc); - zf = zipOpen2(filename_try,(opt_overwrite==2) ? 2 : 0,NULL,&ffunc); + zlib_filefunc64_def ffunc; + fill_win32_filefunc64A(&ffunc); + zf = zipOpen2_64(filename_try,(opt_overwrite==2) ? 2 : 0,NULL,&ffunc); # else - zf = zipOpen(filename_try,(opt_overwrite==2) ? 2 : 0); + zf = zipOpen64(filename_try,(opt_overwrite==2) ? 2 : 0); # endif if (zf == NULL) @@ -329,8 +398,10 @@ FILE * fin; int size_read; const char* filenameinzip = argv[i]; + const char *savefilenameinzip; zip_fileinfo zi; unsigned long crcFile=0; + int zip64 = 0; zi.tmz_date.tm_sec = zi.tmz_date.tm_min = zi.tmz_date.tm_hour = zi.tmz_date.tm_mday = zi.tmz_date.tm_mon = zi.tmz_date.tm_year = 0; @@ -348,19 +419,48 @@ if ((password != NULL) && (err==ZIP_OK)) err = getFileCrc(filenameinzip,buf,size_buf,&crcFile); - err = zipOpenNewFileInZip3(zf,filenameinzip,&zi, + zip64 = isLargeFile(filenameinzip); + + /* The path name saved, should not include a leading slash. */ + /*if it did, windows/xp and dynazip couldn't read the zip file. 
*/ + savefilenameinzip = filenameinzip; + while( savefilenameinzip[0] == '\\' || savefilenameinzip[0] == '/' ) + { + savefilenameinzip++; + } + + /*should the zip file contain any path at all?*/ + if( opt_exclude_path ) + { + const char *tmpptr; + const char *lastslash = 0; + for( tmpptr = savefilenameinzip; *tmpptr; tmpptr++) + { + if( *tmpptr == '\\' || *tmpptr == '/') + { + lastslash = tmpptr; + } + } + if( lastslash != NULL ) + { + savefilenameinzip = lastslash+1; // base filename follows last slash. + } + } + + /**/ + err = zipOpenNewFileInZip3_64(zf,savefilenameinzip,&zi, NULL,0,NULL,0,NULL /* comment*/, (opt_compress_level != 0) ? Z_DEFLATED : 0, opt_compress_level,0, /* -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, */ -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, - password,crcFile); + password,crcFile, zip64); if (err != ZIP_OK) printf("error in opening %s in zipfile\n",filenameinzip); else { - fin = fopen(filenameinzip,"rb"); + fin = FOPEN_FUNC(filenameinzip,"rb"); if (fin==NULL) { err=ZIP_ERRNO; diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/minizip.pc.in nodejs-0.11.15/deps/zlib/contrib/minizip/minizip.pc.in --- nodejs-0.11.13/deps/zlib/contrib/minizip/minizip.pc.in 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/minizip.pc.in 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@/minizip + +Name: minizip +Description: Minizip zip file manipulation library +Requires: +Version: @PACKAGE_VERSION@ +Libs: -L${libdir} -lminizip +Libs.private: -lz +Cflags: -I${includedir} diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/mztools.c nodejs-0.11.15/deps/zlib/contrib/minizip/mztools.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/mztools.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/mztools.c 2015-01-20 21:22:17.000000000 +0000 @@ -8,12 +8,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> - 
-#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> -#else #include "zlib.h" -#endif #include "unzip.h" #define READ_8(adr) ((unsigned char)*(adr)) @@ -47,7 +42,7 @@ int entries = 0; uLong totalBytes = 0; char header[30]; - char filename[256]; + char filename[1024]; char extra[1024]; int offset = 0; int offsetCD = 0; @@ -67,7 +62,7 @@ unsigned int fnsize = READ_16(header + 26); /* file name length */ unsigned int extsize = READ_16(header + 28); /* extra field length */ filename[0] = extra[0] = '\0'; - + /* Header */ if (fwrite(header, 1, 30, fpOut) == 30) { offset += 30; @@ -75,12 +70,17 @@ err = Z_ERRNO; break; } - + /* Filename */ if (fnsize > 0) { - if (fread(filename, 1, fnsize, fpZip) == fnsize) { - if (fwrite(filename, 1, fnsize, fpOut) == fnsize) { - offset += fnsize; + if (fnsize < sizeof(filename)) { + if (fread(filename, 1, fnsize, fpZip) == fnsize) { + if (fwrite(filename, 1, fnsize, fpOut) == fnsize) { + offset += fnsize; + } else { + err = Z_ERRNO; + break; + } } else { err = Z_ERRNO; break; @@ -96,9 +96,14 @@ /* Extra field */ if (extsize > 0) { - if (fread(extra, 1, extsize, fpZip) == extsize) { - if (fwrite(extra, 1, extsize, fpOut) == extsize) { - offset += extsize; + if (extsize < sizeof(extra)) { + if (fread(extra, 1, extsize, fpZip) == extsize) { + if (fwrite(extra, 1, extsize, fpOut) == extsize) { + offset += extsize; + } else { + err = Z_ERRNO; + break; + } } else { err = Z_ERRNO; break; @@ -108,7 +113,7 @@ break; } } - + /* Data */ { int dataSize = cpsize; @@ -138,7 +143,7 @@ } } } - + /* Central directory entry */ { char header[46]; @@ -164,7 +169,7 @@ /* Header */ if (fwrite(header, 1, 46, fpOutCD) == 46) { offsetCD += 46; - + /* Filename */ if (fnsize > 0) { if (fwrite(filename, 1, fnsize, fpOutCD) == fnsize) { @@ -177,7 +182,7 @@ err = Z_STREAM_ERROR; break; } - + /* Extra field */ if (extsize > 0) { if (fwrite(extra, 1, extsize, fpOutCD) == extsize) { @@ -187,7 +192,7 @@ break; } } - + /* Comment field */ if (comsize > 0) { if 
((int)fwrite(comment, 1, comsize, fpOutCD) == comsize) { @@ -197,8 +202,8 @@ break; } } - - + + } else { err = Z_ERRNO; break; @@ -230,17 +235,17 @@ WRITE_32(header + 12, offsetCD); /* size of CD */ WRITE_32(header + 16, offset); /* offset to CD */ WRITE_16(header + 20, comsize); /* comment */ - + /* Header */ if (fwrite(header, 1, 22, fpOutCD) == 22) { - + /* Comment field */ if (comsize > 0) { if ((int)fwrite(comment, 1, comsize, fpOutCD) != comsize) { err = Z_ERRNO; } } - + } else { err = Z_ERRNO; } @@ -262,14 +267,14 @@ fclose(fpOutCD); } } - + /* Close */ fclose(fpZip); fclose(fpOut); - + /* Wipe temporary file */ (void)remove(fileOutTmp); - + /* Number of recovered entries */ if (err == Z_OK) { if (nRecovered != NULL) { diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/mztools.h nodejs-0.11.15/deps/zlib/contrib/minizip/mztools.h --- nodejs-0.11.13/deps/zlib/contrib/minizip/mztools.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/mztools.h 2015-01-20 21:22:17.000000000 +0000 @@ -11,23 +11,27 @@ extern "C" { #endif -#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> -#else +#ifndef _ZLIB_H #include "zlib.h" #endif #include "unzip.h" -/* Repair a ZIP file (missing central directory) +/* Repair a ZIP file (missing central directory) file: file to recover fileOut: output file after recovery fileOutTmp: temporary file name used for recovery */ -extern int ZEXPORT unzRepair(const char* file, - const char* fileOut, - const char* fileOutTmp, +extern int ZEXPORT unzRepair(const char* file, + const char* fileOut, + const char* fileOutTmp, uLong* nRecovered, uLong* bytesRecovered); + +#ifdef __cplusplus +} +#endif + + #endif diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/unzip.c nodejs-0.11.15/deps/zlib/contrib/minizip/unzip.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/unzip.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/unzip.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,48 +1,78 @@ /* unzip.c -- IO 
for uncompress .zip files using zlib - Version 1.01e, February 12th, 2005 + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Copyright (C) 1998-2005 Gilles Vollant + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) - Read unzip.h for more info -*/ + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + + ------------------------------------------------------------------------------------ + Decryption code comes from crypt.c by Info-ZIP but has been greatly reduced in terms of + compatibility with older software. The following is from the original crypt.c. + Code woven in by Terry Thorsen 1/2003. -/* Decryption code comes from crypt.c by Info-ZIP but has been greatly reduced in terms of -compatibility with older software. The following is from the original crypt.c. Code -woven in by Terry Thorsen 1/2003. -*/ -/* Copyright (c) 1990-2000 Info-ZIP. All rights reserved. See the accompanying file LICENSE, version 2000-Apr-09 or later (the contents of which are also included in zip.h) for terms of use. If, for some reason, all these files are missing, the Info-ZIP license also may be found at: ftp://ftp.info-zip.org/pub/infozip/license.html -*/ -/* - crypt.c (full version) by Info-ZIP. Last revised: [see crypt.h] + + crypt.c (full version) by Info-ZIP. Last revised: [see crypt.h] The encryption/decryption parts of this source code (as opposed to the non-echoing password parts) were originally written in Europe. The whole source package can be freely distributed, including from the USA. (Prior to January 2000, re-export from the US was a violation of US law.) 
- */ -/* - This encryption code is a direct transcription of the algorithm from + This encryption code is a direct transcription of the algorithm from Roger Schlafly, described by Phil Katz in the file appnote.txt. This file (appnote.txt) is distributed with the PKZIP program (even in the version without encryption capabilities). - */ + + ------------------------------------------------------------------------------------ + + Changes in unzip.c + + 2007-2008 - Even Rouault - Addition of cpl_unzGetCurrentFileZStreamPos + 2007-2008 - Even Rouault - Decoration of symbol names unz* -> cpl_unz* + 2007-2008 - Even Rouault - Remove old C style function prototypes + 2007-2008 - Even Rouault - Add unzip support for ZIP64 + + Copyright (C) 2007-2008 Even Rouault + + + Oct-2009 - Mathias Svensson - Removed cpl_* from symbol names (Even Rouault added them but since this is now moved to a new project (minizip64) I renamed them again). + Oct-2009 - Mathias Svensson - Fixed problem if uncompressed size was > 4G and compressed size was <4G + should only read the compressed/uncompressed size from the Zip64 format if + the size from normal header was 0xFFFFFFFF + Oct-2009 - Mathias Svensson - Applied some bug fixes from paches recived from Gilles Vollant + Oct-2009 - Mathias Svensson - Applied support to unzip files with compression mathod BZIP2 (bzip2 lib is required) + Patch created by Daniel Borca + + Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer + + Copyright (C) 1998 - 2010 Gilles Vollant, Even Rouault, Mathias Svensson + +*/ #include <stdio.h> #include <stdlib.h> #include <string.h> -#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> -#else -#include "zlib.h" + +#ifndef NOUNCRYPT + #define NOUNCRYPT #endif + +#include "zlib.h" #include "unzip.h" #ifdef STDC @@ -89,16 +119,14 @@ #define SIZEZIPLOCALHEADER (0x1e) - - const char unz_copyright[] = " unzip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll"; /* 
unz_file_info_interntal contain internal info about a file in zipfile*/ -typedef struct unz_file_info_internal_s +typedef struct unz_file_info64_internal_s { - uLong offset_curfile;/* relative offset of local header 4 bytes */ -} unz_file_info_internal; + ZPOS64_T offset_curfile;/* relative offset of local header 8 bytes */ +} unz_file_info64_internal; /* file_in_zip_read_info_s contain internal information about a file in zipfile, @@ -108,52 +136,61 @@ char *read_buffer; /* internal buffer for compressed data */ z_stream stream; /* zLib stream structure for inflate */ - uLong pos_in_zipfile; /* position in byte on the zipfile, for fseek*/ +#ifdef HAVE_BZIP2 + bz_stream bstream; /* bzLib stream structure for bziped */ +#endif + + ZPOS64_T pos_in_zipfile; /* position in byte on the zipfile, for fseek*/ uLong stream_initialised; /* flag set if stream structure is initialised*/ - uLong offset_local_extrafield;/* offset of the local extra field */ + ZPOS64_T offset_local_extrafield;/* offset of the local extra field */ uInt size_local_extrafield;/* size of the local extra field */ - uLong pos_local_extrafield; /* position in the local extra field in read*/ + ZPOS64_T pos_local_extrafield; /* position in the local extra field in read*/ + ZPOS64_T total_out_64; uLong crc32; /* crc32 of all data uncompressed */ uLong crc32_wait; /* crc32 we must obtain after decompress all */ - uLong rest_read_compressed; /* number of byte to be decompressed */ - uLong rest_read_uncompressed;/*number of byte to be obtained after decomp*/ - zlib_filefunc_def z_filefunc; + ZPOS64_T rest_read_compressed; /* number of byte to be decompressed */ + ZPOS64_T rest_read_uncompressed;/*number of byte to be obtained after decomp*/ + zlib_filefunc64_32_def z_filefunc; voidpf filestream; /* io structore of the zipfile */ uLong compression_method; /* compression method (0==store) */ - uLong byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ + ZPOS64_T byte_before_the_zipfile;/* byte 
before the zipfile, (>0 for sfx)*/ int raw; -} file_in_zip_read_info_s; +} file_in_zip64_read_info_s; -/* unz_s contain internal information about the zipfile +/* unz64_s contain internal information about the zipfile */ typedef struct { - zlib_filefunc_def z_filefunc; + zlib_filefunc64_32_def z_filefunc; + int is64bitOpenFunction; voidpf filestream; /* io structore of the zipfile */ - unz_global_info gi; /* public global information */ - uLong byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ - uLong num_file; /* number of the current file in the zipfile*/ - uLong pos_in_central_dir; /* pos of the current file in the central dir*/ - uLong current_file_ok; /* flag about the usability of the current file*/ - uLong central_pos; /* position of the beginning of the central dir*/ + unz_global_info64 gi; /* public global information */ + ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ + ZPOS64_T num_file; /* number of the current file in the zipfile*/ + ZPOS64_T pos_in_central_dir; /* pos of the current file in the central dir*/ + ZPOS64_T current_file_ok; /* flag about the usability of the current file*/ + ZPOS64_T central_pos; /* position of the beginning of the central dir*/ - uLong size_central_dir; /* size of the central directory */ - uLong offset_central_dir; /* offset of start of central directory with + ZPOS64_T size_central_dir; /* size of the central directory */ + ZPOS64_T offset_central_dir; /* offset of start of central directory with respect to the starting disk number */ - unz_file_info cur_file_info; /* public info about the current file in zip*/ - unz_file_info_internal cur_file_info_internal; /* private info about it*/ - file_in_zip_read_info_s* pfile_in_zip_read; /* structure about the current + unz_file_info64 cur_file_info; /* public info about the current file in zip*/ + unz_file_info64_internal cur_file_info_internal; /* private info about it*/ + file_in_zip64_read_info_s* pfile_in_zip_read; /* 
structure about the current file if we are decompressing it */ int encrypted; + + int isZip64; + # ifndef NOUNCRYPT unsigned long keys[3]; /* keys defining the pseudo-random sequence */ - const unsigned long* pcrc_32_tab; + const z_crc_t* pcrc_32_tab; # endif -} unz_s; +} unz64_s; #ifndef NOUNCRYPT @@ -167,18 +204,15 @@ */ -local int unzlocal_getByte OF(( - const zlib_filefunc_def* pzlib_filefunc_def, +local int unz64local_getByte OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi)); -local int unzlocal_getByte(pzlib_filefunc_def,filestream,pi) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - int *pi; +local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi) { unsigned char c; - int err = (int)ZREAD(*pzlib_filefunc_def,filestream,&c,1); + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); if (err==1) { *pi = (int)c; @@ -186,7 +220,7 @@ } else { - if (ZERROR(*pzlib_filefunc_def,filestream)) + if (ZERROR64(*pzlib_filefunc_def,filestream)) return UNZ_ERRNO; else return UNZ_EOF; @@ -197,26 +231,25 @@ /* =========================================================================== Reads a long in LSB order from the given gz_stream. 
Sets */ -local int unzlocal_getShort OF(( - const zlib_filefunc_def* pzlib_filefunc_def, +local int unz64local_getShort OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); -local int unzlocal_getShort (pzlib_filefunc_def,filestream,pX) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - uLong *pX; +local int unz64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) { uLong x ; - int i; + int i = 0; int err; - err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i); + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); x = (uLong)i; if (err==UNZ_OK) - err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i); - x += ((uLong)i)<<8; + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((uLong)i)<<8; if (err==UNZ_OK) *pX = x; @@ -225,33 +258,32 @@ return err; } -local int unzlocal_getLong OF(( - const zlib_filefunc_def* pzlib_filefunc_def, +local int unz64local_getLong OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); -local int unzlocal_getLong (pzlib_filefunc_def,filestream,pX) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - uLong *pX; +local int unz64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) { uLong x ; - int i; + int i = 0; int err; - err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i); + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); x = (uLong)i; if (err==UNZ_OK) - err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i); - x += ((uLong)i)<<8; + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((uLong)i)<<8; if (err==UNZ_OK) - err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i); - x += ((uLong)i)<<16; + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((uLong)i)<<16; if (err==UNZ_OK) - err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i); + err = 
unz64local_getByte(pzlib_filefunc_def,filestream,&i); x += ((uLong)i)<<24; if (err==UNZ_OK) @@ -261,11 +293,60 @@ return err; } +local int unz64local_getLong64 OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + ZPOS64_T *pX)); + + +local int unz64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + ZPOS64_T *pX) +{ + ZPOS64_T x ; + int i = 0; + int err; + + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (ZPOS64_T)i; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<8; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<16; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<24; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<32; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<40; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<48; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<56; + + if (err==UNZ_OK) + *pX = x; + else + *pX = 0; + return err; +} /* My own strcmpi / strcasecmp */ -local int strcmpcasenosensitive_internal (fileName1,fileName2) - const char* fileName1; - const char* fileName2; +local int strcmpcasenosensitive_internal (const char* fileName1, const char* fileName2) { for (;;) { @@ -306,10 +387,10 @@ (like 1 on Unix, 2 on Windows) */ -extern int ZEXPORT unzStringFileNameCompare (fileName1,fileName2,iCaseSensitivity) - const char* fileName1; - const char* fileName2; - int iCaseSensitivity; +extern int ZEXPORT unzStringFileNameCompare (const char* fileName1, + const char* fileName2, + int iCaseSensitivity) + { if (iCaseSensitivity==0) iCaseSensitivity=CASESENSITIVITYDEFAULTVALUE; @@ 
-328,25 +409,20 @@ Locate the Central directory of a zipfile (at the end, just before the global comment) */ -local uLong unzlocal_SearchCentralDir OF(( - const zlib_filefunc_def* pzlib_filefunc_def, - voidpf filestream)); - -local uLong unzlocal_SearchCentralDir(pzlib_filefunc_def,filestream) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; +local ZPOS64_T unz64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); +local ZPOS64_T unz64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { unsigned char* buf; - uLong uSizeFile; - uLong uBackRead; - uLong uMaxBack=0xffff; /* maximum size of global comment */ - uLong uPosFound=0; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; - if (ZSEEK(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) + if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) return 0; - uSizeFile = ZTELL(*pzlib_filefunc_def,filestream); + uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); if (uMaxBack>uSizeFile) uMaxBack = uSizeFile; @@ -358,7 +434,8 @@ uBackRead = 4; while (uBackRead<uMaxBack) { - uLong uReadSize,uReadPos ; + uLong uReadSize; + ZPOS64_T uReadPos ; int i; if (uBackRead+BUFREADCOMMENT>uMaxBack) uBackRead = uMaxBack; @@ -367,11 +444,11 @@ uReadPos = uSizeFile-uBackRead ; uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? 
- (BUFREADCOMMENT+4) : (uSizeFile-uReadPos); - if (ZSEEK(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) break; - if (ZREAD(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) break; for (i=(int)uReadSize-3; (i--)>0;) @@ -389,6 +466,112 @@ return uPosFound; } + +/* + Locate the Central directory 64 of a zipfile (at the end, just before + the global comment) +*/ +local ZPOS64_T unz64local_SearchCentralDir64 OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream)); + +local ZPOS64_T unz64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream) +{ + unsigned char* buf; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; + uLong uL; + ZPOS64_T relativeOffset; + + if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) + return 0; + + + uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); + + if (uMaxBack>uSizeFile) + uMaxBack = uSizeFile; + + buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); + if (buf==NULL) + return 0; + + uBackRead = 4; + while (uBackRead<uMaxBack) + { + uLong uReadSize; + ZPOS64_T uReadPos; + int i; + if (uBackRead+BUFREADCOMMENT>uMaxBack) + uBackRead = uMaxBack; + else + uBackRead+=BUFREADCOMMENT; + uReadPos = uSizeFile-uBackRead ; + + uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? 
+ (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + break; + + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + break; + + for (i=(int)uReadSize-3; (i--)>0;) + if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && + ((*(buf+i+2))==0x06) && ((*(buf+i+3))==0x07)) + { + uPosFound = uReadPos+i; + break; + } + + if (uPosFound!=0) + break; + } + TRYFREE(buf); + if (uPosFound == 0) + return 0; + + /* Zip64 end of central directory locator */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, uPosFound,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature, already checked */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + + /* number of the disk with the start of the zip64 end of central directory */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + if (uL != 0) + return 0; + + /* relative offset of the zip64 end of central directory record */ + if (unz64local_getLong64(pzlib_filefunc_def,filestream,&relativeOffset)!=UNZ_OK) + return 0; + + /* total number of disks */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + if (uL != 1) + return 0; + + /* Goto end of central directory record */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, relativeOffset,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + + if (uL != 0x06064b50) + return 0; + + return relativeOffset; +} + /* Open a Zip file. path contain the full pathname (by example, on a Windows NT computer "c:\\test\\zlib114.zip" or on an Unix computer @@ -398,19 +581,20 @@ Else, the return value is a unzFile Handle, usable with other function of this unzip package. 
*/ -extern unzFile ZEXPORT unzOpen2 (path, pzlib_filefunc_def) - const char *path; - zlib_filefunc_def* pzlib_filefunc_def; -{ - unz_s us; - unz_s *s; - uLong central_pos,uL; +local unzFile unzOpenInternal (const void *path, + zlib_filefunc64_32_def* pzlib_filefunc64_32_def, + int is64bitOpenFunction) +{ + unz64_s us; + unz64_s *s; + ZPOS64_T central_pos; + uLong uL; uLong number_disk; /* number of the current dist, used for spaning ZIP, unsupported, always 0*/ uLong number_disk_with_CD; /* number the the disk with central dir, used for spaning ZIP, unsupported, always 0*/ - uLong number_entry_CD; /* total number of entries in + ZPOS64_T number_entry_CD; /* total number of entries in the central dir (same than number_entry on nospan) */ @@ -419,63 +603,137 @@ if (unz_copyright[0]!=' ') return NULL; - if (pzlib_filefunc_def==NULL) - fill_fopen_filefunc(&us.z_filefunc); + us.z_filefunc.zseek32_file = NULL; + us.z_filefunc.ztell32_file = NULL; + if (pzlib_filefunc64_32_def==NULL) + fill_fopen64_filefunc(&us.z_filefunc.zfile_func64); else - us.z_filefunc = *pzlib_filefunc_def; + us.z_filefunc = *pzlib_filefunc64_32_def; + us.is64bitOpenFunction = is64bitOpenFunction; - us.filestream= (*(us.z_filefunc.zopen_file))(us.z_filefunc.opaque, + + + us.filestream = ZOPEN64(us.z_filefunc, path, ZLIB_FILEFUNC_MODE_READ | ZLIB_FILEFUNC_MODE_EXISTING); if (us.filestream==NULL) return NULL; - central_pos = unzlocal_SearchCentralDir(&us.z_filefunc,us.filestream); - if (central_pos==0) - err=UNZ_ERRNO; + central_pos = unz64local_SearchCentralDir64(&us.z_filefunc,us.filestream); + if (central_pos) + { + uLong uS; + ZPOS64_T uL64; + + us.isZip64 = 1; - if (ZSEEK(us.z_filefunc, us.filestream, + if (ZSEEK64(us.z_filefunc, us.filestream, central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0) err=UNZ_ERRNO; - /* the signature, already checked */ - if (unzlocal_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) - err=UNZ_ERRNO; + /* the signature, already checked */ + if 
(unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; - /* number of this disk */ - if (unzlocal_getShort(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK) - err=UNZ_ERRNO; + /* size of zip64 end of central directory record */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&uL64)!=UNZ_OK) + err=UNZ_ERRNO; - /* number of the disk with the start of the central directory */ - if (unzlocal_getShort(&us.z_filefunc, us.filestream,&number_disk_with_CD)!=UNZ_OK) - err=UNZ_ERRNO; + /* version made by */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uS)!=UNZ_OK) + err=UNZ_ERRNO; - /* total number of entries in the central dir on this disk */ - if (unzlocal_getShort(&us.z_filefunc, us.filestream,&us.gi.number_entry)!=UNZ_OK) - err=UNZ_ERRNO; + /* version needed to extract */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uS)!=UNZ_OK) + err=UNZ_ERRNO; - /* total number of entries in the central dir */ - if (unzlocal_getShort(&us.z_filefunc, us.filestream,&number_entry_CD)!=UNZ_OK) - err=UNZ_ERRNO; + /* number of this disk */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK) + err=UNZ_ERRNO; - if ((number_entry_CD!=us.gi.number_entry) || - (number_disk_with_CD!=0) || - (number_disk!=0)) - err=UNZ_BADZIPFILE; + /* number of the disk with the start of the central directory */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&number_disk_with_CD)!=UNZ_OK) + err=UNZ_ERRNO; - /* size of the central directory */ - if (unzlocal_getLong(&us.z_filefunc, us.filestream,&us.size_central_dir)!=UNZ_OK) - err=UNZ_ERRNO; + /* total number of entries in the central directory on this disk */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.gi.number_entry)!=UNZ_OK) + err=UNZ_ERRNO; + + /* total number of entries in the central directory */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&number_entry_CD)!=UNZ_OK) + err=UNZ_ERRNO; + + if ((number_entry_CD!=us.gi.number_entry) || + 
(number_disk_with_CD!=0) || + (number_disk!=0)) + err=UNZ_BADZIPFILE; - /* offset of start of central directory with respect to the + /* size of the central directory */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.size_central_dir)!=UNZ_OK) + err=UNZ_ERRNO; + + /* offset of start of central directory with respect to the starting disk number */ - if (unzlocal_getLong(&us.z_filefunc, us.filestream,&us.offset_central_dir)!=UNZ_OK) - err=UNZ_ERRNO; + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.offset_central_dir)!=UNZ_OK) + err=UNZ_ERRNO; - /* zipfile comment length */ - if (unzlocal_getShort(&us.z_filefunc, us.filestream,&us.gi.size_comment)!=UNZ_OK) - err=UNZ_ERRNO; + us.gi.size_comment = 0; + } + else + { + central_pos = unz64local_SearchCentralDir(&us.z_filefunc,us.filestream); + if (central_pos==0) + err=UNZ_ERRNO; + + us.isZip64 = 0; + + if (ZSEEK64(us.z_filefunc, us.filestream, + central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0) + err=UNZ_ERRNO; + + /* the signature, already checked */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + + /* number of this disk */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK) + err=UNZ_ERRNO; + + /* number of the disk with the start of the central directory */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&number_disk_with_CD)!=UNZ_OK) + err=UNZ_ERRNO; + + /* total number of entries in the central dir on this disk */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + us.gi.number_entry = uL; + + /* total number of entries in the central dir */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + number_entry_CD = uL; + + if ((number_entry_CD!=us.gi.number_entry) || + (number_disk_with_CD!=0) || + (number_disk!=0)) + err=UNZ_BADZIPFILE; + + /* size of the central directory */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + 
err=UNZ_ERRNO; + us.size_central_dir = uL; + + /* offset of start of central directory with respect to the + starting disk number */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + us.offset_central_dir = uL; + + /* zipfile comment length */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&us.gi.size_comment)!=UNZ_OK) + err=UNZ_ERRNO; + } if ((central_pos<us.offset_central_dir+us.size_central_dir) && (err==UNZ_OK)) @@ -483,7 +741,7 @@ if (err!=UNZ_OK) { - ZCLOSE(us.z_filefunc, us.filestream); + ZCLOSE64(us.z_filefunc, us.filestream); return NULL; } @@ -494,36 +752,70 @@ us.encrypted = 0; - s=(unz_s*)ALLOC(sizeof(unz_s)); - *s=us; - unzGoToFirstFile((unzFile)s); + s=(unz64_s*)ALLOC(sizeof(unz64_s)); + if( s != NULL) + { + *s=us; + unzGoToFirstFile((unzFile)s); + } return (unzFile)s; } -extern unzFile ZEXPORT unzOpen (path) - const char *path; +extern unzFile ZEXPORT unzOpen2 (const char *path, + zlib_filefunc_def* pzlib_filefunc32_def) { - return unzOpen2(path, NULL); + if (pzlib_filefunc32_def != NULL) + { + zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; + fill_zlib_filefunc64_32_def_from_filefunc32(&zlib_filefunc64_32_def_fill,pzlib_filefunc32_def); + return unzOpenInternal(path, &zlib_filefunc64_32_def_fill, 0); + } + else + return unzOpenInternal(path, NULL, 0); +} + +extern unzFile ZEXPORT unzOpen2_64 (const void *path, + zlib_filefunc64_def* pzlib_filefunc_def) +{ + if (pzlib_filefunc_def != NULL) + { + zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; + zlib_filefunc64_32_def_fill.zfile_func64 = *pzlib_filefunc_def; + zlib_filefunc64_32_def_fill.ztell32_file = NULL; + zlib_filefunc64_32_def_fill.zseek32_file = NULL; + return unzOpenInternal(path, &zlib_filefunc64_32_def_fill, 1); + } + else + return unzOpenInternal(path, NULL, 1); +} + +extern unzFile ZEXPORT unzOpen (const char *path) +{ + return unzOpenInternal(path, NULL, 0); +} + +extern unzFile ZEXPORT unzOpen64 (const void *path) +{ + return 
unzOpenInternal(path, NULL, 1); } /* - Close a ZipFile opened with unzipOpen. - If there is files inside the .Zip opened with unzipOpenCurrentFile (see later), - these files MUST be closed with unzipCloseCurrentFile before call unzipClose. + Close a ZipFile opened with unzOpen. + If there is files inside the .Zip opened with unzOpenCurrentFile (see later), + these files MUST be closed with unzCloseCurrentFile before call unzClose. return UNZ_OK if there is no problem. */ -extern int ZEXPORT unzClose (file) - unzFile file; +extern int ZEXPORT unzClose (unzFile file) { - unz_s* s; + unz64_s* s; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; if (s->pfile_in_zip_read!=NULL) unzCloseCurrentFile(file); - ZCLOSE(s->z_filefunc, s->filestream); + ZCLOSE64(s->z_filefunc, s->filestream); TRYFREE(s); return UNZ_OK; } @@ -533,28 +825,34 @@ Write info about the ZipFile in the *pglobal_info structure. No preparation of the structure is needed return UNZ_OK if there is no problem. 
*/ -extern int ZEXPORT unzGetGlobalInfo (file,pglobal_info) - unzFile file; - unz_global_info *pglobal_info; +extern int ZEXPORT unzGetGlobalInfo64 (unzFile file, unz_global_info64* pglobal_info) { - unz_s* s; + unz64_s* s; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; *pglobal_info=s->gi; return UNZ_OK; } - +extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info32) +{ + unz64_s* s; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + /* to do : check if number_entry is not truncated */ + pglobal_info32->number_entry = (uLong)s->gi.number_entry; + pglobal_info32->size_comment = s->gi.size_comment; + return UNZ_OK; +} /* Translate date/time from Dos format to tm_unz (readable more easilty) */ -local void unzlocal_DosDateToTmuDate (ulDosDate, ptm) - uLong ulDosDate; - tm_unz* ptm; +local void unz64local_DosDateToTmuDate (ZPOS64_T ulDosDate, tm_unz* ptm) { - uLong uDate; - uDate = (uLong)(ulDosDate>>16); + ZPOS64_T uDate; + uDate = (ZPOS64_T)(ulDosDate>>16); ptm->tm_mday = (uInt)(uDate&0x1f) ; ptm->tm_mon = (uInt)((((uDate)&0x1E0)/0x20)-1) ; ptm->tm_year = (uInt)(((uDate&0x0FE00)/0x0200)+1980) ; @@ -567,9 +865,9 @@ /* Get Info about the current file in the zipfile, with internal only info */ -local int unzlocal_GetCurrentFileInfoInternal OF((unzFile file, - unz_file_info *pfile_info, - unz_file_info_internal +local int unz64local_GetCurrentFileInfoInternal OF((unzFile file, + unz_file_info64 *pfile_info, + unz_file_info64_internal *pfile_info_internal, char *szFileName, uLong fileNameBufferSize, @@ -578,92 +876,93 @@ char *szComment, uLong commentBufferSize)); -local int unzlocal_GetCurrentFileInfoInternal (file, - pfile_info, - pfile_info_internal, - szFileName, fileNameBufferSize, - extraField, extraFieldBufferSize, - szComment, commentBufferSize) - unzFile file; - unz_file_info *pfile_info; - unz_file_info_internal *pfile_info_internal; - char *szFileName; - uLong fileNameBufferSize; - void 
*extraField; - uLong extraFieldBufferSize; - char *szComment; - uLong commentBufferSize; -{ - unz_s* s; - unz_file_info file_info; - unz_file_info_internal file_info_internal; +local int unz64local_GetCurrentFileInfoInternal (unzFile file, + unz_file_info64 *pfile_info, + unz_file_info64_internal + *pfile_info_internal, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize) +{ + unz64_s* s; + unz_file_info64 file_info; + unz_file_info64_internal file_info_internal; int err=UNZ_OK; uLong uMagic; long lSeek=0; + uLong uL; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; - if (ZSEEK(s->z_filefunc, s->filestream, + s=(unz64_s*)file; + if (ZSEEK64(s->z_filefunc, s->filestream, s->pos_in_central_dir+s->byte_before_the_zipfile, ZLIB_FILEFUNC_SEEK_SET)!=0) err=UNZ_ERRNO; /* we check the magic */ - if (err==UNZ_OK) { - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK) + if (err==UNZ_OK) + { + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK) err=UNZ_ERRNO; else if (uMagic!=0x02014b50) err=UNZ_BADZIPFILE; } - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.version) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.version) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.version_needed) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.version_needed) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.flag) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.flag) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.compression_method) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.compression_method) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getLong(&s->z_filefunc, 
s->filestream,&file_info.dosDate) != UNZ_OK) + if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.dosDate) != UNZ_OK) err=UNZ_ERRNO; - unzlocal_DosDateToTmuDate(file_info.dosDate,&file_info.tmu_date); + unz64local_DosDateToTmuDate(file_info.dosDate,&file_info.tmu_date); - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&file_info.crc) != UNZ_OK) + if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.crc) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&file_info.compressed_size) != UNZ_OK) + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) err=UNZ_ERRNO; + file_info.compressed_size = uL; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&file_info.uncompressed_size) != UNZ_OK) + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) err=UNZ_ERRNO; + file_info.uncompressed_size = uL; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.size_filename) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_filename) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_extra) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_extra) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_comment) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_comment) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.disk_num_start) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.disk_num_start) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&file_info.internal_fa) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.internal_fa) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&file_info.external_fa) != UNZ_OK) + if 
(unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.external_fa) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&file_info_internal.offset_curfile) != UNZ_OK) + // relative offset of local header + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) err=UNZ_ERRNO; + file_info_internal.offset_curfile = uL; lSeek+=file_info.size_filename; if ((err==UNZ_OK) && (szFileName!=NULL)) @@ -678,34 +977,105 @@ uSizeRead = fileNameBufferSize; if ((file_info.size_filename>0) && (fileNameBufferSize>0)) - if (ZREAD(s->z_filefunc, s->filestream,szFileName,uSizeRead)!=uSizeRead) + if (ZREAD64(s->z_filefunc, s->filestream,szFileName,uSizeRead)!=uSizeRead) err=UNZ_ERRNO; lSeek -= uSizeRead; } - + // Read extrafield if ((err==UNZ_OK) && (extraField!=NULL)) { - uLong uSizeRead ; + ZPOS64_T uSizeRead ; if (file_info.size_file_extra<extraFieldBufferSize) uSizeRead = file_info.size_file_extra; else uSizeRead = extraFieldBufferSize; - if (lSeek!=0) { - if (ZSEEK(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + if (lSeek!=0) + { + if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) lSeek=0; else err=UNZ_ERRNO; } + if ((file_info.size_file_extra>0) && (extraFieldBufferSize>0)) - if (ZREAD(s->z_filefunc, s->filestream,extraField,uSizeRead)!=uSizeRead) + if (ZREAD64(s->z_filefunc, s->filestream,extraField,(uLong)uSizeRead)!=uSizeRead) err=UNZ_ERRNO; - lSeek += file_info.size_file_extra - uSizeRead; + + lSeek += file_info.size_file_extra - (uLong)uSizeRead; } else - lSeek+=file_info.size_file_extra; + lSeek += file_info.size_file_extra; + + if ((err==UNZ_OK) && (file_info.size_file_extra != 0)) + { + uLong acc = 0; + + // since lSeek now points to after the extra field we need to move back + lSeek -= file_info.size_file_extra; + + if (lSeek!=0) + { + if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + lSeek=0; + else + err=UNZ_ERRNO; + } + + while(acc < 
file_info.size_file_extra) + { + uLong headerId; + uLong dataSize; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&headerId) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&dataSize) != UNZ_OK) + err=UNZ_ERRNO; + + /* ZIP64 extra fields */ + if (headerId == 0x0001) + { + uLong uL; + + if(file_info.uncompressed_size == MAXU32) + { + if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.uncompressed_size) != UNZ_OK) + err=UNZ_ERRNO; + } + + if(file_info.compressed_size == MAXU32) + { + if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.compressed_size) != UNZ_OK) + err=UNZ_ERRNO; + } + + if(file_info_internal.offset_curfile == MAXU32) + { + /* Relative Header offset */ + if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info_internal.offset_curfile) != UNZ_OK) + err=UNZ_ERRNO; + } + + if(file_info.disk_num_start == MAXU32) + { + /* Disk Start Number */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) + err=UNZ_ERRNO; + } + + } + else + { + if (ZSEEK64(s->z_filefunc, s->filestream,dataSize,ZLIB_FILEFUNC_SEEK_CUR)!=0) + err=UNZ_ERRNO; + } + + acc += 2 + 2 + dataSize; + } + } if ((err==UNZ_OK) && (szComment!=NULL)) { @@ -718,20 +1088,23 @@ else uSizeRead = commentBufferSize; - if (lSeek!=0) { - if (ZSEEK(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + if (lSeek!=0) + { + if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) lSeek=0; else err=UNZ_ERRNO; } + if ((file_info.size_file_comment>0) && (commentBufferSize>0)) - if (ZREAD(s->z_filefunc, s->filestream,szComment,uSizeRead)!=uSizeRead) + if (ZREAD64(s->z_filefunc, s->filestream,szComment,uSizeRead)!=uSizeRead) err=UNZ_ERRNO; lSeek+=file_info.size_file_comment - uSizeRead; } else lSeek+=file_info.size_file_comment; + if ((err==UNZ_OK) && (pfile_info!=NULL)) *pfile_info=file_info; @@ -748,41 +1121,70 @@ No preparation of the structure is needed return UNZ_OK if there is no 
problem. */ -extern int ZEXPORT unzGetCurrentFileInfo (file, - pfile_info, - szFileName, fileNameBufferSize, - extraField, extraFieldBufferSize, - szComment, commentBufferSize) - unzFile file; - unz_file_info *pfile_info; - char *szFileName; - uLong fileNameBufferSize; - void *extraField; - uLong extraFieldBufferSize; - char *szComment; - uLong commentBufferSize; +extern int ZEXPORT unzGetCurrentFileInfo64 (unzFile file, + unz_file_info64 * pfile_info, + char * szFileName, uLong fileNameBufferSize, + void *extraField, uLong extraFieldBufferSize, + char* szComment, uLong commentBufferSize) { - return unzlocal_GetCurrentFileInfoInternal(file,pfile_info,NULL, + return unz64local_GetCurrentFileInfoInternal(file,pfile_info,NULL, szFileName,fileNameBufferSize, extraField,extraFieldBufferSize, szComment,commentBufferSize); } +extern int ZEXPORT unzGetCurrentFileInfo (unzFile file, + unz_file_info * pfile_info, + char * szFileName, uLong fileNameBufferSize, + void *extraField, uLong extraFieldBufferSize, + char* szComment, uLong commentBufferSize) +{ + int err; + unz_file_info64 file_info64; + err = unz64local_GetCurrentFileInfoInternal(file,&file_info64,NULL, + szFileName,fileNameBufferSize, + extraField,extraFieldBufferSize, + szComment,commentBufferSize); + if ((err==UNZ_OK) && (pfile_info != NULL)) + { + pfile_info->version = file_info64.version; + pfile_info->version_needed = file_info64.version_needed; + pfile_info->flag = file_info64.flag; + pfile_info->compression_method = file_info64.compression_method; + pfile_info->dosDate = file_info64.dosDate; + pfile_info->crc = file_info64.crc; + + pfile_info->size_filename = file_info64.size_filename; + pfile_info->size_file_extra = file_info64.size_file_extra; + pfile_info->size_file_comment = file_info64.size_file_comment; + + pfile_info->disk_num_start = file_info64.disk_num_start; + pfile_info->internal_fa = file_info64.internal_fa; + pfile_info->external_fa = file_info64.external_fa; + + pfile_info->tmu_date = 
file_info64.tmu_date, + + + pfile_info->compressed_size = (uLong)file_info64.compressed_size; + pfile_info->uncompressed_size = (uLong)file_info64.uncompressed_size; + + } + return err; +} /* Set the current file of the zipfile to the first file. return UNZ_OK if there is no problem */ -extern int ZEXPORT unzGoToFirstFile (file) - unzFile file; +extern int ZEXPORT unzGoToFirstFile (unzFile file) { int err=UNZ_OK; - unz_s* s; + unz64_s* s; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; s->pos_in_central_dir=s->offset_central_dir; s->num_file=0; - err=unzlocal_GetCurrentFileInfoInternal(file,&s->cur_file_info, + err=unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, &s->cur_file_info_internal, NULL,0,NULL,0,NULL,0); s->current_file_ok = (err == UNZ_OK); @@ -794,15 +1196,14 @@ return UNZ_OK if there is no problem return UNZ_END_OF_LIST_OF_FILE if the actual file was the latest. */ -extern int ZEXPORT unzGoToNextFile (file) - unzFile file; +extern int ZEXPORT unzGoToNextFile (unzFile file) { - unz_s* s; + unz64_s* s; int err; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; if (!s->current_file_ok) return UNZ_END_OF_LIST_OF_FILE; if (s->gi.number_entry != 0xffff) /* 2^16 files overflow hack */ @@ -812,7 +1213,7 @@ s->pos_in_central_dir += SIZECENTRALDIRITEM + s->cur_file_info.size_filename + s->cur_file_info.size_file_extra + s->cur_file_info.size_file_comment ; s->num_file++; - err = unzlocal_GetCurrentFileInfoInternal(file,&s->cur_file_info, + err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, &s->cur_file_info_internal, NULL,0,NULL,0,NULL,0); s->current_file_ok = (err == UNZ_OK); @@ -822,27 +1223,24 @@ /* Try locate the file szFileName in the zipfile. - For the iCaseSensitivity signification, see unzipStringFileNameCompare + For the iCaseSensitivity signification, see unzStringFileNameCompare return value : UNZ_OK if the file is found. It becomes the current file. 
UNZ_END_OF_LIST_OF_FILE if the file is not found */ -extern int ZEXPORT unzLocateFile (file, szFileName, iCaseSensitivity) - unzFile file; - const char *szFileName; - int iCaseSensitivity; +extern int ZEXPORT unzLocateFile (unzFile file, const char *szFileName, int iCaseSensitivity) { - unz_s* s; + unz64_s* s; int err; /* We remember the 'current' position in the file so that we can jump * back there if we fail. */ - unz_file_info cur_file_infoSaved; - unz_file_info_internal cur_file_info_internalSaved; - uLong num_fileSaved; - uLong pos_in_central_dirSaved; + unz_file_info64 cur_file_infoSaved; + unz_file_info64_internal cur_file_info_internalSaved; + ZPOS64_T num_fileSaved; + ZPOS64_T pos_in_central_dirSaved; if (file==NULL) @@ -851,7 +1249,7 @@ if (strlen(szFileName)>=UNZ_MAXFILENAMEINZIP) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; if (!s->current_file_ok) return UNZ_END_OF_LIST_OF_FILE; @@ -866,7 +1264,7 @@ while (err == UNZ_OK) { char szCurrentFileName[UNZ_MAXFILENAMEINZIP+1]; - err = unzGetCurrentFileInfo(file,NULL, + err = unzGetCurrentFileInfo64(file,NULL, szCurrentFileName,sizeof(szCurrentFileName)-1, NULL,0,NULL,0); if (err == UNZ_OK) @@ -902,20 +1300,18 @@ /* typedef struct unz_file_pos_s { - uLong pos_in_zip_directory; // offset in file - uLong num_of_file; // # of file + ZPOS64_T pos_in_zip_directory; // offset in file + ZPOS64_T num_of_file; // # of file } unz_file_pos; */ -extern int ZEXPORT unzGetFilePos(file, file_pos) - unzFile file; - unz_file_pos* file_pos; +extern int ZEXPORT unzGetFilePos64(unzFile file, unz64_file_pos* file_pos) { - unz_s* s; + unz64_s* s; if (file==NULL || file_pos==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; if (!s->current_file_ok) return UNZ_END_OF_LIST_OF_FILE; @@ -925,23 +1321,35 @@ return UNZ_OK; } -extern int ZEXPORT unzGoToFilePos(file, file_pos) - unzFile file; - unz_file_pos* file_pos; +extern int ZEXPORT unzGetFilePos( + unzFile file, + unz_file_pos* file_pos) +{ + 
unz64_file_pos file_pos64; + int err = unzGetFilePos64(file,&file_pos64); + if (err==UNZ_OK) + { + file_pos->pos_in_zip_directory = (uLong)file_pos64.pos_in_zip_directory; + file_pos->num_of_file = (uLong)file_pos64.num_of_file; + } + return err; +} + +extern int ZEXPORT unzGoToFilePos64(unzFile file, const unz64_file_pos* file_pos) { - unz_s* s; + unz64_s* s; int err; if (file==NULL || file_pos==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; /* jump to the right spot */ s->pos_in_central_dir = file_pos->pos_in_zip_directory; s->num_file = file_pos->num_of_file; /* set the current file */ - err = unzlocal_GetCurrentFileInfoInternal(file,&s->cur_file_info, + err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, &s->cur_file_info_internal, NULL,0,NULL,0,NULL,0); /* return results */ @@ -949,6 +1357,19 @@ return err; } +extern int ZEXPORT unzGoToFilePos( + unzFile file, + unz_file_pos* file_pos) +{ + unz64_file_pos file_pos64; + if (file_pos == NULL) + return UNZ_PARAMERROR; + + file_pos64.pos_in_zip_directory = file_pos->pos_in_zip_directory; + file_pos64.num_of_file = file_pos->num_of_file; + return unzGoToFilePos64(file,&file_pos64); +} + /* // Unzip Helper Functions - should be here? 
/////////////////////////////////////////// @@ -961,13 +1382,9 @@ store in *piSizeVar the size of extra info in local header (filename and size of extra field data) */ -local int unzlocal_CheckCurrentFileCoherencyHeader (s,piSizeVar, - poffset_local_extrafield, - psize_local_extrafield) - unz_s* s; - uInt* piSizeVar; - uLong *poffset_local_extrafield; - uInt *psize_local_extrafield; +local int unz64local_CheckCurrentFileCoherencyHeader (unz64_s* s, uInt* piSizeVar, + ZPOS64_T * poffset_local_extrafield, + uInt * psize_local_extrafield) { uLong uMagic,uData,uFlags; uLong size_filename; @@ -978,66 +1395,66 @@ *poffset_local_extrafield = 0; *psize_local_extrafield = 0; - if (ZSEEK(s->z_filefunc, s->filestream,s->cur_file_info_internal.offset_curfile + + if (ZSEEK64(s->z_filefunc, s->filestream,s->cur_file_info_internal.offset_curfile + s->byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET)!=0) return UNZ_ERRNO; - if (err==UNZ_OK) { - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK) + if (err==UNZ_OK) + { + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK) err=UNZ_ERRNO; else if (uMagic!=0x04034b50) err=UNZ_BADZIPFILE; } - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) err=UNZ_ERRNO; /* else if ((err==UNZ_OK) && (uData!=s->cur_file_info.wVersion)) err=UNZ_BADZIPFILE; */ - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&uFlags) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&uFlags) != UNZ_OK) err=UNZ_ERRNO; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) err=UNZ_ERRNO; else if ((err==UNZ_OK) && (uData!=s->cur_file_info.compression_method)) err=UNZ_BADZIPFILE; if ((err==UNZ_OK) && (s->cur_file_info.compression_method!=0) && +/* #ifdef HAVE_BZIP2 */ + (s->cur_file_info.compression_method!=Z_BZIP2ED) && +/* 
#endif */ (s->cur_file_info.compression_method!=Z_DEFLATED)) err=UNZ_BADZIPFILE; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* date/time */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* date/time */ err=UNZ_ERRNO; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* crc */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* crc */ err=UNZ_ERRNO; - else if ((err==UNZ_OK) && (uData!=s->cur_file_info.crc) && - ((uFlags & 8)==0)) + else if ((err==UNZ_OK) && (uData!=s->cur_file_info.crc) && ((uFlags & 8)==0)) err=UNZ_BADZIPFILE; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size compr */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size compr */ err=UNZ_ERRNO; - else if ((err==UNZ_OK) && (uData!=s->cur_file_info.compressed_size) && - ((uFlags & 8)==0)) + else if (uData != 0xFFFFFFFF && (err==UNZ_OK) && (uData!=s->cur_file_info.compressed_size) && ((uFlags & 8)==0)) err=UNZ_BADZIPFILE; - if (unzlocal_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size uncompr */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size uncompr */ err=UNZ_ERRNO; - else if ((err==UNZ_OK) && (uData!=s->cur_file_info.uncompressed_size) && - ((uFlags & 8)==0)) + else if (uData != 0xFFFFFFFF && (err==UNZ_OK) && (uData!=s->cur_file_info.uncompressed_size) && ((uFlags & 8)==0)) err=UNZ_BADZIPFILE; - - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&size_filename) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&size_filename) != UNZ_OK) err=UNZ_ERRNO; else if ((err==UNZ_OK) && (size_filename!=s->cur_file_info.size_filename)) err=UNZ_BADZIPFILE; *piSizeVar += (uInt)size_filename; - if (unzlocal_getShort(&s->z_filefunc, s->filestream,&size_extra_field) != UNZ_OK) + if (unz64local_getShort(&s->z_filefunc, s->filestream,&size_extra_field) != UNZ_OK) err=UNZ_ERRNO; 
*poffset_local_extrafield= s->cur_file_info_internal.offset_curfile + SIZEZIPLOCALHEADER + size_filename; @@ -1052,18 +1469,14 @@ Open for reading data the current file in the zipfile. If there is no error and the file is opened, the return value is UNZ_OK. */ -extern int ZEXPORT unzOpenCurrentFile3 (file, method, level, raw, password) - unzFile file; - int* method; - int* level; - int raw; - const char* password; +extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, + int* level, int raw, const char* password) { int err=UNZ_OK; uInt iSizeVar; - unz_s* s; - file_in_zip_read_info_s* pfile_in_zip_read_info; - uLong offset_local_extrafield; /* offset of the local extra field */ + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + ZPOS64_T offset_local_extrafield; /* offset of the local extra field */ uInt size_local_extrafield; /* size of the local extra field */ # ifndef NOUNCRYPT char source[12]; @@ -1074,19 +1487,17 @@ if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; if (!s->current_file_ok) return UNZ_PARAMERROR; if (s->pfile_in_zip_read != NULL) unzCloseCurrentFile(file); - if (unzlocal_CheckCurrentFileCoherencyHeader(s,&iSizeVar, - &offset_local_extrafield,&size_local_extrafield)!=UNZ_OK) + if (unz64local_CheckCurrentFileCoherencyHeader(s,&iSizeVar, &offset_local_extrafield,&size_local_extrafield)!=UNZ_OK) return UNZ_BADZIPFILE; - pfile_in_zip_read_info = (file_in_zip_read_info_s*) - ALLOC(sizeof(file_in_zip_read_info_s)); + pfile_in_zip_read_info = (file_in_zip64_read_info_s*)ALLOC(sizeof(file_in_zip64_read_info_s)); if (pfile_in_zip_read_info==NULL) return UNZ_INTERNALERROR; @@ -1119,31 +1530,60 @@ } if ((s->cur_file_info.compression_method!=0) && +/* #ifdef HAVE_BZIP2 */ + (s->cur_file_info.compression_method!=Z_BZIP2ED) && +/* #endif */ (s->cur_file_info.compression_method!=Z_DEFLATED)) + err=UNZ_BADZIPFILE; pfile_in_zip_read_info->crc32_wait=s->cur_file_info.crc; pfile_in_zip_read_info->crc32=0; - 
pfile_in_zip_read_info->compression_method = - s->cur_file_info.compression_method; + pfile_in_zip_read_info->total_out_64=0; + pfile_in_zip_read_info->compression_method = s->cur_file_info.compression_method; pfile_in_zip_read_info->filestream=s->filestream; pfile_in_zip_read_info->z_filefunc=s->z_filefunc; pfile_in_zip_read_info->byte_before_the_zipfile=s->byte_before_the_zipfile; pfile_in_zip_read_info->stream.total_out = 0; - if ((s->cur_file_info.compression_method==Z_DEFLATED) && - (!raw)) + if ((s->cur_file_info.compression_method==Z_BZIP2ED) && (!raw)) { +#ifdef HAVE_BZIP2 + pfile_in_zip_read_info->bstream.bzalloc = (void *(*) (void *, int, int))0; + pfile_in_zip_read_info->bstream.bzfree = (free_func)0; + pfile_in_zip_read_info->bstream.opaque = (voidpf)0; + pfile_in_zip_read_info->bstream.state = (voidpf)0; + pfile_in_zip_read_info->stream.zalloc = (alloc_func)0; pfile_in_zip_read_info->stream.zfree = (free_func)0; pfile_in_zip_read_info->stream.opaque = (voidpf)0; pfile_in_zip_read_info->stream.next_in = (voidpf)0; pfile_in_zip_read_info->stream.avail_in = 0; + err=BZ2_bzDecompressInit(&pfile_in_zip_read_info->bstream, 0, 0); + if (err == Z_OK) + pfile_in_zip_read_info->stream_initialised=Z_BZIP2ED; + else + { + TRYFREE(pfile_in_zip_read_info); + return err; + } +#else + pfile_in_zip_read_info->raw=1; +#endif + } + else if ((s->cur_file_info.compression_method==Z_DEFLATED) && (!raw)) + { + pfile_in_zip_read_info->stream.zalloc = (alloc_func)0; + pfile_in_zip_read_info->stream.zfree = (free_func)0; + pfile_in_zip_read_info->stream.opaque = (voidpf)0; + pfile_in_zip_read_info->stream.next_in = 0; + pfile_in_zip_read_info->stream.avail_in = 0; + err=inflateInit2(&pfile_in_zip_read_info->stream, -MAX_WBITS); if (err == Z_OK) - pfile_in_zip_read_info->stream_initialised=1; + pfile_in_zip_read_info->stream_initialised=Z_DEFLATED; else { TRYFREE(pfile_in_zip_read_info); @@ -1170,6 +1610,7 @@ pfile_in_zip_read_info->stream.avail_in = (uInt)0; 
s->pfile_in_zip_read = pfile_in_zip_read_info; + s->encrypted = 0; # ifndef NOUNCRYPT if (password != NULL) @@ -1177,12 +1618,12 @@ int i; s->pcrc_32_tab = get_crc_table(); init_keys(password,s->keys,s->pcrc_32_tab); - if (ZSEEK(s->z_filefunc, s->filestream, + if (ZSEEK64(s->z_filefunc, s->filestream, s->pfile_in_zip_read->pos_in_zipfile + s->pfile_in_zip_read->byte_before_the_zipfile, SEEK_SET)!=0) return UNZ_INTERNALERROR; - if(ZREAD(s->z_filefunc, s->filestream,source, 12)<12) + if(ZREAD64(s->z_filefunc, s->filestream,source, 12)<12) return UNZ_INTERNALERROR; for (i = 0; i<12; i++) @@ -1197,28 +1638,39 @@ return UNZ_OK; } -extern int ZEXPORT unzOpenCurrentFile (file) - unzFile file; +extern int ZEXPORT unzOpenCurrentFile (unzFile file) { return unzOpenCurrentFile3(file, NULL, NULL, 0, NULL); } -extern int ZEXPORT unzOpenCurrentFilePassword (file, password) - unzFile file; - const char* password; +extern int ZEXPORT unzOpenCurrentFilePassword (unzFile file, const char* password) { return unzOpenCurrentFile3(file, NULL, NULL, 0, password); } -extern int ZEXPORT unzOpenCurrentFile2 (file,method,level,raw) - unzFile file; - int* method; - int* level; - int raw; +extern int ZEXPORT unzOpenCurrentFile2 (unzFile file, int* method, int* level, int raw) { return unzOpenCurrentFile3(file, method, level, raw, NULL); } +/** Addition for GDAL : START */ + +extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64( unzFile file) +{ + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + s=(unz64_s*)file; + if (file==NULL) + return 0; //UNZ_PARAMERROR; + pfile_in_zip_read_info=s->pfile_in_zip_read; + if (pfile_in_zip_read_info==NULL) + return 0; //UNZ_PARAMERROR; + return pfile_in_zip_read_info->pos_in_zipfile + + pfile_in_zip_read_info->byte_before_the_zipfile; +} + +/** Addition for GDAL : END */ + /* Read bytes from the current file. 
buf contain buffer where data must be copied @@ -1229,25 +1681,22 @@ return <0 with error code if there is an error (UNZ_ERRNO for IO error, or zLib error for uncompress error) */ -extern int ZEXPORT unzReadCurrentFile (file, buf, len) - unzFile file; - voidp buf; - unsigned len; +extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) { int err=UNZ_OK; uInt iRead = 0; - unz_s* s; - file_in_zip_read_info_s* pfile_in_zip_read_info; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; pfile_in_zip_read_info=s->pfile_in_zip_read; if (pfile_in_zip_read_info==NULL) return UNZ_PARAMERROR; - if ((pfile_in_zip_read_info->read_buffer == NULL)) + if (pfile_in_zip_read_info->read_buffer == NULL) return UNZ_END_OF_LIST_OF_FILE; if (len==0) return 0; @@ -1278,13 +1727,13 @@ uReadThis = (uInt)pfile_in_zip_read_info->rest_read_compressed; if (uReadThis == 0) return UNZ_EOF; - if (ZSEEK(pfile_in_zip_read_info->z_filefunc, + if (ZSEEK64(pfile_in_zip_read_info->z_filefunc, pfile_in_zip_read_info->filestream, pfile_in_zip_read_info->pos_in_zipfile + pfile_in_zip_read_info->byte_before_the_zipfile, ZLIB_FILEFUNC_SEEK_SET)!=0) return UNZ_ERRNO; - if (ZREAD(pfile_in_zip_read_info->z_filefunc, + if (ZREAD64(pfile_in_zip_read_info->z_filefunc, pfile_in_zip_read_info->filestream, pfile_in_zip_read_info->read_buffer, uReadThis)!=uReadThis) @@ -1330,6 +1779,8 @@ *(pfile_in_zip_read_info->stream.next_out+i) = *(pfile_in_zip_read_info->stream.next_in+i); + pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uDoCopy; + pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32, pfile_in_zip_read_info->stream.next_out, uDoCopy); @@ -1341,11 +1792,54 @@ pfile_in_zip_read_info->stream.total_out += uDoCopy; iRead += uDoCopy; } - else + else if (pfile_in_zip_read_info->compression_method==Z_BZIP2ED) { +#ifdef HAVE_BZIP2 uLong 
uTotalOutBefore,uTotalOutAfter; const Bytef *bufBefore; uLong uOutThis; + + pfile_in_zip_read_info->bstream.next_in = (char*)pfile_in_zip_read_info->stream.next_in; + pfile_in_zip_read_info->bstream.avail_in = pfile_in_zip_read_info->stream.avail_in; + pfile_in_zip_read_info->bstream.total_in_lo32 = pfile_in_zip_read_info->stream.total_in; + pfile_in_zip_read_info->bstream.total_in_hi32 = 0; + pfile_in_zip_read_info->bstream.next_out = (char*)pfile_in_zip_read_info->stream.next_out; + pfile_in_zip_read_info->bstream.avail_out = pfile_in_zip_read_info->stream.avail_out; + pfile_in_zip_read_info->bstream.total_out_lo32 = pfile_in_zip_read_info->stream.total_out; + pfile_in_zip_read_info->bstream.total_out_hi32 = 0; + + uTotalOutBefore = pfile_in_zip_read_info->bstream.total_out_lo32; + bufBefore = (const Bytef *)pfile_in_zip_read_info->bstream.next_out; + + err=BZ2_bzDecompress(&pfile_in_zip_read_info->bstream); + + uTotalOutAfter = pfile_in_zip_read_info->bstream.total_out_lo32; + uOutThis = uTotalOutAfter-uTotalOutBefore; + + pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis; + + pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,bufBefore, (uInt)(uOutThis)); + pfile_in_zip_read_info->rest_read_uncompressed -= uOutThis; + iRead += (uInt)(uTotalOutAfter - uTotalOutBefore); + + pfile_in_zip_read_info->stream.next_in = (Bytef*)pfile_in_zip_read_info->bstream.next_in; + pfile_in_zip_read_info->stream.avail_in = pfile_in_zip_read_info->bstream.avail_in; + pfile_in_zip_read_info->stream.total_in = pfile_in_zip_read_info->bstream.total_in_lo32; + pfile_in_zip_read_info->stream.next_out = (Bytef*)pfile_in_zip_read_info->bstream.next_out; + pfile_in_zip_read_info->stream.avail_out = pfile_in_zip_read_info->bstream.avail_out; + pfile_in_zip_read_info->stream.total_out = pfile_in_zip_read_info->bstream.total_out_lo32; + + if (err==BZ_STREAM_END) + return (iRead==0) ? 
UNZ_EOF : iRead; + if (err!=BZ_OK) + break; +#endif + } // end Z_BZIP2ED + else + { + ZPOS64_T uTotalOutBefore,uTotalOutAfter; + const Bytef *bufBefore; + ZPOS64_T uOutThis; int flush=Z_SYNC_FLUSH; uTotalOutBefore = pfile_in_zip_read_info->stream.total_out; @@ -1365,6 +1859,8 @@ uTotalOutAfter = pfile_in_zip_read_info->stream.total_out; uOutThis = uTotalOutAfter-uTotalOutBefore; + pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis; + pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,bufBefore, (uInt)(uOutThis)); @@ -1390,14 +1886,13 @@ /* Give the current position in uncompressed data */ -extern z_off_t ZEXPORT unztell (file) - unzFile file; +extern z_off_t ZEXPORT unztell (unzFile file) { - unz_s* s; - file_in_zip_read_info_s* pfile_in_zip_read_info; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; pfile_in_zip_read_info=s->pfile_in_zip_read; if (pfile_in_zip_read_info==NULL) @@ -1406,18 +1901,33 @@ return (z_off_t)pfile_in_zip_read_info->stream.total_out; } +extern ZPOS64_T ZEXPORT unztell64 (unzFile file) +{ + + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + if (file==NULL) + return (ZPOS64_T)-1; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return (ZPOS64_T)-1; + + return pfile_in_zip_read_info->total_out_64; +} + /* return 1 if the end of file was reached, 0 elsewhere */ -extern int ZEXPORT unzeof (file) - unzFile file; +extern int ZEXPORT unzeof (unzFile file) { - unz_s* s; - file_in_zip_read_info_s* pfile_in_zip_read_info; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; pfile_in_zip_read_info=s->pfile_in_zip_read; if (pfile_in_zip_read_info==NULL) @@ -1432,9 +1942,9 @@ /* - Read extra field from the current file (opened by 
unzOpenCurrentFile) - This is the local-header version of the extra field (sometimes, there is - more info in the local-header version than in the central-header) +Read extra field from the current file (opened by unzOpenCurrentFile) +This is the local-header version of the extra field (sometimes, there is +more info in the local-header version than in the central-header) if buf==NULL, it return the size of the local extra field that can be read @@ -1443,19 +1953,16 @@ the return value is the number of bytes copied in buf, or (if <0) the error code */ -extern int ZEXPORT unzGetLocalExtrafield (file,buf,len) - unzFile file; - voidp buf; - unsigned len; +extern int ZEXPORT unzGetLocalExtrafield (unzFile file, voidp buf, unsigned len) { - unz_s* s; - file_in_zip_read_info_s* pfile_in_zip_read_info; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; uInt read_now; - uLong size_to_read; + ZPOS64_T size_to_read; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; pfile_in_zip_read_info=s->pfile_in_zip_read; if (pfile_in_zip_read_info==NULL) @@ -1475,14 +1982,14 @@ if (read_now==0) return 0; - if (ZSEEK(pfile_in_zip_read_info->z_filefunc, + if (ZSEEK64(pfile_in_zip_read_info->z_filefunc, pfile_in_zip_read_info->filestream, pfile_in_zip_read_info->offset_local_extrafield + pfile_in_zip_read_info->pos_local_extrafield, ZLIB_FILEFUNC_SEEK_SET)!=0) return UNZ_ERRNO; - if (ZREAD(pfile_in_zip_read_info->z_filefunc, + if (ZREAD64(pfile_in_zip_read_info->z_filefunc, pfile_in_zip_read_info->filestream, buf,read_now)!=read_now) return UNZ_ERRNO; @@ -1491,19 +1998,18 @@ } /* - Close the file in zip opened with unzipOpenCurrentFile + Close the file in zip opened with unzOpenCurrentFile Return UNZ_CRCERROR if all the file was read but the CRC is not good */ -extern int ZEXPORT unzCloseCurrentFile (file) - unzFile file; +extern int ZEXPORT unzCloseCurrentFile (unzFile file) { int err=UNZ_OK; - unz_s* s; - file_in_zip_read_info_s* 
pfile_in_zip_read_info; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; pfile_in_zip_read_info=s->pfile_in_zip_read; if (pfile_in_zip_read_info==NULL) @@ -1520,8 +2026,13 @@ TRYFREE(pfile_in_zip_read_info->read_buffer); pfile_in_zip_read_info->read_buffer = NULL; - if (pfile_in_zip_read_info->stream_initialised) + if (pfile_in_zip_read_info->stream_initialised == Z_DEFLATED) inflateEnd(&pfile_in_zip_read_info->stream); +#ifdef HAVE_BZIP2 + else if (pfile_in_zip_read_info->stream_initialised == Z_BZIP2ED) + BZ2_bzDecompressEnd(&pfile_in_zip_read_info->bstream); +#endif + pfile_in_zip_read_info->stream_initialised = 0; TRYFREE(pfile_in_zip_read_info); @@ -1537,29 +2048,25 @@ uSizeBuf is the size of the szComment buffer. return the number of byte copied or an error code <0 */ -extern int ZEXPORT unzGetGlobalComment (file, szComment, uSizeBuf) - unzFile file; - char *szComment; - uLong uSizeBuf; +extern int ZEXPORT unzGetGlobalComment (unzFile file, char * szComment, uLong uSizeBuf) { - //int err=UNZ_OK; - unz_s* s; + unz64_s* s; uLong uReadThis ; if (file==NULL) - return UNZ_PARAMERROR; - s=(unz_s*)file; + return (int)UNZ_PARAMERROR; + s=(unz64_s*)file; uReadThis = uSizeBuf; if (uReadThis>s->gi.size_comment) uReadThis = s->gi.size_comment; - if (ZSEEK(s->z_filefunc,s->filestream,s->central_pos+22,ZLIB_FILEFUNC_SEEK_SET)!=0) + if (ZSEEK64(s->z_filefunc,s->filestream,s->central_pos+22,ZLIB_FILEFUNC_SEEK_SET)!=0) return UNZ_ERRNO; if (uReadThis>0) { *szComment='\0'; - if (ZREAD(s->z_filefunc,s->filestream,szComment,uReadThis)!=uReadThis) + if (ZREAD64(s->z_filefunc,s->filestream,szComment,uReadThis)!=uReadThis) return UNZ_ERRNO; } @@ -1569,14 +2076,13 @@ } /* Additions by RX '2004 */ -extern uLong ZEXPORT unzGetOffset (file) - unzFile file; +extern ZPOS64_T ZEXPORT unzGetOffset64(unzFile file) { - unz_s* s; + unz64_s* s; if (file==NULL) - return UNZ_PARAMERROR; - 
s=(unz_s*)file; + return 0; //UNZ_PARAMERROR; + s=(unz64_s*)file; if (!s->current_file_ok) return 0; if (s->gi.number_entry != 0 && s->gi.number_entry != 0xffff) @@ -1585,22 +2091,35 @@ return s->pos_in_central_dir; } -extern int ZEXPORT unzSetOffset (file, pos) - unzFile file; - uLong pos; +extern uLong ZEXPORT unzGetOffset (unzFile file) +{ + ZPOS64_T offset64; + + if (file==NULL) + return 0; //UNZ_PARAMERROR; + offset64 = unzGetOffset64(file); + return (uLong)offset64; +} + +extern int ZEXPORT unzSetOffset64(unzFile file, ZPOS64_T pos) { - unz_s* s; + unz64_s* s; int err; if (file==NULL) return UNZ_PARAMERROR; - s=(unz_s*)file; + s=(unz64_s*)file; s->pos_in_central_dir = pos; s->num_file = s->gi.number_entry; /* hack */ - err = unzlocal_GetCurrentFileInfoInternal(file,&s->cur_file_info, + err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, &s->cur_file_info_internal, NULL,0,NULL,0,NULL,0); s->current_file_ok = (err == UNZ_OK); return err; } + +extern int ZEXPORT unzSetOffset (unzFile file, uLong pos) +{ + return unzSetOffset64(file,pos); +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/unzip.h nodejs-0.11.15/deps/zlib/contrib/minizip/unzip.h --- nodejs-0.11.13/deps/zlib/contrib/minizip/unzip.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/unzip.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,20 +1,20 @@ /* unzip.h -- IO for uncompress .zip files using zlib - Version 1.01e, February 12th, 2005 + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Copyright (C) 1998-2005 Gilles Vollant + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) - This unzip package allow extract file from .ZIP file, compatible with PKZip 2.04g - WinZip, InfoZip tools and compatible. + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault - Multi volume ZipFile (span) are not supported. 
- Encryption compatible with pkzip 2.04g only supported - Old compressions used by old PKZip 1.x are not supported + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + For more info read MiniZip_info.txt - I WAIT FEEDBACK at mail info@winimage.com - Visit also http://www.winimage.com/zLibDll/unzip.htm for evolution + --------------------------------------------------------------------------------- - Condition of use and distribution are the same than zlib : + Condition of use and distribution are the same than zlib : This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages @@ -32,33 +32,35 @@ misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. + --------------------------------------------------------------------------------- -*/ + Changes + + See header of unzip64.c -/* for more info about .ZIP format, see - http://www.info-zip.org/pub/infozip/doc/appnote-981119-iz.zip - http://www.info-zip.org/pub/infozip/doc/ - PkWare has also a specification at : - ftp://ftp.pkware.com/probdesc.zip */ -#ifndef _unz_H -#define _unz_H +#ifndef _unz64_H +#define _unz64_H #ifdef __cplusplus extern "C" { #endif -#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> -#else +#ifndef _ZLIB_H #include "zlib.h" #endif -#ifndef _ZLIBIOAPI_H +#ifndef _ZLIBIOAPI_H #include "ioapi.h" #endif +#ifdef HAVE_BZIP2 +#include "bzlib.h" +#endif + +#define Z_BZIP2ED 12 + #if defined(STRICTUNZIP) || defined(STRICTZIPUNZIP) /* like the STRICT of WIN32, we define a pointer that cannot be converted from (void*) without cast */ @@ -91,15 +93,42 @@ /* unz_global_info structure contain global data about the ZIPfile These data comes from the end of central dir */ +typedef struct unz_global_info64_s +{ + ZPOS64_T number_entry; /* total number of entries in + the central dir on this disk */ + uLong 
size_comment; /* size of the global comment of the zipfile */ +} unz_global_info64; + typedef struct unz_global_info_s { uLong number_entry; /* total number of entries in - the central dir on this disk */ + the central dir on this disk */ uLong size_comment; /* size of the global comment of the zipfile */ } unz_global_info; - /* unz_file_info contain information about a file in the zipfile */ +typedef struct unz_file_info64_s +{ + uLong version; /* version made by 2 bytes */ + uLong version_needed; /* version needed to extract 2 bytes */ + uLong flag; /* general purpose bit flag 2 bytes */ + uLong compression_method; /* compression method 2 bytes */ + uLong dosDate; /* last mod file date in Dos fmt 4 bytes */ + uLong crc; /* crc-32 4 bytes */ + ZPOS64_T compressed_size; /* compressed size 8 bytes */ + ZPOS64_T uncompressed_size; /* uncompressed size 8 bytes */ + uLong size_filename; /* filename length 2 bytes */ + uLong size_file_extra; /* extra field length 2 bytes */ + uLong size_file_comment; /* file comment length 2 bytes */ + + uLong disk_num_start; /* disk number start 2 bytes */ + uLong internal_fa; /* internal file attributes 2 bytes */ + uLong external_fa; /* external file attributes 4 bytes */ + + tm_unz tmu_date; +} unz_file_info64; + typedef struct unz_file_info_s { uLong version; /* version made by 2 bytes */ @@ -135,6 +164,7 @@ extern unzFile ZEXPORT unzOpen OF((const char *path)); +extern unzFile ZEXPORT unzOpen64 OF((const void *path)); /* Open a Zip file. path contain the full pathname (by example, on a Windows XP computer "c:\\zlib\\zlib113.zip" or on an Unix computer @@ -143,8 +173,14 @@ return value is NULL. Else, the return value is a unzFile Handle, usable with other function of this unzip package. + the "64" function take a const void* pointer, because the path is just the + value passed to the open64_file_func callback. 
+ Under Windows, if UNICODE is defined, using fill_fopen64_filefunc, the path + is a pointer to a wide unicode string (LPCTSTR is LPCWSTR), so const char* + does not describe the reality */ + extern unzFile ZEXPORT unzOpen2 OF((const char *path, zlib_filefunc_def* pzlib_filefunc_def)); /* @@ -152,15 +188,25 @@ for read/write the zip file (see ioapi.h) */ +extern unzFile ZEXPORT unzOpen2_64 OF((const void *path, + zlib_filefunc64_def* pzlib_filefunc_def)); +/* + Open a Zip file, like unz64Open, but provide a set of file low level API + for read/write the zip file (see ioapi.h) +*/ + extern int ZEXPORT unzClose OF((unzFile file)); /* - Close a ZipFile opened with unzipOpen. + Close a ZipFile opened with unzOpen. If there is files inside the .Zip opened with unzOpenCurrentFile (see later), - these files MUST be closed with unzipCloseCurrentFile before call unzipClose. + these files MUST be closed with unzCloseCurrentFile before call unzClose. return UNZ_OK if there is no problem. */ extern int ZEXPORT unzGetGlobalInfo OF((unzFile file, unz_global_info *pglobal_info)); + +extern int ZEXPORT unzGetGlobalInfo64 OF((unzFile file, + unz_global_info64 *pglobal_info)); /* Write info about the ZipFile in the *pglobal_info structure. 
No preparation of the structure is needed @@ -223,8 +269,31 @@ unzFile file, unz_file_pos* file_pos); +typedef struct unz64_file_pos_s +{ + ZPOS64_T pos_in_zip_directory; /* offset in zip file directory */ + ZPOS64_T num_of_file; /* # of file */ +} unz64_file_pos; + +extern int ZEXPORT unzGetFilePos64( + unzFile file, + unz64_file_pos* file_pos); + +extern int ZEXPORT unzGoToFilePos64( + unzFile file, + const unz64_file_pos* file_pos); + /* ****************************************** */ +extern int ZEXPORT unzGetCurrentFileInfo64 OF((unzFile file, + unz_file_info64 *pfile_info, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize)); + extern int ZEXPORT unzGetCurrentFileInfo OF((unzFile file, unz_file_info *pfile_info, char *szFileName, @@ -246,6 +315,14 @@ (commentBufferSize is the size of the buffer) */ + +/** Addition for GDAL : START */ + +extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64 OF((unzFile file)); + +/** Addition for GDAL : END */ + + /***************************************************************************/ /* for reading the content of the current zipfile, you can open it, read data from it, and close it (you can close it before reading all the file) @@ -314,6 +391,8 @@ */ extern z_off_t ZEXPORT unztell OF((unzFile file)); + +extern ZPOS64_T ZEXPORT unztell64 OF((unzFile file)); /* Give the current position in uncompressed data */ @@ -342,9 +421,11 @@ /***************************************************************************/ /* Get the current file offset */ +extern ZPOS64_T ZEXPORT unzGetOffset64 (unzFile file); extern uLong ZEXPORT unzGetOffset (unzFile file); /* Set the current file offset */ +extern int ZEXPORT unzSetOffset64 (unzFile file, ZPOS64_T pos); extern int ZEXPORT unzSetOffset (unzFile file, uLong pos); @@ -353,4 +434,4 @@ } #endif -#endif /* _unz_H */ +#endif /* _unz64_H */ diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/zip.c 
nodejs-0.11.15/deps/zlib/contrib/minizip/zip.c --- nodejs-0.11.13/deps/zlib/contrib/minizip/zip.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/zip.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,12 +1,24 @@ /* zip.c -- IO on .zip files using zlib - Version 1.01e, February 12th, 2005 + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - 27 Dec 2004 Rolf Kalbermatter - Modification to zipOpen2 to support globalComment retrieval. + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) - Copyright (C) 1998-2005 Gilles Vollant + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + Changes + Oct-2009 - Mathias Svensson - Remove old C style function prototypes + Oct-2009 - Mathias Svensson - Added Zip64 Support when creating new file archives + Oct-2009 - Mathias Svensson - Did some code cleanup and refactoring to get better overview of some functions. + Oct-2009 - Mathias Svensson - Added zipRemoveExtraInfoBlock to strip extra field data from its ZIP64 data + It is used when recreting zip archive with RAW when deleting items from a zip. + ZIP64 data is automaticly added to items that needs it, and existing ZIP64 data need to be removed. 
+ Oct-2009 - Mathias Svensson - Added support for BZIP2 as compression mode (bzip2 lib is required) + Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer - Read zip.h for more info */ @@ -14,11 +26,7 @@ #include <stdlib.h> #include <string.h> #include <time.h> -#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> -#else #include "zlib.h" -#endif #include "zip.h" #ifdef STDC @@ -43,7 +51,7 @@ #endif #ifndef Z_BUFSIZE -#define Z_BUFSIZE (16384) +#define Z_BUFSIZE (64*1024) //(16384) #endif #ifndef Z_MAXFILENAMEINZIP @@ -64,6 +72,10 @@ /* I've found an old Unix (a SunOS 4.1.3_U1) without all SEEK_* defined.... */ + +// NOT sure that this work on ALL platform +#define MAKEULONG64(a, b) ((ZPOS64_T)(((unsigned long)(a)) | ((ZPOS64_T)((unsigned long)(b))) << 32)) + #ifndef SEEK_CUR #define SEEK_CUR 1 #endif @@ -83,8 +95,7 @@ # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif #endif -const char zip_copyright[] = - " zip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll"; +const char zip_copyright[] =" zip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll"; #define SIZEDATA_INDATABLOCK (4096-(4*4)) @@ -92,6 +103,8 @@ #define LOCALHEADERMAGIC (0x04034b50) #define CENTRALHEADERMAGIC (0x02014b50) #define ENDHEADERMAGIC (0x06054b50) +#define ZIP64ENDHEADERMAGIC (0x6064b50) +#define ZIP64ENDLOCHEADERMAGIC (0x7064b50) #define FLAG_LOCALHEADER_OFFSET (0x06) #define CRC_LOCALHEADER_OFFSET (0x0e) @@ -117,13 +130,19 @@ typedef struct { z_stream stream; /* zLib stream structure for inflate */ +#ifdef HAVE_BZIP2 + bz_stream bstream; /* bzLib stream structure for bziped */ +#endif + int stream_initialised; /* 1 is stream is initialised */ uInt pos_in_buffered_data; /* last written byte in buffered_data */ - uLong pos_local_header; /* offset of the local header of the file + ZPOS64_T pos_local_header; /* offset of the local header of the file currenty writing */ char* central_header; /* central header data for the current 
file */ + uLong size_centralExtra; uLong size_centralheader; /* size of the central header for cur file */ + uLong size_centralExtraFree; /* Extra bytes allocated to the centralheader but that are not used */ uLong flag; /* flag of the file currently writing */ int method; /* compression method of file currenty wr.*/ @@ -132,29 +151,34 @@ uLong dosDate; uLong crc32; int encrypt; + int zip64; /* Add ZIP64 extened information in the extra field */ + ZPOS64_T pos_zip64extrainfo; + ZPOS64_T totalCompressedData; + ZPOS64_T totalUncompressedData; #ifndef NOCRYPT unsigned long keys[3]; /* keys defining the pseudo-random sequence */ - const unsigned long* pcrc_32_tab; + const z_crc_t* pcrc_32_tab; int crypt_header_size; #endif -} curfile_info; +} curfile64_info; typedef struct { - zlib_filefunc_def z_filefunc; + zlib_filefunc64_32_def z_filefunc; voidpf filestream; /* io structore of the zipfile */ linkedlist_data central_dir;/* datablock with central dir in construction*/ int in_opened_file_inzip; /* 1 if a file in the zip is currently writ.*/ - curfile_info ci; /* info on the file curretly writing */ + curfile64_info ci; /* info on the file curretly writing */ + + ZPOS64_T begin_pos; /* position of the beginning of the zipfile */ + ZPOS64_T add_position_when_writting_offset; + ZPOS64_T number_entry; - uLong begin_pos; /* position of the beginning of the zipfile */ - uLong add_position_when_writting_offset; - uLong number_entry; #ifndef NO_ADDFILEINEXISTINGZIP char *globalcomment; #endif -} zip_internal; +} zip64_internal; #ifndef NOCRYPT @@ -176,8 +200,7 @@ return ldi; } -local void free_datablock(ldi) - linkedlist_datablock_internal* ldi; +local void free_datablock(linkedlist_datablock_internal* ldi) { while (ldi!=NULL) { @@ -187,24 +210,19 @@ } } -local void init_linkedlist(ll) - linkedlist_data* ll; +local void init_linkedlist(linkedlist_data* ll) { ll->first_block = ll->last_block = NULL; } -local void free_linkedlist(ll) - linkedlist_data* ll; +local void 
free_linkedlist(linkedlist_data* ll) { free_datablock(ll->first_block); ll->first_block = ll->last_block = NULL; } -local int add_data_in_datablock(ll,buf,len) - linkedlist_data* ll; - const void* buf; - uLong len; +local int add_data_in_datablock(linkedlist_data* ll, const void* buf, uLong len) { linkedlist_datablock_internal* ldi; const unsigned char* from_copy; @@ -262,18 +280,13 @@ #ifndef NO_ADDFILEINEXISTINGZIP /* =========================================================================== Inputs a long in LSB order to the given file - nbByte == 1, 2 or 4 (byte, short or long) + nbByte == 1, 2 ,4 or 8 (byte, short or long, ZPOS64_T) */ -local int ziplocal_putValue OF((const zlib_filefunc_def* pzlib_filefunc_def, - voidpf filestream, uLong x, int nbByte)); -local int ziplocal_putValue (pzlib_filefunc_def, filestream, x, nbByte) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - uLong x; - int nbByte; +local int zip64local_putValue OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte)); +local int zip64local_putValue (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte) { - unsigned char buf[4]; + unsigned char buf[8]; int n; for (n = 0; n < nbByte; n++) { @@ -288,17 +301,14 @@ } } - if (ZWRITE(*pzlib_filefunc_def,filestream,buf,nbByte)!=(uLong)nbByte) + if (ZWRITE64(*pzlib_filefunc_def,filestream,buf,nbByte)!=(uLong)nbByte) return ZIP_ERRNO; else return ZIP_OK; } -local void ziplocal_putValue_inmemory OF((void* dest, uLong x, int nbByte)); -local void ziplocal_putValue_inmemory (dest, x, nbByte) - void* dest; - uLong x; - int nbByte; +local void zip64local_putValue_inmemory OF((void* dest, ZPOS64_T x, int nbByte)); +local void zip64local_putValue_inmemory (void* dest, ZPOS64_T x, int nbByte) { unsigned char* buf=(unsigned char*)dest; int n; @@ -319,14 +329,12 @@ /****************************************************************************/ -local uLong 
ziplocal_TmzDateToDosDate(ptm,dosDate) - const tm_zip* ptm; - uLong dosDate; +local uLong zip64local_TmzDateToDosDate(const tm_zip* ptm) { uLong year = (uLong)ptm->tm_year; - if (year>1980) + if (year>=1980) year-=1980; - else if (year>80) + else if (year>=80) year-=80; return (uLong) (((ptm->tm_mday) + (32 * (ptm->tm_mon+1)) + (512 * year)) << 16) | @@ -336,18 +344,12 @@ /****************************************************************************/ -local int ziplocal_getByte OF(( - const zlib_filefunc_def* pzlib_filefunc_def, - voidpf filestream, - int *pi)); - -local int ziplocal_getByte(pzlib_filefunc_def,filestream,pi) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - int *pi; +local int zip64local_getByte OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi)); + +local int zip64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def,voidpf filestream,int* pi) { unsigned char c; - int err = (int)ZREAD(*pzlib_filefunc_def,filestream,&c,1); + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); if (err==1) { *pi = (int)c; @@ -355,7 +357,7 @@ } else { - if (ZERROR(*pzlib_filefunc_def,filestream)) + if (ZERROR64(*pzlib_filefunc_def,filestream)) return ZIP_ERRNO; else return ZIP_EOF; @@ -366,25 +368,19 @@ /* =========================================================================== Reads a long in LSB order from the given gz_stream. 
Sets */ -local int ziplocal_getShort OF(( - const zlib_filefunc_def* pzlib_filefunc_def, - voidpf filestream, - uLong *pX)); - -local int ziplocal_getShort (pzlib_filefunc_def,filestream,pX) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - uLong *pX; +local int zip64local_getShort OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); + +local int zip64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) { uLong x ; - int i; + int i = 0; int err; - err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i); + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); x = (uLong)i; if (err==ZIP_OK) - err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i); + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); x += ((uLong)i)<<8; if (err==ZIP_OK) @@ -394,33 +390,27 @@ return err; } -local int ziplocal_getLong OF(( - const zlib_filefunc_def* pzlib_filefunc_def, - voidpf filestream, - uLong *pX)); - -local int ziplocal_getLong (pzlib_filefunc_def,filestream,pX) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; - uLong *pX; +local int zip64local_getLong OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); + +local int zip64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) { uLong x ; - int i; + int i = 0; int err; - err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i); + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); x = (uLong)i; if (err==ZIP_OK) - err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i); + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); x += ((uLong)i)<<8; if (err==ZIP_OK) - err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i); + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); x += ((uLong)i)<<16; if (err==ZIP_OK) - err = ziplocal_getByte(pzlib_filefunc_def,filestream,&i); + err = 
zip64local_getByte(pzlib_filefunc_def,filestream,&i); x += ((uLong)i)<<24; if (err==ZIP_OK) @@ -430,6 +420,54 @@ return err; } +local int zip64local_getLong64 OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX)); + + +local int zip64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX) +{ + ZPOS64_T x; + int i = 0; + int err; + + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (ZPOS64_T)i; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<8; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<16; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<24; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<32; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<40; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<48; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<56; + + if (err==ZIP_OK) + *pX = x; + else + *pX = 0; + + return err; +} + #ifndef BUFREADCOMMENT #define BUFREADCOMMENT (0x400) #endif @@ -437,87 +475,391 @@ Locate the Central directory of a zipfile (at the end, just before the global comment) */ -local uLong ziplocal_SearchCentralDir OF(( - const zlib_filefunc_def* pzlib_filefunc_def, - voidpf filestream)); +local ZPOS64_T zip64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); -local uLong ziplocal_SearchCentralDir(pzlib_filefunc_def,filestream) - const zlib_filefunc_def* pzlib_filefunc_def; - voidpf filestream; +local ZPOS64_T zip64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { - unsigned 
char* buf; - uLong uSizeFile; - uLong uBackRead; - uLong uMaxBack=0xffff; /* maximum size of global comment */ - uLong uPosFound=0; + unsigned char* buf; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; + + if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) + return 0; + + + uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); + + if (uMaxBack>uSizeFile) + uMaxBack = uSizeFile; + + buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); + if (buf==NULL) + return 0; + + uBackRead = 4; + while (uBackRead<uMaxBack) + { + uLong uReadSize; + ZPOS64_T uReadPos ; + int i; + if (uBackRead+BUFREADCOMMENT>uMaxBack) + uBackRead = uMaxBack; + else + uBackRead+=BUFREADCOMMENT; + uReadPos = uSizeFile-uBackRead ; + + uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? + (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + break; + + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + break; + + for (i=(int)uReadSize-3; (i--)>0;) + if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && + ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06)) + { + uPosFound = uReadPos+i; + break; + } + + if (uPosFound!=0) + break; + } + TRYFREE(buf); + return uPosFound; +} + +/* +Locate the End of Zip64 Central directory locator and from there find the CD of a zipfile (at the end, just before +the global comment) +*/ +local ZPOS64_T zip64local_SearchCentralDir64 OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); + +local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) +{ + unsigned char* buf; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; + uLong uL; + ZPOS64_T relativeOffset; + + if 
(ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) + return 0; + + uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); + + if (uMaxBack>uSizeFile) + uMaxBack = uSizeFile; + + buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); + if (buf==NULL) + return 0; + + uBackRead = 4; + while (uBackRead<uMaxBack) + { + uLong uReadSize; + ZPOS64_T uReadPos; + int i; + if (uBackRead+BUFREADCOMMENT>uMaxBack) + uBackRead = uMaxBack; + else + uBackRead+=BUFREADCOMMENT; + uReadPos = uSizeFile-uBackRead ; + + uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? + (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + break; + + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + break; + + for (i=(int)uReadSize-3; (i--)>0;) + { + // Signature "0x07064b50" Zip64 end of central directory locater + if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x06) && ((*(buf+i+3))==0x07)) + { + uPosFound = uReadPos+i; + break; + } + } - if (ZSEEK(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) - return 0; + if (uPosFound!=0) + break; + } + + TRYFREE(buf); + if (uPosFound == 0) + return 0; + + /* Zip64 end of central directory locator */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, uPosFound,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature, already checked */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + + /* number of the disk with the start of the zip64 end of central directory */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + if (uL != 0) + return 0; + + /* relative offset of the zip64 end of central directory record */ + if (zip64local_getLong64(pzlib_filefunc_def,filestream,&relativeOffset)!=ZIP_OK) + return 0; + + /* total number of disks */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + if (uL != 1) + return 0; + + /* Goto 
Zip64 end of central directory record */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, relativeOffset,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + + if (uL != 0x06064b50) // signature of 'Zip64 end of central directory' + return 0; + + return relativeOffset; +} + +int LoadCentralDirectoryRecord(zip64_internal* pziinit) +{ + int err=ZIP_OK; + ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ + + ZPOS64_T size_central_dir; /* size of the central directory */ + ZPOS64_T offset_central_dir; /* offset of start of central directory */ + ZPOS64_T central_pos; + uLong uL; + + uLong number_disk; /* number of the current dist, used for + spaning ZIP, unsupported, always 0*/ + uLong number_disk_with_CD; /* number the the disk with central dir, used + for spaning ZIP, unsupported, always 0*/ + ZPOS64_T number_entry; + ZPOS64_T number_entry_CD; /* total number of entries in + the central dir + (same than number_entry on nospan) */ + uLong VersionMadeBy; + uLong VersionNeeded; + uLong size_comment; + + int hasZIP64Record = 0; + + // check first if we find a ZIP64 record + central_pos = zip64local_SearchCentralDir64(&pziinit->z_filefunc,pziinit->filestream); + if(central_pos > 0) + { + hasZIP64Record = 1; + } + else if(central_pos == 0) + { + central_pos = zip64local_SearchCentralDir(&pziinit->z_filefunc,pziinit->filestream); + } +/* disable to allow appending to empty ZIP archive + if (central_pos==0) + err=ZIP_ERRNO; +*/ + + if(hasZIP64Record) + { + ZPOS64_T sizeEndOfCentralDirectory; + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, central_pos, ZLIB_FILEFUNC_SEEK_SET) != 0) + err=ZIP_ERRNO; + + /* the signature, already checked */ + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&uL)!=ZIP_OK) + err=ZIP_ERRNO; + + /* size of zip64 end of central directory record */ + if (zip64local_getLong64(&pziinit->z_filefunc, 
pziinit->filestream, &sizeEndOfCentralDirectory)!=ZIP_OK) + err=ZIP_ERRNO; + + /* version made by */ + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &VersionMadeBy)!=ZIP_OK) + err=ZIP_ERRNO; + + /* version needed to extract */ + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &VersionNeeded)!=ZIP_OK) + err=ZIP_ERRNO; + + /* number of this disk */ + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&number_disk)!=ZIP_OK) + err=ZIP_ERRNO; + + /* number of the disk with the start of the central directory */ + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&number_disk_with_CD)!=ZIP_OK) + err=ZIP_ERRNO; + + /* total number of entries in the central directory on this disk */ + if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream, &number_entry)!=ZIP_OK) + err=ZIP_ERRNO; + + /* total number of entries in the central directory */ + if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream,&number_entry_CD)!=ZIP_OK) + err=ZIP_ERRNO; + + if ((number_entry_CD!=number_entry) || (number_disk_with_CD!=0) || (number_disk!=0)) + err=ZIP_BADZIPFILE; + + /* size of the central directory */ + if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream,&size_central_dir)!=ZIP_OK) + err=ZIP_ERRNO; + + /* offset of start of central directory with respect to the + starting disk number */ + if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream,&offset_central_dir)!=ZIP_OK) + err=ZIP_ERRNO; + + // TODO.. + // read the comment from the standard central header. 
+ size_comment = 0; + } + else + { + // Read End of central Directory info + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0) + err=ZIP_ERRNO; + + /* the signature, already checked */ + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&uL)!=ZIP_OK) + err=ZIP_ERRNO; + + /* number of this disk */ + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream,&number_disk)!=ZIP_OK) + err=ZIP_ERRNO; + + /* number of the disk with the start of the central directory */ + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream,&number_disk_with_CD)!=ZIP_OK) + err=ZIP_ERRNO; + + /* total number of entries in the central dir on this disk */ + number_entry = 0; + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK) + err=ZIP_ERRNO; + else + number_entry = uL; - uSizeFile = ZTELL(*pzlib_filefunc_def,filestream); + /* total number of entries in the central dir */ + number_entry_CD = 0; + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK) + err=ZIP_ERRNO; + else + number_entry_CD = uL; + + if ((number_entry_CD!=number_entry) || (number_disk_with_CD!=0) || (number_disk!=0)) + err=ZIP_BADZIPFILE; + + /* size of the central directory */ + size_central_dir = 0; + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK) + err=ZIP_ERRNO; + else + size_central_dir = uL; + + /* offset of start of central directory with respect to the starting disk number */ + offset_central_dir = 0; + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK) + err=ZIP_ERRNO; + else + offset_central_dir = uL; - if (uMaxBack>uSizeFile) - uMaxBack = uSizeFile; - buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); - if (buf==NULL) - return 0; + /* zipfile global comment length */ + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &size_comment)!=ZIP_OK) + err=ZIP_ERRNO; + } - uBackRead = 4; - while 
(uBackRead<uMaxBack) + if ((central_pos<offset_central_dir+size_central_dir) && + (err==ZIP_OK)) + err=ZIP_BADZIPFILE; + + if (err!=ZIP_OK) + { + ZCLOSE64(pziinit->z_filefunc, pziinit->filestream); + return ZIP_ERRNO; + } + + if (size_comment>0) + { + pziinit->globalcomment = (char*)ALLOC(size_comment+1); + if (pziinit->globalcomment) { - uLong uReadSize,uReadPos ; - int i; - if (uBackRead+BUFREADCOMMENT>uMaxBack) - uBackRead = uMaxBack; - else - uBackRead+=BUFREADCOMMENT; - uReadPos = uSizeFile-uBackRead ; + size_comment = ZREAD64(pziinit->z_filefunc, pziinit->filestream, pziinit->globalcomment,size_comment); + pziinit->globalcomment[size_comment]=0; + } + } - uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? - (BUFREADCOMMENT+4) : (uSizeFile-uReadPos); - if (ZSEEK(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) - break; - - if (ZREAD(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) - break; - - for (i=(int)uReadSize-3; (i--)>0;) - if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && - ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06)) - { - uPosFound = uReadPos+i; - break; - } + byte_before_the_zipfile = central_pos - (offset_central_dir+size_central_dir); + pziinit->add_position_when_writting_offset = byte_before_the_zipfile; - if (uPosFound!=0) - break; + { + ZPOS64_T size_central_dir_to_read = size_central_dir; + size_t buf_size = SIZEDATA_INDATABLOCK; + void* buf_read = (void*)ALLOC(buf_size); + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, offset_central_dir + byte_before_the_zipfile, ZLIB_FILEFUNC_SEEK_SET) != 0) + err=ZIP_ERRNO; + + while ((size_central_dir_to_read>0) && (err==ZIP_OK)) + { + ZPOS64_T read_this = SIZEDATA_INDATABLOCK; + if (read_this > size_central_dir_to_read) + read_this = size_central_dir_to_read; + + if (ZREAD64(pziinit->z_filefunc, pziinit->filestream,buf_read,(uLong)read_this) != read_this) + err=ZIP_ERRNO; + + if (err==ZIP_OK) + err = add_data_in_datablock(&pziinit->central_dir,buf_read, 
(uLong)read_this); + + size_central_dir_to_read-=read_this; } - TRYFREE(buf); - return uPosFound; + TRYFREE(buf_read); + } + pziinit->begin_pos = byte_before_the_zipfile; + pziinit->number_entry = number_entry_CD; + + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, offset_central_dir+byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET) != 0) + err=ZIP_ERRNO; + + return err; } + + #endif /* !NO_ADDFILEINEXISTINGZIP*/ + /************************************************************/ -extern zipFile ZEXPORT zipOpen2 (pathname, append, globalcomment, pzlib_filefunc_def) - const char *pathname; - int append; - zipcharpc* globalcomment; - zlib_filefunc_def* pzlib_filefunc_def; +extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def) { - zip_internal ziinit; - zip_internal* zi; + zip64_internal ziinit; + zip64_internal* zi; int err=ZIP_OK; - - if (pzlib_filefunc_def==NULL) - fill_fopen_filefunc(&ziinit.z_filefunc); + ziinit.z_filefunc.zseek32_file = NULL; + ziinit.z_filefunc.ztell32_file = NULL; + if (pzlib_filefunc64_32_def==NULL) + fill_fopen64_filefunc(&ziinit.z_filefunc.zfile_func64); else - ziinit.z_filefunc = *pzlib_filefunc_def; + ziinit.z_filefunc = *pzlib_filefunc64_32_def; - ziinit.filestream = (*(ziinit.z_filefunc.zopen_file)) - (ziinit.z_filefunc.opaque, + ziinit.filestream = ZOPEN64(ziinit.z_filefunc, pathname, (append == APPEND_STATUS_CREATE) ? 
(ZLIB_FILEFUNC_MODE_READ | ZLIB_FILEFUNC_MODE_WRITE | ZLIB_FILEFUNC_MODE_CREATE) : @@ -525,7 +867,11 @@ if (ziinit.filestream == NULL) return NULL; - ziinit.begin_pos = ZTELL(ziinit.z_filefunc,ziinit.filestream); + + if (append == APPEND_STATUS_CREATEAFTER) + ZSEEK64(ziinit.z_filefunc,ziinit.filestream,0,SEEK_END); + + ziinit.begin_pos = ZTELL64(ziinit.z_filefunc,ziinit.filestream); ziinit.in_opened_file_inzip = 0; ziinit.ci.stream_initialised = 0; ziinit.number_entry = 0; @@ -533,10 +879,11 @@ init_linkedlist(&(ziinit.central_dir)); - zi = (zip_internal*)ALLOC(sizeof(zip_internal)); + + zi = (zip64_internal*)ALLOC(sizeof(zip64_internal)); if (zi==NULL) { - ZCLOSE(ziinit.z_filefunc,ziinit.filestream); + ZCLOSE64(ziinit.z_filefunc,ziinit.filestream); return NULL; } @@ -545,122 +892,8 @@ ziinit.globalcomment = NULL; if (append == APPEND_STATUS_ADDINZIP) { - uLong byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ - - uLong size_central_dir; /* size of the central directory */ - uLong offset_central_dir; /* offset of start of central directory */ - uLong central_pos,uL; - - uLong number_disk; /* number of the current dist, used for - spaning ZIP, unsupported, always 0*/ - uLong number_disk_with_CD; /* number the the disk with central dir, used - for spaning ZIP, unsupported, always 0*/ - uLong number_entry; - uLong number_entry_CD; /* total number of entries in - the central dir - (same than number_entry on nospan) */ - uLong size_comment; - - central_pos = ziplocal_SearchCentralDir(&ziinit.z_filefunc,ziinit.filestream); - if (central_pos==0) - err=ZIP_ERRNO; - - if (ZSEEK(ziinit.z_filefunc, ziinit.filestream, - central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0) - err=ZIP_ERRNO; - - /* the signature, already checked */ - if (ziplocal_getLong(&ziinit.z_filefunc, ziinit.filestream,&uL)!=ZIP_OK) - err=ZIP_ERRNO; - - /* number of this disk */ - if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_disk)!=ZIP_OK) - err=ZIP_ERRNO; - - /* number of the 
disk with the start of the central directory */ - if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_disk_with_CD)!=ZIP_OK) - err=ZIP_ERRNO; - - /* total number of entries in the central dir on this disk */ - if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_entry)!=ZIP_OK) - err=ZIP_ERRNO; - - /* total number of entries in the central dir */ - if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&number_entry_CD)!=ZIP_OK) - err=ZIP_ERRNO; - - if ((number_entry_CD!=number_entry) || - (number_disk_with_CD!=0) || - (number_disk!=0)) - err=ZIP_BADZIPFILE; - - /* size of the central directory */ - if (ziplocal_getLong(&ziinit.z_filefunc, ziinit.filestream,&size_central_dir)!=ZIP_OK) - err=ZIP_ERRNO; - - /* offset of start of central directory with respect to the - starting disk number */ - if (ziplocal_getLong(&ziinit.z_filefunc, ziinit.filestream,&offset_central_dir)!=ZIP_OK) - err=ZIP_ERRNO; - - /* zipfile global comment length */ - if (ziplocal_getShort(&ziinit.z_filefunc, ziinit.filestream,&size_comment)!=ZIP_OK) - err=ZIP_ERRNO; - - if ((central_pos<offset_central_dir+size_central_dir) && - (err==ZIP_OK)) - err=ZIP_BADZIPFILE; - - if (err!=ZIP_OK) - { - ZCLOSE(ziinit.z_filefunc, ziinit.filestream); - return NULL; - } - - if (size_comment>0) - { - ziinit.globalcomment = ALLOC(size_comment+1); - if (ziinit.globalcomment) - { - size_comment = ZREAD(ziinit.z_filefunc, ziinit.filestream,ziinit.globalcomment,size_comment); - ziinit.globalcomment[size_comment]=0; - } - } - - byte_before_the_zipfile = central_pos - - (offset_central_dir+size_central_dir); - ziinit.add_position_when_writting_offset = byte_before_the_zipfile; - - { - uLong size_central_dir_to_read = size_central_dir; - size_t buf_size = SIZEDATA_INDATABLOCK; - void* buf_read = (void*)ALLOC(buf_size); - if (ZSEEK(ziinit.z_filefunc, ziinit.filestream, - offset_central_dir + byte_before_the_zipfile, - ZLIB_FILEFUNC_SEEK_SET) != 0) - err=ZIP_ERRNO; - - while 
((size_central_dir_to_read>0) && (err==ZIP_OK)) - { - uLong read_this = SIZEDATA_INDATABLOCK; - if (read_this > size_central_dir_to_read) - read_this = size_central_dir_to_read; - if (ZREAD(ziinit.z_filefunc, ziinit.filestream,buf_read,read_this) != read_this) - err=ZIP_ERRNO; - - if (err==ZIP_OK) - err = add_data_in_datablock(&ziinit.central_dir,buf_read, - (uLong)read_this); - size_central_dir_to_read-=read_this; - } - TRYFREE(buf_read); - } - ziinit.begin_pos = byte_before_the_zipfile; - ziinit.number_entry = number_entry_CD; - - if (ZSEEK(ziinit.z_filefunc, ziinit.filestream, - offset_central_dir+byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET)!=0) - err=ZIP_ERRNO; + // Read and Cache Central Directory Records + err = LoadCentralDirectoryRecord(&ziinit); } if (globalcomment) @@ -684,53 +917,173 @@ } } -extern zipFile ZEXPORT zipOpen (pathname, append) - const char *pathname; - int append; -{ - return zipOpen2(pathname,append,NULL,NULL); -} - -extern int ZEXPORT zipOpenNewFileInZip3 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw, - windowBits, memLevel, strategy, - password, crcForCrypting) - zipFile file; - const char* filename; - const zip_fileinfo* zipfi; - const void* extrafield_local; - uInt size_extrafield_local; - const void* extrafield_global; - uInt size_extrafield_global; - const char* comment; - int method; - int level; - int raw; - int windowBits; - int memLevel; - int strategy; - const char* password; - uLong crcForCrypting; +extern zipFile ZEXPORT zipOpen2 (const char *pathname, int append, zipcharpc* globalcomment, zlib_filefunc_def* pzlib_filefunc32_def) +{ + if (pzlib_filefunc32_def != NULL) + { + zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; + fill_zlib_filefunc64_32_def_from_filefunc32(&zlib_filefunc64_32_def_fill,pzlib_filefunc32_def); + return zipOpen3(pathname, append, globalcomment, &zlib_filefunc64_32_def_fill); + } + else + return 
zipOpen3(pathname, append, globalcomment, NULL); +} + +extern zipFile ZEXPORT zipOpen2_64 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_def* pzlib_filefunc_def) +{ + if (pzlib_filefunc_def != NULL) + { + zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; + zlib_filefunc64_32_def_fill.zfile_func64 = *pzlib_filefunc_def; + zlib_filefunc64_32_def_fill.ztell32_file = NULL; + zlib_filefunc64_32_def_fill.zseek32_file = NULL; + return zipOpen3(pathname, append, globalcomment, &zlib_filefunc64_32_def_fill); + } + else + return zipOpen3(pathname, append, globalcomment, NULL); +} + + + +extern zipFile ZEXPORT zipOpen (const char* pathname, int append) { - zip_internal* zi; + return zipOpen3((const void*)pathname,append,NULL,NULL); +} + +extern zipFile ZEXPORT zipOpen64 (const void* pathname, int append) +{ + return zipOpen3(pathname,append,NULL,NULL); +} + +int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local) +{ + /* write the local header */ + int err; + uInt size_filename = (uInt)strlen(filename); + uInt size_extrafield = size_extrafield_local; + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)LOCALHEADERMAGIC, 4); + + if (err==ZIP_OK) + { + if(zi->ci.zip64) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)45,2);/* version needed to extract */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)20,2);/* version needed to extract */ + } + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.flag,2); + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.method,2); + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.dosDate,4); + + // CRC / Compressed size / Uncompressed size will be filled in later and rewritten later + if (err==ZIP_OK) + err = 
zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* crc 32, unknown */ + if (err==ZIP_OK) + { + if(zi->ci.zip64) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xFFFFFFFF,4); /* compressed size, unknown */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* compressed size, unknown */ + } + if (err==ZIP_OK) + { + if(zi->ci.zip64) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xFFFFFFFF,4); /* uncompressed size, unknown */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* uncompressed size, unknown */ + } + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_filename,2); + + if(zi->ci.zip64) + { + size_extrafield += 20; + } + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_extrafield,2); + + if ((err==ZIP_OK) && (size_filename > 0)) + { + if (ZWRITE64(zi->z_filefunc,zi->filestream,filename,size_filename)!=size_filename) + err = ZIP_ERRNO; + } + + if ((err==ZIP_OK) && (size_extrafield_local > 0)) + { + if (ZWRITE64(zi->z_filefunc, zi->filestream, extrafield_local, size_extrafield_local) != size_extrafield_local) + err = ZIP_ERRNO; + } + + + if ((err==ZIP_OK) && (zi->ci.zip64)) + { + // write the Zip64 extended info + short HeaderID = 1; + short DataSize = 16; + ZPOS64_T CompressedSize = 0; + ZPOS64_T UncompressedSize = 0; + + // Remember position of Zip64 extended info for the local file header. 
(needed when we update size after done with file) + zi->ci.pos_zip64extrainfo = ZTELL64(zi->z_filefunc,zi->filestream); + + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (short)HeaderID,2); + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (short)DataSize,2); + + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)UncompressedSize,8); + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)CompressedSize,8); + } + + return err; +} + +/* + NOTE. + When writing RAW the ZIP64 extended information in extrafield_local and extrafield_global needs to be stripped + before calling this function it can be done with zipRemoveExtraInfoBlock + + It is not done here because then we need to realloc a new buffer since parameters are 'const' and I want to minimize + unnecessary allocations. + */ +extern int ZEXPORT zipOpenNewFileInZip4_64 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, + uLong versionMadeBy, uLong flagBase, int zip64) +{ + zip64_internal* zi; uInt size_filename; uInt size_comment; uInt i; int err = ZIP_OK; # ifdef NOCRYPT + (crcForCrypting); if (password != NULL) return ZIP_PARAMERROR; # endif if (file == NULL) return ZIP_PARAMERROR; + +#ifdef HAVE_BZIP2 + if ((method!=0) && (method!=Z_DEFLATED) && (method!=Z_BZIP2ED)) + return ZIP_PARAMERROR; +#else if ((method!=0) && (method!=Z_DEFLATED)) - return ZIP_PARAMERROR; + return ZIP_PARAMERROR; +#endif - zi = (zip_internal*)file; + zi = (zip64_internal*)file; if (zi->in_opened_file_inzip == 1) { @@ -739,7 +1092,6 @@ return err; } - if (filename==NULL) filename="-"; @@ -756,15 +1108,16 @@ { if (zipfi->dosDate != 0) zi->ci.dosDate = zipfi->dosDate; - else zi->ci.dosDate = 
ziplocal_TmzDateToDosDate(&zipfi->tmz_date,zipfi->dosDate); + else + zi->ci.dosDate = zip64local_TmzDateToDosDate(&zipfi->tmz_date); } - zi->ci.flag = 0; + zi->ci.flag = flagBase; if ((level==8) || (level==9)) zi->ci.flag |= 2; - if ((level==2)) + if (level==2) zi->ci.flag |= 4; - if ((level==1)) + if (level==1) zi->ci.flag |= 6; if (password != NULL) zi->ci.flag |= 1; @@ -775,37 +1128,43 @@ zi->ci.stream_initialised = 0; zi->ci.pos_in_buffered_data = 0; zi->ci.raw = raw; - zi->ci.pos_local_header = ZTELL(zi->z_filefunc,zi->filestream) ; - zi->ci.size_centralheader = SIZECENTRALHEADER + size_filename + - size_extrafield_global + size_comment; - zi->ci.central_header = (char*)ALLOC((uInt)zi->ci.size_centralheader); + zi->ci.pos_local_header = ZTELL64(zi->z_filefunc,zi->filestream); + + zi->ci.size_centralheader = SIZECENTRALHEADER + size_filename + size_extrafield_global + size_comment; + zi->ci.size_centralExtraFree = 32; // Extra space we have reserved in case we need to add ZIP64 extra info data + + zi->ci.central_header = (char*)ALLOC((uInt)zi->ci.size_centralheader + zi->ci.size_centralExtraFree); - ziplocal_putValue_inmemory(zi->ci.central_header,(uLong)CENTRALHEADERMAGIC,4); + zi->ci.size_centralExtra = size_extrafield_global; + zip64local_putValue_inmemory(zi->ci.central_header,(uLong)CENTRALHEADERMAGIC,4); /* version info */ - ziplocal_putValue_inmemory(zi->ci.central_header+4,(uLong)VERSIONMADEBY,2); - ziplocal_putValue_inmemory(zi->ci.central_header+6,(uLong)20,2); - ziplocal_putValue_inmemory(zi->ci.central_header+8,(uLong)zi->ci.flag,2); - ziplocal_putValue_inmemory(zi->ci.central_header+10,(uLong)zi->ci.method,2); - ziplocal_putValue_inmemory(zi->ci.central_header+12,(uLong)zi->ci.dosDate,4); - ziplocal_putValue_inmemory(zi->ci.central_header+16,(uLong)0,4); /*crc*/ - ziplocal_putValue_inmemory(zi->ci.central_header+20,(uLong)0,4); /*compr size*/ - ziplocal_putValue_inmemory(zi->ci.central_header+24,(uLong)0,4); /*uncompr size*/ - 
ziplocal_putValue_inmemory(zi->ci.central_header+28,(uLong)size_filename,2); - ziplocal_putValue_inmemory(zi->ci.central_header+30,(uLong)size_extrafield_global,2); - ziplocal_putValue_inmemory(zi->ci.central_header+32,(uLong)size_comment,2); - ziplocal_putValue_inmemory(zi->ci.central_header+34,(uLong)0,2); /*disk nm start*/ + zip64local_putValue_inmemory(zi->ci.central_header+4,(uLong)versionMadeBy,2); + zip64local_putValue_inmemory(zi->ci.central_header+6,(uLong)20,2); + zip64local_putValue_inmemory(zi->ci.central_header+8,(uLong)zi->ci.flag,2); + zip64local_putValue_inmemory(zi->ci.central_header+10,(uLong)zi->ci.method,2); + zip64local_putValue_inmemory(zi->ci.central_header+12,(uLong)zi->ci.dosDate,4); + zip64local_putValue_inmemory(zi->ci.central_header+16,(uLong)0,4); /*crc*/ + zip64local_putValue_inmemory(zi->ci.central_header+20,(uLong)0,4); /*compr size*/ + zip64local_putValue_inmemory(zi->ci.central_header+24,(uLong)0,4); /*uncompr size*/ + zip64local_putValue_inmemory(zi->ci.central_header+28,(uLong)size_filename,2); + zip64local_putValue_inmemory(zi->ci.central_header+30,(uLong)size_extrafield_global,2); + zip64local_putValue_inmemory(zi->ci.central_header+32,(uLong)size_comment,2); + zip64local_putValue_inmemory(zi->ci.central_header+34,(uLong)0,2); /*disk nm start*/ if (zipfi==NULL) - ziplocal_putValue_inmemory(zi->ci.central_header+36,(uLong)0,2); + zip64local_putValue_inmemory(zi->ci.central_header+36,(uLong)0,2); else - ziplocal_putValue_inmemory(zi->ci.central_header+36,(uLong)zipfi->internal_fa,2); + zip64local_putValue_inmemory(zi->ci.central_header+36,(uLong)zipfi->internal_fa,2); if (zipfi==NULL) - ziplocal_putValue_inmemory(zi->ci.central_header+38,(uLong)0,4); + zip64local_putValue_inmemory(zi->ci.central_header+38,(uLong)0,4); else - ziplocal_putValue_inmemory(zi->ci.central_header+38,(uLong)zipfi->external_fa,4); + zip64local_putValue_inmemory(zi->ci.central_header+38,(uLong)zipfi->external_fa,4); - 
ziplocal_putValue_inmemory(zi->ci.central_header+42,(uLong)zi->ci.pos_local_header- zi->add_position_when_writting_offset,4); + if(zi->ci.pos_local_header >= 0xffffffff) + zip64local_putValue_inmemory(zi->ci.central_header+42,(uLong)0xffffffff,4); + else + zip64local_putValue_inmemory(zi->ci.central_header+42,(uLong)zi->ci.pos_local_header - zi->add_position_when_writting_offset,4); for (i=0;i<size_filename;i++) *(zi->ci.central_header+SIZECENTRALHEADER+i) = *(filename+i); @@ -820,63 +1179,66 @@ if (zi->ci.central_header == NULL) return ZIP_INTERNALERROR; - /* write the local header */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)LOCALHEADERMAGIC,4); - - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)20,2);/* version needed to extract */ - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.flag,2); - - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.method,2); - - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.dosDate,4); - - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* crc 32, unknown */ - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* compressed size, unknown */ - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* uncompressed size, unknown */ - - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_filename,2); - - if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_extrafield_local,2); - - if ((err==ZIP_OK) && (size_filename>0)) - if (ZWRITE(zi->z_filefunc,zi->filestream,filename,size_filename)!=size_filename) - err = ZIP_ERRNO; - - if ((err==ZIP_OK) && (size_extrafield_local>0)) - if (ZWRITE(zi->z_filefunc,zi->filestream,extrafield_local,size_extrafield_local) - !=size_extrafield_local) - err = 
ZIP_ERRNO; + zi->ci.zip64 = zip64; + zi->ci.totalCompressedData = 0; + zi->ci.totalUncompressedData = 0; + zi->ci.pos_zip64extrainfo = 0; + + err = Write_LocalFileHeader(zi, filename, size_extrafield_local, extrafield_local); + +#ifdef HAVE_BZIP2 + zi->ci.bstream.avail_in = (uInt)0; + zi->ci.bstream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.bstream.next_out = (char*)zi->ci.buffered_data; + zi->ci.bstream.total_in_hi32 = 0; + zi->ci.bstream.total_in_lo32 = 0; + zi->ci.bstream.total_out_hi32 = 0; + zi->ci.bstream.total_out_lo32 = 0; +#endif zi->ci.stream.avail_in = (uInt)0; zi->ci.stream.avail_out = (uInt)Z_BUFSIZE; zi->ci.stream.next_out = zi->ci.buffered_data; zi->ci.stream.total_in = 0; zi->ci.stream.total_out = 0; + zi->ci.stream.data_type = Z_BINARY; +#ifdef HAVE_BZIP2 + if ((err==ZIP_OK) && (zi->ci.method == Z_DEFLATED || zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw)) +#else if ((err==ZIP_OK) && (zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) +#endif { - zi->ci.stream.zalloc = (alloc_func)0; - zi->ci.stream.zfree = (free_func)0; - zi->ci.stream.opaque = (voidpf)0; + if(zi->ci.method == Z_DEFLATED) + { + zi->ci.stream.zalloc = (alloc_func)0; + zi->ci.stream.zfree = (free_func)0; + zi->ci.stream.opaque = (voidpf)0; - if (windowBits>0) - windowBits = -windowBits; + if (windowBits>0) + windowBits = -windowBits; - err = deflateInit2(&zi->ci.stream, level, - Z_DEFLATED, windowBits, memLevel, strategy); + err = deflateInit2(&zi->ci.stream, level, Z_DEFLATED, windowBits, memLevel, strategy); + + if (err==Z_OK) + zi->ci.stream_initialised = Z_DEFLATED; + } + else if(zi->ci.method == Z_BZIP2ED) + { +#ifdef HAVE_BZIP2 + // Init BZip stuff here + zi->ci.bstream.bzalloc = 0; + zi->ci.bstream.bzfree = 0; + zi->ci.bstream.opaque = (voidpf)0; + + err = BZ2_bzCompressInit(&zi->ci.bstream, level, 0,35); + if(err == BZ_OK) + zi->ci.stream_initialised = Z_BZIP2ED; +#endif + } - if (err==Z_OK) - zi->ci.stream_initialised = 1; } + # ifndef NOCRYPT zi->ci.crypt_header_size = 0; if 
((err==Z_OK) && (password != NULL)) @@ -890,7 +1252,7 @@ sizeHead=crypthead(password,bufHead,RAND_HEAD_LEN,zi->ci.keys,zi->ci.pcrc_32_tab,crcForCrypting); zi->ci.crypt_header_size = sizeHead; - if (ZWRITE(zi->z_filefunc,zi->filestream,bufHead,sizeHead) != sizeHead) + if (ZWRITE64(zi->z_filefunc,zi->filestream,bufHead,sizeHead) != sizeHead) err = ZIP_ERRNO; } # endif @@ -900,53 +1262,105 @@ return err; } -extern int ZEXPORT zipOpenNewFileInZip2(file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw) - zipFile file; - const char* filename; - const zip_fileinfo* zipfi; - const void* extrafield_local; - uInt size_extrafield_local; - const void* extrafield_global; - uInt size_extrafield_global; - const char* comment; - int method; - int level; - int raw; +extern int ZEXPORT zipOpenNewFileInZip4 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, + uLong versionMadeBy, uLong flagBase) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, versionMadeBy, flagBase, 0); +} + +extern int ZEXPORT zipOpenNewFileInZip3 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, 
size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, VERSIONMADEBY, 0, 0); +} + +extern int ZEXPORT zipOpenNewFileInZip3_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, int zip64) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, VERSIONMADEBY, 0, zip64); +} + +extern int ZEXPORT zipOpenNewFileInZip2(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, 0); +} + +extern int ZEXPORT zipOpenNewFileInZip2_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, int zip64) { - return zipOpenNewFileInZip3 (file, filename, zipfi, + return zipOpenNewFileInZip4_64 (file, filename, zipfi, extrafield_local, size_extrafield_local, extrafield_global, size_extrafield_global, comment, method, level, raw, -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, - NULL, 0); + NULL, 0, VERSIONMADEBY, 
0, zip64); } -extern int ZEXPORT zipOpenNewFileInZip (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level) - zipFile file; - const char* filename; - const zip_fileinfo* zipfi; - const void* extrafield_local; - uInt size_extrafield_local; - const void* extrafield_global; - uInt size_extrafield_global; - const char* comment; - int method; - int level; +extern int ZEXPORT zipOpenNewFileInZip64 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void*extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int zip64) { - return zipOpenNewFileInZip2 (file, filename, zipfi, + return zipOpenNewFileInZip4_64 (file, filename, zipfi, extrafield_local, size_extrafield_local, extrafield_global, size_extrafield_global, - comment, method, level, 0); + comment, method, level, 0, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, zip64); +} + +extern int ZEXPORT zipOpenNewFileInZip (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void*extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, 0, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, 0); } -local int zipFlushWriteBuffer(zi) - zip_internal* zi; +local int zip64FlushWriteBuffer(zip64_internal* zi) { int err=ZIP_OK; @@ -956,169 +1370,374 @@ uInt i; int t; for (i=0;i<zi->ci.pos_in_buffered_data;i++) - zi->ci.buffered_data[i] = zencode(zi->ci.keys, zi->ci.pcrc_32_tab, - zi->ci.buffered_data[i],t); + zi->ci.buffered_data[i] = zencode(zi->ci.keys, zi->ci.pcrc_32_tab, 
zi->ci.buffered_data[i],t); #endif } - if (ZWRITE(zi->z_filefunc,zi->filestream,zi->ci.buffered_data,zi->ci.pos_in_buffered_data) - !=zi->ci.pos_in_buffered_data) + + if (ZWRITE64(zi->z_filefunc,zi->filestream,zi->ci.buffered_data,zi->ci.pos_in_buffered_data) != zi->ci.pos_in_buffered_data) err = ZIP_ERRNO; + + zi->ci.totalCompressedData += zi->ci.pos_in_buffered_data; + +#ifdef HAVE_BZIP2 + if(zi->ci.method == Z_BZIP2ED) + { + zi->ci.totalUncompressedData += zi->ci.bstream.total_in_lo32; + zi->ci.bstream.total_in_lo32 = 0; + zi->ci.bstream.total_in_hi32 = 0; + } + else +#endif + { + zi->ci.totalUncompressedData += zi->ci.stream.total_in; + zi->ci.stream.total_in = 0; + } + + zi->ci.pos_in_buffered_data = 0; + return err; } -extern int ZEXPORT zipWriteInFileInZip (file, buf, len) - zipFile file; - const void* buf; - unsigned len; +extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned int len) { - zip_internal* zi; + zip64_internal* zi; int err=ZIP_OK; if (file == NULL) return ZIP_PARAMERROR; - zi = (zip_internal*)file; + zi = (zip64_internal*)file; if (zi->in_opened_file_inzip == 0) return ZIP_PARAMERROR; - zi->ci.stream.next_in = (void*)buf; - zi->ci.stream.avail_in = len; - zi->ci.crc32 = crc32(zi->ci.crc32,buf,len); + zi->ci.crc32 = crc32(zi->ci.crc32,buf,(uInt)len); - while ((err==ZIP_OK) && (zi->ci.stream.avail_in>0)) +#ifdef HAVE_BZIP2 + if(zi->ci.method == Z_BZIP2ED && (!zi->ci.raw)) { - if (zi->ci.stream.avail_out == 0) + zi->ci.bstream.next_in = (void*)buf; + zi->ci.bstream.avail_in = len; + err = BZ_RUN_OK; + + while ((err==BZ_RUN_OK) && (zi->ci.bstream.avail_in>0)) + { + if (zi->ci.bstream.avail_out == 0) { - if (zipFlushWriteBuffer(zi) == ZIP_ERRNO) - err = ZIP_ERRNO; - zi->ci.stream.avail_out = (uInt)Z_BUFSIZE; - zi->ci.stream.next_out = zi->ci.buffered_data; + if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO) + err = ZIP_ERRNO; + zi->ci.bstream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.bstream.next_out = (char*)zi->ci.buffered_data; } - 
if(err != ZIP_OK) - break; + if(err != BZ_RUN_OK) + break; - if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) + if ((zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw)) { - uLong uTotalOutBefore = zi->ci.stream.total_out; - err=deflate(&zi->ci.stream, Z_NO_FLUSH); - zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; + uLong uTotalOutBefore_lo = zi->ci.bstream.total_out_lo32; +// uLong uTotalOutBefore_hi = zi->ci.bstream.total_out_hi32; + err=BZ2_bzCompress(&zi->ci.bstream, BZ_RUN); + zi->ci.pos_in_buffered_data += (uInt)(zi->ci.bstream.total_out_lo32 - uTotalOutBefore_lo) ; } - else - { - uInt copy_this,i; - if (zi->ci.stream.avail_in < zi->ci.stream.avail_out) - copy_this = zi->ci.stream.avail_in; - else - copy_this = zi->ci.stream.avail_out; - for (i=0;i<copy_this;i++) - *(((char*)zi->ci.stream.next_out)+i) = - *(((const char*)zi->ci.stream.next_in)+i); - { - zi->ci.stream.avail_in -= copy_this; - zi->ci.stream.avail_out-= copy_this; - zi->ci.stream.next_in+= copy_this; - zi->ci.stream.next_out+= copy_this; - zi->ci.stream.total_in+= copy_this; - zi->ci.stream.total_out+= copy_this; - zi->ci.pos_in_buffered_data += copy_this; - } - } + } + + if(err == BZ_RUN_OK) + err = ZIP_OK; + } + else +#endif + { + zi->ci.stream.next_in = (Bytef*)buf; + zi->ci.stream.avail_in = len; + + while ((err==ZIP_OK) && (zi->ci.stream.avail_in>0)) + { + if (zi->ci.stream.avail_out == 0) + { + if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO) + err = ZIP_ERRNO; + zi->ci.stream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.stream.next_out = zi->ci.buffered_data; + } + + + if(err != ZIP_OK) + break; + + if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) + { + uLong uTotalOutBefore = zi->ci.stream.total_out; + err=deflate(&zi->ci.stream, Z_NO_FLUSH); + if(uTotalOutBefore > zi->ci.stream.total_out) + { + int bBreak = 0; + bBreak++; + } + + zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; + } + else + { + uInt copy_this,i; + if 
(zi->ci.stream.avail_in < zi->ci.stream.avail_out) + copy_this = zi->ci.stream.avail_in; + else + copy_this = zi->ci.stream.avail_out; + + for (i = 0; i < copy_this; i++) + *(((char*)zi->ci.stream.next_out)+i) = + *(((const char*)zi->ci.stream.next_in)+i); + { + zi->ci.stream.avail_in -= copy_this; + zi->ci.stream.avail_out-= copy_this; + zi->ci.stream.next_in+= copy_this; + zi->ci.stream.next_out+= copy_this; + zi->ci.stream.total_in+= copy_this; + zi->ci.stream.total_out+= copy_this; + zi->ci.pos_in_buffered_data += copy_this; + } + } + }// while(...) } return err; } -extern int ZEXPORT zipCloseFileInZipRaw (file, uncompressed_size, crc32) - zipFile file; - uLong uncompressed_size; - uLong crc32; +extern int ZEXPORT zipCloseFileInZipRaw (zipFile file, uLong uncompressed_size, uLong crc32) +{ + return zipCloseFileInZipRaw64 (file, uncompressed_size, crc32); +} + +extern int ZEXPORT zipCloseFileInZipRaw64 (zipFile file, ZPOS64_T uncompressed_size, uLong crc32) { - zip_internal* zi; - uLong compressed_size; + zip64_internal* zi; + ZPOS64_T compressed_size; + uLong invalidValue = 0xffffffff; + short datasize = 0; int err=ZIP_OK; if (file == NULL) return ZIP_PARAMERROR; - zi = (zip_internal*)file; + zi = (zip64_internal*)file; if (zi->in_opened_file_inzip == 0) return ZIP_PARAMERROR; zi->ci.stream.avail_in = 0; if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) - while (err==ZIP_OK) - { + { + while (err==ZIP_OK) + { + uLong uTotalOutBefore; + if (zi->ci.stream.avail_out == 0) + { + if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO) + err = ZIP_ERRNO; + zi->ci.stream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.stream.next_out = zi->ci.buffered_data; + } + uTotalOutBefore = zi->ci.stream.total_out; + err=deflate(&zi->ci.stream, Z_FINISH); + zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; + } + } + else if ((zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw)) + { +#ifdef HAVE_BZIP2 + err = BZ_FINISH_OK; + while (err==BZ_FINISH_OK) + { uLong 
uTotalOutBefore; - if (zi->ci.stream.avail_out == 0) + if (zi->ci.bstream.avail_out == 0) { - if (zipFlushWriteBuffer(zi) == ZIP_ERRNO) - err = ZIP_ERRNO; - zi->ci.stream.avail_out = (uInt)Z_BUFSIZE; - zi->ci.stream.next_out = zi->ci.buffered_data; + if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO) + err = ZIP_ERRNO; + zi->ci.bstream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.bstream.next_out = (char*)zi->ci.buffered_data; } - uTotalOutBefore = zi->ci.stream.total_out; - err=deflate(&zi->ci.stream, Z_FINISH); - zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; + uTotalOutBefore = zi->ci.bstream.total_out_lo32; + err=BZ2_bzCompress(&zi->ci.bstream, BZ_FINISH); + if(err == BZ_STREAM_END) + err = Z_STREAM_END; + + zi->ci.pos_in_buffered_data += (uInt)(zi->ci.bstream.total_out_lo32 - uTotalOutBefore); + } + + if(err == BZ_FINISH_OK) + err = ZIP_OK; +#endif } if (err==Z_STREAM_END) err=ZIP_OK; /* this is normal */ if ((zi->ci.pos_in_buffered_data>0) && (err==ZIP_OK)) - if (zipFlushWriteBuffer(zi)==ZIP_ERRNO) + { + if (zip64FlushWriteBuffer(zi)==ZIP_ERRNO) err = ZIP_ERRNO; + } if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) { - err=deflateEnd(&zi->ci.stream); + int tmp_err = deflateEnd(&zi->ci.stream); + if (err == ZIP_OK) + err = tmp_err; zi->ci.stream_initialised = 0; } +#ifdef HAVE_BZIP2 + else if((zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw)) + { + int tmperr = BZ2_bzCompressEnd(&zi->ci.bstream); + if (err==ZIP_OK) + err = tmperr; + zi->ci.stream_initialised = 0; + } +#endif if (!zi->ci.raw) { crc32 = (uLong)zi->ci.crc32; - uncompressed_size = (uLong)zi->ci.stream.total_in; + uncompressed_size = zi->ci.totalUncompressedData; } - compressed_size = (uLong)zi->ci.stream.total_out; + compressed_size = zi->ci.totalCompressedData; + # ifndef NOCRYPT compressed_size += zi->ci.crypt_header_size; # endif - ziplocal_putValue_inmemory(zi->ci.central_header+16,crc32,4); /*crc*/ - ziplocal_putValue_inmemory(zi->ci.central_header+20, - 
compressed_size,4); /*compr size*/ + // update Current Item crc and sizes, + if(compressed_size >= 0xffffffff || uncompressed_size >= 0xffffffff || zi->ci.pos_local_header >= 0xffffffff) + { + /*version Made by*/ + zip64local_putValue_inmemory(zi->ci.central_header+4,(uLong)45,2); + /*version needed*/ + zip64local_putValue_inmemory(zi->ci.central_header+6,(uLong)45,2); + + } + + zip64local_putValue_inmemory(zi->ci.central_header+16,crc32,4); /*crc*/ + + + if(compressed_size >= 0xffffffff) + zip64local_putValue_inmemory(zi->ci.central_header+20, invalidValue,4); /*compr size*/ + else + zip64local_putValue_inmemory(zi->ci.central_header+20, compressed_size,4); /*compr size*/ + + /// set internal file attributes field if (zi->ci.stream.data_type == Z_ASCII) - ziplocal_putValue_inmemory(zi->ci.central_header+36,(uLong)Z_ASCII,2); - ziplocal_putValue_inmemory(zi->ci.central_header+24, - uncompressed_size,4); /*uncompr size*/ + zip64local_putValue_inmemory(zi->ci.central_header+36,(uLong)Z_ASCII,2); + + if(uncompressed_size >= 0xffffffff) + zip64local_putValue_inmemory(zi->ci.central_header+24, invalidValue,4); /*uncompr size*/ + else + zip64local_putValue_inmemory(zi->ci.central_header+24, uncompressed_size,4); /*uncompr size*/ + + // Add ZIP64 extra info field for uncompressed size + if(uncompressed_size >= 0xffffffff) + datasize += 8; + + // Add ZIP64 extra info field for compressed size + if(compressed_size >= 0xffffffff) + datasize += 8; + + // Add ZIP64 extra info field for relative offset to local file header of current file + if(zi->ci.pos_local_header >= 0xffffffff) + datasize += 8; + + if(datasize > 0) + { + char* p = NULL; + + if((uLong)(datasize + 4) > zi->ci.size_centralExtraFree) + { + // we can not write more data to the buffer that we have room for. 
+ return ZIP_BADZIPFILE; + } + + p = zi->ci.central_header + zi->ci.size_centralheader; + + // Add Extra Information Header for 'ZIP64 information' + zip64local_putValue_inmemory(p, 0x0001, 2); // HeaderID + p += 2; + zip64local_putValue_inmemory(p, datasize, 2); // DataSize + p += 2; + + if(uncompressed_size >= 0xffffffff) + { + zip64local_putValue_inmemory(p, uncompressed_size, 8); + p += 8; + } + + if(compressed_size >= 0xffffffff) + { + zip64local_putValue_inmemory(p, compressed_size, 8); + p += 8; + } + + if(zi->ci.pos_local_header >= 0xffffffff) + { + zip64local_putValue_inmemory(p, zi->ci.pos_local_header, 8); + p += 8; + } + + // Update how much extra free space we got in the memory buffer + // and increase the centralheader size so the new ZIP64 fields are included + // ( 4 below is the size of HeaderID and DataSize field ) + zi->ci.size_centralExtraFree -= datasize + 4; + zi->ci.size_centralheader += datasize + 4; + + // Update the extra info size field + zi->ci.size_centralExtra += datasize + 4; + zip64local_putValue_inmemory(zi->ci.central_header+30,(uLong)zi->ci.size_centralExtra,2); + } if (err==ZIP_OK) - err = add_data_in_datablock(&zi->central_dir,zi->ci.central_header, - (uLong)zi->ci.size_centralheader); + err = add_data_in_datablock(&zi->central_dir, zi->ci.central_header, (uLong)zi->ci.size_centralheader); + free(zi->ci.central_header); if (err==ZIP_OK) { - long cur_pos_inzip = ZTELL(zi->z_filefunc,zi->filestream); - if (ZSEEK(zi->z_filefunc,zi->filestream, - zi->ci.pos_local_header + 14,ZLIB_FILEFUNC_SEEK_SET)!=0) + // Update the LocalFileHeader with the new values. 
+ + ZPOS64_T cur_pos_inzip = ZTELL64(zi->z_filefunc,zi->filestream); + + if (ZSEEK64(zi->z_filefunc,zi->filestream, zi->ci.pos_local_header + 14,ZLIB_FILEFUNC_SEEK_SET)!=0) err = ZIP_ERRNO; if (err==ZIP_OK) - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,crc32,4); /* crc 32, unknown */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,crc32,4); /* crc 32, unknown */ - if (err==ZIP_OK) /* compressed size, unknown */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,compressed_size,4); + if(uncompressed_size >= 0xffffffff || compressed_size >= 0xffffffff ) + { + if(zi->ci.pos_zip64extrainfo > 0) + { + // Update the size in the ZIP64 extended field. + if (ZSEEK64(zi->z_filefunc,zi->filestream, zi->ci.pos_zip64extrainfo + 4,ZLIB_FILEFUNC_SEEK_SET)!=0) + err = ZIP_ERRNO; + + if (err==ZIP_OK) /* compressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, uncompressed_size, 8); + + if (err==ZIP_OK) /* uncompressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, compressed_size, 8); + } + else + err = ZIP_BADZIPFILE; // Caller passed zip64 = 0, so no room for zip64 info -> fatal + } + else + { + if (err==ZIP_OK) /* compressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,compressed_size,4); - if (err==ZIP_OK) /* uncompressed size, unknown */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,uncompressed_size,4); + if (err==ZIP_OK) /* uncompressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,uncompressed_size,4); + } - if (ZSEEK(zi->z_filefunc,zi->filestream, - cur_pos_inzip,ZLIB_FILEFUNC_SEEK_SET)!=0) + if (ZSEEK64(zi->z_filefunc,zi->filestream, cur_pos_inzip,ZLIB_FILEFUNC_SEEK_SET)!=0) err = ZIP_ERRNO; } @@ -1128,24 +1747,150 @@ return err; } -extern int ZEXPORT zipCloseFileInZip (file) - zipFile file; +extern int ZEXPORT zipCloseFileInZip (zipFile file) { return zipCloseFileInZipRaw (file,0,0); } -extern int ZEXPORT 
zipClose (file, global_comment) - zipFile file; - const char* global_comment; +int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip) +{ + int err = ZIP_OK; + ZPOS64_T pos = zip64eocd_pos_inzip - zi->add_position_when_writting_offset; + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)ZIP64ENDLOCHEADERMAGIC,4); + + /*num disks*/ + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); + + /*relative offset*/ + if (err==ZIP_OK) /* Relative offset to the Zip64EndOfCentralDirectory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, pos,8); + + /*total disks*/ /* Do not support spawning of disk so always say 1 here*/ + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)1,4); + + return err; +} + +int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) { - zip_internal* zi; + int err = ZIP_OK; + + uLong Zip64DataSize = 44; + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)ZIP64ENDHEADERMAGIC,4); + + if (err==ZIP_OK) /* size of this 'zip64 end of central directory' */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(ZPOS64_T)Zip64DataSize,8); // why ZPOS64_T of this ? 
+ + if (err==ZIP_OK) /* version made by */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)45,2); + + if (err==ZIP_OK) /* version needed */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)45,2); + + if (err==ZIP_OK) /* number of this disk */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); + + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); + + if (err==ZIP_OK) /* total number of entries in the central dir on this disk */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, zi->number_entry, 8); + + if (err==ZIP_OK) /* total number of entries in the central dir */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, zi->number_entry, 8); + + if (err==ZIP_OK) /* size of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(ZPOS64_T)size_centraldir,8); + + if (err==ZIP_OK) /* offset of start of central directory with respect to the starting disk number */ + { + ZPOS64_T pos = centraldir_pos_inzip - zi->add_position_when_writting_offset; + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, (ZPOS64_T)pos,8); + } + return err; +} +int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) +{ + int err = ZIP_OK; + + /*signature*/ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)ENDHEADERMAGIC,4); + + if (err==ZIP_OK) /* number of this disk */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2); + + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2); + + if (err==ZIP_OK) /* total number of entries in the central dir on this disk */ + { + { + if(zi->number_entry >= 0xFFFF) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xffff,2); // use value in ZIP64 
record + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2); + } + } + + if (err==ZIP_OK) /* total number of entries in the central dir */ + { + if(zi->number_entry >= 0xFFFF) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xffff,2); // use value in ZIP64 record + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2); + } + + if (err==ZIP_OK) /* size of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_centraldir,4); + + if (err==ZIP_OK) /* offset of start of central directory with respect to the starting disk number */ + { + ZPOS64_T pos = centraldir_pos_inzip - zi->add_position_when_writting_offset; + if(pos >= 0xffffffff) + { + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, (uLong)0xffffffff,4); + } + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, (uLong)(centraldir_pos_inzip - zi->add_position_when_writting_offset),4); + } + + return err; +} + +int Write_GlobalComment(zip64_internal* zi, const char* global_comment) +{ + int err = ZIP_OK; + uInt size_global_comment = 0; + + if(global_comment != NULL) + size_global_comment = (uInt)strlen(global_comment); + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_global_comment,2); + + if (err == ZIP_OK && size_global_comment > 0) + { + if (ZWRITE64(zi->z_filefunc,zi->filestream, global_comment, size_global_comment) != size_global_comment) + err = ZIP_ERRNO; + } + return err; +} + +extern int ZEXPORT zipClose (zipFile file, const char* global_comment) +{ + zip64_internal* zi; int err = 0; uLong size_centraldir = 0; - uLong centraldir_pos_inzip; - uInt size_global_comment; + ZPOS64_T centraldir_pos_inzip; + ZPOS64_T pos; + if (file == NULL) return ZIP_PARAMERROR; - zi = (zip_internal*)file; + + zi = (zip64_internal*)file; if (zi->in_opened_file_inzip == 1) { @@ -1156,61 +1901,42 @@ if (global_comment==NULL) global_comment = 
zi->globalcomment; #endif - if (global_comment==NULL) - size_global_comment = 0; - else - size_global_comment = (uInt)strlen(global_comment); - centraldir_pos_inzip = ZTELL(zi->z_filefunc,zi->filestream); + centraldir_pos_inzip = ZTELL64(zi->z_filefunc,zi->filestream); + if (err==ZIP_OK) { - linkedlist_datablock_internal* ldi = zi->central_dir.first_block ; + linkedlist_datablock_internal* ldi = zi->central_dir.first_block; while (ldi!=NULL) { if ((err==ZIP_OK) && (ldi->filled_in_this_block>0)) - if (ZWRITE(zi->z_filefunc,zi->filestream, - ldi->data,ldi->filled_in_this_block) - !=ldi->filled_in_this_block ) + { + if (ZWRITE64(zi->z_filefunc,zi->filestream, ldi->data, ldi->filled_in_this_block) != ldi->filled_in_this_block) err = ZIP_ERRNO; + } size_centraldir += ldi->filled_in_this_block; ldi = ldi->next_datablock; } } - free_datablock(zi->central_dir.first_block); - - if (err==ZIP_OK) /* Magic End */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)ENDHEADERMAGIC,4); - - if (err==ZIP_OK) /* number of this disk */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2); - - if (err==ZIP_OK) /* number of the disk with the start of the central directory */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2); - - if (err==ZIP_OK) /* total number of entries in the central dir on this disk */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2); - - if (err==ZIP_OK) /* total number of entries in the central dir */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2); + free_linkedlist(&(zi->central_dir)); - if (err==ZIP_OK) /* size of the central directory */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_centraldir,4); + pos = centraldir_pos_inzip - zi->add_position_when_writting_offset; + if(pos >= 0xffffffff || zi->number_entry > 0xFFFF) + { + ZPOS64_T Zip64EOCDpos = ZTELL64(zi->z_filefunc,zi->filestream); + 
Write_Zip64EndOfCentralDirectoryRecord(zi, size_centraldir, centraldir_pos_inzip); - if (err==ZIP_OK) /* offset of start of central directory with respect to the - starting disk number */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream, - (uLong)(centraldir_pos_inzip - zi->add_position_when_writting_offset),4); + Write_Zip64EndOfCentralDirectoryLocator(zi, Zip64EOCDpos); + } - if (err==ZIP_OK) /* zipfile comment length */ - err = ziplocal_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_global_comment,2); + if (err==ZIP_OK) + err = Write_EndOfCentralDirectoryRecord(zi, size_centraldir, centraldir_pos_inzip); - if ((err==ZIP_OK) && (size_global_comment>0)) - if (ZWRITE(zi->z_filefunc,zi->filestream, - global_comment,size_global_comment) != size_global_comment) - err = ZIP_ERRNO; + if(err == ZIP_OK) + err = Write_GlobalComment(zi, global_comment); - if (ZCLOSE(zi->z_filefunc,zi->filestream) != 0) + if (ZCLOSE64(zi->z_filefunc,zi->filestream) != 0) if (err == ZIP_OK) err = ZIP_ERRNO; @@ -1221,3 +1947,61 @@ return err; } + +extern int ZEXPORT zipRemoveExtraInfoBlock (char* pData, int* dataLen, short sHeader) +{ + char* p = pData; + int size = 0; + char* pNewHeader; + char* pTmp; + short header; + short dataSize; + + int retVal = ZIP_OK; + + if(pData == NULL || *dataLen < 4) + return ZIP_PARAMERROR; + + pNewHeader = (char*)ALLOC(*dataLen); + pTmp = pNewHeader; + + while(p < (pData + *dataLen)) + { + header = *(short*)p; + dataSize = *(((short*)p)+1); + + if( header == sHeader ) // Header found. + { + p += dataSize + 4; // skip it. do not copy to temp buffer + } + else + { + // Extra Info block should not be removed, So copy it to the temp buffer. + memcpy(pTmp, p, dataSize + 4); + p += dataSize + 4; + size += dataSize + 4; + } + + } + + if(size < *dataLen) + { + // clean old extra info block. 
+ memset(pData,0, *dataLen); + + // copy the new extra info block over the old + if(size > 0) + memcpy(pData, pNewHeader, size); + + // set the new extra info size + *dataLen = size; + + retVal = ZIP_OK; + } + else + retVal = ZIP_ERRNO; + + TRYFREE(pNewHeader); + + return retVal; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/minizip/zip.h nodejs-0.11.15/deps/zlib/contrib/minizip/zip.h --- nodejs-0.11.13/deps/zlib/contrib/minizip/zip.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/minizip/zip.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,19 +1,15 @@ -/* zip.h -- IO for compress .zip files using zlib - Version 1.01e, February 12th, 2005 +/* zip.h -- IO on .zip files using zlib + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) - Copyright (C) 1998-2005 Gilles Vollant + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) - This unzip package allow creates .ZIP file, compatible with PKZip 2.04g - WinZip, InfoZip tools and compatible. - Multi volume ZipFile (span) are not supported. - Encryption compatible with pkzip 2.04g only supported - Old compressions used by old PKZip 1.x are not supported + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) - For uncompress .zip file, look at unzip.h + For more info read MiniZip_info.txt - - I WAIT FEEDBACK at mail info@winimage.com - Visit also http://www.winimage.com/zLibDll/unzip.html for evolution + --------------------------------------------------------------------------- Condition of use and distribution are the same than zlib : @@ -33,26 +29,24 @@ misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. 
+ --------------------------------------------------------------------------- -*/ + Changes + + See header of zip.h -/* for more info about .ZIP format, see - http://www.info-zip.org/pub/infozip/doc/appnote-981119-iz.zip - http://www.info-zip.org/pub/infozip/doc/ - PkWare has also a specification at : - ftp://ftp.pkware.com/probdesc.zip */ -#ifndef _zip_H -#define _zip_H +#ifndef _zip12_H +#define _zip12_H #ifdef __cplusplus extern "C" { #endif -#if defined(USE_SYSTEM_ZLIB) -#include <zlib.h> -#else +//#define HAVE_BZIP2 + +#ifndef _ZLIB_H #include "zlib.h" #endif @@ -60,6 +54,12 @@ #include "ioapi.h" #endif +#ifdef HAVE_BZIP2 +#include "bzlib.h" +#endif + +#define Z_BZIP2ED 12 + #if defined(STRICTZIP) || defined(STRICTZIPUNZIP) /* like the STRICT of WIN32, we define a pointer that cannot be converted from (void*) without cast */ @@ -114,6 +114,7 @@ #define APPEND_STATUS_ADDINZIP (2) extern zipFile ZEXPORT zipOpen OF((const char *pathname, int append)); +extern zipFile ZEXPORT zipOpen64 OF((const void *pathname, int append)); /* Create a zipfile. pathname contain on Windows XP a filename like "c:\\zlib\\zlib113.zip" or on @@ -138,6 +139,11 @@ zipcharpc* globalcomment, zlib_filefunc_def* pzlib_filefunc_def)); +extern zipFile ZEXPORT zipOpen2_64 OF((const void *pathname, + int append, + zipcharpc* globalcomment, + zlib_filefunc64_def* pzlib_filefunc_def)); + extern int ZEXPORT zipOpenNewFileInZip OF((zipFile file, const char* filename, const zip_fileinfo* zipfi, @@ -148,6 +154,19 @@ const char* comment, int method, int level)); + +extern int ZEXPORT zipOpenNewFileInZip64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int zip64)); + /* Open a file in the ZIP for writing. 
filename : the filename in zip (if NULL, '-' without quote will be used @@ -159,6 +178,9 @@ if comment != NULL, comment contain the comment string method contain the compression method (0 for store, Z_DEFLATED for deflate) level contain the level of compression (can be Z_DEFAULT_COMPRESSION) + zip64 is set to 1 if a zip64 extended information block should be added to the local file header. + this MUST be '1' if the uncompressed size is >= 0xffffffff. + */ @@ -174,6 +196,19 @@ int level, int raw)); + +extern int ZEXPORT zipOpenNewFileInZip2_64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int zip64)); /* Same than zipOpenNewFileInZip, except if raw=1, we write raw file */ @@ -193,13 +228,79 @@ int memLevel, int strategy, const char* password, - uLong crcForCtypting)); + uLong crcForCrypting)); + +extern int ZEXPORT zipOpenNewFileInZip3_64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + int zip64 + )); /* Same than zipOpenNewFileInZip2, except windowBits,memLevel,,strategy : see parameter strategy in deflateInit2 password : crypting password (NULL for no crypting) - crcForCtypting : crc of file to compress (needed for crypting) + crcForCrypting : crc of file to compress (needed for crypting) + */ + +extern int ZEXPORT zipOpenNewFileInZip4 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* 
comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + uLong versionMadeBy, + uLong flagBase + )); + + +extern int ZEXPORT zipOpenNewFileInZip4_64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + uLong versionMadeBy, + uLong flagBase, + int zip64 + )); +/* + Same than zipOpenNewFileInZip4, except + versionMadeBy : value for Version made by field + flag : value for flag field (compression level info will be added) */ @@ -218,8 +319,13 @@ extern int ZEXPORT zipCloseFileInZipRaw OF((zipFile file, uLong uncompressed_size, uLong crc32)); + +extern int ZEXPORT zipCloseFileInZipRaw64 OF((zipFile file, + ZPOS64_T uncompressed_size, + uLong crc32)); + /* - Close the current file in the zipfile, for fiel opened with + Close the current file in the zipfile, for file opened with parameter raw=1 in zipOpenNewFileInZip2 uncompressed_size and crc32 are value for the uncompressed size */ @@ -230,8 +336,27 @@ Close the zipfile */ + +extern int ZEXPORT zipRemoveExtraInfoBlock OF((char* pData, int* dataLen, short sHeader)); +/* + zipRemoveExtraInfoBlock - Added by Mathias Svensson + + Remove extra information block from a extra information data for the local file header or central directory header + + It is needed to remove ZIP64 extra information blocks when before data is written if using RAW mode. + + 0x0001 is the signature header for the ZIP64 extra information blocks + + usage. 
+ Remove ZIP64 Extra information from a central director extra field data + zipRemoveExtraInfoBlock(pCenDirExtraFieldData, &nCenDirExtraFieldDataLen, 0x0001); + + Remove ZIP64 Extra information from a Local File Header extra field data + zipRemoveExtraInfoBlock(pLocalHeaderExtraFieldData, &nLocalHeaderExtraFieldDataLen, 0x0001); +*/ + #ifdef __cplusplus } #endif -#endif /* _zip_H */ +#endif /* _zip64_H */ diff -Nru nodejs-0.11.13/deps/zlib/contrib/pascal/example.pas nodejs-0.11.15/deps/zlib/contrib/pascal/example.pas --- nodejs-0.11.13/deps/zlib/contrib/pascal/example.pas 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/pascal/example.pas 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,599 @@ +(* example.c -- usage example of the zlib compression library + * Copyright (C) 1995-2003 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + * + * Pascal translation + * Copyright (C) 1998 by Jacques Nomssi Nzali. + * For conditions of distribution and use, see copyright notice in readme.txt + * + * Adaptation to the zlibpas interface + * Copyright (C) 2003 by Cosmin Truta. + * For conditions of distribution and use, see copyright notice in readme.txt + *) + +program example; + +{$DEFINE TEST_COMPRESS} +{DO NOT $DEFINE TEST_GZIO} +{$DEFINE TEST_DEFLATE} +{$DEFINE TEST_INFLATE} +{$DEFINE TEST_FLUSH} +{$DEFINE TEST_SYNC} +{$DEFINE TEST_DICT} + +uses SysUtils, zlibpas; + +const TESTFILE = 'foo.gz'; + +(* "hello world" would be more standard, but the repeated "hello" + * stresses the compression code better, sorry... 
+ *) +const hello: PChar = 'hello, hello!'; + +const dictionary: PChar = 'hello'; + +var dictId: LongInt; (* Adler32 value of the dictionary *) + +procedure CHECK_ERR(err: Integer; msg: String); +begin + if err <> Z_OK then + begin + WriteLn(msg, ' error: ', err); + Halt(1); + end; +end; + +procedure EXIT_ERR(const msg: String); +begin + WriteLn('Error: ', msg); + Halt(1); +end; + +(* =========================================================================== + * Test compress and uncompress + *) +{$IFDEF TEST_COMPRESS} +procedure test_compress(compr: Pointer; comprLen: LongInt; + uncompr: Pointer; uncomprLen: LongInt); +var err: Integer; + len: LongInt; +begin + len := StrLen(hello)+1; + + err := compress(compr, comprLen, hello, len); + CHECK_ERR(err, 'compress'); + + StrCopy(PChar(uncompr), 'garbage'); + + err := uncompress(uncompr, uncomprLen, compr, comprLen); + CHECK_ERR(err, 'uncompress'); + + if StrComp(PChar(uncompr), hello) <> 0 then + EXIT_ERR('bad uncompress') + else + WriteLn('uncompress(): ', PChar(uncompr)); +end; +{$ENDIF} + +(* =========================================================================== + * Test read/write of .gz files + *) +{$IFDEF TEST_GZIO} +procedure test_gzio(const fname: PChar; (* compressed file name *) + uncompr: Pointer; + uncomprLen: LongInt); +var err: Integer; + len: Integer; + zfile: gzFile; + pos: LongInt; +begin + len := StrLen(hello)+1; + + zfile := gzopen(fname, 'wb'); + if zfile = NIL then + begin + WriteLn('gzopen error'); + Halt(1); + end; + gzputc(zfile, 'h'); + if gzputs(zfile, 'ello') <> 4 then + begin + WriteLn('gzputs err: ', gzerror(zfile, err)); + Halt(1); + end; + {$IFDEF GZ_FORMAT_STRING} + if gzprintf(zfile, ', %s!', 'hello') <> 8 then + begin + WriteLn('gzprintf err: ', gzerror(zfile, err)); + Halt(1); + end; + {$ELSE} + if gzputs(zfile, ', hello!') <> 8 then + begin + WriteLn('gzputs err: ', gzerror(zfile, err)); + Halt(1); + end; + {$ENDIF} + gzseek(zfile, 1, SEEK_CUR); (* add one zero byte *) + 
gzclose(zfile); + + zfile := gzopen(fname, 'rb'); + if zfile = NIL then + begin + WriteLn('gzopen error'); + Halt(1); + end; + + StrCopy(PChar(uncompr), 'garbage'); + + if gzread(zfile, uncompr, uncomprLen) <> len then + begin + WriteLn('gzread err: ', gzerror(zfile, err)); + Halt(1); + end; + if StrComp(PChar(uncompr), hello) <> 0 then + begin + WriteLn('bad gzread: ', PChar(uncompr)); + Halt(1); + end + else + WriteLn('gzread(): ', PChar(uncompr)); + + pos := gzseek(zfile, -8, SEEK_CUR); + if (pos <> 6) or (gztell(zfile) <> pos) then + begin + WriteLn('gzseek error, pos=', pos, ', gztell=', gztell(zfile)); + Halt(1); + end; + + if gzgetc(zfile) <> ' ' then + begin + WriteLn('gzgetc error'); + Halt(1); + end; + + if gzungetc(' ', zfile) <> ' ' then + begin + WriteLn('gzungetc error'); + Halt(1); + end; + + gzgets(zfile, PChar(uncompr), uncomprLen); + uncomprLen := StrLen(PChar(uncompr)); + if uncomprLen <> 7 then (* " hello!" *) + begin + WriteLn('gzgets err after gzseek: ', gzerror(zfile, err)); + Halt(1); + end; + if StrComp(PChar(uncompr), hello + 6) <> 0 then + begin + WriteLn('bad gzgets after gzseek'); + Halt(1); + end + else + WriteLn('gzgets() after gzseek: ', PChar(uncompr)); + + gzclose(zfile); +end; +{$ENDIF} + +(* =========================================================================== + * Test deflate with small buffers + *) +{$IFDEF TEST_DEFLATE} +procedure test_deflate(compr: Pointer; comprLen: LongInt); +var c_stream: z_stream; (* compression stream *) + err: Integer; + len: LongInt; +begin + len := StrLen(hello)+1; + + c_stream.zalloc := NIL; + c_stream.zfree := NIL; + c_stream.opaque := NIL; + + err := deflateInit(c_stream, Z_DEFAULT_COMPRESSION); + CHECK_ERR(err, 'deflateInit'); + + c_stream.next_in := hello; + c_stream.next_out := compr; + + while (c_stream.total_in <> len) and + (c_stream.total_out < comprLen) do + begin + c_stream.avail_out := 1; { force small buffers } + c_stream.avail_in := 1; + err := deflate(c_stream, Z_NO_FLUSH); + 
CHECK_ERR(err, 'deflate'); + end; + + (* Finish the stream, still forcing small buffers: *) + while TRUE do + begin + c_stream.avail_out := 1; + err := deflate(c_stream, Z_FINISH); + if err = Z_STREAM_END then + break; + CHECK_ERR(err, 'deflate'); + end; + + err := deflateEnd(c_stream); + CHECK_ERR(err, 'deflateEnd'); +end; +{$ENDIF} + +(* =========================================================================== + * Test inflate with small buffers + *) +{$IFDEF TEST_INFLATE} +procedure test_inflate(compr: Pointer; comprLen : LongInt; + uncompr: Pointer; uncomprLen : LongInt); +var err: Integer; + d_stream: z_stream; (* decompression stream *) +begin + StrCopy(PChar(uncompr), 'garbage'); + + d_stream.zalloc := NIL; + d_stream.zfree := NIL; + d_stream.opaque := NIL; + + d_stream.next_in := compr; + d_stream.avail_in := 0; + d_stream.next_out := uncompr; + + err := inflateInit(d_stream); + CHECK_ERR(err, 'inflateInit'); + + while (d_stream.total_out < uncomprLen) and + (d_stream.total_in < comprLen) do + begin + d_stream.avail_out := 1; (* force small buffers *) + d_stream.avail_in := 1; + err := inflate(d_stream, Z_NO_FLUSH); + if err = Z_STREAM_END then + break; + CHECK_ERR(err, 'inflate'); + end; + + err := inflateEnd(d_stream); + CHECK_ERR(err, 'inflateEnd'); + + if StrComp(PChar(uncompr), hello) <> 0 then + EXIT_ERR('bad inflate') + else + WriteLn('inflate(): ', PChar(uncompr)); +end; +{$ENDIF} + +(* =========================================================================== + * Test deflate with large buffers and dynamic change of compression level + *) +{$IFDEF TEST_DEFLATE} +procedure test_large_deflate(compr: Pointer; comprLen: LongInt; + uncompr: Pointer; uncomprLen: LongInt); +var c_stream: z_stream; (* compression stream *) + err: Integer; +begin + c_stream.zalloc := NIL; + c_stream.zfree := NIL; + c_stream.opaque := NIL; + + err := deflateInit(c_stream, Z_BEST_SPEED); + CHECK_ERR(err, 'deflateInit'); + + c_stream.next_out := compr; + c_stream.avail_out 
:= Integer(comprLen); + + (* At this point, uncompr is still mostly zeroes, so it should compress + * very well: + *) + c_stream.next_in := uncompr; + c_stream.avail_in := Integer(uncomprLen); + err := deflate(c_stream, Z_NO_FLUSH); + CHECK_ERR(err, 'deflate'); + if c_stream.avail_in <> 0 then + EXIT_ERR('deflate not greedy'); + + (* Feed in already compressed data and switch to no compression: *) + deflateParams(c_stream, Z_NO_COMPRESSION, Z_DEFAULT_STRATEGY); + c_stream.next_in := compr; + c_stream.avail_in := Integer(comprLen div 2); + err := deflate(c_stream, Z_NO_FLUSH); + CHECK_ERR(err, 'deflate'); + + (* Switch back to compressing mode: *) + deflateParams(c_stream, Z_BEST_COMPRESSION, Z_FILTERED); + c_stream.next_in := uncompr; + c_stream.avail_in := Integer(uncomprLen); + err := deflate(c_stream, Z_NO_FLUSH); + CHECK_ERR(err, 'deflate'); + + err := deflate(c_stream, Z_FINISH); + if err <> Z_STREAM_END then + EXIT_ERR('deflate should report Z_STREAM_END'); + + err := deflateEnd(c_stream); + CHECK_ERR(err, 'deflateEnd'); +end; +{$ENDIF} + +(* =========================================================================== + * Test inflate with large buffers + *) +{$IFDEF TEST_INFLATE} +procedure test_large_inflate(compr: Pointer; comprLen: LongInt; + uncompr: Pointer; uncomprLen: LongInt); +var err: Integer; + d_stream: z_stream; (* decompression stream *) +begin + StrCopy(PChar(uncompr), 'garbage'); + + d_stream.zalloc := NIL; + d_stream.zfree := NIL; + d_stream.opaque := NIL; + + d_stream.next_in := compr; + d_stream.avail_in := Integer(comprLen); + + err := inflateInit(d_stream); + CHECK_ERR(err, 'inflateInit'); + + while TRUE do + begin + d_stream.next_out := uncompr; (* discard the output *) + d_stream.avail_out := Integer(uncomprLen); + err := inflate(d_stream, Z_NO_FLUSH); + if err = Z_STREAM_END then + break; + CHECK_ERR(err, 'large inflate'); + end; + + err := inflateEnd(d_stream); + CHECK_ERR(err, 'inflateEnd'); + + if d_stream.total_out <> 2 * 
uncomprLen + comprLen div 2 then + begin + WriteLn('bad large inflate: ', d_stream.total_out); + Halt(1); + end + else + WriteLn('large_inflate(): OK'); +end; +{$ENDIF} + +(* =========================================================================== + * Test deflate with full flush + *) +{$IFDEF TEST_FLUSH} +procedure test_flush(compr: Pointer; var comprLen : LongInt); +var c_stream: z_stream; (* compression stream *) + err: Integer; + len: Integer; +begin + len := StrLen(hello)+1; + + c_stream.zalloc := NIL; + c_stream.zfree := NIL; + c_stream.opaque := NIL; + + err := deflateInit(c_stream, Z_DEFAULT_COMPRESSION); + CHECK_ERR(err, 'deflateInit'); + + c_stream.next_in := hello; + c_stream.next_out := compr; + c_stream.avail_in := 3; + c_stream.avail_out := Integer(comprLen); + err := deflate(c_stream, Z_FULL_FLUSH); + CHECK_ERR(err, 'deflate'); + + Inc(PByteArray(compr)^[3]); (* force an error in first compressed block *) + c_stream.avail_in := len - 3; + + err := deflate(c_stream, Z_FINISH); + if err <> Z_STREAM_END then + CHECK_ERR(err, 'deflate'); + + err := deflateEnd(c_stream); + CHECK_ERR(err, 'deflateEnd'); + + comprLen := c_stream.total_out; +end; +{$ENDIF} + +(* =========================================================================== + * Test inflateSync() + *) +{$IFDEF TEST_SYNC} +procedure test_sync(compr: Pointer; comprLen: LongInt; + uncompr: Pointer; uncomprLen : LongInt); +var err: Integer; + d_stream: z_stream; (* decompression stream *) +begin + StrCopy(PChar(uncompr), 'garbage'); + + d_stream.zalloc := NIL; + d_stream.zfree := NIL; + d_stream.opaque := NIL; + + d_stream.next_in := compr; + d_stream.avail_in := 2; (* just read the zlib header *) + + err := inflateInit(d_stream); + CHECK_ERR(err, 'inflateInit'); + + d_stream.next_out := uncompr; + d_stream.avail_out := Integer(uncomprLen); + + inflate(d_stream, Z_NO_FLUSH); + CHECK_ERR(err, 'inflate'); + + d_stream.avail_in := Integer(comprLen-2); (* read all compressed data *) + err := 
inflateSync(d_stream); (* but skip the damaged part *) + CHECK_ERR(err, 'inflateSync'); + + err := inflate(d_stream, Z_FINISH); + if err <> Z_DATA_ERROR then + EXIT_ERR('inflate should report DATA_ERROR'); + (* Because of incorrect adler32 *) + + err := inflateEnd(d_stream); + CHECK_ERR(err, 'inflateEnd'); + + WriteLn('after inflateSync(): hel', PChar(uncompr)); +end; +{$ENDIF} + +(* =========================================================================== + * Test deflate with preset dictionary + *) +{$IFDEF TEST_DICT} +procedure test_dict_deflate(compr: Pointer; comprLen: LongInt); +var c_stream: z_stream; (* compression stream *) + err: Integer; +begin + c_stream.zalloc := NIL; + c_stream.zfree := NIL; + c_stream.opaque := NIL; + + err := deflateInit(c_stream, Z_BEST_COMPRESSION); + CHECK_ERR(err, 'deflateInit'); + + err := deflateSetDictionary(c_stream, dictionary, StrLen(dictionary)); + CHECK_ERR(err, 'deflateSetDictionary'); + + dictId := c_stream.adler; + c_stream.next_out := compr; + c_stream.avail_out := Integer(comprLen); + + c_stream.next_in := hello; + c_stream.avail_in := StrLen(hello)+1; + + err := deflate(c_stream, Z_FINISH); + if err <> Z_STREAM_END then + EXIT_ERR('deflate should report Z_STREAM_END'); + + err := deflateEnd(c_stream); + CHECK_ERR(err, 'deflateEnd'); +end; +{$ENDIF} + +(* =========================================================================== + * Test inflate with a preset dictionary + *) +{$IFDEF TEST_DICT} +procedure test_dict_inflate(compr: Pointer; comprLen: LongInt; + uncompr: Pointer; uncomprLen: LongInt); +var err: Integer; + d_stream: z_stream; (* decompression stream *) +begin + StrCopy(PChar(uncompr), 'garbage'); + + d_stream.zalloc := NIL; + d_stream.zfree := NIL; + d_stream.opaque := NIL; + + d_stream.next_in := compr; + d_stream.avail_in := Integer(comprLen); + + err := inflateInit(d_stream); + CHECK_ERR(err, 'inflateInit'); + + d_stream.next_out := uncompr; + d_stream.avail_out := Integer(uncomprLen); + + while 
TRUE do + begin + err := inflate(d_stream, Z_NO_FLUSH); + if err = Z_STREAM_END then + break; + if err = Z_NEED_DICT then + begin + if d_stream.adler <> dictId then + EXIT_ERR('unexpected dictionary'); + err := inflateSetDictionary(d_stream, dictionary, StrLen(dictionary)); + end; + CHECK_ERR(err, 'inflate with dict'); + end; + + err := inflateEnd(d_stream); + CHECK_ERR(err, 'inflateEnd'); + + if StrComp(PChar(uncompr), hello) <> 0 then + EXIT_ERR('bad inflate with dict') + else + WriteLn('inflate with dictionary: ', PChar(uncompr)); +end; +{$ENDIF} + +var compr, uncompr: Pointer; + comprLen, uncomprLen: LongInt; + +begin + if zlibVersion^ <> ZLIB_VERSION[1] then + EXIT_ERR('Incompatible zlib version'); + + WriteLn('zlib version: ', zlibVersion); + WriteLn('zlib compile flags: ', Format('0x%x', [zlibCompileFlags])); + + comprLen := 10000 * SizeOf(Integer); (* don't overflow on MSDOS *) + uncomprLen := comprLen; + GetMem(compr, comprLen); + GetMem(uncompr, uncomprLen); + if (compr = NIL) or (uncompr = NIL) then + EXIT_ERR('Out of memory'); + (* compr and uncompr are cleared to avoid reading uninitialized + * data and to ensure that uncompr compresses well. 
+ *) + FillChar(compr^, comprLen, 0); + FillChar(uncompr^, uncomprLen, 0); + + {$IFDEF TEST_COMPRESS} + WriteLn('** Testing compress'); + test_compress(compr, comprLen, uncompr, uncomprLen); + {$ENDIF} + + {$IFDEF TEST_GZIO} + WriteLn('** Testing gzio'); + if ParamCount >= 1 then + test_gzio(ParamStr(1), uncompr, uncomprLen) + else + test_gzio(TESTFILE, uncompr, uncomprLen); + {$ENDIF} + + {$IFDEF TEST_DEFLATE} + WriteLn('** Testing deflate with small buffers'); + test_deflate(compr, comprLen); + {$ENDIF} + {$IFDEF TEST_INFLATE} + WriteLn('** Testing inflate with small buffers'); + test_inflate(compr, comprLen, uncompr, uncomprLen); + {$ENDIF} + + {$IFDEF TEST_DEFLATE} + WriteLn('** Testing deflate with large buffers'); + test_large_deflate(compr, comprLen, uncompr, uncomprLen); + {$ENDIF} + {$IFDEF TEST_INFLATE} + WriteLn('** Testing inflate with large buffers'); + test_large_inflate(compr, comprLen, uncompr, uncomprLen); + {$ENDIF} + + {$IFDEF TEST_FLUSH} + WriteLn('** Testing deflate with full flush'); + test_flush(compr, comprLen); + {$ENDIF} + {$IFDEF TEST_SYNC} + WriteLn('** Testing inflateSync'); + test_sync(compr, comprLen, uncompr, uncomprLen); + {$ENDIF} + comprLen := uncomprLen; + + {$IFDEF TEST_DICT} + WriteLn('** Testing deflate and inflate with preset dictionary'); + test_dict_deflate(compr, comprLen); + test_dict_inflate(compr, comprLen, uncompr, uncomprLen); + {$ENDIF} + + FreeMem(compr, comprLen); + FreeMem(uncompr, uncomprLen); +end. diff -Nru nodejs-0.11.13/deps/zlib/contrib/pascal/readme.txt nodejs-0.11.15/deps/zlib/contrib/pascal/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/pascal/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/pascal/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,76 @@ + +This directory contains a Pascal (Delphi, Kylix) interface to the +zlib data compression library. 
+ + +Directory listing +================= + +zlibd32.mak makefile for Borland C++ +example.pas usage example of zlib +zlibpas.pas the Pascal interface to zlib +readme.txt this file + + +Compatibility notes +=================== + +- Although the name "zlib" would have been more normal for the + zlibpas unit, this name is already taken by Borland's ZLib unit. + This is somehow unfortunate, because that unit is not a genuine + interface to the full-fledged zlib functionality, but a suite of + class wrappers around zlib streams. Other essential features, + such as checksums, are missing. + It would have been more appropriate for that unit to have a name + like "ZStreams", or something similar. + +- The C and zlib-supplied types int, uInt, long, uLong, etc. are + translated directly into Pascal types of similar sizes (Integer, + LongInt, etc.), to avoid namespace pollution. In particular, + there is no conversion of unsigned int into a Pascal unsigned + integer. The Word type is non-portable and has the same size + (16 bits) both in a 16-bit and in a 32-bit environment, unlike + Integer. Even if there is a 32-bit Cardinal type, there is no + real need for unsigned int in zlib under a 32-bit environment. + +- Except for the callbacks, the zlib function interfaces are + assuming the calling convention normally used in Pascal + (__pascal for DOS and Windows16, __fastcall for Windows32). + Since the cdecl keyword is used, the old Turbo Pascal does + not work with this interface. + +- The gz* function interfaces are not translated, to avoid + interfacing problems with the C runtime library. Besides, + gzprintf(gzFile file, const char *format, ...) + cannot be translated into Pascal. + + +Legal issues +============ + +The zlibpas interface is: + Copyright (C) 1995-2003 Jean-loup Gailly and Mark Adler. + Copyright (C) 1998 by Bob Dellaca. + Copyright (C) 2003 by Cosmin Truta. + +The example program is: + Copyright (C) 1995-2003 by Jean-loup Gailly. 
+ Copyright (C) 1998,1999,2000 by Jacques Nomssi Nzali. + Copyright (C) 2003 by Cosmin Truta. + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + diff -Nru nodejs-0.11.13/deps/zlib/contrib/pascal/zlibd32.mak nodejs-0.11.15/deps/zlib/contrib/pascal/zlibd32.mak --- nodejs-0.11.13/deps/zlib/contrib/pascal/zlibd32.mak 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/pascal/zlibd32.mak 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,99 @@ +# Makefile for zlib +# For use with Delphi and C++ Builder under Win32 +# Updated for zlib 1.2.x by Cosmin Truta + +# ------------ Borland C++ ------------ + +# This project uses the Delphi (fastcall/register) calling convention: +LOC = -DZEXPORT=__fastcall -DZEXPORTVA=__cdecl + +CC = bcc32 +LD = bcc32 +AR = tlib +# do not use "-pr" in CFLAGS +CFLAGS = -a -d -k- -O2 $(LOC) +LDFLAGS = + + +# variables +ZLIB_LIB = zlib.lib + +OBJ1 = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj +OBJ2 = gzwrite.obj infback.obj inffast.obj inflate.obj inftrees.obj trees.obj uncompr.obj zutil.obj +OBJP1 = +adler32.obj+compress.obj+crc32.obj+deflate.obj+gzclose.obj+gzlib.obj+gzread.obj +OBJP2 = 
+gzwrite.obj+infback.obj+inffast.obj+inflate.obj+inftrees.obj+trees.obj+uncompr.obj+zutil.obj + + +# targets +all: $(ZLIB_LIB) example.exe minigzip.exe + +.c.obj: + $(CC) -c $(CFLAGS) $*.c + +adler32.obj: adler32.c zlib.h zconf.h + +compress.obj: compress.c zlib.h zconf.h + +crc32.obj: crc32.c zlib.h zconf.h crc32.h + +deflate.obj: deflate.c deflate.h zutil.h zlib.h zconf.h + +gzclose.obj: gzclose.c zlib.h zconf.h gzguts.h + +gzlib.obj: gzlib.c zlib.h zconf.h gzguts.h + +gzread.obj: gzread.c zlib.h zconf.h gzguts.h + +gzwrite.obj: gzwrite.c zlib.h zconf.h gzguts.h + +infback.obj: infback.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inffast.obj: inffast.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h + +inflate.obj: inflate.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inftrees.obj: inftrees.c zutil.h zlib.h zconf.h inftrees.h + +trees.obj: trees.c zutil.h zlib.h zconf.h deflate.h trees.h + +uncompr.obj: uncompr.c zlib.h zconf.h + +zutil.obj: zutil.c zutil.h zlib.h zconf.h + +example.obj: test/example.c zlib.h zconf.h + +minigzip.obj: test/minigzip.c zlib.h zconf.h + + +# For the sake of the old Borland make, +# the command line is cut to fit in the MS-DOS 128 byte limit: +$(ZLIB_LIB): $(OBJ1) $(OBJ2) + -del $(ZLIB_LIB) + $(AR) $(ZLIB_LIB) $(OBJP1) + $(AR) $(ZLIB_LIB) $(OBJP2) + + +# testing +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +example.exe: example.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) example.obj $(ZLIB_LIB) + +minigzip.exe: minigzip.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) minigzip.obj $(ZLIB_LIB) + + +# cleanup +clean: + -del *.obj + -del *.exe + -del *.lib + -del *.tds + -del zlib.bak + -del foo.gz + diff -Nru nodejs-0.11.13/deps/zlib/contrib/pascal/zlibpas.pas nodejs-0.11.15/deps/zlib/contrib/pascal/zlibpas.pas --- nodejs-0.11.13/deps/zlib/contrib/pascal/zlibpas.pas 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/zlib/contrib/pascal/zlibpas.pas 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,276 @@ +(* zlibpas -- Pascal interface to the zlib data compression library + * + * Copyright (C) 2003 Cosmin Truta. + * Derived from original sources by Bob Dellaca. + * For conditions of distribution and use, see copyright notice in readme.txt + *) + +unit zlibpas; + +interface + +const + ZLIB_VERSION = '1.2.8'; + ZLIB_VERNUM = $1280; + +type + alloc_func = function(opaque: Pointer; items, size: Integer): Pointer; + cdecl; + free_func = procedure(opaque, address: Pointer); + cdecl; + + in_func = function(opaque: Pointer; var buf: PByte): Integer; + cdecl; + out_func = function(opaque: Pointer; buf: PByte; size: Integer): Integer; + cdecl; + + z_streamp = ^z_stream; + z_stream = packed record + next_in: PChar; (* next input byte *) + avail_in: Integer; (* number of bytes available at next_in *) + total_in: LongInt; (* total nb of input bytes read so far *) + + next_out: PChar; (* next output byte should be put there *) + avail_out: Integer; (* remaining free space at next_out *) + total_out: LongInt; (* total nb of bytes output so far *) + + msg: PChar; (* last error message, NULL if no error *) + state: Pointer; (* not visible by applications *) + + zalloc: alloc_func; (* used to allocate the internal state *) + zfree: free_func; (* used to free the internal state *) + opaque: Pointer; (* private data object passed to zalloc and zfree *) + + data_type: Integer; (* best guess about the data type: ascii or binary *) + adler: LongInt; (* adler32 value of the uncompressed data *) + reserved: LongInt; (* reserved for future use *) + end; + + gz_headerp = ^gz_header; + gz_header = packed record + text: Integer; (* true if compressed data believed to be text *) + time: LongInt; (* modification time *) + xflags: Integer; (* extra flags (not used when writing a gzip file) *) + os: Integer; (* operating system *) + extra: PChar; (* pointer to extra field or Z_NULL if none *) 
+ extra_len: Integer; (* extra field length (valid if extra != Z_NULL) *) + extra_max: Integer; (* space at extra (only when reading header) *) + name: PChar; (* pointer to zero-terminated file name or Z_NULL *) + name_max: Integer; (* space at name (only when reading header) *) + comment: PChar; (* pointer to zero-terminated comment or Z_NULL *) + comm_max: Integer; (* space at comment (only when reading header) *) + hcrc: Integer; (* true if there was or will be a header crc *) + done: Integer; (* true when done reading gzip header *) + end; + +(* constants *) +const + Z_NO_FLUSH = 0; + Z_PARTIAL_FLUSH = 1; + Z_SYNC_FLUSH = 2; + Z_FULL_FLUSH = 3; + Z_FINISH = 4; + Z_BLOCK = 5; + Z_TREES = 6; + + Z_OK = 0; + Z_STREAM_END = 1; + Z_NEED_DICT = 2; + Z_ERRNO = -1; + Z_STREAM_ERROR = -2; + Z_DATA_ERROR = -3; + Z_MEM_ERROR = -4; + Z_BUF_ERROR = -5; + Z_VERSION_ERROR = -6; + + Z_NO_COMPRESSION = 0; + Z_BEST_SPEED = 1; + Z_BEST_COMPRESSION = 9; + Z_DEFAULT_COMPRESSION = -1; + + Z_FILTERED = 1; + Z_HUFFMAN_ONLY = 2; + Z_RLE = 3; + Z_FIXED = 4; + Z_DEFAULT_STRATEGY = 0; + + Z_BINARY = 0; + Z_TEXT = 1; + Z_ASCII = 1; + Z_UNKNOWN = 2; + + Z_DEFLATED = 8; + +(* basic functions *) +function zlibVersion: PChar; +function deflateInit(var strm: z_stream; level: Integer): Integer; +function deflate(var strm: z_stream; flush: Integer): Integer; +function deflateEnd(var strm: z_stream): Integer; +function inflateInit(var strm: z_stream): Integer; +function inflate(var strm: z_stream; flush: Integer): Integer; +function inflateEnd(var strm: z_stream): Integer; + +(* advanced functions *) +function deflateInit2(var strm: z_stream; level, method, windowBits, + memLevel, strategy: Integer): Integer; +function deflateSetDictionary(var strm: z_stream; const dictionary: PChar; + dictLength: Integer): Integer; +function deflateCopy(var dest, source: z_stream): Integer; +function deflateReset(var strm: z_stream): Integer; +function deflateParams(var strm: z_stream; level, strategy: Integer): 
Integer; +function deflateTune(var strm: z_stream; good_length, max_lazy, nice_length, max_chain: Integer): Integer; +function deflateBound(var strm: z_stream; sourceLen: LongInt): LongInt; +function deflatePending(var strm: z_stream; var pending: Integer; var bits: Integer): Integer; +function deflatePrime(var strm: z_stream; bits, value: Integer): Integer; +function deflateSetHeader(var strm: z_stream; head: gz_header): Integer; +function inflateInit2(var strm: z_stream; windowBits: Integer): Integer; +function inflateSetDictionary(var strm: z_stream; const dictionary: PChar; + dictLength: Integer): Integer; +function inflateSync(var strm: z_stream): Integer; +function inflateCopy(var dest, source: z_stream): Integer; +function inflateReset(var strm: z_stream): Integer; +function inflateReset2(var strm: z_stream; windowBits: Integer): Integer; +function inflatePrime(var strm: z_stream; bits, value: Integer): Integer; +function inflateMark(var strm: z_stream): LongInt; +function inflateGetHeader(var strm: z_stream; var head: gz_header): Integer; +function inflateBackInit(var strm: z_stream; + windowBits: Integer; window: PChar): Integer; +function inflateBack(var strm: z_stream; in_fn: in_func; in_desc: Pointer; + out_fn: out_func; out_desc: Pointer): Integer; +function inflateBackEnd(var strm: z_stream): Integer; +function zlibCompileFlags: LongInt; + +(* utility functions *) +function compress(dest: PChar; var destLen: LongInt; + const source: PChar; sourceLen: LongInt): Integer; +function compress2(dest: PChar; var destLen: LongInt; + const source: PChar; sourceLen: LongInt; + level: Integer): Integer; +function compressBound(sourceLen: LongInt): LongInt; +function uncompress(dest: PChar; var destLen: LongInt; + const source: PChar; sourceLen: LongInt): Integer; + +(* checksum functions *) +function adler32(adler: LongInt; const buf: PChar; len: Integer): LongInt; +function adler32_combine(adler1, adler2, len2: LongInt): LongInt; +function crc32(crc: LongInt; 
const buf: PChar; len: Integer): LongInt; +function crc32_combine(crc1, crc2, len2: LongInt): LongInt; + +(* various hacks, don't look :) *) +function deflateInit_(var strm: z_stream; level: Integer; + const version: PChar; stream_size: Integer): Integer; +function inflateInit_(var strm: z_stream; const version: PChar; + stream_size: Integer): Integer; +function deflateInit2_(var strm: z_stream; + level, method, windowBits, memLevel, strategy: Integer; + const version: PChar; stream_size: Integer): Integer; +function inflateInit2_(var strm: z_stream; windowBits: Integer; + const version: PChar; stream_size: Integer): Integer; +function inflateBackInit_(var strm: z_stream; + windowBits: Integer; window: PChar; + const version: PChar; stream_size: Integer): Integer; + + +implementation + +{$L adler32.obj} +{$L compress.obj} +{$L crc32.obj} +{$L deflate.obj} +{$L infback.obj} +{$L inffast.obj} +{$L inflate.obj} +{$L inftrees.obj} +{$L trees.obj} +{$L uncompr.obj} +{$L zutil.obj} + +function adler32; external; +function adler32_combine; external; +function compress; external; +function compress2; external; +function compressBound; external; +function crc32; external; +function crc32_combine; external; +function deflate; external; +function deflateBound; external; +function deflateCopy; external; +function deflateEnd; external; +function deflateInit_; external; +function deflateInit2_; external; +function deflateParams; external; +function deflatePending; external; +function deflatePrime; external; +function deflateReset; external; +function deflateSetDictionary; external; +function deflateSetHeader; external; +function deflateTune; external; +function inflate; external; +function inflateBack; external; +function inflateBackEnd; external; +function inflateBackInit_; external; +function inflateCopy; external; +function inflateEnd; external; +function inflateGetHeader; external; +function inflateInit_; external; +function inflateInit2_; external; +function inflateMark; 
external; +function inflatePrime; external; +function inflateReset; external; +function inflateReset2; external; +function inflateSetDictionary; external; +function inflateSync; external; +function uncompress; external; +function zlibCompileFlags; external; +function zlibVersion; external; + +function deflateInit(var strm: z_stream; level: Integer): Integer; +begin + Result := deflateInit_(strm, level, ZLIB_VERSION, sizeof(z_stream)); +end; + +function deflateInit2(var strm: z_stream; level, method, windowBits, memLevel, + strategy: Integer): Integer; +begin + Result := deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + ZLIB_VERSION, sizeof(z_stream)); +end; + +function inflateInit(var strm: z_stream): Integer; +begin + Result := inflateInit_(strm, ZLIB_VERSION, sizeof(z_stream)); +end; + +function inflateInit2(var strm: z_stream; windowBits: Integer): Integer; +begin + Result := inflateInit2_(strm, windowBits, ZLIB_VERSION, sizeof(z_stream)); +end; + +function inflateBackInit(var strm: z_stream; + windowBits: Integer; window: PChar): Integer; +begin + Result := inflateBackInit_(strm, windowBits, window, + ZLIB_VERSION, sizeof(z_stream)); +end; + +function _malloc(Size: Integer): Pointer; cdecl; +begin + GetMem(Result, Size); +end; + +procedure _free(Block: Pointer); cdecl; +begin + FreeMem(Block); +end; + +procedure _memset(P: Pointer; B: Byte; count: Integer); cdecl; +begin + FillChar(P^, count, B); +end; + +procedure _memcpy(dest, source: Pointer; count: Integer); cdecl; +begin + Move(source^, dest^, count); +end; + +end. 
diff -Nru nodejs-0.11.13/deps/zlib/contrib/puff/Makefile nodejs-0.11.15/deps/zlib/contrib/puff/Makefile --- nodejs-0.11.13/deps/zlib/contrib/puff/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/puff/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,42 @@ +CFLAGS=-O + +puff: puff.o pufftest.o + +puff.o: puff.h + +pufftest.o: puff.h + +test: puff + puff zeros.raw + +puft: puff.c puff.h pufftest.o + cc -fprofile-arcs -ftest-coverage -o puft puff.c pufftest.o + +# puff full coverage test (should say 100%) +cov: puft + @rm -f *.gcov *.gcda + @puft -w zeros.raw 2>&1 | cat > /dev/null + @echo '04' | xxd -r -p | puft 2> /dev/null || test $$? -eq 2 + @echo '00' | xxd -r -p | puft 2> /dev/null || test $$? -eq 2 + @echo '00 00 00 00 00' | xxd -r -p | puft 2> /dev/null || test $$? -eq 254 + @echo '00 01 00 fe ff' | xxd -r -p | puft 2> /dev/null || test $$? -eq 2 + @echo '01 01 00 fe ff 0a' | xxd -r -p | puft -f 2>&1 | cat > /dev/null + @echo '02 7e ff ff' | xxd -r -p | puft 2> /dev/null || test $$? -eq 246 + @echo '02' | xxd -r -p | puft 2> /dev/null || test $$? -eq 2 + @echo '04 80 49 92 24 49 92 24 0f b4 ff ff c3 04' | xxd -r -p | puft 2> /dev/null || test $$? -eq 2 + @echo '04 80 49 92 24 49 92 24 71 ff ff 93 11 00' | xxd -r -p | puft 2> /dev/null || test $$? -eq 249 + @echo '04 c0 81 08 00 00 00 00 20 7f eb 0b 00 00' | xxd -r -p | puft 2> /dev/null || test $$? -eq 246 + @echo '0b 00 00' | xxd -r -p | puft -f 2>&1 | cat > /dev/null + @echo '1a 07' | xxd -r -p | puft 2> /dev/null || test $$? -eq 246 + @echo '0c c0 81 00 00 00 00 00 90 ff 6b 04' | xxd -r -p | puft 2> /dev/null || test $$? -eq 245 + @puft -f zeros.raw 2>&1 | cat > /dev/null + @echo 'fc 00 00' | xxd -r -p | puft 2> /dev/null || test $$? -eq 253 + @echo '04 00 fe ff' | xxd -r -p | puft 2> /dev/null || test $$? -eq 252 + @echo '04 00 24 49' | xxd -r -p | puft 2> /dev/null || test $$? 
-eq 251 + @echo '04 80 49 92 24 49 92 24 0f b4 ff ff c3 84' | xxd -r -p | puft 2> /dev/null || test $$? -eq 248 + @echo '04 00 24 e9 ff ff' | xxd -r -p | puft 2> /dev/null || test $$? -eq 250 + @echo '04 00 24 e9 ff 6d' | xxd -r -p | puft 2> /dev/null || test $$? -eq 247 + @gcov -n puff.c + +clean: + rm -f puff puft *.o *.gc* diff -Nru nodejs-0.11.13/deps/zlib/contrib/puff/puff.c nodejs-0.11.15/deps/zlib/contrib/puff/puff.c --- nodejs-0.11.13/deps/zlib/contrib/puff/puff.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/puff/puff.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,840 @@ +/* + * puff.c + * Copyright (C) 2002-2013 Mark Adler + * For conditions of distribution and use, see copyright notice in puff.h + * version 2.3, 21 Jan 2013 + * + * puff.c is a simple inflate written to be an unambiguous way to specify the + * deflate format. It is not written for speed but rather simplicity. As a + * side benefit, this code might actually be useful when small code is more + * important than speed, such as bootstrap applications. For typical deflate + * data, zlib's inflate() is about four times as fast as puff(). zlib's + * inflate compiles to around 20K on my machine, whereas puff.c compiles to + * around 4K on my machine (a PowerPC using GNU cc). If the faster decode() + * function here is used, then puff() is only twice as slow as zlib's + * inflate(). + * + * All dynamically allocated memory comes from the stack. The stack required + * is less than 2K bytes. This code is compatible with 16-bit int's and + * assumes that long's are at least 32 bits. puff.c uses the short data type, + * assumed to be 16 bits, for arrays in order to to conserve memory. The code + * works whether integers are stored big endian or little endian. + * + * In the comments below are "Format notes" that describe the inflate process + * and document some of the less obvious aspects of the format. 
This source + * code is meant to supplement RFC 1951, which formally describes the deflate + * format: + * + * http://www.zlib.org/rfc-deflate.html + */ + +/* + * Change history: + * + * 1.0 10 Feb 2002 - First version + * 1.1 17 Feb 2002 - Clarifications of some comments and notes + * - Update puff() dest and source pointers on negative + * errors to facilitate debugging deflators + * - Remove longest from struct huffman -- not needed + * - Simplify offs[] index in construct() + * - Add input size and checking, using longjmp() to + * maintain easy readability + * - Use short data type for large arrays + * - Use pointers instead of long to specify source and + * destination sizes to avoid arbitrary 4 GB limits + * 1.2 17 Mar 2002 - Add faster version of decode(), doubles speed (!), + * but leave simple version for readabilty + * - Make sure invalid distances detected if pointers + * are 16 bits + * - Fix fixed codes table error + * - Provide a scanning mode for determining size of + * uncompressed data + * 1.3 20 Mar 2002 - Go back to lengths for puff() parameters [Gailly] + * - Add a puff.h file for the interface + * - Add braces in puff() for else do [Gailly] + * - Use indexes instead of pointers for readability + * 1.4 31 Mar 2002 - Simplify construct() code set check + * - Fix some comments + * - Add FIXLCODES #define + * 1.5 6 Apr 2002 - Minor comment fixes + * 1.6 7 Aug 2002 - Minor format changes + * 1.7 3 Mar 2003 - Added test code for distribution + * - Added zlib-like license + * 1.8 9 Jan 2004 - Added some comments on no distance codes case + * 1.9 21 Feb 2008 - Fix bug on 16-bit integer architectures [Pohland] + * - Catch missing end-of-block symbol error + * 2.0 25 Jul 2008 - Add #define to permit distance too far back + * - Add option in TEST code for puff to write the data + * - Add option in TEST code to skip input bytes + * - Allow TEST code to read from piped stdin + * 2.1 4 Apr 2010 - Avoid variable initialization for happier compilers + * - 
Avoid unsigned comparisons for even happier compilers + * 2.2 25 Apr 2010 - Fix bug in variable initializations [Oberhumer] + * - Add const where appropriate [Oberhumer] + * - Split if's and ?'s for coverage testing + * - Break out test code to separate file + * - Move NIL to puff.h + * - Allow incomplete code only if single code length is 1 + * - Add full code coverage test to Makefile + * 2.3 21 Jan 2013 - Check for invalid code length codes in dynamic blocks + */ + +#include <setjmp.h> /* for setjmp(), longjmp(), and jmp_buf */ +#include "puff.h" /* prototype for puff() */ + +#define local static /* for local function definitions */ + +/* + * Maximums for allocations and loops. It is not useful to change these -- + * they are fixed by the deflate format. + */ +#define MAXBITS 15 /* maximum bits in a code */ +#define MAXLCODES 286 /* maximum number of literal/length codes */ +#define MAXDCODES 30 /* maximum number of distance codes */ +#define MAXCODES (MAXLCODES+MAXDCODES) /* maximum codes lengths to read */ +#define FIXLCODES 288 /* number of fixed literal/length codes */ + +/* input and output state */ +struct state { + /* output state */ + unsigned char *out; /* output buffer */ + unsigned long outlen; /* available space at out */ + unsigned long outcnt; /* bytes written to out so far */ + + /* input state */ + const unsigned char *in; /* input buffer */ + unsigned long inlen; /* available input at in */ + unsigned long incnt; /* bytes read so far */ + int bitbuf; /* bit buffer */ + int bitcnt; /* number of bits in bit buffer */ + + /* input limit error return state for bits() and decode() */ + jmp_buf env; +}; + +/* + * Return need bits from the input stream. This always leaves less than + * eight bits in the buffer. bits() works properly for need == 0. + * + * Format notes: + * + * - Bits are stored in bytes from the least significant bit to the most + * significant bit. 
Therefore bits are dropped from the bottom of the bit + * buffer, using shift right, and new bytes are appended to the top of the + * bit buffer, using shift left. + */ +local int bits(struct state *s, int need) +{ + long val; /* bit accumulator (can use up to 20 bits) */ + + /* load at least need bits into val */ + val = s->bitbuf; + while (s->bitcnt < need) { + if (s->incnt == s->inlen) + longjmp(s->env, 1); /* out of input */ + val |= (long)(s->in[s->incnt++]) << s->bitcnt; /* load eight bits */ + s->bitcnt += 8; + } + + /* drop need bits and update buffer, always zero to seven bits left */ + s->bitbuf = (int)(val >> need); + s->bitcnt -= need; + + /* return need bits, zeroing the bits above that */ + return (int)(val & ((1L << need) - 1)); +} + +/* + * Process a stored block. + * + * Format notes: + * + * - After the two-bit stored block type (00), the stored block length and + * stored bytes are byte-aligned for fast copying. Therefore any leftover + * bits in the byte that has the last bit of the type, as many as seven, are + * discarded. The value of the discarded bits are not defined and should not + * be checked against any expectation. + * + * - The second inverted copy of the stored block length does not have to be + * checked, but it's probably a good idea to do so anyway. + * + * - A stored block can have zero length. This is sometimes used to byte-align + * subsets of the compressed data for random access or partial recovery. + */ +local int stored(struct state *s) +{ + unsigned len; /* length of stored block */ + + /* discard leftover bits from current byte (assumes s->bitcnt < 8) */ + s->bitbuf = 0; + s->bitcnt = 0; + + /* get length and check against its one's complement */ + if (s->incnt + 4 > s->inlen) + return 2; /* not enough input */ + len = s->in[s->incnt++]; + len |= s->in[s->incnt++] << 8; + if (s->in[s->incnt++] != (~len & 0xff) || + s->in[s->incnt++] != ((~len >> 8) & 0xff)) + return -2; /* didn't match complement! 
*/ + + /* copy len bytes from in to out */ + if (s->incnt + len > s->inlen) + return 2; /* not enough input */ + if (s->out != NIL) { + if (s->outcnt + len > s->outlen) + return 1; /* not enough output space */ + while (len--) + s->out[s->outcnt++] = s->in[s->incnt++]; + } + else { /* just scanning */ + s->outcnt += len; + s->incnt += len; + } + + /* done with a valid stored block */ + return 0; +} + +/* + * Huffman code decoding tables. count[1..MAXBITS] is the number of symbols of + * each length, which for a canonical code are stepped through in order. + * symbol[] are the symbol values in canonical order, where the number of + * entries is the sum of the counts in count[]. The decoding process can be + * seen in the function decode() below. + */ +struct huffman { + short *count; /* number of symbols of each length */ + short *symbol; /* canonically ordered symbols */ +}; + +/* + * Decode a code from the stream s using huffman table h. Return the symbol or + * a negative value if there is an error. If all of the lengths are zero, i.e. + * an empty code, or if the code is incomplete and an invalid code is received, + * then -10 is returned after reading MAXBITS bits. + * + * Format notes: + * + * - The codes as stored in the compressed data are bit-reversed relative to + * a simple integer ordering of codes of the same lengths. Hence below the + * bits are pulled from the compressed data one at a time and used to + * build the code value reversed from what is in the stream in order to + * permit simple integer comparisons for decoding. A table-based decoding + * scheme (as used in zlib) does not need to do this reversal. + * + * - The first code for the shortest length is all zeros. Subsequent codes of + * the same length are simply integer increments of the previous code. When + * moving up a length, a zero bit is appended to the code. For a complete + * code, the last code of the longest length will be all ones. 
+ * + * - Incomplete codes are handled by this decoder, since they are permitted + * in the deflate format. See the format notes for fixed() and dynamic(). + */ +#ifdef SLOW +local int decode(struct state *s, const struct huffman *h) +{ + int len; /* current number of bits in code */ + int code; /* len bits being decoded */ + int first; /* first code of length len */ + int count; /* number of codes of length len */ + int index; /* index of first code of length len in symbol table */ + + code = first = index = 0; + for (len = 1; len <= MAXBITS; len++) { + code |= bits(s, 1); /* get next bit */ + count = h->count[len]; + if (code - count < first) /* if length len, return symbol */ + return h->symbol[index + (code - first)]; + index += count; /* else update for next length */ + first += count; + first <<= 1; + code <<= 1; + } + return -10; /* ran out of codes */ +} + +/* + * A faster version of decode() for real applications of this code. It's not + * as readable, but it makes puff() twice as fast. And it only makes the code + * a few percent larger. 
+ */ +#else /* !SLOW */ +local int decode(struct state *s, const struct huffman *h) +{ + int len; /* current number of bits in code */ + int code; /* len bits being decoded */ + int first; /* first code of length len */ + int count; /* number of codes of length len */ + int index; /* index of first code of length len in symbol table */ + int bitbuf; /* bits from stream */ + int left; /* bits left in next or left to process */ + short *next; /* next number of codes */ + + bitbuf = s->bitbuf; + left = s->bitcnt; + code = first = index = 0; + len = 1; + next = h->count + 1; + while (1) { + while (left--) { + code |= bitbuf & 1; + bitbuf >>= 1; + count = *next++; + if (code - count < first) { /* if length len, return symbol */ + s->bitbuf = bitbuf; + s->bitcnt = (s->bitcnt - len) & 7; + return h->symbol[index + (code - first)]; + } + index += count; /* else update for next length */ + first += count; + first <<= 1; + code <<= 1; + len++; + } + left = (MAXBITS+1) - len; + if (left == 0) + break; + if (s->incnt == s->inlen) + longjmp(s->env, 1); /* out of input */ + bitbuf = s->in[s->incnt++]; + if (left > 8) + left = 8; + } + return -10; /* ran out of codes */ +} +#endif /* SLOW */ + +/* + * Given the list of code lengths length[0..n-1] representing a canonical + * Huffman code for n symbols, construct the tables required to decode those + * codes. Those tables are the number of codes of each length, and the symbols + * sorted by length, retaining their original order within each length. The + * return value is zero for a complete code set, negative for an over- + * subscribed code set, and positive for an incomplete code set. The tables + * can be used if the return value is zero or positive, but they cannot be used + * if the return value is negative. If the return value is zero, it is not + * possible for decode() using that table to return an error--any stream of + * enough bits will resolve to a symbol. 
If the return value is positive, then + * it is possible for decode() using that table to return an error for received + * codes past the end of the incomplete lengths. + * + * Not used by decode(), but used for error checking, h->count[0] is the number + * of the n symbols not in the code. So n - h->count[0] is the number of + * codes. This is useful for checking for incomplete codes that have more than + * one symbol, which is an error in a dynamic block. + * + * Assumption: for all i in 0..n-1, 0 <= length[i] <= MAXBITS + * This is assured by the construction of the length arrays in dynamic() and + * fixed() and is not verified by construct(). + * + * Format notes: + * + * - Permitted and expected examples of incomplete codes are one of the fixed + * codes and any code with a single symbol which in deflate is coded as one + * bit instead of zero bits. See the format notes for fixed() and dynamic(). + * + * - Within a given code length, the symbols are kept in ascending order for + * the code bits definition. + */ +local int construct(struct huffman *h, const short *length, int n) +{ + int symbol; /* current symbol when stepping through length[] */ + int len; /* current length when stepping through h->count[] */ + int left; /* number of possible codes left of current length */ + short offs[MAXBITS+1]; /* offsets in symbol table for each length */ + + /* count number of codes of each length */ + for (len = 0; len <= MAXBITS; len++) + h->count[len] = 0; + for (symbol = 0; symbol < n; symbol++) + (h->count[length[symbol]])++; /* assumes lengths are within bounds */ + if (h->count[0] == n) /* no codes! 
*/ + return 0; /* complete, but decode() will fail */ + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; /* one possible code of zero length */ + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; /* one more bit, double codes left */ + left -= h->count[len]; /* deduct count from possible codes */ + if (left < 0) + return left; /* over-subscribed--return negative */ + } /* left > 0 means incomplete */ + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) + offs[len + 1] = offs[len] + h->count[len]; + + /* + * put symbols in table sorted by length, by symbol order within each + * length + */ + for (symbol = 0; symbol < n; symbol++) + if (length[symbol] != 0) + h->symbol[offs[length[symbol]]++] = symbol; + + /* return zero for complete set, positive for incomplete set */ + return left; +} + +/* + * Decode literal/length and distance codes until an end-of-block code. + * + * Format notes: + * + * - Compressed data that is after the block type if fixed or after the code + * description if dynamic is a combination of literals and length/distance + * pairs terminated by and end-of-block code. Literals are simply Huffman + * coded bytes. A length/distance pair is a coded length followed by a + * coded distance to represent a string that occurs earlier in the + * uncompressed data that occurs again at the current location. + * + * - Literals, lengths, and the end-of-block code are combined into a single + * code of up to 286 symbols. They are 256 literals (0..255), 29 length + * symbols (257..285), and the end-of-block symbol (256). + * + * - There are 256 possible lengths (3..258), and so 29 symbols are not enough + * to represent all of those. Lengths 3..10 and 258 are in fact represented + * by just a length symbol. Lengths 11..257 are represented as a symbol and + * some number of extra bits that are added as an integer to the base length + * of the length symbol. 
The number of extra bits is determined by the base + * length symbol. These are in the static arrays below, lens[] for the base + * lengths and lext[] for the corresponding number of extra bits. + * + * - The reason that 258 gets its own symbol is that the longest length is used + * often in highly redundant files. Note that 258 can also be coded as the + * base value 227 plus the maximum extra value of 31. While a good deflate + * should never do this, it is not an error, and should be decoded properly. + * + * - If a length is decoded, including its extra bits if any, then it is + * followed a distance code. There are up to 30 distance symbols. Again + * there are many more possible distances (1..32768), so extra bits are added + * to a base value represented by the symbol. The distances 1..4 get their + * own symbol, but the rest require extra bits. The base distances and + * corresponding number of extra bits are below in the static arrays dist[] + * and dext[]. + * + * - Literal bytes are simply written to the output. A length/distance pair is + * an instruction to copy previously uncompressed bytes to the output. The + * copy is from distance bytes back in the output stream, copying for length + * bytes. + * + * - Distances pointing before the beginning of the output data are not + * permitted. + * + * - Overlapped copies, where the length is greater than the distance, are + * allowed and common. For example, a distance of one and a length of 258 + * simply copies the last byte 258 times. A distance of four and a length of + * twelve copies the last four bytes three times. A simple forward copy + * ignoring whether the length is greater than the distance or not implements + * this correctly. You should not use memcpy() since its behavior is not + * defined for overlapped arrays. You should not use memmove() or bcopy() + * since though their behavior -is- defined for overlapping arrays, it is + * defined to do the wrong thing in this case. 
+ */ +local int codes(struct state *s, + const struct huffman *lencode, + const struct huffman *distcode) +{ + int symbol; /* decoded symbol */ + int len; /* length for copy */ + unsigned dist; /* distance for copy */ + static const short lens[29] = { /* Size base for length codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258}; + static const short lext[29] = { /* Extra bits for length codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0}; + static const short dists[30] = { /* Offset base for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; + static const short dext[30] = { /* Extra bits for distance codes 0..29 */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + + /* decode literals and length/distance pairs */ + do { + symbol = decode(s, lencode); + if (symbol < 0) + return symbol; /* invalid symbol */ + if (symbol < 256) { /* literal: symbol is the byte */ + /* write out the literal */ + if (s->out != NIL) { + if (s->outcnt == s->outlen) + return 1; + s->out[s->outcnt] = symbol; + } + s->outcnt++; + } + else if (symbol > 256) { /* length */ + /* get and compute length */ + symbol -= 257; + if (symbol >= 29) + return -10; /* invalid fixed code */ + len = lens[symbol] + bits(s, lext[symbol]); + + /* get and check distance */ + symbol = decode(s, distcode); + if (symbol < 0) + return symbol; /* invalid symbol */ + dist = dists[symbol] + bits(s, dext[symbol]); +#ifndef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + if (dist > s->outcnt) + return -11; /* distance too far back */ +#endif + + /* copy length bytes from distance bytes back */ + if (s->out != NIL) { + if (s->outcnt + len > s->outlen) + return 1; + while (len--) { + s->out[s->outcnt] = +#ifdef 
INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + dist > s->outcnt ? + 0 : +#endif + s->out[s->outcnt - dist]; + s->outcnt++; + } + } + else + s->outcnt += len; + } + } while (symbol != 256); /* end of block symbol */ + + /* done with a valid fixed or dynamic block */ + return 0; +} + +/* + * Process a fixed codes block. + * + * Format notes: + * + * - This block type can be useful for compressing small amounts of data for + * which the size of the code descriptions in a dynamic block exceeds the + * benefit of custom codes for that block. For fixed codes, no bits are + * spent on code descriptions. Instead the code lengths for literal/length + * codes and distance codes are fixed. The specific lengths for each symbol + * can be seen in the "for" loops below. + * + * - The literal/length code is complete, but has two symbols that are invalid + * and should result in an error if received. This cannot be implemented + * simply as an incomplete code since those two symbols are in the "middle" + * of the code. They are eight bits long and the longest literal/length\ + * code is nine bits. Therefore the code must be constructed with those + * symbols, and the invalid symbols must be detected after decoding. + * + * - The fixed distance codes also have two invalid symbols that should result + * in an error if received. Since all of the distance codes are the same + * length, this can be implemented as an incomplete code. Then the invalid + * codes are detected while decoding. 
+ */ +local int fixed(struct state *s) +{ + static int virgin = 1; + static short lencnt[MAXBITS+1], lensym[FIXLCODES]; + static short distcnt[MAXBITS+1], distsym[MAXDCODES]; + static struct huffman lencode, distcode; + + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + int symbol; + short lengths[FIXLCODES]; + + /* construct lencode and distcode */ + lencode.count = lencnt; + lencode.symbol = lensym; + distcode.count = distcnt; + distcode.symbol = distsym; + + /* literal/length table */ + for (symbol = 0; symbol < 144; symbol++) + lengths[symbol] = 8; + for (; symbol < 256; symbol++) + lengths[symbol] = 9; + for (; symbol < 280; symbol++) + lengths[symbol] = 7; + for (; symbol < FIXLCODES; symbol++) + lengths[symbol] = 8; + construct(&lencode, lengths, FIXLCODES); + + /* distance table */ + for (symbol = 0; symbol < MAXDCODES; symbol++) + lengths[symbol] = 5; + construct(&distcode, lengths, MAXDCODES); + + /* do this just once */ + virgin = 0; + } + + /* decode data until end-of-block code */ + return codes(s, &lencode, &distcode); +} + +/* + * Process a dynamic codes block. + * + * Format notes: + * + * - A dynamic block starts with a description of the literal/length and + * distance codes for that block. New dynamic blocks allow the compressor to + * rapidly adapt to changing data with new codes optimized for that data. + * + * - The codes used by the deflate format are "canonical", which means that + * the actual bits of the codes are generated in an unambiguous way simply + * from the number of bits in each code. Therefore the code descriptions + * are simply a list of code lengths for each symbol. + * + * - The code lengths are stored in order for the symbols, so lengths are + * provided for each of the literal/length symbols, and for each of the + * distance symbols. + * + * - If a symbol is not used in the block, this is represented by a zero as + * as the code length. 
This does not mean a zero-length code, but rather + * that no code should be created for this symbol. There is no way in the + * deflate format to represent a zero-length code. + * + * - The maximum number of bits in a code is 15, so the possible lengths for + * any code are 1..15. + * + * - The fact that a length of zero is not permitted for a code has an + * interesting consequence. Normally if only one symbol is used for a given + * code, then in fact that code could be represented with zero bits. However + * in deflate, that code has to be at least one bit. So for example, if + * only a single distance base symbol appears in a block, then it will be + * represented by a single code of length one, in particular one 0 bit. This + * is an incomplete code, since if a 1 bit is received, it has no meaning, + * and should result in an error. So incomplete distance codes of one symbol + * should be permitted, and the receipt of invalid codes should be handled. + * + * - It is also possible to have a single literal/length code, but that code + * must be the end-of-block code, since every dynamic block has one. This + * is not the most efficient way to create an empty block (an empty fixed + * block is fewer bits), but it is allowed by the format. So incomplete + * literal/length codes of one symbol should also be permitted. + * + * - If there are only literal codes and no lengths, then there are no distance + * codes. This is represented by one distance code with zero bits. + * + * - The list of up to 286 length/literal lengths and up to 30 distance lengths + * are themselves compressed using Huffman codes and run-length encoding. In + * the list of code lengths, a 0 symbol means no code, a 1..15 symbol means + * that length, and the symbols 16, 17, and 18 are run-length instructions. + * Each of 16, 17, and 18 are follwed by extra bits to define the length of + * the run. 16 copies the last length 3 to 6 times. 
17 represents 3 to 10 + * zero lengths, and 18 represents 11 to 138 zero lengths. Unused symbols + * are common, hence the special coding for zero lengths. + * + * - The symbols for 0..18 are Huffman coded, and so that code must be + * described first. This is simply a sequence of up to 19 three-bit values + * representing no code (0) or the code length for that symbol (1..7). + * + * - A dynamic block starts with three fixed-size counts from which is computed + * the number of literal/length code lengths, the number of distance code + * lengths, and the number of code length code lengths (ok, you come up with + * a better name!) in the code descriptions. For the literal/length and + * distance codes, lengths after those provided are considered zero, i.e. no + * code. The code length code lengths are received in a permuted order (see + * the order[] array below) to make a short code length code length list more + * likely. As it turns out, very short and very long codes are less likely + * to be seen in a dynamic code description, hence what may appear initially + * to be a peculiar ordering. + * + * - Given the number of literal/length code lengths (nlen) and distance code + * lengths (ndist), then they are treated as one long list of nlen + ndist + * code lengths. Therefore run-length coding can and often does cross the + * boundary between the two sets of lengths. + * + * - So to summarize, the code description at the start of a dynamic block is + * three counts for the number of code lengths for the literal/length codes, + * the distance codes, and the code length codes. This is followed by the + * code length code lengths, three bits each. This is used to construct the + * code length code which is used to read the remainder of the lengths. Then + * the literal/length code lengths and distance lengths are read as a single + * set of lengths using the code length codes. 
Codes are constructed from + * the resulting two sets of lengths, and then finally you can start + * decoding actual compressed data in the block. + * + * - For reference, a "typical" size for the code description in a dynamic + * block is around 80 bytes. + */ +local int dynamic(struct state *s) +{ + int nlen, ndist, ncode; /* number of lengths in descriptor */ + int index; /* index of lengths[] */ + int err; /* construct() return value */ + short lengths[MAXCODES]; /* descriptor code lengths */ + short lencnt[MAXBITS+1], lensym[MAXLCODES]; /* lencode memory */ + short distcnt[MAXBITS+1], distsym[MAXDCODES]; /* distcode memory */ + struct huffman lencode, distcode; /* length and distance codes */ + static const short order[19] = /* permutation of code length codes */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + /* construct lencode and distcode */ + lencode.count = lencnt; + lencode.symbol = lensym; + distcode.count = distcnt; + distcode.symbol = distsym; + + /* get number of lengths in each table, check lengths */ + nlen = bits(s, 5) + 257; + ndist = bits(s, 5) + 1; + ncode = bits(s, 4) + 4; + if (nlen > MAXLCODES || ndist > MAXDCODES) + return -3; /* bad counts */ + + /* read code length code lengths (really), missing lengths are zero */ + for (index = 0; index < ncode; index++) + lengths[order[index]] = bits(s, 3); + for (; index < 19; index++) + lengths[order[index]] = 0; + + /* build huffman table for code lengths codes (use lencode temporarily) */ + err = construct(&lencode, lengths, 19); + if (err != 0) /* require complete code set here */ + return -4; + + /* read length/literal and distance code length tables */ + index = 0; + while (index < nlen + ndist) { + int symbol; /* decoded value */ + int len; /* last length to repeat */ + + symbol = decode(s, &lencode); + if (symbol < 0) + return symbol; /* invalid symbol */ + if (symbol < 16) /* length in 0..15 */ + lengths[index++] = symbol; + else { /* repeat instruction */ + len = 
0; /* assume repeating zeros */ + if (symbol == 16) { /* repeat last length 3..6 times */ + if (index == 0) + return -5; /* no last length! */ + len = lengths[index - 1]; /* last length */ + symbol = 3 + bits(s, 2); + } + else if (symbol == 17) /* repeat zero 3..10 times */ + symbol = 3 + bits(s, 3); + else /* == 18, repeat zero 11..138 times */ + symbol = 11 + bits(s, 7); + if (index + symbol > nlen + ndist) + return -6; /* too many lengths! */ + while (symbol--) /* repeat last or zero symbol times */ + lengths[index++] = len; + } + } + + /* check for end-of-block code -- there better be one! */ + if (lengths[256] == 0) + return -9; + + /* build huffman table for literal/length codes */ + err = construct(&lencode, lengths, nlen); + if (err && (err < 0 || nlen != lencode.count[0] + lencode.count[1])) + return -7; /* incomplete code ok only for single length 1 code */ + + /* build huffman table for distance codes */ + err = construct(&distcode, lengths + nlen, ndist); + if (err && (err < 0 || ndist != distcode.count[0] + distcode.count[1])) + return -8; /* incomplete code ok only for single length 1 code */ + + /* decode data until end-of-block code */ + return codes(s, &lencode, &distcode); +} + +/* + * Inflate source to dest. On return, destlen and sourcelen are updated to the + * size of the uncompressed data and the size of the deflate data respectively. + * On success, the return value of puff() is zero. If there is an error in the + * source data, i.e. it is not in the deflate format, then a negative value is + * returned. If there is not enough input available or there is not enough + * output space, then a positive error is returned. In that case, destlen and + * sourcelen are not updated to facilitate retrying from the beginning with the + * provision of more input data or more output space. In the case of invalid + * inflate data (a negative error), the dest and source pointers are updated to + * facilitate the debugging of deflators. 
+ * + * puff() also has a mode to determine the size of the uncompressed output with + * no output written. For this dest must be (unsigned char *)0. In this case, + * the input value of *destlen is ignored, and on return *destlen is set to the + * size of the uncompressed output. + * + * The return codes are: + * + * 2: available inflate data did not terminate + * 1: output space exhausted before completing inflate + * 0: successful inflate + * -1: invalid block type (type == 3) + * -2: stored block length did not match one's complement + * -3: dynamic block code description: too many length or distance codes + * -4: dynamic block code description: code lengths codes incomplete + * -5: dynamic block code description: repeat lengths with no first length + * -6: dynamic block code description: repeat more than specified lengths + * -7: dynamic block code description: invalid literal/length code lengths + * -8: dynamic block code description: invalid distance code lengths + * -9: dynamic block code description: missing end-of-block code + * -10: invalid literal/length or distance code in fixed or dynamic block + * -11: distance is too far back in fixed or dynamic block + * + * Format notes: + * + * - Three bits are read for each block to determine the kind of block and + * whether or not it is the last block. Then the block is decoded and the + * process repeated if it was not the last block. + * + * - The leftover bits in the last byte of the deflate data after the last + * block (if it was a fixed or dynamic block) are undefined and have no + * expected values to check. 
+ */ +int puff(unsigned char *dest, /* pointer to destination pointer */ + unsigned long *destlen, /* amount of output space */ + const unsigned char *source, /* pointer to source data pointer */ + unsigned long *sourcelen) /* amount of input available */ +{ + struct state s; /* input/output state */ + int last, type; /* block information */ + int err; /* return value */ + + /* initialize output state */ + s.out = dest; + s.outlen = *destlen; /* ignored if dest is NIL */ + s.outcnt = 0; + + /* initialize input state */ + s.in = source; + s.inlen = *sourcelen; + s.incnt = 0; + s.bitbuf = 0; + s.bitcnt = 0; + + /* return if bits() or decode() tries to read past available input */ + if (setjmp(s.env) != 0) /* if came back here via longjmp() */ + err = 2; /* then skip do-loop, return error */ + else { + /* process blocks until last block or error */ + do { + last = bits(&s, 1); /* one if last block */ + type = bits(&s, 2); /* block type 0..3 */ + err = type == 0 ? + stored(&s) : + (type == 1 ? + fixed(&s) : + (type == 2 ? + dynamic(&s) : + -1)); /* type == 3, invalid */ + if (err != 0) + break; /* return with error */ + } while (!last); + } + + /* update the lengths and return */ + if (err <= 0) { + *destlen = s.outcnt; + *sourcelen = s.incnt; + } + return err; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/puff/puff.h nodejs-0.11.15/deps/zlib/contrib/puff/puff.h --- nodejs-0.11.13/deps/zlib/contrib/puff/puff.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/puff/puff.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,35 @@ +/* puff.h + Copyright (C) 2002-2013 Mark Adler, all rights reserved + version 2.3, 21 Jan 2013 + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. 
+ + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Mark Adler madler@alumni.caltech.edu + */ + + +/* + * See puff.c for purpose and usage. + */ +#ifndef NIL +# define NIL ((unsigned char *)0) /* for no output option */ +#endif + +int puff(unsigned char *dest, /* pointer to destination pointer */ + unsigned long *destlen, /* amount of output space */ + const unsigned char *source, /* pointer to source data pointer */ + unsigned long *sourcelen); /* amount of input available */ diff -Nru nodejs-0.11.13/deps/zlib/contrib/puff/pufftest.c nodejs-0.11.15/deps/zlib/contrib/puff/pufftest.c --- nodejs-0.11.13/deps/zlib/contrib/puff/pufftest.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/puff/pufftest.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,165 @@ +/* + * pufftest.c + * Copyright (C) 2002-2013 Mark Adler + * For conditions of distribution and use, see copyright notice in puff.h + * version 2.3, 21 Jan 2013 + */ + +/* Example of how to use puff(). + + Usage: puff [-w] [-f] [-nnn] file + ... | puff [-w] [-f] [-nnn] + + where file is the input file with deflate data, nnn is the number of bytes + of input to skip before inflating (e.g. to skip a zlib or gzip header), and + -w is used to write the decompressed data to stdout. 
-f is for coverage + testing, and causes pufftest to fail with not enough output space (-f does + a write like -w, so -w is not required). */ + +#include <stdio.h> +#include <stdlib.h> +#include "puff.h" + +#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__) +# include <fcntl.h> +# include <io.h> +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +#define local static + +/* Return size times approximately the cube root of 2, keeping the result as 1, + 3, or 5 times a power of 2 -- the result is always > size, until the result + is the maximum value of an unsigned long, where it remains. This is useful + to keep reallocations less than ~33% over the actual data. */ +local size_t bythirds(size_t size) +{ + int n; + size_t m; + + m = size; + for (n = 0; m; n++) + m >>= 1; + if (n < 3) + return size + 1; + n -= 3; + m = size >> n; + m += m == 6 ? 2 : 1; + m <<= n; + return m > size ? m : (size_t)(-1); +} + +/* Read the input file *name, or stdin if name is NULL, into allocated memory. + Reallocate to larger buffers until the entire file is read in. Return a + pointer to the allocated data, or NULL if there was a memory allocation + failure. *len is the number of bytes of data read from the input file (even + if load() returns NULL). If the input file was empty or could not be opened + or read, *len is zero. */ +local void *load(const char *name, size_t *len) +{ + size_t size; + void *buf, *swap; + FILE *in; + + *len = 0; + buf = malloc(size = 4096); + if (buf == NULL) + return NULL; + in = name == NULL ? 
stdin : fopen(name, "rb"); + if (in != NULL) { + for (;;) { + *len += fread((char *)buf + *len, 1, size - *len, in); + if (*len < size) break; + size = bythirds(size); + if (size == *len || (swap = realloc(buf, size)) == NULL) { + free(buf); + buf = NULL; + break; + } + buf = swap; + } + fclose(in); + } + return buf; +} + +int main(int argc, char **argv) +{ + int ret, put = 0, fail = 0; + unsigned skip = 0; + char *arg, *name = NULL; + unsigned char *source = NULL, *dest; + size_t len = 0; + unsigned long sourcelen, destlen; + + /* process arguments */ + while (arg = *++argv, --argc) + if (arg[0] == '-') { + if (arg[1] == 'w' && arg[2] == 0) + put = 1; + else if (arg[1] == 'f' && arg[2] == 0) + fail = 1, put = 1; + else if (arg[1] >= '0' && arg[1] <= '9') + skip = (unsigned)atoi(arg + 1); + else { + fprintf(stderr, "invalid option %s\n", arg); + return 3; + } + } + else if (name != NULL) { + fprintf(stderr, "only one file name allowed\n"); + return 3; + } + else + name = arg; + source = load(name, &len); + if (source == NULL) { + fprintf(stderr, "memory allocation failure\n"); + return 4; + } + if (len == 0) { + fprintf(stderr, "could not read %s, or it was empty\n", + name == NULL ? 
"<stdin>" : name); + free(source); + return 3; + } + if (skip >= len) { + fprintf(stderr, "skip request of %d leaves no input\n", skip); + free(source); + return 3; + } + + /* test inflate data with offset skip */ + len -= skip; + sourcelen = (unsigned long)len; + ret = puff(NIL, &destlen, source + skip, &sourcelen); + if (ret) + fprintf(stderr, "puff() failed with return code %d\n", ret); + else { + fprintf(stderr, "puff() succeeded uncompressing %lu bytes\n", destlen); + if (sourcelen < len) fprintf(stderr, "%lu compressed bytes unused\n", + len - sourcelen); + } + + /* if requested, inflate again and write decompressd data to stdout */ + if (put && ret == 0) { + if (fail) + destlen >>= 1; + dest = malloc(destlen); + if (dest == NULL) { + fprintf(stderr, "memory allocation failure\n"); + free(source); + return 4; + } + puff(dest, &destlen, source + skip, &sourcelen); + SET_BINARY_MODE(stdout); + fwrite(dest, 1, destlen, stdout); + free(dest); + } + + /* clean up */ + free(source); + return ret; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/puff/README nodejs-0.11.15/deps/zlib/contrib/puff/README --- nodejs-0.11.13/deps/zlib/contrib/puff/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/puff/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,63 @@ +Puff -- A Simple Inflate +3 Mar 2003 +Mark Adler +madler@alumni.caltech.edu + +What this is -- + +puff.c provides the routine puff() to decompress the deflate data format. It +does so more slowly than zlib, but the code is about one-fifth the size of the +inflate code in zlib, and written to be very easy to read. + +Why I wrote this -- + +puff.c was written to document the deflate format unambiguously, by virtue of +being working C code. It is meant to supplement RFC 1951, which formally +describes the deflate format. I have received many questions on details of the +deflate format, and I hope that reading this code will answer those questions. 
+puff.c is heavily commented with details of the deflate format, especially +those little nooks and cranies of the format that might not be obvious from a +specification. + +puff.c may also be useful in applications where code size or memory usage is a +very limited resource, and speed is not as important. + +How to use it -- + +Well, most likely you should just be reading puff.c and using zlib for actual +applications, but if you must ... + +Include puff.h in your code, which provides this prototype: + +int puff(unsigned char *dest, /* pointer to destination pointer */ + unsigned long *destlen, /* amount of output space */ + unsigned char *source, /* pointer to source data pointer */ + unsigned long *sourcelen); /* amount of input available */ + +Then you can call puff() to decompress a deflate stream that is in memory in +its entirety at source, to a sufficiently sized block of memory for the +decompressed data at dest. puff() is the only external symbol in puff.c The +only C library functions that puff.c needs are setjmp() and longjmp(), which +are used to simplify error checking in the code to improve readabilty. puff.c +does no memory allocation, and uses less than 2K bytes off of the stack. + +If destlen is not enough space for the uncompressed data, then inflate will +return an error without writing more than destlen bytes. Note that this means +that in order to decompress the deflate data successfully, you need to know +the size of the uncompressed data ahead of time. + +If needed, puff() can determine the size of the uncompressed data with no +output space. This is done by passing dest equal to (unsigned char *)0. Then +the initial value of *destlen is ignored and *destlen is set to the length of +the uncompressed data. So if the size of the uncompressed data is not known, +then two passes of puff() can be used--first to determine the size, and second +to do the actual inflation after allocating the appropriate memory. Not +pretty, but it works. 
(This is one of the reasons you should be using zlib.) + +The deflate format is self-terminating. If the deflate stream does not end +in *sourcelen bytes, puff() will return an error without reading at or past +endsource. + +On return, *sourcelen is updated to the amount of input data consumed, and +*destlen is updated to the size of the uncompressed data. See the comments +in puff.c for the possible return codes for puff(). Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/zlib/contrib/puff/zeros.raw and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/zlib/contrib/puff/zeros.raw differ diff -Nru nodejs-0.11.13/deps/zlib/contrib/README.contrib nodejs-0.11.15/deps/zlib/contrib/README.contrib --- nodejs-0.11.13/deps/zlib/contrib/README.contrib 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/README.contrib 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,78 @@ +All files under this contrib directory are UNSUPPORTED. There were +provided by users of zlib and were not tested by the authors of zlib. +Use at your own risk. Please contact the authors of the contributions +for help about these, not the zlib authors. Thanks. 
+ + +ada/ by Dmitriy Anisimkov <anisimkov@yahoo.com> + Support for Ada + See http://zlib-ada.sourceforge.net/ + +amd64/ by Mikhail Teterin <mi@ALDAN.algebra.com> + asm code for AMD64 + See patch at http://www.freebsd.org/cgi/query-pr.cgi?pr=bin/96393 + +asm686/ by Brian Raiter <breadbox@muppetlabs.com> + asm code for Pentium and PPro/PII, using the AT&T (GNU as) syntax + See http://www.muppetlabs.com/~breadbox/software/assembly.html + +blast/ by Mark Adler <madler@alumni.caltech.edu> + Decompressor for output of PKWare Data Compression Library (DCL) + +delphi/ by Cosmin Truta <cosmint@cs.ubbcluj.ro> + Support for Delphi and C++ Builder + +dotzlib/ by Henrik Ravn <henrik@ravn.com> + Support for Microsoft .Net and Visual C++ .Net + +gcc_gvmat64/by Gilles Vollant <info@winimage.com> + GCC Version of x86 64-bit (AMD64 and Intel EM64t) code for x64 + assembler to replace longest_match() and inflate_fast() + +infback9/ by Mark Adler <madler@alumni.caltech.edu> + Unsupported diffs to infback to decode the deflate64 format + +inflate86/ by Chris Anderson <christop@charm.net> + Tuned x86 gcc asm code to replace inflate_fast() + +iostream/ by Kevin Ruland <kevin@rodin.wustl.edu> + A C++ I/O streams interface to the zlib gz* functions + +iostream2/ by Tyge Lvset <Tyge.Lovset@cmr.no> + Another C++ I/O streams interface + +iostream3/ by Ludwig Schwardt <schwardt@sun.ac.za> + and Kevin Ruland <kevin@rodin.wustl.edu> + Yet another C++ I/O streams interface + +masmx64/ by Gilles Vollant <info@winimage.com> + x86 64-bit (AMD64 and Intel EM64t) code for x64 assembler to + replace longest_match() and inflate_fast(), also masm x86 + 64-bits translation of Chris Anderson inflate_fast() + +masmx86/ by Gilles Vollant <info@winimage.com> + x86 asm code to replace longest_match() and inflate_fast(), + for Visual C++ and MASM (32 bits). 
+ Based on Brian Raiter (asm686) and Chris Anderson (inflate86) + +minizip/ by Gilles Vollant <info@winimage.com> + Mini zip and unzip based on zlib + Includes Zip64 support by Mathias Svensson <mathias@result42.com> + See http://www.winimage.com/zLibDll/unzip.html + +pascal/ by Bob Dellaca <bobdl@xtra.co.nz> et al. + Support for Pascal + +puff/ by Mark Adler <madler@alumni.caltech.edu> + Small, low memory usage inflate. Also serves to provide an + unambiguous description of the deflate format. + +testzlib/ by Gilles Vollant <info@winimage.com> + Example of the use of zlib + +untgz/ by Pedro A. Aranda Gutierrez <paag@tid.es> + A very simple tar.gz file extractor using zlib + +vstudio/ by Gilles Vollant <info@winimage.com> + Building a minizip-enhanced zlib with Microsoft Visual Studio + Includes vc11 from kreuzerkrieg and vc12 from davispuh diff -Nru nodejs-0.11.13/deps/zlib/contrib/testzlib/testzlib.c nodejs-0.11.15/deps/zlib/contrib/testzlib/testzlib.c --- nodejs-0.11.13/deps/zlib/contrib/testzlib/testzlib.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/testzlib/testzlib.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,275 @@ +#include <stdio.h> +#include <stdlib.h> +#include <windows.h> + +#include "zlib.h" + + +void MyDoMinus64(LARGE_INTEGER *R,LARGE_INTEGER A,LARGE_INTEGER B) +{ + R->HighPart = A.HighPart - B.HighPart; + if (A.LowPart >= B.LowPart) + R->LowPart = A.LowPart - B.LowPart; + else + { + R->LowPart = A.LowPart - B.LowPart; + R->HighPart --; + } +} + +#ifdef _M_X64 +// see http://msdn2.microsoft.com/library/twchhe95(en-us,vs.80).aspx for __rdtsc +unsigned __int64 __rdtsc(void); +void BeginCountRdtsc(LARGE_INTEGER * pbeginTime64) +{ + // printf("rdtsc = %I64x\n",__rdtsc()); + pbeginTime64->QuadPart=__rdtsc(); +} + +LARGE_INTEGER GetResRdtsc(LARGE_INTEGER beginTime64,BOOL fComputeTimeQueryPerf) +{ + LARGE_INTEGER LIres; + unsigned _int64 res=__rdtsc()-((unsigned _int64)(beginTime64.QuadPart)); + LIres.QuadPart=res; + // 
printf("rdtsc = %I64x\n",__rdtsc()); + return LIres; +} +#else +#ifdef _M_IX86 +void myGetRDTSC32(LARGE_INTEGER * pbeginTime64) +{ + DWORD dwEdx,dwEax; + _asm + { + rdtsc + mov dwEax,eax + mov dwEdx,edx + } + pbeginTime64->LowPart=dwEax; + pbeginTime64->HighPart=dwEdx; +} + +void BeginCountRdtsc(LARGE_INTEGER * pbeginTime64) +{ + myGetRDTSC32(pbeginTime64); +} + +LARGE_INTEGER GetResRdtsc(LARGE_INTEGER beginTime64,BOOL fComputeTimeQueryPerf) +{ + LARGE_INTEGER LIres,endTime64; + myGetRDTSC32(&endTime64); + + LIres.LowPart=LIres.HighPart=0; + MyDoMinus64(&LIres,endTime64,beginTime64); + return LIres; +} +#else +void myGetRDTSC32(LARGE_INTEGER * pbeginTime64) +{ +} + +void BeginCountRdtsc(LARGE_INTEGER * pbeginTime64) +{ +} + +LARGE_INTEGER GetResRdtsc(LARGE_INTEGER beginTime64,BOOL fComputeTimeQueryPerf) +{ + LARGE_INTEGER lr; + lr.QuadPart=0; + return lr; +} +#endif +#endif + +void BeginCountPerfCounter(LARGE_INTEGER * pbeginTime64,BOOL fComputeTimeQueryPerf) +{ + if ((!fComputeTimeQueryPerf) || (!QueryPerformanceCounter(pbeginTime64))) + { + pbeginTime64->LowPart = GetTickCount(); + pbeginTime64->HighPart = 0; + } +} + +DWORD GetMsecSincePerfCounter(LARGE_INTEGER beginTime64,BOOL fComputeTimeQueryPerf) +{ + LARGE_INTEGER endTime64,ticksPerSecond,ticks; + DWORDLONG ticksShifted,tickSecShifted; + DWORD dwLog=16+0; + DWORD dwRet; + if ((!fComputeTimeQueryPerf) || (!QueryPerformanceCounter(&endTime64))) + dwRet = (GetTickCount() - beginTime64.LowPart)*1; + else + { + MyDoMinus64(&ticks,endTime64,beginTime64); + QueryPerformanceFrequency(&ticksPerSecond); + + + { + ticksShifted = Int64ShrlMod32(*(DWORDLONG*)&ticks,dwLog); + tickSecShifted = Int64ShrlMod32(*(DWORDLONG*)&ticksPerSecond,dwLog); + + } + + dwRet = (DWORD)((((DWORD)ticksShifted)*1000)/(DWORD)(tickSecShifted)); + dwRet *=1; + } + return dwRet; +} + +int ReadFileMemory(const char* filename,long* plFileSize,unsigned char** pFilePtr) +{ + FILE* stream; + unsigned char* ptr; + int retVal=1; + 
stream=fopen(filename, "rb"); + if (stream==NULL) + return 0; + + fseek(stream,0,SEEK_END); + + *plFileSize=ftell(stream); + fseek(stream,0,SEEK_SET); + ptr=malloc((*plFileSize)+1); + if (ptr==NULL) + retVal=0; + else + { + if (fread(ptr, 1, *plFileSize,stream) != (*plFileSize)) + retVal=0; + } + fclose(stream); + *pFilePtr=ptr; + return retVal; +} + +int main(int argc, char *argv[]) +{ + int BlockSizeCompress=0x8000; + int BlockSizeUncompress=0x8000; + int cprLevel=Z_DEFAULT_COMPRESSION ; + long lFileSize; + unsigned char* FilePtr; + long lBufferSizeCpr; + long lBufferSizeUncpr; + long lCompressedSize=0; + unsigned char* CprPtr; + unsigned char* UncprPtr; + long lSizeCpr,lSizeUncpr; + DWORD dwGetTick,dwMsecQP; + LARGE_INTEGER li_qp,li_rdtsc,dwResRdtsc; + + if (argc<=1) + { + printf("run TestZlib <File> [BlockSizeCompress] [BlockSizeUncompress] [compres. level]\n"); + return 0; + } + + if (ReadFileMemory(argv[1],&lFileSize,&FilePtr)==0) + { + printf("error reading %s\n",argv[1]); + return 1; + } + else printf("file %s read, %u bytes\n",argv[1],lFileSize); + + if (argc>=3) + BlockSizeCompress=atol(argv[2]); + + if (argc>=4) + BlockSizeUncompress=atol(argv[3]); + + if (argc>=5) + cprLevel=(int)atol(argv[4]); + + lBufferSizeCpr = lFileSize + (lFileSize/0x10) + 0x200; + lBufferSizeUncpr = lBufferSizeCpr; + + CprPtr=(unsigned char*)malloc(lBufferSizeCpr + BlockSizeCompress); + + BeginCountPerfCounter(&li_qp,TRUE); + dwGetTick=GetTickCount(); + BeginCountRdtsc(&li_rdtsc); + { + z_stream zcpr; + int ret=Z_OK; + long lOrigToDo = lFileSize; + long lOrigDone = 0; + int step=0; + memset(&zcpr,0,sizeof(z_stream)); + deflateInit(&zcpr,cprLevel); + + zcpr.next_in = FilePtr; + zcpr.next_out = CprPtr; + + + do + { + long all_read_before = zcpr.total_in; + zcpr.avail_in = min(lOrigToDo,BlockSizeCompress); + zcpr.avail_out = BlockSizeCompress; + ret=deflate(&zcpr,(zcpr.avail_in==lOrigToDo) ? 
Z_FINISH : Z_SYNC_FLUSH); + lOrigDone += (zcpr.total_in-all_read_before); + lOrigToDo -= (zcpr.total_in-all_read_before); + step++; + } while (ret==Z_OK); + + lSizeCpr=zcpr.total_out; + deflateEnd(&zcpr); + dwGetTick=GetTickCount()-dwGetTick; + dwMsecQP=GetMsecSincePerfCounter(li_qp,TRUE); + dwResRdtsc=GetResRdtsc(li_rdtsc,TRUE); + printf("total compress size = %u, in %u step\n",lSizeCpr,step); + printf("time = %u msec = %f sec\n",dwGetTick,dwGetTick/(double)1000.); + printf("defcpr time QP = %u msec = %f sec\n",dwMsecQP,dwMsecQP/(double)1000.); + printf("defcpr result rdtsc = %I64x\n\n",dwResRdtsc.QuadPart); + } + + CprPtr=(unsigned char*)realloc(CprPtr,lSizeCpr); + UncprPtr=(unsigned char*)malloc(lBufferSizeUncpr + BlockSizeUncompress); + + BeginCountPerfCounter(&li_qp,TRUE); + dwGetTick=GetTickCount(); + BeginCountRdtsc(&li_rdtsc); + { + z_stream zcpr; + int ret=Z_OK; + long lOrigToDo = lSizeCpr; + long lOrigDone = 0; + int step=0; + memset(&zcpr,0,sizeof(z_stream)); + inflateInit(&zcpr); + + zcpr.next_in = CprPtr; + zcpr.next_out = UncprPtr; + + + do + { + long all_read_before = zcpr.total_in; + zcpr.avail_in = min(lOrigToDo,BlockSizeUncompress); + zcpr.avail_out = BlockSizeUncompress; + ret=inflate(&zcpr,Z_SYNC_FLUSH); + lOrigDone += (zcpr.total_in-all_read_before); + lOrigToDo -= (zcpr.total_in-all_read_before); + step++; + } while (ret==Z_OK); + + lSizeUncpr=zcpr.total_out; + inflateEnd(&zcpr); + dwGetTick=GetTickCount()-dwGetTick; + dwMsecQP=GetMsecSincePerfCounter(li_qp,TRUE); + dwResRdtsc=GetResRdtsc(li_rdtsc,TRUE); + printf("total uncompress size = %u, in %u step\n",lSizeUncpr,step); + printf("time = %u msec = %f sec\n",dwGetTick,dwGetTick/(double)1000.); + printf("uncpr time QP = %u msec = %f sec\n",dwMsecQP,dwMsecQP/(double)1000.); + printf("uncpr result rdtsc = %I64x\n\n",dwResRdtsc.QuadPart); + } + + if (lSizeUncpr==lFileSize) + { + if (memcmp(FilePtr,UncprPtr,lFileSize)==0) + printf("compare ok\n"); + + } + + return 0; +} diff -Nru 
nodejs-0.11.13/deps/zlib/contrib/testzlib/testzlib.txt nodejs-0.11.15/deps/zlib/contrib/testzlib/testzlib.txt --- nodejs-0.11.13/deps/zlib/contrib/testzlib/testzlib.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/testzlib/testzlib.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,10 @@ +To build testzLib with Visual Studio 2005: + +copy to a directory file from : +- root of zLib tree +- contrib/testzlib +- contrib/masmx86 +- contrib/masmx64 +- contrib/vstudio/vc7 + +and open testzlib8.sln \ No newline at end of file diff -Nru nodejs-0.11.13/deps/zlib/contrib/untgz/Makefile nodejs-0.11.15/deps/zlib/contrib/untgz/Makefile --- nodejs-0.11.13/deps/zlib/contrib/untgz/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/untgz/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,14 @@ +CC=cc +CFLAGS=-g + +untgz: untgz.o ../../libz.a + $(CC) $(CFLAGS) -o untgz untgz.o -L../.. -lz + +untgz.o: untgz.c ../../zlib.h + $(CC) $(CFLAGS) -c -I../.. untgz.c + +../../libz.a: + cd ../..; ./configure; make + +clean: + rm -f untgz untgz.o *~ diff -Nru nodejs-0.11.13/deps/zlib/contrib/untgz/Makefile.msc nodejs-0.11.15/deps/zlib/contrib/untgz/Makefile.msc --- nodejs-0.11.13/deps/zlib/contrib/untgz/Makefile.msc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/untgz/Makefile.msc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,17 @@ +CC=cl +CFLAGS=-MD + +untgz.exe: untgz.obj ..\..\zlib.lib + $(CC) $(CFLAGS) untgz.obj ..\..\zlib.lib + +untgz.obj: untgz.c ..\..\zlib.h + $(CC) $(CFLAGS) -c -I..\.. untgz.c + +..\..\zlib.lib: + cd ..\.. 
+ $(MAKE) -f win32\makefile.msc + cd contrib\untgz + +clean: + -del untgz.obj + -del untgz.exe diff -Nru nodejs-0.11.13/deps/zlib/contrib/untgz/untgz.c nodejs-0.11.15/deps/zlib/contrib/untgz/untgz.c --- nodejs-0.11.13/deps/zlib/contrib/untgz/untgz.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/untgz/untgz.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,674 @@ +/* + * untgz.c -- Display contents and extract files from a gzip'd TAR file + * + * written by Pedro A. Aranda Gutierrez <paag@tid.es> + * adaptation to Unix by Jean-loup Gailly <jloup@gzip.org> + * various fixes by Cosmin Truta <cosmint@cs.ubbcluj.ro> + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <time.h> +#include <errno.h> + +#include "zlib.h" + +#ifdef unix +# include <unistd.h> +#else +# include <direct.h> +# include <io.h> +#endif + +#ifdef WIN32 +#include <windows.h> +# ifndef F_OK +# define F_OK 0 +# endif +# define mkdir(dirname,mode) _mkdir(dirname) +# ifdef _MSC_VER +# define access(path,mode) _access(path,mode) +# define chmod(path,mode) _chmod(path,mode) +# define strdup(str) _strdup(str) +# endif +#else +# include <utime.h> +#endif + + +/* values used in typeflag field */ + +#define REGTYPE '0' /* regular file */ +#define AREGTYPE '\0' /* regular file */ +#define LNKTYPE '1' /* link */ +#define SYMTYPE '2' /* reserved */ +#define CHRTYPE '3' /* character special */ +#define BLKTYPE '4' /* block special */ +#define DIRTYPE '5' /* directory */ +#define FIFOTYPE '6' /* FIFO special */ +#define CONTTYPE '7' /* reserved */ + +/* GNU tar extensions */ + +#define GNUTYPE_DUMPDIR 'D' /* file names from dumped directory */ +#define GNUTYPE_LONGLINK 'K' /* long link name */ +#define GNUTYPE_LONGNAME 'L' /* long file name */ +#define GNUTYPE_MULTIVOL 'M' /* continuation of file from another volume */ +#define GNUTYPE_NAMES 'N' /* file name that does not fit into main hdr */ +#define GNUTYPE_SPARSE 'S' /* sparse file */ +#define GNUTYPE_VOLHDR 
'V' /* tape/volume header */ + + +/* tar header */ + +#define BLOCKSIZE 512 +#define SHORTNAMESIZE 100 + +struct tar_header +{ /* byte offset */ + char name[100]; /* 0 */ + char mode[8]; /* 100 */ + char uid[8]; /* 108 */ + char gid[8]; /* 116 */ + char size[12]; /* 124 */ + char mtime[12]; /* 136 */ + char chksum[8]; /* 148 */ + char typeflag; /* 156 */ + char linkname[100]; /* 157 */ + char magic[6]; /* 257 */ + char version[2]; /* 263 */ + char uname[32]; /* 265 */ + char gname[32]; /* 297 */ + char devmajor[8]; /* 329 */ + char devminor[8]; /* 337 */ + char prefix[155]; /* 345 */ + /* 500 */ +}; + +union tar_buffer +{ + char buffer[BLOCKSIZE]; + struct tar_header header; +}; + +struct attr_item +{ + struct attr_item *next; + char *fname; + int mode; + time_t time; +}; + +enum { TGZ_EXTRACT, TGZ_LIST, TGZ_INVALID }; + +char *TGZfname OF((const char *)); +void TGZnotfound OF((const char *)); + +int getoct OF((char *, int)); +char *strtime OF((time_t *)); +int setfiletime OF((char *, time_t)); +void push_attr OF((struct attr_item **, char *, int, time_t)); +void restore_attr OF((struct attr_item **)); + +int ExprMatch OF((char *, char *)); + +int makedir OF((char *)); +int matchname OF((int, int, char **, char *)); + +void error OF((const char *)); +int tar OF((gzFile, int, int, int, char **)); + +void help OF((int)); +int main OF((int, char **)); + +char *prog; + +const char *TGZsuffix[] = { "\0", ".tar", ".tar.gz", ".taz", ".tgz", NULL }; + +/* return the file name of the TGZ archive */ +/* or NULL if it does not exist */ + +char *TGZfname (const char *arcname) +{ + static char buffer[1024]; + int origlen,i; + + strcpy(buffer,arcname); + origlen = strlen(buffer); + + for (i=0; TGZsuffix[i]; i++) + { + strcpy(buffer+origlen,TGZsuffix[i]); + if (access(buffer,F_OK) == 0) + return buffer; + } + return NULL; +} + + +/* error message for the filename */ + +void TGZnotfound (const char *arcname) +{ + int i; + + fprintf(stderr,"%s: Couldn't find ",prog); + for 
(i=0;TGZsuffix[i];i++) + fprintf(stderr,(TGZsuffix[i+1]) ? "%s%s, " : "or %s%s\n", + arcname, + TGZsuffix[i]); + exit(1); +} + + +/* convert octal digits to int */ +/* on error return -1 */ + +int getoct (char *p,int width) +{ + int result = 0; + char c; + + while (width--) + { + c = *p++; + if (c == 0) + break; + if (c == ' ') + continue; + if (c < '0' || c > '7') + return -1; + result = result * 8 + (c - '0'); + } + return result; +} + + +/* convert time_t to string */ +/* use the "YYYY/MM/DD hh:mm:ss" format */ + +char *strtime (time_t *t) +{ + struct tm *local; + static char result[32]; + + local = localtime(t); + sprintf(result,"%4d/%02d/%02d %02d:%02d:%02d", + local->tm_year+1900, local->tm_mon+1, local->tm_mday, + local->tm_hour, local->tm_min, local->tm_sec); + return result; +} + + +/* set file time */ + +int setfiletime (char *fname,time_t ftime) +{ +#ifdef WIN32 + static int isWinNT = -1; + SYSTEMTIME st; + FILETIME locft, modft; + struct tm *loctm; + HANDLE hFile; + int result; + + loctm = localtime(&ftime); + if (loctm == NULL) + return -1; + + st.wYear = (WORD)loctm->tm_year + 1900; + st.wMonth = (WORD)loctm->tm_mon + 1; + st.wDayOfWeek = (WORD)loctm->tm_wday; + st.wDay = (WORD)loctm->tm_mday; + st.wHour = (WORD)loctm->tm_hour; + st.wMinute = (WORD)loctm->tm_min; + st.wSecond = (WORD)loctm->tm_sec; + st.wMilliseconds = 0; + if (!SystemTimeToFileTime(&st, &locft) || + !LocalFileTimeToFileTime(&locft, &modft)) + return -1; + + if (isWinNT < 0) + isWinNT = (GetVersion() < 0x80000000) ? 1 : 0; + hFile = CreateFile(fname, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, + (isWinNT ? FILE_FLAG_BACKUP_SEMANTICS : 0), + NULL); + if (hFile == INVALID_HANDLE_VALUE) + return -1; + result = SetFileTime(hFile, NULL, NULL, &modft) ? 
0 : -1; + CloseHandle(hFile); + return result; +#else + struct utimbuf settime; + + settime.actime = settime.modtime = ftime; + return utime(fname,&settime); +#endif +} + + +/* push file attributes */ + +void push_attr(struct attr_item **list,char *fname,int mode,time_t time) +{ + struct attr_item *item; + + item = (struct attr_item *)malloc(sizeof(struct attr_item)); + if (item == NULL) + error("Out of memory"); + item->fname = strdup(fname); + item->mode = mode; + item->time = time; + item->next = *list; + *list = item; +} + + +/* restore file attributes */ + +void restore_attr(struct attr_item **list) +{ + struct attr_item *item, *prev; + + for (item = *list; item != NULL; ) + { + setfiletime(item->fname,item->time); + chmod(item->fname,item->mode); + prev = item; + item = item->next; + free(prev); + } + *list = NULL; +} + + +/* match regular expression */ + +#define ISSPECIAL(c) (((c) == '*') || ((c) == '/')) + +int ExprMatch (char *string,char *expr) +{ + while (1) + { + if (ISSPECIAL(*expr)) + { + if (*expr == '/') + { + if (*string != '\\' && *string != '/') + return 0; + string ++; expr++; + } + else if (*expr == '*') + { + if (*expr ++ == 0) + return 1; + while (*++string != *expr) + if (*string == 0) + return 0; + } + } + else + { + if (*string != *expr) + return 0; + if (*expr++ == 0) + return 1; + string++; + } + } +} + + +/* recursive mkdir */ +/* abort on ENOENT; ignore other errors like "directory already exists" */ +/* return 1 if OK */ +/* 0 on error */ + +int makedir (char *newdir) +{ + char *buffer = strdup(newdir); + char *p; + int len = strlen(buffer); + + if (len <= 0) { + free(buffer); + return 0; + } + if (buffer[len-1] == '/') { + buffer[len-1] = '\0'; + } + if (mkdir(buffer, 0755) == 0) + { + free(buffer); + return 1; + } + + p = buffer+1; + while (1) + { + char hold; + + while(*p && *p != '\\' && *p != '/') + p++; + hold = *p; + *p = 0; + if ((mkdir(buffer, 0755) == -1) && (errno == ENOENT)) + { + fprintf(stderr,"%s: Couldn't create 
directory %s\n",prog,buffer); + free(buffer); + return 0; + } + if (hold == 0) + break; + *p++ = hold; + } + free(buffer); + return 1; +} + + +int matchname (int arg,int argc,char **argv,char *fname) +{ + if (arg == argc) /* no arguments given (untgz tgzarchive) */ + return 1; + + while (arg < argc) + if (ExprMatch(fname,argv[arg++])) + return 1; + + return 0; /* ignore this for the moment being */ +} + + +/* tar file list or extract */ + +int tar (gzFile in,int action,int arg,int argc,char **argv) +{ + union tar_buffer buffer; + int len; + int err; + int getheader = 1; + int remaining = 0; + FILE *outfile = NULL; + char fname[BLOCKSIZE]; + int tarmode; + time_t tartime; + struct attr_item *attributes = NULL; + + if (action == TGZ_LIST) + printf(" date time size file\n" + " ---------- -------- --------- -------------------------------------\n"); + while (1) + { + len = gzread(in, &buffer, BLOCKSIZE); + if (len < 0) + error(gzerror(in, &err)); + /* + * Always expect complete blocks to process + * the tar information. 
+ */ + if (len != BLOCKSIZE) + { + action = TGZ_INVALID; /* force error exit */ + remaining = 0; /* force I/O cleanup */ + } + + /* + * If we have to get a tar header + */ + if (getheader >= 1) + { + /* + * if we met the end of the tar + * or the end-of-tar block, + * we are done + */ + if (len == 0 || buffer.header.name[0] == 0) + break; + + tarmode = getoct(buffer.header.mode,8); + tartime = (time_t)getoct(buffer.header.mtime,12); + if (tarmode == -1 || tartime == (time_t)-1) + { + buffer.header.name[0] = 0; + action = TGZ_INVALID; + } + + if (getheader == 1) + { + strncpy(fname,buffer.header.name,SHORTNAMESIZE); + if (fname[SHORTNAMESIZE-1] != 0) + fname[SHORTNAMESIZE] = 0; + } + else + { + /* + * The file name is longer than SHORTNAMESIZE + */ + if (strncmp(fname,buffer.header.name,SHORTNAMESIZE-1) != 0) + error("bad long name"); + getheader = 1; + } + + /* + * Act according to the type flag + */ + switch (buffer.header.typeflag) + { + case DIRTYPE: + if (action == TGZ_LIST) + printf(" %s <dir> %s\n",strtime(&tartime),fname); + if (action == TGZ_EXTRACT) + { + makedir(fname); + push_attr(&attributes,fname,tarmode,tartime); + } + break; + case REGTYPE: + case AREGTYPE: + remaining = getoct(buffer.header.size,12); + if (remaining == -1) + { + action = TGZ_INVALID; + break; + } + if (action == TGZ_LIST) + printf(" %s %9d %s\n",strtime(&tartime),remaining,fname); + else if (action == TGZ_EXTRACT) + { + if (matchname(arg,argc,argv,fname)) + { + outfile = fopen(fname,"wb"); + if (outfile == NULL) { + /* try creating directory */ + char *p = strrchr(fname, '/'); + if (p != NULL) { + *p = '\0'; + makedir(fname); + *p = '/'; + outfile = fopen(fname,"wb"); + } + } + if (outfile != NULL) + printf("Extracting %s\n",fname); + else + fprintf(stderr, "%s: Couldn't create %s",prog,fname); + } + else + outfile = NULL; + } + getheader = 0; + break; + case GNUTYPE_LONGLINK: + case GNUTYPE_LONGNAME: + remaining = getoct(buffer.header.size,12); + if (remaining < 0 || remaining >= 
BLOCKSIZE) + { + action = TGZ_INVALID; + break; + } + len = gzread(in, fname, BLOCKSIZE); + if (len < 0) + error(gzerror(in, &err)); + if (fname[BLOCKSIZE-1] != 0 || (int)strlen(fname) > remaining) + { + action = TGZ_INVALID; + break; + } + getheader = 2; + break; + default: + if (action == TGZ_LIST) + printf(" %s <---> %s\n",strtime(&tartime),fname); + break; + } + } + else + { + unsigned int bytes = (remaining > BLOCKSIZE) ? BLOCKSIZE : remaining; + + if (outfile != NULL) + { + if (fwrite(&buffer,sizeof(char),bytes,outfile) != bytes) + { + fprintf(stderr, + "%s: Error writing %s -- skipping\n",prog,fname); + fclose(outfile); + outfile = NULL; + remove(fname); + } + } + remaining -= bytes; + } + + if (remaining == 0) + { + getheader = 1; + if (outfile != NULL) + { + fclose(outfile); + outfile = NULL; + if (action != TGZ_INVALID) + push_attr(&attributes,fname,tarmode,tartime); + } + } + + /* + * Abandon if errors are found + */ + if (action == TGZ_INVALID) + { + error("broken archive"); + break; + } + } + + /* + * Restore file modes and time stamps + */ + restore_attr(&attributes); + + if (gzclose(in) != Z_OK) + error("failed gzclose"); + + return 0; +} + + +/* ============================================================ */ + +void help(int exitval) +{ + printf("untgz version 0.2.1\n" + " using zlib version %s\n\n", + zlibVersion()); + printf("Usage: untgz file.tgz extract all files\n" + " untgz file.tgz fname ... 
extract selected files\n" + " untgz -l file.tgz list archive contents\n" + " untgz -h display this help\n"); + exit(exitval); +} + +void error(const char *msg) +{ + fprintf(stderr, "%s: %s\n", prog, msg); + exit(1); +} + + +/* ============================================================ */ + +#if defined(WIN32) && defined(__GNUC__) +int _CRT_glob = 0; /* disable argument globbing in MinGW */ +#endif + +int main(int argc,char **argv) +{ + int action = TGZ_EXTRACT; + int arg = 1; + char *TGZfile; + gzFile *f; + + prog = strrchr(argv[0],'\\'); + if (prog == NULL) + { + prog = strrchr(argv[0],'/'); + if (prog == NULL) + { + prog = strrchr(argv[0],':'); + if (prog == NULL) + prog = argv[0]; + else + prog++; + } + else + prog++; + } + else + prog++; + + if (argc == 1) + help(0); + + if (strcmp(argv[arg],"-l") == 0) + { + action = TGZ_LIST; + if (argc == ++arg) + help(0); + } + else if (strcmp(argv[arg],"-h") == 0) + { + help(0); + } + + if ((TGZfile = TGZfname(argv[arg])) == NULL) + TGZnotfound(argv[arg]); + + ++arg; + if ((action == TGZ_LIST) && (arg != argc)) + help(1); + +/* + * Process the TGZ file + */ + switch(action) + { + case TGZ_LIST: + case TGZ_EXTRACT: + f = gzopen(TGZfile,"rb"); + if (f == NULL) + { + fprintf(stderr,"%s: Couldn't gzopen %s\n",prog,TGZfile); + return 1; + } + exit(tar(f, action, arg, argc, argv)); + break; + + default: + error("Unknown option"); + exit(1); + } + + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/readme.txt nodejs-0.11.15/deps/zlib/contrib/vstudio/readme.txt --- nodejs-0.11.13/deps/zlib/contrib/vstudio/readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/vstudio/readme.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,65 @@ +Building instructions for the DLL versions of Zlib 1.2.8 +======================================================== + +This directory contains projects that build zlib and minizip using +Microsoft Visual C++ 9.0/10.0. 
+ +You don't need to build these projects yourself. You can download the +binaries from: + http://www.winimage.com/zLibDll + +More information can be found at this site. + + + + + +Build instructions for Visual Studio 2008 (32 bits or 64 bits) +-------------------------------------------------------------- +- Uncompress current zlib, including all contrib/* files +- Compile assembly code (with Visual Studio Command Prompt) by running: + bld_ml64.bat (in contrib\masmx64) + bld_ml32.bat (in contrib\masmx86) +- Open contrib\vstudio\vc9\zlibvc.sln with Microsoft Visual C++ 2008 +- Or run: vcbuild /rebuild contrib\vstudio\vc9\zlibvc.sln "Release|Win32" + +Build instructions for Visual Studio 2010 (32 bits or 64 bits) +-------------------------------------------------------------- +- Uncompress current zlib, including all contrib/* files +- Open contrib\vstudio\vc10\zlibvc.sln with Microsoft Visual C++ 2010 + +Build instructions for Visual Studio 2012 (32 bits or 64 bits) +-------------------------------------------------------------- +- Uncompress current zlib, including all contrib/* files +- Open contrib\vstudio\vc11\zlibvc.sln with Microsoft Visual C++ 2012 + + +Important +--------- +- To use zlibwapi.dll in your application, you must define the + macro ZLIB_WINAPI when compiling your application's source files. + + +Additional notes +---------------- +- This DLL, named zlibwapi.dll, is compatible to the old zlib.dll built + by Gilles Vollant from the zlib 1.1.x sources, and distributed at + http://www.winimage.com/zLibDll + It uses the WINAPI calling convention for the exported functions, and + includes the minizip functionality. If your application needs that + particular build of zlib.dll, you can rename zlibwapi.dll to zlib.dll. + +- The new DLL was renamed because there exist several incompatible + versions of zlib.dll on the Internet. + +- There is also an official DLL build of zlib, named zlib1.dll. 
This one + is exporting the functions using the CDECL convention. See the file + win32\DLL_FAQ.txt found in this zlib distribution. + +- There used to be a ZLIB_DLL macro in zlib 1.1.x, but now this symbol + has a slightly different effect. To avoid compatibility problems, do + not define it here. + + +Gilles Vollant +info@winimage.com diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/vc10/zlib.rc nodejs-0.11.15/deps/zlib/contrib/vstudio/vc10/zlib.rc --- nodejs-0.11.13/deps/zlib/contrib/vstudio/vc10/zlib.rc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/vstudio/vc10/zlib.rc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +#include <windows.h> + +#define IDR_VERSION1 1 +IDR_VERSION1 VERSIONINFO MOVEABLE IMPURE LOADONCALL DISCARDABLE + FILEVERSION 1,2,8,0 + PRODUCTVERSION 1,2,8,0 + FILEFLAGSMASK VS_FFI_FILEFLAGSMASK + FILEFLAGS 0 + FILEOS VOS_DOS_WINDOWS32 + FILETYPE VFT_DLL + FILESUBTYPE 0 // not used +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904E4" + //language ID = U.S. 
English, char set = Windows, Multilingual + + BEGIN + VALUE "FileDescription", "zlib data compression and ZIP file I/O library\0" + VALUE "FileVersion", "1.2.8\0" + VALUE "InternalName", "zlib\0" + VALUE "OriginalFilename", "zlibwapi.dll\0" + VALUE "ProductName", "ZLib.DLL\0" + VALUE "Comments","DLL support by Alessandro Iacopetti & Gilles Vollant\0" + VALUE "LegalCopyright", "(C) 1995-2013 Jean-loup Gailly & Mark Adler\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0409, 1252 + END +END diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/vc10/zlibvc.def nodejs-0.11.15/deps/zlib/contrib/vstudio/vc10/zlibvc.def --- nodejs-0.11.13/deps/zlib/contrib/vstudio/vc10/zlibvc.def 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/vstudio/vc10/zlibvc.def 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,143 @@ +LIBRARY +; zlib data compression and ZIP file I/O library + +VERSION 1.2.8 + +EXPORTS + adler32 @1 + compress @2 + crc32 @3 + deflate @4 + deflateCopy @5 + deflateEnd @6 + deflateInit2_ @7 + deflateInit_ @8 + deflateParams @9 + deflateReset @10 + deflateSetDictionary @11 + gzclose @12 + gzdopen @13 + gzerror @14 + gzflush @15 + gzopen @16 + gzread @17 + gzwrite @18 + inflate @19 + inflateEnd @20 + inflateInit2_ @21 + inflateInit_ @22 + inflateReset @23 + inflateSetDictionary @24 + inflateSync @25 + uncompress @26 + zlibVersion @27 + gzprintf @28 + gzputc @29 + gzgetc @30 + gzseek @31 + gzrewind @32 + gztell @33 + gzeof @34 + gzsetparams @35 + zError @36 + inflateSyncPoint @37 + get_crc_table @38 + compress2 @39 + gzputs @40 + gzgets @41 + inflateCopy @42 + inflateBackInit_ @43 + inflateBack @44 + inflateBackEnd @45 + compressBound @46 + deflateBound @47 + gzclearerr @48 + gzungetc @49 + zlibCompileFlags @50 + deflatePrime @51 + deflatePending @52 + + unzOpen @61 + unzClose @62 + unzGetGlobalInfo @63 + unzGetCurrentFileInfo @64 + unzGoToFirstFile @65 + unzGoToNextFile @66 + unzOpenCurrentFile @67 + unzReadCurrentFile @68 + 
unzOpenCurrentFile3 @69 + unztell @70 + unzeof @71 + unzCloseCurrentFile @72 + unzGetGlobalComment @73 + unzStringFileNameCompare @74 + unzLocateFile @75 + unzGetLocalExtrafield @76 + unzOpen2 @77 + unzOpenCurrentFile2 @78 + unzOpenCurrentFilePassword @79 + + zipOpen @80 + zipOpenNewFileInZip @81 + zipWriteInFileInZip @82 + zipCloseFileInZip @83 + zipClose @84 + zipOpenNewFileInZip2 @86 + zipCloseFileInZipRaw @87 + zipOpen2 @88 + zipOpenNewFileInZip3 @89 + + unzGetFilePos @100 + unzGoToFilePos @101 + + fill_win32_filefunc @110 + +; zlibwapi v1.2.4 added: + fill_win32_filefunc64 @111 + fill_win32_filefunc64A @112 + fill_win32_filefunc64W @113 + + unzOpen64 @120 + unzOpen2_64 @121 + unzGetGlobalInfo64 @122 + unzGetCurrentFileInfo64 @124 + unzGetCurrentFileZStreamPos64 @125 + unztell64 @126 + unzGetFilePos64 @127 + unzGoToFilePos64 @128 + + zipOpen64 @130 + zipOpen2_64 @131 + zipOpenNewFileInZip64 @132 + zipOpenNewFileInZip2_64 @133 + zipOpenNewFileInZip3_64 @134 + zipOpenNewFileInZip4_64 @135 + zipCloseFileInZipRaw64 @136 + +; zlib1 v1.2.4 added: + adler32_combine @140 + crc32_combine @142 + deflateSetHeader @144 + deflateTune @145 + gzbuffer @146 + gzclose_r @147 + gzclose_w @148 + gzdirect @149 + gzoffset @150 + inflateGetHeader @156 + inflateMark @157 + inflatePrime @158 + inflateReset2 @159 + inflateUndermine @160 + +; zlib1 v1.2.6 added: + gzgetc_ @161 + inflateResetKeep @163 + deflateResetKeep @164 + +; zlib1 v1.2.7 added: + gzopen_w @165 + +; zlib1 v1.2.8 added: + inflateGetDictionary @166 + gzvprintf @167 diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/vc11/zlib.rc nodejs-0.11.15/deps/zlib/contrib/vstudio/vc11/zlib.rc --- nodejs-0.11.13/deps/zlib/contrib/vstudio/vc11/zlib.rc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/vstudio/vc11/zlib.rc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +#include <windows.h> + +#define IDR_VERSION1 1 +IDR_VERSION1 VERSIONINFO MOVEABLE IMPURE LOADONCALL DISCARDABLE + FILEVERSION 1,2,8,0 + 
PRODUCTVERSION 1,2,8,0 + FILEFLAGSMASK VS_FFI_FILEFLAGSMASK + FILEFLAGS 0 + FILEOS VOS_DOS_WINDOWS32 + FILETYPE VFT_DLL + FILESUBTYPE 0 // not used +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904E4" + //language ID = U.S. English, char set = Windows, Multilingual + + BEGIN + VALUE "FileDescription", "zlib data compression and ZIP file I/O library\0" + VALUE "FileVersion", "1.2.8\0" + VALUE "InternalName", "zlib\0" + VALUE "OriginalFilename", "zlibwapi.dll\0" + VALUE "ProductName", "ZLib.DLL\0" + VALUE "Comments","DLL support by Alessandro Iacopetti & Gilles Vollant\0" + VALUE "LegalCopyright", "(C) 1995-2013 Jean-loup Gailly & Mark Adler\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0409, 1252 + END +END diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/vc11/zlibvc.def nodejs-0.11.15/deps/zlib/contrib/vstudio/vc11/zlibvc.def --- nodejs-0.11.13/deps/zlib/contrib/vstudio/vc11/zlibvc.def 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/vstudio/vc11/zlibvc.def 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,143 @@ +LIBRARY +; zlib data compression and ZIP file I/O library + +VERSION 1.2.8 + +EXPORTS + adler32 @1 + compress @2 + crc32 @3 + deflate @4 + deflateCopy @5 + deflateEnd @6 + deflateInit2_ @7 + deflateInit_ @8 + deflateParams @9 + deflateReset @10 + deflateSetDictionary @11 + gzclose @12 + gzdopen @13 + gzerror @14 + gzflush @15 + gzopen @16 + gzread @17 + gzwrite @18 + inflate @19 + inflateEnd @20 + inflateInit2_ @21 + inflateInit_ @22 + inflateReset @23 + inflateSetDictionary @24 + inflateSync @25 + uncompress @26 + zlibVersion @27 + gzprintf @28 + gzputc @29 + gzgetc @30 + gzseek @31 + gzrewind @32 + gztell @33 + gzeof @34 + gzsetparams @35 + zError @36 + inflateSyncPoint @37 + get_crc_table @38 + compress2 @39 + gzputs @40 + gzgets @41 + inflateCopy @42 + inflateBackInit_ @43 + inflateBack @44 + inflateBackEnd @45 + compressBound @46 + deflateBound @47 + gzclearerr @48 + gzungetc @49 + zlibCompileFlags 
@50 + deflatePrime @51 + deflatePending @52 + + unzOpen @61 + unzClose @62 + unzGetGlobalInfo @63 + unzGetCurrentFileInfo @64 + unzGoToFirstFile @65 + unzGoToNextFile @66 + unzOpenCurrentFile @67 + unzReadCurrentFile @68 + unzOpenCurrentFile3 @69 + unztell @70 + unzeof @71 + unzCloseCurrentFile @72 + unzGetGlobalComment @73 + unzStringFileNameCompare @74 + unzLocateFile @75 + unzGetLocalExtrafield @76 + unzOpen2 @77 + unzOpenCurrentFile2 @78 + unzOpenCurrentFilePassword @79 + + zipOpen @80 + zipOpenNewFileInZip @81 + zipWriteInFileInZip @82 + zipCloseFileInZip @83 + zipClose @84 + zipOpenNewFileInZip2 @86 + zipCloseFileInZipRaw @87 + zipOpen2 @88 + zipOpenNewFileInZip3 @89 + + unzGetFilePos @100 + unzGoToFilePos @101 + + fill_win32_filefunc @110 + +; zlibwapi v1.2.4 added: + fill_win32_filefunc64 @111 + fill_win32_filefunc64A @112 + fill_win32_filefunc64W @113 + + unzOpen64 @120 + unzOpen2_64 @121 + unzGetGlobalInfo64 @122 + unzGetCurrentFileInfo64 @124 + unzGetCurrentFileZStreamPos64 @125 + unztell64 @126 + unzGetFilePos64 @127 + unzGoToFilePos64 @128 + + zipOpen64 @130 + zipOpen2_64 @131 + zipOpenNewFileInZip64 @132 + zipOpenNewFileInZip2_64 @133 + zipOpenNewFileInZip3_64 @134 + zipOpenNewFileInZip4_64 @135 + zipCloseFileInZipRaw64 @136 + +; zlib1 v1.2.4 added: + adler32_combine @140 + crc32_combine @142 + deflateSetHeader @144 + deflateTune @145 + gzbuffer @146 + gzclose_r @147 + gzclose_w @148 + gzdirect @149 + gzoffset @150 + inflateGetHeader @156 + inflateMark @157 + inflatePrime @158 + inflateReset2 @159 + inflateUndermine @160 + +; zlib1 v1.2.6 added: + gzgetc_ @161 + inflateResetKeep @163 + deflateResetKeep @164 + +; zlib1 v1.2.7 added: + gzopen_w @165 + +; zlib1 v1.2.8 added: + inflateGetDictionary @166 + gzvprintf @167 diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/vc9/zlib.rc nodejs-0.11.15/deps/zlib/contrib/vstudio/vc9/zlib.rc --- nodejs-0.11.13/deps/zlib/contrib/vstudio/vc9/zlib.rc 1970-01-01 00:00:00.000000000 +0000 +++ 
nodejs-0.11.15/deps/zlib/contrib/vstudio/vc9/zlib.rc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,32 @@ +#include <windows.h> + +#define IDR_VERSION1 1 +IDR_VERSION1 VERSIONINFO MOVEABLE IMPURE LOADONCALL DISCARDABLE + FILEVERSION 1,2,8,0 + PRODUCTVERSION 1,2,8,0 + FILEFLAGSMASK VS_FFI_FILEFLAGSMASK + FILEFLAGS 0 + FILEOS VOS_DOS_WINDOWS32 + FILETYPE VFT_DLL + FILESUBTYPE 0 // not used +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904E4" + //language ID = U.S. English, char set = Windows, Multilingual + + BEGIN + VALUE "FileDescription", "zlib data compression and ZIP file I/O library\0" + VALUE "FileVersion", "1.2.8\0" + VALUE "InternalName", "zlib\0" + VALUE "OriginalFilename", "zlibwapi.dll\0" + VALUE "ProductName", "ZLib.DLL\0" + VALUE "Comments","DLL support by Alessandro Iacopetti & Gilles Vollant\0" + VALUE "LegalCopyright", "(C) 1995-2013 Jean-loup Gailly & Mark Adler\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0409, 1252 + END +END diff -Nru nodejs-0.11.13/deps/zlib/contrib/vstudio/vc9/zlibvc.def nodejs-0.11.15/deps/zlib/contrib/vstudio/vc9/zlibvc.def --- nodejs-0.11.13/deps/zlib/contrib/vstudio/vc9/zlibvc.def 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/contrib/vstudio/vc9/zlibvc.def 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,143 @@ +LIBRARY +; zlib data compression and ZIP file I/O library + +VERSION 1.2.8 + +EXPORTS + adler32 @1 + compress @2 + crc32 @3 + deflate @4 + deflateCopy @5 + deflateEnd @6 + deflateInit2_ @7 + deflateInit_ @8 + deflateParams @9 + deflateReset @10 + deflateSetDictionary @11 + gzclose @12 + gzdopen @13 + gzerror @14 + gzflush @15 + gzopen @16 + gzread @17 + gzwrite @18 + inflate @19 + inflateEnd @20 + inflateInit2_ @21 + inflateInit_ @22 + inflateReset @23 + inflateSetDictionary @24 + inflateSync @25 + uncompress @26 + zlibVersion @27 + gzprintf @28 + gzputc @29 + gzgetc @30 + gzseek @31 + gzrewind @32 + gztell @33 + gzeof @34 + gzsetparams @35 + zError @36 + 
inflateSyncPoint @37 + get_crc_table @38 + compress2 @39 + gzputs @40 + gzgets @41 + inflateCopy @42 + inflateBackInit_ @43 + inflateBack @44 + inflateBackEnd @45 + compressBound @46 + deflateBound @47 + gzclearerr @48 + gzungetc @49 + zlibCompileFlags @50 + deflatePrime @51 + deflatePending @52 + + unzOpen @61 + unzClose @62 + unzGetGlobalInfo @63 + unzGetCurrentFileInfo @64 + unzGoToFirstFile @65 + unzGoToNextFile @66 + unzOpenCurrentFile @67 + unzReadCurrentFile @68 + unzOpenCurrentFile3 @69 + unztell @70 + unzeof @71 + unzCloseCurrentFile @72 + unzGetGlobalComment @73 + unzStringFileNameCompare @74 + unzLocateFile @75 + unzGetLocalExtrafield @76 + unzOpen2 @77 + unzOpenCurrentFile2 @78 + unzOpenCurrentFilePassword @79 + + zipOpen @80 + zipOpenNewFileInZip @81 + zipWriteInFileInZip @82 + zipCloseFileInZip @83 + zipClose @84 + zipOpenNewFileInZip2 @86 + zipCloseFileInZipRaw @87 + zipOpen2 @88 + zipOpenNewFileInZip3 @89 + + unzGetFilePos @100 + unzGoToFilePos @101 + + fill_win32_filefunc @110 + +; zlibwapi v1.2.4 added: + fill_win32_filefunc64 @111 + fill_win32_filefunc64A @112 + fill_win32_filefunc64W @113 + + unzOpen64 @120 + unzOpen2_64 @121 + unzGetGlobalInfo64 @122 + unzGetCurrentFileInfo64 @124 + unzGetCurrentFileZStreamPos64 @125 + unztell64 @126 + unzGetFilePos64 @127 + unzGoToFilePos64 @128 + + zipOpen64 @130 + zipOpen2_64 @131 + zipOpenNewFileInZip64 @132 + zipOpenNewFileInZip2_64 @133 + zipOpenNewFileInZip3_64 @134 + zipOpenNewFileInZip4_64 @135 + zipCloseFileInZipRaw64 @136 + +; zlib1 v1.2.4 added: + adler32_combine @140 + crc32_combine @142 + deflateSetHeader @144 + deflateTune @145 + gzbuffer @146 + gzclose_r @147 + gzclose_w @148 + gzdirect @149 + gzoffset @150 + inflateGetHeader @156 + inflateMark @157 + inflatePrime @158 + inflateReset2 @159 + inflateUndermine @160 + +; zlib1 v1.2.6 added: + gzgetc_ @161 + inflateResetKeep @163 + deflateResetKeep @164 + +; zlib1 v1.2.7 added: + gzopen_w @165 + +; zlib1 v1.2.8 added: + inflateGetDictionary @166 + 
gzvprintf @167 diff -Nru nodejs-0.11.13/deps/zlib/crc32.c nodejs-0.11.15/deps/zlib/crc32.c --- nodejs-0.11.13/deps/zlib/crc32.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/crc32.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* crc32.c -- compute the CRC-32 of a data stream - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2006, 2010, 2011, 2012 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h * * Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster @@ -9,7 +9,7 @@ * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3. */ -/* @(#) $Id: crc32.c,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ /* Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore @@ -17,6 +17,8 @@ of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should first call get_crc_table() to initialize the tables before allowing more than one thread to use crc32(). + + DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h. */ #ifdef MAKECRCH @@ -30,31 +32,11 @@ #define local static -/* Find a four-byte integer type for crc32_little() and crc32_big(). */ -#ifndef NOBYFOUR -# ifdef STDC /* need ANSI C limits.h to determine sizes */ -# include <limits.h> -# define BYFOUR -# if (UINT_MAX == 0xffffffffUL) - typedef unsigned int u4; -# else -# if (ULONG_MAX == 0xffffffffUL) - typedef unsigned long u4; -# else -# if (USHRT_MAX == 0xffffffffUL) - typedef unsigned short u4; -# else -# undef BYFOUR /* can't find a four-byte integer type! */ -# endif -# endif -# endif -# endif /* STDC */ -#endif /* !NOBYFOUR */ - /* Definitions for doing the crc four data bytes at a time. 
*/ +#if !defined(NOBYFOUR) && defined(Z_U4) +# define BYFOUR +#endif #ifdef BYFOUR -# define REV(w) (((w)>>24)+(((w)>>8)&0xff00)+ \ - (((w)&0xff00)<<8)+(((w)&0xff)<<24)) local unsigned long crc32_little OF((unsigned long, const unsigned char FAR *, unsigned)); local unsigned long crc32_big OF((unsigned long, @@ -68,14 +50,16 @@ local unsigned long gf2_matrix_times OF((unsigned long *mat, unsigned long vec)); local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat)); +local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2)); + #ifdef DYNAMIC_CRC_TABLE local volatile int crc_table_empty = 1; -local unsigned long FAR crc_table[TBLS][256]; +local z_crc_t FAR crc_table[TBLS][256]; local void make_crc_table OF((void)); #ifdef MAKECRCH - local void write_table OF((FILE *, const unsigned long FAR *)); + local void write_table OF((FILE *, const z_crc_t FAR *)); #endif /* MAKECRCH */ /* Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: @@ -105,9 +89,9 @@ */ local void make_crc_table() { - unsigned long c; + z_crc_t c; int n, k; - unsigned long poly; /* polynomial exclusive-or pattern */ + z_crc_t poly; /* polynomial exclusive-or pattern */ /* terms of polynomial defining this crc (except x^32): */ static volatile int first = 1; /* flag to limit concurrent making */ static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; @@ -119,13 +103,13 @@ first = 0; /* make exclusive-or pattern from polynomial (0xedb88320UL) */ - poly = 0UL; - for (n = 0; n < sizeof(p)/sizeof(unsigned char); n++) - poly |= 1UL << (31 - p[n]); + poly = 0; + for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++) + poly |= (z_crc_t)1 << (31 - p[n]); /* generate a crc for every 8-bit value */ for (n = 0; n < 256; n++) { - c = (unsigned long)n; + c = (z_crc_t)n; for (k = 0; k < 8; k++) c = c & 1 ? 
poly ^ (c >> 1) : c >> 1; crc_table[0][n] = c; @@ -136,11 +120,11 @@ and then the byte reversal of those as well as the first table */ for (n = 0; n < 256; n++) { c = crc_table[0][n]; - crc_table[4][n] = REV(c); + crc_table[4][n] = ZSWAP32(c); for (k = 1; k < 4; k++) { c = crc_table[0][c & 0xff] ^ (c >> 8); crc_table[k][n] = c; - crc_table[k + 4][n] = REV(c); + crc_table[k + 4][n] = ZSWAP32(c); } } #endif /* BYFOUR */ @@ -162,7 +146,7 @@ if (out == NULL) return; fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); - fprintf(out, "local const unsigned long FAR "); + fprintf(out, "local const z_crc_t FAR "); fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); write_table(out, crc_table[0]); # ifdef BYFOUR @@ -182,12 +166,13 @@ #ifdef MAKECRCH local void write_table(out, table) FILE *out; - const unsigned long FAR *table; + const z_crc_t FAR *table; { int n; for (n = 0; n < 256; n++) - fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", table[n], + fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", + (unsigned long)(table[n]), n == 255 ? "\n" : (n % 5 == 4 ? 
",\n" : ", ")); } #endif /* MAKECRCH */ @@ -202,13 +187,13 @@ /* ========================================================================= * This function can be used by asm versions of crc32() */ -const unsigned long FAR * ZEXPORT get_crc_table() +const z_crc_t FAR * ZEXPORT get_crc_table() { #ifdef DYNAMIC_CRC_TABLE if (crc_table_empty) make_crc_table(); #endif /* DYNAMIC_CRC_TABLE */ - return (const unsigned long FAR *)crc_table; + return (const z_crc_t FAR *)crc_table; } /* ========================================================================= */ @@ -219,7 +204,7 @@ unsigned long ZEXPORT crc32(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; - unsigned len; + uInt len; { if (buf == Z_NULL) return 0UL; @@ -230,7 +215,7 @@ #ifdef BYFOUR if (sizeof(void *) == sizeof(ptrdiff_t)) { - u4 endian; + z_crc_t endian; endian = 1; if (*((unsigned char *)(&endian))) @@ -264,17 +249,17 @@ const unsigned char FAR *buf; unsigned len; { - register u4 c; - register const u4 FAR *buf4; + register z_crc_t c; + register const z_crc_t FAR *buf4; - c = (u4)crc; + c = (z_crc_t)crc; c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); len--; } - buf4 = (const u4 FAR *)(const void FAR *)buf; + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOLIT32; len -= 32; @@ -304,17 +289,17 @@ const unsigned char FAR *buf; unsigned len; { - register u4 c; - register const u4 FAR *buf4; + register z_crc_t c; + register const z_crc_t FAR *buf4; - c = REV((u4)crc); + c = ZSWAP32((z_crc_t)crc); c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); len--; } - buf4 = (const u4 FAR *)(const void FAR *)buf; + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; buf4--; while (len >= 32) { DOBIG32; @@ -331,7 +316,7 @@ c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); } while (--len); c = ~c; - return (unsigned long)(REV(c)); + return (unsigned long)(ZSWAP32(c)); } #endif /* BYFOUR */ @@ 
-367,22 +352,22 @@ } /* ========================================================================= */ -uLong ZEXPORT crc32_combine(crc1, crc2, len2) +local uLong crc32_combine_(crc1, crc2, len2) uLong crc1; uLong crc2; - z_off_t len2; + z_off64_t len2; { int n; unsigned long row; unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */ unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ - /* degenerate case */ - if (len2 == 0) + /* degenerate case (also disallow negative lengths) */ + if (len2 <= 0) return crc1; /* put operator for one zero bit in odd */ - odd[0] = 0xedb88320L; /* CRC-32 polynomial */ + odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ row = 1; for (n = 1; n < GF2_DIM; n++) { odd[n] = row; @@ -421,3 +406,20 @@ crc1 ^= crc2; return crc1; } + +/* ========================================================================= */ +uLong ZEXPORT crc32_combine(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} + +uLong ZEXPORT crc32_combine64(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off64_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} diff -Nru nodejs-0.11.13/deps/zlib/crc32.h nodejs-0.11.15/deps/zlib/crc32.h --- nodejs-0.11.13/deps/zlib/crc32.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/crc32.h 2015-01-20 21:22:17.000000000 +0000 @@ -2,7 +2,7 @@ * Generated automatically by crc32.c */ -local const unsigned long FAR crc_table[TBLS][256] = +local const z_crc_t FAR crc_table[TBLS][256] = { { 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, diff -Nru nodejs-0.11.13/deps/zlib/deflate.c nodejs-0.11.15/deps/zlib/deflate.c --- nodejs-0.11.13/deps/zlib/deflate.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/deflate.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* deflate.c -- compress data using the deflation algorithm - * Copyright (C) 1995-2005 Jean-loup Gailly. 
+ * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -37,7 +37,7 @@ * REFERENCES * * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". - * Available in http://www.ietf.org/rfc/rfc1951.txt + * Available in http://tools.ietf.org/html/rfc1951 * * A description of the Rabin and Karp algorithm is given in the book * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. @@ -47,12 +47,12 @@ * */ -/* @(#) $Id: deflate.c,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #include "deflate.h" const char deflate_copyright[] = - " deflate 1.2.3 Copyright 1995-2005 Jean-loup Gailly "; + " deflate 1.2.8 Copyright 1995-2013 Jean-loup Gailly and Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot @@ -79,19 +79,18 @@ #ifndef FASTEST local block_state deflate_slow OF((deflate_state *s, int flush)); #endif +local block_state deflate_rle OF((deflate_state *s, int flush)); +local block_state deflate_huff OF((deflate_state *s, int flush)); local void lm_init OF((deflate_state *s)); local void putShortMSB OF((deflate_state *s, uInt b)); local void flush_pending OF((z_streamp strm)); local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); -#ifndef FASTEST #ifdef ASMV void match_init OF((void)); /* asm code initialization */ uInt longest_match OF((deflate_state *s, IPos cur_match)); #else local uInt longest_match OF((deflate_state *s, IPos cur_match)); #endif -#endif -local uInt longest_match_fast OF((deflate_state *s, IPos cur_match)); #ifdef DEBUG local void check_match OF((deflate_state *s, IPos start, IPos match, @@ -110,11 +109,6 @@ #endif /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ -#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) -/* Minimum amount of lookahead, except at the end of the input file. 
- * See deflate.c for comments about the MIN_MATCH+1. - */ - /* Values for max_lazy_match, good_match and max_chain_length, depending on * the desired pack level (0..9). The values given below have been tuned to * exclude worst case performance for pathological files. Better values may be @@ -161,6 +155,9 @@ struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ #endif +/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ +#define RANK(f) (((f) << 1) - ((f) > 4 ? 9 : 0)) + /* =========================================================================== * Update a hash value with the given input byte * IN assertion: all calls to to UPDATE_HASH are made with consecutive @@ -241,10 +238,19 @@ strm->msg = Z_NULL; if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; +#endif } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif #ifdef FASTEST if (level != 0) level = 1; @@ -288,6 +294,8 @@ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + s->high_water = 0; /* nothing written to s->window yet */ + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); @@ -297,7 +305,7 @@ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || s->pending_buf == Z_NULL) { s->status = FINISH_STATE; - strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); + strm->msg = ERR_MSG(Z_MEM_ERROR); deflateEnd (strm); return Z_MEM_ERROR; } @@ -318,43 +326,70 @@ uInt dictLength; { deflate_state *s; - uInt length = dictLength; - uInt n; - IPos hash_head = 0; - - if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL || - strm->state->wrap == 2 || - (strm->state->wrap == 1 && strm->state->status != INIT_STATE)) - return Z_STREAM_ERROR; + 
uInt str, n; + int wrap; + unsigned avail; + z_const unsigned char *next; + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) + return Z_STREAM_ERROR; s = strm->state; - if (s->wrap) + wrap = s->wrap; + if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead) + return Z_STREAM_ERROR; + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap == 1) strm->adler = adler32(strm->adler, dictionary, dictLength); + s->wrap = 0; /* avoid computing Adler-32 in read_buf */ - if (length < MIN_MATCH) return Z_OK; - if (length > MAX_DIST(s)) { - length = MAX_DIST(s); - dictionary += dictLength - length; /* use the tail of the dictionary */ - } - zmemcpy(s->window, dictionary, length); - s->strstart = length; - s->block_start = (long)length; - - /* Insert all strings in the hash table (except for the last two bytes). - * s->lookahead stays null, so s->ins_h will be recomputed at the next - * call of fill_window. - */ - s->ins_h = s->window[0]; - UPDATE_HASH(s, s->ins_h, s->window[1]); - for (n = 0; n <= length - MIN_MATCH; n++) { - INSERT_STRING(s, n, hash_head); - } - if (hash_head) hash_head = 0; /* to make compiler happy */ + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s->w_size) { + if (wrap == 0) { /* already empty otherwise */ + CLEAR_HASH(s); + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + dictionary += dictLength - s->w_size; /* use the tail */ + dictLength = s->w_size; + } + + /* insert dictionary into window and hash */ + avail = strm->avail_in; + next = strm->next_in; + strm->avail_in = dictLength; + strm->next_in = (z_const Bytef *)dictionary; + fill_window(s); + while (s->lookahead >= MIN_MATCH) { + str = s->strstart; + n = s->lookahead - (MIN_MATCH-1); + do { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + } while (--n); + 
s->strstart = str; + s->lookahead = MIN_MATCH-1; + fill_window(s); + } + s->strstart += s->lookahead; + s->block_start = (long)s->strstart; + s->insert = s->lookahead; + s->lookahead = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + strm->next_in = next; + strm->avail_in = avail; + s->wrap = wrap; return Z_OK; } /* ========================================================================= */ -int ZEXPORT deflateReset (strm) +int ZEXPORT deflateResetKeep (strm) z_streamp strm; { deflate_state *s; @@ -384,12 +419,23 @@ s->last_flush = Z_NO_FLUSH; _tr_init(s); - lm_init(s); return Z_OK; } /* ========================================================================= */ +int ZEXPORT deflateReset (strm) + z_streamp strm; +{ + int ret; + + ret = deflateResetKeep(strm); + if (ret == Z_OK) + lm_init(strm->state); + return ret; +} + +/* ========================================================================= */ int ZEXPORT deflateSetHeader (strm, head) z_streamp strm; gz_headerp head; @@ -401,14 +447,42 @@ } /* ========================================================================= */ +int ZEXPORT deflatePending (strm, pending, bits) + unsigned *pending; + int *bits; + z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (pending != Z_NULL) + *pending = strm->state->pending; + if (bits != Z_NULL) + *bits = strm->state->bi_valid; + return Z_OK; +} + +/* ========================================================================= */ int ZEXPORT deflatePrime (strm, bits, value) z_streamp strm; int bits; int value; { + deflate_state *s; + int put; + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - strm->state->bi_valid = bits; - strm->state->bi_buf = (ush)(value & ((1 << bits) - 1)); + s = strm->state; + if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; + if (put > bits) + put = bits; + s->bi_buf |= (ush)((value 
& ((1 << put) - 1)) << s->bi_valid); + s->bi_valid += put; + _tr_flush_bits(s); + value >>= put; + bits -= put; + } while (bits); return Z_OK; } @@ -435,9 +509,12 @@ } func = configuration_table[s->level].func; - if (func != configuration_table[level].func && strm->total_in != 0) { + if ((strategy != s->strategy || func != configuration_table[level].func) && + strm->total_in != 0) { /* Flush the last buffer: */ - err = deflate(strm, Z_PARTIAL_FLUSH); + err = deflate(strm, Z_BLOCK); + if (err == Z_BUF_ERROR && s->pending == 0) + err = Z_OK; } if (s->level != level) { s->level = level; @@ -481,33 +558,66 @@ * resulting from using fixed blocks instead of stored blocks, which deflate * can emit on compressed data for some combinations of the parameters. * - * This function could be more sophisticated to provide closer upper bounds - * for every combination of windowBits and memLevel, as well as wrap. - * But even the conservative upper bound of about 14% expansion does not - * seem onerous for output buffer allocation. + * This function could be more sophisticated to provide closer upper bounds for + * every combination of windowBits and memLevel. But even the conservative + * upper bound of about 14% expansion does not seem onerous for output buffer + * allocation. 
*/ uLong ZEXPORT deflateBound(strm, sourceLen) z_streamp strm; uLong sourceLen; { deflate_state *s; - uLong destLen; + uLong complen, wraplen; + Bytef *str; - /* conservative upper bound */ - destLen = sourceLen + - ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 11; + /* conservative upper bound for compressed data */ + complen = sourceLen + + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; - /* if can't get parameters, return conservative bound */ + /* if can't get parameters, return conservative bound plus zlib wrapper */ if (strm == Z_NULL || strm->state == Z_NULL) - return destLen; + return complen + 6; - /* if not default parameters, return conservative bound */ + /* compute wrapper length */ s = strm->state; + switch (s->wrap) { + case 0: /* raw deflate */ + wraplen = 0; + break; + case 1: /* zlib wrapper */ + wraplen = 6 + (s->strstart ? 4 : 0); + break; + case 2: /* gzip wrapper */ + wraplen = 18; + if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ + if (s->gzhead->extra != Z_NULL) + wraplen += 2 + s->gzhead->extra_len; + str = s->gzhead->name; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + str = s->gzhead->comment; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + if (s->gzhead->hcrc) + wraplen += 2; + } + break; + default: /* for compiler happiness */ + wraplen = 6; + } + + /* if not default parameters, return conservative bound */ if (s->w_bits != 15 || s->hash_bits != 8 + 7) - return destLen; + return complen + wraplen; /* default settings: return tight bound for that case */ - return compressBound(sourceLen); + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13 - 6 + wraplen; } /* ========================================================================= @@ -532,19 +642,22 @@ local void flush_pending(strm) z_streamp strm; { - unsigned len = strm->state->pending; + unsigned len; + deflate_state *s = strm->state; + _tr_flush_bits(s); + len = s->pending; if (len > 
strm->avail_out) len = strm->avail_out; if (len == 0) return; - zmemcpy(strm->next_out, strm->state->pending_out, len); + zmemcpy(strm->next_out, s->pending_out, len); strm->next_out += len; - strm->state->pending_out += len; + s->pending_out += len; strm->total_out += len; strm->avail_out -= len; - strm->state->pending -= len; - if (strm->state->pending == 0) { - strm->state->pending_out = strm->state->pending_buf; + s->pending -= len; + if (s->pending == 0) { + s->pending_out = s->pending_buf; } } @@ -557,7 +670,7 @@ deflate_state *s; if (strm == Z_NULL || strm->state == Z_NULL || - flush > Z_FINISH || flush < 0) { + flush > Z_BLOCK || flush < 0) { return Z_STREAM_ERROR; } s = strm->state; @@ -581,7 +694,7 @@ put_byte(s, 31); put_byte(s, 139); put_byte(s, 8); - if (s->gzhead == NULL) { + if (s->gzhead == Z_NULL) { put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); @@ -608,7 +721,7 @@ (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 4 : 0)); put_byte(s, s->gzhead->os & 0xff); - if (s->gzhead->extra != NULL) { + if (s->gzhead->extra != Z_NULL) { put_byte(s, s->gzhead->extra_len & 0xff); put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); } @@ -650,7 +763,7 @@ } #ifdef GZIP if (s->status == EXTRA_STATE) { - if (s->gzhead->extra != NULL) { + if (s->gzhead->extra != Z_NULL) { uInt beg = s->pending; /* start of bytes to update crc */ while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { @@ -678,7 +791,7 @@ s->status = NAME_STATE; } if (s->status == NAME_STATE) { - if (s->gzhead->name != NULL) { + if (s->gzhead->name != Z_NULL) { uInt beg = s->pending; /* start of bytes to update crc */ int val; @@ -709,7 +822,7 @@ s->status = COMMENT_STATE; } if (s->status == COMMENT_STATE) { - if (s->gzhead->comment != NULL) { + if (s->gzhead->comment != Z_NULL) { uInt beg = s->pending; /* start of bytes to update crc */ int val; @@ -771,7 +884,7 @@ * flushes. For repeated and useless calls with Z_FINISH, we keep * returning Z_STREAM_END instead of Z_BUF_ERROR. 
*/ - } else if (strm->avail_in == 0 && flush <= old_flush && + } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && flush != Z_FINISH) { ERR_RETURN(strm, Z_BUF_ERROR); } @@ -787,7 +900,9 @@ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { block_state bstate; - bstate = (*(configuration_table[s->level].func))(s, flush); + bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : + (s->strategy == Z_RLE ? deflate_rle(s, flush) : + (*(configuration_table[s->level].func))(s, flush)); if (bstate == finish_started || bstate == finish_done) { s->status = FINISH_STATE; @@ -808,13 +923,18 @@ if (bstate == block_done) { if (flush == Z_PARTIAL_FLUSH) { _tr_align(s); - } else { /* FULL_FLUSH or SYNC_FLUSH */ + } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ _tr_stored_block(s, (char*)0, 0L, 0); /* For a full flush, this empty block will be recognized * as a special marker by inflate_sync(). */ if (flush == Z_FULL_FLUSH) { CLEAR_HASH(s); /* forget history */ + if (s->lookahead == 0) { + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } } } flush_pending(strm); @@ -909,12 +1029,12 @@ ss = source->state; - zmemcpy(dest, source, sizeof(z_stream)); + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); if (ds == Z_NULL) return Z_MEM_ERROR; dest->state = (struct internal_state FAR *) ds; - zmemcpy(ds, ss, sizeof(deflate_state)); + zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); ds->strm = dest; ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); @@ -930,8 +1050,8 @@ } /* following zmemcpy do not work for 16-bit MSDOS */ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); - zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); - zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); + zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); + zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); 
zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); @@ -965,15 +1085,15 @@ strm->avail_in -= len; + zmemcpy(buf, strm->next_in, len); if (strm->state->wrap == 1) { - strm->adler = adler32(strm->adler, strm->next_in, len); + strm->adler = adler32(strm->adler, buf, len); } #ifdef GZIP else if (strm->state->wrap == 2) { - strm->adler = crc32(strm->adler, strm->next_in, len); + strm->adler = crc32(strm->adler, buf, len); } #endif - zmemcpy(buf, strm->next_in, len); strm->next_in += len; strm->total_in += len; @@ -1000,6 +1120,7 @@ s->strstart = 0; s->block_start = 0L; s->lookahead = 0; + s->insert = 0; s->match_length = s->prev_length = MIN_MATCH-1; s->match_available = 0; s->ins_h = 0; @@ -1167,12 +1288,13 @@ return s->lookahead; } #endif /* ASMV */ -#endif /* FASTEST */ + +#else /* FASTEST */ /* --------------------------------------------------------------------------- - * Optimized version for level == 1 or strategy == Z_RLE only + * Optimized version for FASTEST only */ -local uInt longest_match_fast(s, cur_match) +local uInt longest_match(s, cur_match) deflate_state *s; IPos cur_match; /* current match */ { @@ -1225,6 +1347,8 @@ return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; } +#endif /* FASTEST */ + #ifdef DEBUG /* =========================================================================== * Check that the match at match_start is indeed a match. @@ -1271,6 +1395,8 @@ unsigned more; /* Amount of free space at the end of the window. */ uInt wsize = s->w_size; + Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + do { more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); @@ -1303,7 +1429,6 @@ later. (Using level 0 permanently is not an optimal usage of zlib, so we don't care about this pathological case.) 
*/ - /* %%% avoid this when Z_RLE */ n = s->hash_size; p = &s->head[n]; do { @@ -1324,7 +1449,7 @@ #endif more += wsize; } - if (s->strm->avail_in == 0) return; + if (s->strm->avail_in == 0) break; /* If there was no sliding: * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && @@ -1343,39 +1468,88 @@ s->lookahead += n; /* Initialize the hash value now that we have some input: */ - if (s->lookahead >= MIN_MATCH) { - s->ins_h = s->window[s->strstart]; - UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); + if (s->lookahead + s->insert >= MIN_MATCH) { + uInt str = s->strstart - s->insert; + s->ins_h = s->window[str]; + UPDATE_HASH(s, s->ins_h, s->window[str + 1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif + while (s->insert) { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + s->insert--; + if (s->lookahead + s->insert < MIN_MATCH) + break; + } } /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, * but this is not important since only literal bytes will be emitted. */ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ + if (s->high_water < s->window_size) { + ulg curr = s->strstart + (ulg)(s->lookahead); + ulg init; + + if (s->high_water < curr) { + /* Previous high water mark below current data -- zero WIN_INIT + * bytes or up to end of window, whichever is less. 
+ */ + init = s->window_size - curr; + if (init > WIN_INIT) + init = WIN_INIT; + zmemzero(s->window + curr, (unsigned)init); + s->high_water = curr + init; + } + else if (s->high_water < (ulg)curr + WIN_INIT) { + /* High water mark at or above current data, but below current data + * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up + * to end of window, whichever is less. + */ + init = (ulg)curr + WIN_INIT - s->high_water; + if (init > s->window_size - s->high_water) + init = s->window_size - s->high_water; + zmemzero(s->window + s->high_water, (unsigned)init); + s->high_water += init; + } + } + + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "not enough room for search"); } /* =========================================================================== * Flush the current block, with given end-of-file flag. * IN assertion: strstart is set to the end of the current match. */ -#define FLUSH_BLOCK_ONLY(s, eof) { \ +#define FLUSH_BLOCK_ONLY(s, last) { \ _tr_flush_block(s, (s->block_start >= 0L ? \ (charf *)&s->window[(unsigned)s->block_start] : \ (charf *)Z_NULL), \ (ulg)((long)s->strstart - s->block_start), \ - (eof)); \ + (last)); \ s->block_start = s->strstart; \ flush_pending(s->strm); \ Tracev((stderr,"[FLUSH]")); \ } /* Same but force premature exit if necessary. */ -#define FLUSH_BLOCK(s, eof) { \ - FLUSH_BLOCK_ONLY(s, eof); \ - if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \ +#define FLUSH_BLOCK(s, last) { \ + FLUSH_BLOCK_ONLY(s, last); \ + if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \ } /* =========================================================================== @@ -1434,8 +1608,14 @@ FLUSH_BLOCK(s, 0); } } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? 
finish_done : block_done; + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if ((long)s->strstart > s->block_start) + FLUSH_BLOCK(s, 0); + return block_done; } /* =========================================================================== @@ -1449,7 +1629,7 @@ deflate_state *s; int flush; { - IPos hash_head = NIL; /* head of the hash chain */ + IPos hash_head; /* head of the hash chain */ int bflush; /* set if current block must be flushed */ for (;;) { @@ -1469,6 +1649,7 @@ /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ + hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } @@ -1481,19 +1662,8 @@ * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ -#ifdef FASTEST - if ((s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) || - (s->strategy == Z_RLE && s->strstart - hash_head == 1)) { - s->match_length = longest_match_fast (s, hash_head); - } -#else - if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) { - s->match_length = longest_match (s, hash_head); - } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) { - s->match_length = longest_match_fast (s, hash_head); - } -#endif - /* longest_match() or longest_match_fast() sets match_start */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ } if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->match_start, s->match_length); @@ -1541,8 +1711,14 @@ } if (bflush) FLUSH_BLOCK(s, 0); } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = s->strstart < MIN_MATCH-1 ? 
s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; } #ifndef FASTEST @@ -1555,7 +1731,7 @@ deflate_state *s; int flush; { - IPos hash_head = NIL; /* head of hash chain */ + IPos hash_head; /* head of hash chain */ int bflush; /* set if current block must be flushed */ /* Process the input block. */ @@ -1576,6 +1752,7 @@ /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ + hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } @@ -1591,12 +1768,8 @@ * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ - if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) { - s->match_length = longest_match (s, hash_head); - } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) { - s->match_length = longest_match_fast (s, hash_head); - } - /* longest_match() or longest_match_fast() sets match_start */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ if (s->match_length <= 5 && (s->strategy == Z_FILTERED #if TOO_FAR <= 32767 @@ -1669,12 +1842,17 @@ _tr_tally_lit(s, s->window[s->strstart-1], bflush); s->match_available = 0; } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; } #endif /* FASTEST */ -#if 0 /* =========================================================================== * For Z_RLE, simply look for runs of bytes, generate matches only of distance * one. Do not maintain a hash table. 
(It will be regenerated if this run of @@ -1684,43 +1862,52 @@ deflate_state *s; int flush; { - int bflush; /* set if current block must be flushed */ - uInt run; /* length of run */ - uInt max; /* maximum length of run */ - uInt prev; /* byte at distance one to match */ - Bytef *scan; /* scan for end of run */ + int bflush; /* set if current block must be flushed */ + uInt prev; /* byte at distance one to match */ + Bytef *scan, *strend; /* scan goes up to strend for length of run */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes - * for the longest encodable run. + * for the longest run, plus one for the unrolled loop. */ - if (s->lookahead < MAX_MATCH) { + if (s->lookahead <= MAX_MATCH) { fill_window(s); - if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) { + if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* See how many times the previous byte repeats */ - run = 0; - if (s->strstart > 0) { /* if there is a previous byte, that is */ - max = s->lookahead < MAX_MATCH ? 
s->lookahead : MAX_MATCH; + s->match_length = 0; + if (s->lookahead >= MIN_MATCH && s->strstart > 0) { scan = s->window + s->strstart - 1; - prev = *scan++; - do { - if (*scan++ != prev) - break; - } while (++run < max); + prev = *scan; + if (prev == *++scan && prev == *++scan && prev == *++scan) { + strend = s->window + s->strstart + MAX_MATCH; + do { + } while (prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + scan < strend); + s->match_length = MAX_MATCH - (int)(strend - scan); + if (s->match_length > s->lookahead) + s->match_length = s->lookahead; + } + Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); } /* Emit match if have run of MIN_MATCH or longer, else emit literal */ - if (run >= MIN_MATCH) { - check_match(s, s->strstart, s->strstart - 1, run); - _tr_tally_dist(s, 1, run - MIN_MATCH, bflush); - s->lookahead -= run; - s->strstart += run; + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->strstart - 1, s->match_length); + + _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + s->strstart += s->match_length; + s->match_length = 0; } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); @@ -1730,7 +1917,51 @@ } if (bflush) FLUSH_BLOCK(s, 0); } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) 
+ */ +local block_state deflate_huff(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. */ + if (s->lookahead == 0) { + fill_window(s); + if (s->lookahead == 0) { + if (flush == Z_NO_FLUSH) + return need_more; + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s->match_length = 0; + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; } -#endif diff -Nru nodejs-0.11.13/deps/zlib/deflate.h nodejs-0.11.15/deps/zlib/deflate.h --- nodejs-0.11.13/deps/zlib/deflate.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/deflate.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* deflate.h -- internal compression state - * Copyright (C) 1995-2004 Jean-loup Gailly + * Copyright (C) 1995-2012 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -8,7 +8,7 @@ subject to change. Applications should only use zlib.h. 
*/ -/* @(#) $Id: deflate.h,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #ifndef DEFLATE_H #define DEFLATE_H @@ -48,6 +48,9 @@ #define MAX_BITS 15 /* All codes must not exceed MAX_BITS bits */ +#define Buf_size 16 +/* size of bit buffer in bi_buf */ + #define INIT_STATE 42 #define EXTRA_STATE 69 #define NAME_STATE 73 @@ -101,7 +104,7 @@ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ gz_headerp gzhead; /* gzip header information to write */ uInt gzindex; /* where in extra, name, or comment */ - Byte method; /* STORED (for zip only) or DEFLATED */ + Byte method; /* can only be DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ @@ -188,7 +191,7 @@ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ - /* Didn't use ct_data typedef below to supress compiler warning */ + /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ @@ -244,7 +247,7 @@ ulg opt_len; /* bit length of current block with optimal trees */ ulg static_len; /* bit length of current block with static trees */ uInt matches; /* number of string matches in current block */ - int last_eob_len; /* bit length of EOB code for last block */ + uInt insert; /* bytes at end of window left to insert */ #ifdef DEBUG ulg compressed_len; /* total bit length of compressed file mod 2^32 */ @@ -260,6 +263,13 @@ * are always zero. */ + ulg high_water; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ + } FAR deflate_state; /* Output a byte on the stream. 
@@ -278,14 +288,19 @@ * distances are limited to MAX_DIST instead of WSIZE. */ +#define WIN_INIT MAX_MATCH +/* Number of bytes after end of data in window to initialize in order to avoid + memory checker errors from longest match routines */ + /* in trees.c */ -void _tr_init OF((deflate_state *s)); -int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); -void _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, - int eof)); -void _tr_align OF((deflate_state *s)); -void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, - int eof)); +void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); +int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); +void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); +void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); #define d_code(dist) \ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) @@ -298,11 +313,11 @@ /* Inline versions of _tr_tally for speed: */ #if defined(GEN_TREES_H) || !defined(STDC) - extern uch _length_code[]; - extern uch _dist_code[]; + extern uch ZLIB_INTERNAL _length_code[]; + extern uch ZLIB_INTERNAL _dist_code[]; #else - extern const uch _length_code[]; - extern const uch _dist_code[]; + extern const uch ZLIB_INTERNAL _length_code[]; + extern const uch ZLIB_INTERNAL _dist_code[]; #endif # define _tr_tally_lit(s, c, flush) \ diff -Nru nodejs-0.11.13/deps/zlib/doc/algorithm.txt nodejs-0.11.15/deps/zlib/doc/algorithm.txt --- nodejs-0.11.13/deps/zlib/doc/algorithm.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/doc/algorithm.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,209 @@ +1. 
Compression algorithm (deflate) + +The deflation algorithm used by gzip (also zip and zlib) is a variation of +LZ77 (Lempel-Ziv 1977, see reference below). It finds duplicated strings in +the input data. The second occurrence of a string is replaced by a +pointer to the previous string, in the form of a pair (distance, +length). Distances are limited to 32K bytes, and lengths are limited +to 258 bytes. When a string does not occur anywhere in the previous +32K bytes, it is emitted as a sequence of literal bytes. (In this +description, `string' must be taken as an arbitrary sequence of bytes, +and is not restricted to printable characters.) + +Literals or match lengths are compressed with one Huffman tree, and +match distances are compressed with another tree. The trees are stored +in a compact form at the start of each block. The blocks can have any +size (except that the compressed data for one block must fit in +available memory). A block is terminated when deflate() determines that +it would be useful to start another block with fresh trees. (This is +somewhat similar to the behavior of LZW-based _compress_.) + +Duplicated strings are found using a hash table. All input strings of +length 3 are inserted in the hash table. A hash index is computed for +the next 3 bytes. If the hash chain for this index is not empty, all +strings in the chain are compared with the current input string, and +the longest match is selected. + +The hash chains are searched starting with the most recent strings, to +favor small distances and thus take advantage of the Huffman encoding. +The hash chains are singly linked. There are no deletions from the +hash chains, the algorithm simply discards matches that are too old. + +To avoid a worst-case situation, very long hash chains are arbitrarily +truncated at a certain length, determined by a runtime option (level +parameter of deflateInit). 
So deflate() does not always find the longest +possible match but generally finds a match which is long enough. + +deflate() also defers the selection of matches with a lazy evaluation +mechanism. After a match of length N has been found, deflate() searches for +a longer match at the next input byte. If a longer match is found, the +previous match is truncated to a length of one (thus producing a single +literal byte) and the process of lazy evaluation begins again. Otherwise, +the original match is kept, and the next match search is attempted only N +steps later. + +The lazy match evaluation is also subject to a runtime parameter. If +the current match is long enough, deflate() reduces the search for a longer +match, thus speeding up the whole process. If compression ratio is more +important than speed, deflate() attempts a complete second search even if +the first match is already long enough. + +The lazy match evaluation is not performed for the fastest compression +modes (level parameter 1 to 3). For these fast modes, new strings +are inserted in the hash table only when no match was found, or +when the match is not too long. This degrades the compression ratio +but saves time since there are both fewer insertions and fewer searches. + + +2. Decompression algorithm (inflate) + +2.1 Introduction + +The key question is how to represent a Huffman code (or any prefix code) so +that you can decode fast. The most important characteristic is that shorter +codes are much more common than longer codes, so pay attention to decoding the +short codes fast, and let the long codes take longer to decode. + +inflate() sets up a first level table that covers some number of bits of +input less than the length of longest code. It gets that many bits from the +stream, and looks it up in the table. 
The table will tell if the next +code is that many bits or less and how many, and if it is, it will tell +the value, else it will point to the next level table for which inflate() +grabs more bits and tries to decode a longer code. + +How many bits to make the first lookup is a tradeoff between the time it +takes to decode and the time it takes to build the table. If building the +table took no time (and if you had infinite memory), then there would only +be a first level table to cover all the way to the longest code. However, +building the table ends up taking a lot longer for more bits since short +codes are replicated many times in such a table. What inflate() does is +simply to make the number of bits in the first table a variable, and then +to set that variable for the maximum speed. + +For inflate, which has 286 possible codes for the literal/length tree, the size +of the first table is nine bits. Also the distance trees have 30 possible +values, and the size of the first table is six bits. Note that for each of +those cases, the table ended up one bit longer than the ``average'' code +length, i.e. the code length of an approximately flat code which would be a +little more than eight bits for 286 symbols and a little less than five bits +for 30 symbols. + + +2.2 More details on the inflate table lookup + +Ok, you want to know what this cleverly obfuscated inflate tree actually +looks like. You are correct that it's not a Huffman tree. It is simply a +lookup table for the first, let's say, nine bits of a Huffman symbol. The +symbol could be as short as one bit or as long as 15 bits. If a particular +symbol is shorter than nine bits, then that symbol's translation is duplicated +in all those entries that start with that symbol's bits. For example, if the +symbol is four bits, then it's duplicated 32 times in a nine-bit table. If a +symbol is nine bits long, it appears in the table once. 
+ +If the symbol is longer than nine bits, then that entry in the table points +to another similar table for the remaining bits. Again, there are duplicated +entries as needed. The idea is that most of the time the symbol will be short +and there will only be one table look up. (That's whole idea behind data +compression in the first place.) For the less frequent long symbols, there +will be two lookups. If you had a compression method with really long +symbols, you could have as many levels of lookups as is efficient. For +inflate, two is enough. + +So a table entry either points to another table (in which case nine bits in +the above example are gobbled), or it contains the translation for the symbol +and the number of bits to gobble. Then you start again with the next +ungobbled bit. + +You may wonder: why not just have one lookup table for how ever many bits the +longest symbol is? The reason is that if you do that, you end up spending +more time filling in duplicate symbol entries than you do actually decoding. +At least for deflate's output that generates new trees every several 10's of +kbytes. You can imagine that filling in a 2^15 entry table for a 15-bit code +would take too long if you're only decoding several thousand symbols. At the +other extreme, you could make a new table for every bit in the code. In fact, +that's essentially a Huffman tree. But then you spend too much time +traversing the tree while decoding, even for short symbols. + +So the number of bits for the first lookup table is a trade of the time to +fill out the table vs. the time spent looking at the second level and above of +the table. 
+ +Here is an example, scaled down: + +The code being decoded, with 10 symbols, from 1 to 6 bits long: + +A: 0 +B: 10 +C: 1100 +D: 11010 +E: 11011 +F: 11100 +G: 11101 +H: 11110 +I: 111110 +J: 111111 + +Let's make the first table three bits long (eight entries): + +000: A,1 +001: A,1 +010: A,1 +011: A,1 +100: B,2 +101: B,2 +110: -> table X (gobble 3 bits) +111: -> table Y (gobble 3 bits) + +Each entry is what the bits decode as and how many bits that is, i.e. how +many bits to gobble. Or the entry points to another table, with the number of +bits to gobble implicit in the size of the table. + +Table X is two bits long since the longest code starting with 110 is five bits +long: + +00: C,1 +01: C,1 +10: D,2 +11: E,2 + +Table Y is three bits long since the longest code starting with 111 is six +bits long: + +000: F,2 +001: F,2 +010: G,2 +011: G,2 +100: H,2 +101: H,2 +110: I,3 +111: J,3 + +So what we have here are three tables with a total of 20 entries that had to +be constructed. That's compared to 64 entries for a single table. Or +compared to 16 entries for a Huffman tree (six two entry tables and one four +entry table). Assuming that the code ideally represents the probability of +the symbols, it takes on the average 1.25 lookups per symbol. That's compared +to one lookup for the single table, or 1.66 lookups per symbol for the +Huffman tree. + +There, I think that gives you a picture of what's going on. For inflate, the +meaning of a particular symbol is often more than just a letter. It can be a +byte (a "literal"), or it can be either a length or a distance which +indicates a base value and a number of bits to fetch after the code that is +added to the base value. Or it might be the special end-of-block code. The +data structures created in inftrees.c try to encode all that information +compactly in the tables. 
+ + +Jean-loup Gailly Mark Adler +jloup@gzip.org madler@alumni.caltech.edu + + +References: + +[LZ77] Ziv J., Lempel A., ``A Universal Algorithm for Sequential Data +Compression,'' IEEE Transactions on Information Theory, Vol. 23, No. 3, +pp. 337-343. + +``DEFLATE Compressed Data Format Specification'' available in +http://tools.ietf.org/html/rfc1951 diff -Nru nodejs-0.11.13/deps/zlib/doc/rfc1950.txt nodejs-0.11.15/deps/zlib/doc/rfc1950.txt --- nodejs-0.11.13/deps/zlib/doc/rfc1950.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/doc/rfc1950.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,619 @@ + + + + + + +Network Working Group P. Deutsch +Request for Comments: 1950 Aladdin Enterprises +Category: Informational J-L. Gailly + Info-ZIP + May 1996 + + + ZLIB Compressed Data Format Specification version 3.3 + +Status of This Memo + + This memo provides information for the Internet community. This memo + does not specify an Internet standard of any kind. Distribution of + this memo is unlimited. + +IESG Note: + + The IESG takes no position on the validity of any Intellectual + Property Rights statements contained in this document. + +Notices + + Copyright (c) 1996 L. Peter Deutsch and Jean-Loup Gailly + + Permission is granted to copy and distribute this document for any + purpose and without charge, including translations into other + languages and incorporation into compilations, provided that the + copyright notice and this notice are preserved, and that any + substantive changes or deletions from the original are clearly + marked. + + A pointer to the latest version of this and related documentation in + HTML format can be found at the URL + <ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html>. + +Abstract + + This specification defines a lossless compressed data format. 
The + data can be produced or consumed, even for an arbitrarily long + sequentially presented input data stream, using only an a priori + bounded amount of intermediate storage. The format presently uses + the DEFLATE compression method but can be easily extended to use + other compression methods. It can be implemented readily in a manner + not covered by patents. This specification also defines the ADLER-32 + checksum (an extension and improvement of the Fletcher checksum), + used for detection of data corruption, and provides an algorithm for + computing it. + + + + +Deutsch & Gailly Informational [Page 1] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + +Table of Contents + + 1. Introduction ................................................... 2 + 1.1. Purpose ................................................... 2 + 1.2. Intended audience ......................................... 3 + 1.3. Scope ..................................................... 3 + 1.4. Compliance ................................................ 3 + 1.5. Definitions of terms and conventions used ................ 3 + 1.6. Changes from previous versions ............................ 3 + 2. Detailed specification ......................................... 3 + 2.1. Overall conventions ....................................... 3 + 2.2. Data format ............................................... 4 + 2.3. Compliance ................................................ 7 + 3. References ..................................................... 7 + 4. Source code .................................................... 8 + 5. Security Considerations ........................................ 8 + 6. Acknowledgements ............................................... 8 + 7. Authors' Addresses ............................................. 8 + 8. Appendix: Rationale ............................................ 9 + 9. Appendix: Sample code ..........................................10 + +1. 
Introduction + + 1.1. Purpose + + The purpose of this specification is to define a lossless + compressed data format that: + + * Is independent of CPU type, operating system, file system, + and character set, and hence can be used for interchange; + + * Can be produced or consumed, even for an arbitrarily long + sequentially presented input data stream, using only an a + priori bounded amount of intermediate storage, and hence can + be used in data communications or similar structures such as + Unix filters; + + * Can use a number of different compression methods; + + * Can be implemented readily in a manner not covered by + patents, and hence can be practiced freely. + + The data format defined by this specification does not attempt to + allow random access to compressed data. + + + + + + + +Deutsch & Gailly Informational [Page 2] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + + 1.2. Intended audience + + This specification is intended for use by implementors of software + to compress data into zlib format and/or decompress data from zlib + format. + + The text of the specification assumes a basic background in + programming at the level of bits and other primitive data + representations. + + 1.3. Scope + + The specification specifies a compressed data format that can be + used for in-memory compression of a sequence of arbitrary bytes. + + 1.4. Compliance + + Unless otherwise indicated below, a compliant decompressor must be + able to accept and decompress any data set that conforms to all + the specifications presented here; a compliant compressor must + produce data sets that conform to all the specifications presented + here. + + 1.5. Definitions of terms and conventions used + + byte: 8 bits stored or transmitted as a unit (same as an octet). + (For this specification, a byte is exactly 8 bits, even on + machines which store a character on a number of bits different + from 8.) See below, for the numbering of bits within a byte. + + 1.6. 
Changes from previous versions + + Version 3.1 was the first public release of this specification. + In version 3.2, some terminology was changed and the Adler-32 + sample code was rewritten for clarity. In version 3.3, the + support for a preset dictionary was introduced, and the + specification was converted to RFC style. + +2. Detailed specification + + 2.1. Overall conventions + + In the diagrams below, a box like this: + + +---+ + | | <-- the vertical bars might be missing + +---+ + + + + +Deutsch & Gailly Informational [Page 3] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + + represents one byte; a box like this: + + +==============+ + | | + +==============+ + + represents a variable number of bytes. + + Bytes stored within a computer do not have a "bit order", since + they are always treated as a unit. However, a byte considered as + an integer between 0 and 255 does have a most- and least- + significant bit, and since we write numbers with the most- + significant digit on the left, we also write bytes with the most- + significant bit on the left. In the diagrams below, we number the + bits of a byte so that bit 0 is the least-significant bit, i.e., + the bits are numbered: + + +--------+ + |76543210| + +--------+ + + Within a computer, a number may occupy multiple bytes. All + multi-byte numbers in the format described here are stored with + the MOST-significant byte first (at the lower memory address). + For example, the decimal number 520 is stored as: + + 0 1 + +--------+--------+ + |00000010|00001000| + +--------+--------+ + ^ ^ + | | + | + less significant byte = 8 + + more significant byte = 2 x 256 + + 2.2. 
Data format + + A zlib stream has the following structure: + + 0 1 + +---+---+ + |CMF|FLG| (more-->) + +---+---+ + + + + + + + + +Deutsch & Gailly Informational [Page 4] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + + (if FLG.FDICT set) + + 0 1 2 3 + +---+---+---+---+ + | DICTID | (more-->) + +---+---+---+---+ + + +=====================+---+---+---+---+ + |...compressed data...| ADLER32 | + +=====================+---+---+---+---+ + + Any data which may appear after ADLER32 are not part of the zlib + stream. + + CMF (Compression Method and flags) + This byte is divided into a 4-bit compression method and a 4- + bit information field depending on the compression method. + + bits 0 to 3 CM Compression method + bits 4 to 7 CINFO Compression info + + CM (Compression method) + This identifies the compression method used in the file. CM = 8 + denotes the "deflate" compression method with a window size up + to 32K. This is the method used by gzip and PNG (see + references [1] and [2] in Chapter 3, below, for the reference + documents). CM = 15 is reserved. It might be used in a future + version of this specification to indicate the presence of an + extra field before the compressed data. + + CINFO (Compression info) + For CM = 8, CINFO is the base-2 logarithm of the LZ77 window + size, minus eight (CINFO=7 indicates a 32K window size). Values + of CINFO above 7 are not allowed in this version of the + specification. CINFO is not defined in this specification for + CM not equal to 8. + + FLG (FLaGs) + This flag byte is divided as follows: + + bits 0 to 4 FCHECK (check bits for CMF and FLG) + bit 5 FDICT (preset dictionary) + bits 6 to 7 FLEVEL (compression level) + + The FCHECK value must be such that CMF and FLG, when viewed as + a 16-bit unsigned integer stored in MSB order (CMF*256 + FLG), + is a multiple of 31. 
+ + + + +Deutsch & Gailly Informational [Page 5] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + + FDICT (Preset dictionary) + If FDICT is set, a DICT dictionary identifier is present + immediately after the FLG byte. The dictionary is a sequence of + bytes which are initially fed to the compressor without + producing any compressed output. DICT is the Adler-32 checksum + of this sequence of bytes (see the definition of ADLER32 + below). The decompressor can use this identifier to determine + which dictionary has been used by the compressor. + + FLEVEL (Compression level) + These flags are available for use by specific compression + methods. The "deflate" method (CM = 8) sets these flags as + follows: + + 0 - compressor used fastest algorithm + 1 - compressor used fast algorithm + 2 - compressor used default algorithm + 3 - compressor used maximum compression, slowest algorithm + + The information in FLEVEL is not needed for decompression; it + is there to indicate if recompression might be worthwhile. + + compressed data + For compression method 8, the compressed data is stored in the + deflate compressed data format as described in the document + "DEFLATE Compressed Data Format Specification" by L. Peter + Deutsch. (See reference [3] in Chapter 3, below) + + Other compressed data formats are not specified in this version + of the zlib specification. + + ADLER32 (Adler-32 checksum) + This contains a checksum value of the uncompressed data + (excluding any dictionary data) computed according to Adler-32 + algorithm. This algorithm is a 32-bit extension and improvement + of the Fletcher algorithm, used in the ITU-T X.224 / ISO 8073 + standard. See references [4] and [5] in Chapter 3, below) + + Adler-32 is composed of two sums accumulated per byte: s1 is + the sum of all bytes, s2 is the sum of all s1 values. Both sums + are done modulo 65521. s1 is initialized to 1, s2 to zero. 
The + Adler-32 checksum is stored as s2*65536 + s1 in most- + significant-byte first (network) order. + + + + + + + + +Deutsch & Gailly Informational [Page 6] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + + 2.3. Compliance + + A compliant compressor must produce streams with correct CMF, FLG + and ADLER32, but need not support preset dictionaries. When the + zlib data format is used as part of another standard data format, + the compressor may use only preset dictionaries that are specified + by this other data format. If this other format does not use the + preset dictionary feature, the compressor must not set the FDICT + flag. + + A compliant decompressor must check CMF, FLG, and ADLER32, and + provide an error indication if any of these have incorrect values. + A compliant decompressor must give an error indication if CM is + not one of the values defined in this specification (only the + value 8 is permitted in this version), since another value could + indicate the presence of new features that would cause subsequent + data to be interpreted incorrectly. A compliant decompressor must + give an error indication if FDICT is set and DICTID is not the + identifier of a known preset dictionary. A decompressor may + ignore FLEVEL and still be compliant. When the zlib data format + is being used as a part of another standard format, a compliant + decompressor must support all the preset dictionaries specified by + the other format. When the other format does not use the preset + dictionary feature, a compliant decompressor must reject any + stream in which the FDICT flag is set. + +3. 
References + + [1] Deutsch, L.P.,"GZIP Compressed Data Format Specification", + available in ftp://ftp.uu.net/pub/archiving/zip/doc/ + + [2] Thomas Boutell, "PNG (Portable Network Graphics) specification", + available in ftp://ftp.uu.net/graphics/png/documents/ + + [3] Deutsch, L.P.,"DEFLATE Compressed Data Format Specification", + available in ftp://ftp.uu.net/pub/archiving/zip/doc/ + + [4] Fletcher, J. G., "An Arithmetic Checksum for Serial + Transmissions," IEEE Transactions on Communications, Vol. COM-30, + No. 1, January 1982, pp. 247-252. + + [5] ITU-T Recommendation X.224, Annex D, "Checksum Algorithms," + November, 1993, pp. 144, 145. (Available from + gopher://info.itu.ch). ITU-T X.244 is also the same as ISO 8073. + + + + + + + +Deutsch & Gailly Informational [Page 7] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + +4. Source code + + Source code for a C language implementation of a "zlib" compliant + library is available at ftp://ftp.uu.net/pub/archiving/zip/zlib/. + +5. Security Considerations + + A decoder that fails to check the ADLER32 checksum value may be + subject to undetected data corruption. + +6. Acknowledgements + + Trademarks cited in this document are the property of their + respective owners. + + Jean-Loup Gailly and Mark Adler designed the zlib format and wrote + the related software described in this specification. Glenn + Randers-Pehrson converted this document to RFC and HTML format. + +7. Authors' Addresses + + L. Peter Deutsch + Aladdin Enterprises + 203 Santa Margarita Ave. + Menlo Park, CA 94025 + + Phone: (415) 322-0103 (AM only) + FAX: (415) 322-1734 + EMail: <ghost@aladdin.com> + + + Jean-Loup Gailly + + EMail: <gzip@prep.ai.mit.edu> + + Questions about the technical content of this specification can be + sent by email to + + Jean-Loup Gailly <gzip@prep.ai.mit.edu> and + Mark Adler <madler@alumni.caltech.edu> + + Editorial comments on this specification can be sent by email to + + L. 
Peter Deutsch <ghost@aladdin.com> and + Glenn Randers-Pehrson <randeg@alumni.rpi.edu> + + + + + + +Deutsch & Gailly Informational [Page 8] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + +8. Appendix: Rationale + + 8.1. Preset dictionaries + + A preset dictionary is specially useful to compress short input + sequences. The compressor can take advantage of the dictionary + context to encode the input in a more compact manner. The + decompressor can be initialized with the appropriate context by + virtually decompressing a compressed version of the dictionary + without producing any output. However for certain compression + algorithms such as the deflate algorithm this operation can be + achieved without actually performing any decompression. + + The compressor and the decompressor must use exactly the same + dictionary. The dictionary may be fixed or may be chosen among a + certain number of predefined dictionaries, according to the kind + of input data. The decompressor can determine which dictionary has + been chosen by the compressor by checking the dictionary + identifier. This document does not specify the contents of + predefined dictionaries, since the optimal dictionaries are + application specific. Standard data formats using this feature of + the zlib specification must precisely define the allowed + dictionaries. + + 8.2. The Adler-32 algorithm + + The Adler-32 algorithm is much faster than the CRC32 algorithm yet + still provides an extremely low probability of undetected errors. + + The modulo on unsigned long accumulators can be delayed for 5552 + bytes, so the modulo operation time is negligible. If the bytes + are a, b, c, the second sum is 3a + 2b + c + 3, and so is position + and order sensitive, unlike the first sum, which is just a + checksum. That 65521 is prime is important to avoid a possible + large class of two-byte errors that leave the check unchanged. 
+ (The Fletcher checksum uses 255, which is not prime and which also + makes the Fletcher check insensitive to single byte changes 0 <-> + 255.) + + The sum s1 is initialized to 1 instead of zero to make the length + of the sequence part of s2, so that the length does not have to be + checked separately. (Any sequence of zeroes has a Fletcher + checksum of zero.) + + + + + + + + +Deutsch & Gailly Informational [Page 9] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + +9. Appendix: Sample code + + The following C code computes the Adler-32 checksum of a data buffer. + It is written for clarity, not for speed. The sample code is in the + ANSI C programming language. Non C users may find it easier to read + with these hints: + + & Bitwise AND operator. + >> Bitwise right shift operator. When applied to an + unsigned quantity, as here, right shift inserts zero bit(s) + at the left. + << Bitwise left shift operator. Left shift inserts zero + bit(s) at the right. + ++ "n++" increments the variable n. + % modulo operator: a % b is the remainder of a divided by b. + + #define BASE 65521 /* largest prime smaller than 65536 */ + + /* + Update a running Adler-32 checksum with the bytes buf[0..len-1] + and return the updated checksum. The Adler-32 checksum should be + initialized to 1. 
+ + Usage example: + + unsigned long adler = 1L; + + while (read_buffer(buffer, length) != EOF) { + adler = update_adler32(adler, buffer, length); + } + if (adler != original_adler) error(); + */ + unsigned long update_adler32(unsigned long adler, + unsigned char *buf, int len) + { + unsigned long s1 = adler & 0xffff; + unsigned long s2 = (adler >> 16) & 0xffff; + int n; + + for (n = 0; n < len; n++) { + s1 = (s1 + buf[n]) % BASE; + s2 = (s2 + s1) % BASE; + } + return (s2 << 16) + s1; + } + + /* Return the adler32 of the bytes buf[0..len-1] */ + + + + +Deutsch & Gailly Informational [Page 10] + +RFC 1950 ZLIB Compressed Data Format Specification May 1996 + + + unsigned long adler32(unsigned char *buf, int len) + { + return update_adler32(1L, buf, len); + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Deutsch & Gailly Informational [Page 11] + diff -Nru nodejs-0.11.13/deps/zlib/doc/rfc1951.txt nodejs-0.11.15/deps/zlib/doc/rfc1951.txt --- nodejs-0.11.13/deps/zlib/doc/rfc1951.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/doc/rfc1951.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,955 @@ + + + + + + +Network Working Group P. Deutsch +Request for Comments: 1951 Aladdin Enterprises +Category: Informational May 1996 + + + DEFLATE Compressed Data Format Specification version 1.3 + +Status of This Memo + + This memo provides information for the Internet community. This memo + does not specify an Internet standard of any kind. Distribution of + this memo is unlimited. + +IESG Note: + + The IESG takes no position on the validity of any Intellectual + Property Rights statements contained in this document. + +Notices + + Copyright (c) 1996 L. 
Peter Deutsch + + Permission is granted to copy and distribute this document for any + purpose and without charge, including translations into other + languages and incorporation into compilations, provided that the + copyright notice and this notice are preserved, and that any + substantive changes or deletions from the original are clearly + marked. + + A pointer to the latest version of this and related documentation in + HTML format can be found at the URL + <ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html>. + +Abstract + + This specification defines a lossless compressed data format that + compresses data using a combination of the LZ77 algorithm and Huffman + coding, with efficiency comparable to the best currently available + general-purpose compression methods. The data can be produced or + consumed, even for an arbitrarily long sequentially presented input + data stream, using only an a priori bounded amount of intermediate + storage. The format can be implemented readily in a manner not + covered by patents. + + + + + + + + +Deutsch Informational [Page 1] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +Table of Contents + + 1. Introduction ................................................... 2 + 1.1. Purpose ................................................... 2 + 1.2. Intended audience ......................................... 3 + 1.3. Scope ..................................................... 3 + 1.4. Compliance ................................................ 3 + 1.5. Definitions of terms and conventions used ................ 3 + 1.6. Changes from previous versions ............................ 4 + 2. Compressed representation overview ............................. 4 + 3. Detailed specification ......................................... 5 + 3.1. Overall conventions ....................................... 5 + 3.1.1. Packing into bytes .................................. 5 + 3.2. 
Compressed block format ................................... 6 + 3.2.1. Synopsis of prefix and Huffman coding ............... 6 + 3.2.2. Use of Huffman coding in the "deflate" format ....... 7 + 3.2.3. Details of block format ............................. 9 + 3.2.4. Non-compressed blocks (BTYPE=00) ................... 11 + 3.2.5. Compressed blocks (length and distance codes) ...... 11 + 3.2.6. Compression with fixed Huffman codes (BTYPE=01) .... 12 + 3.2.7. Compression with dynamic Huffman codes (BTYPE=10) .. 13 + 3.3. Compliance ............................................... 14 + 4. Compression algorithm details ................................. 14 + 5. References .................................................... 16 + 6. Security Considerations ....................................... 16 + 7. Source code ................................................... 16 + 8. Acknowledgements .............................................. 16 + 9. Author's Address .............................................. 17 + +1. Introduction + + 1.1. 
Purpose + + The purpose of this specification is to define a lossless + compressed data format that: + * Is independent of CPU type, operating system, file system, + and character set, and hence can be used for interchange; + * Can be produced or consumed, even for an arbitrarily long + sequentially presented input data stream, using only an a + priori bounded amount of intermediate storage, and hence + can be used in data communications or similar structures + such as Unix filters; + * Compresses data with efficiency comparable to the best + currently available general-purpose compression methods, + and in particular considerably better than the "compress" + program; + * Can be implemented readily in a manner not covered by + patents, and hence can be practiced freely; + + + +Deutsch Informational [Page 2] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + * Is compatible with the file format produced by the current + widely used gzip utility, in that conforming decompressors + will be able to read data produced by the existing gzip + compressor. + + The data format defined by this specification does not attempt to: + + * Allow random access to compressed data; + * Compress specialized data (e.g., raster graphics) as well + as the best currently available specialized algorithms. + + A simple counting argument shows that no lossless compression + algorithm can compress every possible input data set. For the + format defined here, the worst case expansion is 5 bytes per 32K- + byte block, i.e., a size increase of 0.015% for large data sets. + English text usually compresses by a factor of 2.5 to 3; + executable files usually compress somewhat less; graphical data + such as raster images may compress much more. + + 1.2. Intended audience + + This specification is intended for use by implementors of software + to compress data into "deflate" format and/or decompress data from + "deflate" format. 
+ + The text of the specification assumes a basic background in + programming at the level of bits and other primitive data + representations. Familiarity with the technique of Huffman coding + is helpful but not required. + + 1.3. Scope + + The specification specifies a method for representing a sequence + of bytes as a (usually shorter) sequence of bits, and a method for + packing the latter bit sequence into bytes. + + 1.4. Compliance + + Unless otherwise indicated below, a compliant decompressor must be + able to accept and decompress any data set that conforms to all + the specifications presented here; a compliant compressor must + produce data sets that conform to all the specifications presented + here. + + 1.5. Definitions of terms and conventions used + + Byte: 8 bits stored or transmitted as a unit (same as an octet). + For this specification, a byte is exactly 8 bits, even on machines + + + +Deutsch Informational [Page 3] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + which store a character on a number of bits different from eight. + See below, for the numbering of bits within a byte. + + String: a sequence of arbitrary bytes. + + 1.6. Changes from previous versions + + There have been no technical changes to the deflate format since + version 1.1 of this specification. In version 1.2, some + terminology was changed. Version 1.3 is a conversion of the + specification to RFC style. + +2. Compressed representation overview + + A compressed data set consists of a series of blocks, corresponding + to successive blocks of input data. The block sizes are arbitrary, + except that non-compressible blocks are limited to 65,535 bytes. + + Each block is compressed using a combination of the LZ77 algorithm + and Huffman coding. 
The Huffman trees for each block are independent + of those for previous or subsequent blocks; the LZ77 algorithm may + use a reference to a duplicated string occurring in a previous block, + up to 32K input bytes before. + + Each block consists of two parts: a pair of Huffman code trees that + describe the representation of the compressed data part, and a + compressed data part. (The Huffman trees themselves are compressed + using Huffman encoding.) The compressed data consists of a series of + elements of two types: literal bytes (of strings that have not been + detected as duplicated within the previous 32K input bytes), and + pointers to duplicated strings, where a pointer is represented as a + pair <length, backward distance>. The representation used in the + "deflate" format limits distances to 32K bytes and lengths to 258 + bytes, but does not limit the size of a block, except for + uncompressible blocks, which are limited as noted above. + + Each type of value (literals, distances, and lengths) in the + compressed data is represented using a Huffman code, using one code + tree for literals and lengths and a separate code tree for distances. + The code trees for each block appear in a compact form just before + the compressed data for that block. + + + + + + + + + + +Deutsch Informational [Page 4] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +3. Detailed specification + + 3.1. Overall conventions In the diagrams below, a box like this: + + +---+ + | | <-- the vertical bars might be missing + +---+ + + represents one byte; a box like this: + + +==============+ + | | + +==============+ + + represents a variable number of bytes. + + Bytes stored within a computer do not have a "bit order", since + they are always treated as a unit. 
However, a byte considered as + an integer between 0 and 255 does have a most- and least- + significant bit, and since we write numbers with the most- + significant digit on the left, we also write bytes with the most- + significant bit on the left. In the diagrams below, we number the + bits of a byte so that bit 0 is the least-significant bit, i.e., + the bits are numbered: + + +--------+ + |76543210| + +--------+ + + Within a computer, a number may occupy multiple bytes. All + multi-byte numbers in the format described here are stored with + the least-significant byte first (at the lower memory address). + For example, the decimal number 520 is stored as: + + 0 1 + +--------+--------+ + |00001000|00000010| + +--------+--------+ + ^ ^ + | | + | + more significant byte = 2 x 256 + + less significant byte = 8 + + 3.1.1. Packing into bytes + + This document does not address the issue of the order in which + bits of a byte are transmitted on a bit-sequential medium, + since the final data format described here is byte- rather than + + + +Deutsch Informational [Page 5] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + bit-oriented. However, we describe the compressed block format + in below, as a sequence of data elements of various bit + lengths, not a sequence of bytes. We must therefore specify + how to pack these data elements into bytes to form the final + compressed byte sequence: + + * Data elements are packed into bytes in order of + increasing bit number within the byte, i.e., starting + with the least-significant bit of the byte. + * Data elements other than Huffman codes are packed + starting with the least-significant bit of the data + element. + * Huffman codes are packed starting with the most- + significant bit of the code. 
+ + In other words, if one were to print out the compressed data as + a sequence of bytes, starting with the first byte at the + *right* margin and proceeding to the *left*, with the most- + significant bit of each byte on the left as usual, one would be + able to parse the result from right to left, with fixed-width + elements in the correct MSB-to-LSB order and Huffman codes in + bit-reversed order (i.e., with the first bit of the code in the + relative LSB position). + + 3.2. Compressed block format + + 3.2.1. Synopsis of prefix and Huffman coding + + Prefix coding represents symbols from an a priori known + alphabet by bit sequences (codes), one code for each symbol, in + a manner such that different symbols may be represented by bit + sequences of different lengths, but a parser can always parse + an encoded string unambiguously symbol-by-symbol. + + We define a prefix code in terms of a binary tree in which the + two edges descending from each non-leaf node are labeled 0 and + 1 and in which the leaf nodes correspond one-for-one with (are + labeled with) the symbols of the alphabet; then the code for a + symbol is the sequence of 0's and 1's on the edges leading from + the root to the leaf labeled with that symbol. For example: + + + + + + + + + + + +Deutsch Informational [Page 6] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + /\ Symbol Code + 0 1 ------ ---- + / \ A 00 + /\ B B 1 + 0 1 C 011 + / \ D 010 + A /\ + 0 1 + / \ + D C + + A parser can decode the next symbol from an encoded input + stream by walking down the tree from the root, at each step + choosing the edge corresponding to the next input bit. + + Given an alphabet with known symbol frequencies, the Huffman + algorithm allows the construction of an optimal prefix code + (one which represents strings with those symbol frequencies + using the fewest bits of any possible prefix codes for that + alphabet). Such a code is called a Huffman code. 
(See + reference [1] in Chapter 5, references for additional + information on Huffman codes.) + + Note that in the "deflate" format, the Huffman codes for the + various alphabets must not exceed certain maximum code lengths. + This constraint complicates the algorithm for computing code + lengths from symbol frequencies. Again, see Chapter 5, + references for details. + + 3.2.2. Use of Huffman coding in the "deflate" format + + The Huffman codes used for each alphabet in the "deflate" + format have two additional rules: + + * All codes of a given bit length have lexicographically + consecutive values, in the same order as the symbols + they represent; + + * Shorter codes lexicographically precede longer codes. + + + + + + + + + + + + +Deutsch Informational [Page 7] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + We could recode the example above to follow this rule as + follows, assuming that the order of the alphabet is ABCD: + + Symbol Code + ------ ---- + A 10 + B 0 + C 110 + D 111 + + I.e., 0 precedes 10 which precedes 11x, and 110 and 111 are + lexicographically consecutive. + + Given this rule, we can define the Huffman code for an alphabet + just by giving the bit lengths of the codes for each symbol of + the alphabet in order; this is sufficient to determine the + actual codes. In our example, the code is completely defined + by the sequence of bit lengths (2, 1, 3, 3). The following + algorithm generates the codes as integers, intended to be read + from most- to least-significant bit. The code lengths are + initially in tree[I].Len; the codes are produced in + tree[I].Code. + + 1) Count the number of codes for each code length. Let + bl_count[N] be the number of codes of length N, N >= 1. 
+ + 2) Find the numerical value of the smallest code for each + code length: + + code = 0; + bl_count[0] = 0; + for (bits = 1; bits <= MAX_BITS; bits++) { + code = (code + bl_count[bits-1]) << 1; + next_code[bits] = code; + } + + 3) Assign numerical values to all codes, using consecutive + values for all codes of the same length with the base + values determined at step 2. Codes that are never used + (which have a bit length of zero) must not be assigned a + value. + + for (n = 0; n <= max_code; n++) { + len = tree[n].Len; + if (len != 0) { + tree[n].Code = next_code[len]; + next_code[len]++; + } + + + +Deutsch Informational [Page 8] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + } + + Example: + + Consider the alphabet ABCDEFGH, with bit lengths (3, 3, 3, 3, + 3, 2, 4, 4). After step 1, we have: + + N bl_count[N] + - ----------- + 2 1 + 3 5 + 4 2 + + Step 2 computes the following next_code values: + + N next_code[N] + - ------------ + 1 0 + 2 0 + 3 2 + 4 14 + + Step 3 produces the following code values: + + Symbol Length Code + ------ ------ ---- + A 3 010 + B 3 011 + C 3 100 + D 3 101 + E 3 110 + F 2 00 + G 4 1110 + H 4 1111 + + 3.2.3. Details of block format + + Each block of compressed data begins with 3 header bits + containing the following data: + + first bit BFINAL + next 2 bits BTYPE + + Note that the header bits do not necessarily begin on a byte + boundary, since a block does not necessarily occupy an integral + number of bytes. + + + + + +Deutsch Informational [Page 9] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + BFINAL is set if and only if this is the last block of the data + set. 
+ + BTYPE specifies how the data are compressed, as follows: + + 00 - no compression + 01 - compressed with fixed Huffman codes + 10 - compressed with dynamic Huffman codes + 11 - reserved (error) + + The only difference between the two compressed cases is how the + Huffman codes for the literal/length and distance alphabets are + defined. + + In all cases, the decoding algorithm for the actual data is as + follows: + + do + read block header from input stream. + if stored with no compression + skip any remaining bits in current partially + processed byte + read LEN and NLEN (see next section) + copy LEN bytes of data to output + otherwise + if compressed with dynamic Huffman codes + read representation of code trees (see + subsection below) + loop (until end of block code recognized) + decode literal/length value from input stream + if value < 256 + copy value (literal byte) to output stream + otherwise + if value = end of block (256) + break from loop + otherwise (value = 257..285) + decode distance from input stream + + move backwards distance bytes in the output + stream, and copy length bytes from this + position to the output stream. + end loop + while not last block + + Note that a duplicated string reference may refer to a string + in a previous block; i.e., the backward distance may cross one + or more block boundaries. However a distance cannot refer past + the beginning of the output stream. (An application using a + + + +Deutsch Informational [Page 10] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + preset dictionary might discard part of the output stream; a + distance can refer to that part of the output stream anyway) + Note also that the referenced string may overlap the current + position; for example, if the last 2 bytes decoded have values + X and Y, a string reference with <length = 5, distance = 2> + adds X,Y,X,Y,X to the output stream. + + We now specify each compression method in turn. + + 3.2.4. 
Non-compressed blocks (BTYPE=00) + + Any bits of input up to the next byte boundary are ignored. + The rest of the block consists of the following information: + + 0 1 2 3 4... + +---+---+---+---+================================+ + | LEN | NLEN |... LEN bytes of literal data...| + +---+---+---+---+================================+ + + LEN is the number of data bytes in the block. NLEN is the + one's complement of LEN. + + 3.2.5. Compressed blocks (length and distance codes) + + As noted above, encoded data blocks in the "deflate" format + consist of sequences of symbols drawn from three conceptually + distinct alphabets: either literal bytes, from the alphabet of + byte values (0..255), or <length, backward distance> pairs, + where the length is drawn from (3..258) and the distance is + drawn from (1..32,768). In fact, the literal and length + alphabets are merged into a single alphabet (0..285), where + values 0..255 represent literal bytes, the value 256 indicates + end-of-block, and values 257..285 represent length codes + (possibly in conjunction with extra bits following the symbol + code) as follows: + + + + + + + + + + + + + + + + +Deutsch Informational [Page 11] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + Extra Extra Extra + Code Bits Length(s) Code Bits Lengths Code Bits Length(s) + ---- ---- ------ ---- ---- ------- ---- ---- ------- + 257 0 3 267 1 15,16 277 4 67-82 + 258 0 4 268 1 17,18 278 4 83-98 + 259 0 5 269 2 19-22 279 4 99-114 + 260 0 6 270 2 23-26 280 4 115-130 + 261 0 7 271 2 27-30 281 5 131-162 + 262 0 8 272 2 31-34 282 5 163-194 + 263 0 9 273 3 35-42 283 5 195-226 + 264 0 10 274 3 43-50 284 5 227-257 + 265 1 11,12 275 3 51-58 285 0 258 + 266 1 13,14 276 3 59-66 + + The extra bits should be interpreted as a machine integer + stored with the most-significant bit first, e.g., bits 1110 + represent the value 14. 
+ + Extra Extra Extra + Code Bits Dist Code Bits Dist Code Bits Distance + ---- ---- ---- ---- ---- ------ ---- ---- -------- + 0 0 1 10 4 33-48 20 9 1025-1536 + 1 0 2 11 4 49-64 21 9 1537-2048 + 2 0 3 12 5 65-96 22 10 2049-3072 + 3 0 4 13 5 97-128 23 10 3073-4096 + 4 1 5,6 14 6 129-192 24 11 4097-6144 + 5 1 7,8 15 6 193-256 25 11 6145-8192 + 6 2 9-12 16 7 257-384 26 12 8193-12288 + 7 2 13-16 17 7 385-512 27 12 12289-16384 + 8 3 17-24 18 8 513-768 28 13 16385-24576 + 9 3 25-32 19 8 769-1024 29 13 24577-32768 + + 3.2.6. Compression with fixed Huffman codes (BTYPE=01) + + The Huffman codes for the two alphabets are fixed, and are not + represented explicitly in the data. The Huffman code lengths + for the literal/length alphabet are: + + Lit Value Bits Codes + --------- ---- ----- + 0 - 143 8 00110000 through + 10111111 + 144 - 255 9 110010000 through + 111111111 + 256 - 279 7 0000000 through + 0010111 + 280 - 287 8 11000000 through + 11000111 + + + +Deutsch Informational [Page 12] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + The code lengths are sufficient to generate the actual codes, + as described above; we show the codes in the table for added + clarity. Literal/length values 286-287 will never actually + occur in the compressed data, but participate in the code + construction. + + Distance codes 0-31 are represented by (fixed-length) 5-bit + codes, with possible additional bits as shown in the table + shown in Paragraph 3.2.5, above. Note that distance codes 30- + 31 will never actually occur in the compressed data. + + 3.2.7. Compression with dynamic Huffman codes (BTYPE=10) + + The Huffman codes for the two alphabets appear in the block + immediately after the header bits and before the actual + compressed data, first the literal/length code and then the + distance code. Each code is defined by a sequence of code + lengths, as discussed in Paragraph 3.2.2, above. 
For even + greater compactness, the code length sequences themselves are + compressed using a Huffman code. The alphabet for code lengths + is as follows: + + 0 - 15: Represent code lengths of 0 - 15 + 16: Copy the previous code length 3 - 6 times. + The next 2 bits indicate repeat length + (0 = 3, ... , 3 = 6) + Example: Codes 8, 16 (+2 bits 11), + 16 (+2 bits 10) will expand to + 12 code lengths of 8 (1 + 6 + 5) + 17: Repeat a code length of 0 for 3 - 10 times. + (3 bits of length) + 18: Repeat a code length of 0 for 11 - 138 times + (7 bits of length) + + A code length of 0 indicates that the corresponding symbol in + the literal/length or distance alphabet will not occur in the + block, and should not participate in the Huffman code + construction algorithm given earlier. If only one distance + code is used, it is encoded using one bit, not zero bits; in + this case there is a single code length of one, with one unused + code. One distance code of zero bits means that there are no + distance codes used at all (the data is all literals). + + We can now define the format of the block: + + 5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286) + 5 Bits: HDIST, # of Distance codes - 1 (1 - 32) + 4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19) + + + +Deutsch Informational [Page 13] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + (HCLEN + 4) x 3 bits: code lengths for the code length + alphabet given just above, in the order: 16, 17, 18, + 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 + + These code lengths are interpreted as 3-bit integers + (0-7); as above, a code length of 0 means the + corresponding symbol (literal/length or distance code + length) is not used. 
+ + HLIT + 257 code lengths for the literal/length alphabet, + encoded using the code length Huffman code + + HDIST + 1 code lengths for the distance alphabet, + encoded using the code length Huffman code + + The actual compressed data of the block, + encoded using the literal/length and distance Huffman + codes + + The literal/length symbol 256 (end of data), + encoded using the literal/length Huffman code + + The code length repeat codes can cross from HLIT + 257 to the + HDIST + 1 code lengths. In other words, all code lengths form + a single sequence of HLIT + HDIST + 258 values. + + 3.3. Compliance + + A compressor may limit further the ranges of values specified in + the previous section and still be compliant; for example, it may + limit the range of backward pointers to some value smaller than + 32K. Similarly, a compressor may limit the size of blocks so that + a compressible block fits in memory. + + A compliant decompressor must accept the full range of possible + values defined in the previous section, and must accept blocks of + arbitrary size. + +4. Compression algorithm details + + While it is the intent of this document to define the "deflate" + compressed data format without reference to any particular + compression algorithm, the format is related to the compressed + formats produced by LZ77 (Lempel-Ziv 1977, see reference [2] below); + since many variations of LZ77 are patented, it is strongly + recommended that the implementor of a compressor follow the general + algorithm presented here, which is known not to be patented per se. + The material in this section is not part of the definition of the + + + +Deutsch Informational [Page 14] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + + specification per se, and a compressor need not follow it in order to + be compliant. 
+ + The compressor terminates a block when it determines that starting a + new block with fresh trees would be useful, or when the block size + fills up the compressor's block buffer. + + The compressor uses a chained hash table to find duplicated strings, + using a hash function that operates on 3-byte sequences. At any + given point during compression, let XYZ be the next 3 input bytes to + be examined (not necessarily all different, of course). First, the + compressor examines the hash chain for XYZ. If the chain is empty, + the compressor simply writes out X as a literal byte and advances one + byte in the input. If the hash chain is not empty, indicating that + the sequence XYZ (or, if we are unlucky, some other 3 bytes with the + same hash function value) has occurred recently, the compressor + compares all strings on the XYZ hash chain with the actual input data + sequence starting at the current point, and selects the longest + match. + + The compressor searches the hash chains starting with the most recent + strings, to favor small distances and thus take advantage of the + Huffman encoding. The hash chains are singly linked. There are no + deletions from the hash chains; the algorithm simply discards matches + that are too old. To avoid a worst-case situation, very long hash + chains are arbitrarily truncated at a certain length, determined by a + run-time parameter. + + To improve overall compression, the compressor optionally defers the + selection of matches ("lazy matching"): after a match of length N has + been found, the compressor searches for a longer match starting at + the next input byte. If it finds a longer match, it truncates the + previous match to a length of one (thus producing a single literal + byte) and then emits the longer match. Otherwise, it emits the + original match, and, as described above, advances N bytes before + continuing. + + Run-time parameters also control this "lazy match" procedure. 
If + compression ratio is most important, the compressor attempts a + complete second search regardless of the length of the first match. + In the normal case, if the current match is "long enough", the + compressor reduces the search for a longer match, thus speeding up + the process. If speed is most important, the compressor inserts new + strings in the hash table only when no match was found, or when the + match is not "too long". This degrades the compression ratio but + saves time since there are both fewer insertions and fewer searches. + + + + + +Deutsch Informational [Page 15] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +5. References + + [1] Huffman, D. A., "A Method for the Construction of Minimum + Redundancy Codes", Proceedings of the Institute of Radio + Engineers, September 1952, Volume 40, Number 9, pp. 1098-1101. + + [2] Ziv J., Lempel A., "A Universal Algorithm for Sequential Data + Compression", IEEE Transactions on Information Theory, Vol. 23, + No. 3, pp. 337-343. + + [3] Gailly, J.-L., and Adler, M., ZLIB documentation and sources, + available in ftp://ftp.uu.net/pub/archiving/zip/doc/ + + [4] Gailly, J.-L., and Adler, M., GZIP documentation and sources, + available as gzip-*.tar in ftp://prep.ai.mit.edu/pub/gnu/ + + [5] Schwartz, E. S., and Kallick, B. "Generating a canonical prefix + encoding." Comm. ACM, 7,3 (Mar. 1964), pp. 166-169. + + [6] Hirschberg and Lelewer, "Efficient decoding of prefix codes," + Comm. ACM, 33,4, April 1990, pp. 449-459. + +6. Security Considerations + + Any data compression method involves the reduction of redundancy in + the data. Consequently, any corruption of the data is likely to have + severe effects and be difficult to correct. Uncompressed text, on + the other hand, will probably still be readable despite the presence + of some corrupted bytes. + + It is recommended that systems using this data format provide some + means of validating the integrity of the compressed data. 
See + reference [3], for example. + +7. Source code + + Source code for a C language implementation of a "deflate" compliant + compressor and decompressor is available within the zlib package at + ftp://ftp.uu.net/pub/archiving/zip/zlib/. + +8. Acknowledgements + + Trademarks cited in this document are the property of their + respective owners. + + Phil Katz designed the deflate format. Jean-Loup Gailly and Mark + Adler wrote the related software described in this specification. + Glenn Randers-Pehrson converted this document to RFC and HTML format. + + + +Deutsch Informational [Page 16] + +RFC 1951 DEFLATE Compressed Data Format Specification May 1996 + + +9. Author's Address + + L. Peter Deutsch + Aladdin Enterprises + 203 Santa Margarita Ave. + Menlo Park, CA 94025 + + Phone: (415) 322-0103 (AM only) + FAX: (415) 322-1734 + EMail: <ghost@aladdin.com> + + Questions about the technical content of this specification can be + sent by email to: + + Jean-Loup Gailly <gzip@prep.ai.mit.edu> and + Mark Adler <madler@alumni.caltech.edu> + + Editorial comments on this specification can be sent by email to: + + L. Peter Deutsch <ghost@aladdin.com> and + Glenn Randers-Pehrson <randeg@alumni.rpi.edu> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Deutsch Informational [Page 17] + diff -Nru nodejs-0.11.13/deps/zlib/doc/rfc1952.txt nodejs-0.11.15/deps/zlib/doc/rfc1952.txt --- nodejs-0.11.13/deps/zlib/doc/rfc1952.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/doc/rfc1952.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,675 @@ + + + + + + +Network Working Group P. Deutsch +Request for Comments: 1952 Aladdin Enterprises +Category: Informational May 1996 + + + GZIP file format specification version 4.3 + +Status of This Memo + + This memo provides information for the Internet community. This memo + does not specify an Internet standard of any kind. Distribution of + this memo is unlimited. 
+ +IESG Note: + + The IESG takes no position on the validity of any Intellectual + Property Rights statements contained in this document. + +Notices + + Copyright (c) 1996 L. Peter Deutsch + + Permission is granted to copy and distribute this document for any + purpose and without charge, including translations into other + languages and incorporation into compilations, provided that the + copyright notice and this notice are preserved, and that any + substantive changes or deletions from the original are clearly + marked. + + A pointer to the latest version of this and related documentation in + HTML format can be found at the URL + <ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html>. + +Abstract + + This specification defines a lossless compressed data format that is + compatible with the widely used GZIP utility. The format includes a + cyclic redundancy check value for detecting data corruption. The + format presently uses the DEFLATE method of compression but can be + easily extended to use other compression methods. The format can be + implemented readily in a manner not covered by patents. + + + + + + + + + + +Deutsch Informational [Page 1] + +RFC 1952 GZIP File Format Specification May 1996 + + +Table of Contents + + 1. Introduction ................................................... 2 + 1.1. Purpose ................................................... 2 + 1.2. Intended audience ......................................... 3 + 1.3. Scope ..................................................... 3 + 1.4. Compliance ................................................ 3 + 1.5. Definitions of terms and conventions used ................. 3 + 1.6. Changes from previous versions ............................ 3 + 2. Detailed specification ......................................... 4 + 2.1. Overall conventions ....................................... 4 + 2.2. File format ............................................... 5 + 2.3. 
Member format ............................................. 5 + 2.3.1. Member header and trailer ........................... 6 + 2.3.1.1. Extra field ................................... 8 + 2.3.1.2. Compliance .................................... 9 + 3. References .................................................. 9 + 4. Security Considerations .................................... 10 + 5. Acknowledgements ........................................... 10 + 6. Author's Address ........................................... 10 + 7. Appendix: Jean-Loup Gailly's gzip utility .................. 11 + 8. Appendix: Sample CRC Code .................................. 11 + +1. Introduction + + 1.1. Purpose + + The purpose of this specification is to define a lossless + compressed data format that: + + * Is independent of CPU type, operating system, file system, + and character set, and hence can be used for interchange; + * Can compress or decompress a data stream (as opposed to a + randomly accessible file) to produce another data stream, + using only an a priori bounded amount of intermediate + storage, and hence can be used in data communications or + similar structures such as Unix filters; + * Compresses data with efficiency comparable to the best + currently available general-purpose compression methods, + and in particular considerably better than the "compress" + program; + * Can be implemented readily in a manner not covered by + patents, and hence can be practiced freely; + * Is compatible with the file format produced by the current + widely used gzip utility, in that conforming decompressors + will be able to read data produced by the existing gzip + compressor. 
+ + + + +Deutsch Informational [Page 2] + +RFC 1952 GZIP File Format Specification May 1996 + + + The data format defined by this specification does not attempt to: + + * Provide random access to compressed data; + * Compress specialized data (e.g., raster graphics) as well as + the best currently available specialized algorithms. + + 1.2. Intended audience + + This specification is intended for use by implementors of software + to compress data into gzip format and/or decompress data from gzip + format. + + The text of the specification assumes a basic background in + programming at the level of bits and other primitive data + representations. + + 1.3. Scope + + The specification specifies a compression method and a file format + (the latter assuming only that a file can store a sequence of + arbitrary bytes). It does not specify any particular interface to + a file system or anything about character sets or encodings + (except for file names and comments, which are optional). + + 1.4. Compliance + + Unless otherwise indicated below, a compliant decompressor must be + able to accept and decompress any file that conforms to all the + specifications presented here; a compliant compressor must produce + files that conform to all the specifications presented here. The + material in the appendices is not part of the specification per se + and is not relevant to compliance. + + 1.5. Definitions of terms and conventions used + + byte: 8 bits stored or transmitted as a unit (same as an octet). + (For this specification, a byte is exactly 8 bits, even on + machines which store a character on a number of bits different + from 8.) See below for the numbering of bits within a byte. + + 1.6. Changes from previous versions + + There have been no technical changes to the gzip format since + version 4.1 of this specification. 
In version 4.2, some + terminology was changed, and the sample CRC code was rewritten for + clarity and to eliminate the requirement for the caller to do pre- + and post-conditioning. Version 4.3 is a conversion of the + specification to RFC style. + + + +Deutsch Informational [Page 3] + +RFC 1952 GZIP File Format Specification May 1996 + + +2. Detailed specification + + 2.1. Overall conventions + + In the diagrams below, a box like this: + + +---+ + | | <-- the vertical bars might be missing + +---+ + + represents one byte; a box like this: + + +==============+ + | | + +==============+ + + represents a variable number of bytes. + + Bytes stored within a computer do not have a "bit order", since + they are always treated as a unit. However, a byte considered as + an integer between 0 and 255 does have a most- and least- + significant bit, and since we write numbers with the most- + significant digit on the left, we also write bytes with the most- + significant bit on the left. In the diagrams below, we number the + bits of a byte so that bit 0 is the least-significant bit, i.e., + the bits are numbered: + + +--------+ + |76543210| + +--------+ + + This document does not address the issue of the order in which + bits of a byte are transmitted on a bit-sequential medium, since + the data format described here is byte- rather than bit-oriented. + + Within a computer, a number may occupy multiple bytes. All + multi-byte numbers in the format described here are stored with + the least-significant byte first (at the lower memory address). + For example, the decimal number 520 is stored as: + + 0 1 + +--------+--------+ + |00001000|00000010| + +--------+--------+ + ^ ^ + | | + | + more significant byte = 2 x 256 + + less significant byte = 8 + + + +Deutsch Informational [Page 4] + +RFC 1952 GZIP File Format Specification May 1996 + + + 2.2. File format + + A gzip file consists of a series of "members" (compressed data + sets). 
The format of each member is specified in the following + section. The members simply appear one after another in the file, + with no additional information before, between, or after them. + + 2.3. Member format + + Each member has the following structure: + + +---+---+---+---+---+---+---+---+---+---+ + |ID1|ID2|CM |FLG| MTIME |XFL|OS | (more-->) + +---+---+---+---+---+---+---+---+---+---+ + + (if FLG.FEXTRA set) + + +---+---+=================================+ + | XLEN |...XLEN bytes of "extra field"...| (more-->) + +---+---+=================================+ + + (if FLG.FNAME set) + + +=========================================+ + |...original file name, zero-terminated...| (more-->) + +=========================================+ + + (if FLG.FCOMMENT set) + + +===================================+ + |...file comment, zero-terminated...| (more-->) + +===================================+ + + (if FLG.FHCRC set) + + +---+---+ + | CRC16 | + +---+---+ + + +=======================+ + |...compressed blocks...| (more-->) + +=======================+ + + 0 1 2 3 4 5 6 7 + +---+---+---+---+---+---+---+---+ + | CRC32 | ISIZE | + +---+---+---+---+---+---+---+---+ + + + + +Deutsch Informational [Page 5] + +RFC 1952 GZIP File Format Specification May 1996 + + + 2.3.1. Member header and trailer + + ID1 (IDentification 1) + ID2 (IDentification 2) + These have the fixed values ID1 = 31 (0x1f, \037), ID2 = 139 + (0x8b, \213), to identify the file as being in gzip format. + + CM (Compression Method) + This identifies the compression method used in the file. CM + = 0-7 are reserved. CM = 8 denotes the "deflate" + compression method, which is the one customarily used by + gzip and which is documented elsewhere. + + FLG (FLaGs) + This flag byte is divided into individual bits as follows: + + bit 0 FTEXT + bit 1 FHCRC + bit 2 FEXTRA + bit 3 FNAME + bit 4 FCOMMENT + bit 5 reserved + bit 6 reserved + bit 7 reserved + + If FTEXT is set, the file is probably ASCII text. 
This is + an optional indication, which the compressor may set by + checking a small amount of the input data to see whether any + non-ASCII characters are present. In case of doubt, FTEXT + is cleared, indicating binary data. For systems which have + different file formats for ascii text and binary data, the + decompressor can use FTEXT to choose the appropriate format. + We deliberately do not specify the algorithm used to set + this bit, since a compressor always has the option of + leaving it cleared and a decompressor always has the option + of ignoring it and letting some other program handle issues + of data conversion. + + If FHCRC is set, a CRC16 for the gzip header is present, + immediately before the compressed data. The CRC16 consists + of the two least significant bytes of the CRC32 for all + bytes of the gzip header up to and not including the CRC16. + [The FHCRC bit was never set by versions of gzip up to + 1.2.4, even though it was documented with a different + meaning in gzip 1.2.4.] + + If FEXTRA is set, optional extra fields are present, as + described in a following section. + + + +Deutsch Informational [Page 6] + +RFC 1952 GZIP File Format Specification May 1996 + + + If FNAME is set, an original file name is present, + terminated by a zero byte. The name must consist of ISO + 8859-1 (LATIN-1) characters; on operating systems using + EBCDIC or any other character set for file names, the name + must be translated to the ISO LATIN-1 character set. This + is the original name of the file being compressed, with any + directory components removed, and, if the file being + compressed is on a file system with case insensitive names, + forced to lower case. There is no original file name if the + data was compressed from a source other than a named file; + for example, if the source was stdin on a Unix system, there + is no file name. + + If FCOMMENT is set, a zero-terminated file comment is + present. 
This comment is not interpreted; it is only + intended for human consumption. The comment must consist of + ISO 8859-1 (LATIN-1) characters. Line breaks should be + denoted by a single line feed character (10 decimal). + + Reserved FLG bits must be zero. + + MTIME (Modification TIME) + This gives the most recent modification time of the original + file being compressed. The time is in Unix format, i.e., + seconds since 00:00:00 GMT, Jan. 1, 1970. (Note that this + may cause problems for MS-DOS and other systems that use + local rather than Universal time.) If the compressed data + did not come from a file, MTIME is set to the time at which + compression started. MTIME = 0 means no time stamp is + available. + + XFL (eXtra FLags) + These flags are available for use by specific compression + methods. The "deflate" method (CM = 8) sets these flags as + follows: + + XFL = 2 - compressor used maximum compression, + slowest algorithm + XFL = 4 - compressor used fastest algorithm + + OS (Operating System) + This identifies the type of file system on which compression + took place. This may be useful in determining end-of-line + convention for text files. The currently defined values are + as follows: + + + + + + +Deutsch Informational [Page 7] + +RFC 1952 GZIP File Format Specification May 1996 + + + 0 - FAT filesystem (MS-DOS, OS/2, NT/Win32) + 1 - Amiga + 2 - VMS (or OpenVMS) + 3 - Unix + 4 - VM/CMS + 5 - Atari TOS + 6 - HPFS filesystem (OS/2, NT) + 7 - Macintosh + 8 - Z-System + 9 - CP/M + 10 - TOPS-20 + 11 - NTFS filesystem (NT) + 12 - QDOS + 13 - Acorn RISCOS + 255 - unknown + + XLEN (eXtra LENgth) + If FLG.FEXTRA is set, this gives the length of the optional + extra field. See below for details. + + CRC32 (CRC-32) + This contains a Cyclic Redundancy Check value of the + uncompressed data computed according to CRC-32 algorithm + used in the ISO 3309 standard and in section 8.1.1.6.2 of + ITU-T recommendation V.42. (See http://www.iso.ch for + ordering ISO documents. 
See gopher://info.itu.ch for an + online version of ITU-T V.42.) + + ISIZE (Input SIZE) + This contains the size of the original (uncompressed) input + data modulo 2^32. + + 2.3.1.1. Extra field + + If the FLG.FEXTRA bit is set, an "extra field" is present in + the header, with total length XLEN bytes. It consists of a + series of subfields, each of the form: + + +---+---+---+---+==================================+ + |SI1|SI2| LEN |... LEN bytes of subfield data ...| + +---+---+---+---+==================================+ + + SI1 and SI2 provide a subfield ID, typically two ASCII letters + with some mnemonic value. Jean-Loup Gailly + <gzip@prep.ai.mit.edu> is maintaining a registry of subfield + IDs; please send him any subfield ID you wish to use. Subfield + IDs with SI2 = 0 are reserved for future use. The following + IDs are currently defined: + + + +Deutsch Informational [Page 8] + +RFC 1952 GZIP File Format Specification May 1996 + + + SI1 SI2 Data + ---------- ---------- ---- + 0x41 ('A') 0x70 ('P') Apollo file type information + + LEN gives the length of the subfield data, excluding the 4 + initial bytes. + + 2.3.1.2. Compliance + + A compliant compressor must produce files with correct ID1, + ID2, CM, CRC32, and ISIZE, but may set all the other fields in + the fixed-length part of the header to default values (255 for + OS, 0 for all others). The compressor must set all reserved + bits to zero. + + A compliant decompressor must check ID1, ID2, and CM, and + provide an error indication if any of these have incorrect + values. It must examine FEXTRA/XLEN, FNAME, FCOMMENT and FHCRC + at least so it can skip over the optional fields if they are + present. It need not examine any other part of the header or + trailer; in particular, a decompressor may ignore FTEXT and OS + and always produce binary output, and still be compliant. 
A + compliant decompressor must give an error indication if any + reserved bit is non-zero, since such a bit could indicate the + presence of a new field that would cause subsequent data to be + interpreted incorrectly. + +3. References + + [1] "Information Processing - 8-bit single-byte coded graphic + character sets - Part 1: Latin alphabet No.1" (ISO 8859-1:1987). + The ISO 8859-1 (Latin-1) character set is a superset of 7-bit + ASCII. Files defining this character set are available as + iso_8859-1.* in ftp://ftp.uu.net/graphics/png/documents/ + + [2] ISO 3309 + + [3] ITU-T recommendation V.42 + + [4] Deutsch, L.P.,"DEFLATE Compressed Data Format Specification", + available in ftp://ftp.uu.net/pub/archiving/zip/doc/ + + [5] Gailly, J.-L., GZIP documentation, available as gzip-*.tar in + ftp://prep.ai.mit.edu/pub/gnu/ + + [6] Sarwate, D.V., "Computation of Cyclic Redundancy Checks via Table + Look-Up", Communications of the ACM, 31(8), pp.1008-1013. + + + + +Deutsch Informational [Page 9] + +RFC 1952 GZIP File Format Specification May 1996 + + + [7] Schwaderer, W.D., "CRC Calculation", April 85 PC Tech Journal, + pp.118-133. + + [8] ftp://ftp.adelaide.edu.au/pub/rocksoft/papers/crc_v3.txt, + describing the CRC concept. + +4. Security Considerations + + Any data compression method involves the reduction of redundancy in + the data. Consequently, any corruption of the data is likely to have + severe effects and be difficult to correct. Uncompressed text, on + the other hand, will probably still be readable despite the presence + of some corrupted bytes. + + It is recommended that systems using this data format provide some + means of validating the integrity of the compressed data, such as by + setting and checking the CRC-32 check value. + +5. Acknowledgements + + Trademarks cited in this document are the property of their + respective owners. 
+ + Jean-Loup Gailly designed the gzip format and wrote, with Mark Adler, + the related software described in this specification. Glenn + Randers-Pehrson converted this document to RFC and HTML format. + +6. Author's Address + + L. Peter Deutsch + Aladdin Enterprises + 203 Santa Margarita Ave. + Menlo Park, CA 94025 + + Phone: (415) 322-0103 (AM only) + FAX: (415) 322-1734 + EMail: <ghost@aladdin.com> + + Questions about the technical content of this specification can be + sent by email to: + + Jean-Loup Gailly <gzip@prep.ai.mit.edu> and + Mark Adler <madler@alumni.caltech.edu> + + Editorial comments on this specification can be sent by email to: + + L. Peter Deutsch <ghost@aladdin.com> and + Glenn Randers-Pehrson <randeg@alumni.rpi.edu> + + + +Deutsch Informational [Page 10] + +RFC 1952 GZIP File Format Specification May 1996 + + +7. Appendix: Jean-Loup Gailly's gzip utility + + The most widely used implementation of gzip compression, and the + original documentation on which this specification is based, were + created by Jean-Loup Gailly <gzip@prep.ai.mit.edu>. Since this + implementation is a de facto standard, we mention some more of its + features here. Again, the material in this section is not part of + the specification per se, and implementations need not follow it to + be compliant. + + When compressing or decompressing a file, gzip preserves the + protection, ownership, and modification time attributes on the local + file system, since there is no provision for representing protection + attributes in the gzip file format itself. Since the file format + includes a modification time, the gzip decompressor provides a + command line switch that assigns the modification time from the file, + rather than the local modification time of the compressed input, to + the decompressed output. + +8. Appendix: Sample CRC Code + + The following sample code represents a practical implementation of + the CRC (Cyclic Redundancy Check). 
(See also ISO 3309 and ITU-T V.42 + for a formal specification.) + + The sample code is in the ANSI C programming language. Non C users + may find it easier to read with these hints: + + & Bitwise AND operator. + ^ Bitwise exclusive-OR operator. + >> Bitwise right shift operator. When applied to an + unsigned quantity, as here, right shift inserts zero + bit(s) at the left. + ! Logical NOT operator. + ++ "n++" increments the variable n. + 0xNNN 0x introduces a hexadecimal (base 16) constant. + Suffix L indicates a long value (at least 32 bits). + + /* Table of CRCs of all 8-bit messages. */ + unsigned long crc_table[256]; + + /* Flag: has the table been computed? Initially false. */ + int crc_table_computed = 0; + + /* Make the table for a fast CRC. */ + void make_crc_table(void) + { + unsigned long c; + + + +Deutsch Informational [Page 11] + +RFC 1952 GZIP File Format Specification May 1996 + + + int n, k; + for (n = 0; n < 256; n++) { + c = (unsigned long) n; + for (k = 0; k < 8; k++) { + if (c & 1) { + c = 0xedb88320L ^ (c >> 1); + } else { + c = c >> 1; + } + } + crc_table[n] = c; + } + crc_table_computed = 1; + } + + /* + Update a running crc with the bytes buf[0..len-1] and return + the updated crc. The crc should be initialized to zero. Pre- and + post-conditioning (one's complement) is performed within this + function so it shouldn't be done by the caller. Usage example: + + unsigned long crc = 0L; + + while (read_buffer(buffer, length) != EOF) { + crc = update_crc(crc, buffer, length); + } + if (crc != original_crc) error(); + */ + unsigned long update_crc(unsigned long crc, + unsigned char *buf, int len) + { + unsigned long c = crc ^ 0xffffffffL; + int n; + + if (!crc_table_computed) + make_crc_table(); + for (n = 0; n < len; n++) { + c = crc_table[(c ^ buf[n]) & 0xff] ^ (c >> 8); + } + return c ^ 0xffffffffL; + } + + /* Return the CRC of the bytes buf[0..len-1]. 
*/ + unsigned long crc(unsigned char *buf, int len) + { + return update_crc(0L, buf, len); + } + + + + +Deutsch Informational [Page 12] + diff -Nru nodejs-0.11.13/deps/zlib/doc/txtvsbin.txt nodejs-0.11.15/deps/zlib/doc/txtvsbin.txt --- nodejs-0.11.13/deps/zlib/doc/txtvsbin.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/doc/txtvsbin.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,107 @@ +A Fast Method for Identifying Plain Text Files +============================================== + + +Introduction +------------ + +Given a file coming from an unknown source, it is sometimes desirable +to find out whether the format of that file is plain text. Although +this may appear like a simple task, a fully accurate detection of the +file type requires heavy-duty semantic analysis on the file contents. +It is, however, possible to obtain satisfactory results by employing +various heuristics. + +Previous versions of PKZip and other zip-compatible compression tools +were using a crude detection scheme: if more than 80% (4/5) of the bytes +found in a certain buffer are within the range [7..127], the file is +labeled as plain text, otherwise it is labeled as binary. A prominent +limitation of this scheme is the restriction to Latin-based alphabets. +Other alphabets, like Greek, Cyrillic or Asian, make extensive use of +the bytes within the range [128..255], and texts using these alphabets +are most often misidentified by this scheme; in other words, the rate +of false negatives is sometimes too high, which means that the recall +is low. Another weakness of this scheme is a reduced precision, due to +the false positives that may occur when binary files containing large +amounts of textual characters are misidentified as plain text. + +In this article we propose a new, simple detection scheme that features +a much increased precision and a near-100% recall. 
This scheme is +designed to work on ASCII, Unicode and other ASCII-derived alphabets, +and it handles single-byte encodings (ISO-8859, MacRoman, KOI8, etc.) +and variable-sized encodings (ISO-2022, UTF-8, etc.). Wider encodings +(UCS-2/UTF-16 and UCS-4/UTF-32) are not handled, however. + + +The Algorithm +------------- + +The algorithm works by dividing the set of bytecodes [0..255] into three +categories: +- The white list of textual bytecodes: + 9 (TAB), 10 (LF), 13 (CR), 32 (SPACE) to 255. +- The gray list of tolerated bytecodes: + 7 (BEL), 8 (BS), 11 (VT), 12 (FF), 26 (SUB), 27 (ESC). +- The black list of undesired, non-textual bytecodes: + 0 (NUL) to 6, 14 to 31. + +If a file contains at least one byte that belongs to the white list and +no byte that belongs to the black list, then the file is categorized as +plain text; otherwise, it is categorized as binary. (The boundary case, +when the file is empty, automatically falls into the latter category.) + + +Rationale +--------- + +The idea behind this algorithm relies on two observations. + +The first observation is that, although the full range of 7-bit codes +[0..127] is properly specified by the ASCII standard, most control +characters in the range [0..31] are not used in practice. The only +widely-used, almost universally-portable control codes are 9 (TAB), +10 (LF) and 13 (CR). There are a few more control codes that are +recognized on a reduced range of platforms and text viewers/editors: +7 (BEL), 8 (BS), 11 (VT), 12 (FF), 26 (SUB) and 27 (ESC); but these +codes are rarely (if ever) used alone, without being accompanied by +some printable text. Even the newer, portable text formats such as +XML avoid using control characters outside the list mentioned here. + +The second observation is that most of the binary files tend to contain +control characters, especially 0 (NUL). 
Even though the older text +detection schemes observe the presence of non-ASCII codes from the range +[128..255], the precision rarely has to suffer if this upper range is +labeled as textual, because the files that are genuinely binary tend to +contain both control characters and codes from the upper range. On the +other hand, the upper range needs to be labeled as textual, because it +is used by virtually all ASCII extensions. In particular, this range is +used for encoding non-Latin scripts. + +Since there is no counting involved, other than simply observing the +presence or the absence of some byte values, the algorithm produces +consistent results, regardless what alphabet encoding is being used. +(If counting were involved, it could be possible to obtain different +results on a text encoded, say, using ISO-8859-16 versus UTF-8.) + +There is an extra category of plain text files that are "polluted" with +one or more black-listed codes, either by mistake or by peculiar design +considerations. In such cases, a scheme that tolerates a small fraction +of black-listed codes would provide an increased recall (i.e. more true +positives). This, however, incurs a reduced precision overall, since +false positives are more likely to appear in binary files that contain +large chunks of textual data. Furthermore, "polluted" plain text should +be regarded as binary by general-purpose text detection schemes, because +general-purpose text processing algorithms might not be applicable. +Under this premise, it is safe to say that our detection method provides +a near-100% recall. + +Experiments have been run on many files coming from various platforms +and applications. We tried plain text files, system logs, source code, +formatted office documents, compiled object code, etc. The results +confirm the optimistic assumptions about the capabilities of this +algorithm. 
+ + +-- +Cosmin Truta +Last updated: 2006-May-28 diff -Nru nodejs-0.11.13/deps/zlib/examples/enough.c nodejs-0.11.15/deps/zlib/examples/enough.c --- nodejs-0.11.13/deps/zlib/examples/enough.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/enough.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,572 @@ +/* enough.c -- determine the maximum size of inflate's Huffman code tables over + * all possible valid and complete Huffman codes, subject to a length limit. + * Copyright (C) 2007, 2008, 2012 Mark Adler + * Version 1.4 18 August 2012 Mark Adler + */ + +/* Version history: + 1.0 3 Jan 2007 First version (derived from codecount.c version 1.4) + 1.1 4 Jan 2007 Use faster incremental table usage computation + Prune examine() search on previously visited states + 1.2 5 Jan 2007 Comments clean up + As inflate does, decrease root for short codes + Refuse cases where inflate would increase root + 1.3 17 Feb 2008 Add argument for initial root table size + Fix bug for initial root table size == max - 1 + Use a macro to compute the history index + 1.4 18 Aug 2012 Avoid shifts more than bits in type (caused endless loop!) + Clean up comparisons of different types + Clean up code indentation + */ + +/* + Examine all possible Huffman codes for a given number of symbols and a + maximum code length in bits to determine the maximum table size for zilb's + inflate. Only complete Huffman codes are counted. + + Two codes are considered distinct if the vectors of the number of codes per + length are not identical. So permutations of the symbol assignments result + in the same code for the counting, as do permutations of the assignments of + the bit values to the codes (i.e. only canonical codes are counted). + + We build a code from shorter to longer lengths, determining how many symbols + are coded at each length. 
At each step, we have how many symbols remain to + be coded, what the last code length used was, and how many bit patterns of + that length remain unused. Then we add one to the code length and double the + number of unused patterns to graduate to the next code length. We then + assign all portions of the remaining symbols to that code length that + preserve the properties of a correct and eventually complete code. Those + properties are: we cannot use more bit patterns than are available; and when + all the symbols are used, there are exactly zero possible bit patterns + remaining. + + The inflate Huffman decoding algorithm uses two-level lookup tables for + speed. There is a single first-level table to decode codes up to root bits + in length (root == 9 in the current inflate implementation). The table + has 1 << root entries and is indexed by the next root bits of input. Codes + shorter than root bits have replicated table entries, so that the correct + entry is pointed to regardless of the bits that follow the short code. If + the code is longer than root bits, then the table entry points to a second- + level table. The size of that table is determined by the longest code with + that root-bit prefix. If that longest code has length len, then the table + has size 1 << (len - root), to index the remaining bits in that set of + codes. Each subsequent root-bit prefix then has its own sub-table. The + total number of table entries required by the code is calculated + incrementally as the number of codes at each bit length is populated. When + all of the codes are shorter than root bits, then root is reduced to the + longest code length, resulting in a single, smaller, one-level table. + + The inflate algorithm also provides for small values of root (relative to + the log2 of the number of symbols), where the shortest code has more bits + than root. In that case, root is increased to the length of the shortest + code. 
This program, by design, does not handle that case, so it is verified + that the number of symbols is less than 2^(root + 1). + + In order to speed up the examination (by about ten orders of magnitude for + the default arguments), the intermediate states in the build-up of a code + are remembered and previously visited branches are pruned. The memory + required for this will increase rapidly with the total number of symbols and + the maximum code length in bits. However this is a very small price to pay + for the vast speedup. + + First, all of the possible Huffman codes are counted, and reachable + intermediate states are noted by a non-zero count in a saved-results array. + Second, the intermediate states that lead to (root + 1) bit or longer codes + are used to look at all sub-codes from those junctures for their inflate + memory usage. (The amount of memory used is not affected by the number of + codes of root bits or less in length.) Third, the visited states in the + construction of those sub-codes and the associated calculation of the table + size is recalled in order to avoid recalculating from the same juncture. + Beginning the code examination at (root + 1) bit codes, which is enabled by + identifying the reachable nodes, accounts for about six of the orders of + magnitude of improvement for the default arguments. About another four + orders of magnitude come from not revisiting previous states. Out of + approximately 2x10^16 possible Huffman codes, only about 2x10^6 sub-codes + need to be examined to cover all of the possible table memory usage cases + for the default arguments of 286 symbols limited to 15-bit codes. + + Note that an unsigned long long type is used for counting. It is quite easy + to exceed the capacity of an eight-byte integer with a large number of + symbols and a large maximum code length, so multiple-precision arithmetic + would need to replace the unsigned long long arithmetic in that case. 
This + program will abort if an overflow occurs. The big_t type identifies where + the counting takes place. + + An unsigned long long type is also used for calculating the number of + possible codes remaining at the maximum length. This limits the maximum + code length to the number of bits in a long long minus the number of bits + needed to represent the symbols in a flat code. The code_t type identifies + where the bit pattern counting takes place. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> + +#define local static + +/* special data types */ +typedef unsigned long long big_t; /* type for code counting */ +typedef unsigned long long code_t; /* type for bit pattern counting */ +struct tab { /* type for been here check */ + size_t len; /* length of bit vector in char's */ + char *vec; /* allocated bit vector */ +}; + +/* The array for saving results, num[], is indexed with this triplet: + + syms: number of symbols remaining to code + left: number of available bit patterns at length len + len: number of bits in the codes currently being assigned + + Those indices are constrained thusly when saving results: + + syms: 3..totsym (totsym == total symbols to code) + left: 2..syms - 1, but only the evens (so syms == 8 -> 2, 4, 6) + len: 1..max - 1 (max == maximum code length in bits) + + syms == 2 is not saved since that immediately leads to a single code. left + must be even, since it represents the number of available bit patterns at + the current length, which is double the number at the previous length. + left ends at syms-1 since left == syms immediately results in a single code. + (left > sym is not allowed since that would result in an incomplete code.) + len is less than max, since the code completes immediately when len == max. + + The offset into the array is calculated for the three indices with the + first one (syms) being outermost, and the last one (len) being innermost. 
+ We build the array with length max-1 lists for the len index, with syms-3 + of those for each symbol. There are totsym-2 of those, with each one + varying in length as a function of sym. See the calculation of index in + count() for the index, and the calculation of size in main() for the size + of the array. + + For the deflate example of 286 symbols limited to 15-bit codes, the array + has 284,284 entries, taking up 2.17 MB for an 8-byte big_t. More than + half of the space allocated for saved results is actually used -- not all + possible triplets are reached in the generation of valid Huffman codes. + */ + +/* The array for tracking visited states, done[], is itself indexed identically + to the num[] array as described above for the (syms, left, len) triplet. + Each element in the array is further indexed by the (mem, rem) doublet, + where mem is the amount of inflate table space used so far, and rem is the + remaining unused entries in the current inflate sub-table. Each indexed + element is simply one bit indicating whether the state has been visited or + not. Since the ranges for mem and rem are not known a priori, each bit + vector is of a variable size, and grows as needed to accommodate the visited + states. mem and rem are used to calculate a single index in a triangular + array. Since the range of mem is expected in the default case to be about + ten times larger than the range of rem, the array is skewed to reduce the + memory usage, with eight times the range for mem than for rem. See the + calculations for offset and bit in beenhere() for the details. + + For the deflate example of 286 symbols limited to 15-bit codes, the bit + vectors grow to total approximately 21 MB, in addition to the 4.3 MB done[] + array itself. 
+ */ + +/* Globals to avoid propagating constants or constant pointers recursively */ +local int max; /* maximum allowed bit length for the codes */ +local int root; /* size of base code table in bits */ +local int large; /* largest code table so far */ +local size_t size; /* number of elements in num and done */ +local int *code; /* number of symbols assigned to each bit length */ +local big_t *num; /* saved results array for code counting */ +local struct tab *done; /* states already evaluated array */ + +/* Index function for num[] and done[] */ +#define INDEX(i,j,k) (((size_t)((i-1)>>1)*((i-2)>>1)+(j>>1)-1)*(max-1)+k-1) + +/* Free allocated space. Uses globals code, num, and done. */ +local void cleanup(void) +{ + size_t n; + + if (done != NULL) { + for (n = 0; n < size; n++) + if (done[n].len) + free(done[n].vec); + free(done); + } + if (num != NULL) + free(num); + if (code != NULL) + free(code); +} + +/* Return the number of possible Huffman codes using bit patterns of lengths + len through max inclusive, coding syms symbols, with left bit patterns of + length len unused -- return -1 if there is an overflow in the counting. + Keep a record of previous results in num to prevent repeating the same + calculation. Uses the globals max and num. 
*/ +local big_t count(int syms, int len, int left) +{ + big_t sum; /* number of possible codes from this juncture */ + big_t got; /* value returned from count() */ + int least; /* least number of syms to use at this juncture */ + int most; /* most number of syms to use at this juncture */ + int use; /* number of bit patterns to use in next call */ + size_t index; /* index of this case in *num */ + + /* see if only one possible code */ + if (syms == left) + return 1; + + /* note and verify the expected state */ + assert(syms > left && left > 0 && len < max); + + /* see if we've done this one already */ + index = INDEX(syms, left, len); + got = num[index]; + if (got) + return got; /* we have -- return the saved result */ + + /* we need to use at least this many bit patterns so that the code won't be + incomplete at the next length (more bit patterns than symbols) */ + least = (left << 1) - syms; + if (least < 0) + least = 0; + + /* we can use at most this many bit patterns, lest there not be enough + available for the remaining symbols at the maximum length (if there were + no limit to the code length, this would become: most = left - 1) */ + most = (((code_t)left << (max - len)) - syms) / + (((code_t)1 << (max - len)) - 1); + + /* count all possible codes from this juncture and add them up */ + sum = 0; + for (use = least; use <= most; use++) { + got = count(syms - use, len + 1, (left - use) << 1); + sum += got; + if (got == (big_t)0 - 1 || sum < got) /* overflow */ + return (big_t)0 - 1; + } + + /* verify that all recursive calls are productive */ + assert(sum != 0); + + /* save the result and return it */ + num[index] = sum; + return sum; +} + +/* Return true if we've been here before, set to true if not. Set a bit in a + bit vector to indicate visiting this state. Each (syms,len,left) state + has a variable size bit vector indexed by (mem,rem). The bit vector is + lengthened if needed to allow setting the (mem,rem) bit. 
*/ +local int beenhere(int syms, int len, int left, int mem, int rem) +{ + size_t index; /* index for this state's bit vector */ + size_t offset; /* offset in this state's bit vector */ + int bit; /* mask for this state's bit */ + size_t length; /* length of the bit vector in bytes */ + char *vector; /* new or enlarged bit vector */ + + /* point to vector for (syms,left,len), bit in vector for (mem,rem) */ + index = INDEX(syms, left, len); + mem -= 1 << root; + offset = (mem >> 3) + rem; + offset = ((offset * (offset + 1)) >> 1) + rem; + bit = 1 << (mem & 7); + + /* see if we've been here */ + length = done[index].len; + if (offset < length && (done[index].vec[offset] & bit) != 0) + return 1; /* done this! */ + + /* we haven't been here before -- set the bit to show we have now */ + + /* see if we need to lengthen the vector in order to set the bit */ + if (length <= offset) { + /* if we have one already, enlarge it, zero out the appended space */ + if (length) { + do { + length <<= 1; + } while (length <= offset); + vector = realloc(done[index].vec, length); + if (vector != NULL) + memset(vector + done[index].len, 0, length - done[index].len); + } + + /* otherwise we need to make a new vector and zero it out */ + else { + length = 1 << (len - root); + while (length <= offset) + length <<= 1; + vector = calloc(length, sizeof(char)); + } + + /* in either case, bail if we can't get the memory */ + if (vector == NULL) { + fputs("abort: unable to allocate enough memory\n", stderr); + cleanup(); + exit(1); + } + + /* install the new vector */ + done[index].len = length; + done[index].vec = vector; + } + + /* set the bit */ + done[index].vec[offset] |= bit; + return 0; +} + +/* Examine all possible codes from the given node (syms, len, left). Compute + the amount of memory required to build inflate's decoding tables, where the + number of code structures used so far is mem, and the number remaining in + the current sub-table is rem. 
Uses the globals max, code, root, large, and + done. */ +local void examine(int syms, int len, int left, int mem, int rem) +{ + int least; /* least number of syms to use at this juncture */ + int most; /* most number of syms to use at this juncture */ + int use; /* number of bit patterns to use in next call */ + + /* see if we have a complete code */ + if (syms == left) { + /* set the last code entry */ + code[len] = left; + + /* complete computation of memory used by this code */ + while (rem < left) { + left -= rem; + rem = 1 << (len - root); + mem += rem; + } + assert(rem == left); + + /* if this is a new maximum, show the entries used and the sub-code */ + if (mem > large) { + large = mem; + printf("max %d: ", mem); + for (use = root + 1; use <= max; use++) + if (code[use]) + printf("%d[%d] ", code[use], use); + putchar('\n'); + fflush(stdout); + } + + /* remove entries as we drop back down in the recursion */ + code[len] = 0; + return; + } + + /* prune the tree if we can */ + if (beenhere(syms, len, left, mem, rem)) + return; + + /* we need to use at least this many bit patterns so that the code won't be + incomplete at the next length (more bit patterns than symbols) */ + least = (left << 1) - syms; + if (least < 0) + least = 0; + + /* we can use at most this many bit patterns, lest there not be enough + available for the remaining symbols at the maximum length (if there were + no limit to the code length, this would become: most = left - 1) */ + most = (((code_t)left << (max - len)) - syms) / + (((code_t)1 << (max - len)) - 1); + + /* occupy least table spaces, creating new sub-tables as needed */ + use = least; + while (rem < use) { + use -= rem; + rem = 1 << (len - root); + mem += rem; + } + rem -= use; + + /* examine codes from here, updating table space as we go */ + for (use = least; use <= most; use++) { + code[len] = use; + examine(syms - use, len + 1, (left - use) << 1, + mem + (rem ? 
1 << (len - root) : 0), rem << 1); + if (rem == 0) { + rem = 1 << (len - root); + mem += rem; + } + rem--; + } + + /* remove entries as we drop back down in the recursion */ + code[len] = 0; +} + +/* Look at all sub-codes starting with root + 1 bits. Look at only the valid + intermediate code states (syms, left, len). For each completed code, + calculate the amount of memory required by inflate to build the decoding + tables. Find the maximum amount of memory required and show the code that + requires that maximum. Uses the globals max, root, and num. */ +local void enough(int syms) +{ + int n; /* number of remaing symbols for this node */ + int left; /* number of unused bit patterns at this length */ + size_t index; /* index of this case in *num */ + + /* clear code */ + for (n = 0; n <= max; n++) + code[n] = 0; + + /* look at all (root + 1) bit and longer codes */ + large = 1 << root; /* base table */ + if (root < max) /* otherwise, there's only a base table */ + for (n = 3; n <= syms; n++) + for (left = 2; left < n; left += 2) + { + /* look at all reachable (root + 1) bit nodes, and the + resulting codes (complete at root + 2 or more) */ + index = INDEX(n, left, root + 1); + if (root + 1 < max && num[index]) /* reachable node */ + examine(n, root + 1, left, 1 << root, 0); + + /* also look at root bit codes with completions at root + 1 + bits (not saved in num, since complete), just in case */ + if (num[index - 1] && n <= left << 1) + examine((n - left) << 1, root + 1, (n - left) << 1, + 1 << root, 0); + } + + /* done */ + printf("done: maximum of %d table entries\n", large); +} + +/* + Examine and show the total number of possible Huffman codes for a given + maximum number of symbols, initial root table size, and maximum code length + in bits -- those are the command arguments in that order. The default + values are 286, 9, and 15 respectively, for the deflate literal/length code. 
+ The possible codes are counted for each number of coded symbols from two to + the maximum. The counts for each of those and the total number of codes are + shown. The maximum number of inflate table entires is then calculated + across all possible codes. Each new maximum number of table entries and the + associated sub-code (starting at root + 1 == 10 bits) is shown. + + To count and examine Huffman codes that are not length-limited, provide a + maximum length equal to the number of symbols minus one. + + For the deflate literal/length code, use "enough". For the deflate distance + code, use "enough 30 6". + + This uses the %llu printf format to print big_t numbers, which assumes that + big_t is an unsigned long long. If the big_t type is changed (for example + to a multiple precision type), the method of printing will also need to be + updated. + */ +int main(int argc, char **argv) +{ + int syms; /* total number of symbols to code */ + int n; /* number of symbols to code for this run */ + big_t got; /* return value of count() */ + big_t sum; /* accumulated number of codes over n */ + code_t word; /* for counting bits in code_t */ + + /* set up globals for cleanup() */ + code = NULL; + num = NULL; + done = NULL; + + /* get arguments -- default to the deflate literal/length code */ + syms = 286; + root = 9; + max = 15; + if (argc > 1) { + syms = atoi(argv[1]); + if (argc > 2) { + root = atoi(argv[2]); + if (argc > 3) + max = atoi(argv[3]); + } + } + if (argc > 4 || syms < 2 || root < 1 || max < 1) { + fputs("invalid arguments, need: [sym >= 2 [root >= 1 [max >= 1]]]\n", + stderr); + return 1; + } + + /* if not restricting the code length, the longest is syms - 1 */ + if (max > syms - 1) + max = syms - 1; + + /* determine the number of bits in a code_t */ + for (n = 0, word = 1; word; n++, word <<= 1) + ; + + /* make sure that the calculation of most will not overflow */ + if (max > n || (code_t)(syms - 2) >= (((code_t)0 - 1) >> (max - 1))) { + fputs("abort: code 
length too long for internal types\n", stderr); + return 1; + } + + /* reject impossible code requests */ + if ((code_t)(syms - 1) > ((code_t)1 << max) - 1) { + fprintf(stderr, "%d symbols cannot be coded in %d bits\n", + syms, max); + return 1; + } + + /* allocate code vector */ + code = calloc(max + 1, sizeof(int)); + if (code == NULL) { + fputs("abort: unable to allocate enough memory\n", stderr); + return 1; + } + + /* determine size of saved results array, checking for overflows, + allocate and clear the array (set all to zero with calloc()) */ + if (syms == 2) /* iff max == 1 */ + num = NULL; /* won't be saving any results */ + else { + size = syms >> 1; + if (size > ((size_t)0 - 1) / (n = (syms - 1) >> 1) || + (size *= n, size > ((size_t)0 - 1) / (n = max - 1)) || + (size *= n, size > ((size_t)0 - 1) / sizeof(big_t)) || + (num = calloc(size, sizeof(big_t))) == NULL) { + fputs("abort: unable to allocate enough memory\n", stderr); + cleanup(); + return 1; + } + } + + /* count possible codes for all numbers of symbols, add up counts */ + sum = 0; + for (n = 2; n <= syms; n++) { + got = count(n, 1, 2); + sum += got; + if (got == (big_t)0 - 1 || sum < got) { /* overflow */ + fputs("abort: can't count that high!\n", stderr); + cleanup(); + return 1; + } + printf("%llu %d-codes\n", got, n); + } + printf("%llu total codes for 2 to %d symbols", sum, syms); + if (max < syms - 1) + printf(" (%d-bit length limit)\n", max); + else + puts(" (no length limit)"); + + /* allocate and clear done array for beenhere() */ + if (syms == 2) + done = NULL; + else if (size > ((size_t)0 - 1) / sizeof(struct tab) || + (done = calloc(size, sizeof(struct tab))) == NULL) { + fputs("abort: unable to allocate enough memory\n", stderr); + cleanup(); + return 1; + } + + /* find and show maximum inflate table usage */ + if (root > max) /* reduce root to max length */ + root = max; + if ((code_t)syms < ((code_t)1 << (root + 1))) + enough(syms); + else + puts("cannot handle minimum code lengths 
> root"); + + /* done */ + cleanup(); + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/examples/fitblk.c nodejs-0.11.15/deps/zlib/examples/fitblk.c --- nodejs-0.11.13/deps/zlib/examples/fitblk.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/fitblk.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,233 @@ +/* fitblk.c: example of fitting compressed output to a specified size + Not copyrighted -- provided to the public domain + Version 1.1 25 November 2004 Mark Adler */ + +/* Version history: + 1.0 24 Nov 2004 First version + 1.1 25 Nov 2004 Change deflateInit2() to deflateInit() + Use fixed-size, stack-allocated raw buffers + Simplify code moving compression to subroutines + Use assert() for internal errors + Add detailed description of approach + */ + +/* Approach to just fitting a requested compressed size: + + fitblk performs three compression passes on a portion of the input + data in order to determine how much of that input will compress to + nearly the requested output block size. The first pass generates + enough deflate blocks to produce output to fill the requested + output size plus a specfied excess amount (see the EXCESS define + below). The last deflate block may go quite a bit past that, but + is discarded. The second pass decompresses and recompresses just + the compressed data that fit in the requested plus excess sized + buffer. The deflate process is terminated after that amount of + input, which is less than the amount consumed on the first pass. + The last deflate block of the result will be of a comparable size + to the final product, so that the header for that deflate block and + the compression ratio for that block will be about the same as in + the final product. The third compression pass decompresses the + result of the second step, but only the compressed data up to the + requested size minus an amount to allow the compressed stream to + complete (see the MARGIN define below). 
That will result in a + final compressed stream whose length is less than or equal to the + requested size. Assuming sufficient input and a requested size + greater than a few hundred bytes, the shortfall will typically be + less than ten bytes. + + If the input is short enough that the first compression completes + before filling the requested output size, then that compressed + stream is return with no recompression. + + EXCESS is chosen to be just greater than the shortfall seen in a + two pass approach similar to the above. That shortfall is due to + the last deflate block compressing more efficiently with a smaller + header on the second pass. EXCESS is set to be large enough so + that there is enough uncompressed data for the second pass to fill + out the requested size, and small enough so that the final deflate + block of the second pass will be close in size to the final deflate + block of the third and final pass. MARGIN is chosen to be just + large enough to assure that the final compression has enough room + to complete in all cases. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include "zlib.h" + +#define local static + +/* print nastygram and leave */ +local void quit(char *why) +{ + fprintf(stderr, "fitblk abort: %s\n", why); + exit(1); +} + +#define RAWLEN 4096 /* intermediate uncompressed buffer size */ + +/* compress from file to def until provided buffer is full or end of + input reached; return last deflate() return value, or Z_ERRNO if + there was read error on the file */ +local int partcompress(FILE *in, z_streamp def) +{ + int ret, flush; + unsigned char raw[RAWLEN]; + + flush = Z_NO_FLUSH; + do { + def->avail_in = fread(raw, 1, RAWLEN, in); + if (ferror(in)) + return Z_ERRNO; + def->next_in = raw; + if (feof(in)) + flush = Z_FINISH; + ret = deflate(def, flush); + assert(ret != Z_STREAM_ERROR); + } while (def->avail_out != 0 && flush == Z_NO_FLUSH); + return ret; +} + +/* recompress from inf's input to def's output; the input for inf and + the output for def are set in those structures before calling; + return last deflate() return value, or Z_MEM_ERROR if inflate() + was not able to allocate enough memory when it needed to */ +local int recompress(z_streamp inf, z_streamp def) +{ + int ret, flush; + unsigned char raw[RAWLEN]; + + flush = Z_NO_FLUSH; + do { + /* decompress */ + inf->avail_out = RAWLEN; + inf->next_out = raw; + ret = inflate(inf, Z_NO_FLUSH); + assert(ret != Z_STREAM_ERROR && ret != Z_DATA_ERROR && + ret != Z_NEED_DICT); + if (ret == Z_MEM_ERROR) + return ret; + + /* compress what was decompresed until done or no room */ + def->avail_in = RAWLEN - inf->avail_out; + def->next_in = raw; + if (inf->avail_out != 0) + flush = Z_FINISH; + ret = deflate(def, flush); + assert(ret != Z_STREAM_ERROR); + } while (ret != Z_STREAM_END && def->avail_out != 0); + return ret; +} + +#define EXCESS 256 /* empirically determined stream overage */ +#define MARGIN 8 /* amount to back off for completion */ + +/* compress from stdin to fixed-size block on stdout */ 
+int main(int argc, char **argv) +{ + int ret; /* return code */ + unsigned size; /* requested fixed output block size */ + unsigned have; /* bytes written by deflate() call */ + unsigned char *blk; /* intermediate and final stream */ + unsigned char *tmp; /* close to desired size stream */ + z_stream def, inf; /* zlib deflate and inflate states */ + + /* get requested output size */ + if (argc != 2) + quit("need one argument: size of output block"); + ret = strtol(argv[1], argv + 1, 10); + if (argv[1][0] != 0) + quit("argument must be a number"); + if (ret < 8) /* 8 is minimum zlib stream size */ + quit("need positive size of 8 or greater"); + size = (unsigned)ret; + + /* allocate memory for buffers and compression engine */ + blk = malloc(size + EXCESS); + def.zalloc = Z_NULL; + def.zfree = Z_NULL; + def.opaque = Z_NULL; + ret = deflateInit(&def, Z_DEFAULT_COMPRESSION); + if (ret != Z_OK || blk == NULL) + quit("out of memory"); + + /* compress from stdin until output full, or no more input */ + def.avail_out = size + EXCESS; + def.next_out = blk; + ret = partcompress(stdin, &def); + if (ret == Z_ERRNO) + quit("error reading input"); + + /* if it all fit, then size was undersubscribed -- done! 
*/ + if (ret == Z_STREAM_END && def.avail_out >= EXCESS) { + /* write block to stdout */ + have = size + EXCESS - def.avail_out; + if (fwrite(blk, 1, have, stdout) != have || ferror(stdout)) + quit("error writing output"); + + /* clean up and print results to stderr */ + ret = deflateEnd(&def); + assert(ret != Z_STREAM_ERROR); + free(blk); + fprintf(stderr, + "%u bytes unused out of %u requested (all input)\n", + size - have, size); + return 0; + } + + /* it didn't all fit -- set up for recompression */ + inf.zalloc = Z_NULL; + inf.zfree = Z_NULL; + inf.opaque = Z_NULL; + inf.avail_in = 0; + inf.next_in = Z_NULL; + ret = inflateInit(&inf); + tmp = malloc(size + EXCESS); + if (ret != Z_OK || tmp == NULL) + quit("out of memory"); + ret = deflateReset(&def); + assert(ret != Z_STREAM_ERROR); + + /* do first recompression close to the right amount */ + inf.avail_in = size + EXCESS; + inf.next_in = blk; + def.avail_out = size + EXCESS; + def.next_out = tmp; + ret = recompress(&inf, &def); + if (ret == Z_MEM_ERROR) + quit("out of memory"); + + /* set up for next reocmpression */ + ret = inflateReset(&inf); + assert(ret != Z_STREAM_ERROR); + ret = deflateReset(&def); + assert(ret != Z_STREAM_ERROR); + + /* do second and final recompression (third compression) */ + inf.avail_in = size - MARGIN; /* assure stream will complete */ + inf.next_in = tmp; + def.avail_out = size; + def.next_out = blk; + ret = recompress(&inf, &def); + if (ret == Z_MEM_ERROR) + quit("out of memory"); + assert(ret == Z_STREAM_END); /* otherwise MARGIN too small */ + + /* done -- write block to stdout */ + have = size - def.avail_out; + if (fwrite(blk, 1, have, stdout) != have || ferror(stdout)) + quit("error writing output"); + + /* clean up and print results to stderr */ + free(tmp); + ret = inflateEnd(&inf); + assert(ret != Z_STREAM_ERROR); + ret = deflateEnd(&def); + assert(ret != Z_STREAM_ERROR); + free(blk); + fprintf(stderr, + "%u bytes unused out of %u requested (%lu input)\n", + size - have, 
size, def.total_in); + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/examples/gun.c nodejs-0.11.15/deps/zlib/examples/gun.c --- nodejs-0.11.13/deps/zlib/examples/gun.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/gun.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,702 @@ +/* gun.c -- simple gunzip to give an example of the use of inflateBack() + * Copyright (C) 2003, 2005, 2008, 2010, 2012 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + Version 1.7 12 August 2012 Mark Adler */ + +/* Version history: + 1.0 16 Feb 2003 First version for testing of inflateBack() + 1.1 21 Feb 2005 Decompress concatenated gzip streams + Remove use of "this" variable (C++ keyword) + Fix return value for in() + Improve allocation failure checking + Add typecasting for void * structures + Add -h option for command version and usage + Add a bunch of comments + 1.2 20 Mar 2005 Add Unix compress (LZW) decompression + Copy file attributes from input file to output file + 1.3 12 Jun 2005 Add casts for error messages [Oberhumer] + 1.4 8 Dec 2006 LZW decompression speed improvements + 1.5 9 Feb 2008 Avoid warning in latest version of gcc + 1.6 17 Jan 2010 Avoid signed/unsigned comparison warnings + 1.7 12 Aug 2012 Update for z_const usage in zlib 1.2.8 + */ + +/* + gun [ -t ] [ name ... ] + + decompresses the data in the named gzip files. If no arguments are given, + gun will decompress from stdin to stdout. The names must end in .gz, -gz, + .z, -z, _z, or .Z. The uncompressed data will be written to a file name + with the suffix stripped. On success, the original file is deleted. On + failure, the output file is deleted. For most failures, the command will + continue to process the remaining names on the command line. A memory + allocation failure will abort the command. 
If -t is specified, then the + listed files or stdin will be tested as gzip files for integrity (without + checking for a proper suffix), no output will be written, and no files + will be deleted. + + Like gzip, gun allows concatenated gzip streams and will decompress them, + writing all of the uncompressed data to the output. Unlike gzip, gun allows + an empty file on input, and will produce no error writing an empty output + file. + + gun will also decompress files made by Unix compress, which uses LZW + compression. These files are automatically detected by virtue of their + magic header bytes. Since the end of Unix compress stream is marked by the + end-of-file, they cannot be concantenated. If a Unix compress stream is + encountered in an input file, it is the last stream in that file. + + Like gunzip and uncompress, the file attributes of the orignal compressed + file are maintained in the final uncompressed file, to the extent that the + user permissions allow it. + + On my Mac OS X PowerPC G4, gun is almost twice as fast as gunzip (version + 1.2.4) is on the same file, when gun is linked with zlib 1.2.2. Also the + LZW decompression provided by gun is about twice as fast as the standard + Unix uncompress command. 
+ */ + +/* external functions and related types and constants */ +#include <stdio.h> /* fprintf() */ +#include <stdlib.h> /* malloc(), free() */ +#include <string.h> /* strerror(), strcmp(), strlen(), memcpy() */ +#include <errno.h> /* errno */ +#include <fcntl.h> /* open() */ +#include <unistd.h> /* read(), write(), close(), chown(), unlink() */ +#include <sys/types.h> +#include <sys/stat.h> /* stat(), chmod() */ +#include <utime.h> /* utime() */ +#include "zlib.h" /* inflateBackInit(), inflateBack(), */ + /* inflateBackEnd(), crc32() */ + +/* function declaration */ +#define local static + +/* buffer constants */ +#define SIZE 32768U /* input and output buffer sizes */ +#define PIECE 16384 /* limits i/o chunks for 16-bit int case */ + +/* structure for infback() to pass to input function in() -- it maintains the + input file and a buffer of size SIZE */ +struct ind { + int infile; + unsigned char *inbuf; +}; + +/* Load input buffer, assumed to be empty, and return bytes loaded and a + pointer to them. read() is called until the buffer is full, or until it + returns end-of-file or error. Return 0 on error. */ +local unsigned in(void *in_desc, z_const unsigned char **buf) +{ + int ret; + unsigned len; + unsigned char *next; + struct ind *me = (struct ind *)in_desc; + + next = me->inbuf; + *buf = next; + len = 0; + do { + ret = PIECE; + if ((unsigned)ret > SIZE - len) + ret = (int)(SIZE - len); + ret = (int)read(me->infile, next, ret); + if (ret == -1) { + len = 0; + break; + } + next += ret; + len += ret; + } while (ret != 0 && len < SIZE); + return len; +} + +/* structure for infback() to pass to output function out() -- it maintains the + output file, a running CRC-32 check on the output and the total number of + bytes output, both for checking against the gzip trailer. (The length in + the gzip trailer is stored modulo 2^32, so it's ok if a long is 32 bits and + the output is greater than 4 GB.) 
*/ +struct outd { + int outfile; + int check; /* true if checking crc and total */ + unsigned long crc; + unsigned long total; +}; + +/* Write output buffer and update the CRC-32 and total bytes written. write() + is called until all of the output is written or an error is encountered. + On success out() returns 0. For a write failure, out() returns 1. If the + output file descriptor is -1, then nothing is written. + */ +local int out(void *out_desc, unsigned char *buf, unsigned len) +{ + int ret; + struct outd *me = (struct outd *)out_desc; + + if (me->check) { + me->crc = crc32(me->crc, buf, len); + me->total += len; + } + if (me->outfile != -1) + do { + ret = PIECE; + if ((unsigned)ret > len) + ret = (int)len; + ret = (int)write(me->outfile, buf, ret); + if (ret == -1) + return 1; + buf += ret; + len -= ret; + } while (len != 0); + return 0; +} + +/* next input byte macro for use inside lunpipe() and gunpipe() */ +#define NEXT() (have ? 0 : (have = in(indp, &next)), \ + last = have ? (have--, (int)(*next++)) : -1) + +/* memory for gunpipe() and lunpipe() -- + the first 256 entries of prefix[] and suffix[] are never used, could + have offset the index, but it's faster to waste the memory */ +unsigned char inbuf[SIZE]; /* input buffer */ +unsigned char outbuf[SIZE]; /* output buffer */ +unsigned short prefix[65536]; /* index to LZW prefix string */ +unsigned char suffix[65536]; /* one-character LZW suffix */ +unsigned char match[65280 + 2]; /* buffer for reversed match or gzip + 32K sliding window */ + +/* throw out what's left in the current bits byte buffer (this is a vestigial + aspect of the compressed data format derived from an implementation that + made use of a special VAX machine instruction!) 
*/
#define FLUSHCODE() \
    do { \
        left = 0; \
        rem = 0; \
        if (chunk > have) { \
            chunk -= have; \
            have = 0; \
            if (NEXT() == -1) \
                break; \
            chunk--; \
            if (chunk > have) { \
                chunk = have = 0; \
                break; \
            } \
        } \
        have -= chunk; \
        next += chunk; \
        chunk = 0; \
    } while (0)

/* Decompress a compress (LZW) file from indp to outfile.  The compress magic
   header (two bytes) has already been read and verified.  There are have bytes
   of buffered input at next.  strm is used for passing error information back
   to gunpipe().

   lunpipe() will return Z_OK on success, Z_BUF_ERROR for an unexpected end of
   file, read error, or write error (a write error indicated by strm->next_in
   not equal to Z_NULL), or Z_DATA_ERROR for invalid input.

   NOTE(review): NEXT() and FLUSHCODE() update have/next/last as side effects;
   the statement order below is load-bearing and must not be rearranged. */
local int lunpipe(unsigned have, z_const unsigned char *next, struct ind *indp,
                  int outfile, z_stream *strm)
{
    int last;               /* last byte read by NEXT(), or -1 if EOF */
    unsigned chunk;         /* bytes left in current chunk */
    int left;               /* bits left in rem */
    unsigned rem;           /* unused bits from input */
    int bits;               /* current bits per code */
    unsigned code;          /* code, table traversal index */
    unsigned mask;          /* mask for current bits codes */
    int max;                /* maximum bits per code for this stream */
    unsigned flags;         /* compress flags, then block compress flag */
    unsigned end;           /* last valid entry in prefix/suffix tables */
    unsigned temp;          /* current code */
    unsigned prev;          /* previous code */
    unsigned final;         /* last character written for previous code */
    unsigned stack;         /* next position for reversed string */
    unsigned outcnt;        /* bytes in output buffer */
    struct outd outd;       /* output structure */
    unsigned char *p;

    /* set up output -- no CRC checking for LZW streams (outd.check == 0) */
    outd.outfile = outfile;
    outd.check = 0;

    /* process remainder of compress header -- a flags byte */
    flags = NEXT();
    if (last == -1)
        return Z_BUF_ERROR;
    if (flags & 0x60) {
        strm->msg = (char *)"unknown lzw flags set";
        return Z_DATA_ERROR;
    }
    max = flags & 0x1f;
    if (max < 9 || max > 16) {
        strm->msg = (char *)"lzw bits out of range";
        return Z_DATA_ERROR;
    }
    if (max == 9)                           /* 9 doesn't really mean 9 */
        max = 10;
    flags &= 0x80;                          /* true if block compress */

    /* clear table */
    bits = 9;
    mask = 0x1ff;
    end = flags ? 256 : 255;                /* block compress reserves code 256 */

    /* set up: get first 9-bit code, which is the first decompressed byte, but
       don't create a table entry until the next code */
    if (NEXT() == -1)                       /* no compressed data is ok */
        return Z_OK;
    final = prev = (unsigned)last;          /* low 8 bits of code */
    if (NEXT() == -1)                       /* missing a bit */
        return Z_BUF_ERROR;
    if (last & 1) {                         /* code must be < 256 */
        strm->msg = (char *)"invalid lzw code";
        return Z_DATA_ERROR;
    }
    rem = (unsigned)last >> 1;              /* remaining 7 bits */
    left = 7;
    chunk = bits - 2;                       /* 7 bytes left in this chunk */
    outbuf[0] = (unsigned char)final;       /* write first decompressed byte */
    outcnt = 1;

    /* decode codes */
    stack = 0;
    for (;;) {
        /* if the table will be full after this, increment the code size */
        if (end >= mask && bits < max) {
            FLUSHCODE();
            bits++;
            mask <<= 1;
            mask++;
        }

        /* get a code of length bits */
        if (chunk == 0)                     /* decrement chunk modulo bits */
            chunk = bits;
        code = rem;                         /* low bits of code */
        if (NEXT() == -1) {                 /* EOF is end of compressed data */
            /* write remaining buffered output */
            if (outcnt && out(&outd, outbuf, outcnt)) {
                strm->next_in = outbuf;     /* signal write error */
                return Z_BUF_ERROR;
            }
            return Z_OK;
        }
        code += (unsigned)last << left;     /* middle (or high) bits of code */
        left += 8;
        chunk--;
        if (bits > left) {                  /* need more bits */
            if (NEXT() == -1)               /* can't end in middle of code */
                return Z_BUF_ERROR;
            code += (unsigned)last << left; /* high bits of code */
            left += 8;
            chunk--;
        }
        code &= mask;                       /* mask to current code length */
        left -= bits;                       /* number of unused bits */
        rem = (unsigned)last >> (8 - left); /* unused bits from last byte */

        /* process clear code (256) */
        if (code == 256 && flags) {
            FLUSHCODE();
            bits = 9;                       /* initialize bits and mask */
            mask = 0x1ff;
            end = 255;                      /* empty table */
            continue;                       /* get next code */
        }

        /* special code to reuse last match */
        temp = code;                        /* save the current code */
        if (code > end) {
            /* Be picky on the allowed code here, and make sure that the code
               we drop through (prev) will be a valid index so that random
               input does not cause an exception.  The code != end + 1 check is
               empirically derived, and not checked in the original uncompress
               code.  If this ever causes a problem, that check could be safely
               removed.  Leaving this check in greatly improves gun's ability
               to detect random or corrupted input after a compress header.
               In any case, the prev > end check must be retained. */
            if (code != end + 1 || prev > end) {
                strm->msg = (char *)"invalid lzw code";
                return Z_DATA_ERROR;
            }
            match[stack++] = (unsigned char)final;
            code = prev;
        }

        /* walk through linked list to generate output in reverse order */
        p = match + stack;
        while (code >= 256) {
            *p++ = suffix[code];
            code = prefix[code];
        }
        stack = p - match;
        match[stack++] = (unsigned char)code;
        final = code;

        /* link new table entry */
        if (end < mask) {
            end++;
            prefix[end] = (unsigned short)prev;
            suffix[end] = (unsigned char)final;
        }

        /* set previous code for next iteration */
        prev = temp;

        /* write output in forward order */
        while (stack > SIZE - outcnt) {
            while (outcnt < SIZE)
                outbuf[outcnt++] = match[--stack];
            if (out(&outd, outbuf, outcnt)) {
                strm->next_in = outbuf;     /* signal write error */
                return Z_BUF_ERROR;
            }
            outcnt = 0;
        }
        p = match + stack;
        do {
            outbuf[outcnt++] = *--p;
        } while (p > match);
        stack = 0;

        /* loop for next code with final and prev as the last match, rem and
           left provide the first 0..7 bits of the next code, end is the last
           valid table entry */
    }
}

/* Decompress a gzip file from infile to
outfile.  strm is assumed to have been
   successfully initialized with inflateBackInit().  The input file may consist
   of a series of gzip streams, in which case all of them will be decompressed
   to the output file.  If outfile is -1, then the gzip stream(s) integrity is
   checked and nothing is written.

   The return value is a zlib error code: Z_MEM_ERROR if out of memory,
   Z_DATA_ERROR if the header or the compressed data is invalid, or if the
   trailer CRC-32 check or length doesn't match, Z_BUF_ERROR if the input ends
   prematurely or a write error occurs, or Z_ERRNO if junk (not another gzip
   stream) follows a valid gzip stream.
 */
local int gunpipe(z_stream *strm, int infile, int outfile)
{
    int ret, first, last;
    unsigned have, flags, len;
    z_const unsigned char *next = NULL;
    struct ind ind, *indp;
    struct outd outd;

    /* setup input buffer */
    ind.infile = infile;
    ind.inbuf = inbuf;
    indp = &ind;

    /* decompress concatenated gzip streams */
    have = 0;                               /* no input data read in yet */
    first = 1;                              /* looking for first gzip header */
    strm->next_in = Z_NULL;                 /* so Z_BUF_ERROR means EOF */
    for (;;) {
        /* look for the two magic header bytes for a gzip stream:
           31 139 is gzip, 31 157 is compress (LZW) */
        if (NEXT() == -1) {
            ret = Z_OK;
            break;                          /* empty gzip stream is ok */
        }
        if (last != 31 || (NEXT() != 139 && last != 157)) {
            strm->msg = (char *)"incorrect header check";
            ret = first ? Z_DATA_ERROR : Z_ERRNO;
            break;                          /* not a gzip or compress header */
        }
        first = 0;                          /* next non-header is junk */

        /* process a compress (LZW) file -- can't be concatenated after this */
        if (last == 157) {
            ret = lunpipe(have, next, indp, outfile, strm);
            break;
        }

        /* process remainder of gzip header */
        ret = Z_BUF_ERROR;
        if (NEXT() != 8) {                  /* only deflate method allowed */
            if (last == -1) break;
            strm->msg = (char *)"unknown compression method";
            ret = Z_DATA_ERROR;
            break;
        }
        flags = NEXT();                     /* header flags */
        NEXT();                             /* discard mod time, xflgs, os */
        NEXT();
        NEXT();
        NEXT();
        NEXT();
        NEXT();
        if (last == -1) break;
        if (flags & 0xe0) {
            strm->msg = (char *)"unknown header flags set";
            ret = Z_DATA_ERROR;
            break;
        }
        if (flags & 4) {                    /* extra field */
            len = NEXT();
            len += (unsigned)(NEXT()) << 8;
            if (last == -1) break;
            while (len > have) {            /* skip over the extra field */
                len -= have;
                have = 0;
                if (NEXT() == -1) break;
                len--;
            }
            if (last == -1) break;
            have -= len;
            next += len;
        }
        if (flags & 8)                      /* file name (zero-terminated) */
            while (NEXT() != 0 && last != -1)
                ;
        if (flags & 16)                     /* comment (zero-terminated) */
            while (NEXT() != 0 && last != -1)
                ;
        if (flags & 2) {                    /* header crc */
            NEXT();
            NEXT();
        }
        if (last == -1) break;

        /* set up output -- compute CRC-32 and total for trailer check */
        outd.outfile = outfile;
        outd.check = 1;
        outd.crc = crc32(0L, Z_NULL, 0);
        outd.total = 0;

        /* decompress data to output */
        strm->next_in = next;
        strm->avail_in = have;
        ret = inflateBack(strm, in, indp, out, &outd);
        if (ret != Z_STREAM_END) break;
        next = strm->next_in;
        have = strm->avail_in;
        strm->next_in = Z_NULL;             /* so Z_BUF_ERROR means EOF */

        /* check trailer -- four bytes of CRC-32, then four of total length,
           both little-endian */
        ret = Z_BUF_ERROR;
        if (NEXT() != (int)(outd.crc & 0xff) ||
            NEXT() != (int)((outd.crc >> 8) & 0xff) ||
            NEXT() != (int)((outd.crc >> 16) & 0xff) ||
            NEXT() != (int)((outd.crc >> 24) & 0xff)) {
            /* crc error */
            if (last != -1) {
                strm->msg = (char *)"incorrect data check";
                ret = Z_DATA_ERROR;
            }
            break;
        }
        if (NEXT() != (int)(outd.total & 0xff) ||
            NEXT() != (int)((outd.total >> 8) & 0xff) ||
            NEXT() != (int)((outd.total >> 16) & 0xff) ||
            NEXT() != (int)((outd.total >> 24) & 0xff)) {
            /* length error */
            if (last != -1) {
                strm->msg = (char *)"incorrect length check";
                ret = Z_DATA_ERROR;
            }
            break;
        }

        /* go back and look for another gzip stream */
    }

    /* clean up and return */
    return ret;
}

/* Copy file attributes, from -> to, as best we can.  This is best effort, so
   no errors are reported.  The mode bits, including suid, sgid, and the sticky
   bit are copied (if allowed), the owner's user id and group id are copied
   (again if allowed), and the access and modify times are copied. */
local void copymeta(char *from, char *to)
{
    struct stat was;
    struct utimbuf when;

    /* get all of from's Unix meta data, return if not a regular file */
    if (stat(from, &was) != 0 || (was.st_mode & S_IFMT) != S_IFREG)
        return;

    /* set to's mode bits, ignore errors */
    (void)chmod(to, was.st_mode & 07777);

    /* copy owner's user and group, ignore errors */
    (void)chown(to, was.st_uid, was.st_gid);

    /* copy access and modify times, ignore errors */
    when.actime = was.st_atime;
    when.modtime = was.st_mtime;
    (void)utime(to, &when);
}

/* Decompress the file inname to the file outname, or if test is true, just
   decompress without writing and check the gzip trailer for integrity.  If
   inname is NULL or an empty string, read from stdin.  If outname is NULL or
   an empty string, write to stdout.  strm is a pre-initialized inflateBack
   structure.  When appropriate, copy the file attributes from inname to
   outname.

   gunzip() returns 1 if there is an out-of-memory error or an unexpected
   return code from gunpipe().  Otherwise it returns 0.
*/
local int gunzip(z_stream *strm, char *inname, char *outname, int test)
{
    int ret;
    int infile, outfile;

    /* open files -- NULL or empty names mean the standard descriptors */
    if (inname == NULL || *inname == 0) {
        inname = "-";
        infile = 0;                         /* stdin */
    }
    else {
        infile = open(inname, O_RDONLY, 0);
        if (infile == -1) {
            fprintf(stderr, "gun cannot open %s\n", inname);
            return 0;
        }
    }
    if (test)
        outfile = -1;                       /* check integrity, write nothing */
    else if (outname == NULL || *outname == 0) {
        outname = "-";
        outfile = 1;                        /* stdout */
    }
    else {
        outfile = open(outname, O_CREAT | O_TRUNC | O_WRONLY, 0666);
        if (outfile == -1) {
            close(infile);
            fprintf(stderr, "gun cannot create %s\n", outname);
            return 0;
        }
    }
    errno = 0;                              /* so errno != 0 means a real error below */

    /* decompress -- only close descriptors we opened (fd > 2, i.e. not
       stdin/stdout/stderr) */
    ret = gunpipe(strm, infile, outfile);
    if (outfile > 2) close(outfile);
    if (infile > 2) close(infile);

    /* interpret result */
    switch (ret) {
    case Z_OK:
    case Z_ERRNO:
        /* success: copy attributes and remove the input file, but only when
           both ends were real files (not std streams) */
        if (infile > 2 && outfile > 2) {
            copymeta(inname, outname);      /* copy attributes */
            unlink(inname);
        }
        if (ret == Z_ERRNO)
            fprintf(stderr, "gun warning: trailing garbage ignored in %s\n",
                    inname);
        break;
    case Z_DATA_ERROR:
        if (outfile > 2) unlink(outname);
        fprintf(stderr, "gun data error on %s: %s\n", inname, strm->msg);
        break;
    case Z_MEM_ERROR:
        if (outfile > 2) unlink(outname);
        fprintf(stderr, "gun out of memory error--aborting\n");
        return 1;
    case Z_BUF_ERROR:
        /* gunpipe() sets strm->next_in != Z_NULL to flag a write error;
           otherwise a nonzero errno means a read error, and neither means
           a premature end of input */
        if (outfile > 2) unlink(outname);
        if (strm->next_in != Z_NULL) {
            fprintf(stderr, "gun write error on %s: %s\n",
                    outname, strerror(errno));
        }
        else if (errno) {
            fprintf(stderr, "gun read error on %s: %s\n",
                    inname, strerror(errno));
        }
        else {
            fprintf(stderr, "gun unexpected end of file on %s\n",
                    inname);
        }
        break;
    default:
        if (outfile > 2) unlink(outname);
        fprintf(stderr, "gun internal error--aborting\n");
        return 1;
    }
    return 0;
}

/* Process the gun command line arguments.  See the command syntax near the
   beginning of this source file.
*/
int main(int argc, char **argv)
{
    int ret, len, test;
    char *outname;
    unsigned char *window;
    z_stream strm;

    /* initialize inflateBack state for repeated use */
    window = match;                         /* reuse LZW match buffer */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    ret = inflateBackInit(&strm, 15, window);
    if (ret != Z_OK) {
        fprintf(stderr, "gun out of memory error--aborting\n");
        return 1;
    }

    /* decompress each file to the same name with the suffix removed */
    argc--;
    argv++;
    test = 0;
    if (argc && strcmp(*argv, "-h") == 0) {
        fprintf(stderr, "gun 1.6 (17 Jan 2010)\n");
        fprintf(stderr, "Copyright (C) 2003-2010 Mark Adler\n");
        fprintf(stderr, "usage: gun [-t] [file1.gz [file2.Z ...]]\n");
        return 0;
    }
    if (argc && strcmp(*argv, "-t") == 0) {
        test = 1;                           /* integrity test only, no output */
        argc--;
        argv++;
    }
    if (argc)
        do {
            if (test)
                outname = NULL;
            else {
                len = (int)strlen(*argv);
                /* Only compare a suffix when the name is long enough to hold
                   it.  The previous unguarded *argv + len - 3 arithmetic read
                   before the start of the string for names shorter than the
                   suffix (undefined behavior, e.g. "gun z"). */
                if (len >= 3 &&
                    (strcmp(*argv + len - 3, ".gz") == 0 ||
                     strcmp(*argv + len - 3, "-gz") == 0))
                    len -= 3;
                else if (len >= 2 &&
                         (strcmp(*argv + len - 2, ".z") == 0 ||
                          strcmp(*argv + len - 2, "-z") == 0 ||
                          strcmp(*argv + len - 2, "_z") == 0 ||
                          strcmp(*argv + len - 2, ".Z") == 0))
                    len -= 2;
                else {
                    fprintf(stderr, "gun error: no gz type on %s--skipping\n",
                            *argv);
                    continue;
                }
                /* output name is the input name with the suffix removed */
                outname = malloc(len + 1);
                if (outname == NULL) {
                    fprintf(stderr, "gun out of memory error--aborting\n");
                    ret = 1;
                    break;
                }
                memcpy(outname, *argv, len);
                outname[len] = 0;
            }
            ret = gunzip(&strm, *argv, outname, test);
            if (outname != NULL) free(outname);
            if (ret) break;
        } while (argv++, --argc);
    else
        ret = gunzip(&strm, NULL, NULL, test);  /* no args: stdin to stdout */

    /* clean up */
    inflateBackEnd(&strm);
    return ret;
}
diff -Nru nodejs-0.11.13/deps/zlib/examples/gzappend.c nodejs-0.11.15/deps/zlib/examples/gzappend.c
--- nodejs-0.11.13/deps/zlib/examples/gzappend.c	1970-01-01 00:00:00.000000000 +0000
+++ nodejs-0.11.15/deps/zlib/examples/gzappend.c	2015-01-20 21:22:17.000000000 +0000
@@ -0,0 +1,504 @@ +/* gzappend -- command to append to a gzip file + + Copyright (C) 2003, 2012 Mark Adler, all rights reserved + version 1.2, 11 Oct 2012 + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Mark Adler madler@alumni.caltech.edu + */ + +/* + * Change history: + * + * 1.0 19 Oct 2003 - First version + * 1.1 4 Nov 2003 - Expand and clarify some comments and notes + * - Add version and copyright to help + * - Send help to stdout instead of stderr + * - Add some preemptive typecasts + * - Add L to constants in lseek() calls + * - Remove some debugging information in error messages + * - Use new data_type definition for zlib 1.2.1 + * - Simplfy and unify file operations + * - Finish off gzip file in gztack() + * - Use deflatePrime() instead of adding empty blocks + * - Keep gzip file clean on appended file read errors + * - Use in-place rotate instead of auxiliary buffer + * (Why you ask? Because it was fun to write!) + * 1.2 11 Oct 2012 - Fix for proper z_const usage + * - Check for input buffer malloc failure + */ + +/* + gzappend takes a gzip file and appends to it, compressing files from the + command line or data from stdin. 
The gzip file is written to directly, to + avoid copying that file, in case it's large. Note that this results in the + unfriendly behavior that if gzappend fails, the gzip file is corrupted. + + This program was written to illustrate the use of the new Z_BLOCK option of + zlib 1.2.x's inflate() function. This option returns from inflate() at each + block boundary to facilitate locating and modifying the last block bit at + the start of the final deflate block. Also whether using Z_BLOCK or not, + another required feature of zlib 1.2.x is that inflate() now provides the + number of unusued bits in the last input byte used. gzappend will not work + with versions of zlib earlier than 1.2.1. + + gzappend first decompresses the gzip file internally, discarding all but + the last 32K of uncompressed data, and noting the location of the last block + bit and the number of unused bits in the last byte of the compressed data. + The gzip trailer containing the CRC-32 and length of the uncompressed data + is verified. This trailer will be later overwritten. + + Then the last block bit is cleared by seeking back in the file and rewriting + the byte that contains it. Seeking forward, the last byte of the compressed + data is saved along with the number of unused bits to initialize deflate. + + A deflate process is initialized, using the last 32K of the uncompressed + data from the gzip file to initialize the dictionary. If the total + uncompressed data was less than 32K, then all of it is used to initialize + the dictionary. The deflate output bit buffer is also initialized with the + last bits from the original deflate stream. From here on, the data to + append is simply compressed using deflate, and written to the gzip file. + When that is complete, the new CRC-32 and uncompressed length are written + as the trailer of the gzip file. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <fcntl.h> +#include <unistd.h> +#include "zlib.h" + +#define local static +#define LGCHUNK 14 +#define CHUNK (1U << LGCHUNK) +#define DSIZE 32768U + +/* print an error message and terminate with extreme prejudice */ +local void bye(char *msg1, char *msg2) +{ + fprintf(stderr, "gzappend error: %s%s\n", msg1, msg2); + exit(1); +} + +/* return the greatest common divisor of a and b using Euclid's algorithm, + modified to be fast when one argument much greater than the other, and + coded to avoid unnecessary swapping */ +local unsigned gcd(unsigned a, unsigned b) +{ + unsigned c; + + while (a && b) + if (a > b) { + c = b; + while (a - c >= c) + c <<= 1; + a -= c; + } + else { + c = a; + while (b - c >= c) + c <<= 1; + b -= c; + } + return a + b; +} + +/* rotate list[0..len-1] left by rot positions, in place */ +local void rotate(unsigned char *list, unsigned len, unsigned rot) +{ + unsigned char tmp; + unsigned cycles; + unsigned char *start, *last, *to, *from; + + /* normalize rot and handle degenerate cases */ + if (len < 2) return; + if (rot >= len) rot %= len; + if (rot == 0) return; + + /* pointer to last entry in list */ + last = list + (len - 1); + + /* do simple left shift by one */ + if (rot == 1) { + tmp = *list; + memcpy(list, list + 1, len - 1); + *last = tmp; + return; + } + + /* do simple right shift by one */ + if (rot == len - 1) { + tmp = *last; + memmove(list + 1, list, len - 1); + *list = tmp; + return; + } + + /* otherwise do rotate as a set of cycles in place */ + cycles = gcd(len, rot); /* number of cycles */ + do { + start = from = list + cycles; /* start index is arbitrary */ + tmp = *from; /* save entry to be overwritten */ + for (;;) { + to = from; /* next step in cycle */ + from += rot; /* go right rot positions */ + if (from > last) from -= len; /* (pointer better not wrap) */ + if (from == start) break; /* all but one shifted */ + *to = *from; /* shift left */ + 
} + *to = tmp; /* complete the circle */ + } while (--cycles); +} + +/* structure for gzip file read operations */ +typedef struct { + int fd; /* file descriptor */ + int size; /* 1 << size is bytes in buf */ + unsigned left; /* bytes available at next */ + unsigned char *buf; /* buffer */ + z_const unsigned char *next; /* next byte in buffer */ + char *name; /* file name for error messages */ +} file; + +/* reload buffer */ +local int readin(file *in) +{ + int len; + + len = read(in->fd, in->buf, 1 << in->size); + if (len == -1) bye("error reading ", in->name); + in->left = (unsigned)len; + in->next = in->buf; + return len; +} + +/* read from file in, exit if end-of-file */ +local int readmore(file *in) +{ + if (readin(in) == 0) bye("unexpected end of ", in->name); + return 0; +} + +#define read1(in) (in->left == 0 ? readmore(in) : 0, \ + in->left--, *(in->next)++) + +/* skip over n bytes of in */ +local void skip(file *in, unsigned n) +{ + unsigned bypass; + + if (n > in->left) { + n -= in->left; + bypass = n & ~((1U << in->size) - 1); + if (bypass) { + if (lseek(in->fd, (off_t)bypass, SEEK_CUR) == -1) + bye("seeking ", in->name); + n -= bypass; + } + readmore(in); + if (n > in->left) + bye("unexpected end of ", in->name); + } + in->left -= n; + in->next += n; +} + +/* read a four-byte unsigned integer, little-endian, from in */ +unsigned long read4(file *in) +{ + unsigned long val; + + val = read1(in); + val += (unsigned)read1(in) << 8; + val += (unsigned long)read1(in) << 16; + val += (unsigned long)read1(in) << 24; + return val; +} + +/* skip over gzip header */ +local void gzheader(file *in) +{ + int flags; + unsigned n; + + if (read1(in) != 31 || read1(in) != 139) bye(in->name, " not a gzip file"); + if (read1(in) != 8) bye("unknown compression method in", in->name); + flags = read1(in); + if (flags & 0xe0) bye("unknown header flags set in", in->name); + skip(in, 6); + if (flags & 4) { + n = read1(in); + n += (unsigned)(read1(in)) << 8; + skip(in, n); + } + 
if (flags & 8) while (read1(in) != 0) ; + if (flags & 16) while (read1(in) != 0) ; + if (flags & 2) skip(in, 2); +} + +/* decompress gzip file "name", return strm with a deflate stream ready to + continue compression of the data in the gzip file, and return a file + descriptor pointing to where to write the compressed data -- the deflate + stream is initialized to compress using level "level" */ +local int gzscan(char *name, z_stream *strm, int level) +{ + int ret, lastbit, left, full; + unsigned have; + unsigned long crc, tot; + unsigned char *window; + off_t lastoff, end; + file gz; + + /* open gzip file */ + gz.name = name; + gz.fd = open(name, O_RDWR, 0); + if (gz.fd == -1) bye("cannot open ", name); + gz.buf = malloc(CHUNK); + if (gz.buf == NULL) bye("out of memory", ""); + gz.size = LGCHUNK; + gz.left = 0; + + /* skip gzip header */ + gzheader(&gz); + + /* prepare to decompress */ + window = malloc(DSIZE); + if (window == NULL) bye("out of memory", ""); + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + ret = inflateInit2(strm, -15); + if (ret != Z_OK) bye("out of memory", " or library mismatch"); + + /* decompress the deflate stream, saving append information */ + lastbit = 0; + lastoff = lseek(gz.fd, 0L, SEEK_CUR) - gz.left; + left = 0; + strm->avail_in = gz.left; + strm->next_in = gz.next; + crc = crc32(0L, Z_NULL, 0); + have = full = 0; + do { + /* if needed, get more input */ + if (strm->avail_in == 0) { + readmore(&gz); + strm->avail_in = gz.left; + strm->next_in = gz.next; + } + + /* set up output to next available section of sliding window */ + strm->avail_out = DSIZE - have; + strm->next_out = window + have; + + /* inflate and check for errors */ + ret = inflate(strm, Z_BLOCK); + if (ret == Z_STREAM_ERROR) bye("internal stream error!", ""); + if (ret == Z_MEM_ERROR) bye("out of memory", ""); + if (ret == Z_DATA_ERROR) + bye("invalid compressed data--format violated in", name); + + /* update crc and sliding window pointer */ + 
crc = crc32(crc, window + have, DSIZE - have - strm->avail_out); + if (strm->avail_out) + have = DSIZE - strm->avail_out; + else { + have = 0; + full = 1; + } + + /* process end of block */ + if (strm->data_type & 128) { + if (strm->data_type & 64) + left = strm->data_type & 0x1f; + else { + lastbit = strm->data_type & 0x1f; + lastoff = lseek(gz.fd, 0L, SEEK_CUR) - strm->avail_in; + } + } + } while (ret != Z_STREAM_END); + inflateEnd(strm); + gz.left = strm->avail_in; + gz.next = strm->next_in; + + /* save the location of the end of the compressed data */ + end = lseek(gz.fd, 0L, SEEK_CUR) - gz.left; + + /* check gzip trailer and save total for deflate */ + if (crc != read4(&gz)) + bye("invalid compressed data--crc mismatch in ", name); + tot = strm->total_out; + if ((tot & 0xffffffffUL) != read4(&gz)) + bye("invalid compressed data--length mismatch in", name); + + /* if not at end of file, warn */ + if (gz.left || readin(&gz)) + fprintf(stderr, + "gzappend warning: junk at end of gzip file overwritten\n"); + + /* clear last block bit */ + lseek(gz.fd, lastoff - (lastbit != 0), SEEK_SET); + if (read(gz.fd, gz.buf, 1) != 1) bye("reading after seek on ", name); + *gz.buf = (unsigned char)(*gz.buf ^ (1 << ((8 - lastbit) & 7))); + lseek(gz.fd, -1L, SEEK_CUR); + if (write(gz.fd, gz.buf, 1) != 1) bye("writing after seek to ", name); + + /* if window wrapped, build dictionary from window by rotating */ + if (full) { + rotate(window, DSIZE, have); + have = DSIZE; + } + + /* set up deflate stream with window, crc, total_in, and leftover bits */ + ret = deflateInit2(strm, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY); + if (ret != Z_OK) bye("out of memory", ""); + deflateSetDictionary(strm, window, have); + strm->adler = crc; + strm->total_in = tot; + if (left) { + lseek(gz.fd, --end, SEEK_SET); + if (read(gz.fd, gz.buf, 1) != 1) bye("reading after seek on ", name); + deflatePrime(strm, 8 - left, *gz.buf); + } + lseek(gz.fd, end, SEEK_SET); + + /* clean up and return */ + 
free(window); + free(gz.buf); + return gz.fd; +} + +/* append file "name" to gzip file gd using deflate stream strm -- if last + is true, then finish off the deflate stream at the end */ +local void gztack(char *name, int gd, z_stream *strm, int last) +{ + int fd, len, ret; + unsigned left; + unsigned char *in, *out; + + /* open file to compress and append */ + fd = 0; + if (name != NULL) { + fd = open(name, O_RDONLY, 0); + if (fd == -1) + fprintf(stderr, "gzappend warning: %s not found, skipping ...\n", + name); + } + + /* allocate buffers */ + in = malloc(CHUNK); + out = malloc(CHUNK); + if (in == NULL || out == NULL) bye("out of memory", ""); + + /* compress input file and append to gzip file */ + do { + /* get more input */ + len = read(fd, in, CHUNK); + if (len == -1) { + fprintf(stderr, + "gzappend warning: error reading %s, skipping rest ...\n", + name); + len = 0; + } + strm->avail_in = (unsigned)len; + strm->next_in = in; + if (len) strm->adler = crc32(strm->adler, in, (unsigned)len); + + /* compress and write all available output */ + do { + strm->avail_out = CHUNK; + strm->next_out = out; + ret = deflate(strm, last && len == 0 ? 
Z_FINISH : Z_NO_FLUSH); + left = CHUNK - strm->avail_out; + while (left) { + len = write(gd, out + CHUNK - strm->avail_out - left, left); + if (len == -1) bye("writing gzip file", ""); + left -= (unsigned)len; + } + } while (strm->avail_out == 0 && ret != Z_STREAM_END); + } while (len != 0); + + /* write trailer after last entry */ + if (last) { + deflateEnd(strm); + out[0] = (unsigned char)(strm->adler); + out[1] = (unsigned char)(strm->adler >> 8); + out[2] = (unsigned char)(strm->adler >> 16); + out[3] = (unsigned char)(strm->adler >> 24); + out[4] = (unsigned char)(strm->total_in); + out[5] = (unsigned char)(strm->total_in >> 8); + out[6] = (unsigned char)(strm->total_in >> 16); + out[7] = (unsigned char)(strm->total_in >> 24); + len = 8; + do { + ret = write(gd, out + 8 - len, len); + if (ret == -1) bye("writing gzip file", ""); + len -= ret; + } while (len); + close(gd); + } + + /* clean up and return */ + free(out); + free(in); + if (fd > 0) close(fd); +} + +/* process the compression level option if present, scan the gzip file, and + append the specified files, or append the data from stdin if no other file + names are provided on the command line -- the gzip file must be writable + and seekable */ +int main(int argc, char **argv) +{ + int gd, level; + z_stream strm; + + /* ignore command name */ + argc--; argv++; + + /* provide usage if no arguments */ + if (*argv == NULL) { + printf( + "gzappend 1.2 (11 Oct 2012) Copyright (C) 2003, 2012 Mark Adler\n" + ); + printf( + "usage: gzappend [-level] file.gz [ addthis [ andthis ... 
]]\n"); + return 0; + } + + /* set compression level */ + level = Z_DEFAULT_COMPRESSION; + if (argv[0][0] == '-') { + if (argv[0][1] < '0' || argv[0][1] > '9' || argv[0][2] != 0) + bye("invalid compression level", ""); + level = argv[0][1] - '0'; + if (*++argv == NULL) bye("no gzip file name after options", ""); + } + + /* prepare to append to gzip file */ + gd = gzscan(*argv++, &strm, level); + + /* append files on command line, or from stdin if none */ + if (*argv == NULL) + gztack(NULL, gd, &strm, 1); + else + do { + gztack(*argv, gd, &strm, argv[1] == NULL); + } while (*++argv != NULL); + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/examples/gzjoin.c nodejs-0.11.15/deps/zlib/examples/gzjoin.c --- nodejs-0.11.13/deps/zlib/examples/gzjoin.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/gzjoin.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,449 @@ +/* gzjoin -- command to join gzip files into one gzip file + + Copyright (C) 2004, 2005, 2012 Mark Adler, all rights reserved + version 1.2, 14 Aug 2012 + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + Mark Adler madler@alumni.caltech.edu + */ + +/* + * Change history: + * + * 1.0 11 Dec 2004 - First version + * 1.1 12 Jun 2005 - Changed ssize_t to long for portability + * 1.2 14 Aug 2012 - Clean up for z_const usage + */ + +/* + gzjoin takes one or more gzip files on the command line and writes out a + single gzip file that will uncompress to the concatenation of the + uncompressed data from the individual gzip files. gzjoin does this without + having to recompress any of the data and without having to calculate a new + crc32 for the concatenated uncompressed data. gzjoin does however have to + decompress all of the input data in order to find the bits in the compressed + data that need to be modified to concatenate the streams. + + gzjoin does not do an integrity check on the input gzip files other than + checking the gzip header and decompressing the compressed data. They are + otherwise assumed to be complete and correct. + + Each joint between gzip files removes at least 18 bytes of previous trailer + and subsequent header, and inserts an average of about three bytes to the + compressed data in order to connect the streams. The output gzip file + has a minimal ten-byte gzip header with no file name or modification time. + + This program was written to illustrate the use of the Z_BLOCK option of + inflate() and the crc32_combine() function. gzjoin will not compile with + versions of zlib earlier than 1.2.3. 
+ */ + +#include <stdio.h> /* fputs(), fprintf(), fwrite(), putc() */ +#include <stdlib.h> /* exit(), malloc(), free() */ +#include <fcntl.h> /* open() */ +#include <unistd.h> /* close(), read(), lseek() */ +#include "zlib.h" + /* crc32(), crc32_combine(), inflateInit2(), inflate(), inflateEnd() */ + +#define local static + +/* exit with an error (return a value to allow use in an expression) */ +local int bail(char *why1, char *why2) +{ + fprintf(stderr, "gzjoin error: %s%s, output incomplete\n", why1, why2); + exit(1); + return 0; +} + +/* -- simple buffered file input with access to the buffer -- */ + +#define CHUNK 32768 /* must be a power of two and fit in unsigned */ + +/* bin buffered input file type */ +typedef struct { + char *name; /* name of file for error messages */ + int fd; /* file descriptor */ + unsigned left; /* bytes remaining at next */ + unsigned char *next; /* next byte to read */ + unsigned char *buf; /* allocated buffer of length CHUNK */ +} bin; + +/* close a buffered file and free allocated memory */ +local void bclose(bin *in) +{ + if (in != NULL) { + if (in->fd != -1) + close(in->fd); + if (in->buf != NULL) + free(in->buf); + free(in); + } +} + +/* open a buffered file for input, return a pointer to type bin, or NULL on + failure */ +local bin *bopen(char *name) +{ + bin *in; + + in = malloc(sizeof(bin)); + if (in == NULL) + return NULL; + in->buf = malloc(CHUNK); + in->fd = open(name, O_RDONLY, 0); + if (in->buf == NULL || in->fd == -1) { + bclose(in); + return NULL; + } + in->left = 0; + in->next = in->buf; + in->name = name; + return in; +} + +/* load buffer from file, return -1 on read error, 0 or 1 on success, with + 1 indicating that end-of-file was reached */ +local int bload(bin *in) +{ + long len; + + if (in == NULL) + return -1; + if (in->left != 0) + return 0; + in->next = in->buf; + do { + len = (long)read(in->fd, in->buf + in->left, CHUNK - in->left); + if (len < 0) + return -1; + in->left += (unsigned)len; + } while (len != 
0 && in->left < CHUNK); + return len == 0 ? 1 : 0; +} + +/* get a byte from the file, bail if end of file */ +#define bget(in) (in->left ? 0 : bload(in), \ + in->left ? (in->left--, *(in->next)++) : \ + bail("unexpected end of file on ", in->name)) + +/* get a four-byte little-endian unsigned integer from file */ +local unsigned long bget4(bin *in) +{ + unsigned long val; + + val = bget(in); + val += (unsigned long)(bget(in)) << 8; + val += (unsigned long)(bget(in)) << 16; + val += (unsigned long)(bget(in)) << 24; + return val; +} + +/* skip bytes in file */ +local void bskip(bin *in, unsigned skip) +{ + /* check pointer */ + if (in == NULL) + return; + + /* easy case -- skip bytes in buffer */ + if (skip <= in->left) { + in->left -= skip; + in->next += skip; + return; + } + + /* skip what's in buffer, discard buffer contents */ + skip -= in->left; + in->left = 0; + + /* seek past multiples of CHUNK bytes */ + if (skip > CHUNK) { + unsigned left; + + left = skip & (CHUNK - 1); + if (left == 0) { + /* exact number of chunks: seek all the way minus one byte to check + for end-of-file with a read */ + lseek(in->fd, skip - 1, SEEK_CUR); + if (read(in->fd, in->buf, 1) != 1) + bail("unexpected end of file on ", in->name); + return; + } + + /* skip the integral chunks, update skip with remainder */ + lseek(in->fd, skip - left, SEEK_CUR); + skip = left; + } + + /* read more input and skip remainder */ + bload(in); + if (skip > in->left) + bail("unexpected end of file on ", in->name); + in->left -= skip; + in->next += skip; +} + +/* -- end of buffered input functions -- */ + +/* skip the gzip header from file in */ +local void gzhead(bin *in) +{ + int flags; + + /* verify gzip magic header and compression method */ + if (bget(in) != 0x1f || bget(in) != 0x8b || bget(in) != 8) + bail(in->name, " is not a valid gzip file"); + + /* get and verify flags */ + flags = bget(in); + if ((flags & 0xe0) != 0) + bail("unknown reserved bits set in ", in->name); + + /* skip modification 
time, extra flags, and os */ + bskip(in, 6); + + /* skip extra field if present */ + if (flags & 4) { + unsigned len; + + len = bget(in); + len += (unsigned)(bget(in)) << 8; + bskip(in, len); + } + + /* skip file name if present */ + if (flags & 8) + while (bget(in) != 0) + ; + + /* skip comment if present */ + if (flags & 16) + while (bget(in) != 0) + ; + + /* skip header crc if present */ + if (flags & 2) + bskip(in, 2); +} + +/* write a four-byte little-endian unsigned integer to out */ +local void put4(unsigned long val, FILE *out) +{ + putc(val & 0xff, out); + putc((val >> 8) & 0xff, out); + putc((val >> 16) & 0xff, out); + putc((val >> 24) & 0xff, out); +} + +/* Load up zlib stream from buffered input, bail if end of file */ +local void zpull(z_streamp strm, bin *in) +{ + if (in->left == 0) + bload(in); + if (in->left == 0) + bail("unexpected end of file on ", in->name); + strm->avail_in = in->left; + strm->next_in = in->next; +} + +/* Write header for gzip file to out and initialize trailer. */ +local void gzinit(unsigned long *crc, unsigned long *tot, FILE *out) +{ + fwrite("\x1f\x8b\x08\0\0\0\0\0\0\xff", 1, 10, out); + *crc = crc32(0L, Z_NULL, 0); + *tot = 0; +} + +/* Copy the compressed data from name, zeroing the last block bit of the last + block if clr is true, and adding empty blocks as needed to get to a byte + boundary. If clr is false, then the last block becomes the last block of + the output, and the gzip trailer is written. crc and tot maintains the + crc and length (modulo 2^32) of the output for the trailer. The resulting + gzip file is written to out. gzinit() must be called before the first call + of gzcopy() to write the gzip header and to initialize crc and tot. 
*/ +local void gzcopy(char *name, int clr, unsigned long *crc, unsigned long *tot, + FILE *out) +{ + int ret; /* return value from zlib functions */ + int pos; /* where the "last block" bit is in byte */ + int last; /* true if processing the last block */ + bin *in; /* buffered input file */ + unsigned char *start; /* start of compressed data in buffer */ + unsigned char *junk; /* buffer for uncompressed data -- discarded */ + z_off_t len; /* length of uncompressed data (support > 4 GB) */ + z_stream strm; /* zlib inflate stream */ + + /* open gzip file and skip header */ + in = bopen(name); + if (in == NULL) + bail("could not open ", name); + gzhead(in); + + /* allocate buffer for uncompressed data and initialize raw inflate + stream */ + junk = malloc(CHUNK); + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit2(&strm, -15); + if (junk == NULL || ret != Z_OK) + bail("out of memory", ""); + + /* inflate and copy compressed data, clear last-block bit if requested */ + len = 0; + zpull(&strm, in); + start = in->next; + last = start[0] & 1; + if (last && clr) + start[0] &= ~1; + strm.avail_out = 0; + for (;;) { + /* if input used and output done, write used input and get more */ + if (strm.avail_in == 0 && strm.avail_out != 0) { + fwrite(start, 1, strm.next_in - start, out); + start = in->buf; + in->left = 0; + zpull(&strm, in); + } + + /* decompress -- return early when end-of-block reached */ + strm.avail_out = CHUNK; + strm.next_out = junk; + ret = inflate(&strm, Z_BLOCK); + switch (ret) { + case Z_MEM_ERROR: + bail("out of memory", ""); + case Z_DATA_ERROR: + bail("invalid compressed data in ", in->name); + } + + /* update length of uncompressed data */ + len += CHUNK - strm.avail_out; + + /* check for block boundary (only get this when block copied out) */ + if (strm.data_type & 128) { + /* if that was the last block, then done */ + if (last) + break; + + /* number of unused bits 
in last byte */ + pos = strm.data_type & 7; + + /* find the next last-block bit */ + if (pos != 0) { + /* next last-block bit is in last used byte */ + pos = 0x100 >> pos; + last = strm.next_in[-1] & pos; + if (last && clr) + in->buf[strm.next_in - in->buf - 1] &= ~pos; + } + else { + /* next last-block bit is in next unused byte */ + if (strm.avail_in == 0) { + /* don't have that byte yet -- get it */ + fwrite(start, 1, strm.next_in - start, out); + start = in->buf; + in->left = 0; + zpull(&strm, in); + } + last = strm.next_in[0] & 1; + if (last && clr) + in->buf[strm.next_in - in->buf] &= ~1; + } + } + } + + /* update buffer with unused input */ + in->left = strm.avail_in; + in->next = in->buf + (strm.next_in - in->buf); + + /* copy used input, write empty blocks to get to byte boundary */ + pos = strm.data_type & 7; + fwrite(start, 1, in->next - start - 1, out); + last = in->next[-1]; + if (pos == 0 || !clr) + /* already at byte boundary, or last file: write last byte */ + putc(last, out); + else { + /* append empty blocks to last byte */ + last &= ((0x100 >> pos) - 1); /* assure unused bits are zero */ + if (pos & 1) { + /* odd -- append an empty stored block */ + putc(last, out); + if (pos == 1) + putc(0, out); /* two more bits in block header */ + fwrite("\0\0\xff\xff", 1, 4, out); + } + else { + /* even -- append 1, 2, or 3 empty fixed blocks */ + switch (pos) { + case 6: + putc(last | 8, out); + last = 0; + case 4: + putc(last | 0x20, out); + last = 0; + case 2: + putc(last | 0x80, out); + putc(0, out); + } + } + } + + /* update crc and tot */ + *crc = crc32_combine(*crc, bget4(in), len); + *tot += (unsigned long)len; + + /* clean up */ + inflateEnd(&strm); + free(junk); + bclose(in); + + /* write trailer if this is the last gzip file */ + if (!clr) { + put4(*crc, out); + put4(*tot, out); + } +} + +/* join the gzip files on the command line, write result to stdout */ +int main(int argc, char **argv) +{ + unsigned long crc, tot; /* running crc and total 
uncompressed length */ + + /* skip command name */ + argc--; + argv++; + + /* show usage if no arguments */ + if (argc == 0) { + fputs("gzjoin usage: gzjoin f1.gz [f2.gz [f3.gz ...]] > fjoin.gz\n", + stderr); + return 0; + } + + /* join gzip files on command line and write to stdout */ + gzinit(&crc, &tot, stdout); + while (argc--) + gzcopy(*argv++, argc, &crc, &tot, stdout); + + /* done */ + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/examples/gzlog.c nodejs-0.11.15/deps/zlib/examples/gzlog.c --- nodejs-0.11.13/deps/zlib/examples/gzlog.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/gzlog.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,1059 @@ +/* + * gzlog.c + * Copyright (C) 2004, 2008, 2012 Mark Adler, all rights reserved + * For conditions of distribution and use, see copyright notice in gzlog.h + * version 2.2, 14 Aug 2012 + */ + +/* + gzlog provides a mechanism for frequently appending short strings to a gzip + file that is efficient both in execution time and compression ratio. The + strategy is to write the short strings in an uncompressed form to the end of + the gzip file, only compressing when the amount of uncompressed data has + reached a given threshold. + + gzlog also provides protection against interruptions in the process due to + system crashes. The status of the operation is recorded in an extra field + in the gzip file, and is only updated once the gzip file is brought to a + valid state. The last data to be appended or compressed is saved in an + auxiliary file, so that if the operation is interrupted, it can be completed + the next time an append operation is attempted. + + gzlog maintains another auxiliary file with the last 32K of data from the + compressed portion, which is preloaded for the compression of the subsequent + data. This minimizes the impact to the compression ratio of appending. 
+ */ + +/* + Operations Concept: + + Files (log name "foo"): + foo.gz -- gzip file with the complete log + foo.add -- last message to append or last data to compress + foo.dict -- dictionary of the last 32K of data for next compression + foo.temp -- temporary dictionary file for compression after this one + foo.lock -- lock file for reading and writing the other files + foo.repairs -- log file for log file recovery operations (not compressed) + + gzip file structure: + - fixed-length (no file name) header with extra field (see below) + - compressed data ending initially with empty stored block + - uncompressed data filling out originally empty stored block and + subsequent stored blocks as needed (16K max each) + - gzip trailer + - no junk at end (no other gzip streams) + + When appending data, the information in the first three items above plus the + foo.add file are sufficient to recover an interrupted append operation. The + extra field has the necessary information to restore the start of the last + stored block and determine where to append the data in the foo.add file, as + well as the crc and length of the gzip data before the append operation. + + The foo.add file is created before the gzip file is marked for append, and + deleted after the gzip file is marked as complete. So if the append + operation is interrupted, the data to add will still be there. If due to + some external force, the foo.add file gets deleted between when the append + operation was interrupted and when recovery is attempted, the gzip file will + still be restored, but without the appended data. + + When compressing data, the information in the first two items above plus the + foo.add file are sufficient to recover an interrupted compress operation. + The extra field has the necessary information to find the end of the + compressed data, and contains both the crc and length of just the compressed + data and of the complete set of data including the contents of the foo.add + file. 
+ + Again, the foo.add file is maintained during the compress operation in case + of an interruption. If in the unlikely event the foo.add file with the data + to be compressed is missing due to some external force, a gzip file with + just the previous compressed data will be reconstructed. In this case, all + of the data that was to be compressed is lost (approximately one megabyte). + This will not occur if all that happened was an interruption of the compress + operation. + + The third state that is marked is the replacement of the old dictionary with + the new dictionary after a compress operation. Once compression is + complete, the gzip file is marked as being in the replace state. This + completes the gzip file, so an interrupt after being so marked does not + result in recompression. Then the dictionary file is replaced, and the gzip + file is marked as completed. This state prevents the possibility of + restarting compression with the wrong dictionary file. + + All three operations are wrapped by a lock/unlock procedure. In order to + gain exclusive access to the log files, first a foo.lock file must be + exclusively created. When all operations are complete, the lock is + released by deleting the foo.lock file. If when attempting to create the + lock file, it already exists and the modify time of the lock file is more + than five minutes old (set by the PATIENCE define below), then the old + lock file is considered stale and deleted, and the exclusive creation of + the lock file is retried. To assure that there are no false assessments + of the staleness of the lock file, the operations periodically touch the + lock file to update the modified date. + + Following is the definition of the extra field with all of the information + required to enable the above append and compress operations and their + recovery if interrupted. Multi-byte values are stored little endian + (consistent with the gzip format). File pointers are eight bytes long. 
+ The crc's and lengths for the gzip trailer are four bytes long. (Note that + the length at the end of a gzip file is used for error checking only, and + for large files is actually the length modulo 2^32.) The stored block + length is two bytes long. The gzip extra field two-byte identification is + "ap" for append. It is assumed that writing the extra field to the file is + an "atomic" operation. That is, either all of the extra field is written + to the file, or none of it is, if the operation is interrupted right at the + point of updating the extra field. This is a reasonable assumption, since + the extra field is within the first 52 bytes of the file, which is smaller + than any expected block size for a mass storage device (usually 512 bytes or + larger). + + Extra field (35 bytes): + - Pointer to first stored block length -- this points to the two-byte length + of the first stored block, which is followed by the two-byte, one's + complement of that length. The stored block length is preceded by the + three-bit header of the stored block, which is the actual start of the + stored block in the deflate format. See the bit offset field below. + - Pointer to the last stored block length. This is the same as above, but + for the last stored block of the uncompressed data in the gzip file. + Initially this is the same as the first stored block length pointer. + When the stored block gets to 16K (see the MAX_STORE define), then a new + stored block as added, at which point the last stored block length pointer + is different from the first stored block length pointer. When they are + different, the first bit of the last stored block header is eight bits, or + one byte back from the block length. + - Compressed data crc and length. This is the crc and length of the data + that is in the compressed portion of the deflate stream. 
These are used + only in the event that the foo.add file containing the data to compress is + lost after a compress operation is interrupted. + - Total data crc and length. This is the crc and length of all of the data + stored in the gzip file, compressed and uncompressed. It is used to + reconstruct the gzip trailer when compressing, as well as when recovering + interrupted operations. + - Final stored block length. This is used to quickly find where to append, + and allows the restoration of the original final stored block state when + an append operation is interrupted. + - First stored block start as the number of bits back from the final stored + block first length byte. This value is in the range of 3..10, and is + stored as the low three bits of the final byte of the extra field after + subtracting three (0..7). This allows the last-block bit of the stored + block header to be updated when a new stored block is added, for the case + when the first stored block and the last stored block are the same. (When + they are different, the numbers of bits back is known to be eight.) This + also allows for new compressed data to be appended to the old compressed + data in the compress operation, overwriting the previous first stored + block, or for the compressed data to be terminated and a valid gzip file + reconstructed on the off chance that a compression operation was + interrupted and the data to compress in the foo.add file was deleted. + - The operation in process. This is the next two bits in the last byte (the + bits under the mask 0x18). The are interpreted as 0: nothing in process, + 1: append in process, 2: compress in process, 3: replace in process. + - The top three bits of the last byte in the extra field are reserved and + are currently set to zero. + + Main procedure: + - Exclusively create the foo.lock file using the O_CREAT and O_EXCL modes of + the system open() call. 
If the modify time of an existing lock file is + more than PATIENCE seconds old, then the lock file is deleted and the + exclusive create is retried. + - Load the extra field from the foo.gz file, and see if an operation was in + progress but not completed. If so, apply the recovery procedure below. + - Perform the append procedure with the provided data. + - If the uncompressed data in the foo.gz file is 1MB or more, apply the + compress procedure. + - Delete the foo.lock file. + + Append procedure: + - Put what to append in the foo.add file so that the operation can be + restarted if this procedure is interrupted. + - Mark the foo.gz extra field with the append operation in progress. + + Restore the original last-block bit and stored block length of the last + stored block from the information in the extra field, in case a previous + append operation was interrupted. + - Append the provided data to the last stored block, creating new stored + blocks as needed and updating the stored blocks last-block bits and + lengths. + - Update the crc and length with the new data, and write the gzip trailer. + - Write over the extra field (with a single write operation) with the new + pointers, lengths, and crc's, and mark the gzip file as not in process. + Though there is still a foo.add file, it will be ignored since nothing + is in process. If a foo.add file is leftover from a previously + completed operation, it is truncated when writing new data to it. + - Delete the foo.add file. + + Compress and replace procedures: + - Read all of the uncompressed data in the stored blocks in foo.gz and write + it to foo.add. Also write foo.temp with the last 32K of that data to + provide a dictionary for the next invocation of this procedure. + - Rewrite the extra field marking foo.gz with a compression in process. 
+ * If there is no data provided to compress (due to a missing foo.add file + when recovering), reconstruct and truncate the foo.gz file to contain + only the previous compressed data and proceed to the step after the next + one. Otherwise ... + - Compress the data with the dictionary in foo.dict, and write to the + foo.gz file starting at the bit immediately following the last previously + compressed block. If there is no foo.dict, proceed anyway with the + compression at slightly reduced efficiency. (For the foo.dict file to be + missing requires some external failure beyond simply the interruption of + a compress operation.) During this process, the foo.lock file is + periodically touched to assure that that file is not considered stale by + another process before we're done. The deflation is terminated with a + non-last empty static block (10 bits long), that is then located and + written over by a last-bit-set empty stored block. + - Append the crc and length of the data in the gzip file (previously + calculated during the append operations). + - Write over the extra field with the updated stored block offsets, bits + back, crc's, and lengths, and mark foo.gz as in process for a replacement + of the dictionary. + @ Delete the foo.add file. + - Replace foo.dict with foo.temp. + - Write over the extra field, marking foo.gz as complete. + + Recovery procedure: + - If not a replace recovery, read in the foo.add file, and provide that data + to the appropriate recovery below. If there is no foo.add file, provide + a zero data length to the recovery. In that case, the append recovery + restores the foo.gz to the previous compressed + uncompressed data state. + For the the compress recovery, a missing foo.add file results in foo.gz + being restored to the previous compressed-only data state. 
+ - Append recovery: + - Pick up append at + step above + - Compress recovery: + - Pick up compress at * step above + - Replace recovery: + - Pick up compress at @ step above + - Log the repair with a date stamp in foo.repairs + */ + +#include <sys/types.h> +#include <stdio.h> /* rename, fopen, fprintf, fclose */ +#include <stdlib.h> /* malloc, free */ +#include <string.h> /* strlen, strrchr, strcpy, strncpy, strcmp */ +#include <fcntl.h> /* open */ +#include <unistd.h> /* lseek, read, write, close, unlink, sleep, */ + /* ftruncate, fsync */ +#include <errno.h> /* errno */ +#include <time.h> /* time, ctime */ +#include <sys/stat.h> /* stat */ +#include <sys/time.h> /* utimes */ +#include "zlib.h" /* crc32 */ + +#include "gzlog.h" /* header for external access */ + +#define local static +typedef unsigned int uint; +typedef unsigned long ulong; + +/* Macro for debugging to deterministically force recovery operations */ +#ifdef DEBUG + #include <setjmp.h> /* longjmp */ + jmp_buf gzlog_jump; /* where to go back to */ + int gzlog_bail = 0; /* which point to bail at (1..8) */ + int gzlog_count = -1; /* number of times through to wait */ +# define BAIL(n) do { if (n == gzlog_bail && gzlog_count-- == 0) \ + longjmp(gzlog_jump, gzlog_bail); } while (0) +#else +# define BAIL(n) +#endif + +/* how old the lock file can be in seconds before considering it stale */ +#define PATIENCE 300 + +/* maximum stored block size in Kbytes -- must be in 1..63 */ +#define MAX_STORE 16 + +/* number of stored Kbytes to trigger compression (must be >= 32 to allow + dictionary construction, and <= 204 * MAX_STORE, in order for >> 10 to + discard the stored block headers contribution of five bytes each) */ +#define TRIGGER 1024 + +/* size of a deflate dictionary (this cannot be changed) */ +#define DICT 32768U + +/* values for the operation (2 bits) */ +#define NO_OP 0 +#define APPEND_OP 1 +#define COMPRESS_OP 2 +#define REPLACE_OP 3 + +/* macros to extract little-endian integers from an unsigned 
byte buffer */ +#define PULL2(p) ((p)[0]+((uint)((p)[1])<<8)) +#define PULL4(p) (PULL2(p)+((ulong)PULL2(p+2)<<16)) +#define PULL8(p) (PULL4(p)+((off_t)PULL4(p+4)<<32)) + +/* macros to store integers into a byte buffer in little-endian order */ +#define PUT2(p,a) do {(p)[0]=a;(p)[1]=(a)>>8;} while(0) +#define PUT4(p,a) do {PUT2(p,a);PUT2(p+2,a>>16);} while(0) +#define PUT8(p,a) do {PUT4(p,a);PUT4(p+4,a>>32);} while(0) + +/* internal structure for log information */ +#define LOGID "\106\035\172" /* should be three non-zero characters */ +struct log { + char id[4]; /* contains LOGID to detect inadvertent overwrites */ + int fd; /* file descriptor for .gz file, opened read/write */ + char *path; /* allocated path, e.g. "/var/log/foo" or "foo" */ + char *end; /* end of path, for appending suffices such as ".gz" */ + off_t first; /* offset of first stored block first length byte */ + int back; /* location of first block id in bits back from first */ + uint stored; /* bytes currently in last stored block */ + off_t last; /* offset of last stored block first length byte */ + ulong ccrc; /* crc of compressed data */ + ulong clen; /* length (modulo 2^32) of compressed data */ + ulong tcrc; /* crc of total data */ + ulong tlen; /* length (modulo 2^32) of total data */ + time_t lock; /* last modify time of our lock file */ +}; + +/* gzip header for gzlog */ +local unsigned char log_gzhead[] = { + 0x1f, 0x8b, /* magic gzip id */ + 8, /* compression method is deflate */ + 4, /* there is an extra field (no file name) */ + 0, 0, 0, 0, /* no modification time provided */ + 0, 0xff, /* no extra flags, no OS specified */ + 39, 0, 'a', 'p', 35, 0 /* extra field with "ap" subfield */ + /* 35 is EXTRA, 39 is EXTRA + 4 */ +}; + +#define HEAD sizeof(log_gzhead) /* should be 16 */ + +/* initial gzip extra field content (52 == HEAD + EXTRA + 1) */ +local unsigned char log_gzext[] = { + 52, 0, 0, 0, 0, 0, 0, 0, /* offset of first stored block length */ + 52, 0, 0, 0, 0, 0, 0, 0, /* offset of 
last stored block length */ + 0, 0, 0, 0, 0, 0, 0, 0, /* compressed data crc and length */ + 0, 0, 0, 0, 0, 0, 0, 0, /* total data crc and length */ + 0, 0, /* final stored block data length */ + 5 /* op is NO_OP, last bit 8 bits back */ +}; + +#define EXTRA sizeof(log_gzext) /* should be 35 */ + +/* initial gzip data and trailer */ +local unsigned char log_gzbody[] = { + 1, 0, 0, 0xff, 0xff, /* empty stored block (last) */ + 0, 0, 0, 0, /* crc */ + 0, 0, 0, 0 /* uncompressed length */ +}; + +#define BODY sizeof(log_gzbody) + +/* Exclusively create foo.lock in order to negotiate exclusive access to the + foo.* files. If the modify time of an existing lock file is greater than + PATIENCE seconds in the past, then consider the lock file to have been + abandoned, delete it, and try the exclusive create again. Save the lock + file modify time for verification of ownership. Return 0 on success, or -1 + on failure, usually due to an access restriction or invalid path. Note that + if stat() or unlink() fails, it may be due to another process noticing the + abandoned lock file a smidge sooner and deleting it, so those are not + flagged as an error. */ +local int log_lock(struct log *log) +{ + int fd; + struct stat st; + + strcpy(log->end, ".lock"); + while ((fd = open(log->path, O_CREAT | O_EXCL, 0644)) < 0) { + if (errno != EEXIST) + return -1; + if (stat(log->path, &st) == 0 && time(NULL) - st.st_mtime > PATIENCE) { + unlink(log->path); + continue; + } + sleep(2); /* relinquish the CPU for two seconds while waiting */ + } + close(fd); + if (stat(log->path, &st) == 0) + log->lock = st.st_mtime; + return 0; +} + +/* Update the modify time of the lock file to now, in order to prevent another + task from thinking that the lock is stale. Save the lock file modify time + for verification of ownership. 
*/ +local void log_touch(struct log *log) +{ + struct stat st; + + strcpy(log->end, ".lock"); + utimes(log->path, NULL); + if (stat(log->path, &st) == 0) + log->lock = st.st_mtime; +} + +/* Check the log file modify time against what is expected. Return true if + this is not our lock. If it is our lock, touch it to keep it. */ +local int log_check(struct log *log) +{ + struct stat st; + + strcpy(log->end, ".lock"); + if (stat(log->path, &st) || st.st_mtime != log->lock) + return 1; + log_touch(log); + return 0; +} + +/* Unlock a previously acquired lock, but only if it's ours. */ +local void log_unlock(struct log *log) +{ + if (log_check(log)) + return; + strcpy(log->end, ".lock"); + unlink(log->path); + log->lock = 0; +} + +/* Check the gzip header and read in the extra field, filling in the values in + the log structure. Return op on success or -1 if the gzip header was not as + expected. op is the current operation in progress last written to the extra + field. This assumes that the gzip file has already been opened, with the + file descriptor log->fd. */ +local int log_head(struct log *log) +{ + int op; + unsigned char buf[HEAD + EXTRA]; + + if (lseek(log->fd, 0, SEEK_SET) < 0 || + read(log->fd, buf, HEAD + EXTRA) != HEAD + EXTRA || + memcmp(buf, log_gzhead, HEAD)) { + return -1; + } + log->first = PULL8(buf + HEAD); + log->last = PULL8(buf + HEAD + 8); + log->ccrc = PULL4(buf + HEAD + 16); + log->clen = PULL4(buf + HEAD + 20); + log->tcrc = PULL4(buf + HEAD + 24); + log->tlen = PULL4(buf + HEAD + 28); + log->stored = PULL2(buf + HEAD + 32); + log->back = 3 + (buf[HEAD + 34] & 7); + op = (buf[HEAD + 34] >> 3) & 3; + return op; +} + +/* Write over the extra field contents, marking the operation as op. Use fsync + to assure that the device is written to, and in the requested order. This + operation, and only this operation, is assumed to be atomic in order to + assure that the log is recoverable in the event of an interruption at any + point in the process. 
Return -1 if the write to foo.gz failed. */ +local int log_mark(struct log *log, int op) +{ + int ret; + unsigned char ext[EXTRA]; + + PUT8(ext, log->first); + PUT8(ext + 8, log->last); + PUT4(ext + 16, log->ccrc); + PUT4(ext + 20, log->clen); + PUT4(ext + 24, log->tcrc); + PUT4(ext + 28, log->tlen); + PUT2(ext + 32, log->stored); + ext[34] = log->back - 3 + (op << 3); + fsync(log->fd); + ret = lseek(log->fd, HEAD, SEEK_SET) < 0 || + write(log->fd, ext, EXTRA) != EXTRA ? -1 : 0; + fsync(log->fd); + return ret; +} + +/* Rewrite the last block header bits and subsequent zero bits to get to a byte + boundary, setting the last block bit if last is true, and then write the + remainder of the stored block header (length and one's complement). Leave + the file pointer after the end of the last stored block data. Return -1 if + there is a read or write failure on the foo.gz file */ +local int log_last(struct log *log, int last) +{ + int back, len, mask; + unsigned char buf[6]; + + /* determine the locations of the bytes and bits to modify */ + back = log->last == log->first ? log->back : 8; + len = back > 8 ? 2 : 1; /* bytes back from log->last */ + mask = 0x80 >> ((back - 1) & 7); /* mask for block last-bit */ + + /* get the byte to modify (one or two back) into buf[0] -- don't need to + read the byte if the last-bit is eight bits back, since in that case + the entire byte will be modified */ + buf[0] = 0; + if (back != 8 && (lseek(log->fd, log->last - len, SEEK_SET) < 0 || + read(log->fd, buf, 1) != 1)) + return -1; + + /* change the last-bit of the last stored block as requested -- note + that all bits above the last-bit are set to zero, per the type bits + of a stored block being 00 and per the convention that the bits to + bring the stream to a byte boundary are also zeros */ + buf[1] = 0; + buf[2 - len] = (*buf & (mask - 1)) + (last ? 
mask : 0); + + /* write the modified stored block header and lengths, move the file + pointer to after the last stored block data */ + PUT2(buf + 2, log->stored); + PUT2(buf + 4, log->stored ^ 0xffff); + return lseek(log->fd, log->last - len, SEEK_SET) < 0 || + write(log->fd, buf + 2 - len, len + 4) != len + 4 || + lseek(log->fd, log->stored, SEEK_CUR) < 0 ? -1 : 0; +} + +/* Append len bytes from data to the locked and open log file. len may be zero + if recovering and no .add file was found. In that case, the previous state + of the foo.gz file is restored. The data is appended uncompressed in + deflate stored blocks. Return -1 if there was an error reading or writing + the foo.gz file. */ +local int log_append(struct log *log, unsigned char *data, size_t len) +{ + uint put; + off_t end; + unsigned char buf[8]; + + /* set the last block last-bit and length, in case recovering an + interrupted append, then position the file pointer to append to the + block */ + if (log_last(log, 1)) + return -1; + + /* append, adding stored blocks and updating the offset of the last stored + block as needed, and update the total crc and length */ + while (len) { + /* append as much as we can to the last block */ + put = (MAX_STORE << 10) - log->stored; + if (put > len) + put = (uint)len; + if (put) { + if (write(log->fd, data, put) != put) + return -1; + BAIL(1); + log->tcrc = crc32(log->tcrc, data, put); + log->tlen += put; + log->stored += put; + data += put; + len -= put; + } + + /* if we need to, add a new empty stored block */ + if (len) { + /* mark current block as not last */ + if (log_last(log, 0)) + return -1; + + /* point to new, empty stored block */ + log->last += 4 + log->stored + 1; + log->stored = 0; + } + + /* mark last block as last, update its length */ + if (log_last(log, 1)) + return -1; + BAIL(2); + } + + /* write the new crc and length trailer, and truncate just in case (could + be recovering from partial append with a missing foo.add file) */ + PUT4(buf, 
log->tcrc); + PUT4(buf + 4, log->tlen); + if (write(log->fd, buf, 8) != 8 || + (end = lseek(log->fd, 0, SEEK_CUR)) < 0 || ftruncate(log->fd, end)) + return -1; + + /* write the extra field, marking the log file as done, delete .add file */ + if (log_mark(log, NO_OP)) + return -1; + strcpy(log->end, ".add"); + unlink(log->path); /* ignore error, since may not exist */ + return 0; +} + +/* Replace the foo.dict file with the foo.temp file. Also delete the foo.add + file, since the compress operation may have been interrupted before that was + done. Returns 1 if memory could not be allocated, or -1 if reading or + writing foo.gz fails, or if the rename fails for some reason other than + foo.temp not existing. foo.temp not existing is a permitted error, since + the replace operation may have been interrupted after the rename is done, + but before foo.gz is marked as complete. */ +local int log_replace(struct log *log) +{ + int ret; + char *dest; + + /* delete foo.add file */ + strcpy(log->end, ".add"); + unlink(log->path); /* ignore error, since may not exist */ + BAIL(3); + + /* rename foo.name to foo.dict, replacing foo.dict if it exists */ + strcpy(log->end, ".dict"); + dest = malloc(strlen(log->path) + 1); + if (dest == NULL) + return -2; + strcpy(dest, log->path); + strcpy(log->end, ".temp"); + ret = rename(log->path, dest); + free(dest); + if (ret && errno != ENOENT) + return -1; + BAIL(4); + + /* mark the foo.gz file as done */ + return log_mark(log, NO_OP); +} + +/* Compress the len bytes at data and append the compressed data to the + foo.gz deflate data immediately after the previous compressed data. This + overwrites the previous uncompressed data, which was stored in foo.add + and is the data provided in data[0..len-1]. If this operation is + interrupted, it picks up at the start of this routine, with the foo.add + file read in again. 
If there is no data to compress (len == 0), then we + simply terminate the foo.gz file after the previously compressed data, + appending a final empty stored block and the gzip trailer. Return -1 if + reading or writing the log.gz file failed, or -2 if there was a memory + allocation failure. */ +local int log_compress(struct log *log, unsigned char *data, size_t len) +{ + int fd; + uint got, max; + ssize_t dict; + off_t end; + z_stream strm; + unsigned char buf[DICT]; + + /* compress and append compressed data */ + if (len) { + /* set up for deflate, allocating memory */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -15, 8, + Z_DEFAULT_STRATEGY) != Z_OK) + return -2; + + /* read in dictionary (last 32K of data that was compressed) */ + strcpy(log->end, ".dict"); + fd = open(log->path, O_RDONLY, 0); + if (fd >= 0) { + dict = read(fd, buf, DICT); + close(fd); + if (dict < 0) { + deflateEnd(&strm); + return -1; + } + if (dict) + deflateSetDictionary(&strm, buf, (uint)dict); + } + log_touch(log); + + /* prime deflate with last bits of previous block, position write + pointer to write those bits and overwrite what follows */ + if (lseek(log->fd, log->first - (log->back > 8 ? 2 : 1), + SEEK_SET) < 0 || + read(log->fd, buf, 1) != 1 || lseek(log->fd, -1, SEEK_CUR) < 0) { + deflateEnd(&strm); + return -1; + } + deflatePrime(&strm, (8 - log->back) & 7, *buf); + + /* compress, finishing with a partial non-last empty static block */ + strm.next_in = data; + max = (((uint)0 - 1) >> 1) + 1; /* in case int smaller than size_t */ + do { + strm.avail_in = len > max ? max : (uint)len; + len -= strm.avail_in; + do { + strm.avail_out = DICT; + strm.next_out = buf; + deflate(&strm, len ? 
Z_NO_FLUSH : Z_PARTIAL_FLUSH); + got = DICT - strm.avail_out; + if (got && write(log->fd, buf, got) != got) { + deflateEnd(&strm); + return -1; + } + log_touch(log); + } while (strm.avail_out == 0); + } while (len); + deflateEnd(&strm); + BAIL(5); + + /* find start of empty static block -- scanning backwards the first one + bit is the second bit of the block, if the last byte is zero, then + we know the byte before that has a one in the top bit, since an + empty static block is ten bits long */ + if ((log->first = lseek(log->fd, -1, SEEK_CUR)) < 0 || + read(log->fd, buf, 1) != 1) + return -1; + log->first++; + if (*buf) { + log->back = 1; + while ((*buf & ((uint)1 << (8 - log->back++))) == 0) + ; /* guaranteed to terminate, since *buf != 0 */ + } + else + log->back = 10; + + /* update compressed crc and length */ + log->ccrc = log->tcrc; + log->clen = log->tlen; + } + else { + /* no data to compress -- fix up existing gzip stream */ + log->tcrc = log->ccrc; + log->tlen = log->clen; + } + + /* complete and truncate gzip stream */ + log->last = log->first; + log->stored = 0; + PUT4(buf, log->tcrc); + PUT4(buf + 4, log->tlen); + if (log_last(log, 1) || write(log->fd, buf, 8) != 8 || + (end = lseek(log->fd, 0, SEEK_CUR)) < 0 || ftruncate(log->fd, end)) + return -1; + BAIL(6); + + /* mark as being in the replace operation */ + if (log_mark(log, REPLACE_OP)) + return -1; + + /* execute the replace operation and mark the file as done */ + return log_replace(log); +} + +/* log a repair record to the .repairs file */ +local void log_log(struct log *log, int op, char *record) +{ + time_t now; + FILE *rec; + + now = time(NULL); + strcpy(log->end, ".repairs"); + rec = fopen(log->path, "a"); + if (rec == NULL) + return; + fprintf(rec, "%.24s %s recovery: %s\n", ctime(&now), op == APPEND_OP ? + "append" : (op == COMPRESS_OP ? "compress" : "replace"), record); + fclose(rec); + return; +} + +/* Recover the interrupted operation op. 
First read foo.add for recovering an + append or compress operation. Return -1 if there was an error reading or + writing foo.gz or reading an existing foo.add, or -2 if there was a memory + allocation failure. */ +local int log_recover(struct log *log, int op) +{ + int fd, ret = 0; + unsigned char *data = NULL; + size_t len = 0; + struct stat st; + + /* log recovery */ + log_log(log, op, "start"); + + /* load foo.add file if expected and present */ + if (op == APPEND_OP || op == COMPRESS_OP) { + strcpy(log->end, ".add"); + if (stat(log->path, &st) == 0 && st.st_size) { + len = (size_t)(st.st_size); + if ((off_t)len != st.st_size || + (data = malloc(st.st_size)) == NULL) { + log_log(log, op, "allocation failure"); + return -2; + } + if ((fd = open(log->path, O_RDONLY, 0)) < 0) { + log_log(log, op, ".add file read failure"); + return -1; + } + ret = (size_t)read(fd, data, len) != len; + close(fd); + if (ret) { + log_log(log, op, ".add file read failure"); + return -1; + } + log_log(log, op, "loaded .add file"); + } + else + log_log(log, op, "missing .add file!"); + } + + /* recover the interrupted operation */ + switch (op) { + case APPEND_OP: + ret = log_append(log, data, len); + break; + case COMPRESS_OP: + ret = log_compress(log, data, len); + break; + case REPLACE_OP: + ret = log_replace(log); + } + + /* log status */ + log_log(log, op, ret ? "failure" : "complete"); + + /* clean up */ + if (data != NULL) + free(data); + return ret; +} + +/* Close the foo.gz file (if open) and release the lock. */ +local void log_close(struct log *log) +{ + if (log->fd >= 0) + close(log->fd); + log->fd = -1; + log_unlock(log); +} + +/* Open foo.gz, verify the header, and load the extra field contents, after + first creating the foo.lock file to gain exclusive access to the foo.* + files. If foo.gz does not exist or is empty, then write the initial header, + extra, and body content of an empty foo.gz log file. 
If there is an error + creating the lock file due to access restrictions, or an error reading or + writing the foo.gz file, or if the foo.gz file is not a proper log file for + this object (e.g. not a gzip file or does not contain the expected extra + field), then return true. If there is an error, the lock is released. + Otherwise, the lock is left in place. */ +local int log_open(struct log *log) +{ + int op; + + /* release open file resource if left over -- can occur if lock lost + between gzlog_open() and gzlog_write() */ + if (log->fd >= 0) + close(log->fd); + log->fd = -1; + + /* negotiate exclusive access */ + if (log_lock(log) < 0) + return -1; + + /* open the log file, foo.gz */ + strcpy(log->end, ".gz"); + log->fd = open(log->path, O_RDWR | O_CREAT, 0644); + if (log->fd < 0) { + log_close(log); + return -1; + } + + /* if new, initialize foo.gz with an empty log, delete old dictionary */ + if (lseek(log->fd, 0, SEEK_END) == 0) { + if (write(log->fd, log_gzhead, HEAD) != HEAD || + write(log->fd, log_gzext, EXTRA) != EXTRA || + write(log->fd, log_gzbody, BODY) != BODY) { + log_close(log); + return -1; + } + strcpy(log->end, ".dict"); + unlink(log->path); + } + + /* verify log file and load extra field information */ + if ((op = log_head(log)) < 0) { + log_close(log); + return -1; + } + + /* check for interrupted process and if so, recover */ + if (op != NO_OP && log_recover(log, op)) { + log_close(log); + return -1; + } + + /* touch the lock file to prevent another process from grabbing it */ + log_touch(log); + return 0; +} + +/* See gzlog.h for the description of the external methods below */ +gzlog *gzlog_open(char *path) +{ + size_t n; + struct log *log; + + /* check arguments */ + if (path == NULL || *path == 0) + return NULL; + + /* allocate and initialize log structure */ + log = malloc(sizeof(struct log)); + if (log == NULL) + return NULL; + strcpy(log->id, LOGID); + log->fd = -1; + + /* save path and end of path for name construction */ + n = 
strlen(path); + log->path = malloc(n + 9); /* allow for ".repairs" */ + if (log->path == NULL) { + free(log); + return NULL; + } + strcpy(log->path, path); + log->end = log->path + n; + + /* gain exclusive access and verify log file -- may perform a + recovery operation if needed */ + if (log_open(log)) { + free(log->path); + free(log); + return NULL; + } + + /* return pointer to log structure */ + return log; +} + +/* gzlog_compress() return values: + 0: all good + -1: file i/o error (usually access issue) + -2: memory allocation failure + -3: invalid log pointer argument */ +int gzlog_compress(gzlog *logd) +{ + int fd, ret; + uint block; + size_t len, next; + unsigned char *data, buf[5]; + struct log *log = logd; + + /* check arguments */ + if (log == NULL || strcmp(log->id, LOGID)) + return -3; + + /* see if we lost the lock -- if so get it again and reload the extra + field information (it probably changed), recover last operation if + necessary */ + if (log_check(log) && log_open(log)) + return -1; + + /* create space for uncompressed data */ + len = ((size_t)(log->last - log->first) & ~(((size_t)1 << 10) - 1)) + + log->stored; + if ((data = malloc(len)) == NULL) + return -2; + + /* do statement here is just a cheap trick for error handling */ + do { + /* read in the uncompressed data */ + if (lseek(log->fd, log->first - 1, SEEK_SET) < 0) + break; + next = 0; + while (next < len) { + if (read(log->fd, buf, 5) != 5) + break; + block = PULL2(buf + 1); + if (next + block > len || + read(log->fd, (char *)data + next, block) != block) + break; + next += block; + } + if (lseek(log->fd, 0, SEEK_CUR) != log->last + 4 + log->stored) + break; + log_touch(log); + + /* write the uncompressed data to the .add file */ + strcpy(log->end, ".add"); + fd = open(log->path, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if (fd < 0) + break; + ret = (size_t)write(fd, data, len) != len; + if (ret | close(fd)) + break; + log_touch(log); + + /* write the dictionary for the next compress to 
the .temp file */ + strcpy(log->end, ".temp"); + fd = open(log->path, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if (fd < 0) + break; + next = DICT > len ? len : DICT; + ret = (size_t)write(fd, (char *)data + len - next, next) != next; + if (ret | close(fd)) + break; + log_touch(log); + + /* roll back to compressed data, mark the compress in progress */ + log->last = log->first; + log->stored = 0; + if (log_mark(log, COMPRESS_OP)) + break; + BAIL(7); + + /* compress and append the data (clears mark) */ + ret = log_compress(log, data, len); + free(data); + return ret; + } while (0); + + /* broke out of do above on i/o error */ + free(data); + return -1; +} + +/* gzlog_write() return values: + 0: all good + -1: file i/o error (usually access issue) + -2: memory allocation failure + -3: invalid log pointer argument */ +int gzlog_write(gzlog *logd, void *data, size_t len) +{ + int fd, ret; + struct log *log = logd; + + /* check arguments */ + if (log == NULL || strcmp(log->id, LOGID)) + return -3; + if (data == NULL || len <= 0) + return 0; + + /* see if we lost the lock -- if so get it again and reload the extra + field information (it probably changed), recover last operation if + necessary */ + if (log_check(log) && log_open(log)) + return -1; + + /* create and write .add file */ + strcpy(log->end, ".add"); + fd = open(log->path, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if (fd < 0) + return -1; + ret = (size_t)write(fd, data, len) != len; + if (ret | close(fd)) + return -1; + log_touch(log); + + /* mark log file with append in progress */ + if (log_mark(log, APPEND_OP)) + return -1; + BAIL(8); + + /* append data (clears mark) */ + if (log_append(log, data, len)) + return -1; + + /* check to see if it's time to compress -- if not, then done */ + if (((log->last - log->first) >> 10) + (log->stored >> 10) < TRIGGER) + return 0; + + /* time to compress */ + return gzlog_compress(log); +} + +/* gzlog_close() return values: + 0: ok + -3: invalid log pointer argument */ +int 
gzlog_close(gzlog *logd) +{ + struct log *log = logd; + + /* check arguments */ + if (log == NULL || strcmp(log->id, LOGID)) + return -3; + + /* close the log file and release the lock */ + log_close(log); + + /* free structure and return */ + if (log->path != NULL) + free(log->path); + strcpy(log->id, "bad"); + free(log); + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/examples/gzlog.h nodejs-0.11.15/deps/zlib/examples/gzlog.h --- nodejs-0.11.13/deps/zlib/examples/gzlog.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/gzlog.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,91 @@ +/* gzlog.h + Copyright (C) 2004, 2008, 2012 Mark Adler, all rights reserved + version 2.2, 14 Aug 2012 + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + Mark Adler madler@alumni.caltech.edu + */ + +/* Version History: + 1.0 26 Nov 2004 First version + 2.0 25 Apr 2008 Complete redesign for recovery of interrupted operations + Interface changed slightly in that now path is a prefix + Compression now occurs as needed during gzlog_write() + gzlog_write() now always leaves the log file as valid gzip + 2.1 8 Jul 2012 Fix argument checks in gzlog_compress() and gzlog_write() + 2.2 14 Aug 2012 Clean up signed comparisons + */ + +/* + The gzlog object allows writing short messages to a gzipped log file, + opening the log file locked for small bursts, and then closing it. The log + object works by appending stored (uncompressed) data to the gzip file until + 1 MB has been accumulated. At that time, the stored data is compressed, and + replaces the uncompressed data in the file. The log file is truncated to + its new size at that time. After each write operation, the log file is a + valid gzip file that can decompressed to recover what was written. + + The gzlog operations can be interupted at any point due to an application or + system crash, and the log file will be recovered the next time the log is + opened with gzlog_open(). + */ + +#ifndef GZLOG_H +#define GZLOG_H + +/* gzlog object type */ +typedef void gzlog; + +/* Open a gzlog object, creating the log file if it does not exist. Return + NULL on error. Note that gzlog_open() could take a while to complete if it + has to wait to verify that a lock is stale (possibly for five minutes), or + if there is significant contention with other instantiations of this object + when locking the resource. path is the prefix of the file names created by + this object. 
If path is "foo", then the log file will be "foo.gz", and + other auxiliary files will be created and destroyed during the process: + "foo.dict" for a compression dictionary, "foo.temp" for a temporary (next) + dictionary, "foo.add" for data being added or compressed, "foo.lock" for the + lock file, and "foo.repairs" to log recovery operations performed due to + interrupted gzlog operations. A gzlog_open() followed by a gzlog_close() + will recover a previously interrupted operation, if any. */ +gzlog *gzlog_open(char *path); + +/* Write to a gzlog object. Return zero on success, -1 if there is a file i/o + error on any of the gzlog files (this should not happen if gzlog_open() + succeeded, unless the device has run out of space or leftover auxiliary + files have permissions or ownership that prevent their use), -2 if there is + a memory allocation failure, or -3 if the log argument is invalid (e.g. if + it was not created by gzlog_open()). This function will write data to the + file uncompressed, until 1 MB has been accumulated, at which time that data + will be compressed. The log file will be a valid gzip file upon successful + return. */ +int gzlog_write(gzlog *log, void *data, size_t len); + +/* Force compression of any uncompressed data in the log. This should be used + sparingly, if at all. The main application would be when a log file will + not be appended to again. If this is used to compress frequently while + appending, it will both significantly increase the execution time and + reduce the compression ratio. The return codes are the same as for + gzlog_write(). */ +int gzlog_compress(gzlog *log); + +/* Close a gzlog object. Return zero on success, -3 if the log argument is + invalid. The log object is freed, and so cannot be referenced again. 
*/ +int gzlog_close(gzlog *log); + +#endif diff -Nru nodejs-0.11.13/deps/zlib/examples/README.examples nodejs-0.11.15/deps/zlib/examples/README.examples --- nodejs-0.11.13/deps/zlib/examples/README.examples 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/README.examples 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,49 @@ +This directory contains examples of the use of zlib and other relevant +programs and documentation. + +enough.c + calculation and justification of ENOUGH parameter in inftrees.h + - calculates the maximum table space used in inflate tree + construction over all possible Huffman codes + +fitblk.c + compress just enough input to nearly fill a requested output size + - zlib isn't designed to do this, but fitblk does it anyway + +gun.c + uncompress a gzip file + - illustrates the use of inflateBack() for high speed file-to-file + decompression using call-back functions + - is approximately twice as fast as gzip -d + - also provides Unix uncompress functionality, again twice as fast + +gzappend.c + append to a gzip file + - illustrates the use of the Z_BLOCK flush parameter for inflate() + - illustrates the use of deflatePrime() to start at any bit + +gzjoin.c + join gzip files without recalculating the crc or recompressing + - illustrates the use of the Z_BLOCK flush parameter for inflate() + - illustrates the use of crc32_combine() + +gzlog.c +gzlog.h + efficiently and robustly maintain a message log file in gzip format + - illustrates use of raw deflate, Z_PARTIAL_FLUSH, deflatePrime(), + and deflateSetDictionary() + - illustrates use of a gzip header extra field + +zlib_how.html + painfully comprehensive description of zpipe.c (see below) + - describes in excruciating detail the use of deflate() and inflate() + +zpipe.c + reads and writes zlib streams from stdin to stdout + - illustrates the proper use of deflate() and inflate() + - deeply commented in zlib_how.html (see above) + +zran.c + index a zlib or gzip stream and 
randomly access it + - illustrates the use of Z_BLOCK, inflatePrime(), and + inflateSetDictionary() to provide random access diff -Nru nodejs-0.11.13/deps/zlib/examples/zlib_how.html nodejs-0.11.15/deps/zlib/examples/zlib_how.html --- nodejs-0.11.13/deps/zlib/examples/zlib_how.html 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/zlib_how.html 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,545 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" + "http://www.w3.org/TR/REC-html40/loose.dtd"> +<html> +<head> +<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> +<title>zlib Usage Example + + + +

zlib Usage Example

+We often get questions about how the deflate() and inflate() functions should be used. +Users wonder when they should provide more input, when they should use more output, +what to do with a Z_BUF_ERROR, how to make sure the process terminates properly, and +so on. So for those who have read zlib.h (a few times), and +would like further edification, below is an annotated example in C of simple routines to compress and decompress +from an input file to an output file using deflate() and inflate() respectively. The +annotations are interspersed between lines of the code. So please read between the lines. +We hope this helps explain some of the intricacies of zlib. +

+Without further adieu, here is the program zpipe.c: +


+/* zpipe.c: example of proper use of zlib's inflate() and deflate()
+   Not copyrighted -- provided to the public domain
+   Version 1.4  11 December 2005  Mark Adler */
+
+/* Version history:
+   1.0  30 Oct 2004  First version
+   1.1   8 Nov 2004  Add void casting for unused return values
+                     Use switch statement for inflate() return values
+   1.2   9 Nov 2004  Add assertions to document zlib guarantees
+   1.3   6 Apr 2005  Remove incorrect assertion in inf()
+   1.4  11 Dec 2005  Add hack to avoid MSDOS end-of-line conversions
+                     Avoid some compiler warnings for input and output buffers
+ */
+
+We now include the header files for the required definitions. From +stdio.h we use fopen(), fread(), fwrite(), +feof(), ferror(), and fclose() for file i/o, and +fputs() for error messages. From string.h we use +strcmp() for command line argument processing. +From assert.h we use the assert() macro. +From zlib.h +we use the basic compression functions deflateInit(), +deflate(), and deflateEnd(), and the basic decompression +functions inflateInit(), inflate(), and +inflateEnd(). +

+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "zlib.h"
+
+This is an ugly hack required to avoid corruption of the input and output data on +Windows/MS-DOS systems. Without this, those systems would assume that the input and output +files are text, and try to convert the end-of-line characters from one standard to +another. That would corrupt binary data, and in particular would render the compressed data unusable. +This sets the input and output to binary which suppresses the end-of-line conversions. +SET_BINARY_MODE() will be used later on stdin and stdout, at the beginning of main(). +

+#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__)
+#  include <fcntl.h>
+#  include <io.h>
+#  define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY)
+#else
+#  define SET_BINARY_MODE(file)
+#endif
+
+CHUNK is simply the buffer size for feeding data to and pulling data +from the zlib routines. Larger buffer sizes would be more efficient, +especially for inflate(). If the memory is available, buffers sizes +on the order of 128K or 256K bytes should be used. +

+#define CHUNK 16384
+
+The def() routine compresses data from an input file to an output file. The output data +will be in the zlib format, which is different from the gzip or zip +formats. The zlib format has a very small header of only two bytes to identify it as +a zlib stream and to provide decoding information, and a four-byte trailer with a fast +check value to verify the integrity of the uncompressed data after decoding. +

+/* Compress from file source to file dest until EOF on source.
+   def() returns Z_OK on success, Z_MEM_ERROR if memory could not be
+   allocated for processing, Z_STREAM_ERROR if an invalid compression
+   level is supplied, Z_VERSION_ERROR if the version of zlib.h and the
+   version of the library linked do not match, or Z_ERRNO if there is
+   an error reading or writing the files. */
+int def(FILE *source, FILE *dest, int level)
+{
+
+Here are the local variables for def(). ret will be used for zlib +return codes. flush will keep track of the current flushing state for deflate(), +which is either no flushing, or flush to completion after the end of the input file is reached. +have is the amount of data returned from deflate(). The strm structure +is used to pass information to and from the zlib routines, and to maintain the +deflate() state. in and out are the input and output buffers for +deflate(). +

+    int ret, flush;
+    unsigned have;
+    z_stream strm;
+    unsigned char in[CHUNK];
+    unsigned char out[CHUNK];
+
+The first thing we do is to initialize the zlib state for compression using +deflateInit(). This must be done before the first use of deflate(). +The zalloc, zfree, and opaque fields in the strm +structure must be initialized before calling deflateInit(). Here they are +set to the zlib constant Z_NULL to request that zlib use +the default memory allocation routines. An application may also choose to provide +custom memory allocation routines here. deflateInit() will allocate on the +order of 256K bytes for the internal state. +(See zlib Technical Details.) +

+deflateInit() is called with a pointer to the structure to be initialized and +the compression level, which is an integer in the range of -1 to 9. Lower compression +levels result in faster execution, but less compression. Higher levels result in +greater compression, but slower execution. The zlib constant Z_DEFAULT_COMPRESSION, +equal to -1, +provides a good compromise between compression and speed and is equivalent to level 6. +Level 0 actually does no compression at all, and in fact expands the data slightly to produce +the zlib format (it is not a byte-for-byte copy of the input). +More advanced applications of zlib +may use deflateInit2() here instead. Such an application may want to reduce how +much memory will be used, at some price in compression. Or it may need to request a +gzip header and trailer instead of a zlib header and trailer, or raw +encoding with no header or trailer at all. +

+We must check the return value of deflateInit() against the zlib constant +Z_OK to make sure that it was able to +allocate memory for the internal state, and that the provided arguments were valid. +deflateInit() will also check that the version of zlib that the zlib.h +file came from matches the version of zlib actually linked with the program. This +is especially important for environments in which zlib is a shared library. +

+Note that an application can initialize multiple, independent zlib streams, which can +operate in parallel. The state information maintained in the structure allows the zlib +routines to be reentrant. +


+    /* allocate deflate state */
+    strm.zalloc = Z_NULL;
+    strm.zfree = Z_NULL;
+    strm.opaque = Z_NULL;
+    ret = deflateInit(&strm, level);
+    if (ret != Z_OK)
+        return ret;
+
+With the pleasantries out of the way, now we can get down to business. The outer do-loop +reads all of the input file and exits at the bottom of the loop once end-of-file is reached. +This loop contains the only call of deflate(). So we must make sure that all of the +input data has been processed and that all of the output data has been generated and consumed +before we fall out of the loop at the bottom. +

+    /* compress until end of file */
+    do {
+
+We start off by reading data from the input file. The number of bytes read is put directly +into avail_in, and a pointer to those bytes is put into next_in. We also +check to see if end-of-file on the input has been reached. If we are at the end of file, then flush is set to the +zlib constant Z_FINISH, which is later passed to deflate() to +indicate that this is the last chunk of input data to compress. We need to use feof() +to check for end-of-file as opposed to seeing if fewer than CHUNK bytes have been read. The +reason is that if the input file length is an exact multiple of CHUNK, we will miss +the fact that we got to the end-of-file, and not know to tell deflate() to finish +up the compressed stream. If we are not yet at the end of the input, then the zlib +constant Z_NO_FLUSH will be passed to deflate to indicate that we are still +in the middle of the uncompressed data. +

+If there is an error in reading from the input file, the process is aborted with +deflateEnd() being called to free the allocated zlib state before returning +the error. We wouldn't want a memory leak, now would we? deflateEnd() can be called +at any time after the state has been initialized. Once that's done, deflateInit() (or +deflateInit2()) would have to be called to start a new compression process. There is +no point here in checking the deflateEnd() return code. The deallocation can't fail. +


+        strm.avail_in = fread(in, 1, CHUNK, source);
+        if (ferror(source)) {
+            (void)deflateEnd(&strm);
+            return Z_ERRNO;
+        }
+        flush = feof(source) ? Z_FINISH : Z_NO_FLUSH;
+        strm.next_in = in;
+
+The inner do-loop passes our chunk of input data to deflate(), and then +keeps calling deflate() until it is done producing output. Once there is no more +new output, deflate() is guaranteed to have consumed all of the input, i.e., +avail_in will be zero. +

+        /* run deflate() on input until output buffer not full, finish
+           compression if all of source has been read in */
+        do {
+
+Output space is provided to deflate() by setting avail_out to the number +of available output bytes and next_out to a pointer to that space. +

+            strm.avail_out = CHUNK;
+            strm.next_out = out;
+
+Now we call the compression engine itself, deflate(). It takes as many of the +avail_in bytes at next_in as it can process, and writes as many as +avail_out bytes to next_out. Those counters and pointers are then +updated past the input data consumed and the output data written. It is the amount of +output space available that may limit how much input is consumed. +Hence the inner loop to make sure that +all of the input is consumed by providing more output space each time. Since avail_in +and next_in are updated by deflate(), we don't have to mess with those +between deflate() calls until it's all used up. +

+The parameters to deflate() are a pointer to the strm structure containing +the input and output information and the internal compression engine state, and a parameter +indicating whether and how to flush data to the output. Normally deflate will consume +several K bytes of input data before producing any output (except for the header), in order +to accumulate statistics on the data for optimum compression. It will then put out a burst of +compressed data, and proceed to consume more input before the next burst. Eventually, +deflate() +must be told to terminate the stream, complete the compression with provided input data, and +write out the trailer check value. deflate() will continue to compress normally as long +as the flush parameter is Z_NO_FLUSH. Once the Z_FINISH parameter is provided, +deflate() will begin to complete the compressed output stream. However depending on how +much output space is provided, deflate() may have to be called several times until it +has provided the complete compressed stream, even after it has consumed all of the input. The flush +parameter must continue to be Z_FINISH for those subsequent calls. +

+There are other values of the flush parameter that are used in more advanced applications. You can +force deflate() to produce a burst of output that encodes all of the input data provided +so far, even if it wouldn't have otherwise, for example to control data latency on a link with +compressed data. You can also ask that deflate() do that as well as erase any history up to +that point so that what follows can be decompressed independently, for example for random access +applications. Both requests will degrade compression by an amount depending on how often such +requests are made. +

+deflate() has a return value that can indicate errors, yet we do not check it here. Why +not? Well, it turns out that deflate() can do no wrong here. Let's go through +deflate()'s return values and dispense with them one by one. The possible values are +Z_OK, Z_STREAM_END, Z_STREAM_ERROR, or Z_BUF_ERROR. Z_OK +is, well, ok. Z_STREAM_END is also ok and will be returned for the last call of +deflate(). This is already guaranteed by calling deflate() with Z_FINISH +until it has no more output. Z_STREAM_ERROR is only possible if the stream is not +initialized properly, but we did initialize it properly. There is no harm in checking for +Z_STREAM_ERROR here, for example to check for the possibility that some +other part of the application inadvertently clobbered the memory containing the zlib state. +Z_BUF_ERROR will be explained further below, but +suffice it to say that this is simply an indication that deflate() could not consume +more input or produce more output. deflate() can be called again with more output space +or more available input, which it will be in this code. +


+            ret = deflate(&strm, flush);    /* no bad return value */
+            assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
+
+Now we compute how much output deflate() provided on the last call, which is the +difference between how much space was provided before the call, and how much output space +is still available after the call. Then that data, if any, is written to the output file. +We can then reuse the output buffer for the next call of deflate(). Again if there +is a file i/o error, we call deflateEnd() before returning to avoid a memory leak. +

+            have = CHUNK - strm.avail_out;
+            if (fwrite(out, 1, have, dest) != have || ferror(dest)) {
+                (void)deflateEnd(&strm);
+                return Z_ERRNO;
+            }
+
+The inner do-loop is repeated until the last deflate() call fails to fill the +provided output buffer. Then we know that deflate() has done as much as it can with +the provided input, and that all of that input has been consumed. We can then fall out of this +loop and reuse the input buffer. +

+The way we tell that deflate() has no more output is by seeing that it did not fill +the output buffer, leaving avail_out greater than zero. However suppose that +deflate() has no more output, but just so happened to exactly fill the output buffer! +avail_out is zero, and we can't tell that deflate() has done all it can. +As far as we know, deflate() +has more output for us. So we call it again. But now deflate() produces no output +at all, and avail_out remains unchanged as CHUNK. That deflate() call +wasn't able to do anything, either consume input or produce output, and so it returns +Z_BUF_ERROR. (See, I told you I'd cover this later.) However this is not a problem at +all. Now we finally have the desired indication that deflate() is really done, +and so we drop out of the inner loop to provide more input to deflate(). +

+With flush set to Z_FINISH, this final set of deflate() calls will +complete the output stream. Once that is done, subsequent calls of deflate() would return +Z_STREAM_ERROR if the flush parameter is not Z_FINISH, and do no more processing +until the state is reinitialized. +

+Some applications of zlib have two loops that call deflate() +instead of the single inner loop we have here. The first loop would call +without flushing and feed all of the data to deflate(). The second loop would call +deflate() with no more +data and the Z_FINISH parameter to complete the process. As you can see from this +example, that can be avoided by simply keeping track of the current flush state. +


+        } while (strm.avail_out == 0);
+        assert(strm.avail_in == 0);     /* all input will be used */
+
+Now we check to see if we have already processed all of the input file. That information was +saved in the flush variable, so we see if that was set to Z_FINISH. If so, +then we're done and we fall out of the outer loop. We're guaranteed to get Z_STREAM_END +from the last deflate() call, since we ran it until the last chunk of input was +consumed and all of the output was generated. +

+        /* done when last data in file processed */
+    } while (flush != Z_FINISH);
+    assert(ret == Z_STREAM_END);        /* stream will be complete */
+
+The process is complete, but we still need to deallocate the state to avoid a memory leak +(or rather more like a memory hemorrhage if you didn't do this). Then +finally we can return with a happy return value. +

+    /* clean up and return */
+    (void)deflateEnd(&strm);
+    return Z_OK;
+}
+
+Now we do the same thing for decompression in the inf() routine. inf() +decompresses what is hopefully a valid zlib stream from the input file and writes the +uncompressed data to the output file. Much of the discussion above for def() +applies to inf() as well, so the discussion here will focus on the differences between +the two. +

+/* Decompress from file source to file dest until stream ends or EOF.
+   inf() returns Z_OK on success, Z_MEM_ERROR if memory could not be
+   allocated for processing, Z_DATA_ERROR if the deflate data is
+   invalid or incomplete, Z_VERSION_ERROR if the version of zlib.h and
+   the version of the library linked do not match, or Z_ERRNO if there
+   is an error reading or writing the files. */
+int inf(FILE *source, FILE *dest)
+{
+
+The local variables have the same functionality as they do for def(). The +only difference is that there is no flush variable, since inflate() +can tell from the zlib stream itself when the stream is complete. +

+    int ret;
+    unsigned have;
+    z_stream strm;
+    unsigned char in[CHUNK];
+    unsigned char out[CHUNK];
+
+The initialization of the state is the same, except that there is no compression level, +of course, and two more elements of the structure are initialized. avail_in +and next_in must be initialized before calling inflateInit(). This +is because the application has the option to provide the start of the zlib stream in +order for inflateInit() to have access to information about the compression +method to aid in memory allocation. In the current implementation of zlib +(up through versions 1.2.x), the method-dependent memory allocations are deferred to the first call of +inflate() anyway. However those fields must be initialized since later versions +of zlib that provide more compression methods may take advantage of this interface. +In any case, no decompression is performed by inflateInit(), so the +avail_out and next_out fields do not need to be initialized before calling. +

+Here avail_in is set to zero and next_in is set to Z_NULL to +indicate that no input data is being provided. +


+    /* allocate inflate state */
+    strm.zalloc = Z_NULL;
+    strm.zfree = Z_NULL;
+    strm.opaque = Z_NULL;
+    strm.avail_in = 0;
+    strm.next_in = Z_NULL;
+    ret = inflateInit(&strm);
+    if (ret != Z_OK)
+        return ret;
+
+The outer do-loop decompresses input until inflate() indicates +that it has reached the end of the compressed data and has produced all of the uncompressed +output. This is in contrast to def() which processes all of the input file. +If end-of-file is reached before the compressed data self-terminates, then the compressed +data is incomplete and an error is returned. +

+    /* decompress until deflate stream ends or end of file */
+    do {
+
+We read input data and set the strm structure accordingly. If we've reached the +end of the input file, then we leave the outer loop and report an error, since the +compressed data is incomplete. Note that we may read more data than is eventually consumed +by inflate(), if the input file continues past the zlib stream. +For applications where zlib streams are embedded in other data, this routine would +need to be modified to return the unused data, or at least indicate how much of the input +data was not used, so the application would know where to pick up after the zlib stream. +

+        strm.avail_in = fread(in, 1, CHUNK, source);
+        if (ferror(source)) {
+            (void)inflateEnd(&strm);
+            return Z_ERRNO;
+        }
+        if (strm.avail_in == 0)
+            break;
+        strm.next_in = in;
+
+The inner do-loop has the same function it did in def(), which is to +keep calling inflate() until has generated all of the output it can with the +provided input. +

+        /* run inflate() on input until output buffer not full */
+        do {
+
+Just like in def(), the same output space is provided for each call of inflate(). +

+            strm.avail_out = CHUNK;
+            strm.next_out = out;
+
+Now we run the decompression engine itself. There is no need to adjust the flush parameter, since +the zlib format is self-terminating. The main difference here is that there are +return values that we need to pay attention to. Z_DATA_ERROR +indicates that inflate() detected an error in the zlib compressed data format, +which means that either the data is not a zlib stream to begin with, or that the data was +corrupted somewhere along the way since it was compressed. The other error to be processed is +Z_MEM_ERROR, which can occur since memory allocation is deferred until inflate() +needs it, unlike deflate(), whose memory is allocated at the start by deflateInit(). +

+Advanced applications may use +deflateSetDictionary() to prime deflate() with a set of likely data to improve the +first 32K or so of compression. This is noted in the zlib header, so inflate() +requests that that dictionary be provided before it can start to decompress. Without the dictionary, +correct decompression is not possible. For this routine, we have no idea what the dictionary is, +so the Z_NEED_DICT indication is converted to a Z_DATA_ERROR. +

+inflate() can also return Z_STREAM_ERROR, which should not be possible here, +but could be checked for as noted above for def(). Z_BUF_ERROR does not need to be +checked for here, for the same reasons noted for def(). Z_STREAM_END will be +checked for later. +


+            ret = inflate(&strm, Z_NO_FLUSH);
+            assert(ret != Z_STREAM_ERROR);  /* state not clobbered */
+            switch (ret) {
+            case Z_NEED_DICT:
+                ret = Z_DATA_ERROR;     /* and fall through */
+            case Z_DATA_ERROR:
+            case Z_MEM_ERROR:
+                (void)inflateEnd(&strm);
+                return ret;
+            }
+
+The output of inflate() is handled identically to that of deflate(). +

+            have = CHUNK - strm.avail_out;
+            if (fwrite(out, 1, have, dest) != have || ferror(dest)) {
+                (void)inflateEnd(&strm);
+                return Z_ERRNO;
+            }
+
+The inner do-loop ends when inflate() has no more output as indicated +by not filling the output buffer, just as for deflate(). In this case, we cannot +assert that strm.avail_in will be zero, since the deflate stream may end before the file +does. +

+        } while (strm.avail_out == 0);
+
+The outer do-loop ends when inflate() reports that it has reached the +end of the input zlib stream, has completed the decompression and integrity +check, and has provided all of the output. This is indicated by the inflate() +return value Z_STREAM_END. The inner loop is guaranteed to leave ret +equal to Z_STREAM_END if the last chunk of the input file read contained the end +of the zlib stream. So if the return value is not Z_STREAM_END, the +loop continues to read more input. +

+        /* done when inflate() says it's done */
+    } while (ret != Z_STREAM_END);
+
+At this point, decompression successfully completed, or we broke out of the loop due to no +more data being available from the input file. If the last inflate() return value +is not Z_STREAM_END, then the zlib stream was incomplete and a data error +is returned. Otherwise, we return with a happy return value. Of course, inflateEnd() +is called first to avoid a memory leak. +

+    /* clean up and return */
+    (void)inflateEnd(&strm);
+    return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
+}
+
+That ends the routines that directly use zlib. The following routines make this +a command-line program by running data through the above routines from stdin to +stdout, and handling any errors reported by def() or inf(). +

+zerr() is used to interpret the possible error codes from def() +and inf(), as detailed in their comments above, and print out an error message. +Note that these are only a subset of the possible return values from deflate() +and inflate(). +


+/* report a zlib or i/o error */
+void zerr(int ret)
+{
+    fputs("zpipe: ", stderr);
+    switch (ret) {
+    case Z_ERRNO:
+        if (ferror(stdin))
+            fputs("error reading stdin\n", stderr);
+        if (ferror(stdout))
+            fputs("error writing stdout\n", stderr);
+        break;
+    case Z_STREAM_ERROR:
+        fputs("invalid compression level\n", stderr);
+        break;
+    case Z_DATA_ERROR:
+        fputs("invalid or incomplete deflate data\n", stderr);
+        break;
+    case Z_MEM_ERROR:
+        fputs("out of memory\n", stderr);
+        break;
+    case Z_VERSION_ERROR:
+        fputs("zlib version mismatch!\n", stderr);
+    }
+}
+
+Here is the main() routine used to test def() and inf(). The +zpipe command is simply a compression pipe from stdin to stdout, if +no arguments are given, or it is a decompression pipe if zpipe -d is used. If any other +arguments are provided, no compression or decompression is performed. Instead a usage +message is displayed. Examples are zpipe < foo.txt > foo.txt.z to compress, and +zpipe -d < foo.txt.z > foo.txt to decompress. +

+/* compress or decompress from stdin to stdout */
+int main(int argc, char **argv)
+{
+    int ret;
+
+    /* avoid end-of-line conversions */
+    SET_BINARY_MODE(stdin);
+    SET_BINARY_MODE(stdout);
+
+    /* do compression if no arguments */
+    if (argc == 1) {
+        ret = def(stdin, stdout, Z_DEFAULT_COMPRESSION);
+        if (ret != Z_OK)
+            zerr(ret);
+        return ret;
+    }
+
+    /* do decompression if -d specified */
+    else if (argc == 2 && strcmp(argv[1], "-d") == 0) {
+        ret = inf(stdin, stdout);
+        if (ret != Z_OK)
+            zerr(ret);
+        return ret;
+    }
+
+    /* otherwise, report usage */
+    else {
+        fputs("zpipe usage: zpipe [-d] < source > dest\n", stderr);
+        return 1;
+    }
+}
+
+
+Copyright (c) 2004, 2005 by Mark Adler
Last modified 11 December 2005
+ + diff -Nru nodejs-0.11.13/deps/zlib/examples/zpipe.c nodejs-0.11.15/deps/zlib/examples/zpipe.c --- nodejs-0.11.13/deps/zlib/examples/zpipe.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/zpipe.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,205 @@ +/* zpipe.c: example of proper use of zlib's inflate() and deflate() + Not copyrighted -- provided to the public domain + Version 1.4 11 December 2005 Mark Adler */ + +/* Version history: + 1.0 30 Oct 2004 First version + 1.1 8 Nov 2004 Add void casting for unused return values + Use switch statement for inflate() return values + 1.2 9 Nov 2004 Add assertions to document zlib guarantees + 1.3 6 Apr 2005 Remove incorrect assertion in inf() + 1.4 11 Dec 2005 Add hack to avoid MSDOS end-of-line conversions + Avoid some compiler warnings for input and output buffers + */ + +#include +#include +#include +#include "zlib.h" + +#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__) +# include +# include +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +#define CHUNK 16384 + +/* Compress from file source to file dest until EOF on source. + def() returns Z_OK on success, Z_MEM_ERROR if memory could not be + allocated for processing, Z_STREAM_ERROR if an invalid compression + level is supplied, Z_VERSION_ERROR if the version of zlib.h and the + version of the library linked do not match, or Z_ERRNO if there is + an error reading or writing the files. 
*/ +int def(FILE *source, FILE *dest, int level) +{ + int ret, flush; + unsigned have; + z_stream strm; + unsigned char in[CHUNK]; + unsigned char out[CHUNK]; + + /* allocate deflate state */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + ret = deflateInit(&strm, level); + if (ret != Z_OK) + return ret; + + /* compress until end of file */ + do { + strm.avail_in = fread(in, 1, CHUNK, source); + if (ferror(source)) { + (void)deflateEnd(&strm); + return Z_ERRNO; + } + flush = feof(source) ? Z_FINISH : Z_NO_FLUSH; + strm.next_in = in; + + /* run deflate() on input until output buffer not full, finish + compression if all of source has been read in */ + do { + strm.avail_out = CHUNK; + strm.next_out = out; + ret = deflate(&strm, flush); /* no bad return value */ + assert(ret != Z_STREAM_ERROR); /* state not clobbered */ + have = CHUNK - strm.avail_out; + if (fwrite(out, 1, have, dest) != have || ferror(dest)) { + (void)deflateEnd(&strm); + return Z_ERRNO; + } + } while (strm.avail_out == 0); + assert(strm.avail_in == 0); /* all input will be used */ + + /* done when last data in file processed */ + } while (flush != Z_FINISH); + assert(ret == Z_STREAM_END); /* stream will be complete */ + + /* clean up and return */ + (void)deflateEnd(&strm); + return Z_OK; +} + +/* Decompress from file source to file dest until stream ends or EOF. + inf() returns Z_OK on success, Z_MEM_ERROR if memory could not be + allocated for processing, Z_DATA_ERROR if the deflate data is + invalid or incomplete, Z_VERSION_ERROR if the version of zlib.h and + the version of the library linked do not match, or Z_ERRNO if there + is an error reading or writing the files. 
*/ +int inf(FILE *source, FILE *dest) +{ + int ret; + unsigned have; + z_stream strm; + unsigned char in[CHUNK]; + unsigned char out[CHUNK]; + + /* allocate inflate state */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit(&strm); + if (ret != Z_OK) + return ret; + + /* decompress until deflate stream ends or end of file */ + do { + strm.avail_in = fread(in, 1, CHUNK, source); + if (ferror(source)) { + (void)inflateEnd(&strm); + return Z_ERRNO; + } + if (strm.avail_in == 0) + break; + strm.next_in = in; + + /* run inflate() on input until output buffer not full */ + do { + strm.avail_out = CHUNK; + strm.next_out = out; + ret = inflate(&strm, Z_NO_FLUSH); + assert(ret != Z_STREAM_ERROR); /* state not clobbered */ + switch (ret) { + case Z_NEED_DICT: + ret = Z_DATA_ERROR; /* and fall through */ + case Z_DATA_ERROR: + case Z_MEM_ERROR: + (void)inflateEnd(&strm); + return ret; + } + have = CHUNK - strm.avail_out; + if (fwrite(out, 1, have, dest) != have || ferror(dest)) { + (void)inflateEnd(&strm); + return Z_ERRNO; + } + } while (strm.avail_out == 0); + + /* done when inflate() says it's done */ + } while (ret != Z_STREAM_END); + + /* clean up and return */ + (void)inflateEnd(&strm); + return ret == Z_STREAM_END ? 
Z_OK : Z_DATA_ERROR; +} + +/* report a zlib or i/o error */ +void zerr(int ret) +{ + fputs("zpipe: ", stderr); + switch (ret) { + case Z_ERRNO: + if (ferror(stdin)) + fputs("error reading stdin\n", stderr); + if (ferror(stdout)) + fputs("error writing stdout\n", stderr); + break; + case Z_STREAM_ERROR: + fputs("invalid compression level\n", stderr); + break; + case Z_DATA_ERROR: + fputs("invalid or incomplete deflate data\n", stderr); + break; + case Z_MEM_ERROR: + fputs("out of memory\n", stderr); + break; + case Z_VERSION_ERROR: + fputs("zlib version mismatch!\n", stderr); + } +} + +/* compress or decompress from stdin to stdout */ +int main(int argc, char **argv) +{ + int ret; + + /* avoid end-of-line conversions */ + SET_BINARY_MODE(stdin); + SET_BINARY_MODE(stdout); + + /* do compression if no arguments */ + if (argc == 1) { + ret = def(stdin, stdout, Z_DEFAULT_COMPRESSION); + if (ret != Z_OK) + zerr(ret); + return ret; + } + + /* do decompression if -d specified */ + else if (argc == 2 && strcmp(argv[1], "-d") == 0) { + ret = inf(stdin, stdout); + if (ret != Z_OK) + zerr(ret); + return ret; + } + + /* otherwise, report usage */ + else { + fputs("zpipe usage: zpipe [-d] < source > dest\n", stderr); + return 1; + } +} diff -Nru nodejs-0.11.13/deps/zlib/examples/zran.c nodejs-0.11.15/deps/zlib/examples/zran.c --- nodejs-0.11.13/deps/zlib/examples/zran.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/examples/zran.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,409 @@ +/* zran.c -- example of zlib/gzip stream indexing and random access + * Copyright (C) 2005, 2012 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + Version 1.1 29 Sep 2012 Mark Adler */ + +/* Version History: + 1.0 29 May 2005 First version + 1.1 29 Sep 2012 Fix memory reallocation error + */ + +/* Illustrate the use of Z_BLOCK, inflatePrime(), and inflateSetDictionary() + for random access of a compressed file. 
A file containing a zlib or gzip + stream is provided on the command line. The compressed stream is decoded in + its entirety, and an index built with access points about every SPAN bytes + in the uncompressed output. The compressed file is left open, and can then + be read randomly, having to decompress on the average SPAN/2 uncompressed + bytes before getting to the desired block of data. + + An access point can be created at the start of any deflate block, by saving + the starting file offset and bit of that block, and the 32K bytes of + uncompressed data that precede that block. Also the uncompressed offset of + that block is saved to provide a referece for locating a desired starting + point in the uncompressed stream. build_index() works by decompressing the + input zlib or gzip stream a block at a time, and at the end of each block + deciding if enough uncompressed data has gone by to justify the creation of + a new access point. If so, that point is saved in a data structure that + grows as needed to accommodate the points. + + To use the index, an offset in the uncompressed data is provided, for which + the latest accees point at or preceding that offset is located in the index. + The input file is positioned to the specified location in the index, and if + necessary the first few bits of the compressed data is read from the file. + inflate is initialized with those bits and the 32K of uncompressed data, and + the decompression then proceeds until the desired offset in the file is + reached. Then the decompression continues to read the desired uncompressed + data from the file. + + Another approach would be to generate the index on demand. In that case, + requests for random access reads from the compressed data would try to use + the index, but if a read far enough past the end of the index is required, + then further index entries would be generated and added. 
+ + There is some fair bit of overhead to starting inflation for the random + access, mainly copying the 32K byte dictionary. So if small pieces of the + file are being accessed, it would make sense to implement a cache to hold + some lookahead and avoid many calls to extract() for small lengths. + + Another way to build an index would be to use inflateCopy(). That would + not be constrained to have access points at block boundaries, but requires + more memory per access point, and also cannot be saved to file due to the + use of pointers in the state. The approach here allows for storage of the + index in a file. + */ + +#include +#include +#include +#include "zlib.h" + +#define local static + +#define SPAN 1048576L /* desired distance between access points */ +#define WINSIZE 32768U /* sliding window size */ +#define CHUNK 16384 /* file input buffer size */ + +/* access point entry */ +struct point { + off_t out; /* corresponding offset in uncompressed data */ + off_t in; /* offset in input file of first full byte */ + int bits; /* number of bits (1-7) from byte at in - 1, or 0 */ + unsigned char window[WINSIZE]; /* preceding 32K of uncompressed data */ +}; + +/* access point list */ +struct access { + int have; /* number of list entries filled in */ + int size; /* number of list entries allocated */ + struct point *list; /* allocated list */ +}; + +/* Deallocate an index built by build_index() */ +local void free_index(struct access *index) +{ + if (index != NULL) { + free(index->list); + free(index); + } +} + +/* Add an entry to the access point list. If out of memory, deallocate the + existing list and return NULL. 
*/ +local struct access *addpoint(struct access *index, int bits, + off_t in, off_t out, unsigned left, unsigned char *window) +{ + struct point *next; + + /* if list is empty, create it (start with eight points) */ + if (index == NULL) { + index = malloc(sizeof(struct access)); + if (index == NULL) return NULL; + index->list = malloc(sizeof(struct point) << 3); + if (index->list == NULL) { + free(index); + return NULL; + } + index->size = 8; + index->have = 0; + } + + /* if list is full, make it bigger */ + else if (index->have == index->size) { + index->size <<= 1; + next = realloc(index->list, sizeof(struct point) * index->size); + if (next == NULL) { + free_index(index); + return NULL; + } + index->list = next; + } + + /* fill in entry and increment how many we have */ + next = index->list + index->have; + next->bits = bits; + next->in = in; + next->out = out; + if (left) + memcpy(next->window, window + WINSIZE - left, left); + if (left < WINSIZE) + memcpy(next->window + left, window, WINSIZE - left); + index->have++; + + /* return list, possibly reallocated */ + return index; +} + +/* Make one entire pass through the compressed stream and build an index, with + access points about every span bytes of uncompressed output -- span is + chosen to balance the speed of random access against the memory requirements + of the list, about 32K bytes per access point. Note that data after the end + of the first zlib or gzip stream in the file is ignored. build_index() + returns the number of access points on success (>= 1), Z_MEM_ERROR for out + of memory, Z_DATA_ERROR for an error in the input file, or Z_ERRNO for a + file read error. On success, *built points to the resulting index. 
*/ +local int build_index(FILE *in, off_t span, struct access **built) +{ + int ret; + off_t totin, totout; /* our own total counters to avoid 4GB limit */ + off_t last; /* totout value of last access point */ + struct access *index; /* access points being generated */ + z_stream strm; + unsigned char input[CHUNK]; + unsigned char window[WINSIZE]; + + /* initialize inflate */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit2(&strm, 47); /* automatic zlib or gzip decoding */ + if (ret != Z_OK) + return ret; + + /* inflate the input, maintain a sliding window, and build an index -- this + also validates the integrity of the compressed data using the check + information at the end of the gzip or zlib stream */ + totin = totout = last = 0; + index = NULL; /* will be allocated by first addpoint() */ + strm.avail_out = 0; + do { + /* get some compressed data from input file */ + strm.avail_in = fread(input, 1, CHUNK, in); + if (ferror(in)) { + ret = Z_ERRNO; + goto build_index_error; + } + if (strm.avail_in == 0) { + ret = Z_DATA_ERROR; + goto build_index_error; + } + strm.next_in = input; + + /* process all of that, or until end of stream */ + do { + /* reset sliding window if necessary */ + if (strm.avail_out == 0) { + strm.avail_out = WINSIZE; + strm.next_out = window; + } + + /* inflate until out of input, output, or at end of block -- + update the total input and output counters */ + totin += strm.avail_in; + totout += strm.avail_out; + ret = inflate(&strm, Z_BLOCK); /* return at end of block */ + totin -= strm.avail_in; + totout -= strm.avail_out; + if (ret == Z_NEED_DICT) + ret = Z_DATA_ERROR; + if (ret == Z_MEM_ERROR || ret == Z_DATA_ERROR) + goto build_index_error; + if (ret == Z_STREAM_END) + break; + + /* if at end of block, consider adding an index entry (note that if + data_type indicates an end-of-block, then all of the + uncompressed data from that block has been 
delivered, and none + of the compressed data after that block has been consumed, + except for up to seven bits) -- the totout == 0 provides an + entry point after the zlib or gzip header, and assures that the + index always has at least one access point; we avoid creating an + access point after the last block by checking bit 6 of data_type + */ + if ((strm.data_type & 128) && !(strm.data_type & 64) && + (totout == 0 || totout - last > span)) { + index = addpoint(index, strm.data_type & 7, totin, + totout, strm.avail_out, window); + if (index == NULL) { + ret = Z_MEM_ERROR; + goto build_index_error; + } + last = totout; + } + } while (strm.avail_in != 0); + } while (ret != Z_STREAM_END); + + /* clean up and return index (release unused entries in list) */ + (void)inflateEnd(&strm); + index->list = realloc(index->list, sizeof(struct point) * index->have); + index->size = index->have; + *built = index; + return index->size; + + /* return error */ + build_index_error: + (void)inflateEnd(&strm); + if (index != NULL) + free_index(index); + return ret; +} + +/* Use the index to read len bytes from offset into buf, return bytes read or + negative for error (Z_DATA_ERROR or Z_MEM_ERROR). If data is requested past + the end of the uncompressed data, then extract() will return a value less + than len, indicating how much as actually read into buf. This function + should not return a data error unless the file was modified since the index + was generated. extract() may also return Z_ERRNO if there is an error on + reading or seeking the input file. 
*/ +local int extract(FILE *in, struct access *index, off_t offset, + unsigned char *buf, int len) +{ + int ret, skip; + z_stream strm; + struct point *here; + unsigned char input[CHUNK]; + unsigned char discard[WINSIZE]; + + /* proceed only if something reasonable to do */ + if (len < 0) + return 0; + + /* find where in stream to start */ + here = index->list; + ret = index->have; + while (--ret && here[1].out <= offset) + here++; + + /* initialize file and inflate state to start there */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit2(&strm, -15); /* raw inflate */ + if (ret != Z_OK) + return ret; + ret = fseeko(in, here->in - (here->bits ? 1 : 0), SEEK_SET); + if (ret == -1) + goto extract_ret; + if (here->bits) { + ret = getc(in); + if (ret == -1) { + ret = ferror(in) ? Z_ERRNO : Z_DATA_ERROR; + goto extract_ret; + } + (void)inflatePrime(&strm, here->bits, ret >> (8 - here->bits)); + } + (void)inflateSetDictionary(&strm, here->window, WINSIZE); + + /* skip uncompressed bytes until offset reached, then satisfy request */ + offset -= here->out; + strm.avail_in = 0; + skip = 1; /* while skipping to offset */ + do { + /* define where to put uncompressed data, and how much */ + if (offset == 0 && skip) { /* at offset now */ + strm.avail_out = len; + strm.next_out = buf; + skip = 0; /* only do this once */ + } + if (offset > WINSIZE) { /* skip WINSIZE bytes */ + strm.avail_out = WINSIZE; + strm.next_out = discard; + offset -= WINSIZE; + } + else if (offset != 0) { /* last skip */ + strm.avail_out = (unsigned)offset; + strm.next_out = discard; + offset = 0; + } + + /* uncompress until avail_out filled, or end of stream */ + do { + if (strm.avail_in == 0) { + strm.avail_in = fread(input, 1, CHUNK, in); + if (ferror(in)) { + ret = Z_ERRNO; + goto extract_ret; + } + if (strm.avail_in == 0) { + ret = Z_DATA_ERROR; + goto extract_ret; + } + strm.next_in = input; + } + ret = 
inflate(&strm, Z_NO_FLUSH); /* normal inflate */ + if (ret == Z_NEED_DICT) + ret = Z_DATA_ERROR; + if (ret == Z_MEM_ERROR || ret == Z_DATA_ERROR) + goto extract_ret; + if (ret == Z_STREAM_END) + break; + } while (strm.avail_out != 0); + + /* if reach end of stream, then don't keep trying to get more */ + if (ret == Z_STREAM_END) + break; + + /* do until offset reached and requested data read, or stream ends */ + } while (skip); + + /* compute number of uncompressed bytes read after offset */ + ret = skip ? 0 : len - strm.avail_out; + + /* clean up and return bytes read or error */ + extract_ret: + (void)inflateEnd(&strm); + return ret; +} + +/* Demonstrate the use of build_index() and extract() by processing the file + provided on the command line, and the extracting 16K from about 2/3rds of + the way through the uncompressed output, and writing that to stdout. */ +int main(int argc, char **argv) +{ + int len; + off_t offset; + FILE *in; + struct access *index = NULL; + unsigned char buf[CHUNK]; + + /* open input file */ + if (argc != 2) { + fprintf(stderr, "usage: zran file.gz\n"); + return 1; + } + in = fopen(argv[1], "rb"); + if (in == NULL) { + fprintf(stderr, "zran: could not open %s for reading\n", argv[1]); + return 1; + } + + /* build index */ + len = build_index(in, SPAN, &index); + if (len < 0) { + fclose(in); + switch (len) { + case Z_MEM_ERROR: + fprintf(stderr, "zran: out of memory\n"); + break; + case Z_DATA_ERROR: + fprintf(stderr, "zran: compressed data error in %s\n", argv[1]); + break; + case Z_ERRNO: + fprintf(stderr, "zran: read error on %s\n", argv[1]); + break; + default: + fprintf(stderr, "zran: error %d while building index\n", len); + } + return 1; + } + fprintf(stderr, "zran: built index with %d access points\n", len); + + /* use index by reading some bytes from an arbitrary offset */ + offset = (index->list[index->have - 1].out << 1) / 3; + len = extract(in, index, offset, buf, CHUNK); + if (len < 0) + fprintf(stderr, "zran: extraction 
failed: %s error\n", + len == Z_MEM_ERROR ? "out of memory" : "input corrupted"); + else { + fwrite(buf, 1, len, stdout); + fprintf(stderr, "zran: extracted %d bytes at %llu\n", len, offset); + } + + /* clean up and exit */ + free_index(index); + fclose(in); + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/FAQ nodejs-0.11.15/deps/zlib/FAQ --- nodejs-0.11.13/deps/zlib/FAQ 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/FAQ 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,368 @@ + + Frequently Asked Questions about zlib + + +If your question is not there, please check the zlib home page +http://zlib.net/ which may have more recent information. +The lastest zlib FAQ is at http://zlib.net/zlib_faq.html + + + 1. Is zlib Y2K-compliant? + + Yes. zlib doesn't handle dates. + + 2. Where can I get a Windows DLL version? + + The zlib sources can be compiled without change to produce a DLL. See the + file win32/DLL_FAQ.txt in the zlib distribution. Pointers to the + precompiled DLL are found in the zlib web site at http://zlib.net/ . + + 3. Where can I get a Visual Basic interface to zlib? + + See + * http://marknelson.us/1997/01/01/zlib-engine/ + * win32/DLL_FAQ.txt in the zlib distribution + + 4. compress() returns Z_BUF_ERROR. + + Make sure that before the call of compress(), the length of the compressed + buffer is equal to the available size of the compressed buffer and not + zero. For Visual Basic, check that this parameter is passed by reference + ("as any"), not by value ("as long"). + + 5. deflate() or inflate() returns Z_BUF_ERROR. + + Before making the call, make sure that avail_in and avail_out are not zero. + When setting the parameter flush equal to Z_FINISH, also make sure that + avail_out is big enough to allow processing all pending input. Note that a + Z_BUF_ERROR is not fatal--another call to deflate() or inflate() can be + made with more input or output space. 
A Z_BUF_ERROR may in fact be + unavoidable depending on how the functions are used, since it is not + possible to tell whether or not there is more output pending when + strm.avail_out returns with zero. See http://zlib.net/zlib_how.html for a + heavily annotated example. + + 6. Where's the zlib documentation (man pages, etc.)? + + It's in zlib.h . Examples of zlib usage are in the files test/example.c + and test/minigzip.c, with more in examples/ . + + 7. Why don't you use GNU autoconf or libtool or ...? + + Because we would like to keep zlib as a very small and simple package. + zlib is rather portable and doesn't need much configuration. + + 8. I found a bug in zlib. + + Most of the time, such problems are due to an incorrect usage of zlib. + Please try to reproduce the problem with a small program and send the + corresponding source to us at zlib@gzip.org . Do not send multi-megabyte + data files without prior agreement. + + 9. Why do I get "undefined reference to gzputc"? + + If "make test" produces something like + + example.o(.text+0x154): undefined reference to `gzputc' + + check that you don't have old files libz.* in /usr/lib, /usr/local/lib or + /usr/X11R6/lib. Remove any old versions, then do "make install". + +10. I need a Delphi interface to zlib. + + See the contrib/delphi directory in the zlib distribution. + +11. Can zlib handle .zip archives? + + Not by itself, no. See the directory contrib/minizip in the zlib + distribution. + +12. Can zlib handle .Z files? + + No, sorry. You have to spawn an uncompress or gunzip subprocess, or adapt + the code of uncompress on your own. + +13. How can I make a Unix shared library? + + By default a shared (and a static) library is built for Unix. So: + + make distclean + ./configure + make + +14. How do I install a shared zlib library on Unix? + + After the above, then: + + make install + + However, many flavors of Unix come with a shared zlib already installed. 
+ Before going to the trouble of compiling a shared version of zlib and + trying to install it, you may want to check if it's already there! If you + can #include , it's there. The -lz option will probably link to + it. You can check the version at the top of zlib.h or with the + ZLIB_VERSION symbol defined in zlib.h . + +15. I have a question about OttoPDF. + + We are not the authors of OttoPDF. The real author is on the OttoPDF web + site: Joel Hainley, jhainley@myndkryme.com. + +16. Can zlib decode Flate data in an Adobe PDF file? + + Yes. See http://www.pdflib.com/ . To modify PDF forms, see + http://sourceforge.net/projects/acroformtool/ . + +17. Why am I getting this "register_frame_info not found" error on Solaris? + + After installing zlib 1.1.4 on Solaris 2.6, running applications using zlib + generates an error such as: + + ld.so.1: rpm: fatal: relocation error: file /usr/local/lib/libz.so: + symbol __register_frame_info: referenced symbol not found + + The symbol __register_frame_info is not part of zlib, it is generated by + the C compiler (cc or gcc). You must recompile applications using zlib + which have this problem. This problem is specific to Solaris. See + http://www.sunfreeware.com for Solaris versions of zlib and applications + using zlib. + +18. Why does gzip give an error on a file I make with compress/deflate? + + The compress and deflate functions produce data in the zlib format, which + is different and incompatible with the gzip format. The gz* functions in + zlib on the other hand use the gzip format. Both the zlib and gzip formats + use the same compressed data format internally, but have different headers + and trailers around the compressed data. + +19. Ok, so why are there two different formats? + + The gzip format was designed to retain the directory information about a + single file, such as the name and last modification date. 
The zlib format + on the other hand was designed for in-memory and communication channel + applications, and has a much more compact header and trailer and uses a + faster integrity check than gzip. + +20. Well that's nice, but how do I make a gzip file in memory? + + You can request that deflate write the gzip format instead of the zlib + format using deflateInit2(). You can also request that inflate decode the + gzip format using inflateInit2(). Read zlib.h for more details. + +21. Is zlib thread-safe? + + Yes. However any library routines that zlib uses and any application- + provided memory allocation routines must also be thread-safe. zlib's gz* + functions use stdio library routines, and most of zlib's functions use the + library memory allocation routines by default. zlib's *Init* functions + allow for the application to provide custom memory allocation routines. + + Of course, you should only operate on any given zlib or gzip stream from a + single thread at a time. + +22. Can I use zlib in my commercial application? + + Yes. Please read the license in zlib.h. + +23. Is zlib under the GNU license? + + No. Please read the license in zlib.h. + +24. The license says that altered source versions must be "plainly marked". So + what exactly do I need to do to meet that requirement? + + You need to change the ZLIB_VERSION and ZLIB_VERNUM #defines in zlib.h. In + particular, the final version number needs to be changed to "f", and an + identification string should be appended to ZLIB_VERSION. Version numbers + x.x.x.f are reserved for modifications to zlib by others than the zlib + maintainers. For example, if the version of the base zlib you are altering + is "1.2.3.4", then in zlib.h you should change ZLIB_VERNUM to 0x123f, and + ZLIB_VERSION to something like "1.2.3.f-zachary-mods-v3". You can also + update the version strings in deflate.c and inftrees.c. 
+ + For altered source distributions, you should also note the origin and + nature of the changes in zlib.h, as well as in ChangeLog and README, along + with the dates of the alterations. The origin should include at least your + name (or your company's name), and an email address to contact for help or + issues with the library. + + Note that distributing a compiled zlib library along with zlib.h and + zconf.h is also a source distribution, and so you should change + ZLIB_VERSION and ZLIB_VERNUM and note the origin and nature of the changes + in zlib.h as you would for a full source distribution. + +25. Will zlib work on a big-endian or little-endian architecture, and can I + exchange compressed data between them? + + Yes and yes. + +26. Will zlib work on a 64-bit machine? + + Yes. It has been tested on 64-bit machines, and has no dependence on any + data types being limited to 32-bits in length. If you have any + difficulties, please provide a complete problem report to zlib@gzip.org + +27. Will zlib decompress data from the PKWare Data Compression Library? + + No. The PKWare DCL uses a completely different compressed data format than + does PKZIP and zlib. However, you can look in zlib's contrib/blast + directory for a possible solution to your problem. + +28. Can I access data randomly in a compressed stream? + + No, not without some preparation. If when compressing you periodically use + Z_FULL_FLUSH, carefully write all the pending data at those points, and + keep an index of those locations, then you can start decompression at those + points. You have to be careful to not use Z_FULL_FLUSH too often, since it + can significantly degrade compression. Alternatively, you can scan a + deflate stream once to generate an index, and then use that index for + random access. See examples/zran.c . + +29. Does zlib work on MVS, OS/390, CICS, etc.? + + It has in the past, but we have not heard of any recent evidence. 
There + were working ports of zlib 1.1.4 to MVS, but those links no longer work. + If you know of recent, successful applications of zlib on these operating + systems, please let us know. Thanks. + +30. Is there some simpler, easier to read version of inflate I can look at to + understand the deflate format? + + First off, you should read RFC 1951. Second, yes. Look in zlib's + contrib/puff directory. + +31. Does zlib infringe on any patents? + + As far as we know, no. In fact, that was originally the whole point behind + zlib. Look here for some more information: + + http://www.gzip.org/#faq11 + +32. Can zlib work with greater than 4 GB of data? + + Yes. inflate() and deflate() will process any amount of data correctly. + Each call of inflate() or deflate() is limited to input and output chunks + of the maximum value that can be stored in the compiler's "unsigned int" + type, but there is no limit to the number of chunks. Note however that the + strm.total_in and strm_total_out counters may be limited to 4 GB. These + counters are provided as a convenience and are not used internally by + inflate() or deflate(). The application can easily set up its own counters + updated after each call of inflate() or deflate() to count beyond 4 GB. + compress() and uncompress() may be limited to 4 GB, since they operate in a + single call. gzseek() and gztell() may be limited to 4 GB depending on how + zlib is compiled. See the zlibCompileFlags() function in zlib.h. + + The word "may" appears several times above since there is a 4 GB limit only + if the compiler's "long" type is 32 bits. If the compiler's "long" type is + 64 bits, then the limit is 16 exabytes. + +33. Does zlib have any security vulnerabilities? + + The only one that we are aware of is potentially in gzprintf(). 
If zlib is + compiled to use sprintf() or vsprintf(), then there is no protection + against a buffer overflow of an 8K string space (or other value as set by + gzbuffer()), other than the caller of gzprintf() assuring that the output + will not exceed 8K. On the other hand, if zlib is compiled to use + snprintf() or vsnprintf(), which should normally be the case, then there is + no vulnerability. The ./configure script will display warnings if an + insecure variation of sprintf() will be used by gzprintf(). Also the + zlibCompileFlags() function will return information on what variant of + sprintf() is used by gzprintf(). + + If you don't have snprintf() or vsnprintf() and would like one, you can + find a portable implementation here: + + http://www.ijs.si/software/snprintf/ + + Note that you should be using the most recent version of zlib. Versions + 1.1.3 and before were subject to a double-free vulnerability, and versions + 1.2.1 and 1.2.2 were subject to an access exception when decompressing + invalid compressed data. + +34. Is there a Java version of zlib? + + Probably what you want is to use zlib in Java. zlib is already included + as part of the Java SDK in the java.util.zip package. If you really want + a version of zlib written in the Java language, look on the zlib home + page for links: http://zlib.net/ . + +35. I get this or that compiler or source-code scanner warning when I crank it + up to maximally-pedantic. Can't you guys write proper code? + + Many years ago, we gave up attempting to avoid warnings on every compiler + in the universe. It just got to be a waste of time, and some compilers + were downright silly as well as contradicted each other. So now, we simply + make sure that the code always works. + +36. Valgrind (or some similar memory access checker) says that deflate is + performing a conditional jump that depends on an uninitialized value. + Isn't that a bug? + + No. 
That is intentional for performance reasons, and the output of deflate + is not affected. This only started showing up recently since zlib 1.2.x + uses malloc() by default for allocations, whereas earlier versions used + calloc(), which zeros out the allocated memory. Even though the code was + correct, versions 1.2.4 and later was changed to not stimulate these + checkers. + +37. Will zlib read the (insert any ancient or arcane format here) compressed + data format? + + Probably not. Look in the comp.compression FAQ for pointers to various + formats and associated software. + +38. How can I encrypt/decrypt zip files with zlib? + + zlib doesn't support encryption. The original PKZIP encryption is very + weak and can be broken with freely available programs. To get strong + encryption, use GnuPG, http://www.gnupg.org/ , which already includes zlib + compression. For PKZIP compatible "encryption", look at + http://www.info-zip.org/ + +39. What's the difference between the "gzip" and "deflate" HTTP 1.1 encodings? + + "gzip" is the gzip format, and "deflate" is the zlib format. They should + probably have called the second one "zlib" instead to avoid confusion with + the raw deflate compressed data format. While the HTTP 1.1 RFC 2616 + correctly points to the zlib specification in RFC 1950 for the "deflate" + transfer encoding, there have been reports of servers and browsers that + incorrectly produce or expect raw deflate data per the deflate + specification in RFC 1951, most notably Microsoft. So even though the + "deflate" transfer encoding using the zlib format would be the more + efficient approach (and in fact exactly what the zlib format was designed + for), using the "gzip" transfer encoding is probably more reliable due to + an unfortunate choice of name on the part of the HTTP 1.1 authors. + + Bottom line: use the gzip format for HTTP 1.1 encoding. + +40. Does zlib support the new "Deflate64" format introduced by PKWare? + + No. 
PKWare has apparently decided to keep that format proprietary, since + they have not documented it as they have previous compression formats. In + any case, the compression improvements are so modest compared to other more + modern approaches, that it's not worth the effort to implement. + +41. I'm having a problem with the zip functions in zlib, can you help? + + There are no zip functions in zlib. You are probably using minizip by + Giles Vollant, which is found in the contrib directory of zlib. It is not + part of zlib. In fact none of the stuff in contrib is part of zlib. The + files in there are not supported by the zlib authors. You need to contact + the authors of the respective contribution for help. + +42. The match.asm code in contrib is under the GNU General Public License. + Since it's part of zlib, doesn't that mean that all of zlib falls under the + GNU GPL? + + No. The files in contrib are not part of zlib. They were contributed by + other authors and are provided as a convenience to the user within the zlib + distribution. Each item in contrib has its own license. + +43. Is zlib subject to export controls? What is its ECCN? + + zlib is not subject to export controls, and so is classified as EAR99. + +44. Can you please sign these lengthy legal documents and fax them back to us + so that we can use your software in our product? + + No. Go away. Shoo. diff -Nru nodejs-0.11.13/deps/zlib/gzclose.c nodejs-0.11.15/deps/zlib/gzclose.c --- nodejs-0.11.13/deps/zlib/gzclose.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/gzclose.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,25 @@ +/* gzclose.c -- zlib gzclose() function + * Copyright (C) 2004, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* gzclose() is in a separate file so that it is linked in only if it is used. 
+ That way the other gzclose functions can be used instead to avoid linking in + unneeded compression or decompression routines. */ +int ZEXPORT gzclose(file) + gzFile file; +{ +#ifndef NO_GZCOMPRESS + gz_statep state; + + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + return state->mode == GZ_READ ? gzclose_r(file) : gzclose_w(file); +#else + return gzclose_r(file); +#endif +} diff -Nru nodejs-0.11.13/deps/zlib/gzguts.h nodejs-0.11.15/deps/zlib/gzguts.h --- nodejs-0.11.13/deps/zlib/gzguts.h 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/gzguts.h 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,209 @@ +/* gzguts.h -- zlib internal header definitions for gz* operations + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#ifdef _LARGEFILE64_SOURCE +# ifndef _LARGEFILE_SOURCE +# define _LARGEFILE_SOURCE 1 +# endif +# ifdef _FILE_OFFSET_BITS +# undef _FILE_OFFSET_BITS +# endif +#endif + +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + +#include +#include "zlib.h" +#ifdef STDC +# include +# include +# include +#endif +#include + +#ifdef _WIN32 +# include +#endif + +#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) +# include +#endif + +#ifdef WINAPI_FAMILY +# define open _open +# define read _read +# define write _write +# define close _close +#endif + +#ifdef NO_DEFLATE /* for compatibility with old definition */ +# define NO_GZCOMPRESS +#endif + +#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#if defined(__CYGWIN__) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#ifndef HAVE_VSNPRINTF +# ifdef MSDOS 
+/* vsnprintf may exist on some MS-DOS compilers (DJGPP?), + but for now we just assume it doesn't. */ +# define NO_vsnprintf +# endif +# ifdef __TURBOC__ +# define NO_vsnprintf +# endif +# ifdef WIN32 +/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */ +# if !defined(vsnprintf) && !defined(NO_vsnprintf) +# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) +# define vsnprintf _vsnprintf +# endif +# endif +# endif +# ifdef __SASC +# define NO_vsnprintf +# endif +# ifdef VMS +# define NO_vsnprintf +# endif +# ifdef __OS400__ +# define NO_vsnprintf +# endif +# ifdef __MVS__ +# define NO_vsnprintf +# endif +#endif + +/* unlike snprintf (which is required in C99, yet still not supported by + Microsoft more than a decade later!), _snprintf does not guarantee null + termination of the result -- however this is only used in gzlib.c where + the result is assured to fit in the space provided */ +#ifdef _MSC_VER +# define snprintf _snprintf +#endif + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + +/* gz* functions always use library allocation functions */ +#ifndef STDC + extern voidp malloc OF((uInt size)); + extern void free OF((voidpf ptr)); +#endif + +/* get errno and strerror definition */ +#if defined UNDER_CE +# include +# define zstrerror() gz_strwinerror((DWORD)GetLastError()) +#else +# ifndef NO_STRERROR +# include +# define zstrerror() strerror(errno) +# else +# define zstrerror() "stdio error (consult errno)" +# endif +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); +#endif + +/* default memLevel */ +#if MAX_MEM_LEVEL >= 8 +# define 
DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif + +/* default i/o buffer size -- double this for output when reading (this and + twice this must be able to fit in an unsigned type) */ +#define GZBUFSIZE 8192 + +/* gzip modes, also provide a little integrity check on the passed structure */ +#define GZ_NONE 0 +#define GZ_READ 7247 +#define GZ_WRITE 31153 +#define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ + +/* values for gz_state how */ +#define LOOK 0 /* look for a gzip header */ +#define COPY 1 /* copy input directly */ +#define GZIP 2 /* decompress a gzip stream */ + +/* internal gzip file state data structure */ +typedef struct { + /* exposed contents for gzgetc() macro */ + struct gzFile_s x; /* "x" for exposed */ + /* x.have: number of bytes available at x.next */ + /* x.next: next output data to deliver or write */ + /* x.pos: current position in uncompressed data */ + /* used for both reading and writing */ + int mode; /* see gzip modes above */ + int fd; /* file descriptor */ + char *path; /* path or fd for error messages */ + unsigned size; /* buffer size, zero if not allocated yet */ + unsigned want; /* requested buffer size, default is GZBUFSIZE */ + unsigned char *in; /* input buffer */ + unsigned char *out; /* output buffer (double-sized when reading) */ + int direct; /* 0 if processing gzip, 1 if transparent */ + /* just for reading */ + int how; /* 0: get header, 1: copy, 2: decompress */ + z_off64_t start; /* where the gzip data started, for rewinding */ + int eof; /* true if end of input file reached */ + int past; /* true if read requested past end */ + /* just for writing */ + int level; /* compression level */ + int strategy; /* compression strategy */ + /* seek request */ + z_off64_t skip; /* amount to skip (already rewound if backwards) */ + int seek; /* true if seek request pending */ + /* error information */ + int err; /* error code */ + char *msg; /* error message */ + /* zlib inflate or deflate 
stream */ + z_stream strm; /* stream structure in-place (not a pointer) */ +} gz_state; +typedef gz_state FAR *gz_statep; + +/* shared functions */ +void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); +#if defined UNDER_CE +char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); +#endif + +/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t + value -- needed when comparing unsigned to z_off64_t, which is signed + (possible z_off64_t types off_t, off64_t, and long are all signed) */ +#ifdef INT_MAX +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) +#else +unsigned ZLIB_INTERNAL gz_intmax OF((void)); +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) +#endif diff -Nru nodejs-0.11.13/deps/zlib/gzio.c nodejs-0.11.15/deps/zlib/gzio.c --- nodejs-0.11.13/deps/zlib/gzio.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/gzio.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,1033 +0,0 @@ -/* gzio.c -- IO on .gz files - * Copyright (C) 1995-2005 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - * - * Compile this file with -DNO_GZCOMPRESS to avoid the compression code. 
- */ - -/* @(#) $Id: gzio.c,v 3.7 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ - -#include - -#include "zutil.h" - -#ifdef NO_DEFLATE /* for compatibility with old definition */ -# define NO_GZCOMPRESS -#endif - -#ifndef NO_DUMMY_DECL -struct internal_state {int dummy;}; /* for buggy compilers */ -#endif - -#ifndef Z_BUFSIZE -# ifdef MAXSEG_64K -# define Z_BUFSIZE 4096 /* minimize memory usage for 16-bit DOS */ -# else -# define Z_BUFSIZE 16384 -# endif -#endif -#ifndef Z_PRINTF_BUFSIZE -# define Z_PRINTF_BUFSIZE 4096 -#endif - -#ifdef __MVS__ -# pragma map (fdopen , "\174\174FDOPEN") - FILE *fdopen(int, const char *); -#endif - -#ifndef STDC -extern voidp malloc OF((uInt size)); -extern void free OF((voidpf ptr)); -#endif - -#define ALLOC(size) malloc(size) -#define TRYFREE(p) {if (p) free(p);} - -static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ -#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define RESERVED 0xE0 /* bits 5..7: reserved */ - -typedef struct gz_stream { - z_stream stream; - int z_err; /* error code for last stream operation */ - int z_eof; /* set if end of input file */ - FILE *file; /* .gz file */ - Byte *inbuf; /* input buffer */ - Byte *outbuf; /* output buffer */ - uLong crc; /* crc32 of uncompressed data */ - char *msg; /* error message */ - char *path; /* path name for debugging only */ - int transparent; /* 1 if input file is not a .gz file */ - char mode; /* 'w' or 'r' */ - z_off_t start; /* start of compressed data in file (header skipped) */ - z_off_t in; /* bytes into deflate or inflate */ - z_off_t out; /* bytes out of deflate or inflate */ - int back; /* one character push-back */ - int last; /* true if push-back is last 
character */ -} gz_stream; - - -local gzFile gz_open OF((const char *path, const char *mode, int fd)); -#ifndef NO_GZCOMPRESS // Google Gears addition, to avoid compile warning -local int do_flush OF((gzFile file, int flush)); -#endif -local int get_byte OF((gz_stream *s)); -local void check_header OF((gz_stream *s)); -local int destroy OF((gz_stream *s)); -#ifndef NO_GZCOMPRESS // Google Gears addition, to avoid compile warning -local void putLong OF((FILE *file, uLong x)); -#endif -local uLong getLong OF((gz_stream *s)); - -/* =========================================================================== - Opens a gzip (.gz) file for reading or writing. The mode parameter - is as in fopen ("rb" or "wb"). The file is given either by file descriptor - or path name (if fd == -1). - gz_open returns NULL if the file could not be opened or if there was - insufficient memory to allocate the (de)compression state; errno - can be checked to distinguish the two cases (if errno is zero, the - zlib error is Z_MEM_ERROR). 
-*/ -local gzFile gz_open (path, mode, fd) - const char *path; - const char *mode; - int fd; -{ - int err; - int level = Z_DEFAULT_COMPRESSION; /* compression level */ - int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */ - char *p = (char*)mode; - gz_stream *s; - char fmode[80]; /* copy of mode, without the compression level */ - char *m = fmode; - - if (!path || !mode) return Z_NULL; - - s = (gz_stream *)ALLOC(sizeof(gz_stream)); - if (!s) return Z_NULL; - - s->stream.zalloc = (alloc_func)0; - s->stream.zfree = (free_func)0; - s->stream.opaque = (voidpf)0; - s->stream.next_in = s->inbuf = Z_NULL; - s->stream.next_out = s->outbuf = Z_NULL; - s->stream.avail_in = s->stream.avail_out = 0; - s->file = NULL; - s->z_err = Z_OK; - s->z_eof = 0; - s->in = 0; - s->out = 0; - s->back = EOF; - s->crc = crc32(0L, Z_NULL, 0); - s->msg = NULL; - s->transparent = 0; - - s->path = (char*)ALLOC(strlen(path)+1); - if (s->path == NULL) { - return destroy(s), (gzFile)Z_NULL; - } - strcpy(s->path, path); /* do this early for debugging */ - - s->mode = '\0'; - do { - if (*p == 'r') s->mode = 'r'; - if (*p == 'w' || *p == 'a') s->mode = 'w'; - if (*p >= '0' && *p <= '9') { - level = *p - '0'; - } else if (*p == 'f') { - strategy = Z_FILTERED; - } else if (*p == 'h') { - strategy = Z_HUFFMAN_ONLY; - } else if (*p == 'R') { - strategy = Z_RLE; - } else { - *m++ = *p; /* copy the mode */ - } - } while (*p++ && m != fmode + sizeof(fmode)); - if (s->mode == '\0') return destroy(s), (gzFile)Z_NULL; - - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - err = Z_STREAM_ERROR; -#else - err = deflateInit2(&(s->stream), level, - Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy); - /* windowBits is passed < 0 to suppress zlib header */ - - s->stream.next_out = s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); -#endif - if (err != Z_OK || s->outbuf == Z_NULL) { - return destroy(s), (gzFile)Z_NULL; - } - } else { - s->stream.next_in = s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); - - err = inflateInit2(&(s->stream), 
-MAX_WBITS); - /* windowBits is passed < 0 to tell that there is no zlib header. - * Note that in this case inflate *requires* an extra "dummy" byte - * after the compressed stream in order to complete decompression and - * return Z_STREAM_END. Here the gzip CRC32 ensures that 4 bytes are - * present after the compressed stream. - */ - if (err != Z_OK || s->inbuf == Z_NULL) { - return destroy(s), (gzFile)Z_NULL; - } - } - s->stream.avail_out = Z_BUFSIZE; - - errno = 0; - s->file = fd < 0 ? F_OPEN(path, fmode) : (FILE*)fdopen(fd, fmode); - - if (s->file == NULL) { - return destroy(s), (gzFile)Z_NULL; - } - if (s->mode == 'w') { - /* Write a very simple .gz header: - */ - fprintf(s->file, "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], gz_magic[1], - Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, 0 /*xflags*/, OS_CODE); - s->start = 10L; - /* We use 10L instead of ftell(s->file) to because ftell causes an - * fflush on some systems. This version of the library doesn't use - * start anyway in write mode, so this initialization is not - * necessary. - */ - } else { - check_header(s); /* skip the .gz header */ - s->start = ftell(s->file) - s->stream.avail_in; - } - - return (gzFile)s; -} - -/* =========================================================================== - Opens a gzip (.gz) file for reading or writing. -*/ -gzFile ZEXPORT gzopen (path, mode) - const char *path; - const char *mode; -{ - return gz_open (path, mode, -1); -} - -/* =========================================================================== - Associate a gzFile with the file descriptor fd. fd is not dup'ed here - to mimic the behavio(u)r of fdopen. 
-*/ -gzFile ZEXPORT gzdopen (fd, mode) - int fd; - const char *mode; -{ - char name[46]; /* allow for up to 128-bit integers */ - - if (fd < 0) return (gzFile)Z_NULL; - sprintf(name, "", fd); /* for debugging */ - - return gz_open (name, mode, fd); -} - -/* =========================================================================== - * Update the compression level and strategy - */ -int ZEXPORT gzsetparams (file, level, strategy) - gzFile file; - int level; - int strategy; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - /* Make room to allow flushing */ - if (s->stream.avail_out == 0) { - - s->stream.next_out = s->outbuf; - if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) { - s->z_err = Z_ERRNO; - } - s->stream.avail_out = Z_BUFSIZE; - } - - return deflateParams (&(s->stream), level, strategy); -} - -/* =========================================================================== - Read a byte from a gz_stream; update next_in and avail_in. Return EOF - for end of file. - IN assertion: the stream s has been sucessfully opened for reading. -*/ -local int get_byte(s) - gz_stream *s; -{ - if (s->z_eof) return EOF; - if (s->stream.avail_in == 0) { - errno = 0; - s->stream.avail_in = (uInt)fread(s->inbuf, 1, Z_BUFSIZE, s->file); - if (s->stream.avail_in == 0) { - s->z_eof = 1; - if (ferror(s->file)) s->z_err = Z_ERRNO; - return EOF; - } - s->stream.next_in = s->inbuf; - } - s->stream.avail_in--; - return *(s->stream.next_in)++; -} - -/* =========================================================================== - Check the gzip header of a gz_stream opened for reading. Set the stream - mode to transparent if the gzip magic header is not present; set s->err - to Z_DATA_ERROR if the magic header is present but the rest of the header - is incorrect. - IN assertion: the stream s has already been created sucessfully; - s->stream.avail_in is zero for the first time, but may be non-zero - for concatenated .gz files. 
-*/ -local void check_header(s) - gz_stream *s; -{ - int method; /* method byte */ - int flags; /* flags byte */ - uInt len; - int c; - - /* Assure two bytes in the buffer so we can peek ahead -- handle case - where first byte of header is at the end of the buffer after the last - gzip segment */ - len = s->stream.avail_in; - if (len < 2) { - if (len) s->inbuf[0] = s->stream.next_in[0]; - errno = 0; - len = (uInt)fread(s->inbuf + len, 1, Z_BUFSIZE >> len, s->file); - if (len == 0 && ferror(s->file)) s->z_err = Z_ERRNO; - s->stream.avail_in += len; - s->stream.next_in = s->inbuf; - if (s->stream.avail_in < 2) { - s->transparent = s->stream.avail_in; - return; - } - } - - /* Peek ahead to check the gzip magic header */ - if (s->stream.next_in[0] != gz_magic[0] || - s->stream.next_in[1] != gz_magic[1]) { - s->transparent = 1; - return; - } - s->stream.avail_in -= 2; - s->stream.next_in += 2; - - /* Check the rest of the gzip header */ - method = get_byte(s); - flags = get_byte(s); - if (method != Z_DEFLATED || (flags & RESERVED) != 0) { - s->z_err = Z_DATA_ERROR; - return; - } - - /* Discard time, xflags and OS code: */ - for (len = 0; len < 6; len++) (void)get_byte(s); - - if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */ - len = (uInt)get_byte(s); - len += ((uInt)get_byte(s))<<8; - /* len is garbage if EOF but the loop below will quit anyway */ - while (len-- != 0 && get_byte(s) != EOF) ; - } - if ((flags & ORIG_NAME) != 0) { /* skip the original file name */ - while ((c = get_byte(s)) != 0 && c != EOF) ; - } - if ((flags & COMMENT) != 0) { /* skip the .gz file comment */ - while ((c = get_byte(s)) != 0 && c != EOF) ; - } - if ((flags & HEAD_CRC) != 0) { /* skip the header crc */ - for (len = 0; len < 2; len++) (void)get_byte(s); - } - s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; -} - - /* =========================================================================== - * Cleanup then free the given gz_stream. Return a zlib error code. 
- Try freeing in the reverse order of allocations. - */ -local int destroy (s) - gz_stream *s; -{ - int err = Z_OK; - - if (!s) return Z_STREAM_ERROR; - - TRYFREE(s->msg); - - if (s->stream.state != NULL) { - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - err = Z_STREAM_ERROR; -#else - err = deflateEnd(&(s->stream)); -#endif - } else if (s->mode == 'r') { - err = inflateEnd(&(s->stream)); - } - } - if (s->file != NULL && fclose(s->file)) { -#ifdef ESPIPE - if (errno != ESPIPE) /* fclose is broken for pipes in HP/UX */ -#endif - err = Z_ERRNO; - } - if (s->z_err < 0) err = s->z_err; - - TRYFREE(s->inbuf); - TRYFREE(s->outbuf); - TRYFREE(s->path); - TRYFREE(s); - return err; -} - -/* =========================================================================== - Reads the given number of uncompressed bytes from the compressed file. - gzread returns the number of bytes actually read (0 for end of file). -*/ -int ZEXPORT gzread (file, buf, len) - gzFile file; - voidp buf; - unsigned len; -{ - gz_stream *s = (gz_stream*)file; - Bytef *start = (Bytef*)buf; /* starting point for crc computation */ - Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */ - - if (s == NULL || s->mode != 'r') return Z_STREAM_ERROR; - - if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1; - if (s->z_err == Z_STREAM_END) return 0; /* EOF */ - - next_out = (Byte*)buf; - s->stream.next_out = (Bytef*)buf; - s->stream.avail_out = len; - - if (s->stream.avail_out && s->back != EOF) { - *next_out++ = s->back; - s->stream.next_out++; - s->stream.avail_out--; - s->back = EOF; - s->out++; - start++; - if (s->last) { - s->z_err = Z_STREAM_END; - return 1; - } - } - - while (s->stream.avail_out != 0) { - - if (s->transparent) { - /* Copy first the lookahead bytes: */ - uInt n = s->stream.avail_in; - if (n > s->stream.avail_out) n = s->stream.avail_out; - if (n > 0) { - zmemcpy(s->stream.next_out, s->stream.next_in, n); - next_out += n; - s->stream.next_out = next_out; - 
s->stream.next_in += n; - s->stream.avail_out -= n; - s->stream.avail_in -= n; - } - if (s->stream.avail_out > 0) { - s->stream.avail_out -= - (uInt)fread(next_out, 1, s->stream.avail_out, s->file); - } - len -= s->stream.avail_out; - s->in += len; - s->out += len; - if (len == 0) s->z_eof = 1; - return (int)len; - } - if (s->stream.avail_in == 0 && !s->z_eof) { - - errno = 0; - s->stream.avail_in = (uInt)fread(s->inbuf, 1, Z_BUFSIZE, s->file); - if (s->stream.avail_in == 0) { - s->z_eof = 1; - if (ferror(s->file)) { - s->z_err = Z_ERRNO; - break; - } - } - s->stream.next_in = s->inbuf; - } - s->in += s->stream.avail_in; - s->out += s->stream.avail_out; - s->z_err = inflate(&(s->stream), Z_NO_FLUSH); - s->in -= s->stream.avail_in; - s->out -= s->stream.avail_out; - - if (s->z_err == Z_STREAM_END) { - /* Check CRC and original size */ - s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start)); - start = s->stream.next_out; - - if (getLong(s) != s->crc) { - s->z_err = Z_DATA_ERROR; - } else { - (void)getLong(s); - /* The uncompressed length returned by above getlong() may be - * different from s->out in case of concatenated .gz files. - * Check for such files: - */ - check_header(s); - if (s->z_err == Z_OK) { - inflateReset(&(s->stream)); - s->crc = crc32(0L, Z_NULL, 0); - } - } - } - if (s->z_err != Z_OK || s->z_eof) break; - } - s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start)); - - if (len == s->stream.avail_out && - (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO)) - return -1; - return (int)(len - s->stream.avail_out); -} - - -/* =========================================================================== - Reads one byte from the compressed file. gzgetc returns this byte - or -1 in case of end of file or error. -*/ -int ZEXPORT gzgetc(file) - gzFile file; -{ - unsigned char c; - - return gzread(file, &c, 1) == 1 ? 
c : -1; -} - - -/* =========================================================================== - Push one byte back onto the stream. -*/ -int ZEXPORT gzungetc(c, file) - int c; - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'r' || c == EOF || s->back != EOF) return EOF; - s->back = c; - s->out--; - s->last = (s->z_err == Z_STREAM_END); - if (s->last) s->z_err = Z_OK; - s->z_eof = 0; - return c; -} - - -/* =========================================================================== - Reads bytes from the compressed file until len-1 characters are - read, or a newline character is read and transferred to buf, or an - end-of-file condition is encountered. The string is then terminated - with a null character. - gzgets returns buf, or Z_NULL in case of error. - - The current implementation is not optimized at all. -*/ -char * ZEXPORT gzgets(file, buf, len) - gzFile file; - char *buf; - int len; -{ - char *b = buf; - if (buf == Z_NULL || len <= 0) return Z_NULL; - - while (--len > 0 && gzread(file, buf, 1) == 1 && *buf++ != '\n') ; - *buf = '\0'; - return b == buf && len > 0 ? Z_NULL : b; -} - - -#ifndef NO_GZCOMPRESS -/* =========================================================================== - Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of bytes actually written (0 in case of error). 
-*/ -int ZEXPORT gzwrite (file, buf, len) - gzFile file; - voidpc buf; - unsigned len; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - s->stream.next_in = (Bytef*)buf; - s->stream.avail_in = len; - - while (s->stream.avail_in != 0) { - - if (s->stream.avail_out == 0) { - - s->stream.next_out = s->outbuf; - if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) { - s->z_err = Z_ERRNO; - break; - } - s->stream.avail_out = Z_BUFSIZE; - } - s->in += s->stream.avail_in; - s->out += s->stream.avail_out; - s->z_err = deflate(&(s->stream), Z_NO_FLUSH); - s->in -= s->stream.avail_in; - s->out -= s->stream.avail_out; - if (s->z_err != Z_OK) break; - } - s->crc = crc32(s->crc, (const Bytef *)buf, len); - - return (int)(len - s->stream.avail_in); -} - - -/* =========================================================================== - Converts, formats, and writes the args to the compressed file under - control of the format string, as in fprintf. gzprintf returns the number of - uncompressed bytes actually written (0 in case of error). -*/ -#ifdef STDC -#include - -int ZEXPORTVA gzprintf (gzFile file, const char *format, /* args */ ...) 
-{ - char buf[Z_PRINTF_BUFSIZE]; - va_list va; - int len; - - buf[sizeof(buf) - 1] = 0; - va_start(va, format); -#ifdef NO_vsnprintf -# ifdef HAS_vsprintf_void - (void)vsprintf(buf, format, va); - va_end(va); - for (len = 0; len < sizeof(buf); len++) - if (buf[len] == 0) break; -# else - len = vsprintf(buf, format, va); - va_end(va); -# endif -#else -# ifdef HAS_vsnprintf_void - (void)vsnprintf(buf, sizeof(buf), format, va); - va_end(va); - len = strlen(buf); -# else - len = vsnprintf(buf, sizeof(buf), format, va); - va_end(va); -# endif -#endif - if (len <= 0 || len >= (int)sizeof(buf) || buf[sizeof(buf) - 1] != 0) - return 0; - return gzwrite(file, buf, (unsigned)len); -} -#else /* not ANSI C */ - -int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, - a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) - gzFile file; - const char *format; - int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, - a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; -{ - char buf[Z_PRINTF_BUFSIZE]; - int len; - - buf[sizeof(buf) - 1] = 0; -#ifdef NO_snprintf -# ifdef HAS_sprintf_void - sprintf(buf, format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); - for (len = 0; len < sizeof(buf); len++) - if (buf[len] == 0) break; -# else - len = sprintf(buf, format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); -# endif -#else -# ifdef HAS_snprintf_void - snprintf(buf, sizeof(buf), format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); - len = strlen(buf); -# else - len = snprintf(buf, sizeof(buf), format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); -# endif -#endif - if (len <= 0 || len >= sizeof(buf) || buf[sizeof(buf) - 1] != 0) - return 0; - return gzwrite(file, buf, len); -} -#endif - -/* =========================================================================== - Writes c, converted 
to an unsigned char, into the compressed file. - gzputc returns the value that was written, or -1 in case of error. -*/ -int ZEXPORT gzputc(file, c) - gzFile file; - int c; -{ - unsigned char cc = (unsigned char) c; /* required for big endian systems */ - - return gzwrite(file, &cc, 1) == 1 ? (int)cc : -1; -} - - -/* =========================================================================== - Writes the given null-terminated string to the compressed file, excluding - the terminating null character. - gzputs returns the number of characters written, or -1 in case of error. -*/ -int ZEXPORT gzputs(file, s) - gzFile file; - const char *s; -{ - return gzwrite(file, (char*)s, (unsigned)strlen(s)); -} - - -/* =========================================================================== - Flushes all pending output into the compressed file. The parameter - flush is as in the deflate() function. -*/ -local int do_flush (file, flush) - gzFile file; - int flush; -{ - uInt len; - int done = 0; - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - s->stream.avail_in = 0; /* should be zero already anyway */ - - for (;;) { - len = Z_BUFSIZE - s->stream.avail_out; - - if (len != 0) { - if ((uInt)fwrite(s->outbuf, 1, len, s->file) != len) { - s->z_err = Z_ERRNO; - return Z_ERRNO; - } - s->stream.next_out = s->outbuf; - s->stream.avail_out = Z_BUFSIZE; - } - if (done) break; - s->out += s->stream.avail_out; - s->z_err = deflate(&(s->stream), flush); - s->out -= s->stream.avail_out; - - /* Ignore the second of two consecutive flushes: */ - if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK; - - /* deflate has finished flushing only when it hasn't used up - * all the available space in the output buffer: - */ - done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END); - - if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break; - } - return s->z_err == Z_STREAM_END ? 
Z_OK : s->z_err; -} - -int ZEXPORT gzflush (file, flush) - gzFile file; - int flush; -{ - gz_stream *s = (gz_stream*)file; - int err = do_flush (file, flush); - - if (err) return err; - fflush(s->file); - return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; -} -#endif /* NO_GZCOMPRESS */ - -/* =========================================================================== - Sets the starting position for the next gzread or gzwrite on the given - compressed file. The offset represents a number of bytes in the - gzseek returns the resulting offset location as measured in bytes from - the beginning of the uncompressed stream, or -1 in case of error. - SEEK_END is not implemented, returns error. - In this version of the library, gzseek can be extremely slow. -*/ -z_off_t ZEXPORT gzseek (file, offset, whence) - gzFile file; - z_off_t offset; - int whence; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || whence == SEEK_END || - s->z_err == Z_ERRNO || s->z_err == Z_DATA_ERROR) { - return -1L; - } - - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - return -1L; -#else - if (whence == SEEK_SET) { - offset -= s->in; - } - if (offset < 0) return -1L; - - /* At this point, offset is the number of zero bytes to write. 
*/ - if (s->inbuf == Z_NULL) { - s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); /* for seeking */ - if (s->inbuf == Z_NULL) return -1L; - zmemzero(s->inbuf, Z_BUFSIZE); - } - while (offset > 0) { - uInt size = Z_BUFSIZE; - if (offset < Z_BUFSIZE) size = (uInt)offset; - - size = gzwrite(file, s->inbuf, size); - if (size == 0) return -1L; - - offset -= size; - } - return s->in; -#endif - } - /* Rest of function is for reading only */ - - /* compute absolute position */ - if (whence == SEEK_CUR) { - offset += s->out; - } - if (offset < 0) return -1L; - - if (s->transparent) { - /* map to fseek */ - s->back = EOF; - s->stream.avail_in = 0; - s->stream.next_in = s->inbuf; - if (fseek(s->file, offset, SEEK_SET) < 0) return -1L; - - s->in = s->out = offset; - return offset; - } - - /* For a negative seek, rewind and use positive seek */ - if (offset >= s->out) { - offset -= s->out; - } else if (gzrewind(file) < 0) { - return -1L; - } - /* offset is now the number of bytes to skip. */ - - if (offset != 0 && s->outbuf == Z_NULL) { - s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); - if (s->outbuf == Z_NULL) return -1L; - } - if (offset && s->back != EOF) { - s->back = EOF; - s->out++; - offset--; - if (s->last) s->z_err = Z_STREAM_END; - } - while (offset > 0) { - int size = Z_BUFSIZE; - if (offset < Z_BUFSIZE) size = (int)offset; - - size = gzread(file, s->outbuf, (uInt)size); - if (size <= 0) return -1L; - offset -= size; - } - return s->out; -} - -/* =========================================================================== - Rewinds input file. 
-*/ -int ZEXPORT gzrewind (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'r') return -1; - - s->z_err = Z_OK; - s->z_eof = 0; - s->back = EOF; - s->stream.avail_in = 0; - s->stream.next_in = s->inbuf; - s->crc = crc32(0L, Z_NULL, 0); - if (!s->transparent) (void)inflateReset(&s->stream); - s->in = 0; - s->out = 0; - return fseek(s->file, s->start, SEEK_SET); -} - -/* =========================================================================== - Returns the starting position for the next gzread or gzwrite on the - given compressed file. This position represents a number of bytes in the - uncompressed data stream. -*/ -z_off_t ZEXPORT gztell (file) - gzFile file; -{ - return gzseek(file, 0L, SEEK_CUR); -} - -/* =========================================================================== - Returns 1 when EOF has previously been detected reading the given - input stream, otherwise zero. -*/ -int ZEXPORT gzeof (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - /* With concatenated compressed files that can have embedded - * crc trailers, z_eof is no longer the only/best indicator of EOF - * on a gz_stream. Handle end-of-stream error explicitly here. - */ - if (s == NULL || s->mode != 'r') return 0; - if (s->z_eof) return 1; - return s->z_err == Z_STREAM_END; -} - -/* =========================================================================== - Returns 1 if reading and doing so transparently, otherwise zero. 
-*/ -int ZEXPORT gzdirect (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'r') return 0; - return s->transparent; -} - -#ifndef NO_GZCOMPRESS // Google Gears addition, to avoid compile warning -/* =========================================================================== - Outputs a long in LSB order to the given file -*/ -local void putLong (file, x) - FILE *file; - uLong x; -{ - int n; - for (n = 0; n < 4; n++) { - fputc((int)(x & 0xff), file); - x >>= 8; - } -} -#endif - -/* =========================================================================== - Reads a long in LSB order from the given gz_stream. Sets z_err in case - of error. -*/ -local uLong getLong (s) - gz_stream *s; -{ - uLong x = (uLong)get_byte(s); - int c; - - x += ((uLong)get_byte(s))<<8; - x += ((uLong)get_byte(s))<<16; - c = get_byte(s); - if (c == EOF) s->z_err = Z_DATA_ERROR; - x += ((uLong)c)<<24; - return x; -} - -/* =========================================================================== - Flushes all pending output if necessary, closes the compressed file - and deallocates all the (de)compression state. -*/ -int ZEXPORT gzclose (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL) return Z_STREAM_ERROR; - - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - return Z_STREAM_ERROR; -#else - if (do_flush (file, Z_FINISH) != Z_OK) - return destroy((gz_stream*)file); - - putLong (s->file, s->crc); - putLong (s->file, (uLong)(s->in & 0xffffffff)); -#endif - } - return destroy((gz_stream*)file); -} - -// Google Gears modification: strerror is not present on WinCE. -#if defined(STDC) && !defined(_WIN32_WCE) -# define zstrerror(errnum) strerror(errnum) -#else -# define zstrerror(errnum) "" -#endif - -/* =========================================================================== - Returns the error message for the last error which occurred on the - given compressed file. errnum is set to zlib error number. 
If an - error occurred in the file system and not in the compression library, - errnum is set to Z_ERRNO and the application may consult errno - to get the exact error code. -*/ -const char * ZEXPORT gzerror (file, errnum) - gzFile file; - int *errnum; -{ - char *m; - gz_stream *s = (gz_stream*)file; - - if (s == NULL) { - *errnum = Z_STREAM_ERROR; - return (const char*)ERR_MSG(Z_STREAM_ERROR); - } - *errnum = s->z_err; - if (*errnum == Z_OK) return (const char*)""; - - m = (char*)(*errnum == Z_ERRNO ? zstrerror(errno) : s->stream.msg); - - if (m == NULL || *m == '\0') m = (char*)ERR_MSG(s->z_err); - - TRYFREE(s->msg); - s->msg = (char*)ALLOC(strlen(s->path) + strlen(m) + 3); - if (s->msg == Z_NULL) return (const char*)ERR_MSG(Z_MEM_ERROR); - strcpy(s->msg, s->path); - strcat(s->msg, ": "); - strcat(s->msg, m); - return (const char*)s->msg; -} - -/* =========================================================================== - Clear the error and end-of-file flags, and do the same for the real file. 
-*/ -void ZEXPORT gzclearerr (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL) return; - if (s->z_err != Z_STREAM_END) s->z_err = Z_OK; - s->z_eof = 0; - clearerr(s->file); -} diff -Nru nodejs-0.11.13/deps/zlib/gzlib.c nodejs-0.11.15/deps/zlib/gzlib.c --- nodejs-0.11.13/deps/zlib/gzlib.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/gzlib.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,634 @@ +/* gzlib.c -- zlib functions common to reading and writing gzip files + * Copyright (C) 2004, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +#if defined(_WIN32) && !defined(__BORLANDC__) +# define LSEEK _lseeki64 +#else +#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 +# define LSEEK lseek64 +#else +# define LSEEK lseek +#endif +#endif + +/* Local functions */ +local void gz_reset OF((gz_statep)); +local gzFile gz_open OF((const void *, int, const char *)); + +#if defined UNDER_CE + +/* Map the Windows error number in ERROR to a locale-dependent error message + string and return a pointer to it. Typically, the values for ERROR come + from GetLastError. + + The string pointed to shall not be modified by the application, but may be + overwritten by a subsequent call to gz_strwinerror + + The gz_strwinerror function does not change the current setting of + GetLastError. */ +char ZLIB_INTERNAL *gz_strwinerror (error) + DWORD error; +{ + static char buf[1024]; + + wchar_t *msgbuf; + DWORD lasterr = GetLastError(); + DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM + | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, + error, + 0, /* Default language */ + (LPVOID)&msgbuf, + 0, + NULL); + if (chars != 0) { + /* If there is an \r\n appended, zap it. 
*/ + if (chars >= 2 + && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { + chars -= 2; + msgbuf[chars] = 0; + } + + if (chars > sizeof (buf) - 1) { + chars = sizeof (buf) - 1; + msgbuf[chars] = 0; + } + + wcstombs(buf, msgbuf, chars + 1); + LocalFree(msgbuf); + } + else { + sprintf(buf, "unknown win32 error (%ld)", error); + } + + SetLastError(lasterr); + return buf; +} + +#endif /* UNDER_CE */ + +/* Reset gzip file state */ +local void gz_reset(state) + gz_statep state; +{ + state->x.have = 0; /* no output data available */ + if (state->mode == GZ_READ) { /* for reading ... */ + state->eof = 0; /* not at end of file */ + state->past = 0; /* have not read past end yet */ + state->how = LOOK; /* look for gzip header */ + } + state->seek = 0; /* no seek request pending */ + gz_error(state, Z_OK, NULL); /* clear error */ + state->x.pos = 0; /* no uncompressed data yet */ + state->strm.avail_in = 0; /* no input data yet */ +} + +/* Open a gzip file either by name or file descriptor. 
*/ +local gzFile gz_open(path, fd, mode) + const void *path; + int fd; + const char *mode; +{ + gz_statep state; + size_t len; + int oflag; +#ifdef O_CLOEXEC + int cloexec = 0; +#endif +#ifdef O_EXCL + int exclusive = 0; +#endif + + /* check input */ + if (path == NULL) + return NULL; + + /* allocate gzFile structure to return */ + state = (gz_statep)malloc(sizeof(gz_state)); + if (state == NULL) + return NULL; + state->size = 0; /* no buffers allocated yet */ + state->want = GZBUFSIZE; /* requested buffer size */ + state->msg = NULL; /* no error message yet */ + + /* interpret mode */ + state->mode = GZ_NONE; + state->level = Z_DEFAULT_COMPRESSION; + state->strategy = Z_DEFAULT_STRATEGY; + state->direct = 0; + while (*mode) { + if (*mode >= '0' && *mode <= '9') + state->level = *mode - '0'; + else + switch (*mode) { + case 'r': + state->mode = GZ_READ; + break; +#ifndef NO_GZCOMPRESS + case 'w': + state->mode = GZ_WRITE; + break; + case 'a': + state->mode = GZ_APPEND; + break; +#endif + case '+': /* can't read and write at the same time */ + free(state); + return NULL; + case 'b': /* ignore -- will request binary anyway */ + break; +#ifdef O_CLOEXEC + case 'e': + cloexec = 1; + break; +#endif +#ifdef O_EXCL + case 'x': + exclusive = 1; + break; +#endif + case 'f': + state->strategy = Z_FILTERED; + break; + case 'h': + state->strategy = Z_HUFFMAN_ONLY; + break; + case 'R': + state->strategy = Z_RLE; + break; + case 'F': + state->strategy = Z_FIXED; + break; + case 'T': + state->direct = 1; + break; + default: /* could consider as an error, but just ignore */ + ; + } + mode++; + } + + /* must provide an "r", "w", or "a" */ + if (state->mode == GZ_NONE) { + free(state); + return NULL; + } + + /* can't force transparent read */ + if (state->mode == GZ_READ) { + if (state->direct) { + free(state); + return NULL; + } + state->direct = 1; /* for empty file */ + } + + /* save the path name for error messages */ +#ifdef _WIN32 + if (fd == -2) { + len = wcstombs(NULL, path, 
0); + if (len == (size_t)-1) + len = 0; + } + else +#endif + len = strlen((const char *)path); + state->path = (char *)malloc(len + 1); + if (state->path == NULL) { + free(state); + return NULL; + } +#ifdef _WIN32 + if (fd == -2) + if (len) + wcstombs(state->path, path, len + 1); + else + *(state->path) = 0; + else +#endif +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(state->path, len + 1, "%s", (const char *)path); +#else + strcpy(state->path, path); +#endif + + /* compute the flags for open() */ + oflag = +#ifdef O_LARGEFILE + O_LARGEFILE | +#endif +#ifdef O_BINARY + O_BINARY | +#endif +#ifdef O_CLOEXEC + (cloexec ? O_CLOEXEC : 0) | +#endif + (state->mode == GZ_READ ? + O_RDONLY : + (O_WRONLY | O_CREAT | +#ifdef O_EXCL + (exclusive ? O_EXCL : 0) | +#endif + (state->mode == GZ_WRITE ? + O_TRUNC : + O_APPEND))); + + /* open the file with the appropriate flags (or just use fd) */ + state->fd = fd > -1 ? fd : ( +#ifdef _WIN32 + fd == -2 ? _wopen(path, oflag, 0666) : +#endif + open((const char *)path, oflag, 0666)); + if (state->fd == -1) { + free(state->path); + free(state); + return NULL; + } + if (state->mode == GZ_APPEND) + state->mode = GZ_WRITE; /* simplify later checks */ + + /* save the current position for rewinding (only if reading) */ + if (state->mode == GZ_READ) { + state->start = LSEEK(state->fd, 0, SEEK_CUR); + if (state->start == -1) state->start = 0; + } + + /* initialize stream */ + gz_reset(state); + + /* return stream */ + return (gzFile)state; +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzopen(path, mode) + const char *path; + const char *mode; +{ + return gz_open(path, -1, mode); +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzopen64(path, mode) + const char *path; + const char *mode; +{ + return gz_open(path, -1, mode); +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzdopen(fd, mode) + int fd; + const char *mode; +{ + char *path; /* identifier for error messages */ + gzFile gz; + + if (fd == -1 || (path = (char *)malloc(7 + 3 * 
sizeof(int))) == NULL) + return NULL; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(path, 7 + 3 * sizeof(int), "", fd); /* for debugging */ +#else + sprintf(path, "", fd); /* for debugging */ +#endif + gz = gz_open(path, fd, mode); + free(path); + return gz; +} + +/* -- see zlib.h -- */ +#ifdef _WIN32 +gzFile ZEXPORT gzopen_w(path, mode) + const wchar_t *path; + const char *mode; +{ + return gz_open(path, -2, mode); +} +#endif + +/* -- see zlib.h -- */ +int ZEXPORT gzbuffer(file, size) + gzFile file; + unsigned size; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* make sure we haven't already allocated memory */ + if (state->size != 0) + return -1; + + /* check and set requested size */ + if (size < 2) + size = 2; /* need two bytes to check magic header */ + state->want = size; + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzrewind(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* back up and start over */ + if (LSEEK(state->fd, state->start, SEEK_SET) == -1) + return -1; + gz_reset(state); + return 0; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gzseek64(file, offset, whence) + gzFile file; + z_off64_t offset; + int whence; +{ + unsigned n; + z_off64_t ret; + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* check that there's no error */ + if (state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + + /* can only seek from start or relative to current position */ + 
if (whence != SEEK_SET && whence != SEEK_CUR) + return -1; + + /* normalize offset to a SEEK_CUR specification */ + if (whence == SEEK_SET) + offset -= state->x.pos; + else if (state->seek) + offset += state->skip; + state->seek = 0; + + /* if within raw area while reading, just go there */ + if (state->mode == GZ_READ && state->how == COPY && + state->x.pos + offset >= 0) { + ret = LSEEK(state->fd, offset - state->x.have, SEEK_CUR); + if (ret == -1) + return -1; + state->x.have = 0; + state->eof = 0; + state->past = 0; + state->seek = 0; + gz_error(state, Z_OK, NULL); + state->strm.avail_in = 0; + state->x.pos += offset; + return state->x.pos; + } + + /* calculate skip amount, rewinding if needed for back seek when reading */ + if (offset < 0) { + if (state->mode != GZ_READ) /* writing -- can't go backwards */ + return -1; + offset += state->x.pos; + if (offset < 0) /* before start of file! */ + return -1; + if (gzrewind(file) == -1) /* rewind, then skip to offset */ + return -1; + } + + /* if reading, skip what's in output buffer (one less gzgetc() check) */ + if (state->mode == GZ_READ) { + n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > offset ? + (unsigned)offset : state->x.have; + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + offset -= n; + } + + /* request skip (if not zero) */ + if (offset) { + state->seek = 1; + state->skip = offset; + } + return state->x.pos + offset; +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gzseek(file, offset, whence) + gzFile file; + z_off_t offset; + int whence; +{ + z_off64_t ret; + + ret = gzseek64(file, (z_off64_t)offset, whence); + return ret == (z_off_t)ret ? 
(z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gztell64(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* return position */ + return state->x.pos + (state->seek ? state->skip : 0); +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gztell(file) + gzFile file; +{ + z_off64_t ret; + + ret = gztell64(file); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gzoffset64(file) + gzFile file; +{ + z_off64_t offset; + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* compute and return effective offset in file */ + offset = LSEEK(state->fd, 0, SEEK_CUR); + if (offset == -1) + return -1; + if (state->mode == GZ_READ) /* reading */ + offset -= state->strm.avail_in; /* don't count buffered input */ + return offset; +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gzoffset(file) + gzFile file; +{ + z_off64_t ret; + + ret = gzoffset64(file); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzeof(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return 0; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return 0; + + /* return end-of-file state */ + return state->mode == GZ_READ ? 
state->past : 0; +} + +/* -- see zlib.h -- */ +const char * ZEXPORT gzerror(file, errnum) + gzFile file; + int *errnum; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return NULL; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return NULL; + + /* return error information */ + if (errnum != NULL) + *errnum = state->err; + return state->err == Z_MEM_ERROR ? "out of memory" : + (state->msg == NULL ? "" : state->msg); +} + +/* -- see zlib.h -- */ +void ZEXPORT gzclearerr(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return; + + /* clear error and end-of-file */ + if (state->mode == GZ_READ) { + state->eof = 0; + state->past = 0; + } + gz_error(state, Z_OK, NULL); +} + +/* Create an error message in allocated memory and set state->err and + state->msg accordingly. Free any previous error message already there. Do + not try to free or allocate space if the error is Z_MEM_ERROR (out of + memory). Simply save the error message as a static string. If there is an + allocation failure constructing the error message, then convert the error to + out of memory. 
*/ +void ZLIB_INTERNAL gz_error(state, err, msg) + gz_statep state; + int err; + const char *msg; +{ + /* free previously allocated message and clear */ + if (state->msg != NULL) { + if (state->err != Z_MEM_ERROR) + free(state->msg); + state->msg = NULL; + } + + /* if fatal, set state->x.have to 0 so that the gzgetc() macro fails */ + if (err != Z_OK && err != Z_BUF_ERROR) + state->x.have = 0; + + /* set error code, and if no message, then done */ + state->err = err; + if (msg == NULL) + return; + + /* for an out of memory error, return literal string when requested */ + if (err == Z_MEM_ERROR) + return; + + /* construct error message with path */ + if ((state->msg = (char *)malloc(strlen(state->path) + strlen(msg) + 3)) == + NULL) { + state->err = Z_MEM_ERROR; + return; + } +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(state->msg, strlen(state->path) + strlen(msg) + 3, + "%s%s%s", state->path, ": ", msg); +#else + strcpy(state->msg, state->path); + strcat(state->msg, ": "); + strcat(state->msg, msg); +#endif + return; +} + +#ifndef INT_MAX +/* portably return maximum value for an int (when limits.h presumed not + available) -- we need to do this to cover cases where 2's complement not + used, since C standard permits 1's complement and sign-bit representations, + otherwise we could just use ((unsigned)-1) >> 1 */ +unsigned ZLIB_INTERNAL gz_intmax() +{ + unsigned p, q; + + p = 1; + do { + q = p; + p <<= 1; + p++; + } while (p > q); + return q >> 1; +} +#endif diff -Nru nodejs-0.11.13/deps/zlib/gzread.c nodejs-0.11.15/deps/zlib/gzread.c --- nodejs-0.11.13/deps/zlib/gzread.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/gzread.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,594 @@ +/* gzread.c -- zlib functions for reading gzip files + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* Local functions */ +local 
int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *)); +local int gz_avail OF((gz_statep)); +local int gz_look OF((gz_statep)); +local int gz_decomp OF((gz_statep)); +local int gz_fetch OF((gz_statep)); +local int gz_skip OF((gz_statep, z_off64_t)); + +/* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from + state->fd, and update state->eof, state->err, and state->msg as appropriate. + This function needs to loop on read(), since read() is not guaranteed to + read the number of bytes requested, depending on the type of descriptor. */ +local int gz_load(state, buf, len, have) + gz_statep state; + unsigned char *buf; + unsigned len; + unsigned *have; +{ + int ret; + + *have = 0; + do { + ret = read(state->fd, buf + *have, len - *have); + if (ret <= 0) + break; + *have += ret; + } while (*have < len); + if (ret < 0) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + if (ret == 0) + state->eof = 1; + return 0; +} + +/* Load up input buffer and set eof flag if last data loaded -- return -1 on + error, 0 otherwise. Note that the eof flag is set when the end of the input + file is reached, even though there may be unused data in the buffer. Once + that data has been used, no more attempts will be made to read the file. + If strm->avail_in != 0, then the current data is moved to the beginning of + the input buffer, and then the remainder of the buffer is loaded with the + available data from the input file. 
*/ +local int gz_avail(state) + gz_statep state; +{ + unsigned got; + z_streamp strm = &(state->strm); + + if (state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + if (state->eof == 0) { + if (strm->avail_in) { /* copy what's there to the start */ + unsigned char *p = state->in; + unsigned const char *q = strm->next_in; + unsigned n = strm->avail_in; + do { + *p++ = *q++; + } while (--n); + } + if (gz_load(state, state->in + strm->avail_in, + state->size - strm->avail_in, &got) == -1) + return -1; + strm->avail_in += got; + strm->next_in = state->in; + } + return 0; +} + +/* Look for gzip header, set up for inflate or copy. state->x.have must be 0. + If this is the first time in, allocate required memory. state->how will be + left unchanged if there is no more input data available, will be set to COPY + if there is no gzip header and direct copying will be performed, or it will + be set to GZIP for decompression. If direct copying, then leftover input + data from the input buffer will be copied to the output buffer. In that + case, all further file reads will be directly to either the output buffer or + a user buffer. If decompressing, the inflate state will be initialized. + gz_look() will return 0 on success or -1 on failure. 
*/ +local int gz_look(state) + gz_statep state; +{ + z_streamp strm = &(state->strm); + + /* allocate read buffers and inflate memory */ + if (state->size == 0) { + /* allocate buffers */ + state->in = (unsigned char *)malloc(state->want); + state->out = (unsigned char *)malloc(state->want << 1); + if (state->in == NULL || state->out == NULL) { + if (state->out != NULL) + free(state->out); + if (state->in != NULL) + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + state->size = state->want; + + /* allocate inflate memory */ + state->strm.zalloc = Z_NULL; + state->strm.zfree = Z_NULL; + state->strm.opaque = Z_NULL; + state->strm.avail_in = 0; + state->strm.next_in = Z_NULL; + if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) { /* gunzip */ + free(state->out); + free(state->in); + state->size = 0; + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + } + + /* get at least the magic bytes in the input buffer */ + if (strm->avail_in < 2) { + if (gz_avail(state) == -1) + return -1; + if (strm->avail_in == 0) + return 0; + } + + /* look for gzip magic bytes -- if there, do gzip decoding (note: there is + a logical dilemma here when considering the case of a partially written + gzip file, to wit, if a single 31 byte is written, then we cannot tell + whether this is a single-byte file, or just a partially written gzip + file -- for here we assume that if a gzip file is being written, then + the header will be written in a single operation, so that reading a + single byte is sufficient indication that it is not a gzip file) */ + if (strm->avail_in > 1 && + strm->next_in[0] == 31 && strm->next_in[1] == 139) { + inflateReset(strm); + state->how = GZIP; + state->direct = 0; + return 0; + } + + /* no gzip header -- if we were decoding gzip before, then this is trailing + garbage. Ignore the trailing garbage and finish. 
*/ + if (state->direct == 0) { + strm->avail_in = 0; + state->eof = 1; + state->x.have = 0; + return 0; + } + + /* doing raw i/o, copy any leftover input to output -- this assumes that + the output buffer is larger than the input buffer, which also assures + space for gzungetc() */ + state->x.next = state->out; + if (strm->avail_in) { + memcpy(state->x.next, strm->next_in, strm->avail_in); + state->x.have = strm->avail_in; + strm->avail_in = 0; + } + state->how = COPY; + state->direct = 1; + return 0; +} + +/* Decompress from input to the provided next_out and avail_out in the state. + On return, state->x.have and state->x.next point to the just decompressed + data. If the gzip stream completes, state->how is reset to LOOK to look for + the next gzip stream or raw data, once state->x.have is depleted. Returns 0 + on success, -1 on failure. */ +local int gz_decomp(state) + gz_statep state; +{ + int ret = Z_OK; + unsigned had; + z_streamp strm = &(state->strm); + + /* fill output buffer up to end of deflate stream */ + had = strm->avail_out; + do { + /* get more input for inflate() */ + if (strm->avail_in == 0 && gz_avail(state) == -1) + return -1; + if (strm->avail_in == 0) { + gz_error(state, Z_BUF_ERROR, "unexpected end of file"); + break; + } + + /* decompress and handle errors */ + ret = inflate(strm, Z_NO_FLUSH); + if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) { + gz_error(state, Z_STREAM_ERROR, + "internal error: inflate stream corrupt"); + return -1; + } + if (ret == Z_MEM_ERROR) { + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + if (ret == Z_DATA_ERROR) { /* deflate stream invalid */ + gz_error(state, Z_DATA_ERROR, + strm->msg == NULL ? 
"compressed data error" : strm->msg); + return -1; + } + } while (strm->avail_out && ret != Z_STREAM_END); + + /* update available output */ + state->x.have = had - strm->avail_out; + state->x.next = strm->next_out - state->x.have; + + /* if the gzip stream completed successfully, look for another */ + if (ret == Z_STREAM_END) + state->how = LOOK; + + /* good decompression */ + return 0; +} + +/* Fetch data and put it in the output buffer. Assumes state->x.have is 0. + Data is either copied from the input file or decompressed from the input + file depending on state->how. If state->how is LOOK, then a gzip header is + looked for to determine whether to copy or decompress. Returns -1 on error, + otherwise 0. gz_fetch() will leave state->how as COPY or GZIP unless the + end of the input file has been reached and all data has been processed. */ +local int gz_fetch(state) + gz_statep state; +{ + z_streamp strm = &(state->strm); + + do { + switch(state->how) { + case LOOK: /* -> LOOK, COPY (only if never GZIP), or GZIP */ + if (gz_look(state) == -1) + return -1; + if (state->how == LOOK) + return 0; + break; + case COPY: /* -> COPY */ + if (gz_load(state, state->out, state->size << 1, &(state->x.have)) + == -1) + return -1; + state->x.next = state->out; + return 0; + case GZIP: /* -> GZIP or LOOK (if end of gzip stream) */ + strm->avail_out = state->size << 1; + strm->next_out = state->out; + if (gz_decomp(state) == -1) + return -1; + } + } while (state->x.have == 0 && (!state->eof || strm->avail_in)); + return 0; +} + +/* Skip len uncompressed bytes of output. Return -1 on error, 0 on success. */ +local int gz_skip(state, len) + gz_statep state; + z_off64_t len; +{ + unsigned n; + + /* skip over len bytes or reach end-of-file, whichever comes first */ + while (len) + /* skip over whatever is in output buffer */ + if (state->x.have) { + n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ? 
+ (unsigned)len : state->x.have; + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + len -= n; + } + + /* output buffer empty -- return if we're at the end of the input */ + else if (state->eof && state->strm.avail_in == 0) + break; + + /* need more data to skip -- load up output buffer */ + else { + /* get more output, looking for header if required */ + if (gz_fetch(state) == -1) + return -1; + } + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzread(file, buf, len) + gzFile file; + voidp buf; + unsigned len; +{ + unsigned got, n; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* since an int is returned, make sure len fits in one, otherwise return + with an error (this avoids the flaw in the interface) */ + if ((int)len < 0) { + gz_error(state, Z_DATA_ERROR, "requested length does not fit in int"); + return -1; + } + + /* if len is zero, avoid unnecessary operations */ + if (len == 0) + return 0; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return -1; + } + + /* get len bytes to buf, or less than len if at the end */ + got = 0; + do { + /* first just try copying data from the output buffer */ + if (state->x.have) { + n = state->x.have > len ? 
len : state->x.have; + memcpy(buf, state->x.next, n); + state->x.next += n; + state->x.have -= n; + } + + /* output buffer empty -- return if we're at the end of the input */ + else if (state->eof && strm->avail_in == 0) { + state->past = 1; /* tried to read past end */ + break; + } + + /* need output data -- for small len or new stream load up our output + buffer */ + else if (state->how == LOOK || len < (state->size << 1)) { + /* get more output, looking for header if required */ + if (gz_fetch(state) == -1) + return -1; + continue; /* no progress yet -- go back to copy above */ + /* the copy above assures that we will leave with space in the + output buffer, allowing at least one gzungetc() to succeed */ + } + + /* large len -- read directly into user buffer */ + else if (state->how == COPY) { /* read directly */ + if (gz_load(state, (unsigned char *)buf, len, &n) == -1) + return -1; + } + + /* large len -- decompress directly into user buffer */ + else { /* state->how == GZIP */ + strm->avail_out = len; + strm->next_out = (unsigned char *)buf; + if (gz_decomp(state) == -1) + return -1; + n = state->x.have; + state->x.have = 0; + } + + /* update progress */ + len -= n; + buf = (char *)buf + n; + got += n; + state->x.pos += n; + } while (len); + + /* return number of bytes read into user buffer (will fit in int) */ + return (int)got; +} + +/* -- see zlib.h -- */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +#else +# undef gzgetc +#endif +int ZEXPORT gzgetc(file) + gzFile file; +{ + int ret; + unsigned char buf[1]; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* try output buffer (no need to check for skip request) */ + if (state->x.have) { + state->x.have--; + state->x.pos++; + return *(state->x.next)++; + } + + /* nothing there -- 
try gzread() */ + ret = gzread(file, buf, 1); + return ret < 1 ? -1 : buf[0]; +} + +int ZEXPORT gzgetc_(file) +gzFile file; +{ + return gzgetc(file); +} + +/* -- see zlib.h -- */ +int ZEXPORT gzungetc(c, file) + int c; + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return -1; + } + + /* can't push EOF */ + if (c < 0) + return -1; + + /* if output buffer empty, put byte at end (allows more pushing) */ + if (state->x.have == 0) { + state->x.have = 1; + state->x.next = state->out + (state->size << 1) - 1; + state->x.next[0] = c; + state->x.pos--; + state->past = 0; + return c; + } + + /* if no room, give up (must have already done a gzungetc()) */ + if (state->x.have == (state->size << 1)) { + gz_error(state, Z_DATA_ERROR, "out of room to push characters"); + return -1; + } + + /* slide output data if needed and insert byte before existing data */ + if (state->x.next == state->out) { + unsigned char *src = state->out + state->x.have; + unsigned char *dest = state->out + (state->size << 1); + while (src > state->out) + *--dest = *--src; + state->x.next = dest; + } + state->x.have++; + state->x.next--; + state->x.next[0] = c; + state->x.pos--; + state->past = 0; + return c; +} + +/* -- see zlib.h -- */ +char * ZEXPORT gzgets(file, buf, len) + gzFile file; + char *buf; + int len; +{ + unsigned left, n; + char *str; + unsigned char *eol; + gz_statep state; + + /* check parameters and get internal structure */ + if (file == NULL || buf == NULL || len < 1) + return NULL; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err 
!= Z_OK && state->err != Z_BUF_ERROR)) + return NULL; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return NULL; + } + + /* copy output bytes up to new line or len - 1, whichever comes first -- + append a terminating zero to the string (we don't check for a zero in + the contents, let the user worry about that) */ + str = buf; + left = (unsigned)len - 1; + if (left) do { + /* assure that something is in the output buffer */ + if (state->x.have == 0 && gz_fetch(state) == -1) + return NULL; /* error */ + if (state->x.have == 0) { /* end of file */ + state->past = 1; /* read past end */ + break; /* return what we have */ + } + + /* look for end-of-line in current output buffer */ + n = state->x.have > left ? left : state->x.have; + eol = (unsigned char *)memchr(state->x.next, '\n', n); + if (eol != NULL) + n = (unsigned)(eol - state->x.next) + 1; + + /* copy through end-of-line, or remainder if not found */ + memcpy(buf, state->x.next, n); + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + left -= n; + buf += n; + } while (left && eol == NULL); + + /* return terminated string, or if nothing, end of file */ + if (buf == str) + return NULL; + buf[0] = 0; + return str; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzdirect(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + + /* if the state is not known, but we can find out, then do so (this is + mainly for right after a gzopen() or gzdopen()) */ + if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0) + (void)gz_look(state); + + /* return 1 if transparent, 0 if processing a gzip stream */ + return state->direct; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzclose_r(file) + gzFile file; +{ + int ret, err; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check 
that we're reading */ + if (state->mode != GZ_READ) + return Z_STREAM_ERROR; + + /* free memory and close file */ + if (state->size) { + inflateEnd(&(state->strm)); + free(state->out); + free(state->in); + } + err = state->err == Z_BUF_ERROR ? Z_BUF_ERROR : Z_OK; + gz_error(state, Z_OK, NULL); + free(state->path); + ret = close(state->fd); + free(state); + return ret ? Z_ERRNO : err; +} diff -Nru nodejs-0.11.13/deps/zlib/gzwrite.c nodejs-0.11.15/deps/zlib/gzwrite.c --- nodejs-0.11.13/deps/zlib/gzwrite.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/gzwrite.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,577 @@ +/* gzwrite.c -- zlib functions for writing gzip files + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* Local functions */ +local int gz_init OF((gz_statep)); +local int gz_comp OF((gz_statep, int)); +local int gz_zero OF((gz_statep, z_off64_t)); + +/* Initialize state for writing a gzip file. Mark initialization by setting + state->size to non-zero. Return -1 on failure or 0 on success. 
*/ +local int gz_init(state) + gz_statep state; +{ + int ret; + z_streamp strm = &(state->strm); + + /* allocate input buffer */ + state->in = (unsigned char *)malloc(state->want); + if (state->in == NULL) { + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + + /* only need output buffer and deflate state if compressing */ + if (!state->direct) { + /* allocate output buffer */ + state->out = (unsigned char *)malloc(state->want); + if (state->out == NULL) { + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + + /* allocate deflate memory, set up for gzip compression */ + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + ret = deflateInit2(strm, state->level, Z_DEFLATED, + MAX_WBITS + 16, DEF_MEM_LEVEL, state->strategy); + if (ret != Z_OK) { + free(state->out); + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + } + + /* mark state as initialized */ + state->size = state->want; + + /* initialize write buffer if compressing */ + if (!state->direct) { + strm->avail_out = state->size; + strm->next_out = state->out; + state->x.next = strm->next_out; + } + return 0; +} + +/* Compress whatever is at avail_in and next_in and write to the output file. + Return -1 if there is an error writing to the output file, otherwise 0. + flush is assumed to be a valid deflate() flush value. If flush is Z_FINISH, + then the deflate() state is reset to start a new gzip stream. If gz->direct + is true, then simply write to the output file without compressing, and + ignore flush. 
*/ +local int gz_comp(state, flush) + gz_statep state; + int flush; +{ + int ret, got; + unsigned have; + z_streamp strm = &(state->strm); + + /* allocate memory if this is the first time through */ + if (state->size == 0 && gz_init(state) == -1) + return -1; + + /* write directly if requested */ + if (state->direct) { + got = write(state->fd, strm->next_in, strm->avail_in); + if (got < 0 || (unsigned)got != strm->avail_in) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + strm->avail_in = 0; + return 0; + } + + /* run deflate() on provided input until it produces no more output */ + ret = Z_OK; + do { + /* write out current buffer contents if full, or if flushing, but if + doing Z_FINISH then don't write until we get to Z_STREAM_END */ + if (strm->avail_out == 0 || (flush != Z_NO_FLUSH && + (flush != Z_FINISH || ret == Z_STREAM_END))) { + have = (unsigned)(strm->next_out - state->x.next); + if (have && ((got = write(state->fd, state->x.next, have)) < 0 || + (unsigned)got != have)) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + if (strm->avail_out == 0) { + strm->avail_out = state->size; + strm->next_out = state->out; + } + state->x.next = strm->next_out; + } + + /* compress */ + have = strm->avail_out; + ret = deflate(strm, flush); + if (ret == Z_STREAM_ERROR) { + gz_error(state, Z_STREAM_ERROR, + "internal error: deflate stream corrupt"); + return -1; + } + have -= strm->avail_out; + } while (have); + + /* if that completed a deflate stream, allow another to start */ + if (flush == Z_FINISH) + deflateReset(strm); + + /* all done, no errors */ + return 0; +} + +/* Compress len zeros to output. Return -1 on error, 0 on success. 
*/ +local int gz_zero(state, len) + gz_statep state; + z_off64_t len; +{ + int first; + unsigned n; + z_streamp strm = &(state->strm); + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return -1; + + /* compress len zeros (len guaranteed > 0) */ + first = 1; + while (len) { + n = GT_OFF(state->size) || (z_off64_t)state->size > len ? + (unsigned)len : state->size; + if (first) { + memset(state->in, 0, n); + first = 0; + } + strm->avail_in = n; + strm->next_in = state->in; + state->x.pos += n; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return -1; + len -= n; + } + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzwrite(file, buf, len) + gzFile file; + voidpc buf; + unsigned len; +{ + unsigned put = len; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* since an int is returned, make sure len fits in one, otherwise return + with an error (this avoids the flaw in the interface) */ + if ((int)len < 0) { + gz_error(state, Z_DATA_ERROR, "requested length does not fit in int"); + return 0; + } + + /* if len is zero, avoid unnecessary operations */ + if (len == 0) + return 0; + + /* allocate memory if this is the first time through */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* for small len, copy to input buffer, otherwise compress directly */ + if (len < state->size) { + /* copy to input buffer, compress when full */ + do { + unsigned have, copy; + + if (strm->avail_in == 0) + strm->next_in = state->in; + have = (unsigned)((strm->next_in + strm->avail_in) - state->in); + copy = state->size - have; + if (copy > len) + copy = 
len; + memcpy(state->in + have, buf, copy); + strm->avail_in += copy; + state->x.pos += copy; + buf = (const char *)buf + copy; + len -= copy; + if (len && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + } while (len); + } + else { + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* directly compress user buffer to file */ + strm->avail_in = len; + strm->next_in = (z_const Bytef *)buf; + state->x.pos += len; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + } + + /* input was all buffered or compressed (put will fit in int) */ + return (int)put; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzputc(file, c) + gzFile file; + int c; +{ + unsigned have; + unsigned char buf[1]; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return -1; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* try writing to input buffer for speed (state->size == 0 if buffer not + initialized) */ + if (state->size) { + if (strm->avail_in == 0) + strm->next_in = state->in; + have = (unsigned)((strm->next_in + strm->avail_in) - state->in); + if (have < state->size) { + state->in[have] = c; + strm->avail_in++; + state->x.pos++; + return c & 0xff; + } + } + + /* no room in buffer or not initialized, use gz_write() */ + buf[0] = c; + if (gzwrite(file, buf, 1) != 1) + return -1; + return c & 0xff; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzputs(file, str) + gzFile file; + const char *str; +{ + int ret; + unsigned len; + + /* write string */ + len = (unsigned)strlen(str); + ret = gzwrite(file, str, len); + return ret == 0 && len != 0 ? 
-1 : ret; +} + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +#include + +/* -- see zlib.h -- */ +int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) +{ + int size, len; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* make sure we have some buffer space */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* do the printf() into the input buffer, put length in len */ + size = (int)(state->size); + state->in[size - 1] = 0; +#ifdef NO_vsnprintf +# ifdef HAS_vsprintf_void + (void)vsprintf((char *)(state->in), format, va); + for (len = 0; len < size; len++) + if (state->in[len] == 0) break; +# else + len = vsprintf((char *)(state->in), format, va); +# endif +#else +# ifdef HAS_vsnprintf_void + (void)vsnprintf((char *)(state->in), size, format, va); + len = strlen((char *)(state->in)); +# else + len = vsnprintf((char *)(state->in), size, format, va); +# endif +#endif + + /* check that printf() results fit in buffer */ + if (len <= 0 || len >= (int)size || state->in[size - 1] != 0) + return 0; + + /* update buffer and position, defer compression until needed */ + strm->avail_in = (unsigned)len; + strm->next_in = state->in; + state->x.pos += len; + return len; +} + +int ZEXPORTVA gzprintf(gzFile file, const char *format, ...) 
+{ + va_list va; + int ret; + + va_start(va, format); + ret = gzvprintf(file, format, va); + va_end(va); + return ret; +} + +#else /* !STDC && !Z_HAVE_STDARG_H */ + +/* -- see zlib.h -- */ +int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, + a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) + gzFile file; + const char *format; + int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, + a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; +{ + int size, len; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that can really pass pointer in ints */ + if (sizeof(int) != sizeof(void *)) + return 0; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* make sure we have some buffer space */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* do the printf() into the input buffer, put length in len */ + size = (int)(state->size); + state->in[size - 1] = 0; +#ifdef NO_snprintf +# ifdef HAS_sprintf_void + sprintf((char *)(state->in), format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); + for (len = 0; len < size; len++) + if (state->in[len] == 0) break; +# else + len = sprintf((char *)(state->in), format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); +# endif +#else +# ifdef HAS_snprintf_void + snprintf((char *)(state->in), size, format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); + len = strlen((char *)(state->in)); +# else + len = snprintf((char 
*)(state->in), size, format, a1, a2, a3, a4, a5, a6, + a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, + a19, a20); +# endif +#endif + + /* check that printf() results fit in buffer */ + if (len <= 0 || len >= (int)size || state->in[size - 1] != 0) + return 0; + + /* update buffer and position, defer compression until needed */ + strm->avail_in = (unsigned)len; + strm->next_in = state->in; + state->x.pos += len; + return len; +} + +#endif + +/* -- see zlib.h -- */ +int ZEXPORT gzflush(file, flush) + gzFile file; + int flush; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* check flush parameter */ + if (flush < 0 || flush > Z_FINISH) + return Z_STREAM_ERROR; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* compress remaining data with requested flush */ + gz_comp(state, flush); + return state->err; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzsetparams(file, level, strategy) + gzFile file; + int level; + int strategy; +{ + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* if no change is requested, then do nothing */ + if (level == state->level && strategy == state->strategy) + return Z_OK; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* change compression parameters for subsequent input */ + if (state->size) { + /* flush previous input with previous parameters before changing */ + if (strm->avail_in && gz_comp(state, 
Z_PARTIAL_FLUSH) == -1) + return state->err; + deflateParams(strm, level, strategy); + } + state->level = level; + state->strategy = strategy; + return Z_OK; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzclose_w(file) + gzFile file; +{ + int ret = Z_OK; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check that we're writing */ + if (state->mode != GZ_WRITE) + return Z_STREAM_ERROR; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + ret = state->err; + } + + /* flush, free memory, and close file */ + if (gz_comp(state, Z_FINISH) == -1) + ret = state->err; + if (state->size) { + if (!state->direct) { + (void)deflateEnd(&(state->strm)); + free(state->out); + } + free(state->in); + } + gz_error(state, Z_OK, NULL); + free(state->path); + if (close(state->fd) == -1) + ret = Z_ERRNO; + free(state); + return ret; +} diff -Nru nodejs-0.11.13/deps/zlib/INDEX nodejs-0.11.15/deps/zlib/INDEX --- nodejs-0.11.13/deps/zlib/INDEX 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/INDEX 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,68 @@ +CMakeLists.txt cmake build file +ChangeLog history of changes +FAQ Frequently Asked Questions about zlib +INDEX this file +Makefile dummy Makefile that tells you to ./configure +Makefile.in template for Unix Makefile +README guess what +configure configure script for Unix +make_vms.com makefile for VMS +test/example.c zlib usages examples for build testing +test/minigzip.c minimal gzip-like functionality for build testing +test/infcover.c inf*.c code coverage for build coverage testing +treebuild.xml XML description of source file dependencies +zconf.h.cmakein zconf.h template for cmake +zconf.h.in zconf.h template for configure +zlib.3 Man page for zlib +zlib.3.pdf Man page in PDF format +zlib.map Linux symbol information +zlib.pc.in Template for pkg-config descriptor +zlib.pc.cmakein 
zlib.pc template for cmake +zlib2ansi perl script to convert source files for C++ compilation + +amiga/ makefiles for Amiga SAS C +as400/ makefiles for AS/400 +doc/ documentation for formats and algorithms +msdos/ makefiles for MSDOS +nintendods/ makefile for Nintendo DS +old/ makefiles for various architectures and zlib documentation + files that have not yet been updated for zlib 1.2.x +qnx/ makefiles for QNX +watcom/ makefiles for OpenWatcom +win32/ makefiles for Windows + + zlib public header files (required for library use): +zconf.h +zlib.h + + private source files used to build the zlib library: +adler32.c +compress.c +crc32.c +crc32.h +deflate.c +deflate.h +gzclose.c +gzguts.h +gzlib.c +gzread.c +gzwrite.c +infback.c +inffast.c +inffast.h +inffixed.h +inflate.c +inflate.h +inftrees.c +inftrees.h +trees.c +trees.h +uncompr.c +zutil.c +zutil.h + + source files for sample programs +See examples/README.examples + + unsupported contributions by third parties +See contrib/README.contrib diff -Nru nodejs-0.11.13/deps/zlib/infback.c nodejs-0.11.15/deps/zlib/infback.c --- nodejs-0.11.13/deps/zlib/infback.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/infback.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* infback.c -- inflate using a call-back interface - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2011 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -42,10 +42,19 @@ return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; +#endif } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif state = (struct inflate_state FAR *)ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; @@ -55,7 
+64,7 @@ state->wbits = windowBits; state->wsize = 1U << windowBits; state->window = window; - state->write = 0; + state->wnext = 0; state->whave = 0; return Z_OK; } @@ -246,14 +255,14 @@ void FAR *out_desc; { struct inflate_state FAR *state; - unsigned char FAR *next; /* next input */ + z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ - code this; /* current decoding table entry */ + code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ @@ -389,19 +398,18 @@ state->have = 0; while (state->have < state->nlen + state->ndist) { for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.val < 16) { - NEEDBITS(this.bits); - DROPBITS(this.bits); - state->lens[state->have++] = this.val; + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; } else { - if (this.val == 16) { - NEEDBITS(this.bits + 2); - DROPBITS(this.bits); + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; @@ -411,16 +419,16 @@ copy = 3 + BITS(2); DROPBITS(2); } - else if (this.val == 17) { - NEEDBITS(this.bits + 3); - DROPBITS(this.bits); + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { - NEEDBITS(this.bits + 7); - DROPBITS(this.bits); + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); 
DROPBITS(7); @@ -438,7 +446,16 @@ /* handle error breaks in while */ if (state->mode == BAD) break; - /* build code tables */ + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; @@ -474,28 +491,28 @@ /* get a literal, length, or end-of-block code */ for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.op && (this.op & 0xf0) == 0) { - last = this; + if (here.op && (here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->lencode[last.val + + here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } - DROPBITS(this.bits); - state->length = (unsigned)this.val; + DROPBITS(here.bits); + state->length = (unsigned)here.val; /* process literal */ - if (this.op == 0) { - Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? + if (here.op == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
"inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", this.val)); + "inflate: literal 0x%02x\n", here.val)); ROOM(); *put++ = (unsigned char)(state->length); left--; @@ -504,21 +521,21 @@ } /* process end of block */ - if (this.op & 32) { + if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } /* invalid code */ - if (this.op & 64) { + if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } /* length code -- get extra bits, if any */ - state->extra = (unsigned)(this.op) & 15; + state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->length += BITS(state->extra); @@ -528,30 +545,30 @@ /* get distance code */ for (;;) { - this = state->distcode[BITS(state->distbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if ((this.op & 0xf0) == 0) { - last = this; + if ((here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->distcode[last.val + + here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } - DROPBITS(this.bits); - if (this.op & 64) { + DROPBITS(here.bits); + if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } - state->offset = (unsigned)this.val; + state->offset = (unsigned)here.val; /* get distance extra bits, if any */ - state->extra = (unsigned)(this.op) & 15; + state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->offset += BITS(state->extra); diff -Nru nodejs-0.11.13/deps/zlib/inffast.c nodejs-0.11.15/deps/zlib/inffast.c --- nodejs-0.11.13/deps/zlib/inffast.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inffast.c 2015-01-20 21:22:17.000000000 
+0000 @@ -1,5 +1,5 @@ /* inffast.c -- fast decoding - * Copyright (C) 1995-2004 Mark Adler + * Copyright (C) 1995-2008, 2010, 2013 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -64,13 +64,13 @@ requires strm->avail_out >= 258 for each loop to avoid checking for output space. */ -void inflate_fast(strm, start) +void ZLIB_INTERNAL inflate_fast(strm, start) z_streamp strm; unsigned start; /* inflate()'s starting value for strm->avail_out */ { struct inflate_state FAR *state; - unsigned char FAR *in; /* local strm->next_in */ - unsigned char FAR *last; /* while in < last, enough input available */ + z_const unsigned char FAR *in; /* local strm->next_in */ + z_const unsigned char FAR *last; /* have enough input while in < last */ unsigned char FAR *out; /* local strm->next_out */ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ unsigned char FAR *end; /* while out < end, enough space available */ @@ -79,7 +79,7 @@ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ - unsigned write; /* window write index */ + unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ @@ -87,7 +87,7 @@ code const FAR *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ - code this; /* retrieved table entry */ + code here; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ @@ -106,7 +106,7 @@ #endif wsize = state->wsize; whave = state->whave; - write = state->write; + wnext = state->wnext; window = state->window; hold = state->hold; bits = state->bits; @@ -124,20 +124,20 @@ hold += (unsigned long)(PUP(in)) << 
bits; bits += 8; } - this = lcode[hold & lmask]; + here = lcode[hold & lmask]; dolen: - op = (unsigned)(this.bits); + op = (unsigned)(here.bits); hold >>= op; bits -= op; - op = (unsigned)(this.op); + op = (unsigned)(here.op); if (op == 0) { /* literal */ - Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", this.val)); - PUP(out) = (unsigned char)(this.val); + "inflate: literal 0x%02x\n", here.val)); + PUP(out) = (unsigned char)(here.val); } else if (op & 16) { /* length base */ - len = (unsigned)(this.val); + len = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { @@ -155,14 +155,14 @@ hold += (unsigned long)(PUP(in)) << bits; bits += 8; } - this = dcode[hold & dmask]; + here = dcode[hold & dmask]; dodist: - op = (unsigned)(this.bits); + op = (unsigned)(here.bits); hold >>= op; bits -= op; - op = (unsigned)(this.op); + op = (unsigned)(here.op); if (op & 16) { /* distance base */ - dist = (unsigned)(this.val); + dist = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(PUP(in)) << bits; @@ -187,12 +187,34 @@ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; + if (state->sane) { + strm->msg = + (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + if (len <= op - whave) { + do { + PUP(out) = 0; + } while (--len); + continue; + } + len -= op - whave; + do { + PUP(out) = 0; + } while (--op > whave); + if (op == 0) { + from = out - dist; + do { + PUP(out) = PUP(from); + } while (--len); + continue; + } +#endif } from = window - OFF; - if (write == 0) { /* very common case */ + if (wnext == 0) { /* very common case */ from += wsize - op; if (op < len) { 
/* some from window */ len -= op; @@ -202,17 +224,17 @@ from = out - dist; /* rest from output */ } } - else if (write < op) { /* wrap around window */ - from += wsize + write - op; - op -= write; + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; if (op < len) { /* some from end of window */ len -= op; do { PUP(out) = PUP(from); } while (--op); from = window - OFF; - if (write < len) { /* some from start of window */ - op = write; + if (wnext < len) { /* some from start of window */ + op = wnext; len -= op; do { PUP(out) = PUP(from); @@ -222,7 +244,7 @@ } } else { /* contiguous in window */ - from += write - op; + from += wnext - op; if (op < len) { /* some from window */ len -= op; do { @@ -259,7 +281,7 @@ } } else if ((op & 64) == 0) { /* 2nd level distance code */ - this = dcode[this.val + (hold & ((1U << op) - 1))]; + here = dcode[here.val + (hold & ((1U << op) - 1))]; goto dodist; } else { @@ -269,7 +291,7 @@ } } else if ((op & 64) == 0) { /* 2nd level length code */ - this = lcode[this.val + (hold & ((1U << op) - 1))]; + here = lcode[here.val + (hold & ((1U << op) - 1))]; goto dolen; } else if (op & 32) { /* end-of-block */ @@ -305,7 +327,7 @@ inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - - Three separate decoding do-loops for direct, window, and write == 0 + - Three separate decoding do-loops for direct, window, and wnext == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes diff -Nru nodejs-0.11.13/deps/zlib/inffast.h nodejs-0.11.15/deps/zlib/inffast.h --- nodejs-0.11.13/deps/zlib/inffast.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inffast.h 2015-01-20 21:22:17.000000000 +0000 @@ 
-1,5 +1,5 @@ /* inffast.h -- header to use inffast.c - * Copyright (C) 1995-2003 Mark Adler + * Copyright (C) 1995-2003, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -8,4 +8,4 @@ subject to change. Applications should only use zlib.h. */ -void inflate_fast OF((z_streamp strm, unsigned start)); +void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start)); diff -Nru nodejs-0.11.13/deps/zlib/inffixed.h nodejs-0.11.15/deps/zlib/inffixed.h --- nodejs-0.11.13/deps/zlib/inffixed.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inffixed.h 2015-01-20 21:22:17.000000000 +0000 @@ -2,9 +2,9 @@ * Generated automatically by makefixed(). */ - /* WARNING: this file should *not* be used by applications. It - is part of the implementation of the compression library and - is subject to change. Applications should only use zlib.h. + /* WARNING: this file should *not* be used by applications. + It is part of the implementation of this library and is + subject to change. Applications should only use zlib.h. 
*/ static const code lenfix[512] = { diff -Nru nodejs-0.11.13/deps/zlib/inflate.c nodejs-0.11.15/deps/zlib/inflate.c --- nodejs-0.11.13/deps/zlib/inflate.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inflate.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* inflate.c -- zlib decompression - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2012 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -45,7 +45,7 @@ * - Rearrange window copies in inflate_fast() for speed and simplification * - Unroll last copy for window match in inflate_fast() * - Use local copies of window variables in inflate_fast() for speed - * - Pull out common write == 0 case for speed in inflate_fast() + * - Pull out common wnext == 0 case for speed in inflate_fast() * - Make op and len in inflate_fast() unsigned for consistency * - Add FAR to lcode and dcode declarations in inflate_fast() * - Simplified bad distance check in inflate_fast() @@ -93,14 +93,15 @@ /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); -local int updatewindow OF((z_streamp strm, unsigned out)); +local int updatewindow OF((z_streamp strm, const unsigned char FAR *end, + unsigned copy)); #ifdef BUILDFIXED void makefixed OF((void)); #endif -local unsigned syncsearch OF((unsigned FAR *have, unsigned char FAR *buf, +local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf, unsigned len)); -int ZEXPORT inflateReset(strm) +int ZEXPORT inflateResetKeep(strm) z_streamp strm; { struct inflate_state FAR *state; @@ -109,36 +110,71 @@ state = (struct inflate_state FAR *)strm->state; strm->total_in = strm->total_out = state->total = 0; strm->msg = Z_NULL; - strm->adler = 1; /* to support ill-conceived Java test suite */ + if (state->wrap) /* to support ill-conceived Java test suite */ + strm->adler = state->wrap & 1; state->mode = HEAD; state->last = 0; state->havedict = 0; state->dmax = 32768U; 
state->head = Z_NULL; - state->wsize = 0; - state->whave = 0; - state->write = 0; state->hold = 0; state->bits = 0; state->lencode = state->distcode = state->next = state->codes; + state->sane = 1; + state->back = -1; Tracev((stderr, "inflate: reset\n")); return Z_OK; } -int ZEXPORT inflatePrime(strm, bits, value) +int ZEXPORT inflateReset(strm) z_streamp strm; -int bits; -int value; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; - if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; - value &= (1L << bits) - 1; - state->hold += value << state->bits; - state->bits += bits; - return Z_OK; + state->wsize = 0; + state->whave = 0; + state->wnext = 0; + return inflateResetKeep(strm); +} + +int ZEXPORT inflateReset2(strm, windowBits) +z_streamp strm; +int windowBits; +{ + int wrap; + struct inflate_state FAR *state; + + /* get the state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 1; +#ifdef GUNZIP + if (windowBits < 48) + windowBits &= 15; +#endif + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) + return Z_STREAM_ERROR; + if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { + ZFREE(strm, state->window); + state->window = Z_NULL; + } + + /* update state and reset the rest of it */ + state->wrap = wrap; + state->wbits = (unsigned)windowBits; + return inflateReset(strm); } int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) @@ -147,6 +183,7 @@ const char *version; int stream_size; { + int ret; struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || @@ -155,33 +192,31 @@ if (strm == 
Z_NULL) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; +#endif } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif state = (struct inflate_state FAR *) ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; - if (windowBits < 0) { - state->wrap = 0; - windowBits = -windowBits; - } - else { - state->wrap = (windowBits >> 4) + 1; -#ifdef GUNZIP - if (windowBits < 48) windowBits &= 15; -#endif - } - if (windowBits < 8 || windowBits > 15) { + state->window = Z_NULL; + ret = inflateReset2(strm, windowBits); + if (ret != Z_OK) { ZFREE(strm, state); strm->state = Z_NULL; - return Z_STREAM_ERROR; } - state->wbits = (unsigned)windowBits; - state->window = Z_NULL; - return inflateReset(strm); + return ret; } int ZEXPORT inflateInit_(strm, version, stream_size) @@ -192,6 +227,27 @@ return inflateInit2_(strm, DEF_WBITS, version, stream_size); } +int ZEXPORT inflatePrime(strm, bits, value) +z_streamp strm; +int bits; +int value; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (bits < 0) { + state->hold = 0; + state->bits = 0; + return Z_OK; + } + if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; + value &= (1L << bits) - 1; + state->hold += value << state->bits; + state->bits += bits; + return Z_OK; +} + /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. 
@@ -286,8 +342,8 @@ low = 0; for (;;) { if ((low % 7) == 0) printf("\n "); - printf("{%u,%u,%d}", state.lencode[low].op, state.lencode[low].bits, - state.lencode[low].val); + printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op, + state.lencode[low].bits, state.lencode[low].val); if (++low == size) break; putchar(','); } @@ -320,12 +376,13 @@ output will fall in the output data, making match copies simpler and faster. The advantage may be dependent on the size of the processor's data caches. */ -local int updatewindow(strm, out) +local int updatewindow(strm, end, copy) z_streamp strm; -unsigned out; +const Bytef *end; +unsigned copy; { struct inflate_state FAR *state; - unsigned copy, dist; + unsigned dist; state = (struct inflate_state FAR *)strm->state; @@ -340,30 +397,29 @@ /* if window not in use yet, initialize */ if (state->wsize == 0) { state->wsize = 1U << state->wbits; - state->write = 0; + state->wnext = 0; state->whave = 0; } /* copy state->wsize or less output bytes into the circular window */ - copy = out - strm->avail_out; if (copy >= state->wsize) { - zmemcpy(state->window, strm->next_out - state->wsize, state->wsize); - state->write = 0; + zmemcpy(state->window, end - state->wsize, state->wsize); + state->wnext = 0; state->whave = state->wsize; } else { - dist = state->wsize - state->write; + dist = state->wsize - state->wnext; if (dist > copy) dist = copy; - zmemcpy(state->window + state->write, strm->next_out - copy, dist); + zmemcpy(state->window + state->wnext, end - copy, dist); copy -= dist; if (copy) { - zmemcpy(state->window, strm->next_out - copy, copy); - state->write = copy; + zmemcpy(state->window, end - copy, copy); + state->wnext = copy; state->whave = state->wsize; } else { - state->write += dist; - if (state->write == state->wsize) state->write = 0; + state->wnext += dist; + if (state->wnext == state->wsize) state->wnext = 0; if (state->whave < state->wsize) state->whave += dist; } } @@ -464,11 +520,6 @@ bits -= bits & 
7; \ } while (0) -/* Reverse the bytes in a 32-bit value */ -#define REVERSE(q) \ - ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ - (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) - /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. The state machine is @@ -556,7 +607,7 @@ int flush; { struct inflate_state FAR *state; - unsigned char FAR *next; /* next input */ + z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ @@ -564,7 +615,7 @@ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ - code this; /* current decoding table entry */ + code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ @@ -619,7 +670,9 @@ } DROPBITS(4); len = BITS(4) + 8; - if (len > state->wbits) { + if (state->wbits == 0) + state->wbits = len; + else if (len > state->wbits) { strm->msg = (char *)"invalid window size"; state->mode = BAD; break; @@ -760,7 +813,7 @@ #endif case DICTID: NEEDBITS(32); - strm->adler = state->check = REVERSE(hold); + strm->adler = state->check = ZSWAP32(hold); INITBITS(); state->mode = DICT; case DICT: @@ -771,7 +824,7 @@ strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = TYPE; case TYPE: - if (flush == Z_BLOCK) goto inf_leave; + if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; case TYPEDO: if (state->last) { BYTEBITS(); @@ -791,7 +844,11 @@ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? 
" (last)" : "")); - state->mode = LEN; /* decode codes */ + state->mode = LEN_; /* decode codes */ + if (flush == Z_TREES) { + DROPBITS(2); + goto inf_leave; + } break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", @@ -816,6 +873,9 @@ Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); + state->mode = COPY_; + if (flush == Z_TREES) goto inf_leave; + case COPY_: state->mode = COPY; case COPY: copy = state->length; @@ -861,7 +921,7 @@ while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; - state->lencode = (code const FAR *)(state->next); + state->lencode = (const code FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); @@ -876,19 +936,18 @@ case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.val < 16) { - NEEDBITS(this.bits); - DROPBITS(this.bits); - state->lens[state->have++] = this.val; + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; } else { - if (this.val == 16) { - NEEDBITS(this.bits + 2); - DROPBITS(this.bits); + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; @@ -898,16 +957,16 @@ copy = 3 + BITS(2); DROPBITS(2); } - else if (this.val == 17) { - NEEDBITS(this.bits + 3); - DROPBITS(this.bits); + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { - NEEDBITS(this.bits + 7); - DROPBITS(this.bits); + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); @@ -925,9 +984,18 @@ /* handle error breaks 
in while */ if (state->mode == BAD) break; - /* build code tables */ + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; - state->lencode = (code const FAR *)(state->next); + state->lencode = (const code FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); @@ -936,7 +1004,7 @@ state->mode = BAD; break; } - state->distcode = (code const FAR *)(state->next); + state->distcode = (const code FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); @@ -946,88 +1014,102 @@ break; } Tracev((stderr, "inflate: codes ok\n")); + state->mode = LEN_; + if (flush == Z_TREES) goto inf_leave; + case LEN_: state->mode = LEN; case LEN: if (have >= 6 && left >= 258) { RESTORE(); inflate_fast(strm, out); LOAD(); + if (state->mode == TYPE) + state->back = -1; break; } + state->back = 0; for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.op && (this.op & 0xf0) == 0) { - last = this; + if (here.op && (here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->lencode[last.val + + here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); + state->back += last.bits; } - DROPBITS(this.bits); - state->length = 
(unsigned)this.val; - if ((int)(this.op) == 0) { - Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? + DROPBITS(here.bits); + state->back += here.bits; + state->length = (unsigned)here.val; + if ((int)(here.op) == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", this.val)); + "inflate: literal 0x%02x\n", here.val)); state->mode = LIT; break; } - if (this.op & 32) { + if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); + state->back = -1; state->mode = TYPE; break; } - if (this.op & 64) { + if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } - state->extra = (unsigned)(this.op) & 15; + state->extra = (unsigned)(here.op) & 15; state->mode = LENEXT; case LENEXT: if (state->extra) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); + state->back += state->extra; } Tracevv((stderr, "inflate: length %u\n", state->length)); + state->was = state->length; state->mode = DIST; case DIST: for (;;) { - this = state->distcode[BITS(state->distbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if ((this.op & 0xf0) == 0) { - last = this; + if ((here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->distcode[last.val + + here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); + state->back += last.bits; } - DROPBITS(this.bits); - if (this.op & 64) { + DROPBITS(here.bits); + state->back += here.bits; + if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } - state->offset = (unsigned)this.val; - state->extra = (unsigned)(this.op) & 15; + state->offset = (unsigned)here.val; + state->extra = 
(unsigned)(here.op) & 15; state->mode = DISTEXT; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); + state->back += state->extra; } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { @@ -1036,11 +1118,6 @@ break; } #endif - if (state->offset > state->whave + out - left) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; - } Tracevv((stderr, "inflate: distance %u\n", state->offset)); state->mode = MATCH; case MATCH: @@ -1048,12 +1125,32 @@ copy = out - left; if (state->offset > copy) { /* copy from window */ copy = state->offset - copy; - if (copy > state->write) { - copy -= state->write; + if (copy > state->whave) { + if (state->sane) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + Trace((stderr, "inflate.c too far\n")); + copy -= state->whave; + if (copy > state->length) copy = state->length; + if (copy > left) copy = left; + left -= copy; + state->length -= copy; + do { + *put++ = 0; + } while (--copy); + if (state->length == 0) state->mode = LEN; + break; +#endif + } + if (copy > state->wnext) { + copy -= state->wnext; from = state->window + (state->wsize - copy); } else - from = state->window + (state->write - copy); + from = state->window + (state->wnext - copy); if (copy > state->length) copy = state->length; } else { /* copy from output */ @@ -1088,7 +1185,7 @@ #ifdef GUNZIP state->flags ? 
hold : #endif - REVERSE(hold)) != state->check) { + ZSWAP32(hold)) != state->check) { strm->msg = (char *)"incorrect data check"; state->mode = BAD; break; @@ -1132,8 +1229,9 @@ */ inf_leave: RESTORE(); - if (state->wsize || (state->mode < CHECK && out != strm->avail_out)) - if (updatewindow(strm, out)) { + if (state->wsize || (out != strm->avail_out && state->mode < BAD && + (state->mode < CHECK || flush != Z_FINISH))) + if (updatewindow(strm, strm->next_out, out - strm->avail_out)) { state->mode = MEM; return Z_MEM_ERROR; } @@ -1146,7 +1244,8 @@ strm->adler = state->check = UPDATE(state->check, strm->next_out - out, out); strm->data_type = state->bits + (state->last ? 64 : 0) + - (state->mode == TYPE ? 128 : 0); + (state->mode == TYPE ? 128 : 0) + + (state->mode == LEN_ || state->mode == COPY_ ? 256 : 0); if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) ret = Z_BUF_ERROR; return ret; @@ -1166,13 +1265,37 @@ return Z_OK; } +int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength) +z_streamp strm; +Bytef *dictionary; +uInt *dictLength; +{ + struct inflate_state FAR *state; + + /* check state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* copy dictionary */ + if (state->whave && dictionary != Z_NULL) { + zmemcpy(dictionary, state->window + state->wnext, + state->whave - state->wnext); + zmemcpy(dictionary + state->whave - state->wnext, + state->window, state->wnext); + } + if (dictLength != Z_NULL) + *dictLength = state->whave; + return Z_OK; +} + int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { struct inflate_state FAR *state; - unsigned long id; + unsigned long dictid; + int ret; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; @@ -1180,29 +1303,21 @@ if (state->wrap != 0 && state->mode != DICT) return Z_STREAM_ERROR; - /* check for correct dictionary 
id */ + /* check for correct dictionary identifier */ if (state->mode == DICT) { - id = adler32(0L, Z_NULL, 0); - id = adler32(id, dictionary, dictLength); - if (id != state->check) + dictid = adler32(0L, Z_NULL, 0); + dictid = adler32(dictid, dictionary, dictLength); + if (dictid != state->check) return Z_DATA_ERROR; } - /* copy dictionary to window */ - if (updatewindow(strm, strm->avail_out)) { + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary + dictLength, dictLength); + if (ret) { state->mode = MEM; return Z_MEM_ERROR; } - if (dictLength > state->wsize) { - zmemcpy(state->window, dictionary + dictLength - state->wsize, - state->wsize); - state->whave = state->wsize; - } - else { - zmemcpy(state->window + state->wsize - dictLength, dictionary, - dictLength); - state->whave = dictLength; - } state->havedict = 1; Tracev((stderr, "inflate: dictionary set\n")); return Z_OK; @@ -1238,7 +1353,7 @@ */ local unsigned syncsearch(have, buf, len) unsigned FAR *have; -unsigned char FAR *buf; +const unsigned char FAR *buf; unsigned len; { unsigned got; @@ -1350,8 +1465,8 @@ } /* copy state */ - zmemcpy(dest, source, sizeof(z_stream)); - zmemcpy(copy, state, sizeof(struct inflate_state)); + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state)); if (state->lencode >= state->codes && state->lencode <= state->codes + ENOUGH - 1) { copy->lencode = copy->codes + (state->lencode - state->codes); @@ -1366,3 +1481,32 @@ dest->state = (struct internal_state FAR *)copy; return Z_OK; } + +int ZEXPORT inflateUndermine(strm, subvert) +z_streamp strm; +int subvert; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + state->sane = !subvert; +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + return Z_OK; 
+#else + state->sane = 1; + return Z_DATA_ERROR; +#endif +} + +long ZEXPORT inflateMark(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16; + state = (struct inflate_state FAR *)strm->state; + return ((long)(state->back) << 16) + + (state->mode == COPY ? state->length : + (state->mode == MATCH ? state->was - state->length : 0)); +} diff -Nru nodejs-0.11.13/deps/zlib/inflate.h nodejs-0.11.15/deps/zlib/inflate.h --- nodejs-0.11.13/deps/zlib/inflate.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inflate.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* inflate.h -- internal inflate state definition - * Copyright (C) 1995-2004 Mark Adler + * Copyright (C) 1995-2009 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -32,11 +32,13 @@ TYPE, /* i: waiting for type bits, including last-flag bit */ TYPEDO, /* i: same, but skip check to exit inflate on new block */ STORED, /* i: waiting for stored size (length and complement) */ + COPY_, /* i/o: same as COPY below, but only first time in */ COPY, /* i/o: waiting for input or output to copy stored block */ TABLE, /* i: waiting for dynamic block table lengths */ LENLENS, /* i: waiting for code length code lengths */ CODELENS, /* i: waiting for length/lit and distance code lengths */ - LEN, /* i: waiting for length/lit code */ + LEN_, /* i: same as LEN below, but only first time in */ + LEN, /* i: waiting for length/lit/eob code */ LENEXT, /* i: waiting for length extra bits */ DIST, /* i: waiting for distance code */ DISTEXT, /* i: waiting for distance extra bits */ @@ -53,19 +55,21 @@ /* State transitions between above modes - - (most modes can go to the BAD or MEM mode -- not shown for clarity) + (most modes can go to BAD or MEM on error -- not shown for clarity) Process header: - HEAD -> (gzip) or (zlib) - (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME - NAME -> COMMENT -> 
HCRC -> TYPE + HEAD -> (gzip) or (zlib) or (raw) + (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT -> + HCRC -> TYPE (zlib) -> DICTID or TYPE DICTID -> DICT -> TYPE + (raw) -> TYPEDO Read deflate blocks: - TYPE -> STORED or TABLE or LEN or CHECK - STORED -> COPY -> TYPE - TABLE -> LENLENS -> CODELENS -> LEN - Read deflate codes: + TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK + STORED -> COPY_ -> COPY -> TYPE + TABLE -> LENLENS -> CODELENS -> LEN_ + LEN_ -> LEN + Read deflate codes in fixed or dynamic block: LEN -> LENEXT or LIT or TYPE LENEXT -> DIST -> DISTEXT -> MATCH -> LEN LIT -> LEN @@ -73,7 +77,7 @@ CHECK -> LENGTH -> DONE */ -/* state maintained between inflate() calls. Approximately 7K bytes. */ +/* state maintained between inflate() calls. Approximately 10K bytes. */ struct inflate_state { inflate_mode mode; /* current inflate mode */ int last; /* true if processing last block */ @@ -88,7 +92,7 @@ unsigned wbits; /* log base 2 of requested window size */ unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ - unsigned write; /* window write index */ + unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if needed */ /* bit accumulator */ unsigned long hold; /* input bit accumulator */ @@ -112,4 +116,7 @@ unsigned short lens[320]; /* temporary storage for code lengths */ unsigned short work[288]; /* work area for code table building */ code codes[ENOUGH]; /* space for code tables */ + int sane; /* if false, allow invalid distance too far */ + int back; /* bits back of last unprocessed length/lit */ + unsigned was; /* initial length of match */ }; diff -Nru nodejs-0.11.13/deps/zlib/inftrees.c nodejs-0.11.15/deps/zlib/inftrees.c --- nodejs-0.11.13/deps/zlib/inftrees.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inftrees.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* inftrees.c -- generate Huffman trees for 
efficient decoding - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2013 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -9,7 +9,7 @@ #define MAXBITS 15 const char inflate_copyright[] = - " inflate 1.2.3 Copyright 1995-2005 Mark Adler "; + " inflate 1.2.8 Copyright 1995-2013 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot @@ -29,7 +29,7 @@ table index bits. It will differ if the request is greater than the longest code or if it is less than the shortest code. */ -int inflate_table(type, lens, codes, table, bits, work) +int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; @@ -50,7 +50,7 @@ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ - code this; /* table entry for duplication */ + code here; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ @@ -62,7 +62,7 @@ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, - 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196}; + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, @@ -115,15 +115,15 @@ if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ - this.op = (unsigned char)64; /* invalid code marker */ - this.bits = (unsigned 
char)1; - this.val = (unsigned short)0; - *(*table)++ = this; /* make a table to force an error */ - *(*table)++ = this; + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)1; + here.val = (unsigned short)0; + *(*table)++ = here; /* make a table to force an error */ + *(*table)++ = here; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } - for (min = 1; min <= MAXBITS; min++) + for (min = 1; min < max; min++) if (count[min] != 0) break; if (root < min) root = min; @@ -166,11 +166,10 @@ entered in the tables. used keeps track of how many table entries have been allocated from the - provided *table space. It is checked when a LENS table is being made - against the space in *table, ENOUGH, minus the maximum space needed by - the worst case distance code, MAXD. This should never happen, but the - sufficiency of ENOUGH has not been proven exhaustively, hence the check. - This assumes that when type == LENS, bits == 9. + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. 
This @@ -209,24 +208,25 @@ mask = used - 1; /* mask for comparing low */ /* check available table space */ - if (type == LENS && used >= ENOUGH - MAXD) + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ - this.bits = (unsigned char)(len - drop); + here.bits = (unsigned char)(len - drop); if ((int)(work[sym]) < end) { - this.op = (unsigned char)0; - this.val = work[sym]; + here.op = (unsigned char)0; + here.val = work[sym]; } else if ((int)(work[sym]) > end) { - this.op = (unsigned char)(extra[work[sym]]); - this.val = base[work[sym]]; + here.op = (unsigned char)(extra[work[sym]]); + here.val = base[work[sym]]; } else { - this.op = (unsigned char)(32 + 64); /* end of block */ - this.val = 0; + here.op = (unsigned char)(32 + 64); /* end of block */ + here.val = 0; } /* replicate for those indices with low len bits equal to huff */ @@ -235,7 +235,7 @@ min = fill; /* save offset to next table */ do { fill -= incr; - next[(huff >> drop) + fill] = this; + next[(huff >> drop) + fill] = here; } while (fill != 0); /* backwards increment the len-bit code huff */ @@ -277,7 +277,8 @@ /* check for enough space */ used += 1U << curr; - if (type == LENS && used >= ENOUGH - MAXD) + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) return 1; /* point entry in root table to sub-table */ @@ -288,38 +289,14 @@ } } - /* - Fill in rest of table for incomplete codes. This loop is similar to the - loop above in incrementing huff for table indices. It is assumed that - len is equal to curr + drop, so there is no loop needed to increment - through high index bits. When the current sub-table is filled, the loop - drops back to the root table to fill in any remaining entries there. 
- */ - this.op = (unsigned char)64; /* invalid code marker */ - this.bits = (unsigned char)(len - drop); - this.val = (unsigned short)0; - while (huff != 0) { - /* when done with sub-table, drop back to root table */ - if (drop != 0 && (huff & mask) != low) { - drop = 0; - len = root; - next = *table; - this.bits = (unsigned char)len; - } - - /* put invalid code marker in table */ - next[huff >> drop] = this; - - /* backwards increment the len-bit code huff */ - incr = 1U << (len - 1); - while (huff & incr) - incr >>= 1; - if (incr != 0) { - huff &= incr - 1; - huff += incr; - } - else - huff = 0; + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff != 0) { + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)(len - drop); + here.val = (unsigned short)0; + next[huff] = here; } /* set return parameters */ diff -Nru nodejs-0.11.13/deps/zlib/inftrees.h nodejs-0.11.15/deps/zlib/inftrees.h --- nodejs-0.11.13/deps/zlib/inftrees.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/inftrees.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* inftrees.h -- header to use inftrees.c - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2005, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -35,21 +35,28 @@ 01000000 - invalid code */ -/* Maximum size of dynamic tree. The maximum found in a long but non- - exhaustive search was 1444 code structures (852 for length/literals - and 592 for distances, the latter actually the result of an - exhaustive search). The true maximum is not known, but the value - below is more than safe. */ -#define ENOUGH 2048 -#define MAXD 592 +/* Maximum size of the dynamic table. 
The maximum number of code structures is + 1444, which is the sum of 852 for literal/length codes and 592 for distance + codes. These values were found by exhaustive searches using the program + examples/enough.c found in the zlib distribtution. The arguments to that + program are the number of symbols, the initial root table size, and the + maximum bit length of a code. "enough 286 9 15" for literal/length codes + returns returns 852, and "enough 30 6 15" for distance codes returns 592. + The initial root table size (9 or 6) is found in the fifth argument of the + inflate_table() calls in inflate.c and infback.c. If the root table size is + changed, then these maximum sizes would be need to be recalculated and + updated. */ +#define ENOUGH_LENS 852 +#define ENOUGH_DISTS 592 +#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) -/* Type of code to build for inftable() */ +/* Type of code to build for inflate_table() */ typedef enum { CODES, LENS, DISTS } codetype; -extern int inflate_table OF((codetype type, unsigned short FAR *lens, +int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens, unsigned codes, code FAR * FAR *table, unsigned FAR *bits, unsigned short FAR *work)); diff -Nru nodejs-0.11.13/deps/zlib/LICENSE nodejs-0.11.15/deps/zlib/LICENSE --- nodejs-0.11.13/deps/zlib/LICENSE 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -/* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.4, March 14th, 2010 - - Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. 
The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly - Mark Adler - -*/ diff -Nru nodejs-0.11.13/deps/zlib/Makefile nodejs-0.11.15/deps/zlib/Makefile --- nodejs-0.11.13/deps/zlib/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +all: + -@echo "Please use ./configure first. Thank you." + +distclean: + make -f Makefile.in distclean diff -Nru nodejs-0.11.13/deps/zlib/Makefile.in nodejs-0.11.15/deps/zlib/Makefile.in --- nodejs-0.11.13/deps/zlib/Makefile.in 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/Makefile.in 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,288 @@ +# Makefile for zlib +# Copyright (C) 1995-2013 Jean-loup Gailly, Mark Adler +# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile and test, type: +# ./configure; make test +# Normally configure builds both a static and a shared library. +# If you want to build just a static library, use: ./configure --static + +# To use the asm code, type: +# cp contrib/asm?86/match.S ./match.S +# make LOC=-DASMV OBJA=match.o + +# To install /usr/local/lib/libz.* and /usr/local/include/zlib.h, type: +# make install +# To install in $HOME instead of /usr/local, use: +# make install prefix=$HOME + +CC=cc + +CFLAGS=-O +#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 +#CFLAGS=-g -DDEBUG +#CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ +# -Wstrict-prototypes -Wmissing-prototypes + +SFLAGS=-O +LDFLAGS= +TEST_LDFLAGS=-L. 
libz.a +LDSHARED=$(CC) +CPP=$(CC) -E + +STATICLIB=libz.a +SHAREDLIB=libz.so +SHAREDLIBV=libz.so.1.2.8 +SHAREDLIBM=libz.so.1 +LIBS=$(STATICLIB) $(SHAREDLIBV) + +AR=ar +ARFLAGS=rc +RANLIB=ranlib +LDCONFIG=ldconfig +LDSHAREDLIBC=-lc +TAR=tar +SHELL=/bin/sh +EXE= + +prefix = /usr/local +exec_prefix = ${prefix} +libdir = ${exec_prefix}/lib +sharedlibdir = ${libdir} +includedir = ${prefix}/include +mandir = ${prefix}/share/man +man3dir = ${mandir}/man3 +pkgconfigdir = ${libdir}/pkgconfig + +OBJZ = adler32.o crc32.o deflate.o infback.o inffast.o inflate.o inftrees.o trees.o zutil.o +OBJG = compress.o uncompr.o gzclose.o gzlib.o gzread.o gzwrite.o +OBJC = $(OBJZ) $(OBJG) + +PIC_OBJZ = adler32.lo crc32.lo deflate.lo infback.lo inffast.lo inflate.lo inftrees.lo trees.lo zutil.lo +PIC_OBJG = compress.lo uncompr.lo gzclose.lo gzlib.lo gzread.lo gzwrite.lo +PIC_OBJC = $(PIC_OBJZ) $(PIC_OBJG) + +# to use the asm code: make OBJA=match.o, PIC_OBJA=match.lo +OBJA = +PIC_OBJA = + +OBJS = $(OBJC) $(OBJA) + +PIC_OBJS = $(PIC_OBJC) $(PIC_OBJA) + +all: static shared + +static: example$(EXE) minigzip$(EXE) + +shared: examplesh$(EXE) minigzipsh$(EXE) + +all64: example64$(EXE) minigzip64$(EXE) + +check: test + +test: all teststatic testshared + +teststatic: static + @TMPST=tmpst_$$; \ + if echo hello world | ./minigzip | ./minigzip -d && ./example $$TMPST ; then \ + echo ' *** zlib test OK ***'; \ + else \ + echo ' *** zlib test FAILED ***'; false; \ + fi; \ + rm -f $$TMPST + +testshared: shared + @LD_LIBRARY_PATH=`pwd`:$(LD_LIBRARY_PATH) ; export LD_LIBRARY_PATH; \ + LD_LIBRARYN32_PATH=`pwd`:$(LD_LIBRARYN32_PATH) ; export LD_LIBRARYN32_PATH; \ + DYLD_LIBRARY_PATH=`pwd`:$(DYLD_LIBRARY_PATH) ; export DYLD_LIBRARY_PATH; \ + SHLIB_PATH=`pwd`:$(SHLIB_PATH) ; export SHLIB_PATH; \ + TMPSH=tmpsh_$$; \ + if echo hello world | ./minigzipsh | ./minigzipsh -d && ./examplesh $$TMPSH; then \ + echo ' *** zlib shared test OK ***'; \ + else \ + echo ' *** zlib shared test FAILED ***'; false; \ + fi; \ + 
rm -f $$TMPSH + +test64: all64 + @TMP64=tmp64_$$; \ + if echo hello world | ./minigzip64 | ./minigzip64 -d && ./example64 $$TMP64; then \ + echo ' *** zlib 64-bit test OK ***'; \ + else \ + echo ' *** zlib 64-bit test FAILED ***'; false; \ + fi; \ + rm -f $$TMP64 + +infcover.o: test/infcover.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/infcover.c + +infcover: infcover.o libz.a + $(CC) $(CFLAGS) -o $@ infcover.o libz.a + +cover: infcover + rm -f *.gcda + ./infcover + gcov inf*.c + +libz.a: $(OBJS) + $(AR) $(ARFLAGS) $@ $(OBJS) + -@ ($(RANLIB) $@ || true) >/dev/null 2>&1 + +match.o: match.S + $(CPP) match.S > _match.s + $(CC) -c _match.s + mv _match.o match.o + rm -f _match.s + +match.lo: match.S + $(CPP) match.S > _match.s + $(CC) -c -fPIC _match.s + mv _match.o match.lo + rm -f _match.s + +example.o: test/example.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/example.c + +minigzip.o: test/minigzip.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/minigzip.c + +example64.o: test/example.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -D_FILE_OFFSET_BITS=64 -c -o $@ test/example.c + +minigzip64.o: test/minigzip.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -D_FILE_OFFSET_BITS=64 -c -o $@ test/minigzip.c + +.SUFFIXES: .lo + +.c.lo: + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) -DPIC -c -o objs/$*.o $< + -@mv objs/$*.o $@ + +placebo $(SHAREDLIBV): $(PIC_OBJS) libz.a + $(LDSHARED) $(SFLAGS) -o $@ $(PIC_OBJS) $(LDSHAREDLIBC) $(LDFLAGS) + rm -f $(SHAREDLIB) $(SHAREDLIBM) + ln -s $@ $(SHAREDLIB) + ln -s $@ $(SHAREDLIBM) + -@rmdir objs + +example$(EXE): example.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ example.o $(TEST_LDFLAGS) + +minigzip$(EXE): minigzip.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ minigzip.o $(TEST_LDFLAGS) + +examplesh$(EXE): example.o $(SHAREDLIBV) + $(CC) $(CFLAGS) -o $@ example.o -L. $(SHAREDLIBV) + +minigzipsh$(EXE): minigzip.o $(SHAREDLIBV) + $(CC) $(CFLAGS) -o $@ minigzip.o -L. 
$(SHAREDLIBV) + +example64$(EXE): example64.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ example64.o $(TEST_LDFLAGS) + +minigzip64$(EXE): minigzip64.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ minigzip64.o $(TEST_LDFLAGS) + +install-libs: $(LIBS) + -@if [ ! -d $(DESTDIR)$(exec_prefix) ]; then mkdir -p $(DESTDIR)$(exec_prefix); fi + -@if [ ! -d $(DESTDIR)$(libdir) ]; then mkdir -p $(DESTDIR)$(libdir); fi + -@if [ ! -d $(DESTDIR)$(sharedlibdir) ]; then mkdir -p $(DESTDIR)$(sharedlibdir); fi + -@if [ ! -d $(DESTDIR)$(man3dir) ]; then mkdir -p $(DESTDIR)$(man3dir); fi + -@if [ ! -d $(DESTDIR)$(pkgconfigdir) ]; then mkdir -p $(DESTDIR)$(pkgconfigdir); fi + cp $(STATICLIB) $(DESTDIR)$(libdir) + chmod 644 $(DESTDIR)$(libdir)/$(STATICLIB) + -@($(RANLIB) $(DESTDIR)$(libdir)/libz.a || true) >/dev/null 2>&1 + -@if test -n "$(SHAREDLIBV)"; then \ + cp $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir); \ + echo "cp $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)"; \ + chmod 755 $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV); \ + echo "chmod 755 $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV)"; \ + rm -f $(DESTDIR)$(sharedlibdir)/$(SHAREDLIB) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBM); \ + ln -s $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIB); \ + ln -s $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBM); \ + ($(LDCONFIG) || true) >/dev/null 2>&1; \ + fi + cp zlib.3 $(DESTDIR)$(man3dir) + chmod 644 $(DESTDIR)$(man3dir)/zlib.3 + cp zlib.pc $(DESTDIR)$(pkgconfigdir) + chmod 644 $(DESTDIR)$(pkgconfigdir)/zlib.pc +# The ranlib in install is needed on NeXTSTEP which checks file times +# ldconfig is for Linux + +install: install-libs + -@if [ ! 
-d $(DESTDIR)$(includedir) ]; then mkdir -p $(DESTDIR)$(includedir); fi + cp zlib.h zconf.h $(DESTDIR)$(includedir) + chmod 644 $(DESTDIR)$(includedir)/zlib.h $(DESTDIR)$(includedir)/zconf.h + +uninstall: + cd $(DESTDIR)$(includedir) && rm -f zlib.h zconf.h + cd $(DESTDIR)$(libdir) && rm -f libz.a; \ + if test -n "$(SHAREDLIBV)" -a -f $(SHAREDLIBV); then \ + rm -f $(SHAREDLIBV) $(SHAREDLIB) $(SHAREDLIBM); \ + fi + cd $(DESTDIR)$(man3dir) && rm -f zlib.3 + cd $(DESTDIR)$(pkgconfigdir) && rm -f zlib.pc + +docs: zlib.3.pdf + +zlib.3.pdf: zlib.3 + groff -mandoc -f H -T ps zlib.3 | ps2pdf - zlib.3.pdf + +zconf.h.cmakein: zconf.h.in + -@ TEMPFILE=zconfh_$$; \ + echo "/#define ZCONF_H/ a\\\\\n#cmakedefine Z_PREFIX\\\\\n#cmakedefine Z_HAVE_UNISTD_H\n" >> $$TEMPFILE &&\ + sed -f $$TEMPFILE zconf.h.in > zconf.h.cmakein &&\ + touch -r zconf.h.in zconf.h.cmakein &&\ + rm $$TEMPFILE + +zconf: zconf.h.in + cp -p zconf.h.in zconf.h + +mostlyclean: clean +clean: + rm -f *.o *.lo *~ \ + example$(EXE) minigzip$(EXE) examplesh$(EXE) minigzipsh$(EXE) \ + example64$(EXE) minigzip64$(EXE) \ + infcover \ + libz.* foo.gz so_locations \ + _match.s maketree contrib/infback9/*.o + rm -rf objs + rm -f *.gcda *.gcno *.gcov + rm -f contrib/infback9/*.gcda contrib/infback9/*.gcno contrib/infback9/*.gcov + +maintainer-clean: distclean +distclean: clean zconf zconf.h.cmakein docs + rm -f Makefile zlib.pc configure.log + -@rm -f .DS_Store + -@printf 'all:\n\t-@echo "Please use ./configure first. Thank you."\n' > Makefile + -@printf '\ndistclean:\n\tmake -f Makefile.in distclean\n' >> Makefile + -@touch -r Makefile.in Makefile + +tags: + etags *.[ch] + +depend: + makedepend -- $(CFLAGS) -- *.[ch] + +# DO NOT DELETE THIS LINE -- make depend depends on it. 
+ +adler32.o zutil.o: zutil.h zlib.h zconf.h +gzclose.o gzlib.o gzread.o gzwrite.o: zlib.h zconf.h gzguts.h +compress.o example.o minigzip.o uncompr.o: zlib.h zconf.h +crc32.o: zutil.h zlib.h zconf.h crc32.h +deflate.o: deflate.h zutil.h zlib.h zconf.h +infback.o inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h inffixed.h +inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inftrees.o: zutil.h zlib.h zconf.h inftrees.h +trees.o: deflate.h zutil.h zlib.h zconf.h trees.h + +adler32.lo zutil.lo: zutil.h zlib.h zconf.h +gzclose.lo gzlib.lo gzread.lo gzwrite.lo: zlib.h zconf.h gzguts.h +compress.lo example.lo minigzip.lo uncompr.lo: zlib.h zconf.h +crc32.lo: zutil.h zlib.h zconf.h crc32.h +deflate.lo: deflate.h zutil.h zlib.h zconf.h +infback.lo inflate.lo: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h inffixed.h +inffast.lo: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inftrees.lo: zutil.h zlib.h zconf.h inftrees.h +trees.lo: deflate.h zutil.h zlib.h zconf.h trees.h diff -Nru nodejs-0.11.13/deps/zlib/make_vms.com nodejs-0.11.15/deps/zlib/make_vms.com --- nodejs-0.11.13/deps/zlib/make_vms.com 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/make_vms.com 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,867 @@ +$! make libz under VMS written by +$! Martin P.J. Zinser +$! +$! In case of problems with the install you might contact me at +$! zinser@zinser.no-ip.info(preferred) or +$! martin.zinser@eurexchange.com (work) +$! +$! Make procedure history for Zlib +$! +$!------------------------------------------------------------------------------ +$! Version history +$! 0.01 20060120 First version to receive a number +$! 0.02 20061008 Adapt to new Makefile.in +$! 0.03 20091224 Add support for large file check +$! 0.04 20100110 Add new gzclose, gzlib, gzread, gzwrite +$! 0.05 20100221 Exchange zlibdefs.h by zconf.h.in +$! 0.06 20120111 Fix missing amiss_err, update zconf_h.in, fix new exmples +$! 
subdir path, update module search in makefile.in +$! 0.07 20120115 Triggered by work done by Alexey Chupahin completly redesigned +$! shared image creation +$! 0.08 20120219 Make it work on VAX again, pre-load missing symbols to shared +$! image +$! 0.09 20120305 SMS. P1 sets builder ("MMK", "MMS", " " (built-in)). +$! "" -> automatic, preference: MMK, MMS, built-in. +$! +$ on error then goto err_exit +$! +$ true = 1 +$ false = 0 +$ tmpnam = "temp_" + f$getjpi("","pid") +$ tt = tmpnam + ".txt" +$ tc = tmpnam + ".c" +$ th = tmpnam + ".h" +$ define/nolog tconfig 'th' +$ its_decc = false +$ its_vaxc = false +$ its_gnuc = false +$ s_case = False +$! +$! Setup variables holding "config" information +$! +$ Make = "''p1'" +$ name = "Zlib" +$ version = "?.?.?" +$ v_string = "ZLIB_VERSION" +$ v_file = "zlib.h" +$ ccopt = "/include = []" +$ lopts = "" +$ dnsrl = "" +$ aconf_in_file = "zconf.h.in#zconf.h_in#zconf_h.in" +$ conf_check_string = "" +$ linkonly = false +$ optfile = name + ".opt" +$ mapfile = name + ".map" +$ libdefs = "" +$ vax = f$getsyi("HW_MODEL").lt.1024 +$ axp = f$getsyi("HW_MODEL").ge.1024 .and. f$getsyi("HW_MODEL").lt.4096 +$ ia64 = f$getsyi("HW_MODEL").ge.4096 +$! +$! 2012-03-05 SMS. +$! Why is this needed? And if it is needed, why not simply ".not. vax"? +$! +$!!! if axp .or. ia64 then set proc/parse=extended +$! +$ whoami = f$parse(f$environment("Procedure"),,,,"NO_CONCEAL") +$ mydef = F$parse(whoami,,,"DEVICE") +$ mydir = f$parse(whoami,,,"DIRECTORY") - "][" +$ myproc = f$parse(whoami,,,"Name") + f$parse(whoami,,,"type") +$! +$! Check for MMK/MMS +$! +$ if (Make .eqs. "") +$ then +$ If F$Search ("Sys$System:MMS.EXE") .nes. "" Then Make = "MMS" +$ If F$Type (MMK) .eqs. "STRING" Then Make = "MMK" +$ else +$ Make = f$edit( Make, "trim") +$ endif +$! +$ gosub find_version +$! +$ open/write topt tmp.opt +$ open/write optf 'optfile' +$! +$ gosub check_opts +$! +$! Look for the compiler used +$! +$ gosub check_compiler +$ close topt +$ close optf +$! 
+$ if its_decc +$ then +$ ccopt = "/prefix=all" + ccopt +$ if f$trnlnm("SYS") .eqs. "" +$ then +$ if axp +$ then +$ define sys sys$library: +$ else +$ ccopt = "/decc" + ccopt +$ define sys decc$library_include: +$ endif +$ endif +$! +$! 2012-03-05 SMS. +$! Why /NAMES = AS_IS? Why not simply ".not. vax"? And why not on VAX? +$! +$ if axp .or. ia64 +$ then +$ ccopt = ccopt + "/name=as_is/opt=(inline=speed)" +$ s_case = true +$ endif +$ endif +$ if its_vaxc .or. its_gnuc +$ then +$ if f$trnlnm("SYS").eqs."" then define sys sys$library: +$ endif +$! +$! Build a fake configure input header +$! +$ open/write conf_hin config.hin +$ write conf_hin "#undef _LARGEFILE64_SOURCE" +$ close conf_hin +$! +$! +$ i = 0 +$FIND_ACONF: +$ fname = f$element(i,"#",aconf_in_file) +$ if fname .eqs. "#" then goto AMISS_ERR +$ if f$search(fname) .eqs. "" +$ then +$ i = i + 1 +$ goto find_aconf +$ endif +$ open/read/err=aconf_err aconf_in 'fname' +$ open/write aconf zconf.h +$ACONF_LOOP: +$ read/end_of_file=aconf_exit aconf_in line +$ work = f$edit(line, "compress,trim") +$ if f$extract(0,6,work) .nes. "#undef" +$ then +$ if f$extract(0,12,work) .nes. "#cmakedefine" +$ then +$ write aconf line +$ endif +$ else +$ cdef = f$element(1," ",work) +$ gosub check_config +$ endif +$ goto aconf_loop +$ACONF_EXIT: +$ write aconf "" +$ write aconf "/* VMS specifics added by make_vms.com: */" +$ write aconf "#define VMS 1" +$ write aconf "#include " +$ write aconf "#include " +$ write aconf "#ifdef _LARGEFILE" +$ write aconf "# define off64_t __off64_t" +$ write aconf "# define fopen64 fopen" +$ write aconf "# define fseeko64 fseeko" +$ write aconf "# define lseek64 lseek" +$ write aconf "# define ftello64 ftell" +$ write aconf "#endif" +$ write aconf "#if !defined( __VAX) && (__CRTL_VER >= 70312000)" +$ write aconf "# define HAVE_VSNPRINTF" +$ write aconf "#endif" +$ close aconf_in +$ close aconf +$ if f$search("''th'") .nes. "" then delete 'th';* +$! Build the thing plain or with mms +$! 
+$ write sys$output "Compiling Zlib sources ..." +$ if make.eqs."" +$ then +$ if (f$search( "example.obj;*") .nes. "") then delete example.obj;* +$ if (f$search( "minigzip.obj;*") .nes. "") then delete minigzip.obj;* +$ CALL MAKE adler32.OBJ "CC ''CCOPT' adler32" - + adler32.c zlib.h zconf.h +$ CALL MAKE compress.OBJ "CC ''CCOPT' compress" - + compress.c zlib.h zconf.h +$ CALL MAKE crc32.OBJ "CC ''CCOPT' crc32" - + crc32.c zlib.h zconf.h +$ CALL MAKE deflate.OBJ "CC ''CCOPT' deflate" - + deflate.c deflate.h zutil.h zlib.h zconf.h +$ CALL MAKE gzclose.OBJ "CC ''CCOPT' gzclose" - + gzclose.c zutil.h zlib.h zconf.h +$ CALL MAKE gzlib.OBJ "CC ''CCOPT' gzlib" - + gzlib.c zutil.h zlib.h zconf.h +$ CALL MAKE gzread.OBJ "CC ''CCOPT' gzread" - + gzread.c zutil.h zlib.h zconf.h +$ CALL MAKE gzwrite.OBJ "CC ''CCOPT' gzwrite" - + gzwrite.c zutil.h zlib.h zconf.h +$ CALL MAKE infback.OBJ "CC ''CCOPT' infback" - + infback.c zutil.h inftrees.h inflate.h inffast.h inffixed.h +$ CALL MAKE inffast.OBJ "CC ''CCOPT' inffast" - + inffast.c zutil.h zlib.h zconf.h inffast.h +$ CALL MAKE inflate.OBJ "CC ''CCOPT' inflate" - + inflate.c zutil.h zlib.h zconf.h infblock.h +$ CALL MAKE inftrees.OBJ "CC ''CCOPT' inftrees" - + inftrees.c zutil.h zlib.h zconf.h inftrees.h +$ CALL MAKE trees.OBJ "CC ''CCOPT' trees" - + trees.c deflate.h zutil.h zlib.h zconf.h +$ CALL MAKE uncompr.OBJ "CC ''CCOPT' uncompr" - + uncompr.c zlib.h zconf.h +$ CALL MAKE zutil.OBJ "CC ''CCOPT' zutil" - + zutil.c zutil.h zlib.h zconf.h +$ write sys$output "Building Zlib ..." +$ CALL MAKE libz.OLB "lib/crea libz.olb *.obj" *.OBJ +$ write sys$output "Building example..." +$ CALL MAKE example.OBJ "CC ''CCOPT' [.test]example" - + [.test]example.c zlib.h zconf.h +$ call make example.exe "LINK example,libz.olb/lib" example.obj libz.olb +$ write sys$output "Building minigzip..." 
+$ CALL MAKE minigzip.OBJ "CC ''CCOPT' [.test]minigzip" - + [.test]minigzip.c zlib.h zconf.h +$ call make minigzip.exe - + "LINK minigzip,libz.olb/lib" - + minigzip.obj libz.olb +$ else +$ gosub crea_mms +$ write sys$output "Make ''name' ''version' with ''Make' " +$ 'make' +$ endif +$! +$! Create shareable image +$! +$ gosub crea_olist +$ write sys$output "Creating libzshr.exe" +$ call map_2_shopt 'mapfile' 'optfile' +$ LINK_'lopts'/SHARE=libzshr.exe modules.opt/opt,'optfile'/opt +$ write sys$output "Zlib build completed" +$ delete/nolog tmp.opt;* +$ exit +$AMISS_ERR: +$ write sys$output "No source for config.hin found." +$ write sys$output "Tried any of ''aconf_in_file'" +$ goto err_exit +$CC_ERR: +$ write sys$output "C compiler required to build ''name'" +$ goto err_exit +$ERR_EXIT: +$ set message/facil/ident/sever/text +$ close/nolog optf +$ close/nolog topt +$ close/nolog aconf_in +$ close/nolog aconf +$ close/nolog out +$ close/nolog min +$ close/nolog mod +$ close/nolog h_in +$ write sys$output "Exiting..." +$ exit 2 +$! +$! +$MAKE: SUBROUTINE !SUBROUTINE TO CHECK DEPENDENCIES +$ V = 'F$Verify(0) +$! P1 = What we are trying to make +$! P2 = Command to make it +$! P3 - P8 What it depends on +$ +$ If F$Search(P1) .Eqs. "" Then Goto Makeit +$ Time = F$CvTime(F$File(P1,"RDT")) +$arg=3 +$Loop: +$ Argument = P'arg +$ If Argument .Eqs. "" Then Goto Exit +$ El=0 +$Loop2: +$ File = F$Element(El," ",Argument) +$ If File .Eqs. " " Then Goto Endl +$ AFile = "" +$Loop3: +$ OFile = AFile +$ AFile = F$Search(File) +$ If AFile .Eqs. "" .Or. AFile .Eqs. OFile Then Goto NextEl +$ If F$CvTime(F$File(AFile,"RDT")) .Ges. Time Then Goto Makeit +$ Goto Loop3 +$NextEL: +$ El = El + 1 +$ Goto Loop2 +$EndL: +$ arg=arg+1 +$ If arg .Le. 8 Then Goto Loop +$ Goto Exit +$ +$Makeit: +$ VV=F$VERIFY(0) +$ write sys$output P2 +$ 'P2 +$ VV='F$Verify(VV) +$Exit: +$ If V Then Set Verify +$ENDSUBROUTINE +$!------------------------------------------------------------------------------ +$! +$! 
Check command line options and set symbols accordingly +$! +$!------------------------------------------------------------------------------ +$! Version history +$! 0.01 20041206 First version to receive a number +$! 0.02 20060126 Add new "HELP" target +$ CHECK_OPTS: +$ i = 1 +$ OPT_LOOP: +$ if i .lt. 9 +$ then +$ cparm = f$edit(p'i',"upcase") +$! +$! Check if parameter actually contains something +$! +$ if f$edit(cparm,"trim") .nes. "" +$ then +$ if cparm .eqs. "DEBUG" +$ then +$ ccopt = ccopt + "/noopt/deb" +$ lopts = lopts + "/deb" +$ endif +$ if f$locate("CCOPT=",cparm) .lt. f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ ccopt = ccopt + f$extract(start,len,cparm) +$ if f$locate("AS_IS",f$edit(ccopt,"UPCASE")) .lt. f$length(ccopt) - + then s_case = true +$ endif +$ if cparm .eqs. "LINK" then linkonly = true +$ if f$locate("LOPTS=",cparm) .lt. f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ lopts = lopts + f$extract(start,len,cparm) +$ endif +$ if f$locate("CC=",cparm) .lt. f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ cc_com = f$extract(start,len,cparm) + if (cc_com .nes. "DECC") .and. - + (cc_com .nes. "VAXC") .and. - + (cc_com .nes. "GNUC") +$ then +$ write sys$output "Unsupported compiler choice ''cc_com' ignored" +$ write sys$output "Use DECC, VAXC, or GNUC instead" +$ else +$ if cc_com .eqs. "DECC" then its_decc = true +$ if cc_com .eqs. "VAXC" then its_vaxc = true +$ if cc_com .eqs. "GNUC" then its_gnuc = true +$ endif +$ endif +$ if f$locate("MAKE=",cparm) .lt. f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ mmks = f$extract(start,len,cparm) +$ if (mmks .eqs. "MMK") .or. (mmks .eqs. "MMS") +$ then +$ make = mmks +$ else +$ write sys$output "Unsupported make choice ''mmks' ignored" +$ write sys$output "Use MMK or MMS instead" +$ endif +$ endif +$ if cparm .eqs. 
"HELP" then gosub bhelp +$ endif +$ i = i + 1 +$ goto opt_loop +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Look for the compiler used +$! +$! Version history +$! 0.01 20040223 First version to receive a number +$! 0.02 20040229 Save/set value of decc$no_rooted_search_lists +$! 0.03 20060202 Extend handling of GNU C +$! 0.04 20090402 Compaq -> hp +$CHECK_COMPILER: +$ if (.not. (its_decc .or. its_vaxc .or. its_gnuc)) +$ then +$ its_decc = (f$search("SYS$SYSTEM:DECC$COMPILER.EXE") .nes. "") +$ its_vaxc = .not. its_decc .and. (F$Search("SYS$System:VAXC.Exe") .nes. "") +$ its_gnuc = .not. (its_decc .or. its_vaxc) .and. (f$trnlnm("gnu_cc") .nes. "") +$ endif +$! +$! Exit if no compiler available +$! +$ if (.not. (its_decc .or. its_vaxc .or. its_gnuc)) +$ then goto CC_ERR +$ else +$ if its_decc +$ then +$ write sys$output "CC compiler check ... hp C" +$ if f$trnlnm("decc$no_rooted_search_lists") .nes. "" +$ then +$ dnrsl = f$trnlnm("decc$no_rooted_search_lists") +$ endif +$ define/nolog decc$no_rooted_search_lists 1 +$ else +$ if its_vaxc then write sys$output "CC compiler check ... VAX C" +$ if its_gnuc +$ then +$ write sys$output "CC compiler check ... GNU C" +$ if f$trnlnm(topt) then write topt "gnu_cc:[000000]gcclib.olb/lib" +$ if f$trnlnm(optf) then write optf "gnu_cc:[000000]gcclib.olb/lib" +$ cc = "gcc" +$ endif +$ if f$trnlnm(topt) then write topt "sys$share:vaxcrtl.exe/share" +$ if f$trnlnm(optf) then write optf "sys$share:vaxcrtl.exe/share" +$ endif +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! If MMS/MMK are available dump out the descrip.mms if required +$! +$CREA_MMS: +$ write sys$output "Creating descrip.mms..." +$ create descrip.mms +$ open/append out descrip.mms +$ copy sys$input: out +$ deck +# descrip.mms: MMS description file for building zlib on VMS +# written by Martin P.J. 
Zinser +# + +OBJS = adler32.obj, compress.obj, crc32.obj, gzclose.obj, gzlib.obj\ + gzread.obj, gzwrite.obj, uncompr.obj, infback.obj\ + deflate.obj, trees.obj, zutil.obj, inflate.obj, \ + inftrees.obj, inffast.obj + +$ eod +$ write out "CFLAGS=", ccopt +$ write out "LOPTS=", lopts +$ write out "all : example.exe minigzip.exe libz.olb" +$ copy sys$input: out +$ deck + @ write sys$output " Example applications available" + +libz.olb : libz.olb($(OBJS)) + @ write sys$output " libz available" + +example.exe : example.obj libz.olb + link $(LOPTS) example,libz.olb/lib + +minigzip.exe : minigzip.obj libz.olb + link $(LOPTS) minigzip,libz.olb/lib + +clean : + delete *.obj;*,libz.olb;*,*.opt;*,*.exe;* + + +# Other dependencies. +adler32.obj : adler32.c zutil.h zlib.h zconf.h +compress.obj : compress.c zlib.h zconf.h +crc32.obj : crc32.c zutil.h zlib.h zconf.h +deflate.obj : deflate.c deflate.h zutil.h zlib.h zconf.h +example.obj : [.test]example.c zlib.h zconf.h +gzclose.obj : gzclose.c zutil.h zlib.h zconf.h +gzlib.obj : gzlib.c zutil.h zlib.h zconf.h +gzread.obj : gzread.c zutil.h zlib.h zconf.h +gzwrite.obj : gzwrite.c zutil.h zlib.h zconf.h +inffast.obj : inffast.c zutil.h zlib.h zconf.h inftrees.h inffast.h +inflate.obj : inflate.c zutil.h zlib.h zconf.h +inftrees.obj : inftrees.c zutil.h zlib.h zconf.h inftrees.h +minigzip.obj : [.test]minigzip.c zlib.h zconf.h +trees.obj : trees.c deflate.h zutil.h zlib.h zconf.h +uncompr.obj : uncompr.c zlib.h zconf.h +zutil.obj : zutil.c zutil.h zlib.h zconf.h +infback.obj : infback.c zutil.h inftrees.h inflate.h inffast.h inffixed.h +$ eod +$ close out +$ return +$!------------------------------------------------------------------------------ +$! +$! Read list of core library sources from makefile.in and create options +$! needed to build shareable image +$! 
+$CREA_OLIST: +$ open/read min makefile.in +$ open/write mod modules.opt +$ src_check_list = "OBJZ =#OBJG =" +$MRLOOP: +$ read/end=mrdone min rec +$ i = 0 +$SRC_CHECK_LOOP: +$ src_check = f$element(i, "#", src_check_list) +$ i = i+1 +$ if src_check .eqs. "#" then goto mrloop +$ if (f$extract(0,6,rec) .nes. src_check) then goto src_check_loop +$ rec = rec - src_check +$ gosub extra_filnam +$ if (f$element(1,"\",rec) .eqs. "\") then goto mrloop +$MRSLOOP: +$ read/end=mrdone min rec +$ gosub extra_filnam +$ if (f$element(1,"\",rec) .nes. "\") then goto mrsloop +$MRDONE: +$ close min +$ close mod +$ return +$!------------------------------------------------------------------------------ +$! +$! Take record extracted in crea_olist and split it into single filenames +$! +$EXTRA_FILNAM: +$ myrec = f$edit(rec - "\", "trim,compress") +$ i = 0 +$FELOOP: +$ srcfil = f$element(i," ", myrec) +$ if (srcfil .nes. " ") +$ then +$ write mod f$parse(srcfil,,,"NAME"), ".obj" +$ i = i + 1 +$ goto feloop +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Find current Zlib version number +$! +$FIND_VERSION: +$ open/read h_in 'v_file' +$hloop: +$ read/end=hdone h_in rec +$ rec = f$edit(rec,"TRIM") +$ if (f$extract(0,1,rec) .nes. "#") then goto hloop +$ rec = f$edit(rec - "#", "TRIM") +$ if f$element(0," ",rec) .nes. "define" then goto hloop +$ if f$element(1," ",rec) .eqs. v_string +$ then +$ version = 'f$element(2," ",rec)' +$ goto hdone +$ endif +$ goto hloop +$hdone: +$ close h_in +$ return +$!------------------------------------------------------------------------------ +$! +$CHECK_CONFIG: +$! +$ in_ldef = f$locate(cdef,libdefs) +$ if (in_ldef .lt. f$length(libdefs)) +$ then +$ write aconf "#define ''cdef' 1" +$ libdefs = f$extract(0,in_ldef,libdefs) + - + f$extract(in_ldef + f$length(cdef) + 1, - + f$length(libdefs) - in_ldef - f$length(cdef) - 1, - + libdefs) +$ else +$ if (f$type('cdef') .eqs. 
"INTEGER") +$ then +$ write aconf "#define ''cdef' ", 'cdef' +$ else +$ if (f$type('cdef') .eqs. "STRING") +$ then +$ write aconf "#define ''cdef' ", """", '''cdef'', """" +$ else +$ gosub check_cc_def +$ endif +$ endif +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Check if this is a define relating to the properties of the C/C++ +$! compiler +$! +$ CHECK_CC_DEF: +$ if (cdef .eqs. "_LARGEFILE64_SOURCE") +$ then +$ copy sys$input: 'tc' +$ deck +#include "tconfig" +#define _LARGEFILE +#include + +int main(){ +FILE *fp; + fp = fopen("temp.txt","r"); + fseeko(fp,1,SEEK_SET); + fclose(fp); +} + +$ eod +$ test_inv = false +$ comm_h = false +$ gosub cc_prop_check +$ return +$ endif +$ write aconf "/* ", line, " */" +$ return +$!------------------------------------------------------------------------------ +$! +$! Check for properties of C/C++ compiler +$! +$! Version history +$! 0.01 20031020 First version to receive a number +$! 0.02 20031022 Added logic for defines with value +$! 0.03 20040309 Make sure local config file gets not deleted +$! 0.04 20041230 Also write include for configure run +$! 0.05 20050103 Add processing of "comment defines" +$CC_PROP_CHECK: +$ cc_prop = true +$ is_need = false +$ is_need = (f$extract(0,4,cdef) .eqs. "NEED") .or. (test_inv .eq. true) +$ if f$search(th) .eqs. "" then create 'th' +$ set message/nofac/noident/nosever/notext +$ on error then continue +$ cc 'tmpnam' +$ if .not. ($status) then cc_prop = false +$ on error then continue +$! The headers might lie about the capabilities of the RTL +$ link 'tmpnam',tmp.opt/opt +$ if .not. ($status) then cc_prop = false +$ set message/fac/ident/sever/text +$ on error then goto err_exit +$ delete/nolog 'tmpnam'.*;*/exclude='th' +$ if (cc_prop .and. .not. is_need) .or. - + (.not. cc_prop .and. is_need) +$ then +$ write sys$output "Checking for ''cdef'... yes" +$ if f$type('cdef_val'_yes) .nes. 
"" +$ then +$ if f$type('cdef_val'_yes) .eqs. "INTEGER" - + then call write_config f$fao("#define !AS !UL",cdef,'cdef_val'_yes) +$ if f$type('cdef_val'_yes) .eqs. "STRING" - + then call write_config f$fao("#define !AS !AS",cdef,'cdef_val'_yes) +$ else +$ call write_config f$fao("#define !AS 1",cdef) +$ endif +$ if (cdef .eqs. "HAVE_FSEEKO") .or. (cdef .eqs. "_LARGE_FILES") .or. - + (cdef .eqs. "_LARGEFILE64_SOURCE") then - + call write_config f$string("#define _LARGEFILE 1") +$ else +$ write sys$output "Checking for ''cdef'... no" +$ if (comm_h) +$ then + call write_config f$fao("/* !AS */",line) +$ else +$ if f$type('cdef_val'_no) .nes. "" +$ then +$ if f$type('cdef_val'_no) .eqs. "INTEGER" - + then call write_config f$fao("#define !AS !UL",cdef,'cdef_val'_no) +$ if f$type('cdef_val'_no) .eqs. "STRING" - + then call write_config f$fao("#define !AS !AS",cdef,'cdef_val'_no) +$ else +$ call write_config f$fao("#undef !AS",cdef) +$ endif +$ endif +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Check for properties of C/C++ compiler with multiple result values +$! +$! Version history +$! 0.01 20040127 First version +$! 0.02 20050103 Reconcile changes from cc_prop up to version 0.05 +$CC_MPROP_CHECK: +$ cc_prop = true +$ i = 1 +$ idel = 1 +$ MT_LOOP: +$ if f$type(result_'i') .eqs. "STRING" +$ then +$ set message/nofac/noident/nosever/notext +$ on error then continue +$ cc 'tmpnam'_'i' +$ if .not. ($status) then cc_prop = false +$ on error then continue +$! The headers might lie about the capabilities of the RTL +$ link 'tmpnam'_'i',tmp.opt/opt +$ if .not. ($status) then cc_prop = false +$ set message/fac/ident/sever/text +$ on error then goto err_exit +$ delete/nolog 'tmpnam'_'i'.*;* +$ if (cc_prop) +$ then +$ write sys$output "Checking for ''cdef'... ", mdef_'i' +$ if f$type(mdef_'i') .eqs. "INTEGER" - + then call write_config f$fao("#define !AS !UL",cdef,mdef_'i') +$ if f$type('cdef_val'_yes) .eqs. 
"STRING" - + then call write_config f$fao("#define !AS !AS",cdef,mdef_'i') +$ goto msym_clean +$ else +$ i = i + 1 +$ goto mt_loop +$ endif +$ endif +$ write sys$output "Checking for ''cdef'... no" +$ call write_config f$fao("#undef !AS",cdef) +$ MSYM_CLEAN: +$ if (idel .le. msym_max) +$ then +$ delete/sym mdef_'idel' +$ idel = idel + 1 +$ goto msym_clean +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Write configuration to both permanent and temporary config file +$! +$! Version history +$! 0.01 20031029 First version to receive a number +$! +$WRITE_CONFIG: SUBROUTINE +$ write aconf 'p1' +$ open/append confh 'th' +$ write confh 'p1' +$ close confh +$ENDSUBROUTINE +$!------------------------------------------------------------------------------ +$! +$! Analyze the project map file and create the symbol vector for a shareable +$! image from it +$! +$! Version history +$! 0.01 20120128 First version +$! 0.02 20120226 Add pre-load logic +$! +$ MAP_2_SHOPT: Subroutine +$! +$ SAY := "WRITE_ SYS$OUTPUT" +$! +$ IF F$SEARCH("''P1'") .EQS. "" +$ THEN +$ SAY "MAP_2_SHOPT-E-NOSUCHFILE: Error, inputfile ''p1' not available" +$ goto exit_m2s +$ ENDIF +$ IF "''P2'" .EQS. "" +$ THEN +$ SAY "MAP_2_SHOPT: Error, no output file provided" +$ goto exit_m2s +$ ENDIF +$! +$ module1 = "deflate#deflateEnd#deflateInit_#deflateParams#deflateSetDictionary" +$ module2 = "gzclose#gzerror#gzgetc#gzgets#gzopen#gzprintf#gzputc#gzputs#gzread" +$ module3 = "gzseek#gztell#inflate#inflateEnd#inflateInit_#inflateSetDictionary" +$ module4 = "inflateSync#uncompress#zlibVersion#compress" +$ open/read map 'p1 +$ if axp .or. ia64 +$ then +$ open/write aopt a.opt +$ open/write bopt b.opt +$ write aopt " CASE_SENSITIVE=YES" +$ write bopt "SYMBOL_VECTOR= (-" +$ mod_sym_num = 1 +$ MOD_SYM_LOOP: +$ if f$type(module'mod_sym_num') .nes. 
"" +$ then +$ mod_in = 0 +$ MOD_SYM_IN: +$ shared_proc = f$element(mod_in, "#", module'mod_sym_num') +$ if shared_proc .nes. "#" +$ then +$ write aopt f$fao(" symbol_vector=(!AS/!AS=PROCEDURE)",- + f$edit(shared_proc,"upcase"),shared_proc) +$ write bopt f$fao("!AS=PROCEDURE,-",shared_proc) +$ mod_in = mod_in + 1 +$ goto mod_sym_in +$ endif +$ mod_sym_num = mod_sym_num + 1 +$ goto mod_sym_loop +$ endif +$MAP_LOOP: +$ read/end=map_end map line +$ if (f$locate("{",line).lt. f$length(line)) .or. - + (f$locate("global:", line) .lt. f$length(line)) +$ then +$ proc = true +$ goto map_loop +$ endif +$ if f$locate("}",line).lt. f$length(line) then proc = false +$ if f$locate("local:", line) .lt. f$length(line) then proc = false +$ if proc +$ then +$ shared_proc = f$edit(line,"collapse") +$ chop_semi = f$locate(";", shared_proc) +$ if chop_semi .lt. f$length(shared_proc) then - + shared_proc = f$extract(0, chop_semi, shared_proc) +$ write aopt f$fao(" symbol_vector=(!AS/!AS=PROCEDURE)",- + f$edit(shared_proc,"upcase"),shared_proc) +$ write bopt f$fao("!AS=PROCEDURE,-",shared_proc) +$ endif +$ goto map_loop +$MAP_END: +$ close/nolog aopt +$ close/nolog bopt +$ open/append libopt 'p2' +$ open/read aopt a.opt +$ open/read bopt b.opt +$ALOOP: +$ read/end=aloop_end aopt line +$ write libopt line +$ goto aloop +$ALOOP_END: +$ close/nolog aopt +$ sv = "" +$BLOOP: +$ read/end=bloop_end bopt svn +$ if (svn.nes."") +$ then +$ if (sv.nes."") then write libopt sv +$ sv = svn +$ endif +$ goto bloop +$BLOOP_END: +$ write libopt f$extract(0,f$length(sv)-2,sv), "-" +$ write libopt ")" +$ close/nolog bopt +$ delete/nolog/noconf a.opt;*,b.opt;* +$ else +$ if vax +$ then +$ open/append libopt 'p2' +$ mod_sym_num = 1 +$ VMOD_SYM_LOOP: +$ if f$type(module'mod_sym_num') .nes. "" +$ then +$ mod_in = 0 +$ VMOD_SYM_IN: +$ shared_proc = f$element(mod_in, "#", module'mod_sym_num') +$ if shared_proc .nes. 
"#" +$ then +$ write libopt f$fao("UNIVERSAL=!AS",- + f$edit(shared_proc,"upcase")) +$ mod_in = mod_in + 1 +$ goto vmod_sym_in +$ endif +$ mod_sym_num = mod_sym_num + 1 +$ goto vmod_sym_loop +$ endif +$VMAP_LOOP: +$ read/end=vmap_end map line +$ if (f$locate("{",line).lt. f$length(line)) .or. - + (f$locate("global:", line) .lt. f$length(line)) +$ then +$ proc = true +$ goto vmap_loop +$ endif +$ if f$locate("}",line).lt. f$length(line) then proc = false +$ if f$locate("local:", line) .lt. f$length(line) then proc = false +$ if proc +$ then +$ shared_proc = f$edit(line,"collapse") +$ chop_semi = f$locate(";", shared_proc) +$ if chop_semi .lt. f$length(shared_proc) then - + shared_proc = f$extract(0, chop_semi, shared_proc) +$ write libopt f$fao("UNIVERSAL=!AS",- + f$edit(shared_proc,"upcase")) +$ endif +$ goto vmap_loop +$VMAP_END: +$ else +$ write sys$output "Unknown Architecture (Not VAX, AXP, or IA64)" +$ write sys$output "No options file created" +$ endif +$ endif +$ EXIT_M2S: +$ close/nolog map +$ close/nolog libopt +$ endsubroutine diff -Nru nodejs-0.11.13/deps/zlib/mozzconf.h nodejs-0.11.15/deps/zlib/mozzconf.h --- nodejs-0.11.13/deps/zlib/mozzconf.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/mozzconf.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ -/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* ***** BEGIN LICENSE BLOCK ***** - * Version: MPL 1.1/GPL 2.0/LGPL 2.1 - * - * The contents of this file are subject to the Mozilla Public License Version - * 1.1 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * http://www.mozilla.org/MPL/ - * - * Software distributed under the License is distributed on an "AS IS" basis, - * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License - * for the specific language governing rights and limitations under the - * License. 
- * - * The Original Code is the mozilla zlib configuration. - * - * The Initial Developer of the Original Code is IBM Corporation. - * Portions created by the Initial Developer are Copyright (C) 2004 - * the Initial Developer. All Rights Reserved. - * - * Contributor(s): - * - * Alternatively, the contents of this file may be used under the terms of - * either of the GNU General Public License Version 2 or later (the "GPL"), - * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), - * in which case the provisions of the GPL or the LGPL are applicable instead - * of those above. If you wish to allow use of your version of this file only - * under the terms of either the GPL or the LGPL, and not to allow others to - * use your version of this file under the terms of the MPL, indicate your - * decision by deleting the provisions above and replace them with the notice - * and other provisions required by the GPL or the LGPL. If you do not delete - * the provisions above, a recipient may use your version of this file under - * the terms of any one of the MPL, the GPL or the LGPL. 
- * - * ***** END LICENSE BLOCK ***** */ - -#ifndef MOZZCONF_H -#define MOZZCONF_H - -#if defined(XP_WIN) && defined(ZLIB_DLL) && !defined(MOZ_ENABLE_LIBXUL) -#undef ZLIB_DLL -#endif - -#ifdef HAVE_VISIBILITY_ATTRIBUTE -#define ZEXTERN __attribute__((visibility ("default"))) extern -#endif - -/* Exported Symbols */ -#define zlibVersion MOZ_Z_zlibVersion -#define deflate MOZ_Z_deflate -#define deflateEnd MOZ_Z_deflateEnd -#define inflate MOZ_Z_inflate -#define inflateEnd MOZ_Z_inflateEnd -#define deflateSetDictionary MOZ_Z_deflateSetDictionary -#define deflateCopy MOZ_Z_deflateCopy -#define deflateReset MOZ_Z_deflateReset -#define deflateParams MOZ_Z_deflateParams -#define deflateBound MOZ_Z_deflateBound -#define deflatePrime MOZ_Z_deflatePrime -#define inflateSetDictionary MOZ_Z_inflateSetDictionary -#define inflateSync MOZ_Z_inflateSync -#define inflateCopy MOZ_Z_inflateCopy -#define inflateReset MOZ_Z_inflateReset -#define inflateBack MOZ_Z_inflateBack -#define inflateBackEnd MOZ_Z_inflateBackEnd -#define zlibCompileFlags MOZ_Z_zlibCompileFlags -#define compress MOZ_Z_compress -#define compress2 MOZ_Z_compress2 -#define compressBound MOZ_Z_compressBound -#define uncompress MOZ_Z_uncompress -#define gzopen MOZ_Z_gzopen -#define gzdopen MOZ_Z_gzdopen -#define gzsetparams MOZ_Z_gzsetparams -#define gzread MOZ_Z_gzread -#define gzwrite MOZ_Z_gzwrite -#define gzprintf MOZ_Z_gzprintf -#define gzputs MOZ_Z_gzputs -#define gzgets MOZ_Z_gzgets -#define gzputc MOZ_Z_gzputc -#define gzgetc MOZ_Z_gzgetc -#define gzungetc MOZ_Z_gzungetc -#define gzflush MOZ_Z_gzflush -#define gzseek MOZ_Z_gzseek -#define gzrewind MOZ_Z_gzrewind -#define gztell MOZ_Z_gztell -#define gzeof MOZ_Z_gzeof -#define gzclose MOZ_Z_gzclose -#define gzerror MOZ_Z_gzerror -#define gzclearerr MOZ_Z_gzclearerr -#define adler32 MOZ_Z_adler32 -#define crc32 MOZ_Z_crc32 -#define deflateInit_ MOZ_Z_deflateInit_ -#define deflateInit2_ MOZ_Z_deflateInit2_ -#define inflateInit_ MOZ_Z_inflateInit_ -#define 
inflateInit2_ MOZ_Z_inflateInit2_ -#define inflateBackInit_ MOZ_Z_inflateBackInit_ -#define inflateSyncPoint MOZ_Z_inflateSyncPoint -#define get_crc_table MOZ_Z_get_crc_table -#define zError MOZ_Z_zError - -/* Extra global symbols */ -#define _dist_code MOZ_Z__dist_code -#define _length_code MOZ_Z__length_code -#define _tr_align MOZ_Z__tr_align -#define _tr_flush_block MOZ_Z__tr_flush_block -#define _tr_init MOZ_Z__tr_init -#define _tr_stored_block MOZ_Z__tr_stored_block -#define _tr_tally MOZ_Z__tr_tally -#define deflate_copyright MOZ_Z_deflate_copyright -#define inflate_copyright MOZ_Z_inflate_copyright -#define inflate_fast MOZ_Z_inflate_fast -#define inflate_table MOZ_Z_inflate_table -#define z_errmsg MOZ_Z_z_errmsg -#define zcalloc MOZ_Z_zcalloc -#define zcfree MOZ_Z_zcfree -#define alloc_func MOZ_Z_alloc_func -#define free_func MOZ_Z_free_func -#define in_func MOZ_Z_in_func -#define out_func MOZ_Z_out_func - -/* New as of libpng-1.2.3 */ -#define adler32_combine MOZ_Z_adler32_combine -#define crc32_combine MOZ_Z_crc32_combine -#define deflateSetHeader MOZ_Z_deflateSetHeader -#define deflateTune MOZ_Z_deflateTune -#define gzdirect MOZ_Z_gzdirect -#define inflatePrime MOZ_Z_inflatePrime -#define inflateGetHeader MOZ_Z_inflateGetHeader - -#endif diff -Nru nodejs-0.11.13/deps/zlib/msdos/Makefile.bor nodejs-0.11.15/deps/zlib/msdos/Makefile.bor --- nodejs-0.11.13/deps/zlib/msdos/Makefile.bor 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/msdos/Makefile.bor 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,115 @@ +# Makefile for zlib +# Borland C++ +# Last updated: 15-Mar-2003 + +# To use, do "make -fmakefile.bor" +# To compile in small model, set below: MODEL=s + +# WARNING: the small model is supported but only for small values of +# MAX_WBITS and MAX_MEM_LEVEL. 
For example: +# -DMAX_WBITS=11 -DDEF_WBITS=11 -DMAX_MEM_LEVEL=3 +# If you wish to reduce the memory requirements (default 256K for big +# objects plus a few K), you can add to the LOC macro below: +# -DMAX_MEM_LEVEL=7 -DMAX_WBITS=14 +# See zconf.h for details about the memory requirements. + +# ------------ Turbo C++, Borland C++ ------------ + +# Optional nonstandard preprocessor flags (e.g. -DMAX_MEM_LEVEL=7) +# should be added to the environment via "set LOCAL_ZLIB=-DFOO" or added +# to the declaration of LOC here: +LOC = $(LOCAL_ZLIB) + +# type for CPU required: 0: 8086, 1: 80186, 2: 80286, 3: 80386, etc. +CPU_TYP = 0 + +# memory model: one of s, m, c, l (small, medium, compact, large) +MODEL=l + +# replace bcc with tcc for Turbo C++ 1.0, with bcc32 for the 32 bit version +CC=bcc +LD=bcc +AR=tlib + +# compiler flags +# replace "-O2" by "-O -G -a -d" for Turbo C++ 1.0 +CFLAGS=-O2 -Z -m$(MODEL) $(LOC) + +LDFLAGS=-m$(MODEL) -f- + + +# variables +ZLIB_LIB = zlib_$(MODEL).lib + +OBJ1 = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj +OBJ2 = gzwrite.obj infback.obj inffast.obj inflate.obj inftrees.obj trees.obj uncompr.obj zutil.obj +OBJP1 = +adler32.obj+compress.obj+crc32.obj+deflate.obj+gzclose.obj+gzlib.obj+gzread.obj +OBJP2 = +gzwrite.obj+infback.obj+inffast.obj+inflate.obj+inftrees.obj+trees.obj+uncompr.obj+zutil.obj + + +# targets +all: $(ZLIB_LIB) example.exe minigzip.exe + +.c.obj: + $(CC) -c $(CFLAGS) $*.c + +adler32.obj: adler32.c zlib.h zconf.h + +compress.obj: compress.c zlib.h zconf.h + +crc32.obj: crc32.c zlib.h zconf.h crc32.h + +deflate.obj: deflate.c deflate.h zutil.h zlib.h zconf.h + +gzclose.obj: gzclose.c zlib.h zconf.h gzguts.h + +gzlib.obj: gzlib.c zlib.h zconf.h gzguts.h + +gzread.obj: gzread.c zlib.h zconf.h gzguts.h + +gzwrite.obj: gzwrite.c zlib.h zconf.h gzguts.h + +infback.obj: infback.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inffast.obj: inffast.c zutil.h zlib.h zconf.h 
inftrees.h inflate.h \ + inffast.h + +inflate.obj: inflate.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inftrees.obj: inftrees.c zutil.h zlib.h zconf.h inftrees.h + +trees.obj: trees.c zutil.h zlib.h zconf.h deflate.h trees.h + +uncompr.obj: uncompr.c zlib.h zconf.h + +zutil.obj: zutil.c zutil.h zlib.h zconf.h + +example.obj: test/example.c zlib.h zconf.h + +minigzip.obj: test/minigzip.c zlib.h zconf.h + + +# the command line is cut to fit in the MS-DOS 128 byte limit: +$(ZLIB_LIB): $(OBJ1) $(OBJ2) + -del $(ZLIB_LIB) + $(AR) $(ZLIB_LIB) $(OBJP1) + $(AR) $(ZLIB_LIB) $(OBJP2) + +example.exe: example.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) example.obj $(ZLIB_LIB) + +minigzip.exe: minigzip.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) minigzip.obj $(ZLIB_LIB) + +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +clean: + -del *.obj + -del *.lib + -del *.exe + -del zlib_*.bak + -del foo.gz diff -Nru nodejs-0.11.13/deps/zlib/msdos/Makefile.dj2 nodejs-0.11.15/deps/zlib/msdos/Makefile.dj2 --- nodejs-0.11.13/deps/zlib/msdos/Makefile.dj2 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/msdos/Makefile.dj2 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,104 @@ +# Makefile for zlib. Modified for djgpp v2.0 by F. J. Donahoe, 3/15/96. +# Copyright (C) 1995-1998 Jean-loup Gailly. +# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile, or to compile and test, type: +# +# make -fmakefile.dj2; make test -fmakefile.dj2 +# +# To install libz.a, zconf.h and zlib.h in the djgpp directories, type: +# +# make install -fmakefile.dj2 +# +# after first defining LIBRARY_PATH and INCLUDE_PATH in djgpp.env as +# in the sample below if the pattern of the DJGPP distribution is to +# be followed. Remember that, while 'es around <=> are ignored in +# makefiles, they are *not* in batch files or in djgpp.env. 
+# - - - - - +# [make] +# INCLUDE_PATH=%\>;INCLUDE_PATH%%\DJDIR%\include +# LIBRARY_PATH=%\>;LIBRARY_PATH%%\DJDIR%\lib +# BUTT=-m486 +# - - - - - +# Alternately, these variables may be defined below, overriding the values +# in djgpp.env, as +# INCLUDE_PATH=c:\usr\include +# LIBRARY_PATH=c:\usr\lib + +CC=gcc + +#CFLAGS=-MMD -O +#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 +#CFLAGS=-MMD -g -DDEBUG +CFLAGS=-MMD -O3 $(BUTT) -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ + -Wstrict-prototypes -Wmissing-prototypes + +# If cp.exe is available, replace "copy /Y" with "cp -fp" . +CP=copy /Y +# If gnu install.exe is available, replace $(CP) with ginstall. +INSTALL=$(CP) +# The default value of RM is "rm -f." If "rm.exe" is found, comment out: +RM=del +LDLIBS=-L. -lz +LD=$(CC) -s -o +LDSHARED=$(CC) + +INCL=zlib.h zconf.h +LIBS=libz.a + +AR=ar rcs + +prefix=/usr/local +exec_prefix = $(prefix) + +OBJS = adler32.o compress.o crc32.o gzclose.o gzlib.o gzread.o gzwrite.o \ + uncompr.o deflate.o trees.o zutil.o inflate.o infback.o inftrees.o inffast.o + +OBJA = +# to use the asm code: make OBJA=match.o + +TEST_OBJS = example.o minigzip.o + +all: example.exe minigzip.exe + +check: test +test: all + ./example + echo hello world | .\minigzip | .\minigzip -d + +%.o : %.c + $(CC) $(CFLAGS) -c $< -o $@ + +libz.a: $(OBJS) $(OBJA) + $(AR) $@ $(OBJS) $(OBJA) + +%.exe : %.o $(LIBS) + $(LD) $@ $< $(LDLIBS) + +# INCLUDE_PATH and LIBRARY_PATH were set for [make] in djgpp.env . 
+ +.PHONY : uninstall clean + +install: $(INCL) $(LIBS) + -@if not exist $(INCLUDE_PATH)\nul mkdir $(INCLUDE_PATH) + -@if not exist $(LIBRARY_PATH)\nul mkdir $(LIBRARY_PATH) + $(INSTALL) zlib.h $(INCLUDE_PATH) + $(INSTALL) zconf.h $(INCLUDE_PATH) + $(INSTALL) libz.a $(LIBRARY_PATH) + +uninstall: + $(RM) $(INCLUDE_PATH)\zlib.h + $(RM) $(INCLUDE_PATH)\zconf.h + $(RM) $(LIBRARY_PATH)\libz.a + +clean: + $(RM) *.d + $(RM) *.o + $(RM) *.exe + $(RM) libz.a + $(RM) foo.gz + +DEPS := $(wildcard *.d) +ifneq ($(DEPS),) +include $(DEPS) +endif diff -Nru nodejs-0.11.13/deps/zlib/msdos/Makefile.emx nodejs-0.11.15/deps/zlib/msdos/Makefile.emx --- nodejs-0.11.13/deps/zlib/msdos/Makefile.emx 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/msdos/Makefile.emx 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,69 @@ +# Makefile for zlib. Modified for emx 0.9c by Chr. Spieler, 6/17/98. +# Copyright (C) 1995-1998 Jean-loup Gailly. +# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile, or to compile and test, type: +# +# make -fmakefile.emx; make test -fmakefile.emx +# + +CC=gcc + +#CFLAGS=-MMD -O +#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 +#CFLAGS=-MMD -g -DDEBUG +CFLAGS=-MMD -O3 $(BUTT) -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ + -Wstrict-prototypes -Wmissing-prototypes + +# If cp.exe is available, replace "copy /Y" with "cp -fp" . +CP=copy /Y +# If gnu install.exe is available, replace $(CP) with ginstall. +INSTALL=$(CP) +# The default value of RM is "rm -f." If "rm.exe" is found, comment out: +RM=del +LDLIBS=-L. 
-lzlib +LD=$(CC) -s -o +LDSHARED=$(CC) + +INCL=zlib.h zconf.h +LIBS=zlib.a + +AR=ar rcs + +prefix=/usr/local +exec_prefix = $(prefix) + +OBJS = adler32.o compress.o crc32.o gzclose.o gzlib.o gzread.o gzwrite.o \ + uncompr.o deflate.o trees.o zutil.o inflate.o infback.o inftrees.o inffast.o + +TEST_OBJS = example.o minigzip.o + +all: example.exe minigzip.exe + +test: all + ./example + echo hello world | .\minigzip | .\minigzip -d + +%.o : %.c + $(CC) $(CFLAGS) -c $< -o $@ + +zlib.a: $(OBJS) + $(AR) $@ $(OBJS) + +%.exe : %.o $(LIBS) + $(LD) $@ $< $(LDLIBS) + + +.PHONY : clean + +clean: + $(RM) *.d + $(RM) *.o + $(RM) *.exe + $(RM) zlib.a + $(RM) foo.gz + +DEPS := $(wildcard *.d) +ifneq ($(DEPS),) +include $(DEPS) +endif diff -Nru nodejs-0.11.13/deps/zlib/msdos/Makefile.msc nodejs-0.11.15/deps/zlib/msdos/Makefile.msc --- nodejs-0.11.13/deps/zlib/msdos/Makefile.msc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/msdos/Makefile.msc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,112 @@ +# Makefile for zlib +# Microsoft C 5.1 or later +# Last updated: 19-Mar-2003 + +# To use, do "make makefile.msc" +# To compile in small model, set below: MODEL=S + +# If you wish to reduce the memory requirements (default 256K for big +# objects plus a few K), you can add to the LOC macro below: +# -DMAX_MEM_LEVEL=7 -DMAX_WBITS=14 +# See zconf.h for details about the memory requirements. + +# ------------- Microsoft C 5.1 and later ------------- + +# Optional nonstandard preprocessor flags (e.g. -DMAX_MEM_LEVEL=7) +# should be added to the environment via "set LOCAL_ZLIB=-DFOO" or added +# to the declaration of LOC here: +LOC = $(LOCAL_ZLIB) + +# Type for CPU required: 0: 8086, 1: 80186, 2: 80286, 3: 80386, etc. 
+CPU_TYP = 0 + +# Memory model: one of S, M, C, L (small, medium, compact, large) +MODEL=L + +CC=cl +CFLAGS=-nologo -A$(MODEL) -G$(CPU_TYP) -W3 -Oait -Gs $(LOC) +#-Ox generates bad code with MSC 5.1 +LIB_CFLAGS=-Zl $(CFLAGS) + +LD=link +LDFLAGS=/noi/e/st:0x1500/noe/farcall/packcode +# "/farcall/packcode" are only useful for `large code' memory models +# but should be a "no-op" for small code models. + + +# variables +ZLIB_LIB = zlib_$(MODEL).lib + +OBJ1 = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj +OBJ2 = gzwrite.obj infback.obj inffast.obj inflate.obj inftrees.obj trees.obj uncompr.obj zutil.obj + + +# targets +all: $(ZLIB_LIB) example.exe minigzip.exe + +.c.obj: + $(CC) -c $(LIB_CFLAGS) $*.c + +adler32.obj: adler32.c zlib.h zconf.h + +compress.obj: compress.c zlib.h zconf.h + +crc32.obj: crc32.c zlib.h zconf.h crc32.h + +deflate.obj: deflate.c deflate.h zutil.h zlib.h zconf.h + +gzclose.obj: gzclose.c zlib.h zconf.h gzguts.h + +gzlib.obj: gzlib.c zlib.h zconf.h gzguts.h + +gzread.obj: gzread.c zlib.h zconf.h gzguts.h + +gzwrite.obj: gzwrite.c zlib.h zconf.h gzguts.h + +infback.obj: infback.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inffast.obj: inffast.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h + +inflate.obj: inflate.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inftrees.obj: inftrees.c zutil.h zlib.h zconf.h inftrees.h + +trees.obj: trees.c zutil.h zlib.h zconf.h deflate.h trees.h + +uncompr.obj: uncompr.c zlib.h zconf.h + +zutil.obj: zutil.c zutil.h zlib.h zconf.h + +example.obj: test/example.c zlib.h zconf.h + $(CC) -c $(CFLAGS) $*.c + +minigzip.obj: test/minigzip.c zlib.h zconf.h + $(CC) -c $(CFLAGS) $*.c + + +# the command line is cut to fit in the MS-DOS 128 byte limit: +$(ZLIB_LIB): $(OBJ1) $(OBJ2) + if exist $(ZLIB_LIB) del $(ZLIB_LIB) + lib $(ZLIB_LIB) $(OBJ1); + lib $(ZLIB_LIB) $(OBJ2); + +example.exe: example.obj $(ZLIB_LIB) + $(LD) 
$(LDFLAGS) example.obj,,,$(ZLIB_LIB); + +minigzip.exe: minigzip.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) minigzip.obj,,,$(ZLIB_LIB); + +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +clean: + -del *.obj + -del *.lib + -del *.exe + -del *.map + -del zlib_*.bak + -del foo.gz diff -Nru nodejs-0.11.13/deps/zlib/msdos/Makefile.tc nodejs-0.11.15/deps/zlib/msdos/Makefile.tc --- nodejs-0.11.13/deps/zlib/msdos/Makefile.tc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/msdos/Makefile.tc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,100 @@ +# Makefile for zlib +# Turbo C 2.01, Turbo C++ 1.01 +# Last updated: 15-Mar-2003 + +# To use, do "make -fmakefile.tc" +# To compile in small model, set below: MODEL=s + +# WARNING: the small model is supported but only for small values of +# MAX_WBITS and MAX_MEM_LEVEL. For example: +# -DMAX_WBITS=11 -DMAX_MEM_LEVEL=3 +# If you wish to reduce the memory requirements (default 256K for big +# objects plus a few K), you can add to CFLAGS below: +# -DMAX_MEM_LEVEL=7 -DMAX_WBITS=14 +# See zconf.h for details about the memory requirements. 
+ +# ------------ Turbo C 2.01, Turbo C++ 1.01 ------------ +MODEL=l +CC=tcc +LD=tcc +AR=tlib +# CFLAGS=-O2 -G -Z -m$(MODEL) -DMAX_WBITS=11 -DMAX_MEM_LEVEL=3 +CFLAGS=-O2 -G -Z -m$(MODEL) +LDFLAGS=-m$(MODEL) -f- + + +# variables +ZLIB_LIB = zlib_$(MODEL).lib + +OBJ1 = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj +OBJ2 = gzwrite.obj infback.obj inffast.obj inflate.obj inftrees.obj trees.obj uncompr.obj zutil.obj +OBJP1 = +adler32.obj+compress.obj+crc32.obj+deflate.obj+gzclose.obj+gzlib.obj+gzread.obj +OBJP2 = +gzwrite.obj+infback.obj+inffast.obj+inflate.obj+inftrees.obj+trees.obj+uncompr.obj+zutil.obj + + +# targets +all: $(ZLIB_LIB) example.exe minigzip.exe + +.c.obj: + $(CC) -c $(CFLAGS) $*.c + +adler32.obj: adler32.c zlib.h zconf.h + +compress.obj: compress.c zlib.h zconf.h + +crc32.obj: crc32.c zlib.h zconf.h crc32.h + +deflate.obj: deflate.c deflate.h zutil.h zlib.h zconf.h + +gzclose.obj: gzclose.c zlib.h zconf.h gzguts.h + +gzlib.obj: gzlib.c zlib.h zconf.h gzguts.h + +gzread.obj: gzread.c zlib.h zconf.h gzguts.h + +gzwrite.obj: gzwrite.c zlib.h zconf.h gzguts.h + +infback.obj: infback.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inffast.obj: inffast.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h + +inflate.obj: inflate.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inftrees.obj: inftrees.c zutil.h zlib.h zconf.h inftrees.h + +trees.obj: trees.c zutil.h zlib.h zconf.h deflate.h trees.h + +uncompr.obj: uncompr.c zlib.h zconf.h + +zutil.obj: zutil.c zutil.h zlib.h zconf.h + +example.obj: test/example.c zlib.h zconf.h + +minigzip.obj: test/minigzip.c zlib.h zconf.h + + +# the command line is cut to fit in the MS-DOS 128 byte limit: +$(ZLIB_LIB): $(OBJ1) $(OBJ2) + -del $(ZLIB_LIB) + $(AR) $(ZLIB_LIB) $(OBJP1) + $(AR) $(ZLIB_LIB) $(OBJP2) + +example.exe: example.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) example.obj $(ZLIB_LIB) + +minigzip.exe: minigzip.obj $(ZLIB_LIB) + 
$(LD) $(LDFLAGS) minigzip.obj $(ZLIB_LIB) + +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +clean: + -del *.obj + -del *.lib + -del *.exe + -del zlib_*.bak + -del foo.gz diff -Nru nodejs-0.11.13/deps/zlib/nintendods/Makefile nodejs-0.11.15/deps/zlib/nintendods/Makefile --- nodejs-0.11.13/deps/zlib/nintendods/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/nintendods/Makefile 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,126 @@ +#--------------------------------------------------------------------------------- +.SUFFIXES: +#--------------------------------------------------------------------------------- + +ifeq ($(strip $(DEVKITARM)),) +$(error "Please set DEVKITARM in your environment. export DEVKITARM=devkitARM") +endif + +include $(DEVKITARM)/ds_rules + +#--------------------------------------------------------------------------------- +# TARGET is the name of the output +# BUILD is the directory where object files & intermediate files will be placed +# SOURCES is a list of directories containing source code +# DATA is a list of directories containing data files +# INCLUDES is a list of directories containing header files +#--------------------------------------------------------------------------------- +TARGET := $(shell basename $(CURDIR)) +BUILD := build +SOURCES := ../../ +DATA := data +INCLUDES := include + +#--------------------------------------------------------------------------------- +# options for code generation +#--------------------------------------------------------------------------------- +ARCH := -mthumb -mthumb-interwork + +CFLAGS := -Wall -O2\ + -march=armv5te -mtune=arm946e-s \ + -fomit-frame-pointer -ffast-math \ + $(ARCH) + +CFLAGS += $(INCLUDE) -DARM9 +CXXFLAGS := $(CFLAGS) -fno-rtti -fno-exceptions + +ASFLAGS := $(ARCH) -march=armv5te -mtune=arm946e-s +LDFLAGS = -specs=ds_arm9.specs -g $(ARCH) -Wl,-Map,$(notdir $*.map) + 
+#--------------------------------------------------------------------------------- +# list of directories containing libraries, this must be the top level containing +# include and lib +#--------------------------------------------------------------------------------- +LIBDIRS := $(LIBNDS) + +#--------------------------------------------------------------------------------- +# no real need to edit anything past this point unless you need to add additional +# rules for different file extensions +#--------------------------------------------------------------------------------- +ifneq ($(BUILD),$(notdir $(CURDIR))) +#--------------------------------------------------------------------------------- + +export OUTPUT := $(CURDIR)/lib/libz.a + +export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \ + $(foreach dir,$(DATA),$(CURDIR)/$(dir)) + +export DEPSDIR := $(CURDIR)/$(BUILD) + +CFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.c))) +CPPFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.cpp))) +SFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.s))) +BINFILES := $(foreach dir,$(DATA),$(notdir $(wildcard $(dir)/*.*))) + +#--------------------------------------------------------------------------------- +# use CXX for linking C++ projects, CC for standard C +#--------------------------------------------------------------------------------- +ifeq ($(strip $(CPPFILES)),) +#--------------------------------------------------------------------------------- + export LD := $(CC) +#--------------------------------------------------------------------------------- +else +#--------------------------------------------------------------------------------- + export LD := $(CXX) +#--------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------- + +export OFILES := $(addsuffix .o,$(BINFILES)) \ + $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) 
$(SFILES:.s=.o) + +export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \ + $(foreach dir,$(LIBDIRS),-I$(dir)/include) \ + -I$(CURDIR)/$(BUILD) + +.PHONY: $(BUILD) clean all + +#--------------------------------------------------------------------------------- +all: $(BUILD) + @[ -d $@ ] || mkdir -p include + @cp ../../*.h include + +lib: + @[ -d $@ ] || mkdir -p $@ + +$(BUILD): lib + @[ -d $@ ] || mkdir -p $@ + @$(MAKE) --no-print-directory -C $(BUILD) -f $(CURDIR)/Makefile + +#--------------------------------------------------------------------------------- +clean: + @echo clean ... + @rm -fr $(BUILD) lib + +#--------------------------------------------------------------------------------- +else + +DEPENDS := $(OFILES:.o=.d) + +#--------------------------------------------------------------------------------- +# main targets +#--------------------------------------------------------------------------------- +$(OUTPUT) : $(OFILES) + +#--------------------------------------------------------------------------------- +%.bin.o : %.bin +#--------------------------------------------------------------------------------- + @echo $(notdir $<) + @$(bin2o) + + +-include $(DEPENDS) + +#--------------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------------- diff -Nru nodejs-0.11.13/deps/zlib/nintendods/README nodejs-0.11.15/deps/zlib/nintendods/README --- nodejs-0.11.13/deps/zlib/nintendods/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/nintendods/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,5 @@ +This Makefile requires devkitARM (http://www.devkitpro.org/category/devkitarm/) and works inside "contrib/nds". It is based on a devkitARM template. 
+ +Eduardo Costa +January 3, 2009 + diff -Nru nodejs-0.11.13/deps/zlib/old/descrip.mms nodejs-0.11.15/deps/zlib/old/descrip.mms --- nodejs-0.11.13/deps/zlib/old/descrip.mms 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/descrip.mms 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,48 @@ +# descrip.mms: MMS description file for building zlib on VMS +# written by Martin P.J. Zinser + +cc_defs = +c_deb = + +.ifdef __DECC__ +pref = /prefix=all +.endif + +OBJS = adler32.obj, compress.obj, crc32.obj, gzio.obj, uncompr.obj,\ + deflate.obj, trees.obj, zutil.obj, inflate.obj, infblock.obj,\ + inftrees.obj, infcodes.obj, infutil.obj, inffast.obj + +CFLAGS= $(C_DEB) $(CC_DEFS) $(PREF) + +all : example.exe minigzip.exe + @ write sys$output " Example applications available" +libz.olb : libz.olb($(OBJS)) + @ write sys$output " libz available" + +example.exe : example.obj libz.olb + link example,libz.olb/lib + +minigzip.exe : minigzip.obj libz.olb + link minigzip,libz.olb/lib,x11vms:xvmsutils.olb/lib + +clean : + delete *.obj;*,libz.olb;* + + +# Other dependencies. 
+adler32.obj : zutil.h zlib.h zconf.h +compress.obj : zlib.h zconf.h +crc32.obj : zutil.h zlib.h zconf.h +deflate.obj : deflate.h zutil.h zlib.h zconf.h +example.obj : zlib.h zconf.h +gzio.obj : zutil.h zlib.h zconf.h +infblock.obj : zutil.h zlib.h zconf.h infblock.h inftrees.h infcodes.h infutil.h +infcodes.obj : zutil.h zlib.h zconf.h inftrees.h infutil.h infcodes.h inffast.h +inffast.obj : zutil.h zlib.h zconf.h inftrees.h infutil.h inffast.h +inflate.obj : zutil.h zlib.h zconf.h infblock.h +inftrees.obj : zutil.h zlib.h zconf.h inftrees.h +infutil.obj : zutil.h zlib.h zconf.h inftrees.h infutil.h +minigzip.obj : zlib.h zconf.h +trees.obj : deflate.h zutil.h zlib.h zconf.h +uncompr.obj : zlib.h zconf.h +zutil.obj : zutil.h zlib.h zconf.h diff -Nru nodejs-0.11.13/deps/zlib/old/Makefile.emx nodejs-0.11.15/deps/zlib/old/Makefile.emx --- nodejs-0.11.13/deps/zlib/old/Makefile.emx 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/Makefile.emx 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,69 @@ +# Makefile for zlib. Modified for emx/rsxnt by Chr. Spieler, 6/16/98. +# Copyright (C) 1995-1998 Jean-loup Gailly. +# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile, or to compile and test, type: +# +# make -fmakefile.emx; make test -fmakefile.emx +# + +CC=gcc -Zwin32 + +#CFLAGS=-MMD -O +#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 +#CFLAGS=-MMD -g -DDEBUG +CFLAGS=-MMD -O3 $(BUTT) -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ + -Wstrict-prototypes -Wmissing-prototypes + +# If cp.exe is available, replace "copy /Y" with "cp -fp" . +CP=copy /Y +# If gnu install.exe is available, replace $(CP) with ginstall. +INSTALL=$(CP) +# The default value of RM is "rm -f." If "rm.exe" is found, comment out: +RM=del +LDLIBS=-L. 
-lzlib +LD=$(CC) -s -o +LDSHARED=$(CC) + +INCL=zlib.h zconf.h +LIBS=zlib.a + +AR=ar rcs + +prefix=/usr/local +exec_prefix = $(prefix) + +OBJS = adler32.o compress.o crc32.o deflate.o gzclose.o gzlib.o gzread.o \ + gzwrite.o infback.o inffast.o inflate.o inftrees.o trees.o uncompr.o zutil.o + +TEST_OBJS = example.o minigzip.o + +all: example.exe minigzip.exe + +test: all + ./example + echo hello world | .\minigzip | .\minigzip -d + +%.o : %.c + $(CC) $(CFLAGS) -c $< -o $@ + +zlib.a: $(OBJS) + $(AR) $@ $(OBJS) + +%.exe : %.o $(LIBS) + $(LD) $@ $< $(LDLIBS) + + +.PHONY : clean + +clean: + $(RM) *.d + $(RM) *.o + $(RM) *.exe + $(RM) zlib.a + $(RM) foo.gz + +DEPS := $(wildcard *.d) +ifneq ($(DEPS),) +include $(DEPS) +endif diff -Nru nodejs-0.11.13/deps/zlib/old/Makefile.riscos nodejs-0.11.15/deps/zlib/old/Makefile.riscos --- nodejs-0.11.13/deps/zlib/old/Makefile.riscos 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/Makefile.riscos 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,151 @@ +# Project: zlib_1_03 +# Patched for zlib 1.1.2 rw@shadow.org.uk 19980430 +# test works out-of-the-box, installs `somewhere' on demand + +# Toolflags: +CCflags = -c -depend !Depend -IC: -g -throwback -DRISCOS -fah +C++flags = -c -depend !Depend -IC: -throwback +Linkflags = -aif -c++ -o $@ +ObjAsmflags = -throwback -NoCache -depend !Depend +CMHGflags = +LibFileflags = -c -l -o $@ +Squeezeflags = -o $@ + +# change the line below to where _you_ want the library installed. 
+libdest = lib:zlib + +# Final targets: +@.lib: @.o.adler32 @.o.compress @.o.crc32 @.o.deflate @.o.gzio \ + @.o.infblock @.o.infcodes @.o.inffast @.o.inflate @.o.inftrees @.o.infutil @.o.trees \ + @.o.uncompr @.o.zutil + LibFile $(LibFileflags) @.o.adler32 @.o.compress @.o.crc32 @.o.deflate \ + @.o.gzio @.o.infblock @.o.infcodes @.o.inffast @.o.inflate @.o.inftrees @.o.infutil \ + @.o.trees @.o.uncompr @.o.zutil +test: @.minigzip @.example @.lib + @copy @.lib @.libc A~C~DF~L~N~P~Q~RS~TV + @echo running tests: hang on. + @/@.minigzip -f -9 libc + @/@.minigzip -d libc-gz + @/@.minigzip -f -1 libc + @/@.minigzip -d libc-gz + @/@.minigzip -h -9 libc + @/@.minigzip -d libc-gz + @/@.minigzip -h -1 libc + @/@.minigzip -d libc-gz + @/@.minigzip -9 libc + @/@.minigzip -d libc-gz + @/@.minigzip -1 libc + @/@.minigzip -d libc-gz + @diff @.lib @.libc + @echo that should have reported '@.lib and @.libc identical' if you have diff. + @/@.example @.fred @.fred + @echo that will have given lots of hello!'s. 
+ +@.minigzip: @.o.minigzip @.lib C:o.Stubs + Link $(Linkflags) @.o.minigzip @.lib C:o.Stubs +@.example: @.o.example @.lib C:o.Stubs + Link $(Linkflags) @.o.example @.lib C:o.Stubs + +install: @.lib + cdir $(libdest) + cdir $(libdest).h + @copy @.h.zlib $(libdest).h.zlib A~C~DF~L~N~P~Q~RS~TV + @copy @.h.zconf $(libdest).h.zconf A~C~DF~L~N~P~Q~RS~TV + @copy @.lib $(libdest).lib A~C~DF~L~N~P~Q~RS~TV + @echo okay, installed zlib in $(libdest) + +clean:; remove @.minigzip + remove @.example + remove @.libc + -wipe @.o.* F~r~cV + remove @.fred + +# User-editable dependencies: +.c.o: + cc $(ccflags) -o $@ $< + +# Static dependencies: + +# Dynamic dependencies: +o.example: c.example +o.example: h.zlib +o.example: h.zconf +o.minigzip: c.minigzip +o.minigzip: h.zlib +o.minigzip: h.zconf +o.adler32: c.adler32 +o.adler32: h.zlib +o.adler32: h.zconf +o.compress: c.compress +o.compress: h.zlib +o.compress: h.zconf +o.crc32: c.crc32 +o.crc32: h.zlib +o.crc32: h.zconf +o.deflate: c.deflate +o.deflate: h.deflate +o.deflate: h.zutil +o.deflate: h.zlib +o.deflate: h.zconf +o.gzio: c.gzio +o.gzio: h.zutil +o.gzio: h.zlib +o.gzio: h.zconf +o.infblock: c.infblock +o.infblock: h.zutil +o.infblock: h.zlib +o.infblock: h.zconf +o.infblock: h.infblock +o.infblock: h.inftrees +o.infblock: h.infcodes +o.infblock: h.infutil +o.infcodes: c.infcodes +o.infcodes: h.zutil +o.infcodes: h.zlib +o.infcodes: h.zconf +o.infcodes: h.inftrees +o.infcodes: h.infblock +o.infcodes: h.infcodes +o.infcodes: h.infutil +o.infcodes: h.inffast +o.inffast: c.inffast +o.inffast: h.zutil +o.inffast: h.zlib +o.inffast: h.zconf +o.inffast: h.inftrees +o.inffast: h.infblock +o.inffast: h.infcodes +o.inffast: h.infutil +o.inffast: h.inffast +o.inflate: c.inflate +o.inflate: h.zutil +o.inflate: h.zlib +o.inflate: h.zconf +o.inflate: h.infblock +o.inftrees: c.inftrees +o.inftrees: h.zutil +o.inftrees: h.zlib +o.inftrees: h.zconf +o.inftrees: h.inftrees +o.inftrees: h.inffixed +o.infutil: c.infutil +o.infutil: h.zutil 
+o.infutil: h.zlib +o.infutil: h.zconf +o.infutil: h.infblock +o.infutil: h.inftrees +o.infutil: h.infcodes +o.infutil: h.infutil +o.trees: c.trees +o.trees: h.deflate +o.trees: h.zutil +o.trees: h.zlib +o.trees: h.zconf +o.trees: h.trees +o.uncompr: c.uncompr +o.uncompr: h.zlib +o.uncompr: h.zconf +o.zutil: c.zutil +o.zutil: h.zutil +o.zutil: h.zlib +o.zutil: h.zconf diff -Nru nodejs-0.11.13/deps/zlib/old/os2/Makefile.os2 nodejs-0.11.15/deps/zlib/old/os2/Makefile.os2 --- nodejs-0.11.13/deps/zlib/old/os2/Makefile.os2 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/os2/Makefile.os2 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,136 @@ +# Makefile for zlib under OS/2 using GCC (PGCC) +# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile and test, type: +# cp Makefile.os2 .. +# cd .. +# make -f Makefile.os2 test + +# This makefile will build a static library z.lib, a shared library +# z.dll and a import library zdll.lib. You can use either z.lib or +# zdll.lib by specifying either -lz or -lzdll on gcc's command line + +CC=gcc -Zomf -s + +CFLAGS=-O6 -Wall +#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 +#CFLAGS=-g -DDEBUG +#CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ +# -Wstrict-prototypes -Wmissing-prototypes + +#################### BUG WARNING: ##################### +## infcodes.c hits a bug in pgcc-1.0, so you have to use either +## -O# where # <= 4 or one of (-fno-ommit-frame-pointer or -fno-force-mem) +## This bug is reportedly fixed in pgcc >1.0, but this was not tested +CFLAGS+=-fno-force-mem + +LDFLAGS=-s -L. 
-lzdll -Zcrtdll +LDSHARED=$(CC) -s -Zomf -Zdll -Zcrtdll + +VER=1.1.0 +ZLIB=z.lib +SHAREDLIB=z.dll +SHAREDLIBIMP=zdll.lib +LIBS=$(ZLIB) $(SHAREDLIB) $(SHAREDLIBIMP) + +AR=emxomfar cr +IMPLIB=emximp +RANLIB=echo +TAR=tar +SHELL=bash + +prefix=/usr/local +exec_prefix = $(prefix) + +OBJS = adler32.o compress.o crc32.o gzio.o uncompr.o deflate.o trees.o \ + zutil.o inflate.o infblock.o inftrees.o infcodes.o infutil.o inffast.o + +TEST_OBJS = example.o minigzip.o + +DISTFILES = README INDEX ChangeLog configure Make*[a-z0-9] *.[ch] descrip.mms \ + algorithm.txt zlib.3 msdos/Make*[a-z0-9] msdos/zlib.def msdos/zlib.rc \ + nt/Makefile.nt nt/zlib.dnt contrib/README.contrib contrib/*.txt \ + contrib/asm386/*.asm contrib/asm386/*.c \ + contrib/asm386/*.bat contrib/asm386/zlibvc.d?? contrib/iostream/*.cpp \ + contrib/iostream/*.h contrib/iostream2/*.h contrib/iostream2/*.cpp \ + contrib/untgz/Makefile contrib/untgz/*.c contrib/untgz/*.w32 + +all: example.exe minigzip.exe + +test: all + @LD_LIBRARY_PATH=.:$(LD_LIBRARY_PATH) ; export LD_LIBRARY_PATH; \ + echo hello world | ./minigzip | ./minigzip -d || \ + echo ' *** minigzip test FAILED ***' ; \ + if ./example; then \ + echo ' *** zlib test OK ***'; \ + else \ + echo ' *** zlib test FAILED ***'; \ + fi + +$(ZLIB): $(OBJS) + $(AR) $@ $(OBJS) + -@ ($(RANLIB) $@ || true) >/dev/null 2>&1 + +$(SHAREDLIB): $(OBJS) os2/z.def + $(LDSHARED) -o $@ $^ + +$(SHAREDLIBIMP): os2/z.def + $(IMPLIB) -o $@ $^ + +example.exe: example.o $(LIBS) + $(CC) $(CFLAGS) -o $@ example.o $(LDFLAGS) + +minigzip.exe: minigzip.o $(LIBS) + $(CC) $(CFLAGS) -o $@ minigzip.o $(LDFLAGS) + +clean: + rm -f *.o *~ example minigzip libz.a libz.so* foo.gz + +distclean: clean + +zip: + mv Makefile Makefile~; cp -p Makefile.in Makefile + rm -f test.c ztest*.c + v=`sed -n -e 's/\.//g' -e '/VERSION "/s/.*"\(.*\)".*/\1/p' < zlib.h`;\ + zip -ul9 zlib$$v $(DISTFILES) + mv Makefile~ Makefile + +dist: + mv Makefile Makefile~; cp -p Makefile.in Makefile + rm -f test.c ztest*.c + 
d=zlib-`sed -n '/VERSION "/s/.*"\(.*\)".*/\1/p' < zlib.h`;\ + rm -f $$d.tar.gz; \ + if test ! -d ../$$d; then rm -f ../$$d; ln -s `pwd` ../$$d; fi; \ + files=""; \ + for f in $(DISTFILES); do files="$$files $$d/$$f"; done; \ + cd ..; \ + GZIP=-9 $(TAR) chofz $$d/$$d.tar.gz $$files; \ + if test ! -d $$d; then rm -f $$d; fi + mv Makefile~ Makefile + +tags: + etags *.[ch] + +depend: + makedepend -- $(CFLAGS) -- *.[ch] + +# DO NOT DELETE THIS LINE -- make depend depends on it. + +adler32.o: zlib.h zconf.h +compress.o: zlib.h zconf.h +crc32.o: zlib.h zconf.h +deflate.o: deflate.h zutil.h zlib.h zconf.h +example.o: zlib.h zconf.h +gzio.o: zutil.h zlib.h zconf.h +infblock.o: infblock.h inftrees.h infcodes.h infutil.h zutil.h zlib.h zconf.h +infcodes.o: zutil.h zlib.h zconf.h +infcodes.o: inftrees.h infblock.h infcodes.h infutil.h inffast.h +inffast.o: zutil.h zlib.h zconf.h inftrees.h +inffast.o: infblock.h infcodes.h infutil.h inffast.h +inflate.o: zutil.h zlib.h zconf.h infblock.h +inftrees.o: zutil.h zlib.h zconf.h inftrees.h +infutil.o: zutil.h zlib.h zconf.h infblock.h inftrees.h infcodes.h infutil.h +minigzip.o: zlib.h zconf.h +trees.o: deflate.h zutil.h zlib.h zconf.h trees.h +uncompr.o: zlib.h zconf.h +zutil.o: zutil.h zlib.h zconf.h diff -Nru nodejs-0.11.13/deps/zlib/old/os2/zlib.def nodejs-0.11.15/deps/zlib/old/os2/zlib.def --- nodejs-0.11.13/deps/zlib/old/os2/zlib.def 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/os2/zlib.def 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,51 @@ +; +; Slightly modified version of ../nt/zlib.dnt :-) +; + +LIBRARY Z +DESCRIPTION "Zlib compression library for OS/2" +CODE PRELOAD MOVEABLE DISCARDABLE +DATA PRELOAD MOVEABLE MULTIPLE + +EXPORTS + adler32 + compress + crc32 + deflate + deflateCopy + deflateEnd + deflateInit2_ + deflateInit_ + deflateParams + deflateReset + deflateSetDictionary + gzclose + gzdopen + gzerror + gzflush + gzopen + gzread + gzwrite + inflate + inflateEnd + inflateInit2_ + inflateInit_ 
+ inflateReset + inflateSetDictionary + inflateSync + uncompress + zlibVersion + gzprintf + gzputc + gzgetc + gzseek + gzrewind + gztell + gzeof + gzsetparams + zError + inflateSyncPoint + get_crc_table + compress2 + gzputs + gzgets diff -Nru nodejs-0.11.13/deps/zlib/old/README nodejs-0.11.15/deps/zlib/old/README --- nodejs-0.11.13/deps/zlib/old/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ +This directory contains files that have not been updated for zlib 1.2.x + +(Volunteers are encouraged to help clean this up. Thanks.) diff -Nru nodejs-0.11.13/deps/zlib/old/visual-basic.txt nodejs-0.11.15/deps/zlib/old/visual-basic.txt --- nodejs-0.11.13/deps/zlib/old/visual-basic.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/old/visual-basic.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,160 @@ +See below some functions declarations for Visual Basic. + +Frequently Asked Question: + +Q: Each time I use the compress function I get the -5 error (not enough + room in the output buffer). + +A: Make sure that the length of the compressed buffer is passed by + reference ("as any"), not by value ("as long"). Also check that + before the call of compress this length is equal to the total size of + the compressed buffer and not zero. + + +From: "Jon Caruana" +Subject: Re: How to port zlib declares to vb? +Date: Mon, 28 Oct 1996 18:33:03 -0600 + +Got the answer! (I haven't had time to check this but it's what I got, and +looks correct): + +He has the following routines working: + compress + uncompress + gzopen + gzwrite + gzread + gzclose + +Declares follow: (Quoted from Carlos Rios , in Vb4 form) + +#If Win16 Then 'Use Win16 calls. 
+Declare Function compress Lib "ZLIB.DLL" (ByVal compr As + String, comprLen As Any, ByVal buf As String, ByVal buflen + As Long) As Integer +Declare Function uncompress Lib "ZLIB.DLL" (ByVal uncompr + As String, uncomprLen As Any, ByVal compr As String, ByVal + lcompr As Long) As Integer +Declare Function gzopen Lib "ZLIB.DLL" (ByVal filePath As + String, ByVal mode As String) As Long +Declare Function gzread Lib "ZLIB.DLL" (ByVal file As + Long, ByVal uncompr As String, ByVal uncomprLen As Integer) + As Integer +Declare Function gzwrite Lib "ZLIB.DLL" (ByVal file As + Long, ByVal uncompr As String, ByVal uncomprLen As Integer) + As Integer +Declare Function gzclose Lib "ZLIB.DLL" (ByVal file As + Long) As Integer +#Else +Declare Function compress Lib "ZLIB32.DLL" + (ByVal compr As String, comprLen As Any, ByVal buf As + String, ByVal buflen As Long) As Integer +Declare Function uncompress Lib "ZLIB32.DLL" + (ByVal uncompr As String, uncomprLen As Any, ByVal compr As + String, ByVal lcompr As Long) As Long +Declare Function gzopen Lib "ZLIB32.DLL" + (ByVal file As String, ByVal mode As String) As Long +Declare Function gzread Lib "ZLIB32.DLL" + (ByVal file As Long, ByVal uncompr As String, ByVal + uncomprLen As Long) As Long +Declare Function gzwrite Lib "ZLIB32.DLL" + (ByVal file As Long, ByVal uncompr As String, ByVal + uncomprLen As Long) As Long +Declare Function gzclose Lib "ZLIB32.DLL" + (ByVal file As Long) As Long +#End If + +-Jon Caruana +jon-net@usa.net +Microsoft Sitebuilder Network Level 1 Member - HTML Writer's Guild Member + + +Here is another example from Michael that he +says conforms to the VB guidelines, and that solves the problem of not +knowing the uncompressed size by storing it at the end of the file: + +'Calling the functions: +'bracket meaning: [optional] {Range of possible values} +'Call subCompressFile( [, , [level of compression {1..9}]]) +'Call subUncompressFile() + +Option Explicit +Private lngpvtPcnSml As Long 'Stores value for 
'lngPercentSmaller' +Private Const SUCCESS As Long = 0 +Private Const strFilExt As String = ".cpr" +Private Declare Function lngfncCpr Lib "zlib.dll" Alias "compress2" (ByRef +dest As Any, ByRef destLen As Any, ByRef src As Any, ByVal srcLen As Long, +ByVal level As Integer) As Long +Private Declare Function lngfncUcp Lib "zlib.dll" Alias "uncompress" (ByRef +dest As Any, ByRef destLen As Any, ByRef src As Any, ByVal srcLen As Long) +As Long + +Public Sub subCompressFile(ByVal strargOriFilPth As String, Optional ByVal +strargCprFilPth As String, Optional ByVal intLvl As Integer = 9) + Dim strCprPth As String + Dim lngOriSiz As Long + Dim lngCprSiz As Long + Dim bytaryOri() As Byte + Dim bytaryCpr() As Byte + lngOriSiz = FileLen(strargOriFilPth) + ReDim bytaryOri(lngOriSiz - 1) + Open strargOriFilPth For Binary Access Read As #1 + Get #1, , bytaryOri() + Close #1 + strCprPth = IIf(strargCprFilPth = "", strargOriFilPth, strargCprFilPth) +'Select file path and name + strCprPth = strCprPth & IIf(Right(strCprPth, Len(strFilExt)) = +strFilExt, "", strFilExt) 'Add file extension if not exists + lngCprSiz = (lngOriSiz * 1.01) + 12 'Compression needs temporary a bit +more space then original file size + ReDim bytaryCpr(lngCprSiz - 1) + If lngfncCpr(bytaryCpr(0), lngCprSiz, bytaryOri(0), lngOriSiz, intLvl) = +SUCCESS Then + lngpvtPcnSml = (1# - (lngCprSiz / lngOriSiz)) * 100 + ReDim Preserve bytaryCpr(lngCprSiz - 1) + Open strCprPth For Binary Access Write As #1 + Put #1, , bytaryCpr() + Put #1, , lngOriSiz 'Add the the original size value to the end +(last 4 bytes) + Close #1 + Else + MsgBox "Compression error" + End If + Erase bytaryCpr + Erase bytaryOri +End Sub + +Public Sub subUncompressFile(ByVal strargFilPth As String) + Dim bytaryCpr() As Byte + Dim bytaryOri() As Byte + Dim lngOriSiz As Long + Dim lngCprSiz As Long + Dim strOriPth As String + lngCprSiz = FileLen(strargFilPth) + ReDim bytaryCpr(lngCprSiz - 1) + Open strargFilPth For Binary Access Read As #1 + Get #1, 
, bytaryCpr() + Close #1 + 'Read the original file size value: + lngOriSiz = bytaryCpr(lngCprSiz - 1) * (2 ^ 24) _ + + bytaryCpr(lngCprSiz - 2) * (2 ^ 16) _ + + bytaryCpr(lngCprSiz - 3) * (2 ^ 8) _ + + bytaryCpr(lngCprSiz - 4) + ReDim Preserve bytaryCpr(lngCprSiz - 5) 'Cut of the original size value + ReDim bytaryOri(lngOriSiz - 1) + If lngfncUcp(bytaryOri(0), lngOriSiz, bytaryCpr(0), lngCprSiz) = SUCCESS +Then + strOriPth = Left(strargFilPth, Len(strargFilPth) - Len(strFilExt)) + Open strOriPth For Binary Access Write As #1 + Put #1, , bytaryOri() + Close #1 + Else + MsgBox "Uncompression error" + End If + Erase bytaryCpr + Erase bytaryOri +End Sub +Public Property Get lngPercentSmaller() As Long + lngPercentSmaller = lngpvtPcnSml +End Property diff -Nru nodejs-0.11.13/deps/zlib/qnx/package.qpg nodejs-0.11.15/deps/zlib/qnx/package.qpg --- nodejs-0.11.13/deps/zlib/qnx/package.qpg 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/qnx/package.qpg 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Library + + Medium + + 2.0 + + + + zlib + zlib + alain.bonnefoy@icbt.com + Public + public + www.gzip.org/zlib + + + Jean-Loup Gailly,Mark Adler + www.gzip.org/zlib + + zlib@gzip.org + + + A massively spiffy yet delicately unobtrusive compression library. + zlib is designed to be a free, general-purpose, legally unencumbered, lossless data compression library for use on virtually any computer hardware and operating system. 
+ http://www.gzip.org/zlib + + + + + 1.2.8 + Medium + Stable + + + + + + + No License + + + + Software Development/Libraries and Extensions/C Libraries + zlib,compression + qnx6 + qnx6 + None + Developer + + + + + + + + + + + + + + Install + Post + No + Ignore + + No + Optional + + + + + + + + + + + + + InstallOver + zlib + + + + + + + + + + + + + InstallOver + zlib-dev + + + + + + + + + diff -Nru nodejs-0.11.13/deps/zlib/README nodejs-0.11.15/deps/zlib/README --- nodejs-0.11.13/deps/zlib/README 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/README 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,115 @@ +ZLIB DATA COMPRESSION LIBRARY + +zlib 1.2.8 is a general purpose data compression library. All the code is +thread safe. The data format used by the zlib library is described by RFCs +(Request for Comments) 1950 to 1952 in the files +http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and +rfc1952 (gzip format). + +All functions of the compression library are documented in the file zlib.h +(volunteer to write man pages welcome, contact zlib@gzip.org). A usage example +of the library is given in the file test/example.c which also tests that +the library is working correctly. Another example is given in the file +test/minigzip.c. The compression library itself is composed of all source +files in the root directory. + +To compile all files and run the test program, follow the instructions given at +the top of Makefile.in. In short "./configure; make test", and if that goes +well, "make install" should work for most flavors of Unix. For Windows, use +one of the special makefiles in win32/ or contrib/vstudio/ . For VMS, use +make_vms.com. + +Questions about zlib should be sent to , or to Gilles Vollant + for the Windows DLL version. The zlib home page is +http://zlib.net/ . 
Before reporting a problem, please check this site to +verify that you have the latest version of zlib; otherwise get the latest +version and check whether the problem still exists or not. + +PLEASE read the zlib FAQ http://zlib.net/zlib_faq.html before asking for help. + +Mark Nelson wrote an article about zlib for the Jan. 1997 +issue of Dr. Dobb's Journal; a copy of the article is available at +http://marknelson.us/1997/01/01/zlib-engine/ . + +The changes made in version 1.2.8 are documented in the file ChangeLog. + +Unsupported third party contributions are provided in directory contrib/ . + +zlib is available in Java using the java.util.zip package, documented at +http://java.sun.com/developer/technicalArticles/Programming/compression/ . + +A Perl interface to zlib written by Paul Marquess is available +at CPAN (Comprehensive Perl Archive Network) sites, including +http://search.cpan.org/~pmqs/IO-Compress-Zlib/ . + +A Python interface to zlib written by A.M. Kuchling is +available in Python 1.5 and later versions, see +http://docs.python.org/library/zlib.html . + +zlib is built into tcl: http://wiki.tcl.tk/4610 . + +An experimental package to read and write files in .zip format, written on top +of zlib by Gilles Vollant , is available in the +contrib/minizip directory of zlib. + + +Notes for some targets: + +- For Windows DLL versions, please see win32/DLL_FAQ.txt + +- For 64-bit Irix, deflate.c must be compiled without any optimization. With + -O, one libpng test fails. The test works in 32 bit mode (with the -n32 + compiler flag). The compiler bug has been reported to SGI. + +- zlib doesn't work with gcc 2.6.3 on a DEC 3000/300LX under OSF/1 2.1 it works + when compiled with cc. + +- On Digital Unix 4.0D (formely OSF/1) on AlphaServer, the cc option -std1 is + necessary to get gzprintf working correctly. This is done by configure. + +- zlib doesn't work on HP-UX 9.05 with some versions of /bin/cc. It works with + other compilers. 
Use "make test" to check your compiler. + +- gzdopen is not supported on RISCOS or BEOS. + +- For PalmOs, see http://palmzlib.sourceforge.net/ + + +Acknowledgments: + + The deflate format used by zlib was defined by Phil Katz. The deflate and + zlib specifications were written by L. Peter Deutsch. Thanks to all the + people who reported problems and suggested various improvements in zlib; they + are too numerous to cite here. + +Copyright notice: + + (C) 1995-2013 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +If you use the zlib library in a product, we would appreciate *not* receiving +lengthy legal documents to sign. The sources are provided for free but without +warranty of any kind. The library has been entirely written by Jean-loup +Gailly and Mark Adler; it does not include third-party code. + +If you redistribute modified sources, we would appreciate that you include in +the file ChangeLog history information documenting your changes. Please read +the FAQ for more information on the distribution of modified source versions. 
diff -Nru nodejs-0.11.13/deps/zlib/README.chromium nodejs-0.11.15/deps/zlib/README.chromium --- nodejs-0.11.13/deps/zlib/README.chromium 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/README.chromium 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -Name: zlib -URL: http://zlib.net/ -Version: 1.2.3 - -Description: -General purpose compression library - -Local Modifications: -A few minor changes, all marked with "Google": -- Added #ifdefs to avoid compile warnings when NO_GZCOMPRESS is defined. -- Removed use of strerror for WinCE in gzio.c. -- Added 'int z_errno' global for WinCE, to which 'errno' is defined in zutil.h. diff -Nru nodejs-0.11.13/deps/zlib/test/example.c nodejs-0.11.15/deps/zlib/test/example.c --- nodejs-0.11.13/deps/zlib/test/example.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/test/example.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,601 @@ +/* example.c -- usage example of the zlib compression library + * Copyright (C) 1995-2006, 2011 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zlib.h" +#include + +#ifdef STDC +# include +# include +#endif + +#if defined(VMS) || defined(RISCOS) +# define TESTFILE "foo-gz" +#else +# define TESTFILE "foo.gz" +#endif + +#define CHECK_ERR(err, msg) { \ + if (err != Z_OK) { \ + fprintf(stderr, "%s error: %d\n", msg, err); \ + exit(1); \ + } \ +} + +z_const char hello[] = "hello, hello!"; +/* "hello world" would be more standard, but the repeated "hello" + * stresses the compression code better, sorry... 
+ */ + +const char dictionary[] = "hello"; +uLong dictId; /* Adler32 value of the dictionary */ + +void test_deflate OF((Byte *compr, uLong comprLen)); +void test_inflate OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +void test_large_deflate OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +void test_large_inflate OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +void test_flush OF((Byte *compr, uLong *comprLen)); +void test_sync OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +void test_dict_deflate OF((Byte *compr, uLong comprLen)); +void test_dict_inflate OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +int main OF((int argc, char *argv[])); + + +#ifdef Z_SOLO + +void *myalloc OF((void *, unsigned, unsigned)); +void myfree OF((void *, void *)); + +void *myalloc(q, n, m) + void *q; + unsigned n, m; +{ + q = Z_NULL; + return calloc(n, m); +} + +void myfree(void *q, void *p) +{ + q = Z_NULL; + free(p); +} + +static alloc_func zalloc = myalloc; +static free_func zfree = myfree; + +#else /* !Z_SOLO */ + +static alloc_func zalloc = (alloc_func)0; +static free_func zfree = (free_func)0; + +void test_compress OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +void test_gzio OF((const char *fname, + Byte *uncompr, uLong uncomprLen)); + +/* =========================================================================== + * Test compress() and uncompress() + */ +void test_compress(compr, comprLen, uncompr, uncomprLen) + Byte *compr, *uncompr; + uLong comprLen, uncomprLen; +{ + int err; + uLong len = (uLong)strlen(hello)+1; + + err = compress(compr, &comprLen, (const Bytef*)hello, len); + CHECK_ERR(err, "compress"); + + strcpy((char*)uncompr, "garbage"); + + err = uncompress(uncompr, &uncomprLen, compr, comprLen); + CHECK_ERR(err, "uncompress"); + + if (strcmp((char*)uncompr, hello)) { + fprintf(stderr, "bad uncompress\n"); + exit(1); + } else { + 
printf("uncompress(): %s\n", (char *)uncompr); + } +} + +/* =========================================================================== + * Test read/write of .gz files + */ +void test_gzio(fname, uncompr, uncomprLen) + const char *fname; /* compressed file name */ + Byte *uncompr; + uLong uncomprLen; +{ +#ifdef NO_GZCOMPRESS + fprintf(stderr, "NO_GZCOMPRESS -- gz* functions cannot compress\n"); +#else + int err; + int len = (int)strlen(hello)+1; + gzFile file; + z_off_t pos; + + file = gzopen(fname, "wb"); + if (file == NULL) { + fprintf(stderr, "gzopen error\n"); + exit(1); + } + gzputc(file, 'h'); + if (gzputs(file, "ello") != 4) { + fprintf(stderr, "gzputs err: %s\n", gzerror(file, &err)); + exit(1); + } + if (gzprintf(file, ", %s!", "hello") != 8) { + fprintf(stderr, "gzprintf err: %s\n", gzerror(file, &err)); + exit(1); + } + gzseek(file, 1L, SEEK_CUR); /* add one zero byte */ + gzclose(file); + + file = gzopen(fname, "rb"); + if (file == NULL) { + fprintf(stderr, "gzopen error\n"); + exit(1); + } + strcpy((char*)uncompr, "garbage"); + + if (gzread(file, uncompr, (unsigned)uncomprLen) != len) { + fprintf(stderr, "gzread err: %s\n", gzerror(file, &err)); + exit(1); + } + if (strcmp((char*)uncompr, hello)) { + fprintf(stderr, "bad gzread: %s\n", (char*)uncompr); + exit(1); + } else { + printf("gzread(): %s\n", (char*)uncompr); + } + + pos = gzseek(file, -8L, SEEK_CUR); + if (pos != 6 || gztell(file) != pos) { + fprintf(stderr, "gzseek error, pos=%ld, gztell=%ld\n", + (long)pos, (long)gztell(file)); + exit(1); + } + + if (gzgetc(file) != ' ') { + fprintf(stderr, "gzgetc error\n"); + exit(1); + } + + if (gzungetc(' ', file) != ' ') { + fprintf(stderr, "gzungetc error\n"); + exit(1); + } + + gzgets(file, (char*)uncompr, (int)uncomprLen); + if (strlen((char*)uncompr) != 7) { /* " hello!" 
*/ + fprintf(stderr, "gzgets err after gzseek: %s\n", gzerror(file, &err)); + exit(1); + } + if (strcmp((char*)uncompr, hello + 6)) { + fprintf(stderr, "bad gzgets after gzseek\n"); + exit(1); + } else { + printf("gzgets() after gzseek: %s\n", (char*)uncompr); + } + + gzclose(file); +#endif +} + +#endif /* Z_SOLO */ + +/* =========================================================================== + * Test deflate() with small buffers + */ +void test_deflate(compr, comprLen) + Byte *compr; + uLong comprLen; +{ + z_stream c_stream; /* compression stream */ + int err; + uLong len = (uLong)strlen(hello)+1; + + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; + c_stream.opaque = (voidpf)0; + + err = deflateInit(&c_stream, Z_DEFAULT_COMPRESSION); + CHECK_ERR(err, "deflateInit"); + + c_stream.next_in = (z_const unsigned char *)hello; + c_stream.next_out = compr; + + while (c_stream.total_in != len && c_stream.total_out < comprLen) { + c_stream.avail_in = c_stream.avail_out = 1; /* force small buffers */ + err = deflate(&c_stream, Z_NO_FLUSH); + CHECK_ERR(err, "deflate"); + } + /* Finish the stream, still forcing small buffers: */ + for (;;) { + c_stream.avail_out = 1; + err = deflate(&c_stream, Z_FINISH); + if (err == Z_STREAM_END) break; + CHECK_ERR(err, "deflate"); + } + + err = deflateEnd(&c_stream); + CHECK_ERR(err, "deflateEnd"); +} + +/* =========================================================================== + * Test inflate() with small buffers + */ +void test_inflate(compr, comprLen, uncompr, uncomprLen) + Byte *compr, *uncompr; + uLong comprLen, uncomprLen; +{ + int err; + z_stream d_stream; /* decompression stream */ + + strcpy((char*)uncompr, "garbage"); + + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; + d_stream.opaque = (voidpf)0; + + d_stream.next_in = compr; + d_stream.avail_in = 0; + d_stream.next_out = uncompr; + + err = inflateInit(&d_stream); + CHECK_ERR(err, "inflateInit"); + + while (d_stream.total_out < uncomprLen && d_stream.total_in < 
comprLen) { + d_stream.avail_in = d_stream.avail_out = 1; /* force small buffers */ + err = inflate(&d_stream, Z_NO_FLUSH); + if (err == Z_STREAM_END) break; + CHECK_ERR(err, "inflate"); + } + + err = inflateEnd(&d_stream); + CHECK_ERR(err, "inflateEnd"); + + if (strcmp((char*)uncompr, hello)) { + fprintf(stderr, "bad inflate\n"); + exit(1); + } else { + printf("inflate(): %s\n", (char *)uncompr); + } +} + +/* =========================================================================== + * Test deflate() with large buffers and dynamic change of compression level + */ +void test_large_deflate(compr, comprLen, uncompr, uncomprLen) + Byte *compr, *uncompr; + uLong comprLen, uncomprLen; +{ + z_stream c_stream; /* compression stream */ + int err; + + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; + c_stream.opaque = (voidpf)0; + + err = deflateInit(&c_stream, Z_BEST_SPEED); + CHECK_ERR(err, "deflateInit"); + + c_stream.next_out = compr; + c_stream.avail_out = (uInt)comprLen; + + /* At this point, uncompr is still mostly zeroes, so it should compress + * very well: + */ + c_stream.next_in = uncompr; + c_stream.avail_in = (uInt)uncomprLen; + err = deflate(&c_stream, Z_NO_FLUSH); + CHECK_ERR(err, "deflate"); + if (c_stream.avail_in != 0) { + fprintf(stderr, "deflate not greedy\n"); + exit(1); + } + + /* Feed in already compressed data and switch to no compression: */ + deflateParams(&c_stream, Z_NO_COMPRESSION, Z_DEFAULT_STRATEGY); + c_stream.next_in = compr; + c_stream.avail_in = (uInt)comprLen/2; + err = deflate(&c_stream, Z_NO_FLUSH); + CHECK_ERR(err, "deflate"); + + /* Switch back to compressing mode: */ + deflateParams(&c_stream, Z_BEST_COMPRESSION, Z_FILTERED); + c_stream.next_in = uncompr; + c_stream.avail_in = (uInt)uncomprLen; + err = deflate(&c_stream, Z_NO_FLUSH); + CHECK_ERR(err, "deflate"); + + err = deflate(&c_stream, Z_FINISH); + if (err != Z_STREAM_END) { + fprintf(stderr, "deflate should report Z_STREAM_END\n"); + exit(1); + } + err = 
deflateEnd(&c_stream); + CHECK_ERR(err, "deflateEnd"); +} + +/* =========================================================================== + * Test inflate() with large buffers + */ +void test_large_inflate(compr, comprLen, uncompr, uncomprLen) + Byte *compr, *uncompr; + uLong comprLen, uncomprLen; +{ + int err; + z_stream d_stream; /* decompression stream */ + + strcpy((char*)uncompr, "garbage"); + + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; + d_stream.opaque = (voidpf)0; + + d_stream.next_in = compr; + d_stream.avail_in = (uInt)comprLen; + + err = inflateInit(&d_stream); + CHECK_ERR(err, "inflateInit"); + + for (;;) { + d_stream.next_out = uncompr; /* discard the output */ + d_stream.avail_out = (uInt)uncomprLen; + err = inflate(&d_stream, Z_NO_FLUSH); + if (err == Z_STREAM_END) break; + CHECK_ERR(err, "large inflate"); + } + + err = inflateEnd(&d_stream); + CHECK_ERR(err, "inflateEnd"); + + if (d_stream.total_out != 2*uncomprLen + comprLen/2) { + fprintf(stderr, "bad large inflate: %ld\n", d_stream.total_out); + exit(1); + } else { + printf("large_inflate(): OK\n"); + } +} + +/* =========================================================================== + * Test deflate() with full flush + */ +void test_flush(compr, comprLen) + Byte *compr; + uLong *comprLen; +{ + z_stream c_stream; /* compression stream */ + int err; + uInt len = (uInt)strlen(hello)+1; + + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; + c_stream.opaque = (voidpf)0; + + err = deflateInit(&c_stream, Z_DEFAULT_COMPRESSION); + CHECK_ERR(err, "deflateInit"); + + c_stream.next_in = (z_const unsigned char *)hello; + c_stream.next_out = compr; + c_stream.avail_in = 3; + c_stream.avail_out = (uInt)*comprLen; + err = deflate(&c_stream, Z_FULL_FLUSH); + CHECK_ERR(err, "deflate"); + + compr[3]++; /* force an error in first compressed block */ + c_stream.avail_in = len - 3; + + err = deflate(&c_stream, Z_FINISH); + if (err != Z_STREAM_END) { + CHECK_ERR(err, "deflate"); + } + err = 
deflateEnd(&c_stream); + CHECK_ERR(err, "deflateEnd"); + + *comprLen = c_stream.total_out; +} + +/* =========================================================================== + * Test inflateSync() + */ +void test_sync(compr, comprLen, uncompr, uncomprLen) + Byte *compr, *uncompr; + uLong comprLen, uncomprLen; +{ + int err; + z_stream d_stream; /* decompression stream */ + + strcpy((char*)uncompr, "garbage"); + + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; + d_stream.opaque = (voidpf)0; + + d_stream.next_in = compr; + d_stream.avail_in = 2; /* just read the zlib header */ + + err = inflateInit(&d_stream); + CHECK_ERR(err, "inflateInit"); + + d_stream.next_out = uncompr; + d_stream.avail_out = (uInt)uncomprLen; + + inflate(&d_stream, Z_NO_FLUSH); + CHECK_ERR(err, "inflate"); + + d_stream.avail_in = (uInt)comprLen-2; /* read all compressed data */ + err = inflateSync(&d_stream); /* but skip the damaged part */ + CHECK_ERR(err, "inflateSync"); + + err = inflate(&d_stream, Z_FINISH); + if (err != Z_DATA_ERROR) { + fprintf(stderr, "inflate should report DATA_ERROR\n"); + /* Because of incorrect adler32 */ + exit(1); + } + err = inflateEnd(&d_stream); + CHECK_ERR(err, "inflateEnd"); + + printf("after inflateSync(): hel%s\n", (char *)uncompr); +} + +/* =========================================================================== + * Test deflate() with preset dictionary + */ +void test_dict_deflate(compr, comprLen) + Byte *compr; + uLong comprLen; +{ + z_stream c_stream; /* compression stream */ + int err; + + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; + c_stream.opaque = (voidpf)0; + + err = deflateInit(&c_stream, Z_BEST_COMPRESSION); + CHECK_ERR(err, "deflateInit"); + + err = deflateSetDictionary(&c_stream, + (const Bytef*)dictionary, (int)sizeof(dictionary)); + CHECK_ERR(err, "deflateSetDictionary"); + + dictId = c_stream.adler; + c_stream.next_out = compr; + c_stream.avail_out = (uInt)comprLen; + + c_stream.next_in = (z_const unsigned char *)hello; + 
c_stream.avail_in = (uInt)strlen(hello)+1; + + err = deflate(&c_stream, Z_FINISH); + if (err != Z_STREAM_END) { + fprintf(stderr, "deflate should report Z_STREAM_END\n"); + exit(1); + } + err = deflateEnd(&c_stream); + CHECK_ERR(err, "deflateEnd"); +} + +/* =========================================================================== + * Test inflate() with a preset dictionary + */ +void test_dict_inflate(compr, comprLen, uncompr, uncomprLen) + Byte *compr, *uncompr; + uLong comprLen, uncomprLen; +{ + int err; + z_stream d_stream; /* decompression stream */ + + strcpy((char*)uncompr, "garbage"); + + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; + d_stream.opaque = (voidpf)0; + + d_stream.next_in = compr; + d_stream.avail_in = (uInt)comprLen; + + err = inflateInit(&d_stream); + CHECK_ERR(err, "inflateInit"); + + d_stream.next_out = uncompr; + d_stream.avail_out = (uInt)uncomprLen; + + for (;;) { + err = inflate(&d_stream, Z_NO_FLUSH); + if (err == Z_STREAM_END) break; + if (err == Z_NEED_DICT) { + if (d_stream.adler != dictId) { + fprintf(stderr, "unexpected dictionary"); + exit(1); + } + err = inflateSetDictionary(&d_stream, (const Bytef*)dictionary, + (int)sizeof(dictionary)); + } + CHECK_ERR(err, "inflate with dict"); + } + + err = inflateEnd(&d_stream); + CHECK_ERR(err, "inflateEnd"); + + if (strcmp((char*)uncompr, hello)) { + fprintf(stderr, "bad inflate with dict\n"); + exit(1); + } else { + printf("inflate with dictionary: %s\n", (char *)uncompr); + } +} + +/* =========================================================================== + * Usage: example [output.gz [input.gz]] + */ + +int main(argc, argv) + int argc; + char *argv[]; +{ + Byte *compr, *uncompr; + uLong comprLen = 10000*sizeof(int); /* don't overflow on MSDOS */ + uLong uncomprLen = comprLen; + static const char* myVersion = ZLIB_VERSION; + + if (zlibVersion()[0] != myVersion[0]) { + fprintf(stderr, "incompatible zlib version\n"); + exit(1); + + } else if (strcmp(zlibVersion(), ZLIB_VERSION) 
!= 0) { + fprintf(stderr, "warning: different zlib version\n"); + } + + printf("zlib version %s = 0x%04x, compile flags = 0x%lx\n", + ZLIB_VERSION, ZLIB_VERNUM, zlibCompileFlags()); + + compr = (Byte*)calloc((uInt)comprLen, 1); + uncompr = (Byte*)calloc((uInt)uncomprLen, 1); + /* compr and uncompr are cleared to avoid reading uninitialized + * data and to ensure that uncompr compresses well. + */ + if (compr == Z_NULL || uncompr == Z_NULL) { + printf("out of memory\n"); + exit(1); + } + +#ifdef Z_SOLO + argc = strlen(argv[0]); +#else + test_compress(compr, comprLen, uncompr, uncomprLen); + + test_gzio((argc > 1 ? argv[1] : TESTFILE), + uncompr, uncomprLen); +#endif + + test_deflate(compr, comprLen); + test_inflate(compr, comprLen, uncompr, uncomprLen); + + test_large_deflate(compr, comprLen, uncompr, uncomprLen); + test_large_inflate(compr, comprLen, uncompr, uncomprLen); + + test_flush(compr, &comprLen); + test_sync(compr, comprLen, uncompr, uncomprLen); + comprLen = uncomprLen; + + test_dict_deflate(compr, comprLen); + test_dict_inflate(compr, comprLen, uncompr, uncomprLen); + + free(compr); + free(uncompr); + + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/test/infcover.c nodejs-0.11.15/deps/zlib/test/infcover.c --- nodejs-0.11.13/deps/zlib/test/infcover.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/test/infcover.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,671 @@ +/* infcover.c -- test zlib's inflate routines with full code coverage + * Copyright (C) 2011 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* to use, do: ./configure --cover && make cover */ + +#include +#include +#include +#include +#include "zlib.h" + +/* get definition of internal structure so we can mess with it (see pull()), + and so we can call inflate_trees() (see cover5()) */ +#define ZLIB_INTERNAL +#include "inftrees.h" +#include "inflate.h" + +#define local static + +/* -- memory tracking routines -- */ + +/* + These 
memory tracking routines are provided to zlib and track all of zlib's + allocations and deallocations, check for LIFO operations, keep a current + and high water mark of total bytes requested, optionally set a limit on the + total memory that can be allocated, and when done check for memory leaks. + + They are used as follows: + + z_stream strm; + mem_setup(&strm) initializes the memory tracking and sets the + zalloc, zfree, and opaque members of strm to use + memory tracking for all zlib operations on strm + mem_limit(&strm, limit) sets a limit on the total bytes requested -- a + request that exceeds this limit will result in an + allocation failure (returns NULL) -- setting the + limit to zero means no limit, which is the default + after mem_setup() + mem_used(&strm, "msg") prints to stderr "msg" and the total bytes used + mem_high(&strm, "msg") prints to stderr "msg" and the high water mark + mem_done(&strm, "msg") ends memory tracking, releases all allocations + for the tracking as well as leaked zlib blocks, if + any. If there was anything unusual, such as leaked + blocks, non-FIFO frees, or frees of addresses not + allocated, then "msg" and information about the + problem is printed to stderr. If everything is + normal, nothing is printed. mem_done resets the + strm members to Z_NULL to use the default memory + allocation routines on the next zlib initialization + using strm. 
+ */ + +/* these items are strung together in a linked list, one for each allocation */ +struct mem_item { + void *ptr; /* pointer to allocated memory */ + size_t size; /* requested size of allocation */ + struct mem_item *next; /* pointer to next item in list, or NULL */ +}; + +/* this structure is at the root of the linked list, and tracks statistics */ +struct mem_zone { + struct mem_item *first; /* pointer to first item in list, or NULL */ + size_t total, highwater; /* total allocations, and largest total */ + size_t limit; /* memory allocation limit, or 0 if no limit */ + int notlifo, rogue; /* counts of non-LIFO frees and rogue frees */ +}; + +/* memory allocation routine to pass to zlib */ +local void *mem_alloc(void *mem, unsigned count, unsigned size) +{ + void *ptr; + struct mem_item *item; + struct mem_zone *zone = mem; + size_t len = count * (size_t)size; + + /* induced allocation failure */ + if (zone == NULL || (zone->limit && zone->total + len > zone->limit)) + return NULL; + + /* perform allocation using the standard library, fill memory with a + non-zero value to make sure that the code isn't depending on zeros */ + ptr = malloc(len); + if (ptr == NULL) + return NULL; + memset(ptr, 0xa5, len); + + /* create a new item for the list */ + item = malloc(sizeof(struct mem_item)); + if (item == NULL) { + free(ptr); + return NULL; + } + item->ptr = ptr; + item->size = len; + + /* insert item at the beginning of the list */ + item->next = zone->first; + zone->first = item; + + /* update the statistics */ + zone->total += item->size; + if (zone->total > zone->highwater) + zone->highwater = zone->total; + + /* return the allocated memory */ + return ptr; +} + +/* memory free routine to pass to zlib */ +local void mem_free(void *mem, void *ptr) +{ + struct mem_item *item, *next; + struct mem_zone *zone = mem; + + /* if no zone, just do a free */ + if (zone == NULL) { + free(ptr); + return; + } + + /* point next to the item that matches ptr, or NULL if not 
found -- remove + the item from the linked list if found */ + next = zone->first; + if (next) { + if (next->ptr == ptr) + zone->first = next->next; /* first one is it, remove from list */ + else { + do { /* search the linked list */ + item = next; + next = item->next; + } while (next != NULL && next->ptr != ptr); + if (next) { /* if found, remove from linked list */ + item->next = next->next; + zone->notlifo++; /* not a LIFO free */ + } + + } + } + + /* if found, update the statistics and free the item */ + if (next) { + zone->total -= next->size; + free(next); + } + + /* if not found, update the rogue count */ + else + zone->rogue++; + + /* in any case, do the requested free with the standard library function */ + free(ptr); +} + +/* set up a controlled memory allocation space for monitoring, set the stream + parameters to the controlled routines, with opaque pointing to the space */ +local void mem_setup(z_stream *strm) +{ + struct mem_zone *zone; + + zone = malloc(sizeof(struct mem_zone)); + assert(zone != NULL); + zone->first = NULL; + zone->total = 0; + zone->highwater = 0; + zone->limit = 0; + zone->notlifo = 0; + zone->rogue = 0; + strm->opaque = zone; + strm->zalloc = mem_alloc; + strm->zfree = mem_free; +} + +/* set a limit on the total memory allocation, or 0 to remove the limit */ +local void mem_limit(z_stream *strm, size_t limit) +{ + struct mem_zone *zone = strm->opaque; + + zone->limit = limit; +} + +/* show the current total requested allocations in bytes */ +local void mem_used(z_stream *strm, char *prefix) +{ + struct mem_zone *zone = strm->opaque; + + fprintf(stderr, "%s: %lu allocated\n", prefix, zone->total); +} + +/* show the high water allocation in bytes */ +local void mem_high(z_stream *strm, char *prefix) +{ + struct mem_zone *zone = strm->opaque; + + fprintf(stderr, "%s: %lu high water mark\n", prefix, zone->highwater); +} + +/* release the memory allocation zone -- if there are any surprises, notify */ +local void mem_done(z_stream 
*strm, char *prefix) +{ + int count = 0; + struct mem_item *item, *next; + struct mem_zone *zone = strm->opaque; + + /* show high water mark */ + mem_high(strm, prefix); + + /* free leftover allocations and item structures, if any */ + item = zone->first; + while (item != NULL) { + free(item->ptr); + next = item->next; + free(item); + item = next; + count++; + } + + /* issue alerts about anything unexpected */ + if (count || zone->total) + fprintf(stderr, "** %s: %lu bytes in %d blocks not freed\n", + prefix, zone->total, count); + if (zone->notlifo) + fprintf(stderr, "** %s: %d frees not LIFO\n", prefix, zone->notlifo); + if (zone->rogue) + fprintf(stderr, "** %s: %d frees not recognized\n", + prefix, zone->rogue); + + /* free the zone and delete from the stream */ + free(zone); + strm->opaque = Z_NULL; + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; +} + +/* -- inflate test routines -- */ + +/* Decode a hexadecimal string, set *len to length, in[] to the bytes. This + decodes liberally, in that hex digits can be adjacent, in which case two in + a row writes a byte. Or they can delimited by any non-hex character, where + the delimiters are ignored except when a single hex digit is followed by a + delimiter in which case that single digit writes a byte. The returned + data is allocated and must eventually be freed. NULL is returned if out of + memory. If the length is not needed, then len can be NULL. 
*/ +local unsigned char *h2b(const char *hex, unsigned *len) +{ + unsigned char *in; + unsigned next, val; + + in = malloc((strlen(hex) + 1) >> 1); + if (in == NULL) + return NULL; + next = 0; + val = 1; + do { + if (*hex >= '0' && *hex <= '9') + val = (val << 4) + *hex - '0'; + else if (*hex >= 'A' && *hex <= 'F') + val = (val << 4) + *hex - 'A' + 10; + else if (*hex >= 'a' && *hex <= 'f') + val = (val << 4) + *hex - 'a' + 10; + else if (val != 1 && val < 32) /* one digit followed by delimiter */ + val += 240; /* make it look like two digits */ + if (val > 255) { /* have two digits */ + in[next++] = val & 0xff; /* save the decoded byte */ + val = 1; /* start over */ + } + } while (*hex++); /* go through the loop with the terminating null */ + if (len != NULL) + *len = next; + in = reallocf(in, next); + return in; +} + +/* generic inflate() run, where hex is the hexadecimal input data, what is the + text to include in an error message, step is how much input data to feed + inflate() on each call, or zero to feed it all, win is the window bits + parameter to inflateInit2(), len is the size of the output buffer, and err + is the error code expected from the first inflate() call (the second + inflate() call is expected to return Z_STREAM_END). If win is 47, then + header information is collected with inflateGetHeader(). If a zlib stream + is looking for a dictionary, then an empty dictionary is provided. + inflate() is run until all of the input data is consumed. 
*/ +local void inf(char *hex, char *what, unsigned step, int win, unsigned len, + int err) +{ + int ret; + unsigned have; + unsigned char *in, *out; + z_stream strm, copy; + gz_header head; + + mem_setup(&strm); + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit2(&strm, win); + if (ret != Z_OK) { + mem_done(&strm, what); + return; + } + out = malloc(len); assert(out != NULL); + if (win == 47) { + head.extra = out; + head.extra_max = len; + head.name = out; + head.name_max = len; + head.comment = out; + head.comm_max = len; + ret = inflateGetHeader(&strm, &head); assert(ret == Z_OK); + } + in = h2b(hex, &have); assert(in != NULL); + if (step == 0 || step > have) + step = have; + strm.avail_in = step; + have -= step; + strm.next_in = in; + do { + strm.avail_out = len; + strm.next_out = out; + ret = inflate(&strm, Z_NO_FLUSH); assert(err == 9 || ret == err); + if (ret != Z_OK && ret != Z_BUF_ERROR && ret != Z_NEED_DICT) + break; + if (ret == Z_NEED_DICT) { + ret = inflateSetDictionary(&strm, in, 1); + assert(ret == Z_DATA_ERROR); + mem_limit(&strm, 1); + ret = inflateSetDictionary(&strm, out, 0); + assert(ret == Z_MEM_ERROR); + mem_limit(&strm, 0); + ((struct inflate_state *)strm.state)->mode = DICT; + ret = inflateSetDictionary(&strm, out, 0); + assert(ret == Z_OK); + ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_BUF_ERROR); + } + ret = inflateCopy(©, &strm); assert(ret == Z_OK); + ret = inflateEnd(©); assert(ret == Z_OK); + err = 9; /* don't care next time around */ + have += strm.avail_in; + strm.avail_in = step > have ? 
have : step; + have -= strm.avail_in; + } while (strm.avail_in); + free(in); + free(out); + ret = inflateReset2(&strm, -8); assert(ret == Z_OK); + ret = inflateEnd(&strm); assert(ret == Z_OK); + mem_done(&strm, what); +} + +/* cover all of the lines in inflate.c up to inflate() */ +local void cover_support(void) +{ + int ret; + z_stream strm; + + mem_setup(&strm); + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit(&strm); assert(ret == Z_OK); + mem_used(&strm, "inflate init"); + ret = inflatePrime(&strm, 5, 31); assert(ret == Z_OK); + ret = inflatePrime(&strm, -1, 0); assert(ret == Z_OK); + ret = inflateSetDictionary(&strm, Z_NULL, 0); + assert(ret == Z_STREAM_ERROR); + ret = inflateEnd(&strm); assert(ret == Z_OK); + mem_done(&strm, "prime"); + + inf("63 0", "force window allocation", 0, -15, 1, Z_OK); + inf("63 18 5", "force window replacement", 0, -8, 259, Z_OK); + inf("63 18 68 30 d0 0 0", "force split window update", 4, -8, 259, Z_OK); + inf("3 0", "use fixed blocks", 0, -15, 1, Z_STREAM_END); + inf("", "bad window size", 0, 1, 0, Z_STREAM_ERROR); + + mem_setup(&strm); + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit_(&strm, ZLIB_VERSION - 1, (int)sizeof(z_stream)); + assert(ret == Z_VERSION_ERROR); + mem_done(&strm, "wrong version"); + + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit(&strm); assert(ret == Z_OK); + ret = inflateEnd(&strm); assert(ret == Z_OK); + fputs("inflate built-in memory routines\n", stderr); +} + +/* cover all inflate() header and trailer cases and code after inflate() */ +local void cover_wrap(void) +{ + int ret; + z_stream strm, copy; + unsigned char dict[257]; + + ret = inflate(Z_NULL, 0); assert(ret == Z_STREAM_ERROR); + ret = inflateEnd(Z_NULL); assert(ret == Z_STREAM_ERROR); + ret = inflateCopy(Z_NULL, Z_NULL); assert(ret == Z_STREAM_ERROR); + fputs("inflate bad parameters\n", stderr); + + inf("1f 8b 0 0", "bad gzip method", 0, 31, 0, Z_DATA_ERROR); + inf("1f 8b 8 80", "bad gzip 
flags", 0, 31, 0, Z_DATA_ERROR); + inf("77 85", "bad zlib method", 0, 15, 0, Z_DATA_ERROR); + inf("8 99", "set window size from header", 0, 0, 0, Z_OK); + inf("78 9c", "bad zlib window size", 0, 8, 0, Z_DATA_ERROR); + inf("78 9c 63 0 0 0 1 0 1", "check adler32", 0, 15, 1, Z_STREAM_END); + inf("1f 8b 8 1e 0 0 0 0 0 0 1 0 0 0 0 0 0", "bad header crc", 0, 47, 1, + Z_DATA_ERROR); + inf("1f 8b 8 2 0 0 0 0 0 0 1d 26 3 0 0 0 0 0 0 0 0 0", "check gzip length", + 0, 47, 0, Z_STREAM_END); + inf("78 90", "bad zlib header check", 0, 47, 0, Z_DATA_ERROR); + inf("8 b8 0 0 0 1", "need dictionary", 0, 8, 0, Z_NEED_DICT); + inf("78 9c 63 0", "compute adler32", 0, 15, 1, Z_OK); + + mem_setup(&strm); + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit2(&strm, -8); + strm.avail_in = 2; + strm.next_in = (void *)"\x63"; + strm.avail_out = 1; + strm.next_out = (void *)&ret; + mem_limit(&strm, 1); + ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_MEM_ERROR); + ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_MEM_ERROR); + mem_limit(&strm, 0); + memset(dict, 0, 257); + ret = inflateSetDictionary(&strm, dict, 257); + assert(ret == Z_OK); + mem_limit(&strm, (sizeof(struct inflate_state) << 1) + 256); + ret = inflatePrime(&strm, 16, 0); assert(ret == Z_OK); + strm.avail_in = 2; + strm.next_in = (void *)"\x80"; + ret = inflateSync(&strm); assert(ret == Z_DATA_ERROR); + ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_STREAM_ERROR); + strm.avail_in = 4; + strm.next_in = (void *)"\0\0\xff\xff"; + ret = inflateSync(&strm); assert(ret == Z_OK); + (void)inflateSyncPoint(&strm); + ret = inflateCopy(©, &strm); assert(ret == Z_MEM_ERROR); + mem_limit(&strm, 0); + ret = inflateUndermine(&strm, 1); assert(ret == Z_DATA_ERROR); + (void)inflateMark(&strm); + ret = inflateEnd(&strm); assert(ret == Z_OK); + mem_done(&strm, "miscellaneous, force memory errors"); +} + +/* input and output functions for inflateBack() */ +local unsigned pull(void *desc, unsigned char **buf) +{ + static 
unsigned int next = 0; + static unsigned char dat[] = {0x63, 0, 2, 0}; + struct inflate_state *state; + + if (desc == Z_NULL) { + next = 0; + return 0; /* no input (already provided at next_in) */ + } + state = (void *)((z_stream *)desc)->state; + if (state != Z_NULL) + state->mode = SYNC; /* force an otherwise impossible situation */ + return next < sizeof(dat) ? (*buf = dat + next++, 1) : 0; +} + +local int push(void *desc, unsigned char *buf, unsigned len) +{ + buf += len; + return desc != Z_NULL; /* force error if desc not null */ +} + +/* cover inflateBack() up to common deflate data cases and after those */ +local void cover_back(void) +{ + int ret; + z_stream strm; + unsigned char win[32768]; + + ret = inflateBackInit_(Z_NULL, 0, win, 0, 0); + assert(ret == Z_VERSION_ERROR); + ret = inflateBackInit(Z_NULL, 0, win); assert(ret == Z_STREAM_ERROR); + ret = inflateBack(Z_NULL, Z_NULL, Z_NULL, Z_NULL, Z_NULL); + assert(ret == Z_STREAM_ERROR); + ret = inflateBackEnd(Z_NULL); assert(ret == Z_STREAM_ERROR); + fputs("inflateBack bad parameters\n", stderr); + + mem_setup(&strm); + ret = inflateBackInit(&strm, 15, win); assert(ret == Z_OK); + strm.avail_in = 2; + strm.next_in = (void *)"\x03"; + ret = inflateBack(&strm, pull, Z_NULL, push, Z_NULL); + assert(ret == Z_STREAM_END); + /* force output error */ + strm.avail_in = 3; + strm.next_in = (void *)"\x63\x00"; + ret = inflateBack(&strm, pull, Z_NULL, push, &strm); + assert(ret == Z_BUF_ERROR); + /* force mode error by mucking with state */ + ret = inflateBack(&strm, pull, &strm, push, Z_NULL); + assert(ret == Z_STREAM_ERROR); + ret = inflateBackEnd(&strm); assert(ret == Z_OK); + mem_done(&strm, "inflateBack bad state"); + + ret = inflateBackInit(&strm, 15, win); assert(ret == Z_OK); + ret = inflateBackEnd(&strm); assert(ret == Z_OK); + fputs("inflateBack built-in memory routines\n", stderr); +} + +/* do a raw inflate of data in hexadecimal with both inflate and inflateBack */ +local int try(char *hex, char *id, int 
err) +{ + int ret; + unsigned len, size; + unsigned char *in, *out, *win; + char *prefix; + z_stream strm; + + /* convert to hex */ + in = h2b(hex, &len); + assert(in != NULL); + + /* allocate work areas */ + size = len << 3; + out = malloc(size); + assert(out != NULL); + win = malloc(32768); + assert(win != NULL); + prefix = malloc(strlen(id) + 6); + assert(prefix != NULL); + + /* first with inflate */ + strcpy(prefix, id); + strcat(prefix, "-late"); + mem_setup(&strm); + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit2(&strm, err < 0 ? 47 : -15); + assert(ret == Z_OK); + strm.avail_in = len; + strm.next_in = in; + do { + strm.avail_out = size; + strm.next_out = out; + ret = inflate(&strm, Z_TREES); + assert(ret != Z_STREAM_ERROR && ret != Z_MEM_ERROR); + if (ret == Z_DATA_ERROR || ret == Z_NEED_DICT) + break; + } while (strm.avail_in || strm.avail_out == 0); + if (err) { + assert(ret == Z_DATA_ERROR); + assert(strcmp(id, strm.msg) == 0); + } + inflateEnd(&strm); + mem_done(&strm, prefix); + + /* then with inflateBack */ + if (err >= 0) { + strcpy(prefix, id); + strcat(prefix, "-back"); + mem_setup(&strm); + ret = inflateBackInit(&strm, 15, win); + assert(ret == Z_OK); + strm.avail_in = len; + strm.next_in = in; + ret = inflateBack(&strm, pull, Z_NULL, push, Z_NULL); + assert(ret != Z_STREAM_ERROR); + if (err) { + assert(ret == Z_DATA_ERROR); + assert(strcmp(id, strm.msg) == 0); + } + inflateBackEnd(&strm); + mem_done(&strm, prefix); + } + + /* clean up */ + free(prefix); + free(win); + free(out); + free(in); + return ret; +} + +/* cover deflate data cases in both inflate() and inflateBack() */ +local void cover_inflate(void) +{ + try("0 0 0 0 0", "invalid stored block lengths", 1); + try("3 0", "fixed", 0); + try("6", "invalid block type", 1); + try("1 1 0 fe ff 0", "stored", 0); + try("fc 0 0", "too many length or distance symbols", 1); + try("4 0 fe ff", "invalid code lengths set", 1); + try("4 0 24 49 0", "invalid bit length repeat", 1); + 
try("4 0 24 e9 ff ff", "invalid bit length repeat", 1); + try("4 0 24 e9 ff 6d", "invalid code -- missing end-of-block", 1); + try("4 80 49 92 24 49 92 24 71 ff ff 93 11 0", + "invalid literal/lengths set", 1); + try("4 80 49 92 24 49 92 24 f b4 ff ff c3 84", "invalid distances set", 1); + try("4 c0 81 8 0 0 0 0 20 7f eb b 0 0", "invalid literal/length code", 1); + try("2 7e ff ff", "invalid distance code", 1); + try("c c0 81 0 0 0 0 0 90 ff 6b 4 0", "invalid distance too far back", 1); + + /* also trailer mismatch just in inflate() */ + try("1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 1", "incorrect data check", -1); + try("1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 1", + "incorrect length check", -1); + try("5 c0 21 d 0 0 0 80 b0 fe 6d 2f 91 6c", "pull 17", 0); + try("5 e0 81 91 24 cb b2 2c 49 e2 f 2e 8b 9a 47 56 9f fb fe ec d2 ff 1f", + "long code", 0); + try("ed c0 1 1 0 0 0 40 20 ff 57 1b 42 2c 4f", "length extra", 0); + try("ed cf c1 b1 2c 47 10 c4 30 fa 6f 35 1d 1 82 59 3d fb be 2e 2a fc f c", + "long distance and extra", 0); + try("ed c0 81 0 0 0 0 80 a0 fd a9 17 a9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 " + "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6", "window end", 0); + inf("2 8 20 80 0 3 0", "inflate_fast TYPE return", 0, -15, 258, + Z_STREAM_END); + inf("63 18 5 40 c 0", "window wrap", 3, -8, 300, Z_OK); +} + +/* cover remaining lines in inftrees.c */ +local void cover_trees(void) +{ + int ret; + unsigned bits; + unsigned short lens[16], work[16]; + code *next, table[ENOUGH_DISTS]; + + /* we need to call inflate_table() directly in order to manifest not- + enough errors, since zlib insures that enough is always enough */ + for (bits = 0; bits < 15; bits++) + lens[bits] = (unsigned short)(bits + 1); + lens[15] = 15; + next = table; + bits = 15; + ret = inflate_table(DISTS, lens, 16, &next, &bits, work); + assert(ret == 1); + next = table; + bits = 1; + ret = inflate_table(DISTS, lens, 16, &next, &bits, work); + assert(ret == 1); + fputs("inflate_table not enough errors\n", stderr); 
+} + +/* cover remaining inffast.c decoding and window copying */ +local void cover_fast(void) +{ + inf("e5 e0 81 ad 6d cb b2 2c c9 01 1e 59 63 ae 7d ee fb 4d fd b5 35 41 68" + " ff 7f 0f 0 0 0", "fast length extra bits", 0, -8, 258, Z_DATA_ERROR); + inf("25 fd 81 b5 6d 59 b6 6a 49 ea af 35 6 34 eb 8c b9 f6 b9 1e ef 67 49" + " 50 fe ff ff 3f 0 0", "fast distance extra bits", 0, -8, 258, + Z_DATA_ERROR); + inf("3 7e 0 0 0 0 0", "fast invalid distance code", 0, -8, 258, + Z_DATA_ERROR); + inf("1b 7 0 0 0 0 0", "fast invalid literal/length code", 0, -8, 258, + Z_DATA_ERROR); + inf("d c7 1 ae eb 38 c 4 41 a0 87 72 de df fb 1f b8 36 b1 38 5d ff ff 0", + "fast 2nd level codes and too far back", 0, -8, 258, Z_DATA_ERROR); + inf("63 18 5 8c 10 8 0 0 0 0", "very common case", 0, -8, 259, Z_OK); + inf("63 60 60 18 c9 0 8 18 18 18 26 c0 28 0 29 0 0 0", + "contiguous and wrap around window", 6, -8, 259, Z_OK); + inf("63 0 3 0 0 0 0 0", "copy direct from output", 0, -8, 259, + Z_STREAM_END); +} + +int main(void) +{ + fprintf(stderr, "%s\n", zlibVersion()); + cover_support(); + cover_wrap(); + cover_back(); + cover_inflate(); + cover_trees(); + cover_fast(); + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/test/minigzip.c nodejs-0.11.15/deps/zlib/test/minigzip.c --- nodejs-0.11.13/deps/zlib/test/minigzip.c 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/test/minigzip.c 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,651 @@ +/* minigzip.c -- simulate gzip using the zlib compression library + * Copyright (C) 1995-2006, 2010, 2011 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * minigzip is a minimal implementation of the gzip utility. This is + * only an example of using zlib and isn't meant to replace the + * full-featured gzip. No attempt is made to deal with file systems + * limiting names to 14 or 8+3 characters, etc... Error checking is + * very limited. 
So use minigzip only for testing; use gzip for the + * real thing. On MSDOS, use only on file names without extension + * or in pipe mode. + */ + +/* @(#) $Id$ */ + +#include "zlib.h" +#include + +#ifdef STDC +# include +# include +#endif + +#ifdef USE_MMAP +# include +# include +# include +#endif + +#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__) +# include +# include +# ifdef UNDER_CE +# include +# endif +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +#ifdef _MSC_VER +# define snprintf _snprintf +#endif + +#ifdef VMS +# define unlink delete +# define GZ_SUFFIX "-gz" +#endif +#ifdef RISCOS +# define unlink remove +# define GZ_SUFFIX "-gz" +# define fileno(file) file->__file +#endif +#if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os +# include /* for fileno */ +#endif + +#if !defined(Z_HAVE_UNISTD_H) && !defined(_LARGEFILE64_SOURCE) +#ifndef WIN32 /* unlink already in stdio.h for WIN32 */ + extern int unlink OF((const char *)); +#endif +#endif + +#if defined(UNDER_CE) +# include +# define perror(s) pwinerror(s) + +/* Map the Windows error number in ERROR to a locale-dependent error + message string and return a pointer to it. Typically, the values + for ERROR come from GetLastError. + + The string pointed to shall not be modified by the application, + but may be overwritten by a subsequent call to strwinerror + + The strwinerror function does not change the current setting + of GetLastError. */ + +static char *strwinerror (error) + DWORD error; +{ + static char buf[1024]; + + wchar_t *msgbuf; + DWORD lasterr = GetLastError(); + DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM + | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, + error, + 0, /* Default language */ + (LPVOID)&msgbuf, + 0, + NULL); + if (chars != 0) { + /* If there is an \r\n appended, zap it. 
*/ + if (chars >= 2 + && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { + chars -= 2; + msgbuf[chars] = 0; + } + + if (chars > sizeof (buf) - 1) { + chars = sizeof (buf) - 1; + msgbuf[chars] = 0; + } + + wcstombs(buf, msgbuf, chars + 1); + LocalFree(msgbuf); + } + else { + sprintf(buf, "unknown win32 error (%ld)", error); + } + + SetLastError(lasterr); + return buf; +} + +static void pwinerror (s) + const char *s; +{ + if (s && *s) + fprintf(stderr, "%s: %s\n", s, strwinerror(GetLastError ())); + else + fprintf(stderr, "%s\n", strwinerror(GetLastError ())); +} + +#endif /* UNDER_CE */ + +#ifndef GZ_SUFFIX +# define GZ_SUFFIX ".gz" +#endif +#define SUFFIX_LEN (sizeof(GZ_SUFFIX)-1) + +#define BUFLEN 16384 +#define MAX_NAME_LEN 1024 + +#ifdef MAXSEG_64K +# define local static + /* Needed for systems with limitation on stack size. */ +#else +# define local +#endif + +#ifdef Z_SOLO +/* for Z_SOLO, create simplified gz* functions using deflate and inflate */ + +#if defined(Z_HAVE_UNISTD_H) || defined(Z_LARGE) +# include /* for unlink() */ +#endif + +void *myalloc OF((void *, unsigned, unsigned)); +void myfree OF((void *, void *)); + +void *myalloc(q, n, m) + void *q; + unsigned n, m; +{ + q = Z_NULL; + return calloc(n, m); +} + +void myfree(q, p) + void *q, *p; +{ + q = Z_NULL; + free(p); +} + +typedef struct gzFile_s { + FILE *file; + int write; + int err; + char *msg; + z_stream strm; +} *gzFile; + +gzFile gzopen OF((const char *, const char *)); +gzFile gzdopen OF((int, const char *)); +gzFile gz_open OF((const char *, int, const char *)); + +gzFile gzopen(path, mode) +const char *path; +const char *mode; +{ + return gz_open(path, -1, mode); +} + +gzFile gzdopen(fd, mode) +int fd; +const char *mode; +{ + return gz_open(NULL, fd, mode); +} + +gzFile gz_open(path, fd, mode) + const char *path; + int fd; + const char *mode; +{ + gzFile gz; + int ret; + + gz = malloc(sizeof(struct gzFile_s)); + if (gz == NULL) + return NULL; + gz->write = strchr(mode, 'w') != 
NULL; + gz->strm.zalloc = myalloc; + gz->strm.zfree = myfree; + gz->strm.opaque = Z_NULL; + if (gz->write) + ret = deflateInit2(&(gz->strm), -1, 8, 15 + 16, 8, 0); + else { + gz->strm.next_in = 0; + gz->strm.avail_in = Z_NULL; + ret = inflateInit2(&(gz->strm), 15 + 16); + } + if (ret != Z_OK) { + free(gz); + return NULL; + } + gz->file = path == NULL ? fdopen(fd, gz->write ? "wb" : "rb") : + fopen(path, gz->write ? "wb" : "rb"); + if (gz->file == NULL) { + gz->write ? deflateEnd(&(gz->strm)) : inflateEnd(&(gz->strm)); + free(gz); + return NULL; + } + gz->err = 0; + gz->msg = ""; + return gz; +} + +int gzwrite OF((gzFile, const void *, unsigned)); + +int gzwrite(gz, buf, len) + gzFile gz; + const void *buf; + unsigned len; +{ + z_stream *strm; + unsigned char out[BUFLEN]; + + if (gz == NULL || !gz->write) + return 0; + strm = &(gz->strm); + strm->next_in = (void *)buf; + strm->avail_in = len; + do { + strm->next_out = out; + strm->avail_out = BUFLEN; + (void)deflate(strm, Z_NO_FLUSH); + fwrite(out, 1, BUFLEN - strm->avail_out, gz->file); + } while (strm->avail_out == 0); + return len; +} + +int gzread OF((gzFile, void *, unsigned)); + +int gzread(gz, buf, len) + gzFile gz; + void *buf; + unsigned len; +{ + int ret; + unsigned got; + unsigned char in[1]; + z_stream *strm; + + if (gz == NULL || gz->write) + return 0; + if (gz->err) + return 0; + strm = &(gz->strm); + strm->next_out = (void *)buf; + strm->avail_out = len; + do { + got = fread(in, 1, 1, gz->file); + if (got == 0) + break; + strm->next_in = in; + strm->avail_in = 1; + ret = inflate(strm, Z_NO_FLUSH); + if (ret == Z_DATA_ERROR) { + gz->err = Z_DATA_ERROR; + gz->msg = strm->msg; + return 0; + } + if (ret == Z_STREAM_END) + inflateReset(strm); + } while (strm->avail_out); + return len - strm->avail_out; +} + +int gzclose OF((gzFile)); + +int gzclose(gz) + gzFile gz; +{ + z_stream *strm; + unsigned char out[BUFLEN]; + + if (gz == NULL) + return Z_STREAM_ERROR; + strm = &(gz->strm); + if (gz->write) { + 
strm->next_in = Z_NULL; + strm->avail_in = 0; + do { + strm->next_out = out; + strm->avail_out = BUFLEN; + (void)deflate(strm, Z_FINISH); + fwrite(out, 1, BUFLEN - strm->avail_out, gz->file); + } while (strm->avail_out == 0); + deflateEnd(strm); + } + else + inflateEnd(strm); + fclose(gz->file); + free(gz); + return Z_OK; +} + +const char *gzerror OF((gzFile, int *)); + +const char *gzerror(gz, err) + gzFile gz; + int *err; +{ + *err = gz->err; + return gz->msg; +} + +#endif + +char *prog; + +void error OF((const char *msg)); +void gz_compress OF((FILE *in, gzFile out)); +#ifdef USE_MMAP +int gz_compress_mmap OF((FILE *in, gzFile out)); +#endif +void gz_uncompress OF((gzFile in, FILE *out)); +void file_compress OF((char *file, char *mode)); +void file_uncompress OF((char *file)); +int main OF((int argc, char *argv[])); + +/* =========================================================================== + * Display error message and exit + */ +void error(msg) + const char *msg; +{ + fprintf(stderr, "%s: %s\n", prog, msg); + exit(1); +} + +/* =========================================================================== + * Compress input to output then close both files. + */ + +void gz_compress(in, out) + FILE *in; + gzFile out; +{ + local char buf[BUFLEN]; + int len; + int err; + +#ifdef USE_MMAP + /* Try first compressing with mmap. If mmap fails (minigzip used in a + * pipe), use the normal fread loop. + */ + if (gz_compress_mmap(in, out) == Z_OK) return; +#endif + for (;;) { + len = (int)fread(buf, 1, sizeof(buf), in); + if (ferror(in)) { + perror("fread"); + exit(1); + } + if (len == 0) break; + + if (gzwrite(out, buf, (unsigned)len) != len) error(gzerror(out, &err)); + } + fclose(in); + if (gzclose(out) != Z_OK) error("failed gzclose"); +} + +#ifdef USE_MMAP /* MMAP version, Miguel Albrecht */ + +/* Try compressing the input file at once using mmap. Return Z_OK if + * if success, Z_ERRNO otherwise. 
+ */ +int gz_compress_mmap(in, out) + FILE *in; + gzFile out; +{ + int len; + int err; + int ifd = fileno(in); + caddr_t buf; /* mmap'ed buffer for the entire input file */ + off_t buf_len; /* length of the input file */ + struct stat sb; + + /* Determine the size of the file, needed for mmap: */ + if (fstat(ifd, &sb) < 0) return Z_ERRNO; + buf_len = sb.st_size; + if (buf_len <= 0) return Z_ERRNO; + + /* Now do the actual mmap: */ + buf = mmap((caddr_t) 0, buf_len, PROT_READ, MAP_SHARED, ifd, (off_t)0); + if (buf == (caddr_t)(-1)) return Z_ERRNO; + + /* Compress the whole file at once: */ + len = gzwrite(out, (char *)buf, (unsigned)buf_len); + + if (len != (int)buf_len) error(gzerror(out, &err)); + + munmap(buf, buf_len); + fclose(in); + if (gzclose(out) != Z_OK) error("failed gzclose"); + return Z_OK; +} +#endif /* USE_MMAP */ + +/* =========================================================================== + * Uncompress input to output then close both files. + */ +void gz_uncompress(in, out) + gzFile in; + FILE *out; +{ + local char buf[BUFLEN]; + int len; + int err; + + for (;;) { + len = gzread(in, buf, sizeof(buf)); + if (len < 0) error (gzerror(in, &err)); + if (len == 0) break; + + if ((int)fwrite(buf, 1, (unsigned)len, out) != len) { + error("failed fwrite"); + } + } + if (fclose(out)) error("failed fclose"); + + if (gzclose(in) != Z_OK) error("failed gzclose"); +} + + +/* =========================================================================== + * Compress the given file: create a corresponding .gz file and remove the + * original. 
+ */ +void file_compress(file, mode) + char *file; + char *mode; +{ + local char outfile[MAX_NAME_LEN]; + FILE *in; + gzFile out; + + if (strlen(file) + strlen(GZ_SUFFIX) >= sizeof(outfile)) { + fprintf(stderr, "%s: filename too long\n", prog); + exit(1); + } + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(outfile, sizeof(outfile), "%s%s", file, GZ_SUFFIX); +#else + strcpy(outfile, file); + strcat(outfile, GZ_SUFFIX); +#endif + + in = fopen(file, "rb"); + if (in == NULL) { + perror(file); + exit(1); + } + out = gzopen(outfile, mode); + if (out == NULL) { + fprintf(stderr, "%s: can't gzopen %s\n", prog, outfile); + exit(1); + } + gz_compress(in, out); + + unlink(file); +} + + +/* =========================================================================== + * Uncompress the given file and remove the original. + */ +void file_uncompress(file) + char *file; +{ + local char buf[MAX_NAME_LEN]; + char *infile, *outfile; + FILE *out; + gzFile in; + size_t len = strlen(file); + + if (len + strlen(GZ_SUFFIX) >= sizeof(buf)) { + fprintf(stderr, "%s: filename too long\n", prog); + exit(1); + } + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(buf, sizeof(buf), "%s", file); +#else + strcpy(buf, file); +#endif + + if (len > SUFFIX_LEN && strcmp(file+len-SUFFIX_LEN, GZ_SUFFIX) == 0) { + infile = file; + outfile = buf; + outfile[len-3] = '\0'; + } else { + outfile = file; + infile = buf; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(buf + len, sizeof(buf) - len, "%s", GZ_SUFFIX); +#else + strcat(infile, GZ_SUFFIX); +#endif + } + in = gzopen(infile, "rb"); + if (in == NULL) { + fprintf(stderr, "%s: can't gzopen %s\n", prog, infile); + exit(1); + } + out = fopen(outfile, "wb"); + if (out == NULL) { + perror(file); + exit(1); + } + + gz_uncompress(in, out); + + unlink(infile); +} + + +/* =========================================================================== + * Usage: minigzip [-c] [-d] [-f] [-h] [-r] [-1 to -9] [files...] 
+ * -c : write to standard output + * -d : decompress + * -f : compress with Z_FILTERED + * -h : compress with Z_HUFFMAN_ONLY + * -r : compress with Z_RLE + * -1 to -9 : compression level + */ + +int main(argc, argv) + int argc; + char *argv[]; +{ + int copyout = 0; + int uncompr = 0; + gzFile file; + char *bname, outmode[20]; + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(outmode, sizeof(outmode), "%s", "wb6 "); +#else + strcpy(outmode, "wb6 "); +#endif + + prog = argv[0]; + bname = strrchr(argv[0], '/'); + if (bname) + bname++; + else + bname = argv[0]; + argc--, argv++; + + if (!strcmp(bname, "gunzip")) + uncompr = 1; + else if (!strcmp(bname, "zcat")) + copyout = uncompr = 1; + + while (argc > 0) { + if (strcmp(*argv, "-c") == 0) + copyout = 1; + else if (strcmp(*argv, "-d") == 0) + uncompr = 1; + else if (strcmp(*argv, "-f") == 0) + outmode[3] = 'f'; + else if (strcmp(*argv, "-h") == 0) + outmode[3] = 'h'; + else if (strcmp(*argv, "-r") == 0) + outmode[3] = 'R'; + else if ((*argv)[0] == '-' && (*argv)[1] >= '1' && (*argv)[1] <= '9' && + (*argv)[2] == 0) + outmode[2] = (*argv)[1]; + else + break; + argc--, argv++; + } + if (outmode[3] == ' ') + outmode[3] = 0; + if (argc == 0) { + SET_BINARY_MODE(stdin); + SET_BINARY_MODE(stdout); + if (uncompr) { + file = gzdopen(fileno(stdin), "rb"); + if (file == NULL) error("can't gzdopen stdin"); + gz_uncompress(file, stdout); + } else { + file = gzdopen(fileno(stdout), outmode); + if (file == NULL) error("can't gzdopen stdout"); + gz_compress(stdin, file); + } + } else { + if (copyout) { + SET_BINARY_MODE(stdout); + } + do { + if (uncompr) { + if (copyout) { + file = gzopen(*argv, "rb"); + if (file == NULL) + fprintf(stderr, "%s: can't gzopen %s\n", prog, *argv); + else + gz_uncompress(file, stdout); + } else { + file_uncompress(*argv); + } + } else { + if (copyout) { + FILE * in = fopen(*argv, "rb"); + + if (in == NULL) { + perror(*argv); + } else { + file = gzdopen(fileno(stdout), outmode); + if (file 
== NULL) error("can't gzdopen stdout"); + + gz_compress(in, file); + } + + } else { + file_compress(*argv, outmode); + } + } + } while (argv++, --argc); + } + return 0; +} diff -Nru nodejs-0.11.13/deps/zlib/treebuild.xml nodejs-0.11.15/deps/zlib/treebuild.xml --- nodejs-0.11.13/deps/zlib/treebuild.xml 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/treebuild.xml 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,116 @@ + + + + zip compression library + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru nodejs-0.11.13/deps/zlib/trees.c nodejs-0.11.15/deps/zlib/trees.c --- nodejs-0.11.13/deps/zlib/trees.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/trees.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,6 @@ /* trees.c -- output deflated data using Huffman coding - * Copyright (C) 1995-2005 Jean-loup Gailly + * Copyright (C) 1995-2012 Jean-loup Gailly + * detect_data_type() function provided freely by Cosmin Truta, 2006 * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -29,7 +30,7 @@ * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ -/* @(#) $Id: trees.c,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ /* #define GEN_TREES_H */ @@ -73,11 +74,6 @@ * probability, to avoid transmitting the lengths for unused bit length codes. */ -#define Buf_size (8 * 2*sizeof(char)) -/* Number of bits used within bi_buf. (bi_buf might be implemented on - * more than 16 bits on some systems.) - */ - /* =========================================================================== * Local data. These are initialized only once. 
*/ @@ -150,9 +146,9 @@ local int build_bl_tree OF((deflate_state *s)); local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, int blcodes)); -local void compress_block OF((deflate_state *s, ct_data *ltree, - ct_data *dtree)); -local void set_data_type OF((deflate_state *s)); +local void compress_block OF((deflate_state *s, const ct_data *ltree, + const ct_data *dtree)); +local int detect_data_type OF((deflate_state *s)); local unsigned bi_reverse OF((unsigned value, int length)); local void bi_windup OF((deflate_state *s)); local void bi_flush OF((deflate_state *s)); @@ -203,12 +199,12 @@ * unused bits in value. */ if (s->bi_valid > (int)Buf_size - length) { - s->bi_buf |= (value << s->bi_valid); + s->bi_buf |= (ush)value << s->bi_valid; put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { - s->bi_buf |= value << s->bi_valid; + s->bi_buf |= (ush)value << s->bi_valid; s->bi_valid += length; } } @@ -218,12 +214,12 @@ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = value;\ - s->bi_buf |= (val << s->bi_valid);\ + s->bi_buf |= (ush)val << s->bi_valid;\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ - s->bi_buf |= (value) << s->bi_valid;\ + s->bi_buf |= (ush)(value) << s->bi_valid;\ s->bi_valid += len;\ }\ } @@ -250,11 +246,13 @@ if (static_init_done) return; /* For some embedded targets, global variables are not initialized: */ +#ifdef NO_INIT_GLOBAL_POINTERS static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; static_d_desc.static_tree = static_dtree; static_d_desc.extra_bits = extra_dbits; static_bl_desc.extra_bits = extra_blbits; +#endif /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; @@ -348,13 +346,14 @@ static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); } - fprintf(header, "const uch _dist_code[DIST_CODE_LEN] = {\n"); + 
fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); for (i = 0; i < DIST_CODE_LEN; i++) { fprintf(header, "%2u%s", _dist_code[i], SEPARATOR(i, DIST_CODE_LEN-1, 20)); } - fprintf(header, "const uch _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); + fprintf(header, + "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { fprintf(header, "%2u%s", _length_code[i], SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); @@ -379,7 +378,7 @@ /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ -void _tr_init(s) +void ZLIB_INTERNAL _tr_init(s) deflate_state *s; { tr_static_init(); @@ -395,7 +394,6 @@ s->bi_buf = 0; s->bi_valid = 0; - s->last_eob_len = 8; /* enough lookahead for inflate */ #ifdef DEBUG s->compressed_len = 0L; s->bits_sent = 0L; @@ -864,13 +862,13 @@ /* =========================================================================== * Send a stored block */ -void _tr_stored_block(s, buf, stored_len, eof) +void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) deflate_state *s; charf *buf; /* input block */ ulg stored_len; /* length of input block */ - int eof; /* true if this is the last block for a file */ + int last; /* one if this is the last block for a file */ { - send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ + send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */ #ifdef DEBUG s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; s->compressed_len += (stored_len + 4) << 3; @@ -879,17 +877,19 @@ } /* =========================================================================== + * Flush the bits in the bit buffer to pending output (leaves at most 7 bits) + */ +void ZLIB_INTERNAL _tr_flush_bits(s) + deflate_state *s; +{ + bi_flush(s); +} + +/* =========================================================================== * Send one empty static block to give enough lookahead 
for inflate. * This takes 10 bits, of which 7 may remain in the bit buffer. - * The current inflate code requires 9 bits of lookahead. If the - * last two codes for the previous block (real code plus EOB) were coded - * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode - * the last real code. In this case we send two empty static blocks instead - * of one. (There are no problems if the previous block is stored or fixed.) - * To simplify the code, we assume the worst case of last real code encoded - * on one bit only. */ -void _tr_align(s) +void ZLIB_INTERNAL _tr_align(s) deflate_state *s; { send_bits(s, STATIC_TREES<<1, 3); @@ -898,31 +898,17 @@ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ #endif bi_flush(s); - /* Of the 10 bits for the empty block, we have already sent - * (10 - bi_valid) bits. The lookahead for the last real code (before - * the EOB of the previous block) was thus at least one plus the length - * of the EOB plus what we have just sent of the empty static block. - */ - if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { - send_bits(s, STATIC_TREES<<1, 3); - send_code(s, END_BLOCK, static_ltree); -#ifdef DEBUG - s->compressed_len += 10L; -#endif - bi_flush(s); - } - s->last_eob_len = 7; } /* =========================================================================== * Determine the best encoding for the current block: dynamic trees, static * trees or store, and output the encoded block to the zip file. 
*/ -void _tr_flush_block(s, buf, stored_len, eof) +void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) deflate_state *s; charf *buf; /* input block, or NULL if too old */ ulg stored_len; /* length of input block */ - int eof; /* true if this is the last block for a file */ + int last; /* one if this is the last block for a file */ { ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ int max_blindex = 0; /* index of last bit length code of non zero freq */ @@ -931,8 +917,8 @@ if (s->level > 0) { /* Check if the file is binary or text */ - if (stored_len > 0 && s->strm->data_type == Z_UNKNOWN) - set_data_type(s); + if (s->strm->data_type == Z_UNKNOWN) + s->strm->data_type = detect_data_type(s); /* Construct the literal and distance trees */ build_tree(s, (tree_desc *)(&(s->l_desc))); @@ -978,23 +964,25 @@ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. */ - _tr_stored_block(s, buf, stored_len, eof); + _tr_stored_block(s, buf, stored_len, last); #ifdef FORCE_STATIC } else if (static_lenb >= 0) { /* force static trees */ #else } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { #endif - send_bits(s, (STATIC_TREES<<1)+eof, 3); - compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); + send_bits(s, (STATIC_TREES<<1)+last, 3); + compress_block(s, (const ct_data *)static_ltree, + (const ct_data *)static_dtree); #ifdef DEBUG s->compressed_len += 3 + s->static_len; #endif } else { - send_bits(s, (DYN_TREES<<1)+eof, 3); + send_bits(s, (DYN_TREES<<1)+last, 3); send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, max_blindex+1); - compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); + compress_block(s, (const ct_data *)s->dyn_ltree, + (const ct_data *)s->dyn_dtree); #ifdef DEBUG s->compressed_len += 3 + s->opt_len; #endif @@ -1005,21 +993,21 @@ */ init_block(s); - if (eof) { + if (last) { bi_windup(s); #ifdef DEBUG s->compressed_len += 7; /* align on 
byte boundary */ #endif } Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, - s->compressed_len-7*eof)); + s->compressed_len-7*last)); } /* =========================================================================== * Save the match info and tally the frequency counts. Return true if * the current block must be flushed. */ -int _tr_tally (s, dist, lc) +int ZLIB_INTERNAL _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ @@ -1071,8 +1059,8 @@ */ local void compress_block(s, ltree, dtree) deflate_state *s; - ct_data *ltree; /* literal tree */ - ct_data *dtree; /* distance tree */ + const ct_data *ltree; /* literal tree */ + const ct_data *dtree; /* distance tree */ { unsigned dist; /* distance of matched string */ int lc; /* match length or unmatched char (if dist == 0) */ @@ -1114,28 +1102,48 @@ } while (lx < s->last_lit); send_code(s, END_BLOCK, ltree); - s->last_eob_len = ltree[END_BLOCK].Len; } /* =========================================================================== - * Set the data type to BINARY or TEXT, using a crude approximation: - * set it to Z_TEXT if all symbols are either printable characters (33 to 255) - * or white spaces (9 to 13, or 32); or set it to Z_BINARY otherwise. + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). * IN assertion: the fields Freq of dyn_ltree are set. 
*/ -local void set_data_type(s) +local int detect_data_type(s) deflate_state *s; { + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + unsigned long black_mask = 0xf3ffc07fUL; int n; - for (n = 0; n < 9; n++) + /* Check for non-textual ("black-listed") bytes. */ + for (n = 0; n <= 31; n++, black_mask >>= 1) + if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0)) + return Z_BINARY; + + /* Check for textual ("white-listed") bytes. */ + if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0 + || s->dyn_ltree[13].Freq != 0) + return Z_TEXT; + for (n = 32; n < LITERALS; n++) if (s->dyn_ltree[n].Freq != 0) - break; - if (n == 9) - for (n = 14; n < 32; n++) - if (s->dyn_ltree[n].Freq != 0) - break; - s->strm->data_type = (n == 32) ? Z_TEXT : Z_BINARY; + return Z_TEXT; + + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. 
+ */ + return Z_BINARY; } /* =========================================================================== @@ -1201,7 +1209,6 @@ int header; /* true if block header must be written */ { bi_windup(s); /* align on byte boundary */ - s->last_eob_len = 8; /* enough lookahead for inflate */ if (header) { put_short(s, (ush)len); diff -Nru nodejs-0.11.13/deps/zlib/trees.h nodejs-0.11.15/deps/zlib/trees.h --- nodejs-0.11.13/deps/zlib/trees.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/trees.h 2015-01-20 21:22:17.000000000 +0000 @@ -70,7 +70,7 @@ {{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} }; -const uch _dist_code[DIST_CODE_LEN] = { +const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, @@ -99,7 +99,7 @@ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; -const uch _length_code[MAX_MATCH-MIN_MATCH+1]= { +const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, diff -Nru nodejs-0.11.13/deps/zlib/uncompr.c nodejs-0.11.15/deps/zlib/uncompr.c --- nodejs-0.11.13/deps/zlib/uncompr.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/uncompr.c 2015-01-20 21:22:17.000000000 +0000 @@ -1,9 +1,9 @@ /* uncompr.c -- decompress a memory buffer - * Copyright (C) 1995-2003 Jean-loup Gailly. + * Copyright (C) 1995-2003, 2010 Jean-loup Gailly. 
* For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: uncompr.c,v 3.6 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #define ZLIB_INTERNAL #include "zlib.h" @@ -16,8 +16,6 @@ been saved previously by the compressor and transmitted to the decompressor by some mechanism outside the scope of this compression library.) Upon exit, destLen is the actual size of the compressed buffer. - This function can be used to decompress a whole file at once if the - input file is mmap'ed. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output @@ -32,7 +30,7 @@ z_stream stream; int err; - stream.next_in = (Bytef*)source; + stream.next_in = (z_const Bytef *)source; stream.avail_in = (uInt)sourceLen; /* Check for source > 64K on 16-bit machine: */ if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR; diff -Nru nodejs-0.11.13/deps/zlib/watcom/watcom_f.mak nodejs-0.11.15/deps/zlib/watcom/watcom_f.mak --- nodejs-0.11.13/deps/zlib/watcom/watcom_f.mak 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/watcom/watcom_f.mak 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +# Makefile for zlib +# OpenWatcom flat model +# Last updated: 28-Dec-2005 + +# To use, do "wmake -f watcom_f.mak" + +C_SOURCE = adler32.c compress.c crc32.c deflate.c & + gzclose.c gzlib.c gzread.c gzwrite.c & + infback.c inffast.c inflate.c inftrees.c & + trees.c uncompr.c zutil.c + +OBJS = adler32.obj compress.obj crc32.obj deflate.obj & + gzclose.obj gzlib.obj gzread.obj gzwrite.obj & + infback.obj inffast.obj inflate.obj inftrees.obj & + trees.obj uncompr.obj zutil.obj + +CC = wcc386 +LINKER = wcl386 +CFLAGS = -zq -mf -3r -fp3 -s -bt=dos -oilrtfm -fr=nul -wx +ZLIB_LIB = zlib_f.lib + +.C.OBJ: + $(CC) $(CFLAGS) $[@ + +all: $(ZLIB_LIB) example.exe minigzip.exe + +$(ZLIB_LIB): $(OBJS) + wlib -b -c $(ZLIB_LIB) -+adler32.obj -+compress.obj -+crc32.obj + wlib -b -c 
$(ZLIB_LIB) -+gzclose.obj -+gzlib.obj -+gzread.obj -+gzwrite.obj + wlib -b -c $(ZLIB_LIB) -+deflate.obj -+infback.obj + wlib -b -c $(ZLIB_LIB) -+inffast.obj -+inflate.obj -+inftrees.obj + wlib -b -c $(ZLIB_LIB) -+trees.obj -+uncompr.obj -+zutil.obj + +example.exe: $(ZLIB_LIB) example.obj + $(LINKER) -ldos32a -fe=example.exe example.obj $(ZLIB_LIB) + +minigzip.exe: $(ZLIB_LIB) minigzip.obj + $(LINKER) -ldos32a -fe=minigzip.exe minigzip.obj $(ZLIB_LIB) + +clean: .SYMBOLIC + del *.obj + del $(ZLIB_LIB) + @echo Cleaning done diff -Nru nodejs-0.11.13/deps/zlib/watcom/watcom_l.mak nodejs-0.11.15/deps/zlib/watcom/watcom_l.mak --- nodejs-0.11.13/deps/zlib/watcom/watcom_l.mak 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/watcom/watcom_l.mak 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,43 @@ +# Makefile for zlib +# OpenWatcom large model +# Last updated: 28-Dec-2005 + +# To use, do "wmake -f watcom_l.mak" + +C_SOURCE = adler32.c compress.c crc32.c deflate.c & + gzclose.c gzlib.c gzread.c gzwrite.c & + infback.c inffast.c inflate.c inftrees.c & + trees.c uncompr.c zutil.c + +OBJS = adler32.obj compress.obj crc32.obj deflate.obj & + gzclose.obj gzlib.obj gzread.obj gzwrite.obj & + infback.obj inffast.obj inflate.obj inftrees.obj & + trees.obj uncompr.obj zutil.obj + +CC = wcc +LINKER = wcl +CFLAGS = -zq -ml -s -bt=dos -oilrtfm -fr=nul -wx +ZLIB_LIB = zlib_l.lib + +.C.OBJ: + $(CC) $(CFLAGS) $[@ + +all: $(ZLIB_LIB) example.exe minigzip.exe + +$(ZLIB_LIB): $(OBJS) + wlib -b -c $(ZLIB_LIB) -+adler32.obj -+compress.obj -+crc32.obj + wlib -b -c $(ZLIB_LIB) -+gzclose.obj -+gzlib.obj -+gzread.obj -+gzwrite.obj + wlib -b -c $(ZLIB_LIB) -+deflate.obj -+infback.obj + wlib -b -c $(ZLIB_LIB) -+inffast.obj -+inflate.obj -+inftrees.obj + wlib -b -c $(ZLIB_LIB) -+trees.obj -+uncompr.obj -+zutil.obj + +example.exe: $(ZLIB_LIB) example.obj + $(LINKER) -fe=example.exe example.obj $(ZLIB_LIB) + +minigzip.exe: $(ZLIB_LIB) minigzip.obj + $(LINKER) -fe=minigzip.exe minigzip.obj 
$(ZLIB_LIB) + +clean: .SYMBOLIC + del *.obj + del $(ZLIB_LIB) + @echo Cleaning done diff -Nru nodejs-0.11.13/deps/zlib/win32/DLL_FAQ.txt nodejs-0.11.15/deps/zlib/win32/DLL_FAQ.txt --- nodejs-0.11.13/deps/zlib/win32/DLL_FAQ.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/DLL_FAQ.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,397 @@ + + Frequently Asked Questions about ZLIB1.DLL + + +This document describes the design, the rationale, and the usage +of the official DLL build of zlib, named ZLIB1.DLL. If you have +general questions about zlib, you should see the file "FAQ" found +in the zlib distribution, or at the following location: + http://www.gzip.org/zlib/zlib_faq.html + + + 1. What is ZLIB1.DLL, and how can I get it? + + - ZLIB1.DLL is the official build of zlib as a DLL. + (Please remark the character '1' in the name.) + + Pointers to a precompiled ZLIB1.DLL can be found in the zlib + web site at: + http://www.zlib.net/ + + Applications that link to ZLIB1.DLL can rely on the following + specification: + + * The exported symbols are exclusively defined in the source + files "zlib.h" and "zlib.def", found in an official zlib + source distribution. + * The symbols are exported by name, not by ordinal. + * The exported names are undecorated. + * The calling convention of functions is "C" (CDECL). + * The ZLIB1.DLL binary is linked to MSVCRT.DLL. + + The archive in which ZLIB1.DLL is bundled contains compiled + test programs that must run with a valid build of ZLIB1.DLL. + It is recommended to download the prebuilt DLL from the zlib + web site, instead of building it yourself, to avoid potential + incompatibilities that could be introduced by your compiler + and build settings. If you do build the DLL yourself, please + make sure that it complies with all the above requirements, + and it runs with the precompiled test programs, bundled with + the original ZLIB1.DLL distribution. 
+ + If, for any reason, you need to build an incompatible DLL, + please use a different file name. + + + 2. Why did you change the name of the DLL to ZLIB1.DLL? + What happened to the old ZLIB.DLL? + + - The old ZLIB.DLL, built from zlib-1.1.4 or earlier, required + compilation settings that were incompatible to those used by + a static build. The DLL settings were supposed to be enabled + by defining the macro ZLIB_DLL, before including "zlib.h". + Incorrect handling of this macro was silently accepted at + build time, resulting in two major problems: + + * ZLIB_DLL was missing from the old makefile. When building + the DLL, not all people added it to the build options. In + consequence, incompatible incarnations of ZLIB.DLL started + to circulate around the net. + + * When switching from using the static library to using the + DLL, applications had to define the ZLIB_DLL macro and + to recompile all the sources that contained calls to zlib + functions. Failure to do so resulted in creating binaries + that were unable to run with the official ZLIB.DLL build. + + The only possible solution that we could foresee was to make + a binary-incompatible change in the DLL interface, in order to + remove the dependency on the ZLIB_DLL macro, and to release + the new DLL under a different name. + + We chose the name ZLIB1.DLL, where '1' indicates the major + zlib version number. We hope that we will not have to break + the binary compatibility again, at least not as long as the + zlib-1.x series will last. + + There is still a ZLIB_DLL macro, that can trigger a more + efficient build and use of the DLL, but compatibility no + longer dependents on it. + + + 3. Can I build ZLIB.DLL from the new zlib sources, and replace + an old ZLIB.DLL, that was built from zlib-1.1.4 or earlier? + + - In principle, you can do it by assigning calling convention + keywords to the macros ZEXPORT and ZEXPORTVA. 
In practice, + it depends on what you mean by "an old ZLIB.DLL", because the + old DLL exists in several mutually-incompatible versions. + You have to find out first what kind of calling convention is + being used in your particular ZLIB.DLL build, and to use the + same one in the new build. If you don't know what this is all + about, you might be better off if you would just leave the old + DLL intact. + + + 4. Can I compile my application using the new zlib interface, and + link it to an old ZLIB.DLL, that was built from zlib-1.1.4 or + earlier? + + - The official answer is "no"; the real answer depends again on + what kind of ZLIB.DLL you have. Even if you are lucky, this + course of action is unreliable. + + If you rebuild your application and you intend to use a newer + version of zlib (post- 1.1.4), it is strongly recommended to + link it to the new ZLIB1.DLL. + + + 5. Why are the zlib symbols exported by name, and not by ordinal? + + - Although exporting symbols by ordinal is a little faster, it + is risky. Any single glitch in the maintenance or use of the + DEF file that contains the ordinals can result in incompatible + builds and frustrating crashes. Simply put, the benefits of + exporting symbols by ordinal do not justify the risks. + + Technically, it should be possible to maintain ordinals in + the DEF file, and still export the symbols by name. Ordinals + exist in every DLL, and even if the dynamic linking performed + at the DLL startup is searching for names, ordinals serve as + hints, for a faster name lookup. However, if the DEF file + contains ordinals, the Microsoft linker automatically builds + an implib that will cause the executables linked to it to use + those ordinals, and not the names. It is interesting to + notice that the GNU linker for Win32 does not suffer from this + problem. + + It is possible to avoid the DEF file if the exported symbols + are accompanied by a "__declspec(dllexport)" attribute in the + source files. 
You can do this in zlib by predefining the + ZLIB_DLL macro. + + + 6. I see that the ZLIB1.DLL functions use the "C" (CDECL) calling + convention. Why not use the STDCALL convention? + STDCALL is the standard convention in Win32, and I need it in + my Visual Basic project! + + (For readability, we use CDECL to refer to the convention + triggered by the "__cdecl" keyword, STDCALL to refer to + the convention triggered by "__stdcall", and FASTCALL to + refer to the convention triggered by "__fastcall".) + + - Most of the native Windows API functions (without varargs) use + indeed the WINAPI convention (which translates to STDCALL in + Win32), but the standard C functions use CDECL. If a user + application is intrinsically tied to the Windows API (e.g. + it calls native Windows API functions such as CreateFile()), + sometimes it makes sense to decorate its own functions with + WINAPI. But if ANSI C or POSIX portability is a goal (e.g. + it calls standard C functions such as fopen()), it is not a + sound decision to request the inclusion of , or to + use non-ANSI constructs, for the sole purpose to make the user + functions STDCALL-able. + + The functionality offered by zlib is not in the category of + "Windows functionality", but is more like "C functionality". + + Technically, STDCALL is not bad; in fact, it is slightly + faster than CDECL, and it works with variable-argument + functions, just like CDECL. It is unfortunate that, in spite + of using STDCALL in the Windows API, it is not the default + convention used by the C compilers that run under Windows. + The roots of the problem reside deep inside the unsafety of + the K&R-style function prototypes, where the argument types + are not specified; but that is another story for another day. + + The remaining fact is that CDECL is the default convention. + Even if an explicit convention is hard-coded into the function + prototypes inside C headers, problems may appear. 
The + necessity to expose the convention in users' callbacks is one + of these problems. + + The calling convention issues are also important when using + zlib in other programming languages. Some of them, like Ada + (GNAT) and Fortran (GNU G77), have C bindings implemented + initially on Unix, and relying on the C calling convention. + On the other hand, the pre- .NET versions of Microsoft Visual + Basic require STDCALL, while Borland Delphi prefers, although + it does not require, FASTCALL. + + In fairness to all possible uses of zlib outside the C + programming language, we choose the default "C" convention. + Anyone interested in different bindings or conventions is + encouraged to maintain specialized projects. The "contrib/" + directory from the zlib distribution already holds a couple + of foreign bindings, such as Ada, C++, and Delphi. + + + 7. I need a DLL for my Visual Basic project. What can I do? + + - Define the ZLIB_WINAPI macro before including "zlib.h", when + building both the DLL and the user application (except that + you don't need to define anything when using the DLL in Visual + Basic). The ZLIB_WINAPI macro will switch on the WINAPI + (STDCALL) convention. The name of this DLL must be different + than the official ZLIB1.DLL. + + Gilles Vollant has contributed a build named ZLIBWAPI.DLL, + with the ZLIB_WINAPI macro turned on, and with the minizip + functionality built in. For more information, please read + the notes inside "contrib/vstudio/readme.txt", found in the + zlib distribution. + + + 8. I need to use zlib in my Microsoft .NET project. What can I + do? + + - Henrik Ravn has contributed a .NET wrapper around zlib. Look + into contrib/dotzlib/, inside the zlib distribution. + + + 9. If my application uses ZLIB1.DLL, should I link it to + MSVCRT.DLL? Why? + + - It is not required, but it is recommended to link your + application to MSVCRT.DLL, if it uses ZLIB1.DLL. + + The executables (.EXE, .DLL, etc.) 
that are involved in the + same process and are using the C run-time library (i.e. they + are calling standard C functions), must link to the same + library. There are several libraries in the Win32 system: + CRTDLL.DLL, MSVCRT.DLL, the static C libraries, etc. + Since ZLIB1.DLL is linked to MSVCRT.DLL, the executables that + depend on it should also be linked to MSVCRT.DLL. + + +10. Why are you saying that ZLIB1.DLL and my application should + be linked to the same C run-time (CRT) library? I linked my + application and my DLLs to different C libraries (e.g. my + application to a static library, and my DLLs to MSVCRT.DLL), + and everything works fine. + + - If a user library invokes only pure Win32 API (accessible via + and the related headers), its DLL build will work + in any context. But if this library invokes standard C API, + things get more complicated. + + There is a single Win32 library in a Win32 system. Every + function in this library resides in a single DLL module, that + is safe to call from anywhere. On the other hand, there are + multiple versions of the C library, and each of them has its + own separate internal state. Standalone executables and user + DLLs that call standard C functions must link to a C run-time + (CRT) library, be it static or shared (DLL). Intermixing + occurs when an executable (not necessarily standalone) and a + DLL are linked to different CRTs, and both are running in the + same process. + + Intermixing multiple CRTs is possible, as long as their + internal states are kept intact. The Microsoft Knowledge Base + articles KB94248 "HOWTO: Use the C Run-Time" and KB140584 + "HOWTO: Link with the Correct C Run-Time (CRT) Library" + mention the potential problems raised by intermixing. + + If intermixing works for you, it's because your application + and DLLs are avoiding the corruption of each of the CRTs' + internal states, maybe by careful design, or maybe by fortune. 
+ + Also note that linking ZLIB1.DLL to non-Microsoft CRTs, such + as those provided by Borland, raises similar problems. + + +11. Why are you linking ZLIB1.DLL to MSVCRT.DLL? + + - MSVCRT.DLL exists on every Windows 95 with a new service pack + installed, or with Microsoft Internet Explorer 4 or later, and + on all other Windows 4.x or later (Windows 98, Windows NT 4, + or later). It is freely distributable; if not present in the + system, it can be downloaded from Microsoft or from other + software provider for free. + + The fact that MSVCRT.DLL does not exist on a virgin Windows 95 + is not so problematic. Windows 95 is scarcely found nowadays, + Microsoft ended its support a long time ago, and many recent + applications from various vendors, including Microsoft, do not + even run on it. Furthermore, no serious user should run + Windows 95 without a proper update installed. + + +12. Why are you not linking ZLIB1.DLL to + <> ? + + - We considered and abandoned the following alternatives: + + * Linking ZLIB1.DLL to a static C library (LIBC.LIB, or + LIBCMT.LIB) is not a good option. People are using the DLL + mainly to save disk space. If you are linking your program + to a static C library, you may as well consider linking zlib + in statically, too. + + * Linking ZLIB1.DLL to CRTDLL.DLL looks appealing, because + CRTDLL.DLL is present on every Win32 installation. + Unfortunately, it has a series of problems: it does not + work properly with Microsoft's C++ libraries, it does not + provide support for 64-bit file offsets, (and so on...), + and Microsoft discontinued its support a long time ago. + + * Linking ZLIB1.DLL to MSVCR70.DLL or MSVCR71.DLL, supplied + with the Microsoft .NET platform, and Visual C++ 7.0/7.1, + raises problems related to the status of ZLIB1.DLL as a + system component. 
According to the Microsoft Knowledge Base + article KB326922 "INFO: Redistribution of the Shared C + Runtime Component in Visual C++ .NET", MSVCR70.DLL and + MSVCR71.DLL are not supposed to function as system DLLs, + because they may clash with MSVCRT.DLL. Instead, the + application's installer is supposed to put these DLLs + (if needed) in the application's private directory. + If ZLIB1.DLL depends on a non-system runtime, it cannot + function as a redistributable system component. + + * Linking ZLIB1.DLL to non-Microsoft runtimes, such as + Borland's, or Cygwin's, raises problems related to the + reliable presence of these runtimes on Win32 systems. + It's easier to let the DLL build of zlib up to the people + who distribute these runtimes, and who may proceed as + explained in the answer to Question 14. + + +13. If ZLIB1.DLL cannot be linked to MSVCR70.DLL or MSVCR71.DLL, + how can I build/use ZLIB1.DLL in Microsoft Visual C++ 7.0 + (Visual Studio .NET) or newer? + + - Due to the problems explained in the Microsoft Knowledge Base + article KB326922 (see the previous answer), the C runtime that + comes with the VC7 environment is no longer considered a + system component. That is, it should not be assumed that this + runtime exists, or may be installed in a system directory. + Since ZLIB1.DLL is supposed to be a system component, it may + not depend on a non-system component. + + In order to link ZLIB1.DLL and your application to MSVCRT.DLL + in VC7, you need the library of Visual C++ 6.0 or older. If + you don't have this library at hand, it's probably best not to + use ZLIB1.DLL. + + We are hoping that, in the future, Microsoft will provide a + way to build applications linked to a proper system runtime, + from the Visual C++ environment. Until then, you have a + couple of alternatives, such as linking zlib in statically. + If your application requires dynamic linking, you may proceed + as explained in the answer to Question 14. + + +14. 
I need to link my own DLL build to a CRT different than + MSVCRT.DLL. What can I do? + + - Feel free to rebuild the DLL from the zlib sources, and link + it the way you want. You should, however, clearly state that + your build is unofficial. You should give it a different file + name, and/or install it in a private directory that can be + accessed by your application only, and is not visible to the + others (i.e. it's neither in the PATH, nor in the SYSTEM or + SYSTEM32 directories). Otherwise, your build may clash with + applications that link to the official build. + + For example, in Cygwin, zlib is linked to the Cygwin runtime + CYGWIN1.DLL, and it is distributed under the name CYGZ.DLL. + + +15. May I include additional pieces of code that I find useful, + link them in ZLIB1.DLL, and export them? + + - No. A legitimate build of ZLIB1.DLL must not include code + that does not originate from the official zlib source code. + But you can make your own private DLL build, under a different + file name, as suggested in the previous answer. + + For example, zlib is a part of the VCL library, distributed + with Borland Delphi and C++ Builder. The DLL build of VCL + is a redistributable file, named VCLxx.DLL. + + +16. May I remove some functionality out of ZLIB1.DLL, by enabling + macros like NO_GZCOMPRESS or NO_GZIP at compile time? + + - No. A legitimate build of ZLIB1.DLL must provide the complete + zlib functionality, as implemented in the official zlib source + code. But you can make your own private DLL build, under a + different file name, as suggested in the previous answer. + + +17. I made my own ZLIB1.DLL build. Can I test it for compliance? + + - We prefer that you download the official DLL from the zlib + web site. If you need something peculiar from this DLL, you + can send your suggestion to the zlib mailing list. + + However, in case you do rebuild the DLL yourself, you can run + it with the test programs found in the DLL distribution. 
+ Running these test programs is not a guarantee of compliance, + but a failure can imply a detected problem. + +** + +This document is written and maintained by +Cosmin Truta diff -Nru nodejs-0.11.13/deps/zlib/win32/Makefile.bor nodejs-0.11.15/deps/zlib/win32/Makefile.bor --- nodejs-0.11.13/deps/zlib/win32/Makefile.bor 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/Makefile.bor 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,110 @@ +# Makefile for zlib +# Borland C++ for Win32 +# +# Usage: +# make -f win32/Makefile.bor +# make -f win32/Makefile.bor LOCAL_ZLIB=-DASMV OBJA=match.obj OBJPA=+match.obj + +# ------------ Borland C++ ------------ + +# Optional nonstandard preprocessor flags (e.g. -DMAX_MEM_LEVEL=7) +# should be added to the environment via "set LOCAL_ZLIB=-DFOO" or +# added to the declaration of LOC here: +LOC = $(LOCAL_ZLIB) + +CC = bcc32 +AS = bcc32 +LD = bcc32 +AR = tlib +CFLAGS = -a -d -k- -O2 $(LOC) +ASFLAGS = $(LOC) +LDFLAGS = $(LOC) + + +# variables +ZLIB_LIB = zlib.lib + +OBJ1 = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj +OBJ2 = gzwrite.obj infback.obj inffast.obj inflate.obj inftrees.obj trees.obj uncompr.obj zutil.obj +#OBJA = +OBJP1 = +adler32.obj+compress.obj+crc32.obj+deflate.obj+gzclose.obj+gzlib.obj+gzread.obj +OBJP2 = +gzwrite.obj+infback.obj+inffast.obj+inflate.obj+inftrees.obj+trees.obj+uncompr.obj+zutil.obj +#OBJPA= + + +# targets +all: $(ZLIB_LIB) example.exe minigzip.exe + +.c.obj: + $(CC) -c $(CFLAGS) $< + +.asm.obj: + $(AS) -c $(ASFLAGS) $< + +adler32.obj: adler32.c zlib.h zconf.h + +compress.obj: compress.c zlib.h zconf.h + +crc32.obj: crc32.c zlib.h zconf.h crc32.h + +deflate.obj: deflate.c deflate.h zutil.h zlib.h zconf.h + +gzclose.obj: gzclose.c zlib.h zconf.h gzguts.h + +gzlib.obj: gzlib.c zlib.h zconf.h gzguts.h + +gzread.obj: gzread.c zlib.h zconf.h gzguts.h + +gzwrite.obj: gzwrite.c zlib.h zconf.h gzguts.h + +infback.obj: infback.c zutil.h zlib.h zconf.h 
inftrees.h inflate.h \ + inffast.h inffixed.h + +inffast.obj: inffast.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h + +inflate.obj: inflate.c zutil.h zlib.h zconf.h inftrees.h inflate.h \ + inffast.h inffixed.h + +inftrees.obj: inftrees.c zutil.h zlib.h zconf.h inftrees.h + +trees.obj: trees.c zutil.h zlib.h zconf.h deflate.h trees.h + +uncompr.obj: uncompr.c zlib.h zconf.h + +zutil.obj: zutil.c zutil.h zlib.h zconf.h + +example.obj: test/example.c zlib.h zconf.h + +minigzip.obj: test/minigzip.c zlib.h zconf.h + + +# For the sake of the old Borland make, +# the command line is cut to fit in the MS-DOS 128 byte limit: +$(ZLIB_LIB): $(OBJ1) $(OBJ2) $(OBJA) + -del $(ZLIB_LIB) + $(AR) $(ZLIB_LIB) $(OBJP1) + $(AR) $(ZLIB_LIB) $(OBJP2) + $(AR) $(ZLIB_LIB) $(OBJPA) + + +# testing +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +example.exe: example.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) example.obj $(ZLIB_LIB) + +minigzip.exe: minigzip.obj $(ZLIB_LIB) + $(LD) $(LDFLAGS) minigzip.obj $(ZLIB_LIB) + + +# cleanup +clean: + -del $(ZLIB_LIB) + -del *.obj + -del *.exe + -del *.tds + -del zlib.bak + -del foo.gz diff -Nru nodejs-0.11.13/deps/zlib/win32/Makefile.gcc nodejs-0.11.15/deps/zlib/win32/Makefile.gcc --- nodejs-0.11.13/deps/zlib/win32/Makefile.gcc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/Makefile.gcc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,182 @@ +# Makefile for zlib, derived from Makefile.dj2. +# Modified for mingw32 by C. Spieler, 6/16/98. +# Updated for zlib 1.2.x by Christian Spieler and Cosmin Truta, Mar-2003. +# Last updated: Mar 2012. +# Tested under Cygwin and MinGW. + +# Copyright (C) 1995-2003 Jean-loup Gailly. 
+# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile, or to compile and test, type from the top level zlib directory: +# +# make -fwin32/Makefile.gcc; make test testdll -fwin32/Makefile.gcc +# +# To use the asm code, type: +# cp contrib/asm?86/match.S ./match.S +# make LOC=-DASMV OBJA=match.o -fwin32/Makefile.gcc +# +# To install libz.a, zconf.h and zlib.h in the system directories, type: +# +# make install -fwin32/Makefile.gcc +# +# BINARY_PATH, INCLUDE_PATH and LIBRARY_PATH must be set. +# +# To install the shared lib, append SHARED_MODE=1 to the make command : +# +# make install -fwin32/Makefile.gcc SHARED_MODE=1 + +# Note: +# If the platform is *not* MinGW (e.g. it is Cygwin or UWIN), +# the DLL name should be changed from "zlib1.dll". + +STATICLIB = libz.a +SHAREDLIB = zlib1.dll +IMPLIB = libz.dll.a + +# +# Set to 1 if shared object needs to be installed +# +SHARED_MODE=0 + +#LOC = -DASMV +#LOC = -DDEBUG -g + +PREFIX = +CC = $(PREFIX)gcc +CFLAGS = $(LOC) -O3 -Wall + +AS = $(CC) +ASFLAGS = $(LOC) -Wall + +LD = $(CC) +LDFLAGS = $(LOC) + +AR = $(PREFIX)ar +ARFLAGS = rcs + +RC = $(PREFIX)windres +RCFLAGS = --define GCC_WINDRES + +STRIP = $(PREFIX)strip + +CP = cp -fp +# If GNU install is available, replace $(CP) with install. 
+INSTALL = $(CP) +RM = rm -f + +prefix ?= /usr/local +exec_prefix = $(prefix) + +OBJS = adler32.o compress.o crc32.o deflate.o gzclose.o gzlib.o gzread.o \ + gzwrite.o infback.o inffast.o inflate.o inftrees.o trees.o uncompr.o zutil.o +OBJA = + +all: $(STATICLIB) $(SHAREDLIB) $(IMPLIB) example.exe minigzip.exe example_d.exe minigzip_d.exe + +test: example.exe minigzip.exe + ./example + echo hello world | ./minigzip | ./minigzip -d + +testdll: example_d.exe minigzip_d.exe + ./example_d + echo hello world | ./minigzip_d | ./minigzip_d -d + +.c.o: + $(CC) $(CFLAGS) -c -o $@ $< + +.S.o: + $(AS) $(ASFLAGS) -c -o $@ $< + +$(STATICLIB): $(OBJS) $(OBJA) + $(AR) $(ARFLAGS) $@ $(OBJS) $(OBJA) + +$(IMPLIB): $(SHAREDLIB) + +$(SHAREDLIB): win32/zlib.def $(OBJS) $(OBJA) zlibrc.o + $(CC) -shared -Wl,--out-implib,$(IMPLIB) $(LDFLAGS) \ + -o $@ win32/zlib.def $(OBJS) $(OBJA) zlibrc.o + $(STRIP) $@ + +example.exe: example.o $(STATICLIB) + $(LD) $(LDFLAGS) -o $@ example.o $(STATICLIB) + $(STRIP) $@ + +minigzip.exe: minigzip.o $(STATICLIB) + $(LD) $(LDFLAGS) -o $@ minigzip.o $(STATICLIB) + $(STRIP) $@ + +example_d.exe: example.o $(IMPLIB) + $(LD) $(LDFLAGS) -o $@ example.o $(IMPLIB) + $(STRIP) $@ + +minigzip_d.exe: minigzip.o $(IMPLIB) + $(LD) $(LDFLAGS) -o $@ minigzip.o $(IMPLIB) + $(STRIP) $@ + +example.o: test/example.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/example.c + +minigzip.o: test/minigzip.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. 
-c -o $@ test/minigzip.c + +zlibrc.o: win32/zlib1.rc + $(RC) $(RCFLAGS) -o $@ win32/zlib1.rc + +.PHONY: install uninstall clean + +install: zlib.h zconf.h $(STATICLIB) $(IMPLIB) + @if test -z "$(DESTDIR)$(INCLUDE_PATH)" -o -z "$(DESTDIR)$(LIBRARY_PATH)" -o -z "$(DESTDIR)$(BINARY_PATH)"; then \ + echo INCLUDE_PATH, LIBRARY_PATH, and BINARY_PATH must be specified; \ + exit 1; \ + fi + -@mkdir -p '$(DESTDIR)$(INCLUDE_PATH)' + -@mkdir -p '$(DESTDIR)$(LIBRARY_PATH)' '$(DESTDIR)$(LIBRARY_PATH)'/pkgconfig + -if [ "$(SHARED_MODE)" = "1" ]; then \ + mkdir -p '$(DESTDIR)$(BINARY_PATH)'; \ + $(INSTALL) $(SHAREDLIB) '$(DESTDIR)$(BINARY_PATH)'; \ + $(INSTALL) $(IMPLIB) '$(DESTDIR)$(LIBRARY_PATH)'; \ + fi + -$(INSTALL) zlib.h '$(DESTDIR)$(INCLUDE_PATH)' + -$(INSTALL) zconf.h '$(DESTDIR)$(INCLUDE_PATH)' + -$(INSTALL) $(STATICLIB) '$(DESTDIR)$(LIBRARY_PATH)' + sed \ + -e 's|@prefix@|${prefix}|g' \ + -e 's|@exec_prefix@|${exec_prefix}|g' \ + -e 's|@libdir@|$(LIBRARY_PATH)|g' \ + -e 's|@sharedlibdir@|$(LIBRARY_PATH)|g' \ + -e 's|@includedir@|$(INCLUDE_PATH)|g' \ + -e 's|@VERSION@|'`sed -n -e '/VERSION "/s/.*"\(.*\)".*/\1/p' zlib.h`'|g' \ + zlib.pc.in > '$(DESTDIR)$(LIBRARY_PATH)'/pkgconfig/zlib.pc + +uninstall: + -if [ "$(SHARED_MODE)" = "1" ]; then \ + $(RM) '$(DESTDIR)$(BINARY_PATH)'/$(SHAREDLIB); \ + $(RM) '$(DESTDIR)$(LIBRARY_PATH)'/$(IMPLIB); \ + fi + -$(RM) '$(DESTDIR)$(INCLUDE_PATH)'/zlib.h + -$(RM) '$(DESTDIR)$(INCLUDE_PATH)'/zconf.h + -$(RM) '$(DESTDIR)$(LIBRARY_PATH)'/$(STATICLIB) + +clean: + -$(RM) $(STATICLIB) + -$(RM) $(SHAREDLIB) + -$(RM) $(IMPLIB) + -$(RM) *.o + -$(RM) *.exe + -$(RM) foo.gz + +adler32.o: zlib.h zconf.h +compress.o: zlib.h zconf.h +crc32.o: crc32.h zlib.h zconf.h +deflate.o: deflate.h zutil.h zlib.h zconf.h +gzclose.o: zlib.h zconf.h gzguts.h +gzlib.o: zlib.h zconf.h gzguts.h +gzread.o: zlib.h zconf.h gzguts.h +gzwrite.o: zlib.h zconf.h gzguts.h +inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inflate.o: zutil.h zlib.h zconf.h 
inftrees.h inflate.h inffast.h +infback.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inftrees.o: zutil.h zlib.h zconf.h inftrees.h +trees.o: deflate.h zutil.h zlib.h zconf.h trees.h +uncompr.o: zlib.h zconf.h +zutil.o: zutil.h zlib.h zconf.h diff -Nru nodejs-0.11.13/deps/zlib/win32/Makefile.msc nodejs-0.11.15/deps/zlib/win32/Makefile.msc --- nodejs-0.11.13/deps/zlib/win32/Makefile.msc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/Makefile.msc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,163 @@ +# Makefile for zlib using Microsoft (Visual) C +# zlib is copyright (C) 1995-2006 Jean-loup Gailly and Mark Adler +# +# Usage: +# nmake -f win32/Makefile.msc (standard build) +# nmake -f win32/Makefile.msc LOC=-DFOO (nonstandard build) +# nmake -f win32/Makefile.msc LOC="-DASMV -DASMINF" \ +# OBJA="inffas32.obj match686.obj" (use ASM code, x86) +# nmake -f win32/Makefile.msc AS=ml64 LOC="-DASMV -DASMINF -I." \ +# OBJA="inffasx64.obj gvmat64.obj inffas8664.obj" (use ASM code, x64) + +# The toplevel directory of the source tree. +# +TOP = . 
+ +# optional build flags +LOC = + +# variables +STATICLIB = zlib.lib +SHAREDLIB = zlib1.dll +IMPLIB = zdll.lib + +CC = cl +AS = ml +LD = link +AR = lib +RC = rc +CFLAGS = -nologo -MD -W3 -O2 -Oy- -Zi -Fd"zlib" $(LOC) +WFLAGS = -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE +ASFLAGS = -coff -Zi $(LOC) +LDFLAGS = -nologo -debug -incremental:no -opt:ref +ARFLAGS = -nologo +RCFLAGS = /dWIN32 /r + +OBJS = adler32.obj compress.obj crc32.obj deflate.obj gzclose.obj gzlib.obj gzread.obj \ + gzwrite.obj infback.obj inflate.obj inftrees.obj inffast.obj trees.obj uncompr.obj zutil.obj +OBJA = + + +# targets +all: $(STATICLIB) $(SHAREDLIB) $(IMPLIB) \ + example.exe minigzip.exe example_d.exe minigzip_d.exe + +$(STATICLIB): $(OBJS) $(OBJA) + $(AR) $(ARFLAGS) -out:$@ $(OBJS) $(OBJA) + +$(IMPLIB): $(SHAREDLIB) + +$(SHAREDLIB): $(TOP)/win32/zlib.def $(OBJS) $(OBJA) zlib1.res + $(LD) $(LDFLAGS) -def:$(TOP)/win32/zlib.def -dll -implib:$(IMPLIB) \ + -out:$@ -base:0x5A4C0000 $(OBJS) $(OBJA) zlib1.res + if exist $@.manifest \ + mt -nologo -manifest $@.manifest -outputresource:$@;2 + +example.exe: example.obj $(STATICLIB) + $(LD) $(LDFLAGS) example.obj $(STATICLIB) + if exist $@.manifest \ + mt -nologo -manifest $@.manifest -outputresource:$@;1 + +minigzip.exe: minigzip.obj $(STATICLIB) + $(LD) $(LDFLAGS) minigzip.obj $(STATICLIB) + if exist $@.manifest \ + mt -nologo -manifest $@.manifest -outputresource:$@;1 + +example_d.exe: example.obj $(IMPLIB) + $(LD) $(LDFLAGS) -out:$@ example.obj $(IMPLIB) + if exist $@.manifest \ + mt -nologo -manifest $@.manifest -outputresource:$@;1 + +minigzip_d.exe: minigzip.obj $(IMPLIB) + $(LD) $(LDFLAGS) -out:$@ minigzip.obj $(IMPLIB) + if exist $@.manifest \ + mt -nologo -manifest $@.manifest -outputresource:$@;1 + +{$(TOP)}.c.obj: + $(CC) -c $(WFLAGS) $(CFLAGS) $< + +{$(TOP)/test}.c.obj: + $(CC) -c -I$(TOP) $(WFLAGS) $(CFLAGS) $< + +{$(TOP)/contrib/masmx64}.c.obj: + $(CC) -c $(WFLAGS) $(CFLAGS) $< + +{$(TOP)/contrib/masmx64}.asm.obj: + $(AS) 
-c $(ASFLAGS) $< + +{$(TOP)/contrib/masmx86}.asm.obj: + $(AS) -c $(ASFLAGS) $< + +adler32.obj: $(TOP)/adler32.c $(TOP)/zlib.h $(TOP)/zconf.h + +compress.obj: $(TOP)/compress.c $(TOP)/zlib.h $(TOP)/zconf.h + +crc32.obj: $(TOP)/crc32.c $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/crc32.h + +deflate.obj: $(TOP)/deflate.c $(TOP)/deflate.h $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h + +gzclose.obj: $(TOP)/gzclose.c $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/gzguts.h + +gzlib.obj: $(TOP)/gzlib.c $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/gzguts.h + +gzread.obj: $(TOP)/gzread.c $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/gzguts.h + +gzwrite.obj: $(TOP)/gzwrite.c $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/gzguts.h + +infback.obj: $(TOP)/infback.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/inftrees.h $(TOP)/inflate.h \ + $(TOP)/inffast.h $(TOP)/inffixed.h + +inffast.obj: $(TOP)/inffast.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/inftrees.h $(TOP)/inflate.h \ + $(TOP)/inffast.h + +inflate.obj: $(TOP)/inflate.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/inftrees.h $(TOP)/inflate.h \ + $(TOP)/inffast.h $(TOP)/inffixed.h + +inftrees.obj: $(TOP)/inftrees.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/inftrees.h + +trees.obj: $(TOP)/trees.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h $(TOP)/deflate.h $(TOP)/trees.h + +uncompr.obj: $(TOP)/uncompr.c $(TOP)/zlib.h $(TOP)/zconf.h + +zutil.obj: $(TOP)/zutil.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h + +gvmat64.obj: $(TOP)/contrib\masmx64\gvmat64.asm + +inffasx64.obj: $(TOP)/contrib\masmx64\inffasx64.asm + +inffas8664.obj: $(TOP)/contrib\masmx64\inffas8664.c $(TOP)/zutil.h $(TOP)/zlib.h $(TOP)/zconf.h \ + $(TOP)/inftrees.h $(TOP)/inflate.h $(TOP)/inffast.h + +inffas32.obj: $(TOP)/contrib\masmx86\inffas32.asm + +match686.obj: $(TOP)/contrib\masmx86\match686.asm + +example.obj: $(TOP)/test/example.c $(TOP)/zlib.h $(TOP)/zconf.h + +minigzip.obj: $(TOP)/test/minigzip.c $(TOP)/zlib.h $(TOP)/zconf.h + +zlib1.res: $(TOP)/win32/zlib1.rc + $(RC) 
$(RCFLAGS) /fo$@ $(TOP)/win32/zlib1.rc + +# testing +test: example.exe minigzip.exe + example + echo hello world | minigzip | minigzip -d + +testdll: example_d.exe minigzip_d.exe + example_d + echo hello world | minigzip_d | minigzip_d -d + + +# cleanup +clean: + -del $(STATICLIB) + -del $(SHAREDLIB) + -del $(IMPLIB) + -del *.obj + -del *.res + -del *.exp + -del *.exe + -del *.pdb + -del *.manifest + -del foo.gz diff -Nru nodejs-0.11.13/deps/zlib/win32/README-WIN32.txt nodejs-0.11.15/deps/zlib/win32/README-WIN32.txt --- nodejs-0.11.13/deps/zlib/win32/README-WIN32.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/README-WIN32.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,103 @@ +ZLIB DATA COMPRESSION LIBRARY + +zlib 1.2.8 is a general purpose data compression library. All the code is +thread safe. The data format used by the zlib library is described by RFCs +(Request for Comments) 1950 to 1952 in the files +http://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) +and rfc1952.txt (gzip format). + +All functions of the compression library are documented in the file zlib.h +(volunteer to write man pages welcome, contact zlib@gzip.org). Two compiled +examples are distributed in this package, example and minigzip. The example_d +and minigzip_d flavors validate that the zlib1.dll file is working correctly. + +Questions about zlib should be sent to . The zlib home page +is http://zlib.net/ . Before reporting a problem, please check this site to +verify that you have the latest version of zlib; otherwise get the latest +version and check whether the problem still exists or not. + +PLEASE read DLL_FAQ.txt, and the the zlib FAQ http://zlib.net/zlib_faq.html +before asking for help. 
+ + +Manifest: + +The package zlib-1.2.8-win32-x86.zip will contain the following files: + + README-WIN32.txt This document + ChangeLog Changes since previous zlib packages + DLL_FAQ.txt Frequently asked questions about zlib1.dll + zlib.3.pdf Documentation of this library in Adobe Acrobat format + + example.exe A statically-bound example (using zlib.lib, not the dll) + example.pdb Symbolic information for debugging example.exe + + example_d.exe A zlib1.dll bound example (using zdll.lib) + example_d.pdb Symbolic information for debugging example_d.exe + + minigzip.exe A statically-bound test program (using zlib.lib, not the dll) + minigzip.pdb Symbolic information for debugging minigzip.exe + + minigzip_d.exe A zlib1.dll bound test program (using zdll.lib) + minigzip_d.pdb Symbolic information for debugging minigzip_d.exe + + zlib.h Install these files into the compilers' INCLUDE path to + zconf.h compile programs which use zlib.lib or zdll.lib + + zdll.lib Install these files into the compilers' LIB path if linking + zdll.exp a compiled program to the zlib1.dll binary + + zlib.lib Install these files into the compilers' LIB path to link zlib + zlib.pdb into compiled programs, without zlib1.dll runtime dependency + (zlib.pdb provides debugging info to the compile time linker) + + zlib1.dll Install this binary shared library into the system PATH, or + the program's runtime directory (where the .exe resides) + zlib1.pdb Install in the same directory as zlib1.dll, in order to debug + an application crash using WinDbg or similar tools. + +All .pdb files above are entirely optional, but are very useful to a developer +attempting to diagnose program misbehavior or a crash. Many additional +important files for developers can be found in the zlib127.zip source package +available from http://zlib.net/ - review that package's README file for details. + + +Acknowledgments: + +The deflate format used by zlib was defined by Phil Katz. 
The deflate and +zlib specifications were written by L. Peter Deutsch. Thanks to all the +people who reported problems and suggested various improvements in zlib; they +are too numerous to cite here. + + +Copyright notice: + + (C) 1995-2012 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +If you use the zlib library in a product, we would appreciate *not* receiving +lengthy legal documents to sign. The sources are provided for free but without +warranty of any kind. The library has been entirely written by Jean-loup +Gailly and Mark Adler; it does not include third-party code. + +If you redistribute modified sources, we would appreciate that you include in +the file ChangeLog history information documenting your changes. Please read +the FAQ for more information on the distribution of modified source versions. 
diff -Nru nodejs-0.11.13/deps/zlib/win32/VisualC.txt nodejs-0.11.15/deps/zlib/win32/VisualC.txt --- nodejs-0.11.13/deps/zlib/win32/VisualC.txt 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/VisualC.txt 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,3 @@ + +To build zlib using the Microsoft Visual C++ environment, +use the appropriate project from the projects/ directory. diff -Nru nodejs-0.11.13/deps/zlib/win32/zlib1.rc nodejs-0.11.15/deps/zlib/win32/zlib1.rc --- nodejs-0.11.13/deps/zlib/win32/zlib1.rc 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/zlib1.rc 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,40 @@ +#include +#include "../zlib.h" + +#ifdef GCC_WINDRES +VS_VERSION_INFO VERSIONINFO +#else +VS_VERSION_INFO VERSIONINFO MOVEABLE IMPURE LOADONCALL DISCARDABLE +#endif + FILEVERSION ZLIB_VER_MAJOR,ZLIB_VER_MINOR,ZLIB_VER_REVISION,0 + PRODUCTVERSION ZLIB_VER_MAJOR,ZLIB_VER_MINOR,ZLIB_VER_REVISION,0 + FILEFLAGSMASK VS_FFI_FILEFLAGSMASK +#ifdef _DEBUG + FILEFLAGS 1 +#else + FILEFLAGS 0 +#endif + FILEOS VOS__WINDOWS32 + FILETYPE VFT_DLL + FILESUBTYPE 0 // not used +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904E4" + //language ID = U.S. 
English, char set = Windows, Multilingual + BEGIN + VALUE "FileDescription", "zlib data compression library\0" + VALUE "FileVersion", ZLIB_VERSION "\0" + VALUE "InternalName", "zlib1.dll\0" + VALUE "LegalCopyright", "(C) 1995-2013 Jean-loup Gailly & Mark Adler\0" + VALUE "OriginalFilename", "zlib1.dll\0" + VALUE "ProductName", "zlib\0" + VALUE "ProductVersion", ZLIB_VERSION "\0" + VALUE "Comments", "For more information visit http://www.zlib.net/\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0409, 1252 + END +END diff -Nru nodejs-0.11.13/deps/zlib/win32/zlib.def nodejs-0.11.15/deps/zlib/win32/zlib.def --- nodejs-0.11.13/deps/zlib/win32/zlib.def 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/win32/zlib.def 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,86 @@ +; zlib data compression library +EXPORTS +; basic functions + zlibVersion + deflate + deflateEnd + inflate + inflateEnd +; advanced functions + deflateSetDictionary + deflateCopy + deflateReset + deflateParams + deflateTune + deflateBound + deflatePending + deflatePrime + deflateSetHeader + inflateSetDictionary + inflateGetDictionary + inflateSync + inflateCopy + inflateReset + inflateReset2 + inflatePrime + inflateMark + inflateGetHeader + inflateBack + inflateBackEnd + zlibCompileFlags +; utility functions + compress + compress2 + compressBound + uncompress + gzopen + gzdopen + gzbuffer + gzsetparams + gzread + gzwrite + gzprintf + gzvprintf + gzputs + gzgets + gzputc + gzgetc + gzungetc + gzflush + gzseek + gzrewind + gztell + gzoffset + gzeof + gzdirect + gzclose + gzclose_r + gzclose_w + gzerror + gzclearerr +; large file functions + gzopen64 + gzseek64 + gztell64 + gzoffset64 + adler32_combine64 + crc32_combine64 +; checksum functions + adler32 + crc32 + adler32_combine + crc32_combine +; various hacks, don't look :) + deflateInit_ + deflateInit2_ + inflateInit_ + inflateInit2_ + inflateBackInit_ + gzgetc_ + zError + inflateSyncPoint + get_crc_table + 
inflateUndermine + inflateResetKeep + deflateResetKeep + gzopen_w diff -Nru nodejs-0.11.13/deps/zlib/zconf.h nodejs-0.11.15/deps/zlib/zconf.h --- nodejs-0.11.13/deps/zlib/zconf.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zconf.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,65 +1,155 @@ /* zconf.h -- configuration of the zlib compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2013 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: zconf.h,v 3.9 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #ifndef ZCONF_H #define ZCONF_H -/* This include does prefixing as below, but with an updated set of names */ -#include "mozzconf.h" - /* * If you *really* need a unique prefix for all types and library functions, * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
*/ -#ifdef Z_PREFIX -# define deflateInit_ z_deflateInit_ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 # define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy # define deflateEnd z_deflateEnd -# define inflateInit_ z_inflateInit_ -# define inflate z_inflate -# define inflateEnd z_inflateEnd # define deflateInit2_ z_deflateInit2_ -# define deflateSetDictionary z_deflateSetDictionary -# define deflateCopy z_deflateCopy -# define deflateReset z_deflateReset +# define deflateInit_ z_deflateInit_ # define deflateParams z_deflateParams -# define deflateBound z_deflateBound +# define deflatePending z_deflatePending # define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w 
z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader # define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 # define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary # define inflateSync z_inflateSync # define inflateSyncPoint z_inflateSyncPoint -# define inflateCopy z_inflateCopy -# define inflateReset z_inflateReset -# define inflateBack z_inflateBack -# define inflateBackEnd z_inflateBackEnd -# define compress z_compress -# define compress2 z_compress2 -# define compressBound z_compressBound -# define uncompress z_uncompress -# define adler32 z_adler32 -# define crc32 z_crc32 -# define get_crc_table z_get_crc_table +# define inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# 
define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif # define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef # define alloc_func z_alloc_func +# define charf z_charf # define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp # define in_func z_in_func +# define intf z_intf # define out_func z_out_func -# define Byte z_Byte # define uInt z_uInt -# define uLong z_uLong -# define Bytef z_Bytef -# define charf z_charf -# define intf z_intf # define uIntf z_uIntf +# define uLong z_uLong # define uLongf z_uLongf -# define voidpf z_voidpf # define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + #endif #if defined(__MSDOS__) && !defined(MSDOS) @@ -128,6 +218,12 @@ # endif #endif +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + /* Some Mac compilers merge all .h files incorrectly: */ #if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) # define NO_DUMMY_DECL @@ -174,6 +270,14 @@ # endif #endif +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + /* The following definitions for FAR are needed only for MSDOS mixed * model programming (small or medium model with some far allocations). 
* This was tested only with MSC; for other MSDOS compilers you may have @@ -287,49 +391,121 @@ typedef Byte *voidp; #endif -#if 0 /* HAVE_UNISTD_H -- this line is updated by ./configure */ -# include /* for off_t */ -# include /* for SEEK_* and off_t */ -# ifdef VMS -# include /* for off_t */ +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short # endif -# define z_off_t off_t #endif -#ifndef SEEK_SET + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) 
&& _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) # define SEEK_SET 0 /* Seek from beginning of file. */ # define SEEK_CUR 1 /* Seek from current position. */ # define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ #endif + #ifndef z_off_t # define z_off_t long #endif -#if defined(__OS400__) -# define NO_vsnprintf -#endif - -#if defined(__MVS__) -# define NO_vsnprintf -# ifdef FAR -# undef FAR +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t # endif #endif /* MVS linker does not support external names larger than 8 bytes */ #if defined(__MVS__) -# pragma map(deflateInit_,"DEIN") -# pragma map(deflateInit2_,"DEIN2") -# pragma map(deflateEnd,"DEEND") -# pragma map(deflateBound,"DEBND") -# pragma map(inflateInit_,"ININ") -# pragma map(inflateInit2_,"ININ2") -# pragma map(inflateEnd,"INEND") -# pragma map(inflateSync,"INSY") -# pragma map(inflateSetDictionary,"INSEDI") -# pragma map(compressBound,"CMBND") -# pragma map(inflate_table,"INTABL") -# pragma map(inflate_fast,"INFA") -# pragma map(inflate_copyright,"INCOPY") + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") #endif #endif /* ZCONF_H */ diff -Nru nodejs-0.11.13/deps/zlib/zconf.h.cmakein 
nodejs-0.11.15/deps/zlib/zconf.h.cmakein --- nodejs-0.11.13/deps/zlib/zconf.h.cmakein 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zconf.h.cmakein 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,513 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H +#cmakedefine Z_PREFIX +#cmakedefine Z_HAVE_UNISTD_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". + */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep 
z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary +# define inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define 
inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). 
+ */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +/* Some Mac compilers merge all .h files incorrectly: */ +#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) +# define NO_DUMMY_DECL +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. 
For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. 
+ * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. */ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by 
./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. 
*/ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff -Nru nodejs-0.11.13/deps/zlib/zconf.h.in nodejs-0.11.15/deps/zlib/zconf.h.in --- nodejs-0.11.13/deps/zlib/zconf.h.in 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zconf.h.in 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,511 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
+ */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# 
define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary +# define inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func 
z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). + */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). 
*/ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +/* Some Mac compilers merge all .h files incorrectly: */ +#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) +# define NO_DUMMY_DECL +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. 
+*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. 
*/ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO 
+# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. 
*/ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff -Nru nodejs-0.11.13/deps/zlib/zlib2ansi nodejs-0.11.15/deps/zlib/zlib2ansi --- nodejs-0.11.13/deps/zlib/zlib2ansi 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib2ansi 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,152 @@ +#!/usr/bin/perl + +# Transform K&R C function definitions into ANSI equivalent. +# +# Author: Paul Marquess +# Version: 1.0 +# Date: 3 October 2006 + +# TODO +# +# Asumes no function pointer parameters. unless they are typedefed. +# Assumes no literal strings that look like function definitions +# Assumes functions start at the beginning of a line + +use strict; +use warnings; + +local $/; +$_ = <>; + +my $sp = qr{ \s* (?: /\* .*? \*/ )? \s* }x; # assume no nested comments + +my $d1 = qr{ $sp (?: [\w\*\s]+ $sp)* $sp \w+ $sp [\[\]\s]* $sp }x ; +my $decl = qr{ $sp (?: \w+ $sp )+ $d1 }xo ; +my $dList = qr{ $sp $decl (?: $sp , $d1 )* $sp ; $sp }xo ; + + +while (s/^ + ( # Start $1 + ( # Start $2 + .*? 
# Minimal eat content + ( ^ \w [\w\s\*]+ ) # $3 -- function name + \s* # optional whitespace + ) # $2 - Matched up to before parameter list + + \( \s* # Literal "(" + optional whitespace + ( [^\)]+ ) # $4 - one or more anythings except ")" + \s* \) # optional whitespace surrounding a Literal ")" + + ( (?: $dList )+ ) # $5 + + $sp ^ { # literal "{" at start of line + ) # Remember to $1 + //xsom + ) +{ + my $all = $1 ; + my $prefix = $2; + my $param_list = $4 ; + my $params = $5; + + StripComments($params); + StripComments($param_list); + $param_list =~ s/^\s+//; + $param_list =~ s/\s+$//; + + my $i = 0 ; + my %pList = map { $_ => $i++ } + split /\s*,\s*/, $param_list; + my $pMatch = '(\b' . join('|', keys %pList) . '\b)\W*$' ; + + my @params = split /\s*;\s*/, $params; + my @outParams = (); + foreach my $p (@params) + { + if ($p =~ /,/) + { + my @bits = split /\s*,\s*/, $p; + my $first = shift @bits; + $first =~ s/^\s*//; + push @outParams, $first; + $first =~ /^(\w+\s*)/; + my $type = $1 ; + push @outParams, map { $type . $_ } @bits; + } + else + { + $p =~ s/^\s+//; + push @outParams, $p; + } + } + + + my %tmp = map { /$pMatch/; $_ => $pList{$1} } + @outParams ; + + @outParams = map { " $_" } + sort { $tmp{$a} <=> $tmp{$b} } + @outParams ; + + print $prefix ; + print "(\n" . join(",\n", @outParams) . ")\n"; + print "{" ; + +} + +# Output any trailing code. +print ; +exit 0; + + +sub StripComments +{ + + no warnings; + + # Strip C & C++ coments + # From the perlfaq + $_[0] =~ + + s{ + /\* ## Start of /* ... */ comment + [^*]*\*+ ## Non-* followed by 1-or-more *'s + ( + [^/*][^*]*\*+ + )* ## 0-or-more things which don't start with / + ## but do end with '*' + / ## End of /* ... */ comment + + | ## OR C++ Comment + // ## Start of C++ comment // + [^\n]* ## followed by 0-or-more non end of line characters + + | ## OR various things which aren't comments: + + ( + " ## Start of " ... " string + ( + \\. ## Escaped char + | ## OR + [^"\\] ## Non "\ + )* + " ## End of " ... 
" string + + | ## OR + + ' ## Start of ' ... ' string + ( + \\. ## Escaped char + | ## OR + [^'\\] ## Non '\ + )* + ' ## End of ' ... ' string + + | ## OR + + . ## Anything other char + [^/"'\\]* ## Chars which doesn't start a comment, string or escape + ) + }{$2}gxs; + +} diff -Nru nodejs-0.11.13/deps/zlib/zlib.3 nodejs-0.11.15/deps/zlib/zlib.3 --- nodejs-0.11.13/deps/zlib/zlib.3 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib.3 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,151 @@ +.TH ZLIB 3 "28 Apr 2013" +.SH NAME +zlib \- compression/decompression library +.SH SYNOPSIS +[see +.I zlib.h +for full description] +.SH DESCRIPTION +The +.I zlib +library is a general purpose data compression library. +The code is thread safe, assuming that the standard library functions +used are thread safe, such as memory allocation routines. +It provides in-memory compression and decompression functions, +including integrity checks of the uncompressed data. +This version of the library supports only one compression method (deflation) +but other algorithms may be added later +with the same stream interface. +.LP +Compression can be done in a single step if the buffers are large enough +or can be done by repeated calls of the compression function. +In the latter case, +the application must provide more input and/or consume the output +(providing more output space) before each call. +.LP +The library also supports reading and writing files in +.IR gzip (1) +(.gz) format +with an interface similar to that of stdio. +.LP +The library does not install any signal handler. +The decoder checks the consistency of the compressed data, +so the library should never crash even in the case of corrupted input. +.LP +All functions of the compression library are documented in the file +.IR zlib.h . 
+The distribution source includes examples of use of the library +in the files +.I test/example.c +and +.IR test/minigzip.c, +as well as other examples in the +.IR examples/ +directory. +.LP +Changes to this version are documented in the file +.I ChangeLog +that accompanies the source. +.LP +.I zlib +is available in Java using the java.util.zip package: +.IP +http://java.sun.com/developer/technicalArticles/Programming/compression/ +.LP +A Perl interface to +.IR zlib , +written by Paul Marquess (pmqs@cpan.org), +is available at CPAN (Comprehensive Perl Archive Network) sites, +including: +.IP +http://search.cpan.org/~pmqs/IO-Compress-Zlib/ +.LP +A Python interface to +.IR zlib , +written by A.M. Kuchling (amk@magnet.com), +is available in Python 1.5 and later versions: +.IP +http://docs.python.org/library/zlib.html +.LP +.I zlib +is built into +.IR tcl: +.IP +http://wiki.tcl.tk/4610 +.LP +An experimental package to read and write files in .zip format, +written on top of +.I zlib +by Gilles Vollant (info@winimage.com), +is available at: +.IP +http://www.winimage.com/zLibDll/minizip.html +and also in the +.I contrib/minizip +directory of the main +.I zlib +source distribution. +.SH "SEE ALSO" +The +.I zlib +web site can be found at: +.IP +http://zlib.net/ +.LP +The data format used by the zlib library is described by RFC +(Request for Comments) 1950 to 1952 in the files: +.IP +http://tools.ietf.org/html/rfc1950 (for the zlib header and trailer format) +.br +http://tools.ietf.org/html/rfc1951 (for the deflate compressed data format) +.br +http://tools.ietf.org/html/rfc1952 (for the gzip header and trailer format) +.LP +Mark Nelson wrote an article about +.I zlib +for the Jan. 1997 issue of Dr. 
Dobb's Journal; +a copy of the article is available at: +.IP +http://marknelson.us/1997/01/01/zlib-engine/ +.SH "REPORTING PROBLEMS" +Before reporting a problem, +please check the +.I zlib +web site to verify that you have the latest version of +.IR zlib ; +otherwise, +obtain the latest version and see if the problem still exists. +Please read the +.I zlib +FAQ at: +.IP +http://zlib.net/zlib_faq.html +.LP +before asking for help. +Send questions and/or comments to zlib@gzip.org, +or (for the Windows DLL version) to Gilles Vollant (info@winimage.com). +.SH AUTHORS +Version 1.2.8 +Copyright (C) 1995-2013 Jean-loup Gailly (jloup@gzip.org) +and Mark Adler (madler@alumni.caltech.edu). +.LP +This software is provided "as-is," +without any express or implied warranty. +In no event will the authors be held liable for any damages +arising from the use of this software. +See the distribution directory with respect to requirements +governing redistribution. +The deflate format used by +.I zlib +was defined by Phil Katz. +The deflate and +.I zlib +specifications were written by L. Peter Deutsch. +Thanks to all the people who reported problems and suggested various +improvements in +.IR zlib ; +who are too numerous to cite here. +.LP +UNIX manual page by R. P. C. Rodgers, +U.S. National Library of Medicine (rodgers@nlm.nih.gov). 
+.\" end of man page Binary files /tmp/tmp3RWfEz/15LRXSvBLI/nodejs-0.11.13/deps/zlib/zlib.3.pdf and /tmp/tmp3RWfEz/Z4ZVSmd5ZH/nodejs-0.11.15/deps/zlib/zlib.3.pdf differ diff -Nru nodejs-0.11.13/deps/zlib/zlib.gyp nodejs-0.11.15/deps/zlib/zlib.gyp --- nodejs-0.11.13/deps/zlib/zlib.gyp 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib.gyp 2015-01-20 21:22:17.000000000 +0000 @@ -27,7 +27,11 @@ 'crc32.h', 'deflate.c', 'deflate.h', - 'gzio.c', + 'gzclose.c', + 'gzguts.h', + 'gzlib.c', + 'gzread.c', + 'gzwrite.c', 'infback.c', 'inffast.c', 'inffast.h', @@ -36,7 +40,6 @@ 'inflate.h', 'inftrees.c', 'inftrees.h', - 'mozzconf.h', 'trees.c', 'trees.h', 'uncompr.c', @@ -63,6 +66,14 @@ 'contrib/minizip/iowin32.c' ], }], + ['OS=="mac" or OS=="ios" or OS=="freebsd" or OS=="android"', { + # Mac, Android and the BSDs don't have fopen64, ftello64, or + # fseeko64. We use fopen, ftell, and fseek instead on these + # systems. + 'defines': [ + 'USE_FILE32API' + ], + }], ], }, ], diff -Nru nodejs-0.11.13/deps/zlib/zlib.h nodejs-0.11.15/deps/zlib/zlib.h --- nodejs-0.11.13/deps/zlib/zlib.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,7 +1,7 @@ /* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.3, July 18th, 2005 + version 1.2.8, April 28th, 2013 - Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler + Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages @@ -24,8 +24,8 @@ The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt - (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). 
+ Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). */ #ifndef ZLIB_H @@ -37,41 +37,44 @@ extern "C" { #endif -#define ZLIB_VERSION "1.2.3" -#define ZLIB_VERNUM 0x1230 - -/* - The 'zlib' compression library provides in-memory compression and - decompression functions, including integrity checks of the uncompressed - data. This version of the library supports only one compression method - (deflation) but other algorithms will be added later and will have the same - stream interface. - - Compression can be done in a single step if the buffers are large - enough (for example if an input file is mmap'ed), or can be done by - repeated calls of the compression function. In the latter case, the - application must provide more input and/or consume the output +#define ZLIB_VERSION "1.2.8" +#define ZLIB_VERNUM 0x1280 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 2 +#define ZLIB_VER_REVISION 8 +#define ZLIB_VER_SUBREVISION 0 + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output (providing more output space) before each call. - The compressed data format used by default by the in-memory functions is + The compressed data format used by default by the in-memory functions is the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped around a deflate stream, which is itself documented in RFC 1951. 
- The library also supports reading and writing files in gzip (.gz) format + The library also supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio using the functions that start with "gz". The gzip format is different from the zlib format. gzip is a gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. - This library can optionally read and write gzip streams in memory as well. + This library can optionally read and write gzip streams in memory as well. - The zlib format was designed to be compact and fast for use in memory + The zlib format was designed to be compact and fast for use in memory and on communications channels. The gzip format was designed for single- file compression on file systems, has a larger header than zlib to maintain directory information, and uses a different, slower check method than zlib. - The library does not install any signal handler. The decoder checks - the consistency of the compressed data, so the library should never - crash even in case of corrupted input. + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in case of corrupted input. 
*/ typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); @@ -80,15 +83,15 @@ struct internal_state; typedef struct z_stream_s { - Bytef *next_in; /* next input byte */ + z_const Bytef *next_in; /* next input byte */ uInt avail_in; /* number of bytes available at next_in */ - uLong total_in; /* total nb of input bytes read so far */ + uLong total_in; /* total number of input bytes read so far */ Bytef *next_out; /* next output byte should be put there */ uInt avail_out; /* remaining free space at next_out */ - uLong total_out; /* total nb of bytes output so far */ + uLong total_out; /* total number of bytes output so far */ - char *msg; /* last error message, NULL if no error */ + z_const char *msg; /* last error message, NULL if no error */ struct internal_state FAR *state; /* not visible by applications */ alloc_func zalloc; /* used to allocate the internal state */ @@ -126,45 +129,45 @@ typedef gz_header FAR *gz_headerp; /* - The application must update next_in and avail_in when avail_in has - dropped to zero. It must update next_out and avail_out when avail_out - has dropped to zero. The application must initialize zalloc, zfree and - opaque before calling the init function. All other fields are set by the - compression library and must not be updated by the application. - - The opaque value provided by the application will be passed as the first - parameter for calls of zalloc and zfree. This can be useful for custom - memory management. The compression library attaches no meaning to the + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. 
+ + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the opaque value. - zalloc must return Z_NULL if there is not enough memory for the object. + zalloc must return Z_NULL if there is not enough memory for the object. If zlib is used in a multi-threaded application, zalloc and zfree must be thread safe. - On 16-bit systems, the functions zalloc and zfree must be able to allocate - exactly 65536 bytes, but will not be required to allocate more than this - if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, - pointers returned by zalloc for objects of exactly 65536 bytes *must* - have their offset normalized to zero. The default allocation function - provided by this library ensures this (see zutil.c). To reduce memory - requirements and avoid any allocation of 64K objects, at the expense of - compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). - - The fields total_in and total_out can be used for statistics or - progress reports. After compression, total_in holds the total size of - the uncompressed data and may be saved for use in the decompressor - (particularly if the decompressor wants to decompress everything in - a single step). + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). 
+ + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use in the decompressor (particularly + if the decompressor wants to decompress everything in a single step). */ /* constants */ #define Z_NO_FLUSH 0 -#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */ +#define Z_PARTIAL_FLUSH 1 #define Z_SYNC_FLUSH 2 #define Z_FULL_FLUSH 3 #define Z_FINISH 4 #define Z_BLOCK 5 +#define Z_TREES 6 /* Allowed flush values; see deflate() and inflate() below for details */ #define Z_OK 0 @@ -176,8 +179,8 @@ #define Z_MEM_ERROR (-4) #define Z_BUF_ERROR (-5) #define Z_VERSION_ERROR (-6) -/* Return codes for the compression/decompression functions. Negative - * values are errors, positive values are used for special but normal events. +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. */ #define Z_NO_COMPRESSION 0 @@ -207,119 +210,141 @@ #define zlib_version zlibVersion() /* for compatibility with versions < 1.0.2 */ + /* basic functions */ ZEXTERN const char * ZEXPORT zlibVersion OF((void)); /* The application can compare zlibVersion and ZLIB_VERSION for consistency. - If the first character differs, the library code actually used is - not compatible with the zlib.h header file used by the application. - This check is automatically made by deflateInit and inflateInit. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. */ /* ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); - Initializes the internal stream state for compression. The fields - zalloc, zfree and opaque must be initialized before by the caller. 
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to - use default allocation functions. + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: - 1 gives best speed, 9 gives best compression, 0 gives no compression at - all (the input data is simply copied a block at a time). - Z_DEFAULT_COMPRESSION requests a default compromise between speed and - compression (currently equivalent to level 6). + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). - deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if level is not a valid compression level, + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible - with the version assumed by the caller (ZLIB_VERSION). - msg is set to null if there is no error message. deflateInit does not - perform any compression: this will be done by deflate(). + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). */ ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); /* deflate compresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. 
It may introduce some - output latency (reading input without producing any output) except when + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when forced to flush. - The detailed semantics are as follows. deflate performs one or both of the + The detailed semantics are as follows. deflate performs one or both of the following actions: - Compress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not + accordingly. If not all input can be processed (because there is not enough room in the output buffer), next_in and avail_in are updated and processing will resume at this point for the next call of deflate(). - Provide more output starting at next_out and update next_out and avail_out - accordingly. This action is forced if the parameter flush is non zero. + accordingly. This action is forced if the parameter flush is non zero. Forcing flush frequently degrades the compression ratio, so this parameter - should be set only when necessary (in interactive applications). - Some output may be provided even if flush is not set. + should be set only when necessary (in interactive applications). Some + output may be provided even if flush is not set. - Before the call of deflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming - more output, and updating avail_in or avail_out accordingly; avail_out - should never be zero before the call. The application can consume the - compressed output when it wants, for example when the output buffer is full - (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK - and with zero avail_out, it must be called again after making room in the - output buffer because there might be more output pending. 
+ Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to - decide how much data to accumualte before producing output, in order to + decide how much data to accumulate before producing output, in order to maximize compression. If the parameter flush is set to Z_SYNC_FLUSH, all pending output is flushed to the output buffer and the output is aligned on a byte boundary, so - that the decompressor can get all input data available so far. (In particular - avail_in is zero after the call if enough output space has been provided - before the call.) Flushing may degrade compression for some compression - algorithms and so it should be used only when necessary. + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. 
+ This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed code + block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. This is for advanced applications that need to control + the emission of deflate blocks. If flush is set to Z_FULL_FLUSH, all output is flushed as with Z_SYNC_FLUSH, and the compression state is reset so that decompression can restart from this point if previous compressed data has been damaged or if - random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade compression. If deflate returns with avail_out == 0, this function must be called again with the same value of the flush parameter and more output space (updated avail_out), until the flush is complete (deflate returns with non-zero - avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that avail_out is greater than six to avoid repeated flush markers due to avail_out == 0 on return. 
If the parameter flush is set to Z_FINISH, pending input is processed, - pending output is flushed and deflate returns with Z_STREAM_END if there - was enough output space; if deflate returns with Z_OK, this function must be + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space; if deflate returns with Z_OK, this function must be called again with Z_FINISH and more output space (updated avail_out) but no - more input data, until it returns with Z_STREAM_END or an error. After - deflate has returned Z_STREAM_END, the only possible operations on the - stream are deflateReset or deflateEnd. + more input data, until it returns with Z_STREAM_END or an error. After + deflate has returned Z_STREAM_END, the only possible operations on the stream + are deflateReset or deflateEnd. Z_FINISH can be used immediately after deflateInit if all the compression - is to be done in a single step. In this case, avail_out must be at least - the value returned by deflateBound (see below). If deflate does not return - Z_STREAM_END, then it must be called again as described above. + is to be done in a single step. In this case, avail_out must be at least the + value returned by deflateBound (see below). Then deflate is guaranteed to + return Z_STREAM_END. If not enough output space is provided, deflate will + not return Z_STREAM_END, and it must be called again as described above. deflate() sets strm->adler to the adler32 checksum of all input read so far (that is, total_in bytes). deflate() may update strm->data_type if it can make a good guess about - the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered - binary. This field is only for information purposes and does not affect - the compression algorithm in any manner. + the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered + binary. This field is only for information purposes and does not affect the + compression algorithm in any manner. 
deflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if all input has been consumed and all output has been produced (only when flush is set to Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example - if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible - (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not + if next_in or next_out was Z_NULL), Z_BUF_ERROR if no progress is possible + (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and deflate() can be called again with more input and more output space to continue compressing. */ @@ -328,13 +353,13 @@ ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); /* All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any - pending output. + This function discards any unprocessed input and does not flush any pending + output. deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state was inconsistent, Z_DATA_ERROR if the stream was freed - prematurely (some input or output was discarded). In the error case, - msg may be set but then points to a static string (which must not be + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be deallocated). */ @@ -342,10 +367,10 @@ /* ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); - Initializes the internal stream state for decompression. The fields + Initializes the internal stream state for decompression. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by - the caller. If next_in is not Z_NULL and avail_in is large enough (the exact - value depends on the compression method), inflateInit determines the + the caller. 
If next_in is not Z_NULL and avail_in is large enough (the + exact value depends on the compression method), inflateInit determines the compression method from the zlib header and allocates all data structures accordingly; otherwise the allocation will be deferred to the first call of inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to @@ -353,95 +378,116 @@ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller. msg is set to null if there is no error - message. inflateInit does not perform any decompression apart from reading - the zlib header if present: this will be done by inflate(). (So next_in and - avail_in may be modified, but next_out and avail_out are unchanged.) + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit() does not process any header information -- that is deferred + until inflate() is called. */ ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); /* inflate decompresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce + buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. - The detailed semantics are as follows. inflate performs one or both of the + The detailed semantics are as follows. 
inflate performs one or both of the following actions: - Decompress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in is updated and processing - will resume at this point for the next call of inflate(). + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in is updated and processing will + resume at this point for the next call of inflate(). - Provide more output starting at next_out and update next_out and avail_out - accordingly. inflate() provides as much output as possible, until there - is no more input data or no more space in the output buffer (see below - about the flush parameter). - - Before the call of inflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming - more output, and updating the next_* and avail_* values accordingly. - The application can consume the uncompressed output when it wants, for - example when the output buffer is full (avail_out == 0), or after each - call of inflate(). If inflate returns Z_OK and with zero avail_out, it - must be called again after making room in the output buffer because there - might be more output pending. - - The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, - Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much - output as possible to the output buffer. Z_BLOCK requests that inflate() stop - if and when it gets to the next deflate block boundary. When decoding the - zlib or gzip format, this will cause inflate() to return immediately after - the header and before the first block. When doing a raw inflate, inflate() - will go ahead and process the first block, and will return when it gets to - the end of that block, or when it runs out of data. + accordingly. 
inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. The Z_BLOCK option assists in appending to or combining deflate streams. Also to assist in this, on return inflate() will set strm->data_type to the - number of unused bits in the last byte taken from strm->next_in, plus 64 - if inflate() is currently decoding the last block in the deflate stream, - plus 128 if inflate() returned immediately after decoding an end-of-block - code or decoding the complete header up to just before the first byte of the - deflate stream. The end-of-block will not be indicated until all of the - uncompressed data from that block has been written to strm->next_out. 
The - number of unused bits may in general be greater than seven, except when - bit 7 of data_type is set, in which case the number of unused bits will be - less than eight. + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. + + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. inflate() should normally be called until it returns Z_STREAM_END or an - error. However if all decompression is to be performed in a single step - (a single call of inflate), the parameter flush should be set to - Z_FINISH. In this case all pending input is processed and all pending - output is flushed; avail_out must be large enough to hold all the - uncompressed data. (The size of the uncompressed data may have been saved - by the compressor for this purpose.) The next operation on this stream must - be inflateEnd to deallocate the decompression state. 
The use of Z_FINISH - is never required, but can be used to inform inflate that a faster approach - may be used for the single inflate() call. + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. In this implementation, inflate() always flushes as much output as possible to the output buffer, and always uses the faster approach on the - first call. So the only effect of the flush parameter in this implementation - is on the return value of inflate(), as noted below, or when it returns early - because Z_BLOCK is used. + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. 
If a preset dictionary is needed after this call (see inflateSetDictionary - below), inflate sets strm->adler to the adler32 checksum of the dictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise it sets - strm->adler to the adler32 checksum of all output produced so far (that is, + strm->adler to the Adler-32 checksum of all output produced so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described - below. At the end of the stream, inflate() checks that its computed adler32 + below. At the end of the stream, inflate() checks that its computed adler32 checksum is equal to that saved by the compressor and returns Z_STREAM_END only if the checksum is correct. - inflate() will decompress and check either zlib-wrapped or gzip-wrapped - deflate data. The header type is detected automatically. Any information - contained in the gzip header is not retained, so applications that need that - information should instead use raw inflate, see inflateInit2() below, or - inflateBack() and perform their own processing of the gzip header and - trailer. + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained, so applications that need that information should + instead use raw inflate, see inflateInit2() below, or inflateBack() and + perform their own processing of the gzip header and trailer. When processing + gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output + producted so far. The CRC-32 is checked against the gzip trailer. 
inflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if the end of the compressed data has @@ -449,27 +495,28 @@ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was corrupted (input stream not conforming to the zlib format or incorrect check value), Z_STREAM_ERROR if the stream structure was inconsistent (for example - if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, + next_in or next_out was Z_NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no progress is possible or if there was not enough room in the - output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and inflate() can be called again with more input and more output space to - continue decompressing. If Z_DATA_ERROR is returned, the application may then - call inflateSync() to look for a good compression block if a partial recovery - of the data is desired. + continue decompressing. If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is desired. */ ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); /* All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any - pending output. + This function discards any unprocessed input and does not flush any pending + output. inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state - was inconsistent. In the error case, msg may be set but then points to a + was inconsistent. In the error case, msg may be set but then points to a static string (which must not be deallocated). */ + /* Advanced functions */ /* @@ -484,55 +531,57 @@ int memLevel, int strategy)); - This is another version of deflateInit with more compression options. 
The - fields next_in, zalloc, zfree and opaque must be initialized before by - the caller. + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by the + caller. - The method parameter is the compression method. It must be Z_DEFLATED in + The method parameter is the compression method. It must be Z_DEFLATED in this version of the library. The windowBits parameter is the base two logarithm of the window size - (the size of the history buffer). It should be in the range 8..15 for this - version of the library. Larger values of this parameter result in better - compression at the expense of memory usage. The default value is 15 if + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if deflateInit is used instead. - windowBits can also be -8..-15 for raw deflate. In this case, -windowBits - determines the window size. deflate() will then generate raw deflate data + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data with no zlib header or trailer, and will not compute an adler32 check value. - windowBits can also be greater than 15 for optional gzip encoding. Add + windowBits can also be greater than 15 for optional gzip encoding. Add 16 to windowBits to write a simple gzip header and trailer around the - compressed data instead of a zlib wrapper. The gzip header will have no - file name, no extra data, no comment, no modification time (set to zero), - no header crc, and the operating system will be set to 255 (unknown). If a + compressed data instead of a zlib wrapper. 
The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to 255 (unknown). If a gzip stream is being written, strm->adler is a crc32 instead of an adler32. The memLevel parameter specifies how much memory should be allocated - for the internal compression state. memLevel=1 uses minimum memory but - is slow and reduces compression ratio; memLevel=9 uses maximum memory - for optimal speed. The default value is 8. See zconf.h for total memory - usage as a function of windowBits and memLevel. + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. - The strategy parameter is used to tune the compression algorithm. Use the + The strategy parameter is used to tune the compression algorithm. Use the value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no string match), or Z_RLE to limit match distances to one (run-length - encoding). Filtered data consists mostly of small values with a somewhat - random distribution. In this case, the compression algorithm is tuned to - compress them better. The effect of Z_FILTERED is to force more Huffman + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between - Z_DEFAULT and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as - Z_HUFFMAN_ONLY, but give better compression for PNG image data. 
The strategy - parameter only affects the compression ratio but not the correctness of the - compressed output even if it is not set appropriately. Z_FIXED prevents the - use of dynamic Huffman codes, allowing for a simpler decoder for special - applications. - - deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid - method). msg is set to null if there is no error message. deflateInit2 does - not perform any compression: this will be done by deflate(). + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. deflateInit2 does not perform any + compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, @@ -540,38 +589,43 @@ uInt dictLength)); /* Initializes the compression dictionary from the given byte sequence - without producing any compressed output. This function must be called - immediately after deflateInit, deflateInit2 or deflateReset, before any - call of deflate. The compressor and decompressor must use exactly the same - dictionary (see inflateSetDictionary). + without producing any compressed output. 
When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). The dictionary should consist of strings (byte sequences) that are likely to be encountered later in the data to be compressed, with the most commonly - used strings preferably put towards the end of the dictionary. Using a + used strings preferably put towards the end of the dictionary. Using a dictionary is most useful when the data to be compressed is short and can be predicted with good accuracy; the data can then be compressed better than with the default empty dictionary. Depending on the size of the compression data structures selected by deflateInit or deflateInit2, a part of the dictionary may in effect be - discarded, for example if the dictionary is larger than the window size in - deflate or deflate2. Thus the strings most likely to be useful should be - put at the end of the dictionary, not at the front. In addition, the - current implementation of deflate will use at most the window size minus - 262 bytes of the provided dictionary. + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. 
Upon return of this function, strm->adler is set to the adler32 value of the dictionary; the decompressor may later use this value to determine - which dictionary has been used by the compressor. (The adler32 value + which dictionary has been used by the compressor. (The adler32 value applies to the whole dictionary even if only a subset of the dictionary is actually used by the compressor.) If a raw deflate was requested, then the adler32 value is not computed and strm->adler is not set. deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a - parameter is invalid (such as NULL dictionary) or the stream state is + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent (for example if deflate has already been called for this stream - or if the compression method is bsort). deflateSetDictionary does not - perform any compression: this will be done by deflate(). + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, @@ -581,26 +635,26 @@ This function can be useful when several compression strategies will be tried, for example when there are several ways of pre-processing the input - data with a filter. The streams that will be discarded should then be freed + data with a filter. The streams that will be discarded should then be freed by calling deflateEnd. Note that deflateCopy duplicates the internal - compression state which can be quite large, so this strategy is slow and - can consume lots of memory. + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being NULL). msg is left unchanged in both source and + (such as zalloc being Z_NULL). 
msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); /* This function is equivalent to deflateEnd followed by deflateInit, - but does not free and reallocate all the internal compression state. - The stream will keep the same compression level and any other attributes - that may have been set by deflateInit2. + but does not free and reallocate all the internal compression state. The + stream will keep the same compression level and any other attributes that + may have been set by deflateInit2. - deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being NULL). + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). */ ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, @@ -610,18 +664,18 @@ Dynamically update the compression level and compression strategy. The interpretation of level and strategy is as in deflateInit2. This can be used to switch between compression and straight copy of the input data, or - to switch to a different kind of input data requiring a different - strategy. If the compression level is changed, the input available so far - is compressed with the old level (and may be flushed); the new level will - take effect only at the next call of deflate(). + to switch to a different kind of input data requiring a different strategy. + If the compression level is changed, the input available so far is + compressed with the old level (and may be flushed); the new level will take + effect only at the next call of deflate(). Before the call of deflateParams, the stream state must be set as for - a call of deflate(), since the currently available input may have to - be compressed and flushed. In particular, strm->avail_out must be non-zero. + a call of deflate(), since the currently available input may have to be + compressed and flushed. 
In particular, strm->avail_out must be non-zero. deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source - stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR - if strm->avail_out was zero. + stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR if + strm->avail_out was zero. */ ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, @@ -645,31 +699,53 @@ uLong sourceLen)); /* deflateBound() returns an upper bound on the compressed size after - deflation of sourceLen bytes. It must be called after deflateInit() - or deflateInit2(). This would be used to allocate an output buffer - for deflation in a single pass, and so would be called before deflate(). -*/ + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. +*/ + +ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, + unsigned *pending, + int *bits)); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. 
+ + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, int bits, int value)); /* deflatePrime() inserts bits in the deflate output stream. The intent - is that this function is used to start off the deflate output with the - bits leftover from a previous deflate stream when appending to it. As such, - this function can only be used for raw deflate, and must be used before the - first deflate() call after a deflateInit2() or deflateReset(). bits must be - less than or equal to 16, and that many of the least significant bits of - value will be inserted in the output. - - deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. */ ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, gz_headerp head)); /* - deflateSetHeader() provides gzip header information for when a gzip + deflateSetHeader() provides gzip header information for when a gzip stream is requested by deflateInit2(). deflateSetHeader() may be called after deflateInit2() or deflateReset() and before the first call of deflate(). The text, time, os, extra field, name, and comment information @@ -682,11 +758,11 @@ 1.3.x) do not support header crc's, and will report that it is a "multi-part gzip file" and give up. 
- If deflateSetHeader is not used, the default gzip header has text false, + If deflateSetHeader is not used, the default gzip header has text false, the time set to zero, and os set to 255, with no extra, name, or comment fields. The gzip header is returned to the default state by deflateReset(). - deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ @@ -694,43 +770,50 @@ ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, int windowBits)); - This is another version of inflateInit with an extra parameter. The + This is another version of inflateInit with an extra parameter. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. The windowBits parameter is the base two logarithm of the maximum window size (the size of the history buffer). It should be in the range 8..15 for - this version of the library. The default value is 15 if inflateInit is used - instead. windowBits must be greater than or equal to the windowBits value + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value provided to deflateInit2() while compressing, or it must be equal to 15 if - deflateInit2() was not used. If a compressed stream with a larger window + deflateInit2() was not used. If a compressed stream with a larger window size is given as input, inflate() will return with the error code Z_DATA_ERROR instead of trying to allocate a larger window. - windowBits can also be -8..-15 for raw inflate. In this case, -windowBits - determines the window size. inflate() will then process raw deflate data, + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. 
inflate() will then process raw deflate data, not looking for a zlib or gzip header, not generating a check value, and not - looking for any check values for comparison at the end of the stream. This + looking for any check values for comparison at the end of the stream. This is for use with other formats that use the deflate compressed data format - such as zip. Those formats provide their own check values. If a custom + such as zip. Those formats provide their own check values. If a custom format is developed using the raw deflate format for compressed data, it is recommended that a check value such as an adler32 or a crc32 be applied to the uncompressed data as is done in the zlib, gzip, and zip formats. For - most applications, the zlib format should be used as is. Note that comments + most applications, the zlib format should be used as is. Note that comments above on the use in deflateInit2() applies to the magnitude of windowBits. - windowBits can also be greater than 15 for optional gzip decoding. Add + windowBits can also be greater than 15 for optional gzip decoding. Add 32 to windowBits to enable zlib and gzip decoding with automatic header detection, or add 16 to decode only the gzip format (the zlib format will - return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is - a crc32 instead of an adler32. + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + crc32 instead of an adler32. inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg - is set to null if there is no error message. inflateInit2 does not perform - any decompression apart from reading the zlib header if present: this will - be done by inflate(). (So next_in and avail_in may be modified, but next_out - and avail_out are unchanged.) 
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. */ ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, @@ -738,36 +821,56 @@ uInt dictLength)); /* Initializes the decompression dictionary from the given uncompressed byte - sequence. This function must be called immediately after a call of inflate, - if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. The dictionary chosen by the compressor can be determined from the adler32 value returned by that call of inflate. The compressor and decompressor must use exactly the same dictionary (see - deflateSetDictionary). For raw inflate, this function can be called - immediately after inflateInit2() or inflateReset() and before any call of - inflate() to set the dictionary. The application must insure that the - dictionary that was used for compression is provided. + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. 
inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a - parameter is invalid (such as NULL dictionary) or the stream state is + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the - expected one (incorrect adler32 value). inflateSetDictionary does not + expected one (incorrect adler32 value). inflateSetDictionary does not perform any decompression: this will be done by subsequent calls of inflate(). */ +ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similary, if dictLength is Z_NULL, then it is not set. + + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); /* - Skips invalid compressed data until a full flush point (see above the - description of deflate with Z_FULL_FLUSH) can be found, or until all - available input is skipped. No output is provided. - - inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR - if no more input was provided, Z_DATA_ERROR if no flush point has been found, - or Z_STREAM_ERROR if the stream structure was inconsistent. In the success - case, the application may save the current current value of total_in which - indicates where valid compressed data was found. In the error case, the - application may repeatedly call inflateSync, providing more input each time, - until success or end of the input data. 
+ Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. + + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. + + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current current value of + total_in which indicates where valid compressed data was found. In the + error case, the application may repeatedly call inflateSync, providing more + input each time, until success or end of the input data. */ ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, @@ -782,18 +885,30 @@ inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being NULL). msg is left unchanged in both source and + (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); /* This function is equivalent to inflateEnd followed by inflateInit, - but does not free and reallocate all the internal decompression state. - The stream will keep attributes that may have been set by inflateInit2. + but does not free and reallocate all the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. - inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being NULL). + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). 
+*/ + +ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, + int windowBits)); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. */ ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, @@ -801,54 +916,87 @@ int value)); /* This function inserts bits in the inflate input stream. The intent is - that this function is used to start inflating at a bit position in the - middle of a byte. The provided bits will be used before any bytes are used - from next_in. This function should only be used with raw inflate, and - should be used before the first inflate() call after inflateInit2() or - inflateReset(). bits must be less than or equal to 16, and that many of the - least significant bits of value will be inserted in the input. + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. - inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. 
*/ +ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. + If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. + + inflateMark returns the value noted above or -1 << 16 if the provided + source stream state was inconsistent. +*/ + ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, gz_headerp head)); /* - inflateGetHeader() requests that gzip header information be stored in the + inflateGetHeader() requests that gzip header information be stored in the provided gz_header structure. inflateGetHeader() may be called after inflateInit2() or inflateReset(), and before the first call of inflate(). 
As inflate() processes the gzip stream, head->done is zero until the header is completed, at which time head->done is set to one. If a zlib stream is being decoded, then head->done is set to -1 to indicate that there will be - no gzip header information forthcoming. Note that Z_BLOCK can be used to - force inflate() to return immediately after header processing is complete - and before any actual data is decompressed. + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. - The text, time, xflags, and os fields are filled in with the gzip header + The text, time, xflags, and os fields are filled in with the gzip header contents. hcrc is set to true if there is a header CRC. (The header CRC - was valid if done is set to one.) If extra is not Z_NULL, then extra_max + was valid if done is set to one.) If extra is not Z_NULL, then extra_max contains the maximum number of bytes to write to extra. Once done is true, extra_len contains the actual extra field length, and extra contains the extra field, or that field truncated if extra_max is less than extra_len. If name is not Z_NULL, then up to name_max characters are written there, terminated with a zero unless the length is greater than name_max. If comment is not Z_NULL, then up to comm_max characters are written there, - terminated with a zero unless the length is greater than comm_max. When - any of extra, name, or comment are not Z_NULL and the respective field is - not present in the header, then that field is set to Z_NULL to signal its + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its absence. This allows the use of deflateSetHeader() with the returned structure to duplicate the header. 
However if those fields are set to allocated memory, then the application will need to save those pointers elsewhere so that they can be eventually freed. - If inflateGetHeader is not used, then the header information is simply + If inflateGetHeader is not used, then the header information is simply discarded. The header is always checked for validity, including the header CRC if present. inflateReset() will reset the process to discard the header information. The application would need to call inflateGetHeader() again to retrieve the header from the next gzip stream. - inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ @@ -869,12 +1017,13 @@ See inflateBack() for the usage of these routines. inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of - the paramaters are invalid, Z_MEM_ERROR if the internal state could not - be allocated, or Z_VERSION_ERROR if the version of the library does not - match the version of the header file. + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. */ -typedef unsigned (*in_func) OF((void FAR *, unsigned char FAR * FAR *)); +typedef unsigned (*in_func) OF((void FAR *, + z_const unsigned char FAR * FAR *)); typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, @@ -882,24 +1031,25 @@ out_func out, void FAR *out_desc)); /* inflateBack() does a raw inflate with a single call using a call-back - interface for input and output. This is more efficient than inflate() for - file i/o applications in that it avoids copying between the output and the - sliding window by simply making the window itself the output buffer. 
This - function trusts the application to not change the output buffer passed by - the output function, at least until inflateBack() returns. + interface for input and output. This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. inflateBackInit() must be called first to allocate the internal state and to initialize the state with the user-provided window buffer. inflateBack() may then be used multiple times to inflate a complete, raw - deflate stream with each call. inflateBackEnd() is then called to free - the allocated state. + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. A raw deflate stream is one with no zlib or gzip header or trailer. This routine would normally be used in a utility that reads zip or gzip files and writes out uncompressed files. The utility would decode the - header and process the trailer on its own, hence this routine expects - only the raw deflate stream to decompress. This is different from the - normal behavior of inflate(), which expects either a zlib or gzip header and + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the normal + behavior of inflate(), which expects either a zlib or gzip header and trailer around the deflate stream. inflateBack() uses two subroutines supplied by the caller that are then @@ -925,7 +1075,7 @@ calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called immediately for input. 
If strm->next_in is not Z_NULL, then strm->avail_in must also be initialized, and then if strm->avail_in is not zero, input will - initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. The in_desc and out_desc parameters of inflateBack() is passed as the first parameter of in() and out() respectively when they are called. These @@ -935,15 +1085,15 @@ On return, inflateBack() will set strm->next_in and strm->avail_in to pass back any unused input that was provided by the last in() call. The return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR - if in() or out() returned an error, Z_DATA_ERROR if there was a format - error in the deflate stream (in which case strm->msg is set to indicate the - nature of the error), or Z_STREAM_ERROR if the stream was not properly - initialized. In the case of Z_BUF_ERROR, an input or output error can be - distinguished using strm->next_in which will be Z_NULL only if in() returned - an error. If strm->next is not Z_NULL, then the Z_BUF_ERROR was due to - out() returning non-zero. (in() will always be called before out(), so - strm->next_in is assured to be defined if out() returns non-zero.) Note - that inflateBack() cannot return Z_OK. + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. (in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. 
*/ ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); @@ -995,27 +1145,27 @@ 27-31: 0 (reserved) */ +#ifndef Z_SOLO /* utility functions */ /* - The following utility functions are implemented on top of the - basic stream-oriented functions. To simplify the interface, some - default options are assumed (compression level and memory usage, - standard memory allocation functions). The source code of these - utility functions can easily be modified if you need special options. + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. */ ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)); /* Compresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total - size of the destination buffer, which must be at least the value returned - by compressBound(sourceLen). Upon exit, destLen is the actual size of the + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed buffer. - This function can be used to compress a whole file at once if the - input file is mmap'ed. + compress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer. @@ -1025,11 +1175,11 @@ const Bytef *source, uLong sourceLen, int level)); /* - Compresses the source buffer into the destination buffer. The level + Compresses the source buffer into the destination buffer. The level parameter has the same meaning as in deflateInit. 
sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the + length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed buffer. compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough @@ -1040,159 +1190,255 @@ ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); /* compressBound() returns an upper bound on the compressed size after - compress() or compress2() on sourceLen bytes. It would be used before - a compress() or compress2() call to allocate the destination buffer. + compress() or compress2() on sourceLen bytes. It would be used before a + compress() or compress2() call to allocate the destination buffer. */ ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)); /* Decompresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total - size of the destination buffer, which must be large enough to hold the - entire uncompressed data. (The size of the uncompressed data must have - been saved previously by the compressor and transmitted to the decompressor - by some mechanism outside the scope of this compression library.) - Upon exit, destLen is the actual size of the compressed buffer. - This function can be used to decompress a whole file at once if the - input file is mmap'ed. + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) 
Upon exit, destLen + is the actual size of the uncompressed buffer. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. */ + /* gzip file access functions */ -typedef voidp gzFile; +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. +*/ + +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ -ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); /* - Opens a gzip (.gz) file for reading or writing. The mode parameter - is as in fopen ("rb" or "wb") but can also include a compression level - ("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for - Huffman only compression as in "wb1h", or 'R' for run-length encoding - as in "wb1R". (See the description of deflateInit2 for more information - about the strategy parameter.) +ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); + + Opens a gzip (.gz) file for reading or writing. The mode parameter is as + in fopen ("rb" or "wb") but can also include a compression level ("wb9") or + a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only + compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' + for fixed code compression as in "wb9F". (See the description of + deflateInit2 for more information about the strategy parameter.) 
'T' will + request transparent writing or appending with no compression and not using + the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. gzopen can be used to read a file which is not in gzip format; in this - case gzread will directly read from the file without decompression. + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. + + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ + +ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); +/* + gzdopen associates a gzFile with the file descriptor fd. File descriptors + are obtained from calls like open, dup, creat, pipe or fileno (if the file + has been previously opened with fopen). The mode parameter is as in gzopen. 
+ + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); +/* + Set the internal buffer size used by this library's functions. The + default buffer size is 8192 bytes. This function must be called after + gzopen() or gzdopen(), and before any other calls that read or write the + file. The buffer memory allocation is always deferred to the first read or + write. Two buffers are allocated, either both of the specified size when + writing, or one of the specified size and the other twice that size when + reading. A larger buffer size of, for example, 64K or 128K bytes will + noticeably increase the speed of decompression (reading). - gzopen returns NULL if the file could not be opened or if there was - insufficient memory to allocate the (de)compression state; errno - can be checked to distinguish the two cases (if errno is zero, the - zlib error is Z_MEM_ERROR). 
*/ - -ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); -/* - gzdopen() associates a gzFile with the file descriptor fd. File - descriptors are obtained from calls like open, dup, creat, pipe or - fileno (in the file has been previously opened with fopen). - The mode parameter is as in gzopen. - The next call of gzclose on the returned gzFile will also close the - file descriptor fd, just like fclose(fdopen(fd), mode) closes the file - descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode). - gzdopen returns NULL if there was insufficient memory to allocate - the (de)compression state. + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. */ ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); /* - Dynamically update the compression level or strategy. See the description + Dynamically update the compression level or strategy. See the description of deflateInit2 for the meaning of these parameters. + gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not opened for writing. */ -ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); +ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); /* - Reads the given number of uncompressed bytes from the compressed file. - If the input file was not in gzip format, gzread copies the given number - of bytes into the buffer. - gzread returns the number of uncompressed bytes actually read (0 for - end of file, -1 for error). */ + Reads the given number of uncompressed bytes from the compressed file. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. 
Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). + + gzread can be used to read a gzip file that is being concurrently written. + Upon reaching the end of the input, gzread will return with the available + data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then + gzclearerr can be used to clear the end of file indicator in order to permit + gzread to be tried again. Z_OK indicates that a gzip stream was completed + on the last gzread. Z_BUF_ERROR indicates that the input file ended in the + middle of a gzip stream. Note that gzread does not return -1 in the event + of an incomplete gzip stream. This error is deferred until gzclose(), which + will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip + stream. Alternatively, gzerror can be used before gzclose to detect this + case. + + gzread returns the number of uncompressed bytes actually read, less than + len for end of file, or -1 for error. +*/ -ZEXTERN int ZEXPORT gzwrite OF((gzFile file, - voidpc buf, unsigned len)); +ZEXTERN int ZEXPORT gzwrite OF((gzFile file, + voidpc buf, unsigned len)); /* Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of uncompressed bytes actually written - (0 in case of error). + gzwrite returns the number of uncompressed bytes written or 0 in case of + error. */ -ZEXTERN int ZEXPORTVA gzprintf OF((gzFile file, const char *format, ...)); +ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); /* - Converts, formats, and writes the args to the compressed file under - control of the format string, as in fprintf. gzprintf returns the number of - uncompressed bytes actually written (0 in case of error). The number of - uncompressed bytes written is limited to 4095. 
The caller should assure that - this limit is not exceeded. If it is exceeded, then gzprintf() will return - return an error (0) with nothing written. In this case, there may also be a - buffer overflow with unpredictable consequences, which is possible only if - zlib was compiled with the insecure functions sprintf() or vsprintf() - because the secure snprintf() or vsnprintf() functions were not available. + Converts, formats, and writes the arguments to the compressed file under + control of the format string, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or 0 in case of error. The number of + uncompressed bytes written is limited to 8191, or one less than the buffer + size given to gzbuffer(). The caller should assure that this limit is not + exceeded. If it is exceeded, then gzprintf() will return an error (0) with + nothing written. In this case, there may also be a buffer overflow with + unpredictable consequences, which is possible only if zlib was compiled with + the insecure functions sprintf() or vsprintf() because the secure snprintf() + or vsnprintf() functions were not available. This can be determined using + zlibCompileFlags(). */ ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); /* - Writes the given null-terminated string to the compressed file, excluding + Writes the given null-terminated string to the compressed file, excluding the terminating null character. - gzputs returns the number of characters written, or -1 in case of error. + + gzputs returns the number of characters written, or -1 in case of error. */ ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); /* - Reads bytes from the compressed file until len-1 characters are read, or - a newline character is read and transferred to buf, or an end-of-file - condition is encountered. The string is then terminated with a null - character. - gzgets returns buf, or Z_NULL in case of error. 
+ Reads bytes from the compressed file until len-1 characters are read, or a + newline character is read and transferred to buf, or an end-of-file + condition is encountered. If any characters are read or if len == 1, the + string is terminated with a null character. If no characters are read due + to an end-of-file or len < 1, then the buffer is left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. */ -ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); +ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); /* - Writes c, converted to an unsigned char, into the compressed file. - gzputc returns the value that was written, or -1 in case of error. + Writes c, converted to an unsigned char, into the compressed file. gzputc + returns the value that was written, or -1 in case of error. */ -ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); +ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); /* - Reads one byte from the compressed file. gzgetc returns this byte - or -1 in case of end of file or error. + Reads one byte from the compressed file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. */ -ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); +ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); /* - Push one character back onto the stream to be read again later. - Only one character of push-back is allowed. gzungetc() returns the - character pushed, or -1 on failure. gzungetc() will fail if a - character has been pushed but not read yet, or if c is -1. The pushed - character will be discarded if the stream is repositioned with gzseek() - or gzrewind(). 
+ Push one character back onto the stream to be read as the first character + on the next read. At least one character of push-back is allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). */ -ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); +ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); /* - Flushes all pending output into the compressed file. The parameter - flush is as in the deflate() function. The return value is the zlib - error number (see function gzerror below). gzflush returns Z_OK if - the flush parameter is Z_FINISH and all output could be flushed. - gzflush should be called only when strictly necessary because it can - degrade compression. + Flushes all pending output into the compressed file. The parameter flush + is as in the deflate() function. The return value is the zlib error number + (see function gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. gzread() is able to read such + concatented gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. */ -ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, - z_off_t offset, int whence)); /* - Sets the starting position for the next gzread or gzwrite on the - given compressed file. The offset represents a number of bytes in the - uncompressed data stream. 
The whence parameter is defined as in lseek(2); +ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, + z_off_t offset, int whence)); + + Sets the starting position for the next gzread or gzwrite on the given + compressed file. The offset represents a number of bytes in the + uncompressed data stream. The whence parameter is defined as in lseek(2); the value SEEK_END is not supported. + If the file is opened for reading, this function is emulated but can be - extremely slow. If the file is opened for writing, only forward seeks are + extremely slow. If the file is opened for writing, only forward seeks are supported; gzseek then compresses a sequence of zeroes up to the new starting position. - gzseek returns the resulting offset location as measured in bytes from + gzseek returns the resulting offset location as measured in bytes from the beginning of the uncompressed stream, or -1 in case of error, in particular if the file is opened for writing and the new starting position would be before the current position. @@ -1202,68 +1448,134 @@ /* Rewinds the given file. This function is supported only for reading. - gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) */ +/* ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); + + Returns the starting position for the next gzread or gzwrite on the given + compressed file. This position represents a number of bytes in the + uncompressed data stream, and is zero when starting, even if appending or + reading a gzip stream from the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + /* - Returns the starting position for the next gzread or gzwrite on the - given compressed file. This position represents a number of bytes in the - uncompressed data stream. 
+ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); - gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) + Returns the current offset in the file being read or written. This offset + includes the count of bytes that precede the gzip stream, for example when + appending or when using gzdopen() for reading. When reading, the offset + does not include as yet unused buffered input. This information can be used + for a progress indicator. On error, gzoffset() returns -1. */ ZEXTERN int ZEXPORT gzeof OF((gzFile file)); /* - Returns 1 when EOF has previously been detected reading the given - input stream, otherwise zero. + Returns true (1) if the end-of-file indicator has been set while reading, + false (0) otherwise. Note that the end-of-file indicator is set only if the + read tried to go past the end of the input, but came up short. Therefore, + just like feof(), gzeof() may return false even if there is no more data to + read, in the event that the last read request was for the exact number of + bytes remaining in the input file. This will happen if the input file size + is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. */ ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); /* - Returns 1 if file is being read directly without decompression, otherwise - zero. + Returns true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). 
+ + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) */ ZEXTERN int ZEXPORT gzclose OF((gzFile file)); /* - Flushes all pending output if necessary, closes the compressed file - and deallocates all the (de)compression state. The return value is the zlib - error number (see function gzerror below). + Flushes all pending output if necessary, closes the compressed file and + deallocates the (de)compression state. Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. + + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. +*/ + +ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); +/* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included the application when linking to a static + zlib library. */ ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); /* - Returns the error message for the last error which occurred on the - given compressed file. 
errnum is set to zlib error number. If an - error occurred in the file system and not in the compression library, - errnum is set to Z_ERRNO and the application may consult errno - to get the exact error code. + Returns the error message for the last error which occurred on the given + compressed file. errnum is set to zlib error number. If an error occurred + in the file system and not in the compression library, errnum is set to + Z_ERRNO and the application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. */ ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); /* - Clears the error and end-of-file flags for file. This is analogous to the - clearerr() function in stdio. This is useful for continuing to read a gzip + Clears the error and end-of-file flags for file. This is analogous to the + clearerr() function in stdio. This is useful for continuing to read a gzip file that is being written concurrently. */ +#endif /* !Z_SOLO */ + /* checksum functions */ /* These functions are not related to compression but are exported - anyway because they might be useful in applications using the - compression library. + anyway because they might be useful in applications using the compression + library. */ ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); /* Update a running Adler-32 checksum with the bytes buf[0..len-1] and - return the updated checksum. If buf is NULL, this function returns - the required initial value for the checksum. - An Adler-32 checksum is almost as reliable as a CRC32 but can be computed - much faster. 
Usage example: + return the updated checksum. If buf is Z_NULL, this function returns the + required initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC32 but can be computed + much faster. + + Usage example: uLong adler = adler32(0L, Z_NULL, 0); @@ -1273,21 +1585,25 @@ if (adler != original_adler) error(); */ +/* ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, z_off_t len2)); -/* + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of - seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. */ ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); /* Update a running CRC-32 with the bytes buf[0..len-1] and return the - updated CRC-32. If buf is NULL, this function returns the required initial - value for the for the crc. Pre- and post-conditioning (one's complement) is + updated CRC-32. If buf is Z_NULL, this function returns the required + initial value for the crc. Pre- and post-conditioning (one's complement) is performed within this function so it shouldn't be done by the application. + Usage example: uLong crc = crc32(0L, Z_NULL, 0); @@ -1298,9 +1614,9 @@ if (crc != original_crc) error(); */ +/* ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); -/* Combine two CRC-32 check values into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, CRC-32 check values were calculated for each, crc1 and crc2. 
crc32_combine() returns the CRC-32 @@ -1329,26 +1645,121 @@ const char *version, int stream_size)); #define deflateInit(strm, level) \ - deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream)) + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) #define inflateInit(strm) \ - inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream)) + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) #define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ - (strategy), ZLIB_VERSION, sizeof(z_stream)) + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) #define inflateInit2(strm, windowBits) \ - inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream)) + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) #define inflateBackInit(strm, windowBits, window) \ inflateBackInit_((strm), (windowBits), (window), \ - ZLIB_VERSION, sizeof(z_stream)) + ZLIB_VERSION, (int)sizeof(z_stream)) + +#ifndef Z_SOLO + +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#else +# define gzgetc(g) \ + ((g)->have ? 
((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#endif +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# else +# define gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); + ZEXTERN uLong ZEXPORT 
adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); +#endif + +#else /* Z_SOLO */ + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); + +#endif /* !Z_SOLO */ + +/* hack for buggy compilers */ #if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) - struct internal_state {int dummy;}; /* hack for buggy compilers */ + struct internal_state {int dummy;}; #endif +/* undocumented functions */ ZEXTERN const char * ZEXPORT zError OF((int)); -ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp z)); -ZEXTERN const uLongf * ZEXPORT get_crc_table OF((void)); +ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); +ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); +ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); +ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); +#if defined(_WIN32) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, + const char *mode)); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, + const char *format, + va_list va)); +# endif +#endif #ifdef __cplusplus } diff -Nru nodejs-0.11.13/deps/zlib/zlib.map nodejs-0.11.15/deps/zlib/zlib.map --- nodejs-0.11.13/deps/zlib/zlib.map 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib.map 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,83 @@ +ZLIB_1.2.0 { + global: + compressBound; + deflateBound; + inflateBack; + inflateBackEnd; + inflateBackInit_; + inflateCopy; + local: + deflate_copyright; + inflate_copyright; + inflate_fast; + inflate_table; + zcalloc; + zcfree; + z_errmsg; + gz_error; + gz_intmax; + _*; +}; + +ZLIB_1.2.0.2 { + gzclearerr; + gzungetc; + zlibCompileFlags; +} ZLIB_1.2.0; + +ZLIB_1.2.0.8 { + deflatePrime; +} ZLIB_1.2.0.2; + +ZLIB_1.2.2 { + adler32_combine; + 
crc32_combine; + deflateSetHeader; + inflateGetHeader; +} ZLIB_1.2.0.8; + +ZLIB_1.2.2.3 { + deflateTune; + gzdirect; +} ZLIB_1.2.2; + +ZLIB_1.2.2.4 { + inflatePrime; +} ZLIB_1.2.2.3; + +ZLIB_1.2.3.3 { + adler32_combine64; + crc32_combine64; + gzopen64; + gzseek64; + gztell64; + inflateUndermine; +} ZLIB_1.2.2.4; + +ZLIB_1.2.3.4 { + inflateReset2; + inflateMark; +} ZLIB_1.2.3.3; + +ZLIB_1.2.3.5 { + gzbuffer; + gzoffset; + gzoffset64; + gzclose_r; + gzclose_w; +} ZLIB_1.2.3.4; + +ZLIB_1.2.5.1 { + deflatePending; +} ZLIB_1.2.3.5; + +ZLIB_1.2.5.2 { + deflateResetKeep; + gzgetc_; + inflateResetKeep; +} ZLIB_1.2.5.1; + +ZLIB_1.2.7.1 { + inflateGetDictionary; + gzvprintf; +} ZLIB_1.2.5.2; diff -Nru nodejs-0.11.13/deps/zlib/zlib.pc.cmakein nodejs-0.11.15/deps/zlib/zlib.pc.cmakein --- nodejs-0.11.13/deps/zlib/zlib.pc.cmakein 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib.pc.cmakein 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@INSTALL_LIB_DIR@ +sharedlibdir=@INSTALL_LIB_DIR@ +includedir=@INSTALL_INC_DIR@ + +Name: zlib +Description: zlib compression library +Version: @VERSION@ + +Requires: +Libs: -L${libdir} -L${sharedlibdir} -lz +Cflags: -I${includedir} diff -Nru nodejs-0.11.13/deps/zlib/zlib.pc.in nodejs-0.11.15/deps/zlib/zlib.pc.in --- nodejs-0.11.13/deps/zlib/zlib.pc.in 1970-01-01 00:00:00.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zlib.pc.in 2015-01-20 21:22:17.000000000 +0000 @@ -0,0 +1,13 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +sharedlibdir=@sharedlibdir@ +includedir=@includedir@ + +Name: zlib +Description: zlib compression library +Version: @VERSION@ + +Requires: +Libs: -L${libdir} -L${sharedlibdir} -lz +Cflags: -I${includedir} diff -Nru nodejs-0.11.13/deps/zlib/zutil.c nodejs-0.11.15/deps/zlib/zutil.c --- nodejs-0.11.13/deps/zlib/zutil.c 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zutil.c 2015-01-20 
21:22:17.000000000 +0000 @@ -1,17 +1,20 @@ /* zutil.c -- target dependent utility functions for the compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2005, 2010, 2011, 2012 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: zutil.c,v 3.11 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #include "zutil.h" +#ifndef Z_SOLO +# include "gzguts.h" +#endif #ifndef NO_DUMMY_DECL struct internal_state {int dummy;}; /* for buggy compilers */ #endif -const char * const z_errmsg[10] = { +z_const char * const z_errmsg[10] = { "need dictionary", /* Z_NEED_DICT 2 */ "stream end", /* Z_STREAM_END 1 */ "", /* Z_OK 0 */ @@ -34,25 +37,25 @@ uLong flags; flags = 0; - switch (sizeof(uInt)) { + switch ((int)(sizeof(uInt))) { case 2: break; case 4: flags += 1; break; case 8: flags += 2; break; default: flags += 3; } - switch (sizeof(uLong)) { + switch ((int)(sizeof(uLong))) { case 2: break; case 4: flags += 1 << 2; break; case 8: flags += 2 << 2; break; default: flags += 3 << 2; } - switch (sizeof(voidpf)) { + switch ((int)(sizeof(voidpf))) { case 2: break; case 4: flags += 1 << 4; break; case 8: flags += 2 << 4; break; default: flags += 3 << 4; } - switch (sizeof(z_off_t)) { + switch ((int)(sizeof(z_off_t))) { case 2: break; case 4: flags += 1 << 6; break; case 8: flags += 2 << 6; break; @@ -85,27 +88,27 @@ #ifdef FASTEST flags += 1L << 21; #endif -#ifdef STDC +#if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifdef NO_vsnprintf - flags += 1L << 25; + flags += 1L << 25; # ifdef HAS_vsprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # else # ifdef HAS_vsnprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # endif #else - flags += 1L << 24; + flags += 1L << 24; # ifdef NO_snprintf - flags += 1L << 25; + flags += 1L << 25; # ifdef HAS_sprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # else # ifdef HAS_snprintf_void - flags += 1L << 26; + flags += 
1L << 26; # endif # endif #endif @@ -117,9 +120,9 @@ # ifndef verbose # define verbose 0 # endif -int z_verbose = verbose; +int ZLIB_INTERNAL z_verbose = verbose; -void z_error (m) +void ZLIB_INTERNAL z_error (m) char *m; { fprintf(stderr, "%s\n", m); @@ -141,14 +144,12 @@ * errno. We define it as a global variable to simplify porting. * Its value is always 0 and should not be used. */ - // Google Gears modification: zutil.h defines errno as z_errno for WinCE. - //int errno = 0; - int z_errno = 0; + int errno = 0; #endif #ifndef HAVE_MEMCPY -void zmemcpy(dest, source, len) +void ZLIB_INTERNAL zmemcpy(dest, source, len) Bytef* dest; const Bytef* source; uInt len; @@ -159,7 +160,7 @@ } while (--len != 0); } -int zmemcmp(s1, s2, len) +int ZLIB_INTERNAL zmemcmp(s1, s2, len) const Bytef* s1; const Bytef* s2; uInt len; @@ -172,7 +173,7 @@ return 0; } -void zmemzero(dest, len) +void ZLIB_INTERNAL zmemzero(dest, len) Bytef* dest; uInt len; { @@ -183,6 +184,7 @@ } #endif +#ifndef Z_SOLO #ifdef SYS16BIT @@ -215,7 +217,7 @@ * a protected system like OS/2. Use Microsoft C instead. 
*/ -voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) { voidpf buf = opaque; /* just to make some compilers happy */ ulg bsize = (ulg)items*size; @@ -239,7 +241,7 @@ return buf; } -void zcfree (voidpf opaque, voidpf ptr) +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { int n; if (*(ush*)&ptr != 0) { /* object < 64K */ @@ -274,13 +276,13 @@ # define _hfree hfree #endif -voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) { if (opaque) opaque = 0; /* to make compiler happy */ return _halloc((long)items, size); } -void zcfree (voidpf opaque, voidpf ptr) +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { if (opaque) opaque = 0; /* to make compiler happy */ _hfree(ptr); @@ -299,7 +301,7 @@ extern void free OF((voidpf ptr)); #endif -voidpf zcalloc (opaque, items, size) +voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) voidpf opaque; unsigned items; unsigned size; @@ -309,7 +311,7 @@ (voidpf)calloc(items, size); } -void zcfree (opaque, ptr) +void ZLIB_INTERNAL zcfree (opaque, ptr) voidpf opaque; voidpf ptr; { @@ -318,3 +320,5 @@ } #endif /* MY_ZCALLOC */ + +#endif /* !Z_SOLO */ diff -Nru nodejs-0.11.13/deps/zlib/zutil.h nodejs-0.11.15/deps/zlib/zutil.h --- nodejs-0.11.13/deps/zlib/zutil.h 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/deps/zlib/zutil.h 2015-01-20 21:22:17.000000000 +0000 @@ -1,5 +1,5 @@ /* zutil.h -- internal interface and configuration of the compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2013 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -8,35 +8,29 @@ subject to change. Applications should only use zlib.h. 
*/ -/* @(#) $Id: zutil.h,v 3.10 2005/08/04 19:14:14 tor%cs.brown.edu Exp $ */ +/* @(#) $Id$ */ #ifndef ZUTIL_H #define ZUTIL_H -#define ZLIB_INTERNAL +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + #include "zlib.h" -#ifdef STDC -# ifndef _WIN32_WCE +#if defined(STDC) && !defined(Z_SOLO) +# if !(defined(_WIN32_WCE) && defined(_MSC_VER)) # include # endif # include # include #endif -#ifdef NO_ERRNO_H -# ifdef _WIN32_WCE - /* The Microsoft C Run-Time Library for Windows CE doesn't have - * errno. We define it as a global variable to simplify porting. - * Its value is always 0 and should not be used. We rename it to - * avoid conflict with other libraries that use the same workaround. - */ -# define errno z_errno -# endif - extern int errno; -#else -# ifndef _WIN32_WCE -# include -# endif + +#ifdef Z_SOLO + typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */ #endif #ifndef local @@ -50,13 +44,13 @@ typedef ush FAR ushf; typedef unsigned long ulg; -extern const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ +extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ /* (size given to avoid silly warnings with Visual C++) */ #define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] #define ERR_RETURN(strm,err) \ - return (strm->msg = (char*)ERR_MSG(err), (err)) + return (strm->msg = ERR_MSG(err), (err)) /* To be used only when the state is known to be valid */ /* common constants */ @@ -88,16 +82,18 @@ #if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) # define OS_CODE 0x00 -# if defined(__TURBOC__) || defined(__BORLANDC__) -# if(__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) - /* Allow compilation with ANSI keywords only enabled */ - void _Cdecl farfree( void *block ); - void *_Cdecl farmalloc( unsigned long nbytes ); -# else -# include +# ifndef Z_SOLO +# if defined(__TURBOC__) || defined(__BORLANDC__) +# if (__STDC__ == 1) && 
(defined(__LARGE__) || defined(__COMPACT__)) + /* Allow compilation with ANSI keywords only enabled */ + void _Cdecl farfree( void *block ); + void *_Cdecl farmalloc( unsigned long nbytes ); +# else +# include +# endif +# else /* MSC or DJGPP */ +# include # endif -# else /* MSC or DJGPP */ -# include # endif #endif @@ -117,18 +113,20 @@ #ifdef OS2 # define OS_CODE 0x06 -# ifdef M_I86 - #include +# if defined(M_I86) && !defined(Z_SOLO) +# include # endif #endif #if defined(MACOS) || defined(TARGET_OS_MAC) # define OS_CODE 0x07 -# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os -# include /* for fdopen */ -# else -# ifndef fdopen -# define fdopen(fd,mode) NULL /* No fdopen() */ +# ifndef Z_SOLO +# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os +# include /* for fdopen */ +# else +# ifndef fdopen +# define fdopen(fd,mode) NULL /* No fdopen() */ +# endif # endif # endif #endif @@ -151,7 +149,7 @@ # define fdopen(fd,mode) NULL /* No fdopen() */ #endif -#if (defined(_MSC_VER) && (_MSC_VER > 600)) +#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX # if defined(_WIN32_WCE) # define fdopen(fd,mode) NULL /* No fdopen() */ # ifndef _PTRDIFF_T_DEFINED @@ -163,6 +161,19 @@ # endif #endif +#if defined(__BORLANDC__) && !defined(MSDOS) + #pragma warn -8004 + #pragma warn -8008 + #pragma warn -8066 +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_WIN32) && \ + (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +#endif + /* common defaults */ #ifndef OS_CODE @@ -175,40 +186,7 @@ /* functions */ -#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) -# ifndef HAVE_VSNPRINTF -# define HAVE_VSNPRINTF -# endif -#endif -#if defined(__CYGWIN__) -# ifndef HAVE_VSNPRINTF -# define HAVE_VSNPRINTF -# endif -#endif -#ifndef 
HAVE_VSNPRINTF -# ifdef MSDOS - /* vsnprintf may exist on some MS-DOS compilers (DJGPP?), - but for now we just assume it doesn't. */ -# define NO_vsnprintf -# endif -# ifdef __TURBOC__ -# define NO_vsnprintf -# endif -# ifdef WIN32 - /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */ -# if !defined(vsnprintf) && !defined(NO_vsnprintf) -# define vsnprintf _vsnprintf -# endif -# endif -# ifdef __SASC -# define NO_vsnprintf -# endif -#endif -#ifdef VMS -# define NO_vsnprintf -#endif - -#if defined(pyr) +#if defined(pyr) || defined(Z_SOLO) # define NO_MEMCPY #endif #if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) @@ -232,16 +210,16 @@ # define zmemzero(dest, len) memset(dest, 0, len) # endif #else - extern void zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); - extern int zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); - extern void zmemzero OF((Bytef* dest, uInt len)); + void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); + int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); + void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); #endif /* Diagnostic functions */ #ifdef DEBUG # include - extern int z_verbose; - extern void z_error OF((char *m)); + extern int ZLIB_INTERNAL z_verbose; + extern void ZLIB_INTERNAL z_error OF((char *m)); # define Assert(cond,msg) {if(!(cond)) z_error(msg);} # define Trace(x) {if (z_verbose>=0) fprintf x ;} # define Tracev(x) {if (z_verbose>0) fprintf x ;} @@ -257,13 +235,19 @@ # define Tracecv(c,x) #endif - -voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); -void zcfree OF((voidpf opaque, voidpf ptr)); +#ifndef Z_SOLO + voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, + unsigned size)); + void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); +#endif #define ZALLOC(strm, items, size) \ (*((strm)->zalloc))((strm)->opaque, (items), (size)) #define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, 
(voidpf)(addr)) #define TRY_FREE(s, p) {if (p) ZFREE(s, p);} +/* Reverse the bytes in a 32-bit value */ +#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ + (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) + #endif /* ZUTIL_H */ diff -Nru nodejs-0.11.13/doc/api/addons.html nodejs-0.11.15/doc/api/addons.html --- nodejs-0.11.13/doc/api/addons.html 2014-05-02 01:18:34.000000000 +0000 +++ nodejs-0.11.15/doc/api/addons.html 2015-01-20 22:11:38.000000000 +0000 @@ -2,36 +2,41 @@ - Addons Node.js v0.11.13 Manual & Documentation + Addons Node.js v0.11.15 Manual & Documentation + + - -
- - - + + +
- - +
-

Node.js v0.11.13 Manual & Documentation

+

Node.js v0.11.15 Manual & Documentation

Index | @@ -767,26 +772,48 @@

+
+ +
- - + + + - -
- - - + + +
- - +
-

Node.js v0.11.13 Manual & Documentation

+

Node.js v0.11.15 Manual & Documentation

Index | @@ -73,25 +78,25 @@

  • console
  • Timers
  • @@ -166,7 +171,7 @@
  • process.version
  • process.versions
  • process.config
  • -
  • process.kill(pid, [signal])
  • +
  • process.kill(pid[, signal])
  • process.pid
  • process.title
  • process.arch
  • @@ -176,13 +181,14 @@
  • process.umask([mask])
  • process.uptime()
  • process.hrtime()
  • +
  • process.mainModule
  • util
  • Crypto
  • The module.exports object is created by the Module system. Sometimes this is not -acceptable; many want their module to be an instance of some class. To do this +acceptable; many want their module to be an instance of some class. To do this, assign the desired export object to module.exports. Note that assigning the desired object to exports will simply rebind the local exports variable, which is probably not what you want to do. @@ -1957,7 +2033,8 @@ LOAD_AS_FILE(X) 1. If X is a file, load X as JavaScript text. STOP 2. If X.js is a file, load X.js as JavaScript text. STOP -3. If X.node is a file, load X.node as binary addon. STOP +3. If X.json is a file, parse X.json to a JavaScript Object. STOP +4. If X.node is a file, load X.node as binary addon. STOP LOAD_AS_DIRECTORY(X) 1. If X/package.json is a file, @@ -1965,7 +2042,8 @@ b. let M = X + (json main field) c. LOAD_AS_FILE(M) 2. If X/index.js is a file, load X/index.js as JavaScript text. STOP -3. If X/index.node is a file, load X/index.node as binary addon. STOP +3. If X/index.json is a file, parse X/index.json to a JavaScript object. STOP +4. If X/index.node is a file, load X/index.node as binary addon. STOP LOAD_NODE_MODULES(X, START) 1. let DIRS=NODE_MODULES_PATHS(START) @@ -1975,15 +2053,14 @@ NODE_MODULES_PATHS(START) 1. let PARTS = path split(START) -2. let ROOT = index of first instance of "node_modules" in PARTS, or 0 -3. let I = count of PARTS - 1 -4. let DIRS = [] -5. while I > ROOT, +2. let I = count of PARTS - 1 +3. let DIRS = [] +4. while I >= 0, a. if PARTS[I] = "node_modules" CONTINUE c. DIR = path join(PARTS[0 .. I] + "node_modules") b. DIRS = DIRS + DIR c. let I = I - 1 -6. return DIRS +5. return DIRS

    Loading from the global folders#

    @@ -2973,7 +3050,7 @@

    process.stdout#

    -

    A Writable Stream to stdout. +

    A Writable Stream to stdout (on fd 1).

    Example: the definition of console.log @@ -2983,7 +3060,8 @@ process.stdout.write(d + '\n'); };

    process.stderr and process.stdout are unlike other streams in Node in -that writes to them are usually blocking. +that they cannot be closed (end() will throw), they never emit the finish +event and that writes are usually blocking.

      @@ -3012,11 +3090,12 @@

      process.stderr#

      -

      A writable stream to stderr. +

      A writable stream to stderr (on fd 2).

      process.stderr and process.stdout are unlike other streams in Node in -that writes to them are usually blocking. +that they cannot be closed (end() will throw), they never emit the finish +event and that writes are usually blocking.

        @@ -3029,7 +3108,7 @@

      process.stdin#

      -

      A Readable Stream for stdin. +

      A Readable Stream for stdin (on fd 0).

      Example of opening standard input and listening for both events: @@ -3135,8 +3214,30 @@

      process.env#

      An object containing the user environment. See environ(7). +

      +

      An example of this object looks like: + +

      +
      { TERM: 'xterm-256color',
      +  SHELL: '/usr/local/bin/bash',
      +  USER: 'maciej',
      +  PATH: '~/.bin/:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin',
      +  PWD: '/Users/maciej',
      +  EDITOR: 'vim',
      +  SHLVL: '1',
      +  HOME: '/Users/maciej',
      +  LOGNAME: 'maciej',
      +  _: '/usr/local/bin/node' }
      +

      You can write to this object, but changes won't be reflected outside of your +process. That means that the following won't work: + +

      +
      node -e 'process.env.foo = "bar"' && echo $foo
      +

      But this will:

      +
      process.env.foo = 'bar';
      +console.log(process.env.foo);

      process.exit([code])#

      Ends the process with the specified code. If omitted, exit uses the 'success' code 0. @@ -3320,7 +3421,7 @@ strict_aliasing: 'true', target_arch: 'x64', v8_use_snapshot: 'true' } } -

      process.kill(pid, [signal])#

      +

      process.kill(pid[, signal])#

      Send a signal to a process. pid is the process id and signal is the string describing the signal to send. Signal names are strings like 'SIGINT' or 'SIGHUP'. If omitted, the signal will be 'SIGTERM'. @@ -3490,7 +3591,7 @@ given, otherwise returns the current mask.

      -
      var oldmask, newmask = 0644;
      +
      var oldmask, newmask = 0022;
       
       oldmask = process.umask(newmask);
       console.log('Changed umask from: ' + oldmask.toString(8) +
      @@ -3521,6 +3622,18 @@
         console.log('benchmark took %d nanoseconds', diff[0] * 1e9 + diff[1]);
         // benchmark took 1000000527 nanoseconds
       }, 1000);
      +

      process.mainModule#

      +

      Alternate way to retrieve +require.main. +The difference is that if the main module changes at runtime, require.main +might still refer to the original main module in modules that were required +before the change occurred. Generally it's safe to assume that the two refer +to the same module. + +

      +

      As with require.main, it will be undefined if there was no entry script. + +

      util#

      Stability: 4 - API Frozen

      These functions are in the module 'util'. Use require('util') to access them. @@ -3566,7 +3679,7 @@ comma. For example, NODE_DEBUG=fs,net,tls.

      -

      util.format(format, [...])#

      +

      util.format(format[, ...])#

      Returns a formatted string using the first argument as a printf-like format.

      @@ -3604,7 +3717,7 @@

      require('util').log('Timestamped message.');
      -

      util.inspect(object, [options])#

      +

      util.inspect(object[, options])#

      Return a string representation of object, which is useful for debugging.

      @@ -3776,6 +3889,21 @@ console.log('Received data: "' + data + '"'); }) stream.write("It works!"); // Received data: "It works!"
      +

      util.deprecate(function, string)#

      +

      Marks that a method should not be used any more. + +

      +
      exports.puts = exports.deprecate(function() {
      +  for (var i = 0, len = arguments.length; i < len; ++i) {
      +    process.stdout.write(arguments[i] + '\n');
      +  }
      +}, 'util.puts: Use console.log instead')
      +

      It returns a modified function which warns once by default. If +--no-deprecation is set then this function is a NO-OP. If +--throw-deprecation is set then the application will throw an exception +if the deprecated API is used. + +

      util.debug(string)#

      Stability: 0 - Deprecated: use console.error() instead.

      Deprecated predecessor of console.error. @@ -3793,7 +3921,7 @@

      -

      util.pump(readableStream, writableStream, [callback])#

      +

      util.pump(readableStream, writableStream[, callback])#

      Stability: 0 - Deprecated: Use readableStream.pipe(writableStream)

      Deprecated predecessor of stream.pipe().

      @@ -3833,7 +3961,10 @@

      emitter.addListener(event, listener)#

      emitter.on(event, listener)#

      -

      Adds a listener to the end of the listeners array for the specified event. +

      Adds a listener to the end of the listeners array for the specified event. +No checks are made to see if the listener has already been added. Multiple +calls passing the same combination of event and listener will result in the +listener being added multiple times.

      server.on('connection', function (stream) {
      @@ -3865,6 +3996,12 @@
       server.on('connection', callback);
       // ...
       server.removeListener('connection', callback);
      +

      removeListener will remove, at most, one instance of a listener from the +listener array. If any single listener has been added multiple times to the +listener array for the specified event, then removeListener must be called +multiple times to remove each instance. + +

      Returns emitter, so calls can be chained.

      @@ -3906,7 +4043,7 @@ console.log('someone connected!'); }); console.log(util.inspect(server.listeners('connection'))); // [ [Function] ] -

      emitter.emit(event, [arg1], [arg2], [...])#

      +

      emitter.emit(event[, arg1][, arg2][, ...])#

      Execute each of the listeners in order with the supplied arguments.

      @@ -3924,8 +4061,8 @@
    • event String The event name
    • listener Function The event handler function
    -

    This event is emitted any time someone adds a new listener. It is unspecified -if listener is in the list returned by emitter.listeners(event). +

    This event is emitted any time a listener is added. When this event is triggered, +the listener may not yet have been added to the array of listeners for the event.

    @@ -3934,8 +4071,8 @@
  • event String The event name
  • listener Function The event handler function
  • -

    This event is emitted any time someone removes a listener. It is unspecified -if listener is in the list returned by emitter.listeners(event). +

    This event is emitted any time someone removes a listener. When this event is triggered, +the listener may not yet have been removed from the array of listeners for the event.

    Domain#

    @@ -4422,6 +4559,27 @@
  • 'hex' - Encode each byte as two hexadecimal characters.

  • +

    Creating a typed array from a Buffer works with the following caveats: + +

    +
      +
    1. The buffer's memory is copied, not shared.

      +
    2. +
    3. The buffer's memory is interpreted as an array, not a byte array. That is, +new Uint32Array(new Buffer([1,2,3,4])) creates a 4-element Uint32Array +with elements [1,2,3,4], not an Uint32Array with a single element +[0x1020304] or [0x4030201].

      +
    4. +
    +

    NOTE: Node.js v0.8 simply retained a reference to the buffer in array.buffer +instead of cloning it. + +

    +

    While more efficient, it introduces subtle incompatibilities with the typed +arrays specification. ArrayBuffer#slice() makes a copy of the slice while +Buffer#slice() creates a view. + +

    Class: Buffer#

    The Buffer class is a global type for dealing with binary data directly. It can be constructed in a variety of ways. @@ -4431,7 +4589,9 @@

    • size Number
    -

    Allocates a new buffer of size octets. +

    Allocates a new buffer of size octets. Note, size must be no more than +kMaxLength. Otherwise, a RangeError +will be thrown here.

    new Buffer(array)#

    @@ -4441,7 +4601,14 @@

    Allocates a new buffer using an array of octets.

    -

    new Buffer(str, [encoding])#

    +

    new Buffer(buffer)#

    +
      +
    • buffer Buffer
    • +
    +

    Copies the passed buffer data onto a new Buffer instance. + +

    +

    new Buffer(str[, encoding])#

    • str String - string to encode.
    • encoding String - encoding to use, Optional.
    • @@ -4466,7 +4633,7 @@

      Tests if obj is a Buffer.

      -

      Class Method: Buffer.byteLength(string, [encoding])#

      +

      Class Method: Buffer.byteLength(string[, encoding])#

      • string String
      • encoding String, Optional, Default: 'utf8'
      • @@ -4486,7 +4653,7 @@ Buffer.byteLength(str, 'utf8') + " bytes"); // ½ + ¼ = ¾: 9 characters, 12 bytes -

        Class Method: Buffer.concat(list, [totalLength])#

        +

        Class Method: Buffer.concat(list[, totalLength])#

        • list Array List of Buffer objects to concat
        • totalLength Number Total length of the buffers when concatenated
        • @@ -4539,7 +4706,18 @@ // 1234 // 1234 -

          buf.write(string, [offset], [length], [encoding])#

          +

          While the length property is not immutable, changing the value of length +can result in undefined and inconsistent behavior. Applications that wish to +modify the length of a buffer should therefore treat length as read-only and +use buf.slice to create a new buffer. + +

          +
          buf = new Buffer(10);
          +buf.write("abcdefghj", 0, "ascii");
          +console.log(buf.length); // 10
          +buf = buf.slice(0,5);
          +console.log(buf.length); // 5
          +

          buf.write(string[, offset][, length][, encoding])#

          • string String - data to be written to buffer
          • offset Number, Optional, Default: 0
          • @@ -4557,17 +4735,71 @@
            buf = new Buffer(256);
             len = buf.write('\u00bd + \u00bc = \u00be', 0);
             console.log(len + " bytes: " + buf.toString('utf8', 0, len));
            -

            buf.toString([encoding], [start], [end])#

            +

            buf.writeUIntLE(value, offset, byteLength[, noAssert])#

            +

            buf.writeUIntBE(value, offset, byteLength[, noAssert])#

            +

            buf.writeIntLE(value, offset, byteLength[, noAssert])#

            +

            buf.writeIntBE(value, offset, byteLength[, noAssert])#

            +
              +
            • value {Number} Bytes to be written to buffer
            • +
            • offset {Number} 0 <= offset <= buf.length
            • +
            • byteLength {Number} 0 < byteLength <= 6
            • +
            • noAssert {Boolean} Default: false
            • +
            • Return: {Number}
            • +
            +

            Writes value to the buffer at the specified offset and byteLength. +Supports up to 48 bits of accuracy. For example: + +

            +
            var b = new Buffer(6);
            +b.writeUIntBE(0x1234567890ab, 0, 6);
            +// <Buffer 12 34 56 78 90 ab>
            +

            Set noAssert to true to skip validation of value and offset. Defaults +to false. + +

            +

            buf.readUIntLE(offset, byteLength[, noAssert])#

            +

            buf.readUIntBE(offset, byteLength[, noAssert])#

            +

            buf.readIntLE(offset, byteLength[, noAssert])#

            +

            buf.readIntBE(offset, byteLength[, noAssert])#

            +
              +
            • offset {Number} 0 <= offset <= buf.length
            • +
            • byteLength {Number} 0 < byteLength <= 6
            • +
            • noAssert {Boolean} Default: false
            • +
            • Return: {Number}
            • +
            +

            A generalized version of all numeric read methods. Supports up to 48 bits of +accuracy. For example: + +

            +
            var b = new Buffer(6);
            +b.writeUInt16LE(0x90ab, 0);
            +b.writeUInt32LE(0x12345678, 2);
            +b.readUIntLE(0, 6).toString(16);  // Specify 6 bytes (48 bits)
            +// output: '1234567890ab'
            +

            Set noAssert to true to skip validation of offset. This means that offset +may be beyond the end of the buffer. Defaults to false. + +

            +

            buf.toString([encoding][, start][, end])#

            • encoding String, Optional, Default: 'utf8'
            • start Number, Optional, Default: 0
            • end Number, Optional, Default: buffer.length
          -

          Decodes and returns a string from buffer data encoded with encoding -(defaults to 'utf8') beginning at start (defaults to 0) and ending at -end (defaults to buffer.length). +

          Decodes and returns a string from buffer data encoded using the specified +character set encoding. If encoding is undefined or null, then encoding +defaults to 'utf8'. The start and end parameters default to 0 and +buffer.length when undefined.

          +
          buf = new Buffer(26);
          +for (var i = 0 ; i < 26 ; i++) {
          +  buf[i] = i + 97; // 97 is ASCII a
          +}
          +buf.toString('ascii'); // outputs: abcdefghijklmnopqrstuvwxyz
          +buf.toString('ascii',0,5); // outputs: abcde
          +buf.toString('utf8',0,5); // outputs: abcde
          +buf.toString(undefined,0,5); // encoding defaults to 'utf8', outputs abcde

          See buffer.write() example, above. @@ -4630,22 +4862,18 @@

          Returns a number indicating whether this comes before or after or is the same as the otherBuffer in sort order. -

          -

          buf.copy(targetBuffer, [targetStart], [sourceStart], [sourceEnd])#

          +

          buf.copy(targetBuffer[, targetStart][, sourceStart][, sourceEnd])#

          • targetBuffer Buffer object - Buffer to copy into
          • targetStart Number, Optional, Default: 0
          • sourceStart Number, Optional, Default: 0
          • sourceEnd Number, Optional, Default: buffer.length
        -

        Does copy between buffers. The source and target regions can be overlapped. -targetStart and sourceStart default to 0. -sourceEnd defaults to buffer.length. - -

        -

        All values passed that are undefined/NaN or are out of bounds are set equal -to their respective defaults. +

        Copies data from a region of this buffer to a region in the target buffer even +if the target memory region overlaps with the source. If undefined the +targetStart and sourceStart parameters default to 0 while sourceEnd +defaults to buffer.length.

        Example: build two Buffers, then copy buf1 from byte 16 through byte 19 @@ -4664,7 +4892,21 @@ console.log(buf2.toString('ascii', 0, 25)); // !!!!!!!!qrst!!!!!!!!!!!!! -

        buf.slice([start], [end])#

        +

        Example: Build a single buffer, then copy data from one region to an overlapping +region in the same buffer + +

        +
        buf = new Buffer(26);
        +
        +for (var i = 0 ; i < 26 ; i++) {
        +  buf[i] = i + 97; // 97 is ASCII a
        +}
        +
        +buf.copy(buf, 0, 4, 10);
        +console.log(buf.toString());
        +
        +// efghijghijklmnopqrstuvwxyz
        +

        buf.slice([start][, end])#

        • start Number, Optional, Default: 0
        • end Number, Optional, Default: buffer.length
        • @@ -4694,7 +4936,7 @@ // abc // !bc -

          buf.readUInt8(offset, [noAssert])#

          +

          buf.readUInt8(offset[, noAssert])#

          • offset Number
          • noAssert Boolean, Optional, Default: false
          • @@ -4725,8 +4967,8 @@ // 0x4 // 0x23 // 0x42 -

            buf.readUInt16LE(offset, [noAssert])#

            -

            buf.readUInt16BE(offset, [noAssert])#

            +

            buf.readUInt16LE(offset[, noAssert])#

            +

            buf.readUInt16BE(offset[, noAssert])#

            • offset Number
            • noAssert Boolean, Optional, Default: false
            • @@ -4763,8 +5005,8 @@ // 0x2304 // 0x2342 // 0x4223 -

              buf.readUInt32LE(offset, [noAssert])#

              -

              buf.readUInt32BE(offset, [noAssert])#

              +

              buf.readUInt32LE(offset[, noAssert])#

              +

              buf.readUInt32BE(offset[, noAssert])#

              • offset Number
              • noAssert Boolean, Optional, Default: false
              • @@ -4793,7 +5035,7 @@ // 0x03042342 // 0x42230403 -

                buf.readInt8(offset, [noAssert])#

                +

                buf.readInt8(offset[, noAssert])#

                • offset Number
                • noAssert Boolean, Optional, Default: false
                • @@ -4810,8 +5052,8 @@ complement signed values.

                  -

                  buf.readInt16LE(offset, [noAssert])#

                  -

                  buf.readInt16BE(offset, [noAssert])#

                  +

                  buf.readInt16LE(offset[, noAssert])#

                  +

                  buf.readInt16BE(offset[, noAssert])#

                  • offset Number
                  • noAssert Boolean, Optional, Default: false
                  • @@ -4829,8 +5071,8 @@ complement signed values.

                    -

                    buf.readInt32LE(offset, [noAssert])#

                    -

                    buf.readInt32BE(offset, [noAssert])#

                    +

                    buf.readInt32LE(offset[, noAssert])#

                    +

                    buf.readInt32BE(offset[, noAssert])#

                    • offset Number
                    • noAssert Boolean, Optional, Default: false
                    • @@ -4848,8 +5090,8 @@ complement signed values.

                      -

                      buf.readFloatLE(offset, [noAssert])#

                      -

                      buf.readFloatBE(offset, [noAssert])#

                      +

                      buf.readFloatLE(offset[, noAssert])#

                      +

                      buf.readFloatBE(offset[, noAssert])#

                      • offset Number
                      • noAssert Boolean, Optional, Default: false
                      • @@ -4876,8 +5118,8 @@ console.log(buf.readFloatLE(0)); // 0x01 -

                        buf.readDoubleLE(offset, [noAssert])#

                        -

                        buf.readDoubleBE(offset, [noAssert])#

                        +

                        buf.readDoubleLE(offset[, noAssert])#

                        +

                        buf.readDoubleBE(offset[, noAssert])#

                        • offset Number
                        • noAssert Boolean, Optional, Default: false
                        • @@ -4908,7 +5150,7 @@ console.log(buf.readDoubleLE(0)); // 0.3333333333333333 -

                          buf.writeUInt8(value, offset, [noAssert])#

                          +

                          buf.writeUInt8(value, offset[, noAssert])#

                          • value Number
                          • offset Number
                          • @@ -4936,8 +5178,8 @@ console.log(buf); // <Buffer 03 04 23 42> -

                            buf.writeUInt16LE(value, offset, [noAssert])#

                            -

                            buf.writeUInt16BE(value, offset, [noAssert])#

                            +

                            buf.writeUInt16LE(value, offset[, noAssert])#

                            +

                            buf.writeUInt16BE(value, offset[, noAssert])#

                            • value Number
                            • offset Number
                            • @@ -4969,8 +5211,8 @@ // <Buffer de ad be ef> // <Buffer ad de ef be> -

                              buf.writeUInt32LE(value, offset, [noAssert])#

                              -

                              buf.writeUInt32BE(value, offset, [noAssert])#

                              +

                              buf.writeUInt32LE(value, offset[, noAssert])#

                              +

                              buf.writeUInt32BE(value, offset[, noAssert])#

                              • value Number
                              • offset Number
                              • @@ -5000,7 +5242,7 @@ // <Buffer fe ed fa ce> // <Buffer ce fa ed fe> -

                                buf.writeInt8(value, offset, [noAssert])#

                                +

                                buf.writeInt8(value, offset[, noAssert])#

                                • value Number
                                • offset Number
                                • @@ -5020,8 +5262,8 @@ signed integer into buffer.

                                  -

                                  buf.writeInt16LE(value, offset, [noAssert])#

                                  -

                                  buf.writeInt16BE(value, offset, [noAssert])#

                                  +

                                  buf.writeInt16LE(value, offset[, noAssert])#

                                  +

                                  buf.writeInt16BE(value, offset[, noAssert])#

                                  • value Number
                                  • offset Number
                                  • @@ -5041,8 +5283,8 @@ complement signed integer into buffer.

                                    -

                                    buf.writeInt32LE(value, offset, [noAssert])#

                                    -

                                    buf.writeInt32BE(value, offset, [noAssert])#

                                    +

                                    buf.writeInt32LE(value, offset[, noAssert])#

                                    +

                                    buf.writeInt32BE(value, offset[, noAssert])#

                                    • value Number
                                    • offset Number
                                    • @@ -5062,8 +5304,8 @@ complement signed integer into buffer.

                                      -

                                      buf.writeFloatLE(value, offset, [noAssert])#

                                      -

                                      buf.writeFloatBE(value, offset, [noAssert])#

                                      +

                                      buf.writeFloatLE(value, offset[, noAssert])#

                                      +

                                      buf.writeFloatBE(value, offset[, noAssert])#

                                      • value Number
                                      • offset Number
                                      • @@ -5093,8 +5335,8 @@ // <Buffer 4f 4a fe bb> // <Buffer bb fe 4a 4f> -

                                        buf.writeDoubleLE(value, offset, [noAssert])#

                                        -

                                        buf.writeDoubleBE(value, offset, [noAssert])#

                                        +

                                        buf.writeDoubleLE(value, offset[, noAssert])#

                                        +

                                        buf.writeDoubleBE(value, offset[, noAssert])#

                                        • value Number
                                        • offset Number
                                        • @@ -5124,7 +5366,7 @@ // <Buffer 43 eb d5 b7 dd f9 5f d7> // <Buffer d7 5f f9 dd b7 d5 eb 43> -

                                          buf.fill(value, [offset], [end])#

                                          +

                                          buf.fill(value[, offset][, end])#

                                          • value
                                          • offset Number, Optional
                                          • @@ -5137,10 +5379,6 @@

                                            var b = new Buffer(50);
                                             b.fill("h");
                                            -

                                            buf.toArrayBuffer()#

                                            -

                                            Creates a new ArrayBuffer with the copied memory of the buffer instance. - -

                                            buffer.INSPECT_MAX_BYTES#

                                            • Number, Default: 50
                                            • @@ -5255,7 +5493,7 @@ // Readable streams emit 'data' events once a listener is added req.on('data', function (chunk) { body += chunk; - }) + }); // the end event tells you that you have entire body req.on('end', function () { @@ -5270,8 +5508,8 @@ // write back something interesting to the user: res.write(typeof data); res.end(); - }) -}) + }); +}); server.listen(1337); @@ -5356,7 +5594,7 @@
                                              var readable = getReadableStreamSomehow();
                                               readable.on('readable', function() {
                                                 // there is some data to read now
                                              -})
                                              +});

                                              Once the internal buffer is drained, a readable event will fire again when more data is available. @@ -5377,9 +5615,9 @@

                                              var readable = getReadableStreamSomehow();
                                               readable.on('data', function(chunk) {
                                                 console.log('got %d bytes of data', chunk.length);
                                              -})
                                              +});

                                              Event: 'end'#

                                              -

                                              This event fires when no more data will be provided. +

                                              This event fires when there will be no more data to read.

                                              Note that the end event will not fire unless the data is @@ -5390,7 +5628,7 @@

                                              var readable = getReadableStreamSomehow();
                                               readable.on('data', function(chunk) {
                                                 console.log('got %d bytes of data', chunk.length);
                                              -})
                                              +});
                                               readable.on('end', function() {
                                                 console.log('there will be no more data.');
                                               });
                                              @@ -5400,6 +5638,9 @@

                                              Event: 'error'#

                                              +
                                                +
                                              • Error Object
                                              • +

                                            Emitted if there was an error receiving data.

                                            @@ -5461,7 +5702,7 @@ readable.on('data', function(chunk) { assert.equal(typeof chunk, 'string'); console.log('got %d characters of string data', chunk.length); -}) +});

                                            readable.resume()#

                                            • Return: this
                                            • @@ -5480,7 +5721,7 @@ readable.resume(); readable.on('end', function(chunk) { console.log('got to the end, but did not read anything'); -}) +});

                                              readable.pause()#

                                              • Return: this
                                              • @@ -5499,8 +5740,24 @@ console.log('now data will start flowing again'); readable.resume(); }, 1000); -}) -

                                                readable.pipe(destination, [options])#

                                                +}); +

                                                readable.isPaused()#

                                                +
                                                  +
                                                • Return: Boolean
                                                • +
                                              +

                                              This method returns whether or not the readable has been explicitly +paused by client code (using readable.pause() without a corresponding +readable.resume()). + +

                                              +
                                              var readable = new stream.Readable
                                              +
                                              +readable.isPaused() // === false
                                              +readable.pause()
                                              +readable.isPaused() // === true
                                              +readable.resume()
                                              +readable.isPaused() // === false
                                              +

                                              readable.pipe(destination[, options])#

                                              • destination Writable Stream The destination for writing data
                                              • options Object Pipe options -

                                                writable.write(chunk, [encoding], [callback])#

                                                +

                                                writable.write(chunk[, encoding][, callback])#

                                                • chunk String | Buffer The data to write
                                                • encoding String The encoding, if chunk is a String
                                                • @@ -5735,7 +5992,16 @@

                                                  Flush all data, buffered since .cork() call.

                                                  -

                                                  writable.end([chunk], [encoding], [callback])#

                                                  +

                                                  writable.setDefaultEncoding(encoding)#

                                                  +
                                                    +
                                                  • encoding String The new default encoding
                                                  • +
                                                  • Return: Boolean
                                                  • +
                                                +

                                                Sets the default encoding for a writable stream. Returns true if the encoding +is valid and is set. Otherwise returns false. + +

                                                +

                                                writable.end([chunk][, encoding][, callback])#

                                                • chunk String | Buffer Optional data to write
                                                • encoding String The encoding, if chunk is a String
                                                • @@ -5749,11 +6015,10 @@

                                                  // write 'hello, ' and then end with 'world!'
                                                  -http.createServer(function (req, res) {
                                                  -  res.write('hello, ');
                                                  -  res.end('world!');
                                                  -  // writing more now is not allowed!
                                                  -});
                                                  +var file = fs.createWriteStream('example.txt'); +file.write('hello, '); +file.end('world!'); +// writing more now is not allowed!

                                                  Event: 'finish'#

                                                  When the end() method has been called, and all data has been flushed to the underlying system, this event is emitted. @@ -5799,6 +6064,9 @@ reader.pipe(writer); reader.unpipe(writer);

                                                  Event: 'error'#

                                                  +
                                                    +
                                                  • Error object
                                                  • +

                                                Emitted if there was an error when writing or piping data.

                                                @@ -6113,7 +6381,7 @@ size bytes are available before calling stream.push(chunk).

                                                -

                                                readable.push(chunk, [encoding])#

                                                +

                                                readable.push(chunk[, encoding])#

                                                • chunk Buffer | null | String Chunk of data to push into the read queue
                                                • encoding String Encoding of String chunks. Must be a valid @@ -6163,7 +6431,7 @@ self._source.readStop(); }; - // When the source ends, we push the EOF-signalling `null` chunk + // When the source ends, we push the EOF-signaling `null` chunk this._source.onend = function() { self.push(null); }; @@ -6338,7 +6606,7 @@
                                                • encoding String If the chunk is a string, then this is the encoding type. (Ignore if decodeStrings chunk is a buffer.)
                                                • callback Function Call this function (optionally with an error -argument) when you are done processing the supplied chunk.
                                                • +argument and data) when you are done processing the supplied chunk.

                                              Note: This function MUST NOT be called directly. It should be implemented by child classes, and called by the internal Transform @@ -6362,9 +6630,19 @@

                                               Call the callback function only when the current chunk is completely consumed. Note that there may or may not be output as a result of any -particular input chunk. +particular input chunk. If you supply a second argument to the callback +it will be passed to the push method, in other words the following are +equivalent:

                                              +
                                              transform.prototype._transform = function (data, encoding, callback) {
                                              +  this.push(data);
                                              +  callback();
                                              +}
                                              +
                                              +transform.prototype._transform = function (data, encoding, callback) {
                                              +  callback(null, data);
                                              +}

                                              This method is prefixed with an underscore because it is internal to the class that defines it, and should not be called directly by user programs. However, you are expected to override this method in @@ -6402,6 +6680,14 @@ your own extension classes.

                                              +

                                              Events: 'finish' and 'end'#

                                              +

                                              The finish and end events are from the parent Writable +and Readable classes respectively. The finish event is fired after +.end() is called and all chunks have been processed by _transform, +end is fired after all data has been output which is after the callback +in _flush has been called. + +

                                              Example: SimpleProtocol parser v2#

                                              The example above of a simple protocol parser can be implemented simply by using the higher level Transform stream class, similar to @@ -6662,7 +6948,7 @@

                                              For Duplex streams objectMode can be set exclusively for readable or writable side with readableObjectMode and writableObjectMode -respectivly. These options can be used to implement parsers and +respectively. These options can be used to implement parsers and serializers with Transform streams.

                                              @@ -6731,7 +7017,7 @@

                                              -

                                              crypto.setEngine(engine, [flags])#

                                              +

                                              crypto.setEngine(engine[, flags])#

                                              Load and set engine for some/all OpenSSL functions (selected by flags).

                                              @@ -6776,7 +7062,7 @@
                                              var hashes = crypto.getHashes();
                                               console.log(hashes); // ['sha', 'sha1', 'sha1WithRSAEncryption', ...]

                                              crypto.createCredentials(details)#

                                              -

                                              Creates a credentials object, with the optional details being a +

                                              Stability: 0 - Deprecated. Use tls.createSecureContext instead.

                                              Creates a credentials object, with the optional details being a dictionary with keys:

                                              @@ -6845,7 +7131,7 @@

                                              Returned by crypto.createHash.

                                              -

                                              hash.update(data, [input_encoding])#

                                              +

                                              hash.update(data[, input_encoding])#

                                              Updates the hash content with the given data, the encoding of which is given in input_encoding and can be 'utf8', 'ascii' or 'binary'. If no encoding is provided and the input is a string an @@ -6918,8 +7204,20 @@

                                              It is a stream that is both readable and writable. The written data is used to compute the hash. Once the writable side of -the stream is ended, use the read() method to get the computed hash -digest. The legacy update and digest methods are also supported. +the stream is ended, use the read() method to get the enciphered +contents. The legacy update and final methods are also supported. + +

                                              +

                                              Note: createCipher derives keys with the OpenSSL function EVP_BytesToKey +with the digest algorithm set to MD5, one iteration, and no salt. The lack of +salt allows dictionary attacks as the same password always creates the same key. +The low iteration count and non-cryptographically secure hash algorithm allow +passwords to be tested very rapidly. + +

                                              +

                                              In line with OpenSSL's recommendation to use pbkdf2 instead of EVP_BytesToKey it +is recommended you derive a key and iv yourself with crypto.pbkdf2 and to +then use createCipheriv() to create the cipher stream.

                                              crypto.createCipheriv(algorithm, key, iv)#

                                              @@ -6949,7 +7247,7 @@ methods are also supported.

                                              -

                                              cipher.update(data, [input_encoding], [output_encoding])#

                                              +

                                              cipher.update(data[, input_encoding][, output_encoding])#

                                              Updates the cipher with data, the encoding of which is given in input_encoding and can be 'utf8', 'ascii' or 'binary'. If no encoding is provided, then a buffer is expected. @@ -7020,7 +7318,7 @@ final methods are also supported.

                                              -

                                              decipher.update(data, [input_encoding], [output_encoding])#

                                              +

                                              decipher.update(data[, input_encoding][, output_encoding])#

                                              Updates the decipher with data, which is encoded in 'binary', 'base64' or 'hex'. If no encoding is provided, then a buffer is expected. @@ -7089,7 +7387,7 @@ with new data as it is streamed.

                                              -

                                              sign.sign(private_key, [output_format])#

                                              +

                                              sign.sign(private_key[, output_format])#

                                              Calculates the signature on all the updated data passed through the sign. @@ -7138,7 +7436,7 @@ with new data as it is streamed.

                                              -

                                              verifier.verify(object, signature, [signature_format])#

                                              +

                                              verifier.verify(object, signature[, signature_format])#

                                              Verifies the signed data by using the object and signature. object is a string containing a PEM encoded object, which can be one of RSA public key, DSA public key, or X.509 certificate. @@ -7155,13 +7453,13 @@ called.

                                              -

                                              crypto.createDiffieHellman(prime_length, [generator])#

                                              +

                                              crypto.createDiffieHellman(prime_length[, generator])#

                                              Creates a Diffie-Hellman key exchange object and generates a prime of prime_length bits and using an optional specific numeric generator. If no generator is specified, then 2 is used.

                                              -

                                              crypto.createDiffieHellman(prime, [prime_encoding], [generator], [generator_encoding])#

                                              +

                                              crypto.createDiffieHellman(prime[, prime_encoding][, generator][, generator_encoding])#

                                              Creates a Diffie-Hellman key exchange object using the supplied prime and an optional specific generator. generator can be a number, string, or Buffer. @@ -7197,7 +7495,7 @@ or 'base64'. If no encoding is provided, then a buffer is returned.

                                              -

                                              diffieHellman.computeSecret(other_public_key, [input_encoding], [output_encoding])#

                                              +

                                              diffieHellman.computeSecret(other_public_key[, input_encoding][, output_encoding])#

                                              Computes the shared secret using other_public_key as the other party's public key and returns the computed shared secret. Supplied key is interpreted using specified input_encoding, and secret is @@ -7233,13 +7531,13 @@ provided, then a buffer is returned.

                                              -

                                              diffieHellman.setPublicKey(public_key, [encoding])#

                                              +

                                              diffieHellman.setPublicKey(public_key[, encoding])#

                                              Sets the Diffie-Hellman public key. Key encoding can be 'binary', 'hex' or 'base64'. If no encoding is provided, then a buffer is expected.

                                              -

                                              diffieHellman.setPrivateKey(private_key, [encoding])#

                                              +

                                              diffieHellman.setPrivateKey(private_key[, encoding])#

                                              Sets the Diffie-Hellman private key. Key encoding can be 'binary', 'hex' or 'base64'. If no encoding is provided, then a buffer is expected. @@ -7273,7 +7571,92 @@ /* alice_secret and bob_secret should be the same */ console.log(alice_secret == bob_secret); -

                                              crypto.pbkdf2(password, salt, iterations, keylen, [digest], callback)#

                                              +

                                              crypto.createECDH(curve_name)#

                                              +

                                              Creates an Elliptic Curve (EC) Diffie-Hellman key exchange object using a +predefined curve specified by curve_name string. + +

                                              +

                                              Class: ECDH#

                                              +

                                              The class for creating EC Diffie-Hellman key exchanges. + +

                                              +

                                              Returned by crypto.createECDH. + +

                                              +

                                              ECDH.generateKeys([encoding[, format]])#

                                              +

                                              Generates private and public EC Diffie-Hellman key values, and returns +the public key in the specified format and encoding. This key should be +transferred to the other party. + +

                                              +

                                              Format specifies point encoding and can be 'compressed', 'uncompressed', or +'hybrid'. If no format is provided - the point will be returned in +'uncompressed' format. + +

                                              +

                                              Encoding can be 'binary', 'hex', or 'base64'. If no encoding is provided, +then a buffer is returned. + +

                                              +

                                              ECDH.computeSecret(other_public_key[, input_encoding][, output_encoding])#

                                              +

                                              Computes the shared secret using other_public_key as the other +party's public key and returns the computed shared secret. Supplied +key is interpreted using specified input_encoding, and secret is +encoded using specified output_encoding. Encodings can be +'binary', 'hex', or 'base64'. If the input encoding is not +provided, then a buffer is expected. + +

                                              +

                                              If no output encoding is given, then a buffer is returned. + +

                                              +

                                              ECDH.getPublicKey([encoding[, format]])#

                                              +

                                              Returns the EC Diffie-Hellman public key in the specified encoding and format. + +

                                              +

                                              Format specifies point encoding and can be 'compressed', 'uncompressed', or +'hybrid'. If no format is provided - the point will be returned in +'uncompressed' format. + +

                                              +

                                              Encoding can be 'binary', 'hex', or 'base64'. If no encoding is provided, +then a buffer is returned. + +

                                              +

                                              ECDH.getPrivateKey([encoding])#

                                              +

                                              Returns the EC Diffie-Hellman private key in the specified encoding, +which can be 'binary', 'hex', or 'base64'. If no encoding is +provided, then a buffer is returned. + +

                                              +

                                              ECDH.setPublicKey(public_key[, encoding])#

                                              +

                                              Sets the EC Diffie-Hellman public key. Key encoding can be 'binary', +'hex' or 'base64'. If no encoding is provided, then a buffer is +expected. + +

                                              +

                                              ECDH.setPrivateKey(private_key[, encoding])#

                                              +

                                              Sets the EC Diffie-Hellman private key. Key encoding can be 'binary', +'hex' or 'base64'. If no encoding is provided, then a buffer is +expected. + +

                                              +

                                              Example (obtaining a shared secret): + +

                                              +
                                              var crypto = require('crypto');
                                              +var alice = crypto.createECDH('secp256k1');
                                              +var bob = crypto.createECDH('secp256k1');
                                              +
                                              +alice.generateKeys();
                                              +bob.generateKeys();
                                              +
                                              +var alice_secret = alice.computeSecret(bob.getPublicKey(), null, 'hex');
                                              +var bob_secret = bob.computeSecret(alice.getPublicKey(), null, 'hex');
                                              +
                                              +/* alice_secret and bob_secret should be the same */
                                              +console.log(alice_secret == bob_secret);
                                              +

                                              crypto.pbkdf2(password, salt, iterations, keylen[, digest], callback)#

                                              Asynchronous PBKDF2 function. Applies the selected HMAC digest function (default: SHA1) to derive a key of the requested length from the password, salt and number of iterations. The callback gets two arguments: @@ -7292,11 +7675,11 @@ crypto.getHashes().

                                              -

                                              crypto.pbkdf2Sync(password, salt, iterations, keylen, [digest])#

                                              +

                                              crypto.pbkdf2Sync(password, salt, iterations, keylen[, digest])#

                                              Synchronous PBKDF2 function. Returns derivedKey or throws error.

                                              -

                                              crypto.randomBytes(size, [callback])#

                                              +

                                              crypto.randomBytes(size[, callback])#

                                              Generates cryptographically strong pseudo-random data. Usage:

                                              @@ -7320,7 +7703,7 @@ are drained.

                                              -

                                              crypto.pseudoRandomBytes(size, [callback])#

                                              +

                                              crypto.pseudoRandomBytes(size[, callback])#

                                              Generates non-cryptographically strong pseudo-random data. The data returned will be unique if it is sufficiently long, but is not necessarily unpredictable. For this reason, the output of this @@ -7352,6 +7735,53 @@

                                              Exports the encoded challenge associated with the SPKAC.

                                              +

                                              crypto.publicEncrypt(public_key, buffer)#

                                              +

                                              Encrypts buffer with public_key. Only RSA is currently supported. + +

                                              +

                                              public_key can be an object or a string. If public_key is a string, it is +treated as the key with no passphrase and will use RSA_PKCS1_OAEP_PADDING. + +

                                              +

                                              public_key: + +

                                              +
                                                +
                                              • key : A string holding the PEM encoded public key
                                              • +
                                              • padding : An optional padding value, one of the following:
                                                  +
                                                • constants.RSA_NO_PADDING
                                                • +
                                                • constants.RSA_PKCS1_PADDING
                                                • +
                                                • constants.RSA_PKCS1_OAEP_PADDING
                                                • +
                                                +
                                              • +
                                              +

                                              NOTE: All paddings are defined in constants module. + +

                                              +

                                              crypto.privateDecrypt(private_key, buffer)#

                                              +

                                              Decrypts buffer with private_key. + +

                                              +

                                              private_key can be an object or a string. If private_key is a string, it is +treated as the key with no passphrase and will use RSA_PKCS1_OAEP_PADDING. + +

                                              +

                                              private_key: + +

                                              +
                                                +
                                              • key : A string holding the PEM encoded private key
                                              • +
                                              • passphrase : An optional string of passphrase for the private key
                                              • +
                                              • padding : An optional padding value, one of the following:
                                                  +
                                                • constants.RSA_NO_PADDING
                                                • +
                                                • constants.RSA_PKCS1_PADDING
                                                • +
                                                • constants.RSA_PKCS1_OAEP_PADDING
                                                • +
                                                +
                                              • +
                                              +

                                              NOTE: All paddings are defined in constants module. + +

                                              crypto.DEFAULT_ENCODING#

                                              The default encoding to use for functions that can take either strings or buffers. The default value is 'buffer', which makes it default @@ -7411,14 +7841,14 @@ server must have a private key. A private key is created like this:

                                              -
                                              openssl genrsa -out ryans-key.pem 1024
                                              +
                                              openssl genrsa -out ryans-key.pem 2048

                                              All servers and some clients need to have a certificate. Certificates are public keys signed by a Certificate Authority or self-signed. The first step to getting a certificate is to create a "Certificate Signing Request" (CSR) file. This is done with:

                                              -
                                              openssl req -new -key ryans-key.pem -out ryans-csr.pem
                                              +
                                              openssl req -new -sha256 -key ryans-key.pem -out ryans-csr.pem

                                              To create a self-signed certificate with the CSR, do this:

                                              @@ -7441,6 +7871,46 @@
                                            • certfile: all CA certs concatenated in one file like cat ca1-cert.pem ca2-cert.pem > ca-cert.pem
                                            +

                                            Protocol support#

                                            +

                                            Node.js is compiled with SSLv2 and SSLv3 protocol support by default, but these +protocols are disabled. They are considered insecure and could be easily +compromised as was shown by CVE-2014-3566. However, in some situations, it +may cause problems with legacy clients/servers (such as Internet Explorer 6). +If you wish to enable SSLv2 or SSLv3, run node with the --enable-ssl2 or +--enable-ssl3 flag respectively. In future versions of Node.js SSLv2 and +SSLv3 will not be compiled in by default. + +

                                            +

                                            There is a way to force node into using SSLv3 or SSLv2 only mode by explicitly +specifying secureProtocol to 'SSLv3_method' or 'SSLv2_method'. + +

                                            +

                                            The default protocol method Node.js uses is SSLv23_method which would be more +accurately named AutoNegotiate_method. This method will try and negotiate +from the highest level down to whatever the client supports. To provide a +secure default, Node.js (since v0.10.33) explicitly disables the use of SSLv3 +and SSLv2 by setting the secureOptions to be +SSL_OP_NO_SSLv3|SSL_OP_NO_SSLv2 (again, unless you have passed +--enable-ssl3, or --enable-ssl2, or SSLv3_method as secureProtocol). + +

                                            +

                                            If you have set secureOptions to anything, we will not override your +options. + +

                                            +

                                            The ramifications of this behavior change: + +

                                            +
                                              +
                                            • If your application is behaving as a secure server, clients who are SSLv3 +only will now not be able to appropriately negotiate a connection and will be +refused. In this case your server will emit a clientError event. The error +message will include 'wrong version number'.
                                            • +
                                             • If your application is behaving as a secure client and communicating with a +server that doesn't support methods more secure than SSLv3 then your connection +won't be able to negotiate and will fail. In this case your client will emit +an error event. The error message will include 'wrong version number'.
                                            • +

                                            Client-initiated renegotiation attack mitigation#

                                            @@ -7498,7 +7968,7 @@ this technique, thus offering Perfect Forward Secrecy, are called "ephemeral".

                                            -

                                            Currently two methods are commonly used to achieve Perfect Forward Secrecy (note +

                                            Currently two methods are commonly used to achieve Perfect Forward Secrecy (note the character "E" appended to the traditional abbreviations):

                                            @@ -7521,7 +7991,7 @@

                                            var ciphers = tls.getCiphers();
                                             console.log(ciphers); // ['AES128-SHA', 'AES256-SHA', ...]
                                            -

                                            tls.createServer(options, [secureConnectionListener])#

                                            +

                                            tls.createServer(options[, secureConnectionListener])#

                                            Creates a new tls.Server. The connectionListener argument is automatically set as a listener for the secureConnection event. The options object has these possibilities: @@ -7533,12 +8003,12 @@ the key, cert and ca options.)

                                          • key: A string or Buffer containing the private key of the server in -PEM format. (Required)

                                            +PEM format. (Could be an array of keys). (Required)

                                          • passphrase: A string of passphrase for the private key or pfx.

                                          • cert: A string or Buffer containing the certificate key of the server in -PEM format. (Required)

                                            +PEM format. (Could be an array of certs). (Required)

                                          • ca: An array of strings or Buffers of trusted certificates in PEM format. If this is omitted several well known "root" CAs will be used, @@ -7551,17 +8021,19 @@

                                            To mitigate BEAST attacks it is recommended that you use this option in conjunction with the honorCipherOrder option described below to prioritize the non-CBC cipher.

                                            -

                                            Defaults to ECDHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH. -Consult the OpenSSL cipher list format documentation for details on the -format.

                                            -

                                            ECDHE-RSA-AES128-SHA256 and AES128-GCM-SHA256 are TLS v1.2 ciphers and -used when node.js is linked against OpenSSL 1.0.1 or newer, such as the -bundled version of OpenSSL. Note that it is still possible for a TLS v1.2 -client to negotiate a weaker cipher unless honorCipherOrder is enabled.

                                            +

                                            Defaults to +ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL. +Consult the OpenSSL cipher list format documentation for details +on the format.

                                            +

                                            ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256 and +AES128-GCM-SHA256 are TLS v1.2 ciphers and used when node.js is +linked against OpenSSL 1.0.1 or newer, such as the bundled version +of OpenSSL. Note that it is still possible for a TLS v1.2 client +to negotiate a weaker cipher unless honorCipherOrder is enabled.

                                             RC4 is used as a fallback for clients that speak an older version of the TLS protocol. RC4 has in recent years come under suspicion and should be considered compromised for anything that is truly sensitive. -It is speculated that state-level actors posess the ability to break it.

                                            +It is speculated that state-level actors possess the ability to break it.

                                            NOTE: Previous revisions of this section suggested AES256-SHA as an acceptable cipher. Unfortunately, AES256-SHA is a CBC cipher and therefore susceptible to BEAST attacks. Do not use it.

                                            @@ -7570,6 +8042,10 @@ or false to disable ECDH.

                                            Defaults to prime256v1. Consult RFC 4492 for more details.

                                          • +
                                          • dhparam: DH parameter file to use for DHE key agreement. Use +openssl dhparam command to create it. If the file is invalid to +load, it is silently discarded.

                                            +
                                          • handshakeTimeout: Abort the connection if the SSL/TLS handshake does not finish in this many milliseconds. The default is 120 seconds.

                                            A 'clientError' is emitted on the tls.Server object whenever a handshake @@ -7592,6 +8068,10 @@ which is not authorized with the list of supplied CAs. This option only has an effect if requestCert is true. Default: false.

                                          • +
                                          • checkServerIdentity(servername, cert): Provide an override for checking +server's hostname against the certificate. Should return an error if verification +fails. Return undefined if passing.

                                            +
                                          • NPNProtocols: An array or Buffer of possible NPN protocols. (Protocols should be ordered by their priority).

                                          • @@ -7612,7 +8092,7 @@ tickets on multiple instances of tls server.

                                            NOTE: Automatically shared between cluster module workers.

                                            -
                                          • sessionIdContext: A string containing a opaque identifier for session +

                                          • sessionIdContext: A string containing an opaque identifier for session resumption. If requestCert is true, the default is MD5 hash value generated from command-line. Otherwise, the default is not provided.

                                          • @@ -7620,6 +8100,10 @@ SSL version 3. The possible values depend on your installation of OpenSSL and are defined in the constant SSL_METHODS.

                                            +
                                          • secureOptions: Set server options. For example, to disable the SSLv3 +protocol set the SSL_OP_NO_SSLv3 flag. See SSL_CTX_set_options +for all available options.

                                            +

                                          Here is a simple example echo server: @@ -7677,8 +8161,8 @@

                                          openssl s_client -connect 127.0.0.1:8000
                                          -

                                          tls.connect(options, [callback])#

                                          -

                                          tls.connect(port, [host], [options], [callback])#

                                          +

                                          tls.connect(options[, callback])#

                                          +

                                          tls.connect(port[, host][, options][, callback])#

                                          Creates a new client connection to the given port and host (old API) or options.port and options.host. (If host is omitted, it defaults to localhost.) options should be an object which specifies: @@ -7700,12 +8184,12 @@ CA certs of the client in PFX or PKCS12 format.

                                        • key: A string or Buffer containing the private key of the client in -PEM format.

                                          +PEM format. (Could be an array of keys).

                                        • passphrase: A string of passphrase for the private key or pfx.

                                        • cert: A string or Buffer containing the certificate key of the client in -PEM format.

                                          +PEM format. (Could be an array of certs).

                                        • ca: An array of strings or Buffers of trusted certificates in PEM format. If this is omitted several well known "root" CAs will be used, @@ -7825,8 +8309,38 @@ before establishing secure communication

                                        -

                                        tls.createSecurePair([context], [isServer], [requestCert], [rejectUnauthorized])#

                                        -
                                        Stability: 0 - Deprecated. Use tls.TLSSocket instead.

                                        Creates a new secure pair object with two streams, one of which reads/writes +

                                        tls.createSecureContext(details)#

                                        +

                                        Creates a credentials object, with the optional details being a +dictionary with keys: + +

                                        +
                                          +
                                        • pfx : A string or buffer holding the PFX or PKCS12 encoded private +key, certificate and CA certificates
                                        • +
                                        • key : A string holding the PEM encoded private key
                                        • +
                                        • passphrase : A string of passphrase for the private key or pfx
                                        • +
                                        • cert : A string holding the PEM encoded certificate
                                        • +
                                        • ca : Either a string or list of strings of PEM encoded CA +certificates to trust.
                                        • +
                                        • crl : Either a string or list of strings of PEM encoded CRLs +(Certificate Revocation List)
                                        • +
                                        • ciphers: A string describing the ciphers to use or exclude. +Consult +http://www.openssl.org/docs/apps/ciphers.html#CIPHER_LIST_FORMAT +for details on the format.
                                        • +
                                        • honorCipherOrder : When choosing a cipher, use the server's preferences +instead of the client preferences. For further details see tls module +documentation.
                                        • +
                                        +

                                        If no 'ca' details are given, then node.js will use the default +publicly trusted list of CAs as given in +

                                        +

                                        http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt. + + +

                                        +

                                        tls.createSecurePair([context][, isServer][, requestCert][, rejectUnauthorized])#

                                        +

                                        Creates a new secure pair object with two streams, one of which reads/writes encrypted data, and one reads/writes cleartext data. Generally the encrypted one is piped to/from an incoming encrypted data stream, and the cleartext one is used as a replacement for the initial encrypted stream. @@ -7878,7 +8392,7 @@

                                        This event is emitted after a new connection has been successfully -handshaked. The argument is a instance of tls.TLSSocket. It has all the +handshaked. The argument is an instance of tls.TLSSocket. It has all the common stream methods and events.

                                        @@ -7969,8 +8483,9 @@
                                      • Client validates the response and either destroys socket or performs a handshake.
                                      • -

                                        NOTE: issuer could be null, if certficiate is self-signed or if issuer is not -in the root certificates list. (You could provide an issuer via ca option.) +

                                        NOTE: issuer could be null, if the certificate is self-signed or if the issuer +is not in the root certificates list. (You could provide an issuer via ca +option.)

                                        NOTE: adding this event listener will have an effect only on connections @@ -7982,7 +8497,7 @@

                                        -

                                        server.listen(port, [host], [callback])#

                                        +

                                        server.listen(port[, host][, callback])#

                                        Begin accepting connections on the specified port and host. If the host is omitted, the server will accept connections directed to any IPv4 address (INADDR_ANY). @@ -8011,7 +8526,8 @@

                                        server.addContext(hostname, context)#

                                        Add secure context that will be used if client request's SNI hostname is matching passed hostname (wildcards can be used). context can contain -key, cert and ca. +key, cert, ca and/or any other properties from tls.createSecureContext +options argument.

                                        server.maxConnections#

                                        @@ -8181,6 +8697,10 @@ '74.125.127.100' or '2001:4860:a005::68'.

                                        +

                                        tlsSocket.remoteFamily#

                                        +

                                        The string representation of the remote IP family. 'IPv4' or 'IPv6'. + +

                                        tlsSocket.remotePort#

                                        The numeric representation of the remote port. For example, 443. @@ -8431,7 +8951,7 @@

                                        Synchronous link(2).

                                        -

                                        fs.symlink(srcpath, dstpath, [type], callback)#

                                        +

                                        fs.symlink(srcpath, dstpath[, type], callback)#

                                        Asynchronous symlink(2). No arguments other than a possible exception are given to the completion callback. The type argument can be set to 'dir', 'file', or 'junction' (default @@ -8440,7 +8960,7 @@ 'junction', the destination argument will automatically be normalized to absolute path.

                                        -

                                        fs.symlinkSync(srcpath, dstpath, [type])#

                                        +

                                        fs.symlinkSync(srcpath, dstpath[, type])#

                                        Synchronous symlink(2).

                                        @@ -8453,7 +8973,7 @@

                                        Synchronous readlink(2). Returns the symbolic link's string value.

                                        -

                                        fs.realpath(path, [cache], callback)#

                                        +

                                        fs.realpath(path[, cache], callback)#

                                        Asynchronous realpath(2). The callback gets two arguments (err, resolvedPath). May use process.cwd to resolve relative paths. cache is an object literal of mapped paths that can be used to force a specific path @@ -8468,7 +8988,7 @@ if (err) throw err; console.log(resolvedPath); }); -

                                        fs.realpathSync(path, [cache])#

                                        +

                                        fs.realpathSync(path[, cache])#

                                        Synchronous realpath(2). Returns the resolved path.

                                        @@ -8490,12 +9010,12 @@

                                        Synchronous rmdir(2).

                                        -

                                        fs.mkdir(path, [mode], callback)#

                                        +

                                        fs.mkdir(path[, mode], callback)#

                                        Asynchronous mkdir(2). No arguments other than a possible exception are given to the completion callback. mode defaults to 0777.

                                        -

                                        fs.mkdirSync(path, [mode])#

                                        +

                                        fs.mkdirSync(path[, mode])#

                                        Synchronous mkdir(2).

                                        @@ -8519,7 +9039,7 @@

                                        Synchronous close(2).

                                        -

                                        fs.open(path, flags, [mode], callback)#

                                        +

                                        fs.open(path, flags[, mode], callback)#

                                        Asynchronous file open. See open(2). flags can be:

                                        @@ -8580,7 +9100,7 @@ the end of the file.

                                        -

                                        fs.openSync(path, flags, [mode])#

                                        +

                                        fs.openSync(path, flags[, mode])#

                                        Synchronous version of fs.open().

                                        @@ -8693,7 +9213,7 @@

                                        Synchronous version of fs.read. Returns the number of bytesRead.

                                        -

                                        fs.readFile(filename, [options], callback)#

                                        +

                                        fs.readFile(filename[, options], callback)#

                                        • filename String
                                        • options Object
                                            @@ -8718,7 +9238,7 @@

                                            -

                                            fs.readFileSync(filename, [options])#

                                            +

                                            fs.readFileSync(filename[, options])#

                                            Synchronous version of fs.readFile. Returns the contents of the filename.

                                            @@ -8727,7 +9247,7 @@

                                            -

                                            fs.writeFile(filename, data, [options], callback)#

                                            +

                                            fs.writeFile(filename, data[, options], callback)#

                                            • filename String
                                            • data String | Buffer
                                            • @@ -8754,11 +9274,11 @@ if (err) throw err; console.log('It\'s saved!'); }); -

                                              fs.writeFileSync(filename, data, [options])#

                                              +

                                              fs.writeFileSync(filename, data[, options])#

                                              The synchronous version of fs.writeFile.

                                              -

                                              fs.appendFile(filename, data, [options], callback)#

                                              +

                                              fs.appendFile(filename, data[, options], callback)#

                                              • filename String
                                              • data String | Buffer
                                              • @@ -8781,11 +9301,11 @@ if (err) throw err; console.log('The "data to append" was appended to file!'); }); -

                                                fs.appendFileSync(filename, data, [options])#

                                                +

                                                fs.appendFileSync(filename, data[, options])#

                                                The synchronous version of fs.appendFile.

                                                -

                                                fs.watchFile(filename, [options], listener)#

                                                +

                                                fs.watchFile(filename[, options], listener)#

                                                Stability: 2 - Unstable.  Use fs.watch instead, if possible.

                                                Watch for changes on filename. The callback listener will be called each time the file is accessed. @@ -8812,7 +9332,7 @@ you need to compare curr.mtime and prev.mtime.

                                                -

                                                fs.unwatchFile(filename, [listener])#

                                                +

                                                fs.unwatchFile(filename[, listener])#

                                                Stability: 2 - Unstable.  Use fs.watch instead, if possible.

                                                Stop watching for changes on filename. If listener is specified, only that particular listener is removed. Otherwise, all listeners are removed and you have effectively stopped watching filename. @@ -8822,7 +9342,7 @@ no-op, not an error.

                                                -

                                                fs.watch(filename, [options], [listener])#

                                                +

                                                fs.watch(filename[, options][, listener])#

                                                Stability: 2 - Unstable.

                                                Watch for changes on filename, where filename is either a file or a directory. The returned object is a fs.FSWatcher. @@ -8915,10 +9435,46 @@ and handle the error when it's not there.

                                                -

                                                fs.existsSync(path)#

                                                +

                                                fs.exists() will be deprecated. + +

                                                +

                                                fs.existsSync(path)#

                                                Synchronous version of fs.exists.

                                                +

                                                fs.existsSync() will be deprecated. + +

                                                +

                                                fs.access(path[, mode], callback)#

                                                +

                                                Tests a user's permissions for the file specified by path. mode is an +optional integer that specifies the accessibility checks to be performed. The +following constants define the possible values of mode. It is possible to +create a mask consisting of the bitwise OR of two or more values. + +

                                                +
                                                  +
                                                • fs.F_OK - File is visible to the calling process. This is useful for +determining if a file exists, but says nothing about rwx permissions. +Default if no mode is specified.
                                                • +
                                                • fs.R_OK - File can be read by the calling process.
                                                • +
                                                • fs.W_OK - File can be written by the calling process.
                                                • +
                                                • fs.X_OK - File can be executed by the calling process. This has no effect +on Windows (will behave like fs.F_OK).
                                                • +
                                                +

                                                The final argument, callback, is a callback function that is invoked with +a possible error argument. If any of the accessibility checks fail, the error +argument will be populated. The following example checks if the file +/etc/passwd can be read and written by the current process. + +

                                                +
                                                fs.access('/etc/passwd', fs.R_OK | fs.W_OK, function(err) {
                                                +  util.debug(err ? 'no access!' : 'can read/write');
                                                +});
                                                +

                                                fs.accessSync(path[, mode])#

                                                +

                                                Synchronous version of fs.access. This throws if any accessibility checks +fail, and does nothing otherwise. + +

                                                Class: fs.Stats#

                                                Objects returned from fs.stat(), fs.lstat() and fs.fstat() and their synchronous counterparts are of this type. @@ -8988,7 +9544,7 @@ on Unix systems, it never was.

                                                -

                                                fs.createReadStream(path, [options])#

                                                +

                                                fs.createReadStream(path[, options])#

                                                Returns a new ReadStream object (See Readable Stream).

                                                @@ -9006,6 +9562,10 @@ start at 0. The encoding can be 'utf8', 'ascii', or 'base64'.

                                                +

                                                If fd is specified, ReadStream will ignore the path argument and will use +the specified file descriptor. This means that no open event will be emitted. + +

                                                If autoClose is false, then the file descriptor won't be closed, even if there's an error. It is your responsibility to close it and make sure there's no file descriptor leak. If autoClose is set to true (default @@ -9029,7 +9589,7 @@

                                                -

                                                fs.createWriteStream(path, [options])#

                                                +

                                                fs.createWriteStream(path[, options])#

                                                Returns a new WriteStream object (See Writable Stream).

                                                @@ -9038,6 +9598,7 @@

                                                { flags: 'w',
                                                   encoding: null,
                                                +  fd: null,
                                                   mode: 0666 }

                                                options may also include a start option to allow writing data at some position past the beginning of the file. Modifying a file rather @@ -9045,6 +9606,12 @@ default mode w.

                                                +

                                                Like ReadStream above, if fd is specified, WriteStream will ignore the +path argument and will use the specified file descriptor. This means that no +open event will be emitted. + + +

                                                Class: fs.WriteStream#

                                                WriteStream is a Writable Stream. @@ -9109,7 +9676,7 @@

                                                path.normalize('/foo/bar//baz/asdf/quux/..')
                                                 // returns
                                                 '/foo/bar/baz/asdf'
                                                -

                                                path.join([path1], [path2], [...])#

                                                +

                                                path.join([path1][, path2][, ...])#

                                                Join all arguments together and normalize the resulting path.

                                                @@ -9135,7 +9702,7 @@ order, until an absolute path is found. If after using all from paths still no absolute path is found, the current working directory is used as well. The resulting path is normalized, and trailing slashes are removed unless the path -gets resolved to the root directory. Non-string arguments are ignored. +gets resolved to the root directory. Non-string from arguments are ignored.

                                                Another way to think of it is as a sequence of cd commands in a shell. @@ -9217,7 +9784,7 @@

                                                path.dirname('/foo/bar/baz/asdf/quux')
                                                 // returns
                                                 '/foo/bar/baz/asdf'
                                                -

                                                path.basename(p, [ext])#

                                                +

                                                path.basename(p[, ext])#

                                                Return the last portion of a path. Similar to the Unix basename command.

                                                @@ -9291,13 +9858,64 @@ process.env.PATH.split(path.delimiter) // returns ['C:\Windows\system32', 'C:\Windows', 'C:\Program Files\nodejs\'] +

                                                path.parse(pathString)#

                                                +

                                                Returns an object from a path string. + +

                                                +

                                                An example on *nix: + +

                                                +
                                                path.parse('/home/user/dir/file.txt')
                                                +// returns
                                                +{
                                                +    root : "/",
                                                +    dir : "/home/user/dir",
                                                +    base : "file.txt",
                                                +    ext : ".txt",
                                                +    name : "file"
                                                +}
                                                +

                                                An example on Windows: + +

                                                +
                                                path.parse('C:\\path\\dir\\index.html')
                                                +// returns
                                                +{
                                                +    root : "C:\",
                                                +    dir : "C:\path\dir",
                                                +    base : "index.html",
                                                +    ext : ".html",
                                                +    name : "index"
                                                +}
                                                +

                                                path.format(pathObject)#

                                                +

                                                Returns a path string from an object, the opposite of path.parse above. + +

                                                +
                                                path.format({
                                                +    root : "/",
                                                +    dir : "/home/user/dir",
                                                +    base : "file.txt",
                                                +    ext : ".txt",
                                                +    name : "file"
                                                +})
                                                +// returns
                                                +'/home/user/dir/file.txt'
                                                +

                                                path.posix#

                                                +

                                                Provide access to aforementioned path methods but always interact in a posix +compatible way. + +

                                                +

                                                path.win32#

                                                +

                                                Provide access to aforementioned path methods but always interact in a win32 +compatible way. + +

                                                net#

                                                Stability: 3 - Stable

                                                The net module provides you with an asynchronous network wrapper. It contains methods for creating both servers and clients (called streams). You can include this module with require('net');

                                                -

                                                net.createServer([options], [connectionListener])#

                                                +

                                                net.createServer([options][, connectionListener])#

                                                Creates a new TCP server. The connectionListener argument is automatically set as a listener for the 'connection' event. @@ -9305,7 +9923,9 @@

                                                options is an object with the following defaults:

                                                -
                                                { allowHalfOpen: false
                                                +
                                                {
                                                +  allowHalfOpen: false,
                                                +  pauseOnConnect: false
                                                 }

                                                If allowHalfOpen is true, then the socket won't automatically send a FIN packet when the other end of the socket sends a FIN packet. The socket becomes @@ -9313,15 +9933,21 @@ See 'end' event for more information.

                                                +

                                                If pauseOnConnect is true, then the socket associated with each incoming +connection will be paused, and no data will be read from its handle. This allows +connections to be passed between processes without any data being read by the +original process. To begin reading data from a paused socket, call resume(). + +

                                                Here is an example of an echo server which listens for connections on port 8124:

                                                var net = require('net');
                                                 var server = net.createServer(function(c) { //'connection' listener
                                                -  console.log('server connected');
                                                +  console.log('client connected');
                                                   c.on('end', function() {
                                                -    console.log('server disconnected');
                                                +    console.log('client disconnected');
                                                   });
                                                   c.write('hello\r\n');
                                                   c.pipe(c);
                                                @@ -9342,10 +9968,16 @@
                                                 
                                                 

                                                nc -U /tmp/echo.sock
                                                -

                                                net.connect(options, [connectionListener])#

                                                -

                                                net.createConnection(options, [connectionListener])#

                                                -

                                                Constructs a new socket object and opens the socket to the given location. -When the socket is established, the 'connect' event will be emitted. +

                                                net.connect(options[, connectionListener])#

                                                +

                                                net.createConnection(options[, connectionListener])#

                                                +

                                                A factory method, which returns a new 'net.Socket' +and connects to the supplied address and port. + +

                                                +

                                                When the socket is established, the 'connect' event will be emitted. + +

                                                +

                                                Has the same events as 'net.Socket'.

                                                For TCP sockets, options argument should be an object which specifies: @@ -9388,7 +10020,7 @@

                                                var net = require('net');
                                                 var client = net.connect({port: 8124},
                                                     function() { //'connect' listener
                                                -  console.log('client connected');
                                                +  console.log('connected to server!');
                                                   client.write('world!\r\n');
                                                 });
                                                 client.on('data', function(data) {
                                                @@ -9396,33 +10028,39 @@
                                                   client.end();
                                                 });
                                                 client.on('end', function() {
                                                -  console.log('client disconnected');
                                                +  console.log('disconnected from server');
                                                 });

                                                To connect on the socket /tmp/echo.sock the second line would just be changed to

                                                var client = net.connect({path: '/tmp/echo.sock'});
                                                -

                                                net.connect(port, [host], [connectListener])#

                                                -

                                                net.createConnection(port, [host], [connectListener])#

                                                +

                                                net.connect(port[, host][, connectListener])#

                                                +

                                                net.createConnection(port[, host][, connectListener])#

                                                Creates a TCP connection to port on host. If host is omitted, 'localhost' will be assumed. The connectListener parameter will be added as an listener for the 'connect' event.

                                                -

                                                net.connect(path, [connectListener])#

                                                -

                                                net.createConnection(path, [connectListener])#

                                                +

                                                Is a factory method which returns a new 'net.Socket'. + +

                                                +

                                                net.connect(path[, connectListener])#

                                                +

                                                net.createConnection(path[, connectListener])#

                                                Creates unix socket connection to path. The connectListener parameter will be added as an listener for the 'connect' event.

                                                +

                                                A factory method which returns a new 'net.Socket'. + +

                                                Class: net.Server#

                                                This class is used to create a TCP or local server.

                                                -

                                                server.listen(port, [host], [backlog], [callback])#

                                                +

                                                server.listen(port[, host][, backlog][, callback])#

                                                Begin accepting connections on the specified port and host. If the host is omitted, the server will accept connections directed to any IPv4 address (INADDR_ANY). A port value of zero will assign a random port. @@ -9457,7 +10095,7 @@

                                                -

                                                server.listen(path, [callback])#

                                                +

                                                server.listen(path[, callback])#

                                                • path String
                                                • callback Function
                                                • @@ -9487,7 +10125,7 @@

                                                  net.createServer().listen(
                                                       path.join('\\\\?\\pipe', process.cwd(), 'myctl'))
                                                  -

                                                  server.listen(handle, [callback])#

                                                  +

                                                  server.listen(handle[, callback])#

                                                  • handle Object
                                                  • callback Function
                                                  • @@ -9510,12 +10148,44 @@ 'listening' event.

                                                    +

                                                    server.listen(options[, callback])#

                                                    +
                                                      +
                                                    • options Object - Required. Supports the following properties:
                                                        +
                                                      • port Number - Optional.
                                                      • +
                                                      • host String - Optional.
                                                      • +
                                                      • backlog Number - Optional.
                                                      • +
                                                      • path String - Optional.
                                                      • +
                                                      • exclusive Boolean - Optional.
                                                      • +
                                                      +
                                                    • +
                                                    • callback Function - Optional.
                                                    • +
                                                  +

                                                  The port, host, and backlog properties of options, as well as the +optional callback function, behave as they do on a call to +server.listen(port, [host], [backlog], [callback]) +. Alternatively, the path +option can be used to specify a UNIX socket. + +

                                                  +

                                                  If exclusive is false (default), then cluster workers will use the same +underlying handle, allowing connection handling duties to be shared. When +exclusive is true, the handle is not shared, and attempted port sharing +results in an error. An example which listens on an exclusive port is +shown below. + +

                                                  +
                                                  server.listen({
                                                  +  host: 'localhost',
                                                  +  port: 80,
                                                  +  exclusive: true
                                                  +});

                                                  server.close([callback])#

                                                  Stops the server from accepting new connections and keeps existing connections. This function is asynchronous, the server is finally closed when all connections are ended and the server emits a 'close' event. Optionally, you can pass a callback to listen for the 'close' -event. +event. If present, the callback is invoked with any potential error +as the first and only argument.

                                                  server.address()#

                                                  @@ -9633,8 +10303,8 @@ About allowHalfOpen, refer to createServer() and 'end' event.

                                                  -

                                                  socket.connect(port, [host], [connectListener])#

                                                  -

                                                  socket.connect(path, [connectListener])#

                                                  +

                                                  socket.connect(port[, host][, connectListener])#

                                                  +

                                                  socket.connect(path[, connectListener])#

                                                  Opens the connection for a given socket. If port and host are given, then the socket will be opened as a TCP socket, if host is omitted, localhost will be assumed. If a path is given, the socket will be @@ -9681,7 +10351,7 @@ stream.setEncoding() for more information.

                                                  -

                                                  socket.write(data, [encoding], [callback])#

                                                  +

                                                  socket.write(data[, encoding][, callback])#

                                                  Sends data on the socket. The second parameter specifies the encoding in the case of a string--it defaults to UTF8 encoding. @@ -9695,7 +10365,7 @@ written out - this may not be immediately.

                                                  -

                                                  socket.end([data], [encoding])#

                                                  +

                                                  socket.end([data][, encoding])#

                                                  Half-closes the socket. i.e., it sends a FIN packet. It is possible the server will still send some data. @@ -9718,7 +10388,7 @@

                                                  Resumes reading after a call to pause().

                                                  -

                                                  socket.setTimeout(timeout, [callback])#

                                                  +

                                                  socket.setTimeout(timeout[, callback])#

                                                  Sets the socket to timeout after timeout milliseconds of inactivity on the socket. By default net.Socket do not have a timeout. @@ -9742,7 +10412,7 @@ noDelay defaults to true.

                                                  -

                                                  socket.setKeepAlive([enable], [initialDelay])#

                                                  +

                                                  socket.setKeepAlive([enable][, initialDelay])#

                                                  Enable/disable keep-alive functionality, and optionally set the initial delay before the first keepalive probe is sent on an idle socket. enable defaults to false. @@ -9778,6 +10448,10 @@ '74.125.127.100' or '2001:4860:a005::68'.

                                                  +

                                                  socket.remoteFamily#

                                                  +

                                                  The string representation of the remote IP family. 'IPv4' or 'IPv6'. + +

                                                  socket.remotePort#

                                                  The numeric representation of the remote port. For example, 80 or 21. @@ -9812,9 +10486,9 @@

                                                    -
                                                  • err {Error | Null} The error object. See [dns.lookup()][].
                                                  • +
                                                  • err {Error | Null} The error object. See dns.lookup().
                                                  • address {String} The IP address.
                                                  • -
                                                  • family {String | Null} The address type. See [dns.lookup()][].
                                                  • +
                                                  • family {String | Null} The address type. See dns.lookup().

                                                  Event: 'connect'#

                                                  Emitted when a socket connection is successfully established. @@ -9913,18 +10587,13 @@ s.bind(1234, function() { s.addMembership('224.0.0.114'); });

                                                -

                                                dgram.createSocket(type, [callback])#

                                                -

                                                dgram.createSocket(options, [callback])#

                                                -
                                                  +

                                                  dgram.createSocket(type[, callback])#

                                                  +
                                                  • type String. Either 'udp4' or 'udp6'
                                                  • -
                                                  • options Object. Should contain a type property and could contain -reuseAddr property. false by default. -When reuseAddr is true - socket.bind() will reuse address, even if the -other process has already bound a socket on it.
                                                  • callback Function. Attached as a listener to message events. Optional
                                                  • Returns: Socket object
                                                  • -
                                                  +

                                                Creates a datagram Socket of the specified types. Valid types are udp4 and udp6. @@ -9932,9 +10601,33 @@

                                                Takes an optional callback which is added as a listener for message events.

                                                -

                                                Call socket.bind if you want to receive datagrams. socket.bind() will bind -to the "all interfaces" address on a random port (it does the right thing for -both udp4 and udp6 sockets). You can then retrieve the address and port +

                                                Call socket.bind() if you want to receive datagrams. socket.bind() will +bind to the "all interfaces" address on a random port (it does the right thing +for both udp4 and udp6 sockets). You can then retrieve the address and port +with socket.address().address and socket.address().port. + +

                                                +

                                                dgram.createSocket(options[, callback])#

                                                +
                                                  +
                                                • options Object
                                                • +
                                                • callback Function. Attached as a listener to message events.
                                                • +
                                                • Returns: Socket object
                                                • +
                                              +

                                              The options object should contain a type field of either udp4 or udp6 +and an optional boolean reuseAddr field. + +

                                              +

                                              When reuseAddr is true socket.bind() will reuse the address, even if +another process has already bound a socket on it. reuseAddr defaults to +false. + +

                                              +

                                              Takes an optional callback which is added as a listener for message events. + +

                                              +

                                              Call socket.bind() if you want to receive datagrams. socket.bind() will +bind to the "all interfaces" address on a random port (it does the right thing +for both udp4 and udp6 sockets). You can then retrieve the address and port with socket.address().address and socket.address().port.

                                              @@ -9973,7 +10666,7 @@

                                              Emitted when an error occurs.

                                              -

                                              socket.send(buf, offset, length, port, address, [callback])#

                                              +

                                              socket.send(buf, offset, length, port, address[, callback])#

                                              • buf Buffer object or string. Message to be sent
                                              • offset Integer. Offset in the buffer where the message starts.
                                              • @@ -10047,7 +10740,7 @@ informing the source that the data did not reach its intended recipient).

                                                -

                                                socket.bind(port, [address], [callback])#

                                                +

                                                socket.bind(port[, address][, callback])#

                                                • port Integer
                                                • address String, Optional
                                                • @@ -10095,6 +10788,34 @@ server.bind(41234); // server listening 0.0.0.0:41234
                                                  +

                                                  socket.bind(options[, callback])#

                                                  +
                                                    +
                                                  • options Object - Required. Supports the following properties:
                                                      +
                                                    • port Number - Required.
                                                    • +
                                                    • address String - Optional.
                                                    • +
                                                    • exclusive Boolean - Optional.
                                                    • +
                                                    +
                                                  • +
                                                  • callback Function - Optional.
                                                  • +
                                                +

                                                The port and address properties of options, as well as the optional +callback function, behave as they do on a call to +socket.bind(port, [address], [callback]) +. + +

                                                +

                                                If exclusive is false (default), then cluster workers will use the same +underlying handle, allowing connection handling duties to be shared. When +exclusive is true, the handle is not shared, and attempted port sharing +results in an error. An example which listens on an exclusive port is +shown below. + +

                                                +
                                                socket.bind({
                                                +  address: 'localhost',
                                                +  port: 8000,
                                                +  exclusive: true
                                                +});

                                                socket.close()#

                                                Close the underlying socket and stop listening for data on it. @@ -10149,7 +10870,7 @@ packets will also be received on the local interface.

                                                -

                                                socket.addMembership(multicastAddress, [multicastInterface])#

                                                +

                                                socket.addMembership(multicastAddress[, multicastInterface])#

                                                • multicastAddress String
                                                • multicastInterface String, Optional
                                                • @@ -10161,7 +10882,7 @@ interfaces.

                                                  -

                                                  socket.dropMembership(multicastAddress, [multicastInterface])#

                                                  +

                                                  socket.dropMembership(multicastAddress[, multicastInterface])#

                                                  • multicastAddress String
                                                  • multicastInterface String, Optional
                                                  • @@ -10189,13 +10910,35 @@

                                                    DNS#

                                                    -
                                                    Stability: 3 - Stable

                                                    Use require('dns') to access this module. All methods in the dns module -use C-Ares except for dns.lookup which uses getaddrinfo(3) in a thread -pool. C-Ares is much faster than getaddrinfo but the system resolver is -more consistent with how other programs operate. When a user does -net.connect(80, 'google.com') or http.get({ host: 'google.com' }) the -dns.lookup method is used. Users who need to do a large number of lookups -quickly should use the methods that go through C-Ares. +

                                                    Stability: 3 - Stable

                                                    Use require('dns') to access this module. + +

                                                    +

                                                    This module contains functions that belong to two different categories: + +

                                                    +

                                                    1) Functions that use the underlying operating system facilities to perform +name resolution, and that do not necessarily do any network communication. +This category contains only one function: dns.lookup. Developers looking +to perform name resolution in the same way that other applications on the same +operating system behave should use dns.lookup. + +

                                                    +

                                                    Here is an example that does a lookup of www.google.com. + +

                                                    +
                                                    var dns = require('dns');
                                                    +
                                                    +dns.lookup('www.google.com', function onLookup(err, addresses, family) {
                                                    +  console.log('addresses:', addresses);
                                                    +});
                                                    +

                                                    2) Functions that connect to an actual DNS server to perform name resolution, +and that always use the network to perform DNS queries. This category +contains all functions in the dns module but dns.lookup. These functions +do not use the same set of configuration files than what dns.lookup uses. +For instance, they do not use the configuration from /etc/hosts. These +functions should be used by developers who do not want to use the underlying +operating system's facilities for name resolution, and instead want to +always perform DNS queries.

                                                    Here is an example which resolves 'www.google.com' then reverse @@ -10219,11 +10962,34 @@ }); }); }); -

                                                    dns.lookup(hostname, [family], callback)#

                                                    +

                                                    There are subtle consequences in choosing one or another, please consult the +Implementation considerations section +for more information. + +

                                                    +

                                                    dns.lookup(hostname[, options], callback)#

                                                    Resolves a hostname (e.g. 'google.com') into the first found A (IPv4) or -AAAA (IPv6) record. -The family can be the integer 4 or 6. Defaults to null that indicates -both Ip v4 and v6 address family. +AAAA (IPv6) record. options can be an object or integer. If options is +not provided, then IP v4 and v6 addresses are both valid. If options is +an integer, then it must be 4 or 6. + +

                                                    +

                                                    Alternatively, options can be an object containing two properties, +family and hints. Both properties are optional. If family is provided, +it must be the integer 4 or 6. If family is not provided then IP v4 +and v6 addresses are accepted. The hints field, if present, should be one +or more of the supported getaddrinfo flags. If hints is not provided, +then no flags are passed to getaddrinfo. Multiple flags can be passed +through hints by logically ORing their values. An example usage of +options is shown below. + +

                                                    +
                                                    {
                                                    +  family: 4,
                                                    +  hints: dns.ADDRCONFIG | dns.V4MAPPED
                                                    +}
                                                    +

                                                    See supported getaddrinfo flags below for +more information on supported flags.

                                                    The callback has arguments (err, address, family). The address argument @@ -10237,9 +11003,31 @@ the hostname does not exist but also when the lookup fails in other ways such as no available file descriptors. +

                                                    +

                                                    dns.lookup doesn't necessarily have anything to do with the DNS protocol. +It's only an operating system facility that can associate name with addresses, +and vice versa. + +

                                                    +

                                                    Its implementation can have subtle but important consequences on the behavior +of any Node.js program. Please take some time to consult the Implementation +considerations section before using it. + +

                                                    +

                                                    dns.lookupService(address, port, callback)#

                                                    +

                                                    Resolves the given address and port into a hostname and service using +getnameinfo.

                                                    -

                                                    dns.resolve(hostname, [rrtype], callback)#

                                                    +

                                                    The callback has arguments (err, hostname, service). The hostname and +service arguments are strings (e.g. 'localhost' and 'http' respectively). + +

                                                    +

                                                    On error, err is an Error object, where err.code is the error code. + + +

                                                    +

                                                    dns.resolve(hostname[, rrtype], callback)#

                                                    Resolves a hostname (e.g. 'google.com') into an array of the record types specified by rrtype. @@ -10299,11 +11087,11 @@

                                                    The same as dns.resolve(), but only for service records (SRV records). addresses is an array of the SRV records available for hostname. Properties of SRV records are priority, weight, port, and name (e.g., -[{'priority': 10, {'weight': 5, 'port': 21223, 'name': 'service.example.com'}, ...]). +[{'priority': 10, 'weight': 5, 'port': 21223, 'name': 'service.example.com'}, ...]).

                                                    dns.resolveSoa(hostname, callback)#

                                                    -

                                                    The same as dns.resolve(), but only for start of authority record queries +

                                                    The same as dns.resolve(), but only for start of authority record queries (SOA record).

                                                    @@ -10389,6 +11177,63 @@
                                                  • dns.ADDRGETNETWORKPARAMS: Could not find GetNetworkParams function.
                                                  • dns.CANCELLED: DNS query cancelled.
                                                  +

                                                  Supported getaddrinfo flags#

                                                  +

                                                  The following flags can be passed as hints to dns.lookup. + +

                                                  +
                                                    +
                                                  • dns.ADDRCONFIG: Returned address types are determined by the types +of addresses supported by the current system. For example, IPv4 addresses +are only returned if the current system has at least one IPv4 address +configured. Loopback addresses are not considered.
                                                  • +
                                                  • dns.V4MAPPED: If the IPv6 family was specified, but no IPv6 addresses +were found, then return IPv4 mapped IPv6 addresses.
                                                  • +
                                                  +

                                                  Implementation considerations#

                                                  +

                                                  Although dns.lookup and dns.resolve*/dns.reverse functions have the same +goal of associating a network name with a network address (or vice versa), +their behavior is quite different. These differences can have subtle but +significant consequences on the behavior of Node.js programs. + +

                                                  +

                                                  dns.lookup#

                                                  +

                                                  Under the hood, dns.lookup uses the same operating system facilities as most +other programs. For instance, dns.lookup will almost always resolve a given +name the same way as the ping command. On most POSIX-like operating systems, +the behavior of the dns.lookup function can be tweaked by changing settings +in nsswitch.conf(5) and/or resolv.conf(5), but be careful that changing +these files will change the behavior of all other programs running on the same +operating system. + +

                                                  +

                                                  Though the call will be asynchronous from JavaScript's perspective, it is +implemented as a synchronous call to getaddrinfo(3) that runs on libuv's +threadpool. Because libuv's threadpool has a fixed size, it means that if for +whatever reason the call to getaddrinfo(3) takes a long time, other +operations that could run on libuv's threadpool (such as filesystem +operations) will experience degraded performance. In order to mitigate this +issue, one potential solution is to increase the size of libuv's threadpool by +setting the 'UV_THREADPOOL_SIZE' environment variable to a value greater than +4 (its current default value). For more information on libuv's threadpool, see +the official libuv +documentation. + +

                                                  +

                                                  dns.resolve, functions starting with dns.resolve and dns.reverse#

                                                  +

                                                  These functions are implemented quite differently than dns.lookup. They do +not use getaddrinfo(3) and they always perform a DNS query on the network. +This network communication is always done asynchronously, and does not use +libuv's threadpool. + +

                                                  +

                                                  As a result, these functions cannot have the same negative impact on other +processing that happens on libuv's threadpool that dns.lookup can have. + +

                                                  +

                                                  They do not use the same set of configuration files than what dns.lookup +uses. For instance, they do not use the configuration from /etc/hosts. + +

                                                  HTTP#

                                                  Stability: 3 - Stable

                                                  To use the HTTP server and client one must require('http'). @@ -10453,14 +11298,14 @@

                                                  http.createServer([requestListener])#

                                                  -

                                                  Returns a new web server object. +

                                                  Returns a new instance of http.Server.

                                                  The requestListener is a function which is automatically added to the 'request' event.

                                                  -

                                                  http.createClient([port], [host])#

                                                  +

                                                  http.createClient([port][, host])#

                                                  This function is deprecated; please use http.request() instead. Constructs a new HTTP client. port and host refer to the server to be connected to. @@ -10563,14 +11408,14 @@

                                                  function (exception, socket) { }

                                                  -

                                                  If a client connection emits an 'error' event - it will forwarded here. +

                                                  If a client connection emits an 'error' event, it will be forwarded here.

                                                  socket is the net.Socket object that the error originated from.

                                                  -

                                                  server.listen(port, [hostname], [backlog], [callback])#

                                                  +

                                                  server.listen(port[, hostname][, backlog][, callback])#

                                                  Begin accepting connections on the specified port and hostname. If the hostname is omitted, the server will accept connections directed to any IPv4 address (INADDR_ANY). @@ -10590,7 +11435,7 @@

                                                  -

                                                  server.listen(path, [callback])#

                                                  +

                                                  server.listen(path[, callback])#

                                                  Start a UNIX socket server listening for connections on the given path.

                                                  @@ -10599,7 +11444,7 @@

                                                  -

                                                  server.listen(handle, [callback])#

                                                  +

                                                  server.listen(handle[, callback])#

                                                  • handle Object
                                                  • callback Function
                                                  • @@ -10703,7 +11548,7 @@ the request body should be sent. See the 'checkContinue' event on Server.

                                                    -

                                                    response.writeHead(statusCode, [statusMessage], [headers])#

                                                    +

                                                    response.writeHead(statusCode[, statusMessage][, headers])#

                                                    Sends a response header to the request. The status code is a 3-digit HTTP status code, like 404. The last argument, headers, are the response headers. Optionally one can give a human-readable statusMessage as the second @@ -10725,7 +11570,7 @@ implicit/mutable headers will be calculated and call this function for you.

                                                    -

                                                    Note: that Content-Length is given in bytes not characters. The above example +

                                                    Note that Content-Length is given in bytes not characters. The above example works because the string 'hello world' contains only single byte characters. If the body contains higher coded characters then Buffer.byteLength() should be used to determine the number of bytes in a given encoding. @@ -10824,7 +11669,7 @@

                                                    response.removeHeader("Content-Encoding");
                                                    -

                                                    response.write(chunk, [encoding])#

                                                    +

                                                    response.write(chunk[, encoding][, callback])#

                                                    If this method is called and response.writeHead() has not been called, it will switch to implicit header mode and flush the implicit headers. @@ -10835,7 +11680,8 @@

                                                    chunk can be a string or a buffer. If chunk is a string, the second parameter specifies how to encode it into a byte stream. -By default the encoding is 'utf8'. +By default the encoding is 'utf8'. The last parameter callback +will be called when this chunk of data is flushed.

                                                    Note: This is the raw HTTP body and has nothing to do with @@ -10851,7 +11697,7 @@

                                                    Returns true if the entire data was flushed successfully to the kernel buffer. Returns false if all or part of the data was queued in user memory. -'drain' will be emitted when the buffer is again free. +'drain' will be emitted when the buffer is free again.

                                                    response.addTrailers(headers)#

                                                    @@ -10873,19 +11719,22 @@ response.write(fileData); response.addTrailers({'Content-MD5': "7895bf4b8828b55ceaf47747b4bca667"}); response.end(); -

                                                    response.end([data], [encoding])#

                                                    +

                                                    response.end([data][, encoding][, callback])#

                                                    This method signals to the server that all of the response headers and body have been sent; that server should consider this message complete. The method, response.end(), MUST be called on each response.

                                                    -

                                                    If data is specified, it is equivalent to calling response.write(data, encoding) -followed by response.end(). +

                                                    If data is specified, it is equivalent to calling +response.write(data, encoding) followed by response.end(callback). +

                                                    +

                                                    If callback is specified, it will be called when the response stream +is finished.

                                                    -

                                                    http.request(options, [callback])#

                                                    +

                                                    http.request(options[, callback])#

                                                    Node maintains several connections per server to make HTTP requests. This function allows one to transparently issue requests. @@ -10938,11 +11787,19 @@

                                                    Example:

                                                    -
                                                    var options = {
                                                    +
                                                    var postData = querystring.stringify({
                                                    +  'msg' : 'Hello World!'
                                                    +});
                                                    +
                                                    +var options = {
                                                       hostname: 'www.google.com',
                                                       port: 80,
                                                       path: '/upload',
                                                    -  method: 'POST'
                                                    +  method: 'POST',
                                                    +  headers: {
                                                    +    'Content-Type': 'application/x-www-form-urlencoded',
                                                    +    'Content-Length': postData.length
                                                    +  }
                                                     };
                                                     
                                                     var req = http.request(options, function(res) {
                                                    @@ -10959,8 +11816,7 @@
                                                     });
                                                     
                                                     // write data to request body
                                                    -req.write('data\n');
                                                    -req.write('data\n');
                                                    +req.write(postData);
                                                     req.end();

                                                    Note that in the example req.end() was called. With http.request() one must always call req.end() to signify that you're done with the request - @@ -10990,7 +11846,7 @@ to compute basic authentication.

                                                  -

                                                  http.get(options, [callback])#

                                                  +

                                                  http.get(options[, callback])#

                                                  Since most requests are GET requests without bodies, Node provides this convenience method. The only difference between this method and http.request() is that it sets the method to GET and calls req.end() automatically. @@ -11313,11 +12169,11 @@ the client should send the request body.

                                                  -

                                                  request.flush()#

                                                  +

                                                  request.flushHeaders()#

                                                  Flush the request headers.

                                                  -

                                                  For effiency reasons, node.js normally buffers the request headers until you +

                                                  For efficiency reasons, node.js normally buffers the request headers until you call request.end() or write the first chunk of request data. It then tries hard to pack the request headers and data into a single TCP packet. @@ -11327,7 +12183,7 @@ the optimization and kickstart the request.

                                                  -

                                                  request.write(chunk, [encoding])#

                                                  +

                                                  request.write(chunk[, encoding][, callback])#

                                                  Sends a chunk of the body. By calling this method many times, the user can stream a request body to a server--in that case it is suggested to use the @@ -11341,23 +12197,30 @@

                                                  The encoding argument is optional and only applies when chunk is a string. Defaults to 'utf8'. +

                                                  +

                                                  The callback argument is optional and will be called when this chunk of data +is flushed.

                                                  -

                                                  request.end([data], [encoding])#

                                                  +

                                                  request.end([data][, encoding][, callback])#

                                                  Finishes sending the request. If any parts of the body are unsent, it will flush them to the stream. If the request is chunked, this will send the terminating '0\r\n\r\n'.

                                                  If data is specified, it is equivalent to calling -request.write(data, encoding) followed by request.end(). +request.write(data, encoding) followed by request.end(callback). + +

                                                  +

                                                  If callback is specified, it will be called when the request stream +is finished.

                                                  request.abort()#

                                                  Aborts a request. (New since v0.3.8.)

                                                  -

                                                  request.setTimeout(timeout, [callback])#

                                                  +

                                                  request.setTimeout(timeout[, callback])#

                                                  Once a socket is assigned to this request and is connected socket.setTimeout() will be called. @@ -11367,7 +12230,7 @@ socket.setNoDelay() will be called.

                                                  -

                                                  request.setSocketKeepAlive([enable], [initialDelay])#

                                                  +

                                                  request.setSocketKeepAlive([enable][, initialDelay])#

                                                  Once a socket is assigned to this request and is connected socket.setKeepAlive() will be called. @@ -11388,7 +12251,7 @@

                                                  function () { }

                                                  -

                                                  Indicates that the underlaying connection was closed. +

                                                  Indicates that the underlying connection was closed. Just like 'end', this event occurs only once per response.

                                                  @@ -11540,7 +12403,7 @@

                                                  See http.Server#timeout.

                                                  -

                                                  https.createServer(options, [requestListener])#

                                                  +

                                                  https.createServer(options[, requestListener])#

                                                  Returns a new HTTPS web server object. The options is similar to tls.createServer(). The requestListener is a function which is automatically added to the 'request' event. @@ -11576,9 +12439,9 @@ res.writeHead(200); res.end("hello world\n"); }).listen(8000); -

                                                  server.listen(port, [host], [backlog], [callback])#

                                                  -

                                                  server.listen(path, [callback])#

                                                  -

                                                  server.listen(handle, [callback])#

                                                  +

                                                  server.listen(port[, host][, backlog][, callback])#

                                                  +

                                                  server.listen(path[, callback])#

                                                  +

                                                  server.listen(handle[, callback])#

                                                  See http.listen() for details.

                                                  @@ -11663,8 +12526,8 @@ the list of supplied CAs. An 'error' event is emitted if verification fails. Verification happens at the connection level, before the HTTP request is sent. Default true. -
                                                • secureProtocol: The SSL method to use, e.g. SSLv3_method to force -SSL version 3. The possible values depend on your installation of +
                                                • secureProtocol: The SSL method to use, e.g. TLSv1_method to force +TLS version 1. The possible values depend on your installation of OpenSSL and are defined in the constant SSL_METHODS.

                                                In order to specify these options, use a custom Agent. @@ -11759,6 +12622,9 @@

                                              • protocol: The request protocol, lowercased.

                                                Example: 'http:'

                                              • +
                                              • slashes: The protocol requires slashes after the colon

                                                +

                                                Example: true or false

                                                +
                                              • host: The full lowercased host portion of the URL, including port information.

                                                Example: 'host.com:8080'

                                                @@ -11794,13 +12660,14 @@

                                                The following methods are provided by the URL module:

                                                -

                                                url.parse(urlStr, [parseQueryString], [slashesDenoteHost])#

                                                +

                                                url.parse(urlStr[, parseQueryString][, slashesDenoteHost])#

                                                Take a URL string, and return an object.

                                                -

                                                Pass true as the second argument to also parse -the query string using the querystring module. -Defaults to false. +

                                                Pass true as the second argument to also parse the query string using the +querystring module. If true then the query property will always be +assigned an object, and the search property will always be a (possibly +empty) string. Defaults to false.

                                                Pass true as the third argument to treat //foo/bar as @@ -11812,6 +12679,9 @@

                                                Take a parsed URL object, and return a formatted URL string.

                                                +

                                                Here's how the formatting process works: + +

                                                • href will be ignored.
                                                • protocol is treated the same with or without the trailing : (colon).
                                                    @@ -11821,15 +12691,23 @@ be postfixed with : (colon)
                                                • +
                                                • slashes set to true if the protocol requires :// (colon-slash-slash)
                                                    +
                                                  • Only needs to be set for protocols not previously listed as requiring +slashes, such as mongodb://localhost:8000/
                                                  • +
                                                  +
                                                • auth will be used if present.
                                                • hostname will only be used if host is absent.
                                                • port will only be used if host is absent.
                                                • host will be used in place of hostname and port
                                                • -
                                                • pathname is treated the same with or without the leading / (slash)
                                                • -
                                                • search will be used in place of query
                                                • +
                                                • pathname is treated the same with or without the leading / (slash).
                                                • +
                                                • path is treated the same with pathname but able to contain query as well.
                                                • +
                                                • search will be used in place of query.
                                                    +
                                                  • It is treated the same with or without the leading ? (question mark)
                                                  • +
                                                  +
                                                • query (object; see querystring) will only be used if search is absent.
                                                • -
                                                • search is treated the same with or without the leading ? (question mark)
                                                • -
                                                • hash is treated the same with or without the leading # (pound sign, anchor)
                                                • +
                                                • hash is treated the same with or without the leading # (pound sign, anchor).

                                                url.resolve(from, to)#

                                                Take a base URL, and a href URL, and resolve them as a browser would for @@ -11846,12 +12724,16 @@ It provides the following methods:

                                                -

                                                querystring.stringify(obj, [sep], [eq])#

                                                +

                                                querystring.stringify(obj[, sep][, eq][, options])#

                                                Serialize an object to a query string. Optionally override the default separator ('&') and assignment ('=') characters.

                                                +

                                                Options object may contain encodeURIComponent property (querystring.escape by default), +it can be used to encode string with non-utf8 encoding if necessary. + +

                                                Example:

                                                @@ -11861,8 +12743,15 @@ querystring.stringify({foo: 'bar', baz: 'qux'}, ';', ':') // returns -'foo:bar;baz:qux' -

                                                querystring.parse(str, [sep], [eq], [options])#

                                                +'foo:bar;baz:qux' + +// Suppose gbkEncodeURIComponent function already exists, +// it can encode string with `gbk` encoding +querystring.stringify({ w: '中文', foo: 'bar' }, null, null, + { encodeURIComponent: gbkEncodeURIComponent }) +// returns +'w=%D6%D0%CE%C4&foo=bar' +

                                                querystring.parse(str[, sep][, eq][, options])#

                                                Deserialize a query string to an object. Optionally override the default separator ('&') and assignment ('=') characters. @@ -11872,12 +12761,23 @@ be used to limit processed keys. Set it to 0 to remove key count limitation.

                                                +

                                                Options object may contain decodeURIComponent property (decodeURIComponent by default), +it can be used to decode non-utf8 encoding string if necessary. + +

                                                Example:

                                                querystring.parse('foo=bar&baz=qux&baz=quux&corge')
                                                 // returns
                                                -{ foo: 'bar', baz: ['qux', 'quux'], corge: '' }
                                                +{ foo: 'bar', baz: ['qux', 'quux'], corge: '' } + +// Suppose gbkDecodeURIComponent function already exists, +// it can decode `gbk` encoding string +querystring.parse('w=%D6%D0%CE%C4&foo=bar', null, null, + { decodeURIComponent: gbkDecodeURIComponent }) +// returns +{ w: '中文', foo: 'bar' }

                                                querystring.escape#

                                                The escape function used by querystring.stringify, provided so that it could be overridden if necessary. @@ -11979,7 +12879,7 @@

                                                • input - the readable stream to listen to (Required).

                                                • -
                                                • output - the writable stream to write readline data to (Required).

                                                  +
                                                • output - the writable stream to write readline data to (Optional).

                                                • completer - an optional function that is used for Tab autocompletion. See below for an example of using this.

                                                  @@ -12057,6 +12957,10 @@ been paused.

                                                  +

                                                  If output is set to null or undefined when calling createInterface, the +prompt is not written. + +

                                                  rl.question(query, callback)#

                                                  Prepends the prompt with query and invokes callback with the user's response. Displays the query to the user, and then invokes callback @@ -12067,6 +12971,10 @@ it has been paused.

                                                  +

                                                  If output is set to null or undefined when calling createInterface, +nothing is displayed. + +

                                                  Example usage:

                                                  @@ -12077,6 +12985,9 @@

                                                  Pauses the readline input stream, allowing it to be resumed later if needed.

                                                  +

                                                  Note that this doesn't immediately pause the stream of events. Several events may be emitted after calling pause, including line. + +

                                                  rl.resume()#

                                                  Resumes the readline input stream. @@ -12086,9 +12997,10 @@ output streams. The "close" event will also be emitted.

                                                  -

                                                  rl.write(data, [key])#

                                                  -

                                                  Writes data to output stream. key is an object literal to represent a key -sequence; available if the terminal is a TTY. +

                                                  rl.write(data[, key])#

                                                  +

                                                  Writes data to output stream, unless output is set to null or +undefined when calling createInterface. key is an object literal to +represent a key sequence; available if the terminal is a TTY.

                                                  This will also resume the input stream if it has been paused. @@ -12509,7 +13421,7 @@ later.

                                                  -

                                                  vm.runInThisContext(code, [options])#

                                                  +

                                                  vm.runInThisContext(code[, options])#

                                                  vm.runInThisContext() compiles code, runs it and returns the result. Running code does not have access to local scope, but does have access to the current global object. @@ -12573,7 +13485,7 @@

                                                  -

                                                  vm.runInContext(code, contextifiedSandbox, [options])#

                                                  +

                                                  vm.runInContext(code, contextifiedSandbox[, options])#

                                                  vm.runInContext compiles code, then runs it in contextifiedSandbox and returns the result. Running code does not have access to local scope. The contextifiedSandbox object must have been previously contextified via @@ -12583,7 +13495,7 @@

                                                  vm.runInContext takes the same options as vm.runInThisContext.

                                                  -

                                                  Example: compile and execute differnt scripts in a single existing context. +

                                                  Example: compile and execute different scripts in a single existing context.

                                                  var util = require('util');
                                                  @@ -12604,7 +13516,7 @@
                                                   
                                                   
                                                   

                                                  -

                                                  vm.runInNewContext(code, [sandbox], [options])#

                                                  +

                                                  vm.runInNewContext(code[, sandbox][, options])#

                                                  vm.runInNewContext compiles code, contextifies sandbox if passed or creates a new contextified sandbox if it's omitted, and then runs the code with the sandbox as the global object and returns the result. @@ -12635,6 +13547,21 @@

                                                  +

                                                  vm.runInDebugContext(code)#

                                                  +

                                                  vm.runInDebugContext compiles and executes code inside the V8 debug context. +The primary use case is to get access to the V8 debug object: + +

                                                  +
                                                  var Debug = vm.runInDebugContext('Debug');
                                                  +Debug.scripts().forEach(function(script) { console.log(script.name); });
                                                  +

                                                  Note that the debug context and object are intrinsically tied to V8's debugger +implementation and may change (or even get removed) without prior warning. + +

                                                  +

                                                  The debug object can also be exposed with the --expose_debug_as= switch. + + +

                                                  Class: Script#

                                                  A class for holding precompiled scripts, and running them in specific sandboxes. @@ -12693,7 +13620,7 @@

                                                • timeout: a number of milliseconds to execute the script before terminating execution. If execution is terminated, an Error will be thrown.
                                                -

                                                script.runInContext(contextifiedSandbox, [options])#

                                                +

                                                script.runInContext(contextifiedSandbox[, options])#

                                                Similar to vm.runInContext but a method of a precompiled Script object. script.runInContext runs script's compiled code in contextifiedSandbox and returns the result. Running code does not have access to local scope. @@ -12729,7 +13656,7 @@

                                                -

                                                script.runInNewContext([sandbox], [options])#

                                                +

                                                script.runInNewContext([sandbox][, options])#

                                                Similar to vm.runInNewContext but a method of a precompiled Script object. script.runInNewContext contextifies sandbox if passed or creates a new contextified sandbox if it's omitted, and then runs script's compiled code @@ -12772,12 +13699,17 @@

                                                It is possible to stream data through a child's stdin, stdout, and stderr in a fully non-blocking way. (Note that some programs use line-buffered I/O internally. That doesn't affect node.js but it means -data you send to the child process is not immediately consumed.) +data you send to the child process may not be immediately consumed.)

                                                To create a child process use require('child_process').spawn() or require('child_process').fork(). The semantics of each are slightly -different, and explained below. +different, and explained below. + +

                                                +

                                                For scripting purposes you may find the +synchronous counterparts more +convenient.

                                                Class: ChildProcess#

                                                @@ -12791,7 +13723,8 @@

                                                The ChildProcess class is not intended to be used directly. Use the -spawn() or fork() methods to create a Child Process instance. +spawn(), exec(), execFile(), or fork() methods to create a Child +Process instance.

                                                Event: 'error'#

                                                @@ -12806,7 +13739,7 @@
                                              • The process could not be killed, or
                                              • Sending a message to the child process failed for whatever reason.
                                              • -

                                                Note that the exit-event may or may not fire after an error has occured. If +

                                                Note that the exit-event may or may not fire after an error has occurred. If you are listening on both events to fire a function, remember to guard against calling your function twice. @@ -12868,13 +13801,18 @@

                                              • Stream object

                                              A Writable Stream that represents the child process's stdin. -Closing this stream via end() often causes the child process to terminate. +If the child is waiting to read all its input, it will not continue until this +stream has been closed via end().

                                              -

                                              If the child stdio streams are shared with the parent, then this will +

                                              If the child was not spawned with stdio[0] set to 'pipe', then this will not be set.

                                              +

                                              child.stdin is shorthand for child.stdio[0]. Both properties will refer +to the same object, or null. + +

                                              child.stdout#

                                              • Stream object
                                              • @@ -12882,10 +13820,14 @@

                                                A Readable Stream that represents the child process's stdout.

                                                -

                                                If the child stdio streams are shared with the parent, then this will +

                                                If the child was not spawned with stdio[1] set to 'pipe', then this will not be set.

                                                +

                                                child.stdout is shorthand for child.stdio[1]. Both properties will refer +to the same object, or null. + +

                                                child.stderr#

                                                • Stream object
                                                • @@ -12893,10 +13835,47 @@

                                                  A Readable Stream that represents the child process's stderr.

                                                  -

                                                  If the child stdio streams are shared with the parent, then this will +

                                                  If the child was not spawned with stdio[2] set to 'pipe', then this will not be set.

                                                  +

                                                  child.stderr is shorthand for child.stdio[2]. Both properties will refer +to the same object, or null. + +

                                                  +

                                                  child.stdio#

                                                  +
                                                    +
                                                  • Array
                                                  • +
                                                +

                                                A sparse array of pipes to the child process, corresponding with positions in +the stdio option to +spawn that have been +set to 'pipe'. +Note that streams 0-2 are also available as ChildProcess.stdin, +ChildProcess.stdout, and ChildProcess.stderr, respectively. + +

                                                +

                                                In the following example, only the child's fd 1 is setup as a pipe, so only +the parent's child.stdio[1] is a stream, all other values in the array are +null. + +

                                                +
                                                child = child_process.spawn("ls", {
                                                +    stdio: [
                                                +      0, // use parent's stdin for child
                                                +      'pipe', // pipe child's stdout to parent
                                                +      fs.openSync("err.out", "w") // direct child's stderr to a file
                                                +    ]
                                                +});
                                                +
                                                +assert.equal(child.stdio[0], null);
                                                +assert.equal(child.stdio[0], child.stdin);
                                                +
                                                +assert(child.stdout);
                                                +assert.equal(child.stdio[1], child.stdout);
                                                +
                                                +assert.equal(child.stdio[2], null);
                                                +assert.equal(child.stdio[2], child.stderr);

                                                child.pid#

                                                • Integer
                                                • @@ -12951,7 +13930,7 @@

                                                  See kill(2)

                                                  -

                                                  child.send(message, [sendHandle])#

                                                  +

                                                  child.send(message[, sendHandle])#

                                                  • message Object
                                                  • sendHandle Handle object
                                                  • @@ -13087,20 +14066,28 @@ of being received, most likely immediately.

                                                    -

                                                    Note that you can also call process.disconnect() in the child process. +

                                                     Note that you can also call process.disconnect() in the child process when the +child process has any open IPC channels with the parent (i.e. fork()). + +

                                                    +

                                                    Asynchronous Process Creation#

                                                    +

                                                    These methods follow the common async programming patterns (accepting a +callback or returning an EventEmitter).

                                                    -

                                                    child_process.spawn(command, [args], [options])#

                                                    +

                                                    child_process.spawn(command[, args][, options])#

                                                    • command String The command to run
                                                    • args Array List of string arguments
                                                    • options Object
                                                      • cwd String Current working directory of the child process
                                                      • -
                                                      • stdio Array|String Child's stdio configuration. (See below)
                                                      • -
                                                      • customFds Array Deprecated File descriptors for the child to use -for stdio. (See below)
                                                      • env Object Environment key-value pairs
                                                      • -
                                                      • detached Boolean The child will be a process group leader. (See below)
                                                      • +
                                                      • stdio Array|String Child's stdio configuration. (See +below)
                                                      • +
                                                      • customFds Array Deprecated File descriptors for the child to use +for stdio. (See below)
                                                      • +
                                                      • detached Boolean The child will be a process group leader. (See +below)
                                                      • uid Number Sets the user identity of the process. (See setuid(2).)
                                                      • gid Number Sets the group identity of the process. (See setgid(2).)
                                                      @@ -13111,14 +14098,18 @@ If omitted, args defaults to an empty Array.

                                                      -

                                                      The third argument is used to specify additional options, which defaults to: +

                                                      The third argument is used to specify additional options, with these defaults:

                                                      { cwd: undefined,
                                                         env: process.env
                                                       }
                                                      -

                                                      cwd allows you to specify the working directory from which the process is spawned. -Use env to specify environment variables that will be visible to the new process. +

                                                      Use cwd to specify the working directory from which the process is spawned. +If not given, the default is to inherit the current working directory. + +

                                                      +

                                                      Use env to specify environment variables that will be visible to the new +process, the default is process.env.

                                                      Example of running ls -lh /usr, capturing stdout, stderr, and the exit code: @@ -13173,25 +14164,17 @@ console.log('grep process exited with code ' + code); } }); -

                                                      Example of checking for failed exec: - -

                                                      -
                                                      var spawn = require('child_process').spawn,
                                                      -    child = spawn('bad_command');
                                                      -
                                                      -child.stderr.setEncoding('utf8');
                                                      -child.stderr.on('data', function (data) {
                                                      -  if (/^execvp\(\)/.test(data)) {
                                                      -    console.log('Failed to start child process.');
                                                      -  }
                                                      -});
                                                      -

                                                      Note that if spawn receives an empty options object, it will result in -spawning the process with an empty environment rather than using -process.env. This due to backwards compatibility issues with a deprecated -API. +

                                                      options.stdio#

                                                      +

                                                      As a shorthand, the stdio argument may also be one of the following +strings:

                                                      -

                                                      The 'stdio' option to child_process.spawn() is an array where each +

                                                        +
                                                      • 'pipe' - ['pipe', 'pipe', 'pipe'], this is the default value
                                                      • +
                                                      • 'ignore' - ['ignore', 'ignore', 'ignore']
                                                      • +
                                                      • 'inherit' - [process.stdin, process.stdout, process.stderr] or [0,1,2]
                                                      • +
                                                      +

                                                      Otherwise, the 'stdio' option to child_process.spawn() is an array where each index corresponds to a fd in the child. The value is one of the following:

                                                      @@ -13224,15 +14207,6 @@ words, stdin, stdout, and stderr) a pipe is created. For fd 3 and up, the default is 'ignore'.
                                                    • -

                                                      As a shorthand, the stdio argument may also be one of the following -strings, rather than an array: - -

                                                      -
                                                        -
                                                      • ignore - ['ignore', 'ignore', 'ignore']
                                                      • -
                                                      • pipe - ['pipe', 'pipe', 'pipe']
                                                      • -
                                                      • inherit - [process.stdin, process.stdout, process.stderr] or [0,1,2]
                                                      • -

                                                      Example:

                                                      @@ -13247,6 +14221,7 @@ // Open an extra fd=4, to interact with programs that present a // startd-style interface. spawn('prg', [], { stdio: ['pipe', null, null, null, 'pipe'] });
                                                      +

                                                      options.detached#

                                                      If the detached option is set, the child process will be made the leader of a new process group. This makes it possible for the child to continue running after the parent exits. @@ -13278,6 +14253,7 @@ inherited, the child will remain attached to the controlling terminal.

                                                      +

                                                      options.customFds#

                                                      There is a deprecated option called customFds which allows one to specify specific file descriptors for the stdio of the child process. This API was not portable to all platforms and therefore removed. @@ -13289,7 +14265,7 @@

                                                      See also: child_process.exec() and child_process.fork()

                                                      -

                                                      child_process.exec(command, [options], callback)#

                                                      +

                                                      child_process.exec(command[, options], callback)#

                                                      • command String The command to run, with space-separated arguments
                                                      • options Object
                                                          @@ -13303,6 +14279,8 @@
                                                        • timeout Number (Default: 0)
                                                        • maxBuffer Number (Default: 200*1024)
                                                        • killSignal String (Default: 'SIGTERM')
                                                        • +
                                                        • uid Number Sets the user identity of the process. (See setuid(2).)
                                                        • +
                                                        • gid Number Sets the group identity of the process. (See setgid(2).)
                                                      • callback Function called with the output when process terminates
                                                          @@ -13351,7 +14329,7 @@

                                                          -

                                                          child_process.execFile(file, [args], [options], [callback])#

                                                          +

                                                          child_process.execFile(file[, args][, options][, callback])#

                                                          • file String The filename of the program to run
                                                          • args Array List of string arguments
                                                          • @@ -13362,6 +14340,8 @@
                                                          • timeout Number (Default: 0)
                                                          • maxBuffer Number (Default: 200*1024)
                                                          • killSignal String (Default: 'SIGTERM')
                                                          • +
                                                          • uid Number Sets the user identity of the process. (See setuid(2).)
                                                          • +
                                                          • gid Number Sets the group identity of the process. (See setgid(2).)
                                                        • callback Function called with the output when process terminates
                                                            @@ -13378,14 +14358,13 @@

                                                            -

                                                            child_process.fork(modulePath, [args], [options])#

                                                            +

                                                            child_process.fork(modulePath[, args][, options])#

                                                            • modulePath String The module to run in the child
                                                            • args Array List of string arguments
                                                            • options Object
                                                              • cwd String Current working directory of the child process
                                                              • env Object Environment key-value pairs
                                                              • -
                                                              • encoding String (Default: 'utf8')
                                                              • execPath String Executable used to create the child process
                                                              • execArgv Array List of string arguments passed to the executable (Default: process.execArgv)
                                                              • @@ -13393,6 +14372,8 @@ piped to the parent, otherwise they will be inherited from the parent, see the "pipe" and "inherit" options for spawn()'s stdio for more details (default is false) +
                                                              • uid Number Sets the user identity of the process. (See setuid(2).)
                                                              • +
                                                              • gid Number Sets the group identity of the process. (See setgid(2).)
                                                            • Return: ChildProcess object
                                                            • @@ -13415,7 +14396,17 @@ output on this fd is expected to be line delimited JSON objects.

                                                              -

                                                              child_process.spawnSync(command, [args], [options])#

                                                              +

                                                              Synchronous Process Creation#

                                                              +

                                                              These methods are synchronous, meaning they WILL block the event loop, +pausing execution of your code until the spawned process exits. + +

                                                              +

                                                              Blocking calls like these are mostly useful for simplifying general purpose +scripting tasks and for simplifying the loading/processing of application +configuration at startup. + +

                                                              +

                                                              child_process.spawnSync(command[, args][, options])#

                                                              • command String The command to run
                                                              • args Array List of string arguments
                                                              • @@ -13442,7 +14433,7 @@
                                                              • stderr Buffer|String The contents of output[2]
                                                              • status Number The exit code of the child process
                                                              • signal String The signal used to kill the child process
                                                              • -
                                                              • error Error The error object if the child process failed or timedout
                                                              • +
                                                              • error Error The error object if the child process failed or timed out
                                                            @@ -13453,7 +14444,7 @@ process has exited.

                                                            -

                                                            child_process.execFileSync(command, [args], [options])#

                                                            +

                                                            child_process.execFileSync(command[, args][, options])#

                                                            • command String The command to run
                                                            • args Array List of string arguments
                                                            • @@ -13492,7 +14483,7 @@

                                                              -

                                                              child_process.execSync(command, [options])#

                                                              +

                                                              child_process.execSync(command[, options])#

                                                              • command String The command to run
                                                              • options Object
                                                                  @@ -13538,35 +14529,35 @@

                                                                  Throws an exception that displays the values for actual and expected separated by the provided operator.

                                                                  -

                                                                  assert(value, message), assert.ok(value, [message])#

                                                                  +

                                                                  assert(value[, message]), assert.ok(value[, message])#

                                                                  Tests if value is truthy, it is equivalent to assert.equal(true, !!value, message);

                                                                  -

                                                                  assert.equal(actual, expected, [message])#

                                                                  +

                                                                  assert.equal(actual, expected[, message])#

                                                                  Tests shallow, coercive equality with the equal comparison operator ( == ).

                                                                  -

                                                                  assert.notEqual(actual, expected, [message])#

                                                                  +

                                                                  assert.notEqual(actual, expected[, message])#

                                                                  Tests shallow, coercive non-equality with the not equal comparison operator ( != ).

                                                                  -

                                                                  assert.deepEqual(actual, expected, [message])#

                                                                  +

                                                                  assert.deepEqual(actual, expected[, message])#

                                                                  Tests for deep equality.

                                                                  -

                                                                  assert.notDeepEqual(actual, expected, [message])#

                                                                  +

                                                                  assert.notDeepEqual(actual, expected[, message])#

                                                                  Tests for any deep inequality.

                                                                  -

                                                                  assert.strictEqual(actual, expected, [message])#

                                                                  +

                                                                  assert.strictEqual(actual, expected[, message])#

                                                                  Tests strict equality, as determined by the strict equality operator ( === )

                                                                  -

                                                                  assert.notStrictEqual(actual, expected, [message])#

                                                                  +

                                                                  assert.notStrictEqual(actual, expected[, message])#

                                                                  Tests strict non-equality, as determined by the strict not equal operator ( !== )

                                                                  -

                                                                  assert.throws(block, [error], [message])#

                                                                  +

                                                                  assert.throws(block[, error][, message])#

                                                                  Expects block to throw an error. error can be constructor, RegExp or validation function. @@ -13603,7 +14594,7 @@ }, "unexpected error" ); -

                                                                  assert.doesNotThrow(block, [message])#

                                                                  +

                                                                  assert.doesNotThrow(block[, message])#

                                                                  Expects block not to throw an error, see assert.throws for details.

                                                                  @@ -13888,38 +14879,38 @@ without a callback.

                                                                  -

                                                                  zlib.deflate(buf, [options], callback)#

                                                                  -

                                                                  zlib.deflateSync(buf, [options])#

                                                                  +

                                                                  zlib.deflate(buf[, options], callback)#

                                                                  +

                                                                  zlib.deflateSync(buf[, options])#

                                                                  Compress a string with Deflate.

                                                                  -

                                                                  zlib.deflateRaw(buf, [options], callback)#

                                                                  -

                                                                  zlib.deflateRawSync(buf, [options])#

                                                                  +

                                                                  zlib.deflateRaw(buf[, options], callback)#

                                                                  +

                                                                  zlib.deflateRawSync(buf[, options])#

                                                                  Compress a string with DeflateRaw.

                                                                  -

                                                                  zlib.gzip(buf, [options], callback)#

                                                                  -

                                                                  zlib.gzipSync(buf, [options])#

                                                                  +

                                                                  zlib.gzip(buf[, options], callback)#

                                                                  +

                                                                  zlib.gzipSync(buf[, options])#

                                                                  Compress a string with Gzip.

                                                                  -

                                                                  zlib.gunzip(buf, [options], callback)#

                                                                  -

                                                                  zlib.gunzipSync(buf, [options])#

                                                                  +

                                                                  zlib.gunzip(buf[, options], callback)#

                                                                  +

                                                                  zlib.gunzipSync(buf[, options])#

                                                                  Decompress a raw Buffer with Gunzip.

                                                                  -

                                                                  zlib.inflate(buf, [options], callback)#

                                                                  -

                                                                  zlib.inflateSync(buf, [options])#

                                                                  +

                                                                  zlib.inflate(buf[, options], callback)#

                                                                  +

                                                                  zlib.inflateSync(buf[, options])#

                                                                  Decompress a raw Buffer with Inflate.

                                                                  -

                                                                  zlib.inflateRaw(buf, [options], callback)#

                                                                  -

                                                                  zlib.inflateRawSync(buf, [options])#

                                                                  +

                                                                  zlib.inflateRaw(buf[, options], callback)#

                                                                  +

                                                                  zlib.inflateRawSync(buf[, options])#

                                                                  Decompress a raw Buffer with InflateRaw.

                                                                  -

                                                                  zlib.unzip(buf, [options], callback)#

                                                                  -

                                                                  zlib.unzipSync(buf, [options])#

                                                                  +

                                                                  zlib.unzip(buf[, options], callback)#

                                                                  +

                                                                  zlib.unzipSync(buf[, options])#

                                                                  Decompress a raw Buffer with Unzip.

                                                                  @@ -14549,6 +15540,8 @@ (Default=process.argv.slice(2))
                                                                • silent Boolean whether or not to send output to parent's stdio. (Default=false)
                                                                • +
                                                                • uid Number Sets the user identity of the process. (See setuid(2).)
                                                                • +
                                                                • gid Number Sets the group identity of the process. (See setgid(2).)
                                                            @@ -14681,7 +15674,18 @@

                                                            Event: 'setup'#

                                                            -

                                                            Emitted the first time that .setupMaster() is called. +

                                                              +
                                                            • settings Object
                                                            • +
                                                          +

                                                          Emitted every time .setupMaster() is called. + +

                                                          +

                                                          The settings object is the cluster.settings object at the time +.setupMaster() was called and is advisory only, since multiple calls to +.setupMaster() can be made in a single tick. + +

                                                          +

                                                          If accuracy is important, use cluster.settings.

                                                          cluster.setupMaster([settings])#

                                                          @@ -14703,24 +15707,27 @@

                                                            -
                                                          • Only the first call to .setupMaster() has any effect, subsequent calls are -ignored
                                                          • -
                                                          • That because of the above, the only attribute of a worker that may be -customized per-worker is the env passed to .fork()
                                                          • -
                                                          • .fork() calls .setupMaster() internally to establish the defaults, so to -have any effect, .setupMaster() must be called before any calls to -.fork()
                                                          • +
                                                          • any settings changes only affect future calls to .fork() and have no +effect on workers that are already running
                                                          • +
                                                          • The only attribute of a worker that cannot be set via .setupMaster() is +the env passed to .fork()
                                                          • +
                                                          • the defaults above apply to the first call only, the defaults for later +calls is the current value at the time of cluster.setupMaster() is called

                                                          Example:

                                                          -
                                                          var cluster = require("cluster");
                                                          +
                                                          var cluster = require('cluster');
                                                           cluster.setupMaster({
                                                          -  exec : "worker.js",
                                                          -  args : ["--use", "https"],
                                                          -  silent : true
                                                          +  exec: 'worker.js',
                                                          +  args: ['--use', 'https'],
                                                          +  silent: true
                                                           });
                                                          -cluster.fork();
                                                          +cluster.fork(); // https worker +cluster.setupMaster({ + args: ['--use', 'http'] +}); +cluster.fork(); // http worker

                                                          This can only be called from the master process.

                                                          @@ -14778,8 +15785,10 @@ process.

                                                          -

                                                          A worker is removed from cluster.workers just before the 'disconnect' or -'exit' event is emitted. +

                                                          A worker is removed from cluster.workers after the worker has disconnected and +exited. The order between these two events cannot be determined in advance. +However, it is guaranteed that the removal from the cluster.workers list happens +before last 'disconnect' or 'exit' event is emitted.

                                                          // Go through all workers
                                                          @@ -14852,7 +15861,7 @@
                                                           
                                                           // kill worker
                                                           worker.kill();
                                                          -

                                                          worker.send(message, [sendHandle])#

                                                          +

                                                          worker.send(message[, sendHandle])#

                                                          • message Object
                                                          • sendHandle Handle object
                                                          • @@ -14961,6 +15970,17 @@ } }); } +

                                                            worker.isDead()#

                                                            +

                                                            This function returns true if the worker's process has terminated (either +because of exiting or being signaled). Otherwise, it returns false. + +

                                                            +

                                                            worker.isConnected()#

                                                            +

                                                            This function returns true if the worker is connected to its master via its IPC +channel, false otherwise. A worker is connected to its master after it's been +created. It is disconnected after the disconnect event is emitted. + +

                                                            Event: 'message'#

                                                            • message Object
                                                            • @@ -15071,20 +16091,21 @@

                                                              Smalloc#

                                                              -
                                                              Stability: 1 - Experimental

                                                              smalloc.alloc(length[, receiver][, type])#

                                                              -
                                                                -
                                                              • length {Number} <= smalloc.kMaxLength
                                                              • -
                                                              • receiver {Object}, Optional, Default: new Object
                                                              • -
                                                              • type {Enum}, Optional, Default: Uint8
                                                              • -
                                                              -

                                                              Returns receiver with allocated external array data. If no receiver is -passed then a new Object will be created and returned. - -

                                                              +
                                                              Stability: 1 - Experimental

                                                              Class: smalloc#

                                                              Buffers are backed by a simple allocator that only handles the assignation of external raw memory. Smalloc exposes that functionality.

                                                              +

                                                              smalloc.alloc(length[, receiver][, type])#

                                                              +
                                                                +
                                                              • length Number <= smalloc.kMaxLength
                                                              • +
                                                              • receiver Object Default: new Object
                                                              • +
                                                              • type Enum Default: Uint8
                                                              • +
                                                            +

                                                            Returns receiver with allocated external array data. If no receiver is +passed then a new Object will be created and returned. + +

                                                            This can be used to create your own Buffer-like classes. No other properties are set, so the user will need to keep track of other necessary information (e.g. length of the allocation). @@ -15118,13 +16139,18 @@ doubleArr = i / 10; // { '0': 0, '1': 0.1, '2': 0.2 } -

                                                            smalloc.copyOnto(source, sourceStart, dest, destStart, copyLength);#

                                                            -
                                                              -
                                                            • source Object with external array allocation
                                                            • -
                                                            • sourceStart Position to begin copying from
                                                            • -
                                                            • dest Object with external array allocation
                                                            • -
                                                            • destStart Position to begin copying onto
                                                            • -
                                                            • copyLength Length of copy
                                                            • +

                                                              It is not possible to freeze, seal and prevent extensions of objects with +external data using Object.freeze, Object.seal and +Object.preventExtensions respectively. + +

                                                              +

                                                              smalloc.copyOnto(source, sourceStart, dest, destStart, copyLength);#

                                                              +
                                                                +
                                                              • source Object with external array allocation
                                                              • +
                                                              • sourceStart Number Position to begin copying from
                                                              • +
                                                              • dest Object with external array allocation
                                                              • +
                                                              • destStart Number Position to begin copying onto
                                                              • +
                                                              • copyLength Number Length of copy

                                                            Copy memory from one external array allocation to another. No arguments are optional, and any violation will throw. @@ -15148,7 +16174,7 @@ need to set any additional properties for this to work.

                                                            -

                                                            smalloc.dispose(obj)#

                                                            +

                                                            smalloc.dispose(obj)#

                                                            • obj Object
                                                          @@ -15177,22 +16203,24 @@ smalloc.copyOnto(b, 2, a, 0, 2); // now results in: -// Error: source has no external array data -

                                                          dispose() does not support Buffers, and will throw if passed. +// RangeError: copy_length > source_length +

                                                          After dispose() is called object still behaves as one with external data, for +example smalloc.hasExternalData() returns true. +dispose() does not support Buffers, and will throw if passed.

                                                          -

                                                          smalloc.hasExternalData(obj)#

                                                          +

                                                          smalloc.hasExternalData(obj)#

                                                          • obj Object

                                                        Returns true if the obj has externally allocated memory.

                                                        -

                                                        smalloc.kMaxLength#

                                                        +

                                                        smalloc.kMaxLength#

                                                        Size of maximum allocation. This is also applicable to Buffer creation.

                                                        -

                                                        smalloc.Types#

                                                        +

                                                        smalloc.Types#

                                                        Enum of possible external array types. Contains:

                                                        @@ -15207,306 +16235,52 @@
                                                      • Double
                                                      • Uint8Clamped
                                                      -

                                                      Tracing#

                                                      -
                                                      Stability: 1 - Experimental

                                                      The tracing module is designed for instrumenting your Node application. It is -not meant for general purpose use. - -

                                                      -

                                                      Be very careful with callbacks used in conjunction with this module - -

                                                      -

                                                      Many of these callbacks interact directly with asynchronous subsystems in a -synchronous fashion. That is to say, you may be in a callback where a call to -console.log() could result in an infinite recursive loop. Also of note, many -of these callbacks are in hot execution code paths. That is to say your -callbacks are executed quite often in the normal operation of Node, so be wary -of doing CPU bound or synchronous workloads in these functions. Consider a ring -buffer and a timer to defer processing. - -

                                                      -

                                                      require('tracing') to use this module. - -

                                                      -

                                                      v8#

                                                      -

                                                      The v8 property is an EventEmitter, it exposes events and interfaces -specific to the version of v8 built with node. These interfaces are subject -to change by upstream and are therefore not covered under the stability index. - -

                                                      -

                                                      Event: 'gc'#

                                                      -

                                                      function (before, after) { } - -

                                                      -

                                                      Emitted each time a GC run is completed. - -

                                                      -

                                                      before and after are objects with the following properties: - -

                                                      -
                                                      {
                                                      -  type: 'mark-sweep-compact',
                                                      -  flags: 0,
                                                      -  timestamp: 905535650119053,
                                                      -  total_heap_size: 6295040,
                                                      -  total_heap_size_executable: 4194304,
                                                      -  total_physical_size: 6295040,
                                                      -  used_heap_size: 2855416,
                                                      -  heap_size_limit: 1535115264
                                                      -}
                                                      -

                                                      getHeapStatistics()#

                                                      -

                                                      Returns an object with the following properties - -

                                                      -
                                                      {
                                                      -  total_heap_size: 7326976,
                                                      -  total_heap_size_executable: 4194304,
                                                      -  total_physical_size: 7326976,
                                                      -  used_heap_size: 3476208,
                                                      -  heap_size_limit: 1535115264
                                                      -}
                                                      -

                                                      Async Listeners#

                                                      -

                                                      The AsyncListener API is the JavaScript interface for the AsyncWrap -class which allows developers to be notified about key events in the -lifetime of an asynchronous event. Node performs a lot of asynchronous -events internally, and significant use of this API may have a -significant performance impact on your application. - - -

                                                      -

                                                      tracing.createAsyncListener(callbacksObj[, userData])#

                                                      -
                                                        -
                                                      • callbacksObj Object Contains optional callbacks that will fire at -specific times in the life cycle of the asynchronous event.
                                                      • -
                                                      • userData Value a value that will be passed to all callbacks.
                                                      • -
                                                    -

                                                    Returns a constructed AsyncListener object. - -

                                                    -

                                                    To begin capturing asynchronous events pass either the callbacksObj or -pass an existing AsyncListener instance to tracing.addAsyncListener(). -The same AsyncListener instance can only be added once to the active -queue, and subsequent attempts to add the instance will be ignored. - -

                                                    -

                                                    To stop capturing pass the AsyncListener instance to -tracing.removeAsyncListener(). This does not mean the -AsyncListener previously added will stop triggering callbacks. Once -attached to an asynchronous event it will persist with the lifetime of the -asynchronous call stack. - -

                                                    -

                                                    Explanation of function parameters: - - -

                                                    -

                                                    callbacksObj: An Object which may contain several optional fields: - -

                                                    -
                                                      -
                                                    • create(userData): A Function called when an asynchronous -event is instantiated. If a Value is returned then it will be attached -to the event and overwrite any value that had been passed to -tracing.createAsyncListener()'s userData argument. If an initial -userData was passed when created, then create() will -receive that as a function argument.

                                                      -
                                                    • -
                                                    • before(context, userData): A Function that is called immediately -before the asynchronous callback is about to run. It will be passed both -the context (i.e. this) of the calling function and the userData -either returned from create() or passed during construction (if -either occurred).

                                                      -
                                                    • -
                                                    • after(context, userData): A Function called immediately after -the asynchronous event's callback has run. Note this will not be called -if the callback throws and the error is not handled.

                                                      -
                                                    • -
                                                    • error(userData, error): A Function called if the event's -callback threw. If this registered callback returns true then Node will -assume the error has been properly handled and resume execution normally. -When multiple error() callbacks have been registered only one of -those callbacks needs to return true for AsyncListener to accept that -the error has been handled, but all error() callbacks will always be run.

                                                      -
                                                    • -
                                                    -

                                                    userData: A Value (i.e. anything) that will be, by default, -attached to all new event instances. This will be overwritten if a Value -is returned by create(). - -

                                                    -

                                                    Here is an example of overwriting the userData: - -

                                                    -
                                                    tracing.createAsyncListener({
                                                    -  create: function listener(value) {
                                                    -    // value === true
                                                    -    return false;
                                                    -}, {
                                                    -  before: function before(context, value) {
                                                    -    // value === false
                                                    -  }
                                                    -}, true);
                                                    -

                                                    Note: The EventEmitter, while used to emit status of an asynchronous -event, is not itself asynchronous. So create() will not fire when -an event is added, and before()/after() will not fire when emitted -callbacks are called. - - -

                                                    -

                                                    tracing.addAsyncListener(callbacksObj[, userData])#

                                                    -

                                                    tracing.addAsyncListener(asyncListener)#

                                                    -

                                                    Returns a constructed AsyncListener object and immediately adds it to -the listening queue to begin capturing asynchronous events. - -

                                                    -

                                                    Function parameters can either be the same as -tracing.createAsyncListener(), or a constructed AsyncListener -object. - -

                                                    -

                                                    Example usage for capturing errors: - -

                                                    -
                                                    var fs = require('fs');
                                                    -
                                                    -var cntr = 0;
                                                    -var key = tracing.addAsyncListener({
                                                    -  create: function onCreate() {
                                                    -    return { uid: cntr++ };
                                                    -  },
                                                    -  before: function onBefore(context, storage) {
                                                    -    // Write directly to stdout or we'll enter a recursive loop
                                                    -    fs.writeSync(1, 'uid: ' + storage.uid + ' is about to run\n');
                                                    -  },
                                                    -  after: function onAfter(context, storage) {
                                                    -    fs.writeSync(1, 'uid: ' + storage.uid + ' ran\n');
                                                    -  },
                                                    -  error: function onError(storage, err) {
                                                    -    // Handle known errors
                                                    -    if (err.message === 'everything is fine') {
                                                    -      // Writing to stderr this time.
                                                    -      fs.writeSync(2, 'handled error just threw:\n');
                                                    -      fs.writeSync(2, err.stack + '\n');
                                                    -      return true;
                                                    -    }
                                                    -  }
                                                    -});
                                                    -
                                                    -process.nextTick(function() {
                                                    -  throw new Error('everything is fine');
                                                    -});
                                                    -
                                                    -// Output:
                                                    -// uid: 0 is about to run
                                                    -// handled error just threw:
                                                    -// Error: really, it's ok
                                                    -//     at /tmp/test2.js:27:9
                                                    -//     at process._tickCallback (node.js:583:11)
                                                    -//     at Function.Module.runMain (module.js:492:11)
                                                    -//     at startup (node.js:123:16)
                                                    -//     at node.js:1012:3
                                                    -

                                                    tracing.removeAsyncListener(asyncListener)#

                                                    -

                                                    Removes the AsyncListener from the listening queue. - -

                                                    -

                                                    Removing the AsyncListener from the active queue does not mean the -asyncListener callbacks will cease to fire on the events they've been -registered. Subsequently, any asynchronous events fired during the -execution of a callback will also have the same asyncListener callbacks -attached for future execution. For example: - -

                                                    -
                                                    var fs = require('fs');
                                                    -
                                                    -var key = tracing.createAsyncListener({
                                                    -  create: function asyncListener() {
                                                    -    // Write directly to stdout or we'll enter a recursive loop
                                                    -    fs.writeSync(1, 'You summoned me?\n');
                                                    -  }
                                                    -});
                                                    -
                                                    -// We want to begin capturing async events some time in the future.
                                                    -setTimeout(function() {
                                                    -  tracing.addAsyncListener(key);
                                                    -
                                                    -  // Perform a few additional async events.
                                                    -  setTimeout(function() {
                                                    -    setImmediate(function() {
                                                    -      process.nextTick(function() { });
                                                    -    });
                                                    -  });
                                                    -
                                                    -  // Removing the listener doesn't mean to stop capturing events that
                                                    -  // have already been added.
                                                    -  tracing.removeAsyncListener(key);
                                                    -}, 100);
                                                    -
                                                    -// Output:
                                                    -// You summoned me?
                                                    -// You summoned me?
                                                    -// You summoned me?
                                                    -// You summoned me?
                                                    -

                                                    The fact that we logged 4 asynchronous events is an implementation detail -of Node's Timers. - -

                                                    -

                                                    To stop capturing from a specific asynchronous event stack -tracing.removeAsyncListener() must be called from within the call -stack itself. For example: - -

                                                    -
                                                    var fs = require('fs');
                                                    -
                                                    -var key = tracing.createAsyncListener({
                                                    -  create: function asyncListener() {
                                                    -    // Write directly to stdout or we'll enter a recursive loop
                                                    -    fs.writeSync(1, 'You summoned me?\n');
                                                    -  }
                                                    -});
                                                    -
                                                    -// We want to begin capturing async events some time in the future.
                                                    -setTimeout(function() {
                                                    -  tracing.addAsyncListener(key);
                                                    -
                                                    -  // Perform a few additional async events.
                                                    -  setImmediate(function() {
                                                    -    // Stop capturing from this call stack.
                                                    -    tracing.removeAsyncListener(key);
                                                    -
                                                    -    process.nextTick(function() { });
                                                    -  });
                                                    -}, 100);
                                                    -
                                                    -// Output:
                                                    -// You summoned me?
                                                    -

                                                    The user must be explicit and always pass the AsyncListener they wish -to remove. It is not possible to simply remove all listeners at once. - - -

                                                +
                                                + +
                                              - - + + + - -
                                              - - - + + +
                                              - - +
                                              -

                                              Node.js v0.11.13 Manual & Documentation

                                              +

                                              Node.js v0.11.15 Manual & Documentation

                                              Index | @@ -47,15 +52,15 @@

                                              +
                                              + +
                                              - - + + + - -
                                              - - - + + +
                                              - - +
                                              -

                                              Node.js v0.11.13 Manual & Documentation

                                              +

                                              Node.js v0.11.15 Manual & Documentation

                                              Index | @@ -49,51 +54,59 @@

                                            • Class: Buffer
                                            • buffer.INSPECT_MAX_BYTES
                                            • @@ -147,6 +160,27 @@
                                            • 'hex' - Encode each byte as two hexadecimal characters.

                                            +

                                            Creating a typed array from a Buffer works with the following caveats: + +

                                            +
                                              +
                                            1. The buffer's memory is copied, not shared.

                                              +
                                            2. +
                                            3. The buffer's memory is interpreted as an array, not a byte array. That is, +new Uint32Array(new Buffer([1,2,3,4])) creates a 4-element Uint32Array +with elements [1,2,3,4], not an Uint32Array with a single element +[0x1020304] or [0x4030201].

                                              +
                                            4. +
                                            +

                                            NOTE: Node.js v0.8 simply retained a reference to the buffer in array.buffer +instead of cloning it. + +

                                            +

                                            While more efficient, it introduces subtle incompatibilities with the typed +arrays specification. ArrayBuffer#slice() makes a copy of the slice while +Buffer#slice() creates a view. + +

                                            Class: Buffer#

                                            The Buffer class is a global type for dealing with binary data directly. It can be constructed in a variety of ways. @@ -156,7 +190,9 @@

                                            • size Number
                                          -

                                          Allocates a new buffer of size octets. +

                                          Allocates a new buffer of size octets. Note, size must be no more than +kMaxLength. Otherwise, a RangeError +will be thrown here.

                                          new Buffer(array)#

                                          @@ -166,7 +202,14 @@

                                          Allocates a new buffer using an array of octets.

                                          -

                                          new Buffer(str, [encoding])#

                                          +

                                          new Buffer(buffer)#

                                          +
                                            +
                                          • buffer Buffer
                                          • +
                                        +

                                        Copies the passed buffer data onto a new Buffer instance. + +

                                        +

                                        new Buffer(str[, encoding])#

                                        • str String - string to encode.
                                        • encoding String - encoding to use, Optional.
                                        • @@ -191,7 +234,7 @@

                                          Tests if obj is a Buffer.

                                          -

                                          Class Method: Buffer.byteLength(string, [encoding])#

                                          +

                                          Class Method: Buffer.byteLength(string[, encoding])#

                                          • string String
                                          • encoding String, Optional, Default: 'utf8'
                                          • @@ -211,7 +254,7 @@ Buffer.byteLength(str, 'utf8') + " bytes"); // ½ + ¼ = ¾: 9 characters, 12 bytes -

                                            Class Method: Buffer.concat(list, [totalLength])#

                                            +

                                            Class Method: Buffer.concat(list[, totalLength])#

                                            • list Array List of Buffer objects to concat
                                            • totalLength Number Total length of the buffers when concatenated
                                            • @@ -264,7 +307,18 @@ // 1234 // 1234 -

                                              buf.write(string, [offset], [length], [encoding])#

                                              +

                                              While the length property is not immutable, changing the value of length +can result in undefined and inconsistent behavior. Applications that wish to +modify the length of a buffer should therefore treat length as read-only and +use buf.slice to create a new buffer. + +

                                              +
                                              buf = new Buffer(10);
                                              +buf.write("abcdefghj", 0, "ascii");
                                              +console.log(buf.length); // 10
                                              +buf = buf.slice(0,5);
                                              +console.log(buf.length); // 5
                                              +

                                              buf.write(string[, offset][, length][, encoding])#

                                              • string String - data to be written to buffer
                                              • offset Number, Optional, Default: 0
                                              • @@ -282,17 +336,71 @@
                                                buf = new Buffer(256);
                                                 len = buf.write('\u00bd + \u00bc = \u00be', 0);
                                                 console.log(len + " bytes: " + buf.toString('utf8', 0, len));
                                                -

                                                buf.toString([encoding], [start], [end])#

                                                +

                                                buf.writeUIntLE(value, offset, byteLength[, noAssert])#

                                                +

                                                buf.writeUIntBE(value, offset, byteLength[, noAssert])#

                                                +

                                                buf.writeIntLE(value, offset, byteLength[, noAssert])#

                                                +

                                                buf.writeIntBE(value, offset, byteLength[, noAssert])#

                                                +
                                                  +
                                                • value {Number} Bytes to be written to buffer
                                                • +
                                                • offset {Number} 0 <= offset <= buf.length
                                                • +
                                                • byteLength {Number} 0 < byteLength <= 6
                                                • +
                                                • noAssert {Boolean} Default: false
                                                • +
                                                • Return: {Number}
                                                • +
                                                +

                                                Writes value to the buffer at the specified offset and byteLength. +Supports up to 48 bits of accuracy. For example: + +

                                                +
                                                var b = new Buffer(6);
                                                +b.writeUIntBE(0x1234567890ab, 0, 6);
                                                +// <Buffer 12 34 56 78 90 ab>
                                                +

                                                Set noAssert to true to skip validation of value and offset. Defaults +to false. + +

                                                +

                                                buf.readUIntLE(offset, byteLength[, noAssert])#

                                                +

                                                buf.readUIntBE(offset, byteLength[, noAssert])#

                                                +

                                                buf.readIntLE(offset, byteLength[, noAssert])#

                                                +

                                                buf.readIntBE(offset, byteLength[, noAssert])#

                                                +
                                                  +
                                                • offset {Number} 0 <= offset <= buf.length
                                                • +
                                                • byteLength {Number} 0 < byteLength <= 6
                                                • +
                                                • noAssert {Boolean} Default: false
                                                • +
                                                • Return: {Number}
                                                • +
                                                +

                                                A generalized version of all numeric read methods. Supports up to 48 bits of +accuracy. For example: + +

                                                +
                                                var b = new Buffer(6);
                                                +b.writeUint16LE(0x90ab, 0);
                                                +b.writeUInt32LE(0x12345678, 2);
                                                +b.readUIntLE(0, 6).toString(16);  // Specify 6 bytes (48 bits)
                                                +// output: '1234567890ab'
                                                +

                                                Set noAssert to true to skip validation of offset. This means that offset +may be beyond the end of the buffer. Defaults to false. + +

                                                +

                                                buf.toString([encoding][, start][, end])#

                                                • encoding String, Optional, Default: 'utf8'
                                                • start Number, Optional, Default: 0
                                                • end Number, Optional, Default: buffer.length
                                              -

                                              Decodes and returns a string from buffer data encoded with encoding -(defaults to 'utf8') beginning at start (defaults to 0) and ending at -end (defaults to buffer.length). +

                                              Decodes and returns a string from buffer data encoded using the specified +character set encoding. If encoding is undefined or null, then encoding +defaults to 'utf8'. The start and end parameters default to 0 and +buffer.length when undefined`.

                                              +
                                              buf = new Buffer(26);
                                              +for (var i = 0 ; i < 26 ; i++) {
                                              +  buf[i] = i + 97; // 97 is ASCII a
                                              +}
                                              +buf.toString('ascii'); // outputs: abcdefghijklmnopqrstuvwxyz
                                              +buf.toString('ascii',0,5); // outputs: abcde
                                              +buf.toString('utf8',0,5); // outputs: abcde
                                              +buf.toString(undefined,0,5); // encoding defaults to 'utf8', outputs abcde

                                              See buffer.write() example, above. @@ -355,22 +463,18 @@

                                              Returns a number indicating whether this comes before or after or is the same as the otherBuffer in sort order. -

                                              -

                                              buf.copy(targetBuffer, [targetStart], [sourceStart], [sourceEnd])#

                                              +

                                              buf.copy(targetBuffer[, targetStart][, sourceStart][, sourceEnd])#

                                              • targetBuffer Buffer object - Buffer to copy into
                                              • targetStart Number, Optional, Default: 0
                                              • sourceStart Number, Optional, Default: 0
                                              • sourceEnd Number, Optional, Default: buffer.length
                                            -

                                            Does copy between buffers. The source and target regions can be overlapped. -targetStart and sourceStart default to 0. -sourceEnd defaults to buffer.length. - -

                                            -

                                            All values passed that are undefined/NaN or are out of bounds are set equal -to their respective defaults. +

                                            Copies data from a region of this buffer to a region in the target buffer even +if the target memory region overlaps with the source. If undefined the +targetStart and sourceStart parameters default to 0 while sourceEnd +defaults to buffer.length.

                                            Example: build two Buffers, then copy buf1 from byte 16 through byte 19 @@ -389,7 +493,21 @@ console.log(buf2.toString('ascii', 0, 25)); // !!!!!!!!qrst!!!!!!!!!!!!! -

                                            buf.slice([start], [end])#

                                            +

                                            Example: Build a single buffer, then copy data from one region to an overlapping +region in the same buffer + +

                                            +
                                            buf = new Buffer(26);
                                            +
                                            +for (var i = 0 ; i < 26 ; i++) {
                                            +  buf[i] = i + 97; // 97 is ASCII a
                                            +}
                                            +
                                            +buf.copy(buf, 0, 4, 10);
                                            +console.log(buf.toString());
                                            +
                                            +// efghijghijklmnopqrstuvwxyz
                                            +

                                            buf.slice([start][, end])#

                                            • start Number, Optional, Default: 0
                                            • end Number, Optional, Default: buffer.length
                                            • @@ -419,7 +537,7 @@ // abc // !bc -

                                              buf.readUInt8(offset, [noAssert])#

                                              +

                                              buf.readUInt8(offset[, noAssert])#

                                              • offset Number
                                              • noAssert Boolean, Optional, Default: false
                                              • @@ -450,8 +568,8 @@ // 0x4 // 0x23 // 0x42 -

                                                buf.readUInt16LE(offset, [noAssert])#

                                                -

                                                buf.readUInt16BE(offset, [noAssert])#

                                                +

                                                buf.readUInt16LE(offset[, noAssert])#

                                                +

                                                buf.readUInt16BE(offset[, noAssert])#

                                                • offset Number
                                                • noAssert Boolean, Optional, Default: false
                                                • @@ -488,8 +606,8 @@ // 0x2304 // 0x2342 // 0x4223 -

                                                  buf.readUInt32LE(offset, [noAssert])#

                                                  -

                                                  buf.readUInt32BE(offset, [noAssert])#

                                                  +

                                                  buf.readUInt32LE(offset[, noAssert])#

                                                  +

                                                  buf.readUInt32BE(offset[, noAssert])#

                                                  • offset Number
                                                  • noAssert Boolean, Optional, Default: false
                                                  • @@ -518,7 +636,7 @@ // 0x03042342 // 0x42230403 -

                                                    buf.readInt8(offset, [noAssert])#

                                                    +

                                                    buf.readInt8(offset[, noAssert])#

                                                    • offset Number
                                                    • noAssert Boolean, Optional, Default: false
                                                    • @@ -535,8 +653,8 @@ complement signed values.

                                                      -

                                                      buf.readInt16LE(offset, [noAssert])#

                                                      -

                                                      buf.readInt16BE(offset, [noAssert])#

                                                      +

                                                      buf.readInt16LE(offset[, noAssert])#

                                                      +

                                                      buf.readInt16BE(offset[, noAssert])#

                                                      • offset Number
                                                      • noAssert Boolean, Optional, Default: false
                                                      • @@ -554,8 +672,8 @@ complement signed values.

                                                        -

                                                        buf.readInt32LE(offset, [noAssert])#

                                                        -

                                                        buf.readInt32BE(offset, [noAssert])#

                                                        +

                                                        buf.readInt32LE(offset[, noAssert])#

                                                        +

                                                        buf.readInt32BE(offset[, noAssert])#

                                                        • offset Number
                                                        • noAssert Boolean, Optional, Default: false
                                                        • @@ -573,8 +691,8 @@ complement signed values.

                                                          -

                                                          buf.readFloatLE(offset, [noAssert])#

                                                          -

                                                          buf.readFloatBE(offset, [noAssert])#

                                                          +

                                                          buf.readFloatLE(offset[, noAssert])#

                                                          +

                                                          buf.readFloatBE(offset[, noAssert])#

                                                          • offset Number
                                                          • noAssert Boolean, Optional, Default: false
                                                          • @@ -601,8 +719,8 @@ console.log(buf.readFloatLE(0)); // 0x01 -

                                                            buf.readDoubleLE(offset, [noAssert])#

                                                            -

                                                            buf.readDoubleBE(offset, [noAssert])#

                                                            +

                                                            buf.readDoubleLE(offset[, noAssert])#

                                                            +

                                                            buf.readDoubleBE(offset[, noAssert])#

                                                            • offset Number
                                                            • noAssert Boolean, Optional, Default: false
                                                            • @@ -633,7 +751,7 @@ console.log(buf.readDoubleLE(0)); // 0.3333333333333333 -

                                                              buf.writeUInt8(value, offset, [noAssert])#

                                                              +

                                                              buf.writeUInt8(value, offset[, noAssert])#

                                                              • value Number
                                                              • offset Number
                                                              • @@ -661,8 +779,8 @@ console.log(buf); // <Buffer 03 04 23 42> -

                                                                buf.writeUInt16LE(value, offset, [noAssert])#

                                                                -

                                                                buf.writeUInt16BE(value, offset, [noAssert])#

                                                                +

                                                                buf.writeUInt16LE(value, offset[, noAssert])#

                                                                +

                                                                buf.writeUInt16BE(value, offset[, noAssert])#

                                                                • value Number
                                                                • offset Number
                                                                • @@ -694,8 +812,8 @@ // <Buffer de ad be ef> // <Buffer ad de ef be> -

                                                                  buf.writeUInt32LE(value, offset, [noAssert])#

                                                                  -

                                                                  buf.writeUInt32BE(value, offset, [noAssert])#

                                                                  +

                                                                  buf.writeUInt32LE(value, offset[, noAssert])#

                                                                  +

                                                                  buf.writeUInt32BE(value, offset[, noAssert])#

                                                                  • value Number
                                                                  • offset Number
                                                                  • @@ -725,7 +843,7 @@ // <Buffer fe ed fa ce> // <Buffer ce fa ed fe> -

                                                                    buf.writeInt8(value, offset, [noAssert])#

                                                                    +

                                                                    buf.writeInt8(value, offset[, noAssert])#

                                                                    • value Number
                                                                    • offset Number
                                                                    • @@ -745,8 +863,8 @@ signed integer into buffer.

                                                                      -

                                                                      buf.writeInt16LE(value, offset, [noAssert])#

                                                                      -

                                                                      buf.writeInt16BE(value, offset, [noAssert])#

                                                                      +

                                                                      buf.writeInt16LE(value, offset[, noAssert])#

                                                                      +

                                                                      buf.writeInt16BE(value, offset[, noAssert])#

                                                                      • value Number
                                                                      • offset Number
                                                                      • @@ -766,8 +884,8 @@ complement signed integer into buffer.

                                                                        -

                                                                        buf.writeInt32LE(value, offset, [noAssert])#

                                                                        -

                                                                        buf.writeInt32BE(value, offset, [noAssert])#

                                                                        +

                                                                        buf.writeInt32LE(value, offset[, noAssert])#

                                                                        +

                                                                        buf.writeInt32BE(value, offset[, noAssert])#

                                                                        • value Number
                                                                        • offset Number
                                                                        • @@ -787,8 +905,8 @@ complement signed integer into buffer.

                                                                          -

                                                                          buf.writeFloatLE(value, offset, [noAssert])#

                                                                          -

                                                                          buf.writeFloatBE(value, offset, [noAssert])#

                                                                          +

                                                                          buf.writeFloatLE(value, offset[, noAssert])#

                                                                          +

                                                                          buf.writeFloatBE(value, offset[, noAssert])#

                                                                          • value Number
                                                                          • offset Number
                                                                          • @@ -818,8 +936,8 @@ // <Buffer 4f 4a fe bb> // <Buffer bb fe 4a 4f> -

                                                                            buf.writeDoubleLE(value, offset, [noAssert])#

                                                                            -

                                                                            buf.writeDoubleBE(value, offset, [noAssert])#

                                                                            +

                                                                            buf.writeDoubleLE(value, offset[, noAssert])#

                                                                            +

                                                                            buf.writeDoubleBE(value, offset[, noAssert])#

                                                                            • value Number
                                                                            • offset Number
                                                                            • @@ -849,7 +967,7 @@ // <Buffer 43 eb d5 b7 dd f9 5f d7> // <Buffer d7 5f f9 dd b7 d5 eb 43> -

                                                                              buf.fill(value, [offset], [end])#

                                                                              +

                                                                              buf.fill(value[, offset][, end])#

                                                                              • value
                                                                              • offset Number, Optional
                                                                              • @@ -862,10 +980,6 @@

                                                                                var b = new Buffer(50);
                                                                                 b.fill("h");
                                                                                -

                                                                                buf.toArrayBuffer()#

                                                                                -

                                                                                Creates a new ArrayBuffer with the copied memory of the buffer instance. - -

                                                                                buffer.INSPECT_MAX_BYTES#

                                                                                • Number, Default: 50
                                                                                • @@ -911,26 +1025,48 @@
                                                                    +
                                                              + +
                                              - - + + + - -
                                          @@ -88,12 +103,17 @@

                                          It is possible to stream data through a child's stdin, stdout, and stderr in a fully non-blocking way. (Note that some programs use line-buffered I/O internally. That doesn't affect node.js but it means -data you send to the child process is not immediately consumed.) +data you send to the child process may not be immediately consumed.)

                                          To create a child process use require('child_process').spawn() or require('child_process').fork(). The semantics of each are slightly -different, and explained below. +different, and explained below. + +

                                          +

                                          For scripting purposes you may find the +synchronous counterparts more +convenient.

                                          Class: ChildProcess#

                                          @@ -107,7 +127,8 @@

                                          The ChildProcess class is not intended to be used directly. Use the -spawn() or fork() methods to create a Child Process instance. +spawn(), exec(), execFile(), or fork() methods to create a Child +Process instance.

                                          Event: 'error'#

                                          @@ -122,7 +143,7 @@
                                        • The process could not be killed, or
                                        • Sending a message to the child process failed for whatever reason.
                                        • -

                                          Note that the exit-event may or may not fire after an error has occured. If +

                                          Note that the exit-event may or may not fire after an error has occurred. If you are listening on both events to fire a function, remember to guard against calling your function twice. @@ -184,13 +205,18 @@

                                        • Stream object

                                        A Writable Stream that represents the child process's stdin. -Closing this stream via end() often causes the child process to terminate. +If the child is waiting to read all its input, it will not continue until this +stream has been closed via end().

                                        -

                                        If the child stdio streams are shared with the parent, then this will +

                                        If the child was not spawned with stdio[0] set to 'pipe', then this will not be set.

                                        +

                                        child.stdin is shorthand for child.stdio[0]. Both properties will refer +to the same object, or null. + +

                                        child.stdout#

                                        • Stream object
                                        • @@ -198,10 +224,14 @@

                                          A Readable Stream that represents the child process's stdout.

                                          -

                                          If the child stdio streams are shared with the parent, then this will +

                                          If the child was not spawned with stdio[1] set to 'pipe', then this will not be set.

                                          +

                                          child.stdout is shorthand for child.stdio[1]. Both properties will refer +to the same object, or null. + +

                                          child.stderr#

                                          • Stream object
                                          • @@ -209,10 +239,47 @@

                                            A Readable Stream that represents the child process's stderr.

                                            -

                                            If the child stdio streams are shared with the parent, then this will +

                                            If the child was not spawned with stdio[2] set to 'pipe', then this will not be set.

                                            +

                                            child.stderr is shorthand for child.stdio[2]. Both properties will refer +to the same object, or null. + +

                                            +

                                            child.stdio#

                                            +
                                              +
                                            • Array
                                            • +
                                          +

                                          A sparse array of pipes to the child process, corresponding with positions in +the stdio option to +spawn that have been +set to 'pipe'. +Note that streams 0-2 are also available as ChildProcess.stdin, +ChildProcess.stdout, and ChildProcess.stderr, respectively. + +

                                          +

                                          In the following example, only the child's fd 1 is setup as a pipe, so only +the parent's child.stdio[1] is a stream, all other values in the array are +null. + +

                                          +
                                          child = child_process.spawn("ls", {
                                          +    stdio: [
                                          +      0, // use parents stdin for child
                                          +      'pipe', // pipe child's stdout to parent
                                          +      fs.openSync("err.out", "w") // direct child's stderr to a file
                                          +    ]
                                          +});
                                          +
                                          +assert.equal(child.stdio[0], null);
                                          +assert.equal(child.stdio[0], child.stdin);
                                          +
                                          +assert(child.stdout);
                                          +assert.equal(child.stdio[1], child.stdout);
                                          +
                                          +assert.equal(child.stdio[2], null);
                                          +assert.equal(child.stdio[2], child.stderr);

                                          child.pid#

                                          • Integer
                                          • @@ -267,7 +334,7 @@

                                            See kill(2)

                                            -

                                            child.send(message, [sendHandle])#

                                            +

                                            child.send(message[, sendHandle])#

                                            • message Object
                                            • sendHandle Handle object
                                            • @@ -403,20 +470,28 @@ of being received, most likely immediately.

                                              -

                                              Note that you can also call process.disconnect() in the child process. +

                                              Note that you can also call process.disconnect() in the child process when the +child process has any open IPC channels with the parent (i.e fork()). + +

                                              +

                                              Asynchronous Process Creation#

                                              +

                                              These methods follow the common async programming patterns (accepting a +callback or returning an EventEmitter).

                                              -

                                              child_process.spawn(command, [args], [options])#

                                              +

                                              child_process.spawn(command[, args][, options])#

                                              • command String The command to run
                                              • args Array List of string arguments
                                              • options Object
                                                • cwd String Current working directory of the child process
                                                • -
                                                • stdio Array|String Child's stdio configuration. (See below)
                                                • -
                                                • customFds Array Deprecated File descriptors for the child to use -for stdio. (See below)
                                                • env Object Environment key-value pairs
                                                • -
                                                • detached Boolean The child will be a process group leader. (See below)
                                                • +
                                                • stdio Array|String Child's stdio configuration. (See +below)
                                                • +
                                                • customFds Array Deprecated File descriptors for the child to use +for stdio. (See below)
                                                • +
                                                • detached Boolean The child will be a process group leader. (See +below)
                                                • uid Number Sets the user identity of the process. (See setuid(2).)
                                                • gid Number Sets the group identity of the process. (See setgid(2).)
                                                @@ -427,14 +502,18 @@ If omitted, args defaults to an empty Array.

                                                -

                                                The third argument is used to specify additional options, which defaults to: +

                                                The third argument is used to specify additional options, with these defaults:

                                                { cwd: undefined,
                                                   env: process.env
                                                 }
                                                -

                                                cwd allows you to specify the working directory from which the process is spawned. -Use env to specify environment variables that will be visible to the new process. +

                                                Use cwd to specify the working directory from which the process is spawned. +If not given, the default is to inherit the current working directory. + +

                                                +

                                                Use env to specify environment variables that will be visible to the new +process, the default is process.env.

                                                Example of running ls -lh /usr, capturing stdout, stderr, and the exit code: @@ -489,25 +568,17 @@ console.log('grep process exited with code ' + code); } }); -

                                                Example of checking for failed exec: - -

                                                -
                                                var spawn = require('child_process').spawn,
                                                -    child = spawn('bad_command');
                                                -
                                                -child.stderr.setEncoding('utf8');
                                                -child.stderr.on('data', function (data) {
                                                -  if (/^execvp\(\)/.test(data)) {
                                                -    console.log('Failed to start child process.');
                                                -  }
                                                -});
                                                -

                                                Note that if spawn receives an empty options object, it will result in -spawning the process with an empty environment rather than using -process.env. This due to backwards compatibility issues with a deprecated -API. +

                                                options.stdio#

                                                +

                                                As a shorthand, the stdio argument may also be one of the following +strings:

                                                -

                                                The 'stdio' option to child_process.spawn() is an array where each +

                                                  +
                                                • 'pipe' - ['pipe', 'pipe', 'pipe'], this is the default value
                                                • +
                                                • 'ignore' - ['ignore', 'ignore', 'ignore']
                                                • +
                                                • 'inherit' - [process.stdin, process.stdout, process.stderr] or [0,1,2]
                                                • +
                                                +

                                                Otherwise, the 'stdio' option to child_process.spawn() is an array where each index corresponds to a fd in the child. The value is one of the following:

                                                @@ -540,15 +611,6 @@ words, stdin, stdout, and stderr) a pipe is created. For fd 3 and up, the default is 'ignore'.
                                              • -

                                                As a shorthand, the stdio argument may also be one of the following -strings, rather than an array: - -

                                                -
                                                  -
                                                • ignore - ['ignore', 'ignore', 'ignore']
                                                • -
                                                • pipe - ['pipe', 'pipe', 'pipe']
                                                • -
                                                • inherit - [process.stdin, process.stdout, process.stderr] or [0,1,2]
                                                • -

                                                Example:

                                                @@ -563,6 +625,7 @@ // Open an extra fd=4, to interact with programs present a // startd-style interface. spawn('prg', [], { stdio: ['pipe', null, null, null, 'pipe'] }); +

                                                options.detached#

                                                If the detached option is set, the child process will be made the leader of a new process group. This makes it possible for the child to continue running after the parent exits. @@ -594,6 +657,7 @@ inherited, the child will remain attached to the controlling terminal.

                                                +

                                                options.customFds#

                                                There is a deprecated option called customFds which allows one to specify specific file descriptors for the stdio of the child process. This API was not portable to all platforms and therefore removed. @@ -605,7 +669,7 @@

                                                See also: child_process.exec() and child_process.fork()

                                                -

                                                child_process.exec(command, [options], callback)#

                                                +

                                                child_process.exec(command[, options], callback)#

                                                • command String The command to run, with space-separated arguments
                                                • options Object
                                                    @@ -619,6 +683,8 @@
                                                  • timeout Number (Default: 0)
                                                  • maxBuffer Number (Default: 200*1024)
                                                  • killSignal String (Default: 'SIGTERM')
                                                  • +
                                                  • uid Number Sets the user identity of the process. (See setuid(2).)
                                                  • +
                                                  • gid Number Sets the group identity of the process. (See setgid(2).)
                                                • callback Function called with the output when process terminates
                                                    @@ -667,7 +733,7 @@

                                                    -

                                                    child_process.execFile(file, [args], [options], [callback])#

                                                    +

                                                    child_process.execFile(file[, args][, options][, callback])#

                                                    • file String The filename of the program to run
                                                    • args Array List of string arguments
                                                    • @@ -678,6 +744,8 @@
                                                    • timeout Number (Default: 0)
                                                    • maxBuffer Number (Default: 200*1024)
                                                    • killSignal String (Default: 'SIGTERM')
                                                    • +
                                                    • uid Number Sets the user identity of the process. (See setuid(2).)
                                                    • +
                                                    • gid Number Sets the group identity of the process. (See setgid(2).)
                                                  • callback Function called with the output when process terminates
                                                      @@ -694,14 +762,13 @@

                                                      -

                                                      child_process.fork(modulePath, [args], [options])#

                                                      +

                                                      child_process.fork(modulePath[, args][, options])#

                                                      • modulePath String The module to run in the child
                                                      • args Array List of string arguments
                                                      • options Object
                                                        • cwd String Current working directory of the child process
                                                        • env Object Environment key-value pairs
                                                        • -
                                                        • encoding String (Default: 'utf8')
                                                        • execPath String Executable used to create the child process
                                                        • execArgv Array List of string arguments passed to the executable (Default: process.execArgv)
                                                        • @@ -709,6 +776,8 @@ piped to the parent, otherwise they will be inherited from the parent, see the "pipe" and "inherit" options for spawn()'s stdio for more details (default is false) +
                                                        • uid Number Sets the user identity of the process. (See setuid(2).)
                                                        • +
                                                        • gid Number Sets the group identity of the process. (See setgid(2).)
                                                      • Return: ChildProcess object
                                                      • @@ -731,7 +800,17 @@ output on this fd is expected to be line delimited JSON objects.

                                                        -

                                                        child_process.spawnSync(command, [args], [options])#

                                                        +

                                                        Synchronous Process Creation#

                                                        +

                                                        These methods are synchronous, meaning they WILL block the event loop, +pausing execution of your code until the spawned process exits. + +

                                                        +

                                                        Blocking calls like these are mostly useful for simplifying general purpose +scripting tasks and for simplifying the loading/processing of application +configuration at startup. + +

                                                        +

                                                        child_process.spawnSync(command[, args][, options])#

                                                        • command String The command to run
                                                        • args Array List of string arguments
                                                        • @@ -758,7 +837,7 @@
                                                        • stderr Buffer|String The contents of output[2]
                                                        • status Number The exit code of the child process
                                                        • signal String The signal used to kill the child process
                                                        • -
                                                        • error Error The error object if the child process failed or timedout
                                                        • +
                                                        • error Error The error object if the child process failed or timed out
                                                      @@ -769,7 +848,7 @@ process has exited.

                                                      -

                                                      child_process.execFileSync(command, [args], [options])#

                                                      +

                                                      child_process.execFileSync(command[, args][, options])#

                                                      • command String The command to run
                                                      • args Array List of string arguments
                                                      • @@ -808,7 +887,7 @@

                                                        -

                                                        child_process.execSync(command, [options])#

                                                        +

                                                        child_process.execSync(command[, options])#

                                                        • command String The command to run
                                                        • options Object
                                                            @@ -849,26 +928,48 @@
                                                      +
                                                  • + +
                                              - - + + + - -
                                              - - - + + +
                                              - - +
                                              -

                                              Node.js v0.11.13 Manual & Documentation

                                              +

                                              Node.js v0.11.15 Manual & Documentation

                                              Index | @@ -66,9 +71,11 @@

                                            • worker.id
                                            • worker.process
                                            • worker.suicide
                                            • -
                                            • worker.send(message, [sendHandle])
                                            • +
                                            • worker.send(message[, sendHandle])
                                            • worker.kill([signal='SIGTERM'])
                                            • worker.disconnect()
                                            • +
                                            • worker.isDead()
                                            • +
                                            • worker.isConnected()
                                            • Event: 'message'
                                            • Event: 'online'
                                            • Event: 'listening'
                                            • @@ -224,6 +231,8 @@ (Default=process.argv.slice(2))
                                            • silent Boolean whether or not to send output to parent's stdio. (Default=false)
                                            • +
                                            • uid Number Sets the user identity of the process. (See setuid(2).)
                                            • +
                                            • gid Number Sets the group identity of the process. (See setgid(2).)
                                          @@ -356,7 +365,18 @@

                                          Event: 'setup'#

                                          -

                                          Emitted the first time that .setupMaster() is called. +

                                            +
                                          • settings Object
                                          • +
                                        +

                                        Emitted every time .setupMaster() is called. + +

                                        +

                                        The settings object is the cluster.settings object at the time +.setupMaster() was called and is advisory only, since multiple calls to +.setupMaster() can be made in a single tick. + +

                                        +

                                        If accuracy is important, use cluster.settings.

                                        cluster.setupMaster([settings])#

                                        @@ -378,24 +398,27 @@

                                          -
                                        • Only the first call to .setupMaster() has any effect, subsequent calls are -ignored
                                        • -
                                        • That because of the above, the only attribute of a worker that may be -customized per-worker is the env passed to .fork()
                                        • -
                                        • .fork() calls .setupMaster() internally to establish the defaults, so to -have any effect, .setupMaster() must be called before any calls to -.fork()
                                        • +
                                        • any settings changes only affect future calls to .fork() and have no +effect on workers that are already running
                                        • +
                                        • The only attribute of a worker that cannot be set via .setupMaster() is +the env passed to .fork()
                                        • +
                                        • the defaults above apply to the first call only, the defaults for later +calls is the current value at the time of cluster.setupMaster() is called

                                        Example:

                                        -
                                        var cluster = require("cluster");
                                        +
                                        var cluster = require('cluster');
                                         cluster.setupMaster({
                                        -  exec : "worker.js",
                                        -  args : ["--use", "https"],
                                        -  silent : true
                                        +  exec: 'worker.js',
                                        +  args: ['--use', 'https'],
                                        +  silent: true
                                         });
                                        -cluster.fork();
                                        +cluster.fork(); // https worker +cluster.setupMaster({ + args: ['--use', 'http'] +}); +cluster.fork(); // http worker

                                        This can only be called from the master process.

                                        @@ -453,8 +476,10 @@ process.

                                        -

                                        A worker is removed from cluster.workers just before the 'disconnect' or -'exit' event is emitted. +

                                        A worker is removed from cluster.workers after the worker has disconnected and +exited. The order between these two events cannot be determined in advance. +However, it is guaranteed that the removal from the cluster.workers list happens +before last 'disconnect' or 'exit' event is emitted.

                                        // Go through all workers
                                        @@ -527,7 +552,7 @@
                                         
                                         // kill worker
                                         worker.kill();
                                        -

                                        worker.send(message, [sendHandle])#

                                        +

                                        worker.send(message[, sendHandle])#

                                        • message Object
                                        • sendHandle Handle object
                                        • @@ -636,6 +661,17 @@ } }); } +

                                          worker.isDead()#

                                          +

                                          This function returns true if the worker's process has terminated (either +because of exiting or being signaled). Otherwise, it returns false. + +

                                          +

                                          worker.isConnected()#

                                          +

                                          This function returns true if the worker is connected to its master via its IPC +channel, false otherwise. A worker is connected to its master after it's been +created. It is disconnected after the disconnect event is emitted. + +

                                          Event: 'message'#

                                          • message Object
                                          • @@ -748,26 +784,48 @@
                                        +
                                        + +
                                - - + + + - -
                                - - - + + +
                                - - +
                                -

                                Node.js v0.11.13 Manual & Documentation

                                +

                                Node.js v0.11.15 Manual & Documentation

                                Index | @@ -46,15 +51,15 @@

                                Table of Contents

                                @@ -87,33 +92,48 @@

                                -

                                console.log([data], [...])#

                                +

                                console.log([data][, ...])#

                                Prints to stdout with newline. This function can take multiple arguments in a printf()-like way. Example:

                                -
                                console.log('count: %d', count);
                                +
                                var count = 5;
                                +console.log('count: %d', count);
                                +// prints 'count: 5'

                                If formatting elements are not found in the first string then util.inspect is used on each argument. See util.format() for more information.

                                -

                                console.info([data], [...])#

                                +

                                console.info([data][, ...])#

                                Same as console.log.

                                -

                                console.error([data], [...])#

                                +

                                console.error([data][, ...])#

                                Same as console.log but prints to stderr.

                                -

                                console.warn([data], [...])#

                                +

                                console.warn([data][, ...])#

                                Same as console.error.

                                -

                                console.dir(obj)#

                                +

                                console.dir(obj[, options])#

                                Uses util.inspect on obj and prints resulting string to stdout. This function -bypasses any custom inspect() function on obj. +bypasses any custom inspect() function on obj. An optional options object +may be passed that alters certain aspects of the formatted string:

                                +
                                  +
                                • showHidden - if true then the object's non-enumerable properties will be +shown too. Defaults to false.

                                  +
                                • +
                                • depth - tells inspect how many times to recurse while formatting the +object. This is useful for inspecting large complicated objects. Defaults to +2. To make it recurse indefinitely pass null.

                                  +
                                • +
                                • colors - if true, then the output will be styled with ANSI color codes. +Defaults to false. Colors are customizable, see below.

                                  +
                                • +

                                console.time(label)#

                                Mark a time. @@ -126,40 +146,64 @@ for (var i = 0; i < 100; i++) { ; } -console.timeEnd('100-elements'); -

                                console.trace(label)#

                                -

                                Print a stack trace to stderr of the current position. +console.timeEnd('100-elements'); +// prints 100-elements: 262ms +

                                console.trace(message[, ...])#

                                +

                                Print to stderr 'Trace :', followed by the formatted message and stack trace +to the current position.

                                -

                                console.assert(expression, [message])#

                                -

                                Same as assert.ok() where if the expression evaluates as false throw an -AssertionError with message. +

                                console.assert(value[, message][, ...])#

                                +

                                Similar to assert.ok(), but the error message is formatted as +util.format(message...).

                                +
                                + +
                          - - + + + - -
                          - - - + + +
                          - - +
                          -

                          Node.js v0.11.13 Manual & Documentation

                          +

                          Node.js v0.11.15 Manual & Documentation

                          Index | @@ -46,13 +51,13 @@

                          Table of Contents

                          +
                          + +
                - - + + + - -
                - - - + + +
                - - +
                -

                Node.js v0.11.13 Manual & Documentation

                +

                Node.js v0.11.15 Manual & Documentation

                Index | @@ -237,26 +242,48 @@

                +
                + +
          - - + + + - -
          - - - + + +
          - - +
          -

          Node.js v0.11.13 Manual & Documentation

          +

          Node.js v0.11.15 Manual & Documentation

          Index | @@ -46,23 +51,24 @@

          Table of Contents

          - - + + + - -
          - - - + + +
          - - +
          -

          Node.js v0.11.13 Manual & Documentation

          +

          Node.js v0.11.15 Manual & Documentation

          Index | @@ -46,8 +51,11 @@

          Table of Contents

          @@ -68,13 +82,35 @@

          DNS#

          -
          Stability: 3 - Stable

          Use require('dns') to access this module. All methods in the dns module -use C-Ares except for dns.lookup which uses getaddrinfo(3) in a thread -pool. C-Ares is much faster than getaddrinfo but the system resolver is -more consistent with how other programs operate. When a user does -net.connect(80, 'google.com') or http.get({ host: 'google.com' }) the -dns.lookup method is used. Users who need to do a large number of lookups -quickly should use the methods that go through C-Ares. +

          Stability: 3 - Stable

          Use require('dns') to access this module. + +

          +

          This module contains functions that belong to two different categories: + +

          +

          1) Functions that use the underlying operating system facilities to perform +name resolution, and that do not necessarily do any network communication. +This category contains only one function: dns.lookup. Developers looking +to perform name resolution in the same way that other applications on the same +operating system behave should use dns.lookup. + +

          +

          Here is an example that does a lookup of www.google.com. + +

          +
          var dns = require('dns');
          +
          +dns.lookup('www.google.com', function onLookup(err, addresses, family) {
          +  console.log('addresses:', addresses);
          +});
          +

          2) Functions that connect to an actual DNS server to perform name resolution, +and that always use the network to perform DNS queries. This category +contains all functions in the dns module but dns.lookup. These functions +do not use the same set of configuration files than what dns.lookup uses. +For instance, they do not use the configuration from /etc/hosts. These +functions should be used by developers who do not want to use the underlying +operating system's facilities for name resolution, and instead want to +always perform DNS queries.

          Here is an example which resolves 'www.google.com' then reverse @@ -98,11 +134,34 @@ }); }); }); -

          dns.lookup(hostname, [family], callback)#

          +

          There are subtle consequences in choosing one or another, please consult the +Implementation considerations section +for more information. + +

          +

          dns.lookup(hostname[, options], callback)#

          Resolves a hostname (e.g. 'google.com') into the first found A (IPv4) or -AAAA (IPv6) record. -The family can be the integer 4 or 6. Defaults to null that indicates -both Ip v4 and v6 address family. +AAAA (IPv6) record. options can be an object or integer. If options is +not provided, then IP v4 and v6 addresses are both valid. If options is +an integer, then it must be 4 or 6. + +

          +

          Alternatively, options can be an object containing two properties, +family and hints. Both properties are optional. If family is provided, +it must be the integer 4 or 6. If family is not provided then IP v4 +and v6 addresses are accepted. The hints field, if present, should be one +or more of the supported getaddrinfo flags. If hints is not provided, +then no flags are passed to getaddrinfo. Multiple flags can be passed +through hints by logically ORing their values. An example usage of +options is shown below. + +

          +
          {
          +  family: 4,
          +  hints: dns.ADDRCONFIG | dns.V4MAPPED
          +}
          +

          See supported getaddrinfo flags below for +more information on supported flags.

          The callback has arguments (err, address, family). The address argument @@ -116,9 +175,31 @@ the hostname does not exist but also when the lookup fails in other ways such as no available file descriptors. +

          +

          dns.lookup doesn't necessarily have anything to do with the DNS protocol. +It's only an operating system facility that can associate name with addresses, +and vice versa. + +

          +

          Its implementation can have subtle but important consequences on the behavior +of any Node.js program. Please take some time to consult the Implementation +considerations section before using it.

          -

          dns.resolve(hostname, [rrtype], callback)#

          +

          dns.lookupService(address, port, callback)#

          +

          Resolves the given address and port into a hostname and service using +getnameinfo. + +

          +

          The callback has arguments (err, hostname, service). The hostname and +service arguments are strings (e.g. 'localhost' and 'http' respectively). + +

          +

          On error, err is an Error object, where err.code is the error code. + + +

          +

          dns.resolve(hostname[, rrtype], callback)#

          Resolves a hostname (e.g. 'google.com') into an array of the record types specified by rrtype. @@ -178,11 +259,11 @@

          The same as dns.resolve(), but only for service records (SRV records). addresses is an array of the SRV records available for hostname. Properties of SRV records are priority, weight, port, and name (e.g., -[{'priority': 10, {'weight': 5, 'port': 21223, 'name': 'service.example.com'}, ...]). +[{'priority': 10, 'weight': 5, 'port': 21223, 'name': 'service.example.com'}, ...]).

          dns.resolveSoa(hostname, callback)#

          -

          The same as dns.resolve(), but only for start of authority record queries +

          The same as dns.resolve(), but only for start of authority record queries (SOA record).

          @@ -268,30 +349,108 @@
        • dns.ADDRGETNETWORKPARAMS: Could not find GetNetworkParams function.
        • dns.CANCELLED: DNS query cancelled.
        +

        Supported getaddrinfo flags#

        +

        The following flags can be passed as hints to dns.lookup. + +

        +
          +
        • dns.ADDRCONFIG: Returned address types are determined by the types +of addresses supported by the current system. For example, IPv4 addresses +are only returned if the current system has at least one IPv4 address +configured. Loopback addresses are not considered.
        • +
        • dns.V4MAPPED: If the IPv6 family was specified, but no IPv6 addresses +were found, then return IPv4 mapped IPv6 addresses.
        • +
        +

        Implementation considerations#

        +

        Although dns.lookup and dns.resolve*/dns.reverse functions have the same +goal of associating a network name with a network address (or vice versa), +their behavior is quite different. These differences can have subtle but +significant consequences on the behavior of Node.js programs. + +

        +

        dns.lookup#

        +

        Under the hood, dns.lookup uses the same operating system facilities as most +other programs. For instance, dns.lookup will almost always resolve a given +name the same way as the ping command. On most POSIX-like operating systems, +the behavior of the dns.lookup function can be tweaked by changing settings +in nsswitch.conf(5) and/or resolv.conf(5), but be careful that changing +these files will change the behavior of all other programs running on the same +operating system. + +

        +

        Though the call will be asynchronous from JavaScript's perspective, it is +implemented as a synchronous call to getaddrinfo(3) that runs on libuv's +threadpool. Because libuv's threadpool has a fixed size, it means that if for +whatever reason the call to getaddrinfo(3) takes a long time, other +operations that could run on libuv's threadpool (such as filesystem +operations) will experience degraded performance. In order to mitigate this +issue, one potential solution is to increase the size of libuv's threadpool by +setting the 'UV_THREADPOOL_SIZE' environment variable to a value greater than +4 (its current default value). For more information on libuv's threadpool, see +the official libuv +documentation. + +

        +

        dns.resolve, functions starting with dns.resolve and dns.reverse#

        +

        These functions are implemented quite differently than dns.lookup. They do +not use getaddrinfo(3) and they always perform a DNS query on the network. +This network communication is always done asynchronously, and does not use +libuv's threadpool. + +

        +

        As a result, these functions cannot have the same negative impact on other +processing that happens on libuv's threadpool that dns.lookup can have. + +

        +

        They do not use the same set of configuration files than what dns.lookup +uses. For instance, they do not use the configuration from /etc/hosts. +

      +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -120,26 +125,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -522,26 +527,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -55,7 +60,7 @@

  • emitter.setMaxListeners(n)
  • EventEmitter.defaultMaxListeners
  • emitter.listeners(event)
  • -
  • emitter.emit(event, [arg1], [arg2], [...])
  • +
  • emitter.emit(event[, arg1][, arg2][, ...])
  • Class Method: EventEmitter.listenerCount(emitter, event)
  • Event: 'newListener'
  • Event: 'removeListener'
  • @@ -104,7 +109,10 @@

    emitter.addListener(event, listener)#

    emitter.on(event, listener)#

    -

    Adds a listener to the end of the listeners array for the specified event. +

    Adds a listener to the end of the listeners array for the specified event. +No checks are made to see if the listener has already been added. Multiple +calls passing the same combination of event and listener will result in the +listener being added multiple times.

    server.on('connection', function (stream) {
    @@ -136,6 +144,12 @@
     server.on('connection', callback);
     // ...
     server.removeListener('connection', callback);
    +

    removeListener will remove, at most, one instance of a listener from the +listener array. If any single listener has been added multiple times to the +listener array for the specified event, then removeListener must be called +multiple times to remove each instance. + +

    Returns emitter, so calls can be chained.

    @@ -177,7 +191,7 @@ console.log('someone connected!'); }); console.log(util.inspect(server.listeners('connection'))); // [ [Function] ] -

    emitter.emit(event, [arg1], [arg2], [...])#

    +

    emitter.emit(event[, arg1][, arg2][, ...])#

    Execute each of the listeners in order with the supplied arguments.

    @@ -195,8 +209,8 @@
  • event String The event name
  • listener Function The event handler function
  • -

    This event is emitted any time someone adds a new listener. It is unspecified -if listener is in the list returned by emitter.listeners(event). +

    This event is emitted any time a listener is added. When this event is triggered, +the listener may not yet have been added to the array of listeners for the event.

    @@ -205,33 +219,55 @@
  • event String The event name
  • listener Function The event handler function
  • -

    This event is emitted any time someone removes a listener. It is unspecified -if listener is in the list returned by emitter.listeners(event). +

    This event is emitted any time someone removes a listener. When this event is triggered, +the listener may not yet have been removed from the array of listeners for the event.

    + + + - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -72,24 +77,24 @@

  • fs.fstatSync(fd)
  • fs.link(srcpath, dstpath, callback)
  • fs.linkSync(srcpath, dstpath)
  • -
  • fs.symlink(srcpath, dstpath, [type], callback)
  • -
  • fs.symlinkSync(srcpath, dstpath, [type])
  • +
  • fs.symlink(srcpath, dstpath[, type], callback)
  • +
  • fs.symlinkSync(srcpath, dstpath[, type])
  • fs.readlink(path, callback)
  • fs.readlinkSync(path)
  • -
  • fs.realpath(path, [cache], callback)
  • -
  • fs.realpathSync(path, [cache])
  • +
  • fs.realpath(path[, cache], callback)
  • +
  • fs.realpathSync(path[, cache])
  • fs.unlink(path, callback)
  • fs.unlinkSync(path)
  • fs.rmdir(path, callback)
  • fs.rmdirSync(path)
  • -
  • fs.mkdir(path, [mode], callback)
  • -
  • fs.mkdirSync(path, [mode])
  • +
  • fs.mkdir(path[, mode], callback)
  • +
  • fs.mkdirSync(path[, mode])
  • fs.readdir(path, callback)
  • fs.readdirSync(path)
  • fs.close(fd, callback)
  • fs.closeSync(fd)
  • -
  • fs.open(path, flags, [mode], callback)
  • -
  • fs.openSync(path, flags, [mode])
  • +
  • fs.open(path, flags[, mode], callback)
  • +
  • fs.openSync(path, flags[, mode])
  • fs.utimes(path, atime, mtime, callback)
  • fs.utimesSync(path, atime, mtime)
  • fs.futimes(fd, atime, mtime, callback)
  • @@ -102,15 +107,15 @@
  • fs.writeSync(fd, data[, position[, encoding]])
  • fs.read(fd, buffer, offset, length, position, callback)
  • fs.readSync(fd, buffer, offset, length, position)
  • -
  • fs.readFile(filename, [options], callback)
  • -
  • fs.readFileSync(filename, [options])
  • -
  • fs.writeFile(filename, data, [options], callback)
  • -
  • fs.writeFileSync(filename, data, [options])
  • -
  • fs.appendFile(filename, data, [options], callback)
  • -
  • fs.appendFileSync(filename, data, [options])
  • -
  • fs.watchFile(filename, [options], listener)
  • -
  • fs.unwatchFile(filename, [listener])
  • -
  • fs.watch(filename, [options], [listener])
      +
    • fs.readFile(filename[, options], callback)
    • +
    • fs.readFileSync(filename[, options])
    • +
    • fs.writeFile(filename, data[, options], callback)
    • +
    • fs.writeFileSync(filename, data[, options])
    • +
    • fs.appendFile(filename, data[, options], callback)
    • +
    • fs.appendFileSync(filename, data[, options])
    • +
    • fs.watchFile(filename[, options], listener)
    • +
    • fs.unwatchFile(filename[, listener])
    • +
    • fs.watch(filename[, options][, listener])
      • Caveats
        • Availability
        • Filename Argument
        • @@ -120,16 +125,18 @@
        • fs.exists(path, callback)
        • fs.existsSync(path)
        • +
        • fs.access(path[, mode], callback)
        • +
        • fs.accessSync(path[, mode])
        • Class: fs.Stats
        • -
        • fs.createReadStream(path, [options])
        • +
        • fs.createReadStream(path[, options])
        • Class: fs.ReadStream
        • -
        • fs.createWriteStream(path, [options])
        • +
        • fs.createWriteStream(path[, options])
        • Class: fs.WriteStream
          • Event: 'open'
          • file.bytesWritten
          • @@ -360,7 +367,7 @@

            Synchronous link(2).

            -

            fs.symlink(srcpath, dstpath, [type], callback)#

            +

            fs.symlink(srcpath, dstpath[, type], callback)#

            Asynchronous symlink(2). No arguments other than a possible exception are given to the completion callback. The type argument can be set to 'dir', 'file', or 'junction' (default @@ -369,7 +376,7 @@ 'junction', the destination argument will automatically be normalized to absolute path.

            -

            fs.symlinkSync(srcpath, dstpath, [type])#

            +

            fs.symlinkSync(srcpath, dstpath[, type])#

            Synchronous symlink(2).

            @@ -382,7 +389,7 @@

            Synchronous readlink(2). Returns the symbolic link's string value.

            -

            fs.realpath(path, [cache], callback)#

            +

            fs.realpath(path[, cache], callback)#

            Asynchronous realpath(2). The callback gets two arguments (err, resolvedPath). May use process.cwd to resolve relative paths. cache is an object literal of mapped paths that can be used to force a specific path @@ -397,7 +404,7 @@ if (err) throw err; console.log(resolvedPath); }); -

            fs.realpathSync(path, [cache])#

            +

            fs.realpathSync(path[, cache])#

            Synchronous realpath(2). Returns the resolved path.

            @@ -419,12 +426,12 @@

            Synchronous rmdir(2).

            -

            fs.mkdir(path, [mode], callback)#

            +

            fs.mkdir(path[, mode], callback)#

            Asynchronous mkdir(2). No arguments other than a possible exception are given to the completion callback. mode defaults to 0777.

            -

            fs.mkdirSync(path, [mode])#

            +

            fs.mkdirSync(path[, mode])#

            Synchronous mkdir(2).

            @@ -448,7 +455,7 @@

            Synchronous close(2).

            -

            fs.open(path, flags, [mode], callback)#

            +

            fs.open(path, flags[, mode], callback)#

            Asynchronous file open. See open(2). flags can be:

            @@ -509,7 +516,7 @@ the end of the file.

            -

            fs.openSync(path, flags, [mode])#

            +

            fs.openSync(path, flags[, mode])#

            Synchronous version of fs.open().

            @@ -622,7 +629,7 @@

            Synchronous version of fs.read. Returns the number of bytesRead.

            -

            fs.readFile(filename, [options], callback)#

            +

            fs.readFile(filename[, options], callback)#

            • filename String
            • options Object
                @@ -647,7 +654,7 @@

                -

                fs.readFileSync(filename, [options])#

                +

                fs.readFileSync(filename[, options])#

                Synchronous version of fs.readFile. Returns the contents of the filename.

                @@ -656,7 +663,7 @@

                -

                fs.writeFile(filename, data, [options], callback)#

                +

                fs.writeFile(filename, data[, options], callback)#

                • filename String
                • data String | Buffer
                • @@ -683,11 +690,11 @@ if (err) throw err; console.log('It\'s saved!'); }); -

                  fs.writeFileSync(filename, data, [options])#

                  +

                  fs.writeFileSync(filename, data[, options])#

                  The synchronous version of fs.writeFile.

                  -

                  fs.appendFile(filename, data, [options], callback)#

                  +

                  fs.appendFile(filename, data[, options], callback)#

                  • filename String
                  • data String | Buffer
                  • @@ -710,11 +717,11 @@ if (err) throw err; console.log('The "data to append" was appended to file!'); }); -

                    fs.appendFileSync(filename, data, [options])#

                    +

                    fs.appendFileSync(filename, data[, options])#

                    The synchronous version of fs.appendFile.

                    -

                    fs.watchFile(filename, [options], listener)#

                    +

                    fs.watchFile(filename[, options], listener)#

                    Stability: 2 - Unstable.  Use fs.watch instead, if possible.

                    Watch for changes on filename. The callback listener will be called each time the file is accessed. @@ -741,7 +748,7 @@ you need to compare curr.mtime and prev.mtime.

                    -

                    fs.unwatchFile(filename, [listener])#

                    +

                    fs.unwatchFile(filename[, listener])#

                    Stability: 2 - Unstable.  Use fs.watch instead, if possible.

                    Stop watching for changes on filename. If listener is specified, only that particular listener is removed. Otherwise, all listeners are removed and you have effectively stopped watching filename. @@ -751,7 +758,7 @@ no-op, not an error.

                    -

                    fs.watch(filename, [options], [listener])#

                    +

                    fs.watch(filename[, options][, listener])#

                    Stability: 2 - Unstable.

                    Watch for changes on filename, where filename is either a file or a directory. The returned object is a fs.FSWatcher. @@ -844,10 +851,46 @@ and handle the error when it's not there.

                    +

                    fs.exists() will be deprecated. + +

                    fs.existsSync(path)#

                    Synchronous version of fs.exists.

                    +

                    fs.existsSync() will be deprecated. + +

                    +

                    fs.access(path[, mode], callback)#

                    +

                    Tests a user's permissions for the file specified by path. mode is an +optional integer that specifies the accessibility checks to be performed. The +following constants define the possible values of mode. It is possible to +create a mask consisting of the bitwise OR of two or more values. + +

                    +
                      +
                    • fs.F_OK - File is visible to the calling process. This is useful for +determining if a file exists, but says nothing about rwx permissions. +Default if no mode is specified.
                    • +
                    • fs.R_OK - File can be read by the calling process.
                    • +
                    • fs.W_OK - File can be written by the calling process.
                    • +
                    • fs.X_OK - File can be executed by the calling process. This has no effect +on Windows (will behave like fs.F_OK).
                    • +
                    +

                    The final argument, callback, is a callback function that is invoked with +a possible error argument. If any of the accessibility checks fail, the error +argument will be populated. The following example checks if the file +/etc/passwd can be read and written by the current process. + +

                    +
                    fs.access('/etc/passwd', fs.R_OK | fs.W_OK, function(err) {
                    +  util.debug(err ? 'no access!' : 'can read/write');
                    +});
                    +

                    fs.accessSync(path[, mode])#

                    +

                    Synchronous version of fs.access. This throws if any accessibility checks +fail, and does nothing otherwise. + +

                    Class: fs.Stats#

                    Objects returned from fs.stat(), fs.lstat() and fs.fstat() and their synchronous counterparts are of this type. @@ -917,7 +960,7 @@ on Unix systems, it never was.

                    -

                    fs.createReadStream(path, [options])#

                    +

                    fs.createReadStream(path[, options])#

                    Returns a new ReadStream object (See Readable Stream).

                    @@ -935,6 +978,10 @@ start at 0. The encoding can be 'utf8', 'ascii', or 'base64'.

                    +

                    If fd is specified, ReadStream will ignore the path argument and will use +the specified file descriptor. This means that no open event will be emitted. + +

                    If autoClose is false, then the file descriptor won't be closed, even if there's an error. It is your responsibility to close it and make sure there's no file descriptor leak. If autoClose is set to true (default @@ -958,7 +1005,7 @@

                    -

                    fs.createWriteStream(path, [options])#

                    +

                    fs.createWriteStream(path[, options])#

                    Returns a new WriteStream object (See Writable Stream).

                    @@ -967,6 +1014,7 @@

                    { flags: 'w',
                       encoding: null,
                    +  fd: null,
                       mode: 0666 }

                    options may also include a start option to allow writing data at some position past the beginning of the file. Modifying a file rather @@ -974,6 +1022,12 @@ default mode w.

                    +

                    Like ReadStream above, if fd is specified, WriteStream will ignore the +path argument and will use the specified file descriptor. This means that no +open event will be emitted. + + +

                    Class: fs.WriteStream#

                    WriteStream is a Writable Stream. @@ -1017,26 +1071,48 @@

            +
  • + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -281,26 +286,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -49,7 +54,7 @@

  • http.METHODS
  • http.STATUS_CODES
  • http.createServer([requestListener])
  • -
  • http.createClient([port], [host])
  • +
  • http.createClient([port][, host])
  • Class: http.Server
  • -
  • http.request(options, [callback])
  • -
  • http.get(options, [callback])
  • +
  • http.request(options[, callback])
  • +
  • http.get(options[, callback])
  • Class: http.Agent
  • http.IncomingMessage
      @@ -200,14 +205,14 @@

      http.createServer([requestListener])#

      -

      Returns a new web server object. +

      Returns a new instance of http.Server.

      The requestListener is a function which is automatically added to the 'request' event.

      -

      http.createClient([port], [host])#

      +

      http.createClient([port][, host])#

      This function is deprecated; please use http.request() instead. Constructs a new HTTP client. port and host refer to the server to be connected to. @@ -310,14 +315,14 @@

      function (exception, socket) { }

      -

      If a client connection emits an 'error' event - it will forwarded here. +

      If a client connection emits an 'error' event, it will be forwarded here.

      socket is the net.Socket object that the error originated from.

      -

      server.listen(port, [hostname], [backlog], [callback])#

      +

      server.listen(port[, hostname][, backlog][, callback])#

      Begin accepting connections on the specified port and hostname. If the hostname is omitted, the server will accept connections directed to any IPv4 address (INADDR_ANY). @@ -337,7 +342,7 @@

      -

      server.listen(path, [callback])#

      +

      server.listen(path[, callback])#

      Start a UNIX socket server listening for connections on the given path.

      @@ -346,7 +351,7 @@

      -

      server.listen(handle, [callback])#

      +

      server.listen(handle[, callback])#

      • handle Object
      • callback Function
      • @@ -450,7 +455,7 @@ the request body should be sent. See the 'checkContinue' event on Server.

        -

        response.writeHead(statusCode, [statusMessage], [headers])#

        +

        response.writeHead(statusCode[, statusMessage][, headers])#

        Sends a response header to the request. The status code is a 3-digit HTTP status code, like 404. The last argument, headers, are the response headers. Optionally one can give a human-readable statusMessage as the second @@ -472,7 +477,7 @@ implicit/mutable headers will be calculated and call this function for you.

        -

        Note: that Content-Length is given in bytes not characters. The above example +

        Note that Content-Length is given in bytes not characters. The above example works because the string 'hello world' contains only single byte characters. If the body contains higher coded characters then Buffer.byteLength() should be used to determine the number of bytes in a given encoding. @@ -571,7 +576,7 @@

        response.removeHeader("Content-Encoding");
        -

        response.write(chunk, [encoding])#

        +

        response.write(chunk[, encoding][, callback])#

        If this method is called and response.writeHead() has not been called, it will switch to implicit header mode and flush the implicit headers. @@ -582,7 +587,8 @@

        chunk can be a string or a buffer. If chunk is a string, the second parameter specifies how to encode it into a byte stream. -By default the encoding is 'utf8'. +By default the encoding is 'utf8'. The last parameter callback +will be called when this chunk of data is flushed.

        Note: This is the raw HTTP body and has nothing to do with @@ -598,7 +604,7 @@

        Returns true if the entire data was flushed successfully to the kernel buffer. Returns false if all or part of the data was queued in user memory. -'drain' will be emitted when the buffer is again free. +'drain' will be emitted when the buffer is free again.

        response.addTrailers(headers)#

        @@ -620,19 +626,22 @@ response.write(fileData); response.addTrailers({'Content-MD5': "7895bf4b8828b55ceaf47747b4bca667"}); response.end(); -

        response.end([data], [encoding])#

        +

        response.end([data][, encoding][, callback])#

        This method signals to the server that all of the response headers and body have been sent; that server should consider this message complete. The method, response.end(), MUST be called on each response.

        -

        If data is specified, it is equivalent to calling response.write(data, encoding) -followed by response.end(). +

        If data is specified, it is equivalent to calling +response.write(data, encoding) followed by response.end(callback). +

        +

        If callback is specified, it will be called when the response stream +is finished.

        -

        http.request(options, [callback])#

        +

        http.request(options[, callback])#

        Node maintains several connections per server to make HTTP requests. This function allows one to transparently issue requests. @@ -685,11 +694,19 @@

        Example:

        -
        var options = {
        +
        var postData = querystring.stringify({
        +  'msg' : 'Hello World!'
        +});
        +
        +var options = {
           hostname: 'www.google.com',
           port: 80,
           path: '/upload',
        -  method: 'POST'
        +  method: 'POST',
        +  headers: {
        +    'Content-Type': 'application/x-www-form-urlencoded',
        +    'Content-Length': postData.length
        +  }
         };
         
         var req = http.request(options, function(res) {
        @@ -706,8 +723,7 @@
         });
         
         // write data to request body
        -req.write('data\n');
        -req.write('data\n');
        +req.write(postData);
         req.end();

        Note that in the example req.end() was called. With http.request() one must always call req.end() to signify that you're done with the request - @@ -737,7 +753,7 @@ to compute basic authentication.

      -

      http.get(options, [callback])#

      +

      http.get(options[, callback])#

      Since most requests are GET requests without bodies, Node provides this convenience method. The only difference between this method and http.request() is that it sets the method to GET and calls req.end() automatically. @@ -1060,11 +1076,11 @@ the client should send the request body.

      -

      request.flush()#

      +

      request.flushHeaders()#

      Flush the request headers.

      -

      For effiency reasons, node.js normally buffers the request headers until you +

      For efficiency reasons, node.js normally buffers the request headers until you call request.end() or write the first chunk of request data. It then tries hard to pack the request headers and data into a single TCP packet. @@ -1074,7 +1090,7 @@ the optimization and kickstart the request.

      -

      request.write(chunk, [encoding])#

      +

      request.write(chunk[, encoding][, callback])#

      Sends a chunk of the body. By calling this method many times, the user can stream a request body to a server--in that case it is suggested to use the @@ -1088,23 +1104,30 @@

      The encoding argument is optional and only applies when chunk is a string. Defaults to 'utf8'. +

      +

      The callback argument is optional and will be called when this chunk of data +is flushed.

      -

      request.end([data], [encoding])#

      +

      request.end([data][, encoding][, callback])#

      Finishes sending the request. If any parts of the body are unsent, it will flush them to the stream. If the request is chunked, this will send the terminating '0\r\n\r\n'.

      If data is specified, it is equivalent to calling -request.write(data, encoding) followed by request.end(). +request.write(data, encoding) followed by request.end(callback). + +

      +

      If callback is specified, it will be called when the request stream +is finished.

      request.abort()#

      Aborts a request. (New since v0.3.8.)

      -

      request.setTimeout(timeout, [callback])#

      +

      request.setTimeout(timeout[, callback])#

      Once a socket is assigned to this request and is connected socket.setTimeout() will be called. @@ -1114,7 +1137,7 @@ socket.setNoDelay() will be called.

      -

      request.setSocketKeepAlive([enable], [initialDelay])#

      +

      request.setSocketKeepAlive([enable][, initialDelay])#

      Once a socket is assigned to this request and is connected socket.setKeepAlive() will be called. @@ -1135,7 +1158,7 @@

      function () { }

      -

      Indicates that the underlaying connection was closed. +

      Indicates that the underlying connection was closed. Just like 'end', this event occurs only once per response.

      @@ -1273,26 +1296,48 @@
  • +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -51,10 +56,10 @@

  • server.timeout
  • -
  • https.createServer(options, [requestListener])

    In order to specify these options, use a custom Agent. @@ -290,26 +295,48 @@

  • +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -92,26 +97,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -143,8 +148,8 @@

    Cycles#

    -

    When there are circular require() calls, a module might not be -done being executed when it is returned. +

    When there are circular require() calls, a module might not have finished +executing when it is returned.

    Consider this situation: @@ -177,7 +182,7 @@ console.log('in main, a.done=%j, b.done=%j', a.done, b.done);

    When main.js loads a.js, then a.js in turn loads b.js. At that point, b.js tries to load a.js. In order to prevent an infinite -loop an unfinished copy of the a.js exports object is returned to the +loop, an unfinished copy of the a.js exports object is returned to the b.js module. b.js then finishes loading, and its exports object is provided to the a.js module. @@ -254,7 +259,7 @@

    If it is not found there, then it moves to the parent directory, and so -on, until the root of the tree is reached. +on, until the root of the file system is reached.

    For example, if the file at '/home/ry/projects/foo.js' called @@ -272,6 +277,13 @@ clash.

    +

    You can require specific files or sub modules distributed with a module by +including a path suffix after the module name. For instance +require('example-module/path/to/file') would resolve path/to/file +relative to where example-module is located. The suffixed path follows the +same module resolution semantics. + +

    Folders as Modules#

    @@ -352,7 +364,7 @@
  • Object
  • The module.exports object is created by the Module system. Sometimes this is not -acceptable; many want their module to be an instance of some class. To do this +acceptable; many want their module to be an instance of some class. To do this, assign the desired export object to module.exports. Note that assigning the desired object to exports will simply rebind the local exports variable, which is probably not what you want to do. @@ -499,7 +511,8 @@ LOAD_AS_FILE(X) 1. If X is a file, load X as JavaScript text. STOP 2. If X.js is a file, load X.js as JavaScript text. STOP -3. If X.node is a file, load X.node as binary addon. STOP +3. If X.json is a file, parse X.json to a JavaScript Object. STOP +4. If X.node is a file, load X.node as binary addon. STOP LOAD_AS_DIRECTORY(X) 1. If X/package.json is a file, @@ -507,7 +520,8 @@ b. let M = X + (json main field) c. LOAD_AS_FILE(M) 2. If X/index.js is a file, load X/index.js as JavaScript text. STOP -3. If X/index.node is a file, load X/index.node as binary addon. STOP +3. If X/index.json is a file, parse X/index.json to a JavaScript object. STOP +4. If X/index.node is a file, load X/index.node as binary addon. STOP LOAD_NODE_MODULES(X, START) 1. let DIRS=NODE_MODULES_PATHS(START) @@ -517,15 +531,14 @@ NODE_MODULES_PATHS(START) 1. let PARTS = path split(START) -2. let ROOT = index of first instance of "node_modules" in PARTS, or 0 -3. let I = count of PARTS - 1 -4. let DIRS = [] -5. while I > ROOT, +2. let I = count of PARTS - 1 +3. let DIRS = [] +4. while I >= 0, a. if PARTS[I] = "node_modules" CONTINUE c. DIR = path join(PARTS[0 .. I] + "node_modules") b. DIRS = DIRS + DIR c. let I = I - 1 -6. return DIRS +5. return DIRS

    Loading from the global folders#

    @@ -636,26 +649,48 @@
    +
    + + - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -46,17 +51,18 @@

    Table of Contents

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -235,26 +240,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -47,15 +52,19 @@

    @@ -87,7 +96,7 @@
    path.normalize('/foo/bar//baz/asdf/quux/..')
     // returns
     '/foo/bar/baz/asdf'
    -

    path.join([path1], [path2], [...])#

    +

    path.join([path1][, path2][, ...])#

    Join all arguments together and normalize the resulting path.

    @@ -113,7 +122,7 @@ order, until an absolute path is found. If after using all from paths still no absolute path is found, the current working directory is used as well. The resulting path is normalized, and trailing slashes are removed unless the path -gets resolved to the root directory. Non-string arguments are ignored. +gets resolved to the root directory. Non-string from arguments are ignored.

    Another way to think of it is as a sequence of cd commands in a shell. @@ -195,7 +204,7 @@

    path.dirname('/foo/bar/baz/asdf/quux')
     // returns
     '/foo/bar/baz/asdf'
    -

    path.basename(p, [ext])#

    +

    path.basename(p[, ext])#

    Return the last portion of a path. Similar to the Unix basename command.

    @@ -269,30 +278,102 @@ process.env.PATH.split(path.delimiter) // returns ['C:\Windows\system32', 'C:\Windows', 'C:\Program Files\nodejs\']
    +

    path.parse(pathString)#

    +

    Returns an object from a path string. + +

    +

    An example on *nix: + +

    +
    path.parse('/home/user/dir/file.txt')
    +// returns
    +{
    +    root : "/",
    +    dir : "/home/user/dir",
    +    base : "file.txt",
    +    ext : ".txt",
    +    name : "file"
    +}
    +

    An example on Windows: + +

    +
    path.parse('C:\\path\\dir\\index.html')
    +// returns
    +{
    +    root : "C:\",
    +    dir : "C:\path\dir",
    +    base : "index.html",
    +    ext : ".html",
    +    name : "index"
    +}
    +

    path.format(pathObject)#

    +

    Returns a path string from an object, the opposite of path.parse above. + +

    +
    path.format({
    +    root : "/",
    +    dir : "/home/user/dir",
    +    base : "file.txt",
    +    ext : ".txt",
    +    name : "file"
    +})
    +// returns
    +'/home/user/dir/file.txt'
    +

    path.posix#

    +

    Provide access to aforementioned path methods but always interact in a posix +compatible way. + +

    +

    path.win32#

    +

    Provide access to aforementioned path methods but always interact in a win32 +compatible way. +

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -73,7 +78,7 @@

  • process.version
  • process.versions
  • process.config
  • -
  • process.kill(pid, [signal])
  • +
  • process.kill(pid[, signal])
  • process.pid
  • process.title
  • process.arch
  • @@ -83,6 +88,7 @@
  • process.umask([mask])
  • process.uptime()
  • process.hrtime()
  • +
  • process.mainModule
  • @@ -273,7 +279,7 @@

    process.stdout#

    -

    A Writable Stream to stdout. +

    A Writable Stream to stdout (on fd 1).

    Example: the definition of console.log @@ -283,7 +289,8 @@ process.stdout.write(d + '\n'); };

    process.stderr and process.stdout are unlike other streams in Node in -that writes to them are usually blocking. +that they cannot be closed (end() will throw), they never emit the finish +event and that writes are usually blocking.

      @@ -312,11 +319,12 @@

      process.stderr#

      -

      A writable stream to stderr. +

      A writable stream to stderr (on fd 2).

      process.stderr and process.stdout are unlike other streams in Node in -that writes to them are usually blocking. +that they cannot be closed (end() will throw), they never emit the finish +event and that writes are usually blocking.

        @@ -329,7 +337,7 @@

      process.stdin#

      -

      A Readable Stream for stdin. +

      A Readable Stream for stdin (on fd 0).

      Example of opening standard input and listening for both events: @@ -435,8 +443,30 @@

      process.env#

      An object containing the user environment. See environ(7). +

      +

      An example of this object looks like: + +

      +
      { TERM: 'xterm-256color',
      +  SHELL: '/usr/local/bin/bash',
      +  USER: 'maciej',
      +  PATH: '~/.bin/:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin',
      +  PWD: '/Users/maciej',
      +  EDITOR: 'vim',
      +  SHLVL: '1',
      +  HOME: '/Users/maciej',
      +  LOGNAME: 'maciej',
      +  _: '/usr/local/bin/node' }
      +

      You can write to this object, but changes won't be reflected outside of your +process. That means that the following won't work: + +

      +
      node -e 'process.env.foo = "bar"' && echo $foo
      +

      But this will:

      +
      process.env.foo = 'bar';
      +console.log(process.env.foo);

      process.exit([code])#

      Ends the process with the specified code. If omitted, exit uses the 'success' code 0. @@ -620,7 +650,7 @@ strict_aliasing: 'true', target_arch: 'x64', v8_use_snapshot: 'true' } } -

      process.kill(pid, [signal])#

      +

      process.kill(pid[, signal])#

      Send a signal to a process. pid is the process id and signal is the string describing the signal to send. Signal names are strings like 'SIGINT' or 'SIGHUP'. If omitted, the signal will be 'SIGTERM'. @@ -790,7 +820,7 @@ given, otherwise returns the current mask.

      -
      var oldmask, newmask = 0644;
      +
      var oldmask, newmask = 0022;
       
       oldmask = process.umask(newmask);
       console.log('Changed umask from: ' + oldmask.toString(8) +
      @@ -821,30 +851,64 @@
         console.log('benchmark took %d nanoseconds', diff[0] * 1e9 + diff[1]);
         // benchmark took 1000000527 nanoseconds
       }, 1000);
      +

      process.mainModule#

      +

      Alternate way to retrieve +require.main. +The difference is that if the main module changes at runtime, require.main +might still refer to the original main module in modules that were required +before the change occurred. Generally it's safe to assume that the two refer +to the same module. + +

      +

      As with require.main, it will be undefined if there was no entry script. + +

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -126,26 +131,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -46,8 +51,8 @@

    Table of Contents

    • Query String @@ -64,12 +69,16 @@ It provides the following methods:

      -

      querystring.stringify(obj, [sep], [eq])#

      +

      querystring.stringify(obj[, sep][, eq][, options])#

      Serialize an object to a query string. Optionally override the default separator ('&') and assignment ('=') characters.

      +

      Options object may contain encodeURIComponent property (querystring.escape by default), +it can be used to encode string with non-utf8 encoding if necessary. + +

      Example:

      @@ -79,8 +88,15 @@ querystring.stringify({foo: 'bar', baz: 'qux'}, ';', ':') // returns -'foo:bar;baz:qux' -

      querystring.parse(str, [sep], [eq], [options])#

      +'foo:bar;baz:qux' + +// Suppose gbkEncodeURIComponent function already exists, +// it can encode string with `gbk` encoding +querystring.stringify({ w: '中文', foo: 'bar' }, null, null, + { encodeURIComponent: gbkEncodeURIComponent }) +// returns +'w=%D6%D0%CE%C4&foo=bar'
      +

      querystring.parse(str[, sep][, eq][, options])#

      Deserialize a query string to an object. Optionally override the default separator ('&') and assignment ('=') characters. @@ -90,12 +106,23 @@ be used to limit processed keys. Set it to 0 to remove key count limitation.

      +

      Options object may contain decodeURIComponent property (decodeURIComponent by default), +it can be used to decode non-utf8 encoding string if necessary. + +

      Example:

      querystring.parse('foo=bar&baz=qux&baz=quux&corge')
       // returns
      -{ foo: 'bar', baz: ['qux', 'quux'], corge: '' }
      +{ foo: 'bar', baz: ['qux', 'quux'], corge: '' } + +// Suppose gbkDecodeURIComponent function already exists, +// it can decode `gbk` encoding string +querystring.parse('w=%D6%D0%CE%C4&foo=bar', null, null, + { decodeURIComponent: gbkDecodeURIComponent }) +// returns +{ w: '中文', foo: 'bar' }

      querystring.escape#

      The escape function used by querystring.stringify, provided so that it could be overridden if necessary. @@ -109,26 +136,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -54,7 +59,7 @@

  • rl.pause()
  • rl.resume()
  • rl.close()
  • -
  • rl.write(data, [key])
  • +
  • rl.write(data[, key])
  • Events
      @@ -110,7 +115,7 @@
      • input - the readable stream to listen to (Required).

      • -
      • output - the writable stream to write readline data to (Required).

        +
      • output - the writable stream to write readline data to (Optional).

      • completer - an optional function that is used for Tab autocompletion. See below for an example of using this.

        @@ -188,6 +193,10 @@ been paused.

        +

        If output is set to null or undefined when calling createInterface, the +prompt is not written. + +

        rl.question(query, callback)#

        Prepends the prompt with query and invokes callback with the user's response. Displays the query to the user, and then invokes callback @@ -198,6 +207,10 @@ it has been paused.

        +

        If output is set to null or undefined when calling createInterface, +nothing is displayed. + +

        Example usage:

        @@ -208,6 +221,9 @@

        Pauses the readline input stream, allowing it to be resumed later if needed.

        +

        Note that this doesn't immediately pause the stream of events. Several events may be emitted after calling pause, including line. + +

        rl.resume()#

        Resumes the readline input stream. @@ -217,9 +233,10 @@ output streams. The "close" event will also be emitted.

        -

        rl.write(data, [key])#

        -

        Writes data to output stream. key is an object literal to represent a key -sequence; available if the terminal is a TTY. +

        rl.write(data[, key])#

        +

        Writes data to output stream, unless output is set to null or +undefined when calling createInterface. key is an object literal to +represent a key sequence; available if the terminal is a TTY.

        This will also resume the input stream if it has been paused. @@ -407,26 +424,48 @@

  • +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -288,26 +293,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Smalloc#

    -
    Stability: 1 - Experimental

    smalloc.alloc(length[, receiver][, type])#

    -
      -
    • length {Number} <= smalloc.kMaxLength
    • -
    • receiver {Object}, Optional, Default: new Object
    • -
    • type {Enum}, Optional, Default: Uint8
    • -
    -

    Returns receiver with allocated external array data. If no receiver is -passed then a new Object will be created and returned. - -

    +
    Stability: 1 - Experimental

    Class: smalloc#

    Buffers are backed by a simple allocator that only handles the assignation of external raw memory. Smalloc exposes that functionality.

    +

    smalloc.alloc(length[, receiver][, type])#

    +
      +
    • length Number <= smalloc.kMaxLength
    • +
    • receiver Object Default: new Object
    • +
    • type Enum Default: Uint8
    • +
    +

    Returns receiver with allocated external array data. If no receiver is +passed then a new Object will be created and returned. + +

    This can be used to create your own Buffer-like classes. No other properties are set, so the user will need to keep track of other necessary information (e.g. length of the allocation). @@ -107,13 +116,18 @@ doubleArr = i / 10; // { '0': 0, '1': 0.1, '2': 0.2 } -

    smalloc.copyOnto(source, sourceStart, dest, destStart, copyLength);#

    +

    It is not possible to freeze, seal and prevent extensions of objects with +external data using Object.freeze, Object.seal and +Object.preventExtensions respectively. + +

    +

    smalloc.copyOnto(source, sourceStart, dest, destStart, copyLength);#

      -
    • source Object with external array allocation
    • -
    • sourceStart Position to begin copying from
    • -
    • dest Object with external array allocation
    • -
    • destStart Position to begin copying onto
    • -
    • copyLength Length of copy
    • +
    • source Object with external array allocation
    • +
    • sourceStart Number Position to begin copying from
    • +
    • dest Object with external array allocation
    • +
    • destStart Number Position to begin copying onto
    • +
    • copyLength Number Length of copy

    Copy memory from one external array allocation to another. No arguments are optional, and any violation will throw. @@ -137,7 +151,7 @@ need to set any additional properties for this to work.

    -

    smalloc.dispose(obj)#

    +

    smalloc.dispose(obj)#

    • obj Object
    @@ -166,22 +180,24 @@ smalloc.copyOnto(b, 2, a, 0, 2); // now results in: -// Error: source has no external array data -

    dispose() does not support Buffers, and will throw if passed. +// RangeError: copy_length > source_length +

    After dispose() is called object still behaves as one with external data, for +example smalloc.hasExternalData() returns true. +dispose() does not support Buffers, and will throw if passed.

    -

    smalloc.hasExternalData(obj)#

    +

    smalloc.hasExternalData(obj)#

    • obj Object

    Returns true if the obj has externally allocated memory.

    -

    smalloc.kMaxLength#

    +

    smalloc.kMaxLength#

    Size of maximum allocation. This is also applicable to Buffer creation.

    -

    smalloc.Types#

    +

    smalloc.Types#

    Enum of possible external array types. Contains:

    @@ -200,26 +216,48 @@
    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -57,18 +62,20 @@

  • readable.setEncoding(encoding)
  • readable.resume()
  • readable.pause()
  • -
  • readable.pipe(destination, [options])
  • +
  • readable.isPaused()
  • +
  • readable.pipe(destination[, options])
  • readable.unpipe([destination])
  • readable.unshift(chunk)
  • readable.wrap(stream)
  • Class: stream.Writable
  • Class: stream.Writable
  • @@ -195,7 +203,7 @@ // Readable streams emit 'data' events once a listener is added req.on('data', function (chunk) { body += chunk; - }) + }); // the end event tells you that you have entire body req.on('end', function () { @@ -210,8 +218,8 @@ // write back something interesting to the user: res.write(typeof data); res.end(); - }) -}) + }); +}); server.listen(1337); @@ -296,7 +304,7 @@
    var readable = getReadableStreamSomehow();
     readable.on('readable', function() {
       // there is some data to read now
    -})
    +});

    Once the internal buffer is drained, a readable event will fire again when more data is available. @@ -317,9 +325,9 @@

    var readable = getReadableStreamSomehow();
     readable.on('data', function(chunk) {
       console.log('got %d bytes of data', chunk.length);
    -})
    +});

    Event: 'end'#

    -

    This event fires when no more data will be provided. +

    This event fires when there will be no more data to read.

    Note that the end event will not fire unless the data is @@ -330,7 +338,7 @@

    var readable = getReadableStreamSomehow();
     readable.on('data', function(chunk) {
       console.log('got %d bytes of data', chunk.length);
    -})
    +});
     readable.on('end', function() {
       console.log('there will be no more data.');
     });
    @@ -340,6 +348,9 @@

    Event: 'error'#

    +
      +
    • Error Object
    • +

    Emitted if there was an error receiving data.

    @@ -401,7 +412,7 @@ readable.on('data', function(chunk) { assert.equal(typeof chunk, 'string'); console.log('got %d characters of string data', chunk.length); -}) +});

    readable.resume()#

    • Return: this
    • @@ -420,7 +431,7 @@ readable.resume(); readable.on('end', function(chunk) { console.log('got to the end, but did not read anything'); -}) +});

      readable.pause()#

      • Return: this
      • @@ -439,8 +450,24 @@ console.log('now data will start flowing again'); readable.resume(); }, 1000); -}) -

        readable.pipe(destination, [options])#

        +}); +

        readable.isPaused()#

        +
          +
        • Return: Boolean
        • +
      +

      This method returns whether or not the readable has been explicitly +paused by client code (using readable.pause() without a corresponding +readable.resume()). + +

      +
      var readable = new stream.Readable
      +
      +readable.isPaused() // === false
      +readable.pause()
      +readable.isPaused() // === true
      +readable.resume()
      +readable.isPaused() // === false
      +

      readable.pipe(destination[, options])#

      • destination Writable Stream The destination for writing data
      • options Object Pipe options -

        writable.write(chunk, [encoding], [callback])#

        +

        writable.write(chunk[, encoding][, callback])#

        • chunk String | Buffer The data to write
        • encoding String The encoding, if chunk is a String
        • @@ -675,7 +702,16 @@

          Flush all data, buffered since .cork() call.

          -

          writable.end([chunk], [encoding], [callback])#

          +

          writable.setDefaultEncoding(encoding)#

          +
            +
          • encoding String The new default encoding
          • +
          • Return: Boolean
          • +
        +

        Sets the default encoding for a writable stream. Returns true if the encoding +is valid and is set. Otherwise returns false. + +

        +

        writable.end([chunk][, encoding][, callback])#

        • chunk String | Buffer Optional data to write
        • encoding String The encoding, if chunk is a String
        • @@ -689,11 +725,10 @@

          // write 'hello, ' and then end with 'world!'
          -http.createServer(function (req, res) {
          -  res.write('hello, ');
          -  res.end('world!');
          -  // writing more now is not allowed!
          -});
          +var file = fs.createWriteStream('example.txt'); +file.write('hello, '); +file.end('world!'); +// writing more now is not allowed!

          Event: 'finish'#

          When the end() method has been called, and all data has been flushed to the underlying system, this event is emitted. @@ -739,6 +774,9 @@ reader.pipe(writer); reader.unpipe(writer);

          Event: 'error'#

          +
            +
          • Error object
          • +

        Emitted if there was an error when writing or piping data.

        @@ -1053,7 +1091,7 @@ size bytes are available before calling stream.push(chunk).

        -

        readable.push(chunk, [encoding])#

        +

        readable.push(chunk[, encoding])#

        • chunk Buffer | null | String Chunk of data to push into the read queue
        • encoding String Encoding of String chunks. Must be a valid @@ -1103,7 +1141,7 @@ self._source.readStop(); }; - // When the source ends, we push the EOF-signalling `null` chunk + // When the source ends, we push the EOF-signaling `null` chunk this._source.onend = function() { self.push(null); }; @@ -1278,7 +1316,7 @@
        • encoding String If the chunk is a string, then this is the encoding type. (Ignore if decodeStrings chunk is a buffer.)
        • callback Function Call this function (optionally with an error -argument) when you are done processing the supplied chunk.
        • +argument and data) when you are done processing the supplied chunk.

      Note: This function MUST NOT be called directly. It should be implemented by child classes, and called by the internal Transform @@ -1302,9 +1340,19 @@

      Call the callback function only when the current chunk is completely consumed. Note that there may or may not be output as a result of any -particular input chunk. +particular input chunk. If you supply as the second argument to the +it will be passed to push method, in other words the following are +equivalent:

      +
      transform.prototype._transform = function (data, encoding, callback) {
      +  this.push(data);
      +  callback();
      +}
      +
      +transform.prototype._transform = function (data, encoding, callback) {
      +  callback(null, data);
      +}

      This method is prefixed with an underscore because it is internal to the class that defines it, and should not be called directly by user programs. However, you are expected to override this method in @@ -1342,6 +1390,14 @@ your own extension classes.

      +

      Events: 'finish' and 'end'#

      +

      The finish and end events are from the parent Writable +and Readable classes respectively. The finish event is fired after +.end() is called and all chunks have been processed by _transform, +end is fired after all data has been output which is after the callback +in _flush has been called. + +

      Example: SimpleProtocol parser v2#

      The example above of a simple protocol parser can be implemented simply by using the higher level Transform stream class, similar to @@ -1602,7 +1658,7 @@

      For Duplex streams objectMode can be set exclusively for readable or writable side with readableObjectMode and writableObjectMode -respectivly. These options can be used to implement parsers and +respectively. These options can be used to implement parsers and serializers with Transform streams.

      @@ -1661,26 +1717,48 @@
    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -87,26 +92,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -78,26 +83,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -46,13 +51,13 @@

    Table of Contents

    • Timers
    • @@ -66,7 +71,7 @@ this module in order to use them.

      -

      setTimeout(callback, delay, [arg], [...])#

      +

      setTimeout(callback, delay[, arg][, ...])#

      To schedule execution of a one-time callback after delay milliseconds. Returns a timeoutObject for possible use with clearTimeout(). Optionally you can also pass arguments to the callback. @@ -82,14 +87,14 @@

      Prevents a timeout from triggering.

      -

      setInterval(callback, delay, [arg], [...])#

      +

      setInterval(callback, delay[, arg][, ...])#

      To schedule the repeated execution of callback every delay milliseconds. Returns a intervalObject for possible use with clearInterval(). Optionally you can also pass arguments to the callback.

      clearInterval(intervalObject)#

      -

      Stops a interval from triggering. +

      Stops an interval from triggering.

      unref()#

      @@ -110,7 +115,7 @@ ref again will have no effect.

      -

      setImmediate(callback, [arg], [...])#

      +

      setImmediate(callback[, arg][, ...])#

      To schedule the "immediate" execution of callback after I/O events callbacks and before setTimeout and setInterval . Returns an immediateObject for possible use with clearImmediate(). Optionally you @@ -119,7 +124,7 @@

      Callbacks for immediates are queued in the order in which they were created. The entire callback queue is processed every event loop iteration. If you queue -an immediate from a inside an executing callback that immediate won't fire +an immediate from inside an executing callback, that immediate won't fire until the next event loop iteration.

      @@ -130,26 +135,48 @@
    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -46,16 +51,18 @@

    Table of Contents

    • TLS (SSL) +

      Protocol support#

      +

      Node.js is compiled with SSLv2 and SSLv3 protocol support by default, but these +protocols are disabled. They are considered insecure and could be easily +compromised as was shown by CVE-2014-3566. However, in some situations, it +may cause problems with legacy clients/servers (such as Internet Explorer 6). +If you wish to enable SSLv2 or SSLv3, run node with the --enable-ssl2 or +--enable-ssl3 flag respectively. In future versions of Node.js SSLv2 and +SSLv3 will not be compiled in by default. + +

      +

      There is a way to force node into using SSLv3 or SSLv2 only mode by explicitly +specifying secureProtocol to 'SSLv3_method' or 'SSLv2_method'. + +

      +

      The default protocol method Node.js uses is SSLv23_method which would be more +accurately named AutoNegotiate_method. This method will try and negotiate +from the highest level down to whatever the client supports. To provide a +secure default, Node.js (since v0.10.33) explicitly disables the use of SSLv3 +and SSLv2 by setting the secureOptions to be +SSL_OP_NO_SSLv3|SSL_OP_NO_SSLv2 (again, unless you have passed +--enable-ssl3, or --enable-ssl2, or SSLv3_method as secureProtocol). + +

      +

      If you have set secureOptions to anything, we will not override your +options. + +

      +

      The ramifications of this behavior change: + +

      +
        +
      • If your application is behaving as a secure server, clients who are SSLv3 +only will now not be able to appropriately negotiate a connection and will be +refused. In this case your server will emit a clientError event. The error +message will include 'wrong version number'.
      • +
      • If your application is behaving as a secure client and communicating with a +server that doesn't support methods more secure than SSLv3 then your connection +won't be able to negotiate and will fail. In this case your client will emit a +an error event. The error message will include 'wrong version number'.
      • +

      Client-initiated renegotiation attack mitigation#

      @@ -203,7 +251,7 @@ this technique, thus offering Perfect Forward Secrecy, are called "ephemeral".

      -

      Currently two methods are commonly used to achieve Perfect Forward Secrecy (note +

      Currently two methods are commonly used to achieve Perfect Forward Secrecy (note the character "E" appended to the traditional abbreviations):

      @@ -226,7 +274,7 @@

      var ciphers = tls.getCiphers();
       console.log(ciphers); // ['AES128-SHA', 'AES256-SHA', ...]
      -

      tls.createServer(options, [secureConnectionListener])#

      +

      tls.createServer(options[, secureConnectionListener])#

      Creates a new tls.Server. The connectionListener argument is automatically set as a listener for the secureConnection event. The options object has these possibilities: @@ -238,12 +286,12 @@ the key, cert and ca options.)

    • key: A string or Buffer containing the private key of the server in -PEM format. (Required)

      +PEM format. (Could be an array of keys). (Required)

    • passphrase: A string of passphrase for the private key or pfx.

    • cert: A string or Buffer containing the certificate key of the server in -PEM format. (Required)

      +PEM format. (Could be an array of certs). (Required)

    • ca: An array of strings or Buffers of trusted certificates in PEM format. If this is omitted several well known "root" CAs will be used, @@ -256,17 +304,19 @@

      To mitigate BEAST attacks it is recommended that you use this option in conjunction with the honorCipherOrder option described below to prioritize the non-CBC cipher.

      -

      Defaults to ECDHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH. -Consult the OpenSSL cipher list format documentation for details on the -format.

      -

      ECDHE-RSA-AES128-SHA256 and AES128-GCM-SHA256 are TLS v1.2 ciphers and -used when node.js is linked against OpenSSL 1.0.1 or newer, such as the -bundled version of OpenSSL. Note that it is still possible for a TLS v1.2 -client to negotiate a weaker cipher unless honorCipherOrder is enabled.

      +

      Defaults to +ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL. +Consult the OpenSSL cipher list format documentation for details +on the format.

      +

      ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256 and +AES128-GCM-SHA256 are TLS v1.2 ciphers and used when node.js is +linked against OpenSSL 1.0.1 or newer, such as the bundled version +of OpenSSL. Note that it is still possible for a TLS v1.2 client +to negotiate a weaker cipher unless honorCipherOrder is enabled.

      RC4 is used as a fallback for clients that speak on older version of the TLS protocol. RC4 has in recent years come under suspicion and should be considered compromised for anything that is truly sensitive. -It is speculated that state-level actors posess the ability to break it.

      +It is speculated that state-level actors possess the ability to break it.

      NOTE: Previous revisions of this section suggested AES256-SHA as an acceptable cipher. Unfortunately, AES256-SHA is a CBC cipher and therefore susceptible to BEAST attacks. Do not use it.

      @@ -275,6 +325,10 @@ or false to disable ECDH.

      Defaults to prime256v1. Consult RFC 4492 for more details.

    • +
    • dhparam: DH parameter file to use for DHE key agreement. Use +openssl dhparam command to create it. If the file is invalid to +load, it is silently discarded.

      +
    • handshakeTimeout: Abort the connection if the SSL/TLS handshake does not finish in this many milliseconds. The default is 120 seconds.

      A 'clientError' is emitted on the tls.Server object whenever a handshake @@ -297,6 +351,10 @@ which is not authorized with the list of supplied CAs. This option only has an effect if requestCert is true. Default: false.

    • +
    • checkServerIdentity(servername, cert): Provide an override for checking +server's hostname against the certificate. Should return an error if verification +fails. Return undefined if passing.

      +
    • NPNProtocols: An array or Buffer of possible NPN protocols. (Protocols should be ordered by their priority).

    • @@ -317,7 +375,7 @@ tickets on multiple instances of tls server.

      NOTE: Automatically shared between cluster module workers.

      -
    • sessionIdContext: A string containing a opaque identifier for session +

    • sessionIdContext: A string containing an opaque identifier for session resumption. If requestCert is true, the default is MD5 hash value generated from command-line. Otherwise, the default is not provided.

    • @@ -325,6 +383,10 @@ SSL version 3. The possible values depend on your installation of OpenSSL and are defined in the constant SSL_METHODS.

      +
    • secureOptions: Set server options. For example, to disable the SSLv3 +protocol set the SSL_OP_NO_SSLv3 flag. See SSL_CTX_set_options +for all available options.

      +

    Here is a simple example echo server: @@ -382,8 +444,8 @@

    openssl s_client -connect 127.0.0.1:8000
    -

    tls.connect(options, [callback])#

    -

    tls.connect(port, [host], [options], [callback])#

    +

    tls.connect(options[, callback])#

    +

    tls.connect(port[, host][, options][, callback])#

    Creates a new client connection to the given port and host (old API) or options.port and options.host. (If host is omitted, it defaults to localhost.) options should be an object which specifies: @@ -405,12 +467,12 @@ CA certs of the client in PFX or PKCS12 format.

  • key: A string or Buffer containing the private key of the client in -PEM format.

    +PEM format. (Could be an array of keys).

  • passphrase: A string of passphrase for the private key or pfx.

  • cert: A string or Buffer containing the certificate key of the client in -PEM format.

    +PEM format. (Could be an array of certs).

  • ca: An array of strings or Buffers of trusted certificates in PEM format. If this is omitted several well known "root" CAs will be used, @@ -530,8 +592,38 @@ before establishing secure communication

  • -

    tls.createSecurePair([context], [isServer], [requestCert], [rejectUnauthorized])#

    -
    Stability: 0 - Deprecated. Use tls.TLSSocket instead.

    Creates a new secure pair object with two streams, one of which reads/writes +

    tls.createSecureContext(details)#

    +

    Creates a credentials object, with the optional details being a +dictionary with keys: + +

    +
      +
    • pfx : A string or buffer holding the PFX or PKCS12 encoded private +key, certificate and CA certificates
    • +
    • key : A string holding the PEM encoded private key
    • +
    • passphrase : A string of passphrase for the private key or pfx
    • +
    • cert : A string holding the PEM encoded certificate
    • +
    • ca : Either a string or list of strings of PEM encoded CA +certificates to trust.
    • +
    • crl : Either a string or list of strings of PEM encoded CRLs +(Certificate Revocation List)
    • +
    • ciphers: A string describing the ciphers to use or exclude. +Consult +http://www.openssl.org/docs/apps/ciphers.html#CIPHER_LIST_FORMAT +for details on the format.
    • +
    • honorCipherOrder : When choosing a cipher, use the server's preferences +instead of the client preferences. For further details see tls module +documentation.
    • +
    +

    If no 'ca' details are given, then node.js will use the default +publicly trusted list of CAs as given in +

    +

    http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt. + + +

    +

    tls.createSecurePair([context][, isServer][, requestCert][, rejectUnauthorized])#

    +

    Creates a new secure pair object with two streams, one of which reads/writes encrypted data, and one reads/writes cleartext data. Generally the encrypted one is piped to/from an incoming encrypted data stream, and the cleartext one is used as a replacement for the initial encrypted stream. @@ -583,7 +675,7 @@

    This event is emitted after a new connection has been successfully -handshaked. The argument is a instance of tls.TLSSocket. It has all the +handshaked. The argument is an instance of tls.TLSSocket. It has all the common stream methods and events.

    @@ -674,8 +766,9 @@
  • Client validates the response and either destroys socket or performs a handshake.
  • -

    NOTE: issuer could be null, if certficiate is self-signed or if issuer is not -in the root certificates list. (You could provide an issuer via ca option.) +

    NOTE: issuer could be null, if the certificate is self-signed or if the issuer +is not in the root certificates list. (You could provide an issuer via ca +option.)

    NOTE: adding this event listener will have an effect only on connections @@ -687,7 +780,7 @@

    -

    server.listen(port, [host], [callback])#

    +

    server.listen(port[, host][, callback])#

    Begin accepting connections on the specified port and host. If the host is omitted, the server will accept connections directed to any IPv4 address (INADDR_ANY). @@ -716,7 +809,8 @@

    server.addContext(hostname, context)#

    Add secure context that will be used if client request's SNI hostname is matching passed hostname (wildcards can be used). context can contain -key, cert and ca. +key, cert, ca and/or any other properties from tls.createSecureContext +options argument.

    server.maxConnections#

    @@ -886,6 +980,10 @@ '74.125.127.100' or '2001:4860:a005::68'.

    +

    tlsSocket.remoteFamily#

    +

    The string representation of the remote IP family. 'IPv4' or 'IPv6'. + +

    tlsSocket.remotePort#

    The numeric representation of the remote port. For example, 443. @@ -902,26 +1000,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -92,26 +97,48 @@

    +
    + +
    - - + + - - - - - - diff -Nru nodejs-0.11.13/doc/api/tracing.json nodejs-0.11.15/doc/api/tracing.json --- nodejs-0.11.13/doc/api/tracing.json 2014-05-02 01:18:34.000000000 +0000 +++ nodejs-0.11.15/doc/api/tracing.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -{ - "source": "doc/api/tracing.markdown", - "modules": [ - { - "textRaw": "Tracing", - "name": "tracing", - "stability": 1, - "stabilityText": "Experimental", - "desc": "

    The tracing module is designed for instrumenting your Node application. It is\nnot meant for general purpose use.\n\n

    \n

    Be very careful with callbacks used in conjunction with this module\n\n

    \n

    Many of these callbacks interact directly with asynchronous subsystems in a\nsynchronous fashion. That is to say, you may be in a callback where a call to\nconsole.log() could result in an infinite recursive loop. Also of note, many\nof these callbacks are in hot execution code paths. That is to say your\ncallbacks are executed quite often in the normal operation of Node, so be wary\nof doing CPU bound or synchronous workloads in these functions. Consider a ring\nbuffer and a timer to defer processing.\n\n

    \n

    require('tracing') to use this module.\n\n

    \n", - "modules": [ - { - "textRaw": "v8", - "name": "v8", - "desc": "

    The v8 property is an [EventEmitter][], it exposes events and interfaces\nspecific to the version of v8 built with node. These interfaces are subject\nto change by upstream and are therefore not covered under the stability index.\n\n

    \n", - "events": [ - { - "textRaw": "Event: 'gc'", - "type": "event", - "name": "gc", - "desc": "

    function (before, after) { }\n\n

    \n

    Emitted each time a GC run is completed.\n\n

    \n

    before and after are objects with the following properties:\n\n

    \n
    {\n  type: 'mark-sweep-compact',\n  flags: 0,\n  timestamp: 905535650119053,\n  total_heap_size: 6295040,\n  total_heap_size_executable: 4194304,\n  total_physical_size: 6295040,\n  used_heap_size: 2855416,\n  heap_size_limit: 1535115264\n}
    \n", - "params": [] - } - ], - "methods": [ - { - "textRaw": "getHeapStatistics()", - "type": "method", - "name": "getHeapStatistics", - "desc": "

    Returns an object with the following properties\n\n

    \n
    {\n  total_heap_size: 7326976,\n  total_heap_size_executable: 4194304,\n  total_physical_size: 7326976,\n  used_heap_size: 3476208,\n  heap_size_limit: 1535115264\n}
    \n", - "signatures": [ - { - "params": [] - } - ] - } - ], - "type": "module", - "displayName": "v8" - } - ], - "type": "module", - "displayName": "Tracing" - }, - { - "textRaw": "Async Listeners", - "name": "async_listeners", - "desc": "

    The AsyncListener API is the JavaScript interface for the AsyncWrap\nclass which allows developers to be notified about key events in the\nlifetime of an asynchronous event. Node performs a lot of asynchronous\nevents internally, and significant use of this API may have a\nsignificant performance impact on your application.\n\n\n

    \n", - "methods": [ - { - "textRaw": "tracing.createAsyncListener(callbacksObj[, userData])", - "type": "method", - "name": "createAsyncListener", - "signatures": [ - { - "params": [ - { - "textRaw": "`callbacksObj` {Object} Contains optional callbacks that will fire at specific times in the life cycle of the asynchronous event. ", - "name": "callbacksObj", - "type": "Object", - "desc": "Contains optional callbacks that will fire at specific times in the life cycle of the asynchronous event." - }, - { - "textRaw": "`userData` {Value} a value that will be passed to all callbacks. ", - "name": "userData", - "type": "Value", - "desc": "a value that will be passed to all callbacks." - } - ] - }, - { - "params": [ - { - "name": "callbacksObj[" - }, - { - "name": "userData" - } - ] - } - ], - "desc": "

    Returns a constructed AsyncListener object.\n\n

    \n

    To begin capturing asynchronous events pass either the callbacksObj or\npass an existing AsyncListener instance to [tracing.addAsyncListener()][].\nThe same AsyncListener instance can only be added once to the active\nqueue, and subsequent attempts to add the instance will be ignored.\n\n

    \n

    To stop capturing pass the AsyncListener instance to\n[tracing.removeAsyncListener()][]. This does not mean the\nAsyncListener previously added will stop triggering callbacks. Once\nattached to an asynchronous event it will persist with the lifetime of the\nasynchronous call stack.\n\n

    \n

    Explanation of function parameters:\n\n\n

    \n

    callbacksObj: An Object which may contain several optional fields:\n\n

    \n
      \n
    • create(userData): A Function called when an asynchronous\nevent is instantiated. If a Value is returned then it will be attached\nto the event and overwrite any value that had been passed to\ntracing.createAsyncListener()'s userData argument. If an initial\nuserData was passed when created, then create() will\nreceive that as a function argument.

      \n
    • \n
    • before(context, userData): A Function that is called immediately\nbefore the asynchronous callback is about to run. It will be passed both\nthe context (i.e. this) of the calling function and the userData\neither returned from create() or passed during construction (if\neither occurred).

      \n
    • \n
    • after(context, userData): A Function called immediately after\nthe asynchronous event's callback has run. Note this will not be called\nif the callback throws and the error is not handled.

      \n
    • \n
    • error(userData, error): A Function called if the event's\ncallback threw. If this registered callback returns true then Node will\nassume the error has been properly handled and resume execution normally.\nWhen multiple error() callbacks have been registered only one of\nthose callbacks needs to return true for AsyncListener to accept that\nthe error has been handled, but all error() callbacks will always be run.

      \n
    • \n
    \n

    userData: A Value (i.e. anything) that will be, by default,\nattached to all new event instances. This will be overwritten if a Value\nis returned by create().\n\n

    \n

    Here is an example of overwriting the userData:\n\n

    \n
    tracing.createAsyncListener({\n  create: function listener(value) {\n    // value === true\n    return false;\n}, {\n  before: function before(context, value) {\n    // value === false\n  }\n}, true);
    \n

    Note: The [EventEmitter][], while used to emit status of an asynchronous\nevent, is not itself asynchronous. So create() will not fire when\nan event is added, and before()/after() will not fire when emitted\ncallbacks are called.\n\n\n

    \n" - }, - { - "textRaw": "tracing.addAsyncListener(callbacksObj[, userData])", - "type": "method", - "name": "addAsyncListener", - "desc": "

    Returns a constructed AsyncListener object and immediately adds it to\nthe listening queue to begin capturing asynchronous events.\n\n

    \n

    Function parameters can either be the same as\n[tracing.createAsyncListener()][], or a constructed AsyncListener\nobject.\n\n

    \n

    Example usage for capturing errors:\n\n

    \n
    var fs = require('fs');\n\nvar cntr = 0;\nvar key = tracing.addAsyncListener({\n  create: function onCreate() {\n    return { uid: cntr++ };\n  },\n  before: function onBefore(context, storage) {\n    // Write directly to stdout or we'll enter a recursive loop\n    fs.writeSync(1, 'uid: ' + storage.uid + ' is about to run\\n');\n  },\n  after: function onAfter(context, storage) {\n    fs.writeSync(1, 'uid: ' + storage.uid + ' ran\\n');\n  },\n  error: function onError(storage, err) {\n    // Handle known errors\n    if (err.message === 'everything is fine') {\n      // Writing to stderr this time.\n      fs.writeSync(2, 'handled error just threw:\\n');\n      fs.writeSync(2, err.stack + '\\n');\n      return true;\n    }\n  }\n});\n\nprocess.nextTick(function() {\n  throw new Error('everything is fine');\n});\n\n// Output:\n// uid: 0 is about to run\n// handled error just threw:\n// Error: really, it's ok\n//     at /tmp/test2.js:27:9\n//     at process._tickCallback (node.js:583:11)\n//     at Function.Module.runMain (module.js:492:11)\n//     at startup (node.js:123:16)\n//     at node.js:1012:3
    \n", - "signatures": [ - { - "params": [ - { - "name": "asyncListener" - } - ] - }, - { - "params": [ - { - "name": "callbacksObj[" - }, - { - "name": "userData" - } - ] - } - ] - }, - { - "textRaw": "tracing.addAsyncListener(asyncListener)", - "type": "method", - "name": "addAsyncListener", - "desc": "

    Returns a constructed AsyncListener object and immediately adds it to\nthe listening queue to begin capturing asynchronous events.\n\n

    \n

    Function parameters can either be the same as\n[tracing.createAsyncListener()][], or a constructed AsyncListener\nobject.\n\n

    \n

    Example usage for capturing errors:\n\n

    \n
    var fs = require('fs');\n\nvar cntr = 0;\nvar key = tracing.addAsyncListener({\n  create: function onCreate() {\n    return { uid: cntr++ };\n  },\n  before: function onBefore(context, storage) {\n    // Write directly to stdout or we'll enter a recursive loop\n    fs.writeSync(1, 'uid: ' + storage.uid + ' is about to run\\n');\n  },\n  after: function onAfter(context, storage) {\n    fs.writeSync(1, 'uid: ' + storage.uid + ' ran\\n');\n  },\n  error: function onError(storage, err) {\n    // Handle known errors\n    if (err.message === 'everything is fine') {\n      // Writing to stderr this time.\n      fs.writeSync(2, 'handled error just threw:\\n');\n      fs.writeSync(2, err.stack + '\\n');\n      return true;\n    }\n  }\n});\n\nprocess.nextTick(function() {\n  throw new Error('everything is fine');\n});\n\n// Output:\n// uid: 0 is about to run\n// handled error just threw:\n// Error: really, it's ok\n//     at /tmp/test2.js:27:9\n//     at process._tickCallback (node.js:583:11)\n//     at Function.Module.runMain (module.js:492:11)\n//     at startup (node.js:123:16)\n//     at node.js:1012:3
    \n", - "signatures": [ - { - "params": [ - { - "name": "asyncListener" - } - ] - } - ] - }, - { - "textRaw": "tracing.removeAsyncListener(asyncListener)", - "type": "method", - "name": "removeAsyncListener", - "desc": "

    Removes the AsyncListener from the listening queue.\n\n

    \n

    Removing the AsyncListener from the active queue does not mean the\nasyncListener callbacks will cease to fire on the events they've been\nregistered. Subsequently, any asynchronous events fired during the\nexecution of a callback will also have the same asyncListener callbacks\nattached for future execution. For example:\n\n

    \n
    var fs = require('fs');\n\nvar key = tracing.createAsyncListener({\n  create: function asyncListener() {\n    // Write directly to stdout or we'll enter a recursive loop\n    fs.writeSync(1, 'You summoned me?\\n');\n  }\n});\n\n// We want to begin capturing async events some time in the future.\nsetTimeout(function() {\n  tracing.addAsyncListener(key);\n\n  // Perform a few additional async events.\n  setTimeout(function() {\n    setImmediate(function() {\n      process.nextTick(function() { });\n    });\n  });\n\n  // Removing the listener doesn't mean to stop capturing events that\n  // have already been added.\n  tracing.removeAsyncListener(key);\n}, 100);\n\n// Output:\n// You summoned me?\n// You summoned me?\n// You summoned me?\n// You summoned me?
    \n

    The fact that we logged 4 asynchronous events is an implementation detail\nof Node's [Timers][].\n\n

    \n

    To stop capturing from a specific asynchronous event stack\ntracing.removeAsyncListener() must be called from within the call\nstack itself. For example:\n\n

    \n
    var fs = require('fs');\n\nvar key = tracing.createAsyncListener({\n  create: function asyncListener() {\n    // Write directly to stdout or we'll enter a recursive loop\n    fs.writeSync(1, 'You summoned me?\\n');\n  }\n});\n\n// We want to begin capturing async events some time in the future.\nsetTimeout(function() {\n  tracing.addAsyncListener(key);\n\n  // Perform a few additional async events.\n  setImmediate(function() {\n    // Stop capturing from this call stack.\n    tracing.removeAsyncListener(key);\n\n    process.nextTick(function() { });\n  });\n}, 100);\n\n// Output:\n// You summoned me?
    \n

    The user must be explicit and always pass the AsyncListener they wish\nto remove. It is not possible to simply remove all listeners at once.\n\n\n

    \n", - "signatures": [ - { - "params": [ - { - "name": "asyncListener" - } - ] - } - ] - } - ], - "type": "module", - "displayName": "Async Listeners" - } - ] -} diff -Nru nodejs-0.11.13/doc/api/tracing.markdown nodejs-0.11.15/doc/api/tracing.markdown --- nodejs-0.11.13/doc/api/tracing.markdown 2014-05-02 00:49:51.000000000 +0000 +++ nodejs-0.11.15/doc/api/tracing.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,273 +0,0 @@ -# Tracing - - Stability: 1 - Experimental - -The tracing module is designed for instrumenting your Node application. It is -not meant for general purpose use. - -***Be very careful with callbacks used in conjunction with this module*** - -Many of these callbacks interact directly with asynchronous subsystems in a -synchronous fashion. That is to say, you may be in a callback where a call to -`console.log()` could result in an infinite recursive loop. Also of note, many -of these callbacks are in hot execution code paths. That is to say your -callbacks are executed quite often in the normal operation of Node, so be wary -of doing CPU bound or synchronous workloads in these functions. Consider a ring -buffer and a timer to defer processing. - -`require('tracing')` to use this module. - -## v8 - -The `v8` property is an [EventEmitter][], it exposes events and interfaces -specific to the version of `v8` built with node. These interfaces are subject -to change by upstream and are therefore not covered under the stability index. - -### Event: 'gc' - -`function (before, after) { }` - -Emitted each time a GC run is completed. 
- -`before` and `after` are objects with the following properties: - -``` -{ - type: 'mark-sweep-compact', - flags: 0, - timestamp: 905535650119053, - total_heap_size: 6295040, - total_heap_size_executable: 4194304, - total_physical_size: 6295040, - used_heap_size: 2855416, - heap_size_limit: 1535115264 -} -``` - -### getHeapStatistics() - -Returns an object with the following properties - -``` -{ - total_heap_size: 7326976, - total_heap_size_executable: 4194304, - total_physical_size: 7326976, - used_heap_size: 3476208, - heap_size_limit: 1535115264 -} -``` - - -# Async Listeners - -The `AsyncListener` API is the JavaScript interface for the `AsyncWrap` -class which allows developers to be notified about key events in the -lifetime of an asynchronous event. Node performs a lot of asynchronous -events internally, and significant use of this API may have a -**significant performance impact** on your application. - - -## tracing.createAsyncListener(callbacksObj[, userData]) - -* `callbacksObj` {Object} Contains optional callbacks that will fire at -specific times in the life cycle of the asynchronous event. -* `userData` {Value} a value that will be passed to all callbacks. - -Returns a constructed `AsyncListener` object. - -To begin capturing asynchronous events pass either the `callbacksObj` or -pass an existing `AsyncListener` instance to [`tracing.addAsyncListener()`][]. -The same `AsyncListener` instance can only be added once to the active -queue, and subsequent attempts to add the instance will be ignored. - -To stop capturing pass the `AsyncListener` instance to -[`tracing.removeAsyncListener()`][]. This does _not_ mean the -`AsyncListener` previously added will stop triggering callbacks. Once -attached to an asynchronous event it will persist with the lifetime of the -asynchronous call stack. 
- -Explanation of function parameters: - - -`callbacksObj`: An `Object` which may contain several optional fields: - -* `create(userData)`: A `Function` called when an asynchronous -event is instantiated. If a `Value` is returned then it will be attached -to the event and overwrite any value that had been passed to -`tracing.createAsyncListener()`'s `userData` argument. If an initial -`userData` was passed when created, then `create()` will -receive that as a function argument. - -* `before(context, userData)`: A `Function` that is called immediately -before the asynchronous callback is about to run. It will be passed both -the `context` (i.e. `this`) of the calling function and the `userData` -either returned from `create()` or passed during construction (if -either occurred). - -* `after(context, userData)`: A `Function` called immediately after -the asynchronous event's callback has run. Note this will not be called -if the callback throws and the error is not handled. - -* `error(userData, error)`: A `Function` called if the event's -callback threw. If this registered callback returns `true` then Node will -assume the error has been properly handled and resume execution normally. -When multiple `error()` callbacks have been registered only **one** of -those callbacks needs to return `true` for `AsyncListener` to accept that -the error has been handled, but all `error()` callbacks will always be run. - -`userData`: A `Value` (i.e. anything) that will be, by default, -attached to all new event instances. This will be overwritten if a `Value` -is returned by `create()`. - -Here is an example of overwriting the `userData`: - - tracing.createAsyncListener({ - create: function listener(value) { - // value === true - return false; - }, { - before: function before(context, value) { - // value === false - } - }, true); - -**Note:** The [EventEmitter][], while used to emit status of an asynchronous -event, is not itself asynchronous. 
So `create()` will not fire when -an event is added, and `before()`/`after()` will not fire when emitted -callbacks are called. - - -## tracing.addAsyncListener(callbacksObj[, userData]) -## tracing.addAsyncListener(asyncListener) - -Returns a constructed `AsyncListener` object and immediately adds it to -the listening queue to begin capturing asynchronous events. - -Function parameters can either be the same as -[`tracing.createAsyncListener()`][], or a constructed `AsyncListener` -object. - -Example usage for capturing errors: - - var fs = require('fs'); - - var cntr = 0; - var key = tracing.addAsyncListener({ - create: function onCreate() { - return { uid: cntr++ }; - }, - before: function onBefore(context, storage) { - // Write directly to stdout or we'll enter a recursive loop - fs.writeSync(1, 'uid: ' + storage.uid + ' is about to run\n'); - }, - after: function onAfter(context, storage) { - fs.writeSync(1, 'uid: ' + storage.uid + ' ran\n'); - }, - error: function onError(storage, err) { - // Handle known errors - if (err.message === 'everything is fine') { - // Writing to stderr this time. - fs.writeSync(2, 'handled error just threw:\n'); - fs.writeSync(2, err.stack + '\n'); - return true; - } - } - }); - - process.nextTick(function() { - throw new Error('everything is fine'); - }); - - // Output: - // uid: 0 is about to run - // handled error just threw: - // Error: really, it's ok - // at /tmp/test2.js:27:9 - // at process._tickCallback (node.js:583:11) - // at Function.Module.runMain (module.js:492:11) - // at startup (node.js:123:16) - // at node.js:1012:3 - -## tracing.removeAsyncListener(asyncListener) - -Removes the `AsyncListener` from the listening queue. - -Removing the `AsyncListener` from the active queue does _not_ mean the -`asyncListener` callbacks will cease to fire on the events they've been -registered. 
Subsequently, any asynchronous events fired during the -execution of a callback will also have the same `asyncListener` callbacks -attached for future execution. For example: - - var fs = require('fs'); - - var key = tracing.createAsyncListener({ - create: function asyncListener() { - // Write directly to stdout or we'll enter a recursive loop - fs.writeSync(1, 'You summoned me?\n'); - } - }); - - // We want to begin capturing async events some time in the future. - setTimeout(function() { - tracing.addAsyncListener(key); - - // Perform a few additional async events. - setTimeout(function() { - setImmediate(function() { - process.nextTick(function() { }); - }); - }); - - // Removing the listener doesn't mean to stop capturing events that - // have already been added. - tracing.removeAsyncListener(key); - }, 100); - - // Output: - // You summoned me? - // You summoned me? - // You summoned me? - // You summoned me? - -The fact that we logged 4 asynchronous events is an implementation detail -of Node's [Timers][]. - -To stop capturing from a specific asynchronous event stack -`tracing.removeAsyncListener()` must be called from within the call -stack itself. For example: - - var fs = require('fs'); - - var key = tracing.createAsyncListener({ - create: function asyncListener() { - // Write directly to stdout or we'll enter a recursive loop - fs.writeSync(1, 'You summoned me?\n'); - } - }); - - // We want to begin capturing async events some time in the future. - setTimeout(function() { - tracing.addAsyncListener(key); - - // Perform a few additional async events. - setImmediate(function() { - // Stop capturing from this call stack. - tracing.removeAsyncListener(key); - - process.nextTick(function() { }); - }); - }, 100); - - // Output: - // You summoned me? - -The user must be explicit and always pass the `AsyncListener` they wish -to remove. It is not possible to simply remove all listeners at once. 
- - -[EventEmitter]: events.html#events_class_events_eventemitter -[Timers]: timers.html -[`tracing.createAsyncListener()`]: #tracing_tracing_createasynclistener_asynclistener_callbacksobj_storagevalue -[`tracing.addAsyncListener()`]: #tracing_tracing_addasynclistener_asynclistener -[`tracing.removeAsyncListener()`]: #tracing_tracing_removeasynclistener_asynclistener diff -Nru nodejs-0.11.13/doc/api/tty.html nodejs-0.11.15/doc/api/tty.html --- nodejs-0.11.13/doc/api/tty.html 2014-05-02 01:18:34.000000000 +0000 +++ nodejs-0.11.15/doc/api/tty.html 2015-01-20 22:11:38.000000000 +0000 @@ -2,36 +2,41 @@ - TTY Node.js v0.11.13 Manual & Documentation + TTY Node.js v0.11.15 Manual & Documentation + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -143,26 +148,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -46,7 +51,7 @@

    Table of Contents

    • URL @@ -76,6 +81,9 @@
    • protocol: The request protocol, lowercased.

      Example: 'http:'

    • +
    • slashes: The protocol requires slashes after the colon

      +

      Example: true or false

      +
    • host: The full lowercased host portion of the URL, including port information.

      Example: 'host.com:8080'

      @@ -111,13 +119,14 @@

      The following methods are provided by the URL module:

      -

      url.parse(urlStr, [parseQueryString], [slashesDenoteHost])#

      +

      url.parse(urlStr[, parseQueryString][, slashesDenoteHost])#

      Take a URL string, and return an object.

      -

      Pass true as the second argument to also parse -the query string using the querystring module. -Defaults to false. +

      Pass true as the second argument to also parse the query string using the +querystring module. If true then the query property will always be +assigned an object, and the search property will always be a (possibly +empty) string. Defaults to false.

      Pass true as the third argument to treat //foo/bar as @@ -129,6 +138,9 @@

      Take a parsed URL object, and return a formatted URL string.

      +

      Here's how the formatting process works: + +

      • href will be ignored.
      • protocol is treated the same with or without the trailing : (colon).
          @@ -138,15 +150,23 @@ be postfixed with : (colon)
      • +
      • slashes set to true if the protocol requires :// (colon-slash-slash)
          +
        • Only needs to be set for protocols not previously listed as requiring +slashes, such as mongodb://localhost:8000/
        • +
        +
      • auth will be used if present.
      • hostname will only be used if host is absent.
      • port will only be used if host is absent.
      • host will be used in place of hostname and port
      • -
      • pathname is treated the same with or without the leading / (slash)
      • -
      • search will be used in place of query
      • +
      • pathname is treated the same with or without the leading / (slash).
      • +
      • path is treated the same with pathname but able to contain query as well.
      • +
      • search will be used in place of query.
          +
        • It is treated the same with or without the leading ? (question mark)
        • +
        +
      • query (object; see querystring) will only be used if search is absent.
      • -
      • search is treated the same with or without the leading ? (question mark)
      • -
      • hash is treated the same with or without the leading # (pound sign, anchor)
      • +
      • hash is treated the same with or without the leading # (pound sign, anchor).

      url.resolve(from, to)#

      Take a base URL, and a href URL, and resolve them as a browser would for @@ -160,26 +180,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -47,9 +52,9 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -46,16 +51,17 @@

    Table of Contents

    -

    script.runInContext(contextifiedSandbox, [options])#

    +

    script.runInContext(contextifiedSandbox[, options])#

    Similar to vm.runInContext but a method of a precompiled Script object. script.runInContext runs script's compiled code in contextifiedSandbox and returns the result. Running code does not have access to local scope. @@ -296,7 +317,7 @@

    -

    script.runInNewContext([sandbox], [options])#

    +

    script.runInNewContext([sandbox][, options])#

    Similar to vm.runInNewContext but a method of a precompiled Script object. script.runInNewContext contextifies sandbox if passed or creates a new contextified sandbox if it's omitted, and then runs script's compiled code @@ -334,26 +355,48 @@

    +
    + +
    - - + + + - -
    - - - + + +
    - - +
    -

    Node.js v0.11.13 Manual & Documentation

    +

    Node.js v0.11.15 Manual & Documentation

    Index | @@ -68,20 +73,20 @@

  • Class: zlib.InflateRaw
  • Class: zlib.Unzip
  • Convenience Methods
  • -
  • zlib.deflate(buf, [options], callback)
  • -
  • zlib.deflateSync(buf, [options])
  • -
  • zlib.deflateRaw(buf, [options], callback)
  • -
  • zlib.deflateRawSync(buf, [options])
  • -
  • zlib.gzip(buf, [options], callback)
  • -
  • zlib.gzipSync(buf, [options])
  • -
  • zlib.gunzip(buf, [options], callback)
  • -
  • zlib.gunzipSync(buf, [options])
  • -
  • zlib.inflate(buf, [options], callback)
  • -
  • zlib.inflateSync(buf, [options])
  • -
  • zlib.inflateRaw(buf, [options], callback)
  • -
  • zlib.inflateRawSync(buf, [options])
  • -
  • zlib.unzip(buf, [options], callback)
  • -
  • zlib.unzipSync(buf, [options])
  • +
  • zlib.deflate(buf[, options], callback)
  • +
  • zlib.deflateSync(buf[, options])
  • +
  • zlib.deflateRaw(buf[, options], callback)
  • +
  • zlib.deflateRawSync(buf[, options])
  • +
  • zlib.gzip(buf[, options], callback)
  • +
  • zlib.gzipSync(buf[, options])
  • +
  • zlib.gunzip(buf[, options], callback)
  • +
  • zlib.gunzipSync(buf[, options])
  • +
  • zlib.inflate(buf[, options], callback)
  • +
  • zlib.inflateSync(buf[, options])
  • +
  • zlib.inflateRaw(buf[, options], callback)
  • +
  • zlib.inflateRawSync(buf[, options])
  • +
  • zlib.unzip(buf[, options], callback)
  • +
  • zlib.unzipSync(buf[, options])
  • Options
  • Memory Usage Tuning
  • Constants
  • @@ -295,38 +300,38 @@ without a callback.

    -

    zlib.deflate(buf, [options], callback)#

    -

    zlib.deflateSync(buf, [options])#

    +

    zlib.deflate(buf[, options], callback)#

    +

    zlib.deflateSync(buf[, options])#

    Compress a string with Deflate.

    -

    zlib.deflateRaw(buf, [options], callback)#

    -

    zlib.deflateRawSync(buf, [options])#

    +

    zlib.deflateRaw(buf[, options], callback)#

    +

    zlib.deflateRawSync(buf[, options])#

    Compress a string with DeflateRaw.

    -

    zlib.gzip(buf, [options], callback)#

    -

    zlib.gzipSync(buf, [options])#

    +

    zlib.gzip(buf[, options], callback)#

    +

    zlib.gzipSync(buf[, options])#

    Compress a string with Gzip.

    -

    zlib.gunzip(buf, [options], callback)#

    -

    zlib.gunzipSync(buf, [options])#

    +

    zlib.gunzip(buf[, options], callback)#

    +

    zlib.gunzipSync(buf[, options])#

    Decompress a raw Buffer with Gunzip.

    -

    zlib.inflate(buf, [options], callback)#

    -

    zlib.inflateSync(buf, [options])#

    +

    zlib.inflate(buf[, options], callback)#

    +

    zlib.inflateSync(buf[, options])#

    Decompress a raw Buffer with Inflate.

    -

    zlib.inflateRaw(buf, [options], callback)#

    -

    zlib.inflateRawSync(buf, [options])#

    +

    zlib.inflateRaw(buf[, options], callback)#

    +

    zlib.inflateRawSync(buf[, options])#

    Decompress a raw Buffer with InflateRaw.

    -

    zlib.unzip(buf, [options], callback)#

    -

    zlib.unzipSync(buf, [options])#

    +

    zlib.unzip(buf[, options], callback)#

    +

    zlib.unzipSync(buf[, options])#

    Decompress a raw Buffer with Unzip.

    @@ -485,26 +490,48 @@
    +
    + +
    - - + + + - -
    - - - + + +
    - - +

    Node.js __VERSION__ Manual & Documentation

    @@ -52,26 +57,48 @@
    +
    + + - - + +